diff --git a/.coveragerc b/.coveragerc
new file mode 100644
index 0000000000..81bdb1b6f9
--- /dev/null
+++ b/.coveragerc
@@ -0,0 +1,7 @@
+[run]
+branch = True
+source = cinder
+omit = cinder/tests/*,cinder/openstack/common/*
+
+[report]
+ignore-errors = True
diff --git a/.gitignore b/.gitignore
index cae3ad60ff..1ab25d67f4 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,24 +1,37 @@
+*.DS_Store
+*.log
+*.mo
+*.pyc
+*.sqlite
+.*.sw?
+.cinder-venv
+.coverage
+.testrepository
+.tox
+.venv
+.idea
 AUTHORS
 Authors
-ChangeLog
-*.pyc
-*.DS_Store
-local_settings.py
-CA/
-keeper
-instances
-keys
 build/*
 build-stamp
+CA/
+ChangeLog
 cinder.egg-info
-.cinder-venv
-.venv
-.tox
-*.sqlite
-*.log
-*.mo
-tools/conf/cinder.conf*
 cover/*
-dist/*
-.coverage
 covhtml
+dist/*
+instances
+keeper
+keys
+local_settings.py
+tools/conf/cinder.conf*
+tools/lintstack.head.py
+tools/pylint_exceptions
+tags
+# Files created by Sphinx build
+doc/build
+.autogenerated
+
+# Development environment files
+.project
+.pydevproject
diff --git a/.mailmap b/.mailmap
index 408a670b5e..5665c7478f 100644
--- a/.mailmap
+++ b/.mailmap
@@ -26,6 +26,7 @@
+
@@ -79,3 +80,5 @@
+
+
diff --git a/.testr.conf b/.testr.conf
new file mode 100644
index 0000000000..8e23325b5b
--- /dev/null
+++ b/.testr.conf
@@ -0,0 +1,8 @@
+[DEFAULT]
+test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \
+             OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \
+             OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} \
+             ${PYTHON:-python} -m subunit.run discover -t ./ ./cinder/tests $LISTOPT $IDOPTION
+
+test_id_option=--load-list $IDFILE
+test_list_option=--list
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000000..836c0a0234
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,12 @@
+If you would like to contribute to the development of OpenStack,
+you must follow the steps in the "If you're a developer, start here"
+section of this page: [http://wiki.openstack.org/HowToContribute](http://wiki.openstack.org/HowToContribute#If_you.27re_a_developer.2C_start_here:)
+
+Once those steps have been completed, changes to OpenStack
+should be submitted for review via the Gerrit tool, following
+the workflow documented at [http://wiki.openstack.org/GerritWorkflow](http://wiki.openstack.org/GerritWorkflow).
+
+Pull requests submitted through GitHub will be ignored.
+
+Bugs should be filed [on Launchpad](https://bugs.launchpad.net/cinder),
+not in GitHub's issue tracker.
diff --git a/HACKING.rst b/HACKING.rst
index e9c0162f54..cfee60986f 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -1,189 +1,25 @@
 Cinder Style Commandments
-=======================
+=========================
 
-- Step 1: Read http://www.python.org/dev/peps/pep-0008/
-- Step 2: Read http://www.python.org/dev/peps/pep-0008/ again
-- Step 3: Read on
+- Step 1: Read the OpenStack Style Commandments
+  http://docs.openstack.org/developer/hacking/
+- Step 2: Read on
 
+Cinder Specific Commandments
+----------------------------
 
 General
 -------
-- Put two newlines between top-level code (funcs, classes, etc)
-- Put one newline between methods in classes and anywhere else
-- Do not write "except:", use "except Exception:" at the very least
-- Include your name with TODOs as in "#TODO(termie)"
-- Do not shadow a built-in or reserved word. Example::
+- Use 'raise' instead of 'raise e' to preserve the original traceback of the
+  exception being re-raised::
 
-    def list():
-        return [1, 2, 3]
+    except Exception as e:
+        ...
+        raise e  # BAD
 
-    mylist = list() # BAD, shadows `list` built-in
+    except Exception:
+        ...
+        raise  # OKAY
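A quick, self-contained illustration of why this commandment matters (a
standalone sketch, not part of the patch; the function names are invented).
Under the Python 2 that this codebase targets, ``raise e`` discards the
original traceback, while a bare ``raise`` keeps it pointing at the real
failure site::

    import traceback

    def fail():
        raise ValueError("original failure site")

    def reraise_bare():
        try:
            fail()
        except Exception:
            raise          # traceback still points into fail()

    def reraise_bound():
        try:
            fail()
        except Exception as e:
            raise e        # Python 2 reports this line instead

    for reraiser in (reraise_bare, reraise_bound):
        try:
            reraiser()
        except Exception:
            traceback.print_exc()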
 
-    class Foo(object):
-        def list(self):
-            return [1, 2, 3]
-
-    mylist = Foo().list() # OKAY, does not shadow built-in
-
-
-Imports
--------
-- Do not import objects, only modules (*)
-- Do not import more than one module per line (*)
-- Do not make relative imports
-- Order your imports by the full module path
-- Organize your imports according to the following template
-
-(*) exceptions are:
-
-- imports from ``migrate`` package
-- imports from ``sqlalchemy`` package
-- imports from ``cinder.db.sqlalchemy.session`` module
-
-Example::
-
-  # vim: tabstop=4 shiftwidth=4 softtabstop=4
-  {{stdlib imports in human alphabetical order}}
-  \n
-  {{third-party lib imports in human alphabetical order}}
-  \n
-  {{cinder imports in human alphabetical order}}
-  \n
-  \n
-  {{begin your code}}
-
-
-Human Alphabetical Order Examples
----------------------------------
-Example::
-
-  import httplib
-  import logging
-  import random
-  import StringIO
-  import time
-  import unittest
-
-  import eventlet
-  import webob.exc
-
-  import cinder.api.ec2
-  from cinder.api import openstack
-  from cinder.auth import users
-  from cinder.endpoint import cloud
-  import cinder.flags
-  from cinder import test
-
-
-Docstrings
-----------
-Example::
-
-  """A one line docstring looks like this and ends in a period."""
-
-
-  """A multi line docstring has a one-line summary, less than 80 characters.
-
-  Then a new paragraph after a newline that explains in more detail any
-  general information about the function, class or method. Example usages
-  are also great to have here if it is a complex class for function.
-
-  When writing the docstring for a class, an extra line should be placed
-  after the closing quotations. For more in-depth explanations for these
-  decisions see http://www.python.org/dev/peps/pep-0257/
-
-  If you are going to describe parameters and return values, use Sphinx, the
-  appropriate syntax is as follows.
-
-  :param foo: the foo parameter
-  :param bar: the bar parameter
-  :returns: return_type -- description of the return value
-  :returns: description of the return value
-  :raises: AttributeError, KeyError
-  """
-
-
-Dictionaries/Lists
-------------------
-If a dictionary (dict) or list object is longer than 80 characters, its items
-should be split with newlines. Embedded iterables should have their items
-indented. Additionally, the last item in the dictionary should have a trailing
-comma. This increases readability and simplifies future diffs.
-
-Example::
-
-  my_dictionary = {
-      "image": {
-          "name": "Just a Snapshot",
-          "size": 2749573,
-          "properties": {
-              "user_id": 12,
-              "arch": "x86_64",
-          },
-          "things": [
-              "thing_one",
-              "thing_two",
-          ],
-          "status": "ACTIVE",
-      },
-  }
-
-
-Calling Methods
----------------
-Calls to methods 80 characters or longer should format each argument with
-newlines. This is not a requirement, but a guideline::
-
-    unnecessarily_long_function_name('string one',
-                                     'string two',
-                                     kwarg1=constants.ACTIVE,
-                                     kwarg2=['a', 'b', 'c'])
-
-
-Rather than constructing parameters inline, it is better to break things up::
-
-    list_of_strings = [
-        'what_a_long_string',
-        'not as long',
-    ]
-
-    dict_of_numbers = {
-        'one': 1,
-        'two': 2,
-        'twenty four': 24,
-    }
-
-    object_one.call_a_method('string three',
-                             'string four',
-                             kwarg1=list_of_strings,
-                             kwarg2=dict_of_numbers)
-
-
-Internationalization (i18n) Strings
------------------------------------
-In order to support multiple languages, we have a mechanism to support
-automatic translations of exception and log strings.
-
-Example::
-
-    msg = _("An error occurred")
-    raise HTTPBadRequest(explanation=msg)
-
-If you have a variable to place within the string, first internationalize the
-template string then do the replacement.
-
-Example::
-
-    msg = _("Missing parameter: %s") % ("flavor",)
-    LOG.error(msg)
-
-If you have multiple variables to place in the string, use keyword parameters.
-This helps our translators reorder parameters when needed.
-
-Example::
-
-    msg = _("The server with id %(s_id)s has no key %(m_key)s")
-    LOG.error(msg % {"s_id": "1234", "m_key": "imageId"})
 
 
 Creating Unit Tests
@@ -194,20 +30,9 @@ bug that had no unit test, a new passing unit test should be added. If a
 submitted bug fix does have a unit test, be sure to add a new one that fails
 without the patch and passes with the patch.
 
-For more information on creating unit tests and utilizing the testing
-infrastructure in OpenStack Cinder, please read cinder/testing/README.rst.
-
-
-openstack-common
-----------------
-
-A number of modules from openstack-common are imported into the project.
+Cinder is transitioning to use mock, rather than mox, and so new tests should
+use mock only.
 
-These modules are "incubating" in openstack-common and are kept in sync
-with the help of openstack-common's update.py script. See:
-
-  http://wiki.openstack.org/CommonLibrary#Incubation
-
-The copy of the code should never be directly modified here. Please
-always update openstack-common first and then run the script to copy
-the changes across.
+For more information on creating unit tests and utilizing the testing
+infrastructure in OpenStack Cinder, please read the Cinder testing
+`README.rst `_.
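The mock-over-mox note above is easiest to see with a small test. A minimal
sketch of a mock-based unit test (standalone and illustrative only; the fake
objects are invented, and ``testtools`` is assumed simply because Cinder's
testr-based suite already depends on it)::

    import mock
    import testtools

    class ExampleTestCase(testtools.TestCase):

        def test_stubbed_volume_get(self):
            # mock replaces the mox record/replay/verify dance with plain
            # stubs plus after-the-fact assertions.
            fake_db = mock.Mock()
            fake_db.volume_get.return_value = {'id': 'vol-1',
                                               'status': 'available'}

            volume = fake_db.volume_get('ctxt', 'vol-1')

            self.assertEqual('available', volume['status'])
            fake_db.volume_get.assert_called_once_with('ctxt', 'vol-1')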
""" + import eventlet + eventlet.monkey_patch() import os import sys +from oslo.config import cfg -possible_topdir = os.path.normpath(os.path.join(os.path.abspath( - sys.argv[0]), os.pardir, os.pardir)) +possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), + os.pardir, + os.pardir)) if os.path.exists(os.path.join(possible_topdir, "cinder", "__init__.py")): sys.path.insert(0, possible_topdir) +from cinder.openstack.common import gettextutils +gettextutils.install('cinder', lazy=False) -from cinder import flags +# Need to register global_opts +from cinder.common import config # noqa from cinder.openstack.common import log as logging from cinder import service from cinder import utils +from cinder import version -LOG = logging.getLogger('cinder.all') +CONF = cfg.CONF + if __name__ == '__main__': - flags.parse_args(sys.argv) + CONF(sys.argv[1:], project='cinder', + version=version.version_string()) logging.setup("cinder") + LOG = logging.getLogger('cinder.all') + utils.monkey_patch() - servers = [] + launcher = service.process_launcher() # cinder-api try: - servers.append(service.WSGIService('osapi_volume')) + server = service.WSGIService('osapi_volume') + launcher.launch_service(server, workers=server.workers or 1) except (Exception, SystemExit): - logging.exception(_('Failed to load %s') % '%s-api' % api) + LOG.exception(_('Failed to load osapi_volume')) - for binary in ['cinder-volume', 'cinder-scheduler']: + for binary in ['cinder-volume', 'cinder-scheduler', 'cinder-backup']: try: - servers.append(service.Service.create(binary=binary)) + launcher.launch_service(service.Service.create(binary=binary)) except (Exception, SystemExit): LOG.exception(_('Failed to load %s'), binary) - service.serve(*servers) - service.wait() + launcher.wait() diff --git a/bin/cinder-api b/bin/cinder-api index 33d389fac0..953575c9ef 100755 --- a/bin/cinder-api +++ b/bin/cinder-api @@ -1,6 +1,4 @@ #!/usr/bin/env python -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. @@ -25,22 +23,35 @@ eventlet.monkey_patch() import os import sys +from oslo.config import cfg -possible_topdir = os.path.normpath(os.path.join(os.path.abspath( - sys.argv[0]), os.pardir, os.pardir)) +possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), + os.pardir, + os.pardir)) if os.path.exists(os.path.join(possible_topdir, "cinder", "__init__.py")): sys.path.insert(0, possible_topdir) +from cinder.openstack.common import gettextutils +gettextutils.install('cinder', lazy=False) -from cinder import flags +# Need to register global_opts +from cinder.common import config # noqa from cinder.openstack.common import log as logging from cinder import service from cinder import utils +from cinder import version + + +CONF = cfg.CONF + if __name__ == '__main__': - flags.parse_args(sys.argv) + CONF(sys.argv[1:], project='cinder', + version=version.version_string()) logging.setup("cinder") utils.monkey_patch() + + launcher = service.process_launcher() server = service.WSGIService('osapi_volume') - service.serve(server) - service.wait() + launcher.launch_service(server, workers=server.workers or 1) + launcher.wait() diff --git a/bin/cinder-backup b/bin/cinder-backup new file mode 100755 index 0000000000..2b5a634cca --- /dev/null +++ b/bin/cinder-backup @@ -0,0 +1,59 @@ +#!/usr/bin/env python + +# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. 
diff --git a/bin/cinder-backup b/bin/cinder-backup
new file mode 100755
index 0000000000..2b5a634cca
--- /dev/null
+++ b/bin/cinder-backup
@@ -0,0 +1,59 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Starter script for Cinder Volume Backup."""
+
+
+import os
+import sys
+
+import eventlet
+
+eventlet.monkey_patch()
+
+from oslo.config import cfg
+
+# If ../cinder/__init__.py exists, add ../ to Python search path, so that
+# it will override what happens to be installed in /usr/(local/)lib/python...
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
+                                                os.pardir,
+                                                os.pardir))
+if os.path.exists(os.path.join(possible_topdir, 'cinder', '__init__.py')):
+    sys.path.insert(0, possible_topdir)
+
+from cinder.openstack.common import gettextutils
+gettextutils.install('cinder')
+
+# Need to register global_opts
+from cinder.common import config  # noqa
+from cinder.openstack.common import log as logging
+from cinder import service
+from cinder import utils
+from cinder import version
+
+
+CONF = cfg.CONF
+
+
+if __name__ == '__main__':
+    CONF(sys.argv[1:], project='cinder',
+         version=version.version_string())
+    logging.setup("cinder")
+    utils.monkey_patch()
+    server = service.Service.create(binary='cinder-backup')
+    service.serve(server)
+    service.wait()
diff --git a/bin/cinder-clear-rabbit-queues b/bin/cinder-clear-rabbit-queues
index 321a94d90e..4a59b8b3d5 100755
--- a/bin/cinder-clear-rabbit-queues
+++ b/bin/cinder-clear-rabbit-queues
@@ -1,7 +1,5 @@
 #!/usr/bin/env python
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2011 Openstack, LLC.
+# Copyright (c) 2011 OpenStack Foundation
 # All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -23,11 +21,10 @@
 """
 
-import datetime
-import gettext
 import os
 import sys
-import time
+
+from oslo.config import cfg
 
 # If ../cinder/__init__.py exists, add ../ to Python search path, so that
 # it will override what happens to be installed in /usr/(local/)lib/python...
@@ -37,15 +34,14 @@ POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
 if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'cinder', '__init__.py')):
     sys.path.insert(0, POSSIBLE_TOPDIR)
 
-gettext.install('cinder', unicode=1)
-
+from cinder.openstack.common import gettextutils
+gettextutils.install('cinder')
 
-from cinder import context
-from cinder import exception
-from cinder import flags
+# Need to register global_opts
+from cinder.common import config  # noqa
 from cinder.openstack.common import log as logging
-from cinder.openstack.common import cfg
 from cinder.openstack.common import rpc
+from cinder import version
 
 
 delete_exchange_opt = \
@@ -53,8 +49,8 @@ delete_exchange_opt = \
     cfg.BoolOpt('delete_exchange',
                 default=False,
                 help='delete cinder exchange too.')
 
-FLAGS = flags.FLAGS
-FLAGS.register_cli_opt(delete_exchange_opt)
+CONF = cfg.CONF
+CONF.register_cli_opt(delete_exchange_opt)
 
 
 def delete_exchange(exch):
@@ -70,8 +66,9 @@ def delete_queues(queues):
         x.queue_delete(q)
 
 if __name__ == '__main__':
-    args = flags.parse_args(sys.argv)
+    args = CONF(sys.argv[1:], project='cinder',
+                version=version.version_string())
     logging.setup("cinder")
     delete_queues(args[1:])
-    if FLAGS.delete_exchange:
-        delete_exchange(FLAGS.control_exchange)
+    if CONF.delete_exchange:
+        delete_exchange(CONF.control_exchange)
diff --git a/bin/cinder-manage b/bin/cinder-manage
index 9d13217425..7b113c865f 100755
--- a/bin/cinder-manage
+++ b/bin/cinder-manage
@@ -1,6 +1,4 @@
 #!/usr/bin/env python
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
 # Copyright 2010 United States Government as represented by the
 # Administrator of the National Aeronautics and Space Administration.
@@ -54,14 +52,13 @@ CLI interface for cinder management.
 """
 
-import gettext
-import optparse
+from __future__ import print_function
+
+
 import os
 import sys
 
-from sqlalchemy import create_engine, MetaData, Table
-from sqlalchemy.orm import sessionmaker
-from sqlalchemy.ext.declarative import declarative_base
+from oslo.config import cfg
 
 
 # If ../cinder/__init__.py exists, add ../ to Python search path, so that
@@ -72,26 +69,28 @@ POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
 if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'cinder', '__init__.py')):
     sys.path.insert(0, POSSIBLE_TOPDIR)
 
-gettext.install('cinder', unicode=1)
+from cinder.openstack.common import gettextutils
+gettextutils.install('cinder')
 
+# Need to register global_opts
+from cinder.common import config  # noqa
 from cinder import context
 from cinder import db
 from cinder.db import migration
-from cinder import exception
-from cinder import flags
 from cinder.openstack.common import log as logging
-from cinder.openstack.common import cfg
 from cinder.openstack.common import rpc
+from cinder.openstack.common import uuidutils
 from cinder import utils
 from cinder import version
 
-FLAGS = flags.FLAGS
+
+CONF = cfg.CONF
 
 
 # Decorators for actions
 def args(*args, **kwargs):
     def _decorator(func):
-        func.__dict__.setdefault('options', []).insert(0, (args, kwargs))
+        func.__dict__.setdefault('args', []).insert(0, (args, kwargs))
         return func
     return _decorator
 
@@ -100,34 +99,43 @@ def param2id(object_id):
    """Helper function to convert various id types to internal id.
    args: [object_id], e.g. 'vol-0000000a' or 'volume-0000000a' or '10'
    """
-    if '-' in object_id:
+    if uuidutils.is_uuid_like(object_id):
+        return object_id
+    elif '-' in object_id:
        # FIXME(ja): mapping occurs in nova?
        pass
    else:
-        return int(object_id)
+        try:
+            return int(object_id)
+        except ValueError:
+            return object_id
 
 
 class ShellCommands(object):
     def bpython(self):
         """Runs a bpython shell.
 
-        Falls back to Ipython/python shell if unavailable"""
+        Falls back to Ipython/python shell if unavailable
+        """
         self.run('bpython')
 
     def ipython(self):
         """Runs an Ipython shell.
 
-        Falls back to Python shell if unavailable"""
+        Falls back to Python shell if unavailable
+        """
         self.run('ipython')
 
     def python(self):
         """Runs a python shell.
 
-        Falls back to Python shell if unavailable"""
+        Falls back to Python shell if unavailable
+        """
         self.run('python')
 
-    @args('--shell', dest="shell", metavar='',
-          help='Python shell')
+    @args('--shell', dest="shell",
+          metavar='',
+          help='Python shell')
     def run(self, shell=None):
         """Runs a Python interactive interpreter."""
         if not shell:
@@ -159,33 +167,36 @@ class ShellCommands(object):
         else:
             # We don't have to wrap the following import in a 'try',
             # because we already know 'readline' was imported successfully.
-            import rlcompleter
+            import rlcompleter  # noqa
             readline.parse_and_bind("tab:complete")
             code.interact()
 
-    @args('--path', dest='path', metavar='', help='Script path')
+    @args('--path', required=True, help='Script path')
     def script(self, path):
-        """Runs the script from the specifed path with flags set properly.
-        arguments: path"""
+        """Runs the script from the specified path with flags set properly.
+        arguments: path
+        """
         exec(compile(open(path).read(), path, 'exec'), locals(), globals())
 
 
 def _db_error(caught_exception):
-    print caught_exception
-    print _("The above error may show that the database has not "
+    print('%s' % caught_exception)
+    print(_("The above error may show that the database has not "
             "been created.\nPlease create a database using "
-            "'cinder-manage db sync' before running this command.")
+            "'cinder-manage db sync' before running this command."))
     exit(1)
 
 
 class HostCommands(object):
-    """List hosts"""
+    """List hosts."""
 
+    @args('zone', nargs='?', default=None,
+          help='Availability Zone (default: %(default)s)')
     def list(self, zone=None):
         """Show a list of all physical hosts. Filter by zone.
-        args: [zone]"""
-        print "%-25s\t%-15s" % (_('host'),
-                                _('zone'))
+        args: [zone]
+        """
+        print(_("%(host)-25s\t%(zone)-15s") % {'host': 'host', 'zone': 'zone'})
         ctxt = context.get_admin_context()
         services = db.service_get_all(ctxt)
         if zone:
@@ -196,7 +207,9 @@ class HostCommands(object):
                     hosts.append(srv)
 
         for h in hosts:
-            print "%-25s\t%-15s" % (h['host'], h['availability_zone'])
+            print(_("%(host)-25s\t%(availability_zone)-15s")
+                  % {'host': h['host'],
+                     'availability_zone': h['availability_zone']})
 
 
 class DbCommands(object):
@@ -205,15 +218,15 @@ class DbCommands(object):
     def __init__(self):
         pass
 
-    @args('--version', dest='version', metavar='',
-          help='Database version')
+    @args('version', nargs='?', default=None,
+          help='Database version')
     def sync(self, version=None):
         """Sync the database up to the most recent version."""
         return migration.db_sync(version)
 
     def version(self):
         """Print the current database version."""
-        print migration.db_version()
+        print(migration.db_version())
 
 
 class VersionCommands(object):
@@ -223,369 +236,97 @@ class VersionCommands(object):
         pass
 
     def list(self):
-        print _("%(version)s (%(vcs)s)") % \
-                {'version': version.version_string(),
-                 'vcs': version.version_string_with_vcs()}
+        print(version.version_string())
 
     def __call__(self):
         self.list()
 
 
-class ImportCommands(object):
-    """Methods for importing Nova volumes to Cinder.
-
-    EXPECTATIONS:
-        These methods will do two things:
-            1. Import relevant Nova DB info in to Cinder
-            2. Import persistent tgt files from Nova to Cinder (see copy_tgt_files)
-
-    If you're using VG's (local storage) for your backend YOU MUST install
-    Cinder on the same node that you're migrating from.
-    """
-    def __init__(self):
-        pass
-
-    def _map_table(self, table):
-        class Mapper(declarative_base()):
-            __table__ = table
-        return Mapper
-
-    def _open_session(self, con_info):
-        # Note(jdg): The echo option below sets whether to dispaly db command
-        # debug info.
-        engine = create_engine(con_info,
-                               convert_unicode=True,
-                               echo=False)
-        session = sessionmaker(bind=engine)
-        return (session(), engine)
-
-    def _backup_cinder_db(self):
-        #First, dump the dest_db as a backup incase this goes wrong
-        cinder_dump = utils.execute('mysqldump', 'cinder')
-        if 'Dump completed on' in cinder_dump[0]:
-            with open('./cinder_db_bkup.sql', 'w+') as fo:
-                for line in cinder_dump:
-                    fo.write(line)
-        else:
-            raise exception.InvalidResults()
-
-    def _import_db(self, src_db, dest_db, backup_db):
-        # Remember order matters due to FK's
-        table_list = ['sm_flavors',
-                      'sm_backend_config',
-                      'snapshots',
-                      'volume_types',
-                      'volumes',
-                      'iscsi_targets',
-                      'sm_volume',
-                      'volume_metadata',
-                      'volume_type_extra_specs']
-
-        quota_table_list = ['quota_classes',
-                            'quota_usages',
-                            'quotas',
-                            'reservations']
-
-        if backup_db > 0:
-            if 'mysql:' not in dest_db:
-                print (_('Sorry, only mysql backups are supported!'))
-                raise exception.InvalidRequest()
-            else:
-                self._backup_cinder_db()
-
-        (src, src_engine) = self._open_session(src_db)
-        src_meta = MetaData(bind=src_engine)
-        (dest, dest_engine) = self._open_session(dest_db)
-
-        # First make sure nova is at Folsom
-        table = Table('migrate_version', src_meta, autoload=True)
-        if src.query(table).first().version < 132:
-            print (_('ERROR: Specified Nova DB is not at a compatible '
-                     'migration version!\nNova must be at Folsom or newer '
-                     'to import into Cinder database.'))
-            sys.exit(2)
-
-        for table_name in table_list:
-            print (_('Importing table %s...') % table_name)
-            table = Table(table_name, src_meta, autoload=True)
-            new_row = self._map_table(table)
-            columns = table.columns.keys()
-            for row in src.query(table).all():
-                data = dict([(str(column), getattr(row, column))
-                             for column in columns])
-                dest.add(new_row(**data))
-            dest.commit()
-
-        for table_name in quota_table_list:
-            print (_('Importing table %s...') % table_name)
-            table = Table(table_name, src_meta, autoload=True)
-            new_row = self._map_table(table)
-            columns = table.columns.keys()
-            for row in src.query(table).all():
-                if row.resource == 'gigabytes' or row.resource == 'volumes':
-                    data = dict([(str(column), getattr(row, column))
-                                 for column in columns])
-                    dest.add(new_row(**data))
-            dest.commit()
-
-    @args('--src', dest='src_db', metavar='',
-          help='db-engine://db_user[:passwd]@db_host[:port]\t\t'
-               'example: mysql://root:secrete@192.168.137.1')
-    @args('--dest', dest='dest_db', metavar='',
-          help='db-engine://db_user[:passwd]@db_host[:port]\t\t'
-               'example: mysql://root:secrete@192.168.137.1')
-    @args('--backup', dest='backup_db', metavar='<0|1>',
-          help='Perform mysqldump of cinder db before writing to it')
-    def import_db(self, src_db, dest_db, backup_db=1):
-        """Import relevant volume DB entries from Nova into Cinder.
-
-        NOTE:
-        Your Cinder DB should be clean WRT volume entries.
-
-        NOTE:
-        We take an sqldump of the cinder DB before mods
-        If you're not using mysql, set backup_db=0
-        and create your own backup.
-        """
-        src_db = '%s/nova' % src_db
-        dest_db = '%s/cinder' % dest_db
-        self._import_db(src_db, dest_db, backup_db)
-
-    @args('--src', dest='src_tgts', metavar='',
-          help='[login@src_host:]/opt/stack/nova/volumes/')
-    @args('--dest', dest='dest_tgts', metavar='',
-          help='[login@src_host:/opt/stack/cinder/volumes/]')
-    def copy_ptgt_files(self, src_tgts, dest_tgts=None):
-        """Copy persistent scsi tgt files from nova to cinder.
-
-        Default destination is FLAGS.volume_dir or state_path/volumes/
-
-        PREREQUISITES:
-        Persistent tgts were introduced in Folsom. If you're running
-        Essex or other release, this script is unnecessary.
-
-        NOTE:
-        If you're using local VG's and LVM for your nova volume backend
-        there's no point in copying these files over. Leave them on
-        your Nova system as they won't do any good here.
-        """
-        if dest_tgts is None:
-            try:
-                dest_tgts = FLAGS.volumes_dir
-            except Exception:
-                dest_tgts = '%s/volumes' % FLAGS.state_path
-
-        utils.execute('rsync', '-avz', src_tgts, dest_tgts)
-
-
 class VolumeCommands(object):
-    """Methods for dealing with a cloud in an odd state"""
+    """Methods for dealing with a cloud in an odd state."""
 
-    @args('--volume', dest='volume_id', metavar='',
-          help='Volume ID')
+    @args('volume_id',
+          help='Volume ID to be deleted')
     def delete(self, volume_id):
         """Delete a volume, bypassing the check that it
-        must be available."""
+        must be available.
+        """
         ctxt = context.get_admin_context()
         volume = db.volume_get(ctxt, param2id(volume_id))
         host = volume['host']
 
         if not host:
-            print "Volume not yet assigned to host."
-            print "Deleting volume from database and skipping rpc."
+            print(_("Volume not yet assigned to host."))
+            print(_("Deleting volume from database and skipping rpc."))
             db.volume_destroy(ctxt, param2id(volume_id))
             return
 
         if volume['status'] == 'in-use':
-            print "Volume is in-use."
-            print "Detach volume from instance and then try again."
+            print(_("Volume is in-use."))
+            print(_("Detach volume from instance and then try again."))
             return
 
         rpc.cast(ctxt,
-                 rpc.queue_get_for(ctxt, FLAGS.volume_topic, host),
+                 rpc.queue_get_for(ctxt, CONF.volume_topic, host),
                  {"method": "delete_volume",
                   "args": {"volume_id": volume['id']}})
 
-    @args('--volume', dest='volume_id', metavar='',
-          help='Volume ID')
+    @args('volume_id',
+          help='Volume ID to be reattached')
     def reattach(self, volume_id):
         """Re-attach a volume that has previously been attached
         to an instance.  Typically called after a compute host
-        has been rebooted."""
+        has been rebooted.
+ """ ctxt = context.get_admin_context() volume = db.volume_get(ctxt, param2id(volume_id)) if not volume['instance_id']: - print "volume is not attached to an instance" + print(_("volume is not attached to an instance")) return instance = db.instance_get(ctxt, volume['instance_id']) host = instance['host'] rpc.cast(ctxt, - rpc.queue_get_for(ctxt, FLAGS.compute_topic, host), + rpc.queue_get_for(ctxt, CONF.compute_topic, host), {"method": "attach_volume", "args": {"instance_id": instance['id'], "volume_id": volume['id'], "mountpoint": volume['mountpoint']}}) -class StorageManagerCommands(object): - """Class for mangaging Storage Backends and Flavors""" - - def flavor_list(self, flavor=None): - ctxt = context.get_admin_context() - - try: - if flavor is None: - flavors = db.sm_flavor_get_all(ctxt) - else: - flavors = db.sm_flavor_get(ctxt, flavor) - except exception.NotFound as ex: - print "error: %s" % ex - sys.exit(2) - - print "%-18s\t%-20s\t%s" % (_('id'), - _('Label'), - _('Description')) - - for flav in flavors: - print "%-18s\t%-20s\t%s" % ( - flav['id'], - flav['label'], - flav['description']) - - def flavor_create(self, label, desc): - # TODO(renukaapte) flavor name must be unique - try: - db.sm_flavor_create(context.get_admin_context(), - dict(label=label, - description=desc)) - except exception.DBError, e: - _db_error(e) - - def flavor_delete(self, label): - try: - db.sm_flavor_delete(context.get_admin_context(), label) - - except exception.DBError, e: - _db_error(e) - - def _splitfun(self, item): - i = item.split("=") - return i[0:2] - - def backend_list(self, backend_conf_id=None): - ctxt = context.get_admin_context() - - try: - if backend_conf_id is None: - backends = db.sm_backend_conf_get_all(ctxt) - else: - backends = db.sm_backend_conf_get(ctxt, backend_conf_id) - - except exception.NotFound as ex: - print "error: %s" % ex - sys.exit(2) - - print "%-5s\t%-10s\t%-40s\t%-10s\t%s" % (_('id'), - _('Flavor id'), - _('SR UUID'), - _('SR Type'), - _('Config Parameters'),) - - for b in backends: - print "%-5s\t%-10s\t%-40s\t%-10s\t%s" % (b['id'], - b['flavor_id'], - b['sr_uuid'], - b['sr_type'], - b['config_params'],) - - def backend_add(self, flavor_label, sr_type, *args): - # TODO(renukaapte) Add backend_introduce. - ctxt = context.get_admin_context() - params = dict(map(self._splitfun, args)) - sr_uuid = utils.gen_uuid() - - if flavor_label is None: - print "error: backend needs to be associated with flavor" - sys.exit(2) - - try: - flavors = db.sm_flavor_get(ctxt, flavor_label) - - except exception.NotFound as ex: - print "error: %s" % ex - sys.exit(2) - - config_params = " ".join(['%s=%s' % - (key, params[key]) for key in params]) - - if 'sr_uuid' in params: - sr_uuid = params['sr_uuid'] - try: - backend = db.sm_backend_conf_get_by_sr(ctxt, sr_uuid) - except exception.DBError, e: - _db_error(e) - - if backend: - print 'Backend config found. Would you like to recreate this?' - print '(WARNING:Recreating will destroy all VDIs on backend!!)' - c = raw_input('Proceed? (y/n) ') - if c == 'y' or c == 'Y': - try: - db.sm_backend_conf_update(ctxt, backend['id'], - dict(created=False, - flavor_id=flavors['id'], - sr_type=sr_type, - config_params=config_params)) - except exception.DBError, e: - _db_error(e) - return - - else: - print 'Backend config not found. Would you like to create it?' - - print '(WARNING: Creating will destroy all data on backend!!!)' - c = raw_input('Proceed? 
-            if c == 'y' or c == 'Y':
-                try:
-                    db.sm_backend_conf_create(ctxt,
-                                              dict(flavor_id=flavors['id'],
-                                                   sr_uuid=sr_uuid,
-                                                   sr_type=sr_type,
-                                                   config_params=config_params))
-                except exception.DBError, e:
-                    _db_error(e)
-
-    def backend_remove(self, backend_conf_id):
-        try:
-            db.sm_backend_conf_delete(context.get_admin_context(),
-                                      backend_conf_id)
-
-        except exception.DBError, e:
-            _db_error(e)
-
-
 class ConfigCommands(object):
     """Class for exposing the flags defined by flag_file(s)."""
 
     def __init__(self):
         pass
 
-    def list(self):
-        for key, value in FLAGS.iteritems():
-            if value is not None:
-                print '%s = %s' % (key, value)
+    @args('param', nargs='?', default=None,
+          help='Configuration parameter to display (default: %(default)s)')
+    def list(self, param=None):
+        """List parameters configured for cinder.
+
+        Lists all parameters configured for cinder unless an optional argument
+        is specified.  If the parameter is specified we only print the
+        requested parameter.  If the parameter is not found an appropriate
+        error is produced by .get*().
+        """
+        param = param and param.strip()
+        if param:
+            print('%s = %s' % (param, CONF.get(param)))
+        else:
+            for key, value in CONF.iteritems():
+                print('%s = %s' % (key, value))
 
 
 class GetLogCommands(object):
-    """Get logging information"""
+    """Get logging information."""
 
     def errors(self):
-        """Get all of the errors from the log files"""
+        """Get all of the errors from the log files."""
         error_found = 0
-        if FLAGS.logdir:
-            logs = [x for x in os.listdir(FLAGS.logdir) if x.endswith('.log')]
+        if CONF.log_dir:
+            logs = [x for x in os.listdir(CONF.log_dir) if x.endswith('.log')]
             for file in logs:
-                log_file = os.path.join(FLAGS.logdir, file)
+                log_file = os.path.join(CONF.log_dir, file)
                 lines = [line.strip() for line in open(log_file, "r")]
                 lines.reverse()
                 print_name = 0
@@ -593,14 +334,17 @@ class GetLogCommands(object):
                     if line.find(" ERROR ") > 0:
                         error_found += 1
                         if print_name == 0:
-                            print log_file + ":-"
+                            print(log_file + ":-")
                             print_name = 1
-                        print "Line %d : %s" % (len(lines) - index, line)
+                        print(_("Line %(dis)d : %(line)s") %
+                              {'dis': len(lines) - index, 'line': line})
         if error_found == 0:
-            print "No errors in logfiles!"
+            print(_("No errors in logfiles!"))
 
+    @args('num_entries', nargs='?', type=int, default=10,
+          help='Number of entries to list (default: %(default)d)')
     def syslog(self, num_entries=10):
-        """Get of the cinder syslog events"""
+        """Get the cinder syslog events."""
         entries = int(num_entries)
         count = 0
         log_file = ''
@@ -609,59 +353,100 @@ if os.path.exists('/var/log/syslog'):
         elif os.path.exists('/var/log/messages'):
             log_file = '/var/log/messages'
         else:
-            print "Unable to find system log file!"
+            print(_("Unable to find system log file!"))
             sys.exit(1)
         lines = [line.strip() for line in open(log_file, "r")]
         lines.reverse()
-        print "Last %s cinder syslog entries:-" % (entries)
+        print(_("Last %s cinder syslog entries:-") % (entries))
         for line in lines:
             if line.find("cinder") > 0:
                 count += 1
-                print "%s" % (line)
+                print(_("%s") % (line))
             if count == entries:
                 break
 
         if count == 0:
-            print "No cinder entries in syslog!"
-
-
-CATEGORIES = [
-    ('config', ConfigCommands),
-    ('db', DbCommands),
-    ('host', HostCommands),
-    ('logs', GetLogCommands),
-    ('shell', ShellCommands),
-    ('sm', StorageManagerCommands),
-    ('version', VersionCommands),
-    ('volume', VolumeCommands),
-    ('migrate', ImportCommands),
-]
-
-
-def lazy_match(name, key_value_tuples):
-    """Finds all objects that have a key that case insensitively contains
-    [name] key_value_tuples is a list of tuples of the form (key, value)
-    returns a list of tuples of the form (key, value)"""
-    result = []
-    for (k, v) in key_value_tuples:
-        if k.lower().find(name.lower()) == 0:
-            result.append((k, v))
-    if len(result) == 0:
-        print "%s does not match any options:" % name
-        for k, _v in key_value_tuples:
-            print "\t%s" % k
-        sys.exit(2)
-    if len(result) > 1:
-        print "%s matched multiple options:" % name
-        for k, _v in result:
-            print "\t%s" % k
-        sys.exit(2)
-    return result
+            print(_("No cinder entries in syslog!"))
+
+
+class BackupCommands(object):
+    """Methods for managing backups."""
+
+    def list(self):
+        """List all backups (including ones in progress) and the host
+        on which the backup operation is running.
+        """
+        ctxt = context.get_admin_context()
+        backups = db.backup_get_all(ctxt)
+
+        hdr = "%-32s\t%-32s\t%-32s\t%-24s\t%-24s\t%-12s\t%-12s\t%-12s\t%-12s"
+        print(hdr % (_('ID'),
+                     _('User ID'),
+                     _('Project ID'),
+                     _('Host'),
+                     _('Name'),
+                     _('Container'),
+                     _('Status'),
+                     _('Size'),
+                     _('Object Count')))
+
+        res = "%-32s\t%-32s\t%-32s\t%-24s\t%-24s\t%-12s\t%-12s\t%-12d\t%-12d"
+        for backup in backups:
+            object_count = 0
+            if backup['object_count'] is not None:
+                object_count = backup['object_count']
+            print(res % (backup['id'],
+                         backup['user_id'],
+                         backup['project_id'],
+                         backup['host'],
+                         backup['display_name'],
+                         backup['container'],
+                         backup['status'],
+                         backup['size'],
+                         object_count))
+
+
+class ServiceCommands(object):
+    """Methods for managing services."""
+    def list(self):
+        """Show a list of all cinder services."""
+        ctxt = context.get_admin_context()
+        services = db.service_get_all(ctxt)
+        print_format = "%-16s %-36s %-16s %-10s %-5s %-10s"
+        print(print_format % (_('Binary'),
+                              _('Host'),
+                              _('Zone'),
+                              _('Status'),
+                              _('State'),
+                              _('Updated At')))
+        for svc in services:
+            alive = utils.service_is_up(svc)
+            art = ":-)" if alive else "XXX"
+            status = 'enabled'
+            if svc['disabled']:
+                status = 'disabled'
+            print(print_format % (svc['binary'], svc['host'].partition('.')[0],
+                                  svc['availability_zone'], status, art,
+                                  svc['updated_at']))
+
+
+CATEGORIES = {
+    'backup': BackupCommands,
+    'config': ConfigCommands,
+    'db': DbCommands,
+    'host': HostCommands,
+    'logs': GetLogCommands,
+    'service': ServiceCommands,
+    'shell': ShellCommands,
+    'version': VersionCommands,
+    'volume': VolumeCommands,
+}
 
 
 def methods_of(obj):
     """Get all callable methods of an object that don't start with underscore
-    returns a list of tuples of the form (method_name, method)"""
+    returns a list of tuples of the form (method_name, method)
+    """
     result = []
     for i in dir(obj):
         if callable(getattr(obj, i)) and not i.startswith('_'):
@@ -669,89 +454,91 @@ def methods_of(obj):
             result.append((i, getattr(obj, i)))
     return result
 
 
+def add_command_parsers(subparsers):
+    for category in CATEGORIES:
+        command_object = CATEGORIES[category]()
+
+        parser = subparsers.add_parser(category)
+        parser.set_defaults(command_object=command_object)
+
+        category_subparsers = parser.add_subparsers(dest='action')
+
+        for (action, action_fn) in methods_of(command_object):
+            parser = category_subparsers.add_parser(action)
+
+            action_kwargs = []
+            for args, kwargs in getattr(action_fn, 'args', []):
+                parser.add_argument(*args, **kwargs)
+
+            parser.set_defaults(action_fn=action_fn)
+            parser.set_defaults(action_kwargs=action_kwargs)
+
+
+category_opt = cfg.SubCommandOpt('category',
+                                 title='Command categories',
+                                 handler=add_command_parsers)
+
+
+def get_arg_string(args):
+    arg = None
+    if args[0] == '-':
+        # (Note)zhiteng: an arg that starts with the parser's prefix_chars
+        # is an optional arg.  Notice that the cfg module takes care of the
+        # actual ArgParser, so prefix_chars is always '-'.
+        if args[1] == '-':
+            # This is a long optional arg
+            arg = args[2:]
+        else:
+            arg = args[3:]
+    else:
+        arg = args
+
+    return arg
+
+
+def fetch_func_args(func):
+    fn_args = []
+    for args, kwargs in getattr(func, 'args', []):
+        arg = get_arg_string(args[0])
+        fn_args.append(getattr(CONF.category, arg))
+
+    return fn_args
+
+
 def main():
     """Parse options and call the appropriate class/method."""
+    CONF.register_cli_opt(category_opt)
+    script_name = sys.argv[0]
+    if len(sys.argv) < 2:
+        print(_("\nOpenStack Cinder version: %(version)s\n") %
+              {'version': version.version_string()})
+        print(script_name + " category action []")
+        print(_("Available categories:"))
+        for category in CATEGORIES:
+            print(_("\t%s") % category)
+        sys.exit(2)
+
     try:
-        argv = flags.parse_args(sys.argv)
+        CONF(sys.argv[1:], project='cinder',
+             version=version.version_string())
         logging.setup("cinder")
     except cfg.ConfigFilesNotFoundError:
-        cfgfile = FLAGS.config_file[-1] if FLAGS.config_file else None
+        cfgfile = CONF.config_file[-1] if CONF.config_file else None
         if cfgfile and not os.access(cfgfile, os.R_OK):
             st = os.stat(cfgfile)
-            print _("Could not read %s. Re-running with sudo") % cfgfile
+            print(_("Could not read %s. Re-running with sudo") % cfgfile)
             try:
                 os.execvp('sudo', ['sudo', '-u', '#%s' % st.st_uid] + sys.argv)
             except Exception:
-                print _('sudo failed, continuing as if nothing happened')
+                print(_('sudo failed, continuing as if nothing happened'))
 
-        print _('Please re-run cinder-manage as root.')
+        print(_('Please re-run cinder-manage as root.'))
         sys.exit(2)
 
-    script_name = argv.pop(0)
-    if len(argv) < 1:
-        print _("\nOpenStack Cinder version: %(version)s (%(vcs)s)\n") % \
-                {'version': version.version_string(),
-                 'vcs': version.version_string_with_vcs()}
-        print script_name + " category action []"
-        print _("Available categories:")
-        for k, _v in CATEGORIES:
-            print "\t%s" % k
-        sys.exit(2)
-    category = argv.pop(0)
-    matches = lazy_match(category, CATEGORIES)
-    # instantiate the command group object
-    category, fn = matches[0]
-    command_object = fn()
-    actions = methods_of(command_object)
-    if len(argv) < 1:
-        if hasattr(command_object, '__call__'):
-            action = ''
-            fn = command_object.__call__
-        else:
-            print script_name + " category action []"
-            print _("Available actions for %s category:") % category
-            for k, _v in actions:
-                print "\t%s" % k
-            sys.exit(2)
-    else:
-        action = argv.pop(0)
-        matches = lazy_match(action, actions)
-        action, fn = matches[0]
-
-    # For not decorated methods
-    options = getattr(fn, 'options', [])
-
-    usage = "%%prog %s %s [options]" % (category, action)
-    parser = optparse.OptionParser(usage=usage)
-    for ar, kw in options:
-        parser.add_option(*ar, **kw)
-    (opts, fn_args) = parser.parse_args(argv)
-    fn_kwargs = vars(opts)
-
-    for k, v in fn_kwargs.items():
-        if v is None:
-            del fn_kwargs[k]
-        elif isinstance(v, basestring):
-            fn_kwargs[k] = v.decode('utf-8')
-        else:
-            fn_kwargs[k] = v
-
-    fn_args = [arg.decode('utf-8') for arg in fn_args]
 
+    fn = CONF.category.action_fn
 
-    # call the action with the remaining arguments
-    try:
-        fn(*fn_args, **fn_kwargs)
-        rpc.cleanup()
-        sys.exit(0)
-    except TypeError:
-        print _("Possible wrong number of arguments supplied")
-        print fn.__doc__
-        parser.print_help()
-        raise
-    except Exception:
-        print _("Command failed, please check log for more info")
-        raise
+    fn_args = fetch_func_args(fn)
+    fn(*fn_args)
 
 if __name__ == '__main__':
     main()
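For readers following the cinder-manage rewrite above: the old
``lazy_match``/optparse dispatch is replaced by argparse subparsers built from
the ``CATEGORIES`` dict, with oslo.config's ``SubCommandOpt`` exposing the
chosen handler as ``CONF.category.action_fn``. A stripped-down, standalone
sketch of the same dispatch shape (plain argparse, no oslo.config; the command
set is invented)::

    import argparse


    class HostCommands(object):
        def list(self, zone=None):
            print('would list hosts in zone: %s' % zone)

    CATEGORIES = {'host': HostCommands}

    parser = argparse.ArgumentParser(prog='mini-manage')
    subparsers = parser.add_subparsers(dest='category')
    for name, cls in CATEGORIES.items():
        cat_parser = subparsers.add_parser(name)
        actions = cat_parser.add_subparsers(dest='action')
        lister = actions.add_parser('list')
        lister.add_argument('zone', nargs='?', default=None)
        lister.set_defaults(action_fn=cls().list)

    # e.g. "mini-manage host list nova"
    args = parser.parse_args(['host', 'list', 'nova'])
    args.action_fn(args.zone)

In the real script the same wiring goes through ``cfg.SubCommandOpt`` so that
the CLI and the config-file options share one parser.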
diff --git a/bin/cinder-rootwrap b/bin/cinder-rootwrap
deleted file mode 100755
index 3ea6cc3704..0000000000
--- a/bin/cinder-rootwrap
+++ /dev/null
@@ -1,94 +0,0 @@
-#!/usr/bin/env python
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2011 Openstack, LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Root wrapper for Cinder
-
-   Filters which commands cinder is allowed to run as another user.
-
-   To use this, you should set the following in cinder.conf:
-   rootwrap_config=/etc/cinder/rootwrap.conf
-
-   You also need to let the cinder user run cinder-rootwrap as root in sudoers:
-   cinder ALL = (root) NOPASSWD: /usr/bin/cinder-rootwrap
-                                 /etc/cinder/rootwrap.conf *
-
-   To make allowed commands node-specific, your packaging should only
-   install volume.filters on volume nodes (i.e. cinder-api nodes should not
-   have any of those files installed).
-"""
-
-import ConfigParser
-import os
-import signal
-import subprocess
-import sys
-
-
-RC_UNAUTHORIZED = 99
-RC_NOCOMMAND = 98
-RC_BADCONFIG = 97
-
-
-def _subprocess_setup():
-    # Python installs a SIGPIPE handler by default. This is usually not what
-    # non-Python subprocesses expect.
-    signal.signal(signal.SIGPIPE, signal.SIG_DFL)
-
-
-if __name__ == '__main__':
-    # Split arguments, require at least a command
-    execname = sys.argv.pop(0)
-    if len(sys.argv) < 2:
-        print "%s: %s" % (execname, "No command specified")
-        sys.exit(RC_NOCOMMAND)
-
-    configfile = sys.argv.pop(0)
-    userargs = sys.argv[:]
-
-    # Load configuration
-    config = ConfigParser.RawConfigParser()
-    config.read(configfile)
-    try:
-        filters_path = config.get("DEFAULT", "filters_path").split(",")
-    except ConfigParser.Error:
-        print "%s: Incorrect configuration file: %s" % (execname, configfile)
-        sys.exit(RC_BADCONFIG)
-
-    # Add ../ to sys.path to allow running from branch
-    possible_topdir = os.path.normpath(os.path.join(os.path.abspath(execname),
-                                                    os.pardir, os.pardir))
-    if os.path.exists(os.path.join(possible_topdir, "cinder", "__init__.py")):
-        sys.path.insert(0, possible_topdir)
-
-    from cinder.rootwrap import wrapper
-
-    # Execute command if it matches any of the loaded filters
-    filters = wrapper.load_filters(filters_path)
-    filtermatch = wrapper.match_filter(filters, userargs)
-    if filtermatch:
-        obj = subprocess.Popen(filtermatch.get_command(userargs),
-                               stdin=sys.stdin,
-                               stdout=sys.stdout,
-                               stderr=sys.stderr,
-                               preexec_fn=_subprocess_setup,
-                               env=filtermatch.get_environment(userargs))
-        obj.wait()
-        sys.exit(obj.returncode)
-
-    print "Unauthorized command: %s" % ' '.join(userargs)
-    sys.exit(RC_UNAUTHORIZED)
diff --git a/bin/cinder-rpc-zmq-receiver b/bin/cinder-rpc-zmq-receiver
new file mode 100755
index 0000000000..07db167b0b
--- /dev/null
+++ b/bin/cinder-rpc-zmq-receiver
@@ -0,0 +1,51 @@
+#!/usr/bin/env python
+# Copyright 2011 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import eventlet
+eventlet.monkey_patch()
+
+import contextlib
+import os
+import sys
+
+# If ../cinder/__init__.py exists, add ../ to Python search path, so that
+# it will override what happens to be installed in /usr/(local/)lib/python...
+POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
+                                                os.pardir,
+                                                os.pardir))
+if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'cinder', '__init__.py')):
+    sys.path.insert(0, POSSIBLE_TOPDIR)
+
+from oslo.config import cfg
+
+from cinder.openstack.common import log as logging
+from cinder.openstack.common import rpc
+from cinder.openstack.common.rpc import impl_zmq
+
+CONF = cfg.CONF
+CONF.register_opts(rpc.rpc_opts)
+CONF.register_opts(impl_zmq.zmq_opts)
+
+
+def main():
+    CONF(sys.argv[1:], project='cinder')
+    logging.setup("cinder")
+
+    with contextlib.closing(impl_zmq.ZmqProxy(CONF)) as reactor:
+        reactor.consume_in_thread()
+        reactor.wait()
+
+if __name__ == '__main__':
+    main()
diff --git a/bin/cinder-rtstool b/bin/cinder-rtstool
new file mode 100755
index 0000000000..07fe5947d9
--- /dev/null
+++ b/bin/cinder-rtstool
@@ -0,0 +1,238 @@
+#!/usr/bin/env python
+# vim: et tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 - 2013 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import gettext
+import re
+import sys
+
+import rtslib
+
+gettext.install('cinder-rtstool', unicode=1)
+
+
+class RtstoolError(Exception):
+    pass
+
+
+class RtstoolImportError(RtstoolError):
+    pass
+
+
+def create(backing_device, name, userid, password, initiator_iqns=None):
+    try:
+        rtsroot = rtslib.root.RTSRoot()
+    except rtslib.utils.RTSLibError:
+        print(_('Ensure that configfs is mounted at /sys/kernel/config.'))
+        raise
+
+    # Look to see if BlockStorageObject already exists
+    for x in rtsroot.storage_objects:
+        if x.dump()['name'] == name:
+            # Already exists, use this one
+            return
+
+    so_new = rtslib.BlockStorageObject(name=name,
+                                       dev=backing_device)
+
+    target_new = rtslib.Target(rtslib.FabricModule('iscsi'), name, 'create')
+
+    tpg_new = rtslib.TPG(target_new, mode='create')
+    tpg_new.set_attribute('authentication', '1')
+
+    lun_new = rtslib.LUN(tpg_new, storage_object=so_new)
+
+    initiator_name = None
+    name_file = '/etc/iscsi/initiatorname.iscsi'
+
+    try:
+        with open(name_file, 'r') as f:
+            for line in f:
+                m = re.match('InitiatorName=(.+)', line)
+                if m != None:
+                    initiator_name = m.group(1)
+                    break
+    except IOError:
+        raise RtstoolError(_('Could not open %s') % name_file)
+
+    if initiator_name == None:
+        raise RtstoolError(_('Could not read InitiatorName from %s') %
+                           name_file)
+
+    acl_new = rtslib.NodeACL(tpg_new, initiator_name, mode='create')
+
+    acl_new.chap_userid = userid
+    acl_new.chap_password = password
+
+    rtslib.MappedLUN(acl_new, lun_new.lun, lun_new.lun)
+
+    if initiator_iqns:
+        initiator_iqns = initiator_iqns.strip(' ')
+        for i in initiator_iqns.split(','):
+            acl_new = rtslib.NodeACL(tpg_new, i, mode='create')
+            acl_new.chap_userid = userid
+            acl_new.chap_password = password
+
+            rtslib.MappedLUN(acl_new, lun_new.lun, lun_new.lun)
+
+    tpg_new.enable = 1
+
+    try:
+        rtslib.NetworkPortal(tpg_new, '0.0.0.0', 3260, mode='any')
+    except rtslib.utils.RTSLibError:
+        print(_('Error creating NetworkPortal: ensure port 3260 '
+                'is not in use by another service.'))
+        raise
+
+    try:
+        rtslib.NetworkPortal(tpg_new, '::0', 3260, mode='any')
+    except rtslib.utils.RTSLibError:
+        # TODO(emh): Binding to IPv6 fails sometimes -- let pass for now.
+        pass
+
+
+def add_initiator(target_iqn, initiator_iqn, userid, password):
+    try:
+        rtsroot = rtslib.root.RTSRoot()
+    except rtslib.utils.RTSLibError:
+        print(_('Ensure that configfs is mounted at /sys/kernel/config.'))
+        raise
+
+    # Look for the target
+    target = None
+    for t in rtsroot.targets:
+        if t.dump()['wwn'] == target_iqn:
+            target = t
+            break
+    if target == None:
+        raise RtstoolError(_('Could not find target %s') % target_iqn)
+
+    tpg = target.tpgs.next()  # get the first one
+    for acl in tpg.dump()['node_acls']:
+        # See if this ACL configuration already exists
+        if acl['node_wwn'] == initiator_iqn:
+            # No further action required
+            return
+
+    acl_new = rtslib.NodeACL(tpg, initiator_iqn, mode='create')
+    acl_new.chap_userid = userid
+    acl_new.chap_password = password
+
+    rtslib.MappedLUN(acl_new, 0, tpg_lun=0)
+
+
+def get_targets():
+    rtsroot = rtslib.root.RTSRoot()
+    for x in rtsroot.targets:
+        print(x.dump()['wwn'])
+
+
+def delete(iqn):
+    rtsroot = rtslib.root.RTSRoot()
+    for x in rtsroot.targets:
+        if x.dump()['wwn'] == iqn:
+            x.delete()
+            break
+
+    for x in rtsroot.storage_objects:
+        if x.dump()['name'] == iqn:
+            x.delete()
+            break
+
+
+def verify_rtslib():
+    for member in ['BlockStorageObject', 'FabricModule', 'LUN',
+                   'MappedLUN', 'NetworkPortal', 'NodeACL', 'root',
+                   'Target', 'TPG']:
+        if not hasattr(rtslib, member):
+            raise RtstoolImportError(_("rtslib is missing member %s: "
+                                       "You may need a newer python-rtslib.") %
+                                     member)
+
+
+def usage():
+    print("Usage:")
+    print(sys.argv[0] +
+          " create [device] [name] [userid] [password]" +
+          " ")
+    print(sys.argv[0] +
+          " add-initiator [target_iqn] [userid] [password] [initiator_iqn]")
+    print(sys.argv[0] + " get-targets")
+    print(sys.argv[0] + " delete [iqn]")
+    print(sys.argv[0] + " verify")
+    sys.exit(1)
+
+
+def main(argv=None):
+    if argv is None:
+        argv = sys.argv
+
+    if len(argv) < 2:
+        usage()
+
+    if argv[1] == 'create':
+        if len(argv) < 6:
+            usage()
+
+        if len(argv) > 7:
+            usage()
+
+        backing_device = argv[2]
+        name = argv[3]
+        userid = argv[4]
+        password = argv[5]
+        initiator_iqns = None
+
+        if len(argv) > 6:
+            initiator_iqns = argv[6]
+
+        create(backing_device, name, userid, password, initiator_iqns)
+
+    elif argv[1] == 'add-initiator':
+        if len(argv) < 6:
+            usage()
+
+        target_iqn = argv[2]
+        userid = argv[3]
+        password = argv[4]
+        initiator_iqn = argv[5]
+
+        add_initiator(target_iqn, initiator_iqn, userid, password)
+
+    elif argv[1] == 'get-targets':
+        get_targets()
+
+    elif argv[1] == 'delete':
+        if len(argv) < 3:
+            usage()
+
+        iqn = argv[2]
+        delete(iqn)
+
+    elif argv[1] == 'verify':
+        # This is used to verify that this script can be called by cinder,
+        # and that rtslib is new enough to work.
+        verify_rtslib()
+        return 0
+
+    else:
+        usage()
+
+    return 0
+
+if __name__ == '__main__':
+    sys.exit(main())
diff --git a/bin/cinder-scheduler b/bin/cinder-scheduler
index 33015b3cad..e8ee1175ba 100755
--- a/bin/cinder-scheduler
+++ b/bin/cinder-scheduler
@@ -1,6 +1,4 @@
 #!/usr/bin/env python
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2010 United States Government as represented by the
 # Administrator of the National Aeronautics and Space Administration.
 # All Rights Reserved.
@@ -19,13 +17,16 @@
 
 """Starter script for Cinder Scheduler."""
 
+
 import eventlet
+
 eventlet.monkey_patch()
 
-import gettext
 import os
 import sys
 
+from oslo.config import cfg
+
 # If ../cinder/__init__.py exists, add ../ to Python search path, so that
 # it will override what happens to be installed in /usr/(local/)lib/python...
 possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
@@ -34,15 +35,23 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
 if os.path.exists(os.path.join(possible_topdir, 'cinder', '__init__.py')):
     sys.path.insert(0, possible_topdir)
 
-gettext.install('cinder', unicode=1)
+from cinder.openstack.common import gettextutils
+gettextutils.install('cinder')
 
-from cinder import flags
+# Need to register global_opts
+from cinder.common import config  # noqa
 from cinder.openstack.common import log as logging
 from cinder import service
 from cinder import utils
+from cinder import version
+
+
+CONF = cfg.CONF
+
 
 if __name__ == '__main__':
-    flags.parse_args(sys.argv)
+    CONF(sys.argv[1:], project='cinder',
+         version=version.version_string())
     logging.setup("cinder")
     utils.monkey_patch()
     server = service.Service.create(binary='cinder-scheduler')
diff --git a/bin/cinder-volume b/bin/cinder-volume
index 47db9518ab..0a04626c3b 100755
--- a/bin/cinder-volume
+++ b/bin/cinder-volume
@@ -1,6 +1,4 @@
 #!/usr/bin/env python
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2010 United States Government as represented by the
 # Administrator of the National Aeronautics and Space Administration.
 # All Rights Reserved.
@@ -20,11 +18,19 @@
 """Starter script for Cinder Volume."""
 
 import eventlet
-eventlet.monkey_patch()
-
 import os
+
+if os.name == 'nt':
+    # eventlet monkey patching the os module causes subprocess.Popen to fail
+    # on Windows when using pipes due to missing non-blocking IO support.
+    eventlet.monkey_patch(os=False)
+else:
+    eventlet.monkey_patch()
+
 import sys
 
+from oslo.config import cfg
+
 # If ../cinder/__init__.py exists, add ../ to Python search path, so that
 # it will override what happens to be installed in /usr/(local/)lib/python...
 possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
@@ -33,16 +39,36 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
 if os.path.exists(os.path.join(possible_topdir, 'cinder', '__init__.py')):
     sys.path.insert(0, possible_topdir)
 
+from cinder.openstack.common import gettextutils
+gettextutils.install('cinder')
 
-from cinder import flags
+# Need to register global_opts
+from cinder.common import config  # noqa
 from cinder.openstack.common import log as logging
 from cinder import service
 from cinder import utils
+from cinder import version
+
+
+CONF = cfg.CONF
+
 
 if __name__ == '__main__':
-    flags.parse_args(sys.argv)
+    CONF(sys.argv[1:], project='cinder',
+         version=version.version_string())
     logging.setup("cinder")
     utils.monkey_patch()
-    server = service.Service.create(binary='cinder-volume')
-    service.serve(server)
-    service.wait()
+    # Note(zhiteng): Windows (os = 'nt') already skips monkey patching the
+    # 'os' module above, so there is no need to treat it differently here
+    # when creating the launcher.
+    launcher = service.process_launcher()
+    if CONF.enabled_backends:
+        for backend in CONF.enabled_backends:
+            host = "%s@%s" % (CONF.host, backend)
+            server = service.Service.create(host=host,
+                                            service_name=backend)
+            launcher.launch_service(server)
+    else:
+        server = service.Service.create(binary='cinder-volume')
+        launcher.launch_service(server)
+    launcher.wait()
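The new ``enabled_backends`` branch above launches one cinder-volume service
per configured backend, each registered as ``host@backend``. A hypothetical
``cinder.conf`` fragment that would drive that branch (the section names and
backend names are made up; only ``enabled_backends`` itself comes from the
code above)::

    [DEFAULT]
    enabled_backends = lvm-fast,lvm-slow

    # Each name listed above gets its own config section and its own
    # cinder-volume service, registered as "host@backend-name".
    [lvm-fast]
    volume_backend_name = LVM_fast

    [lvm-slow]
    volume_backend_name = LVM_slow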
+# Copyright (c) 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -34,11 +32,15 @@ Jan 1 through Dec 31 of the previous year. """ -import gettext +from __future__ import print_function + +from datetime import datetime import os import sys import traceback +from oslo.config import cfg + # If ../cinder/__init__.py exists, add ../ to Python search path, so that # it will override what happens to be installed in /usr/(local/)lib/python... POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), @@ -47,33 +49,211 @@ POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'cinder', '__init__.py')): sys.path.insert(0, POSSIBLE_TOPDIR) -gettext.install('cinder', unicode=1) +from cinder.openstack.common import gettextutils +gettextutils.install('cinder') + from cinder import context from cinder import db -from cinder import flags from cinder.openstack.common import log as logging -from cinder.openstack.common import rpc from cinder import utils +from cinder import version import cinder.volume.utils -FLAGS = flags.FLAGS +CONF = cfg.CONF +script_opts = [ + cfg.StrOpt('start_time', + default=None, + help="If this option is specified then the start time " + "specified is used instead of the start time of the " + "last completed audit period."), + cfg.StrOpt('end_time', + default=None, + help="If this option is specified then the end time " + "specified is used instead of the end time of the " + "last completed audit period."), + cfg.BoolOpt('send_actions', + default=False, + help="Send the volume and snapshot create and delete " + "notifications generated in the specified period."), +] +CONF.register_cli_opts(script_opts) + if __name__ == '__main__': admin_context = context.get_admin_context() - flags.parse_args(sys.argv) + CONF(sys.argv[1:], project='cinder', + version=version.version_string()) logging.setup("cinder") + LOG = logging.getLogger("cinder") begin, end = utils.last_completed_audit_period() - print "Starting volume usage audit" - print "Creating usages for %s until %s" % (str(begin), str(end)) + if CONF.start_time: + begin = datetime.strptime(CONF.start_time, "%Y-%m-%d %H:%M:%S") + if CONF.end_time: + end = datetime.strptime(CONF.end_time, "%Y-%m-%d %H:%M:%S") + if not end > begin: + msg = _("The end time (%(end)s) must be after the start " + "time (%(start)s).") % {'start': begin, + 'end': end} + print(msg) + LOG.error(msg) + sys.exit(-1) + print(_("Starting volume usage audit")) + msg = _("Creating usages for %(begin_period)s until %(end_period)s") + print(msg % {"begin_period": str(begin), "end_period": str(end)}) + + extra_info = { + 'audit_period_beginning': str(begin), + 'audit_period_ending': str(end), + } + volumes = db.volume_get_active_by_window(admin_context, begin, end) - print "Found %d volumes" % len(volumes) + print(_("Found %d volumes") % len(volumes)) for volume_ref in volumes: try: - cinder.volume.utils.notify_usage_exists( - admin_context, volume_ref) - except Exception, e: - print traceback.format_exc(e) - print "Volume usage audit completed" + LOG.debug(_("Send exists notification for " + "<volume_id: %(volume_id)s> " + "<project_id %(project_id)s> " + "<%(extra_info)s>") % + {'volume_id': volume_ref.id, + 'project_id': volume_ref.project_id, + 'extra_info': extra_info}) + cinder.volume.utils.notify_about_volume_usage( + admin_context, + volume_ref, + 'exists', extra_usage_info=extra_info) + except Exception as e: + LOG.error(_("Failed to send exists notification for 
volume %s.") % + volume_ref.id) + print(traceback.format_exc(e)) + + if (CONF.send_actions and + volume_ref.created_at > begin and + volume_ref.created_at < end): + try: + local_extra_info = { + 'audit_period_beginning': str(volume_ref.created_at), + 'audit_period_ending': str(volume_ref.created_at), + } + LOG.debug(_("Send create notification for " + "<volume_id: %(volume_id)s> " + "<project_id %(project_id)s> " + "<%(extra_info)s>") % + {'volume_id': volume_ref.id, + 'project_id': volume_ref.project_id, + 'extra_info': local_extra_info}) + cinder.volume.utils.notify_about_volume_usage( + admin_context, + volume_ref, + 'create.start', extra_usage_info=local_extra_info) + cinder.volume.utils.notify_about_volume_usage( + admin_context, + volume_ref, + 'create.end', extra_usage_info=local_extra_info) + except Exception as e: + LOG.error(_("Failed to send create notification for " + "volume %s.") % volume_ref.id) + print(traceback.format_exc(e)) + + if (CONF.send_actions and volume_ref.deleted_at and + volume_ref.deleted_at > begin and + volume_ref.deleted_at < end): + try: + local_extra_info = { + 'audit_period_beginning': str(volume_ref.deleted_at), + 'audit_period_ending': str(volume_ref.deleted_at), + } + LOG.debug(_("Send delete notification for " + "<volume_id: %(volume_id)s> " + "<project_id %(project_id)s> " + "<%(extra_info)s>") % + {'volume_id': volume_ref.id, + 'project_id': volume_ref.project_id, + 'extra_info': local_extra_info}) + cinder.volume.utils.notify_about_volume_usage( + admin_context, + volume_ref, + 'delete.start', extra_usage_info=local_extra_info) + cinder.volume.utils.notify_about_volume_usage( + admin_context, + volume_ref, + 'delete.end', extra_usage_info=local_extra_info) + except Exception as e: + LOG.error(_("Failed to send delete notification for volume " + "%s.") % volume_ref.id) + print(traceback.format_exc(e)) + + snapshots = db.snapshot_get_active_by_window(admin_context, + begin, + end) + print(_("Found %d snapshots") % len(snapshots)) + for snapshot_ref in snapshots: + try: + LOG.debug(_("Send notification for " + "<snapshot_id: %(snapshot_id)s> " + "<project_id %(project_id)s> " + "<%(extra_info)s>") % + {'snapshot_id': snapshot_ref.id, + 'project_id': snapshot_ref.project_id, + 'extra_info': extra_info}) + cinder.volume.utils.notify_about_snapshot_usage(admin_context, + snapshot_ref, + 'exists', + extra_info) + except Exception as e: + LOG.error(_("Failed to send exists notification for snapshot %s.") + % snapshot_ref.id) + print(traceback.format_exc(e)) + + if (CONF.send_actions and + snapshot_ref.created_at > begin and + snapshot_ref.created_at < end): + try: + local_extra_info = { + 'audit_period_beginning': str(snapshot_ref.created_at), + 'audit_period_ending': str(snapshot_ref.created_at), + } + LOG.debug(_("Send create notification for " + "<snapshot_id: %(snapshot_id)s> " + "<project_id %(project_id)s> " + "<%(extra_info)s>") % + {'snapshot_id': snapshot_ref.id, + 'project_id': snapshot_ref.project_id, + 'extra_info': local_extra_info}) + cinder.volume.utils.notify_about_snapshot_usage( + admin_context, + snapshot_ref, + 'create.start', extra_usage_info=local_extra_info) + cinder.volume.utils.notify_about_snapshot_usage( + admin_context, + snapshot_ref, + 'create.end', extra_usage_info=local_extra_info) + except Exception as e: + LOG.error(_("Failed to send create notification for snapshot " + "%s.") % snapshot_ref.id) + print(traceback.format_exc(e)) + + if (CONF.send_actions and snapshot_ref.deleted_at and + snapshot_ref.deleted_at > begin and + snapshot_ref.deleted_at < end): + try: + local_extra_info = { + 'audit_period_beginning': str(snapshot_ref.deleted_at), + 'audit_period_ending': str(snapshot_ref.deleted_at), + } + LOG.debug(_("Send delete notification for " + "<snapshot_id: %(snapshot_id)s> " + "<project_id %(project_id)s> " + "
<%(extra_info)s>") % + {'snapshot_id': snapshot_ref.id, + 'project_id': snapshot_ref.project_id, + 'extra_info': local_extra_info}) + cinder.volume.utils.notify_about_snapshot_usage( + admin_context, + snapshot_ref, + 'delete.start', extra_usage_info=local_extra_info) + cinder.volume.utils.notify_about_snapshot_usage( + admin_context, + snapshot_ref, + 'delete.end', extra_usage_info=local_extra_info) + except Exception as e: + LOG.error(_("Failed to send delete notification for snapshot " + "%s.") % snapshot_ref.id) + print(traceback.format_exc(e)) + + print(_("Volume usage audit completed")) diff --git a/cinder/__init__.py b/cinder/__init__.py index f8db8e875c..2b43c2fafd 100644 --- a/cinder/__init__.py +++ b/cinder/__init__.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. @@ -30,8 +28,3 @@ .. moduleauthor:: Manish Singh .. moduleauthor:: Andy Smith """ - -import gettext - - -gettext.install('cinder', unicode=1) diff --git a/cinder/api/__init__.py b/cinder/api/__init__.py index 747015af53..a3a0371ebc 100644 --- a/cinder/api/__init__.py +++ b/cinder/api/__init__.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. @@ -15,3 +13,18 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. + + +from oslo.config import cfg +import paste.urlmap + + +CONF = cfg.CONF + + +def root_app_factory(loader, global_conf, **local_conf): + if not CONF.enable_v1_api: + del local_conf['/v1'] + if not CONF.enable_v2_api: + del local_conf['/v2'] + return paste.urlmap.urlmap_factory(loader, global_conf, **local_conf) diff --git a/cinder/api/auth.py b/cinder/api/auth.py index 9930b7352f..4b5e3379d1 100644 --- a/cinder/api/auth.py +++ b/cinder/api/auth.py @@ -1,6 +1,6 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright (c) 2011 OpenStack, LLC +# Copyright (c) 2013 OpenStack Foundation +# +# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -13,91 +13,24 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -""" -Common Auth Middleware. - -""" - -import webob.dec -import webob.exc -from cinder import context -from cinder import flags +from cinder.api.middleware import auth from cinder.openstack.common import log as logging -from cinder.openstack.common import cfg -from cinder import wsgi -use_forwarded_for_opt = cfg.BoolOpt('use_forwarded_for', - default=False, - help='Treat X-Forwarded-For as the canonical remote address. 
' - 'Only enable this if you have a sanitizing proxy.') - -FLAGS = flags.FLAGS -FLAGS.register_opt(use_forwarded_for_opt) LOG = logging.getLogger(__name__) -def pipeline_factory(loader, global_conf, **local_conf): - """A paste pipeline replica that keys off of auth_strategy.""" - pipeline = local_conf[FLAGS.auth_strategy] - if not FLAGS.api_rate_limit: - limit_name = FLAGS.auth_strategy + '_nolimit' - pipeline = local_conf.get(limit_name, pipeline) - pipeline = pipeline.split() - filters = [loader.get_filter(n) for n in pipeline[:-1]] - app = loader.get_app(pipeline[-1]) - filters.reverse() - for filter in filters: - app = filter(app) - return app - - -class InjectContext(wsgi.Middleware): - """Add a 'cinder.context' to WSGI environ.""" +class CinderKeystoneContext(auth.CinderKeystoneContext): + def __init__(self, application): + LOG.warn(_('cinder.api.auth:CinderKeystoneContext is deprecated. ' + 'Please use ' + 'cinder.api.middleware.auth:CinderKeystoneContext ' + 'instead.')) + super(CinderKeystoneContext, self).__init__(application) - def __init__(self, context, *args, **kwargs): - self.context = context - super(InjectContext, self).__init__(*args, **kwargs) + - @webob.dec.wsgify(RequestClass=wsgi.Request) - def __call__(self, req): - req.environ['cinder.context'] = self.context - return self.application - - -class CinderKeystoneContext(wsgi.Middleware): - """Make a request context from keystone headers""" - - @webob.dec.wsgify(RequestClass=wsgi.Request) - def __call__(self, req): - user_id = req.headers.get('X_USER') - user_id = req.headers.get('X_USER_ID', user_id) - if user_id is None: - LOG.debug("Neither X_USER_ID nor X_USER found in request") - return webob.exc.HTTPUnauthorized() - # get the roles - roles = [r.strip() for r in req.headers.get('X_ROLE', '').split(',')] - if 'X_TENANT_ID' in req.headers: - # This is the new header since Keystone went to ID/Name - project_id = req.headers['X_TENANT_ID'] - else: - # This is for legacy compatibility - project_id = req.headers['X_TENANT'] - - # Get the auth token - auth_token = req.headers.get('X_AUTH_TOKEN', - req.headers.get('X_STORAGE_TOKEN')) - - # Build a context, including the auth_token... - remote_address = req.remote_addr - if FLAGS.use_forwarded_for: - remote_address = req.headers.get('X-Forwarded-For', remote_address) - ctx = context.RequestContext(user_id, - project_id, - roles=roles, - auth_token=auth_token, - remote_address=remote_address) - - req.environ['cinder.context'] = ctx - return self.application +def pipeline_factory(loader, global_conf, **local_conf): + LOG.warn(_('cinder.api.auth:pipeline_factory is deprecated. Please use ' + 'cinder.api.middleware.auth:pipeline_factory instead.')) + return auth.pipeline_factory(loader, global_conf, **local_conf) diff --git a/cinder/api/openstack/common.py b/cinder/api/common.py similarity index 82% rename from cinder/api/openstack/common.py rename to cinder/api/common.py index 255a0a743a..30a32eb7df 100644 --- a/cinder/api/openstack/common.py +++ b/cinder/api/common.py @@ -1,6 +1,4 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 OpenStack LLC. +# Copyright 2010 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -15,25 +13,57 @@ # License for the specific language governing permissions and limitations # under the License. 
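The rewritten cinder/api/auth.py above is a thin compatibility shim: the real middleware now lives in cinder.api.middleware.auth, and the old import path merely warns and delegates. A generic sketch of that pattern, with hypothetical package and class names standing in for the real ones:

    import warnings

    from newpkg.middleware import auth  # hypothetical new location


    class KeystoneContext(auth.KeystoneContext):
        # Deprecated alias kept so existing paste configs keep working.
        def __init__(self, application):
            warnings.warn('oldpkg.api.auth:KeystoneContext is deprecated; '
                          'use newpkg.middleware.auth:KeystoneContext.',
                          DeprecationWarning)
            super(KeystoneContext, self).__init__(application)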
+ import os import re import urlparse +from oslo.config import cfg import webob -from cinder import flags from cinder.api.openstack import wsgi -from cinder.api.openstack import xmlutil +from cinder.api import xmlutil from cinder.openstack.common import log as logging +from cinder import utils + + +api_common_opts = [ + cfg.IntOpt('osapi_max_limit', + default=1000, + help='the maximum number of items returned in a single ' + 'response from a collection resource'), + cfg.StrOpt('osapi_volume_base_URL', + default=None, + help='Base URL that will be presented to users in links ' + 'to the OpenStack Volume API', + deprecated_name='osapi_compute_link_prefix'), +] +CONF = cfg.CONF +CONF.register_opts(api_common_opts) LOG = logging.getLogger(__name__) -FLAGS = flags.FLAGS XML_NS_V1 = 'http://docs.openstack.org/volume/api/v1' +# Regex that matches alphanumeric characters, periods, hyphens, +# colons and underscores: +# ^ assert position at start of the string +# [\w\.\-\:\_] match expression +# $ assert position at end of the string +VALID_KEY_NAME_REGEX = re.compile(r"^[\w\.\-\:\_]+$", re.UNICODE) + + +def validate_key_names(key_names_list): + """Validate each item of the list to match key name regex.""" + for key_name in key_names_list: + if not VALID_KEY_NAME_REGEX.match(key_name): + return False + return True + + def get_pagination_params(request): """Return marker, limit tuple from request. @@ -55,7 +85,7 @@ def get_pagination_params(request): def _get_limit_param(request): - """Extract integer limit from request or fail""" + """Extract integer limit from request or fail.""" try: limit = int(request.GET['limit']) except ValueError: @@ -68,11 +98,11 @@ def _get_limit_param(request): def _get_marker_param(request): - """Extract marker id from request or fail""" + """Extract marker id from request or fail.""" return request.GET['marker'] -def limited(items, request, max_limit=FLAGS.osapi_max_limit): +def limited(items, request, max_limit=CONF.osapi_max_limit): """Return a slice of items according to requested offset and limit. 
:param items: A sliceable entity @@ -109,7 +139,7 @@ def limited(items, request, max_limit=FLAGS.osapi_max_limit): return items[offset:range_end] -def limited_by_marker(items, request, max_limit=FLAGS.osapi_max_limit): +def limited_by_marker(items, request, max_limit=CONF.osapi_max_limit): """Return a slice of items according to the requested marker and limit.""" params = get_pagination_params(request) @@ -181,21 +211,17 @@ class ViewBuilder(object): _collection_name = None def _get_links(self, request, identifier): - return [{ - "rel": "self", - "href": self._get_href_link(request, identifier), - }, - { - "rel": "bookmark", - "href": self._get_bookmark_link(request, identifier), - }] + return [{"rel": "self", + "href": self._get_href_link(request, identifier), }, + {"rel": "bookmark", + "href": self._get_bookmark_link(request, identifier), }] def _get_next_link(self, request, identifier): """Return href string with proper limit and marker params.""" params = request.params.copy() params["marker"] = identifier prefix = self._update_link_prefix(request.application_url, - FLAGS.osapi_compute_link_prefix) + CONF.osapi_volume_base_URL) url = os.path.join(prefix, request.environ["cinder.context"].project_id, self._collection_name) @@ -204,7 +230,7 @@ def _get_next_link(self, request, identifier): def _get_href_link(self, request, identifier): """Return an href string pointing to this object.""" prefix = self._update_link_prefix(request.application_url, - FLAGS.osapi_compute_link_prefix) + CONF.osapi_volume_base_URL) return os.path.join(prefix, request.environ["cinder.context"].project_id, self._collection_name, @@ -214,7 +240,7 @@ def _get_bookmark_link(self, request, identifier): """Create a URL that refers to a specific resource.""" base_url = remove_version_from_href(request.application_url) base_url = self._update_link_prefix(base_url, - FLAGS.osapi_compute_link_prefix) + CONF.osapi_volume_base_URL) return os.path.join(base_url, request.environ["cinder.context"].project_id, self._collection_name, @@ -247,7 +273,7 @@ def _update_link_prefix(self, orig_url, prefix): class MetadataDeserializer(wsgi.MetadataXMLDeserializer): def deserialize(self, text): - dom = minidom.parseString(text) + dom = utils.safe_minidom_parse_string(text) metadata_node = self.find_first_child_named(dom, "metadata") metadata = self.extract_metadata(metadata_node) return {'body': {'metadata': metadata}} @@ -255,7 +281,7 @@ def deserialize(self, text): class MetaItemDeserializer(wsgi.MetadataXMLDeserializer): def deserialize(self, text): - dom = minidom.parseString(text) + dom = utils.safe_minidom_parse_string(text) metadata_item = self.extract_metadata(dom) return {'body': {'meta': metadata_item}} @@ -263,7 +289,7 @@ def deserialize(self, text): class MetadataXMLDeserializer(wsgi.XMLDeserializer): def extract_metadata(self, metadata_node): - """Marshal the metadata attribute of a parsed request""" + """Marshal the metadata attribute of a parsed request.""" if metadata_node is None: return {} metadata = {} @@ -273,7 +299,7 @@ def extract_metadata(self, metadata_node): return metadata def _extract_metadata_container(self, datastring): - dom = minidom.parseString(datastring) + dom = utils.safe_minidom_parse_string(datastring) metadata_node = self.find_first_child_named(dom, "metadata") metadata = self.extract_metadata(metadata_node) return {'body': {'metadata': metadata}} @@ -285,7 +311,7 @@ def update_all(self, datastring): return self._extract_metadata_container(datastring) def update(self, datastring): - dom = 
minidom.parseString(datastring) + dom = utils.safe_minidom_parse_string(datastring) metadata_item = self.extract_metadata(dom) return {'body': {'meta': metadata_item}} diff --git a/cinder/api/openstack/volume/contrib/__init__.py b/cinder/api/contrib/__init__.py similarity index 84% rename from cinder/api/openstack/volume/contrib/__init__.py rename to cinder/api/contrib/__init__.py index 33b312f97e..48804e81e6 100644 --- a/cinder/api/openstack/volume/contrib/__init__.py +++ b/cinder/api/contrib/__init__.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2011 Justin Santa Barbara # All Rights Reserved. # @@ -21,12 +19,13 @@ """ -from cinder import flags +from oslo.config import cfg + +from cinder.api import extensions from cinder.openstack.common import log as logging -from cinder.api.openstack import extensions -FLAGS = flags.FLAGS +CONF = cfg.CONF LOG = logging.getLogger(__name__) @@ -36,4 +35,4 @@ def standard_extensions(ext_mgr): def select_extensions(ext_mgr): extensions.load_standard_extensions(ext_mgr, LOG, __path__, __package__, - FLAGS.osapi_volume_ext_list) + CONF.osapi_volume_ext_list) diff --git a/cinder/api/openstack/volume/contrib/admin_actions.py b/cinder/api/contrib/admin_actions.py similarity index 64% rename from cinder/api/openstack/volume/contrib/admin_actions.py rename to cinder/api/contrib/admin_actions.py index 8aa6863c03..588bc1a52e 100644 --- a/cinder/api/openstack/volume/contrib/admin_actions.py +++ b/cinder/api/contrib/admin_actions.py @@ -1,4 +1,4 @@ -# Copyright 2012 OpenStack, LLC. +# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -15,12 +15,13 @@ import webob from webob import exc -from cinder.api.openstack import extensions +from cinder.api import extensions from cinder.api.openstack import wsgi from cinder import db from cinder import exception -from cinder import volume from cinder.openstack.common import log as logging +from cinder.openstack.common import strutils +from cinder import volume LOG = logging.getLogger(__name__) @@ -82,7 +83,7 @@ def _reset_status(self, req, id, body): 'update': update}) try: self._update(context, id, update) - except exception.NotFound, e: + except exception.NotFound as e: raise exc.HTTPNotFound(e) return webob.Response(status_int=202) @@ -123,6 +124,69 @@ def validate_update(self, body): update['attach_status'] = body['attach_status'] return update + @wsgi.action('os-force_detach') + def _force_detach(self, req, id, body): + """Roll back a bad detach after the volume been disconnected.""" + context = req.environ['cinder.context'] + self.authorize(context, 'force_detach') + try: + volume = self._get(context, id) + except exception.NotFound: + raise exc.HTTPNotFound() + self.volume_api.terminate_connection(context, volume, + {}, force=True) + self.volume_api.detach(context, volume) + return webob.Response(status_int=202) + + @wsgi.action('os-migrate_volume') + def _migrate_volume(self, req, id, body): + """Migrate a volume to the specified host.""" + context = req.environ['cinder.context'] + self.authorize(context, 'migrate_volume') + try: + volume = self._get(context, id) + except exception.NotFound: + raise exc.HTTPNotFound() + params = body['os-migrate_volume'] + try: + host = params['host'] + except KeyError: + raise exc.HTTPBadRequest("Must specify 'host'") + force_host_copy = params.get('force_host_copy', False) + if isinstance(force_host_copy, basestring): 
+ try: + force_host_copy = strutils.bool_from_string(force_host_copy, + strict=True) + except ValueError: + raise exc.HTTPBadRequest("Bad value for 'force_host_copy'") + elif not isinstance(force_host_copy, bool): + raise exc.HTTPBadRequest("'force_host_copy' not string or bool") + self.volume_api.migrate_volume(context, volume, host, force_host_copy) + return webob.Response(status_int=202) + + @wsgi.action('os-migrate_volume_completion') + def _migrate_volume_completion(self, req, id, body): + """Complete an in-progress migration.""" + context = req.environ['cinder.context'] + self.authorize(context, 'migrate_volume_completion') + try: + volume = self._get(context, id) + except exception.NotFound: + raise exc.HTTPNotFound() + params = body['os-migrate_volume_completion'] + try: + new_volume_id = params['new_volume'] + except KeyError: + raise exc.HTTPBadRequest("Must specify 'new_volume'") + try: + new_volume = self._get(context, new_volume_id) + except exception.NotFound: + raise exc.HTTPNotFound() + error = params.get('error', False) + ret = self.volume_api.migrate_volume_completion(context, volume, + new_volume, error) + return {'save_volume_id': ret} + class SnapshotAdminController(AdminController): """AdminController for Snapshots.""" diff --git a/cinder/api/contrib/availability_zones.py b/cinder/api/contrib/availability_zones.py new file mode 100644 index 0000000000..2f02811046 --- /dev/null +++ b/cinder/api/contrib/availability_zones.py @@ -0,0 +1,70 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
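The force_host_copy handling in _migrate_volume above accepts either a native bool or a strictly parsed string (via strutils.bool_from_string) and rejects anything else. A plain-Python sketch of the same strict parse, shown only to illustrate the behavior the endpoint relies on:

    def parse_bool_strict(value):
        # Accept real booleans unchanged; parse common string spellings
        # strictly and reject anything else, as the admin action does.
        if isinstance(value, bool):
            return value
        if isinstance(value, str):
            lowered = value.strip().lower()
            if lowered in ('1', 't', 'true', 'yes', 'on'):
                return True
            if lowered in ('0', 'f', 'false', 'no', 'off'):
                return False
        raise ValueError('not a recognized boolean: %r' % (value,))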
+ +from cinder.api import extensions +from cinder.api.openstack import wsgi +import cinder.api.views.availability_zones +from cinder.api import xmlutil +import cinder.exception +import cinder.volume.api + + +def make_availability_zone(elem): + elem.set('name', 'zoneName') + zoneStateElem = xmlutil.SubTemplateElement(elem, 'zoneState', + selector='zoneState') + zoneStateElem.set('available') + + +class ListTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('availabilityZones') + elem = xmlutil.SubTemplateElement(root, 'availabilityZone', + selector='availabilityZoneInfo') + make_availability_zone(elem) + alias = Availability_zones.alias + namespace = Availability_zones.namespace + return xmlutil.MasterTemplate(root, 1, nsmap={alias: namespace}) + + +class Controller(wsgi.Controller): + + _view_builder_class = cinder.api.views.availability_zones.ViewBuilder + + def __init__(self, *args, **kwargs): + super(Controller, self).__init__(*args, **kwargs) + self.volume_api = cinder.volume.api.API() + + @wsgi.serializers(xml=ListTemplate) + def index(self, req): + """Describe all known availability zones.""" + azs = self.volume_api.list_availability_zones() + return self._view_builder.list(req, azs) + + +class Availability_zones(extensions.ExtensionDescriptor): + """Describe Availability Zones.""" + + name = 'AvailabilityZones' + alias = 'os-availability-zone' + namespace = ('http://docs.openstack.org/volume/ext/' + 'os-availability-zone/api/v1') + updated = '2013-06-27T00:00:00+00:00' + + def get_resources(self): + controller = Controller() + res = extensions.ResourceExtension(Availability_zones.alias, + controller) + return [res] diff --git a/cinder/api/contrib/backups.py b/cinder/api/contrib/backups.py new file mode 100644 index 0000000000..07c7e3a039 --- /dev/null +++ b/cinder/api/contrib/backups.py @@ -0,0 +1,279 @@ +# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
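For orientation, the availability-zone templates above serialize the view-builder output; judging from the selectors in ListTemplate and make_availability_zone, the pre-serialization payload has roughly this shape (zone name and state are illustrative values, not taken from the diff):

    # Illustrative payload returned by Controller.index before JSON/XML
    # serialization; 'nova' is just an example zone name.
    response = {
        'availabilityZoneInfo': [
            {'zoneName': 'nova', 'zoneState': {'available': True}},
        ],
    }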
+ +"""The backups api.""" + + +import webob +from webob import exc + +from cinder.api import common +from cinder.api import extensions +from cinder.api.openstack import wsgi +from cinder.api.views import backups as backup_views +from cinder.api import xmlutil +from cinder import backup as backupAPI +from cinder import exception +from cinder.openstack.common import log as logging +from cinder import utils + + +LOG = logging.getLogger(__name__) + + +def make_backup(elem): + elem.set('id') + elem.set('status') + elem.set('size') + elem.set('container') + elem.set('volume_id') + elem.set('object_count') + elem.set('availability_zone') + elem.set('created_at') + elem.set('name') + elem.set('description') + elem.set('fail_reason') + + +def make_backup_restore(elem): + elem.set('backup_id') + elem.set('volume_id') + + +class BackupTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('backup', selector='backup') + make_backup(root) + alias = Backups.alias + namespace = Backups.namespace + return xmlutil.MasterTemplate(root, 1, nsmap={alias: namespace}) + + +class BackupsTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('backups') + elem = xmlutil.SubTemplateElement(root, 'backup', selector='backups') + make_backup(elem) + alias = Backups.alias + namespace = Backups.namespace + return xmlutil.MasterTemplate(root, 1, nsmap={alias: namespace}) + + +class BackupRestoreTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('restore', selector='restore') + make_backup_restore(root) + alias = Backups.alias + namespace = Backups.namespace + return xmlutil.MasterTemplate(root, 1, nsmap={alias: namespace}) + + +class CreateDeserializer(wsgi.MetadataXMLDeserializer): + def default(self, string): + dom = utils.safe_minidom_parse_string(string) + backup = self._extract_backup(dom) + return {'body': {'backup': backup}} + + def _extract_backup(self, node): + backup = {} + backup_node = self.find_first_child_named(node, 'backup') + + attributes = ['container', 'display_name', + 'display_description', 'volume_id'] + + for attr in attributes: + if backup_node.getAttribute(attr): + backup[attr] = backup_node.getAttribute(attr) + return backup + + +class RestoreDeserializer(wsgi.MetadataXMLDeserializer): + def default(self, string): + dom = utils.safe_minidom_parse_string(string) + restore = self._extract_restore(dom) + return {'body': {'restore': restore}} + + def _extract_restore(self, node): + restore = {} + restore_node = self.find_first_child_named(node, 'restore') + if restore_node.getAttribute('volume_id'): + restore['volume_id'] = restore_node.getAttribute('volume_id') + return restore + + +class BackupsController(wsgi.Controller): + """The Backups API controller for the OpenStack API.""" + + _view_builder_class = backup_views.ViewBuilder + + def __init__(self): + self.backup_api = backupAPI.API() + super(BackupsController, self).__init__() + + @wsgi.serializers(xml=BackupTemplate) + def show(self, req, id): + """Return data about the given backup.""" + LOG.debug(_('show called for member %s'), id) + context = req.environ['cinder.context'] + + try: + backup = self.backup_api.get(context, backup_id=id) + except exception.BackupNotFound as error: + raise exc.HTTPNotFound(explanation=error.msg) + + return self._view_builder.detail(req, backup) + + def delete(self, req, id): + """Delete a backup.""" + LOG.debug(_('delete called for member %s'), id) + context = req.environ['cinder.context'] + + 
LOG.audit(_('Delete backup with id: %s'), id, context=context) + + try: + self.backup_api.delete(context, id) + except exception.BackupNotFound as error: + raise exc.HTTPNotFound(explanation=error.msg) + except exception.InvalidBackup as error: + raise exc.HTTPBadRequest(explanation=error.msg) + + return webob.Response(status_int=202) + + @wsgi.serializers(xml=BackupsTemplate) + def index(self, req): + """Returns a summary list of backups.""" + return self._get_backups(req, is_detail=False) + + @wsgi.serializers(xml=BackupsTemplate) + def detail(self, req): + """Returns a detailed list of backups.""" + return self._get_backups(req, is_detail=True) + + def _get_backups(self, req, is_detail): + """Returns a list of backups, transformed through view builder.""" + context = req.environ['cinder.context'] + backups = self.backup_api.get_all(context) + limited_list = common.limited(backups, req) + + if is_detail: + backups = self._view_builder.detail_list(req, limited_list) + else: + backups = self._view_builder.summary_list(req, limited_list) + return backups + + # TODO(frankm): Add some checks here including + # - whether requested volume_id exists so we can return some errors + # immediately + # - maybe also do validation of swift container name + @wsgi.response(202) + @wsgi.serializers(xml=BackupTemplate) + @wsgi.deserializers(xml=CreateDeserializer) + def create(self, req, body): + """Create a new backup.""" + LOG.debug(_('Creating new backup %s'), body) + if not self.is_valid_body(body, 'backup'): + raise exc.HTTPBadRequest() + + context = req.environ['cinder.context'] + + try: + backup = body['backup'] + volume_id = backup['volume_id'] + except KeyError: + msg = _("Incorrect request body format") + raise exc.HTTPBadRequest(explanation=msg) + container = backup.get('container', None) + name = backup.get('name', None) + description = backup.get('description', None) + + LOG.audit(_("Creating backup of volume %(volume_id)s in container" + " %(container)s"), + {'volume_id': volume_id, 'container': container}, + context=context) + + try: + new_backup = self.backup_api.create(context, name, description, + volume_id, container) + except exception.InvalidVolume as error: + raise exc.HTTPBadRequest(explanation=error.msg) + except exception.VolumeNotFound as error: + raise exc.HTTPNotFound(explanation=error.msg) + except exception.ServiceNotFound as error: + raise exc.HTTPInternalServerError(explanation=error.msg) + + retval = self._view_builder.summary(req, dict(new_backup.iteritems())) + return retval + + @wsgi.response(202) + @wsgi.serializers(xml=BackupRestoreTemplate) + @wsgi.deserializers(xml=RestoreDeserializer) + def restore(self, req, id, body): + """Restore an existing backup to a volume.""" + LOG.debug(_('Restoring backup %(backup_id)s (%(body)s)'), + {'backup_id': id, 'body': body}) + if not self.is_valid_body(body, 'restore'): + msg = _("Incorrect request body format") + raise exc.HTTPBadRequest(explanation=msg) + + context = req.environ['cinder.context'] + restore = body['restore'] + volume_id = restore.get('volume_id', None) + + LOG.audit(_("Restoring backup %(backup_id)s to volume %(volume_id)s"), + {'backup_id': id, 'volume_id': volume_id}, + context=context) + + try: + new_restore = self.backup_api.restore(context, + backup_id=id, + volume_id=volume_id) + except exception.InvalidInput as error: + raise exc.HTTPBadRequest(explanation=error.msg) + except exception.InvalidVolume as error: + raise exc.HTTPBadRequest(explanation=error.msg) + except exception.InvalidBackup as error: + 
raise exc.HTTPBadRequest(explanation=error.msg) + except exception.BackupNotFound as error: + raise exc.HTTPNotFound(explanation=error.msg) + except exception.VolumeNotFound as error: + raise exc.HTTPNotFound(explanation=error.msg) + except exception.VolumeSizeExceedsAvailableQuota as error: + raise exc.HTTPRequestEntityTooLarge( + explanation=error.msg, headers={'Retry-After': 0}) + except exception.VolumeLimitExceeded as error: + raise exc.HTTPRequestEntityTooLarge( + explanation=error.msg, headers={'Retry-After': 0}) + + retval = self._view_builder.restore_summary( + req, dict(new_restore.iteritems())) + return retval + + +class Backups(extensions.ExtensionDescriptor): + """Backups support.""" + + name = 'Backups' + alias = 'backups' + namespace = 'http://docs.openstack.org/volume/ext/backups/api/v1' + updated = '2012-12-12T00:00:00+00:00' + + def get_resources(self): + resources = [] + res = extensions.ResourceExtension( + Backups.alias, BackupsController(), + collection_actions={'detail': 'GET'}, + member_actions={'restore': 'POST'}) + resources.append(res) + return resources diff --git a/cinder/api/openstack/volume/contrib/extended_snapshot_attributes.py b/cinder/api/contrib/extended_snapshot_attributes.py similarity index 93% rename from cinder/api/openstack/volume/contrib/extended_snapshot_attributes.py rename to cinder/api/contrib/extended_snapshot_attributes.py index 348ca510aa..1c4712b100 100644 --- a/cinder/api/openstack/volume/contrib/extended_snapshot_attributes.py +++ b/cinder/api/contrib/extended_snapshot_attributes.py @@ -1,4 +1,4 @@ -# Copyright 2012 OpenStack, LLC. +# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -14,27 +14,27 @@ """The Extended Snapshot Attributes API extension.""" + from webob import exc -from cinder.api.openstack import extensions +from cinder.api import extensions from cinder.api.openstack import wsgi -from cinder.api.openstack import xmlutil -from cinder import volume +from cinder.api import xmlutil from cinder import exception -from cinder import flags from cinder.openstack.common import log as logging +from cinder import volume -FLAGS = flags.FLAGS LOG = logging.getLogger(__name__) -authorize = extensions.soft_extension_authorizer('volume', - 'extended_snapshot_attributes') +authorize = extensions.soft_extension_authorizer( + 'volume', + 'extended_snapshot_attributes') class ExtendedSnapshotAttributesController(wsgi.Controller): def __init__(self, *args, **kwargs): super(ExtendedSnapshotAttributesController, self).__init__(*args, - **kwargs) + **kwargs) self.volume_api = volume.API() def _get_snapshots(self, context): diff --git a/cinder/api/contrib/hosts.py b/cinder/api/contrib/hosts.py new file mode 100644 index 0000000000..93c64b7c30 --- /dev/null +++ b/cinder/api/contrib/hosts.py @@ -0,0 +1,269 @@ +# Copyright (c) 2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +"""The hosts admin extension.""" + + +from oslo.config import cfg +import webob.exc +from xml.parsers import expat + +from cinder.api import extensions +from cinder.api.openstack import wsgi +from cinder.api import xmlutil +from cinder import db +from cinder import exception +from cinder.openstack.common import log as logging +from cinder.openstack.common import timeutils +from cinder import utils +from cinder.volume import api as volume_api + + +CONF = cfg.CONF + +LOG = logging.getLogger(__name__) +authorize = extensions.extension_authorizer('volume', 'hosts') + + +class HostIndexTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('hosts') + elem = xmlutil.SubTemplateElement(root, 'host', selector='hosts') + elem.set('service-status') + elem.set('service') + elem.set('zone') + elem.set('service-state') + elem.set('host_name') + elem.set('last-update') + + return xmlutil.MasterTemplate(root, 1) + + +class HostUpdateTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('host') + root.set('host') + root.set('status') + + return xmlutil.MasterTemplate(root, 1) + + +class HostActionTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('host') + root.set('host') + + return xmlutil.MasterTemplate(root, 1) + + +class HostShowTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('host') + elem = xmlutil.make_flat_dict('resource', selector='host', + subselector='resource') + root.append(elem) + + return xmlutil.MasterTemplate(root, 1) + + +class HostDeserializer(wsgi.XMLDeserializer): + def default(self, string): + try: + node = utils.safe_minidom_parse_string(string) + except expat.ExpatError: + msg = _("cannot understand XML") + raise exception.MalformedRequestBody(reason=msg) + + updates = {} + for child in node.childNodes[0].childNodes: + updates[child.tagName] = self.extract_text(child) + + return dict(body=updates) + + +def _list_hosts(req, service=None): + """Returns a summary list of hosts.""" + curr_time = timeutils.utcnow() + context = req.environ['cinder.context'] + services = db.service_get_all(context, False) + zone = '' + if 'zone' in req.GET: + zone = req.GET['zone'] + if zone: + services = [s for s in services if s['availability_zone'] == zone] + hosts = [] + for host in services: + delta = curr_time - (host['updated_at'] or host['created_at']) + alive = abs(utils.total_seconds(delta)) <= CONF.service_down_time + status = (alive and "available") or "unavailable" + active = 'enabled' + if host['disabled']: + active = 'disabled' + LOG.debug('status, active and update: %s, %s, %s' % + (status, active, host['updated_at'])) + hosts.append({'host_name': host['host'], + 'service': host['topic'], + 'zone': host['availability_zone'], + 'service-status': status, + 'service-state': active, + 'last-update': host['updated_at']}) + if service: + hosts = [host for host in hosts + if host["service"] == service] + return hosts + + +def check_host(fn): + """Makes sure that the host exists.""" + def wrapped(self, req, id, service=None, *args, **kwargs): + listed_hosts = _list_hosts(req, service) + hosts = [h["host_name"] for h in listed_hosts] + if id in hosts: + return fn(self, req, id, *args, **kwargs) + else: + message = _("Host '%s' could not be found.") % id + raise webob.exc.HTTPNotFound(explanation=message) + return wrapped + + +class 
HostController(wsgi.Controller): + """The Hosts API controller for the OpenStack API.""" + def __init__(self): + self.api = volume_api.HostAPI() + super(HostController, self).__init__() + + @wsgi.serializers(xml=HostIndexTemplate) + def index(self, req): + authorize(req.environ['cinder.context']) + return {'hosts': _list_hosts(req)} + + @wsgi.serializers(xml=HostUpdateTemplate) + @wsgi.deserializers(xml=HostDeserializer) + @check_host + def update(self, req, id, body): + authorize(req.environ['cinder.context']) + update_values = {} + for raw_key, raw_val in body.iteritems(): + key = raw_key.lower().strip() + val = raw_val.lower().strip() + if key == "status": + if val in ("enable", "disable"): + update_values['status'] = val.startswith("enable") + else: + explanation = _("Invalid status: '%s'") % raw_val + raise webob.exc.HTTPBadRequest(explanation=explanation) + else: + explanation = _("Invalid update setting: '%s'") % raw_key + raise webob.exc.HTTPBadRequest(explanation=explanation) + update_setters = {'status': self._set_enabled_status} + result = {} + for key, value in update_values.iteritems(): + result.update(update_setters[key](req, id, value)) + return result + + def _set_enabled_status(self, req, host, enabled): + """Sets the specified host's ability to accept new volumes.""" + context = req.environ['cinder.context'] + state = "enabled" if enabled else "disabled" + LOG.audit(_("Setting host %(host)s to %(state)s."), + {'host': host, 'state': state}) + result = self.api.set_host_enabled(context, + host=host, + enabled=enabled) + if result not in ("enabled", "disabled"): + # An error message was returned + raise webob.exc.HTTPBadRequest(explanation=result) + return {"host": host, "status": result} + + @wsgi.serializers(xml=HostShowTemplate) + def show(self, req, id): + """Shows the volume usage info given by hosts. + + :param context: security context + :param host: hostname + :returns: expected to use HostShowTemplate. 
+ ex.:: + + {'host': {'resource':D},..} + D: {'host': 'hostname','project': 'admin', + 'volume_count': 1, 'total_volume_gb': 2048} + """ + host = id + context = req.environ['cinder.context'] + if not context.is_admin: + msg = _("Describe-resource is admin only functionality") + raise webob.exc.HTTPForbidden(explanation=msg) + + try: + host_ref = db.service_get_by_host_and_topic(context, + host, + CONF.volume_topic) + except exception.ServiceNotFound: + raise webob.exc.HTTPNotFound(explanation=_("Host not found")) + + # Getting total available/used resource + # TODO(jdg): Add summary info for Snapshots + volume_refs = db.volume_get_all_by_host(context, host_ref['host']) + (count, sum) = db.volume_data_get_for_host(context, + host_ref['host']) + + snap_count_total = 0 + snap_sum_total = 0 + resources = [{'resource': {'host': host, 'project': '(total)', + 'volume_count': str(count), + 'total_volume_gb': str(sum), + 'snapshot_count': str(snap_count_total), + 'total_snapshot_gb': str(snap_sum_total)}}] + + project_ids = [v['project_id'] for v in volume_refs] + project_ids = list(set(project_ids)) + for project_id in project_ids: + (count, sum) = db.volume_data_get_for_project(context, project_id) + (snap_count, snap_sum) = db.snapshot_data_get_for_project( + context, + project_id) + resources.append( + {'resource': + {'host': host, + 'project': project_id, + 'volume_count': str(count), + 'total_volume_gb': str(sum), + 'snapshot_count': str(snap_count), + 'total_snapshot_gb': str(snap_sum)}}) + snap_count_total += int(snap_count) + snap_sum_total += int(snap_sum) + resources[0]['resource']['snapshot_count'] = str(snap_count_total) + resources[0]['resource']['total_snapshot_gb'] = str(snap_sum_total) + return {"host": resources} + + +class Hosts(extensions.ExtensionDescriptor): + """Admin-only host administration.""" + + name = "Hosts" + alias = "os-hosts" + namespace = "http://docs.openstack.org/volume/ext/hosts/api/v1.1" + updated = "2011-06-29T00:00:00+00:00" + + def get_resources(self): + resources = [extensions.ResourceExtension('os-hosts', + HostController(), + collection_actions={ + 'update': 'PUT'}, + member_actions={ + 'startup': 'GET', + 'shutdown': 'GET', + 'reboot': 'GET'})] + return resources diff --git a/cinder/api/openstack/volume/contrib/image_create.py b/cinder/api/contrib/image_create.py similarity index 85% rename from cinder/api/openstack/volume/contrib/image_create.py rename to cinder/api/contrib/image_create.py index 51cf6bcbd2..3358831143 100644 --- a/cinder/api/openstack/volume/contrib/image_create.py +++ b/cinder/api/contrib/image_create.py @@ -1,31 +1,29 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright (c) 2012 NTT. -# Copyright (c) 2012 OpenStack, LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
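HostController.show above reports one '(total)' row plus one row per project. A distilled sketch of that aggregation, with the data shape assumed from the code (per-project (count, gigabytes) pairs; helper name is ours):

    def summarize_host(host, per_project):
        # per_project: {project_id: (volume_count, volume_gb)} -- assumed shape
        total_count = sum(count for count, _gb in per_project.values())
        total_gb = sum(gb for _count, gb in per_project.values())
        rows = [{'host': host, 'project': '(total)',
                 'volume_count': str(total_count),
                 'total_volume_gb': str(total_gb)}]
        for project_id, (count, gb) in sorted(per_project.items()):
            rows.append({'host': host, 'project': project_id,
                         'volume_count': str(count),
                         'total_volume_gb': str(gb)})
        return rows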
- -"""The Create Volume from Image extension.""" - - -from cinder.api.openstack import extensions - - -class Image_create(extensions.ExtensionDescriptor): - """Allow creating a volume from an image in the Create Volume v1 API""" - - name = "CreateVolumeExtension" - alias = "os-image-create" - namespace = "http://docs.openstack.org/volume/ext/image-create/api/v1" - updated = "2012-08-13T00:00:00+00:00" +# Copyright (c) 2012 NTT. +# Copyright (c) 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""The Create Volume from Image extension.""" + + +from cinder.api import extensions + + +class Image_create(extensions.ExtensionDescriptor): + """Allow creating a volume from an image in the Create Volume v1 API.""" + + name = "CreateVolumeExtension" + alias = "os-image-create" + namespace = "http://docs.openstack.org/volume/ext/image-create/api/v1" + updated = "2012-08-13T00:00:00+00:00" diff --git a/cinder/api/contrib/qos_specs_manage.py b/cinder/api/contrib/qos_specs_manage.py new file mode 100644 index 0000000000..d73862d666 --- /dev/null +++ b/cinder/api/contrib/qos_specs_manage.py @@ -0,0 +1,437 @@ +# Copyright (c) 2013 eBay Inc. +# Copyright (c) 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +"""The QoS specs extension""" + +import webob + +from cinder.api import extensions +from cinder.api.openstack import wsgi +from cinder.api.views import qos_specs as view_qos_specs +from cinder.api import xmlutil +from cinder import exception +from cinder.openstack.common import log as logging +from cinder.openstack.common.notifier import api as notifier_api +from cinder.openstack.common import strutils +from cinder.volume import qos_specs + + +LOG = logging.getLogger(__name__) + +authorize = extensions.extension_authorizer('volume', 'qos_specs_manage') + + +def make_qos_specs(elem): + elem.set('id') + elem.set('name') + elem.set('consumer') + elem.append(SpecsTemplate()) + + +def make_associations(elem): + elem.set('association_type') + elem.set('name') + elem.set('id') + + +class SpecsTemplate(xmlutil.TemplateBuilder): + def construct(self): + return xmlutil.MasterTemplate(xmlutil.make_flat_dict('specs'), 1) + + +class QoSSpecsTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('qos_specs') + elem = xmlutil.SubTemplateElement(root, 'qos_spec', + selector='qos_specs') + make_qos_specs(elem) + return xmlutil.MasterTemplate(root, 1) + + +class AssociationsTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('qos_associations') + elem = xmlutil.SubTemplateElement(root, 'associations', + selector='qos_associations') + make_associations(elem) + return xmlutil.MasterTemplate(root, 1) + + +def _check_specs(context, specs_id): + try: + qos_specs.get_qos_specs(context, specs_id) + except exception.NotFound as ex: + raise webob.exc.HTTPNotFound(explanation=unicode(ex)) + + +class QoSSpecsController(wsgi.Controller): + """The volume type extra specs API controller for the OpenStack API.""" + + _view_builder_class = view_qos_specs.ViewBuilder + + @staticmethod + def _notify_qos_specs_error(context, method, payload): + notifier_api.notify(context, + 'QoSSpecs', + method, + notifier_api.ERROR, + payload) + + @wsgi.serializers(xml=QoSSpecsTemplate) + def index(self, req): + """Returns the list of qos_specs.""" + context = req.environ['cinder.context'] + authorize(context) + specs = qos_specs.get_all_specs(context) + return self._view_builder.summary_list(req, specs) + + @wsgi.serializers(xml=QoSSpecsTemplate) + def create(self, req, body=None): + context = req.environ['cinder.context'] + authorize(context) + + if not self.is_valid_body(body, 'qos_specs'): + raise webob.exc.HTTPBadRequest() + + specs = body['qos_specs'] + name = specs.get('name', None) + if name is None or name == "": + msg = _("Please specify a name for QoS specs.") + raise webob.exc.HTTPBadRequest(explanation=msg) + + try: + qos_specs.create(context, name, specs) + spec = qos_specs.get_qos_specs_by_name(context, name) + notifier_info = dict(name=name, specs=specs) + notifier_api.notify(context, 'QoSSpecs', + 'QoSSpecs.create', + notifier_api.INFO, notifier_info) + except exception.InvalidInput as err: + notifier_err = dict(name=name, error_message=str(err)) + self._notify_qos_specs_error(context, + 'qos_specs.create', + notifier_err) + raise webob.exc.HTTPBadRequest(explanation=str(err)) + except exception.QoSSpecsExists as err: + notifier_err = dict(name=name, error_message=str(err)) + self._notify_qos_specs_error(context, + 'qos_specs.create', + notifier_err) + raise webob.exc.HTTPConflict(explanation=str(err)) + except exception.QoSSpecsCreateFailed as err: + notifier_err = dict(name=name, error_message=str(err)) + self._notify_qos_specs_error(context, + 
'qos_specs.create', + notifier_err) + raise webob.exc.HTTPInternalServerError(explanation=str(err)) + + return self._view_builder.detail(req, spec) + + @wsgi.serializers(xml=QoSSpecsTemplate) + def update(self, req, id, body=None): + context = req.environ['cinder.context'] + authorize(context) + + if not self.is_valid_body(body, 'qos_specs'): + raise webob.exc.HTTPBadRequest() + specs = body['qos_specs'] + try: + qos_specs.update(context, id, specs) + notifier_info = dict(id=id, specs=specs) + notifier_api.notify(context, 'QoSSpecs', + 'qos_specs.update', + notifier_api.INFO, notifier_info) + except exception.QoSSpecsNotFound as err: + notifier_err = dict(id=id, error_message=str(err)) + self._notify_qos_specs_error(context, + 'qos_specs.update', + notifier_err) + raise webob.exc.HTTPNotFound(explanation=str(err)) + except exception.InvalidQoSSpecs as err: + notifier_err = dict(id=id, error_message=str(err)) + self._notify_qos_specs_error(context, + 'qos_specs.update', + notifier_err) + raise webob.exc.HTTPBadRequest(explanation=str(err)) + except exception.QoSSpecsUpdateFailed as err: + notifier_err = dict(id=id, error_message=str(err)) + self._notify_qos_specs_error(context, + 'qos_specs.update', + notifier_err) + raise webob.exc.HTTPInternalServerError(explanation=str(err)) + + return body + + @wsgi.serializers(xml=QoSSpecsTemplate) + def show(self, req, id): + """Return a single qos spec item.""" + context = req.environ['cinder.context'] + authorize(context) + + try: + spec = qos_specs.get_qos_specs(context, id) + except exception.QoSSpecsNotFound as err: + raise webob.exc.HTTPNotFound(explanation=str(err)) + + return self._view_builder.detail(req, spec) + + def delete(self, req, id): + """Deletes an existing qos specs.""" + context = req.environ['cinder.context'] + authorize(context) + + force = req.params.get('force', None) + + #convert string to bool type in strict manner + force = strutils.bool_from_string(force) + LOG.debug("Delete qos_spec: %(id)s, force: %(force)s" % + {'id': id, 'force': force}) + + try: + qos_specs.delete(context, id, force) + notifier_info = dict(id=id) + notifier_api.notify(context, 'QoSSpecs', + 'qos_specs.delete', + notifier_api.INFO, notifier_info) + except exception.QoSSpecsNotFound as err: + notifier_err = dict(id=id, error_message=str(err)) + self._notify_qos_specs_error(context, + 'qos_specs.delete', + notifier_err) + raise webob.exc.HTTPNotFound(explanation=str(err)) + except exception.QoSSpecsInUse as err: + notifier_err = dict(id=id, error_message=str(err)) + self._notify_qos_specs_error(context, + 'qos_specs.delete', + notifier_err) + if force: + msg = _('Failed to disassociate qos specs.') + raise webob.exc.HTTPInternalServerError(explanation=msg) + msg = _('Qos specs still in use.') + raise webob.exc.HTTPBadRequest(explanation=msg) + + return webob.Response(status_int=202) + + def delete_keys(self, req, id, body): + """Deletes specified keys in qos specs.""" + context = req.environ['cinder.context'] + authorize(context) + + if not (body and 'keys' in body + and isinstance(body.get('keys'), list)): + raise webob.exc.HTTPBadRequest() + + keys = body['keys'] + LOG.debug("Delete_key spec: %(id)s, keys: %(keys)s" % + {'id': id, 'keys': keys}) + + try: + qos_specs.delete_keys(context, id, keys) + notifier_info = dict(id=id) + notifier_api.notify(context, 'QoSSpecs', + 'qos_specs.delete_keys', + notifier_api.INFO, notifier_info) + except exception.QoSSpecsNotFound as err: + notifier_err = dict(id=id, error_message=str(err)) + 
self._notify_qos_specs_error(context, + 'qos_specs.delete_keys', + notifier_err) + raise webob.exc.HTTPNotFound(explanation=str(err)) + except exception.QoSSpecsKeyNotFound as err: + notifier_err = dict(id=id, error_message=str(err)) + self._notify_qos_specs_error(context, + 'qos_specs.delete_keys', + notifier_err) + raise webob.exc.HTTPBadRequest(explanation=str(err)) + + return webob.Response(status_int=202) + + @wsgi.serializers(xml=AssociationsTemplate) + def associations(self, req, id): + """List all associations of given qos specs.""" + context = req.environ['cinder.context'] + authorize(context) + + LOG.debug("Get associations for qos_spec id: %s" % id) + + try: + associates = qos_specs.get_associations(context, id) + notifier_info = dict(id=id) + notifier_api.notify(context, 'QoSSpecs', + 'qos_specs.associations', + notifier_api.INFO, notifier_info) + except exception.QoSSpecsNotFound as err: + notifier_err = dict(id=id, error_message=str(err)) + self._notify_qos_specs_error(context, + 'qos_specs.associations', + notifier_err) + raise webob.exc.HTTPNotFound(explanation=str(err)) + except exception.CinderException as err: + notifier_err = dict(id=id, error_message=str(err)) + self._notify_qos_specs_error(context, + 'qos_specs.associations', + notifier_err) + raise webob.exc.HTTPInternalServerError(explanation=str(err)) + + return self._view_builder.associations(req, associates) + + def associate(self, req, id): + """Associate a qos specs with a volume type.""" + context = req.environ['cinder.context'] + authorize(context) + + type_id = req.params.get('vol_type_id', None) + + if not type_id: + msg = _('Volume Type id must not be None.') + notifier_err = dict(id=id, error_message=msg) + self._notify_qos_specs_error(context, + 'qos_specs.delete', + notifier_err) + raise webob.exc.HTTPBadRequest(explanation=msg) + LOG.debug("Associate qos_spec: %(id)s with type: %(type_id)s" % + {'id': id, 'type_id': type_id}) + + try: + qos_specs.associate_qos_with_type(context, id, type_id) + notifier_info = dict(id=id, type_id=type_id) + notifier_api.notify(context, 'QoSSpecs', + 'qos_specs.associate', + notifier_api.INFO, notifier_info) + except exception.VolumeTypeNotFound as err: + notifier_err = dict(id=id, error_message=str(err)) + self._notify_qos_specs_error(context, + 'qos_specs.associate', + notifier_err) + raise webob.exc.HTTPNotFound(explanation=str(err)) + except exception.QoSSpecsNotFound as err: + notifier_err = dict(id=id, error_message=str(err)) + self._notify_qos_specs_error(context, + 'qos_specs.associate', + notifier_err) + raise webob.exc.HTTPNotFound(explanation=str(err)) + except exception.InvalidVolumeType as err: + notifier_err = dict(id=id, error_message=str(err)) + self._notify_qos_specs_error(context, + 'qos_specs.associate', + notifier_err) + raise webob.exc.HTTPBadRequest(explanation=str(err)) + except exception.QoSSpecsAssociateFailed as err: + notifier_err = dict(id=id, error_message=str(err)) + self._notify_qos_specs_error(context, + 'qos_specs.associate', + notifier_err) + raise webob.exc.HTTPInternalServerError(explanation=str(err)) + + return webob.Response(status_int=202) + + def disassociate(self, req, id): + """Disassociate a qos specs from a volume type.""" + context = req.environ['cinder.context'] + authorize(context) + + type_id = req.params.get('vol_type_id', None) + + if not type_id: + msg = _('Volume Type id must not be None.') + notifier_err = dict(id=id, error_message=msg) + 
+ def disassociate(self, req, id): + """Disassociate qos specs from a volume type.""" + context = req.environ['cinder.context'] + authorize(context) + + type_id = req.params.get('vol_type_id', None) + + if not type_id: + msg = _('Volume Type id must not be None.') + notifier_err = dict(id=id, error_message=msg) + self._notify_qos_specs_error(context, + 'qos_specs.disassociate', + notifier_err) + raise webob.exc.HTTPBadRequest(explanation=msg) + LOG.debug("Disassociate qos_spec: %(id)s from type: %(type_id)s" % + {'id': id, 'type_id': type_id}) + + try: + qos_specs.disassociate_qos_specs(context, id, type_id) + notifier_info = dict(id=id, type_id=type_id) + notifier_api.notify(context, 'QoSSpecs', + 'qos_specs.disassociate', + notifier_api.INFO, notifier_info) + except exception.VolumeTypeNotFound as err: + notifier_err = dict(id=id, error_message=str(err)) + self._notify_qos_specs_error(context, + 'qos_specs.disassociate', + notifier_err) + raise webob.exc.HTTPNotFound(explanation=str(err)) + except exception.QoSSpecsNotFound as err: + notifier_err = dict(id=id, error_message=str(err)) + self._notify_qos_specs_error(context, + 'qos_specs.disassociate', + notifier_err) + raise webob.exc.HTTPNotFound(explanation=str(err)) + except exception.QoSSpecsDisassociateFailed as err: + notifier_err = dict(id=id, error_message=str(err)) + self._notify_qos_specs_error(context, + 'qos_specs.disassociate', + notifier_err) + raise webob.exc.HTTPInternalServerError(explanation=str(err)) + + return webob.Response(status_int=202) + + def disassociate_all(self, req, id): + """Disassociate qos specs from all volume types.""" + context = req.environ['cinder.context'] + authorize(context) + + LOG.debug("Disassociate qos_spec: %s from all." % id) + + try: + qos_specs.disassociate_all(context, id) + notifier_info = dict(id=id) + notifier_api.notify(context, 'QoSSpecs', + 'qos_specs.disassociate_all', + notifier_api.INFO, notifier_info) + except exception.QoSSpecsNotFound as err: + notifier_err = dict(id=id, error_message=str(err)) + self._notify_qos_specs_error(context, + 'qos_specs.disassociate_all', + notifier_err) + raise webob.exc.HTTPNotFound(explanation=str(err)) + except exception.QoSSpecsDisassociateFailed as err: + notifier_err = dict(id=id, error_message=str(err)) + self._notify_qos_specs_error(context, + 'qos_specs.disassociate_all', + notifier_err) + raise webob.exc.HTTPInternalServerError(explanation=str(err)) + + return webob.Response(status_int=202) + + +class Qos_specs_manage(extensions.ExtensionDescriptor): + """QoS specs support.""" + + name = "Qos_specs_manage" + alias = "qos-specs" + namespace = "http://docs.openstack.org/volume/ext/qos-specs/api/v1" + updated = "2013-08-02T00:00:00+00:00" + + def get_resources(self): + resources = [] + res = extensions.ResourceExtension( + Qos_specs_manage.alias, + QoSSpecsController(), + member_actions={"associations": "GET", + "associate": "GET", + "disassociate": "GET", + "disassociate_all": "GET", + "delete_keys": "PUT"}) + + resources.append(res) + + return resources diff --git a/cinder/api/openstack/volume/contrib/quota_classes.py b/cinder/api/contrib/quota_classes.py similarity index 71% rename from cinder/api/openstack/volume/contrib/quota_classes.py rename to cinder/api/contrib/quota_classes.py index cbad0e3af8..ffa1bf7f95 100644 --- a/cinder/api/openstack/volume/contrib/quota_classes.py +++ b/cinder/api/contrib/quota_classes.py @@ -1,4 +1,4 @@ -# Copyright 2012 OpenStack LLC. +# Copyright 2012 OpenStack Foundation + # All Rights Reserved.
# # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -15,9 +15,9 @@ import webob -from cinder.api.openstack import extensions +from cinder.api import extensions from cinder.api.openstack import wsgi -from cinder.api.openstack import xmlutil +from cinder.api import xmlutil from cinder import db from cinder import exception from cinder import quota @@ -42,17 +42,14 @@ def construct(self): return xmlutil.MasterTemplate(root, 1) -class QuotaClassSetsController(object): +class QuotaClassSetsController(wsgi.Controller): def _format_quota_set(self, quota_class, quota_set): - """Convert the quota object to a result dict""" + """Convert the quota object to a result dict.""" - result = dict(id=str(quota_class)) + quota_set['id'] = str(quota_class) - for resource in QUOTAS.resources: - result[resource] = quota_set[resource] - - return dict(quota_class_set=result) + return dict(quota_class_set=quota_set) @wsgi.serializers(xml=QuotaClassTemplate) def show(self, req, id): @@ -63,19 +60,30 @@ def show(self, req, id): except exception.NotAuthorized: raise webob.exc.HTTPForbidden() - return self._format_quota_set( - id, - QUOTAS.get_class_quotas(context, id) - ) + return self._format_quota_set(id, + QUOTAS.get_class_quotas(context, id)) @wsgi.serializers(xml=QuotaClassTemplate) def update(self, req, id, body): context = req.environ['cinder.context'] authorize(context) quota_class = id + if not self.is_valid_body(body, 'quota_class_set'): + msg = (_("Missing required element quota_class_set" + " in request body.")) + raise webob.exc.HTTPBadRequest(explanation=msg) + for key in body['quota_class_set'].keys(): if key in QUOTAS: - value = int(body['quota_class_set'][key]) + try: + value = int(body['quota_class_set'][key]) + except ValueError: + msg = _("Quota class limit must be specified as an" + " integer value.") + raise webob.exc.HTTPBadRequest(explanation=msg) + if value < -1: + msg = _("Quota class limit must be -1 or greater.") + raise webob.exc.HTTPBadRequest(explanation=msg) try: db.quota_class_update(context, quota_class, key, value) except exception.QuotaClassNotFound: @@ -87,7 +95,7 @@ def update(self, req, id, body): class Quota_classes(extensions.ExtensionDescriptor): - """Quota classes management support""" + """Quota classes management support.""" name = "QuotaClasses" alias = "os-quota-class-sets" diff --git a/cinder/api/openstack/volume/contrib/quotas.py b/cinder/api/contrib/quotas.py similarity index 56% rename from cinder/api/openstack/volume/contrib/quotas.py rename to cinder/api/contrib/quotas.py index 7f00863e4a..3dd9919b43 100644 --- a/cinder/api/openstack/volume/contrib/quotas.py +++ b/cinder/api/contrib/quotas.py @@ -1,6 +1,4 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2011 OpenStack LLC. +# Copyright 2011 OpenStack Foundation # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -17,20 +15,23 @@ import webob -from cinder.api.openstack import extensions +from cinder.api import extensions from cinder.api.openstack import wsgi -from cinder.api.openstack import xmlutil +from cinder.api import xmlutil from cinder import db from cinder.db.sqlalchemy import api as sqlalchemy_api from cinder import exception +from cinder.openstack.common.gettextutils import _ +from cinder.openstack.common import strutils from cinder import quota QUOTAS = quota.QUOTAS +NON_QUOTA_KEYS = ['tenant_id', 'id'] -authorize_update = extensions.extension_authorizer('compute', 'quotas:update') -authorize_show = extensions.extension_authorizer('compute', 'quotas:show') +authorize_update = extensions.extension_authorizer('volume', 'quotas:update') +authorize_show = extensions.extension_authorizer('volume', 'quotas:show') class QuotaTemplate(xmlutil.TemplateBuilder): @@ -45,19 +46,20 @@ def construct(self): return xmlutil.MasterTemplate(root, 1) -class QuotaSetsController(object): +class QuotaSetsController(wsgi.Controller): def _format_quota_set(self, project_id, quota_set): - """Convert the quota object to a result dict""" - - result = dict(id=str(project_id)) + """Convert the quota object to a result dict.""" - for resource in QUOTAS.resources: - result[resource] = quota_set[resource] + quota_set['id'] = str(project_id) - return dict(quota_set=result) + return dict(quota_set=quota_set) def _validate_quota_limit(self, limit): + if not isinstance(limit, int): + msg = _("Quota limit must be specified as an integer value.") + raise webob.exc.HTTPBadRequest(explanation=msg) + # NOTE: -1 is a flag value for unlimited if limit < -1: msg = _("Quota limit must be -1 or greater.") @@ -75,28 +77,52 @@ def _get_quotas(self, context, id, usages=False): def show(self, req, id): context = req.environ['cinder.context'] authorize_show(context) + + params = req.params + if not hasattr(params, '__call__') and 'usage' in params: + usage = strutils.bool_from_string(params['usage']) + else: + usage = False + try: sqlalchemy_api.authorize_project_context(context, id) except exception.NotAuthorized: raise webob.exc.HTTPForbidden() - return self._format_quota_set(id, self._get_quotas(context, id)) + return self._format_quota_set(id, self._get_quotas(context, id, usage)) @wsgi.serializers(xml=QuotaTemplate) def update(self, req, id, body): context = req.environ['cinder.context'] authorize_update(context) project_id = id + if not self.is_valid_body(body, 'quota_set'): + msg = (_("Missing required element quota_set in request body.")) + raise webob.exc.HTTPBadRequest(explanation=msg) + + bad_keys = [] + + for key, value in body['quota_set'].items(): + if (key not in QUOTAS and key not in NON_QUOTA_KEYS): + bad_keys.append(key) + continue + + if len(bad_keys) > 0: + msg = _("Bad key(s) in quota set: %s") % ",".join(bad_keys) + raise webob.exc.HTTPBadRequest(explanation=msg) + for key in body['quota_set'].keys(): - if key in QUOTAS: - value = int(body['quota_set'][key]) - self._validate_quota_limit(value) - try: - db.quota_update(context, project_id, key, value) - except exception.ProjectQuotaNotFound: - db.quota_create(context, project_id, key, value) - except exception.AdminRequired: - raise webob.exc.HTTPForbidden() + if key in NON_QUOTA_KEYS: + continue + + self._validate_quota_limit(body['quota_set'][key]) + value = int(body['quota_set'][key]) + try: + db.quota_update(context, project_id, key, value) + except exception.ProjectQuotaNotFound: + 
db.quota_create(context, project_id, key, value) + except exception.AdminRequired: + raise webob.exc.HTTPForbidden() return {'quota_set': self._get_quotas(context, id)} @wsgi.serializers(xml=QuotaTemplate) @@ -107,19 +133,19 @@ def defaults(self, req, id): class Quotas(extensions.ExtensionDescriptor): - """Quotas management support""" + """Quota management support.""" name = "Quotas" alias = "os-quota-sets" - namespace = "http://docs.openstack.org/compute/ext/quotas-sets/api/v1.1" + namespace = "http://docs.openstack.org/volume/ext/quotas-sets/api/v1.1" updated = "2011-08-08T00:00:00+00:00" def get_resources(self): resources = [] res = extensions.ResourceExtension('os-quota-sets', - QuotaSetsController(), - member_actions={'defaults': 'GET'}) + QuotaSetsController(), + member_actions={'defaults': 'GET'}) resources.append(res) return resources diff --git a/cinder/api/contrib/scheduler_hints.py b/cinder/api/contrib/scheduler_hints.py new file mode 100644 index 0000000000..b01601f1e5 --- /dev/null +++ b/cinder/api/contrib/scheduler_hints.py @@ -0,0 +1,61 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import webob.exc + +from cinder.api import extensions +from cinder.api.openstack import wsgi +from cinder.api.v2 import volumes +from cinder.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + + +class SchedulerHintsController(wsgi.Controller): + + @staticmethod + def _extract_scheduler_hints(body): + hints = {} + + attr = '%s:scheduler_hints' % Scheduler_hints.alias + try: + if attr in body: + hints.update(body[attr]) + except ValueError: + msg = _("Malformed scheduler_hints attribute") + raise webob.exc.HTTPBadRequest(explanation=msg) + + return hints + + @wsgi.extends + def create(self, req, body): + hints = self._extract_scheduler_hints(body) + + if 'volume' in body: + body['volume']['scheduler_hints'] = hints + yield + + +class Scheduler_hints(extensions.ExtensionDescriptor): + """Pass arbitrary key/value pairs to the scheduler.""" + + name = "SchedulerHints" + alias = "OS-SCH-HNT" + namespace = volumes.SCHEDULER_HINTS_NAMESPACE + updated = "2013-04-18T00:00:00+00:00" + + def get_controller_extensions(self): + controller = SchedulerHintsController() + ext = extensions.ControllerExtension(self, 'volumes', controller) + return [ext] diff --git a/cinder/api/contrib/services.py b/cinder/api/contrib/services.py new file mode 100644 index 0000000000..998208665b --- /dev/null +++ b/cinder/api/contrib/services.py @@ -0,0 +1,167 @@ +# Copyright 2012 IBM Corp. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + + +from oslo.config import cfg +import webob.exc + +from cinder.api import extensions +from cinder.api.openstack import wsgi +from cinder.api import xmlutil +from cinder import db +from cinder import exception +from cinder.openstack.common import log as logging +from cinder.openstack.common import timeutils +from cinder import utils + + +CONF = cfg.CONF + +LOG = logging.getLogger(__name__) +authorize = extensions.extension_authorizer('volume', 'services') + + +class ServicesIndexTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('services') + elem = xmlutil.SubTemplateElement(root, 'service', selector='services') + elem.set('binary') + elem.set('host') + elem.set('zone') + elem.set('status') + elem.set('state') + elem.set('updated_at') + + return xmlutil.MasterTemplate(root, 1) + + +class ServicesUpdateTemplate(xmlutil.TemplateBuilder): + def construct(self): + # TODO(uni): template elements of 'host', 'service' and 'disabled' + # should be deprecated to make ServicesUpdateTemplate consistent + # with ServicesIndexTemplate. Still keeping it here for API + # compatibility sake. + root = xmlutil.TemplateElement('host') + root.set('host') + root.set('service') + root.set('disabled') + root.set('binary') + root.set('status') + + return xmlutil.MasterTemplate(root, 1) + + +class ServiceController(wsgi.Controller): + @wsgi.serializers(xml=ServicesIndexTemplate) + def index(self, req): + """Return a list of all running services. + + Filter by host & service name. + """ + context = req.environ['cinder.context'] + authorize(context) + now = timeutils.utcnow() + services = db.service_get_all(context) + + host = '' + if 'host' in req.GET: + host = req.GET['host'] + service = '' + if 'service' in req.GET: + service = req.GET['service'] + LOG.deprecated(_("Query by service parameter is deprecated. " + "Please use binary parameter instead.")) + binary = '' + if 'binary' in req.GET: + binary = req.GET['binary'] + + if host: + services = [s for s in services if s['host'] == host] + # NOTE(uni): deprecating service request key, binary takes precedence + binary_key = binary or service + if binary_key: + services = [s for s in services if s['binary'] == binary_key] + + svcs = [] + for svc in services: + delta = now - (svc['updated_at'] or svc['created_at']) + alive = abs(utils.total_seconds(delta)) <= CONF.service_down_time + art = (alive and "up") or "down" + active = 'enabled' + if svc['disabled']: + active = 'disabled' + svcs.append({"binary": svc['binary'], 'host': svc['host'], + 'zone': svc['availability_zone'], + 'status': active, 'state': art, + 'updated_at': svc['updated_at']}) + return {'services': svcs} + + @wsgi.serializers(xml=ServicesUpdateTemplate) + def update(self, req, id, body): + """Enable/Disable scheduling for a service.""" + context = req.environ['cinder.context'] + authorize(context) + + if id == "enable": + disabled = False + elif id == "disable": + disabled = True + else: + raise webob.exc.HTTPNotFound("Unknown action") + + try: + host = body['host'] + except (TypeError, KeyError): + raise webob.exc.HTTPBadRequest() + + # NOTE(uni): deprecating service request key, binary takes precedence + # Still keeping service key here for API compatibility sake.
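+        # NOTE(editor): an illustrative request, with made-up values --
+        # the id in the URL selects the enable/disable branch above:
+        #   PUT /v2/{tenant_id}/os-services/disable
+        #   {"host": "host-001", "binary": "cinder-volume"}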
+ service = body.get('service', '') + binary = body.get('binary', '') + binary_key = binary or service + if not binary_key: + raise webob.exc.HTTPBadRequest() + + try: + svc = db.service_get_by_args(context, host, binary_key) + if not svc: + raise webob.exc.HTTPNotFound('Unknown service') + + db.service_update(context, svc['id'], {'disabled': disabled}) + except exception.ServiceNotFound: + raise webob.exc.HTTPNotFound("service not found") + + status = id + 'd' + return {'host': host, + 'service': service, + 'disabled': disabled, + 'binary': binary, + 'status': status} + + +class Services(extensions.ExtensionDescriptor): + """Services support.""" + + name = "Services" + alias = "os-services" + namespace = "http://docs.openstack.org/volume/ext/services/api/v2" + updated = "2012-10-28T00:00:00-00:00" + + def get_resources(self): + resources = [] + resource = extensions.ResourceExtension('os-services', + ServiceController()) + resources.append(resource) + return resources diff --git a/cinder/api/contrib/snapshot_actions.py b/cinder/api/contrib/snapshot_actions.py new file mode 100644 index 0000000000..eec07f8231 --- /dev/null +++ b/cinder/api/contrib/snapshot_actions.py @@ -0,0 +1,110 @@ +# Copyright 2013, Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import webob + +from cinder.api import extensions +from cinder.api.openstack import wsgi +from cinder import db +from cinder.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + + +def authorize(context, action_name): + action = 'snapshot_actions:%s' % action_name + extensions.extension_authorizer('snapshot', action)(context) + + +class SnapshotActionsController(wsgi.Controller): + def __init__(self, *args, **kwargs): + super(SnapshotActionsController, self).__init__(*args, **kwargs) + LOG.debug("SnapshotActionsController initialized") + + @wsgi.action('os-update_snapshot_status') + def _update_snapshot_status(self, req, id, body): + """Update database fields related to status of a snapshot. + + Intended for creation of snapshots, so snapshot state + must start as 'creating' and be changed to 'available', + 'creating', or 'error'. 
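+
+        An illustrative request body, with hypothetical values::
+
+            {"os-update_snapshot_status": {"status": "available",
+                                           "progress": "90%"}}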
+ """ + + context = req.environ['cinder.context'] + authorize(context, 'update_snapshot_status') + + LOG.debug("body: %s" % body) + try: + status = body['os-update_snapshot_status']['status'] + except KeyError: + msg = _("'status' must be specified.") + raise webob.exc.HTTPBadRequest(explanation=msg) + + # Allowed state transitions + status_map = {'creating': ['creating', 'available', 'error'], + 'deleting': ['deleting', 'error_deleting']} + + current_snapshot = db.snapshot_get(context, id) + + if current_snapshot['status'] not in status_map: + msg = _("Snapshot status %(cur)s not allowed for " + "update_snapshot_status") % { + 'cur': current_snapshot['status']} + raise webob.exc.HTTPBadRequest(explanation=msg) + + if status not in status_map[current_snapshot['status']]: + msg = _("Provided snapshot status %(provided)s not allowed for " + "snapshot with status %(current)s.") % \ + {'provided': status, + 'current': current_snapshot['status']} + raise webob.exc.HTTPBadRequest(explanation=msg) + + update_dict = {'id': id, + 'status': status} + + progress = body['os-update_snapshot_status'].get('progress', None) + if progress: + # This is expected to be a string like '73%' + msg = _('progress must be an integer percentage') + try: + integer = int(progress[:-1]) + except ValueError: + raise webob.exc.HTTPBadRequest(explanation=msg) + if integer < 0 or integer > 100 or progress[-1] != '%': + raise webob.exc.HTTPBadRequest(explanation=msg) + + update_dict.update({'progress': progress}) + + LOG.info("Updating snapshot %(id)s with info %(dict)s" % + {'id': id, 'dict': update_dict}) + + db.snapshot_update(context, id, update_dict) + return webob.Response(status_int=202) + + +class Snapshot_actions(extensions.ExtensionDescriptor): + """Enable snapshot manager actions.""" + + name = "SnapshotActions" + alias = "os-snapshot-actions" + namespace = \ + "http://docs.openstack.org/volume/ext/snapshot-actions/api/v1.1" + updated = "2013-07-16T00:00:00+00:00" + + def get_controller_extensions(self): + controller = SnapshotActionsController() + extension = extensions.ControllerExtension(self, + 'snapshots', + controller) + return [extension] diff --git a/cinder/api/openstack/volume/contrib/types_extra_specs.py b/cinder/api/contrib/types_extra_specs.py similarity index 67% rename from cinder/api/openstack/volume/contrib/types_extra_specs.py rename to cinder/api/contrib/types_extra_specs.py index 50ddb234ab..f2131d58ba 100644 --- a/cinder/api/openstack/volume/contrib/types_extra_specs.py +++ b/cinder/api/contrib/types_extra_specs.py @@ -1,7 +1,5 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright (c) 2011 Zadara Storage Inc. -# Copyright (c) 2011 OpenStack LLC. +# Copyright (c) 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain @@ -19,14 +17,15 @@ import webob -from cinder.api.openstack import extensions +from cinder.api import common +from cinder.api import extensions from cinder.api.openstack import wsgi -from cinder.api.openstack import xmlutil +from cinder.api import xmlutil from cinder import db from cinder import exception +from cinder.openstack.common.notifier import api as notifier_api from cinder.volume import volume_types - authorize = extensions.extension_authorizer('volume', 'types_extra_specs') @@ -51,7 +50,7 @@ def extraspec_sel(obj, do_raise=False): class VolumeTypeExtraSpecsController(wsgi.Controller): - """ The volume type extra specs API controller for the OpenStack API """ + """The volume type extra specs API controller for the OpenStack API.""" def _get_extra_specs(self, context, type_id): extra_specs = db.volume_type_extra_specs_get(context, type_id) @@ -64,11 +63,11 @@ def _check_type(self, context, type_id): try: volume_types.get_volume_type(context, type_id) except exception.NotFound as ex: - raise webob.exc.HTTPNotFound(explanation=unicode(ex)) + raise webob.exc.HTTPNotFound(explanation=ex.msg) @wsgi.serializers(xml=VolumeTypeExtraSpecsTemplate) def index(self, req, type_id): - """ Returns the list of extra specs for a given volume type """ + """Returns the list of extra specs for a given volume type.""" context = req.environ['cinder.context'] authorize(context) self._check_type(context, type_id) @@ -80,14 +79,18 @@ def create(self, req, type_id, body=None): authorize(context) if not self.is_valid_body(body, 'extra_specs'): - raise webob.exc.HTTPUnprocessableEntity() + raise webob.exc.HTTPBadRequest() self._check_type(context, type_id) - specs = body['extra_specs'] + self._check_key_names(specs.keys()) db.volume_type_extra_specs_update_or_create(context, type_id, specs) + notifier_info = dict(type_id=type_id, specs=specs) + notifier_api.notify(context, 'volumeTypeExtraSpecs', + 'volume_type_extra_specs.create', + notifier_api.INFO, notifier_info) return body @wsgi.serializers(xml=VolumeTypeExtraSpecTemplate) @@ -95,9 +98,10 @@ def update(self, req, type_id, id, body=None): context = req.environ['cinder.context'] authorize(context) if not body: - raise webob.exc.HTTPUnprocessableEntity() + expl = _('Request body empty') + raise webob.exc.HTTPBadRequest(explanation=expl) self._check_type(context, type_id) - if not id in body: + if id not in body: expl = _('Request body and URI mismatch') raise webob.exc.HTTPBadRequest(explanation=expl) if len(body) > 1: @@ -106,6 +110,10 @@ def update(self, req, type_id, id, body=None): db.volume_type_extra_specs_update_or_create(context, type_id, body) + notifier_info = dict(type_id=type_id, id=id) + notifier_api.notify(context, 'volumeTypeExtraSpecs', + 'volume_type_extra_specs.update', + notifier_api.INFO, notifier_info) return body @wsgi.serializers(xml=VolumeTypeExtraSpecTemplate) @@ -121,16 +129,32 @@ def show(self, req, type_id, id): raise webob.exc.HTTPNotFound() def delete(self, req, type_id, id): - """ Deletes an existing extra spec """ + """Deletes an existing extra spec.""" context = req.environ['cinder.context'] self._check_type(context, type_id) authorize(context) - db.volume_type_extra_specs_delete(context, type_id, id) + + try: + db.volume_type_extra_specs_delete(context, type_id, id) + except exception.VolumeTypeExtraSpecsNotFound as error: + raise webob.exc.HTTPNotFound(explanation=error.msg) + + notifier_info = dict(type_id=type_id, id=id) + notifier_api.notify(context, 'volumeTypeExtraSpecs', + 
'volume_type_extra_specs.delete', + notifier_api.INFO, notifier_info) return webob.Response(status_int=202) + def _check_key_names(self, keys): + if not common.validate_key_names(keys): + expl = _('Key names can only contain alphanumeric characters, ' + 'underscores, periods, colons and hyphens.') + + raise webob.exc.HTTPBadRequest(explanation=expl) + class Types_extra_specs(extensions.ExtensionDescriptor): - """Types extra specs support""" + """Type extra specs support.""" name = "TypesExtraSpecs" alias = "os-types-extra-specs" @@ -140,10 +164,10 @@ class Types_extra_specs(extensions.ExtensionDescriptor): def get_resources(self): resources = [] res = extensions.ResourceExtension('extra_specs', - VolumeTypeExtraSpecsController(), - parent=dict( - member_name='type', - collection_name='types')) + VolumeTypeExtraSpecsController(), + parent=dict(member_name='type', + collection_name='types') + ) resources.append(res) return resources diff --git a/cinder/api/openstack/volume/contrib/types_manage.py b/cinder/api/contrib/types_manage.py similarity index 50% rename from cinder/api/openstack/volume/contrib/types_manage.py rename to cinder/api/contrib/types_manage.py index 3ff2efebca..b6fcf1022f 100644 --- a/cinder/api/openstack/volume/contrib/types_manage.py +++ b/cinder/api/contrib/types_manage.py @@ -1,7 +1,5 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright (c) 2011 Zadara Storage Inc. -# Copyright (c) 2011 OpenStack LLC. +# Copyright (c) 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -19,11 +17,12 @@ import webob -from cinder.api.openstack import extensions -from cinder.api.openstack.volume import types -from cinder.api.openstack.volume.views import types as views_types +from cinder.api import extensions from cinder.api.openstack import wsgi +from cinder.api.v1 import types +from cinder.api.views import types as views_types from cinder import exception +from cinder.openstack.common.notifier import api as notifier_api from cinder.volume import volume_types @@ -31,10 +30,17 @@ class VolumeTypesManageController(wsgi.Controller): - """ The volume types API controller for the OpenStack API """ + """The volume types API controller for the OpenStack API.""" _view_builder_class = views_types.ViewBuilder + def _notify_volume_type_error(self, context, method, payload): + notifier_api.notify(context, + 'volumeType', + method, + notifier_api.ERROR, + payload) + @wsgi.action("create") @wsgi.serializers(xml=types.VolumeTypeTemplate) def _create(self, req, body): @@ -43,42 +49,72 @@ def _create(self, req, body): authorize(context) if not self.is_valid_body(body, 'volume_type'): - raise webob.exc.HTTPUnprocessableEntity() + raise webob.exc.HTTPBadRequest() vol_type = body['volume_type'] name = vol_type.get('name', None) specs = vol_type.get('extra_specs', {}) if name is None or name == "": - raise webob.exc.HTTPUnprocessableEntity() + raise webob.exc.HTTPBadRequest() try: volume_types.create(context, name, specs) vol_type = volume_types.get_volume_type_by_name(context, name) + notifier_info = dict(volume_types=vol_type) + notifier_api.notify(context, 'volumeType', + 'volume_type.create', + notifier_api.INFO, notifier_info) + except exception.VolumeTypeExists as err: + notifier_err = dict(volume_types=vol_type, error_message=str(err)) + self._notify_volume_type_error(context, + 'volume_type.create', + notifier_err) + raise webob.exc.HTTPConflict(explanation=str(err)) - 
except exception.NotFound: + except exception.NotFound as err: + notifier_err = dict(volume_types=vol_type, error_message=str(err)) + self._notify_volume_type_error(context, + 'volume_type.create', + notifier_err) raise webob.exc.HTTPNotFound() return self._view_builder.show(req, vol_type) @wsgi.action("delete") def _delete(self, req, id): - """ Deletes an existing volume type """ + """Deletes an existing volume type.""" context = req.environ['cinder.context'] authorize(context) try: vol_type = volume_types.get_volume_type(context, id) - volume_types.destroy(context, vol_type['name']) - except exception.NotFound: + volume_types.destroy(context, vol_type['id']) + notifier_info = dict(volume_types=vol_type) + notifier_api.notify(context, 'volumeType', + 'volume_type.delete', + notifier_api.INFO, notifier_info) + except exception.VolumeTypeInUse as err: + notifier_err = dict(id=id, error_message=str(err)) + self._notify_volume_type_error(context, + 'volume_type.delete', + notifier_err) + msg = _('Target volume type is still in use.') + raise webob.exc.HTTPBadRequest(explanation=msg) + except exception.NotFound as err: + notifier_err = dict(id=id, error_message=str(err)) + self._notify_volume_type_error(context, + 'volume_type.delete', + notifier_err) + raise webob.exc.HTTPNotFound() return webob.Response(status_int=202) class Types_manage(extensions.ExtensionDescriptor): - """Types manage support""" + """Types manage support.""" name = "TypesManage" alias = "os-types-manage" diff --git a/cinder/api/contrib/used_limits.py b/cinder/api/contrib/used_limits.py new file mode 100644 index 0000000000..a36ceb9447 --- /dev/null +++ b/cinder/api/contrib/used_limits.py @@ -0,0 +1,58 @@ +# Copyright 2012 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License.
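+# NOTE(editor): sketch of the effect, for orientation -- with this extension
+# loaded, the "absolute" section of a GET /v2/{tenant_id}/limits response
+# also carries "totalVolumesUsed" and "totalGigabytesUsed", populated from
+# quota usage (see quota_map below); the values a caller sees will vary.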
+ +from cinder.api import extensions +from cinder.api.openstack import wsgi +from cinder import quota + +QUOTAS = quota.QUOTAS + +authorize = extensions.extension_authorizer('limits', 'used_limits') + + +class UsedLimitsController(wsgi.Controller): + + @wsgi.extends + def index(self, req, resp_obj): + context = req.environ['cinder.context'] + authorize(context) + + quotas = QUOTAS.get_project_quotas(context, context.project_id, + usages=True) + + quota_map = { + 'totalVolumesUsed': 'volumes', + 'totalGigabytesUsed': 'gigabytes', + } + + used_limits = {} + for display_name, quota in quota_map.iteritems(): + if quota in quotas: + used_limits[display_name] = quotas[quota]['in_use'] + + resp_obj.obj['limits']['absolute'].update(used_limits) + + +class Used_limits(extensions.ExtensionDescriptor): + """Provide data on limited resources that are being used.""" + + name = "UsedLimits" + alias = 'os-used-limits' + namespace = "http://docs.openstack.org/volume/ext/used-limits/api/v1.1" + updated = "2013-10-03T00:00:00+00:00" + + def get_controller_extensions(self): + controller = UsedLimitsController() + extension = extensions.ControllerExtension(self, 'limits', controller) + return [extension] diff --git a/cinder/api/contrib/volume_actions.py b/cinder/api/contrib/volume_actions.py new file mode 100644 index 0000000000..2e87b4e0fa --- /dev/null +++ b/cinder/api/contrib/volume_actions.py @@ -0,0 +1,345 @@ +# Copyright 2012 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
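+# NOTE(editor): each @wsgi.action handler below is reached via a POST to
+# /v2/{tenant_id}/volumes/{volume_id}/action whose body is keyed by the
+# action name; an illustrative body with made-up values:
+#   {"os-attach": {"instance_uuid": "<uuid>", "mountpoint": "/dev/vdb",
+#                  "mode": "rw"}}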
+ +import webob + +from cinder.api import extensions +from cinder.api.openstack import wsgi +from cinder.api import xmlutil +from cinder import exception +from cinder.openstack.common import log as logging +from cinder.openstack.common.rpc import common as rpc_common +from cinder.openstack.common import strutils +from cinder import utils +from cinder import volume + + +LOG = logging.getLogger(__name__) + + +def authorize(context, action_name): + action = 'volume_actions:%s' % action_name + extensions.extension_authorizer('volume', action)(context) + + +class VolumeToImageSerializer(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('os-volume_upload_image', + selector='os-volume_upload_image') + root.set('id') + root.set('updated_at') + root.set('status') + root.set('display_description') + root.set('size') + root.set('volume_type') + root.set('image_id') + root.set('container_format') + root.set('disk_format') + root.set('image_name') + return xmlutil.MasterTemplate(root, 1) + + +class VolumeToImageDeserializer(wsgi.XMLDeserializer): + """Deserializer to handle xml-formatted requests.""" + def default(self, string): + dom = utils.safe_minidom_parse_string(string) + action_node = dom.childNodes[0] + action_name = action_node.tagName + + action_data = {} + attributes = ["force", "image_name", "container_format", "disk_format"] + for attr in attributes: + if action_node.hasAttribute(attr): + action_data[attr] = action_node.getAttribute(attr) + if 'force' in action_data and action_data['force'] == 'True': + action_data['force'] = True + return {'body': {action_name: action_data}} + + +class VolumeActionsController(wsgi.Controller): + def __init__(self, *args, **kwargs): + super(VolumeActionsController, self).__init__(*args, **kwargs) + self.volume_api = volume.API() + + @wsgi.action('os-attach') + def _attach(self, req, id, body): + """Add attachment metadata.""" + context = req.environ['cinder.context'] + try: + volume = self.volume_api.get(context, id) + except exception.VolumeNotFound as error: + raise webob.exc.HTTPNotFound(explanation=error.msg) + + # instance uuid is an option now + instance_uuid = None + if 'instance_uuid' in body['os-attach']: + instance_uuid = body['os-attach']['instance_uuid'] + host_name = None + # Keep API backward compatibility + if 'host_name' in body['os-attach']: + host_name = body['os-attach']['host_name'] + mountpoint = body['os-attach']['mountpoint'] + if 'mode' in body['os-attach']: + mode = body['os-attach']['mode'] + else: + mode = 'rw' + + if instance_uuid and host_name: + msg = _("Invalid request to attach volume to an " + "instance %(instance_uuid)s and a " + "host %(host_name)s simultaneously") % { + 'instance_uuid': instance_uuid, + 'host_name': host_name, + } + raise webob.exc.HTTPBadRequest(explanation=msg) + elif instance_uuid is None and host_name is None: + msg = _("Invalid request to attach volume to an invalid target") + raise webob.exc.HTTPBadRequest(explanation=msg) + + if mode not in ('rw', 'ro'): + msg = _("Invalid request to attach volume with an invalid mode. 
" + "Attaching mode should be 'rw' or 'ro'") + raise webob.exc.HTTPBadRequest(explanation=msg) + + self.volume_api.attach(context, volume, + instance_uuid, host_name, mountpoint, mode) + return webob.Response(status_int=202) + + @wsgi.action('os-detach') + def _detach(self, req, id, body): + """Clear attachment metadata.""" + context = req.environ['cinder.context'] + try: + volume = self.volume_api.get(context, id) + except exception.VolumeNotFound as error: + raise webob.exc.HTTPNotFound(explanation=error.msg) + + self.volume_api.detach(context, volume) + return webob.Response(status_int=202) + + @wsgi.action('os-reserve') + def _reserve(self, req, id, body): + """Mark volume as reserved.""" + context = req.environ['cinder.context'] + try: + volume = self.volume_api.get(context, id) + except exception.VolumeNotFound as error: + raise webob.exc.HTTPNotFound(explanation=error.msg) + + self.volume_api.reserve_volume(context, volume) + return webob.Response(status_int=202) + + @wsgi.action('os-unreserve') + def _unreserve(self, req, id, body): + """Unmark volume as reserved.""" + context = req.environ['cinder.context'] + try: + volume = self.volume_api.get(context, id) + except exception.VolumeNotFound as error: + raise webob.exc.HTTPNotFound(explanation=error.msg) + + self.volume_api.unreserve_volume(context, volume) + return webob.Response(status_int=202) + + @wsgi.action('os-begin_detaching') + def _begin_detaching(self, req, id, body): + """Update volume status to 'detaching'.""" + context = req.environ['cinder.context'] + try: + volume = self.volume_api.get(context, id) + except exception.VolumeNotFound as error: + raise webob.exc.HTTPNotFound(explanation=error.msg) + + self.volume_api.begin_detaching(context, volume) + return webob.Response(status_int=202) + + @wsgi.action('os-roll_detaching') + def _roll_detaching(self, req, id, body): + """Roll back volume status to 'in-use'.""" + context = req.environ['cinder.context'] + try: + volume = self.volume_api.get(context, id) + except exception.VolumeNotFound as error: + raise webob.exc.HTTPNotFound(explanation=error.msg) + + self.volume_api.roll_detaching(context, volume) + return webob.Response(status_int=202) + + @wsgi.action('os-initialize_connection') + def _initialize_connection(self, req, id, body): + """Initialize volume attachment.""" + context = req.environ['cinder.context'] + try: + volume = self.volume_api.get(context, id) + except exception.VolumeNotFound as error: + raise webob.exc.HTTPNotFound(explanation=error.msg) + try: + connector = body['os-initialize_connection']['connector'] + except KeyError: + raise webob.exc.HTTPBadRequest("Must specify 'connector'") + try: + info = self.volume_api.initialize_connection(context, + volume, + connector) + except exception.VolumeBackendAPIException as error: + msg = _("Unable to fetch connection information from backend.") + raise webob.exc.HTTPInternalServerError(msg) + + return {'connection_info': info} + + @wsgi.action('os-terminate_connection') + def _terminate_connection(self, req, id, body): + """Terminate volume attachment.""" + context = req.environ['cinder.context'] + try: + volume = self.volume_api.get(context, id) + except exception.VolumeNotFound as error: + raise webob.exc.HTTPNotFound(explanation=error.msg) + try: + connector = body['os-terminate_connection']['connector'] + except KeyError: + raise webob.exc.HTTPBadRequest("Must specify 'connector'") + try: + self.volume_api.terminate_connection(context, volume, connector) + except exception.VolumeBackendAPIException as 
error: + msg = _("Unable to terminate volume connection from backend.") + raise webob.exc.HTTPInternalServerError(explanation=msg) + return webob.Response(status_int=202) + + @wsgi.response(202) + @wsgi.action('os-volume_upload_image') + @wsgi.serializers(xml=VolumeToImageSerializer) + @wsgi.deserializers(xml=VolumeToImageDeserializer) + def _volume_upload_image(self, req, id, body): + """Uploads the specified volume to image service.""" + context = req.environ['cinder.context'] + params = body['os-volume_upload_image'] + if not params.get("image_name"): + msg = _("No image_name was specified in request.") + raise webob.exc.HTTPBadRequest(explanation=msg) + + force = params.get('force', False) + if isinstance(force, basestring): + try: + force = strutils.bool_from_string(force, strict=False) + except ValueError: + msg = _("Bad value for 'force' parameter.") + raise webob.exc.HTTPBadRequest(explanation=msg) + elif not isinstance(force, bool): + msg = _("'force' is not string or bool.") + raise webob.exc.HTTPBadRequest(explanation=msg) + + try: + volume = self.volume_api.get(context, id) + except exception.VolumeNotFound as error: + raise webob.exc.HTTPNotFound(explanation=error.msg) + + authorize(context, "upload_image") + image_metadata = {"container_format": params.get("container_format", + "bare"), + "disk_format": params.get("disk_format", "raw"), + "name": params["image_name"]} + try: + response = self.volume_api.copy_volume_to_image(context, + volume, + image_metadata, + force) + except exception.InvalidVolume as error: + raise webob.exc.HTTPBadRequest(explanation=error.msg) + except ValueError as error: + raise webob.exc.HTTPBadRequest(explanation=unicode(error)) + except rpc_common.RemoteError as error: + msg = "%(err_type)s: %(err_msg)s" % {'err_type': error.exc_type, + 'err_msg': error.value} + raise webob.exc.HTTPBadRequest(explanation=msg) + return {'os-volume_upload_image': response} + + @wsgi.action('os-extend') + def _extend(self, req, id, body): + """Extend size of volume.""" + context = req.environ['cinder.context'] + try: + volume = self.volume_api.get(context, id) + except exception.VolumeNotFound as error: + raise webob.exc.HTTPNotFound(explanation=error.msg) + + try: + int(body['os-extend']['new_size']) + except (KeyError, ValueError, TypeError): + msg = _("New volume size must be specified as an integer.") + raise webob.exc.HTTPBadRequest(explanation=msg) + + size = body['os-extend']['new_size'] + self.volume_api.extend(context, volume, size) + return webob.Response(status_int=202) + + @wsgi.action('os-update_readonly_flag') + def _volume_readonly_update(self, req, id, body): + """Update volume readonly flag.""" + context = req.environ['cinder.context'] + try: + volume = self.volume_api.get(context, id) + except exception.VolumeNotFound as error: + raise webob.exc.HTTPNotFound(explanation=error.msg) + + try: + readonly_flag = body['os-update_readonly_flag']['readonly'] + except KeyError: + msg = _("Must specify readonly in request.") + raise webob.exc.HTTPBadRequest(explanation=msg) + + if isinstance(readonly_flag, basestring): + try: + readonly_flag = strutils.bool_from_string(readonly_flag, + strict=True) + except ValueError: + msg = _("Bad value for 'readonly'") + raise webob.exc.HTTPBadRequest(explanation=msg) + + elif not isinstance(readonly_flag, bool): + msg = _("'readonly' not string or bool") + raise webob.exc.HTTPBadRequest(explanation=msg) + + self.volume_api.update_readonly_flag(context, volume, readonly_flag) + return webob.Response(status_int=202) + + 
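+    # NOTE(editor): an illustrative retype request; the type name and
+    # policy below are made-up values:
+    #   POST /v2/{tenant_id}/volumes/{volume_id}/action
+    #   {"os-retype": {"new_type": "another-type",
+    #                  "migration_policy": "never"}}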
@wsgi.action('os-retype') + def _retype(self, req, id, body): + """Change type of existing volume.""" + context = req.environ['cinder.context'] + volume = self.volume_api.get(context, id) + try: + new_type = body['os-retype']['new_type'] + except KeyError: + msg = _("New volume type must be specified.") + raise webob.exc.HTTPBadRequest(explanation=msg) + policy = body['os-retype'].get('migration_policy') + + self.volume_api.retype(context, volume, new_type, policy) + return webob.Response(status_int=202) + + +class Volume_actions(extensions.ExtensionDescriptor): + """Enable volume actions.""" + + name = "VolumeActions" + alias = "os-volume-actions" + namespace = "http://docs.openstack.org/volume/ext/volume-actions/api/v1.1" + updated = "2012-05-31T00:00:00+00:00" + + def get_controller_extensions(self): + controller = VolumeActionsController() + extension = extensions.ControllerExtension(self, 'volumes', controller) + return [extension] diff --git a/cinder/api/contrib/volume_encryption_metadata.py b/cinder/api/contrib/volume_encryption_metadata.py new file mode 100644 index 0000000000..532d40b7f0 --- /dev/null +++ b/cinder/api/contrib/volume_encryption_metadata.py @@ -0,0 +1,86 @@ +# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""The volume encryption metadata extension.""" + +from cinder.api import extensions +from cinder.api.openstack import wsgi +from cinder.api import xmlutil +from cinder import db +from cinder.volume import volume_types + +authorize = extensions.extension_authorizer('volume', + 'volume_encryption_metadata') + + +class VolumeEncryptionMetadataTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.make_flat_dict('encryption', selector='encryption') + return xmlutil.MasterTemplate(root, 1) + + +class VolumeEncryptionMetadataController(wsgi.Controller): + """The volume encryption metadata API extension.""" + + def _get_volume_encryption_metadata(self, context, volume_id): + return db.volume_encryption_metadata_get(context, volume_id) + + def _is_volume_type_encrypted(self, context, volume_id): + volume_ref = db.volume_get(context, volume_id) + volume_type_id = volume_ref['volume_type_id'] + return volume_types.is_encrypted(context, volume_type_id) + + def _get_metadata(self, req, volume_id): + context = req.environ['cinder.context'] + authorize(context) + if self._is_volume_type_encrypted(context, volume_id): + return self._get_volume_encryption_metadata(context, volume_id) + else: + return { + 'encryption_key_id': None, + # Additional metadata defaults could go here.
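+                # e.g. hypothetical additions: 'cipher': None,
+                # 'key_size': None,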
+ } + + @wsgi.serializers(xml=VolumeEncryptionMetadataTemplate) + def index(self, req, volume_id): + """Returns the encryption metadata for a given volume.""" + return self._get_metadata(req, volume_id) + + @wsgi.serializers(xml=VolumeEncryptionMetadataTemplate) + def show(self, req, volume_id, id): + """Return a single encryption item.""" + encryption_item = self.index(req, volume_id) + if encryption_item is not None: + return encryption_item[id] + else: + return None + + +class Volume_encryption_metadata(extensions.ExtensionDescriptor): + """Volume encryption metadata retrieval support.""" + + name = "VolumeEncryptionMetadata" + alias = "os-volume-encryption-metadata" + namespace = ("http://docs.openstack.org/volume/ext/" + "os-volume-encryption-metadata/api/v1") + updated = "2013-07-10T00:00:00+00:00" + + def get_resources(self): + resources = [] + res = extensions.ResourceExtension( + 'encryption', VolumeEncryptionMetadataController(), + parent=dict(member_name='volume', collection_name='volumes')) + resources.append(res) + return resources diff --git a/cinder/api/contrib/volume_host_attribute.py b/cinder/api/contrib/volume_host_attribute.py new file mode 100644 index 0000000000..18042b4690 --- /dev/null +++ b/cinder/api/contrib/volume_host_attribute.py @@ -0,0 +1,90 @@ +# Copyright 2012 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
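+# NOTE(editor): sketch of the visible effect -- for an authorized caller,
+# volume show/detail responses gain one extra key; the host value here is
+# made up:
+#   "os-vol-host-attr:host": "block-storage-01"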
+ +from cinder.api import extensions +from cinder.api.openstack import wsgi +from cinder.api import xmlutil +from cinder.openstack.common import log as logging +from cinder import volume + + +LOG = logging.getLogger(__name__) +authorize = extensions.soft_extension_authorizer('volume', + 'volume_host_attribute') + + +class VolumeHostAttributeController(wsgi.Controller): + def __init__(self, *args, **kwargs): + super(VolumeHostAttributeController, self).__init__(*args, **kwargs) + self.volume_api = volume.API() + + def _add_volume_host_attribute(self, context, req, resp_volume): + db_volume = req.cached_resource_by_id(resp_volume['id']) + key = "%s:host" % Volume_host_attribute.alias + resp_volume[key] = db_volume['host'] + + @wsgi.extends + def show(self, req, resp_obj, id): + context = req.environ['cinder.context'] + if authorize(context): + resp_obj.attach(xml=VolumeHostAttributeTemplate()) + volume = resp_obj.obj['volume'] + self._add_volume_host_attribute(context, req, volume) + + @wsgi.extends + def detail(self, req, resp_obj): + context = req.environ['cinder.context'] + if authorize(context): + resp_obj.attach(xml=VolumeListHostAttributeTemplate()) + for volume in list(resp_obj.obj['volumes']): + self._add_volume_host_attribute(context, req, volume) + + +class Volume_host_attribute(extensions.ExtensionDescriptor): + """Expose host as an attribute of a volume.""" + + name = "VolumeHostAttribute" + alias = "os-vol-host-attr" + namespace = ("http://docs.openstack.org/volume/ext/" + "volume_host_attribute/api/v1") + updated = "2011-11-03T00:00:00+00:00" + + def get_controller_extensions(self): + controller = VolumeHostAttributeController() + extension = extensions.ControllerExtension(self, 'volumes', controller) + return [extension] + + +def make_volume(elem): + elem.set('{%s}host' % Volume_host_attribute.namespace, + '%s:host' % Volume_host_attribute.alias) + + +class VolumeHostAttributeTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('volume', selector='volume') + make_volume(root) + alias = Volume_host_attribute.alias + namespace = Volume_host_attribute.namespace + return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace}) + + +class VolumeListHostAttributeTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('volumes') + elem = xmlutil.SubTemplateElement(root, 'volume', selector='volumes') + make_volume(elem) + alias = Volume_host_attribute.alias + namespace = Volume_host_attribute.namespace + return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace}) diff --git a/cinder/api/contrib/volume_image_metadata.py b/cinder/api/contrib/volume_image_metadata.py new file mode 100644 index 0000000000..1a06005c7f --- /dev/null +++ b/cinder/api/contrib/volume_image_metadata.py @@ -0,0 +1,131 @@ +# Copyright 2012 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +"""The Volume Image Metadata API extension.""" + +import logging + +from cinder.api import extensions +from cinder.api.openstack import wsgi +from cinder.api import xmlutil +from cinder import volume + + +LOG = logging.getLogger(__name__) + +authorize = extensions.soft_extension_authorizer('volume', + 'volume_image_metadata') + + +class VolumeImageMetadataController(wsgi.Controller): + def __init__(self, *args, **kwargs): + super(VolumeImageMetadataController, self).__init__(*args, **kwargs) + self.volume_api = volume.API() + + def _get_all_images_metadata(self, context): + """Returns the image metadata for all volumes.""" + try: + all_metadata = self.volume_api.get_volumes_image_metadata(context) + except Exception as e: + LOG.debug('Problem retrieving volume image metadata. ' + 'It will be skipped. Error: %s', e) + all_metadata = {} + return all_metadata + + def _add_image_metadata(self, context, resp_volume, image_meta=None): + """Appends the image metadata to the given volume. + + :param context: the request context + :param resp_volume: the response volume + :param image_meta: The image metadata to append, if None is provided it + will be retrieved from the database. An empty dict + means there is no metadata and it should not be + retrieved from the db. + """ + if image_meta is None: + try: + image_meta = self.volume_api.get_volume_image_metadata( + context, resp_volume) + except Exception: + return + if image_meta: + resp_volume['volume_image_metadata'] = dict( + image_meta.iteritems()) + + @wsgi.extends + def show(self, req, resp_obj, id): + context = req.environ['cinder.context'] + if authorize(context): + resp_obj.attach(xml=VolumeImageMetadataTemplate()) + self._add_image_metadata(context, resp_obj.obj['volume']) + + @wsgi.extends + def detail(self, req, resp_obj): + context = req.environ['cinder.context'] + if authorize(context): + resp_obj.attach(xml=VolumesImageMetadataTemplate()) + all_meta = self._get_all_images_metadata(context) + for volume in list(resp_obj.obj.get('volumes', [])): + image_meta = all_meta.get(volume['id'], {}) + self._add_image_metadata(context, volume, image_meta) + + +class Volume_image_metadata(extensions.ExtensionDescriptor): + """Show image metadata associated with the volume.""" + + name = "VolumeImageMetadata" + alias = "os-vol-image-meta" + namespace = ("http://docs.openstack.org/volume/ext/" + "volume_image_metadata/api/v1") + updated = "2012-12-07T00:00:00+00:00" + + def get_controller_extensions(self): + controller = VolumeImageMetadataController() + extension = extensions.ControllerExtension(self, 'volumes', controller) + return [extension] + + +class VolumeImageMetadataMetadataTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('volume_image_metadata', + selector='volume_image_metadata') + elem = xmlutil.SubTemplateElement(root, 'meta', + selector=xmlutil.get_items) + elem.set('key', 0) + elem.text = 1 + + return xmlutil.MasterTemplate(root, 1) + + +class VolumeImageMetadataTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('volume', selector='volume') + root.append(VolumeImageMetadataMetadataTemplate()) + + alias = Volume_image_metadata.alias + namespace = Volume_image_metadata.namespace + + return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace}) + + +class VolumesImageMetadataTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('volumes') + elem = xmlutil.SubTemplateElement(root, 'volume', selector='volume') + 
elem.append(VolumeImageMetadataMetadataTemplate()) + + alias = Volume_image_metadata.alias + namespace = Volume_image_metadata.namespace + + return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace}) diff --git a/cinder/api/contrib/volume_mig_status_attribute.py b/cinder/api/contrib/volume_mig_status_attribute.py new file mode 100644 index 0000000000..b0a52e5931 --- /dev/null +++ b/cinder/api/contrib/volume_mig_status_attribute.py @@ -0,0 +1,97 @@ +# Copyright 2013 IBM Corp. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from cinder.api import extensions +from cinder.api.openstack import wsgi +from cinder.api import xmlutil +from cinder import volume + + +authorize = extensions.soft_extension_authorizer('volume', + 'volume_mig_status_attribute') + + +class VolumeMigStatusAttributeController(wsgi.Controller): + def __init__(self, *args, **kwargs): + super(VolumeMigStatusAttributeController, self).__init__(*args, + **kwargs) + self.volume_api = volume.API() + + def _add_volume_mig_status_attribute(self, context, resp_volume): + try: + db_volume = self.volume_api.get(context, resp_volume['id']) + except Exception: + return + else: + key = "%s:migstat" % Volume_mig_status_attribute.alias + resp_volume[key] = db_volume['migration_status'] + key = "%s:name_id" % Volume_mig_status_attribute.alias + resp_volume[key] = db_volume['_name_id'] + + @wsgi.extends + def show(self, req, resp_obj, id): + context = req.environ['cinder.context'] + if authorize(context): + resp_obj.attach(xml=VolumeMigStatusAttributeTemplate()) + self._add_volume_mig_status_attribute(context, + resp_obj.obj['volume']) + + @wsgi.extends + def detail(self, req, resp_obj): + context = req.environ['cinder.context'] + if authorize(context): + resp_obj.attach(xml=VolumeListMigStatusAttributeTemplate()) + for volume in list(resp_obj.obj['volumes']): + self._add_volume_mig_status_attribute(context, volume) + + +class Volume_mig_status_attribute(extensions.ExtensionDescriptor): + """Expose migration_status as an attribute of a volume.""" + + name = "VolumeMigStatusAttribute" + alias = "os-vol-mig-status-attr" + namespace = ("http://docs.openstack.org/volume/ext/" + "volume_mig_status_attribute/api/v1") + updated = "2013-08-08T00:00:00+00:00" + + def get_controller_extensions(self): + controller = VolumeMigStatusAttributeController() + extension = extensions.ControllerExtension(self, 'volumes', controller) + return [extension] + + +def make_volume(elem): + elem.set('{%s}migstat' % Volume_mig_status_attribute.namespace, + '%s:migstat' % Volume_mig_status_attribute.alias) + elem.set('{%s}name_id' % Volume_mig_status_attribute.namespace, + '%s:name_id' % Volume_mig_status_attribute.alias) + + +class VolumeMigStatusAttributeTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('volume', selector='volume') + make_volume(root) + alias = Volume_mig_status_attribute.alias + namespace = Volume_mig_status_attribute.namespace + return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace}) + + +class 
VolumeListMigStatusAttributeTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('volumes') + elem = xmlutil.SubTemplateElement(root, 'volume', selector='volumes') + make_volume(elem) + alias = Volume_mig_status_attribute.alias + namespace = Volume_mig_status_attribute.namespace + return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace}) diff --git a/cinder/api/contrib/volume_tenant_attribute.py b/cinder/api/contrib/volume_tenant_attribute.py new file mode 100644 index 0000000000..35b210ecb2 --- /dev/null +++ b/cinder/api/contrib/volume_tenant_attribute.py @@ -0,0 +1,88 @@ +# Copyright 2012 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from cinder.api import extensions +from cinder.api.openstack import wsgi +from cinder.api import xmlutil +from cinder import volume + + +authorize = extensions.soft_extension_authorizer('volume', + 'volume_tenant_attribute') + + +class VolumeTenantAttributeController(wsgi.Controller): + def __init__(self, *args, **kwargs): + super(VolumeTenantAttributeController, self).__init__(*args, **kwargs) + self.volume_api = volume.API() + + def _add_volume_tenant_attribute(self, context, req, resp_volume): + db_volume = req.cached_resource_by_id(resp_volume['id']) + key = "%s:tenant_id" % Volume_tenant_attribute.alias + resp_volume[key] = db_volume['project_id'] + + @wsgi.extends + def show(self, req, resp_obj, id): + context = req.environ['cinder.context'] + if authorize(context): + resp_obj.attach(xml=VolumeTenantAttributeTemplate()) + volume = resp_obj.obj['volume'] + self._add_volume_tenant_attribute(context, req, volume) + + @wsgi.extends + def detail(self, req, resp_obj): + context = req.environ['cinder.context'] + if authorize(context): + resp_obj.attach(xml=VolumeListTenantAttributeTemplate()) + for volume in list(resp_obj.obj['volumes']): + self._add_volume_tenant_attribute(context, req, volume) + + +class Volume_tenant_attribute(extensions.ExtensionDescriptor): + """Expose the internal project_id as an attribute of a volume.""" + + name = "VolumeTenantAttribute" + alias = "os-vol-tenant-attr" + namespace = ("http://docs.openstack.org/volume/ext/" + "volume_tenant_attribute/api/v1") + updated = "2011-11-03T00:00:00+00:00" + + def get_controller_extensions(self): + controller = VolumeTenantAttributeController() + extension = extensions.ControllerExtension(self, 'volumes', controller) + return [extension] + + +def make_volume(elem): + elem.set('{%s}tenant_id' % Volume_tenant_attribute.namespace, + '%s:tenant_id' % Volume_tenant_attribute.alias) + + +class VolumeTenantAttributeTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('volume', selector='volume') + make_volume(root) + alias = Volume_tenant_attribute.alias + namespace = Volume_tenant_attribute.namespace + return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace}) + + +class VolumeListTenantAttributeTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = 
xmlutil.TemplateElement('volumes') + elem = xmlutil.SubTemplateElement(root, 'volume', selector='volumes') + make_volume(elem) + alias = Volume_tenant_attribute.alias + namespace = Volume_tenant_attribute.namespace + return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace}) diff --git a/cinder/api/contrib/volume_transfer.py b/cinder/api/contrib/volume_transfer.py new file mode 100644 index 0000000000..e4b25cdcf4 --- /dev/null +++ b/cinder/api/contrib/volume_transfer.py @@ -0,0 +1,244 @@ +# Copyright 2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import webob +from webob import exc + +from cinder.api import common +from cinder.api import extensions +from cinder.api.openstack import wsgi +from cinder.api.views import transfers as transfer_view +from cinder.api import xmlutil + +from cinder import exception +from cinder.openstack.common import log as logging +from cinder import transfer as transferAPI +from cinder import utils + +LOG = logging.getLogger(__name__) + + +def make_transfer(elem): + elem.set('id') + elem.set('volume_id') + elem.set('created_at') + elem.set('name') + elem.set('auth_key') + + +class TransferTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('transfer', selector='transfer') + make_transfer(root) + alias = Volume_transfer.alias + namespace = Volume_transfer.namespace + return xmlutil.MasterTemplate(root, 1, nsmap={alias: namespace}) + + +class TransfersTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('transfers') + elem = xmlutil.SubTemplateElement(root, 'transfer', + selector='transfers') + make_transfer(elem) + alias = Volume_transfer.alias + namespace = Volume_transfer.namespace + return xmlutil.MasterTemplate(root, 1, nsmap={alias: namespace}) + + +class CreateDeserializer(wsgi.MetadataXMLDeserializer): + def default(self, string): + dom = utils.safe_minidom_parse_string(string) + transfer = self._extract_transfer(dom) + return {'body': {'transfer': transfer}} + + def _extract_transfer(self, node): + transfer = {} + transfer_node = self.find_first_child_named(node, 'transfer') + + attributes = ['volume_id', 'name'] + + for attr in attributes: + if transfer_node.getAttribute(attr): + transfer[attr] = transfer_node.getAttribute(attr) + return transfer + + +class AcceptDeserializer(wsgi.MetadataXMLDeserializer): + def default(self, string): + dom = utils.safe_minidom_parse_string(string) + transfer = self._extract_transfer(dom) + return {'body': {'accept': transfer}} + + def _extract_transfer(self, node): + transfer = {} + transfer_node = self.find_first_child_named(node, 'accept') + + attributes = ['auth_key'] + + for attr in attributes: + if transfer_node.getAttribute(attr): + transfer[attr] = transfer_node.getAttribute(attr) + return transfer + + +class VolumeTransferController(wsgi.Controller): + """The Volume Transfer API controller for the Openstack API.""" + + _view_builder_class = transfer_view.ViewBuilder + + def 
__init__(self): + self.transfer_api = transferAPI.API() + super(VolumeTransferController, self).__init__() + + @wsgi.serializers(xml=TransferTemplate) + def show(self, req, id): + """Return data about active transfers.""" + context = req.environ['cinder.context'] + + try: + transfer = self.transfer_api.get(context, transfer_id=id) + except exception.TransferNotFound as error: + raise exc.HTTPNotFound(explanation=error.msg) + + return self._view_builder.detail(req, transfer) + + @wsgi.serializers(xml=TransfersTemplate) + def index(self, req): + """Returns a summary list of transfers.""" + return self._get_transfers(req, is_detail=False) + + @wsgi.serializers(xml=TransfersTemplate) + def detail(self, req): + """Returns a detailed list of transfers.""" + return self._get_transfers(req, is_detail=True) + + def _get_transfers(self, req, is_detail): + """Returns a list of transfers, transformed through view builder.""" + context = req.environ['cinder.context'] + LOG.debug(_('Listing volume transfers')) + transfers = self.transfer_api.get_all(context) + limited_list = common.limited(transfers, req) + + if is_detail: + transfers = self._view_builder.detail_list(req, limited_list) + else: + transfers = self._view_builder.summary_list(req, limited_list) + + return transfers + + @wsgi.response(202) + @wsgi.serializers(xml=TransferTemplate) + @wsgi.deserializers(xml=CreateDeserializer) + def create(self, req, body): + """Create a new volume transfer.""" + LOG.debug(_('Creating new volume transfer %s'), body) + if not self.is_valid_body(body, 'transfer'): + raise exc.HTTPBadRequest() + + context = req.environ['cinder.context'] + + try: + transfer = body['transfer'] + volume_id = transfer['volume_id'] + except KeyError: + msg = _("Incorrect request body format") + raise exc.HTTPBadRequest(explanation=msg) + + name = transfer.get('name', None) + + LOG.audit(_("Creating transfer of volume %s"), + volume_id, + context=context) + + try: + new_transfer = self.transfer_api.create(context, volume_id, name) + except exception.InvalidVolume as error: + raise exc.HTTPBadRequest(explanation=error.msg) + except exception.VolumeNotFound as error: + raise exc.HTTPNotFound(explanation=error.msg) + + transfer = self._view_builder.create(req, + dict(new_transfer.iteritems())) + return transfer + + @wsgi.response(202) + @wsgi.serializers(xml=TransferTemplate) + @wsgi.deserializers(xml=AcceptDeserializer) + def accept(self, req, id, body): + """Accept a new volume transfer.""" + transfer_id = id + LOG.debug(_('Accepting volume transfer %s'), transfer_id) + if not self.is_valid_body(body, 'accept'): + raise exc.HTTPBadRequest() + + context = req.environ['cinder.context'] + + try: + accept = body['accept'] + auth_key = accept['auth_key'] + except KeyError: + msg = _("Incorrect request body format") + raise exc.HTTPBadRequest(explanation=msg) + + LOG.audit(_("Accepting transfer %s"), transfer_id, + context=context) + + try: + accepted_transfer = self.transfer_api.accept(context, transfer_id, + auth_key) + except exception.VolumeSizeExceedsAvailableQuota as error: + raise exc.HTTPRequestEntityTooLarge( + explanation=error.msg, headers={'Retry-After': 0}) + except exception.InvalidVolume as error: + raise exc.HTTPBadRequest(explanation=error.msg) + + transfer = \ + self._view_builder.summary(req, + dict(accepted_transfer.iteritems())) + return transfer + + def delete(self, req, id): + """Delete a transfer.""" + context = req.environ['cinder.context'] + + LOG.audit(_("Delete transfer with id: %s"), id, context=context) + + try: 
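+            # TransferNotFound raised by transfer_api.delete() is mapped
+            # to a 404 below; success falls through to the bare 202
+            # (Accepted) response, consistent with create and accept above.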
+ self.transfer_api.delete(context, transfer_id=id) + except exception.TransferNotFound as error: + raise exc.HTTPNotFound(explanation=error.msg) + return webob.Response(status_int=202) + + +class Volume_transfer(extensions.ExtensionDescriptor): + """Volume transfer management support.""" + + name = "VolumeTransfer" + alias = "os-volume-transfer" + namespace = "http://docs.openstack.org/volume/ext/volume-transfer/" + \ + "api/v1.1" + updated = "2013-05-29T00:00:00+00:00" + + def get_resources(self): + resources = [] + + res = extensions.ResourceExtension(Volume_transfer.alias, + VolumeTransferController(), + collection_actions={'detail': + 'GET'}, + member_actions={'accept': 'POST'}) + resources.append(res) + return resources diff --git a/cinder/api/contrib/volume_type_encryption.py b/cinder/api/contrib/volume_type_encryption.py new file mode 100644 index 0000000000..12107cb957 --- /dev/null +++ b/cinder/api/contrib/volume_type_encryption.py @@ -0,0 +1,186 @@ +# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""The volume types encryption extension.""" + +import webob + +from cinder.api import extensions +from cinder.api.openstack import wsgi +from cinder.api import xmlutil +from cinder import db +from cinder import exception +from cinder.openstack.common.notifier import api as notifier_api +from cinder.volume import volume_types + +authorize = extensions.extension_authorizer('volume', + 'volume_type_encryption') + +CONTROL_LOCATION = ['front-end', 'back-end'] + + +class VolumeTypeEncryptionTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.make_flat_dict('encryption', selector='encryption') + return xmlutil.MasterTemplate(root, 1) + + +class VolumeTypeEncryptionController(wsgi.Controller): + """The volume type encryption API controller for the OpenStack API.""" + + def _get_volume_type_encryption(self, context, type_id): + encryption_ref = db.volume_type_encryption_get(context, type_id) + encryption_specs = {} + if not encryption_ref: + return encryption_specs + for key, value in encryption_ref.iteritems(): + encryption_specs[key] = value + return encryption_specs + + def _check_type(self, context, type_id): + try: + volume_types.get_volume_type(context, type_id) + except exception.NotFound as ex: + raise webob.exc.HTTPNotFound(explanation=ex.msg) + + def _check_encryption_input(self, encryption, create=True): + if 'key_size' in encryption.keys(): + key_size = encryption['key_size'] + if key_size is not None: + if isinstance(key_size, (int, long)): + if key_size < 0: + msg = _('key_size must be non-negative') + raise exception.InvalidInput(reason=msg) + else: + msg = _('key_size must be an integer') + raise exception.InvalidInput(reason=msg) + + if create: + msg = None + if 'provider' not in encryption.keys(): + msg = _('provider must be defined') + elif 'control_location' not in encryption.keys(): + msg = _('control_location must be defined') + + if msg is not None: 
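+                # Fail the create as soon as either mandatory key
+                # (provider or control_location) is missing; key_size on
+                # its own remains optional.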
+                raise exception.InvalidInput(reason=msg)
+
+        # Check control location
+        if 'control_location' in encryption.keys():
+            if encryption['control_location'] not in CONTROL_LOCATION:
+                msg = _("Valid control locations are: %s") % CONTROL_LOCATION
+                raise exception.InvalidInput(reason=msg)
+
+    def _encrypted_type_in_use(self, context, volume_type_id):
+        volume_list = db.volume_type_encryption_volume_get(context,
+                                                           volume_type_id)
+        # If there is at least one volume in the list
+        # returned, this type is in use by a volume.
+        if len(volume_list) > 0:
+            return True
+        else:
+            return False
+
+    @wsgi.serializers(xml=VolumeTypeEncryptionTemplate)
+    def index(self, req, type_id):
+        """Returns the encryption specs for a given volume type."""
+        context = req.environ['cinder.context']
+        authorize(context)
+        self._check_type(context, type_id)
+        return self._get_volume_type_encryption(context, type_id)
+
+    @wsgi.serializers(xml=VolumeTypeEncryptionTemplate)
+    def create(self, req, type_id, body=None):
+        """Create encryption specs for an existing volume type."""
+        context = req.environ['cinder.context']
+        authorize(context)
+
+        if self._encrypted_type_in_use(context, type_id):
+            expl = _('Cannot create encryption specs. Volume type in use.')
+            raise webob.exc.HTTPBadRequest(explanation=expl)
+
+        if not self.is_valid_body(body, 'encryption'):
+            expl = _('Create body is not valid.')
+            raise webob.exc.HTTPBadRequest(explanation=expl)
+
+        self._check_type(context, type_id)
+
+        encryption_specs = self._get_volume_type_encryption(context, type_id)
+        if encryption_specs:
+            raise exception.VolumeTypeEncryptionExists(type_id=type_id)
+
+        encryption_specs = body['encryption']
+
+        self._check_encryption_input(encryption_specs)
+
+        db.volume_type_encryption_update_or_create(context, type_id,
+                                                   encryption_specs)
+        notifier_info = dict(type_id=type_id, specs=encryption_specs)
+        notifier_api.notify(context, 'volumeTypeEncryption',
+                            'volume_type_encryption.create',
+                            notifier_api.INFO, notifier_info)
+        return body
+
+    @wsgi.serializers(xml=VolumeTypeEncryptionTemplate)
+    def show(self, req, type_id, id):
+        """Return a single encryption item."""
+        context = req.environ['cinder.context']
+        authorize(context)
+
+        self._check_type(context, type_id)
+
+        encryption_specs = self._get_volume_type_encryption(context, type_id)
+
+        if id not in encryption_specs:
+            raise webob.exc.HTTPNotFound()
+
+        return {id: encryption_specs[id]}
+
+    def delete(self, req, type_id, id):
+        """Delete encryption specs for a given volume type."""
+        context = req.environ['cinder.context']
+        authorize(context)
+
+        if self._encrypted_type_in_use(context, type_id):
+            expl = _('Cannot delete encryption specs. 
Volume type in use.') + raise webob.exc.HTTPBadRequest(explanation=expl) + else: + db.volume_type_encryption_delete(context, type_id) + + return webob.Response(status_int=202) + + +class Volume_type_encryption(extensions.ExtensionDescriptor): + """Encryption support for volume types.""" + + name = "VolumeTypeEncryption" + alias = "encryption" + namespace = ("http://docs.openstack.org/volume/ext/" + "volume-type-encryption/api/v1") + updated = "2013-07-01T00:00:00+00:00" + + def get_resources(self): + resources = [] + res = extensions.ResourceExtension( + Volume_type_encryption.alias, + VolumeTypeEncryptionController(), + parent=dict(member_name='type', collection_name='types')) + resources.append(res) + return resources + + def get_controller_extensions(self): + controller = VolumeTypeEncryptionController() + extension = extensions.ControllerExtension(self, 'types', controller) + return [extension] diff --git a/cinder/api/openstack/extensions.py b/cinder/api/extensions.py similarity index 88% rename from cinder/api/openstack/extensions.py rename to cinder/api/extensions.py index a615f2a08e..b20323d89d 100644 --- a/cinder/api/openstack/extensions.py +++ b/cinder/api/extensions.py @@ -1,6 +1,4 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2011 OpenStack LLC. +# Copyright 2011 OpenStack Foundation # Copyright 2011 Justin Santa Barbara # All Rights Reserved. # @@ -18,22 +16,22 @@ import os +from oslo.config import cfg import webob.dec import webob.exc import cinder.api.openstack from cinder.api.openstack import wsgi -from cinder.api.openstack import xmlutil +from cinder.api import xmlutil from cinder import exception -from cinder import flags -from cinder.openstack.common import log as logging -from cinder.openstack.common import exception as common_exception from cinder.openstack.common import importutils +from cinder.openstack.common import log as logging import cinder.policy +CONF = cfg.CONF + LOG = logging.getLogger(__name__) -FLAGS = flags.FLAGS class ExtensionDescriptor(object): @@ -175,11 +173,18 @@ def create(self, req): class ExtensionManager(object): """Load extensions from the configured extension path. - See cinder/tests/api/openstack/extensions/foxinsocks/extension.py for an + See cinder/tests/api/extensions/foxinsocks/extension.py for an example extension implementation. """ + def __init__(self): + LOG.audit(_('Initializing extension manager.')) + + self.cls_list = CONF.osapi_volume_extension + self.extensions = {} + self._load_extensions() + def is_loaded(self, alias): return alias in self.extensions @@ -262,12 +267,26 @@ def _load_extensions(self): extensions = list(self.cls_list) + # NOTE(thingee): Backwards compat for the old extension loader path. + # We can drop this post-grizzly in the H release. + old_contrib_path = ('cinder.api.openstack.volume.contrib.' 
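+                            # (deprecated path from the NOTE above; it is
+                            # rewritten to the cinder.api.contrib form below)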
+ 'standard_extensions') + new_contrib_path = 'cinder.api.contrib.standard_extensions' + if old_contrib_path in extensions: + LOG.warn(_('osapi_volume_extension is set to deprecated path: %s'), + old_contrib_path) + LOG.warn(_('Please set your flag or cinder.conf settings for ' + 'osapi_volume_extension to: %s'), new_contrib_path) + extensions = [e.replace(old_contrib_path, new_contrib_path) + for e in extensions] + for ext_factory in extensions: try: self.load_extension(ext_factory) except Exception as exc: LOG.warn(_('Failed to load extension %(ext_factory)s: ' - '%(exc)s') % locals()) + '%(exc)s'), + {'ext_factory': ext_factory, 'exc': exc}) class ControllerExtension(object): @@ -335,7 +354,8 @@ def load_standard_extensions(ext_mgr, logger, path, package, ext_list=None): ext_mgr.load_extension(classpath) except Exception as exc: logger.warn(_('Failed to load extension %(classpath)s: ' - '%(exc)s') % locals()) + '%(exc)s'), + {'classpath': classpath, 'exc': exc}) # Now, let's consider any subdirectories we may have... subdirs = [] @@ -350,7 +370,7 @@ def load_standard_extensions(ext_mgr, logger, path, package, ext_list=None): (package, relpkg, dname)) try: ext = importutils.import_class(ext_name) - except common_exception.NotFound: + except ImportError: # extension() doesn't exist on it, so we'll explore # the directory for ourselves subdirs.append(dname) @@ -359,7 +379,8 @@ def load_standard_extensions(ext_mgr, logger, path, package, ext_list=None): ext(ext_mgr) except Exception as exc: logger.warn(_('Failed to load extension %(ext_name)s: ' - '%(exc)s') % locals()) + '%(exc)s'), + {'ext_name': ext_name, 'exc': exc}) # Update the list of directories we'll explore... dirnames[:] = subdirs diff --git a/cinder/testing/__init__.py b/cinder/api/middleware/__init__.py similarity index 100% rename from cinder/testing/__init__.py rename to cinder/api/middleware/__init__.py diff --git a/cinder/api/middleware/auth.py b/cinder/api/middleware/auth.py new file mode 100644 index 0000000000..b1b4c999ff --- /dev/null +++ b/cinder/api/middleware/auth.py @@ -0,0 +1,155 @@ +# Copyright 2010 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Common Auth Middleware. + +""" + + +import os + +from oslo.config import cfg +import webob.dec +import webob.exc + +from cinder.api.openstack import wsgi +from cinder import context +from cinder.openstack.common import jsonutils +from cinder.openstack.common import log as logging +from cinder import wsgi as base_wsgi + + +use_forwarded_for_opt = cfg.BoolOpt( + 'use_forwarded_for', + default=False, + help='Treat X-Forwarded-For as the canonical remote address. 
' + 'Only enable this if you have a sanitizing proxy.') + +CONF = cfg.CONF +CONF.register_opt(use_forwarded_for_opt) + +LOG = logging.getLogger(__name__) + + +def pipeline_factory(loader, global_conf, **local_conf): + """A paste pipeline replica that keys off of auth_strategy.""" + pipeline = local_conf[CONF.auth_strategy] + if not CONF.api_rate_limit: + limit_name = CONF.auth_strategy + '_nolimit' + pipeline = local_conf.get(limit_name, pipeline) + pipeline = pipeline.split() + filters = [loader.get_filter(n) for n in pipeline[:-1]] + app = loader.get_app(pipeline[-1]) + filters.reverse() + for filter in filters: + app = filter(app) + return app + + +class InjectContext(base_wsgi.Middleware): + """Add a 'cinder.context' to WSGI environ.""" + + def __init__(self, context, *args, **kwargs): + self.context = context + super(InjectContext, self).__init__(*args, **kwargs) + + @webob.dec.wsgify(RequestClass=base_wsgi.Request) + def __call__(self, req): + req.environ['cinder.context'] = self.context + return self.application + + +class CinderKeystoneContext(base_wsgi.Middleware): + """Make a request context from keystone headers.""" + + @webob.dec.wsgify(RequestClass=base_wsgi.Request) + def __call__(self, req): + user_id = req.headers.get('X_USER') + user_id = req.headers.get('X_USER_ID', user_id) + if user_id is None: + LOG.debug("Neither X_USER_ID nor X_USER found in request") + return webob.exc.HTTPUnauthorized() + # get the roles + roles = [r.strip() for r in req.headers.get('X_ROLE', '').split(',')] + if 'X_TENANT_ID' in req.headers: + # This is the new header since Keystone went to ID/Name + project_id = req.headers['X_TENANT_ID'] + else: + # This is for legacy compatibility + project_id = req.headers['X_TENANT'] + + project_name = req.headers.get('X_TENANT_NAME') + # Get the auth token + auth_token = req.headers.get('X_AUTH_TOKEN', + req.headers.get('X_STORAGE_TOKEN')) + + # Build a context, including the auth_token... + remote_address = req.remote_addr + + service_catalog = None + if req.headers.get('X_SERVICE_CATALOG') is not None: + try: + catalog_header = req.headers.get('X_SERVICE_CATALOG') + service_catalog = jsonutils.loads(catalog_header) + except ValueError: + raise webob.exc.HTTPInternalServerError( + _('Invalid service catalog json.')) + + if CONF.use_forwarded_for: + remote_address = req.headers.get('X-Forwarded-For', remote_address) + ctx = context.RequestContext(user_id, + project_id, + project_name=project_name, + roles=roles, + auth_token=auth_token, + remote_address=remote_address, + service_catalog=service_catalog) + + req.environ['cinder.context'] = ctx + return self.application + + +class NoAuthMiddleware(base_wsgi.Middleware): + """Return a fake token if one isn't specified.""" + + @webob.dec.wsgify(RequestClass=wsgi.Request) + def __call__(self, req): + if 'X-Auth-Token' not in req.headers: + user_id = req.headers.get('X-Auth-User', 'admin') + project_id = req.headers.get('X-Auth-Project-Id', 'admin') + os_url = os.path.join(req.url, project_id) + res = webob.Response() + # NOTE(vish): This is expecting and returning Auth(1.1), whereas + # keystone uses 2.0 auth. We should probably allow + # 2.0 auth here as well. 
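+            # The fake token handed back is simply 'user:project' (e.g.
+            # 'admin:admin' when neither X-Auth-User nor X-Auth-Project-Id
+            # is sent); token.partition(':') below splits it apart again.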
+ res.headers['X-Auth-Token'] = '%s:%s' % (user_id, project_id) + res.headers['X-Server-Management-Url'] = os_url + res.content_type = 'text/plain' + res.status = '204' + return res + + token = req.headers['X-Auth-Token'] + user_id, _sep, project_id = token.partition(':') + project_id = project_id or user_id + remote_address = getattr(req, 'remote_address', '127.0.0.1') + if CONF.use_forwarded_for: + remote_address = req.headers.get('X-Forwarded-For', remote_address) + ctx = context.RequestContext(user_id, + project_id, + is_admin=True, + remote_address=remote_address) + + req.environ['cinder.context'] = ctx + return self.application diff --git a/cinder/api/middleware/fault.py b/cinder/api/middleware/fault.py new file mode 100644 index 0000000000..020b769064 --- /dev/null +++ b/cinder/api/middleware/fault.py @@ -0,0 +1,77 @@ +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import webob.dec +import webob.exc + +from cinder.api.openstack import wsgi +from cinder import exception +from cinder.openstack.common import log as logging +from cinder import utils +from cinder import wsgi as base_wsgi + + +LOG = logging.getLogger(__name__) + + +class FaultWrapper(base_wsgi.Middleware): + """Calls down the middleware stack, making exceptions into faults.""" + + _status_to_type = {} + + @staticmethod + def status_to_type(status): + if not FaultWrapper._status_to_type: + for clazz in utils.walk_class_hierarchy(webob.exc.HTTPError): + FaultWrapper._status_to_type[clazz.code] = clazz + return FaultWrapper._status_to_type.get( + status, webob.exc.HTTPInternalServerError)() + + def _error(self, inner, req): + LOG.exception(_("Caught error: %s"), unicode(inner)) + + safe = getattr(inner, 'safe', False) + headers = getattr(inner, 'headers', None) + status = getattr(inner, 'code', 500) + if status is None: + status = 500 + + msg_dict = dict(url=req.url, status=status) + LOG.info(_("%(url)s returned with HTTP %(status)d") % msg_dict) + outer = self.status_to_type(status) + if headers: + outer.headers = headers + # NOTE(johannes): We leave the explanation empty here on + # purpose. It could possibly have sensitive information + # that should not be returned back to the user. 
See + # bugs 868360 and 874472 + # NOTE(eglynn): However, it would be over-conservative and + # inconsistent with the EC2 API to hide every exception, + # including those that are safe to expose, see bug 1021373 + if safe: + msg = (inner.msg if isinstance(inner, exception.CinderException) + else unicode(inner)) + params = {'exception': inner.__class__.__name__, + 'explanation': msg} + outer.explanation = _('%(exception)s: %(explanation)s') % params + return wsgi.Fault(outer) + + @webob.dec.wsgify(RequestClass=wsgi.Request) + def __call__(self, req): + try: + return req.get_response(self.application) + except Exception as ex: + return self._error(ex, req) diff --git a/cinder/api/middleware/sizelimit.py b/cinder/api/middleware/sizelimit.py new file mode 100644 index 0000000000..4c3717b2b8 --- /dev/null +++ b/cinder/api/middleware/sizelimit.py @@ -0,0 +1,84 @@ +# Copyright (c) 2012 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Request Body limiting middleware. + +""" + + +from oslo.config import cfg +import webob.dec +import webob.exc + +from cinder.openstack.common import log as logging +from cinder import wsgi + + +#default request size is 112k +max_request_body_size_opt = cfg.IntOpt('osapi_max_request_body_size', + default=114688, + help='Max size for body of a request') + +CONF = cfg.CONF +CONF.register_opt(max_request_body_size_opt) + +LOG = logging.getLogger(__name__) + + +class LimitingReader(object): + """Reader to limit the size of an incoming request.""" + def __init__(self, data, limit): + """Initialize LimitingReader. 
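+
+        Both iteration and read() raise HTTPRequestEntityTooLarge once
+        more than `limit` bytes have been consumed from the underlying
+        data object.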
+
+        :param data: Underlying data object
+        :param limit: maximum number of bytes the reader should allow
+        """
+        self.data = data
+        self.limit = limit
+        self.bytes_read = 0
+
+    def __iter__(self):
+        for chunk in self.data:
+            self.bytes_read += len(chunk)
+            if self.bytes_read > self.limit:
+                msg = _("Request is too large.")
+                raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg)
+            else:
+                yield chunk
+
+    def read(self, i=None):
+        result = self.data.read(i)
+        self.bytes_read += len(result)
+        if self.bytes_read > self.limit:
+            msg = _("Request is too large.")
+            raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg)
+        return result
+
+
+class RequestBodySizeLimiter(wsgi.Middleware):
+    """Limit the size of incoming request bodies."""
+
+    def __init__(self, *args, **kwargs):
+        super(RequestBodySizeLimiter, self).__init__(*args, **kwargs)
+
+    @webob.dec.wsgify(RequestClass=wsgi.Request)
+    def __call__(self, req):
+        if req.content_length > CONF.osapi_max_request_body_size:
+            msg = _("Request is too large.")
+            raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg)
+        if req.content_length is None and req.is_body_readable:
+            limiter = LimitingReader(req.body_file,
+                                     CONF.osapi_max_request_body_size)
+            req.body_file = limiter
+        return self.application
diff --git a/cinder/api/openstack/__init__.py b/cinder/api/openstack/__init__.py
index 117babd984..008ff031f1 100644
--- a/cinder/api/openstack/__init__.py
+++ b/cinder/api/openstack/__init__.py
@@ -1,7 +1,5 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
+# Copyright (c) 2013 OpenStack Foundation
+#
 # All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -21,10 +19,7 @@
 """
 
 import routes
-import webob.dec
-import webob.exc
 
-from cinder import utils
 from cinder.api.openstack import wsgi
 from cinder.openstack.common import log as logging
 from cinder import wsgi as base_wsgi
@@ -33,53 +28,6 @@
 
 LOG = logging.getLogger(__name__)
 
 
-class FaultWrapper(base_wsgi.Middleware):
-    """Calls down the middleware stack, making exceptions into faults."""
-
-    _status_to_type = {}
-
-    @staticmethod
-    def status_to_type(status):
-        if not FaultWrapper._status_to_type:
-            for clazz in utils.walk_class_hierarchy(webob.exc.HTTPError):
-                FaultWrapper._status_to_type[clazz.code] = clazz
-        return FaultWrapper._status_to_type.get(
-            status, webob.exc.HTTPInternalServerError)()
-
-    def _error(self, inner, req):
-        LOG.exception(_("Caught error: %s"), unicode(inner))
-
-        safe = getattr(inner, 'safe', False)
-        headers = getattr(inner, 'headers', None)
-        status = getattr(inner, 'code', 500)
-        if status is None:
-            status = 500
-
-        msg_dict = dict(url=req.url, status=status)
-        LOG.info(_("%(url)s returned with HTTP %(status)d") % msg_dict)
-        outer = self.status_to_type(status)
-        if headers:
-            outer.headers = headers
-        # NOTE(johannes): We leave the explanation empty here on
-        # purpose. It could possibly have sensitive information
-        # that should not be returned back to the user. 
See - # bugs 868360 and 874472 - # NOTE(eglynn): However, it would be over-conservative and - # inconsistent with the EC2 API to hide every exception, - # including those that are safe to expose, see bug 1021373 - if safe: - outer.explanation = '%s: %s' % (inner.__class__.__name__, - unicode(inner)) - return wsgi.Fault(outer) - - @webob.dec.wsgify(RequestClass=wsgi.Request) - def __call__(self, req): - try: - return req.get_response(self.application) - except Exception as ex: - return self._error(ex, req) - - class APIMapper(routes.Mapper): def routematch(self, url=None, environ=None): if url is "": @@ -90,7 +38,7 @@ def routematch(self, url=None, environ=None): class ProjectMapper(APIMapper): def resource(self, member_name, collection_name, **kwargs): - if not ('parent_resource' in kwargs): + if 'parent_resource' not in kwargs: kwargs['path_prefix'] = '{project_id}/' else: parent_resource = kwargs['parent_resource'] @@ -98,21 +46,19 @@ def resource(self, member_name, collection_name, **kwargs): p_member = parent_resource['member_name'] kwargs['path_prefix'] = '{project_id}/%s/:%s_id' % (p_collection, p_member) - routes.Mapper.resource(self, member_name, - collection_name, - **kwargs) + routes.Mapper.resource(self, + member_name, + collection_name, + **kwargs) class APIRouter(base_wsgi.Router): - """ - Routes requests on the OpenStack API to the appropriate controller - and method. - """ + """Routes requests on the API to the appropriate controller and method.""" ExtensionManager = None # override in subclasses @classmethod def factory(cls, global_config, **local_config): - """Simple paste factory, :class:`cinder.wsgi.Router` doesn't have""" + """Simple paste factory, :class:`cinder.wsgi.Router` doesn't have.""" return cls() def __init__(self, ext_mgr=None): @@ -151,18 +97,20 @@ def _setup_ext_routes(self, mapper, ext_mgr): def _setup_extensions(self, ext_mgr): for extension in ext_mgr.get_controller_extensions(): - ext_name = extension.extension.name collection = extension.collection controller = extension.controller if collection not in self.resources: LOG.warning(_('Extension %(ext_name)s: Cannot extend ' - 'resource %(collection)s: No such resource') % - locals()) + 'resource %(collection)s: No such resource'), + {'ext_name': extension.extension.name, + 'collection': collection}) continue LOG.debug(_('Extension %(ext_name)s extending resource: ' - '%(collection)s') % locals()) + '%(collection)s'), + {'ext_name': extension.extension.name, + 'collection': collection}) resource = self.resources[collection] resource.register_actions(controller) @@ -170,3 +118,13 @@ def _setup_extensions(self, ext_mgr): def _setup_routes(self, mapper, ext_mgr): raise NotImplementedError + + +class FaultWrapper(base_wsgi.Middleware): + + def __init__(self, application): + LOG.warn(_('cinder.api.openstack:FaultWrapper is deprecated. Please ' + 'use cinder.api.middleware.fault:FaultWrapper instead.')) + # Avoid circular imports from here. Can I just remove this class? + from cinder.api.middleware import fault + super(FaultWrapper, self).__init__(fault.FaultWrapper(application)) diff --git a/cinder/api/openstack/auth.py b/cinder/api/openstack/auth.py deleted file mode 100644 index 68c98dad5b..0000000000 --- a/cinder/api/openstack/auth.py +++ /dev/null @@ -1,65 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 OpenStack LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os - -import webob.dec -import webob.exc - -from cinder.api.openstack import wsgi -from cinder import context -from cinder import flags -from cinder.openstack.common import log as logging -from cinder import wsgi as base_wsgi - -LOG = logging.getLogger(__name__) -FLAGS = flags.FLAGS -flags.DECLARE('use_forwarded_for', 'cinder.api.auth') - - -class NoAuthMiddleware(base_wsgi.Middleware): - """Return a fake token if one isn't specified.""" - - @webob.dec.wsgify(RequestClass=wsgi.Request) - def __call__(self, req): - if 'X-Auth-Token' not in req.headers: - user_id = req.headers.get('X-Auth-User', 'admin') - project_id = req.headers.get('X-Auth-Project-Id', 'admin') - os_url = os.path.join(req.url, project_id) - res = webob.Response() - # NOTE(vish): This is expecting and returning Auth(1.1), whereas - # keystone uses 2.0 auth. We should probably allow - # 2.0 auth here as well. - res.headers['X-Auth-Token'] = '%s:%s' % (user_id, project_id) - res.headers['X-Server-Management-Url'] = os_url - res.content_type = 'text/plain' - res.status = '204' - return res - - token = req.headers['X-Auth-Token'] - user_id, _sep, project_id = token.partition(':') - project_id = project_id or user_id - remote_address = getattr(req, 'remote_address', '127.0.0.1') - if FLAGS.use_forwarded_for: - remote_address = req.headers.get('X-Forwarded-For', remote_address) - ctx = context.RequestContext(user_id, - project_id, - is_admin=True, - remote_address=remote_address) - - req.environ['cinder.context'] = ctx - return self.application diff --git a/cinder/api/openstack/urlmap.py b/cinder/api/openstack/urlmap.py index 52b7298600..d4bfe6d83e 100644 --- a/cinder/api/openstack/urlmap.py +++ b/cinder/api/openstack/urlmap.py @@ -1,6 +1,5 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2011 OpenStack LLC. +# Copyright (c) 2013 OpenStack Foundation +# # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -15,283 +14,14 @@ # License for the specific language governing permissions and limitations # under the License. -import paste.urlmap -import re -import urllib2 - +from cinder.api import urlmap from cinder.openstack.common import log as logging -from cinder.api.openstack import wsgi - -_quoted_string_re = r'"[^"\\]*(?:\\.[^"\\]*)*"' -_option_header_piece_re = re.compile(r';\s*([^\s;=]+|%s)\s*' - r'(?:=\s*([^;]+|%s))?\s*' % - (_quoted_string_re, _quoted_string_re)) LOG = logging.getLogger(__name__) -def unquote_header_value(value): - """Unquotes a header value. - This does not use the real unquoting but what browsers are actually - using for quoting. - - :param value: the header value to unquote. - """ - if value and value[0] == value[-1] == '"': - # this is not the real unquoting, but fixing this so that the - # RFC is met will result in bugs with internet explorer and - # probably some other browsers as well. IE for example is - # uploading files with "C:\foo\bar.txt" as filename - value = value[1:-1] - return value - - -def parse_list_header(value): - """Parse lists as described by RFC 2068 Section 2. 
- - In particular, parse comma-separated lists where the elements of - the list may include quoted-strings. A quoted-string could - contain a comma. A non-quoted string could have quotes in the - middle. Quotes are removed automatically after parsing. - - The return value is a standard :class:`list`: - - >>> parse_list_header('token, "quoted value"') - ['token', 'quoted value'] - - :param value: a string with a list header. - :return: :class:`list` - """ - result = [] - for item in urllib2.parse_http_list(value): - if item[:1] == item[-1:] == '"': - item = unquote_header_value(item[1:-1]) - result.append(item) - return result - - -def parse_options_header(value): - """Parse a ``Content-Type`` like header into a tuple with the content - type and the options: - - >>> parse_options_header('Content-Type: text/html; mimetype=text/html') - ('Content-Type:', {'mimetype': 'text/html'}) - - :param value: the header to parse. - :return: (str, options) - """ - def _tokenize(string): - for match in _option_header_piece_re.finditer(string): - key, value = match.groups() - key = unquote_header_value(key) - if value is not None: - value = unquote_header_value(value) - yield key, value - - if not value: - return '', {} - - parts = _tokenize(';' + value) - name = parts.next()[0] - extra = dict(parts) - return name, extra - - -class Accept(object): - def __init__(self, value): - self._content_types = [parse_options_header(v) for v in - parse_list_header(value)] - - def best_match(self, supported_content_types): - # FIXME: Should we have a more sophisticated matching algorithm that - # takes into account the version as well? - best_quality = -1 - best_content_type = None - best_params = {} - best_match = '*/*' - - for content_type in supported_content_types: - for content_mask, params in self._content_types: - try: - quality = float(params.get('q', 1)) - except ValueError: - continue - - if quality < best_quality: - continue - elif best_quality == quality: - if best_match.count('*') <= content_mask.count('*'): - continue - - if self._match_mask(content_mask, content_type): - best_quality = quality - best_content_type = content_type - best_params = params - best_match = content_mask - - return best_content_type, best_params - - def content_type_params(self, best_content_type): - """Find parameters in Accept header for given content type.""" - for content_type, params in self._content_types: - if best_content_type == content_type: - return params - - return {} - - def _match_mask(self, mask, content_type): - if '*' not in mask: - return content_type == mask - if mask == '*/*': - return True - mask_major = mask[:-2] - content_type_major = content_type.split('/', 1)[0] - return content_type_major == mask_major - - def urlmap_factory(loader, global_conf, **local_conf): - if 'not_found_app' in local_conf: - not_found_app = local_conf.pop('not_found_app') - else: - not_found_app = global_conf.get('not_found_app') - if not_found_app: - not_found_app = loader.get_app(not_found_app, global_conf=global_conf) - urlmap = URLMap(not_found_app=not_found_app) - for path, app_name in local_conf.items(): - path = paste.urlmap.parse_path_expression(path) - app = loader.get_app(app_name, global_conf=global_conf) - urlmap[path] = app - return urlmap - - -class URLMap(paste.urlmap.URLMap): - def _match(self, host, port, path_info): - """Find longest match for a given URL path.""" - for (domain, app_url), app in self.applications: - if domain and domain != host and domain != host + ':' + port: - continue - if (path_info == app_url 
- or path_info.startswith(app_url + '/')): - return app, app_url - - return None, None - - def _set_script_name(self, app, app_url): - def wrap(environ, start_response): - environ['SCRIPT_NAME'] += app_url - return app(environ, start_response) - - return wrap - - def _munge_path(self, app, path_info, app_url): - def wrap(environ, start_response): - environ['SCRIPT_NAME'] += app_url - environ['PATH_INFO'] = path_info[len(app_url):] - return app(environ, start_response) - - return wrap - - def _path_strategy(self, host, port, path_info): - """Check path suffix for MIME type and path prefix for API version.""" - mime_type = app = app_url = None - - parts = path_info.rsplit('.', 1) - if len(parts) > 1: - possible_type = 'application/' + parts[1] - if possible_type in wsgi.SUPPORTED_CONTENT_TYPES: - mime_type = possible_type - - parts = path_info.split('/') - if len(parts) > 1: - possible_app, possible_app_url = self._match(host, port, path_info) - # Don't use prefix if it ends up matching default - if possible_app and possible_app_url: - app_url = possible_app_url - app = self._munge_path(possible_app, path_info, app_url) - - return mime_type, app, app_url - - def _content_type_strategy(self, host, port, environ): - """Check Content-Type header for API version.""" - app = None - params = parse_options_header(environ.get('CONTENT_TYPE', ''))[1] - if 'version' in params: - app, app_url = self._match(host, port, '/v' + params['version']) - if app: - app = self._set_script_name(app, app_url) - - return app - - def _accept_strategy(self, host, port, environ, supported_content_types): - """Check Accept header for best matching MIME type and API version.""" - accept = Accept(environ.get('HTTP_ACCEPT', '')) - - app = None - - # Find the best match in the Accept header - mime_type, params = accept.best_match(supported_content_types) - if 'version' in params: - app, app_url = self._match(host, port, '/v' + params['version']) - if app: - app = self._set_script_name(app, app_url) - - return mime_type, app - - def __call__(self, environ, start_response): - host = environ.get('HTTP_HOST', environ.get('SERVER_NAME')).lower() - if ':' in host: - host, port = host.split(':', 1) - else: - if environ['wsgi.url_scheme'] == 'http': - port = '80' - else: - port = '443' - - path_info = environ['PATH_INFO'] - path_info = self.normalize_url(path_info, False)[1] - - # The MIME type for the response is determined in one of two ways: - # 1) URL path suffix (eg /servers/detail.json) - # 2) Accept header (eg application/json;q=0.8, application/xml;q=0.2) - - # The API version is determined in one of three ways: - # 1) URL path prefix (eg /v1.1/tenant/servers/detail) - # 2) Content-Type header (eg application/json;version=1.1) - # 3) Accept header (eg application/json;q=0.8;version=1.1) - - supported_content_types = list(wsgi.SUPPORTED_CONTENT_TYPES) - - mime_type, app, app_url = self._path_strategy(host, port, path_info) - - # Accept application/atom+xml for the index query of each API - # version mount point as well as the root index - if (app_url and app_url + '/' == path_info) or path_info == '/': - supported_content_types.append('application/atom+xml') - - if not app: - app = self._content_type_strategy(host, port, environ) - - if not mime_type or not app: - possible_mime_type, possible_app = self._accept_strategy( - host, port, environ, supported_content_types) - if possible_mime_type and not mime_type: - mime_type = possible_mime_type - if possible_app and not app: - app = possible_app - - if not mime_type: - 
mime_type = 'application/json' - - if not app: - # Didn't match a particular version, probably matches default - app, app_url = self._match(host, port, path_info) - if app: - app = self._munge_path(app, path_info, app_url) - - if app: - environ['cinder.best_content_type'] = mime_type - return app(environ, start_response) - - environ['paste.urlmap_object'] = self - return self.not_found_application(environ, start_response) + LOG.warn(_('cinder.api.openstack.urlmap:urlmap_factory is deprecated. ' + 'Please use cinder.api.urlmap:urlmap_factory instead.')) + urlmap.urlmap_factory(loader, global_conf, **local_conf) diff --git a/cinder/api/openstack/volume/__init__.py b/cinder/api/openstack/volume/__init__.py index 9a99451008..90a52cbd7b 100644 --- a/cinder/api/openstack/volume/__init__.py +++ b/cinder/api/openstack/volume/__init__.py @@ -1,7 +1,5 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. +# Copyright (c) 2013 OpenStack Foundation +# # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -16,54 +14,14 @@ # License for the specific language governing permissions and limitations # under the License. -""" -WSGI middleware for OpenStack Volume API. -""" - -import cinder.api.openstack -from cinder.api.openstack.volume import extensions -from cinder.api.openstack.volume import limits -from cinder.api.openstack.volume import snapshots -from cinder.api.openstack.volume import types -from cinder.api.openstack.volume import volumes -from cinder.api.openstack.volume import versions +from cinder.api.v1.router import APIRouter as v1_router from cinder.openstack.common import log as logging - LOG = logging.getLogger(__name__) -class APIRouter(cinder.api.openstack.APIRouter): - """ - Routes requests on the OpenStack API to the appropriate controller - and method. - """ - ExtensionManager = extensions.ExtensionManager - - def _setup_routes(self, mapper, ext_mgr): - self.resources['versions'] = versions.create_resource() - mapper.connect("versions", "/", - controller=self.resources['versions'], - action='show') - - mapper.redirect("", "/") - - self.resources['volumes'] = volumes.create_resource(ext_mgr) - mapper.resource("volume", "volumes", - controller=self.resources['volumes'], - collection={'detail': 'GET'}, - member={'action': 'POST'}) - - self.resources['types'] = types.create_resource() - mapper.resource("type", "types", - controller=self.resources['types']) - - self.resources['snapshots'] = snapshots.create_resource(ext_mgr) - mapper.resource("snapshot", "snapshots", - controller=self.resources['snapshots'], - collection={'detail': 'GET'}, - member={'action': 'POST'}) - - self.resources['limits'] = limits.create_resource() - mapper.resource("limit", "limits", - controller=self.resources['limits']) +class APIRouter(v1_router): + def __init__(self, ext_mgr=None): + LOG.warn(_('cinder.api.openstack.volume:APIRouter is deprecated. ' + 'Please use cinder.api.v1.router:APIRouter instead.')) + super(APIRouter, self).__init__(ext_mgr) diff --git a/cinder/api/openstack/volume/contrib/volume_actions.py b/cinder/api/openstack/volume/contrib/volume_actions.py deleted file mode 100644 index 5c6276646c..0000000000 --- a/cinder/api/openstack/volume/contrib/volume_actions.py +++ /dev/null @@ -1,204 +0,0 @@ -# Copyright 2012 OpenStack, LLC. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import webob -from xml.dom import minidom - -from cinder.api.openstack import extensions -from cinder.api.openstack import wsgi -from cinder.api.openstack import xmlutil -from cinder import volume -from cinder import exception -from cinder import flags -from cinder.openstack.common import log as logging -from cinder.openstack.common.rpc import common as rpc_common - - -FLAGS = flags.FLAGS -LOG = logging.getLogger(__name__) - - -def authorize(context, action_name): - action = 'volume_actions:%s' % action_name - extensions.extension_authorizer('volume', action)(context) - - -class VolumeToImageSerializer(xmlutil.TemplateBuilder): - def construct(self): - root = xmlutil.TemplateElement('os-volume_upload_image', - selector='os-volume_upload_image') - root.set('id') - root.set('updated_at') - root.set('status') - root.set('display_description') - root.set('size') - root.set('volume_type') - root.set('image_id') - root.set('container_format') - root.set('disk_format') - root.set('image_name') - return xmlutil.MasterTemplate(root, 1) - - -class VolumeToImageDeserializer(wsgi.XMLDeserializer): - """Deserializer to handle xml-formatted requests""" - def default(self, string): - dom = minidom.parseString(string) - action_node = dom.childNodes[0] - action_name = action_node.tagName - - action_data = {} - attributes = ["force", "image_name", "container_format", "disk_format"] - for attr in attributes: - if action_node.hasAttribute(attr): - action_data[attr] = action_node.getAttribute(attr) - if 'force' in action_data and action_data['force'] == 'True': - action_data['force'] = True - return {'body': {action_name: action_data}} - - -class VolumeActionsController(wsgi.Controller): - def __init__(self, *args, **kwargs): - super(VolumeActionsController, self).__init__(*args, **kwargs) - self.volume_api = volume.API() - - @wsgi.action('os-attach') - def _attach(self, req, id, body): - """Add attachment metadata.""" - context = req.environ['cinder.context'] - volume = self.volume_api.get(context, id) - - instance_uuid = body['os-attach']['instance_uuid'] - mountpoint = body['os-attach']['mountpoint'] - - self.volume_api.attach(context, volume, - instance_uuid, mountpoint) - return webob.Response(status_int=202) - - @wsgi.action('os-detach') - def _detach(self, req, id, body): - """Clear attachment metadata.""" - context = req.environ['cinder.context'] - volume = self.volume_api.get(context, id) - self.volume_api.detach(context, volume) - return webob.Response(status_int=202) - - @wsgi.action('os-reserve') - def _reserve(self, req, id, body): - """Mark volume as reserved.""" - context = req.environ['cinder.context'] - volume = self.volume_api.get(context, id) - self.volume_api.reserve_volume(context, volume) - return webob.Response(status_int=202) - - @wsgi.action('os-unreserve') - def _unreserve(self, req, id, body): - """Unmark volume as reserved.""" - context = req.environ['cinder.context'] - volume = self.volume_api.get(context, id) - 
self.volume_api.unreserve_volume(context, volume) - return webob.Response(status_int=202) - - @wsgi.action('os-begin_detaching') - def _begin_detaching(self, req, id, body): - """Update volume status to 'detaching'.""" - context = req.environ['cinder.context'] - volume = self.volume_api.get(context, id) - self.volume_api.begin_detaching(context, volume) - return webob.Response(status_int=202) - - @wsgi.action('os-roll_detaching') - def _roll_detaching(self, req, id, body): - """Roll back volume status to 'in-use'.""" - context = req.environ['cinder.context'] - volume = self.volume_api.get(context, id) - self.volume_api.roll_detaching(context, volume) - return webob.Response(status_int=202) - - @wsgi.action('os-initialize_connection') - def _initialize_connection(self, req, id, body): - """Initialize volume attachment.""" - context = req.environ['cinder.context'] - volume = self.volume_api.get(context, id) - connector = body['os-initialize_connection']['connector'] - info = self.volume_api.initialize_connection(context, - volume, - connector) - return {'connection_info': info} - - @wsgi.action('os-terminate_connection') - def _terminate_connection(self, req, id, body): - """Terminate volume attachment.""" - context = req.environ['cinder.context'] - volume = self.volume_api.get(context, id) - connector = body['os-terminate_connection']['connector'] - self.volume_api.terminate_connection(context, volume, connector) - return webob.Response(status_int=202) - - @wsgi.response(202) - @wsgi.action('os-volume_upload_image') - @wsgi.serializers(xml=VolumeToImageSerializer) - @wsgi.deserializers(xml=VolumeToImageDeserializer) - def _volume_upload_image(self, req, id, body): - """Uploads the specified volume to image service.""" - context = req.environ['cinder.context'] - try: - params = body['os-volume_upload_image'] - except (TypeError, KeyError): - msg = _("Invalid request body") - raise webob.exc.HTTPBadRequest(explanation=msg) - - if not params.get("image_name"): - msg = _("No image_name was specified in request.") - raise webob.exc.HTTPBadRequest(explanation=msg) - - force = params.get('force', False) - try: - volume = self.volume_api.get(context, id) - except exception.VolumeNotFound, error: - raise webob.exc.HTTPNotFound(explanation=unicode(error)) - authorize(context, "upload_image") - image_metadata = {"container_format": params.get("container_format", - "bare"), - "disk_format": params.get("disk_format", "raw"), - "name": params["image_name"]} - try: - response = self.volume_api.copy_volume_to_image(context, - volume, - image_metadata, - force) - except exception.InvalidVolume, error: - raise webob.exc.HTTPBadRequest(explanation=unicode(error)) - except ValueError, error: - raise webob.exc.HTTPBadRequest(explanation=unicode(error)) - except rpc_common.RemoteError as error: - msg = "%(err_type)s: %(err_msg)s" % {'err_type': error.exc_type, - 'err_msg': error.value} - raise webob.exc.HTTPBadRequest(explanation=msg) - return {'os-volume_upload_image': response} - - -class Volume_actions(extensions.ExtensionDescriptor): - """Enable volume actions - """ - - name = "VolumeActions" - alias = "os-volume-actions" - namespace = "http://docs.openstack.org/volume/ext/volume-actions/api/v1.1" - updated = "2012-05-31T00:00:00+00:00" - - def get_controller_extensions(self): - controller = VolumeActionsController() - extension = extensions.ControllerExtension(self, 'volumes', controller) - return [extension] diff --git a/cinder/api/openstack/volume/versions.py b/cinder/api/openstack/volume/versions.py 
index a3cc91f9bf..c2db50a89c 100644 --- a/cinder/api/openstack/volume/versions.py +++ b/cinder/api/openstack/volume/versions.py @@ -1,6 +1,5 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 OpenStack LLC. +# Copyright (c) 2013 OpenStack Foundation +# # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -15,224 +14,16 @@ # License for the specific language governing permissions and limitations # under the License. -import datetime - -from lxml import etree - -from cinder.api.openstack.volume.views import versions as views_versions -from cinder.api.openstack import wsgi -from cinder.api.openstack import xmlutil - - -VERSIONS = { - "v1.0": { - "id": "v1.0", - "status": "CURRENT", - "updated": "2012-01-04T11:33:21Z", - "links": [ - { - "rel": "describedby", - "type": "application/pdf", - "href": "http://jorgew.github.com/block-storage-api/" - "content/os-block-storage-1.0.pdf", - }, - { - "rel": "describedby", - "type": "application/vnd.sun.wadl+xml", - #(anthony) FIXME - "href": "http://docs.rackspacecloud.com/" - "servers/api/v1.1/application.wadl", - }, - ], - "media-types": [ - { - "base": "application/xml", - "type": "application/vnd.openstack.volume+xml;version=1", - }, - { - "base": "application/json", - "type": "application/vnd.openstack.volume+json;version=1", - } - ], - } -} - - -class MediaTypesTemplateElement(xmlutil.TemplateElement): - def will_render(self, datum): - return 'media-types' in datum - - -def make_version(elem): - elem.set('id') - elem.set('status') - elem.set('updated') - - mts = MediaTypesTemplateElement('media-types') - elem.append(mts) - - mt = xmlutil.SubTemplateElement(mts, 'media-type', selector='media-types') - mt.set('base') - mt.set('type') - - xmlutil.make_links(elem, 'links') - - -version_nsmap = {None: xmlutil.XMLNS_COMMON_V10, 'atom': xmlutil.XMLNS_ATOM} - - -class VersionTemplate(xmlutil.TemplateBuilder): - def construct(self): - root = xmlutil.TemplateElement('version', selector='version') - make_version(root) - return xmlutil.MasterTemplate(root, 1, nsmap=version_nsmap) - - -class VersionsTemplate(xmlutil.TemplateBuilder): - def construct(self): - root = xmlutil.TemplateElement('versions') - elem = xmlutil.SubTemplateElement(root, 'version', selector='versions') - make_version(elem) - return xmlutil.MasterTemplate(root, 1, nsmap=version_nsmap) - - -class ChoicesTemplate(xmlutil.TemplateBuilder): - def construct(self): - root = xmlutil.TemplateElement('choices') - elem = xmlutil.SubTemplateElement(root, 'version', selector='choices') - make_version(elem) - return xmlutil.MasterTemplate(root, 1, nsmap=version_nsmap) - - -class AtomSerializer(wsgi.XMLDictSerializer): - - NSMAP = {None: xmlutil.XMLNS_ATOM} - - def __init__(self, metadata=None, xmlns=None): - self.metadata = metadata or {} - if not xmlns: - self.xmlns = wsgi.XMLNS_ATOM - else: - self.xmlns = xmlns +from cinder.api import versions +from cinder.openstack.common import log as logging - def _get_most_recent_update(self, versions): - recent = None - for version in versions: - updated = datetime.datetime.strptime(version['updated'], - '%Y-%m-%dT%H:%M:%SZ') - if not recent: - recent = updated - elif updated > recent: - recent = updated - return recent.strftime('%Y-%m-%dT%H:%M:%SZ') +LOG = logging.getLogger(__name__) - def _get_base_url(self, link_href): - # Make sure no trailing / - link_href = link_href.rstrip('/') - return link_href.rsplit('/', 1)[0] + '/' - - def _create_feed(self, versions, feed_title, feed_id): - feed = 
etree.Element('feed', nsmap=self.NSMAP) - title = etree.SubElement(feed, 'title') - title.set('type', 'text') - title.text = feed_title - - # Set this updated to the most recently updated version - recent = self._get_most_recent_update(versions) - etree.SubElement(feed, 'updated').text = recent - - etree.SubElement(feed, 'id').text = feed_id - - link = etree.SubElement(feed, 'link') - link.set('rel', 'self') - link.set('href', feed_id) - - author = etree.SubElement(feed, 'author') - etree.SubElement(author, 'name').text = 'Rackspace' - etree.SubElement(author, 'uri').text = 'http://www.rackspace.com/' - - for version in versions: - feed.append(self._create_version_entry(version)) - - return feed - - def _create_version_entry(self, version): - entry = etree.Element('entry') - etree.SubElement(entry, 'id').text = version['links'][0]['href'] - title = etree.SubElement(entry, 'title') - title.set('type', 'text') - title.text = 'Version %s' % version['id'] - etree.SubElement(entry, 'updated').text = version['updated'] - - for link in version['links']: - link_elem = etree.SubElement(entry, 'link') - link_elem.set('rel', link['rel']) - link_elem.set('href', link['href']) - if 'type' in link: - link_elem.set('type', link['type']) - - content = etree.SubElement(entry, 'content') - content.set('type', 'text') - content.text = 'Version %s %s (%s)' % (version['id'], - version['status'], - version['updated']) - return entry - - -class VersionsAtomSerializer(AtomSerializer): - def default(self, data): - versions = data['versions'] - feed_id = self._get_base_url(versions[0]['links'][0]['href']) - feed = self._create_feed(versions, 'Available API Versions', feed_id) - return self._to_xml(feed) - - -class VersionAtomSerializer(AtomSerializer): - def default(self, data): - version = data['version'] - feed_id = version['links'][0]['href'] - feed = self._create_feed([version], 'About This Version', feed_id) - return self._to_xml(feed) - - -class Versions(wsgi.Resource): +class Versions(versions.Versions): def __init__(self): - super(Versions, self).__init__(None) - - @wsgi.serializers(xml=VersionsTemplate, - atom=VersionsAtomSerializer) - def index(self, req): - """Return all versions.""" - builder = views_versions.get_view_builder(req) - return builder.build_versions(VERSIONS) - - @wsgi.serializers(xml=ChoicesTemplate) - @wsgi.response(300) - def multi(self, req): - """Return multiple choices.""" - builder = views_versions.get_view_builder(req) - return builder.build_choices(VERSIONS, req) - - def get_action_args(self, request_environment): - """Parse dictionary created by routes library.""" - args = {} - if request_environment['PATH_INFO'] == '/': - args['action'] = 'index' - else: - args['action'] = 'multi' - - return args - - -class VolumeVersionV1(object): - @wsgi.serializers(xml=VersionTemplate, - atom=VersionAtomSerializer) - def show(self, req): - builder = views_versions.get_view_builder(req) - return builder.build_version(VERSIONS['v1.0']) - - -def create_resource(): - return wsgi.Resource(VolumeVersionV1()) + LOG.warn(_('cinder.api.openstack.volume.versions.Versions is ' + 'deprecated. Please use cinder.api.versions.Versions ' + 'instead.')) + super(Versions, self).__init__() diff --git a/cinder/api/openstack/wsgi.py b/cinder/api/openstack/wsgi.py index fa0baead62..b1138cdb60 100644 --- a/cinder/api/openstack/wsgi.py +++ b/cinder/api/openstack/wsgi.py @@ -1,6 +1,5 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2011 OpenStack LLC. 
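For context: the rewritten versions.py above is a pure deprecation shim; the implementation moved to cinder.api.versions and the old import path only warns and delegates. The general pattern, sketched with stand-in names::

    import warnings

    class NewVersions(object):
        """Stand-in for the relocated implementation."""
        def __init__(self):
            self.loaded = True

    class OldVersions(NewVersions):
        """Stand-in kept at the legacy import path for compatibility."""
        def __init__(self):
            warnings.warn('OldVersions is deprecated; use NewVersions instead.',
                          DeprecationWarning)
            super(OldVersions, self).__init__()

    assert OldVersions().loaded  # behaves exactly like the new class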
+# Copyright 2011 OpenStack Foundation +# Copyright 2013 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -18,17 +17,20 @@ import inspect import math import time -import webob - -from cinder import exception -from cinder import wsgi -from cinder.openstack.common import log as logging -from cinder.openstack.common import jsonutils from lxml import etree +import six +import webob from xml.dom import minidom from xml.parsers import expat +from cinder import exception +from cinder.openstack.common import gettextutils +from cinder.openstack.common import jsonutils +from cinder.openstack.common import log as logging +from cinder import utils +from cinder import wsgi + XMLNS_V1 = 'http://docs.openstack.org/volume/api/v1' XMLNS_ATOM = 'http://www.w3.org/2005/Atom' @@ -62,6 +64,80 @@ class Request(webob.Request): """Add some OpenStack API-specific logic to the base webob.Request.""" + def __init__(self, *args, **kwargs): + super(Request, self).__init__(*args, **kwargs) + self._resource_cache = {} + + def cache_resource(self, resource_to_cache, id_attribute='id', name=None): + """Cache the given resource. + + Allow API methods to cache objects, such as results from a DB query, + to be used by API extensions within the same API request. + + The resource_to_cache can be a list or an individual resource, + but ultimately resources are cached individually using the given + id_attribute. + + Different resources types might need to be cached during the same + request, they can be cached using the name parameter. For example: + + Controller 1: + request.cache_resource(db_volumes, 'volumes') + request.cache_resource(db_volume_types, 'types') + Controller 2: + db_volumes = request.cached_resource('volumes') + db_type_1 = request.cached_resource_by_id('1', 'types') + + If no name is given, a default name will be used for the resource. + + An instance of this class only lives for the lifetime of a + single API request, so there's no need to implement full + cache management. + """ + if not isinstance(resource_to_cache, list): + resource_to_cache = [resource_to_cache] + if not name: + name = self.path + cached_resources = self._resource_cache.setdefault(name, {}) + for resource in resource_to_cache: + cached_resources[resource[id_attribute]] = resource + + def cached_resource(self, name=None): + """Get the cached resources cached under the given resource name. + + Allow an API extension to get previously stored objects within + the same API request. + + Note that the object data will be slightly stale. + + :returns: a dict of id_attribute to the resource from the cached + resources, an empty map if an empty collection was cached, + or None if nothing has been cached yet under this name + """ + if not name: + name = self.path + if name not in self._resource_cache: + # Nothing has been cached for this key yet + return None + return self._resource_cache[name] + + def cached_resource_by_id(self, resource_id, name=None): + """Get a resource by ID cached under the given resource name. + + Allow an API extension to get a previously stored object + within the same API request. This is basically a convenience method + to lookup by ID on the dictionary of all cached resources. + + Note that the object data will be slightly stale. 
+ + :returns: the cached resource or None if the item is not in the cache + """ + resources = self.cached_resource(name) + if not resources: + # Nothing has been cached for this key yet + return None + return resources.get(resource_id) + def best_match_content_type(self): """Determine the requested response content-type.""" if 'cinder.best_content_type' not in self.environ: @@ -79,7 +155,7 @@ def best_match_content_type(self): content_type = self.accept.best_match(SUPPORTED_CONTENT_TYPES) self.environ['cinder.best_content_type'] = (content_type or - 'application/json') + 'application/json') return self.environ['cinder.best_content_type'] @@ -89,7 +165,7 @@ def get_content_type(self): Does not do any body introspection, only checks header """ - if not "Content-Type" in self.headers: + if "Content-Type" not in self.headers: return None allowed_types = SUPPORTED_CONTENT_TYPES @@ -100,6 +176,17 @@ def get_content_type(self): return content_type + def best_match_language(self): + """Determines best available locale from the Accept-Language header. + + :returns: the best language match or None if the 'Accept-Language' + header was not available in the request. + """ + if not self.accept_language: + return None + all_languages = gettextutils.get_available_languages('cinder') + return self.accept_language.best_match(all_languages) + class ActionDispatcher(object): """Maps method name to local methods through action name.""" @@ -115,7 +202,7 @@ def default(self, data): class TextDeserializer(ActionDispatcher): - """Default request body deserialization""" + """Default request body deserialization.""" def deserialize(self, datastring, action='default'): return self.dispatch(datastring, action=action) @@ -140,7 +227,8 @@ def default(self, datastring): class XMLDeserializer(TextDeserializer): def __init__(self, metadata=None): - """ + """Initialize XMLDeserializer. + :param metadata: information needed to deserialize xml into a dictionary.
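For context: the cache_resource/cached_resource/cached_resource_by_id trio added to Request above gives API extensions request-scoped access to objects a controller already fetched, avoiding duplicate DB lookups. A self-contained toy model of the same mechanics (a plain class, not the real webob Request)::

    class FakeRequest(object):
        """Toy stand-in for Request with the caching helpers above."""

        def __init__(self, path):
            self.path = path
            self._resource_cache = {}

        def cache_resource(self, resources, id_attribute='id', name=None):
            if not isinstance(resources, list):
                resources = [resources]
            cached = self._resource_cache.setdefault(name or self.path, {})
            for resource in resources:
                cached[resource[id_attribute]] = resource

        def cached_resource_by_id(self, resource_id, name=None):
            resources = self._resource_cache.get(name or self.path)
            return resources.get(resource_id) if resources else None

    # Controller 1 caches; an extension later reads within the same request.
    req = FakeRequest('/v1/volumes')
    req.cache_resource([{'id': 'vol-1', 'size': 10}], name='volumes')
    assert req.cached_resource_by_id('vol-1', 'volumes')['size'] == 10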
""" @@ -151,7 +239,7 @@ def _from_xml(self, datastring): plurals = set(self.metadata.get('plurals', {})) try: - node = minidom.parseString(datastring).childNodes[0] + node = utils.safe_minidom_parse_string(datastring).childNodes[0] return {node.nodeName: self._from_xml_node(node, plurals)} except expat.ExpatError: msg = _("cannot understand XML") @@ -178,29 +266,40 @@ def _from_xml_node(self, node, listnames): listnames) return result + def find_first_child_named_in_namespace(self, parent, namespace, name): + """Search a nodes children for the first child with a given name.""" + for node in parent.childNodes: + if (node.localName == name and + node.namespaceURI and + node.namespaceURI == namespace): + return node + return None + def find_first_child_named(self, parent, name): - """Search a nodes children for the first child with a given name""" + """Search a nodes children for the first child with a given name.""" for node in parent.childNodes: if node.nodeName == name: return node return None def find_children_named(self, parent, name): - """Return all of a nodes children who have the given name""" + """Return all of a nodes children who have the given name.""" for node in parent.childNodes: if node.nodeName == name: yield node def extract_text(self, node): - """Get the text field contained by the given node""" - if len(node.childNodes) == 1: - child = node.childNodes[0] + """Get the text field contained by the given node.""" + text = [] + # Cannot assume entire text will be in a single child node because SAX + # parsers may split contiguous character data into multiple chunks + for child in node.childNodes: if child.nodeType == child.TEXT_NODE: - return child.nodeValue - return "" + text.append(child.nodeValue) + return ''.join(text) def find_attribute_or_element(self, parent, name): - """Get an attribute value; fallback to an element if not found""" + """Get an attribute value; fallback to an element if not found.""" if parent.hasAttribute(name): return parent.getAttribute(name) @@ -217,7 +316,7 @@ def default(self, datastring): class MetadataXMLDeserializer(XMLDeserializer): def extract_metadata(self, metadata_node): - """Marshal the metadata attribute of a parsed request""" + """Marshal the metadata attribute of a parsed request.""" metadata = {} if metadata_node is not None: for meta_node in self.find_children_named(metadata_node, "meta"): @@ -227,7 +326,7 @@ def extract_metadata(self, metadata_node): class DictSerializer(ActionDispatcher): - """Default request body serialization""" + """Default request body serialization.""" def serialize(self, data, action='default'): return self.dispatch(data, action=action) @@ -237,7 +336,7 @@ def default(self, data): class JSONDictSerializer(DictSerializer): - """Default JSON request body serialization""" + """Default JSON request body serialization.""" def default(self, data): return jsonutils.dumps(data) @@ -246,7 +345,8 @@ def default(self, data): class XMLDictSerializer(DictSerializer): def __init__(self, metadata=None, xmlns=None): - """ + """Initialize XMLDictSerializer. + :param metadata: information needed to deserialize xml into a dictionary. 
:param xmlns: XML namespace to include with serialized xml @@ -548,7 +648,7 @@ def action_peek_json(body): def action_peek_xml(body): """Determine action to invoke.""" - dom = minidom.parseString(body) + dom = utils.safe_minidom_parse_string(body) action_node = dom.childNodes[0] return action_node.tagName @@ -570,15 +670,15 @@ def __exit__(self, ex_type, ex_value, ex_traceback): return True if isinstance(ex_value, exception.NotAuthorized): - msg = unicode(ex_value) - raise Fault(webob.exc.HTTPForbidden(explanation=msg)) + raise Fault(webob.exc.HTTPForbidden(explanation=ex_value.msg)) elif isinstance(ex_value, exception.Invalid): raise Fault(exception.ConvertedException( - code=ex_value.code, explanation=unicode(ex_value))) + code=ex_value.code, explanation=ex_value.msg)) elif isinstance(ex_value, TypeError): exc_info = (ex_type, ex_value, ex_traceback) - LOG.error(_('Exception handling resource: %s') % ex_value, - exc_info=exc_info) + LOG.error(_( + 'Exception handling resource: %s') % + ex_value, exc_info=exc_info) raise Fault(webob.exc.HTTPBadRequest()) elif isinstance(ex_value, Fault): LOG.info(_("Fault thrown: %s"), unicode(ex_value)) @@ -608,7 +708,8 @@ class Resource(wsgi.Application): """ def __init__(self, controller, action_peek=None, **deserializers): - """ + """Initialize Resource. + :param controller: object that implement methods created by routes lib :param action_peek: dictionary of routines for peeking into an action request body to determine the desired action @@ -689,6 +790,11 @@ def get_action_args(self, request_environment): return args def get_body(self, request): + + if len(request.body) == 0: + LOG.debug(_("Empty body provided in request")) + return None, '' + try: content_type = request.get_content_type() except exception.InvalidContentType: @@ -699,10 +805,6 @@ def get_body(self, request): LOG.debug(_("No Content-Type provided in request")) return None, '' - if len(request.body) <= 0: - LOG.debug(_("Empty body provided in request")) - return None, '' - return content_type, request.body def deserialize(self, meth, content_type, body): @@ -882,7 +984,7 @@ def _process_stack(self, request, action, action_args, try: msg_dict = dict(url=request.url, status=response.status_int) msg = _("%(url)s returned with HTTP %(status)d") % msg_dict - except AttributeError, e: + except AttributeError as e: msg_dict = dict(url=request.url, e=e) msg = _("%(url)s returned a fault: %(e)s") % msg_dict @@ -901,7 +1003,7 @@ def get_method(self, request, action, content_type, body): meth = getattr(self.controller, action) except AttributeError: if (not self.wsgi_actions or - action not in ['action', 'create', 'delete']): + action not in ['action', 'create', 'delete']): # Propagate the error raise else: @@ -911,6 +1013,7 @@ def get_method(self, request, action, content_type, body): # OK, it's an action; figure out which action... 
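For context: the action_peek helpers above let Resource.get_method discover which @wsgi.action handler a request targets before full deserialization; the body's single top-level element names the action. A minimal sketch of the JSON variant (error handling simplified to a ValueError)::

    import json

    def peek_action(body):
        """Return the lone top-level key naming the action (sketch)."""
        decoded = json.loads(body)
        if len(decoded) != 1:
            raise ValueError('expected exactly one action key')
        return list(decoded)[0]

    assert peek_action('{"os-attach": {"mountpoint": "/dev/vdb"}}') == 'os-attach'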
mtype = _MEDIA_TYPE_MAP.get(content_type) action_name = self.action_peek[mtype](body) + LOG.debug("Action body: %s" % body) else: action_name = action @@ -1000,11 +1103,10 @@ def __new__(mcs, name, bases, cls_dict): cls_dict) +@six.add_metaclass(ControllerMetaclass) class Controller(object): """Default controller.""" - __metaclass__ = ControllerMetaclass - _view_builder_class = None def __init__(self, view_builder=None): @@ -1037,17 +1139,16 @@ def is_dict(d): class Fault(webob.exc.HTTPException): """Wrap webob.exc.HTTPException to provide API friendly response.""" - _fault_names = { - 400: "badRequest", - 401: "unauthorized", - 403: "forbidden", - 404: "itemNotFound", - 405: "badMethod", - 409: "conflictingRequest", - 413: "overLimit", - 415: "badMediaType", - 501: "notImplemented", - 503: "serviceUnavailable"} + _fault_names = {400: "badRequest", + 401: "unauthorized", + 403: "forbidden", + 404: "itemNotFound", + 405: "badMethod", + 409: "conflictingRequest", + 413: "overLimit", + 415: "badMediaType", + 501: "notImplemented", + 503: "serviceUnavailable"} def __init__(self, exception): """Create a Fault for the given webob.exc.exception.""" @@ -1058,15 +1159,18 @@ def __init__(self, exception): def __call__(self, req): """Generate a WSGI response based on the exception passed to ctor.""" # Replace the body with fault details. + locale = req.best_match_language() code = self.wrapped_exc.status_int fault_name = self._fault_names.get(code, "computeFault") + explanation = self.wrapped_exc.explanation fault_data = { fault_name: { 'code': code, - 'message': self.wrapped_exc.explanation}} + 'message': gettextutils.translate(explanation, locale)}} if code == 413: - retry = self.wrapped_exc.headers['Retry-After'] - fault_data[fault_name]['retryAfter'] = retry + retry = self.wrapped_exc.headers.get('Retry-After', None) + if retry: + fault_data[fault_name]['retryAfter'] = retry # 'code' is an attribute on the fault tag itself metadata = {'attributes': {fault_name: 'code'}} @@ -1096,14 +1200,10 @@ def _set_request_id_header(req, headers): class OverLimitFault(webob.exc.HTTPException): - """ - Rate-limited request response. - """ + """Rate-limited request response.""" def __init__(self, message, details, retry_time): - """ - Initialize new `OverLimitFault` with relevant information. - """ + """Initialize new `OverLimitFault` with relevant information.""" hdrs = OverLimitFault._retry_after(retry_time) self.wrapped_exc = webob.exc.HTTPRequestEntityTooLarge(headers=hdrs) self.content = { @@ -1123,13 +1223,19 @@ def _retry_after(retry_time): @webob.dec.wsgify(RequestClass=Request) def __call__(self, request): - """ - Return the wrapped exception with a serialized body conforming to our - error format. 
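For context: the Fault wrapper above reshapes a raised webob HTTP error into the API fault format, keyed by the _fault_names entry for the status code. Illustratively, a 404 serializes to a body shaped like this (the message text is made up)::

    fault_names = {400: 'badRequest', 404: 'itemNotFound'}  # excerpt of the map
    code = 404
    fault_data = {
        fault_names.get(code, 'computeFault'): {
            'code': code,
            'message': 'Volume could not be found',  # translated per locale
        },
    }
    # A 413 additionally gains a 'retryAfter' key when the wrapped
    # exception supplied a Retry-After header, as handled above.
    print(fault_data)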
- """ + """Serializes the wrapped exception conforming to our error format.""" content_type = request.best_match_content_type() metadata = {"attributes": {"overLimitFault": "code"}} + def translate(msg): + locale = request.best_match_language() + return gettextutils.translate(msg, locale) + + self.content['overLimitFault']['message'] = \ + translate(self.content['overLimitFault']['message']) + self.content['overLimitFault']['details'] = \ + translate(self.content['overLimitFault']['details']) + xml_serializer = XMLDictSerializer(metadata, XMLNS_V1) serializer = { 'application/xml': xml_serializer, diff --git a/cinder/api/openstack/volume/schemas/atom-link.rng b/cinder/api/schemas/atom-link.rng similarity index 100% rename from cinder/api/openstack/volume/schemas/atom-link.rng rename to cinder/api/schemas/atom-link.rng diff --git a/cinder/api/openstack/volume/schemas/v1.1/extension.rng b/cinder/api/schemas/v1.1/extension.rng similarity index 100% rename from cinder/api/openstack/volume/schemas/v1.1/extension.rng rename to cinder/api/schemas/v1.1/extension.rng diff --git a/cinder/api/openstack/volume/schemas/v1.1/extensions.rng b/cinder/api/schemas/v1.1/extensions.rng similarity index 100% rename from cinder/api/openstack/volume/schemas/v1.1/extensions.rng rename to cinder/api/schemas/v1.1/extensions.rng diff --git a/cinder/api/openstack/volume/schemas/v1.1/limits.rng b/cinder/api/schemas/v1.1/limits.rng similarity index 100% rename from cinder/api/openstack/volume/schemas/v1.1/limits.rng rename to cinder/api/schemas/v1.1/limits.rng diff --git a/cinder/api/openstack/volume/schemas/v1.1/metadata.rng b/cinder/api/schemas/v1.1/metadata.rng similarity index 100% rename from cinder/api/openstack/volume/schemas/v1.1/metadata.rng rename to cinder/api/schemas/v1.1/metadata.rng diff --git a/cinder/api/sizelimit.py b/cinder/api/sizelimit.py index 2479912628..1f7359ff0a 100644 --- a/cinder/api/sizelimit.py +++ b/cinder/api/sizelimit.py @@ -1,6 +1,6 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright (c) 2012 OpenStack, LLC +# Copyright (c) 2013 OpenStack Foundation +# +# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -13,41 +13,16 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -""" -Request Body limiting middleware. - -""" - -import webob.dec -import webob.exc -from cinder import flags -from cinder.openstack.common import cfg +from cinder.api.middleware import sizelimit from cinder.openstack.common import log as logging -from cinder import wsgi - -#default request size is 112k -max_request_body_size_opt = cfg.IntOpt('osapi_max_request_body_size', - default=114688, - help='Max size for body of a request') - -FLAGS = flags.FLAGS -FLAGS.register_opt(max_request_body_size_opt) LOG = logging.getLogger(__name__) -class RequestBodySizeLimiter(wsgi.Middleware): - """Add a 'cinder.context' to WSGI environ.""" - +class RequestBodySizeLimiter(sizelimit.RequestBodySizeLimiter): def __init__(self, *args, **kwargs): + LOG.warn(_('cinder.api.sizelimit:RequestBodySizeLimiter is ' + 'deprecated. 
Please use cinder.api.middleware.sizelimit:' + 'RequestBodySizeLimiter instead')) super(RequestBodySizeLimiter, self).__init__(*args, **kwargs) - - @webob.dec.wsgify(RequestClass=wsgi.Request) - def __call__(self, req): - if (req.content_length > FLAGS.osapi_max_request_body_size - or len(req.body) > FLAGS.osapi_max_request_body_size): - msg = _("Request is too large.") - raise webob.exc.HTTPBadRequest(explanation=msg) - else: - return self.application diff --git a/cinder/api/urlmap.py b/cinder/api/urlmap.py new file mode 100644 index 0000000000..e3a9ee5fb3 --- /dev/null +++ b/cinder/api/urlmap.py @@ -0,0 +1,295 @@ +# Copyright 2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import paste.urlmap +import re +import urllib2 + +from cinder.api.openstack import wsgi +from cinder.openstack.common import log as logging + + +_quoted_string_re = r'"[^"\\]*(?:\\.[^"\\]*)*"' +_option_header_piece_re = re.compile( + r';\s*([^\s;=]+|%s)\s*' + r'(?:=\s*([^;]+|%s))?\s*' % + (_quoted_string_re, _quoted_string_re)) + +LOG = logging.getLogger(__name__) + + +def unquote_header_value(value): + """Unquotes a header value. + This does not use the real unquoting but what browsers are actually + using for quoting. + + :param value: the header value to unquote. + """ + if value and value[0] == value[-1] == '"': + # this is not the real unquoting, but fixing this so that the + # RFC is met will result in bugs with internet explorer and + # probably some other browsers as well. IE for example is + # uploading files with "C:\foo\bar.txt" as filename + value = value[1:-1] + return value + + +def parse_list_header(value): + """Parse lists as described by RFC 2068 Section 2. + + In particular, parse comma-separated lists where the elements of + the list may include quoted-strings. A quoted-string could + contain a comma. A non-quoted string could have quotes in the + middle. Quotes are removed automatically after parsing. + + The return value is a standard :class:`list`: + + >>> parse_list_header('token, "quoted value"') + ['token', 'quoted value'] + + :param value: a string with a list header. + :return: :class:`list` + """ + result = [] + for item in urllib2.parse_http_list(value): + if item[:1] == item[-1:] == '"': + item = unquote_header_value(item[1:-1]) + result.append(item) + return result + + +def parse_options_header(value): + """Parse a ``Content-Type`` like header into a tuple with the content + type and the options: + + >>> parse_options_header('Content-Type: text/html; mimetype=text/html') + ('Content-Type:', {'mimetype': 'text/html'}) + + :param value: the header to parse. 
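For context: the header helpers opening the new urlmap module are easiest to grasp from their behaviour. Assuming the module above is importable as cinder.api.urlmap, the quoting rules work out as::

    from cinder.api import urlmap

    # Quoted list elements may contain commas; quotes are stripped afterwards.
    print(urlmap.parse_list_header('token, "quoted value"'))
    # ['token', 'quoted value']

    # Only the surrounding quotes are removed; this deliberately is not full
    # RFC unquoting, per the IE filename caveat in the comment above.
    print(urlmap.unquote_header_value('"C:\\foo\\bar.txt"'))
    # C:\foo\bar.txt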
+ :return: (str, options) + """ + def _tokenize(string): + for match in _option_header_piece_re.finditer(string): + key, value = match.groups() + key = unquote_header_value(key) + if value is not None: + value = unquote_header_value(value) + yield key, value + + if not value: + return '', {} + + parts = _tokenize(';' + value) + name = parts.next()[0] + extra = dict(parts) + return name, extra + + +class Accept(object): + def __init__(self, value): + self._content_types = [parse_options_header(v) for v in + parse_list_header(value)] + + def best_match(self, supported_content_types): + # FIXME: Should we have a more sophisticated matching algorithm that + # takes into account the version as well? + best_quality = -1 + best_content_type = None + best_params = {} + best_match = '*/*' + + for content_type in supported_content_types: + for content_mask, params in self._content_types: + try: + quality = float(params.get('q', 1)) + except ValueError: + continue + + if quality < best_quality: + continue + elif best_quality == quality: + if best_match.count('*') <= content_mask.count('*'): + continue + + if self._match_mask(content_mask, content_type): + best_quality = quality + best_content_type = content_type + best_params = params + best_match = content_mask + + return best_content_type, best_params + + def content_type_params(self, best_content_type): + """Find parameters in Accept header for given content type.""" + for content_type, params in self._content_types: + if best_content_type == content_type: + return params + + return {} + + def _match_mask(self, mask, content_type): + if '*' not in mask: + return content_type == mask + if mask == '*/*': + return True + mask_major = mask[:-2] + content_type_major = content_type.split('/', 1)[0] + return content_type_major == mask_major + + +def urlmap_factory(loader, global_conf, **local_conf): + if 'not_found_app' in local_conf: + not_found_app = local_conf.pop('not_found_app') + else: + not_found_app = global_conf.get('not_found_app') + if not_found_app: + not_found_app = loader.get_app(not_found_app, global_conf=global_conf) + urlmap = URLMap(not_found_app=not_found_app) + for path, app_name in local_conf.items(): + path = paste.urlmap.parse_path_expression(path) + app = loader.get_app(app_name, global_conf=global_conf) + urlmap[path] = app + return urlmap + + +class URLMap(paste.urlmap.URLMap): + def _match(self, host, port, path_info): + """Find longest match for a given URL path.""" + for (domain, app_url), app in self.applications: + if domain and domain != host and domain != host + ':' + port: + continue + if (path_info == app_url or path_info.startswith(app_url + '/')): + return app, app_url + + return None, None + + def _set_script_name(self, app, app_url): + def wrap(environ, start_response): + environ['SCRIPT_NAME'] += app_url + return app(environ, start_response) + + return wrap + + def _munge_path(self, app, path_info, app_url): + def wrap(environ, start_response): + environ['SCRIPT_NAME'] += app_url + environ['PATH_INFO'] = path_info[len(app_url):] + return app(environ, start_response) + + return wrap + + def _path_strategy(self, host, port, path_info): + """Check path suffix for MIME type and path prefix for API version.""" + mime_type = app = app_url = None + + parts = path_info.rsplit('.', 1) + if len(parts) > 1: + possible_type = 'application/' + parts[1] + if possible_type in wsgi.SUPPORTED_CONTENT_TYPES: + mime_type = possible_type + + parts = path_info.split('/') + if len(parts) > 1: + possible_app, possible_app_url = 
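For context: Accept.best_match above prefers the highest q-value and breaks ties in favour of the more specific (fewer-wildcard) mask. Still assuming the module imports as cinder.api.urlmap::

    from cinder.api import urlmap

    accept = urlmap.Accept('application/xml;q=0.8, application/json;q=0.9')
    content_type, params = accept.best_match(['application/json',
                                              'application/xml'])
    print(content_type)     # application/json, since the higher q wins
    print(params.get('q'))  # '0.9'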
self._match(host, port, path_info) + # Don't use prefix if it ends up matching default + if possible_app and possible_app_url: + app_url = possible_app_url + app = self._munge_path(possible_app, path_info, app_url) + + return mime_type, app, app_url + + def _content_type_strategy(self, host, port, environ): + """Check Content-Type header for API version.""" + app = None + params = parse_options_header(environ.get('CONTENT_TYPE', ''))[1] + if 'version' in params: + app, app_url = self._match(host, port, '/v' + params['version']) + if app: + app = self._set_script_name(app, app_url) + + return app + + def _accept_strategy(self, host, port, environ, supported_content_types): + """Check Accept header for best matching MIME type and API version.""" + accept = Accept(environ.get('HTTP_ACCEPT', '')) + + app = None + + # Find the best match in the Accept header + mime_type, params = accept.best_match(supported_content_types) + if 'version' in params: + app, app_url = self._match(host, port, '/v' + params['version']) + if app: + app = self._set_script_name(app, app_url) + + return mime_type, app + + def __call__(self, environ, start_response): + host = environ.get('HTTP_HOST', environ.get('SERVER_NAME')).lower() + if ':' in host: + host, port = host.split(':', 1) + else: + if environ['wsgi.url_scheme'] == 'http': + port = '80' + else: + port = '443' + + path_info = environ['PATH_INFO'] + path_info = self.normalize_url(path_info, False)[1] + + # The MIME type for the response is determined in one of two ways: + # 1) URL path suffix (eg /servers/detail.json) + # 2) Accept header (eg application/json;q=0.8, application/xml;q=0.2) + + # The API version is determined in one of three ways: + # 1) URL path prefix (eg /v1.1/tenant/servers/detail) + # 2) Content-Type header (eg application/json;version=1.1) + # 3) Accept header (eg application/json;q=0.8;version=1.1) + + supported_content_types = list(wsgi.SUPPORTED_CONTENT_TYPES) + + mime_type, app, app_url = self._path_strategy(host, port, path_info) + + # Accept application/atom+xml for the index query of each API + # version mount point as well as the root index + if (app_url and app_url + '/' == path_info) or path_info == '/': + supported_content_types.append('application/atom+xml') + + if not app: + app = self._content_type_strategy(host, port, environ) + + if not mime_type or not app: + possible_mime_type, possible_app = self._accept_strategy( + host, port, environ, supported_content_types) + if possible_mime_type and not mime_type: + mime_type = possible_mime_type + if possible_app and not app: + app = possible_app + + if not mime_type: + mime_type = 'application/json' + + if not app: + # Didn't match a particular version, probably matches default + app, app_url = self._match(host, port, path_info) + if app: + app = self._munge_path(app, path_info, app_url) + + if app: + environ['cinder.best_content_type'] = mime_type + return app(environ, start_response) + + environ['paste.urlmap_object'] = self + return self.not_found_application(environ, start_response) diff --git a/doc/source/_static/.gitignore b/cinder/api/v1/__init__.py similarity index 100% rename from doc/source/_static/.gitignore rename to cinder/api/v1/__init__.py diff --git a/cinder/api/openstack/volume/limits.py b/cinder/api/v1/limits.py similarity index 86% rename from cinder/api/openstack/volume/limits.py rename to cinder/api/v1/limits.py index a9e62dde56..de70243f98 100644 --- a/cinder/api/openstack/volume/limits.py +++ b/cinder/api/v1/limits.py @@ -1,4 +1,4 @@ -# Copyright 2011 
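For context: URLMap.__call__ above resolves the response MIME type by path suffix first and the Accept header second, and the API version by path prefix, then Content-Type, then Accept. The suffix rule in isolation is just::

    # Sketch of the suffix check inside _path_strategy above.
    SUPPORTED_CONTENT_TYPES = ('application/json', 'application/xml')

    def mime_from_suffix(path_info):
        parts = path_info.rsplit('.', 1)
        if len(parts) > 1:
            possible_type = 'application/' + parts[1]
            if possible_type in SUPPORTED_CONTENT_TYPES:
                return possible_type
        return None  # caller falls back to Content-Type/Accept strategies

    print(mime_from_suffix('/v1/proj/volumes/detail.json'))  # application/json
    print(mime_from_suffix('/v1/proj/volumes/detail'))       # None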
OpenStack LLC. +# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -27,15 +27,17 @@ import webob.dec import webob.exc -from cinder.api.openstack.volume.views import limits as limits_views from cinder.api.openstack import wsgi -from cinder.api.openstack import xmlutil +from cinder.api.views import limits as limits_views +from cinder.api import xmlutil +from cinder.openstack.common.gettextutils import _ from cinder.openstack.common import importutils from cinder.openstack.common import jsonutils from cinder import quota from cinder import wsgi as base_wsgi QUOTAS = quota.QUOTAS +LIMITS_PREFIX = "limits." # Convenience constants for the limits dictionary passed to Limiter(). @@ -73,16 +75,12 @@ def construct(self): return xmlutil.MasterTemplate(root, 1, nsmap=limits_nsmap) -class LimitsController(object): - """ - Controller for accessing limits in the OpenStack API. - """ +class LimitsController(wsgi.Controller): + """Controller for accessing limits in the OpenStack API.""" @wsgi.serializers(xml=LimitsTemplate) def index(self, req): - """ - Return all global and rate limit information. - """ + """Return all global and rate limit information.""" context = req.environ['cinder.context'] quotas = QUOTAS.get_project_quotas(context, context.project_id, usages=False) @@ -101,9 +99,7 @@ def create_resource(): class Limit(object): - """ - Stores information about a limit for HTTP requests. - """ + """Stores information about a limit for HTTP requests.""" UNITS = { 1: "SECOND", @@ -115,8 +111,7 @@ class Limit(object): UNIT_MAP = dict([(v, k) for k, v in UNITS.items()]) def __init__(self, verb, uri, regex, value, unit): - """ - Initialize a new `Limit`. + """Initialize a new `Limit`. @param verb: HTTP verb (POST, PUT, etc.) @param uri: Human-readable URI @@ -146,8 +141,7 @@ def __init__(self, verb, uri, regex, value, unit): self.error_message = msg % self.__dict__ def __call__(self, verb, url): - """ - Represents a call to this limit from a relevant request. + """Represent a call to this limit from a relevant request. @param verb: string http verb (POST, GET, etc.) @param url: string URL @@ -215,15 +209,15 @@ def display(self): class RateLimitingMiddleware(base_wsgi.Middleware): - """ - Rate-limits requests passing through this middleware. All limit information - is stored in memory for this implementation. + """Rate-limits requests passing through this middleware. + + All limit information is stored in memory for this implementation. """ def __init__(self, application, limits=None, limiter=None, **kwargs): - """ - Initialize new `RateLimitingMiddleware`, which wraps the given WSGI - application and sets up the given limits. + """Initialize new `RateLimitingMiddleware` + + This wraps the given WSGI application and sets up the given limits. @param application: WSGI application to wrap @param limits: String describing limits @@ -247,10 +241,10 @@ def __init__(self, application, limits=None, limiter=None, **kwargs): @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): - """ - Represents a single call through this middleware. We should record the - request if we have a limit relevant to it. If no limit is relevant to - the request, ignore it. + """Represent a single call through this middleware. + + We should record the request if we have a limit relevant to it. + If no limit is relevant to the request, ignore it. 
If the request should be rate limited, return a fault telling the user they are over the limit and need to retry later. @@ -277,13 +271,10 @@ def __call__(self, req): class Limiter(object): - """ - Rate-limit checking class which handles limits in memory. - """ + """Rate-limit checking class which handles limits in memory.""" def __init__(self, limits, **kwargs): - """ - Initialize the new `Limiter`. + """Initialize the new `Limiter`. @param limits: List of `Limit` objects """ @@ -292,19 +283,16 @@ def __init__(self, limits, **kwargs): # Pick up any per-user limit information for key, value in kwargs.items(): - if key.startswith('user:'): - username = key[5:] + if key.startswith(LIMITS_PREFIX): + username = key[len(LIMITS_PREFIX):] self.levels[username] = self.parse_limits(value) def get_limits(self, username=None): - """ - Return the limits for a given user. - """ + """Return the limits for a given user.""" return [limit.display() for limit in self.levels[username]] def check_for_delay(self, verb, url, username=None): - """ - Check the given verb/user/user triplet for limit. + """Check the given verb/user/user triplet for limit. @return: Tuple of delay (in seconds) and error message (or None, None) """ @@ -328,9 +316,9 @@ def check_for_delay(self, verb, url, username=None): # default limit parsing. @staticmethod def parse_limits(limits): - """ - Convert a string into a list of Limit instances. This - implementation expects a semicolon-separated sequence of + """Convert a string into a list of Limit instances. + + This implementation expects a semicolon-separated sequence of parenthesized groups, where each group contains a comma-separated sequence consisting of HTTP method, user-readable URI, a URI reg-exp, an integer number of @@ -383,8 +371,9 @@ def parse_limits(limits): class WsgiLimiter(object): - """ - Rate-limit checking from a WSGI application. Uses an in-memory `Limiter`. + """Rate-limit checking from a WSGI application. + + Uses an in-memory `Limiter`. To use, POST ``/`` with JSON data such as:: @@ -399,8 +388,7 @@ class WsgiLimiter(object): """ def __init__(self, limits=None): - """ - Initialize the new `WsgiLimiter`. + """Initialize the new `WsgiLimiter`. @param limits: List of `Limit` objects """ @@ -408,10 +396,11 @@ def __init__(self, limits=None): @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, request): - """ - Handles a call to this application. Returns 204 if the request is - acceptable to the limiter, else a 403 is returned with a relevant - header indicating when the request *will* succeed. + """Handles a call to this application. + + Returns 204 if the request is acceptable to the limiter, else a 403 + is returned with a relevant header indicating when the request + *will* succeed. """ if request.method != "POST": raise webob.exc.HTTPMethodNotAllowed() @@ -435,13 +424,10 @@ def __call__(self, request): class WsgiLimiterProxy(object): - """ - Rate-limit requests based on answers from a remote source. - """ + """Rate-limit requests based on answers from a remote source.""" def __init__(self, limiter_address): - """ - Initialize the new `WsgiLimiterProxy`. + """Initialize the new `WsgiLimiterProxy`. @param limiter_address: IP/port combination of where to request limit """ @@ -472,9 +458,7 @@ def check_for_delay(self, verb, path, username=None): # decisions are made by a remote server. @staticmethod def parse_limits(limits): - """ - Ignore a limits string--simply doesn't apply for the limit - proxy. 
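For context: Limiter.parse_limits above consumes a semicolon-separated list of parenthesized (verb, URI, regex, value, unit) groups, and the new LIMITS_PREFIX means per-user overrides now arrive as 'limits.<username>' keyword arguments instead of 'user:<username>'. An illustrative, made-up configuration::

    # Hypothetical limits string: POST capped at 10/minute, GET at 100/hour.
    default_limits = '(POST, *, .*, 10, MINUTE);(GET, *, .*, 100, HOUR)'

    # Hypothetical per-user override using the "limits." prefix parsed above;
    # this would give user "alice" a tighter POST budget than the default:
    #     Limiter(default_limits, **{'limits.alice': '(POST, *, .*, 5, MINUTE)'})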
+ """Ignore a limits string--simply doesn't apply for the limit proxy. @return: Empty list. """ diff --git a/cinder/api/v1/router.py b/cinder/api/v1/router.py new file mode 100644 index 0000000000..12259ec005 --- /dev/null +++ b/cinder/api/v1/router.py @@ -0,0 +1,96 @@ +# Copyright 2011 OpenStack Foundation +# Copyright 2011 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +WSGI middleware for OpenStack Volume API. +""" + +from cinder.api import extensions +import cinder.api.openstack +from cinder.api.v1 import limits +from cinder.api.v1 import snapshot_metadata +from cinder.api.v1 import snapshots +from cinder.api.v1 import types +from cinder.api.v1 import volume_metadata +from cinder.api.v1 import volumes +from cinder.api import versions +from cinder.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + + +class APIRouter(cinder.api.openstack.APIRouter): + """Routes requests on the API to the appropriate controller and method.""" + ExtensionManager = extensions.ExtensionManager + + def _setup_routes(self, mapper, ext_mgr): + self.resources['versions'] = versions.create_resource() + mapper.connect("versions", "/", + controller=self.resources['versions'], + action='show') + + mapper.redirect("", "/") + + self.resources['volumes'] = volumes.create_resource(ext_mgr) + mapper.resource("volume", "volumes", + controller=self.resources['volumes'], + collection={'detail': 'GET'}, + member={'action': 'POST'}) + + self.resources['types'] = types.create_resource() + mapper.resource("type", "types", + controller=self.resources['types']) + + self.resources['snapshots'] = snapshots.create_resource(ext_mgr) + mapper.resource("snapshot", "snapshots", + controller=self.resources['snapshots'], + collection={'detail': 'GET'}, + member={'action': 'POST'}) + + self.resources['snapshot_metadata'] = \ + snapshot_metadata.create_resource() + snapshot_metadata_controller = self.resources['snapshot_metadata'] + + mapper.resource("snapshot_metadata", "metadata", + controller=snapshot_metadata_controller, + parent_resource=dict(member_name='snapshot', + collection_name='snapshots')) + + mapper.connect("metadata", + "/{project_id}/snapshots/{snapshot_id}/metadata", + controller=snapshot_metadata_controller, + action='update_all', + conditions={"method": ['PUT']}) + + self.resources['limits'] = limits.create_resource() + mapper.resource("limit", "limits", + controller=self.resources['limits']) + self.resources['volume_metadata'] = \ + volume_metadata.create_resource() + volume_metadata_controller = self.resources['volume_metadata'] + + mapper.resource("volume_metadata", "metadata", + controller=volume_metadata_controller, + parent_resource=dict(member_name='volume', + collection_name='volumes')) + + mapper.connect("metadata", + "/{project_id}/volumes/{volume_id}/metadata", + controller=volume_metadata_controller, + action='update_all', + 
conditions={"method": ['PUT']}) diff --git a/cinder/api/v1/snapshot_metadata.py b/cinder/api/v1/snapshot_metadata.py new file mode 100644 index 0000000000..b2df8b68f3 --- /dev/null +++ b/cinder/api/v1/snapshot_metadata.py @@ -0,0 +1,162 @@ +# Copyright 2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import webob + +from cinder.api import common +from cinder.api.openstack import wsgi +from cinder import exception +from cinder import volume +from webob import exc + + +class Controller(wsgi.Controller): + """The snapshot metadata API controller for the OpenStack API.""" + + def __init__(self): + self.volume_api = volume.API() + super(Controller, self).__init__() + + def _get_metadata(self, context, snapshot_id): + try: + snapshot = self.volume_api.get_snapshot(context, snapshot_id) + meta = self.volume_api.get_snapshot_metadata(context, snapshot) + except exception.SnapshotNotFound: + msg = _('snapshot does not exist') + raise exc.HTTPNotFound(explanation=msg) + return meta + + @wsgi.serializers(xml=common.MetadataTemplate) + def index(self, req, snapshot_id): + """Returns the list of metadata for a given snapshot.""" + context = req.environ['cinder.context'] + return {'metadata': self._get_metadata(context, snapshot_id)} + + @wsgi.serializers(xml=common.MetadataTemplate) + @wsgi.deserializers(xml=common.MetadataDeserializer) + def create(self, req, snapshot_id, body): + try: + metadata = body['metadata'] + except (KeyError, TypeError): + msg = _("Malformed request body") + raise exc.HTTPBadRequest(explanation=msg) + + context = req.environ['cinder.context'] + + new_metadata = self._update_snapshot_metadata(context, + snapshot_id, + metadata, + delete=False) + + return {'metadata': new_metadata} + + @wsgi.serializers(xml=common.MetaItemTemplate) + @wsgi.deserializers(xml=common.MetaItemDeserializer) + def update(self, req, snapshot_id, id, body): + try: + meta_item = body['meta'] + except (TypeError, KeyError): + expl = _('Malformed request body') + raise exc.HTTPBadRequest(explanation=expl) + + if id not in meta_item: + expl = _('Request body and URI mismatch') + raise exc.HTTPBadRequest(explanation=expl) + + if len(meta_item) > 1: + expl = _('Request body contains too many items') + raise exc.HTTPBadRequest(explanation=expl) + + context = req.environ['cinder.context'] + self._update_snapshot_metadata(context, + snapshot_id, + meta_item, + delete=False) + + return {'meta': meta_item} + + @wsgi.serializers(xml=common.MetadataTemplate) + @wsgi.deserializers(xml=common.MetadataDeserializer) + def update_all(self, req, snapshot_id, body): + try: + metadata = body['metadata'] + except (TypeError, KeyError): + expl = _('Malformed request body') + raise exc.HTTPBadRequest(explanation=expl) + + context = req.environ['cinder.context'] + new_metadata = self._update_snapshot_metadata(context, + snapshot_id, + metadata, + delete=True) + + return {'metadata': new_metadata} + + def _update_snapshot_metadata(self, context, + snapshot_id, metadata, + 
delete=False): + try: + snapshot = self.volume_api.get_snapshot(context, snapshot_id) + return self.volume_api.update_snapshot_metadata(context, + snapshot, + metadata, + delete) + except exception.SnapshotNotFound: + msg = _('snapshot does not exist') + raise exc.HTTPNotFound(explanation=msg) + + except (ValueError, AttributeError): + msg = _("Malformed request body") + raise exc.HTTPBadRequest(explanation=msg) + + except exception.InvalidVolumeMetadata as error: + raise exc.HTTPBadRequest(explanation=error.msg) + + except exception.InvalidVolumeMetadataSize as error: + raise exc.HTTPRequestEntityTooLarge(explanation=error.msg) + + @wsgi.serializers(xml=common.MetaItemTemplate) + def show(self, req, snapshot_id, id): + """Return a single metadata item.""" + context = req.environ['cinder.context'] + data = self._get_metadata(context, snapshot_id) + + try: + return {'meta': {id: data[id]}} + except KeyError: + msg = _("Metadata item was not found") + raise exc.HTTPNotFound(explanation=msg) + + def delete(self, req, snapshot_id, id): + """Deletes an existing metadata.""" + context = req.environ['cinder.context'] + + metadata = self._get_metadata(context, snapshot_id) + + if id not in metadata: + msg = _("Metadata item was not found") + raise exc.HTTPNotFound(explanation=msg) + + try: + snapshot = self.volume_api.get_snapshot(context, snapshot_id) + self.volume_api.delete_snapshot_metadata(context, snapshot, id) + except exception.SnapshotNotFound: + msg = _('snapshot does not exist') + raise exc.HTTPNotFound(explanation=msg) + return webob.Response(status_int=200) + + +def create_resource(): + return wsgi.Resource(Controller()) diff --git a/cinder/api/openstack/volume/snapshots.py b/cinder/api/v1/snapshots.py similarity index 78% rename from cinder/api/openstack/volume/snapshots.py rename to cinder/api/v1/snapshots.py index 59d26a6e05..1e44111434 100644 --- a/cinder/api/openstack/volume/snapshots.py +++ b/cinder/api/v1/snapshots.py @@ -15,16 +15,16 @@ """The volumes snapshots api.""" -from webob import exc import webob +from webob import exc -from cinder.api.openstack import common +from cinder.api import common from cinder.api.openstack import wsgi -from cinder.api.openstack import xmlutil -from cinder.api.openstack.volume import volumes +from cinder.api.v1 import volumes +from cinder.api import xmlutil from cinder import exception -from cinder import flags from cinder.openstack.common import log as logging +from cinder.openstack.common import strutils from cinder import utils from cinder import volume @@ -32,9 +32,6 @@ LOG = logging.getLogger(__name__) -FLAGS = flags.FLAGS - - def _translate_snapshot_detail_view(context, snapshot): """Maps keys for snapshots details view.""" @@ -56,6 +53,15 @@ def _translate_snapshot_summary_view(context, snapshot): d['status'] = snapshot['status'] d['size'] = snapshot['volume_size'] + if snapshot.get('snapshot_metadata'): + metadata = snapshot.get('snapshot_metadata') + d['metadata'] = dict((item['key'], item['value']) for item in metadata) + # avoid circular ref when vol is a Volume instance + elif snapshot.get('metadata') and isinstance(snapshot.get('metadata'), + dict): + d['metadata'] = snapshot['metadata'] + else: + d['metadata'] = {} return d @@ -67,6 +73,7 @@ def make_snapshot(elem): elem.set('display_name') elem.set('display_description') elem.set('volume_id') + elem.append(common.MetadataTemplate()) class SnapshotTemplate(xmlutil.TemplateBuilder): @@ -132,8 +139,12 @@ def _items(self, req, entity_maker): """Returns a list of snapshots, 
transformed through entity_maker.""" context = req.environ['cinder.context'] - search_opts = {} - search_opts.update(req.GET) + #pop out limit and offset , they are not search_opts + search_opts = req.GET.copy() + search_opts.pop('limit', None) + search_opts.pop('offset', None) + + #filter out invalid option allowed_search_options = ('status', 'volume_id', 'display_name') volumes.remove_invalid_options(context, search_opts, allowed_search_options) @@ -147,14 +158,26 @@ def _items(self, req, entity_maker): @wsgi.serializers(xml=SnapshotTemplate) def create(self, req, body): """Creates a new snapshot.""" + kwargs = {} context = req.environ['cinder.context'] if not self.is_valid_body(body, 'snapshot'): raise exc.HTTPUnprocessableEntity() snapshot = body['snapshot'] - volume_id = snapshot['volume_id'] - volume = self.volume_api.get(context, volume_id) + kwargs['metadata'] = snapshot.get('metadata', None) + + try: + volume_id = snapshot['volume_id'] + except KeyError: + msg = _("'volume_id' must be specified") + raise exc.HTTPBadRequest(explanation=msg) + + try: + volume = self.volume_api.get(context, volume_id) + except exception.NotFound: + raise exc.HTTPNotFound() + force = snapshot.get('force', False) msg = _("Create snapshot from volume %s") LOG.audit(msg, volume_id, context=context) @@ -163,16 +186,20 @@ def create(self, req, body): msg = _("Invalid value '%s' for force. ") % force raise exception.InvalidParameterValue(err=msg) - if utils.bool_from_str(force): - new_snapshot = self.volume_api.create_snapshot_force(context, - volume, - snapshot.get('display_name'), - snapshot.get('display_description')) + if strutils.bool_from_string(force): + new_snapshot = self.volume_api.create_snapshot_force( + context, + volume, + snapshot.get('display_name'), + snapshot.get('display_description'), + **kwargs) else: - new_snapshot = self.volume_api.create_snapshot(context, - volume, - snapshot.get('display_name'), - snapshot.get('display_description')) + new_snapshot = self.volume_api.create_snapshot( + context, + volume, + snapshot.get('display_name'), + snapshot.get('display_description'), + **kwargs) retval = _translate_snapshot_detail_view(context, new_snapshot) @@ -186,7 +213,7 @@ def update(self, req, id, body): if not body: raise exc.HTTPUnprocessableEntity() - if not 'snapshot' in body: + if 'snapshot' not in body: raise exc.HTTPUnprocessableEntity() snapshot = body['snapshot'] diff --git a/cinder/api/openstack/volume/types.py b/cinder/api/v1/types.py similarity index 85% rename from cinder/api/openstack/volume/types.py rename to cinder/api/v1/types.py index 2a9bd364a1..aa1256b33e 100644 --- a/cinder/api/openstack/volume/types.py +++ b/cinder/api/v1/types.py @@ -1,7 +1,5 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright (c) 2011 Zadara Storage Inc. -# Copyright (c) 2011 OpenStack LLC. +# Copyright (c) 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -15,13 +13,13 @@ # License for the specific language governing permissions and limitations # under the License. 
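For context: the snapshot create path above now validates 'force' and coerces it with strutils.bool_from_string instead of the old utils.bool_from_str. A simplified stand-in for that coercion (the real oslo helper accepts the same common spellings, plus a strict mode)::

    TRUE_STRINGS = ('1', 't', 'true', 'on', 'y', 'yes')

    def bool_from_string(subject):
        """Loose string-to-bool coercion (stand-in, non-strict)."""
        if isinstance(subject, bool):
            return subject
        return str(subject).strip().lower() in TRUE_STRINGS

    for raw in ('True', 'false', '1', 'no'):
        print('%r -> %r' % (raw, bool_from_string(raw)))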
-""" The volume type & volume types extra specs extension""" +"""The volume type & volume types extra specs extension.""" from webob import exc from cinder.api.openstack import wsgi -from cinder.api.openstack import xmlutil -from cinder.api.openstack.volume.views import types as views_types +from cinder.api.views import types as views_types +from cinder.api import xmlutil from cinder import exception from cinder.volume import volume_types @@ -50,20 +48,20 @@ def construct(self): class VolumeTypesController(wsgi.Controller): - """ The volume types API controller for the OpenStack API """ + """The volume types API controller for the OpenStack API.""" _view_builder_class = views_types.ViewBuilder @wsgi.serializers(xml=VolumeTypesTemplate) def index(self, req): - """ Returns the list of volume types """ + """Returns the list of volume types.""" context = req.environ['cinder.context'] vol_types = volume_types.get_all_types(context).values() return self._view_builder.index(req, vol_types) @wsgi.serializers(xml=VolumeTypeTemplate) def show(self, req, id): - """ Return a single volume type item """ + """Return a single volume type item.""" context = req.environ['cinder.context'] try: diff --git a/cinder/api/v1/volume_metadata.py b/cinder/api/v1/volume_metadata.py new file mode 100644 index 0000000000..1b916d5675 --- /dev/null +++ b/cinder/api/v1/volume_metadata.py @@ -0,0 +1,162 @@ +# Copyright 2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import webob + +from cinder.api import common +from cinder.api.openstack import wsgi +from cinder import exception +from cinder import volume +from webob import exc + + +class Controller(wsgi.Controller): + """The volume metadata API controller for the OpenStack API.""" + + def __init__(self): + self.volume_api = volume.API() + super(Controller, self).__init__() + + def _get_metadata(self, context, volume_id): + try: + volume = self.volume_api.get(context, volume_id) + meta = self.volume_api.get_volume_metadata(context, volume) + except exception.VolumeNotFound: + msg = _('volume does not exist') + raise exc.HTTPNotFound(explanation=msg) + return meta + + @wsgi.serializers(xml=common.MetadataTemplate) + def index(self, req, volume_id): + """Returns the list of metadata for a given volume.""" + context = req.environ['cinder.context'] + return {'metadata': self._get_metadata(context, volume_id)} + + @wsgi.serializers(xml=common.MetadataTemplate) + @wsgi.deserializers(xml=common.MetadataDeserializer) + def create(self, req, volume_id, body): + try: + metadata = body['metadata'] + except (KeyError, TypeError): + msg = _("Malformed request body") + raise exc.HTTPBadRequest(explanation=msg) + + context = req.environ['cinder.context'] + + new_metadata = self._update_volume_metadata(context, + volume_id, + metadata, + delete=False) + + return {'metadata': new_metadata} + + @wsgi.serializers(xml=common.MetaItemTemplate) + @wsgi.deserializers(xml=common.MetaItemDeserializer) + def update(self, req, volume_id, id, body): + try: + meta_item = body['meta'] + except (TypeError, KeyError): + expl = _('Malformed request body') + raise exc.HTTPBadRequest(explanation=expl) + + if id not in meta_item: + expl = _('Request body and URI mismatch') + raise exc.HTTPBadRequest(explanation=expl) + + if len(meta_item) > 1: + expl = _('Request body contains too many items') + raise exc.HTTPBadRequest(explanation=expl) + + context = req.environ['cinder.context'] + self._update_volume_metadata(context, + volume_id, + meta_item, + delete=False) + + return {'meta': meta_item} + + @wsgi.serializers(xml=common.MetadataTemplate) + @wsgi.deserializers(xml=common.MetadataDeserializer) + def update_all(self, req, volume_id, body): + try: + metadata = body['metadata'] + except (TypeError, KeyError): + expl = _('Malformed request body') + raise exc.HTTPBadRequest(explanation=expl) + + context = req.environ['cinder.context'] + new_metadata = self._update_volume_metadata(context, + volume_id, + metadata, + delete=True) + + return {'metadata': new_metadata} + + def _update_volume_metadata(self, context, + volume_id, metadata, + delete=False): + try: + volume = self.volume_api.get(context, volume_id) + return self.volume_api.update_volume_metadata(context, + volume, + metadata, + delete) + except exception.VolumeNotFound: + msg = _('volume does not exist') + raise exc.HTTPNotFound(explanation=msg) + + except (ValueError, AttributeError): + msg = _("Malformed request body") + raise exc.HTTPBadRequest(explanation=msg) + + except exception.InvalidVolumeMetadata as error: + raise exc.HTTPBadRequest(explanation=error.msg) + + except exception.InvalidVolumeMetadataSize as error: + raise exc.HTTPRequestEntityTooLarge(explanation=error.msg) + + @wsgi.serializers(xml=common.MetaItemTemplate) + def show(self, req, volume_id, id): + """Return a single metadata item.""" + context = req.environ['cinder.context'] + data = self._get_metadata(context, volume_id) + + try: + return {'meta': {id: data[id]}} + except KeyError: + msg = _("Metadata 
item was not found") + raise exc.HTTPNotFound(explanation=msg) + + def delete(self, req, volume_id, id): + """Deletes an existing metadata.""" + context = req.environ['cinder.context'] + + metadata = self._get_metadata(context, volume_id) + + if id not in metadata: + msg = _("Metadata item was not found") + raise exc.HTTPNotFound(explanation=msg) + + try: + volume = self.volume_api.get(context, volume_id) + self.volume_api.delete_volume_metadata(context, volume, id) + except exception.VolumeNotFound: + msg = _('volume does not exist') + raise exc.HTTPNotFound(explanation=msg) + return webob.Response(status_int=200) + + +def create_resource(): + return wsgi.Resource(Controller()) diff --git a/cinder/api/openstack/volume/volumes.py b/cinder/api/v1/volumes.py similarity index 65% rename from cinder/api/openstack/volume/volumes.py rename to cinder/api/v1/volumes.py index 69d55c6bd8..8364deb03f 100644 --- a/cinder/api/openstack/volume/volumes.py +++ b/cinder/api/v1/volumes.py @@ -15,27 +15,24 @@ """The volumes api.""" -from webob import exc +import ast import webob -from xml.dom import minidom +from webob import exc -from cinder.api.openstack import common +from cinder.api import common from cinder.api.openstack import wsgi -from cinder.api.openstack import xmlutil +from cinder.api import xmlutil from cinder import exception -from cinder import flags from cinder.openstack.common import log as logging +from cinder.openstack.common import uuidutils from cinder import utils -from cinder import volume +from cinder import volume as cinder_volume from cinder.volume import volume_types LOG = logging.getLogger(__name__) -FLAGS = flags.FLAGS - - def _translate_attachment_detail_view(_context, vol): """Maps keys for attachment details view.""" @@ -57,6 +54,7 @@ def _translate_attachment_summary_view(_context, vol): d['volume_id'] = volume_id d['server_id'] = vol['instance_uuid'] + d['host_name'] = vol['attached_host'] if vol.get('mountpoint'): d['device'] = vol['mountpoint'] @@ -83,6 +81,13 @@ def _translate_volume_summary_view(context, vol, image_id=None): d['availability_zone'] = vol['availability_zone'] d['created_at'] = vol['created_at'] + # Need to form the string true/false explicitly here to + # maintain our API contract + if vol['bootable']: + d['bootable'] = 'true' + else: + d['bootable'] = 'false' + d['attachments'] = [] if vol['attach_status'] == 'attached': attachment = _translate_attachment_detail_view(context, vol) @@ -98,6 +103,7 @@ def _translate_volume_summary_view(context, vol, image_id=None): d['volume_type'] = str(vol['volume_type_id']) d['snapshot_id'] = vol['snapshot_id'] + d['source_volid'] = vol['source_volid'] if image_id: d['image_id'] = image_id @@ -107,6 +113,9 @@ def _translate_volume_summary_view(context, vol, image_id=None): if vol.get('volume_metadata'): metadata = vol.get('volume_metadata') d['metadata'] = dict((item['key'], item['value']) for item in metadata) + # avoid circular ref when vol is a Volume instance + elif vol.get('metadata') and isinstance(vol.get('metadata'), dict): + d['metadata'] = vol['metadata'] else: d['metadata'] = {} @@ -116,6 +125,7 @@ def _translate_volume_summary_view(context, vol, image_id=None): def make_attachment(elem): elem.set('id') elem.set('server_id') + elem.set('host_name') elem.set('volume_id') elem.set('device') @@ -127,9 +137,11 @@ def make_volume(elem): elem.set('availability_zone') elem.set('created_at') elem.set('display_name') + elem.set('bootable') elem.set('display_description') elem.set('volume_type') elem.set('snapshot_id') + 
elem.set('source_volid') attachments = xmlutil.SubTemplateElement(elem, 'attachments') attachment = xmlutil.SubTemplateElement(attachments, 'attachment', @@ -173,7 +185,8 @@ def _extract_volume(self, node): volume_node = self.find_first_child_named(node, 'volume') attributes = ['display_name', 'display_description', 'size', - 'volume_type', 'availability_zone'] + 'volume_type', 'availability_zone', 'imageRef', + 'snapshot_id', 'source_volid'] for attr in attributes: if volume_node.getAttribute(attr): volume[attr] = volume_node.getAttribute(attr) @@ -194,7 +207,7 @@ class CreateDeserializer(CommonDeserializer): def default(self, string): """Deserialize an xml-formatted volume create request.""" - dom = minidom.parseString(string) + dom = utils.safe_minidom_parse_string(string) volume = self._extract_volume(dom) return {'body': {'volume': volume}} @@ -202,11 +215,59 @@ def default(self, string): class VolumeController(wsgi.Controller): """The Volumes API controller for the OpenStack API.""" + _visible_admin_metadata_keys = ['readonly', 'attached_mode'] + def __init__(self, ext_mgr): - self.volume_api = volume.API() + self.volume_api = cinder_volume.API() self.ext_mgr = ext_mgr super(VolumeController, self).__init__() + def _add_visible_admin_metadata(self, context, volume): + if context is None: + return + + visible_admin_meta = {} + + if context.is_admin: + volume_tmp = volume + else: + try: + volume_tmp = self.volume_api.get(context.elevated(), + volume['id']) + except Exception: + return + + if volume_tmp.get('volume_admin_metadata'): + for item in volume_tmp['volume_admin_metadata']: + if item['key'] in self._visible_admin_metadata_keys: + visible_admin_meta[item['key']] = item['value'] + # avoid circular ref when volume is a Volume instance + elif (volume_tmp.get('admin_metadata') and + isinstance(volume_tmp.get('admin_metadata'), dict)): + for key in self._visible_admin_metadata_keys: + if key in volume_tmp['admin_metadata'].keys(): + visible_admin_meta[key] = volume_tmp['admin_metadata'][key] + + if not visible_admin_meta: + return + + # NOTE(zhiyan): update visible administration metadata to + # volume metadata, administration metadata will rewrite existing key. 
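+        # An illustrative example (values made up): with volume metadata
+        # {'readonly': 'False'} and admin metadata {'readonly': 'True'},
+        # the merge below reports readonly='True' to the caller.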
+        if volume.get('volume_metadata'):
+            orig_meta = list(volume.get('volume_metadata'))
+            for item in orig_meta:
+                if item['key'] in visible_admin_meta.keys():
+                    item['value'] = visible_admin_meta.pop(item['key'])
+            for key, value in visible_admin_meta.iteritems():
+                orig_meta.append({'key': key, 'value': value})
+            volume['volume_metadata'] = orig_meta
+        # avoid circular ref when volume is a Volume instance
+        elif (volume.get('metadata') and
+                isinstance(volume.get('metadata'), dict)):
+            volume['metadata'].update(visible_admin_meta)
+        else:
+            volume['metadata'] = visible_admin_meta
+
     @wsgi.serializers(xml=VolumeTemplate)
     def show(self, req, id):
         """Return data about the given volume."""
@@ -214,9 +275,12 @@ def show(self, req, id):
 
         try:
             vol = self.volume_api.get(context, id)
+            req.cache_resource(vol)
         except exception.NotFound:
             raise exc.HTTPNotFound()
 
+        self._add_visible_admin_metadata(context, vol)
+
         return {'volume': _translate_volume_detail_view(context, vol)}
 
     def delete(self, req, id):
@@ -245,15 +309,29 @@ def detail(self, req):
 
     def _items(self, req, entity_maker):
         """Returns a list of volumes, transformed through entity_maker."""
-        search_opts = {}
-        search_opts.update(req.GET)
+        # pop out limit and offset; they are not search_opts
+        search_opts = req.GET.copy()
+        search_opts.pop('limit', None)
+        search_opts.pop('offset', None)
+
+        if 'metadata' in search_opts:
+            search_opts['metadata'] = ast.literal_eval(search_opts['metadata'])
 
         context = req.environ['cinder.context']
         remove_invalid_options(context,
                                search_opts, self._get_volume_search_options())
 
-        volumes = self.volume_api.get_all(context, search_opts=search_opts)
+        volumes = self.volume_api.get_all(context, marker=None, limit=None,
+                                          sort_key='created_at',
+                                          sort_dir='desc', filters=search_opts)
+
+        volumes = [dict(vol.iteritems()) for vol in volumes]
+
+        for volume in volumes:
+            self._add_visible_admin_metadata(context, volume)
+
         limited_list = common.limited(volumes, req)
+        req.cache_resource(limited_list)
         res = [entity_maker(context, vol) for vol in limited_list]
         return {'volumes': res}
 
@@ -266,7 +344,7 @@ def _image_uuid_from_href(self, image_href):
             msg = _("Invalid imageRef provided.")
             raise exc.HTTPBadRequest(explanation=msg)
 
-        if not utils.is_uuid_like(image_uuid):
+        if not uuidutils.is_uuid_like(image_uuid):
             msg = _("Invalid imageRef provided.")
             raise exc.HTTPBadRequest(explanation=msg)
 
@@ -279,6 +357,7 @@ def create(self, req, body):
         if not self.is_valid_body(body, 'volume'):
             raise exc.HTTPUnprocessableEntity()
 
+        LOG.debug('Create volume request body: %s', body)
         context = req.environ['cinder.context']
         volume = body['volume']
 
@@ -287,33 +366,56 @@ def create(self, req, body):
         req_volume_type = volume.get('volume_type', None)
         if req_volume_type:
             try:
-                kwargs['volume_type'] = volume_types.get_volume_type_by_name(
+                if not uuidutils.is_uuid_like(req_volume_type):
+                    kwargs['volume_type'] = \
+                        volume_types.get_volume_type_by_name(
+                            context, req_volume_type)
+                else:
+                    kwargs['volume_type'] = volume_types.get_volume_type(
                         context, req_volume_type)
-            except exception.NotFound:
-                raise exc.HTTPNotFound()
+            except exception.VolumeTypeNotFound:
+                explanation = _('Volume type not found.')
+ raise exc.HTTPNotFound(explanation=explanation) kwargs['metadata'] = volume.get('metadata', None) snapshot_id = volume.get('snapshot_id') if snapshot_id is not None: - kwargs['snapshot'] = self.volume_api.get_snapshot(context, - snapshot_id) + try: + kwargs['snapshot'] = self.volume_api.get_snapshot(context, + snapshot_id) + except exception.NotFound: + explanation = _('snapshot id:%s not found') % snapshot_id + raise exc.HTTPNotFound(explanation=explanation) + else: kwargs['snapshot'] = None + source_volid = volume.get('source_volid') + if source_volid is not None: + try: + kwargs['source_volume'] = \ + self.volume_api.get_volume(context, + source_volid) + except exception.NotFound: + explanation = _('source vol id:%s not found') % source_volid + raise exc.HTTPNotFound(explanation=explanation) + else: + kwargs['source_volume'] = None + size = volume.get('size', None) if size is None and kwargs['snapshot'] is not None: size = kwargs['snapshot']['volume_size'] + elif size is None and kwargs['source_volume'] is not None: + size = kwargs['source_volume']['size'] LOG.audit(_("Create volume of %s GB"), size, context=context) image_href = None image_uuid = None if self.ext_mgr.is_loaded('os-image-create'): + # NOTE(jdg): misleading name "imageRef" as it's an image-id image_href = volume.get('imageRef') - if snapshot_id and image_href: - msg = _("Snapshot and image cannot be specified together.") - raise exc.HTTPBadRequest(explanation=msg) if image_href: image_uuid = self._image_uuid_from_href(image_href) kwargs['image_id'] = image_uuid @@ -329,14 +431,17 @@ def create(self, req, body): # TODO(vish): Instance should be None at db layer instead of # trying to lazy load, but for now we turn it into # a dict to avoid an error. - retval = _translate_volume_detail_view(context, dict(new_volume), - image_uuid) + new_volume = dict(new_volume.iteritems()) + + self._add_visible_admin_metadata(context, new_volume) + + retval = _translate_volume_detail_view(context, new_volume, image_uuid) return {'volume': retval} def _get_volume_search_options(self): """Return volume search options allowed by non-admin.""" - return ('display_name', 'status') + return ('display_name', 'status', 'metadata') @wsgi.serializers(xml=VolumeTemplate) def update(self, req, id, body): @@ -346,7 +451,7 @@ def update(self, req, id, body): if not body: raise exc.HTTPUnprocessableEntity() - if not 'volume' in body: + if 'volume' not in body: raise exc.HTTPUnprocessableEntity() volume = body['volume'] @@ -355,6 +460,7 @@ def update(self, req, id, body): valid_update_keys = ( 'display_name', 'display_description', + 'metadata', ) for key in valid_update_keys: @@ -369,6 +475,8 @@ def update(self, req, id, body): volume.update(update_dict) + self._add_visible_admin_metadata(context, volume) + return {'volume': _translate_volume_detail_view(context, volume)} @@ -383,9 +491,10 @@ def remove_invalid_options(context, search_options, allowed_search_options): return # Otherwise, strip out all unknown options unknown_options = [opt for opt in search_options - if opt not in allowed_search_options] + if opt not in allowed_search_options] bad_options = ", ".join(unknown_options) - log_msg = _("Removing options '%(bad_options)s' from query") % locals() + log_msg = _("Removing options '%(bad_options)s'" + " from query") % {'bad_options': bad_options} LOG.debug(log_msg) for opt in unknown_options: del search_options[opt] diff --git a/doc/source/_static/.placeholder b/cinder/api/v2/__init__.py similarity index 100% rename from 
doc/source/_static/.placeholder
rename to cinder/api/v2/__init__.py
diff --git a/cinder/api/v2/limits.py b/cinder/api/v2/limits.py
new file mode 100644
index 0000000000..d8f17d424a
--- /dev/null
+++ b/cinder/api/v2/limits.py
@@ -0,0 +1,463 @@
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Module dedicated to functions/classes dealing with rate limiting requests.
+"""
+
+import collections
+import copy
+import httplib
+import math
+import re
+import time
+
+import webob.dec
+import webob.exc
+
+from cinder.api.openstack import wsgi
+from cinder.api.views import limits as limits_views
+from cinder.api import xmlutil
+from cinder.openstack.common import importutils
+from cinder.openstack.common import jsonutils
+from cinder import quota
+from cinder import wsgi as base_wsgi
+
+QUOTAS = quota.QUOTAS
+LIMITS_PREFIX = "limits."
+
+
+# Convenience constants for the limits dictionary passed to Limiter().
+PER_SECOND = 1
+PER_MINUTE = 60
+PER_HOUR = 60 * 60
+PER_DAY = 60 * 60 * 24
+
+
+limits_nsmap = {None: xmlutil.XMLNS_COMMON_V10, 'atom': xmlutil.XMLNS_ATOM}
+
+
+class LimitsTemplate(xmlutil.TemplateBuilder):
+    def construct(self):
+        root = xmlutil.TemplateElement('limits', selector='limits')
+
+        rates = xmlutil.SubTemplateElement(root, 'rates')
+        rate = xmlutil.SubTemplateElement(rates, 'rate', selector='rate')
+        rate.set('uri', 'uri')
+        rate.set('regex', 'regex')
+        limit = xmlutil.SubTemplateElement(rate, 'limit', selector='limit')
+        limit.set('value', 'value')
+        limit.set('verb', 'verb')
+        limit.set('remaining', 'remaining')
+        limit.set('unit', 'unit')
+        limit.set('next-available', 'next-available')
+
+        absolute = xmlutil.SubTemplateElement(root, 'absolute',
+                                              selector='absolute')
+        limit = xmlutil.SubTemplateElement(absolute, 'limit',
+                                           selector=xmlutil.get_items)
+        limit.set('name', 0)
+        limit.set('value', 1)
+
+        return xmlutil.MasterTemplate(root, 1, nsmap=limits_nsmap)
+
+
+class LimitsController(wsgi.Controller):
+    """Controller for accessing limits in the OpenStack API."""
+
+    @wsgi.serializers(xml=LimitsTemplate)
+    def index(self, req):
+        """Return all global and rate limit information."""
+        context = req.environ['cinder.context']
+        quotas = QUOTAS.get_project_quotas(context, context.project_id,
+                                           usages=False)
+        abs_limits = dict((k, v['limit']) for k, v in quotas.items())
+        rate_limits = req.environ.get("cinder.limits", [])
+
+        builder = self._get_view_builder(req)
+        return builder.build(rate_limits, abs_limits)
+
+    def _get_view_builder(self, req):
+        return limits_views.ViewBuilder()
+
+
+def create_resource():
+    return wsgi.Resource(LimitsController())
+
+
+class Limit(object):
+    """Stores information about a limit for HTTP requests."""
+
+    UNITS = {
+        1: "SECOND",
+        60: "MINUTE",
+        60 * 60: "HOUR",
+        60 * 60 * 24: "DAY",
+    }
+
+    UNIT_MAP = dict([(v, k) for k, v in UNITS.items()])
+
+    def __init__(self, verb, uri, regex, value, unit):
+        """Initialize a new `Limit`.
+
+        @param verb: HTTP verb (POST, PUT, etc.)
+ @param uri: Human-readable URI + @param regex: Regular expression format for this limit + @param value: Integer number of requests which can be made + @param unit: Unit of measure for the value parameter + """ + self.verb = verb + self.uri = uri + self.regex = regex + self.value = int(value) + self.unit = unit + self.unit_string = self.display_unit().lower() + self.remaining = int(value) + + if value <= 0: + raise ValueError("Limit value must be > 0") + + self.last_request = None + self.next_request = None + + self.water_level = 0 + self.capacity = self.unit + self.request_value = float(self.capacity) / float(self.value) + msg = _("Only %(value)s %(verb)s request(s) can be " + "made to %(uri)s every %(unit_string)s.") + self.error_message = msg % self.__dict__ + + def __call__(self, verb, url): + """Represent a call to this limit from a relevant request. + + @param verb: string http verb (POST, GET, etc.) + @param url: string URL + """ + if self.verb != verb or not re.match(self.regex, url): + return + + now = self._get_time() + + if self.last_request is None: + self.last_request = now + + leak_value = now - self.last_request + + self.water_level -= leak_value + self.water_level = max(self.water_level, 0) + self.water_level += self.request_value + + difference = self.water_level - self.capacity + + self.last_request = now + + if difference > 0: + self.water_level -= self.request_value + self.next_request = now + difference + return difference + + cap = self.capacity + water = self.water_level + val = self.value + + self.remaining = math.floor(((cap - water) / cap) * val) + self.next_request = now + + def _get_time(self): + """Retrieve the current time. Broken out for testability.""" + return time.time() + + def display_unit(self): + """Display the string name of the unit.""" + return self.UNITS.get(self.unit, "UNKNOWN") + + def display(self): + """Return a useful representation of this class.""" + return { + "verb": self.verb, + "URI": self.uri, + "regex": self.regex, + "value": self.value, + "remaining": int(self.remaining), + "unit": self.display_unit(), + "resetTime": int(self.next_request or self._get_time()), + } + +# "Limit" format is a dictionary with the HTTP verb, human-readable URI, +# a regular-expression to match, value and unit of measure (PER_DAY, etc.) + +DEFAULT_LIMITS = [ + Limit("POST", "*", ".*", 10, PER_MINUTE), + Limit("POST", "*/servers", "^/servers", 50, PER_DAY), + Limit("PUT", "*", ".*", 10, PER_MINUTE), + Limit("GET", "*changes-since*", ".*changes-since.*", 3, PER_MINUTE), + Limit("DELETE", "*", ".*", 100, PER_MINUTE), +] + + +class RateLimitingMiddleware(base_wsgi.Middleware): + """Rate-limits requests passing through this middleware. + + All limit information is stored in memory for this implementation. + """ + + def __init__(self, application, limits=None, limiter=None, **kwargs): + """Initialize new `RateLimitingMiddleware`, which wraps the given WSGI + application and sets up the given limits. + + @param application: WSGI application to wrap + @param limits: String describing limits + @param limiter: String identifying class for representing limits + + Other parameters are passed to the constructor for the limiter. 
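+
+        An illustrative limits string (format defined by
+        `Limiter.parse_limits` below; the values here are made up)::
+
+            "(POST, *, .*, 10, MINUTE);(GET, *changes-since*, "
+            ".*changes-since.*, 3, MINUTE)"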
+        """
+        base_wsgi.Middleware.__init__(self, application)
+
+        # Select the limiter class
+        if limiter is None:
+            limiter = Limiter
+        else:
+            limiter = importutils.import_class(limiter)
+
+        # Parse the limits, if any are provided
+        if limits is not None:
+            limits = limiter.parse_limits(limits)
+
+        self._limiter = limiter(limits or DEFAULT_LIMITS, **kwargs)
+
+    @webob.dec.wsgify(RequestClass=wsgi.Request)
+    def __call__(self, req):
+        """Represents a single call through this middleware.
+
+        We should record the request if we have a limit relevant to it.
+        If no limit is relevant to the request, ignore it. If the request
+        should be rate limited, return a fault telling the user they are
+        over the limit and need to retry later.
+        """
+        verb = req.method
+        url = req.url
+        context = req.environ.get("cinder.context")
+
+        if context:
+            username = context.user_id
+        else:
+            username = None
+
+        delay, error = self._limiter.check_for_delay(verb, url, username)
+
+        if delay:
+            msg = _("This request was rate-limited.")
+            retry = time.time() + delay
+            return wsgi.OverLimitFault(msg, error, retry)
+
+        req.environ["cinder.limits"] = self._limiter.get_limits(username)
+
+        return self.application
+
+
+class Limiter(object):
+    """Rate-limit checking class which handles limits in memory."""
+
+    def __init__(self, limits, **kwargs):
+        """Initialize the new `Limiter`.
+
+        @param limits: List of `Limit` objects
+        """
+        self.limits = copy.deepcopy(limits)
+        self.levels = collections.defaultdict(lambda: copy.deepcopy(limits))
+
+        # Pick up any per-user limit information
+        for key, value in kwargs.items():
+            if key.startswith(LIMITS_PREFIX):
+                username = key[len(LIMITS_PREFIX):]
+                self.levels[username] = self.parse_limits(value)
+
+    def get_limits(self, username=None):
+        """Return the limits for a given user."""
+        return [limit.display() for limit in self.levels[username]]
+
+    def check_for_delay(self, verb, url, username=None):
+        """Check the given verb/url/username triplet for a limit.
+
+        @return: Tuple of delay (in seconds) and error message (or None, None)
+        """
+        delays = []
+
+        for limit in self.levels[username]:
+            delay = limit(verb, url)
+            if delay:
+                delays.append((delay, limit.error_message))
+
+        if delays:
+            delays.sort()
+            return delays[0]
+
+        return None, None
+
+    # Note: This method gets called before the class is instantiated,
+    # so this must be either a static method or a class method. It is
+    # used to develop a list of limits to feed to the constructor. We
+    # put this in the class so that subclasses can override the
+    # default limit parsing.
+    @staticmethod
+    def parse_limits(limits):
+        """Convert a string into a list of Limit instances.
+
+        This implementation expects a semicolon-separated sequence of
+        parenthesized groups, where each group contains a
+        comma-separated sequence consisting of HTTP method,
+        user-readable URI, a URI reg-exp, an integer number of
+        requests which can be made, and a unit of measure. Valid
+        values for the latter are "SECOND", "MINUTE", "HOUR", and
+        "DAY".
+
+        @return: List of Limit instances.
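+
+        For example (illustrative values only)::
+
+            parse_limits("(POST, *, .*, 10, MINUTE);(PUT, /volumes, "
+                         "^/volumes, 5, HOUR)")
+
+        returns two `Limit` objects: 10 POSTs per minute to any URI, and
+        5 PUTs per hour to URIs matching ``^/volumes``.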
+        """
+
+        # Handle empty limit strings
+        limits = limits.strip()
+        if not limits:
+            return []
+
+        # Split up the limits by semicolon
+        result = []
+        for group in limits.split(';'):
+            group = group.strip()
+            if group[:1] != '(' or group[-1:] != ')':
+                raise ValueError("Limit rules must be surrounded by "
+                                 "parentheses")
+            group = group[1:-1]
+
+            # Extract the Limit arguments
+            args = [a.strip() for a in group.split(',')]
+            if len(args) != 5:
+                raise ValueError("Limit rules must contain the following "
+                                 "arguments: verb, uri, regex, value, unit")
+
+            # Pull out the arguments
+            verb, uri, regex, value, unit = args
+
+            # Upper-case the verb
+            verb = verb.upper()
+
+            # Convert value--raises ValueError if it's not integer
+            value = int(value)
+
+            # Convert unit
+            unit = unit.upper()
+            if unit not in Limit.UNIT_MAP:
+                raise ValueError("Invalid units specified")
+            unit = Limit.UNIT_MAP[unit]
+
+            # Build a limit
+            result.append(Limit(verb, uri, regex, value, unit))
+
+        return result
+
+
+class WsgiLimiter(object):
+    """Rate-limit checking from a WSGI application.
+
+    Uses an in-memory `Limiter`.
+
+    To use, POST ``/`` with JSON data such as::
+
+        {
+            "verb" : "GET",
+            "path" : "/servers"
+        }
+
+    and receive a 204 No Content, or a 403 Forbidden with an X-Wait-Seconds
+    header containing the number of seconds to wait before the action would
+    succeed.
+    """
+
+    def __init__(self, limits=None):
+        """Initialize the new `WsgiLimiter`.
+
+        @param limits: List of `Limit` objects
+        """
+        self._limiter = Limiter(limits or DEFAULT_LIMITS)
+
+    @webob.dec.wsgify(RequestClass=wsgi.Request)
+    def __call__(self, request):
+        """Handles a call to this application.
+
+        Returns 204 if the request is acceptable to the limiter, else a 403
+        is returned with a relevant header indicating when the request
+        *will* succeed.
+        """
+        if request.method != "POST":
+            raise webob.exc.HTTPMethodNotAllowed()
+
+        try:
+            info = dict(jsonutils.loads(request.body))
+        except ValueError:
+            raise webob.exc.HTTPBadRequest()
+
+        username = request.path_info_pop()
+        verb = info.get("verb")
+        path = info.get("path")
+
+        delay, error = self._limiter.check_for_delay(verb, path, username)
+
+        if delay:
+            headers = {"X-Wait-Seconds": "%.2f" % delay}
+            return webob.exc.HTTPForbidden(headers=headers, explanation=error)
+        else:
+            return webob.exc.HTTPNoContent()
+
+
+class WsgiLimiterProxy(object):
+    """Rate-limit requests based on answers from a remote source."""
+
+    def __init__(self, limiter_address):
+        """Initialize the new `WsgiLimiterProxy`.
+
+        @param limiter_address: IP/port combination of where to request limit
+        """
+        self.limiter_address = limiter_address
+
+    def check_for_delay(self, verb, path, username=None):
+        body = jsonutils.dumps({"verb": verb, "path": path})
+        headers = {"Content-Type": "application/json"}
+
+        conn = httplib.HTTPConnection(self.limiter_address)
+
+        if username:
+            conn.request("POST", "/%s" % (username), body, headers)
+        else:
+            conn.request("POST", "/", body, headers)
+
+        resp = conn.getresponse()
+
+        if 200 <= resp.status < 300:
+            return None, None
+
+        return resp.getheader("X-Wait-Seconds"), resp.read() or None
+
+    # Note: This method gets called before the class is instantiated,
+    # so this must be either a static method or a class method. It is
+    # used to develop a list of limits to feed to the constructor.
+    # This implementation returns an empty list, since all limit
+    # decisions are made by a remote server.
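+    #
+    # A hedged usage sketch (the address is made up): pair this proxy
+    # with a WsgiLimiter service listening at that address:
+    #
+    #     proxy = WsgiLimiterProxy("127.0.0.1:9000")
+    #     delay, error = proxy.check_for_delay("GET", "/volumes")
+    #     if delay:
+    #         pass  # tell the caller to retry after `delay` seconds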
+ @staticmethod + def parse_limits(limits): + """Ignore a limits string--simply doesn't apply for the limit proxy. + + @return: Empty list. + """ + + return [] diff --git a/cinder/api/v2/router.py b/cinder/api/v2/router.py new file mode 100644 index 0000000000..44821c4659 --- /dev/null +++ b/cinder/api/v2/router.py @@ -0,0 +1,97 @@ +# Copyright 2011 OpenStack Foundation +# Copyright 2011 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +WSGI middleware for OpenStack Volume API. +""" + +from cinder.api import extensions +import cinder.api.openstack +from cinder.api.v2 import limits +from cinder.api.v2 import snapshot_metadata +from cinder.api.v2 import snapshots +from cinder.api.v2 import types +from cinder.api.v2 import volume_metadata +from cinder.api.v2 import volumes +from cinder.api import versions +from cinder.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + + +class APIRouter(cinder.api.openstack.APIRouter): + """Routes requests on the API to the appropriate controller and method.""" + ExtensionManager = extensions.ExtensionManager + + def _setup_routes(self, mapper, ext_mgr): + self.resources['versions'] = versions.create_resource() + mapper.connect("versions", "/", + controller=self.resources['versions'], + action='show') + + mapper.redirect("", "/") + + self.resources['volumes'] = volumes.create_resource(ext_mgr) + mapper.resource("volume", "volumes", + controller=self.resources['volumes'], + collection={'detail': 'GET'}, + member={'action': 'POST'}) + + self.resources['types'] = types.create_resource() + mapper.resource("type", "types", + controller=self.resources['types']) + + self.resources['snapshots'] = snapshots.create_resource(ext_mgr) + mapper.resource("snapshot", "snapshots", + controller=self.resources['snapshots'], + collection={'detail': 'GET'}, + member={'action': 'POST'}) + + self.resources['limits'] = limits.create_resource() + mapper.resource("limit", "limits", + controller=self.resources['limits']) + + self.resources['snapshot_metadata'] = \ + snapshot_metadata.create_resource() + snapshot_metadata_controller = self.resources['snapshot_metadata'] + + mapper.resource("snapshot_metadata", "metadata", + controller=snapshot_metadata_controller, + parent_resource=dict(member_name='snapshot', + collection_name='snapshots')) + + mapper.connect("metadata", + "/{project_id}/snapshots/{snapshot_id}/metadata", + controller=snapshot_metadata_controller, + action='update_all', + conditions={"method": ['PUT']}) + + self.resources['volume_metadata'] = \ + volume_metadata.create_resource() + volume_metadata_controller = self.resources['volume_metadata'] + + mapper.resource("volume_metadata", "metadata", + controller=volume_metadata_controller, + parent_resource=dict(member_name='volume', + collection_name='volumes')) + + mapper.connect("metadata", + "/{project_id}/volumes/{volume_id}/metadata", + 
controller=volume_metadata_controller, + action='update_all', + conditions={"method": ['PUT']}) diff --git a/cinder/api/v2/snapshot_metadata.py b/cinder/api/v2/snapshot_metadata.py new file mode 100644 index 0000000000..b2df8b68f3 --- /dev/null +++ b/cinder/api/v2/snapshot_metadata.py @@ -0,0 +1,162 @@ +# Copyright 2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import webob + +from cinder.api import common +from cinder.api.openstack import wsgi +from cinder import exception +from cinder import volume +from webob import exc + + +class Controller(wsgi.Controller): + """The snapshot metadata API controller for the OpenStack API.""" + + def __init__(self): + self.volume_api = volume.API() + super(Controller, self).__init__() + + def _get_metadata(self, context, snapshot_id): + try: + snapshot = self.volume_api.get_snapshot(context, snapshot_id) + meta = self.volume_api.get_snapshot_metadata(context, snapshot) + except exception.SnapshotNotFound: + msg = _('snapshot does not exist') + raise exc.HTTPNotFound(explanation=msg) + return meta + + @wsgi.serializers(xml=common.MetadataTemplate) + def index(self, req, snapshot_id): + """Returns the list of metadata for a given snapshot.""" + context = req.environ['cinder.context'] + return {'metadata': self._get_metadata(context, snapshot_id)} + + @wsgi.serializers(xml=common.MetadataTemplate) + @wsgi.deserializers(xml=common.MetadataDeserializer) + def create(self, req, snapshot_id, body): + try: + metadata = body['metadata'] + except (KeyError, TypeError): + msg = _("Malformed request body") + raise exc.HTTPBadRequest(explanation=msg) + + context = req.environ['cinder.context'] + + new_metadata = self._update_snapshot_metadata(context, + snapshot_id, + metadata, + delete=False) + + return {'metadata': new_metadata} + + @wsgi.serializers(xml=common.MetaItemTemplate) + @wsgi.deserializers(xml=common.MetaItemDeserializer) + def update(self, req, snapshot_id, id, body): + try: + meta_item = body['meta'] + except (TypeError, KeyError): + expl = _('Malformed request body') + raise exc.HTTPBadRequest(explanation=expl) + + if id not in meta_item: + expl = _('Request body and URI mismatch') + raise exc.HTTPBadRequest(explanation=expl) + + if len(meta_item) > 1: + expl = _('Request body contains too many items') + raise exc.HTTPBadRequest(explanation=expl) + + context = req.environ['cinder.context'] + self._update_snapshot_metadata(context, + snapshot_id, + meta_item, + delete=False) + + return {'meta': meta_item} + + @wsgi.serializers(xml=common.MetadataTemplate) + @wsgi.deserializers(xml=common.MetadataDeserializer) + def update_all(self, req, snapshot_id, body): + try: + metadata = body['metadata'] + except (TypeError, KeyError): + expl = _('Malformed request body') + raise exc.HTTPBadRequest(explanation=expl) + + context = req.environ['cinder.context'] + new_metadata = self._update_snapshot_metadata(context, + snapshot_id, + metadata, + delete=True) + + return {'metadata': new_metadata} + + def 
_update_snapshot_metadata(self, context, + snapshot_id, metadata, + delete=False): + try: + snapshot = self.volume_api.get_snapshot(context, snapshot_id) + return self.volume_api.update_snapshot_metadata(context, + snapshot, + metadata, + delete) + except exception.SnapshotNotFound: + msg = _('snapshot does not exist') + raise exc.HTTPNotFound(explanation=msg) + + except (ValueError, AttributeError): + msg = _("Malformed request body") + raise exc.HTTPBadRequest(explanation=msg) + + except exception.InvalidVolumeMetadata as error: + raise exc.HTTPBadRequest(explanation=error.msg) + + except exception.InvalidVolumeMetadataSize as error: + raise exc.HTTPRequestEntityTooLarge(explanation=error.msg) + + @wsgi.serializers(xml=common.MetaItemTemplate) + def show(self, req, snapshot_id, id): + """Return a single metadata item.""" + context = req.environ['cinder.context'] + data = self._get_metadata(context, snapshot_id) + + try: + return {'meta': {id: data[id]}} + except KeyError: + msg = _("Metadata item was not found") + raise exc.HTTPNotFound(explanation=msg) + + def delete(self, req, snapshot_id, id): + """Deletes an existing metadata.""" + context = req.environ['cinder.context'] + + metadata = self._get_metadata(context, snapshot_id) + + if id not in metadata: + msg = _("Metadata item was not found") + raise exc.HTTPNotFound(explanation=msg) + + try: + snapshot = self.volume_api.get_snapshot(context, snapshot_id) + self.volume_api.delete_snapshot_metadata(context, snapshot, id) + except exception.SnapshotNotFound: + msg = _('snapshot does not exist') + raise exc.HTTPNotFound(explanation=msg) + return webob.Response(status_int=200) + + +def create_resource(): + return wsgi.Resource(Controller()) diff --git a/cinder/api/v2/snapshots.py b/cinder/api/v2/snapshots.py new file mode 100644 index 0000000000..bc74d58fdf --- /dev/null +++ b/cinder/api/v2/snapshots.py @@ -0,0 +1,276 @@ +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
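+
+# NOTE: an illustrative v2 snapshot create request handled by this module
+# (ids and values made up); in v2, 'name'/'description' are aliases for
+# display_name/display_description, as the controller below shows:
+#
+#     POST /v2/{project_id}/snapshots
+#     {"snapshot": {"volume_id": "<volume uuid>", "name": "snap-001",
+#                   "description": "nightly backup", "force": false}}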
+
+"""The volume snapshots API."""
+
+import webob
+from webob import exc
+
+from cinder.api import common
+from cinder.api.openstack import wsgi
+from cinder.api.v2 import volumes
+from cinder.api import xmlutil
+from cinder import exception
+from cinder.openstack.common import log as logging
+from cinder.openstack.common import strutils
+from cinder import utils
+from cinder import volume
+
+
+LOG = logging.getLogger(__name__)
+
+
+def _translate_snapshot_detail_view(context, snapshot):
+    """Maps keys for snapshots details view."""
+
+    d = _translate_snapshot_summary_view(context, snapshot)
+
+    # NOTE(gagupta): No additional data / lookups at the moment
+    return d
+
+
+def _translate_snapshot_summary_view(context, snapshot):
+    """Maps keys for snapshots summary view."""
+    d = {}
+
+    d['id'] = snapshot['id']
+    d['created_at'] = snapshot['created_at']
+    d['name'] = snapshot['display_name']
+    d['description'] = snapshot['display_description']
+    d['volume_id'] = snapshot['volume_id']
+    d['status'] = snapshot['status']
+    d['size'] = snapshot['volume_size']
+
+    if snapshot.get('snapshot_metadata'):
+        metadata = snapshot.get('snapshot_metadata')
+        d['metadata'] = dict((item['key'], item['value']) for item in metadata)
+    # avoid circular ref when snapshot is a Snapshot instance
+    elif snapshot.get('metadata') and isinstance(snapshot.get('metadata'),
+                                                 dict):
+        d['metadata'] = snapshot['metadata']
+    else:
+        d['metadata'] = {}
+    return d
+
+
+def make_snapshot(elem):
+    elem.set('id')
+    elem.set('status')
+    elem.set('size')
+    elem.set('created_at')
+    elem.set('name')
+    elem.set('description')
+    elem.set('volume_id')
+    elem.append(common.MetadataTemplate())
+
+
+class SnapshotTemplate(xmlutil.TemplateBuilder):
+    def construct(self):
+        root = xmlutil.TemplateElement('snapshot', selector='snapshot')
+        make_snapshot(root)
+        return xmlutil.MasterTemplate(root, 1)
+
+
+class SnapshotsTemplate(xmlutil.TemplateBuilder):
+    def construct(self):
+        root = xmlutil.TemplateElement('snapshots')
+        elem = xmlutil.SubTemplateElement(root, 'snapshot',
+                                          selector='snapshots')
+        make_snapshot(elem)
+        return xmlutil.MasterTemplate(root, 1)
+
+
+class SnapshotsController(wsgi.Controller):
+    """The Snapshots API controller for the OpenStack API."""
+
+    def __init__(self, ext_mgr=None):
+        self.volume_api = volume.API()
+        self.ext_mgr = ext_mgr
+        super(SnapshotsController, self).__init__()
+
+    @wsgi.serializers(xml=SnapshotTemplate)
+    def show(self, req, id):
+        """Return data about the given snapshot."""
+        context = req.environ['cinder.context']
+
+        try:
+            snapshot = self.volume_api.get_snapshot(context, id)
+        except exception.NotFound:
+            msg = _("Snapshot could not be found")
+            raise exc.HTTPNotFound(explanation=msg)
+
+        return {'snapshot': _translate_snapshot_detail_view(context,
+                                                            snapshot)}
+
+    def delete(self, req, id):
+        """Delete a snapshot."""
+        context = req.environ['cinder.context']
+
+        LOG.audit(_("Delete snapshot with id: %s"), id, context=context)
+
+        try:
+            snapshot = self.volume_api.get_snapshot(context, id)
+            self.volume_api.delete_snapshot(context, snapshot)
+        except exception.NotFound:
+            msg = _("Snapshot could not be found")
+            raise exc.HTTPNotFound(explanation=msg)
+
+        return webob.Response(status_int=202)
+
+    @wsgi.serializers(xml=SnapshotsTemplate)
+    def index(self, req):
+        """Returns a summary list of snapshots."""
+        return self._items(req, entity_maker=_translate_snapshot_summary_view)
+
+    @wsgi.serializers(xml=SnapshotsTemplate)
+    def detail(self, req):
+        """Returns a detailed list of snapshots."""
+        return self._items(req, entity_maker=_translate_snapshot_detail_view)
+
+    def _items(self, req, entity_maker):
+        """Returns a list of snapshots, transformed through entity_maker."""
+        context = req.environ['cinder.context']
+
+        # pop out limit and offset; they are not search_opts
+        search_opts = req.GET.copy()
+        search_opts.pop('limit', None)
+        search_opts.pop('offset', None)
+
+        # filter out invalid options
+        allowed_search_options = ('status', 'volume_id', 'name')
+        volumes.remove_invalid_options(context, search_opts,
+                                       allowed_search_options)
+
+        # NOTE(thingee): v2 API allows name instead of display_name
+        if 'name' in search_opts:
+            search_opts['display_name'] = search_opts['name']
+            del search_opts['name']
+
+        snapshots = self.volume_api.get_all_snapshots(context,
+                                                      search_opts=search_opts)
+        limited_list = common.limited(snapshots, req)
+        res = [entity_maker(context, snapshot) for snapshot in limited_list]
+        return {'snapshots': res}
+
+    @wsgi.response(202)
+    @wsgi.serializers(xml=SnapshotTemplate)
+    def create(self, req, body):
+        """Creates a new snapshot."""
+        kwargs = {}
+        context = req.environ['cinder.context']
+
+        if not self.is_valid_body(body, 'snapshot'):
+            msg = (_("Missing required element '%s' in request body") %
+                   'snapshot')
+            raise exc.HTTPBadRequest(explanation=msg)
+
+        snapshot = body['snapshot']
+        kwargs['metadata'] = snapshot.get('metadata', None)
+
+        try:
+            volume_id = snapshot['volume_id']
+        except KeyError:
+            msg = _("'volume_id' must be specified")
+            raise exc.HTTPBadRequest(explanation=msg)
+
+        try:
+            volume = self.volume_api.get(context, volume_id)
+        except exception.NotFound:
+            msg = _("Volume could not be found")
+            raise exc.HTTPNotFound(explanation=msg)
+        force = snapshot.get('force', False)
+        msg = _("Create snapshot from volume %s")
+        LOG.audit(msg, volume_id, context=context)
+
+        # NOTE(thingee): v2 API allows name instead of display_name
+        if 'name' in snapshot:
+            snapshot['display_name'] = snapshot.get('name')
+            del snapshot['name']
+
+        if not utils.is_valid_boolstr(force):
+            msg = _("Invalid value '%s' for force.
") % force + raise exception.InvalidParameterValue(err=msg) + + if strutils.bool_from_string(force): + new_snapshot = self.volume_api.create_snapshot_force( + context, + volume, + snapshot.get('display_name'), + snapshot.get('description'), + **kwargs) + else: + new_snapshot = self.volume_api.create_snapshot( + context, + volume, + snapshot.get('display_name'), + snapshot.get('description'), + **kwargs) + + retval = _translate_snapshot_detail_view(context, new_snapshot) + + return {'snapshot': retval} + + @wsgi.serializers(xml=SnapshotTemplate) + def update(self, req, id, body): + """Update a snapshot.""" + context = req.environ['cinder.context'] + + if not body: + msg = _("Missing request body") + raise exc.HTTPBadRequest(explanation=msg) + + if 'snapshot' not in body: + msg = (_("Missing required element '%s' in request body") % + 'snapshot') + raise exc.HTTPBadRequest(explanation=msg) + + snapshot = body['snapshot'] + update_dict = {} + + valid_update_keys = ( + 'name', + 'description', + 'display_name', + 'display_description', + ) + + # NOTE(thingee): v2 API allows name instead of display_name + if 'name' in snapshot: + snapshot['display_name'] = snapshot['name'] + del snapshot['name'] + + # NOTE(thingee): v2 API allows description instead of + # display_description + if 'description' in snapshot: + snapshot['display_description'] = snapshot['description'] + del snapshot['description'] + + for key in valid_update_keys: + if key in snapshot: + update_dict[key] = snapshot[key] + + try: + snapshot = self.volume_api.get_snapshot(context, id) + self.volume_api.update_snapshot(context, snapshot, update_dict) + except exception.NotFound: + msg = _("Snapshot could not be found") + raise exc.HTTPNotFound(explanation=msg) + + snapshot.update(update_dict) + + return {'snapshot': _translate_snapshot_detail_view(context, snapshot)} + + +def create_resource(ext_mgr): + return wsgi.Resource(SnapshotsController(ext_mgr)) diff --git a/cinder/api/v2/types.py b/cinder/api/v2/types.py new file mode 100644 index 0000000000..99d5ffd50d --- /dev/null +++ b/cinder/api/v2/types.py @@ -0,0 +1,79 @@ +# Copyright (c) 2011 Zadara Storage Inc. +# Copyright (c) 2011 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +"""The volume type & volume types extra specs extension.""" + +from webob import exc + +from cinder.api.openstack import wsgi +from cinder.api.views import types as views_types +from cinder.api import xmlutil +from cinder import exception +from cinder.volume import volume_types + + +def make_voltype(elem): + elem.set('id') + elem.set('name') + extra_specs = xmlutil.make_flat_dict('extra_specs', selector='extra_specs') + elem.append(extra_specs) + + +class VolumeTypeTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('volume_type', selector='volume_type') + make_voltype(root) + return xmlutil.MasterTemplate(root, 1) + + +class VolumeTypesTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('volume_types') + elem = xmlutil.SubTemplateElement(root, 'volume_type', + selector='volume_types') + make_voltype(elem) + return xmlutil.MasterTemplate(root, 1) + + +class VolumeTypesController(wsgi.Controller): + """The volume types API controller for the OpenStack API.""" + + _view_builder_class = views_types.ViewBuilder + + @wsgi.serializers(xml=VolumeTypesTemplate) + def index(self, req): + """Returns the list of volume types.""" + context = req.environ['cinder.context'] + vol_types = volume_types.get_all_types(context).values() + return self._view_builder.index(req, vol_types) + + @wsgi.serializers(xml=VolumeTypeTemplate) + def show(self, req, id): + """Return a single volume type item.""" + context = req.environ['cinder.context'] + + try: + vol_type = volume_types.get_volume_type(context, id) + except exception.NotFound: + msg = _("Volume type not found") + raise exc.HTTPNotFound(explanation=msg) + + # TODO(bcwaldon): remove str cast once we use uuids + vol_type['id'] = str(vol_type['id']) + return self._view_builder.show(req, vol_type) + + +def create_resource(): + return wsgi.Resource(VolumeTypesController()) diff --git a/doc/source/_templates/.gitignore b/cinder/api/v2/views/__init__.py similarity index 100% rename from doc/source/_templates/.gitignore rename to cinder/api/v2/views/__init__.py diff --git a/cinder/api/v2/views/volumes.py b/cinder/api/v2/views/volumes.py new file mode 100644 index 0000000000..c1b45acc65 --- /dev/null +++ b/cinder/api/v2/views/volumes.py @@ -0,0 +1,123 @@ +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
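+
+# NOTE: a hedged sample of the summary view built below (the id and href
+# are made up; the links list comes from common.ViewBuilder._get_links):
+#
+#     {"volume": {"id": "<uuid>", "name": "vol-001",
+#                 "links": [{"rel": "self", "href": "..."}]}}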
+
+from cinder.api import common
+from cinder.openstack.common import log as logging
+
+
+LOG = logging.getLogger(__name__)
+
+
+class ViewBuilder(common.ViewBuilder):
+    """Model a server API response as a python dictionary."""
+
+    _collection_name = "volumes"
+
+    def __init__(self):
+        """Initialize view builder."""
+        super(ViewBuilder, self).__init__()
+
+    def summary_list(self, request, volumes):
+        """Show a list of volumes without many details."""
+        return self._list_view(self.summary, request, volumes)
+
+    def detail_list(self, request, volumes):
+        """Detailed view of a list of volumes."""
+        return self._list_view(self.detail, request, volumes)
+
+    def summary(self, request, volume):
+        """Generic, non-detailed view of a volume."""
+        return {
+            'volume': {
+                'id': volume['id'],
+                'name': volume['display_name'],
+                'links': self._get_links(request,
+                                         volume['id']),
+            },
+        }
+
+    def detail(self, request, volume):
+        """Detailed view of a single volume."""
+        return {
+            'volume': {
+                'id': volume.get('id'),
+                'status': volume.get('status'),
+                'size': volume.get('size'),
+                'availability_zone': volume.get('availability_zone'),
+                'created_at': volume.get('created_at'),
+                'attachments': self._get_attachments(volume),
+                'name': volume.get('display_name'),
+                'description': volume.get('display_description'),
+                'volume_type': self._get_volume_type(volume),
+                'snapshot_id': volume.get('snapshot_id'),
+                'source_volid': volume.get('source_volid'),
+                'metadata': self._get_volume_metadata(volume),
+                'links': self._get_links(request, volume['id']),
+                'user_id': volume.get('user_id'),
+                'bootable': str(volume.get('bootable')).lower()
+            }
+        }
+
+    def _get_attachments(self, volume):
+        """Retrieve the attachments of the volume object."""
+        attachments = []
+
+        if volume['attach_status'] == 'attached':
+            d = {}
+            volume_id = volume['id']
+
+            # note(justinsb): we use the volume id as the id of the
+            # attachments object
+            d['id'] = volume_id
+
+            d['volume_id'] = volume_id
+            d['server_id'] = volume['instance_uuid']
+            d['host_name'] = volume['attached_host']
+            if volume.get('mountpoint'):
+                d['device'] = volume['mountpoint']
+            attachments.append(d)
+
+        return attachments
+
+    def _get_volume_metadata(self, volume):
+        """Retrieve the metadata of the volume object."""
+        if volume.get('volume_metadata'):
+            metadata = volume.get('volume_metadata')
+            return dict((item['key'], item['value']) for item in metadata)
+        # avoid circular ref when volume is a Volume instance
+        elif volume.get('metadata') and isinstance(volume.get('metadata'),
+                                                   dict):
+            return volume['metadata']
+        return {}
+
+    def _get_volume_type(self, volume):
+        """Retrieve the type of the volume object."""
+        if volume['volume_type_id'] and volume.get('volume_type'):
+            return volume['volume_type']['name']
+        else:
+            return volume['volume_type_id']
+
+    def _list_view(self, func, request, volumes):
+        """Provide a view for a list of volumes."""
+        volumes_list = [func(request, volume)['volume'] for volume in volumes]
+        volumes_links = self._get_collection_links(request,
+                                                   volumes,
+                                                   self._collection_name)
+        volumes_dict = dict(volumes=volumes_list)
+
+        if volumes_links:
+            volumes_dict['volumes_links'] = volumes_links
+
+        return volumes_dict
diff --git a/cinder/api/v2/volume_metadata.py b/cinder/api/v2/volume_metadata.py
new file mode 100644
index 0000000000..d2bc37d853
--- /dev/null
+++ b/cinder/api/v2/volume_metadata.py
@@ -0,0 +1,161 @@
+# Copyright 2013 OpenStack Foundation.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import webob
+
+from cinder.api import common
+from cinder.api.openstack import wsgi
+from cinder import exception
+from cinder import volume
+
+
+class Controller(wsgi.Controller):
+    """The volume metadata API controller for the OpenStack API."""
+
+    def __init__(self):
+        self.volume_api = volume.API()
+        super(Controller, self).__init__()
+
+    def _get_metadata(self, context, volume_id):
+        try:
+            volume = self.volume_api.get(context, volume_id)
+            meta = self.volume_api.get_volume_metadata(context, volume)
+        except exception.VolumeNotFound:
+            msg = _('volume does not exist')
+            raise webob.exc.HTTPNotFound(explanation=msg)
+        return meta
+
+    @wsgi.serializers(xml=common.MetadataTemplate)
+    def index(self, req, volume_id):
+        """Returns the list of metadata for a given volume."""
+        context = req.environ['cinder.context']
+        return {'metadata': self._get_metadata(context, volume_id)}
+
+    @wsgi.serializers(xml=common.MetadataTemplate)
+    @wsgi.deserializers(xml=common.MetadataDeserializer)
+    def create(self, req, volume_id, body):
+        try:
+            metadata = body['metadata']
+        except (KeyError, TypeError):
+            msg = _("Malformed request body")
+            raise webob.exc.HTTPBadRequest(explanation=msg)
+
+        context = req.environ['cinder.context']
+
+        new_metadata = self._update_volume_metadata(context,
+                                                    volume_id,
+                                                    metadata,
+                                                    delete=False)
+
+        return {'metadata': new_metadata}
+
+    @wsgi.serializers(xml=common.MetaItemTemplate)
+    @wsgi.deserializers(xml=common.MetaItemDeserializer)
+    def update(self, req, volume_id, id, body):
+        try:
+            meta_item = body['meta']
+        except (TypeError, KeyError):
+            expl = _('Malformed request body')
+            raise webob.exc.HTTPBadRequest(explanation=expl)
+
+        if id not in meta_item:
+            expl = _('Request body and URI mismatch')
+            raise webob.exc.HTTPBadRequest(explanation=expl)
+
+        if len(meta_item) > 1:
+            expl = _('Request body contains too many items')
+            raise webob.exc.HTTPBadRequest(explanation=expl)
+
+        context = req.environ['cinder.context']
+        self._update_volume_metadata(context,
+                                     volume_id,
+                                     meta_item,
+                                     delete=False)
+
+        return {'meta': meta_item}
+
+    @wsgi.serializers(xml=common.MetadataTemplate)
+    @wsgi.deserializers(xml=common.MetadataDeserializer)
+    def update_all(self, req, volume_id, body):
+        try:
+            metadata = body['metadata']
+        except (TypeError, KeyError):
+            expl = _('Malformed request body')
+            raise webob.exc.HTTPBadRequest(explanation=expl)
+
+        context = req.environ['cinder.context']
+        new_metadata = self._update_volume_metadata(context,
+                                                    volume_id,
+                                                    metadata,
+                                                    delete=True)
+
+        return {'metadata': new_metadata}
+
+    def _update_volume_metadata(self, context,
+                                volume_id, metadata,
+                                delete=False):
+        try:
+            volume = self.volume_api.get(context, volume_id)
+            return self.volume_api.update_volume_metadata(context,
+                                                          volume,
+                                                          metadata,
+                                                          delete)
+        except exception.VolumeNotFound:
+            msg = _('volume does not exist')
+            raise webob.exc.HTTPNotFound(explanation=msg)
+
+        except (ValueError, AttributeError):
+            msg = _("Malformed request body")
+            raise webob.exc.HTTPBadRequest(explanation=msg)
+
+        except exception.InvalidVolumeMetadata as error:
+            raise webob.exc.HTTPBadRequest(explanation=error.msg)
+
+        except exception.InvalidVolumeMetadataSize as error:
+            raise webob.exc.HTTPRequestEntityTooLarge(explanation=error.msg)
+
+    @wsgi.serializers(xml=common.MetaItemTemplate)
+    def show(self, req, volume_id, id):
+        """Return a single metadata item."""
+        context = req.environ['cinder.context']
+        data = self._get_metadata(context, volume_id)
+
+        try:
+            return {'meta': {id: data[id]}}
+        except KeyError:
+            msg = _("Metadata item was not found")
+            raise webob.exc.HTTPNotFound(explanation=msg)
+
+    def delete(self, req, volume_id, id):
+        """Delete an existing metadata item."""
+        context = req.environ['cinder.context']
+
+        metadata = self._get_metadata(context, volume_id)
+
+        if id not in metadata:
+            msg = _("Metadata item was not found")
+            raise webob.exc.HTTPNotFound(explanation=msg)
+
+        try:
+            volume = self.volume_api.get(context, volume_id)
+            self.volume_api.delete_volume_metadata(context, volume, id)
+        except exception.VolumeNotFound:
+            msg = _('volume does not exist')
+            raise webob.exc.HTTPNotFound(explanation=msg)
+        return webob.Response(status_int=200)
+
+
+def create_resource():
+    return wsgi.Resource(Controller())
diff --git a/cinder/api/v2/volumes.py b/cinder/api/v2/volumes.py
new file mode 100644
index 0000000000..1267cd0b18
--- /dev/null
+++ b/cinder/api/v2/volumes.py
@@ -0,0 +1,475 @@
+# Copyright 2011 Justin Santa Barbara
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""The volumes api."""
+
+
+import ast
+import webob
+from webob import exc
+
+from cinder.api import common
+from cinder.api.openstack import wsgi
+from cinder.api.v2.views import volumes as volume_views
+from cinder.api import xmlutil
+from cinder import exception
+from cinder.openstack.common import log as logging
+from cinder.openstack.common import uuidutils
+from cinder import utils
+from cinder import volume as cinder_volume
+from cinder.volume import volume_types
+
+
+LOG = logging.getLogger(__name__)
+SCHEDULER_HINTS_NAMESPACE =\
+    "http://docs.openstack.org/block-service/ext/scheduler-hints/api/v2"
+
+
+def make_attachment(elem):
+    elem.set('id')
+    elem.set('server_id')
+    elem.set('host_name')
+    elem.set('volume_id')
+    elem.set('device')
+
+
+def make_volume(elem):
+    elem.set('id')
+    elem.set('status')
+    elem.set('size')
+    elem.set('availability_zone')
+    elem.set('created_at')
+    elem.set('name')
+    elem.set('bootable')
+    elem.set('description')
+    elem.set('volume_type')
+    elem.set('snapshot_id')
+    elem.set('source_volid')
+
+    attachments = xmlutil.SubTemplateElement(elem, 'attachments')
+    attachment = xmlutil.SubTemplateElement(attachments, 'attachment',
+                                            selector='attachments')
+    make_attachment(attachment)
+
+    # Attach metadata node
+    elem.append(common.MetadataTemplate())
+
+
+volume_nsmap = {None: xmlutil.XMLNS_VOLUME_V2, 'atom': xmlutil.XMLNS_ATOM}
+
+
+class VolumeTemplate(xmlutil.TemplateBuilder):
+    def construct(self):
+        root = xmlutil.TemplateElement('volume', selector='volume')
+        make_volume(root)
+        return xmlutil.MasterTemplate(root, 1, nsmap=volume_nsmap)
+
+
+class VolumesTemplate(xmlutil.TemplateBuilder):
+    def construct(self):
+        root = xmlutil.TemplateElement('volumes')
+        elem = xmlutil.SubTemplateElement(root, 'volume', selector='volumes')
+        make_volume(elem)
+        return xmlutil.MasterTemplate(root, 1, nsmap=volume_nsmap)
+
+
+class CommonDeserializer(wsgi.MetadataXMLDeserializer):
+    """Common deserializer to handle xml-formatted volume requests.
+
+    Handles standard volume attributes as well as the optional metadata
+    attribute
+    """
+
+    metadata_deserializer = common.MetadataXMLDeserializer()
+
+    def _extract_scheduler_hints(self, volume_node):
+        """Marshal the scheduler hints attribute of a parsed request."""
+        node =\
+            self.find_first_child_named_in_namespace(volume_node,
+                                                     SCHEDULER_HINTS_NAMESPACE,
+                                                     "scheduler_hints")
+        if node:
+            scheduler_hints = {}
+            for child in self.extract_elements(node):
+                scheduler_hints.setdefault(child.nodeName, [])
+                value = self.extract_text(child).strip()
+                scheduler_hints[child.nodeName].append(value)
+            return scheduler_hints
+        else:
+            return None
+
+    def _extract_volume(self, node):
+        """Marshal the volume attribute of a parsed request."""
+        volume = {}
+        volume_node = self.find_first_child_named(node, 'volume')
+
+        attributes = ['name', 'description', 'size',
+                      'volume_type', 'availability_zone', 'imageRef',
+                      'snapshot_id', 'source_volid']
+        for attr in attributes:
+            if volume_node.getAttribute(attr):
+                volume[attr] = volume_node.getAttribute(attr)
+
+        metadata_node = self.find_first_child_named(volume_node, 'metadata')
+        if metadata_node is not None:
+            volume['metadata'] = self.extract_metadata(metadata_node)
+
+        scheduler_hints = self._extract_scheduler_hints(volume_node)
+        if scheduler_hints:
+            volume['scheduler_hints'] = scheduler_hints
+
+        return volume
+
+
+class CreateDeserializer(CommonDeserializer):
+    """Deserializer to handle xml-formatted create volume requests.
+
+    Handles standard volume attributes as well as the optional metadata
+    attribute
+    """
+
+    def default(self, string):
+        """Deserialize an xml-formatted volume create request."""
+        dom = utils.safe_minidom_parse_string(string)
+        volume = self._extract_volume(dom)
+        return {'body': {'volume': volume}}
+
+
+class VolumeController(wsgi.Controller):
+    """The Volumes API controller for the OpenStack API."""
+
+    _view_builder_class = volume_views.ViewBuilder
+
+    _visible_admin_metadata_keys = ['readonly', 'attached_mode']
+
+    def __init__(self, ext_mgr):
+        self.volume_api = cinder_volume.API()
+        self.ext_mgr = ext_mgr
+        super(VolumeController, self).__init__()
+
+    def _add_visible_admin_metadata(self, context, volume):
+        if context is None:
+            return
+
+        visible_admin_meta = {}
+
+        if context.is_admin:
+            volume_tmp = volume
+        else:
+            try:
+                volume_tmp = self.volume_api.get(context.elevated(),
+                                                 volume['id'])
+            except Exception:
+                return
+
+        if volume_tmp.get('volume_admin_metadata'):
+            for item in volume_tmp['volume_admin_metadata']:
+                if item['key'] in self._visible_admin_metadata_keys:
+                    visible_admin_meta[item['key']] = item['value']
+        # avoid circular ref when volume is a Volume instance
+        elif (volume_tmp.get('admin_metadata') and
+                isinstance(volume_tmp.get('admin_metadata'), dict)):
+            for key in self._visible_admin_metadata_keys:
+                if key in volume_tmp['admin_metadata'].keys():
+                    visible_admin_meta[key] = volume_tmp['admin_metadata'][key]
+
+        if not visible_admin_meta:
+            return
+
+        # NOTE(zhiyan): update visible administration metadata to
+        # volume metadata, administration metadata will rewrite existing key.
+        if volume.get('volume_metadata'):
+            orig_meta = list(volume.get('volume_metadata'))
+            for item in orig_meta:
+                if item['key'] in visible_admin_meta.keys():
+                    item['value'] = visible_admin_meta.pop(item['key'])
+            for key, value in visible_admin_meta.iteritems():
+                orig_meta.append({'key': key, 'value': value})
+            volume['volume_metadata'] = orig_meta
+        # avoid circular ref when vol is a Volume instance
+        elif (volume.get('metadata') and
+                isinstance(volume.get('metadata'), dict)):
+            volume['metadata'].update(visible_admin_meta)
+        else:
+            volume['metadata'] = visible_admin_meta
+
+    @wsgi.serializers(xml=VolumeTemplate)
+    def show(self, req, id):
+        """Return data about the given volume."""
+        context = req.environ['cinder.context']
+
+        try:
+            vol = self.volume_api.get(context, id)
+            req.cache_resource(vol)
+        except exception.NotFound:
+            msg = _("Volume could not be found")
+            raise exc.HTTPNotFound(explanation=msg)
+
+        self._add_visible_admin_metadata(context, vol)
+
+        return self._view_builder.detail(req, vol)
+
+    def delete(self, req, id):
+        """Delete a volume."""
+        context = req.environ['cinder.context']
+
+        LOG.audit(_("Delete volume with id: %s"), id, context=context)
+
+        try:
+            volume = self.volume_api.get(context, id)
+            self.volume_api.delete(context, volume)
+        except exception.NotFound:
+            msg = _("Volume could not be found")
+            raise exc.HTTPNotFound(explanation=msg)
+        except exception.VolumeAttached:
+            msg = _("Volume cannot be deleted while in attached state")
+            raise exc.HTTPBadRequest(explanation=msg)
+        return webob.Response(status_int=202)
+
+    @wsgi.serializers(xml=VolumesTemplate)
+    def index(self, req):
+        """Returns a summary list of volumes."""
+        return self._get_volumes(req, is_detail=False)
+
+    @wsgi.serializers(xml=VolumesTemplate)
+    def detail(self, req):
+        """Returns a detailed list of volumes."""
+        return self._get_volumes(req, is_detail=True)
+
+    def _get_volumes(self, req, is_detail):
+        """Returns a list of volumes, transformed through view builder."""
+
+        context = req.environ['cinder.context']
+
+        params = req.params.copy()
+        marker = params.pop('marker', None)
+        limit = params.pop('limit', None)
+        sort_key = params.pop('sort_key', 'created_at')
+        sort_dir = params.pop('sort_dir', 'desc')
+        params.pop('offset', None)
+        filters = params
+
+        remove_invalid_options(context,
+                               filters, self._get_volume_filter_options())
+
+        # NOTE(thingee): v2 API allows name instead of display_name
+        if 'name' in filters:
+            filters['display_name'] = filters['name']
+            del filters['name']
+
+        if 'metadata' in filters:
+            filters['metadata'] = ast.literal_eval(filters['metadata'])
+
+        volumes = self.volume_api.get_all(context, marker, limit, sort_key,
+                                          sort_dir, filters)
+
+        volumes = [dict(vol.iteritems()) for vol in volumes]
+
+        for volume in volumes:
+            self._add_visible_admin_metadata(context, volume)
+
+        limited_list = common.limited(volumes, req)
+
+        if is_detail:
+            volumes = self._view_builder.detail_list(req, limited_list)
+        else:
+            volumes = self._view_builder.summary_list(req, limited_list)
+        req.cache_resource(limited_list)
+        return volumes
+
+    def _image_uuid_from_href(self, image_href):
+        # If the image href was generated by nova api, strip image_href
+        # down to an id.
+        try:
+            image_uuid = image_href.split('/').pop()
+        except (TypeError, AttributeError):
+            msg = _("Invalid imageRef provided.")
+            raise exc.HTTPBadRequest(explanation=msg)
+
+        if not uuidutils.is_uuid_like(image_uuid):
+            msg = _("Invalid imageRef provided.")
+            raise exc.HTTPBadRequest(explanation=msg)
+
+        return image_uuid
+
+    @wsgi.response(202)
+    @wsgi.serializers(xml=VolumeTemplate)
+    @wsgi.deserializers(xml=CreateDeserializer)
+    def create(self, req, body):
+        """Creates a new volume."""
+        if not self.is_valid_body(body, 'volume'):
+            msg = _("Missing required element '%s' in request body") % 'volume'
+            raise exc.HTTPBadRequest(explanation=msg)
+
+        LOG.debug('Create volume request body: %s', body)
+        context = req.environ['cinder.context']
+        volume = body['volume']
+
+        kwargs = {}
+
+        # NOTE(thingee): v2 API allows name instead of display_name
+        if volume.get('name'):
+            volume['display_name'] = volume.get('name')
+            del volume['name']
+
+        # NOTE(thingee): v2 API allows description instead of
+        # display_description
+        if volume.get('description'):
+            volume['display_description'] = volume.get('description')
+            del volume['description']
+
+        req_volume_type = volume.get('volume_type', None)
+        if req_volume_type:
+            try:
+                if not uuidutils.is_uuid_like(req_volume_type):
+                    kwargs['volume_type'] = \
+                        volume_types.get_volume_type_by_name(
+                            context, req_volume_type)
+                else:
+                    kwargs['volume_type'] = volume_types.get_volume_type(
+                        context, req_volume_type)
+            except exception.VolumeTypeNotFound:
+                msg = _("Volume type not found.")
+                raise exc.HTTPNotFound(explanation=msg)
+
+        kwargs['metadata'] = volume.get('metadata', None)
+
+        snapshot_id = volume.get('snapshot_id')
+        if snapshot_id is not None:
+            try:
+                kwargs['snapshot'] = self.volume_api.get_snapshot(context,
+                                                                  snapshot_id)
+            except exception.NotFound:
+                explanation = _('snapshot id:%s not found') % snapshot_id
+                raise exc.HTTPNotFound(explanation=explanation)
+        else:
+            kwargs['snapshot'] = None
+
+        source_volid = volume.get('source_volid')
+        if source_volid is not None:
+            try:
+                kwargs['source_volume'] = \
+                    self.volume_api.get_volume(context,
+                                               source_volid)
+            except exception.NotFound:
+                explanation = _('source volume id:%s not found') % source_volid
+                raise exc.HTTPNotFound(explanation=explanation)
+        else:
+            kwargs['source_volume'] = None
+
+        size = volume.get('size', None)
+        if size is None and kwargs['snapshot'] is not None:
+            size = kwargs['snapshot']['volume_size']
+        elif size is None and kwargs['source_volume'] is not None:
+            size = kwargs['source_volume']['size']
+
+        LOG.audit(_("Create volume of %s GB"), size, context=context)
+
+        if self.ext_mgr.is_loaded('os-image-create'):
+            image_href = volume.get('imageRef')
+            if image_href:
+                image_uuid = self._image_uuid_from_href(image_href)
+                kwargs['image_id'] = image_uuid
+
+        kwargs['availability_zone'] = volume.get('availability_zone', None)
+        kwargs['scheduler_hints'] = volume.get('scheduler_hints', None)
+
+        new_volume = self.volume_api.create(context,
+                                            size,
+                                            volume.get('display_name'),
+                                            volume.get('display_description'),
+                                            **kwargs)
+
+        # TODO(vish): Instance should be None at db layer instead of
+        #             trying to lazy load, but for now we turn it into
+        #             a dict to avoid an error.
+        new_volume = dict(new_volume.iteritems())
+
+        self._add_visible_admin_metadata(context, new_volume)
+
+        retval = self._view_builder.detail(req, new_volume)
+
+        return retval
+
+    def _get_volume_filter_options(self):
+        """Return volume search options allowed by non-admin."""
+        return ('name', 'status', 'metadata')
+
+    @wsgi.serializers(xml=VolumeTemplate)
+    def update(self, req, id, body):
+        """Update a volume."""
+        context = req.environ['cinder.context']
+
+        if not body:
+            msg = _("Missing request body")
+            raise exc.HTTPBadRequest(explanation=msg)
+
+        if 'volume' not in body:
+            msg = _("Missing required element '%s' in request body") % 'volume'
+            raise exc.HTTPBadRequest(explanation=msg)
+
+        volume = body['volume']
+        update_dict = {}
+
+        valid_update_keys = (
+            'name',
+            'description',
+            'metadata',
+        )
+
+        for key in valid_update_keys:
+            if key in volume:
+                update_dict[key] = volume[key]
+
+        # NOTE(thingee): v2 API allows name instead of display_name
+        if 'name' in update_dict:
+            update_dict['display_name'] = update_dict['name']
+            del update_dict['name']
+
+        # NOTE(thingee): v2 API allows description instead of
+        # display_description
+        if 'description' in update_dict:
+            update_dict['display_description'] = update_dict['description']
+            del update_dict['description']
+
+        try:
+            volume = self.volume_api.get(context, id)
+            self.volume_api.update(context, volume, update_dict)
+        except exception.NotFound:
+            msg = _("Volume could not be found")
+            raise exc.HTTPNotFound(explanation=msg)
+
+        volume.update(update_dict)
+
+        self._add_visible_admin_metadata(context, volume)
+
+        return self._view_builder.detail(req, volume)
+
+
+def create_resource(ext_mgr):
+    return wsgi.Resource(VolumeController(ext_mgr))
+
+
+def remove_invalid_options(context, filters, allowed_search_options):
+    """Remove search options that are not valid for non-admin API/context."""
+    if context.is_admin:
+        # Allow all options
+        return
+    # Otherwise, strip out all unknown options
+    unknown_options = [opt for opt in filters
+                       if opt not in allowed_search_options]
+    bad_options = ", ".join(unknown_options)
+    log_msg = _("Removing options '%s' from query") % bad_options
+    LOG.debug(log_msg)
+    for opt in unknown_options:
+        del filters[opt]
diff --git a/cinder/api/versions.py b/cinder/api/versions.py
new file mode 100644
index 0000000000..9981ee9705
--- /dev/null
+++ b/cinder/api/versions.py
@@ -0,0 +1,282 @@
+# Copyright 2010 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import datetime
+
+from lxml import etree
+from oslo.config import cfg
+
+from cinder.api.openstack import wsgi
+from cinder.api.views import versions as views_versions
+from cinder.api import xmlutil
+
+
+CONF = cfg.CONF
+
+
+_KNOWN_VERSIONS = {
+    "v2.0": {
+        "id": "v2.0",
+        "status": "CURRENT",
+        "updated": "2012-11-21T11:33:21Z",
+        "links": [
+            {
+                "rel": "describedby",
+                "type": "application/pdf",
+                "href": "http://jorgew.github.com/block-storage-api/"
+                        "content/os-block-storage-1.0.pdf",
+            },
+            {
+                "rel": "describedby",
+                "type": "application/vnd.sun.wadl+xml",
+                #(anthony) FIXME
+                "href": "http://docs.rackspacecloud.com/"
+                        "servers/api/v1.1/application.wadl",
+            },
+        ],
+        "media-types": [
+            {
+                "base": "application/xml",
+                "type": "application/vnd.openstack.volume+xml;version=1",
+            },
+            {
+                "base": "application/json",
+                "type": "application/vnd.openstack.volume+json;version=1",
+            }
+        ],
+    },
+    "v1.0": {
+        "id": "v1.0",
+        "status": "CURRENT",
+        "updated": "2012-01-04T11:33:21Z",
+        "links": [
+            {
+                "rel": "describedby",
+                "type": "application/pdf",
+                "href": "http://jorgew.github.com/block-storage-api/"
+                        "content/os-block-storage-1.0.pdf",
+            },
+            {
+                "rel": "describedby",
+                "type": "application/vnd.sun.wadl+xml",
+                #(anthony) FIXME
+                "href": "http://docs.rackspacecloud.com/"
+                        "servers/api/v1.1/application.wadl",
+            },
+        ],
+        "media-types": [
+            {
+                "base": "application/xml",
+                "type": "application/vnd.openstack.volume+xml;version=1",
+            },
+            {
+                "base": "application/json",
+                "type": "application/vnd.openstack.volume+json;version=1",
+            }
+        ],
+    }
+}
+
+
+def get_supported_versions():
+    versions = {}
+
+    if CONF.enable_v1_api:
+        versions['v1.0'] = _KNOWN_VERSIONS['v1.0']
+    if CONF.enable_v2_api:
+        versions['v2.0'] = _KNOWN_VERSIONS['v2.0']
+
+    return versions
+
+
+class MediaTypesTemplateElement(xmlutil.TemplateElement):
+    def will_render(self, datum):
+        return 'media-types' in datum
+
+
+def make_version(elem):
+    elem.set('id')
+    elem.set('status')
+    elem.set('updated')
+
+    mts = MediaTypesTemplateElement('media-types')
+    elem.append(mts)
+
+    mt = xmlutil.SubTemplateElement(mts, 'media-type', selector='media-types')
+    mt.set('base')
+    mt.set('type')
+
+    xmlutil.make_links(elem, 'links')
+
+
+version_nsmap = {None: xmlutil.XMLNS_COMMON_V10, 'atom': xmlutil.XMLNS_ATOM}
+
+
+class VersionTemplate(xmlutil.TemplateBuilder):
+    def construct(self):
+        root = xmlutil.TemplateElement('version', selector='version')
+        make_version(root)
+        return xmlutil.MasterTemplate(root, 1, nsmap=version_nsmap)
+
+
+class VersionsTemplate(xmlutil.TemplateBuilder):
+    def construct(self):
+        root = xmlutil.TemplateElement('versions')
+        elem = xmlutil.SubTemplateElement(root, 'version', selector='versions')
+        make_version(elem)
+        return xmlutil.MasterTemplate(root, 1, nsmap=version_nsmap)
+
+
+class ChoicesTemplate(xmlutil.TemplateBuilder):
+    def construct(self):
+        root = xmlutil.TemplateElement('choices')
+        elem = xmlutil.SubTemplateElement(root, 'version', selector='choices')
+        make_version(elem)
+        return xmlutil.MasterTemplate(root, 1, nsmap=version_nsmap)
+
+
+class AtomSerializer(wsgi.XMLDictSerializer):
+
+    NSMAP = {None: xmlutil.XMLNS_ATOM}
+
+    def __init__(self, metadata=None, xmlns=None):
+        self.metadata = metadata or {}
+        if not xmlns:
+            self.xmlns = wsgi.XMLNS_ATOM
+        else:
+            self.xmlns = xmlns
+
+    def _get_most_recent_update(self, versions):
+        recent = None
+        for version in versions:
+            updated = datetime.datetime.strptime(version['updated'],
+                                                 '%Y-%m-%dT%H:%M:%SZ')
+            if not recent:
+                recent = updated
+            elif updated > recent:
+                recent = updated
+
+        return recent.strftime('%Y-%m-%dT%H:%M:%SZ')
+
+    def _get_base_url(self, link_href):
+        # Make sure no trailing /
+        link_href = link_href.rstrip('/')
+        return link_href.rsplit('/', 1)[0] + '/'
+
+    def _create_feed(self, versions, feed_title, feed_id):
+        feed = etree.Element('feed', nsmap=self.NSMAP)
+        title = etree.SubElement(feed, 'title')
+        title.set('type', 'text')
+        title.text = feed_title
+
+        # Set this updated to the most recently updated version
+        recent = self._get_most_recent_update(versions)
+        etree.SubElement(feed, 'updated').text = recent
+
+        etree.SubElement(feed, 'id').text = feed_id
+
+        link = etree.SubElement(feed, 'link')
+        link.set('rel', 'self')
+        link.set('href', feed_id)
+
+        author = etree.SubElement(feed, 'author')
+        etree.SubElement(author, 'name').text = 'Rackspace'
+        etree.SubElement(author, 'uri').text = 'http://www.rackspace.com/'
+
+        for version in versions:
+            feed.append(self._create_version_entry(version))
+
+        return feed
+
+    def _create_version_entry(self, version):
+        entry = etree.Element('entry')
+        etree.SubElement(entry, 'id').text = version['links'][0]['href']
+        title = etree.SubElement(entry, 'title')
+        title.set('type', 'text')
+        title.text = 'Version %s' % version['id']
+        etree.SubElement(entry, 'updated').text = version['updated']
+
+        for link in version['links']:
+            link_elem = etree.SubElement(entry, 'link')
+            link_elem.set('rel', link['rel'])
+            link_elem.set('href', link['href'])
+            if 'type' in link:
+                link_elem.set('type', link['type'])
+
+        content = etree.SubElement(entry, 'content')
+        content.set('type', 'text')
+        content.text = 'Version %s %s (%s)' % (version['id'],
+                                               version['status'],
+                                               version['updated'])
+        return entry
+
+
+class VersionsAtomSerializer(AtomSerializer):
+    def default(self, data):
+        versions = data['versions']
+        feed_id = self._get_base_url(versions[0]['links'][0]['href'])
+        feed = self._create_feed(versions, 'Available API Versions', feed_id)
+        return self._to_xml(feed)
+
+
+class VersionAtomSerializer(AtomSerializer):
+    def default(self, data):
+        version = data['version']
+        feed_id = version['links'][0]['href']
+        feed = self._create_feed([version], 'About This Version', feed_id)
+        return self._to_xml(feed)
+
+
+class Versions(wsgi.Resource):
+
+    def __init__(self):
+        super(Versions, self).__init__(None)
+
+    @wsgi.serializers(xml=VersionsTemplate,
+                      atom=VersionsAtomSerializer)
+    def index(self, req):
+        """Return all versions."""
+        builder = views_versions.get_view_builder(req)
+        return builder.build_versions(get_supported_versions())
+
+    @wsgi.serializers(xml=ChoicesTemplate)
+    @wsgi.response(300)
+    def multi(self, req):
+        """Return multiple choices."""
+        builder = views_versions.get_view_builder(req)
+        return builder.build_choices(get_supported_versions(), req)
+
+    def get_action_args(self, request_environment):
+        """Parse dictionary created by routes library."""
+        args = {}
+        if request_environment['PATH_INFO'] == '/':
+            args['action'] = 'index'
+        else:
+            args['action'] = 'multi'
+
+        return args
+
+
+class VolumeVersionV1(object):
+    @wsgi.serializers(xml=VersionTemplate,
+                      atom=VersionAtomSerializer)
+    def show(self, req):
+        builder = views_versions.get_view_builder(req)
+        return builder.build_version(_KNOWN_VERSIONS['v1.0'])
+
+
+def create_resource():
+    return wsgi.Resource(VolumeVersionV1())
diff --git a/doc/source/_templates/.placeholder b/cinder/api/views/__init__.py
similarity index 100%
rename from doc/source/_templates/.placeholder
rename to cinder/api/views/__init__.py
diff --git a/cinder/api/views/availability_zones.py b/cinder/api/views/availability_zones.py
new file mode 100644
index 0000000000..bcf658af6b
--- /dev/null
+++ b/cinder/api/views/availability_zones.py
@@ -0,0 +1,29 @@
+# Copyright (c) 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import cinder.api.common
+
+
+class ViewBuilder(cinder.api.common.ViewBuilder):
+    """Map cinder.volumes.api list_availability_zones response into dicts."""
+
+    def list(self, request, availability_zones):
+        def fmt(az):
+            return {
+                'zoneName': az['name'],
+                'zoneState': {'available': az['available']},
+            }
+
+        return {'availabilityZoneInfo': [fmt(az) for az in availability_zones]}
diff --git a/cinder/api/views/backups.py b/cinder/api/views/backups.py
new file mode 100644
index 0000000000..446bf30c61
--- /dev/null
+++ b/cinder/api/views/backups.py
@@ -0,0 +1,90 @@
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
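+
+# NOTE: the view builder below mirrors the volume views: summary exposes
+# only id/name/links, while detail adds status, size, object_count,
+# container, timestamps and fail_reason. Assuming the inherited
+# _get_links() emits the usual self/bookmark pair (it is defined outside
+# this diff), a summary response is shaped roughly like (values invented):
+#
+#     {"backup": {"id": "<uuid>",
+#                 "name": "nightly",
+#                 "links": [{"rel": "self", "href": "<url>"},
+#                           {"rel": "bookmark", "href": "<url>"}]}}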
+
+from cinder.api import common
+from cinder.openstack.common import log as logging
+
+
+LOG = logging.getLogger(__name__)
+
+
+class ViewBuilder(common.ViewBuilder):
+    """Model backup API responses as a python dictionary."""
+
+    _collection_name = "backups"
+
+    def __init__(self):
+        """Initialize view builder."""
+        super(ViewBuilder, self).__init__()
+
+    def summary_list(self, request, backups):
+        """Show a list of backups without many details."""
+        return self._list_view(self.summary, request, backups)
+
+    def detail_list(self, request, backups):
+        """Detailed view of a list of backups."""
+        return self._list_view(self.detail, request, backups)
+
+    def summary(self, request, backup):
+        """Generic, non-detailed view of a backup."""
+        return {
+            'backup': {
+                'id': backup['id'],
+                'name': backup['display_name'],
+                'links': self._get_links(request,
+                                         backup['id']),
+            },
+        }
+
+    def restore_summary(self, request, restore):
+        """Generic, non-detailed view of a restore."""
+        return {
+            'restore': {
+                'backup_id': restore['backup_id'],
+                'volume_id': restore['volume_id'],
+            },
+        }
+
+    def detail(self, request, backup):
+        """Detailed view of a single backup."""
+        return {
+            'backup': {
+                'id': backup.get('id'),
+                'status': backup.get('status'),
+                'size': backup.get('size'),
+                'object_count': backup.get('object_count'),
+                'availability_zone': backup.get('availability_zone'),
+                'container': backup.get('container'),
+                'created_at': backup.get('created_at'),
+                'name': backup.get('display_name'),
+                'description': backup.get('display_description'),
+                'fail_reason': backup.get('fail_reason'),
+                'volume_id': backup.get('volume_id'),
+                'links': self._get_links(request, backup['id'])
+            }
+        }
+
+    def _list_view(self, func, request, backups):
+        """Provide a view for a list of backups."""
+        backups_list = [func(request, backup)['backup'] for backup in backups]
+        backups_links = self._get_collection_links(request,
+                                                   backups,
+                                                   self._collection_name)
+        backups_dict = dict(backups=backups_list)
+
+        if backups_links:
+            backups_dict['backups_links'] = backups_links
+
+        return backups_dict
diff --git a/cinder/api/openstack/volume/views/limits.py b/cinder/api/views/limits.py
similarity index 95%
rename from cinder/api/openstack/volume/views/limits.py
rename to cinder/api/views/limits.py
index 81b1e794ec..528ca417dd 100644
--- a/cinder/api/openstack/volume/views/limits.py
+++ b/cinder/api/views/limits.py
@@ -1,6 +1,4 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010-2011 OpenStack LLC.
+# Copyright 2010-2011 OpenStack Foundation
 # All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -71,7 +69,7 @@ def _build_rate_limits(self, rate_limits):
             # check for existing key
             for limit in limits:
                 if (limit["uri"] == rate_limit["URI"] and
-                    limit["regex"] == rate_limit["regex"]):
+                        limit["regex"] == rate_limit["regex"]):
                     _rate_limit_key = limit
                     break
diff --git a/cinder/api/views/qos_specs.py b/cinder/api/views/qos_specs.py
new file mode 100644
index 0000000000..cd83862dd3
--- /dev/null
+++ b/cinder/api/views/qos_specs.py
@@ -0,0 +1,64 @@
+# Copyright (C) 2013 eBay Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from cinder.api import common
+from cinder.openstack.common import log as logging
+
+
+LOG = logging.getLogger(__name__)
+
+
+class ViewBuilder(common.ViewBuilder):
+    """Model QoS specs API responses as a python dictionary."""
+
+    _collection_name = "qos_specs"
+
+    def __init__(self):
+        """Initialize view builder."""
+        super(ViewBuilder, self).__init__()
+
+    def summary_list(self, request, qos_specs):
+        """Show a list of qos_specs without many details."""
+        return self._list_view(self.detail, request, qos_specs)
+
+    def summary(self, request, qos_spec):
+        """Generic, non-detailed view of a qos_specs entry."""
+        return {
+            'qos_specs': qos_spec,
+            'links': self._get_links(request,
+                                     qos_spec['id']),
+        }
+
+    def detail(self, request, qos_spec):
+        """Detailed view of a single qos_spec."""
+        #TODO(zhiteng) Add associations to detailed view
+        return {
+            'qos_specs': qos_spec,
+            'links': self._get_links(request,
+                                     qos_spec['id']),
+        }
+
+    def associations(self, request, associates):
+        """View of qos specs associations."""
+        return {
+            'qos_associations': associates
+        }
+
+    def _list_view(self, func, request, qos_specs):
+        """Provide a view for a list of qos_specs."""
+        specs_list = [func(request, specs)['qos_specs'] for specs in qos_specs]
+        specs_dict = dict(qos_specs=specs_list)
+
+        return specs_dict
diff --git a/cinder/api/views/transfers.py b/cinder/api/views/transfers.py
new file mode 100644
index 0000000000..3cc757f862
--- /dev/null
+++ b/cinder/api/views/transfers.py
@@ -0,0 +1,89 @@
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
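+
+# NOTE: of the views below, only create() includes the transfer's auth_key;
+# summary and detail omit it, so the one-time key is returned solely by the
+# create call and cannot be fetched again later. A created-transfer
+# response is therefore shaped roughly like (values invented):
+#
+#     {"transfer": {"id": "<uuid>",
+#                   "created_at": "2013-06-28T00:00:00.000000",
+#                   "name": "my-transfer",
+#                   "volume_id": "<uuid>",
+#                   "auth_key": "<one-time key>",
+#                   "links": [{"rel": "self", "href": "<url>"}]}}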
+
+from cinder.api import common
+from cinder.openstack.common import log as logging
+
+
+LOG = logging.getLogger(__name__)
+
+
+class ViewBuilder(common.ViewBuilder):
+    """Model transfer API responses as a python dictionary."""
+
+    _collection_name = "os-volume-transfer"
+
+    def __init__(self):
+        """Initialize view builder."""
+        super(ViewBuilder, self).__init__()
+
+    def summary_list(self, request, transfers):
+        """Show a list of transfers without many details."""
+        return self._list_view(self.summary, request, transfers)
+
+    def detail_list(self, request, transfers):
+        """Detailed view of a list of transfers."""
+        return self._list_view(self.detail, request, transfers)
+
+    def summary(self, request, transfer):
+        """Generic, non-detailed view of a transfer."""
+        return {
+            'transfer': {
+                'id': transfer['id'],
+                'volume_id': transfer.get('volume_id'),
+                'name': transfer['display_name'],
+                'links': self._get_links(request,
+                                         transfer['id']),
+            },
+        }
+
+    def detail(self, request, transfer):
+        """Detailed view of a single transfer."""
+        return {
+            'transfer': {
+                'id': transfer.get('id'),
+                'created_at': transfer.get('created_at'),
+                'name': transfer.get('display_name'),
+                'volume_id': transfer.get('volume_id'),
+                'links': self._get_links(request, transfer['id'])
+            }
+        }
+
+    def create(self, request, transfer):
+        """Detailed view of a single transfer when created."""
+        return {
+            'transfer': {
+                'id': transfer.get('id'),
+                'created_at': transfer.get('created_at'),
+                'name': transfer.get('display_name'),
+                'volume_id': transfer.get('volume_id'),
+                'auth_key': transfer.get('auth_key'),
+                'links': self._get_links(request, transfer['id'])
+            }
+        }
+
+    def _list_view(self, func, request, transfers):
+        """Provide a view for a list of transfers."""
+        transfers_list = [func(request, transfer)['transfer'] for transfer in
+                          transfers]
+        transfers_links = self._get_collection_links(request,
+                                                     transfers,
+                                                     self._collection_name)
+        transfers_dict = dict(transfers=transfers_list)
+
+        if transfers_links:
+            transfers_dict['transfers_links'] = transfers_links
+
+        return transfers_dict
diff --git a/cinder/api/openstack/volume/views/types.py b/cinder/api/views/types.py
similarity index 81%
rename from cinder/api/openstack/volume/views/types.py
rename to cinder/api/views/types.py
index a0c510cddc..d542c9f9ab 100644
--- a/cinder/api/openstack/volume/views/types.py
+++ b/cinder/api/views/types.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2012 Red Hat, Inc.
 # All Rights Reserved.
 #
@@ -15,7 +13,7 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-from cinder.api.openstack import common
+from cinder.api import common
 
 
 class ViewBuilder(common.ViewBuilder):
@@ -23,12 +21,12 @@ class ViewBuilder(common.ViewBuilder):
     def show(self, request, volume_type, brief=False):
         """Trim away extraneous volume type attributes."""
         trimmed = dict(id=volume_type.get('id'),
-                        name=volume_type.get('name'),
-                        extra_specs=volume_type.get('extra_specs'))
+                       name=volume_type.get('name'),
+                       extra_specs=volume_type.get('extra_specs'))
         return trimmed if brief else dict(volume_type=trimmed)
 
     def index(self, request, volume_types):
-        """Index over trimmed volume types"""
+        """Index over trimmed volume types."""
         volume_types_list = [self.show(request, volume_type, True)
                              for volume_type in volume_types]
         return dict(volume_types=volume_types_list)
diff --git a/cinder/api/openstack/volume/views/versions.py b/cinder/api/views/versions.py
similarity index 74%
rename from cinder/api/openstack/volume/views/versions.py
rename to cinder/api/views/versions.py
index f551382b93..40de329071 100644
--- a/cinder/api/openstack/volume/views/versions.py
+++ b/cinder/api/views/versions.py
@@ -1,6 +1,4 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010-2011 OpenStack LLC.
+# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -27,14 +25,16 @@ XMLNS_COMMON_V10 = 'http://docs.openstack.org/common/api/v1.0' XMLNS_ATOM = 'http://www.w3.org/2005/Atom' XMLNS_VOLUME_V1 = 'http://docs.openstack.org/volume/api/v1' +XMLNS_VOLUME_V2 = ('http://docs.openstack.org/api/openstack-volume/2.0/' + 'content') def validate_schema(xml, schema_name): if isinstance(xml, str): xml = etree.fromstring(xml) - base_path = 'cinder/api/openstack/volume/schemas/v1.1/' + base_path = 'cinder/api/schemas/v1.1/' if schema_name in ('atom', 'atom-link'): - base_path = 'cinder/api/openstack/volume/schemas/' + base_path = 'cinder/api/schemas/' schema_path = os.path.join(utils.cinderdir(), '%s%s.rng' % (base_path, schema_name)) schema_doc = etree.parse(schema_path) @@ -343,6 +343,18 @@ def apply(self, elem, obj): # Attribute has no value, so don't include it pass + def getAttrib(self, obj): + """Get attribute.""" + tmpattrib = {} + #Now set up all the attributes... + for key, value in self.attrib.items(): + try: + tmpattrib[key] = value(obj) + except KeyError: + # Attribute has no value, so don't include it + pass + return tmpattrib + def _render(self, parent, datum, patches, nsmap): """Internal rendering. @@ -362,25 +374,61 @@ def _render(self, parent, datum, patches, nsmap): tagname = self.tag(datum) else: tagname = self.tag - elem = etree.Element(tagname, nsmap=nsmap) + + # If the datum is None + if datum is not None: + tmpattrib = self.getAttrib(datum) + else: + tmpattrib = {} + + tagnameList = tagname.split(':') + insertIndex = 0 + + #If parent is not none and has same tagname + if parent is not None: + for i in range(0, len(tagnameList)): + tmpInsertPos = parent.find(tagnameList[i]) + if tmpInsertPos is None: + break + elif not cmp(parent.attrib, tmpattrib) == 0: + break + parent = tmpInsertPos + insertIndex = i + 1 + + if insertIndex >= len(tagnameList): + insertIndex = insertIndex - 1 + + #Create root elem + elem = etree.Element(tagnameList[insertIndex], nsmap=nsmap) + rootelem = elem + subelem = elem + + #Create subelem + for i in range((insertIndex + 1), len(tagnameList)): + subelem = etree.SubElement(elem, tagnameList[i]) + elem = subelem # If we have a parent, append the node to the parent if parent is not None: - parent.append(elem) + #If we can merge this element, then insert + if insertIndex > 0: + parent.insert(len(list(parent)), rootelem) + else: + parent.append(rootelem) # If the datum is None, do nothing else if datum is None: - return elem + return rootelem # Apply this template element to the element - self.apply(elem, datum) + self.apply(subelem, datum) # Additionally, apply the patches for patch in patches: - patch.apply(elem, datum) + patch.apply(subelem, datum) # We have fully rendered the element; return it - return elem + return rootelem def render(self, parent, obj, patches=[], nsmap=None): """Render an object. 
@@ -549,7 +597,7 @@ def _serialize(self, parent, obj, siblings, nsmap=None):
         # First step, render the element
         elems = siblings[0].render(parent, obj, siblings[1:], nsmap)
 
-        # Now, recurse to all child elements
+        # Now, traverse all child elements
         seen = set()
         for idx, sibling in enumerate(siblings):
             for child in sibling:
@@ -564,7 +612,7 @@ def _serialize(self, parent, obj, siblings, nsmap=None):
                 if child.tag in sib:
                     nieces.append(sib[child.tag])
 
-        # Now we recurse for every data element
+        # Now call this function for all data elements recursively
         for elem, datum in elems:
             self._serialize(elem, datum, nieces)
 
@@ -735,10 +783,10 @@ def attach(self, *slaves):
 
             # Make sure we have a tree match
             if slave.root.tag != self.root.tag:
-                slavetag = slave.root.tag
-                mastertag = self.root.tag
-                msg = _("Template tree mismatch; adding slave %(slavetag)s "
-                        "to master %(mastertag)s") % locals()
+                msg = (_("Template tree mismatch; adding slave %(slavetag)s "
+                         "to master %(mastertag)s") %
+                       {'slavetag': slave.root.tag,
+                        'mastertag': self.root.tag})
                 raise ValueError(msg)
 
             # Make sure slave applies to this template
@@ -860,9 +908,7 @@ def construct(self):
 
 
 def make_links(parent, selector=None):
-    """
-    Attach an Atom element to the parent.
-    """
+    """Attach an Atom element to the parent."""
 
     elem = SubTemplateElement(parent, '{%s}link' % XMLNS_ATOM,
                               selector=selector)
@@ -875,14 +921,17 @@ def make_links(parent, selector=None):
 
 
 def make_flat_dict(name, selector=None, subselector=None, ns=None):
-    """
-    Utility for simple XML templates that traditionally used
-    XMLDictSerializer with no metadata. Returns a template element
-    where the top-level element has the given tag name, and where
-    sub-elements have tag names derived from the object's keys and
-    text derived from the object's values. This only works for flat
-    dictionary objects, not dictionaries containing nested lists or
-    dictionaries.
+    """Utility for simple XML templates.
+
+    Simple templates are templates that traditionally used
+    XMLDictSerializer with no metadata.
+
+    Returns a template element where the top-level element has the
+    given tag name, and where sub-elements have tag names derived
+    from the object's keys and text derived from the object's values.
+
+    This only works for flat dictionary objects, not dictionaries
+    containing nested lists or dictionaries.
     """
 
     # Set up the names we need...
diff --git a/cinder/backup/__init__.py b/cinder/backup/__init__.py
new file mode 100644
index 0000000000..354580f362
--- /dev/null
+++ b/cinder/backup/__init__.py
@@ -0,0 +1,26 @@
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# Importing full names to not pollute the namespace and cause possible
+# collisions with use of 'from cinder.backup import ' elsewhere.
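+#
+# NOTE: import_class(), used below, resolves the backup API implementation
+# from the backup_api_class option at import time; it behaves roughly like
+# this sketch (the real helper lives in importutils, outside this diff):
+#
+#     import sys
+#
+#     def import_class(import_str):
+#         mod_str, _sep, class_str = import_str.rpartition('.')
+#         __import__(mod_str)
+#         return getattr(sys.modules[mod_str], class_str)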
+
+
+from cinder.common import config
+import cinder.openstack.common.importutils
+
+
+CONF = config.CONF
+
+API = cinder.openstack.common.importutils.import_class(CONF.backup_api_class)
diff --git a/cinder/backup/api.py b/cinder/backup/api.py
new file mode 100644
index 0000000000..6eda76ebfd
--- /dev/null
+++ b/cinder/backup/api.py
@@ -0,0 +1,199 @@
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Handles all requests relating to the volume backups service.
+"""
+
+
+from eventlet import greenthread
+
+from oslo.config import cfg
+
+from cinder.backup import rpcapi as backup_rpcapi
+from cinder import context
+from cinder.db import base
+from cinder import exception
+from cinder.openstack.common import log as logging
+from cinder import utils
+
+import cinder.policy
+import cinder.volume
+
+CONF = cfg.CONF
+LOG = logging.getLogger(__name__)
+
+
+def check_policy(context, action):
+    target = {
+        'project_id': context.project_id,
+        'user_id': context.user_id,
+    }
+    _action = 'backup:%s' % action
+    cinder.policy.enforce(context, _action, target)
+
+
+class API(base.Base):
+    """API for interacting with the volume backup manager."""
+
+    def __init__(self, db_driver=None):
+        self.backup_rpcapi = backup_rpcapi.BackupAPI()
+        self.volume_api = cinder.volume.API()
+        super(API, self).__init__(db_driver)
+
+    def get(self, context, backup_id):
+        check_policy(context, 'get')
+        rv = self.db.backup_get(context, backup_id)
+        return dict(rv.iteritems())
+
+    def delete(self, context, backup_id):
+        """Make the RPC call to delete a volume backup."""
+        check_policy(context, 'delete')
+        backup = self.get(context, backup_id)
+        if backup['status'] not in ['available', 'error']:
+            msg = _('Backup status must be available or error')
+            raise exception.InvalidBackup(reason=msg)
+
+        self.db.backup_update(context, backup_id, {'status': 'deleting'})
+        self.backup_rpcapi.delete_backup(context,
+                                         backup['host'],
+                                         backup['id'])
+
+    # TODO(moorehef): Add support for search_opts, discarded atm
+    def get_all(self, context, search_opts=None):
+        if search_opts is None:
+            search_opts = {}
+        check_policy(context, 'get_all')
+        if context.is_admin:
+            backups = self.db.backup_get_all(context)
+        else:
+            backups = self.db.backup_get_all_by_project(context,
+                                                        context.project_id)
+
+        return backups
+
+    def _is_backup_service_enabled(self, volume, volume_host):
+        """Check if there is a backup service available."""
+        topic = CONF.backup_topic
+        ctxt = context.get_admin_context()
+        services = self.db.service_get_all_by_topic(ctxt, topic)
+        for srv in services:
+            if (srv['availability_zone'] == volume['availability_zone'] and
+                    srv['host'] == volume_host and not srv['disabled'] and
+                    utils.service_is_up(srv)):
+                return True
+        return False
+
+    def create(self, context, name, description, volume_id,
+               container, availability_zone=None):
+        """Make the RPC call to create a volume backup."""
+        check_policy(context, 'create')
+        volume = self.volume_api.get(context, volume_id)
+        if volume['status'] != "available":
+            msg = _('Volume to be backed up must be available')
+            raise exception.InvalidVolume(reason=msg)
+        volume_host = volume['host'].partition('@')[0]
+        if not self._is_backup_service_enabled(volume, volume_host):
+            raise exception.ServiceNotFound(service_id='cinder-backup')
+
+        self.db.volume_update(context, volume_id, {'status': 'backing-up'})
+
+        options = {'user_id': context.user_id,
+                   'project_id': context.project_id,
+                   'display_name': name,
+                   'display_description': description,
+                   'volume_id': volume_id,
+                   'status': 'creating',
+                   'container': container,
+                   'size': volume['size'],
+                   'host': volume_host, }
+
+        backup = self.db.backup_create(context, options)
+
+        #TODO(DuncanT): In future, when we have a generic local attach,
+        #               this can go via the scheduler, which enables
+        #               better load balancing and isolation of services
+        self.backup_rpcapi.create_backup(context,
+                                         backup['host'],
+                                         backup['id'],
+                                         volume_id)
+
+        return backup
+
+    def restore(self, context, backup_id, volume_id=None):
+        """Make the RPC call to restore a volume backup."""
+        check_policy(context, 'restore')
+        backup = self.get(context, backup_id)
+        if backup['status'] != 'available':
+            msg = _('Backup status must be available')
+            raise exception.InvalidBackup(reason=msg)
+
+        size = backup['size']
+        if size is None:
+            msg = _('Backup to be restored has invalid size')
+            raise exception.InvalidBackup(reason=msg)
+
+        # Create a volume if none specified. If a volume is specified check
+        # it is large enough for the backup
+        if volume_id is None:
+            name = 'restore_backup_%s' % backup_id
+            description = 'auto-created_from_restore_from_backup'
+
+            LOG.audit(_("Creating volume of %(size)s GB for restore of "
+                        "backup %(backup_id)s"),
+                      {'size': size, 'backup_id': backup_id},
+                      context=context)
+            volume = self.volume_api.create(context, size, name, description)
+            volume_id = volume['id']
+
+            while True:
+                volume = self.volume_api.get(context, volume_id)
+                if volume['status'] != 'creating':
+                    break
+                greenthread.sleep(1)
+        else:
+            volume = self.volume_api.get(context, volume_id)
+
+        if volume['status'] != "available":
+            msg = _('Volume to be restored to must be available')
+            raise exception.InvalidVolume(reason=msg)
+
+        LOG.debug('Checking backup size %s against volume size %s',
+                  size, volume['size'])
+        if size > volume['size']:
+            msg = (_('volume size %(volume_size)d is too small to restore '
+                     'backup of size %(size)d.') %
+                   {'volume_size': volume['size'], 'size': size})
+            raise exception.InvalidVolume(reason=msg)
+
+        LOG.audit(_("Overwriting volume %(volume_id)s with restore of "
+                    "backup %(backup_id)s"),
+                  {'volume_id': volume_id, 'backup_id': backup_id},
+                  context=context)
+
+        # Setting the status here rather than setting at start and unrolling
+        # for each error condition, it should be a very small window
+        self.db.backup_update(context, backup_id, {'status': 'restoring'})
+        self.db.volume_update(context, volume_id, {'status':
+                                                   'restoring-backup'})
+        self.backup_rpcapi.restore_backup(context,
+                                          backup['host'],
+                                          backup['id'],
+                                          volume_id)
+
+        d = {'backup_id': backup_id,
+             'volume_id': volume_id, }
+
+        return d
diff --git a/cinder/backup/driver.py b/cinder/backup/driver.py
new file mode 100644
index 0000000000..241ca83483
--- /dev/null
+++ b/cinder/backup/driver.py
@@ -0,0 +1,33 @@
+# Copyright (C) 2013 Deutsche Telekom AG
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Base class for all backup drivers."""
+
+from cinder.db import base
+
+
+class BackupDriver(base.Base):
+
+    def backup(self, backup, volume_file):
+        """Start a backup of a specified volume."""
+        raise NotImplementedError()
+
+    def restore(self, backup, volume_id, volume_file):
+        """Restore a saved backup."""
+        raise NotImplementedError()
+
+    def delete(self, backup):
+        """Delete a saved backup."""
+        raise NotImplementedError()
diff --git a/cinder/backup/drivers/__init__.py b/cinder/backup/drivers/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/cinder/backup/drivers/ceph.py b/cinder/backup/drivers/ceph.py
new file mode 100644
index 0000000000..94d5a401db
--- /dev/null
+++ b/cinder/backup/drivers/ceph.py
@@ -0,0 +1,1043 @@
+# Copyright 2013 Canonical Ltd.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Ceph Backup Service Implementation.
+
+This driver supports backing up volumes of any type to a Ceph backend
+store. It is also capable of detecting whether the volume to be backed up
+is a Ceph RBD volume and, if so, attempts to perform incremental/
+differential backups.
+
+Support is also included for the following when the source volume is a
+Ceph RBD volume:
+
+    * backing up within the same Ceph pool (not recommended)
+    * backing up between different Ceph pools
+    * backing up between different Ceph clusters
+
+At the time of writing, differential backup support in Ceph/librbd was
+quite new, so this driver accounts for that by first attempting a
+differential backup and falling back to a full backup/copy if that fails.
+
+If incremental backups are used, multiple backups of the same volume are
+stored as snapshots so that minimal space is consumed in the backup store
+and restoring the volume takes far less time than a full copy.
+
+Note that Cinder supports restoring to a new volume or the original volume
+the backup was taken from. For the latter case, a full copy is enforced
+since this was deemed the safest action to take. It is therefore
+recommended to always restore to a new volume (default).
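+
+An illustrative example of the options this driver reads from cinder.conf
+(the values shown are simply the defaults defined by this module)::
+
+    backup_ceph_conf = /etc/ceph/ceph.conf
+    backup_ceph_user = cinder
+    backup_ceph_pool = backups
+    backup_ceph_chunk_size = 134217728
+    restore_discard_excess_bytes = True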
+""" + +import fcntl +import os +import re +import subprocess +import time + +import eventlet +from oslo.config import cfg + +from cinder.backup.driver import BackupDriver +from cinder import exception +from cinder.openstack.common import log as logging +from cinder import units +from cinder import utils +import cinder.volume.drivers.rbd as rbd_driver + +try: + import rados + import rbd +except ImportError: + rados = None + rbd = None + +LOG = logging.getLogger(__name__) + +service_opts = [ + cfg.StrOpt('backup_ceph_conf', default='/etc/ceph/ceph.conf', + help='Ceph config file to use.'), + cfg.StrOpt('backup_ceph_user', default='cinder', + help='the Ceph user to connect with'), + cfg.IntOpt('backup_ceph_chunk_size', default=(units.MiB * 128), + help='the chunk size in bytes that a backup will be broken ' + 'into before transfer to backup store'), + cfg.StrOpt('backup_ceph_pool', default='backups', + help='the Ceph pool to backup to'), + cfg.IntOpt('backup_ceph_stripe_unit', default=0, + help='RBD stripe unit to use when creating a backup image'), + cfg.IntOpt('backup_ceph_stripe_count', default=0, + help='RBD stripe count to use when creating a backup image'), + cfg.BoolOpt('restore_discard_excess_bytes', default=True, + help='If True, always discard excess bytes when restoring ' + 'volumes.') +] + +CONF = cfg.CONF +CONF.register_opts(service_opts) + + +class CephBackupDriver(BackupDriver): + """Backup up Cinder volumes to Ceph Object Store. + + This class enables backing up Cinder volumes to a Ceph object store. + Backups may be stored in their own pool or even cluster. Store location is + defined by the Ceph conf file and Service config options supplied. + + If the source volume is itself an RBD volume, the backup will be performed + using incremental differential backups which *should* give a performance + gain. + """ + + def __init__(self, context, db_driver=None, execute=None): + super(CephBackupDriver, self).__init__(db_driver) + self.rbd = rbd + self.rados = rados + self.context = context + self.chunk_size = CONF.backup_ceph_chunk_size + self._execute = execute or utils.execute + + if self._supports_stripingv2: + self.rbd_stripe_unit = CONF.backup_ceph_stripe_unit + self.rbd_stripe_count = CONF.backup_ceph_stripe_count + else: + LOG.info(_("rbd striping not supported - ignoring configuration " + "settings for rbd striping")) + self.rbd_stripe_count = 0 + self.rbd_stripe_unit = 0 + + self._ceph_backup_user = self._utf8(CONF.backup_ceph_user) + self._ceph_backup_pool = self._utf8(CONF.backup_ceph_pool) + self._ceph_backup_conf = self._utf8(CONF.backup_ceph_conf) + + @staticmethod + def _utf8(s): + """Ensure string s is utf8 (i.e. not unicode).""" + if isinstance(s, str): + return s + + return s.encode('utf8') + + def _validate_string_args(self, *args): + """Ensure all args are non-None and non-empty.""" + return all(args) + + def _ceph_args(self, user, conf=None, pool=None): + """Create default ceph args for executing rbd commands. + + If no --conf is provided, rbd will look in the default locations e.g. + /etc/ceph/ceph.conf + """ + + # Make sure user arg is valid since rbd command may not fail if + # invalid/no user provided, resulting in unexpected behaviour. 
+ if not self._validate_string_args(user): + raise exception.BackupInvalidCephArgs(_("invalid user '%s'") % + (user)) + + args = ['--id', user] + if conf: + args.extend(['--conf', conf]) + if pool: + args.extend(['--pool', pool]) + + return args + + @property + def _supports_layering(self): + """Determine if copy-on-write is supported by our version of librbd.""" + return hasattr(self.rbd, 'RBD_FEATURE_LAYERING') + + @property + def _supports_stripingv2(self): + """Determine if striping is supported by our version of librbd.""" + return hasattr(self.rbd, 'RBD_FEATURE_STRIPINGV2') + + def _get_rbd_support(self): + """Determine RBD features supported by our version of librbd.""" + old_format = True + features = 0 + if self._supports_layering: + old_format = False + features |= self.rbd.RBD_FEATURE_LAYERING + if self._supports_stripingv2: + old_format = False + features |= self.rbd.RBD_FEATURE_STRIPINGV2 + + return (old_format, features) + + def _connect_to_rados(self, pool=None): + """Establish connection to the backup Ceph cluster.""" + client = self.rados.Rados(rados_id=self._ceph_backup_user, + conffile=self._ceph_backup_conf) + try: + client.connect() + pool_to_open = self._utf8(pool or self._ceph_backup_pool) + ioctx = client.open_ioctx(pool_to_open) + return client, ioctx + except self.rados.Error: + # shutdown cannot raise an exception + client.shutdown() + raise + + def _disconnect_from_rados(self, client, ioctx): + """Terminate connection with the backup Ceph cluster.""" + # closing an ioctx cannot raise an exception + ioctx.close() + client.shutdown() + + def _get_backup_base_name(self, volume_id, backup_id=None, + diff_format=False): + """Return name of base image used for backup. + + Incremental backups use a new base name so we support old and new style + format. + """ + # Ensure no unicode + if diff_format: + return self._utf8("volume-%s.backup.base" % (volume_id)) + else: + if backup_id is None: + msg = _("backup_id required") + raise exception.InvalidParameterValue(msg) + return self._utf8("volume-%s.backup.%s" % (volume_id, backup_id)) + + def _discard_bytes(self, volume, offset, length): + """Trim length bytes from offset. + + If the volume is an rbd do a discard() otherwise assume it is a file + and pad with zeroes. + """ + if length: + LOG.debug(_("discarding %(length)s bytes from offset %(offset)s") % + {'length': length, 'offset': offset}) + if self._file_is_rbd(volume): + volume.rbd_image.discard(offset, length) + else: + zeroes = '\0' * length + chunks = int(length / self.chunk_size) + for chunk in xrange(0, chunks): + LOG.debug(_("writing zeroes chunk %d") % (chunk)) + volume.write(zeroes) + volume.flush() + # yield to any other pending backups + eventlet.sleep(0) + + rem = int(length % self.chunk_size) + if rem: + zeroes = '\0' * rem + volume.write(zeroes) + volume.flush() + + def _transfer_data(self, src, src_name, dest, dest_name, length): + """Transfer data between files (Python IO objects).""" + LOG.debug(_("transferring data between '%(src)s' and '%(dest)s'") % + {'src': src_name, 'dest': dest_name}) + + chunks = int(length / self.chunk_size) + LOG.debug(_("%(chunks)s chunks of %(bytes)s bytes to be transferred") % + {'chunks': chunks, 'bytes': self.chunk_size}) + + for chunk in xrange(0, chunks): + before = time.time() + data = src.read(self.chunk_size) + # If we have reach end of source, discard any extraneous bytes from + # destination volume if trim is enabled and stop writing. 
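+            # For example (illustrative sizes): when restoring a 1GiB backup
+            # onto a 2GiB destination volume, the trailing 1GiB of stale data
+            # would otherwise be left in place on the destination.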
+ if data == '': + if CONF.restore_discard_excess_bytes: + self._discard_bytes(dest, dest.tell(), + length - dest.tell()) + + return + + dest.write(data) + dest.flush() + delta = (time.time() - before) + rate = (self.chunk_size / delta) / 1024 + LOG.debug((_("transferred chunk %(chunk)s of %(chunks)s " + "(%(rate)dK/s)") % + {'chunk': chunk + 1, 'chunks': chunks, + 'rate': rate})) + + # yield to any other pending backups + eventlet.sleep(0) + + rem = int(length % self.chunk_size) + if rem: + LOG.debug(_("transferring remaining %s bytes") % (rem)) + data = src.read(rem) + if data == '': + if CONF.restore_discard_excess_bytes: + self._discard_bytes(dest, dest.tell(), rem) + else: + dest.write(data) + dest.flush() + # yield to any other pending backups + eventlet.sleep(0) + + def _create_base_image(self, name, size, rados_client): + """Create a base backup image. + + This will be the base image used for storing differential exports. + """ + LOG.debug(_("creating base image '%s'") % (name)) + old_format, features = self._get_rbd_support() + self.rbd.RBD().create(ioctx=rados_client.ioctx, + name=name, + size=size, + old_format=old_format, + features=features, + stripe_unit=self.rbd_stripe_unit, + stripe_count=self.rbd_stripe_count) + + def _delete_backup_snapshot(self, rados_client, base_name, backup_id): + """Delete snapshot associated with this backup if one exists. + + A backup should have at most ONE associated snapshot. + + This is required before attempting to delete the base image. The + snapshot on the original volume can be left as it will be purged when + the volume is deleted. + + Returns tuple(deleted_snap_name, num_of_remaining_snaps). + """ + remaining_snaps = 0 + base_rbd = self.rbd.Image(rados_client.ioctx, base_name) + try: + snap_name = self._get_backup_snap_name(base_rbd, base_name, + backup_id) + if snap_name: + LOG.debug(_("deleting backup snapshot='%s'") % (snap_name)) + base_rbd.remove_snap(snap_name) + else: + LOG.debug(_("no backup snapshot to delete")) + + # Now check whether any snapshots remain on the base image + backup_snaps = self.get_backup_snaps(base_rbd) + if backup_snaps: + remaining_snaps = len(backup_snaps) + finally: + base_rbd.close() + + return snap_name, remaining_snaps + + def _try_delete_base_image(self, backup_id, volume_id, base_name=None): + """Try to delete backup RBD image. + + If the rbd image is a base image for incremental backups, it may have + snapshots. Delete the snapshot associated with backup_id and if the + image has no more snapshots, delete it. Otherwise return. + + If no base name is provided try normal (full) format then diff format + image name. + + If a base name is provided but does not exist, ImageNotFound will be + raised. + + If the image is busy, a number of retries will be performed if + ImageBusy is received, after which the exception will be propagated to + the caller. 
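+
+        For illustration, the two name formats checked here (as built by
+        _get_backup_base_name) are::
+
+            volume-<volume_id>.backup.<backup_id>   (normal/full format)
+            volume-<volume_id>.backup.base          (diff format)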
+        """
+        retries = 3
+        delay = 5
+        try_diff_format = False
+
+        if base_name is None:
+            try_diff_format = True
+
+            base_name = self._get_backup_base_name(volume_id, backup_id)
+            LOG.debug(_("trying diff format basename='%s'") %
+                      (base_name))
+
+        with rbd_driver.RADOSClient(self) as client:
+            rbd_exists, base_name = \
+                self._rbd_image_exists(base_name, volume_id, client,
+                                       try_diff_format=try_diff_format)
+            if not rbd_exists:
+                raise self.rbd.ImageNotFound(_("image %s not found") %
+                                             (base_name))
+
+            while retries >= 0:
+                # First delete associated snapshot from base image (if exists)
+                snap, rem = self._delete_backup_snapshot(client, base_name,
+                                                         backup_id)
+                if rem:
+                    msg = (_("base image still has %s snapshots so skipping "
+                             "base image delete") % (rem))
+                    LOG.info(msg)
+                    return
+
+                LOG.info(_("deleting base image='%s'") % (base_name))
+                # Delete base if no more snapshots
+                try:
+                    self.rbd.RBD().remove(client.ioctx, base_name)
+                except self.rbd.ImageBusy as exc:
+                    # Allow a retry if the image is busy
+                    if retries > 0:
+                        LOG.info((_("image busy, retrying %(retries)s "
+                                    "more time(s) in %(delay)ss") %
+                                  {'retries': retries, 'delay': delay}))
+                        eventlet.sleep(delay)
+                    else:
+                        LOG.error(_("max retries reached - raising error"))
+                        raise exc
+                else:
+                    LOG.debug(_("base backup image='%s' deleted") %
+                              (base_name))
+                    retries = 0
+                finally:
+                    retries -= 1
+
+            # Since we have deleted the base image we can delete the source
+            # volume backup snapshot.
+            src_name = self._utf8(volume_id)
+            if src_name in self.rbd.RBD().list(client.ioctx):
+                LOG.debug(_("deleting source snap '%s'") % snap)
+                src_rbd = self.rbd.Image(client.ioctx, src_name)
+                try:
+                    src_rbd.remove_snap(snap)
+                finally:
+                    src_rbd.close()
+
+    def _piped_execute(self, cmd1, cmd2):
+        """Pipe output of cmd1 into cmd2."""
+        LOG.debug("piping cmd1='%s' into..." % (' '.join(cmd1)))
+        LOG.debug("cmd2='%s'" % (' '.join(cmd2)))
+
+        try:
+            p1 = subprocess.Popen(cmd1, stdout=subprocess.PIPE,
+                                  stderr=subprocess.PIPE)
+        except OSError as e:
+            LOG.error("pipe1 failed - %s " % unicode(e))
+            raise
+
+        # NOTE(dosaboy): ensure that the pipe is blocking. This is to work
+        # around the case where eventlet.green.subprocess is used which seems
+        # to use a non-blocking pipe.
+        flags = fcntl.fcntl(p1.stdout, fcntl.F_GETFL) & (~os.O_NONBLOCK)
+        fcntl.fcntl(p1.stdout, fcntl.F_SETFL, flags)
+
+        try:
+            p2 = subprocess.Popen(cmd2, stdin=p1.stdout,
+                                  stdout=subprocess.PIPE,
+                                  stderr=subprocess.PIPE)
+        except OSError as e:
+            LOG.error("pipe2 failed - %s " % unicode(e))
+            raise
+
+        p1.stdout.close()
+        stdout, stderr = p2.communicate()
+        return p2.returncode, stderr
+
+    def _rbd_diff_transfer(self, src_name, src_pool, dest_name, dest_pool,
+                           src_user, src_conf, dest_user, dest_conf,
+                           src_snap=None, from_snap=None):
+        """Copy only extents changed between two points.
+
+        If no snapshot is provided, the diff extents will be all those changed
+        since the rbd volume/base was created, otherwise it will be those
+        changed since the snapshot was created.
+        """
+        LOG.debug(_("performing differential transfer from '%(src)s' to "
+                    "'%(dest)s'") %
+                  {'src': src_name, 'dest': dest_name})
+
+        # NOTE(dosaboy): Need to be tolerant of clusters/clients that do
+        # not support these operations since at the time of writing they
+        # were very new.
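+        #
+        # The resulting pipeline is roughly (illustrative):
+        #
+        #     rbd export-diff [--from-snap FROM] SRC_POOL/SRC[@SNAP] - | \
+        #         rbd import-diff - DEST_POOL/DEST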
+
+        src_ceph_args = self._ceph_args(src_user, src_conf, pool=src_pool)
+        dest_ceph_args = self._ceph_args(dest_user, dest_conf, pool=dest_pool)
+
+        cmd1 = ['rbd', 'export-diff'] + src_ceph_args
+        if from_snap is not None:
+            cmd1.extend(['--from-snap', from_snap])
+        if src_snap:
+            path = self._utf8("%s/%s@%s" % (src_pool, src_name, src_snap))
+        else:
+            path = self._utf8("%s/%s" % (src_pool, src_name))
+        cmd1.extend([path, '-'])
+
+        cmd2 = ['rbd', 'import-diff'] + dest_ceph_args
+        cmd2.extend(['-', self._utf8("%s/%s" % (dest_pool, dest_name))])
+
+        ret, stderr = self._piped_execute(cmd1, cmd2)
+        if ret:
+            msg = (_("rbd diff op failed - (ret=%(ret)s stderr=%(stderr)s)") %
+                   ({'ret': ret, 'stderr': stderr}))
+            LOG.info(msg)
+            raise exception.BackupRBDOperationFailed(msg)
+
+    def _rbd_image_exists(self, name, volume_id, client,
+                          try_diff_format=False):
+        """Return tuple (exists, name)."""
+        rbds = self.rbd.RBD().list(client.ioctx)
+        if name not in rbds:
+            msg = _("image '%s' not found - trying diff format name") % (name)
+            LOG.debug(msg)
+            if try_diff_format:
+                name = self._get_backup_base_name(volume_id, diff_format=True)
+                if name not in rbds:
+                    msg = _("diff format image '%s' not found") % (name)
+                    LOG.debug(msg)
+                    return False, name
+            else:
+                return False, name
+
+        return True, name
+
+    def _snap_exists(self, base_name, snap_name, client):
+        """Return True if snapshot exists in base image."""
+        base_rbd = self.rbd.Image(client.ioctx, base_name, read_only=True)
+        try:
+            snaps = base_rbd.list_snaps()
+        finally:
+            base_rbd.close()
+
+        if snaps is None:
+            return False
+
+        for snap in snaps:
+            if snap['name'] == snap_name:
+                return True
+
+        return False
+
+    def _backup_rbd(self, backup_id, volume_id, volume_file, volume_name,
+                    length):
+        """Create an incremental backup from an RBD image."""
+        rbd_user = volume_file.rbd_user
+        rbd_pool = volume_file.rbd_pool
+        rbd_conf = volume_file.rbd_conf
+        source_rbd_image = volume_file.rbd_image
+
+        # Identify our --from-snap point (if one exists)
+        from_snap = self._get_most_recent_snap(source_rbd_image)
+        LOG.debug(_("using --from-snap '%s'") % from_snap)
+
+        base_name = self._get_backup_base_name(volume_id, diff_format=True)
+        image_created = False
+        with rbd_driver.RADOSClient(self, self._ceph_backup_pool) as client:
+            # If from_snap does not exist at the destination (and the
+            # destination exists), this implies a previous backup has failed.
+            # In this case we will force a full backup.
+            #
+            # TODO(dosaboy): find a way to repair the broken backup
+            #
+            if base_name not in self.rbd.RBD().list(ioctx=client.ioctx):
+                # If a from_snap is defined but the base does not exist, we
+                # ignore it since it is stale and waiting to be cleaned up.
+                if from_snap:
+                    LOG.debug(_("source snap '%s' is stale so deleting") %
+                              (from_snap))
+                    source_rbd_image.remove_snap(from_snap)
+                    from_snap = None
+
+                # Create new base image
+                self._create_base_image(base_name, length, client)
+                image_created = True
+            else:
+                # If a from_snap is defined but does not exist in the backup
+                # base then we cannot proceed (see above)
+                if not self._snap_exists(base_name, from_snap, client):
+                    errmsg = (_("snap='%(snap)s' does not exist in base "
+                                "image='%(base)s' - aborting incremental "
+                                "backup") %
+                              {'snap': from_snap, 'base': base_name})
+                    LOG.info(errmsg)
+                    # Raise this exception so that caller can try another
+                    # approach
+                    raise exception.BackupRBDOperationFailed(errmsg)
+
+        # Snapshot source volume so that we have a new point-in-time
+        new_snap = self._get_new_snap_name(backup_id)
+        LOG.debug(_("creating backup snapshot='%s'") % (new_snap))
+        source_rbd_image.create_snap(new_snap)
+
+        # Attempt differential backup. If this fails, perhaps because librbd
+        # or Ceph cluster version does not support it, do a full backup
+        # instead.
+        #
+        # TODO(dosaboy): find a way to determine if the operation is supported
+        # rather than brute force approach.
+        try:
+            before = time.time()
+            self._rbd_diff_transfer(volume_name, rbd_pool, base_name,
+                                    self._ceph_backup_pool,
+                                    src_user=rbd_user,
+                                    src_conf=rbd_conf,
+                                    dest_user=self._ceph_backup_user,
+                                    dest_conf=self._ceph_backup_conf,
+                                    src_snap=new_snap,
+                                    from_snap=from_snap)
+
+            LOG.debug(_("differential backup transfer completed in %.4fs") %
+                      (time.time() - before))
+
+            # We don't need the previous snapshot (if there was one) anymore
+            # so delete it.
+            if from_snap:
+                source_rbd_image.remove_snap(from_snap)
+
+        except exception.BackupRBDOperationFailed:
+            LOG.debug(_("differential backup transfer failed"))
+
+            # Clean up if image was created as part of this operation
+            if image_created:
+                self._try_delete_base_image(backup_id, volume_id,
+                                            base_name=base_name)
+
+            # Delete snapshot
+            LOG.debug(_("deleting backup snapshot='%s'") % (new_snap))
+            source_rbd_image.remove_snap(new_snap)
+
+            # Re-raise the exception so that caller can try another approach
+            raise
+
+    def _file_is_rbd(self, volume_file):
+        """Returns True if the volume_file is actually an RBD image."""
+        return hasattr(volume_file, 'rbd_image')
+
+    def _full_backup(self, backup_id, volume_id, src_volume, src_name,
+                     length):
+        """Perform a full backup of src volume.
+
+        First creates a base backup image in our backup location then performs
+        a chunked copy of all data from the source volume to a new backup rbd
+        image.
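+
+        For example (illustrative numbers), a 1GiB volume with the default
+        128MiB backup_ceph_chunk_size is transferred as 8 full chunks with
+        no remainder (see _transfer_data).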
+        """
+        backup_name = self._get_backup_base_name(volume_id, backup_id)
+
+        with rbd_driver.RADOSClient(self, self._ceph_backup_pool) as client:
+            # First create base backup image
+            old_format, features = self._get_rbd_support()
+            LOG.debug(_("creating base image='%s'") % (backup_name))
+            self.rbd.RBD().create(ioctx=client.ioctx,
+                                  name=backup_name,
+                                  size=length,
+                                  old_format=old_format,
+                                  features=features,
+                                  stripe_unit=self.rbd_stripe_unit,
+                                  stripe_count=self.rbd_stripe_count)
+
+            LOG.debug(_("copying data"))
+            dest_rbd = self.rbd.Image(client.ioctx, backup_name)
+            try:
+                rbd_meta = rbd_driver.RBDImageMetadata(dest_rbd,
+                                                       self._ceph_backup_pool,
+                                                       self._ceph_backup_user,
+                                                       self._ceph_backup_conf)
+                rbd_fd = rbd_driver.RBDImageIOWrapper(rbd_meta)
+                self._transfer_data(src_volume, src_name, rbd_fd, backup_name,
+                                    length)
+            finally:
+                dest_rbd.close()
+
+    @staticmethod
+    def backup_snapshot_name_pattern():
+        """Returns the pattern used to match backup snapshots.
+
+        It is essential that snapshots created for purposes other than backups
+        do not have this name format.
+        """
+        return r"^backup\.([a-z0-9\-]+?)\.snap\.(.+)$"
+
+    @classmethod
+    def get_backup_snaps(cls, rbd_image, sort=False):
+        """Get all backup snapshots for the given rbd image.
+
+        NOTE: this call is made public since these snapshots must be deleted
+        before the base volume can be deleted.
+        """
+        snaps = rbd_image.list_snaps()
+
+        backup_snaps = []
+        for snap in snaps:
+            search_key = cls.backup_snapshot_name_pattern()
+            result = re.search(search_key, snap['name'])
+            if result:
+                backup_snaps.append({'name': result.group(0),
+                                     'backup_id': result.group(1),
+                                     'timestamp': result.group(2)})
+
+        if sort:
+            # Sort into descending order of timestamp (most recent first)
+            backup_snaps.sort(key=lambda x: x['timestamp'], reverse=True)
+
+        return backup_snaps
+
+    def _get_new_snap_name(self, backup_id):
+        return self._utf8("backup.%s.snap.%s" % (backup_id, time.time()))
+
+    def _get_backup_snap_name(self, rbd_image, name, backup_id):
+        """Return the name of the snapshot associated with backup_id.
+
+        The rbd image provided must be the base image used for an incremental
+        backup.
+
+        A backup is only allowed ONE associated snapshot. If more are found,
+        exception.BackupOperationError is raised.
+        """
+        snaps = self.get_backup_snaps(rbd_image)
+
+        LOG.debug(_("looking for snapshot of backup base '%s'") % (name))
+
+        if not snaps:
+            LOG.debug(_("backup base '%s' has no snapshots") % (name))
+            return None
+
+        snaps = [snap['name'] for snap in snaps
+                 if snap['backup_id'] == backup_id]
+
+        if not snaps:
+            LOG.debug(_("backup '%s' has no snapshot") % (backup_id))
+            return None
+
+        if len(snaps) > 1:
+            msg = (_("backup should only have one snapshot but instead has %s")
+                   % (len(snaps)))
+            LOG.error(msg)
+            raise exception.BackupOperationError(msg)
+
+        LOG.debug(_("found snapshot '%s'") % (snaps[0]))
+        return snaps[0]
+
+    def _get_most_recent_snap(self, rbd_image):
+        """Get the most recent backup snapshot of the provided image.
+
+        Returns name of most recent backup snapshot or None if there are no
+        backup snapshots.
+        """
+        backup_snaps = self.get_backup_snaps(rbd_image, sort=True)
+        if not backup_snaps:
+            return None
+
+        return backup_snaps[0]['name']
+
+    def _get_volume_size_gb(self, volume):
+        """Return the size of the given volume in bytes.
+
+        Note that, despite the method name, the value returned is the size
+        in GB multiplied by units.GiB, i.e. a byte count.
+
+        Raises exception.InvalidParameterValue if volume size is 0.
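+
+        For example (illustrative), a 1GB volume yields
+        1 * units.GiB = 1073741824 bytes.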
+ """ + if int(volume['size']) == 0: + errmsg = _("need non-zero volume size") + raise exception.InvalidParameterValue(errmsg) + + return int(volume['size']) * units.GiB + + def backup(self, backup, volume_file): + """Backup the given volume to Ceph object store. + + If the source volume is an RBD we will attempt to do an + incremental/differential backup, otherwise a full copy is performed. + If this fails we will attempt to fall back to full copy. + """ + backup_id = backup['id'] + volume = self.db.volume_get(self.context, backup['volume_id']) + volume_id = volume['id'] + volume_name = volume['name'] + + LOG.debug(_("Starting backup of volume='%s'") % volume_name) + + # Ensure we are at the beginning of the volume + volume_file.seek(0) + length = self._get_volume_size_gb(volume) + + do_full_backup = False + if self._file_is_rbd(volume_file): + # If volume an RBD, attempt incremental backup. + try: + self._backup_rbd(backup_id, volume_id, volume_file, + volume_name, length) + except exception.BackupRBDOperationFailed: + LOG.debug(_("forcing full backup")) + do_full_backup = True + else: + do_full_backup = True + + if do_full_backup: + self._full_backup(backup_id, volume_id, volume_file, + volume_name, length) + + self.db.backup_update(self.context, backup_id, + {'container': self._ceph_backup_pool}) + + LOG.debug(_("backup '%s' finished.") % (backup_id)) + + def _full_restore(self, backup_id, volume_id, dest_file, dest_name, + length, src_snap=None): + """Restore volume using full copy i.e. all extents. + + This will result in all extents being copied from source to + destination. + """ + with rbd_driver.RADOSClient(self, self._ceph_backup_pool) as client: + # If a source snapshot is provided we assume the base is diff + # format. + if src_snap: + diff_format = True + else: + diff_format = False + + backup_name = self._get_backup_base_name(volume_id, + backup_id=backup_id, + diff_format=diff_format) + + # Retrieve backup volume + src_rbd = self.rbd.Image(client.ioctx, backup_name, + snapshot=src_snap, read_only=True) + try: + rbd_meta = rbd_driver.RBDImageMetadata(src_rbd, + self._ceph_backup_pool, + self._ceph_backup_user, + self._ceph_backup_conf) + rbd_fd = rbd_driver.RBDImageIOWrapper(rbd_meta) + self._transfer_data(rbd_fd, backup_name, dest_file, dest_name, + length) + finally: + src_rbd.close() + + def _check_restore_vol_size(self, backup_base, restore_vol, restore_length, + src_pool): + """Ensure that the restore volume is the correct size. + + If the restore volume was bigger than the backup, the diff restore will + shrink it to the size of the original backup so we need to + post-process and resize it back to its expected size. 
+ """ + with rbd_driver.RADOSClient(self, self._ceph_backup_pool) as client: + adjust_size = 0 + base_image = self.rbd.Image(client.ioctx, self._utf8(backup_base), + read_only=True) + try: + if restore_length != base_image.size(): + adjust_size = restore_length + finally: + base_image.close() + + if adjust_size: + with rbd_driver.RADOSClient(self, src_pool) as client: + dest_image = self.rbd.Image(client.ioctx, + self._utf8(restore_vol)) + try: + LOG.debug(_("adjusting restore vol size")) + dest_image.resize(adjust_size) + finally: + dest_image.close() + + def _diff_restore_rbd(self, base_name, restore_file, restore_name, + restore_point, restore_length): + """Attempt restore rbd volume from backup using diff transfer.""" + rbd_user = restore_file.rbd_user + rbd_pool = restore_file.rbd_pool + rbd_conf = restore_file.rbd_conf + + LOG.debug(_("trying incremental restore from base='%(base)s' " + "snap='%(snap)s'") % + {'base': base_name, 'snap': restore_point}) + before = time.time() + try: + self._rbd_diff_transfer(base_name, self._ceph_backup_pool, + restore_name, rbd_pool, + src_user=self._ceph_backup_user, + src_conf=self._ceph_backup_conf, + dest_user=rbd_user, dest_conf=rbd_conf, + src_snap=restore_point) + except exception.BackupRBDOperationFailed: + LOG.exception(_("differential restore failed, trying full " + "restore")) + raise + + # If the volume we are restoring to is larger than the backup volume, + # we will need to resize it after the diff import since import-diff + # appears to shrink the target rbd volume to the size of the original + # backup volume. + self._check_restore_vol_size(base_name, restore_name, restore_length, + rbd_pool) + + LOG.debug(_("restore transfer completed in %.4fs") % + (time.time() - before)) + + def _num_backup_snaps(self, backup_base_name): + """Return the number of snapshots that exist on the base image.""" + with rbd_driver.RADOSClient(self, self._ceph_backup_pool) as client: + base_rbd = self.rbd.Image(client.ioctx, backup_base_name, + read_only=True) + try: + snaps = self.get_backup_snaps(base_rbd) + finally: + base_rbd.close() + + if snaps: + return len(snaps) + else: + return 0 + + def _get_restore_point(self, base_name, backup_id): + """Get restore point snapshot name for incremental backup. + + If the backup was not incremental None is returned. + """ + with rbd_driver.RADOSClient(self, self._ceph_backup_pool) as client: + base_rbd = self.rbd.Image(client.ioctx, base_name, read_only=True) + try: + restore_point = self._get_backup_snap_name(base_rbd, base_name, + backup_id) + finally: + base_rbd.close() + + return restore_point + + def _rbd_has_extents(self, rbd_volume): + """Check whether the given rbd volume has extents. + + Return True if has extents, otherwise False. + """ + extents = [] + + def iter_cb(offset, length, exists): + if exists: + extents.append(length) + + rbd_volume.diff_iterate(0, rbd_volume.size(), None, iter_cb) + + if extents: + LOG.debug(_("rbd has %s extents") % (sum(extents))) + return True + + return False + + def _diff_restore_allowed(self, base_name, backup, volume, volume_file, + rados_client): + """Determine whether a differential restore is possible/allowed. + + In order for a differential restore to be performed we need: + * destination volume must be RBD + * destination volume must have zero extents + * backup base image must exist + * backup must have a restore point + + Returns True if differential restore is allowed, False otherwise. 
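+
+        More precisely, a tuple (allowed, restore_point) is returned;
+        restore_point is also supplied in the not-allowed case when a full
+        copy should still be made from that snapshot.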
+ """ + not_allowed = (False, None) + + # If the volume we are restoring to is the volume the backup was made + # from, force a full restore since a diff will not work in this case. + if volume['id'] == backup['volume_id']: + LOG.debug(_("dest volume is original volume - forcing full copy")) + return not_allowed + + if self._file_is_rbd(volume_file): + rbd_exists, base_name = self._rbd_image_exists(base_name, + backup['volume_id'], + rados_client) + + if not rbd_exists: + return not_allowed + + # Get the restore point. If no restore point is found, we assume + # that the backup was not performed using diff/incremental methods + # so we enforce full copy. + restore_point = self._get_restore_point(base_name, backup['id']) + if restore_point: + # If the destination volume has extents we cannot allow a diff + # restore. + if self._rbd_has_extents(volume_file.rbd_image): + # We return the restore point so that a full copy is done + # from snapshot. + LOG.debug(_("destination has extents - forcing full copy")) + return False, restore_point + + return True, restore_point + else: + LOG.info(_("no restore point found for backup='%s', forcing " + "full copy") % (backup['id'])) + + return not_allowed + + def _restore_volume(self, backup, volume, volume_file): + """Restore volume from backup using diff transfer if possible. + + Attempts a differential restore and reverts to full copy if diff fails. + """ + volume_name = volume['name'] + backup_id = backup['id'] + backup_volume_id = backup['volume_id'] + length = int(volume['size']) * units.GiB + + base_name = self._get_backup_base_name(backup['volume_id'], + diff_format=True) + + with rbd_driver.RADOSClient(self, self._ceph_backup_pool) as client: + diff_allowed, restore_point = \ + self._diff_restore_allowed(base_name, backup, volume, + volume_file, client) + + do_full_restore = True + if diff_allowed: + # Attempt diff + try: + self._diff_restore_rbd(base_name, volume_file, volume_name, + restore_point, length) + do_full_restore = False + except exception.BackupRBDOperationFailed: + LOG.debug(_("forcing full restore")) + + if do_full_restore: + # Otherwise full copy + self._full_restore(backup_id, backup_volume_id, volume_file, + volume_name, length, src_snap=restore_point) + + def restore(self, backup, volume_id, volume_file): + """Restore the given volume backup from Ceph object store.""" + target_volume = self.db.volume_get(self.context, volume_id) + LOG.debug(_('starting restore from Ceph backup=%(src)s to ' + 'volume=%(dest)s') % + {'src': backup['id'], 'dest': target_volume['name']}) + + try: + self._restore_volume(backup, target_volume, volume_file) + + # Be tolerant of IO implementations that do not support fileno() + try: + fileno = volume_file.fileno() + except IOError: + LOG.info(_("volume_file does not support fileno() so skipping " + "fsync()")) + else: + os.fsync(fileno) + + LOG.debug(_('restore finished successfully.')) + except exception.BackupOperationError as e: + LOG.error(_('restore finished with error - %s') % (e)) + raise + + def delete(self, backup): + """Delete the given backup from Ceph object store.""" + backup_id = backup['id'] + LOG.debug(_('delete started for backup=%s') % backup['id']) + + try: + self._try_delete_base_image(backup['id'], backup['volume_id']) + except self.rbd.ImageNotFound: + msg = _("rbd image not found but continuing anyway so " + "that db entry can be removed") + LOG.warning(msg) + LOG.info(_("delete '%s' finished with warning") % (backup_id)) + else: + LOG.debug(_("delete '%s' finished") % 
(backup_id))
+
+
+def get_backup_driver(context):
+    return CephBackupDriver(context)
diff --git a/cinder/backup/drivers/swift.py b/cinder/backup/drivers/swift.py
new file mode 100644
index 0000000000..0073354b0d
--- /dev/null
+++ b/cinder/backup/drivers/swift.py
@@ -0,0 +1,472 @@
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Implementation of a backup service that uses Swift as the backend.
+
+**Related Flags**
+
+:backup_swift_url: The URL of the Swift endpoint (default:
+                   http://localhost:8080/v1/AUTH_).
+:backup_swift_object_size: The size in bytes of the Swift objects used
+                           for volume backups (default: 52428800).
+:backup_swift_retry_attempts: The number of retries to make for Swift
+                              operations (default: 3).
+:backup_swift_retry_backoff: The backoff time in seconds between retrying
+                             failed Swift operations (default: 2).
+:backup_compression_algorithm: Compression algorithm to use for volume
+                               backups. Supported options are:
+                               None (to disable), zlib and bz2
+                               (default: zlib).
+"""
+
+import hashlib
+import httplib
+import json
+import os
+import socket
+import StringIO
+
+import eventlet
+from oslo.config import cfg
+
+from cinder.backup.driver import BackupDriver
+from cinder import exception
+from cinder.openstack.common import log as logging
+from cinder.openstack.common import timeutils
+from cinder import units
+from swiftclient import client as swift
+
+
+LOG = logging.getLogger(__name__)
+
+swiftbackup_service_opts = [
+    cfg.StrOpt('backup_swift_url',
+               default='http://localhost:8080/v1/AUTH_',
+               help='The URL of the Swift endpoint'),
+    cfg.StrOpt('backup_swift_auth',
+               default='per_user',
+               help='Swift authentication mechanism'),
+    cfg.StrOpt('backup_swift_user',
+               default=None,
+               help='Swift user name'),
+    cfg.StrOpt('backup_swift_key',
+               default=None,
+               help='Swift key for authentication'),
+    cfg.StrOpt('backup_swift_container',
+               default='volumebackups',
+               help='The default Swift container to use'),
+    cfg.IntOpt('backup_swift_object_size',
+               default=52428800,
+               help='The size in bytes of Swift backup objects'),
+    cfg.IntOpt('backup_swift_retry_attempts',
+               default=3,
+               help='The number of retries to make for Swift operations'),
+    cfg.IntOpt('backup_swift_retry_backoff',
+               default=2,
+               help='The backoff time in seconds between Swift retries'),
+    cfg.StrOpt('backup_compression_algorithm',
+               default='zlib',
+               help='Compression algorithm (None to disable)'),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(swiftbackup_service_opts)
+
+
+class SwiftBackupDriver(BackupDriver):
+    """Provides backup, restore and delete of backup objects within Swift."""
+
+    DRIVER_VERSION = '1.0.0'
+    DRIVER_VERSION_MAPPING = {'1.0.0': '_restore_v1'}
+
+    def _get_compressor(self, algorithm):
+        try:
+            if algorithm.lower() in ('none', 'off', 'no'):
+                return None
+            elif algorithm.lower() in ('zlib', 'gzip'):
+                import zlib as compressor
+                return compressor
+            elif algorithm.lower() in ('bz2', 'bzip2'):
+                import bz2 as compressor
+
return compressor + except ImportError: + pass + + err = _('unsupported compression algorithm: %s') % algorithm + raise ValueError(unicode(err)) + + def __init__(self, context, db_driver=None): + self.context = context + self.swift_url = '%s%s' % (CONF.backup_swift_url, + self.context.project_id) + self.az = CONF.storage_availability_zone + self.data_block_size_bytes = CONF.backup_swift_object_size + self.swift_attempts = CONF.backup_swift_retry_attempts + self.swift_backoff = CONF.backup_swift_retry_backoff + self.compressor = \ + self._get_compressor(CONF.backup_compression_algorithm) + LOG.debug('Connect to %s in "%s" mode' % (CONF.backup_swift_url, + CONF.backup_swift_auth)) + if CONF.backup_swift_auth == 'single_user': + if CONF.backup_swift_user is None: + LOG.error(_("single_user auth mode enabled, " + "but %(param)s not set") + % {'param': 'backup_swift_user'}) + raise exception.ParameterNotFound(param='backup_swift_user') + self.conn = swift.Connection(authurl=CONF.backup_swift_url, + user=CONF.backup_swift_user, + key=CONF.backup_swift_key, + retries=self.swift_attempts, + starting_backoff=self.swift_backoff) + else: + self.conn = swift.Connection(retries=self.swift_attempts, + preauthurl=self.swift_url, + preauthtoken=self.context.auth_token, + starting_backoff=self.swift_backoff) + + super(SwiftBackupDriver, self).__init__(db_driver) + + def _check_container_exists(self, container): + LOG.debug(_('_check_container_exists: container: %s') % container) + try: + self.conn.head_container(container) + except swift.ClientException as error: + if error.http_status == httplib.NOT_FOUND: + LOG.debug(_('container %s does not exist') % container) + return False + else: + raise + else: + LOG.debug(_('container %s exists') % container) + return True + + def _create_container(self, context, backup): + backup_id = backup['id'] + container = backup['container'] + LOG.debug(_('_create_container started, container: %(container)s,' + 'backup: %(backup_id)s') % + {'container': container, 'backup_id': backup_id}) + if container is None: + container = CONF.backup_swift_container + self.db.backup_update(context, backup_id, {'container': container}) + if not self._check_container_exists(container): + self.conn.put_container(container) + return container + + def _generate_swift_object_name_prefix(self, backup): + az = 'az_%s' % self.az + backup_name = '%s_backup_%s' % (az, backup['id']) + volume = 'volume_%s' % (backup['volume_id']) + timestamp = timeutils.strtime(fmt="%Y%m%d%H%M%S") + prefix = volume + '/' + timestamp + '/' + backup_name + LOG.debug(_('_generate_swift_object_name_prefix: %s') % prefix) + return prefix + + def _generate_object_names(self, backup): + prefix = backup['service_metadata'] + swift_objects = self.conn.get_container(backup['container'], + prefix=prefix, + full_listing=True)[1] + swift_object_names = [swift_obj['name'] for swift_obj in swift_objects] + LOG.debug(_('generated object list: %s') % swift_object_names) + return swift_object_names + + def _metadata_filename(self, backup): + swift_object_name = backup['service_metadata'] + filename = '%s_metadata' % swift_object_name + return filename + + def _write_metadata(self, backup, volume_id, container, object_list): + filename = self._metadata_filename(backup) + LOG.debug(_('_write_metadata started, container name: %(container)s,' + ' metadata filename: %(filename)s') % + {'container': container, 'filename': filename}) + metadata = {} + metadata['version'] = self.DRIVER_VERSION + metadata['backup_id'] = backup['id'] + 
metadata['volume_id'] = volume_id + metadata['backup_name'] = backup['display_name'] + metadata['backup_description'] = backup['display_description'] + metadata['created_at'] = str(backup['created_at']) + metadata['objects'] = object_list + metadata_json = json.dumps(metadata, sort_keys=True, indent=2) + reader = StringIO.StringIO(metadata_json) + etag = self.conn.put_object(container, filename, reader, + content_length=reader.len) + md5 = hashlib.md5(metadata_json).hexdigest() + if etag != md5: + err = _('error writing metadata file to swift, MD5 of metadata' + ' file in swift [%(etag)s] is not the same as MD5 of ' + 'metadata file sent to swift [%(md5)s]') % {'etag': etag, + 'md5': md5} + raise exception.InvalidBackup(reason=err) + LOG.debug(_('_write_metadata finished')) + + def _read_metadata(self, backup): + container = backup['container'] + filename = self._metadata_filename(backup) + LOG.debug(_('_read_metadata started, container name: %(container)s, ' + 'metadata filename: %(filename)s') % + {'container': container, 'filename': filename}) + (resp, body) = self.conn.get_object(container, filename) + metadata = json.loads(body) + LOG.debug(_('_read_metadata finished (%s)') % metadata) + return metadata + + def _prepare_backup(self, backup): + """Prepare the backup process and return the backup metadata.""" + backup_id = backup['id'] + volume_id = backup['volume_id'] + volume = self.db.volume_get(self.context, volume_id) + + if volume['size'] <= 0: + err = _('volume size %d is invalid.') % volume['size'] + raise exception.InvalidVolume(reason=err) + + try: + container = self._create_container(self.context, backup) + except socket.error as err: + raise exception.SwiftConnectionFailed(reason=str(err)) + + object_prefix = self._generate_swift_object_name_prefix(backup) + backup['service_metadata'] = object_prefix + self.db.backup_update(self.context, backup_id, {'service_metadata': + object_prefix}) + volume_size_bytes = volume['size'] * units.GiB + availability_zone = self.az + LOG.debug(_('starting backup of volume: %(volume_id)s to swift,' + ' volume size: %(volume_size_bytes)d, swift object names' + ' prefix %(object_prefix)s, availability zone:' + ' %(availability_zone)s') % + { + 'volume_id': volume_id, + 'volume_size_bytes': volume_size_bytes, + 'object_prefix': object_prefix, + 'availability_zone': availability_zone, + }) + object_meta = {'id': 1, 'list': [], 'prefix': object_prefix} + return object_meta, container + + def _backup_chunk(self, backup, container, data, data_offset, object_meta): + """Backup data chunk based on the object metadata and offset.""" + object_prefix = object_meta['prefix'] + object_list = object_meta['list'] + object_id = object_meta['id'] + object_name = '%s-%05d' % (object_prefix, object_id) + obj = {} + obj[object_name] = {} + obj[object_name]['offset'] = data_offset + obj[object_name]['length'] = len(data) + LOG.debug(_('reading chunk of data from volume')) + if self.compressor is not None: + algorithm = CONF.backup_compression_algorithm.lower() + obj[object_name]['compression'] = algorithm + data_size_bytes = len(data) + data = self.compressor.compress(data) + comp_size_bytes = len(data) + LOG.debug(_('compressed %(data_size_bytes)d bytes of data ' + 'to %(comp_size_bytes)d bytes using ' + '%(algorithm)s') % + { + 'data_size_bytes': data_size_bytes, + 'comp_size_bytes': comp_size_bytes, + 'algorithm': algorithm, + }) + else: + LOG.debug(_('not compressing data')) + obj[object_name]['compression'] = 'none' + + reader = StringIO.StringIO(data) + 
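+        # NOTE: put_object returns the object's ETag which, for a
+        # non-chunked upload like this one, is the MD5 of the object data;
+        # it is compared below with a locally computed MD5 to detect
+        # corruption in transit.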
LOG.debug(_('About to put_object')) + try: + etag = self.conn.put_object(container, object_name, reader, + content_length=len(data)) + except socket.error as err: + raise exception.SwiftConnectionFailed(reason=str(err)) + LOG.debug(_('swift MD5 for %(object_name)s: %(etag)s') % + {'object_name': object_name, 'etag': etag, }) + md5 = hashlib.md5(data).hexdigest() + obj[object_name]['md5'] = md5 + LOG.debug(_('backup MD5 for %(object_name)s: %(md5)s') % + {'object_name': object_name, 'md5': md5}) + if etag != md5: + err = _('error writing object to swift, MD5 of object in ' + 'swift %(etag)s is not the same as MD5 of object sent ' + 'to swift %(md5)s') % {'etag': etag, 'md5': md5} + raise exception.InvalidBackup(reason=err) + object_list.append(obj) + object_id += 1 + object_meta['list'] = object_list + object_meta['id'] = object_id + LOG.debug(_('Calling eventlet.sleep(0)')) + eventlet.sleep(0) + + def _finalize_backup(self, backup, container, object_meta): + """Finalize the backup by updating its metadata on Swift.""" + object_list = object_meta['list'] + object_id = object_meta['id'] + try: + self._write_metadata(backup, + backup['volume_id'], + container, + object_list) + except socket.error as err: + raise exception.SwiftConnectionFailed(reason=str(err)) + self.db.backup_update(self.context, backup['id'], + {'object_count': object_id}) + LOG.debug(_('backup %s finished.') % backup['id']) + + def backup(self, backup, volume_file): + """Backup the given volume to swift using the given backup metadata.""" + object_meta, container = self._prepare_backup(backup) + while True: + data = volume_file.read(self.data_block_size_bytes) + data_offset = volume_file.tell() + if data == '': + break + self._backup_chunk(backup, container, data, + data_offset, object_meta) + self._finalize_backup(backup, container, object_meta) + + def _restore_v1(self, backup, volume_id, metadata, volume_file): + """Restore a v1 swift volume backup from swift.""" + backup_id = backup['id'] + LOG.debug(_('v1 swift volume backup restore of %s started'), backup_id) + container = backup['container'] + metadata_objects = metadata['objects'] + metadata_object_names = sum((obj.keys() for obj in metadata_objects), + []) + LOG.debug(_('metadata_object_names = %s') % metadata_object_names) + prune_list = [self._metadata_filename(backup)] + swift_object_names = [swift_object_name for swift_object_name in + self._generate_object_names(backup) + if swift_object_name not in prune_list] + if sorted(swift_object_names) != sorted(metadata_object_names): + err = _('restore_backup aborted, actual swift object list in ' + 'swift does not match object list stored in metadata') + raise exception.InvalidBackup(reason=err) + + for metadata_object in metadata_objects: + object_name = metadata_object.keys()[0] + LOG.debug(_('restoring object from swift. 
backup: %(backup_id)s, ' + 'container: %(container)s, swift object name: ' + '%(object_name)s, volume: %(volume_id)s') % + { + 'backup_id': backup_id, + 'container': container, + 'object_name': object_name, + 'volume_id': volume_id, + }) + try: + (resp, body) = self.conn.get_object(container, object_name) + except socket.error as err: + raise exception.SwiftConnectionFailed(reason=str(err)) + compression_algorithm = metadata_object[object_name]['compression'] + decompressor = self._get_compressor(compression_algorithm) + if decompressor is not None: + LOG.debug(_('decompressing data using %s algorithm') % + compression_algorithm) + decompressed = decompressor.decompress(body) + volume_file.write(decompressed) + else: + volume_file.write(body) + + # force flush every write to avoid long blocking write on close + volume_file.flush() + + # Be tolerant to IO implementations that do not support fileno() + try: + fileno = volume_file.fileno() + except IOError: + LOG.info("volume_file does not support fileno() so skipping " + "fsync()") + else: + os.fsync(fileno) + + # Restoring a backup to a volume can take some time. Yield so other + # threads can run, allowing for among other things the service + # status to be updated + eventlet.sleep(0) + LOG.debug(_('v1 swift volume backup restore of %s finished'), + backup_id) + + def restore(self, backup, volume_id, volume_file): + """Restore the given volume backup from swift.""" + backup_id = backup['id'] + container = backup['container'] + object_prefix = backup['service_metadata'] + LOG.debug(_('starting restore of backup %(object_prefix)s from swift' + ' container: %(container)s, to volume %(volume_id)s, ' + 'backup: %(backup_id)s') % + { + 'object_prefix': object_prefix, + 'container': container, + 'volume_id': volume_id, + 'backup_id': backup_id, + }) + try: + metadata = self._read_metadata(backup) + except socket.error as err: + raise exception.SwiftConnectionFailed(reason=str(err)) + metadata_version = metadata['version'] + LOG.debug(_('Restoring swift backup version %s'), metadata_version) + try: + restore_func = getattr(self, self.DRIVER_VERSION_MAPPING.get( + metadata_version)) + except TypeError: + err = (_('No support to restore swift backup version %s') + % metadata_version) + raise exception.InvalidBackup(reason=err) + restore_func(backup, volume_id, metadata, volume_file) + LOG.debug(_('restore %(backup_id)s to %(volume_id)s finished.') % + {'backup_id': backup_id, 'volume_id': volume_id}) + + def delete(self, backup): + """Delete the given backup from swift.""" + container = backup['container'] + LOG.debug('delete started, backup: %s, container: %s, prefix: %s', + backup['id'], container, backup['service_metadata']) + + if container is not None: + swift_object_names = [] + try: + swift_object_names = self._generate_object_names(backup) + except Exception: + LOG.warn(_('swift error while listing objects, continuing' + ' with delete')) + + for swift_object_name in swift_object_names: + try: + self.conn.delete_object(container, swift_object_name) + except socket.error as err: + raise exception.SwiftConnectionFailed(reason=str(err)) + except Exception: + LOG.warn(_('swift error while deleting object %s, ' + 'continuing with delete') % swift_object_name) + else: + LOG.debug(_('deleted swift object: %(swift_object_name)s' + ' in container: %(container)s') % + { + 'swift_object_name': swift_object_name, + 'container': container + }) + # Deleting a backup's objects from swift can take some time. 
+ # Yield so other threads can run + eventlet.sleep(0) + + LOG.debug(_('delete %s finished') % backup['id']) + + +def get_backup_driver(context): + return SwiftBackupDriver(context) diff --git a/cinder/backup/drivers/tsm.py b/cinder/backup/drivers/tsm.py new file mode 100644 index 0000000000..3f0806a06d --- /dev/null +++ b/cinder/backup/drivers/tsm.py @@ -0,0 +1,444 @@ +# Copyright 2013 IBM Corp +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Backup driver for IBM Tivoli Storage Manager (TSM). + +Implementation of a backup service that uses IBM Tivoli Storage Manager (TSM) +as the backend. The driver uses TSM command line dsmc utility to +run an image backup and restore. +This version supports backup of block devices, e.g, FC, iSCSI, local. + +A prerequisite for using the IBM TSM backup service is configuring the +Cinder host for using TSM. +""" + +import os +import stat + +from oslo.config import cfg + +from cinder.backup.driver import BackupDriver +from cinder import exception +from cinder.openstack.common import log as logging +from cinder.openstack.common import processutils +from cinder import utils + +LOG = logging.getLogger(__name__) + +tsmbackup_service_opts = [ + cfg.StrOpt('backup_tsm_volume_prefix', + default='backup', + help='Volume prefix for the backup id when backing up to TSM'), + cfg.StrOpt('backup_tsm_password', + default='password', + help='TSM password for the running username'), + cfg.BoolOpt('backup_tsm_compression', + default=True, + help='Enable or Disable compression for backups'), +] + +CONF = cfg.CONF +CONF.register_opts(tsmbackup_service_opts) + + +class TSMBackupDriver(BackupDriver): + """Provides backup, restore and delete of volumes backup for TSM.""" + + DRIVER_VERSION = '1.0.0' + + def __init__(self, context, db_driver=None): + self.context = context + self.tsm_password = CONF.backup_tsm_password + self.volume_prefix = CONF.backup_tsm_volume_prefix + super(TSMBackupDriver, self).__init__(db_driver) + + def _make_link(self, volume_path, backup_path, vol_id): + """Create a hard link for the volume block device. + + The IBM TSM client performs an image backup on a block device. + The name of the block device is the backup prefix plus the backup id + + :param volume_path: real device path name for volume + :param backup_path: path name TSM will use as volume to backup + :param vol_id: id of volume to backup (for reporting) + + :raises: InvalidBackup + """ + + try: + utils.execute('ln', volume_path, backup_path, + run_as_root=True, + check_exit_code=True) + except processutils.ProcessExecutionError as e: + err = (_('backup: %(vol_id)s Failed to create device hardlink ' + 'from %(vpath)s to %(bpath)s.\n' + 'stdout: %(out)s\n stderr: %(err)s') + % {'vol_id': vol_id, + 'vpath': volume_path, + 'bpath': backup_path, + 'out': e.stdout, + 'err': e.stderr}) + LOG.error(err) + raise exception.InvalidBackup(reason=err) + + def _check_dsmc_output(self, output, check_attrs): + """Check dsmc command line utility output. 
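+
+        For example (illustrative output), dsmc emits attribute lines of the
+        form "text : value", such as::
+
+            Total number of objects backed up:        1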
+
+        Parse the output of the dsmc command and make sure that a given
+        attribute is present, and that it has the proper value.
+        TSM attributes have the format "text : value".
+
+        :param output: TSM output to parse
+        :param check_attrs: text to identify in the output
+        :returns: bool -- whether the required output attributes were found
+        """
+
+        parsed_attrs = {}
+        for line in output.split('\n'):
+            # parse TSM output: look for "msg : value"
+            key, sep, val = line.partition(':')
+            if (sep is not None and key is not None and len(val.strip()) > 0):
+                parsed_attrs[key] = val.strip()
+
+        for k, v in check_attrs.iteritems():
+            if k not in parsed_attrs or parsed_attrs[k] != v:
+                return False
+        return True
+
+    def _do_backup(self, backup_path, vol_id):
+        """Perform the actual backup operation.
+
+        :param backup_path: volume path
+        :param vol_id: volume id
+        :raises: InvalidBackup
+        """
+
+        backup_attrs = {'Total number of objects backed up': '1'}
+        compr_flag = 'yes' if CONF.backup_tsm_compression else 'no'
+
+        out, err = utils.execute('dsmc',
+                                 'backup',
+                                 'image',
+                                 '-quiet',
+                                 '-compression=%s' % compr_flag,
+                                 '-password=%s' % CONF.backup_tsm_password,
+                                 backup_path,
+                                 run_as_root=True,
+                                 check_exit_code=False)
+
+        success = self._check_dsmc_output(out, backup_attrs)
+        if not success:
+            err = (_('backup: %(vol_id)s Failed to obtain backup '
+                     'success notification from server.\n'
+                     'stdout: %(out)s\n stderr: %(err)s')
+                   % {'vol_id': vol_id,
+                      'out': out,
+                      'err': err})
+            LOG.error(err)
+            raise exception.InvalidBackup(reason=err)
+
+    def _do_restore(self, restore_path, vol_id):
+        """Perform the actual restore operation.
+
+        :param restore_path: volume path
+        :param vol_id: volume id
+        :raises: InvalidBackup
+        """
+
+        restore_attrs = {'Total number of objects restored': '1'}
+        out, err = utils.execute('dsmc',
+                                 'restore',
+                                 'image',
+                                 '-quiet',
+                                 '-password=%s' % self.tsm_password,
+                                 '-noprompt',
+                                 restore_path,
+                                 run_as_root=True,
+                                 check_exit_code=False)
+
+        success = self._check_dsmc_output(out, restore_attrs)
+        if not success:
+            err = (_('restore: %(vol_id)s Failed.\n'
+                     'stdout: %(out)s\n stderr: %(err)s')
+                   % {'vol_id': vol_id,
+                      'out': out,
+                      'err': err})
+            LOG.error(err)
+            raise exception.InvalidBackup(reason=err)
+
+    def _get_volume_realpath(self, volume_file, volume_id):
+        """Get the real path for the volume block device.
+
+        If the volume is not a block device then issue an
+        InvalidBackup exception.
+
+        :param volume_file: file object representing the volume
+        :param volume_id: Volume id for backup or as restore target
+        :raises: InvalidBackup
+        :returns: str -- real path of volume device
+        """
+
+        try:
+            # Get real path
+            volume_path = os.path.realpath(volume_file.name)
+            # Verify that path is a block device
+            volume_mode = os.stat(volume_path).st_mode
+            if not stat.S_ISBLK(volume_mode):
+                err = (_('backup: %(vol_id)s Failed. '
+                         '%(path)s is not a block device.')
+                       % {'vol_id': volume_id,
+                          'path': volume_path})
+                LOG.error(err)
+                raise exception.InvalidBackup(reason=err)
+        except AttributeError as e:
+            err = (_('backup: %(vol_id)s Failed. Cannot obtain real path '
+                     'to device %(path)s.')
+                   % {'vol_id': volume_id,
+                      'path': volume_file})
+            LOG.error(err)
+            raise exception.InvalidBackup(reason=err)
+        except OSError as e:
+            err = (_('backup: %(vol_id)s Failed. 
' + '%(path)s is not a file.') + % {'vol_id': volume_id, + 'path': volume_path}) + LOG.error(err) + raise exception.InvalidBackup(reason=err) + return volume_path + + def _create_device_link_using_backupid(self, + backup_id, + volume_path, + volume_id): + """Create a consistent hardlink for the volume block device. + + Create a consistent hardlink using the backup id so TSM + will be able to backup and restore to the same block device. + + :param backup_id: the backup id + :param volume_path: real path of the backup/restore device + :param volume_id: Volume id for backup or as restore target + :raises: InvalidBackup + :returns str -- hardlink path of the volume block device + """ + + hardlink_path = utils.make_dev_path('%s-%s' % + (self.volume_prefix, + backup_id)) + self._make_link(volume_path, hardlink_path, volume_id) + return hardlink_path + + def _cleanup_device_hardlink(self, + hardlink_path, + volume_path, + volume_id): + """Remove the hardlink for the volume block device. + + :param hardlink_path: hardlink to the volume block device + :param volume_path: real path of the backup/restore device + :param volume_id: Volume id for backup or as restore target + """ + + try: + utils.execute('rm', + '-f', + hardlink_path, + run_as_root=True) + except processutils.ProcessExecutionError as e: + err = (_('backup: %(vol_id)s Failed to remove backup hardlink' + ' from %(vpath)s to %(bpath)s.\n' + 'stdout: %(out)s\n stderr: %(err)s') + % {'vol_id': volume_id, + 'vpath': volume_path, + 'bpath': hardlink_path, + 'out': e.stdout, + 'err': e.stderr}) + LOG.error(err) + + def backup(self, backup, volume_file): + """Backup the given volume to TSM. + + TSM performs an image backup of a volume. The volume_file is + used to determine the path of the block device that TSM will + back-up. + + :param backup: backup information for volume + :param volume_file: file object representing the volume + :raises InvalidBackup + """ + + backup_id = backup['id'] + volume_id = backup['volume_id'] + volume_path = self._get_volume_realpath(volume_file, volume_id) + + LOG.debug(_('starting backup of volume: %(volume_id)s to TSM,' + ' volume path: %(volume_path)s,') + % {'volume_id': volume_id, + 'volume_path': volume_path}) + + backup_path = \ + self._create_device_link_using_backupid(backup_id, + volume_path, + volume_id) + try: + self._do_backup(backup_path, volume_id) + except processutils.ProcessExecutionError as e: + err = (_('backup: %(vol_id)s Failed to run dsmc ' + 'on %(bpath)s.\n' + 'stdout: %(out)s\n stderr: %(err)s') + % {'vol_id': volume_id, + 'bpath': backup_path, + 'out': e.stdout, + 'err': e.stderr}) + LOG.error(err) + raise exception.InvalidBackup(reason=err) + except exception.Error as e: + err = (_('backup: %(vol_id)s Failed to run dsmc ' + 'due to invalid arguments ' + 'on %(bpath)s.\n' + 'stdout: %(out)s\n stderr: %(err)s') + % {'vol_id': volume_id, + 'bpath': backup_path, + 'out': e.stdout, + 'err': e.stderr}) + LOG.error(err) + raise exception.InvalidBackup(reason=err) + + finally: + self._cleanup_device_hardlink(backup_path, + volume_path, + volume_id) + + LOG.debug(_('backup %s finished.') % backup_id) + + def restore(self, backup, volume_id, volume_file): + """Restore the given volume backup from TSM server. 
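+
+        The restore is performed against a hardlink named after the backup
+        id (see _create_device_link_using_backupid), e.g. (illustrative,
+        with the default backup_tsm_volume_prefix) /dev/backup-<backup_id>.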
+
+        :param backup: backup information for volume
+        :param volume_id: volume id
+        :param volume_file: file object representing the volume
+        :raises: InvalidBackup
+        """
+
+        backup_id = backup['id']
+        volume_path = self._get_volume_realpath(volume_file, volume_id)
+
+        LOG.debug(_('restore: starting restore of backup from TSM'
+                    ' to volume %(volume_id)s, '
+                    ' backup: %(backup_id)s')
+                  % {'volume_id': volume_id,
+                     'backup_id': backup_id})
+
+        restore_path = \
+            self._create_device_link_using_backupid(backup_id,
+                                                    volume_path,
+                                                    volume_id)
+
+        try:
+            self._do_restore(restore_path, volume_id)
+        except processutils.ProcessExecutionError as e:
+            err = (_('restore: %(vol_id)s Failed to run dsmc '
+                     'on %(bpath)s.\n'
+                     'stdout: %(out)s\n stderr: %(err)s')
+                   % {'vol_id': volume_id,
+                      'bpath': restore_path,
+                      'out': e.stdout,
+                      'err': e.stderr})
+            LOG.error(err)
+            raise exception.InvalidBackup(reason=err)
+        except exception.Error as e:
+            err = (_('restore: %(vol_id)s Failed to run dsmc '
+                     'due to invalid arguments '
+                     'on %(bpath)s.\n'
+                     'stdout: %(out)s\n stderr: %(err)s')
+                   % {'vol_id': volume_id,
+                      'bpath': restore_path,
+                      'out': e.stdout,
+                      'err': e.stderr})
+            LOG.error(err)
+            raise exception.InvalidBackup(reason=err)
+
+        finally:
+            self._cleanup_device_hardlink(restore_path,
+                                          volume_path,
+                                          volume_id)
+
+        LOG.debug(_('restore %(backup_id)s to %(volume_id)s finished.')
+                  % {'backup_id': backup_id,
+                     'volume_id': volume_id})
+
+    def delete(self, backup):
+        """Delete the given backup from TSM server.
+
+        :param backup: backup information for volume
+        :raises: InvalidBackup
+        """
+
+        delete_attrs = {'Total number of objects deleted': '1'}
+
+        volume_id = backup['volume_id']
+        backup_id = backup['id']
+        LOG.debug('delete started, backup: %s',
+                  backup['id'])
+
+        volume_path = utils.make_dev_path('%s-%s' %
+                                          (self.volume_prefix, backup_id))
+
+        try:
+            out, err = utils.execute('dsmc',
+                                     'delete',
+                                     'backup',
+                                     '-quiet',
+                                     '-noprompt',
+                                     '-objtype=image',
+                                     '-deltype=all',
+                                     '-password=%s' % self.tsm_password,
+                                     volume_path,
+                                     run_as_root=True,
+                                     check_exit_code=False)
+
+        except processutils.ProcessExecutionError as e:
+            err = (_('delete: %(vol_id)s Failed to run dsmc with '
+                     'stdout: %(out)s\n stderr: %(err)s')
+                   % {'vol_id': volume_id,
+                      'out': e.stdout,
+                      'err': e.stderr})
+            LOG.error(err)
+            raise exception.InvalidBackup(reason=err)
+        except exception.Error as e:
+            err = (_('delete: %(vol_id)s Failed to run dsmc '
+                     'due to invalid arguments with '
+                     'stdout: %(out)s\n stderr: %(err)s')
+                   % {'vol_id': volume_id,
+                      'out': e.stdout,
+                      'err': e.stderr})
+            LOG.error(err)
+            raise exception.InvalidBackup(reason=err)
+
+        success = self._check_dsmc_output(out, delete_attrs)
+        if not success:
+            err = (_('delete: %(vol_id)s Failed with '
+                     'stdout: %(out)s\n stderr: %(err)s')
+                   % {'vol_id': volume_id,
+                      'out': out,
+                      'err': err})
+            LOG.error(err)
+            raise exception.InvalidBackup(reason=err)
+
+        LOG.debug(_('delete %s finished') % backup['id'])
+
+
+def get_backup_driver(context):
+    return TSMBackupDriver(context)
diff --git a/cinder/backup/manager.py b/cinder/backup/manager.py
new file mode 100644
index 0000000000..a36790850e
--- /dev/null
+++ b/cinder/backup/manager.py
@@ -0,0 +1,422 @@
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Backup manager manages volume backups. + +Volume Backups are full copies of persistent volumes stored in a backup +store e.g. an object store or any other backup store if and when support is +added. They are usable without the original object being available. A +volume backup can be restored to the original volume it was created from or +any other available volume with a minimum size of the original volume. +Volume backups can be created, restored, deleted and listed. + +**Related Flags** + +:backup_topic: What :mod:`rpc` topic to listen to (default: + `cinder-backup`). +:backup_manager: The module name of a class derived from + :class:`manager.Manager` (default: + :class:`cinder.backup.manager.Manager`). + +""" + +from oslo.config import cfg + +from cinder import context +from cinder import exception +from cinder import manager +from cinder.openstack.common import excutils +from cinder.openstack.common import importutils +from cinder.openstack.common import log as logging +from cinder import utils + +LOG = logging.getLogger(__name__) + +backup_manager_opts = [ + cfg.StrOpt('backup_driver', + default='cinder.backup.drivers.swift', + help='Driver to use for backups.', + deprecated_name='backup_service'), +] + +# This map doesn't need to be extended in the future since it's only +# for old backup services +mapper = {'cinder.backup.services.swift': 'cinder.backup.drivers.swift', + 'cinder.backup.services.ceph': 'cinder.backup.drivers.ceph'} + +CONF = cfg.CONF +CONF.register_opts(backup_manager_opts) + + +class BackupManager(manager.SchedulerDependentManager): + """Manages backup of block storage devices.""" + + RPC_API_VERSION = '1.0' + + def __init__(self, service_name=None, *args, **kwargs): + self.service = importutils.import_module(self.driver_name) + self.az = CONF.storage_availability_zone + self.volume_managers = {} + self._setup_volume_drivers() + super(BackupManager, self).__init__(service_name='backup', + *args, **kwargs) + + @property + def driver_name(self): + """This function maps old backup services to backup drivers.""" + + return self._map_service_to_driver(CONF.backup_driver) + + def _map_service_to_driver(self, service): + """Maps services to drivers.""" + + if service in mapper: + return mapper[service] + return service + + @property + def driver(self): + return self._get_driver() + + def _get_volume_backend(self, host=None, allow_null_host=False): + if host is None: + if not allow_null_host: + msg = _("NULL host not allowed for volume backend lookup.") + raise exception.BackupFailedToGetVolumeBackend(msg) + else: + LOG.debug(_("Checking hostname '%s' for backend info.") % (host)) + part = host.partition('@') + if (part[1] == '@') and (part[2] != ''): + backend = part[2] + LOG.debug("Got backend '%s'." % (backend)) + return backend + + LOG.info(_("Backend not found in hostname (%s) so using default.") % + (host)) + + if 'default' not in self.volume_managers: + # For multi-backend we just pick the top of the list. 
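_get_volume_backend leans on a naming convention instead of extra state: multi-backend volume services register their host as '<host>@<backend>', so the backend name can be recovered from the host column of any volume row. That parsing, reduced to a standalone sketch (ignoring the multi-backend fallback above)::

    def backend_from_host(host):
        # 'node1@lvm1' -> 'lvm1'; plain 'node1' -> 'default'
        name, sep, backend = host.partition('@')
        if sep == '@' and backend != '':
            return backend
        return 'default'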
+ return self.volume_managers.keys()[0] + + return 'default' + + def _get_manager(self, backend): + LOG.debug(_("Manager requested for volume_backend '%s'.") % + (backend)) + if backend is None: + LOG.debug(_("Fetching default backend.")) + backend = self._get_volume_backend(allow_null_host=True) + if backend not in self.volume_managers: + msg = (_("Volume manager for backend '%s' does not exist.") % + (backend)) + raise exception.BackupFailedToGetVolumeBackend(msg) + return self.volume_managers[backend] + + def _get_driver(self, backend=None): + LOG.debug(_("Driver requested for volume_backend '%s'.") % + (backend)) + if backend is None: + LOG.debug(_("Fetching default backend.")) + backend = self._get_volume_backend(allow_null_host=True) + mgr = self._get_manager(backend) + mgr.driver.db = self.db + return mgr.driver + + def _setup_volume_drivers(self): + if CONF.enabled_backends: + for backend in CONF.enabled_backends: + host = "%s@%s" % (CONF.host, backend) + mgr = importutils.import_object(CONF.volume_manager, + host=host, + service_name=backend) + config = mgr.configuration + backend_name = config.safe_get('volume_backend_name') + LOG.debug(_("Registering backend %(backend)s (host=%(host)s " + "backend_name=%(backend_name)s).") % + {'backend': backend, 'host': host, + 'backend_name': backend_name}) + self.volume_managers[backend] = mgr + else: + default = importutils.import_object(CONF.volume_manager) + LOG.debug(_("Registering default backend %s.") % (default)) + self.volume_managers['default'] = default + + def _init_volume_driver(self, ctxt, driver): + LOG.info(_("Starting volume driver %(driver_name)s (%(version)s).") % + {'driver_name': driver.__class__.__name__, + 'version': driver.get_version()}) + try: + driver.do_setup(ctxt) + driver.check_for_setup_error() + except Exception as ex: + LOG.error(_("Error encountered during initialization of driver: " + "%(name)s.") % + {'name': driver.__class__.__name__}) + LOG.exception(ex) + # we don't want to continue since we failed + # to initialize the driver correctly. + return + + driver.set_initialized() + + def init_host(self): + """Do any initialization that needs to be run if this is a + standalone service. 
+ """ + ctxt = context.get_admin_context() + + for mgr in self.volume_managers.itervalues(): + self._init_volume_driver(ctxt, mgr.driver) + + LOG.info(_("Cleaning up incomplete backup operations.")) + volumes = self.db.volume_get_all_by_host(ctxt, self.host) + for volume in volumes: + backend = self._get_volume_backend(host=volume['host']) + if volume['status'] == 'backing-up': + LOG.info(_('Resetting volume %s to available ' + '(was backing-up).') % volume['id']) + mgr = self._get_manager(backend) + mgr.detach_volume(ctxt, volume['id']) + if volume['status'] == 'restoring-backup': + LOG.info(_('Resetting volume %s to error_restoring ' + '(was restoring-backup).') % volume['id']) + mgr = self._get_manager(backend) + mgr.detach_volume(ctxt, volume['id']) + self.db.volume_update(ctxt, volume['id'], + {'status': 'error_restoring'}) + + # TODO(smulcahy) implement full resume of backup and restore + # operations on restart (rather than simply resetting) + backups = self.db.backup_get_all_by_host(ctxt, self.host) + for backup in backups: + if backup['status'] == 'creating': + LOG.info(_('Resetting backup %s to error (was creating).') + % backup['id']) + err = 'incomplete backup reset on manager restart' + self.db.backup_update(ctxt, backup['id'], {'status': 'error', + 'fail_reason': err}) + if backup['status'] == 'restoring': + LOG.info(_('Resetting backup %s to available (was restoring).') + % backup['id']) + self.db.backup_update(ctxt, backup['id'], + {'status': 'available'}) + if backup['status'] == 'deleting': + LOG.info(_('Resuming delete on backup: %s.') % backup['id']) + self.delete_backup(ctxt, backup['id']) + + def create_backup(self, context, backup_id): + """Create volume backups using configured backup service.""" + backup = self.db.backup_get(context, backup_id) + volume_id = backup['volume_id'] + volume = self.db.volume_get(context, volume_id) + LOG.info(_('Create backup started, backup: %(backup_id)s ' + 'volume: %(volume_id)s.') % + {'backup_id': backup_id, 'volume_id': volume_id}) + backend = self._get_volume_backend(host=volume['host']) + + self.db.backup_update(context, backup_id, {'host': self.host, + 'service': + self.driver_name}) + + expected_status = 'backing-up' + actual_status = volume['status'] + if actual_status != expected_status: + err = _('Create backup aborted, expected volume status ' + '%(expected_status)s but got %(actual_status)s.') % { + 'expected_status': expected_status, + 'actual_status': actual_status, + } + self.db.backup_update(context, backup_id, {'status': 'error', + 'fail_reason': err}) + raise exception.InvalidVolume(reason=err) + + expected_status = 'creating' + actual_status = backup['status'] + if actual_status != expected_status: + err = _('Create backup aborted, expected backup status ' + '%(expected_status)s but got %(actual_status)s.') % { + 'expected_status': expected_status, + 'actual_status': actual_status, + } + self.db.volume_update(context, volume_id, {'status': 'available'}) + self.db.backup_update(context, backup_id, {'status': 'error', + 'fail_reason': err}) + raise exception.InvalidBackup(reason=err) + + try: + # NOTE(flaper87): Verify the driver is enabled + # before going forward. 
The exception will be caught, + # the volume status will be set back to available and + # the backup status to 'error' + utils.require_driver_initialized(self.driver) + + backup_service = self.service.get_backup_driver(context) + self._get_driver(backend).backup_volume(context, backup, + backup_service) + except Exception as err: + with excutils.save_and_reraise_exception(): + self.db.volume_update(context, volume_id, + {'status': 'available'}) + self.db.backup_update(context, backup_id, + {'status': 'error', + 'fail_reason': unicode(err)}) + + self.db.volume_update(context, volume_id, {'status': 'available'}) + self.db.backup_update(context, backup_id, {'status': 'available', + 'size': volume['size'], + 'availability_zone': + self.az}) + LOG.info(_('Create backup finished. backup: %s.'), backup_id) + + def restore_backup(self, context, backup_id, volume_id): + """Restore volume backups from configured backup service.""" + LOG.info(_('Restore backup started, backup: %(backup_id)s ' + 'volume: %(volume_id)s.') % + {'backup_id': backup_id, 'volume_id': volume_id}) + + backup = self.db.backup_get(context, backup_id) + volume = self.db.volume_get(context, volume_id) + backend = self._get_volume_backend(host=volume['host']) + + self.db.backup_update(context, backup_id, {'host': self.host}) + + expected_status = 'restoring-backup' + actual_status = volume['status'] + if actual_status != expected_status: + err = _('Restore backup aborted: expected volume status ' + '%(expected_status)s but got %(actual_status)s.') % { + 'expected_status': expected_status, + 'actual_status': actual_status + } + self.db.backup_update(context, backup_id, {'status': 'available'}) + raise exception.InvalidVolume(reason=err) + + expected_status = 'restoring' + actual_status = backup['status'] + if actual_status != expected_status: + err = _('Restore backup aborted: expected backup status ' + '%(expected_status)s but got %(actual_status)s.') % { + 'expected_status': expected_status, + 'actual_status': actual_status + } + self.db.backup_update(context, backup_id, {'status': 'error', + 'fail_reason': err}) + self.db.volume_update(context, volume_id, {'status': 'error'}) + raise exception.InvalidBackup(reason=err) + + if volume['size'] > backup['size']: + LOG.warn('Volume: %s, size: %d is larger than backup: %s, ' + 'size: %d, continuing with restore.', + volume['id'], volume['size'], + backup['id'], backup['size']) + + backup_service = self._map_service_to_driver(backup['service']) + configured_service = self.driver_name + if backup_service != configured_service: + err = _('Restore backup aborted, the backup service currently' + ' configured [%(configured_service)s] is not the' + ' backup service that was used to create this' + ' backup [%(backup_service)s].') % { + 'configured_service': configured_service, + 'backup_service': backup_service, + } + self.db.backup_update(context, backup_id, {'status': 'available'}) + self.db.volume_update(context, volume_id, {'status': 'error'}) + raise exception.InvalidBackup(reason=err) + + try: + # NOTE(flaper87): Verify the driver is enabled + # before going forward. 
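The service-compatibility check above is what the module-level mapper exists for: backup rows written before the services-to-drivers rename record the old import path, and mapping them forward keeps those backups restorable. The comparison in isolation, reusing the mapper verbatim::

    mapper = {'cinder.backup.services.swift':
              'cinder.backup.drivers.swift',
              'cinder.backup.services.ceph':
              'cinder.backup.drivers.ceph'}

    def same_backup_service(recorded, configured):
        # A None service means the row predates service tracking;
        # delete_backup treats that as compatible.
        if recorded is None:
            return True
        return mapper.get(recorded, recorded) == configured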
The exception will be caught, + # the volume status will be set back to available and + # the backup status to 'error' + utils.require_driver_initialized(self.driver) + + backup_service = self.service.get_backup_driver(context) + self._get_driver(backend).restore_backup(context, backup, + volume, + backup_service) + except Exception: + with excutils.save_and_reraise_exception(): + self.db.volume_update(context, volume_id, + {'status': 'error_restoring'}) + self.db.backup_update(context, backup_id, + {'status': 'available'}) + + self.db.volume_update(context, volume_id, {'status': 'available'}) + self.db.backup_update(context, backup_id, {'status': 'available'}) + LOG.info(_('Restore backup finished, backup %(backup_id)s restored' + ' to volume %(volume_id)s.') % + {'backup_id': backup_id, 'volume_id': volume_id}) + + def delete_backup(self, context, backup_id): + """Delete volume backup from configured backup service.""" + try: + # NOTE(flaper87): Verify the driver is enabled + # before going forward. The exception will be caught + # and the backup status updated. Fail early since there + # are no other status to change but backup's + utils.require_driver_initialized(self.driver) + except exception.DriverNotInitialized as err: + with excutils.save_and_reraise_exception(): + self.db.backup_update(context, backup_id, + {'status': 'error', + 'fail_reason': + unicode(err)}) + + LOG.info(_('Delete backup started, backup: %s.'), backup_id) + backup = self.db.backup_get(context, backup_id) + self.db.backup_update(context, backup_id, {'host': self.host}) + + expected_status = 'deleting' + actual_status = backup['status'] + if actual_status != expected_status: + err = _('Delete_backup aborted, expected backup status ' + '%(expected_status)s but got %(actual_status)s.') % { + 'expected_status': expected_status, + 'actual_status': actual_status, + } + self.db.backup_update(context, backup_id, {'status': 'error', + 'fail_reason': err}) + raise exception.InvalidBackup(reason=err) + + backup_service = self._map_service_to_driver(backup['service']) + if backup_service is not None: + configured_service = self.driver_name + if backup_service != configured_service: + err = _('Delete backup aborted, the backup service currently' + ' configured [%(configured_service)s] is not the' + ' backup service that was used to create this' + ' backup [%(backup_service)s].') % { + 'configured_service': configured_service, + 'backup_service': backup_service, + } + self.db.backup_update(context, backup_id, + {'status': 'error'}) + raise exception.InvalidBackup(reason=err) + + try: + backup_service = self.service.get_backup_driver(context) + backup_service.delete(backup) + except Exception as err: + with excutils.save_and_reraise_exception(): + self.db.backup_update(context, backup_id, + {'status': 'error', + 'fail_reason': + unicode(err)}) + + context = context.elevated() + self.db.backup_destroy(context, backup_id) + LOG.info(_('Delete backup finished, backup %s deleted.'), backup_id) diff --git a/cinder/backup/rpcapi.py b/cinder/backup/rpcapi.py new file mode 100644 index 0000000000..42941ff05c --- /dev/null +++ b/cinder/backup/rpcapi.py @@ -0,0 +1,73 @@ +# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Client side of the volume backup RPC API. +""" + + +from oslo.config import cfg + +from cinder.openstack.common import log as logging +from cinder.openstack.common import rpc +import cinder.openstack.common.rpc.proxy + + +CONF = cfg.CONF + +LOG = logging.getLogger(__name__) + + +class BackupAPI(cinder.openstack.common.rpc.proxy.RpcProxy): + """Client side of the volume rpc API. + + API version history: + + 1.0 - Initial version. + """ + + BASE_RPC_API_VERSION = '1.0' + + def __init__(self): + super(BackupAPI, self).__init__( + topic=CONF.backup_topic, + default_version=self.BASE_RPC_API_VERSION) + + def create_backup(self, ctxt, host, backup_id, volume_id): + LOG.debug("create_backup in rpcapi backup_id %s", backup_id) + topic = rpc.queue_get_for(ctxt, self.topic, host) + LOG.debug("create queue topic=%s", topic) + self.cast(ctxt, + self.make_msg('create_backup', + backup_id=backup_id), + topic=topic) + + def restore_backup(self, ctxt, host, backup_id, volume_id): + LOG.debug("restore_backup in rpcapi backup_id %s", backup_id) + topic = rpc.queue_get_for(ctxt, self.topic, host) + LOG.debug("restore queue topic=%s", topic) + self.cast(ctxt, + self.make_msg('restore_backup', + backup_id=backup_id, + volume_id=volume_id), + topic=topic) + + def delete_backup(self, ctxt, host, backup_id): + LOG.debug("delete_backup rpcapi backup_id %s", backup_id) + topic = rpc.queue_get_for(ctxt, self.topic, host) + self.cast(ctxt, + self.make_msg('delete_backup', + backup_id=backup_id), + topic=topic) diff --git a/cinder/brick/README.txt b/cinder/brick/README.txt new file mode 100644 index 0000000000..bc26cd11bb --- /dev/null +++ b/cinder/brick/README.txt @@ -0,0 +1,9 @@ +Brick is a new library that currently is maintained in Cinder for +the Havana release. It will eventually be moved external to Cinder, +possibly oslo, or pypi. Any defects found in Brick, should be submitted +against Cinder and fixed there, then pulled into other projects that +are using brick. + +* Brick is used outside of Cinder and therefore + cannot have any dependencies on Cinder and/or + it's database. diff --git a/cinder/brick/__init__.py b/cinder/brick/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cinder/brick/exception.py b/cinder/brick/exception.py new file mode 100644 index 0000000000..3d6e014dcd --- /dev/null +++ b/cinder/brick/exception.py @@ -0,0 +1,117 @@ +# (c) Copyright 2013 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
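All three client methods in BackupAPI above are fire-and-forget casts addressed to the backup service on one specific host. A sketch of what create_backup() actually puts on the wire, assuming the default 'cinder-backup' topic and hypothetical host/id values; note that volume_id is accepted by the client method but never sent, since the manager re-reads it from the backup row::

    # BackupAPI().create_backup(ctxt, host='node1',
    #                           backup_id='b1', volume_id='v1') casts:
    topic = 'cinder-backup.node1'    # rpc.queue_get_for(ctxt, topic, host)
    message = {'method': 'create_backup',
               'args': {'backup_id': 'b1'},
               'version': '1.0'}     # BASE_RPC_API_VERSION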
+ +"""Exceptions for the Brick library.""" + +import sys + +from cinder.openstack.common.gettextutils import _ +from cinder.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + + +class BrickException(Exception): + """Base Brick Exception + + To correctly use this class, inherit from it and define + a 'msg_fmt' property. That msg_fmt will get printf'd + with the keyword arguments provided to the constructor. + """ + msg_fmt = _("An unknown exception occurred.") + code = 500 + headers = {} + safe = False + + def __init__(self, message=None, **kwargs): + self.kwargs = kwargs + + if 'code' not in self.kwargs: + try: + self.kwargs['code'] = self.code + except AttributeError: + pass + + if not message: + try: + message = self.msg_fmt % kwargs + + except Exception: + exc_info = sys.exc_info() + # kwargs doesn't match a variable in the message + # log the issue and the kwargs + msg = (_("Exception in string format operation. msg='%s'") + % self.msg_fmt) + LOG.exception(msg) + for name, value in kwargs.iteritems(): + LOG.error("%s: %s" % (name, value)) + + # at least get the core message out if something happened + message = self.msg_fmt + + super(BrickException, self).__init__(message) + + +class NotFound(BrickException): + message = _("Resource could not be found.") + code = 404 + safe = True + + +class Invalid(BrickException): + message = _("Unacceptable parameters.") + code = 400 + + +# Cannot be templated as the error syntax varies. +# msg needs to be constructed when raised. +class InvalidParameterValue(Invalid): + message = _("%(err)s") + + +class NoFibreChannelHostsFound(BrickException): + message = _("We are unable to locate any Fibre Channel devices.") + + +class NoFibreChannelVolumeDeviceFound(BrickException): + message = _("Unable to find a Fibre Channel volume device.") + + +class VolumeDeviceNotFound(BrickException): + message = _("Volume device not found at %(device)s.") + + +class VolumeGroupNotFound(BrickException): + message = _('Unable to find Volume Group: %(vg_name)s') + + +class VolumeGroupCreationFailed(BrickException): + message = _('Failed to create Volume Group: %(vg_name)s') + + +class ISCSITargetCreateFailed(BrickException): + message = _("Failed to create iscsi target for volume %(volume_id)s.") + + +class ISCSITargetRemoveFailed(BrickException): + message = _("Failed to remove iscsi target for volume %(volume_id)s.") + + +class ISCSITargetAttachFailed(BrickException): + message = _("Failed to attach iSCSI target for volume %(volume_id)s.") + + +class ProtocolNotSupported(BrickException): + message = _("Connect to volume via protocol %(protocol)s not supported.") diff --git a/cinder/brick/executor.py b/cinder/brick/executor.py new file mode 100644 index 0000000000..de689521cc --- /dev/null +++ b/cinder/brick/executor.py @@ -0,0 +1,34 @@ +# (c) Copyright 2013 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Generic exec utility that allows us to set the + execute and root_helper attributes for putils. 
+ Some projects need their own execute wrapper + and root_helper settings, so this provides that hook. +""" + +from cinder.openstack.common import processutils as putils + + +class Executor(object): + def __init__(self, root_helper, execute=putils.execute, + *args, **kwargs): + self.set_execute(execute) + self.set_root_helper(root_helper) + + def set_execute(self, execute): + self._execute = execute + + def set_root_helper(self, helper): + self._root_helper = helper diff --git a/cinder/brick/initiator/__init__.py b/cinder/brick/initiator/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cinder/brick/initiator/connector.py b/cinder/brick/initiator/connector.py new file mode 100644 index 0000000000..3ffc6b5901 --- /dev/null +++ b/cinder/brick/initiator/connector.py @@ -0,0 +1,925 @@ +# Copyright 2013 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import os +import socket +import time + +from cinder.brick import exception +from cinder.brick import executor +from cinder.brick.initiator import host_driver +from cinder.brick.initiator import linuxfc +from cinder.brick.initiator import linuxscsi +from cinder.brick.remotefs import remotefs +from cinder.openstack.common.gettextutils import _ +from cinder.openstack.common import lockutils +from cinder.openstack.common import log as logging +from cinder.openstack.common import loopingcall +from cinder.openstack.common import processutils as putils + +LOG = logging.getLogger(__name__) + +synchronized = lockutils.synchronized_with_prefix('brick-') +DEVICE_SCAN_ATTEMPTS_DEFAULT = 3 + + +def get_connector_properties(root_helper, my_ip): + """Get the connection properties for all protocols.""" + + iscsi = ISCSIConnector(root_helper=root_helper) + fc = linuxfc.LinuxFibreChannel(root_helper=root_helper) + + props = {} + props['ip'] = my_ip + props['host'] = socket.gethostname() + initiator = iscsi.get_initiator() + if initiator: + props['initiator'] = initiator + wwpns = fc.get_fc_wwpns() + if wwpns: + props['wwpns'] = wwpns + wwnns = fc.get_fc_wwnns() + if wwnns: + props['wwnns'] = wwnns + + return props + + +class InitiatorConnector(executor.Executor): + def __init__(self, root_helper, driver=None, + execute=putils.execute, + device_scan_attempts=DEVICE_SCAN_ATTEMPTS_DEFAULT, + *args, **kwargs): + super(InitiatorConnector, self).__init__(root_helper, execute=execute, + *args, **kwargs) + if not driver: + driver = host_driver.HostDriver() + self.set_driver(driver) + self.device_scan_attempts = device_scan_attempts + + def set_driver(self, driver): + """The driver is used to find used LUNs.""" + + self.driver = driver + + @staticmethod + def factory(protocol, root_helper, driver=None, + execute=putils.execute, use_multipath=False, + device_scan_attempts=DEVICE_SCAN_ATTEMPTS_DEFAULT, + *args, **kwargs): + """Build a Connector object based upon protocol.""" + LOG.debug("Factory for %s" % protocol) + protocol = protocol.upper() + if protocol == "ISCSI": + return 
ISCSIConnector(root_helper=root_helper, + driver=driver, + execute=execute, + use_multipath=use_multipath, + device_scan_attempts=device_scan_attempts, + *args, **kwargs) + elif protocol == "ISER": + return ISERConnector(root_helper=root_helper, + driver=driver, + execute=execute, + use_multipath=use_multipath, + device_scan_attempts=device_scan_attempts, + *args, **kwargs) + elif protocol == "FIBRE_CHANNEL": + return FibreChannelConnector(root_helper=root_helper, + driver=driver, + execute=execute, + use_multipath=use_multipath, + device_scan_attempts= + device_scan_attempts, + *args, **kwargs) + elif protocol == "AOE": + return AoEConnector(root_helper=root_helper, + driver=driver, + execute=execute, + device_scan_attempts=device_scan_attempts, + *args, **kwargs) + elif protocol == "NFS" or protocol == "GLUSTERFS": + return RemoteFsConnector(mount_type=protocol.lower(), + root_helper=root_helper, + driver=driver, + execute=execute, + device_scan_attempts=device_scan_attempts, + *args, **kwargs) + elif protocol == "LOCAL": + return LocalConnector(root_helper=root_helper, + driver=driver, + execute=execute, + device_scan_attempts=device_scan_attempts, + *args, **kwargs) + else: + msg = (_("Invalid InitiatorConnector protocol " + "specified %(protocol)s") % + dict(protocol=protocol)) + raise ValueError(msg) + + def check_valid_device(self, path): + cmd = ('dd', 'if=%(path)s' % {"path": path}, + 'of=/dev/null', 'count=1') + out, info = None, None + try: + out, info = self._execute(*cmd, run_as_root=True, + root_helper=self._root_helper) + except putils.ProcessExecutionError as e: + LOG.error(_("Failed to access the device on the path " + "%(path)s: %(error)s %(info)s.") % + {"path": path, "error": e.stderr, + "info": info}) + return False + # If the info is none, the path does not exist. + if info is None: + return False + return True + + def connect_volume(self, connection_properties): + """Connect to a volume. + + The connection_properties describes the information needed by + the specific protocol to use to make the connection. + """ + raise NotImplementedError() + + def disconnect_volume(self, connection_properties, device_info): + """Disconnect a volume from the local host. + + The connection_properties are the same as from connect_volume. + The device_info is returned from connect_volume. + """ + raise NotImplementedError() + + +class ISCSIConnector(InitiatorConnector): + """Connector class to attach/detach iSCSI volumes.""" + + def __init__(self, root_helper, driver=None, + execute=putils.execute, use_multipath=False, + device_scan_attempts=DEVICE_SCAN_ATTEMPTS_DEFAULT, + *args, **kwargs): + self._linuxscsi = linuxscsi.LinuxSCSI(root_helper, execute) + super(ISCSIConnector, self).__init__(root_helper, driver=driver, + execute=execute, + device_scan_attempts= + device_scan_attempts, + *args, **kwargs) + self.use_multipath = use_multipath + + def set_execute(self, execute): + super(ISCSIConnector, self).set_execute(execute) + self._linuxscsi.set_execute(execute) + + @synchronized('connect_volume') + def connect_volume(self, connection_properties): + """Attach the volume to instance_name. 
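The factory above is the intended entry point: callers name a protocol and get the matching connector back without importing the subclasses. A usage sketch; the root_helper and connection property values are purely illustrative::

    from cinder.brick.initiator import connector

    conn = connector.InitiatorConnector.factory(
        'iscsi', root_helper='sudo', use_multipath=True)

    # connection_properties as a volume driver would hand back from
    # initialize_connection(); values are hypothetical.
    device_info = conn.connect_volume({
        'target_portal': '192.168.0.10:3260',
        'target_iqn': 'iqn.2010-10.org.openstack:volume-0001',
        'target_lun': 1,
    })
    # -> {'type': 'block', 'path': '/dev/disk/by-path/ip-...'}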
+ + connection_properties for iSCSI must include: + target_portal - ip and optional port + target_iqn - iSCSI Qualified Name + target_lun - LUN id of the volume + """ + + device_info = {'type': 'block'} + + if self.use_multipath: + #multipath installed, discovering other targets if available + target_portal = connection_properties['target_portal'] + out = self._run_iscsiadm_bare(['-m', + 'discovery', + '-t', + 'sendtargets', + '-p', + target_portal], + check_exit_code=[0, 255])[0] \ + or "" + + for ip, iqn in self._get_target_portals_from_iscsiadm_output(out): + props = connection_properties.copy() + props['target_portal'] = ip + props['target_iqn'] = iqn + self._connect_to_iscsi_portal(props) + + self._rescan_iscsi() + else: + self._connect_to_iscsi_portal(connection_properties) + + host_device = self._get_device_path(connection_properties) + + # The /dev/disk/by-path/... node is not always present immediately + # TODO(justinsb): This retry-with-delay is a pattern, move to utils? + tries = 0 + while not os.path.exists(host_device): + if tries >= self.device_scan_attempts: + raise exception.VolumeDeviceNotFound(device=host_device) + + LOG.warn(_("ISCSI volume not yet found at: %(host_device)s. " + "Will rescan & retry. Try number: %(tries)s"), + {'host_device': host_device, + 'tries': tries}) + + # The rescan isn't documented as being necessary(?), but it helps + self._run_iscsiadm(connection_properties, ("--rescan",)) + + tries = tries + 1 + if not os.path.exists(host_device): + time.sleep(tries ** 2) + + if tries != 0: + LOG.debug(_("Found iSCSI node %(host_device)s " + "(after %(tries)s rescans)"), + {'host_device': host_device, 'tries': tries}) + + if self.use_multipath: + #we use the multipath device instead of the single path device + self._rescan_multipath() + multipath_device = self._get_multipath_device_name(host_device) + if multipath_device is not None: + host_device = multipath_device + + device_info['path'] = host_device + return device_info + + @synchronized('connect_volume') + def disconnect_volume(self, connection_properties, device_info): + """Detach the volume from instance_name. + + connection_properties for iSCSI must include: + target_portal - IP and optional port + target_iqn - iSCSI Qualified Name + target_lun - LUN id of the volume + """ + # Moved _rescan_iscsi and _rescan_multipath + # from _disconnect_volume_multipath_iscsi to here. + # Otherwise, if we do rescan after _linuxscsi.remove_multipath_device + # but before logging out, the removed devices under /dev/disk/by-path + # will reappear after rescan. + self._rescan_iscsi() + host_device = self._get_device_path(connection_properties) + multipath_device = None + if self.use_multipath: + self._rescan_multipath() + multipath_device = self._get_multipath_device_name(host_device) + if multipath_device: + device_realpath = os.path.realpath(host_device) + self._linuxscsi.remove_multipath_device(device_realpath) + return self._disconnect_volume_multipath_iscsi( + connection_properties, multipath_device) + + # remove the device from the scsi subsystem + # this eliminates any stale entries until logout + dev_name = self._linuxscsi.get_name_from_path(host_device) + if dev_name: + self._linuxscsi.remove_scsi_device(dev_name) + + # NOTE(vish): Only disconnect from the target if no luns from the + # target are in use. 
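The wait loop in connect_volume encodes a reusable pattern, which the TODO in the code itself suggests promoting to utils: bounded attempts, a rescan nudge to the initiator, and a quadratically growing sleep between checks. The same idea as a generic helper, with hypothetical names::

    import os
    import time

    def wait_for_path(path, rescan, max_tries=3):
        # rescan is any callable that nudges the kernel/initiator,
        # e.g. lambda: run_iscsiadm(props, ('--rescan',)).
        tries = 0
        while not os.path.exists(path):
            if tries >= max_tries:
                raise RuntimeError('device never appeared: %s' % path)
            rescan()
            tries += 1
            if not os.path.exists(path):
                time.sleep(tries ** 2)    # 1s, 4s, 9s, ...
        return tries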
+ device_prefix = ("/dev/disk/by-path/ip-%(portal)s-iscsi-%(iqn)s-lun-" % + {'portal': connection_properties['target_portal'], + 'iqn': connection_properties['target_iqn']}) + devices = self.driver.get_all_block_devices() + devices = [dev for dev in devices if dev.startswith(device_prefix)] + + if not devices: + self._disconnect_from_iscsi_portal(connection_properties) + + def _get_device_path(self, connection_properties): + path = ("/dev/disk/by-path/ip-%(portal)s-iscsi-%(iqn)s-lun-%(lun)s" % + {'portal': connection_properties['target_portal'], + 'iqn': connection_properties['target_iqn'], + 'lun': connection_properties.get('target_lun', 0)}) + return path + + def get_initiator(self): + """Secure helper to read file as root.""" + file_path = '/etc/iscsi/initiatorname.iscsi' + try: + lines, _err = self._execute('cat', file_path, run_as_root=True, + root_helper=self._root_helper) + + for l in lines.split('\n'): + if l.startswith('InitiatorName='): + return l[l.index('=') + 1:].strip() + except putils.ProcessExecutionError: + msg = (_("Could not find the iSCSI Initiator File %s") + % file_path) + LOG.warn(msg) + return None + + def _run_iscsiadm(self, connection_properties, iscsi_command, **kwargs): + check_exit_code = kwargs.pop('check_exit_code', 0) + (out, err) = self._execute('iscsiadm', '-m', 'node', '-T', + connection_properties['target_iqn'], + '-p', + connection_properties['target_portal'], + *iscsi_command, run_as_root=True, + root_helper=self._root_helper, + check_exit_code=check_exit_code) + LOG.debug("iscsiadm %s: stdout=%s stderr=%s" % + (iscsi_command, out, err)) + return (out, err) + + def _iscsiadm_update(self, connection_properties, property_key, + property_value, **kwargs): + iscsi_command = ('--op', 'update', '-n', property_key, + '-v', property_value) + return self._run_iscsiadm(connection_properties, iscsi_command, + **kwargs) + + def _get_target_portals_from_iscsiadm_output(self, output): + # return both portals and iqns + return [line.split() for line in output.splitlines()] + + def _disconnect_volume_multipath_iscsi(self, connection_properties, + multipath_name): + """This removes a multipath device and it's LUNs.""" + LOG.debug("Disconnect multipath device %s" % multipath_name) + block_devices = self.driver.get_all_block_devices() + devices = [] + for dev in block_devices: + if "/mapper/" in dev: + devices.append(dev) + else: + mpdev = self._get_multipath_device_name(dev) + if mpdev: + devices.append(mpdev) + + # Do a discovery to find all targets. + # Targets for multiple paths for the same multipath device + # may not be the same. 
+ out = self._run_iscsiadm_bare(['-m', + 'discovery', + '-t', + 'sendtargets', + '-p', + connection_properties['target_portal']], + check_exit_code=[0, 255])[0] \ + or "" + + ips_iqns = self._get_target_portals_from_iscsiadm_output(out) + + if not devices: + # disconnect if no other multipath devices + self._disconnect_mpath(connection_properties, ips_iqns) + return + + # Get a target for all other multipath devices + other_iqns = [self._get_multipath_iqn(device) + for device in devices] + # Get all the targets for the current multipath device + current_iqns = [iqn for ip, iqn in ips_iqns] + + in_use = False + for current in current_iqns: + if current in other_iqns: + in_use = True + break + + # If no other multipath device attached has the same iqn + # as the current device + if not in_use: + # disconnect if no other multipath devices with same iqn + self._disconnect_mpath(connection_properties, ips_iqns) + return + + # else do not disconnect iscsi portals, + # as they are used for other luns + return + + def _connect_to_iscsi_portal(self, connection_properties): + # NOTE(vish): If we are on the same host as nova volume, the + # discovery makes the target so we don't need to + # run --op new. Therefore, we check to see if the + # target exists, and if we get 255 (Not Found), then + # we run --op new. This will also happen if another + # volume is using the same target. + try: + self._run_iscsiadm(connection_properties, ()) + except putils.ProcessExecutionError as exc: + # iscsiadm returns 21 for "No records found" after version 2.0-871 + if exc.exit_code in [21, 255]: + self._run_iscsiadm(connection_properties, ('--op', 'new')) + else: + raise + + if connection_properties.get('auth_method'): + self._iscsiadm_update(connection_properties, + "node.session.auth.authmethod", + connection_properties['auth_method']) + self._iscsiadm_update(connection_properties, + "node.session.auth.username", + connection_properties['auth_username']) + self._iscsiadm_update(connection_properties, + "node.session.auth.password", + connection_properties['auth_password']) + + #duplicate logins crash iscsiadm after load, + #so we scan active sessions to see if the node is logged in. 
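Two small parsers carry the portal bookkeeping here: _get_target_portals_from_iscsiadm_output whitespace-splits sendtargets discovery lines into (portal, iqn) pairs, and the login check below picks 'tcp:' lines out of ``iscsiadm -m session``. Both shown on representative, made-up output::

    discovery_out = (
        '192.168.0.10:3260,1 iqn.2010-10.org.openstack:volume-0001\n'
        '192.168.0.11:3260,1 iqn.2010-10.org.openstack:volume-0001\n')
    ips_iqns = [line.split() for line in discovery_out.splitlines()]
    # -> [['192.168.0.10:3260,1', 'iqn.2010-10...:volume-0001'],
    #     ['192.168.0.11:3260,1', 'iqn.2010-10...:volume-0001']]

    session_out = ('tcp: [1] 192.168.0.10:3260,1 '
                   'iqn.2010-10.org.openstack:volume-0001\n')
    portals = [{'portal': p.split(' ')[2], 'iqn': p.split(' ')[3]}
               for p in session_out.splitlines() if p.startswith('tcp:')]
    # -> [{'portal': '192.168.0.10:3260,1',
    #      'iqn': 'iqn.2010-10.org.openstack:volume-0001'}]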
+ out = self._run_iscsiadm_bare(["-m", "session"], + run_as_root=True, + check_exit_code=[0, 1, 21])[0] or "" + + portals = [{'portal': p.split(" ")[2], 'iqn': p.split(" ")[3]} + for p in out.splitlines() if p.startswith("tcp:")] + + stripped_portal = connection_properties['target_portal'].split(",")[0] + if len(portals) == 0 or len([s for s in portals + if stripped_portal == + s['portal'].split(",")[0] + and + s['iqn'] == + connection_properties['target_iqn']] + ) == 0: + try: + self._run_iscsiadm(connection_properties, + ("--login",), + check_exit_code=[0, 255]) + except putils.ProcessExecutionError as err: + #as this might be one of many paths, + #only set successful logins to startup automatically + if err.exit_code in [15]: + self._iscsiadm_update(connection_properties, + "node.startup", + "automatic") + return + + self._iscsiadm_update(connection_properties, + "node.startup", + "automatic") + + def _disconnect_from_iscsi_portal(self, connection_properties): + self._iscsiadm_update(connection_properties, "node.startup", "manual", + check_exit_code=[0, 21, 255]) + self._run_iscsiadm(connection_properties, ("--logout",), + check_exit_code=[0, 21, 255]) + self._run_iscsiadm(connection_properties, ('--op', 'delete'), + check_exit_code=[0, 21, 255]) + + def _get_multipath_device_name(self, single_path_device): + device = os.path.realpath(single_path_device) + out = self._run_multipath(['-ll', + device], + check_exit_code=[0, 1])[0] + mpath_line = [line for line in out.splitlines() + if "scsi_id" not in line] # ignore udev errors + if len(mpath_line) > 0 and len(mpath_line[0]) > 0: + return "/dev/mapper/%s" % mpath_line[0].split(" ")[0] + + return None + + def _get_iscsi_devices(self): + try: + devices = list(os.walk('/dev/disk/by-path'))[0][-1] + except IndexError: + return [] + return [entry for entry in devices if entry.startswith("ip-")] + + def _disconnect_mpath(self, connection_properties, ips_iqns): + for ip, iqn in ips_iqns: + props = connection_properties.copy() + props['target_portal'] = ip + props['target_iqn'] = iqn + self._disconnect_from_iscsi_portal(props) + + self._rescan_multipath() + + def _get_multipath_iqn(self, multipath_device): + entries = self._get_iscsi_devices() + for entry in entries: + entry_real_path = os.path.realpath("/dev/disk/by-path/%s" % entry) + entry_multipath = self._get_multipath_device_name(entry_real_path) + if entry_multipath == multipath_device: + return entry.split("iscsi-")[1].split("-lun")[0] + return None + + def _run_iscsiadm_bare(self, iscsi_command, **kwargs): + check_exit_code = kwargs.pop('check_exit_code', 0) + (out, err) = self._execute('iscsiadm', + *iscsi_command, + run_as_root=True, + root_helper=self._root_helper, + check_exit_code=check_exit_code) + LOG.debug("iscsiadm %s: stdout=%s stderr=%s" % + (iscsi_command, out, err)) + return (out, err) + + def _run_multipath(self, multipath_command, **kwargs): + check_exit_code = kwargs.pop('check_exit_code', 0) + (out, err) = self._execute('multipath', + *multipath_command, + run_as_root=True, + root_helper=self._root_helper, + check_exit_code=check_exit_code) + LOG.debug("multipath %s: stdout=%s stderr=%s" % + (multipath_command, out, err)) + return (out, err) + + def _rescan_iscsi(self): + self._run_iscsiadm_bare(('-m', 'node', '--rescan'), + check_exit_code=[0, 1, 21, 255]) + self._run_iscsiadm_bare(('-m', 'session', '--rescan'), + check_exit_code=[0, 1, 21, 255]) + + def _rescan_multipath(self): + self._run_multipath('-r', check_exit_code=[0, 1, 21]) + + +class 
ISERConnector(ISCSIConnector): + + def _get_device_path(self, iser_properties): + return ("/dev/disk/by-path/ip-%s-iser-%s-lun-%s" % + (iser_properties['target_portal'], + iser_properties['target_iqn'], + iser_properties.get('target_lun', 0))) + + +class FibreChannelConnector(InitiatorConnector): + """Connector class to attach/detach Fibre Channel volumes.""" + + def __init__(self, root_helper, driver=None, + execute=putils.execute, use_multipath=False, + device_scan_attempts=DEVICE_SCAN_ATTEMPTS_DEFAULT, + *args, **kwargs): + self._linuxscsi = linuxscsi.LinuxSCSI(root_helper, execute) + self._linuxfc = linuxfc.LinuxFibreChannel(root_helper, execute) + super(FibreChannelConnector, self).__init__(root_helper, driver=driver, + execute=execute, + device_scan_attempts= + device_scan_attempts, + *args, **kwargs) + self.use_multipath = use_multipath + + def set_execute(self, execute): + super(FibreChannelConnector, self).set_execute(execute) + self._linuxscsi.set_execute(execute) + self._linuxfc.set_execute(execute) + + @synchronized('connect_volume') + def connect_volume(self, connection_properties): + """Attach the volume to instance_name. + + connection_properties for Fibre Channel must include: + target_portal - ip and optional port + target_iqn - iSCSI Qualified Name + target_lun - LUN id of the volume + """ + LOG.debug("execute = %s" % self._execute) + device_info = {'type': 'block'} + + ports = connection_properties['target_wwn'] + wwns = [] + # we support a list of wwns or a single wwn + if isinstance(ports, list): + for wwn in ports: + wwns.append(str(wwn)) + elif isinstance(ports, basestring): + wwns.append(str(ports)) + + # We need to look for wwns on every hba + # because we don't know ahead of time + # where they will show up. + hbas = self._linuxfc.get_fc_hbas_info() + host_devices = [] + for hba in hbas: + pci_num = self._get_pci_num(hba) + if pci_num is not None: + for wwn in wwns: + target_wwn = "0x%s" % wwn.lower() + host_device = ("/dev/disk/by-path/pci-%s-fc-%s-lun-%s" % + (pci_num, + target_wwn, + connection_properties.get('target_lun', 0))) + host_devices.append(host_device) + + if len(host_devices) == 0: + # this is empty because we don't have any FC HBAs + msg = _("We are unable to locate any Fibre Channel devices") + LOG.warn(msg) + raise exception.NoFibreChannelHostsFound() + + # The /dev/disk/by-path/... node is not always present immediately + # We only need to find the first device. Once we see the first device + # multipath will have any others. + def _wait_for_device_discovery(host_devices): + tries = self.tries + for device in host_devices: + LOG.debug(_("Looking for Fibre Channel dev %(device)s"), + {'device': device}) + if os.path.exists(device): + self.host_device = device + # get the /dev/sdX device. This is used + # to find the multipath device. + self.device_name = os.path.realpath(device) + raise loopingcall.LoopingCallDone() + + if self.tries >= self.device_scan_attempts: + msg = _("Fibre Channel volume device not found.") + LOG.error(msg) + raise exception.NoFibreChannelVolumeDeviceFound() + + LOG.warn(_("Fibre volume not yet found. " + "Will rescan & retry. 
Try number: %(tries)s"), + {'tries': tries}) + + self._linuxfc.rescan_hosts(hbas) + self.tries = self.tries + 1 + + self.host_device = None + self.device_name = None + self.tries = 0 + timer = loopingcall.FixedIntervalLoopingCall( + _wait_for_device_discovery, host_devices) + timer.start(interval=2).wait() + + tries = self.tries + if self.host_device is not None and self.device_name is not None: + LOG.debug(_("Found Fibre Channel volume %(name)s " + "(after %(tries)s rescans)"), + {'name': self.device_name, 'tries': tries}) + + # see if the new drive is part of a multipath + # device. If so, we'll use the multipath device. + if self.use_multipath: + mdev_info = self._linuxscsi.find_multipath_device(self.device_name) + if mdev_info is not None: + LOG.debug(_("Multipath device discovered %(device)s") + % {'device': mdev_info['device']}) + device_path = mdev_info['device'] + devices = mdev_info['devices'] + device_info['multipath_id'] = mdev_info['id'] + else: + # we didn't find a multipath device. + # so we assume the kernel only sees 1 device + device_path = self.host_device + dev_info = self._linuxscsi.get_device_info(self.device_name) + devices = [dev_info] + else: + device_path = self.host_device + dev_info = self._linuxscsi.get_device_info(self.device_name) + devices = [dev_info] + + device_info['path'] = device_path + device_info['devices'] = devices + return device_info + + @synchronized('connect_volume') + def disconnect_volume(self, connection_properties, device_info): + """Detach the volume from instance_name. + + connection_properties for Fibre Channel must include: + target_wwn - iSCSI Qualified Name + target_lun - LUN id of the volume + """ + devices = device_info['devices'] + + # If this is a multipath device, we need to search again + # and make sure we remove all the devices. Some of them + # might not have shown up at attach time. + if self.use_multipath and 'multipath_id' in device_info: + multipath_id = device_info['multipath_id'] + mdev_info = self._linuxscsi.find_multipath_device(multipath_id) + devices = mdev_info['devices'] + LOG.debug("devices to remove = %s" % devices) + + # There may have been more than 1 device mounted + # by the kernel for this volume. 
We have to remove + # all of them + for device in devices: + self._linuxscsi.remove_scsi_device(device["device"]) + + def _get_pci_num(self, hba): + # NOTE(walter-boring) + # device path is in format of + # /sys/devices/pci0000:00/0000:00:03.0/0000:05:00.3/host2/fc_host/host2 + # sometimes an extra entry exists before the host2 value + # we always want the value prior to the host2 value + pci_num = None + if hba is not None: + if "device_path" in hba: + index = 0 + device_path = hba['device_path'].split('/') + for value in device_path: + if value.startswith('host'): + break + index = index + 1 + + if index > 0: + pci_num = device_path[index - 1] + + return pci_num + + +class AoEConnector(InitiatorConnector): + """Connector class to attach/detach AoE volumes.""" + def __init__(self, root_helper, driver=None, + execute=putils.execute, + device_scan_attempts=DEVICE_SCAN_ATTEMPTS_DEFAULT, + *args, **kwargs): + super(AoEConnector, self).__init__(root_helper, driver=driver, + execute=execute, + device_scan_attempts= + device_scan_attempts, + *args, **kwargs) + + def _get_aoe_info(self, connection_properties): + shelf = connection_properties['target_shelf'] + lun = connection_properties['target_lun'] + aoe_device = 'e%(shelf)s.%(lun)s' % {'shelf': shelf, + 'lun': lun} + aoe_path = '/dev/etherd/%s' % (aoe_device) + return aoe_device, aoe_path + + @lockutils.synchronized('aoe_control', 'aoe-') + def connect_volume(self, connection_properties): + """Discover and attach the volume. + + connection_properties for AoE must include: + target_shelf - shelf id of volume + target_lun - lun id of volume + """ + aoe_device, aoe_path = self._get_aoe_info(connection_properties) + + device_info = { + 'type': 'block', + 'device': aoe_device, + 'path': aoe_path, + } + + if os.path.exists(aoe_path): + self._aoe_revalidate(aoe_device) + else: + self._aoe_discover() + + waiting_status = {'tries': 0} + + #NOTE(jbr_): Device path is not always present immediately + def _wait_for_discovery(aoe_path): + if os.path.exists(aoe_path): + raise loopingcall.LoopingCallDone + + if waiting_status['tries'] >= self.device_scan_attempts: + raise exception.VolumeDeviceNotFound(device=aoe_path) + + LOG.warn(_("AoE volume not yet found at: %(path)s. " + "Try number: %(tries)s"), + {'path': aoe_device, + 'tries': waiting_status['tries']}) + + self._aoe_discover() + waiting_status['tries'] += 1 + + timer = loopingcall.FixedIntervalLoopingCall(_wait_for_discovery, + aoe_path) + timer.start(interval=2).wait() + + if waiting_status['tries']: + LOG.debug(_("Found AoE device %(path)s " + "(after %(tries)s rediscover)"), + {'path': aoe_path, + 'tries': waiting_status['tries']}) + + return device_info + + @lockutils.synchronized('aoe_control', 'aoe-') + def disconnect_volume(self, connection_properties, device_info): + """Detach and flush the volume. 
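_get_pci_num is pure path surgery: split the sysfs device_path, find the first 'hostN' component, and return whatever precedes it. Applied to the example path from the NOTE above::

    device_path = ('/sys/devices/pci0000:00/0000:00:03.0/'
                   '0000:05:00.3/host2/fc_host/host2')
    parts = device_path.split('/')
    index = next(i for i, value in enumerate(parts)
                 if value.startswith('host'))
    pci_num = parts[index - 1] if index > 0 else None
    # pci_num == '0000:05:00.3'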
+ + connection_properties for AoE must include: + target_shelf - shelf id of volume + target_lun - lun id of volume + """ + aoe_device, aoe_path = self._get_aoe_info(connection_properties) + + if os.path.exists(aoe_path): + self._aoe_flush(aoe_device) + + def _aoe_discover(self): + (out, err) = self._execute('aoe-discover', + run_as_root=True, + root_helper=self._root_helper, + check_exit_code=0) + + LOG.debug(_('aoe-discover: stdout=%(out)s stderr%(err)s') % + {'out': out, 'err': err}) + + def _aoe_revalidate(self, aoe_device): + (out, err) = self._execute('aoe-revalidate', + aoe_device, + run_as_root=True, + root_helper=self._root_helper, + check_exit_code=0) + + LOG.debug(_('aoe-revalidate %(dev)s: stdout=%(out)s stderr%(err)s') % + {'dev': aoe_device, 'out': out, 'err': err}) + + def _aoe_flush(self, aoe_device): + (out, err) = self._execute('aoe-flush', + aoe_device, + run_as_root=True, + root_helper=self._root_helper, + check_exit_code=0) + LOG.debug(_('aoe-flush %(dev)s: stdout=%(out)s stderr%(err)s') % + {'dev': aoe_device, 'out': out, 'err': err}) + + +class RemoteFsConnector(InitiatorConnector): + """Connector class to attach/detach NFS and GlusterFS volumes.""" + + def __init__(self, mount_type, root_helper, driver=None, + execute=putils.execute, + device_scan_attempts=DEVICE_SCAN_ATTEMPTS_DEFAULT, + *args, **kwargs): + kwargs = kwargs or {} + conn = kwargs.get('conn') + if conn: + mount_point_base = conn.get('mount_point_base') + if mount_type.lower() == 'nfs': + kwargs['nfs_mount_point_base'] =\ + kwargs.get('nfs_mount_point_base') or\ + mount_point_base + elif mount_type.lower() == 'glusterfs': + kwargs['glusterfs_mount_point_base'] =\ + kwargs.get('glusterfs_mount_point_base') or\ + mount_point_base + else: + LOG.warn(_("Connection details not present." + " RemoteFsClient may not initialize properly.")) + self._remotefsclient = remotefs.RemoteFsClient(mount_type, root_helper, + execute=execute, + *args, **kwargs) + super(RemoteFsConnector, self).__init__(root_helper, driver=driver, + execute=execute, + device_scan_attempts= + device_scan_attempts, + *args, **kwargs) + + def set_execute(self, execute): + super(RemoteFsConnector, self).set_execute(execute) + self._remotefsclient.set_execute(execute) + + def connect_volume(self, connection_properties): + """Ensure that the filesystem containing the volume is mounted. + + connection_properties must include: + export - remote filesystem device (e.g. '172.18.194.100:/var/nfs') + name - file name within the filesystem + + connection_properties may optionally include: + options - options to pass to mount + """ + + mnt_flags = [] + if connection_properties.get('options'): + mnt_flags = connection_properties['options'].split() + + nfs_share = connection_properties['export'] + self._remotefsclient.mount(nfs_share, mnt_flags) + mount_point = self._remotefsclient.get_mount_point(nfs_share) + + path = mount_point + '/' + connection_properties['name'] + + return {'path': path} + + def disconnect_volume(self, connection_properties, device_info): + """No need to do anything to disconnect a volume in a filesystem.""" + + +class LocalConnector(InitiatorConnector): + """"Connector class to attach/detach File System backed volumes.""" + + def __init__(self, root_helper, driver=None, execute=putils.execute, + *args, **kwargs): + super(LocalConnector, self).__init__(root_helper, driver=driver, + execute=execute, *args, **kwargs) + + def connect_volume(self, connection_properties): + """Connect to a volume. 
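AoE addressing is compact enough that the whole device mapping fits in _get_aoe_info's two format strings: shelf and LUN become an e<shelf>.<lun> node under /dev/etherd. With hypothetical ids::

    props = {'target_shelf': 3, 'target_lun': 1}
    aoe_device = 'e%(shelf)s.%(lun)s' % {'shelf': props['target_shelf'],
                                         'lun': props['target_lun']}
    aoe_path = '/dev/etherd/%s' % aoe_device
    # aoe_device == 'e3.1'; aoe_path == '/dev/etherd/e3.1'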
+ + connection_properties must include: + device_path - path to the volume to be connected + """ + if 'device_path' not in connection_properties: + msg = (_("Invalid connection_properties specified " + "no device_path attribute")) + raise ValueError(msg) + + device_info = {'type': 'local', + 'path': connection_properties['device_path']} + return device_info + + def disconnect_volume(self, connection_properties, device_info): + """Disconnect a volume from the local host.""" + pass diff --git a/cinder/brick/initiator/host_driver.py b/cinder/brick/initiator/host_driver.py new file mode 100644 index 0000000000..5de57ccdac --- /dev/null +++ b/cinder/brick/initiator/host_driver.py @@ -0,0 +1,30 @@ +# Copyright 2013 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import os + + +class HostDriver(object): + + def get_all_block_devices(self): + """Get the list of all block devices seen in /dev/disk/by-path/.""" + files = [] + dir = "/dev/disk/by-path/" + if os.path.isdir(dir): + files = os.listdir(dir) + devices = [] + for file in files: + devices.append(dir + file) + return devices diff --git a/cinder/brick/initiator/linuxfc.py b/cinder/brick/initiator/linuxfc.py new file mode 100644 index 0000000000..2b6099c825 --- /dev/null +++ b/cinder/brick/initiator/linuxfc.py @@ -0,0 +1,139 @@ +# (c) Copyright 2013 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
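HostDriver.get_all_block_devices above shadows the ``dir`` and ``file`` builtins; an equivalent formulation without the shadowing, offered as a sketch rather than part of the patch::

    import os

    def get_all_block_devices(base='/dev/disk/by-path/'):
        # Same result as HostDriver.get_all_block_devices, without
        # rebinding the dir/file builtin names.
        if not os.path.isdir(base):
            return []
        return [os.path.join(base, name) for name in os.listdir(base)]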
+ +"""Generic linux Fibre Channel utilities.""" + +import errno + +from cinder.brick.initiator import linuxscsi +from cinder.openstack.common.gettextutils import _ +from cinder.openstack.common import log as logging +from cinder.openstack.common import processutils as putils + +LOG = logging.getLogger(__name__) + + +class LinuxFibreChannel(linuxscsi.LinuxSCSI): + def __init__(self, root_helper, execute=putils.execute, + *args, **kwargs): + super(LinuxFibreChannel, self).__init__(root_helper, execute, + *args, **kwargs) + + def rescan_hosts(self, hbas): + for hba in hbas: + self.echo_scsi_command("/sys/class/scsi_host/%s/scan" + % hba['host_device'], "- - -") + + def get_fc_hbas(self): + """Get the Fibre Channel HBA information.""" + out = None + try: + out, err = self._execute('systool', '-c', 'fc_host', '-v', + run_as_root=True, + root_helper=self._root_helper) + except putils.ProcessExecutionError as exc: + # This handles the case where rootwrap is used + # and systool is not installed + # 96 = nova.cmd.rootwrap.RC_NOEXECFOUND: + if exc.exit_code == 96: + LOG.warn(_("systool is not installed")) + return [] + except OSError as exc: + # This handles the case where rootwrap is NOT used + # and systool is not installed + if exc.errno == errno.ENOENT: + LOG.warn(_("systool is not installed")) + return [] + + # No FC HBAs were found + if out is None: + return [] + + lines = out.split('\n') + # ignore the first 2 lines + lines = lines[2:] + hbas = [] + hba = {} + lastline = None + for line in lines: + line = line.strip() + # 2 newlines denotes a new hba port + if line == '' and lastline == '': + if len(hba) > 0: + hbas.append(hba) + hba = {} + else: + val = line.split('=') + if len(val) == 2: + key = val[0].strip().replace(" ", "") + value = val[1].strip() + hba[key] = value.replace('"', '') + lastline = line + + return hbas + + def get_fc_hbas_info(self): + """Get Fibre Channel WWNs and device paths from the system, if any.""" + + # Note(walter-boring) modern Linux kernels contain the FC HBA's in /sys + # and are obtainable via the systool app + hbas = self.get_fc_hbas() + if not hbas: + return [] + + hbas_info = [] + for hba in hbas: + wwpn = hba['port_name'].replace('0x', '') + wwnn = hba['node_name'].replace('0x', '') + device_path = hba['ClassDevicepath'] + device = hba['ClassDevice'] + hbas_info.append({'port_name': wwpn, + 'node_name': wwnn, + 'host_device': device, + 'device_path': device_path}) + return hbas_info + + def get_fc_wwpns(self): + """Get Fibre Channel WWPNs from the system, if any.""" + + # Note(walter-boring) modern Linux kernels contain the FC HBA's in /sys + # and are obtainable via the systool app + hbas = self.get_fc_hbas() + + wwpns = [] + if hbas: + for hba in hbas: + if hba['port_state'] == 'Online': + wwpn = hba['port_name'].replace('0x', '') + wwpns.append(wwpn) + + return wwpns + + def get_fc_wwnns(self): + """Get Fibre Channel WWNNs from the system, if any.""" + + # Note(walter-boring) modern Linux kernels contain the FC HBA's in /sys + # and are obtainable via the systool app + hbas = self.get_fc_hbas() + if not hbas: + return [] + + wwnns = [] + if hbas: + for hba in hbas: + if hba['port_state'] == 'Online': + wwnn = hba['node_name'].replace('0x', '') + wwnns.append(wwnn) + + return wwnns diff --git a/cinder/brick/initiator/linuxscsi.py b/cinder/brick/initiator/linuxscsi.py new file mode 100644 index 0000000000..83e41d99db --- /dev/null +++ b/cinder/brick/initiator/linuxscsi.py @@ -0,0 +1,172 @@ +# (c) Copyright 2013 Hewlett-Packard Development Company, L.P. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Generic linux scsi subsystem and Multipath utilities. + + Note, this is not iSCSI. +""" +import os + +from cinder.brick import executor +from cinder.openstack.common.gettextutils import _ +from cinder.openstack.common import log as logging +from cinder.openstack.common import processutils as putils + +LOG = logging.getLogger(__name__) + + +class LinuxSCSI(executor.Executor): + def __init__(self, root_helper, execute=putils.execute, + *args, **kwargs): + super(LinuxSCSI, self).__init__(root_helper, execute, + *args, **kwargs) + + def echo_scsi_command(self, path, content): + """Used to echo strings to scsi subsystem.""" + + args = ["-a", path] + kwargs = dict(process_input=content, + run_as_root=True, + root_helper=self._root_helper) + self._execute('tee', *args, **kwargs) + + def get_name_from_path(self, path): + """Translates /dev/disk/by-path/ entry to /dev/sdX.""" + + name = os.path.realpath(path) + if name.startswith("/dev/"): + return name + else: + return None + + def remove_scsi_device(self, device): + """Removes a scsi device based upon /dev/sdX name.""" + + path = "/sys/block/%s/device/delete" % device.replace("/dev/", "") + if os.path.exists(path): + LOG.debug("Remove SCSI device(%s) with %s" % (device, path)) + self.echo_scsi_command(path, "1") + + def get_device_info(self, device): + (out, err) = self._execute('sg_scan', device, run_as_root=True, + root_helper=self._root_helper) + dev_info = {'device': device, 'host': None, + 'channel': None, 'id': None, 'lun': None} + if out: + line = out.strip() + line = line.replace(device + ": ", "") + info = line.split(" ") + + for item in info: + if '=' in item: + pair = item.split('=') + dev_info[pair[0]] = pair[1] + elif 'scsi' in item: + dev_info['host'] = item.replace('scsi', '') + + return dev_info + + def remove_multipath_device(self, multipath_name): + """This removes LUNs associated with a multipath device + and the multipath device itself. + """ + + LOG.debug("remove multipath device %s" % multipath_name) + mpath_dev = self.find_multipath_device(multipath_name) + if mpath_dev: + devices = mpath_dev['devices'] + LOG.debug("multipath LUNs to remove %s" % devices) + for device in devices: + self.remove_scsi_device(device['device']) + self.flush_multipath_device(mpath_dev['id']) + + def flush_multipath_device(self, device): + try: + self._execute('multipath', '-f', device, run_as_root=True, + root_helper=self._root_helper) + except putils.ProcessExecutionError as exc: + LOG.warn(_("multipath call failed exit (%(code)s)") + % {'code': exc.exit_code}) + + def flush_multipath_devices(self): + try: + self._execute('multipath', '-F', run_as_root=True, + root_helper=self._root_helper) + except putils.ProcessExecutionError as exc: + LOG.warn(_("multipath call failed exit (%(code)s)") + % {'code': exc.exit_code}) + + def find_multipath_device(self, device): + """Find a multipath device associated with a LUN device name. + + device can be either a /dev/sdX entry or a multipath id. 
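+
+        On success, returns a dict built by the parsing code below; the
+        values here are purely illustrative::
+
+            {'device': '/dev/dm-2',
+             'id': '36005076801ab0bd30000000000000022',
+             'devices': [{'device': '/dev/sdd', 'host': '6',
+                          'channel': '0', 'id': '1', 'lun': '2'}]}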
+ """ + + mdev = None + devices = [] + out = None + try: + (out, err) = self._execute('multipath', '-l', device, + run_as_root=True, + root_helper=self._root_helper) + except putils.ProcessExecutionError as exc: + LOG.warn(_("multipath call failed exit (%(code)s)") + % {'code': exc.exit_code}) + return None + + if out: + lines = out.strip() + lines = lines.split("\n") + if lines: + line = lines[0] + info = line.split(" ") + # device line output is different depending + # on /etc/multipath.conf settings. + if info[1][:2] == "dm": + mdev = "/dev/%s" % info[1] + mdev_id = info[0] + elif info[2][:2] == "dm": + mdev = "/dev/%s" % info[2] + mdev_id = info[1].replace('(', '') + mdev_id = mdev_id.replace(')', '') + + if mdev is None: + LOG.warn(_("Couldn't find multipath device %(line)s") + % {'line': line}) + return None + + LOG.debug(_("Found multipath device = %(mdev)s") + % {'mdev': mdev}) + device_lines = lines[3:] + for dev_line in device_lines: + if dev_line.find("policy") != -1: + continue + + dev_line = dev_line.lstrip(' |-`') + dev_info = dev_line.split() + address = dev_info[0].split(":") + + dev = {'device': '/dev/%s' % dev_info[1], + 'host': address[0], 'channel': address[1], + 'id': address[2], 'lun': address[3] + } + + devices.append(dev) + + if mdev is not None: + info = {"device": mdev, + "id": mdev_id, + "devices": devices} + return info + return None diff --git a/cinder/brick/iscsi/__init__.py b/cinder/brick/iscsi/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cinder/brick/iscsi/iscsi.py b/cinder/brick/iscsi/iscsi.py new file mode 100644 index 0000000000..cb042ba832 --- /dev/null +++ b/cinder/brick/iscsi/iscsi.py @@ -0,0 +1,595 @@ +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Helper code for the iSCSI volume driver. + +""" + +import contextlib +import os +import re +import stat +import time + +from cinder.brick import exception +from cinder.brick import executor +from cinder.openstack.common import fileutils +from cinder.openstack.common.gettextutils import _ +from cinder.openstack.common import log as logging +from cinder.openstack.common import processutils as putils + + +LOG = logging.getLogger(__name__) + + +class TargetAdmin(executor.Executor): + """iSCSI target administration. + + Base class for iSCSI target admin helpers. 
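+
+    Subclasses bind self._cmd to a specific admin utility (tgtadm, ietadm,
+    cinder-rtstool) and fill in the hooks below; _run() prepends that
+    command and executes as root. A hypothetical subclass sketch (the
+    'fooadm' command is illustrative)::
+
+        class FooAdm(TargetAdmin):
+            def __init__(self, root_helper, execute=putils.execute):
+                super(FooAdm, self).__init__('fooadm', root_helper, execute)
+
+            def show_target(self, tid, iqn=None, **kwargs):
+                self._run('--op', 'show', '--tid=%s' % tid, **kwargs)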
+    """
+
+    def __init__(self, cmd, root_helper, execute):
+        super(TargetAdmin, self).__init__(root_helper, execute=execute)
+        self._cmd = cmd
+
+    def _run(self, *args, **kwargs):
+        self._execute(self._cmd, *args, run_as_root=True, **kwargs)
+
+    def create_iscsi_target(self, name, tid, lun, path,
+                            chap_auth=None, **kwargs):
+        """Create an iSCSI target and logical unit."""
+        raise NotImplementedError()
+
+    def update_iscsi_target(self, name):
+        """Update an iSCSI target."""
+        raise NotImplementedError()
+
+    def remove_iscsi_target(self, tid, lun, vol_id, vol_name, **kwargs):
+        """Remove an iSCSI target and logical unit."""
+        raise NotImplementedError()
+
+    def _new_target(self, name, tid, **kwargs):
+        """Create a new iSCSI target."""
+        raise NotImplementedError()
+
+    def _delete_target(self, tid, **kwargs):
+        """Delete a target."""
+        raise NotImplementedError()
+
+    def show_target(self, tid, iqn=None, **kwargs):
+        """Query the given target ID."""
+        raise NotImplementedError()
+
+    def _new_logicalunit(self, tid, lun, path, **kwargs):
+        """Create a new LUN on a target using the supplied path."""
+        raise NotImplementedError()
+
+    def _delete_logicalunit(self, tid, lun, **kwargs):
+        """Delete a logical unit from a target."""
+        raise NotImplementedError()
+
+
+class TgtAdm(TargetAdmin):
+    """iSCSI target administration using tgtadm."""
+
+    VOLUME_CONF = """
+                <target %s>
+                    backing-store %s
+                </target>
+                """
+    VOLUME_CONF_WITH_CHAP_AUTH = """
+                                <target %s>
+                                    backing-store %s
+                                    %s
+                                </target>
+                                """
+
+    def __init__(self, root_helper, volumes_dir,
+                 target_prefix='iqn.2010-10.org.openstack:',
+                 execute=putils.execute):
+        super(TgtAdm, self).__init__('tgtadm', root_helper, execute)
+
+        self.iscsi_target_prefix = target_prefix
+        self.volumes_dir = volumes_dir
+
+    def _get_target(self, iqn):
+        (out, err) = self._execute('tgt-admin', '--show', run_as_root=True)
+        lines = out.split('\n')
+        for line in lines:
+            if iqn in line:
+                parsed = line.split()
+                tid = parsed[1]
+                return tid[:-1]
+
+        return None
+
+    def _verify_backing_lun(self, iqn, tid):
+        backing_lun = True
+        capture = False
+        target_info = []
+
+        (out, err) = self._execute('tgt-admin', '--show', run_as_root=True)
+        lines = out.split('\n')
+
+        for line in lines:
+            if iqn in line and "Target %s" % tid in line:
+                capture = True
+            if capture:
+                target_info.append(line)
+            if iqn not in line and 'Target ' in line:
+                capture = False
+
+        if '        LUN: 1' not in target_info:
+            backing_lun = False
+
+        return backing_lun
+
+    def _recreate_backing_lun(self, iqn, tid, name, path):
+        LOG.warning(_('Attempting recreate of backing lun...'))
+
+        # Since we think the most common case of this is a dev busy
+        # (create vol from snapshot) we're going to add a sleep here
+        # this will hopefully give things enough time to stabilize
+        # how long should we wait?? I have no idea, let's go big
+        # and err on the side of caution
+
+        time.sleep(10)
+        try:
+            (out, err) = self._execute('tgtadm', '--lld', 'iscsi',
+                                       '--op', 'new', '--mode',
+                                       'logicalunit', '--tid',
+                                       tid, '--lun', '1', '-b',
+                                       path, run_as_root=True)
+            LOG.debug('StdOut from recreate backing lun: %s' % out)
+            LOG.debug('StdErr from recreate backing lun: %s' % err)
+        except putils.ProcessExecutionError as e:
+            LOG.error(_("Failed to recover attempt to create "
+                        "iscsi backing lun for volume "
+                        "id:%(vol_id)s: %(e)s")
+                      % {'vol_id': name, 'e': str(e)})
+
+    def create_iscsi_target(self, name, tid, lun, path,
+                            chap_auth=None, **kwargs):
+        # Note(jdg) tid and lun aren't used by TgtAdm but remain for
+        # compatibility
+
+        fileutils.ensure_tree(self.volumes_dir)
+
+        vol_id = name.split(':')[1]
+        if chap_auth is None:
+            volume_conf = self.VOLUME_CONF % (name, path)
+        else:
+            volume_conf = self.VOLUME_CONF_WITH_CHAP_AUTH % (name,
+                                                             path, chap_auth)
+
+        LOG.info(_('Creating iscsi_target for: %s') % vol_id)
+        volumes_dir = self.volumes_dir
+        volume_path = os.path.join(volumes_dir, vol_id)
+
+        f = open(volume_path, 'w+')
+        f.write(volume_conf)
+        f.close()
+        LOG.debug(_('Created volume path %(vp)s,\n'
+                    'content: %(vc)s')
+                  % {'vp': volume_path, 'vc': volume_conf})
+
+        old_persist_file = None
+        old_name = kwargs.get('old_name', None)
+        if old_name is not None:
+            old_persist_file = os.path.join(volumes_dir, old_name)
+
+        try:
+            # with the persistent tgts we create them
+            # by creating the entry in the persist file
+            # and then doing an update to get the target
+            # created.
+            (out, err) = self._execute('tgt-admin', '--update', name,
+                                       run_as_root=True)
+            LOG.debug("StdOut from tgt-admin --update: %s", out)
+            LOG.debug("StdErr from tgt-admin --update: %s", err)
+
+            # Grab targets list for debug
+            # Consider adding a check for lun 0 and 1 for tgtadm
+            # before considering this as valid
+            (out, err) = self._execute('tgtadm',
+                                       '--lld',
+                                       'iscsi',
+                                       '--op',
+                                       'show',
+                                       '--mode',
+                                       'target',
+                                       run_as_root=True)
+            LOG.debug("Targets after update: %s" % out)
+        except putils.ProcessExecutionError as e:
+            LOG.warning(_("Failed to create iscsi target for volume "
+                          "id:%(vol_id)s: %(e)s")
+                        % {'vol_id': vol_id, 'e': str(e)})
+
+            # Don't forget to remove the persistent file we created
+            os.unlink(volume_path)
+            raise exception.ISCSITargetCreateFailed(volume_id=vol_id)
+
+        iqn = '%s%s' % (self.iscsi_target_prefix, vol_id)
+        tid = self._get_target(iqn)
+        if tid is None:
+            LOG.error(_("Failed to create iscsi target for volume "
+                        "id:%(vol_id)s. 
Please ensure your tgtd config file " + "contains 'include %(volumes_dir)s/*'") % { + 'vol_id': vol_id, + 'volumes_dir': volumes_dir, + }) + raise exception.NotFound() + + # NOTE(jdg): Sometimes we have some issues with the backing lun + # not being created, believe this is due to a device busy + # or something related, so we're going to add some code + # here that verifies the backing lun (lun 1) was created + # and we'll try and recreate it if it's not there + if not self._verify_backing_lun(iqn, tid): + try: + self._recreate_backing_lun(iqn, tid, name, path) + except putils.ProcessExecutionError: + os.unlink(volume_path) + raise exception.ISCSITargetCreateFailed(volume_id=vol_id) + + # Finally check once more and if no go, fail and punt + if not self._verify_backing_lun(iqn, tid): + os.unlink(volume_path) + raise exception.ISCSITargetCreateFailed(volume_id=vol_id) + + if old_persist_file is not None and os.path.exists(old_persist_file): + os.unlink(old_persist_file) + + return tid + + def remove_iscsi_target(self, tid, lun, vol_id, vol_name, **kwargs): + LOG.info(_('Removing iscsi_target for: %s') % vol_id) + vol_uuid_file = vol_name + volume_path = os.path.join(self.volumes_dir, vol_uuid_file) + if not os.path.exists(volume_path): + LOG.warning(_('Volume path %s does not exist, ' + 'nothing to remove.') % volume_path) + return + + if os.path.isfile(volume_path): + iqn = '%s%s' % (self.iscsi_target_prefix, + vol_uuid_file) + else: + raise exception.ISCSITargetRemoveFailed(volume_id=vol_id) + try: + # NOTE(vish): --force is a workaround for bug: + # https://bugs.launchpad.net/cinder/+bug/1159948 + self._execute('tgt-admin', + '--force', + '--delete', + iqn, + run_as_root=True) + except putils.ProcessExecutionError as e: + LOG.error(_("Failed to remove iscsi target for volume " + "id:%(vol_id)s: %(e)s") + % {'vol_id': vol_id, 'e': str(e)}) + raise exception.ISCSITargetRemoveFailed(volume_id=vol_id) + + os.unlink(volume_path) + + def show_target(self, tid, iqn=None, **kwargs): + if iqn is None: + raise exception.InvalidParameterValue( + err=_('valid iqn needed for show_target')) + + tid = self._get_target(iqn) + if tid is None: + raise exception.NotFound() + + +class IetAdm(TargetAdmin): + """iSCSI target administration using ietadm.""" + + def __init__(self, root_helper, iet_conf='/etc/iet/ietd.conf', + iscsi_iotype='fileio', execute=putils.execute): + super(IetAdm, self).__init__('ietadm', root_helper, execute) + self.iet_conf = iet_conf + self.iscsi_iotype = iscsi_iotype + + def _is_block(self, path): + mode = os.stat(path).st_mode + return stat.S_ISBLK(mode) + + def _iotype(self, path): + if self.iscsi_iotype == 'auto': + return 'blockio' if self._is_block(path) else 'fileio' + else: + return self.iscsi_iotype + + @contextlib.contextmanager + def temporary_chown(self, path, owner_uid=None): + """Temporarily chown a path. 
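+
+        Designed for use as a context manager around edits to root-owned
+        files; ownership is restored even if the block raises. A sketch
+        (the path and helper function are illustrative)::
+
+            with self.temporary_chown('/etc/iet/ietd.conf'):
+                rewrite_config()  # placeholder for the actual file edit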
+ + :params path: The path to chown + :params owner_uid: UID of temporary owner (defaults to current user) + """ + if owner_uid is None: + owner_uid = os.getuid() + + orig_uid = os.stat(path).st_uid + + if orig_uid != owner_uid: + putils.execute('chown', owner_uid, path, + root_helper=self._root_helper, run_as_root=True) + try: + yield + finally: + if orig_uid != owner_uid: + putils.execute('chown', orig_uid, path, + root_helper=self._root_helper, run_as_root=True) + + def create_iscsi_target(self, name, tid, lun, path, + chap_auth=None, **kwargs): + + # NOTE (jdg): Address bug: 1175207 + kwargs.pop('old_name', None) + + self._new_target(name, tid, **kwargs) + self._new_logicalunit(tid, lun, path, **kwargs) + if chap_auth is not None: + (type, username, password) = chap_auth.split() + self._new_auth(tid, type, username, password, **kwargs) + + conf_file = self.iet_conf + if os.path.exists(conf_file): + try: + volume_conf = """ + Target %s + %s + Lun 0 Path=%s,Type=%s + """ % (name, chap_auth, path, self._iotype(path)) + + with self.temporary_chown(conf_file): + f = open(conf_file, 'a+') + f.write(volume_conf) + f.close() + except putils.ProcessExecutionError as e: + vol_id = name.split(':')[1] + LOG.error(_("Failed to create iscsi target for volume " + "id:%(vol_id)s: %(e)s") + % {'vol_id': vol_id, 'e': str(e)}) + raise exception.ISCSITargetCreateFailed(volume_id=vol_id) + return tid + + def update_iscsi_target(self, name): + pass + + def remove_iscsi_target(self, tid, lun, vol_id, vol_name, **kwargs): + LOG.info(_('Removing iscsi_target for volume: %s') % vol_id) + self._delete_logicalunit(tid, lun, **kwargs) + self._delete_target(tid, **kwargs) + vol_uuid_file = vol_name + conf_file = self.iet_conf + if os.path.exists(conf_file): + with self.temporary_chown(conf_file): + try: + iet_conf_text = open(conf_file, 'r+') + full_txt = iet_conf_text.readlines() + new_iet_conf_txt = [] + count = 0 + for line in full_txt: + if count > 0: + count -= 1 + continue + elif re.search(vol_uuid_file, line): + count = 2 + continue + else: + new_iet_conf_txt.append(line) + + iet_conf_text.seek(0) + iet_conf_text.truncate(0) + iet_conf_text.writelines(new_iet_conf_txt) + finally: + iet_conf_text.close() + + def _new_target(self, name, tid, **kwargs): + self._run('--op', 'new', + '--tid=%s' % tid, + '--params', 'Name=%s' % name, + **kwargs) + + def _delete_target(self, tid, **kwargs): + self._run('--op', 'delete', + '--tid=%s' % tid, + **kwargs) + + def show_target(self, tid, iqn=None, **kwargs): + self._run('--op', 'show', + '--tid=%s' % tid, + **kwargs) + + def _new_logicalunit(self, tid, lun, path, **kwargs): + self._run('--op', 'new', + '--tid=%s' % tid, + '--lun=%d' % lun, + '--params', 'Path=%s,Type=%s' % (path, self._iotype(path)), + **kwargs) + + def _delete_logicalunit(self, tid, lun, **kwargs): + self._run('--op', 'delete', + '--tid=%s' % tid, + '--lun=%d' % lun, + **kwargs) + + def _new_auth(self, tid, type, username, password, **kwargs): + self._run('--op', 'new', + '--tid=%s' % tid, + '--user', + '--params=%s=%s,Password=%s' % (type, username, password), + **kwargs) + + +class FakeIscsiHelper(object): + + def __init__(self): + self.tid = 1 + self._execute = None + + def set_execute(self, execute): + self._execute = execute + + def create_iscsi_target(self, *args, **kwargs): + self.tid += 1 + return self.tid + + +class LioAdm(TargetAdmin): + """iSCSI target administration for LIO using python-rtslib.""" + def __init__(self, root_helper, lio_initiator_iqns='', + 
iscsi_target_prefix='iqn.2010-10.org.openstack:',
+                 execute=putils.execute):
+        super(LioAdm, self).__init__('cinder-rtstool', root_helper, execute)
+
+        self.iscsi_target_prefix = iscsi_target_prefix
+        self.lio_initiator_iqns = lio_initiator_iqns
+        self._verify_rtstool()
+
+    def _verify_rtstool(self):
+        try:
+            self._execute('cinder-rtstool', 'verify')
+        except (OSError, putils.ProcessExecutionError):
+            LOG.error(_('cinder-rtstool is not installed correctly'))
+            raise
+
+    def _get_target(self, iqn):
+        (out, err) = self._execute('cinder-rtstool',
+                                   'get-targets',
+                                   run_as_root=True)
+        lines = out.split('\n')
+        for line in lines:
+            if iqn in line:
+                return line
+
+        return None
+
+    def create_iscsi_target(self, name, tid, lun, path,
+                            chap_auth=None, **kwargs):
+        # tid and lun are not used
+
+        vol_id = name.split(':')[1]
+
+        LOG.info(_('Creating iscsi_target for volume: %s') % vol_id)
+
+        # rtstool requires chap_auth, but unit tests don't provide it
+        chap_auth_userid = 'test_id'
+        chap_auth_password = 'test_pass'
+
+        if chap_auth is not None:
+            (chap_auth_userid, chap_auth_password) = chap_auth.split(' ')[1:]
+
+        extra_args = []
+        if self.lio_initiator_iqns:
+            extra_args.append(self.lio_initiator_iqns)
+
+        try:
+            command_args = ['cinder-rtstool',
+                            'create',
+                            path,
+                            name,
+                            chap_auth_userid,
+                            chap_auth_password]
+            if extra_args:
+                command_args.extend(extra_args)
+            self._execute(*command_args, run_as_root=True)
+        except putils.ProcessExecutionError as e:
+            LOG.error(_("Failed to create iscsi target for volume "
+                        "id:%s.") % vol_id)
+            LOG.error("%s" % str(e))
+
+            raise exception.ISCSITargetCreateFailed(volume_id=vol_id)
+
+        iqn = '%s%s' % (self.iscsi_target_prefix, vol_id)
+        tid = self._get_target(iqn)
+        if tid is None:
+            LOG.error(_("Failed to create iscsi target for volume "
+                        "id:%s.") % vol_id)
+            raise exception.NotFound()
+
+        return tid
+
+    def update_iscsi_target(self, name):
+        pass
+
+    def remove_iscsi_target(self, tid, lun, vol_id, vol_name, **kwargs):
+        LOG.info(_('Removing iscsi_target: %s') % vol_id)
+        vol_uuid_name = vol_name
+        iqn = '%s%s' % (self.iscsi_target_prefix, vol_uuid_name)
+
+        try:
+            self._execute('cinder-rtstool',
+                          'delete',
+                          iqn,
+                          run_as_root=True)
+        except putils.ProcessExecutionError as e:
+            LOG.error(_("Failed to remove iscsi target for volume "
+                        "id:%s.") % vol_id)
+            LOG.error("%s" % str(e))
+            raise exception.ISCSITargetRemoveFailed(volume_id=vol_id)
+
+    def show_target(self, tid, iqn=None, **kwargs):
+        if iqn is None:
+            raise exception.InvalidParameterValue(
+                err=_('valid iqn needed for show_target'))
+
+        tid = self._get_target(iqn)
+        if tid is None:
+            raise exception.NotFound()
+
+    def initialize_connection(self, volume, connector):
+        volume_iqn = volume['provider_location'].split(' ')[1]
+
+        (auth_method, auth_user, auth_pass) = \
+            volume['provider_auth'].split(' ', 3)
+
+        # Add initiator iqns to target ACL
+        try:
+            self._execute('cinder-rtstool', 'add-initiator',
+                          volume_iqn,
+                          auth_user,
+                          auth_pass,
+                          connector['initiator'],
+                          run_as_root=True)
+        except putils.ProcessExecutionError:
+            LOG.error(_("Failed to add initiator iqn %s to target") %
+                      connector['initiator'])
+            raise exception.ISCSITargetAttachFailed(volume_id=volume['id'])
+
+
+class ISERTgtAdm(TgtAdm):
+    VOLUME_CONF = """
+                <target %s>
+                    driver iser
+                    backing-store %s
+                </target>
+                """
+    VOLUME_CONF_WITH_CHAP_AUTH = """
+                                <target %s>
+                                    driver iser
+                                    backing-store %s
+                                    %s
+                                </target>
+                                """
+
+    def __init__(self, root_helper, volumes_dir,
+                 target_prefix='iqn.2010-10.org.iser.openstack:',
+                 execute=putils.execute):
+        super(ISERTgtAdm, 
self).__init__(root_helper, volumes_dir, + target_prefix, execute) diff --git a/cinder/brick/local_dev/__init__.py b/cinder/brick/local_dev/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cinder/brick/local_dev/lvm.py b/cinder/brick/local_dev/lvm.py new file mode 100644 index 0000000000..caccbb087f --- /dev/null +++ b/cinder/brick/local_dev/lvm.py @@ -0,0 +1,640 @@ +# Copyright 2013 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +LVM class for performing LVM operations. +""" + +import math +import re + +import itertools + +from cinder.brick import exception +from cinder.brick import executor +from cinder.openstack.common.gettextutils import _ +from cinder.openstack.common import log as logging +from cinder.openstack.common import processutils as putils + +LOG = logging.getLogger(__name__) + + +class LVM(executor.Executor): + """LVM object to enable various LVM related operations.""" + + def __init__(self, vg_name, root_helper, create_vg=False, + physical_volumes=None, lvm_type='default', + executor=putils.execute): + + """Initialize the LVM object. + + The LVM object is based on an LVM VolumeGroup, one instantiation + for each VolumeGroup you have/use. + + :param vg_name: Name of existing VG or VG to create + :param root_helper: Execution root_helper method to use + :param create_vg: Indicates the VG doesn't exist + and we want to create it + :param physical_volumes: List of PVs to build VG on + :param lvm_type: VG and Volume type (default, or thin) + :param executor: Execute method to use, None uses common/processutils + + """ + super(LVM, self).__init__(execute=executor, root_helper=root_helper) + self.vg_name = vg_name + self.pv_list = [] + self.lv_list = [] + self.vg_size = 0.0 + self.vg_free_space = 0.0 + self.vg_lv_count = 0 + self.vg_uuid = None + self.vg_thin_pool = None + self.vg_thin_pool_size = 0.0 + self.vg_thin_pool_free_space = 0.0 + self._supports_snapshot_lv_activation = None + self._supports_lvchange_ignoreskipactivation = None + + if create_vg and physical_volumes is not None: + self.pv_list = physical_volumes + + try: + self._create_vg(physical_volumes) + except putils.ProcessExecutionError as err: + LOG.exception(_('Error creating Volume Group')) + LOG.error(_('Cmd :%s') % err.cmd) + LOG.error(_('StdOut :%s') % err.stdout) + LOG.error(_('StdErr :%s') % err.stderr) + raise exception.VolumeGroupCreationFailed(vg_name=self.vg_name) + + if self._vg_exists() is False: + LOG.error(_('Unable to locate Volume Group %s') % vg_name) + raise exception.VolumeGroupNotFound(vg_name=vg_name) + + # NOTE: we assume that the VG has been activated outside of Cinder + + if lvm_type == 'thin': + pool_name = "%s-pool" % self.vg_name + if self.get_volume(pool_name) is None: + self.create_thin_pool(pool_name) + else: + self.vg_thin_pool = pool_name + + self.activate_lv(self.vg_thin_pool) + self.pv_list = self.get_all_physical_volumes(root_helper, vg_name) + + def _vg_exists(self): + """Simple check to see if VG 
exists. + + :returns: True if vg specified in object exists, else False + + """ + exists = False + (out, err) = self._execute( + 'env', 'LC_ALL=C', 'vgs', '--noheadings', '-o', 'name', + self.vg_name, root_helper=self._root_helper, run_as_root=True) + + if out is not None: + volume_groups = out.split() + if self.vg_name in volume_groups: + exists = True + + return exists + + def _create_vg(self, pv_list): + cmd = ['vgcreate', self.vg_name, ','.join(pv_list)] + self._execute(*cmd, root_helper=self._root_helper, run_as_root=True) + + def _get_vg_uuid(self): + (out, err) = self._execute('env', 'LC_ALL=C', 'vgs', '--noheadings', + '-o uuid', self.vg_name) + if out is not None: + return out.split() + else: + return [] + + def _get_thin_pool_free_space(self, vg_name, thin_pool_name): + """Returns available thin pool free space. + + :param vg_name: the vg where the pool is placed + :param thin_pool_name: the thin pool to gather info for + :returns: Free space in GB (float), calculated using data_percent + + """ + cmd = ['env', 'LC_ALL=C', 'lvs', '--noheadings', '--unit=g', + '-o', 'size,data_percent', '--separator', ':', '--nosuffix'] + + # NOTE(gfidente): data_percent only applies to some types of LV so we + # make sure to append the actual thin pool name + cmd.append("/dev/%s/%s" % (vg_name, thin_pool_name)) + + free_space = 0.0 + + try: + (out, err) = self._execute(*cmd, + root_helper=self._root_helper, + run_as_root=True) + if out is not None: + out = out.strip() + data = out.split(':') + pool_size = float(data[0]) + data_percent = float(data[1]) + consumed_space = pool_size / 100 * data_percent + free_space = pool_size - consumed_space + free_space = round(free_space, 2) + except putils.ProcessExecutionError as err: + LOG.exception(_('Error querying thin pool about data_percent')) + LOG.error(_('Cmd :%s') % err.cmd) + LOG.error(_('StdOut :%s') % err.stdout) + LOG.error(_('StdErr :%s') % err.stderr) + + return free_space + + @staticmethod + def get_lvm_version(root_helper): + """Static method to get LVM version from system. + + :param root_helper: root_helper to use for execute + :returns: version 3-tuple + + """ + + cmd = ['env', 'LC_ALL=C', 'vgs', '--version'] + (out, err) = putils.execute(*cmd, + root_helper=root_helper, + run_as_root=True) + lines = out.split('\n') + + for line in lines: + if 'LVM version' in line: + version_list = line.split() + # NOTE(gfidente): version is formatted as follows: + # major.minor.patchlevel(library API version)[-customisation] + version = version_list[2] + version_filter = r"(\d+)\.(\d+)\.(\d+).*" + r = re.search(version_filter, version) + version_tuple = tuple(map(int, r.group(1, 2, 3))) + return version_tuple + + @staticmethod + def supports_thin_provisioning(root_helper): + """Static method to check for thin LVM support on a system. + + :param root_helper: root_helper to use for execute + :returns: True if supported, False otherwise + + """ + + return LVM.get_lvm_version(root_helper) >= (2, 2, 95) + + @property + def supports_snapshot_lv_activation(self): + """Property indicating whether snap activation changes are supported. + + Check for LVM version >= 2.02.91. 
+ (LVM2 git: e8a40f6 Allow to activate snapshot) + + :returns: True/False indicating support + """ + + if self._supports_snapshot_lv_activation is not None: + return self._supports_snapshot_lv_activation + + self._supports_snapshot_lv_activation = ( + self.get_lvm_version(self._root_helper) >= (2, 2, 91)) + + return self._supports_snapshot_lv_activation + + @property + def supports_lvchange_ignoreskipactivation(self): + """Property indicating whether lvchange can ignore skip activation. + + Check for LVM version >= 2.02.99. + (LVM2 git: ab789c1bc add --ignoreactivationskip to lvchange) + """ + + if self._supports_lvchange_ignoreskipactivation is not None: + return self._supports_lvchange_ignoreskipactivation + + self._supports_lvchange_ignoreskipactivation = ( + self.get_lvm_version(self._root_helper) >= (2, 2, 99)) + + return self._supports_lvchange_ignoreskipactivation + + @staticmethod + def get_all_volumes(root_helper, vg_name=None): + """Static method to get all LV's on a system. + + :param root_helper: root_helper to use for execute + :param vg_name: optional, gathers info for only the specified VG + :returns: List of Dictionaries with LV info + + """ + + cmd = ['env', 'LC_ALL=C', 'lvs', '--noheadings', '--unit=g', + '-o', 'vg_name,name,size', '--nosuffix'] + + if vg_name is not None: + cmd.append(vg_name) + + (out, err) = putils.execute(*cmd, + root_helper=root_helper, + run_as_root=True) + + lv_list = [] + if out is not None: + volumes = out.split() + for vg, name, size in itertools.izip(*[iter(volumes)] * 3): + lv_list.append({"vg": vg, "name": name, "size": size}) + + return lv_list + + def get_volumes(self): + """Get all LV's associated with this instantiation (VG). + + :returns: List of Dictionaries with LV info + + """ + self.lv_list = self.get_all_volumes(self._root_helper, self.vg_name) + return self.lv_list + + def get_volume(self, name): + """Get reference object of volume specified by name. + + :returns: dict representation of Logical Volume if exists + + """ + ref_list = self.get_volumes() + for r in ref_list: + if r['name'] == name: + return r + + @staticmethod + def get_all_physical_volumes(root_helper, vg_name=None): + """Static method to get all PVs on a system. + + :param root_helper: root_helper to use for execute + :param vg_name: optional, gathers info for only the specified VG + :returns: List of Dictionaries with PV info + + """ + cmd = ['env', 'LC_ALL=C', 'pvs', '--noheadings', + '--unit=g', + '-o', 'vg_name,name,size,free', + '--separator', ':', + '--nosuffix'] + + (out, err) = putils.execute(*cmd, + root_helper=root_helper, + run_as_root=True) + + pvs = out.split() + if vg_name is not None: + pvs = [pv for pv in pvs if vg_name == pv.split(':')[0]] + + pv_list = [] + for pv in pvs: + fields = pv.split(':') + pv_list.append({'vg': fields[0], + 'name': fields[1], + 'size': float(fields[2]), + 'available': float(fields[3])}) + return pv_list + + def get_physical_volumes(self): + """Get all PVs associated with this instantiation (VG). + + :returns: List of Dictionaries with PV info + + """ + self.pv_list = self.get_all_physical_volumes(self._root_helper, + self.vg_name) + return self.pv_list + + @staticmethod + def get_all_volume_groups(root_helper, vg_name=None): + """Static method to get all VGs on a system. 
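+
+        Example call and result (sizes and uuid are illustrative)::
+
+            LVM.get_all_volume_groups('sudo', 'cinder-volumes')
+            # [{'name': 'cinder-volumes', 'size': 100.0,
+            #   'available': 70.0, 'lv_count': 3,
+            #   'uuid': 'v0dKk2-xqXc-Ts28Ab'}]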
+ + :param root_helper: root_helper to use for execute + :param vg_name: optional, gathers info for only the specified VG + :returns: List of Dictionaries with VG info + + """ + cmd = ['env', 'LC_ALL=C', 'vgs', '--noheadings', '--unit=g', + '-o', 'name,size,free,lv_count,uuid', '--separator', ':', + '--nosuffix'] + + if vg_name is not None: + cmd.append(vg_name) + + (out, err) = putils.execute(*cmd, + root_helper=root_helper, + run_as_root=True) + + vg_list = [] + if out is not None: + vgs = out.split() + for vg in vgs: + fields = vg.split(':') + vg_list.append({'name': fields[0], + 'size': float(fields[1]), + 'available': float(fields[2]), + 'lv_count': int(fields[3]), + 'uuid': fields[4]}) + + return vg_list + + def update_volume_group_info(self): + """Update VG info for this instantiation. + + Used to update member fields of object and + provide a dict of info for caller. + + :returns: Dictionaries of VG info + + """ + vg_list = self.get_all_volume_groups(self._root_helper, self.vg_name) + + if len(vg_list) != 1: + LOG.error(_('Unable to find VG: %s') % self.vg_name) + raise exception.VolumeGroupNotFound(vg_name=self.vg_name) + + self.vg_size = float(vg_list[0]['size']) + self.vg_free_space = float(vg_list[0]['available']) + self.vg_lv_count = int(vg_list[0]['lv_count']) + self.vg_uuid = vg_list[0]['uuid'] + + if self.vg_thin_pool is not None: + for lv in self.get_all_volumes(self._root_helper, self.vg_name): + if lv['name'] == self.vg_thin_pool: + self.vg_thin_pool_size = lv['size'] + tpfs = self._get_thin_pool_free_space(self.vg_name, + self.vg_thin_pool) + self.vg_thin_pool_free_space = tpfs + + def _calculate_thin_pool_size(self): + """Calculates the correct size for a thin pool. + + Ideally we would use 100% of the containing volume group and be done. + But the 100%VG notation to lvcreate is not implemented and thus cannot + be used. See https://bugzilla.redhat.com/show_bug.cgi?id=998347 + + Further, some amount of free space must remain in the volume group for + metadata for the contained logical volumes. The exact amount depends + on how much volume sharing you expect. + + :returns: An lvcreate-ready string for the number of calculated bytes. + """ + + # make sure volume group information is current + self.update_volume_group_info() + + # leave 5% free for metadata + return "%sg" % (self.vg_free_space * 0.95) + + def create_thin_pool(self, name=None, size_str=None): + """Creates a thin provisioning pool for this VG. + + The syntax here is slightly different than the default + lvcreate -T, so we'll just write a custom cmd here + and do it. 
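+
+        Usage sketch (assumes an LVM instance ``lvm``; pool name and size
+        are illustrative)::
+
+            lvm.create_thin_pool(name='cinder-volumes-pool',
+                                 size_str='20g')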
+
+        :param name: Name to use for pool, default is "<vg_name>-pool"
+        :param size_str: Size to allocate for pool, default is entire VG
+        :returns: The size string passed to the lvcreate command
+
+        """
+
+        if not self.supports_thin_provisioning(self._root_helper):
+            LOG.error(_('Requested to setup thin provisioning, '
+                        'however current LVM version does not '
+                        'support it.'))
+            return None
+
+        if name is None:
+            name = '%s-pool' % self.vg_name
+
+        vg_pool_name = '%s/%s' % (self.vg_name, name)
+
+        if not size_str:
+            size_str = self._calculate_thin_pool_size()
+
+        cmd = ['lvcreate', '-T', '-L', size_str, vg_pool_name]
+        LOG.debug(_('Creating thin pool \'%(pool)s\' with size %(size)s of '
+                    'total %(free)sg') % {'pool': vg_pool_name,
+                                          'size': size_str,
+                                          'free': self.vg_free_space})
+
+        self._execute(*cmd,
+                      root_helper=self._root_helper,
+                      run_as_root=True)
+
+        self.vg_thin_pool = name
+        return size_str
+
+    def create_volume(self, name, size_str, lv_type='default', mirror_count=0):
+        """Creates a logical volume on the object's VG.
+
+        :param name: Name to use when creating Logical Volume
+        :param size_str: Size to use when creating Logical Volume
+        :param lv_type: Type of Volume (default or thin)
+        :param mirror_count: Use LVM mirroring with specified count
+
+        """
+
+        if lv_type == 'thin':
+            pool_path = '%s/%s' % (self.vg_name, self.vg_thin_pool)
+            cmd = ['lvcreate', '-T', '-V', size_str, '-n', name, pool_path]
+        else:
+            cmd = ['lvcreate', '-n', name, self.vg_name, '-L', size_str]
+
+        if mirror_count > 0:
+            # '--mirrorlog' and 'mirrored' must be separate argv entries;
+            # a single '--mirrorlog mirrored' token is not parsed by lvcreate.
+            cmd.extend(['-m', mirror_count, '--nosync',
+                        '--mirrorlog', 'mirrored'])
+            terras = int(size_str[:-1]) / 1024.0
+            if terras >= 1.5:
+                rsize = int(2 ** math.ceil(math.log(terras) / math.log(2)))
+                # NOTE(vish): Next power of two for region size. See:
+                #             http://red.ht/U2BPOD
+                cmd.extend(['-R', str(rsize)])
+
+        try:
+            self._execute(*cmd,
+                          root_helper=self._root_helper,
+                          run_as_root=True)
+        except putils.ProcessExecutionError as err:
+            LOG.exception(_('Error creating Volume'))
+            LOG.error(_('Cmd :%s') % err.cmd)
+            LOG.error(_('StdOut :%s') % err.stdout)
+            LOG.error(_('StdErr :%s') % err.stderr)
+            raise
+
+    def create_lv_snapshot(self, name, source_lv_name, lv_type='default'):
+        """Creates a snapshot of a logical volume.
+
+        :param name: Name to assign to new snapshot
+        :param source_lv_name: Name of Logical Volume to snapshot
+        :param lv_type: Type of LV (default or thin)
+
+        """
+        source_lvref = self.get_volume(source_lv_name)
+        if source_lvref is None:
+            LOG.error(_("Unable to find LV: %s") % source_lv_name)
+            return False
+        cmd = ['lvcreate', '--name', name,
+               '--snapshot', '%s/%s' % (self.vg_name, source_lv_name)]
+        if lv_type != 'thin':
+            size = source_lvref['size']
+            cmd.extend(['-L', '%sg' % (size)])
+
+        try:
+            self._execute(*cmd,
+                          root_helper=self._root_helper,
+                          run_as_root=True)
+        except putils.ProcessExecutionError as err:
+            LOG.exception(_('Error creating snapshot'))
+            LOG.error(_('Cmd :%s') % err.cmd)
+            LOG.error(_('StdOut :%s') % err.stdout)
+            LOG.error(_('StdErr :%s') % err.stderr)
+            raise
+
+    def _mangle_lv_name(self, name):
+        # Linux LVM reserves names that start with "snapshot", so such a
+        # volume name can't be created directly. Mangle it.
+        if not name.startswith('snapshot'):
+            return name
+        return '_' + name
+
+    def activate_lv(self, name, is_snapshot=False):
+        """Ensure that logical volume/snapshot logical volume is activated. 
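+
+        Shells out to ``lvchange -a y``, adding ``--yes`` and, where the
+        capability checks above allow it, ``-K``. Usage sketch (assumes an
+        LVM instance ``lvm``; the LV name is illustrative)::
+
+            lvm.activate_lv('volume-0001-snap', is_snapshot=True)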
+ + :param name: Name of LV to activate + :raises: putils.ProcessExecutionError + """ + + # This is a no-op if requested for a snapshot on a version + # of LVM that doesn't support snapshot activation. + # (Assume snapshot LV is always active.) + if is_snapshot and not self.supports_snapshot_lv_activation: + return + + lv_path = self.vg_name + '/' + self._mangle_lv_name(name) + + # Must pass --yes to activate both the snap LV and its origin LV. + # Otherwise lvchange asks if you would like to do this interactively, + # and fails. + cmd = ['lvchange', '-a', 'y', '--yes'] + + if self.supports_lvchange_ignoreskipactivation: + cmd.append('-K') + + cmd.append(lv_path) + + try: + self._execute(*cmd, + root_helper=self._root_helper, + run_as_root=True) + except putils.ProcessExecutionError as err: + LOG.exception(_('Error activating LV')) + LOG.error(_('Cmd :%s') % err.cmd) + LOG.error(_('StdOut :%s') % err.stdout) + LOG.error(_('StdErr :%s') % err.stderr) + raise + + def delete(self, name): + """Delete logical volume or snapshot. + + :param name: Name of LV to delete + + """ + try: + self._execute('lvremove', + '-f', + '%s/%s' % (self.vg_name, name), + root_helper=self._root_helper, run_as_root=True) + except putils.ProcessExecutionError as err: + mesg = (_('Error reported running lvremove: CMD: %(command)s, ' + 'RESPONSE: %(response)s') % + {'command': err.cmd, 'response': err.stderr}) + LOG.debug(mesg) + + LOG.debug(_('Attempting udev settle and retry of lvremove...')) + self._execute('udevadm', 'settle', + root_helper=self._root_helper, + run_as_root=True) + + self._execute('lvremove', + '-f', + '%s/%s' % (self.vg_name, name), + root_helper=self._root_helper, run_as_root=True) + + def revert(self, snapshot_name): + """Revert an LV from snapshot. + + :param snapshot_name: Name of snapshot to revert + + """ + self._execute('lvconvert', '--merge', + snapshot_name, root_helper=self._root_helper, + run_as_root=True) + + def lv_has_snapshot(self, name): + out, err = self._execute( + 'env', 'LC_ALL=C', 'lvdisplay', '--noheading', + '-C', '-o', 'Attr', '%s/%s' % (self.vg_name, name), + root_helper=self._root_helper, run_as_root=True) + if out: + out = out.strip() + if (out[0] == 'o') or (out[0] == 'O'): + return True + return False + + def extend_volume(self, lv_name, new_size): + """Extend the size of an existing volume.""" + + try: + self._execute('lvextend', '-L', new_size, + '%s/%s' % (self.vg_name, lv_name), + root_helper=self._root_helper, + run_as_root=True) + except putils.ProcessExecutionError as err: + LOG.exception(_('Error extending Volume')) + LOG.error(_('Cmd :%s') % err.cmd) + LOG.error(_('StdOut :%s') % err.stdout) + LOG.error(_('StdErr :%s') % err.stderr) + raise + + def vg_mirror_free_space(self, mirror_count): + free_capacity = 0.0 + + disks = [] + for pv in self.pv_list: + disks.append(float(pv['available'])) + + while True: + disks = sorted([a for a in disks if a > 0.0], reverse=True) + if len(disks) <= mirror_count: + break + # consume the smallest disk + disk = disks[-1] + disks = disks[:-1] + # match extents for each mirror on the largest disks + for index in list(range(mirror_count)): + disks[index] -= disk + free_capacity += disk + + return free_capacity + + def vg_mirror_size(self, mirror_count): + return (self.vg_free_space / (mirror_count + 1)) diff --git a/cinder/brick/remotefs/__init__.py b/cinder/brick/remotefs/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cinder/brick/remotefs/remotefs.py b/cinder/brick/remotefs/remotefs.py new file mode 
100644 index 0000000000..8239d9e724 --- /dev/null +++ b/cinder/brick/remotefs/remotefs.py @@ -0,0 +1,99 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Remote filesystem client utilities.""" + +import hashlib +import os + +from cinder.brick import exception +from cinder.openstack.common.gettextutils import _ +from cinder.openstack.common import log as logging +from cinder.openstack.common import processutils as putils + +LOG = logging.getLogger(__name__) + + +class RemoteFsClient(object): + + def __init__(self, mount_type, root_helper, + execute=putils.execute, *args, **kwargs): + + self._mount_type = mount_type + if mount_type == "nfs": + self._mount_base = kwargs.get('nfs_mount_point_base', None) + if not self._mount_base: + raise exception.InvalidParameterValue( + err=_('nfs_mount_point_base required')) + self._mount_options = kwargs.get('nfs_mount_options', None) + elif mount_type == "glusterfs": + self._mount_base = kwargs.get('glusterfs_mount_point_base', None) + if not self._mount_base: + raise exception.InvalidParameterValue( + err=_('glusterfs_mount_point_base required')) + self._mount_options = None + else: + raise exception.ProtocolNotSupported(protocol=mount_type) + self.root_helper = root_helper + self.set_execute(execute) + + def set_execute(self, execute): + self._execute = execute + + def _get_hash_str(self, base_str): + """Return a string that represents hash of base_str + (in a hex format). + """ + return hashlib.md5(base_str).hexdigest() + + def get_mount_point(self, device_name): + """Get Mount Point. + + :param device_name: example 172.18.194.100:/var/nfs + """ + return os.path.join(self._mount_base, + self._get_hash_str(device_name)) + + def _read_mounts(self): + (out, err) = self._execute('mount', check_exit_code=0) + lines = out.split('\n') + mounts = {} + for line in lines: + tokens = line.split() + if 2 < len(tokens): + device = tokens[0] + mnt_point = tokens[2] + mounts[mnt_point] = device + return mounts + + def mount(self, nfs_share, flags=None): + """Mount NFS share.""" + mount_path = self.get_mount_point(nfs_share) + + if mount_path in self._read_mounts(): + LOG.info(_('Already mounted: %s') % mount_path) + return + + self._execute('mkdir', '-p', mount_path, check_exit_code=0) + + mnt_cmd = ['mount', '-t', self._mount_type] + if self._mount_options is not None: + mnt_cmd.extend(['-o', self._mount_options]) + if flags is not None: + mnt_cmd.extend(flags) + mnt_cmd.extend([nfs_share, mount_path]) + + self._execute(*mnt_cmd, root_helper=self.root_helper, + run_as_root=True, check_exit_code=0) diff --git a/cinder/common/__init__.py b/cinder/common/__init__.py index 0a3b98867a..e69de29bb2 100644 --- a/cinder/common/__init__.py +++ b/cinder/common/__init__.py @@ -1,15 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright (c) 2011 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. diff --git a/cinder/flags.py b/cinder/common/config.py similarity index 59% rename from cinder/flags.py rename to cinder/common/config.py index 1c61cad7ac..4b4703673f 100644 --- a/cinder/flags.py +++ b/cinder/common/config.py @@ -1,9 +1,8 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # Copyright 2012 Red Hat, Inc. +# Copyright 2013 NTT corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -28,30 +27,13 @@ import os import socket -import sys - -from cinder.openstack.common import cfg - - -FLAGS = cfg.CONF - - -def parse_args(argv, default_config_files=None): - FLAGS.disable_interspersed_args() - return argv[:1] + FLAGS(argv[1:], - project='cinder', - default_config_files=default_config_files) +from oslo.config import cfg -class UnrecognizedFlag(Exception): - pass +from cinder.openstack.common.gettextutils import _ -def DECLARE(name, module_string, flag_values=FLAGS): - if module_string not in sys.modules: - __import__(module_string, globals(), locals()) - if name not in flag_values: - raise UnrecognizedFlag('%s not defined by %s' % (name, module_string)) +CONF = cfg.CONF def _get_my_ip(): @@ -74,38 +56,23 @@ def _get_my_ip(): core_opts = [ - cfg.StrOpt('connection_type', - default=None, - help='Virtualization api connection type : libvirt, xenapi, ' - 'or fake'), - cfg.StrOpt('sql_connection', - default='sqlite:///$state_path/$sqlite_db', - help='The SQLAlchemy connection string used to connect to the ' - 'database'), - cfg.IntOpt('sql_connection_debug', - default=0, - help='Verbosity of SQL debugging information. 
0=None, ' - '100=Everything'), cfg.StrOpt('api_paste_config', default="api-paste.ini", help='File name for the paste.deploy config for cinder-api'), cfg.StrOpt('pybasedir', default=os.path.abspath(os.path.join(os.path.dirname(__file__), - '../')), + '..', + '..')), help='Directory where the cinder python module is installed'), - cfg.StrOpt('bindir', - default='$pybasedir/bin', - help='Directory where cinder binaries are installed'), cfg.StrOpt('state_path', default='$pybasedir', - help="Top-level directory for maintaining cinder's state"), - ] + help="Top-level directory for maintaining cinder's state"), ] debug_opts = [ ] -FLAGS.register_cli_opts(core_opts) -FLAGS.register_cli_opts(debug_opts) +CONF.register_cli_opts(core_opts) +CONF.register_cli_opts(debug_opts) global_opts = [ cfg.StrOpt('my_ip', @@ -121,55 +88,61 @@ def _get_my_ip(): default=['$glance_host:$glance_port'], help='A list of the glance api servers available to cinder ' '([hostname|ip]:port)'), + cfg.IntOpt('glance_api_version', + default=1, + help='Version of the glance api to use'), cfg.IntOpt('glance_num_retries', - default=0, - help='Number retries when downloading an image from glance'), + default=0, + help='Number retries when downloading an image from glance'), + cfg.BoolOpt('glance_api_insecure', + default=False, + help='Allow to perform insecure SSL (https) requests to ' + 'glance'), + cfg.BoolOpt('glance_api_ssl_compression', + default=False, + help='Whether to attempt to negotiate SSL layer compression ' + 'when using SSL (https) requests. Set to False to ' + 'disable SSL layer compression. In some cases disabling ' + 'this may improve data throughput, eg when high network ' + 'bandwidth is available and you are using already ' + 'compressed image formats such as qcow2 .'), + cfg.IntOpt('glance_request_timeout', + default=None, + help='http/https timeout value for glance operations. If no ' + 'value (None) is supplied here, the glanceclient default ' + 'value is used.'), cfg.StrOpt('scheduler_topic', default='cinder-scheduler', help='the topic scheduler nodes listen on'), cfg.StrOpt('volume_topic', default='cinder-volume', help='the topic volume nodes listen on'), + cfg.StrOpt('backup_topic', + default='cinder-backup', + help='the topic volume backup nodes listen on'), + cfg.BoolOpt('enable_v1_api', + default=True, + help=_("Deploy v1 of the Cinder API.")), + cfg.BoolOpt('enable_v2_api', + default=True, + help=_("Deploy v2 of the Cinder API.")), cfg.BoolOpt('api_rate_limit', default=True, help='whether to rate limit the api'), cfg.ListOpt('osapi_volume_ext_list', default=[], help='Specify list of extensions to load when using osapi_' - 'volume_extension option with cinder.api.openstack.' - 'volume.contrib.select_extensions'), + 'volume_extension option with cinder.api.contrib.' 
+ 'select_extensions'), cfg.MultiStrOpt('osapi_volume_extension', - default=[ - 'cinder.api.openstack.volume.contrib.standard_extensions' - ], + default=['cinder.api.contrib.standard_extensions'], help='osapi volume extension to load'), - cfg.StrOpt('osapi_compute_link_prefix', - default=None, - help='Base URL that will be presented to users in links ' - 'to the OpenStack Compute API'), - cfg.IntOpt('osapi_max_limit', - default=1000, - help='the maximum number of items returned in a single ' - 'response from a collection resource'), - cfg.StrOpt('sqlite_db', - default='cinder.sqlite', - help='the filename to use with sqlite'), - cfg.BoolOpt('sqlite_synchronous', - default=True, - help='If passed, use synchronous mode for sqlite'), - cfg.IntOpt('sql_idle_timeout', - default=3600, - help='timeout before idle sql connections are reaped'), - cfg.IntOpt('sql_max_retries', - default=10, - help='maximum db connection retries during startup. ' - '(setting -1 implies an infinite retry count)'), - cfg.IntOpt('sql_retry_interval', - default=10, - help='interval between retries of opening a sql connection'), cfg.StrOpt('volume_manager', default='cinder.volume.manager.VolumeManager', help='full class name for the Manager for volume'), + cfg.StrOpt('backup_manager', + default='cinder.backup.manager.BackupManager', + help='full class name for the Manager for volume backup'), cfg.StrOpt('scheduler_manager', default='cinder.scheduler.manager.SchedulerManager', help='full class name for the Manager for scheduler'), @@ -181,23 +154,26 @@ def _get_my_ip(): cfg.StrOpt('storage_availability_zone', default='nova', help='availability zone of this node'), - cfg.ListOpt('memcached_servers', - default=None, - help='Memcached servers or None for in process cache.'), + cfg.StrOpt('default_availability_zone', + default=None, + help='default availability zone to use when creating a new volume. ' + 'If this is not set then we use the value from the ' + 'storage_availability_zone option as the default ' + 'availability_zone for new volumes.'), + cfg.StrOpt('default_volume_type', + default=None, + help='default volume type to use'), cfg.StrOpt('volume_usage_audit_period', default='month', help='time period to generate volume usages for. ' 'Time period must be hour, day, month or year'), - cfg.StrOpt('root_helper', - default='sudo', - help='Deprecated: command to use for running commands as root'), cfg.StrOpt('rootwrap_config', - default=None, + default='/etc/cinder/rootwrap.conf', help='Path to the rootwrap configuration file to use for ' 'running commands as root'), cfg.BoolOpt('monkey_patch', default=False, - help='Whether to log monkey patching'), + help='Enable monkey patching'), cfg.ListOpt('monkey_patch_modules', default=[], help='List of modules/decorators to monkey patch'), @@ -205,15 +181,25 @@ def _get_my_ip(): default=60, help='maximum time since last check-in for up service'), cfg.StrOpt('volume_api_class', - default='cinder.volume.api.API', - help='The full class name of the volume API class to use'), + default='cinder.volume.api.API', + help='The full class name of the volume API class to use'), + cfg.StrOpt('backup_api_class', + default='cinder.backup.api.API', + help='The full class name of the volume backup API class'), cfg.StrOpt('auth_strategy', default='noauth', help='The strategy to use for auth. 
Supports noauth, keystone, ' 'and deprecated.'), - cfg.StrOpt('control_exchange', - default='cinder', - help='AMQP exchange to connect to if using RabbitMQ or Qpid'), -] + cfg.ListOpt('enabled_backends', + default=None, + help='A list of backend names to use. These backend names ' + 'should be backed by a unique [CONFIG] group ' + 'with its options'), + cfg.BoolOpt('no_snapshot_gb_quota', + default=False, + help='Whether snapshots count against GigaByte quota'), + cfg.StrOpt('transfer_api_class', + default='cinder.transfer.api.API', + help='The full class name of the volume transfer API class'), ] -FLAGS.register_opts(global_opts) +CONF.register_opts(global_opts) diff --git a/cinder/common/deprecated.py b/cinder/common/deprecated.py deleted file mode 100644 index b06c6f2c85..0000000000 --- a/cinder/common/deprecated.py +++ /dev/null @@ -1,55 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright (c) 2012 IBM -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import warnings - -from cinder import exception -from cinder import flags -from cinder.openstack.common import cfg -from cinder.openstack.common import log as logging - -LOG = logging.getLogger(__name__) - -deprecate_opts = [ - cfg.BoolOpt('fatal_deprecations', - default=False, - help='make deprecations fatal') - ] -FLAGS = flags.FLAGS -FLAGS.register_opts(deprecate_opts) - - -def _showwarning(message, category, filename, lineno, file=None, line=None): - """ - Redirect warnings into logging. - """ - LOG.warn(str(message)) - - -# Install our warnings handler -warnings.showwarning = _showwarning - - -def warn(msg=""): - """ - Warn of a deprecated config option that an operator has specified. - This should be added in the code where we've made a change in how - we use some operator changeable parameter to indicate that it will - go away in a future version of OpenStack. - """ - warnings.warn(_("Deprecated Config: %s") % msg) - if FLAGS.fatal_deprecations: - raise exception.DeprecatedConfig(msg=msg) diff --git a/cinder/common/sqlalchemyutils.py b/cinder/common/sqlalchemyutils.py new file mode 100644 index 0000000000..dca0bd5aa3 --- /dev/null +++ b/cinder/common/sqlalchemyutils.py @@ -0,0 +1,126 @@ +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2010-2011 OpenStack Foundation +# Copyright 2012 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +"""Implementation of paginate query.""" + +import sqlalchemy + +from cinder import exception +from cinder.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + + +# copied from glance/db/sqlalchemy/api.py +def paginate_query(query, model, limit, sort_keys, marker=None, + sort_dir=None, sort_dirs=None): + """Returns a query with sorting / pagination criteria added. + + Pagination works by requiring a unique sort_key, specified by sort_keys. + (If sort_keys is not unique, then we risk looping through values.) + We use the last row in the previous page as the 'marker' for pagination. + So we must return values that follow the passed marker in the order. + With a single-valued sort_key, this would be easy: sort_key > X. + With a compound-values sort_key, (k1, k2, k3) we must do this to repeat + the lexicographical ordering: + (k1 > X1) or (k1 == X1 && k2 > X2) or (k1 == X1 && k2 == X2 && k3 > X3) + + We also have to cope with different sort_directions. + + Typically, the id of the last row is used as the client-facing pagination + marker, then the actual marker object must be fetched from the db and + passed in to us as marker. + + :param query: the query object to which we should add paging/sorting + :param model: the ORM model class + :param limit: maximum number of items to return + :param sort_keys: array of attributes by which results should be sorted + :param marker: the last item of the previous page; we returns the next + results after this value. + :param sort_dir: direction in which results should be sorted (asc, desc) + :param sort_dirs: per-column array of sort_dirs, corresponding to sort_keys + + :rtype: sqlalchemy.orm.query.Query + :return: The query with sorting/pagination added. + """ + + if 'id' not in sort_keys: + # TODO(justinsb): If this ever gives a false-positive, check + # the actual primary key, rather than assuming its id + LOG.warn(_('Id not in sort_keys; is sort_keys unique?')) + + assert(not (sort_dir and sort_dirs)) + + # Default the sort direction to ascending + if sort_dirs is None and sort_dir is None: + sort_dir = 'asc' + + # Ensure a per-column sort direction + if sort_dirs is None: + sort_dirs = [sort_dir for _sort_key in sort_keys] + + assert(len(sort_dirs) == len(sort_keys)) + + # Add sorting + for current_sort_key, current_sort_dir in zip(sort_keys, sort_dirs): + sort_dir_func = { + 'asc': sqlalchemy.asc, + 'desc': sqlalchemy.desc, + }[current_sort_dir] + + try: + sort_key_attr = getattr(model, current_sort_key) + except AttributeError: + raise exception.InvalidInput(reason='Invalid sort key') + query = query.order_by(sort_dir_func(sort_key_attr)) + + # Add pagination + if marker is not None: + marker_values = [] + for sort_key in sort_keys: + v = getattr(marker, sort_key) + marker_values.append(v) + + # Build up an array of sort criteria as in the docstring + criteria_list = [] + for i in xrange(0, len(sort_keys)): + crit_attrs = [] + for j in xrange(0, i): + model_attr = getattr(model, sort_keys[j]) + crit_attrs.append((model_attr == marker_values[j])) + + model_attr = getattr(model, sort_keys[i]) + if sort_dirs[i] == 'desc': + crit_attrs.append((model_attr < marker_values[i])) + elif sort_dirs[i] == 'asc': + crit_attrs.append((model_attr > marker_values[i])) + else: + raise ValueError(_("Unknown sort direction, " + "must be 'desc' or 'asc'")) + + criteria = sqlalchemy.sql.and_(*crit_attrs) + criteria_list.append(criteria) + + f = sqlalchemy.sql.or_(*criteria_list) + query = query.filter(f) + + if limit is not None: + query = 
diff --git a/cinder/compute/__init__.py b/cinder/compute/__init__.py
index e69de29bb2..c70d141099 100644
--- a/cinder/compute/__init__.py
+++ b/cinder/compute/__init__.py
@@ -0,0 +1,34 @@
+# Copyright 2013 OpenStack Foundation.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import oslo.config.cfg
+
+import cinder.openstack.common.importutils
+
+_compute_opts = [
+    oslo.config.cfg.StrOpt('compute_api_class',
+                           default='cinder.compute.nova.API',
+                           help='The full class name of the '
+                                'compute API class to use'),
+]
+
+oslo.config.cfg.CONF.register_opts(_compute_opts)
+
+
+def API():
+    importutils = cinder.openstack.common.importutils
+    compute_api_class = oslo.config.cfg.CONF.compute_api_class
+    cls = importutils.import_class(compute_api_class)
+    return cls()
diff --git a/cinder/compute/aggregate_states.py b/cinder/compute/aggregate_states.py
index 92e1940277..149f403f75 100644
--- a/cinder/compute/aggregate_states.py
+++ b/cinder/compute/aggregate_states.py
@@ -1,6 +1,4 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 OpenStack LLC.
+# Copyright 2010 OpenStack Foundation
 # All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/cinder/compute/nova.py b/cinder/compute/nova.py
new file mode 100644
index 0000000000..6d731e1663
--- /dev/null
+++ b/cinder/compute/nova.py
@@ -0,0 +1,136 @@
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Handles all requests to Nova.
+"""
+
+
+from novaclient import service_catalog
+from novaclient.v1_1 import client as nova_client
+from novaclient.v1_1.contrib import assisted_volume_snapshots
+from oslo.config import cfg
+
+from cinder.db import base
+from cinder.openstack.common import log as logging
+
+nova_opts = [
+    cfg.StrOpt('nova_catalog_info',
+               default='compute:nova:publicURL',
+               help='Info to match when looking for nova in the service '
+                    'catalog. Format is : separated values of the form: '
+                    '<service_type>:<service_name>:<endpoint_type>'),
+    cfg.StrOpt('nova_catalog_admin_info',
+               default='compute:nova:adminURL',
+               help='Same as nova_catalog_info, but for admin endpoint.'),
+    cfg.StrOpt('nova_endpoint_template',
+               default=None,
+               help='Override service catalog lookup with template for nova '
+                    'endpoint e.g. http://localhost:8774/v2/%(tenant_id)s'),
+    cfg.StrOpt('nova_endpoint_admin_template',
+               default=None,
+               help='Same as nova_endpoint_template, but for admin '
+                    'endpoint.'),
+    cfg.StrOpt('os_region_name',
+               default=None,
+               help='Region name of this node'),
+    cfg.StrOpt('nova_ca_certificates_file',
+               default=None,
+               help='Location of CA certificates file to use for nova '
+                    'client requests.'),
+    cfg.BoolOpt('nova_api_insecure',
+                default=False,
+                help='Allow insecure SSL requests to nova'),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(nova_opts)
+
+LOG = logging.getLogger(__name__)
+
+
+def novaclient(context, admin=False):
+    # FIXME: the novaclient ServiceCatalog object is mis-named.
+    #        It actually contains the entire access blob.
+    # Only needed parts of the service catalog are passed in, see
+    # nova/context.py.
+    compat_catalog = {
+        'access': {'serviceCatalog': context.service_catalog or []}
+    }
+    sc = service_catalog.ServiceCatalog(compat_catalog)
+
+    nova_endpoint_template = CONF.nova_endpoint_template
+    nova_catalog_info = CONF.nova_catalog_info
+
+    if admin:
+        nova_endpoint_template = CONF.nova_endpoint_admin_template
+        nova_catalog_info = CONF.nova_catalog_admin_info
+
+    if nova_endpoint_template:
+        url = nova_endpoint_template % context.to_dict()
+    else:
+        info = nova_catalog_info
+        service_type, service_name, endpoint_type = info.split(':')
+        # extract the region if set in configuration
+        if CONF.os_region_name:
+            attr = 'region'
+            filter_value = CONF.os_region_name
+        else:
+            attr = None
+            filter_value = None
+        url = sc.url_for(attr=attr,
+                         filter_value=filter_value,
+                         service_type=service_type,
+                         service_name=service_name,
+                         endpoint_type=endpoint_type)
+
+    LOG.debug(_('Novaclient connection created using URL: %s') % url)
+
+    extensions = [assisted_volume_snapshots]
+
+    c = nova_client.Client(context.user_id,
+                           context.auth_token,
+                           context.project_id,
+                           auth_url=url,
+                           insecure=CONF.nova_api_insecure,
+                           cacert=CONF.nova_ca_certificates_file,
+                           extensions=extensions)
+    # noauth extracts user_id:project_id from auth_token
+    c.client.auth_token = context.auth_token or '%s:%s' % (context.user_id,
+                                                           context.project_id)
+    c.client.management_url = url
+    return c
+
+
+class API(base.Base):
+    """API for interacting with novaclient."""
+
+    def update_server_volume(self, context, server_id, attachment_id,
+                             new_volume_id):
+        novaclient(context).volumes.update_server_volume(server_id,
+                                                         attachment_id,
+                                                         new_volume_id)
+
+    def create_volume_snapshot(self, context, volume_id, create_info):
+        nova = novaclient(context, admin=True)
+
+        nova.assisted_volume_snapshots.create(
+            volume_id,
+            create_info=create_info)
+
+    def delete_volume_snapshot(self, context, snapshot_id, delete_info):
+        nova = novaclient(context, admin=True)
+
+        nova.assisted_volume_snapshots.delete(
+            snapshot_id,
+            delete_info=delete_info)
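A note on novaclient() above: endpoint resolution prefers the explicit
template options and only falls back to the Keystone service catalog when no
template is set. A minimal sketch of the two paths, assuming a context whose
to_dict() supplies tenant_id (values here are illustrative; CONF, context and
sc are the names already in scope inside the function)::

    # Path 1: nova_endpoint_template set in cinder.conf, e.g.
    # 'http://localhost:8774/v2/%(tenant_id)s' -- plain string
    # interpolation against the request context.
    url = CONF.nova_endpoint_template % context.to_dict()

    # Path 2: catalog lookup driven by the default
    # 'compute:nova:publicURL' triple.
    service_type, service_name, endpoint_type = \
        CONF.nova_catalog_info.split(':')
    url = sc.url_for(service_type=service_type,
                     service_name=service_name,
                     endpoint_type=endpoint_type)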
diff --git a/cinder/context.py b/cinder/context.py
index a257f0a701..c97fdea8e5 100644
--- a/cinder/context.py
+++ b/cinder/context.py
@@ -1,6 +1,4 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 OpenStack LLC.
+# Copyright 2011 OpenStack Foundation
 # Copyright 2010 United States Government as represented by the
 # Administrator of the National Aeronautics and Space Administration.
 # All Rights Reserved.
@@ -20,19 +18,19 @@
 """RequestContext: context for requests that persist through all of cinder."""
 
 import copy
+import uuid
 
-from cinder.openstack.common import log as logging
 from cinder.openstack.common import local
+from cinder.openstack.common import log as logging
 from cinder.openstack.common import timeutils
 from cinder import policy
-from cinder import utils
 
 LOG = logging.getLogger(__name__)
 
 
 def generate_request_id():
-    return 'req-' + str(utils.gen_uuid())
+    return 'req-' + str(uuid.uuid4())
 
 
 class RequestContext(object):
@@ -43,10 +41,12 @@ class RequestContext(object):
     """
 
     def __init__(self, user_id, project_id, is_admin=None, read_deleted="no",
-                 roles=None, remote_address=None, timestamp=None,
-                 request_id=None, auth_token=None, overwrite=True,
-                 quota_class=None, **kwargs):
-        """
+                 roles=None, project_name=None, remote_address=None,
+                 timestamp=None, request_id=None, auth_token=None,
+                 overwrite=True, quota_class=None, service_catalog=None,
+                 **kwargs):
+        """Initialize RequestContext.
+
         :param read_deleted: 'no' indicates deleted records are hidden,
             'yes' indicates deleted records are visible, 'only' indicates
             that *only* deleted records are visible.
@@ -59,11 +59,12 @@ def __init__(self, user_id, project_id, is_admin=None, read_deleted="no",
         """
         if kwargs:
             LOG.warn(_('Arguments dropped when creating context: %s') %
-                    str(kwargs))
+                     str(kwargs))
         self.user_id = user_id
         self.project_id = project_id
         self.roles = roles or []
+        self.project_name = project_name
         self.is_admin = is_admin
         if self.is_admin is None:
             self.is_admin = policy.check_is_admin(self.roles)
@@ -83,6 +84,15 @@ def __init__(self, user_id, project_id, is_admin=None, read_deleted="no",
         self.quota_class = quota_class
         if overwrite or not hasattr(local.store, 'context'):
             self.update_store()
+        self.quota_committed = False
+
+        if service_catalog:
+            # Only include required parts of service_catalog
+            self.service_catalog = [s for s in service_catalog
+                                    if s.get('type') in ('compute',)]
+        else:
+            # if the list is empty or None
+            self.service_catalog = []
 
     def _get_read_deleted(self):
         return self._read_deleted
@@ -105,6 +115,7 @@ def update_store(self):
     def to_dict(self):
         return {'user_id': self.user_id,
                 'project_id': self.project_id,
+                'project_name': self.project_name,
                 'is_admin': self.is_admin,
                 'read_deleted': self.read_deleted,
                 'roles': self.roles,
@@ -112,7 +123,10 @@ def to_dict(self):
                 'timestamp': timeutils.strtime(self.timestamp),
                 'request_id': self.request_id,
                 'auth_token': self.auth_token,
-                'quota_class': self.quota_class}
+                'quota_class': self.quota_class,
+                'service_catalog': self.service_catalog,
+                'tenant': self.tenant,
+                'user': self.user}
 
     @classmethod
     def from_dict(cls, values):
@@ -131,6 +145,22 @@ def elevated(self, read_deleted=None, overwrite=False):
 
         return context
 
+    def deepcopy(self):
+        return copy.deepcopy(self)
+
+    # NOTE(sirp): the openstack/common version of RequestContext uses
+    # tenant/user whereas the Cinder version uses project_id/user_id. We need
+    # this shim in order to use context-aware code from openstack/common, like
+    # logging, until we make the switch to using openstack/common's version of
+    # RequestContext.
+    @property
+    def tenant(self):
+        return self.project_id
+
+    @property
+    def user(self):
+        return self.user_id
+
 
 def get_admin_context(read_deleted="no"):
     return RequestContext(user_id=None,
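An aside on the tenant/user shim added at the end of this class: the two
properties are pure aliases, so context-aware code from openstack/common and
Cinder-native code observe the same values. A quick illustrative sketch (IDs
made up)::

    ctx = RequestContext(user_id='u-123', project_id='p-456')
    assert ctx.tenant == ctx.project_id   # 'p-456'
    assert ctx.user == ctx.user_id        # 'u-123'
    # to_dict() now emits both spellings of each field.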
diff --git a/cinder/db/__init__.py b/cinder/db/__init__.py
index f4eb417ec9..e59bc4f3fc 100644
--- a/cinder/db/__init__.py
+++ b/cinder/db/__init__.py
@@ -1,6 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2010 United States Government as represented by the
 # Administrator of the National Aeronautics and Space Administration.
 # All Rights Reserved.
diff --git a/cinder/db/api.py b/cinder/db/api.py
index 120dad789c..a59d5409cf 100644
--- a/cinder/db/api.py
+++ b/cinder/db/api.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
 # Copyright 2010 United States Government as represented by the
 # Administrator of the National Aeronautics and Space Administration.
@@ -32,24 +30,28 @@
 
 **Related Flags**
 
-:db_backend:  string to lookup in the list of LazyPluggable backends.
-              `sqlalchemy` is the only supported backend right now.
+:backend:  string to lookup in the list of LazyPluggable backends.
+           `sqlalchemy` is the only supported backend right now.
 
-:sql_connection:  string specifying the sqlalchemy connection to use, like:
-                  `sqlite:///var/lib/cinder/cinder.sqlite`.
+:connection:  string specifying the sqlalchemy connection to use, like:
+              `sqlite:///var/lib/cinder/cinder.sqlite`.
 
 :enable_new_services:  when adding a new service to the database, is it in the
                        pool of available hardware (Default: True)
 
 """
 
-from cinder import exception
-from cinder import flags
-from cinder.openstack.common import cfg
-from cinder import utils
+from oslo.config import cfg
+
+from cinder.openstack.common.db import api as db_api
 
 
 db_opts = [
+    # TODO(rpodolyaka): this option is deprecated but still passed to
+    #                   the LazyPluggable class, which doesn't support
+    #                   retrieving options that were put into groups.
+    #                   Nova's version of this class does support that.
+    #                   Perhaps we should move it to Oslo and then reuse
+    #                   it here.
    cfg.StrOpt('db_backend',
               default='sqlalchemy',
               help='The backend to use for db'),
@@ -62,18 +64,16 @@
     cfg.StrOpt('snapshot_name_template',
                default='snapshot-%s',
                help='Template string to be used to generate snapshot names'),
-    ]
-
-FLAGS = flags.FLAGS
-FLAGS.register_opts(db_opts)
+    cfg.StrOpt('backup_name_template',
+               default='backup-%s',
+               help='Template string to be used to generate backup names'), ]
 
-IMPL = utils.LazyPluggable('db_backend',
-                           sqlalchemy='cinder.db.sqlalchemy.api')
+CONF = cfg.CONF
+CONF.register_opts(db_opts)
 
+_BACKEND_MAPPING = {'sqlalchemy': 'cinder.db.sqlalchemy.api'}
 
-class NoMoreTargets(exception.CinderException):
-    """No more available targets"""
-    pass
+IMPL = db_api.DBAPI(backend_mapping=_BACKEND_MAPPING)
 
 
 ###################
@@ -137,33 +137,6 @@ def service_update(context, service_id, values):
     return IMPL.service_update(context, service_id, values)
 
 
-###################
-
-
-def migration_update(context, id, values):
-    """Update a migration instance."""
-    return IMPL.migration_update(context, id, values)
-
-
-def migration_create(context, values):
-    """Create a migration record."""
-    return IMPL.migration_create(context, values)
-
-
-def migration_get(context, migration_id):
-    """Finds a migration by the id."""
-    return IMPL.migration_get(context, migration_id)
-
-
-def migration_get_by_instance_and_status(context, instance_uuid, status):
-    """Finds a migration by the instance uuid its migrating."""
-    return IMPL.migration_get_by_instance_and_status(context, instance_uuid,
-                                                     status)
-
-
-def migration_get_all_unconfirmed(context, confirm_window):
-    """Finds all unconfirmed migrations within the confirmation window."""
-    return IMPL.migration_get_all_unconfirmed(context, confirm_window)
-
-
 ###################
@@ -190,9 +163,10 @@ def volume_allocate_iscsi_target(context, volume_id, host):
     return IMPL.volume_allocate_iscsi_target(context, volume_id, host)
 
 
-def volume_attached(context, volume_id, instance_id, mountpoint):
+def volume_attached(context, volume_id, instance_id, host_name, mountpoint):
     """Ensure that a volume is set as attached."""
-    return IMPL.volume_attached(context, volume_id, instance_id, mountpoint)
+    return IMPL.volume_attached(context, volume_id, instance_id, host_name,
+                                mountpoint)
 
 
 def volume_create(context, values):
@@ -200,11 +174,20 @@ def volume_create(context, values):
     return IMPL.volume_create(context, values)
 
 
-def volume_data_get_for_project(context, project_id, session=None):
+def volume_data_get_for_host(context, host):
+    """Get (volume_count, gigabytes) for the given host."""
+    return IMPL.volume_data_get_for_host(context,
+                                         host)
+
+
+def volume_data_get_for_project(context, project_id):
     """Get (volume_count, gigabytes) for project."""
-    return IMPL.volume_data_get_for_project(context,
-                                            project_id,
-                                            session)
+    return IMPL.volume_data_get_for_project(context, project_id)
+
+
+def finish_volume_migration(context, src_vol_id, dest_vol_id):
+    """Perform database updates upon completion of volume migration."""
+    return IMPL.finish_volume_migration(context, src_vol_id, dest_vol_id)
 
 
 def volume_destroy(context, volume_id):
@@ -222,9 +205,9 @@ def volume_get(context, volume_id):
     return IMPL.volume_get(context, volume_id)
 
 
-def volume_get_all(context):
+def volume_get_all(context, marker, limit, sort_key, sort_dir):
     """Get all volumes."""
-    return IMPL.volume_get_all(context)
+    return IMPL.volume_get_all(context, marker, limit, sort_key, sort_dir)
 
 
 def volume_get_all_by_host(context, host):
@@ -237,9 +220,11 @@ def 
volume_get_all_by_instance_uuid(context, instance_uuid): return IMPL.volume_get_all_by_instance_uuid(context, instance_uuid) -def volume_get_all_by_project(context, project_id): +def volume_get_all_by_project(context, project_id, marker, limit, sort_key, + sort_dir): """Get all volumes belonging to a project.""" - return IMPL.volume_get_all_by_project(context, project_id) + return IMPL.volume_get_all_by_project(context, project_id, marker, limit, + sort_key, sort_dir) def volume_get_iscsi_target_num(context, volume_id): @@ -298,6 +283,40 @@ def snapshot_update(context, snapshot_id, values): return IMPL.snapshot_update(context, snapshot_id, values) +def snapshot_data_get_for_project(context, project_id, volume_type_id=None): + """Get count and gigabytes used for snapshots for specified project.""" + return IMPL.snapshot_data_get_for_project(context, + project_id, + volume_type_id) + + +def snapshot_get_active_by_window(context, begin, end=None, project_id=None): + """Get all the snapshots inside the window. + + Specifying a project_id will filter for a certain project. + """ + return IMPL.snapshot_get_active_by_window(context, begin, end, project_id) + + +#################### + + +def snapshot_metadata_get(context, snapshot_id): + """Get all metadata for a snapshot.""" + return IMPL.snapshot_metadata_get(context, snapshot_id) + + +def snapshot_metadata_delete(context, snapshot_id, key): + """Delete the given metadata item.""" + IMPL.snapshot_metadata_delete(context, snapshot_id, key) + + +def snapshot_metadata_update(context, snapshot_id, metadata, delete): + """Update metadata if it exists, otherwise create it.""" + return IMPL.snapshot_metadata_update(context, snapshot_id, + metadata, delete) + + #################### @@ -313,7 +332,25 @@ def volume_metadata_delete(context, volume_id, key): def volume_metadata_update(context, volume_id, metadata, delete): """Update metadata if it exists, otherwise create it.""" - IMPL.volume_metadata_update(context, volume_id, metadata, delete) + return IMPL.volume_metadata_update(context, volume_id, metadata, delete) + + +################## + + +def volume_admin_metadata_get(context, volume_id): + """Get all administration metadata for a volume.""" + return IMPL.volume_admin_metadata_get(context, volume_id) + + +def volume_admin_metadata_delete(context, volume_id, key): + """Delete the given metadata item.""" + IMPL.volume_admin_metadata_delete(context, volume_id, key) + + +def volume_admin_metadata_update(context, volume_id, metadata, delete): + """Update metadata if it exists, otherwise create it.""" + IMPL.volume_admin_metadata_update(context, volume_id, metadata, delete) ################## @@ -329,9 +366,9 @@ def volume_type_get_all(context, inactive=False): return IMPL.volume_type_get_all(context, inactive) -def volume_type_get(context, id): +def volume_type_get(context, id, inactive=False): """Get volume type by id.""" - return IMPL.volume_type_get(context, id) + return IMPL.volume_type_get(context, id, inactive) def volume_type_get_by_name(context, name): @@ -339,15 +376,44 @@ def volume_type_get_by_name(context, name): return IMPL.volume_type_get_by_name(context, name) -def volume_type_destroy(context, name): +def volume_type_qos_associations_get(context, qos_specs_id, inactive=False): + """Get volume types that are associated with specific qos specs.""" + return IMPL.volume_type_qos_associations_get(context, + qos_specs_id, + inactive) + + +def volume_type_qos_associate(context, type_id, qos_specs_id): + """Associate a volume type with specific 
qos specs.""" + return IMPL.volume_type_qos_associate(context, type_id, qos_specs_id) + + +def volume_type_qos_disassociate(context, qos_specs_id, type_id): + """Disassociate a volume type from specific qos specs.""" + return IMPL.volume_type_qos_disassociate(context, qos_specs_id, type_id) + + +def volume_type_qos_disassociate_all(context, qos_specs_id): + """Disassociate all volume types from specific qos specs.""" + return IMPL.volume_type_qos_disassociate_all(context, + qos_specs_id) + + +def volume_type_qos_specs_get(context, type_id): + """Get all qos specs for given volume type.""" + return IMPL.volume_type_qos_specs_get(context, type_id) + + +def volume_type_destroy(context, id): """Delete a volume type.""" - return IMPL.volume_type_destroy(context, name) + return IMPL.volume_type_destroy(context, id) def volume_get_active_by_window(context, begin, end=None, project_id=None): """Get all the volumes inside the window. - Specifying a project_id will filter for a certain project.""" + Specifying a project_id will filter for a certain project. + """ return IMPL.volume_get_active_by_window(context, begin, end, project_id) @@ -364,101 +430,178 @@ def volume_type_extra_specs_delete(context, volume_type_id, key): IMPL.volume_type_extra_specs_delete(context, volume_type_id, key) -def volume_type_extra_specs_update_or_create(context, volume_type_id, - extra_specs): +def volume_type_extra_specs_update_or_create(context, + volume_type_id, + extra_specs): """Create or update volume type extra specs. This adds or modifies the - key/value pairs specified in the extra specs dict argument""" - IMPL.volume_type_extra_specs_update_or_create(context, volume_type_id, - extra_specs) + key/value pairs specified in the extra specs dict argument + """ + IMPL.volume_type_extra_specs_update_or_create(context, + volume_type_id, + extra_specs) ################### -def sm_backend_conf_create(context, values): - """Create a new SM Backend Config entry.""" - return IMPL.sm_backend_conf_create(context, values) +def volume_type_encryption_get(context, volume_type_id, session=None): + return IMPL.volume_type_encryption_get(context, volume_type_id, session) -def sm_backend_conf_update(context, sm_backend_conf_id, values): - """Update a SM Backend Config entry.""" - return IMPL.sm_backend_conf_update(context, sm_backend_conf_id, values) +def volume_type_encryption_delete(context, volume_type_id): + return IMPL.volume_type_encryption_delete(context, volume_type_id) -def sm_backend_conf_delete(context, sm_backend_conf_id): - """Delete a SM Backend Config.""" - return IMPL.sm_backend_conf_delete(context, sm_backend_conf_id) +# TODO(joel-coffman): split into two functions -- update and create +def volume_type_encryption_update_or_create(context, volume_type_id, + encryption_specs): + return IMPL.volume_type_encryption_update_or_create(context, + volume_type_id, + encryption_specs) -def sm_backend_conf_get(context, sm_backend_conf_id): - """Get a specific SM Backend Config.""" - return IMPL.sm_backend_conf_get(context, sm_backend_conf_id) +def volume_type_encryption_volume_get(context, volume_type_id, session=None): + return IMPL.volume_type_encryption_volume_get(context, volume_type_id, + session) -def sm_backend_conf_get_by_sr(context, sr_uuid): - """Get a specific SM Backend Config.""" - return IMPL.sm_backend_conf_get_by_sr(context, sr_uuid) +def volume_encryption_metadata_get(context, volume_id, session=None): + return IMPL.volume_encryption_metadata_get(context, volume_id, session) -def 
sm_backend_conf_get_all(context):
-    """Get all SM Backend Configs."""
-    return IMPL.sm_backend_conf_get_all(context)
+###################
 
 
-####################
+def qos_specs_create(context, values):
+    """Create a qos_specs."""
+    return IMPL.qos_specs_create(context, values)
 
 
-def sm_flavor_create(context, values):
-    """Create a new SM Flavor entry."""
-    return IMPL.sm_flavor_create(context, values)
+def qos_specs_get(context, qos_specs_id):
+    """Get all specifications for a given qos_specs."""
+    return IMPL.qos_specs_get(context, qos_specs_id)
 
 
-def sm_flavor_update(context, sm_flavor_id, values):
-    """Update a SM Flavor entry."""
-    return IMPL.sm_flavor_update(context, values)
+def qos_specs_get_all(context, inactive=False, filters=None):
+    """Get all qos_specs."""
+    return IMPL.qos_specs_get_all(context, inactive, filters)
 
 
-def sm_flavor_delete(context, sm_flavor_id):
-    """Delete a SM Flavor."""
-    return IMPL.sm_flavor_delete(context, sm_flavor_id)
+def qos_specs_get_by_name(context, name):
+    """Get all specifications for a given qos_specs."""
+    return IMPL.qos_specs_get_by_name(context, name)
 
 
-def sm_flavor_get(context, sm_flavor):
-    """Get a specific SM Flavor."""
-    return IMPL.sm_flavor_get(context, sm_flavor)
+def qos_specs_associations_get(context, qos_specs_id):
+    """Get all associated volume types for a given qos_specs."""
+    return IMPL.qos_specs_associations_get(context, qos_specs_id)
 
 
-def sm_flavor_get_all(context):
-    """Get all SM Flavors."""
-    return IMPL.sm_flavor_get_all(context)
+def qos_specs_associate(context, qos_specs_id, type_id):
+    """Associate qos_specs with a volume type."""
+    return IMPL.qos_specs_associate(context, qos_specs_id, type_id)
 
 
-####################
+def qos_specs_disassociate(context, qos_specs_id, type_id):
+    """Disassociate qos_specs from a volume type."""
+    return IMPL.qos_specs_disassociate(context, qos_specs_id, type_id)
 
 
-def sm_volume_create(context, values):
-    """Create a new child Zone entry."""
-    return IMPL.sm_volume_create(context, values)
+def qos_specs_disassociate_all(context, qos_specs_id):
+    """Disassociate qos_specs from all entities."""
+    return IMPL.qos_specs_disassociate_all(context, qos_specs_id)
 
 
-def sm_volume_update(context, volume_id, values):
-    """Update a child Zone entry."""
-    return IMPL.sm_volume_update(context, values)
+def qos_specs_delete(context, qos_specs_id):
+    """Delete the qos_specs."""
+    IMPL.qos_specs_delete(context, qos_specs_id)
 
 
-def sm_volume_delete(context, volume_id):
-    """Delete a child Zone."""
-    return IMPL.sm_volume_delete(context, volume_id)
+def qos_specs_item_delete(context, qos_specs_id, key):
+    """Delete specified key in the qos_specs."""
+    IMPL.qos_specs_item_delete(context, qos_specs_id, key)
 
 
-def sm_volume_get(context, volume_id):
-    """Get a specific child Zone."""
-    return IMPL.sm_volume_get(context, volume_id)
+def qos_specs_update(context, qos_specs_id, specs):
+    """Update qos specs.
+
+    This adds or modifies the key/value pairs specified in the
+    specs dict argument for a given qos_specs.
+ """ + IMPL.qos_specs_update(context, qos_specs_id, specs) + + +################### + + +def volume_glance_metadata_create(context, volume_id, key, value): + """Update the Glance metadata for the specified volume.""" + return IMPL.volume_glance_metadata_create(context, + volume_id, + key, + value) + + +def volume_glance_metadata_get_all(context): + """Return the glance metadata for all volumes.""" + return IMPL.volume_glance_metadata_get_all(context) + + +def volume_glance_metadata_get(context, volume_id): + """Return the glance metadata for a volume.""" + return IMPL.volume_glance_metadata_get(context, volume_id) + + +def volume_snapshot_glance_metadata_get(context, snapshot_id): + """Return the Glance metadata for the specified snapshot.""" + return IMPL.volume_snapshot_glance_metadata_get(context, snapshot_id) + + +def volume_glance_metadata_copy_to_snapshot(context, snapshot_id, volume_id): + """Update the Glance metadata for a snapshot. + + This will copy all of the key:value pairs from the originating volume, + to ensure that a volume created from the snapshot will retain the + original metadata. + """ + return IMPL.volume_glance_metadata_copy_to_snapshot(context, snapshot_id, + volume_id) + + +def volume_glance_metadata_copy_to_volume(context, volume_id, snapshot_id): + """Update the Glance metadata from a volume (created from a snapshot). + + This will copy all of the key:value pairs from the originating snapshot, + to ensure that the Glance metadata from the original volume is retained. + """ + return IMPL.volume_glance_metadata_copy_to_volume(context, volume_id, + snapshot_id) + + +def volume_glance_metadata_delete_by_volume(context, volume_id): + """Delete the glance metadata for a volume.""" + return IMPL.volume_glance_metadata_delete_by_volume(context, volume_id) + + +def volume_glance_metadata_delete_by_snapshot(context, snapshot_id): + """Delete the glance metadata for a snapshot.""" + return IMPL.volume_glance_metadata_delete_by_snapshot(context, snapshot_id) + + +def volume_glance_metadata_copy_from_volume_to_volume(context, + src_volume_id, + volume_id): + """Update the Glance metadata for a volume by copying all of the key:value + pairs from the originating volume. + + This is so that a volume created from the volume (clone) will retain the + original metadata. 
+ """ + return IMPL.volume_glance_metadata_copy_from_volume_to_volume( + context, + src_volume_id, + volume_id) -def sm_volume_get_all(context): - """Get all child Zones.""" - return IMPL.sm_volume_get_all(context) ################### @@ -501,6 +644,11 @@ def quota_class_get(context, class_name, resource): return IMPL.quota_class_get(context, class_name, resource) +def quota_class_get_default(context): + """Retrieve all default quotas.""" + return IMPL.quota_class_get_default(context) + + def quota_class_get_all_by_name(context, class_name): """Retrieve all quotas associated with a given quota class.""" return IMPL.quota_class_get_all_by_name(context, class_name) @@ -524,13 +672,6 @@ def quota_class_destroy_all_by_name(context, class_name): ################### -def quota_usage_create(context, project_id, resource, in_use, reserved, - until_refresh): - """Create a quota usage for the given project and resource.""" - return IMPL.quota_usage_create(context, project_id, resource, - in_use, reserved, until_refresh) - - def quota_usage_get(context, project_id, resource): """Retrieve a quota usage or raise if it does not exist.""" return IMPL.quota_usage_get(context, project_id, resource) @@ -541,18 +682,6 @@ def quota_usage_get_all_by_project(context, project_id): return IMPL.quota_usage_get_all_by_project(context, project_id) -def quota_usage_update(context, class_name, resource, in_use, reserved, - until_refresh): - """Update a quota usage or raise if it does not exist.""" - return IMPL.quota_usage_update(context, project_id, resource, - in_use, reserved, until_refresh) - - -def quota_usage_destroy(context, project_id, resource): - """Destroy the quota usage or raise if it does not exist.""" - return IMPL.quota_usage_destroy(context, project_id, resource) - - ################### @@ -582,27 +711,103 @@ def reservation_destroy(context, uuid): def quota_reserve(context, resources, quotas, deltas, expire, - until_refresh, max_age): + until_refresh, max_age, project_id=None): """Check quotas and create appropriate reservations.""" return IMPL.quota_reserve(context, resources, quotas, deltas, expire, - until_refresh, max_age) + until_refresh, max_age, project_id=project_id) -def reservation_commit(context, reservations): +def reservation_commit(context, reservations, project_id=None): """Commit quota reservations.""" - return IMPL.reservation_commit(context, reservations) + return IMPL.reservation_commit(context, reservations, + project_id=project_id) -def reservation_rollback(context, reservations): +def reservation_rollback(context, reservations, project_id=None): """Roll back quota reservations.""" - return IMPL.reservation_rollback(context, reservations) + return IMPL.reservation_rollback(context, reservations, + project_id=project_id) def quota_destroy_all_by_project(context, project_id): """Destroy all quotas associated with a given project.""" - return IMPL.quota_get_all_by_project(context, project_id) + return IMPL.quota_destroy_all_by_project(context, project_id) def reservation_expire(context): """Roll back any expired reservations.""" return IMPL.reservation_expire(context) + + +################### + + +def backup_get(context, backup_id): + """Get a backup or raise if it does not exist.""" + return IMPL.backup_get(context, backup_id) + + +def backup_get_all(context): + """Get all backups.""" + return IMPL.backup_get_all(context) + + +def backup_get_all_by_host(context, host): + """Get all backups belonging to a host.""" + return IMPL.backup_get_all_by_host(context, host) + + +def 
backup_create(context, values): + """Create a backup from the values dictionary.""" + return IMPL.backup_create(context, values) + + +def backup_get_all_by_project(context, project_id): + """Get all backups belonging to a project.""" + return IMPL.backup_get_all_by_project(context, project_id) + + +def backup_update(context, backup_id, values): + """Set the given properties on a backup and update it. + + Raises NotFound if backup does not exist. + """ + return IMPL.backup_update(context, backup_id, values) + + +def backup_destroy(context, backup_id): + """Destroy the backup or raise if it does not exist.""" + return IMPL.backup_destroy(context, backup_id) + + +################### + + +def transfer_get(context, transfer_id): + """Get a volume transfer record or raise if it does not exist.""" + return IMPL.transfer_get(context, transfer_id) + + +def transfer_get_all(context): + """Get all volume transfer records.""" + return IMPL.transfer_get_all(context) + + +def transfer_get_all_by_project(context, project_id): + """Get all volume transfer records for specified project.""" + return IMPL.transfer_get_all_by_project(context, project_id) + + +def transfer_create(context, values): + """Create an entry in the transfers table.""" + return IMPL.transfer_create(context, values) + + +def transfer_destroy(context, transfer_id): + """Destroy a record in the volume transfer table.""" + return IMPL.transfer_destroy(context, transfer_id) + + +def transfer_accept(context, transfer_id, user_id, project_id): + """Accept a volume transfer.""" + return IMPL.transfer_accept(context, transfer_id, user_id, project_id) diff --git a/cinder/db/base.py b/cinder/db/base.py index 8b9d437c6d..21a862f564 100644 --- a/cinder/db/base.py +++ b/cinder/db/base.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. @@ -18,8 +16,9 @@ """Base class for classes that need modular database access.""" -from cinder import flags -from cinder.openstack.common import cfg + +from oslo.config import cfg + from cinder.openstack.common import importutils @@ -27,8 +26,8 @@ default='cinder.db', help='driver to use for database access') -FLAGS = flags.FLAGS -FLAGS.register_opt(db_driver_opt) +CONF = cfg.CONF +CONF.register_opt(db_driver_opt) class Base(object): @@ -36,5 +35,5 @@ class Base(object): def __init__(self, db_driver=None): if not db_driver: - db_driver = FLAGS.db_driver + db_driver = CONF.db_driver self.db = importutils.import_module(db_driver) # pylint: disable=C0103 diff --git a/cinder/db/migration.py b/cinder/db/migration.py index 6079055e50..041aeb7eb9 100644 --- a/cinder/db/migration.py +++ b/cinder/db/migration.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. 
@@ -25,9 +23,6 @@ sqlalchemy='cinder.db.sqlalchemy.migration') -INIT_VERSION = 000 - - def db_sync(version=None): """Migrate the database to `version` or the most recent version.""" return IMPL.db_sync(version=version) @@ -36,3 +31,8 @@ def db_sync(version=None): def db_version(): """Display the current database version.""" return IMPL.db_version() + + +def db_initial_version(): + """The starting version for the database.""" + return IMPL.db_initial_version() diff --git a/cinder/db/sqlalchemy/__init__.py b/cinder/db/sqlalchemy/__init__.py index 747015af53..e69de29bb2 100644 --- a/cinder/db/sqlalchemy/__init__.py +++ b/cinder/db/sqlalchemy/__init__.py @@ -1,17 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. diff --git a/cinder/db/sqlalchemy/api.py b/cinder/db/sqlalchemy/api.py index be29b30bdb..35f03fc2a5 100644 --- a/cinder/db/sqlalchemy/api.py +++ b/cinder/db/sqlalchemy/api.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright (c) 2011 X.commerce, a business unit of eBay Inc. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. 
@@ -19,32 +17,45 @@
 
 """Implementation of SQLAlchemy backend."""
 
-import datetime
-import functools
+
+import sys
+import uuid
 import warnings
 
-from cinder import db
-from cinder import exception
-from cinder import flags
-from cinder import utils
-from cinder.openstack.common import log as logging
-from cinder.db.sqlalchemy import models
-from cinder.db.sqlalchemy.session import get_session
-from cinder.openstack.common import timeutils
+from oslo.config import cfg
 from sqlalchemy.exc import IntegrityError
 from sqlalchemy import or_
-from sqlalchemy.orm import joinedload
-from sqlalchemy.orm import joinedload_all
-from sqlalchemy.sql.expression import asc
-from sqlalchemy.sql.expression import desc
+from sqlalchemy.orm import joinedload, joinedload_all
 from sqlalchemy.sql.expression import literal_column
 from sqlalchemy.sql import func
-from sqlalchemy.sql.expression import literal_column
 
-FLAGS = flags.FLAGS
+from cinder.common import sqlalchemyutils
+from cinder.db.sqlalchemy import models
+from cinder import exception
+from cinder.openstack.common.db import exception as db_exc
+from cinder.openstack.common.db.sqlalchemy import session as db_session
+from cinder.openstack.common import log as logging
+from cinder.openstack.common import timeutils
+from cinder.openstack.common import uuidutils
+
+CONF = cfg.CONF
 LOG = logging.getLogger(__name__)
 
+db_session.set_defaults(sql_connection='sqlite:///$state_path/$sqlite_db',
+                        sqlite_db='cinder.sqlite')
+
+get_engine = db_session.get_engine
+get_session = db_session.get_session
+
+_DEFAULT_QUOTA_NAME = 'default'
+
+
+def get_backend():
+    """The backend is this module itself."""
+
+    return sys.modules[__name__]
+
 
 def is_admin_context(context):
     """Indicates if the request context is an administrator."""
@@ -133,12 +144,26 @@ def require_volume_exists(f):
     """
 
     def wrapper(context, volume_id, *args, **kwargs):
-        db.volume_get(context, volume_id)
+        volume_get(context, volume_id)
         return f(context, volume_id, *args, **kwargs)
     wrapper.__name__ = f.__name__
     return wrapper
 
 
+def require_snapshot_exists(f):
+    """Decorator to require the specified snapshot to exist.
+
+    Requires the wrapped function to use context and snapshot_id as
+    its first two arguments.
+    """
+
+    def wrapper(context, snapshot_id, *args, **kwargs):
+        snapshot_get(context, snapshot_id)
+        return f(context, snapshot_id, *args, **kwargs)
+    wrapper.__name__ = f.__name__
+    return wrapper
+
+
 def model_query(context, *args, **kwargs):
     """Query helper that accounts for context's `read_deleted` field.
 
@@ -162,7 +187,7 @@ def model_query(context, *args, **kwargs):
         query = query.filter_by(deleted=True)
     else:
         raise Exception(
-                _("Unrecognized read_deleted value '%s'") % read_deleted)
+            _("Unrecognized read_deleted value '%s'") % read_deleted)
 
     if project_only and is_user_context(context):
         query = query.filter_by(project_id=context.project_id)
@@ -170,46 +195,45 @@ def model_query(context, *args, **kwargs):
     return query
 
 
-def exact_filter(query, model, filters, legal_keys):
-    """Applies exact match filtering to a query.
-
-    Returns the updated query.  Modifies filters argument to remove
-    filters consumed.
-
-    :param query: query to apply filters to
-    :param model: model object the query applies to, for IN-style
-                  filtering
-    :param filters: dictionary of filters; values that are lists,
-                    tuples, sets, or frozensets cause an 'IN' test to
-                    be performed, while exact matching ('==' operator)
-                    is used for other values
-    :param legal_keys: list of keys to apply exact filtering to
-    """
+def _sync_volumes(context, project_id, session, volume_type_id=None,
+                  volume_type_name=None):
+    (volumes, gigs) = _volume_data_get_for_project(
+        context, project_id, volume_type_id=volume_type_id, session=session)
+    key = 'volumes'
+    if volume_type_name:
+        key += '_' + volume_type_name
+    return {key: volumes}
 
-    filter_dict = {}
 
-    # Walk through all the keys
-    for key in legal_keys:
-        # Skip ones we're not filtering on
-        if key not in filters:
-            continue
+def _sync_snapshots(context, project_id, session, volume_type_id=None,
+                    volume_type_name=None):
+    (snapshots, gigs) = _snapshot_data_get_for_project(
+        context, project_id, volume_type_id=volume_type_id, session=session)
+    key = 'snapshots'
+    if volume_type_name:
+        key += '_' + volume_type_name
+    return {key: snapshots}
 
-        # OK, filtering on this key; what value do we search for?
-        value = filters.pop(key)
 
-        if isinstance(value, (list, tuple, set, frozenset)):
-            # Looking for values in a list; apply to query directly
-            column_attr = getattr(model, key)
-            query = query.filter(column_attr.in_(value))
-        else:
-            # OK, simple exact match; save for later
-            filter_dict[key] = value
+def _sync_gigabytes(context, project_id, session, volume_type_id=None,
+                    volume_type_name=None):
+    (_junk, vol_gigs) = _volume_data_get_for_project(
+        context, project_id, volume_type_id=volume_type_id, session=session)
+    key = 'gigabytes'
+    if volume_type_name:
+        key += '_' + volume_type_name
+    if CONF.no_snapshot_gb_quota:
+        return {key: vol_gigs}
+    (_junk, snap_gigs) = _snapshot_data_get_for_project(
+        context, project_id, volume_type_id=volume_type_id, session=session)
+    return {key: vol_gigs + snap_gigs}
 
-    # Apply simple exact matches
-    if filter_dict:
-        query = query.filter_by(**filter_dict)
 
-    return query
+QUOTA_SYNC_FUNCTIONS = {
+    '_sync_volumes': _sync_volumes,
+    '_sync_snapshots': _sync_snapshots,
+    '_sync_gigabytes': _sync_gigabytes,
+}
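The _sync_* helpers above are what quota_reserve() (later in this patch)
calls through QUOTA_SYNC_FUNCTIONS to recompute usage from the database; when
a per-volume-type quota resource is being refreshed, the returned key gains
the type-name suffix. A hedged sketch of the expected return shapes (counts
invented)::

    _sync_volumes(ctx, 'proj-1', session)
    # => {'volumes': 7}
    _sync_volumes(ctx, 'proj-1', session,
                  volume_type_id='...', volume_type_name='gold')
    # => {'volumes_gold': 3}
    _sync_gigabytes(ctx, 'proj-1', session)
    # => {'gigabytes': 120}  (snapshot GB included unless
    #                         CONF.no_snapshot_gb_quota is set)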
 
 
 ###################
 
 
@@ -219,21 +243,29 @@ def exact_filter(query, model, filters, legal_keys):
 def service_destroy(context, service_id):
     session = get_session()
     with session.begin():
-        service_ref = service_get(context, service_id, session=session)
+        service_ref = _service_get(context, service_id, session=session)
         service_ref.delete(session=session)
 
 
 @require_admin_context
-def service_get(context, service_id, session=None):
-    result = model_query(context, models.Service, session=session).\
-                     filter_by(id=service_id).\
-                     first()
+def _service_get(context, service_id, session=None):
+    result = model_query(
+        context,
+        models.Service,
+        session=session).\
+        filter_by(id=service_id).\
+        first()
     if not result:
         raise exception.ServiceNotFound(service_id=service_id)
 
     return result
 
 
+@require_admin_context
+def service_get(context, service_id):
+    return _service_get(context, service_id)
+
+
 @require_admin_context
 def service_get_all(context, disabled=None):
     query = model_query(context, models.Service)
@@ -246,26 +278,32 @@ def service_get_all(context, disabled=None):
 
 @require_admin_context
 def service_get_all_by_topic(context, topic):
-    return model_query(context, models.Service, read_deleted="no").\
-        filter_by(disabled=False).\
-        
filter_by(topic=topic).\ - all() + return model_query( + context, models.Service, read_deleted="no").\ + filter_by(disabled=False).\ + filter_by(topic=topic).\ + all() @require_admin_context def service_get_by_host_and_topic(context, host, topic): - return model_query(context, models.Service, read_deleted="no").\ - filter_by(disabled=False).\ - filter_by(host=host).\ - filter_by(topic=topic).\ - first() + result = model_query( + context, models.Service, read_deleted="no").\ + filter_by(disabled=False).\ + filter_by(host=host).\ + filter_by(topic=topic).\ + first() + if not result: + raise exception.ServiceNotFound(service_id=None) + return result @require_admin_context def service_get_all_by_host(context, host): - return model_query(context, models.Service, read_deleted="no").\ - filter_by(host=host).\ - all() + return model_query( + context, models.Service, read_deleted="no").\ + filter_by(host=host).\ + all() @require_admin_context @@ -274,24 +312,24 @@ def _service_get_all_topic_subquery(context, session, topic, subq, label): return model_query(context, models.Service, func.coalesce(sort_value, 0), session=session, read_deleted="no").\ - filter_by(topic=topic).\ - filter_by(disabled=False).\ - outerjoin((subq, models.Service.host == subq.c.host)).\ - order_by(sort_value).\ - all() + filter_by(topic=topic).\ + filter_by(disabled=False).\ + outerjoin((subq, models.Service.host == subq.c.host)).\ + order_by(sort_value).\ + all() @require_admin_context def service_get_all_volume_sorted(context): session = get_session() with session.begin(): - topic = FLAGS.volume_topic + topic = CONF.volume_topic label = 'volume_gigabytes' subq = model_query(context, models.Volume.host, func.sum(models.Volume.size).label(label), session=session, read_deleted="no").\ - group_by(models.Volume.host).\ - subquery() + group_by(models.Volume.host).\ + subquery() return _service_get_all_topic_subquery(context, session, topic, @@ -302,9 +340,9 @@ def service_get_all_volume_sorted(context): @require_admin_context def service_get_by_args(context, host, binary): result = model_query(context, models.Service).\ - filter_by(host=host).\ - filter_by(binary=binary).\ - first() + filter_by(host=host).\ + filter_by(binary=binary).\ + first() if not result: raise exception.HostBinaryNotFound(host=host, binary=binary) @@ -316,7 +354,7 @@ def service_get_by_args(context, host, binary): def service_create(context, values): service_ref = models.Service() service_ref.update(values) - if not FLAGS.enable_new_services: + if not CONF.enable_new_services: service_ref.disabled = True service_ref.save() return service_ref @@ -326,7 +364,7 @@ def service_create(context, values): def service_update(context, service_id, values): session = get_session() with session.begin(): - service_ref = service_get(context, service_id, session=session) + service_ref = _service_get(context, service_id, session=session) service_ref.update(values) service_ref.save(session=session) @@ -346,16 +384,15 @@ def _metadata_refs(metadata_dict, meta_class): def _dict_with_extra_specs(inst_type_query): - """Takes an instance, volume, or instance type query returned - by sqlalchemy and returns it as a dictionary, converting the - extra_specs entry from a list of dicts: + """Convert type query result to dict with extra_spec and rate_limit. - 'extra_specs' : [{'key': 'k1', 'value': 'v1', ...}, ...] 
+ Takes a volume type query returned by sqlalchemy and returns it + as a dictionary, converting the extra_specs entry from a list + of dicts: + 'extra_specs' : [{'key': 'k1', 'value': 'v1', ...}, ...] to a single dict: - 'extra_specs' : {'k1': 'v1'} - """ inst_type_dict = dict(inst_type_query) extra_specs = dict([(x['key'], x['value']) @@ -370,8 +407,8 @@ def _dict_with_extra_specs(inst_type_query): @require_admin_context def iscsi_target_count_by_host(context, host): return model_query(context, models.IscsiTarget).\ - filter_by(host=host).\ - count() + filter_by(host=host).\ + count() @require_admin_context @@ -391,12 +428,12 @@ def iscsi_target_create_safe(context, values): @require_context -def quota_get(context, project_id, resource, session=None): +def _quota_get(context, project_id, resource, session=None): result = model_query(context, models.Quota, session=session, read_deleted="no").\ - filter_by(project_id=project_id).\ - filter_by(resource=resource).\ - first() + filter_by(project_id=project_id).\ + filter_by(resource=resource).\ + first() if not result: raise exception.ProjectQuotaNotFound(project_id=project_id) @@ -404,13 +441,18 @@ def quota_get(context, project_id, resource, session=None): return result +@require_context +def quota_get(context, project_id, resource): + return _quota_get(context, project_id, resource) + + @require_context def quota_get_all_by_project(context, project_id): authorize_project_context(context, project_id) rows = model_query(context, models.Quota, read_deleted="no").\ - filter_by(project_id=project_id).\ - all() + filter_by(project_id=project_id).\ + all() result = {'project_id': project_id} for row in rows: @@ -433,7 +475,7 @@ def quota_create(context, project_id, resource, limit): def quota_update(context, project_id, resource, limit): session = get_session() with session.begin(): - quota_ref = quota_get(context, project_id, resource, session=session) + quota_ref = _quota_get(context, project_id, resource, session=session) quota_ref.hard_limit = limit quota_ref.save(session=session) @@ -442,7 +484,7 @@ def quota_update(context, project_id, resource, limit): def quota_destroy(context, project_id, resource): session = get_session() with session.begin(): - quota_ref = quota_get(context, project_id, resource, session=session) + quota_ref = _quota_get(context, project_id, resource, session=session) quota_ref.delete(session=session) @@ -450,12 +492,12 @@ def quota_destroy(context, project_id, resource): @require_context -def quota_class_get(context, class_name, resource, session=None): +def _quota_class_get(context, class_name, resource, session=None): result = model_query(context, models.QuotaClass, session=session, read_deleted="no").\ - filter_by(class_name=class_name).\ - filter_by(resource=resource).\ - first() + filter_by(class_name=class_name).\ + filter_by(resource=resource).\ + first() if not result: raise exception.QuotaClassNotFound(class_name=class_name) @@ -463,13 +505,30 @@ def quota_class_get(context, class_name, resource, session=None): return result +@require_context +def quota_class_get(context, class_name, resource): + return _quota_class_get(context, class_name, resource) + + +def quota_class_get_default(context): + rows = model_query(context, models.QuotaClass, + read_deleted="no").\ + filter_by(class_name=_DEFAULT_QUOTA_NAME).all() + + result = {'class_name': _DEFAULT_QUOTA_NAME} + for row in rows: + result[row.resource] = row.hard_limit + + return result + + @require_context def quota_class_get_all_by_name(context, class_name): 
authorize_quota_class_context(context, class_name) rows = model_query(context, models.QuotaClass, read_deleted="no").\ - filter_by(class_name=class_name).\ - all() + filter_by(class_name=class_name).\ + all() result = {'class_name': class_name} for row in rows: @@ -492,8 +551,8 @@ def quota_class_create(context, class_name, resource, limit): def quota_class_update(context, class_name, resource, limit): session = get_session() with session.begin(): - quota_class_ref = quota_class_get(context, class_name, resource, - session=session) + quota_class_ref = _quota_class_get(context, class_name, resource, + session=session) quota_class_ref.hard_limit = limit quota_class_ref.save(session=session) @@ -502,8 +561,8 @@ def quota_class_update(context, class_name, resource, limit): def quota_class_destroy(context, class_name, resource): session = get_session() with session.begin(): - quota_class_ref = quota_class_get(context, class_name, resource, - session=session) + quota_class_ref = _quota_class_get(context, class_name, resource, + session=session) quota_class_ref.delete(session=session) @@ -513,8 +572,8 @@ def quota_class_destroy_all_by_name(context, class_name): with session.begin(): quota_classes = model_query(context, models.QuotaClass, session=session, read_deleted="no").\ - filter_by(class_name=class_name).\ - all() + filter_by(class_name=class_name).\ + all() for quota_class_ref in quota_classes: quota_class_ref.delete(session=session) @@ -524,12 +583,11 @@ def quota_class_destroy_all_by_name(context, class_name): @require_context -def quota_usage_get(context, project_id, resource, session=None): - result = model_query(context, models.QuotaUsage, session=session, - read_deleted="no").\ - filter_by(project_id=project_id).\ - filter_by(resource=resource).\ - first() +def quota_usage_get(context, project_id, resource): + result = model_query(context, models.QuotaUsage, read_deleted="no").\ + filter_by(project_id=project_id).\ + filter_by(resource=resource).\ + first() if not result: raise exception.QuotaUsageNotFound(project_id=project_id) @@ -542,8 +600,8 @@ def quota_usage_get_all_by_project(context, project_id): authorize_project_context(context, project_id) rows = model_query(context, models.QuotaUsage, read_deleted="no").\ - filter_by(project_id=project_id).\ - all() + filter_by(project_id=project_id).\ + all() result = {'project_id': project_id} for row in rows: @@ -553,8 +611,9 @@ def quota_usage_get_all_by_project(context, project_id): @require_admin_context -def quota_usage_create(context, project_id, resource, in_use, reserved, - until_refresh, session=None): +def _quota_usage_create(context, project_id, resource, in_use, reserved, + until_refresh, session=None): + quota_usage_ref = models.QuotaUsage() quota_usage_ref.project_id = project_id quota_usage_ref.resource = resource @@ -566,44 +625,14 @@ def quota_usage_create(context, project_id, resource, in_use, reserved, return quota_usage_ref -@require_admin_context -def quota_usage_update(context, project_id, resource, in_use, reserved, - until_refresh, session=None): - def do_update(session): - quota_usage_ref = quota_usage_get(context, project_id, resource, - session=session) - quota_usage_ref.in_use = in_use - quota_usage_ref.reserved = reserved - quota_usage_ref.until_refresh = until_refresh - quota_usage_ref.save(session=session) - - if session: - # Assume caller started a transaction - do_update(session) - else: - session = get_session() - with session.begin(): - do_update(session) - - -@require_admin_context -def 
quota_usage_destroy(context, project_id, resource): - session = get_session() - with session.begin(): - quota_usage_ref = quota_usage_get(context, project_id, resource, - session=session) - quota_usage_ref.delete(session=session) - - ################### @require_context -def reservation_get(context, uuid, session=None): +def _reservation_get(context, uuid, session=None): result = model_query(context, models.Reservation, session=session, read_deleted="no").\ - filter_by(uuid=uuid).\ - first() + filter_by(uuid=uuid).first() if not result: raise exception.ReservationNotFound(uuid=uuid) @@ -611,13 +640,17 @@ def reservation_get(context, uuid, session=None): return result +@require_context +def reservation_get(context, uuid): + return _reservation_get(context, uuid) + + @require_context def reservation_get_all_by_project(context, project_id): authorize_project_context(context, project_id) - rows = model_query(context, models.QuotaUsage, read_deleted="no").\ - filter_by(project_id=project_id).\ - all() + rows = model_query(context, models.Reservation, read_deleted="no").\ + filter_by(project_id=project_id).all() result = {'project_id': project_id} for row in rows: @@ -628,8 +661,8 @@ def reservation_get_all_by_project(context, project_id): @require_admin_context -def reservation_create(context, uuid, usage, project_id, resource, delta, - expire, session=None): +def _reservation_create(context, uuid, usage, project_id, resource, delta, + expire, session=None): reservation_ref = models.Reservation() reservation_ref.uuid = uuid reservation_ref.usage_id = usage['id'] @@ -641,11 +674,18 @@ def reservation_create(context, uuid, usage, project_id, resource, delta, return reservation_ref +@require_admin_context +def reservation_create(context, uuid, usage, project_id, resource, delta, + expire): + return _reservation_create(context, uuid, usage, project_id, resource, + delta, expire) + + @require_admin_context def reservation_destroy(context, uuid): session = get_session() with session.begin(): - reservation_ref = reservation_get(context, uuid, session=session) + reservation_ref = _reservation_get(context, uuid, session=session) reservation_ref.delete(session=session) @@ -657,25 +697,28 @@ def reservation_destroy(context, uuid): # code always acquires the lock on quota_usages before acquiring the lock # on reservations. -def _get_quota_usages(context, session): +def _get_quota_usages(context, session, project_id): # Broken out for testability rows = model_query(context, models.QuotaUsage, read_deleted="no", session=session).\ - filter_by(project_id=context.project_id).\ - with_lockmode('update').\ - all() + filter_by(project_id=project_id).\ + with_lockmode('update').\ + all() return dict((row.resource, row) for row in rows) @require_context def quota_reserve(context, resources, quotas, deltas, expire, - until_refresh, max_age): + until_refresh, max_age, project_id=None): elevated = context.elevated() session = get_session() with session.begin(): + if project_id is None: + project_id = context.project_id + # Get the current usages - usages = _get_quota_usages(context, session) + usages = _get_quota_usages(context, session, project_id) # Handle usage refresh work = set(deltas.keys()) @@ -685,12 +728,12 @@ def quota_reserve(context, resources, quotas, deltas, expire, # Do we need to refresh the usage? 
refresh = False if resource not in usages: - usages[resource] = quota_usage_create(elevated, - context.project_id, - resource, - 0, 0, - until_refresh or None, - session=session) + usages[resource] = _quota_usage_create(elevated, + project_id, + resource, + 0, 0, + until_refresh or None, + session=session) refresh = True elif usages[resource].in_use < 0: # Negative in_use count indicates a desync, so try to @@ -700,25 +743,34 @@ def quota_reserve(context, resources, quotas, deltas, expire, usages[resource].until_refresh -= 1 if usages[resource].until_refresh <= 0: refresh = True - elif max_age and (usages[resource].updated_at - - timeutils.utcnow()).seconds >= max_age: + elif max_age and usages[resource].updated_at is not None and ( + (usages[resource].updated_at - + timeutils.utcnow()).seconds >= max_age): refresh = True # OK, refresh the usage if refresh: # Grab the sync routine - sync = resources[resource].sync - - updates = sync(elevated, context.project_id, session) + sync = QUOTA_SYNC_FUNCTIONS[resources[resource].sync] + volume_type_id = getattr(resources[resource], + 'volume_type_id', None) + volume_type_name = getattr(resources[resource], + 'volume_type_name', None) + updates = sync(elevated, project_id, + volume_type_id=volume_type_id, + volume_type_name=volume_type_name, + session=session) for res, in_use in updates.items(): # Make sure we have a destination for the usage! if res not in usages: - usages[res] = quota_usage_create(elevated, - context.project_id, - res, - 0, 0, - until_refresh or None, - session=session) + usages[res] = _quota_usage_create( + elevated, + project_id, + res, + 0, 0, + until_refresh or None, + session=session + ) # Update the usage usages[res].in_use = in_use @@ -737,18 +789,17 @@ def quota_reserve(context, resources, quotas, deltas, expire, # a best-effort mechanism. # Check for deltas that would go negative - unders = [resource for resource, delta in deltas.items() - if delta < 0 and - delta + usages[resource].in_use < 0] + unders = [r for r, delta in deltas.items() + if delta < 0 and delta + usages[r].in_use < 0] # Now, let's check the quotas # NOTE(Vek): We're only concerned about positive increments. # If a project has gone over quota, we want them to # be able to reduce their usage without any # problems. 
- overs = [resource for resource, delta in deltas.items() - if quotas[resource] >= 0 and delta >= 0 and - quotas[resource] < delta + usages[resource].total] + overs = [r for r, delta in deltas.items() + if quotas[r] >= 0 and delta >= 0 and + quotas[r] < delta + usages[r].total] # NOTE(Vek): The quota check needs to be in the transaction, # but the transaction doesn't fail just because @@ -761,12 +812,12 @@ def quota_reserve(context, resources, quotas, deltas, expire, if not overs: reservations = [] for resource, delta in deltas.items(): - reservation = reservation_create(elevated, - str(utils.gen_uuid()), - usages[resource], - context.project_id, - resource, delta, expire, - session=session) + reservation = _reservation_create(elevated, + str(uuid.uuid4()), + usages[resource], + project_id, + resource, delta, expire, + session=session) reservations.append(reservation.uuid) # Also update the reserved quantity @@ -790,7 +841,7 @@ def quota_reserve(context, resources, quotas, deltas, expire, if unders: LOG.warning(_("Change will make usage less than 0 for the following " - "resources: %(unders)s") % locals()) + "resources: %s") % unders) if overs: usages = dict((k, dict(in_use=v['in_use'], reserved=v['reserved'])) for k, v in usages.items()) @@ -807,16 +858,16 @@ def _quota_reservations(session, context, reservations): return model_query(context, models.Reservation, read_deleted="no", session=session).\ - filter(models.Reservation.uuid.in_(reservations)).\ - with_lockmode('update').\ - all() + filter(models.Reservation.uuid.in_(reservations)).\ + with_lockmode('update').\ + all() @require_context -def reservation_commit(context, reservations): +def reservation_commit(context, reservations, project_id=None): session = get_session() with session.begin(): - usages = _get_quota_usages(context, session) + usages = _get_quota_usages(context, session, project_id) for reservation in _quota_reservations(session, context, reservations): usage = usages[reservation.resource] @@ -831,10 +882,10 @@ def reservation_commit(context, reservations): @require_context -def reservation_rollback(context, reservations): +def reservation_rollback(context, reservations, project_id=None): session = get_session() with session.begin(): - usages = _get_quota_usages(context, session) + usages = _get_quota_usages(context, session, project_id) for reservation in _quota_reservations(session, context, reservations): usage = usages[reservation.resource] @@ -853,24 +904,24 @@ def quota_destroy_all_by_project(context, project_id): with session.begin(): quotas = model_query(context, models.Quota, session=session, read_deleted="no").\ - filter_by(project_id=project_id).\ - all() + filter_by(project_id=project_id).\ + all() for quota_ref in quotas: quota_ref.delete(session=session) quota_usages = model_query(context, models.QuotaUsage, session=session, read_deleted="no").\ - filter_by(project_id=project_id).\ - all() + filter_by(project_id=project_id).\ + all() for quota_usage_ref in quota_usages: quota_usage_ref.delete(session=session) reservations = model_query(context, models.Reservation, session=session, read_deleted="no").\ - filter_by(project_id=project_id).\ - all() + filter_by(project_id=project_id).\ + all() for reservation_ref in reservations: reservation_ref.delete(session=session) @@ -883,8 +934,8 @@ def reservation_expire(context): current_time = timeutils.utcnow() results = model_query(context, models.Reservation, session=session, read_deleted="no").\ - filter(models.Reservation.expire < current_time).\ - all() + 
filter(models.Reservation.expire < current_time).\ + all() if results: for reservation in results: @@ -904,15 +955,15 @@ def volume_allocate_iscsi_target(context, volume_id, host): with session.begin(): iscsi_target_ref = model_query(context, models.IscsiTarget, session=session, read_deleted="no").\ - filter_by(volume=None).\ - filter_by(host=host).\ - with_lockmode('update').\ - first() + filter_by(volume=None).\ + filter_by(host=host).\ + with_lockmode('update').\ + first() # NOTE(vish): if with_lockmode isn't supported, as in sqlite, # then this has concurrency issues if not iscsi_target_ref: - raise db.NoMoreTargets() + raise exception.NoMoreTargets() iscsi_target_ref.volume_id = volume_id session.add(iscsi_target_ref) @@ -921,94 +972,174 @@ def volume_allocate_iscsi_target(context, volume_id, host): @require_admin_context -def volume_attached(context, volume_id, instance_uuid, mountpoint): - if not utils.is_uuid_like(instance_uuid): - raise exception.InvalidUUID(instance_uuid) +def volume_attached(context, volume_id, instance_uuid, host_name, mountpoint): + if instance_uuid and not uuidutils.is_uuid_like(instance_uuid): + raise exception.InvalidUUID(uuid=instance_uuid) session = get_session() with session.begin(): - volume_ref = volume_get(context, volume_id, session=session) + volume_ref = _volume_get(context, volume_id, session=session) volume_ref['status'] = 'in-use' volume_ref['mountpoint'] = mountpoint volume_ref['attach_status'] = 'attached' volume_ref['instance_uuid'] = instance_uuid + volume_ref['attached_host'] = host_name volume_ref.save(session=session) + return volume_ref @require_context def volume_create(context, values): values['volume_metadata'] = _metadata_refs(values.get('metadata'), models.VolumeMetadata) + if is_admin_context(context): + values['volume_admin_metadata'] = \ + _metadata_refs(values.get('admin_metadata'), + models.VolumeAdminMetadata) + elif values.get('volume_admin_metadata'): + del values['volume_admin_metadata'] + volume_ref = models.Volume() if not values.get('id'): - values['id'] = str(utils.gen_uuid()) + values['id'] = str(uuid.uuid4()) volume_ref.update(values) session = get_session() with session.begin(): volume_ref.save(session=session) - return volume_get(context, values['id'], session=session) + return _volume_get(context, values['id'], session=session) @require_admin_context -def volume_data_get_for_project(context, project_id, session=None): +def volume_data_get_for_host(context, host): result = model_query(context, func.count(models.Volume.id), func.sum(models.Volume.size), - read_deleted="no", - session=session).\ - filter_by(project_id=project_id).\ - first() + read_deleted="no").\ + filter_by(host=host).\ + first() + + # NOTE(vish): convert None to 0 + return (result[0] or 0, result[1] or 0) + + +@require_admin_context +def _volume_data_get_for_project(context, project_id, volume_type_id=None, + session=None): + query = model_query(context, + func.count(models.Volume.id), + func.sum(models.Volume.size), + read_deleted="no", + session=session).\ + filter_by(project_id=project_id) + + if volume_type_id: + query = query.filter_by(volume_type_id=volume_type_id) + + result = query.first() # NOTE(vish): convert None to 0 return (result[0] or 0, result[1] or 0) +@require_admin_context +def volume_data_get_for_project(context, project_id, volume_type_id=None): + return _volume_data_get_for_project(context, project_id, volume_type_id) + + +@require_admin_context +def finish_volume_migration(context, src_vol_id, dest_vol_id): + """Copy almost 
all columns from dest to source.""" + session = get_session() + with session.begin(): + src_volume_ref = _volume_get(context, src_vol_id, session=session) + dest_volume_ref = _volume_get(context, dest_vol_id, session=session) + + # NOTE(rpodolyaka): we should copy only column values, while model + # instances also have relationships attributes, which + # should be ignored + def is_column(inst, attr): + return attr in inst.__class__.__table__.columns + + for key, value in dest_volume_ref.iteritems(): + if key == 'id' or not is_column(dest_volume_ref, key): + continue + elif key == 'migration_status': + value = None + elif key == '_name_id': + value = dest_volume_ref['_name_id'] or dest_volume_ref['id'] + + setattr(src_volume_ref, key, value) + + @require_admin_context def volume_destroy(context, volume_id): session = get_session() + now = timeutils.utcnow() with session.begin(): - session.query(models.Volume).\ - filter_by(id=volume_id).\ - update({'deleted': True, - 'deleted_at': timeutils.utcnow(), - 'updated_at': literal_column('updated_at')}) - session.query(models.IscsiTarget).\ - filter_by(volume_id=volume_id).\ - update({'volume_id': None}) - session.query(models.VolumeMetadata).\ - filter_by(volume_id=volume_id).\ - update({'deleted': True, - 'deleted_at': timeutils.utcnow(), - 'updated_at': literal_column('updated_at')}) + model_query(context, models.Volume, session=session).\ + filter_by(id=volume_id).\ + update({'status': 'deleted', + 'deleted': True, + 'deleted_at': now, + 'updated_at': literal_column('updated_at')}) + model_query(context, models.IscsiTarget, session=session).\ + filter_by(volume_id=volume_id).\ + update({'volume_id': None}) + model_query(context, models.VolumeMetadata, session=session).\ + filter_by(volume_id=volume_id).\ + update({'deleted': True, + 'deleted_at': now, + 'updated_at': literal_column('updated_at')}) + model_query(context, models.VolumeAdminMetadata, session=session).\ + filter_by(volume_id=volume_id).\ + update({'deleted': True, + 'deleted_at': now, + 'updated_at': literal_column('updated_at')}) + model_query(context, models.Transfer, session=session).\ + filter_by(volume_id=volume_id).\ + update({'deleted': True, + 'deleted_at': now, + 'updated_at': literal_column('updated_at')}) @require_admin_context def volume_detached(context, volume_id): session = get_session() with session.begin(): - volume_ref = volume_get(context, volume_id, session=session) - volume_ref['status'] = 'available' + volume_ref = _volume_get(context, volume_id, session=session) + # Hide status update from user if we're performing a volume migration + if not volume_ref['migration_status']: + volume_ref['status'] = 'available' volume_ref['mountpoint'] = None volume_ref['attach_status'] = 'detached' volume_ref['instance_uuid'] = None + volume_ref['attached_host'] = None + volume_ref['attach_time'] = None volume_ref.save(session=session) @require_context def _volume_get_query(context, session=None, project_only=False): - return model_query(context, models.Volume, session=session, - project_only=project_only).\ - options(joinedload('volume_metadata')).\ - options(joinedload('volume_type')) + if is_admin_context(context): + return model_query(context, models.Volume, session=session, + project_only=project_only).\ + options(joinedload('volume_metadata')).\ + options(joinedload('volume_admin_metadata')).\ + options(joinedload('volume_type')) + else: + return model_query(context, models.Volume, session=session, + project_only=project_only).\ + 
options(joinedload('volume_metadata')).\ + options(joinedload('volume_type')) @require_context -def volume_get(context, volume_id, session=None): +def _volume_get(context, volume_id, session=None): result = _volume_get_query(context, session=session, project_only=True).\ - filter_by(id=volume_id).\ - first() + filter_by(id=volume_id).\ + first() if not result: raise exception.VolumeNotFound(volume_id=volume_id) @@ -1016,9 +1147,27 @@ def volume_get(context, volume_id, session=None): return result +@require_context +def volume_get(context, volume_id): + return _volume_get(context, volume_id) + + @require_admin_context -def volume_get_all(context): - return _volume_get_query(context).all() +def volume_get_all(context, marker, limit, sort_key, sort_dir): + session = get_session() + with session.begin(): + query = _volume_get_query(context, session=session) + + marker_volume = None + if marker is not None: + marker_volume = _volume_get(context, marker, session=session) + + query = sqlalchemyutils.paginate_query(query, models.Volume, limit, + [sort_key, 'created_at', 'id'], + marker=marker_volume, + sort_dir=sort_dir) + + return query.all() @require_admin_context @@ -1029,10 +1178,11 @@ def volume_get_all_by_host(context, host): @require_admin_context def volume_get_all_by_instance_uuid(context, instance_uuid): result = model_query(context, models.Volume, read_deleted="no").\ - options(joinedload('volume_metadata')).\ - options(joinedload('volume_type')).\ - filter_by(instance_uuid=instance_uuid).\ - all() + options(joinedload('volume_metadata')).\ + options(joinedload('volume_admin_metadata')).\ + options(joinedload('volume_type')).\ + filter_by(instance_uuid=instance_uuid).\ + all() if not result: return [] @@ -1041,16 +1191,31 @@ def volume_get_all_by_instance_uuid(context, instance_uuid): @require_context -def volume_get_all_by_project(context, project_id): - authorize_project_context(context, project_id) - return _volume_get_query(context).filter_by(project_id=project_id).all() +def volume_get_all_by_project(context, project_id, marker, limit, sort_key, + sort_dir): + session = get_session() + with session.begin(): + authorize_project_context(context, project_id) + query = _volume_get_query(context, session).\ + filter_by(project_id=project_id) + + marker_volume = None + if marker is not None: + marker_volume = _volume_get(context, marker, session) + + query = sqlalchemyutils.paginate_query(query, models.Volume, limit, + [sort_key, 'created_at', 'id'], + marker=marker_volume, + sort_dir=sort_dir) + + return query.all() @require_admin_context def volume_get_iscsi_target_num(context, volume_id): result = model_query(context, models.IscsiTarget, read_deleted="yes").\ - filter_by(volume_id=volume_id).\ - first() + filter_by(volume_id=volume_id).\ + first() if not result: raise exception.ISCSITargetNotFoundForVolume(volume_id=volume_id) @@ -1061,30 +1226,39 @@ def volume_get_iscsi_target_num(context, volume_id): @require_context def volume_update(context, volume_id, values): session = get_session() - metadata = values.get('metadata') - if metadata is not None: - volume_metadata_update(context, - volume_id, - values.pop('metadata'), - delete=True) with session.begin(): - volume_ref = volume_get(context, volume_id, session=session) + metadata = values.get('metadata') + if metadata is not None: + _volume_user_metadata_update(context, + volume_id, + values.pop('metadata'), + delete=True, + session=session) + + admin_metadata = values.get('admin_metadata') + if is_admin_context(context) and 
admin_metadata is not None: + _volume_admin_metadata_update(context, + volume_id, + values.pop('admin_metadata'), + delete=True, + session=session) + + volume_ref = _volume_get(context, volume_id, session=session) volume_ref.update(values) volume_ref.save(session=session) + return volume_ref #################### -def _volume_metadata_get_query(context, volume_id, session=None): - return model_query(context, models.VolumeMetadata, - session=session, read_deleted="no").\ - filter_by(volume_id=volume_id) +def _volume_x_metadata_get_query(context, volume_id, model, session=None): + return model_query(context, model, session=session, read_deleted="no").\ + filter_by(volume_id=volume_id) -@require_context -@require_volume_exists -def volume_metadata_get(context, volume_id): - rows = _volume_metadata_get_query(context, volume_id).all() +def _volume_x_metadata_get(context, volume_id, model, session=None): + rows = _volume_x_metadata_get_query(context, volume_id, model, + session=session).all() result = {} for row in rows: result[row['key']] = row['value'] @@ -1092,10 +1266,107 @@ def volume_metadata_get(context, volume_id): return result +def _volume_x_metadata_get_item(context, volume_id, key, model, notfound_exec, + session=None): + result = _volume_x_metadata_get_query(context, volume_id, + model, session=session).\ + filter_by(key=key).\ + first() + + if not result: + raise notfound_exec(metadata_key=key, volume_id=volume_id) + return result + + +def _volume_x_metadata_update(context, volume_id, metadata, delete, + model, notfound_exec, session=None): + if not session: + session = get_session() + + with session.begin(subtransactions=True): + # Set existing metadata to deleted if delete argument is True + if delete: + original_metadata = _volume_x_metadata_get(context, volume_id, + model, session=session) + for meta_key, meta_value in original_metadata.iteritems(): + if meta_key not in metadata: + meta_ref = _volume_x_metadata_get_item(context, volume_id, + meta_key, model, + notfound_exec, + session=session) + meta_ref.update({'deleted': True}) + meta_ref.save(session=session) + + meta_ref = None + + # Now update all existing items with new values, or create new meta + # objects + for meta_key, meta_value in metadata.items(): + + # update the value whether it exists or not + item = {"value": meta_value} + + try: + meta_ref = _volume_x_metadata_get_item(context, volume_id, + meta_key, model, + notfound_exec, + session=session) + except notfound_exec: + meta_ref = model() + item.update({"key": meta_key, "volume_id": volume_id}) + + meta_ref.update(item) + meta_ref.save(session=session) + + return _volume_x_metadata_get(context, volume_id, model) + + +def _volume_user_metadata_get_query(context, volume_id, session=None): + return _volume_x_metadata_get_query(context, volume_id, + models.VolumeMetadata, session=session) + + +@require_context +@require_volume_exists +def _volume_user_metadata_get(context, volume_id, session=None): + return _volume_x_metadata_get(context, volume_id, + models.VolumeMetadata, session=session) + + +@require_context +def _volume_user_metadata_get_item(context, volume_id, key, session=None): + return _volume_x_metadata_get_item(context, volume_id, key, + models.VolumeMetadata, + exception.VolumeMetadataNotFound, + session=session) + + +@require_context +@require_volume_exists +def _volume_user_metadata_update(context, volume_id, metadata, delete, + session=None): + return _volume_x_metadata_update(context, volume_id, metadata, delete, + models.VolumeMetadata, + 
exception.VolumeMetadataNotFound, + session=session) + + +@require_context +@require_volume_exists +def volume_metadata_get_item(context, volume_id, key): + return _volume_user_metadata_get_item(context, volume_id, key) + + +@require_context +@require_volume_exists +def volume_metadata_get(context, volume_id): + return _volume_user_metadata_get(context, volume_id) + + @require_context @require_volume_exists def volume_metadata_delete(context, volume_id, key): - _volume_metadata_get_query(context, volume_id).\ + _volume_user_metadata_get_query(context, volume_id).\ filter_by(key=key).\ update({'deleted': True, 'deleted_at': timeutils.utcnow(), @@ -1104,51 +1375,56 @@ def volume_metadata_delete(context, volume_id, key): @require_context @require_volume_exists -def volume_metadata_get_item(context, volume_id, key, session=None): - result = _volume_metadata_get_query(context, volume_id, session=session).\ - filter_by(key=key).\ - first() +def volume_metadata_update(context, volume_id, metadata, delete): + return _volume_user_metadata_update(context, volume_id, metadata, delete) - if not result: - raise exception.VolumeMetadataNotFound(metadata_key=key, - volume_id=volume_id) - return result + +################### -@require_context +def _volume_admin_metadata_get_query(context, volume_id, session=None): + return _volume_x_metadata_get_query(context, volume_id, + models.VolumeAdminMetadata, + session=session) + + +@require_admin_context @require_volume_exists -def volume_metadata_update(context, volume_id, metadata, delete): - session = get_session() +def _volume_admin_metadata_get(context, volume_id, session=None): + return _volume_x_metadata_get(context, volume_id, + models.VolumeAdminMetadata, session=session) - # Set existing metadata to deleted if delete argument is True - if delete: - original_metadata = volume_metadata_get(context, volume_id) - for meta_key, meta_value in original_metadata.iteritems(): - if meta_key not in metadata: - meta_ref = volume_metadata_get_item(context, volume_id, - meta_key, session) - meta_ref.update({'deleted': True}) - meta_ref.save(session=session) - meta_ref = None +@require_admin_context +@require_volume_exists +def _volume_admin_metadata_update(context, volume_id, metadata, delete, + session=None): + return _volume_x_metadata_update(context, volume_id, metadata, delete, + models.VolumeAdminMetadata, + exception.VolumeAdminMetadataNotFound, + session=session) - # Now update all existing items with new values, or create new meta objects - for meta_key, meta_value in metadata.iteritems(): - # update the value whether it exists or not - item = {"value": meta_value} +@require_admin_context +@require_volume_exists +def volume_admin_metadata_get(context, volume_id): + return _volume_admin_metadata_get(context, volume_id) - try: - meta_ref = volume_metadata_get_item(context, volume_id, - meta_key, session) - except exception.VolumeMetadataNotFound, e: - meta_ref = models.VolumeMetadata() - item.update({"key": meta_key, "volume_id": volume_id}) - meta_ref.update(item) - meta_ref.save(session=session) +@require_admin_context +@require_volume_exists +def volume_admin_metadata_delete(context, volume_id, key): + _volume_admin_metadata_get_query(context, volume_id).\ + filter_by(key=key).\ + update({'deleted': True, + 'deleted_at': timeutils.utcnow(), + 'updated_at': literal_column('updated_at')}) - return metadata + +@require_admin_context +@require_volume_exists +def volume_admin_metadata_update(context, volume_id, metadata, delete): + return 
_volume_admin_metadata_update(context, volume_id, metadata, delete) ################### @@ -1156,34 +1432,45 @@ def volume_metadata_update(context, volume_id, metadata, delete): @require_context def snapshot_create(context, values): + values['snapshot_metadata'] = _metadata_refs(values.get('metadata'), + models.SnapshotMetadata) snapshot_ref = models.Snapshot() if not values.get('id'): - values['id'] = str(utils.gen_uuid()) + values['id'] = str(uuid.uuid4()) snapshot_ref.update(values) session = get_session() with session.begin(): snapshot_ref.save(session=session) - return snapshot_ref + + return _snapshot_get(context, values['id'], session=session) @require_admin_context def snapshot_destroy(context, snapshot_id): session = get_session() with session.begin(): - session.query(models.Snapshot).\ - filter_by(id=snapshot_id).\ - update({'deleted': True, - 'deleted_at': timeutils.utcnow(), - 'updated_at': literal_column('updated_at')}) + model_query(context, models.Snapshot, session=session).\ + filter_by(id=snapshot_id).\ + update({'status': 'deleted', + 'deleted': True, + 'deleted_at': timeutils.utcnow(), + 'updated_at': literal_column('updated_at')}) + model_query(context, models.SnapshotMetadata, session=session).\ + filter_by(snapshot_id=snapshot_id).\ + update({'deleted': True, + 'deleted_at': timeutils.utcnow(), + 'updated_at': literal_column('updated_at')}) @require_context -def snapshot_get(context, snapshot_id, session=None): +def _snapshot_get(context, snapshot_id, session=None): result = model_query(context, models.Snapshot, session=session, project_only=True).\ - filter_by(id=snapshot_id).\ - first() + options(joinedload('volume')).\ + options(joinedload('snapshot_metadata')).\ + filter_by(id=snapshot_id).\ + first() if not result: raise exception.SnapshotNotFound(snapshot_id=snapshot_id) @@ -1191,140 +1478,222 @@ def snapshot_get(context, snapshot_id, session=None): return result +@require_context +def snapshot_get(context, snapshot_id): + return _snapshot_get(context, snapshot_id) + + @require_admin_context def snapshot_get_all(context): - return model_query(context, models.Snapshot).all() + return model_query(context, models.Snapshot).\ + options(joinedload('snapshot_metadata')).\ + all() @require_context def snapshot_get_all_for_volume(context, volume_id): return model_query(context, models.Snapshot, read_deleted='no', project_only=True).\ - filter_by(volume_id=volume_id).all() + filter_by(volume_id=volume_id).\ + options(joinedload('snapshot_metadata')).\ + all() @require_context def snapshot_get_all_by_project(context, project_id): authorize_project_context(context, project_id) return model_query(context, models.Snapshot).\ - filter_by(project_id=project_id).\ - all() + filter_by(project_id=project_id).\ + options(joinedload('snapshot_metadata')).\ + all() + + +@require_context +def _snapshot_data_get_for_project(context, project_id, volume_type_id=None, + session=None): + authorize_project_context(context, project_id) + query = model_query(context, + func.count(models.Snapshot.id), + func.sum(models.Snapshot.volume_size), + read_deleted="no", + session=session).\ + filter_by(project_id=project_id) + + if volume_type_id: + query = query.join('volume').filter_by(volume_type_id=volume_type_id) + + result = query.first() + + # NOTE(vish): convert None to 0 + return (result[0] or 0, result[1] or 0) + + +@require_context +def snapshot_data_get_for_project(context, project_id, volume_type_id=None): + return _snapshot_data_get_for_project(context, project_id, volume_type_id) + + 
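
Both _volume_data_get_for_project() and _snapshot_data_get_for_project() return a (count, total_size) pair; the per-resource sync callables registered in QUOTA_SYNC_FUNCTIONS and invoked from quota_reserve() above are thin wrappers over them. A minimal sketch of one such callable, matching the keyword signature quota_reserve() uses; the _sync_volumes shown here is illustrative, not the patch's actual implementation::

    def _sync_volumes(context, project_id, volume_type_id=None,
                      volume_type_name=None, session=None):
        # Recount the project's volumes straight from the volumes table.
        (volumes, _gigs) = _volume_data_get_for_project(
            context, project_id, volume_type_id=volume_type_id,
            session=session)
        key = 'volumes'
        if volume_type_name:
            # Per-type usage is tracked under a per-type resource name
            # (assumed naming scheme for this sketch).
            key = 'volumes_%s' % volume_type_name
        # quota_reserve() copies this mapping into usages[key].in_use.
        return {key: volumes}
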
+@require_context +def snapshot_get_active_by_window(context, begin, end=None, project_id=None): + """Return snapshots that were active during window.""" + + query = model_query(context, models.Snapshot, read_deleted="yes") + query = query.filter(or_(models.Snapshot.deleted_at == None, + models.Snapshot.deleted_at > begin)) + query = query.options(joinedload(models.Snapshot.volume)) + if end: + query = query.filter(models.Snapshot.created_at < end) + if project_id: + query = query.filter_by(project_id=project_id) + + return query.all() @require_context def snapshot_update(context, snapshot_id, values): session = get_session() with session.begin(): - snapshot_ref = snapshot_get(context, snapshot_id, session=session) + snapshot_ref = _snapshot_get(context, snapshot_id, session=session) snapshot_ref.update(values) snapshot_ref.save(session=session) +#################### -################### +def _snapshot_metadata_get_query(context, snapshot_id, session=None): + return model_query(context, models.SnapshotMetadata, + session=session, read_deleted="no").\ + filter_by(snapshot_id=snapshot_id) -@require_admin_context -def migration_create(context, values): - migration = models.Migration() - migration.update(values) - migration.save() - return migration +@require_context +@require_snapshot_exists +def _snapshot_metadata_get(context, snapshot_id, session=None): + rows = _snapshot_metadata_get_query(context, snapshot_id, session).all() + result = {} + for row in rows: + result[row['key']] = row['value'] -@require_admin_context -def migration_update(context, id, values): - session = get_session() - with session.begin(): - migration = migration_get(context, id, session=session) - migration.update(values) - migration.save(session=session) - return migration + return result -@require_admin_context -def migration_get(context, id, session=None): - result = model_query(context, models.Migration, session=session, - read_deleted="yes").\ - filter_by(id=id).\ - first() +@require_context +@require_snapshot_exists +def snapshot_metadata_get(context, snapshot_id): + return _snapshot_metadata_get(context, snapshot_id) - if not result: - raise exception.MigrationNotFound(migration_id=id) - return result +@require_context +@require_snapshot_exists +def snapshot_metadata_delete(context, snapshot_id, key): + _snapshot_metadata_get_query(context, snapshot_id).\ + filter_by(key=key).\ + update({'deleted': True, + 'deleted_at': timeutils.utcnow(), + 'updated_at': literal_column('updated_at')}) -@require_admin_context -def migration_get_by_instance_and_status(context, instance_uuid, status): - result = model_query(context, models.Migration, read_deleted="yes").\ - filter_by(instance_uuid=instance_uuid).\ - filter_by(status=status).\ - first() +@require_context +def _snapshot_metadata_get_item(context, snapshot_id, key, session=None): + result = _snapshot_metadata_get_query(context, + snapshot_id, + session=session).\ + filter_by(key=key).\ + first() if not result: - raise exception.MigrationNotFoundByStatus(instance_id=instance_uuid, - status=status) - + raise exception.SnapshotMetadataNotFound(metadata_key=key, + snapshot_id=snapshot_id) return result -@require_admin_context -def migration_get_all_unconfirmed(context, confirm_window, session=None): - confirm_window = timeutils.utcnow() - datetime.timedelta( - seconds=confirm_window) - - return model_query(context, models.Migration, session=session, - read_deleted="yes").\ - filter(models.Migration.updated_at <= confirm_window).\ - filter_by(status="finished").\ - all() 
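
snapshot_get_active_by_window() above treats a snapshot as active during [begin, end) if it was created before the window closes and was deleted either never or only after the window opens; volume_get_active_by_window() below applies the same predicate to volumes. A short usage sketch, assuming an admin context ctxt (hypothetical)::

    from datetime import datetime

    begin = datetime(2013, 6, 1)
    end = datetime(2013, 7, 1)

    # Snapshots that existed at any point during June 2013: created_at
    # earlier than end, and deleted_at either NULL or later than begin.
    june_snaps = snapshot_get_active_by_window(ctxt, begin, end=end,
                                               project_id='demo')
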
-
+@require_context
+@require_snapshot_exists
+def snapshot_metadata_update(context, snapshot_id, metadata, delete):
+    session = get_session()
+    with session.begin():
+        # Set existing metadata to deleted if delete argument is True
+        if delete:
+            original_metadata = _snapshot_metadata_get(context, snapshot_id,
+                                                       session)
+            for meta_key, meta_value in original_metadata.iteritems():
+                if meta_key not in metadata:
+                    meta_ref = _snapshot_metadata_get_item(context,
+                                                           snapshot_id,
+                                                           meta_key, session)
+                    meta_ref.update({'deleted': True})
+                    meta_ref.save(session=session)
+
+        meta_ref = None
+
+        # Now update all existing items with new values, or create new meta
+        # objects
+        for meta_key, meta_value in metadata.items():
+
+            # update the value whether it exists or not
+            item = {"value": meta_value}
+
+            try:
+                meta_ref = _snapshot_metadata_get_item(context, snapshot_id,
+                                                       meta_key, session)
+            except exception.SnapshotMetadataNotFound:
+                meta_ref = models.SnapshotMetadata()
+                item.update({"key": meta_key, "snapshot_id": snapshot_id})
+
+            meta_ref.update(item)
+            meta_ref.save(session=session)
+
+    return snapshot_metadata_get(context, snapshot_id)
 
-##################
+###################
 
 
 @require_admin_context
 def volume_type_create(context, values):
-    """Create a new instance type. In order to pass in extra specs,
-    the values dict should contain a 'extra_specs' key/value pair:
+    """Create a new volume type.
+
+    In order to pass in extra specs, the values dict should contain a
+    'extra_specs' key/value pair:
 
     {'extra_specs' : {'k1': 'v1', 'k2': 'v2', ...}}
 
     """
+    if not values.get('id'):
+        values['id'] = str(uuid.uuid4())
+
     session = get_session()
     with session.begin():
         try:
-            volume_type_get_by_name(context, values['name'], session)
-            raise exception.VolumeTypeExists(name=values['name'])
+            _volume_type_get_by_name(context, values['name'], session)
+            raise exception.VolumeTypeExists(id=values['name'])
         except exception.VolumeTypeNotFoundByName:
             pass
         try:
-            values['extra_specs'] = _metadata_refs(values.get('extra_specs'),
+            _volume_type_get(context, values['id'], session)
+            raise exception.VolumeTypeExists(id=values['id'])
+        except exception.VolumeTypeNotFound:
+            pass
+        try:
+            values['extra_specs'] = _metadata_refs(values.get('extra_specs'),
                                                    models.VolumeTypeExtraSpecs)
             volume_type_ref = models.VolumeTypes()
             volume_type_ref.update(values)
-            volume_type_ref.save()
-        except Exception, e:
-            raise exception.DBError(e)
+            volume_type_ref.save(session=session)
+        except Exception as e:
+            raise db_exc.DBError(e)
     return volume_type_ref
 
 
 @require_context
 def volume_type_get_all(context, inactive=False, filters=None):
-    """
-    Returns a dict describing all volume_types with name as key.
- """ + """Returns a dict describing all volume_types with name as key.""" filters = filters or {} read_deleted = "yes" if inactive else "no" rows = model_query(context, models.VolumeTypes, read_deleted=read_deleted).\ - options(joinedload('extra_specs')).\ - order_by("name").\ - all() + options(joinedload('extra_specs')).\ + order_by("name").\ + all() - # TODO(sirp): this patern of converting rows to a result with extra_specs - # is repeated quite a bit, might be worth creating a method for it result = {} for row in rows: result[row['name']] = _dict_with_extra_specs(row) @@ -1333,12 +1702,15 @@ def volume_type_get_all(context, inactive=False, filters=None): @require_context -def volume_type_get(context, id, session=None): - """Returns a dict describing specific volume_type""" - result = model_query(context, models.VolumeTypes, session=session).\ - options(joinedload('extra_specs')).\ - filter_by(id=id).\ - first() +def _volume_type_get(context, id, session=None, inactive=False): + read_deleted = "yes" if inactive else "no" + result = model_query(context, + models.VolumeTypes, + session=session, + read_deleted=read_deleted).\ + options(joinedload('extra_specs')).\ + filter_by(id=id).\ + first() if not result: raise exception.VolumeTypeNotFound(volume_type_id=id) @@ -1347,12 +1719,18 @@ def volume_type_get(context, id, session=None): @require_context -def volume_type_get_by_name(context, name, session=None): - """Returns a dict describing specific volume_type""" +def volume_type_get(context, id, inactive=False): + """Return a dict describing specific volume_type.""" + + return _volume_type_get(context, id, None, inactive) + + +@require_context +def _volume_type_get_by_name(context, name, session=None): result = model_query(context, models.VolumeTypes, session=session).\ - options(joinedload('extra_specs')).\ - filter_by(name=name).\ - first() + options(joinedload('extra_specs')).\ + filter_by(name=name).\ + first() if not result: raise exception.VolumeTypeNotFoundByName(volume_type_name=name) @@ -1360,32 +1738,129 @@ def volume_type_get_by_name(context, name, session=None): return _dict_with_extra_specs(result) +@require_context +def volume_type_get_by_name(context, name): + """Return a dict describing specific volume_type.""" + + return _volume_type_get_by_name(context, name) + + +@require_admin_context +def volume_type_qos_associations_get(context, qos_specs_id, inactive=False): + read_deleted = "yes" if inactive else "no" + return model_query(context, models.VolumeTypes, + read_deleted=read_deleted). \ + filter_by(qos_specs_id=qos_specs_id).all() + + @require_admin_context -def volume_type_destroy(context, name): +def volume_type_qos_associate(context, type_id, qos_specs_id): session = get_session() with session.begin(): - volume_type_ref = volume_type_get_by_name(context, name, - session=session) - volume_type_id = volume_type_ref['id'] - session.query(models.VolumeTypes).\ - filter_by(id=volume_type_id).\ - update({'deleted': True, - 'deleted_at': timeutils.utcnow(), - 'updated_at': literal_column('updated_at')}) - session.query(models.VolumeTypeExtraSpecs).\ - filter_by(volume_type_id=volume_type_id).\ - update({'deleted': True, - 'deleted_at': timeutils.utcnow(), - 'updated_at': literal_column('updated_at')}) - - -@require_context -def volume_get_active_by_window(context, begin, end=None, - project_id=None): - """Return volumes that were active during window.""" + _volume_type_get(context, type_id, session) + + session.query(models.VolumeTypes). \ + filter_by(id=type_id). 
\ + update({'qos_specs_id': qos_specs_id, + 'updated_at': timeutils.utcnow()}) + + +@require_admin_context +def volume_type_qos_disassociate(context, qos_specs_id, type_id): + """Disassociate volume type from qos specs.""" + session = get_session() + with session.begin(): + _volume_type_get(context, type_id, session) + + session.query(models.VolumeTypes). \ + filter_by(id=type_id). \ + filter_by(qos_specs_id=qos_specs_id). \ + update({'qos_specs_id': None, + 'updated_at': timeutils.utcnow()}) + + +@require_admin_context +def volume_type_qos_disassociate_all(context, qos_specs_id): + """Disassociate all volume types associated with specified qos specs.""" + session = get_session() + with session.begin(): + session.query(models.VolumeTypes). \ + filter_by(qos_specs_id=qos_specs_id). \ + update({'qos_specs_id': None, + 'updated_at': timeutils.utcnow()}) + + +@require_admin_context +def volume_type_qos_specs_get(context, type_id): + """Return all qos specs for given volume type. + + result looks like: + { + 'qos_specs': + { + 'id': 'qos-specs-id', + 'name': 'qos_specs_name', + 'consumer': 'Consumer', + 'specs': { + 'key1': 'value1', + 'key2': 'value2', + 'key3': 'value3' + } + } + } + + """ + session = get_session() + with session.begin(): + _volume_type_get(context, type_id, session) + + row = session.query(models.VolumeTypes). \ + options(joinedload('qos_specs')). \ + filter_by(id=type_id). \ + first() + + # row.qos_specs is a list of QualityOfServiceSpecs ref + specs = _dict_with_qos_specs(row.qos_specs) + + if not specs: + # turn empty list to None + specs = None + else: + specs = specs[0] + + return {'qos_specs': specs} + + +@require_admin_context +def volume_type_destroy(context, id): session = get_session() - query = session.query(models.Volume) + with session.begin(): + _volume_type_get(context, id, session) + results = model_query(context, models.Volume, session=session). 
\ + filter_by(volume_type_id=id).all() + if results: + msg = _('VolumeType %s deletion failed, VolumeType in use.') % id + LOG.error(msg) + raise exception.VolumeTypeInUse(volume_type_id=id) + model_query(context, models.VolumeTypes, session=session).\ + filter_by(id=id).\ + update({'deleted': True, + 'deleted_at': timeutils.utcnow(), + 'updated_at': literal_column('updated_at')}) + model_query(context, models.VolumeTypeExtraSpecs, session=session).\ + filter_by(volume_type_id=id).\ + update({'deleted': True, + 'deleted_at': timeutils.utcnow(), + 'updated_at': literal_column('updated_at')}) + +@require_context +def volume_get_active_by_window(context, + begin, + end=None, + project_id=None): + """Return volumes that were active during window.""" + query = model_query(context, models.Volume, read_deleted="yes") query = query.filter(or_(models.Volume.deleted_at == None, models.Volume.deleted_at > begin)) if end: @@ -1402,13 +1877,13 @@ def volume_get_active_by_window(context, begin, end=None, def _volume_type_extra_specs_query(context, volume_type_id, session=None): return model_query(context, models.VolumeTypeExtraSpecs, session=session, read_deleted="no").\ - filter_by(volume_type_id=volume_type_id) + filter_by(volume_type_id=volume_type_id) @require_context def volume_type_extra_specs_get(context, volume_type_id): rows = _volume_type_extra_specs_query(context, volume_type_id).\ - all() + all() result = {} for row in rows: @@ -1419,24 +1894,29 @@ def volume_type_extra_specs_get(context, volume_type_id): @require_context def volume_type_extra_specs_delete(context, volume_type_id, key): - _volume_type_extra_specs_query(context, volume_type_id).\ - filter_by(key=key).\ - update({'deleted': True, - 'deleted_at': timeutils.utcnow(), - 'updated_at': literal_column('updated_at')}) + session = get_session() + with session.begin(): + _volume_type_extra_specs_get_item(context, volume_type_id, key, + session) + _volume_type_extra_specs_query(context, volume_type_id, session).\ + filter_by(key=key).\ + update({'deleted': True, + 'deleted_at': timeutils.utcnow(), + 'updated_at': literal_column('updated_at')}) @require_context -def volume_type_extra_specs_get_item(context, volume_type_id, key, - session=None): +def _volume_type_extra_specs_get_item(context, volume_type_id, key, + session=None): result = _volume_type_extra_specs_query( - context, volume_type_id, session=session).\ - filter_by(key=key).\ - first() + context, volume_type_id, session=session).\ + filter_by(key=key).\ + first() if not result: raise exception.VolumeTypeExtraSpecsNotFound( - extra_specs_key=key, volume_type_id=volume_type_id) + extra_specs_key=key, + volume_type_id=volume_type_id) return result @@ -1445,396 +1925,756 @@ def volume_type_extra_specs_get_item(context, volume_type_id, key, def volume_type_extra_specs_update_or_create(context, volume_type_id, specs): session = get_session() - spec_ref = None - for key, value in specs.iteritems(): - try: - spec_ref = volume_type_extra_specs_get_item( - context, volume_type_id, key, session) - except exception.VolumeTypeExtraSpecsNotFound, e: - spec_ref = models.VolumeTypeExtraSpecs() - spec_ref.update({"key": key, "value": value, - "volume_type_id": volume_type_id, - "deleted": 0}) - spec_ref.save(session=session) - return specs + with session.begin(): + spec_ref = None + for key, value in specs.iteritems(): + try: + spec_ref = _volume_type_extra_specs_get_item( + context, volume_type_id, key, session) + except exception.VolumeTypeExtraSpecsNotFound as e: + spec_ref = 
models.VolumeTypeExtraSpecs()
+            spec_ref.update({"key": key, "value": value,
+                             "volume_type_id": volume_type_id,
+                             "deleted": False})
+            spec_ref.save(session=session)
+
+    return specs
 
 
 ####################
 
 
 @require_admin_context
-def sm_backend_conf_create(context, values):
-    backend_conf = models.SMBackendConf()
-    backend_conf.update(values)
-    backend_conf.save()
-    return backend_conf
+def qos_specs_create(context, values):
+    """Create a new QoS specs.
+
+    :param values: dictionary that contains specifications for QoS,
+                   e.g. {'name': 'Name',
+                         'qos_specs': {
+                             'consumer': 'front-end',
+                             'total_iops_sec': 1000,
+                             'total_bytes_sec': 1024000
+                         }
+                        }
+    """
+    specs_id = str(uuid.uuid4())
 
-@require_admin_context
-def sm_backend_conf_update(context, sm_backend_id, values):
     session = get_session()
     with session.begin():
-        backend_conf = model_query(context, models.SMBackendConf,
-                                   session=session,
-                                   read_deleted="yes").\
-                                   filter_by(id=sm_backend_id).\
-                                   first()
+        try:
+            _qos_specs_get_by_name(context, values['name'], session)
+            raise exception.QoSSpecsExists(specs_id=values['name'])
+        except exception.QoSSpecsNotFound:
+            pass
+        try:
+            # Insert a root entry for QoS specs
+            specs_root = models.QualityOfServiceSpecs()
+            root = dict(id=specs_id)
+            # 'QoS_Specs_Name' is an internal reserved key to store
+            # the name of QoS specs
+            root['key'] = 'QoS_Specs_Name'
+            root['value'] = values['name']
+            LOG.debug("DB qos_specs_create(): root %s", root)
+            specs_root.update(root)
+            specs_root.save(session=session)
 
-        if not backend_conf:
-            raise exception.NotFound(
-                _("No backend config with id %(sm_backend_id)s") % locals())
+            # Insert all specification entries for QoS specs
+            for k, v in values['qos_specs'].iteritems():
+                item = dict(key=k, value=v, specs_id=specs_id)
+                item['id'] = str(uuid.uuid4())
+                spec_entry = models.QualityOfServiceSpecs()
+                spec_entry.update(item)
+                spec_entry.save(session=session)
+        except Exception as e:
+            raise db_exc.DBError(e)
 
-        backend_conf.update(values)
-        backend_conf.save(session=session)
-    return backend_conf
+    return dict(id=specs_root.id, name=specs_root.value)
 
 
 @require_admin_context
-def sm_backend_conf_delete(context, sm_backend_id):
-    # FIXME(sirp): for consistency, shouldn't this just mark as deleted with
-    # `purge` actually deleting the record?
-    session = get_session()
-    with session.begin():
-        model_query(context, models.SMBackendConf, session=session,
-                    read_deleted="yes").\
-                    filter_by(id=sm_backend_id).\
-                    delete()
+def _qos_specs_get_by_name(context, name, session=None, inactive=False):
+    read_deleted = 'yes' if inactive else 'no'
+    results = model_query(context, models.QualityOfServiceSpecs,
+                          read_deleted=read_deleted, session=session). \
+        filter_by(key='QoS_Specs_Name'). \
+        filter_by(value=name). \
+        options(joinedload('specs')).all()
+
+    if not results:
+        raise exception.QoSSpecsNotFound(specs_id=name)
+
+    return results
 
 
 @require_admin_context
-def sm_backend_conf_get(context, sm_backend_id):
-    result = model_query(context, models.SMBackendConf, read_deleted="yes").\
-                    filter_by(id=sm_backend_id).\
-                    first()
+def _qos_specs_get_ref(context, qos_specs_id, session=None, inactive=False):
+    read_deleted = 'yes' if inactive else 'no'
+    result = model_query(context, models.QualityOfServiceSpecs,
+                         read_deleted=read_deleted, session=session). \
+        filter_by(id=qos_specs_id). \
+        options(joinedload_all('specs')).all()
 
     if not result:
-        raise exception.NotFound(_("No backend config with id "
-                                   "%(sm_backend_id)s") % locals())
+        raise exception.QoSSpecsNotFound(specs_id=qos_specs_id)
 
     return result
 
 
+def _dict_with_children_specs(specs):
+    """Convert specs list to a dict."""
+    result = {}
+    for spec in specs:
+        # Skip deleted keys
+        if not spec['deleted']:
+            result.update({spec['key']: spec['value']})
+
+    return result
+
+
+def _dict_with_qos_specs(rows):
+    """Convert qos specs query results to a list of dicts.
+
+    Qos specs query results are a list of quality_of_service_specs refs:
+    some are the root entry of a qos specs (key == 'QoS_Specs_Name') and
+    the rest are children entries, i.e. the detailed specs of a qos specs.
+    This function gathers each root entry and its children into one dict
+    per qos specs and returns the dicts as a list.
+    """
+    result = []
+    for row in rows:
+        if row['key'] == 'QoS_Specs_Name':
+            member = {}
+            member['name'] = row['value']
+            member.update(dict(id=row['id']))
+            if row.specs:
+                spec_dict = _dict_with_children_specs(row.specs)
+                member.update(dict(consumer=spec_dict['consumer']))
+                del spec_dict['consumer']
+                member.update(dict(specs=spec_dict))
+            result.append(member)
+    return result
+
+
 @require_admin_context
-def sm_backend_conf_get_by_sr(context, sr_uuid):
-    return model_query(context, models.SMBackendConf, read_deleted="yes").\
-                    filter_by(sr_uuid=sr_uuid).\
-                    first()
+def qos_specs_get(context, qos_specs_id, inactive=False):
+    rows = _qos_specs_get_ref(context, qos_specs_id, None, inactive)
+
+    return _dict_with_qos_specs(rows)[0]
 
 
 @require_admin_context
-def sm_backend_conf_get_all(context):
-    return model_query(context, models.SMBackendConf, read_deleted="yes").\
-                    all()
+def qos_specs_get_all(context, inactive=False, filters=None):
+    """Returns a list of all qos_specs.
+
+    Results are like:
+        [{
+            'id': SPECS-UUID,
+            'name': 'qos_spec-1',
+            'consumer': 'back-end',
+            'specs': {
+                'key1': 'value1',
+                'key2': 'value2',
+                ...
+            }
+         },
+         {
+            'id': SPECS-UUID,
+            'name': 'qos_spec-2',
+            'consumer': 'front-end',
+            'specs': {
+                'key1': 'value1',
+                'key2': 'value2',
+                ...
+            }
+         },
+        ]
+    """
+    filters = filters or {}
+    # TODO(zhiteng) Add filters for 'consumer'
+    read_deleted = "yes" if inactive else "no"
+    rows = model_query(context, models.QualityOfServiceSpecs,
+                       read_deleted=read_deleted). \
+        options(joinedload_all('specs')).all()
+
+    return _dict_with_qos_specs(rows)
 
 
-####################
+@require_admin_context
+def qos_specs_get_by_name(context, name, inactive=False):
+    rows = _qos_specs_get_by_name(context, name, None, inactive)
+
+    return _dict_with_qos_specs(rows)[0]
+
+
+@require_admin_context
+def qos_specs_associations_get(context, qos_specs_id):
+    """Return all entities associated with specified qos specs.
 
-def _sm_flavor_get_query(context, sm_flavor_label, session=None):
-    return model_query(context, models.SMFlavors, session=session,
-                       read_deleted="yes").\
-                    filter_by(label=sm_flavor_label)
+    For now, the only entity that can be associated with a qos specs is
+    a volume type, so this is just a wrapper of
+    volume_type_qos_associations_get(). But it's possible to extend qos
+    specs associations to other entities, such as volumes, sometime in
+    the future.
+    """
+    # Raise QoSSpecsNotFound if no specs found
+    _qos_specs_get_ref(context, qos_specs_id, None)
+    return volume_type_qos_associations_get(context, qos_specs_id)
 
 
 @require_admin_context
-def sm_flavor_create(context, values):
-    sm_flavor = models.SMFlavors()
-    sm_flavor.update(values)
-    sm_flavor.save()
-    return sm_flavor
+def qos_specs_associate(context, qos_specs_id, type_id):
+    """Associate volume type with specified qos specs."""
+    return volume_type_qos_associate(context, type_id, qos_specs_id)
 
 
 @require_admin_context
-def sm_flavor_update(context, sm_flavor_label, values):
-    sm_flavor = sm_flavor_get(context, sm_flavor_label)
-    sm_flavor.update(values)
-    sm_flavor.save()
-    return sm_flavor
+def qos_specs_disassociate(context, qos_specs_id, type_id):
+    """Disassociate volume type from specified qos specs."""
+    return volume_type_qos_disassociate(context, qos_specs_id, type_id)
 
 
 @require_admin_context
-def sm_flavor_delete(context, sm_flavor_label):
+def qos_specs_disassociate_all(context, qos_specs_id):
+    """Disassociate all entities associated with specified qos specs.
+
+    For now, the only entity that can be associated with a qos specs is
+    a volume type, so this is just a wrapper of
+    volume_type_qos_disassociate_all(). But it's possible to extend qos
+    specs associations to other entities, such as volumes, sometime in
+    the future.
+    """
+    return volume_type_qos_disassociate_all(context, qos_specs_id)
+
+
+@require_admin_context
+def qos_specs_item_delete(context, qos_specs_id, key):
+    session = get_session()
+    with session.begin():
+        _qos_specs_get_item(context, qos_specs_id, key)
+        session.query(models.QualityOfServiceSpecs). \
+            filter(models.QualityOfServiceSpecs.key == key). \
+            filter(models.QualityOfServiceSpecs.specs_id == qos_specs_id). \
+            update({'deleted': True,
+                    'deleted_at': timeutils.utcnow(),
+                    'updated_at': literal_column('updated_at')})
+
+
+@require_admin_context
+def qos_specs_delete(context, qos_specs_id):
     session = get_session()
     with session.begin():
-        _sm_flavor_get_query(context, sm_flavor_label).delete()
+        _qos_specs_get_ref(context, qos_specs_id, session)
+        session.query(models.QualityOfServiceSpecs).\
+            filter(or_(models.QualityOfServiceSpecs.id == qos_specs_id,
+                       models.QualityOfServiceSpecs.specs_id ==
+                       qos_specs_id)).\
+            update({'deleted': True,
+                    'deleted_at': timeutils.utcnow(),
+                    'updated_at': literal_column('updated_at')})
 
 
 @require_admin_context
-def sm_flavor_get(context, sm_flavor_label):
-    result = _sm_flavor_get_query(context, sm_flavor_label).first()
+def _qos_specs_get_item(context, qos_specs_id, key, session=None):
+    result = model_query(context, models.QualityOfServiceSpecs,
+                         session=session). \
+        filter(models.QualityOfServiceSpecs.key == key). \
+        filter(models.QualityOfServiceSpecs.specs_id == qos_specs_id). \
+        first()
 
     if not result:
-        raise exception.NotFound(
-            _("No sm_flavor called %(sm_flavor)s") % locals())
+        raise exception.QoSSpecsKeyNotFound(
+            specs_key=key,
+            specs_id=qos_specs_id)
 
     return result
 
 
 @require_admin_context
-def sm_flavor_get_all(context):
-    return model_query(context, models.SMFlavors, read_deleted="yes").all()
+def qos_specs_update(context, qos_specs_id, specs):
+    """Make updates to an existing qos specs.
+
+    Perform add, update or delete key/values on a qos specs.
+ """ -############################### + session = get_session() + with session.begin(): + # make sure qos specs exists + _qos_specs_get_ref(context, qos_specs_id, session) + spec_ref = None + for key in specs.keys(): + try: + spec_ref = _qos_specs_get_item( + context, qos_specs_id, key, session) + except exception.QoSSpecsKeyNotFound as e: + spec_ref = models.QualityOfServiceSpecs() + id = None + if spec_ref.get('id', None): + id = spec_ref['id'] + else: + id = str(uuid.uuid4()) + value = dict(id=id, key=key, value=specs[key], + specs_id=qos_specs_id, + deleted=False) + LOG.debug('qos_specs_update() value: %s' % value) + spec_ref.update(value) + spec_ref.save(session=session) + + return specs -def _sm_volume_get_query(context, volume_id, session=None): - return model_query(context, models.SMVolume, session=session, - read_deleted="yes").\ - filter_by(id=volume_id) +#################### -def sm_volume_create(context, values): - sm_volume = models.SMVolume() - sm_volume.update(values) - sm_volume.save() - return sm_volume +@require_context +def volume_type_encryption_get(context, volume_type_id, session=None): + return model_query(context, models.Encryption, session=session, + read_deleted="no").\ + filter_by(volume_type_id=volume_type_id).first() -def sm_volume_update(context, volume_id, values): - sm_volume = sm_volume_get(context, volume_id) - sm_volume.update(values) - sm_volume.save() - return sm_volume +@require_admin_context +def volume_type_encryption_delete(context, volume_type_id): + session = get_session() + with session.begin(): + encryption = volume_type_encryption_get(context, volume_type_id, + session) + encryption.update({'deleted': True, + 'deleted_at': timeutils.utcnow(), + 'updated_at': literal_column('updated_at')}) -def sm_volume_delete(context, volume_id): +@require_admin_context +def volume_type_encryption_update_or_create(context, volume_type_id, + values): session = get_session() - with session.begin(): - _sm_volume_get_query(context, volume_id, session=session).delete() + encryption = volume_type_encryption_get(context, volume_type_id, + session) + if not encryption: + encryption = models.Encryption() -def sm_volume_get(context, volume_id): - result = _sm_volume_get_query(context, volume_id).first() + if 'volume_type_id' not in values: + values['volume_type_id'] = volume_type_id - if not result: - raise exception.NotFound( - _("No sm_volume with id %(volume_id)s") % locals()) + encryption.update(values) + encryption.save(session=session) - return result + return encryption -def sm_volume_get_all(context): - return model_query(context, models.SMVolume, read_deleted="yes").all() +def volume_type_encryption_volume_get(context, volume_type_id, session=None): + volume_list = _volume_get_query(context, session=session, + project_only=False).\ + filter_by(volume_type_id=volume_type_id).\ + all() + return volume_list +#################### -############################### + +@require_context +def volume_encryption_metadata_get(context, volume_id, session=None): + """Return the encryption key id for a given volume.""" + + volume_ref = _volume_get(context, volume_id) + encryption_ref = volume_type_encryption_get(context, + volume_ref['volume_type_id']) + + return { + 'encryption_key_id': volume_ref['encryption_key_id'], + 'control_location': encryption_ref['control_location'], + 'cipher': encryption_ref['cipher'], + 'key_size': encryption_ref['key_size'], + 'provider': encryption_ref['provider'], + } + + +#################### @require_context -def quota_get(context, 
project_id, resource, session=None): - result = model_query(context, models.Quota, session=session, - read_deleted="no").\ - filter_by(project_id=project_id).\ - filter_by(resource=resource).\ - first() +def _volume_glance_metadata_get_all(context, session=None): + rows = model_query(context, + models.VolumeGlanceMetadata, + project_only=True, + session=session).\ + filter_by(deleted=False).\ + all() - if not result: - raise exception.ProjectQuotaNotFound(project_id=project_id) + return rows - return result + +@require_context +def volume_glance_metadata_get_all(context): + """Return the Glance metadata for all volumes.""" + + return _volume_glance_metadata_get_all(context) @require_context -def quota_get_all_by_project(context, project_id): - authorize_project_context(context, project_id) +@require_volume_exists +def _volume_glance_metadata_get(context, volume_id, session=None): + rows = model_query(context, models.VolumeGlanceMetadata, session=session).\ + filter_by(volume_id=volume_id).\ + filter_by(deleted=False).\ + all() - rows = model_query(context, models.Quota, read_deleted="no").\ - filter_by(project_id=project_id).\ - all() + if not rows: + raise exception.GlanceMetadataNotFound(id=volume_id) - result = {'project_id': project_id} - for row in rows: - result[row.resource] = row.hard_limit + return rows - return result +@require_context +@require_volume_exists +def volume_glance_metadata_get(context, volume_id): + """Return the Glance metadata for the specified volume.""" -@require_admin_context -def quota_create(context, project_id, resource, limit): - quota_ref = models.Quota() - quota_ref.project_id = project_id - quota_ref.resource = resource - quota_ref.hard_limit = limit - quota_ref.save() - return quota_ref + return _volume_glance_metadata_get(context, volume_id) -@require_admin_context -def quota_update(context, project_id, resource, limit): +@require_context +@require_snapshot_exists +def _volume_snapshot_glance_metadata_get(context, snapshot_id, session=None): + rows = model_query(context, models.VolumeGlanceMetadata, session=session).\ + filter_by(snapshot_id=snapshot_id).\ + filter_by(deleted=False).\ + all() + + if not rows: + raise exception.GlanceMetadataNotFound(id=snapshot_id) + + return rows + + +@require_context +@require_snapshot_exists +def volume_snapshot_glance_metadata_get(context, snapshot_id): + """Return the Glance metadata for the specified snapshot.""" + + return _volume_snapshot_glance_metadata_get(context, snapshot_id) + + +@require_context +@require_volume_exists +def volume_glance_metadata_create(context, volume_id, key, value): + """Update the Glance metadata for a volume by adding a new key:value pair. + + This API does not support changing the value of a key once it has been + created. 
+    """
+
+    session = get_session()
+    with session.begin():
+        rows = session.query(models.VolumeGlanceMetadata).\
+            filter_by(volume_id=volume_id).\
+            filter_by(key=key).\
+            filter_by(deleted=False).all()
+
+        if len(rows) > 0:
+            raise exception.GlanceMetadataExists(key=key,
+                                                 volume_id=volume_id)
+
+        vol_glance_metadata = models.VolumeGlanceMetadata()
+        vol_glance_metadata.volume_id = volume_id
+        vol_glance_metadata.key = key
+        vol_glance_metadata.value = str(value)
+
+        vol_glance_metadata.save(session=session)
+
+    return
+
+
+@require_context
+@require_snapshot_exists
+def volume_glance_metadata_copy_to_snapshot(context, snapshot_id, volume_id):
+    """Update the Glance metadata for a snapshot.
+
+    This copies all of the key:value pairs from the originating volume, to
+    ensure that a volume created from the snapshot will retain the
+    original metadata.
+    """
+
+    session = get_session()
+    with session.begin():
+        metadata = _volume_glance_metadata_get(context, volume_id,
+                                               session=session)
+        for meta in metadata:
+            vol_glance_metadata = models.VolumeGlanceMetadata()
+            vol_glance_metadata.snapshot_id = snapshot_id
+            vol_glance_metadata.key = meta['key']
+            vol_glance_metadata.value = meta['value']
+            vol_glance_metadata.save(session=session)
+
+
+@require_context
+@require_volume_exists
+def volume_glance_metadata_copy_from_volume_to_volume(context,
+                                                      src_volume_id,
+                                                      volume_id):
+    """Update the Glance metadata for a volume.
+
+    This copies all of the key:value pairs from the originating volume,
+    to ensure that a volume created from the volume (clone) will
+    retain the original metadata.
+    """
+
+    session = get_session()
+    with session.begin():
+        metadata = _volume_glance_metadata_get(context,
+                                               src_volume_id,
+                                               session=session)
+        for meta in metadata:
+            vol_glance_metadata = models.VolumeGlanceMetadata()
+            vol_glance_metadata.volume_id = volume_id
+            vol_glance_metadata.key = meta['key']
+            vol_glance_metadata.value = meta['value']
+
+            vol_glance_metadata.save(session=session)
+
+
+@require_context
+@require_volume_exists
+def volume_glance_metadata_copy_to_volume(context, volume_id, snapshot_id):
+    """Update the Glance metadata from a volume (created from a snapshot) by
+    copying all of the key:value pairs from the originating snapshot.
+
+    This is so that the Glance metadata from the original volume is retained.
+ """ -@require_context -def quota_class_get(context, class_name, resource, session=None): - result = model_query(context, models.QuotaClass, session=session, - read_deleted="no").\ - filter_by(class_name=class_name).\ - filter_by(resource=resource).\ - first() + session = get_session() + with session.begin(): + metadata = _volume_snapshot_glance_metadata_get(context, snapshot_id, + session=session) + for meta in metadata: + vol_glance_metadata = models.VolumeGlanceMetadata() + vol_glance_metadata.volume_id = volume_id + vol_glance_metadata.key = meta['key'] + vol_glance_metadata.value = meta['value'] - if not result: - raise exception.QuotaClassNotFound(class_name=class_name) + vol_glance_metadata.save(session=session) - return result + +@require_context +def volume_glance_metadata_delete_by_volume(context, volume_id): + model_query(context, models.VolumeGlanceMetadata, read_deleted='no').\ + filter_by(volume_id=volume_id).\ + update({'deleted': True, + 'deleted_at': timeutils.utcnow(), + 'updated_at': literal_column('updated_at')}) @require_context -def quota_class_get_all_by_name(context, class_name): - authorize_quota_class_context(context, class_name) +def volume_glance_metadata_delete_by_snapshot(context, snapshot_id): + model_query(context, models.VolumeGlanceMetadata, read_deleted='no').\ + filter_by(snapshot_id=snapshot_id).\ + update({'deleted': True, + 'deleted_at': timeutils.utcnow(), + 'updated_at': literal_column('updated_at')}) - rows = model_query(context, models.QuotaClass, read_deleted="no").\ - filter_by(class_name=class_name).\ - all() - result = {'class_name': class_name} - for row in rows: - result[row.resource] = row.hard_limit +############################### + + +@require_context +def backup_get(context, backup_id): + result = model_query(context, models.Backup, project_only=True).\ + filter_by(id=backup_id).\ + first() + + if not result: + raise exception.BackupNotFound(backup_id=backup_id) return result @require_admin_context -def quota_class_create(context, class_name, resource, limit): - quota_class_ref = models.QuotaClass() - quota_class_ref.class_name = class_name - quota_class_ref.resource = resource - quota_class_ref.hard_limit = limit - quota_class_ref.save() - return quota_class_ref +def backup_get_all(context): + return model_query(context, models.Backup).all() @require_admin_context -def quota_class_update(context, class_name, resource, limit): - session = get_session() - with session.begin(): - quota_class_ref = quota_class_get(context, class_name, resource, - session=session) - quota_class_ref.hard_limit = limit - quota_class_ref.save(session=session) +def backup_get_all_by_host(context, host): + return model_query(context, models.Backup).filter_by(host=host).all() -@require_admin_context -def quota_class_destroy(context, class_name, resource): +@require_context +def backup_get_all_by_project(context, project_id): + authorize_project_context(context, project_id) + + return model_query(context, models.Backup).\ + filter_by(project_id=project_id).all() + + +@require_context +def backup_create(context, values): + backup = models.Backup() + if not values.get('id'): + values['id'] = str(uuid.uuid4()) + backup.update(values) + backup.save() + return backup + + +@require_context +def backup_update(context, backup_id, values): session = get_session() with session.begin(): - quota_class_ref = quota_class_get(context, class_name, resource, - session=session) - quota_class_ref.delete(session=session) + backup = model_query(context, models.Backup, + 
session=session, read_deleted="yes").\ + filter_by(id=backup_id).first() + + if not backup: + raise exception.BackupNotFound( + _("No backup with id %s") % backup_id) + + backup.update(values) + backup.save(session=session) + return backup @require_admin_context -def quota_class_destroy_all_by_name(context, class_name): - session = get_session() - with session.begin(): - quota_classes = model_query(context, models.QuotaClass, - session=session, read_deleted="no").\ - filter_by(class_name=class_name).\ - all() +def backup_destroy(context, backup_id): + model_query(context, models.Backup).\ + filter_by(id=backup_id).\ + update({'status': 'deleted', + 'deleted': True, + 'deleted_at': timeutils.utcnow(), + 'updated_at': literal_column('updated_at')}) - for quota_class_ref in quota_classes: - quota_class_ref.delete(session=session) + +############################### @require_context -def quota_usage_get(context, project_id, resource, session=None): - result = model_query(context, models.QuotaUsage, session=session, - read_deleted="no").\ - filter_by(project_id=project_id).\ - filter_by(resource=resource).\ - first() +def _transfer_get(context, transfer_id, session=None): + query = model_query(context, models.Transfer, + session=session).\ + filter_by(id=transfer_id) + + if not is_admin_context(context): + volume = models.Volume + query = query.filter(models.Transfer.volume_id == volume.id, + volume.project_id == context.project_id) + + result = query.first() if not result: - raise exception.QuotaUsageNotFound(project_id=project_id) + raise exception.TransferNotFound(transfer_id=transfer_id) return result @require_context -def quota_usage_get_all_by_project(context, project_id): - authorize_project_context(context, project_id) +def transfer_get(context, transfer_id): + return _transfer_get(context, transfer_id) - rows = model_query(context, models.QuotaUsage, read_deleted="no").\ - filter_by(project_id=project_id).\ - all() - - result = {'project_id': project_id} - for row in rows: - result[row.resource] = dict(in_use=row.in_use, reserved=row.reserved) - return result +def _translate_transfers(transfers): + results = [] + for transfer in transfers: + r = {} + r['id'] = transfer['id'] + r['volume_id'] = transfer['volume_id'] + r['display_name'] = transfer['display_name'] + r['created_at'] = transfer['created_at'] + r['deleted'] = transfer['deleted'] + results.append(r) + return results @require_admin_context -def quota_usage_create(context, project_id, resource, in_use, reserved, - until_refresh, session=None, save=True): - quota_usage_ref = models.QuotaUsage() - quota_usage_ref.project_id = project_id - quota_usage_ref.resource = resource - quota_usage_ref.in_use = in_use - quota_usage_ref.reserved = reserved - quota_usage_ref.until_refresh = until_refresh +def transfer_get_all(context): + results = model_query(context, models.Transfer).all() + return _translate_transfers(results) - # Allow us to hold the save operation until later; keeps the - # transaction in quota_reserve() from breaking too early - if save: - quota_usage_ref.save(session=session) - return quota_usage_ref +@require_context +def transfer_get_all_by_project(context, project_id): + authorize_project_context(context, project_id) + query = model_query(context, models.Transfer).\ + filter(models.Volume.id == models.Transfer.volume_id, + models.Volume.project_id == project_id) + results = query.all() + return _translate_transfers(results) -@require_admin_context -def quota_usage_update(context, project_id, resource, in_use, 
reserved, - until_refresh, session=None): - def do_update(session): - quota_usage_ref = quota_usage_get(context, project_id, resource, - session=session) - quota_usage_ref.in_use = in_use - quota_usage_ref.reserved = reserved - quota_usage_ref.until_refresh = until_refresh - quota_usage_ref.save(session=session) - - if session: - # Assume caller started a transaction - do_update(session) - else: - session = get_session() - with session.begin(): - do_update(session) +@require_context +def transfer_create(context, values): + transfer = models.Transfer() + if not values.get('id'): + values['id'] = str(uuid.uuid4()) + session = get_session() + with session.begin(): + volume_ref = _volume_get(context, + values['volume_id'], + session=session) + if volume_ref['status'] != 'available': + msg = _('Volume must be available') + LOG.error(msg) + raise exception.InvalidVolume(reason=msg) + volume_ref['status'] = 'awaiting-transfer' + transfer.update(values) + transfer.save(session=session) + volume_ref.update(volume_ref) + volume_ref.save(session=session) + return transfer -@require_admin_context -def quota_usage_destroy(context, project_id, resource): + +@require_context +def transfer_destroy(context, transfer_id): session = get_session() with session.begin(): - quota_usage_ref = quota_usage_get(context, project_id, resource, - session=session) - quota_usage_ref.delete(session=session) + transfer_ref = _transfer_get(context, + transfer_id, + session=session) + volume_ref = _volume_get(context, + transfer_ref['volume_id'], + session=session) + # If the volume state is not 'awaiting-transfer' don't change it, but + # we can still mark the transfer record as deleted. + if volume_ref['status'] != 'awaiting-transfer': + msg = _('Volume in unexpected state %s, ' + 'expected awaiting-transfer') % volume_ref['status'] + LOG.error(msg) + else: + volume_ref['status'] = 'available' + volume_ref.update(volume_ref) + volume_ref.save(session=session) + model_query(context, models.Transfer, session=session).\ + filter_by(id=transfer_id).\ + update({'deleted': True, + 'deleted_at': timeutils.utcnow(), + 'updated_at': literal_column('updated_at')}) + + +@require_context +def transfer_accept(context, transfer_id, user_id, project_id): + session = get_session() + with session.begin(): + transfer_ref = _transfer_get(context, transfer_id, session) + volume_id = transfer_ref['volume_id'] + volume_ref = _volume_get(context, volume_id, session=session) + if volume_ref['status'] != 'awaiting-transfer': + volume_status = volume_ref['status'] + msg = _('Transfer %(transfer_id)s: Volume id %(volume_id)s in ' + 'unexpected state %(status)s, expected ' + 'awaiting-transfer') % {'transfer_id': transfer_id, + 'volume_id': volume_ref['id'], + 'status': volume_ref['status']} + LOG.error(msg) + raise exception.InvalidVolume(reason=msg) + + volume_ref['status'] = 'available' + volume_ref['user_id'] = user_id + volume_ref['project_id'] = project_id + volume_ref['updated_at'] = literal_column('updated_at') + volume_ref.update(volume_ref) + volume_ref.save(session=session) + session.query(models.Transfer).\ + filter_by(id=transfer_ref['id']).\ + update({'deleted': True, + 'deleted_at': timeutils.utcnow(), + 'updated_at': literal_column('updated_at')}) diff --git a/cinder/db/sqlalchemy/migrate_repo/manage.py b/cinder/db/sqlalchemy/migrate_repo/manage.py old mode 100644 new mode 100755 index 09e340f44f..a714db6240 --- a/cinder/db/sqlalchemy/migrate_repo/manage.py +++ b/cinder/db/sqlalchemy/migrate_repo/manage.py @@ -1,4 +1,33 @@ 
#!/usr/bin/env python +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import os + +from oslo.config import cfg + +from cinder.openstack.common import gettextutils +gettextutils.install('cinder', lazy=False) + +from cinder.db.sqlalchemy import migrate_repo +from cinder import version + from migrate.versioning.shell import main + +CONF = cfg.CONF + if __name__ == '__main__': - main(debug='False', repository='.') + CONF([], project='cinder', version=version.version_string()) + main(debug='False', url=CONF.database.connection, + repository=os.path.abspath(os.path.dirname(migrate_repo.__file__))) diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py b/cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py index 6ca30faa9e..da71b1f871 100644 --- a/cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py +++ b/cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py @@ -1,6 +1,4 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2012 OpenStack LLC. +# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -14,13 +12,12 @@ # License for the specific language governing permissions and limitations # under the License. + from sqlalchemy import Boolean, Column, DateTime, ForeignKey from sqlalchemy import Integer, MetaData, String, Table -from cinder import flags from cinder.openstack.common import log as logging -FLAGS = flags.FLAGS LOG = logging.getLogger(__name__) @@ -240,7 +237,7 @@ def upgrade(migrate_engine): table.create() except Exception: LOG.info(repr(table)) - LOG.exception('Exception while creating table.') + LOG.exception(_('Exception while creating table.')) raise if migrate_engine.name == "mysql": @@ -269,4 +266,4 @@ def upgrade(migrate_engine): def downgrade(migrate_engine): - LOG.exception('Downgrade from initial Cinder install is unsupported.') + LOG.exception(_('Downgrade from initial Cinder install is unsupported.')) diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py b/cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py index 3491734d9b..4217d1a694 100644 --- a/cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py +++ b/cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py @@ -1,6 +1,4 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2012 OpenStack LLC. +# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -14,6 +12,7 @@ # License for the specific language governing permissions and limitations # under the License. 
+from migrate import ForeignKeyConstraint from sqlalchemy import Boolean, Column, DateTime from sqlalchemy import MetaData, Integer, String, Table, ForeignKey @@ -28,23 +27,21 @@ def upgrade(migrate_engine): # New table quota_classes = Table('quota_classes', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True), - Column('class_name', - String(length=255, convert_unicode=True, - assert_unicode=None, unicode_error=None, - _warn_on_bytestring=False), index=True), - Column('resource', - String(length=255, convert_unicode=True, - assert_unicode=None, unicode_error=None, - _warn_on_bytestring=False)), - Column('hard_limit', Integer(), nullable=True), - mysql_engine='InnoDB', - mysql_charset='utf8', - ) + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, + name=None)), + Column('id', Integer(), primary_key=True), + Column('class_name', + String(length=255), + index=True), + Column('resource', + String(length=255)), + Column('hard_limit', Integer(), nullable=True), + mysql_engine='InnoDB', + mysql_charset='utf8', + ) try: quota_classes.create() @@ -53,26 +50,23 @@ def upgrade(migrate_engine): raise quota_usages = Table('quota_usages', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True), - Column('project_id', - String(length=255, convert_unicode=True, - assert_unicode=None, unicode_error=None, - _warn_on_bytestring=False), - index=True), - Column('resource', - String(length=255, convert_unicode=True, - assert_unicode=None, unicode_error=None, - _warn_on_bytestring=False)), - Column('in_use', Integer(), nullable=False), - Column('reserved', Integer(), nullable=False), - Column('until_refresh', Integer(), nullable=True), - mysql_engine='InnoDB', - mysql_charset='utf8', - ) + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, + name=None)), + Column('id', Integer(), primary_key=True), + Column('project_id', + String(length=255), + index=True), + Column('resource', + String(length=255)), + Column('in_use', Integer(), nullable=False), + Column('reserved', Integer(), nullable=False), + Column('until_refresh', Integer(), nullable=True), + mysql_engine='InnoDB', + mysql_charset='utf8', + ) try: quota_usages.create() @@ -81,31 +75,29 @@ def upgrade(migrate_engine): raise reservations = Table('reservations', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True), - Column('uuid', - String(length=36, convert_unicode=True, - assert_unicode=None, unicode_error=None, - _warn_on_bytestring=False), nullable=False), - Column('usage_id', Integer(), ForeignKey('quota_usages.id'), - nullable=False), - Column('project_id', - String(length=255, convert_unicode=True, - assert_unicode=None, 
unicode_error=None,
-                            _warn_on_bytestring=False),
-                     index=True),
-              Column('resource',
-                     String(length=255, convert_unicode=True,
-                            assert_unicode=None, unicode_error=None,
-                            _warn_on_bytestring=False)),
-              Column('delta', Integer(), nullable=False),
-              Column('expire', DateTime(timezone=False)),
-              mysql_engine='InnoDB',
-              mysql_charset='utf8',
-              )
+                         Column('created_at', DateTime(timezone=False)),
+                         Column('updated_at', DateTime(timezone=False)),
+                         Column('deleted_at', DateTime(timezone=False)),
+                         Column('deleted', Boolean(create_constraint=True,
+                                                   name=None)),
+                         Column('id', Integer(), primary_key=True),
+                         Column('uuid',
+                                String(length=36),
+                                nullable=False),
+                         Column('usage_id',
+                                Integer(),
+                                ForeignKey('quota_usages.id'),
+                                nullable=False),
+                         Column('project_id',
+                                String(length=255),
+                                index=True),
+                         Column('resource',
+                                String(length=255)),
+                         Column('delta', Integer(), nullable=False),
+                         Column('expire', DateTime(timezone=False)),
+                         mysql_engine='InnoDB',
+                         mysql_charset='utf8',
+                         )

     try:
         reservations.create()
@@ -118,6 +110,22 @@ def downgrade(migrate_engine):
     meta = MetaData()
     meta.bind = migrate_engine

+    if migrate_engine.name == 'mysql':
+        # NOTE(jsbryant): MySQL cannot drop the quota_usages table
+        # until the foreign key reservations_ibfk_1 is removed. We
+        # remove the foreign key first, and then we drop the table.
+        table = Table('reservations', meta, autoload=True)
+        ref_table = Table('quota_usages', meta, autoload=True)
+        params = {'columns': [table.c['usage_id']],
+                  'refcolumns': [ref_table.c['id']],
+                  'name': 'reservations_ibfk_1'}
+
+        try:
+            fkey = ForeignKeyConstraint(**params)
+            fkey.drop()
+        except Exception:
+            LOG.error(_("Dropping foreign key reservations_ibfk_1 failed."))
+
     quota_classes = Table('quota_classes', meta, autoload=True)
     try:
         quota_classes.drop()
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py b/cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py
new file mode 100644
index 0000000000..1974c8cbf1
--- /dev/null
+++ b/cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py
@@ -0,0 +1,76 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Column, DateTime, Text, Boolean
+from sqlalchemy import MetaData, Integer, String, Table, ForeignKey
+
+from cinder.openstack.common import log as logging
+
+LOG = logging.getLogger(__name__)
+
+
+def upgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    # Just for the ForeignKey and column creation to succeed, these are not the
+    # actual definitions of tables.
+ # + volumes = Table('volumes', + meta, + Column('id', Integer(), + primary_key=True, nullable=False), + mysql_engine='InnoDB') + snapshots = Table('snapshots', + meta, + Column('id', Integer(), + primary_key=True, nullable=False), + mysql_engine='InnoDB') + # Create new table + volume_glance_metadata = Table( + 'volume_glance_metadata', + meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('volume_id', String(length=36), ForeignKey('volumes.id')), + Column('snapshot_id', String(length=36), + ForeignKey('snapshots.id')), + Column('key', String(255)), + Column('value', Text), + mysql_engine='InnoDB' + ) + + try: + volume_glance_metadata.create() + except Exception: + LOG.exception(_("Exception while creating table " + "'volume_glance_metadata'")) + meta.drop_all(tables=[volume_glance_metadata]) + raise + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + volume_glance_metadata = Table('volume_glance_metadata', + meta, autoload=True) + try: + volume_glance_metadata.drop() + except Exception: + LOG.error(_("volume_glance_metadata table not dropped")) + raise diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/004_volume_type_to_uuid.py b/cinder/db/sqlalchemy/migrate_repo/versions/004_volume_type_to_uuid.py new file mode 100644 index 0000000000..3cd0afb9ca --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/004_volume_type_to_uuid.py @@ -0,0 +1,153 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
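A note on the stub `volumes` and `snapshots` Table definitions in 003 above: they exist only so that SQLAlchemy can resolve the ForeignKey targets; the real tables already exist in the database by the time the migration runs. A minimal, self-contained sketch of the same trick, with invented table names and a throwaway in-memory SQLite engine:

    # Sketch only: 'parents'/'children' are illustrative names, not part
    # of this patch.
    from sqlalchemy import (Column, ForeignKey, Integer, MetaData, String,
                            Table, create_engine)

    engine = create_engine('sqlite://')   # throwaway in-memory database
    meta = MetaData()
    meta.bind = engine

    # Stub declaration: only the primary key that the ForeignKey needs.
    # In migration 003 the real table already exists, so the stub is never
    # created; it merely lets the FK below resolve.
    parents = Table('parents', meta,
                    Column('id', String(36), primary_key=True))

    children = Table('children', meta,
                     Column('id', Integer, primary_key=True, nullable=False),
                     Column('parent_id', String(36),
                            ForeignKey('parents.id')))

    parents.create()    # needed only to make this standalone demo runnable
    children.create()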
+ +import uuid + +from cinder.openstack.common import log as logging +from migrate import ForeignKeyConstraint +from sqlalchemy import Integer, MetaData, String, Table + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + """Convert volume_type_id to UUID.""" + meta = MetaData() + meta.bind = migrate_engine + + volumes = Table('volumes', meta, autoload=True) + volume_types = Table('volume_types', meta, autoload=True) + extra_specs = Table('volume_type_extra_specs', meta, autoload=True) + + fkey_remove_list = [volumes.c.volume_type_id, + volume_types.c.id, + extra_specs.c.volume_type_id] + + for column in fkey_remove_list: + fkeys = list(column.foreign_keys) + if fkeys: + fkey_name = fkeys[0].constraint.name + fkey = ForeignKeyConstraint(columns=[column], + refcolumns=[volume_types.c.id], + name=fkey_name) + + try: + fkey.drop() + except Exception: + if migrate_engine.url.get_dialect().name.startswith('sqlite'): + pass + else: + raise + + volumes.c.volume_type_id.alter(String(36)) + volume_types.c.id.alter(String(36)) + extra_specs.c.volume_type_id.alter(String(36)) + + vtype_list = list(volume_types.select().execute()) + for t in vtype_list: + new_id = str(uuid.uuid4()) + + volumes.update().\ + where(volumes.c.volume_type_id == t['id']).\ + values(volume_type_id=new_id).execute() + + extra_specs.update().\ + where(extra_specs.c.volume_type_id == t['id']).\ + values(volume_type_id=new_id).execute() + + volume_types.update().\ + where(volume_types.c.id == t['id']).\ + values(id=new_id).execute() + + for column in fkey_remove_list: + fkeys = list(column.foreign_keys) + if fkeys: + fkey_name = fkeys[0].constraint.name + fkey = ForeignKeyConstraint(columns=[column], + refcolumns=[volume_types.c.id], + name=fkey_name) + try: + fkey.create() + LOG.info('Created foreign key %s' % fkey_name) + except Exception: + if migrate_engine.url.get_dialect().name.startswith('sqlite'): + pass + else: + raise + + +def downgrade(migrate_engine): + """Convert volume_type from UUID back to int.""" + meta = MetaData() + meta.bind = migrate_engine + + volumes = Table('volumes', meta, autoload=True) + volume_types = Table('volume_types', meta, autoload=True) + extra_specs = Table('volume_type_extra_specs', meta, autoload=True) + + fkey_remove_list = [volumes.c.volume_type_id, + volume_types.c.id, + extra_specs.c.volume_type_id] + + for column in fkey_remove_list: + fkeys = list(column.foreign_keys) + if fkeys: + fkey_name = fkeys[0].constraint.name + fkey = ForeignKeyConstraint(columns=[column], + refcolumns=[volume_types.c.id], + name=fkey_name) + + try: + fkey.drop() + except Exception: + if migrate_engine.url.get_dialect().name.startswith('sqlite'): + pass + else: + raise + + vtype_list = list(volume_types.select().execute()) + new_id = 1 + + for t in vtype_list: + volumes.update().\ + where(volumes.c.volume_type_id == t['id']).\ + values(volume_type_id=new_id).execute() + + extra_specs.update().\ + where(extra_specs.c.volume_type_id == t['id']).\ + values(volume_type_id=new_id).execute() + + volume_types.update().\ + where(volume_types.c.id == t['id']).\ + values(id=new_id).execute() + + new_id += 1 + + volumes.c.volume_type_id.alter(Integer) + volume_types.c.id.alter(Integer) + extra_specs.c.volume_type_id.alter(Integer) + + for column in fkey_remove_list: + fkeys = list(column.foreign_keys) + if fkeys: + fkey_name = fkeys[0].constraint.name + fkey = ForeignKeyConstraint(columns=[column], + refcolumns=[volume_types.c.id], + name=fkey_name) + try: + fkey.create() + LOG.info('Created foreign 
key %s' % fkey_name)
+            except Exception:
+                if migrate_engine.url.get_dialect().name.startswith('sqlite'):
+                    pass
+                else:
+                    raise
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/005_add_source_volume_column.py b/cinder/db/sqlalchemy/migrate_repo/versions/005_add_source_volume_column.py
new file mode 100644
index 0000000000..352d640a92
--- /dev/null
+++ b/cinder/db/sqlalchemy/migrate_repo/versions/005_add_source_volume_column.py
@@ -0,0 +1,39 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from cinder.openstack.common import log as logging
+from sqlalchemy import Column
+from sqlalchemy import MetaData, String, Table
+
+LOG = logging.getLogger(__name__)
+
+
+def upgrade(migrate_engine):
+    """Add source volume id column to volumes."""
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    volumes = Table('volumes', meta, autoload=True)
+    source_volid = Column('source_volid', String(36))
+    volumes.create_column(source_volid)
+    volumes.update().values(source_volid=None).execute()
+
+
+def downgrade(migrate_engine):
+    """Remove source volume id column from volumes."""
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    volumes = Table('volumes', meta, autoload=True)
+    source_volid = Column('source_volid', String(36))
+    volumes.drop_column(source_volid)
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/005_sqlite_downgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/005_sqlite_downgrade.sql
new file mode 100644
index 0000000000..c34f31752f
--- /dev/null
+++ b/cinder/db/sqlalchemy/migrate_repo/versions/005_sqlite_downgrade.sql
@@ -0,0 +1,124 @@
+BEGIN TRANSACTION;
+
+CREATE TEMPORARY TABLE volumes_backup (
+    created_at DATETIME,
+    updated_at DATETIME,
+    deleted_at DATETIME,
+    deleted BOOLEAN,
+    id VARCHAR(36) NOT NULL,
+    ec2_id VARCHAR(255),
+    user_id VARCHAR(255),
+    project_id VARCHAR(255),
+    host VARCHAR(255),
+    size INTEGER,
+    availability_zone VARCHAR(255),
+    instance_uuid VARCHAR(36),
+    mountpoint VARCHAR(255),
+    attach_time VARCHAR(255),
+    status VARCHAR(255),
+    attach_status VARCHAR(255),
+    scheduled_at DATETIME,
+    launched_at DATETIME,
+    terminated_at DATETIME,
+    display_name VARCHAR(255),
+    display_description VARCHAR(255),
+    provider_location VARCHAR(256),
+    provider_auth VARCHAR(256),
+    snapshot_id VARCHAR(36),
+    volume_type_id VARCHAR(36),
+    source_volid VARCHAR(36),
+    PRIMARY KEY (id),
+    CHECK (deleted IN (0, 1))
+);
+
+INSERT INTO volumes_backup
+    SELECT created_at,
+           updated_at,
+           deleted_at,
+           deleted,
+           id,
+           ec2_id,
+           user_id,
+           project_id,
+           host,
+           size,
+           availability_zone,
+           instance_uuid,
+           mountpoint,
+           attach_time,
+           status,
+           attach_status,
+           scheduled_at,
+           launched_at,
+           terminated_at,
+           display_name,
+           display_description,
+           provider_location,
+           provider_auth,
+           snapshot_id,
+           volume_type_id,
+           source_volid
+    FROM volumes;
+
+DROP TABLE volumes;
+
+CREATE TABLE volumes (
+    created_at DATETIME,
+    updated_at DATETIME,
+    deleted_at DATETIME,
+    deleted BOOLEAN,
+    id VARCHAR(36) NOT NULL,
+    ec2_id VARCHAR(255),
+    user_id VARCHAR(255),
+    project_id 
VARCHAR(255), + host VARCHAR(255), + size INTEGER, + availability_zone VARCHAR(255), + instance_uuid VARCHAR(36), + mountpoint VARCHAR(255), + attach_time VARCHAR(255), + status VARCHAR(255), + attach_status VARCHAR(255), + scheduled_at DATETIME, + launched_at DATETIME, + terminated_at DATETIME, + display_name VARCHAR(255), + display_description VARCHAR(255), + provider_location VARCHAR(256), + provider_auth VARCHAR(256), + snapshot_id VARCHAR(36), + volume_type_id VARCHAR(36), + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)) +); + +INSERT INTO volumes + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + ec2_id, + user_id, + project_id, + host, + size, + availability_zone, + instance_uuid, + mountpoint, + attach_time, + status, + attach_status, + scheduled_at, + launched_at, + terminated_at, + display_name, + display_description, + provider_location, + provider_auth, + snapshot_id, + volume_type_id + FROM volumes_backup; + +DROP TABLE volumes_backup; +COMMIT; diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/006_snapshots_add_provider_location.py b/cinder/db/sqlalchemy/migrate_repo/versions/006_snapshots_add_provider_location.py new file mode 100644 index 0000000000..9c6aced6b1 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/006_snapshots_add_provider_location.py @@ -0,0 +1,34 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from sqlalchemy import Column +from sqlalchemy import MetaData, String, Table + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + snapshots = Table('snapshots', meta, autoload=True) + provider_location = Column('provider_location', String(255)) + snapshots.create_column(provider_location) + snapshots.update().values(provider_location=None).execute() + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + snapshots = Table('snapshots', meta, autoload=True) + provider_location = snapshots.columns.provider_location + snapshots.drop_column(provider_location) diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/007_add_volume_snapshot_fk.py b/cinder/db/sqlalchemy/migrate_repo/versions/007_add_volume_snapshot_fk.py new file mode 100644 index 0000000000..007a15db05 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/007_add_volume_snapshot_fk.py @@ -0,0 +1,39 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
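A note on the `*_sqlite_downgrade.sql` scripts appearing alongside these migrations: SQLite cannot drop columns, so the Python downgrade() is shadowed by a per-dialect SQL script that rebuilds the table, and sqlalchemy-migrate appears to select such scripts by their `<version>_<dialect>_<operation>.sql` filename. A rough sketch of smoke-testing one round-trip with the migrate API, where the repository path and database URL are assumptions:

    # Sketch: one upgrade/downgrade round-trip on a scratch SQLite file.
    from migrate.versioning import api as versioning_api

    repo = 'cinder/db/sqlalchemy/migrate_repo'      # assumed checkout path
    url = 'sqlite:////tmp/cinder-migrate-smoke.db'  # scratch database

    versioning_api.version_control(url, repo)   # stamp the DB at version 0
    versioning_api.upgrade(url, repo, 6)        # apply 001..006
    versioning_api.downgrade(url, repo, 4)      # 005's SQL script runs here
    print(versioning_api.db_version(url, repo))     # -> 4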
+ + +from sqlalchemy import MetaData, Table +from migrate.changeset.constraint import ForeignKeyConstraint + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + snapshots = Table('snapshots', meta, autoload=True) + volumes = Table('volumes', meta, autoload=True) + + ForeignKeyConstraint( + columns=[snapshots.c.volume_id], + refcolumns=[volumes.c.id]).create() + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + snapshots = Table('snapshots', meta, autoload=True) + volumes = Table('volumes', meta, autoload=True) + + ForeignKeyConstraint( + columns=[snapshots.c.volume_id], + refcolumns=[volumes.c.id]).drop() diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/007_sqlite_downgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/007_sqlite_downgrade.sql new file mode 100644 index 0000000000..d2fe9b6930 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/007_sqlite_downgrade.sql @@ -0,0 +1,32 @@ +-- As sqlite does not support the DROP FOREIGN KEY, we need to create +-- the table, and move all the data to it. + +BEGIN TRANSACTION; + +CREATE TABLE snapshots_v6 ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id VARCHAR(36) NOT NULL, + volume_id VARCHAR(36) NOT NULL, + user_id VARCHAR(255), + project_id VARCHAR(255), + status VARCHAR(255), + progress VARCHAR(255), + volume_size INTEGER, + scheduled_at DATETIME, + display_name VARCHAR(255), + display_description VARCHAR(255), + provider_location VARCHAR(255), + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)) +); + +INSERT INTO snapshots_v6 SELECT * FROM snapshots; + +DROP TABLE snapshots; + +ALTER TABLE snapshots_v6 RENAME TO snapshots; + +COMMIT; diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py b/cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py new file mode 100644 index 0000000000..a786c61485 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py @@ -0,0 +1,69 @@ +# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
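Migration 007 above only adds a constraint, so a quick way to confirm it took effect is schema reflection; a sketch, with the connection URL assumed and most meaningful on MySQL or PostgreSQL where the foreign key is real DDL:

    # Sketch: list snapshots' foreign keys after upgrading past version 7.
    from sqlalchemy import create_engine, inspect

    engine = create_engine('mysql://cinder:secret@localhost/cinder')  # assumed
    for fk in inspect(engine).get_foreign_keys('snapshots'):
        print(fk['constrained_columns'], '->',
              fk['referred_table'], fk['referred_columns'])
    # expect to see: ['volume_id'] -> volumes ['id']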
+ +from sqlalchemy import Boolean, Column, DateTime +from sqlalchemy import MetaData, Integer, String, Table + +from cinder.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + # New table + backups = Table( + 'backups', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', String(36), primary_key=True, nullable=False), + Column('volume_id', String(36), nullable=False), + Column('user_id', String(length=255)), + Column('project_id', String(length=255)), + Column('host', String(length=255)), + Column('availability_zone', String(length=255)), + Column('display_name', String(length=255)), + Column('display_description', String(length=255)), + Column('container', String(length=255)), + Column('status', String(length=255)), + Column('fail_reason', String(length=255)), + Column('service_metadata', String(length=255)), + Column('service', String(length=255)), + Column('size', Integer()), + Column('object_count', Integer()), + mysql_engine='InnoDB' + ) + + try: + backups.create() + except Exception: + LOG.error(_("Table |%s| not created!"), repr(backups)) + raise + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + backups = Table('backups', meta, autoload=True) + try: + backups.drop() + except Exception: + LOG.error(_("backups table not dropped")) + raise diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py b/cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py new file mode 100644 index 0000000000..4c7f453e15 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py @@ -0,0 +1,59 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
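The backups table created by 008 above is never hard-deleted row by row: backup_destroy earlier in this patch issues a bulk UPDATE, and its `'updated_at': literal_column('updated_at')` entry assigns the column to itself so the bulk write does not disturb the existing timestamp. A self-contained sketch of that idiom against a throwaway SQLite database (column set trimmed, values illustrative):

    from datetime import datetime

    from sqlalchemy import (Boolean, Column, DateTime, MetaData, String,
                            Table, create_engine, literal_column)

    engine = create_engine('sqlite://')       # throwaway database
    meta = MetaData(bind=engine)
    backups = Table('backups', meta,
                    Column('id', String(36), primary_key=True),
                    Column('status', String(255)),
                    Column('deleted', Boolean, default=False),
                    Column('deleted_at', DateTime),
                    Column('updated_at', DateTime))
    backups.create()
    backups.insert().execute(id='b-1', status='available',
                             updated_at=datetime(2013, 1, 1))

    # Same shape as backup_destroy's soft delete: updated_at is set to
    # itself via literal_column, so the row keeps the 2013-01-01 value.
    backups.update().where(backups.c.id == 'b-1').values(
        status='deleted',
        deleted=True,
        deleted_at=datetime.utcnow(),
        updated_at=literal_column('updated_at')).execute()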
+ +from sqlalchemy import Boolean, Column, DateTime +from sqlalchemy import Integer, MetaData, String, Table, ForeignKey + +from cinder.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + snapshots = Table('snapshots', meta, autoload=True) + + # New table + snapshot_metadata = Table( + 'snapshot_metadata', meta, + Column('created_at', DateTime), + Column('updated_at', DateTime), + Column('deleted_at', DateTime), + Column('deleted', Boolean), + Column('id', Integer, primary_key=True, nullable=False), + Column('snapshot_id', String(length=36), ForeignKey('snapshots.id'), + nullable=False), + Column('key', String(length=255)), + Column('value', String(length=255)), + mysql_engine='InnoDB' + ) + + try: + snapshot_metadata.create() + except Exception: + LOG.error(_("Table |%s| not created!"), repr(snapshot_metadata)) + raise + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + snapshot_metadata = Table('snapshot_metadata', + meta, + autoload=True) + try: + snapshot_metadata.drop() + except Exception: + LOG.error(_("snapshot_metadata table not dropped")) + raise diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py b/cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py new file mode 100644 index 0000000000..dddd9cc4c3 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py @@ -0,0 +1,62 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
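Several migrations in this series (005 and 006 above, 011 through 014 below) share one shape: reflect the table, `create_column`, then backfill with an explicit UPDATE so existing rows get a defined value. A generic sketch of that shape; `things` and `note` are invented names, and the last few lines only exercise it end to end:

    import migrate.changeset  # noqa: patches create_column/drop_column in
    from sqlalchemy import Column, MetaData, String, Table, create_engine


    def upgrade(migrate_engine):
        meta = MetaData()
        meta.bind = migrate_engine
        things = Table('things', meta, autoload=True)
        things.create_column(Column('note', String(255)))
        things.update().values(note=None).execute()   # explicit backfill


    def downgrade(migrate_engine):
        meta = MetaData()
        meta.bind = migrate_engine
        things = Table('things', meta, autoload=True)
        things.drop_column(things.columns.note)


    engine = create_engine('sqlite://')    # throwaway demo database
    Table('things', MetaData(bind=engine),
          Column('id', String(36), primary_key=True)).create()
    upgrade(engine)
    downgrade(engine)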
+ +from sqlalchemy import Boolean, Column, DateTime +from sqlalchemy import MetaData, String, Table, ForeignKey + +from cinder.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + volumes = Table('volumes', meta, autoload=True) + + # New table + transfers = Table( + 'transfers', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean), + Column('id', String(36), primary_key=True, nullable=False), + Column('volume_id', String(length=36), ForeignKey('volumes.id'), + nullable=False), + Column('display_name', String(length=255)), + Column('salt', String(length=255)), + Column('crypt_hash', String(length=255)), + Column('expires_at', DateTime(timezone=False)), + mysql_engine='InnoDB', + mysql_charset='utf8' + ) + + try: + transfers.create() + except Exception: + LOG.error(_("Table |%s| not created!"), repr(transfers)) + raise + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + transfers = Table('transfers', + meta, + autoload=True) + try: + transfers.drop() + except Exception: + LOG.error(_("transfers table not dropped")) + raise diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/011_add_bootable_column.py b/cinder/db/sqlalchemy/migrate_repo/versions/011_add_bootable_column.py new file mode 100644 index 0000000000..112764e581 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/011_add_bootable_column.py @@ -0,0 +1,44 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
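The transfers table from 010 above backs the transfer_create / transfer_destroy / transfer_accept helpers earlier in this patch, all of which hinge on the volume's status field. A sketch of the handshake they enforce, with plain dicts standing in for rows and illustrative owner values:

    volume = {'status': 'available', 'user_id': 'u1', 'project_id': 'p1'}

    # transfer_create: refuses anything but an 'available' volume, then
    # parks it in 'awaiting-transfer' while the offer is open.
    assert volume['status'] == 'available'
    volume['status'] = 'awaiting-transfer'

    # transfer_accept: requires 'awaiting-transfer', then releases the
    # volume and re-owns it for the accepting user and project.
    # (transfer_destroy also returns the status to 'available' but
    # leaves ownership alone.)
    assert volume['status'] == 'awaiting-transfer'
    volume.update(status='available', user_id='u2', project_id='p2')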
+
+
+from sqlalchemy import Boolean, Column, MetaData, Table
+
+
+def upgrade(migrate_engine):
+    """Add bootable column to volumes."""
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    volumes = Table('volumes', meta, autoload=True)
+    bootable = Column('bootable', Boolean)
+
+    volumes.create_column(bootable)
+    volumes.update().values(bootable=False).execute()
+
+    glance_metadata = Table('volume_glance_metadata', meta, autoload=True)
+    glance_items = list(glance_metadata.select().execute())
+    for item in glance_items:
+        volumes.update().\
+            where(volumes.c.id == item['volume_id']).\
+            values(bootable=True).execute()
+
+
+def downgrade(migrate_engine):
+    """Remove bootable column from volumes."""
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    volumes = Table('volumes', meta, autoload=True)
+    bootable = volumes.columns.bootable
+    volumes.drop_column(bootable)
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/011_sqlite_downgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/011_sqlite_downgrade.sql
new file mode 100644
index 0000000000..f27f48541a
--- /dev/null
+++ b/cinder/db/sqlalchemy/migrate_repo/versions/011_sqlite_downgrade.sql
@@ -0,0 +1,64 @@
+BEGIN TRANSACTION;
+
+CREATE TABLE volumes_v10 (
+    created_at DATETIME,
+    updated_at DATETIME,
+    deleted_at DATETIME,
+    deleted BOOLEAN,
+    id VARCHAR(36) NOT NULL,
+    ec2_id INTEGER,
+    user_id VARCHAR(255),
+    project_id VARCHAR(255),
+    snapshot_id VARCHAR(36),
+    host VARCHAR(255),
+    size INTEGER,
+    availability_zone VARCHAR(255),
+    instance_uuid VARCHAR(36),
+    mountpoint VARCHAR(255),
+    attach_time VARCHAR(255),
+    status VARCHAR(255),
+    attach_status VARCHAR(255),
+    scheduled_at DATETIME,
+    launched_at DATETIME,
+    terminated_at DATETIME,
+    display_name VARCHAR(255),
+    display_description VARCHAR(255),
+    provider_location VARCHAR(255),
+    provider_auth VARCHAR(255),
+    volume_type_id VARCHAR(36),
+    source_volid VARCHAR(36),
+    PRIMARY KEY (id)
+);
+
+INSERT INTO volumes_v10
+    SELECT created_at,
+           updated_at,
+           deleted_at,
+           deleted,
+           id,
+           ec2_id,
+           user_id,
+           project_id,
+           snapshot_id,
+           host,
+           size,
+           availability_zone,
+           instance_uuid,
+           mountpoint,
+           attach_time,
+           status,
+           attach_status,
+           scheduled_at,
+           launched_at,
+           terminated_at,
+           display_name,
+           display_description,
+           provider_location,
+           provider_auth,
+           volume_type_id,
+           source_volid
+    FROM volumes;
+
+DROP TABLE volumes;
+ALTER TABLE volumes_v10 RENAME TO volumes;
+COMMIT;
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/012_add_attach_host_column.py b/cinder/db/sqlalchemy/migrate_repo/versions/012_add_attach_host_column.py
new file mode 100644
index 0000000000..58b03bcfc2
--- /dev/null
+++ b/cinder/db/sqlalchemy/migrate_repo/versions/012_add_attach_host_column.py
@@ -0,0 +1,35 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
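The upgrade in 011 above backfills `bootable` with one UPDATE per volume_glance_metadata row; that is fine at small scale, but a single set-based UPDATE would do the same work in one statement. A sketch of that alternative under the same schema (not part of the patch):

    # Sketch: set-based alternative to 011's per-row backfill loop.
    from sqlalchemy import MetaData, Table, exists


    def backfill_bootable(migrate_engine):
        meta = MetaData()
        meta.bind = migrate_engine
        volumes = Table('volumes', meta, autoload=True)
        vgm = Table('volume_glance_metadata', meta, autoload=True)

        volumes.update().\
            where(exists().where(vgm.c.volume_id == volumes.c.id)).\
            values(bootable=True).execute()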
+ +from sqlalchemy import Column +from sqlalchemy import MetaData, String, Table + + +def upgrade(migrate_engine): + """Add attach host column to volumes.""" + meta = MetaData() + meta.bind = migrate_engine + + volumes = Table('volumes', meta, autoload=True) + attached_host = Column('attached_host', String(255)) + volumes.create_column(attached_host) + volumes.update().values(attached_host=None).execute() + + +def downgrade(migrate_engine): + """Remove attach host column from volumes.""" + meta = MetaData() + meta.bind = migrate_engine + + volumes = Table('volumes', meta, autoload=True) + attached_host = Column('attached_host', String(255)) + volumes.drop_column(attached_host) diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/012_sqlite_downgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/012_sqlite_downgrade.sql new file mode 100644 index 0000000000..f3813cc5cf --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/012_sqlite_downgrade.sql @@ -0,0 +1,66 @@ +BEGIN TRANSACTION; + +CREATE TABLE volumes_v11 ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id VARCHAR(36) NOT NULL, + ec2_id INTEGER, + user_id VARCHAR(255), + project_id VARCHAR(255), + snapshot_id VARCHAR(36), + host VARCHAR(255), + size INTEGER, + availability_zone VARCHAR(255), + instance_uuid VARCHAR(36), + mountpoint VARCHAR(255), + attach_time VARCHAR(255), + status VARCHAR(255), + attach_status VARCHAR(255), + scheduled_at DATETIME, + launched_at DATETIME, + terminated_at DATETIME, + display_name VARCHAR(255), + display_description VARCHAR(255), + provider_location VARCHAR(255), + provider_auth VARCHAR(255), + volume_type_id VARCHAR(36), + source_volid VARCHAR(36), + bootable BOOLEAN, + PRIMARY KEY (id) +); + +INSERT INTO volumes_v11 + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + ec2_id, + user_id, + project_id, + snapshot_id, + host, + size, + availability_zone, + instance_uuid, + mountpoint, + attach_time, + status, + attach_status, + scheduled_at, + launched_at, + terminated_at, + display_name, + display_description, + provider_location, + provider_auth, + volume_type_id, + source_volid, + bootable + FROM volumes; + +DROP TABLE volumes; +ALTER TABLE volumes_v11 RENAME TO volumes; +COMMIT; diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/013_add_provider_geometry_column.py b/cinder/db/sqlalchemy/migrate_repo/versions/013_add_provider_geometry_column.py new file mode 100644 index 0000000000..afc3b5b35c --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/013_add_provider_geometry_column.py @@ -0,0 +1,35 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from sqlalchemy import Column +from sqlalchemy import MetaData, String, Table + + +def upgrade(migrate_engine): + """Add provider_geometry column to volumes.""" + meta = MetaData() + meta.bind = migrate_engine + + volumes = Table('volumes', meta, autoload=True) + provider_geometry = Column('provider_geometry', String(255)) + volumes.create_column(provider_geometry) + volumes.update().values(provider_geometry=None).execute() + + +def downgrade(migrate_engine): + """Remove provider_geometry column from volumes.""" + meta = MetaData() + meta.bind = migrate_engine + + volumes = Table('volumes', meta, autoload=True) + provider_geometry = Column('provider_geometry', String(255)) + volumes.drop_column(provider_geometry) diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/013_sqlite_downgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/013_sqlite_downgrade.sql new file mode 100644 index 0000000000..509fe434b4 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/013_sqlite_downgrade.sql @@ -0,0 +1,68 @@ +BEGIN TRANSACTION; + +CREATE TABLE volumes_v12 ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id VARCHAR(36) NOT NULL, + ec2_id INTEGER, + user_id VARCHAR(255), + project_id VARCHAR(255), + snapshot_id VARCHAR(36), + host VARCHAR(255), + size INTEGER, + availability_zone VARCHAR(255), + instance_uuid VARCHAR(36), + mountpoint VARCHAR(255), + attach_time VARCHAR(255), + status VARCHAR(255), + attach_status VARCHAR(255), + scheduled_at DATETIME, + launched_at DATETIME, + terminated_at DATETIME, + display_name VARCHAR(255), + display_description VARCHAR(255), + provider_location VARCHAR(255), + provider_auth VARCHAR(255), + volume_type_id VARCHAR(36), + source_volid VARCHAR(36), + bootable BOOLEAN, + attached_host VARCHAR(255), + PRIMARY KEY (id) +); + +INSERT INTO volumes_v12 + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + ec2_id, + user_id, + project_id, + snapshot_id, + host, + size, + availability_zone, + instance_uuid, + mountpoint, + attach_time, + status, + attach_status, + scheduled_at, + launched_at, + terminated_at, + display_name, + display_description, + provider_location, + provider_auth, + volume_type_id, + source_volid, + bootable, + attached_host + FROM volumes; + +DROP TABLE volumes; +ALTER TABLE volumes_v12 RENAME TO volumes; +COMMIT; diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/014_add_name_id.py b/cinder/db/sqlalchemy/migrate_repo/versions/014_add_name_id.py new file mode 100644 index 0000000000..4bf8bffff2 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/014_add_name_id.py @@ -0,0 +1,35 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +from sqlalchemy import String, Column, MetaData, Table + + +def upgrade(migrate_engine): + """Add _name_id column to volumes.""" + meta = MetaData() + meta.bind = migrate_engine + + volumes = Table('volumes', meta, autoload=True) + _name_id = Column('_name_id', String(36)) + volumes.create_column(_name_id) + volumes.update().values(_name_id=None).execute() + + +def downgrade(migrate_engine): + """Remove _name_id column from volumes.""" + meta = MetaData() + meta.bind = migrate_engine + + volumes = Table('volumes', meta, autoload=True) + _name_id = volumes.columns._name_id + volumes.drop_column(_name_id) diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/014_sqlite_downgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/014_sqlite_downgrade.sql new file mode 100644 index 0000000000..a8260fa970 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/014_sqlite_downgrade.sql @@ -0,0 +1,70 @@ +BEGIN TRANSACTION; + +CREATE TABLE volumes_v13 ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id VARCHAR(36) NOT NULL, + ec2_id INTEGER, + user_id VARCHAR(255), + project_id VARCHAR(255), + snapshot_id VARCHAR(36), + host VARCHAR(255), + size INTEGER, + availability_zone VARCHAR(255), + instance_uuid VARCHAR(36), + attached_host VARCHAR(255), + mountpoint VARCHAR(255), + attach_time VARCHAR(255), + status VARCHAR(255), + attach_status VARCHAR(255), + scheduled_at DATETIME, + launched_at DATETIME, + terminated_at DATETIME, + display_name VARCHAR(255), + display_description VARCHAR(255), + provider_location VARCHAR(255), + provider_auth VARCHAR(255), + volume_type_id VARCHAR(36), + source_volid VARCHAR(36), + bootable BOOLEAN, + provider_geometry VARCHAR(255), + PRIMARY KEY (id) +); + +INSERT INTO volumes_v13 + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + ec2_id, + user_id, + project_id, + snapshot_id, + host, + size, + availability_zone, + instance_uuid, + attached_host, + mountpoint, + attach_time, + status, + attach_status, + scheduled_at, + launched_at, + terminated_at, + display_name, + display_description, + provider_location, + provider_auth, + volume_type_id, + source_volid, + bootable, + provider_geometry + FROM volumes; + +DROP TABLE volumes; +ALTER TABLE volumes_v13 RENAME TO volumes; +COMMIT; diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py b/cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py new file mode 100644 index 0000000000..63848efaa2 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py @@ -0,0 +1,62 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
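The next file, 015, drops the legacy migrations table outright, and its downgrade can only recreate the empty structure, not the rows. If the data matters, dumping it first is cheap; a sketch, with the URL and output path as assumptions:

    # Sketch: stash the legacy 'migrations' rows before version 15 runs.
    import json

    from sqlalchemy import MetaData, Table, create_engine

    engine = create_engine('sqlite:////tmp/cinder.db')   # assumed URL
    migrations = Table('migrations', MetaData(bind=engine), autoload=True)

    rows = [dict(row) for row in migrations.select().execute()]
    with open('/tmp/migrations-backup.json', 'w') as fp:
        json.dump(rows, fp, default=str)  # default=str handles datetimes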
+ +from sqlalchemy import Boolean, Column, DateTime, Integer +from sqlalchemy import MetaData, String, Table + +from cinder.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + + +TABLE_NAME = 'migrations' + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + table = Table(TABLE_NAME, meta, autoload=True) + try: + table.drop() + except Exception: + LOG.error(_("migrations table not dropped")) + raise + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + table = Table( + TABLE_NAME, meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean), + Column('id', Integer, primary_key=True, nullable=False), + + Column('source_compute', String(length=255)), + Column('dest_compute', String(length=255)), + Column('dest_host', String(length=255)), + Column('old_instance_type_id', Integer), + Column('new_instance_type_id', Integer), + Column('instance_uuid', String(length=255), nullable=True), + Column('status', String(length=255)), + mysql_engine='InnoDB', + mysql_charset='utf8' + ) + + try: + table.create() + except Exception: + LOG.error(_("Table |%s| not created"), repr(table)) + raise diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py b/cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py new file mode 100644 index 0000000000..6dfb68ea21 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py @@ -0,0 +1,102 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from sqlalchemy import Boolean, Column, DateTime, ForeignKey +from sqlalchemy import Integer, MetaData, String, Table + +from cinder.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + sm_backend_config = Table('sm_backend_config', meta, autoload=True) + sm_flavors = Table('sm_flavors', meta, autoload=True) + sm_volume = Table('sm_volume', meta, autoload=True) + + tables = [sm_volume, sm_backend_config, sm_flavors] + + for table in tables: + try: + table.drop() + except Exception: + LOG.exception(_('Exception while dropping table %s.'), + repr(table)) + raise + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + Table('volumes', meta, autoload=True) + + sm_backend_config = Table( + 'sm_backend_config', meta, + Column('created_at', DateTime), + Column('updated_at', DateTime), + Column('deleted_at', DateTime), + Column('deleted', Boolean), + Column('id', Integer, primary_key=True, nullable=False), + Column('flavor_id', Integer, ForeignKey('sm_flavors.id'), + nullable=False), + Column('sr_uuid', String(length=255)), + Column('sr_type', String(length=255)), + Column('config_params', String(length=2047)), + mysql_engine='InnoDB', + mysql_charset='utf8' + ) + + sm_flavors = Table( + 'sm_flavors', meta, + Column('created_at', DateTime), + Column('updated_at', DateTime), + Column('deleted_at', DateTime), + Column('deleted', Boolean), + Column('id', Integer, primary_key=True, nullable=False), + Column('label', String(length=255)), + Column('description', String(length=255)), + mysql_engine='InnoDB', + mysql_charset='utf8' + ) + + sm_volume = Table( + 'sm_volume', meta, + Column('created_at', DateTime), + Column('updated_at', DateTime), + Column('deleted_at', DateTime), + Column('deleted', Boolean), + Column('id', String(length=36), + ForeignKey('volumes.id'), + primary_key=True, + nullable=False), + Column('backend_id', Integer, ForeignKey('sm_backend_config.id'), + nullable=False), + Column('vdi_uuid', String(length=255)), + mysql_engine='InnoDB', + mysql_charset='utf8' + ) + + tables = [sm_flavors, sm_backend_config, sm_volume] + + for table in tables: + try: + table.create() + except Exception: + LOG.exception(_('Exception while creating table %s.'), + repr(table)) + raise diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py b/cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py new file mode 100644 index 0000000000..4558667ff2 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py @@ -0,0 +1,114 @@ +# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
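Migration 016 keeps two hand-ordered table lists: the upgrade drops `sm_volume` before the tables it references, and the downgrade creates `sm_flavors` and `sm_backend_config` before `sm_volume`. SQLAlchemy can derive that ordering from the foreign keys itself; a sketch of the drop side, assuming the same `migrate_engine` the functions above receive::

    from sqlalchemy import MetaData

    meta = MetaData()
    meta.reflect(bind=migrate_engine)  # reflection records FK dependencies

    # sorted_tables is parent-first, so reverse it to drop children first.
    for table in reversed(meta.sorted_tables):
        if table.name in ('sm_volume', 'sm_backend_config', 'sm_flavors'):
            table.drop(bind=migrate_engine, checkfirst=True)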
+ +from sqlalchemy import Column, ForeignKey, MetaData, Table +from sqlalchemy import Boolean, DateTime, Integer, String + +from cinder.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + meta = MetaData(bind=migrate_engine) + + # encryption key UUID -- must be stored per volume + volumes = Table('volumes', meta, autoload=True) + encryption_key = Column('encryption_key_id', String(36)) + try: + volumes.create_column(encryption_key) + except Exception: + LOG.error(_("Column |%s| not created!"), repr(encryption_key)) + raise + + # encryption key UUID and volume type id -- must be stored per snapshot + snapshots = Table('snapshots', meta, autoload=True) + encryption_key = Column('encryption_key_id', String(36)) + try: + snapshots.create_column(encryption_key) + except Exception: + LOG.error(_("Column |%s| not created!"), repr(encryption_key)) + raise + volume_type = Column('volume_type_id', String(36)) + try: + snapshots.create_column(volume_type) + except Exception: + LOG.error(_("Column |%s| not created!"), repr(volume_type)) + raise + + volume_types = Table('volume_types', meta, autoload=True) + + # encryption types associated with particular volume type + encryption = Table( + 'encryption', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('cipher', String(length=255)), + Column('control_location', String(length=255), nullable=False), + Column('key_size', Integer), + Column('provider', String(length=255), nullable=False), + # NOTE(joel-coffman): The volume_type_id must be unique or else the + # referenced volume type becomes ambiguous. That is, specifying the + # volume type is not sufficient to identify a particular encryption + # scheme unless each volume type is associated with at most one + # encryption scheme. + Column('volume_type_id', String(length=36), + ForeignKey(volume_types.c.id), + primary_key=True, nullable=False), + mysql_engine='InnoDB', + mysql_charset='utf8' + ) + + try: + encryption.create() + except Exception: + LOG.error(_("Table |%s| not created!"), repr(encryption)) + raise + + +def downgrade(migrate_engine): + meta = MetaData(bind=migrate_engine) + + # drop encryption key UUID for volumes + volumes = Table('volumes', meta, autoload=True) + try: + volumes.c.encryption_key_id.drop() + except Exception: + LOG.error(_("encryption_key_id column not dropped from volumes")) + raise + + # drop encryption key UUID and volume type id for snapshots + snapshots = Table('snapshots', meta, autoload=True) + try: + snapshots.c.encryption_key_id.drop() + except Exception: + LOG.error(_("encryption_key_id column not dropped from snapshots")) + raise + try: + snapshots.c.volume_type_id.drop() + except Exception: + LOG.error(_("volume_type_id column not dropped from snapshots")) + raise + + # drop encryption types table + encryption = Table('encryption', meta, autoload=True) + try: + encryption.drop() + except Exception: + LOG.error(_("encryption table not dropped")) + raise diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py b/cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py new file mode 100644 index 0000000000..6957f5da07 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py @@ -0,0 +1,101 @@ +# Copyright (C) 2013 eBay Inc. 
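The NOTE in migration 017 is the key design point: because `volume_type_id` is the primary key of `encryption`, the database itself guarantees at most one encryption scheme per volume type. A sketch of the constraint firing, assuming a database that has already run this migration (the URL and row values are illustrative, not from the patch)::

    from sqlalchemy import MetaData, Table, create_engine
    from sqlalchemy.exc import IntegrityError

    engine = create_engine('sqlite:///scratch.sqlite')
    meta = MetaData(bind=engine)
    encryption = Table('encryption', meta, autoload=True)

    row = {'volume_type_id': 'a-volume-type-uuid',
           'provider': 'some.encryption.Provider',
           'control_location': 'front-end'}
    engine.execute(encryption.insert(), row)
    try:
        engine.execute(encryption.insert(), row)  # second scheme, same type
    except IntegrityError:
        pass  # rejected: the volume type stays unambiguous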
+# Copyright (C) 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Boolean, Column, DateTime
+from sqlalchemy import ForeignKey, MetaData, String, Table
+from migrate import ForeignKeyConstraint
+
+from cinder.openstack.common import log as logging
+
+LOG = logging.getLogger(__name__)
+
+
+def upgrade(migrate_engine):
+    """Add quality_of_service_specs table."""
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    quality_of_service_specs = Table(
+        'quality_of_service_specs', meta,
+        Column('created_at', DateTime(timezone=False)),
+        Column('updated_at', DateTime(timezone=False)),
+        Column('deleted_at', DateTime(timezone=False)),
+        Column('deleted', Boolean(create_constraint=True, name=None)),
+        Column('id', String(36), primary_key=True, nullable=False),
+        Column('specs_id', String(36),
+               ForeignKey('quality_of_service_specs.id')),
+        Column('key', String(255)),
+        Column('value', String(255)),
+        mysql_engine='InnoDB',
+        mysql_charset='utf8'
+    )
+
+    try:
+        quality_of_service_specs.create()
+    except Exception:
+        LOG.error(_("Table quality_of_service_specs not created!"))
+        raise
+
+    volume_types = Table('volume_types', meta, autoload=True)
+    qos_specs_id = Column('qos_specs_id', String(36),
+                          ForeignKey('quality_of_service_specs.id'))
+
+    try:
+        volume_types.create_column(qos_specs_id)
+        volume_types.update().values(qos_specs_id=None).execute()
+    except Exception:
+        LOG.error(_("Adding qos_specs_id column to volume_types table "
+                    "failed."))
+        raise
+
+
+def downgrade(migrate_engine):
+    """Remove quality_of_service_specs table."""
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    qos_specs = Table('quality_of_service_specs', meta, autoload=True)
+
+    if migrate_engine.name == 'mysql':
+        # NOTE(alanmeadows): MySQL cannot drop column qos_specs_id
+        # until the foreign key volume_types_ibfk_1 is removed. We
+        # remove the foreign key first, and then we drop the column.
+ table = Table('volume_types', meta, autoload=True) + ref_table = Table('volume_types', meta, autoload=True) + params = {'columns': [table.c['qos_specs_id']], + 'refcolumns': [ref_table.c['id']], + 'name': 'volume_types_ibfk_1'} + + try: + fkey = ForeignKeyConstraint(**params) + fkey.drop() + except Exception: + LOG.error(_("Dropping foreign key volume_types_ibfk_1 failed")) + + volume_types = Table('volume_types', meta, autoload=True) + qos_specs_id = Column('qos_specs_id', String(36)) + + try: + volume_types.drop_column(qos_specs_id) + except Exception: + LOG.error(_("Dropping qos_specs_id column failed.")) + raise + + try: + qos_specs.drop() + + except Exception: + LOG.error(_("Dropping quality_of_service_specs table failed.")) + raise diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/019_add_migration_status.py b/cinder/db/sqlalchemy/migrate_repo/versions/019_add_migration_status.py new file mode 100644 index 0000000000..5ae25f3b5a --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/019_add_migration_status.py @@ -0,0 +1,36 @@ +# Copyright 2013 IBM Corp. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from sqlalchemy import String, Column, MetaData, Table + + +def upgrade(migrate_engine): + """Add migration_status column to volumes.""" + meta = MetaData() + meta.bind = migrate_engine + + volumes = Table('volumes', meta, autoload=True) + migration_status = Column('migration_status', String(255)) + volumes.create_column(migration_status) + + +def downgrade(migrate_engine): + """Remove migration_status column from volumes.""" + meta = MetaData() + meta.bind = migrate_engine + + volumes = Table('volumes', meta, autoload=True) + migration_status = volumes.columns.migration_status + volumes.drop_column(migration_status) diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py b/cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py new file mode 100644 index 0000000000..f66f09ddee --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py @@ -0,0 +1,60 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
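On MySQL, the ForeignKeyConstraint dance in the 018 downgrade above amounts to two ALTER statements, in this order, because InnoDB refuses to drop a column that still backs a foreign key. Roughly, and assuming the auto-generated constraint really is named `volume_types_ibfk_1` (the reason the drop is wrapped in try/except)::

    migrate_engine.execute(
        'ALTER TABLE volume_types DROP FOREIGN KEY volume_types_ibfk_1')
    migrate_engine.execute(
        'ALTER TABLE volume_types DROP COLUMN qos_specs_id')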
+ +from sqlalchemy import Boolean, Column, DateTime +from sqlalchemy import Integer, MetaData, String, Table, ForeignKey + +from cinder.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + _volumes = Table('volumes', meta, autoload=True) + + # New table + volume_admin_metadata = Table( + 'volume_admin_metadata', meta, + Column('created_at', DateTime), + Column('updated_at', DateTime), + Column('deleted_at', DateTime), + Column('deleted', Boolean), + Column('id', Integer, primary_key=True, nullable=False), + Column('volume_id', String(length=36), ForeignKey('volumes.id'), + nullable=False), + Column('key', String(length=255)), + Column('value', String(length=255)), + mysql_engine='InnoDB', + mysql_charset='utf8' + ) + + try: + volume_admin_metadata.create() + except Exception: + LOG.error(_("Table |%s| not created!"), repr(volume_admin_metadata)) + raise + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + volume_admin_metadata = Table('volume_admin_metadata', + meta, + autoload=True) + try: + volume_admin_metadata.drop() + except Exception: + LOG.error(_("volume_admin_metadata table not dropped")) + raise diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py b/cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py new file mode 100644 index 0000000000..5c06e9c039 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py @@ -0,0 +1,85 @@ +# Copyright 2013 IBM Corp. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import datetime + +from cinder.openstack.common import log as logging +from oslo.config import cfg +from sqlalchemy import MetaData, Table + +# Get default values via config. The defaults will either +# come from the default values set in the quota option +# configuration or via cinder.conf if the user has configured +# default values for quotas there. +CONF = cfg.CONF +CONF.import_opt('quota_volumes', 'cinder.quota') +CONF.import_opt('quota_snapshots', 'cinder.quota') +CONF.import_opt('quota_gigabytes', 'cinder.quota') +LOG = logging.getLogger(__name__) + +CLASS_NAME = 'default' +CREATED_AT = datetime.datetime.now() + + +def upgrade(migrate_engine): + """Add default quota class data into DB.""" + meta = MetaData() + meta.bind = migrate_engine + + quota_classes = Table('quota_classes', meta, autoload=True) + + rows = quota_classes.count().\ + where(quota_classes.c.class_name == 'default').execute().scalar() + + # Do not add entries if there are already 'default' entries. We don't + # want to write over something the user added. + if rows: + LOG.info(_("Found existing 'default' entries in the quota_classes " + "table. 
Skipping insertion of default values.")) + return + + try: + #Set default volumes + qci = quota_classes.insert() + qci.execute({'created_at': CREATED_AT, + 'class_name': CLASS_NAME, + 'resource': 'volumes', + 'hard_limit': CONF.quota_volumes, + 'deleted': False, }) + #Set default snapshots + qci.execute({'created_at': CREATED_AT, + 'class_name': CLASS_NAME, + 'resource': 'snapshots', + 'hard_limit': CONF.quota_snapshots, + 'deleted': False, }) + #Set default gigabytes + qci.execute({'created_at': CREATED_AT, + 'class_name': CLASS_NAME, + 'resource': 'gigabytes', + 'hard_limit': CONF.quota_gigabytes, + 'deleted': False, }) + LOG.info(_("Added default quota class data into the DB.")) + except Exception: + LOG.error(_("Default quota class data not inserted into the DB.")) + raise + + +def downgrade(migrate_engine): + """Don't delete the 'default' entries at downgrade time. + + We don't know if the user had default entries when we started. + If they did, we wouldn't want to remove them. So, the safest + thing to do is just leave the 'default' entries at downgrade time. + """ + pass diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/022_add_reason_column_to_service.py b/cinder/db/sqlalchemy/migrate_repo/versions/022_add_reason_column_to_service.py new file mode 100644 index 0000000000..cc0288ee27 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/022_add_reason_column_to_service.py @@ -0,0 +1,30 @@ +# Copyright 2013 IBM Corp. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Column, MetaData, String, Table + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + services = Table('services', meta, autoload=True) + reason = Column('disabled_reason', String(255)) + services.create_column(reason) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + services = Table('services', meta, autoload=True) + services.drop_column('disabled_reason') diff --git a/cinder/db/sqlalchemy/migration.py b/cinder/db/sqlalchemy/migration.py index b694682e87..ebfdff8cb1 100644 --- a/cinder/db/sqlalchemy/migration.py +++ b/cinder/db/sqlalchemy/migration.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. @@ -16,53 +14,18 @@ # License for the specific language governing permissions and limitations # under the License. 
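Migration 021 above deliberately checks for existing 'default' rows before seeding, so rerunning it, or running it on a deployment that already customized its default quota class, changes nothing. A quick way to inspect what was seeded; the engine URL is illustrative and the printed values assume the stock config defaults::

    from sqlalchemy import MetaData, Table, create_engine, select

    engine = create_engine('sqlite:///scratch.sqlite')
    meta = MetaData(bind=engine)
    quota_classes = Table('quota_classes', meta, autoload=True)

    rows = (select([quota_classes.c.resource, quota_classes.c.hard_limit])
            .where(quota_classes.c.class_name == 'default')
            .execute().fetchall())
    print(rows)  # e.g. [('volumes', 10), ('snapshots', 10), ('gigabytes', 1000)]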
-import distutils.version as dist_version -import os - -from cinder.db import migration -from cinder.db.sqlalchemy.session import get_engine -from cinder import exception -from cinder import flags -from cinder.openstack.common import log as logging - - -import sqlalchemy -import migrate -from migrate.versioning import util as migrate_util - - -LOG = logging.getLogger(__name__) - - -@migrate_util.decorator -def patched_with_engine(f, *a, **kw): - url = a[0] - engine = migrate_util.construct_engine(url, **kw) - - try: - kw['engine'] = engine - return f(*a, **kw) - finally: - if isinstance(engine, migrate_util.Engine) and engine is not url: - migrate_util.log.debug('Disposing SQLAlchemy engine %s', engine) - engine.dispose() - - -# TODO(jkoelker) When migrate 0.7.3 is released and cinder depends -# on that version or higher, this can be removed -MIN_PKG_VERSION = dist_version.StrictVersion('0.7.3') -if (not hasattr(migrate, '__version__') or - dist_version.StrictVersion(migrate.__version__) < MIN_PKG_VERSION): - migrate_util.with_engine = patched_with_engine +import os -# NOTE(jkoelker) Delay importing migrate until we are patched from migrate import exceptions as versioning_exceptions from migrate.versioning import api as versioning_api from migrate.versioning.repository import Repository +import sqlalchemy -FLAGS = flags.FLAGS +from cinder.db.sqlalchemy.api import get_engine +from cinder import exception +INIT_VERSION = 000 _REPOSITORY = None @@ -95,12 +58,16 @@ def db_version(): meta.reflect(bind=engine) tables = meta.tables if len(tables) == 0: - db_version_control(migration.INIT_VERSION) + db_version_control(INIT_VERSION) return versioning_api.db_version(get_engine(), repository) else: raise exception.Error(_("Upgrade DB using Essex release first.")) +def db_initial_version(): + return INIT_VERSION + + def db_version_control(version=None): repository = _find_migrate_repo() versioning_api.version_control(get_engine(), repository, version) diff --git a/cinder/db/sqlalchemy/models.py b/cinder/db/sqlalchemy/models.py index ae2a6f51f9..5a29e6852d 100644 --- a/cinder/db/sqlalchemy/models.py +++ b/cinder/db/sqlalchemy/models.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright (c) 2011 X.commerce, a business unit of eBay Inc. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. @@ -21,84 +19,40 @@ SQLAlchemy models for cinder data. 
""" -from sqlalchemy.orm import relationship, backref, object_mapper -from sqlalchemy import Column, Integer, String, schema -from sqlalchemy import ForeignKey, DateTime, Boolean -from sqlalchemy.exc import IntegrityError + +from sqlalchemy import Column, Integer, String, Text, schema from sqlalchemy.ext.declarative import declarative_base +from sqlalchemy import ForeignKey, DateTime, Boolean +from sqlalchemy.orm import relationship, backref -from cinder.db.sqlalchemy.session import get_session +from oslo.config import cfg -from cinder import exception -from cinder import flags +from cinder.openstack.common.db.sqlalchemy import models from cinder.openstack.common import timeutils -FLAGS = flags.FLAGS +CONF = cfg.CONF BASE = declarative_base() -class CinderBase(object): +class CinderBase(models.TimestampMixin, + models.ModelBase): """Base class for Cinder Models.""" + __table_args__ = {'mysql_engine': 'InnoDB'} - __table_initialized__ = False - created_at = Column(DateTime, default=timeutils.utcnow) - updated_at = Column(DateTime, onupdate=timeutils.utcnow) + + # TODO(rpodolyaka): reuse models.SoftDeleteMixin in the next stage + # of implementing of BP db-cleanup deleted_at = Column(DateTime) deleted = Column(Boolean, default=False) metadata = None - def save(self, session=None): - """Save this object.""" - if not session: - session = get_session() - session.add(self) - try: - session.flush() - except IntegrityError, e: - if str(e).endswith('is not unique'): - raise exception.Duplicate(str(e)) - else: - raise - def delete(self, session=None): """Delete this object.""" self.deleted = True self.deleted_at = timeutils.utcnow() self.save(session=session) - def __setitem__(self, key, value): - setattr(self, key, value) - - def __getitem__(self, key): - return getattr(self, key) - - def get(self, key, default=None): - return getattr(self, key, default) - - def __iter__(self): - self._i = iter(object_mapper(self).columns) - return self - - def next(self): - n = self._i.next().name - return n, getattr(self, n) - - def update(self, values): - """Make the model object behave like a dict""" - for k, v in values.iteritems(): - setattr(self, k, v) - - def iteritems(self): - """Make the model object behave like a dict. - - Includes attributes from joins.""" - local = dict(self) - joined = dict([(k, v) for k, v in self.__dict__.iteritems() - if not k[0] == '_']) - local.update(joined) - return local.iteritems() - class Service(BASE, CinderBase): """Represents a running service on a host.""" @@ -111,24 +65,26 @@ class Service(BASE, CinderBase): report_count = Column(Integer, nullable=False, default=0) disabled = Column(Boolean, default=False) availability_zone = Column(String(255), default='cinder') - - -class CinderNode(BASE, CinderBase): - """Represents a running cinder service on a host.""" - - __tablename__ = 'cinder_nodes' - id = Column(Integer, primary_key=True) - service_id = Column(Integer, ForeignKey('services.id'), nullable=True) + disabled_reason = Column(String(255)) class Volume(BASE, CinderBase): """Represents a block storage device that can be attached to a vm.""" __tablename__ = 'volumes' id = Column(String(36), primary_key=True) + _name_id = Column(String(36)) # Don't access/modify this directly! 
+
+    @property
+    def name_id(self):
+        return self.id if not self._name_id else self._name_id
+
+    @name_id.setter
+    def name_id(self, value):
+        self._name_id = value
 
     @property
     def name(self):
-        return FLAGS.volume_name_template % self.id
+        return CONF.volume_name_template % self.name_id
 
     ec2_id = Column(Integer)
     user_id = Column(String(255))
@@ -140,10 +96,12 @@ def name(self):
     size = Column(Integer)
     availability_zone = Column(String(255))  # TODO(vish): foreign key?
     instance_uuid = Column(String(36))
+    attached_host = Column(String(255))
     mountpoint = Column(String(255))
     attach_time = Column(String(255))  # TODO(vish): datetime
     status = Column(String(255))  # TODO(vish): enum?
     attach_status = Column(String(255))  # TODO(vish): enum
+    migration_status = Column(String(255))
 
     scheduled_at = Column(DateTime)
     launched_at = Column(DateTime)
@@ -154,12 +112,18 @@ def name(self):
 
     provider_location = Column(String(255))
     provider_auth = Column(String(255))
+    provider_geometry = Column(String(255))
 
-    volume_type_id = Column(Integer)
+    volume_type_id = Column(String(36))
+    source_volid = Column(String(36))
+    encryption_key_id = Column(String(36))
+
+    deleted = Column(Boolean, default=False)
+    bootable = Column(Boolean, default=False)
 
 
 class VolumeMetadata(BASE, CinderBase):
-    """Represents a metadata key/value pair for a volume"""
+    """Represents a metadata key/value pair for a volume."""
     __tablename__ = 'volume_metadata'
     id = Column(Integer, primary_key=True)
     key = Column(String(255))
@@ -172,12 +136,28 @@ class VolumeMetadata(BASE, CinderBase):
                           'VolumeMetadata.deleted == False)')
 
 
+class VolumeAdminMetadata(BASE, CinderBase):
+    """Represents an administrator metadata key/value pair for a volume."""
+    __tablename__ = 'volume_admin_metadata'
+    id = Column(Integer, primary_key=True)
+    key = Column(String(255))
+    value = Column(String(255))
+    volume_id = Column(String(36), ForeignKey('volumes.id'), nullable=False)
+    volume = relationship(Volume, backref="volume_admin_metadata",
+                          foreign_keys=volume_id,
+                          primaryjoin='and_('
+                          'VolumeAdminMetadata.volume_id == Volume.id,'
+                          'VolumeAdminMetadata.deleted == False)')
+
+
 class VolumeTypes(BASE, CinderBase):
-    """Represent possible volume_types of volumes offered"""
+    """Represents possible volume_types of volumes offered."""
     __tablename__ = "volume_types"
-    id = Column(Integer, primary_key=True)
+    id = Column(String(36), primary_key=True)
     name = Column(String(255))
-
+    # A reference to the qos_specs entity
+    qos_specs_id = Column(String(36),
+                          ForeignKey('quality_of_service_specs.id'))
     volumes = relationship(Volume,
                            backref=backref('volume_type', uselist=False),
                            foreign_keys=id,
@@ -187,12 +167,12 @@ class VolumeTypes(BASE, CinderBase):
 
 
 class VolumeTypeExtraSpecs(BASE, CinderBase):
-    """Represents additional specs as key/value pairs for a volume_type"""
+    """Represents additional specs as key/value pairs for a volume_type."""
     __tablename__ = 'volume_type_extra_specs'
     id = Column(Integer, primary_key=True)
     key = Column(String(255))
     value = Column(String(255))
-    volume_type_id = Column(Integer,
+    volume_type_id = Column(String(36),
                             ForeignKey('volume_types.id'),
                             nullable=False)
     volume_type = relationship(
@@ -205,6 +185,78 @@ class VolumeTypeExtraSpecs(BASE, CinderBase):
     )
 
 
+class QualityOfServiceSpecs(BASE, CinderBase):
+    """Represents QoS specs as key/value pairs.
+
+    QoS specs is a standalone entity that can be associated/disassociated
+    with volume types (a one-to-many relation).
+    An adjacency list relationship pattern is used in this model in
+    order to represent hierarchical data within a flat table, e.g., the
+    following structure
+
+    qos-specs-1  'Rate-Limit'
+         |
+         +------>  consumer = 'front-end'
+         +------>  total_bytes_sec = 1048576
+         +------>  total_iops_sec = 500
+
+    qos-specs-2  'QoS_Level1'
+         |
+         +------>  consumer = 'back-end'
+         +------>  max-iops = 1000
+         +------>  min-iops = 200
+
+    is represented by:
+
+    id      specs_id  key              value
+    ------  --------  ---------------  ----------
+    UUID-1  NULL      QoSSpec_Name     Rate-Limit
+    UUID-2  UUID-1    consumer         front-end
+    UUID-3  UUID-1    total_bytes_sec  1048576
+    UUID-4  UUID-1    total_iops_sec   500
+    UUID-5  NULL      QoSSpec_Name     QoS_Level1
+    UUID-6  UUID-5    consumer         back-end
+    UUID-7  UUID-5    max-iops         1000
+    UUID-8  UUID-5    min-iops         200
+    """
+    __tablename__ = 'quality_of_service_specs'
+    id = Column(String(36), primary_key=True)
+    specs_id = Column(String(36), ForeignKey(id))
+    key = Column(String(255))
+    value = Column(String(255))
+
+    specs = relationship(
+        "QualityOfServiceSpecs",
+        cascade="all, delete-orphan",
+        backref=backref("qos_spec", remote_side=id),
+    )
+
+    vol_types = relationship(
+        VolumeTypes,
+        backref=backref('qos_specs'),
+        foreign_keys=id,
+        primaryjoin='and_('
+                    'or_(VolumeTypes.qos_specs_id == '
+                    'QualityOfServiceSpecs.id,'
+                    'VolumeTypes.qos_specs_id == '
+                    'QualityOfServiceSpecs.specs_id),'
+                    'QualityOfServiceSpecs.deleted == False)')
+
+
+class VolumeGlanceMetadata(BASE, CinderBase):
+    """Glance metadata for a bootable volume."""
+    __tablename__ = 'volume_glance_metadata'
+    id = Column(Integer, primary_key=True, nullable=False)
+    volume_id = Column(String(36), ForeignKey('volumes.id'))
+    snapshot_id = Column(String(36), ForeignKey('snapshots.id'))
+    key = Column(String(255))
+    value = Column(Text)
+    volume = relationship(Volume, backref="volume_glance_metadata",
+                          foreign_keys=volume_id,
+                          primaryjoin='and_('
+                          'VolumeGlanceMetadata.volume_id == Volume.id,'
+                          'VolumeGlanceMetadata.deleted == False)')
+
+
 class Quota(BASE, CinderBase):
     """Represents a single quota override for a project.
@@ -275,19 +327,25 @@ class Reservation(BASE, CinderBase): delta = Column(Integer) expire = Column(DateTime, nullable=False) + usage = relationship( + "QuotaUsage", + foreign_keys=usage_id, + primaryjoin='and_(Reservation.usage_id == QuotaUsage.id,' + 'QuotaUsage.deleted == 0)') + class Snapshot(BASE, CinderBase): - """Represents a block storage device that can be attached to a VM.""" + """Represents a snapshot of volume.""" __tablename__ = 'snapshots' id = Column(String(36), primary_key=True) @property def name(self): - return FLAGS.snapshot_name_template % self.id + return CONF.snapshot_name_template % self.id @property def volume_name(self): - return FLAGS.volume_name_template % self.volume_id + return self.volume.name # pylint: disable=E1101 user_id = Column(String(255)) project_id = Column(String(255)) @@ -300,9 +358,34 @@ def volume_name(self): display_name = Column(String(255)) display_description = Column(String(255)) + encryption_key_id = Column(String(36)) + volume_type_id = Column(String(36)) + + provider_location = Column(String(255)) + + volume = relationship(Volume, backref="snapshots", + foreign_keys=volume_id, + primaryjoin='Snapshot.volume_id == Volume.id') + + +class SnapshotMetadata(BASE, CinderBase): + """Represents a metadata key/value pair for a snapshot.""" + __tablename__ = 'snapshot_metadata' + id = Column(Integer, primary_key=True) + key = Column(String(255)) + value = Column(String(255)) + snapshot_id = Column(String(36), + ForeignKey('snapshots.id'), + nullable=False) + snapshot = relationship(Snapshot, backref="snapshot_metadata", + foreign_keys=snapshot_id, + primaryjoin='and_(' + 'SnapshotMetadata.snapshot_id == Snapshot.id,' + 'SnapshotMetadata.deleted == False)') + class IscsiTarget(BASE, CinderBase): - """Represents an iscsi target for a given host""" + """Represents an iscsi target for a given host.""" __tablename__ = 'iscsi_targets' __table_args__ = (schema.UniqueConstraint("target_num", "host"), {'mysql_engine': 'InnoDB'}) @@ -317,48 +400,71 @@ class IscsiTarget(BASE, CinderBase): 'IscsiTarget.deleted==False)') -class Migration(BASE, CinderBase): - """Represents a running host-to-host migration.""" - __tablename__ = 'migrations' - id = Column(Integer, primary_key=True, nullable=False) - # NOTE(tr3buchet): the ____compute variables are instance['host'] - source_compute = Column(String(255)) - dest_compute = Column(String(255)) - # NOTE(tr3buchet): dest_host, btw, is an ip address - dest_host = Column(String(255)) - old_instance_type_id = Column(Integer()) - new_instance_type_id = Column(Integer()) - instance_uuid = Column(String(255), - ForeignKey('instances.uuid'), - nullable=True) - #TODO(_cerberus_): enum +class Backup(BASE, CinderBase): + """Represents a backup of a volume to Swift.""" + __tablename__ = 'backups' + id = Column(String(36), primary_key=True) + + @property + def name(self): + return CONF.backup_name_template % self.id + + user_id = Column(String(255), nullable=False) + project_id = Column(String(255), nullable=False) + + volume_id = Column(String(36), nullable=False) + host = Column(String(255)) + availability_zone = Column(String(255)) + display_name = Column(String(255)) + display_description = Column(String(255)) + container = Column(String(255)) status = Column(String(255)) + fail_reason = Column(String(255)) + service_metadata = Column(String(255)) + service = Column(String(255)) + size = Column(Integer) + object_count = Column(Integer) -class SMFlavors(BASE, CinderBase): - """Represents a flavor for SM volumes.""" - __tablename__ 
= 'sm_flavors' - id = Column(Integer(), primary_key=True) - label = Column(String(255)) - description = Column(String(255)) +class Encryption(BASE, CinderBase): + """Represents encryption requirement for a volume type. + Encryption here is a set of performance characteristics describing + cipher, provider, and key_size for a certain volume type. + """ -class SMBackendConf(BASE, CinderBase): - """Represents the connection to the backend for SM.""" - __tablename__ = 'sm_backend_config' - id = Column(Integer(), primary_key=True) - flavor_id = Column(Integer, ForeignKey('sm_flavors.id'), nullable=False) - sr_uuid = Column(String(255)) - sr_type = Column(String(255)) - config_params = Column(String(2047)) + __tablename__ = 'encryption' + cipher = Column(String(255)) + key_size = Column(Integer) + provider = Column(String(255)) + control_location = Column(String(255)) + volume_type_id = Column(String(36), + ForeignKey('volume_types.id'), + primary_key=True) + volume_type = relationship( + VolumeTypes, + backref="encryption", + foreign_keys=volume_type_id, + primaryjoin='and_(' + 'Encryption.volume_type_id == VolumeTypes.id,' + 'Encryption.deleted == False)' + ) -class SMVolume(BASE, CinderBase): - __tablename__ = 'sm_volume' - id = Column(String(36), ForeignKey(Volume.id), primary_key=True) - backend_id = Column(Integer, ForeignKey('sm_backend_config.id'), - nullable=False) - vdi_uuid = Column(String(255)) +class Transfer(BASE, CinderBase): + """Represents a volume transfer request.""" + __tablename__ = 'transfers' + id = Column(String(36), primary_key=True) + volume_id = Column(String(36), ForeignKey('volumes.id')) + display_name = Column(String(255)) + salt = Column(String(255)) + crypt_hash = Column(String(255)) + expires_at = Column(DateTime) + volume = relationship(Volume, backref="transfer", + foreign_keys=volume_id, + primaryjoin='and_(' + 'Transfer.volume_id == Volume.id,' + 'Transfer.deleted == False)') def register_models(): @@ -369,16 +475,17 @@ def register_models(): connection is lost and needs to be reestablished. """ from sqlalchemy import create_engine - models = (Migration, + models = (Backup, Service, - SMBackendConf, - SMFlavors, - SMVolume, Volume, VolumeMetadata, + VolumeAdminMetadata, + SnapshotMetadata, + Transfer, VolumeTypeExtraSpecs, VolumeTypes, + VolumeGlanceMetadata, ) - engine = create_engine(FLAGS.sql_connection, echo=False) + engine = create_engine(CONF.database.connection, echo=False) for model in models: model.metadata.create_all(engine) diff --git a/cinder/db/sqlalchemy/session.py b/cinder/db/sqlalchemy/session.py deleted file mode 100644 index ef94c4b2bc..0000000000 --- a/cinder/db/sqlalchemy/session.py +++ /dev/null @@ -1,151 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
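The adjacency list documented on QualityOfServiceSpecs above makes traversal straightforward: a root row (one whose specs_id is NULL) names the spec, and the self-referential `specs` relationship yields its key/value children. A sketch of that read path, assuming a session bound to a database that already holds the 'Rate-Limit' rows from the docstring (the URL is illustrative)::

    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker

    from cinder.db.sqlalchemy import models

    engine = create_engine('sqlite:///scratch.sqlite')
    session = sessionmaker(bind=engine)()

    root = (session.query(models.QualityOfServiceSpecs)
            .filter_by(specs_id=None, value='Rate-Limit')
            .first())
    if root:
        for child in root.specs:  # children via the self-referential relation
            print('%s = %s' % (child.key, child.value))
    # consumer = front-end, total_bytes_sec = 1048576, total_iops_sec = 500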
- -"""Session Handling for SQLAlchemy backend.""" - -import time - -import sqlalchemy.interfaces -import sqlalchemy.orm -from sqlalchemy.exc import DisconnectionError, OperationalError -from sqlalchemy.pool import NullPool, StaticPool - -import cinder.exception -import cinder.flags as flags -from cinder.openstack.common import log as logging - - -FLAGS = flags.FLAGS -LOG = logging.getLogger(__name__) - -_ENGINE = None -_MAKER = None - - -def get_session(autocommit=True, expire_on_commit=False): - """Return a SQLAlchemy session.""" - global _MAKER - - if _MAKER is None: - engine = get_engine() - _MAKER = get_maker(engine, autocommit, expire_on_commit) - - session = _MAKER() - session.query = cinder.exception.wrap_db_error(session.query) - session.flush = cinder.exception.wrap_db_error(session.flush) - return session - - -def synchronous_switch_listener(dbapi_conn, connection_rec): - """Switch sqlite connections to non-synchronous mode""" - dbapi_conn.execute("PRAGMA synchronous = OFF") - - -def ping_listener(dbapi_conn, connection_rec, connection_proxy): - """ - Ensures that MySQL connections checked out of the - pool are alive. - - Borrowed from: - http://groups.google.com/group/sqlalchemy/msg/a4ce563d802c929f - """ - try: - dbapi_conn.cursor().execute('select 1') - except dbapi_conn.OperationalError, ex: - if ex.args[0] in (2006, 2013, 2014, 2045, 2055): - LOG.warn('Got mysql server has gone away: %s', ex) - raise DisconnectionError("Database server went away") - else: - raise - - -def is_db_connection_error(args): - """Return True if error in connecting to db.""" - # NOTE(adam_g): This is currently MySQL specific and needs to be extended - # to support Postgres and others. - conn_err_codes = ('2002', '2003', '2006') - for err_code in conn_err_codes: - if args.find(err_code) != -1: - return True - return False - - -def get_engine(): - """Return a SQLAlchemy engine.""" - global _ENGINE - if _ENGINE is None: - connection_dict = sqlalchemy.engine.url.make_url(FLAGS.sql_connection) - - engine_args = { - "pool_recycle": FLAGS.sql_idle_timeout, - "echo": False, - 'convert_unicode': True, - } - - # Map our SQL debug level to SQLAlchemy's options - if FLAGS.sql_connection_debug >= 100: - engine_args['echo'] = 'debug' - elif FLAGS.sql_connection_debug >= 50: - engine_args['echo'] = True - - if "sqlite" in connection_dict.drivername: - engine_args["poolclass"] = NullPool - - if FLAGS.sql_connection == "sqlite://": - engine_args["poolclass"] = StaticPool - engine_args["connect_args"] = {'check_same_thread': False} - - _ENGINE = sqlalchemy.create_engine(FLAGS.sql_connection, **engine_args) - - if 'mysql' in connection_dict.drivername: - sqlalchemy.event.listen(_ENGINE, 'checkout', ping_listener) - elif "sqlite" in connection_dict.drivername: - if not FLAGS.sqlite_synchronous: - sqlalchemy.event.listen(_ENGINE, 'connect', - synchronous_switch_listener) - - try: - _ENGINE.connect() - except OperationalError, e: - if not is_db_connection_error(e.args[0]): - raise - - remaining = FLAGS.sql_max_retries - if remaining == -1: - remaining = 'infinite' - while True: - msg = _('SQL connection failed. 
%s attempts left.') - LOG.warn(msg % remaining) - if remaining != 'infinite': - remaining -= 1 - time.sleep(FLAGS.sql_retry_interval) - try: - _ENGINE.connect() - break - except OperationalError, e: - if (remaining != 'infinite' and remaining == 0) or \ - not is_db_connection_error(e.args[0]): - raise - return _ENGINE - - -def get_maker(engine, autocommit=True, expire_on_commit=False): - """Return a SQLAlchemy sessionmaker using the given engine.""" - return sqlalchemy.orm.sessionmaker(bind=engine, - autocommit=autocommit, - expire_on_commit=expire_on_commit) diff --git a/cinder/exception.py b/cinder/exception.py index 045c8a6c2f..dfbb052469 100644 --- a/cinder/exception.py +++ b/cinder/exception.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. @@ -24,12 +22,26 @@ """ +import sys + +from oslo.config import cfg import webob.exc +from cinder.openstack.common.gettextutils import _ from cinder.openstack.common import log as logging + LOG = logging.getLogger(__name__) +exc_log_opts = [ + cfg.BoolOpt('fatal_exception_format_errors', + default=False, + help='make exception message format errors fatal'), +] + +CONF = cfg.CONF +CONF.register_opts(exc_log_opts) + class ConvertedException(webob.exc.WSGIHTTPException): def __init__(self, code=0, title="", explanation=""): @@ -39,49 +51,10 @@ def __init__(self, code=0, title="", explanation=""): super(ConvertedException, self).__init__() -class ProcessExecutionError(IOError): - def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None, - description=None): - self.exit_code = exit_code - self.stderr = stderr - self.stdout = stdout - self.cmd = cmd - self.description = description - - if description is None: - description = _('Unexpected error while running command.') - if exit_code is None: - exit_code = '-' - message = _('%(description)s\nCommand: %(cmd)s\n' - 'Exit code: %(exit_code)s\nStdout: %(stdout)r\n' - 'Stderr: %(stderr)r') % locals() - IOError.__init__(self, message) - - class Error(Exception): pass -class DBError(Error): - """Wraps an implementation specific exception.""" - def __init__(self, inner_exception=None): - self.inner_exception = inner_exception - super(DBError, self).__init__(str(inner_exception)) - - -def wrap_db_error(f): - def _wrap(*args, **kwargs): - try: - return f(*args, **kwargs) - except UnicodeEncodeError: - raise InvalidUnicodeParameter() - except Exception, e: - LOG.exception(_('DB exception wrapped.')) - raise DBError(e) - _wrap.func_name = f.func_name - return _wrap - - class CinderException(Exception): """Base Cinder Exception @@ -108,24 +81,43 @@ def __init__(self, message=None, **kwargs): try: message = self.message % kwargs - except Exception as e: + except Exception: + exc_info = sys.exc_info() # kwargs doesn't match a variable in the message # log the issue and the kwargs LOG.exception(_('Exception in string format operation')) for name, value in kwargs.iteritems(): LOG.error("%s: %s" % (name, value)) + if CONF.fatal_exception_format_errors: + raise exc_info[0], exc_info[1], exc_info[2] # at least get the core message out if something happened message = self.message + # NOTE(luisg): We put the actual message in 'msg' so that we can access + # it, because if we try to access the message via 'message' it will be + # overshadowed by the class' message attribute + self.msg = message super(CinderException, self).__init__(message) + 
def __unicode__(self): + return unicode(self.msg) -class DeprecatedConfig(CinderException): - message = _("Fatal call to deprecated config") + " %(msg)s" + +class VolumeBackendAPIException(CinderException): + message = _("Bad or unexpected response from the storage volume " + "backend API: %(data)s") + + +class VolumeDriverException(CinderException): + message = _("Volume driver reported an error: %(message)s") + + +class BackupDriverException(CinderException): + message = _("Backup driver reported an error: %(message)s") class GlanceConnectionFailed(CinderException): - message = _("Connection to glance failed") + ": %(reason)s" + message = _("Connection to glance failed: %(reason)s") class NotAuthorized(CinderException): @@ -145,13 +137,22 @@ class ImageNotAuthorized(CinderException): message = _("Not authorized for image %(image_id)s.") +class DriverNotInitialized(CinderException): + message = _("Volume driver not ready.") + + class Invalid(CinderException): message = _("Unacceptable parameters.") code = 400 class InvalidSnapshot(Invalid): - message = _("Invalid snapshot") + ": %(reason)s" + message = _("Invalid snapshot: %(reason)s") + + +class InvalidVolumeAttachMode(Invalid): + message = _("Invalid attaching mode '%(mode)s' for " + "volume %(volume_id)s.") class VolumeAttached(Invalid): @@ -162,37 +163,28 @@ class SfJsonEncodeFailure(CinderException): message = _("Failed to load data into json format") -class InvalidRequest(Invalid): - message = _("The request is invalid.") - - class InvalidResults(Invalid): message = _("The results are invalid.") class InvalidInput(Invalid): - message = _("Invalid input received") + ": %(reason)s" + message = _("Invalid input received: %(reason)s") class InvalidVolumeType(Invalid): - message = _("Invalid volume type") + ": %(reason)s" + message = _("Invalid volume type: %(reason)s") class InvalidVolume(Invalid): - message = _("Invalid volume") + ": %(reason)s" - - -class InvalidPortRange(Invalid): - message = _("Invalid port range %(from_port)s:%(to_port)s. %(msg)s") + message = _("Invalid volume: %(reason)s") class InvalidContentType(Invalid): message = _("Invalid content type %(content_type)s.") -class InvalidUnicodeParameter(Invalid): - message = _("Invalid Parameter: " - "Unicode is not supported by the current database.") +class InvalidHost(Invalid): + message = _("Invalid host: %(reason)s") # Cannot be templated as the error syntax varies. 
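The kwargs templating in CinderException above is the contract every subclass in this module relies on: the class-level `message` is a format template, the constructor interpolates keyword arguments into it and stores the result as `msg`, and a mismatched template is only logged unless fatal_exception_format_errors is set. A small sketch with a hypothetical subclass, using the module's `_` translation helper::

    class SnapshotIsBusy(CinderException):  # illustrative subclass
        message = _("Snapshot %(snapshot_name)s is busy.")

    try:
        raise SnapshotIsBusy(snapshot_name='snapshot-0001')
    except SnapshotIsBusy as exc:
        print(exc.msg)  # Snapshot snapshot-0001 is busy.

    # A wrong kwarg is logged and the raw template is kept as the message:
    exc = SnapshotIsBusy(wrong_name='snapshot-0001')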
@@ -201,6 +193,15 @@ class InvalidParameterValue(Invalid): message = _("%(err)s") +class InvalidAuthKey(Invalid): + message = _("Invalid auth key: %(reason)s") + + +class InvalidConfigurationValue(Invalid): + message = _('Value "%(value)s" is not valid for ' + 'configuration option "%(option)s"') + + class ServiceUnavailable(Invalid): message = _("Service is unavailable at this time.") @@ -209,8 +210,12 @@ class ImageUnacceptable(Invalid): message = _("Image %(image_id)s is unacceptable: %(reason)s") +class DeviceUnavailable(Invalid): + message = _("The device in the path %(path)s is unavailable: %(reason)s") + + class InvalidUUID(Invalid): - message = _("Expected a uuid but received %(uuid).") + message = _("Expected a uuid but received %(uuid)s.") class NotFound(CinderException): @@ -219,25 +224,30 @@ class NotFound(CinderException): safe = True -class PersistentVolumeFileNotFound(NotFound): - message = _("Volume %(volume_id)s persistence file could not be found.") - - class VolumeNotFound(NotFound): message = _("Volume %(volume_id)s could not be found.") -class SfAccountNotFound(NotFound): - message = _("Unable to locate account %(account_name)s on " - "Solidfire device") +class VolumeMetadataNotFound(NotFound): + message = _("Volume %(volume_id)s has no metadata with " + "key %(metadata_key)s.") -class VolumeNotFoundForInstance(VolumeNotFound): - message = _("Volume not found for instance %(instance_id)s.") +class VolumeAdminMetadataNotFound(NotFound): + message = _("Volume %(volume_id)s has no administration metadata with " + "key %(metadata_key)s.") -class VolumeMetadataNotFound(NotFound): - message = _("Volume %(volume_id)s has no metadata with " +class InvalidVolumeMetadata(Invalid): + message = _("Invalid metadata: %(reason)s") + + +class InvalidVolumeMetadataSize(Invalid): + message = _("Invalid metadata size: %(reason)s") + + +class SnapshotMetadataNotFound(NotFound): + message = _("Snapshot %(snapshot_id)s has no metadata with " "key %(metadata_key)s.") @@ -255,6 +265,11 @@ class VolumeTypeExtraSpecsNotFound(NotFound): "key %(extra_specs_key)s.") +class VolumeTypeInUse(CinderException): + message = _("Volume Type %(volume_type_id)s deletion is not allowed with " + "volumes present with the type.") + + class SnapshotNotFound(NotFound): message = _("Snapshot %(snapshot_id)s could not be found.") @@ -272,18 +287,6 @@ class ISCSITargetNotFoundForVolume(NotFound): message = _("No target id found for volume %(volume_id)s.") -class ISCSITargetCreateFailed(CinderException): - message = _("Failed to create iscsi target for volume %(volume_id)s.") - - -class ISCSITargetRemoveFailed(CinderException): - message = _("Failed to remove iscsi target for volume %(volume_id)s.") - - -class DiskNotFound(NotFound): - message = _("No disk at %(location)s") - - class InvalidImageRef(Invalid): message = _("Invalid image href %(image_href)s.") @@ -300,6 +303,14 @@ class HostNotFound(NotFound): message = _("Host %(host)s could not be found.") +class SchedulerHostFilterNotFound(NotFound): + message = _("Scheduler Host Filter %(filter_name)s could not be found.") + + +class SchedulerHostWeigherNotFound(NotFound): + message = _("Scheduler Host Weigher %(weigher_name)s could not be found.") + + class HostBinaryNotFound(NotFound): message = _("Could not find binary %(binary)s on host %(host)s.") @@ -341,42 +352,21 @@ class OverQuota(CinderException): message = _("Quota exceeded for resources: %(overs)s") -class MigrationNotFound(NotFound): - message = _("Migration %(migration_id)s could not be found.") - - 
-class MigrationNotFoundByStatus(MigrationNotFound): - message = _("Migration not found for instance %(instance_id)s " - "with status %(status)s.") - - class FileNotFound(NotFound): message = _("File %(file_path)s could not be found.") -class ClassNotFound(NotFound): - message = _("Class %(class_name)s could not be found: %(exception)s") - - -class NotAllowed(CinderException): - message = _("Action not allowed.") - - #TODO(bcwaldon): EOL this exception! class Duplicate(CinderException): pass -class KeyPairExists(Duplicate): - message = _("Key pair %(key_name)s already exists.") - - class VolumeTypeExists(Duplicate): - message = _("Volume Type %(name)s already exists.") + message = _("Volume Type %(id)s already exists.") -class MigrationError(CinderException): - message = _("Migration error") + ": %(reason)s" +class VolumeTypeEncryptionExists(Invalid): + message = _("Volume type encryption for type %(type_id)s already exists.") class MalformedRequestBody(CinderException): @@ -387,6 +377,10 @@ class ConfigNotFound(NotFound): message = _("Could not find config at %(path)s") +class ParameterNotFound(NotFound): + message = _("Could not find parameter %(param)s") + + class PasteAppNotFound(NotFound): message = _("Could not load paste app '%(name)s' from %(path)s") @@ -395,29 +389,36 @@ class NoValidHost(CinderException): message = _("No valid host was found. %(reason)s") +class NoMoreTargets(CinderException): + """No more available targets.""" + pass + + class WillNotSchedule(CinderException): message = _("Host %(host)s is not up or doesn't exist.") class QuotaError(CinderException): - message = _("Quota exceeded") + ": code=%(code)s" + message = _("Quota exceeded: code=%(code)s") code = 413 headers = {'Retry-After': 0} safe = True class VolumeSizeExceedsAvailableQuota(QuotaError): - message = _("Requested volume exceeds allowed volume size quota") - - -class VolumeSizeExceedsQuota(QuotaError): - message = _("Maximum volume size exceeded") + message = _("Requested volume or snapshot exceeds allowed Gigabytes " + "quota. Requested %(requested)sG, quota is %(quota)sG and " + "%(consumed)sG has been consumed.") class VolumeLimitExceeded(QuotaError): message = _("Maximum number of volumes allowed (%(allowed)d) exceeded") +class SnapshotLimitExceeded(QuotaError): + message = _("Maximum number of snapshots allowed (%(allowed)d) exceeded") + + class DuplicateSfVolumeNames(Duplicate): message = _("Detected more than one volume with name %(vol_name)s") @@ -427,66 +428,235 @@ class VolumeTypeCreateFailed(CinderException): "name %(name)s and specs %(extra_specs)s") -class SolidFireAPIException(CinderException): - message = _("Bad response from SolidFire API") +class UnknownCmd(VolumeDriverException): + message = _("Unknown or unsupported command %(cmd)s") -class SolidFireAPIDataException(SolidFireAPIException): - message = _("Error in SolidFire API response: data=%(data)s") +class MalformedResponse(VolumeDriverException): + message = _("Malformed response to command %(cmd)s: %(reason)s") -class UnknownCmd(Invalid): - message = _("Unknown or unsupported command %(cmd)s") +class FailedCmdWithDump(VolumeDriverException): + message = _("Operation failed with status=%(status)s. 
Full dump: %(data)s") -class MalformedResponse(Invalid): - message = _("Malformed response to command %(cmd)s: %(reason)s") +class GlanceMetadataExists(Invalid): + message = _("Glance metadata cannot be updated, key %(key)s" + " exists for volume id %(volume_id)s") -class BadHTTPResponseStatus(CinderException): - message = _("Bad HTTP response status %(status)s") +class GlanceMetadataNotFound(NotFound): + message = _("Glance metadata for volume/snapshot %(id)s cannot be found.") -class FailedCmdWithDump(CinderException): - message = _("Operation failed with status=%(status)s. Full dump: %(data)s") +class ExportFailure(Invalid): + message = _("Failed to export for volume: %(reason)s") -class ZadaraServerCreateFailure(CinderException): +class MetadataCreateFailure(Invalid): + message = _("Failed to create metadata for volume: %(reason)s") + + +class MetadataUpdateFailure(Invalid): + message = _("Failed to update metadata for volume: %(reason)s") + + +class MetadataCopyFailure(Invalid): + message = _("Failed to copy metadata to volume: %(reason)s") + + +class ImageCopyFailure(Invalid): + message = _("Failed to copy image to volume: %(reason)s") + + +class BackupInvalidCephArgs(BackupDriverException): + message = _("Invalid Ceph args provided for backup rbd operation") + + +class BackupOperationError(Invalid): + message = _("An error has occurred during backup operation") + + +class BackupRBDOperationFailed(BackupDriverException): + message = _("Backup RBD operation failed") + + +class BackupNotFound(NotFound): + message = _("Backup %(backup_id)s could not be found.") + + +class BackupFailedToGetVolumeBackend(NotFound): + message = _("Failed to identify volume backend.") + + +class InvalidBackup(Invalid): + message = _("Invalid backup: %(reason)s") + + +class SwiftConnectionFailed(BackupDriverException): + message = _("Connection to swift failed: %(reason)s") + + +class TransferNotFound(NotFound): + message = _("Transfer %(transfer_id)s could not be found.") + + +class VolumeMigrationFailed(CinderException): + message = _("Volume migration failed: %(reason)s") + + +class SSHInjectionThreat(CinderException): + message = _("SSH command injection detected: %(command)s") + + +class QoSSpecsExists(Duplicate): + message = _("QoS Specs %(specs_id)s already exists.") + + +class QoSSpecsCreateFailed(CinderException): + message = _("Failed to create qos_specs: " + "%(name)s with specs %(qos_specs)s.") + + +class QoSSpecsUpdateFailed(CinderException): + message = _("Failed to update qos_specs: " + "%(specs_id)s with specs %(qos_specs)s.") + + +class QoSSpecsNotFound(NotFound): + message = _("No such QoS spec %(specs_id)s.") + + +class QoSSpecsAssociateFailed(CinderException): + message = _("Failed to associate qos_specs: " + "%(specs_id)s with type %(type_id)s.") + + +class QoSSpecsDisassociateFailed(CinderException): + message = _("Failed to disassociate qos_specs: " + "%(specs_id)s with type %(type_id)s.") + + +class QoSSpecsKeyNotFound(NotFound): + message = _("QoS spec %(specs_id)s has no spec with " + "key %(specs_key)s.") + + +class InvalidQoSSpecs(Invalid): + message = _("Invalid qos specs: %(reason)s") + + +class QoSSpecsInUse(CinderException): + message = _("QoS Specs %(specs_id)s is still associated with entities.") + + +class KeyManagerError(CinderException): + msg_fmt = _("key manager error: %(reason)s") + + +# Driver specific exceptions +# Coraid +class CoraidException(VolumeDriverException): + message = _('Coraid Cinder Driver exception.') + + +class 
CoraidJsonEncodeFailure(CoraidException): + message = _('Failed to encode json data.') + + +class CoraidESMBadCredentials(CoraidException): + message = _('Login on ESM failed.') + + +class CoraidESMReloginFailed(CoraidException): + message = _('Relogin on ESM failed.') + + +class CoraidESMBadGroup(CoraidException): + message = _('Group with name "%(group_name)s" not found.') + + +class CoraidESMConfigureError(CoraidException): + message = _('ESM configure request failed: %(message)s.') + + +class CoraidESMNotAvailable(CoraidException): + message = _('Coraid ESM not available with reason: %(reason)s.') + + +# Zadara +class ZadaraException(VolumeDriverException): + message = _('Zadara Cinder Driver exception.') + + +class ZadaraServerCreateFailure(ZadaraException): message = _("Unable to create server object for initiator %(name)s") -class ZadaraServerNotFound(NotFound): +class ZadaraServerNotFound(ZadaraException): message = _("Unable to find server object for initiator %(name)s") -class ZadaraVPSANoActiveController(CinderException): +class ZadaraVPSANoActiveController(ZadaraException): message = _("Unable to find any active VPSA controller") -class ZadaraAttachmentsNotFound(NotFound): +class ZadaraAttachmentsNotFound(ZadaraException): message = _("Failed to retrieve attachments for volume %(name)s") -class ZadaraInvalidAttachmentInfo(Invalid): +class ZadaraInvalidAttachmentInfo(ZadaraException): message = _("Invalid attachment info for volume %(name)s: %(reason)s") -class InstanceNotFound(NotFound): - message = _("Instance %(instance_id)s could not be found.") +class BadHTTPResponseStatus(ZadaraException): + message = _("Bad HTTP response status %(status)s") -class VolumeBackendAPIException(CinderException): - message = _("Bad or unexpected response from the storage volume " - "backend API: %(data)s") +#SolidFire +class SolidFireAPIException(VolumeBackendAPIException): + message = _("Bad response from SolidFire API") + + +class SolidFireDriverException(VolumeDriverException): + message = _("SolidFire Cinder Driver exception") + + +class SolidFireAPIDataException(SolidFireAPIException): + message = _("Error in SolidFire API response: data=%(data)s") + + +class SolidFireAccountNotFound(SolidFireDriverException): + message = _("Unable to locate account %(account_name)s on " + "Solidfire device") + +# HP 3Par +class Invalid3PARDomain(VolumeDriverException): + message = _("Invalid 3PAR Domain: %(err)s") -class NfsException(CinderException): + +# NFS driver +class NfsException(VolumeDriverException): message = _("Unknown NFS exception") -class NfsNoSharesMounted(NotFound): +class NfsNoSharesMounted(VolumeDriverException): message = _("No mounted NFS shares found") -class NfsNoSuitableShareFound(NotFound): +class NfsNoSuitableShareFound(VolumeDriverException): + message = _("There is no share which can host %(volume_size)sG") + + +# Gluster driver +class GlusterfsException(VolumeDriverException): + message = _("Unknown Gluster exception") + + +class GlusterfsNoSharesMounted(VolumeDriverException): + message = _("No mounted Gluster shares found") + + +class GlusterfsNoSuitableShareFound(VolumeDriverException): message = _("There is no share which can host %(volume_size)sG") diff --git a/cinder/flow_utils.py b/cinder/flow_utils.py new file mode 100644 index 0000000000..05a89c32e1 --- /dev/null +++ b/cinder/flow_utils.py @@ -0,0 +1,36 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# For more information please visit: https://wiki.openstack.org/wiki/TaskFlow +from taskflow import task + + +def _make_task_name(cls, addons=None): + """Makes a pretty name for a task class.""" + base_name = ".".join([cls.__module__, cls.__name__]) + extra = '' + if addons: + extra = ';%s' % (", ".join([str(a) for a in addons])) + return base_name + extra + + +class CinderTask(task.Task): + """The root task class for all cinder tasks. + + It automatically names the given task using the module and class that + implement the given task as the task name. + """ + + def __init__(self, addons=None, **kwargs): + super(CinderTask, self).__init__(_make_task_name(self.__class__, + addons), + **kwargs) diff --git a/cinder/image/__init__.py b/cinder/image/__init__.py index 11af4932e9..e69de29bb2 100644 --- a/cinder/image/__init__.py +++ b/cinder/image/__init__.py @@ -1,16 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright (c) 2012 OpenStack, LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. diff --git a/cinder/image/glance.py b/cinder/image/glance.py index c81be9e404..dc38e29a4e 100644 --- a/cinder/image/glance.py +++ b/cinder/image/glance.py @@ -1,6 +1,5 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 OpenStack LLC. +# Copyright 2010 OpenStack Foundation +# Copyright 2013 NTT corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -17,64 +16,89 @@ """Implementation of an image service that uses Glance as the backend""" + from __future__ import absolute_import import copy import itertools import random +import shutil import sys import time import urlparse -import glanceclient import glanceclient.exc +from oslo.config import cfg from cinder import exception -from cinder import flags from cinder.openstack.common import jsonutils from cinder.openstack.common import log as logging from cinder.openstack.common import timeutils +glance_opts = [ + cfg.ListOpt('allowed_direct_url_schemes', + default=[], + help='A list of url schemes that can be downloaded directly ' + 'via the direct_url. Currently supported schemes: ' + '[file].'), +] +CONF = cfg.CONF +CONF.register_opts(glance_opts) LOG = logging.getLogger(__name__) -FLAGS = flags.FLAGS def _parse_image_ref(image_href): """Parse an image href into composite parts. 
:param image_href: href of an image - :returns: a tuple of the form (image_id, host, port) + :returns: a tuple of the form (image_id, netloc, use_ssl) :raises ValueError """ - o = urlparse.urlparse(image_href) - port = o.port or 80 - host = o.netloc.split(':', 1)[0] - image_id = o.path.split('/')[-1] - return (image_id, host, port) - - -def _create_glance_client(context, host, port, version=1): - """Instantiate a new glanceclient.Client object""" + url = urlparse.urlparse(image_href) + netloc = url.netloc + image_id = url.path.split('/')[-1] + use_ssl = (url.scheme == 'https') + return (image_id, netloc, use_ssl) + + +def _create_glance_client(context, netloc, use_ssl, + version=CONF.glance_api_version): + """Instantiate a new glanceclient.Client object.""" + if version is None: + version = CONF.glance_api_version params = {} - if FLAGS.auth_strategy == 'keystone': + if use_ssl: + scheme = 'https' + # https specific params + params['insecure'] = CONF.glance_api_insecure + params['ssl_compression'] = CONF.glance_api_ssl_compression + else: + scheme = 'http' + if CONF.auth_strategy == 'keystone': params['token'] = context.auth_token - endpoint = 'http://%s:%s' % (host, port) - + if CONF.glance_request_timeout is not None: + params['timeout'] = CONF.glance_request_timeout + endpoint = '%s://%s' % (scheme, netloc) return glanceclient.Client(str(version), endpoint, **params) def get_api_servers(): - """ - Shuffle a list of FLAGS.glance_api_servers and return an iterator + """Return Iterable over shuffled api servers. + + Shuffle a list of CONF.glance_api_servers and return an iterator that will cycle through the list, looping around to the beginning if necessary. """ api_servers = [] - for api_server in FLAGS.glance_api_servers: - host, port_str = api_server.split(':') - api_servers.append((host, int(port_str))) + for api_server in CONF.glance_api_servers: + if '//' not in api_server: + api_server = 'http://' + api_server + url = urlparse.urlparse(api_server) + netloc = url.netloc + use_ssl = (url.scheme == 'https') + api_servers.append((netloc, use_ssl)) random.shuffle(api_servers) return itertools.cycle(api_servers) @@ -82,38 +106,49 @@ def get_api_servers(): class GlanceClientWrapper(object): """Glance client wrapper class that implements retries.""" - def __init__(self, context=None, host=None, port=None, version=None): - if host is not None: - self.client = self._create_static_client(context, host, port, - version) + def __init__(self, context=None, netloc=None, use_ssl=False, + version=None): + if netloc is not None: + self.client = self._create_static_client(context, + netloc, + use_ssl, version) else: self.client = None self.api_servers = None + self.version = version - def _create_static_client(self, context, host, port, version): + def _create_static_client(self, context, netloc, use_ssl, version): """Create a client that we'll use for every call.""" - self.host = host - self.port = port + self.netloc = netloc + self.use_ssl = use_ssl self.version = version - return _create_glance_client(context, self.host, self.port, - self.version) + return _create_glance_client(context, + self.netloc, + self.use_ssl, self.version) def _create_onetime_client(self, context, version): """Create a client that will be used for one call.""" if self.api_servers is None: self.api_servers = get_api_servers() - self.host, self.port = self.api_servers.next() - return _create_glance_client(context, self.host, self.port, version) + self.netloc, self.use_ssl = self.api_servers.next() + return 
_create_glance_client(context,
+                                      self.netloc,
+                                      self.use_ssl, version)

-    def call(self, context, version, method, *args, **kwargs):
-        """
-        Call a glance client method.  If we get a connection error,
-        retry the request according to FLAGS.glance_num_retries.
+    def call(self, context, method, *args, **kwargs):
+        """Call a glance client method.
+
+        If we get a connection error,
+        retry the request according to CONF.glance_num_retries.
         """
+        version = self.version
+        if 'version' in kwargs:
+            version = kwargs['version']
+
         retry_excs = (glanceclient.exc.ServiceUnavailable,
-                      glanceclient.exc.InvalidEndpoint,
-                      glanceclient.exc.CommunicationError)
-        num_attempts = 1 + FLAGS.glance_num_retries
+                      glanceclient.exc.InvalidEndpoint,
+                      glanceclient.exc.CommunicationError)
+        num_attempts = 1 + CONF.glance_num_retries

         for attempt in xrange(1, num_attempts + 1):
             client = self.client or self._create_onetime_client(context,
@@ -121,17 +156,25 @@ def call(self, context, version, method, *args, **kwargs):
                                                                 version)
             try:
                 return getattr(client.images, method)(*args, **kwargs)
             except retry_excs as e:
-                host = self.host
-                port = self.port
+                netloc = self.netloc
                 extra = "retrying"
+                if attempt == num_attempts:
+                    extra = 'done trying'
                 error_msg = _("Error contacting glance server "
-                              "'%(host)s:%(port)s' for '%(method)s', %(extra)s.")
+                              "'%(netloc)s' for '%(method)s', "
+                              "%(extra)s.") % {'netloc': netloc,
+                                               'method': method,
+                                               'extra': extra,
+                                               }
+                LOG.exception(error_msg)
                 if attempt == num_attempts:
-                    extra = 'done trying'
-                    LOG.exception(error_msg, locals())
-                    raise exception.GlanceConnectionFailed(
-                        host=host, port=port, reason=str(e))
-                LOG.exception(error_msg, locals())
+                    raise exception.GlanceConnectionFailed(reason=str(e))
                 time.sleep(1)

@@ -145,7 +188,7 @@ def detail(self, context, **kwargs):
         """Calls out to Glance for a list of detailed image information."""
         params = self._extract_query_params(kwargs)
         try:
-            images = self._client.call(context, 1, 'list', **params)
+            images = self._client.call(context, 'list', **params)
         except Exception:
             _reraise_translated_exception()

@@ -165,16 +208,16 @@ def _extract_query_params(self, params):
                 _params[param] = params.get(param)

         # ensure filters is a dict
-        params.setdefault('filters', {})
+        _params.setdefault('filters', {})
         # NOTE(vish): don't filter out private images
-        params['filters'].setdefault('is_public', 'none')
+        _params['filters'].setdefault('is_public', 'none')

         return _params

     def show(self, context, image_id):
         """Returns a dict with image data for the given opaque image id."""
         try:
-            image = self._client.call(context, 1, 'get', image_id)
+            image = self._client.call(context, 'get', image_id)
         except Exception:
             _reraise_translated_image_exception(image_id)

@@ -186,27 +229,50 @@ def show(self, context, image_id):

     def get_location(self, context, image_id):
         """Returns the direct url representing the backend storage location,
-        or None if this attribute is not shown by Glance.
+ """ + if CONF.glance_api_version == 1: + # image location not available in v1 + return (None, None) try: - client = GlanceClientWrapper() - image_meta = client.call(context, 2, 'get', image_id) + # direct_url is returned by v2 api + client = GlanceClientWrapper(version=2) + image_meta = client.call(context, 'get', image_id) except Exception: _reraise_translated_image_exception(image_id) if not self._is_image_available(context, image_meta): raise exception.ImageNotFound(image_id=image_id) - return getattr(image_meta, 'direct_url', None) + # some glance stores like nfs only meta data + # is stored and returned as locations. + # so composite of two needs to be returned. + return (getattr(image_meta, 'direct_url', None), + getattr(image_meta, 'locations', None)) + + def download(self, context, image_id, data=None): + """Calls out to Glance for data and writes data.""" + if 'file' in CONF.allowed_direct_url_schemes: + location = self.get_location(context, image_id) + o = urlparse.urlparse(location) + if o.scheme == "file": + with open(o.path, "r") as f: + # a system call to cp could have significant performance + # advantages, however we do not have the path to files at + # this point in the abstraction. + shutil.copyfileobj(f, data) + return - def download(self, context, image_id, data): - """Calls out to Glance for metadata and data and writes data.""" try: - image_chunks = self._client.call(context, 1, 'data', image_id) + image_chunks = self._client.call(context, 'data', image_id) except Exception: _reraise_translated_image_exception(image_id) - for chunk in image_chunks: - data.write(chunk) + if not data: + return image_chunks + else: + for chunk in image_chunks: + data.write(chunk) def create(self, context, image_meta, data=None): """Store the image data and return the new image object.""" @@ -215,24 +281,31 @@ def create(self, context, image_meta, data=None): if data: sent_service_image_meta['data'] = data - recv_service_image_meta = self._client.call(context, 1, 'create', + recv_service_image_meta = self._client.call(context, 'create', **sent_service_image_meta) return self._translate_from_glance(recv_service_image_meta) - def update(self, context, image_id, image_meta, data=None, - purge_props=True): + def update(self, context, image_id, + image_meta, data=None, purge_props=True): """Modify the given image with the new data.""" image_meta = self._translate_to_glance(image_meta) - image_meta['purge_props'] = purge_props + #NOTE(dosaboy): see comment in bug 1210467 + if CONF.glance_api_version == 1: + image_meta['purge_props'] = purge_props #NOTE(bcwaldon): id is not an editable field, but it is likely to be # passed in by calling code. Let's be nice and ignore it. 
image_meta.pop('id', None) if data: image_meta['data'] = data try: - image_meta = self._client.call(context, 1, 'update', - image_id, **image_meta) + #NOTE(dosaboy): the v2 api separates update from upload + if data and CONF.glance_api_version > 1: + image_meta = self._client.call(context, 'upload', image_id, + image_meta['data']) + else: + image_meta = self._client.call(context, 'update', image_id, + **image_meta) except Exception: _reraise_translated_image_exception(image_id) else: @@ -246,7 +319,7 @@ def delete(self, context, image_id): """ try: - self._client.call(context, 1, 'delete', image_id) + self._client.call(context, 'delete', image_id) except glanceclient.exc.NotFound: raise exception.ImageNotFound(image_id=image_id) return True @@ -378,7 +451,7 @@ def _reraise_translated_exception(): def _translate_image_exception(image_id, exc_value): if isinstance(exc_value, (glanceclient.exc.Forbidden, - glanceclient.exc.Unauthorized)): + glanceclient.exc.Unauthorized)): return exception.ImageNotAuthorized(image_id=image_id) if isinstance(exc_value, glanceclient.exc.NotFound): return exception.ImageNotFound(image_id=image_id) @@ -389,7 +462,7 @@ def _translate_image_exception(image_id, exc_value): def _translate_plain_exception(exc_value): if isinstance(exc_value, (glanceclient.exc.Forbidden, - glanceclient.exc.Unauthorized)): + glanceclient.exc.Unauthorized)): return exception.NotAuthorized(exc_value) if isinstance(exc_value, glanceclient.exc.NotFound): return exception.NotFound(exc_value) @@ -417,9 +490,10 @@ def get_remote_image_service(context, image_href): return image_service, image_href try: - (image_id, glance_host, glance_port) = _parse_image_ref(image_href) + (image_id, glance_netloc, use_ssl) = _parse_image_ref(image_href) glance_client = GlanceClientWrapper(context=context, - host=glance_host, port=glance_port) + netloc=glance_netloc, + use_ssl=use_ssl) except ValueError: raise exception.InvalidImageRef(image_href=image_href) diff --git a/cinder/image/image_utils.py b/cinder/image/image_utils.py new file mode 100644 index 0000000000..e73e08e4bd --- /dev/null +++ b/cinder/image/image_utils.py @@ -0,0 +1,369 @@ +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# Copyright (c) 2010 Citrix Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Helper methods to deal with images. 
+
+This is essentially a copy of nova/virt/images.py, with some slight
+modifications; at some point we should look at pushing this up to Oslo.
+"""
+
+
+import contextlib
+import os
+import tempfile
+
+from oslo.config import cfg
+
+from cinder import exception
+from cinder.openstack.common import fileutils
+from cinder.openstack.common import imageutils
+from cinder.openstack.common import log as logging
+from cinder.openstack.common import processutils
+from cinder import units
+from cinder import utils
+from cinder.volume import utils as volume_utils
+
+LOG = logging.getLogger(__name__)
+
+image_helper_opt = [cfg.StrOpt('image_conversion_dir',
+                               default='$state_path/conversion',
+                               help='Directory used for temporary storage '
+                                    'during image conversion'), ]
+
+CONF = cfg.CONF
+CONF.register_opts(image_helper_opt)
+
+
+def qemu_img_info(path):
+    """Return an object containing the parsed output from qemu-img info."""
+    cmd = ('env', 'LC_ALL=C', 'qemu-img', 'info', path)
+    if os.name == 'nt':
+        cmd = cmd[2:]
+    out, err = utils.execute(*cmd, run_as_root=True)
+    return imageutils.QemuImgInfo(out)
+
+
+def convert_image(source, dest, out_format):
+    """Convert image to other format."""
+    cmd = ('qemu-img', 'convert', '-O', out_format, source, dest)
+    utils.execute(*cmd, run_as_root=True)
+
+
+def resize_image(source, size):
+    """Changes the virtual size of the image."""
+    cmd = ('qemu-img', 'resize', source, '%sG' % size)
+    utils.execute(*cmd, run_as_root=False)
+
+
+def fetch(context, image_service, image_id, path, _user_id, _project_id):
+    # TODO(vish): Improve context handling and add owner and auth data
+    #             when it is added to glance.  Right now there is no
+    #             auth checking in glance, so we assume that access was
+    #             checked before we got here.
+    with fileutils.remove_path_on_error(path):
+        with open(path, "wb") as image_file:
+            image_service.download(context, image_id, image_file)
+
+
+def fetch_verify_image(context, image_service, image_id, dest,
+                       user_id=None, project_id=None, size=None):
+    fetch(context, image_service, image_id, dest,
+          None, None)
+
+    with fileutils.remove_path_on_error(dest):
+        data = qemu_img_info(dest)
+        fmt = data.file_format
+        if fmt is None:
+            raise exception.ImageUnacceptable(
+                reason=_("'qemu-img info' parsing failed."),
+                image_id=image_id)
+
+        backing_file = data.backing_file
+        if backing_file is not None:
+            raise exception.ImageUnacceptable(
+                image_id=image_id,
+                reason=(_("fmt=%(fmt)s backed by: %(backing_file)s") %
+                        {'fmt': fmt, 'backing_file': backing_file}))
+
+        # NOTE(xqueralt): If the image virtual size doesn't fit in the
+        # requested volume there is no point in resizing it because it will
+        # generate an unusable image.
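+        # Illustrative example (hypothetical numbers): qemu-img reports the
+        # virtual size in bytes, so an image reported at 2147483648 bytes
+        # works out to 2 GB; requesting a 1 GB volume for it trips the check
+        # below and raises ImageUnacceptable instead of silently producing
+        # a truncated, unusable volume.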
+        if size is not None:
+            virt_size = data.virtual_size / units.GiB
+            if virt_size > size:
+                params = {'image_size': virt_size, 'volume_size': size}
+                reason = _("Size is %(image_size)dGB and doesn't fit in a "
+                           "volume of size %(volume_size)dGB.") % params
+                raise exception.ImageUnacceptable(image_id=image_id,
+                                                  reason=reason)
+
+
+def fetch_to_vhd(context, image_service,
+                 image_id, dest, blocksize,
+                 user_id=None, project_id=None):
+    fetch_to_volume_format(context, image_service, image_id, dest, 'vpc',
+                           blocksize, user_id, project_id)
+
+
+def fetch_to_raw(context, image_service,
+                 image_id, dest, blocksize,
+                 user_id=None, project_id=None, size=None):
+    fetch_to_volume_format(context, image_service, image_id, dest, 'raw',
+                           blocksize, user_id, project_id, size)
+
+
+def fetch_to_volume_format(context, image_service,
+                           image_id, dest, volume_format, blocksize,
+                           user_id=None, project_id=None, size=None):
+    if (CONF.image_conversion_dir and not
+            os.path.exists(CONF.image_conversion_dir)):
+        os.makedirs(CONF.image_conversion_dir)
+
+    qemu_img = True
+    image_meta = image_service.show(context, image_id)
+
+    # NOTE(avishay): I'm not crazy about creating temp files which may be
+    # large and cause disk full errors which would confuse users.
+    # Unfortunately it seems that you can't pipe to 'qemu-img convert'
+    # because it seeks. Maybe we can think of something for a future version.
+    with temporary_file() as tmp:
+        # We may be on a system that doesn't have qemu-img installed.  That
+        # is ok if we are working with a RAW image.  This logic checks to
+        # see if qemu-img is installed.  If not, we make sure the image is
+        # RAW and raise an exception otherwise.  Systems with qemu-img will
+        # always progress through the whole function.
+        try:
+            # Use the empty tmp file to make sure qemu_img_info works.
+            qemu_img_info(tmp)
+        except processutils.ProcessExecutionError:
+            qemu_img = False
+            if image_meta:
+                if image_meta['disk_format'] != 'raw':
+                    raise exception.ImageUnacceptable(
+                        reason=_("qemu-img is not installed and image is of "
+                                 "type %s. Only RAW images can be used if "
+                                 "qemu-img is not installed.") %
+                        image_meta['disk_format'],
+                        image_id=image_id)
+            else:
+                raise exception.ImageUnacceptable(
+                    reason=_("qemu-img is not installed and the disk "
+                             "format is not specified. Only RAW images "
+                             "can be used if qemu-img is not installed."),
+                    image_id=image_id)
+
+        fetch(context, image_service, image_id, tmp, user_id, project_id)
+
+        if is_xenserver_image(context, image_service, image_id):
+            replace_xenserver_image_with_coalesced_vhd(tmp)
+
+        if not qemu_img:
+            # qemu-img is not installed but we do have a RAW image.  As a
+            # result we only need to copy the image to the destination and
+            # then return.
+            LOG.debug(_('Copying image from %(tmp)s to volume %(dest)s - '
+                        'size: %(size)s') % {'tmp': tmp, 'dest': dest,
+                                             'size': image_meta['size']})
+            volume_utils.copy_volume(tmp, dest, image_meta['size'], blocksize)
+            return
+
+        data = qemu_img_info(tmp)
+        virt_size = data.virtual_size / units.GiB
+
+        # NOTE(xqueralt): If the image virtual size doesn't fit in the
+        # requested volume there is no point in resizing it because it will
+        # generate an unusable image.
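+        # Illustrative example (hypothetical numbers): an image of
+        # 2147483648 bytes gives virt_size = 2147483648 / units.GiB == 2,
+        # so with size=1 the check below rejects the image as too large
+        # for the requested volume.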
+        if size is not None and virt_size > size:
+            params = {'image_size': virt_size, 'volume_size': size}
+            reason = _("Size is %(image_size)dGB and doesn't fit in a "
+                       "volume of size %(volume_size)dGB.") % params
+            raise exception.ImageUnacceptable(image_id=image_id, reason=reason)
+
+        fmt = data.file_format
+        if fmt is None:
+            raise exception.ImageUnacceptable(
+                reason=_("'qemu-img info' parsing failed."),
+                image_id=image_id)
+
+        backing_file = data.backing_file
+        if backing_file is not None:
+            raise exception.ImageUnacceptable(
+                image_id=image_id,
+                reason=_("fmt=%(fmt)s backed by:%(backing_file)s")
+                % {'fmt': fmt, 'backing_file': backing_file, })
+
+        # NOTE(jdg): I'm using qemu-img convert to write
+        # to the volume regardless if it *needs* conversion or not
+        # TODO(avishay): We can speed this up by checking if the image is
+        # raw and if so, writing directly to the device. However, we need
+        # to keep the check via 'qemu-img info' that what we copied was in
+        # fact a raw image and not a different format with a backing file,
+        # which may be malicious.
+        LOG.debug("%s was %s, converting to %s " % (image_id, fmt,
+                                                    volume_format))
+        convert_image(tmp, dest, volume_format)
+
+        data = qemu_img_info(dest)
+        if data.file_format != volume_format:
+            raise exception.ImageUnacceptable(
+                image_id=image_id,
+                reason=_("Converted to %(vol_format)s, but format is "
+                         "now %(file_format)s") % {'vol_format': volume_format,
+                                                   'file_format':
+                                                   data.file_format})
+
+
+def upload_volume(context, image_service, image_meta, volume_path,
+                  volume_format='raw'):
+    image_id = image_meta['id']
+    if (image_meta['disk_format'] == volume_format):
+        LOG.debug("%s was %s, no need to convert to %s" %
+                  (image_id, volume_format, image_meta['disk_format']))
+        if os.name == 'nt' or os.access(volume_path, os.R_OK):
+            with fileutils.file_open(volume_path) as image_file:
+                image_service.update(context, image_id, {}, image_file)
+        else:
+            with utils.temporary_chown(volume_path):
+                with fileutils.file_open(volume_path) as image_file:
+                    image_service.update(context, image_id, {}, image_file)
+        return
+
+    if (CONF.image_conversion_dir and not
+            os.path.exists(CONF.image_conversion_dir)):
+        os.makedirs(CONF.image_conversion_dir)
+
+    fd, tmp = tempfile.mkstemp(dir=CONF.image_conversion_dir)
+    os.close(fd)
+    with fileutils.remove_path_on_error(tmp):
+        LOG.debug("%s was %s, converting to %s" %
+                  (image_id, volume_format, image_meta['disk_format']))
+        convert_image(volume_path, tmp, image_meta['disk_format'])
+
+        data = qemu_img_info(tmp)
+        if data.file_format != image_meta['disk_format']:
+            raise exception.ImageUnacceptable(
+                image_id=image_id,
+                reason=_("Converted to %(f1)s, but format is now %(f2)s") %
+                {'f1': image_meta['disk_format'], 'f2': data.file_format})
+
+        with fileutils.file_open(tmp) as image_file:
+            image_service.update(context, image_id, {}, image_file)
+        fileutils.delete_if_exists(tmp)
+
+
+def is_xenserver_image(context, image_service, image_id):
+    image_meta = image_service.show(context, image_id)
+    return is_xenserver_format(image_meta)
+
+
+def is_xenserver_format(image_meta):
+    return (
+        image_meta['disk_format'] == 'vhd'
+        and image_meta['container_format'] == 'ovf'
+    )
+
+
+def file_exist(fpath):
+    return os.path.exists(fpath)
+
+
+def set_vhd_parent(vhd_path, parentpath):
+    utils.execute('vhd-util', 'modify', '-n', vhd_path, '-p', parentpath)
+
+
+def extract_targz(archive_name, target):
+    utils.execute('tar', '-xzf', archive_name, '-C', target)
+
+
+def fix_vhd_chain(vhd_chain):
+    for child, parent 
in zip(vhd_chain[:-1], vhd_chain[1:]): + set_vhd_parent(child, parent) + + +def get_vhd_size(vhd_path): + out, err = utils.execute('vhd-util', 'query', '-n', vhd_path, '-v') + return int(out) + + +def resize_vhd(vhd_path, size, journal): + utils.execute( + 'vhd-util', 'resize', '-n', vhd_path, '-s', '%d' % size, '-j', journal) + + +def coalesce_vhd(vhd_path): + utils.execute( + 'vhd-util', 'coalesce', '-n', vhd_path) + + +def create_temporary_file(): + fd, tmp = tempfile.mkstemp(dir=CONF.image_conversion_dir) + os.close(fd) + return tmp + + +def rename_file(src, dst): + os.rename(src, dst) + + +@contextlib.contextmanager +def temporary_file(): + try: + tmp = create_temporary_file() + yield tmp + finally: + fileutils.delete_if_exists(tmp) + + +def temporary_dir(): + return utils.tempdir(dir=CONF.image_conversion_dir) + + +def coalesce_chain(vhd_chain): + for child, parent in zip(vhd_chain[:-1], vhd_chain[1:]): + with temporary_dir() as directory_for_journal: + size = get_vhd_size(child) + journal_file = os.path.join( + directory_for_journal, 'vhd-util-resize-journal') + resize_vhd(parent, size, journal_file) + coalesce_vhd(child) + + return vhd_chain[-1] + + +def discover_vhd_chain(directory): + counter = 0 + chain = [] + + while True: + fpath = os.path.join(directory, '%d.vhd' % counter) + if file_exist(fpath): + chain.append(fpath) + else: + break + counter += 1 + + return chain + + +def replace_xenserver_image_with_coalesced_vhd(image_file): + with temporary_dir() as tempdir: + extract_targz(image_file, tempdir) + chain = discover_vhd_chain(tempdir) + fix_vhd_chain(chain) + coalesced = coalesce_chain(chain) + fileutils.delete_if_exists(image_file) + rename_file(coalesced, image_file) diff --git a/cinder/keymgr/__init__.py b/cinder/keymgr/__init__.py new file mode 100644 index 0000000000..84daca4e53 --- /dev/null +++ b/cinder/keymgr/__init__.py @@ -0,0 +1,32 @@ +# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo.config import cfg + +from cinder.openstack.common import importutils + +keymgr_opts = [ + cfg.StrOpt('api_class', + default='cinder.keymgr.conf_key_mgr.ConfKeyManager', + help='The full class name of the key manager API class'), +] + +CONF = cfg.CONF +CONF.register_opts(keymgr_opts, group='keymgr') + + +def API(): + cls = importutils.import_class(CONF.keymgr.api_class) + return cls() diff --git a/cinder/keymgr/conf_key_mgr.py b/cinder/keymgr/conf_key_mgr.py new file mode 100644 index 0000000000..9915bfe576 --- /dev/null +++ b/cinder/keymgr/conf_key_mgr.py @@ -0,0 +1,134 @@ +# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +An implementation of a key manager that reads its key from the project's +configuration options. + +This key manager implementation provides limited security, assuming that the +key remains secret. Using the volume encryption feature as an example, +encryption provides protection against a lost or stolen disk, assuming that +the configuration file that contains the key is not stored on the disk. +Encryption also protects the confidentiality of data as it is transmitted via +iSCSI from the compute host to the storage host (again assuming that an +attacker who intercepts the data does not know the secret key). + +Because this implementation uses a single, fixed key, it proffers no +protection once that key is compromised. In particular, different volumes +encrypted with a key provided by this key manager actually share the same +encryption key so *any* volume can be decrypted once the fixed key is known. +""" + +import array + +from oslo.config import cfg + +from cinder import exception +from cinder.keymgr import key +from cinder.keymgr import key_mgr +from cinder.openstack.common.gettextutils import _ +from cinder.openstack.common import log as logging + + +key_mgr_opts = [ + cfg.StrOpt('fixed_key', + help='Fixed key returned by key manager, specified in hex'), +] + +CONF = cfg.CONF +CONF.register_opts(key_mgr_opts, group='keymgr') + + +LOG = logging.getLogger(__name__) + + +class ConfKeyManager(key_mgr.KeyManager): + """Key Manager that supports one key defined by the fixed_key conf option. + + This key manager implementation supports all the methods specified by the + key manager interface. This implementation creates a single key in response + to all invocations of create_key. Side effects (e.g., raising exceptions) + for each method are handled as specified by the key manager interface. + """ + + def __init__(self): + super(ConfKeyManager, self).__init__() + + self.key_id = '00000000-0000-0000-0000-000000000000' + + def _generate_key(self, **kwargs): + _hex = self._generate_hex_key(**kwargs) + return key.SymmetricKey('AES', + array.array('B', _hex.decode('hex')).tolist()) + + def _generate_hex_key(self, **kwargs): + if CONF.keymgr.fixed_key is None: + LOG.warn(_('config option keymgr.fixed_key has not been defined: ' + 'some operations may fail unexpectedly')) + raise ValueError(_('keymgr.fixed_key not defined')) + return CONF.keymgr.fixed_key + + def create_key(self, ctxt, **kwargs): + """Creates a key. + + This implementation returns a UUID for the created key. A + NotAuthorized exception is raised if the specified context is None. 
+ """ + if ctxt is None: + raise exception.NotAuthorized() + + return self.key_id + + def store_key(self, ctxt, key, **kwargs): + """Stores (i.e., registers) a key with the key manager.""" + if ctxt is None: + raise exception.NotAuthorized() + + if key != self._generate_key(): + raise exception.KeyManagerError( + reason="cannot store arbitrary keys") + + return self.key_id + + def copy_key(self, ctxt, key_id, **kwargs): + if ctxt is None: + raise exception.NotAuthorized() + + return self.key_id + + def get_key(self, ctxt, key_id, **kwargs): + """Retrieves the key identified by the specified id. + + This implementation returns the key that is associated with the + specified UUID. A NotAuthorized exception is raised if the specified + context is None; a KeyError is raised if the UUID is invalid. + """ + if ctxt is None: + raise exception.NotAuthorized() + + if key_id != self.key_id: + raise KeyError(key_id) + + return self._generate_key() + + def delete_key(self, ctxt, key_id, **kwargs): + if ctxt is None: + raise exception.NotAuthorized() + + if key_id != self.key_id: + raise exception.KeyManagerError( + reason="cannot delete non-existent key") + + LOG.warn(_("Not deleting key %s"), key_id) diff --git a/cinder/keymgr/key.py b/cinder/keymgr/key.py new file mode 100644 index 0000000000..54080ab64c --- /dev/null +++ b/cinder/keymgr/key.py @@ -0,0 +1,90 @@ +# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Base Key and SymmetricKey Classes + +This module defines the Key and SymmetricKey classes. The Key class is the base +class to represent all encryption keys. The basis for this class was copied +from Java. +""" + +import abc + +import six + + +@six.add_metaclass(abc.ABCMeta) +class Key(object): + """Base class to represent all keys.""" + + @abc.abstractmethod + def get_algorithm(self): + """Returns the key's algorithm. + + Returns the key's algorithm. For example, "DSA" indicates that this key + is a DSA key and "AES" indicates that this key is an AES key. + """ + pass + + @abc.abstractmethod + def get_format(self): + """Returns the encoding format. + + Returns the key's encoding format or None if this key is not encoded. + """ + pass + + @abc.abstractmethod + def get_encoded(self): + """Returns the key in the format specified by its encoding.""" + pass + + +class SymmetricKey(Key): + """This class represents symmetric keys.""" + + def __init__(self, alg, key): + """Create a new SymmetricKey object. + + The arguments specify the algorithm for the symmetric encryption and + the bytes for the key. 
+ """ + self.alg = alg + self.key = key + + def get_algorithm(self): + """Returns the algorithm for symmetric encryption.""" + return self.alg + + def get_format(self): + """This method returns 'RAW'.""" + return "RAW" + + def get_encoded(self): + """Returns the key in its encoded format.""" + return self.key + + def __eq__(self, other): + if isinstance(other, SymmetricKey): + return (self.alg == other.alg and + self.key == other.key) + return NotImplemented + + def __ne__(self, other): + result = self.__eq__(other) + if result is NotImplemented: + return result + return not result diff --git a/cinder/keymgr/key_mgr.py b/cinder/keymgr/key_mgr.py new file mode 100644 index 0000000000..4fb4f07bc0 --- /dev/null +++ b/cinder/keymgr/key_mgr.py @@ -0,0 +1,100 @@ +# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Key manager API +""" + +import abc + +import six + + +@six.add_metaclass(abc.ABCMeta) +class KeyManager(object): + """Base Key Manager Interface + + A Key Manager is responsible for managing encryption keys for volumes. A + Key Manager is responsible for creating, reading, and deleting keys. + """ + + @abc.abstractmethod + def create_key(self, ctxt, algorithm='AES', length=256, expiration=None, + **kwargs): + """Creates a key. + + This method creates a key and returns the key's UUID. If the specified + context does not permit the creation of keys, then a NotAuthorized + exception should be raised. + """ + pass + + @abc.abstractmethod + def store_key(self, ctxt, key, expiration=None, **kwargs): + """Stores (i.e., registers) a key with the key manager. + + This method stores the specified key and returns its UUID that + identifies it within the key manager. If the specified context does + not permit the creation of keys, then a NotAuthorized exception should + be raised. + """ + pass + + @abc.abstractmethod + def copy_key(self, ctxt, key_id, **kwargs): + """Copies (i.e., clones) a key stored by the key manager. + + This method copies the specified key and returns the copy's UUID. If + the specified context does not permit copying keys, then a + NotAuthorized error should be raised. + + Implementation note: This method should behave identically to + store_key(context, get_key(context, )) + although it is preferable to perform this operation within the key + manager to avoid unnecessary handling of the key material. + """ + pass + + @abc.abstractmethod + def get_key(self, ctxt, key_id, **kwargs): + """Retrieves the specified key. + + Implementations should verify that the caller has permissions to + retrieve the key by checking the context object passed in as ctxt. If + the user lacks permission then a NotAuthorized exception is raised. + + If the specified key does not exist, then a KeyError should be raised. + Implementations should preclude users from discerning the UUIDs of + keys that belong to other users by repeatedly calling this method. 
+ That is, keys that belong to other users should be considered "non- + existent" and completely invisible. + """ + pass + + @abc.abstractmethod + def delete_key(self, ctxt, key_id, **kwargs): + """Deletes the specified key. + + Implementations should verify that the caller has permission to delete + the key by checking the context object (ctxt). A NotAuthorized + exception should be raised if the caller lacks permission. + + If the specified key does not exist, then a KeyError should be raised. + Implementations should preclude users from discerning the UUIDs of + keys that belong to other users by repeatedly calling this method. + That is, keys that belong to other users should be considered "non- + existent" and completely invisible. + """ + pass diff --git a/cinder/keymgr/not_implemented_key_mgr.py b/cinder/keymgr/not_implemented_key_mgr.py new file mode 100644 index 0000000000..60f6470296 --- /dev/null +++ b/cinder/keymgr/not_implemented_key_mgr.py @@ -0,0 +1,41 @@ +# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Key manager implementation that raises NotImplementedError +""" + +from cinder.keymgr import key_mgr + + +class NotImplementedKeyManager(key_mgr.KeyManager): + """Key Manager Interface that raises NotImplementedError for all operations + """ + + def create_key(self, ctxt, algorithm='AES', length=256, expiration=None, + **kwargs): + raise NotImplementedError() + + def store_key(self, ctxt, key, expiration=None, **kwargs): + raise NotImplementedError() + + def copy_key(self, ctxt, key_id, **kwargs): + raise NotImplementedError() + + def get_key(self, ctxt, key_id, **kwargs): + raise NotImplementedError() + + def delete_key(self, ctxt, key_id, **kwargs): + raise NotImplementedError() diff --git a/cinder/locale/ar/LC_MESSAGES/cinder.po b/cinder/locale/ar/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..cf2fe75d82 --- /dev/null +++ b/cinder/locale/ar/LC_MESSAGES/cinder.po @@ -0,0 +1,10422 @@ +# Arabic translations for cinder. +# Copyright (C) 2013 ORGANIZATION +# This file is distributed under the same license as the cinder project. +# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Cinder\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-02-03 06:16+0000\n" +"PO-Revision-Date: 2013-09-12 07:56+0000\n" +"Last-Translator: Tom Fifield \n" +"Language-Team: Arabic " +"(http://www.transifex.com/projects/p/openstack/language/ar/)\n" +"Plural-Forms: nplurals=6; plural=n==0 ? 0 : n==1 ? 1 : n==2 ? 2 : " +"n%100>=3 && n%100<=10 ? 3 : n%100>=11 && n%100<=99 ? 
4 : 5\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:102 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:66 cinder/brick/exception.py:33 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:88 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:107 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:112 +#, python-format +msgid "Volume driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:116 +#, python-format +msgid "Backup driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:120 +#, python-format +msgid "Connection to glance failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:124 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:129 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:133 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:137 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:141 +msgid "Volume driver not ready." +msgstr "" + +#: cinder/exception.py:145 cinder/brick/exception.py:74 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:150 +#, python-format +msgid "Invalid snapshot: %(reason)s" +msgstr "" + +#: cinder/exception.py:154 +#, python-format +msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:159 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:163 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:167 +msgid "The results are invalid." +msgstr "" + +#: cinder/exception.py:171 +#, python-format +msgid "Invalid input received: %(reason)s" +msgstr "" + +#: cinder/exception.py:175 +#, python-format +msgid "Invalid volume type: %(reason)s" +msgstr "" + +#: cinder/exception.py:179 +#, python-format +msgid "Invalid volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:183 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:187 +#, python-format +msgid "Invalid host: %(reason)s" +msgstr "" + +#: cinder/exception.py:193 cinder/brick/exception.py:81 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:197 +#, python-format +msgid "Invalid auth key: %(reason)s" +msgstr "" + +#: cinder/exception.py:201 +#, python-format +msgid "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" +msgstr "" + +#: cinder/exception.py:206 +msgid "Service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:210 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:214 +#, python-format +msgid "The device in the path %(path)s is unavailable: %(reason)s" +msgstr "" + +#: cinder/exception.py:218 +#, python-format +msgid "Expected a uuid but received %(uuid)s." +msgstr "" + +#: cinder/exception.py:222 cinder/brick/exception.py:68 +msgid "Resource could not be found." 
+msgstr "" + +#: cinder/exception.py:228 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:232 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "" +"Volume %(volume_id)s has no administration metadata with key " +"%(metadata_key)s." +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Invalid metadata: %(reason)s" +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Invalid metadata size: %(reason)s" +msgstr "" + +#: cinder/exception.py:250 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:255 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:264 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:269 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s deletion is not allowed with volumes " +"present with the type." +msgstr "" + +#: cinder/exception.py:274 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:278 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:282 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:287 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:291 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:295 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:328 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:332 +#, python-format +msgid "Unknown quota resources %(unknown)s." +msgstr "" + +#: cinder/exception.py:336 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:340 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:344 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:348 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." 
+msgstr "" + +#: cinder/exception.py:352 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:356 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:365 +#, python-format +msgid "Volume Type %(id)s already exists." +msgstr "" + +#: cinder/exception.py:369 +#, python-format +msgid "Volume type encryption for type %(type_id)s already exists." +msgstr "" + +#: cinder/exception.py:373 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:377 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:381 +#, python-format +msgid "Could not find parameter %(param)s" +msgstr "" + +#: cinder/exception.py:385 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:389 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:398 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:402 +#, python-format +msgid "Quota exceeded: code=%(code)s" +msgstr "" + +#: cinder/exception.py:409 +#, python-format +msgid "" +"Requested volume or snapshot exceeds allowed Gigabytes quota. Requested " +"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." +msgstr "" + +#: cinder/exception.py:415 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:419 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:423 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:427 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:432 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:436 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:440 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:444 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:449 +#, python-format +msgid "Glance metadata for volume/snapshot %(id)s cannot be found." +msgstr "" + +#: cinder/exception.py:453 +#, python-format +msgid "Failed to export for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:457 +#, python-format +msgid "Failed to create metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:461 +#, python-format +msgid "Failed to update metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:465 +#, python-format +msgid "Failed to copy metadata to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:469 +#, python-format +msgid "Failed to copy image to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:473 +msgid "Invalid Ceph args provided for backup rbd operation" +msgstr "" + +#: cinder/exception.py:477 +msgid "An error has occurred during backup operation" +msgstr "" + +#: cinder/exception.py:481 +msgid "Backup RBD operation failed" +msgstr "" + +#: cinder/exception.py:485 +#, python-format +msgid "Backup %(backup_id)s could not be found." 
+msgstr "" + +#: cinder/exception.py:489 +msgid "Failed to identify volume backend." +msgstr "" + +#: cinder/exception.py:493 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "" + +#: cinder/exception.py:497 +#, python-format +msgid "Connection to swift failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:501 +#, python-format +msgid "Transfer %(transfer_id)s could not be found." +msgstr "" + +#: cinder/exception.py:505 +#, python-format +msgid "Volume migration failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:509 +#, python-format +msgid "SSH command injection detected: %(command)s" +msgstr "" + +#: cinder/exception.py:513 +#, python-format +msgid "QoS Specs %(specs_id)s already exists." +msgstr "" + +#: cinder/exception.py:517 +#, python-format +msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:522 +#, python-format +msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "No such QoS spec %(specs_id)s." +msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:536 +#, python-format +msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:541 +#, python-format +msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." +msgstr "" + +#: cinder/exception.py:546 +#, python-format +msgid "Invalid qos specs: %(reason)s" +msgstr "" + +#: cinder/exception.py:550 +#, python-format +msgid "QoS Specs %(specs_id)s is still associated with entities." +msgstr "" + +#: cinder/exception.py:554 +#, python-format +msgid "key manager error: %(reason)s" +msgstr "" + +#: cinder/exception.py:560 +msgid "Coraid Cinder Driver exception." +msgstr "" + +#: cinder/exception.py:564 +msgid "Failed to encode json data." +msgstr "" + +#: cinder/exception.py:568 +msgid "Login on ESM failed." +msgstr "" + +#: cinder/exception.py:572 +msgid "Relogin on ESM failed." +msgstr "" + +#: cinder/exception.py:576 +#, python-format +msgid "Group with name \"%(group_name)s\" not found." +msgstr "" + +#: cinder/exception.py:580 +#, python-format +msgid "ESM configure request failed: %(message)s." +msgstr "" + +#: cinder/exception.py:584 +#, python-format +msgid "Coraid ESM not available with reason: %(reason)s." +msgstr "" + +#: cinder/exception.py:589 +msgid "Zadara Cinder Driver exception." 
+msgstr "" + +#: cinder/exception.py:593 +#, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:597 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:601 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:605 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:609 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:613 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:618 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:622 +msgid "SolidFire Cinder Driver exception" +msgstr "" + +#: cinder/exception.py:626 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:630 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:636 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:641 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:645 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:649 cinder/exception.py:662 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:654 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:658 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/manager.py:133 +msgid "Notifying Schedulers of capabilities ..." +msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:105 +#, python-format +msgid "" +"Default quota for resource: %(res)s is set by the default quota flag: " +"quota_%(res)s, it is now deprecated. Please use the the default quota " +"class for default quota." +msgstr "" + +#: cinder/quota.py:748 +#, python-format +msgid "Created reservations %s" +msgstr "" + +#: cinder/quota.py:770 +#, python-format +msgid "Failed to commit reservations %s" +msgstr "" + +#: cinder/quota.py:790 +#, python-format +msgid "Failed to roll back reservations %s" +msgstr "" + +#: cinder/quota.py:876 +msgid "Cannot register resource" +msgstr "" + +#: cinder/quota.py:879 +msgid "Cannot register resources" +msgstr "" + +#: cinder/quota_utils.py:46 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume - " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/quota_utils.py:56 cinder/transfer/api.py:168 +#: cinder/volume/flows/api/create_volume.py:520 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/service.py:95 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:108 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:148 +#, python-format +msgid "" +"Report interval must be less than service down time. Current config " +"service_down_time: %(service_down_time)s, report_interval for this: " +"service is: %(report_interval)s. 
Setting global service_down_time to: " +"%(new_down_time)s" +msgstr "" + +#: cinder/service.py:216 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:255 +msgid "The service database object disappeared, recreating it." +msgstr "" + +#: cinder/service.py:270 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:276 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:298 +#, python-format +msgid "" +"Value of config option %(name)s_workers must be an integer greater than " +"1. Input value ignored." +msgstr "" + +#: cinder/service.py:373 +msgid "serve() can only be called once" +msgstr "" + +#: cinder/service.py:379 cinder/openstack/common/service.py:166 +#: cinder/openstack/common/service.py:384 +msgid "Full set of CONF:" +msgstr "" + +#: cinder/service.py:387 +#, python-format +msgid "%s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Cannot translate %s to an integer." +msgstr "" + +#: cinder/utils.py:127 +#, python-format +msgid "May specify only one of %s" +msgstr "" + +#: cinder/utils.py:212 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:228 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:412 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:423 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:698 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:759 +#, python-format +msgid "Volume driver %s not initialized" +msgstr "" + +#: cinder/wsgi.py:127 cinder/openstack/common/sslutils.py:50 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: cinder/wsgi.py:130 cinder/openstack/common/sslutils.py:53 +#, python-format +msgid "Unable to find ca_file : %s" +msgstr "" + +#: cinder/wsgi.py:133 cinder/openstack/common/sslutils.py:56 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: cinder/wsgi.py:136 cinder/openstack/common/sslutils.py:59 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:169 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:206 +#, python-format +msgid "Started %(name)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:244 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:313 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." 
+msgstr "" + +#: cinder/api/common.py:92 cinder/api/common.py:126 cinder/volume/api.py:266 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:95 cinder/api/common.py:130 cinder/volume/api.py:263 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:120 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:134 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:162 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:189 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:182 +msgid "Initializing extension manager." +msgstr "" + +#: cinder/api/extensions.py:197 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:235 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:236 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:240 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:256 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:262 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:276 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:287 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:356 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:266 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:463 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:786 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:907 +msgid "subclasses must implement construct()!" 
+msgstr "" + +#: cinder/api/contrib/admin_actions.py:81 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:227 +#: cinder/api/contrib/volume_transfer.py:157 +#: cinder/api/contrib/volume_transfer.py:193 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:224 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:60 +msgid "Snapshot not found." +msgstr "" + +#: cinder/api/contrib/hosts.py:86 cinder/api/openstack/wsgi.py:245 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:136 +#, python-format +msgid "Host '%s' could not be found." +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:168 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:180 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:206 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:214 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:111 +msgid "Please specify a name for QoS specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:220 +msgid "Failed to disassociate qos specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:222 +msgid "Qos specs still in use." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:298 +#: cinder/api/contrib/qos_specs_manage.py:351 +msgid "Volume Type id must not be None." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:72 +msgid "Missing required element quota_class_set in request body." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:81 +msgid "Quota class limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:85 +msgid "Quota class limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:60 +msgid "Quota limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quotas.py:65 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:100 +msgid "Missing required element quota_set in request body." +msgstr "" + +#: cinder/api/contrib/quotas.py:111 +#, python-format +msgid "Bad key(s) in quota set: %s" +msgstr "" + +#: cinder/api/contrib/scheduler_hints.py:36 +msgid "Malformed scheduler_hints attribute" +msgstr "" + +#: cinder/api/contrib/services.py:84 +msgid "" +"Query by service parameter is deprecated. Please use binary parameter " +"instead." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:51 +msgid "'status' must be specified." 
+msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:61 +#, python-format +msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:67 +#, python-format +msgid "" +"Provided snapshot status %(provided)s not allowed for snapshot with " +"status %(current)s." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:79 +msgid "progress must be an integer percentage" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:101 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:105 +#: cinder/api/v1/snapshot_metadata.py:75 cinder/api/v1/volume_metadata.py:75 +#: cinder/api/v2/snapshot_metadata.py:75 cinder/api/v2/volume_metadata.py:74 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:108 +#: cinder/api/v1/snapshot_metadata.py:79 cinder/api/v1/volume_metadata.py:79 +#: cinder/api/v2/snapshot_metadata.py:79 cinder/api/v2/volume_metadata.py:78 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:150 +msgid "" +"Key names can only contain alphanumeric characters, underscores, periods," +" colons and hyphens." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:99 +#, python-format +msgid "" +"Invalid request to attach volume to an instance %(instance_uuid)s and a " +"host %(host_name)s simultaneously" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:107 +msgid "Invalid request to attach volume to an invalid target" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:111 +msgid "" +"Invalid request to attach volume with an invalid mode. Attaching mode " +"should be 'rw' or 'ro'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:196 +msgid "Unable to fetch connection information from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:216 +msgid "Unable to terminate volume connection from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:229 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:237 +msgid "Bad value for 'force' parameter." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:240 +msgid "'force' is not string or bool." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:280 +msgid "New volume size must be specified as an integer." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:299 +msgid "Must specify readonly in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:307 +msgid "Bad value for 'readonly'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:311 +msgid "'readonly' not string or bool" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:325 +msgid "New volume type must be specified." 
+msgstr "" + +#: cinder/api/contrib/volume_transfer.py:131 +msgid "Listing volume transfers" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:147 +#, python-format +msgid "Creating new volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:162 +#, python-format +msgid "Creating transfer of volume %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:183 +#, python-format +msgid "Accepting volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:196 +#, python-format +msgid "Accepting transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:217 +#, python-format +msgid "Delete transfer with id: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:64 +msgid "key_size must be non-negative" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:67 +msgid "key_size must be an integer" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:73 +msgid "provider must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:75 +msgid "control_location must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:83 +#, python-format +msgid "Valid control location are: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:111 +msgid "Cannot create encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:115 +msgid "Create body is not valid." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:157 +msgid "Cannot delete encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/middleware/auth.py:108 +msgid "Invalid service catalog json." +msgstr "" + +#: cinder/api/middleware/fault.py:44 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:53 cinder/api/openstack/wsgi.py:984 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/fault.py:69 +#, python-format +msgid "%(exception)s: %(explanation)s" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:55 cinder/api/middleware/sizelimit.py:64 +#: cinder/api/middleware/sizelimit.py:78 +msgid "Request is too large." +msgstr "" + +#: cinder/api/openstack/__init__.py:69 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:80 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:104 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:126 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." 
+msgstr "" + +#: cinder/api/openstack/wsgi.py:220 cinder/api/openstack/wsgi.py:634 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:639 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:677 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:682 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:685 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:793 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:799 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:803 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:914 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:917 cinder/api/openstack/wsgi.py:930 +#: cinder/api/v1/snapshot_metadata.py:53 cinder/api/v1/snapshot_metadata.py:71 +#: cinder/api/v1/snapshot_metadata.py:96 cinder/api/v1/snapshot_metadata.py:121 +#: cinder/api/v1/volume_metadata.py:53 cinder/api/v1/volume_metadata.py:71 +#: cinder/api/v1/volume_metadata.py:96 cinder/api/v1/volume_metadata.py:121 +#: cinder/api/v2/snapshot_metadata.py:53 cinder/api/v2/snapshot_metadata.py:71 +#: cinder/api/v2/snapshot_metadata.py:96 cinder/api/v2/snapshot_metadata.py:121 +#: cinder/api/v2/volume_metadata.py:52 cinder/api/v2/volume_metadata.py:70 +#: cinder/api/v2/volume_metadata.py:95 cinder/api/v2/volume_metadata.py:120 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:927 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:939 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:987 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:139 cinder/api/v2/limits.py:138 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:264 cinder/api/v2/limits.py:261 +msgid "This request was rate-limited." 
+msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:37 cinder/api/v1/snapshot_metadata.py:117 +#: cinder/api/v1/snapshot_metadata.py:156 cinder/api/v2/snapshot_metadata.py:37 +#: cinder/api/v2/snapshot_metadata.py:117 +#: cinder/api/v2/snapshot_metadata.py:156 +msgid "snapshot does not exist" +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:139 +#: cinder/api/v1/snapshot_metadata.py:149 cinder/api/v1/volume_metadata.py:139 +#: cinder/api/v1/volume_metadata.py:149 cinder/api/v2/snapshot_metadata.py:139 +#: cinder/api/v2/snapshot_metadata.py:149 cinder/api/v2/volume_metadata.py:138 +#: cinder/api/v2/volume_metadata.py:148 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:119 cinder/api/v2/snapshots.py:120 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:184 +msgid "'volume_id' must be specified" +msgstr "" + +#: cinder/api/v1/snapshots.py:182 cinder/api/v2/snapshots.py:193 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:186 cinder/api/v2/snapshots.py:202 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:37 cinder/api/v1/volume_metadata.py:117 +#: cinder/api/v1/volume_metadata.py:156 cinder/api/v2/volume_metadata.py:36 +#: cinder/api/v2/volume_metadata.py:116 cinder/api/v2/volume_metadata.py:155 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:111 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:290 cinder/api/v2/volumes.py:228 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:344 cinder/api/v1/volumes.py:348 +#: cinder/api/v2/volumes.py:298 cinder/api/v2/volumes.py:302 +msgid "Invalid imageRef provided." +msgstr "" + +#: cinder/api/v1/volumes.py:388 cinder/api/v2/volumes.py:354 +#, python-format +msgid "snapshot id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:401 +#, python-format +msgid "source vol id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:412 cinder/api/v2/volumes.py:377 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/v1/volumes.py:496 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/snapshots.py:111 cinder/api/v2/snapshots.py:126 +#: cinder/api/v2/snapshots.py:267 +msgid "Snapshot could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:174 cinder/api/v2/snapshots.py:234 +#: cinder/api/v2/volumes.py:313 cinder/api/v2/volumes.py:419 +#, python-format +msgid "Missing required element '%s' in request body" +msgstr "" + +#: cinder/api/v2/snapshots.py:190 cinder/api/v2/volumes.py:217 +#: cinder/api/v2/volumes.py:234 cinder/api/v2/volumes.py:449 +msgid "Volume could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:230 cinder/api/v2/volumes.py:415 +msgid "Missing request body" +msgstr "" + +#: cinder/api/v2/types.py:70 +msgid "Volume type not found" +msgstr "" + +#: cinder/api/v2/volumes.py:237 +msgid "Volume cannot be deleted while in attached state" +msgstr "" + +#: cinder/api/v2/volumes.py:343 +msgid "Volume type not found." 
+msgstr "" + +#: cinder/api/v2/volumes.py:366 +#, python-format +msgid "source volume id:%s not found" +msgstr "" + +#: cinder/api/v2/volumes.py:472 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:66 +msgid "Backup status must be available or error" +msgstr "" + +#: cinder/backup/api.py:105 +msgid "Volume to be backed up must be available" +msgstr "" + +#: cinder/backup/api.py:140 +msgid "Backup status must be available" +msgstr "" + +#: cinder/backup/api.py:145 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:154 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:170 +msgid "Volume to be restored to must be available" +msgstr "" + +#: cinder/backup/api.py:176 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." +msgstr "" + +#: cinder/backup/api.py:181 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:97 +msgid "NULL host not allowed for volume backend lookup." +msgstr "" + +#: cinder/backup/manager.py:100 +#, python-format +msgid "Checking hostname '%s' for backend info." +msgstr "" + +#: cinder/backup/manager.py:107 +#, python-format +msgid "Backend not found in hostname (%s) so using default." +msgstr "" + +#: cinder/backup/manager.py:117 +#, python-format +msgid "Manager requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:120 cinder/backup/manager.py:132 +msgid "Fetching default backend." +msgstr "" + +#: cinder/backup/manager.py:123 +#, python-format +msgid "Volume manager for backend '%s' does not exist." +msgstr "" + +#: cinder/backup/manager.py:129 +#, python-format +msgid "Driver requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:147 +#, python-format +msgid "" +"Registering backend %(backend)s (host=%(host)s " +"backend_name=%(backend_name)s)." +msgstr "" + +#: cinder/backup/manager.py:154 +#, python-format +msgid "Registering default backend %s." +msgstr "" + +#: cinder/backup/manager.py:158 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)." +msgstr "" + +#: cinder/backup/manager.py:165 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s." +msgstr "" + +#: cinder/backup/manager.py:184 +msgid "Cleaning up incomplete backup operations." +msgstr "" + +#: cinder/backup/manager.py:189 +#, python-format +msgid "Resetting volume %s to available (was backing-up)." +msgstr "" + +#: cinder/backup/manager.py:194 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)." +msgstr "" + +#: cinder/backup/manager.py:206 +#, python-format +msgid "Resetting backup %s to error (was creating)." +msgstr "" + +#: cinder/backup/manager.py:212 +#, python-format +msgid "Resetting backup %s to available (was restoring)." +msgstr "" + +#: cinder/backup/manager.py:217 +#, python-format +msgid "Resuming delete on backup: %s." +msgstr "" + +#: cinder/backup/manager.py:225 +#, python-format +msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:237 +#, python-format +msgid "" +"Create backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s." 
+msgstr "" + +#: cinder/backup/manager.py:249 +#, python-format +msgid "" +"Create backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:282 +#, python-format +msgid "Create backup finished. backup: %s." +msgstr "" + +#: cinder/backup/manager.py:286 +#, python-format +msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:299 +#, python-format +msgid "" +"Restore backup aborted: expected volume status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:310 +#, python-format +msgid "" +"Restore backup aborted: expected backup status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:329 +#, python-format +msgid "" +"Restore backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:360 +#, python-format +msgid "" +"Restore backup finished, backup %(backup_id)s restored to volume " +"%(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:379 +#, python-format +msgid "Delete backup started, backup: %s." +msgstr "" + +#: cinder/backup/manager.py:386 +#, python-format +msgid "" +"Delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:399 +#, python-format +msgid "" +"Delete backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:422 +#, python-format +msgid "Delete backup finished, backup %s deleted." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:116 +msgid "" +"rbd striping not supported - ignoring configuration settings for rbd " +"striping" +msgstr "" + +#: cinder/backup/drivers/ceph.py:147 +#, python-format +msgid "invalid user '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:213 +msgid "backup_id required" +msgstr "" + +#: cinder/backup/drivers/ceph.py:224 +#, python-format +msgid "discarding %(length)s bytes from offset %(offset)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:232 +#, python-format +msgid "writing zeroes chunk %d" +msgstr "" + +#: cinder/backup/drivers/ceph.py:246 +#, python-format +msgid "transferring data between '%(src)s' and '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:250 +#, python-format +msgid "%(chunks)s chunks of %(bytes)s bytes to be transferred" +msgstr "" + +#: cinder/backup/drivers/ceph.py:269 +#, python-format +msgid "transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:279 +#, python-format +msgid "transferring remaining %s bytes" +msgstr "" + +#: cinder/backup/drivers/ceph.py:295 +#, python-format +msgid "creating base image '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:322 cinder/backup/drivers/ceph.py:603 +#, python-format +msgid "deleting backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:325 +msgid "no backup snapshot to delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:361 +#, python-format +msgid "trying diff format name format basename='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:369 +#, python-format +msgid "image %s not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:377 +#, python-format +msgid "base image still has %s snapshots so skipping base image delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:382 +#, python-format +msgid "deleting base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:389 +#, python-format +msgid "image busy, retrying %(retries)s more time(s) in %(delay)ss" +msgstr "" + +#: cinder/backup/drivers/ceph.py:394 +msgid "max retries reached - raising error" +msgstr "" + +#: cinder/backup/drivers/ceph.py:397 +#, python-format +msgid "base backup image='%s' deleted)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:407 +#, python-format +msgid "deleting source snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:453 +#, python-format +msgid "performing differential transfer from '%(src)s' to '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:478 +#, python-format +msgid "rbd diff op failed - (ret=%(ret)s stderr=%(stderr)s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:488 +#, python-format +msgid "image '%s' not found - trying diff format name" +msgstr "" + +#: cinder/backup/drivers/ceph.py:493 +#, python-format +msgid "diff format image '%s' not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:528 +#, python-format +msgid "using --from-snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:543 +#, python-format +msgid "source snap '%s' is stale so deleting" +msgstr "" + +#: cinder/backup/drivers/ceph.py:555 +#, python-format +msgid "" +"snap='%(snap)s' does not exist in base image='%(base)s' - aborting " +"incremental backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:566 +#, python-format +msgid "creating backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:586 +#, python-format +msgid "differential backup transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:595 +msgid "differential backup transfer failed" +msgstr "" + +#: 
cinder/backup/drivers/ceph.py:625 +#, python-format +msgid "creating base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:634 +msgid "copying data" +msgstr "" + +#: cinder/backup/drivers/ceph.py:694 +#, python-format +msgid "looking for snapshot of backup base '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:697 +#, python-format +msgid "backup base '%s' has no snapshots" +msgstr "" + +#: cinder/backup/drivers/ceph.py:704 +#, python-format +msgid "backup '%s' has no snapshot" +msgstr "" + +#: cinder/backup/drivers/ceph.py:708 +#, python-format +msgid "backup should only have one snapshot but instead has %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:713 +#, python-format +msgid "found snapshot '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:734 +msgid "need non-zero volume size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:751 +#, python-format +msgid "Starting backup of volume='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:764 +msgid "forcing full backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:776 +#, python-format +msgid "backup '%s' finished." +msgstr "" + +#: cinder/backup/drivers/ceph.py:834 +msgid "adjusting restore vol size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:846 +#, python-format +msgid "trying incremental restore from base='%(base)s' snap='%(snap)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:858 +msgid "differential restore failed, trying full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:869 +#, python-format +msgid "restore transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:916 +#, python-format +msgid "rbd has %s extents" +msgstr "" + +#: cinder/backup/drivers/ceph.py:938 +msgid "dest volume is original volume - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:959 +msgid "destination has extents - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:964 +#, python-format +msgid "no restore point found for backup='%s', forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:995 +msgid "forcing full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1005 +#, python-format +msgid "starting restore from Ceph backup=%(src)s to volume=%(dest)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1016 +msgid "volume_file does not support fileno() so skipping fsync()" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1021 +msgid "restore finished successfully." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:1023 +#, python-format +msgid "restore finished with error - %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1029 +#, python-format +msgid "delete started for backup=%s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1034 +msgid "rbd image not found but continuing anyway so that db entry can be removed" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1037 +#, python-format +msgid "delete '%s' finished with warning" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1039 +#, python-format +msgid "delete '%s' finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:106 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:123 +#, python-format +msgid "single_user auth mode enabled, but %(param)s not set" +msgstr "" + +#: cinder/backup/drivers/swift.py:141 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:146 +#, python-format +msgid "container %s does not exist" +msgstr "" + +#: cinder/backup/drivers/swift.py:151 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/drivers/swift.py:157 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:173 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:182 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:192 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:209 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/drivers/swift.py:214 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:219 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:224 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/drivers/swift.py:234 +#, python-format +msgid "volume size %d is invalid." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:248 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:271 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/drivers/swift.py:278 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:287 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/drivers/swift.py:291 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/drivers/swift.py:297 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:301 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:304 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:312 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/drivers/swift.py:328 cinder/backup/drivers/tsm.py:324 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/drivers/swift.py:345 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/drivers/swift.py:350 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:356 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/drivers/swift.py:362 +#, python-format +msgid "" +"restoring object from swift. backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:378 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/drivers/swift.py:401 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:409 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:423 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:428 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:432 cinder/backup/drivers/tsm.py:378 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:446 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:455 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:458 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:468 cinder/backup/drivers/tsm.py:440 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/backup/drivers/tsm.py:85 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to create device hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:143 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to obtain backup success notification from " +"server.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:173 +#, python-format +msgid "" +"restore: %(vol_id)s Failed.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:199 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a block device." +msgstr "" + +#: cinder/backup/drivers/tsm.py:206 +#, python-format +msgid "backup: %(vol_id)s Failed. Cannot obtain real path to device %(path)s." +msgstr "" + +#: cinder/backup/drivers/tsm.py:213 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a file." +msgstr "" + +#: cinder/backup/drivers/tsm.py:260 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to remove backup hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:286 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to TSM, volume path: " +"%(volume_path)s," +msgstr "" + +#: cinder/backup/drivers/tsm.py:298 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:308 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:338 +#, python-format +msgid "" +"restore: starting restore of backup from TSM to volume %(volume_id)s, " +"backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:352 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:362 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:413 +#, python-format +msgid "" +"delete: %(vol_id)s Failed to run dsmc with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:421 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments with " +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:432 +#, python-format +msgid "" +"delete: %(vol_id)s Failed with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/brick/exception.py:55 +#, python-format +msgid "Exception in string format operation. 
msg='%s'" +msgstr "" + +#: cinder/brick/exception.py:85 +msgid "We are unable to locate any Fibre Channel devices." +msgstr "" + +#: cinder/brick/exception.py:89 +msgid "Unable to find a Fibre Channel volume device." +msgstr "" + +#: cinder/brick/exception.py:93 +#, python-format +msgid "Volume device not found at %(device)s." +msgstr "" + +#: cinder/brick/exception.py:97 +#, python-format +msgid "Unable to find Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:101 +#, python-format +msgid "Failed to create Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:105 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:109 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:113 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:117 +#, python-format +msgid "Connect to volume via protocol %(protocol)s not supported." +msgstr "" + +#: cinder/brick/initiator/connector.py:127 +#, python-format +msgid "Invalid InitiatorConnector protocol specified %(protocol)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:140 +#, python-format +msgid "Failed to access the device on the path %(path)s: %(error)s %(info)s." +msgstr "" + +#: cinder/brick/initiator/connector.py:229 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. Try" +" number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:242 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:317 +#, python-format +msgid "Could not find the iSCSI Initiator File %s" +msgstr "" + +#: cinder/brick/initiator/connector.py:609 +msgid "We are unable to locate any Fibre Channel devices" +msgstr "" + +#: cinder/brick/initiator/connector.py:619 +#, python-format +msgid "Looking for Fibre Channel dev %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:629 +msgid "Fibre Channel volume device not found." +msgstr "" + +#: cinder/brick/initiator/connector.py:633 +#, python-format +msgid "Fibre volume not yet found. Will rescan & retry. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:649 +#, python-format +msgid "Found Fibre Channel volume %(name)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:658 +#, python-format +msgid "Multipath device discovered %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:776 +#, python-format +msgid "AoE volume not yet found at: %(path)s. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:789 +#, python-format +msgid "Found AoE device %(path)s (after %(tries)s rediscover)" +msgstr "" + +#: cinder/brick/initiator/connector.py:815 +#, python-format +msgid "aoe-discover: stdout=%(out)s stderr=%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:825 +#, python-format +msgid "aoe-revalidate %(dev)s: stdout=%(out)s stderr=%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:834 +#, python-format +msgid "aoe-flush %(dev)s: stdout=%(out)s stderr=%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:858 +msgid "" +"Connection details not present. RemoteFsClient may not initialize " +"properly." 
+msgstr "" + +#: cinder/brick/initiator/connector.py:915 +msgid "Invalid connection_properties specified no device_path attribute" +msgstr "" + +#: cinder/brick/initiator/linuxfc.py:50 cinder/brick/initiator/linuxfc.py:56 +msgid "systool is not installed" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:99 +#: cinder/brick/initiator/linuxscsi.py:107 +#: cinder/brick/initiator/linuxscsi.py:124 +#, python-format +msgid "multipath call failed exit (%(code)s)" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:145 +#, python-format +msgid "Couldn't find multipath device %(line)s" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:149 +#, python-format +msgid "Found multipath device = %(mdev)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:140 +msgid "Attempting recreate of backing lun..." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:158 +#, python-format +msgid "" +"Failed to recover attempt to create iscsi backing lun for volume " +"id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:177 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:184 +#, python-format +msgid "" +"Created volume path %(vp)s,\n" +"content: %(vc)%" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:216 cinder/brick/iscsi/iscsi.py:365 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:227 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:258 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:262 +#, python-format +msgid "Volume path %s does not exist, nothing to remove." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:280 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:290 cinder/brick/iscsi/iscsi.py:550 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:375 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:469 +msgid "cinder-rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:489 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:513 cinder/brick/iscsi/iscsi.py:522 +#, python-format +msgid "Failed to create iscsi target for volume id:%s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:532 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:542 +#, python-format +msgid "Failed to remove iscsi target for volume id:%s." 
+msgstr "" + +#: cinder/brick/iscsi/iscsi.py:571 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 cinder/brick/local_dev/lvm.py:158 +#: cinder/brick/local_dev/lvm.py:474 cinder/brick/local_dev/lvm.py:503 +#: cinder/brick/local_dev/lvm.py:546 cinder/brick/local_dev/lvm.py:609 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 cinder/brick/local_dev/lvm.py:159 +#: cinder/brick/local_dev/lvm.py:475 cinder/brick/local_dev/lvm.py:504 +#: cinder/brick/local_dev/lvm.py:547 cinder/brick/local_dev/lvm.py:610 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 cinder/brick/local_dev/lvm.py:160 +#: cinder/brick/local_dev/lvm.py:476 cinder/brick/local_dev/lvm.py:505 +#: cinder/brick/local_dev/lvm.py:548 cinder/brick/local_dev/lvm.py:611 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, python-format +msgid "Unable to locate Volume Group %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:157 +msgid "Error querying thin pool about data_percent" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:370 +#, python-format +msgid "Unable to find VG: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:420 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:473 +msgid "Error creating Volume" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:489 +#, python-format +msgid "Unable to find LV: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:502 +msgid "Error creating snapshot" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:545 +msgid "Error activating LV" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:563 +#, python-format +msgid "Error reported running lvremove: CMD: %(command)s, RESPONSE: %(response)s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:568 +msgid "Attempting udev settle and retry of lvremove..." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:608 +msgid "Error extending Volume" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:39 +msgid "nfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:45 +msgid "glusterfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:86 +#, python-format +msgid "Already mounted: %s" +msgstr "" + +#: cinder/common/config.py:125 +msgid "Deploy v1 of the Cinder API." +msgstr "" + +#: cinder/common/config.py:128 +msgid "Deploy v2 of the Cinder API." +msgstr "" + +#: cinder/common/sqlalchemyutils.py:66 +#: cinder/openstack/common/db/sqlalchemy/utils.py:72 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:114 +#: cinder/openstack/common/db/sqlalchemy/utils.py:120 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/compute/nova.py:97 +#, python-format +msgid "Novaclient connection created using URL: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:63 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:190 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:843 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1842 +#, python-format +msgid "VolumeType %s deletion failed, VolumeType in use." 
+msgstr "" + +#: cinder/db/sqlalchemy/api.py:2530 +#, python-format +msgid "No backup with id %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2615 +msgid "Volume must be available" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2639 +#, python-format +msgid "Volume in unexpected state %s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2662 +#, python-format +msgid "" +"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " +"%(status)s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:37 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:64 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:240 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:269 +msgid "Downgrade from initial Cinder install is unsupported." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:49 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:74 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:105 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:45 +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:48 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:46 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:127 +msgid "Dropping foreign key reservations_ibfk_1 failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:133 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:140 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:147 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:60 +msgid "Exception while creating table 'volume_glance_metadata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:75 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:68 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:58 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:61 +msgid "transfers table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:31 +msgid "migrations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:61 +#, python-format +msgid "Table |%s| not created" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:37 +#, python-format +msgid "Exception while dropping table %s." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:100 +#, python-format +msgid "Exception while creating table %s." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:34 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:43 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:49 +#, python-format +msgid "Column |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:92 +msgid "encryption_key_id column not dropped from volumes" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:100 +msgid "encryption_key_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:105 +msgid "volume_type_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:113 +msgid "encryption table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:49 +msgid "Table quality_of_service_specs not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:60 +msgid "Added qos_specs_id column to volume type table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:85 +msgid "Dropping foreign key volume_types_ibfk_1 failed" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:93 +msgid "Dropping qos_specs_id column failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:100 +msgid "Dropping quality_of_service_specs table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:59 +msgid "volume_admin_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:48 +msgid "" +"Found existing 'default' entries in the quota_classes table. Skipping " +"insertion of default values." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:72 +msgid "Added default quota class data into the DB." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:74 +msgid "Default quota class data not inserted into the DB." +msgstr "" + +#: cinder/image/glance.py:161 cinder/image/glance.py:169 +#, python-format +msgid "Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:94 cinder/image/image_utils.py:199 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/image/image_utils.py:101 +#, python-format +msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:109 cinder/image/image_utils.py:192 +#, python-format +msgid "" +"Size is %(image_size)dGB and doesn't fit in a volume of size " +"%(volume_size)dGB." +msgstr "" + +#: cinder/image/image_utils.py:157 +#, python-format +msgid "" +"qemu-img is not installed and image is of type %s. Only RAW images can " +"be used if qemu-img is not installed." +msgstr "" + +#: cinder/image/image_utils.py:164 +msgid "" +"qemu-img is not installed and the disk format is not specified. Only RAW" +" images can be used if qemu-img is not installed." 
+msgstr "" + +#: cinder/image/image_utils.py:178 +#, python-format +msgid "Copying image from %(tmp)s to volume %(dest)s - size: %(size)s" +msgstr "" + +#: cinder/image/image_utils.py:206 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:224 +#, python-format +msgid "Converted to %(vol_format)s, but format is now %(file_format)s" +msgstr "" + +#: cinder/image/image_utils.py:260 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:78 +msgid "" +"config option keymgr.fixed_key has not been defined: some operations may " +"fail unexpectedly" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:80 +msgid "keymgr.fixed_key not defined" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:134 +#, python-format +msgid "Not deleting key %s" +msgstr "" + +#: cinder/openstack/common/eventlet_backdoor.py:140 +#, python-format +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/fileutils.py:64 +#, python-format +msgid "Reloading cached file %s" +msgstr "" + +#: cinder/openstack/common/gettextutils.py:252 +msgid "Message objects do not support addition." +msgstr "" + +#: cinder/openstack/common/gettextutils.py:261 +msgid "" +"Message objects do not support str() because they may contain non-ascii " +"characters. Please use unicode() or translate() instead." +msgstr "" + +#: cinder/openstack/common/imageutils.py:96 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:189 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:200 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:227 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:235 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/log.py:301 +#, python-format +msgid "Deprecated: %s" +msgstr "" + +#: cinder/openstack/common/log.py:402 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:453 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:623 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:82 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:89 +#: cinder/tests/brick/test_brick_connector.py:466 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:129 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:136 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:43 +#, python-format +msgid "Unexpected argument for periodic task creation: %(arg)s." 
+msgstr "" + +#: cinder/openstack/common/periodic_task.py:134 +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:139 +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:177 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:186 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." +msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:127 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/openstack/common/processutils.py:142 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:167 +#: cinder/openstack/common/processutils.py:239 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:345 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:179 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/openstack/common/processutils.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:318 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:220 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/openstack/common/processutils.py:224 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/openstack/common/service.py:175 +#: cinder/openstack/common/service.py:269 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:187 +msgid "Exception during rpc cleanup." 
+msgstr "" + +#: cinder/openstack/common/service.py:238 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:275 +msgid "Unhandled exception" +msgstr "" + +#: cinder/openstack/common/service.py:308 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/openstack/common/service.py:327 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/openstack/common/service.py:337 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/openstack/common/service.py:354 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/openstack/common/service.py:358 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/service.py:362 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/openstack/common/service.py:392 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/openstack/common/service.py:410 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/openstack/common/sslutils.py:98 +#, python-format +msgid "Invalid SSL version : %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:86 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/strutils.py:182 +#, python-format +msgid "Invalid string format: %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:189 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/openstack/common/versionutils.py:69 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and " +"may be removed in %(remove_in)s." +msgstr "" + +#: cinder/openstack/common/versionutils.py:73 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s and may be removed in " +"%(remove_in)s. It will not be superseded." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:29 +msgid "An unknown error occurred in crypto utils." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:36 +#, python-format +msgid "Block size of %(given)d is too big, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:45 +#, python-format +msgid "Length of %(given)d is too long, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/db/exception.py:44 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:487 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:538 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:610 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/utils.py:33 +msgid "Sort key supplied was not valid." +msgstr "" + +#: cinder/openstack/common/notifier/api.py:129 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:145 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:164 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. 
Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:105 +#, python-format +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:83 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:216 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:299 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:345 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "received %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:422 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:423 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:280 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:459 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:594 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:597 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:631 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:640 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:668 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." 
+msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:166 +#: cinder/openstack/common/rpc/impl_qpid.py:164 +msgid "Failed to process message... skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:477 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:499 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:536 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:552 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:606 +#: cinder/openstack/common/rpc/impl_qpid.py:507 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:624 +#: cinder/openstack/common/rpc/impl_qpid.py:522 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:628 +#: cinder/openstack/common/rpc/impl_qpid.py:526 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:667 +#: cinder/openstack/common/rpc/impl_qpid.py:561 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:84 +#, python-format +msgid "Invalid value for qpid_topology_version: %d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:455 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:461 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:474 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:534 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:101 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:136 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:137 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:138 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:146 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:158 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:200 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:205 +msgid "You cannot send on this socket." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:267 +#, python-format +msgid "Running func with context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:305 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:387 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:437 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:443 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:475 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:481 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:497 +#, python-format +msgid "Required IPC directory does not exist at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:506 +#, python-format +msgid "Permission denied to IPC directory at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:509 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:543 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:562 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:590 +msgid "Skipping topic registration. Already registered." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:597 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:649 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:662 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:675 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:678 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:681 +#, python-format +msgid "Received message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:682 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:691 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:698 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:721 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:724 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:728 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:731 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:771 +#, python-format +msgid "topic is %s." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:815 +#, python-format +msgid "rpc_zmq_matchmaker = %(orig)s is deprecated; use %(new)s instead" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#: cinder/openstack/common/rpc/matchmaker_ring.py:79 +#: cinder/openstack/common/rpc/matchmaker_ring.py:97 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:54 +#, python-format +msgid "extra_spec requirement '%(req)s' does not match '%(cap)s'" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:67 +#, python-format +msgid "%(host_state)s fails resource_type extra_specs requirements" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:43 +msgid "Re-scheduling is disabled." +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:52 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/scheduler/driver.py:69 +msgid "Must implement host_passes_filters" +msgstr "" + +#: cinder/scheduler/driver.py:74 +msgid "Must implement find_retype_host" +msgstr "" + +#: cinder/scheduler/driver.py:78 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:82 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:98 +#, python-format +msgid "cannot place volume %(id)s on %(host)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:114 +#, python-format +msgid "No valid hosts for volume %(id)s with type %(type)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:125 +#, python-format +msgid "" +"Current host not valid for volume %(id)s with type %(type)s, migration " +"not allowed" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:156 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:174 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:207 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:259 +#, python-format +msgid "Filtered %s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:276 +#, python-format +msgid "Choosing %s" +msgstr "" + +#: cinder/scheduler/host_manager.py:264 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:269 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:294 +#, python-format +msgid "volume service is down or disabled. (host: %s)" +msgstr "" + +#: cinder/scheduler/manager.py:63 +msgid "" +"ChanceScheduler and SimpleScheduler have been deprecated due to lack of " +"support for advanced features like: volume types, volume encryption, QoS " +"etc. These two schedulers can be fully replaced by FilterScheduler with " +"certain combination of filters and weighers." 
+msgstr "" + +#: cinder/scheduler/manager.py:98 cinder/scheduler/manager.py:100 +msgid "Failed to create scheduler manager volume flow" +msgstr "" + +#: cinder/scheduler/manager.py:159 +msgid "New volume type not specified in request_spec." +msgstr "" + +#: cinder/scheduler/manager.py:174 +#, python-format +msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." +msgstr "" + +#: cinder/scheduler/manager.py:192 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:68 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%s'" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:43 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:57 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:53 +msgid "No volume_id provided to populate a request_spec from" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:116 +#, python-format +msgid "Failed to schedule_create_volume: %(cause)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:135 +#, python-format +msgid "Failed notifying on %(topic)s payload %(payload)s" +msgstr "" + +#: cinder/tests/fake_driver.py:57 cinder/volume/driver.py:784 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:76 cinder/volume/driver.py:884 +#, python-format +msgid "FAKE ISER: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:97 +msgid "local_path not implemented" +msgstr "" + +#: cinder/tests/fake_driver.py:124 cinder/tests/fake_driver.py:129 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:70 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:78 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:94 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:97 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:58 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_netapp_nfs.py:360 +#, python-format +msgid "Share %(share)s and file name %(file_name)s" +msgstr "" + +#: cinder/tests/test_rbd.py:768 cinder/volume/drivers/rbd.py:175 +msgid "flush() not supported in this version of librbd" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:260 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1507 +#, python-format +msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1510 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1515 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:60 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:61 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: 
cinder/tests/test_xiv_ds8k.py:102 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/tests/api/contrib/test_backups.py:741 +msgid "Invalid input" +msgstr "" + +#: cinder/tests/integrated/test_login.py:29 +#, python-format +msgid "volume: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:32 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:42 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:50 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:58 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:100 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:103 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:121 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:148 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:159 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:166 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/transfer/api.py:68 +msgid "Volume in unexpected state" +msgstr "" + +#: cinder/transfer/api.py:102 cinder/volume/api.py:367 +msgid "status must be available" +msgstr "" + +#: cinder/transfer/api.py:119 +#, python-format +msgid "Failed to create transfer record for %s" +msgstr "" + +#: cinder/transfer/api.py:136 +#, python-format +msgid "Attempt to transfer %s with invalid auth key." +msgstr "" + +#: cinder/transfer/api.py:156 cinder/volume/flows/api/create_volume.py:508 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/transfer/api.py:182 +#, python-format +msgid "Failed to update quota donating volumetransfer id %s" +msgstr "" + +#: cinder/transfer/api.py:199 +#, python-format +msgid "Volume %s has been transferred." 
+msgstr "" + +#: cinder/volume/api.py:143 +#, python-format +msgid "Unable to query if %s is in the availability zone set" +msgstr "" + +#: cinder/volume/api.py:171 cinder/volume/api.py:173 +msgid "Failed to create api volume flow" +msgstr "" + +#: cinder/volume/api.py:202 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:214 +#, python-format +msgid "Volume status must be available or error, but current status is: %s" +msgstr "" + +#: cinder/volume/api.py:224 +msgid "Volume cannot be deleted while migrating" +msgstr "" + +#: cinder/volume/api.py:229 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:285 cinder/volume/api.py:350 +#: cinder/volume/qos_specs.py:240 cinder/volume/volume_types.py:67 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:370 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:377 +msgid "status must be in-use to detach" +msgstr "" + +#: cinder/volume/api.py:388 +msgid "Volume status must be available to reserve" +msgstr "" + +#: cinder/volume/api.py:464 +msgid "Snapshot cannot be created while volume is migrating" +msgstr "" + +#: cinder/volume/api.py:468 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:490 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:502 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:553 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/api.py:581 cinder/volume/flows/api/create_volume.py:208 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:585 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:589 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:720 cinder/volume/api.py:772 +msgid "Volume status must be available/in-use." +msgstr "" + +#: cinder/volume/api.py:723 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/api.py:752 +msgid "Volume status must be available to extend." +msgstr "" + +#: cinder/volume/api.py:757 +#, python-format +msgid "" +"New size for extend must be greater than current size. (current: " +"%(size)s, extended: %(new_size)s)" +msgstr "" + +#: cinder/volume/api.py:778 +msgid "Volume is already part of an active migration" +msgstr "" + +#: cinder/volume/api.py:784 +msgid "volume must not have snapshots" +msgstr "" + +#: cinder/volume/api.py:797 +#, python-format +msgid "No available service named %s" +msgstr "" + +#: cinder/volume/api.py:803 +msgid "Destination host must be different than current host" +msgstr "" + +#: cinder/volume/api.py:833 +msgid "Source volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:837 +msgid "Destination volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:842 +#, python-format +msgid "Destination has migration_status %(stat)s, expected %(exp)s." +msgstr "" + +#: cinder/volume/api.py:853 +msgid "Volume status must be available to update readonly flag." 
+msgstr "" + +#: cinder/volume/api.py:862 +#, python-format +msgid "Unable to update type due to incorrect status on volume: %s" +msgstr "" + +#: cinder/volume/api.py:868 +#, python-format +msgid "Volume %s is already part of an active migration." +msgstr "" + +#: cinder/volume/api.py:874 +#, python-format +msgid "migration_policy must be 'on-demand' or 'never', passed: %s" +msgstr "" + +#: cinder/volume/api.py:887 +#, python-format +msgid "Invalid volume_type passed: %s" +msgstr "" + +#: cinder/volume/api.py:900 +#, python-format +msgid "New volume_type same as original: %s" +msgstr "" + +#: cinder/volume/api.py:915 +msgid "Retype cannot change encryption requirements" +msgstr "" + +#: cinder/volume/api.py:927 +msgid "Retype cannot change front-end qos specs for in-use volumes" +msgstr "" + +#: cinder/volume/driver.py:189 cinder/volume/drivers/netapp/nfs.py:174 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:282 +#, python-format +msgid "copy_data_between_volumes %(src)s -> %(dest)s." +msgstr "" + +#: cinder/volume/driver.py:295 cinder/volume/driver.py:309 +#, python-format +msgid "Failed to attach volume %(vol)s" +msgstr "" + +#: cinder/volume/driver.py:327 +#, python-format +msgid "Failed to copy volume %(src)s to %(dest)d" +msgstr "" + +#: cinder/volume/driver.py:340 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:358 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:394 +#, python-format +msgid "Unable to access the backend storage via the path %(path)s." +msgstr "" + +#: cinder/volume/driver.py:433 +#, python-format +msgid "Creating a new backup for volume %s." +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Restoring backup %(backup)s to volume %(volume)s." +msgstr "" + +#: cinder/volume/driver.py:474 +msgid "Extend volume not implemented" +msgstr "" + +#: cinder/volume/driver.py:533 cinder/volume/drivers/emc/emc_smis_iscsi.py:113 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:546 +#, python-format +msgid "ISCSI discovery attempt failed for:%s" +msgstr "" + +#: cinder/volume/driver.py:548 +#, python-format +msgid "Error from iscsiadm -m discovery: %s" +msgstr "" + +#: cinder/volume/driver.py:595 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:599 cinder/volume/drivers/emc/emc_smis_iscsi.py:156 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:696 +msgid "The volume driver requires the iSCSI initiator name in the connector." +msgstr "" + +#: cinder/volume/driver.py:726 cinder/volume/driver.py:845 +#: cinder/volume/drivers/eqlx.py:247 cinder/volume/drivers/lvm.py:359 +#: cinder/volume/drivers/zadara.py:650 +#: cinder/volume/drivers/emc/emc_smis_common.py:859 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:235 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:602 +#: cinder/volume/drivers/netapp/iscsi.py:1032 +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/nexenta/iscsi.py:538 +#: cinder/volume/drivers/windows/windows.py:205 +msgid "Updating volume stats" +msgstr "" + +#: cinder/volume/driver.py:924 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:203 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." 
+msgstr "" + +#: cinder/volume/manager.py:209 +msgid "" +"ThinLVMVolumeDriver is deprecated, please configure LVMISCSIDriver and " +"lvm_type=thin. Continuing with those settings." +msgstr "" + +#: cinder/volume/manager.py:228 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)" +msgstr "" + +#: cinder/volume/manager.py:235 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s" +msgstr "" + +#: cinder/volume/manager.py:244 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:257 +#, python-format +msgid "Failed to re-export volume %s: setting to error state" +msgstr "" + +#: cinder/volume/manager.py:264 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:271 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:273 +#, python-format +msgid "" +"Error encountered during re-exporting phase of driver initialization: " +"%(name)s" +msgstr "" + +#: cinder/volume/manager.py:283 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:328 cinder/volume/manager.py:330 +msgid "Failed to create manager volume flow" +msgstr "" + +#: cinder/volume/manager.py:374 cinder/volume/manager.py:391 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:380 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:389 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:394 +#, python-format +msgid "Cannot delete volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:422 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:427 +#, python-format +msgid "volume %s: glance metadata deleted" +msgstr "" + +#: cinder/volume/manager.py:430 +#, python-format +msgid "no glance metadata found for volume %s" +msgstr "" + +#: cinder/volume/manager.py:434 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:451 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:462 +#, python-format +msgid "snapshot %(snap_id)s: creating" +msgstr "" + +#: cinder/volume/manager.py:490 +#, python-format +msgid "" +"Failed updating %(snapshot_id)s metadata using the provided volumes " +"%(volume_id)s metadata" +msgstr "" + +#: cinder/volume/manager.py:496 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:508 cinder/volume/manager.py:518 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:526 +#, python-format +msgid "Cannot delete snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:556 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:559 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:579 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:583 +msgid "being attached by another host" +msgstr "" + +#: cinder/volume/manager.py:587 +msgid "being attached by different mode" +msgstr "" + +#: cinder/volume/manager.py:590 +msgid "status must be available or attaching" +msgstr "" + +#: cinder/volume/manager.py:698 +#, python-format +msgid 
"Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "" + +#: cinder/volume/manager.py:760 +#, python-format +msgid "Unable to fetch connection information from backend: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:807 +#, python-format +msgid "Unable to terminate volume connection: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:854 +msgid "failed to create new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:857 +msgid "timeout creating new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:880 +#, python-format +msgid "Failed to copy volume %(vol1)s to %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:909 +#, python-format +msgid "" +"migrate_volume_completion: completing migration for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:921 +#, python-format +msgid "" +"migrate_volume_completion is cleaning up an error for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:940 +#, python-format +msgid "Failed to delete migration source vol %(vol)s: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:976 +#, python-format +msgid "volume %s: calling driver migrate_volume" +msgstr "" + +#: cinder/volume/manager.py:1016 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/manager.py:1024 +#, python-format +msgid "" +"Unable to update stats, %(driver_name)s -%(driver_version)s " +"%(config_group)s driver is uninitialized." +msgstr "" + +#: cinder/volume/manager.py:1044 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/manager.py:1091 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to extend volume by %(s_size)sG, " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/manager.py:1103 +#, python-format +msgid "volume %s: extending" +msgstr "" + +#: cinder/volume/manager.py:1105 +#, python-format +msgid "volume %s: extended successfully" +msgstr "" + +#: cinder/volume/manager.py:1107 +#, python-format +msgid "volume %s: Error trying to extend volume" +msgstr "" + +#: cinder/volume/manager.py:1169 +msgid "Failed to update usages while retyping volume." +msgstr "" + +#: cinder/volume/manager.py:1170 +msgid "Failed to get old volume type quota reservations" +msgstr "" + +#: cinder/volume/manager.py:1190 +#, python-format +msgid "Volume %s: retyped succesfully" +msgstr "" + +#: cinder/volume/manager.py:1193 +#, python-format +msgid "" +"Volume %s: driver error when trying to retype, falling back to generic " +"mechanism." +msgstr "" + +#: cinder/volume/manager.py:1204 +msgid "Retype requires migration but is not allowed." +msgstr "" + +#: cinder/volume/manager.py:1212 +msgid "Volume must not have snapshots." 
+msgstr "" + +#: cinder/volume/qos_specs.py:57 +#, python-format +msgid "Valid consumer of QoS specs are: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:84 cinder/volume/qos_specs.py:105 +#: cinder/volume/qos_specs.py:155 cinder/volume/qos_specs.py:197 +#: cinder/volume/qos_specs.py:211 cinder/volume/qos_specs.py:225 +#: cinder/volume/volume_types.py:43 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:123 cinder/volume/qos_specs.py:140 +#: cinder/volume/qos_specs.py:272 cinder/volume/volume_types.py:52 +#: cinder/volume/volume_types.py:99 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/qos_specs.py:156 +#, python-format +msgid "Failed to get all associations of qos specs %s" +msgstr "" + +#: cinder/volume/qos_specs.py:189 +#, python-format +msgid "" +"Type %(type_id)s is already associated with another qos specs: " +"%(qos_specs_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:198 +#, python-format +msgid "Failed to associate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:212 +#, python-format +msgid "Failed to disassociate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:226 +#, python-format +msgid "Failed to disassociate qos specs %s." +msgstr "" + +#: cinder/volume/qos_specs.py:284 cinder/volume/volume_types.py:111 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/utils.py:144 +#, python-format +msgid "" +"Incorrect value error: %(blocksize)s, it may indicate that " +"'volume_dd_blocksize' was configured incorrectly. Fall back to default." +msgstr "" + +#: cinder/volume/volume_types.py:130 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:131 +#: cinder/volume/drivers/block_device.py:143 cinder/volume/drivers/lvm.py:654 +#: cinder/volume/drivers/lvm.py:669 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:157 cinder/volume/drivers/lvm.py:687 +#, python-format +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:183 cinder/volume/drivers/lvm.py:483 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:200 cinder/volume/drivers/lvm.py:504 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:272 cinder/volume/drivers/lvm.py:227 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:287 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:311 cinder/volume/drivers/lvm.py:300 +#: cinder/volume/drivers/zadara.py:509 cinder/volume/drivers/nexenta/nfs.py:189 +#, python-format +msgid "Creating clone of volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:380 +msgid "No free disk" +msgstr "" + +#: cinder/volume/drivers/block_device.py:393 +msgid "No big enough free disk" +msgstr "" + +#: cinder/volume/drivers/coraid.py:84 +#, python-format +msgid "Invalid ESM url scheme \"%s\". Supported https only." +msgstr "" + +#: cinder/volume/drivers/coraid.py:111 +msgid "Invalid REST handle name. Expected path." 
+msgstr "" + +#: cinder/volume/drivers/coraid.py:134 +#, python-format +msgid "Call to json.loads() failed: %(ex)s. Response: %(resp)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:224 +msgid "Session is expired. Relogin on ESM." +msgstr "" + +#: cinder/volume/drivers/coraid.py:244 +msgid "Reply is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:246 +msgid "Error message is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:284 +#, python-format +msgid "Coraid Appliance ping failed: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:297 +#, python-format +msgid "Volume \"%(name)s\" created with VSX LUN \"%(lun)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:311 +#, python-format +msgid "Volume \"%s\" deleted." +msgstr "" + +#: cinder/volume/drivers/coraid.py:315 +#, python-format +msgid "Resize volume \"%(name)s\" to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:319 +#, python-format +msgid "Repository for volume \"%(name)s\" found: \"%(repo)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:333 +#, python-format +msgid "Volume \"%(name)s\" resized. New size is %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:385 +msgid "Cannot create clone volume in different repository." +msgstr "" + +#: cinder/volume/drivers/coraid.py:505 +#, python-format +msgid "Initialize connection %(shelf)s/%(lun)s for %(name)s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:139 +#, python-format +msgid "" +"CLI output\n" +"%s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:154 +msgid "Reading CLI MOTD" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:158 +#, python-format +msgid "Setting CLI terminal width: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:162 +#, python-format +msgid "Sending CLI command: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:169 +msgid "Error executing EQL command" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:199 +#, python-format +msgid "EQL-driver: executing \"%s\"" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:208 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:383 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:215 cinder/volume/drivers/san/san.py:149 +#, python-format +msgid "Error running SSH command: %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:282 +#, python-format +msgid "Volume %s does not exist, it may have already been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:300 +#, python-format +msgid "EQL-driver: Setup is complete, group IP is %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:304 +msgid "Failed to setup the Dell EqualLogic driver" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:320 +#, python-format +msgid "Failed to create volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:329 +#, python-format +msgid "Volume %s was not found while trying to delete it" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:333 +#, python-format +msgid "Failed to delete volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:348 +#, python-format +msgid "Failed to create snapshot of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:361 +#, python-format +msgid "Failed to create volume from snapshot %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:374 +#, python-format +msgid "Failed to create clone of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:384 +#, python-format +msgid "Failed to delete snapshot %(snap)s of volume %(vol)s" +msgstr "" + +#: 
cinder/volume/drivers/eqlx.py:405 +#, python-format +msgid "Failed to initialize connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:415 +#, python-format +msgid "Failed to terminate connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:436 +#, python-format +msgid "Volume %s is not found!, it may have been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:440 +#, python-format +msgid "Failed to ensure export of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:459 +#, python-format +msgid "Failed to extend_volume %(name)s from %(current_size)sGB to %(new_size)sGB" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:86 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:91 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:103 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:161 +#, python-format +msgid "Cloning volume %(src)s to volume %(dst)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:166 +msgid "Volume status must be 'available'." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:202 cinder/volume/drivers/nfs.py:122 +#: cinder/volume/drivers/netapp/nfs.py:753 +#, python-format +msgid "casted to %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:216 +msgid "Snapshot status must be \"available\" to clone." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:238 +#, python-format +msgid "snapshot: %(snap)s, volume: %(vol)s, volume_size: %(size)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:257 +#, python-format +msgid "will copy from snapshot at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:275 cinder/volume/drivers/nfs.py:172 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:373 +#, python-format +msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:403 +#, python-format +msgid "nova call result: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:405 +msgid "Call to Nova to create snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:427 +msgid "Nova returned \"error\" status while creating snapshot." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:431 +#, python-format +msgid "Status of snapshot %(id)s is now %(status)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:444 +#, python-format +msgid "Timed out while waiting for Nova update for creation of snapshot %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:456 +#, python-format +msgid "create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:457 +#, python-format +msgid "volume id: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:532 +msgid "'active' must be present when writing snap_info." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:562 +#, python-format +msgid "deleting snapshot %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:566 +msgid "Volume status must be \"available\" or \"in-use\"." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:582 +#, python-format +msgid "" +"Snapshot record for %s is not present, allowing snapshot_delete to " +"proceed." 
+msgstr "" + +#: cinder/volume/drivers/glusterfs.py:587 +#, python-format +msgid "snapshot_file for this snap is %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:608 +#, python-format +msgid "No base file found for %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:625 +#, python-format +msgid "No %(base_id)s found for %(file)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:680 +#, python-format +msgid "No file found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:690 +#, python-format +msgid "No snap found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:701 +#, python-format +msgid "No file depends on %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:727 +#, python-format +msgid "Check condition failed: %s expected to be None." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:778 +msgid "Call to Nova delete snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:796 +#, python-format +msgid "status of snapshot %s is still \"deleting\"... waiting" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:802 +#, python-format +msgid "Unable to delete snapshot %(id)s, status: %(status)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:815 +#, python-format +msgid "Timed out while waiting for Nova update for deletion of snapshot %(id)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:904 +#, python-format +msgid "%s must be a valid raw or qcow2 image." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:967 +msgid "Extend volume is only supported for this driver when no snapshots exist." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:975 +#, python-format +msgid "Unrecognized backing format: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:990 +#, python-format +msgid "creating new volume at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:993 +#, python-format +msgid "file already exists at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1019 cinder/volume/drivers/nfs.py:159 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1021 +#, python-format +msgid "Available shares: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1038 +#, python-format +msgid "" +"GlusterFS share at %(dir)s is not writable by the Cinder volume service. " +"Snapshot operations will not be supported." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:96 +#, python-format +msgid "GPFS is not active. Detailed output: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:97 +#, python-format +msgid "GPFS is not running - state: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:140 +msgid "Option gpfs_mount_point_base is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:147 +msgid "Option gpfs_images_share_mode is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:153 +msgid "Option gpfs_images_dir is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:160 +#, python-format +msgid "" +"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " +"belong to different file systems" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:169 +#, python-format +msgid "" +"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in " +"cluster daemon level %(cur)s - must be at least at level %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:183 +#, python-format +msgid "%s must be an absolute path." 
+msgstr "" + +#: cinder/volume/drivers/gpfs.py:188 +#, python-format +msgid "%s is not a directory." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:197 +#, python-format +msgid "" +"The GPFS filesystem %(fs)s is not at the required release level. Current" +" level is %(cur)s, must be at least %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:556 +#, python-format +msgid "Failed to resize volume %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:604 +#, python-format +msgid "mkfs failed on volume %(vol)s, error message was: %(err)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:637 +#, python-format +msgid "" +"%s cannot be accessed. Verify that GPFS is active and file system is " +"mounted." +msgstr "" + +#: cinder/volume/drivers/lvm.py:189 +#, python-format +msgid "Unabled to delete due to existing snapshot for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:215 +#, python-format +msgid "Volume device file path %s does not exist." +msgstr "" + +#: cinder/volume/drivers/lvm.py:221 +#, python-format +msgid "Size for volume: %s not found, cannot secure delete." +msgstr "" + +#: cinder/volume/drivers/lvm.py:262 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:361 +#, python-format +msgid "Unable to update stats on non-initialized Volume Group: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:462 +#, python-format +msgid "Error creating iSCSI target, retrying creation for target: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:482 +#, python-format +msgid "volume_info:%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:518 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:519 cinder/volume/drivers/lvm.py:724 +#: cinder/volume/drivers/huawei/rest_common.py:1225 +#, python-format +msgid "%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:573 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/nfs.py:109 +msgid "Driver specific implementation needs to return mount_point_base." +msgstr "" + +#: cinder/volume/drivers/nfs.py:263 +#, python-format +msgid "Expected volume size was %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:264 +#, python-format +msgid " but size is now %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:361 +#, python-format +msgid "%s is already mounted" +msgstr "" + +#: cinder/volume/drivers/nfs.py:421 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:426 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/nfs.py:431 +#, python-format +msgid "NFS config 'nfs_oversub_ratio' invalid. Must be > 0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:439 +#, python-format +msgid "NFS config 'nfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:493 +#, python-format +msgid "Selected %s as target nfs share." 
+msgstr "" + +#: cinder/volume/drivers/nfs.py:526 +#, python-format +msgid "%s is above nfs_used_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:529 +#, python-format +msgid "%s is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:532 +#, python-format +msgid "%s reserved space is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/rbd.py:160 +#, python-format +msgid "Invalid argument - whence=%s not supported" +msgstr "" + +#: cinder/volume/drivers/rbd.py:164 +msgid "Invalid argument" +msgstr "" + +#: cinder/volume/drivers/rbd.py:183 +msgid "fileno() not supported by RBD()" +msgstr "" + +#: cinder/volume/drivers/rbd.py:210 +#, python-format +msgid "error opening rbd image %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:259 +msgid "rados and rbd python libraries not found" +msgstr "" + +#: cinder/volume/drivers/rbd.py:265 +msgid "error connecting to ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:346 cinder/volume/drivers/sheepdog.py:178 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:377 +#, python-format +msgid "clone depth exceeds limit of %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:411 +#, python-format +msgid "maximum clone depth (%d) has been reached - flattening source volume" +msgstr "" + +#: cinder/volume/drivers/rbd.py:423 +#, python-format +msgid "flattening source volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:435 +#, python-format +msgid "creating snapshot='%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:445 +#, python-format +msgid "cloning '%(src_vol)s@%(src_snap)s' to '%(dest)s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:459 +msgid "clone created successfully" +msgstr "" + +#: cinder/volume/drivers/rbd.py:468 +#, python-format +msgid "creating volume '%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:484 +#, python-format +msgid "flattening %(pool)s/%(img)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:490 +#, python-format +msgid "cloning %(pool)s/%(img)s@%(snap)s to %(dst)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:527 +msgid "volume has no backup snaps" +msgstr "" + +#: cinder/volume/drivers/rbd.py:550 +#, python-format +msgid "volume %s is not a clone" +msgstr "" + +#: cinder/volume/drivers/rbd.py:568 +#, python-format +msgid "deleting parent snapshot %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:579 +#, python-format +msgid "deleting parent %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:593 +#, python-format +msgid "volume %s no longer exists in backend" +msgstr "" + +#: cinder/volume/drivers/rbd.py:609 +msgid "volume has clone snapshot(s)" +msgstr "" + +#: cinder/volume/drivers/rbd.py:625 +#, python-format +msgid "deleting rbd volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:629 +msgid "" +"ImageBusy error raised while deleting rbd volume. This may have been " +"caused by a connection from a client that has crashed and, if so, may be " +"resolved by retrying the delete after 30 seconds has elapsed." 
+msgstr "" + +#: cinder/volume/drivers/rbd.py:642 +msgid "volume is a clone so cleaning references" +msgstr "" + +#: cinder/volume/drivers/rbd.py:696 +#, python-format +msgid "connection data: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:705 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:709 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:712 +msgid "Not an rbd snapshot" +msgstr "" + +#: cinder/volume/drivers/rbd.py:724 +#, python-format +msgid "not cloneable: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:728 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:733 +msgid "rbd image clone requires image format to be 'raw' but image {0} is '{1}'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:747 +#, python-format +msgid "Unable to open image %(loc)s: %(err)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:817 +msgid "volume backup complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:830 +msgid "volume restore complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:840 cinder/volume/drivers/sheepdog.py:195 +#, python-format +msgid "Failed to Extend Volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:845 cinder/volume/drivers/sheepdog.py:200 +#: cinder/volume/drivers/windows/windows.py:223 +#, python-format +msgid "Extend volume from %(old_size)s GB to %(new_size)s GB." +msgstr "" + +#: cinder/volume/drivers/scality.py:67 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:78 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:84 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:105 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:139 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:59 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:64 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:144 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:151 +#, python-format +msgid "" +"Failed to make httplib connection SolidFire Cluster: %s (verify san_ip " +"settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:154 +#, python-format +msgid "Failed to make httplib connection: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:161 +#, python-format +msgid "" +"Request to SolidFire cluster returned bad status: %(status)s / %(reason)s" +" (check san_login/san_password settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:166 +#, python-format +msgid "HTTP request failed, with status: %(status)s and reason: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:177 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:183 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:187 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:189 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:195 +#, python-format +msgid "Detected 
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:202
+#: cinder/volume/drivers/solidfire.py:271
+#: cinder/volume/drivers/solidfire.py:366
+#, python-format
+msgid "API response: %s"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:222
+#, python-format
+msgid "Found solidfire account: %s"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:253
+#, python-format
+msgid "solidfire account: %s does not exist, create it..."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:315
+#, python-format
+msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:398
+msgid "Failed to get model update from clone"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:410
+#, python-format
+msgid "Failed volume create: %s"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:425
+#, python-format
+msgid "More than one valid preset was detected, using %s"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:460
+#, python-format
+msgid "Failed to get SolidFire Volume: %s"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:469
+#, python-format
+msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:478
+#, python-format
+msgid "Volume %s, not found on SF Cluster."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:481
+#, python-format
+msgid "Found %(count)s volumes mapped to id: %(uuid)s."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:550
+msgid "Enter SolidFire delete_volume..."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:554
+#, python-format
+msgid "Account for Volume ID %s was not found on the SolidFire Cluster!"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:556
+msgid "This usually means the volume was never successfully created."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:569
+#, python-format
+msgid "Failed to delete SolidFire Volume: %s"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:572
+#: cinder/volume/drivers/solidfire.py:646
+#: cinder/volume/drivers/solidfire.py:709
+#: cinder/volume/drivers/solidfire.py:734
+#, python-format
+msgid "Volume ID %s was not found on the SolidFire Cluster!"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:575
+msgid "Leaving SolidFire delete_volume"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:579
+msgid "Executing SolidFire ensure_export..."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:587
+msgid "Executing SolidFire create_export..."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:638
+msgid "Entering SolidFire extend_volume..."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:660
+msgid "Leaving SolidFire extend_volume"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:665
+msgid "Updating cluster status info"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:673
+msgid "Failed to get updated stats"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:703
+#: cinder/volume/drivers/solidfire.py:728
+msgid "Entering SolidFire attach_volume..."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:773
+msgid "Leaving SolidFire transfer volume"
+msgstr ""
+
+#: cinder/volume/drivers/zadara.py:236
+#, python-format
+msgid "Sending %(method)s to %(url)s. Body \"%(body)s\""
+msgstr ""
+
+#: cinder/volume/drivers/zadara.py:260
+#, python-format
+msgid "Operation completed. %(data)s"
%(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:357 +#, python-format +msgid "Pool %(name)s: %(total)sGB total, %(free)sGB free" +msgstr "" + +#: cinder/volume/drivers/zadara.py:408 cinder/volume/drivers/zadara.py:531 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:438 +#, python-format +msgid "Create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:445 cinder/volume/drivers/zadara.py:490 +#: cinder/volume/drivers/zadara.py:516 +#, python-format +msgid "Volume %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:456 +#, python-format +msgid "Delete snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:464 +#, python-format +msgid "snapshot: original volume %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:472 +#, python-format +msgid "snapshot: snapshot %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:483 +#, python-format +msgid "Creating volume from snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:496 +#, python-format +msgid "Snapshot %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:614 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:40 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:79 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:83 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:91 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:98 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:107 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:115 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:130 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:137 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:144 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:152 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:157 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:167 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:177 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:188 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:197 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:218 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:230 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:241 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:257 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:266 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:278 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:287 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:292 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:302 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:312 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:321 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:342 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. 
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:354
+#, python-format
+msgid ""
+"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s."
+" Successfully cloned volume from source volume. Finding the clone "
+"relationship."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:365
+#, python-format
+msgid ""
+"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s."
+" Remove the clone relationship. Method: ModifyReplicaSynchronization "
+"ReplicationService: %(service)s Operation: 8 Synchronization: "
+"%(sync_name)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:381
+#, python-format
+msgid ""
+"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s"
+" Return code: %(rc)lu"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:390
+#, python-format
+msgid ""
+"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: "
+"%(srcname)s. Return code: %(rc)lu. Error: %(error)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:402
+#, python-format
+msgid ""
+"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: "
+"%(srcname)s Return code: %(rc)lu."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:411
+msgid "Entering delete_volume."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:413
+#, python-format
+msgid "Delete Volume: %(volume)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:420
+#, python-format
+msgid "Volume %(name)s not found on the array. No volume to delete."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:430
+#, python-format
+msgid ""
+"Error Delete Volume: %(volumename)s. Storage Configuration Service not "
+"found."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:438
+#, python-format
+msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:442
+#, python-format
+msgid ""
+"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigService: "
+"%(service)s TheElement: %(vol_instance)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:456
+#, python-format
+msgid ""
+"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: "
+"%(error)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:465
+#, python-format
+msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:472
+msgid "Entering create_snapshot."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:476
+#, python-format
+msgid "Create snapshot: %(snapshot)s: volume: %(volume)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:488
+#, python-format
+msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:495
+#: cinder/volume/drivers/emc/emc_smis_common.py:497
+#: cinder/volume/drivers/emc/emc_smis_common.py:567
+#, python-format
+msgid "Cannot find Replication Service to create snapshot for volume %s."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:502
+#, python-format
+msgid ""
+"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s "
+"Source: %(volume)s Replication Service: %(service)s ElementName: "
+"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s."
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:518 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:527 +#, python-format +msgid "" +"Error Create Snapshot: %(snapshot)s Volume: %(volume)s Error: " +"%(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:535 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:541 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:545 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:551 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:559 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:574 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:590 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:599 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:611 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:621 +#, python-format +msgid "Create export: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:626 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:648 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:663 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:674 +#, python-format +msgid "Error mapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:678 +#, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:694 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:707 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:711 +#, python-format +msgid "HidePaths for volume %s completed successfully." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:724 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:739 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:744 +#, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:757 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:770 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:775 +#, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:781 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:790 +#: cinder/volume/drivers/emc/emc_smis_common.py:820 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:804 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:810 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:834 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:840 +#, python-format +msgid "Volume %s is already mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:852 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:884 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:887 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:903 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:906 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:928 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:948 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:952 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:959 +msgid "Cannot connect to ECOM server" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:971 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:984 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:997 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1010 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1054 +#, python-format +msgid "Pool %(storage_type)s is not found." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1060 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1066 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1082 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1114 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1117 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1130 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1158 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1184 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1188 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1248 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1289 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1302 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1314 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1326 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1361 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1404 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1409 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1419 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1441 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1463 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1491 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1520 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1526 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1538 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1548 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1550 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1566 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:152 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:161 +#, python-format +msgid "Cannot find device number for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:191 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:198 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:215 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:70 +#, python-format +msgid "Range: start LU: %(start)s, end LU: %(end)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:84 +#, python-format +msgid "setting LU upper (end) limit to %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:92 +#, python-format +msgid "%(element)s: %(val)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:103 cinder/volume/drivers/hds/hds.py:105 +#, python-format +msgid "XML exception reading parameter: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:178 +#, python-format +msgid "portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:197 +#, python-format +msgid "No configuration found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:250 +#, python-format +msgid "HDP not found: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:289 +#, python-format +msgid "iSCSI portal not found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:327 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:355 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is cloned." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:372 +#, python-format +msgid "LUN %(lun)s extended to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:395 +#, python-format +msgid "delete lun %(lun)s on %(name)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:480 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created from snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:503 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is created as snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:522 +#, python-format +msgid "LUN %s is deleted." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:57 +msgid "_instantiate_driver: configuration not found." 
+msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:64 +#, python-format +msgid "" +"_instantiate_driver: Loading %(protocol)s driver for Huawei OceanStor " +"%(product)s series storage arrays." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:84 +#, python-format +msgid "" +"\"Product\" or \"Protocol\" is illegal. \"Product\" should be set to " +"either T, Dorado or HVS. \"Protocol\" should be set to either iSCSI or " +"FC. Product: %(product)s Protocol: %(protocol)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:74 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s host: %(host)s initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:92 +#: cinder/volume/drivers/huawei/huawei_t.py:461 +#, python-format +msgid "initialize_connection: Target FC ports WWNS: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:101 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(ini)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:159 +#: cinder/volume/drivers/huawei/rest_common.py:1278 +#, python-format +msgid "" +"_get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " +"check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:206 +#: cinder/volume/drivers/huawei/rest_common.py:1083 +#, python-format +msgid "_get_tgt_iqn: iSCSI IP is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:234 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:248 +#, python-format +msgid "" +"_get_iscsi_tgt_port_info: Failed to get iSCSI port info. Please make sure" +" the iSCSI port IP %s is configured in array." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:323 +#: cinder/volume/drivers/huawei/huawei_t.py:552 +#, python-format +msgid "" +"terminate_connection: volume: %(vol)s, host: %(host)s, connector: " +"%(initiator)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:351 +#, python-format +msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:436 +msgid "validate_connector: The FC driver requires thewwpns in the connector." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:443 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:578 +#, python-format +msgid "_remove_fc_ports: FC port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:40 +#, python-format +msgid "parse_xml_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:129 +#, python-format +msgid "_get_host_os_type: Host %(ip)s OS type is %(os)s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:59 +#, python-format +msgid "HVS Request URL: %(url)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:60 +#, python-format +msgid "HVS Request Data: %(data)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:73 +#, python-format +msgid "HVS Response Data: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:75 +#, python-format +msgid "Bad response from server: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:82 +msgid "JSON transfer error" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:102 +#, python-format +msgid "Login error, reason is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:166 +#, python-format +msgid "" +"%(err)s\n" +"result: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:173 +#, python-format +msgid "%s \"data\" was not in result." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:208 +msgid "Can't find the Qos policy in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:246 +msgid "Can't find lun or lun group in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:280 +#, python-format +msgid "Invalid resource pool: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:298 +#, python-format +msgid "Get pool info error, pool name is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:327 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:354 +#, python-format +msgid "_stop_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:474 +#, python-format +msgid "" +"_mapping_hostgroup_and_lungroup: lun_group: %(lun_group)sview_id: " +"%(view_id)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:511 +#: cinder/volume/drivers/huawei/rest_common.py:543 +#, python-format +msgid "initiator name:%(initiator_name)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:527 +#, python-format +msgid "host lun id is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:553 +#, python-format +msgid "the free wwns %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:574 +#, python-format +msgid "the fc server properties is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:688 +#, python-format +msgid "JSON transfer data error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:874 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:937 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:964 +#, python-format +msgid "" +"PrefetchType config is wrong. PrefetchType must in 1,2,3,4. fetchtype " +"is:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:970 +msgid "Use default prefetch fetchtype. Prefetch fetchtype:Intelligent." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:982 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal.LUNcopy name: " +"%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1056 +#, python-format +msgid "" +"_get_iscsi_port_info: Failed to get iscsi port info through config IP " +"%(ip)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1101 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1124 +#, python-format +msgid "_parse_volume_type: type id: %(type_id)s config parameter is: %(params)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1157 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the configuration file " +"%(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1162 +#, python-format +msgid "The config parameters are: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1239 +#: cinder/volume/drivers/huawei/ssh_common.py:118 +#: cinder/volume/drivers/huawei/ssh_common.py:1265 +#, python-format +msgid "_check_conf_file: Config file invalid. %s must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1246 +#: cinder/volume/drivers/huawei/ssh_common.py:125 +msgid "_check_conf_file: Config file invalid. StoragePool must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1256 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1300 +msgid "Can not find lun in array" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:54 +#, python-format +msgid "ssh_read: Read SSH timeout. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:70 +msgid "No response message. Please check system status." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:101 +#: cinder/volume/drivers/huawei/ssh_common.py:1249 +msgid "do_setup" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:135 +#: cinder/volume/drivers/huawei/ssh_common.py:1287 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType is invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:169 +#, python-format +msgid "_get_login_info: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:224 +#, python-format +msgid "create_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:242 +#, python-format +msgid "" +"_name_translate: Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:279 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the element in configuration " +"file %(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:373 +#: cinder/volume/drivers/huawei/ssh_common.py:1451 +#, python-format +msgid "LUNType must be \"Thin\" or \"Thick\". LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:395 +msgid "" +"_parse_conf_lun_params: Use default prefetch type. 
Prefetch type: " +"Intelligent" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:421 +#, python-format +msgid "" +"_get_maximum_capacity_pool_id: Failed to get pool id. Please check config" +" file and make sure the StoragePool %s is created in storage array." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:436 +#, python-format +msgid "CLI command: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:466 +#, python-format +msgid "" +"_execute_cli: Can not connect to IP %(old)s, try to connect to the other " +"IP %(new)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:501 +#, python-format +msgid "_execute_cli: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:511 +#, python-format +msgid "delete_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:516 +#, python-format +msgid "delete_volume: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:570 +#, python-format +msgid "" +"create_volume_from_snapshot: snapshot name: %(snapshot)s, volume name: " +"%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:580 +#, python-format +msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:650 +#, python-format +msgid "_wait_for_luncopy: LUNcopy %(luncopyname)s status is %(status)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:688 +#, python-format +msgid "create_cloned_volume: src volume: %(src)s, tgt volume: %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:697 +#, python-format +msgid "Source volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:739 +#, python-format +msgid "" +"extend_volume: extended volume name: %(extended_name)s new added volume " +"name: %(added_name)s new added volume size: %(added_size)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:747 +#, python-format +msgid "extend_volume: volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:779 +#, python-format +msgid "create_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:785 +msgid "create_snapshot: Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:792 +#, python-format +msgid "create_snapshot: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:855 +#, python-format +msgid "delete_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:865 +#, python-format +msgid "" +"delete_snapshot: Can not delete snapshot %s for it is a source LUN of " +"LUNCopy." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:873 +#, python-format +msgid "delete_snapshot: Snapshot %(snap)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:916 +#, python-format +msgid "" +"%(func)s: %(msg)s\n" +"CLI command: %(cmd)s\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:933 +#, python-format +msgid "map_volume: Volume %s was not found." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1079 +#, python-format +msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1102 +#, python-format +msgid "remove_map: Host %s does not exist." 
+msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1106 +#, python-format +msgid "remove_map: Volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1119 +#, python-format +msgid "remove_map: No map between host %(host)s and volume %(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1138 +#, python-format +msgid "" +"_delete_map: There are IOs accessing the system. Retry to delete host map" +" %(mapid)s 10s later." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1146 +#, python-format +msgid "" +"_delete_map: Failed to delete host map %(mapid)s.\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1185 +msgid "_update_volume_stats: Updating volume stats." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1277 +msgid "_check_conf_file: Config file invalid. StoragePool must be specified." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1311 +msgid "" +"_get_device_type: The driver only supports Dorado5100 and Dorado 2100 G2 " +"now." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1389 +#, python-format +msgid "" +"create_volume_from_snapshot: %(device)s does not support create volume " +"from snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1396 +#, python-format +msgid "create_cloned_volume: %(device)s does not support clone volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1404 +#, python-format +msgid "extend_volume: %(device)s does not support extend volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1413 +#, python-format +msgid "create_snapshot: %(device)s does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:132 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:142 +#, python-format +msgid "Failed getting details for pool %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:179 +msgid "do_setup: No configured nodes." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:182 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:186 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:190 +msgid "Unable to determine system name" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:193 +msgid "Unable to determine system id" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:196 +msgid "Unable to determine pool extent size" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:203 +#: cinder/volume/drivers/netapp/iscsi.py:122 +#: cinder/volume/drivers/netapp/nfs.py:639 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:157 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:209 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:217 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:225 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:235 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:254 +msgid "The connector does not contain the required information." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:277 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:297 +msgid "CHAP secret exists for host but CHAP is disabled" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:302 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:314 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:316 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:333 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:342 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:365 +msgid "" +"Could not get FC connection information for the host-volume connection. " +"Is the host configured properly for FC connections?" 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:380 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:385 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:403 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:411 +msgid "terminate_connection: Failed to get host name from connector." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:421 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:447 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:459 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:470 +#, python-format +msgid "enter: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:474 +msgid "extend_volume: Extending a volume with snapshots is not supported." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:481 +#, python-format +msgid "leave: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:497 +#, python-format +msgid "enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:523 +#, python-format +msgid "leave: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:540 +#, python-format +msgid "" +"enter: retype: id=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:581 +#, python-format +msgid "" +"exit: retype: ild=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:622 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:623 +msgid "_update_volume_stats: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:44 +#, python-format +msgid "Could not find key in output of command %(cmd)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:64 +#, python-format +msgid "Failed to get code level (%s)." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:86 +#, python-format +msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:143 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:165 +#, python-format +msgid "Failed to find host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:178 +#, python-format +msgid "enter: get_host_from_connector: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:207 +#, python-format +msgid "leave: get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:218 +#, python-format +msgid "enter: create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:224 +msgid "create_host: Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:235 +msgid "create_host: No initiators or wwpns supplied." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:265 +#, python-format +msgid "leave: create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:275 +#, python-format +msgid "enter: map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:301 +#, python-format +msgid "" +"leave: map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host " +"%(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:311 +#, python-format +msgid "enter: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:318 +#, python-format +msgid "unmap_vol_from_host: No mapping of volume %(vol_name)s to any host found." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:324 +#, python-format +msgid "" +"unmap_vol_from_host: Multiple mappings of volume %(vol_name)s found, no " +"host specified." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:336 +#, python-format +msgid "" +"unmap_vol_from_host: No mapping of volume %(vol_name)s to host %(host) " +"found." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:348 +#, python-format +msgid "leave: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:377 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:383 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:390 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:397 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:402 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:408 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:417 +#, python-format +msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:452 +msgid "Protocol must be specified as ' iSCSI' or ' FC'." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:495 +#, python-format +msgid "enter: create_vdisk: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:498 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:525 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping%(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:535 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within theallotted %(to)d " +"seconds timeout. Terminating." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:544 +#, python-format +msgid "" +"enter: run_flashcopy: execute FlashCopy from source %(source)s to target " +"%(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:552 +#, python-format +msgid "leave: run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:572 +#, python-format +msgid "Loopcall: _check_vdisk_fc_mappings(), vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:595 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:631 +#, python-format +msgid "Calling _ensure_vdisk_no_fc_mappings: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:639 +#, python-format +msgid "enter: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:641 +#, python-format +msgid "Tried to delete non-existant vdisk %s." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:645 +#, python-format +msgid "leave: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:649 +#, python-format +msgid "enter: create_copy: snapshot %(src)s to %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:654 +#, python-format +msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:669 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt)s from vdisk %(src)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:691 +msgid "migrate_volume started without a vdisk copy in the expected pool." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:743 +#, python-format +msgid "" +"Ignore change IO group as storage code level is %(code_level)s, below " +"then 6.4.0.0" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:35 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:211 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:244 +#, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:53 +#, python-format +msgid "Expected no output from CLI command %(cmd)s, got %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:65 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:256 +#, python-format +msgid "" +"Failed to parse CLI output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:142 +msgid "Must pass wwpn or host to lsfabric." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:158 +#, python-format +msgid "Did not find success message nor error for %(fun)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:163 +msgid "" +"storwize_svc_multihostmap_enabled is set to False, not allowing multi " +"host mapping." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:347 +#, python-format +msgid "Did not find expected key %(key)s in %(fun)s: %(raw)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:382 +#, python-format +msgid "" +"Unexpected CLI response: header/row mismatch. header: %(header)s, row: " +"%(row)s" +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:419 +#, python-format +msgid "No element by given name %s." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:440 +msgid "Not a valid value for NaElement." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:444 +msgid "NaElement name cannot be null." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:468 +msgid "Type cannot be converted into NaElement." 
+msgstr "" + +#: cinder/volume/drivers/netapp/common.py:75 +msgid "Required configuration not found" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:103 +#, python-format +msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:109 +#, python-format +msgid "Storage family %s is not supported" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:116 +#, python-format +msgid "No default storage protocol found for storage family %(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:123 +#, python-format +msgid "" +"Protocol %(storage_protocol)s is not supported for storage family " +"%(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:130 +#, python-format +msgid "" +"NetApp driver of family %(storage_family)s and protocol " +"%(storage_protocol)s loaded" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:139 +msgid "Only loading netapp drivers supported." +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:158 +#, python-format +msgid "" +"The configured NetApp driver is deprecated. Please refer the link to " +"resolve the issue '%s'." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:69 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:105 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:150 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:166 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:175 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:191 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:227 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:232 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:238 +#, python-format +msgid "Failed to get LUN target details for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:249 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:252 +#, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:290 +#, python-format +msgid "Snapshot %s deletion successful" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:310 +#: cinder/volume/drivers/netapp/iscsi.py:565 +#: cinder/volume/drivers/netapp/nfs.py:99 +#: cinder/volume/drivers/netapp/nfs.py:206 +#, python-format +msgid "Resizing %s failed. Cleaning volume." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:325 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:412 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:431 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:511 +msgid "Object is not a NetApp LUN." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:543 +#, python-format +msgid "Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:545 +#, python-format +msgid "Error getting lun attribute. Exception: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:600 +#, python-format +msgid "No need to extend volume %s as it is already the requested new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:606 +#, python-format +msgid "Resizing lun %s directly to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:633 +#, python-format +msgid "Lun %(path)s geometry failed. Message - %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:662 +#, python-format +msgid "Moving lun %(name)s to %(new_name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:677 +#, python-format +msgid "Resizing lun %s using sub clone to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:684 +#, python-format +msgid "%s cannot be sub clone resized as it is hosted on compressed volume" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:690 +#, python-format +msgid "%s cannot be sub clone resized as it contains no blocks." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:707 +#, python-format +msgid "Post clone resize lun %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:718 +#, python-format +msgid "Failure staging lun %s to tmp." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:723 +#, python-format +msgid "Failure moving new cloned lun to %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:727 +#, python-format +msgid "Failure deleting staged tmp lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:730 +#, python-format +msgid "Unknown exception in post clone resize lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:732 +#, python-format +msgid "Exception details: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:736 +msgid "Getting lun block count." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:741 +#, python-format +msgid "Failure getting lun info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:785 +#, python-format +msgid "Failed to get vol with required size and extra specs for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:796 +#, python-format +msgid "Error provisioning vol %(name)s on %(volume)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:841 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:982 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:986 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1087 +msgid "Cluster ssc is not updated. No volume stats found." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1149 +#: cinder/volume/drivers/netapp/nfs.py:1080 +msgid "Unsupported ONTAP version. ONTAP version 7.3.1 and above is supported." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1153 +#: cinder/volume/drivers/netapp/nfs.py:1084 +#: cinder/volume/drivers/netapp/utils.py:320 +msgid "Api version could not be determined." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1164 +#, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1273 +#, python-format +msgid "Error finding luns for volume %s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1390 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1393 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1456 +msgid "Volume refresh job already running. Returning..." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1462 +#, python-format +msgid "Error refreshing vol capacity. Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1470 +#, python-format +msgid "Refreshing capacity info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:104 +#: cinder/volume/drivers/netapp/nfs.py:211 +#, python-format +msgid "NFS file %s not discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:224 +#, python-format +msgid "Copied image to volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:230 +#, python-format +msgid "Registering image in cache %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:237 +#, python-format +msgid "" +"Exception while registering image %(image_id)s in cache. Exception: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:250 +#, python-format +msgid "Found cache file for image %(image_id)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:263 +#, python-format +msgid "Cloning img from cache for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:271 +msgid "Image cache cleaning in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:282 +msgid "Image cache cleaning in progress." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:293 +#, python-format +msgid "Cleaning cache for share %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:298 +#, python-format +msgid "Files to be queued for deletion %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:305 +#, python-format +msgid "Exception during cache cleaning %(share)s. Message - %(ex)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:311 +msgid "Image cache cleaning done." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:336 +#, python-format +msgid "Bytes to free %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:343 +#, python-format +msgid "Delete file path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:358 +#, python-format +msgid "Deleting file at path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:363 +#, python-format +msgid "Exception during deleting %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:395 +#, python-format +msgid "Unexpected exception in cloning image %(image_id)s. 
Message: %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:411 +#, python-format +msgid "Cloning image %s from cache" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:415 +#, python-format +msgid "Cache share: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:425 +#, python-format +msgid "Unexpected exception during image cloning in share %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:431 +#, python-format +msgid "Cloning image %s directly in share" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:436 +#, python-format +msgid "Share is cloneable %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:443 +#, python-format +msgid "Image is raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:450 +#, python-format +msgid "Image will locally be converted to raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:457 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:467 +#, python-format +msgid "Performing post clone for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:474 +msgid "NFS file could not be discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:478 +msgid "Checking file for resize" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:482 +#, python-format +msgid "Resizing file to %sG" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:488 +msgid "Resizing image file failed." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:510 +msgid "Discover file retries exhausted." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:529 +#, python-format +msgid "Image location not in the expected format %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:557 +#, python-format +msgid "Found possible share matches %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:561 +msgid "Unexpected exception while short listing used share." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:599 +#, python-format +msgid "Extending volume %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:710 +#, python-format +msgid "Shares on vserver %s will only be used for provisioning." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:714 +#: cinder/volume/drivers/netapp/nfs.py:892 +msgid "No vserver set in config. SSC will be disabled." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:757 +#, python-format +msgid "Exception creating vol %(name)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:765 +#, python-format +msgid "Volume %s could not be created on shares." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:815 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:856 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:862 +#, python-format +msgid "" +"Cloning with params volume %(volume)s, src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:924 +msgid "No cluster ssc stats found. Wait for next volume stats update." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:941 +msgid "No shares found hence skipping ssc refresh." 
+msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:978 +#: cinder/volume/drivers/netapp/nfs.py:1221 +#, python-format +msgid "Shortlisted del elg files %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:983 +#: cinder/volume/drivers/netapp/nfs.py:1226 +#, python-format +msgid "Getting file usage for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:988 +#: cinder/volume/drivers/netapp/nfs.py:1231 +#, python-format +msgid "file-usage for path %(path)s is %(bytes)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1005 +#: cinder/volume/drivers/netapp/nfs.py:1268 +#, python-format +msgid "Share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1007 +#: cinder/volume/drivers/netapp/nfs.py:1270 +#, python-format +msgid "No share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1038 +#, python-format +msgid "Found volume %(vol)s for share %(share)s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1129 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1139 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:241 +#, python-format +msgid "Unexpected error while creating ssc vol list. Message - %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:272 +#, python-format +msgid "Exception querying aggr options. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:313 +#, python-format +msgid "Exception querying sis information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:347 +#, python-format +msgid "Exception querying mirror information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:379 +#, python-format +msgid "Exception querying storage disk. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:421 +#, python-format +msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:455 +#, python-format +msgid "Successfully completed stale refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:482 +#, python-format +msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:488 +#, python-format +msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:500 +msgid "Backend not a VolumeDriver." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:502 +msgid "Backend server not NaServer." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:505 +msgid "ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:517 +msgid "refresh stale ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:616 +msgid "Fatal error: User not permitted to query NetApp volumes." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:623 +#, python-format +msgid "" +"The user does not have access or sufficient privileges to use all ssc " +"apis. The ssc features %s may not work as expected." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:122 +msgid "ems executed successfully." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:124 +#, python-format +msgid "Failed to invoke ems. 
Message : %s" +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:137 +msgid "" +"It is not the recommended way to use drivers by NetApp. Please use " +"NetAppDriver to achieve the functionality." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:160 +msgid "Requires an NaServer instance." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:317 +msgid "Unsupported Clustered Data ONTAP version." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:99 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:150 +#, python-format +msgid "Extending volume: %(id)s New size: %(size)s GB" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:166 +#, python-format +msgid "Volume %s does not exist, it seems it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:179 +#, python-format +msgid "Cannot delete snapshot %(origin): %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:190 +#, python-format +msgid "Creating temp snapshot of the original volume: %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:200 +#: cinder/volume/drivers/nexenta/nfs.py:200 +#, python-format +msgid "Volume creation failed, deleting created snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:205 +#: cinder/volume/drivers/nexenta/nfs.py:205 +#, python-format +msgid "Failed to delete zfs snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:223 +#, python-format +msgid "Enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:250 +#, python-format +msgid "Remote NexentaStor appliance at %s should be SSH-bound." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:267 +#, python-format +msgid "" +"Cannot send source snapshot %(src)s to destination %(dst)s. Reason: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:275 +#, python-format +msgid "" +"Cannot delete temporary source snapshot %(src)s on NexentaStor Appliance:" +" %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:281 +#, python-format +msgid "Cannot delete source volume %(volume)s on NexentaStor Appliance: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:318 +#, python-format +msgid "Snapshot %s does not exist, it seems it was already deleted." 
+msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:439 +#: cinder/volume/drivers/windows/windows_utils.py:230 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:449 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:461 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:471 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:481 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:514 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:522 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:83 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:88 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:89 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:90 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:96 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:85 +#, python-format +msgid "Volume %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:89 +#, python-format +msgid "Folder %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:114 +#, python-format +msgid "Creating folder on Nexenta Store %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:146 +#, python-format +msgid "Cannot destroy created folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:176 +#, python-format +msgid "Cannot destroy cloned folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:227 +#, python-format +msgid "Folder %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:237 +#: cinder/volume/drivers/nexenta/nfs.py:268 +#, python-format +msgid "Snapshot %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:302 +#, python-format +msgid "Creating regular file: %s.This may take some time." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:313 +#, python-format +msgid "Regular file: %s created." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:365 +#, python-format +msgid "Sharing folder %s on Nexenta Store" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:393 +#, python-format +msgid "Shares loaded: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/utils.py:46 +#, python-format +msgid "Invalid value: \"%s\"" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:93 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:99 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:107 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:137 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:190 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:246 +#, python-format +msgid "Snapshot info: %(name)s => %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:321 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:79 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:166 +#, python-format +msgid "Invalid hp3parclient version. Version %s or greater required." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:179 +#, python-format +msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:193 +#, python-format +msgid "HP3PARCommon %(common_ver)s, hp3parclient %(rest_ver)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:212 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:494 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:228 +#, python-format +msgid "Failed to get domain because CPG (%s) doesn't exist on array." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:247 +#, python-format +msgid "Error extending volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:347 +#, python-format +msgid "command %s failed" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:390 +#, python-format +msgid "Error running ssh command: %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:586 +#, python-format +msgid "VV Set %s does not exist." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:633 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:684 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:752 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1004 +#, python-format +msgid "Failure in update_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1019 +#, python-format +msgid "Failure in clear_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1031 +#, python-format +msgid "Error attaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1039 +#, python-format +msgid "Error detaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:124 +#, python-format +msgid "Invalid IP address format '%s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:158 +#, python-format +msgid "" +"Found invalid iSCSI IP address(s) in configuration option(s) " +"hp3par_iscsi_ips or iscsi_ip_address '%s.'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:164 +msgid "At least one valid iSCSI IP address must be set." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:266 +msgid "Least busy iSCSI port not found, using first iSCSI port in list." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:75 +#, python-format +msgid "Failure while invoking function: %(func)s. Error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:162 +#, python-format +msgid "Error while terminating session: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:165 +msgid "Successfully established connection to the server." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:172 +#, python-format +msgid "Error while logging out the user: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:218 +#, python-format +msgid "" +"Not authenticated error occurred. Will create session and try API call " +"again: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:258 +#, python-format +msgid "Task: %(task)s progress: %(prog)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:262 +#, python-format +msgid "Task %s status: success." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:266 +#: cinder/volume/drivers/vmware/api.py:271 +#, python-format +msgid "Task: %(task)s failed with error: %(err)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:290 +msgid "Lease is ready." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:294 +msgid "Lease initializing..." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:304 +#, python-format +msgid "Error: unknown lease state %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:51 +#, python-format +msgid "Read %(bytes)s out of %(max)s from ThreadSafePipe." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:56 +#, python-format +msgid "Completed transfer of size %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:102 +#, python-format +msgid "Initiating image service update on image: %(image)s with meta: %(meta)s" +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:117 +#, python-format +msgid "Glance image: %s is now active." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:123 +#, python-format +msgid "Glance image: %s is in killed state." 
+msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:132 +#, python-format +msgid "Glance image %(id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:171 +#, python-format +msgid "" +"Exception during HTTP connection close in VMwareHTTPWrite. Exception is " +"%s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:203 +#: cinder/volume/drivers/vmware/read_write_util.py:292 +msgid "Could not retrieve URL from lease." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:206 +#, python-format +msgid "Opening vmdk url: %s for write." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:231 +#, python-format +msgid "Written %s bytes to vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:242 +#: cinder/volume/drivers/vmware/read_write_util.py:318 +#, python-format +msgid "Updating progress to %s percent." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:258 +#: cinder/volume/drivers/vmware/read_write_util.py:334 +msgid "Lease released." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:260 +#: cinder/volume/drivers/vmware/read_write_util.py:336 +#, python-format +msgid "Lease is already in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:295 +#, python-format +msgid "Opening vmdk url: %s for read." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:307 +#, python-format +msgid "Read %s bytes from vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:150 +#, python-format +msgid "Error(s): %s occurred in the call to RetrievePropertiesEx." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:189 +#, python-format +msgid "No such SOAP method %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:198 +#, python-format +msgid "httplib error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:209 +#, python-format +msgid "Socket error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:218 +#, python-format +msgid "Type error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:225 +#, python-format +msgid "Error in %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:112 +#, python-format +msgid "Returning spec value %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:115 +#, python-format +msgid "Invalid spec value: %s specified." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:118 +#, python-format +msgid "Returning default spec value: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:169 +#, python-format +msgid "%s not set." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:174 +#, python-format +msgid "Successfully setup driver: %(driver)s for server: %(ip)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:219 +msgid "Backing not available, no operation to be performed." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:287 +#, python-format +msgid "" +"Unable to pick datastore to accommodate %(size)s bytes from the " +"datastores: %(dss)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:293 +#, python-format +msgid "" +"Selected datastore: %(datastore)s with %(host_count)d connected host(s) " +"for the volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:375 +#, python-format +msgid "" +"Unable to find suitable datastore for volume of size: %(vol)s GB under " +"host: %(host)s. 
More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:385 +#, python-format +msgid "Unable to find host to accommodate a disk of size: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:412 +#, python-format +msgid "" +"Unable to find suitable datastore for volume: %(vol)s under host: " +"%(host)s. More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:422 +#, python-format +msgid "Unable to create volume: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:441 +#, python-format +msgid "The instance: %s for which initialize connection is called, exists." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:448 +#, python-format +msgid "There is no backing for the volume: %s. Need to create one." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:456 +msgid "The instance for which initialize connection is called, does not exist." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:461 +#, python-format +msgid "Trying to boot from an empty volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:470 +#, python-format +msgid "" +"Returning connection_info: %(info)s for volume: %(volume)s with " +"connector: %(connector)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:518 +#, python-format +msgid "Snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:523 +#, python-format +msgid "There is no backing, so will not create snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:528 +#, python-format +msgid "Successfully created snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:549 +#, python-format +msgid "Delete snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:554 +#, python-format +msgid "There is no backing, and so there is no snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:558 +#, python-format +msgid "Successfully deleted snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:586 +#, python-format +msgid "Successfully cloned new backing: %(back)s from source VMDK file: %(vmdk)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:603 +#, python-format +msgid "" +"There is no backing for the source volume: %(svol)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:633 +#, python-format +msgid "" +"There is no backing for the source snapshot: %(snap)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:642 +#: cinder/volume/drivers/vmware/vmdk.py:982 +#, python-format +msgid "" +"There is no snapshot point for the snapshoted volume: %(snap)s. Not " +"creating any backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:678 +#, python-format +msgid "Cannot create image of disk format: %s. Only vmdk disk format is accepted." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:713 +#: cinder/volume/drivers/vmware/vmdk.py:771 +#, python-format +msgid "Fetching glance image: %(id)s to server: %(host)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:722 +#: cinder/volume/drivers/vmware/vmdk.py:792 +#, python-format +msgid "Done copying image: %(id)s to volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:725 +#, python-format +msgid "" +"Exception in copy_image_to_volume: %(excep)s. Deleting the backing: " +"%(back)s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:746 +#, python-format +msgid "Exception in _select_ds_for_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:749 +#, python-format +msgid "Selected datastore %(ds)s for new volume of size %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:784 +#, python-format +msgid "Exception in copy_image_to_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:787 +#, python-format +msgid "Deleting the backing: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:808 +#, python-format +msgid "Copy glance image: %s to create new volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:842 +msgid "Upload to glance of attached volume is not supported." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:847 +#, python-format +msgid "Copy Volume: %s to new image." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:853 +#, python-format +msgid "Backing not found, creating for volume: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:872 +#, python-format +msgid "Done copying volume %(vol)s to a new image %(img)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:922 +#, python-format +msgid "Relocating volume: %(backing)s to %(ds)s and %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:961 +#: cinder/volume/drivers/vmware/volumeops.py:630 +#, python-format +msgid "Successfully created clone: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:974 +#, python-format +msgid "" +"There is no backing for the snapshoted volume: %(snap)s. Not creating any" +" backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1010 +#, python-format +msgid "" +"There is no backing for the source volume: %(src)s. Not creating any " +"backing for volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1018 +#, python-format +msgid "Linked clone of source volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:94 +#, python-format +msgid "Downloading image: %s from glance image server as a flat vmdk file." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:107 +#: cinder/volume/drivers/vmware/vmware_images.py:126 +#, python-format +msgid "Downloaded image: %s from glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:113 +#, python-format +msgid "Downloading image: %s from glance image server using HttpNfc import." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:132 +#, python-format +msgid "Uploading image: %s to the Glance image server using HttpNfc export." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:158 +#, python-format +msgid "Uploaded image: %s to the Glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:87 +#, python-format +msgid "Did not find any backing with name: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:94 +#, python-format +msgid "Deleting the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:97 +#, python-format +msgid "Initiated deletion of VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:99 +#, python-format +msgid "Deleted the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:237 +#, python-format +msgid "There are no valid datastores attached to %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:289 +#, python-format +msgid "" +"Creating folder: %(child_folder_name)s under parent folder: " +"%(parent_folder)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:306 +#, python-format +msgid "Child folder already present: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:314 +#, python-format +msgid "Created child folder: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:365 +#, python-format +msgid "Spec for creating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:383 +#, python-format +msgid "" +"Creating volume backing name: %(name)s disk_type: %(disk_type)s size_kb: " +"%(size_kb)s at folder: %(folder)s resourse pool: %(resource_pool)s " +"datastore name: %(ds_name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:395 +#, python-format +msgid "Initiated creation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:398 +#, python-format +msgid "Successfully created volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:438 +#, python-format +msgid "Spec for relocating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:452 +#, python-format +msgid "" +"Relocating backing: %(backing)s to datastore: %(ds)s and resource pool: " +"%(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:462 +#, python-format +msgid "Initiated relocation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:464 +#, python-format +msgid "" +"Successfully relocated volume backing: %(backing)s to datastore: %(ds)s " +"and resource pool: %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:474 +#, python-format +msgid "Moving backing: %(backing)s to folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:479 +#, python-format +msgid "Initiated move of volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:482 +#, python-format +msgid "Successfully moved volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:494 +#, python-format +msgid "Snapshoting backing: %(backing)s with name: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:501 +#, python-format +msgid "Initiated snapshot of volume backing: %(backing)s named: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:505 +#, python-format +msgid "Successfully created snapshot: %(snap)s for volume backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:553 +#, python-format +msgid "Deleting the snapshot: %(name)s from backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:558 +#, python-format +msgid "" +"Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " +"delete anything." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:565 +#, python-format +msgid "Initiated snapshot: %(name)s deletion for backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:569 +#, python-format +msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:597 +#, python-format +msgid "Spec for cloning the backing: %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:613 +#, python-format +msgid "" +"Creating a clone of backing: %(back)s, named: %(name)s, clone type: " +"%(type)s from snapshot: %(snap)s on datastore: %(ds)s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:627 +#, python-format +msgid "Initiated clone of backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:638 +#, python-format +msgid "Deleting file: %(file)s under datacenter: %(dc)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:646 +#, python-format +msgid "Initiated deletion via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:648 +#, python-format +msgid "Successfully deleted file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:701 +msgid "Copying disk data before snapshot of the VM" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:711 +#, python-format +msgid "Initiated copying disk data via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:713 +#, python-format +msgid "Successfully copied disk at: %(src)s to: %(dest)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:722 +#, python-format +msgid "Deleting vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:729 +#, python-format +msgid "Initiated deleting vmdk file via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:731 +#, python-format +msgid "Deleted vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/windows/windows.py:102 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:47 +#, python-format +msgid "" +"check_for_setup_error: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:53 +msgid "check_for_setup_error: there is no ISCSI traffic listening." +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:63 +#, python-format +msgid "" +"get_host_information: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:73 +#, python-format +msgid "" +"get_host_information: the ISCSI target information could not be " +"retrieved. WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:105 +#, python-format +msgid "" +"associate_initiator_with_iscsi_target: an association between initiator: " +"%(init)s and target name: %(target)s could not be established. WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:123 +#, python-format +msgid "" +"delete_iscsi_target: error when deleting the iscsi target associated with" +" target name: %(target)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:139 +#, python-format +msgid "" +"create_volume: error when creating the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:157 +#, python-format +msgid "" +"delete_volume: error when deleting the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:177 +#, python-format +msgid "" +"create_snapshot: error when creating the snapshot name: %(vol_name)s . 
" +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:193 +#, python-format +msgid "" +"create_volume_from_snapshot: error when creating the volume name: " +"%(vol_name)s from snapshot name: %(snap_name)s. WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:208 +#, python-format +msgid "" +"delete_snapshot: error when deleting the snapshot name: %(snap_name)s . " +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:223 +#, python-format +msgid "" +"create_iscsi_target: error when creating iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:240 +#, python-format +msgid "" +"remove_iscsi_target: error when deleting iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:255 +#, python-format +msgid "" +"add_disk_to_target: error adding disk associated to volume : %(vol_name)s" +" to the target name: %(tar_name)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:273 +#, python-format +msgid "" +"copy_vhd_disk: error when copying disk from source path : %(src_path)s to" +" destination path: %(dest_path)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:290 +#, python-format +msgid "" +"extend: error when extending the volume: %(vol_name)s .WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/flows/common.py:52 +#, python-format +msgid "Restoring source %(source_volid)s status to %(status)s" +msgstr "" + +#: cinder/volume/flows/common.py:58 +#, python-format +msgid "" +"Failed setting source volume %(source_volid)s back to its initial " +"%(source_status)s status" +msgstr "" + +#: cinder/volume/flows/common.py:83 +#, python-format +msgid "Updating volume: %(volume_id)s with %(update)s due to: %(reason)s" +msgstr "" + +#: cinder/volume/flows/common.py:90 +#: cinder/volume/flows/manager/create_volume.py:676 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(update)s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:81 +#, python-format +msgid "Originating snapshot status must be one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:103 +#, python-format +msgid "" +"Unable to create a volume from an originating source volume when its " +"status is not one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:126 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than the snapshot size " +"%(snap_size)sGB. They must be >= original snapshot size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:135 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than original volume size " +"%(source_size)sGB. They must be >= original volume size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:144 +#, python-format +msgid "Volume size %(size)s must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:186 +#, python-format +msgid "" +"Size of specified image %(image_size)sGB is larger than volume size " +"%(volume_size)sGB." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:194 +#, python-format +msgid "" +"Volume size %(volume_size)sGB cannot be smaller than the image minDisk " +"size %(min_disk)sGB." 
+msgstr "" + +#: cinder/volume/flows/api/create_volume.py:212 +#, python-format +msgid "Metadata property key %s greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:217 +#, python-format +msgid "Metadata property key %s value greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:254 +#, python-format +msgid "Availability zone '%s' is invalid" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:267 +msgid "Volume must be in the same availability zone as the snapshot" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:276 +msgid "Volume must be in the same availability zone as the source volume" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:315 +msgid "Volume type will be changed to be the same as the source volume." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:463 +#, python-format +msgid "Failed destroying volume entry %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:546 +#, python-format +msgid "Failed rolling back quota for %s reservations" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:590 +#, python-format +msgid "Failed to update quota for deleting volume: %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:678 +#: cinder/volume/flows/manager/create_volume.py:192 +#, python-format +msgid "Volume %s: create failed" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:682 +msgid "Unexpected build error:" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:105 +#, python-format +msgid "" +"Volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d due to " +"%(reason)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:124 +#, python-format +msgid "Volume %s: re-scheduled" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:141 +#, python-format +msgid "Updating volume %(volume_id)s with %(update)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:146 +#, python-format +msgid "Volume %s: resetting 'creating' status failed." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:165 +#, python-format +msgid "Volume %s: rescheduling failed" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:308 +#, python-format +msgid "" +"Failed notifying about the volume action %(event)s for volume " +"%(volume_id)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:345 +#, python-format +msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:347 +#, python-format +msgid "" +"Failed updating volume %(vol_id)s metadata using the provided " +"%(src_type)s %(src_id)s metadata" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:405 +#, python-format +msgid "" +"Failed fetching snapshot %(snapshot_id)s bootable flag using the provided" +" glance snapshot %(snapshot_ref_id)s volume reference" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:418 +#, python-format +msgid "Marking volume %s as bootable." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:421 +#, python-format +msgid "Failed updating volume %(volume_id)s bootable flag to true" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:448 +#, python-format +msgid "" +"Attempting download of %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s." 
+msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:455 +#: cinder/volume/flows/manager/create_volume.py:466 +#, python-format +msgid "" +"Failed to copy image %(image_id)s to volume: %(volume_id)s, error: " +"%(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:461 +#, python-format +msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:475 +#, python-format +msgid "" +"Downloaded image %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s successfully." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:512 +#, python-format +msgid "" +"Creating volume glance metadata for volume %(volume_id)s backed by image " +"%(image_id)s with: %(vol_metadata)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:526 +#, python-format +msgid "" +"Cloning %(volume_id)s from image %(image_id)s at location " +"%(image_location)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:552 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(updates)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:574 +#, python-format +msgid "Unable to create volume. Volume driver %s not initialized" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:588 +#, python-format +msgid "" +"Volume %(volume_id)s: being created using %(functor)s with specification:" +" %(volume_spec)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:611 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with creation provided " +"model %(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:619 +#, python-format +msgid "Volume %s: creating export" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:633 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with driver provided model " +"%(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:680 +#, python-format +msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" +msgstr "" + +#~ msgid "Connection to glance failed" +#~ msgstr "" + +#~ msgid "Invalid snapshot" +#~ msgstr "" + +#~ msgid "Invalid source volume %(reason)s." +#~ msgstr "" + +#~ msgid "The request is invalid." +#~ msgstr "" + +#~ msgid "Invalid input received" +#~ msgstr "" + +#~ msgid "Invalid volume type" +#~ msgstr "" + +#~ msgid "Invalid volume" +#~ msgstr "" + +#~ msgid "Invalid host" +#~ msgstr "" + +#~ msgid "Invalid auth key" +#~ msgstr "" + +#~ msgid "Volume %(volume_id)s persistence file could not be found." +#~ msgstr "" + +#~ msgid "Invalid metadata" +#~ msgstr "" + +#~ msgid "Invalid metadata size" +#~ msgstr "" + +#~ msgid "No disk at %(location)s" +#~ msgstr "" + +#~ msgid "" +#~ msgstr "" + +#~ msgid "Class %(class_name)s could not be found: %(exception)s" +#~ msgstr "" + +#~ msgid "Action not allowed." +#~ msgstr "" + +#~ msgid "Key pair %(key_name)s already exists." +#~ msgstr "" + +#~ msgid "Migration error" +#~ msgstr "" + +#~ msgid "Quota exceeded" +#~ msgstr "" + +#~ msgid "Maximum volume/snapshot size exceeded" +#~ msgstr "" + +#~ msgid "3PAR Host already exists: %(err)s. %(info)s" +#~ msgstr "" + +#~ msgid "Backup volume %(volume_id)s type not recognised." 
+#~ msgstr "" + +#~ msgid "Connection to swift failed" +#~ msgstr "" + +#~ msgid "Volume migration failed" +#~ msgstr "" + +#~ msgid "SSH command injection detected" +#~ msgstr "" + +#~ msgid "Invalid qos specs" +#~ msgstr "" + +#~ msgid "base image still has %s snapshots so not deleting base image" +#~ msgstr "" + +#~ msgid "Resize volume \"%(name)s\" to %(size)s" +#~ msgstr "" + +#~ msgid "Volume \"%(name)s\" resized. New size is %(size)s" +#~ msgstr "" + +#~ msgid "Invalid snapshot backing file format: %s" +#~ msgstr "" + +#~ msgid "Extend volume from %(old_size) to %(new_size)" +#~ msgstr "" + +#~ msgid "Disk not found: %s" +#~ msgstr "" + +#~ msgid "read timed out" +#~ msgstr "" + +#~ msgid "do_setup." +#~ msgstr "" + +#~ msgid "check_for_setup_error." +#~ msgstr "" + +#~ msgid "check_for_setup_error: Can not get device type." +#~ msgstr "" + +#~ msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +#~ msgstr "" + +#~ msgid "_get_device_type: Storage Pool must be configured." +#~ msgstr "" + +#~ msgid "create_volume:volume name: %s." +#~ msgstr "" + +#~ msgid "delete_volume: volume name: %s." +#~ msgstr "" + +#~ msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +#~ msgstr "" + +#~ msgid "create_export: volume name:%s" +#~ msgstr "" + +#~ msgid "create_export:Volume %(name)s does not exist." +#~ msgstr "" + +#~ msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +#~ msgstr "" + +#~ msgid "terminate_connection:Host does not exist. Host name:%(host)s." +#~ msgstr "" + +#~ msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +#~ msgstr "" + +#~ msgid "create_snapshot:Device does not support snapshot." +#~ msgstr "" + +#~ msgid "create_snapshot:Resource pool needs 1GB valid size at least." +#~ msgstr "" + +#~ msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +#~ msgstr "" + +#~ msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s" +#~ msgstr "" + +#~ msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +#~ msgstr "" + +#~ msgid "delete_snapshot:Device does not support snapshot." +#~ msgstr "" + +#~ msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +#~ msgstr "" + +#~ msgid "_check_conf_file: %s" +#~ msgstr "" + +#~ msgid "Write login information to xml error. %s" +#~ msgstr "" + +#~ msgid "_get_login_info error. %s" +#~ msgstr "" + +#~ msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +#~ msgstr "" + +#~ msgid "_get_lun_set_info:%s" +#~ msgstr "" + +#~ msgid "_get_iscsi_info:%s" +#~ msgstr "" + +#~ msgid "CLI command:%s" +#~ msgstr "" + +#~ msgid "_execute_cli:%s" +#~ msgstr "" + +#~ msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +#~ msgstr "" + +#~ msgid "_get_tgt_iqn:iSCSI IP is %s." +#~ msgstr "" + +#~ msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +#~ msgstr "" + +#~ msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +#~ msgstr "" + +#~ msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +#~ msgstr "" + +#~ msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." +#~ msgstr "" + +#~ msgid "restore finished." +#~ msgstr "" + +#~ msgid "Error encountered during initialization of driver: %s" +#~ msgstr "" + +#~ msgid "Unabled to update stats, driver is uninitialized" +#~ msgstr "" + +#~ msgid "Snapshot file at %s does not exist." 
+#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %s does not exist" +#~ msgstr "" + +#~ msgid "Login to 3PAR array invalid" +#~ msgstr "" + +#~ msgid "There are no datastores present under %s." +#~ msgstr "" + +#~ msgid "Size for volume: %s not found, skipping secure delete." +#~ msgstr "" + +#~ msgid "Could not find attribute for LUN named %s" +#~ msgstr "" + +#~ msgid "Cleaning up incomplete backup operations" +#~ msgstr "" + +#~ msgid "Resetting volume %s to available (was backing-up)" +#~ msgstr "" + +#~ msgid "Resetting volume %s to error_restoring (was restoring-backup)" +#~ msgstr "" + +#~ msgid "Resetting backup %s to error (was creating)" +#~ msgstr "" + +#~ msgid "Resetting backup %s to available (was restoring)" +#~ msgstr "" + +#~ msgid "Resuming delete on backup: %s" +#~ msgstr "" + +#~ msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +#~ msgstr "" + +#~ msgid "create_backup finished. backup: %s" +#~ msgstr "" + +#~ msgid "delete_backup started, backup: %s" +#~ msgstr "" + +#~ msgid "delete_backup finished, backup %s deleted" +#~ msgstr "" + +#~ msgid "JSON transfer Error" +#~ msgstr "" + +#~ msgid "create volume error: %(err)s" +#~ msgstr "" + +#~ msgid "Create snapshot error." +#~ msgstr "" + +#~ msgid "Create luncopy error." +#~ msgstr "" + +#~ msgid "_find_host_lun_id transfer data error! " +#~ msgstr "" + +#~ msgid "ssh_read: Read SSH timeout." +#~ msgstr "" + +#~ msgid "There are no hosts in the inventory." +#~ msgstr "" + +#~ msgid "Unable to create volume: %(vol)s on the hosts: %(hosts)s." +#~ msgstr "" + +#~ msgid "Successfully cloned new backing: %s." +#~ msgstr "" + +#~ msgid "Successfully reverted clone: %(clone)s to snapshot: %(snapshot)s." +#~ msgstr "" + +#~ msgid "Copying backing files from %(src)s to %(dest)s." +#~ msgstr "" + +#~ msgid "Initiated copying of backing via task: %s." +#~ msgstr "" + +#~ msgid "Successfully copied backing to %s." +#~ msgstr "" + +#~ msgid "Registering backing at path: %s to inventory." +#~ msgstr "" + +#~ msgid "Initiated registring backing, task: %s." +#~ msgstr "" + +#~ msgid "Successfully registered backing: %s." +#~ msgstr "" + +#~ msgid "Reverting backing to snapshot: %s." +#~ msgstr "" + +#~ msgid "Initiated reverting snapshot via task: %s." +#~ msgstr "" + +#~ msgid "Successfully reverted to snapshot: %s." +#~ msgstr "" + +#~ msgid "Successfully copied disk data to: %s." +#~ msgstr "" + +#~ msgid "Error(s): %s occurred in the call to RetrieveProperties." +#~ msgstr "" + +#~ msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +#~ msgstr "" + +#~ msgid "Deploy v1 of the Cinder API. " +#~ msgstr "" + +#~ msgid "Deploy v2 of the Cinder API. " +#~ msgstr "" + +#~ msgid "_read_xml:%s" +#~ msgstr "" + +#~ msgid "request ip info is %s." +#~ msgstr "" + +#~ msgid "new str info is %s." +#~ msgstr "" + +#~ msgid "Failed to create iser target for volume %(volume_id)s." +#~ msgstr "" + +#~ msgid "Failed to remove iser target for volume %(volume_id)s." 
+#~ msgstr "" + +#~ msgid "rtstool is not installed correctly" +#~ msgstr "" + +#~ msgid "Creating iser_target for: %s" +#~ msgstr "" + +#~ msgid "Failed to create iser target for volume id:%(vol_id)s: %(e)s" +#~ msgstr "" + +#~ msgid "Removing iser_target for: %s" +#~ msgstr "" + +#~ msgid "Failed to remove iser target for volume id:%(vol_id)s: %(e)s" +#~ msgstr "" + +#~ msgid "Volume %s does not exist, it seems it was already deleted" +#~ msgstr "" + +#~ msgid "Executing zfs send/recv on the appliance" +#~ msgstr "" + +#~ msgid "zfs send/recv done, new volume %s created" +#~ msgstr "" + +#~ msgid "Failed to delete temp snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" + +#~ msgid "Failed to delete zfs recv snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" + +#~ msgid "rbd export-diff failed - %s" +#~ msgstr "" + +#~ msgid "rbd import-diff failed - %s" +#~ msgstr "" + +#~ msgid "%s is not on GPFS. Perhaps GPFS not mounted." +#~ msgstr "" + +#~ msgid "Folder %s does not exist, it seems it was already deleted." +#~ msgstr "" + +#~ msgid "No 'os-update_readonly_flag' was specified in request." +#~ msgstr "" + +#~ msgid "Volume 'readonly' flag must be specified in request as a boolean." +#~ msgstr "" + +#~ msgid "ISER provider_location not stored, using discovery" +#~ msgstr "" + +#~ msgid "Could not find iSER export for volume %s" +#~ msgstr "" + +#~ msgid "ISER Discovery: Found %s" +#~ msgstr "" + +#~ msgid "Failed to access the device on the path %(path)s: %(error)s." +#~ msgstr "" + +#~ msgid "iSER device not found at %s" +#~ msgstr "" + +#~ msgid "Found iSER node %(host_device)s (after %(tries)s rescans)." +#~ msgstr "" + +#~ msgid "Skipping ensure_export. No iser_target provisioned for volume: %s" +#~ msgstr "" + +#~ msgid "Skipping remove_export. No iser_target provisioned for volume: %s" +#~ msgstr "" + +#~ msgid "Downloading image: %s from glance image server." +#~ msgstr "" + +#~ msgid "Uploading image: %s to the Glance image server." +#~ msgstr "" + +#~ msgid "Invalid request body" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: prefix %s" +#~ msgstr "" + +#~ msgid "Schedule volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete schedule volume using flow: %s" +#~ msgstr "" + +#~ msgid "Create volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete create volume workflow" +#~ msgstr "" + +#~ msgid "Expected volume result not found" +#~ msgstr "" + +#~ msgid "Manager volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete manager volume workflow" +#~ msgstr "" + +#~ msgid "Unable to update stats, driver is uninitialized" +#~ msgstr "" + +#~ msgid "Bad reponse from server: %s" +#~ msgstr "" + +#~ msgid "%(flow)s has moved into state %(state)s from state %(old_state)s" +#~ msgstr "" + +#~ msgid "No request spec, will not reschedule" +#~ msgstr "" + +#~ msgid "No retry filter property or associated retry info, will not reschedule" +#~ msgstr "" + +#~ msgid "Retry info not present, will not reschedule" +#~ msgstr "" + +#~ msgid "Clear capabilities" +#~ msgstr "" + +#~ msgid "This usually means the volume was never succesfully created." +#~ msgstr "" + +#~ msgid "setting LU uppper (end) limit to %s" +#~ msgstr "" + +#~ msgid "Can't find lun or lun goup in array" +#~ msgstr "" + +#~ msgid "Volume to be restored to is smaller than the backup to be restored" +#~ msgstr "" + +#~ msgid "Volume driver '%(driver)s' not initialized." 
+#~ msgstr "" + +#~ msgid "in looping call" +#~ msgstr "" + +#~ msgid "Is the appropriate service running?" +#~ msgstr "" + +#~ msgid "Could not find another host" +#~ msgstr "" + +#~ msgid "Not enough allocatable volume gigabytes remaining" +#~ msgstr "" + +#~ msgid "Unable to update stats on non-intialized Volume Group: %s" +#~ msgstr "" + +#~ msgid "do_setup: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "migrate_volume started with more than one vdisk copy" +#~ msgstr "" + +#~ msgid "migrate_volume: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "Selected datastore: %s for the volume." +#~ msgstr "" + +#~ msgid "There are no valid datastores present under %s." +#~ msgstr "" + +#~ msgid "Unable to create volume, driver not initialized" +#~ msgstr "" + +#~ msgid "Migration %(migration_id)s could not be found." +#~ msgstr "" + +#~ msgid "Bad driver response status: %(status)s" +#~ msgstr "" + +#~ msgid "Instance %(instance_id)s could not be found." +#~ msgstr "" + +#~ msgid "Volume retype failed: %(reason)s" +#~ msgstr "" + +#~ msgid "SIGTERM received" +#~ msgstr "" + +#~ msgid "Child %(pid)d exited with status %(code)d" +#~ msgstr "" + +#~ msgid "_wait_child %d" +#~ msgstr "" + +#~ msgid "wait wrap.failed %s" +#~ msgstr "" + +#~ msgid "" +#~ "Report interval must be less than " +#~ "service down time. Current config: " +#~ ". Setting " +#~ "service_down_time to: %(new_service_down_time)s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target for volume %(name)s." +#~ msgstr "" + +#~ msgid "Updating iscsi target: %s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target %(name)s: %(e)s" +#~ msgstr "" + +#~ msgid "Caught '%(exception)s' exception." +#~ msgstr "" + +#~ msgid "Get code level failed" +#~ msgstr "" + +#~ msgid "do_setup: Could not get system name" +#~ msgstr "" + +#~ msgid "Failed to get license information." +#~ msgstr "" + +#~ msgid "do_setup: No configured nodes" +#~ msgstr "" + +#~ msgid "enter: _get_chap_secret_for_host: host name %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_chap_secret_for_host: host name " +#~ "%(host_name)s with secret %(chap_secret)s" +#~ msgstr "" + +#~ msgid "" +#~ "_create_host: Cannot clean host name. " +#~ "Host name is not unicode or string" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: %s" +#~ msgstr "" + +#~ msgid "leave: _get_host_from_connector: host %s" +#~ msgstr "" + +#~ msgid "enter: _create_host: host %s" +#~ msgstr "" + +#~ msgid "_create_host: No connector ports" +#~ msgstr "" + +#~ msgid "leave: _create_host: host %(host)s - %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +#~ msgstr "" + +#~ msgid "" +#~ "storwize_svc_multihostmap_enabled is set to " +#~ "False, Not allow multi host mapping" +#~ msgstr "" + +#~ msgid "volume %s mapping to multi host" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _map_vol_to_host: LUN %(result_lun)s, " +#~ "volume %(volume_name)s, host %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "leave: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "_create_host failed to return the host name." +#~ msgstr "" + +#~ msgid "_get_host_from_connector failed to return the host name for connector" +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to any host found." +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: Multiple mappings of " +#~ "volume %(vol_name)s found, no host " +#~ "specified." 
+#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to host %(host_name)s found" +#~ msgstr "" + +#~ msgid "protocol must be specified as ' iSCSI' or ' FC'" +#~ msgstr "" + +#~ msgid "enter: _create_vdisk: vdisk %s " +#~ msgstr "" + +#~ msgid "" +#~ "_create_vdisk %(name)s - did not find success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find success " +#~ "message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find mapping " +#~ "id in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to prepare FlashCopy" +#~ " from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "Unexecpted mapping status %(status)s for " +#~ "mapping %(id)s. Attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Mapping %(id)s prepare failed to " +#~ "complete within the allotted %(to)d " +#~ "seconds timeout. Terminating." +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s with " +#~ "exception %(ex)s" +#~ msgstr "" + +#~ msgid "_prepare_fc_map: %s" +#~ msgstr "" + +#~ msgid "" +#~ "_start_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "enter: _run_flashcopy: execute FlashCopy from" +#~ " source %(source)s to target %(target)s" +#~ msgstr "" + +#~ msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +#~ msgstr "" + +#~ msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %(src_vdisk)s (%(src_id)s) does not exist" +#~ msgstr "" + +#~ msgid "" +#~ "_create_copy: cannot get source vdisk " +#~ "%(src)s capacity from vdisk attributes " +#~ "%(attr)s" +#~ msgstr "" + +#~ msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_flashcopy_mapping_attributes: mapping " +#~ "%(fc_map_id)s, attributes %(attributes)s" +#~ msgstr "" + +#~ msgid "enter: _is_vdisk_defined: vdisk %s " +#~ msgstr "" + +#~ msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +#~ msgstr "" + +#~ msgid "enter: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "warning: Tried to delete vdisk %s but it does not exist." 
+#~ msgstr "" + +#~ msgid "leave: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "" +#~ "_add_vdisk_copy %(name)s - did not find" +#~ " success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "_get_vdisk_copy_attrs: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "_get_pool_attrs: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "enter: _execute_command_and_parse_attributes: command %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _execute_command_and_parse_attributes:\n" +#~ "command: %(cmd)s\n" +#~ "attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "_get_hdr_dic: attribute headers and values do not match.\n" +#~ " Headers: %(header)s\n" +#~ " Values: %(row)s" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ "stdout: %(out)s\n" +#~ "stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "Did not find expected column in %(fun)s: %(hdr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Volume size %(size)s cannot be lesser" +#~ " than the snapshot size %(snap_size)s. " +#~ "They must be >= original snapshot " +#~ "size." +#~ msgstr "" + +#~ msgid "" +#~ "Clones currently disallowed when %(size)s " +#~ "< %(source_size)s. They must be >= " +#~ "original volume size." +#~ msgstr "" + +#~ msgid "" +#~ "Size of specified image %(image_size)s " +#~ "is larger than volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "" +#~ "Image minDisk size %(min_disk)s is " +#~ "larger than the volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "Updating volume %(volume_id)s with %(update)s" +#~ msgstr "" + +#~ msgid "Volume %s: resetting 'creating' status failed" +#~ msgstr "" + +#~ msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s" +#~ msgstr "" + +#~ msgid "Marking volume %s as bootable" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting download of %(image_id)s " +#~ "(%(image_location)s) to volume %(volume_id)s" +#~ msgstr "" + +#~ msgid "" +#~ "Downloaded image %(image_id)s (%(image_location)s)" +#~ " to volume %(volume_id)s successfully" +#~ msgstr "" + +#~ msgid "" +#~ "Creating volume glance metadata for " +#~ "volume %(volume_id)s backed by image " +#~ "%(image_id)s with: %(vol_metadata)s" +#~ msgstr "" + +#~ msgid "" +#~ "Cloning %(volume_id)s from image %(image_id)s" +#~ " at location %(image_location)s" +#~ msgstr "" + diff --git a/cinder/locale/bg_BG/LC_MESSAGES/cinder.po b/cinder/locale/bg_BG/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..0e89a873b8 --- /dev/null +++ b/cinder/locale/bg_BG/LC_MESSAGES/cinder.po @@ -0,0 +1,10736 @@ +# Bulgarian (Bulgaria) translations for cinder. +# Copyright (C) 2013 ORGANIZATION +# This file is distributed under the same license as the cinder project. 
+# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Cinder\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-02-03 06:16+0000\n" +"PO-Revision-Date: 2013-05-08 11:44+0000\n" +"Last-Translator: FULL NAME \n" +"Language-Team: Bulgarian (Bulgaria) " +"(http://www.transifex.com/projects/p/openstack/language/bg_BG/)\n" +"Plural-Forms: nplurals=2; plural=(n != 1)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:102 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:66 cinder/brick/exception.py:33 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:88 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:107 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:112 +#, python-format +msgid "Volume driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:116 +#, python-format +msgid "Backup driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:120 +#, python-format +msgid "Connection to glance failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:124 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:129 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:133 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:137 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:141 +msgid "Volume driver not ready." +msgstr "" + +#: cinder/exception.py:145 cinder/brick/exception.py:74 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:150 +#, python-format +msgid "Invalid snapshot: %(reason)s" +msgstr "" + +#: cinder/exception.py:154 +#, python-format +msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:159 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:163 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:167 +msgid "The results are invalid." +msgstr "" + +#: cinder/exception.py:171 +#, python-format +msgid "Invalid input received: %(reason)s" +msgstr "" + +#: cinder/exception.py:175 +#, python-format +msgid "Invalid volume type: %(reason)s" +msgstr "" + +#: cinder/exception.py:179 +#, python-format +msgid "Invalid volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:183 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:187 +#, python-format +msgid "Invalid host: %(reason)s" +msgstr "" + +#: cinder/exception.py:193 cinder/brick/exception.py:81 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:197 +#, python-format +msgid "Invalid auth key: %(reason)s" +msgstr "" + +#: cinder/exception.py:201 +#, python-format +msgid "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" +msgstr "" + +#: cinder/exception.py:206 +msgid "Service is unavailable at this time." 
+msgstr "" + +#: cinder/exception.py:210 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:214 +#, python-format +msgid "The device in the path %(path)s is unavailable: %(reason)s" +msgstr "" + +#: cinder/exception.py:218 +#, python-format +msgid "Expected a uuid but received %(uuid)s." +msgstr "" + +#: cinder/exception.py:222 cinder/brick/exception.py:68 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:228 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:232 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "" +"Volume %(volume_id)s has no administration metadata with key " +"%(metadata_key)s." +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Invalid metadata: %(reason)s" +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Invalid metadata size: %(reason)s" +msgstr "" + +#: cinder/exception.py:250 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:255 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:264 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:269 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s deletion is not allowed with volumes " +"present with the type." +msgstr "" + +#: cinder/exception.py:274 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:278 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:282 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:287 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:291 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:295 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:328 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:332 +#, python-format +msgid "Unknown quota resources %(unknown)s." 
+msgstr "" + +#: cinder/exception.py:336 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:340 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:344 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:348 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:352 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:356 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:365 +#, python-format +msgid "Volume Type %(id)s already exists." +msgstr "" + +#: cinder/exception.py:369 +#, python-format +msgid "Volume type encryption for type %(type_id)s already exists." +msgstr "" + +#: cinder/exception.py:373 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:377 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:381 +#, python-format +msgid "Could not find parameter %(param)s" +msgstr "" + +#: cinder/exception.py:385 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:389 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:398 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:402 +#, python-format +msgid "Quota exceeded: code=%(code)s" +msgstr "" + +#: cinder/exception.py:409 +#, python-format +msgid "" +"Requested volume or snapshot exceeds allowed Gigabytes quota. Requested " +"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." +msgstr "" + +#: cinder/exception.py:415 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:419 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:423 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:427 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:432 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:436 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:440 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:444 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:449 +#, python-format +msgid "Glance metadata for volume/snapshot %(id)s cannot be found." 
+msgstr "" + +#: cinder/exception.py:453 +#, python-format +msgid "Failed to export for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:457 +#, python-format +msgid "Failed to create metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:461 +#, python-format +msgid "Failed to update metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:465 +#, python-format +msgid "Failed to copy metadata to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:469 +#, python-format +msgid "Failed to copy image to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:473 +msgid "Invalid Ceph args provided for backup rbd operation" +msgstr "" + +#: cinder/exception.py:477 +msgid "An error has occurred during backup operation" +msgstr "" + +#: cinder/exception.py:481 +msgid "Backup RBD operation failed" +msgstr "" + +#: cinder/exception.py:485 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:489 +msgid "Failed to identify volume backend." +msgstr "" + +#: cinder/exception.py:493 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "" + +#: cinder/exception.py:497 +#, python-format +msgid "Connection to swift failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:501 +#, python-format +msgid "Transfer %(transfer_id)s could not be found." +msgstr "" + +#: cinder/exception.py:505 +#, python-format +msgid "Volume migration failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:509 +#, python-format +msgid "SSH command injection detected: %(command)s" +msgstr "" + +#: cinder/exception.py:513 +#, python-format +msgid "QoS Specs %(specs_id)s already exists." +msgstr "" + +#: cinder/exception.py:517 +#, python-format +msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:522 +#, python-format +msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "No such QoS spec %(specs_id)s." +msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:536 +#, python-format +msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:541 +#, python-format +msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." +msgstr "" + +#: cinder/exception.py:546 +#, python-format +msgid "Invalid qos specs: %(reason)s" +msgstr "" + +#: cinder/exception.py:550 +#, python-format +msgid "QoS Specs %(specs_id)s is still associated with entities." +msgstr "" + +#: cinder/exception.py:554 +#, python-format +msgid "key manager error: %(reason)s" +msgstr "" + +#: cinder/exception.py:560 +msgid "Coraid Cinder Driver exception." +msgstr "" + +#: cinder/exception.py:564 +msgid "Failed to encode json data." +msgstr "" + +#: cinder/exception.py:568 +msgid "Login on ESM failed." +msgstr "" + +#: cinder/exception.py:572 +msgid "Relogin on ESM failed." +msgstr "" + +#: cinder/exception.py:576 +#, python-format +msgid "Group with name \"%(group_name)s\" not found." +msgstr "" + +#: cinder/exception.py:580 +#, python-format +msgid "ESM configure request failed: %(message)s." +msgstr "" + +#: cinder/exception.py:584 +#, python-format +msgid "Coraid ESM not available with reason: %(reason)s." +msgstr "" + +#: cinder/exception.py:589 +msgid "Zadara Cinder Driver exception." 
+msgstr "" + +#: cinder/exception.py:593 +#, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:597 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:601 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:605 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:609 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:613 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:618 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:622 +msgid "SolidFire Cinder Driver exception" +msgstr "" + +#: cinder/exception.py:626 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:630 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:636 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:641 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:645 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:649 cinder/exception.py:662 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:654 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:658 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/manager.py:133 +msgid "Notifying Schedulers of capabilities ..." +msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:105 +#, python-format +msgid "" +"Default quota for resource: %(res)s is set by the default quota flag: " +"quota_%(res)s, it is now deprecated. Please use the the default quota " +"class for default quota." +msgstr "" + +#: cinder/quota.py:748 +#, python-format +msgid "Created reservations %s" +msgstr "" + +#: cinder/quota.py:770 +#, python-format +msgid "Failed to commit reservations %s" +msgstr "" + +#: cinder/quota.py:790 +#, python-format +msgid "Failed to roll back reservations %s" +msgstr "" + +#: cinder/quota.py:876 +msgid "Cannot register resource" +msgstr "" + +#: cinder/quota.py:879 +msgid "Cannot register resources" +msgstr "" + +#: cinder/quota_utils.py:46 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume - " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/quota_utils.py:56 cinder/transfer/api.py:168 +#: cinder/volume/flows/api/create_volume.py:520 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/service.py:95 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:108 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:148 +#, python-format +msgid "" +"Report interval must be less than service down time. Current config " +"service_down_time: %(service_down_time)s, report_interval for this: " +"service is: %(report_interval)s. 
Setting global service_down_time to: " +"%(new_down_time)s" +msgstr "" + +#: cinder/service.py:216 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:255 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:270 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:276 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:298 +#, python-format +msgid "" +"Value of config option %(name)s_workers must be integer greater than 1. " +"Input value ignored." +msgstr "" + +#: cinder/service.py:373 +msgid "serve() can only be called once" +msgstr "" + +#: cinder/service.py:379 cinder/openstack/common/service.py:166 +#: cinder/openstack/common/service.py:384 +msgid "Full set of CONF:" +msgstr "" + +#: cinder/service.py:387 +#, python-format +msgid "%s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Can not translate %s to integer." +msgstr "" + +#: cinder/utils.py:127 +#, python-format +msgid "May specify only one of %s" +msgstr "" + +#: cinder/utils.py:212 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:228 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:412 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:423 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:698 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:759 +#, python-format +msgid "Volume driver %s not initialized" +msgstr "" + +#: cinder/wsgi.py:127 cinder/openstack/common/sslutils.py:50 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: cinder/wsgi.py:130 cinder/openstack/common/sslutils.py:53 +#, python-format +msgid "Unable to find ca_file : %s" +msgstr "" + +#: cinder/wsgi.py:133 cinder/openstack/common/sslutils.py:56 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: cinder/wsgi.py:136 cinder/openstack/common/sslutils.py:59 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:169 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:206 +#, python-format +msgid "Started %(name)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:244 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:313 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." 
+msgstr "" + +#: cinder/api/common.py:92 cinder/api/common.py:126 cinder/volume/api.py:266 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:95 cinder/api/common.py:130 cinder/volume/api.py:263 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:120 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:134 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:162 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:189 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:182 +msgid "Initializing extension manager." +msgstr "" + +#: cinder/api/extensions.py:197 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:235 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:236 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:240 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:256 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:262 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:276 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:287 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:356 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:266 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:463 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:786 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:907 +msgid "subclasses must implement construct()!" 
+msgstr "" + +#: cinder/api/contrib/admin_actions.py:81 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:227 +#: cinder/api/contrib/volume_transfer.py:157 +#: cinder/api/contrib/volume_transfer.py:193 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:224 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:60 +msgid "Snapshot not found." +msgstr "" + +#: cinder/api/contrib/hosts.py:86 cinder/api/openstack/wsgi.py:245 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:136 +#, python-format +msgid "Host '%s' could not be found." +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:168 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:180 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:206 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:214 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:111 +msgid "Please specify a name for QoS specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:220 +msgid "Failed to disassociate qos specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:222 +msgid "Qos specs still in use." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:298 +#: cinder/api/contrib/qos_specs_manage.py:351 +msgid "Volume Type id must not be None." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:72 +msgid "Missing required element quota_class_set in request body." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:81 +msgid "Quota class limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:85 +msgid "Quota class limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:60 +msgid "Quota limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quotas.py:65 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:100 +msgid "Missing required element quota_set in request body." +msgstr "" + +#: cinder/api/contrib/quotas.py:111 +#, python-format +msgid "Bad key(s) in quota set: %s" +msgstr "" + +#: cinder/api/contrib/scheduler_hints.py:36 +msgid "Malformed scheduler_hints attribute" +msgstr "" + +#: cinder/api/contrib/services.py:84 +msgid "" +"Query by service parameter is deprecated. Please use binary parameter " +"instead." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:51 +msgid "'status' must be specified." 
+msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:61 +#, python-format +msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:67 +#, python-format +msgid "" +"Provided snapshot status %(provided)s not allowed for snapshot with " +"status %(current)s." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:79 +msgid "progress must be an integer percentage" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:101 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:105 +#: cinder/api/v1/snapshot_metadata.py:75 cinder/api/v1/volume_metadata.py:75 +#: cinder/api/v2/snapshot_metadata.py:75 cinder/api/v2/volume_metadata.py:74 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:108 +#: cinder/api/v1/snapshot_metadata.py:79 cinder/api/v1/volume_metadata.py:79 +#: cinder/api/v2/snapshot_metadata.py:79 cinder/api/v2/volume_metadata.py:78 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:150 +msgid "" +"Key names can only contain alphanumeric characters, underscores, periods," +" colons and hyphens." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:99 +#, python-format +msgid "" +"Invalid request to attach volume to an instance %(instance_uuid)s and a " +"host %(host_name)s simultaneously" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:107 +msgid "Invalid request to attach volume to an invalid target" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:111 +msgid "" +"Invalid request to attach volume with an invalid mode. Attaching mode " +"should be 'rw' or 'ro'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:196 +msgid "Unable to fetch connection information from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:216 +msgid "Unable to terminate volume connection from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:229 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:237 +msgid "Bad value for 'force' parameter." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:240 +msgid "'force' is not string or bool." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:280 +msgid "New volume size must be specified as an integer." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:299 +msgid "Must specify readonly in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:307 +msgid "Bad value for 'readonly'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:311 +msgid "'readonly' not string or bool" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:325 +msgid "New volume type must be specified." 
+msgstr "" + +#: cinder/api/contrib/volume_transfer.py:131 +msgid "Listing volume transfers" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:147 +#, python-format +msgid "Creating new volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:162 +#, python-format +msgid "Creating transfer of volume %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:183 +#, python-format +msgid "Accepting volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:196 +#, python-format +msgid "Accepting transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:217 +#, python-format +msgid "Delete transfer with id: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:64 +msgid "key_size must be non-negative" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:67 +msgid "key_size must be an integer" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:73 +msgid "provider must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:75 +msgid "control_location must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:83 +#, python-format +msgid "Valid control location are: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:111 +msgid "Cannot create encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:115 +msgid "Create body is not valid." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:157 +msgid "Cannot delete encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/middleware/auth.py:108 +msgid "Invalid service catalog json." +msgstr "" + +#: cinder/api/middleware/fault.py:44 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:53 cinder/api/openstack/wsgi.py:984 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/fault.py:69 +#, python-format +msgid "%(exception)s: %(explanation)s" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:55 cinder/api/middleware/sizelimit.py:64 +#: cinder/api/middleware/sizelimit.py:78 +msgid "Request is too large." +msgstr "" + +#: cinder/api/openstack/__init__.py:69 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:80 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:104 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:126 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." 
+msgstr "" + +#: cinder/api/openstack/wsgi.py:220 cinder/api/openstack/wsgi.py:634 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:639 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:677 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:682 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:685 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:793 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:799 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:803 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:914 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:917 cinder/api/openstack/wsgi.py:930 +#: cinder/api/v1/snapshot_metadata.py:53 cinder/api/v1/snapshot_metadata.py:71 +#: cinder/api/v1/snapshot_metadata.py:96 cinder/api/v1/snapshot_metadata.py:121 +#: cinder/api/v1/volume_metadata.py:53 cinder/api/v1/volume_metadata.py:71 +#: cinder/api/v1/volume_metadata.py:96 cinder/api/v1/volume_metadata.py:121 +#: cinder/api/v2/snapshot_metadata.py:53 cinder/api/v2/snapshot_metadata.py:71 +#: cinder/api/v2/snapshot_metadata.py:96 cinder/api/v2/snapshot_metadata.py:121 +#: cinder/api/v2/volume_metadata.py:52 cinder/api/v2/volume_metadata.py:70 +#: cinder/api/v2/volume_metadata.py:95 cinder/api/v2/volume_metadata.py:120 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:927 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:939 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:987 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:139 cinder/api/v2/limits.py:138 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:264 cinder/api/v2/limits.py:261 +msgid "This request was rate-limited." 
+msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:37 cinder/api/v1/snapshot_metadata.py:117 +#: cinder/api/v1/snapshot_metadata.py:156 cinder/api/v2/snapshot_metadata.py:37 +#: cinder/api/v2/snapshot_metadata.py:117 +#: cinder/api/v2/snapshot_metadata.py:156 +msgid "snapshot does not exist" +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:139 +#: cinder/api/v1/snapshot_metadata.py:149 cinder/api/v1/volume_metadata.py:139 +#: cinder/api/v1/volume_metadata.py:149 cinder/api/v2/snapshot_metadata.py:139 +#: cinder/api/v2/snapshot_metadata.py:149 cinder/api/v2/volume_metadata.py:138 +#: cinder/api/v2/volume_metadata.py:148 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:119 cinder/api/v2/snapshots.py:120 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:184 +msgid "'volume_id' must be specified" +msgstr "" + +#: cinder/api/v1/snapshots.py:182 cinder/api/v2/snapshots.py:193 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:186 cinder/api/v2/snapshots.py:202 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:37 cinder/api/v1/volume_metadata.py:117 +#: cinder/api/v1/volume_metadata.py:156 cinder/api/v2/volume_metadata.py:36 +#: cinder/api/v2/volume_metadata.py:116 cinder/api/v2/volume_metadata.py:155 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:111 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:290 cinder/api/v2/volumes.py:228 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:344 cinder/api/v1/volumes.py:348 +#: cinder/api/v2/volumes.py:298 cinder/api/v2/volumes.py:302 +msgid "Invalid imageRef provided." +msgstr "" + +#: cinder/api/v1/volumes.py:388 cinder/api/v2/volumes.py:354 +#, python-format +msgid "snapshot id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:401 +#, python-format +msgid "source vol id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:412 cinder/api/v2/volumes.py:377 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/v1/volumes.py:496 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/snapshots.py:111 cinder/api/v2/snapshots.py:126 +#: cinder/api/v2/snapshots.py:267 +msgid "Snapshot could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:174 cinder/api/v2/snapshots.py:234 +#: cinder/api/v2/volumes.py:313 cinder/api/v2/volumes.py:419 +#, python-format +msgid "Missing required element '%s' in request body" +msgstr "" + +#: cinder/api/v2/snapshots.py:190 cinder/api/v2/volumes.py:217 +#: cinder/api/v2/volumes.py:234 cinder/api/v2/volumes.py:449 +msgid "Volume could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:230 cinder/api/v2/volumes.py:415 +msgid "Missing request body" +msgstr "" + +#: cinder/api/v2/types.py:70 +msgid "Volume type not found" +msgstr "" + +#: cinder/api/v2/volumes.py:237 +msgid "Volume cannot be deleted while in attached state" +msgstr "" + +#: cinder/api/v2/volumes.py:343 +msgid "Volume type not found." 
+msgstr "" + +#: cinder/api/v2/volumes.py:366 +#, python-format +msgid "source volume id:%s not found" +msgstr "" + +#: cinder/api/v2/volumes.py:472 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:66 +msgid "Backup status must be available or error" +msgstr "" + +#: cinder/backup/api.py:105 +msgid "Volume to be backed up must be available" +msgstr "" + +#: cinder/backup/api.py:140 +msgid "Backup status must be available" +msgstr "" + +#: cinder/backup/api.py:145 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:154 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:170 +msgid "Volume to be restored to must be available" +msgstr "" + +#: cinder/backup/api.py:176 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." +msgstr "" + +#: cinder/backup/api.py:181 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:97 +msgid "NULL host not allowed for volume backend lookup." +msgstr "" + +#: cinder/backup/manager.py:100 +#, python-format +msgid "Checking hostname '%s' for backend info." +msgstr "" + +#: cinder/backup/manager.py:107 +#, python-format +msgid "Backend not found in hostname (%s) so using default." +msgstr "" + +#: cinder/backup/manager.py:117 +#, python-format +msgid "Manager requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:120 cinder/backup/manager.py:132 +msgid "Fetching default backend." +msgstr "" + +#: cinder/backup/manager.py:123 +#, python-format +msgid "Volume manager for backend '%s' does not exist." +msgstr "" + +#: cinder/backup/manager.py:129 +#, python-format +msgid "Driver requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:147 +#, python-format +msgid "" +"Registering backend %(backend)s (host=%(host)s " +"backend_name=%(backend_name)s)." +msgstr "" + +#: cinder/backup/manager.py:154 +#, python-format +msgid "Registering default backend %s." +msgstr "" + +#: cinder/backup/manager.py:158 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)." +msgstr "" + +#: cinder/backup/manager.py:165 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s." +msgstr "" + +#: cinder/backup/manager.py:184 +msgid "Cleaning up incomplete backup operations." +msgstr "" + +#: cinder/backup/manager.py:189 +#, python-format +msgid "Resetting volume %s to available (was backing-up)." +msgstr "" + +#: cinder/backup/manager.py:194 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)." +msgstr "" + +#: cinder/backup/manager.py:206 +#, python-format +msgid "Resetting backup %s to error (was creating)." +msgstr "" + +#: cinder/backup/manager.py:212 +#, python-format +msgid "Resetting backup %s to available (was restoring)." +msgstr "" + +#: cinder/backup/manager.py:217 +#, python-format +msgid "Resuming delete on backup: %s." +msgstr "" + +#: cinder/backup/manager.py:225 +#, python-format +msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:237 +#, python-format +msgid "" +"Create backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s." 
+msgstr "" + +#: cinder/backup/manager.py:249 +#, python-format +msgid "" +"Create backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:282 +#, python-format +msgid "Create backup finished. backup: %s." +msgstr "" + +#: cinder/backup/manager.py:286 +#, python-format +msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:299 +#, python-format +msgid "" +"Restore backup aborted: expected volume status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:310 +#, python-format +msgid "" +"Restore backup aborted: expected backup status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:329 +#, python-format +msgid "" +"Restore backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:360 +#, python-format +msgid "" +"Restore backup finished, backup %(backup_id)s restored to volume " +"%(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:379 +#, python-format +msgid "Delete backup started, backup: %s." +msgstr "" + +#: cinder/backup/manager.py:386 +#, python-format +msgid "" +"Delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:399 +#, python-format +msgid "" +"Delete backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:422 +#, python-format +msgid "Delete backup finished, backup %s deleted." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:116 +msgid "" +"rbd striping not supported - ignoring configuration settings for rbd " +"striping" +msgstr "" + +#: cinder/backup/drivers/ceph.py:147 +#, python-format +msgid "invalid user '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:213 +msgid "backup_id required" +msgstr "" + +#: cinder/backup/drivers/ceph.py:224 +#, python-format +msgid "discarding %(length)s bytes from offset %(offset)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:232 +#, python-format +msgid "writing zeroes chunk %d" +msgstr "" + +#: cinder/backup/drivers/ceph.py:246 +#, python-format +msgid "transferring data between '%(src)s' and '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:250 +#, python-format +msgid "%(chunks)s chunks of %(bytes)s bytes to be transferred" +msgstr "" + +#: cinder/backup/drivers/ceph.py:269 +#, python-format +msgid "transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:279 +#, python-format +msgid "transferring remaining %s bytes" +msgstr "" + +#: cinder/backup/drivers/ceph.py:295 +#, python-format +msgid "creating base image '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:322 cinder/backup/drivers/ceph.py:603 +#, python-format +msgid "deleting backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:325 +msgid "no backup snapshot to delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:361 +#, python-format +msgid "trying diff format name format basename='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:369 +#, python-format +msgid "image %s not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:377 +#, python-format +msgid "base image still has %s snapshots so skipping base image delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:382 +#, python-format +msgid "deleting base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:389 +#, python-format +msgid "image busy, retrying %(retries)s more time(s) in %(delay)ss" +msgstr "" + +#: cinder/backup/drivers/ceph.py:394 +msgid "max retries reached - raising error" +msgstr "" + +#: cinder/backup/drivers/ceph.py:397 +#, python-format +msgid "base backup image='%s' deleted)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:407 +#, python-format +msgid "deleting source snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:453 +#, python-format +msgid "performing differential transfer from '%(src)s' to '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:478 +#, python-format +msgid "rbd diff op failed - (ret=%(ret)s stderr=%(stderr)s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:488 +#, python-format +msgid "image '%s' not found - trying diff format name" +msgstr "" + +#: cinder/backup/drivers/ceph.py:493 +#, python-format +msgid "diff format image '%s' not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:528 +#, python-format +msgid "using --from-snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:543 +#, python-format +msgid "source snap '%s' is stale so deleting" +msgstr "" + +#: cinder/backup/drivers/ceph.py:555 +#, python-format +msgid "" +"snap='%(snap)s' does not exist in base image='%(base)s' - aborting " +"incremental backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:566 +#, python-format +msgid "creating backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:586 +#, python-format +msgid "differential backup transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:595 +msgid "differential backup transfer failed" +msgstr "" + +#: 
cinder/backup/drivers/ceph.py:625 +#, python-format +msgid "creating base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:634 +msgid "copying data" +msgstr "" + +#: cinder/backup/drivers/ceph.py:694 +#, python-format +msgid "looking for snapshot of backup base '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:697 +#, python-format +msgid "backup base '%s' has no snapshots" +msgstr "" + +#: cinder/backup/drivers/ceph.py:704 +#, python-format +msgid "backup '%s' has no snapshot" +msgstr "" + +#: cinder/backup/drivers/ceph.py:708 +#, python-format +msgid "backup should only have one snapshot but instead has %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:713 +#, python-format +msgid "found snapshot '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:734 +msgid "need non-zero volume size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:751 +#, python-format +msgid "Starting backup of volume='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:764 +msgid "forcing full backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:776 +#, python-format +msgid "backup '%s' finished." +msgstr "" + +#: cinder/backup/drivers/ceph.py:834 +msgid "adjusting restore vol size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:846 +#, python-format +msgid "trying incremental restore from base='%(base)s' snap='%(snap)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:858 +msgid "differential restore failed, trying full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:869 +#, python-format +msgid "restore transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:916 +#, python-format +msgid "rbd has %s extents" +msgstr "" + +#: cinder/backup/drivers/ceph.py:938 +msgid "dest volume is original volume - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:959 +msgid "destination has extents - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:964 +#, python-format +msgid "no restore point found for backup='%s', forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:995 +msgid "forcing full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1005 +#, python-format +msgid "starting restore from Ceph backup=%(src)s to volume=%(dest)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1016 +msgid "volume_file does not support fileno() so skipping fsync()" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1021 +msgid "restore finished successfully." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:1023 +#, python-format +msgid "restore finished with error - %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1029 +#, python-format +msgid "delete started for backup=%s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1034 +msgid "rbd image not found but continuing anyway so that db entry can be removed" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1037 +#, python-format +msgid "delete '%s' finished with warning" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1039 +#, python-format +msgid "delete '%s' finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:106 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:123 +#, python-format +msgid "single_user auth mode enabled, but %(param)s not set" +msgstr "" + +#: cinder/backup/drivers/swift.py:141 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:146 +#, python-format +msgid "container %s does not exist" +msgstr "" + +#: cinder/backup/drivers/swift.py:151 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/drivers/swift.py:157 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:173 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:182 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:192 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:209 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/drivers/swift.py:214 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:219 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:224 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/drivers/swift.py:234 +#, python-format +msgid "volume size %d is invalid." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:248 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:271 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/drivers/swift.py:278 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:287 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/drivers/swift.py:291 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/drivers/swift.py:297 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:301 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:304 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:312 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/drivers/swift.py:328 cinder/backup/drivers/tsm.py:324 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/drivers/swift.py:345 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/drivers/swift.py:350 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:356 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/drivers/swift.py:362 +#, python-format +msgid "" +"restoring object from swift. backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:378 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/drivers/swift.py:401 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:409 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:423 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:428 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:432 cinder/backup/drivers/tsm.py:378 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:446 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:455 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:458 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:468 cinder/backup/drivers/tsm.py:440 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/backup/drivers/tsm.py:85 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to create device hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:143 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to obtain backup success notification from " +"server.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:173 +#, python-format +msgid "" +"restore: %(vol_id)s Failed.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:199 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a block device." +msgstr "" + +#: cinder/backup/drivers/tsm.py:206 +#, python-format +msgid "backup: %(vol_id)s Failed. Cannot obtain real path to device %(path)s." +msgstr "" + +#: cinder/backup/drivers/tsm.py:213 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a file." +msgstr "" + +#: cinder/backup/drivers/tsm.py:260 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to remove backup hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:286 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to TSM, volume path: " +"%(volume_path)s," +msgstr "" + +#: cinder/backup/drivers/tsm.py:298 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:308 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:338 +#, python-format +msgid "" +"restore: starting restore of backup from TSM to volume %(volume_id)s, " +"backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:352 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:362 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:413 +#, python-format +msgid "" +"delete: %(vol_id)s Failed to run dsmc with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:421 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments with " +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:432 +#, python-format +msgid "" +"delete: %(vol_id)s Failed with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/brick/exception.py:55 +#, python-format +msgid "Exception in string format operation. 
msg='%s'" +msgstr "" + +#: cinder/brick/exception.py:85 +msgid "We are unable to locate any Fibre Channel devices." +msgstr "" + +#: cinder/brick/exception.py:89 +msgid "Unable to find a Fibre Channel volume device." +msgstr "" + +#: cinder/brick/exception.py:93 +#, python-format +msgid "Volume device not found at %(device)s." +msgstr "" + +#: cinder/brick/exception.py:97 +#, python-format +msgid "Unable to find Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:101 +#, python-format +msgid "Failed to create Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:105 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:109 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:113 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:117 +#, python-format +msgid "Connect to volume via protocol %(protocol)s not supported." +msgstr "" + +#: cinder/brick/initiator/connector.py:127 +#, python-format +msgid "Invalid InitiatorConnector protocol specified %(protocol)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:140 +#, python-format +msgid "Failed to access the device on the path %(path)s: %(error)s %(info)s." +msgstr "" + +#: cinder/brick/initiator/connector.py:229 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. Try" +" number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:242 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:317 +#, python-format +msgid "Could not find the iSCSI Initiator File %s" +msgstr "" + +#: cinder/brick/initiator/connector.py:609 +msgid "We are unable to locate any Fibre Channel devices" +msgstr "" + +#: cinder/brick/initiator/connector.py:619 +#, python-format +msgid "Looking for Fibre Channel dev %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:629 +msgid "Fibre Channel volume device not found." +msgstr "" + +#: cinder/brick/initiator/connector.py:633 +#, python-format +msgid "Fibre volume not yet found. Will rescan & retry. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:649 +#, python-format +msgid "Found Fibre Channel volume %(name)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:658 +#, python-format +msgid "Multipath device discovered %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:776 +#, python-format +msgid "AoE volume not yet found at: %(path)s. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:789 +#, python-format +msgid "Found AoE device %(path)s (after %(tries)s rediscover)" +msgstr "" + +#: cinder/brick/initiator/connector.py:815 +#, python-format +msgid "aoe-discover: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:825 +#, python-format +msgid "aoe-revalidate %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:834 +#, python-format +msgid "aoe-flush %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:858 +msgid "" +"Connection details not present. RemoteFsClient may not initialize " +"properly." 
+msgstr "" + +#: cinder/brick/initiator/connector.py:915 +msgid "Invalid connection_properties specified no device_path attribute" +msgstr "" + +#: cinder/brick/initiator/linuxfc.py:50 cinder/brick/initiator/linuxfc.py:56 +msgid "systool is not installed" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:99 +#: cinder/brick/initiator/linuxscsi.py:107 +#: cinder/brick/initiator/linuxscsi.py:124 +#, python-format +msgid "multipath call failed exit (%(code)s)" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:145 +#, python-format +msgid "Couldn't find multipath device %(line)s" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:149 +#, python-format +msgid "Found multipath device = %(mdev)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:140 +msgid "Attempting recreate of backing lun..." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:158 +#, python-format +msgid "" +"Failed to recover attempt to create iscsi backing lun for volume " +"id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:177 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:184 +#, python-format +msgid "" +"Created volume path %(vp)s,\n" +"content: %(vc)%" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:216 cinder/brick/iscsi/iscsi.py:365 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:227 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:258 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:262 +#, python-format +msgid "Volume path %s does not exist, nothing to remove." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:280 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:290 cinder/brick/iscsi/iscsi.py:550 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:375 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:469 +msgid "cinder-rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:489 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:513 cinder/brick/iscsi/iscsi.py:522 +#, python-format +msgid "Failed to create iscsi target for volume id:%s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:532 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:542 +#, python-format +msgid "Failed to remove iscsi target for volume id:%s." 
+msgstr "" + +#: cinder/brick/iscsi/iscsi.py:571 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 cinder/brick/local_dev/lvm.py:158 +#: cinder/brick/local_dev/lvm.py:474 cinder/brick/local_dev/lvm.py:503 +#: cinder/brick/local_dev/lvm.py:546 cinder/brick/local_dev/lvm.py:609 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 cinder/brick/local_dev/lvm.py:159 +#: cinder/brick/local_dev/lvm.py:475 cinder/brick/local_dev/lvm.py:504 +#: cinder/brick/local_dev/lvm.py:547 cinder/brick/local_dev/lvm.py:610 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 cinder/brick/local_dev/lvm.py:160 +#: cinder/brick/local_dev/lvm.py:476 cinder/brick/local_dev/lvm.py:505 +#: cinder/brick/local_dev/lvm.py:548 cinder/brick/local_dev/lvm.py:611 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, python-format +msgid "Unable to locate Volume Group %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:157 +msgid "Error querying thin pool about data_percent" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:370 +#, python-format +msgid "Unable to find VG: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:420 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:473 +msgid "Error creating Volume" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:489 +#, python-format +msgid "Unable to find LV: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:502 +msgid "Error creating snapshot" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:545 +msgid "Error activating LV" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:563 +#, python-format +msgid "Error reported running lvremove: CMD: %(command)s, RESPONSE: %(response)s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:568 +msgid "Attempting udev settle and retry of lvremove..." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:608 +msgid "Error extending Volume" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:39 +msgid "nfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:45 +msgid "glusterfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:86 +#, python-format +msgid "Already mounted: %s" +msgstr "" + +#: cinder/common/config.py:125 +msgid "Deploy v1 of the Cinder API." +msgstr "" + +#: cinder/common/config.py:128 +msgid "Deploy v2 of the Cinder API." +msgstr "" + +#: cinder/common/sqlalchemyutils.py:66 +#: cinder/openstack/common/db/sqlalchemy/utils.py:72 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:114 +#: cinder/openstack/common/db/sqlalchemy/utils.py:120 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/compute/nova.py:97 +#, python-format +msgid "Novaclient connection created using URL: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:63 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:190 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:843 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1842 +#, python-format +msgid "VolumeType %s deletion failed, VolumeType in use." 
+msgstr "" + +#: cinder/db/sqlalchemy/api.py:2530 +#, python-format +msgid "No backup with id %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2615 +msgid "Volume must be available" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2639 +#, python-format +msgid "Volume in unexpected state %s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2662 +#, python-format +msgid "" +"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " +"%(status)s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:37 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:64 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:240 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:269 +msgid "Downgrade from initial Cinder install is unsupported." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:49 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:74 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:105 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:45 +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:48 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:46 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:127 +msgid "Dropping foreign key reservations_ibfk_1 failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:133 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:140 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:147 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:60 +msgid "Exception while creating table 'volume_glance_metadata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:75 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:68 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:58 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:61 +msgid "transfers table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:31 +msgid "migrations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:61 +#, python-format +msgid "Table |%s| not created" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:37 +#, python-format +msgid "Exception while dropping table %s." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:100 +#, python-format +msgid "Exception while creating table %s." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:34 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:43 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:49 +#, python-format +msgid "Column |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:92 +msgid "encryption_key_id column not dropped from volumes" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:100 +msgid "encryption_key_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:105 +msgid "volume_type_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:113 +msgid "encryption table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:49 +msgid "Table quality_of_service_specs not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:60 +msgid "Added qos_specs_id column to volume type table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:85 +msgid "Dropping foreign key volume_types_ibfk_1 failed" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:93 +msgid "Dropping qos_specs_id column failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:100 +msgid "Dropping quality_of_service_specs table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:59 +msgid "volume_admin_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:48 +msgid "" +"Found existing 'default' entries in the quota_classes table. Skipping " +"insertion of default values." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:72 +msgid "Added default quota class data into the DB." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:74 +msgid "Default quota class data not inserted into the DB." +msgstr "" + +#: cinder/image/glance.py:161 cinder/image/glance.py:169 +#, python-format +msgid "Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:94 cinder/image/image_utils.py:199 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/image/image_utils.py:101 +#, python-format +msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:109 cinder/image/image_utils.py:192 +#, python-format +msgid "" +"Size is %(image_size)dGB and doesn't fit in a volume of size " +"%(volume_size)dGB." +msgstr "" + +#: cinder/image/image_utils.py:157 +#, python-format +msgid "" +"qemu-img is not installed and image is of type %s. Only RAW images can " +"be used if qemu-img is not installed." +msgstr "" + +#: cinder/image/image_utils.py:164 +msgid "" +"qemu-img is not installed and the disk format is not specified. Only RAW" +" images can be used if qemu-img is not installed." 
+msgstr "" + +#: cinder/image/image_utils.py:178 +#, python-format +msgid "Copying image from %(tmp)s to volume %(dest)s - size: %(size)s" +msgstr "" + +#: cinder/image/image_utils.py:206 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:224 +#, python-format +msgid "Converted to %(vol_format)s, but format is now %(file_format)s" +msgstr "" + +#: cinder/image/image_utils.py:260 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:78 +msgid "" +"config option keymgr.fixed_key has not been defined: some operations may " +"fail unexpectedly" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:80 +msgid "keymgr.fixed_key not defined" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:134 +#, python-format +msgid "Not deleting key %s" +msgstr "" + +#: cinder/openstack/common/eventlet_backdoor.py:140 +#, python-format +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/fileutils.py:64 +#, python-format +msgid "Reloading cached file %s" +msgstr "" + +#: cinder/openstack/common/gettextutils.py:252 +msgid "Message objects do not support addition." +msgstr "" + +#: cinder/openstack/common/gettextutils.py:261 +msgid "" +"Message objects do not support str() because they may contain non-ascii " +"characters. Please use unicode() or translate() instead." +msgstr "" + +#: cinder/openstack/common/imageutils.py:96 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:189 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:200 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:227 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:235 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/log.py:301 +#, python-format +msgid "Deprecated: %s" +msgstr "" + +#: cinder/openstack/common/log.py:402 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:453 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:623 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:82 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:89 +#: cinder/tests/brick/test_brick_connector.py:466 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:129 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:136 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:43 +#, python-format +msgid "Unexpected argument for periodic task creation: %(arg)s." 
+msgstr "" + +#: cinder/openstack/common/periodic_task.py:134 +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:139 +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:177 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:186 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." +msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:127 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/openstack/common/processutils.py:142 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:167 +#: cinder/openstack/common/processutils.py:239 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:345 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:179 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/openstack/common/processutils.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:318 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:220 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/openstack/common/processutils.py:224 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/openstack/common/service.py:175 +#: cinder/openstack/common/service.py:269 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:187 +msgid "Exception during rpc cleanup." 
+msgstr "" + +#: cinder/openstack/common/service.py:238 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:275 +msgid "Unhandled exception" +msgstr "" + +#: cinder/openstack/common/service.py:308 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/openstack/common/service.py:327 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/openstack/common/service.py:337 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/openstack/common/service.py:354 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/openstack/common/service.py:358 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/service.py:362 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/openstack/common/service.py:392 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/openstack/common/service.py:410 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/openstack/common/sslutils.py:98 +#, python-format +msgid "Invalid SSL version : %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:86 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/strutils.py:182 +#, python-format +msgid "Invalid string format: %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:189 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/openstack/common/versionutils.py:69 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and " +"may be removed in %(remove_in)s." +msgstr "" + +#: cinder/openstack/common/versionutils.py:73 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s and may be removed in " +"%(remove_in)s. It will not be superseded." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:29 +msgid "An unknown error occurred in crypto utils." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:36 +#, python-format +msgid "Block size of %(given)d is too big, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:45 +#, python-format +msgid "Length of %(given)d is too long, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/db/exception.py:44 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:487 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:538 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:610 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/utils.py:33 +msgid "Sort key supplied was not valid." +msgstr "" + +#: cinder/openstack/common/notifier/api.py:129 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:145 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:164 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. 
Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:105 +#, python-format +msgid "" +"An RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:83 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "no calling threads waiting for msg_id: %s, message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:216 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshold: %d. There " +"could be a MulticallProxyWaiter leak." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:299 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:345 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "received %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:422 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:423 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:280 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:459 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:594 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:597 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:631 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:640 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:668 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint."
+msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:166 +#: cinder/openstack/common/rpc/impl_qpid.py:164 +msgid "Failed to process message... skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:477 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:499 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:536 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:552 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:606 +#: cinder/openstack/common/rpc/impl_qpid.py:507 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:624 +#: cinder/openstack/common/rpc/impl_qpid.py:522 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:628 +#: cinder/openstack/common/rpc/impl_qpid.py:526 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:667 +#: cinder/openstack/common/rpc/impl_qpid.py:561 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:84 +#, python-format +msgid "Invalid value for qpid_topology_version: %d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:455 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:461 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:474 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:534 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:101 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:136 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:137 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:138 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:146 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:158 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:200 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:205 +msgid "You cannot send on this socket." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:267 +#, python-format +msgid "Running func with context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:305 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:387 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:437 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:443 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:475 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:481 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:497 +#, python-format +msgid "Required IPC directory does not exist at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:506 +#, python-format +msgid "Permission denied to IPC directory at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:509 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:543 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:562 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:590 +msgid "Skipping topic registration. Already registered." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:597 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:649 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:662 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:675 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:678 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:681 +#, python-format +msgid "Received message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:682 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:691 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:698 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:721 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:724 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:728 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:731 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:771 +#, python-format +msgid "topic is %s." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:815 +#, python-format +msgid "rpc_zmq_matchmaker = %(orig)s is deprecated; use %(new)s instead" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#: cinder/openstack/common/rpc/matchmaker_ring.py:79 +#: cinder/openstack/common/rpc/matchmaker_ring.py:97 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:54 +#, python-format +msgid "extra_spec requirement '%(req)s' does not match '%(cap)s'" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:67 +#, python-format +msgid "%(host_state)s fails resource_type extra_specs requirements" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:43 +msgid "Re-scheduling is disabled." +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:52 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/scheduler/driver.py:69 +msgid "Must implement host_passes_filters" +msgstr "" + +#: cinder/scheduler/driver.py:74 +msgid "Must implement find_retype_host" +msgstr "" + +#: cinder/scheduler/driver.py:78 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:82 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:98 +#, python-format +msgid "cannot place volume %(id)s on %(host)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:114 +#, python-format +msgid "No valid hosts for volume %(id)s with type %(type)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:125 +#, python-format +msgid "" +"Current host not valid for volume %(id)s with type %(type)s, migration " +"not allowed" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:156 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:174 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:207 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:259 +#, python-format +msgid "Filtered %s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:276 +#, python-format +msgid "Choosing %s" +msgstr "" + +#: cinder/scheduler/host_manager.py:264 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:269 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:294 +#, python-format +msgid "volume service is down or disabled. (host: %s)" +msgstr "" + +#: cinder/scheduler/manager.py:63 +msgid "" +"ChanceScheduler and SimpleScheduler have been deprecated due to lack of " +"support for advanced features like: volume types, volume encryption, QoS " +"etc. These two schedulers can be fully replaced by FilterScheduler with " +"certain combination of filters and weighers." 
+msgstr "" + +#: cinder/scheduler/manager.py:98 cinder/scheduler/manager.py:100 +msgid "Failed to create scheduler manager volume flow" +msgstr "" + +#: cinder/scheduler/manager.py:159 +msgid "New volume type not specified in request_spec." +msgstr "" + +#: cinder/scheduler/manager.py:174 +#, python-format +msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." +msgstr "" + +#: cinder/scheduler/manager.py:192 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:68 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%s'" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:43 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:57 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:53 +msgid "No volume_id provided to populate a request_spec from" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:116 +#, python-format +msgid "Failed to schedule_create_volume: %(cause)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:135 +#, python-format +msgid "Failed notifying on %(topic)s payload %(payload)s" +msgstr "" + +#: cinder/tests/fake_driver.py:57 cinder/volume/driver.py:784 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:76 cinder/volume/driver.py:884 +#, python-format +msgid "FAKE ISER: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:97 +msgid "local_path not implemented" +msgstr "" + +#: cinder/tests/fake_driver.py:124 cinder/tests/fake_driver.py:129 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:70 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:78 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:94 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:97 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:58 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_netapp_nfs.py:360 +#, python-format +msgid "Share %(share)s and file name %(file_name)s" +msgstr "" + +#: cinder/tests/test_rbd.py:768 cinder/volume/drivers/rbd.py:175 +msgid "flush() not supported in this version of librbd" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:260 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1507 +#, python-format +msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1510 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1515 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:60 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:61 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: 
cinder/tests/test_xiv_ds8k.py:102 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/tests/api/contrib/test_backups.py:741 +msgid "Invalid input" +msgstr "" + +#: cinder/tests/integrated/test_login.py:29 +#, python-format +msgid "volume: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:32 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:42 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:50 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:58 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:100 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:103 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:121 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:148 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:159 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:166 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/transfer/api.py:68 +msgid "Volume in unexpected state" +msgstr "" + +#: cinder/transfer/api.py:102 cinder/volume/api.py:367 +msgid "status must be available" +msgstr "" + +#: cinder/transfer/api.py:119 +#, python-format +msgid "Failed to create transfer record for %s" +msgstr "" + +#: cinder/transfer/api.py:136 +#, python-format +msgid "Attempt to transfer %s with invalid auth key." +msgstr "" + +#: cinder/transfer/api.py:156 cinder/volume/flows/api/create_volume.py:508 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/transfer/api.py:182 +#, python-format +msgid "Failed to update quota donating volumetransfer id %s" +msgstr "" + +#: cinder/transfer/api.py:199 +#, python-format +msgid "Volume %s has been transferred." 
+msgstr "" + +#: cinder/volume/api.py:143 +#, python-format +msgid "Unable to query if %s is in the availability zone set" +msgstr "" + +#: cinder/volume/api.py:171 cinder/volume/api.py:173 +msgid "Failed to create api volume flow" +msgstr "" + +#: cinder/volume/api.py:202 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:214 +#, python-format +msgid "Volume status must be available or error, but current status is: %s" +msgstr "" + +#: cinder/volume/api.py:224 +msgid "Volume cannot be deleted while migrating" +msgstr "" + +#: cinder/volume/api.py:229 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:285 cinder/volume/api.py:350 +#: cinder/volume/qos_specs.py:240 cinder/volume/volume_types.py:67 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:370 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:377 +msgid "status must be in-use to detach" +msgstr "" + +#: cinder/volume/api.py:388 +msgid "Volume status must be available to reserve" +msgstr "" + +#: cinder/volume/api.py:464 +msgid "Snapshot cannot be created while volume is migrating" +msgstr "" + +#: cinder/volume/api.py:468 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:490 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:502 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:553 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/api.py:581 cinder/volume/flows/api/create_volume.py:208 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:585 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:589 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:720 cinder/volume/api.py:772 +msgid "Volume status must be available/in-use." +msgstr "" + +#: cinder/volume/api.py:723 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/api.py:752 +msgid "Volume status must be available to extend." +msgstr "" + +#: cinder/volume/api.py:757 +#, python-format +msgid "" +"New size for extend must be greater than current size. (current: " +"%(size)s, extended: %(new_size)s)" +msgstr "" + +#: cinder/volume/api.py:778 +msgid "Volume is already part of an active migration" +msgstr "" + +#: cinder/volume/api.py:784 +msgid "volume must not have snapshots" +msgstr "" + +#: cinder/volume/api.py:797 +#, python-format +msgid "No available service named %s" +msgstr "" + +#: cinder/volume/api.py:803 +msgid "Destination host must be different than current host" +msgstr "" + +#: cinder/volume/api.py:833 +msgid "Source volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:837 +msgid "Destination volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:842 +#, python-format +msgid "Destination has migration_status %(stat)s, expected %(exp)s." +msgstr "" + +#: cinder/volume/api.py:853 +msgid "Volume status must be available to update readonly flag." 
+msgstr "" + +#: cinder/volume/api.py:862 +#, python-format +msgid "Unable to update type due to incorrect status on volume: %s" +msgstr "" + +#: cinder/volume/api.py:868 +#, python-format +msgid "Volume %s is already part of an active migration." +msgstr "" + +#: cinder/volume/api.py:874 +#, python-format +msgid "migration_policy must be 'on-demand' or 'never', passed: %s" +msgstr "" + +#: cinder/volume/api.py:887 +#, python-format +msgid "Invalid volume_type passed: %s" +msgstr "" + +#: cinder/volume/api.py:900 +#, python-format +msgid "New volume_type same as original: %s" +msgstr "" + +#: cinder/volume/api.py:915 +msgid "Retype cannot change encryption requirements" +msgstr "" + +#: cinder/volume/api.py:927 +msgid "Retype cannot change front-end qos specs for in-use volumes" +msgstr "" + +#: cinder/volume/driver.py:189 cinder/volume/drivers/netapp/nfs.py:174 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:282 +#, python-format +msgid "copy_data_between_volumes %(src)s -> %(dest)s." +msgstr "" + +#: cinder/volume/driver.py:295 cinder/volume/driver.py:309 +#, python-format +msgid "Failed to attach volume %(vol)s" +msgstr "" + +#: cinder/volume/driver.py:327 +#, python-format +msgid "Failed to copy volume %(src)s to %(dest)d" +msgstr "" + +#: cinder/volume/driver.py:340 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:358 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:394 +#, python-format +msgid "Unable to access the backend storage via the path %(path)s." +msgstr "" + +#: cinder/volume/driver.py:433 +#, python-format +msgid "Creating a new backup for volume %s." +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Restoring backup %(backup)s to volume %(volume)s." +msgstr "" + +#: cinder/volume/driver.py:474 +msgid "Extend volume not implemented" +msgstr "" + +#: cinder/volume/driver.py:533 cinder/volume/drivers/emc/emc_smis_iscsi.py:113 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:546 +#, python-format +msgid "ISCSI discovery attempt failed for:%s" +msgstr "" + +#: cinder/volume/driver.py:548 +#, python-format +msgid "Error from iscsiadm -m discovery: %s" +msgstr "" + +#: cinder/volume/driver.py:595 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:599 cinder/volume/drivers/emc/emc_smis_iscsi.py:156 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:696 +msgid "The volume driver requires the iSCSI initiator name in the connector." +msgstr "" + +#: cinder/volume/driver.py:726 cinder/volume/driver.py:845 +#: cinder/volume/drivers/eqlx.py:247 cinder/volume/drivers/lvm.py:359 +#: cinder/volume/drivers/zadara.py:650 +#: cinder/volume/drivers/emc/emc_smis_common.py:859 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:235 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:602 +#: cinder/volume/drivers/netapp/iscsi.py:1032 +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/nexenta/iscsi.py:538 +#: cinder/volume/drivers/windows/windows.py:205 +msgid "Updating volume stats" +msgstr "" + +#: cinder/volume/driver.py:924 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:203 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." 
+msgstr "" + +#: cinder/volume/manager.py:209 +msgid "" +"ThinLVMVolumeDriver is deprecated, please configure LVMISCSIDriver and " +"lvm_type=thin. Continuing with those settings." +msgstr "" + +#: cinder/volume/manager.py:228 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)" +msgstr "" + +#: cinder/volume/manager.py:235 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s" +msgstr "" + +#: cinder/volume/manager.py:244 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:257 +#, python-format +msgid "Failed to re-export volume %s: setting to error state" +msgstr "" + +#: cinder/volume/manager.py:264 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:271 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:273 +#, python-format +msgid "" +"Error encountered during re-exporting phase of driver initialization: " +"%(name)s" +msgstr "" + +#: cinder/volume/manager.py:283 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:328 cinder/volume/manager.py:330 +msgid "Failed to create manager volume flow" +msgstr "" + +#: cinder/volume/manager.py:374 cinder/volume/manager.py:391 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:380 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:389 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:394 +#, python-format +msgid "Cannot delete volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:422 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:427 +#, python-format +msgid "volume %s: glance metadata deleted" +msgstr "" + +#: cinder/volume/manager.py:430 +#, python-format +msgid "no glance metadata found for volume %s" +msgstr "" + +#: cinder/volume/manager.py:434 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:451 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:462 +#, python-format +msgid "snapshot %(snap_id)s: creating" +msgstr "" + +#: cinder/volume/manager.py:490 +#, python-format +msgid "" +"Failed updating %(snapshot_id)s metadata using the provided volumes " +"%(volume_id)s metadata" +msgstr "" + +#: cinder/volume/manager.py:496 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:508 cinder/volume/manager.py:518 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:526 +#, python-format +msgid "Cannot delete snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:556 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:559 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:579 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:583 +msgid "being attached by another host" +msgstr "" + +#: cinder/volume/manager.py:587 +msgid "being attached by different mode" +msgstr "" + +#: cinder/volume/manager.py:590 +msgid "status must be available or attaching" +msgstr "" + +#: cinder/volume/manager.py:698 +#, python-format +msgid 
"Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "" + +#: cinder/volume/manager.py:760 +#, python-format +msgid "Unable to fetch connection information from backend: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:807 +#, python-format +msgid "Unable to terminate volume connection: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:854 +msgid "failed to create new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:857 +msgid "timeout creating new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:880 +#, python-format +msgid "Failed to copy volume %(vol1)s to %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:909 +#, python-format +msgid "" +"migrate_volume_completion: completing migration for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:921 +#, python-format +msgid "" +"migrate_volume_completion is cleaning up an error for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:940 +#, python-format +msgid "Failed to delete migration source vol %(vol)s: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:976 +#, python-format +msgid "volume %s: calling driver migrate_volume" +msgstr "" + +#: cinder/volume/manager.py:1016 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/manager.py:1024 +#, python-format +msgid "" +"Unable to update stats, %(driver_name)s -%(driver_version)s " +"%(config_group)s driver is uninitialized." +msgstr "" + +#: cinder/volume/manager.py:1044 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/manager.py:1091 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to extend volume by %(s_size)sG, " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/manager.py:1103 +#, python-format +msgid "volume %s: extending" +msgstr "" + +#: cinder/volume/manager.py:1105 +#, python-format +msgid "volume %s: extended successfully" +msgstr "" + +#: cinder/volume/manager.py:1107 +#, python-format +msgid "volume %s: Error trying to extend volume" +msgstr "" + +#: cinder/volume/manager.py:1169 +msgid "Failed to update usages while retyping volume." +msgstr "" + +#: cinder/volume/manager.py:1170 +msgid "Failed to get old volume type quota reservations" +msgstr "" + +#: cinder/volume/manager.py:1190 +#, python-format +msgid "Volume %s: retyped succesfully" +msgstr "" + +#: cinder/volume/manager.py:1193 +#, python-format +msgid "" +"Volume %s: driver error when trying to retype, falling back to generic " +"mechanism." +msgstr "" + +#: cinder/volume/manager.py:1204 +msgid "Retype requires migration but is not allowed." +msgstr "" + +#: cinder/volume/manager.py:1212 +msgid "Volume must not have snapshots." 
+msgstr "" + +#: cinder/volume/qos_specs.py:57 +#, python-format +msgid "Valid consumer of QoS specs are: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:84 cinder/volume/qos_specs.py:105 +#: cinder/volume/qos_specs.py:155 cinder/volume/qos_specs.py:197 +#: cinder/volume/qos_specs.py:211 cinder/volume/qos_specs.py:225 +#: cinder/volume/volume_types.py:43 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:123 cinder/volume/qos_specs.py:140 +#: cinder/volume/qos_specs.py:272 cinder/volume/volume_types.py:52 +#: cinder/volume/volume_types.py:99 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/qos_specs.py:156 +#, python-format +msgid "Failed to get all associations of qos specs %s" +msgstr "" + +#: cinder/volume/qos_specs.py:189 +#, python-format +msgid "" +"Type %(type_id)s is already associated with another qos specs: " +"%(qos_specs_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:198 +#, python-format +msgid "Failed to associate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:212 +#, python-format +msgid "Failed to disassociate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:226 +#, python-format +msgid "Failed to disassociate qos specs %s." +msgstr "" + +#: cinder/volume/qos_specs.py:284 cinder/volume/volume_types.py:111 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/utils.py:144 +#, python-format +msgid "" +"Incorrect value error: %(blocksize)s, it may indicate that " +"'volume_dd_blocksize' was configured incorrectly. Fall back to default." +msgstr "" + +#: cinder/volume/volume_types.py:130 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:131 +#: cinder/volume/drivers/block_device.py:143 cinder/volume/drivers/lvm.py:654 +#: cinder/volume/drivers/lvm.py:669 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:157 cinder/volume/drivers/lvm.py:687 +#, python-format +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:183 cinder/volume/drivers/lvm.py:483 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:200 cinder/volume/drivers/lvm.py:504 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:272 cinder/volume/drivers/lvm.py:227 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:287 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:311 cinder/volume/drivers/lvm.py:300 +#: cinder/volume/drivers/zadara.py:509 cinder/volume/drivers/nexenta/nfs.py:189 +#, python-format +msgid "Creating clone of volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:380 +msgid "No free disk" +msgstr "" + +#: cinder/volume/drivers/block_device.py:393 +msgid "No big enough free disk" +msgstr "" + +#: cinder/volume/drivers/coraid.py:84 +#, python-format +msgid "Invalid ESM url scheme \"%s\". Supported https only." +msgstr "" + +#: cinder/volume/drivers/coraid.py:111 +msgid "Invalid REST handle name. Expected path." 
+msgstr "" + +#: cinder/volume/drivers/coraid.py:134 +#, python-format +msgid "Call to json.loads() failed: %(ex)s. Response: %(resp)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:224 +msgid "Session is expired. Relogin on ESM." +msgstr "" + +#: cinder/volume/drivers/coraid.py:244 +msgid "Reply is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:246 +msgid "Error message is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:284 +#, python-format +msgid "Coraid Appliance ping failed: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:297 +#, python-format +msgid "Volume \"%(name)s\" created with VSX LUN \"%(lun)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:311 +#, python-format +msgid "Volume \"%s\" deleted." +msgstr "" + +#: cinder/volume/drivers/coraid.py:315 +#, python-format +msgid "Resize volume \"%(name)s\" to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:319 +#, python-format +msgid "Repository for volume \"%(name)s\" found: \"%(repo)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:333 +#, python-format +msgid "Volume \"%(name)s\" resized. New size is %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:385 +msgid "Cannot create clone volume in different repository." +msgstr "" + +#: cinder/volume/drivers/coraid.py:505 +#, python-format +msgid "Initialize connection %(shelf)s/%(lun)s for %(name)s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:139 +#, python-format +msgid "" +"CLI output\n" +"%s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:154 +msgid "Reading CLI MOTD" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:158 +#, python-format +msgid "Setting CLI terminal width: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:162 +#, python-format +msgid "Sending CLI command: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:169 +msgid "Error executing EQL command" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:199 +#, python-format +msgid "EQL-driver: executing \"%s\"" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:208 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:383 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:215 cinder/volume/drivers/san/san.py:149 +#, python-format +msgid "Error running SSH command: %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:282 +#, python-format +msgid "Volume %s does not exist, it may have already been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:300 +#, python-format +msgid "EQL-driver: Setup is complete, group IP is %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:304 +msgid "Failed to setup the Dell EqualLogic driver" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:320 +#, python-format +msgid "Failed to create volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:329 +#, python-format +msgid "Volume %s was not found while trying to delete it" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:333 +#, python-format +msgid "Failed to delete volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:348 +#, python-format +msgid "Failed to create snapshot of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:361 +#, python-format +msgid "Failed to create volume from snapshot %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:374 +#, python-format +msgid "Failed to create clone of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:384 +#, python-format +msgid "Failed to delete snapshot %(snap)s of volume %(vol)s" +msgstr "" + +#: 
cinder/volume/drivers/eqlx.py:405 +#, python-format +msgid "Failed to initialize connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:415 +#, python-format +msgid "Failed to terminate connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:436 +#, python-format +msgid "Volume %s is not found!, it may have been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:440 +#, python-format +msgid "Failed to ensure export of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:459 +#, python-format +msgid "Failed to extend_volume %(name)s from %(current_size)sGB to %(new_size)sGB" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:86 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:91 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:103 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:161 +#, python-format +msgid "Cloning volume %(src)s to volume %(dst)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:166 +msgid "Volume status must be 'available'." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:202 cinder/volume/drivers/nfs.py:122 +#: cinder/volume/drivers/netapp/nfs.py:753 +#, python-format +msgid "casted to %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:216 +msgid "Snapshot status must be \"available\" to clone." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:238 +#, python-format +msgid "snapshot: %(snap)s, volume: %(vol)s, volume_size: %(size)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:257 +#, python-format +msgid "will copy from snapshot at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:275 cinder/volume/drivers/nfs.py:172 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:373 +#, python-format +msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:403 +#, python-format +msgid "nova call result: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:405 +msgid "Call to Nova to create snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:427 +msgid "Nova returned \"error\" status while creating snapshot." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:431 +#, python-format +msgid "Status of snapshot %(id)s is now %(status)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:444 +#, python-format +msgid "Timed out while waiting for Nova update for creation of snapshot %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:456 +#, python-format +msgid "create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:457 +#, python-format +msgid "volume id: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:532 +msgid "'active' must be present when writing snap_info." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:562 +#, python-format +msgid "deleting snapshot %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:566 +msgid "Volume status must be \"available\" or \"in-use\"." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:582 +#, python-format +msgid "" +"Snapshot record for %s is not present, allowing snapshot_delete to " +"proceed." 
+msgstr "" + +#: cinder/volume/drivers/glusterfs.py:587 +#, python-format +msgid "snapshot_file for this snap is %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:608 +#, python-format +msgid "No base file found for %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:625 +#, python-format +msgid "No %(base_id)s found for %(file)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:680 +#, python-format +msgid "No file found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:690 +#, python-format +msgid "No snap found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:701 +#, python-format +msgid "No file depends on %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:727 +#, python-format +msgid "Check condition failed: %s expected to be None." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:778 +msgid "Call to Nova delete snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:796 +#, python-format +msgid "status of snapshot %s is still \"deleting\"... waiting" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:802 +#, python-format +msgid "Unable to delete snapshot %(id)s, status: %(status)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:815 +#, python-format +msgid "Timed out while waiting for Nova update for deletion of snapshot %(id)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:904 +#, python-format +msgid "%s must be a valid raw or qcow2 image." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:967 +msgid "Extend volume is only supported for this driver when no snapshots exist." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:975 +#, python-format +msgid "Unrecognized backing format: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:990 +#, python-format +msgid "creating new volume at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:993 +#, python-format +msgid "file already exists at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1019 cinder/volume/drivers/nfs.py:159 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1021 +#, python-format +msgid "Available shares: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1038 +#, python-format +msgid "" +"GlusterFS share at %(dir)s is not writable by the Cinder volume service. " +"Snapshot operations will not be supported." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:96 +#, python-format +msgid "GPFS is not active. Detailed output: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:97 +#, python-format +msgid "GPFS is not running - state: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:140 +msgid "Option gpfs_mount_point_base is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:147 +msgid "Option gpfs_images_share_mode is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:153 +msgid "Option gpfs_images_dir is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:160 +#, python-format +msgid "" +"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " +"belong to different file systems" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:169 +#, python-format +msgid "" +"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in " +"cluster daemon level %(cur)s - must be at least at level %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:183 +#, python-format +msgid "%s must be an absolute path." 
+msgstr "" + +#: cinder/volume/drivers/gpfs.py:188 +#, python-format +msgid "%s is not a directory." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:197 +#, python-format +msgid "" +"The GPFS filesystem %(fs)s is not at the required release level. Current" +" level is %(cur)s, must be at least %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:556 +#, python-format +msgid "Failed to resize volume %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:604 +#, python-format +msgid "mkfs failed on volume %(vol)s, error message was: %(err)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:637 +#, python-format +msgid "" +"%s cannot be accessed. Verify that GPFS is active and file system is " +"mounted." +msgstr "" + +#: cinder/volume/drivers/lvm.py:189 +#, python-format +msgid "Unabled to delete due to existing snapshot for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:215 +#, python-format +msgid "Volume device file path %s does not exist." +msgstr "" + +#: cinder/volume/drivers/lvm.py:221 +#, python-format +msgid "Size for volume: %s not found, cannot secure delete." +msgstr "" + +#: cinder/volume/drivers/lvm.py:262 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:361 +#, python-format +msgid "Unable to update stats on non-initialized Volume Group: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:462 +#, python-format +msgid "Error creating iSCSI target, retrying creation for target: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:482 +#, python-format +msgid "volume_info:%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:518 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:519 cinder/volume/drivers/lvm.py:724 +#: cinder/volume/drivers/huawei/rest_common.py:1225 +#, python-format +msgid "%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:573 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/nfs.py:109 +msgid "Driver specific implementation needs to return mount_point_base." +msgstr "" + +#: cinder/volume/drivers/nfs.py:263 +#, python-format +msgid "Expected volume size was %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:264 +#, python-format +msgid " but size is now %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:361 +#, python-format +msgid "%s is already mounted" +msgstr "" + +#: cinder/volume/drivers/nfs.py:421 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:426 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/nfs.py:431 +#, python-format +msgid "NFS config 'nfs_oversub_ratio' invalid. Must be > 0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:439 +#, python-format +msgid "NFS config 'nfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:493 +#, python-format +msgid "Selected %s as target nfs share." 
+msgstr "" + +#: cinder/volume/drivers/nfs.py:526 +#, python-format +msgid "%s is above nfs_used_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:529 +#, python-format +msgid "%s is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:532 +#, python-format +msgid "%s reserved space is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/rbd.py:160 +#, python-format +msgid "Invalid argument - whence=%s not supported" +msgstr "" + +#: cinder/volume/drivers/rbd.py:164 +msgid "Invalid argument" +msgstr "" + +#: cinder/volume/drivers/rbd.py:183 +msgid "fileno() not supported by RBD()" +msgstr "" + +#: cinder/volume/drivers/rbd.py:210 +#, python-format +msgid "error opening rbd image %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:259 +msgid "rados and rbd python libraries not found" +msgstr "" + +#: cinder/volume/drivers/rbd.py:265 +msgid "error connecting to ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:346 cinder/volume/drivers/sheepdog.py:178 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:377 +#, python-format +msgid "clone depth exceeds limit of %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:411 +#, python-format +msgid "maximum clone depth (%d) has been reached - flattening source volume" +msgstr "" + +#: cinder/volume/drivers/rbd.py:423 +#, python-format +msgid "flattening source volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:435 +#, python-format +msgid "creating snapshot='%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:445 +#, python-format +msgid "cloning '%(src_vol)s@%(src_snap)s' to '%(dest)s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:459 +msgid "clone created successfully" +msgstr "" + +#: cinder/volume/drivers/rbd.py:468 +#, python-format +msgid "creating volume '%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:484 +#, python-format +msgid "flattening %(pool)s/%(img)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:490 +#, python-format +msgid "cloning %(pool)s/%(img)s@%(snap)s to %(dst)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:527 +msgid "volume has no backup snaps" +msgstr "" + +#: cinder/volume/drivers/rbd.py:550 +#, python-format +msgid "volume %s is not a clone" +msgstr "" + +#: cinder/volume/drivers/rbd.py:568 +#, python-format +msgid "deleting parent snapshot %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:579 +#, python-format +msgid "deleting parent %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:593 +#, python-format +msgid "volume %s no longer exists in backend" +msgstr "" + +#: cinder/volume/drivers/rbd.py:609 +msgid "volume has clone snapshot(s)" +msgstr "" + +#: cinder/volume/drivers/rbd.py:625 +#, python-format +msgid "deleting rbd volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:629 +msgid "" +"ImageBusy error raised while deleting rbd volume. This may have been " +"caused by a connection from a client that has crashed and, if so, may be " +"resolved by retrying the delete after 30 seconds has elapsed." 
+msgstr "" + +#: cinder/volume/drivers/rbd.py:642 +msgid "volume is a clone so cleaning references" +msgstr "" + +#: cinder/volume/drivers/rbd.py:696 +#, python-format +msgid "connection data: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:705 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:709 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:712 +msgid "Not an rbd snapshot" +msgstr "" + +#: cinder/volume/drivers/rbd.py:724 +#, python-format +msgid "not cloneable: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:728 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:733 +msgid "rbd image clone requires image format to be 'raw' but image {0} is '{1}'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:747 +#, python-format +msgid "Unable to open image %(loc)s: %(err)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:817 +msgid "volume backup complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:830 +msgid "volume restore complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:840 cinder/volume/drivers/sheepdog.py:195 +#, python-format +msgid "Failed to Extend Volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:845 cinder/volume/drivers/sheepdog.py:200 +#: cinder/volume/drivers/windows/windows.py:223 +#, python-format +msgid "Extend volume from %(old_size)s GB to %(new_size)s GB." +msgstr "" + +#: cinder/volume/drivers/scality.py:67 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:78 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:84 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:105 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:139 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:59 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:64 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:144 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:151 +#, python-format +msgid "" +"Failed to make httplib connection SolidFire Cluster: %s (verify san_ip " +"settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:154 +#, python-format +msgid "Failed to make httplib connection: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:161 +#, python-format +msgid "" +"Request to SolidFire cluster returned bad status: %(status)s / %(reason)s" +" (check san_login/san_password settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:166 +#, python-format +msgid "HTTP request failed, with status: %(status)s and reason: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:177 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:183 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:187 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:189 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:195 +#, python-format +msgid "Detected 
xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:202 +#: cinder/volume/drivers/solidfire.py:271 +#: cinder/volume/drivers/solidfire.py:366 +#, python-format +msgid "API response: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:222 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:253 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:315 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:398 +msgid "Failed to get model update from clone" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:410 +#, python-format +msgid "Failed volume create: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:425 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:460 +#, python-format +msgid "Failed to get SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:469 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:478 +#, python-format +msgid "Volume %s, not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:481 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:550 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:554 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:556 +msgid "This usually means the volume was never successfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:569 +#, python-format +msgid "Failed to delete SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:572 +#: cinder/volume/drivers/solidfire.py:646 +#: cinder/volume/drivers/solidfire.py:709 +#: cinder/volume/drivers/solidfire.py:734 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:575 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:579 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:587 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:638 +msgid "Entering SolidFire extend_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:660 +msgid "Leaving SolidFire extend_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:665 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:673 +msgid "Failed to get updated stats" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:703 +#: cinder/volume/drivers/solidfire.py:728 +msgid "Entering SolidFire attach_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:773 +msgid "Leaving SolidFire transfer volume" +msgstr "" + +#: cinder/volume/drivers/zadara.py:236 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:260 +#, python-format +msgid "Operation completed. 
%(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:357 +#, python-format +msgid "Pool %(name)s: %(total)sGB total, %(free)sGB free" +msgstr "" + +#: cinder/volume/drivers/zadara.py:408 cinder/volume/drivers/zadara.py:531 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:438 +#, python-format +msgid "Create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:445 cinder/volume/drivers/zadara.py:490 +#: cinder/volume/drivers/zadara.py:516 +#, python-format +msgid "Volume %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:456 +#, python-format +msgid "Delete snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:464 +#, python-format +msgid "snapshot: original volume %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:472 +#, python-format +msgid "snapshot: snapshot %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:483 +#, python-format +msgid "Creating volume from snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:496 +#, python-format +msgid "Snapshot %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:614 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:40 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:79 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:83 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:91 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:98 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:107 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:115 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:130 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:137 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:144 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:152 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:157 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:167 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." 
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:177
+#, python-format
+msgid ""
+"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: "
+"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:188
+#, python-format
+msgid ""
+"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: "
+"%(snapshotname)s. Cannot find Replication Service to create volume from "
+"snapshot."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:197
+#, python-format
+msgid ""
+"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: "
+"%(snapshotname)s Method: CreateElementReplica ReplicationService: "
+"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: "
+"%(sourceelement)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:218
+#, python-format
+msgid ""
+"Error Create Volume from Snapshot: Volume: %(volumename)s "
+"Snapshot: %(snapshotname)s. Return code: %(rc)lu. Error: %(error)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:230
+#, python-format
+msgid ""
+"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: "
+"%(snapshotname)s. Successfully cloned volume from snapshot. Finding the "
+"clone relationship."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:241
+#, python-format
+msgid ""
+"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: "
+"%(snapshotname)s. Remove the clone relationship. Method: "
+"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: "
+"8 Synchronization: %(sync_name)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:257
+#, python-format
+msgid ""
+"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: "
+"%(snapshotname)s Return code: %(rc)lu"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:266
+#, python-format
+msgid ""
+"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: "
+"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:278
+#, python-format
+msgid ""
+"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: "
+"%(snapshotname)s Return code: %(rc)lu."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:287
+msgid "Entering create_cloned_volume."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:292
+#, python-format
+msgid ""
+"Create a Clone from Volume: Volume: %(volumename)s Source Volume: "
+"%(srcname)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:302
+#, python-format
+msgid ""
+"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s"
+" Source Instance: %(src_instance)s Storage System: %(storage_system)s."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:312
+#, python-format
+msgid ""
+"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: "
+"%(srcname)s. Cannot find Replication Service to create cloned volume."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:321
+#, python-format
+msgid ""
+"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s"
+" Method: CreateElementReplica ReplicationService: %(service)s "
+"ElementName: %(elementname)s SyncType: 8 SourceElement: "
+"%(sourceelement)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:342
+#, python-format
+msgid ""
+"Error Create Cloned Volume: Volume: %(volumename)s Source "
+"Volume: %(srcname)s. Return code: %(rc)lu. Error: %(error)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:354
+#, python-format
+msgid ""
+"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s."
+" Successfully cloned volume from source volume. Finding the clone "
+"relationship."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:365
+#, python-format
+msgid ""
+"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s."
+" Remove the clone relationship. Method: ModifyReplicaSynchronization "
+"ReplicationService: %(service)s Operation: 8 Synchronization: "
+"%(sync_name)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:381
+#, python-format
+msgid ""
+"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s"
+" Return code: %(rc)lu"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:390
+#, python-format
+msgid ""
+"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: "
+"%(srcname)s. Return code: %(rc)lu. Error: %(error)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:402
+#, python-format
+msgid ""
+"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: "
+"%(srcname)s Return code: %(rc)lu."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:411
+msgid "Entering delete_volume."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:413
+#, python-format
+msgid "Delete Volume: %(volume)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:420
+#, python-format
+msgid "Volume %(name)s not found on the array. No volume to delete."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:430
+#, python-format
+msgid ""
+"Error Delete Volume: %(volumename)s. Storage Configuration Service not "
+"found."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:438
+#, python-format
+msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:442
+#, python-format
+msgid ""
+"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigService: "
+"%(service)s TheElement: %(vol_instance)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:456
+#, python-format
+msgid ""
+"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: "
+"%(error)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:465
+#, python-format
+msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:472
+msgid "Entering create_snapshot."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:476
+#, python-format
+msgid "Create snapshot: %(snapshot)s: volume: %(volume)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:488
+#, python-format
+msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:495
+#: cinder/volume/drivers/emc/emc_smis_common.py:497
+#: cinder/volume/drivers/emc/emc_smis_common.py:567
+#, python-format
+msgid "Cannot find Replication Service to create snapshot for volume %s."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:502
+#, python-format
+msgid ""
+"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s "
+"Source: %(volume)s Replication Service: %(service)s ElementName: "
+"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:518
+#, python-format
+msgid ""
+"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s "
+"Return code: %(rc)lu"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:527
+#, python-format
+msgid ""
+"Error Create Snapshot: %(snapshot)s Volume: %(volume)s Error: "
+"%(errordesc)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:535
+#, python-format
+msgid ""
+"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s "
+"Return code: %(rc)lu."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:541
+msgid "Entering delete_snapshot."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:545
+#, python-format
+msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:551
+#, python-format
+msgid ""
+"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding "
+"StorageSynchronization_SV_SV."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:559
+#, python-format
+msgid ""
+"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No "
+"snapshot to delete."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:574
+#, python-format
+msgid ""
+"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: "
+"ModifyReplicaSynchronization: Replication Service: %(service)s "
+"Operation: 19 Synchronization: %(sync_name)s."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:590
+#, python-format
+msgid ""
+"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s "
+"Return code: %(rc)lu"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:599
+#, python-format
+msgid ""
+"Error Delete Snapshot: Volume: %(volumename)s Snapshot: "
+"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:611
+#, python-format
+msgid ""
+"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: "
+"%(snapshotname)s Return code: %(rc)lu."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:621
+#, python-format
+msgid "Create export: %(volume)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:626
+#, python-format
+msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:648
+#, python-format
+msgid ""
+"ExposePaths: %(vol)s ConfigService: %(service)s LUNames: %(lun_name)s"
+" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:663
+#, python-format
+msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:674
+#, python-format
+msgid "Error mapping volume %s."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:678
+#, python-format
+msgid "ExposePaths for volume %s completed successfully."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:694
+#, python-format
+msgid ""
+"HidePaths: %(vol)s ConfigService: %(service)s LUNames: %(device_id)s "
+"LunMaskingSCSIProtocolController: %(lunmasking)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:707
+#, python-format
+msgid "Error unmapping volume %s."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:711
+#, python-format
+msgid "HidePaths for volume %s completed successfully."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:724
+#, python-format
+msgid ""
+"AddMembers: ConfigService: %(service)s MaskingGroup: %(masking_group)s"
+" Members: %(vol)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:739
+#, python-format
+msgid "Error mapping volume %(vol)s. %(error)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:744
+#, python-format
+msgid "AddMembers for volume %s completed successfully."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:757
+#, python-format
+msgid ""
+"RemoveMembers: ConfigService: %(service)s MaskingGroup: "
+"%(masking_group)s Members: %(vol)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:770
+#, python-format
+msgid "Error unmapping volume %(vol)s. %(error)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:775
+#, python-format
+msgid "RemoveMembers for volume %s completed successfully."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:781
+#, python-format
+msgid "Map volume: %(volume)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:790
+#: cinder/volume/drivers/emc/emc_smis_common.py:820
+#, python-format
+msgid "Cannot find Controller Configuration Service for storage system %s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:804
+#, python-format
+msgid "Unmap volume: %(volume)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:810
+#, python-format
+msgid "Volume %s is not mapped. No volume to unmap."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:834
+#, python-format
+msgid "Initialize connection: %(volume)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:840
+#, python-format
+msgid "Volume %s is already mapped."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:852
+#, python-format
+msgid "Terminate connection: %(volume)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:884
+#, python-format
+msgid "Found Storage Type: %s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:887
+msgid "Storage type not found."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:903
+#, python-format
+msgid "Found Masking View: %s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:906
+msgid "Masking View not found."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:928
+msgid "Ecom user not found."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:948
+#, python-format
+msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:952
+msgid "Ecom server not found."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:959
+msgid "Cannot connect to ECOM server"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:971
+#, python-format
+msgid "Found Replication Service: %s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:984
+#, python-format
+msgid "Found Storage Configuration Service: %s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:997
+#, python-format
+msgid "Found Controller Configuration Service: %s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:1010
+#, python-format
+msgid "Found Storage Hardware ID Management Service: %s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:1054
+#, python-format
+msgid "Pool %(storage_type)s is not found."
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1060 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1066 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1082 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1114 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1117 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1130 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1158 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1184 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1188 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1248 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1289 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1302 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1314 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1326 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1361 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1404 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1409 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1419 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1441 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1463 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1491 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1520 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1526 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1538 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1548 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1550 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1566 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:152 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:161 +#, python-format +msgid "Cannot find device number for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:191 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:198 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:215 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:70 +#, python-format +msgid "Range: start LU: %(start)s, end LU: %(end)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:84 +#, python-format +msgid "setting LU upper (end) limit to %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:92 +#, python-format +msgid "%(element)s: %(val)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:103 cinder/volume/drivers/hds/hds.py:105 +#, python-format +msgid "XML exception reading parameter: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:178 +#, python-format +msgid "portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:197 +#, python-format +msgid "No configuration found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:250 +#, python-format +msgid "HDP not found: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:289 +#, python-format +msgid "iSCSI portal not found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:327 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:355 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is cloned." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:372 +#, python-format +msgid "LUN %(lun)s extended to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:395 +#, python-format +msgid "delete lun %(lun)s on %(name)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:480 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created from snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:503 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is created as snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:522 +#, python-format +msgid "LUN %s is deleted." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:57 +msgid "_instantiate_driver: configuration not found." 
+msgstr ""
+
+#: cinder/volume/drivers/huawei/__init__.py:64
+#, python-format
+msgid ""
+"_instantiate_driver: Loading %(protocol)s driver for Huawei OceanStor "
+"%(product)s series storage arrays."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/__init__.py:84
+#, python-format
+msgid ""
+"\"Product\" or \"Protocol\" is illegal. \"Product\" should be set to "
+"either T, Dorado or HVS. \"Protocol\" should be set to either iSCSI or "
+"FC. Product: %(product)s Protocol: %(protocol)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_dorado.py:74
+#, python-format
+msgid ""
+"initialize_connection: volume name: %(vol)s host: %(host)s initiator: "
+"%(wwn)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_dorado.py:92
+#: cinder/volume/drivers/huawei/huawei_t.py:461
+#, python-format
+msgid "initialize_connection: Target FC ports WWNS: %s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_t.py:101
+#, python-format
+msgid ""
+"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: "
+"%(ini)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_t.py:159
+#: cinder/volume/drivers/huawei/rest_common.py:1278
+#, python-format
+msgid ""
+"_get_iscsi_params: Failed to get target IP for initiator %(ini)s, please "
+"check config file."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_t.py:206
+#: cinder/volume/drivers/huawei/rest_common.py:1083
+#, python-format
+msgid "_get_tgt_iqn: iSCSI IP is %s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_t.py:234
+#, python-format
+msgid "_get_tgt_iqn: iSCSI target iqn is %s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_t.py:248
+#, python-format
+msgid ""
+"_get_iscsi_tgt_port_info: Failed to get iSCSI port info. Please make sure"
+" the iSCSI port IP %s is configured in array."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_t.py:323
+#: cinder/volume/drivers/huawei/huawei_t.py:552
+#, python-format
+msgid ""
+"terminate_connection: volume: %(vol)s, host: %(host)s, connector: "
+"%(initiator)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_t.py:351
+#, python-format
+msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_t.py:436
+msgid "validate_connector: The FC driver requires the wwpns in the connector."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_t.py:443
+#, python-format
+msgid ""
+"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: "
+"%(wwn)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_t.py:578
+#, python-format
+msgid "_remove_fc_ports: FC port was not found on host %(hostid)s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_utils.py:40
+#, python-format
+msgid "parse_xml_file: %s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_utils.py:129
+#, python-format
+msgid "_get_host_os_type: Host %(ip)s OS type is %(os)s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/rest_common.py:59
+#, python-format
+msgid "HVS Request URL: %(url)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/rest_common.py:60
+#, python-format
+msgid "HVS Request Data: %(data)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/rest_common.py:73
+#, python-format
+msgid "HVS Response Data: %(res)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/rest_common.py:75
+#, python-format
+msgid "Bad response from server: %s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/rest_common.py:82
+msgid "JSON transfer error"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/rest_common.py:102
+#, python-format
+msgid "Login error, reason is %s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/rest_common.py:166
+#, python-format
+msgid ""
+"%(err)s\n"
+"result: %(res)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/rest_common.py:173
+#, python-format
+msgid "%s \"data\" was not in result."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/rest_common.py:208
+msgid "Can't find the Qos policy in array"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/rest_common.py:246
+msgid "Can't find lun or lun group in array"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/rest_common.py:280
+#, python-format
+msgid "Invalid resource pool: %s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/rest_common.py:298
+#, python-format
+msgid "Get pool info error, pool name is: %s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/rest_common.py:327
+#, python-format
+msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/rest_common.py:354
+#, python-format
+msgid "_stop_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/rest_common.py:474
+#, python-format
+msgid ""
+"_mapping_hostgroup_and_lungroup: lun_group: %(lun_group)s view_id: "
+"%(view_id)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/rest_common.py:511
+#: cinder/volume/drivers/huawei/rest_common.py:543
+#, python-format
+msgid "initiator name:%(initiator_name)s, volume name:%(volume)s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/rest_common.py:527
+#, python-format
+msgid "host lun id is %s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/rest_common.py:553
+#, python-format
+msgid "the free wwns %s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/rest_common.py:574
+#, python-format
+msgid "the fc server properties is: %s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/rest_common.py:688
+#, python-format
+msgid "JSON transfer data error. %s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/rest_common.py:874
+#, python-format
+msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/rest_common.py:937
+#, python-format
+msgid ""
+"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". "
+"LUNType:%(fetchtype)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/rest_common.py:964
+#, python-format
+msgid ""
+"PrefetchType config is wrong. PrefetchType must be in 1,2,3,4. fetchtype "
+"is: %(fetchtype)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/rest_common.py:970
+msgid "Use default prefetch fetchtype. Prefetch fetchtype: Intelligent."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/rest_common.py:982
+#, python-format
+msgid ""
+"_wait_for_luncopy: LUNcopy status is not normal. LUNcopy name: "
+"%(luncopyname)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/rest_common.py:1056
+#, python-format
+msgid ""
+"_get_iscsi_port_info: Failed to get iscsi port info through config IP "
+"%(ip)s, please check config file."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/rest_common.py:1101
+#, python-format
+msgid "_get_tgt_iqn: iSCSI target iqn is %s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/rest_common.py:1124
+#, python-format
+msgid "_parse_volume_type: type id: %(type_id)s config parameter is: %(params)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/rest_common.py:1157
+#, python-format
+msgid ""
+"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key"
+" in extra_specs and make it consistent with the configuration file "
+"%(conf)s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/rest_common.py:1162
+#, python-format
+msgid "The config parameters are: %s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/rest_common.py:1239
+#: cinder/volume/drivers/huawei/ssh_common.py:118
+#: cinder/volume/drivers/huawei/ssh_common.py:1265
+#, python-format
+msgid "_check_conf_file: Config file invalid. %s must be set."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/rest_common.py:1246
+#: cinder/volume/drivers/huawei/ssh_common.py:125
+msgid "_check_conf_file: Config file invalid. StoragePool must be set."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/rest_common.py:1256
+#, python-format
+msgid ""
+"_check_conf_file: Config file invalid. Host OSType invalid.\n"
+"The valid values are: %(os_list)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/rest_common.py:1300
+msgid "Can not find lun in array"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:54
+#, python-format
+msgid "ssh_read: Read SSH timeout. %s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:70
+msgid "No response message. Please check system status."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:101
+#: cinder/volume/drivers/huawei/ssh_common.py:1249
+msgid "do_setup"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:135
+#: cinder/volume/drivers/huawei/ssh_common.py:1287
+#, python-format
+msgid ""
+"_check_conf_file: Config file invalid. Host OSType is invalid.\n"
+"The valid values are: %(os_list)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:169
+#, python-format
+msgid "_get_login_info: %s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:224
+#, python-format
+msgid "create_volume: volume name: %s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:242
+#, python-format
+msgid ""
+"_name_translate: Name in cinder: %(old)s, new name in storage system: "
+"%(new)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:279
+#, python-format
+msgid ""
+"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key"
+" in extra_specs and make it consistent with the element in configuration "
+"file %(conf)s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:373
+#: cinder/volume/drivers/huawei/ssh_common.py:1451
+#, python-format
+msgid "LUNType must be \"Thin\" or \"Thick\". LUNType:%(type)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:395
+msgid ""
+"_parse_conf_lun_params: Use default prefetch type. 
Prefetch type: " +"Intelligent" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:421 +#, python-format +msgid "" +"_get_maximum_capacity_pool_id: Failed to get pool id. Please check config" +" file and make sure the StoragePool %s is created in storage array." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:436 +#, python-format +msgid "CLI command: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:466 +#, python-format +msgid "" +"_execute_cli: Can not connect to IP %(old)s, try to connect to the other " +"IP %(new)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:501 +#, python-format +msgid "_execute_cli: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:511 +#, python-format +msgid "delete_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:516 +#, python-format +msgid "delete_volume: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:570 +#, python-format +msgid "" +"create_volume_from_snapshot: snapshot name: %(snapshot)s, volume name: " +"%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:580 +#, python-format +msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:650 +#, python-format +msgid "_wait_for_luncopy: LUNcopy %(luncopyname)s status is %(status)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:688 +#, python-format +msgid "create_cloned_volume: src volume: %(src)s, tgt volume: %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:697 +#, python-format +msgid "Source volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:739 +#, python-format +msgid "" +"extend_volume: extended volume name: %(extended_name)s new added volume " +"name: %(added_name)s new added volume size: %(added_size)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:747 +#, python-format +msgid "extend_volume: volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:779 +#, python-format +msgid "create_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:785 +msgid "create_snapshot: Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:792 +#, python-format +msgid "create_snapshot: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:855 +#, python-format +msgid "delete_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:865 +#, python-format +msgid "" +"delete_snapshot: Can not delete snapshot %s for it is a source LUN of " +"LUNCopy." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:873 +#, python-format +msgid "delete_snapshot: Snapshot %(snap)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:916 +#, python-format +msgid "" +"%(func)s: %(msg)s\n" +"CLI command: %(cmd)s\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:933 +#, python-format +msgid "map_volume: Volume %s was not found." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1079 +#, python-format +msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1102 +#, python-format +msgid "remove_map: Host %s does not exist." 
+msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1106 +#, python-format +msgid "remove_map: Volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1119 +#, python-format +msgid "remove_map: No map between host %(host)s and volume %(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1138 +#, python-format +msgid "" +"_delete_map: There are IOs accessing the system. Retry to delete host map" +" %(mapid)s 10s later." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1146 +#, python-format +msgid "" +"_delete_map: Failed to delete host map %(mapid)s.\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1185 +msgid "_update_volume_stats: Updating volume stats." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1277 +msgid "_check_conf_file: Config file invalid. StoragePool must be specified." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1311 +msgid "" +"_get_device_type: The driver only supports Dorado5100 and Dorado 2100 G2 " +"now." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1389 +#, python-format +msgid "" +"create_volume_from_snapshot: %(device)s does not support create volume " +"from snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1396 +#, python-format +msgid "create_cloned_volume: %(device)s does not support clone volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1404 +#, python-format +msgid "extend_volume: %(device)s does not support extend volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1413 +#, python-format +msgid "create_snapshot: %(device)s does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:132 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:142 +#, python-format +msgid "Failed getting details for pool %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:179 +msgid "do_setup: No configured nodes." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:182 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:186 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:190 +msgid "Unable to determine system name" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:193 +msgid "Unable to determine system id" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:196 +msgid "Unable to determine pool extent size" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:203 +#: cinder/volume/drivers/netapp/iscsi.py:122 +#: cinder/volume/drivers/netapp/nfs.py:639 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:157 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:209 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:217 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:225 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:235 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:254 +msgid "The connector does not contain the required information." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:277 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:297 +msgid "CHAP secret exists for host but CHAP is disabled" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:302 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:314 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:316 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:333 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:342 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:365 +msgid "" +"Could not get FC connection information for the host-volume connection. " +"Is the host configured properly for FC connections?" 
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:380
+#, python-format
+msgid ""
+"initialize_connection: Failed to collect return properties for volume "
+"%(vol)s and connector %(conn)s.\n"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:385
+#, python-format
+msgid ""
+"leave: initialize_connection:\n"
+" volume: %(vol)s\n"
+" connector %(conn)s\n"
+" properties: %(prop)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:403
+#, python-format
+msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:411
+msgid "terminate_connection: Failed to get host name from connector."
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:421
+#, python-format
+msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:447
+msgid "create_volume_from_snapshot: Source and destination size differ."
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:459
+msgid "create_cloned_volume: Source and destination size differ."
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:470
+#, python-format
+msgid "enter: extend_volume: volume %s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:474
+msgid "extend_volume: Extending a volume with snapshots is not supported."
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:481
+#, python-format
+msgid "leave: extend_volume: volume %s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:497
+#, python-format
+msgid "enter: migrate_volume: id=%(id)s, host=%(host)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:523
+#, python-format
+msgid "leave: migrate_volume: id=%(id)s, host=%(host)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:540
+#, python-format
+msgid ""
+"enter: retype: id=%(id)s, new_type=%(new_type)s, diff=%(diff)s, "
+"host=%(host)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:581
+#, python-format
+msgid ""
+"exit: retype: id=%(id)s, new_type=%(new_type)s, diff=%(diff)s, "
+"host=%(host)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:622
+msgid "Could not get pool data from the storage"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:623
+msgid "_update_volume_stats: Could not get storage pool data"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:44
+#, python-format
+msgid "Could not find key in output of command %(cmd)s: %(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:64
+#, python-format
+msgid "Failed to get code level (%s)."
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:86
+#, python-format
+msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:143
+#, python-format
+msgid "WWPN on node %(node)s: %(wwpn)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:165
+#, python-format
+msgid "Failed to find host %s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:178
+#, python-format
+msgid "enter: get_host_from_connector: %s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:207
+#, python-format
+msgid "leave: get_host_from_connector: host %s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:218
+#, python-format
+msgid "enter: create_host: host %s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:224
+msgid "create_host: Host name is not unicode or string"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:235
+msgid "create_host: No initiators or wwpns supplied."
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:265
+#, python-format
+msgid "leave: create_host: host %(host)s - %(host_name)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:275
+#, python-format
+msgid "enter: map_vol_to_host: volume %(volume_name)s to host %(host_name)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:301
+#, python-format
+msgid ""
+"leave: map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host "
+"%(host_name)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:311
+#, python-format
+msgid "enter: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:318
+#, python-format
+msgid "unmap_vol_from_host: No mapping of volume %(vol_name)s to any host found."
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:324
+#, python-format
+msgid ""
+"unmap_vol_from_host: Multiple mappings of volume %(vol_name)s found, no "
+"host specified."
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:336
+#, python-format
+msgid ""
+"unmap_vol_from_host: No mapping of volume %(vol_name)s to host %(host)s "
+"found."
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:348
+#, python-format
+msgid "leave: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:377
+msgid ""
+"Illegal value specified for storwize_svc_vol_rsize: set to either a "
+"percentage (0-100) or -1"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:383
+msgid ""
+"Illegal value specified for storwize_svc_vol_warning: set to a percentage"
+" (0-100)"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:390
+msgid ""
+"Illegal value specified for storwize_svc_vol_grainsize: set to either 32,"
+" 64, 128, or 256"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:397
+msgid "System does not support compression"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:402
+msgid "If compression is set to True, rsize must also be set (not equal to -1)"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:408
+#, python-format
+msgid ""
+"Illegal value %(prot)s specified for storwize_svc_connection_protocol: "
+"valid values are %(enabled)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:417
+#, python-format
+msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:452
+msgid "Protocol must be specified as ' iSCSI' or ' FC'."
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:495
+#, python-format
+msgid "enter: create_vdisk: vdisk %s "
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:498
+#, python-format
+msgid "leave: _create_vdisk: volume %s "
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:525
+#, python-format
+msgid ""
+"Unexpected mapping status %(status)s for mapping %(id)s. Attributes: "
+"%(attr)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:535
+#, python-format
+msgid ""
+"Mapping %(id)s prepare failed to complete within the allotted %(to)d "
+"seconds timeout. Terminating."
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:544
+#, python-format
+msgid ""
+"enter: run_flashcopy: execute FlashCopy from source %(source)s to target "
+"%(target)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:552
+#, python-format
+msgid "leave: run_flashcopy: FlashCopy started from %(source)s to %(target)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:572
+#, python-format
+msgid "Loopcall: _check_vdisk_fc_mappings(), vdisk %s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:595
+#, python-format
+msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:631
+#, python-format
+msgid "Calling _ensure_vdisk_no_fc_mappings: vdisk %s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:639
+#, python-format
+msgid "enter: delete_vdisk: vdisk %s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:641
+#, python-format
+msgid "Tried to delete non-existent vdisk %s."
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:645
+#, python-format
+msgid "leave: delete_vdisk: vdisk %s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:649
+#, python-format
+msgid "enter: create_copy: snapshot %(src)s to %(tgt)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:654
+#, python-format
+msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:669
+#, python-format
+msgid "leave: _create_copy: snapshot %(tgt)s from vdisk %(src)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:691
+msgid "migrate_volume started without a vdisk copy in the expected pool."
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:743
+#, python-format
+msgid ""
+"Ignore change IO group as storage code level is %(code_level)s, below "
+"6.4.0.0"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:35
+#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:211
+#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:244
+#, python-format
+msgid ""
+"CLI Exception output:\n"
+" command: %(cmd)s\n"
+" stdout: %(out)s\n"
+" stderr: %(err)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:53
+#, python-format
+msgid "Expected no output from CLI command %(cmd)s, got %(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:65
+#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:256
+#, python-format
+msgid ""
+"Failed to parse CLI output:\n"
+" command: %(cmd)s\n"
+" stdout: %(out)s\n"
+" stderr: %(err)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:142
+msgid "Must pass wwpn or host to lsfabric."
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:158
+#, python-format
+msgid "Did not find success message nor error for %(fun)s: %(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:163
+msgid ""
+"storwize_svc_multihostmap_enabled is set to False, not allowing multi "
+"host mapping."
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:347
+#, python-format
+msgid "Did not find expected key %(key)s in %(fun)s: %(raw)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:382
+#, python-format
+msgid ""
+"Unexpected CLI response: header/row mismatch. header: %(header)s, row: "
+"%(row)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/api.py:419
+#, python-format
+msgid "No element by given name %s."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/api.py:440
+msgid "Not a valid value for NaElement."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/api.py:444
+msgid "NaElement name cannot be null."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/api.py:468
+msgid "Type cannot be converted into NaElement."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/common.py:75
+msgid "Required configuration not found"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/common.py:103
+#, python-format
+msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/common.py:109
+#, python-format
+msgid "Storage family %s is not supported"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/common.py:116
+#, python-format
+msgid "No default storage protocol found for storage family %(storage_family)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/common.py:123
+#, python-format
+msgid ""
+"Protocol %(storage_protocol)s is not supported for storage family "
+"%(storage_family)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/common.py:130
+#, python-format
+msgid ""
+"NetApp driver of family %(storage_family)s and protocol "
+"%(storage_protocol)s loaded"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/common.py:139
+msgid "Only loading netapp drivers supported."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/common.py:158
+#, python-format
+msgid ""
+"The configured NetApp driver is deprecated. Please refer to the link to "
+"resolve the issue '%s'."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:69
+#, python-format
+msgid "No metadata property %(prop)s defined for the LUN %(name)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:105
+#, python-format
+msgid "Using NetApp filer: %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:150
+msgid "Success getting LUN list from server"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:166
+#, python-format
+msgid "Created LUN with name %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:175
+#, python-format
+msgid "No entry in LUN table for volume/snapshot %(name)s."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:191
+#, python-format
+msgid "Destroyed LUN %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:227
+#, python-format
+msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:232
+#, python-format
+msgid ""
+"Successfully fetched target details for LUN %(name)s and initiator "
+"%(initiator_name)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:238
+#, python-format
+msgid "Failed to get LUN target details for the LUN %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:249
+#, python-format
+msgid "Failed to get target portal for the LUN %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:252
+#, python-format
+msgid "Failed to get target IQN for the LUN %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:290
+#, python-format
+msgid "Snapshot %s deletion successful"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:310
+#: cinder/volume/drivers/netapp/iscsi.py:565
+#: cinder/volume/drivers/netapp/nfs.py:99
+#: cinder/volume/drivers/netapp/nfs.py:206
+#, python-format
+msgid "Resizing %s failed. Cleaning volume."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:325
+#, python-format
+msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:412
+#, python-format
+msgid "Error mapping lun. Code: %(code)s, Message: %(message)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:431
+#, python-format
+msgid "Error unmapping lun. Code: %(code)s, Message: %(message)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:511
+msgid "Object is not a NetApp LUN."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:543
+#, python-format
+msgid "Message: %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:545
+#, python-format
+msgid "Error getting lun attribute. Exception: %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:600
+#, python-format
+msgid "No need to extend volume %s as it is already the requested new size."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:606
+#, python-format
+msgid "Resizing lun %s directly to new size."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:633
+#, python-format
+msgid "Lun %(path)s geometry failed. Message - %(msg)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:662
+#, python-format
+msgid "Moving lun %(name)s to %(new_name)s."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:677
+#, python-format
+msgid "Resizing lun %s using sub clone to new size."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:684
+#, python-format
+msgid "%s cannot be sub clone resized as it is hosted on compressed volume"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:690
+#, python-format
+msgid "%s cannot be sub clone resized as it contains no blocks."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:707
+#, python-format
+msgid "Post clone resize lun %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:718
+#, python-format
+msgid "Failure staging lun %s to tmp."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:723
+#, python-format
+msgid "Failure moving new cloned lun to %s."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:727
+#, python-format
+msgid "Failure deleting staged tmp lun %s."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:730
+#, python-format
+msgid "Unknown exception in post clone resize lun %s."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:732
+#, python-format
+msgid "Exception details: %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:736
+msgid "Getting lun block count."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:741
+#, python-format
+msgid "Failure getting lun info for %s."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:785
+#, python-format
+msgid "Failed to get vol with required size and extra specs for volume: %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:796
+#, python-format
+msgid "Error provisioning vol %(name)s on %(volume)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:841
+#, python-format
+msgid "No iscsi service found for vserver %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:982
+#, python-format
+msgid "Cloned LUN with new name %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:986
+#, python-format
+msgid "No cloned lun named %s found on the filer"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1087
+msgid "Cluster ssc is not updated. No volume stats found."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1149
+#: cinder/volume/drivers/netapp/nfs.py:1080
+msgid "Unsupported ONTAP version. ONTAP version 7.3.1 and above is supported."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1153
+#: cinder/volume/drivers/netapp/nfs.py:1084
+#: cinder/volume/drivers/netapp/utils.py:320
+msgid "Api version could not be determined."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1164
+#, python-format
+msgid "Failed to get vol with required size for volume: %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1273
+#, python-format
+msgid "Error finding luns for volume %s. Verify volume exists."
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1390 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1393 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1456 +msgid "Volume refresh job already running. Returning..." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1462 +#, python-format +msgid "Error refreshing vol capacity. Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1470 +#, python-format +msgid "Refreshing capacity info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:104 +#: cinder/volume/drivers/netapp/nfs.py:211 +#, python-format +msgid "NFS file %s not discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:224 +#, python-format +msgid "Copied image to volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:230 +#, python-format +msgid "Registering image in cache %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:237 +#, python-format +msgid "" +"Exception while registering image %(image_id)s in cache. Exception: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:250 +#, python-format +msgid "Found cache file for image %(image_id)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:263 +#, python-format +msgid "Cloning img from cache for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:271 +msgid "Image cache cleaning in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:282 +msgid "Image cache cleaning in progress." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:293 +#, python-format +msgid "Cleaning cache for share %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:298 +#, python-format +msgid "Files to be queued for deletion %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:305 +#, python-format +msgid "Exception during cache cleaning %(share)s. Message - %(ex)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:311 +msgid "Image cache cleaning done." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:336 +#, python-format +msgid "Bytes to free %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:343 +#, python-format +msgid "Delete file path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:358 +#, python-format +msgid "Deleting file at path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:363 +#, python-format +msgid "Exception during deleting %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:395 +#, python-format +msgid "Unexpected exception in cloning image %(image_id)s. 
Message: %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:411 +#, python-format +msgid "Cloning image %s from cache" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:415 +#, python-format +msgid "Cache share: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:425 +#, python-format +msgid "Unexpected exception during image cloning in share %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:431 +#, python-format +msgid "Cloning image %s directly in share" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:436 +#, python-format +msgid "Share is cloneable %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:443 +#, python-format +msgid "Image is raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:450 +#, python-format +msgid "Image will locally be converted to raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:457 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:467 +#, python-format +msgid "Performing post clone for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:474 +msgid "NFS file could not be discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:478 +msgid "Checking file for resize" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:482 +#, python-format +msgid "Resizing file to %sG" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:488 +msgid "Resizing image file failed." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:510 +msgid "Discover file retries exhausted." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:529 +#, python-format +msgid "Image location not in the expected format %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:557 +#, python-format +msgid "Found possible share matches %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:561 +msgid "Unexpected exception while short listing used share." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:599 +#, python-format +msgid "Extending volume %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:710 +#, python-format +msgid "Shares on vserver %s will only be used for provisioning." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:714 +#: cinder/volume/drivers/netapp/nfs.py:892 +msgid "No vserver set in config. SSC will be disabled." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:757 +#, python-format +msgid "Exception creating vol %(name)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:765 +#, python-format +msgid "Volume %s could not be created on shares." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:815 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:856 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:862 +#, python-format +msgid "" +"Cloning with params volume %(volume)s, src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:924 +msgid "No cluster ssc stats found. Wait for next volume stats update." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:941 +msgid "No shares found hence skipping ssc refresh." 
+msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:978 +#: cinder/volume/drivers/netapp/nfs.py:1221 +#, python-format +msgid "Shortlisted del elg files %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:983 +#: cinder/volume/drivers/netapp/nfs.py:1226 +#, python-format +msgid "Getting file usage for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:988 +#: cinder/volume/drivers/netapp/nfs.py:1231 +#, python-format +msgid "file-usage for path %(path)s is %(bytes)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1005 +#: cinder/volume/drivers/netapp/nfs.py:1268 +#, python-format +msgid "Share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1007 +#: cinder/volume/drivers/netapp/nfs.py:1270 +#, python-format +msgid "No share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1038 +#, python-format +msgid "Found volume %(vol)s for share %(share)s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1129 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1139 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:241 +#, python-format +msgid "Unexpected error while creating ssc vol list. Message - %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:272 +#, python-format +msgid "Exception querying aggr options. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:313 +#, python-format +msgid "Exception querying sis information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:347 +#, python-format +msgid "Exception querying mirror information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:379 +#, python-format +msgid "Exception querying storage disk. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:421 +#, python-format +msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:455 +#, python-format +msgid "Successfully completed stale refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:482 +#, python-format +msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:488 +#, python-format +msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:500 +msgid "Backend not a VolumeDriver." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:502 +msgid "Backend server not NaServer." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:505 +msgid "ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:517 +msgid "refresh stale ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:616 +msgid "Fatal error: User not permitted to query NetApp volumes." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:623 +#, python-format +msgid "" +"The user does not have access or sufficient privileges to use all ssc " +"apis. The ssc features %s may not work as expected." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:122 +msgid "ems executed successfully." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:124 +#, python-format +msgid "Failed to invoke ems. 
Message : %s" +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:137 +msgid "" +"It is not the recommended way to use drivers by NetApp. Please use " +"NetAppDriver to achieve the functionality." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:160 +msgid "Requires an NaServer instance." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:317 +msgid "Unsupported Clustered Data ONTAP version." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:99 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:150 +#, python-format +msgid "Extending volume: %(id)s New size: %(size)s GB" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:166 +#, python-format +msgid "Volume %s does not exist, it seems it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:179 +#, python-format +msgid "Cannot delete snapshot %(origin): %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:190 +#, python-format +msgid "Creating temp snapshot of the original volume: %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:200 +#: cinder/volume/drivers/nexenta/nfs.py:200 +#, python-format +msgid "Volume creation failed, deleting created snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:205 +#: cinder/volume/drivers/nexenta/nfs.py:205 +#, python-format +msgid "Failed to delete zfs snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:223 +#, python-format +msgid "Enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:250 +#, python-format +msgid "Remote NexentaStor appliance at %s should be SSH-bound." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:267 +#, python-format +msgid "" +"Cannot send source snapshot %(src)s to destination %(dst)s. Reason: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:275 +#, python-format +msgid "" +"Cannot delete temporary source snapshot %(src)s on NexentaStor Appliance:" +" %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:281 +#, python-format +msgid "Cannot delete source volume %(volume)s on NexentaStor Appliance: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:318 +#, python-format +msgid "Snapshot %s does not exist, it seems it was already deleted." 
+msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:439 +#: cinder/volume/drivers/windows/windows_utils.py:230 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:449 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:461 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:471 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:481 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:514 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:522 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:83 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:88 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:89 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:90 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:96 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:85 +#, python-format +msgid "Volume %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:89 +#, python-format +msgid "Folder %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:114 +#, python-format +msgid "Creating folder on Nexenta Store %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:146 +#, python-format +msgid "Cannot destroy created folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:176 +#, python-format +msgid "Cannot destroy cloned folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:227 +#, python-format +msgid "Folder %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:237 +#: cinder/volume/drivers/nexenta/nfs.py:268 +#, python-format +msgid "Snapshot %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:302 +#, python-format +msgid "Creating regular file: %s.This may take some time." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:313 +#, python-format +msgid "Regular file: %s created." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:365 +#, python-format +msgid "Sharing folder %s on Nexenta Store" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:393 +#, python-format +msgid "Shares loaded: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/utils.py:46 +#, python-format +msgid "Invalid value: \"%s\"" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:93 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:99 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:107 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:137 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:190 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:246 +#, python-format +msgid "Snapshot info: %(name)s => %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:321 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:79 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:166 +#, python-format +msgid "Invalid hp3parclient version. Version %s or greater required." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:179 +#, python-format +msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:193 +#, python-format +msgid "HP3PARCommon %(common_ver)s, hp3parclient %(rest_ver)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:212 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:494 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:228 +#, python-format +msgid "Failed to get domain because CPG (%s) doesn't exist on array." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:247 +#, python-format +msgid "Error extending volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:347 +#, python-format +msgid "command %s failed" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:390 +#, python-format +msgid "Error running ssh command: %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:586 +#, python-format +msgid "VV Set %s does not exist." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:633 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:684 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:752 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1004 +#, python-format +msgid "Failure in update_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1019 +#, python-format +msgid "Failure in clear_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1031 +#, python-format +msgid "Error attaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1039 +#, python-format +msgid "Error detaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:124 +#, python-format +msgid "Invalid IP address format '%s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:158 +#, python-format +msgid "" +"Found invalid iSCSI IP address(s) in configuration option(s) " +"hp3par_iscsi_ips or iscsi_ip_address '%s.'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:164 +msgid "At least one valid iSCSI IP address must be set." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:266 +msgid "Least busy iSCSI port not found, using first iSCSI port in list." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:75 +#, python-format +msgid "Failure while invoking function: %(func)s. Error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:162 +#, python-format +msgid "Error while terminating session: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:165 +msgid "Successfully established connection to the server." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:172 +#, python-format +msgid "Error while logging out the user: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:218 +#, python-format +msgid "" +"Not authenticated error occurred. Will create session and try API call " +"again: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:258 +#, python-format +msgid "Task: %(task)s progress: %(prog)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:262 +#, python-format +msgid "Task %s status: success." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:266 +#: cinder/volume/drivers/vmware/api.py:271 +#, python-format +msgid "Task: %(task)s failed with error: %(err)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:290 +msgid "Lease is ready." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:294 +msgid "Lease initializing..." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:304 +#, python-format +msgid "Error: unknown lease state %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:51 +#, python-format +msgid "Read %(bytes)s out of %(max)s from ThreadSafePipe." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:56 +#, python-format +msgid "Completed transfer of size %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:102 +#, python-format +msgid "Initiating image service update on image: %(image)s with meta: %(meta)s" +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:117 +#, python-format +msgid "Glance image: %s is now active." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:123 +#, python-format +msgid "Glance image: %s is in killed state." 
+msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:132 +#, python-format +msgid "Glance image %(id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:171 +#, python-format +msgid "" +"Exception during HTTP connection close in VMwareHTTPWrite. Exception is " +"%s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:203 +#: cinder/volume/drivers/vmware/read_write_util.py:292 +msgid "Could not retrieve URL from lease." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:206 +#, python-format +msgid "Opening vmdk url: %s for write." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:231 +#, python-format +msgid "Written %s bytes to vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:242 +#: cinder/volume/drivers/vmware/read_write_util.py:318 +#, python-format +msgid "Updating progress to %s percent." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:258 +#: cinder/volume/drivers/vmware/read_write_util.py:334 +msgid "Lease released." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:260 +#: cinder/volume/drivers/vmware/read_write_util.py:336 +#, python-format +msgid "Lease is already in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:295 +#, python-format +msgid "Opening vmdk url: %s for read." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:307 +#, python-format +msgid "Read %s bytes from vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:150 +#, python-format +msgid "Error(s): %s occurred in the call to RetrievePropertiesEx." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:189 +#, python-format +msgid "No such SOAP method %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:198 +#, python-format +msgid "httplib error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:209 +#, python-format +msgid "Socket error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:218 +#, python-format +msgid "Type error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:225 +#, python-format +msgid "Error in %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:112 +#, python-format +msgid "Returning spec value %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:115 +#, python-format +msgid "Invalid spec value: %s specified." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:118 +#, python-format +msgid "Returning default spec value: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:169 +#, python-format +msgid "%s not set." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:174 +#, python-format +msgid "Successfully setup driver: %(driver)s for server: %(ip)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:219 +msgid "Backing not available, no operation to be performed." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:287 +#, python-format +msgid "" +"Unable to pick datastore to accommodate %(size)s bytes from the " +"datastores: %(dss)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:293 +#, python-format +msgid "" +"Selected datastore: %(datastore)s with %(host_count)d connected host(s) " +"for the volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:375 +#, python-format +msgid "" +"Unable to find suitable datastore for volume of size: %(vol)s GB under " +"host: %(host)s. 
More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:385 +#, python-format +msgid "Unable to find host to accommodate a disk of size: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:412 +#, python-format +msgid "" +"Unable to find suitable datastore for volume: %(vol)s under host: " +"%(host)s. More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:422 +#, python-format +msgid "Unable to create volume: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:441 +#, python-format +msgid "The instance: %s for which initialize connection is called, exists." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:448 +#, python-format +msgid "There is no backing for the volume: %s. Need to create one." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:456 +msgid "The instance for which initialize connection is called, does not exist." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:461 +#, python-format +msgid "Trying to boot from an empty volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:470 +#, python-format +msgid "" +"Returning connection_info: %(info)s for volume: %(volume)s with " +"connector: %(connector)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:518 +#, python-format +msgid "Snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:523 +#, python-format +msgid "There is no backing, so will not create snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:528 +#, python-format +msgid "Successfully created snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:549 +#, python-format +msgid "Delete snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:554 +#, python-format +msgid "There is no backing, and so there is no snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:558 +#, python-format +msgid "Successfully deleted snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:586 +#, python-format +msgid "Successfully cloned new backing: %(back)s from source VMDK file: %(vmdk)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:603 +#, python-format +msgid "" +"There is no backing for the source volume: %(svol)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:633 +#, python-format +msgid "" +"There is no backing for the source snapshot: %(snap)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:642 +#: cinder/volume/drivers/vmware/vmdk.py:982 +#, python-format +msgid "" +"There is no snapshot point for the snapshoted volume: %(snap)s. Not " +"creating any backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:678 +#, python-format +msgid "Cannot create image of disk format: %s. Only vmdk disk format is accepted." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:713 +#: cinder/volume/drivers/vmware/vmdk.py:771 +#, python-format +msgid "Fetching glance image: %(id)s to server: %(host)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:722 +#: cinder/volume/drivers/vmware/vmdk.py:792 +#, python-format +msgid "Done copying image: %(id)s to volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:725 +#, python-format +msgid "" +"Exception in copy_image_to_volume: %(excep)s. Deleting the backing: " +"%(back)s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:746 +#, python-format +msgid "Exception in _select_ds_for_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:749 +#, python-format +msgid "Selected datastore %(ds)s for new volume of size %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:784 +#, python-format +msgid "Exception in copy_image_to_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:787 +#, python-format +msgid "Deleting the backing: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:808 +#, python-format +msgid "Copy glance image: %s to create new volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:842 +msgid "Upload to glance of attached volume is not supported." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:847 +#, python-format +msgid "Copy Volume: %s to new image." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:853 +#, python-format +msgid "Backing not found, creating for volume: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:872 +#, python-format +msgid "Done copying volume %(vol)s to a new image %(img)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:922 +#, python-format +msgid "Relocating volume: %(backing)s to %(ds)s and %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:961 +#: cinder/volume/drivers/vmware/volumeops.py:630 +#, python-format +msgid "Successfully created clone: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:974 +#, python-format +msgid "" +"There is no backing for the snapshoted volume: %(snap)s. Not creating any" +" backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1010 +#, python-format +msgid "" +"There is no backing for the source volume: %(src)s. Not creating any " +"backing for volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1018 +#, python-format +msgid "Linked clone of source volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:94 +#, python-format +msgid "Downloading image: %s from glance image server as a flat vmdk file." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:107 +#: cinder/volume/drivers/vmware/vmware_images.py:126 +#, python-format +msgid "Downloaded image: %s from glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:113 +#, python-format +msgid "Downloading image: %s from glance image server using HttpNfc import." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:132 +#, python-format +msgid "Uploading image: %s to the Glance image server using HttpNfc export." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:158 +#, python-format +msgid "Uploaded image: %s to the Glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:87 +#, python-format +msgid "Did not find any backing with name: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:94 +#, python-format +msgid "Deleting the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:97 +#, python-format +msgid "Initiated deletion of VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:99 +#, python-format +msgid "Deleted the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:237 +#, python-format +msgid "There are no valid datastores attached to %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:289 +#, python-format +msgid "" +"Creating folder: %(child_folder_name)s under parent folder: " +"%(parent_folder)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:306 +#, python-format +msgid "Child folder already present: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:314 +#, python-format +msgid "Created child folder: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:365 +#, python-format +msgid "Spec for creating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:383 +#, python-format +msgid "" +"Creating volume backing name: %(name)s disk_type: %(disk_type)s size_kb: " +"%(size_kb)s at folder: %(folder)s resourse pool: %(resource_pool)s " +"datastore name: %(ds_name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:395 +#, python-format +msgid "Initiated creation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:398 +#, python-format +msgid "Successfully created volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:438 +#, python-format +msgid "Spec for relocating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:452 +#, python-format +msgid "" +"Relocating backing: %(backing)s to datastore: %(ds)s and resource pool: " +"%(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:462 +#, python-format +msgid "Initiated relocation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:464 +#, python-format +msgid "" +"Successfully relocated volume backing: %(backing)s to datastore: %(ds)s " +"and resource pool: %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:474 +#, python-format +msgid "Moving backing: %(backing)s to folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:479 +#, python-format +msgid "Initiated move of volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:482 +#, python-format +msgid "Successfully moved volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:494 +#, python-format +msgid "Snapshoting backing: %(backing)s with name: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:501 +#, python-format +msgid "Initiated snapshot of volume backing: %(backing)s named: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:505 +#, python-format +msgid "Successfully created snapshot: %(snap)s for volume backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:553 +#, python-format +msgid "Deleting the snapshot: %(name)s from backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:558 +#, python-format +msgid "" +"Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " +"delete anything." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:565 +#, python-format +msgid "Initiated snapshot: %(name)s deletion for backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:569 +#, python-format +msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:597 +#, python-format +msgid "Spec for cloning the backing: %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:613 +#, python-format +msgid "" +"Creating a clone of backing: %(back)s, named: %(name)s, clone type: " +"%(type)s from snapshot: %(snap)s on datastore: %(ds)s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:627 +#, python-format +msgid "Initiated clone of backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:638 +#, python-format +msgid "Deleting file: %(file)s under datacenter: %(dc)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:646 +#, python-format +msgid "Initiated deletion via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:648 +#, python-format +msgid "Successfully deleted file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:701 +msgid "Copying disk data before snapshot of the VM" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:711 +#, python-format +msgid "Initiated copying disk data via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:713 +#, python-format +msgid "Successfully copied disk at: %(src)s to: %(dest)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:722 +#, python-format +msgid "Deleting vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:729 +#, python-format +msgid "Initiated deleting vmdk file via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:731 +#, python-format +msgid "Deleted vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/windows/windows.py:102 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:47 +#, python-format +msgid "" +"check_for_setup_error: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:53 +msgid "check_for_setup_error: there is no ISCSI traffic listening." +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:63 +#, python-format +msgid "" +"get_host_information: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:73 +#, python-format +msgid "" +"get_host_information: the ISCSI target information could not be " +"retrieved. WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:105 +#, python-format +msgid "" +"associate_initiator_with_iscsi_target: an association between initiator: " +"%(init)s and target name: %(target)s could not be established. WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:123 +#, python-format +msgid "" +"delete_iscsi_target: error when deleting the iscsi target associated with" +" target name: %(target)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:139 +#, python-format +msgid "" +"create_volume: error when creating the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:157 +#, python-format +msgid "" +"delete_volume: error when deleting the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:177 +#, python-format +msgid "" +"create_snapshot: error when creating the snapshot name: %(vol_name)s . 
" +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:193 +#, python-format +msgid "" +"create_volume_from_snapshot: error when creating the volume name: " +"%(vol_name)s from snapshot name: %(snap_name)s. WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:208 +#, python-format +msgid "" +"delete_snapshot: error when deleting the snapshot name: %(snap_name)s . " +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:223 +#, python-format +msgid "" +"create_iscsi_target: error when creating iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:240 +#, python-format +msgid "" +"remove_iscsi_target: error when deleting iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:255 +#, python-format +msgid "" +"add_disk_to_target: error adding disk associated to volume : %(vol_name)s" +" to the target name: %(tar_name)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:273 +#, python-format +msgid "" +"copy_vhd_disk: error when copying disk from source path : %(src_path)s to" +" destination path: %(dest_path)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:290 +#, python-format +msgid "" +"extend: error when extending the volume: %(vol_name)s .WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/flows/common.py:52 +#, python-format +msgid "Restoring source %(source_volid)s status to %(status)s" +msgstr "" + +#: cinder/volume/flows/common.py:58 +#, python-format +msgid "" +"Failed setting source volume %(source_volid)s back to its initial " +"%(source_status)s status" +msgstr "" + +#: cinder/volume/flows/common.py:83 +#, python-format +msgid "Updating volume: %(volume_id)s with %(update)s due to: %(reason)s" +msgstr "" + +#: cinder/volume/flows/common.py:90 +#: cinder/volume/flows/manager/create_volume.py:676 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(update)s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:81 +#, python-format +msgid "Originating snapshot status must be one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:103 +#, python-format +msgid "" +"Unable to create a volume from an originating source volume when its " +"status is not one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:126 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than the snapshot size " +"%(snap_size)sGB. They must be >= original snapshot size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:135 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than original volume size " +"%(source_size)sGB. They must be >= original volume size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:144 +#, python-format +msgid "Volume size %(size)s must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:186 +#, python-format +msgid "" +"Size of specified image %(image_size)sGB is larger than volume size " +"%(volume_size)sGB." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:194 +#, python-format +msgid "" +"Volume size %(volume_size)sGB cannot be smaller than the image minDisk " +"size %(min_disk)sGB." 
+msgstr "" + +#: cinder/volume/flows/api/create_volume.py:212 +#, python-format +msgid "Metadata property key %s greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:217 +#, python-format +msgid "Metadata property key %s value greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:254 +#, python-format +msgid "Availability zone '%s' is invalid" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:267 +msgid "Volume must be in the same availability zone as the snapshot" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:276 +msgid "Volume must be in the same availability zone as the source volume" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:315 +msgid "Volume type will be changed to be the same as the source volume." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:463 +#, python-format +msgid "Failed destroying volume entry %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:546 +#, python-format +msgid "Failed rolling back quota for %s reservations" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:590 +#, python-format +msgid "Failed to update quota for deleting volume: %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:678 +#: cinder/volume/flows/manager/create_volume.py:192 +#, python-format +msgid "Volume %s: create failed" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:682 +msgid "Unexpected build error:" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:105 +#, python-format +msgid "" +"Volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d due to " +"%(reason)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:124 +#, python-format +msgid "Volume %s: re-scheduled" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:141 +#, python-format +msgid "Updating volume %(volume_id)s with %(update)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:146 +#, python-format +msgid "Volume %s: resetting 'creating' status failed." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:165 +#, python-format +msgid "Volume %s: rescheduling failed" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:308 +#, python-format +msgid "" +"Failed notifying about the volume action %(event)s for volume " +"%(volume_id)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:345 +#, python-format +msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:347 +#, python-format +msgid "" +"Failed updating volume %(vol_id)s metadata using the provided " +"%(src_type)s %(src_id)s metadata" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:405 +#, python-format +msgid "" +"Failed fetching snapshot %(snapshot_id)s bootable flag using the provided" +" glance snapshot %(snapshot_ref_id)s volume reference" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:418 +#, python-format +msgid "Marking volume %s as bootable." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:421 +#, python-format +msgid "Failed updating volume %(volume_id)s bootable flag to true" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:448 +#, python-format +msgid "" +"Attempting download of %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s." 
+msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:455 +#: cinder/volume/flows/manager/create_volume.py:466 +#, python-format +msgid "" +"Failed to copy image %(image_id)s to volume: %(volume_id)s, error: " +"%(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:461 +#, python-format +msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:475 +#, python-format +msgid "" +"Downloaded image %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s successfully." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:512 +#, python-format +msgid "" +"Creating volume glance metadata for volume %(volume_id)s backed by image " +"%(image_id)s with: %(vol_metadata)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:526 +#, python-format +msgid "" +"Cloning %(volume_id)s from image %(image_id)s at location " +"%(image_location)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:552 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(updates)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:574 +#, python-format +msgid "Unable to create volume. Volume driver %s not initialized" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:588 +#, python-format +msgid "" +"Volume %(volume_id)s: being created using %(functor)s with specification:" +" %(volume_spec)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:611 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with creation provided " +"model %(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:619 +#, python-format +msgid "Volume %s: creating export" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:633 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with driver provided model " +"%(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:680 +#, python-format +msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" +msgstr "" + +#~ msgid "Error retrieving volume status: %s" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get system name" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get storage pool data" +#~ msgstr "" + +#~ msgid "Cannot find any Fibre Channel HBAs" +#~ msgstr "" + +#~ msgid "Volume status must be available or error" +#~ msgstr "" + +#~ msgid "No backend config with id %s" +#~ msgstr "" + +#~ msgid "No sm_flavor called %s" +#~ msgstr "" + +#~ msgid "No sm_volume with id %s" +#~ msgstr "" + +#~ msgid "Error: %s" +#~ msgstr "" + +#~ msgid "Unexpected state while cloning %s" +#~ msgstr "" + +#~ msgid "iSCSI device not found at %s" +#~ msgstr "" + +#~ msgid "Fibre Channel device not found." +#~ msgstr "" + +#~ msgid "Uncaught exception" +#~ msgstr "" + +#~ msgid "Out reactor registered" +#~ msgstr "" + +#~ msgid "CONSUMER GOT %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT QUEUED %(data)s" +#~ msgstr "" + +#~ msgid "Could not create IPC directory %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT %(data)s" +#~ msgstr "" + +#~ msgid "May specify only one of snapshot, imageRef or source volume" +#~ msgstr "" + +#~ msgid "Volume size cannot be lesser than the Snapshot size" +#~ msgstr "" + +#~ msgid "Unable to clone volumes that are in an error state" +#~ msgstr "" + +#~ msgid "Clones currently must be >= original volume size." 
+#~ msgstr "" + +#~ msgid "Volume size '%s' must be an integer and greater than 0" +#~ msgstr "" + +#~ msgid "Size of specified image is larger than volume size." +#~ msgstr "" + +#~ msgid "Image minDisk size is larger than the volume size." +#~ msgstr "" + +#~ msgid "" +#~ msgstr "" + +#~ msgid "Availability zone is invalid" +#~ msgstr "" + +#~ msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +#~ msgstr "" + +#~ msgid "volume %s: creating from snapshot" +#~ msgstr "" + +#~ msgid "volume %s: creating from existing volume" +#~ msgstr "" + +#~ msgid "volume %s: creating from image" +#~ msgstr "" + +#~ msgid "volume %s: creating" +#~ msgstr "" + +#~ msgid "Setting volume: %s status to error after failed image copy." +#~ msgstr "" + +#~ msgid "Unexpected Error: " +#~ msgstr "" + +#~ msgid "volume %s: creating export" +#~ msgstr "" + +#~ msgid "volume %s: create failed" +#~ msgstr "" + +#~ msgid "volume %s: created successfully" +#~ msgstr "" + +#~ msgid "volume %s: Error trying to reschedule create" +#~ msgstr "" + +#~ msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +#~ msgstr "" + +#~ msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +#~ msgstr "" + +#~ msgid "Downloaded image %(image_id)s to %(volume_id)s successfully." +#~ msgstr "" + +#~ msgid "Array Mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "LUN %(lun)s of size %(size)s MB is created." +#~ msgstr "" + +#~ msgid "Array mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "Failed to attach iser target for volume %(volume_id)s." +#~ msgstr "" + +#~ msgid "Fetching %s" +#~ msgstr "" + +#~ msgid "Link Local address is not found.:%s" +#~ msgstr "" + +#~ msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +#~ msgstr "" + +#~ msgid "Started %(name)s on %(_host)s:%(_port)s" +#~ msgstr "" + +#~ msgid "Unable to find a Fibre Channel volume device" +#~ msgstr "" + +#~ msgid "Volume device not found at %s" +#~ msgstr "" + +#~ msgid "Unable to find Volume Group: %s" +#~ msgstr "" + +#~ msgid "Failed to create Volume Group: %s" +#~ msgstr "" + +#~ msgid "snapshot %(snap_name)s: creating" +#~ msgstr "" + +#~ msgid "Running with CoraidDriver for ESM EtherCLoud" +#~ msgstr "" + +#~ msgid "Update session cookie %(session)s" +#~ msgstr "" + +#~ msgid "Message : %(message)s" +#~ msgstr "" + +#~ msgid "Error while trying to set group: %(message)s" +#~ msgstr "" + +#~ msgid "Unable to find group: %(group)s" +#~ msgstr "" + +#~ msgid "ESM urlOpen error" +#~ msgstr "" + +#~ msgid "JSON Error" +#~ msgstr "" + +#~ msgid "Request without URL" +#~ msgstr "" + +#~ msgid "Configure data : %s" +#~ msgstr "" + +#~ msgid "Configure response : %s" +#~ msgstr "" + +#~ msgid "Unable to retrive volume infos for volume %(volname)s" +#~ msgstr "" + +#~ msgid "Cannot login on Coraid ESM" +#~ msgstr "" + +#~ msgid "Fail to create volume %(volname)s" +#~ msgstr "" + +#~ msgid "Failed to delete volume %(volname)s" +#~ msgstr "" + +#~ msgid "Failed to Create Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "Failed to Delete Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "Failed to Create Volume from Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "fmt = %(fmt)s backed by: %(backing_file)s" +#~ msgstr "" + +#~ msgid "Expected image to be in raw format, but is %s" +#~ msgstr "" + +#~ msgid "volume group %s doesn't exist" +#~ msgstr "" + +#~ msgid "Error retrieving volume stats: %s" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Could not get system name" +#~ msgstr "" + +#~ msgid "CPG (%s) must 
be in a domain" +#~ msgstr "" + +#~ msgid "Error populating default encryption types!" +#~ msgstr "" + +#~ msgid "Unexpected error while running command." +#~ msgstr "" + +#~ msgid "Nexenta SA returned the error" +#~ msgstr "" + +#~ msgid "Ignored target group creation error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group member addition error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LU creation error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Copying metadata from snapshot %(snap_volume_id)s to %(volume_id)s" +#~ msgstr "" + +#~ msgid "Connection to glance failed" +#~ msgstr "" + +#~ msgid "Invalid snapshot" +#~ msgstr "" + +#~ msgid "Invalid input received" +#~ msgstr "" + +#~ msgid "Invalid volume type" +#~ msgstr "" + +#~ msgid "Invalid volume" +#~ msgstr "" + +#~ msgid "Invalid host" +#~ msgstr "" + +#~ msgid "Invalid auth key" +#~ msgstr "" + +#~ msgid "Invalid metadata" +#~ msgstr "" + +#~ msgid "Invalid metadata size" +#~ msgstr "" + +#~ msgid "Migration error" +#~ msgstr "" + +#~ msgid "Quota exceeded" +#~ msgstr "" + +#~ msgid "Connection to swift failed" +#~ msgstr "" + +#~ msgid "Volume migration failed" +#~ msgstr "" + +#~ msgid "SSH command injection detected" +#~ msgstr "" + +#~ msgid "Invalid qos specs" +#~ msgstr "" + +#~ msgid "debug in callback: %s" +#~ msgstr "" + +#~ msgid "Expected object of type: %s" +#~ msgstr "" + +#~ msgid "timefunc: '%(name)s' took %(total_time).2f secs" +#~ msgstr "" + +#~ msgid "base image still has %s snapshots so not deleting base image" +#~ msgstr "" + +#~ msgid "Failed to rename migration destination volume %(vol)s to %(name)s" +#~ msgstr "" + +#~ msgid "Resize volume \"%(name)s\" to %(size)s" +#~ msgstr "" + +#~ msgid "Volume \"%(name)s\" resized. New size is %(size)s" +#~ msgstr "" + +#~ msgid "Invalid snapshot backing file format: %s" +#~ msgstr "" + +#~ msgid "Extend volume from %(old_size) to %(new_size)" +#~ msgstr "" + +#~ msgid "pool %s doesn't exist" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Could not get system name." +#~ msgstr "" + +#~ msgid "Disk not found: %s" +#~ msgstr "" + +#~ msgid "read timed out" +#~ msgstr "" + +#~ msgid "check_for_setup_error." +#~ msgstr "" + +#~ msgid "check_for_setup_error: Can not get device type." +#~ msgstr "" + +#~ msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +#~ msgstr "" + +#~ msgid "_get_device_type: Storage Pool must be configured." +#~ msgstr "" + +#~ msgid "create_volume:volume name: %s." +#~ msgstr "" + +#~ msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +#~ msgstr "" + +#~ msgid "create_export: volume name:%s" +#~ msgstr "" + +#~ msgid "create_export:Volume %(name)s does not exist." +#~ msgstr "" + +#~ msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +#~ msgstr "" + +#~ msgid "terminate_connection:Host does not exist. Host name:%(host)s." +#~ msgstr "" + +#~ msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +#~ msgstr "" + +#~ msgid "create_snapshot:Device does not support snapshot." +#~ msgstr "" + +#~ msgid "create_snapshot:Resource pool needs 1GB valid size at least." +#~ msgstr "" + +#~ msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +#~ msgstr "" + +#~ msgid "create_snapshot:Snapshot does not exist. 
Snapshot name:%(name)s" +#~ msgstr "" + +#~ msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +#~ msgstr "" + +#~ msgid "delete_snapshot:Device does not support snapshot." +#~ msgstr "" + +#~ msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +#~ msgstr "" + +#~ msgid "_check_conf_file: %s" +#~ msgstr "" + +#~ msgid "Write login information to xml error. %s" +#~ msgstr "" + +#~ msgid "_get_login_info error. %s" +#~ msgstr "" + +#~ msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +#~ msgstr "" + +#~ msgid "_get_lun_set_info:%s" +#~ msgstr "" + +#~ msgid "_get_iscsi_info:%s" +#~ msgstr "" + +#~ msgid "CLI command:%s" +#~ msgstr "" + +#~ msgid "_execute_cli:%s" +#~ msgstr "" + +#~ msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +#~ msgstr "" + +#~ msgid "_get_tgt_iqn:iSCSI IP is %s." +#~ msgstr "" + +#~ msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +#~ msgstr "" + +#~ msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +#~ msgstr "" + +#~ msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +#~ msgstr "" + +#~ msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." +#~ msgstr "" + +#~ msgid "Ignored target creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group member addition error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LU creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LUN mapping entry addition error while ensuring export" +#~ msgstr "" + +#~ msgid "Invalid source volume %(reason)s." +#~ msgstr "" + +#~ msgid "The request is invalid." +#~ msgstr "" + +#~ msgid "Volume %(volume_id)s persistence file could not be found." +#~ msgstr "" + +#~ msgid "No disk at %(location)s" +#~ msgstr "" + +#~ msgid "Class %(class_name)s could not be found: %(exception)s" +#~ msgstr "" + +#~ msgid "Action not allowed." +#~ msgstr "" + +#~ msgid "Key pair %(key_name)s already exists." +#~ msgstr "" + +#~ msgid "Migration error: %(reason)s" +#~ msgstr "" + +#~ msgid "Maximum volume/snapshot size exceeded" +#~ msgstr "" + +#~ msgid "3PAR Host already exists: %(err)s. %(info)s" +#~ msgstr "" + +#~ msgid "Backup volume %(volume_id)s type not recognised." +#~ msgstr "" + +#~ msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s" +#~ msgstr "" + +#~ msgid "ssh_read: Read SSH timeout" +#~ msgstr "" + +#~ msgid "do_setup." +#~ msgstr "" + +#~ msgid "create_volume: volume name: %s." +#~ msgstr "" + +#~ msgid "delete_volume: volume name: %s." +#~ msgstr "" + +#~ msgid "create_cloned_volume: src volume: %(src)s tgt volume: %(tgt)s" +#~ msgstr "" + +#~ msgid "create_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" +#~ msgstr "" + +#~ msgid "delete_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" +#~ msgstr "" + +#~ msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Updating volume stats" +#~ msgstr "" + +#~ msgid "restore finished." +#~ msgstr "" + +#~ msgid "Error encountered during initialization of driver: %s" +#~ msgstr "" + +#~ msgid "Unabled to update stats, driver is uninitialized" +#~ msgstr "" + +#~ msgid "Snapshot file at %s does not exist." 
+#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %s does not exist" +#~ msgstr "" + +#~ msgid "Login to 3PAR array invalid" +#~ msgstr "" + +#~ msgid "There are no datastores present under %s." +#~ msgstr "" + +#~ msgid "Size for volume: %s not found, skipping secure delete." +#~ msgstr "" + +#~ msgid "Could not find attribute for LUN named %s" +#~ msgstr "" + +#~ msgid "Cleaning up incomplete backup operations" +#~ msgstr "" + +#~ msgid "Resetting volume %s to available (was backing-up)" +#~ msgstr "" + +#~ msgid "Resetting volume %s to error_restoring (was restoring-backup)" +#~ msgstr "" + +#~ msgid "Resetting backup %s to error (was creating)" +#~ msgstr "" + +#~ msgid "Resetting backup %s to available (was restoring)" +#~ msgstr "" + +#~ msgid "Resuming delete on backup: %s" +#~ msgstr "" + +#~ msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +#~ msgstr "" + +#~ msgid "create_backup finished. backup: %s" +#~ msgstr "" + +#~ msgid "delete_backup started, backup: %s" +#~ msgstr "" + +#~ msgid "delete_backup finished, backup %s deleted" +#~ msgstr "" + +#~ msgid "JSON transfer Error" +#~ msgstr "" + +#~ msgid "create volume error: %(err)s" +#~ msgstr "" + +#~ msgid "Create snapshot error." +#~ msgstr "" + +#~ msgid "Create luncopy error." +#~ msgstr "" + +#~ msgid "_find_host_lun_id transfer data error! " +#~ msgstr "" + +#~ msgid "ssh_read: Read SSH timeout." +#~ msgstr "" + +#~ msgid "There are no hosts in the inventory." +#~ msgstr "" + +#~ msgid "Unable to create volume: %(vol)s on the hosts: %(hosts)s." +#~ msgstr "" + +#~ msgid "Successfully cloned new backing: %s." +#~ msgstr "" + +#~ msgid "Successfully reverted clone: %(clone)s to snapshot: %(snapshot)s." +#~ msgstr "" + +#~ msgid "Copying backing files from %(src)s to %(dest)s." +#~ msgstr "" + +#~ msgid "Initiated copying of backing via task: %s." +#~ msgstr "" + +#~ msgid "Successfully copied backing to %s." +#~ msgstr "" + +#~ msgid "Registering backing at path: %s to inventory." +#~ msgstr "" + +#~ msgid "Initiated registring backing, task: %s." +#~ msgstr "" + +#~ msgid "Successfully registered backing: %s." +#~ msgstr "" + +#~ msgid "Reverting backing to snapshot: %s." +#~ msgstr "" + +#~ msgid "Initiated reverting snapshot via task: %s." +#~ msgstr "" + +#~ msgid "Successfully reverted to snapshot: %s." +#~ msgstr "" + +#~ msgid "Successfully copied disk data to: %s." +#~ msgstr "" + +#~ msgid "Error(s): %s occurred in the call to RetrieveProperties." +#~ msgstr "" + +#~ msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +#~ msgstr "" + +#~ msgid "Deploy v1 of the Cinder API. " +#~ msgstr "" + +#~ msgid "Deploy v2 of the Cinder API. " +#~ msgstr "" + +#~ msgid "_read_xml:%s" +#~ msgstr "" + +#~ msgid "request ip info is %s." +#~ msgstr "" + +#~ msgid "new str info is %s." +#~ msgstr "" + +#~ msgid "Failed to create iser target for volume %(volume_id)s." +#~ msgstr "" + +#~ msgid "Failed to remove iser target for volume %(volume_id)s." 
+#~ msgstr "" + +#~ msgid "rtstool is not installed correctly" +#~ msgstr "" + +#~ msgid "Creating iser_target for: %s" +#~ msgstr "" + +#~ msgid "Failed to create iser target for volume id:%(vol_id)s: %(e)s" +#~ msgstr "" + +#~ msgid "Removing iser_target for: %s" +#~ msgstr "" + +#~ msgid "Failed to remove iser target for volume id:%(vol_id)s: %(e)s" +#~ msgstr "" + +#~ msgid "Volume %s does not exist, it seems it was already deleted" +#~ msgstr "" + +#~ msgid "Executing zfs send/recv on the appliance" +#~ msgstr "" + +#~ msgid "zfs send/recv done, new volume %s created" +#~ msgstr "" + +#~ msgid "Failed to delete temp snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" + +#~ msgid "Failed to delete zfs recv snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" + +#~ msgid "rbd export-diff failed - %s" +#~ msgstr "" + +#~ msgid "rbd import-diff failed - %s" +#~ msgstr "" + +#~ msgid "%s is not on GPFS. Perhaps GPFS not mounted." +#~ msgstr "" + +#~ msgid "Folder %s does not exist, it seems it was already deleted." +#~ msgstr "" + +#~ msgid "No 'os-update_readonly_flag' was specified in request." +#~ msgstr "" + +#~ msgid "Volume 'readonly' flag must be specified in request as a boolean." +#~ msgstr "" + +#~ msgid "ISER provider_location not stored, using discovery" +#~ msgstr "" + +#~ msgid "Could not find iSER export for volume %s" +#~ msgstr "" + +#~ msgid "ISER Discovery: Found %s" +#~ msgstr "" + +#~ msgid "Failed to access the device on the path %(path)s: %(error)s." +#~ msgstr "" + +#~ msgid "iSER device not found at %s" +#~ msgstr "" + +#~ msgid "Found iSER node %(host_device)s (after %(tries)s rescans)." +#~ msgstr "" + +#~ msgid "Skipping ensure_export. No iser_target provisioned for volume: %s" +#~ msgstr "" + +#~ msgid "Skipping remove_export. No iser_target provisioned for volume: %s" +#~ msgstr "" + +#~ msgid "Downloading image: %s from glance image server." +#~ msgstr "" + +#~ msgid "Uploading image: %s to the Glance image server." +#~ msgstr "" + +#~ msgid "Invalid request body" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: prefix %s" +#~ msgstr "" + +#~ msgid "Schedule volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete schedule volume using flow: %s" +#~ msgstr "" + +#~ msgid "Create volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete create volume workflow" +#~ msgstr "" + +#~ msgid "Expected volume result not found" +#~ msgstr "" + +#~ msgid "Manager volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete manager volume workflow" +#~ msgstr "" + +#~ msgid "Unable to update stats, driver is uninitialized" +#~ msgstr "" + +#~ msgid "Bad reponse from server: %s" +#~ msgstr "" + +#~ msgid "%(flow)s has moved into state %(state)s from state %(old_state)s" +#~ msgstr "" + +#~ msgid "No request spec, will not reschedule" +#~ msgstr "" + +#~ msgid "No retry filter property or associated retry info, will not reschedule" +#~ msgstr "" + +#~ msgid "Retry info not present, will not reschedule" +#~ msgstr "" + +#~ msgid "Clear capabilities" +#~ msgstr "" + +#~ msgid "This usually means the volume was never succesfully created." +#~ msgstr "" + +#~ msgid "setting LU uppper (end) limit to %s" +#~ msgstr "" + +#~ msgid "Can't find lun or lun goup in array" +#~ msgstr "" + +#~ msgid "Volume to be restored to is smaller than the backup to be restored" +#~ msgstr "" + +#~ msgid "Volume driver '%(driver)s' not initialized." 
+#~ msgstr "" + +#~ msgid "in looping call" +#~ msgstr "" + +#~ msgid "Is the appropriate service running?" +#~ msgstr "" + +#~ msgid "Could not find another host" +#~ msgstr "" + +#~ msgid "Not enough allocatable volume gigabytes remaining" +#~ msgstr "" + +#~ msgid "Unable to update stats on non-intialized Volume Group: %s" +#~ msgstr "" + +#~ msgid "do_setup: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "migrate_volume started with more than one vdisk copy" +#~ msgstr "" + +#~ msgid "migrate_volume: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "Selected datastore: %s for the volume." +#~ msgstr "" + +#~ msgid "There are no valid datastores present under %s." +#~ msgstr "" + +#~ msgid "Unable to create volume, driver not initialized" +#~ msgstr "" + +#~ msgid "Migration %(migration_id)s could not be found." +#~ msgstr "" + +#~ msgid "Bad driver response status: %(status)s" +#~ msgstr "" + +#~ msgid "Instance %(instance_id)s could not be found." +#~ msgstr "" + +#~ msgid "Volume retype failed: %(reason)s" +#~ msgstr "" + +#~ msgid "SIGTERM received" +#~ msgstr "" + +#~ msgid "Child %(pid)d exited with status %(code)d" +#~ msgstr "" + +#~ msgid "_wait_child %d" +#~ msgstr "" + +#~ msgid "wait wrap.failed %s" +#~ msgstr "" + +#~ msgid "" +#~ "Report interval must be less than " +#~ "service down time. Current config: " +#~ ". Setting " +#~ "service_down_time to: %(new_service_down_time)s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target for volume %(name)s." +#~ msgstr "" + +#~ msgid "Updating iscsi target: %s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target %(name)s: %(e)s" +#~ msgstr "" + +#~ msgid "Caught '%(exception)s' exception." +#~ msgstr "" + +#~ msgid "Get code level failed" +#~ msgstr "" + +#~ msgid "do_setup: Could not get system name" +#~ msgstr "" + +#~ msgid "Failed to get license information." +#~ msgstr "" + +#~ msgid "do_setup: No configured nodes" +#~ msgstr "" + +#~ msgid "enter: _get_chap_secret_for_host: host name %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_chap_secret_for_host: host name " +#~ "%(host_name)s with secret %(chap_secret)s" +#~ msgstr "" + +#~ msgid "" +#~ "_create_host: Cannot clean host name. " +#~ "Host name is not unicode or string" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: %s" +#~ msgstr "" + +#~ msgid "leave: _get_host_from_connector: host %s" +#~ msgstr "" + +#~ msgid "enter: _create_host: host %s" +#~ msgstr "" + +#~ msgid "_create_host: No connector ports" +#~ msgstr "" + +#~ msgid "leave: _create_host: host %(host)s - %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +#~ msgstr "" + +#~ msgid "" +#~ "storwize_svc_multihostmap_enabled is set to " +#~ "False, Not allow multi host mapping" +#~ msgstr "" + +#~ msgid "volume %s mapping to multi host" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _map_vol_to_host: LUN %(result_lun)s, " +#~ "volume %(volume_name)s, host %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "leave: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "_create_host failed to return the host name." +#~ msgstr "" + +#~ msgid "_get_host_from_connector failed to return the host name for connector" +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to any host found." +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: Multiple mappings of " +#~ "volume %(vol_name)s found, no host " +#~ "specified." 
+#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to host %(host_name)s found" +#~ msgstr "" + +#~ msgid "protocol must be specified as ' iSCSI' or ' FC'" +#~ msgstr "" + +#~ msgid "enter: _create_vdisk: vdisk %s " +#~ msgstr "" + +#~ msgid "" +#~ "_create_vdisk %(name)s - did not find success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find success " +#~ "message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find mapping " +#~ "id in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to prepare FlashCopy" +#~ " from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "Unexecpted mapping status %(status)s for " +#~ "mapping %(id)s. Attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Mapping %(id)s prepare failed to " +#~ "complete within the allotted %(to)d " +#~ "seconds timeout. Terminating." +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s with " +#~ "exception %(ex)s" +#~ msgstr "" + +#~ msgid "_prepare_fc_map: %s" +#~ msgstr "" + +#~ msgid "" +#~ "_start_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "enter: _run_flashcopy: execute FlashCopy from" +#~ " source %(source)s to target %(target)s" +#~ msgstr "" + +#~ msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +#~ msgstr "" + +#~ msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %(src_vdisk)s (%(src_id)s) does not exist" +#~ msgstr "" + +#~ msgid "" +#~ "_create_copy: cannot get source vdisk " +#~ "%(src)s capacity from vdisk attributes " +#~ "%(attr)s" +#~ msgstr "" + +#~ msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_flashcopy_mapping_attributes: mapping " +#~ "%(fc_map_id)s, attributes %(attributes)s" +#~ msgstr "" + +#~ msgid "enter: _is_vdisk_defined: vdisk %s " +#~ msgstr "" + +#~ msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +#~ msgstr "" + +#~ msgid "enter: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "warning: Tried to delete vdisk %s but it does not exist." 
+#~ msgstr "" + +#~ msgid "leave: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "" +#~ "_add_vdisk_copy %(name)s - did not find" +#~ " success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "_get_vdisk_copy_attrs: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "_get_pool_attrs: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "enter: _execute_command_and_parse_attributes: command %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _execute_command_and_parse_attributes:\n" +#~ "command: %(cmd)s\n" +#~ "attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "_get_hdr_dic: attribute headers and values do not match.\n" +#~ " Headers: %(header)s\n" +#~ " Values: %(row)s" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ "stdout: %(out)s\n" +#~ "stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "Did not find expected column in %(fun)s: %(hdr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Volume size %(size)s cannot be lesser" +#~ " than the snapshot size %(snap_size)s. " +#~ "They must be >= original snapshot " +#~ "size." +#~ msgstr "" + +#~ msgid "" +#~ "Clones currently disallowed when %(size)s " +#~ "< %(source_size)s. They must be >= " +#~ "original volume size." +#~ msgstr "" + +#~ msgid "" +#~ "Size of specified image %(image_size)s " +#~ "is larger than volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "" +#~ "Image minDisk size %(min_disk)s is " +#~ "larger than the volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "Updating volume %(volume_id)s with %(update)s" +#~ msgstr "" + +#~ msgid "Volume %s: resetting 'creating' status failed" +#~ msgstr "" + +#~ msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s" +#~ msgstr "" + +#~ msgid "Marking volume %s as bootable" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting download of %(image_id)s " +#~ "(%(image_location)s) to volume %(volume_id)s" +#~ msgstr "" + +#~ msgid "" +#~ "Downloaded image %(image_id)s (%(image_location)s)" +#~ " to volume %(volume_id)s successfully" +#~ msgstr "" + +#~ msgid "" +#~ "Creating volume glance metadata for " +#~ "volume %(volume_id)s backed by image " +#~ "%(image_id)s with: %(vol_metadata)s" +#~ msgstr "" + +#~ msgid "" +#~ "Cloning %(volume_id)s from image %(image_id)s" +#~ " at location %(image_location)s" +#~ msgstr "" + diff --git a/cinder/locale/bn_IN/LC_MESSAGES/cinder.po b/cinder/locale/bn_IN/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..00250f99b2 --- /dev/null +++ b/cinder/locale/bn_IN/LC_MESSAGES/cinder.po @@ -0,0 +1,10085 @@ +# Bengali (India) translations for cinder. +# Copyright (C) 2013 ORGANIZATION +# This file is distributed under the same license as the cinder project. 
+# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Cinder\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-02-03 06:16+0000\n" +"PO-Revision-Date: 2013-10-20 01:34+0000\n" +"Last-Translator: Tom Fifield \n" +"Language-Team: Bengali (India) " +"(http://www.transifex.com/projects/p/openstack/language/bn_IN/)\n" +"Plural-Forms: nplurals=2; plural=(n != 1)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:102 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:66 cinder/brick/exception.py:33 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:88 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:107 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:112 +#, python-format +msgid "Volume driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:116 +#, python-format +msgid "Backup driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:120 +#, python-format +msgid "Connection to glance failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:124 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:129 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:133 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:137 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:141 +msgid "Volume driver not ready." +msgstr "" + +#: cinder/exception.py:145 cinder/brick/exception.py:74 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:150 +#, python-format +msgid "Invalid snapshot: %(reason)s" +msgstr "" + +#: cinder/exception.py:154 +#, python-format +msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:159 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:163 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:167 +msgid "The results are invalid." +msgstr "" + +#: cinder/exception.py:171 +#, python-format +msgid "Invalid input received: %(reason)s" +msgstr "" + +#: cinder/exception.py:175 +#, python-format +msgid "Invalid volume type: %(reason)s" +msgstr "" + +#: cinder/exception.py:179 +#, python-format +msgid "Invalid volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:183 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:187 +#, python-format +msgid "Invalid host: %(reason)s" +msgstr "" + +#: cinder/exception.py:193 cinder/brick/exception.py:81 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:197 +#, python-format +msgid "Invalid auth key: %(reason)s" +msgstr "" + +#: cinder/exception.py:201 +#, python-format +msgid "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" +msgstr "" + +#: cinder/exception.py:206 +msgid "Service is unavailable at this time." 
+msgstr "" + +#: cinder/exception.py:210 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:214 +#, python-format +msgid "The device in the path %(path)s is unavailable: %(reason)s" +msgstr "" + +#: cinder/exception.py:218 +#, python-format +msgid "Expected a uuid but received %(uuid)s." +msgstr "" + +#: cinder/exception.py:222 cinder/brick/exception.py:68 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:228 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:232 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "" +"Volume %(volume_id)s has no administration metadata with key " +"%(metadata_key)s." +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Invalid metadata: %(reason)s" +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Invalid metadata size: %(reason)s" +msgstr "" + +#: cinder/exception.py:250 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:255 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:264 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:269 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s deletion is not allowed with volumes " +"present with the type." +msgstr "" + +#: cinder/exception.py:274 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:278 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:282 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:287 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:291 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:295 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:328 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:332 +#, python-format +msgid "Unknown quota resources %(unknown)s." 
+msgstr "" + +#: cinder/exception.py:336 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:340 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:344 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:348 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:352 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:356 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:365 +#, python-format +msgid "Volume Type %(id)s already exists." +msgstr "" + +#: cinder/exception.py:369 +#, python-format +msgid "Volume type encryption for type %(type_id)s already exists." +msgstr "" + +#: cinder/exception.py:373 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:377 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:381 +#, python-format +msgid "Could not find parameter %(param)s" +msgstr "" + +#: cinder/exception.py:385 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:389 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:398 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:402 +#, python-format +msgid "Quota exceeded: code=%(code)s" +msgstr "" + +#: cinder/exception.py:409 +#, python-format +msgid "" +"Requested volume or snapshot exceeds allowed Gigabytes quota. Requested " +"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." +msgstr "" + +#: cinder/exception.py:415 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:419 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:423 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:427 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:432 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:436 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:440 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:444 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:449 +#, python-format +msgid "Glance metadata for volume/snapshot %(id)s cannot be found." 
+msgstr "" + +#: cinder/exception.py:453 +#, python-format +msgid "Failed to export for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:457 +#, python-format +msgid "Failed to create metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:461 +#, python-format +msgid "Failed to update metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:465 +#, python-format +msgid "Failed to copy metadata to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:469 +#, python-format +msgid "Failed to copy image to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:473 +msgid "Invalid Ceph args provided for backup rbd operation" +msgstr "" + +#: cinder/exception.py:477 +msgid "An error has occurred during backup operation" +msgstr "" + +#: cinder/exception.py:481 +msgid "Backup RBD operation failed" +msgstr "" + +#: cinder/exception.py:485 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:489 +msgid "Failed to identify volume backend." +msgstr "" + +#: cinder/exception.py:493 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "" + +#: cinder/exception.py:497 +#, python-format +msgid "Connection to swift failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:501 +#, python-format +msgid "Transfer %(transfer_id)s could not be found." +msgstr "" + +#: cinder/exception.py:505 +#, python-format +msgid "Volume migration failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:509 +#, python-format +msgid "SSH command injection detected: %(command)s" +msgstr "" + +#: cinder/exception.py:513 +#, python-format +msgid "QoS Specs %(specs_id)s already exists." +msgstr "" + +#: cinder/exception.py:517 +#, python-format +msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:522 +#, python-format +msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "No such QoS spec %(specs_id)s." +msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:536 +#, python-format +msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:541 +#, python-format +msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." +msgstr "" + +#: cinder/exception.py:546 +#, python-format +msgid "Invalid qos specs: %(reason)s" +msgstr "" + +#: cinder/exception.py:550 +#, python-format +msgid "QoS Specs %(specs_id)s is still associated with entities." +msgstr "" + +#: cinder/exception.py:554 +#, python-format +msgid "key manager error: %(reason)s" +msgstr "" + +#: cinder/exception.py:560 +msgid "Coraid Cinder Driver exception." +msgstr "" + +#: cinder/exception.py:564 +msgid "Failed to encode json data." +msgstr "" + +#: cinder/exception.py:568 +msgid "Login on ESM failed." +msgstr "" + +#: cinder/exception.py:572 +msgid "Relogin on ESM failed." +msgstr "" + +#: cinder/exception.py:576 +#, python-format +msgid "Group with name \"%(group_name)s\" not found." +msgstr "" + +#: cinder/exception.py:580 +#, python-format +msgid "ESM configure request failed: %(message)s." +msgstr "" + +#: cinder/exception.py:584 +#, python-format +msgid "Coraid ESM not available with reason: %(reason)s." +msgstr "" + +#: cinder/exception.py:589 +msgid "Zadara Cinder Driver exception." 
+msgstr "" + +#: cinder/exception.py:593 +#, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:597 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:601 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:605 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:609 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:613 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:618 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:622 +msgid "SolidFire Cinder Driver exception" +msgstr "" + +#: cinder/exception.py:626 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:630 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:636 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:641 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:645 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:649 cinder/exception.py:662 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:654 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:658 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/manager.py:133 +msgid "Notifying Schedulers of capabilities ..." +msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:105 +#, python-format +msgid "" +"Default quota for resource: %(res)s is set by the default quota flag: " +"quota_%(res)s, it is now deprecated. Please use the the default quota " +"class for default quota." +msgstr "" + +#: cinder/quota.py:748 +#, python-format +msgid "Created reservations %s" +msgstr "" + +#: cinder/quota.py:770 +#, python-format +msgid "Failed to commit reservations %s" +msgstr "" + +#: cinder/quota.py:790 +#, python-format +msgid "Failed to roll back reservations %s" +msgstr "" + +#: cinder/quota.py:876 +msgid "Cannot register resource" +msgstr "" + +#: cinder/quota.py:879 +msgid "Cannot register resources" +msgstr "" + +#: cinder/quota_utils.py:46 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume - " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/quota_utils.py:56 cinder/transfer/api.py:168 +#: cinder/volume/flows/api/create_volume.py:520 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/service.py:95 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:108 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:148 +#, python-format +msgid "" +"Report interval must be less than service down time. Current config " +"service_down_time: %(service_down_time)s, report_interval for this: " +"service is: %(report_interval)s. 
Setting global service_down_time to: " +"%(new_down_time)s" +msgstr "" + +#: cinder/service.py:216 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:255 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:270 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:276 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:298 +#, python-format +msgid "" +"Value of config option %(name)s_workers must be integer greater than 1. " +"Input value ignored." +msgstr "" + +#: cinder/service.py:373 +msgid "serve() can only be called once" +msgstr "" + +#: cinder/service.py:379 cinder/openstack/common/service.py:166 +#: cinder/openstack/common/service.py:384 +msgid "Full set of CONF:" +msgstr "" + +#: cinder/service.py:387 +#, python-format +msgid "%s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Can not translate %s to integer." +msgstr "" + +#: cinder/utils.py:127 +#, python-format +msgid "May specify only one of %s" +msgstr "" + +#: cinder/utils.py:212 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:228 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:412 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:423 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:698 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:759 +#, python-format +msgid "Volume driver %s not initialized" +msgstr "" + +#: cinder/wsgi.py:127 cinder/openstack/common/sslutils.py:50 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: cinder/wsgi.py:130 cinder/openstack/common/sslutils.py:53 +#, python-format +msgid "Unable to find ca_file : %s" +msgstr "" + +#: cinder/wsgi.py:133 cinder/openstack/common/sslutils.py:56 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: cinder/wsgi.py:136 cinder/openstack/common/sslutils.py:59 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:169 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:206 +#, python-format +msgid "Started %(name)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:244 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:313 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." 
+msgstr "" + +#: cinder/api/common.py:92 cinder/api/common.py:126 cinder/volume/api.py:266 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:95 cinder/api/common.py:130 cinder/volume/api.py:263 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:120 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:134 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:162 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:189 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:182 +msgid "Initializing extension manager." +msgstr "" + +#: cinder/api/extensions.py:197 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:235 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:236 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:240 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:256 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:262 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:276 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:287 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:356 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:266 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:463 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:786 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:907 +msgid "subclasses must implement construct()!" 
+msgstr "" + +#: cinder/api/contrib/admin_actions.py:81 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:227 +#: cinder/api/contrib/volume_transfer.py:157 +#: cinder/api/contrib/volume_transfer.py:193 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:224 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:60 +msgid "Snapshot not found." +msgstr "" + +#: cinder/api/contrib/hosts.py:86 cinder/api/openstack/wsgi.py:245 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:136 +#, python-format +msgid "Host '%s' could not be found." +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:168 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:180 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:206 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:214 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:111 +msgid "Please specify a name for QoS specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:220 +msgid "Failed to disassociate qos specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:222 +msgid "Qos specs still in use." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:298 +#: cinder/api/contrib/qos_specs_manage.py:351 +msgid "Volume Type id must not be None." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:72 +msgid "Missing required element quota_class_set in request body." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:81 +msgid "Quota class limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:85 +msgid "Quota class limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:60 +msgid "Quota limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quotas.py:65 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:100 +msgid "Missing required element quota_set in request body." +msgstr "" + +#: cinder/api/contrib/quotas.py:111 +#, python-format +msgid "Bad key(s) in quota set: %s" +msgstr "" + +#: cinder/api/contrib/scheduler_hints.py:36 +msgid "Malformed scheduler_hints attribute" +msgstr "" + +#: cinder/api/contrib/services.py:84 +msgid "" +"Query by service parameter is deprecated. Please use binary parameter " +"instead." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:51 +msgid "'status' must be specified." 
+msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:61 +#, python-format +msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:67 +#, python-format +msgid "" +"Provided snapshot status %(provided)s not allowed for snapshot with " +"status %(current)s." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:79 +msgid "progress must be an integer percentage" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:101 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:105 +#: cinder/api/v1/snapshot_metadata.py:75 cinder/api/v1/volume_metadata.py:75 +#: cinder/api/v2/snapshot_metadata.py:75 cinder/api/v2/volume_metadata.py:74 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:108 +#: cinder/api/v1/snapshot_metadata.py:79 cinder/api/v1/volume_metadata.py:79 +#: cinder/api/v2/snapshot_metadata.py:79 cinder/api/v2/volume_metadata.py:78 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:150 +msgid "" +"Key names can only contain alphanumeric characters, underscores, periods," +" colons and hyphens." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:99 +#, python-format +msgid "" +"Invalid request to attach volume to an instance %(instance_uuid)s and a " +"host %(host_name)s simultaneously" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:107 +msgid "Invalid request to attach volume to an invalid target" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:111 +msgid "" +"Invalid request to attach volume with an invalid mode. Attaching mode " +"should be 'rw' or 'ro'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:196 +msgid "Unable to fetch connection information from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:216 +msgid "Unable to terminate volume connection from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:229 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:237 +msgid "Bad value for 'force' parameter." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:240 +msgid "'force' is not string or bool." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:280 +msgid "New volume size must be specified as an integer." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:299 +msgid "Must specify readonly in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:307 +msgid "Bad value for 'readonly'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:311 +msgid "'readonly' not string or bool" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:325 +msgid "New volume type must be specified." 
+msgstr "" + +#: cinder/api/contrib/volume_transfer.py:131 +msgid "Listing volume transfers" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:147 +#, python-format +msgid "Creating new volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:162 +#, python-format +msgid "Creating transfer of volume %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:183 +#, python-format +msgid "Accepting volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:196 +#, python-format +msgid "Accepting transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:217 +#, python-format +msgid "Delete transfer with id: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:64 +msgid "key_size must be non-negative" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:67 +msgid "key_size must be an integer" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:73 +msgid "provider must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:75 +msgid "control_location must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:83 +#, python-format +msgid "Valid control location are: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:111 +msgid "Cannot create encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:115 +msgid "Create body is not valid." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:157 +msgid "Cannot delete encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/middleware/auth.py:108 +msgid "Invalid service catalog json." +msgstr "" + +#: cinder/api/middleware/fault.py:44 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:53 cinder/api/openstack/wsgi.py:984 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/fault.py:69 +#, python-format +msgid "%(exception)s: %(explanation)s" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:55 cinder/api/middleware/sizelimit.py:64 +#: cinder/api/middleware/sizelimit.py:78 +msgid "Request is too large." +msgstr "" + +#: cinder/api/openstack/__init__.py:69 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:80 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:104 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:126 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." 
+msgstr "" + +#: cinder/api/openstack/wsgi.py:220 cinder/api/openstack/wsgi.py:634 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:639 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:677 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:682 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:685 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:793 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:799 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:803 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:914 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:917 cinder/api/openstack/wsgi.py:930 +#: cinder/api/v1/snapshot_metadata.py:53 cinder/api/v1/snapshot_metadata.py:71 +#: cinder/api/v1/snapshot_metadata.py:96 cinder/api/v1/snapshot_metadata.py:121 +#: cinder/api/v1/volume_metadata.py:53 cinder/api/v1/volume_metadata.py:71 +#: cinder/api/v1/volume_metadata.py:96 cinder/api/v1/volume_metadata.py:121 +#: cinder/api/v2/snapshot_metadata.py:53 cinder/api/v2/snapshot_metadata.py:71 +#: cinder/api/v2/snapshot_metadata.py:96 cinder/api/v2/snapshot_metadata.py:121 +#: cinder/api/v2/volume_metadata.py:52 cinder/api/v2/volume_metadata.py:70 +#: cinder/api/v2/volume_metadata.py:95 cinder/api/v2/volume_metadata.py:120 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:927 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:939 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:987 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:139 cinder/api/v2/limits.py:138 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:264 cinder/api/v2/limits.py:261 +msgid "This request was rate-limited." 
+msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:37 cinder/api/v1/snapshot_metadata.py:117 +#: cinder/api/v1/snapshot_metadata.py:156 cinder/api/v2/snapshot_metadata.py:37 +#: cinder/api/v2/snapshot_metadata.py:117 +#: cinder/api/v2/snapshot_metadata.py:156 +msgid "snapshot does not exist" +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:139 +#: cinder/api/v1/snapshot_metadata.py:149 cinder/api/v1/volume_metadata.py:139 +#: cinder/api/v1/volume_metadata.py:149 cinder/api/v2/snapshot_metadata.py:139 +#: cinder/api/v2/snapshot_metadata.py:149 cinder/api/v2/volume_metadata.py:138 +#: cinder/api/v2/volume_metadata.py:148 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:119 cinder/api/v2/snapshots.py:120 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:184 +msgid "'volume_id' must be specified" +msgstr "" + +#: cinder/api/v1/snapshots.py:182 cinder/api/v2/snapshots.py:193 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:186 cinder/api/v2/snapshots.py:202 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:37 cinder/api/v1/volume_metadata.py:117 +#: cinder/api/v1/volume_metadata.py:156 cinder/api/v2/volume_metadata.py:36 +#: cinder/api/v2/volume_metadata.py:116 cinder/api/v2/volume_metadata.py:155 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:111 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:290 cinder/api/v2/volumes.py:228 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:344 cinder/api/v1/volumes.py:348 +#: cinder/api/v2/volumes.py:298 cinder/api/v2/volumes.py:302 +msgid "Invalid imageRef provided." +msgstr "" + +#: cinder/api/v1/volumes.py:388 cinder/api/v2/volumes.py:354 +#, python-format +msgid "snapshot id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:401 +#, python-format +msgid "source vol id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:412 cinder/api/v2/volumes.py:377 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/v1/volumes.py:496 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/snapshots.py:111 cinder/api/v2/snapshots.py:126 +#: cinder/api/v2/snapshots.py:267 +msgid "Snapshot could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:174 cinder/api/v2/snapshots.py:234 +#: cinder/api/v2/volumes.py:313 cinder/api/v2/volumes.py:419 +#, python-format +msgid "Missing required element '%s' in request body" +msgstr "" + +#: cinder/api/v2/snapshots.py:190 cinder/api/v2/volumes.py:217 +#: cinder/api/v2/volumes.py:234 cinder/api/v2/volumes.py:449 +msgid "Volume could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:230 cinder/api/v2/volumes.py:415 +msgid "Missing request body" +msgstr "" + +#: cinder/api/v2/types.py:70 +msgid "Volume type not found" +msgstr "" + +#: cinder/api/v2/volumes.py:237 +msgid "Volume cannot be deleted while in attached state" +msgstr "" + +#: cinder/api/v2/volumes.py:343 +msgid "Volume type not found." 
+msgstr "" + +#: cinder/api/v2/volumes.py:366 +#, python-format +msgid "source volume id:%s not found" +msgstr "" + +#: cinder/api/v2/volumes.py:472 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:66 +msgid "Backup status must be available or error" +msgstr "" + +#: cinder/backup/api.py:105 +msgid "Volume to be backed up must be available" +msgstr "" + +#: cinder/backup/api.py:140 +msgid "Backup status must be available" +msgstr "" + +#: cinder/backup/api.py:145 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:154 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:170 +msgid "Volume to be restored to must be available" +msgstr "" + +#: cinder/backup/api.py:176 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." +msgstr "" + +#: cinder/backup/api.py:181 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:97 +msgid "NULL host not allowed for volume backend lookup." +msgstr "" + +#: cinder/backup/manager.py:100 +#, python-format +msgid "Checking hostname '%s' for backend info." +msgstr "" + +#: cinder/backup/manager.py:107 +#, python-format +msgid "Backend not found in hostname (%s) so using default." +msgstr "" + +#: cinder/backup/manager.py:117 +#, python-format +msgid "Manager requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:120 cinder/backup/manager.py:132 +msgid "Fetching default backend." +msgstr "" + +#: cinder/backup/manager.py:123 +#, python-format +msgid "Volume manager for backend '%s' does not exist." +msgstr "" + +#: cinder/backup/manager.py:129 +#, python-format +msgid "Driver requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:147 +#, python-format +msgid "" +"Registering backend %(backend)s (host=%(host)s " +"backend_name=%(backend_name)s)." +msgstr "" + +#: cinder/backup/manager.py:154 +#, python-format +msgid "Registering default backend %s." +msgstr "" + +#: cinder/backup/manager.py:158 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)." +msgstr "" + +#: cinder/backup/manager.py:165 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s." +msgstr "" + +#: cinder/backup/manager.py:184 +msgid "Cleaning up incomplete backup operations." +msgstr "" + +#: cinder/backup/manager.py:189 +#, python-format +msgid "Resetting volume %s to available (was backing-up)." +msgstr "" + +#: cinder/backup/manager.py:194 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)." +msgstr "" + +#: cinder/backup/manager.py:206 +#, python-format +msgid "Resetting backup %s to error (was creating)." +msgstr "" + +#: cinder/backup/manager.py:212 +#, python-format +msgid "Resetting backup %s to available (was restoring)." +msgstr "" + +#: cinder/backup/manager.py:217 +#, python-format +msgid "Resuming delete on backup: %s." +msgstr "" + +#: cinder/backup/manager.py:225 +#, python-format +msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:237 +#, python-format +msgid "" +"Create backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s." 
+msgstr "" + +#: cinder/backup/manager.py:249 +#, python-format +msgid "" +"Create backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:282 +#, python-format +msgid "Create backup finished. backup: %s." +msgstr "" + +#: cinder/backup/manager.py:286 +#, python-format +msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:299 +#, python-format +msgid "" +"Restore backup aborted: expected volume status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:310 +#, python-format +msgid "" +"Restore backup aborted: expected backup status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:329 +#, python-format +msgid "" +"Restore backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:360 +#, python-format +msgid "" +"Restore backup finished, backup %(backup_id)s restored to volume " +"%(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:379 +#, python-format +msgid "Delete backup started, backup: %s." +msgstr "" + +#: cinder/backup/manager.py:386 +#, python-format +msgid "" +"Delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:399 +#, python-format +msgid "" +"Delete backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:422 +#, python-format +msgid "Delete backup finished, backup %s deleted." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:116 +msgid "" +"rbd striping not supported - ignoring configuration settings for rbd " +"striping" +msgstr "" + +#: cinder/backup/drivers/ceph.py:147 +#, python-format +msgid "invalid user '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:213 +msgid "backup_id required" +msgstr "" + +#: cinder/backup/drivers/ceph.py:224 +#, python-format +msgid "discarding %(length)s bytes from offset %(offset)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:232 +#, python-format +msgid "writing zeroes chunk %d" +msgstr "" + +#: cinder/backup/drivers/ceph.py:246 +#, python-format +msgid "transferring data between '%(src)s' and '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:250 +#, python-format +msgid "%(chunks)s chunks of %(bytes)s bytes to be transferred" +msgstr "" + +#: cinder/backup/drivers/ceph.py:269 +#, python-format +msgid "transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:279 +#, python-format +msgid "transferring remaining %s bytes" +msgstr "" + +#: cinder/backup/drivers/ceph.py:295 +#, python-format +msgid "creating base image '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:322 cinder/backup/drivers/ceph.py:603 +#, python-format +msgid "deleting backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:325 +msgid "no backup snapshot to delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:361 +#, python-format +msgid "trying diff format name format basename='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:369 +#, python-format +msgid "image %s not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:377 +#, python-format +msgid "base image still has %s snapshots so skipping base image delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:382 +#, python-format +msgid "deleting base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:389 +#, python-format +msgid "image busy, retrying %(retries)s more time(s) in %(delay)ss" +msgstr "" + +#: cinder/backup/drivers/ceph.py:394 +msgid "max retries reached - raising error" +msgstr "" + +#: cinder/backup/drivers/ceph.py:397 +#, python-format +msgid "base backup image='%s' deleted)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:407 +#, python-format +msgid "deleting source snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:453 +#, python-format +msgid "performing differential transfer from '%(src)s' to '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:478 +#, python-format +msgid "rbd diff op failed - (ret=%(ret)s stderr=%(stderr)s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:488 +#, python-format +msgid "image '%s' not found - trying diff format name" +msgstr "" + +#: cinder/backup/drivers/ceph.py:493 +#, python-format +msgid "diff format image '%s' not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:528 +#, python-format +msgid "using --from-snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:543 +#, python-format +msgid "source snap '%s' is stale so deleting" +msgstr "" + +#: cinder/backup/drivers/ceph.py:555 +#, python-format +msgid "" +"snap='%(snap)s' does not exist in base image='%(base)s' - aborting " +"incremental backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:566 +#, python-format +msgid "creating backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:586 +#, python-format +msgid "differential backup transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:595 +msgid "differential backup transfer failed" +msgstr "" + +#: 
cinder/backup/drivers/ceph.py:625 +#, python-format +msgid "creating base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:634 +msgid "copying data" +msgstr "" + +#: cinder/backup/drivers/ceph.py:694 +#, python-format +msgid "looking for snapshot of backup base '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:697 +#, python-format +msgid "backup base '%s' has no snapshots" +msgstr "" + +#: cinder/backup/drivers/ceph.py:704 +#, python-format +msgid "backup '%s' has no snapshot" +msgstr "" + +#: cinder/backup/drivers/ceph.py:708 +#, python-format +msgid "backup should only have one snapshot but instead has %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:713 +#, python-format +msgid "found snapshot '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:734 +msgid "need non-zero volume size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:751 +#, python-format +msgid "Starting backup of volume='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:764 +msgid "forcing full backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:776 +#, python-format +msgid "backup '%s' finished." +msgstr "" + +#: cinder/backup/drivers/ceph.py:834 +msgid "adjusting restore vol size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:846 +#, python-format +msgid "trying incremental restore from base='%(base)s' snap='%(snap)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:858 +msgid "differential restore failed, trying full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:869 +#, python-format +msgid "restore transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:916 +#, python-format +msgid "rbd has %s extents" +msgstr "" + +#: cinder/backup/drivers/ceph.py:938 +msgid "dest volume is original volume - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:959 +msgid "destination has extents - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:964 +#, python-format +msgid "no restore point found for backup='%s', forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:995 +msgid "forcing full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1005 +#, python-format +msgid "starting restore from Ceph backup=%(src)s to volume=%(dest)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1016 +msgid "volume_file does not support fileno() so skipping fsync()" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1021 +msgid "restore finished successfully." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:1023 +#, python-format +msgid "restore finished with error - %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1029 +#, python-format +msgid "delete started for backup=%s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1034 +msgid "rbd image not found but continuing anyway so that db entry can be removed" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1037 +#, python-format +msgid "delete '%s' finished with warning" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1039 +#, python-format +msgid "delete '%s' finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:106 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:123 +#, python-format +msgid "single_user auth mode enabled, but %(param)s not set" +msgstr "" + +#: cinder/backup/drivers/swift.py:141 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:146 +#, python-format +msgid "container %s does not exist" +msgstr "" + +#: cinder/backup/drivers/swift.py:151 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/drivers/swift.py:157 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:173 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:182 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:192 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:209 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/drivers/swift.py:214 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:219 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:224 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/drivers/swift.py:234 +#, python-format +msgid "volume size %d is invalid." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:248 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:271 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/drivers/swift.py:278 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:287 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/drivers/swift.py:291 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/drivers/swift.py:297 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:301 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:304 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:312 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/drivers/swift.py:328 cinder/backup/drivers/tsm.py:324 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/drivers/swift.py:345 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/drivers/swift.py:350 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:356 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/drivers/swift.py:362 +#, python-format +msgid "" +"restoring object from swift. backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:378 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/drivers/swift.py:401 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:409 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:423 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:428 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:432 cinder/backup/drivers/tsm.py:378 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." 
+msgstr ""
+
+#: cinder/backup/drivers/swift.py:446
+msgid "swift error while listing objects, continuing with delete"
+msgstr ""
+
+#: cinder/backup/drivers/swift.py:455
+#, python-format
+msgid "swift error while deleting object %s, continuing with delete"
+msgstr ""
+
+#: cinder/backup/drivers/swift.py:458
+#, python-format
+msgid "deleted swift object: %(swift_object_name)s in container: %(container)s"
+msgstr ""
+
+#: cinder/backup/drivers/swift.py:468 cinder/backup/drivers/tsm.py:440
+#, python-format
+msgid "delete %s finished"
+msgstr ""
+
+#: cinder/backup/drivers/tsm.py:85
+#, python-format
+msgid ""
+"backup: %(vol_id)s Failed to create device hardlink from %(vpath)s to "
+"%(bpath)s.\n"
+"stdout: %(out)s\n"
+" stderr: %(err)s"
+msgstr ""
+
+#: cinder/backup/drivers/tsm.py:143
+#, python-format
+msgid ""
+"backup: %(vol_id)s Failed to obtain backup success notification from "
+"server.\n"
+"stdout: %(out)s\n"
+" stderr: %(err)s"
+msgstr ""
+
+#: cinder/backup/drivers/tsm.py:173
+#, python-format
+msgid ""
+"restore: %(vol_id)s Failed.\n"
+"stdout: %(out)s\n"
+" stderr: %(err)s"
+msgstr ""
+
+#: cinder/backup/drivers/tsm.py:199
+#, python-format
+msgid "backup: %(vol_id)s Failed. %(path)s is not a block device."
+msgstr ""
+
+#: cinder/backup/drivers/tsm.py:206
+#, python-format
+msgid "backup: %(vol_id)s Failed. Cannot obtain real path to device %(path)s."
+msgstr ""
+
+#: cinder/backup/drivers/tsm.py:213
+#, python-format
+msgid "backup: %(vol_id)s Failed. %(path)s is not a file."
+msgstr ""
+
+#: cinder/backup/drivers/tsm.py:260
+#, python-format
+msgid ""
+"backup: %(vol_id)s Failed to remove backup hardlink from %(vpath)s to "
+"%(bpath)s.\n"
+"stdout: %(out)s\n"
+" stderr: %(err)s"
+msgstr ""
+
+#: cinder/backup/drivers/tsm.py:286
+#, python-format
+msgid ""
+"starting backup of volume: %(volume_id)s to TSM, volume path: "
+"%(volume_path)s,"
+msgstr ""
+
+#: cinder/backup/drivers/tsm.py:298
+#, python-format
+msgid ""
+"backup: %(vol_id)s Failed to run dsmc on %(bpath)s.\n"
+"stdout: %(out)s\n"
+" stderr: %(err)s"
+msgstr ""
+
+#: cinder/backup/drivers/tsm.py:308
+#, python-format
+msgid ""
+"backup: %(vol_id)s Failed to run dsmc due to invalid arguments on "
+"%(bpath)s.\n"
+"stdout: %(out)s\n"
+" stderr: %(err)s"
+msgstr ""
+
+#: cinder/backup/drivers/tsm.py:338
+#, python-format
+msgid ""
+"restore: starting restore of backup from TSM to volume %(volume_id)s, "
+"backup: %(backup_id)s"
+msgstr ""
+
+#: cinder/backup/drivers/tsm.py:352
+#, python-format
+msgid ""
+"restore: %(vol_id)s Failed to run dsmc on %(bpath)s.\n"
+"stdout: %(out)s\n"
+" stderr: %(err)s"
+msgstr ""
+
+#: cinder/backup/drivers/tsm.py:362
+#, python-format
+msgid ""
+"restore: %(vol_id)s Failed to run dsmc due to invalid arguments on "
+"%(bpath)s.\n"
+"stdout: %(out)s\n"
+" stderr: %(err)s"
+msgstr ""
+
+#: cinder/backup/drivers/tsm.py:413
+#, python-format
+msgid ""
+"delete: %(vol_id)s Failed to run dsmc with stdout: %(out)s\n"
+" stderr: %(err)s"
+msgstr ""
+
+#: cinder/backup/drivers/tsm.py:421
+#, python-format
+msgid ""
+"delete: %(vol_id)s Failed to run dsmc due to invalid arguments with "
+"stdout: %(out)s\n"
+" stderr: %(err)s"
+msgstr ""
+
+#: cinder/backup/drivers/tsm.py:432
+#, python-format
+msgid ""
+"delete: %(vol_id)s Failed with stdout: %(out)s\n"
+" stderr: %(err)s"
+msgstr ""
+
+#: cinder/brick/exception.py:55
+#, python-format
+msgid "Exception in string format operation. 
msg='%s'"
+msgstr ""
+
+#: cinder/brick/exception.py:85
+msgid "We are unable to locate any Fibre Channel devices."
+msgstr ""
+
+#: cinder/brick/exception.py:89
+msgid "Unable to find a Fibre Channel volume device."
+msgstr ""
+
+#: cinder/brick/exception.py:93
+#, python-format
+msgid "Volume device not found at %(device)s."
+msgstr ""
+
+#: cinder/brick/exception.py:97
+#, python-format
+msgid "Unable to find Volume Group: %(vg_name)s"
+msgstr ""
+
+#: cinder/brick/exception.py:101
+#, python-format
+msgid "Failed to create Volume Group: %(vg_name)s"
+msgstr ""
+
+#: cinder/brick/exception.py:105
+#, python-format
+msgid "Failed to create iscsi target for volume %(volume_id)s."
+msgstr ""
+
+#: cinder/brick/exception.py:109
+#, python-format
+msgid "Failed to remove iscsi target for volume %(volume_id)s."
+msgstr ""
+
+#: cinder/brick/exception.py:113
+#, python-format
+msgid "Failed to attach iSCSI target for volume %(volume_id)s."
+msgstr ""
+
+#: cinder/brick/exception.py:117
+#, python-format
+msgid "Connect to volume via protocol %(protocol)s not supported."
+msgstr ""
+
+#: cinder/brick/initiator/connector.py:127
+#, python-format
+msgid "Invalid InitiatorConnector protocol specified %(protocol)s"
+msgstr ""
+
+#: cinder/brick/initiator/connector.py:140
+#, python-format
+msgid "Failed to access the device on the path %(path)s: %(error)s %(info)s."
+msgstr ""
+
+#: cinder/brick/initiator/connector.py:229
+#, python-format
+msgid ""
+"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. Try"
+" number: %(tries)s"
+msgstr ""
+
+#: cinder/brick/initiator/connector.py:242
+#, python-format
+msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)"
+msgstr ""
+
+#: cinder/brick/initiator/connector.py:317
+#, python-format
+msgid "Could not find the iSCSI Initiator File %s"
+msgstr ""
+
+#: cinder/brick/initiator/connector.py:609
+msgid "We are unable to locate any Fibre Channel devices"
+msgstr ""
+
+#: cinder/brick/initiator/connector.py:619
+#, python-format
+msgid "Looking for Fibre Channel dev %(device)s"
+msgstr ""
+
+#: cinder/brick/initiator/connector.py:629
+msgid "Fibre Channel volume device not found."
+msgstr ""
+
+#: cinder/brick/initiator/connector.py:633
+#, python-format
+msgid "Fibre volume not yet found. Will rescan & retry. Try number: %(tries)s"
+msgstr ""
+
+#: cinder/brick/initiator/connector.py:649
+#, python-format
+msgid "Found Fibre Channel volume %(name)s (after %(tries)s rescans)"
+msgstr ""
+
+#: cinder/brick/initiator/connector.py:658
+#, python-format
+msgid "Multipath device discovered %(device)s"
+msgstr ""
+
+#: cinder/brick/initiator/connector.py:776
+#, python-format
+msgid "AoE volume not yet found at: %(path)s. Try number: %(tries)s"
+msgstr ""
+
+#: cinder/brick/initiator/connector.py:789
+#, python-format
+msgid "Found AoE device %(path)s (after %(tries)s rediscover)"
+msgstr ""
+
+#: cinder/brick/initiator/connector.py:815
+#, python-format
+msgid "aoe-discover: stdout=%(out)s stderr=%(err)s"
+msgstr ""
+
+#: cinder/brick/initiator/connector.py:825
+#, python-format
+msgid "aoe-revalidate %(dev)s: stdout=%(out)s stderr=%(err)s"
+msgstr ""
+
+#: cinder/brick/initiator/connector.py:834
+#, python-format
+msgid "aoe-flush %(dev)s: stdout=%(out)s stderr=%(err)s"
+msgstr ""
+
+#: cinder/brick/initiator/connector.py:858
+msgid ""
+"Connection details not present. RemoteFsClient may not initialize "
+"properly."
+msgstr ""
+
+#: cinder/brick/initiator/connector.py:915
+msgid "Invalid connection_properties specified: no device_path attribute"
+msgstr ""
+
+#: cinder/brick/initiator/linuxfc.py:50 cinder/brick/initiator/linuxfc.py:56
+msgid "systool is not installed"
+msgstr ""
+
+#: cinder/brick/initiator/linuxscsi.py:99
+#: cinder/brick/initiator/linuxscsi.py:107
+#: cinder/brick/initiator/linuxscsi.py:124
+#, python-format
+msgid "multipath call failed exit (%(code)s)"
+msgstr ""
+
+#: cinder/brick/initiator/linuxscsi.py:145
+#, python-format
+msgid "Couldn't find multipath device %(line)s"
+msgstr ""
+
+#: cinder/brick/initiator/linuxscsi.py:149
+#, python-format
+msgid "Found multipath device = %(mdev)s"
+msgstr ""
+
+#: cinder/brick/iscsi/iscsi.py:140
+msgid "Attempting recreate of backing lun..."
+msgstr ""
+
+#: cinder/brick/iscsi/iscsi.py:158
+#, python-format
+msgid ""
+"Failed to recover attempt to create iscsi backing lun for volume "
+"id:%(vol_id)s: %(e)s"
+msgstr ""
+
+#: cinder/brick/iscsi/iscsi.py:177
+#, python-format
+msgid "Creating iscsi_target for: %s"
+msgstr ""
+
+#: cinder/brick/iscsi/iscsi.py:184
+#, python-format
+msgid ""
+"Created volume path %(vp)s,\n"
+"content: %(vc)s"
+msgstr ""
+
+#: cinder/brick/iscsi/iscsi.py:216 cinder/brick/iscsi/iscsi.py:365
+#, python-format
+msgid "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s"
+msgstr ""
+
+#: cinder/brick/iscsi/iscsi.py:227
+#, python-format
+msgid ""
+"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure "
+"your tgtd config file contains 'include %(volumes_dir)s/*'"
+msgstr ""
+
+#: cinder/brick/iscsi/iscsi.py:258
+#, python-format
+msgid "Removing iscsi_target for: %s"
+msgstr ""
+
+#: cinder/brick/iscsi/iscsi.py:262
+#, python-format
+msgid "Volume path %s does not exist, nothing to remove."
+msgstr ""
+
+#: cinder/brick/iscsi/iscsi.py:280
+#, python-format
+msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s"
+msgstr ""
+
+#: cinder/brick/iscsi/iscsi.py:290 cinder/brick/iscsi/iscsi.py:550
+msgid "valid iqn needed for show_target"
+msgstr ""
+
+#: cinder/brick/iscsi/iscsi.py:375
+#, python-format
+msgid "Removing iscsi_target for volume: %s"
+msgstr ""
+
+#: cinder/brick/iscsi/iscsi.py:469
+msgid "cinder-rtstool is not installed correctly"
+msgstr ""
+
+#: cinder/brick/iscsi/iscsi.py:489
+#, python-format
+msgid "Creating iscsi_target for volume: %s"
+msgstr ""
+
+#: cinder/brick/iscsi/iscsi.py:513 cinder/brick/iscsi/iscsi.py:522
+#, python-format
+msgid "Failed to create iscsi target for volume id:%s."
+msgstr ""
+
+#: cinder/brick/iscsi/iscsi.py:532
+#, python-format
+msgid "Removing iscsi_target: %s"
+msgstr ""
+
+#: cinder/brick/iscsi/iscsi.py:542
+#, python-format
+msgid "Failed to remove iscsi target for volume id:%s."
+msgstr ""
+
+#: cinder/brick/iscsi/iscsi.py:571
+#, python-format
+msgid "Failed to add initiator iqn %s to target"
+msgstr ""
+
+#: cinder/brick/local_dev/lvm.py:75
+msgid "Error creating Volume Group"
+msgstr ""
+
+#: cinder/brick/local_dev/lvm.py:76 cinder/brick/local_dev/lvm.py:158
+#: cinder/brick/local_dev/lvm.py:474 cinder/brick/local_dev/lvm.py:503
+#: cinder/brick/local_dev/lvm.py:546 cinder/brick/local_dev/lvm.py:609
+#, python-format
+msgid "Cmd :%s"
+msgstr ""
+
+#: cinder/brick/local_dev/lvm.py:77 cinder/brick/local_dev/lvm.py:159
+#: cinder/brick/local_dev/lvm.py:475 cinder/brick/local_dev/lvm.py:504
+#: cinder/brick/local_dev/lvm.py:547 cinder/brick/local_dev/lvm.py:610
+#, python-format
+msgid "StdOut :%s"
+msgstr ""
+
+#: cinder/brick/local_dev/lvm.py:78 cinder/brick/local_dev/lvm.py:160
+#: cinder/brick/local_dev/lvm.py:476 cinder/brick/local_dev/lvm.py:505
+#: cinder/brick/local_dev/lvm.py:548 cinder/brick/local_dev/lvm.py:611
+#, python-format
+msgid "StdErr :%s"
+msgstr ""
+
+#: cinder/brick/local_dev/lvm.py:82
+#, python-format
+msgid "Unable to locate Volume Group %s"
+msgstr ""
+
+#: cinder/brick/local_dev/lvm.py:157
+msgid "Error querying thin pool about data_percent"
+msgstr ""
+
+#: cinder/brick/local_dev/lvm.py:370
+#, python-format
+msgid "Unable to find VG: %s"
+msgstr ""
+
+#: cinder/brick/local_dev/lvm.py:420
+msgid ""
+"Requested to set up thin provisioning, however current LVM version does "
+"not support it."
+msgstr ""
+
+#: cinder/brick/local_dev/lvm.py:473
+msgid "Error creating Volume"
+msgstr ""
+
+#: cinder/brick/local_dev/lvm.py:489
+#, python-format
+msgid "Unable to find LV: %s"
+msgstr ""
+
+#: cinder/brick/local_dev/lvm.py:502
+msgid "Error creating snapshot"
+msgstr ""
+
+#: cinder/brick/local_dev/lvm.py:545
+msgid "Error activating LV"
+msgstr ""
+
+#: cinder/brick/local_dev/lvm.py:563
+#, python-format
+msgid "Error reported running lvremove: CMD: %(command)s, RESPONSE: %(response)s"
+msgstr ""
+
+#: cinder/brick/local_dev/lvm.py:568
+msgid "Attempting udev settle and retry of lvremove..."
+msgstr ""
+
+#: cinder/brick/local_dev/lvm.py:608
+msgid "Error extending Volume"
+msgstr ""
+
+#: cinder/brick/remotefs/remotefs.py:39
+msgid "nfs_mount_point_base required"
+msgstr ""
+
+#: cinder/brick/remotefs/remotefs.py:45
+msgid "glusterfs_mount_point_base required"
+msgstr ""
+
+#: cinder/brick/remotefs/remotefs.py:86
+#, python-format
+msgid "Already mounted: %s"
+msgstr ""
+
+#: cinder/common/config.py:125
+msgid "Deploy v1 of the Cinder API."
+msgstr ""
+
+#: cinder/common/config.py:128
+msgid "Deploy v2 of the Cinder API."
+msgstr ""
+
+#: cinder/common/sqlalchemyutils.py:66
+#: cinder/openstack/common/db/sqlalchemy/utils.py:72
+msgid "Id not in sort_keys; is sort_keys unique?"
+msgstr ""
+
+#: cinder/common/sqlalchemyutils.py:114
+#: cinder/openstack/common/db/sqlalchemy/utils.py:120
+msgid "Unknown sort direction, must be 'desc' or 'asc'"
+msgstr ""
+
+#: cinder/compute/nova.py:97
+#, python-format
+msgid "Novaclient connection created using URL: %s"
+msgstr ""
+
+#: cinder/db/sqlalchemy/api.py:63
+msgid "Use of empty request context is deprecated"
+msgstr ""
+
+#: cinder/db/sqlalchemy/api.py:190
+#, python-format
+msgid "Unrecognized read_deleted value '%s'"
+msgstr ""
+
+#: cinder/db/sqlalchemy/api.py:843
+#, python-format
+msgid "Change will make usage less than 0 for the following resources: %s"
+msgstr ""
+
+#: cinder/db/sqlalchemy/api.py:1842
+#, python-format
+msgid "VolumeType %s deletion failed, VolumeType in use."
+msgstr "" + +#: cinder/db/sqlalchemy/api.py:2530 +#, python-format +msgid "No backup with id %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2615 +msgid "Volume must be available" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2639 +#, python-format +msgid "Volume in unexpected state %s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2662 +#, python-format +msgid "" +"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " +"%(status)s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:37 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:64 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:240 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:269 +msgid "Downgrade from initial Cinder install is unsupported." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:49 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:74 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:105 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:45 +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:48 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:46 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:127 +msgid "Dropping foreign key reservations_ibfk_1 failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:133 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:140 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:147 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:60 +msgid "Exception while creating table 'volume_glance_metadata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:75 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:68 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:58 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:61 +msgid "transfers table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:31 +msgid "migrations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:61 +#, python-format +msgid "Table |%s| not created" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:37 +#, python-format +msgid "Exception while dropping table %s." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:100 +#, python-format +msgid "Exception while creating table %s." 
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:34
+#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:43
+#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:49
+#, python-format
+msgid "Column |%s| not created!"
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:92
+msgid "encryption_key_id column not dropped from volumes"
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:100
+msgid "encryption_key_id column not dropped from snapshots"
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:105
+msgid "volume_type_id column not dropped from snapshots"
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:113
+msgid "encryption table not dropped"
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:49
+msgid "Table quality_of_service_specs not created!"
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:60
+msgid "Adding qos_specs_id column to volume type table failed."
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:85
+msgid "Dropping foreign key volume_types_ibfk_1 failed"
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:93
+msgid "Dropping qos_specs_id column failed."
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:100
+msgid "Dropping quality_of_service_specs table failed."
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:59
+msgid "volume_admin_metadata table not dropped"
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:48
+msgid ""
+"Found existing 'default' entries in the quota_classes table. Skipping "
+"insertion of default values."
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:72
+msgid "Added default quota class data into the DB."
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:74
+msgid "Default quota class data not inserted into the DB."
+msgstr ""
+
+#: cinder/image/glance.py:161 cinder/image/glance.py:169
+#, python-format
+msgid "Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s."
+msgstr ""
+
+#: cinder/image/image_utils.py:94 cinder/image/image_utils.py:199
+msgid "'qemu-img info' parsing failed."
+msgstr ""
+
+#: cinder/image/image_utils.py:101
+#, python-format
+msgid "fmt=%(fmt)s backed by: %(backing_file)s"
+msgstr ""
+
+#: cinder/image/image_utils.py:109 cinder/image/image_utils.py:192
+#, python-format
+msgid ""
+"Size is %(image_size)dGB and doesn't fit in a volume of size "
+"%(volume_size)dGB."
+msgstr ""
+
+#: cinder/image/image_utils.py:157
+#, python-format
+msgid ""
+"qemu-img is not installed and image is of type %s. Only RAW images can "
+"be used if qemu-img is not installed."
+msgstr ""
+
+#: cinder/image/image_utils.py:164
+msgid ""
+"qemu-img is not installed and the disk format is not specified. Only RAW"
+" images can be used if qemu-img is not installed."
+msgstr "" + +#: cinder/image/image_utils.py:178 +#, python-format +msgid "Copying image from %(tmp)s to volume %(dest)s - size: %(size)s" +msgstr "" + +#: cinder/image/image_utils.py:206 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:224 +#, python-format +msgid "Converted to %(vol_format)s, but format is now %(file_format)s" +msgstr "" + +#: cinder/image/image_utils.py:260 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:78 +msgid "" +"config option keymgr.fixed_key has not been defined: some operations may " +"fail unexpectedly" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:80 +msgid "keymgr.fixed_key not defined" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:134 +#, python-format +msgid "Not deleting key %s" +msgstr "" + +#: cinder/openstack/common/eventlet_backdoor.py:140 +#, python-format +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/fileutils.py:64 +#, python-format +msgid "Reloading cached file %s" +msgstr "" + +#: cinder/openstack/common/gettextutils.py:252 +msgid "Message objects do not support addition." +msgstr "" + +#: cinder/openstack/common/gettextutils.py:261 +msgid "" +"Message objects do not support str() because they may contain non-ascii " +"characters. Please use unicode() or translate() instead." +msgstr "" + +#: cinder/openstack/common/imageutils.py:96 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:189 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:200 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:227 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:235 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/log.py:301 +#, python-format +msgid "Deprecated: %s" +msgstr "" + +#: cinder/openstack/common/log.py:402 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:453 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:623 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:82 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:89 +#: cinder/tests/brick/test_brick_connector.py:466 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:129 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:136 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:43 +#, python-format +msgid "Unexpected argument for periodic task creation: %(arg)s." 
+msgstr "" + +#: cinder/openstack/common/periodic_task.py:134 +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:139 +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:177 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:186 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." +msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:127 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/openstack/common/processutils.py:142 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:167 +#: cinder/openstack/common/processutils.py:239 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:345 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:179 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/openstack/common/processutils.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:318 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:220 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/openstack/common/processutils.py:224 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/openstack/common/service.py:175 +#: cinder/openstack/common/service.py:269 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:187 +msgid "Exception during rpc cleanup." 
+msgstr "" + +#: cinder/openstack/common/service.py:238 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:275 +msgid "Unhandled exception" +msgstr "" + +#: cinder/openstack/common/service.py:308 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/openstack/common/service.py:327 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/openstack/common/service.py:337 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/openstack/common/service.py:354 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/openstack/common/service.py:358 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/service.py:362 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/openstack/common/service.py:392 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/openstack/common/service.py:410 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/openstack/common/sslutils.py:98 +#, python-format +msgid "Invalid SSL version : %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:86 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/strutils.py:182 +#, python-format +msgid "Invalid string format: %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:189 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/openstack/common/versionutils.py:69 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and " +"may be removed in %(remove_in)s." +msgstr "" + +#: cinder/openstack/common/versionutils.py:73 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s and may be removed in " +"%(remove_in)s. It will not be superseded." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:29 +msgid "An unknown error occurred in crypto utils." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:36 +#, python-format +msgid "Block size of %(given)d is too big, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:45 +#, python-format +msgid "Length of %(given)d is too long, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/db/exception.py:44 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:487 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:538 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:610 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/utils.py:33 +msgid "Sort key supplied was not valid." +msgstr "" + +#: cinder/openstack/common/notifier/api.py:129 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:145 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:164 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. 
Please use rpc_notifier instead."
+msgstr ""
+
+#: cinder/openstack/common/notifier/rpc_notifier.py:45
+#: cinder/openstack/common/notifier/rpc_notifier2.py:51
+#, python-format
+msgid "Could not send notification to %(topic)s. Payload=%(message)s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/__init__.py:105
+#, python-format
+msgid ""
+"An RPC is being made while holding a lock. The locks currently held are "
+"%(locks)s. This is probably a bug. Please report it. Include the "
+"following: [%(stack)s]."
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:83
+msgid "Pool creating new connection"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:208
+#, python-format
+msgid "no calling threads waiting for msg_id : %s, message : %s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:216
+#, python-format
+msgid ""
+"Number of call waiters is greater than warning threshold: %d. There "
+"could be a MulticallProxyWaiter leak."
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:299
+#, python-format
+msgid "unpacked context: %s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:345
+#, python-format
+msgid "UNIQUE_ID is %s."
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:414
+#, python-format
+msgid "received %s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:422
+#, python-format
+msgid "no method for message: %s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:423
+#, python-format
+msgid "No method for message: %s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:451
+#: cinder/openstack/common/rpc/impl_zmq.py:280
+#, python-format
+msgid "Expected exception during message handling (%s)"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:459
+#: cinder/openstack/common/rpc/impl_zmq.py:286
+msgid "Exception during message handling"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:594
+#, python-format
+msgid "Making synchronous call on %s ..."
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:597
+#, python-format
+msgid "MSG_ID is %s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:631
+#, python-format
+msgid "Making asynchronous cast on %s..."
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:640
+msgid "Making asynchronous fanout cast..."
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:668
+#, python-format
+msgid "Sending %(event_type)s on %(topic)s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:74
+msgid "An unknown RPC related exception occurred."
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:104
+#, python-format
+msgid ""
+"Remote error: %(exc_type)s %(value)s\n"
+"%(traceback)s."
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:121
+#, python-format
+msgid ""
+"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:"
+" \"%(method)s\" info: \"%(info)s\""
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:137
+#: cinder/openstack/common/rpc/common.py:138
+#: cinder/openstack/common/rpc/common.py:139
+msgid "<unknown>"
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:143
+#, python-format
+msgid "Found duplicate message(%(msg_id)s). Skipping it."
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:147
+msgid "Invalid reuse of an RPC connection."
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:151
+#, python-format
+msgid "Specified RPC version, %(version)s, not supported by this endpoint."
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:156
+#, python-format
+msgid ""
+"Specified RPC envelope version, %(version)s, not supported by this "
+"endpoint."
+msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:166 +#: cinder/openstack/common/rpc/impl_qpid.py:164 +msgid "Failed to process message... skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:477 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:499 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:536 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:552 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:606 +#: cinder/openstack/common/rpc/impl_qpid.py:507 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:624 +#: cinder/openstack/common/rpc/impl_qpid.py:522 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:628 +#: cinder/openstack/common/rpc/impl_qpid.py:526 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:667 +#: cinder/openstack/common/rpc/impl_qpid.py:561 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:84 +#, python-format +msgid "Invalid value for qpid_topology_version: %d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:455 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:461 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:474 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:534 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:101 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:136 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:137 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:138 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:146 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:158 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:200 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:205 +msgid "You cannot send on this socket." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:267 +#, python-format +msgid "Running func with context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:305 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:387 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:437 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:443 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:475 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:481 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:497 +#, python-format +msgid "Required IPC directory does not exist at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:506 +#, python-format +msgid "Permission denied to IPC directory at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:509 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:543 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:562 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:590 +msgid "Skipping topic registration. Already registered." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:597 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:649 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:662 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:675 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:678 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:681 +#, python-format +msgid "Received message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:682 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:691 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:698 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:721 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:724 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:728 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:731 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:771 +#, python-format +msgid "topic is %s." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:815 +#, python-format +msgid "rpc_zmq_matchmaker = %(orig)s is deprecated; use %(new)s instead" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#: cinder/openstack/common/rpc/matchmaker_ring.py:79 +#: cinder/openstack/common/rpc/matchmaker_ring.py:97 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:54 +#, python-format +msgid "extra_spec requirement '%(req)s' does not match '%(cap)s'" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:67 +#, python-format +msgid "%(host_state)s fails resource_type extra_specs requirements" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:43 +msgid "Re-scheduling is disabled." +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:52 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/scheduler/driver.py:69 +msgid "Must implement host_passes_filters" +msgstr "" + +#: cinder/scheduler/driver.py:74 +msgid "Must implement find_retype_host" +msgstr "" + +#: cinder/scheduler/driver.py:78 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:82 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:98 +#, python-format +msgid "cannot place volume %(id)s on %(host)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:114 +#, python-format +msgid "No valid hosts for volume %(id)s with type %(type)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:125 +#, python-format +msgid "" +"Current host not valid for volume %(id)s with type %(type)s, migration " +"not allowed" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:156 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:174 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:207 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:259 +#, python-format +msgid "Filtered %s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:276 +#, python-format +msgid "Choosing %s" +msgstr "" + +#: cinder/scheduler/host_manager.py:264 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:269 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:294 +#, python-format +msgid "volume service is down or disabled. (host: %s)" +msgstr "" + +#: cinder/scheduler/manager.py:63 +msgid "" +"ChanceScheduler and SimpleScheduler have been deprecated due to lack of " +"support for advanced features like: volume types, volume encryption, QoS " +"etc. These two schedulers can be fully replaced by FilterScheduler with " +"certain combination of filters and weighers." 
+msgstr "" + +#: cinder/scheduler/manager.py:98 cinder/scheduler/manager.py:100 +msgid "Failed to create scheduler manager volume flow" +msgstr "" + +#: cinder/scheduler/manager.py:159 +msgid "New volume type not specified in request_spec." +msgstr "" + +#: cinder/scheduler/manager.py:174 +#, python-format +msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." +msgstr "" + +#: cinder/scheduler/manager.py:192 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:68 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%s'" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:43 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:57 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:53 +msgid "No volume_id provided to populate a request_spec from" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:116 +#, python-format +msgid "Failed to schedule_create_volume: %(cause)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:135 +#, python-format +msgid "Failed notifying on %(topic)s payload %(payload)s" +msgstr "" + +#: cinder/tests/fake_driver.py:57 cinder/volume/driver.py:784 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:76 cinder/volume/driver.py:884 +#, python-format +msgid "FAKE ISER: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:97 +msgid "local_path not implemented" +msgstr "" + +#: cinder/tests/fake_driver.py:124 cinder/tests/fake_driver.py:129 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:70 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:78 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:94 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:97 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:58 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_netapp_nfs.py:360 +#, python-format +msgid "Share %(share)s and file name %(file_name)s" +msgstr "" + +#: cinder/tests/test_rbd.py:768 cinder/volume/drivers/rbd.py:175 +msgid "flush() not supported in this version of librbd" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:260 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1507 +#, python-format +msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1510 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1515 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:60 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:61 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: 
cinder/tests/test_xiv_ds8k.py:102
+#, python-format
+msgid "Volume not found for instance %(instance_id)s."
+msgstr ""
+
+#: cinder/tests/api/contrib/test_backups.py:741
+msgid "Invalid input"
+msgstr ""
+
+#: cinder/tests/integrated/test_login.py:29
+#, python-format
+msgid "volume: %s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:32
+#, python-format
+msgid ""
+"%(message)s\n"
+"Status Code: %(_status)s\n"
+"Body: %(_body)s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:42
+msgid "Authentication error"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:50
+msgid "Authorization error"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:58
+msgid "Item not found"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:100
+#, python-format
+msgid "Doing %(method)s on %(relative_url)s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:103
+#, python-format
+msgid "Body: %s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:121
+#, python-format
+msgid "%(auth_uri)s => code %(http_status)s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:148
+#, python-format
+msgid "%(relative_uri)s => code %(http_status)s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:159
+msgid "Unexpected status code"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:166
+#, python-format
+msgid "Decoding JSON: %s"
+msgstr ""
+
+#: cinder/transfer/api.py:68
+msgid "Volume in unexpected state"
+msgstr ""
+
+#: cinder/transfer/api.py:102 cinder/volume/api.py:367
+msgid "status must be available"
+msgstr ""
+
+#: cinder/transfer/api.py:119
+#, python-format
+msgid "Failed to create transfer record for %s"
+msgstr ""
+
+#: cinder/transfer/api.py:136
+#, python-format
+msgid "Attempt to transfer %s with invalid auth key."
+msgstr ""
+
+#: cinder/transfer/api.py:156 cinder/volume/flows/api/create_volume.py:508
+#, python-format
+msgid ""
+"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume "
+"(%(d_consumed)dG of %(d_quota)dG already consumed)"
+msgstr ""
+
+#: cinder/transfer/api.py:182
+#, python-format
+msgid "Failed to update quota donating volume transfer id %s"
+msgstr ""
+
+#: cinder/transfer/api.py:199
+#, python-format
+msgid "Volume %s has been transferred."
+msgstr "" + +#: cinder/volume/api.py:143 +#, python-format +msgid "Unable to query if %s is in the availability zone set" +msgstr "" + +#: cinder/volume/api.py:171 cinder/volume/api.py:173 +msgid "Failed to create api volume flow" +msgstr "" + +#: cinder/volume/api.py:202 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:214 +#, python-format +msgid "Volume status must be available or error, but current status is: %s" +msgstr "" + +#: cinder/volume/api.py:224 +msgid "Volume cannot be deleted while migrating" +msgstr "" + +#: cinder/volume/api.py:229 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:285 cinder/volume/api.py:350 +#: cinder/volume/qos_specs.py:240 cinder/volume/volume_types.py:67 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:370 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:377 +msgid "status must be in-use to detach" +msgstr "" + +#: cinder/volume/api.py:388 +msgid "Volume status must be available to reserve" +msgstr "" + +#: cinder/volume/api.py:464 +msgid "Snapshot cannot be created while volume is migrating" +msgstr "" + +#: cinder/volume/api.py:468 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:490 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:502 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:553 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/api.py:581 cinder/volume/flows/api/create_volume.py:208 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:585 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:589 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:720 cinder/volume/api.py:772 +msgid "Volume status must be available/in-use." +msgstr "" + +#: cinder/volume/api.py:723 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/api.py:752 +msgid "Volume status must be available to extend." +msgstr "" + +#: cinder/volume/api.py:757 +#, python-format +msgid "" +"New size for extend must be greater than current size. (current: " +"%(size)s, extended: %(new_size)s)" +msgstr "" + +#: cinder/volume/api.py:778 +msgid "Volume is already part of an active migration" +msgstr "" + +#: cinder/volume/api.py:784 +msgid "volume must not have snapshots" +msgstr "" + +#: cinder/volume/api.py:797 +#, python-format +msgid "No available service named %s" +msgstr "" + +#: cinder/volume/api.py:803 +msgid "Destination host must be different than current host" +msgstr "" + +#: cinder/volume/api.py:833 +msgid "Source volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:837 +msgid "Destination volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:842 +#, python-format +msgid "Destination has migration_status %(stat)s, expected %(exp)s." +msgstr "" + +#: cinder/volume/api.py:853 +msgid "Volume status must be available to update readonly flag." 
+msgstr ""
+
+#: cinder/volume/api.py:862
+#, python-format
+msgid "Unable to update type due to incorrect status on volume: %s"
+msgstr ""
+
+#: cinder/volume/api.py:868
+#, python-format
+msgid "Volume %s is already part of an active migration."
+msgstr ""
+
+#: cinder/volume/api.py:874
+#, python-format
+msgid "migration_policy must be 'on-demand' or 'never', passed: %s"
+msgstr ""
+
+#: cinder/volume/api.py:887
+#, python-format
+msgid "Invalid volume_type passed: %s"
+msgstr ""
+
+#: cinder/volume/api.py:900
+#, python-format
+msgid "New volume_type same as original: %s"
+msgstr ""
+
+#: cinder/volume/api.py:915
+msgid "Retype cannot change encryption requirements"
+msgstr ""
+
+#: cinder/volume/api.py:927
+msgid "Retype cannot change front-end qos specs for in-use volumes"
+msgstr ""
+
+#: cinder/volume/driver.py:189 cinder/volume/drivers/netapp/nfs.py:174
+#, python-format
+msgid "Recovering from a failed execute. Try number %s"
+msgstr ""
+
+#: cinder/volume/driver.py:282
+#, python-format
+msgid "copy_data_between_volumes %(src)s -> %(dest)s."
+msgstr ""
+
+#: cinder/volume/driver.py:295 cinder/volume/driver.py:309
+#, python-format
+msgid "Failed to attach volume %(vol)s"
+msgstr ""
+
+#: cinder/volume/driver.py:327
+#, python-format
+msgid "Failed to copy volume %(src)s to %(dest)s"
+msgstr ""
+
+#: cinder/volume/driver.py:340
+#, python-format
+msgid "copy_image_to_volume %s."
+msgstr ""
+
+#: cinder/volume/driver.py:358
+#, python-format
+msgid "copy_volume_to_image %s."
+msgstr ""
+
+#: cinder/volume/driver.py:394
+#, python-format
+msgid "Unable to access the backend storage via the path %(path)s."
+msgstr ""
+
+#: cinder/volume/driver.py:433
+#, python-format
+msgid "Creating a new backup for volume %s."
+msgstr ""
+
+#: cinder/volume/driver.py:451
+#, python-format
+msgid "Restoring backup %(backup)s to volume %(volume)s."
+msgstr ""
+
+#: cinder/volume/driver.py:474
+msgid "Extend volume not implemented"
+msgstr ""
+
+#: cinder/volume/driver.py:533 cinder/volume/drivers/emc/emc_smis_iscsi.py:113
+msgid "ISCSI provider_location not stored, using discovery"
+msgstr ""
+
+#: cinder/volume/driver.py:546
+#, python-format
+msgid "ISCSI discovery attempt failed for:%s"
+msgstr ""
+
+#: cinder/volume/driver.py:548
+#, python-format
+msgid "Error from iscsiadm -m discovery: %s"
+msgstr ""
+
+#: cinder/volume/driver.py:595
+#, python-format
+msgid "Could not find iSCSI export for volume %s"
+msgstr ""
+
+#: cinder/volume/driver.py:599 cinder/volume/drivers/emc/emc_smis_iscsi.py:156
+#, python-format
+msgid "ISCSI Discovery: Found %s"
+msgstr ""
+
+#: cinder/volume/driver.py:696
+msgid "The volume driver requires the iSCSI initiator name in the connector."
+msgstr ""
+
+#: cinder/volume/driver.py:726 cinder/volume/driver.py:845
+#: cinder/volume/drivers/eqlx.py:247 cinder/volume/drivers/lvm.py:359
+#: cinder/volume/drivers/zadara.py:650
+#: cinder/volume/drivers/emc/emc_smis_common.py:859
+#: cinder/volume/drivers/emc/emc_smis_iscsi.py:235
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:602
+#: cinder/volume/drivers/netapp/iscsi.py:1032
+#: cinder/volume/drivers/netapp/iscsi.py:1419
+#: cinder/volume/drivers/nexenta/iscsi.py:538
+#: cinder/volume/drivers/windows/windows.py:205
+msgid "Updating volume stats"
+msgstr ""
+
+#: cinder/volume/driver.py:924
+msgid "Driver must implement initialize_connection"
+msgstr ""
+
+#: cinder/volume/manager.py:203
+#, python-format
+msgid "Driver path %s is deprecated, update your configuration to the new path."
+msgstr "" + +#: cinder/volume/manager.py:209 +msgid "" +"ThinLVMVolumeDriver is deprecated, please configure LVMISCSIDriver and " +"lvm_type=thin. Continuing with those settings." +msgstr "" + +#: cinder/volume/manager.py:228 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)" +msgstr "" + +#: cinder/volume/manager.py:235 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s" +msgstr "" + +#: cinder/volume/manager.py:244 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:257 +#, python-format +msgid "Failed to re-export volume %s: setting to error state" +msgstr "" + +#: cinder/volume/manager.py:264 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:271 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:273 +#, python-format +msgid "" +"Error encountered during re-exporting phase of driver initialization: " +"%(name)s" +msgstr "" + +#: cinder/volume/manager.py:283 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:328 cinder/volume/manager.py:330 +msgid "Failed to create manager volume flow" +msgstr "" + +#: cinder/volume/manager.py:374 cinder/volume/manager.py:391 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:380 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:389 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:394 +#, python-format +msgid "Cannot delete volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:422 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:427 +#, python-format +msgid "volume %s: glance metadata deleted" +msgstr "" + +#: cinder/volume/manager.py:430 +#, python-format +msgid "no glance metadata found for volume %s" +msgstr "" + +#: cinder/volume/manager.py:434 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:451 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:462 +#, python-format +msgid "snapshot %(snap_id)s: creating" +msgstr "" + +#: cinder/volume/manager.py:490 +#, python-format +msgid "" +"Failed updating %(snapshot_id)s metadata using the provided volumes " +"%(volume_id)s metadata" +msgstr "" + +#: cinder/volume/manager.py:496 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:508 cinder/volume/manager.py:518 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:526 +#, python-format +msgid "Cannot delete snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:556 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:559 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:579 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:583 +msgid "being attached by another host" +msgstr "" + +#: cinder/volume/manager.py:587 +msgid "being attached by different mode" +msgstr "" + +#: cinder/volume/manager.py:590 +msgid "status must be available or attaching" +msgstr "" + +#: cinder/volume/manager.py:698 +#, python-format +msgid 
"Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "" + +#: cinder/volume/manager.py:760 +#, python-format +msgid "Unable to fetch connection information from backend: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:807 +#, python-format +msgid "Unable to terminate volume connection: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:854 +msgid "failed to create new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:857 +msgid "timeout creating new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:880 +#, python-format +msgid "Failed to copy volume %(vol1)s to %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:909 +#, python-format +msgid "" +"migrate_volume_completion: completing migration for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:921 +#, python-format +msgid "" +"migrate_volume_completion is cleaning up an error for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:940 +#, python-format +msgid "Failed to delete migration source vol %(vol)s: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:976 +#, python-format +msgid "volume %s: calling driver migrate_volume" +msgstr "" + +#: cinder/volume/manager.py:1016 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/manager.py:1024 +#, python-format +msgid "" +"Unable to update stats, %(driver_name)s -%(driver_version)s " +"%(config_group)s driver is uninitialized." +msgstr "" + +#: cinder/volume/manager.py:1044 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/manager.py:1091 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to extend volume by %(s_size)sG, " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/manager.py:1103 +#, python-format +msgid "volume %s: extending" +msgstr "" + +#: cinder/volume/manager.py:1105 +#, python-format +msgid "volume %s: extended successfully" +msgstr "" + +#: cinder/volume/manager.py:1107 +#, python-format +msgid "volume %s: Error trying to extend volume" +msgstr "" + +#: cinder/volume/manager.py:1169 +msgid "Failed to update usages while retyping volume." +msgstr "" + +#: cinder/volume/manager.py:1170 +msgid "Failed to get old volume type quota reservations" +msgstr "" + +#: cinder/volume/manager.py:1190 +#, python-format +msgid "Volume %s: retyped succesfully" +msgstr "" + +#: cinder/volume/manager.py:1193 +#, python-format +msgid "" +"Volume %s: driver error when trying to retype, falling back to generic " +"mechanism." +msgstr "" + +#: cinder/volume/manager.py:1204 +msgid "Retype requires migration but is not allowed." +msgstr "" + +#: cinder/volume/manager.py:1212 +msgid "Volume must not have snapshots." 
+msgstr "" + +#: cinder/volume/qos_specs.py:57 +#, python-format +msgid "Valid consumer of QoS specs are: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:84 cinder/volume/qos_specs.py:105 +#: cinder/volume/qos_specs.py:155 cinder/volume/qos_specs.py:197 +#: cinder/volume/qos_specs.py:211 cinder/volume/qos_specs.py:225 +#: cinder/volume/volume_types.py:43 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:123 cinder/volume/qos_specs.py:140 +#: cinder/volume/qos_specs.py:272 cinder/volume/volume_types.py:52 +#: cinder/volume/volume_types.py:99 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/qos_specs.py:156 +#, python-format +msgid "Failed to get all associations of qos specs %s" +msgstr "" + +#: cinder/volume/qos_specs.py:189 +#, python-format +msgid "" +"Type %(type_id)s is already associated with another qos specs: " +"%(qos_specs_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:198 +#, python-format +msgid "Failed to associate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:212 +#, python-format +msgid "Failed to disassociate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:226 +#, python-format +msgid "Failed to disassociate qos specs %s." +msgstr "" + +#: cinder/volume/qos_specs.py:284 cinder/volume/volume_types.py:111 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/utils.py:144 +#, python-format +msgid "" +"Incorrect value error: %(blocksize)s, it may indicate that " +"'volume_dd_blocksize' was configured incorrectly. Fall back to default." +msgstr "" + +#: cinder/volume/volume_types.py:130 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:131 +#: cinder/volume/drivers/block_device.py:143 cinder/volume/drivers/lvm.py:654 +#: cinder/volume/drivers/lvm.py:669 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:157 cinder/volume/drivers/lvm.py:687 +#, python-format +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:183 cinder/volume/drivers/lvm.py:483 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:200 cinder/volume/drivers/lvm.py:504 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:272 cinder/volume/drivers/lvm.py:227 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:287 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:311 cinder/volume/drivers/lvm.py:300 +#: cinder/volume/drivers/zadara.py:509 cinder/volume/drivers/nexenta/nfs.py:189 +#, python-format +msgid "Creating clone of volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:380 +msgid "No free disk" +msgstr "" + +#: cinder/volume/drivers/block_device.py:393 +msgid "No big enough free disk" +msgstr "" + +#: cinder/volume/drivers/coraid.py:84 +#, python-format +msgid "Invalid ESM url scheme \"%s\". Supported https only." +msgstr "" + +#: cinder/volume/drivers/coraid.py:111 +msgid "Invalid REST handle name. Expected path." 
+msgstr "" + +#: cinder/volume/drivers/coraid.py:134 +#, python-format +msgid "Call to json.loads() failed: %(ex)s. Response: %(resp)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:224 +msgid "Session is expired. Relogin on ESM." +msgstr "" + +#: cinder/volume/drivers/coraid.py:244 +msgid "Reply is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:246 +msgid "Error message is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:284 +#, python-format +msgid "Coraid Appliance ping failed: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:297 +#, python-format +msgid "Volume \"%(name)s\" created with VSX LUN \"%(lun)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:311 +#, python-format +msgid "Volume \"%s\" deleted." +msgstr "" + +#: cinder/volume/drivers/coraid.py:315 +#, python-format +msgid "Resize volume \"%(name)s\" to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:319 +#, python-format +msgid "Repository for volume \"%(name)s\" found: \"%(repo)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:333 +#, python-format +msgid "Volume \"%(name)s\" resized. New size is %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:385 +msgid "Cannot create clone volume in different repository." +msgstr "" + +#: cinder/volume/drivers/coraid.py:505 +#, python-format +msgid "Initialize connection %(shelf)s/%(lun)s for %(name)s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:139 +#, python-format +msgid "" +"CLI output\n" +"%s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:154 +msgid "Reading CLI MOTD" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:158 +#, python-format +msgid "Setting CLI terminal width: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:162 +#, python-format +msgid "Sending CLI command: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:169 +msgid "Error executing EQL command" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:199 +#, python-format +msgid "EQL-driver: executing \"%s\"" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:208 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:383 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:215 cinder/volume/drivers/san/san.py:149 +#, python-format +msgid "Error running SSH command: %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:282 +#, python-format +msgid "Volume %s does not exist, it may have already been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:300 +#, python-format +msgid "EQL-driver: Setup is complete, group IP is %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:304 +msgid "Failed to setup the Dell EqualLogic driver" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:320 +#, python-format +msgid "Failed to create volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:329 +#, python-format +msgid "Volume %s was not found while trying to delete it" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:333 +#, python-format +msgid "Failed to delete volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:348 +#, python-format +msgid "Failed to create snapshot of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:361 +#, python-format +msgid "Failed to create volume from snapshot %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:374 +#, python-format +msgid "Failed to create clone of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:384 +#, python-format +msgid "Failed to delete snapshot %(snap)s of volume %(vol)s" +msgstr "" + +#: 
+#, python-format
+msgid "Failed to initialize connection to volume %s"
+msgstr ""
+
+#: cinder/volume/drivers/eqlx.py:415
+#, python-format
+msgid "Failed to terminate connection to volume %s"
+msgstr ""
+
+#: cinder/volume/drivers/eqlx.py:436
+#, python-format
+msgid "Volume %s is not found! It may have been deleted"
+msgstr ""
+
+#: cinder/volume/drivers/eqlx.py:440
+#, python-format
+msgid "Failed to ensure export of volume %s"
+msgstr ""
+
+#: cinder/volume/drivers/eqlx.py:459
+#, python-format
+msgid "Failed to extend_volume %(name)s from %(current_size)sGB to %(new_size)sGB"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:86
+#, python-format
+msgid "There's no Gluster config file configured (%s)"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:91
+#, python-format
+msgid "Gluster config file at %(config)s doesn't exist"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:103
+msgid "mount.glusterfs is not installed"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:161
+#, python-format
+msgid "Cloning volume %(src)s to volume %(dst)s"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:166
+msgid "Volume status must be 'available'."
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:202 cinder/volume/drivers/nfs.py:122
+#: cinder/volume/drivers/netapp/nfs.py:753
+#, python-format
+msgid "casted to %s"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:216
+msgid "Snapshot status must be \"available\" to clone."
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:238
+#, python-format
+msgid "snapshot: %(snap)s, volume: %(vol)s, volume_size: %(size)s"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:257
+#, python-format
+msgid "will copy from snapshot at %s"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:275 cinder/volume/drivers/nfs.py:172
+#, python-format
+msgid "Volume %s does not have provider_location specified, skipping"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:373
+#, python-format
+msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:403
+#, python-format
+msgid "nova call result: %s"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:405
+msgid "Call to Nova to create snapshot failed"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:427
+msgid "Nova returned \"error\" status while creating snapshot."
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:431
+#, python-format
+msgid "Status of snapshot %(id)s is now %(status)s"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:444
+#, python-format
+msgid "Timed out while waiting for Nova update for creation of snapshot %s."
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:456
+#, python-format
+msgid "create snapshot: %s"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:457
+#, python-format
+msgid "volume id: %s"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:532
+msgid "'active' must be present when writing snap_info."
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:562
+#, python-format
+msgid "deleting snapshot %s"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:566
+msgid "Volume status must be \"available\" or \"in-use\"."
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:582
+#, python-format
+msgid ""
+"Snapshot record for %s is not present, allowing snapshot_delete to "
+"proceed."
+msgstr "" + +#: cinder/volume/drivers/glusterfs.py:587 +#, python-format +msgid "snapshot_file for this snap is %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:608 +#, python-format +msgid "No base file found for %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:625 +#, python-format +msgid "No %(base_id)s found for %(file)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:680 +#, python-format +msgid "No file found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:690 +#, python-format +msgid "No snap found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:701 +#, python-format +msgid "No file depends on %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:727 +#, python-format +msgid "Check condition failed: %s expected to be None." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:778 +msgid "Call to Nova delete snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:796 +#, python-format +msgid "status of snapshot %s is still \"deleting\"... waiting" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:802 +#, python-format +msgid "Unable to delete snapshot %(id)s, status: %(status)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:815 +#, python-format +msgid "Timed out while waiting for Nova update for deletion of snapshot %(id)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:904 +#, python-format +msgid "%s must be a valid raw or qcow2 image." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:967 +msgid "Extend volume is only supported for this driver when no snapshots exist." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:975 +#, python-format +msgid "Unrecognized backing format: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:990 +#, python-format +msgid "creating new volume at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:993 +#, python-format +msgid "file already exists at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1019 cinder/volume/drivers/nfs.py:159 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1021 +#, python-format +msgid "Available shares: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1038 +#, python-format +msgid "" +"GlusterFS share at %(dir)s is not writable by the Cinder volume service. " +"Snapshot operations will not be supported." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:96 +#, python-format +msgid "GPFS is not active. Detailed output: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:97 +#, python-format +msgid "GPFS is not running - state: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:140 +msgid "Option gpfs_mount_point_base is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:147 +msgid "Option gpfs_images_share_mode is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:153 +msgid "Option gpfs_images_dir is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:160 +#, python-format +msgid "" +"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " +"belong to different file systems" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:169 +#, python-format +msgid "" +"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in " +"cluster daemon level %(cur)s - must be at least at level %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:183 +#, python-format +msgid "%s must be an absolute path." 
+msgstr "" + +#: cinder/volume/drivers/gpfs.py:188 +#, python-format +msgid "%s is not a directory." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:197 +#, python-format +msgid "" +"The GPFS filesystem %(fs)s is not at the required release level. Current" +" level is %(cur)s, must be at least %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:556 +#, python-format +msgid "Failed to resize volume %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:604 +#, python-format +msgid "mkfs failed on volume %(vol)s, error message was: %(err)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:637 +#, python-format +msgid "" +"%s cannot be accessed. Verify that GPFS is active and file system is " +"mounted." +msgstr "" + +#: cinder/volume/drivers/lvm.py:189 +#, python-format +msgid "Unabled to delete due to existing snapshot for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:215 +#, python-format +msgid "Volume device file path %s does not exist." +msgstr "" + +#: cinder/volume/drivers/lvm.py:221 +#, python-format +msgid "Size for volume: %s not found, cannot secure delete." +msgstr "" + +#: cinder/volume/drivers/lvm.py:262 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:361 +#, python-format +msgid "Unable to update stats on non-initialized Volume Group: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:462 +#, python-format +msgid "Error creating iSCSI target, retrying creation for target: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:482 +#, python-format +msgid "volume_info:%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:518 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:519 cinder/volume/drivers/lvm.py:724 +#: cinder/volume/drivers/huawei/rest_common.py:1225 +#, python-format +msgid "%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:573 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/nfs.py:109 +msgid "Driver specific implementation needs to return mount_point_base." +msgstr "" + +#: cinder/volume/drivers/nfs.py:263 +#, python-format +msgid "Expected volume size was %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:264 +#, python-format +msgid " but size is now %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:361 +#, python-format +msgid "%s is already mounted" +msgstr "" + +#: cinder/volume/drivers/nfs.py:421 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:426 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/nfs.py:431 +#, python-format +msgid "NFS config 'nfs_oversub_ratio' invalid. Must be > 0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:439 +#, python-format +msgid "NFS config 'nfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:493 +#, python-format +msgid "Selected %s as target nfs share." 
+msgstr "" + +#: cinder/volume/drivers/nfs.py:526 +#, python-format +msgid "%s is above nfs_used_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:529 +#, python-format +msgid "%s is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:532 +#, python-format +msgid "%s reserved space is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/rbd.py:160 +#, python-format +msgid "Invalid argument - whence=%s not supported" +msgstr "" + +#: cinder/volume/drivers/rbd.py:164 +msgid "Invalid argument" +msgstr "" + +#: cinder/volume/drivers/rbd.py:183 +msgid "fileno() not supported by RBD()" +msgstr "" + +#: cinder/volume/drivers/rbd.py:210 +#, python-format +msgid "error opening rbd image %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:259 +msgid "rados and rbd python libraries not found" +msgstr "" + +#: cinder/volume/drivers/rbd.py:265 +msgid "error connecting to ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:346 cinder/volume/drivers/sheepdog.py:178 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:377 +#, python-format +msgid "clone depth exceeds limit of %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:411 +#, python-format +msgid "maximum clone depth (%d) has been reached - flattening source volume" +msgstr "" + +#: cinder/volume/drivers/rbd.py:423 +#, python-format +msgid "flattening source volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:435 +#, python-format +msgid "creating snapshot='%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:445 +#, python-format +msgid "cloning '%(src_vol)s@%(src_snap)s' to '%(dest)s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:459 +msgid "clone created successfully" +msgstr "" + +#: cinder/volume/drivers/rbd.py:468 +#, python-format +msgid "creating volume '%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:484 +#, python-format +msgid "flattening %(pool)s/%(img)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:490 +#, python-format +msgid "cloning %(pool)s/%(img)s@%(snap)s to %(dst)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:527 +msgid "volume has no backup snaps" +msgstr "" + +#: cinder/volume/drivers/rbd.py:550 +#, python-format +msgid "volume %s is not a clone" +msgstr "" + +#: cinder/volume/drivers/rbd.py:568 +#, python-format +msgid "deleting parent snapshot %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:579 +#, python-format +msgid "deleting parent %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:593 +#, python-format +msgid "volume %s no longer exists in backend" +msgstr "" + +#: cinder/volume/drivers/rbd.py:609 +msgid "volume has clone snapshot(s)" +msgstr "" + +#: cinder/volume/drivers/rbd.py:625 +#, python-format +msgid "deleting rbd volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:629 +msgid "" +"ImageBusy error raised while deleting rbd volume. This may have been " +"caused by a connection from a client that has crashed and, if so, may be " +"resolved by retrying the delete after 30 seconds has elapsed." 
+msgstr "" + +#: cinder/volume/drivers/rbd.py:642 +msgid "volume is a clone so cleaning references" +msgstr "" + +#: cinder/volume/drivers/rbd.py:696 +#, python-format +msgid "connection data: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:705 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:709 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:712 +msgid "Not an rbd snapshot" +msgstr "" + +#: cinder/volume/drivers/rbd.py:724 +#, python-format +msgid "not cloneable: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:728 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:733 +msgid "rbd image clone requires image format to be 'raw' but image {0} is '{1}'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:747 +#, python-format +msgid "Unable to open image %(loc)s: %(err)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:817 +msgid "volume backup complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:830 +msgid "volume restore complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:840 cinder/volume/drivers/sheepdog.py:195 +#, python-format +msgid "Failed to Extend Volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:845 cinder/volume/drivers/sheepdog.py:200 +#: cinder/volume/drivers/windows/windows.py:223 +#, python-format +msgid "Extend volume from %(old_size)s GB to %(new_size)s GB." +msgstr "" + +#: cinder/volume/drivers/scality.py:67 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:78 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:84 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:105 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:139 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:59 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:64 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:144 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:151 +#, python-format +msgid "" +"Failed to make httplib connection SolidFire Cluster: %s (verify san_ip " +"settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:154 +#, python-format +msgid "Failed to make httplib connection: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:161 +#, python-format +msgid "" +"Request to SolidFire cluster returned bad status: %(status)s / %(reason)s" +" (check san_login/san_password settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:166 +#, python-format +msgid "HTTP request failed, with status: %(status)s and reason: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:177 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:183 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:187 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:189 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:195 +#, python-format +msgid "Detected 
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:202
+#: cinder/volume/drivers/solidfire.py:271
+#: cinder/volume/drivers/solidfire.py:366
+#, python-format
+msgid "API response: %s"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:222
+#, python-format
+msgid "Found solidfire account: %s"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:253
+#, python-format
+msgid "solidfire account: %s does not exist, create it..."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:315
+#, python-format
+msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:398
+msgid "Failed to get model update from clone"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:410
+#, python-format
+msgid "Failed volume create: %s"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:425
+#, python-format
+msgid "More than one valid preset was detected, using %s"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:460
+#, python-format
+msgid "Failed to get SolidFire Volume: %s"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:469
+#, python-format
+msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:478
+#, python-format
+msgid "Volume %s, not found on SF Cluster."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:481
+#, python-format
+msgid "Found %(count)s volumes mapped to id: %(uuid)s."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:550
+msgid "Enter SolidFire delete_volume..."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:554
+#, python-format
+msgid "Account for Volume ID %s was not found on the SolidFire Cluster!"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:556
+msgid "This usually means the volume was never successfully created."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:569
+#, python-format
+msgid "Failed to delete SolidFire Volume: %s"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:572
+#: cinder/volume/drivers/solidfire.py:646
+#: cinder/volume/drivers/solidfire.py:709
+#: cinder/volume/drivers/solidfire.py:734
+#, python-format
+msgid "Volume ID %s was not found on the SolidFire Cluster!"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:575
+msgid "Leaving SolidFire delete_volume"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:579
+msgid "Executing SolidFire ensure_export..."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:587
+msgid "Executing SolidFire create_export..."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:638
+msgid "Entering SolidFire extend_volume..."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:660
+msgid "Leaving SolidFire extend_volume"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:665
+msgid "Updating cluster status info"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:673
+msgid "Failed to get updated stats"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:703
+#: cinder/volume/drivers/solidfire.py:728
+msgid "Entering SolidFire attach_volume..."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:773
+msgid "Leaving SolidFire transfer volume"
+msgstr ""
+
+#: cinder/volume/drivers/zadara.py:236
+#, python-format
+msgid "Sending %(method)s to %(url)s. Body \"%(body)s\""
+msgstr ""
+
+#: cinder/volume/drivers/zadara.py:260
+#, python-format
+msgid "Operation completed. %(data)s"
%(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:357 +#, python-format +msgid "Pool %(name)s: %(total)sGB total, %(free)sGB free" +msgstr "" + +#: cinder/volume/drivers/zadara.py:408 cinder/volume/drivers/zadara.py:531 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:438 +#, python-format +msgid "Create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:445 cinder/volume/drivers/zadara.py:490 +#: cinder/volume/drivers/zadara.py:516 +#, python-format +msgid "Volume %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:456 +#, python-format +msgid "Delete snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:464 +#, python-format +msgid "snapshot: original volume %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:472 +#, python-format +msgid "snapshot: snapshot %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:483 +#, python-format +msgid "Creating volume from snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:496 +#, python-format +msgid "Snapshot %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:614 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:40 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:79 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:83 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:91 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:98 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:107 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:115 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:130 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:137 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:144 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:152 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:157 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:167 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:177 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:188 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:197 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:218 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:230 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:241 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:257 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:266 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:278 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:287 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:292 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:302 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:312 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:321 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:342 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. 
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:354
+#, python-format
+msgid ""
+"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s."
+" Successfully cloned volume from source volume. Finding the clone "
+"relationship."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:365
+#, python-format
+msgid ""
+"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s."
+" Remove the clone relationship. Method: ModifyReplicaSynchronization "
+"ReplicationService: %(service)s Operation: 8 Synchronization: "
+"%(sync_name)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:381
+#, python-format
+msgid ""
+"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s"
+" Return code: %(rc)lu"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:390
+#, python-format
+msgid ""
+"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: "
+"%(srcname)s. Return code: %(rc)lu. Error: %(error)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:402
+#, python-format
+msgid ""
+"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: "
+"%(srcname)s Return code: %(rc)lu."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:411
+msgid "Entering delete_volume."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:413
+#, python-format
+msgid "Delete Volume: %(volume)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:420
+#, python-format
+msgid "Volume %(name)s not found on the array. No volume to delete."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:430
+#, python-format
+msgid ""
+"Error Delete Volume: %(volumename)s. Storage Configuration Service not "
+"found."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:438
+#, python-format
+msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:442
+#, python-format
+msgid ""
+"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigService: "
+"%(service)s TheElement: %(vol_instance)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:456
+#, python-format
+msgid ""
+"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: "
+"%(error)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:465
+#, python-format
+msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:472
+msgid "Entering create_snapshot."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:476
+#, python-format
+msgid "Create snapshot: %(snapshot)s: volume: %(volume)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:488
+#, python-format
+msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:495
+#: cinder/volume/drivers/emc/emc_smis_common.py:497
+#: cinder/volume/drivers/emc/emc_smis_common.py:567
+#, python-format
+msgid "Cannot find Replication Service to create snapshot for volume %s."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:502
+#, python-format
+msgid ""
+"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s "
+"Source: %(volume)s Replication Service: %(service)s ElementName: "
+"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s."
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:518 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:527 +#, python-format +msgid "" +"Error Create Snapshot: %(snapshot)s Volume: %(volume)s Error: " +"%(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:535 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:541 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:545 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:551 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:559 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:574 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:590 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:599 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:611 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:621 +#, python-format +msgid "Create export: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:626 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:648 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:663 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:674 +#, python-format +msgid "Error mapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:678 +#, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:694 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:707 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:711 +#, python-format +msgid "HidePaths for volume %s completed successfully." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:724 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:739 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:744 +#, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:757 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:770 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:775 +#, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:781 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:790 +#: cinder/volume/drivers/emc/emc_smis_common.py:820 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:804 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:810 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:834 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:840 +#, python-format +msgid "Volume %s is already mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:852 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:884 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:887 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:903 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:906 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:928 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:948 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:952 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:959 +msgid "Cannot connect to ECOM server" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:971 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:984 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:997 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1010 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1054 +#, python-format +msgid "Pool %(storage_type)s is not found." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1060 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1066 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1082 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1114 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1117 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1130 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1158 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1184 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1188 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1248 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1289 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1302 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1314 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1326 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1361 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1404 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1409 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1419 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1441 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1463 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1491 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1520 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1526 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1538 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1548 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1550 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1566 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:152 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:161 +#, python-format +msgid "Cannot find device number for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:191 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:198 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:215 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:70 +#, python-format +msgid "Range: start LU: %(start)s, end LU: %(end)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:84 +#, python-format +msgid "setting LU upper (end) limit to %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:92 +#, python-format +msgid "%(element)s: %(val)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:103 cinder/volume/drivers/hds/hds.py:105 +#, python-format +msgid "XML exception reading parameter: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:178 +#, python-format +msgid "portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:197 +#, python-format +msgid "No configuration found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:250 +#, python-format +msgid "HDP not found: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:289 +#, python-format +msgid "iSCSI portal not found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:327 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:355 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is cloned." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:372 +#, python-format +msgid "LUN %(lun)s extended to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:395 +#, python-format +msgid "delete lun %(lun)s on %(name)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:480 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created from snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:503 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is created as snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:522 +#, python-format +msgid "LUN %s is deleted." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:57 +msgid "_instantiate_driver: configuration not found." 
+msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:64 +#, python-format +msgid "" +"_instantiate_driver: Loading %(protocol)s driver for Huawei OceanStor " +"%(product)s series storage arrays." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:84 +#, python-format +msgid "" +"\"Product\" or \"Protocol\" is illegal. \"Product\" should be set to " +"either T, Dorado or HVS. \"Protocol\" should be set to either iSCSI or " +"FC. Product: %(product)s Protocol: %(protocol)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:74 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s host: %(host)s initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:92 +#: cinder/volume/drivers/huawei/huawei_t.py:461 +#, python-format +msgid "initialize_connection: Target FC ports WWNS: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:101 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(ini)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:159 +#: cinder/volume/drivers/huawei/rest_common.py:1278 +#, python-format +msgid "" +"_get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " +"check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:206 +#: cinder/volume/drivers/huawei/rest_common.py:1083 +#, python-format +msgid "_get_tgt_iqn: iSCSI IP is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:234 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:248 +#, python-format +msgid "" +"_get_iscsi_tgt_port_info: Failed to get iSCSI port info. Please make sure" +" the iSCSI port IP %s is configured in array." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:323 +#: cinder/volume/drivers/huawei/huawei_t.py:552 +#, python-format +msgid "" +"terminate_connection: volume: %(vol)s, host: %(host)s, connector: " +"%(initiator)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:351 +#, python-format +msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:436 +msgid "validate_connector: The FC driver requires thewwpns in the connector." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:443 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:578 +#, python-format +msgid "_remove_fc_ports: FC port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:40 +#, python-format +msgid "parse_xml_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:129 +#, python-format +msgid "_get_host_os_type: Host %(ip)s OS type is %(os)s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:59 +#, python-format +msgid "HVS Request URL: %(url)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:60 +#, python-format +msgid "HVS Request Data: %(data)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:73 +#, python-format +msgid "HVS Response Data: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:75 +#, python-format +msgid "Bad response from server: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:82 +msgid "JSON transfer error" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:102 +#, python-format +msgid "Login error, reason is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:166 +#, python-format +msgid "" +"%(err)s\n" +"result: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:173 +#, python-format +msgid "%s \"data\" was not in result." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:208 +msgid "Can't find the Qos policy in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:246 +msgid "Can't find lun or lun group in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:280 +#, python-format +msgid "Invalid resource pool: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:298 +#, python-format +msgid "Get pool info error, pool name is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:327 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:354 +#, python-format +msgid "_stop_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:474 +#, python-format +msgid "" +"_mapping_hostgroup_and_lungroup: lun_group: %(lun_group)sview_id: " +"%(view_id)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:511 +#: cinder/volume/drivers/huawei/rest_common.py:543 +#, python-format +msgid "initiator name:%(initiator_name)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:527 +#, python-format +msgid "host lun id is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:553 +#, python-format +msgid "the free wwns %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:574 +#, python-format +msgid "the fc server properties is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:688 +#, python-format +msgid "JSON transfer data error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:874 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:937 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:964 +#, python-format +msgid "" +"PrefetchType config is wrong. PrefetchType must in 1,2,3,4. fetchtype " +"is:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:970 +msgid "Use default prefetch fetchtype. Prefetch fetchtype:Intelligent." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:982 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal.LUNcopy name: " +"%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1056 +#, python-format +msgid "" +"_get_iscsi_port_info: Failed to get iscsi port info through config IP " +"%(ip)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1101 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1124 +#, python-format +msgid "_parse_volume_type: type id: %(type_id)s config parameter is: %(params)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1157 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the configuration file " +"%(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1162 +#, python-format +msgid "The config parameters are: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1239 +#: cinder/volume/drivers/huawei/ssh_common.py:118 +#: cinder/volume/drivers/huawei/ssh_common.py:1265 +#, python-format +msgid "_check_conf_file: Config file invalid. %s must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1246 +#: cinder/volume/drivers/huawei/ssh_common.py:125 +msgid "_check_conf_file: Config file invalid. StoragePool must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1256 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1300 +msgid "Can not find lun in array" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:54 +#, python-format +msgid "ssh_read: Read SSH timeout. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:70 +msgid "No response message. Please check system status." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:101 +#: cinder/volume/drivers/huawei/ssh_common.py:1249 +msgid "do_setup" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:135 +#: cinder/volume/drivers/huawei/ssh_common.py:1287 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType is invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:169 +#, python-format +msgid "_get_login_info: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:224 +#, python-format +msgid "create_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:242 +#, python-format +msgid "" +"_name_translate: Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:279 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the element in configuration " +"file %(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:373 +#: cinder/volume/drivers/huawei/ssh_common.py:1451 +#, python-format +msgid "LUNType must be \"Thin\" or \"Thick\". LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:395 +msgid "" +"_parse_conf_lun_params: Use default prefetch type. 
Prefetch type: " +"Intelligent" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:421 +#, python-format +msgid "" +"_get_maximum_capacity_pool_id: Failed to get pool id. Please check config" +" file and make sure the StoragePool %s is created in storage array." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:436 +#, python-format +msgid "CLI command: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:466 +#, python-format +msgid "" +"_execute_cli: Can not connect to IP %(old)s, try to connect to the other " +"IP %(new)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:501 +#, python-format +msgid "_execute_cli: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:511 +#, python-format +msgid "delete_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:516 +#, python-format +msgid "delete_volume: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:570 +#, python-format +msgid "" +"create_volume_from_snapshot: snapshot name: %(snapshot)s, volume name: " +"%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:580 +#, python-format +msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:650 +#, python-format +msgid "_wait_for_luncopy: LUNcopy %(luncopyname)s status is %(status)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:688 +#, python-format +msgid "create_cloned_volume: src volume: %(src)s, tgt volume: %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:697 +#, python-format +msgid "Source volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:739 +#, python-format +msgid "" +"extend_volume: extended volume name: %(extended_name)s new added volume " +"name: %(added_name)s new added volume size: %(added_size)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:747 +#, python-format +msgid "extend_volume: volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:779 +#, python-format +msgid "create_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:785 +msgid "create_snapshot: Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:792 +#, python-format +msgid "create_snapshot: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:855 +#, python-format +msgid "delete_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:865 +#, python-format +msgid "" +"delete_snapshot: Can not delete snapshot %s for it is a source LUN of " +"LUNCopy." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:873 +#, python-format +msgid "delete_snapshot: Snapshot %(snap)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:916 +#, python-format +msgid "" +"%(func)s: %(msg)s\n" +"CLI command: %(cmd)s\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:933 +#, python-format +msgid "map_volume: Volume %s was not found." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1079 +#, python-format +msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1102 +#, python-format +msgid "remove_map: Host %s does not exist." 
+msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1106 +#, python-format +msgid "remove_map: Volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1119 +#, python-format +msgid "remove_map: No map between host %(host)s and volume %(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1138 +#, python-format +msgid "" +"_delete_map: There are IOs accessing the system. Retry to delete host map" +" %(mapid)s 10s later." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1146 +#, python-format +msgid "" +"_delete_map: Failed to delete host map %(mapid)s.\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1185 +msgid "_update_volume_stats: Updating volume stats." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1277 +msgid "_check_conf_file: Config file invalid. StoragePool must be specified." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1311 +msgid "" +"_get_device_type: The driver only supports Dorado5100 and Dorado 2100 G2 " +"now." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1389 +#, python-format +msgid "" +"create_volume_from_snapshot: %(device)s does not support create volume " +"from snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1396 +#, python-format +msgid "create_cloned_volume: %(device)s does not support clone volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1404 +#, python-format +msgid "extend_volume: %(device)s does not support extend volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1413 +#, python-format +msgid "create_snapshot: %(device)s does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:132 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:142 +#, python-format +msgid "Failed getting details for pool %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:179 +msgid "do_setup: No configured nodes." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:182 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:186 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:190 +msgid "Unable to determine system name" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:193 +msgid "Unable to determine system id" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:196 +msgid "Unable to determine pool extent size" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:203 +#: cinder/volume/drivers/netapp/iscsi.py:122 +#: cinder/volume/drivers/netapp/nfs.py:639 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:157 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:209 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:217 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:225 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:235 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:254 +msgid "The connector does not contain the required information." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:277 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:297 +msgid "CHAP secret exists for host but CHAP is disabled" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:302 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:314 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:316 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:333 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:342 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:365 +msgid "" +"Could not get FC connection information for the host-volume connection. " +"Is the host configured properly for FC connections?" 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:380 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:385 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:403 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:411 +msgid "terminate_connection: Failed to get host name from connector." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:421 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:447 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:459 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:470 +#, python-format +msgid "enter: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:474 +msgid "extend_volume: Extending a volume with snapshots is not supported." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:481 +#, python-format +msgid "leave: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:497 +#, python-format +msgid "enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:523 +#, python-format +msgid "leave: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:540 +#, python-format +msgid "" +"enter: retype: id=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:581 +#, python-format +msgid "" +"exit: retype: ild=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:622 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:623 +msgid "_update_volume_stats: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:44 +#, python-format +msgid "Could not find key in output of command %(cmd)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:64 +#, python-format +msgid "Failed to get code level (%s)." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:86 +#, python-format +msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:143 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:165 +#, python-format +msgid "Failed to find host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:178 +#, python-format +msgid "enter: get_host_from_connector: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:207 +#, python-format +msgid "leave: get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:218 +#, python-format +msgid "enter: create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:224 +msgid "create_host: Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:235 +msgid "create_host: No initiators or wwpns supplied." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:265 +#, python-format +msgid "leave: create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:275 +#, python-format +msgid "enter: map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:301 +#, python-format +msgid "" +"leave: map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host " +"%(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:311 +#, python-format +msgid "enter: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:318 +#, python-format +msgid "unmap_vol_from_host: No mapping of volume %(vol_name)s to any host found." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:324 +#, python-format +msgid "" +"unmap_vol_from_host: Multiple mappings of volume %(vol_name)s found, no " +"host specified." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:336 +#, python-format +msgid "" +"unmap_vol_from_host: No mapping of volume %(vol_name)s to host %(host) " +"found." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:348 +#, python-format +msgid "leave: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:377 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:383 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:390 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:397 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:402 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:408 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:417 +#, python-format +msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:452 +msgid "Protocol must be specified as ' iSCSI' or ' FC'." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:495 +#, python-format +msgid "enter: create_vdisk: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:498 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:525 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping%(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:535 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within theallotted %(to)d " +"seconds timeout. Terminating." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:544 +#, python-format +msgid "" +"enter: run_flashcopy: execute FlashCopy from source %(source)s to target " +"%(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:552 +#, python-format +msgid "leave: run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:572 +#, python-format +msgid "Loopcall: _check_vdisk_fc_mappings(), vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:595 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:631 +#, python-format +msgid "Calling _ensure_vdisk_no_fc_mappings: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:639 +#, python-format +msgid "enter: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:641 +#, python-format +msgid "Tried to delete non-existant vdisk %s." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:645 +#, python-format +msgid "leave: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:649 +#, python-format +msgid "enter: create_copy: snapshot %(src)s to %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:654 +#, python-format +msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:669 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt)s from vdisk %(src)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:691 +msgid "migrate_volume started without a vdisk copy in the expected pool." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:743 +#, python-format +msgid "" +"Ignore change IO group as storage code level is %(code_level)s, below " +"then 6.4.0.0" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:35 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:211 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:244 +#, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:53 +#, python-format +msgid "Expected no output from CLI command %(cmd)s, got %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:65 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:256 +#, python-format +msgid "" +"Failed to parse CLI output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:142 +msgid "Must pass wwpn or host to lsfabric." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:158 +#, python-format +msgid "Did not find success message nor error for %(fun)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:163 +msgid "" +"storwize_svc_multihostmap_enabled is set to False, not allowing multi " +"host mapping." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:347 +#, python-format +msgid "Did not find expected key %(key)s in %(fun)s: %(raw)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:382 +#, python-format +msgid "" +"Unexpected CLI response: header/row mismatch. header: %(header)s, row: " +"%(row)s" +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:419 +#, python-format +msgid "No element by given name %s." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:440 +msgid "Not a valid value for NaElement." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:444 +msgid "NaElement name cannot be null." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:468 +msgid "Type cannot be converted into NaElement." 
+msgstr "" + +#: cinder/volume/drivers/netapp/common.py:75 +msgid "Required configuration not found" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:103 +#, python-format +msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:109 +#, python-format +msgid "Storage family %s is not supported" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:116 +#, python-format +msgid "No default storage protocol found for storage family %(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:123 +#, python-format +msgid "" +"Protocol %(storage_protocol)s is not supported for storage family " +"%(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:130 +#, python-format +msgid "" +"NetApp driver of family %(storage_family)s and protocol " +"%(storage_protocol)s loaded" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:139 +msgid "Only loading netapp drivers supported." +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:158 +#, python-format +msgid "" +"The configured NetApp driver is deprecated. Please refer the link to " +"resolve the issue '%s'." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:69 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:105 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:150 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:166 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:175 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:191 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:227 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:232 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:238 +#, python-format +msgid "Failed to get LUN target details for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:249 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:252 +#, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:290 +#, python-format +msgid "Snapshot %s deletion successful" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:310 +#: cinder/volume/drivers/netapp/iscsi.py:565 +#: cinder/volume/drivers/netapp/nfs.py:99 +#: cinder/volume/drivers/netapp/nfs.py:206 +#, python-format +msgid "Resizing %s failed. Cleaning volume." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:325 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:412 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:431 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:511 +msgid "Object is not a NetApp LUN." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:543 +#, python-format +msgid "Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:545 +#, python-format +msgid "Error getting lun attribute. Exception: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:600 +#, python-format +msgid "No need to extend volume %s as it is already the requested new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:606 +#, python-format +msgid "Resizing lun %s directly to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:633 +#, python-format +msgid "Lun %(path)s geometry failed. Message - %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:662 +#, python-format +msgid "Moving lun %(name)s to %(new_name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:677 +#, python-format +msgid "Resizing lun %s using sub clone to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:684 +#, python-format +msgid "%s cannot be sub clone resized as it is hosted on compressed volume" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:690 +#, python-format +msgid "%s cannot be sub clone resized as it contains no blocks." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:707 +#, python-format +msgid "Post clone resize lun %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:718 +#, python-format +msgid "Failure staging lun %s to tmp." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:723 +#, python-format +msgid "Failure moving new cloned lun to %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:727 +#, python-format +msgid "Failure deleting staged tmp lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:730 +#, python-format +msgid "Unknown exception in post clone resize lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:732 +#, python-format +msgid "Exception details: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:736 +msgid "Getting lun block count." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:741 +#, python-format +msgid "Failure getting lun info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:785 +#, python-format +msgid "Failed to get vol with required size and extra specs for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:796 +#, python-format +msgid "Error provisioning vol %(name)s on %(volume)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:841 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:982 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:986 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1087 +msgid "Cluster ssc is not updated. No volume stats found." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1149 +#: cinder/volume/drivers/netapp/nfs.py:1080 +msgid "Unsupported ONTAP version. ONTAP version 7.3.1 and above is supported." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1153 +#: cinder/volume/drivers/netapp/nfs.py:1084 +#: cinder/volume/drivers/netapp/utils.py:320 +msgid "Api version could not be determined." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1164 +#, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1273 +#, python-format +msgid "Error finding luns for volume %s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1390 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1393 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1456 +msgid "Volume refresh job already running. Returning..." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1462 +#, python-format +msgid "Error refreshing vol capacity. Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1470 +#, python-format +msgid "Refreshing capacity info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:104 +#: cinder/volume/drivers/netapp/nfs.py:211 +#, python-format +msgid "NFS file %s not discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:224 +#, python-format +msgid "Copied image to volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:230 +#, python-format +msgid "Registering image in cache %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:237 +#, python-format +msgid "" +"Exception while registering image %(image_id)s in cache. Exception: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:250 +#, python-format +msgid "Found cache file for image %(image_id)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:263 +#, python-format +msgid "Cloning img from cache for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:271 +msgid "Image cache cleaning in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:282 +msgid "Image cache cleaning in progress." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:293 +#, python-format +msgid "Cleaning cache for share %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:298 +#, python-format +msgid "Files to be queued for deletion %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:305 +#, python-format +msgid "Exception during cache cleaning %(share)s. Message - %(ex)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:311 +msgid "Image cache cleaning done." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:336 +#, python-format +msgid "Bytes to free %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:343 +#, python-format +msgid "Delete file path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:358 +#, python-format +msgid "Deleting file at path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:363 +#, python-format +msgid "Exception during deleting %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:395 +#, python-format +msgid "Unexpected exception in cloning image %(image_id)s. 
Message: %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:411 +#, python-format +msgid "Cloning image %s from cache" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:415 +#, python-format +msgid "Cache share: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:425 +#, python-format +msgid "Unexpected exception during image cloning in share %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:431 +#, python-format +msgid "Cloning image %s directly in share" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:436 +#, python-format +msgid "Share is cloneable %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:443 +#, python-format +msgid "Image is raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:450 +#, python-format +msgid "Image will locally be converted to raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:457 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:467 +#, python-format +msgid "Performing post clone for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:474 +msgid "NFS file could not be discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:478 +msgid "Checking file for resize" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:482 +#, python-format +msgid "Resizing file to %sG" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:488 +msgid "Resizing image file failed." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:510 +msgid "Discover file retries exhausted." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:529 +#, python-format +msgid "Image location not in the expected format %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:557 +#, python-format +msgid "Found possible share matches %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:561 +msgid "Unexpected exception while short listing used share." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:599 +#, python-format +msgid "Extending volume %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:710 +#, python-format +msgid "Shares on vserver %s will only be used for provisioning." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:714 +#: cinder/volume/drivers/netapp/nfs.py:892 +msgid "No vserver set in config. SSC will be disabled." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:757 +#, python-format +msgid "Exception creating vol %(name)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:765 +#, python-format +msgid "Volume %s could not be created on shares." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:815 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:856 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:862 +#, python-format +msgid "" +"Cloning with params volume %(volume)s, src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:924 +msgid "No cluster ssc stats found. Wait for next volume stats update." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:941 +msgid "No shares found hence skipping ssc refresh." 
+msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:978 +#: cinder/volume/drivers/netapp/nfs.py:1221 +#, python-format +msgid "Shortlisted del elg files %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:983 +#: cinder/volume/drivers/netapp/nfs.py:1226 +#, python-format +msgid "Getting file usage for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:988 +#: cinder/volume/drivers/netapp/nfs.py:1231 +#, python-format +msgid "file-usage for path %(path)s is %(bytes)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1005 +#: cinder/volume/drivers/netapp/nfs.py:1268 +#, python-format +msgid "Share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1007 +#: cinder/volume/drivers/netapp/nfs.py:1270 +#, python-format +msgid "No share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1038 +#, python-format +msgid "Found volume %(vol)s for share %(share)s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1129 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1139 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:241 +#, python-format +msgid "Unexpected error while creating ssc vol list. Message - %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:272 +#, python-format +msgid "Exception querying aggr options. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:313 +#, python-format +msgid "Exception querying sis information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:347 +#, python-format +msgid "Exception querying mirror information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:379 +#, python-format +msgid "Exception querying storage disk. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:421 +#, python-format +msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:455 +#, python-format +msgid "Successfully completed stale refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:482 +#, python-format +msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:488 +#, python-format +msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:500 +msgid "Backend not a VolumeDriver." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:502 +msgid "Backend server not NaServer." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:505 +msgid "ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:517 +msgid "refresh stale ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:616 +msgid "Fatal error: User not permitted to query NetApp volumes." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:623 +#, python-format +msgid "" +"The user does not have access or sufficient privileges to use all ssc " +"apis. The ssc features %s may not work as expected." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:122 +msgid "ems executed successfully." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:124 +#, python-format +msgid "Failed to invoke ems. 
Message : %s" +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:137 +msgid "" +"It is not the recommended way to use drivers by NetApp. Please use " +"NetAppDriver to achieve the functionality." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:160 +msgid "Requires an NaServer instance." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:317 +msgid "Unsupported Clustered Data ONTAP version." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:99 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:150 +#, python-format +msgid "Extending volume: %(id)s New size: %(size)s GB" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:166 +#, python-format +msgid "Volume %s does not exist, it seems it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:179 +#, python-format +msgid "Cannot delete snapshot %(origin): %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:190 +#, python-format +msgid "Creating temp snapshot of the original volume: %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:200 +#: cinder/volume/drivers/nexenta/nfs.py:200 +#, python-format +msgid "Volume creation failed, deleting created snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:205 +#: cinder/volume/drivers/nexenta/nfs.py:205 +#, python-format +msgid "Failed to delete zfs snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:223 +#, python-format +msgid "Enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:250 +#, python-format +msgid "Remote NexentaStor appliance at %s should be SSH-bound." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:267 +#, python-format +msgid "" +"Cannot send source snapshot %(src)s to destination %(dst)s. Reason: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:275 +#, python-format +msgid "" +"Cannot delete temporary source snapshot %(src)s on NexentaStor Appliance:" +" %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:281 +#, python-format +msgid "Cannot delete source volume %(volume)s on NexentaStor Appliance: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:318 +#, python-format +msgid "Snapshot %s does not exist, it seems it was already deleted." 
+msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:439 +#: cinder/volume/drivers/windows/windows_utils.py:230 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:449 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:461 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:471 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:481 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:514 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:522 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:83 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:88 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:89 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:90 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:96 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:85 +#, python-format +msgid "Volume %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:89 +#, python-format +msgid "Folder %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:114 +#, python-format +msgid "Creating folder on Nexenta Store %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:146 +#, python-format +msgid "Cannot destroy created folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:176 +#, python-format +msgid "Cannot destroy cloned folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:227 +#, python-format +msgid "Folder %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:237 +#: cinder/volume/drivers/nexenta/nfs.py:268 +#, python-format +msgid "Snapshot %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:302 +#, python-format +msgid "Creating regular file: %s.This may take some time." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:313 +#, python-format +msgid "Regular file: %s created." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:365 +#, python-format +msgid "Sharing folder %s on Nexenta Store" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:393 +#, python-format +msgid "Shares loaded: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/utils.py:46 +#, python-format +msgid "Invalid value: \"%s\"" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:93 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:99 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:107 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:137 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:190 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:246 +#, python-format +msgid "Snapshot info: %(name)s => %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:321 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:79 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:166 +#, python-format +msgid "Invalid hp3parclient version. Version %s or greater required." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:179 +#, python-format +msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:193 +#, python-format +msgid "HP3PARCommon %(common_ver)s, hp3parclient %(rest_ver)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:212 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:494 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:228 +#, python-format +msgid "Failed to get domain because CPG (%s) doesn't exist on array." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:247 +#, python-format +msgid "Error extending volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:347 +#, python-format +msgid "command %s failed" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:390 +#, python-format +msgid "Error running ssh command: %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:586 +#, python-format +msgid "VV Set %s does not exist." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:633 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:684 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:752 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1004 +#, python-format +msgid "Failure in update_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1019 +#, python-format +msgid "Failure in clear_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1031 +#, python-format +msgid "Error attaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1039 +#, python-format +msgid "Error detaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:124 +#, python-format +msgid "Invalid IP address format '%s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:158 +#, python-format +msgid "" +"Found invalid iSCSI IP address(s) in configuration option(s) " +"hp3par_iscsi_ips or iscsi_ip_address '%s.'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:164 +msgid "At least one valid iSCSI IP address must be set." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:266 +msgid "Least busy iSCSI port not found, using first iSCSI port in list." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:75 +#, python-format +msgid "Failure while invoking function: %(func)s. Error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:162 +#, python-format +msgid "Error while terminating session: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:165 +msgid "Successfully established connection to the server." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:172 +#, python-format +msgid "Error while logging out the user: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:218 +#, python-format +msgid "" +"Not authenticated error occurred. Will create session and try API call " +"again: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:258 +#, python-format +msgid "Task: %(task)s progress: %(prog)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:262 +#, python-format +msgid "Task %s status: success." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:266 +#: cinder/volume/drivers/vmware/api.py:271 +#, python-format +msgid "Task: %(task)s failed with error: %(err)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:290 +msgid "Lease is ready." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:294 +msgid "Lease initializing..." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:304 +#, python-format +msgid "Error: unknown lease state %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:51 +#, python-format +msgid "Read %(bytes)s out of %(max)s from ThreadSafePipe." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:56 +#, python-format +msgid "Completed transfer of size %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:102 +#, python-format +msgid "Initiating image service update on image: %(image)s with meta: %(meta)s" +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:117 +#, python-format +msgid "Glance image: %s is now active." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:123 +#, python-format +msgid "Glance image: %s is in killed state." 
+msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:132 +#, python-format +msgid "Glance image %(id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:171 +#, python-format +msgid "" +"Exception during HTTP connection close in VMwareHTTPWrite. Exception is " +"%s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:203 +#: cinder/volume/drivers/vmware/read_write_util.py:292 +msgid "Could not retrieve URL from lease." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:206 +#, python-format +msgid "Opening vmdk url: %s for write." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:231 +#, python-format +msgid "Written %s bytes to vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:242 +#: cinder/volume/drivers/vmware/read_write_util.py:318 +#, python-format +msgid "Updating progress to %s percent." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:258 +#: cinder/volume/drivers/vmware/read_write_util.py:334 +msgid "Lease released." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:260 +#: cinder/volume/drivers/vmware/read_write_util.py:336 +#, python-format +msgid "Lease is already in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:295 +#, python-format +msgid "Opening vmdk url: %s for read." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:307 +#, python-format +msgid "Read %s bytes from vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:150 +#, python-format +msgid "Error(s): %s occurred in the call to RetrievePropertiesEx." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:189 +#, python-format +msgid "No such SOAP method %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:198 +#, python-format +msgid "httplib error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:209 +#, python-format +msgid "Socket error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:218 +#, python-format +msgid "Type error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:225 +#, python-format +msgid "Error in %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:112 +#, python-format +msgid "Returning spec value %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:115 +#, python-format +msgid "Invalid spec value: %s specified." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:118 +#, python-format +msgid "Returning default spec value: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:169 +#, python-format +msgid "%s not set." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:174 +#, python-format +msgid "Successfully setup driver: %(driver)s for server: %(ip)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:219 +msgid "Backing not available, no operation to be performed." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:287 +#, python-format +msgid "" +"Unable to pick datastore to accommodate %(size)s bytes from the " +"datastores: %(dss)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:293 +#, python-format +msgid "" +"Selected datastore: %(datastore)s with %(host_count)d connected host(s) " +"for the volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:375 +#, python-format +msgid "" +"Unable to find suitable datastore for volume of size: %(vol)s GB under " +"host: %(host)s. 
More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:385 +#, python-format +msgid "Unable to find host to accommodate a disk of size: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:412 +#, python-format +msgid "" +"Unable to find suitable datastore for volume: %(vol)s under host: " +"%(host)s. More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:422 +#, python-format +msgid "Unable to create volume: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:441 +#, python-format +msgid "The instance: %s for which initialize connection is called, exists." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:448 +#, python-format +msgid "There is no backing for the volume: %s. Need to create one." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:456 +msgid "The instance for which initialize connection is called, does not exist." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:461 +#, python-format +msgid "Trying to boot from an empty volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:470 +#, python-format +msgid "" +"Returning connection_info: %(info)s for volume: %(volume)s with " +"connector: %(connector)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:518 +#, python-format +msgid "Snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:523 +#, python-format +msgid "There is no backing, so will not create snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:528 +#, python-format +msgid "Successfully created snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:549 +#, python-format +msgid "Delete snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:554 +#, python-format +msgid "There is no backing, and so there is no snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:558 +#, python-format +msgid "Successfully deleted snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:586 +#, python-format +msgid "Successfully cloned new backing: %(back)s from source VMDK file: %(vmdk)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:603 +#, python-format +msgid "" +"There is no backing for the source volume: %(svol)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:633 +#, python-format +msgid "" +"There is no backing for the source snapshot: %(snap)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:642 +#: cinder/volume/drivers/vmware/vmdk.py:982 +#, python-format +msgid "" +"There is no snapshot point for the snapshotted volume: %(snap)s. Not " +"creating any backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:678 +#, python-format +msgid "Cannot create image of disk format: %s. Only vmdk disk format is accepted." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:713 +#: cinder/volume/drivers/vmware/vmdk.py:771 +#, python-format +msgid "Fetching glance image: %(id)s to server: %(host)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:722 +#: cinder/volume/drivers/vmware/vmdk.py:792 +#, python-format +msgid "Done copying image: %(id)s to volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:725 +#, python-format +msgid "" +"Exception in copy_image_to_volume: %(excep)s. Deleting the backing: " +"%(back)s."
+msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:746 +#, python-format +msgid "Exception in _select_ds_for_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:749 +#, python-format +msgid "Selected datastore %(ds)s for new volume of size %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:784 +#, python-format +msgid "Exception in copy_image_to_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:787 +#, python-format +msgid "Deleting the backing: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:808 +#, python-format +msgid "Copy glance image: %s to create new volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:842 +msgid "Upload to glance of attached volume is not supported." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:847 +#, python-format +msgid "Copy Volume: %s to new image." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:853 +#, python-format +msgid "Backing not found, creating for volume: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:872 +#, python-format +msgid "Done copying volume %(vol)s to a new image %(img)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:922 +#, python-format +msgid "Relocating volume: %(backing)s to %(ds)s and %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:961 +#: cinder/volume/drivers/vmware/volumeops.py:630 +#, python-format +msgid "Successfully created clone: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:974 +#, python-format +msgid "" +"There is no backing for the snapshotted volume: %(snap)s. Not creating any" +" backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1010 +#, python-format +msgid "" +"There is no backing for the source volume: %(src)s. Not creating any " +"backing for volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1018 +#, python-format +msgid "Linked clone of source volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:94 +#, python-format +msgid "Downloading image: %s from glance image server as a flat vmdk file." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:107 +#: cinder/volume/drivers/vmware/vmware_images.py:126 +#, python-format +msgid "Downloaded image: %s from glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:113 +#, python-format +msgid "Downloading image: %s from glance image server using HttpNfc import." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:132 +#, python-format +msgid "Uploading image: %s to the Glance image server using HttpNfc export." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:158 +#, python-format +msgid "Uploaded image: %s to the Glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:87 +#, python-format +msgid "Did not find any backing with name: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:94 +#, python-format +msgid "Deleting the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:97 +#, python-format +msgid "Initiated deletion of VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:99 +#, python-format +msgid "Deleted the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:237 +#, python-format +msgid "There are no valid datastores attached to %s."
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:289 +#, python-format +msgid "" +"Creating folder: %(child_folder_name)s under parent folder: " +"%(parent_folder)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:306 +#, python-format +msgid "Child folder already present: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:314 +#, python-format +msgid "Created child folder: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:365 +#, python-format +msgid "Spec for creating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:383 +#, python-format +msgid "" +"Creating volume backing name: %(name)s disk_type: %(disk_type)s size_kb: " +"%(size_kb)s at folder: %(folder)s resource pool: %(resource_pool)s " +"datastore name: %(ds_name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:395 +#, python-format +msgid "Initiated creation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:398 +#, python-format +msgid "Successfully created volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:438 +#, python-format +msgid "Spec for relocating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:452 +#, python-format +msgid "" +"Relocating backing: %(backing)s to datastore: %(ds)s and resource pool: " +"%(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:462 +#, python-format +msgid "Initiated relocation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:464 +#, python-format +msgid "" +"Successfully relocated volume backing: %(backing)s to datastore: %(ds)s " +"and resource pool: %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:474 +#, python-format +msgid "Moving backing: %(backing)s to folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:479 +#, python-format +msgid "Initiated move of volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:482 +#, python-format +msgid "Successfully moved volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:494 +#, python-format +msgid "Snapshotting backing: %(backing)s with name: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:501 +#, python-format +msgid "Initiated snapshot of volume backing: %(backing)s named: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:505 +#, python-format +msgid "Successfully created snapshot: %(snap)s for volume backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:553 +#, python-format +msgid "Deleting the snapshot: %(name)s from backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:558 +#, python-format +msgid "" +"Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " +"delete anything." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:565 +#, python-format +msgid "Initiated snapshot: %(name)s deletion for backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:569 +#, python-format +msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:597 +#, python-format +msgid "Spec for cloning the backing: %s."
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:613 +#, python-format +msgid "" +"Creating a clone of backing: %(back)s, named: %(name)s, clone type: " +"%(type)s from snapshot: %(snap)s on datastore: %(ds)s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:627 +#, python-format +msgid "Initiated clone of backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:638 +#, python-format +msgid "Deleting file: %(file)s under datacenter: %(dc)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:646 +#, python-format +msgid "Initiated deletion via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:648 +#, python-format +msgid "Successfully deleted file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:701 +msgid "Copying disk data before snapshot of the VM" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:711 +#, python-format +msgid "Initiated copying disk data via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:713 +#, python-format +msgid "Successfully copied disk at: %(src)s to: %(dest)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:722 +#, python-format +msgid "Deleting vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:729 +#, python-format +msgid "Initiated deleting vmdk file via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:731 +#, python-format +msgid "Deleted vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/windows/windows.py:102 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:47 +#, python-format +msgid "" +"check_for_setup_error: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:53 +msgid "check_for_setup_error: there is no ISCSI traffic listening." +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:63 +#, python-format +msgid "" +"get_host_information: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:73 +#, python-format +msgid "" +"get_host_information: the ISCSI target information could not be " +"retrieved. WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:105 +#, python-format +msgid "" +"associate_initiator_with_iscsi_target: an association between initiator: " +"%(init)s and target name: %(target)s could not be established. WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:123 +#, python-format +msgid "" +"delete_iscsi_target: error when deleting the iscsi target associated with" +" target name: %(target)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:139 +#, python-format +msgid "" +"create_volume: error when creating the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:157 +#, python-format +msgid "" +"delete_volume: error when deleting the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:177 +#, python-format +msgid "" +"create_snapshot: error when creating the snapshot name: %(vol_name)s . 
" +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:193 +#, python-format +msgid "" +"create_volume_from_snapshot: error when creating the volume name: " +"%(vol_name)s from snapshot name: %(snap_name)s. WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:208 +#, python-format +msgid "" +"delete_snapshot: error when deleting the snapshot name: %(snap_name)s . " +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:223 +#, python-format +msgid "" +"create_iscsi_target: error when creating iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:240 +#, python-format +msgid "" +"remove_iscsi_target: error when deleting iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:255 +#, python-format +msgid "" +"add_disk_to_target: error adding disk associated to volume : %(vol_name)s" +" to the target name: %(tar_name)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:273 +#, python-format +msgid "" +"copy_vhd_disk: error when copying disk from source path : %(src_path)s to" +" destination path: %(dest_path)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:290 +#, python-format +msgid "" +"extend: error when extending the volume: %(vol_name)s .WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/flows/common.py:52 +#, python-format +msgid "Restoring source %(source_volid)s status to %(status)s" +msgstr "" + +#: cinder/volume/flows/common.py:58 +#, python-format +msgid "" +"Failed setting source volume %(source_volid)s back to its initial " +"%(source_status)s status" +msgstr "" + +#: cinder/volume/flows/common.py:83 +#, python-format +msgid "Updating volume: %(volume_id)s with %(update)s due to: %(reason)s" +msgstr "" + +#: cinder/volume/flows/common.py:90 +#: cinder/volume/flows/manager/create_volume.py:676 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(update)s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:81 +#, python-format +msgid "Originating snapshot status must be one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:103 +#, python-format +msgid "" +"Unable to create a volume from an originating source volume when its " +"status is not one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:126 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than the snapshot size " +"%(snap_size)sGB. They must be >= original snapshot size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:135 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than original volume size " +"%(source_size)sGB. They must be >= original volume size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:144 +#, python-format +msgid "Volume size %(size)s must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:186 +#, python-format +msgid "" +"Size of specified image %(image_size)sGB is larger than volume size " +"%(volume_size)sGB." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:194 +#, python-format +msgid "" +"Volume size %(volume_size)sGB cannot be smaller than the image minDisk " +"size %(min_disk)sGB." 
+msgstr "" + +#: cinder/volume/flows/api/create_volume.py:212 +#, python-format +msgid "Metadata property key %s greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:217 +#, python-format +msgid "Metadata property key %s value greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:254 +#, python-format +msgid "Availability zone '%s' is invalid" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:267 +msgid "Volume must be in the same availability zone as the snapshot" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:276 +msgid "Volume must be in the same availability zone as the source volume" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:315 +msgid "Volume type will be changed to be the same as the source volume." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:463 +#, python-format +msgid "Failed destroying volume entry %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:546 +#, python-format +msgid "Failed rolling back quota for %s reservations" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:590 +#, python-format +msgid "Failed to update quota for deleting volume: %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:678 +#: cinder/volume/flows/manager/create_volume.py:192 +#, python-format +msgid "Volume %s: create failed" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:682 +msgid "Unexpected build error:" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:105 +#, python-format +msgid "" +"Volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d due to " +"%(reason)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:124 +#, python-format +msgid "Volume %s: re-scheduled" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:141 +#, python-format +msgid "Updating volume %(volume_id)s with %(update)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:146 +#, python-format +msgid "Volume %s: resetting 'creating' status failed." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:165 +#, python-format +msgid "Volume %s: rescheduling failed" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:308 +#, python-format +msgid "" +"Failed notifying about the volume action %(event)s for volume " +"%(volume_id)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:345 +#, python-format +msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:347 +#, python-format +msgid "" +"Failed updating volume %(vol_id)s metadata using the provided " +"%(src_type)s %(src_id)s metadata" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:405 +#, python-format +msgid "" +"Failed fetching snapshot %(snapshot_id)s bootable flag using the provided" +" glance snapshot %(snapshot_ref_id)s volume reference" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:418 +#, python-format +msgid "Marking volume %s as bootable." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:421 +#, python-format +msgid "Failed updating volume %(volume_id)s bootable flag to true" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:448 +#, python-format +msgid "" +"Attempting download of %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s." 
+msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:455 +#: cinder/volume/flows/manager/create_volume.py:466 +#, python-format +msgid "" +"Failed to copy image %(image_id)s to volume: %(volume_id)s, error: " +"%(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:461 +#, python-format +msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:475 +#, python-format +msgid "" +"Downloaded image %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s successfully." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:512 +#, python-format +msgid "" +"Creating volume glance metadata for volume %(volume_id)s backed by image " +"%(image_id)s with: %(vol_metadata)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:526 +#, python-format +msgid "" +"Cloning %(volume_id)s from image %(image_id)s at location " +"%(image_location)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:552 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(updates)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:574 +#, python-format +msgid "Unable to create volume. Volume driver %s not initialized" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:588 +#, python-format +msgid "" +"Volume %(volume_id)s: being created using %(functor)s with specification:" +" %(volume_spec)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:611 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with creation provided " +"model %(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:619 +#, python-format +msgid "Volume %s: creating export" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:633 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with driver provided model " +"%(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:680 +#, python-format +msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" +msgstr "" + +#~ msgid "Failed to create iser target for volume %(volume_id)s." +#~ msgstr "" + +#~ msgid "Failed to remove iser target for volume %(volume_id)s." +#~ msgstr "" + +#~ msgid "rtstool is not installed correctly" +#~ msgstr "" + +#~ msgid "Creating iser_target for: %s" +#~ msgstr "" + +#~ msgid "Failed to create iser target for volume id:%(vol_id)s: %(e)s" +#~ msgstr "" + +#~ msgid "" +#~ msgstr "" + +#~ msgid "Removing iser_target for: %s" +#~ msgstr "" + +#~ msgid "Failed to remove iser target for volume id:%(vol_id)s: %(e)s" +#~ msgstr "" + +#~ msgid "Volume %s does not exist, it seems it was already deleted" +#~ msgstr "" + +#~ msgid "Executing zfs send/recv on the appliance" +#~ msgstr "" + +#~ msgid "zfs send/recv done, new volume %s created" +#~ msgstr "" + +#~ msgid "Failed to delete temp snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" + +#~ msgid "Failed to delete zfs recv snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" + +#~ msgid "rbd export-diff failed - %s" +#~ msgstr "" + +#~ msgid "rbd import-diff failed - %s" +#~ msgstr "" + +#~ msgid "%s is not on GPFS. Perhaps GPFS not mounted." +#~ msgstr "" + +#~ msgid "Folder %s does not exist, it seems it was already deleted." +#~ msgstr "" + +#~ msgid "No 'os-update_readonly_flag' was specified in request." +#~ msgstr "" + +#~ msgid "Volume 'readonly' flag must be specified in request as a boolean." 
+#~ msgstr "" + +#~ msgid "ISER provider_location not stored, using discovery" +#~ msgstr "" + +#~ msgid "Could not find iSER export for volume %s" +#~ msgstr "" + +#~ msgid "ISER Discovery: Found %s" +#~ msgstr "" + +#~ msgid "Failed to access the device on the path %(path)s: %(error)s." +#~ msgstr "" + +#~ msgid "iSER device not found at %s" +#~ msgstr "" + +#~ msgid "Found iSER node %(host_device)s (after %(tries)s rescans)." +#~ msgstr "" + +#~ msgid "Skipping ensure_export. No iser_target provisioned for volume: %s" +#~ msgstr "" + +#~ msgid "Skipping remove_export. No iser_target provisioned for volume: %s" +#~ msgstr "" + +#~ msgid "Downloading image: %s from glance image server." +#~ msgstr "" + +#~ msgid "Uploading image: %s to the Glance image server." +#~ msgstr "" + +#~ msgid "Invalid request body" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: prefix %s" +#~ msgstr "" + +#~ msgid "Schedule volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete schedule volume using flow: %s" +#~ msgstr "" + +#~ msgid "Create volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete create volume workflow" +#~ msgstr "" + +#~ msgid "Expected volume result not found" +#~ msgstr "" + +#~ msgid "Manager volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete manager volume workflow" +#~ msgstr "" + +#~ msgid "Unable to update stats, driver is uninitialized" +#~ msgstr "" + +#~ msgid "Bad reponse from server: %s" +#~ msgstr "" + +#~ msgid "%(flow)s has moved into state %(state)s from state %(old_state)s" +#~ msgstr "" + +#~ msgid "No request spec, will not reschedule" +#~ msgstr "" + +#~ msgid "No retry filter property or associated retry info, will not reschedule" +#~ msgstr "" + +#~ msgid "Retry info not present, will not reschedule" +#~ msgstr "" + +#~ msgid "Clear capabilities" +#~ msgstr "" + +#~ msgid "This usually means the volume was never succesfully created." +#~ msgstr "" + +#~ msgid "setting LU uppper (end) limit to %s" +#~ msgstr "" + +#~ msgid "Can't find lun or lun goup in array" +#~ msgstr "" + +#~ msgid "Volume to be restored to is smaller than the backup to be restored" +#~ msgstr "" + +#~ msgid "Volume driver '%(driver)s' not initialized." +#~ msgstr "" + +#~ msgid "in looping call" +#~ msgstr "" + +#~ msgid "Is the appropriate service running?" +#~ msgstr "" + +#~ msgid "Could not find another host" +#~ msgstr "" + +#~ msgid "Not enough allocatable volume gigabytes remaining" +#~ msgstr "" + +#~ msgid "Unable to update stats on non-intialized Volume Group: %s" +#~ msgstr "" + +#~ msgid "do_setup: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "migrate_volume started with more than one vdisk copy" +#~ msgstr "" + +#~ msgid "migrate_volume: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "Selected datastore: %s for the volume." +#~ msgstr "" + +#~ msgid "There are no valid datastores present under %s." +#~ msgstr "" + +#~ msgid "Unable to create volume, driver not initialized" +#~ msgstr "" + +#~ msgid "Migration %(migration_id)s could not be found." +#~ msgstr "" + +#~ msgid "Bad driver response status: %(status)s" +#~ msgstr "" + +#~ msgid "Instance %(instance_id)s could not be found." 
+#~ msgstr "" + +#~ msgid "Volume retype failed: %(reason)s" +#~ msgstr "" + +#~ msgid "SIGTERM received" +#~ msgstr "" + +#~ msgid "Child %(pid)d exited with status %(code)d" +#~ msgstr "" + +#~ msgid "_wait_child %d" +#~ msgstr "" + +#~ msgid "wait wrap.failed %s" +#~ msgstr "" + +#~ msgid "" +#~ "Report interval must be less than " +#~ "service down time. Current config: " +#~ ". Setting " +#~ "service_down_time to: %(new_service_down_time)s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target for volume %(name)s." +#~ msgstr "" + +#~ msgid "Updating iscsi target: %s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target %(name)s: %(e)s" +#~ msgstr "" + +#~ msgid "Caught '%(exception)s' exception." +#~ msgstr "" + +#~ msgid "Get code level failed" +#~ msgstr "" + +#~ msgid "do_setup: Could not get system name" +#~ msgstr "" + +#~ msgid "Failed to get license information." +#~ msgstr "" + +#~ msgid "do_setup: No configured nodes" +#~ msgstr "" + +#~ msgid "enter: _get_chap_secret_for_host: host name %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_chap_secret_for_host: host name " +#~ "%(host_name)s with secret %(chap_secret)s" +#~ msgstr "" + +#~ msgid "" +#~ "_create_host: Cannot clean host name. " +#~ "Host name is not unicode or string" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: %s" +#~ msgstr "" + +#~ msgid "leave: _get_host_from_connector: host %s" +#~ msgstr "" + +#~ msgid "enter: _create_host: host %s" +#~ msgstr "" + +#~ msgid "_create_host: No connector ports" +#~ msgstr "" + +#~ msgid "leave: _create_host: host %(host)s - %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +#~ msgstr "" + +#~ msgid "" +#~ "storwize_svc_multihostmap_enabled is set to " +#~ "False, Not allow multi host mapping" +#~ msgstr "" + +#~ msgid "volume %s mapping to multi host" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _map_vol_to_host: LUN %(result_lun)s, " +#~ "volume %(volume_name)s, host %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "leave: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "_create_host failed to return the host name." +#~ msgstr "" + +#~ msgid "_get_host_from_connector failed to return the host name for connector" +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to any host found." +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: Multiple mappings of " +#~ "volume %(vol_name)s found, no host " +#~ "specified." 
+#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to host %(host_name)s found" +#~ msgstr "" + +#~ msgid "protocol must be specified as ' iSCSI' or ' FC'" +#~ msgstr "" + +#~ msgid "enter: _create_vdisk: vdisk %s " +#~ msgstr "" + +#~ msgid "" +#~ "_create_vdisk %(name)s - did not find success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find success " +#~ "message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find mapping " +#~ "id in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to prepare FlashCopy" +#~ " from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "Unexecpted mapping status %(status)s for " +#~ "mapping %(id)s. Attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Mapping %(id)s prepare failed to " +#~ "complete within the allotted %(to)d " +#~ "seconds timeout. Terminating." +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s with " +#~ "exception %(ex)s" +#~ msgstr "" + +#~ msgid "_prepare_fc_map: %s" +#~ msgstr "" + +#~ msgid "" +#~ "_start_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "enter: _run_flashcopy: execute FlashCopy from" +#~ " source %(source)s to target %(target)s" +#~ msgstr "" + +#~ msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +#~ msgstr "" + +#~ msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %(src_vdisk)s (%(src_id)s) does not exist" +#~ msgstr "" + +#~ msgid "" +#~ "_create_copy: cannot get source vdisk " +#~ "%(src)s capacity from vdisk attributes " +#~ "%(attr)s" +#~ msgstr "" + +#~ msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_flashcopy_mapping_attributes: mapping " +#~ "%(fc_map_id)s, attributes %(attributes)s" +#~ msgstr "" + +#~ msgid "enter: _is_vdisk_defined: vdisk %s " +#~ msgstr "" + +#~ msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +#~ msgstr "" + +#~ msgid "enter: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "warning: Tried to delete vdisk %s but it does not exist." 
+#~ msgstr "" + +#~ msgid "leave: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "" +#~ "_add_vdisk_copy %(name)s - did not find" +#~ " success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "_get_vdisk_copy_attrs: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "_get_pool_attrs: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "enter: _execute_command_and_parse_attributes: command %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _execute_command_and_parse_attributes:\n" +#~ "command: %(cmd)s\n" +#~ "attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "_get_hdr_dic: attribute headers and values do not match.\n" +#~ " Headers: %(header)s\n" +#~ " Values: %(row)s" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ "stdout: %(out)s\n" +#~ "stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "Did not find expected column in %(fun)s: %(hdr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Volume size %(size)s cannot be lesser" +#~ " than the snapshot size %(snap_size)s. " +#~ "They must be >= original snapshot " +#~ "size." +#~ msgstr "" + +#~ msgid "" +#~ "Clones currently disallowed when %(size)s " +#~ "< %(source_size)s. They must be >= " +#~ "original volume size." +#~ msgstr "" + +#~ msgid "" +#~ "Size of specified image %(image_size)s " +#~ "is larger than volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "" +#~ "Image minDisk size %(min_disk)s is " +#~ "larger than the volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "Updating volume %(volume_id)s with %(update)s" +#~ msgstr "" + +#~ msgid "Volume %s: resetting 'creating' status failed" +#~ msgstr "" + +#~ msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s" +#~ msgstr "" + +#~ msgid "Marking volume %s as bootable" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting download of %(image_id)s " +#~ "(%(image_location)s) to volume %(volume_id)s" +#~ msgstr "" + +#~ msgid "" +#~ "Downloaded image %(image_id)s (%(image_location)s)" +#~ " to volume %(volume_id)s successfully" +#~ msgstr "" + +#~ msgid "" +#~ "Creating volume glance metadata for " +#~ "volume %(volume_id)s backed by image " +#~ "%(image_id)s with: %(vol_metadata)s" +#~ msgstr "" + +#~ msgid "" +#~ "Cloning %(volume_id)s from image %(image_id)s" +#~ " at location %(image_location)s" +#~ msgstr "" + diff --git a/cinder/locale/bs/LC_MESSAGES/cinder.po b/cinder/locale/bs/LC_MESSAGES/cinder.po index b59cd8db3d..7296df6ce0 100644 --- a/cinder/locale/bs/LC_MESSAGES/cinder.po +++ b/cinder/locale/bs/LC_MESSAGES/cinder.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: cinder\n" "Report-Msgid-Bugs-To: FULL NAME \n" -"POT-Creation-Date: 2012-04-08 23:04+0000\n" +"POT-Creation-Date: 2014-02-03 06:16+0000\n" "PO-Revision-Date: 2012-01-19 20:22+0000\n" "Last-Translator: yazar \n" "Language-Team: Bosnian \n" @@ -15,8187 +15,10729 @@ msgstr "" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 0.9.6\n" +"Generated-By: Babel 1.3\n" -#: cinder/context.py:59 +#: cinder/context.py:61 #, python-format msgid "Arguments dropped when creating context: %s" msgstr "" -#: cinder/context.py:90 +#: cinder/context.py:102 #, python-format msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" msgstr "" -#: 
cinder/crypto.py:48 -msgid "Filename of root CA" +#: cinder/exception.py:66 cinder/brick/exception.py:33 +msgid "An unknown exception occurred." msgstr "" -#: cinder/crypto.py:51 -msgid "Filename of private key" +#: cinder/exception.py:88 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" msgstr "" -#: cinder/crypto.py:54 -msgid "Filename of root Certificate Revocation List" +#: cinder/exception.py:107 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" msgstr "" -#: cinder/crypto.py:57 -msgid "Where we keep our keys" +#: cinder/exception.py:112 +#, python-format +msgid "Volume driver reported an error: %(message)s" msgstr "" -#: cinder/crypto.py:60 -msgid "Where we keep our root CA" +#: cinder/exception.py:116 +#, python-format +msgid "Backup driver reported an error: %(message)s" msgstr "" -#: cinder/crypto.py:63 -msgid "Should we use a CA for each project?" +#: cinder/exception.py:120 +#, python-format +msgid "Connection to glance failed: %(reason)s" msgstr "" -#: cinder/crypto.py:67 -#, python-format -msgid "Subject for certificate for users, %s for project, user, timestamp" +#: cinder/exception.py:124 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:129 +msgid "User does not have admin privileges" msgstr "" -#: cinder/crypto.py:72 +#: cinder/exception.py:133 #, python-format -msgid "Subject for certificate for projects, %s for project, timestamp" +msgid "Policy doesn't allow %(action)s to be performed." msgstr "" -#: cinder/crypto.py:292 +#: cinder/exception.py:137 #, python-format -msgid "Flags path: %s" +msgid "Not authorized for image %(image_id)s." msgstr "" -#: cinder/exception.py:56 -msgid "Unexpected error while running command." -msgstr "Neočekivana greška prilikom pokretanja komande." +#: cinder/exception.py:141 +msgid "Volume driver not ready." +msgstr "" + +#: cinder/exception.py:145 cinder/brick/exception.py:74 +msgid "Unacceptable parameters." +msgstr "" -#: cinder/exception.py:59 +#: cinder/exception.py:150 #, python-format -msgid "" -"%(description)s\n" -"Command: %(cmd)s\n" -"Exit code: %(exit_code)s\n" -"Stdout: %(stdout)r\n" -"Stderr: %(stderr)r" +msgid "Invalid snapshot: %(reason)s" msgstr "" -#: cinder/exception.py:94 -msgid "DB exception wrapped." +#: cinder/exception.py:154 +#, python-format +msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." msgstr "" -#: cinder/exception.py:155 -msgid "An unknown exception occurred." +#: cinder/exception.py:159 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." msgstr "" -#: cinder/exception.py:178 -msgid "Failed to decrypt text" +#: cinder/exception.py:163 +msgid "Failed to load data into json format" msgstr "" -#: cinder/exception.py:182 -msgid "Failed to paginate through images from image service" +#: cinder/exception.py:167 +msgid "The results are invalid." 
msgstr "" -#: cinder/exception.py:186 -msgid "Virtual Interface creation failed" +#: cinder/exception.py:171 +#, python-format +msgid "Invalid input received: %(reason)s" msgstr "" -#: cinder/exception.py:190 -msgid "5 attempts to create virtual interfacewith unique mac address failed" +#: cinder/exception.py:175 +#, python-format +msgid "Invalid volume type: %(reason)s" msgstr "" -#: cinder/exception.py:195 -msgid "Connection to glance failed" +#: cinder/exception.py:179 +#, python-format +msgid "Invalid volume: %(reason)s" msgstr "" -#: cinder/exception.py:199 -msgid "Connection to melange failed" +#: cinder/exception.py:183 +#, python-format +msgid "Invalid content type %(content_type)s." msgstr "" -#: cinder/exception.py:203 -msgid "Not authorized." +#: cinder/exception.py:187 +#, python-format +msgid "Invalid host: %(reason)s" msgstr "" -#: cinder/exception.py:208 -msgid "User does not have admin privileges" +#: cinder/exception.py:193 cinder/brick/exception.py:81 +#, python-format +msgid "%(err)s" msgstr "" -#: cinder/exception.py:212 +#: cinder/exception.py:197 #, python-format -msgid "Policy doesn't allow %(action)s to be performed." +msgid "Invalid auth key: %(reason)s" msgstr "" -#: cinder/exception.py:216 +#: cinder/exception.py:201 #, python-format -msgid "Not authorized for image %(image_id)s." +msgid "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" msgstr "" -#: cinder/exception.py:220 -msgid "Unacceptable parameters." +#: cinder/exception.py:206 +msgid "Service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:210 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" msgstr "" -#: cinder/exception.py:225 -msgid "Invalid snapshot" +#: cinder/exception.py:214 +#, python-format +msgid "The device in the path %(path)s is unavailable: %(reason)s" msgstr "" -#: cinder/exception.py:229 +#: cinder/exception.py:218 #, python-format -msgid "Volume %(volume_id)s is not attached to anything" +msgid "Expected a uuid but received %(uuid)s." msgstr "" -#: cinder/exception.py:233 cinder/api/openstack/compute/contrib/keypairs.py:113 -msgid "Keypair data is invalid" +#: cinder/exception.py:222 cinder/brick/exception.py:68 +msgid "Resource could not be found." msgstr "" -#: cinder/exception.py:237 -msgid "Failed to load data into json format" +#: cinder/exception.py:228 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:232 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." msgstr "" -#: cinder/exception.py:241 -msgid "The request is invalid." +#: cinder/exception.py:237 +#, python-format +msgid "" +"Volume %(volume_id)s has no administration metadata with key " +"%(metadata_key)s." msgstr "" -#: cinder/exception.py:245 +#: cinder/exception.py:242 #, python-format -msgid "Invalid signature %(signature)s for user %(user)s." +msgid "Invalid metadata: %(reason)s" msgstr "" -#: cinder/exception.py:249 -msgid "Invalid input received" +#: cinder/exception.py:246 +#, python-format +msgid "Invalid metadata size: %(reason)s" msgstr "" -#: cinder/exception.py:253 +#: cinder/exception.py:250 #, python-format -msgid "Invalid instance type %(instance_type)s." +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." msgstr "" -#: cinder/exception.py:257 -msgid "Invalid volume type" +#: cinder/exception.py:255 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." 
msgstr "" -#: cinder/exception.py:261 -msgid "Invalid volume" +#: cinder/exception.py:259 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." msgstr "" -#: cinder/exception.py:265 +#: cinder/exception.py:264 #, python-format -msgid "Invalid port range %(from_port)s:%(to_port)s. %(msg)s" +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." msgstr "" #: cinder/exception.py:269 #, python-format -msgid "Invalid IP protocol %(protocol)s." +msgid "" +"Volume Type %(volume_type_id)s deletion is not allowed with volumes " +"present with the type." msgstr "" -#: cinder/exception.py:273 +#: cinder/exception.py:274 #, python-format -msgid "Invalid content type %(content_type)s." +msgid "Snapshot %(snapshot_id)s could not be found." msgstr "" -#: cinder/exception.py:277 +#: cinder/exception.py:278 #, python-format -msgid "Invalid cidr %(cidr)s." +msgid "deleting volume %(volume_name)s that has snapshot" msgstr "" -#: cinder/exception.py:281 -msgid "Invalid reuse of an RPC connection." +#: cinder/exception.py:282 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" msgstr "" -#: cinder/exception.py:285 -msgid "Invalid Parameter: Unicode is not supported by the current database." +#: cinder/exception.py:287 +#, python-format +msgid "No target id found for volume %(volume_id)s." msgstr "" -#: cinder/exception.py:292 +#: cinder/exception.py:291 #, python-format -msgid "%(err)s" +msgid "Invalid image href %(image_href)s." msgstr "" -#: cinder/exception.py:296 +#: cinder/exception.py:295 #, python-format -msgid "" -"Cannot perform action '%(action)s' on aggregate %(aggregate_id)s. Reason:" -" %(reason)s." +msgid "Image %(image_id)s could not be found." msgstr "" -#: cinder/exception.py:301 +#: cinder/exception.py:299 #, python-format -msgid "Group not valid. Reason: %(reason)s" +msgid "Service %(service_id)s could not be found." msgstr "" -#: cinder/exception.py:305 +#: cinder/exception.py:303 #, python-format -msgid "" -"Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot %(method)s while" -" the instance is in this state." +msgid "Host %(host)s could not be found." msgstr "" -#: cinder/exception.py:310 +#: cinder/exception.py:307 #, python-format -msgid "Instance %(instance_id)s is not running." +msgid "Scheduler Host Filter %(filter_name)s could not be found." msgstr "" -#: cinder/exception.py:314 +#: cinder/exception.py:311 #, python-format -msgid "Instance %(instance_id)s is not suspended." +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." msgstr "" -#: cinder/exception.py:318 +#: cinder/exception.py:315 #, python-format -msgid "Instance %(instance_id)s is not in rescue mode" +msgid "Could not find binary %(binary)s on host %(host)s." msgstr "" -#: cinder/exception.py:322 -msgid "Failed to suspend instance" +#: cinder/exception.py:319 +#, python-format +msgid "Invalid reservation expiration %(expire)s." msgstr "" -#: cinder/exception.py:326 -msgid "Failed to resume server" +#: cinder/exception.py:323 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" msgstr "" -#: cinder/exception.py:330 -msgid "Failed to reboot instance" +#: cinder/exception.py:328 +msgid "Quota could not be found" msgstr "" -#: cinder/exception.py:334 -msgid "Failed to terminate instance" +#: cinder/exception.py:332 +#, python-format +msgid "Unknown quota resources %(unknown)s." 
msgstr "" -#: cinder/exception.py:338 -msgid "Service is unavailable at this time." +#: cinder/exception.py:336 +#, python-format +msgid "Quota for project %(project_id)s could not be found." msgstr "" -#: cinder/exception.py:342 -msgid "Volume service is unavailable at this time." +#: cinder/exception.py:340 +#, python-format +msgid "Quota class %(class_name)s could not be found." msgstr "" -#: cinder/exception.py:346 -msgid "Compute service is unavailable at this time." +#: cinder/exception.py:344 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." msgstr "" -#: cinder/exception.py:350 +#: cinder/exception.py:348 #, python-format -msgid "Unable to migrate instance (%(instance_id)s) to current host (%(host)s)." +msgid "Quota reservation %(uuid)s could not be found." msgstr "" -#: cinder/exception.py:355 -msgid "Destination compute host is unavailable at this time." +#: cinder/exception.py:352 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" msgstr "" -#: cinder/exception.py:359 -msgid "Original compute host is unavailable at this time." +#: cinder/exception.py:356 +#, python-format +msgid "File %(file_path)s could not be found." msgstr "" -#: cinder/exception.py:363 -msgid "The supplied hypervisor type of is invalid." +#: cinder/exception.py:365 +#, python-format +msgid "Volume Type %(id)s already exists." msgstr "" -#: cinder/exception.py:367 -msgid "The instance requires a newer hypervisor version than has been provided." +#: cinder/exception.py:369 +#, python-format +msgid "Volume type encryption for type %(type_id)s already exists." msgstr "" -#: cinder/exception.py:372 +#: cinder/exception.py:373 #, python-format -msgid "" -"The supplied disk path (%(path)s) already exists, it is expected not to " -"exist." +msgid "Malformed message body: %(reason)s" msgstr "" #: cinder/exception.py:377 #, python-format -msgid "The supplied device path (%(path)s) is invalid." +msgid "Could not find config at %(path)s" msgstr "" #: cinder/exception.py:381 #, python-format -msgid "The supplied device (%(device)s) is busy." +msgid "Could not find parameter %(param)s" msgstr "" #: cinder/exception.py:385 -msgid "Unacceptable CPU info" +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" msgstr "" #: cinder/exception.py:389 #, python-format -msgid "%(address)s is not a valid IP v4/6 address." +msgid "No valid host was found. %(reason)s" msgstr "" -#: cinder/exception.py:393 +#: cinder/exception.py:398 #, python-format -msgid "" -"VLAN tag is not appropriate for the port group %(bridge)s. Expected VLAN " -"tag is %(tag)s, but the one associated with the port group is %(pgroup)s." +msgid "Host %(host)s is not up or doesn't exist." msgstr "" -#: cinder/exception.py:399 +#: cinder/exception.py:402 #, python-format -msgid "" -"vSwitch which contains the port group %(bridge)s is not associated with " -"the desired physical adapter. Expected vSwitch is %(expected)s, but the " -"one associated is %(actual)s." +msgid "Quota exceeded: code=%(code)s" msgstr "" -#: cinder/exception.py:406 +#: cinder/exception.py:409 #, python-format -msgid "Disk format %(disk_format)s is not acceptable" +msgid "" +"Requested volume or snapshot exceeds allowed Gigabytes quota. Requested " +"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." 
msgstr "" -#: cinder/exception.py:410 +#: cinder/exception.py:415 #, python-format -msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" msgstr "" -#: cinder/exception.py:414 +#: cinder/exception.py:419 #, python-format -msgid "Instance %(instance_id)s is unacceptable: %(reason)s" +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" msgstr "" -#: cinder/exception.py:418 +#: cinder/exception.py:423 #, python-format -msgid "Ec2 id %(ec2_id)s is unacceptable." -msgstr "" - -#: cinder/exception.py:422 -msgid "Resource could not be found." +msgid "Detected more than one volume with name %(vol_name)s" msgstr "" #: cinder/exception.py:427 #, python-format -msgid "Required flag %(flag)s not set." +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" msgstr "" -#: cinder/exception.py:431 +#: cinder/exception.py:432 #, python-format -msgid "Volume %(volume_id)s could not be found." +msgid "Unknown or unsupported command %(cmd)s" msgstr "" -#: cinder/exception.py:435 +#: cinder/exception.py:436 #, python-format -msgid "Unable to locate account %(account_name)s on Solidfire device" +msgid "Malformed response to command %(cmd)s: %(reason)s" msgstr "" #: cinder/exception.py:440 #, python-format -msgid "Volume not found for instance %(instance_id)s." +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" msgstr "" #: cinder/exception.py:444 #, python-format -msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" msgstr "" #: cinder/exception.py:449 -msgid "Zero volume types found." +#, python-format +msgid "Glance metadata for volume/snapshot %(id)s cannot be found." msgstr "" #: cinder/exception.py:453 #, python-format -msgid "Volume type %(volume_type_id)s could not be found." +msgid "Failed to export for volume: %(reason)s" msgstr "" #: cinder/exception.py:457 #, python-format -msgid "Volume type with name %(volume_type_name)s could not be found." +msgid "Failed to create metadata for volume: %(reason)s" msgstr "" -#: cinder/exception.py:462 +#: cinder/exception.py:461 #, python-format -msgid "" -"Volume Type %(volume_type_id)s has no extra specs with key " -"%(extra_specs_key)s." +msgid "Failed to update metadata for volume: %(reason)s" msgstr "" -#: cinder/exception.py:467 +#: cinder/exception.py:465 #, python-format -msgid "Snapshot %(snapshot_id)s could not be found." +msgid "Failed to copy metadata to volume: %(reason)s" msgstr "" -#: cinder/exception.py:471 +#: cinder/exception.py:469 #, python-format -msgid "deleting volume %(volume_name)s that has snapshot" +msgid "Failed to copy image to volume: %(reason)s" msgstr "" -#: cinder/exception.py:475 -#, python-format -msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +#: cinder/exception.py:473 +msgid "Invalid Ceph args provided for backup rbd operation" msgstr "" -#: cinder/exception.py:480 -#, python-format -msgid "No target id found for volume %(volume_id)s." +#: cinder/exception.py:477 +msgid "An error has occurred during backup operation" msgstr "" -#: cinder/exception.py:484 -#, python-format -msgid "No disk at %(location)s" +#: cinder/exception.py:481 +msgid "Backup RBD operation failed" msgstr "" -#: cinder/exception.py:488 +#: cinder/exception.py:485 #, python-format -msgid "Could not find a handler for %(driver_type)s volume." +msgid "Backup %(backup_id)s could not be found." 
msgstr "" -#: cinder/exception.py:492 +#: cinder/exception.py:489 +msgid "Failed to identify volume backend." +msgstr "" + +#: cinder/exception.py:493 #, python-format -msgid "Invalid image href %(image_href)s." +msgid "Invalid backup: %(reason)s" msgstr "" -#: cinder/exception.py:496 -msgid "" -"Some images have been stored via hrefs. This version of the api does not " -"support displaying image hrefs." +#: cinder/exception.py:497 +#, python-format +msgid "Connection to swift failed: %(reason)s" msgstr "" #: cinder/exception.py:501 #, python-format -msgid "Image %(image_id)s could not be found." +msgid "Transfer %(transfer_id)s could not be found." msgstr "" #: cinder/exception.py:505 #, python-format -msgid "Kernel not found for image %(image_id)s." +msgid "Volume migration failed: %(reason)s" msgstr "" #: cinder/exception.py:509 #, python-format -msgid "User %(user_id)s could not be found." +msgid "SSH command injection detected: %(command)s" msgstr "" #: cinder/exception.py:513 #, python-format -msgid "Project %(project_id)s could not be found." +msgid "QoS Specs %(specs_id)s already exists." msgstr "" #: cinder/exception.py:517 #, python-format -msgid "User %(user_id)s is not a member of project %(project_id)s." +msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." msgstr "" -#: cinder/exception.py:521 +#: cinder/exception.py:522 #, python-format -msgid "Role %(role_id)s could not be found." -msgstr "" - -#: cinder/exception.py:525 -msgid "Cannot find SR to read/write VDI." +msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." msgstr "" -#: cinder/exception.py:529 +#: cinder/exception.py:527 #, python-format -msgid "%(req)s is required to create a network." +msgid "No such QoS spec %(specs_id)s." msgstr "" -#: cinder/exception.py:533 +#: cinder/exception.py:531 #, python-format -msgid "Network %(network_id)s could not be found." +msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." msgstr "" -#: cinder/exception.py:537 +#: cinder/exception.py:536 #, python-format -msgid "Network could not be found for bridge %(bridge)s" +msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." msgstr "" #: cinder/exception.py:541 #, python-format -msgid "Network could not be found for uuid %(uuid)s" +msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." msgstr "" -#: cinder/exception.py:545 +#: cinder/exception.py:546 #, python-format -msgid "Network could not be found with cidr %(cidr)s." +msgid "Invalid qos specs: %(reason)s" msgstr "" -#: cinder/exception.py:549 +#: cinder/exception.py:550 #, python-format -msgid "Network could not be found for instance %(instance_id)s." +msgid "QoS Specs %(specs_id)s is still associated with entities." msgstr "" -#: cinder/exception.py:553 -msgid "No networks defined." +#: cinder/exception.py:554 +#, python-format +msgid "key manager error: %(reason)s" msgstr "" -#: cinder/exception.py:557 -#, python-format -msgid "" -"Either Network uuid %(network_uuid)s is not present or is not assigned to" -" the project %(project_id)s." +#: cinder/exception.py:560 +msgid "Coraid Cinder Driver exception." msgstr "" -#: cinder/exception.py:562 -#, python-format -msgid "Host is not set to the network (%(network_id)s)." +#: cinder/exception.py:564 +msgid "Failed to encode json data." msgstr "" -#: cinder/exception.py:566 -#, python-format -msgid "Network %(network)s has active ports, cannot delete." +#: cinder/exception.py:568 +msgid "Login on ESM failed." 
msgstr "" -#: cinder/exception.py:570 -msgid "Could not find the datastore reference(s) which the VM uses." +#: cinder/exception.py:572 +msgid "Relogin on ESM failed." msgstr "" -#: cinder/exception.py:574 +#: cinder/exception.py:576 #, python-format -msgid "No fixed IP associated with id %(id)s." +msgid "Group with name \"%(group_name)s\" not found." msgstr "" -#: cinder/exception.py:578 +#: cinder/exception.py:580 #, python-format -msgid "Fixed ip not found for address %(address)s." +msgid "ESM configure request failed: %(message)s." msgstr "" -#: cinder/exception.py:582 +#: cinder/exception.py:584 #, python-format -msgid "Instance %(instance_id)s has zero fixed ips." +msgid "Coraid ESM not available with reason: %(reason)s." msgstr "" -#: cinder/exception.py:586 -#, python-format -msgid "Network host %(host)s has zero fixed ips in network %(network_id)s." +#: cinder/exception.py:589 +msgid "Zadara Cinder Driver exception." msgstr "" -#: cinder/exception.py:591 +#: cinder/exception.py:593 #, python-format -msgid "Instance %(instance_id)s doesn't have fixed ip '%(ip)s'." +msgid "Unable to create server object for initiator %(name)s" msgstr "" -#: cinder/exception.py:595 +#: cinder/exception.py:597 #, python-format -msgid "Host %(host)s has zero fixed ips." +msgid "Unable to find server object for initiator %(name)s" msgstr "" -#: cinder/exception.py:599 -#, python-format -msgid "" -"Fixed IP address (%(address)s) does not exist in network " -"(%(network_uuid)s)." +#: cinder/exception.py:601 +msgid "Unable to find any active VPSA controller" msgstr "" -#: cinder/exception.py:604 +#: cinder/exception.py:605 #, python-format -msgid "Fixed IP address %(address)s is already in use." +msgid "Failed to retrieve attachments for volume %(name)s" msgstr "" -#: cinder/exception.py:608 +#: cinder/exception.py:609 #, python-format -msgid "Fixed IP address %(address)s is invalid." +msgid "Invalid attachment info for volume %(name)s: %(reason)s" msgstr "" -#: cinder/exception.py:612 -msgid "Zero fixed ips available." +#: cinder/exception.py:613 +#, python-format +msgid "Bad HTTP response status %(status)s" msgstr "" -#: cinder/exception.py:616 -msgid "Zero fixed ips could be found." +#: cinder/exception.py:618 +msgid "Bad response from SolidFire API" msgstr "" -#: cinder/exception.py:620 -#, python-format -msgid "Floating ip not found for id %(id)s." +#: cinder/exception.py:622 +msgid "SolidFire Cinder Driver exception" msgstr "" -#: cinder/exception.py:624 +#: cinder/exception.py:626 #, python-format -msgid "The DNS entry %(name)s already exists in domain %(domain)s." +msgid "Error in SolidFire API response: data=%(data)s" msgstr "" -#: cinder/exception.py:628 +#: cinder/exception.py:630 #, python-format -msgid "Floating ip not found for address %(address)s." +msgid "Unable to locate account %(account_name)s on Solidfire device" msgstr "" -#: cinder/exception.py:632 +#: cinder/exception.py:636 #, python-format -msgid "Floating ip not found for host %(host)s." +msgid "Invalid 3PAR Domain: %(err)s" msgstr "" -#: cinder/exception.py:636 -msgid "Zero floating ips available." +#: cinder/exception.py:641 +msgid "Unknown NFS exception" msgstr "" -#: cinder/exception.py:640 -#, python-format -msgid "Floating ip %(address)s is associated." +#: cinder/exception.py:645 +msgid "No mounted NFS shares found" msgstr "" -#: cinder/exception.py:644 +#: cinder/exception.py:649 cinder/exception.py:662 #, python-format -msgid "Floating ip %(address)s is not associated." 
+msgid "There is no share which can host %(volume_size)sG" msgstr "" -#: cinder/exception.py:648 -msgid "Zero floating ips exist." +#: cinder/exception.py:654 +msgid "Unknown Gluster exception" msgstr "" -#: cinder/exception.py:652 -#, python-format -msgid "Interface %(interface)s not found." -msgstr "" - -#: cinder/exception.py:656 -#, python-format -msgid "Keypair %(name)s not found for user %(user_id)s" +#: cinder/exception.py:658 +msgid "No mounted Gluster shares found" msgstr "" -#: cinder/exception.py:660 -#, python-format -msgid "Certificate %(certificate_id)s not found." +#: cinder/manager.py:133 +msgid "Notifying Schedulers of capabilities ..." msgstr "" -#: cinder/exception.py:664 -#, python-format -msgid "Service %(service_id)s could not be found." +#: cinder/policy.py:30 +msgid "JSON file representing policy" msgstr "" -#: cinder/exception.py:668 -#, python-format -msgid "Host %(host)s could not be found." +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" msgstr "" -#: cinder/exception.py:672 +#: cinder/quota.py:105 #, python-format -msgid "Compute host %(host)s could not be found." +msgid "" +"Default quota for resource: %(res)s is set by the default quota flag: " +"quota_%(res)s, it is now deprecated. Please use the the default quota " +"class for default quota." msgstr "" -#: cinder/exception.py:676 +#: cinder/quota.py:748 #, python-format -msgid "Could not find binary %(binary)s on host %(host)s." +msgid "Created reservations %s" msgstr "" -#: cinder/exception.py:680 +#: cinder/quota.py:770 #, python-format -msgid "Auth token %(token)s could not be found." +msgid "Failed to commit reservations %s" msgstr "" -#: cinder/exception.py:684 +#: cinder/quota.py:790 #, python-format -msgid "Access Key %(access_key)s could not be found." +msgid "Failed to roll back reservations %s" msgstr "" -#: cinder/exception.py:688 -msgid "Quota could not be found" +#: cinder/quota.py:876 +msgid "Cannot register resource" msgstr "" -#: cinder/exception.py:692 -#, python-format -msgid "Quota for project %(project_id)s could not be found." +#: cinder/quota.py:879 +msgid "Cannot register resources" msgstr "" -#: cinder/exception.py:696 +#: cinder/quota_utils.py:46 #, python-format -msgid "Quota class %(class_name)s could not be found." +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume - " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" msgstr "" -#: cinder/exception.py:700 +#: cinder/quota_utils.py:56 cinder/transfer/api.py:168 +#: cinder/volume/flows/api/create_volume.py:520 #, python-format -msgid "Security group %(security_group_id)s not found." +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" msgstr "" -#: cinder/exception.py:704 +#: cinder/service.py:95 #, python-format -msgid "Security group %(security_group_id)s not found for project %(project_id)s." +msgid "Starting %(topic)s node (version %(version_string)s)" msgstr "" -#: cinder/exception.py:709 +#: cinder/service.py:108 cinder/openstack/common/rpc/service.py:47 #, python-format -msgid "Security group with rule %(rule_id)s not found." +msgid "Creating Consumer connection for Service %s" msgstr "" -#: cinder/exception.py:713 +#: cinder/service.py:148 #, python-format msgid "" -"Security group %(security_group_id)s is already associated with the " -"instance %(instance_id)s" +"Report interval must be less than service down time. 
Current config " +"service_down_time: %(service_down_time)s, report_interval for this: " +"service is: %(report_interval)s. Setting global service_down_time to: " +"%(new_down_time)s" msgstr "" -#: cinder/exception.py:718 -#, python-format -msgid "" -"Security group %(security_group_id)s is not associated with the instance " -"%(instance_id)s" +#: cinder/service.py:216 +msgid "Service killed that has no database entry" msgstr "" -#: cinder/exception.py:723 -#, python-format -msgid "Migration %(migration_id)s could not be found." +#: cinder/service.py:255 +msgid "The service database object disappeared, Recreating it." msgstr "" -#: cinder/exception.py:727 -#, python-format -msgid "Migration not found for instance %(instance_id)s with status %(status)s." +#: cinder/service.py:270 +msgid "Recovered model server connection!" msgstr "" -#: cinder/exception.py:732 -#, python-format -msgid "Console pool %(pool_id)s could not be found." +#: cinder/service.py:276 +msgid "model server went away" msgstr "" -#: cinder/exception.py:736 +#: cinder/service.py:298 #, python-format msgid "" -"Console pool of type %(console_type)s for compute host %(compute_host)s " -"on proxy host %(host)s not found." +"Value of config option %(name)s_workers must be integer greater than 1. " +"Input value ignored." msgstr "" -#: cinder/exception.py:742 -#, python-format -msgid "Console %(console_id)s could not be found." +#: cinder/service.py:373 +msgid "serve() can only be called once" +msgstr "" + +#: cinder/service.py:379 cinder/openstack/common/service.py:166 +#: cinder/openstack/common/service.py:384 +msgid "Full set of CONF:" msgstr "" -#: cinder/exception.py:746 +#: cinder/service.py:387 #, python-format -msgid "Console for instance %(instance_id)s could not be found." +msgid "%s : FLAG SET " msgstr "" -#: cinder/exception.py:750 +#: cinder/utils.py:96 #, python-format -msgid "" -"Console for instance %(instance_id)s in pool %(pool_id)s could not be " -"found." +msgid "Can not translate %s to integer." msgstr "" -#: cinder/exception.py:755 +#: cinder/utils.py:127 #, python-format -msgid "Invalid console type %(console_type)s " +msgid "May specify only one of %s" msgstr "" -#: cinder/exception.py:759 -msgid "Zero instance types found." +#: cinder/utils.py:212 +msgid "Specify a password or private_key" msgstr "" -#: cinder/exception.py:763 +#: cinder/utils.py:228 #, python-format -msgid "Instance type %(instance_type_id)s could not be found." +msgid "Error connecting via ssh: %s" msgstr "" -#: cinder/exception.py:767 +#: cinder/utils.py:412 #, python-format -msgid "Instance type with name %(instance_type_name)s could not be found." +msgid "Invalid backend: %s" msgstr "" -#: cinder/exception.py:772 +#: cinder/utils.py:423 #, python-format -msgid "Flavor %(flavor_id)s could not be found." +msgid "backend %s" msgstr "" -#: cinder/exception.py:776 +#: cinder/utils.py:698 #, python-format -msgid "Cell %(cell_id)s could not be found." +msgid "Could not remove tmpdir: %s" msgstr "" -#: cinder/exception.py:780 +#: cinder/utils.py:759 #, python-format -msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgid "Volume driver %s not initialized" msgstr "" -#: cinder/exception.py:784 +#: cinder/wsgi.py:127 cinder/openstack/common/sslutils.py:50 #, python-format -msgid "Scheduler cost function %(cost_fn_str)s could not be found." 
+msgid "Unable to find cert_file : %s" msgstr "" -#: cinder/exception.py:789 +#: cinder/wsgi.py:130 cinder/openstack/common/sslutils.py:53 #, python-format -msgid "Scheduler weight flag not found: %(flag_name)s" +msgid "Unable to find ca_file : %s" msgstr "" -#: cinder/exception.py:793 +#: cinder/wsgi.py:133 cinder/openstack/common/sslutils.py:56 #, python-format -msgid "Instance %(instance_id)s has no metadata with key %(metadata_key)s." +msgid "Unable to find key_file : %s" msgstr "" -#: cinder/exception.py:798 -#, python-format +#: cinder/wsgi.py:136 cinder/openstack/common/sslutils.py:59 msgid "" -"Instance Type %(instance_type_id)s has no extra specs with key " -"%(extra_specs_key)s." +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" msgstr "" -#: cinder/exception.py:803 -msgid "LDAP object could not be found" +#: cinder/wsgi.py:169 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" msgstr "" -#: cinder/exception.py:807 +#: cinder/wsgi.py:206 #, python-format -msgid "LDAP user %(user_id)s could not be found." +msgid "Started %(name)s on %(host)s:%(port)s" msgstr "" -#: cinder/exception.py:811 -#, python-format -msgid "LDAP group %(group_id)s could not be found." +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." msgstr "" -#: cinder/exception.py:815 -#, python-format -msgid "LDAP user %(user_id)s is not a member of group %(group_id)s." +#: cinder/wsgi.py:244 +msgid "WSGI server has stopped." msgstr "" -#: cinder/exception.py:819 -#, python-format -msgid "File %(file_path)s could not be found." +#: cinder/wsgi.py:313 +msgid "You must implement __call__" msgstr "" -#: cinder/exception.py:823 -msgid "Zero files could be found." +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." msgstr "" -#: cinder/exception.py:827 -#, python-format -msgid "Virtual switch associated with the network adapter %(adapter)s not found." +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." msgstr "" -#: cinder/exception.py:832 -#, python-format -msgid "Network adapter %(adapter)s could not be found." +#: cinder/api/common.py:92 cinder/api/common.py:126 cinder/volume/api.py:266 +msgid "limit param must be an integer" msgstr "" -#: cinder/exception.py:836 -#, python-format -msgid "Class %(class_name)s could not be found: %(exception)s" +#: cinder/api/common.py:95 cinder/api/common.py:130 cinder/volume/api.py:263 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:120 +msgid "offset param must be an integer" msgstr "" -#: cinder/exception.py:840 -msgid "Action not allowed." +#: cinder/api/common.py:134 +msgid "offset param must be positive" msgstr "" -#: cinder/exception.py:844 +#: cinder/api/common.py:162 #, python-format -msgid "Unable to use global role %(role_id)s" +msgid "marker [%s] not found" msgstr "" -#: cinder/exception.py:848 -msgid "Rotation is not allowed for snapshots" +#: cinder/api/common.py:189 +#, python-format +msgid "href %s does not contain version" msgstr "" -#: cinder/exception.py:852 -msgid "Rotation param is required for backup image_type" +#: cinder/api/extensions.py:182 +msgid "Initializing extension manager." 
msgstr "" -#: cinder/exception.py:861 +#: cinder/api/extensions.py:197 #, python-format -msgid "Key pair %(key_name)s already exists." +msgid "Loaded extension: %s" msgstr "" -#: cinder/exception.py:865 +#: cinder/api/extensions.py:235 #, python-format -msgid "User %(user)s already exists." +msgid "Ext name: %s" msgstr "" -#: cinder/exception.py:869 +#: cinder/api/extensions.py:236 #, python-format -msgid "LDAP user %(user)s already exists." +msgid "Ext alias: %s" msgstr "" -#: cinder/exception.py:873 +#: cinder/api/extensions.py:237 #, python-format -msgid "LDAP group %(group)s already exists." +msgid "Ext description: %s" msgstr "" -#: cinder/exception.py:877 +#: cinder/api/extensions.py:239 #, python-format -msgid "User %(uid)s is already a member of the group %(group_dn)s" +msgid "Ext namespace: %s" msgstr "" -#: cinder/exception.py:882 +#: cinder/api/extensions.py:240 #, python-format -msgid "Project %(project)s already exists." +msgid "Ext updated: %s" msgstr "" -#: cinder/exception.py:886 +#: cinder/api/extensions.py:242 #, python-format -msgid "Instance %(name)s already exists." +msgid "Exception loading extension: %s" msgstr "" -#: cinder/exception.py:890 +#: cinder/api/extensions.py:256 #, python-format -msgid "Instance Type %(name)s already exists." +msgid "Loading extension %s" msgstr "" -#: cinder/exception.py:894 +#: cinder/api/extensions.py:262 #, python-format -msgid "Volume Type %(name)s already exists." +msgid "Calling extension factory %s" msgstr "" -#: cinder/exception.py:898 +#: cinder/api/extensions.py:276 #, python-format -msgid "%(path)s is on shared storage: %(reason)s" +msgid "osapi_volume_extension is set to deprecated path: %s" msgstr "" -#: cinder/exception.py:902 -msgid "Migration error" +#: cinder/api/extensions.py:278 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" msgstr "" -#: cinder/exception.py:906 +#: cinder/api/extensions.py:287 #, python-format -msgid "Malformed message body: %(reason)s" +msgid "Failed to load extension %(ext_factory)s: %(exc)s" msgstr "" -#: cinder/exception.py:910 +#: cinder/api/extensions.py:356 #, python-format -msgid "Could not find config at %(path)s" +msgid "Failed to load extension %(classpath)s: %(exc)s" msgstr "" -#: cinder/exception.py:914 +#: cinder/api/extensions.py:381 #, python-format -msgid "Could not load paste app '%(name)s' from %(path)s" +msgid "Failed to load extension %(ext_name)s: %(exc)s" msgstr "" -#: cinder/exception.py:918 -msgid "When resizing, instances must change size!" +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" msgstr "" -#: cinder/exception.py:922 -msgid "Image is larger than instance type allows" +#: cinder/api/xmlutil.py:266 +msgid "element is not a child" msgstr "" -#: cinder/exception.py:926 -msgid "1 or more Zones could not complete the request" +#: cinder/api/xmlutil.py:463 +msgid "root element selecting a list" msgstr "" -#: cinder/exception.py:930 -msgid "Instance type's memory is too small for requested image." +#: cinder/api/xmlutil.py:786 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" msgstr "" -#: cinder/exception.py:934 -msgid "Instance type's disk is too small for requested image." +#: cinder/api/xmlutil.py:907 +msgid "subclasses must implement construct()!" 
msgstr "" -#: cinder/exception.py:938 +#: cinder/api/contrib/admin_actions.py:81 #, python-format -msgid "Insufficient free memory on compute node to start %(uuid)s." +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" msgstr "" -#: cinder/exception.py:942 -msgid "Could not fetch bandwidth/cpu/disk metrics for this host." +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" msgstr "" -#: cinder/exception.py:946 +#: cinder/api/contrib/backups.py:140 #, python-format -msgid "No valid host was found. %(reason)s" +msgid "delete called for member %s" msgstr "" -#: cinder/exception.py:950 +#: cinder/api/contrib/backups.py:143 #, python-format -msgid "Host %(host)s is not up or doesn't exist." +msgid "Delete backup with id: %s" msgstr "" -#: cinder/exception.py:954 -msgid "Quota exceeded" +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" msgstr "" -#: cinder/exception.py:958 -#, python-format -msgid "" -"Aggregate %(aggregate_id)s: action '%(action)s' caused an error: " -"%(reason)s." +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:227 +#: cinder/api/contrib/volume_transfer.py:157 +#: cinder/api/contrib/volume_transfer.py:193 +msgid "Incorrect request body format" msgstr "" -#: cinder/exception.py:963 +#: cinder/api/contrib/backups.py:201 #, python-format -msgid "Aggregate %(aggregate_id)s could not be found." +msgid "Creating backup of volume %(volume_id)s in container %(container)s" msgstr "" -#: cinder/exception.py:967 +#: cinder/api/contrib/backups.py:224 #, python-format -msgid "Aggregate %(aggregate_name)s already exists." +msgid "Restoring backup %(backup_id)s (%(body)s)" msgstr "" -#: cinder/exception.py:971 +#: cinder/api/contrib/backups.py:234 #, python-format -msgid "Aggregate %(aggregate_id)s has no host %(host)s." +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" msgstr "" -#: cinder/exception.py:975 -#, python-format -msgid "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s." +#: cinder/api/contrib/extended_snapshot_attributes.py:60 +msgid "Snapshot not found." msgstr "" -#: cinder/exception.py:980 -#, python-format -msgid "Host %(host)s already member of another aggregate." +#: cinder/api/contrib/hosts.py:86 cinder/api/openstack/wsgi.py:245 +msgid "cannot understand XML" msgstr "" -#: cinder/exception.py:984 +#: cinder/api/contrib/hosts.py:136 #, python-format -msgid "Aggregate %(aggregate_id)s already has host %(host)s." +msgid "Host '%s' could not be found." msgstr "" -#: cinder/exception.py:988 +#: cinder/api/contrib/hosts.py:165 #, python-format -msgid "Detected more than one volume with name %(vol_name)s" +msgid "Invalid status: '%s'" msgstr "" -#: cinder/exception.py:992 +#: cinder/api/contrib/hosts.py:168 #, python-format -msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgid "Invalid update setting: '%s'" msgstr "" -#: cinder/exception.py:997 -msgid "Unable to create instance type" +#: cinder/api/contrib/hosts.py:180 +#, python-format +msgid "Setting host %(host)s to %(state)s." 
msgstr "" -#: cinder/exception.py:1001 -msgid "Bad response from SolidFire API" +#: cinder/api/contrib/hosts.py:206 +msgid "Describe-resource is admin only functionality" msgstr "" -#: cinder/exception.py:1005 -#, python-format -msgid "Error in SolidFire API response: status=%(status)s" +#: cinder/api/contrib/hosts.py:214 +msgid "Host not found" msgstr "" -#: cinder/exception.py:1009 -#, python-format -msgid "Error in SolidFire API response: data=%(data)s" +#: cinder/api/contrib/qos_specs_manage.py:111 +msgid "Please specify a name for QoS specs." msgstr "" -#: cinder/exception.py:1013 -#, python-format -msgid "Detected existing vlan with id %(vlan)d" +#: cinder/api/contrib/qos_specs_manage.py:220 +msgid "Failed to disassociate qos specs." msgstr "" -#: cinder/exception.py:1017 -#, python-format -msgid "Instance %(instance_id)s could not be found." +#: cinder/api/contrib/qos_specs_manage.py:222 +msgid "Qos specs still in use." msgstr "" -#: cinder/exception.py:1021 -#, python-format -msgid "Invalid id: %(val)s (expecting \"i-...\")." +#: cinder/api/contrib/qos_specs_manage.py:298 +#: cinder/api/contrib/qos_specs_manage.py:351 +msgid "Volume Type id must not be None." msgstr "" -#: cinder/exception.py:1025 -#, python-format -msgid "Could not fetch image %(image)s" +#: cinder/api/contrib/quota_classes.py:72 +msgid "Missing required element quota_class_set in request body." msgstr "" -#: cinder/log.py:315 -#, python-format -msgid "syslog facility must be one of: %s" +#: cinder/api/contrib/quota_classes.py:81 +msgid "Quota class limit must be specified as an integer value." msgstr "" -#: cinder/manager.py:146 -#, python-format -msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +#: cinder/api/contrib/quota_classes.py:85 +msgid "Quota class limit must be -1 or greater." msgstr "" -#: cinder/manager.py:152 -#, python-format -msgid "Running periodic task %(full_task_name)s" +#: cinder/api/contrib/quotas.py:60 +msgid "Quota limit must be specified as an integer value." msgstr "" -#: cinder/manager.py:159 -#, python-format -msgid "Error during %(full_task_name)s: %(e)s" +#: cinder/api/contrib/quotas.py:65 +msgid "Quota limit must be -1 or greater." msgstr "" -#: cinder/manager.py:203 -msgid "Notifying Schedulers of capabilities ..." +#: cinder/api/contrib/quotas.py:100 +msgid "Missing required element quota_set in request body." msgstr "" -#: cinder/policy.py:30 -msgid "JSON file representing policy" +#: cinder/api/contrib/quotas.py:111 +#, python-format +msgid "Bad key(s) in quota set: %s" msgstr "" -#: cinder/policy.py:33 -msgid "Rule checked when requested rule is not found" +#: cinder/api/contrib/scheduler_hints.py:36 +msgid "Malformed scheduler_hints attribute" +msgstr "" + +#: cinder/api/contrib/services.py:84 +msgid "" +"Query by service parameter is deprecated. Please use binary parameter " +"instead." msgstr "" -#: cinder/service.py:137 -msgid "SIGTERM received" +#: cinder/api/contrib/snapshot_actions.py:51 +msgid "'status' must be specified." msgstr "" -#: cinder/service.py:177 +#: cinder/api/contrib/snapshot_actions.py:61 #, python-format -msgid "Starting %(topic)s node (version %(vcs_string)s)" +msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" msgstr "" -#: cinder/service.py:195 +#: cinder/api/contrib/snapshot_actions.py:67 #, python-format -msgid "Creating Consumer connection for Service %s" +msgid "" +"Provided snapshot status %(provided)s not allowed for snapshot with " +"status %(current)s." 
msgstr "" -#: cinder/service.py:282 -msgid "Service killed that has no database entry" +#: cinder/api/contrib/snapshot_actions.py:79 +msgid "progress must be an integer percentage" msgstr "" -#: cinder/service.py:319 -msgid "The service database object disappeared, Recreating it." +#: cinder/api/contrib/types_extra_specs.py:101 +msgid "Request body empty" msgstr "" -#: cinder/service.py:334 -msgid "Recovered model server connection!" +#: cinder/api/contrib/types_extra_specs.py:105 +#: cinder/api/v1/snapshot_metadata.py:75 cinder/api/v1/volume_metadata.py:75 +#: cinder/api/v2/snapshot_metadata.py:75 cinder/api/v2/volume_metadata.py:74 +msgid "Request body and URI mismatch" msgstr "" -#: cinder/service.py:340 -msgid "model server went away" +#: cinder/api/contrib/types_extra_specs.py:108 +#: cinder/api/v1/snapshot_metadata.py:79 cinder/api/v1/volume_metadata.py:79 +#: cinder/api/v2/snapshot_metadata.py:79 cinder/api/v2/volume_metadata.py:78 +msgid "Request body contains too many items" msgstr "" -#: cinder/service.py:433 -msgid "Full set of FLAGS:" +#: cinder/api/contrib/types_extra_specs.py:150 +msgid "" +"Key names can only contain alphanumeric characters, underscores, periods," +" colons and hyphens." msgstr "" -#: cinder/service.py:440 +#: cinder/api/contrib/volume_actions.py:99 #, python-format -msgid "%(flag)s : FLAG SET " +msgid "" +"Invalid request to attach volume to an instance %(instance_uuid)s and a " +"host %(host_name)s simultaneously" msgstr "" -#: cinder/utils.py:79 -#, python-format -msgid "Inner Exception: %s" +#: cinder/api/contrib/volume_actions.py:107 +msgid "Invalid request to attach volume to an invalid target" msgstr "" -#: cinder/utils.py:165 -#, python-format -msgid "Fetching %s" +#: cinder/api/contrib/volume_actions.py:111 +msgid "" +"Invalid request to attach volume with an invalid mode. Attaching mode " +"should be 'rw' or 'ro'" msgstr "" -#: cinder/utils.py:210 -#, python-format -msgid "Got unknown keyword args to utils.execute: %r" +#: cinder/api/contrib/volume_actions.py:196 +msgid "Unable to fetch connection information from backend." msgstr "" -#: cinder/utils.py:220 -#, python-format -msgid "Running cmd (subprocess): %s" +#: cinder/api/contrib/volume_actions.py:216 +msgid "Unable to terminate volume connection from backend." msgstr "" -#: cinder/utils.py:236 cinder/utils.py:315 -#, python-format -msgid "Result was %s" +#: cinder/api/contrib/volume_actions.py:229 +msgid "No image_name was specified in request." msgstr "" -#: cinder/utils.py:249 -#, python-format -msgid "%r failed. Retrying." +#: cinder/api/contrib/volume_actions.py:237 +msgid "Bad value for 'force' parameter." msgstr "" -#: cinder/utils.py:291 -#, python-format -msgid "Running cmd (SSH): %s" +#: cinder/api/contrib/volume_actions.py:240 +msgid "'force' is not string or bool." msgstr "" -#: cinder/utils.py:293 -msgid "Environment not supported over SSH" +#: cinder/api/contrib/volume_actions.py:280 +msgid "New volume size must be specified as an integer." msgstr "" -#: cinder/utils.py:297 -msgid "process_input not supported over SSH" +#: cinder/api/contrib/volume_actions.py:299 +msgid "Must specify readonly in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:307 +msgid "Bad value for 'readonly'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:311 +msgid "'readonly' not string or bool" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:325 +msgid "New volume type must be specified." 
+msgstr "" + +#: cinder/api/contrib/volume_transfer.py:131 +msgid "Listing volume transfers" msgstr "" -#: cinder/utils.py:352 +#: cinder/api/contrib/volume_transfer.py:147 #, python-format -msgid "debug in callback: %s" +msgid "Creating new volume transfer %s" msgstr "" -#: cinder/utils.py:534 +#: cinder/api/contrib/volume_transfer.py:162 #, python-format -msgid "Link Local address is not found.:%s" +msgid "Creating transfer of volume %s" msgstr "" -#: cinder/utils.py:537 +#: cinder/api/contrib/volume_transfer.py:183 #, python-format -msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgid "Accepting volume transfer %s" msgstr "" -#: cinder/utils.py:648 +#: cinder/api/contrib/volume_transfer.py:196 #, python-format -msgid "Invalid backend: %s" +msgid "Accepting transfer %s" msgstr "" -#: cinder/utils.py:659 +#: cinder/api/contrib/volume_transfer.py:217 #, python-format -msgid "backend %s" +msgid "Delete transfer with id: %s" msgstr "" -#: cinder/utils.py:709 -msgid "in looping call" +#: cinder/api/contrib/volume_type_encryption.py:64 +msgid "key_size must be non-negative" msgstr "" -#: cinder/utils.py:927 -#, python-format -msgid "Attempting to grab semaphore \"%(lock)s\" for method \"%(method)s\"..." +#: cinder/api/contrib/volume_type_encryption.py:67 +msgid "key_size must be an integer" msgstr "" -#: cinder/utils.py:931 -#, python-format -msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +#: cinder/api/contrib/volume_type_encryption.py:73 +msgid "provider must be defined" msgstr "" -#: cinder/utils.py:935 -#, python-format -msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +#: cinder/api/contrib/volume_type_encryption.py:75 +msgid "control_location must be defined" msgstr "" -#: cinder/utils.py:942 +#: cinder/api/contrib/volume_type_encryption.py:83 #, python-format -msgid "Got file lock \"%(lock)s\" for method \"%(method)s\"..." +msgid "Valid control location are: %s" msgstr "" -#: cinder/utils.py:1001 -#, python-format -msgid "Found sentinel %(filename)s for pid %(pid)s" +#: cinder/api/contrib/volume_type_encryption.py:111 +msgid "Cannot create encryption specs. Volume type in use." msgstr "" -#: cinder/utils.py:1008 -#, python-format -msgid "Cleaned sentinel %(filename)s for pid %(pid)s" +#: cinder/api/contrib/volume_type_encryption.py:115 +msgid "Create body is not valid." msgstr "" -#: cinder/utils.py:1023 -#, python-format -msgid "Found lockfile %(file)s with link count %(count)d" +#: cinder/api/contrib/volume_type_encryption.py:157 +msgid "Cannot delete encryption specs. Volume type in use." msgstr "" -#: cinder/utils.py:1028 -#, python-format -msgid "Cleaned lockfile %(file)s with link count %(count)d" +#: cinder/api/middleware/auth.py:108 +msgid "Invalid service catalog json." 
msgstr "" -#: cinder/utils.py:1138 +#: cinder/api/middleware/fault.py:44 #, python-format -msgid "Expected object of type: %s" +msgid "Caught error: %s" msgstr "" -#: cinder/utils.py:1169 +#: cinder/api/middleware/fault.py:53 cinder/api/openstack/wsgi.py:984 #, python-format -msgid "Invalid server_string: %s" +msgid "%(url)s returned with HTTP %(status)d" msgstr "" -#: cinder/utils.py:1298 +#: cinder/api/middleware/fault.py:69 #, python-format -msgid "timefunc: '%(name)s' took %(total_time).2f secs" +msgid "%(exception)s: %(explanation)s" msgstr "" -#: cinder/utils.py:1330 -msgid "Original exception being dropped" +#: cinder/api/middleware/sizelimit.py:55 cinder/api/middleware/sizelimit.py:64 +#: cinder/api/middleware/sizelimit.py:78 +msgid "Request is too large." msgstr "" -#: cinder/utils.py:1461 -#, python-format -msgid "Class %(fullname)s is deprecated: %(msg)s" +#: cinder/api/openstack/__init__.py:69 +msgid "Must specify an ExtensionManager class" msgstr "" -#: cinder/utils.py:1463 +#: cinder/api/openstack/__init__.py:80 #, python-format -msgid "Class %(fullname)s is deprecated" +msgid "Extended resource: %s" msgstr "" -#: cinder/utils.py:1495 +#: cinder/api/openstack/__init__.py:104 #, python-format -msgid "Function %(name)s in %(location)s is deprecated: %(msg)s" +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" msgstr "" -#: cinder/utils.py:1497 +#: cinder/api/openstack/__init__.py:110 #, python-format -msgid "Function %(name)s in %(location)s is deprecated" +msgid "Extension %(ext_name)s extending resource: %(collection)s" msgstr "" -#: cinder/utils.py:1681 -#, python-format -msgid "Could not remove tmpdir: %s" +#: cinder/api/openstack/__init__.py:126 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." msgstr "" -#: cinder/wsgi.py:97 -#, python-format -msgid "Started %(name)s on %(host)s:%(port)s" +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." msgstr "" -#: cinder/wsgi.py:108 -msgid "Stopping WSGI server." +#: cinder/api/openstack/wsgi.py:220 cinder/api/openstack/wsgi.py:634 +msgid "cannot understand JSON" msgstr "" -#: cinder/wsgi.py:111 -msgid "Stopping raw TCP server." +#: cinder/api/openstack/wsgi.py:639 +msgid "too many body keys" msgstr "" -#: cinder/wsgi.py:117 +#: cinder/api/openstack/wsgi.py:677 #, python-format -msgid "Starting TCP server %(arg0)s on %(host)s:%(port)s" +msgid "Exception handling resource: %s" msgstr "" -#: cinder/wsgi.py:133 -msgid "WSGI server has stopped." +#: cinder/api/openstack/wsgi.py:682 +#, python-format +msgid "Fault thrown: %s" msgstr "" -#: cinder/wsgi.py:211 -msgid "You must implement __call__" +#: cinder/api/openstack/wsgi.py:685 +#, python-format +msgid "HTTP exception thrown: %s" msgstr "" -#: cinder/api/direct.py:218 -msgid "not available" +#: cinder/api/openstack/wsgi.py:793 +msgid "Empty body provided in request" msgstr "" -#: cinder/api/direct.py:299 -#, python-format -msgid "Returned non-serializeable type: %s" +#: cinder/api/openstack/wsgi.py:799 +msgid "Unrecognized Content-Type provided in request" msgstr "" -#: cinder/api/sizelimit.py:51 -msgid "Request is too large." 
+#: cinder/api/openstack/wsgi.py:803 +msgid "No Content-Type provided in request" msgstr "" -#: cinder/api/validator.py:142 +#: cinder/api/openstack/wsgi.py:914 #, python-format -msgid "%(key)s with value %(value)s failed validator %(validator)s" +msgid "There is no such action: %s" msgstr "" -#: cinder/api/ec2/__init__.py:73 -#, python-format -msgid "%(code)s: %(message)s" +#: cinder/api/openstack/wsgi.py:917 cinder/api/openstack/wsgi.py:930 +#: cinder/api/v1/snapshot_metadata.py:53 cinder/api/v1/snapshot_metadata.py:71 +#: cinder/api/v1/snapshot_metadata.py:96 cinder/api/v1/snapshot_metadata.py:121 +#: cinder/api/v1/volume_metadata.py:53 cinder/api/v1/volume_metadata.py:71 +#: cinder/api/v1/volume_metadata.py:96 cinder/api/v1/volume_metadata.py:121 +#: cinder/api/v2/snapshot_metadata.py:53 cinder/api/v2/snapshot_metadata.py:71 +#: cinder/api/v2/snapshot_metadata.py:96 cinder/api/v2/snapshot_metadata.py:121 +#: cinder/api/v2/volume_metadata.py:52 cinder/api/v2/volume_metadata.py:70 +#: cinder/api/v2/volume_metadata.py:95 cinder/api/v2/volume_metadata.py:120 +msgid "Malformed request body" msgstr "" -#: cinder/api/ec2/__init__.py:95 +#: cinder/api/openstack/wsgi.py:927 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:939 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:987 #, python-format -msgid "FaultWrapper: %s" +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." msgstr "" -#: cinder/api/ec2/__init__.py:170 -msgid "Too many failed authentications." +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." msgstr "" -#: cinder/api/ec2/__init__.py:180 +#: cinder/api/v1/limits.py:139 cinder/api/v2/limits.py:138 #, python-format msgid "" -"Access key %(access_key)s has had %(failures)d failed authentications and" -" will be locked out for %(lock_mins)d minutes." +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." msgstr "" -#: cinder/api/ec2/__init__.py:267 -msgid "Signature not provided" +#: cinder/api/v1/limits.py:264 cinder/api/v2/limits.py:261 +msgid "This request was rate-limited." 
msgstr "" -#: cinder/api/ec2/__init__.py:271 -msgid "Access key not provided" +#: cinder/api/v1/snapshot_metadata.py:37 cinder/api/v1/snapshot_metadata.py:117 +#: cinder/api/v1/snapshot_metadata.py:156 cinder/api/v2/snapshot_metadata.py:37 +#: cinder/api/v2/snapshot_metadata.py:117 +#: cinder/api/v2/snapshot_metadata.py:156 +msgid "snapshot does not exist" msgstr "" -#: cinder/api/ec2/__init__.py:306 cinder/api/ec2/__init__.py:319 -msgid "Failure communicating with keystone" +#: cinder/api/v1/snapshot_metadata.py:139 +#: cinder/api/v1/snapshot_metadata.py:149 cinder/api/v1/volume_metadata.py:139 +#: cinder/api/v1/volume_metadata.py:149 cinder/api/v2/snapshot_metadata.py:139 +#: cinder/api/v2/snapshot_metadata.py:149 cinder/api/v2/volume_metadata.py:138 +#: cinder/api/v2/volume_metadata.py:148 +msgid "Metadata item was not found" msgstr "" -#: cinder/api/ec2/__init__.py:388 +#: cinder/api/v1/snapshots.py:119 cinder/api/v2/snapshots.py:120 #, python-format -msgid "Authentication Failure: %s" +msgid "Delete snapshot with id: %s" msgstr "" -#: cinder/api/ec2/__init__.py:404 -#, python-format -msgid "Authenticated Request For %(uname)s:%(pname)s)" +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:184 +msgid "'volume_id' must be specified" msgstr "" -#: cinder/api/ec2/__init__.py:435 +#: cinder/api/v1/snapshots.py:182 cinder/api/v2/snapshots.py:193 #, python-format -msgid "action: %s" +msgid "Create snapshot from volume %s" msgstr "" -#: cinder/api/ec2/__init__.py:437 +#: cinder/api/v1/snapshots.py:186 cinder/api/v2/snapshots.py:202 #, python-format -msgid "arg: %(key)s\t\tval: %(value)s" +msgid "Invalid value '%s' for force. " msgstr "" -#: cinder/api/ec2/__init__.py:512 -#, python-format -msgid "Unauthorized request for controller=%(controller)s and action=%(action)s" +#: cinder/api/v1/volume_metadata.py:37 cinder/api/v1/volume_metadata.py:117 +#: cinder/api/v1/volume_metadata.py:156 cinder/api/v2/volume_metadata.py:36 +#: cinder/api/v2/volume_metadata.py:116 cinder/api/v2/volume_metadata.py:155 +msgid "volume does not exist" msgstr "" -#: cinder/api/ec2/__init__.py:584 +#: cinder/api/v1/volumes.py:111 #, python-format -msgid "InstanceNotFound raised: %s" +msgid "vol=%s" msgstr "" -#: cinder/api/ec2/__init__.py:590 +#: cinder/api/v1/volumes.py:290 cinder/api/v2/volumes.py:228 #, python-format -msgid "VolumeNotFound raised: %s" +msgid "Delete volume with id: %s" msgstr "" -#: cinder/api/ec2/__init__.py:596 -#, python-format -msgid "SnapshotNotFound raised: %s" +#: cinder/api/v1/volumes.py:344 cinder/api/v1/volumes.py:348 +#: cinder/api/v2/volumes.py:298 cinder/api/v2/volumes.py:302 +msgid "Invalid imageRef provided." 
msgstr "" -#: cinder/api/ec2/__init__.py:602 +#: cinder/api/v1/volumes.py:388 cinder/api/v2/volumes.py:354 #, python-format -msgid "NotFound raised: %s" +msgid "snapshot id:%s not found" msgstr "" -#: cinder/api/ec2/__init__.py:605 +#: cinder/api/v1/volumes.py:401 #, python-format -msgid "EC2APIError raised: %s" +msgid "source vol id:%s not found" msgstr "" -#: cinder/api/ec2/__init__.py:613 +#: cinder/api/v1/volumes.py:412 cinder/api/v2/volumes.py:377 #, python-format -msgid "KeyPairExists raised: %s" +msgid "Create volume of %s GB" msgstr "" -#: cinder/api/ec2/__init__.py:617 +#: cinder/api/v1/volumes.py:496 #, python-format -msgid "InvalidParameterValue raised: %s" +msgid "Removing options '%(bad_options)s' from query" msgstr "" -#: cinder/api/ec2/__init__.py:621 +#: cinder/api/v2/snapshots.py:111 cinder/api/v2/snapshots.py:126 +#: cinder/api/v2/snapshots.py:267 +msgid "Snapshot could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:174 cinder/api/v2/snapshots.py:234 +#: cinder/api/v2/volumes.py:313 cinder/api/v2/volumes.py:419 #, python-format -msgid "InvalidPortRange raised: %s" +msgid "Missing required element '%s' in request body" +msgstr "" + +#: cinder/api/v2/snapshots.py:190 cinder/api/v2/volumes.py:217 +#: cinder/api/v2/volumes.py:234 cinder/api/v2/volumes.py:449 +msgid "Volume could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:230 cinder/api/v2/volumes.py:415 +msgid "Missing request body" +msgstr "" + +#: cinder/api/v2/types.py:70 +msgid "Volume type not found" msgstr "" -#: cinder/api/ec2/__init__.py:625 +#: cinder/api/v2/volumes.py:237 +msgid "Volume cannot be deleted while in attached state" +msgstr "" + +#: cinder/api/v2/volumes.py:343 +msgid "Volume type not found." +msgstr "" + +#: cinder/api/v2/volumes.py:366 #, python-format -msgid "NotAuthorized raised: %s" +msgid "source volume id:%s not found" msgstr "" -#: cinder/api/ec2/__init__.py:629 +#: cinder/api/v2/volumes.py:472 #, python-format -msgid "InvalidRequest raised: %s" +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:66 +#, fuzzy +msgid "Backup status must be available or error" +msgstr "Status volumena mora biti omogućen" + +#: cinder/backup/api.py:105 +#, fuzzy +msgid "Volume to be backed up must be available" +msgstr "Status volumena mora biti omogućen" + +#: cinder/backup/api.py:140 +#, fuzzy +msgid "Backup status must be available" +msgstr "Status volumena mora biti omogućen" + +#: cinder/backup/api.py:145 +msgid "Backup to be restored has invalid size" msgstr "" -#: cinder/api/ec2/__init__.py:633 +#: cinder/backup/api.py:154 #, python-format -msgid "QuotaError raised: %s" +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" msgstr "" -#: cinder/api/ec2/__init__.py:637 +#: cinder/backup/api.py:170 +#, fuzzy +msgid "Volume to be restored to must be available" +msgstr "Status volumena mora biti omogućen" + +#: cinder/backup/api.py:176 #, python-format -msgid "Invalid id: bogus (expecting \"i-...\"): %s" +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." msgstr "" -#: cinder/api/ec2/__init__.py:646 +#: cinder/backup/api.py:181 #, python-format -msgid "Unexpected error raised: %s" +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:97 +msgid "NULL host not allowed for volume backend lookup." 
msgstr "" -#: cinder/api/ec2/__init__.py:647 +#: cinder/backup/manager.py:100 #, python-format -msgid "Environment: %s" +msgid "Checking hostname '%s' for backend info." msgstr "" -#: cinder/api/ec2/__init__.py:649 cinder/api/metadata/handler.py:248 -msgid "An unknown error has occurred. Please try your request again." +#: cinder/backup/manager.py:107 +#, python-format +msgid "Backend not found in hostname (%s) so using default." msgstr "" -#: cinder/api/ec2/apirequest.py:64 +#: cinder/backup/manager.py:117 #, python-format -msgid "Unsupported API request: controller = %(controller)s, action = %(action)s" +msgid "Manager requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:120 cinder/backup/manager.py:132 +msgid "Fetching default backend." msgstr "" -#: cinder/api/ec2/cloud.py:336 +#: cinder/backup/manager.py:123 #, python-format -msgid "Create snapshot of volume %s" +msgid "Volume manager for backend '%s' does not exist." msgstr "" -#: cinder/api/ec2/cloud.py:372 +#: cinder/backup/manager.py:129 #, python-format -msgid "" -"Value (%s) for KeyName is invalid. Content limited to Alphanumeric " -"character, spaces, dashes, and underscore." +msgid "Driver requested for volume_backend '%s'." msgstr "" -#: cinder/api/ec2/cloud.py:378 +#: cinder/backup/manager.py:147 #, python-format -msgid "Value (%s) for Keyname is invalid. Length exceeds maximum of 255." +msgid "" +"Registering backend %(backend)s (host=%(host)s " +"backend_name=%(backend_name)s)." msgstr "" -#: cinder/api/ec2/cloud.py:382 +#: cinder/backup/manager.py:154 #, python-format -msgid "Create key pair %s" +msgid "Registering default backend %s." msgstr "" -#: cinder/api/ec2/cloud.py:391 +#: cinder/backup/manager.py:158 #, python-format -msgid "Import key %s" +msgid "Starting volume driver %(driver_name)s (%(version)s)." msgstr "" -#: cinder/api/ec2/cloud.py:409 +#: cinder/backup/manager.py:165 #, python-format -msgid "Delete key pair %s" +msgid "Error encountered during initialization of driver: %(name)s." msgstr "" -#: cinder/api/ec2/cloud.py:551 -msgid "Invalid CIDR" +#: cinder/backup/manager.py:184 +msgid "Cleaning up incomplete backup operations." msgstr "" -#: cinder/api/ec2/cloud.py:639 cinder/api/ec2/cloud.py:693 -#: cinder/api/ec2/cloud.py:800 -msgid "Not enough parameters, need group_name or group_id" +#: cinder/backup/manager.py:189 +#, python-format +msgid "Resetting volume %s to available (was backing-up)." msgstr "" -#: cinder/api/ec2/cloud.py:654 -#: cinder/api/openstack/compute/contrib/security_groups.py:517 +#: cinder/backup/manager.py:194 #, python-format -msgid "Revoke security group ingress %s" +msgid "Resetting volume %s to error_restoring (was restoring-backup)." msgstr "" -#: cinder/api/ec2/cloud.py:666 cinder/api/ec2/cloud.py:719 +#: cinder/backup/manager.py:206 #, python-format -msgid "%s Not enough parameters to build a valid rule" +msgid "Resetting backup %s to error (was creating)." msgstr "" -#: cinder/api/ec2/cloud.py:684 cinder/api/ec2/cloud.py:744 -msgid "No rule for the specified parameters." +#: cinder/backup/manager.py:212 +#, python-format +msgid "Resetting backup %s to available (was restoring)." msgstr "" -#: cinder/api/ec2/cloud.py:708 -#: cinder/api/openstack/compute/contrib/security_groups.py:354 +#: cinder/backup/manager.py:217 #, python-format -msgid "Authorize security group ingress %s" +msgid "Resuming delete on backup: %s." 
msgstr "" -#: cinder/api/ec2/cloud.py:725 +#: cinder/backup/manager.py:225 #, python-format -msgid "%s - This rule already exists in group" +msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." msgstr "" -#: cinder/api/ec2/cloud.py:769 +#: cinder/backup/manager.py:237 #, python-format msgid "" -"Value (%s) for parameter GroupName is invalid. Content limited to " -"Alphanumeric characters, spaces, dashes, and underscores." +"Create backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s." msgstr "" -#: cinder/api/ec2/cloud.py:776 +#: cinder/backup/manager.py:249 #, python-format msgid "" -"Value (%s) for parameter GroupName is invalid. Length exceeds maximum of " -"255." +"Create backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." msgstr "" -#: cinder/api/ec2/cloud.py:780 -#: cinder/api/openstack/compute/contrib/security_groups.py:292 +#: cinder/backup/manager.py:282 #, python-format -msgid "Create Security Group %s" +msgid "Create backup finished. backup: %s." msgstr "" -#: cinder/api/ec2/cloud.py:783 +#: cinder/backup/manager.py:286 #, python-format -msgid "group %s already exists" +msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s." msgstr "" -#: cinder/api/ec2/cloud.py:815 -#: cinder/api/openstack/compute/contrib/security_groups.py:245 +#: cinder/backup/manager.py:299 #, python-format -msgid "Delete security group %s" +msgid "" +"Restore backup aborted: expected volume status %(expected_status)s but " +"got %(actual_status)s." msgstr "" -#: cinder/api/ec2/cloud.py:823 cinder/compute/manager.py:1630 +#: cinder/backup/manager.py:310 #, python-format -msgid "Get console output for instance %s" +msgid "" +"Restore backup aborted: expected backup status %(expected_status)s but " +"got %(actual_status)s." msgstr "" -#: cinder/api/ec2/cloud.py:894 +#: cinder/backup/manager.py:329 #, python-format -msgid "Create volume from snapshot %s" +msgid "" +"Restore backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." msgstr "" -#: cinder/api/ec2/cloud.py:898 cinder/api/openstack/compute/contrib/volumes.py:186 -#: cinder/api/openstack/volume/volumes.py:222 +#: cinder/backup/manager.py:360 #, python-format -msgid "Create volume of %s GB" +msgid "" +"Restore backup finished, backup %(backup_id)s restored to volume " +"%(volume_id)s." msgstr "" -#: cinder/api/ec2/cloud.py:921 -msgid "Delete Failed" +#: cinder/backup/manager.py:379 +#, python-format +msgid "Delete backup started, backup: %s." msgstr "" -#: cinder/api/ec2/cloud.py:931 +#: cinder/backup/manager.py:386 #, python-format -msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" +msgid "" +"Delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." msgstr "" -#: cinder/api/ec2/cloud.py:939 -msgid "Attach Failed." +#: cinder/backup/manager.py:399 +#, python-format +msgid "" +"Delete backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." msgstr "" -#: cinder/api/ec2/cloud.py:952 cinder/api/openstack/compute/contrib/volumes.py:366 +#: cinder/backup/manager.py:422 #, python-format -msgid "Detach volume %s" +msgid "Delete backup finished, backup %s deleted." msgstr "" -#: cinder/api/ec2/cloud.py:959 -msgid "Detach Volume Failed." 
+#: cinder/backup/drivers/ceph.py:116 +msgid "" +"rbd striping not supported - ignoring configuration settings for rbd " +"striping" msgstr "" -#: cinder/api/ec2/cloud.py:984 cinder/api/ec2/cloud.py:1041 -#: cinder/api/ec2/cloud.py:1518 cinder/api/ec2/cloud.py:1533 +#: cinder/backup/drivers/ceph.py:147 #, python-format -msgid "attribute not supported: %s" +msgid "invalid user '%s'" msgstr "" -#: cinder/api/ec2/cloud.py:1107 -#, python-format -msgid "vol = %s\n" +#: cinder/backup/drivers/ceph.py:213 +msgid "backup_id required" msgstr "" -#: cinder/api/ec2/cloud.py:1255 -msgid "Allocate address" +#: cinder/backup/drivers/ceph.py:224 +#, python-format +msgid "discarding %(length)s bytes from offset %(offset)s" msgstr "" -#: cinder/api/ec2/cloud.py:1267 +#: cinder/backup/drivers/ceph.py:232 #, python-format -msgid "Release address %s" +msgid "writing zeroes chunk %d" msgstr "" -#: cinder/api/ec2/cloud.py:1272 +#: cinder/backup/drivers/ceph.py:246 #, python-format -msgid "Associate address %(public_ip)s to instance %(instance_id)s" +msgid "transferring data between '%(src)s' and '%(dest)s'" msgstr "" -#: cinder/api/ec2/cloud.py:1282 +#: cinder/backup/drivers/ceph.py:250 #, python-format -msgid "Disassociate address %s" +msgid "%(chunks)s chunks of %(bytes)s bytes to be transferred" msgstr "" -#: cinder/api/ec2/cloud.py:1308 -msgid "Image must be available" +#: cinder/backup/drivers/ceph.py:269 +#, python-format +msgid "transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s)" msgstr "" -#: cinder/api/ec2/cloud.py:1329 -msgid "Going to start terminating instances" +#: cinder/backup/drivers/ceph.py:279 +#, python-format +msgid "transferring remaining %s bytes" msgstr "" -#: cinder/api/ec2/cloud.py:1343 +#: cinder/backup/drivers/ceph.py:295 #, python-format -msgid "Reboot instance %r" +msgid "creating base image '%s'" msgstr "" -#: cinder/api/ec2/cloud.py:1354 -msgid "Going to stop instances" +#: cinder/backup/drivers/ceph.py:322 cinder/backup/drivers/ceph.py:603 +#, python-format +msgid "deleting backup snapshot='%s'" msgstr "" -#: cinder/api/ec2/cloud.py:1365 -msgid "Going to start instances" +#: cinder/backup/drivers/ceph.py:325 +msgid "no backup snapshot to delete" msgstr "" -#: cinder/api/ec2/cloud.py:1455 +#: cinder/backup/drivers/ceph.py:361 #, python-format -msgid "De-registering image %s" +msgid "trying diff format name format basename='%s'" msgstr "" -#: cinder/api/ec2/cloud.py:1471 -msgid "imageLocation is required" +#: cinder/backup/drivers/ceph.py:369 +#, python-format +msgid "image %s not found" msgstr "" -#: cinder/api/ec2/cloud.py:1490 +#: cinder/backup/drivers/ceph.py:377 #, python-format -msgid "Registered image %(image_location)s with id %(image_id)s" +msgid "base image still has %s snapshots so skipping base image delete" msgstr "" -#: cinder/api/ec2/cloud.py:1536 -msgid "user or group not specified" +#: cinder/backup/drivers/ceph.py:382 +#, python-format +msgid "deleting base image='%s'" msgstr "" -#: cinder/api/ec2/cloud.py:1538 -msgid "only group \"all\" is supported" +#: cinder/backup/drivers/ceph.py:389 +#, python-format +msgid "image busy, retrying %(retries)s more time(s) in %(delay)ss" msgstr "" -#: cinder/api/ec2/cloud.py:1540 -msgid "operation_type must be add or remove" +#: cinder/backup/drivers/ceph.py:394 +msgid "max retries reached - raising error" msgstr "" -#: cinder/api/ec2/cloud.py:1542 +#: cinder/backup/drivers/ceph.py:397 #, python-format -msgid "Updating image %s publicity" +msgid "base backup image='%s' deleted)" msgstr "" -#: cinder/api/ec2/cloud.py:1555 
+#: cinder/backup/drivers/ceph.py:407 #, python-format -msgid "Not allowed to modify attributes for image %s" +msgid "deleting source snap '%s'" msgstr "" -#: cinder/api/ec2/cloud.py:1603 +#: cinder/backup/drivers/ceph.py:453 #, python-format -msgid "Couldn't stop instance with in %d sec" +msgid "performing differential transfer from '%(src)s' to '%(dest)s'" msgstr "" -#: cinder/api/metadata/handler.py:246 cinder/api/metadata/handler.py:253 +#: cinder/backup/drivers/ceph.py:478 #, python-format -msgid "Failed to get metadata for ip: %s" +msgid "rbd diff op failed - (ret=%(ret)s stderr=%(stderr)s)" msgstr "" -#: cinder/api/openstack/__init__.py:43 +#: cinder/backup/drivers/ceph.py:488 #, python-format -msgid "Caught error: %s" +msgid "image '%s' not found - trying diff format name" msgstr "" -#: cinder/api/openstack/__init__.py:45 cinder/api/openstack/wsgi.py:886 +#: cinder/backup/drivers/ceph.py:493 #, python-format -msgid "%(url)s returned with HTTP %(status)d" +msgid "diff format image '%s' not found" msgstr "" -#: cinder/api/openstack/__init__.py:94 -msgid "Must specify an ExtensionManager class" +#: cinder/backup/drivers/ceph.py:528 +#, python-format +msgid "using --from-snap '%s'" msgstr "" -#: cinder/api/openstack/__init__.py:105 +#: cinder/backup/drivers/ceph.py:543 #, python-format -msgid "Extended resource: %s" +msgid "source snap '%s' is stale so deleting" msgstr "" -#: cinder/api/openstack/__init__.py:130 +#: cinder/backup/drivers/ceph.py:555 #, python-format msgid "" -"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " -"resource" +"snap='%(snap)s' does not exist in base image='%(base)s' - aborting " +"incremental backup" msgstr "" -#: cinder/api/openstack/__init__.py:135 +#: cinder/backup/drivers/ceph.py:566 #, python-format -msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgid "creating backup snapshot='%s'" msgstr "" -#: cinder/api/openstack/auth.py:90 +#: cinder/backup/drivers/ceph.py:586 #, python-format -msgid "%(user_id)s could not be found with token '%(token)s'" +msgid "differential backup transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:595 +msgid "differential backup transfer failed" msgstr "" -#: cinder/api/openstack/auth.py:134 +#: cinder/backup/drivers/ceph.py:625 #, python-format -msgid "%(user_id)s must be an admin or a member of %(project_id)s" +msgid "creating base image='%s'" msgstr "" -#: cinder/api/openstack/auth.py:152 -msgid "Authentication requests must be made against a version root (e.g. /v2)." +#: cinder/backup/drivers/ceph.py:634 +msgid "copying data" msgstr "" -#: cinder/api/openstack/auth.py:167 +#: cinder/backup/drivers/ceph.py:694 #, python-format -msgid "Could not find %s in request." +msgid "looking for snapshot of backup base '%s'" msgstr "" -#: cinder/api/openstack/auth.py:191 +#: cinder/backup/drivers/ceph.py:697 #, python-format -msgid "Successfully authenticated '%s'" +msgid "backup base '%s' has no snapshots" msgstr "" -#: cinder/api/openstack/auth.py:241 -msgid "User not found with provided API key." 
+#: cinder/backup/drivers/ceph.py:704 +#, python-format +msgid "backup '%s' has no snapshot" msgstr "" -#: cinder/api/openstack/auth.py:258 +#: cinder/backup/drivers/ceph.py:708 #, python-format -msgid "Provided API key is valid, but not for user '%(username)s'" +msgid "backup should only have one snapshot but instead has %s" msgstr "" -#: cinder/api/openstack/common.py:133 cinder/api/openstack/common.py:167 -msgid "limit param must be an integer" +#: cinder/backup/drivers/ceph.py:713 +#, python-format +msgid "found snapshot '%s'" msgstr "" -#: cinder/api/openstack/common.py:136 cinder/api/openstack/common.py:171 -msgid "limit param must be positive" +#: cinder/backup/drivers/ceph.py:734 +msgid "need non-zero volume size" msgstr "" -#: cinder/api/openstack/common.py:161 -msgid "offset param must be an integer" +#: cinder/backup/drivers/ceph.py:751 +#, python-format +msgid "Starting backup of volume='%s'" msgstr "" -#: cinder/api/openstack/common.py:175 -msgid "offset param must be positive" +#: cinder/backup/drivers/ceph.py:764 +msgid "forcing full backup" msgstr "" -#: cinder/api/openstack/common.py:203 +#: cinder/backup/drivers/ceph.py:776 #, python-format -msgid "marker [%s] not found" +msgid "backup '%s' finished." msgstr "" -#: cinder/api/openstack/common.py:243 +#: cinder/backup/drivers/ceph.py:834 +msgid "adjusting restore vol size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:846 #, python-format -msgid "href %s does not contain version" +msgid "trying incremental restore from base='%(base)s' snap='%(snap)s'" msgstr "" -#: cinder/api/openstack/common.py:278 -msgid "Image metadata limit exceeded" +#: cinder/backup/drivers/ceph.py:858 +msgid "differential restore failed, trying full restore" msgstr "" -#: cinder/api/openstack/common.py:295 +#: cinder/backup/drivers/ceph.py:869 #, python-format -msgid "Converting nw_info: %s" +msgid "restore transfer completed in %.4fs" msgstr "" -#: cinder/api/openstack/common.py:305 +#: cinder/backup/drivers/ceph.py:916 #, python-format -msgid "Converted networks: %s" +msgid "rbd has %s extents" +msgstr "" + +#: cinder/backup/drivers/ceph.py:938 +msgid "dest volume is original volume - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:959 +msgid "destination has extents - forcing full copy" msgstr "" -#: cinder/api/openstack/common.py:338 +#: cinder/backup/drivers/ceph.py:964 #, python-format -msgid "Cannot '%(action)s' while instance is in %(attr)s %(state)s" +msgid "no restore point found for backup='%s', forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:995 +msgid "forcing full restore" msgstr "" -#: cinder/api/openstack/common.py:341 +#: cinder/backup/drivers/ceph.py:1005 #, python-format -msgid "Instance is in an invalid state for '%(action)s'" +msgid "starting restore from Ceph backup=%(src)s to volume=%(dest)s" msgstr "" -#: cinder/api/openstack/common.py:421 -msgid "Rejecting snapshot request, snapshots currently disabled" +#: cinder/backup/drivers/ceph.py:1016 +msgid "volume_file does not support fileno() so skipping fsync()" msgstr "" -#: cinder/api/openstack/common.py:423 -msgid "Instance snapshots are not permitted at this time." +#: cinder/backup/drivers/ceph.py:1021 +msgid "restore finished successfully." 
msgstr "" -#: cinder/api/openstack/extensions.py:188 +#: cinder/backup/drivers/ceph.py:1023 #, python-format -msgid "Loaded extension: %s" +msgid "restore finished with error - %s" msgstr "" -#: cinder/api/openstack/extensions.py:225 +#: cinder/backup/drivers/ceph.py:1029 #, python-format -msgid "Ext name: %s" +msgid "delete started for backup=%s" msgstr "" -#: cinder/api/openstack/extensions.py:226 -#, python-format -msgid "Ext alias: %s" +#: cinder/backup/drivers/ceph.py:1034 +msgid "rbd image not found but continuing anyway so that db entry can be removed" msgstr "" -#: cinder/api/openstack/extensions.py:227 +#: cinder/backup/drivers/ceph.py:1037 #, python-format -msgid "Ext description: %s" +msgid "delete '%s' finished with warning" msgstr "" -#: cinder/api/openstack/extensions.py:229 +#: cinder/backup/drivers/ceph.py:1039 #, python-format -msgid "Ext namespace: %s" +msgid "delete '%s' finished" msgstr "" -#: cinder/api/openstack/extensions.py:230 +#: cinder/backup/drivers/swift.py:106 #, python-format -msgid "Ext updated: %s" +msgid "unsupported compression algorithm: %s" msgstr "" -#: cinder/api/openstack/extensions.py:232 +#: cinder/backup/drivers/swift.py:123 #, python-format -msgid "Exception loading extension: %s" +msgid "single_user auth mode enabled, but %(param)s not set" msgstr "" -#: cinder/api/openstack/extensions.py:246 +#: cinder/backup/drivers/swift.py:141 #, python-format -msgid "Loading extension %s" +msgid "_check_container_exists: container: %s" msgstr "" -#: cinder/api/openstack/extensions.py:252 +#: cinder/backup/drivers/swift.py:146 #, python-format -msgid "Calling extension factory %s" +msgid "container %s does not exist" msgstr "" -#: cinder/api/openstack/extensions.py:264 +#: cinder/backup/drivers/swift.py:151 #, python-format -msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgid "container %s exists" msgstr "" -#: cinder/api/openstack/extensions.py:344 +#: cinder/backup/drivers/swift.py:157 #, python-format -msgid "Failed to load extension %(classpath)s: %(exc)s" +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" msgstr "" -#: cinder/api/openstack/extensions.py:368 +#: cinder/backup/drivers/swift.py:173 #, python-format -msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgid "_generate_swift_object_name_prefix: %s" msgstr "" -#: cinder/api/openstack/wsgi.py:135 cinder/api/openstack/wsgi.py:538 -msgid "cannot understand JSON" +#: cinder/backup/drivers/swift.py:182 +#, python-format +msgid "generated object list: %s" msgstr "" -#: cinder/api/openstack/wsgi.py:159 -#: cinder/api/openstack/compute/contrib/hosts.py:86 -msgid "cannot understand XML" +#: cinder/backup/drivers/swift.py:192 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" msgstr "" -#: cinder/api/openstack/wsgi.py:543 -msgid "too many body keys" +#: cinder/backup/drivers/swift.py:209 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" msgstr "" -#: cinder/api/openstack/wsgi.py:582 -#, python-format -msgid "Exception handling resource: %s" +#: cinder/backup/drivers/swift.py:214 +msgid "_write_metadata finished" msgstr "" -#: cinder/api/openstack/wsgi.py:586 +#: cinder/backup/drivers/swift.py:219 #, python-format -msgid "Fault thrown: %s" +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" msgstr "" -#: 
cinder/api/openstack/wsgi.py:589 +#: cinder/backup/drivers/swift.py:224 #, python-format -msgid "HTTP exception thrown: %s" +msgid "_read_metadata finished (%s)" msgstr "" -#: cinder/api/openstack/wsgi.py:697 -msgid "Unrecognized Content-Type provided in request" +#: cinder/backup/drivers/swift.py:234 +#, python-format +msgid "volume size %d is invalid." msgstr "" -#: cinder/api/openstack/wsgi.py:701 -msgid "No Content-Type provided in request" +#: cinder/backup/drivers/swift.py:248 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" msgstr "" -#: cinder/api/openstack/wsgi.py:705 -msgid "Empty body provided in request" +#: cinder/backup/drivers/swift.py:271 +msgid "reading chunk of data from volume" msgstr "" -#: cinder/api/openstack/wsgi.py:816 +#: cinder/backup/drivers/swift.py:278 #, python-format -msgid "There is no such action: %s" +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" msgstr "" -#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 -#: cinder/api/openstack/compute/server_metadata.py:58 -#: cinder/api/openstack/compute/server_metadata.py:76 -#: cinder/api/openstack/compute/server_metadata.py:101 -#: cinder/api/openstack/compute/server_metadata.py:126 -#: cinder/api/openstack/compute/contrib/admin_actions.py:211 -#: cinder/api/openstack/compute/contrib/console_output.py:52 -msgid "Malformed request body" +#: cinder/backup/drivers/swift.py:287 +msgid "not compressing data" msgstr "" -#: cinder/api/openstack/wsgi.py:829 -msgid "Unsupported Content-Type" +#: cinder/backup/drivers/swift.py:291 +msgid "About to put_object" msgstr "" -#: cinder/api/openstack/wsgi.py:841 -msgid "Malformed request url" +#: cinder/backup/drivers/swift.py:297 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" msgstr "" -#: cinder/api/openstack/wsgi.py:889 +#: cinder/backup/drivers/swift.py:301 #, python-format -msgid "%(url)s returned a fault: %(e)s" +msgid "backup MD5 for %(object_name)s: %(md5)s" msgstr "" -#: cinder/api/openstack/xmlutil.py:265 -msgid "element is not a child" +#: cinder/backup/drivers/swift.py:304 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" msgstr "" -#: cinder/api/openstack/xmlutil.py:414 -msgid "root element selecting a list" +#: cinder/backup/drivers/swift.py:312 +msgid "Calling eventlet.sleep(0)" msgstr "" -#: cinder/api/openstack/xmlutil.py:739 +#: cinder/backup/drivers/swift.py:328 cinder/backup/drivers/tsm.py:324 #, python-format -msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgid "backup %s finished." msgstr "" -#: cinder/api/openstack/xmlutil.py:858 -msgid "subclasses must implement construct()!" +#: cinder/backup/drivers/swift.py:345 +#, python-format +msgid "v1 swift volume backup restore of %s started" msgstr "" -#: cinder/api/openstack/compute/extensions.py:29 -#: cinder/api/openstack/volume/extensions.py:29 -msgid "Initializing extension manager." +#: cinder/backup/drivers/swift.py:350 +#, python-format +msgid "metadata_object_names = %s" msgstr "" -#: cinder/api/openstack/compute/image_metadata.py:40 -#: cinder/api/openstack/compute/images.py:146 -#: cinder/api/openstack/compute/images.py:161 -msgid "Image not found." 
+#: cinder/backup/drivers/swift.py:356 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" msgstr "" -#: cinder/api/openstack/compute/image_metadata.py:79 -msgid "Incorrect request body format" +#: cinder/backup/drivers/swift.py:362 +#, python-format +msgid "" +"restoring object from swift. backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" msgstr "" -#: cinder/api/openstack/compute/image_metadata.py:83 -#: cinder/api/openstack/compute/server_metadata.py:80 -#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:79 -#: cinder/api/openstack/compute/contrib/volumetypes.py:188 -msgid "Request body and URI mismatch" +#: cinder/backup/drivers/swift.py:378 +#, python-format +msgid "decompressing data using %s algorithm" msgstr "" -#: cinder/api/openstack/compute/image_metadata.py:86 -#: cinder/api/openstack/compute/server_metadata.py:84 -#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:82 -#: cinder/api/openstack/compute/contrib/volumetypes.py:191 -msgid "Request body contains too many items" +#: cinder/backup/drivers/swift.py:401 +#, python-format +msgid "v1 swift volume backup restore of %s finished" msgstr "" -#: cinder/api/openstack/compute/image_metadata.py:111 -msgid "Invalid metadata key" +#: cinder/backup/drivers/swift.py:409 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" msgstr "" -#: cinder/api/openstack/compute/ips.py:74 -msgid "Instance does not exist" +#: cinder/backup/drivers/swift.py:423 +#, python-format +msgid "Restoring swift backup version %s" msgstr "" -#: cinder/api/openstack/compute/ips.py:97 -msgid "Instance is not a member of specified network" +#: cinder/backup/drivers/swift.py:428 +#, python-format +msgid "No support to restore swift backup version %s" msgstr "" -#: cinder/api/openstack/compute/limits.py:140 +#: cinder/backup/drivers/swift.py:432 cinder/backup/drivers/tsm.py:378 #, python-format -msgid "" -"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " -"%(unit_string)s." +msgid "restore %(backup_id)s to %(volume_id)s finished." msgstr "" -#: cinder/api/openstack/compute/limits.py:266 -msgid "This request was rate-limited." 
+#: cinder/backup/drivers/swift.py:446 +msgid "swift error while listing objects, continuing with delete" msgstr "" -#: cinder/api/openstack/compute/server_metadata.py:38 -#: cinder/api/openstack/compute/server_metadata.py:122 -#: cinder/api/openstack/compute/server_metadata.py:159 -msgid "Server does not exist" +#: cinder/backup/drivers/swift.py:455 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" msgstr "" -#: cinder/api/openstack/compute/server_metadata.py:141 -#: cinder/api/openstack/compute/server_metadata.py:152 -msgid "Metadata item was not found" +#: cinder/backup/drivers/swift.py:458 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:425 +#: cinder/backup/drivers/swift.py:468 cinder/backup/drivers/tsm.py:440 #, python-format -msgid "Invalid server status: %(status)s" +msgid "delete %s finished" msgstr "" -#: cinder/api/openstack/compute/servers.py:433 -msgid "Invalid changes-since value" +#: cinder/backup/drivers/tsm.py:85 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to create device hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:483 -msgid "Personality file limit exceeded" +#: cinder/backup/drivers/tsm.py:143 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to obtain backup success notification from " +"server.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:485 -msgid "Personality file path too long" +#: cinder/backup/drivers/tsm.py:173 +#, python-format +msgid "" +"restore: %(vol_id)s Failed.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:487 -msgid "Personality file content too long" +#: cinder/backup/drivers/tsm.py:199 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a block device." msgstr "" -#: cinder/api/openstack/compute/servers.py:501 -msgid "Server name is not a string or unicode" +#: cinder/backup/drivers/tsm.py:206 +#, python-format +msgid "backup: %(vol_id)s Failed. Cannot obtain real path to device %(path)s." msgstr "" -#: cinder/api/openstack/compute/servers.py:505 -msgid "Server name is an empty string" +#: cinder/backup/drivers/tsm.py:213 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a file." msgstr "" -#: cinder/api/openstack/compute/servers.py:509 -msgid "Server name must be less than 256 characters." 
+#: cinder/backup/drivers/tsm.py:260 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to remove backup hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:527 +#: cinder/backup/drivers/tsm.py:286 #, python-format -msgid "Bad personality format: missing %s" +msgid "" +"starting backup of volume: %(volume_id)s to TSM, volume path: " +"%(volume_path)s," msgstr "" -#: cinder/api/openstack/compute/servers.py:530 -msgid "Bad personality format" +#: cinder/backup/drivers/tsm.py:298 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:535 +#: cinder/backup/drivers/tsm.py:308 #, python-format -msgid "Personality content for %s cannot be decoded" +msgid "" +"backup: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:550 +#: cinder/backup/drivers/tsm.py:338 #, python-format -msgid "Bad networks format: network uuid is not in proper format (%s)" +msgid "" +"restore: starting restore of backup from TSM to volume %(volume_id)s, " +"backup: %(backup_id)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:559 +#: cinder/backup/drivers/tsm.py:352 #, python-format -msgid "Invalid fixed IP address (%s)" +msgid "" +"restore: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:566 +#: cinder/backup/drivers/tsm.py:362 #, python-format -msgid "Duplicate networks (%s) are not allowed" +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:572 +#: cinder/backup/drivers/tsm.py:413 #, python-format -msgid "Bad network format: missing %s" +msgid "" +"delete: %(vol_id)s Failed to run dsmc with stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:575 -msgid "Bad networks format" +#: cinder/backup/drivers/tsm.py:421 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments with " +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:587 -msgid "Userdata content cannot be decoded" +#: cinder/backup/drivers/tsm.py:432 +#, python-format +msgid "" +"delete: %(vol_id)s Failed with stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:594 -msgid "accessIPv4 is not proper IPv4 format" +#: cinder/brick/exception.py:55 +#, python-format +msgid "Exception in string format operation. msg='%s'" msgstr "" -#: cinder/api/openstack/compute/servers.py:601 -msgid "accessIPv6 is not proper IPv6 format" +#: cinder/brick/exception.py:85 +msgid "We are unable to locate any Fibre Channel devices." msgstr "" -#: cinder/api/openstack/compute/servers.py:633 -msgid "Server name is not defined" +#: cinder/brick/exception.py:89 +msgid "Unable to find a Fibre Channel volume device." msgstr "" -#: cinder/api/openstack/compute/servers.py:675 -#: cinder/api/openstack/compute/servers.py:740 -msgid "Invalid flavorRef provided." +#: cinder/brick/exception.py:93 +#, python-format +msgid "Volume device not found at %(device)s." 
msgstr "" -#: cinder/api/openstack/compute/servers.py:737 -msgid "Can not find requested image" +#: cinder/brick/exception.py:97 +#, python-format +msgid "Unable to find Volume Group: %(vg_name)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:743 -msgid "Invalid key_name provided." +#: cinder/brick/exception.py:101 +#, python-format +msgid "Failed to create Volume Group: %(vg_name)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:829 -#: cinder/api/openstack/compute/servers.py:849 -msgid "Instance has not been resized." +#: cinder/brick/exception.py:105 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." msgstr "" -#: cinder/api/openstack/compute/servers.py:835 +#: cinder/brick/exception.py:109 #, python-format -msgid "Error in confirm-resize %s" +msgid "Failed to remove iscsi target for volume %(volume_id)s." msgstr "" -#: cinder/api/openstack/compute/servers.py:855 +#: cinder/brick/exception.py:113 #, python-format -msgid "Error in revert-resize %s" +msgid "Failed to attach iSCSI target for volume %(volume_id)s." msgstr "" -#: cinder/api/openstack/compute/servers.py:868 -msgid "Argument 'type' for reboot is not HARD or SOFT" +#: cinder/brick/exception.py:117 +#, python-format +msgid "Connect to volume via protocol %(protocol)s not supported." msgstr "" -#: cinder/api/openstack/compute/servers.py:872 -msgid "Missing argument 'type' for reboot" +#: cinder/brick/initiator/connector.py:127 +#, python-format +msgid "Invalid InitiatorConnector protocol specified %(protocol)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:885 +#: cinder/brick/initiator/connector.py:140 #, python-format -msgid "Error in reboot %s" +msgid "Failed to access the device on the path %(path)s: %(error)s %(info)s." msgstr "" -#: cinder/api/openstack/compute/servers.py:897 -msgid "Unable to locate requested flavor." +#: cinder/brick/initiator/connector.py:229 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. Try" +" number: %(tries)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:900 -msgid "Resize requires a change in size." +#: cinder/brick/initiator/connector.py:242 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" msgstr "" -#: cinder/api/openstack/compute/servers.py:924 -msgid "Malformed server entity" +#: cinder/brick/initiator/connector.py:317 +#, python-format +msgid "Could not find the iSCSI Initiator File %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:931 -msgid "Missing imageRef attribute" +#: cinder/brick/initiator/connector.py:609 +msgid "We are unable to locate any Fibre Channel devices" msgstr "" -#: cinder/api/openstack/compute/servers.py:940 -msgid "Invalid imageRef provided." +#: cinder/brick/initiator/connector.py:619 +#, python-format +msgid "Looking for Fibre Channel dev %(device)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:949 -msgid "Missing flavorRef attribute" +#: cinder/brick/initiator/connector.py:629 +msgid "Fibre Channel volume device not found." msgstr "" -#: cinder/api/openstack/compute/servers.py:962 -msgid "No adminPass was specified" +#: cinder/brick/initiator/connector.py:633 +#, python-format +msgid "Fibre volume not yet found. Will rescan & retry. 
Try number: %(tries)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:966 -#: cinder/api/openstack/compute/servers.py:1144 -msgid "Invalid adminPass" +#: cinder/brick/initiator/connector.py:649 +#, python-format +msgid "Found Fibre Channel volume %(name)s (after %(tries)s rescans)" msgstr "" -#: cinder/api/openstack/compute/servers.py:980 -msgid "Unable to parse metadata key/value pairs." +#: cinder/brick/initiator/connector.py:658 +#, python-format +msgid "Multipath device discovered %(device)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:993 -msgid "Resize request has invalid 'flavorRef' attribute." +#: cinder/brick/initiator/connector.py:776 +#, python-format +msgid "AoE volume not yet found at: %(path)s. Try number: %(tries)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:996 -msgid "Resize requests require 'flavorRef' attribute." +#: cinder/brick/initiator/connector.py:789 +#, python-format +msgid "Found AoE device %(path)s (after %(tries)s rediscover)" msgstr "" -#: cinder/api/openstack/compute/servers.py:1014 -#: cinder/api/openstack/compute/contrib/aggregates.py:142 -#: cinder/api/openstack/compute/contrib/networks.py:65 -msgid "Invalid request body" +#: cinder/brick/initiator/connector.py:815 +#, python-format +msgid "aoe-discover: stdout=%(out)s stderr%(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:1019 -msgid "Could not parse imageRef from request." +#: cinder/brick/initiator/connector.py:825 +#, python-format +msgid "aoe-revalidate %(dev)s: stdout=%(out)s stderr%(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:1071 -msgid "Instance could not be found" +#: cinder/brick/initiator/connector.py:834 +#, python-format +msgid "aoe-flush %(dev)s: stdout=%(out)s stderr%(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:1074 -msgid "Cannot find image for rebuild" +#: cinder/brick/initiator/connector.py:858 +msgid "" +"Connection details not present. RemoteFsClient may not initialize " +"properly." msgstr "" -#: cinder/api/openstack/compute/servers.py:1103 -msgid "createImage entity requires name attribute" +#: cinder/brick/initiator/connector.py:915 +msgid "Invalid connection_properties specified no device_path attribute" msgstr "" -#: cinder/api/openstack/compute/servers.py:1112 -#: cinder/api/openstack/compute/contrib/admin_actions.py:238 -msgid "Invalid metadata" +#: cinder/brick/initiator/linuxfc.py:50 cinder/brick/initiator/linuxfc.py:56 +msgid "systool is not installed" msgstr "" -#: cinder/api/openstack/compute/servers.py:1167 +#: cinder/brick/initiator/linuxscsi.py:99 +#: cinder/brick/initiator/linuxscsi.py:107 +#: cinder/brick/initiator/linuxscsi.py:124 #, python-format -msgid "Removing options '%(unk_opt_str)s' from query" +msgid "multipath call failed exit (%(code)s)" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:60 +#: cinder/brick/initiator/linuxscsi.py:145 #, python-format -msgid "Compute.api::pause %s" +msgid "Couldn't find multipath device %(line)s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:77 +#: cinder/brick/initiator/linuxscsi.py:149 #, python-format -msgid "Compute.api::unpause %s" +msgid "Found multipath device = %(mdev)s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:94 -#, python-format -msgid "compute.api::suspend %s" +#: cinder/brick/iscsi/iscsi.py:140 +msgid "Attempting recreate of backing lun..." 
msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:111 +#: cinder/brick/iscsi/iscsi.py:158 #, python-format -msgid "compute.api::resume %s" +msgid "" +"Failed to recover attempt to create iscsi backing lun for volume " +"id:%(vol_id)s: %(e)s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:127 +#: cinder/brick/iscsi/iscsi.py:177 #, python-format -msgid "Error in migrate %s" +msgid "Creating iscsi_target for: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:141 +#: cinder/brick/iscsi/iscsi.py:184 #, python-format -msgid "Compute.api::reset_network %s" +msgid "" +"Created volume path %(vp)s,\n" +"content: %(vc)%" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:154 -#: cinder/api/openstack/compute/contrib/admin_actions.py:170 -#: cinder/api/openstack/compute/contrib/admin_actions.py:186 -#: cinder/api/openstack/compute/contrib/multinic.py:41 -#: cinder/api/openstack/compute/contrib/rescue.py:44 -msgid "Server not found" +#: cinder/brick/iscsi/iscsi.py:216 cinder/brick/iscsi/iscsi.py:365 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:157 +#: cinder/brick/iscsi/iscsi.py:227 #, python-format -msgid "Compute.api::inject_network_info %s" +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:173 +#: cinder/brick/iscsi/iscsi.py:258 #, python-format -msgid "Compute.api::lock %s" +msgid "Removing iscsi_target for: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:189 +#: cinder/brick/iscsi/iscsi.py:262 #, python-format -msgid "Compute.api::unlock %s" +msgid "Volume path %s does not exist, nothing to remove." msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:219 +#: cinder/brick/iscsi/iscsi.py:280 #, python-format -msgid "createBackup entity requires %s attribute" +msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:223 -msgid "Malformed createBackup entity" +#: cinder/brick/iscsi/iscsi.py:290 cinder/brick/iscsi/iscsi.py:550 +msgid "valid iqn needed for show_target" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:229 -msgid "createBackup attribute 'rotation' must be an integer" +#: cinder/brick/iscsi/iscsi.py:375 +#, python-format +msgid "Removing iscsi_target for volume: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:244 -#: cinder/api/openstack/compute/contrib/console_output.py:47 -#: cinder/api/openstack/compute/contrib/console_output.py:59 -#: cinder/api/openstack/compute/contrib/consoles.py:49 -#: cinder/api/openstack/compute/contrib/consoles.py:60 -#: cinder/api/openstack/compute/contrib/server_action_list.py:49 -#: cinder/api/openstack/compute/contrib/server_diagnostics.py:47 -#: cinder/api/openstack/compute/contrib/server_start_stop.py:38 -msgid "Instance not found" +#: cinder/brick/iscsi/iscsi.py:469 +msgid "cinder-rtstool is not installed correctly" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:272 -msgid "host and block_migration must be specified." 
+#: cinder/brick/iscsi/iscsi.py:489 +#, python-format +msgid "Creating iscsi_target for volume: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:284 +#: cinder/brick/iscsi/iscsi.py:513 cinder/brick/iscsi/iscsi.py:522 #, python-format -msgid "Live migration of instance %(id)s to host %(host)s failed" +msgid "Failed to create iscsi target for volume id:%s." msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:76 +#: cinder/brick/iscsi/iscsi.py:532 #, python-format -msgid "" -"Cannot create aggregate with name %(name)s and availability zone " -"%(avail_zone)s" +msgid "Removing iscsi_target: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:88 +#: cinder/brick/iscsi/iscsi.py:542 #, python-format -msgid "Cannot show aggregate: %(id)s" +msgid "Failed to remove iscsi target for volume id:%s." msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:114 +#: cinder/brick/iscsi/iscsi.py:571 #, python-format -msgid "Cannot update aggregate: %(id)s" +msgid "Failed to add initiator iqn %s to target" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:126 -#, python-format -msgid "Cannot delete aggregate: %(id)s" +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:139 +#: cinder/brick/local_dev/lvm.py:76 cinder/brick/local_dev/lvm.py:158 +#: cinder/brick/local_dev/lvm.py:474 cinder/brick/local_dev/lvm.py:503 +#: cinder/brick/local_dev/lvm.py:546 cinder/brick/local_dev/lvm.py:609 #, python-format -msgid "Aggregates does not have %s action" +msgid "Cmd :%s" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:152 -#: cinder/api/openstack/compute/contrib/aggregates.py:158 +#: cinder/brick/local_dev/lvm.py:77 cinder/brick/local_dev/lvm.py:159 +#: cinder/brick/local_dev/lvm.py:475 cinder/brick/local_dev/lvm.py:504 +#: cinder/brick/local_dev/lvm.py:547 cinder/brick/local_dev/lvm.py:610 #, python-format -msgid "Cannot add host %(host)s in aggregate %(id)s" +msgid "StdOut :%s" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:171 -#: cinder/api/openstack/compute/contrib/aggregates.py:175 +#: cinder/brick/local_dev/lvm.py:78 cinder/brick/local_dev/lvm.py:160 +#: cinder/brick/local_dev/lvm.py:476 cinder/brick/local_dev/lvm.py:505 +#: cinder/brick/local_dev/lvm.py:548 cinder/brick/local_dev/lvm.py:611 #, python-format -msgid "Cannot remove host %(host)s in aggregate %(id)s" +msgid "StdErr :%s" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:195 +#: cinder/brick/local_dev/lvm.py:82 #, python-format -msgid "Cannot set metadata %(metadata)s in aggregate %(id)s" +msgid "Unable to locate Volume Group %s" msgstr "" -#: cinder/api/openstack/compute/contrib/certificates.py:75 -msgid "Only root certificate can be retrieved." +#: cinder/brick/local_dev/lvm.py:157 +msgid "Error querying thin pool about data_percent" msgstr "" -#: cinder/api/openstack/compute/contrib/cloudpipe.py:146 -msgid "" -"Unable to claim IP for VPN instances, ensure it isn't running, and try " -"again in a few minutes" +#: cinder/brick/local_dev/lvm.py:370 +#, python-format +msgid "Unable to find VG: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/consoles.py:44 -msgid "Missing type specification" +#: cinder/brick/local_dev/lvm.py:420 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." 
msgstr "" -#: cinder/api/openstack/compute/contrib/consoles.py:56 -msgid "Invalid type specification" +#: cinder/brick/local_dev/lvm.py:473 +msgid "Error creating Volume" msgstr "" -#: cinder/api/openstack/compute/contrib/disk_config.py:44 +#: cinder/brick/local_dev/lvm.py:489 #, python-format -msgid "%s must be either 'MANUAL' or 'AUTO'." -msgstr "" - -#: cinder/api/openstack/compute/contrib/extended_server_attributes.py:77 -#: cinder/api/openstack/compute/contrib/extended_status.py:61 -msgid "Server not found." +msgid "Unable to find LV: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/flavorextradata.py:61 -#: cinder/api/openstack/compute/contrib/flavorextradata.py:91 -msgid "Flavor not found." +#: cinder/brick/local_dev/lvm.py:502 +msgid "Error creating snapshot" msgstr "" -#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:49 -#: cinder/api/openstack/compute/contrib/volumetypes.py:158 -msgid "No Request Body" +#: cinder/brick/local_dev/lvm.py:545 +msgid "Error activating LV" msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:159 +#: cinder/brick/local_dev/lvm.py:563 #, python-format -msgid "No more floating ips in pool %s." +msgid "Error reported running lvremove: CMD: %(command)s, RESPONSE: %(response)s" msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:161 -msgid "No more floating ips available." +#: cinder/brick/local_dev/lvm.py:568 +msgid "Attempting udev settle and retry of lvremove..." msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:201 -#: cinder/api/openstack/compute/contrib/floating_ips.py:230 -#: cinder/api/openstack/compute/contrib/security_groups.py:571 -#: cinder/api/openstack/compute/contrib/security_groups.py:604 -msgid "Missing parameter dict" +#: cinder/brick/local_dev/lvm.py:608 +msgid "Error extending Volume" msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:204 -#: cinder/api/openstack/compute/contrib/floating_ips.py:233 -msgid "Address not specified" +#: cinder/brick/remotefs/remotefs.py:39 +msgid "nfs_mount_point_base required" msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:213 -msgid "No fixed ips associated to instance" +#: cinder/brick/remotefs/remotefs.py:45 +msgid "glusterfs_mount_point_base required" msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:216 -msgid "Associate floating ip failed" +#: cinder/brick/remotefs/remotefs.py:86 +#, python-format +msgid "Already mounted: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:144 -#, python-format -msgid "Invalid status: '%s'" +#: cinder/common/config.py:125 +msgid "Deploy v1 of the Cinder API." msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:148 -#, python-format -msgid "Invalid mode: '%s'" +#: cinder/common/config.py:128 +msgid "Deploy v2 of the Cinder API." msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:152 -#, python-format -msgid "Invalid update setting: '%s'" +#: cinder/common/sqlalchemyutils.py:66 +#: cinder/openstack/common/db/sqlalchemy/utils.py:72 +msgid "Id not in sort_keys; is sort_keys unique?" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:170 -#, python-format -msgid "Putting host %(host)s in maintenance mode %(mode)s." +#: cinder/common/sqlalchemyutils.py:114 +#: cinder/openstack/common/db/sqlalchemy/utils.py:120 +msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:181 +#: cinder/compute/nova.py:97 #, python-format -msgid "Setting host %(host)s to %(state)s." 
+msgid "Novaclient connection created using URL: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:230 -msgid "Describe-resource is admin only functionality" +#: cinder/db/sqlalchemy/api.py:63 +msgid "Use of empty request context is deprecated" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:238 -msgid "Host not found" +#: cinder/db/sqlalchemy/api.py:190 +#, python-format +msgid "Unrecognized read_deleted value '%s'" msgstr "" -#: cinder/api/openstack/compute/contrib/keypairs.py:70 -msgid "Keypair name contains unsafe characters" +#: cinder/db/sqlalchemy/api.py:843 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/keypairs.py:95 -msgid "Keypair name must be between 1 and 255 characters long" +#: cinder/db/sqlalchemy/api.py:1842 +#, python-format +msgid "VolumeType %s deletion failed, VolumeType in use." msgstr "" -#: cinder/api/openstack/compute/contrib/keypairs.py:100 +#: cinder/db/sqlalchemy/api.py:2530 #, python-format -msgid "Key pair '%s' already exists." +msgid "No backup with id %s" msgstr "" -#: cinder/api/openstack/compute/contrib/multinic.py:52 -msgid "Missing 'networkId' argument for addFixedIp" -msgstr "" +#: cinder/db/sqlalchemy/api.py:2615 +#, fuzzy +msgid "Volume must be available" +msgstr "Status volumena mora biti omogućen" -#: cinder/api/openstack/compute/contrib/multinic.py:68 -msgid "Missing 'address' argument for removeFixedIp" +#: cinder/db/sqlalchemy/api.py:2639 +#, python-format +msgid "Volume in unexpected state %s, expected awaiting-transfer" msgstr "" -#: cinder/api/openstack/compute/contrib/multinic.py:77 +#: cinder/db/sqlalchemy/api.py:2662 #, python-format -msgid "Unable to find address %r" +msgid "" +"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " +"%(status)s, expected awaiting-transfer" msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:62 -#, python-format -msgid "Network does not have %s action" +#: cinder/db/sqlalchemy/migration.py:37 +msgid "version should be an integer" msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:70 -#, python-format -msgid "Disassociating network with id %s" +#: cinder/db/sqlalchemy/migration.py:64 +msgid "Upgrade DB using Essex release first." msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:74 -#: cinder/api/openstack/compute/contrib/networks.py:91 -#: cinder/api/openstack/compute/contrib/networks.py:101 -msgid "Network not found" +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:240 +msgid "Exception while creating table." msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:87 -#, python-format -msgid "Showing network with id %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:269 +msgid "Downgrade from initial Cinder install is unsupported." 
msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:97 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:49 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:74 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:105 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:45 +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:48 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:46 #, python-format -msgid "Deleting network with id %s" +msgid "Table |%s| not created!" msgstr "" -#: cinder/api/openstack/compute/contrib/scheduler_hints.py:41 -msgid "Malformed scheduler_hints attribute" +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:127 +msgid "Dropping foreign key reservations_ibfk_1 failed." msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:222 -msgid "Security group id should be integer" +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:133 +msgid "quota_classes table not dropped" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:243 -msgid "Security group is still in use" +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:140 +msgid "quota_usages table not dropped" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:295 -#, python-format -msgid "Security group %s already exists" +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:147 +msgid "reservations table not dropped" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:315 -#, python-format -msgid "Security group %s is not a string or unicode" +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:60 +msgid "Exception while creating table 'volume_glance_metadata'" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:318 -#, python-format -msgid "Security group %s cannot be empty." +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:75 +msgid "volume_glance_metadata table not dropped" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:321 -#, python-format -msgid "Security group %s should not be greater than 255 characters." +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:68 +msgid "backups table not dropped" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:348 -msgid "Parent group id is not integer" +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:58 +msgid "snapshot_metadata table not dropped" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:351 -#, python-format -msgid "Security group (%s) not found" +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:61 +msgid "transfers table not dropped" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:369 -msgid "Not enough parameters to build a valid rule." 
+#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:31 +msgid "migrations table not dropped" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:376 +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:61 #, python-format -msgid "This rule already exists in group %s" +msgid "Table |%s| not created" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:414 -msgid "Parent or group id is not integer" +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:37 +#, python-format +msgid "Exception while dropping table %s." msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:507 -msgid "Rule id is not integer" +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:100 +#, python-format +msgid "Exception while creating table %s." msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:510 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:34 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:43 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:49 #, python-format -msgid "Rule (%s) not found" +msgid "Column |%s| not created!" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:574 -#: cinder/api/openstack/compute/contrib/security_groups.py:607 -msgid "Security group not specified" +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:92 +msgid "encryption_key_id column not dropped from volumes" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:578 -#: cinder/api/openstack/compute/contrib/security_groups.py:611 -msgid "Security group name cannot be empty" +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:100 +msgid "encryption_key_id column not dropped from snapshots" msgstr "" -#: cinder/api/openstack/compute/contrib/server_start_stop.py:45 -#, python-format -msgid "start instance %r" +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:105 +msgid "volume_type_id column not dropped from snapshots" msgstr "" -#: cinder/api/openstack/compute/contrib/server_start_stop.py:54 -#, python-format -msgid "stop instance %r" +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:113 +msgid "encryption table not dropped" msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:73 -#: cinder/api/openstack/volume/volumes.py:106 -#, python-format -msgid "vol=%s" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:49 +msgid "Table quality_of_service_specs not created!" msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:146 -#: cinder/api/openstack/volume/volumes.py:184 -#, python-format -msgid "Delete volume with id: %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:60 +msgid "Added qos_specs_id column to volume type table failed." 
msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:329 -#, python-format -msgid "Attach volume %(volume_id)s to instance %(server_id)s at %(device)s" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:85 +msgid "Dropping foreign key volume_types_ibfk_1 failed" msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:481 -#: cinder/api/openstack/volume/snapshots.py:110 -#, python-format -msgid "Delete snapshot with id: %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:93 +msgid "Dropping qos_specs_id column failed." msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:524 -#: cinder/api/openstack/volume/snapshots.py:150 -#, python-format -msgid "Create snapshot from volume %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:100 +msgid "Dropping quality_of_service_specs table failed." msgstr "" -#: cinder/auth/fakeldap.py:33 -msgid "Attempted to instantiate singleton" +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:59 +msgid "volume_admin_metadata table not dropped" msgstr "" -#: cinder/auth/ldapdriver.py:650 -#, python-format +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:48 msgid "" -"Attempted to remove the last member of a group. Deleting the group at %s " -"instead." +"Found existing 'default' entries in the quota_classes table. Skipping " +"insertion of default values." msgstr "" -#: cinder/auth/manager.py:298 -#, python-format -msgid "Looking up user: %r" +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:72 +msgid "Added default quota class data into the DB." msgstr "" -#: cinder/auth/manager.py:302 -#, python-format -msgid "Failed authorization for access key %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:74 +msgid "Default quota class data not inserted into the DB." msgstr "" -#: cinder/auth/manager.py:308 +#: cinder/image/glance.py:161 cinder/image/glance.py:169 #, python-format -msgid "Using project name = user name (%s)" +msgid "Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s." msgstr "" -#: cinder/auth/manager.py:315 +#: cinder/image/image_utils.py:94 cinder/image/image_utils.py:199 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/image/image_utils.py:101 #, python-format -msgid "failed authorization: no project named %(pjid)s (user=%(uname)s)" +msgid "fmt=%(fmt)s backed by: %(backing_file)s" msgstr "" -#: cinder/auth/manager.py:324 +#: cinder/image/image_utils.py:109 cinder/image/image_utils.py:192 #, python-format msgid "" -"Failed authorization: user %(uname)s not admin and not member of project " -"%(pjname)s" +"Size is %(image_size)dGB and doesn't fit in a volume of size " +"%(volume_size)dGB." msgstr "" -#: cinder/auth/manager.py:331 cinder/auth/manager.py:343 +#: cinder/image/image_utils.py:157 #, python-format -msgid "user.secret: %s" +msgid "" +"qemu-img is not installed and image is of type %s. Only RAW images can " +"be used if qemu-img is not installed." +msgstr "" + +#: cinder/image/image_utils.py:164 +msgid "" +"qemu-img is not installed and the disk format is not specified. Only RAW" +" images can be used if qemu-img is not installed." 
msgstr "" -#: cinder/auth/manager.py:332 cinder/auth/manager.py:344 +#: cinder/image/image_utils.py:178 #, python-format -msgid "expected_signature: %s" +msgid "Copying image from %(tmp)s to volume %(dest)s - size: %(size)s" msgstr "" -#: cinder/auth/manager.py:333 cinder/auth/manager.py:345 +#: cinder/image/image_utils.py:206 #, python-format -msgid "signature: %s" +msgid "fmt=%(fmt)s backed by:%(backing_file)s" msgstr "" -#: cinder/auth/manager.py:335 cinder/auth/manager.py:357 +#: cinder/image/image_utils.py:224 #, python-format -msgid "Invalid signature for user %s" +msgid "Converted to %(vol_format)s, but format is now %(file_format)s" msgstr "" -#: cinder/auth/manager.py:353 +#: cinder/image/image_utils.py:260 #, python-format -msgid "host_only_signature: %s" +msgid "Converted to %(f1)s, but format is now %(f2)s" msgstr "" -#: cinder/auth/manager.py:449 -msgid "Must specify project" +#: cinder/keymgr/conf_key_mgr.py:78 +msgid "" +"config option keymgr.fixed_key has not been defined: some operations may " +"fail unexpectedly" msgstr "" -#: cinder/auth/manager.py:490 -#, python-format -msgid "Adding role %(role)s to user %(uid)s in project %(pid)s" +#: cinder/keymgr/conf_key_mgr.py:80 +msgid "keymgr.fixed_key not defined" msgstr "" -#: cinder/auth/manager.py:493 +#: cinder/keymgr/conf_key_mgr.py:134 #, python-format -msgid "Adding sitewide role %(role)s to user %(uid)s" +msgid "Not deleting key %s" msgstr "" -#: cinder/auth/manager.py:519 +#: cinder/openstack/common/eventlet_backdoor.py:140 #, python-format -msgid "Removing role %(role)s from user %(uid)s on project %(pid)s" +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" msgstr "" -#: cinder/auth/manager.py:522 +#: cinder/openstack/common/excutils.py:48 #, python-format -msgid "Removing sitewide role %(role)s from user %(uid)s" +msgid "Original exception being dropped: %s" msgstr "" -#: cinder/auth/manager.py:595 +#: cinder/openstack/common/fileutils.py:64 #, python-format -msgid "Created project %(name)s with manager %(manager_user)s" +msgid "Reloading cached file %s" msgstr "" -#: cinder/auth/manager.py:613 -#, python-format -msgid "modifying project %s" +#: cinder/openstack/common/gettextutils.py:252 +msgid "Message objects do not support addition." msgstr "" -#: cinder/auth/manager.py:625 -#, python-format -msgid "Adding user %(uid)s to project %(pid)s" +#: cinder/openstack/common/gettextutils.py:261 +msgid "" +"Message objects do not support str() because they may contain non-ascii " +"characters. Please use unicode() or translate() instead." msgstr "" -#: cinder/auth/manager.py:646 -#, python-format -msgid "Remove user %(uid)s from project %(pid)s" +#: cinder/openstack/common/imageutils.py:96 +msgid "Snapshot list encountered but no header found!" msgstr "" -#: cinder/auth/manager.py:676 +#: cinder/openstack/common/lockutils.py:102 #, python-format -msgid "Deleting project %s" +msgid "Could not release the acquired lock `%s`" msgstr "" -#: cinder/auth/manager.py:734 +#: cinder/openstack/common/lockutils.py:189 #, python-format -msgid "Created user %(rvname)s (admin: %(rvadmin)r)" +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." msgstr "" -#: cinder/auth/manager.py:743 +#: cinder/openstack/common/lockutils.py:200 #, python-format -msgid "Deleting user %s" +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." 
msgstr "" -#: cinder/auth/manager.py:753 +#: cinder/openstack/common/lockutils.py:227 #, python-format -msgid "Access Key change for user %s" +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." msgstr "" -#: cinder/auth/manager.py:755 +#: cinder/openstack/common/lockutils.py:235 #, python-format -msgid "Secret Key change for user %s" +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." msgstr "" -#: cinder/auth/manager.py:757 +#: cinder/openstack/common/log.py:301 #, python-format -msgid "Admin status set to %(admin)r for user %(uid)s" +msgid "Deprecated: %s" msgstr "" -#: cinder/auth/manager.py:802 +#: cinder/openstack/common/log.py:402 #, python-format -msgid "No vpn data for project %s" +msgid "Error loading logging config %(log_config)s: %(err_msg)s" msgstr "" -#: cinder/cloudpipe/pipelib.py:46 -msgid "Instance type for vpn instances" +#: cinder/openstack/common/log.py:453 +#, python-format +msgid "syslog facility must be one of: %s" msgstr "" -#: cinder/cloudpipe/pipelib.py:49 -msgid "Template for cloudpipe instance boot script" +#: cinder/openstack/common/log.py:623 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" msgstr "" -#: cinder/cloudpipe/pipelib.py:52 -msgid "Network to push into openvpn config" +#: cinder/openstack/common/loopingcall.py:82 +#, python-format +msgid "task run outlasted interval by %s sec" msgstr "" -#: cinder/cloudpipe/pipelib.py:55 -msgid "Netmask to push into openvpn config" +#: cinder/openstack/common/loopingcall.py:89 +#: cinder/tests/brick/test_brick_connector.py:466 +msgid "in fixed duration looping call" msgstr "" -#: cinder/cloudpipe/pipelib.py:107 +#: cinder/openstack/common/loopingcall.py:129 #, python-format -msgid "Launching VPN for %s" +msgid "Dynamic looping call sleeping for %.02f seconds" msgstr "" -#: cinder/compute/api.py:141 -msgid "No compute host specified" +#: cinder/openstack/common/loopingcall.py:136 +msgid "in dynamic looping call" msgstr "" -#: cinder/compute/api.py:144 +#: cinder/openstack/common/periodic_task.py:43 #, python-format -msgid "Unable to find host for Instance %s" +msgid "Unexpected argument for periodic task creation: %(arg)s." msgstr "" -#: cinder/compute/api.py:192 +#: cinder/openstack/common/periodic_task.py:134 #, python-format -msgid "" -"Quota exceeded for %(pid)s, tried to set %(num_metadata)s metadata " -"properties" +msgid "Skipping periodic task %(task)s because its interval is negative" msgstr "" -#: cinder/compute/api.py:203 +#: cinder/openstack/common/periodic_task.py:139 #, python-format -msgid "Quota exceeded for %(pid)s, metadata property key or value too long" -msgstr "" - -#: cinder/compute/api.py:257 -msgid "Cannot run any more instances of this type." +msgid "Skipping periodic task %(task)s because it is disabled" msgstr "" -#: cinder/compute/api.py:259 +#: cinder/openstack/common/periodic_task.py:177 #, python-format -msgid "Can only run %s more instances of this type." +msgid "Running periodic task %(full_task_name)s" msgstr "" -#: cinder/compute/api.py:261 +#: cinder/openstack/common/periodic_task.py:186 #, python-format -msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances. " +msgid "Error during %(full_task_name)s: %(e)s" msgstr "" -#: cinder/compute/api.py:310 -msgid "Creating a raw instance" +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." 
msgstr "" -#: cinder/compute/api.py:312 +#: cinder/openstack/common/policy.py:163 #, python-format -msgid "Using Kernel=%(kernel_id)s, Ramdisk=%(ramdisk_id)s" +msgid "Failed to understand rule %(match)r" msgstr "" -#: cinder/compute/api.py:383 +#: cinder/openstack/common/policy.py:173 #, python-format -msgid "Going to run %s instances..." +msgid "Inheritance-based rules are deprecated; update _check_%s" msgstr "" -#: cinder/compute/api.py:447 +#: cinder/openstack/common/policy.py:180 #, python-format -msgid "bdm %s" +msgid "No handler for matches of kind %s" msgstr "" -#: cinder/compute/api.py:474 +#: cinder/openstack/common/processutils.py:127 #, python-format -msgid "block_device_mapping %s" +msgid "Got unknown keyword args to utils.execute: %r" msgstr "" -#: cinder/compute/api.py:591 +#: cinder/openstack/common/processutils.py:142 #, python-format -msgid "Sending create to scheduler for %(pid)s/%(uid)s's" +msgid "Running cmd (subprocess): %s" msgstr "" -#: cinder/compute/api.py:871 -msgid "Going to try to soft delete instance" +#: cinder/openstack/common/processutils.py:167 +#: cinder/openstack/common/processutils.py:239 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:345 +#, python-format +msgid "Result was %s" msgstr "" -#: cinder/compute/api.py:891 -msgid "No host for instance, deleting immediately" +#: cinder/openstack/common/processutils.py:179 +#, python-format +msgid "%r failed. Retrying." msgstr "" -#: cinder/compute/api.py:939 -msgid "Going to try to terminate instance" +#: cinder/openstack/common/processutils.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:318 +#, python-format +msgid "Running cmd (SSH): %s" msgstr "" -#: cinder/compute/api.py:977 -msgid "Going to try to stop instance" +#: cinder/openstack/common/processutils.py:220 +msgid "Environment not supported over SSH" msgstr "" -#: cinder/compute/api.py:996 -msgid "Going to try to start instance" +#: cinder/openstack/common/processutils.py:224 +msgid "process_input not supported over SSH" msgstr "" -#: cinder/compute/api.py:1000 +#: cinder/openstack/common/service.py:175 +#: cinder/openstack/common/service.py:269 #, python-format -msgid "Instance %(instance_uuid)s is not stopped. (%(vm_state)s" +msgid "Caught %s, exiting" msgstr "" -#: cinder/compute/api.py:1071 cinder/volume/api.py:173 -#: cinder/volume/volume_types.py:64 -#, python-format -msgid "Searching by: %s" +#: cinder/openstack/common/service.py:187 +msgid "Exception during rpc cleanup." msgstr "" -#: cinder/compute/api.py:1201 -#, python-format -msgid "Image type not recognized %s" +#: cinder/openstack/common/service.py:238 +msgid "Parent process has died unexpectedly, exiting" msgstr "" -#: cinder/compute/api.py:1369 -msgid "flavor_id is None. Assuming migration." 
+#: cinder/openstack/common/service.py:275 +msgid "Unhandled exception" msgstr "" -#: cinder/compute/api.py:1377 -#, python-format -msgid "" -"Old instance type %(current_instance_type_name)s, new instance type " -"%(new_instance_type_name)s" +#: cinder/openstack/common/service.py:308 +msgid "Forking too fast, sleeping" msgstr "" -#: cinder/compute/api.py:1644 +#: cinder/openstack/common/service.py:327 #, python-format -msgid "multiple fixedips exist, using the first: %s" +msgid "Started child %d" msgstr "" -#: cinder/compute/instance_types.py:57 cinder/compute/instance_types.py:65 -msgid "create arguments must be positive integers" +#: cinder/openstack/common/service.py:337 +#, python-format +msgid "Starting %d workers" msgstr "" -#: cinder/compute/instance_types.py:76 cinder/volume/volume_types.py:41 +#: cinder/openstack/common/service.py:354 #, python-format -msgid "DB error: %s" +msgid "Child %(pid)d killed by signal %(sig)d" msgstr "" -#: cinder/compute/instance_types.py:86 +#: cinder/openstack/common/service.py:358 #, python-format -msgid "Instance type %s not found for deletion" +msgid "Child %(pid)s exited with status %(code)d" msgstr "" -#: cinder/compute/manager.py:138 +#: cinder/openstack/common/service.py:362 #, python-format -msgid "check_instance_lock: decorating: |%s|" +msgid "pid %d not in child list" msgstr "" -#: cinder/compute/manager.py:140 +#: cinder/openstack/common/service.py:392 #, python-format -msgid "" -"check_instance_lock: arguments: |%(self)s| |%(context)s| " -"|%(instance_uuid)s|" +msgid "Caught %s, stopping children" msgstr "" -#: cinder/compute/manager.py:144 +#: cinder/openstack/common/service.py:410 #, python-format -msgid "check_instance_lock: locked: |%s|" +msgid "Waiting on %d children to exit" msgstr "" -#: cinder/compute/manager.py:146 +#: cinder/openstack/common/sslutils.py:98 #, python-format -msgid "check_instance_lock: admin: |%s|" +msgid "Invalid SSL version : %s" msgstr "" -#: cinder/compute/manager.py:151 +#: cinder/openstack/common/strutils.py:86 #, python-format -msgid "check_instance_lock: executing: |%s|" +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" msgstr "" -#: cinder/compute/manager.py:155 +#: cinder/openstack/common/strutils.py:182 #, python-format -msgid "check_instance_lock: not executing |%s|" +msgid "Invalid string format: %s" msgstr "" -#: cinder/compute/manager.py:201 +#: cinder/openstack/common/strutils.py:189 #, python-format -msgid "Unable to load the virtualization driver: %s" +msgid "Unknown byte multiplier: %s" msgstr "" -#: cinder/compute/manager.py:223 +#: cinder/openstack/common/versionutils.py:69 #, python-format msgid "" -"Instance %(instance_uuid)s has been destroyed from under us while trying " -"to set it to ERROR" +"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and " +"may be removed in %(remove_in)s." msgstr "" -#: cinder/compute/manager.py:240 +#: cinder/openstack/common/versionutils.py:73 #, python-format -msgid "Current state is %(drv_state)s, state in DB is %(db_state)s." +msgid "" +"%(what)s is deprecated as of %(as_of)s and may be removed in " +"%(remove_in)s. It will not be superseded." msgstr "" -#: cinder/compute/manager.py:245 -msgid "Rebooting instance after cinder-compute restart." +#: cinder/openstack/common/crypto/utils.py:29 +msgid "An unknown error occurred in crypto utils." 
msgstr "" -#: cinder/compute/manager.py:255 -msgid "Hypervisor driver does not support firewall rules" +#: cinder/openstack/common/crypto/utils.py:36 +#, python-format +msgid "Block size of %(given)d is too big, max = %(maximum)d" msgstr "" -#: cinder/compute/manager.py:260 -msgid "Checking state" +#: cinder/openstack/common/crypto/utils.py:45 +#, python-format +msgid "Length of %(given)d is too long, max = %(maximum)d" msgstr "" -#: cinder/compute/manager.py:329 -#, python-format -msgid "Setting up bdm %s" +#: cinder/openstack/common/db/exception.py:44 +msgid "Invalid Parameter: Unicode is not supported by the current database." msgstr "" -#: cinder/compute/manager.py:400 -#, python-format -msgid "Instance %s already deleted from database. Attempting forceful vm deletion" +#: cinder/openstack/common/db/sqlalchemy/session.py:487 +msgid "DB exception wrapped." msgstr "" -#: cinder/compute/manager.py:406 +#: cinder/openstack/common/db/sqlalchemy/session.py:538 #, python-format -msgid "Exception encountered while terminating the instance %s" +msgid "Got mysql server has gone away: %s" msgstr "" -#: cinder/compute/manager.py:444 +#: cinder/openstack/common/db/sqlalchemy/session.py:610 #, python-format -msgid "Instance %s not found." +msgid "SQL connection failed. %s attempts left." msgstr "" -#: cinder/compute/manager.py:480 -msgid "Instance has already been created" +#: cinder/openstack/common/db/sqlalchemy/utils.py:33 +msgid "Sort key supplied was not valid." msgstr "" -#: cinder/compute/manager.py:523 +#: cinder/openstack/common/notifier/api.py:129 #, python-format -msgid "" -"image_id=%(image_id)s, image_size_bytes=%(size_bytes)d, " -"allowed_size_bytes=%(allowed_size_bytes)d" +msgid "%s not in valid priorities" msgstr "" -#: cinder/compute/manager.py:528 +#: cinder/openstack/common/notifier/api.py:145 #, python-format msgid "" -"Image '%(image_id)s' size %(size_bytes)d exceeded instance_type allowed " -"size %(allowed_size_bytes)d" -msgstr "" - -#: cinder/compute/manager.py:538 -msgid "Starting instance..." +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" msgstr "" -#: cinder/compute/manager.py:548 -msgid "Skipping network allocation for instance" +#: cinder/openstack/common/notifier/api.py:164 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." msgstr "" -#: cinder/compute/manager.py:561 -msgid "Instance failed network setup" +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead." msgstr "" -#: cinder/compute/manager.py:565 +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 #, python-format -msgid "Instance network_info: |%s|" -msgstr "" - -#: cinder/compute/manager.py:578 -msgid "Instance failed block device setup" +msgid "Could not send notification to %(topic)s. Payload=%(message)s" msgstr "" -#: cinder/compute/manager.py:594 -msgid "Instance failed to spawn" +#: cinder/openstack/common/rpc/__init__.py:105 +#, python-format +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." 
msgstr "" -#: cinder/compute/manager.py:615 -msgid "Deallocating network for instance" +#: cinder/openstack/common/rpc/amqp.py:83 +msgid "Pool creating new connection" msgstr "" -#: cinder/compute/manager.py:672 +#: cinder/openstack/common/rpc/amqp.py:208 #, python-format -msgid "%(action_str)s instance" +msgid "no calling threads waiting for msg_id : %s, message : %s" msgstr "" -#: cinder/compute/manager.py:699 +#: cinder/openstack/common/rpc/amqp.py:216 #, python-format -msgid "Ignoring DiskNotFound: %s" +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." msgstr "" -#: cinder/compute/manager.py:708 +#: cinder/openstack/common/rpc/amqp.py:299 #, python-format -msgid "terminating bdm %s" +msgid "unpacked context: %s" msgstr "" -#: cinder/compute/manager.py:742 cinder/compute/manager.py:1328 -#: cinder/compute/manager.py:1416 cinder/compute/manager.py:2501 +#: cinder/openstack/common/rpc/amqp.py:345 #, python-format -msgid "%s. Setting instance vm_state to ERROR" +msgid "UNIQUE_ID is %s." msgstr "" -#: cinder/compute/manager.py:811 +#: cinder/openstack/common/rpc/amqp.py:414 #, python-format -msgid "" -"Cannot rebuild instance [%(instance_uuid)s], because the given image does" -" not exist." +msgid "received %s" msgstr "" -#: cinder/compute/manager.py:816 +#: cinder/openstack/common/rpc/amqp.py:422 #, python-format -msgid "Cannot rebuild instance [%(instance_uuid)s]: %(exc)s" +msgid "no method for message: %s" msgstr "" -#: cinder/compute/manager.py:823 +#: cinder/openstack/common/rpc/amqp.py:423 #, python-format -msgid "Rebuilding instance %s" +msgid "No method for message: %s" msgstr "" -#: cinder/compute/manager.py:876 +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:280 #, python-format -msgid "Rebooting instance %s" +msgid "Expected exception during message handling (%s)" msgstr "" -#: cinder/compute/manager.py:891 -#, python-format -msgid "" -"trying to reboot a non-running instance: %(instance_uuid)s (state: " -"%(state)s expected: %(running)s)" +#: cinder/openstack/common/rpc/amqp.py:459 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" msgstr "" -#: cinder/compute/manager.py:933 +#: cinder/openstack/common/rpc/amqp.py:594 #, python-format -msgid "instance %s: snapshotting" +msgid "Making synchronous call on %s ..." msgstr "" -#: cinder/compute/manager.py:939 +#: cinder/openstack/common/rpc/amqp.py:597 #, python-format -msgid "" -"trying to snapshot a non-running instance: %(instance_uuid)s (state: " -"%(state)s expected: %(running)s)" +msgid "MSG_ID is %s" msgstr "" -#: cinder/compute/manager.py:995 +#: cinder/openstack/common/rpc/amqp.py:631 #, python-format -msgid "Found %(num_images)d images (rotation: %(rotation)d)" +msgid "Making asynchronous cast on %s..." msgstr "" -#: cinder/compute/manager.py:1001 -#, python-format -msgid "Rotating out %d backups" +#: cinder/openstack/common/rpc/amqp.py:640 +msgid "Making asynchronous fanout cast..." msgstr "" -#: cinder/compute/manager.py:1005 +#: cinder/openstack/common/rpc/amqp.py:668 #, python-format -msgid "Deleting image %s" +msgid "Sending %(event_type)s on %(topic)s" msgstr "" -#: cinder/compute/manager.py:1035 -#, python-format -msgid "Failed to set admin password. Instance %s is not running" +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." 
msgstr "" -#: cinder/compute/manager.py:1041 +#: cinder/openstack/common/rpc/common.py:104 #, python-format -msgid "Instance %s: Root password set" +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." msgstr "" -#: cinder/compute/manager.py:1050 -msgid "set_admin_password is not implemented by this driver." +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" msgstr "" -#: cinder/compute/manager.py:1064 -msgid "Error setting admin password" +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" msgstr "" -#: cinder/compute/manager.py:1079 +#: cinder/openstack/common/rpc/common.py:143 #, python-format -msgid "" -"trying to inject a file into a non-running instance: %(instance_uuid)s " -"(state: %(current_power_state)s expected: %(expected_state)s)" +msgid "Found duplicate message(%(msg_id)s). Skipping it." msgstr "" -#: cinder/compute/manager.py:1084 -#, python-format -msgid "instance %(instance_uuid)s: injecting file to %(path)s" +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." msgstr "" -#: cinder/compute/manager.py:1098 +#: cinder/openstack/common/rpc/common.py:151 #, python-format -msgid "" -"trying to update agent on a non-running instance: %(instance_uuid)s " -"(state: %(current_power_state)s expected: %(expected_state)s)" +msgid "Specified RPC version, %(version)s, not supported by this endpoint." msgstr "" -#: cinder/compute/manager.py:1103 +#: cinder/openstack/common/rpc/common.py:156 #, python-format -msgid "instance %(instance_uuid)s: updating agent to %(url)s" +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." msgstr "" -#: cinder/compute/manager.py:1116 +#: cinder/openstack/common/rpc/common.py:280 #, python-format -msgid "instance %s: rescuing" +msgid "Failed to sanitize %(item)s. Key error %(err)s" msgstr "" -#: cinder/compute/manager.py:1141 +#: cinder/openstack/common/rpc/common.py:302 #, python-format -msgid "instance %s: unrescuing" +msgid "Returning exception %s to caller" msgstr "" -#: cinder/compute/manager.py:1270 -msgid "destination same as source!" +#: cinder/openstack/common/rpc/impl_kombu.py:166 +#: cinder/openstack/common/rpc/impl_qpid.py:164 +msgid "Failed to process message... skipping it." msgstr "" -#: cinder/compute/manager.py:1287 +#: cinder/openstack/common/rpc/impl_kombu.py:477 #, python-format -msgid "instance %s: migrating" +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" msgstr "" -#: cinder/compute/manager.py:1471 +#: cinder/openstack/common/rpc/impl_kombu.py:499 #, python-format -msgid "instance %s: pausing" +msgid "Connected to AMQP server on %(hostname)s:%(port)d" msgstr "" -#: cinder/compute/manager.py:1489 +#: cinder/openstack/common/rpc/impl_kombu.py:536 #, python-format -msgid "instance %s: unpausing" +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" msgstr "" -#: cinder/compute/manager.py:1525 +#: cinder/openstack/common/rpc/impl_kombu.py:552 #, python-format -msgid "instance %s: retrieving diagnostics" +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." 
msgstr "" -#: cinder/compute/manager.py:1534 +#: cinder/openstack/common/rpc/impl_kombu.py:606 +#: cinder/openstack/common/rpc/impl_qpid.py:507 #, python-format -msgid "instance %s: suspending" +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" msgstr "" -#: cinder/compute/manager.py:1556 +#: cinder/openstack/common/rpc/impl_kombu.py:624 +#: cinder/openstack/common/rpc/impl_qpid.py:522 #, python-format -msgid "instance %s: resuming" +msgid "Timed out waiting for RPC response: %s" msgstr "" -#: cinder/compute/manager.py:1579 +#: cinder/openstack/common/rpc/impl_kombu.py:628 +#: cinder/openstack/common/rpc/impl_qpid.py:526 #, python-format -msgid "instance %s: locking" +msgid "Failed to consume message from queue: %s" msgstr "" -#: cinder/compute/manager.py:1588 +#: cinder/openstack/common/rpc/impl_kombu.py:667 +#: cinder/openstack/common/rpc/impl_qpid.py:561 #, python-format -msgid "instance %s: unlocking" +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" msgstr "" -#: cinder/compute/manager.py:1596 +#: cinder/openstack/common/rpc/impl_qpid.py:84 #, python-format -msgid "instance %s: getting locked state" +msgid "Invalid value for qpid_topology_version: %d" msgstr "" -#: cinder/compute/manager.py:1606 +#: cinder/openstack/common/rpc/impl_qpid.py:455 #, python-format -msgid "instance %s: reset network" +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" msgstr "" -#: cinder/compute/manager.py:1614 +#: cinder/openstack/common/rpc/impl_qpid.py:461 #, python-format -msgid "instance %s: inject network info" +msgid "Connected to AMQP server on %s" msgstr "" -#: cinder/compute/manager.py:1618 -#, python-format -msgid "network_info to inject: |%s|" +#: cinder/openstack/common/rpc/impl_qpid.py:474 +msgid "Re-established AMQP queues" msgstr "" -#: cinder/compute/manager.py:1655 -#, python-format -msgid "instance %s: getting vnc console" +#: cinder/openstack/common/rpc/impl_qpid.py:534 +msgid "Error processing message. Skipping it." msgstr "" -#: cinder/compute/manager.py:1685 -#, python-format -msgid "Booting with volume %(volume_id)s at %(mountpoint)s" +#: cinder/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." msgstr "" -#: cinder/compute/manager.py:1703 +#: cinder/openstack/common/rpc/impl_zmq.py:101 #, python-format -msgid "" -"instance %(instance_uuid)s: attaching volume %(volume_id)s to " -"%(mountpoint)s" +msgid "Deserializing: %s" msgstr "" -#: cinder/compute/manager.py:1705 +#: cinder/openstack/common/rpc/impl_zmq.py:136 #, python-format -msgid "Attaching volume %(volume_id)s to %(mountpoint)s" +msgid "Connecting to %(addr)s with %(type)s" msgstr "" -#: cinder/compute/manager.py:1714 +#: cinder/openstack/common/rpc/impl_zmq.py:137 #, python-format -msgid "instance %(instance_uuid)s: attach failed %(mountpoint)s, removing" +msgid "-> Subscribed to %(subscribe)s" msgstr "" -#: cinder/compute/manager.py:1724 +#: cinder/openstack/common/rpc/impl_zmq.py:138 #, python-format -msgid "Attach failed %(mountpoint)s, removing" +msgid "-> bind: %(bind)s" msgstr "" -#: cinder/compute/manager.py:1752 -#, python-format -msgid "Detach volume %(volume_id)s from mountpoint %(mp)s" +#: cinder/openstack/common/rpc/impl_zmq.py:146 +msgid "Could not open socket." 
msgstr "" -#: cinder/compute/manager.py:1756 +#: cinder/openstack/common/rpc/impl_zmq.py:158 #, python-format -msgid "Detaching volume from unknown instance %s" +msgid "Subscribing to %s" msgstr "" -#: cinder/compute/manager.py:1822 -#, python-format -msgid "" -"Creating tmpfile %s to notify to other compute nodes that they should " -"mount the same storage." +#: cinder/openstack/common/rpc/impl_zmq.py:200 +msgid "You cannot recv on this socket." msgstr "" -#: cinder/compute/manager.py:1884 -msgid "Instance has no volume." +#: cinder/openstack/common/rpc/impl_zmq.py:205 +msgid "You cannot send on this socket." msgstr "" -#: cinder/compute/manager.py:1916 +#: cinder/openstack/common/rpc/impl_zmq.py:267 #, python-format -msgid "plug_vifs() failed %(cnt)d.Retry up to %(max_retry)d for %(hostname)s." +msgid "Running func with context: %s" msgstr "" -#: cinder/compute/manager.py:1973 -#, python-format -msgid "Pre live migration failed at %(dest)s" +#: cinder/openstack/common/rpc/impl_zmq.py:305 +msgid "Sending reply" msgstr "" -#: cinder/compute/manager.py:2000 -msgid "post_live_migration() is started.." +#: cinder/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." msgstr "" -#: cinder/compute/manager.py:2030 -msgid "No floating_ip found" +#: cinder/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" msgstr "" -#: cinder/compute/manager.py:2038 -msgid "No floating_ip found." +#: cinder/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" msgstr "" -#: cinder/compute/manager.py:2040 -#, python-format -msgid "" -"Live migration: Unexpected error: cannot inherit floating ip.\n" -"%(e)s" +#: cinder/openstack/common/rpc/impl_zmq.py:387 +msgid "Consuming socket" msgstr "" -#: cinder/compute/manager.py:2073 +#: cinder/openstack/common/rpc/impl_zmq.py:437 #, python-format -msgid "Migrating instance to %(dest)s finished successfully." +msgid "Creating proxy for topic: %s" msgstr "" -#: cinder/compute/manager.py:2075 -msgid "" -"You may see the error \"libvirt: QEMU error: Domain not found: no domain " -"with matching name.\" This error can be safely ignored." +#: cinder/openstack/common/rpc/impl_zmq.py:443 +msgid "Topic contained dangerous characters." msgstr "" -#: cinder/compute/manager.py:2090 -msgid "Post operation of migration started" +#: cinder/openstack/common/rpc/impl_zmq.py:475 +msgid "Topic socket file creation failed." msgstr "" -#: cinder/compute/manager.py:2226 +#: cinder/openstack/common/rpc/impl_zmq.py:481 #, python-format -msgid "Updated the info_cache for instance %s" +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." msgstr "" -#: cinder/compute/manager.py:2255 -msgid "Updating bandwidth usage cache" +#: cinder/openstack/common/rpc/impl_zmq.py:497 +#, python-format +msgid "Required IPC directory does not exist at %s" msgstr "" -#: cinder/compute/manager.py:2277 -msgid "Updating host status" +#: cinder/openstack/common/rpc/impl_zmq.py:506 +#, python-format +msgid "Permission denied to IPC directory at %s" msgstr "" -#: cinder/compute/manager.py:2305 -#, python-format -msgid "" -"Found %(num_db_instances)s in the database and %(num_vm_instances)s on " -"the hypervisor." +#: cinder/openstack/common/rpc/impl_zmq.py:509 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." 
msgstr "" -#: cinder/compute/manager.py:2331 +#: cinder/openstack/common/rpc/impl_zmq.py:543 #, python-format -msgid "" -"During the sync_power process the instance %(uuid)s has moved from host " -"%(src)s to host %(dst)s" +msgid "CONSUMER RECEIVED DATA: %s" msgstr "" -#: cinder/compute/manager.py:2344 -#, python-format -msgid "" -"Instance %s is in the process of migrating to this host. Wait next " -"sync_power cycle before setting power state to NOSTATE" +#: cinder/openstack/common/rpc/impl_zmq.py:562 +msgid "ZMQ Envelope version unsupported or unknown." msgstr "" -#: cinder/compute/manager.py:2350 -msgid "" -"Instance found in database but not known by hypervisor. Setting power " -"state to NOSTATE" +#: cinder/openstack/common/rpc/impl_zmq.py:590 +msgid "Skipping topic registration. Already registered." msgstr "" -#: cinder/compute/manager.py:2380 -msgid "FLAGS.reclaim_instance_interval <= 0, skipping..." +#: cinder/openstack/common/rpc/impl_zmq.py:597 +#, python-format +msgid "Consumer is a zmq.%s" msgstr "" -#: cinder/compute/manager.py:2392 -msgid "Reclaiming deleted instance" +#: cinder/openstack/common/rpc/impl_zmq.py:649 +msgid "Creating payload" msgstr "" -#: cinder/compute/manager.py:2458 -#, python-format -msgid "" -"Detected instance with name label '%(name)s' which is marked as DELETED " -"but still present on host." +#: cinder/openstack/common/rpc/impl_zmq.py:662 +msgid "Creating queue socket for reply waiter" msgstr "" -#: cinder/compute/manager.py:2465 -#, python-format -msgid "" -"Destroying instance with name label '%(name)s' which is marked as DELETED" -" but still present on host." +#: cinder/openstack/common/rpc/impl_zmq.py:675 +msgid "Sending cast" msgstr "" -#: cinder/compute/manager.py:2472 -#, python-format -msgid "Unrecognized value '%(action)s' for FLAGS.running_deleted_instance_action" +#: cinder/openstack/common/rpc/impl_zmq.py:678 +msgid "Cast sent; Waiting reply" msgstr "" -#: cinder/compute/manager.py:2542 +#: cinder/openstack/common/rpc/impl_zmq.py:681 #, python-format -msgid "" -"Aggregate %(aggregate_id)s: unrecoverable state during operation on " -"%(host)s" +msgid "Received message: %s" msgstr "" -#: cinder/compute/utils.py:142 -msgid "v4 subnets are required for legacy nw_info" +#: cinder/openstack/common/rpc/impl_zmq.py:682 +msgid "Unpacking response" msgstr "" -#: cinder/console/manager.py:77 cinder/console/vmrc_manager.py:70 -msgid "Adding console" +#: cinder/openstack/common/rpc/impl_zmq.py:691 +msgid "Unsupported or unknown ZMQ envelope returned." msgstr "" -#: cinder/console/manager.py:97 -#, python-format -msgid "Tried to remove non-existent console %(console_id)s." +#: cinder/openstack/common/rpc/impl_zmq.py:698 +msgid "RPC Message Invalid." msgstr "" -#: cinder/console/vmrc_manager.py:122 +#: cinder/openstack/common/rpc/impl_zmq.py:721 #, python-format -msgid "Tried to remove non-existent console %(console_id)s." +msgid "%(msg)s" msgstr "" -#: cinder/console/vmrc_manager.py:125 +#: cinder/openstack/common/rpc/impl_zmq.py:724 #, python-format -msgid "Removing console %(console_id)s." -msgstr "" - -#: cinder/console/xvp.py:98 -msgid "Rebuilding xvp conf" +msgid "Sending message(s) to: %s" msgstr "" -#: cinder/console/xvp.py:116 -#, python-format -msgid "Re-wrote %s" +#: cinder/openstack/common/rpc/impl_zmq.py:728 +msgid "No matchmaker results. Not casting." msgstr "" -#: cinder/console/xvp.py:121 -msgid "Stopping xvp" +#: cinder/openstack/common/rpc/impl_zmq.py:731 +msgid "No match from matchmaker." 
msgstr "" -#: cinder/console/xvp.py:134 -msgid "Starting xvp" +#: cinder/openstack/common/rpc/impl_zmq.py:771 +#, python-format +msgid "topic is %s." msgstr "" -#: cinder/console/xvp.py:141 +#: cinder/openstack/common/rpc/impl_zmq.py:815 #, python-format -msgid "Error starting xvp: %s" +msgid "rpc_zmq_matchmaker = %(orig)s is deprecated; use %(new)s instead" msgstr "" -#: cinder/console/xvp.py:144 -msgid "Restarting xvp" +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." msgstr "" -#: cinder/console/xvp.py:146 -msgid "xvp not running..." +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." msgstr "" -#: cinder/consoleauth/manager.py:63 +#: cinder/openstack/common/rpc/matchmaker.py:239 #, python-format -msgid "Deleting Expired Token: (%s)" +msgid "Matchmaker unregistered: %s, %s" msgstr "" -#: cinder/consoleauth/manager.py:75 -#, python-format -msgid "Received Token: %(token)s, %(token_dict)s)" +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." msgstr "" -#: cinder/consoleauth/manager.py:79 +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#: cinder/openstack/common/rpc/matchmaker_ring.py:79 +#: cinder/openstack/common/rpc/matchmaker_ring.py:97 #, python-format -msgid "Checking Token: %(token)s, %(token_valid)s)" +msgid "No key defining hosts for topic '%s', see ringfile" msgstr "" -#: cinder/db/sqlalchemy/api.py:57 -msgid "Use of empty request context is deprecated" +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:54 +#, python-format +msgid "extra_spec requirement '%(req)s' does not match '%(cap)s'" msgstr "" -#: cinder/db/sqlalchemy/api.py:198 +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:67 #, python-format -msgid "Unrecognized read_deleted value '%s'" +msgid "%(host_state)s fails resource_type extra_specs requirements" msgstr "" -#: cinder/db/sqlalchemy/api.py:516 cinder/db/sqlalchemy/api.py:551 -#, python-format -msgid "No ComputeNode for %(host)s" +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:43 +msgid "Re-scheduling is disabled." msgstr "" -#: cinder/db/sqlalchemy/api.py:4019 cinder/db/sqlalchemy/api.py:4045 +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:52 #, python-format -msgid "No backend config with id %(sm_backend_id)s" +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" msgstr "" -#: cinder/db/sqlalchemy/api.py:4103 -#, python-format -msgid "No sm_flavor called %(sm_flavor)s" +#: cinder/scheduler/driver.py:69 +msgid "Must implement host_passes_filters" msgstr "" -#: cinder/db/sqlalchemy/api.py:4147 -#, python-format -msgid "No sm_volume with id %(volume_id)s" +#: cinder/scheduler/driver.py:74 +msgid "Must implement find_retype_host" msgstr "" -#: cinder/db/sqlalchemy/migration.py:66 -msgid "python-migrate is not installed. Exiting." +#: cinder/scheduler/driver.py:78 +msgid "Must implement a fallback schedule" msgstr "" -#: cinder/db/sqlalchemy/migration.py:78 -msgid "version should be an integer" +#: cinder/scheduler/driver.py:82 +msgid "Must implement schedule_create_volume" msgstr "" -#: cinder/db/sqlalchemy/session.py:137 +#: cinder/scheduler/filter_scheduler.py:98 #, python-format -msgid "SQL connection failed. %s attempts left." 
+msgid "cannot place volume %(id)s on %(host)s" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:48 -msgid "interface column not added to networks table" +#: cinder/scheduler/filter_scheduler.py:114 +#, python-format +msgid "No valid hosts for volume %(id)s with type %(type)s" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:80 -#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:54 -#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:61 -#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:48 +#: cinder/scheduler/filter_scheduler.py:125 #, python-format -msgid "Table |%s| not created!" +msgid "" +"Current host not valid for volume %(id)s with type %(type)s, migration " +"not allowed" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:87 -msgid "VIF column not added to fixed_ips table" +#: cinder/scheduler/filter_scheduler.py:156 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:97 +#: cinder/scheduler/filter_scheduler.py:174 #, python-format -msgid "join list for moving mac_addresses |%s|" +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:39 -#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:60 -#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:61 -#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:99 -msgid "foreign key constraint couldn't be added" +#: cinder/scheduler/filter_scheduler.py:207 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:58 -msgid "foreign key constraint couldn't be dropped" +#: cinder/scheduler/filter_scheduler.py:259 +#, python-format +msgid "Filtered %s" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/045_add_network_priority.py:34 -msgid "priority column not added to networks table" +#: cinder/scheduler/filter_scheduler.py:276 +#, python-format +msgid "Choosing %s" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:41 -#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:42 -#: cinder/db/sqlalchemy/migrate_repo/versions/064_change_instance_id_to_uuid_in_instance_actions.py:56 -#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:68 -msgid "foreign key constraint couldn't be removed" +#: cinder/scheduler/host_manager.py:264 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/049_add_instances_progress.py:34 -msgid "progress column not added to instances table" +#: cinder/scheduler/host_manager.py:269 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/055_convert_flavor_id_to_str.py:97 +#: cinder/scheduler/host_manager.py:294 #, python-format -msgid "" -"Could not cast flavorid to integer: %s. Set flavorid to an integer-like " -"string to downgrade." +msgid "volume service is down or disabled. 
(host: %s)" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:69 -msgid "instance_info_caches tables not dropped" +#: cinder/scheduler/manager.py:63 +msgid "" +"ChanceScheduler and SimpleScheduler have been deprecated due to lack of " +"support for advanced features like: volume types, volume encryption, QoS " +"etc. These two schedulers can be fully replaced by FilterScheduler with " +"certain combination of filters and weighers." msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/069_block_migration.py:41 -msgid "progress column not added to compute_nodes table" +#: cinder/scheduler/manager.py:98 cinder/scheduler/manager.py:100 +msgid "Failed to create scheduler manager volume flow" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:76 -msgid "dns_domains table not dropped" +#: cinder/scheduler/manager.py:159 +msgid "New volume type not specified in request_spec." msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:60 -msgid "quota_classes table not dropped" +#: cinder/scheduler/manager.py:174 +#, python-format +msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." msgstr "" -#: cinder/image/glance.py:147 -msgid "Connection error contacting glance server, retrying" +#: cinder/scheduler/manager.py:192 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" msgstr "" -#: cinder/image/glance.py:153 cinder/network/quantum/melange_connection.py:104 -msgid "Maximum attempts reached" +#: cinder/scheduler/scheduler_options.py:68 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" msgstr "" -#: cinder/image/glance.py:278 +#: cinder/scheduler/scheduler_options.py:78 #, python-format -msgid "Creating image in Glance. Metadata passed in %s" +msgid "Could not decode scheduler options: '%s'" msgstr "" -#: cinder/image/glance.py:281 -#, python-format -msgid "Metadata after formatting for Glance %s" +#: cinder/scheduler/filters/capacity_filter.py:43 +msgid "Free capacity not set: volume node info collection broken." 
msgstr "" -#: cinder/image/glance.py:289 +#: cinder/scheduler/filters/capacity_filter.py:57 #, python-format -msgid "Metadata returned from Glance formatted for Base %s" +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" msgstr "" -#: cinder/image/glance.py:331 cinder/image/glance.py:335 -msgid "Not the image owner" +#: cinder/scheduler/flows/create_volume.py:53 +msgid "No volume_id provided to populate a request_spec from" msgstr "" -#: cinder/image/glance.py:410 +#: cinder/scheduler/flows/create_volume.py:116 #, python-format -msgid "%(timestamp)s does not follow any of the signatures: %(iso_formats)s" +msgid "Failed to schedule_create_volume: %(cause)s" msgstr "" -#: cinder/image/s3.py:309 +#: cinder/scheduler/flows/create_volume.py:135 #, python-format -msgid "Failed to download %(image_location)s to %(image_path)s" +msgid "Failed notifying on %(topic)s payload %(payload)s" msgstr "" -#: cinder/image/s3.py:328 +#: cinder/tests/fake_driver.py:57 cinder/volume/driver.py:784 #, python-format -msgid "Failed to decrypt %(image_location)s to %(image_path)s" +msgid "FAKE ISCSI: %s" msgstr "" -#: cinder/image/s3.py:340 +#: cinder/tests/fake_driver.py:76 cinder/volume/driver.py:884 #, python-format -msgid "Failed to untar %(image_location)s to %(image_path)s" +msgid "FAKE ISER: %s" msgstr "" -#: cinder/image/s3.py:353 -#, python-format -msgid "Failed to upload %(image_location)s to %(image_path)s" +#: cinder/tests/fake_driver.py:97 +msgid "local_path not implemented" msgstr "" -#: cinder/image/s3.py:379 +#: cinder/tests/fake_driver.py:124 cinder/tests/fake_driver.py:129 #, python-format -msgid "Failed to decrypt private key: %s" +msgid "LoggingVolumeDriver: %s" msgstr "" -#: cinder/image/s3.py:387 +#: cinder/tests/fake_utils.py:70 #, python-format -msgid "Failed to decrypt initialization vector: %s" +msgid "Faking execution of cmd (subprocess): %s" msgstr "" -#: cinder/image/s3.py:398 +#: cinder/tests/fake_utils.py:78 #, python-format -msgid "Failed to decrypt image file %(image_file)s: %(err)s" +msgid "Faked command matched %s" msgstr "" -#: cinder/image/s3.py:410 -msgid "Unsafe filenames in image" +#: cinder/tests/fake_utils.py:94 +#, python-format +msgid "Faked command raised an exception %s" msgstr "" -#: cinder/ipv6/account_identifier.py:38 cinder/ipv6/rfc2462.py:34 +#: cinder/tests/fake_utils.py:97 #, python-format -msgid "Bad mac for to_global_ipv6: %s" +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" msgstr "" -#: cinder/ipv6/account_identifier.py:40 cinder/ipv6/rfc2462.py:36 +#: cinder/tests/test_misc.py:58 #, python-format -msgid "Bad prefix for to_global_ipv6: %s" +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" msgstr "" -#: cinder/ipv6/account_identifier.py:42 +#: cinder/tests/test_netapp_nfs.py:360 #, python-format -msgid "Bad project_id for to_global_ipv6: %s" +msgid "Share %(share)s and file name %(file_name)s" msgstr "" -#: cinder/network/ldapdns.py:321 -msgid "This driver only supports type 'a' entries." 
+#: cinder/tests/test_rbd.py:768 cinder/volume/drivers/rbd.py:175 +msgid "flush() not supported in this version of librbd" msgstr "" -#: cinder/network/linux_net.py:166 +#: cinder/tests/test_storwize_svc.py:260 #, python-format -msgid "Attempted to remove chain %s which does not exist" +msgid "unrecognized argument %s" msgstr "" -#: cinder/network/linux_net.py:192 +#: cinder/tests/test_storwize_svc.py:1507 #, python-format -msgid "Unknown chain: %r" +msgid "Run CLI command: %s" msgstr "" -#: cinder/network/linux_net.py:215 +#: cinder/tests/test_storwize_svc.py:1510 #, python-format msgid "" -"Tried to remove rule that was not there: %(chain)r %(rule)r %(wrap)r " -"%(top)r" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" msgstr "" -#: cinder/network/linux_net.py:335 -msgid "IPTablesManager.apply completed with success" +#: cinder/tests/test_storwize_svc.py:1515 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/network/linux_net.py:694 +#: cinder/tests/test_volume_types.py:60 #, python-format -msgid "Hupping dnsmasq threw %s" +msgid "Given data: %s" msgstr "" -#: cinder/network/linux_net.py:696 +#: cinder/tests/test_volume_types.py:61 #, python-format -msgid "Pid %d is stale, relaunching dnsmasq" +msgid "Result data: %s" msgstr "" -#: cinder/network/linux_net.py:756 +#: cinder/tests/test_xiv_ds8k.py:102 #, python-format -msgid "killing radvd threw %s" +msgid "Volume not found for instance %(instance_id)s." msgstr "" -#: cinder/network/linux_net.py:758 -#, python-format -msgid "Pid %d is stale, relaunching radvd" +#: cinder/tests/api/contrib/test_backups.py:741 +msgid "Invalid input" msgstr "" -#: cinder/network/linux_net.py:967 +#: cinder/tests/integrated/test_login.py:29 #, python-format -msgid "Starting VLAN inteface %s" +msgid "volume: %s" msgstr "" -#: cinder/network/linux_net.py:999 +#: cinder/tests/integrated/api/client.py:32 #, python-format -msgid "Starting Bridge interface for %s" +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" msgstr "" -#: cinder/network/linux_net.py:1142 -#, python-format -msgid "Starting bridge %s " +#: cinder/tests/integrated/api/client.py:42 +msgid "Authentication error" msgstr "" -#: cinder/network/linux_net.py:1149 -#, python-format -msgid "Done starting bridge %s" +#: cinder/tests/integrated/api/client.py:50 +msgid "Authorization error" msgstr "" -#: cinder/network/linux_net.py:1167 -#, python-format -msgid "Failed unplugging gateway interface '%s'" +#: cinder/tests/integrated/api/client.py:58 +msgid "Item not found" msgstr "" -#: cinder/network/linux_net.py:1170 +#: cinder/tests/integrated/api/client.py:100 #, python-format -msgid "Unplugged gateway interface '%s'" +msgid "Doing %(method)s on %(relative_url)s" msgstr "" -#: cinder/network/manager.py:291 +#: cinder/tests/integrated/api/client.py:103 #, python-format -msgid "Fixed ip %(fixed_ip_id)s not found" +msgid "Body: %s" msgstr "" -#: cinder/network/manager.py:300 cinder/network/manager.py:496 +#: cinder/tests/integrated/api/client.py:121 #, python-format -msgid "Interface %(interface)s not found" +msgid "%(auth_uri)s => code %(http_status)s" msgstr "" -#: cinder/network/manager.py:315 +#: cinder/tests/integrated/api/client.py:148 #, python-format -msgid "floating IP allocation for instance |%s|" +msgid "%(relative_uri)s => code %(http_status)s" msgstr "" -#: cinder/network/manager.py:353 -#, python-format -msgid "floating IP deallocation for instance |%s|" +#: 
cinder/tests/integrated/api/client.py:159 +msgid "Unexpected status code" msgstr "" -#: cinder/network/manager.py:386 +#: cinder/tests/integrated/api/client.py:166 #, python-format -msgid "Address |%(address)s| is not allocated" +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/transfer/api.py:68 +msgid "Volume in unexpected state" msgstr "" -#: cinder/network/manager.py:390 +#: cinder/transfer/api.py:102 cinder/volume/api.py:367 +msgid "status must be available" +msgstr "" + +#: cinder/transfer/api.py:119 #, python-format -msgid "Address |%(address)s| is not allocated to your project |%(project)s|" +msgid "Failed to create transfer record for %s" msgstr "" -#: cinder/network/manager.py:402 +#: cinder/transfer/api.py:136 #, python-format -msgid "Quota exceeded for %s, tried to allocate address" +msgid "Attempt to transfer %s with invalid auth key." msgstr "" -#: cinder/network/manager.py:614 +#: cinder/transfer/api.py:156 cinder/volume/flows/api/create_volume.py:508 #, python-format msgid "" -"Database inconsistency: DNS domain |%s| is registered in the Cinder db but " -"not visible to either the floating or instance DNS driver. It will be " -"ignored." +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" msgstr "" -#: cinder/network/manager.py:660 +#: cinder/transfer/api.py:182 #, python-format -msgid "Domain |%(domain)s| already exists, changing zone to |%(av_zone)s|." +msgid "Failed to update quota donating volumetransfer id %s" msgstr "" -#: cinder/network/manager.py:670 +#: cinder/transfer/api.py:199 #, python-format -msgid "Domain |%(domain)s| already exists, changing project to |%(project)s|." +msgid "Volume %s has been transferred." msgstr "" -#: cinder/network/manager.py:778 +#: cinder/volume/api.py:143 #, python-format -msgid "Disassociated %s stale fixed ip(s)" +msgid "Unable to query if %s is in the availability zone set" msgstr "" -#: cinder/network/manager.py:782 -msgid "setting network host" +#: cinder/volume/api.py:171 cinder/volume/api.py:173 +msgid "Failed to create api volume flow" msgstr "" -#: cinder/network/manager.py:896 -#, python-format -msgid "network allocations for instance |%s|" +#: cinder/volume/api.py:202 +msgid "Failed to update quota for deleting volume" msgstr "" -#: cinder/network/manager.py:901 +#: cinder/volume/api.py:214 #, python-format -msgid "networks retrieved for instance |%(instance_id)s|: |%(networks)s|" +msgid "Volume status must be available or error, but current status is: %s" msgstr "" -#: cinder/network/manager.py:930 -#, python-format -msgid "network deallocation for instance |%s|" +#: cinder/volume/api.py:224 +msgid "Volume cannot be deleted while migrating" msgstr "" -#: cinder/network/manager.py:1152 +#: cinder/volume/api.py:229 #, python-format -msgid "" -"instance-dns-zone is |%(domain)s|, which is in availability zone " -"|%(zone)s|. Instance |%(instance)s| is in zone |%(zone2)s|. No DNS record" -" will be created." +msgid "Volume still has %d dependent snapshots" msgstr "" -#: cinder/network/manager.py:1227 +#: cinder/volume/api.py:285 cinder/volume/api.py:350 +#: cinder/volume/qos_specs.py:240 cinder/volume/volume_types.py:67 #, python-format -msgid "Unable to release %s because vif doesn't exist." 
+msgid "Searching by: %s" msgstr "" -#: cinder/network/manager.py:1244 -#, python-format -msgid "Leased IP |%(address)s|" +#: cinder/volume/api.py:370 +msgid "already attached" msgstr "" -#: cinder/network/manager.py:1248 -#, python-format -msgid "IP %s leased that is not associated" +#: cinder/volume/api.py:377 +msgid "status must be in-use to detach" msgstr "" -#: cinder/network/manager.py:1256 -#, python-format -msgid "IP |%s| leased that isn't allocated" +#: cinder/volume/api.py:388 +#, fuzzy +msgid "Volume status must be available to reserve" +msgstr "Status volumena mora biti omogućen" + +#: cinder/volume/api.py:464 +msgid "Snapshot cannot be created while volume is migrating" msgstr "" -#: cinder/network/manager.py:1261 -#, python-format -msgid "Released IP |%(address)s|" +#: cinder/volume/api.py:468 +msgid "must be available" msgstr "" -#: cinder/network/manager.py:1265 +#: cinder/volume/api.py:490 #, python-format -msgid "IP %s released that is not associated" +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" msgstr "" -#: cinder/network/manager.py:1268 +#: cinder/volume/api.py:502 #, python-format -msgid "IP %s released that was not leased" +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" msgstr "" -#: cinder/network/manager.py:1331 -msgid "cidr already in use" -msgstr "" +#: cinder/volume/api.py:553 +#, fuzzy +msgid "Volume Snapshot status must be available or error" +msgstr "Status volumena mora biti omogućen" -#: cinder/network/manager.py:1334 -#, python-format -msgid "requested cidr (%(cidr)s) conflicts with existing supernet (%(super)s)" +#: cinder/volume/api.py:581 cinder/volume/flows/api/create_volume.py:208 +msgid "Metadata property key blank" msgstr "" -#: cinder/network/manager.py:1345 -#, python-format -msgid "" -"requested cidr (%(cidr)s) conflicts with existing smaller cidr " -"(%(smaller)s)" +#: cinder/volume/api.py:585 +msgid "Metadata property key greater than 255 characters" msgstr "" -#: cinder/network/manager.py:1404 -msgid "Network already exists!" +#: cinder/volume/api.py:589 +msgid "Metadata property value greater than 255 characters" msgstr "" -#: cinder/network/manager.py:1423 -#, python-format -msgid "Network must be disassociated from project %s before delete" +#: cinder/volume/api.py:720 cinder/volume/api.py:772 +#, fuzzy +msgid "Volume status must be available/in-use." +msgstr "Status volumena mora biti omogućen" + +#: cinder/volume/api.py:723 +msgid "Volume status is in-use." msgstr "" -#: cinder/network/manager.py:1832 -msgid "" -"The sum between the number of networks and the vlan start cannot be " -"greater than 4094" +#: cinder/volume/api.py:752 +msgid "Volume status must be available to extend." msgstr "" -#: cinder/network/manager.py:1839 +#: cinder/volume/api.py:757 #, python-format msgid "" -"The network range is not big enough to fit %(num_networks)s. Network size" -" is %(network_size)s" +"New size for extend must be greater than current size. 
(current: " +"%(size)s, extended: %(new_size)s)" msgstr "" -#: cinder/network/minidns.py:65 -msgid "This driver only supports type 'a'" +#: cinder/volume/api.py:778 +msgid "Volume is already part of an active migration" msgstr "" -#: cinder/network/quantum/client.py:154 -msgid "Tenant ID not set" +#: cinder/volume/api.py:784 +msgid "volume must not have snapshots" msgstr "" -#: cinder/network/quantum/client.py:180 +#: cinder/volume/api.py:797 #, python-format -msgid "Quantum Client Request: %(method)s %(action)s" +msgid "No available service named %s" msgstr "" -#: cinder/network/quantum/client.py:196 -#, python-format -msgid "Quantum entity not found: %s" +#: cinder/volume/api.py:803 +msgid "Destination host must be different than current host" +msgstr "" + +#: cinder/volume/api.py:833 +msgid "Source volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:837 +msgid "Destination volume not mid-migration." msgstr "" -#: cinder/network/quantum/client.py:206 +#: cinder/volume/api.py:842 #, python-format -msgid "Server %(status_code)s error: %(data)s" +msgid "Destination has migration_status %(stat)s, expected %(exp)s." msgstr "" -#: cinder/network/quantum/client.py:210 +#: cinder/volume/api.py:853 +msgid "Volume status must be available to update readonly flag." +msgstr "" + +#: cinder/volume/api.py:862 #, python-format -msgid "Unable to connect to server. Got error: %s" +msgid "Unable to update type due to incorrect status on volume: %s" msgstr "" -#: cinder/network/quantum/client.py:228 +#: cinder/volume/api.py:868 #, python-format -msgid "unable to deserialize object of type = '%s'" +msgid "Volume %s is already part of an active migration." msgstr "" -#: cinder/network/quantum/manager.py:162 -msgid "QuantumManager does not use 'multi_host' parameter." +#: cinder/volume/api.py:874 +#, python-format +msgid "migration_policy must be 'on-demand' or 'never', passed: %s" msgstr "" -#: cinder/network/quantum/manager.py:166 -msgid "QuantumManager requires that only one network is created per call" +#: cinder/volume/api.py:887 +#, python-format +msgid "Invalid volume_type passed: %s" msgstr "" -#: cinder/network/quantum/manager.py:176 -msgid "QuantumManager does not use 'vlan_start' parameter." +#: cinder/volume/api.py:900 +#, python-format +msgid "New volume_type same as original: %s" msgstr "" -#: cinder/network/quantum/manager.py:182 -msgid "QuantumManager does not use 'vpn_start' parameter." +#: cinder/volume/api.py:915 +msgid "Retype cannot change encryption requirements" msgstr "" -#: cinder/network/quantum/manager.py:186 -msgid "QuantumManager does not use 'bridge' parameter." +#: cinder/volume/api.py:927 +msgid "Retype cannot change front-end qos specs for in-use volumes" msgstr "" -#: cinder/network/quantum/manager.py:190 -msgid "QuantumManager does not use 'bridge_interface' parameter." +#: cinder/volume/driver.py:189 cinder/volume/drivers/netapp/nfs.py:174 +#, python-format +msgid "Recovering from a failed execute. Try number %s" msgstr "" -#: cinder/network/quantum/manager.py:195 -msgid "QuantumManager requires a valid (.1) gateway address." +#: cinder/volume/driver.py:282 +#, python-format +msgid "copy_data_between_volumes %(src)s -> %(dest)s." 
msgstr "" -#: cinder/network/quantum/manager.py:204 +#: cinder/volume/driver.py:295 cinder/volume/driver.py:309 #, python-format -msgid "" -"Unable to find existing quantum network for tenant '%(q_tenant_id)s' with" -" net-id '%(quantum_net_id)s'" +msgid "Failed to attach volume %(vol)s" msgstr "" -#: cinder/network/quantum/manager.py:301 +#: cinder/volume/driver.py:327 #, python-format -msgid "network allocations for instance %s" +msgid "Failed to copy volume %(src)s to %(dest)d" msgstr "" -#: cinder/network/quantum/manager.py:588 +#: cinder/volume/driver.py:340 #, python-format -msgid "" -"port deallocation failed for instance: |%(instance_id)s|, port_id: " -"|%(port_id)s|" +msgid "copy_image_to_volume %s." msgstr "" -#: cinder/network/quantum/manager.py:606 +#: cinder/volume/driver.py:358 #, python-format -msgid "" -"ipam deallocation failed for instance: |%(instance_id)s|, vif_uuid: " -"|%(vif_uuid)s|" +msgid "copy_volume_to_image %s." msgstr "" -#: cinder/network/quantum/melange_connection.py:96 +#: cinder/volume/driver.py:394 #, python-format -msgid "Server returned error: %s" +msgid "Unable to access the backend storage via the path %(path)s." msgstr "" -#: cinder/network/quantum/melange_connection.py:98 -msgid "Connection error contacting melange service, retrying" +#: cinder/volume/driver.py:433 +#, python-format +msgid "Creating a new backup for volume %s." msgstr "" -#: cinder/network/quantum/melange_connection.py:108 +#: cinder/volume/driver.py:451 #, python-format -msgid "" -"allocate IP on network |%(network_id)s| belonging to " -"|%(network_tenant_id)s| to this vif |%(vif_id)s| with mac " -"|%(mac_address)s| belonging to |%(project_id)s| " +msgid "Restoring backup %(backup)s to volume %(volume)s." msgstr "" -#: cinder/network/quantum/melange_ipam_lib.py:133 -msgid "get_project_and_global_net_ids must be called with a non-null project_id" +#: cinder/volume/driver.py:474 +msgid "Extend volume not implemented" msgstr "" -#: cinder/network/quantum/cinder_ipam_lib.py:75 -msgid "Error creating network entry" +#: cinder/volume/driver.py:533 cinder/volume/drivers/emc/emc_smis_iscsi.py:113 +msgid "ISCSI provider_location not stored, using discovery" msgstr "" -#: cinder/network/quantum/cinder_ipam_lib.py:90 +#: cinder/volume/driver.py:546 #, python-format -msgid "No network with net_id = %s" +msgid "ISCSI discovery attempt failed for:%s" msgstr "" -#: cinder/network/quantum/cinder_ipam_lib.py:221 +#: cinder/volume/driver.py:548 #, python-format -msgid "No fixed IPs to deallocate for vif %s" +msgid "Error from iscsiadm -m discovery: %s" msgstr "" -#: cinder/network/quantum/quantum_connection.py:99 +#: cinder/volume/driver.py:595 #, python-format -msgid "Connecting interface %(interface_id)s to net %(net_id)s for %(tenant_id)s" +msgid "Could not find iSCSI export for volume %s" msgstr "" -#: cinder/network/quantum/quantum_connection.py:113 +#: cinder/volume/driver.py:599 cinder/volume/drivers/emc/emc_smis_iscsi.py:156 #, python-format -msgid "Deleting port %(port_id)s on net %(net_id)s for %(tenant_id)s" +msgid "ISCSI Discovery: Found %s" msgstr "" -#: cinder/notifier/api.py:115 -#, python-format -msgid "%s not in valid priorities" +#: cinder/volume/driver.py:696 +msgid "The volume driver requires the iSCSI initiator name in the connector." msgstr "" -#: cinder/notifier/api.py:130 -#, python-format -msgid "" -"Problem '%(e)s' attempting to send to notification system. 
" -"Payload=%(payload)s" +#: cinder/volume/driver.py:726 cinder/volume/driver.py:845 +#: cinder/volume/drivers/eqlx.py:247 cinder/volume/drivers/lvm.py:359 +#: cinder/volume/drivers/zadara.py:650 +#: cinder/volume/drivers/emc/emc_smis_common.py:859 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:235 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:602 +#: cinder/volume/drivers/netapp/iscsi.py:1032 +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/nexenta/iscsi.py:538 +#: cinder/volume/drivers/windows/windows.py:205 +msgid "Updating volume stats" msgstr "" -#: cinder/notifier/list_notifier.py:65 -#, python-format -msgid "Problem '%(e)s' attempting to send to notification driver %(driver)s." +#: cinder/volume/driver.py:924 +msgid "Driver must implement initialize_connection" msgstr "" -#: cinder/notifier/rabbit_notifier.py:46 +#: cinder/volume/manager.py:203 #, python-format -msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgid "Driver path %s is deprecated, update your configuration to the new path." msgstr "" -#: cinder/rpc/amqp.py:146 -#, python-format -msgid "Returning exception %s to caller" +#: cinder/volume/manager.py:209 +msgid "" +"ThinLVMVolumeDriver is deprecated, please configure LVMISCSIDriver and " +"lvm_type=thin. Continuing with those settings." msgstr "" -#: cinder/rpc/amqp.py:188 +#: cinder/volume/manager.py:228 #, python-format -msgid "unpacked context: %s" +msgid "Starting volume driver %(driver_name)s (%(version)s)" msgstr "" -#: cinder/rpc/amqp.py:231 +#: cinder/volume/manager.py:235 #, python-format -msgid "received %s" +msgid "Error encountered during initialization of driver: %(name)s" msgstr "" -#: cinder/rpc/amqp.py:236 +#: cinder/volume/manager.py:244 #, python-format -msgid "no method for message: %s" +msgid "Re-exporting %s volumes" msgstr "" -#: cinder/rpc/amqp.py:237 +#: cinder/volume/manager.py:257 #, python-format -msgid "No method for message: %s" +msgid "Failed to re-export volume %s: setting to error state" msgstr "" -#: cinder/rpc/amqp.py:321 +#: cinder/volume/manager.py:264 #, python-format -msgid "Making asynchronous call on %s ..." +msgid "volume %s stuck in a downloading state" msgstr "" -#: cinder/rpc/amqp.py:324 +#: cinder/volume/manager.py:271 #, python-format -msgid "MSG_ID is %s" +msgid "volume %s: skipping export" msgstr "" -#: cinder/rpc/amqp.py:346 +#: cinder/volume/manager.py:273 #, python-format -msgid "Making asynchronous cast on %s..." +msgid "" +"Error encountered during re-exporting phase of driver initialization: " +"%(name)s" msgstr "" -#: cinder/rpc/amqp.py:354 -msgid "Making asynchronous fanout cast..." +#: cinder/volume/manager.py:283 +msgid "Resuming any in progress delete operations" msgstr "" -#: cinder/rpc/amqp.py:379 +#: cinder/volume/manager.py:286 #, python-format -msgid "Sending notification on %s..." +msgid "Resuming delete on volume: %s" msgstr "" -#: cinder/rpc/common.py:54 -#, python-format -msgid "" -"Remote error: %(exc_type)s %(value)s\n" -"%(traceback)s." +#: cinder/volume/manager.py:328 cinder/volume/manager.py:330 +msgid "Failed to create manager volume flow" msgstr "" -#: cinder/rpc/common.py:71 -msgid "Timeout while waiting on RPC response." +#: cinder/volume/manager.py:374 cinder/volume/manager.py:391 +#, python-format +msgid "volume %s: deleting" msgstr "" -#: cinder/rpc/impl_kombu.py:111 -msgid "Failed to process message... skipping it." 
+#: cinder/volume/manager.py:380 +msgid "volume is not local to this node" msgstr "" -#: cinder/rpc/impl_kombu.py:407 +#: cinder/volume/manager.py:389 #, python-format -msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgid "volume %s: removing export" msgstr "" -#: cinder/rpc/impl_kombu.py:430 +#: cinder/volume/manager.py:394 #, python-format -msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgid "Cannot delete volume %s: volume is busy" msgstr "" -#: cinder/rpc/impl_kombu.py:466 -#, python-format -msgid "" -"Unable to connect to AMQP server on %(hostname)s:%(port)d after " -"%(max_retries)d tries: %(err_str)s" +#: cinder/volume/manager.py:422 +msgid "Failed to update usages deleting volume" msgstr "" -#: cinder/rpc/impl_kombu.py:482 +#: cinder/volume/manager.py:427 #, python-format -msgid "" -"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " -"again in %(sleep_time)d seconds." +msgid "volume %s: glance metadata deleted" msgstr "" -#: cinder/rpc/impl_kombu.py:533 cinder/rpc/impl_qpid.py:385 +#: cinder/volume/manager.py:430 #, python-format -msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgid "no glance metadata found for volume %s" msgstr "" -#: cinder/rpc/impl_kombu.py:551 cinder/rpc/impl_qpid.py:400 +#: cinder/volume/manager.py:434 #, python-format -msgid "Timed out waiting for RPC response: %s" +msgid "volume %s: deleted successfully" msgstr "" -#: cinder/rpc/impl_kombu.py:555 cinder/rpc/impl_qpid.py:404 +#: cinder/volume/manager.py:451 #, python-format -msgid "Failed to consume message from queue: %s" +msgid "snapshot %s: creating" msgstr "" -#: cinder/rpc/impl_kombu.py:589 cinder/rpc/impl_qpid.py:434 +#: cinder/volume/manager.py:462 #, python-format -msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgid "snapshot %(snap_id)s: creating" msgstr "" -#: cinder/rpc/impl_qpid.py:341 +#: cinder/volume/manager.py:490 #, python-format -msgid "Unable to connect to AMQP server: %s " +msgid "" +"Failed updating %(snapshot_id)s metadata using the provided volumes " +"%(volume_id)s metadata" msgstr "" -#: cinder/rpc/impl_qpid.py:346 +#: cinder/volume/manager.py:496 #, python-format -msgid "Connected to AMQP server on %s" +msgid "snapshot %s: created successfully" msgstr "" -#: cinder/rpc/impl_qpid.py:354 -msgid "Re-established AMQP queues" +#: cinder/volume/manager.py:508 cinder/volume/manager.py:518 +#, python-format +msgid "snapshot %s: deleting" msgstr "" -#: cinder/rpc/impl_qpid.py:412 -msgid "Error processing message. Skipping it." +#: cinder/volume/manager.py:526 +#, python-format +msgid "Cannot delete snapshot %s: snapshot is busy" msgstr "" -#: cinder/scheduler/chance.py:47 cinder/scheduler/simple.py:91 -#: cinder/scheduler/simple.py:143 -msgid "Is the appropriate service running?" 
+#: cinder/volume/manager.py:556 +msgid "Failed to update usages deleting snapshot" msgstr "" -#: cinder/scheduler/chance.py:52 -msgid "Could not find another compute" +#: cinder/volume/manager.py:559 +#, python-format +msgid "snapshot %s: deleted successfully" msgstr "" -#: cinder/scheduler/driver.py:63 -#, python-format -msgid "Casted '%(method)s' to volume '%(host)s'" +#: cinder/volume/manager.py:579 +msgid "being attached by another instance" msgstr "" -#: cinder/scheduler/driver.py:80 -#, python-format -msgid "Casted '%(method)s' to compute '%(host)s'" +#: cinder/volume/manager.py:583 +msgid "being attached by another host" msgstr "" -#: cinder/scheduler/driver.py:89 -#, python-format -msgid "Casted '%(method)s' to network '%(host)s'" +#: cinder/volume/manager.py:587 +msgid "being attached by different mode" msgstr "" -#: cinder/scheduler/driver.py:107 -#, python-format -msgid "Casted '%(method)s' to %(topic)s '%(host)s'" +#: cinder/volume/manager.py:590 +msgid "status must be available or attaching" msgstr "" -#: cinder/scheduler/driver.py:181 -msgid "Must implement a fallback schedule" +#: cinder/volume/manager.py:698 +#, python-format +msgid "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" msgstr "" -#: cinder/scheduler/driver.py:185 -msgid "Driver must implement schedule_prep_resize" +#: cinder/volume/manager.py:760 +#, python-format +msgid "Unable to fetch connection information from backend: %(err)s" msgstr "" -#: cinder/scheduler/driver.py:190 -msgid "Driver must implement schedule_run_instance" +#: cinder/volume/manager.py:807 +#, python-format +msgid "Unable to terminate volume connection: %(err)s" msgstr "" -#: cinder/scheduler/driver.py:325 -msgid "Block migration can not be used with shared storage." +#: cinder/volume/manager.py:854 +msgid "failed to create new_volume on destination host" msgstr "" -#: cinder/scheduler/driver.py:330 -msgid "Live migration can not be used without shared storage." +#: cinder/volume/manager.py:857 +msgid "timeout creating new_volume on destination host" msgstr "" -#: cinder/scheduler/driver.py:367 cinder/scheduler/driver.py:457 +#: cinder/volume/manager.py:880 #, python-format -msgid "host %(dest)s is not compatible with original host %(src)s." +msgid "Failed to copy volume %(vol1)s to %(vol2)s" msgstr "" -#: cinder/scheduler/driver.py:416 +#: cinder/volume/manager.py:909 #, python-format msgid "" -"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " -"memory(host:%(avail)s <= instance:%(mem_inst)s)" +"migrate_volume_completion: completing migration for volume %(vol1)s " +"(temporary volume %(vol2)s" msgstr "" -#: cinder/scheduler/driver.py:472 +#: cinder/volume/manager.py:921 #, python-format msgid "" -"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " -"disk(host:%(available)s <= instance:%(necessary)s)" +"migrate_volume_completion is cleaning up an error for volume %(vol1)s " +"(temporary volume %(vol2)s" msgstr "" -#: cinder/scheduler/filter_scheduler.py:51 +#: cinder/volume/manager.py:940 #, python-format -msgid "No host selection for %s defined." 
+msgid "Failed to delete migration source vol %(vol)s: %(err)s" msgstr "" -#: cinder/scheduler/filter_scheduler.py:64 +#: cinder/volume/manager.py:976 #, python-format -msgid "Attempting to build %(num_instances)d instance(s)" +msgid "volume %s: calling driver migrate_volume" msgstr "" -#: cinder/scheduler/filter_scheduler.py:157 -msgid "Scheduler only understands Compute nodes (for now)" +#: cinder/volume/manager.py:1016 +msgid "Updating volume status" msgstr "" -#: cinder/scheduler/filter_scheduler.py:199 +#: cinder/volume/manager.py:1024 #, python-format -msgid "Filtered %(hosts)s" +msgid "" +"Unable to update stats, %(driver_name)s -%(driver_version)s " +"%(config_group)s driver is uninitialized." msgstr "" -#: cinder/scheduler/filter_scheduler.py:209 +#: cinder/volume/manager.py:1044 #, python-format -msgid "Weighted %(weighted_host)s" +msgid "Notification {%s} received" msgstr "" -#: cinder/scheduler/host_manager.py:144 +#: cinder/volume/manager.py:1091 #, python-format -msgid "Host filter fails for ignored host %(host)s" +msgid "" +"Quota exceeded for %(s_pid)s, tried to extend volume by %(s_size)sG, " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" msgstr "" -#: cinder/scheduler/host_manager.py:151 +#: cinder/volume/manager.py:1103 #, python-format -msgid "Host filter fails for non-forced host %(host)s" +msgid "volume %s: extending" msgstr "" -#: cinder/scheduler/host_manager.py:157 +#: cinder/volume/manager.py:1105 #, python-format -msgid "Host filter function %(func)s failed for %(host)s" +msgid "volume %s: extended successfully" msgstr "" -#: cinder/scheduler/host_manager.py:163 +#: cinder/volume/manager.py:1107 #, python-format -msgid "Host filter passes for %(host)s" +msgid "volume %s: Error trying to extend volume" msgstr "" -#: cinder/scheduler/host_manager.py:272 -#, python-format -msgid "Received %(service_name)s service update from %(host)s." +#: cinder/volume/manager.py:1169 +msgid "Failed to update usages while retyping volume." msgstr "" -#: cinder/scheduler/host_manager.py:313 -msgid "host_manager only implemented for 'compute'" +#: cinder/volume/manager.py:1170 +msgid "Failed to get old volume type quota reservations" msgstr "" -#: cinder/scheduler/host_manager.py:323 +#: cinder/volume/manager.py:1190 #, python-format -msgid "No service for compute ID %s" +msgid "Volume %s: retyped succesfully" msgstr "" -#: cinder/scheduler/manager.py:85 +#: cinder/volume/manager.py:1193 #, python-format msgid "" -"Driver Method %(driver_method_name)s missing: %(e)s. Reverting to " -"schedule()" +"Volume %s: driver error when trying to retype, falling back to generic " +"mechanism." msgstr "" -#: cinder/scheduler/manager.py:150 -#, python-format -msgid "Failed to schedule_%(method)s: %(ex)s" +#: cinder/volume/manager.py:1204 +msgid "Retype requires migration but is not allowed." msgstr "" -#: cinder/scheduler/manager.py:159 -#, python-format -msgid "Setting instance %(instance_uuid)s to %(state)s state." +#: cinder/volume/manager.py:1212 +msgid "Volume must not have snapshots." 
msgstr "" -#: cinder/scheduler/scheduler_options.py:66 +#: cinder/volume/qos_specs.py:57 #, python-format -msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgid "Valid consumer of QoS specs are: %s" msgstr "" -#: cinder/scheduler/scheduler_options.py:75 +#: cinder/volume/qos_specs.py:84 cinder/volume/qos_specs.py:105 +#: cinder/volume/qos_specs.py:155 cinder/volume/qos_specs.py:197 +#: cinder/volume/qos_specs.py:211 cinder/volume/qos_specs.py:225 +#: cinder/volume/volume_types.py:43 #, python-format -msgid "Could not decode scheduler options: '%(e)s'" +msgid "DB error: %s" msgstr "" -#: cinder/scheduler/simple.py:87 -msgid "Not enough allocatable CPU cores remaining" +#: cinder/volume/qos_specs.py:123 cinder/volume/qos_specs.py:140 +#: cinder/volume/qos_specs.py:272 cinder/volume/volume_types.py:52 +#: cinder/volume/volume_types.py:99 +msgid "id cannot be None" msgstr "" -#: cinder/scheduler/simple.py:137 -msgid "Not enough allocatable volume gigabytes remaining" +#: cinder/volume/qos_specs.py:156 +#, python-format +msgid "Failed to get all associations of qos specs %s" msgstr "" -#: cinder/scheduler/filters/core_filter.py:45 -msgid "VCPUs not set; assuming CPU collection broken" +#: cinder/volume/qos_specs.py:189 +#, python-format +msgid "" +"Type %(type_id)s is already associated with another qos specs: " +"%(qos_specs_id)s" msgstr "" -#: cinder/tests/fake_utils.py:72 +#: cinder/volume/qos_specs.py:198 #, python-format -msgid "Faking execution of cmd (subprocess): %s" +msgid "Failed to associate qos specs %(id)s with type: %(vol_type_id)s" msgstr "" -#: cinder/tests/fake_utils.py:80 +#: cinder/volume/qos_specs.py:212 #, python-format -msgid "Faked command matched %s" +msgid "Failed to disassociate qos specs %(id)s with type: %(vol_type_id)s" msgstr "" -#: cinder/tests/fake_utils.py:96 +#: cinder/volume/qos_specs.py:226 #, python-format -msgid "Faked command raised an exception %s" +msgid "Failed to disassociate qos specs %s." msgstr "" -#: cinder/tests/fake_utils.py:101 +#: cinder/volume/qos_specs.py:284 cinder/volume/volume_types.py:111 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/utils.py:144 #, python-format -msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgid "" +"Incorrect value error: %(blocksize)s, it may indicate that " +"'volume_dd_blocksize' was configured incorrectly. Fall back to default." msgstr "" -#: cinder/tests/fakelibvirt.py:784 -msgid "Please extend mock libvirt module to support flags" +#: cinder/volume/volume_types.py:130 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" msgstr "" -#: cinder/tests/fakelibvirt.py:790 -msgid "Please extend fake libvirt module to support this auth method" +#: cinder/volume/drivers/block_device.py:131 +#: cinder/volume/drivers/block_device.py:143 cinder/volume/drivers/lvm.py:654 +#: cinder/volume/drivers/lvm.py:669 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" msgstr "" -#: cinder/tests/test_compute.py:365 cinder/tests/test_compute.py:1419 +#: cinder/volume/drivers/block_device.py:157 cinder/volume/drivers/lvm.py:687 #, python-format -msgid "Running instances: %s" +msgid "" +"Skipping remove_export. 
No iscsi_target is presently exported for volume:" " %s" msgstr "" -#: cinder/tests/test_compute.py:371 +#: cinder/volume/drivers/block_device.py:183 cinder/volume/drivers/lvm.py:483 #, python-format -msgid "After terminating instances: %s" +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" msgstr "" -#: cinder/tests/test_compute.py:589 -msgid "Internal error" +#: cinder/volume/drivers/block_device.py:200 cinder/volume/drivers/lvm.py:504 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" msgstr "" -#: cinder/tests/test_compute.py:1430 +#: cinder/volume/drivers/block_device.py:272 cinder/volume/drivers/lvm.py:227 #, python-format -msgid "After force-killing instances: %s" +msgid "Performing secure delete on volume: %s" msgstr "" -#: cinder/tests/test_misc.py:92 +#: cinder/volume/drivers/block_device.py:287 #, python-format -msgid "" -"The following migrations are missing a downgrade:\n" -"\t%s" +msgid "Error: unrecognized volume_clear option: %s" msgstr "" -#: cinder/tests/test_cinder_manage.py:169 -msgid "id" +#: cinder/volume/drivers/block_device.py:311 cinder/volume/drivers/lvm.py:300 +#: cinder/volume/drivers/zadara.py:509 cinder/volume/drivers/nexenta/nfs.py:189 +#, python-format +msgid "Creating clone of volume: %s" msgstr "" -#: cinder/tests/test_cinder_manage.py:170 -msgid "IPv4" +#: cinder/volume/drivers/block_device.py:380 +msgid "No free disk" msgstr "" -#: cinder/tests/test_cinder_manage.py:171 -msgid "IPv6" +#: cinder/volume/drivers/block_device.py:393 +msgid "No free disk is large enough" msgstr "" -#: cinder/tests/test_cinder_manage.py:172 -msgid "start address" +#: cinder/volume/drivers/coraid.py:84 +#, python-format +msgid "Invalid ESM url scheme \"%s\". Only https is supported." msgstr "" -#: cinder/tests/test_cinder_manage.py:173 -msgid "DNS1" +#: cinder/volume/drivers/coraid.py:111 +msgid "Invalid REST handle name. Expected path." msgstr "" -#: cinder/tests/test_cinder_manage.py:174 -msgid "DNS2" +#: cinder/volume/drivers/coraid.py:134 +#, python-format +msgid "Call to json.loads() failed: %(ex)s. Response: %(resp)s" msgstr "" -#: cinder/tests/test_cinder_manage.py:175 -msgid "VlanID" +#: cinder/volume/drivers/coraid.py:224 +msgid "Session has expired. Relogin on ESM." msgstr "" -#: cinder/tests/test_cinder_manage.py:176 -msgid "project" +#: cinder/volume/drivers/coraid.py:244 +msgid "Reply is empty." msgstr "" -#: cinder/tests/test_cinder_manage.py:177 -msgid "uuid" +#: cinder/volume/drivers/coraid.py:246 +msgid "Error message is empty." msgstr "" -#: cinder/tests/test_volume.py:216 +#: cinder/volume/drivers/coraid.py:284 #, python-format -msgid "Target %s allocated" +msgid "Coraid Appliance ping failed: %s" msgstr "" -#: cinder/tests/test_volume.py:468 +#: cinder/volume/drivers/coraid.py:297 #, python-format -msgid "Cannot confirm exported volume id:%s." +msgid "Volume \"%(name)s\" created with VSX LUN \"%(lun)s\"" msgstr "" -#: cinder/tests/test_volume_types.py:58 +#: cinder/volume/drivers/coraid.py:311 #, python-format -msgid "Given data: %s" +msgid "Volume \"%s\" deleted." msgstr "" -#: cinder/tests/test_volume_types.py:59 +#: cinder/volume/drivers/coraid.py:315 #, python-format -msgid "Result data: %s" +msgid "Resize volume \"%(name)s\" to %(size)s GB." 
msgstr "" -#: cinder/tests/test_xenapi.py:626 +#: cinder/volume/drivers/coraid.py:319 #, python-format -msgid "Creating files in %s to simulate guest agent" +msgid "Repository for volume \"%(name)s\" found: \"%(repo)s\"" msgstr "" -#: cinder/tests/test_xenapi.py:637 +#: cinder/volume/drivers/coraid.py:333 #, python-format -msgid "Removing simulated guest agent files in %s" +msgid "Volume \"%(name)s\" resized. New size is %(size)s GB." msgstr "" -#: cinder/tests/api/openstack/compute/test_servers.py:2144 -#, python-format -msgid "Quota exceeded: code=%(code)s" +#: cinder/volume/drivers/coraid.py:385 +msgid "Cannot create clone volume in different repository." msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:57 +#: cinder/volume/drivers/coraid.py:505 #, python-format -msgid "_create: %s" +msgid "Initialize connection %(shelf)s/%(lun)s for %(name)s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:66 +#: cinder/volume/drivers/eqlx.py:139 #, python-format -msgid "_delete: %s" +msgid "" +"CLI output\n" +"%s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:75 -#, python-format -msgid "_get: %s" +#: cinder/volume/drivers/eqlx.py:154 +msgid "Reading CLI MOTD" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:85 +#: cinder/volume/drivers/eqlx.py:158 #, python-format -msgid "_get_all: %s" +msgid "Setting CLI terminal width: '%s'" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:125 +#: cinder/volume/drivers/eqlx.py:162 #, python-format -msgid "test_snapshot_create: param=%s" +msgid "Sending CLI command: '%s'" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:134 -#, python-format -msgid "test_snapshot_create: resp_dict=%s" +#: cinder/volume/drivers/eqlx.py:169 +msgid "Error executing EQL command" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:156 +#: cinder/volume/drivers/eqlx.py:199 #, python-format -msgid "test_snapshot_create_force: param=%s" +msgid "EQL-driver: executing \"%s\"" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:165 +#: cinder/volume/drivers/eqlx.py:208 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:383 #, python-format -msgid "test_snapshot_create_force: resp_dict=%s" +msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:205 +#: cinder/volume/drivers/eqlx.py:215 cinder/volume/drivers/san/san.py:149 +#, fuzzy, python-format +msgid "Error running SSH command: %s" +msgstr "Neočekivana greška prilikom pokretanja komande." 
+ +#: cinder/volume/drivers/eqlx.py:282 #, python-format -msgid "test_snapshot_show: resp=%s" +msgid "Volume %s does not exist, it may have already been deleted" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:231 +#: cinder/volume/drivers/eqlx.py:300 #, python-format -msgid "test_snapshot_detail: resp_dict=%s" +msgid "EQL-driver: Setup is complete, group IP is %s" msgstr "" -#: cinder/tests/integrated/test_login.py:31 -#, python-format -msgid "flavor: %s" +#: cinder/volume/drivers/eqlx.py:304 +msgid "Failed to set up the Dell EqualLogic driver" msgstr "" -#: cinder/tests/integrated/api/client.py:38 +#: cinder/volume/drivers/eqlx.py:320 #, python-format -msgid "" -"%(message)s\n" -"Status Code: %(_status)s\n" -"Body: %(_body)s" +msgid "Failed to create volume %s" msgstr "" -#: cinder/tests/integrated/api/client.py:47 -msgid "Authentication error" +#: cinder/volume/drivers/eqlx.py:329 +#, python-format +msgid "Volume %s was not found while trying to delete it" msgstr "" -#: cinder/tests/integrated/api/client.py:55 -msgid "Authorization error" +#: cinder/volume/drivers/eqlx.py:333 +#, python-format +msgid "Failed to delete volume %s" msgstr "" -#: cinder/tests/integrated/api/client.py:63 -msgid "Item not found" +#: cinder/volume/drivers/eqlx.py:348 +#, python-format +msgid "Failed to create snapshot of volume %s" msgstr "" -#: cinder/tests/integrated/api/client.py:105 +#: cinder/volume/drivers/eqlx.py:361 #, python-format -msgid "Doing %(method)s on %(relative_url)s" +msgid "Failed to create volume from snapshot %s" msgstr "" -#: cinder/tests/integrated/api/client.py:107 +#: cinder/volume/drivers/eqlx.py:374 #, python-format -msgid "Body: %s" +msgid "Failed to create clone of volume %s" msgstr "" -#: cinder/tests/integrated/api/client.py:125 +#: cinder/volume/drivers/eqlx.py:384 #, python-format -msgid "%(auth_uri)s => code %(http_status)s" +msgid "Failed to delete snapshot %(snap)s of volume %(vol)s" msgstr "" -#: cinder/tests/integrated/api/client.py:151 +#: cinder/volume/drivers/eqlx.py:405 #, python-format -msgid "%(relative_uri)s => code %(http_status)s" +msgid "Failed to initialize connection to volume %s" msgstr "" -#: cinder/tests/integrated/api/client.py:161 -msgid "Unexpected status code" +#: cinder/volume/drivers/eqlx.py:415 +#, python-format +msgid "Failed to terminate connection to volume %s" msgstr "" -#: cinder/tests/integrated/api/client.py:168 +#: cinder/volume/drivers/eqlx.py:436 #, python-format -msgid "Decoding JSON: %s" +msgid "Volume %s was not found; it may have been deleted" msgstr "" -#: cinder/tests/rpc/common.py:133 +#: cinder/volume/drivers/eqlx.py:440 #, python-format -msgid "Nested received %(queue)s, %(value)s" +msgid "Failed to ensure export of volume %s" msgstr "" -#: cinder/tests/rpc/common.py:142 +#: cinder/volume/drivers/eqlx.py:459 #, python-format -msgid "Nested return %s" +msgid "Failed to extend_volume %(name)s from %(current_size)sGB to %(new_size)sGB" msgstr "" -#: cinder/tests/rpc/common.py:160 -msgid "RPC backend does not support timeouts" +#: cinder/volume/drivers/glusterfs.py:86 +#, python-format +msgid "There's no Gluster config file configured (%s)" msgstr "" -#: cinder/tests/rpc/common.py:227 cinder/tests/rpc/common.py:233 +#: cinder/volume/drivers/glusterfs.py:91 #, python-format -msgid "Received %s" +msgid "Gluster config file at %(config)s doesn't exist" msgstr "" -#: cinder/virt/connection.py:85 -msgid "Failed to open connection to the hypervisor" +#: cinder/volume/drivers/glusterfs.py:103 +msgid "mount.glusterfs 
is not installed" msgstr "" -#: cinder/virt/fake.py:270 cinder/virt/xenapi_conn.py:396 -#: cinder/virt/baremetal/proxy.py:716 cinder/virt/libvirt/connection.py:2045 +#: cinder/volume/drivers/glusterfs.py:161 #, python-format -msgid "Compute_service record created for %s " +msgid "Cloning volume %(src)s to volume %(dst)s" msgstr "" -#: cinder/virt/fake.py:273 cinder/virt/xenapi_conn.py:399 -#: cinder/virt/baremetal/proxy.py:720 cinder/virt/libvirt/connection.py:2048 -#, python-format -msgid "Compute_service record updated for %s " +#: cinder/volume/drivers/glusterfs.py:166 +msgid "Volume status must be 'available'." msgstr "" -#: cinder/virt/firewall.py:130 +#: cinder/volume/drivers/glusterfs.py:202 cinder/volume/drivers/nfs.py:122 +#: cinder/volume/drivers/netapp/nfs.py:753 #, python-format -msgid "Attempted to unfilter instance %s which is not filtered" +msgid "casted to %s" msgstr "" -#: cinder/virt/firewall.py:137 -#, python-format -msgid "Filters added to instance %s" +#: cinder/volume/drivers/glusterfs.py:216 +msgid "Snapshot status must be \"available\" to clone." msgstr "" -#: cinder/virt/firewall.py:139 -msgid "Provider Firewall Rules refreshed" +#: cinder/volume/drivers/glusterfs.py:238 +#, python-format +msgid "snapshot: %(snap)s, volume: %(vol)s, volume_size: %(size)s" msgstr "" -#: cinder/virt/firewall.py:291 +#: cinder/volume/drivers/glusterfs.py:257 #, python-format -msgid "Adding security group rule: %r" +msgid "will copy from snapshot at %s" msgstr "" -#: cinder/virt/firewall.py:403 cinder/virt/xenapi/firewall.py:87 +#: cinder/volume/drivers/glusterfs.py:275 cinder/volume/drivers/nfs.py:172 #, python-format -msgid "Adding provider rule: %s" +msgid "Volume %s does not have provider_location specified, skipping" msgstr "" -#: cinder/virt/images.py:86 -msgid "'qemu-img info' parsing failed." +#: cinder/volume/drivers/glusterfs.py:373 +#, python-format +msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" msgstr "" -#: cinder/virt/images.py:92 +#: cinder/volume/drivers/glusterfs.py:403 #, python-format -msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgid "nova call result: %s" msgstr "" -#: cinder/virt/images.py:104 -#, python-format -msgid "Converted to raw, but format is now %s" +#: cinder/volume/drivers/glusterfs.py:405 +msgid "Call to Nova to create snapshot failed" msgstr "" -#: cinder/virt/vmwareapi_conn.py:105 -msgid "" -"Must specify vmwareapi_host_ip,vmwareapi_host_username and " -"vmwareapi_host_password to useconnection_type=vmwareapi" +#: cinder/volume/drivers/glusterfs.py:427 +msgid "Nova returned \"error\" status while creating snapshot." msgstr "" -#: cinder/virt/vmwareapi_conn.py:276 +#: cinder/volume/drivers/glusterfs.py:431 #, python-format -msgid "In vmwareapi:_create_session, got this exception: %s" +msgid "Status of snapshot %(id)s is now %(status)s" msgstr "" -#: cinder/virt/vmwareapi_conn.py:359 +#: cinder/volume/drivers/glusterfs.py:444 #, python-format -msgid "In vmwareapi:_call_method, got this exception: %s" +msgid "Timed out while waiting for Nova update for creation of snapshot %s." 
msgstr "" -#: cinder/virt/vmwareapi_conn.py:398 +#: cinder/volume/drivers/glusterfs.py:456 #, python-format -msgid "Task [%(task_name)s] %(task_ref)s status: success" +msgid "create snapshot: %s" msgstr "" -#: cinder/virt/vmwareapi_conn.py:404 +#: cinder/volume/drivers/glusterfs.py:457 #, python-format -msgid "Task [%(task_name)s] %(task_ref)s status: error %(error_info)s" +msgid "volume id: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:532 +msgid "'active' must be present when writing snap_info." msgstr "" -#: cinder/virt/vmwareapi_conn.py:409 +#: cinder/volume/drivers/glusterfs.py:562 #, python-format -msgid "In vmwareapi:_poll_task, Got this error %s" +msgid "deleting snapshot %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:566 +msgid "Volume status must be \"available\" or \"in-use\"." msgstr "" -#: cinder/virt/xenapi_conn.py:140 +#: cinder/volume/drivers/glusterfs.py:582 +#, python-format msgid "" -"Must specify xenapi_connection_url, xenapi_connection_username " -"(optionally), and xenapi_connection_password to use " -"connection_type=xenapi" +"Snapshot record for %s is not present, allowing snapshot_delete to " +"proceed." msgstr "" -#: cinder/virt/xenapi_conn.py:329 cinder/virt/libvirt/connection.py:472 -msgid "Could not determine iscsi initiator name" +#: cinder/volume/drivers/glusterfs.py:587 +#, python-format +msgid "snapshot_file for this snap is %s" msgstr "" -#: cinder/virt/xenapi_conn.py:460 -msgid "Host startup on XenServer is not supported." +#: cinder/volume/drivers/glusterfs.py:608 +#, python-format +msgid "No base file found for %s." msgstr "" -#: cinder/virt/xenapi_conn.py:489 -msgid "Unable to log in to XenAPI (is the Dom0 disk full?)" +#: cinder/volume/drivers/glusterfs.py:625 +#, python-format +msgid "No %(base_id)s found for %(file)s" msgstr "" -#: cinder/virt/xenapi_conn.py:527 -msgid "Host is member of a pool, but DB says otherwise" +#: cinder/volume/drivers/glusterfs.py:680 +#, python-format +msgid "No file found with %s as backing file." msgstr "" -#: cinder/virt/xenapi_conn.py:599 cinder/virt/xenapi_conn.py:612 +#: cinder/volume/drivers/glusterfs.py:690 #, python-format -msgid "Got exception: %s" +msgid "No snap found with %s as backing file." msgstr "" -#: cinder/virt/baremetal/dom.py:93 -msgid "No domains exist." +#: cinder/volume/drivers/glusterfs.py:701 +#, python-format +msgid "No file depends on %s." msgstr "" -#: cinder/virt/baremetal/dom.py:95 +#: cinder/volume/drivers/glusterfs.py:727 #, python-format -msgid "============= initial domains =========== : %s" +msgid "Check condition failed: %s expected to be None." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:778 +msgid "Call to Nova delete snapshot failed" msgstr "" -#: cinder/virt/baremetal/dom.py:99 -msgid "Building domain: to be removed" +#: cinder/volume/drivers/glusterfs.py:796 +#, python-format +msgid "status of snapshot %s is still \"deleting\"... waiting" msgstr "" -#: cinder/virt/baremetal/dom.py:103 -msgid "Not running domain: remove" +#: cinder/volume/drivers/glusterfs.py:802 +#, python-format +msgid "Unable to delete snapshot %(id)s, status: %(status)s." msgstr "" -#: cinder/virt/baremetal/dom.py:111 -msgid "domain running on an unknown node: discarded" +#: cinder/volume/drivers/glusterfs.py:815 +#, python-format +msgid "Timed out while waiting for Nova update for deletion of snapshot %(id)s." 
msgstr "" -#: cinder/virt/baremetal/dom.py:127 +#: cinder/volume/drivers/glusterfs.py:904 #, python-format -msgid "No such domain (%s)" +msgid "%s must be a valid raw or qcow2 image." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:967 +msgid "Extend volume is only supported for this driver when no snapshots exist." msgstr "" -#: cinder/virt/baremetal/dom.py:134 +#: cinder/volume/drivers/glusterfs.py:975 #, python-format -msgid "Failed power down Bare-metal node %s" +msgid "Unrecognized backing format: %s" msgstr "" -#: cinder/virt/baremetal/dom.py:143 -msgid "deactivate -> activate fails" +#: cinder/volume/drivers/glusterfs.py:990 +#, python-format +msgid "creating new volume at %s" msgstr "" -#: cinder/virt/baremetal/dom.py:153 -msgid "destroy_domain: no such domain" +#: cinder/volume/drivers/glusterfs.py:993 +#, python-format +msgid "file already exists at %s" msgstr "" -#: cinder/virt/baremetal/dom.py:154 +#: cinder/volume/drivers/glusterfs.py:1019 cinder/volume/drivers/nfs.py:159 #, python-format -msgid "No such domain %s" +msgid "Exception during mounting %s" msgstr "" -#: cinder/virt/baremetal/dom.py:161 +#: cinder/volume/drivers/glusterfs.py:1021 #, python-format -msgid "Domains: %s" +msgid "Available shares: %s" msgstr "" -#: cinder/virt/baremetal/dom.py:163 +#: cinder/volume/drivers/glusterfs.py:1038 #, python-format -msgid "Nodes: %s" +msgid "" +"GlusterFS share at %(dir)s is not writable by the Cinder volume service. " +"Snapshot operations will not be supported." msgstr "" -#: cinder/virt/baremetal/dom.py:166 +#: cinder/volume/drivers/gpfs.py:96 #, python-format -msgid "After storing domains: %s" +msgid "GPFS is not active. Detailed output: %s" msgstr "" -#: cinder/virt/baremetal/dom.py:169 -msgid "deactivation/removing domain failed" +#: cinder/volume/drivers/gpfs.py:97 +#, python-format +msgid "GPFS is not running - state: %s" msgstr "" -#: cinder/virt/baremetal/dom.py:176 -msgid "===== Domain is being created =====" +#: cinder/volume/drivers/gpfs.py:140 +msgid "Option gpfs_mount_point_base is not set correctly." msgstr "" -#: cinder/virt/baremetal/dom.py:179 -msgid "Same domain name already exists" +#: cinder/volume/drivers/gpfs.py:147 +msgid "Option gpfs_images_share_mode is not set correctly." msgstr "" -#: cinder/virt/baremetal/dom.py:181 -msgid "create_domain: before get_idle_node" +#: cinder/volume/drivers/gpfs.py:153 +msgid "Option gpfs_images_dir is not set correctly." msgstr "" -#: cinder/virt/baremetal/dom.py:198 +#: cinder/volume/drivers/gpfs.py:160 #, python-format -msgid "Created new domain: %s" +msgid "" +"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " +"belong to different file systems" msgstr "" -#: cinder/virt/baremetal/dom.py:213 +#: cinder/volume/drivers/gpfs.py:169 #, python-format -msgid "Failed to boot Bare-metal node %s" +msgid "" +"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in " +"cluster daemon level %(cur)s - must be at least at level %(min)s." msgstr "" -#: cinder/virt/baremetal/dom.py:222 -msgid "No such domain exists" +#: cinder/volume/drivers/gpfs.py:183 +#, python-format +msgid "%s must be an absolute path." msgstr "" -#: cinder/virt/baremetal/dom.py:226 +#: cinder/volume/drivers/gpfs.py:188 #, python-format -msgid "change_domain_state: to new state %s" +msgid "%s is not a directory." 
msgstr "" -#: cinder/virt/baremetal/dom.py:233 +#: cinder/volume/drivers/gpfs.py:197 #, python-format -msgid "Stored fake domains to the file: %s" +msgid "" +"The GPFS filesystem %(fs)s is not at the required release level. Current" +" level is %(cur)s, must be at least %(min)s." msgstr "" -#: cinder/virt/baremetal/dom.py:244 -msgid "domain does not exist" +#: cinder/volume/drivers/gpfs.py:556 +#, python-format +msgid "Failed to resize volume %(volume_id)s, error: %(error)s" msgstr "" -#: cinder/virt/baremetal/nodes.py:42 +#: cinder/volume/drivers/gpfs.py:604 #, python-format -msgid "Unknown baremetal driver %(d)s" +msgid "mkfs failed on volume %(vol)s, error message was: %(err)s" msgstr "" -#: cinder/virt/baremetal/proxy.py:148 +#: cinder/volume/drivers/gpfs.py:637 #, python-format -msgid "Error encountered when destroying instance '%(name)s': %(ex)s" +msgid "" +"%s cannot be accessed. Verify that GPFS is active and file system is " +"mounted." msgstr "" -#: cinder/virt/baremetal/proxy.py:162 +#: cinder/volume/drivers/lvm.py:189 #, python-format -msgid "instance %(instance_name)s: deleting instance files %(target)s" +msgid "Unabled to delete due to existing snapshot for volume: %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:189 +#: cinder/volume/drivers/lvm.py:215 #, python-format -msgid "instance %s: rebooted" +msgid "Volume device file path %s does not exist." msgstr "" -#: cinder/virt/baremetal/proxy.py:192 -msgid "_wait_for_reboot failed" +#: cinder/volume/drivers/lvm.py:221 +#, python-format +msgid "Size for volume: %s not found, cannot secure delete." msgstr "" -#: cinder/virt/baremetal/proxy.py:222 +#: cinder/volume/drivers/lvm.py:262 #, python-format -msgid "instance %s: rescued" +msgid "snapshot: %s not found, skipping delete operations" msgstr "" -#: cinder/virt/baremetal/proxy.py:225 -msgid "_wait_for_rescue failed" +#: cinder/volume/drivers/lvm.py:361 +#, python-format +msgid "Unable to update stats on non-initialized Volume Group: %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:242 -msgid "<============= spawn of baremetal =============>" +#: cinder/volume/drivers/lvm.py:462 +#, python-format +msgid "Error creating iSCSI target, retrying creation for target: %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:255 +#: cinder/volume/drivers/lvm.py:482 #, python-format -msgid "instance %s: is building" +msgid "volume_info:%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:518 +msgid "Detected inconsistency in provider_location id" msgstr "" -#: cinder/virt/baremetal/proxy.py:260 -msgid "Key is injected but instance is not running yet" +#: cinder/volume/drivers/lvm.py:519 cinder/volume/drivers/lvm.py:724 +#: cinder/volume/drivers/huawei/rest_common.py:1225 +#, python-format +msgid "%s" msgstr "" -#: cinder/virt/baremetal/proxy.py:265 +#: cinder/volume/drivers/lvm.py:573 #, python-format -msgid "instance %s: booted" +msgid "Symbolic link %s not found" msgstr "" -#: cinder/virt/baremetal/proxy.py:268 +#: cinder/volume/drivers/nfs.py:109 +msgid "Driver specific implementation needs to return mount_point_base." 
+msgstr "" + +#: cinder/volume/drivers/nfs.py:263 #, python-format -msgid "~~~~~~ current state = %s ~~~~~~" +msgid "Expected volume size was %d" msgstr "" -#: cinder/virt/baremetal/proxy.py:269 +#: cinder/volume/drivers/nfs.py:264 #, python-format -msgid "instance %s spawned successfully" +msgid " but size is now %d" msgstr "" -#: cinder/virt/baremetal/proxy.py:272 +#: cinder/volume/drivers/nfs.py:361 #, python-format -msgid "instance %s:not booted" +msgid "%s is already mounted" msgstr "" -#: cinder/virt/baremetal/proxy.py:274 -msgid "Bremetal assignment is overcommitted." +#: cinder/volume/drivers/nfs.py:421 +#, python-format +msgid "There's no NFS config file configured (%s)" msgstr "" -#: cinder/virt/baremetal/proxy.py:354 +#: cinder/volume/drivers/nfs.py:426 #, python-format -msgid "instance %s: Creating image" +msgid "NFS config file at %(config)s doesn't exist" msgstr "" -#: cinder/virt/baremetal/proxy.py:473 +#: cinder/volume/drivers/nfs.py:431 #, python-format -msgid "instance %(inst_name)s: injecting %(injection)s into image %(img_id)s" +msgid "NFS config 'nfs_oversub_ratio' invalid. Must be > 0: %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:484 +#: cinder/volume/drivers/nfs.py:439 #, python-format -msgid "" -"instance %(inst_name)s: ignoring error injecting data into image " -"%(img_id)s (%(e)s)" +msgid "NFS config 'nfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:529 +#: cinder/volume/drivers/nfs.py:493 #, python-format -msgid "instance %s: starting toXML method" +msgid "Selected %s as target nfs share." msgstr "" -#: cinder/virt/baremetal/proxy.py:531 +#: cinder/volume/drivers/nfs.py:526 #, python-format -msgid "instance %s: finished toXML method" +msgid "%s is above nfs_used_ratio" msgstr "" -#: cinder/virt/baremetal/proxy.py:574 cinder/virt/libvirt/connection.py:1815 -msgid "" -"Cannot get the number of cpu, because this function is not implemented " -"for this platform. This error can be safely ignored for now." +#: cinder/volume/drivers/nfs.py:529 +#, python-format +msgid "%s is above nfs_oversub_ratio" msgstr "" -#: cinder/virt/baremetal/proxy.py:714 +#: cinder/volume/drivers/nfs.py:532 #, python-format -msgid "#### RLK: cpu_arch = %s " +msgid "%s reserved space is above nfs_oversub_ratio" msgstr "" -#: cinder/virt/baremetal/proxy.py:746 -msgid "Updating!" +#: cinder/volume/drivers/rbd.py:160 +#, python-format +msgid "Invalid argument - whence=%s not supported" msgstr "" -#: cinder/virt/baremetal/proxy.py:773 cinder/virt/libvirt/connection.py:2609 -#: cinder/virt/xenapi/host.py:129 -msgid "Updating host stats" +#: cinder/volume/drivers/rbd.py:164 +msgid "Invalid argument" msgstr "" -#: cinder/virt/baremetal/tilera.py:185 -msgid "free_node...." 
+#: cinder/volume/drivers/rbd.py:183 +msgid "fileno() not supported by RBD()" msgstr "" -#: cinder/virt/baremetal/tilera.py:216 +#: cinder/volume/drivers/rbd.py:210 #, python-format -msgid "deactivate_node is called for node_id = %(id)s node_ip = %(ip)s" +msgid "error opening rbd image %s" msgstr "" -#: cinder/virt/baremetal/tilera.py:221 -msgid "status of node is set to 0" +#: cinder/volume/drivers/rbd.py:259 +msgid "rados and rbd python libraries not found" msgstr "" -#: cinder/virt/baremetal/tilera.py:232 -msgid "rootfs is already removed" +#: cinder/volume/drivers/rbd.py:265 +msgid "error connecting to ceph cluster" msgstr "" -#: cinder/virt/baremetal/tilera.py:264 -msgid "Before ping to the bare-metal node" +#: cinder/volume/drivers/rbd.py:346 cinder/volume/drivers/sheepdog.py:178 +msgid "error refreshing volume stats" msgstr "" -#: cinder/virt/baremetal/tilera.py:275 +#: cinder/volume/drivers/rbd.py:377 #, python-format -msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is ready" +msgid "clone depth exceeds limit of %s" msgstr "" -#: cinder/virt/baremetal/tilera.py:279 +#: cinder/volume/drivers/rbd.py:411 #, python-format -msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is not ready, out_msg=%(out_msg)s" +msgid "maximum clone depth (%d) has been reached - flattening source volume" msgstr "" -#: cinder/virt/baremetal/tilera.py:292 -msgid "Noting to do for tilera nodes: vmlinux is in CF" +#: cinder/volume/drivers/rbd.py:423 +#, python-format +msgid "flattening source volume %s" msgstr "" -#: cinder/virt/baremetal/tilera.py:316 -msgid "activate_node" +#: cinder/volume/drivers/rbd.py:435 +#, python-format +msgid "creating snapshot='%s'" msgstr "" -#: cinder/virt/baremetal/tilera.py:330 -msgid "Node is unknown error state." +#: cinder/volume/drivers/rbd.py:445 +#, python-format +msgid "cloning '%(src_vol)s@%(src_snap)s' to '%(dest)s'" msgstr "" -#: cinder/virt/disk/api.py:165 -msgid "no capable image handler configured" +#: cinder/volume/drivers/rbd.py:459 +msgid "clone created successfully" msgstr "" -#: cinder/virt/disk/api.py:178 +#: cinder/volume/drivers/rbd.py:468 #, python-format -msgid "unknown disk image handler: %s" +msgid "creating volume '%s'" msgstr "" -#: cinder/virt/disk/api.py:189 -msgid "image already mounted" +#: cinder/volume/drivers/rbd.py:484 +#, python-format +msgid "flattening %(pool)s/%(img)s" msgstr "" -#: cinder/virt/disk/api.py:276 cinder/virt/disk/guestfs.py:64 -#: cinder/virt/disk/guestfs.py:78 cinder/virt/disk/mount.py:100 +#: cinder/volume/drivers/rbd.py:490 #, python-format -msgid "Failed to mount filesystem: %s" +msgid "cloning %(pool)s/%(img)s@%(snap)s to %(dst)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:527 +msgid "volume has no backup snaps" msgstr "" -#: cinder/virt/disk/api.py:291 +#: cinder/volume/drivers/rbd.py:550 #, python-format -msgid "Failed to remove container: %s" +msgid "volume %s is not a clone" msgstr "" -#: cinder/virt/disk/api.py:441 +#: cinder/volume/drivers/rbd.py:568 #, python-format -msgid "User %(username)s not found in password file." +msgid "deleting parent snapshot %s" msgstr "" -#: cinder/virt/disk/api.py:457 +#: cinder/volume/drivers/rbd.py:579 #, python-format -msgid "User %(username)s not found in shadow file." 
+msgid "deleting parent %s" msgstr "" -#: cinder/virt/disk/guestfs.py:39 +#: cinder/volume/drivers/rbd.py:593 #, python-format -msgid "unsupported partition: %s" +msgid "volume %s no longer exists in backend" msgstr "" -#: cinder/virt/disk/guestfs.py:77 -msgid "unknown guestmount error" +#: cinder/volume/drivers/rbd.py:609 +msgid "volume has clone snapshot(s)" msgstr "" -#: cinder/virt/disk/loop.py:30 +#: cinder/volume/drivers/rbd.py:625 #, python-format -msgid "Could not attach image to loopback: %s" +msgid "deleting rbd volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:629 +msgid "" +"ImageBusy error raised while deleting rbd volume. This may have been " +"caused by a connection from a client that has crashed and, if so, may be " +"resolved by retrying the delete after 30 seconds has elapsed." msgstr "" -#: cinder/virt/disk/mount.py:76 -msgid "no partitions found" +#: cinder/volume/drivers/rbd.py:642 +msgid "volume is a clone so cleaning references" msgstr "" -#: cinder/virt/disk/mount.py:77 +#: cinder/volume/drivers/rbd.py:696 #, python-format -msgid "Failed to map partitions: %s" +msgid "connection data: %s" msgstr "" -#: cinder/virt/disk/nbd.py:58 -msgid "nbd unavailable: module not loaded" +#: cinder/volume/drivers/rbd.py:705 +msgid "Not stored in rbd" msgstr "" -#: cinder/virt/disk/nbd.py:63 -msgid "No free nbd devices" +#: cinder/volume/drivers/rbd.py:709 +msgid "Blank components" msgstr "" -#: cinder/virt/disk/nbd.py:81 -#, python-format -msgid "qemu-nbd error: %s" +#: cinder/volume/drivers/rbd.py:712 +msgid "Not an rbd snapshot" msgstr "" -#: cinder/virt/disk/nbd.py:93 +#: cinder/volume/drivers/rbd.py:724 #, python-format -msgid "nbd device %s did not show up" +msgid "not cloneable: %s" msgstr "" -#: cinder/virt/libvirt/connection.py:265 +#: cinder/volume/drivers/rbd.py:728 #, python-format -msgid "Connecting to libvirt: %s" +msgid "%s is in a different ceph cluster" msgstr "" -#: cinder/virt/libvirt/connection.py:286 -msgid "Connection to libvirt broke" +#: cinder/volume/drivers/rbd.py:733 +msgid "rbd image clone requires image format to be 'raw' but image {0} is '{1}'" msgstr "" -#: cinder/virt/libvirt/connection.py:388 +#: cinder/volume/drivers/rbd.py:747 #, python-format -msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s" +msgid "Unable to open image %(loc)s: %(err)s" msgstr "" -#: cinder/virt/libvirt/connection.py:400 -#, python-format -msgid "" -"Error from libvirt during saved instance removal. Code=%(errcode)s " -"Error=%(e)s" +#: cinder/volume/drivers/rbd.py:817 +msgid "volume backup complete." msgstr "" -#: cinder/virt/libvirt/connection.py:411 -#, python-format -msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s" +#: cinder/volume/drivers/rbd.py:830 +msgid "volume restore complete." msgstr "" -#: cinder/virt/libvirt/connection.py:423 -msgid "Instance destroyed successfully." +#: cinder/volume/drivers/rbd.py:840 cinder/volume/drivers/sheepdog.py:195 +#, python-format +msgid "Failed to Extend Volume %(volname)s" msgstr "" -#: cinder/virt/libvirt/connection.py:435 +#: cinder/volume/drivers/rbd.py:845 cinder/volume/drivers/sheepdog.py:200 +#: cinder/volume/drivers/windows/windows.py:223 #, python-format -msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s" +msgid "Extend volume from %(old_size)s GB to %(new_size)s GB." 
msgstr "" -#: cinder/virt/libvirt/connection.py:461 -#, python-format -msgid "Deleting instance files %(target)s" +#: cinder/volume/drivers/scality.py:67 +msgid "Value required for 'scality_sofs_config'" msgstr "" -#: cinder/virt/libvirt/connection.py:554 -msgid "attaching LXC block device" +#: cinder/volume/drivers/scality.py:78 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" msgstr "" -#: cinder/virt/libvirt/connection.py:567 -msgid "detaching LXC block device" +#: cinder/volume/drivers/scality.py:84 +msgid "Cannot execute /sbin/mount.sofs" msgstr "" -#: cinder/virt/libvirt/connection.py:692 -msgid "Instance soft rebooted successfully." +#: cinder/volume/drivers/scality.py:105 +msgid "Cannot mount Scality SOFS, check syslog for errors" msgstr "" -#: cinder/virt/libvirt/connection.py:696 -msgid "Failed to soft reboot instance." +#: cinder/volume/drivers/scality.py:139 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" msgstr "" -#: cinder/virt/libvirt/connection.py:725 -msgid "Instance shutdown successfully." +#: cinder/volume/drivers/sheepdog.py:59 +#, python-format +msgid "Sheepdog is not working: %s" msgstr "" -#: cinder/virt/libvirt/connection.py:761 cinder/virt/libvirt/connection.py:905 -msgid "During reboot, instance disappeared." +#: cinder/volume/drivers/sheepdog.py:64 +msgid "Sheepdog is not working" msgstr "" -#: cinder/virt/libvirt/connection.py:766 -msgid "Instance rebooted successfully." +#: cinder/volume/drivers/solidfire.py:144 +#, python-format +msgid "Payload for SolidFire API call: %s" msgstr "" -#: cinder/virt/libvirt/connection.py:867 cinder/virt/xenapi/vmops.py:1358 +#: cinder/volume/drivers/solidfire.py:151 #, python-format msgid "" -"Found %(migration_count)d unconfirmed migrations older than " -"%(confirm_window)d seconds" +"Failed to make httplib connection SolidFire Cluster: %s (verify san_ip " +"settings)" msgstr "" -#: cinder/virt/libvirt/connection.py:871 +#: cinder/volume/drivers/solidfire.py:154 #, python-format -msgid "Automatically confirming migration %d" +msgid "Failed to make httplib connection: %s" msgstr "" -#: cinder/virt/libvirt/connection.py:896 -msgid "Instance is running" +#: cinder/volume/drivers/solidfire.py:161 +#, python-format +msgid "" +"Request to SolidFire cluster returned bad status: %(status)s / %(reason)s" +" (check san_login/san_password settings)" msgstr "" -#: cinder/virt/libvirt/connection.py:910 -msgid "Instance spawned successfully." 
+#: cinder/volume/drivers/solidfire.py:166 +#, python-format +msgid "HTTP request failed with status: %(status)s and reason: %(reason)s" msgstr "" -#: cinder/virt/libvirt/connection.py:926 +#: cinder/volume/drivers/solidfire.py:177 #, python-format -msgid "data: %(data)r, fpath: %(fpath)r" +msgid "Call to json.loads() raised an exception: %s" msgstr "" -#: cinder/virt/libvirt/connection.py:978 -msgid "Guest does not have a console available" +#: cinder/volume/drivers/solidfire.py:183 +#, python-format +msgid "Results of SolidFire API call: %s" msgstr "" -#: cinder/virt/libvirt/connection.py:1020 +#: cinder/volume/drivers/solidfire.py:187 #, python-format -msgid "Path '%(path)s' supports direct I/O" +msgid "Clone operation encountered: %s" msgstr "" -#: cinder/virt/libvirt/connection.py:1024 +#: cinder/volume/drivers/solidfire.py:189 #, python-format -msgid "Path '%(path)s' does not support direct I/O: '%(ex)s'" +msgid "Waiting for outstanding operation before retrying snapshot: %s" msgstr "" -#: cinder/virt/libvirt/connection.py:1028 cinder/virt/libvirt/connection.py:1032 +#: cinder/volume/drivers/solidfire.py:195 #, python-format -msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'" +msgid "Detected xDBVersionMismatch, retry %s of 5" msgstr "" -#: cinder/virt/libvirt/connection.py:1153 -msgid "Creating image" +#: cinder/volume/drivers/solidfire.py:202 +#: cinder/volume/drivers/solidfire.py:271 +#: cinder/volume/drivers/solidfire.py:366 +#, python-format +msgid "API response: %s" msgstr "" -#: cinder/virt/libvirt/connection.py:1339 +#: cinder/volume/drivers/solidfire.py:222 #, python-format -msgid "Injecting %(injection)s into image %(img_id)s" +msgid "Found solidfire account: %s" msgstr "" -#: cinder/virt/libvirt/connection.py:1349 +#: cinder/volume/drivers/solidfire.py:253 #, python-format -msgid "Ignoring error injecting data into image %(img_id)s (%(e)s)" +msgid "solidfire account: %s does not exist; creating it..." msgstr "" -#: cinder/virt/libvirt/connection.py:1381 +#: cinder/volume/drivers/solidfire.py:315 #, python-format -msgid "block_device_list %s" +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" msgstr "" -#: cinder/virt/libvirt/connection.py:1658 -msgid "Starting toXML method" +#: cinder/volume/drivers/solidfire.py:398 +msgid "Failed to get model update from clone" msgstr "" -#: cinder/virt/libvirt/connection.py:1662 -msgid "Finished toXML method" +#: cinder/volume/drivers/solidfire.py:410 +#, python-format +msgid "Failed volume create: %s" msgstr "" -#: cinder/virt/libvirt/connection.py:1679 +#: cinder/volume/drivers/solidfire.py:425 #, python-format -msgid "" -"Error from libvirt while looking up %(instance_name)s: [Error Code " -"%(error_code)s] %(ex)s" +msgid "More than one valid preset was detected, using %s" msgstr "" -#: cinder/virt/libvirt/connection.py:1920 -msgid "libvirt version is too old (does not support getVersion)" +#: cinder/volume/drivers/solidfire.py:460 +#, python-format +msgid "Failed to get SolidFire Volume: %s" msgstr "" -#: cinder/virt/libvirt/connection.py:1942 +#: cinder/volume/drivers/solidfire.py:469 #, python-format -msgid "'' must be 1, but %d\n" +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." msgstr "" -#: cinder/virt/libvirt/connection.py:1969 +#: cinder/volume/drivers/solidfire.py:478 #, python-format -msgid "topology (%(topology)s) must have %(ks)s" +msgid "Volume %s not found on SF Cluster." 
msgstr "" -#: cinder/virt/libvirt/connection.py:2067 +#: cinder/volume/drivers/solidfire.py:481 #, python-format -msgid "" -"Instance launched has CPU info:\n" -"%s" +msgid "Found %(count)s volumes mapped to id: %(uuid)s." msgstr "" -#: cinder/virt/libvirt/connection.py:2079 -#, python-format -msgid "" -"CPU doesn't have compatibility.\n" -"\n" -"%(ret)s\n" -"\n" -"Refer to %(u)s" +#: cinder/volume/drivers/solidfire.py:550 +msgid "Enter SolidFire delete_volume..." msgstr "" -#: cinder/virt/libvirt/connection.py:2136 +#: cinder/volume/drivers/solidfire.py:554 #, python-format -msgid "Timeout migrating for %s. nwfilter not found." +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" msgstr "" -#: cinder/virt/libvirt/connection.py:2352 -#, python-format -msgid "skipping %(path)s since it looks like volume" +#: cinder/volume/drivers/solidfire.py:556 +msgid "This usually means the volume was never successfully created." msgstr "" -#: cinder/virt/libvirt/connection.py:2407 +#: cinder/volume/drivers/solidfire.py:569 #, python-format -msgid "Getting disk size of %(i_name)s: %(e)s" +msgid "Failed to delete SolidFire Volume: %s" msgstr "" -#: cinder/virt/libvirt/connection.py:2458 +#: cinder/volume/drivers/solidfire.py:572 +#: cinder/volume/drivers/solidfire.py:646 +#: cinder/volume/drivers/solidfire.py:709 +#: cinder/volume/drivers/solidfire.py:734 #, python-format -msgid "Instance %s: Starting migrate_disk_and_power_off" +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:575 +msgid "Leaving SolidFire delete_volume" msgstr "" -#: cinder/virt/libvirt/connection.py:2513 -msgid "During wait running, instance disappeared." +#: cinder/volume/drivers/solidfire.py:579 +msgid "Executing SolidFire ensure_export..." msgstr "" -#: cinder/virt/libvirt/connection.py:2518 -msgid "Instance running successfully." +#: cinder/volume/drivers/solidfire.py:587 +msgid "Executing SolidFire create_export..." msgstr "" -#: cinder/virt/libvirt/connection.py:2525 -#, python-format -msgid "Instance %s: Starting finish_migration" +#: cinder/volume/drivers/solidfire.py:638 +msgid "Entering SolidFire extend_volume..." msgstr "" -#: cinder/virt/libvirt/connection.py:2565 -#, python-format -msgid "Instance %s: Starting finish_revert_migration" +#: cinder/volume/drivers/solidfire.py:660 +msgid "Leaving SolidFire extend_volume" msgstr "" -#: cinder/virt/libvirt/firewall.py:42 -msgid "" -"Libvirt module could not be loaded. NWFilterFirewall will not work " -"correctly." +#: cinder/volume/drivers/solidfire.py:665 +msgid "Updating cluster status info" msgstr "" -#: cinder/virt/libvirt/firewall.py:93 -msgid "Called setup_basic_filtering in nwfilter" +#: cinder/volume/drivers/solidfire.py:673 +msgid "Failed to get updated stats" msgstr "" -#: cinder/virt/libvirt/firewall.py:101 -msgid "Ensuring static filters" +#: cinder/volume/drivers/solidfire.py:703 +#: cinder/volume/drivers/solidfire.py:728 +msgid "Entering SolidFire attach_volume..." msgstr "" -#: cinder/virt/libvirt/firewall.py:171 -#, python-format -msgid "The nwfilter(%(instance_filter_name)s) is not found." +#: cinder/volume/drivers/solidfire.py:773 +msgid "Leaving SolidFire transfer volume" msgstr "" -#: cinder/virt/libvirt/firewall.py:217 +#: cinder/volume/drivers/zadara.py:236 #, python-format -msgid "The nwfilter(%(instance_filter_name)s) for%(name)s is not found." +msgid "Sending %(method)s to %(url)s. 
Body \"%(body)s\"" msgstr "" -#: cinder/virt/libvirt/firewall.py:233 -msgid "iptables firewall: Setup Basic Filtering" +#: cinder/volume/drivers/zadara.py:260 +#, python-format +msgid "Operation completed. %(data)s" msgstr "" -#: cinder/virt/libvirt/firewall.py:252 -msgid "Attempted to unfilter instance which is not filtered" +#: cinder/volume/drivers/zadara.py:357 +#, python-format +msgid "Pool %(name)s: %(total)sGB total, %(free)sGB free" msgstr "" -#: cinder/virt/libvirt/imagecache.py:170 +#: cinder/volume/drivers/zadara.py:408 cinder/volume/drivers/zadara.py:531 #, python-format -msgid "%s is a valid instance name" +msgid "Volume %(name)s could not be found. It might be already deleted" msgstr "" -#: cinder/virt/libvirt/imagecache.py:173 +#: cinder/volume/drivers/zadara.py:438 #, python-format -msgid "%s has a disk file" +msgid "Create snapshot: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:175 +#: cinder/volume/drivers/zadara.py:445 cinder/volume/drivers/zadara.py:490 +#: cinder/volume/drivers/zadara.py:516 #, python-format -msgid "Instance %(instance)s is backed by %(backing)s" +msgid "Volume %(name)s not found" msgstr "" -#: cinder/virt/libvirt/imagecache.py:186 +#: cinder/volume/drivers/zadara.py:456 #, python-format -msgid "" -"Instance %(instance)s is using a backing file %(backing)s which does not " -"appear in the image service" +msgid "Delete snapshot: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:237 +#: cinder/volume/drivers/zadara.py:464 #, python-format -msgid "%(id)s (%(base_file)s): image verification failed" +msgid "snapshot: original volume %s not found, skipping delete operation" msgstr "" -#: cinder/virt/libvirt/imagecache.py:247 +#: cinder/volume/drivers/zadara.py:472 #, python-format -msgid "%(id)s (%(base_file)s): image verification skipped, no hash stored" +msgid "snapshot: snapshot %s not found, skipping delete operation" msgstr "" -#: cinder/virt/libvirt/imagecache.py:266 +#: cinder/volume/drivers/zadara.py:483 #, python-format -msgid "Cannot remove %(base_file)s, it does not exist" +msgid "Creating volume from snapshot: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:278 +#: cinder/volume/drivers/zadara.py:496 #, python-format -msgid "Base file too young to remove: %s" +msgid "Snapshot %(name)s not found" msgstr "" -#: cinder/virt/libvirt/imagecache.py:281 +#: cinder/volume/drivers/zadara.py:614 #, python-format -msgid "Removing base file: %s" +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:40 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:79 +msgid "Entering create_volume." 
msgstr "" -#: cinder/virt/libvirt/imagecache.py:288 +#: cinder/volume/drivers/emc/emc_smis_common.py:83 #, python-format -msgid "Failed to remove %(base_file)s, error was %(error)s" +msgid "Create Volume: %(volume)s Size: %(size)lu" msgstr "" -#: cinder/virt/libvirt/imagecache.py:299 +#: cinder/volume/drivers/emc/emc_smis_common.py:91 #, python-format -msgid "%(id)s (%(base_file)s): checking" +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:318 +#: cinder/volume/drivers/emc/emc_smis_common.py:98 #, python-format msgid "" -"%(id)s (%(base_file)s): in use: on this node %(local)d local, %(remote)d " -"on other nodes" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:330 +#: cinder/volume/drivers/emc/emc_smis_common.py:107 #, python-format msgid "" -"%(id)s (%(base_file)s): warning -- an absent base file is in use! " -"instances: %(instance_list)s" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." msgstr "" -#: cinder/virt/libvirt/imagecache.py:338 +#: cinder/volume/drivers/emc/emc_smis_common.py:115 #, python-format -msgid "%(id)s (%(base_file)s): in use on (%(remote)d on other nodes)" +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" msgstr "" -#: cinder/virt/libvirt/imagecache.py:348 +#: cinder/volume/drivers/emc/emc_smis_common.py:130 #, python-format -msgid "%(id)s (%(base_file)s): image is not in use" +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" msgstr "" -#: cinder/virt/libvirt/imagecache.py:354 +#: cinder/volume/drivers/emc/emc_smis_common.py:137 #, python-format -msgid "%(id)s (%(base_file)s): image is in use" +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:377 +#: cinder/volume/drivers/emc/emc_smis_common.py:144 #, python-format -msgid "Skipping verification, no base directory at %s" +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" msgstr "" -#: cinder/virt/libvirt/imagecache.py:381 -msgid "Verify base images" +#: cinder/volume/drivers/emc/emc_smis_common.py:152 +msgid "Entering create_volume_from_snapshot." msgstr "" -#: cinder/virt/libvirt/imagecache.py:388 +#: cinder/volume/drivers/emc/emc_smis_common.py:157 #, python-format -msgid "Image id %(id)s yields fingerprint %(fingerprint)s" +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:406 +#: cinder/volume/drivers/emc/emc_smis_common.py:167 #, python-format -msgid "Unknown base file: %s" +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." msgstr "" -#: cinder/virt/libvirt/imagecache.py:411 +#: cinder/volume/drivers/emc/emc_smis_common.py:177 #, python-format -msgid "Active base files: %s" +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." 
msgstr "" -#: cinder/virt/libvirt/imagecache.py:414 +#: cinder/volume/drivers/emc/emc_smis_common.py:188 #, python-format -msgid "Corrupt base files: %s" +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." msgstr "" -#: cinder/virt/libvirt/imagecache.py:418 +#: cinder/volume/drivers/emc/emc_smis_common.py:197 #, python-format -msgid "Removable base files: %s" +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:426 -msgid "Verification complete" +#: cinder/volume/drivers/emc/emc_smis_common.py:218 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" msgstr "" -#: cinder/virt/libvirt/utils.py:264 -msgid "Unable to find an open port" +#: cinder/volume/drivers/emc/emc_smis_common.py:230 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." msgstr "" -#: cinder/virt/libvirt/vif.py:90 +#: cinder/volume/drivers/emc/emc_smis_common.py:241 #, python-format -msgid "Ensuring vlan %(vlan)s and bridge %(bridge)s" +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" msgstr "" -#: cinder/virt/libvirt/vif.py:99 +#: cinder/volume/drivers/emc/emc_smis_common.py:257 #, python-format -msgid "Ensuring bridge %s" +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" msgstr "" -#: cinder/virt/libvirt/vif.py:165 cinder/virt/libvirt/vif.py:220 +#: cinder/volume/drivers/emc/emc_smis_common.py:266 #, python-format -msgid "Failed while unplugging vif of instance '%s'" +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" msgstr "" -#: cinder/virt/libvirt/volume.py:163 +#: cinder/volume/drivers/emc/emc_smis_common.py:278 #, python-format -msgid "iSCSI device not found at %s" +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:287 +msgid "Entering create_cloned_volume." msgstr "" -#: cinder/virt/libvirt/volume.py:166 +#: cinder/volume/drivers/emc/emc_smis_common.py:292 #, python-format msgid "" -"ISCSI volume not yet found at: %(mount_device)s. Will rescan & retry. " -"Try number: %(tries)s" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" msgstr "" -#: cinder/virt/libvirt/volume.py:178 +#: cinder/volume/drivers/emc/emc_smis_common.py:302 #, python-format -msgid "Found iSCSI node %(mount_device)s (after %(tries)s rescans)" +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." 
msgstr "" -#: cinder/virt/vmwareapi/error_util.py:93 +#: cinder/volume/drivers/emc/emc_smis_common.py:312 #, python-format -msgid "Error(s) %s occurred in the call to RetrieveProperties" +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." msgstr "" -#: cinder/virt/vmwareapi/fake.py:44 cinder/virt/xenapi/fake.py:77 +#: cinder/volume/drivers/emc/emc_smis_common.py:321 #, python-format -msgid "%(text)s: _db_content => %(content)s" +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" msgstr "" -#: cinder/virt/vmwareapi/fake.py:131 +#: cinder/volume/drivers/emc/emc_smis_common.py:342 #, python-format -msgid "Property %(attr)s not set for the managed object %(objName)s" +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. Return code: %(rc)lu.Error: %(error)s" msgstr "" -#: cinder/virt/vmwareapi/fake.py:437 -msgid "There is no VM registered" +#: cinder/volume/drivers/emc/emc_smis_common.py:354 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." msgstr "" -#: cinder/virt/vmwareapi/fake.py:439 cinder/virt/vmwareapi/fake.py:609 +#: cinder/volume/drivers/emc/emc_smis_common.py:365 #, python-format -msgid "Virtual Machine with ref %s is not there" +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" msgstr "" -#: cinder/virt/vmwareapi/fake.py:502 +#: cinder/volume/drivers/emc/emc_smis_common.py:381 #, python-format -msgid "Logging out a session that is invalid or already logged out: %s" +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/fake.py:517 -msgid "Session is faulty" +#: cinder/volume/drivers/emc/emc_smis_common.py:390 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" msgstr "" -#: cinder/virt/vmwareapi/fake.py:520 -msgid "Session Invalid" +#: cinder/volume/drivers/emc/emc_smis_common.py:402 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." msgstr "" -#: cinder/virt/vmwareapi/fake.py:606 -msgid " No Virtual Machine has been registered yet" +#: cinder/volume/drivers/emc/emc_smis_common.py:411 +msgid "Entering delete_volume." msgstr "" -#: cinder/virt/vmwareapi/io_util.py:99 +#: cinder/volume/drivers/emc/emc_smis_common.py:413 #, python-format -msgid "Glance image %s is in killed state" +msgid "Delete Volume: %(volume)s" msgstr "" -#: cinder/virt/vmwareapi/io_util.py:107 +#: cinder/volume/drivers/emc/emc_smis_common.py:420 #, python-format -msgid "Glance image %(image_id)s is in unknown state - %(state)s" +msgid "Volume %(name)s not found on the array. No volume to delete." 
msgstr "" -#: cinder/virt/vmwareapi/network_utils.py:128 +#: cinder/volume/drivers/emc/emc_smis_common.py:430 +#, python-format msgid "" -"ESX SOAP server returned an empty port group for the host system in its " -"response" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." msgstr "" -#: cinder/virt/vmwareapi/network_utils.py:155 +#: cinder/volume/drivers/emc/emc_smis_common.py:438 #, python-format -msgid "Creating Port Group with name %s on the ESX host" +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" msgstr "" -#: cinder/virt/vmwareapi/network_utils.py:169 +#: cinder/volume/drivers/emc/emc_smis_common.py:442 #, python-format -msgid "Created Port Group with name %s on the ESX host" +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" msgstr "" -#: cinder/virt/vmwareapi/read_write_util.py:150 +#: cinder/volume/drivers/emc/emc_smis_common.py:456 #, python-format -msgid "Exception during HTTP connection close in VMWareHTTpWrite. Exception is %s" +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" msgstr "" -#: cinder/virt/vmwareapi/vim.py:84 -msgid "Unable to import suds." +#: cinder/volume/drivers/emc/emc_smis_common.py:465 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vim.py:90 -msgid "Must specify vmwareapi_wsdl_loc" +#: cinder/volume/drivers/emc/emc_smis_common.py:472 +msgid "Entering create_snapshot." msgstr "" -#: cinder/virt/vmwareapi/vim.py:145 +#: cinder/volume/drivers/emc/emc_smis_common.py:476 #, python-format -msgid "No such SOAP method '%s' provided by VI SDK" +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" msgstr "" -#: cinder/virt/vmwareapi/vim.py:150 +#: cinder/volume/drivers/emc/emc_smis_common.py:488 #, python-format -msgid "httplib error in %s: " +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" msgstr "" -#: cinder/virt/vmwareapi/vim.py:157 +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:497 +#: cinder/volume/drivers/emc/emc_smis_common.py:567 #, python-format -msgid "Socket error in %s: " +msgid "Cannot find Replication Service to create snapshot for volume %s." msgstr "" -#: cinder/virt/vmwareapi/vim.py:162 +#: cinder/volume/drivers/emc/emc_smis_common.py:502 #, python-format -msgid "Type error in %s: " +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." msgstr "" -#: cinder/virt/vmwareapi/vim.py:166 +#: cinder/volume/drivers/emc/emc_smis_common.py:518 #, python-format -msgid "Exception in %s " -msgstr "" - -#: cinder/virt/vmwareapi/vmops.py:66 -msgid "Getting list of instances" +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:82 +#: cinder/volume/drivers/emc/emc_smis_common.py:527 #, python-format -msgid "Got total of %s instances" +msgid "" +"Error Create Snapshot: %(snapshot)s Volume: %(volume)s Error: " +"%(errordesc)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:126 -msgid "Couldn't get a local Datastore reference" +#: cinder/volume/drivers/emc/emc_smis_common.py:535 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." 
msgstr "" -#: cinder/virt/vmwareapi/vmops.py:196 -#, python-format -msgid "Creating VM with the name %s on the ESX host" +#: cinder/volume/drivers/emc/emc_smis_common.py:541 +msgid "Entering delete_snapshot." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:205 +#: cinder/volume/drivers/emc/emc_smis_common.py:545 #, python-format -msgid "Created VM with the name %s on the ESX host" +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:234 +#: cinder/volume/drivers/emc/emc_smis_common.py:551 #, python-format msgid "" -"Creating Virtual Disk of size %(vmdk_file_size_in_kb)s KB and adapter " -"type %(adapter_type)s on the ESX host local store %(data_store_name)s" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:251 +#: cinder/volume/drivers/emc/emc_smis_common.py:559 #, python-format msgid "" -"Created Virtual Disk of size %(vmdk_file_size_in_kb)s KB on the ESX host " -"local store %(data_store_name)s" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:260 +#: cinder/volume/drivers/emc/emc_smis_common.py:574 #, python-format msgid "" -"Deleting the file %(flat_uploaded_vmdk_path)s on the ESX host localstore " -"%(data_store_name)s" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:272 +#: cinder/volume/drivers/emc/emc_smis_common.py:590 #, python-format msgid "" -"Deleted the file %(flat_uploaded_vmdk_path)s on the ESX host local store " -"%(data_store_name)s" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:283 +#: cinder/volume/drivers/emc/emc_smis_common.py:599 #, python-format msgid "" -"Downloading image file data %(image_ref)s to the ESX data store " -"%(data_store_name)s" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:298 +#: cinder/volume/drivers/emc/emc_smis_common.py:611 #, python-format msgid "" -"Downloaded image file data %(image_ref)s to the ESX data store " -"%(data_store_name)s" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." 
msgstr "" -#: cinder/virt/vmwareapi/vmops.py:315 +#: cinder/volume/drivers/emc/emc_smis_common.py:621 #, python-format -msgid "Reconfiguring VM instance %s to attach the image disk" +msgid "Create export: %(volume)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:322 +#: cinder/volume/drivers/emc/emc_smis_common.py:626 #, python-format -msgid "Reconfigured VM instance %s to attach the image disk" +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:329 +#: cinder/volume/drivers/emc/emc_smis_common.py:648 #, python-format -msgid "Powering on the VM instance %s" +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:335 +#: cinder/volume/drivers/emc/emc_smis_common.py:663 #, python-format -msgid "Powered on the VM instance %s" +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:381 +#: cinder/volume/drivers/emc/emc_smis_common.py:674 #, python-format -msgid "Creating Snapshot of the VM instance %s " +msgid "Error mapping volume %s." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:391 +#: cinder/volume/drivers/emc/emc_smis_common.py:678 #, python-format -msgid "Created Snapshot of the VM instance %s " +msgid "ExposePaths for volume %s completed successfully." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:434 +#: cinder/volume/drivers/emc/emc_smis_common.py:694 #, python-format -msgid "Copying disk data before snapshot of the VM instance %s" +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:447 +#: cinder/volume/drivers/emc/emc_smis_common.py:707 #, python-format -msgid "Copied disk data before snapshot of the VM instance %s" +msgid "Error unmapping volume %s." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:456 +#: cinder/volume/drivers/emc/emc_smis_common.py:711 #, python-format -msgid "Uploading image %s" +msgid "HidePaths for volume %s completed successfully." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:469 +#: cinder/volume/drivers/emc/emc_smis_common.py:724 #, python-format -msgid "Uploaded image %s" +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:479 +#: cinder/volume/drivers/emc/emc_smis_common.py:739 #, python-format -msgid "Deleting temporary vmdk file %s" +msgid "Error mapping volume %(vol)s. %(error)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:488 +#: cinder/volume/drivers/emc/emc_smis_common.py:744 #, python-format -msgid "Deleted temporary vmdk file %s" -msgstr "" - -#: cinder/virt/vmwareapi/vmops.py:520 -msgid "instance is not powered on" +msgid "AddMembers for volume %s completed successfully." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:527 +#: cinder/volume/drivers/emc/emc_smis_common.py:757 #, python-format -msgid "Rebooting guest OS of VM %s" +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:530 +#: cinder/volume/drivers/emc/emc_smis_common.py:770 #, python-format -msgid "Rebooted guest OS of VM %s" +msgid "Error unmapping volume %(vol)s. 
%(error)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:532 +#: cinder/volume/drivers/emc/emc_smis_common.py:775 #, python-format -msgid "Doing hard reboot of VM %s" +msgid "RemoveMembers for volume %s completed successfully." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:536 +#: cinder/volume/drivers/emc/emc_smis_common.py:781 #, python-format -msgid "Did hard reboot of VM %s" +msgid "Map volume: %(volume)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:548 +#: cinder/volume/drivers/emc/emc_smis_common.py:790 +#: cinder/volume/drivers/emc/emc_smis_common.py:820 #, python-format -msgid "instance - %s not present" +msgid "Cannot find Controller Configuration Service for storage system %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:567 +#: cinder/volume/drivers/emc/emc_smis_common.py:804 #, python-format -msgid "Powering off the VM %s" +msgid "Unmap volume: %(volume)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:572 +#: cinder/volume/drivers/emc/emc_smis_common.py:810 #, python-format -msgid "Powered off the VM %s" +msgid "Volume %s is not mapped. No volume to unmap." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:576 +#: cinder/volume/drivers/emc/emc_smis_common.py:834 #, python-format -msgid "Unregistering the VM %s" +msgid "Initialize connection: %(volume)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:579 +#: cinder/volume/drivers/emc/emc_smis_common.py:840 #, python-format -msgid "Unregistered the VM %s" +msgid "Volume %s is already mapped." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:581 +#: cinder/volume/drivers/emc/emc_smis_common.py:852 #, python-format -msgid "" -"In vmwareapi:vmops:destroy, got this exception while un-registering the " -"VM: %s" +msgid "Terminate connection: %(volume)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:592 +#: cinder/volume/drivers/emc/emc_smis_common.py:884 #, python-format -msgid "Deleting contents of the VM %(name)s from datastore %(datastore_name)s" +msgid "Found Storage Type: %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:602 -#, python-format -msgid "Deleted contents of the VM %(name)s from datastore %(datastore_name)s" +#: cinder/volume/drivers/emc/emc_smis_common.py:887 +msgid "Storage type not found." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:607 +#: cinder/volume/drivers/emc/emc_smis_common.py:903 #, python-format -msgid "" -"In vmwareapi:vmops:destroy, got this exception while deleting the VM " -"contents from the disk: %s" +msgid "Found Masking View: %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:615 -msgid "pause not supported for vmwareapi" +#: cinder/volume/drivers/emc/emc_smis_common.py:906 +msgid "Masking View not found." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:619 -msgid "unpause not supported for vmwareapi" +#: cinder/volume/drivers/emc/emc_smis_common.py:928 +msgid "Ecom user not found." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:633 +#: cinder/volume/drivers/emc/emc_smis_common.py:948 #, python-format -msgid "Suspending the VM %s " +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:637 -#, python-format -msgid "Suspended the VM %s " +#: cinder/volume/drivers/emc/emc_smis_common.py:952 +msgid "Ecom server not found." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:640 -msgid "instance is powered off and can not be suspended." +#: cinder/volume/drivers/emc/emc_smis_common.py:959 +msgid "Cannot connect to ECOM server" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:643 +#: cinder/volume/drivers/emc/emc_smis_common.py:971 #, python-format -msgid "VM %s was already in suspended state. 
So returning without doing anything" +msgid "Found Replication Service: %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:656 +#: cinder/volume/drivers/emc/emc_smis_common.py:984 #, python-format -msgid "Resuming the VM %s" +msgid "Found Storage Configuration Service: %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:661 +#: cinder/volume/drivers/emc/emc_smis_common.py:997 #, python-format -msgid "Resumed the VM %s " +msgid "Found Controller Configuration Service: %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:663 -msgid "instance is not in a suspended state" -msgstr "" - -#: cinder/virt/vmwareapi/vmops.py:699 -msgid "get_diagnostics not implemented for vmwareapi" +#: cinder/volume/drivers/emc/emc_smis_common.py:1010 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:757 +#: cinder/volume/drivers/emc/emc_smis_common.py:1054 #, python-format -msgid "" -"Reconfiguring VM instance %(name)s to set the machine id with ip - " -"%(ip_addr)s" +msgid "Pool %(storage_type)s is not found." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:765 +#: cinder/volume/drivers/emc/emc_smis_common.py:1060 #, python-format -msgid "" -"Reconfigured VM instance %(name)s to set the machine id with ip - " -"%(ip_addr)s" +msgid "Storage system not found for pool %(storage_type)s." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:802 +#: cinder/volume/drivers/emc/emc_smis_common.py:1066 #, python-format -msgid "Creating directory with path %s" +msgid "Pool: %(pool)s SystemName: %(systemname)s." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:806 +#: cinder/volume/drivers/emc/emc_smis_common.py:1082 #, python-format -msgid "Created directory with path %s" +msgid "Pool name: %(poolname)s System name: %(systemname)s." msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:89 +#: cinder/volume/drivers/emc/emc_smis_common.py:1114 #, python-format -msgid "Downloading image %s from glance image server" +msgid "Volume %(volumename)s not found on the array." msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:103 +#: cinder/volume/drivers/emc/emc_smis_common.py:1117 #, python-format -msgid "Downloaded image %s from glance image server" +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:108 +#: cinder/volume/drivers/emc/emc_smis_common.py:1130 #, python-format -msgid "Uploading image %s to the Glance image server" +msgid "Source: %(volumename)s Target: %(snapshotname)s." msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:129 +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 #, python-format -msgid "Uploaded image %s to the Glance image server" +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:139 +#: cinder/volume/drivers/emc/emc_smis_common.py:1158 #, python-format -msgid "Getting image size for the image %s" +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:143 +#: cinder/volume/drivers/emc/emc_smis_common.py:1184 #, python-format -msgid "Got image size of %(size)s for the image %(image)s" +msgid "Error finding %s." msgstr "" -#: cinder/virt/xenapi/fake.py:553 cinder/virt/xenapi/fake.py:652 -#: cinder/virt/xenapi/fake.py:670 cinder/virt/xenapi/fake.py:732 -msgid "Raising NotImplemented" +#: cinder/volume/drivers/emc/emc_smis_common.py:1188 +#, python-format +msgid "Found %(name)s: %(initiator)s." 
msgstr "" -#: cinder/virt/xenapi/fake.py:555 +#: cinder/volume/drivers/emc/emc_smis_common.py:1248 #, python-format -msgid "xenapi.fake does not have an implementation for %s" +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." msgstr "" -#: cinder/virt/xenapi/fake.py:589 +#: cinder/volume/drivers/emc/emc_smis_common.py:1289 #, python-format -msgid "Calling %(localname)s %(impl)s" +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." msgstr "" -#: cinder/virt/xenapi/fake.py:594 +#: cinder/volume/drivers/emc/emc_smis_common.py:1302 #, python-format -msgid "Calling getter %s" +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." msgstr "" -#: cinder/virt/xenapi/fake.py:654 +#: cinder/volume/drivers/emc/emc_smis_common.py:1314 #, python-format msgid "" -"xenapi.fake does not have an implementation for %s or it has been called " -"with the wrong number of arguments" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." msgstr "" -#: cinder/virt/xenapi/host.py:67 +#: cinder/volume/drivers/emc/emc_smis_common.py:1326 #, python-format msgid "" -"Instance %(name)s running on %(host)s could not be found in the database:" -" assuming it is a worker VM and skipping migration to a new host" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." msgstr "" -#: cinder/virt/xenapi/host.py:137 +#: cinder/volume/drivers/emc/emc_smis_common.py:1361 #, python-format -msgid "Unable to get SR for this host: %s" +msgid "Available device number on %(storage)s: %(device)s." msgstr "" -#: cinder/virt/xenapi/host.py:169 -msgid "Unable to get updated status" +#: cinder/volume/drivers/emc/emc_smis_common.py:1404 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." msgstr "" -#: cinder/virt/xenapi/host.py:172 +#: cinder/volume/drivers/emc/emc_smis_common.py:1409 #, python-format -msgid "The call to %(method)s returned an error: %(e)s." +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." msgstr "" -#: cinder/virt/xenapi/network_utils.py:37 +#: cinder/volume/drivers/emc/emc_smis_common.py:1419 #, python-format -msgid "Found non-unique network for name_label %s" +msgid "Device info: %(data)s." msgstr "" -#: cinder/virt/xenapi/network_utils.py:55 +#: cinder/volume/drivers/emc/emc_smis_common.py:1441 #, python-format -msgid "Found non-unique network for bridge %s" +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." msgstr "" -#: cinder/virt/xenapi/network_utils.py:58 +#: cinder/volume/drivers/emc/emc_smis_common.py:1463 #, python-format -msgid "Found no network for bridge %s" +msgid "Found Storage Processor System: %s" msgstr "" -#: cinder/virt/xenapi/pool.py:111 +#: cinder/volume/drivers/emc/emc_smis_common.py:1491 #, python-format -msgid "Unable to eject %(host)s from the pool; pool not empty" +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." msgstr "" -#: cinder/virt/xenapi/pool.py:126 -#, python-format -msgid "Unable to eject %(host)s from the pool; No master found" +#: cinder/volume/drivers/emc/emc_smis_common.py:1520 +msgid "Error finding Storage Hardware ID Service." 
msgstr "" -#: cinder/virt/xenapi/pool.py:143 +#: cinder/volume/drivers/emc/emc_smis_common.py:1526 #, python-format -msgid "Pool-Join failed: %(e)s" +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1538 +msgid "Error finding Target WWNs." msgstr "" -#: cinder/virt/xenapi/pool.py:146 +#: cinder/volume/drivers/emc/emc_smis_common.py:1548 #, python-format -msgid "Unable to join %(host)s in the pool" +msgid "Add target WWN: %s." msgstr "" -#: cinder/virt/xenapi/pool.py:162 +#: cinder/volume/drivers/emc/emc_smis_common.py:1550 #, python-format -msgid "Pool-eject failed: %(e)s" +msgid "Target WWNs: %s." msgstr "" -#: cinder/virt/xenapi/pool.py:174 +#: cinder/volume/drivers/emc/emc_smis_common.py:1566 #, python-format -msgid "Unable to set up pool: %(e)s." +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." msgstr "" -#: cinder/virt/xenapi/pool.py:185 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:152 #, python-format -msgid "Pool-set_name_label failed: %(e)s" +msgid "Could not find iSCSI export for volume %s" msgstr "" -#: cinder/virt/xenapi/vif.py:103 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:161 #, python-format -msgid "Found no PIF for device %s" +msgid "Cannot find device number for volume %s" msgstr "" -#: cinder/virt/xenapi/vif.py:122 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:191 #, python-format -msgid "" -"PIF %(pif_rec['uuid'])s for network %(bridge)s has VLAN id %(pif_vlan)d. " -"Expected %(vlan_num)d" +msgid "Found iSCSI endpoint: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:218 -msgid "Created VM" +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:198 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:245 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:215 #, python-format -msgid "VBD not found in instance %s" +msgid "ISCSI properties: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:262 +#: cinder/volume/drivers/hds/hds.py:70 #, python-format -msgid "VBD %s already detached" +msgid "Range: start LU: %(start)s, end LU: %(end)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:265 +#: cinder/volume/drivers/hds/hds.py:84 #, python-format -msgid "VBD %(vbd_ref)s detach rejected, attempt %(num_attempt)d/%(max_attempts)d" +msgid "setting LU upper (end) limit to %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:270 +#: cinder/volume/drivers/hds/hds.py:92 #, python-format -msgid "Unable to unplug VBD %s" +msgid "%(element)s: %(val)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:275 +#: cinder/volume/drivers/hds/hds.py:103 cinder/volume/drivers/hds/hds.py:105 #, python-format -msgid "Reached maximum number of retries trying to unplug VBD %s" +msgid "XML exception reading parameter: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:286 +#: cinder/volume/drivers/hds/hds.py:178 #, python-format -msgid "Unable to destroy VBD %s" +msgid "portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:305 +#: cinder/volume/drivers/hds/hds.py:197 #, python-format -msgid "Creating %(vbd_type)s-type VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +msgid "No configuration found for service: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:308 +#: cinder/volume/drivers/hds/hds.py:250 #, python-format -msgid "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s." 
+msgid "HDP not found: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:319 +#: cinder/volume/drivers/hds/hds.py:289 #, python-format -msgid "Unable to destroy VDI %s" +msgid "iSCSI portal not found for service: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:336 +#: cinder/volume/drivers/hds/hds.py:327 #, python-format -msgid "" -"Created VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s)" -" on %(sr_ref)s." +msgid "LUN %(lun)s of size %(sz)s MB is created." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:345 +#: cinder/volume/drivers/hds/hds.py:355 #, python-format -msgid "Copied VDI %(vdi_ref)s from VDI %(vdi_to_copy_ref)s on %(sr_ref)s." +msgid "LUN %(lun)s of size %(size)s MB is cloned." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:353 +#: cinder/volume/drivers/hds/hds.py:372 #, python-format -msgid "Cloned VDI %(vdi_ref)s from VDI %(vdi_to_clone_ref)s" +msgid "LUN %(lun)s extended to %(size)s GB." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:372 +#: cinder/volume/drivers/hds/hds.py:395 #, python-format -msgid "No primary VDI found for %(vm_ref)s" +msgid "delete lun %(lun)s on %(name)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:379 +#: cinder/volume/drivers/hds/hds.py:480 #, python-format -msgid "Snapshotting with label '%(label)s'" +msgid "LUN %(lun)s of size %(sz)s MB is created from snapshot." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:392 +#: cinder/volume/drivers/hds/hds.py:503 #, python-format -msgid "Created snapshot %(template_vm_ref)s" +msgid "LUN %(lun)s of size %(size)s MB is created as snapshot." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:431 +#: cinder/volume/drivers/hds/hds.py:522 #, python-format -msgid "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s" +msgid "LUN %s is deleted." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:57 +msgid "_instantiate_driver: configuration not found." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:583 +#: cinder/volume/drivers/huawei/__init__.py:64 #, python-format -msgid "Creating blank HD of size %(req_size)d gigs" +msgid "" +"_instantiate_driver: Loading %(protocol)s driver for Huawei OceanStor " +"%(product)s series storage arrays." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:634 +#: cinder/volume/drivers/huawei/__init__.py:84 #, python-format msgid "" -"Fast cloning is only supported on default local SR of type ext. SR on " -"this system was found to be of type %(sr_type)s. Ignoring the cow flag." +"\"Product\" or \"Protocol\" is illegal. \"Product\" should be set to " +"either T, Dorado or HVS. \"Protocol\" should be set to either iSCSI or " +"FC. 
Product: %(product)s Protocol: %(protocol)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:724 +#: cinder/volume/drivers/huawei/huawei_dorado.py:74 #, python-format msgid "" -"download_vhd %(image)s attempt %(attempt_num)d/%(max_attempts)d from " -"%(glance_host)s:%(glance_port)s" +"initialize_connection: volume name: %(vol)s host: %(host)s initiator: " +"%(wwn)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:734 +#: cinder/volume/drivers/huawei/huawei_dorado.py:92 +#: cinder/volume/drivers/huawei/huawei_t.py:461 #, python-format -msgid "download_vhd failed: %r" +msgid "initialize_connection: Target FC ports WWNS: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:750 +#: cinder/volume/drivers/huawei/huawei_t.py:101 #, python-format -msgid "Asking xapi to fetch vhd image %(image)s" +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(ini)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:760 +#: cinder/volume/drivers/huawei/huawei_t.py:159 +#: cinder/volume/drivers/huawei/rest_common.py:1278 #, python-format msgid "" -"xapi 'download_vhd' returned VDI of type '%(vdi_type)s' with UUID " -"'%(vdi_uuid)s'" +"_get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " +"check config file." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:789 +#: cinder/volume/drivers/huawei/huawei_t.py:206 +#: cinder/volume/drivers/huawei/rest_common.py:1083 #, python-format -msgid "vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes=%(vdi_size_bytes)d" +msgid "_get_tgt_iqn: iSCSI IP is %s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:805 +#: cinder/volume/drivers/huawei/huawei_t.py:234 #, python-format -msgid "image_size_bytes=%(size_bytes)d, allowed_size_bytes=%(allowed_size_bytes)d" +msgid "_get_tgt_iqn: iSCSI target iqn is %s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:809 +#: cinder/volume/drivers/huawei/huawei_t.py:248 #, python-format msgid "" -"Image size %(size_bytes)d exceeded instance_type allowed size " -"%(allowed_size_bytes)d" +"_get_iscsi_tgt_port_info: Failed to get iSCSI port info. Please make sure" +" the iSCSI port IP %s is configured in array." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:831 +#: cinder/volume/drivers/huawei/huawei_t.py:323 +#: cinder/volume/drivers/huawei/huawei_t.py:552 #, python-format -msgid "Fetching image %(image)s, type %(image_type_str)" +msgid "" +"terminate_connection: volume: %(vol)s, host: %(host)s, connector: " +"%(initiator)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:844 +#: cinder/volume/drivers/huawei/huawei_t.py:351 #, python-format -msgid "Size for image %(image)s: %(virtual_size)d" +msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:853 -#, python-format -msgid "" -"Kernel/Ramdisk image is too large: %(vdi_size)d bytes, max %(max_size)d " -"bytes" +#: cinder/volume/drivers/huawei/huawei_t.py:436 +msgid "validate_connector: The FC driver requires thewwpns in the connector." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:870 +#: cinder/volume/drivers/huawei/huawei_t.py:443 #, python-format -msgid "Copying VDI %s to /boot/guest on dom0" +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(wwn)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:884 +#: cinder/volume/drivers/huawei/huawei_t.py:578 #, python-format -msgid "Kernel/Ramdisk VDI %s destroyed" +msgid "_remove_fc_ports: FC port was not found on host %(hostid)s." 
msgstr "" -#: cinder/virt/xenapi/vm_utils.py:895 -msgid "Failed to fetch glance image" +#: cinder/volume/drivers/huawei/huawei_utils.py:40 +#, python-format +msgid "parse_xml_file: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:934 +#: cinder/volume/drivers/huawei/huawei_utils.py:129 #, python-format -msgid "Detected %(image_type_str)s format for image %(image_ref)s" +msgid "_get_host_os_type: Host %(ip)s OS type is %(os)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:955 +#: cinder/volume/drivers/huawei/rest_common.py:59 #, python-format -msgid "Looking up vdi %s for PV kernel" +msgid "HVS Request URL: %(url)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:973 +#: cinder/volume/drivers/huawei/rest_common.py:60 #, python-format -msgid "Unknown image format %(disk_image_type)s" +msgid "HVS Request Data: %(data)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1016 +#: cinder/volume/drivers/huawei/rest_common.py:73 #, python-format -msgid "VDI %s is still available" +msgid "HVS Response Data: %(res)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1059 +#: cinder/volume/drivers/huawei/rest_common.py:75 #, python-format -msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgid "Bad response from server: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1061 -#, python-format -msgid "(VM_UTILS) xenapi power_state -> |%s|" +#: cinder/volume/drivers/huawei/rest_common.py:82 +msgid "JSON transfer error" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1088 +#: cinder/volume/drivers/huawei/rest_common.py:102 #, python-format -msgid "Unable to parse rrd of %(vm_uuid)s" +msgid "Login error, reason is %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1108 +#: cinder/volume/drivers/huawei/rest_common.py:166 #, python-format -msgid "Re-scanning SR %s" +msgid "" +"%(err)s\n" +"result: %(res)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1136 +#: cinder/volume/drivers/huawei/rest_common.py:173 #, python-format -msgid "Flag sr_matching_filter '%s' does not respect formatting convention" +msgid "%s \"data\" was not in result." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1154 -msgid "" -"XenAPI is unable to find a Storage Repository to install guest instances " -"on. Please check your configuration and/or configure the flag " -"'sr_matching_filter'" +#: cinder/volume/drivers/huawei/rest_common.py:208 +msgid "Can't find the Qos policy in array" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1167 -msgid "Cannot find SR of content-type ISO" +#: cinder/volume/drivers/huawei/rest_common.py:246 +msgid "Can't find lun or lun group in array" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1175 +#: cinder/volume/drivers/huawei/rest_common.py:280 #, python-format -msgid "ISO: looking at SR %(sr_rec)s" +msgid "Invalid resource pool: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1177 -msgid "ISO: not iso content" +#: cinder/volume/drivers/huawei/rest_common.py:298 +#, python-format +msgid "Get pool info error, pool name is:%s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1180 -msgid "ISO: iso content_type, no 'i18n-key' key" +#: cinder/volume/drivers/huawei/rest_common.py:327 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1183 -msgid "ISO: iso content_type, i18n-key value not 'local-storage-iso'" +#: cinder/volume/drivers/huawei/rest_common.py:354 +#, python-format +msgid "_stop_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." 
msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1187 -msgid "ISO: SR MATCHing our criteria" +#: cinder/volume/drivers/huawei/rest_common.py:474 +#, python-format +msgid "" +"_mapping_hostgroup_and_lungroup: lun_group: %(lun_group)sview_id: " +"%(view_id)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1189 -msgid "ISO: ISO, looking to see if it is host local" +#: cinder/volume/drivers/huawei/rest_common.py:511 +#: cinder/volume/drivers/huawei/rest_common.py:543 +#, python-format +msgid "initiator name:%(initiator_name)s, volume name:%(volume)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1192 +#: cinder/volume/drivers/huawei/rest_common.py:527 #, python-format -msgid "ISO: PBD %(pbd_ref)s disappeared" +msgid "host lun id is %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1195 +#: cinder/volume/drivers/huawei/rest_common.py:553 #, python-format -msgid "ISO: PBD matching, want %(pbd_rec)s, have %(host)s" +msgid "the free wwns %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1198 -msgid "ISO: SR with local PBD" +#: cinder/volume/drivers/huawei/rest_common.py:574 +#, python-format +msgid "the fc server properties is:%s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1220 +#: cinder/volume/drivers/huawei/rest_common.py:688 #, python-format -msgid "" -"Unable to obtain RRD XML for VM %(vm_uuid)s with server details: " -"%(server)s." +msgid "JSON transfer data error. %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1236 +#: cinder/volume/drivers/huawei/rest_common.py:874 #, python-format -msgid "Unable to obtain RRD XML updates with server details: %(server)s." +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1290 +#: cinder/volume/drivers/huawei/rest_common.py:937 #, python-format -msgid "Invalid statistics data from Xenserver: %s" +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(fetchtype)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1343 +#: cinder/volume/drivers/huawei/rest_common.py:964 #, python-format -msgid "VHD %(vdi_uuid)s has parent %(parent_ref)s" +msgid "" +"PrefetchType config is wrong. PrefetchType must in 1,2,3,4. fetchtype " +"is:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:970 +msgid "Use default prefetch fetchtype. Prefetch fetchtype:Intelligent." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1417 +#: cinder/volume/drivers/huawei/rest_common.py:982 #, python-format msgid "" -"Parent %(parent_uuid)s doesn't match original parent " -"%(original_parent_uuid)s, waiting for coalesce..." +"_wait_for_luncopy:LUNcopy status is not normal.LUNcopy name: " +"%(luncopyname)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1427 +#: cinder/volume/drivers/huawei/rest_common.py:1056 #, python-format -msgid "VHD coalesce attempts exceeded (%(max_attempts)d), giving up..." +msgid "" +"_get_iscsi_port_info: Failed to get iscsi port info through config IP " +"%(ip)s, please check config file." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1462 +#: cinder/volume/drivers/huawei/rest_common.py:1101 #, python-format -msgid "Timeout waiting for device %s to be created" +msgid "_get_tgt_iqn: iSCSI target iqn is %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1473 +#: cinder/volume/drivers/huawei/rest_common.py:1124 #, python-format -msgid "Plugging VBD %s ... 
" +msgid "_parse_volume_type: type id: %(type_id)s config parameter is: %(params)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1476 +#: cinder/volume/drivers/huawei/rest_common.py:1157 #, python-format -msgid "Plugging VBD %s done." +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the configuration file " +"%(conf)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1478 +#: cinder/volume/drivers/huawei/rest_common.py:1162 #, python-format -msgid "VBD %(vbd_ref)s plugged as %(orig_dev)s" +msgid "The config parameters are: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1481 +#: cinder/volume/drivers/huawei/rest_common.py:1239 +#: cinder/volume/drivers/huawei/ssh_common.py:118 +#: cinder/volume/drivers/huawei/ssh_common.py:1265 #, python-format -msgid "VBD %(vbd_ref)s plugged into wrong dev, remapping to %(dev)s" +msgid "_check_conf_file: Config file invalid. %s must be set." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1490 -#, python-format -msgid "Destroying VBD for VDI %s ... " +#: cinder/volume/drivers/huawei/rest_common.py:1246 +#: cinder/volume/drivers/huawei/ssh_common.py:125 +msgid "_check_conf_file: Config file invalid. StoragePool must be set." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1498 +#: cinder/volume/drivers/huawei/rest_common.py:1256 #, python-format -msgid "Destroying VBD for VDI %s done." +msgid "" +"_check_conf_file: Config file invalid. Host OSType invalid.\n" +"The valid values are: %(os_list)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1511 -#, python-format -msgid "Running pygrub against %s" +#: cinder/volume/drivers/huawei/rest_common.py:1300 +msgid "Can not find lun in array" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1518 +#: cinder/volume/drivers/huawei/ssh_common.py:54 #, python-format -msgid "Found Xen kernel %s" +msgid "ssh_read: Read SSH timeout. %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1520 -msgid "No Xen kernel found. Booting HVM." +#: cinder/volume/drivers/huawei/ssh_common.py:70 +msgid "No response message. Please check system status." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1533 -msgid "Partitions:" +#: cinder/volume/drivers/huawei/ssh_common.py:101 +#: cinder/volume/drivers/huawei/ssh_common.py:1249 +msgid "do_setup" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1539 +#: cinder/volume/drivers/huawei/ssh_common.py:135 +#: cinder/volume/drivers/huawei/ssh_common.py:1287 #, python-format -msgid " %(num)s: %(ptype)s %(size)d sectors" +msgid "" +"_check_conf_file: Config file invalid. Host OSType is invalid.\n" +"The valid values are: %(os_list)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1565 +#: cinder/volume/drivers/huawei/ssh_common.py:169 #, python-format -msgid "" -"Writing partition table %(primary_first)d %(primary_last)d to " -"%(dev_path)s..." +msgid "_get_login_info: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1578 +#: cinder/volume/drivers/huawei/ssh_common.py:224 #, python-format -msgid "Writing partition table %s done." 
+msgid "create_volume: volume name: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1632 +#: cinder/volume/drivers/huawei/ssh_common.py:242 #, python-format msgid "" -"Starting sparse_copy src=%(src_path)s dst=%(dst_path)s " -"virtual_size=%(virtual_size)d block_size=%(block_size)d" +"_name_translate: Name in cinder: %(old)s, new name in storage system: " +"%(new)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1664 +#: cinder/volume/drivers/huawei/ssh_common.py:279 #, python-format msgid "" -"Finished sparse_copy in %(duration).2f secs, %(compression_pct).2f%% " -"reduction in size" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the element in configuration " +"file %(conf)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1714 -msgid "" -"XenServer tools installed in this image are capable of network injection." -" Networking files will not bemanipulated" +#: cinder/volume/drivers/huawei/ssh_common.py:373 +#: cinder/volume/drivers/huawei/ssh_common.py:1451 +#, python-format +msgid "LUNType must be \"Thin\" or \"Thick\". LUNType:%(type)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1722 +#: cinder/volume/drivers/huawei/ssh_common.py:395 msgid "" -"XenServer tools are present in this image but are not capable of network " -"injection" +"_parse_conf_lun_params: Use default prefetch type. Prefetch type: " +"Intelligent" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1726 -msgid "XenServer tools are not installed in this image" +#: cinder/volume/drivers/huawei/ssh_common.py:421 +#, python-format +msgid "" +"_get_maximum_capacity_pool_id: Failed to get pool id. Please check config" +" file and make sure the StoragePool %s is created in storage array." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1742 -msgid "Manipulating interface files directly" +#: cinder/volume/drivers/huawei/ssh_common.py:436 +#, python-format +msgid "CLI command: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1751 +#: cinder/volume/drivers/huawei/ssh_common.py:466 #, python-format -msgid "Failed to mount filesystem (expected for non-linux instances): %s" +msgid "" +"_execute_cli: Can not connect to IP %(old)s, try to connect to the other " +"IP %(new)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:131 cinder/virt/xenapi/vmops.py:722 +#: cinder/volume/drivers/huawei/ssh_common.py:501 #, python-format -msgid "Updating progress to %(progress)d" +msgid "_execute_cli: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:231 +#: cinder/volume/drivers/huawei/ssh_common.py:511 #, python-format -msgid "Attempted to power on non-existent instance bad instance id %s" +msgid "delete_volume: volume name: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:233 -msgid "Starting instance" +#: cinder/volume/drivers/huawei/ssh_common.py:516 +#, python-format +msgid "delete_volume: Volume %(name)s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:303 -msgid "Removing kernel/ramdisk files from dom0" +#: cinder/volume/drivers/huawei/ssh_common.py:570 +#, python-format +msgid "" +"create_volume_from_snapshot: snapshot name: %(snapshot)s, volume name: " +"%(volume)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:358 -msgid "Failed to spawn, rolling back" +#: cinder/volume/drivers/huawei/ssh_common.py:580 +#, python-format +msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." 
msgstr "" -#: cinder/virt/xenapi/vmops.py:443 -msgid "Detected ISO image type, creating blank VM for install" +#: cinder/volume/drivers/huawei/ssh_common.py:650 +#, python-format +msgid "_wait_for_luncopy: LUNcopy %(luncopyname)s status is %(status)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:462 -msgid "Auto configuring disk, attempting to resize partition..." +#: cinder/volume/drivers/huawei/ssh_common.py:688 +#, python-format +msgid "create_cloned_volume: src volume: %(src)s, tgt volume: %(tgt)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:515 +#: cinder/volume/drivers/huawei/ssh_common.py:697 #, python-format -msgid "Invalid value for injected_files: %r" +msgid "Source volume %(name)s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:520 +#: cinder/volume/drivers/huawei/ssh_common.py:739 #, python-format -msgid "Injecting file path: '%s'" +msgid "" +"extend_volume: extended volume name: %(extended_name)s new added volume " +"name: %(added_name)s new added volume size: %(added_size)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:527 -msgid "Setting admin password" +#: cinder/volume/drivers/huawei/ssh_common.py:747 +#, python-format +msgid "extend_volume: volume %s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:531 -msgid "Resetting network" +#: cinder/volume/drivers/huawei/ssh_common.py:779 +#, python-format +msgid "create_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:538 -msgid "Setting VCPU weight" +#: cinder/volume/drivers/huawei/ssh_common.py:785 +msgid "create_snapshot: Resource pool needs 1GB valid size at least." msgstr "" -#: cinder/virt/xenapi/vmops.py:544 -msgid "Starting VM" +#: cinder/volume/drivers/huawei/ssh_common.py:792 +#, python-format +msgid "create_snapshot: Volume %(name)s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:551 +#: cinder/volume/drivers/huawei/ssh_common.py:855 #, python-format -msgid "" -"Latest agent build for %(hypervisor)s/%(os)s/%(architecture)s is " -"%(version)s" +msgid "delete_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:554 +#: cinder/volume/drivers/huawei/ssh_common.py:865 #, python-format -msgid "No agent build found for %(hypervisor)s/%(os)s/%(architecture)s" +msgid "" +"delete_snapshot: Can not delete snapshot %s for it is a source LUN of " +"LUNCopy." msgstr "" -#: cinder/virt/xenapi/vmops.py:561 -msgid "Waiting for instance state to become running" +#: cinder/volume/drivers/huawei/ssh_common.py:873 +#, python-format +msgid "delete_snapshot: Snapshot %(snap)s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:573 -msgid "Querying agent version" +#: cinder/volume/drivers/huawei/ssh_common.py:916 +#, python-format +msgid "" +"%(func)s: %(msg)s\n" +"CLI command: %(cmd)s\n" +"CLI out: %(out)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:576 +#: cinder/volume/drivers/huawei/ssh_common.py:933 #, python-format -msgid "Instance agent version: %s" +msgid "map_volume: Volume %s was not found." msgstr "" -#: cinder/virt/xenapi/vmops.py:581 +#: cinder/volume/drivers/huawei/ssh_common.py:1079 #, python-format -msgid "Updating Agent to %s" +msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:616 +#: cinder/volume/drivers/huawei/ssh_common.py:1102 #, python-format -msgid "No opaque_ref could be determined for '%s'." +msgid "remove_map: Host %s does not exist." 
msgstr "" -#: cinder/virt/xenapi/vmops.py:670 -msgid "Finished snapshot and upload for VM" +#: cinder/volume/drivers/huawei/ssh_common.py:1106 +#, python-format +msgid "remove_map: Volume %s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:677 -msgid "Starting snapshot for VM" +#: cinder/volume/drivers/huawei/ssh_common.py:1119 +#, python-format +msgid "remove_map: No map between host %(host)s and volume %(volume)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:686 +#: cinder/volume/drivers/huawei/ssh_common.py:1138 #, python-format -msgid "Unable to Snapshot instance: %(exc)s" +msgid "" +"_delete_map: There are IOs accessing the system. Retry to delete host map" +" %(mapid)s 10s later." msgstr "" -#: cinder/virt/xenapi/vmops.py:702 -msgid "Failed to transfer vhd to new host" +#: cinder/volume/drivers/huawei/ssh_common.py:1146 +#, python-format +msgid "" +"_delete_map: Failed to delete host map %(mapid)s.\n" +"CLI out: %(out)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:770 -#, python-format -msgid "Resizing down VDI %(cow_uuid)s from %(old_gb)dGB to %(new_gb)dGB" +#: cinder/volume/drivers/huawei/ssh_common.py:1185 +msgid "_update_volume_stats: Updating volume stats." msgstr "" -#: cinder/virt/xenapi/vmops.py:893 -#, python-format -msgid "Resizing up VDI %(vdi_uuid)s from %(old_gb)dGB to %(new_gb)dGB" +#: cinder/volume/drivers/huawei/ssh_common.py:1277 +msgid "_check_conf_file: Config file invalid. StoragePool must be specified." msgstr "" -#: cinder/virt/xenapi/vmops.py:901 -msgid "Resize complete" +#: cinder/volume/drivers/huawei/ssh_common.py:1311 +msgid "" +"_get_device_type: The driver only supports Dorado5100 and Dorado 2100 G2 " +"now." msgstr "" -#: cinder/virt/xenapi/vmops.py:928 +#: cinder/volume/drivers/huawei/ssh_common.py:1389 #, python-format -msgid "Failed to query agent version: %(resp)r" +msgid "" +"create_volume_from_snapshot: %(device)s does not support create volume " +"from snapshot." msgstr "" -#: cinder/virt/xenapi/vmops.py:949 +#: cinder/volume/drivers/huawei/ssh_common.py:1396 #, python-format -msgid "domid changed from %(domid)s to %(newdomid)s" +msgid "create_cloned_volume: %(device)s does not support clone volume." msgstr "" -#: cinder/virt/xenapi/vmops.py:962 +#: cinder/volume/drivers/huawei/ssh_common.py:1404 #, python-format -msgid "Failed to update agent: %(resp)r" +msgid "extend_volume: %(device)s does not support extend volume." msgstr "" -#: cinder/virt/xenapi/vmops.py:983 +#: cinder/volume/drivers/huawei/ssh_common.py:1413 #, python-format -msgid "Failed to exchange keys: %(resp)r" +msgid "create_snapshot: %(device)s does not support snapshot." msgstr "" -#: cinder/virt/xenapi/vmops.py:998 -#, python-format -msgid "Failed to update password: %(resp)r" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:132 +msgid "enter: do_setup" msgstr "" -#: cinder/virt/xenapi/vmops.py:1023 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:142 #, python-format -msgid "Failed to inject file: %(resp)r" +msgid "Failed getting details for pool %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1032 -msgid "VM already halted, skipping shutdown..." +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:179 +msgid "do_setup: No configured nodes." 
msgstr "" -#: cinder/virt/xenapi/vmops.py:1036 -msgid "Shutting down VM" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:182 +msgid "leave: do_setup" msgstr "" -#: cinder/virt/xenapi/vmops.py:1054 -msgid "Unable to find VBD for VM" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:186 +msgid "enter: check_for_setup_error" msgstr "" -#: cinder/virt/xenapi/vmops.py:1097 -msgid "Using RAW or VHD, skipping kernel and ramdisk deletion" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:190 +msgid "Unable to determine system name" msgstr "" -#: cinder/virt/xenapi/vmops.py:1104 -msgid "instance has a kernel or ramdisk but not both" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:193 +msgid "Unable to determine system id" msgstr "" -#: cinder/virt/xenapi/vmops.py:1111 -msgid "kernel/ramdisk files removed" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:196 +msgid "Unable to determine pool extent size" msgstr "" -#: cinder/virt/xenapi/vmops.py:1121 -msgid "VM destroyed" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:203 +#: cinder/volume/drivers/netapp/iscsi.py:122 +#: cinder/volume/drivers/netapp/nfs.py:639 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:157 +#, python-format +msgid "%s is not set" msgstr "" -#: cinder/virt/xenapi/vmops.py:1147 -msgid "Destroying VM" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:209 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" msgstr "" -#: cinder/virt/xenapi/vmops.py:1169 -msgid "VM is not present, skipping destroy..." +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:217 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" msgstr "" -#: cinder/virt/xenapi/vmops.py:1222 -#, python-format -msgid "Instance is already in Rescue Mode: %s" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:225 +msgid "leave: check_for_setup_error" msgstr "" -#: cinder/virt/xenapi/vmops.py:1296 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:235 #, python-format -msgid "Found %(instance_count)d hung reboots older than %(timeout)d seconds" +msgid "ensure_export: Volume %s not found on storage" msgstr "" -#: cinder/virt/xenapi/vmops.py:1300 -msgid "Automatically hard rebooting" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:254 +msgid "The connector does not contain the required information." 
msgstr "" -#: cinder/virt/xenapi/vmops.py:1363 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:277 #, python-format -msgid "Setting migration %(migration_id)s to error: %(reason)s" +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1374 -#, python-format -msgid "" -"Automatically confirming migration %(migration_id)s for instance " -"%(instance_uuid)s" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:297 +msgid "CHAP secret exists for host but CHAP is disabled" msgstr "" -#: cinder/virt/xenapi/vmops.py:1379 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:302 #, python-format -msgid "Instance %(instance_uuid)s not found" +msgid "initialize_connection: Failed to get attributes for volume %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1383 -msgid "In ERROR state" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:314 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1389 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:316 #, python-format -msgid "In %(task_state)s task_state, not RESIZE_VERIFY" +msgid "initialize_connection: Missing volume attribute for volume %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1396 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:333 #, python-format -msgid "Error auto-confirming resize: %(e)s. Will retry later." +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:342 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:365 +msgid "" +"Could not get FC connection information for the host-volume connection. " +"Is the host configured properly for FC connections?" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:380 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:385 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:403 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:411 +msgid "terminate_connection: Failed to get host name from connector." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:421 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:447 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:459 +msgid "create_cloned_volume: Source and destination size differ." msgstr "" -#: cinder/virt/xenapi/vmops.py:1418 -msgid "Could not get bandwidth info." +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:470 +#, python-format +msgid "enter: extend_volume: volume %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1469 -msgid "Injecting network info to xenstore" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:474 +msgid "extend_volume: Extending a volume with snapshots is not supported." 
msgstr "" -#: cinder/virt/xenapi/vmops.py:1483 -msgid "Creating vifs" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:481 +#, python-format +msgid "leave: extend_volume: volume %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1492 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:497 #, python-format -msgid "Creating VIF for network %(network_ref)s" +msgid "enter: migrate_volume: id=%(id)s, host=%(host)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1495 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:523 #, python-format -msgid "Created VIF %(vif_ref)s, network %(network_ref)s" +msgid "leave: migrate_volume: id=%(id)s, host=%(host)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1520 -msgid "Injecting hostname to xenstore" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:540 +#, python-format +msgid "" +"enter: retype: id=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1545 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:581 #, python-format msgid "" -"The agent call to %(method)s returned an invalid response: %(ret)r. " -"path=%(path)s; args=%(args)r" +"exit: retype: ild=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:622 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:623 +msgid "_update_volume_stats: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:44 +#, python-format +msgid "Could not find key in output of command %(cmd)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:64 +#, python-format +msgid "Failed to get code level (%s)." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:86 +#, python-format +msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:143 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:165 +#, python-format +msgid "Failed to find host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:178 +#, python-format +msgid "enter: get_host_from_connector: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:207 +#, python-format +msgid "leave: get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:218 +#, python-format +msgid "enter: create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:224 +msgid "create_host: Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:235 +msgid "create_host: No initiators or wwpns supplied." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:265 +#, python-format +msgid "leave: create_host: host %(host)s - %(host_name)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1566 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:275 #, python-format -msgid "TIMEOUT: The call to %(method)s timed out. args=%(args)r" +msgid "enter: map_vol_to_host: volume %(volume_name)s to host %(host_name)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1570 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:301 #, python-format msgid "" -"NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. 
" -"args=%(args)r" +"leave: map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host " +"%(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:311 +#, python-format +msgid "enter: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1575 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:318 #, python-format -msgid "The call to %(method)s returned an error: %(e)s. args=%(args)r" +msgid "unmap_vol_from_host: No mapping of volume %(vol_name)s to any host found." msgstr "" -#: cinder/virt/xenapi/vmops.py:1661 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:324 #, python-format -msgid "OpenSSL error: %s" +msgid "" +"unmap_vol_from_host: Multiple mappings of volume %(vol_name)s found, no " +"host specified." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:52 -msgid "creating sr within volume_utils" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:336 +#, python-format +msgid "" +"unmap_vol_from_host: No mapping of volume %(vol_name)s to host %(host) " +"found." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:55 cinder/virt/xenapi/volume_utils.py:83 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:348 #, python-format -msgid "type is = %s" +msgid "leave: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:377 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:383 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:390 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:397 +msgid "System does not support compression" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:58 cinder/virt/xenapi/volume_utils.py:86 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:402 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:408 #, python-format -msgid "name = %s" +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:71 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:417 #, python-format -msgid "Created %(label)s as %(sr_ref)s." +msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:76 cinder/virt/xenapi/volume_utils.py:174 -msgid "Unable to create Storage Repository" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:452 +msgid "Protocol must be specified as ' iSCSI' or ' FC'." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:80 -msgid "introducing sr within volume_utils" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:495 +#, python-format +msgid "enter: create_vdisk: vdisk %s " msgstr "" -#: cinder/virt/xenapi/volume_utils.py:103 cinder/virt/xenapi/volume_utils.py:170 -#: cinder/virt/xenapi/volumeops.py:156 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:498 #, python-format -msgid "Introduced %(label)s as %(sr_ref)s." 
+msgid "leave: _create_vdisk: volume %s " msgstr "" -#: cinder/virt/xenapi/volume_utils.py:106 -msgid "Creating pbd for SR" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:525 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping%(id)s. Attributes: " +"%(attr)s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:108 -msgid "Plugging SR" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:535 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within theallotted %(to)d " +"seconds timeout. Terminating." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:116 cinder/virt/xenapi/volumeops.py:160 -msgid "Unable to introduce Storage Repository" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:544 +#, python-format +msgid "" +"enter: run_flashcopy: execute FlashCopy from source %(source)s to target " +"%(target)s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:127 cinder/virt/xenapi/volumeops.py:50 -msgid "Unable to get SR using uuid" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:552 +#, python-format +msgid "leave: run_flashcopy: FlashCopy started from %(source)s to %(target)s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:129 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:572 #, python-format -msgid "Forgetting SR %s..." +msgid "Loopcall: _check_vdisk_fc_mappings(), vdisk %s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:137 -msgid "Unable to forget Storage Repository" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:595 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:157 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:631 #, python-format -msgid "Introducing %s..." +msgid "Calling _ensure_vdisk_no_fc_mappings: vdisk %s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:186 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:639 #, python-format -msgid "Unable to find SR from VBD %s" +msgid "enter: delete_vdisk: vdisk %s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:204 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:641 #, python-format -msgid "Ignoring exception %(exc)s when getting PBDs for %(sr_ref)s" +msgid "Tried to delete non-existant vdisk %s." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:210 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:645 #, python-format -msgid "Ignoring exception %(exc)s when unplugging PBD %(pbd)s" +msgid "leave: delete_vdisk: vdisk %s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:234 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:649 #, python-format -msgid "Unable to introduce VDI on SR %s" +msgid "enter: create_copy: snapshot %(src)s to %(tgt)s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:242 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:654 #, python-format -msgid "Unable to get record of VDI %s on" +msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:264 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:669 #, python-format -msgid "Unable to introduce VDI for SR %s" +msgid "leave: _create_copy: snapshot %(tgt)s from vdisk %(src)s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:274 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:691 +msgid "migrate_volume started without a vdisk copy in the expected pool." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:743 #, python-format -msgid "Error finding vdis in SR %s" +msgid "" +"Ignore change IO group as storage code level is %(code_level)s, below " +"then 6.4.0.0" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:281 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:35 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:211 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:244 #, python-format -msgid "Unable to find vbd for vdi %s" +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:315 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:53 #, python-format -msgid "Unable to obtain target information %(data)s, %(mountpoint)s" +msgid "Expected no output from CLI command %(cmd)s, got %(out)s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:341 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:65 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:256 #, python-format -msgid "Mountpoint cannot be translated: %s" +msgid "" +"Failed to parse CLI output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:64 -msgid "Could not find VDI ref" +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:142 +msgid "Must pass wwpn or host to lsfabric." msgstr "" -#: cinder/virt/xenapi/volumeops.py:69 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:158 #, python-format -msgid "Creating SR %s" +msgid "Did not find success message nor error for %(fun)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:163 +msgid "" +"storwize_svc_multihostmap_enabled is set to False, not allowing multi " +"host mapping." msgstr "" -#: cinder/virt/xenapi/volumeops.py:73 -msgid "Could not create SR" +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:347 +#, python-format +msgid "Did not find expected key %(key)s in %(fun)s: %(raw)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:76 -msgid "Could not retrieve SR record" +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:382 +#, python-format +msgid "" +"Unexpected CLI response: header/row mismatch. header: %(header)s, row: " +"%(row)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:81 +#: cinder/volume/drivers/netapp/api.py:419 #, python-format -msgid "Introducing SR %s" +msgid "No element by given name %s." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:440 +msgid "Not a valid value for NaElement." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:444 +msgid "NaElement name cannot be null." msgstr "" -#: cinder/virt/xenapi/volumeops.py:85 -msgid "SR found in xapi database. No need to introduce" +#: cinder/volume/drivers/netapp/api.py:468 +msgid "Type cannot be converted into NaElement." 
msgstr "" -#: cinder/virt/xenapi/volumeops.py:90 -msgid "Could not introduce SR" +#: cinder/volume/drivers/netapp/common.py:75 +msgid "Required configuration not found" msgstr "" -#: cinder/virt/xenapi/volumeops.py:94 +#: cinder/volume/drivers/netapp/common.py:103 #, python-format -msgid "Checking for SR %s" +msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:106 +#: cinder/volume/drivers/netapp/common.py:109 #, python-format -msgid "SR %s not found in the xapi database" +msgid "Storage family %s is not supported" msgstr "" -#: cinder/virt/xenapi/volumeops.py:112 -msgid "Could not forget SR" +#: cinder/volume/drivers/netapp/common.py:116 +#, python-format +msgid "No default storage protocol found for storage family %(storage_family)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:121 +#: cinder/volume/drivers/netapp/common.py:123 #, python-format -msgid "Attach_volume: %(connection_info)s, %(instance_name)s, %(mountpoint)s" +msgid "" +"Protocol %(storage_protocol)s is not supported for storage family " +"%(storage_family)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:178 +#: cinder/volume/drivers/netapp/common.py:130 #, python-format -msgid "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s" +msgid "" +"NetApp driver of family %(storage_family)s and protocol " +"%(storage_protocol)s loaded" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:139 +msgid "Only loading netapp drivers supported." msgstr "" -#: cinder/virt/xenapi/volumeops.py:189 +#: cinder/volume/drivers/netapp/common.py:158 #, python-format -msgid "Unable to use SR %(sr_ref)s for instance %(instance_name)s" +msgid "" +"The configured NetApp driver is deprecated. Please refer the link to " +"resolve the issue '%s'." msgstr "" -#: cinder/virt/xenapi/volumeops.py:197 +#: cinder/volume/drivers/netapp/iscsi.py:69 #, python-format -msgid "Unable to attach volume to instance %s" +msgid "No metadata property %(prop)s defined for the LUN %(name)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:200 +#: cinder/volume/drivers/netapp/iscsi.py:105 #, python-format -msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s" +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:150 +msgid "Success getting LUN list from server" msgstr "" -#: cinder/virt/xenapi/volumeops.py:210 +#: cinder/volume/drivers/netapp/iscsi.py:166 #, python-format -msgid "Detach_volume: %(instance_name)s, %(mountpoint)s" +msgid "Created LUN with name %s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:219 +#: cinder/volume/drivers/netapp/iscsi.py:175 #, python-format -msgid "Unable to locate volume %s" +msgid "No entry in LUN table for volume/snapshot %(name)s." 
msgstr "" -#: cinder/virt/xenapi/volumeops.py:227 +#: cinder/volume/drivers/netapp/iscsi.py:191 #, python-format -msgid "Unable to detach volume %s" +msgid "Destroyed LUN %s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:232 +#: cinder/volume/drivers/netapp/iscsi.py:227 #, python-format -msgid "Unable to destroy vbd %s" +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:239 +#: cinder/volume/drivers/netapp/iscsi.py:232 #, python-format -msgid "Error purging SR %s" +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:241 +#: cinder/volume/drivers/netapp/iscsi.py:238 #, python-format -msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s" +msgid "Failed to get LUN target details for the LUN %s" msgstr "" -#: cinder/vnc/xvp_proxy.py:98 cinder/vnc/xvp_proxy.py:103 +#: cinder/volume/drivers/netapp/iscsi.py:249 #, python-format -msgid "Error in handshake: %s" +msgid "Failed to get target portal for the LUN %s" msgstr "" -#: cinder/vnc/xvp_proxy.py:119 +#: cinder/volume/drivers/netapp/iscsi.py:252 #, python-format -msgid "Invalid request: %s" +msgid "Failed to get target IQN for the LUN %s" msgstr "" -#: cinder/vnc/xvp_proxy.py:139 +#: cinder/volume/drivers/netapp/iscsi.py:290 #, python-format -msgid "Request: %s" +msgid "Snapshot %s deletion successful" msgstr "" -#: cinder/vnc/xvp_proxy.py:142 +#: cinder/volume/drivers/netapp/iscsi.py:310 +#: cinder/volume/drivers/netapp/iscsi.py:565 +#: cinder/volume/drivers/netapp/nfs.py:99 +#: cinder/volume/drivers/netapp/nfs.py:206 #, python-format -msgid "Request made with missing token: %s" +msgid "Resizing %s failed. Cleaning volume." msgstr "" -#: cinder/vnc/xvp_proxy.py:153 +#: cinder/volume/drivers/netapp/iscsi.py:325 #, python-format -msgid "Request made with invalid token: %s" +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" msgstr "" -#: cinder/vnc/xvp_proxy.py:160 +#: cinder/volume/drivers/netapp/iscsi.py:412 #, python-format -msgid "Unexpected error: %s" +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" msgstr "" -#: cinder/vnc/xvp_proxy.py:180 +#: cinder/volume/drivers/netapp/iscsi.py:431 #, python-format -msgid "Starting cinder-xvpvncproxy node (version %s)" +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" msgstr "" -#: cinder/volume/api.py:74 cinder/volume/api.py:220 -msgid "status must be available" +#: cinder/volume/drivers/netapp/iscsi.py:511 +msgid "Object is not a NetApp LUN." msgstr "" -#: cinder/volume/api.py:85 +#: cinder/volume/drivers/netapp/iscsi.py:543 #, python-format -msgid "Quota exceeded for %(pid)s, tried to create %(size)sG volume" +msgid "Message: %s" msgstr "" -#: cinder/volume/api.py:137 -#, fuzzy -msgid "Volume status must be available or error" -msgstr "Status volumena mora biti omogućen" +#: cinder/volume/drivers/netapp/iscsi.py:545 +#, python-format +msgid "Error getting lun attribute. Exception: %s" +msgstr "" -#: cinder/volume/api.py:142 +#: cinder/volume/drivers/netapp/iscsi.py:600 #, python-format -msgid "Volume still has %d dependent snapshots" +msgid "No need to extend volume %s as it is already the requested new size." msgstr "" -#: cinder/volume/api.py:223 -msgid "already attached" +#: cinder/volume/drivers/netapp/iscsi.py:606 +#, python-format +msgid "Resizing lun %s directly to new size." 
msgstr "" -#: cinder/volume/api.py:230 -msgid "already detached" +#: cinder/volume/drivers/netapp/iscsi.py:633 +#, python-format +msgid "Lun %(path)s geometry failed. Message - %(msg)s" msgstr "" -#: cinder/volume/api.py:292 -msgid "must be available" +#: cinder/volume/drivers/netapp/iscsi.py:662 +#, python-format +msgid "Moving lun %(name)s to %(new_name)s." msgstr "" -#: cinder/volume/api.py:325 -#, fuzzy -msgid "Volume Snapshot status must be available or error" -msgstr "Status volumena mora biti omogućen" +#: cinder/volume/drivers/netapp/iscsi.py:677 +#, python-format +msgid "Resizing lun %s using sub clone to new size." +msgstr "" -#: cinder/volume/driver.py:96 +#: cinder/volume/drivers/netapp/iscsi.py:684 #, python-format -msgid "Recovering from a failed execute. Try number %s" +msgid "%s cannot be sub clone resized as it is hosted on compressed volume" msgstr "" -#: cinder/volume/driver.py:106 +#: cinder/volume/drivers/netapp/iscsi.py:690 #, python-format -msgid "volume group %s doesn't exist" +msgid "%s cannot be sub clone resized as it contains no blocks." msgstr "" -#: cinder/volume/driver.py:270 +#: cinder/volume/drivers/netapp/iscsi.py:707 #, python-format -msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %d" +msgid "Post clone resize lun %s" msgstr "" -#: cinder/volume/driver.py:318 +#: cinder/volume/drivers/netapp/iscsi.py:718 #, python-format -msgid "Skipping remove_export. No iscsi_target provisioned for volume: %d" +msgid "Failure staging lun %s to tmp." msgstr "" -#: cinder/volume/driver.py:327 +#: cinder/volume/drivers/netapp/iscsi.py:723 +#, python-format +msgid "Failure moving new cloned lun to %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:727 +#, python-format +msgid "Failure deleting staged tmp lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:730 +#, python-format +msgid "Unknown exception in post clone resize lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:732 +#, python-format +msgid "Exception details: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:736 +msgid "Getting lun block count." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:741 +#, python-format +msgid "Failure getting lun info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:785 +#, python-format +msgid "Failed to get vol with required size and extra specs for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:796 +#, python-format +msgid "Error provisioning vol %(name)s on %(volume)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:841 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:982 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:986 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1087 +msgid "Cluster ssc is not updated. No volume stats found." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1149 +#: cinder/volume/drivers/netapp/nfs.py:1080 +msgid "Unsupported ONTAP version. ONTAP version 7.3.1 and above is supported." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1153 +#: cinder/volume/drivers/netapp/nfs.py:1084 +#: cinder/volume/drivers/netapp/utils.py:320 +msgid "Api version could not be determined." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1164 +#, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1273 +#, python-format +msgid "Error finding luns for volume %s. Verify volume exists." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1390 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1393 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1456 +msgid "Volume refresh job already running. Returning..." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1462 +#, python-format +msgid "Error refreshing vol capacity. Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1470 +#, python-format +msgid "Refreshing capacity info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:104 +#: cinder/volume/drivers/netapp/nfs.py:211 +#, python-format +msgid "NFS file %s not discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:224 +#, python-format +msgid "Copied image to volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:230 +#, python-format +msgid "Registering image in cache %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:237 #, python-format msgid "" -"Skipping remove_export. No iscsi_target is presently exported for volume:" -" %d" +"Exception while registering image %(image_id)s in cache. Exception: " +"%(exc)s" msgstr "" -#: cinder/volume/driver.py:337 -msgid "ISCSI provider_location not stored, using discovery" +#: cinder/volume/drivers/netapp/nfs.py:250 +#, python-format +msgid "Found cache file for image %(image_id)s on share %(share)s" msgstr "" -#: cinder/volume/driver.py:384 +#: cinder/volume/drivers/netapp/nfs.py:263 #, python-format -msgid "Could not find iSCSI export for volume %s" +msgid "Cloning img from cache for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:271 +msgid "Image cache cleaning in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:282 +msgid "Image cache cleaning in progress." msgstr "" -#: cinder/volume/driver.py:388 +#: cinder/volume/drivers/netapp/nfs.py:293 #, python-format -msgid "ISCSI Discovery: Found %s" +msgid "Cleaning cache for share %s." msgstr "" -#: cinder/volume/driver.py:466 +#: cinder/volume/drivers/netapp/nfs.py:298 #, python-format -msgid "Cannot confirm exported volume id:%(volume_id)s." +msgid "Files to be queued for deletion %s" msgstr "" -#: cinder/volume/driver.py:493 +#: cinder/volume/drivers/netapp/nfs.py:305 #, python-format -msgid "FAKE ISCSI: %s" +msgid "Exception during cache cleaning %(share)s. Message - %(ex)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:311 +msgid "Image cache cleaning done." 
msgstr "" -#: cinder/volume/driver.py:505 +#: cinder/volume/drivers/netapp/nfs.py:336 #, python-format -msgid "rbd has no pool %s" +msgid "Bytes to free %s" msgstr "" -#: cinder/volume/driver.py:579 +#: cinder/volume/drivers/netapp/nfs.py:343 #, python-format -msgid "Sheepdog is not working: %s" +msgid "Delete file path %s" msgstr "" -#: cinder/volume/driver.py:581 -msgid "Sheepdog is not working" +#: cinder/volume/drivers/netapp/nfs.py:358 +#, python-format +msgid "Deleting file at path %s" msgstr "" -#: cinder/volume/driver.py:680 cinder/volume/driver.py:685 +#: cinder/volume/drivers/netapp/nfs.py:363 #, python-format -msgid "LoggingVolumeDriver: %s" +msgid "Exception during deleting %s" msgstr "" -#: cinder/volume/manager.py:96 +#: cinder/volume/drivers/netapp/nfs.py:395 #, python-format -msgid "Re-exporting %s volumes" +msgid "Unexpected exception in cloning image %(image_id)s. Message: %(msg)s" msgstr "" -#: cinder/volume/manager.py:101 +#: cinder/volume/drivers/netapp/nfs.py:411 #, python-format -msgid "volume %s: skipping export" +msgid "Cloning image %s from cache" msgstr "" -#: cinder/volume/manager.py:107 +#: cinder/volume/drivers/netapp/nfs.py:415 #, python-format -msgid "volume %s: creating" +msgid "Cache share: %s" msgstr "" -#: cinder/volume/manager.py:119 +#: cinder/volume/drivers/netapp/nfs.py:425 #, python-format -msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgid "Unexpected exception during image cloning in share %s" msgstr "" -#: cinder/volume/manager.py:131 +#: cinder/volume/drivers/netapp/nfs.py:431 #, python-format -msgid "volume %s: creating export" +msgid "Cloning image %s directly in share" msgstr "" -#: cinder/volume/manager.py:144 +#: cinder/volume/drivers/netapp/nfs.py:436 #, python-format -msgid "volume %s: created successfully" +msgid "Share is cloneable %s" msgstr "" -#: cinder/volume/manager.py:153 -msgid "Volume is still attached" +#: cinder/volume/drivers/netapp/nfs.py:443 +#, python-format +msgid "Image is raw %s" msgstr "" -#: cinder/volume/manager.py:155 -msgid "Volume is not local to this node" +#: cinder/volume/drivers/netapp/nfs.py:450 +#, python-format +msgid "Image will locally be converted to raw %s" msgstr "" -#: cinder/volume/manager.py:159 +#: cinder/volume/drivers/netapp/nfs.py:457 #, python-format -msgid "volume %s: removing export" +msgid "Converted to raw, but format is now %s" msgstr "" -#: cinder/volume/manager.py:161 +#: cinder/volume/drivers/netapp/nfs.py:467 #, python-format -msgid "volume %s: deleting" +msgid "Performing post clone for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:474 +msgid "NFS file could not be discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:478 +msgid "Checking file for resize" msgstr "" -#: cinder/volume/manager.py:164 +#: cinder/volume/drivers/netapp/nfs.py:482 #, python-format -msgid "volume %s: volume is busy" +msgid "Resizing file to %sG" msgstr "" -#: cinder/volume/manager.py:176 +#: cinder/volume/drivers/netapp/nfs.py:488 +msgid "Resizing image file failed." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:510 +msgid "Discover file retries exhausted." 
+msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:529 #, python-format -msgid "volume %s: deleted successfully" +msgid "Image location not in the expected format %s" msgstr "" -#: cinder/volume/manager.py:183 +#: cinder/volume/drivers/netapp/nfs.py:557 #, python-format -msgid "snapshot %s: creating" +msgid "Found possible share matches %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:561 +msgid "Unexpected exception while short listing used share." msgstr "" -#: cinder/volume/manager.py:187 +#: cinder/volume/drivers/netapp/nfs.py:599 #, python-format -msgid "snapshot %(snap_name)s: creating" +msgid "Extending volume %s." msgstr "" -#: cinder/volume/manager.py:202 +#: cinder/volume/drivers/netapp/nfs.py:710 #, python-format -msgid "snapshot %s: created successfully" +msgid "Shares on vserver %s will only be used for provisioning." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:714 +#: cinder/volume/drivers/netapp/nfs.py:892 +msgid "No vserver set in config. SSC will be disabled." msgstr "" -#: cinder/volume/manager.py:211 +#: cinder/volume/drivers/netapp/nfs.py:757 #, python-format -msgid "snapshot %s: deleting" +msgid "Exception creating vol %(name)s on share %(share)s" msgstr "" -#: cinder/volume/manager.py:214 +#: cinder/volume/drivers/netapp/nfs.py:765 #, python-format -msgid "snapshot %s: snapshot is busy" +msgid "Volume %s could not be created on shares." msgstr "" -#: cinder/volume/manager.py:226 +#: cinder/volume/drivers/netapp/nfs.py:815 #, python-format -msgid "snapshot %s: deleted successfully" +msgid "No interface found on cluster for ip %s" msgstr "" -#: cinder/volume/manager.py:310 -msgid "Checking volume capabilities" +#: cinder/volume/drivers/netapp/nfs.py:856 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " msgstr "" -#: cinder/volume/manager.py:314 +#: cinder/volume/drivers/netapp/nfs.py:862 #, python-format -msgid "New capabilities found: %s" +msgid "" +"Cloning with params volume %(volume)s, src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" msgstr "" -#: cinder/volume/manager.py:325 -msgid "Clear capabilities" +#: cinder/volume/drivers/netapp/nfs.py:924 +msgid "No cluster ssc stats found. Wait for next volume stats update." msgstr "" -#: cinder/volume/manager.py:329 +#: cinder/volume/drivers/netapp/nfs.py:941 +msgid "No shares found hence skipping ssc refresh." 
+msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:978 +#: cinder/volume/drivers/netapp/nfs.py:1221 #, python-format -msgid "Notification {%s} received" +msgid "Shortlisted del elg files %s" msgstr "" -#: cinder/volume/netapp.py:79 +#: cinder/volume/drivers/netapp/nfs.py:983 +#: cinder/volume/drivers/netapp/nfs.py:1226 #, python-format -msgid "API %(name)sfailed: %(reason)s" +msgid "Getting file usage for %s" msgstr "" -#: cinder/volume/netapp.py:109 +#: cinder/volume/drivers/netapp/nfs.py:988 +#: cinder/volume/drivers/netapp/nfs.py:1231 #, python-format -msgid "%s is not set" +msgid "file-usage for path %(path)s is %(bytes)s" msgstr "" -#: cinder/volume/netapp.py:128 -msgid "Connected to DFM server" +#: cinder/volume/drivers/netapp/nfs.py:1005 +#: cinder/volume/drivers/netapp/nfs.py:1268 +#, python-format +msgid "Share match found for ip %s" msgstr "" -#: cinder/volume/netapp.py:159 +#: cinder/volume/drivers/netapp/nfs.py:1007 +#: cinder/volume/drivers/netapp/nfs.py:1270 #, python-format -msgid "Job failed: %s" +msgid "No share match found for ip %s" msgstr "" -#: cinder/volume/netapp.py:240 -msgid "Failed to provision dataset member" +#: cinder/volume/drivers/netapp/nfs.py:1038 +#, python-format +msgid "Found volume %(vol)s for share %(share)s." msgstr "" -#: cinder/volume/netapp.py:252 -msgid "No LUN was created by the provision job" +#: cinder/volume/drivers/netapp/nfs.py:1129 +#, python-format +msgid "No storage path found for export path %s" msgstr "" -#: cinder/volume/netapp.py:261 cinder/volume/netapp.py:433 +#: cinder/volume/drivers/netapp/nfs.py:1139 #, python-format -msgid "Failed to find LUN ID for volume %s" +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" msgstr "" -#: cinder/volume/netapp.py:280 -msgid "Failed to remove and delete dataset member" -msgstr "" +#: cinder/volume/drivers/netapp/ssc_utils.py:241 +#, python-format +msgid "Unexpected error while creating ssc vol list. Message - %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:272 +#, python-format +msgid "Exception querying aggr options. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:313 +#, python-format +msgid "Exception querying sis information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:347 +#, python-format +msgid "Exception querying mirror information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:379 +#, python-format +msgid "Exception querying storage disk. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:421 +#, python-format +msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:455 +#, python-format +msgid "Successfully completed stale refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:482 +#, python-format +msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:488 +#, python-format +msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:500 +msgid "Backend not a VolumeDriver." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:502 +msgid "Backend server not NaServer." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:505 +msgid "ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:517 +msgid "refresh stale ssc job in progress. Returning... 
" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:616 +msgid "Fatal error: User not permitted to query NetApp volumes." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:623 +#, python-format +msgid "" +"The user does not have access or sufficient privileges to use all ssc " +"apis. The ssc features %s may not work as expected." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:122 +msgid "ems executed successfully." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:124 +#, python-format +msgid "Failed to invoke ems. Message : %s" +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:137 +msgid "" +"It is not the recommended way to use drivers by NetApp. Please use " +"NetAppDriver to achieve the functionality." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:160 +msgid "Requires an NaServer instance." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:317 +msgid "Unsupported Clustered Data ONTAP version." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:99 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:150 +#, python-format +msgid "Extending volume: %(id)s New size: %(size)s GB" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:166 +#, python-format +msgid "Volume %s does not exist, it seems it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:179 +#, python-format +msgid "Cannot delete snapshot %(origin): %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:190 +#, python-format +msgid "Creating temp snapshot of the original volume: %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:200 +#: cinder/volume/drivers/nexenta/nfs.py:200 +#, python-format +msgid "Volume creation failed, deleting created snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:205 +#: cinder/volume/drivers/nexenta/nfs.py:205 +#, python-format +msgid "Failed to delete zfs snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:223 +#, python-format +msgid "Enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:250 +#, python-format +msgid "Remote NexentaStor appliance at %s should be SSH-bound." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:267 +#, python-format +msgid "" +"Cannot send source snapshot %(src)s to destination %(dst)s. Reason: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:275 +#, python-format +msgid "" +"Cannot delete temporary source snapshot %(src)s on NexentaStor Appliance:" +" %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:281 +#, python-format +msgid "Cannot delete source volume %(volume)s on NexentaStor Appliance: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:318 +#, python-format +msgid "Snapshot %s does not exist, it seems it was already deleted." 
+msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:439 +#: cinder/volume/drivers/windows/windows_utils.py:230 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:449 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:461 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:471 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:481 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:514 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:522 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:83 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:88 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:89 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:90 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:96 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:85 +#, python-format +msgid "Volume %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:89 +#, python-format +msgid "Folder %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:114 +#, python-format +msgid "Creating folder on Nexenta Store %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:146 +#, python-format +msgid "Cannot destroy created folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:176 +#, python-format +msgid "Cannot destroy cloned folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:227 +#, python-format +msgid "Folder %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:237 +#: cinder/volume/drivers/nexenta/nfs.py:268 +#, python-format +msgid "Snapshot %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:302 +#, python-format +msgid "Creating regular file: %s.This may take some time." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:313 +#, python-format +msgid "Regular file: %s created." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:365 +#, python-format +msgid "Sharing folder %s on Nexenta Store" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:393 +#, python-format +msgid "Shares loaded: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/utils.py:46 +#, python-format +msgid "Invalid value: \"%s\"" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:93 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:99 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:107 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:137 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:190 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:246 +#, python-format +msgid "Snapshot info: %(name)s => %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:321 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:79 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:166 +#, python-format +msgid "Invalid hp3parclient version. Version %s or greater required." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:179 +#, python-format +msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:193 +#, python-format +msgid "HP3PARCommon %(common_ver)s, hp3parclient %(rest_ver)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:212 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:494 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:228 +#, python-format +msgid "Failed to get domain because CPG (%s) doesn't exist on array." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:247 +#, python-format +msgid "Error extending volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:347 +#, python-format +msgid "command %s failed" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:390 +#, fuzzy, python-format +msgid "Error running ssh command: %s" +msgstr "Neočekivana greška prilikom pokretanja komande." + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:586 +#, python-format +msgid "VV Set %s does not exist." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:633 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:684 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:752 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1004 +#, python-format +msgid "Failure in update_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1019 +#, python-format +msgid "Failure in clear_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1031 +#, python-format +msgid "Error attaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1039 +#, python-format +msgid "Error detaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:124 +#, python-format +msgid "Invalid IP address format '%s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:158 +#, python-format +msgid "" +"Found invalid iSCSI IP address(s) in configuration option(s) " +"hp3par_iscsi_ips or iscsi_ip_address '%s.'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:164 +msgid "At least one valid iSCSI IP address must be set." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:266 +msgid "Least busy iSCSI port not found, using first iSCSI port in list." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:75 +#, python-format +msgid "Failure while invoking function: %(func)s. Error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:162 +#, python-format +msgid "Error while terminating session: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:165 +msgid "Successfully established connection to the server." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:172 +#, python-format +msgid "Error while logging out the user: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:218 +#, python-format +msgid "" +"Not authenticated error occurred. Will create session and try API call " +"again: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:258 +#, python-format +msgid "Task: %(task)s progress: %(prog)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:262 +#, python-format +msgid "Task %s status: success." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:266 +#: cinder/volume/drivers/vmware/api.py:271 +#, python-format +msgid "Task: %(task)s failed with error: %(err)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:290 +msgid "Lease is ready." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:294 +msgid "Lease initializing..." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:304 +#, python-format +msgid "Error: unknown lease state %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:51 +#, python-format +msgid "Read %(bytes)s out of %(max)s from ThreadSafePipe." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:56 +#, python-format +msgid "Completed transfer of size %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:102 +#, python-format +msgid "Initiating image service update on image: %(image)s with meta: %(meta)s" +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:117 +#, python-format +msgid "Glance image: %s is now active." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:123 +#, python-format +msgid "Glance image: %s is in killed state." 
+msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:132 +#, python-format +msgid "Glance image %(id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:171 +#, python-format +msgid "" +"Exception during HTTP connection close in VMwareHTTPWrite. Exception is " +"%s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:203 +#: cinder/volume/drivers/vmware/read_write_util.py:292 +msgid "Could not retrieve URL from lease." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:206 +#, python-format +msgid "Opening vmdk url: %s for write." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:231 +#, python-format +msgid "Written %s bytes to vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:242 +#: cinder/volume/drivers/vmware/read_write_util.py:318 +#, python-format +msgid "Updating progress to %s percent." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:258 +#: cinder/volume/drivers/vmware/read_write_util.py:334 +msgid "Lease released." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:260 +#: cinder/volume/drivers/vmware/read_write_util.py:336 +#, python-format +msgid "Lease is already in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:295 +#, python-format +msgid "Opening vmdk url: %s for read." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:307 +#, python-format +msgid "Read %s bytes from vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:150 +#, python-format +msgid "Error(s): %s occurred in the call to RetrievePropertiesEx." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:189 +#, python-format +msgid "No such SOAP method %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:198 +#, python-format +msgid "httplib error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:209 +#, python-format +msgid "Socket error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:218 +#, python-format +msgid "Type error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:225 +#, python-format +msgid "Error in %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:112 +#, python-format +msgid "Returning spec value %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:115 +#, python-format +msgid "Invalid spec value: %s specified." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:118 +#, python-format +msgid "Returning default spec value: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:169 +#, python-format +msgid "%s not set." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:174 +#, python-format +msgid "Successfully setup driver: %(driver)s for server: %(ip)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:219 +msgid "Backing not available, no operation to be performed." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:287 +#, python-format +msgid "" +"Unable to pick datastore to accommodate %(size)s bytes from the " +"datastores: %(dss)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:293 +#, python-format +msgid "" +"Selected datastore: %(datastore)s with %(host_count)d connected host(s) " +"for the volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:375 +#, python-format +msgid "" +"Unable to find suitable datastore for volume of size: %(vol)s GB under " +"host: %(host)s. 
More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:385 +#, python-format +msgid "Unable to find host to accommodate a disk of size: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:412 +#, python-format +msgid "" +"Unable to find suitable datastore for volume: %(vol)s under host: " +"%(host)s. More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:422 +#, python-format +msgid "Unable to create volume: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:441 +#, python-format +msgid "The instance: %s for which initialize connection is called, exists." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:448 +#, python-format +msgid "There is no backing for the volume: %s. Need to create one." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:456 +msgid "The instance for which initialize connection is called, does not exist." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:461 +#, python-format +msgid "Trying to boot from an empty volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:470 +#, python-format +msgid "" +"Returning connection_info: %(info)s for volume: %(volume)s with " +"connector: %(connector)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:518 +#, python-format +msgid "Snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:523 +#, python-format +msgid "There is no backing, so will not create snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:528 +#, python-format +msgid "Successfully created snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:549 +#, python-format +msgid "Delete snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:554 +#, python-format +msgid "There is no backing, and so there is no snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:558 +#, python-format +msgid "Successfully deleted snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:586 +#, python-format +msgid "Successfully cloned new backing: %(back)s from source VMDK file: %(vmdk)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:603 +#, python-format +msgid "" +"There is no backing for the source volume: %(svol)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:633 +#, python-format +msgid "" +"There is no backing for the source snapshot: %(snap)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:642 +#: cinder/volume/drivers/vmware/vmdk.py:982 +#, python-format +msgid "" +"There is no snapshot point for the snapshoted volume: %(snap)s. Not " +"creating any backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:678 +#, python-format +msgid "Cannot create image of disk format: %s. Only vmdk disk format is accepted." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:713 +#: cinder/volume/drivers/vmware/vmdk.py:771 +#, python-format +msgid "Fetching glance image: %(id)s to server: %(host)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:722 +#: cinder/volume/drivers/vmware/vmdk.py:792 +#, python-format +msgid "Done copying image: %(id)s to volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:725 +#, python-format +msgid "" +"Exception in copy_image_to_volume: %(excep)s. Deleting the backing: " +"%(back)s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:746 +#, python-format +msgid "Exception in _select_ds_for_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:749 +#, python-format +msgid "Selected datastore %(ds)s for new volume of size %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:784 +#, python-format +msgid "Exception in copy_image_to_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:787 +#, python-format +msgid "Deleting the backing: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:808 +#, python-format +msgid "Copy glance image: %s to create new volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:842 +msgid "Upload to glance of attached volume is not supported." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:847 +#, python-format +msgid "Copy Volume: %s to new image." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:853 +#, python-format +msgid "Backing not found, creating for volume: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:872 +#, python-format +msgid "Done copying volume %(vol)s to a new image %(img)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:922 +#, python-format +msgid "Relocating volume: %(backing)s to %(ds)s and %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:961 +#: cinder/volume/drivers/vmware/volumeops.py:630 +#, python-format +msgid "Successfully created clone: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:974 +#, python-format +msgid "" +"There is no backing for the snapshoted volume: %(snap)s. Not creating any" +" backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1010 +#, python-format +msgid "" +"There is no backing for the source volume: %(src)s. Not creating any " +"backing for volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1018 +#, python-format +msgid "Linked clone of source volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:94 +#, python-format +msgid "Downloading image: %s from glance image server as a flat vmdk file." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:107 +#: cinder/volume/drivers/vmware/vmware_images.py:126 +#, python-format +msgid "Downloaded image: %s from glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:113 +#, python-format +msgid "Downloading image: %s from glance image server using HttpNfc import." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:132 +#, python-format +msgid "Uploading image: %s to the Glance image server using HttpNfc export." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:158 +#, python-format +msgid "Uploaded image: %s to the Glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:87 +#, python-format +msgid "Did not find any backing with name: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:94 +#, python-format +msgid "Deleting the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:97 +#, python-format +msgid "Initiated deletion of VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:99 +#, python-format +msgid "Deleted the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:237 +#, python-format +msgid "There are no valid datastores attached to %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:289 +#, python-format +msgid "" +"Creating folder: %(child_folder_name)s under parent folder: " +"%(parent_folder)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:306 +#, python-format +msgid "Child folder already present: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:314 +#, python-format +msgid "Created child folder: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:365 +#, python-format +msgid "Spec for creating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:383 +#, python-format +msgid "" +"Creating volume backing name: %(name)s disk_type: %(disk_type)s size_kb: " +"%(size_kb)s at folder: %(folder)s resourse pool: %(resource_pool)s " +"datastore name: %(ds_name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:395 +#, python-format +msgid "Initiated creation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:398 +#, python-format +msgid "Successfully created volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:438 +#, python-format +msgid "Spec for relocating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:452 +#, python-format +msgid "" +"Relocating backing: %(backing)s to datastore: %(ds)s and resource pool: " +"%(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:462 +#, python-format +msgid "Initiated relocation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:464 +#, python-format +msgid "" +"Successfully relocated volume backing: %(backing)s to datastore: %(ds)s " +"and resource pool: %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:474 +#, python-format +msgid "Moving backing: %(backing)s to folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:479 +#, python-format +msgid "Initiated move of volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:482 +#, python-format +msgid "Successfully moved volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:494 +#, python-format +msgid "Snapshoting backing: %(backing)s with name: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:501 +#, python-format +msgid "Initiated snapshot of volume backing: %(backing)s named: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:505 +#, python-format +msgid "Successfully created snapshot: %(snap)s for volume backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:553 +#, python-format +msgid "Deleting the snapshot: %(name)s from backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:558 +#, python-format +msgid "" +"Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " +"delete anything." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:565 +#, python-format +msgid "Initiated snapshot: %(name)s deletion for backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:569 +#, python-format +msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:597 +#, python-format +msgid "Spec for cloning the backing: %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:613 +#, python-format +msgid "" +"Creating a clone of backing: %(back)s, named: %(name)s, clone type: " +"%(type)s from snapshot: %(snap)s on datastore: %(ds)s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:627 +#, python-format +msgid "Initiated clone of backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:638 +#, python-format +msgid "Deleting file: %(file)s under datacenter: %(dc)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:646 +#, python-format +msgid "Initiated deletion via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:648 +#, python-format +msgid "Successfully deleted file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:701 +msgid "Copying disk data before snapshot of the VM" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:711 +#, python-format +msgid "Initiated copying disk data via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:713 +#, python-format +msgid "Successfully copied disk at: %(src)s to: %(dest)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:722 +#, python-format +msgid "Deleting vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:729 +#, python-format +msgid "Initiated deleting vmdk file via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:731 +#, python-format +msgid "Deleted vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/windows/windows.py:102 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:47 +#, python-format +msgid "" +"check_for_setup_error: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:53 +msgid "check_for_setup_error: there is no ISCSI traffic listening." +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:63 +#, python-format +msgid "" +"get_host_information: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:73 +#, python-format +msgid "" +"get_host_information: the ISCSI target information could not be " +"retrieved. WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:105 +#, python-format +msgid "" +"associate_initiator_with_iscsi_target: an association between initiator: " +"%(init)s and target name: %(target)s could not be established. WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:123 +#, python-format +msgid "" +"delete_iscsi_target: error when deleting the iscsi target associated with" +" target name: %(target)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:139 +#, python-format +msgid "" +"create_volume: error when creating the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:157 +#, python-format +msgid "" +"delete_volume: error when deleting the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:177 +#, python-format +msgid "" +"create_snapshot: error when creating the snapshot name: %(vol_name)s . 
" +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:193 +#, python-format +msgid "" +"create_volume_from_snapshot: error when creating the volume name: " +"%(vol_name)s from snapshot name: %(snap_name)s. WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:208 +#, python-format +msgid "" +"delete_snapshot: error when deleting the snapshot name: %(snap_name)s . " +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:223 +#, python-format +msgid "" +"create_iscsi_target: error when creating iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:240 +#, python-format +msgid "" +"remove_iscsi_target: error when deleting iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:255 +#, python-format +msgid "" +"add_disk_to_target: error adding disk associated to volume : %(vol_name)s" +" to the target name: %(tar_name)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:273 +#, python-format +msgid "" +"copy_vhd_disk: error when copying disk from source path : %(src_path)s to" +" destination path: %(dest_path)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:290 +#, python-format +msgid "" +"extend: error when extending the volume: %(vol_name)s .WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/flows/common.py:52 +#, python-format +msgid "Restoring source %(source_volid)s status to %(status)s" +msgstr "" + +#: cinder/volume/flows/common.py:58 +#, python-format +msgid "" +"Failed setting source volume %(source_volid)s back to its initial " +"%(source_status)s status" +msgstr "" + +#: cinder/volume/flows/common.py:83 +#, python-format +msgid "Updating volume: %(volume_id)s with %(update)s due to: %(reason)s" +msgstr "" + +#: cinder/volume/flows/common.py:90 +#: cinder/volume/flows/manager/create_volume.py:676 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(update)s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:81 +#, python-format +msgid "Originating snapshot status must be one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:103 +#, python-format +msgid "" +"Unable to create a volume from an originating source volume when its " +"status is not one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:126 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than the snapshot size " +"%(snap_size)sGB. They must be >= original snapshot size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:135 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than original volume size " +"%(source_size)sGB. They must be >= original volume size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:144 +#, python-format +msgid "Volume size %(size)s must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:186 +#, python-format +msgid "" +"Size of specified image %(image_size)sGB is larger than volume size " +"%(volume_size)sGB." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:194 +#, python-format +msgid "" +"Volume size %(volume_size)sGB cannot be smaller than the image minDisk " +"size %(min_disk)sGB." 
+msgstr "" + +#: cinder/volume/flows/api/create_volume.py:212 +#, python-format +msgid "Metadata property key %s greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:217 +#, python-format +msgid "Metadata property key %s value greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:254 +#, python-format +msgid "Availability zone '%s' is invalid" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:267 +msgid "Volume must be in the same availability zone as the snapshot" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:276 +msgid "Volume must be in the same availability zone as the source volume" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:315 +msgid "Volume type will be changed to be the same as the source volume." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:463 +#, python-format +msgid "Failed destroying volume entry %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:546 +#, python-format +msgid "Failed rolling back quota for %s reservations" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:590 +#, python-format +msgid "Failed to update quota for deleting volume: %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:678 +#: cinder/volume/flows/manager/create_volume.py:192 +#, python-format +msgid "Volume %s: create failed" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:682 +msgid "Unexpected build error:" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:105 +#, python-format +msgid "" +"Volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d due to " +"%(reason)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:124 +#, python-format +msgid "Volume %s: re-scheduled" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:141 +#, python-format +msgid "Updating volume %(volume_id)s with %(update)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:146 +#, python-format +msgid "Volume %s: resetting 'creating' status failed." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:165 +#, python-format +msgid "Volume %s: rescheduling failed" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:308 +#, python-format +msgid "" +"Failed notifying about the volume action %(event)s for volume " +"%(volume_id)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:345 +#, python-format +msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:347 +#, python-format +msgid "" +"Failed updating volume %(vol_id)s metadata using the provided " +"%(src_type)s %(src_id)s metadata" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:405 +#, python-format +msgid "" +"Failed fetching snapshot %(snapshot_id)s bootable flag using the provided" +" glance snapshot %(snapshot_ref_id)s volume reference" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:418 +#, python-format +msgid "Marking volume %s as bootable." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:421 +#, python-format +msgid "Failed updating volume %(volume_id)s bootable flag to true" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:448 +#, python-format +msgid "" +"Attempting download of %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s." 
+msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:455 +#: cinder/volume/flows/manager/create_volume.py:466 +#, python-format +msgid "" +"Failed to copy image %(image_id)s to volume: %(volume_id)s, error: " +"%(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:461 +#, python-format +msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:475 +#, python-format +msgid "" +"Downloaded image %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s successfully." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:512 +#, python-format +msgid "" +"Creating volume glance metadata for volume %(volume_id)s backed by image " +"%(image_id)s with: %(vol_metadata)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:526 +#, python-format +msgid "" +"Cloning %(volume_id)s from image %(image_id)s at location " +"%(image_location)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:552 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(updates)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:574 +#, python-format +msgid "Unable to create volume. Volume driver %s not initialized" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:588 +#, python-format +msgid "" +"Volume %(volume_id)s: being created using %(functor)s with specification:" +" %(volume_spec)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:611 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with creation provided " +"model %(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:619 +#, python-format +msgid "Volume %s: creating export" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:633 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with driver provided model " +"%(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:680 +#, python-format +msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" +msgstr "" + +#~ msgid "Error retrieving volume status: %s" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get system name" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get storage pool data" +#~ msgstr "" + +#~ msgid "Cannot find any Fibre Channel HBAs" +#~ msgstr "" + +#~ msgid "Volume status must be available or error" +#~ msgstr "Status volumena mora biti omogućen" + +#~ msgid "No backend config with id %s" +#~ msgstr "" + +#~ msgid "No sm_flavor called %s" +#~ msgstr "" + +#~ msgid "No sm_volume with id %s" +#~ msgstr "" + +#~ msgid "Error: %s" +#~ msgstr "" + +#~ msgid "Unexpected state while cloning %s" +#~ msgstr "Neočekivana greška prilikom pokretanja komande." + +#~ msgid "iSCSI device not found at %s" +#~ msgstr "" + +#~ msgid "Fibre Channel device not found." 
+#~ msgstr "" + +#~ msgid "Uncaught exception" +#~ msgstr "" + +#~ msgid "Out reactor registered" +#~ msgstr "" + +#~ msgid "CONSUMER GOT %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT QUEUED %(data)s" +#~ msgstr "" + +#~ msgid "Could not create IPC directory %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT %(data)s" +#~ msgstr "" + +#~ msgid "May specify only one of snapshot, imageRef or source volume" +#~ msgstr "" + +#~ msgid "Volume size cannot be lesser than the Snapshot size" +#~ msgstr "" + +#~ msgid "Unable to clone volumes that are in an error state" +#~ msgstr "" + +#~ msgid "Clones currently must be >= original volume size." +#~ msgstr "" + +#~ msgid "Volume size '%s' must be an integer and greater than 0" +#~ msgstr "" + +#~ msgid "Size of specified image is larger than volume size." +#~ msgstr "" + +#~ msgid "Image minDisk size is larger than the volume size." +#~ msgstr "" + +#~ msgid "" +#~ msgstr "" + +#~ msgid "Availability zone is invalid" +#~ msgstr "" + +#~ msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +#~ msgstr "" + +#~ msgid "volume %s: creating from snapshot" +#~ msgstr "" + +#~ msgid "volume %s: creating from existing volume" +#~ msgstr "" + +#~ msgid "volume %s: creating from image" +#~ msgstr "" + +#~ msgid "volume %s: creating" +#~ msgstr "" + +#~ msgid "Setting volume: %s status to error after failed image copy." +#~ msgstr "" + +#~ msgid "Unexpected Error: " +#~ msgstr "" + +#~ msgid "volume %s: creating export" +#~ msgstr "" + +#~ msgid "volume %s: create failed" +#~ msgstr "" + +#~ msgid "volume %s: created successfully" +#~ msgstr "" + +#~ msgid "volume %s: Error trying to reschedule create" +#~ msgstr "" + +#~ msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +#~ msgstr "" + +#~ msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +#~ msgstr "" + +#~ msgid "Downloaded image %(image_id)s to %(volume_id)s successfully." +#~ msgstr "" + +#~ msgid "Array Mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "LUN %(lun)s of size %(size)s MB is created." +#~ msgstr "" + +#~ msgid "Array mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "Failed to attach iser target for volume %(volume_id)s." 
+#~ msgstr "" + +#~ msgid "Fetching %s" +#~ msgstr "" + +#~ msgid "Link Local address is not found.:%s" +#~ msgstr "" + +#~ msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +#~ msgstr "" + +#~ msgid "Started %(name)s on %(_host)s:%(_port)s" +#~ msgstr "" + +#~ msgid "Unable to find a Fibre Channel volume device" +#~ msgstr "" + +#~ msgid "Volume device not found at %s" +#~ msgstr "" + +#~ msgid "Unable to find Volume Group: %s" +#~ msgstr "" + +#~ msgid "Failed to create Volume Group: %s" +#~ msgstr "" + +#~ msgid "snapshot %(snap_name)s: creating" +#~ msgstr "" + +#~ msgid "Running with CoraidDriver for ESM EtherCLoud" +#~ msgstr "" + +#~ msgid "Update session cookie %(session)s" +#~ msgstr "" + +#~ msgid "Message : %(message)s" +#~ msgstr "" + +#~ msgid "Error while trying to set group: %(message)s" +#~ msgstr "" + +#~ msgid "Unable to find group: %(group)s" +#~ msgstr "" + +#~ msgid "ESM urlOpen error" +#~ msgstr "" + +#~ msgid "JSON Error" +#~ msgstr "" + +#~ msgid "Request without URL" +#~ msgstr "" + +#~ msgid "Configure data : %s" +#~ msgstr "" + +#~ msgid "Configure response : %s" +#~ msgstr "" + +#~ msgid "Unable to retrive volume infos for volume %(volname)s" +#~ msgstr "" + +#~ msgid "Cannot login on Coraid ESM" +#~ msgstr "" + +#~ msgid "Fail to create volume %(volname)s" +#~ msgstr "" + +#~ msgid "Failed to delete volume %(volname)s" +#~ msgstr "" + +#~ msgid "Failed to Create Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "Failed to Delete Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "Failed to Create Volume from Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "fmt = %(fmt)s backed by: %(backing_file)s" +#~ msgstr "" + +#~ msgid "Expected image to be in raw format, but is %s" +#~ msgstr "" + +#~ msgid "volume group %s doesn't exist" +#~ msgstr "" + +#~ msgid "Error retrieving volume stats: %s" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Could not get system name" +#~ msgstr "" + +#~ msgid "CPG (%s) must be in a domain" +#~ msgstr "" + +#~ msgid "Error populating default encryption types!" +#~ msgstr "" + +#~ msgid "Unexpected error while running command." +#~ msgstr "Neočekivana greška prilikom pokretanja komande." 
-#: cinder/volume/netapp.py:603 cinder/volume/netapp.py:657 -#, python-format -msgid "No LUN ID for volume %s" -msgstr "" +#~ msgid "Nexenta SA returned the error" +#~ msgstr "" -#: cinder/volume/netapp.py:607 cinder/volume/netapp.py:661 -#, python-format -msgid "Failed to get LUN details for LUN ID %s" -msgstr "" +#~ msgid "Ignored target group creation error \"%s\" while ensuring export" +#~ msgstr "" -#: cinder/volume/netapp.py:614 -#, python-format -msgid "Failed to get host details for host ID %s" -msgstr "" +#~ msgid "Ignored target group member addition error \"%s\" while ensuring export" +#~ msgstr "" -#: cinder/volume/netapp.py:620 -#, python-format -msgid "Failed to get target portal for filer: %s" -msgstr "" +#~ msgid "Ignored LU creation error \"%s\" while ensuring export" +#~ msgstr "" -#: cinder/volume/netapp.py:625 -#, python-format -msgid "Failed to get target IQN for filer: %s" -msgstr "" +#~ msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +#~ msgstr "" -#: cinder/volume/san.py:113 cinder/volume/san.py:151 -msgid "Specify san_password or san_private_key" -msgstr "" +#~ msgid "Copying metadata from snapshot %(snap_volume_id)s to %(volume_id)s" +#~ msgstr "" -#: cinder/volume/san.py:156 -msgid "san_ip must be set" -msgstr "" +#~ msgid "Connection to glance failed" +#~ msgstr "" -#: cinder/volume/san.py:320 -#, python-format -msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" -msgstr "" +#~ msgid "Invalid snapshot" +#~ msgstr "" -#: cinder/volume/san.py:452 -#, python-format -msgid "CLIQ command returned %s" -msgstr "" +#~ msgid "Invalid input received" +#~ msgstr "" -#: cinder/volume/san.py:458 -#, python-format -msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" -msgstr "" +#~ msgid "Invalid volume type" +#~ msgstr "" -#: cinder/volume/san.py:466 -#, python-format -msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" -msgstr "" +#~ msgid "Invalid volume" +#~ msgstr "" -#: cinder/volume/san.py:496 -#, python-format -msgid "" -"Unexpected number of virtual ips for cluster %(cluster_name)s. " -"Result=%(_xml)s" -msgstr "" +#~ msgid "Invalid host" +#~ msgstr "" -#: cinder/volume/san.py:549 -#, python-format -msgid "Volume info: %(volume_name)s => %(volume_attributes)s" -msgstr "" +#~ msgid "Invalid auth key" +#~ msgstr "" -#: cinder/volume/san.py:594 -msgid "local_path not supported" -msgstr "" +#~ msgid "Invalid metadata" +#~ msgstr "" -#: cinder/volume/san.py:626 -#, python-format -msgid "Could not determine project for volume %s, can't export" -msgstr "" +#~ msgid "Invalid metadata size" +#~ msgstr "" -#: cinder/volume/san.py:696 -#, python-format -msgid "Payload for SolidFire API call: %s" -msgstr "" +#~ msgid "Migration error" +#~ msgstr "" -#: cinder/volume/san.py:713 -#, python-format -msgid "Call to json.loads() raised an exception: %s" -msgstr "" +#~ msgid "Quota exceeded" +#~ msgstr "" -#: cinder/volume/san.py:718 -#, python-format -msgid "Results of SolidFire API call: %s" -msgstr "" +#~ msgid "Connection to swift failed" +#~ msgstr "" -#: cinder/volume/san.py:732 -#, python-format -msgid "Found solidfire account: %s" -msgstr "" +#~ msgid "Volume migration failed" +#~ msgstr "" -#: cinder/volume/san.py:746 -#, python-format -msgid "solidfire account: %s does not exist, create it..." -msgstr "" +#~ msgid "SSH command injection detected" +#~ msgstr "" -#: cinder/volume/san.py:804 -msgid "Enter SolidFire create_volume..." 
-msgstr "" +#~ msgid "Invalid qos specs" +#~ msgstr "" -#: cinder/volume/san.py:846 -msgid "Leaving SolidFire create_volume" -msgstr "" +#~ msgid "debug in callback: %s" +#~ msgstr "" -#: cinder/volume/san.py:861 -msgid "Enter SolidFire delete_volume..." -msgstr "" +#~ msgid "Expected object of type: %s" +#~ msgstr "" -#: cinder/volume/san.py:880 -#, python-format -msgid "Deleting volumeID: %s " -msgstr "" +#~ msgid "timefunc: '%(name)s' took %(total_time).2f secs" +#~ msgstr "" -#: cinder/volume/san.py:888 -msgid "Leaving SolidFire delete_volume" -msgstr "" +#~ msgid "base image still has %s snapshots so not deleting base image" +#~ msgstr "" -#: cinder/volume/san.py:891 -msgid "Executing SolidFire ensure_export..." -msgstr "" +#~ msgid "Failed to rename migration destination volume %(vol)s to %(name)s" +#~ msgstr "" -#: cinder/volume/san.py:895 -msgid "Executing SolidFire create_export..." -msgstr "" +#~ msgid "Resize volume \"%(name)s\" to %(size)s" +#~ msgstr "" -#: cinder/volume/volume_types.py:49 cinder/volume/volume_types.py:108 -msgid "name cannot be None" -msgstr "" +#~ msgid "Volume \"%(name)s\" resized. New size is %(size)s" +#~ msgstr "" -#: cinder/volume/volume_types.py:96 -msgid "id cannot be None" -msgstr "" +#~ msgid "Invalid snapshot backing file format: %s" +#~ msgstr "" -#: cinder/volume/xensm.py:55 -#, python-format -msgid "SR name = %s" -msgstr "" +#~ msgid "Extend volume from %(old_size) to %(new_size)" +#~ msgstr "" -#: cinder/volume/xensm.py:56 -#, python-format -msgid "Params: %s" -msgstr "" +#~ msgid "pool %s doesn't exist" +#~ msgstr "" -#: cinder/volume/xensm.py:60 -#, python-format -msgid "Failed to create sr %s...continuing" -msgstr "" +#~ msgid "_update_volume_stats: Could not get system name." +#~ msgstr "" -#: cinder/volume/xensm.py:62 -msgid "Create failed" -msgstr "" +#~ msgid "Disk not found: %s" +#~ msgstr "" -#: cinder/volume/xensm.py:64 -#, python-format -msgid "SR UUID of new SR is: %s" -msgstr "" +#~ msgid "read timed out" +#~ msgstr "" -#: cinder/volume/xensm.py:71 -msgid "Failed to update db" -msgstr "" +#~ msgid "check_for_setup_error." +#~ msgstr "" -#: cinder/volume/xensm.py:80 -#, python-format -msgid "Failed to introduce sr %s...continuing" -msgstr "" +#~ msgid "check_for_setup_error: Can not get device type." +#~ msgstr "" -#: cinder/volume/xensm.py:91 -#, python-format -msgid "Failed to reach backend %d" -msgstr "" +#~ msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +#~ msgstr "" -#: cinder/volume/xensm.py:100 -msgid "XenSMDriver requires xenapi connection" -msgstr "" +#~ msgid "_get_device_type: Storage Pool must be configured." +#~ msgstr "" -#: cinder/volume/xensm.py:110 -msgid "Failed to initiate session" -msgstr "" +#~ msgid "create_volume:volume name: %s." +#~ msgstr "" -#: cinder/volume/xensm.py:142 -#, python-format -msgid "Volume will be created in backend - %d" -msgstr "" +#~ msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +#~ msgstr "" -#: cinder/volume/xensm.py:154 -msgid "Failed to update volume in db" -msgstr "" +#~ msgid "create_export: volume name:%s" +#~ msgstr "" -#: cinder/volume/xensm.py:157 -msgid "Unable to create volume" -msgstr "" +#~ msgid "create_export:Volume %(name)s does not exist." +#~ msgstr "" -#: cinder/volume/xensm.py:171 -msgid "Failed to delete vdi" -msgstr "" +#~ msgid "initialize_connection:Failed to find target iSCSI iqn. 
Target IP:%(ip)s" +#~ msgstr "" -#: cinder/volume/xensm.py:177 -msgid "Failed to delete volume in db" -msgstr "" +#~ msgid "terminate_connection:Host does not exist. Host name:%(host)s." +#~ msgstr "" -#: cinder/volume/xensm.py:210 -msgid "Failed to find volume in db" -msgstr "" +#~ msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +#~ msgstr "" -#: cinder/volume/xensm.py:221 -msgid "Failed to find backend in db" -msgstr "" +#~ msgid "create_snapshot:Device does not support snapshot." +#~ msgstr "" -#: cinder/volume/nexenta/__init__.py:27 -msgid "Nexenta SA returned the error" -msgstr "" +#~ msgid "create_snapshot:Resource pool needs 1GB valid size at least." +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:64 -#, python-format -msgid "Sending JSON data: %s" -msgstr "" +#~ msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:69 -#, python-format -msgid "Auto switching to HTTPS connection to %s" -msgstr "" +#~ msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s" +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:75 -msgid "No headers in server response" -msgstr "" +#~ msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:76 -msgid "Bad response from server" -msgstr "" +#~ msgid "delete_snapshot:Device does not support snapshot." +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:79 -#, python-format -msgid "Got response: %s" -msgstr "" +#~ msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:96 -#, python-format -msgid "Volume %s does not exist in Nexenta SA" -msgstr "" +#~ msgid "_check_conf_file: %s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:180 -msgid "" -"Call to local_path should not happen. Verify that use_local_volumes flag " -"is turned off." -msgstr "" +#~ msgid "Write login information to xml error. %s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:202 -#, python-format -msgid "Ignored target creation error \"%s\" while ensuring export" -msgstr "" +#~ msgid "_get_login_info error. %s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:210 -#, python-format -msgid "Ignored target group creation error \"%s\" while ensuring export" -msgstr "" +#~ msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:219 -#, python-format -msgid "Ignored target group member addition error \"%s\" while ensuring export" -msgstr "" +#~ msgid "_get_lun_set_info:%s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:227 -#, python-format -msgid "Ignored LU creation error \"%s\" while ensuring export" -msgstr "" +#~ msgid "_get_iscsi_info:%s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:237 -#, python-format -msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" -msgstr "" +#~ msgid "CLI command:%s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:273 -#, python-format -msgid "" -"Got error trying to destroy target group %(target_group)s, assuming it is" -" already gone: %(exc)s" -msgstr "" +#~ msgid "_execute_cli:%s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:280 -#, python-format -msgid "" -"Got error trying to delete target %(target)s, assuming it is already " -"gone: %(exc)s" -msgstr "" +#~ msgid "_delete_hostport:Failed to delete host port. 
port id:%(portid)s" +#~ msgstr "" -#~ msgid "Unable to locate account %(account_name) on Solidfire device" +#~ msgid "_get_tgt_iqn:iSCSI IP is %s." #~ msgstr "" -#~ msgid "Zone %(zone_id)s could not be found." +#~ msgid "_get_tgt_iqn:iSCSI target iqn is:%s" #~ msgstr "" -#~ msgid "Cinder access parameters were not specified." +#~ msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" #~ msgstr "" -#~ msgid "Virtual Storage Array %(id)d could not be found." +#~ msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" #~ msgstr "" -#~ msgid "Virtual Storage Array %(name)s could not be found." +#~ msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." #~ msgstr "" -#~ msgid "Detected more than one volume with name %(vol_name)" +#~ msgid "Ignored target creation error while ensuring export" #~ msgstr "" -#~ msgid "Detected existing vlan with id %(vlan)" +#~ msgid "Ignored target group creation error while ensuring export" #~ msgstr "" -#~ msgid "" -#~ "Attempting to grab semaphore \"%(lock)s\" " -#~ "for method \"%(method)s\"...lock" +#~ msgid "Ignored target group member addition error while ensuring export" #~ msgstr "" -#~ msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgid "Ignored LU creation error while ensuring export" #~ msgstr "" -#~ msgid "" -#~ "Attempting to grab file lock " -#~ "\"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgid "Ignored LUN mapping entry addition error while ensuring export" #~ msgstr "" -#~ msgid "Got file lock \"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgid "Invalid source volume %(reason)s." #~ msgstr "" -#~ msgid "Parent group id and group id cannot be same" +#~ msgid "The request is invalid." #~ msgstr "" -#~ msgid "No body provided" +#~ msgid "Volume %(volume_id)s persistence file could not be found." #~ msgstr "" -#~ msgid "Create VSA %(display_name)s of type %(vc_type)s" +#~ msgid "No disk at %(location)s" #~ msgstr "" -#~ msgid "Delete VSA with id: %s" +#~ msgid "Class %(class_name)s could not be found: %(exception)s" #~ msgstr "" -#~ msgid "Associate address %(ip)s to VSA %(id)s" +#~ msgid "Action not allowed." #~ msgstr "" -#~ msgid "Disassociate address from VSA %(id)s" +#~ msgid "Key pair %(key_name)s already exists." #~ msgstr "" -#~ msgid "%(obj)s with ID %(id)s not found" +#~ msgid "Migration error: %(reason)s" #~ msgstr "" -#~ msgid "" -#~ "%(obj)s with ID %(id)s belongs to " -#~ "VSA %(own_vsa_id)s and not to VSA " -#~ "%(vsa_id)s." +#~ msgid "Maximum volume/snapshot size exceeded" #~ msgstr "" -#~ msgid "Index. vsa_id=%(vsa_id)s" +#~ msgid "3PAR Host already exists: %(err)s. %(info)s" #~ msgstr "" -#~ msgid "Detail. vsa_id=%(vsa_id)s" +#~ msgid "Backup volume %(volume_id)s type not recognised." #~ msgstr "" -#~ msgid "Create. vsa_id=%(vsa_id)s, body=%(body)s" +#~ msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s" #~ msgstr "" -#~ msgid "Create volume of %(size)s GB from VSA ID %(vsa_id)s" +#~ msgid "ssh_read: Read SSH timeout" #~ msgstr "" -#~ msgid "Update %(obj)s with id: %(id)s, changes: %(changes)s" +#~ msgid "do_setup." #~ msgstr "" -#~ msgid "Delete. vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgid "create_volume: volume name: %s." #~ msgstr "" -#~ msgid "Show. vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgid "delete_volume: volume name: %s." 
#~ msgstr "" -#~ msgid "Index instances for VSA %s" +#~ msgid "create_cloned_volume: src volume: %(src)s tgt volume: %(tgt)s" #~ msgstr "" -#~ msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances" +#~ msgid "create_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" #~ msgstr "" -#~ msgid "" -#~ "Instance quota exceeded. You cannot run" -#~ " any more instances of this type." +#~ msgid "delete_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" #~ msgstr "" -#~ msgid "" -#~ "Instance quota exceeded. You can only" -#~ " run %s more instances of this " -#~ "type." +#~ msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s" #~ msgstr "" -#~ msgid "Going to try to soft delete %s" +#~ msgid "_update_volume_stats: Updating volume stats" #~ msgstr "" -#~ msgid "No host for instance %s, deleting immediately" +#~ msgid "restore finished." #~ msgstr "" -#~ msgid "Going to try to terminate %s" +#~ msgid "Error encountered during initialization of driver: %s" #~ msgstr "" -#~ msgid "Going to try to stop %s" +#~ msgid "Unabled to update stats, driver is uninitialized" #~ msgstr "" -#~ msgid "Going to try to start %s" +#~ msgid "Snapshot file at %s does not exist." #~ msgstr "" -#~ msgid "" -#~ "Going to force the deletion of the" -#~ " vm %(instance_uuid)s, even if it is" -#~ " deleted" +#~ msgid "_create_copy: Source vdisk %s does not exist" #~ msgstr "" -#~ msgid "" -#~ "Instance %(instance_uuid)s did not exist " -#~ "in the DB, but I will shut " -#~ "it down anyway using a special " -#~ "context" +#~ msgid "Login to 3PAR array invalid" #~ msgstr "" -#~ msgid "exception terminating the instance %(instance_uuid)s" +#~ msgid "There are no datastores present under %s." #~ msgstr "" -#~ msgid "trying to destroy already destroyed instance: %s" +#~ msgid "Size for volume: %s not found, skipping secure delete." #~ msgstr "" -#~ msgid "" -#~ "Instance %(name)s found in database but" -#~ " not known by hypervisor. Setting " -#~ "power state to NOSTATE" +#~ msgid "Could not find attribute for LUN named %s" #~ msgstr "" -#~ msgid "" -#~ "Detected instance with name label " -#~ "'%(name_label)s' which is marked as " -#~ "DELETED but still present on host." +#~ msgid "Cleaning up incomplete backup operations" #~ msgstr "" -#~ msgid "" -#~ "Destroying instance with name label " -#~ "'%(name_label)s' which is marked as " -#~ "DELETED but still present on host." +#~ msgid "Resetting volume %s to available (was backing-up)" #~ msgstr "" -#~ msgid "SQL connection failed (%(connstring)s). %(attempts)d attempts left." +#~ msgid "Resetting volume %s to error_restoring (was restoring-backup)" #~ msgstr "" -#~ msgid "Can't downgrade without losing data" +#~ msgid "Resetting backup %s to error (was creating)" #~ msgstr "" -#~ msgid "Instance %(instance_id)s not found" +#~ msgid "Resetting backup %s to available (was restoring)" #~ msgstr "" -#~ msgid "Network %s has active ports, cannot delete" +#~ msgid "Resuming delete on backup: %s" #~ msgstr "" -#~ msgid "No fixed IPs to deallocate for vif %sid" +#~ msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" #~ msgstr "" -#~ msgid "" -#~ "AMQP server on %(fl_host)s:%(fl_port)d is " -#~ "unreachable: %(e)s. Trying again in " -#~ "%(fl_intv)d seconds." +#~ msgid "create_backup finished. backup: %s" #~ msgstr "" -#~ msgid "Unable to connect to AMQP server after %(tries)d tries. Shutting down." 
+#~ msgid "delete_backup started, backup: %s" #~ msgstr "" -#~ msgid "Reconnected to queue" +#~ msgid "delete_backup finished, backup %s deleted" #~ msgstr "" -#~ msgid "Failed to fetch message from queue: %s" +#~ msgid "JSON transfer Error" #~ msgstr "" -#~ msgid "Initing the Adapter Consumer for %s" +#~ msgid "create volume error: %(err)s" #~ msgstr "" -#~ msgid "Created \"%(exchange)s\" fanout exchange with \"%(key)s\" routing key" +#~ msgid "Create snapshot error." #~ msgstr "" -#~ msgid "Exception while processing consumer" +#~ msgid "Create luncopy error." #~ msgstr "" -#~ msgid "Creating \"%(exchange)s\" fanout exchange" +#~ msgid "_find_host_lun_id transfer data error! " #~ msgstr "" -#~ msgid "response %s" +#~ msgid "ssh_read: Read SSH timeout." #~ msgstr "" -#~ msgid "topic is %s" +#~ msgid "There are no hosts in the inventory." #~ msgstr "" -#~ msgid "message %s" +#~ msgid "Unable to create volume: %(vol)s on the hosts: %(hosts)s." #~ msgstr "" -#~ msgid "" -#~ "Cannot confirm tmpfile at %(ipath)s is" -#~ " on same shared storage between " -#~ "%(src)s and %(dest)s." +#~ msgid "Successfully cloned new backing: %s." #~ msgstr "" -#~ msgid "" -#~ "Unable to migrate %(instance_id)s to " -#~ "%(dest)s: Lack of memory(host:%(avail)s <= " -#~ "instance:%(mem_inst)s)" +#~ msgid "Successfully reverted clone: %(clone)s to snapshot: %(snapshot)s." #~ msgstr "" -#~ msgid "" -#~ "Unable to migrate %(instance_id)s to " -#~ "%(dest)s: Lack of disk(host:%(available)s <=" -#~ " instance:%(necessary)s)" +#~ msgid "Copying backing files from %(src)s to %(dest)s." #~ msgstr "" -#~ msgid "Driver Method %(driver_method)s missing: %(e)s.Reverting to schedule()" +#~ msgid "Initiated copying of backing via task: %s." #~ msgstr "" -#~ msgid "Setting instance %(instance_uuid)s to ERROR state." +#~ msgid "Successfully copied backing to %s." #~ msgstr "" -#~ msgid "_filter_hosts: %(request_spec)s" +#~ msgid "Registering backing at path: %s to inventory." #~ msgstr "" -#~ msgid "Filter hosts for drive type %s" +#~ msgid "Initiated registring backing, task: %s." #~ msgstr "" -#~ msgid "Host %s has no free capacity. Skip" +#~ msgid "Successfully registered backing: %s." #~ msgstr "" -#~ msgid "Filter hosts: %s" +#~ msgid "Reverting backing to snapshot: %s." #~ msgstr "" -#~ msgid "Must implement host selection mechanism" +#~ msgid "Initiated reverting snapshot via task: %s." #~ msgstr "" -#~ msgid "Maximum number of hosts selected (%d)" +#~ msgid "Successfully reverted to snapshot: %s." #~ msgstr "" -#~ msgid "Selected excessive host %(host)s" +#~ msgid "Successfully copied disk data to: %s." #~ msgstr "" -#~ msgid "Provision volume %(name)s of size %(size)s GB on host %(host)s" +#~ msgid "Error(s): %s occurred in the call to RetrieveProperties." #~ msgstr "" -#~ msgid "volume_params %(volume_params)s" +#~ msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" #~ msgstr "" -#~ msgid "%(i)d: Volume %(name)s" +#~ msgid "Deploy v1 of the Cinder API. " #~ msgstr "" -#~ msgid "Attempting to spawn %(num_volumes)d volume(s)" +#~ msgid "Deploy v2 of the Cinder API. " #~ msgstr "" -#~ msgid "Error creating volumes" +#~ msgid "_read_xml:%s" #~ msgstr "" -#~ msgid "Non-VSA volume %d" +#~ msgid "request ip info is %s." #~ msgstr "" -#~ msgid "Spawning volume %(volume_id)s with drive type %(drive_type)s" +#~ msgid "new str info is %s." #~ msgstr "" -#~ msgid "Error creating volume" +#~ msgid "Failed to create iser target for volume %(volume_id)s." 
#~ msgstr "" -#~ msgid "No capability selected for volume of size %(size)s" +#~ msgid "Failed to remove iser target for volume %(volume_id)s." #~ msgstr "" -#~ msgid "Host %s:" +#~ msgid "rtstool is not installed correctly" #~ msgstr "" -#~ msgid "" -#~ "\tDrive %(qosgrp)-25s: total %(total)2s, " -#~ "used %(used)2s, free %(free)2s. Available " -#~ "capacity %(avail)-5s" +#~ msgid "Creating iser_target for: %s" #~ msgstr "" -#~ msgid "" -#~ "\t LeastUsedHost: Best host: %(best_host)s." -#~ " (used capacity %(min_used)s)" +#~ msgid "Failed to create iser target for volume id:%(vol_id)s: %(e)s" #~ msgstr "" -#~ msgid "" -#~ "\t MostAvailCap: Best host: %(best_host)s. " -#~ "(available %(max_avail)s %(type_str)s)" +#~ msgid "Removing iser_target for: %s" #~ msgstr "" -#~ msgid "(%(nm)s) publish (key: %(routing_key)s) %(message)s" +#~ msgid "Failed to remove iser target for volume id:%(vol_id)s: %(e)s" #~ msgstr "" -#~ msgid "Publishing to route %s" +#~ msgid "Volume %s does not exist, it seems it was already deleted" #~ msgstr "" -#~ msgid "Declaring queue %s" +#~ msgid "Executing zfs send/recv on the appliance" #~ msgstr "" -#~ msgid "Declaring exchange %s" +#~ msgid "zfs send/recv done, new volume %s created" #~ msgstr "" -#~ msgid "Binding %(queue)s to %(exchange)s with key %(routing_key)s" +#~ msgid "Failed to delete temp snapshot %(volume)s@%(snapshot)s" #~ msgstr "" -#~ msgid "Getting from %(queue)s: %(message)s" +#~ msgid "Failed to delete zfs recv snapshot %(volume)s@%(snapshot)s" #~ msgstr "" -#~ msgid "Test: Emulate wrong VSA name. Raise" +#~ msgid "rbd export-diff failed - %s" #~ msgstr "" -#~ msgid "Test: Emulate DB error. Raise" +#~ msgid "rbd import-diff failed - %s" #~ msgstr "" -#~ msgid "Test: user_data = %s" +#~ msgid "%s is not on GPFS. Perhaps GPFS not mounted." #~ msgstr "" -#~ msgid "_create: param=%s" +#~ msgid "Folder %s does not exist, it seems it was already deleted." #~ msgstr "" -#~ msgid "Host %s" +#~ msgid "No 'os-update_readonly_flag' was specified in request." #~ msgstr "" -#~ msgid "Test: provision vol %(name)s on host %(host)s" +#~ msgid "Volume 'readonly' flag must be specified in request as a boolean." #~ msgstr "" -#~ msgid "\t vol=%(vol)s" +#~ msgid "ISER provider_location not stored, using discovery" #~ msgstr "" -#~ msgid "Test: VSA update request: vsa_id=%(vsa_id)s values=%(values)s" +#~ msgid "Could not find iSER export for volume %s" #~ msgstr "" -#~ msgid "Test: Volume create: %s" +#~ msgid "ISER Discovery: Found %s" #~ msgstr "" -#~ msgid "Test: Volume get request: id=%(volume_id)s" +#~ msgid "Failed to access the device on the path %(path)s: %(error)s." #~ msgstr "" -#~ msgid "Test: Volume update request: id=%(volume_id)s values=%(values)s" +#~ msgid "iSER device not found at %s" #~ msgstr "" -#~ msgid "Test: Volume get: id=%(volume_id)s" +#~ msgid "Found iSER node %(host_device)s (after %(tries)s rescans)." #~ msgstr "" -#~ msgid "Task [%(name)s] %(task)s status: success %(result)s" +#~ msgid "Skipping ensure_export. No iser_target provisioned for volume: %s" #~ msgstr "" -#~ msgid "Task [%(name)s] %(task)s status: %(status)s %(error_info)s" +#~ msgid "Skipping remove_export. No iser_target provisioned for volume: %s" #~ msgstr "" -#~ msgid "Unable to get updated status: %s" +#~ msgid "Downloading image: %s from glance image server." #~ msgstr "" -#~ msgid "" -#~ "deactivate_node is called for " -#~ "node_id = %(id)s node_ip = %(ip)s" +#~ msgid "Uploading image: %s to the Glance image server." 
#~ msgstr "" -#~ msgid "virsh said: %r" +#~ msgid "Invalid request body" #~ msgstr "" -#~ msgid "cool, it's a device" +#~ msgid "enter: _get_host_from_connector: prefix %s" #~ msgstr "" -#~ msgid "Unable to read LXC console" +#~ msgid "Schedule volume flow not retrieved" #~ msgstr "" -#~ msgid "" -#~ "to xml...\n" -#~ ":%s " +#~ msgid "Failed to successfully complete schedule volume using flow: %s" #~ msgstr "" -#~ msgid "During wait running, %s disappeared." +#~ msgid "Create volume flow not retrieved" #~ msgstr "" -#~ msgid "Instance %s running successfully." +#~ msgid "Failed to successfully complete create volume workflow" #~ msgstr "" -#~ msgid "The nwfilter(%(instance_secgroup_filter_name)s) is not found." +#~ msgid "Expected volume result not found" #~ msgstr "" -#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image verification failed" +#~ msgid "Manager volume flow not retrieved" #~ msgstr "" -#~ msgid "" -#~ "%(container_format)s-%(id)s (%(base_file)s): image " -#~ "verification skipped, no hash stored" +#~ msgid "Failed to successfully complete manager volume workflow" #~ msgstr "" -#~ msgid "%(container_format)s-%(id)s (%(base_file)s): checking" +#~ msgid "Unable to update stats, driver is uninitialized" #~ msgstr "" -#~ msgid "" -#~ "%(container_format)s-%(id)s (%(base_file)s): in use:" -#~ " on this node %(local)d local, " -#~ "%(remote)d on other nodes" +#~ msgid "Bad reponse from server: %s" #~ msgstr "" -#~ msgid "" -#~ "%(container_format)s-%(id)s (%(base_file)s): warning " -#~ "-- an absent base file is in " -#~ "use! instances: %(instance_list)s" +#~ msgid "%(flow)s has moved into state %(state)s from state %(old_state)s" #~ msgstr "" -#~ msgid "" -#~ "%(container_format)s-%(id)s (%(base_file)s): in: on" -#~ " other nodes (%(remote)d on other " -#~ "nodes)" +#~ msgid "No request spec, will not reschedule" #~ msgstr "" -#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image is not in use" +#~ msgid "No retry filter property or associated retry info, will not reschedule" #~ msgstr "" -#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image is in use" +#~ msgid "Retry info not present, will not reschedule" #~ msgstr "" -#~ msgid "Created VM %s..." +#~ msgid "Clear capabilities" #~ msgstr "" -#~ msgid "Created VM %(instance_name)s as %(vm_ref)s." +#~ msgid "This usually means the volume was never succesfully created." #~ msgstr "" -#~ msgid "Creating a CDROM-specific VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +#~ msgid "setting LU uppper (end) limit to %s" #~ msgstr "" -#~ msgid "" -#~ "Created a CDROM-specific VBD %(vbd_ref)s" -#~ " for VM %(vm_ref)s, VDI %(vdi_ref)s." +#~ msgid "Can't find lun or lun goup in array" +#~ msgstr "" + +#~ msgid "Volume to be restored to is smaller than the backup to be restored" #~ msgstr "" -#~ msgid "No primary VDI found for%(vm_ref)s" +#~ msgid "Volume driver '%(driver)s' not initialized." #~ msgstr "" -#~ msgid "Snapshotting VM %(vm_ref)s with label '%(label)s'..." +#~ msgid "in looping call" #~ msgstr "" -#~ msgid "Created snapshot %(template_vm_ref)s from VM %(vm_ref)s." +#~ msgid "Is the appropriate service running?" 
#~ msgstr "" -#~ msgid "Fetching image %(image)s" +#~ msgid "Could not find another host" #~ msgstr "" -#~ msgid "Image Type: %s" +#~ msgid "Not enough allocatable volume gigabytes remaining" #~ msgstr "" -#~ msgid "ISO: Found sr possibly containing the ISO image" +#~ msgid "Unable to update stats on non-intialized Volume Group: %s" #~ msgstr "" -#~ msgid "Size for image %(image)s:%(virtual_size)d" +#~ msgid "do_setup: Pool %s does not exist" #~ msgstr "" -#~ msgid "instance %s: Failed to fetch glance image" +#~ msgid "migrate_volume started with more than one vdisk copy" #~ msgstr "" -#~ msgid "Creating VBD for VDI %s ... " +#~ msgid "migrate_volume: Could not get vdisk copy data" #~ msgstr "" -#~ msgid "Creating VBD for VDI %s done." +#~ msgid "Selected datastore: %s for the volume." #~ msgstr "" -#~ msgid "VBD.unplug successful first time." +#~ msgid "There are no valid datastores present under %s." #~ msgstr "" -#~ msgid "VBD.unplug rejected: retrying..." +#~ msgid "Unable to create volume, driver not initialized" #~ msgstr "" -#~ msgid "Not sleeping anymore!" +#~ msgid "Migration %(migration_id)s could not be found." #~ msgstr "" -#~ msgid "VBD.unplug successful eventually." +#~ msgid "Bad driver response status: %(status)s" #~ msgstr "" -#~ msgid "Ignoring XenAPI.Failure in VBD.unplug: %s" +#~ msgid "Instance %(instance_id)s could not be found." #~ msgstr "" -#~ msgid "Ignoring XenAPI.Failure %s" +#~ msgid "Volume retype failed: %(reason)s" #~ msgstr "" -#~ msgid "Starting instance %s" +#~ msgid "SIGTERM received" #~ msgstr "" -#~ msgid "instance %s: Failed to spawn" +#~ msgid "Child %(pid)d exited with status %(code)d" #~ msgstr "" -#~ msgid "Instance %s failed to spawn - performing clean-up" +#~ msgid "_wait_child %d" #~ msgstr "" -#~ msgid "instance %s: Failed to spawn - Unable to create VM" +#~ msgid "wait wrap.failed %s" #~ msgstr "" #~ msgid "" -#~ "Auto configuring disk for instance " -#~ "%(instance_uuid)s, attempting to resize " -#~ "partition..." +#~ "Report interval must be less than " +#~ "service down time. Current config: " +#~ ". Setting " +#~ "service_down_time to: %(new_service_down_time)s" #~ msgstr "" -#~ msgid "Invalid value for injected_files: '%s'" +#~ msgid "Failed to update iscsi target for volume %(name)s." #~ msgstr "" -#~ msgid "Starting VM %s..." +#~ msgid "Updating iscsi target: %s" #~ msgstr "" -#~ msgid "Spawning VM %(instance_uuid)s created %(vm_ref)s." +#~ msgid "Failed to update iscsi target %(name)s: %(e)s" #~ msgstr "" -#~ msgid "Instance %s: waiting for running" +#~ msgid "Caught '%(exception)s' exception." #~ msgstr "" -#~ msgid "Instance %s: running" +#~ msgid "Get code level failed" #~ msgstr "" -#~ msgid "Resources to remove:%s" +#~ msgid "do_setup: Could not get system name" #~ msgstr "" -#~ msgid "Removing VDI %(vdi_ref)s(uuid:%(vdi_to_remove)s)" +#~ msgid "Failed to get license information." #~ msgstr "" -#~ msgid "Skipping VDI destroy for %s" +#~ msgid "do_setup: No configured nodes" #~ msgstr "" -#~ msgid "Finished snapshot and upload for VM %s" +#~ msgid "enter: _get_chap_secret_for_host: host name %s" #~ msgstr "" -#~ msgid "Starting snapshot for VM %s" +#~ msgid "" +#~ "leave: _get_chap_secret_for_host: host name " +#~ "%(host_name)s with secret %(chap_secret)s" #~ msgstr "" -#~ msgid "Unable to Snapshot instance %(instance_uuid)s: %(exc)s" +#~ msgid "" +#~ "_create_host: Cannot clean host name. 
" +#~ "Host name is not unicode or string" #~ msgstr "" -#~ msgid "Updating instance '%(instance_uuid)s' progress to %(progress)d" +#~ msgid "enter: _get_host_from_connector: %s" #~ msgstr "" -#~ msgid "Resize instance %s complete" +#~ msgid "leave: _get_host_from_connector: host %s" #~ msgstr "" -#~ msgid "domid changed from %(olddomid)s to %(newdomid)s" +#~ msgid "enter: _create_host: host %s" #~ msgstr "" -#~ msgid "VM %(instance_uuid)s already halted,skipping shutdown..." +#~ msgid "_create_host: No connector ports" #~ msgstr "" -#~ msgid "Shutting down VM for Instance %(instance_uuid)s" +#~ msgid "leave: _create_host: host %(host)s - %(host_name)s" #~ msgstr "" -#~ msgid "Destroying VDIs for Instance %(instance_uuid)s" +#~ msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" #~ msgstr "" #~ msgid "" -#~ "Instance %(instance_uuid)s using RAW or " -#~ "VHD, skipping kernel and ramdisk " -#~ "deletion" +#~ "storwize_svc_multihostmap_enabled is set to " +#~ "False, Not allow multi host mapping" #~ msgstr "" -#~ msgid "Instance %(instance_uuid)s VM destroyed" +#~ msgid "volume %s mapping to multi host" #~ msgstr "" -#~ msgid "Destroying VM for Instance %(instance_uuid)s" +#~ msgid "" +#~ "leave: _map_vol_to_host: LUN %(result_lun)s, " +#~ "volume %(volume_name)s, host %(host_name)s" #~ msgstr "" -#~ msgid "Automatically hard rebooting %d" +#~ msgid "enter: _delete_host: host %s " #~ msgstr "" -#~ msgid "Instance for migration %d not found, skipping" +#~ msgid "leave: _delete_host: host %s " #~ msgstr "" -#~ msgid "injecting network info to xs for vm: |%s|" +#~ msgid "_create_host failed to return the host name." #~ msgstr "" -#~ msgid "creating vif(s) for vm: |%s|" +#~ msgid "_get_host_from_connector failed to return the host name for connector" #~ msgstr "" -#~ msgid "Creating VIF for VM %(vm_ref)s, network %(network_ref)s." +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to any host found." #~ msgstr "" -#~ msgid "Created VIF %(vif_ref)s for VM %(vm_ref)s, network %(network_ref)s." +#~ msgid "" +#~ "terminate_connection: Multiple mappings of " +#~ "volume %(vol_name)s found, no host " +#~ "specified." #~ msgstr "" -#~ msgid "injecting hostname to xs for vm: |%s|" +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to host %(host_name)s found" #~ msgstr "" -#~ msgid "" -#~ "The agent call to %(method)s returned" -#~ " an invalid response: %(ret)r. VM " -#~ "id=%(instance_uuid)s; path=%(path)s; args=%(addl_args)r" +#~ msgid "protocol must be specified as ' iSCSI' or ' FC'" +#~ msgstr "" + +#~ msgid "enter: _create_vdisk: vdisk %s " #~ msgstr "" #~ msgid "" -#~ "TIMEOUT: The call to %(method)s timed" -#~ " out. VM id=%(instance_uuid)s; args=%(args)r" +#~ "_create_vdisk %(name)s - did not find success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" #~ msgid "" -#~ "NOT IMPLEMENTED: The call to %(method)s" -#~ " is not supported by the agent. " -#~ "VM id=%(instance_uuid)s; args=%(args)r" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find success " +#~ "message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" #~ msgstr "" #~ msgid "" -#~ "The call to %(method)s returned an " -#~ "error: %(e)s. 
VM id=%(instance_uuid)s; " -#~ "args=%(args)r" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find mapping " +#~ "id in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" #~ msgstr "" -#~ msgid "Creating VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +#~ msgid "" +#~ "_prepare_fc_map: Failed to prepare FlashCopy" +#~ " from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" -#~ msgid "Error destroying VDI" +#~ msgid "" +#~ "Unexecpted mapping status %(status)s for " +#~ "mapping %(id)s. Attributes: %(attr)s" #~ msgstr "" -#~ msgid "\tVolume %s is NOT VSA volume" +#~ msgid "" +#~ "Mapping %(id)s prepare failed to " +#~ "complete within the allotted %(to)d " +#~ "seconds timeout. Terminating." #~ msgstr "" -#~ msgid "\tFE VSA Volume %s creation - do nothing" +#~ msgid "" +#~ "_prepare_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s with " +#~ "exception %(ex)s" #~ msgstr "" -#~ msgid "VSA BE create_volume for %s failed" +#~ msgid "_prepare_fc_map: %s" #~ msgstr "" -#~ msgid "VSA BE create_volume for %s succeeded" +#~ msgid "" +#~ "_start_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s deletion - do nothing" +#~ msgid "" +#~ "enter: _run_flashcopy: execute FlashCopy from" +#~ " source %(source)s to target %(target)s" #~ msgstr "" -#~ msgid "VSA BE delete_volume for %s failed" +#~ msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" #~ msgstr "" -#~ msgid "VSA BE delete_volume for %s suceeded" +#~ msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s local path call - call discover" +#~ msgid "_create_copy: Source vdisk %(src_vdisk)s (%(src_id)s) does not exist" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s ensure export - do nothing" +#~ msgid "" +#~ "_create_copy: cannot get source vdisk " +#~ "%(src)s capacity from vdisk attributes " +#~ "%(attr)s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s create export - do nothing" +#~ msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s remove export - do nothing" +#~ msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" #~ msgstr "" -#~ msgid "VSA BE remove_export for %s failed" +#~ msgid "" +#~ "leave: _get_flashcopy_mapping_attributes: mapping " +#~ "%(fc_map_id)s, attributes %(attributes)s" #~ msgstr "" -#~ msgid "Failed to retrieve QoS info" +#~ msgid "enter: _is_vdisk_defined: vdisk %s " #~ msgstr "" -#~ msgid "invalid drive data" +#~ msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " #~ msgstr "" -#~ msgid "drive_name not defined" +#~ msgid "enter: _delete_vdisk: vdisk %s" #~ msgstr "" -#~ msgid "invalid drive type name %s" +#~ msgid "warning: Tried to delete vdisk %s but it does not exist." #~ msgstr "" -#~ msgid "*** Experimental VSA code ***" +#~ msgid "leave: _delete_vdisk: vdisk %s" #~ msgstr "" -#~ msgid "Requested number of VCs (%d) is too high. 
Setting to default" +#~ msgid "" +#~ "_add_vdisk_copy %(name)s - did not find" +#~ " success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" -#~ msgid "Creating VSA: %s" +#~ msgid "_get_vdisk_copy_attrs: Could not get vdisk copy data" #~ msgstr "" -#~ msgid "" -#~ "VSA ID %(vsa_id)d %(vsa_name)s: Create " -#~ "volume %(vol_name)s, %(vol_size)d GB, type " -#~ "%(vol_type_id)s" +#~ msgid "_get_pool_attrs: Pool %s does not exist" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)d: Update VSA status to %(status)s" +#~ msgid "enter: _execute_command_and_parse_attributes: command %s" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)d: Update VSA call" +#~ msgid "" +#~ "leave: _execute_command_and_parse_attributes:\n" +#~ "command: %(cmd)s\n" +#~ "attributes: %(attr)s" #~ msgstr "" -#~ msgid "Adding %(add_cnt)s VCs to VSA %(vsa_name)s." +#~ msgid "" +#~ "_get_hdr_dic: attribute headers and values do not match.\n" +#~ " Headers: %(header)s\n" +#~ " Values: %(row)s" #~ msgstr "" -#~ msgid "Deleting %(del_cnt)s VCs from VSA %(vsa_name)s." +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ "stdout: %(out)s\n" +#~ "stderr: %(err)s\n" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Deleting %(direction)s volume %(vol_name)s" +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" -#~ msgid "Unable to delete volume %s" +#~ msgid "Did not find expected column in %(fun)s: %(hdr)s" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Forced delete. %(direction)s volume %(vol_name)s" +#~ msgid "" +#~ "Volume size %(size)s cannot be lesser" +#~ " than the snapshot size %(snap_size)s. " +#~ "They must be >= original snapshot " +#~ "size." #~ msgstr "" -#~ msgid "Going to try to terminate VSA ID %s" +#~ msgid "" +#~ "Clones currently disallowed when %(size)s " +#~ "< %(source_size)s. They must be >= " +#~ "original volume size." #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Delete instance %(name)s" +#~ msgid "" +#~ "Size of specified image %(image_size)s " +#~ "is larger than volume size " +#~ "%(volume_size)s." #~ msgstr "" -#~ msgid "Create call received for VSA %s" +#~ msgid "" +#~ "Image minDisk size %(min_disk)s is " +#~ "larger than the volume size " +#~ "%(volume_size)s." #~ msgstr "" -#~ msgid "Failed to find VSA %(vsa_id)d" +#~ msgid "Updating volume %(volume_id)s with %(update)s" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Drive %(vol_id)s created. Status %(status)s" +#~ msgid "Volume %s: resetting 'creating' status failed" #~ msgstr "" -#~ msgid "Drive %(vol_name)s (%(vol_disp_name)s) still in creating phase - wait" +#~ msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s" #~ msgstr "" -#~ msgid "" -#~ "VSA ID %(vsa_id)d: Not all volumes " -#~ "are created (%(cvol_real)d of %(cvol_exp)d)" +#~ msgid "Marking volume %s as bootable" #~ msgstr "" #~ msgid "" -#~ "VSA ID %(vsa_id)d: Drive %(vol_name)s " -#~ "(%(vol_disp_name)s) is in %(status)s state" +#~ "Attempting download of %(image_id)s " +#~ "(%(image_location)s) to volume %(volume_id)s" #~ msgstr "" -#~ msgid "Failed to update attach status for volume %(vol_name)s. 
%(ex)s" +#~ msgid "" +#~ "Downloaded image %(image_id)s (%(image_location)s)" +#~ " to volume %(volume_id)s successfully" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)d: Delete all BE volumes" +#~ msgid "" +#~ "Creating volume glance metadata for " +#~ "volume %(volume_id)s backed by image " +#~ "%(image_id)s with: %(vol_metadata)s" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)d: Start %(vc_count)d instances" +#~ msgid "" +#~ "Cloning %(volume_id)s from image %(image_id)s" +#~ " at location %(image_location)s" #~ msgstr "" diff --git a/cinder/locale/ca/LC_MESSAGES/cinder.po b/cinder/locale/ca/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..ad8e93fe5c --- /dev/null +++ b/cinder/locale/ca/LC_MESSAGES/cinder.po @@ -0,0 +1,10736 @@ +# Catalan translations for cinder. +# Copyright (C) 2013 ORGANIZATION +# This file is distributed under the same license as the cinder project. +# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Cinder\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-02-03 06:16+0000\n" +"PO-Revision-Date: 2013-05-29 08:13+0000\n" +"Last-Translator: openstackjenkins \n" +"Language-Team: Catalan " +"(http://www.transifex.com/projects/p/openstack/language/ca/)\n" +"Plural-Forms: nplurals=2; plural=(n != 1)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:102 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:66 cinder/brick/exception.py:33 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:88 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:107 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:112 +#, python-format +msgid "Volume driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:116 +#, python-format +msgid "Backup driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:120 +#, python-format +msgid "Connection to glance failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:124 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:129 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:133 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:137 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:141 +msgid "Volume driver not ready." +msgstr "" + +#: cinder/exception.py:145 cinder/brick/exception.py:74 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:150 +#, python-format +msgid "Invalid snapshot: %(reason)s" +msgstr "" + +#: cinder/exception.py:154 +#, python-format +msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:159 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:163 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:167 +msgid "The results are invalid." 
+msgstr "" + +#: cinder/exception.py:171 +#, python-format +msgid "Invalid input received: %(reason)s" +msgstr "" + +#: cinder/exception.py:175 +#, python-format +msgid "Invalid volume type: %(reason)s" +msgstr "" + +#: cinder/exception.py:179 +#, python-format +msgid "Invalid volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:183 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:187 +#, python-format +msgid "Invalid host: %(reason)s" +msgstr "" + +#: cinder/exception.py:193 cinder/brick/exception.py:81 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:197 +#, python-format +msgid "Invalid auth key: %(reason)s" +msgstr "" + +#: cinder/exception.py:201 +#, python-format +msgid "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" +msgstr "" + +#: cinder/exception.py:206 +msgid "Service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:210 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:214 +#, python-format +msgid "The device in the path %(path)s is unavailable: %(reason)s" +msgstr "" + +#: cinder/exception.py:218 +#, python-format +msgid "Expected a uuid but received %(uuid)s." +msgstr "" + +#: cinder/exception.py:222 cinder/brick/exception.py:68 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:228 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:232 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "" +"Volume %(volume_id)s has no administration metadata with key " +"%(metadata_key)s." +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Invalid metadata: %(reason)s" +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Invalid metadata size: %(reason)s" +msgstr "" + +#: cinder/exception.py:250 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:255 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:264 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:269 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s deletion is not allowed with volumes " +"present with the type." +msgstr "" + +#: cinder/exception.py:274 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:278 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:282 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:287 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:291 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:295 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "Service %(service_id)s could not be found." 
+msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:328 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:332 +#, python-format +msgid "Unknown quota resources %(unknown)s." +msgstr "" + +#: cinder/exception.py:336 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:340 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:344 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:348 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:352 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:356 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:365 +#, python-format +msgid "Volume Type %(id)s already exists." +msgstr "" + +#: cinder/exception.py:369 +#, python-format +msgid "Volume type encryption for type %(type_id)s already exists." +msgstr "" + +#: cinder/exception.py:373 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:377 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:381 +#, python-format +msgid "Could not find parameter %(param)s" +msgstr "" + +#: cinder/exception.py:385 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:389 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:398 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:402 +#, python-format +msgid "Quota exceeded: code=%(code)s" +msgstr "" + +#: cinder/exception.py:409 +#, python-format +msgid "" +"Requested volume or snapshot exceeds allowed Gigabytes quota. Requested " +"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." 
+msgstr "" + +#: cinder/exception.py:415 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:419 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:423 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:427 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:432 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:436 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:440 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:444 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:449 +#, python-format +msgid "Glance metadata for volume/snapshot %(id)s cannot be found." +msgstr "" + +#: cinder/exception.py:453 +#, python-format +msgid "Failed to export for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:457 +#, python-format +msgid "Failed to create metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:461 +#, python-format +msgid "Failed to update metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:465 +#, python-format +msgid "Failed to copy metadata to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:469 +#, python-format +msgid "Failed to copy image to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:473 +msgid "Invalid Ceph args provided for backup rbd operation" +msgstr "" + +#: cinder/exception.py:477 +msgid "An error has occurred during backup operation" +msgstr "" + +#: cinder/exception.py:481 +msgid "Backup RBD operation failed" +msgstr "" + +#: cinder/exception.py:485 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:489 +msgid "Failed to identify volume backend." +msgstr "" + +#: cinder/exception.py:493 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "" + +#: cinder/exception.py:497 +#, python-format +msgid "Connection to swift failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:501 +#, python-format +msgid "Transfer %(transfer_id)s could not be found." +msgstr "" + +#: cinder/exception.py:505 +#, python-format +msgid "Volume migration failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:509 +#, python-format +msgid "SSH command injection detected: %(command)s" +msgstr "" + +#: cinder/exception.py:513 +#, python-format +msgid "QoS Specs %(specs_id)s already exists." +msgstr "" + +#: cinder/exception.py:517 +#, python-format +msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:522 +#, python-format +msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "No such QoS spec %(specs_id)s." +msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:536 +#, python-format +msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." 
+msgstr "" + +#: cinder/exception.py:541 +#, python-format +msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." +msgstr "" + +#: cinder/exception.py:546 +#, python-format +msgid "Invalid qos specs: %(reason)s" +msgstr "" + +#: cinder/exception.py:550 +#, python-format +msgid "QoS Specs %(specs_id)s is still associated with entities." +msgstr "" + +#: cinder/exception.py:554 +#, python-format +msgid "key manager error: %(reason)s" +msgstr "" + +#: cinder/exception.py:560 +msgid "Coraid Cinder Driver exception." +msgstr "" + +#: cinder/exception.py:564 +msgid "Failed to encode json data." +msgstr "" + +#: cinder/exception.py:568 +msgid "Login on ESM failed." +msgstr "" + +#: cinder/exception.py:572 +msgid "Relogin on ESM failed." +msgstr "" + +#: cinder/exception.py:576 +#, python-format +msgid "Group with name \"%(group_name)s\" not found." +msgstr "" + +#: cinder/exception.py:580 +#, python-format +msgid "ESM configure request failed: %(message)s." +msgstr "" + +#: cinder/exception.py:584 +#, python-format +msgid "Coraid ESM not available with reason: %(reason)s." +msgstr "" + +#: cinder/exception.py:589 +msgid "Zadara Cinder Driver exception." +msgstr "" + +#: cinder/exception.py:593 +#, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:597 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:601 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:605 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:609 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:613 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:618 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:622 +msgid "SolidFire Cinder Driver exception" +msgstr "" + +#: cinder/exception.py:626 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:630 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:636 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:641 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:645 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:649 cinder/exception.py:662 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:654 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:658 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/manager.py:133 +msgid "Notifying Schedulers of capabilities ..." +msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:105 +#, python-format +msgid "" +"Default quota for resource: %(res)s is set by the default quota flag: " +"quota_%(res)s, it is now deprecated. Please use the the default quota " +"class for default quota." 
+msgstr "" + +#: cinder/quota.py:748 +#, python-format +msgid "Created reservations %s" +msgstr "" + +#: cinder/quota.py:770 +#, python-format +msgid "Failed to commit reservations %s" +msgstr "" + +#: cinder/quota.py:790 +#, python-format +msgid "Failed to roll back reservations %s" +msgstr "" + +#: cinder/quota.py:876 +msgid "Cannot register resource" +msgstr "" + +#: cinder/quota.py:879 +msgid "Cannot register resources" +msgstr "" + +#: cinder/quota_utils.py:46 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume - " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/quota_utils.py:56 cinder/transfer/api.py:168 +#: cinder/volume/flows/api/create_volume.py:520 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/service.py:95 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:108 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:148 +#, python-format +msgid "" +"Report interval must be less than service down time. Current config " +"service_down_time: %(service_down_time)s, report_interval for this: " +"service is: %(report_interval)s. Setting global service_down_time to: " +"%(new_down_time)s" +msgstr "" + +#: cinder/service.py:216 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:255 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:270 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:276 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:298 +#, python-format +msgid "" +"Value of config option %(name)s_workers must be integer greater than 1. " +"Input value ignored." +msgstr "" + +#: cinder/service.py:373 +msgid "serve() can only be called once" +msgstr "" + +#: cinder/service.py:379 cinder/openstack/common/service.py:166 +#: cinder/openstack/common/service.py:384 +msgid "Full set of CONF:" +msgstr "" + +#: cinder/service.py:387 +#, python-format +msgid "%s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Can not translate %s to integer." 
+msgstr "" + +#: cinder/utils.py:127 +#, python-format +msgid "May specify only one of %s" +msgstr "" + +#: cinder/utils.py:212 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:228 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:412 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:423 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:698 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:759 +#, python-format +msgid "Volume driver %s not initialized" +msgstr "" + +#: cinder/wsgi.py:127 cinder/openstack/common/sslutils.py:50 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: cinder/wsgi.py:130 cinder/openstack/common/sslutils.py:53 +#, python-format +msgid "Unable to find ca_file : %s" +msgstr "" + +#: cinder/wsgi.py:133 cinder/openstack/common/sslutils.py:56 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: cinder/wsgi.py:136 cinder/openstack/common/sslutils.py:59 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:169 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:206 +#, python-format +msgid "Started %(name)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:244 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:313 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." +msgstr "" + +#: cinder/api/common.py:92 cinder/api/common.py:126 cinder/volume/api.py:266 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:95 cinder/api/common.py:130 cinder/volume/api.py:263 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:120 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:134 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:162 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:189 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:182 +msgid "Initializing extension manager." 
+msgstr "" + +#: cinder/api/extensions.py:197 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:235 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:236 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:240 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:256 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:262 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:276 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:287 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:356 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:266 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:463 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:786 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:907 +msgid "subclasses must implement construct()!" +msgstr "" + +#: cinder/api/contrib/admin_actions.py:81 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:227 +#: cinder/api/contrib/volume_transfer.py:157 +#: cinder/api/contrib/volume_transfer.py:193 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:224 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:60 +msgid "Snapshot not found." 
+msgstr "" + +#: cinder/api/contrib/hosts.py:86 cinder/api/openstack/wsgi.py:245 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:136 +#, python-format +msgid "Host '%s' could not be found." +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:168 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:180 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:206 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:214 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:111 +msgid "Please specify a name for QoS specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:220 +msgid "Failed to disassociate qos specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:222 +msgid "Qos specs still in use." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:298 +#: cinder/api/contrib/qos_specs_manage.py:351 +msgid "Volume Type id must not be None." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:72 +msgid "Missing required element quota_class_set in request body." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:81 +msgid "Quota class limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:85 +msgid "Quota class limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:60 +msgid "Quota limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quotas.py:65 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:100 +msgid "Missing required element quota_set in request body." +msgstr "" + +#: cinder/api/contrib/quotas.py:111 +#, python-format +msgid "Bad key(s) in quota set: %s" +msgstr "" + +#: cinder/api/contrib/scheduler_hints.py:36 +msgid "Malformed scheduler_hints attribute" +msgstr "" + +#: cinder/api/contrib/services.py:84 +msgid "" +"Query by service parameter is deprecated. Please use binary parameter " +"instead." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:51 +msgid "'status' must be specified." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:61 +#, python-format +msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:67 +#, python-format +msgid "" +"Provided snapshot status %(provided)s not allowed for snapshot with " +"status %(current)s." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:79 +msgid "progress must be an integer percentage" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:101 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:105 +#: cinder/api/v1/snapshot_metadata.py:75 cinder/api/v1/volume_metadata.py:75 +#: cinder/api/v2/snapshot_metadata.py:75 cinder/api/v2/volume_metadata.py:74 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:108 +#: cinder/api/v1/snapshot_metadata.py:79 cinder/api/v1/volume_metadata.py:79 +#: cinder/api/v2/snapshot_metadata.py:79 cinder/api/v2/volume_metadata.py:78 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:150 +msgid "" +"Key names can only contain alphanumeric characters, underscores, periods," +" colons and hyphens." 
+msgstr "" + +#: cinder/api/contrib/volume_actions.py:99 +#, python-format +msgid "" +"Invalid request to attach volume to an instance %(instance_uuid)s and a " +"host %(host_name)s simultaneously" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:107 +msgid "Invalid request to attach volume to an invalid target" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:111 +msgid "" +"Invalid request to attach volume with an invalid mode. Attaching mode " +"should be 'rw' or 'ro'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:196 +msgid "Unable to fetch connection information from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:216 +msgid "Unable to terminate volume connection from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:229 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:237 +msgid "Bad value for 'force' parameter." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:240 +msgid "'force' is not string or bool." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:280 +msgid "New volume size must be specified as an integer." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:299 +msgid "Must specify readonly in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:307 +msgid "Bad value for 'readonly'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:311 +msgid "'readonly' not string or bool" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:325 +msgid "New volume type must be specified." +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:131 +msgid "Listing volume transfers" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:147 +#, python-format +msgid "Creating new volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:162 +#, python-format +msgid "Creating transfer of volume %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:183 +#, python-format +msgid "Accepting volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:196 +#, python-format +msgid "Accepting transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:217 +#, python-format +msgid "Delete transfer with id: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:64 +msgid "key_size must be non-negative" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:67 +msgid "key_size must be an integer" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:73 +msgid "provider must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:75 +msgid "control_location must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:83 +#, python-format +msgid "Valid control location are: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:111 +msgid "Cannot create encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:115 +msgid "Create body is not valid." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:157 +msgid "Cannot delete encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/middleware/auth.py:108 +msgid "Invalid service catalog json." 
+msgstr "" + +#: cinder/api/middleware/fault.py:44 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:53 cinder/api/openstack/wsgi.py:984 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/fault.py:69 +#, python-format +msgid "%(exception)s: %(explanation)s" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:55 cinder/api/middleware/sizelimit.py:64 +#: cinder/api/middleware/sizelimit.py:78 +msgid "Request is too large." +msgstr "" + +#: cinder/api/openstack/__init__.py:69 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:80 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:104 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:126 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." +msgstr "" + +#: cinder/api/openstack/wsgi.py:220 cinder/api/openstack/wsgi.py:634 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:639 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:677 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:682 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:685 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:793 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:799 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:803 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:914 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:917 cinder/api/openstack/wsgi.py:930 +#: cinder/api/v1/snapshot_metadata.py:53 cinder/api/v1/snapshot_metadata.py:71 +#: cinder/api/v1/snapshot_metadata.py:96 cinder/api/v1/snapshot_metadata.py:121 +#: cinder/api/v1/volume_metadata.py:53 cinder/api/v1/volume_metadata.py:71 +#: cinder/api/v1/volume_metadata.py:96 cinder/api/v1/volume_metadata.py:121 +#: cinder/api/v2/snapshot_metadata.py:53 cinder/api/v2/snapshot_metadata.py:71 +#: cinder/api/v2/snapshot_metadata.py:96 cinder/api/v2/snapshot_metadata.py:121 +#: cinder/api/v2/volume_metadata.py:52 cinder/api/v2/volume_metadata.py:70 +#: cinder/api/v2/volume_metadata.py:95 cinder/api/v2/volume_metadata.py:120 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:927 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:939 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:987 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." 
+msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:139 cinder/api/v2/limits.py:138 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:264 cinder/api/v2/limits.py:261 +msgid "This request was rate-limited." +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:37 cinder/api/v1/snapshot_metadata.py:117 +#: cinder/api/v1/snapshot_metadata.py:156 cinder/api/v2/snapshot_metadata.py:37 +#: cinder/api/v2/snapshot_metadata.py:117 +#: cinder/api/v2/snapshot_metadata.py:156 +msgid "snapshot does not exist" +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:139 +#: cinder/api/v1/snapshot_metadata.py:149 cinder/api/v1/volume_metadata.py:139 +#: cinder/api/v1/volume_metadata.py:149 cinder/api/v2/snapshot_metadata.py:139 +#: cinder/api/v2/snapshot_metadata.py:149 cinder/api/v2/volume_metadata.py:138 +#: cinder/api/v2/volume_metadata.py:148 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:119 cinder/api/v2/snapshots.py:120 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:184 +msgid "'volume_id' must be specified" +msgstr "" + +#: cinder/api/v1/snapshots.py:182 cinder/api/v2/snapshots.py:193 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:186 cinder/api/v2/snapshots.py:202 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:37 cinder/api/v1/volume_metadata.py:117 +#: cinder/api/v1/volume_metadata.py:156 cinder/api/v2/volume_metadata.py:36 +#: cinder/api/v2/volume_metadata.py:116 cinder/api/v2/volume_metadata.py:155 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:111 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:290 cinder/api/v2/volumes.py:228 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:344 cinder/api/v1/volumes.py:348 +#: cinder/api/v2/volumes.py:298 cinder/api/v2/volumes.py:302 +msgid "Invalid imageRef provided." 
+msgstr "" + +#: cinder/api/v1/volumes.py:388 cinder/api/v2/volumes.py:354 +#, python-format +msgid "snapshot id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:401 +#, python-format +msgid "source vol id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:412 cinder/api/v2/volumes.py:377 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/v1/volumes.py:496 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/snapshots.py:111 cinder/api/v2/snapshots.py:126 +#: cinder/api/v2/snapshots.py:267 +msgid "Snapshot could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:174 cinder/api/v2/snapshots.py:234 +#: cinder/api/v2/volumes.py:313 cinder/api/v2/volumes.py:419 +#, python-format +msgid "Missing required element '%s' in request body" +msgstr "" + +#: cinder/api/v2/snapshots.py:190 cinder/api/v2/volumes.py:217 +#: cinder/api/v2/volumes.py:234 cinder/api/v2/volumes.py:449 +msgid "Volume could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:230 cinder/api/v2/volumes.py:415 +msgid "Missing request body" +msgstr "" + +#: cinder/api/v2/types.py:70 +msgid "Volume type not found" +msgstr "" + +#: cinder/api/v2/volumes.py:237 +msgid "Volume cannot be deleted while in attached state" +msgstr "" + +#: cinder/api/v2/volumes.py:343 +msgid "Volume type not found." +msgstr "" + +#: cinder/api/v2/volumes.py:366 +#, python-format +msgid "source volume id:%s not found" +msgstr "" + +#: cinder/api/v2/volumes.py:472 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:66 +msgid "Backup status must be available or error" +msgstr "" + +#: cinder/backup/api.py:105 +msgid "Volume to be backed up must be available" +msgstr "" + +#: cinder/backup/api.py:140 +msgid "Backup status must be available" +msgstr "" + +#: cinder/backup/api.py:145 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:154 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:170 +msgid "Volume to be restored to must be available" +msgstr "" + +#: cinder/backup/api.py:176 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." +msgstr "" + +#: cinder/backup/api.py:181 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:97 +msgid "NULL host not allowed for volume backend lookup." +msgstr "" + +#: cinder/backup/manager.py:100 +#, python-format +msgid "Checking hostname '%s' for backend info." +msgstr "" + +#: cinder/backup/manager.py:107 +#, python-format +msgid "Backend not found in hostname (%s) so using default." +msgstr "" + +#: cinder/backup/manager.py:117 +#, python-format +msgid "Manager requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:120 cinder/backup/manager.py:132 +msgid "Fetching default backend." +msgstr "" + +#: cinder/backup/manager.py:123 +#, python-format +msgid "Volume manager for backend '%s' does not exist." +msgstr "" + +#: cinder/backup/manager.py:129 +#, python-format +msgid "Driver requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:147 +#, python-format +msgid "" +"Registering backend %(backend)s (host=%(host)s " +"backend_name=%(backend_name)s)." +msgstr "" + +#: cinder/backup/manager.py:154 +#, python-format +msgid "Registering default backend %s." 
+msgstr "" + +#: cinder/backup/manager.py:158 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)." +msgstr "" + +#: cinder/backup/manager.py:165 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s." +msgstr "" + +#: cinder/backup/manager.py:184 +msgid "Cleaning up incomplete backup operations." +msgstr "" + +#: cinder/backup/manager.py:189 +#, python-format +msgid "Resetting volume %s to available (was backing-up)." +msgstr "" + +#: cinder/backup/manager.py:194 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)." +msgstr "" + +#: cinder/backup/manager.py:206 +#, python-format +msgid "Resetting backup %s to error (was creating)." +msgstr "" + +#: cinder/backup/manager.py:212 +#, python-format +msgid "Resetting backup %s to available (was restoring)." +msgstr "" + +#: cinder/backup/manager.py:217 +#, python-format +msgid "Resuming delete on backup: %s." +msgstr "" + +#: cinder/backup/manager.py:225 +#, python-format +msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:237 +#, python-format +msgid "" +"Create backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:249 +#, python-format +msgid "" +"Create backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:282 +#, python-format +msgid "Create backup finished. backup: %s." +msgstr "" + +#: cinder/backup/manager.py:286 +#, python-format +msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:299 +#, python-format +msgid "" +"Restore backup aborted: expected volume status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:310 +#, python-format +msgid "" +"Restore backup aborted: expected backup status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:329 +#, python-format +msgid "" +"Restore backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:360 +#, python-format +msgid "" +"Restore backup finished, backup %(backup_id)s restored to volume " +"%(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:379 +#, python-format +msgid "Delete backup started, backup: %s." +msgstr "" + +#: cinder/backup/manager.py:386 +#, python-format +msgid "" +"Delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:399 +#, python-format +msgid "" +"Delete backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:422 +#, python-format +msgid "Delete backup finished, backup %s deleted." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:116 +msgid "" +"rbd striping not supported - ignoring configuration settings for rbd " +"striping" +msgstr "" + +#: cinder/backup/drivers/ceph.py:147 +#, python-format +msgid "invalid user '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:213 +msgid "backup_id required" +msgstr "" + +#: cinder/backup/drivers/ceph.py:224 +#, python-format +msgid "discarding %(length)s bytes from offset %(offset)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:232 +#, python-format +msgid "writing zeroes chunk %d" +msgstr "" + +#: cinder/backup/drivers/ceph.py:246 +#, python-format +msgid "transferring data between '%(src)s' and '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:250 +#, python-format +msgid "%(chunks)s chunks of %(bytes)s bytes to be transferred" +msgstr "" + +#: cinder/backup/drivers/ceph.py:269 +#, python-format +msgid "transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:279 +#, python-format +msgid "transferring remaining %s bytes" +msgstr "" + +#: cinder/backup/drivers/ceph.py:295 +#, python-format +msgid "creating base image '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:322 cinder/backup/drivers/ceph.py:603 +#, python-format +msgid "deleting backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:325 +msgid "no backup snapshot to delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:361 +#, python-format +msgid "trying diff format name format basename='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:369 +#, python-format +msgid "image %s not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:377 +#, python-format +msgid "base image still has %s snapshots so skipping base image delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:382 +#, python-format +msgid "deleting base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:389 +#, python-format +msgid "image busy, retrying %(retries)s more time(s) in %(delay)ss" +msgstr "" + +#: cinder/backup/drivers/ceph.py:394 +msgid "max retries reached - raising error" +msgstr "" + +#: cinder/backup/drivers/ceph.py:397 +#, python-format +msgid "base backup image='%s' deleted)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:407 +#, python-format +msgid "deleting source snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:453 +#, python-format +msgid "performing differential transfer from '%(src)s' to '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:478 +#, python-format +msgid "rbd diff op failed - (ret=%(ret)s stderr=%(stderr)s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:488 +#, python-format +msgid "image '%s' not found - trying diff format name" +msgstr "" + +#: cinder/backup/drivers/ceph.py:493 +#, python-format +msgid "diff format image '%s' not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:528 +#, python-format +msgid "using --from-snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:543 +#, python-format +msgid "source snap '%s' is stale so deleting" +msgstr "" + +#: cinder/backup/drivers/ceph.py:555 +#, python-format +msgid "" +"snap='%(snap)s' does not exist in base image='%(base)s' - aborting " +"incremental backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:566 +#, python-format +msgid "creating backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:586 +#, python-format +msgid "differential backup transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:595 +msgid "differential backup transfer failed" +msgstr "" + +#: 
cinder/backup/drivers/ceph.py:625 +#, python-format +msgid "creating base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:634 +msgid "copying data" +msgstr "" + +#: cinder/backup/drivers/ceph.py:694 +#, python-format +msgid "looking for snapshot of backup base '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:697 +#, python-format +msgid "backup base '%s' has no snapshots" +msgstr "" + +#: cinder/backup/drivers/ceph.py:704 +#, python-format +msgid "backup '%s' has no snapshot" +msgstr "" + +#: cinder/backup/drivers/ceph.py:708 +#, python-format +msgid "backup should only have one snapshot but instead has %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:713 +#, python-format +msgid "found snapshot '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:734 +msgid "need non-zero volume size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:751 +#, python-format +msgid "Starting backup of volume='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:764 +msgid "forcing full backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:776 +#, python-format +msgid "backup '%s' finished." +msgstr "" + +#: cinder/backup/drivers/ceph.py:834 +msgid "adjusting restore vol size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:846 +#, python-format +msgid "trying incremental restore from base='%(base)s' snap='%(snap)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:858 +msgid "differential restore failed, trying full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:869 +#, python-format +msgid "restore transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:916 +#, python-format +msgid "rbd has %s extents" +msgstr "" + +#: cinder/backup/drivers/ceph.py:938 +msgid "dest volume is original volume - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:959 +msgid "destination has extents - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:964 +#, python-format +msgid "no restore point found for backup='%s', forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:995 +msgid "forcing full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1005 +#, python-format +msgid "starting restore from Ceph backup=%(src)s to volume=%(dest)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1016 +msgid "volume_file does not support fileno() so skipping fsync()" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1021 +msgid "restore finished successfully." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:1023 +#, python-format +msgid "restore finished with error - %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1029 +#, python-format +msgid "delete started for backup=%s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1034 +msgid "rbd image not found but continuing anyway so that db entry can be removed" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1037 +#, python-format +msgid "delete '%s' finished with warning" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1039 +#, python-format +msgid "delete '%s' finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:106 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:123 +#, python-format +msgid "single_user auth mode enabled, but %(param)s not set" +msgstr "" + +#: cinder/backup/drivers/swift.py:141 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:146 +#, python-format +msgid "container %s does not exist" +msgstr "" + +#: cinder/backup/drivers/swift.py:151 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/drivers/swift.py:157 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:173 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:182 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:192 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:209 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/drivers/swift.py:214 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:219 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:224 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/drivers/swift.py:234 +#, python-format +msgid "volume size %d is invalid." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:248 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:271 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/drivers/swift.py:278 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:287 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/drivers/swift.py:291 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/drivers/swift.py:297 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:301 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:304 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:312 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/drivers/swift.py:328 cinder/backup/drivers/tsm.py:324 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/drivers/swift.py:345 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/drivers/swift.py:350 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:356 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/drivers/swift.py:362 +#, python-format +msgid "" +"restoring object from swift. backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:378 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/drivers/swift.py:401 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:409 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:423 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:428 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:432 cinder/backup/drivers/tsm.py:378 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:446 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:455 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:458 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:468 cinder/backup/drivers/tsm.py:440 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/backup/drivers/tsm.py:85 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to create device hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:143 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to obtain backup success notification from " +"server.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:173 +#, python-format +msgid "" +"restore: %(vol_id)s Failed.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:199 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a block device." +msgstr "" + +#: cinder/backup/drivers/tsm.py:206 +#, python-format +msgid "backup: %(vol_id)s Failed. Cannot obtain real path to device %(path)s." +msgstr "" + +#: cinder/backup/drivers/tsm.py:213 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a file." +msgstr "" + +#: cinder/backup/drivers/tsm.py:260 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to remove backup hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:286 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to TSM, volume path: " +"%(volume_path)s," +msgstr "" + +#: cinder/backup/drivers/tsm.py:298 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:308 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:338 +#, python-format +msgid "" +"restore: starting restore of backup from TSM to volume %(volume_id)s, " +"backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:352 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:362 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:413 +#, python-format +msgid "" +"delete: %(vol_id)s Failed to run dsmc with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:421 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments with " +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:432 +#, python-format +msgid "" +"delete: %(vol_id)s Failed with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/brick/exception.py:55 +#, python-format +msgid "Exception in string format operation. 
msg='%s'" +msgstr "" + +#: cinder/brick/exception.py:85 +msgid "We are unable to locate any Fibre Channel devices." +msgstr "" + +#: cinder/brick/exception.py:89 +msgid "Unable to find a Fibre Channel volume device." +msgstr "" + +#: cinder/brick/exception.py:93 +#, python-format +msgid "Volume device not found at %(device)s." +msgstr "" + +#: cinder/brick/exception.py:97 +#, python-format +msgid "Unable to find Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:101 +#, python-format +msgid "Failed to create Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:105 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:109 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:113 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:117 +#, python-format +msgid "Connect to volume via protocol %(protocol)s not supported." +msgstr "" + +#: cinder/brick/initiator/connector.py:127 +#, python-format +msgid "Invalid InitiatorConnector protocol specified %(protocol)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:140 +#, python-format +msgid "Failed to access the device on the path %(path)s: %(error)s %(info)s." +msgstr "" + +#: cinder/brick/initiator/connector.py:229 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. Try" +" number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:242 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:317 +#, python-format +msgid "Could not find the iSCSI Initiator File %s" +msgstr "" + +#: cinder/brick/initiator/connector.py:609 +msgid "We are unable to locate any Fibre Channel devices" +msgstr "" + +#: cinder/brick/initiator/connector.py:619 +#, python-format +msgid "Looking for Fibre Channel dev %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:629 +msgid "Fibre Channel volume device not found." +msgstr "" + +#: cinder/brick/initiator/connector.py:633 +#, python-format +msgid "Fibre volume not yet found. Will rescan & retry. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:649 +#, python-format +msgid "Found Fibre Channel volume %(name)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:658 +#, python-format +msgid "Multipath device discovered %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:776 +#, python-format +msgid "AoE volume not yet found at: %(path)s. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:789 +#, python-format +msgid "Found AoE device %(path)s (after %(tries)s rediscover)" +msgstr "" + +#: cinder/brick/initiator/connector.py:815 +#, python-format +msgid "aoe-discover: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:825 +#, python-format +msgid "aoe-revalidate %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:834 +#, python-format +msgid "aoe-flush %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:858 +msgid "" +"Connection details not present. RemoteFsClient may not initialize " +"properly." 
+msgstr "" + +#: cinder/brick/initiator/connector.py:915 +msgid "Invalid connection_properties specified no device_path attribute" +msgstr "" + +#: cinder/brick/initiator/linuxfc.py:50 cinder/brick/initiator/linuxfc.py:56 +msgid "systool is not installed" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:99 +#: cinder/brick/initiator/linuxscsi.py:107 +#: cinder/brick/initiator/linuxscsi.py:124 +#, python-format +msgid "multipath call failed exit (%(code)s)" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:145 +#, python-format +msgid "Couldn't find multipath device %(line)s" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:149 +#, python-format +msgid "Found multipath device = %(mdev)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:140 +msgid "Attempting recreate of backing lun..." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:158 +#, python-format +msgid "" +"Failed to recover attempt to create iscsi backing lun for volume " +"id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:177 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:184 +#, python-format +msgid "" +"Created volume path %(vp)s,\n" +"content: %(vc)%" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:216 cinder/brick/iscsi/iscsi.py:365 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:227 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:258 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:262 +#, python-format +msgid "Volume path %s does not exist, nothing to remove." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:280 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:290 cinder/brick/iscsi/iscsi.py:550 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:375 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:469 +msgid "cinder-rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:489 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:513 cinder/brick/iscsi/iscsi.py:522 +#, python-format +msgid "Failed to create iscsi target for volume id:%s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:532 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:542 +#, python-format +msgid "Failed to remove iscsi target for volume id:%s." 
+msgstr "" + +#: cinder/brick/iscsi/iscsi.py:571 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 cinder/brick/local_dev/lvm.py:158 +#: cinder/brick/local_dev/lvm.py:474 cinder/brick/local_dev/lvm.py:503 +#: cinder/brick/local_dev/lvm.py:546 cinder/brick/local_dev/lvm.py:609 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 cinder/brick/local_dev/lvm.py:159 +#: cinder/brick/local_dev/lvm.py:475 cinder/brick/local_dev/lvm.py:504 +#: cinder/brick/local_dev/lvm.py:547 cinder/brick/local_dev/lvm.py:610 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 cinder/brick/local_dev/lvm.py:160 +#: cinder/brick/local_dev/lvm.py:476 cinder/brick/local_dev/lvm.py:505 +#: cinder/brick/local_dev/lvm.py:548 cinder/brick/local_dev/lvm.py:611 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, python-format +msgid "Unable to locate Volume Group %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:157 +msgid "Error querying thin pool about data_percent" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:370 +#, python-format +msgid "Unable to find VG: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:420 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:473 +msgid "Error creating Volume" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:489 +#, python-format +msgid "Unable to find LV: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:502 +msgid "Error creating snapshot" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:545 +msgid "Error activating LV" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:563 +#, python-format +msgid "Error reported running lvremove: CMD: %(command)s, RESPONSE: %(response)s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:568 +msgid "Attempting udev settle and retry of lvremove..." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:608 +msgid "Error extending Volume" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:39 +msgid "nfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:45 +msgid "glusterfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:86 +#, python-format +msgid "Already mounted: %s" +msgstr "" + +#: cinder/common/config.py:125 +msgid "Deploy v1 of the Cinder API." +msgstr "" + +#: cinder/common/config.py:128 +msgid "Deploy v2 of the Cinder API." +msgstr "" + +#: cinder/common/sqlalchemyutils.py:66 +#: cinder/openstack/common/db/sqlalchemy/utils.py:72 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:114 +#: cinder/openstack/common/db/sqlalchemy/utils.py:120 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/compute/nova.py:97 +#, python-format +msgid "Novaclient connection created using URL: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:63 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:190 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:843 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1842 +#, python-format +msgid "VolumeType %s deletion failed, VolumeType in use." 
+msgstr "" + +#: cinder/db/sqlalchemy/api.py:2530 +#, python-format +msgid "No backup with id %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2615 +msgid "Volume must be available" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2639 +#, python-format +msgid "Volume in unexpected state %s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2662 +#, python-format +msgid "" +"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " +"%(status)s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:37 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:64 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:240 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:269 +msgid "Downgrade from initial Cinder install is unsupported." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:49 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:74 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:105 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:45 +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:48 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:46 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:127 +msgid "Dropping foreign key reservations_ibfk_1 failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:133 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:140 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:147 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:60 +msgid "Exception while creating table 'volume_glance_metadata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:75 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:68 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:58 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:61 +msgid "transfers table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:31 +msgid "migrations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:61 +#, python-format +msgid "Table |%s| not created" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:37 +#, python-format +msgid "Exception while dropping table %s." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:100 +#, python-format +msgid "Exception while creating table %s." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:34 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:43 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:49 +#, python-format +msgid "Column |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:92 +msgid "encryption_key_id column not dropped from volumes" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:100 +msgid "encryption_key_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:105 +msgid "volume_type_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:113 +msgid "encryption table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:49 +msgid "Table quality_of_service_specs not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:60 +msgid "Added qos_specs_id column to volume type table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:85 +msgid "Dropping foreign key volume_types_ibfk_1 failed" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:93 +msgid "Dropping qos_specs_id column failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:100 +msgid "Dropping quality_of_service_specs table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:59 +msgid "volume_admin_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:48 +msgid "" +"Found existing 'default' entries in the quota_classes table. Skipping " +"insertion of default values." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:72 +msgid "Added default quota class data into the DB." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:74 +msgid "Default quota class data not inserted into the DB." +msgstr "" + +#: cinder/image/glance.py:161 cinder/image/glance.py:169 +#, python-format +msgid "Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:94 cinder/image/image_utils.py:199 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/image/image_utils.py:101 +#, python-format +msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:109 cinder/image/image_utils.py:192 +#, python-format +msgid "" +"Size is %(image_size)dGB and doesn't fit in a volume of size " +"%(volume_size)dGB." +msgstr "" + +#: cinder/image/image_utils.py:157 +#, python-format +msgid "" +"qemu-img is not installed and image is of type %s. Only RAW images can " +"be used if qemu-img is not installed." +msgstr "" + +#: cinder/image/image_utils.py:164 +msgid "" +"qemu-img is not installed and the disk format is not specified. Only RAW" +" images can be used if qemu-img is not installed." 
+msgstr "" + +#: cinder/image/image_utils.py:178 +#, python-format +msgid "Copying image from %(tmp)s to volume %(dest)s - size: %(size)s" +msgstr "" + +#: cinder/image/image_utils.py:206 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:224 +#, python-format +msgid "Converted to %(vol_format)s, but format is now %(file_format)s" +msgstr "" + +#: cinder/image/image_utils.py:260 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:78 +msgid "" +"config option keymgr.fixed_key has not been defined: some operations may " +"fail unexpectedly" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:80 +msgid "keymgr.fixed_key not defined" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:134 +#, python-format +msgid "Not deleting key %s" +msgstr "" + +#: cinder/openstack/common/eventlet_backdoor.py:140 +#, python-format +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/fileutils.py:64 +#, python-format +msgid "Reloading cached file %s" +msgstr "" + +#: cinder/openstack/common/gettextutils.py:252 +msgid "Message objects do not support addition." +msgstr "" + +#: cinder/openstack/common/gettextutils.py:261 +msgid "" +"Message objects do not support str() because they may contain non-ascii " +"characters. Please use unicode() or translate() instead." +msgstr "" + +#: cinder/openstack/common/imageutils.py:96 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:189 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:200 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:227 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:235 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/log.py:301 +#, python-format +msgid "Deprecated: %s" +msgstr "" + +#: cinder/openstack/common/log.py:402 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:453 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:623 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:82 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:89 +#: cinder/tests/brick/test_brick_connector.py:466 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:129 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:136 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:43 +#, python-format +msgid "Unexpected argument for periodic task creation: %(arg)s." 
+msgstr "" + +#: cinder/openstack/common/periodic_task.py:134 +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:139 +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:177 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:186 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." +msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:127 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/openstack/common/processutils.py:142 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:167 +#: cinder/openstack/common/processutils.py:239 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:345 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:179 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/openstack/common/processutils.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:318 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:220 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/openstack/common/processutils.py:224 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/openstack/common/service.py:175 +#: cinder/openstack/common/service.py:269 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:187 +msgid "Exception during rpc cleanup." 
+msgstr "" + +#: cinder/openstack/common/service.py:238 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:275 +msgid "Unhandled exception" +msgstr "" + +#: cinder/openstack/common/service.py:308 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/openstack/common/service.py:327 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/openstack/common/service.py:337 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/openstack/common/service.py:354 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/openstack/common/service.py:358 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/service.py:362 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/openstack/common/service.py:392 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/openstack/common/service.py:410 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/openstack/common/sslutils.py:98 +#, python-format +msgid "Invalid SSL version : %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:86 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/strutils.py:182 +#, python-format +msgid "Invalid string format: %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:189 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/openstack/common/versionutils.py:69 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and " +"may be removed in %(remove_in)s." +msgstr "" + +#: cinder/openstack/common/versionutils.py:73 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s and may be removed in " +"%(remove_in)s. It will not be superseded." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:29 +msgid "An unknown error occurred in crypto utils." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:36 +#, python-format +msgid "Block size of %(given)d is too big, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:45 +#, python-format +msgid "Length of %(given)d is too long, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/db/exception.py:44 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:487 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:538 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:610 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/utils.py:33 +msgid "Sort key supplied was not valid." +msgstr "" + +#: cinder/openstack/common/notifier/api.py:129 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:145 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:164 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. 
Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:105 +#, python-format +msgid "" +"An RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:83 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:216 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshold: %d. There " +"could be a MulticallProxyWaiter leak." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:299 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:345 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "received %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:422 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:423 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:280 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:459 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:594 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:597 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:631 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:640 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:668 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message (%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint."
+msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:166 +#: cinder/openstack/common/rpc/impl_qpid.py:164 +msgid "Failed to process message... skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:477 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:499 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:536 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:552 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:606 +#: cinder/openstack/common/rpc/impl_qpid.py:507 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:624 +#: cinder/openstack/common/rpc/impl_qpid.py:522 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:628 +#: cinder/openstack/common/rpc/impl_qpid.py:526 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:667 +#: cinder/openstack/common/rpc/impl_qpid.py:561 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:84 +#, python-format +msgid "Invalid value for qpid_topology_version: %d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:455 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:461 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:474 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:534 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:101 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:136 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:137 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:138 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:146 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:158 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:200 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:205 +msgid "You cannot send on this socket." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:267 +#, python-format +msgid "Running func with context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:305 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:387 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:437 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:443 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:475 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:481 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:497 +#, python-format +msgid "Required IPC directory does not exist at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:506 +#, python-format +msgid "Permission denied to IPC directory at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:509 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:543 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:562 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:590 +msgid "Skipping topic registration. Already registered." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:597 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:649 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:662 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:675 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:678 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:681 +#, python-format +msgid "Received message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:682 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:691 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:698 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:721 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:724 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:728 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:731 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:771 +#, python-format +msgid "topic is %s." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:815 +#, python-format +msgid "rpc_zmq_matchmaker = %(orig)s is deprecated; use %(new)s instead" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#: cinder/openstack/common/rpc/matchmaker_ring.py:79 +#: cinder/openstack/common/rpc/matchmaker_ring.py:97 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:54 +#, python-format +msgid "extra_spec requirement '%(req)s' does not match '%(cap)s'" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:67 +#, python-format +msgid "%(host_state)s fails resource_type extra_specs requirements" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:43 +msgid "Re-scheduling is disabled." +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:52 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/scheduler/driver.py:69 +msgid "Must implement host_passes_filters" +msgstr "" + +#: cinder/scheduler/driver.py:74 +msgid "Must implement find_retype_host" +msgstr "" + +#: cinder/scheduler/driver.py:78 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:82 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:98 +#, python-format +msgid "cannot place volume %(id)s on %(host)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:114 +#, python-format +msgid "No valid hosts for volume %(id)s with type %(type)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:125 +#, python-format +msgid "" +"Current host not valid for volume %(id)s with type %(type)s, migration " +"not allowed" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:156 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:174 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:207 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:259 +#, python-format +msgid "Filtered %s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:276 +#, python-format +msgid "Choosing %s" +msgstr "" + +#: cinder/scheduler/host_manager.py:264 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:269 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:294 +#, python-format +msgid "volume service is down or disabled. (host: %s)" +msgstr "" + +#: cinder/scheduler/manager.py:63 +msgid "" +"ChanceScheduler and SimpleScheduler have been deprecated due to lack of " +"support for advanced features like: volume types, volume encryption, QoS " +"etc. These two schedulers can be fully replaced by FilterScheduler with " +"certain combination of filters and weighers." 
+msgstr "" + +#: cinder/scheduler/manager.py:98 cinder/scheduler/manager.py:100 +msgid "Failed to create scheduler manager volume flow" +msgstr "" + +#: cinder/scheduler/manager.py:159 +msgid "New volume type not specified in request_spec." +msgstr "" + +#: cinder/scheduler/manager.py:174 +#, python-format +msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." +msgstr "" + +#: cinder/scheduler/manager.py:192 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:68 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%s'" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:43 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:57 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:53 +msgid "No volume_id provided to populate a request_spec from" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:116 +#, python-format +msgid "Failed to schedule_create_volume: %(cause)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:135 +#, python-format +msgid "Failed notifying on %(topic)s payload %(payload)s" +msgstr "" + +#: cinder/tests/fake_driver.py:57 cinder/volume/driver.py:784 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:76 cinder/volume/driver.py:884 +#, python-format +msgid "FAKE ISER: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:97 +msgid "local_path not implemented" +msgstr "" + +#: cinder/tests/fake_driver.py:124 cinder/tests/fake_driver.py:129 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:70 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:78 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:94 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:97 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:58 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_netapp_nfs.py:360 +#, python-format +msgid "Share %(share)s and file name %(file_name)s" +msgstr "" + +#: cinder/tests/test_rbd.py:768 cinder/volume/drivers/rbd.py:175 +msgid "flush() not supported in this version of librbd" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:260 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1507 +#, python-format +msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1510 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1515 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:60 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:61 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: 
cinder/tests/test_xiv_ds8k.py:102 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/tests/api/contrib/test_backups.py:741 +msgid "Invalid input" +msgstr "" + +#: cinder/tests/integrated/test_login.py:29 +#, python-format +msgid "volume: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:32 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:42 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:50 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:58 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:100 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:103 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:121 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:148 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:159 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:166 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/transfer/api.py:68 +msgid "Volume in unexpected state" +msgstr "" + +#: cinder/transfer/api.py:102 cinder/volume/api.py:367 +msgid "status must be available" +msgstr "" + +#: cinder/transfer/api.py:119 +#, python-format +msgid "Failed to create transfer record for %s" +msgstr "" + +#: cinder/transfer/api.py:136 +#, python-format +msgid "Attempt to transfer %s with invalid auth key." +msgstr "" + +#: cinder/transfer/api.py:156 cinder/volume/flows/api/create_volume.py:508 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/transfer/api.py:182 +#, python-format +msgid "Failed to update quota donating volume transfer id %s" +msgstr "" + +#: cinder/transfer/api.py:199 +#, python-format +msgid "Volume %s has been transferred."
+msgstr "" + +#: cinder/volume/api.py:143 +#, python-format +msgid "Unable to query if %s is in the availability zone set" +msgstr "" + +#: cinder/volume/api.py:171 cinder/volume/api.py:173 +msgid "Failed to create api volume flow" +msgstr "" + +#: cinder/volume/api.py:202 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:214 +#, python-format +msgid "Volume status must be available or error, but current status is: %s" +msgstr "" + +#: cinder/volume/api.py:224 +msgid "Volume cannot be deleted while migrating" +msgstr "" + +#: cinder/volume/api.py:229 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:285 cinder/volume/api.py:350 +#: cinder/volume/qos_specs.py:240 cinder/volume/volume_types.py:67 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:370 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:377 +msgid "status must be in-use to detach" +msgstr "" + +#: cinder/volume/api.py:388 +msgid "Volume status must be available to reserve" +msgstr "" + +#: cinder/volume/api.py:464 +msgid "Snapshot cannot be created while volume is migrating" +msgstr "" + +#: cinder/volume/api.py:468 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:490 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:502 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:553 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/api.py:581 cinder/volume/flows/api/create_volume.py:208 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:585 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:589 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:720 cinder/volume/api.py:772 +msgid "Volume status must be available/in-use." +msgstr "" + +#: cinder/volume/api.py:723 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/api.py:752 +msgid "Volume status must be available to extend." +msgstr "" + +#: cinder/volume/api.py:757 +#, python-format +msgid "" +"New size for extend must be greater than current size. (current: " +"%(size)s, extended: %(new_size)s)" +msgstr "" + +#: cinder/volume/api.py:778 +msgid "Volume is already part of an active migration" +msgstr "" + +#: cinder/volume/api.py:784 +msgid "volume must not have snapshots" +msgstr "" + +#: cinder/volume/api.py:797 +#, python-format +msgid "No available service named %s" +msgstr "" + +#: cinder/volume/api.py:803 +msgid "Destination host must be different than current host" +msgstr "" + +#: cinder/volume/api.py:833 +msgid "Source volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:837 +msgid "Destination volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:842 +#, python-format +msgid "Destination has migration_status %(stat)s, expected %(exp)s." +msgstr "" + +#: cinder/volume/api.py:853 +msgid "Volume status must be available to update readonly flag." 
+msgstr "" + +#: cinder/volume/api.py:862 +#, python-format +msgid "Unable to update type due to incorrect status on volume: %s" +msgstr "" + +#: cinder/volume/api.py:868 +#, python-format +msgid "Volume %s is already part of an active migration." +msgstr "" + +#: cinder/volume/api.py:874 +#, python-format +msgid "migration_policy must be 'on-demand' or 'never', passed: %s" +msgstr "" + +#: cinder/volume/api.py:887 +#, python-format +msgid "Invalid volume_type passed: %s" +msgstr "" + +#: cinder/volume/api.py:900 +#, python-format +msgid "New volume_type same as original: %s" +msgstr "" + +#: cinder/volume/api.py:915 +msgid "Retype cannot change encryption requirements" +msgstr "" + +#: cinder/volume/api.py:927 +msgid "Retype cannot change front-end qos specs for in-use volumes" +msgstr "" + +#: cinder/volume/driver.py:189 cinder/volume/drivers/netapp/nfs.py:174 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:282 +#, python-format +msgid "copy_data_between_volumes %(src)s -> %(dest)s." +msgstr "" + +#: cinder/volume/driver.py:295 cinder/volume/driver.py:309 +#, python-format +msgid "Failed to attach volume %(vol)s" +msgstr "" + +#: cinder/volume/driver.py:327 +#, python-format +msgid "Failed to copy volume %(src)s to %(dest)d" +msgstr "" + +#: cinder/volume/driver.py:340 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:358 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:394 +#, python-format +msgid "Unable to access the backend storage via the path %(path)s." +msgstr "" + +#: cinder/volume/driver.py:433 +#, python-format +msgid "Creating a new backup for volume %s." +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Restoring backup %(backup)s to volume %(volume)s." +msgstr "" + +#: cinder/volume/driver.py:474 +msgid "Extend volume not implemented" +msgstr "" + +#: cinder/volume/driver.py:533 cinder/volume/drivers/emc/emc_smis_iscsi.py:113 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:546 +#, python-format +msgid "ISCSI discovery attempt failed for:%s" +msgstr "" + +#: cinder/volume/driver.py:548 +#, python-format +msgid "Error from iscsiadm -m discovery: %s" +msgstr "" + +#: cinder/volume/driver.py:595 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:599 cinder/volume/drivers/emc/emc_smis_iscsi.py:156 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:696 +msgid "The volume driver requires the iSCSI initiator name in the connector." +msgstr "" + +#: cinder/volume/driver.py:726 cinder/volume/driver.py:845 +#: cinder/volume/drivers/eqlx.py:247 cinder/volume/drivers/lvm.py:359 +#: cinder/volume/drivers/zadara.py:650 +#: cinder/volume/drivers/emc/emc_smis_common.py:859 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:235 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:602 +#: cinder/volume/drivers/netapp/iscsi.py:1032 +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/nexenta/iscsi.py:538 +#: cinder/volume/drivers/windows/windows.py:205 +msgid "Updating volume stats" +msgstr "" + +#: cinder/volume/driver.py:924 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:203 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." 
+msgstr "" + +#: cinder/volume/manager.py:209 +msgid "" +"ThinLVMVolumeDriver is deprecated, please configure LVMISCSIDriver and " +"lvm_type=thin. Continuing with those settings." +msgstr "" + +#: cinder/volume/manager.py:228 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)" +msgstr "" + +#: cinder/volume/manager.py:235 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s" +msgstr "" + +#: cinder/volume/manager.py:244 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:257 +#, python-format +msgid "Failed to re-export volume %s: setting to error state" +msgstr "" + +#: cinder/volume/manager.py:264 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:271 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:273 +#, python-format +msgid "" +"Error encountered during re-exporting phase of driver initialization: " +"%(name)s" +msgstr "" + +#: cinder/volume/manager.py:283 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:328 cinder/volume/manager.py:330 +msgid "Failed to create manager volume flow" +msgstr "" + +#: cinder/volume/manager.py:374 cinder/volume/manager.py:391 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:380 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:389 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:394 +#, python-format +msgid "Cannot delete volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:422 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:427 +#, python-format +msgid "volume %s: glance metadata deleted" +msgstr "" + +#: cinder/volume/manager.py:430 +#, python-format +msgid "no glance metadata found for volume %s" +msgstr "" + +#: cinder/volume/manager.py:434 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:451 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:462 +#, python-format +msgid "snapshot %(snap_id)s: creating" +msgstr "" + +#: cinder/volume/manager.py:490 +#, python-format +msgid "" +"Failed updating %(snapshot_id)s metadata using the provided volumes " +"%(volume_id)s metadata" +msgstr "" + +#: cinder/volume/manager.py:496 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:508 cinder/volume/manager.py:518 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:526 +#, python-format +msgid "Cannot delete snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:556 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:559 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:579 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:583 +msgid "being attached by another host" +msgstr "" + +#: cinder/volume/manager.py:587 +msgid "being attached by different mode" +msgstr "" + +#: cinder/volume/manager.py:590 +msgid "status must be available or attaching" +msgstr "" + +#: cinder/volume/manager.py:698 +#, python-format +msgid 
"Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "" + +#: cinder/volume/manager.py:760 +#, python-format +msgid "Unable to fetch connection information from backend: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:807 +#, python-format +msgid "Unable to terminate volume connection: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:854 +msgid "failed to create new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:857 +msgid "timeout creating new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:880 +#, python-format +msgid "Failed to copy volume %(vol1)s to %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:909 +#, python-format +msgid "" +"migrate_volume_completion: completing migration for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:921 +#, python-format +msgid "" +"migrate_volume_completion is cleaning up an error for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:940 +#, python-format +msgid "Failed to delete migration source vol %(vol)s: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:976 +#, python-format +msgid "volume %s: calling driver migrate_volume" +msgstr "" + +#: cinder/volume/manager.py:1016 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/manager.py:1024 +#, python-format +msgid "" +"Unable to update stats, %(driver_name)s -%(driver_version)s " +"%(config_group)s driver is uninitialized." +msgstr "" + +#: cinder/volume/manager.py:1044 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/manager.py:1091 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to extend volume by %(s_size)sG, " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/manager.py:1103 +#, python-format +msgid "volume %s: extending" +msgstr "" + +#: cinder/volume/manager.py:1105 +#, python-format +msgid "volume %s: extended successfully" +msgstr "" + +#: cinder/volume/manager.py:1107 +#, python-format +msgid "volume %s: Error trying to extend volume" +msgstr "" + +#: cinder/volume/manager.py:1169 +msgid "Failed to update usages while retyping volume." +msgstr "" + +#: cinder/volume/manager.py:1170 +msgid "Failed to get old volume type quota reservations" +msgstr "" + +#: cinder/volume/manager.py:1190 +#, python-format +msgid "Volume %s: retyped succesfully" +msgstr "" + +#: cinder/volume/manager.py:1193 +#, python-format +msgid "" +"Volume %s: driver error when trying to retype, falling back to generic " +"mechanism." +msgstr "" + +#: cinder/volume/manager.py:1204 +msgid "Retype requires migration but is not allowed." +msgstr "" + +#: cinder/volume/manager.py:1212 +msgid "Volume must not have snapshots." 
+msgstr "" + +#: cinder/volume/qos_specs.py:57 +#, python-format +msgid "Valid consumer of QoS specs are: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:84 cinder/volume/qos_specs.py:105 +#: cinder/volume/qos_specs.py:155 cinder/volume/qos_specs.py:197 +#: cinder/volume/qos_specs.py:211 cinder/volume/qos_specs.py:225 +#: cinder/volume/volume_types.py:43 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:123 cinder/volume/qos_specs.py:140 +#: cinder/volume/qos_specs.py:272 cinder/volume/volume_types.py:52 +#: cinder/volume/volume_types.py:99 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/qos_specs.py:156 +#, python-format +msgid "Failed to get all associations of qos specs %s" +msgstr "" + +#: cinder/volume/qos_specs.py:189 +#, python-format +msgid "" +"Type %(type_id)s is already associated with another qos specs: " +"%(qos_specs_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:198 +#, python-format +msgid "Failed to associate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:212 +#, python-format +msgid "Failed to disassociate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:226 +#, python-format +msgid "Failed to disassociate qos specs %s." +msgstr "" + +#: cinder/volume/qos_specs.py:284 cinder/volume/volume_types.py:111 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/utils.py:144 +#, python-format +msgid "" +"Incorrect value error: %(blocksize)s, it may indicate that " +"'volume_dd_blocksize' was configured incorrectly. Fall back to default." +msgstr "" + +#: cinder/volume/volume_types.py:130 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:131 +#: cinder/volume/drivers/block_device.py:143 cinder/volume/drivers/lvm.py:654 +#: cinder/volume/drivers/lvm.py:669 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:157 cinder/volume/drivers/lvm.py:687 +#, python-format +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:183 cinder/volume/drivers/lvm.py:483 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:200 cinder/volume/drivers/lvm.py:504 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:272 cinder/volume/drivers/lvm.py:227 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:287 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:311 cinder/volume/drivers/lvm.py:300 +#: cinder/volume/drivers/zadara.py:509 cinder/volume/drivers/nexenta/nfs.py:189 +#, python-format +msgid "Creating clone of volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:380 +msgid "No free disk" +msgstr "" + +#: cinder/volume/drivers/block_device.py:393 +msgid "No big enough free disk" +msgstr "" + +#: cinder/volume/drivers/coraid.py:84 +#, python-format +msgid "Invalid ESM url scheme \"%s\". Supported https only." +msgstr "" + +#: cinder/volume/drivers/coraid.py:111 +msgid "Invalid REST handle name. Expected path." 
+msgstr "" + +#: cinder/volume/drivers/coraid.py:134 +#, python-format +msgid "Call to json.loads() failed: %(ex)s. Response: %(resp)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:224 +msgid "Session is expired. Relogin on ESM." +msgstr "" + +#: cinder/volume/drivers/coraid.py:244 +msgid "Reply is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:246 +msgid "Error message is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:284 +#, python-format +msgid "Coraid Appliance ping failed: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:297 +#, python-format +msgid "Volume \"%(name)s\" created with VSX LUN \"%(lun)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:311 +#, python-format +msgid "Volume \"%s\" deleted." +msgstr "" + +#: cinder/volume/drivers/coraid.py:315 +#, python-format +msgid "Resize volume \"%(name)s\" to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:319 +#, python-format +msgid "Repository for volume \"%(name)s\" found: \"%(repo)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:333 +#, python-format +msgid "Volume \"%(name)s\" resized. New size is %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:385 +msgid "Cannot create clone volume in different repository." +msgstr "" + +#: cinder/volume/drivers/coraid.py:505 +#, python-format +msgid "Initialize connection %(shelf)s/%(lun)s for %(name)s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:139 +#, python-format +msgid "" +"CLI output\n" +"%s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:154 +msgid "Reading CLI MOTD" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:158 +#, python-format +msgid "Setting CLI terminal width: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:162 +#, python-format +msgid "Sending CLI command: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:169 +msgid "Error executing EQL command" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:199 +#, python-format +msgid "EQL-driver: executing \"%s\"" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:208 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:383 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:215 cinder/volume/drivers/san/san.py:149 +#, python-format +msgid "Error running SSH command: %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:282 +#, python-format +msgid "Volume %s does not exist, it may have already been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:300 +#, python-format +msgid "EQL-driver: Setup is complete, group IP is %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:304 +msgid "Failed to setup the Dell EqualLogic driver" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:320 +#, python-format +msgid "Failed to create volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:329 +#, python-format +msgid "Volume %s was not found while trying to delete it" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:333 +#, python-format +msgid "Failed to delete volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:348 +#, python-format +msgid "Failed to create snapshot of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:361 +#, python-format +msgid "Failed to create volume from snapshot %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:374 +#, python-format +msgid "Failed to create clone of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:384 +#, python-format +msgid "Failed to delete snapshot %(snap)s of volume %(vol)s" +msgstr "" + +#: 
cinder/volume/drivers/eqlx.py:405
+#, python-format
+msgid "Failed to initialize connection to volume %s"
+msgstr ""
+
+#: cinder/volume/drivers/eqlx.py:415
+#, python-format
+msgid "Failed to terminate connection to volume %s"
+msgstr ""
+
+#: cinder/volume/drivers/eqlx.py:436
+#, python-format
+msgid "Volume %s was not found; it may have been deleted"
+msgstr ""
+
+#: cinder/volume/drivers/eqlx.py:440
+#, python-format
+msgid "Failed to ensure export of volume %s"
+msgstr ""
+
+#: cinder/volume/drivers/eqlx.py:459
+#, python-format
+msgid "Failed to extend_volume %(name)s from %(current_size)sGB to %(new_size)sGB"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:86
+#, python-format
+msgid "There's no Gluster config file configured (%s)"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:91
+#, python-format
+msgid "Gluster config file at %(config)s doesn't exist"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:103
+msgid "mount.glusterfs is not installed"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:161
+#, python-format
+msgid "Cloning volume %(src)s to volume %(dst)s"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:166
+msgid "Volume status must be 'available'."
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:202 cinder/volume/drivers/nfs.py:122
+#: cinder/volume/drivers/netapp/nfs.py:753
+#, python-format
+msgid "casted to %s"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:216
+msgid "Snapshot status must be \"available\" to clone."
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:238
+#, python-format
+msgid "snapshot: %(snap)s, volume: %(vol)s, volume_size: %(size)s"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:257
+#, python-format
+msgid "will copy from snapshot at %s"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:275 cinder/volume/drivers/nfs.py:172
+#, python-format
+msgid "Volume %s does not have provider_location specified, skipping"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:373
+#, python-format
+msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:403
+#, python-format
+msgid "nova call result: %s"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:405
+msgid "Call to Nova to create snapshot failed"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:427
+msgid "Nova returned \"error\" status while creating snapshot."
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:431
+#, python-format
+msgid "Status of snapshot %(id)s is now %(status)s"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:444
+#, python-format
+msgid "Timed out while waiting for Nova update for creation of snapshot %s."
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:456
+#, python-format
+msgid "create snapshot: %s"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:457
+#, python-format
+msgid "volume id: %s"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:532
+msgid "'active' must be present when writing snap_info."
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:562
+#, python-format
+msgid "deleting snapshot %s"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:566
+msgid "Volume status must be \"available\" or \"in-use\"."
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:582
+#, python-format
+msgid ""
+"Snapshot record for %s is not present, allowing snapshot_delete to "
+"proceed."
+msgstr "" + +#: cinder/volume/drivers/glusterfs.py:587 +#, python-format +msgid "snapshot_file for this snap is %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:608 +#, python-format +msgid "No base file found for %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:625 +#, python-format +msgid "No %(base_id)s found for %(file)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:680 +#, python-format +msgid "No file found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:690 +#, python-format +msgid "No snap found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:701 +#, python-format +msgid "No file depends on %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:727 +#, python-format +msgid "Check condition failed: %s expected to be None." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:778 +msgid "Call to Nova delete snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:796 +#, python-format +msgid "status of snapshot %s is still \"deleting\"... waiting" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:802 +#, python-format +msgid "Unable to delete snapshot %(id)s, status: %(status)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:815 +#, python-format +msgid "Timed out while waiting for Nova update for deletion of snapshot %(id)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:904 +#, python-format +msgid "%s must be a valid raw or qcow2 image." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:967 +msgid "Extend volume is only supported for this driver when no snapshots exist." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:975 +#, python-format +msgid "Unrecognized backing format: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:990 +#, python-format +msgid "creating new volume at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:993 +#, python-format +msgid "file already exists at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1019 cinder/volume/drivers/nfs.py:159 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1021 +#, python-format +msgid "Available shares: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1038 +#, python-format +msgid "" +"GlusterFS share at %(dir)s is not writable by the Cinder volume service. " +"Snapshot operations will not be supported." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:96 +#, python-format +msgid "GPFS is not active. Detailed output: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:97 +#, python-format +msgid "GPFS is not running - state: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:140 +msgid "Option gpfs_mount_point_base is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:147 +msgid "Option gpfs_images_share_mode is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:153 +msgid "Option gpfs_images_dir is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:160 +#, python-format +msgid "" +"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " +"belong to different file systems" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:169 +#, python-format +msgid "" +"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in " +"cluster daemon level %(cur)s - must be at least at level %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:183 +#, python-format +msgid "%s must be an absolute path." 
+msgstr "" + +#: cinder/volume/drivers/gpfs.py:188 +#, python-format +msgid "%s is not a directory." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:197 +#, python-format +msgid "" +"The GPFS filesystem %(fs)s is not at the required release level. Current" +" level is %(cur)s, must be at least %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:556 +#, python-format +msgid "Failed to resize volume %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:604 +#, python-format +msgid "mkfs failed on volume %(vol)s, error message was: %(err)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:637 +#, python-format +msgid "" +"%s cannot be accessed. Verify that GPFS is active and file system is " +"mounted." +msgstr "" + +#: cinder/volume/drivers/lvm.py:189 +#, python-format +msgid "Unabled to delete due to existing snapshot for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:215 +#, python-format +msgid "Volume device file path %s does not exist." +msgstr "" + +#: cinder/volume/drivers/lvm.py:221 +#, python-format +msgid "Size for volume: %s not found, cannot secure delete." +msgstr "" + +#: cinder/volume/drivers/lvm.py:262 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:361 +#, python-format +msgid "Unable to update stats on non-initialized Volume Group: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:462 +#, python-format +msgid "Error creating iSCSI target, retrying creation for target: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:482 +#, python-format +msgid "volume_info:%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:518 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:519 cinder/volume/drivers/lvm.py:724 +#: cinder/volume/drivers/huawei/rest_common.py:1225 +#, python-format +msgid "%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:573 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/nfs.py:109 +msgid "Driver specific implementation needs to return mount_point_base." +msgstr "" + +#: cinder/volume/drivers/nfs.py:263 +#, python-format +msgid "Expected volume size was %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:264 +#, python-format +msgid " but size is now %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:361 +#, python-format +msgid "%s is already mounted" +msgstr "" + +#: cinder/volume/drivers/nfs.py:421 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:426 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/nfs.py:431 +#, python-format +msgid "NFS config 'nfs_oversub_ratio' invalid. Must be > 0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:439 +#, python-format +msgid "NFS config 'nfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:493 +#, python-format +msgid "Selected %s as target nfs share." 
+msgstr "" + +#: cinder/volume/drivers/nfs.py:526 +#, python-format +msgid "%s is above nfs_used_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:529 +#, python-format +msgid "%s is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:532 +#, python-format +msgid "%s reserved space is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/rbd.py:160 +#, python-format +msgid "Invalid argument - whence=%s not supported" +msgstr "" + +#: cinder/volume/drivers/rbd.py:164 +msgid "Invalid argument" +msgstr "" + +#: cinder/volume/drivers/rbd.py:183 +msgid "fileno() not supported by RBD()" +msgstr "" + +#: cinder/volume/drivers/rbd.py:210 +#, python-format +msgid "error opening rbd image %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:259 +msgid "rados and rbd python libraries not found" +msgstr "" + +#: cinder/volume/drivers/rbd.py:265 +msgid "error connecting to ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:346 cinder/volume/drivers/sheepdog.py:178 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:377 +#, python-format +msgid "clone depth exceeds limit of %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:411 +#, python-format +msgid "maximum clone depth (%d) has been reached - flattening source volume" +msgstr "" + +#: cinder/volume/drivers/rbd.py:423 +#, python-format +msgid "flattening source volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:435 +#, python-format +msgid "creating snapshot='%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:445 +#, python-format +msgid "cloning '%(src_vol)s@%(src_snap)s' to '%(dest)s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:459 +msgid "clone created successfully" +msgstr "" + +#: cinder/volume/drivers/rbd.py:468 +#, python-format +msgid "creating volume '%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:484 +#, python-format +msgid "flattening %(pool)s/%(img)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:490 +#, python-format +msgid "cloning %(pool)s/%(img)s@%(snap)s to %(dst)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:527 +msgid "volume has no backup snaps" +msgstr "" + +#: cinder/volume/drivers/rbd.py:550 +#, python-format +msgid "volume %s is not a clone" +msgstr "" + +#: cinder/volume/drivers/rbd.py:568 +#, python-format +msgid "deleting parent snapshot %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:579 +#, python-format +msgid "deleting parent %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:593 +#, python-format +msgid "volume %s no longer exists in backend" +msgstr "" + +#: cinder/volume/drivers/rbd.py:609 +msgid "volume has clone snapshot(s)" +msgstr "" + +#: cinder/volume/drivers/rbd.py:625 +#, python-format +msgid "deleting rbd volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:629 +msgid "" +"ImageBusy error raised while deleting rbd volume. This may have been " +"caused by a connection from a client that has crashed and, if so, may be " +"resolved by retrying the delete after 30 seconds has elapsed." 
+msgstr "" + +#: cinder/volume/drivers/rbd.py:642 +msgid "volume is a clone so cleaning references" +msgstr "" + +#: cinder/volume/drivers/rbd.py:696 +#, python-format +msgid "connection data: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:705 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:709 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:712 +msgid "Not an rbd snapshot" +msgstr "" + +#: cinder/volume/drivers/rbd.py:724 +#, python-format +msgid "not cloneable: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:728 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:733 +msgid "rbd image clone requires image format to be 'raw' but image {0} is '{1}'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:747 +#, python-format +msgid "Unable to open image %(loc)s: %(err)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:817 +msgid "volume backup complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:830 +msgid "volume restore complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:840 cinder/volume/drivers/sheepdog.py:195 +#, python-format +msgid "Failed to Extend Volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:845 cinder/volume/drivers/sheepdog.py:200 +#: cinder/volume/drivers/windows/windows.py:223 +#, python-format +msgid "Extend volume from %(old_size)s GB to %(new_size)s GB." +msgstr "" + +#: cinder/volume/drivers/scality.py:67 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:78 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:84 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:105 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:139 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:59 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:64 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:144 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:151 +#, python-format +msgid "" +"Failed to make httplib connection SolidFire Cluster: %s (verify san_ip " +"settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:154 +#, python-format +msgid "Failed to make httplib connection: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:161 +#, python-format +msgid "" +"Request to SolidFire cluster returned bad status: %(status)s / %(reason)s" +" (check san_login/san_password settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:166 +#, python-format +msgid "HTTP request failed, with status: %(status)s and reason: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:177 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:183 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:187 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:189 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:195 +#, python-format +msgid "Detected 
xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:202 +#: cinder/volume/drivers/solidfire.py:271 +#: cinder/volume/drivers/solidfire.py:366 +#, python-format +msgid "API response: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:222 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:253 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:315 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:398 +msgid "Failed to get model update from clone" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:410 +#, python-format +msgid "Failed volume create: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:425 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:460 +#, python-format +msgid "Failed to get SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:469 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:478 +#, python-format +msgid "Volume %s, not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:481 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:550 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:554 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:556 +msgid "This usually means the volume was never successfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:569 +#, python-format +msgid "Failed to delete SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:572 +#: cinder/volume/drivers/solidfire.py:646 +#: cinder/volume/drivers/solidfire.py:709 +#: cinder/volume/drivers/solidfire.py:734 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:575 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:579 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:587 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:638 +msgid "Entering SolidFire extend_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:660 +msgid "Leaving SolidFire extend_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:665 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:673 +msgid "Failed to get updated stats" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:703 +#: cinder/volume/drivers/solidfire.py:728 +msgid "Entering SolidFire attach_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:773 +msgid "Leaving SolidFire transfer volume" +msgstr "" + +#: cinder/volume/drivers/zadara.py:236 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:260 +#, python-format +msgid "Operation completed. 
%(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:357 +#, python-format +msgid "Pool %(name)s: %(total)sGB total, %(free)sGB free" +msgstr "" + +#: cinder/volume/drivers/zadara.py:408 cinder/volume/drivers/zadara.py:531 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:438 +#, python-format +msgid "Create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:445 cinder/volume/drivers/zadara.py:490 +#: cinder/volume/drivers/zadara.py:516 +#, python-format +msgid "Volume %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:456 +#, python-format +msgid "Delete snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:464 +#, python-format +msgid "snapshot: original volume %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:472 +#, python-format +msgid "snapshot: snapshot %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:483 +#, python-format +msgid "Creating volume from snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:496 +#, python-format +msgid "Snapshot %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:614 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:40 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:79 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:83 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:91 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:98 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:107 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:115 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:130 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:137 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:144 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:152 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:157 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:167 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:177 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:188 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:197 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:218 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:230 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:241 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:257 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:266 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:278 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:287 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:292 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:302 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:312 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:321 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:342 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. 
Return code: %(rc)lu. Error: %(error)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:354
+#, python-format
+msgid ""
+"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s."
+" Successfully cloned volume from source volume. Finding the clone "
+"relationship."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:365
+#, python-format
+msgid ""
+"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s."
+" Remove the clone relationship. Method: ModifyReplicaSynchronization "
+"ReplicationService: %(service)s Operation: 8 Synchronization: "
+"%(sync_name)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:381
+#, python-format
+msgid ""
+"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s"
+" Return code: %(rc)lu"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:390
+#, python-format
+msgid ""
+"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: "
+"%(srcname)s. Return code: %(rc)lu. Error: %(error)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:402
+#, python-format
+msgid ""
+"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: "
+"%(srcname)s Return code: %(rc)lu."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:411
+msgid "Entering delete_volume."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:413
+#, python-format
+msgid "Delete Volume: %(volume)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:420
+#, python-format
+msgid "Volume %(name)s not found on the array. No volume to delete."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:430
+#, python-format
+msgid ""
+"Error Delete Volume: %(volumename)s. Storage Configuration Service not "
+"found."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:438
+#, python-format
+msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:442
+#, python-format
+msgid ""
+"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigService: "
+"%(service)s TheElement: %(vol_instance)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:456
+#, python-format
+msgid ""
+"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: "
+"%(error)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:465
+#, python-format
+msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:472
+msgid "Entering create_snapshot."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:476
+#, python-format
+msgid "Create snapshot: %(snapshot)s: volume: %(volume)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:488
+#, python-format
+msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:495
+#: cinder/volume/drivers/emc/emc_smis_common.py:497
+#: cinder/volume/drivers/emc/emc_smis_common.py:567
+#, python-format
+msgid "Cannot find Replication Service to create snapshot for volume %s."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:502
+#, python-format
+msgid ""
+"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s "
+"Source: %(volume)s Replication Service: %(service)s ElementName: "
+"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s."
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:518 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:527 +#, python-format +msgid "" +"Error Create Snapshot: %(snapshot)s Volume: %(volume)s Error: " +"%(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:535 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:541 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:545 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:551 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:559 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:574 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:590 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:599 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:611 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:621 +#, python-format +msgid "Create export: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:626 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:648 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:663 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:674 +#, python-format +msgid "Error mapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:678 +#, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:694 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:707 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:711 +#, python-format +msgid "HidePaths for volume %s completed successfully." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:724 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:739 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:744 +#, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:757 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:770 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:775 +#, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:781 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:790 +#: cinder/volume/drivers/emc/emc_smis_common.py:820 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:804 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:810 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:834 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:840 +#, python-format +msgid "Volume %s is already mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:852 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:884 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:887 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:903 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:906 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:928 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:948 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:952 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:959 +msgid "Cannot connect to ECOM server" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:971 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:984 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:997 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1010 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1054 +#, python-format +msgid "Pool %(storage_type)s is not found." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1060 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1066 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1082 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1114 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1117 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1130 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1158 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1184 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1188 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1248 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1289 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1302 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1314 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1326 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1361 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1404 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1409 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1419 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1441 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1463 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1491 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1520 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1526 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1538 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1548 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1550 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1566 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:152 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:161 +#, python-format +msgid "Cannot find device number for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:191 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:198 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:215 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:70 +#, python-format +msgid "Range: start LU: %(start)s, end LU: %(end)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:84 +#, python-format +msgid "setting LU upper (end) limit to %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:92 +#, python-format +msgid "%(element)s: %(val)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:103 cinder/volume/drivers/hds/hds.py:105 +#, python-format +msgid "XML exception reading parameter: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:178 +#, python-format +msgid "portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:197 +#, python-format +msgid "No configuration found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:250 +#, python-format +msgid "HDP not found: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:289 +#, python-format +msgid "iSCSI portal not found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:327 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:355 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is cloned." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:372 +#, python-format +msgid "LUN %(lun)s extended to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:395 +#, python-format +msgid "delete lun %(lun)s on %(name)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:480 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created from snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:503 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is created as snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:522 +#, python-format +msgid "LUN %s is deleted." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:57 +msgid "_instantiate_driver: configuration not found." 
+msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:64 +#, python-format +msgid "" +"_instantiate_driver: Loading %(protocol)s driver for Huawei OceanStor " +"%(product)s series storage arrays." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:84 +#, python-format +msgid "" +"\"Product\" or \"Protocol\" is illegal. \"Product\" should be set to " +"either T, Dorado or HVS. \"Protocol\" should be set to either iSCSI or " +"FC. Product: %(product)s Protocol: %(protocol)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:74 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s host: %(host)s initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:92 +#: cinder/volume/drivers/huawei/huawei_t.py:461 +#, python-format +msgid "initialize_connection: Target FC ports WWNS: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:101 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(ini)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:159 +#: cinder/volume/drivers/huawei/rest_common.py:1278 +#, python-format +msgid "" +"_get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " +"check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:206 +#: cinder/volume/drivers/huawei/rest_common.py:1083 +#, python-format +msgid "_get_tgt_iqn: iSCSI IP is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:234 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:248 +#, python-format +msgid "" +"_get_iscsi_tgt_port_info: Failed to get iSCSI port info. Please make sure" +" the iSCSI port IP %s is configured in array." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:323 +#: cinder/volume/drivers/huawei/huawei_t.py:552 +#, python-format +msgid "" +"terminate_connection: volume: %(vol)s, host: %(host)s, connector: " +"%(initiator)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:351 +#, python-format +msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:436 +msgid "validate_connector: The FC driver requires thewwpns in the connector." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:443 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:578 +#, python-format +msgid "_remove_fc_ports: FC port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:40 +#, python-format +msgid "parse_xml_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:129 +#, python-format +msgid "_get_host_os_type: Host %(ip)s OS type is %(os)s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:59 +#, python-format +msgid "HVS Request URL: %(url)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:60 +#, python-format +msgid "HVS Request Data: %(data)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:73 +#, python-format +msgid "HVS Response Data: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:75 +#, python-format +msgid "Bad response from server: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:82 +msgid "JSON transfer error" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:102 +#, python-format +msgid "Login error, reason is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:166 +#, python-format +msgid "" +"%(err)s\n" +"result: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:173 +#, python-format +msgid "%s \"data\" was not in result." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:208 +msgid "Can't find the Qos policy in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:246 +msgid "Can't find lun or lun group in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:280 +#, python-format +msgid "Invalid resource pool: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:298 +#, python-format +msgid "Get pool info error, pool name is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:327 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:354 +#, python-format +msgid "_stop_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:474 +#, python-format +msgid "" +"_mapping_hostgroup_and_lungroup: lun_group: %(lun_group)sview_id: " +"%(view_id)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:511 +#: cinder/volume/drivers/huawei/rest_common.py:543 +#, python-format +msgid "initiator name:%(initiator_name)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:527 +#, python-format +msgid "host lun id is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:553 +#, python-format +msgid "the free wwns %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:574 +#, python-format +msgid "the fc server properties is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:688 +#, python-format +msgid "JSON transfer data error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:874 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:937 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:964 +#, python-format +msgid "" +"PrefetchType config is wrong. PrefetchType must in 1,2,3,4. fetchtype " +"is:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:970 +msgid "Use default prefetch fetchtype. Prefetch fetchtype:Intelligent." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:982 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal.LUNcopy name: " +"%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1056 +#, python-format +msgid "" +"_get_iscsi_port_info: Failed to get iscsi port info through config IP " +"%(ip)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1101 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1124 +#, python-format +msgid "_parse_volume_type: type id: %(type_id)s config parameter is: %(params)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1157 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the configuration file " +"%(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1162 +#, python-format +msgid "The config parameters are: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1239 +#: cinder/volume/drivers/huawei/ssh_common.py:118 +#: cinder/volume/drivers/huawei/ssh_common.py:1265 +#, python-format +msgid "_check_conf_file: Config file invalid. %s must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1246 +#: cinder/volume/drivers/huawei/ssh_common.py:125 +msgid "_check_conf_file: Config file invalid. StoragePool must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1256 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1300 +msgid "Can not find lun in array" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:54 +#, python-format +msgid "ssh_read: Read SSH timeout. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:70 +msgid "No response message. Please check system status." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:101 +#: cinder/volume/drivers/huawei/ssh_common.py:1249 +msgid "do_setup" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:135 +#: cinder/volume/drivers/huawei/ssh_common.py:1287 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType is invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:169 +#, python-format +msgid "_get_login_info: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:224 +#, python-format +msgid "create_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:242 +#, python-format +msgid "" +"_name_translate: Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:279 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the element in configuration " +"file %(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:373 +#: cinder/volume/drivers/huawei/ssh_common.py:1451 +#, python-format +msgid "LUNType must be \"Thin\" or \"Thick\". LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:395 +msgid "" +"_parse_conf_lun_params: Use default prefetch type. 
Prefetch type: " +"Intelligent" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:421 +#, python-format +msgid "" +"_get_maximum_capacity_pool_id: Failed to get pool id. Please check config" +" file and make sure the StoragePool %s is created in storage array." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:436 +#, python-format +msgid "CLI command: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:466 +#, python-format +msgid "" +"_execute_cli: Can not connect to IP %(old)s, try to connect to the other " +"IP %(new)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:501 +#, python-format +msgid "_execute_cli: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:511 +#, python-format +msgid "delete_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:516 +#, python-format +msgid "delete_volume: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:570 +#, python-format +msgid "" +"create_volume_from_snapshot: snapshot name: %(snapshot)s, volume name: " +"%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:580 +#, python-format +msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:650 +#, python-format +msgid "_wait_for_luncopy: LUNcopy %(luncopyname)s status is %(status)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:688 +#, python-format +msgid "create_cloned_volume: src volume: %(src)s, tgt volume: %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:697 +#, python-format +msgid "Source volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:739 +#, python-format +msgid "" +"extend_volume: extended volume name: %(extended_name)s new added volume " +"name: %(added_name)s new added volume size: %(added_size)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:747 +#, python-format +msgid "extend_volume: volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:779 +#, python-format +msgid "create_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:785 +msgid "create_snapshot: Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:792 +#, python-format +msgid "create_snapshot: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:855 +#, python-format +msgid "delete_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:865 +#, python-format +msgid "" +"delete_snapshot: Can not delete snapshot %s for it is a source LUN of " +"LUNCopy." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:873 +#, python-format +msgid "delete_snapshot: Snapshot %(snap)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:916 +#, python-format +msgid "" +"%(func)s: %(msg)s\n" +"CLI command: %(cmd)s\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:933 +#, python-format +msgid "map_volume: Volume %s was not found." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1079 +#, python-format +msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1102 +#, python-format +msgid "remove_map: Host %s does not exist." 
+msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1106 +#, python-format +msgid "remove_map: Volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1119 +#, python-format +msgid "remove_map: No map between host %(host)s and volume %(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1138 +#, python-format +msgid "" +"_delete_map: There are IOs accessing the system. Retry to delete host map" +" %(mapid)s 10s later." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1146 +#, python-format +msgid "" +"_delete_map: Failed to delete host map %(mapid)s.\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1185 +msgid "_update_volume_stats: Updating volume stats." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1277 +msgid "_check_conf_file: Config file invalid. StoragePool must be specified." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1311 +msgid "" +"_get_device_type: The driver only supports Dorado5100 and Dorado 2100 G2 " +"now." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1389 +#, python-format +msgid "" +"create_volume_from_snapshot: %(device)s does not support create volume " +"from snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1396 +#, python-format +msgid "create_cloned_volume: %(device)s does not support clone volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1404 +#, python-format +msgid "extend_volume: %(device)s does not support extend volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1413 +#, python-format +msgid "create_snapshot: %(device)s does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:132 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:142 +#, python-format +msgid "Failed getting details for pool %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:179 +msgid "do_setup: No configured nodes." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:182 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:186 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:190 +msgid "Unable to determine system name" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:193 +msgid "Unable to determine system id" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:196 +msgid "Unable to determine pool extent size" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:203 +#: cinder/volume/drivers/netapp/iscsi.py:122 +#: cinder/volume/drivers/netapp/nfs.py:639 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:157 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:209 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:217 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:225 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:235 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:254 +msgid "The connector does not contain the required information." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:277 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:297 +msgid "CHAP secret exists for host but CHAP is disabled" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:302 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:314 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:316 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:333 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:342 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:365 +msgid "" +"Could not get FC connection information for the host-volume connection. " +"Is the host configured properly for FC connections?" 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:380 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:385 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:403 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:411 +msgid "terminate_connection: Failed to get host name from connector." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:421 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:447 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:459 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:470 +#, python-format +msgid "enter: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:474 +msgid "extend_volume: Extending a volume with snapshots is not supported." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:481 +#, python-format +msgid "leave: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:497 +#, python-format +msgid "enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:523 +#, python-format +msgid "leave: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:540 +#, python-format +msgid "" +"enter: retype: id=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:581 +#, python-format +msgid "" +"exit: retype: ild=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:622 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:623 +msgid "_update_volume_stats: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:44 +#, python-format +msgid "Could not find key in output of command %(cmd)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:64 +#, python-format +msgid "Failed to get code level (%s)." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:86 +#, python-format +msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:143 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:165 +#, python-format +msgid "Failed to find host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:178 +#, python-format +msgid "enter: get_host_from_connector: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:207 +#, python-format +msgid "leave: get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:218 +#, python-format +msgid "enter: create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:224 +msgid "create_host: Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:235 +msgid "create_host: No initiators or wwpns supplied." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:265 +#, python-format +msgid "leave: create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:275 +#, python-format +msgid "enter: map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:301 +#, python-format +msgid "" +"leave: map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host " +"%(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:311 +#, python-format +msgid "enter: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:318 +#, python-format +msgid "unmap_vol_from_host: No mapping of volume %(vol_name)s to any host found." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:324 +#, python-format +msgid "" +"unmap_vol_from_host: Multiple mappings of volume %(vol_name)s found, no " +"host specified." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:336 +#, python-format +msgid "" +"unmap_vol_from_host: No mapping of volume %(vol_name)s to host %(host) " +"found." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:348 +#, python-format +msgid "leave: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:377 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:383 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:390 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:397 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:402 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:408 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:417 +#, python-format +msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:452 +msgid "Protocol must be specified as ' iSCSI' or ' FC'." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:495 +#, python-format +msgid "enter: create_vdisk: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:498 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:525 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping%(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:535 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within theallotted %(to)d " +"seconds timeout. Terminating." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:544 +#, python-format +msgid "" +"enter: run_flashcopy: execute FlashCopy from source %(source)s to target " +"%(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:552 +#, python-format +msgid "leave: run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:572 +#, python-format +msgid "Loopcall: _check_vdisk_fc_mappings(), vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:595 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:631 +#, python-format +msgid "Calling _ensure_vdisk_no_fc_mappings: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:639 +#, python-format +msgid "enter: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:641 +#, python-format +msgid "Tried to delete non-existant vdisk %s." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:645 +#, python-format +msgid "leave: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:649 +#, python-format +msgid "enter: create_copy: snapshot %(src)s to %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:654 +#, python-format +msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:669 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt)s from vdisk %(src)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:691 +msgid "migrate_volume started without a vdisk copy in the expected pool." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:743 +#, python-format +msgid "" +"Ignore change IO group as storage code level is %(code_level)s, below " +"then 6.4.0.0" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:35 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:211 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:244 +#, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:53 +#, python-format +msgid "Expected no output from CLI command %(cmd)s, got %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:65 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:256 +#, python-format +msgid "" +"Failed to parse CLI output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:142 +msgid "Must pass wwpn or host to lsfabric." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:158 +#, python-format +msgid "Did not find success message nor error for %(fun)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:163 +msgid "" +"storwize_svc_multihostmap_enabled is set to False, not allowing multi " +"host mapping." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:347 +#, python-format +msgid "Did not find expected key %(key)s in %(fun)s: %(raw)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:382 +#, python-format +msgid "" +"Unexpected CLI response: header/row mismatch. header: %(header)s, row: " +"%(row)s" +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:419 +#, python-format +msgid "No element by given name %s." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:440 +msgid "Not a valid value for NaElement." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:444 +msgid "NaElement name cannot be null." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:468 +msgid "Type cannot be converted into NaElement." 
+msgstr "" + +#: cinder/volume/drivers/netapp/common.py:75 +msgid "Required configuration not found" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:103 +#, python-format +msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:109 +#, python-format +msgid "Storage family %s is not supported" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:116 +#, python-format +msgid "No default storage protocol found for storage family %(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:123 +#, python-format +msgid "" +"Protocol %(storage_protocol)s is not supported for storage family " +"%(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:130 +#, python-format +msgid "" +"NetApp driver of family %(storage_family)s and protocol " +"%(storage_protocol)s loaded" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:139 +msgid "Only loading netapp drivers supported." +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:158 +#, python-format +msgid "" +"The configured NetApp driver is deprecated. Please refer the link to " +"resolve the issue '%s'." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:69 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:105 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:150 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:166 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:175 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:191 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:227 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:232 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:238 +#, python-format +msgid "Failed to get LUN target details for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:249 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:252 +#, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:290 +#, python-format +msgid "Snapshot %s deletion successful" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:310 +#: cinder/volume/drivers/netapp/iscsi.py:565 +#: cinder/volume/drivers/netapp/nfs.py:99 +#: cinder/volume/drivers/netapp/nfs.py:206 +#, python-format +msgid "Resizing %s failed. Cleaning volume." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:325 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:412 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:431 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:511 +msgid "Object is not a NetApp LUN." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:543 +#, python-format +msgid "Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:545 +#, python-format +msgid "Error getting lun attribute. Exception: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:600 +#, python-format +msgid "No need to extend volume %s as it is already the requested new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:606 +#, python-format +msgid "Resizing lun %s directly to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:633 +#, python-format +msgid "Lun %(path)s geometry failed. Message - %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:662 +#, python-format +msgid "Moving lun %(name)s to %(new_name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:677 +#, python-format +msgid "Resizing lun %s using sub clone to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:684 +#, python-format +msgid "%s cannot be sub clone resized as it is hosted on compressed volume" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:690 +#, python-format +msgid "%s cannot be sub clone resized as it contains no blocks." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:707 +#, python-format +msgid "Post clone resize lun %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:718 +#, python-format +msgid "Failure staging lun %s to tmp." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:723 +#, python-format +msgid "Failure moving new cloned lun to %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:727 +#, python-format +msgid "Failure deleting staged tmp lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:730 +#, python-format +msgid "Unknown exception in post clone resize lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:732 +#, python-format +msgid "Exception details: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:736 +msgid "Getting lun block count." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:741 +#, python-format +msgid "Failure getting lun info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:785 +#, python-format +msgid "Failed to get vol with required size and extra specs for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:796 +#, python-format +msgid "Error provisioning vol %(name)s on %(volume)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:841 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:982 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:986 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1087 +msgid "Cluster ssc is not updated. No volume stats found." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1149 +#: cinder/volume/drivers/netapp/nfs.py:1080 +msgid "Unsupported ONTAP version. ONTAP version 7.3.1 and above is supported." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1153 +#: cinder/volume/drivers/netapp/nfs.py:1084 +#: cinder/volume/drivers/netapp/utils.py:320 +msgid "Api version could not be determined." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1164 +#, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1273 +#, python-format +msgid "Error finding luns for volume %s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1390 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1393 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1456 +msgid "Volume refresh job already running. Returning..." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1462 +#, python-format +msgid "Error refreshing vol capacity. Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1470 +#, python-format +msgid "Refreshing capacity info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:104 +#: cinder/volume/drivers/netapp/nfs.py:211 +#, python-format +msgid "NFS file %s not discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:224 +#, python-format +msgid "Copied image to volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:230 +#, python-format +msgid "Registering image in cache %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:237 +#, python-format +msgid "" +"Exception while registering image %(image_id)s in cache. Exception: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:250 +#, python-format +msgid "Found cache file for image %(image_id)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:263 +#, python-format +msgid "Cloning img from cache for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:271 +msgid "Image cache cleaning in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:282 +msgid "Image cache cleaning in progress." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:293 +#, python-format +msgid "Cleaning cache for share %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:298 +#, python-format +msgid "Files to be queued for deletion %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:305 +#, python-format +msgid "Exception during cache cleaning %(share)s. Message - %(ex)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:311 +msgid "Image cache cleaning done." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:336 +#, python-format +msgid "Bytes to free %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:343 +#, python-format +msgid "Delete file path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:358 +#, python-format +msgid "Deleting file at path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:363 +#, python-format +msgid "Exception during deleting %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:395 +#, python-format +msgid "Unexpected exception in cloning image %(image_id)s. 
Message: %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:411 +#, python-format +msgid "Cloning image %s from cache" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:415 +#, python-format +msgid "Cache share: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:425 +#, python-format +msgid "Unexpected exception during image cloning in share %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:431 +#, python-format +msgid "Cloning image %s directly in share" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:436 +#, python-format +msgid "Share is cloneable %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:443 +#, python-format +msgid "Image is raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:450 +#, python-format +msgid "Image will locally be converted to raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:457 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:467 +#, python-format +msgid "Performing post clone for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:474 +msgid "NFS file could not be discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:478 +msgid "Checking file for resize" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:482 +#, python-format +msgid "Resizing file to %sG" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:488 +msgid "Resizing image file failed." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:510 +msgid "Discover file retries exhausted." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:529 +#, python-format +msgid "Image location not in the expected format %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:557 +#, python-format +msgid "Found possible share matches %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:561 +msgid "Unexpected exception while short listing used share." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:599 +#, python-format +msgid "Extending volume %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:710 +#, python-format +msgid "Shares on vserver %s will only be used for provisioning." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:714 +#: cinder/volume/drivers/netapp/nfs.py:892 +msgid "No vserver set in config. SSC will be disabled." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:757 +#, python-format +msgid "Exception creating vol %(name)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:765 +#, python-format +msgid "Volume %s could not be created on shares." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:815 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:856 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:862 +#, python-format +msgid "" +"Cloning with params volume %(volume)s, src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:924 +msgid "No cluster ssc stats found. Wait for next volume stats update." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:941 +msgid "No shares found hence skipping ssc refresh." 
+msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:978 +#: cinder/volume/drivers/netapp/nfs.py:1221 +#, python-format +msgid "Shortlisted del elg files %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:983 +#: cinder/volume/drivers/netapp/nfs.py:1226 +#, python-format +msgid "Getting file usage for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:988 +#: cinder/volume/drivers/netapp/nfs.py:1231 +#, python-format +msgid "file-usage for path %(path)s is %(bytes)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1005 +#: cinder/volume/drivers/netapp/nfs.py:1268 +#, python-format +msgid "Share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1007 +#: cinder/volume/drivers/netapp/nfs.py:1270 +#, python-format +msgid "No share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1038 +#, python-format +msgid "Found volume %(vol)s for share %(share)s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1129 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1139 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:241 +#, python-format +msgid "Unexpected error while creating ssc vol list. Message - %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:272 +#, python-format +msgid "Exception querying aggr options. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:313 +#, python-format +msgid "Exception querying sis information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:347 +#, python-format +msgid "Exception querying mirror information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:379 +#, python-format +msgid "Exception querying storage disk. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:421 +#, python-format +msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:455 +#, python-format +msgid "Successfully completed stale refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:482 +#, python-format +msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:488 +#, python-format +msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:500 +msgid "Backend not a VolumeDriver." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:502 +msgid "Backend server not NaServer." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:505 +msgid "ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:517 +msgid "refresh stale ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:616 +msgid "Fatal error: User not permitted to query NetApp volumes." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:623 +#, python-format +msgid "" +"The user does not have access or sufficient privileges to use all ssc " +"apis. The ssc features %s may not work as expected." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:122 +msgid "ems executed successfully." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:124 +#, python-format +msgid "Failed to invoke ems. 
Message : %s" +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:137 +msgid "" +"It is not the recommended way to use drivers by NetApp. Please use " +"NetAppDriver to achieve the functionality." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:160 +msgid "Requires an NaServer instance." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:317 +msgid "Unsupported Clustered Data ONTAP version." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:99 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:150 +#, python-format +msgid "Extending volume: %(id)s New size: %(size)s GB" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:166 +#, python-format +msgid "Volume %s does not exist, it seems it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:179 +#, python-format +msgid "Cannot delete snapshot %(origin): %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:190 +#, python-format +msgid "Creating temp snapshot of the original volume: %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:200 +#: cinder/volume/drivers/nexenta/nfs.py:200 +#, python-format +msgid "Volume creation failed, deleting created snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:205 +#: cinder/volume/drivers/nexenta/nfs.py:205 +#, python-format +msgid "Failed to delete zfs snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:223 +#, python-format +msgid "Enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:250 +#, python-format +msgid "Remote NexentaStor appliance at %s should be SSH-bound." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:267 +#, python-format +msgid "" +"Cannot send source snapshot %(src)s to destination %(dst)s. Reason: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:275 +#, python-format +msgid "" +"Cannot delete temporary source snapshot %(src)s on NexentaStor Appliance:" +" %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:281 +#, python-format +msgid "Cannot delete source volume %(volume)s on NexentaStor Appliance: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:318 +#, python-format +msgid "Snapshot %s does not exist, it seems it was already deleted." 
+msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:439 +#: cinder/volume/drivers/windows/windows_utils.py:230 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:449 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:461 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:471 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:481 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:514 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:522 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:83 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:88 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:89 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:90 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:96 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:85 +#, python-format +msgid "Volume %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:89 +#, python-format +msgid "Folder %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:114 +#, python-format +msgid "Creating folder on Nexenta Store %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:146 +#, python-format +msgid "Cannot destroy created folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:176 +#, python-format +msgid "Cannot destroy cloned folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:227 +#, python-format +msgid "Folder %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:237 +#: cinder/volume/drivers/nexenta/nfs.py:268 +#, python-format +msgid "Snapshot %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:302 +#, python-format +msgid "Creating regular file: %s.This may take some time." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:313 +#, python-format +msgid "Regular file: %s created." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:365 +#, python-format +msgid "Sharing folder %s on Nexenta Store" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:393 +#, python-format +msgid "Shares loaded: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/utils.py:46 +#, python-format +msgid "Invalid value: \"%s\"" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:93 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:99 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:107 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:137 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:190 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:246 +#, python-format +msgid "Snapshot info: %(name)s => %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:321 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:79 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:166 +#, python-format +msgid "Invalid hp3parclient version. Version %s or greater required." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:179 +#, python-format +msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:193 +#, python-format +msgid "HP3PARCommon %(common_ver)s, hp3parclient %(rest_ver)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:212 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:494 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:228 +#, python-format +msgid "Failed to get domain because CPG (%s) doesn't exist on array." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:247 +#, python-format +msgid "Error extending volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:347 +#, python-format +msgid "command %s failed" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:390 +#, python-format +msgid "Error running ssh command: %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:586 +#, python-format +msgid "VV Set %s does not exist." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:633 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:684 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:752 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1004 +#, python-format +msgid "Failure in update_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1019 +#, python-format +msgid "Failure in clear_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1031 +#, python-format +msgid "Error attaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1039 +#, python-format +msgid "Error detaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:124 +#, python-format +msgid "Invalid IP address format '%s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:158 +#, python-format +msgid "" +"Found invalid iSCSI IP address(s) in configuration option(s) " +"hp3par_iscsi_ips or iscsi_ip_address '%s.'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:164 +msgid "At least one valid iSCSI IP address must be set." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:266 +msgid "Least busy iSCSI port not found, using first iSCSI port in list." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:75 +#, python-format +msgid "Failure while invoking function: %(func)s. Error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:162 +#, python-format +msgid "Error while terminating session: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:165 +msgid "Successfully established connection to the server." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:172 +#, python-format +msgid "Error while logging out the user: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:218 +#, python-format +msgid "" +"Not authenticated error occurred. Will create session and try API call " +"again: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:258 +#, python-format +msgid "Task: %(task)s progress: %(prog)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:262 +#, python-format +msgid "Task %s status: success." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:266 +#: cinder/volume/drivers/vmware/api.py:271 +#, python-format +msgid "Task: %(task)s failed with error: %(err)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:290 +msgid "Lease is ready." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:294 +msgid "Lease initializing..." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:304 +#, python-format +msgid "Error: unknown lease state %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:51 +#, python-format +msgid "Read %(bytes)s out of %(max)s from ThreadSafePipe." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:56 +#, python-format +msgid "Completed transfer of size %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:102 +#, python-format +msgid "Initiating image service update on image: %(image)s with meta: %(meta)s" +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:117 +#, python-format +msgid "Glance image: %s is now active." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:123 +#, python-format +msgid "Glance image: %s is in killed state." 
+msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:132 +#, python-format +msgid "Glance image %(id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:171 +#, python-format +msgid "" +"Exception during HTTP connection close in VMwareHTTPWrite. Exception is " +"%s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:203 +#: cinder/volume/drivers/vmware/read_write_util.py:292 +msgid "Could not retrieve URL from lease." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:206 +#, python-format +msgid "Opening vmdk url: %s for write." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:231 +#, python-format +msgid "Written %s bytes to vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:242 +#: cinder/volume/drivers/vmware/read_write_util.py:318 +#, python-format +msgid "Updating progress to %s percent." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:258 +#: cinder/volume/drivers/vmware/read_write_util.py:334 +msgid "Lease released." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:260 +#: cinder/volume/drivers/vmware/read_write_util.py:336 +#, python-format +msgid "Lease is already in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:295 +#, python-format +msgid "Opening vmdk url: %s for read." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:307 +#, python-format +msgid "Read %s bytes from vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:150 +#, python-format +msgid "Error(s): %s occurred in the call to RetrievePropertiesEx." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:189 +#, python-format +msgid "No such SOAP method %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:198 +#, python-format +msgid "httplib error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:209 +#, python-format +msgid "Socket error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:218 +#, python-format +msgid "Type error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:225 +#, python-format +msgid "Error in %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:112 +#, python-format +msgid "Returning spec value %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:115 +#, python-format +msgid "Invalid spec value: %s specified." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:118 +#, python-format +msgid "Returning default spec value: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:169 +#, python-format +msgid "%s not set." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:174 +#, python-format +msgid "Successfully setup driver: %(driver)s for server: %(ip)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:219 +msgid "Backing not available, no operation to be performed." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:287 +#, python-format +msgid "" +"Unable to pick datastore to accommodate %(size)s bytes from the " +"datastores: %(dss)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:293 +#, python-format +msgid "" +"Selected datastore: %(datastore)s with %(host_count)d connected host(s) " +"for the volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:375 +#, python-format +msgid "" +"Unable to find suitable datastore for volume of size: %(vol)s GB under " +"host: %(host)s. 
More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:385 +#, python-format +msgid "Unable to find host to accommodate a disk of size: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:412 +#, python-format +msgid "" +"Unable to find suitable datastore for volume: %(vol)s under host: " +"%(host)s. More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:422 +#, python-format +msgid "Unable to create volume: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:441 +#, python-format +msgid "The instance: %s for which initialize connection is called, exists." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:448 +#, python-format +msgid "There is no backing for the volume: %s. Need to create one." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:456 +msgid "The instance for which initialize connection is called, does not exist." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:461 +#, python-format +msgid "Trying to boot from an empty volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:470 +#, python-format +msgid "" +"Returning connection_info: %(info)s for volume: %(volume)s with " +"connector: %(connector)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:518 +#, python-format +msgid "Snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:523 +#, python-format +msgid "There is no backing, so will not create snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:528 +#, python-format +msgid "Successfully created snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:549 +#, python-format +msgid "Delete snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:554 +#, python-format +msgid "There is no backing, and so there is no snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:558 +#, python-format +msgid "Successfully deleted snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:586 +#, python-format +msgid "Successfully cloned new backing: %(back)s from source VMDK file: %(vmdk)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:603 +#, python-format +msgid "" +"There is no backing for the source volume: %(svol)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:633 +#, python-format +msgid "" +"There is no backing for the source snapshot: %(snap)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:642 +#: cinder/volume/drivers/vmware/vmdk.py:982 +#, python-format +msgid "" +"There is no snapshot point for the snapshoted volume: %(snap)s. Not " +"creating any backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:678 +#, python-format +msgid "Cannot create image of disk format: %s. Only vmdk disk format is accepted." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:713 +#: cinder/volume/drivers/vmware/vmdk.py:771 +#, python-format +msgid "Fetching glance image: %(id)s to server: %(host)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:722 +#: cinder/volume/drivers/vmware/vmdk.py:792 +#, python-format +msgid "Done copying image: %(id)s to volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:725 +#, python-format +msgid "" +"Exception in copy_image_to_volume: %(excep)s. Deleting the backing: " +"%(back)s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:746 +#, python-format +msgid "Exception in _select_ds_for_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:749 +#, python-format +msgid "Selected datastore %(ds)s for new volume of size %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:784 +#, python-format +msgid "Exception in copy_image_to_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:787 +#, python-format +msgid "Deleting the backing: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:808 +#, python-format +msgid "Copy glance image: %s to create new volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:842 +msgid "Upload to glance of attached volume is not supported." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:847 +#, python-format +msgid "Copy Volume: %s to new image." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:853 +#, python-format +msgid "Backing not found, creating for volume: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:872 +#, python-format +msgid "Done copying volume %(vol)s to a new image %(img)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:922 +#, python-format +msgid "Relocating volume: %(backing)s to %(ds)s and %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:961 +#: cinder/volume/drivers/vmware/volumeops.py:630 +#, python-format +msgid "Successfully created clone: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:974 +#, python-format +msgid "" +"There is no backing for the snapshoted volume: %(snap)s. Not creating any" +" backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1010 +#, python-format +msgid "" +"There is no backing for the source volume: %(src)s. Not creating any " +"backing for volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1018 +#, python-format +msgid "Linked clone of source volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:94 +#, python-format +msgid "Downloading image: %s from glance image server as a flat vmdk file." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:107 +#: cinder/volume/drivers/vmware/vmware_images.py:126 +#, python-format +msgid "Downloaded image: %s from glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:113 +#, python-format +msgid "Downloading image: %s from glance image server using HttpNfc import." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:132 +#, python-format +msgid "Uploading image: %s to the Glance image server using HttpNfc export." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:158 +#, python-format +msgid "Uploaded image: %s to the Glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:87 +#, python-format +msgid "Did not find any backing with name: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:94 +#, python-format +msgid "Deleting the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:97 +#, python-format +msgid "Initiated deletion of VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:99 +#, python-format +msgid "Deleted the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:237 +#, python-format +msgid "There are no valid datastores attached to %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:289 +#, python-format +msgid "" +"Creating folder: %(child_folder_name)s under parent folder: " +"%(parent_folder)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:306 +#, python-format +msgid "Child folder already present: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:314 +#, python-format +msgid "Created child folder: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:365 +#, python-format +msgid "Spec for creating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:383 +#, python-format +msgid "" +"Creating volume backing name: %(name)s disk_type: %(disk_type)s size_kb: " +"%(size_kb)s at folder: %(folder)s resourse pool: %(resource_pool)s " +"datastore name: %(ds_name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:395 +#, python-format +msgid "Initiated creation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:398 +#, python-format +msgid "Successfully created volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:438 +#, python-format +msgid "Spec for relocating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:452 +#, python-format +msgid "" +"Relocating backing: %(backing)s to datastore: %(ds)s and resource pool: " +"%(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:462 +#, python-format +msgid "Initiated relocation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:464 +#, python-format +msgid "" +"Successfully relocated volume backing: %(backing)s to datastore: %(ds)s " +"and resource pool: %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:474 +#, python-format +msgid "Moving backing: %(backing)s to folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:479 +#, python-format +msgid "Initiated move of volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:482 +#, python-format +msgid "Successfully moved volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:494 +#, python-format +msgid "Snapshoting backing: %(backing)s with name: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:501 +#, python-format +msgid "Initiated snapshot of volume backing: %(backing)s named: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:505 +#, python-format +msgid "Successfully created snapshot: %(snap)s for volume backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:553 +#, python-format +msgid "Deleting the snapshot: %(name)s from backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:558 +#, python-format +msgid "" +"Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " +"delete anything." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:565 +#, python-format +msgid "Initiated snapshot: %(name)s deletion for backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:569 +#, python-format +msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:597 +#, python-format +msgid "Spec for cloning the backing: %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:613 +#, python-format +msgid "" +"Creating a clone of backing: %(back)s, named: %(name)s, clone type: " +"%(type)s from snapshot: %(snap)s on datastore: %(ds)s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:627 +#, python-format +msgid "Initiated clone of backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:638 +#, python-format +msgid "Deleting file: %(file)s under datacenter: %(dc)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:646 +#, python-format +msgid "Initiated deletion via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:648 +#, python-format +msgid "Successfully deleted file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:701 +msgid "Copying disk data before snapshot of the VM" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:711 +#, python-format +msgid "Initiated copying disk data via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:713 +#, python-format +msgid "Successfully copied disk at: %(src)s to: %(dest)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:722 +#, python-format +msgid "Deleting vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:729 +#, python-format +msgid "Initiated deleting vmdk file via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:731 +#, python-format +msgid "Deleted vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/windows/windows.py:102 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:47 +#, python-format +msgid "" +"check_for_setup_error: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:53 +msgid "check_for_setup_error: there is no ISCSI traffic listening." +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:63 +#, python-format +msgid "" +"get_host_information: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:73 +#, python-format +msgid "" +"get_host_information: the ISCSI target information could not be " +"retrieved. WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:105 +#, python-format +msgid "" +"associate_initiator_with_iscsi_target: an association between initiator: " +"%(init)s and target name: %(target)s could not be established. WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:123 +#, python-format +msgid "" +"delete_iscsi_target: error when deleting the iscsi target associated with" +" target name: %(target)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:139 +#, python-format +msgid "" +"create_volume: error when creating the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:157 +#, python-format +msgid "" +"delete_volume: error when deleting the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:177 +#, python-format +msgid "" +"create_snapshot: error when creating the snapshot name: %(vol_name)s . 
" +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:193 +#, python-format +msgid "" +"create_volume_from_snapshot: error when creating the volume name: " +"%(vol_name)s from snapshot name: %(snap_name)s. WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:208 +#, python-format +msgid "" +"delete_snapshot: error when deleting the snapshot name: %(snap_name)s . " +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:223 +#, python-format +msgid "" +"create_iscsi_target: error when creating iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:240 +#, python-format +msgid "" +"remove_iscsi_target: error when deleting iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:255 +#, python-format +msgid "" +"add_disk_to_target: error adding disk associated to volume : %(vol_name)s" +" to the target name: %(tar_name)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:273 +#, python-format +msgid "" +"copy_vhd_disk: error when copying disk from source path : %(src_path)s to" +" destination path: %(dest_path)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:290 +#, python-format +msgid "" +"extend: error when extending the volume: %(vol_name)s .WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/flows/common.py:52 +#, python-format +msgid "Restoring source %(source_volid)s status to %(status)s" +msgstr "" + +#: cinder/volume/flows/common.py:58 +#, python-format +msgid "" +"Failed setting source volume %(source_volid)s back to its initial " +"%(source_status)s status" +msgstr "" + +#: cinder/volume/flows/common.py:83 +#, python-format +msgid "Updating volume: %(volume_id)s with %(update)s due to: %(reason)s" +msgstr "" + +#: cinder/volume/flows/common.py:90 +#: cinder/volume/flows/manager/create_volume.py:676 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(update)s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:81 +#, python-format +msgid "Originating snapshot status must be one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:103 +#, python-format +msgid "" +"Unable to create a volume from an originating source volume when its " +"status is not one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:126 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than the snapshot size " +"%(snap_size)sGB. They must be >= original snapshot size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:135 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than original volume size " +"%(source_size)sGB. They must be >= original volume size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:144 +#, python-format +msgid "Volume size %(size)s must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:186 +#, python-format +msgid "" +"Size of specified image %(image_size)sGB is larger than volume size " +"%(volume_size)sGB." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:194 +#, python-format +msgid "" +"Volume size %(volume_size)sGB cannot be smaller than the image minDisk " +"size %(min_disk)sGB." 
+msgstr "" + +#: cinder/volume/flows/api/create_volume.py:212 +#, python-format +msgid "Metadata property key %s greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:217 +#, python-format +msgid "Metadata property key %s value greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:254 +#, python-format +msgid "Availability zone '%s' is invalid" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:267 +msgid "Volume must be in the same availability zone as the snapshot" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:276 +msgid "Volume must be in the same availability zone as the source volume" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:315 +msgid "Volume type will be changed to be the same as the source volume." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:463 +#, python-format +msgid "Failed destroying volume entry %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:546 +#, python-format +msgid "Failed rolling back quota for %s reservations" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:590 +#, python-format +msgid "Failed to update quota for deleting volume: %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:678 +#: cinder/volume/flows/manager/create_volume.py:192 +#, python-format +msgid "Volume %s: create failed" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:682 +msgid "Unexpected build error:" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:105 +#, python-format +msgid "" +"Volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d due to " +"%(reason)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:124 +#, python-format +msgid "Volume %s: re-scheduled" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:141 +#, python-format +msgid "Updating volume %(volume_id)s with %(update)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:146 +#, python-format +msgid "Volume %s: resetting 'creating' status failed." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:165 +#, python-format +msgid "Volume %s: rescheduling failed" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:308 +#, python-format +msgid "" +"Failed notifying about the volume action %(event)s for volume " +"%(volume_id)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:345 +#, python-format +msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:347 +#, python-format +msgid "" +"Failed updating volume %(vol_id)s metadata using the provided " +"%(src_type)s %(src_id)s metadata" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:405 +#, python-format +msgid "" +"Failed fetching snapshot %(snapshot_id)s bootable flag using the provided" +" glance snapshot %(snapshot_ref_id)s volume reference" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:418 +#, python-format +msgid "Marking volume %s as bootable." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:421 +#, python-format +msgid "Failed updating volume %(volume_id)s bootable flag to true" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:448 +#, python-format +msgid "" +"Attempting download of %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s." 
+msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:455 +#: cinder/volume/flows/manager/create_volume.py:466 +#, python-format +msgid "" +"Failed to copy image %(image_id)s to volume: %(volume_id)s, error: " +"%(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:461 +#, python-format +msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:475 +#, python-format +msgid "" +"Downloaded image %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s successfully." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:512 +#, python-format +msgid "" +"Creating volume glance metadata for volume %(volume_id)s backed by image " +"%(image_id)s with: %(vol_metadata)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:526 +#, python-format +msgid "" +"Cloning %(volume_id)s from image %(image_id)s at location " +"%(image_location)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:552 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(updates)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:574 +#, python-format +msgid "Unable to create volume. Volume driver %s not initialized" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:588 +#, python-format +msgid "" +"Volume %(volume_id)s: being created using %(functor)s with specification:" +" %(volume_spec)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:611 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with creation provided " +"model %(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:619 +#, python-format +msgid "Volume %s: creating export" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:633 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with driver provided model " +"%(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:680 +#, python-format +msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" +msgstr "" + +#~ msgid "Error retrieving volume status: %s" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get system name" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get storage pool data" +#~ msgstr "" + +#~ msgid "Cannot find any Fibre Channel HBAs" +#~ msgstr "" + +#~ msgid "Volume status must be available or error" +#~ msgstr "" + +#~ msgid "No backend config with id %s" +#~ msgstr "" + +#~ msgid "No sm_flavor called %s" +#~ msgstr "" + +#~ msgid "No sm_volume with id %s" +#~ msgstr "" + +#~ msgid "Error: %s" +#~ msgstr "" + +#~ msgid "Unexpected state while cloning %s" +#~ msgstr "" + +#~ msgid "iSCSI device not found at %s" +#~ msgstr "" + +#~ msgid "Fibre Channel device not found." +#~ msgstr "" + +#~ msgid "Uncaught exception" +#~ msgstr "" + +#~ msgid "Out reactor registered" +#~ msgstr "" + +#~ msgid "CONSUMER GOT %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT QUEUED %(data)s" +#~ msgstr "" + +#~ msgid "Could not create IPC directory %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT %(data)s" +#~ msgstr "" + +#~ msgid "May specify only one of snapshot, imageRef or source volume" +#~ msgstr "" + +#~ msgid "Volume size cannot be lesser than the Snapshot size" +#~ msgstr "" + +#~ msgid "Unable to clone volumes that are in an error state" +#~ msgstr "" + +#~ msgid "Clones currently must be >= original volume size." 
+#~ msgstr "" + +#~ msgid "Volume size '%s' must be an integer and greater than 0" +#~ msgstr "" + +#~ msgid "Size of specified image is larger than volume size." +#~ msgstr "" + +#~ msgid "Image minDisk size is larger than the volume size." +#~ msgstr "" + +#~ msgid "" +#~ msgstr "" + +#~ msgid "Availability zone is invalid" +#~ msgstr "" + +#~ msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +#~ msgstr "" + +#~ msgid "volume %s: creating from snapshot" +#~ msgstr "" + +#~ msgid "volume %s: creating from existing volume" +#~ msgstr "" + +#~ msgid "volume %s: creating from image" +#~ msgstr "" + +#~ msgid "volume %s: creating" +#~ msgstr "" + +#~ msgid "Setting volume: %s status to error after failed image copy." +#~ msgstr "" + +#~ msgid "Unexpected Error: " +#~ msgstr "" + +#~ msgid "volume %s: creating export" +#~ msgstr "" + +#~ msgid "volume %s: create failed" +#~ msgstr "" + +#~ msgid "volume %s: created successfully" +#~ msgstr "" + +#~ msgid "volume %s: Error trying to reschedule create" +#~ msgstr "" + +#~ msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +#~ msgstr "" + +#~ msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +#~ msgstr "" + +#~ msgid "Downloaded image %(image_id)s to %(volume_id)s successfully." +#~ msgstr "" + +#~ msgid "Array Mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "LUN %(lun)s of size %(size)s MB is created." +#~ msgstr "" + +#~ msgid "Array mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "Failed to attach iser target for volume %(volume_id)s." +#~ msgstr "" + +#~ msgid "Fetching %s" +#~ msgstr "" + +#~ msgid "Link Local address is not found.:%s" +#~ msgstr "" + +#~ msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +#~ msgstr "" + +#~ msgid "Started %(name)s on %(_host)s:%(_port)s" +#~ msgstr "" + +#~ msgid "Unable to find a Fibre Channel volume device" +#~ msgstr "" + +#~ msgid "Volume device not found at %s" +#~ msgstr "" + +#~ msgid "Unable to find Volume Group: %s" +#~ msgstr "" + +#~ msgid "Failed to create Volume Group: %s" +#~ msgstr "" + +#~ msgid "snapshot %(snap_name)s: creating" +#~ msgstr "" + +#~ msgid "Running with CoraidDriver for ESM EtherCLoud" +#~ msgstr "" + +#~ msgid "Update session cookie %(session)s" +#~ msgstr "" + +#~ msgid "Message : %(message)s" +#~ msgstr "" + +#~ msgid "Error while trying to set group: %(message)s" +#~ msgstr "" + +#~ msgid "Unable to find group: %(group)s" +#~ msgstr "" + +#~ msgid "ESM urlOpen error" +#~ msgstr "" + +#~ msgid "JSON Error" +#~ msgstr "" + +#~ msgid "Request without URL" +#~ msgstr "" + +#~ msgid "Configure data : %s" +#~ msgstr "" + +#~ msgid "Configure response : %s" +#~ msgstr "" + +#~ msgid "Unable to retrive volume infos for volume %(volname)s" +#~ msgstr "" + +#~ msgid "Cannot login on Coraid ESM" +#~ msgstr "" + +#~ msgid "Fail to create volume %(volname)s" +#~ msgstr "" + +#~ msgid "Failed to delete volume %(volname)s" +#~ msgstr "" + +#~ msgid "Failed to Create Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "Failed to Delete Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "Failed to Create Volume from Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "fmt = %(fmt)s backed by: %(backing_file)s" +#~ msgstr "" + +#~ msgid "Expected image to be in raw format, but is %s" +#~ msgstr "" + +#~ msgid "volume group %s doesn't exist" +#~ msgstr "" + +#~ msgid "Error retrieving volume stats: %s" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Could not get system name" +#~ msgstr "" + +#~ msgid "CPG (%s) must 
be in a domain" +#~ msgstr "" + +#~ msgid "Error populating default encryption types!" +#~ msgstr "" + +#~ msgid "Unexpected error while running command." +#~ msgstr "" + +#~ msgid "Nexenta SA returned the error" +#~ msgstr "" + +#~ msgid "Ignored target group creation error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group member addition error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LU creation error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Copying metadata from snapshot %(snap_volume_id)s to %(volume_id)s" +#~ msgstr "" + +#~ msgid "Connection to glance failed" +#~ msgstr "" + +#~ msgid "Invalid snapshot" +#~ msgstr "" + +#~ msgid "Invalid input received" +#~ msgstr "" + +#~ msgid "Invalid volume type" +#~ msgstr "" + +#~ msgid "Invalid volume" +#~ msgstr "" + +#~ msgid "Invalid host" +#~ msgstr "" + +#~ msgid "Invalid auth key" +#~ msgstr "" + +#~ msgid "Invalid metadata" +#~ msgstr "" + +#~ msgid "Invalid metadata size" +#~ msgstr "" + +#~ msgid "Migration error" +#~ msgstr "" + +#~ msgid "Quota exceeded" +#~ msgstr "" + +#~ msgid "Connection to swift failed" +#~ msgstr "" + +#~ msgid "Volume migration failed" +#~ msgstr "" + +#~ msgid "SSH command injection detected" +#~ msgstr "" + +#~ msgid "Invalid qos specs" +#~ msgstr "" + +#~ msgid "debug in callback: %s" +#~ msgstr "" + +#~ msgid "Expected object of type: %s" +#~ msgstr "" + +#~ msgid "timefunc: '%(name)s' took %(total_time).2f secs" +#~ msgstr "" + +#~ msgid "base image still has %s snapshots so not deleting base image" +#~ msgstr "" + +#~ msgid "Failed to rename migration destination volume %(vol)s to %(name)s" +#~ msgstr "" + +#~ msgid "Resize volume \"%(name)s\" to %(size)s" +#~ msgstr "" + +#~ msgid "Volume \"%(name)s\" resized. New size is %(size)s" +#~ msgstr "" + +#~ msgid "Invalid snapshot backing file format: %s" +#~ msgstr "" + +#~ msgid "Extend volume from %(old_size) to %(new_size)" +#~ msgstr "" + +#~ msgid "pool %s doesn't exist" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Could not get system name." +#~ msgstr "" + +#~ msgid "Disk not found: %s" +#~ msgstr "" + +#~ msgid "read timed out" +#~ msgstr "" + +#~ msgid "check_for_setup_error." +#~ msgstr "" + +#~ msgid "check_for_setup_error: Can not get device type." +#~ msgstr "" + +#~ msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +#~ msgstr "" + +#~ msgid "_get_device_type: Storage Pool must be configured." +#~ msgstr "" + +#~ msgid "create_volume:volume name: %s." +#~ msgstr "" + +#~ msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +#~ msgstr "" + +#~ msgid "create_export: volume name:%s" +#~ msgstr "" + +#~ msgid "create_export:Volume %(name)s does not exist." +#~ msgstr "" + +#~ msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +#~ msgstr "" + +#~ msgid "terminate_connection:Host does not exist. Host name:%(host)s." +#~ msgstr "" + +#~ msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +#~ msgstr "" + +#~ msgid "create_snapshot:Device does not support snapshot." +#~ msgstr "" + +#~ msgid "create_snapshot:Resource pool needs 1GB valid size at least." +#~ msgstr "" + +#~ msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +#~ msgstr "" + +#~ msgid "create_snapshot:Snapshot does not exist. 
Snapshot name:%(name)s" +#~ msgstr "" + +#~ msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +#~ msgstr "" + +#~ msgid "delete_snapshot:Device does not support snapshot." +#~ msgstr "" + +#~ msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +#~ msgstr "" + +#~ msgid "_check_conf_file: %s" +#~ msgstr "" + +#~ msgid "Write login information to xml error. %s" +#~ msgstr "" + +#~ msgid "_get_login_info error. %s" +#~ msgstr "" + +#~ msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +#~ msgstr "" + +#~ msgid "_get_lun_set_info:%s" +#~ msgstr "" + +#~ msgid "_get_iscsi_info:%s" +#~ msgstr "" + +#~ msgid "CLI command:%s" +#~ msgstr "" + +#~ msgid "_execute_cli:%s" +#~ msgstr "" + +#~ msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +#~ msgstr "" + +#~ msgid "_get_tgt_iqn:iSCSI IP is %s." +#~ msgstr "" + +#~ msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +#~ msgstr "" + +#~ msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +#~ msgstr "" + +#~ msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +#~ msgstr "" + +#~ msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." +#~ msgstr "" + +#~ msgid "Ignored target creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group member addition error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LU creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LUN mapping entry addition error while ensuring export" +#~ msgstr "" + +#~ msgid "Invalid source volume %(reason)s." +#~ msgstr "" + +#~ msgid "The request is invalid." +#~ msgstr "" + +#~ msgid "Volume %(volume_id)s persistence file could not be found." +#~ msgstr "" + +#~ msgid "No disk at %(location)s" +#~ msgstr "" + +#~ msgid "Class %(class_name)s could not be found: %(exception)s" +#~ msgstr "" + +#~ msgid "Action not allowed." +#~ msgstr "" + +#~ msgid "Key pair %(key_name)s already exists." +#~ msgstr "" + +#~ msgid "Migration error: %(reason)s" +#~ msgstr "" + +#~ msgid "Maximum volume/snapshot size exceeded" +#~ msgstr "" + +#~ msgid "3PAR Host already exists: %(err)s. %(info)s" +#~ msgstr "" + +#~ msgid "Backup volume %(volume_id)s type not recognised." +#~ msgstr "" + +#~ msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s" +#~ msgstr "" + +#~ msgid "ssh_read: Read SSH timeout" +#~ msgstr "" + +#~ msgid "do_setup." +#~ msgstr "" + +#~ msgid "create_volume: volume name: %s." +#~ msgstr "" + +#~ msgid "delete_volume: volume name: %s." +#~ msgstr "" + +#~ msgid "create_cloned_volume: src volume: %(src)s tgt volume: %(tgt)s" +#~ msgstr "" + +#~ msgid "create_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" +#~ msgstr "" + +#~ msgid "delete_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" +#~ msgstr "" + +#~ msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Updating volume stats" +#~ msgstr "" + +#~ msgid "restore finished." +#~ msgstr "" + +#~ msgid "Error encountered during initialization of driver: %s" +#~ msgstr "" + +#~ msgid "Unabled to update stats, driver is uninitialized" +#~ msgstr "" + +#~ msgid "Snapshot file at %s does not exist." 
+#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %s does not exist" +#~ msgstr "" + +#~ msgid "Login to 3PAR array invalid" +#~ msgstr "" + +#~ msgid "There are no datastores present under %s." +#~ msgstr "" + +#~ msgid "Size for volume: %s not found, skipping secure delete." +#~ msgstr "" + +#~ msgid "Could not find attribute for LUN named %s" +#~ msgstr "" + +#~ msgid "Cleaning up incomplete backup operations" +#~ msgstr "" + +#~ msgid "Resetting volume %s to available (was backing-up)" +#~ msgstr "" + +#~ msgid "Resetting volume %s to error_restoring (was restoring-backup)" +#~ msgstr "" + +#~ msgid "Resetting backup %s to error (was creating)" +#~ msgstr "" + +#~ msgid "Resetting backup %s to available (was restoring)" +#~ msgstr "" + +#~ msgid "Resuming delete on backup: %s" +#~ msgstr "" + +#~ msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +#~ msgstr "" + +#~ msgid "create_backup finished. backup: %s" +#~ msgstr "" + +#~ msgid "delete_backup started, backup: %s" +#~ msgstr "" + +#~ msgid "delete_backup finished, backup %s deleted" +#~ msgstr "" + +#~ msgid "JSON transfer Error" +#~ msgstr "" + +#~ msgid "create volume error: %(err)s" +#~ msgstr "" + +#~ msgid "Create snapshot error." +#~ msgstr "" + +#~ msgid "Create luncopy error." +#~ msgstr "" + +#~ msgid "_find_host_lun_id transfer data error! " +#~ msgstr "" + +#~ msgid "ssh_read: Read SSH timeout." +#~ msgstr "" + +#~ msgid "There are no hosts in the inventory." +#~ msgstr "" + +#~ msgid "Unable to create volume: %(vol)s on the hosts: %(hosts)s." +#~ msgstr "" + +#~ msgid "Successfully cloned new backing: %s." +#~ msgstr "" + +#~ msgid "Successfully reverted clone: %(clone)s to snapshot: %(snapshot)s." +#~ msgstr "" + +#~ msgid "Copying backing files from %(src)s to %(dest)s." +#~ msgstr "" + +#~ msgid "Initiated copying of backing via task: %s." +#~ msgstr "" + +#~ msgid "Successfully copied backing to %s." +#~ msgstr "" + +#~ msgid "Registering backing at path: %s to inventory." +#~ msgstr "" + +#~ msgid "Initiated registring backing, task: %s." +#~ msgstr "" + +#~ msgid "Successfully registered backing: %s." +#~ msgstr "" + +#~ msgid "Reverting backing to snapshot: %s." +#~ msgstr "" + +#~ msgid "Initiated reverting snapshot via task: %s." +#~ msgstr "" + +#~ msgid "Successfully reverted to snapshot: %s." +#~ msgstr "" + +#~ msgid "Successfully copied disk data to: %s." +#~ msgstr "" + +#~ msgid "Error(s): %s occurred in the call to RetrieveProperties." +#~ msgstr "" + +#~ msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +#~ msgstr "" + +#~ msgid "Deploy v1 of the Cinder API. " +#~ msgstr "" + +#~ msgid "Deploy v2 of the Cinder API. " +#~ msgstr "" + +#~ msgid "_read_xml:%s" +#~ msgstr "" + +#~ msgid "request ip info is %s." +#~ msgstr "" + +#~ msgid "new str info is %s." +#~ msgstr "" + +#~ msgid "Failed to create iser target for volume %(volume_id)s." +#~ msgstr "" + +#~ msgid "Failed to remove iser target for volume %(volume_id)s." 
+#~ msgstr "" + +#~ msgid "rtstool is not installed correctly" +#~ msgstr "" + +#~ msgid "Creating iser_target for: %s" +#~ msgstr "" + +#~ msgid "Failed to create iser target for volume id:%(vol_id)s: %(e)s" +#~ msgstr "" + +#~ msgid "Removing iser_target for: %s" +#~ msgstr "" + +#~ msgid "Failed to remove iser target for volume id:%(vol_id)s: %(e)s" +#~ msgstr "" + +#~ msgid "Volume %s does not exist, it seems it was already deleted" +#~ msgstr "" + +#~ msgid "Executing zfs send/recv on the appliance" +#~ msgstr "" + +#~ msgid "zfs send/recv done, new volume %s created" +#~ msgstr "" + +#~ msgid "Failed to delete temp snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" + +#~ msgid "Failed to delete zfs recv snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" + +#~ msgid "rbd export-diff failed - %s" +#~ msgstr "" + +#~ msgid "rbd import-diff failed - %s" +#~ msgstr "" + +#~ msgid "%s is not on GPFS. Perhaps GPFS not mounted." +#~ msgstr "" + +#~ msgid "Folder %s does not exist, it seems it was already deleted." +#~ msgstr "" + +#~ msgid "No 'os-update_readonly_flag' was specified in request." +#~ msgstr "" + +#~ msgid "Volume 'readonly' flag must be specified in request as a boolean." +#~ msgstr "" + +#~ msgid "ISER provider_location not stored, using discovery" +#~ msgstr "" + +#~ msgid "Could not find iSER export for volume %s" +#~ msgstr "" + +#~ msgid "ISER Discovery: Found %s" +#~ msgstr "" + +#~ msgid "Failed to access the device on the path %(path)s: %(error)s." +#~ msgstr "" + +#~ msgid "iSER device not found at %s" +#~ msgstr "" + +#~ msgid "Found iSER node %(host_device)s (after %(tries)s rescans)." +#~ msgstr "" + +#~ msgid "Skipping ensure_export. No iser_target provisioned for volume: %s" +#~ msgstr "" + +#~ msgid "Skipping remove_export. No iser_target provisioned for volume: %s" +#~ msgstr "" + +#~ msgid "Downloading image: %s from glance image server." +#~ msgstr "" + +#~ msgid "Uploading image: %s to the Glance image server." +#~ msgstr "" + +#~ msgid "Invalid request body" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: prefix %s" +#~ msgstr "" + +#~ msgid "Schedule volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete schedule volume using flow: %s" +#~ msgstr "" + +#~ msgid "Create volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete create volume workflow" +#~ msgstr "" + +#~ msgid "Expected volume result not found" +#~ msgstr "" + +#~ msgid "Manager volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete manager volume workflow" +#~ msgstr "" + +#~ msgid "Unable to update stats, driver is uninitialized" +#~ msgstr "" + +#~ msgid "Bad reponse from server: %s" +#~ msgstr "" + +#~ msgid "%(flow)s has moved into state %(state)s from state %(old_state)s" +#~ msgstr "" + +#~ msgid "No request spec, will not reschedule" +#~ msgstr "" + +#~ msgid "No retry filter property or associated retry info, will not reschedule" +#~ msgstr "" + +#~ msgid "Retry info not present, will not reschedule" +#~ msgstr "" + +#~ msgid "Clear capabilities" +#~ msgstr "" + +#~ msgid "This usually means the volume was never succesfully created." +#~ msgstr "" + +#~ msgid "setting LU uppper (end) limit to %s" +#~ msgstr "" + +#~ msgid "Can't find lun or lun goup in array" +#~ msgstr "" + +#~ msgid "Volume to be restored to is smaller than the backup to be restored" +#~ msgstr "" + +#~ msgid "Volume driver '%(driver)s' not initialized." 
+#~ msgstr "" + +#~ msgid "in looping call" +#~ msgstr "" + +#~ msgid "Is the appropriate service running?" +#~ msgstr "" + +#~ msgid "Could not find another host" +#~ msgstr "" + +#~ msgid "Not enough allocatable volume gigabytes remaining" +#~ msgstr "" + +#~ msgid "Unable to update stats on non-intialized Volume Group: %s" +#~ msgstr "" + +#~ msgid "do_setup: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "migrate_volume started with more than one vdisk copy" +#~ msgstr "" + +#~ msgid "migrate_volume: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "Selected datastore: %s for the volume." +#~ msgstr "" + +#~ msgid "There are no valid datastores present under %s." +#~ msgstr "" + +#~ msgid "Unable to create volume, driver not initialized" +#~ msgstr "" + +#~ msgid "Migration %(migration_id)s could not be found." +#~ msgstr "" + +#~ msgid "Bad driver response status: %(status)s" +#~ msgstr "" + +#~ msgid "Instance %(instance_id)s could not be found." +#~ msgstr "" + +#~ msgid "Volume retype failed: %(reason)s" +#~ msgstr "" + +#~ msgid "SIGTERM received" +#~ msgstr "" + +#~ msgid "Child %(pid)d exited with status %(code)d" +#~ msgstr "" + +#~ msgid "_wait_child %d" +#~ msgstr "" + +#~ msgid "wait wrap.failed %s" +#~ msgstr "" + +#~ msgid "" +#~ "Report interval must be less than " +#~ "service down time. Current config: " +#~ ". Setting " +#~ "service_down_time to: %(new_service_down_time)s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target for volume %(name)s." +#~ msgstr "" + +#~ msgid "Updating iscsi target: %s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target %(name)s: %(e)s" +#~ msgstr "" + +#~ msgid "Caught '%(exception)s' exception." +#~ msgstr "" + +#~ msgid "Get code level failed" +#~ msgstr "" + +#~ msgid "do_setup: Could not get system name" +#~ msgstr "" + +#~ msgid "Failed to get license information." +#~ msgstr "" + +#~ msgid "do_setup: No configured nodes" +#~ msgstr "" + +#~ msgid "enter: _get_chap_secret_for_host: host name %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_chap_secret_for_host: host name " +#~ "%(host_name)s with secret %(chap_secret)s" +#~ msgstr "" + +#~ msgid "" +#~ "_create_host: Cannot clean host name. " +#~ "Host name is not unicode or string" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: %s" +#~ msgstr "" + +#~ msgid "leave: _get_host_from_connector: host %s" +#~ msgstr "" + +#~ msgid "enter: _create_host: host %s" +#~ msgstr "" + +#~ msgid "_create_host: No connector ports" +#~ msgstr "" + +#~ msgid "leave: _create_host: host %(host)s - %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +#~ msgstr "" + +#~ msgid "" +#~ "storwize_svc_multihostmap_enabled is set to " +#~ "False, Not allow multi host mapping" +#~ msgstr "" + +#~ msgid "volume %s mapping to multi host" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _map_vol_to_host: LUN %(result_lun)s, " +#~ "volume %(volume_name)s, host %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "leave: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "_create_host failed to return the host name." +#~ msgstr "" + +#~ msgid "_get_host_from_connector failed to return the host name for connector" +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to any host found." +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: Multiple mappings of " +#~ "volume %(vol_name)s found, no host " +#~ "specified." 
+#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to host %(host_name)s found" +#~ msgstr "" + +#~ msgid "protocol must be specified as ' iSCSI' or ' FC'" +#~ msgstr "" + +#~ msgid "enter: _create_vdisk: vdisk %s " +#~ msgstr "" + +#~ msgid "" +#~ "_create_vdisk %(name)s - did not find success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find success " +#~ "message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find mapping " +#~ "id in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to prepare FlashCopy" +#~ " from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "Unexecpted mapping status %(status)s for " +#~ "mapping %(id)s. Attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Mapping %(id)s prepare failed to " +#~ "complete within the allotted %(to)d " +#~ "seconds timeout. Terminating." +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s with " +#~ "exception %(ex)s" +#~ msgstr "" + +#~ msgid "_prepare_fc_map: %s" +#~ msgstr "" + +#~ msgid "" +#~ "_start_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "enter: _run_flashcopy: execute FlashCopy from" +#~ " source %(source)s to target %(target)s" +#~ msgstr "" + +#~ msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +#~ msgstr "" + +#~ msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %(src_vdisk)s (%(src_id)s) does not exist" +#~ msgstr "" + +#~ msgid "" +#~ "_create_copy: cannot get source vdisk " +#~ "%(src)s capacity from vdisk attributes " +#~ "%(attr)s" +#~ msgstr "" + +#~ msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_flashcopy_mapping_attributes: mapping " +#~ "%(fc_map_id)s, attributes %(attributes)s" +#~ msgstr "" + +#~ msgid "enter: _is_vdisk_defined: vdisk %s " +#~ msgstr "" + +#~ msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +#~ msgstr "" + +#~ msgid "enter: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "warning: Tried to delete vdisk %s but it does not exist." 
+#~ msgstr "" + +#~ msgid "leave: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "" +#~ "_add_vdisk_copy %(name)s - did not find" +#~ " success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "_get_vdisk_copy_attrs: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "_get_pool_attrs: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "enter: _execute_command_and_parse_attributes: command %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _execute_command_and_parse_attributes:\n" +#~ "command: %(cmd)s\n" +#~ "attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "_get_hdr_dic: attribute headers and values do not match.\n" +#~ " Headers: %(header)s\n" +#~ " Values: %(row)s" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ "stdout: %(out)s\n" +#~ "stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "Did not find expected column in %(fun)s: %(hdr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Volume size %(size)s cannot be lesser" +#~ " than the snapshot size %(snap_size)s. " +#~ "They must be >= original snapshot " +#~ "size." +#~ msgstr "" + +#~ msgid "" +#~ "Clones currently disallowed when %(size)s " +#~ "< %(source_size)s. They must be >= " +#~ "original volume size." +#~ msgstr "" + +#~ msgid "" +#~ "Size of specified image %(image_size)s " +#~ "is larger than volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "" +#~ "Image minDisk size %(min_disk)s is " +#~ "larger than the volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "Updating volume %(volume_id)s with %(update)s" +#~ msgstr "" + +#~ msgid "Volume %s: resetting 'creating' status failed" +#~ msgstr "" + +#~ msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s" +#~ msgstr "" + +#~ msgid "Marking volume %s as bootable" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting download of %(image_id)s " +#~ "(%(image_location)s) to volume %(volume_id)s" +#~ msgstr "" + +#~ msgid "" +#~ "Downloaded image %(image_id)s (%(image_location)s)" +#~ " to volume %(volume_id)s successfully" +#~ msgstr "" + +#~ msgid "" +#~ "Creating volume glance metadata for " +#~ "volume %(volume_id)s backed by image " +#~ "%(image_id)s with: %(vol_metadata)s" +#~ msgstr "" + +#~ msgid "" +#~ "Cloning %(volume_id)s from image %(image_id)s" +#~ " at location %(image_location)s" +#~ msgstr "" + diff --git a/cinder/locale/cinder.pot b/cinder/locale/cinder.pot index be9265796c..8a5111ad46 100644 --- a/cinder/locale/cinder.pot +++ b/cinder/locale/cinder.pot @@ -1,7463 +1,9562 @@ # Translations template for cinder. -# Copyright (C) 2012 ORGANIZATION +# Copyright (C) 2014 ORGANIZATION # This file is distributed under the same license as the cinder project. -# FIRST AUTHOR , 2012. +# FIRST AUTHOR , 2014. 
# #, fuzzy msgid "" msgstr "" -"Project-Id-Version: cinder 2012.2\n" +"Project-Id-Version: cinder jenkins.cinder.propose.translation.update.274\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2012-04-08 23:04+0000\n" +"POT-Creation-Date: 2014-02-03 06:16+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 0.9.6\n" +"Generated-By: Babel 1.3\n" -#: cinder/context.py:59 +#: cinder/context.py:61 #, python-format msgid "Arguments dropped when creating context: %s" msgstr "" -#: cinder/context.py:90 +#: cinder/context.py:102 #, python-format msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" msgstr "" -#: cinder/crypto.py:48 -msgid "Filename of root CA" +#: cinder/exception.py:66 cinder/brick/exception.py:33 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:88 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:107 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:112 +#, python-format +msgid "Volume driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:116 +#, python-format +msgid "Backup driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:120 +#, python-format +msgid "Connection to glance failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:124 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:129 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:133 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:137 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:141 +msgid "Volume driver not ready." +msgstr "" + +#: cinder/exception.py:145 cinder/brick/exception.py:74 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:150 +#, python-format +msgid "Invalid snapshot: %(reason)s" +msgstr "" + +#: cinder/exception.py:154 +#, python-format +msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:159 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:163 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:167 +msgid "The results are invalid." +msgstr "" + +#: cinder/exception.py:171 +#, python-format +msgid "Invalid input received: %(reason)s" +msgstr "" + +#: cinder/exception.py:175 +#, python-format +msgid "Invalid volume type: %(reason)s" +msgstr "" + +#: cinder/exception.py:179 +#, python-format +msgid "Invalid volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:183 +#, python-format +msgid "Invalid content type %(content_type)s." 
+msgstr "" + +#: cinder/exception.py:187 +#, python-format +msgid "Invalid host: %(reason)s" +msgstr "" + +#: cinder/exception.py:193 cinder/brick/exception.py:81 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:197 +#, python-format +msgid "Invalid auth key: %(reason)s" +msgstr "" + +#: cinder/exception.py:201 +#, python-format +msgid "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" +msgstr "" + +#: cinder/exception.py:206 +msgid "Service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:210 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:214 +#, python-format +msgid "The device in the path %(path)s is unavailable: %(reason)s" +msgstr "" + +#: cinder/exception.py:218 +#, python-format +msgid "Expected a uuid but received %(uuid)s." +msgstr "" + +#: cinder/exception.py:222 cinder/brick/exception.py:68 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:228 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:232 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "" +"Volume %(volume_id)s has no administration metadata with key " +"%(metadata_key)s." +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Invalid metadata: %(reason)s" +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Invalid metadata size: %(reason)s" +msgstr "" + +#: cinder/exception.py:250 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:255 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:264 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:269 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s deletion is not allowed with volumes " +"present with the type." +msgstr "" + +#: cinder/exception.py:274 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:278 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:282 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:287 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:291 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:295 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." 
+msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:328 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:332 +#, python-format +msgid "Unknown quota resources %(unknown)s." +msgstr "" + +#: cinder/exception.py:336 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:340 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:344 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:348 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:352 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:356 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:365 +#, python-format +msgid "Volume Type %(id)s already exists." +msgstr "" + +#: cinder/exception.py:369 +#, python-format +msgid "Volume type encryption for type %(type_id)s already exists." +msgstr "" + +#: cinder/exception.py:373 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:377 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:381 +#, python-format +msgid "Could not find parameter %(param)s" +msgstr "" + +#: cinder/exception.py:385 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:389 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:398 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:402 +#, python-format +msgid "Quota exceeded: code=%(code)s" +msgstr "" + +#: cinder/exception.py:409 +#, python-format +msgid "" +"Requested volume or snapshot exceeds allowed Gigabytes quota. Requested " +"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." +msgstr "" + +#: cinder/exception.py:415 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:419 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:423 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:427 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:432 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:436 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:440 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:444 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:449 +#, python-format +msgid "Glance metadata for volume/snapshot %(id)s cannot be found." 
+msgstr "" + +#: cinder/exception.py:453 +#, python-format +msgid "Failed to export for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:457 +#, python-format +msgid "Failed to create metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:461 +#, python-format +msgid "Failed to update metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:465 +#, python-format +msgid "Failed to copy metadata to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:469 +#, python-format +msgid "Failed to copy image to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:473 +msgid "Invalid Ceph args provided for backup rbd operation" +msgstr "" + +#: cinder/exception.py:477 +msgid "An error has occurred during backup operation" +msgstr "" + +#: cinder/exception.py:481 +msgid "Backup RBD operation failed" +msgstr "" + +#: cinder/exception.py:485 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:489 +msgid "Failed to identify volume backend." +msgstr "" + +#: cinder/exception.py:493 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "" + +#: cinder/exception.py:497 +#, python-format +msgid "Connection to swift failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:501 +#, python-format +msgid "Transfer %(transfer_id)s could not be found." +msgstr "" + +#: cinder/exception.py:505 +#, python-format +msgid "Volume migration failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:509 +#, python-format +msgid "SSH command injection detected: %(command)s" +msgstr "" + +#: cinder/exception.py:513 +#, python-format +msgid "QoS Specs %(specs_id)s already exists." +msgstr "" + +#: cinder/exception.py:517 +#, python-format +msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:522 +#, python-format +msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "No such QoS spec %(specs_id)s." +msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:536 +#, python-format +msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:541 +#, python-format +msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." +msgstr "" + +#: cinder/exception.py:546 +#, python-format +msgid "Invalid qos specs: %(reason)s" +msgstr "" + +#: cinder/exception.py:550 +#, python-format +msgid "QoS Specs %(specs_id)s is still associated with entities." +msgstr "" + +#: cinder/exception.py:554 +#, python-format +msgid "key manager error: %(reason)s" +msgstr "" + +#: cinder/exception.py:560 +msgid "Coraid Cinder Driver exception." +msgstr "" + +#: cinder/exception.py:564 +msgid "Failed to encode json data." +msgstr "" + +#: cinder/exception.py:568 +msgid "Login on ESM failed." +msgstr "" + +#: cinder/exception.py:572 +msgid "Relogin on ESM failed." +msgstr "" + +#: cinder/exception.py:576 +#, python-format +msgid "Group with name \"%(group_name)s\" not found." +msgstr "" + +#: cinder/exception.py:580 +#, python-format +msgid "ESM configure request failed: %(message)s." +msgstr "" + +#: cinder/exception.py:584 +#, python-format +msgid "Coraid ESM not available with reason: %(reason)s." +msgstr "" + +#: cinder/exception.py:589 +msgid "Zadara Cinder Driver exception." 
+msgstr "" + +#: cinder/exception.py:593 +#, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:597 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:601 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:605 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:609 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:613 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:618 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:622 +msgid "SolidFire Cinder Driver exception" +msgstr "" + +#: cinder/exception.py:626 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:630 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:636 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:641 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:645 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:649 cinder/exception.py:662 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:654 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:658 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/manager.py:133 +msgid "Notifying Schedulers of capabilities ..." +msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:105 +#, python-format +msgid "" +"Default quota for resource: %(res)s is set by the default quota flag: " +"quota_%(res)s, it is now deprecated. Please use the the default quota " +"class for default quota." +msgstr "" + +#: cinder/quota.py:748 +#, python-format +msgid "Created reservations %s" +msgstr "" + +#: cinder/quota.py:770 +#, python-format +msgid "Failed to commit reservations %s" +msgstr "" + +#: cinder/quota.py:790 +#, python-format +msgid "Failed to roll back reservations %s" +msgstr "" + +#: cinder/quota.py:876 +msgid "Cannot register resource" +msgstr "" + +#: cinder/quota.py:879 +msgid "Cannot register resources" +msgstr "" + +#: cinder/quota_utils.py:46 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume - " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/quota_utils.py:56 cinder/transfer/api.py:168 +#: cinder/volume/flows/api/create_volume.py:520 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/service.py:95 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:108 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:148 +#, python-format +msgid "" +"Report interval must be less than service down time. Current config " +"service_down_time: %(service_down_time)s, report_interval for this: " +"service is: %(report_interval)s. 
Setting global service_down_time to: " +"%(new_down_time)s" +msgstr "" + +#: cinder/service.py:216 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:255 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:270 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:276 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:298 +#, python-format +msgid "" +"Value of config option %(name)s_workers must be integer greater than 1. " +"Input value ignored." +msgstr "" + +#: cinder/service.py:373 +msgid "serve() can only be called once" +msgstr "" + +#: cinder/service.py:379 cinder/openstack/common/service.py:166 +#: cinder/openstack/common/service.py:384 +msgid "Full set of CONF:" +msgstr "" + +#: cinder/service.py:387 +#, python-format +msgid "%s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Can not translate %s to integer." +msgstr "" + +#: cinder/utils.py:127 +#, python-format +msgid "May specify only one of %s" +msgstr "" + +#: cinder/utils.py:212 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:228 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:412 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:423 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:698 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:759 +#, python-format +msgid "Volume driver %s not initialized" +msgstr "" + +#: cinder/wsgi.py:127 cinder/openstack/common/sslutils.py:50 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: cinder/wsgi.py:130 cinder/openstack/common/sslutils.py:53 +#, python-format +msgid "Unable to find ca_file : %s" +msgstr "" + +#: cinder/wsgi.py:133 cinder/openstack/common/sslutils.py:56 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: cinder/wsgi.py:136 cinder/openstack/common/sslutils.py:59 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:169 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:206 +#, python-format +msgid "Started %(name)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:244 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:313 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." 
+msgstr "" + +#: cinder/api/common.py:92 cinder/api/common.py:126 cinder/volume/api.py:266 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:95 cinder/api/common.py:130 cinder/volume/api.py:263 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:120 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:134 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:162 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:189 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:182 +msgid "Initializing extension manager." +msgstr "" + +#: cinder/api/extensions.py:197 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:235 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:236 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:240 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:256 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:262 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:276 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:287 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:356 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:266 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:463 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:786 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:907 +msgid "subclasses must implement construct()!" 
+msgstr "" + +#: cinder/api/contrib/admin_actions.py:81 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:227 +#: cinder/api/contrib/volume_transfer.py:157 +#: cinder/api/contrib/volume_transfer.py:193 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:224 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:60 +msgid "Snapshot not found." +msgstr "" + +#: cinder/api/contrib/hosts.py:86 cinder/api/openstack/wsgi.py:245 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:136 +#, python-format +msgid "Host '%s' could not be found." +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:168 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:180 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:206 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:214 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:111 +msgid "Please specify a name for QoS specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:220 +msgid "Failed to disassociate qos specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:222 +msgid "Qos specs still in use." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:298 +#: cinder/api/contrib/qos_specs_manage.py:351 +msgid "Volume Type id must not be None." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:72 +msgid "Missing required element quota_class_set in request body." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:81 +msgid "Quota class limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:85 +msgid "Quota class limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:60 +msgid "Quota limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quotas.py:65 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:100 +msgid "Missing required element quota_set in request body." msgstr "" -#: cinder/crypto.py:51 -msgid "Filename of private key" +#: cinder/api/contrib/quotas.py:111 +#, python-format +msgid "Bad key(s) in quota set: %s" +msgstr "" + +#: cinder/api/contrib/scheduler_hints.py:36 +msgid "Malformed scheduler_hints attribute" +msgstr "" + +#: cinder/api/contrib/services.py:84 +msgid "" +"Query by service parameter is deprecated. Please use binary parameter " +"instead." 
+msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:51 +msgid "'status' must be specified." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:61 +#, python-format +msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:67 +#, python-format +msgid "" +"Provided snapshot status %(provided)s not allowed for snapshot with " +"status %(current)s." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:79 +msgid "progress must be an integer percentage" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:101 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:105 +#: cinder/api/v1/snapshot_metadata.py:75 cinder/api/v1/volume_metadata.py:75 +#: cinder/api/v2/snapshot_metadata.py:75 cinder/api/v2/volume_metadata.py:74 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:108 +#: cinder/api/v1/snapshot_metadata.py:79 cinder/api/v1/volume_metadata.py:79 +#: cinder/api/v2/snapshot_metadata.py:79 cinder/api/v2/volume_metadata.py:78 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:150 +msgid "" +"Key names can only contain alphanumeric characters, underscores, periods," +" colons and hyphens." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:99 +#, python-format +msgid "" +"Invalid request to attach volume to an instance %(instance_uuid)s and a " +"host %(host_name)s simultaneously" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:107 +msgid "Invalid request to attach volume to an invalid target" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:111 +msgid "" +"Invalid request to attach volume with an invalid mode. Attaching mode " +"should be 'rw' or 'ro'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:196 +msgid "Unable to fetch connection information from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:216 +msgid "Unable to terminate volume connection from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:229 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:237 +msgid "Bad value for 'force' parameter." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:240 +msgid "'force' is not string or bool." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:280 +msgid "New volume size must be specified as an integer." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:299 +msgid "Must specify readonly in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:307 +msgid "Bad value for 'readonly'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:311 +msgid "'readonly' not string or bool" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:325 +msgid "New volume type must be specified." 
+msgstr "" + +#: cinder/api/contrib/volume_transfer.py:131 +msgid "Listing volume transfers" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:147 +#, python-format +msgid "Creating new volume transfer %s" msgstr "" -#: cinder/crypto.py:54 -msgid "Filename of root Certificate Revocation List" +#: cinder/api/contrib/volume_transfer.py:162 +#, python-format +msgid "Creating transfer of volume %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:183 +#, python-format +msgid "Accepting volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:196 +#, python-format +msgid "Accepting transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:217 +#, python-format +msgid "Delete transfer with id: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:64 +msgid "key_size must be non-negative" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:67 +msgid "key_size must be an integer" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:73 +msgid "provider must be defined" msgstr "" -#: cinder/crypto.py:57 -msgid "Where we keep our keys" +#: cinder/api/contrib/volume_type_encryption.py:75 +msgid "control_location must be defined" msgstr "" -#: cinder/crypto.py:60 -msgid "Where we keep our root CA" +#: cinder/api/contrib/volume_type_encryption.py:83 +#, python-format +msgid "Valid control location are: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:111 +msgid "Cannot create encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:115 +msgid "Create body is not valid." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:157 +msgid "Cannot delete encryption specs. Volume type in use." msgstr "" -#: cinder/crypto.py:63 -msgid "Should we use a CA for each project?" +#: cinder/api/middleware/auth.py:108 +msgid "Invalid service catalog json." msgstr "" -#: cinder/crypto.py:67 +#: cinder/api/middleware/fault.py:44 #, python-format -msgid "Subject for certificate for users, %s for project, user, timestamp" +msgid "Caught error: %s" msgstr "" -#: cinder/crypto.py:72 +#: cinder/api/middleware/fault.py:53 cinder/api/openstack/wsgi.py:984 #, python-format -msgid "Subject for certificate for projects, %s for project, timestamp" +msgid "%(url)s returned with HTTP %(status)d" msgstr "" -#: cinder/crypto.py:292 +#: cinder/api/middleware/fault.py:69 #, python-format -msgid "Flags path: %s" +msgid "%(exception)s: %(explanation)s" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:55 cinder/api/middleware/sizelimit.py:64 +#: cinder/api/middleware/sizelimit.py:78 +msgid "Request is too large." msgstr "" -#: cinder/exception.py:56 -msgid "Unexpected error while running command." +#: cinder/api/openstack/__init__.py:69 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:80 +#, python-format +msgid "Extended resource: %s" msgstr "" -#: cinder/exception.py:59 +#: cinder/api/openstack/__init__.py:104 #, python-format msgid "" -"%(description)s\n" -"Command: %(cmd)s\n" -"Exit code: %(exit_code)s\n" -"Stdout: %(stdout)r\n" -"Stderr: %(stderr)r" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" msgstr "" -#: cinder/exception.py:94 -msgid "DB exception wrapped." +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" msgstr "" -#: cinder/exception.py:155 -msgid "An unknown exception occurred." 
+#: cinder/api/openstack/__init__.py:126 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." msgstr "" -#: cinder/exception.py:178 -msgid "Failed to decrypt text" +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." +msgstr "" + +#: cinder/api/openstack/wsgi.py:220 cinder/api/openstack/wsgi.py:634 +msgid "cannot understand JSON" msgstr "" -#: cinder/exception.py:182 -msgid "Failed to paginate through images from image service" +#: cinder/api/openstack/wsgi.py:639 +msgid "too many body keys" msgstr "" -#: cinder/exception.py:186 -msgid "Virtual Interface creation failed" +#: cinder/api/openstack/wsgi.py:677 +#, python-format +msgid "Exception handling resource: %s" msgstr "" -#: cinder/exception.py:190 -msgid "5 attempts to create virtual interfacewith unique mac address failed" +#: cinder/api/openstack/wsgi.py:682 +#, python-format +msgid "Fault thrown: %s" msgstr "" -#: cinder/exception.py:195 -msgid "Connection to glance failed" +#: cinder/api/openstack/wsgi.py:685 +#, python-format +msgid "HTTP exception thrown: %s" msgstr "" -#: cinder/exception.py:199 -msgid "Connection to melange failed" +#: cinder/api/openstack/wsgi.py:793 +msgid "Empty body provided in request" msgstr "" -#: cinder/exception.py:203 -msgid "Not authorized." +#: cinder/api/openstack/wsgi.py:799 +msgid "Unrecognized Content-Type provided in request" msgstr "" -#: cinder/exception.py:208 -msgid "User does not have admin privileges" +#: cinder/api/openstack/wsgi.py:803 +msgid "No Content-Type provided in request" msgstr "" -#: cinder/exception.py:212 +#: cinder/api/openstack/wsgi.py:914 #, python-format -msgid "Policy doesn't allow %(action)s to be performed." +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:917 cinder/api/openstack/wsgi.py:930 +#: cinder/api/v1/snapshot_metadata.py:53 cinder/api/v1/snapshot_metadata.py:71 +#: cinder/api/v1/snapshot_metadata.py:96 cinder/api/v1/snapshot_metadata.py:121 +#: cinder/api/v1/volume_metadata.py:53 cinder/api/v1/volume_metadata.py:71 +#: cinder/api/v1/volume_metadata.py:96 cinder/api/v1/volume_metadata.py:121 +#: cinder/api/v2/snapshot_metadata.py:53 cinder/api/v2/snapshot_metadata.py:71 +#: cinder/api/v2/snapshot_metadata.py:96 cinder/api/v2/snapshot_metadata.py:121 +#: cinder/api/v2/volume_metadata.py:52 cinder/api/v2/volume_metadata.py:70 +#: cinder/api/v2/volume_metadata.py:95 cinder/api/v2/volume_metadata.py:120 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:927 +msgid "Unsupported Content-Type" msgstr "" -#: cinder/exception.py:216 +#: cinder/api/openstack/wsgi.py:939 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:987 #, python-format -msgid "Not authorized for image %(image_id)s." +msgid "%(url)s returned a fault: %(e)s" msgstr "" -#: cinder/exception.py:220 -msgid "Unacceptable parameters." +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." 
+msgstr "" + +#: cinder/api/v1/limits.py:139 cinder/api/v2/limits.py:138 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:264 cinder/api/v2/limits.py:261 +msgid "This request was rate-limited." +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:37 cinder/api/v1/snapshot_metadata.py:117 +#: cinder/api/v1/snapshot_metadata.py:156 cinder/api/v2/snapshot_metadata.py:37 +#: cinder/api/v2/snapshot_metadata.py:117 +#: cinder/api/v2/snapshot_metadata.py:156 +msgid "snapshot does not exist" +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:139 +#: cinder/api/v1/snapshot_metadata.py:149 cinder/api/v1/volume_metadata.py:139 +#: cinder/api/v1/volume_metadata.py:149 cinder/api/v2/snapshot_metadata.py:139 +#: cinder/api/v2/snapshot_metadata.py:149 cinder/api/v2/volume_metadata.py:138 +#: cinder/api/v2/volume_metadata.py:148 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:119 cinder/api/v2/snapshots.py:120 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:184 +msgid "'volume_id' must be specified" +msgstr "" + +#: cinder/api/v1/snapshots.py:182 cinder/api/v2/snapshots.py:193 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:186 cinder/api/v2/snapshots.py:202 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:37 cinder/api/v1/volume_metadata.py:117 +#: cinder/api/v1/volume_metadata.py:156 cinder/api/v2/volume_metadata.py:36 +#: cinder/api/v2/volume_metadata.py:116 cinder/api/v2/volume_metadata.py:155 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:111 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:290 cinder/api/v2/volumes.py:228 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:344 cinder/api/v1/volumes.py:348 +#: cinder/api/v2/volumes.py:298 cinder/api/v2/volumes.py:302 +msgid "Invalid imageRef provided." +msgstr "" + +#: cinder/api/v1/volumes.py:388 cinder/api/v2/volumes.py:354 +#, python-format +msgid "snapshot id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:401 +#, python-format +msgid "source vol id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:412 cinder/api/v2/volumes.py:377 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/v1/volumes.py:496 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/snapshots.py:111 cinder/api/v2/snapshots.py:126 +#: cinder/api/v2/snapshots.py:267 +msgid "Snapshot could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:174 cinder/api/v2/snapshots.py:234 +#: cinder/api/v2/volumes.py:313 cinder/api/v2/volumes.py:419 +#, python-format +msgid "Missing required element '%s' in request body" +msgstr "" + +#: cinder/api/v2/snapshots.py:190 cinder/api/v2/volumes.py:217 +#: cinder/api/v2/volumes.py:234 cinder/api/v2/volumes.py:449 +msgid "Volume could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:230 cinder/api/v2/volumes.py:415 +msgid "Missing request body" +msgstr "" + +#: cinder/api/v2/types.py:70 +msgid "Volume type not found" +msgstr "" + +#: cinder/api/v2/volumes.py:237 +msgid "Volume cannot be deleted while in attached state" +msgstr "" + +#: cinder/api/v2/volumes.py:343 +msgid "Volume type not found." 
+msgstr "" + +#: cinder/api/v2/volumes.py:366 +#, python-format +msgid "source volume id:%s not found" +msgstr "" + +#: cinder/api/v2/volumes.py:472 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:66 +msgid "Backup status must be available or error" +msgstr "" + +#: cinder/backup/api.py:105 +msgid "Volume to be backed up must be available" +msgstr "" + +#: cinder/backup/api.py:140 +msgid "Backup status must be available" +msgstr "" + +#: cinder/backup/api.py:145 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:154 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:170 +msgid "Volume to be restored to must be available" +msgstr "" + +#: cinder/backup/api.py:176 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." +msgstr "" + +#: cinder/backup/api.py:181 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:97 +msgid "NULL host not allowed for volume backend lookup." +msgstr "" + +#: cinder/backup/manager.py:100 +#, python-format +msgid "Checking hostname '%s' for backend info." +msgstr "" + +#: cinder/backup/manager.py:107 +#, python-format +msgid "Backend not found in hostname (%s) so using default." msgstr "" -#: cinder/exception.py:225 -msgid "Invalid snapshot" +#: cinder/backup/manager.py:117 +#, python-format +msgid "Manager requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:120 cinder/backup/manager.py:132 +msgid "Fetching default backend." +msgstr "" + +#: cinder/backup/manager.py:123 +#, python-format +msgid "Volume manager for backend '%s' does not exist." +msgstr "" + +#: cinder/backup/manager.py:129 +#, python-format +msgid "Driver requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:147 +#, python-format +msgid "" +"Registering backend %(backend)s (host=%(host)s " +"backend_name=%(backend_name)s)." +msgstr "" + +#: cinder/backup/manager.py:154 +#, python-format +msgid "Registering default backend %s." msgstr "" -#: cinder/exception.py:229 +#: cinder/backup/manager.py:158 #, python-format -msgid "Volume %(volume_id)s is not attached to anything" +msgid "Starting volume driver %(driver_name)s (%(version)s)." msgstr "" -#: cinder/exception.py:233 cinder/api/openstack/compute/contrib/keypairs.py:113 -msgid "Keypair data is invalid" +#: cinder/backup/manager.py:165 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s." msgstr "" -#: cinder/exception.py:237 -msgid "Failed to load data into json format" +#: cinder/backup/manager.py:184 +msgid "Cleaning up incomplete backup operations." msgstr "" -#: cinder/exception.py:241 -msgid "The request is invalid." +#: cinder/backup/manager.py:189 +#, python-format +msgid "Resetting volume %s to available (was backing-up)." msgstr "" -#: cinder/exception.py:245 +#: cinder/backup/manager.py:194 #, python-format -msgid "Invalid signature %(signature)s for user %(user)s." +msgid "Resetting volume %s to error_restoring (was restoring-backup)." msgstr "" -#: cinder/exception.py:249 -msgid "Invalid input received" +#: cinder/backup/manager.py:206 +#, python-format +msgid "Resetting backup %s to error (was creating)." msgstr "" -#: cinder/exception.py:253 +#: cinder/backup/manager.py:212 #, python-format -msgid "Invalid instance type %(instance_type)s." 
+msgid "Resetting backup %s to available (was restoring)." msgstr "" -#: cinder/exception.py:257 -msgid "Invalid volume type" +#: cinder/backup/manager.py:217 +#, python-format +msgid "Resuming delete on backup: %s." msgstr "" -#: cinder/exception.py:261 -msgid "Invalid volume" +#: cinder/backup/manager.py:225 +#, python-format +msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." msgstr "" -#: cinder/exception.py:265 +#: cinder/backup/manager.py:237 #, python-format -msgid "Invalid port range %(from_port)s:%(to_port)s. %(msg)s" +msgid "" +"Create backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s." msgstr "" -#: cinder/exception.py:269 +#: cinder/backup/manager.py:249 #, python-format -msgid "Invalid IP protocol %(protocol)s." +msgid "" +"Create backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." msgstr "" -#: cinder/exception.py:273 +#: cinder/backup/manager.py:282 #, python-format -msgid "Invalid content type %(content_type)s." +msgid "Create backup finished. backup: %s." msgstr "" -#: cinder/exception.py:277 +#: cinder/backup/manager.py:286 #, python-format -msgid "Invalid cidr %(cidr)s." +msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s." msgstr "" -#: cinder/exception.py:281 -msgid "Invalid reuse of an RPC connection." +#: cinder/backup/manager.py:299 +#, python-format +msgid "" +"Restore backup aborted: expected volume status %(expected_status)s but " +"got %(actual_status)s." msgstr "" -#: cinder/exception.py:285 -msgid "Invalid Parameter: Unicode is not supported by the current database." +#: cinder/backup/manager.py:310 +#, python-format +msgid "" +"Restore backup aborted: expected backup status %(expected_status)s but " +"got %(actual_status)s." msgstr "" -#: cinder/exception.py:292 +#: cinder/backup/manager.py:329 #, python-format -msgid "%(err)s" +msgid "" +"Restore backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." msgstr "" -#: cinder/exception.py:296 +#: cinder/backup/manager.py:360 #, python-format msgid "" -"Cannot perform action '%(action)s' on aggregate %(aggregate_id)s. Reason:" -" %(reason)s." +"Restore backup finished, backup %(backup_id)s restored to volume " +"%(volume_id)s." msgstr "" -#: cinder/exception.py:301 +#: cinder/backup/manager.py:379 #, python-format -msgid "Group not valid. Reason: %(reason)s" +msgid "Delete backup started, backup: %s." msgstr "" -#: cinder/exception.py:305 +#: cinder/backup/manager.py:386 #, python-format msgid "" -"Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot %(method)s while" -" the instance is in this state." +"Delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." msgstr "" -#: cinder/exception.py:310 +#: cinder/backup/manager.py:399 #, python-format -msgid "Instance %(instance_id)s is not running." +msgid "" +"Delete backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." msgstr "" -#: cinder/exception.py:314 +#: cinder/backup/manager.py:422 #, python-format -msgid "Instance %(instance_id)s is not suspended." +msgid "Delete backup finished, backup %s deleted." 
msgstr "" -#: cinder/exception.py:318 +#: cinder/backup/drivers/ceph.py:116 +msgid "" +"rbd striping not supported - ignoring configuration settings for rbd " +"striping" +msgstr "" + +#: cinder/backup/drivers/ceph.py:147 #, python-format -msgid "Instance %(instance_id)s is not in rescue mode" +msgid "invalid user '%s'" msgstr "" -#: cinder/exception.py:322 -msgid "Failed to suspend instance" +#: cinder/backup/drivers/ceph.py:213 +msgid "backup_id required" msgstr "" -#: cinder/exception.py:326 -msgid "Failed to resume server" +#: cinder/backup/drivers/ceph.py:224 +#, python-format +msgid "discarding %(length)s bytes from offset %(offset)s" msgstr "" -#: cinder/exception.py:330 -msgid "Failed to reboot instance" +#: cinder/backup/drivers/ceph.py:232 +#, python-format +msgid "writing zeroes chunk %d" msgstr "" -#: cinder/exception.py:334 -msgid "Failed to terminate instance" +#: cinder/backup/drivers/ceph.py:246 +#, python-format +msgid "transferring data between '%(src)s' and '%(dest)s'" msgstr "" -#: cinder/exception.py:338 -msgid "Service is unavailable at this time." +#: cinder/backup/drivers/ceph.py:250 +#, python-format +msgid "%(chunks)s chunks of %(bytes)s bytes to be transferred" msgstr "" -#: cinder/exception.py:342 -msgid "Volume service is unavailable at this time." +#: cinder/backup/drivers/ceph.py:269 +#, python-format +msgid "transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s)" msgstr "" -#: cinder/exception.py:346 -msgid "Compute service is unavailable at this time." +#: cinder/backup/drivers/ceph.py:279 +#, python-format +msgid "transferring remaining %s bytes" msgstr "" -#: cinder/exception.py:350 +#: cinder/backup/drivers/ceph.py:295 #, python-format -msgid "Unable to migrate instance (%(instance_id)s) to current host (%(host)s)." +msgid "creating base image '%s'" msgstr "" -#: cinder/exception.py:355 -msgid "Destination compute host is unavailable at this time." +#: cinder/backup/drivers/ceph.py:322 cinder/backup/drivers/ceph.py:603 +#, python-format +msgid "deleting backup snapshot='%s'" msgstr "" -#: cinder/exception.py:359 -msgid "Original compute host is unavailable at this time." +#: cinder/backup/drivers/ceph.py:325 +msgid "no backup snapshot to delete" msgstr "" -#: cinder/exception.py:363 -msgid "The supplied hypervisor type of is invalid." +#: cinder/backup/drivers/ceph.py:361 +#, python-format +msgid "trying diff format name format basename='%s'" msgstr "" -#: cinder/exception.py:367 -msgid "The instance requires a newer hypervisor version than has been provided." +#: cinder/backup/drivers/ceph.py:369 +#, python-format +msgid "image %s not found" msgstr "" -#: cinder/exception.py:372 +#: cinder/backup/drivers/ceph.py:377 #, python-format -msgid "" -"The supplied disk path (%(path)s) already exists, it is expected not to " -"exist." +msgid "base image still has %s snapshots so skipping base image delete" msgstr "" -#: cinder/exception.py:377 +#: cinder/backup/drivers/ceph.py:382 #, python-format -msgid "The supplied device path (%(path)s) is invalid." +msgid "deleting base image='%s'" msgstr "" -#: cinder/exception.py:381 +#: cinder/backup/drivers/ceph.py:389 #, python-format -msgid "The supplied device (%(device)s) is busy." 
+msgid "image busy, retrying %(retries)s more time(s) in %(delay)ss" msgstr "" -#: cinder/exception.py:385 -msgid "Unacceptable CPU info" +#: cinder/backup/drivers/ceph.py:394 +msgid "max retries reached - raising error" msgstr "" -#: cinder/exception.py:389 +#: cinder/backup/drivers/ceph.py:397 #, python-format -msgid "%(address)s is not a valid IP v4/6 address." +msgid "base backup image='%s' deleted)" msgstr "" -#: cinder/exception.py:393 +#: cinder/backup/drivers/ceph.py:407 #, python-format -msgid "" -"VLAN tag is not appropriate for the port group %(bridge)s. Expected VLAN " -"tag is %(tag)s, but the one associated with the port group is %(pgroup)s." +msgid "deleting source snap '%s'" msgstr "" -#: cinder/exception.py:399 +#: cinder/backup/drivers/ceph.py:453 #, python-format -msgid "" -"vSwitch which contains the port group %(bridge)s is not associated with " -"the desired physical adapter. Expected vSwitch is %(expected)s, but the " -"one associated is %(actual)s." +msgid "performing differential transfer from '%(src)s' to '%(dest)s'" msgstr "" -#: cinder/exception.py:406 +#: cinder/backup/drivers/ceph.py:478 #, python-format -msgid "Disk format %(disk_format)s is not acceptable" +msgid "rbd diff op failed - (ret=%(ret)s stderr=%(stderr)s)" msgstr "" -#: cinder/exception.py:410 +#: cinder/backup/drivers/ceph.py:488 #, python-format -msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgid "image '%s' not found - trying diff format name" msgstr "" -#: cinder/exception.py:414 +#: cinder/backup/drivers/ceph.py:493 #, python-format -msgid "Instance %(instance_id)s is unacceptable: %(reason)s" +msgid "diff format image '%s' not found" msgstr "" -#: cinder/exception.py:418 +#: cinder/backup/drivers/ceph.py:528 #, python-format -msgid "Ec2 id %(ec2_id)s is unacceptable." +msgid "using --from-snap '%s'" msgstr "" -#: cinder/exception.py:422 -msgid "Resource could not be found." +#: cinder/backup/drivers/ceph.py:543 +#, python-format +msgid "source snap '%s' is stale so deleting" msgstr "" -#: cinder/exception.py:427 +#: cinder/backup/drivers/ceph.py:555 #, python-format -msgid "Required flag %(flag)s not set." +msgid "" +"snap='%(snap)s' does not exist in base image='%(base)s' - aborting " +"incremental backup" msgstr "" -#: cinder/exception.py:431 +#: cinder/backup/drivers/ceph.py:566 #, python-format -msgid "Volume %(volume_id)s could not be found." +msgid "creating backup snapshot='%s'" msgstr "" -#: cinder/exception.py:435 +#: cinder/backup/drivers/ceph.py:586 #, python-format -msgid "Unable to locate account %(account_name)s on Solidfire device" +msgid "differential backup transfer completed in %.4fs" msgstr "" -#: cinder/exception.py:440 -#, python-format -msgid "Volume not found for instance %(instance_id)s." +#: cinder/backup/drivers/ceph.py:595 +msgid "differential backup transfer failed" msgstr "" -#: cinder/exception.py:444 +#: cinder/backup/drivers/ceph.py:625 #, python-format -msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgid "creating base image='%s'" msgstr "" -#: cinder/exception.py:449 -msgid "Zero volume types found." +#: cinder/backup/drivers/ceph.py:634 +msgid "copying data" msgstr "" -#: cinder/exception.py:453 +#: cinder/backup/drivers/ceph.py:694 #, python-format -msgid "Volume type %(volume_type_id)s could not be found." +msgid "looking for snapshot of backup base '%s'" msgstr "" -#: cinder/exception.py:457 +#: cinder/backup/drivers/ceph.py:697 #, python-format -msgid "Volume type with name %(volume_type_name)s could not be found." 
+msgid "backup base '%s' has no snapshots" msgstr "" -#: cinder/exception.py:462 +#: cinder/backup/drivers/ceph.py:704 #, python-format -msgid "" -"Volume Type %(volume_type_id)s has no extra specs with key " -"%(extra_specs_key)s." +msgid "backup '%s' has no snapshot" msgstr "" -#: cinder/exception.py:467 +#: cinder/backup/drivers/ceph.py:708 #, python-format -msgid "Snapshot %(snapshot_id)s could not be found." +msgid "backup should only have one snapshot but instead has %s" msgstr "" -#: cinder/exception.py:471 +#: cinder/backup/drivers/ceph.py:713 #, python-format -msgid "deleting volume %(volume_name)s that has snapshot" +msgid "found snapshot '%s'" msgstr "" -#: cinder/exception.py:475 -#, python-format -msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +#: cinder/backup/drivers/ceph.py:734 +msgid "need non-zero volume size" msgstr "" -#: cinder/exception.py:480 +#: cinder/backup/drivers/ceph.py:751 #, python-format -msgid "No target id found for volume %(volume_id)s." +msgid "Starting backup of volume='%s'" msgstr "" -#: cinder/exception.py:484 -#, python-format -msgid "No disk at %(location)s" +#: cinder/backup/drivers/ceph.py:764 +msgid "forcing full backup" msgstr "" -#: cinder/exception.py:488 +#: cinder/backup/drivers/ceph.py:776 #, python-format -msgid "Could not find a handler for %(driver_type)s volume." +msgid "backup '%s' finished." +msgstr "" + +#: cinder/backup/drivers/ceph.py:834 +msgid "adjusting restore vol size" msgstr "" -#: cinder/exception.py:492 +#: cinder/backup/drivers/ceph.py:846 #, python-format -msgid "Invalid image href %(image_href)s." +msgid "trying incremental restore from base='%(base)s' snap='%(snap)s'" msgstr "" -#: cinder/exception.py:496 -msgid "" -"Some images have been stored via hrefs. This version of the api does not " -"support displaying image hrefs." +#: cinder/backup/drivers/ceph.py:858 +msgid "differential restore failed, trying full restore" msgstr "" -#: cinder/exception.py:501 +#: cinder/backup/drivers/ceph.py:869 #, python-format -msgid "Image %(image_id)s could not be found." +msgid "restore transfer completed in %.4fs" msgstr "" -#: cinder/exception.py:505 +#: cinder/backup/drivers/ceph.py:916 #, python-format -msgid "Kernel not found for image %(image_id)s." +msgid "rbd has %s extents" msgstr "" -#: cinder/exception.py:509 -#, python-format -msgid "User %(user_id)s could not be found." +#: cinder/backup/drivers/ceph.py:938 +msgid "dest volume is original volume - forcing full copy" msgstr "" -#: cinder/exception.py:513 -#, python-format -msgid "Project %(project_id)s could not be found." +#: cinder/backup/drivers/ceph.py:959 +msgid "destination has extents - forcing full copy" msgstr "" -#: cinder/exception.py:517 +#: cinder/backup/drivers/ceph.py:964 #, python-format -msgid "User %(user_id)s is not a member of project %(project_id)s." +msgid "no restore point found for backup='%s', forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:995 +msgid "forcing full restore" msgstr "" -#: cinder/exception.py:521 +#: cinder/backup/drivers/ceph.py:1005 #, python-format -msgid "Role %(role_id)s could not be found." +msgid "starting restore from Ceph backup=%(src)s to volume=%(dest)s" msgstr "" -#: cinder/exception.py:525 -msgid "Cannot find SR to read/write VDI." +#: cinder/backup/drivers/ceph.py:1016 +msgid "volume_file does not support fileno() so skipping fsync()" msgstr "" -#: cinder/exception.py:529 -#, python-format -msgid "%(req)s is required to create a network." 
+#: cinder/backup/drivers/ceph.py:1021
+msgid "restore finished successfully."
 msgstr ""
 
-#: cinder/exception.py:533
+#: cinder/backup/drivers/ceph.py:1023
 #, python-format
-msgid "Network %(network_id)s could not be found."
+msgid "restore finished with error - %s"
 msgstr ""
 
-#: cinder/exception.py:537
+#: cinder/backup/drivers/ceph.py:1029
 #, python-format
-msgid "Network could not be found for bridge %(bridge)s"
+msgid "delete started for backup=%s"
 msgstr ""
 
-#: cinder/exception.py:541
-#, python-format
-msgid "Network could not be found for uuid %(uuid)s"
+#: cinder/backup/drivers/ceph.py:1034
+msgid "rbd image not found but continuing anyway so that db entry can be removed"
 msgstr ""
 
-#: cinder/exception.py:545
+#: cinder/backup/drivers/ceph.py:1037
 #, python-format
-msgid "Network could not be found with cidr %(cidr)s."
+msgid "delete '%s' finished with warning"
 msgstr ""
 
-#: cinder/exception.py:549
+#: cinder/backup/drivers/ceph.py:1039
 #, python-format
-msgid "Network could not be found for instance %(instance_id)s."
+msgid "delete '%s' finished"
 msgstr ""
 
-#: cinder/exception.py:553
-msgid "No networks defined."
+#: cinder/backup/drivers/swift.py:106
+#, python-format
+msgid "unsupported compression algorithm: %s"
 msgstr ""
 
-#: cinder/exception.py:557
+#: cinder/backup/drivers/swift.py:123
 #, python-format
-msgid ""
-"Either Network uuid %(network_uuid)s is not present or is not assigned to"
-" the project %(project_id)s."
+msgid "single_user auth mode enabled, but %(param)s not set"
 msgstr ""
 
-#: cinder/exception.py:562
+#: cinder/backup/drivers/swift.py:141
 #, python-format
-msgid "Host is not set to the network (%(network_id)s)."
+msgid "_check_container_exists: container: %s"
 msgstr ""
 
-#: cinder/exception.py:566
+#: cinder/backup/drivers/swift.py:146
 #, python-format
-msgid "Network %(network)s has active ports, cannot delete."
+msgid "container %s does not exist"
 msgstr ""
 
-#: cinder/exception.py:570
-msgid "Could not find the datastore reference(s) which the VM uses."
+#: cinder/backup/drivers/swift.py:151
+#, python-format
+msgid "container %s exists"
 msgstr ""
 
-#: cinder/exception.py:574
+#: cinder/backup/drivers/swift.py:157
 #, python-format
-msgid "No fixed IP associated with id %(id)s."
+msgid "_create_container started, container: %(container)s, backup: %(backup_id)s"
 msgstr ""
 
-#: cinder/exception.py:578
+#: cinder/backup/drivers/swift.py:173
 #, python-format
-msgid "Fixed ip not found for address %(address)s."
+msgid "_generate_swift_object_name_prefix: %s"
 msgstr ""
 
-#: cinder/exception.py:582
+#: cinder/backup/drivers/swift.py:182
 #, python-format
-msgid "Instance %(instance_id)s has zero fixed ips."
+msgid "generated object list: %s"
 msgstr ""
 
-#: cinder/exception.py:586
+#: cinder/backup/drivers/swift.py:192
 #, python-format
-msgid "Network host %(host)s has zero fixed ips in network %(network_id)s."
+msgid ""
+"_write_metadata started, container name: %(container)s, metadata "
+"filename: %(filename)s"
 msgstr ""
 
-#: cinder/exception.py:591
+#: cinder/backup/drivers/swift.py:209
 #, python-format
-msgid "Instance %(instance_id)s doesn't have fixed ip '%(ip)s'."
+msgid ""
+"error writing metadata file to swift, MD5 of metadata file in swift "
+"[%(etag)s] is not the same as MD5 of metadata file sent to swift "
+"[%(md5)s]"
 msgstr ""
 
-#: cinder/exception.py:595
-#, python-format
-msgid "Host %(host)s has zero fixed ips."
+#: cinder/backup/drivers/swift.py:214 +msgid "_write_metadata finished" msgstr "" -#: cinder/exception.py:599 +#: cinder/backup/drivers/swift.py:219 #, python-format msgid "" -"Fixed IP address (%(address)s) does not exist in network " -"(%(network_uuid)s)." +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" msgstr "" -#: cinder/exception.py:604 +#: cinder/backup/drivers/swift.py:224 #, python-format -msgid "Fixed IP address %(address)s is already in use." +msgid "_read_metadata finished (%s)" msgstr "" -#: cinder/exception.py:608 +#: cinder/backup/drivers/swift.py:234 #, python-format -msgid "Fixed IP address %(address)s is invalid." +msgid "volume size %d is invalid." msgstr "" -#: cinder/exception.py:612 -msgid "Zero fixed ips available." +#: cinder/backup/drivers/swift.py:248 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" msgstr "" -#: cinder/exception.py:616 -msgid "Zero fixed ips could be found." +#: cinder/backup/drivers/swift.py:271 +msgid "reading chunk of data from volume" msgstr "" -#: cinder/exception.py:620 +#: cinder/backup/drivers/swift.py:278 #, python-format -msgid "Floating ip not found for id %(id)s." +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" msgstr "" -#: cinder/exception.py:624 -#, python-format -msgid "The DNS entry %(name)s already exists in domain %(domain)s." +#: cinder/backup/drivers/swift.py:287 +msgid "not compressing data" msgstr "" -#: cinder/exception.py:628 -#, python-format -msgid "Floating ip not found for address %(address)s." +#: cinder/backup/drivers/swift.py:291 +msgid "About to put_object" msgstr "" -#: cinder/exception.py:632 +#: cinder/backup/drivers/swift.py:297 #, python-format -msgid "Floating ip not found for host %(host)s." -msgstr "" - -#: cinder/exception.py:636 -msgid "Zero floating ips available." +msgid "swift MD5 for %(object_name)s: %(etag)s" msgstr "" -#: cinder/exception.py:640 +#: cinder/backup/drivers/swift.py:301 #, python-format -msgid "Floating ip %(address)s is associated." +msgid "backup MD5 for %(object_name)s: %(md5)s" msgstr "" -#: cinder/exception.py:644 +#: cinder/backup/drivers/swift.py:304 #, python-format -msgid "Floating ip %(address)s is not associated." +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" msgstr "" -#: cinder/exception.py:648 -msgid "Zero floating ips exist." +#: cinder/backup/drivers/swift.py:312 +msgid "Calling eventlet.sleep(0)" msgstr "" -#: cinder/exception.py:652 +#: cinder/backup/drivers/swift.py:328 cinder/backup/drivers/tsm.py:324 #, python-format -msgid "Interface %(interface)s not found." +msgid "backup %s finished." msgstr "" -#: cinder/exception.py:656 +#: cinder/backup/drivers/swift.py:345 #, python-format -msgid "Keypair %(name)s not found for user %(user_id)s" +msgid "v1 swift volume backup restore of %s started" msgstr "" -#: cinder/exception.py:660 +#: cinder/backup/drivers/swift.py:350 #, python-format -msgid "Certificate %(certificate_id)s not found." +msgid "metadata_object_names = %s" msgstr "" -#: cinder/exception.py:664 -#, python-format -msgid "Service %(service_id)s could not be found." 
+#: cinder/backup/drivers/swift.py:356 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" msgstr "" -#: cinder/exception.py:668 +#: cinder/backup/drivers/swift.py:362 #, python-format -msgid "Host %(host)s could not be found." +msgid "" +"restoring object from swift. backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" msgstr "" -#: cinder/exception.py:672 +#: cinder/backup/drivers/swift.py:378 #, python-format -msgid "Compute host %(host)s could not be found." +msgid "decompressing data using %s algorithm" msgstr "" -#: cinder/exception.py:676 +#: cinder/backup/drivers/swift.py:401 #, python-format -msgid "Could not find binary %(binary)s on host %(host)s." +msgid "v1 swift volume backup restore of %s finished" msgstr "" -#: cinder/exception.py:680 +#: cinder/backup/drivers/swift.py:409 #, python-format -msgid "Auth token %(token)s could not be found." +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" msgstr "" -#: cinder/exception.py:684 +#: cinder/backup/drivers/swift.py:423 #, python-format -msgid "Access Key %(access_key)s could not be found." +msgid "Restoring swift backup version %s" msgstr "" -#: cinder/exception.py:688 -msgid "Quota could not be found" +#: cinder/backup/drivers/swift.py:428 +#, python-format +msgid "No support to restore swift backup version %s" msgstr "" -#: cinder/exception.py:692 +#: cinder/backup/drivers/swift.py:432 cinder/backup/drivers/tsm.py:378 #, python-format -msgid "Quota for project %(project_id)s could not be found." +msgid "restore %(backup_id)s to %(volume_id)s finished." msgstr "" -#: cinder/exception.py:696 +#: cinder/backup/drivers/swift.py:446 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:455 #, python-format -msgid "Quota class %(class_name)s could not be found." +msgid "swift error while deleting object %s, continuing with delete" msgstr "" -#: cinder/exception.py:700 +#: cinder/backup/drivers/swift.py:458 #, python-format -msgid "Security group %(security_group_id)s not found." +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" msgstr "" -#: cinder/exception.py:704 +#: cinder/backup/drivers/swift.py:468 cinder/backup/drivers/tsm.py:440 #, python-format -msgid "Security group %(security_group_id)s not found for project %(project_id)s." +msgid "delete %s finished" msgstr "" -#: cinder/exception.py:709 +#: cinder/backup/drivers/tsm.py:85 #, python-format -msgid "Security group with rule %(rule_id)s not found." 
+msgid "" +"backup: %(vol_id)s Failed to create device hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/exception.py:713 +#: cinder/backup/drivers/tsm.py:143 #, python-format msgid "" -"Security group %(security_group_id)s is already associated with the " -"instance %(instance_id)s" +"backup: %(vol_id)s Failed to obtain backup success notification from " +"server.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/exception.py:718 +#: cinder/backup/drivers/tsm.py:173 #, python-format msgid "" -"Security group %(security_group_id)s is not associated with the instance " -"%(instance_id)s" +"restore: %(vol_id)s Failed.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/exception.py:723 +#: cinder/backup/drivers/tsm.py:199 #, python-format -msgid "Migration %(migration_id)s could not be found." +msgid "backup: %(vol_id)s Failed. %(path)s is not a block device." msgstr "" -#: cinder/exception.py:727 +#: cinder/backup/drivers/tsm.py:206 #, python-format -msgid "Migration not found for instance %(instance_id)s with status %(status)s." +msgid "backup: %(vol_id)s Failed. Cannot obtain real path to device %(path)s." msgstr "" -#: cinder/exception.py:732 +#: cinder/backup/drivers/tsm.py:213 #, python-format -msgid "Console pool %(pool_id)s could not be found." +msgid "backup: %(vol_id)s Failed. %(path)s is not a file." msgstr "" -#: cinder/exception.py:736 +#: cinder/backup/drivers/tsm.py:260 #, python-format msgid "" -"Console pool of type %(console_type)s for compute host %(compute_host)s " -"on proxy host %(host)s not found." +"backup: %(vol_id)s Failed to remove backup hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/exception.py:742 +#: cinder/backup/drivers/tsm.py:286 #, python-format -msgid "Console %(console_id)s could not be found." +msgid "" +"starting backup of volume: %(volume_id)s to TSM, volume path: " +"%(volume_path)s," msgstr "" -#: cinder/exception.py:746 +#: cinder/backup/drivers/tsm.py:298 #, python-format -msgid "Console for instance %(instance_id)s could not be found." +msgid "" +"backup: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/exception.py:750 +#: cinder/backup/drivers/tsm.py:308 #, python-format msgid "" -"Console for instance %(instance_id)s in pool %(pool_id)s could not be " -"found." +"backup: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/exception.py:755 +#: cinder/backup/drivers/tsm.py:338 #, python-format -msgid "Invalid console type %(console_type)s " +msgid "" +"restore: starting restore of backup from TSM to volume %(volume_id)s, " +"backup: %(backup_id)s" msgstr "" -#: cinder/exception.py:759 -msgid "Zero instance types found." +#: cinder/backup/drivers/tsm.py:352 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/exception.py:763 +#: cinder/backup/drivers/tsm.py:362 #, python-format -msgid "Instance type %(instance_type_id)s could not be found." +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/exception.py:767 +#: cinder/backup/drivers/tsm.py:413 #, python-format -msgid "Instance type with name %(instance_type_name)s could not be found." 
+msgid "" +"delete: %(vol_id)s Failed to run dsmc with stdout: %(out)s\n" +" stderr: %(err)s" msgstr ""
-#: cinder/exception.py:772 +#: cinder/backup/drivers/tsm.py:421 #, python-format -msgid "Flavor %(flavor_id)s could not be found." +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments with " +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr ""
-#: cinder/exception.py:776 +#: cinder/backup/drivers/tsm.py:432 #, python-format -msgid "Cell %(cell_id)s could not be found." +msgid "" +"delete: %(vol_id)s Failed with stdout: %(out)s\n" +" stderr: %(err)s" msgstr ""
-#: cinder/exception.py:780 +#: cinder/brick/exception.py:55 #, python-format -msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgid "Exception in string format operation. msg='%s'" msgstr ""
-#: cinder/exception.py:784 -#, python-format -msgid "Scheduler cost function %(cost_fn_str)s could not be found." +#: cinder/brick/exception.py:85 +msgid "We are unable to locate any Fibre Channel devices." msgstr ""
-#: cinder/exception.py:789 -#, python-format -msgid "Scheduler weight flag not found: %(flag_name)s" +#: cinder/brick/exception.py:89 +msgid "Unable to find a Fibre Channel volume device." msgstr ""
-#: cinder/exception.py:793 +#: cinder/brick/exception.py:93 #, python-format -msgid "Instance %(instance_id)s has no metadata with key %(metadata_key)s." +msgid "Volume device not found at %(device)s." msgstr ""
-#: cinder/exception.py:798 +#: cinder/brick/exception.py:97 #, python-format -msgid "" -"Instance Type %(instance_type_id)s has no extra specs with key " -"%(extra_specs_key)s." +msgid "Unable to find Volume Group: %(vg_name)s" msgstr ""
-#: cinder/exception.py:803 -msgid "LDAP object could not be found" +#: cinder/brick/exception.py:101 +#, python-format +msgid "Failed to create Volume Group: %(vg_name)s" msgstr ""
-#: cinder/exception.py:807 +#: cinder/brick/exception.py:105 #, python-format -msgid "LDAP user %(user_id)s could not be found." +msgid "Failed to create iscsi target for volume %(volume_id)s." msgstr ""
-#: cinder/exception.py:811 +#: cinder/brick/exception.py:109 #, python-format -msgid "LDAP group %(group_id)s could not be found." +msgid "Failed to remove iscsi target for volume %(volume_id)s." msgstr ""
-#: cinder/exception.py:815 +#: cinder/brick/exception.py:113 #, python-format -msgid "LDAP user %(user_id)s is not a member of group %(group_id)s." +msgid "Failed to attach iSCSI target for volume %(volume_id)s." msgstr ""
-#: cinder/exception.py:819 +#: cinder/brick/exception.py:117 #, python-format -msgid "File %(file_path)s could not be found." +msgid "Connect to volume via protocol %(protocol)s not supported." msgstr ""
-#: cinder/exception.py:823 -msgid "Zero files could be found." +#: cinder/brick/initiator/connector.py:127 +#, python-format +msgid "Invalid InitiatorConnector protocol specified %(protocol)s" msgstr ""
-#: cinder/exception.py:827 +#: cinder/brick/initiator/connector.py:140 #, python-format -msgid "Virtual switch associated with the network adapter %(adapter)s not found." +msgid "Failed to access the device on the path %(path)s: %(error)s %(info)s." msgstr ""
-#: cinder/exception.py:832 +#: cinder/brick/initiator/connector.py:229 #, python-format -msgid "Network adapter %(adapter)s could not be found." +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. Try" +" number: %(tries)s" msgstr ""
-#: cinder/exception.py:836 +#: cinder/brick/initiator/connector.py:242 #, python-format -msgid "Class %(class_name)s could not be found: %(exception)s" +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" msgstr ""
-#: cinder/exception.py:840 -msgid "Action not allowed." +#: cinder/brick/initiator/connector.py:317 +#, python-format +msgid "Could not find the iSCSI Initiator File %s" msgstr ""
-#: cinder/exception.py:844 -#, python-format -msgid "Unable to use global role %(role_id)s" +#: cinder/brick/initiator/connector.py:609 +msgid "We are unable to locate any Fibre Channel devices" msgstr ""
-#: cinder/exception.py:848 -msgid "Rotation is not allowed for snapshots" +#: cinder/brick/initiator/connector.py:619 +#, python-format +msgid "Looking for Fibre Channel dev %(device)s" msgstr ""
-#: cinder/exception.py:852 -msgid "Rotation param is required for backup image_type" +#: cinder/brick/initiator/connector.py:629 +msgid "Fibre Channel volume device not found." msgstr ""
-#: cinder/exception.py:861 +#: cinder/brick/initiator/connector.py:633 #, python-format -msgid "Key pair %(key_name)s already exists." +msgid "Fibre volume not yet found. Will rescan & retry. Try number: %(tries)s" msgstr ""
-#: cinder/exception.py:865 +#: cinder/brick/initiator/connector.py:649 #, python-format -msgid "User %(user)s already exists." +msgid "Found Fibre Channel volume %(name)s (after %(tries)s rescans)" msgstr ""
-#: cinder/exception.py:869 +#: cinder/brick/initiator/connector.py:658 #, python-format -msgid "LDAP user %(user)s already exists." +msgid "Multipath device discovered %(device)s" msgstr ""
-#: cinder/exception.py:873 +#: cinder/brick/initiator/connector.py:776 #, python-format -msgid "LDAP group %(group)s already exists." +msgid "AoE volume not yet found at: %(path)s. Try number: %(tries)s" msgstr ""
-#: cinder/exception.py:877 +#: cinder/brick/initiator/connector.py:789 #, python-format -msgid "User %(uid)s is already a member of the group %(group_dn)s" +msgid "Found AoE device %(path)s (after %(tries)s rediscovers)" msgstr ""
-#: cinder/exception.py:882 +#: cinder/brick/initiator/connector.py:815 #, python-format -msgid "Project %(project)s already exists." +msgid "aoe-discover: stdout=%(out)s stderr=%(err)s" msgstr ""
-#: cinder/exception.py:886 +#: cinder/brick/initiator/connector.py:825 #, python-format -msgid "Instance %(name)s already exists." +msgid "aoe-revalidate %(dev)s: stdout=%(out)s stderr=%(err)s" msgstr ""
-#: cinder/exception.py:890 +#: cinder/brick/initiator/connector.py:834 #, python-format -msgid "Instance Type %(name)s already exists." +msgid "aoe-flush %(dev)s: stdout=%(out)s stderr=%(err)s" msgstr ""
-#: cinder/exception.py:894 -#, python-format -msgid "Volume Type %(name)s already exists." +#: cinder/brick/initiator/connector.py:858 +msgid "" +"Connection details not present. RemoteFsClient may not initialize " +"properly."
msgstr ""
-#: cinder/exception.py:898 -#, python-format -msgid "%(path)s is on shared storage: %(reason)s" +#: cinder/brick/initiator/connector.py:915 +msgid "Invalid connection_properties specified, no device_path attribute" msgstr ""
-#: cinder/exception.py:902 -msgid "Migration error" +#: cinder/brick/initiator/linuxfc.py:50 cinder/brick/initiator/linuxfc.py:56 +msgid "systool is not installed" msgstr ""
-#: cinder/exception.py:906 +#: cinder/brick/initiator/linuxscsi.py:99 +#: cinder/brick/initiator/linuxscsi.py:107 +#: cinder/brick/initiator/linuxscsi.py:124 #, python-format -msgid "Malformed message body: %(reason)s" +msgid "multipath call failed exit (%(code)s)" msgstr ""
-#: cinder/exception.py:910 +#: cinder/brick/initiator/linuxscsi.py:145 #, python-format -msgid "Could not find config at %(path)s" +msgid "Couldn't find multipath device %(line)s" msgstr ""
-#: cinder/exception.py:914 +#: cinder/brick/initiator/linuxscsi.py:149 #, python-format -msgid "Could not load paste app '%(name)s' from %(path)s" +msgid "Found multipath device = %(mdev)s" msgstr ""
-#: cinder/exception.py:918 -msgid "When resizing, instances must change size!" +#: cinder/brick/iscsi/iscsi.py:140 +msgid "Attempting recreate of backing lun..." msgstr ""
-#: cinder/exception.py:922 -msgid "Image is larger than instance type allows" +#: cinder/brick/iscsi/iscsi.py:158 +#, python-format +msgid "" +"Failed to recover attempt to create iscsi backing lun for volume " +"id:%(vol_id)s: %(e)s" msgstr ""
-#: cinder/exception.py:926 -msgid "1 or more Zones could not complete the request" +#: cinder/brick/iscsi/iscsi.py:177 +#, python-format +msgid "Creating iscsi_target for: %s" msgstr ""
-#: cinder/exception.py:930 -msgid "Instance type's memory is too small for requested image." +#: cinder/brick/iscsi/iscsi.py:184 +#, python-format +msgid "" +"Created volume path %(vp)s,\n" +"content: %(vc)s" msgstr ""
-#: cinder/exception.py:934 -msgid "Instance type's disk is too small for requested image." +#: cinder/brick/iscsi/iscsi.py:216 cinder/brick/iscsi/iscsi.py:365 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s" msgstr ""
-#: cinder/exception.py:938 +#: cinder/brick/iscsi/iscsi.py:227 #, python-format -msgid "Insufficient free memory on compute node to start %(uuid)s." +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" msgstr ""
-#: cinder/exception.py:942 -msgid "Could not fetch bandwidth/cpu/disk metrics for this host." +#: cinder/brick/iscsi/iscsi.py:258 +#, python-format +msgid "Removing iscsi_target for: %s" msgstr ""
-#: cinder/exception.py:946 +#: cinder/brick/iscsi/iscsi.py:262 #, python-format -msgid "No valid host was found. %(reason)s" +msgid "Volume path %s does not exist, nothing to remove." msgstr ""
-#: cinder/exception.py:950 +#: cinder/brick/iscsi/iscsi.py:280 #, python-format -msgid "Host %(host)s is not up or doesn't exist." +msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s" msgstr ""
-#: cinder/exception.py:954 -msgid "Quota exceeded" +#: cinder/brick/iscsi/iscsi.py:290 cinder/brick/iscsi/iscsi.py:550 +msgid "valid iqn needed for show_target" msgstr ""
-#: cinder/exception.py:958 +#: cinder/brick/iscsi/iscsi.py:375 #, python-format -msgid "" -"Aggregate %(aggregate_id)s: action '%(action)s' caused an error: " -"%(reason)s."
+msgid "Removing iscsi_target for volume: %s" msgstr "" -#: cinder/exception.py:963 -#, python-format -msgid "Aggregate %(aggregate_id)s could not be found." +#: cinder/brick/iscsi/iscsi.py:469 +msgid "cinder-rtstool is not installed correctly" msgstr "" -#: cinder/exception.py:967 +#: cinder/brick/iscsi/iscsi.py:489 #, python-format -msgid "Aggregate %(aggregate_name)s already exists." +msgid "Creating iscsi_target for volume: %s" msgstr "" -#: cinder/exception.py:971 +#: cinder/brick/iscsi/iscsi.py:513 cinder/brick/iscsi/iscsi.py:522 #, python-format -msgid "Aggregate %(aggregate_id)s has no host %(host)s." +msgid "Failed to create iscsi target for volume id:%s." msgstr "" -#: cinder/exception.py:975 +#: cinder/brick/iscsi/iscsi.py:532 #, python-format -msgid "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s." +msgid "Removing iscsi_target: %s" msgstr "" -#: cinder/exception.py:980 +#: cinder/brick/iscsi/iscsi.py:542 #, python-format -msgid "Host %(host)s already member of another aggregate." +msgid "Failed to remove iscsi target for volume id:%s." msgstr "" -#: cinder/exception.py:984 +#: cinder/brick/iscsi/iscsi.py:571 #, python-format -msgid "Aggregate %(aggregate_id)s already has host %(host)s." +msgid "Failed to add initiator iqn %s to target" msgstr "" -#: cinder/exception.py:988 -#, python-format -msgid "Detected more than one volume with name %(vol_name)s" +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" msgstr "" -#: cinder/exception.py:992 +#: cinder/brick/local_dev/lvm.py:76 cinder/brick/local_dev/lvm.py:158 +#: cinder/brick/local_dev/lvm.py:474 cinder/brick/local_dev/lvm.py:503 +#: cinder/brick/local_dev/lvm.py:546 cinder/brick/local_dev/lvm.py:609 #, python-format -msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" -msgstr "" - -#: cinder/exception.py:997 -msgid "Unable to create instance type" +msgid "Cmd :%s" msgstr "" -#: cinder/exception.py:1001 -msgid "Bad response from SolidFire API" +#: cinder/brick/local_dev/lvm.py:77 cinder/brick/local_dev/lvm.py:159 +#: cinder/brick/local_dev/lvm.py:475 cinder/brick/local_dev/lvm.py:504 +#: cinder/brick/local_dev/lvm.py:547 cinder/brick/local_dev/lvm.py:610 +#, python-format +msgid "StdOut :%s" msgstr "" -#: cinder/exception.py:1005 +#: cinder/brick/local_dev/lvm.py:78 cinder/brick/local_dev/lvm.py:160 +#: cinder/brick/local_dev/lvm.py:476 cinder/brick/local_dev/lvm.py:505 +#: cinder/brick/local_dev/lvm.py:548 cinder/brick/local_dev/lvm.py:611 #, python-format -msgid "Error in SolidFire API response: status=%(status)s" +msgid "StdErr :%s" msgstr "" -#: cinder/exception.py:1009 +#: cinder/brick/local_dev/lvm.py:82 #, python-format -msgid "Error in SolidFire API response: data=%(data)s" +msgid "Unable to locate Volume Group %s" msgstr "" -#: cinder/exception.py:1013 -#, python-format -msgid "Detected existing vlan with id %(vlan)d" +#: cinder/brick/local_dev/lvm.py:157 +msgid "Error querying thin pool about data_percent" msgstr "" -#: cinder/exception.py:1017 +#: cinder/brick/local_dev/lvm.py:370 #, python-format -msgid "Instance %(instance_id)s could not be found." +msgid "Unable to find VG: %s" msgstr "" -#: cinder/exception.py:1021 -#, python-format -msgid "Invalid id: %(val)s (expecting \"i-...\")." +#: cinder/brick/local_dev/lvm.py:420 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." 
msgstr "" -#: cinder/exception.py:1025 -#, python-format -msgid "Could not fetch image %(image)s" +#: cinder/brick/local_dev/lvm.py:473 +msgid "Error creating Volume" msgstr "" -#: cinder/log.py:315 +#: cinder/brick/local_dev/lvm.py:489 #, python-format -msgid "syslog facility must be one of: %s" +msgid "Unable to find LV: %s" msgstr "" -#: cinder/manager.py:146 -#, python-format -msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +#: cinder/brick/local_dev/lvm.py:502 +msgid "Error creating snapshot" msgstr "" -#: cinder/manager.py:152 -#, python-format -msgid "Running periodic task %(full_task_name)s" +#: cinder/brick/local_dev/lvm.py:545 +msgid "Error activating LV" msgstr "" -#: cinder/manager.py:159 +#: cinder/brick/local_dev/lvm.py:563 #, python-format -msgid "Error during %(full_task_name)s: %(e)s" +msgid "Error reported running lvremove: CMD: %(command)s, RESPONSE: %(response)s" msgstr "" -#: cinder/manager.py:203 -msgid "Notifying Schedulers of capabilities ..." +#: cinder/brick/local_dev/lvm.py:568 +msgid "Attempting udev settle and retry of lvremove..." msgstr "" -#: cinder/policy.py:30 -msgid "JSON file representing policy" +#: cinder/brick/local_dev/lvm.py:608 +msgid "Error extending Volume" msgstr "" -#: cinder/policy.py:33 -msgid "Rule checked when requested rule is not found" +#: cinder/brick/remotefs/remotefs.py:39 +msgid "nfs_mount_point_base required" msgstr "" -#: cinder/service.py:137 -msgid "SIGTERM received" +#: cinder/brick/remotefs/remotefs.py:45 +msgid "glusterfs_mount_point_base required" msgstr "" -#: cinder/service.py:177 +#: cinder/brick/remotefs/remotefs.py:86 #, python-format -msgid "Starting %(topic)s node (version %(vcs_string)s)" +msgid "Already mounted: %s" msgstr "" -#: cinder/service.py:195 -#, python-format -msgid "Creating Consumer connection for Service %s" +#: cinder/common/config.py:125 +msgid "Deploy v1 of the Cinder API." msgstr "" -#: cinder/service.py:282 -msgid "Service killed that has no database entry" +#: cinder/common/config.py:128 +msgid "Deploy v2 of the Cinder API." msgstr "" -#: cinder/service.py:319 -msgid "The service database object disappeared, Recreating it." +#: cinder/common/sqlalchemyutils.py:66 +#: cinder/openstack/common/db/sqlalchemy/utils.py:72 +msgid "Id not in sort_keys; is sort_keys unique?" msgstr "" -#: cinder/service.py:334 -msgid "Recovered model server connection!" +#: cinder/common/sqlalchemyutils.py:114 +#: cinder/openstack/common/db/sqlalchemy/utils.py:120 +msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "" -#: cinder/service.py:340 -msgid "model server went away" +#: cinder/compute/nova.py:97 +#, python-format +msgid "Novaclient connection created using URL: %s" msgstr "" -#: cinder/service.py:433 -msgid "Full set of FLAGS:" +#: cinder/db/sqlalchemy/api.py:63 +msgid "Use of empty request context is deprecated" msgstr "" -#: cinder/service.py:440 +#: cinder/db/sqlalchemy/api.py:190 #, python-format -msgid "%(flag)s : FLAG SET " +msgid "Unrecognized read_deleted value '%s'" msgstr "" -#: cinder/utils.py:79 +#: cinder/db/sqlalchemy/api.py:843 #, python-format -msgid "Inner Exception: %s" +msgid "Change will make usage less than 0 for the following resources: %s" msgstr "" -#: cinder/utils.py:165 +#: cinder/db/sqlalchemy/api.py:1842 #, python-format -msgid "Fetching %s" +msgid "VolumeType %s deletion failed, VolumeType in use." 
msgstr "" -#: cinder/utils.py:210 +#: cinder/db/sqlalchemy/api.py:2530 #, python-format -msgid "Got unknown keyword args to utils.execute: %r" +msgid "No backup with id %s" msgstr "" -#: cinder/utils.py:220 -#, python-format -msgid "Running cmd (subprocess): %s" +#: cinder/db/sqlalchemy/api.py:2615 +msgid "Volume must be available" msgstr "" -#: cinder/utils.py:236 cinder/utils.py:315 +#: cinder/db/sqlalchemy/api.py:2639 #, python-format -msgid "Result was %s" +msgid "Volume in unexpected state %s, expected awaiting-transfer" msgstr "" -#: cinder/utils.py:249 +#: cinder/db/sqlalchemy/api.py:2662 #, python-format -msgid "%r failed. Retrying." +msgid "" +"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " +"%(status)s, expected awaiting-transfer" msgstr "" -#: cinder/utils.py:291 -#, python-format -msgid "Running cmd (SSH): %s" +#: cinder/db/sqlalchemy/migration.py:37 +msgid "version should be an integer" msgstr "" -#: cinder/utils.py:293 -msgid "Environment not supported over SSH" +#: cinder/db/sqlalchemy/migration.py:64 +msgid "Upgrade DB using Essex release first." msgstr "" -#: cinder/utils.py:297 -msgid "process_input not supported over SSH" +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:240 +msgid "Exception while creating table." msgstr "" -#: cinder/utils.py:352 -#, python-format -msgid "debug in callback: %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:269 +msgid "Downgrade from initial Cinder install is unsupported." msgstr "" -#: cinder/utils.py:534 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:49 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:74 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:105 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:45 +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:48 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:46 #, python-format -msgid "Link Local address is not found.:%s" +msgid "Table |%s| not created!" msgstr "" -#: cinder/utils.py:537 -#, python-format -msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:127 +msgid "Dropping foreign key reservations_ibfk_1 failed." msgstr "" -#: cinder/utils.py:648 -#, python-format -msgid "Invalid backend: %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:133 +msgid "quota_classes table not dropped" msgstr "" -#: cinder/utils.py:659 -#, python-format -msgid "backend %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:140 +msgid "quota_usages table not dropped" msgstr "" -#: cinder/utils.py:709 -msgid "in looping call" +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:147 +msgid "reservations table not dropped" msgstr "" -#: cinder/utils.py:927 -#, python-format -msgid "Attempting to grab semaphore \"%(lock)s\" for method \"%(method)s\"..." +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:60 +msgid "Exception while creating table 'volume_glance_metadata'" msgstr "" -#: cinder/utils.py:931 -#, python-format -msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." 
+#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:75 +msgid "volume_glance_metadata table not dropped" msgstr ""
-#: cinder/utils.py:935 -#, python-format -msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:68 +msgid "backups table not dropped" msgstr ""
-#: cinder/utils.py:942 -#, python-format -msgid "Got file lock \"%(lock)s\" for method \"%(method)s\"..." +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:58 +msgid "snapshot_metadata table not dropped" msgstr ""
-#: cinder/utils.py:1001 -#, python-format -msgid "Found sentinel %(filename)s for pid %(pid)s" +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:61 +msgid "transfers table not dropped" msgstr ""
-#: cinder/utils.py:1008 -#, python-format -msgid "Cleaned sentinel %(filename)s for pid %(pid)s" +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:31 +msgid "migrations table not dropped" msgstr ""
-#: cinder/utils.py:1023 +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:61 #, python-format -msgid "Found lockfile %(file)s with link count %(count)d" +msgid "Table |%s| not created" msgstr ""
-#: cinder/utils.py:1028 +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:37 #, python-format -msgid "Cleaned lockfile %(file)s with link count %(count)d" +msgid "Exception while dropping table %s." msgstr ""
-#: cinder/utils.py:1138 +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:100 #, python-format -msgid "Expected object of type: %s" +msgid "Exception while creating table %s." msgstr ""
-#: cinder/utils.py:1169 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:34 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:43 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:49 #, python-format -msgid "Invalid server_string: %s" +msgid "Column |%s| not created!" msgstr ""
-#: cinder/utils.py:1298 -#, python-format -msgid "timefunc: '%(name)s' took %(total_time).2f secs" +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:92 +msgid "encryption_key_id column not dropped from volumes" msgstr ""
-#: cinder/utils.py:1330 -msgid "Original exception being dropped" +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:100 +msgid "encryption_key_id column not dropped from snapshots" msgstr ""
-#: cinder/utils.py:1461 -#, python-format -msgid "Class %(fullname)s is deprecated: %(msg)s" +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:105 +msgid "volume_type_id column not dropped from snapshots" msgstr ""
-#: cinder/utils.py:1463 -#, python-format -msgid "Class %(fullname)s is deprecated" +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:113 +msgid "encryption table not dropped" msgstr ""
-#: cinder/utils.py:1495 -#, python-format -msgid "Function %(name)s in %(location)s is deprecated: %(msg)s" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:49 +msgid "Table quality_of_service_specs not created!" msgstr ""
-#: cinder/utils.py:1497 -#, python-format -msgid "Function %(name)s in %(location)s is deprecated" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:60 +msgid "Adding qos_specs_id column to volume type table failed."
msgstr "" -#: cinder/utils.py:1681 -#, python-format -msgid "Could not remove tmpdir: %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:85 +msgid "Dropping foreign key volume_types_ibfk_1 failed" msgstr "" -#: cinder/wsgi.py:97 -#, python-format -msgid "Started %(name)s on %(host)s:%(port)s" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:93 +msgid "Dropping qos_specs_id column failed." msgstr "" -#: cinder/wsgi.py:108 -msgid "Stopping WSGI server." +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:100 +msgid "Dropping quality_of_service_specs table failed." msgstr "" -#: cinder/wsgi.py:111 -msgid "Stopping raw TCP server." +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:59 +msgid "volume_admin_metadata table not dropped" msgstr "" -#: cinder/wsgi.py:117 -#, python-format -msgid "Starting TCP server %(arg0)s on %(host)s:%(port)s" +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:48 +msgid "" +"Found existing 'default' entries in the quota_classes table. Skipping " +"insertion of default values." msgstr "" -#: cinder/wsgi.py:133 -msgid "WSGI server has stopped." +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:72 +msgid "Added default quota class data into the DB." msgstr "" -#: cinder/wsgi.py:211 -msgid "You must implement __call__" +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:74 +msgid "Default quota class data not inserted into the DB." msgstr "" -#: cinder/api/direct.py:218 -msgid "not available" +#: cinder/image/glance.py:161 cinder/image/glance.py:169 +#, python-format +msgid "Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:94 cinder/image/image_utils.py:199 +msgid "'qemu-img info' parsing failed." msgstr "" -#: cinder/api/direct.py:299 +#: cinder/image/image_utils.py:101 #, python-format -msgid "Returned non-serializeable type: %s" +msgid "fmt=%(fmt)s backed by: %(backing_file)s" msgstr "" -#: cinder/api/sizelimit.py:51 -msgid "Request is too large." +#: cinder/image/image_utils.py:109 cinder/image/image_utils.py:192 +#, python-format +msgid "" +"Size is %(image_size)dGB and doesn't fit in a volume of size " +"%(volume_size)dGB." msgstr "" -#: cinder/api/validator.py:142 +#: cinder/image/image_utils.py:157 #, python-format -msgid "%(key)s with value %(value)s failed validator %(validator)s" +msgid "" +"qemu-img is not installed and image is of type %s. Only RAW images can " +"be used if qemu-img is not installed." +msgstr "" + +#: cinder/image/image_utils.py:164 +msgid "" +"qemu-img is not installed and the disk format is not specified. Only RAW" +" images can be used if qemu-img is not installed." msgstr "" -#: cinder/api/ec2/__init__.py:73 +#: cinder/image/image_utils.py:178 #, python-format -msgid "%(code)s: %(message)s" +msgid "Copying image from %(tmp)s to volume %(dest)s - size: %(size)s" msgstr "" -#: cinder/api/ec2/__init__.py:95 +#: cinder/image/image_utils.py:206 #, python-format -msgid "FaultWrapper: %s" +msgid "fmt=%(fmt)s backed by:%(backing_file)s" msgstr "" -#: cinder/api/ec2/__init__.py:170 -msgid "Too many failed authentications." 
+#: cinder/image/image_utils.py:224 +#, python-format +msgid "Converted to %(vol_format)s, but format is now %(file_format)s" msgstr "" -#: cinder/api/ec2/__init__.py:180 +#: cinder/image/image_utils.py:260 #, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:78 msgid "" -"Access key %(access_key)s has had %(failures)d failed authentications and" -" will be locked out for %(lock_mins)d minutes." +"config option keymgr.fixed_key has not been defined: some operations may " +"fail unexpectedly" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:80 +msgid "keymgr.fixed_key not defined" msgstr "" -#: cinder/api/ec2/__init__.py:267 -msgid "Signature not provided" +#: cinder/keymgr/conf_key_mgr.py:134 +#, python-format +msgid "Not deleting key %s" msgstr "" -#: cinder/api/ec2/__init__.py:271 -msgid "Access key not provided" +#: cinder/openstack/common/eventlet_backdoor.py:140 +#, python-format +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" msgstr "" -#: cinder/api/ec2/__init__.py:306 cinder/api/ec2/__init__.py:319 -msgid "Failure communicating with keystone" +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" msgstr "" -#: cinder/api/ec2/__init__.py:388 +#: cinder/openstack/common/fileutils.py:64 #, python-format -msgid "Authentication Failure: %s" +msgid "Reloading cached file %s" +msgstr "" + +#: cinder/openstack/common/gettextutils.py:252 +msgid "Message objects do not support addition." +msgstr "" + +#: cinder/openstack/common/gettextutils.py:261 +msgid "" +"Message objects do not support str() because they may contain non-ascii " +"characters. Please use unicode() or translate() instead." +msgstr "" + +#: cinder/openstack/common/imageutils.py:96 +msgid "Snapshot list encountered but no header found!" msgstr "" -#: cinder/api/ec2/__init__.py:404 +#: cinder/openstack/common/lockutils.py:102 #, python-format -msgid "Authenticated Request For %(uname)s:%(pname)s)" +msgid "Could not release the acquired lock `%s`" msgstr "" -#: cinder/api/ec2/__init__.py:435 +#: cinder/openstack/common/lockutils.py:189 #, python-format -msgid "action: %s" +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." msgstr "" -#: cinder/api/ec2/__init__.py:437 +#: cinder/openstack/common/lockutils.py:200 #, python-format -msgid "arg: %(key)s\t\tval: %(value)s" +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." msgstr "" -#: cinder/api/ec2/__init__.py:512 +#: cinder/openstack/common/lockutils.py:227 #, python-format -msgid "Unauthorized request for controller=%(controller)s and action=%(action)s" +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." msgstr "" -#: cinder/api/ec2/__init__.py:584 +#: cinder/openstack/common/lockutils.py:235 #, python-format -msgid "InstanceNotFound raised: %s" +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." 
msgstr "" -#: cinder/api/ec2/__init__.py:590 +#: cinder/openstack/common/log.py:301 #, python-format -msgid "VolumeNotFound raised: %s" +msgid "Deprecated: %s" msgstr "" -#: cinder/api/ec2/__init__.py:596 +#: cinder/openstack/common/log.py:402 #, python-format -msgid "SnapshotNotFound raised: %s" +msgid "Error loading logging config %(log_config)s: %(err_msg)s" msgstr "" -#: cinder/api/ec2/__init__.py:602 +#: cinder/openstack/common/log.py:453 #, python-format -msgid "NotFound raised: %s" +msgid "syslog facility must be one of: %s" msgstr "" -#: cinder/api/ec2/__init__.py:605 +#: cinder/openstack/common/log.py:623 #, python-format -msgid "EC2APIError raised: %s" +msgid "Fatal call to deprecated config: %(msg)s" msgstr "" -#: cinder/api/ec2/__init__.py:613 +#: cinder/openstack/common/loopingcall.py:82 #, python-format -msgid "KeyPairExists raised: %s" +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:89 +#: cinder/tests/brick/test_brick_connector.py:466 +msgid "in fixed duration looping call" msgstr "" -#: cinder/api/ec2/__init__.py:617 +#: cinder/openstack/common/loopingcall.py:129 #, python-format -msgid "InvalidParameterValue raised: %s" +msgid "Dynamic looping call sleeping for %.02f seconds" msgstr "" -#: cinder/api/ec2/__init__.py:621 -#, python-format -msgid "InvalidPortRange raised: %s" +#: cinder/openstack/common/loopingcall.py:136 +msgid "in dynamic looping call" msgstr "" -#: cinder/api/ec2/__init__.py:625 +#: cinder/openstack/common/periodic_task.py:43 #, python-format -msgid "NotAuthorized raised: %s" +msgid "Unexpected argument for periodic task creation: %(arg)s." msgstr "" -#: cinder/api/ec2/__init__.py:629 +#: cinder/openstack/common/periodic_task.py:134 #, python-format -msgid "InvalidRequest raised: %s" +msgid "Skipping periodic task %(task)s because its interval is negative" msgstr "" -#: cinder/api/ec2/__init__.py:633 +#: cinder/openstack/common/periodic_task.py:139 #, python-format -msgid "QuotaError raised: %s" +msgid "Skipping periodic task %(task)s because it is disabled" msgstr "" -#: cinder/api/ec2/__init__.py:637 +#: cinder/openstack/common/periodic_task.py:177 #, python-format -msgid "Invalid id: bogus (expecting \"i-...\"): %s" +msgid "Running periodic task %(full_task_name)s" msgstr "" -#: cinder/api/ec2/__init__.py:646 +#: cinder/openstack/common/periodic_task.py:186 #, python-format -msgid "Unexpected error raised: %s" +msgid "Error during %(full_task_name)s: %(e)s" msgstr "" -#: cinder/api/ec2/__init__.py:647 +#: cinder/openstack/common/policy.py:149 #, python-format -msgid "Environment: %s" +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." msgstr "" -#: cinder/api/ec2/__init__.py:649 cinder/api/metadata/handler.py:248 -msgid "An unknown error has occurred. Please try your request again." 
+#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" msgstr ""
-#: cinder/api/ec2/apirequest.py:64 +#: cinder/openstack/common/policy.py:173 #, python-format -msgid "Unsupported API request: controller = %(controller)s, action = %(action)s" +msgid "Inheritance-based rules are deprecated; update _check_%s" msgstr ""
-#: cinder/api/ec2/cloud.py:336 +#: cinder/openstack/common/policy.py:180 #, python-format -msgid "Create snapshot of volume %s" +msgid "No handler for matches of kind %s" msgstr ""
-#: cinder/api/ec2/cloud.py:372 +#: cinder/openstack/common/processutils.py:127 #, python-format -msgid "" -"Value (%s) for KeyName is invalid. Content limited to Alphanumeric " -"character, spaces, dashes, and underscore." +msgid "Got unknown keyword args to utils.execute: %r" msgstr ""
-#: cinder/api/ec2/cloud.py:378 +#: cinder/openstack/common/processutils.py:142 #, python-format -msgid "Value (%s) for Keyname is invalid. Length exceeds maximum of 255." +msgid "Running cmd (subprocess): %s" msgstr ""
-#: cinder/api/ec2/cloud.py:382 +#: cinder/openstack/common/processutils.py:167 +#: cinder/openstack/common/processutils.py:239 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:345 #, python-format -msgid "Create key pair %s" +msgid "Result was %s" msgstr ""
-#: cinder/api/ec2/cloud.py:391 +#: cinder/openstack/common/processutils.py:179 #, python-format -msgid "Import key %s" +msgid "%r failed. Retrying." msgstr ""
-#: cinder/api/ec2/cloud.py:409 +#: cinder/openstack/common/processutils.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:318 #, python-format -msgid "Delete key pair %s" +msgid "Running cmd (SSH): %s" msgstr ""
-#: cinder/api/ec2/cloud.py:551 -msgid "Invalid CIDR" +#: cinder/openstack/common/processutils.py:220 +msgid "Environment not supported over SSH" msgstr ""
-#: cinder/api/ec2/cloud.py:639 cinder/api/ec2/cloud.py:693 -#: cinder/api/ec2/cloud.py:800 -msgid "Not enough parameters, need group_name or group_id" +#: cinder/openstack/common/processutils.py:224 +msgid "process_input not supported over SSH" msgstr ""
-#: cinder/api/ec2/cloud.py:654 -#: cinder/api/openstack/compute/contrib/security_groups.py:517 +#: cinder/openstack/common/service.py:175 +#: cinder/openstack/common/service.py:269 #, python-format -msgid "Revoke security group ingress %s" +msgid "Caught %s, exiting" msgstr ""
-#: cinder/api/ec2/cloud.py:666 cinder/api/ec2/cloud.py:719 -#, python-format -msgid "%s Not enough parameters to build a valid rule" +#: cinder/openstack/common/service.py:187 +msgid "Exception during rpc cleanup." msgstr ""
-#: cinder/api/ec2/cloud.py:684 cinder/api/ec2/cloud.py:744 -msgid "No rule for the specified parameters." +#: cinder/openstack/common/service.py:238 +msgid "Parent process has died unexpectedly, exiting" msgstr ""
-#: cinder/api/ec2/cloud.py:708 -#: cinder/api/openstack/compute/contrib/security_groups.py:354 -#, python-format -msgid "Authorize security group ingress %s" +#: cinder/openstack/common/service.py:275 +msgid "Unhandled exception" +msgstr "" + +#: cinder/openstack/common/service.py:308 +msgid "Forking too fast, sleeping" msgstr ""
-#: cinder/api/ec2/cloud.py:725 +#: cinder/openstack/common/service.py:327 #, python-format -msgid "%s - This rule already exists in group" +msgid "Started child %d" msgstr ""
-#: cinder/api/ec2/cloud.py:769 +#: cinder/openstack/common/service.py:337 #, python-format -msgid "" -"Value (%s) for parameter GroupName is invalid. Content limited to " -"Alphanumeric characters, spaces, dashes, and underscores." +msgid "Starting %d workers" msgstr ""
-#: cinder/api/ec2/cloud.py:776 +#: cinder/openstack/common/service.py:354 #, python-format -msgid "" -"Value (%s) for parameter GroupName is invalid. Length exceeds maximum of " -"255." +msgid "Child %(pid)d killed by signal %(sig)d" msgstr ""
-#: cinder/api/ec2/cloud.py:780 -#: cinder/api/openstack/compute/contrib/security_groups.py:292 +#: cinder/openstack/common/service.py:358 #, python-format -msgid "Create Security Group %s" +msgid "Child %(pid)s exited with status %(code)d" msgstr ""
-#: cinder/api/ec2/cloud.py:783 +#: cinder/openstack/common/service.py:362 #, python-format -msgid "group %s already exists" +msgid "pid %d not in child list" msgstr ""
-#: cinder/api/ec2/cloud.py:815 -#: cinder/api/openstack/compute/contrib/security_groups.py:245 +#: cinder/openstack/common/service.py:392 #, python-format -msgid "Delete security group %s" +msgid "Caught %s, stopping children" msgstr ""
-#: cinder/api/ec2/cloud.py:823 cinder/compute/manager.py:1630 +#: cinder/openstack/common/service.py:410 #, python-format -msgid "Get console output for instance %s" +msgid "Waiting on %d children to exit" msgstr ""
-#: cinder/api/ec2/cloud.py:894 +#: cinder/openstack/common/sslutils.py:98 #, python-format -msgid "Create volume from snapshot %s" +msgid "Invalid SSL version : %s" msgstr ""
-#: cinder/api/ec2/cloud.py:898 cinder/api/openstack/compute/contrib/volumes.py:186 -#: cinder/api/openstack/volume/volumes.py:222 +#: cinder/openstack/common/strutils.py:86 #, python-format -msgid "Create volume of %s GB" +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" msgstr ""
-#: cinder/api/ec2/cloud.py:921 -msgid "Delete Failed" +#: cinder/openstack/common/strutils.py:182 +#, python-format +msgid "Invalid string format: %s" msgstr ""
-#: cinder/api/ec2/cloud.py:931 +#: cinder/openstack/common/strutils.py:189 #, python-format -msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" +msgid "Unknown byte multiplier: %s" msgstr ""
-#: cinder/api/ec2/cloud.py:939 -msgid "Attach Failed." +#: cinder/openstack/common/versionutils.py:69 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and " +"may be removed in %(remove_in)s." msgstr ""
-#: cinder/api/ec2/cloud.py:952 cinder/api/openstack/compute/contrib/volumes.py:366 +#: cinder/openstack/common/versionutils.py:73 #, python-format -msgid "Detach volume %s" +msgid "" +"%(what)s is deprecated as of %(as_of)s and may be removed in " +"%(remove_in)s. It will not be superseded." msgstr ""
-#: cinder/api/ec2/cloud.py:959 -msgid "Detach Volume Failed." +#: cinder/openstack/common/crypto/utils.py:29 +msgid "An unknown error occurred in crypto utils." msgstr ""
-#: cinder/api/ec2/cloud.py:984 cinder/api/ec2/cloud.py:1041 -#: cinder/api/ec2/cloud.py:1518 cinder/api/ec2/cloud.py:1533 +#: cinder/openstack/common/crypto/utils.py:36 #, python-format -msgid "attribute not supported: %s" +msgid "Block size of %(given)d is too big, max = %(maximum)d" msgstr ""
-#: cinder/api/ec2/cloud.py:1107 +#: cinder/openstack/common/crypto/utils.py:45 #, python-format -msgid "vol = %s\n" +msgid "Length of %(given)d is too long, max = %(maximum)d" msgstr ""
-#: cinder/api/ec2/cloud.py:1255 -msgid "Allocate address" +#: cinder/openstack/common/db/exception.py:44 +msgid "Invalid Parameter: Unicode is not supported by the current database."
msgstr ""
-#: cinder/api/ec2/cloud.py:1267 -#, python-format -msgid "Release address %s" +#: cinder/openstack/common/db/sqlalchemy/session.py:487 +msgid "DB exception wrapped." msgstr ""
-#: cinder/api/ec2/cloud.py:1272 +#: cinder/openstack/common/db/sqlalchemy/session.py:538 #, python-format -msgid "Associate address %(public_ip)s to instance %(instance_id)s" +msgid "Got mysql server has gone away: %s" msgstr ""
-#: cinder/api/ec2/cloud.py:1282 +#: cinder/openstack/common/db/sqlalchemy/session.py:610 #, python-format -msgid "Disassociate address %s" +msgid "SQL connection failed. %s attempts left." msgstr ""
-#: cinder/api/ec2/cloud.py:1308 -msgid "Image must be available" +#: cinder/openstack/common/db/sqlalchemy/utils.py:33 +msgid "Sort key supplied was not valid." msgstr ""
-#: cinder/api/ec2/cloud.py:1329 -msgid "Going to start terminating instances" +#: cinder/openstack/common/notifier/api.py:129 +#, python-format +msgid "%s not in valid priorities" msgstr ""
-#: cinder/api/ec2/cloud.py:1343 +#: cinder/openstack/common/notifier/api.py:145 #, python-format -msgid "Reboot instance %r" +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" msgstr ""
-#: cinder/api/ec2/cloud.py:1354 -msgid "Going to stop instances" +#: cinder/openstack/common/notifier/api.py:164 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." msgstr ""
-#: cinder/api/ec2/cloud.py:1365 -msgid "Going to start instances" +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead." msgstr ""
-#: cinder/api/ec2/cloud.py:1455 +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 #, python-format -msgid "De-registering image %s" -msgstr "" - -#: cinder/api/ec2/cloud.py:1471 -msgid "imageLocation is required" +msgid "Could not send notification to %(topic)s. Payload=%(message)s" msgstr ""
-#: cinder/api/ec2/cloud.py:1490 +#: cinder/openstack/common/rpc/__init__.py:105 #, python-format -msgid "Registered image %(image_location)s with id %(image_id)s" +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." msgstr ""
-#: cinder/api/ec2/cloud.py:1536 -msgid "user or group not specified" +#: cinder/openstack/common/rpc/amqp.py:83 +msgid "Pool creating new connection" msgstr ""
-#: cinder/api/ec2/cloud.py:1538 -msgid "only group \"all\" is supported" +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" msgstr ""
-#: cinder/api/ec2/cloud.py:1540 -msgid "operation_type must be add or remove" +#: cinder/openstack/common/rpc/amqp.py:216 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshold: %d. There " +"could be a MulticallProxyWaiter leak." msgstr ""
-#: cinder/api/ec2/cloud.py:1542 +#: cinder/openstack/common/rpc/amqp.py:299 #, python-format -msgid "Updating image %s publicity" +msgid "unpacked context: %s" msgstr ""
-#: cinder/api/ec2/cloud.py:1555 +#: cinder/openstack/common/rpc/amqp.py:345 #, python-format -msgid "Not allowed to modify attributes for image %s" +msgid "UNIQUE_ID is %s."
msgstr "" -#: cinder/api/ec2/cloud.py:1603 +#: cinder/openstack/common/rpc/amqp.py:414 #, python-format -msgid "Couldn't stop instance with in %d sec" +msgid "received %s" msgstr "" -#: cinder/api/metadata/handler.py:246 cinder/api/metadata/handler.py:253 +#: cinder/openstack/common/rpc/amqp.py:422 #, python-format -msgid "Failed to get metadata for ip: %s" +msgid "no method for message: %s" msgstr "" -#: cinder/api/openstack/__init__.py:43 +#: cinder/openstack/common/rpc/amqp.py:423 #, python-format -msgid "Caught error: %s" +msgid "No method for message: %s" msgstr "" -#: cinder/api/openstack/__init__.py:45 cinder/api/openstack/wsgi.py:886 +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:280 #, python-format -msgid "%(url)s returned with HTTP %(status)d" +msgid "Expected exception during message handling (%s)" msgstr "" -#: cinder/api/openstack/__init__.py:94 -msgid "Must specify an ExtensionManager class" +#: cinder/openstack/common/rpc/amqp.py:459 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" msgstr "" -#: cinder/api/openstack/__init__.py:105 +#: cinder/openstack/common/rpc/amqp.py:594 #, python-format -msgid "Extended resource: %s" +msgid "Making synchronous call on %s ..." msgstr "" -#: cinder/api/openstack/__init__.py:130 +#: cinder/openstack/common/rpc/amqp.py:597 #, python-format -msgid "" -"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " -"resource" +msgid "MSG_ID is %s" msgstr "" -#: cinder/api/openstack/__init__.py:135 +#: cinder/openstack/common/rpc/amqp.py:631 #, python-format -msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgid "Making asynchronous cast on %s..." msgstr "" -#: cinder/api/openstack/auth.py:90 -#, python-format -msgid "%(user_id)s could not be found with token '%(token)s'" +#: cinder/openstack/common/rpc/amqp.py:640 +msgid "Making asynchronous fanout cast..." msgstr "" -#: cinder/api/openstack/auth.py:134 +#: cinder/openstack/common/rpc/amqp.py:668 #, python-format -msgid "%(user_id)s must be an admin or a member of %(project_id)s" +msgid "Sending %(event_type)s on %(topic)s" msgstr "" -#: cinder/api/openstack/auth.py:152 -msgid "Authentication requests must be made against a version root (e.g. /v2)." +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." msgstr "" -#: cinder/api/openstack/auth.py:167 +#: cinder/openstack/common/rpc/common.py:104 #, python-format -msgid "Could not find %s in request." +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." msgstr "" -#: cinder/api/openstack/auth.py:191 +#: cinder/openstack/common/rpc/common.py:121 #, python-format -msgid "Successfully authenticated '%s'" +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" msgstr "" -#: cinder/api/openstack/auth.py:241 -msgid "User not found with provided API key." +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" msgstr "" -#: cinder/api/openstack/auth.py:258 +#: cinder/openstack/common/rpc/common.py:143 #, python-format -msgid "Provided API key is valid, but not for user '%(username)s'" -msgstr "" - -#: cinder/api/openstack/common.py:133 cinder/api/openstack/common.py:167 -msgid "limit param must be an integer" +msgid "Found duplicate message(%(msg_id)s). Skipping it." 
msgstr "" -#: cinder/api/openstack/common.py:136 cinder/api/openstack/common.py:171 -msgid "limit param must be positive" +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." msgstr "" -#: cinder/api/openstack/common.py:161 -msgid "offset param must be an integer" +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." msgstr "" -#: cinder/api/openstack/common.py:175 -msgid "offset param must be positive" +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." msgstr "" -#: cinder/api/openstack/common.py:203 +#: cinder/openstack/common/rpc/common.py:280 #, python-format -msgid "marker [%s] not found" +msgid "Failed to sanitize %(item)s. Key error %(err)s" msgstr "" -#: cinder/api/openstack/common.py:243 +#: cinder/openstack/common/rpc/common.py:302 #, python-format -msgid "href %s does not contain version" +msgid "Returning exception %s to caller" msgstr "" -#: cinder/api/openstack/common.py:278 -msgid "Image metadata limit exceeded" +#: cinder/openstack/common/rpc/impl_kombu.py:166 +#: cinder/openstack/common/rpc/impl_qpid.py:164 +msgid "Failed to process message... skipping it." msgstr "" -#: cinder/api/openstack/common.py:295 +#: cinder/openstack/common/rpc/impl_kombu.py:477 #, python-format -msgid "Converting nw_info: %s" +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" msgstr "" -#: cinder/api/openstack/common.py:305 +#: cinder/openstack/common/rpc/impl_kombu.py:499 #, python-format -msgid "Converted networks: %s" +msgid "Connected to AMQP server on %(hostname)s:%(port)d" msgstr "" -#: cinder/api/openstack/common.py:338 +#: cinder/openstack/common/rpc/impl_kombu.py:536 #, python-format -msgid "Cannot '%(action)s' while instance is in %(attr)s %(state)s" +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" msgstr "" -#: cinder/api/openstack/common.py:341 +#: cinder/openstack/common/rpc/impl_kombu.py:552 #, python-format -msgid "Instance is in an invalid state for '%(action)s'" +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." msgstr "" -#: cinder/api/openstack/common.py:421 -msgid "Rejecting snapshot request, snapshots currently disabled" +#: cinder/openstack/common/rpc/impl_kombu.py:606 +#: cinder/openstack/common/rpc/impl_qpid.py:507 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" msgstr "" -#: cinder/api/openstack/common.py:423 -msgid "Instance snapshots are not permitted at this time." 
+#: cinder/openstack/common/rpc/impl_kombu.py:624 +#: cinder/openstack/common/rpc/impl_qpid.py:522 +#, python-format +msgid "Timed out waiting for RPC response: %s" msgstr "" -#: cinder/api/openstack/extensions.py:188 +#: cinder/openstack/common/rpc/impl_kombu.py:628 +#: cinder/openstack/common/rpc/impl_qpid.py:526 #, python-format -msgid "Loaded extension: %s" +msgid "Failed to consume message from queue: %s" msgstr "" -#: cinder/api/openstack/extensions.py:225 +#: cinder/openstack/common/rpc/impl_kombu.py:667 +#: cinder/openstack/common/rpc/impl_qpid.py:561 #, python-format -msgid "Ext name: %s" +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" msgstr "" -#: cinder/api/openstack/extensions.py:226 +#: cinder/openstack/common/rpc/impl_qpid.py:84 #, python-format -msgid "Ext alias: %s" +msgid "Invalid value for qpid_topology_version: %d" msgstr "" -#: cinder/api/openstack/extensions.py:227 +#: cinder/openstack/common/rpc/impl_qpid.py:455 #, python-format -msgid "Ext description: %s" +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" msgstr "" -#: cinder/api/openstack/extensions.py:229 +#: cinder/openstack/common/rpc/impl_qpid.py:461 #, python-format -msgid "Ext namespace: %s" +msgid "Connected to AMQP server on %s" msgstr "" -#: cinder/api/openstack/extensions.py:230 -#, python-format -msgid "Ext updated: %s" +#: cinder/openstack/common/rpc/impl_qpid.py:474 +msgid "Re-established AMQP queues" msgstr "" -#: cinder/api/openstack/extensions.py:232 -#, python-format -msgid "Exception loading extension: %s" +#: cinder/openstack/common/rpc/impl_qpid.py:534 +msgid "Error processing message. Skipping it." msgstr "" -#: cinder/api/openstack/extensions.py:246 -#, python-format -msgid "Loading extension %s" +#: cinder/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." msgstr "" -#: cinder/api/openstack/extensions.py:252 +#: cinder/openstack/common/rpc/impl_zmq.py:101 #, python-format -msgid "Calling extension factory %s" +msgid "Deserializing: %s" msgstr "" -#: cinder/api/openstack/extensions.py:264 +#: cinder/openstack/common/rpc/impl_zmq.py:136 #, python-format -msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgid "Connecting to %(addr)s with %(type)s" msgstr "" -#: cinder/api/openstack/extensions.py:344 +#: cinder/openstack/common/rpc/impl_zmq.py:137 #, python-format -msgid "Failed to load extension %(classpath)s: %(exc)s" +msgid "-> Subscribed to %(subscribe)s" msgstr "" -#: cinder/api/openstack/extensions.py:368 +#: cinder/openstack/common/rpc/impl_zmq.py:138 #, python-format -msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgid "-> bind: %(bind)s" msgstr "" -#: cinder/api/openstack/wsgi.py:135 cinder/api/openstack/wsgi.py:538 -msgid "cannot understand JSON" +#: cinder/openstack/common/rpc/impl_zmq.py:146 +msgid "Could not open socket." msgstr "" -#: cinder/api/openstack/wsgi.py:159 -#: cinder/api/openstack/compute/contrib/hosts.py:86 -msgid "cannot understand XML" +#: cinder/openstack/common/rpc/impl_zmq.py:158 +#, python-format +msgid "Subscribing to %s" msgstr "" -#: cinder/api/openstack/wsgi.py:543 -msgid "too many body keys" +#: cinder/openstack/common/rpc/impl_zmq.py:200 +msgid "You cannot recv on this socket." msgstr "" -#: cinder/api/openstack/wsgi.py:582 -#, python-format -msgid "Exception handling resource: %s" +#: cinder/openstack/common/rpc/impl_zmq.py:205 +msgid "You cannot send on this socket." 
msgstr "" -#: cinder/api/openstack/wsgi.py:586 +#: cinder/openstack/common/rpc/impl_zmq.py:267 #, python-format -msgid "Fault thrown: %s" +msgid "Running func with context: %s" msgstr "" -#: cinder/api/openstack/wsgi.py:589 -#, python-format -msgid "HTTP exception thrown: %s" +#: cinder/openstack/common/rpc/impl_zmq.py:305 +msgid "Sending reply" msgstr "" -#: cinder/api/openstack/wsgi.py:697 -msgid "Unrecognized Content-Type provided in request" +#: cinder/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." msgstr "" -#: cinder/api/openstack/wsgi.py:701 -msgid "No Content-Type provided in request" +#: cinder/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" msgstr "" -#: cinder/api/openstack/wsgi.py:705 -msgid "Empty body provided in request" +#: cinder/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" msgstr "" -#: cinder/api/openstack/wsgi.py:816 -#, python-format -msgid "There is no such action: %s" +#: cinder/openstack/common/rpc/impl_zmq.py:387 +msgid "Consuming socket" msgstr "" -#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 -#: cinder/api/openstack/compute/server_metadata.py:58 -#: cinder/api/openstack/compute/server_metadata.py:76 -#: cinder/api/openstack/compute/server_metadata.py:101 -#: cinder/api/openstack/compute/server_metadata.py:126 -#: cinder/api/openstack/compute/contrib/admin_actions.py:211 -#: cinder/api/openstack/compute/contrib/console_output.py:52 -msgid "Malformed request body" +#: cinder/openstack/common/rpc/impl_zmq.py:437 +#, python-format +msgid "Creating proxy for topic: %s" msgstr "" -#: cinder/api/openstack/wsgi.py:829 -msgid "Unsupported Content-Type" +#: cinder/openstack/common/rpc/impl_zmq.py:443 +msgid "Topic contained dangerous characters." msgstr "" -#: cinder/api/openstack/wsgi.py:841 -msgid "Malformed request url" +#: cinder/openstack/common/rpc/impl_zmq.py:475 +msgid "Topic socket file creation failed." msgstr "" -#: cinder/api/openstack/wsgi.py:889 +#: cinder/openstack/common/rpc/impl_zmq.py:481 #, python-format -msgid "%(url)s returned a fault: %(e)s" -msgstr "" - -#: cinder/api/openstack/xmlutil.py:265 -msgid "element is not a child" +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." msgstr "" -#: cinder/api/openstack/xmlutil.py:414 -msgid "root element selecting a list" +#: cinder/openstack/common/rpc/impl_zmq.py:497 +#, python-format +msgid "Required IPC directory does not exist at %s" msgstr "" -#: cinder/api/openstack/xmlutil.py:739 +#: cinder/openstack/common/rpc/impl_zmq.py:506 #, python-format -msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgid "Permission denied to IPC directory at %s" msgstr "" -#: cinder/api/openstack/xmlutil.py:858 -msgid "subclasses must implement construct()!" +#: cinder/openstack/common/rpc/impl_zmq.py:509 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." msgstr "" -#: cinder/api/openstack/compute/extensions.py:29 -#: cinder/api/openstack/volume/extensions.py:29 -msgid "Initializing extension manager." +#: cinder/openstack/common/rpc/impl_zmq.py:543 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" msgstr "" -#: cinder/api/openstack/compute/image_metadata.py:40 -#: cinder/api/openstack/compute/images.py:146 -#: cinder/api/openstack/compute/images.py:161 -msgid "Image not found." +#: cinder/openstack/common/rpc/impl_zmq.py:562 +msgid "ZMQ Envelope version unsupported or unknown." 
msgstr "" -#: cinder/api/openstack/compute/image_metadata.py:79 -msgid "Incorrect request body format" +#: cinder/openstack/common/rpc/impl_zmq.py:590 +msgid "Skipping topic registration. Already registered." msgstr "" -#: cinder/api/openstack/compute/image_metadata.py:83 -#: cinder/api/openstack/compute/server_metadata.py:80 -#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:79 -#: cinder/api/openstack/compute/contrib/volumetypes.py:188 -msgid "Request body and URI mismatch" +#: cinder/openstack/common/rpc/impl_zmq.py:597 +#, python-format +msgid "Consumer is a zmq.%s" msgstr "" -#: cinder/api/openstack/compute/image_metadata.py:86 -#: cinder/api/openstack/compute/server_metadata.py:84 -#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:82 -#: cinder/api/openstack/compute/contrib/volumetypes.py:191 -msgid "Request body contains too many items" +#: cinder/openstack/common/rpc/impl_zmq.py:649 +msgid "Creating payload" msgstr "" -#: cinder/api/openstack/compute/image_metadata.py:111 -msgid "Invalid metadata key" +#: cinder/openstack/common/rpc/impl_zmq.py:662 +msgid "Creating queue socket for reply waiter" msgstr "" -#: cinder/api/openstack/compute/ips.py:74 -msgid "Instance does not exist" +#: cinder/openstack/common/rpc/impl_zmq.py:675 +msgid "Sending cast" msgstr "" -#: cinder/api/openstack/compute/ips.py:97 -msgid "Instance is not a member of specified network" +#: cinder/openstack/common/rpc/impl_zmq.py:678 +msgid "Cast sent; Waiting reply" msgstr "" -#: cinder/api/openstack/compute/limits.py:140 +#: cinder/openstack/common/rpc/impl_zmq.py:681 #, python-format -msgid "" -"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " -"%(unit_string)s." +msgid "Received message: %s" msgstr "" -#: cinder/api/openstack/compute/limits.py:266 -msgid "This request was rate-limited." +#: cinder/openstack/common/rpc/impl_zmq.py:682 +msgid "Unpacking response" msgstr "" -#: cinder/api/openstack/compute/server_metadata.py:38 -#: cinder/api/openstack/compute/server_metadata.py:122 -#: cinder/api/openstack/compute/server_metadata.py:159 -msgid "Server does not exist" +#: cinder/openstack/common/rpc/impl_zmq.py:691 +msgid "Unsupported or unknown ZMQ envelope returned." msgstr "" -#: cinder/api/openstack/compute/server_metadata.py:141 -#: cinder/api/openstack/compute/server_metadata.py:152 -msgid "Metadata item was not found" +#: cinder/openstack/common/rpc/impl_zmq.py:698 +msgid "RPC Message Invalid." msgstr "" -#: cinder/api/openstack/compute/servers.py:425 +#: cinder/openstack/common/rpc/impl_zmq.py:721 #, python-format -msgid "Invalid server status: %(status)s" +msgid "%(msg)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:433 -msgid "Invalid changes-since value" +#: cinder/openstack/common/rpc/impl_zmq.py:724 +#, python-format +msgid "Sending message(s) to: %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:483 -msgid "Personality file limit exceeded" +#: cinder/openstack/common/rpc/impl_zmq.py:728 +msgid "No matchmaker results. Not casting." msgstr "" -#: cinder/api/openstack/compute/servers.py:485 -msgid "Personality file path too long" +#: cinder/openstack/common/rpc/impl_zmq.py:731 +msgid "No match from matchmaker." msgstr "" -#: cinder/api/openstack/compute/servers.py:487 -msgid "Personality file content too long" +#: cinder/openstack/common/rpc/impl_zmq.py:771 +#, python-format +msgid "topic is %s." 
msgstr "" -#: cinder/api/openstack/compute/servers.py:501 -msgid "Server name is not a string or unicode" +#: cinder/openstack/common/rpc/impl_zmq.py:815 +#, python-format +msgid "rpc_zmq_matchmaker = %(orig)s is deprecated; use %(new)s instead" msgstr "" -#: cinder/api/openstack/compute/servers.py:505 -msgid "Server name is an empty string" +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." msgstr "" -#: cinder/api/openstack/compute/servers.py:509 -msgid "Server name must be less than 256 characters." +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." msgstr "" -#: cinder/api/openstack/compute/servers.py:527 +#: cinder/openstack/common/rpc/matchmaker.py:239 #, python-format -msgid "Bad personality format: missing %s" +msgid "Matchmaker unregistered: %s, %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:530 -msgid "Bad personality format" +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." msgstr "" -#: cinder/api/openstack/compute/servers.py:535 +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#: cinder/openstack/common/rpc/matchmaker_ring.py:79 +#: cinder/openstack/common/rpc/matchmaker_ring.py:97 #, python-format -msgid "Personality content for %s cannot be decoded" +msgid "No key defining hosts for topic '%s', see ringfile" msgstr "" -#: cinder/api/openstack/compute/servers.py:550 +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:54 #, python-format -msgid "Bad networks format: network uuid is not in proper format (%s)" +msgid "extra_spec requirement '%(req)s' does not match '%(cap)s'" msgstr "" -#: cinder/api/openstack/compute/servers.py:559 +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:67 #, python-format -msgid "Invalid fixed IP address (%s)" +msgid "%(host_state)s fails resource_type extra_specs requirements" msgstr "" -#: cinder/api/openstack/compute/servers.py:566 -#, python-format -msgid "Duplicate networks (%s) are not allowed" +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:43 +msgid "Re-scheduling is disabled." msgstr "" -#: cinder/api/openstack/compute/servers.py:572 +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:52 #, python-format -msgid "Bad network format: missing %s" +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:575 -msgid "Bad networks format" +#: cinder/scheduler/driver.py:69 +msgid "Must implement host_passes_filters" msgstr "" -#: cinder/api/openstack/compute/servers.py:587 -msgid "Userdata content cannot be decoded" +#: cinder/scheduler/driver.py:74 +msgid "Must implement find_retype_host" msgstr "" -#: cinder/api/openstack/compute/servers.py:594 -msgid "accessIPv4 is not proper IPv4 format" -msgstr "" - -#: cinder/api/openstack/compute/servers.py:601 -msgid "accessIPv6 is not proper IPv6 format" +#: cinder/scheduler/driver.py:78 +msgid "Must implement a fallback schedule" msgstr "" -#: cinder/api/openstack/compute/servers.py:633 -msgid "Server name is not defined" +#: cinder/scheduler/driver.py:82 +msgid "Must implement schedule_create_volume" msgstr "" -#: cinder/api/openstack/compute/servers.py:675 -#: cinder/api/openstack/compute/servers.py:740 -msgid "Invalid flavorRef provided." 
+#: cinder/scheduler/filter_scheduler.py:98 +#, python-format +msgid "cannot place volume %(id)s on %(host)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:737 -msgid "Can not find requested image" +#: cinder/scheduler/filter_scheduler.py:114 +#, python-format +msgid "No valid hosts for volume %(id)s with type %(type)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:743 -msgid "Invalid key_name provided." +#: cinder/scheduler/filter_scheduler.py:125 +#, python-format +msgid "" +"Current host not valid for volume %(id)s with type %(type)s, migration " +"not allowed" msgstr "" -#: cinder/api/openstack/compute/servers.py:829 -#: cinder/api/openstack/compute/servers.py:849 -msgid "Instance has not been resized." +#: cinder/scheduler/filter_scheduler.py:156 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" msgstr "" -#: cinder/api/openstack/compute/servers.py:835 +#: cinder/scheduler/filter_scheduler.py:174 #, python-format -msgid "Error in confirm-resize %s" +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:855 +#: cinder/scheduler/filter_scheduler.py:207 #, python-format -msgid "Error in revert-resize %s" +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:868 -msgid "Argument 'type' for reboot is not HARD or SOFT" +#: cinder/scheduler/filter_scheduler.py:259 +#, python-format +msgid "Filtered %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:872 -msgid "Missing argument 'type' for reboot" +#: cinder/scheduler/filter_scheduler.py:276 +#, python-format +msgid "Choosing %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:885 +#: cinder/scheduler/host_manager.py:264 #, python-format -msgid "Error in reboot %s" +msgid "Ignoring %(service_name)s service update from %(host)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:897 -msgid "Unable to locate requested flavor." +#: cinder/scheduler/host_manager.py:269 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." msgstr "" -#: cinder/api/openstack/compute/servers.py:900 -msgid "Resize requires a change in size." +#: cinder/scheduler/host_manager.py:294 +#, python-format +msgid "volume service is down or disabled. (host: %s)" msgstr "" -#: cinder/api/openstack/compute/servers.py:924 -msgid "Malformed server entity" +#: cinder/scheduler/manager.py:63 +msgid "" +"ChanceScheduler and SimpleScheduler have been deprecated due to lack of " +"support for advanced features like: volume types, volume encryption, QoS " +"etc. These two schedulers can be fully replaced by FilterScheduler with " +"certain combination of filters and weighers." msgstr "" -#: cinder/api/openstack/compute/servers.py:931 -msgid "Missing imageRef attribute" +#: cinder/scheduler/manager.py:98 cinder/scheduler/manager.py:100 +msgid "Failed to create scheduler manager volume flow" msgstr "" -#: cinder/api/openstack/compute/servers.py:940 -msgid "Invalid imageRef provided." +#: cinder/scheduler/manager.py:159 +msgid "New volume type not specified in request_spec." msgstr "" -#: cinder/api/openstack/compute/servers.py:949 -msgid "Missing flavorRef attribute" +#: cinder/scheduler/manager.py:174 +#, python-format +msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." 
msgstr "" -#: cinder/api/openstack/compute/servers.py:962 -msgid "No adminPass was specified" +#: cinder/scheduler/manager.py:192 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:966 -#: cinder/api/openstack/compute/servers.py:1144 -msgid "Invalid adminPass" +#: cinder/scheduler/scheduler_options.py:68 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" msgstr "" -#: cinder/api/openstack/compute/servers.py:980 -msgid "Unable to parse metadata key/value pairs." +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%s'" msgstr "" -#: cinder/api/openstack/compute/servers.py:993 -msgid "Resize request has invalid 'flavorRef' attribute." +#: cinder/scheduler/filters/capacity_filter.py:43 +msgid "Free capacity not set: volume node info collection broken." msgstr "" -#: cinder/api/openstack/compute/servers.py:996 -msgid "Resize requests require 'flavorRef' attribute." +#: cinder/scheduler/filters/capacity_filter.py:57 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:1014 -#: cinder/api/openstack/compute/contrib/aggregates.py:142 -#: cinder/api/openstack/compute/contrib/networks.py:65 -msgid "Invalid request body" +#: cinder/scheduler/flows/create_volume.py:53 +msgid "No volume_id provided to populate a request_spec from" msgstr "" -#: cinder/api/openstack/compute/servers.py:1019 -msgid "Could not parse imageRef from request." +#: cinder/scheduler/flows/create_volume.py:116 +#, python-format +msgid "Failed to schedule_create_volume: %(cause)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:1071 -msgid "Instance could not be found" +#: cinder/scheduler/flows/create_volume.py:135 +#, python-format +msgid "Failed notifying on %(topic)s payload %(payload)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:1074 -msgid "Cannot find image for rebuild" +#: cinder/tests/fake_driver.py:57 cinder/volume/driver.py:784 +#, python-format +msgid "FAKE ISCSI: %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:1103 -msgid "createImage entity requires name attribute" +#: cinder/tests/fake_driver.py:76 cinder/volume/driver.py:884 +#, python-format +msgid "FAKE ISER: %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:1112 -#: cinder/api/openstack/compute/contrib/admin_actions.py:238 -msgid "Invalid metadata" +#: cinder/tests/fake_driver.py:97 +msgid "local_path not implemented" msgstr "" -#: cinder/api/openstack/compute/servers.py:1167 +#: cinder/tests/fake_driver.py:124 cinder/tests/fake_driver.py:129 #, python-format -msgid "Removing options '%(unk_opt_str)s' from query" +msgid "LoggingVolumeDriver: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:60 +#: cinder/tests/fake_utils.py:70 #, python-format -msgid "Compute.api::pause %s" +msgid "Faking execution of cmd (subprocess): %s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:77 +#: cinder/tests/fake_utils.py:78 #, python-format -msgid "Compute.api::unpause %s" +msgid "Faked command matched %s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:94 +#: cinder/tests/fake_utils.py:94 #, python-format -msgid "compute.api::suspend %s" +msgid "Faked command raised an exception %s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:111 +#: cinder/tests/fake_utils.py:97 #, 
python-format -msgid "compute.api::resume %s" +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:127 +#: cinder/tests/test_misc.py:58 #, python-format -msgid "Error in migrate %s" +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:141 +#: cinder/tests/test_netapp_nfs.py:360 #, python-format -msgid "Compute.api::reset_network %s" +msgid "Share %(share)s and file name %(file_name)s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:154 -#: cinder/api/openstack/compute/contrib/admin_actions.py:170 -#: cinder/api/openstack/compute/contrib/admin_actions.py:186 -#: cinder/api/openstack/compute/contrib/multinic.py:41 -#: cinder/api/openstack/compute/contrib/rescue.py:44 -msgid "Server not found" +#: cinder/tests/test_rbd.py:768 cinder/volume/drivers/rbd.py:175 +msgid "flush() not supported in this version of librbd" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:157 +#: cinder/tests/test_storwize_svc.py:260 #, python-format -msgid "Compute.api::inject_network_info %s" +msgid "unrecognized argument %s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:173 +#: cinder/tests/test_storwize_svc.py:1507 #, python-format -msgid "Compute.api::lock %s" +msgid "Run CLI command: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:189 +#: cinder/tests/test_storwize_svc.py:1510 #, python-format -msgid "Compute.api::unlock %s" +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:219 +#: cinder/tests/test_storwize_svc.py:1515 #, python-format -msgid "createBackup entity requires %s attribute" +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:223 -msgid "Malformed createBackup entity" +#: cinder/tests/test_volume_types.py:60 +#, python-format +msgid "Given data: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:229 -msgid "createBackup attribute 'rotation' must be an integer" +#: cinder/tests/test_volume_types.py:61 +#, python-format +msgid "Result data: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:244 -#: cinder/api/openstack/compute/contrib/console_output.py:47 -#: cinder/api/openstack/compute/contrib/console_output.py:59 -#: cinder/api/openstack/compute/contrib/consoles.py:49 -#: cinder/api/openstack/compute/contrib/consoles.py:60 -#: cinder/api/openstack/compute/contrib/server_action_list.py:49 -#: cinder/api/openstack/compute/contrib/server_diagnostics.py:47 -#: cinder/api/openstack/compute/contrib/server_start_stop.py:38 -msgid "Instance not found" +#: cinder/tests/test_xiv_ds8k.py:102 +#, python-format +msgid "Volume not found for instance %(instance_id)s." msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:272 -msgid "host and block_migration must be specified." 
+#: cinder/tests/api/contrib/test_backups.py:741 +msgid "Invalid input" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:284 +#: cinder/tests/integrated/test_login.py:29 #, python-format -msgid "Live migration of instance %(id)s to host %(host)s failed" +msgid "volume: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:76 +#: cinder/tests/integrated/api/client.py:32 #, python-format msgid "" -"Cannot create aggregate with name %(name)s and availability zone " -"%(avail_zone)s" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:88 -#, python-format -msgid "Cannot show aggregate: %(id)s" +#: cinder/tests/integrated/api/client.py:42 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:50 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:58 +msgid "Item not found" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:114 +#: cinder/tests/integrated/api/client.py:100 #, python-format -msgid "Cannot update aggregate: %(id)s" +msgid "Doing %(method)s on %(relative_url)s" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:126 +#: cinder/tests/integrated/api/client.py:103 #, python-format -msgid "Cannot delete aggregate: %(id)s" +msgid "Body: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:139 +#: cinder/tests/integrated/api/client.py:121 #, python-format -msgid "Aggregates does not have %s action" +msgid "%(auth_uri)s => code %(http_status)s" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:152 -#: cinder/api/openstack/compute/contrib/aggregates.py:158 +#: cinder/tests/integrated/api/client.py:148 #, python-format -msgid "Cannot add host %(host)s in aggregate %(id)s" +msgid "%(relative_uri)s => code %(http_status)s" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:171 -#: cinder/api/openstack/compute/contrib/aggregates.py:175 +#: cinder/tests/integrated/api/client.py:159 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:166 #, python-format -msgid "Cannot remove host %(host)s in aggregate %(id)s" +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/transfer/api.py:68 +msgid "Volume in unexpected state" +msgstr "" + +#: cinder/transfer/api.py:102 cinder/volume/api.py:367 +msgid "status must be available" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:195 +#: cinder/transfer/api.py:119 #, python-format -msgid "Cannot set metadata %(metadata)s in aggregate %(id)s" +msgid "Failed to create transfer record for %s" msgstr "" -#: cinder/api/openstack/compute/contrib/certificates.py:75 -msgid "Only root certificate can be retrieved." +#: cinder/transfer/api.py:136 +#, python-format +msgid "Attempt to transfer %s with invalid auth key." 
msgstr "" -#: cinder/api/openstack/compute/contrib/cloudpipe.py:146 +#: cinder/transfer/api.py:156 cinder/volume/flows/api/create_volume.py:508 +#, python-format msgid "" -"Unable to claim IP for VPN instances, ensure it isn't running, and try " -"again in a few minutes" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" msgstr "" -#: cinder/api/openstack/compute/contrib/consoles.py:44 -msgid "Missing type specification" +#: cinder/transfer/api.py:182 +#, python-format +msgid "Failed to update quota donating volumetransfer id %s" msgstr "" -#: cinder/api/openstack/compute/contrib/consoles.py:56 -msgid "Invalid type specification" +#: cinder/transfer/api.py:199 +#, python-format +msgid "Volume %s has been transferred." msgstr "" -#: cinder/api/openstack/compute/contrib/disk_config.py:44 +#: cinder/volume/api.py:143 #, python-format -msgid "%s must be either 'MANUAL' or 'AUTO'." +msgid "Unable to query if %s is in the availability zone set" +msgstr "" + +#: cinder/volume/api.py:171 cinder/volume/api.py:173 +msgid "Failed to create api volume flow" +msgstr "" + +#: cinder/volume/api.py:202 +msgid "Failed to update quota for deleting volume" msgstr "" -#: cinder/api/openstack/compute/contrib/extended_server_attributes.py:77 -#: cinder/api/openstack/compute/contrib/extended_status.py:61 -msgid "Server not found." +#: cinder/volume/api.py:214 +#, python-format +msgid "Volume status must be available or error, but current status is: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/flavorextradata.py:61 -#: cinder/api/openstack/compute/contrib/flavorextradata.py:91 -msgid "Flavor not found." +#: cinder/volume/api.py:224 +msgid "Volume cannot be deleted while migrating" msgstr "" -#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:49 -#: cinder/api/openstack/compute/contrib/volumetypes.py:158 -msgid "No Request Body" +#: cinder/volume/api.py:229 +#, python-format +msgid "Volume still has %d dependent snapshots" msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:159 +#: cinder/volume/api.py:285 cinder/volume/api.py:350 +#: cinder/volume/qos_specs.py:240 cinder/volume/volume_types.py:67 #, python-format -msgid "No more floating ips in pool %s." +msgid "Searching by: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:161 -msgid "No more floating ips available." 
+#: cinder/volume/api.py:370 +msgid "already attached" msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:201 -#: cinder/api/openstack/compute/contrib/floating_ips.py:230 -#: cinder/api/openstack/compute/contrib/security_groups.py:571 -#: cinder/api/openstack/compute/contrib/security_groups.py:604 -msgid "Missing parameter dict" +#: cinder/volume/api.py:377 +msgid "status must be in-use to detach" msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:204 -#: cinder/api/openstack/compute/contrib/floating_ips.py:233 -msgid "Address not specified" +#: cinder/volume/api.py:388 +msgid "Volume status must be available to reserve" msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:213 -msgid "No fixed ips associated to instance" +#: cinder/volume/api.py:464 +msgid "Snapshot cannot be created while volume is migrating" msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:216 -msgid "Associate floating ip failed" +#: cinder/volume/api.py:468 +msgid "must be available" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:144 +#: cinder/volume/api.py:490 #, python-format -msgid "Invalid status: '%s'" +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:148 +#: cinder/volume/api.py:502 #, python-format -msgid "Invalid mode: '%s'" +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:152 -#, python-format -msgid "Invalid update setting: '%s'" +#: cinder/volume/api.py:553 +msgid "Volume Snapshot status must be available or error" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:170 -#, python-format -msgid "Putting host %(host)s in maintenance mode %(mode)s." +#: cinder/volume/api.py:581 cinder/volume/flows/api/create_volume.py:208 +msgid "Metadata property key blank" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:181 -#, python-format -msgid "Setting host %(host)s to %(state)s." +#: cinder/volume/api.py:585 +msgid "Metadata property key greater than 255 characters" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:230 -msgid "Describe-resource is admin only functionality" +#: cinder/volume/api.py:589 +msgid "Metadata property value greater than 255 characters" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:238 -msgid "Host not found" +#: cinder/volume/api.py:720 cinder/volume/api.py:772 +msgid "Volume status must be available/in-use." msgstr "" -#: cinder/api/openstack/compute/contrib/keypairs.py:70 -msgid "Keypair name contains unsafe characters" +#: cinder/volume/api.py:723 +msgid "Volume status is in-use." msgstr "" -#: cinder/api/openstack/compute/contrib/keypairs.py:95 -msgid "Keypair name must be between 1 and 255 characters long" +#: cinder/volume/api.py:752 +msgid "Volume status must be available to extend." msgstr "" -#: cinder/api/openstack/compute/contrib/keypairs.py:100 +#: cinder/volume/api.py:757 #, python-format -msgid "Key pair '%s' already exists." +msgid "" +"New size for extend must be greater than current size. 
(current: " +"%(size)s, extended: %(new_size)s)" msgstr "" -#: cinder/api/openstack/compute/contrib/multinic.py:52 -msgid "Missing 'networkId' argument for addFixedIp" +#: cinder/volume/api.py:778 +msgid "Volume is already part of an active migration" msgstr "" -#: cinder/api/openstack/compute/contrib/multinic.py:68 -msgid "Missing 'address' argument for removeFixedIp" +#: cinder/volume/api.py:784 +msgid "volume must not have snapshots" msgstr "" -#: cinder/api/openstack/compute/contrib/multinic.py:77 +#: cinder/volume/api.py:797 #, python-format -msgid "Unable to find address %r" +msgid "No available service named %s" msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:62 -#, python-format -msgid "Network does not have %s action" +#: cinder/volume/api.py:803 +msgid "Destination host must be different than current host" msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:70 -#, python-format -msgid "Disassociating network with id %s" +#: cinder/volume/api.py:833 +msgid "Source volume not mid-migration." msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:74 -#: cinder/api/openstack/compute/contrib/networks.py:91 -#: cinder/api/openstack/compute/contrib/networks.py:101 -msgid "Network not found" +#: cinder/volume/api.py:837 +msgid "Destination volume not mid-migration." msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:87 +#: cinder/volume/api.py:842 #, python-format -msgid "Showing network with id %s" +msgid "Destination has migration_status %(stat)s, expected %(exp)s." msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:97 -#, python-format -msgid "Deleting network with id %s" +#: cinder/volume/api.py:853 +msgid "Volume status must be available to update readonly flag." msgstr "" -#: cinder/api/openstack/compute/contrib/scheduler_hints.py:41 -msgid "Malformed scheduler_hints attribute" +#: cinder/volume/api.py:862 +#, python-format +msgid "Unable to update type due to incorrect status on volume: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:222 -msgid "Security group id should be integer" +#: cinder/volume/api.py:868 +#, python-format +msgid "Volume %s is already part of an active migration." msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:243 -msgid "Security group is still in use" +#: cinder/volume/api.py:874 +#, python-format +msgid "migration_policy must be 'on-demand' or 'never', passed: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:295 +#: cinder/volume/api.py:887 #, python-format -msgid "Security group %s already exists" +msgid "Invalid volume_type passed: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:315 +#: cinder/volume/api.py:900 #, python-format -msgid "Security group %s is not a string or unicode" +msgid "New volume_type same as original: %s" +msgstr "" + +#: cinder/volume/api.py:915 +msgid "Retype cannot change encryption requirements" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:318 +#: cinder/volume/api.py:927 +msgid "Retype cannot change front-end qos specs for in-use volumes" +msgstr "" + +#: cinder/volume/driver.py:189 cinder/volume/drivers/netapp/nfs.py:174 #, python-format -msgid "Security group %s cannot be empty." +msgid "Recovering from a failed execute. Try number %s" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:321 +#: cinder/volume/driver.py:282 #, python-format -msgid "Security group %s should not be greater than 255 characters." 
+msgid "copy_data_between_volumes %(src)s -> %(dest)s." msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:348 -msgid "Parent group id is not integer" +#: cinder/volume/driver.py:295 cinder/volume/driver.py:309 +#, python-format +msgid "Failed to attach volume %(vol)s" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:351 +#: cinder/volume/driver.py:327 #, python-format -msgid "Security group (%s) not found" +msgid "Failed to copy volume %(src)s to %(dest)d" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:369 -msgid "Not enough parameters to build a valid rule." +#: cinder/volume/driver.py:340 +#, python-format +msgid "copy_image_to_volume %s." msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:376 +#: cinder/volume/driver.py:358 #, python-format -msgid "This rule already exists in group %s" +msgid "copy_volume_to_image %s." msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:414 -msgid "Parent or group id is not integer" +#: cinder/volume/driver.py:394 +#, python-format +msgid "Unable to access the backend storage via the path %(path)s." msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:507 -msgid "Rule id is not integer" +#: cinder/volume/driver.py:433 +#, python-format +msgid "Creating a new backup for volume %s." msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:510 +#: cinder/volume/driver.py:451 #, python-format -msgid "Rule (%s) not found" +msgid "Restoring backup %(backup)s to volume %(volume)s." msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:574 -#: cinder/api/openstack/compute/contrib/security_groups.py:607 -msgid "Security group not specified" +#: cinder/volume/driver.py:474 +msgid "Extend volume not implemented" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:578 -#: cinder/api/openstack/compute/contrib/security_groups.py:611 -msgid "Security group name cannot be empty" +#: cinder/volume/driver.py:533 cinder/volume/drivers/emc/emc_smis_iscsi.py:113 +msgid "ISCSI provider_location not stored, using discovery" msgstr "" -#: cinder/api/openstack/compute/contrib/server_start_stop.py:45 +#: cinder/volume/driver.py:546 #, python-format -msgid "start instance %r" +msgid "ISCSI discovery attempt failed for:%s" msgstr "" -#: cinder/api/openstack/compute/contrib/server_start_stop.py:54 +#: cinder/volume/driver.py:548 #, python-format -msgid "stop instance %r" +msgid "Error from iscsiadm -m discovery: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:73 -#: cinder/api/openstack/volume/volumes.py:106 +#: cinder/volume/driver.py:595 #, python-format -msgid "vol=%s" +msgid "Could not find iSCSI export for volume %s" msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:146 -#: cinder/api/openstack/volume/volumes.py:184 +#: cinder/volume/driver.py:599 cinder/volume/drivers/emc/emc_smis_iscsi.py:156 #, python-format -msgid "Delete volume with id: %s" +msgid "ISCSI Discovery: Found %s" msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:329 -#, python-format -msgid "Attach volume %(volume_id)s to instance %(server_id)s at %(device)s" +#: cinder/volume/driver.py:696 +msgid "The volume driver requires the iSCSI initiator name in the connector." 
msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:481 -#: cinder/api/openstack/volume/snapshots.py:110 -#, python-format -msgid "Delete snapshot with id: %s" +#: cinder/volume/driver.py:726 cinder/volume/driver.py:845 +#: cinder/volume/drivers/eqlx.py:247 cinder/volume/drivers/lvm.py:359 +#: cinder/volume/drivers/zadara.py:650 +#: cinder/volume/drivers/emc/emc_smis_common.py:859 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:235 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:602 +#: cinder/volume/drivers/netapp/iscsi.py:1032 +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/nexenta/iscsi.py:538 +#: cinder/volume/drivers/windows/windows.py:205 +msgid "Updating volume stats" msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:524 -#: cinder/api/openstack/volume/snapshots.py:150 -#, python-format -msgid "Create snapshot from volume %s" +#: cinder/volume/driver.py:924 +msgid "Driver must implement initialize_connection" msgstr "" -#: cinder/auth/fakeldap.py:33 -msgid "Attempted to instantiate singleton" +#: cinder/volume/manager.py:203 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." msgstr "" -#: cinder/auth/ldapdriver.py:650 -#, python-format +#: cinder/volume/manager.py:209 msgid "" -"Attempted to remove the last member of a group. Deleting the group at %s " -"instead." +"ThinLVMVolumeDriver is deprecated, please configure LVMISCSIDriver and " +"lvm_type=thin. Continuing with those settings." msgstr "" -#: cinder/auth/manager.py:298 +#: cinder/volume/manager.py:228 #, python-format -msgid "Looking up user: %r" +msgid "Starting volume driver %(driver_name)s (%(version)s)" msgstr "" -#: cinder/auth/manager.py:302 +#: cinder/volume/manager.py:235 #, python-format -msgid "Failed authorization for access key %s" +msgid "Error encountered during initialization of driver: %(name)s" msgstr "" -#: cinder/auth/manager.py:308 +#: cinder/volume/manager.py:244 #, python-format -msgid "Using project name = user name (%s)" +msgid "Re-exporting %s volumes" msgstr "" -#: cinder/auth/manager.py:315 +#: cinder/volume/manager.py:257 #, python-format -msgid "failed authorization: no project named %(pjid)s (user=%(uname)s)" +msgid "Failed to re-export volume %s: setting to error state" msgstr "" -#: cinder/auth/manager.py:324 +#: cinder/volume/manager.py:264 #, python-format -msgid "" -"Failed authorization: user %(uname)s not admin and not member of project " -"%(pjname)s" +msgid "volume %s stuck in a downloading state" msgstr "" -#: cinder/auth/manager.py:331 cinder/auth/manager.py:343 +#: cinder/volume/manager.py:271 #, python-format -msgid "user.secret: %s" +msgid "volume %s: skipping export" msgstr "" -#: cinder/auth/manager.py:332 cinder/auth/manager.py:344 +#: cinder/volume/manager.py:273 #, python-format -msgid "expected_signature: %s" +msgid "" +"Error encountered during re-exporting phase of driver initialization: " +"%(name)s" msgstr "" -#: cinder/auth/manager.py:333 cinder/auth/manager.py:345 -#, python-format -msgid "signature: %s" +#: cinder/volume/manager.py:283 +msgid "Resuming any in progress delete operations" msgstr "" -#: cinder/auth/manager.py:335 cinder/auth/manager.py:357 +#: cinder/volume/manager.py:286 #, python-format -msgid "Invalid signature for user %s" +msgid "Resuming delete on volume: %s" msgstr "" -#: cinder/auth/manager.py:353 -#, python-format -msgid "host_only_signature: %s" +#: cinder/volume/manager.py:328 cinder/volume/manager.py:330 +msgid "Failed to create manager 
volume flow" msgstr "" -#: cinder/auth/manager.py:449 -msgid "Must specify project" +#: cinder/volume/manager.py:374 cinder/volume/manager.py:391 +#, python-format +msgid "volume %s: deleting" msgstr "" -#: cinder/auth/manager.py:490 -#, python-format -msgid "Adding role %(role)s to user %(uid)s in project %(pid)s" +#: cinder/volume/manager.py:380 +msgid "volume is not local to this node" msgstr "" -#: cinder/auth/manager.py:493 +#: cinder/volume/manager.py:389 #, python-format -msgid "Adding sitewide role %(role)s to user %(uid)s" +msgid "volume %s: removing export" msgstr "" -#: cinder/auth/manager.py:519 +#: cinder/volume/manager.py:394 #, python-format -msgid "Removing role %(role)s from user %(uid)s on project %(pid)s" +msgid "Cannot delete volume %s: volume is busy" msgstr "" -#: cinder/auth/manager.py:522 -#, python-format -msgid "Removing sitewide role %(role)s from user %(uid)s" +#: cinder/volume/manager.py:422 +msgid "Failed to update usages deleting volume" msgstr "" -#: cinder/auth/manager.py:595 +#: cinder/volume/manager.py:427 #, python-format -msgid "Created project %(name)s with manager %(manager_user)s" +msgid "volume %s: glance metadata deleted" msgstr "" -#: cinder/auth/manager.py:613 +#: cinder/volume/manager.py:430 #, python-format -msgid "modifying project %s" +msgid "no glance metadata found for volume %s" msgstr "" -#: cinder/auth/manager.py:625 +#: cinder/volume/manager.py:434 #, python-format -msgid "Adding user %(uid)s to project %(pid)s" +msgid "volume %s: deleted successfully" msgstr "" -#: cinder/auth/manager.py:646 +#: cinder/volume/manager.py:451 #, python-format -msgid "Remove user %(uid)s from project %(pid)s" +msgid "snapshot %s: creating" msgstr "" -#: cinder/auth/manager.py:676 +#: cinder/volume/manager.py:462 #, python-format -msgid "Deleting project %s" +msgid "snapshot %(snap_id)s: creating" msgstr "" -#: cinder/auth/manager.py:734 +#: cinder/volume/manager.py:490 #, python-format -msgid "Created user %(rvname)s (admin: %(rvadmin)r)" +msgid "" +"Failed updating %(snapshot_id)s metadata using the provided volumes " +"%(volume_id)s metadata" msgstr "" -#: cinder/auth/manager.py:743 +#: cinder/volume/manager.py:496 #, python-format -msgid "Deleting user %s" +msgid "snapshot %s: created successfully" msgstr "" -#: cinder/auth/manager.py:753 +#: cinder/volume/manager.py:508 cinder/volume/manager.py:518 #, python-format -msgid "Access Key change for user %s" +msgid "snapshot %s: deleting" msgstr "" -#: cinder/auth/manager.py:755 +#: cinder/volume/manager.py:526 #, python-format -msgid "Secret Key change for user %s" +msgid "Cannot delete snapshot %s: snapshot is busy" msgstr "" -#: cinder/auth/manager.py:757 -#, python-format -msgid "Admin status set to %(admin)r for user %(uid)s" +#: cinder/volume/manager.py:556 +msgid "Failed to update usages deleting snapshot" msgstr "" -#: cinder/auth/manager.py:802 +#: cinder/volume/manager.py:559 #, python-format -msgid "No vpn data for project %s" +msgid "snapshot %s: deleted successfully" msgstr "" -#: cinder/cloudpipe/pipelib.py:46 -msgid "Instance type for vpn instances" +#: cinder/volume/manager.py:579 +msgid "being attached by another instance" msgstr "" -#: cinder/cloudpipe/pipelib.py:49 -msgid "Template for cloudpipe instance boot script" +#: cinder/volume/manager.py:583 +msgid "being attached by another host" msgstr "" -#: cinder/cloudpipe/pipelib.py:52 -msgid "Network to push into openvpn config" +#: cinder/volume/manager.py:587 +msgid "being attached by different mode" msgstr "" -#: 
cinder/cloudpipe/pipelib.py:55 -msgid "Netmask to push into openvpn config" +#: cinder/volume/manager.py:590 +msgid "status must be available or attaching" msgstr "" -#: cinder/cloudpipe/pipelib.py:107 +#: cinder/volume/manager.py:698 #, python-format -msgid "Launching VPN for %s" -msgstr "" - -#: cinder/compute/api.py:141 -msgid "No compute host specified" +msgid "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" msgstr "" -#: cinder/compute/api.py:144 +#: cinder/volume/manager.py:760 #, python-format -msgid "Unable to find host for Instance %s" +msgid "Unable to fetch connection information from backend: %(err)s" msgstr "" -#: cinder/compute/api.py:192 +#: cinder/volume/manager.py:807 #, python-format -msgid "" -"Quota exceeded for %(pid)s, tried to set %(num_metadata)s metadata " -"properties" +msgid "Unable to terminate volume connection: %(err)s" msgstr "" -#: cinder/compute/api.py:203 -#, python-format -msgid "Quota exceeded for %(pid)s, metadata property key or value too long" +#: cinder/volume/manager.py:854 +msgid "failed to create new_volume on destination host" msgstr "" -#: cinder/compute/api.py:257 -msgid "Cannot run any more instances of this type." +#: cinder/volume/manager.py:857 +msgid "timeout creating new_volume on destination host" msgstr "" -#: cinder/compute/api.py:259 +#: cinder/volume/manager.py:880 #, python-format -msgid "Can only run %s more instances of this type." +msgid "Failed to copy volume %(vol1)s to %(vol2)s" msgstr "" -#: cinder/compute/api.py:261 +#: cinder/volume/manager.py:909 #, python-format -msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances. " +msgid "" +"migrate_volume_completion: completing migration for volume %(vol1)s " +"(temporary volume %(vol2)s" msgstr "" -#: cinder/compute/api.py:310 -msgid "Creating a raw instance" +#: cinder/volume/manager.py:921 +#, python-format +msgid "" +"migrate_volume_completion is cleaning up an error for volume %(vol1)s " +"(temporary volume %(vol2)s" msgstr "" -#: cinder/compute/api.py:312 +#: cinder/volume/manager.py:940 #, python-format -msgid "Using Kernel=%(kernel_id)s, Ramdisk=%(ramdisk_id)s" +msgid "Failed to delete migration source vol %(vol)s: %(err)s" msgstr "" -#: cinder/compute/api.py:383 +#: cinder/volume/manager.py:976 #, python-format -msgid "Going to run %s instances..." +msgid "volume %s: calling driver migrate_volume" msgstr "" -#: cinder/compute/api.py:447 -#, python-format -msgid "bdm %s" +#: cinder/volume/manager.py:1016 +msgid "Updating volume status" msgstr "" -#: cinder/compute/api.py:474 +#: cinder/volume/manager.py:1024 #, python-format -msgid "block_device_mapping %s" +msgid "" +"Unable to update stats, %(driver_name)s -%(driver_version)s " +"%(config_group)s driver is uninitialized." 
msgstr "" -#: cinder/compute/api.py:591 +#: cinder/volume/manager.py:1044 #, python-format -msgid "Sending create to scheduler for %(pid)s/%(uid)s's" +msgid "Notification {%s} received" msgstr "" -#: cinder/compute/api.py:871 -msgid "Going to try to soft delete instance" +#: cinder/volume/manager.py:1091 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to extend volume by %(s_size)sG, " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" msgstr "" -#: cinder/compute/api.py:891 -msgid "No host for instance, deleting immediately" +#: cinder/volume/manager.py:1103 +#, python-format +msgid "volume %s: extending" msgstr "" -#: cinder/compute/api.py:939 -msgid "Going to try to terminate instance" +#: cinder/volume/manager.py:1105 +#, python-format +msgid "volume %s: extended successfully" msgstr "" -#: cinder/compute/api.py:977 -msgid "Going to try to stop instance" +#: cinder/volume/manager.py:1107 +#, python-format +msgid "volume %s: Error trying to extend volume" msgstr "" -#: cinder/compute/api.py:996 -msgid "Going to try to start instance" +#: cinder/volume/manager.py:1169 +msgid "Failed to update usages while retyping volume." msgstr "" -#: cinder/compute/api.py:1000 -#, python-format -msgid "Instance %(instance_uuid)s is not stopped. (%(vm_state)s" +#: cinder/volume/manager.py:1170 +msgid "Failed to get old volume type quota reservations" msgstr "" -#: cinder/compute/api.py:1071 cinder/volume/api.py:173 -#: cinder/volume/volume_types.py:64 +#: cinder/volume/manager.py:1190 #, python-format -msgid "Searching by: %s" +msgid "Volume %s: retyped succesfully" msgstr "" -#: cinder/compute/api.py:1201 +#: cinder/volume/manager.py:1193 #, python-format -msgid "Image type not recognized %s" +msgid "" +"Volume %s: driver error when trying to retype, falling back to generic " +"mechanism." msgstr "" -#: cinder/compute/api.py:1369 -msgid "flavor_id is None. Assuming migration." +#: cinder/volume/manager.py:1204 +msgid "Retype requires migration but is not allowed." msgstr "" -#: cinder/compute/api.py:1377 -#, python-format -msgid "" -"Old instance type %(current_instance_type_name)s, new instance type " -"%(new_instance_type_name)s" +#: cinder/volume/manager.py:1212 +msgid "Volume must not have snapshots." 
msgstr "" -#: cinder/compute/api.py:1644 +#: cinder/volume/qos_specs.py:57 #, python-format -msgid "multiple fixedips exist, using the first: %s" -msgstr "" - -#: cinder/compute/instance_types.py:57 cinder/compute/instance_types.py:65 -msgid "create arguments must be positive integers" +msgid "Valid consumer of QoS specs are: %s" msgstr "" -#: cinder/compute/instance_types.py:76 cinder/volume/volume_types.py:41 +#: cinder/volume/qos_specs.py:84 cinder/volume/qos_specs.py:105 +#: cinder/volume/qos_specs.py:155 cinder/volume/qos_specs.py:197 +#: cinder/volume/qos_specs.py:211 cinder/volume/qos_specs.py:225 +#: cinder/volume/volume_types.py:43 #, python-format msgid "DB error: %s" msgstr "" -#: cinder/compute/instance_types.py:86 -#, python-format -msgid "Instance type %s not found for deletion" +#: cinder/volume/qos_specs.py:123 cinder/volume/qos_specs.py:140 +#: cinder/volume/qos_specs.py:272 cinder/volume/volume_types.py:52 +#: cinder/volume/volume_types.py:99 +msgid "id cannot be None" msgstr "" -#: cinder/compute/manager.py:138 +#: cinder/volume/qos_specs.py:156 #, python-format -msgid "check_instance_lock: decorating: |%s|" +msgid "Failed to get all associations of qos specs %s" msgstr "" -#: cinder/compute/manager.py:140 +#: cinder/volume/qos_specs.py:189 #, python-format msgid "" -"check_instance_lock: arguments: |%(self)s| |%(context)s| " -"|%(instance_uuid)s|" +"Type %(type_id)s is already associated with another qos specs: " +"%(qos_specs_id)s" msgstr "" -#: cinder/compute/manager.py:144 +#: cinder/volume/qos_specs.py:198 #, python-format -msgid "check_instance_lock: locked: |%s|" +msgid "Failed to associate qos specs %(id)s with type: %(vol_type_id)s" msgstr "" -#: cinder/compute/manager.py:146 +#: cinder/volume/qos_specs.py:212 #, python-format -msgid "check_instance_lock: admin: |%s|" +msgid "Failed to disassociate qos specs %(id)s with type: %(vol_type_id)s" msgstr "" -#: cinder/compute/manager.py:151 +#: cinder/volume/qos_specs.py:226 #, python-format -msgid "check_instance_lock: executing: |%s|" +msgid "Failed to disassociate qos specs %s." msgstr "" -#: cinder/compute/manager.py:155 -#, python-format -msgid "check_instance_lock: not executing |%s|" +#: cinder/volume/qos_specs.py:284 cinder/volume/volume_types.py:111 +msgid "name cannot be None" msgstr "" -#: cinder/compute/manager.py:201 +#: cinder/volume/utils.py:144 #, python-format -msgid "Unable to load the virtualization driver: %s" +msgid "" +"Incorrect value error: %(blocksize)s, it may indicate that " +"'volume_dd_blocksize' was configured incorrectly. Fall back to default." msgstr "" -#: cinder/compute/manager.py:223 +#: cinder/volume/volume_types.py:130 #, python-format msgid "" -"Instance %(instance_uuid)s has been destroyed from under us while trying " -"to set it to ERROR" +"Default volume type is not found, please check default_volume_type " +"config: %s" msgstr "" -#: cinder/compute/manager.py:240 +#: cinder/volume/drivers/block_device.py:131 +#: cinder/volume/drivers/block_device.py:143 cinder/volume/drivers/lvm.py:654 +#: cinder/volume/drivers/lvm.py:669 #, python-format -msgid "Current state is %(drv_state)s, state in DB is %(db_state)s." +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" msgstr "" -#: cinder/compute/manager.py:245 -msgid "Rebooting instance after cinder-compute restart." +#: cinder/volume/drivers/block_device.py:157 cinder/volume/drivers/lvm.py:687 +#, python-format +msgid "" +"Skipping remove_export. 
No iscsi_target is presently exported for volume:" +" %s" msgstr "" -#: cinder/compute/manager.py:255 -msgid "Hypervisor driver does not support firewall rules" +#: cinder/volume/drivers/block_device.py:183 cinder/volume/drivers/lvm.py:483 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" msgstr "" -#: cinder/compute/manager.py:260 -msgid "Checking state" +#: cinder/volume/drivers/block_device.py:200 cinder/volume/drivers/lvm.py:504 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" msgstr "" -#: cinder/compute/manager.py:329 +#: cinder/volume/drivers/block_device.py:272 cinder/volume/drivers/lvm.py:227 #, python-format -msgid "Setting up bdm %s" +msgid "Performing secure delete on volume: %s" msgstr "" -#: cinder/compute/manager.py:400 +#: cinder/volume/drivers/block_device.py:287 #, python-format -msgid "Instance %s already deleted from database. Attempting forceful vm deletion" +msgid "Error unrecognized volume_clear option: %s" msgstr "" -#: cinder/compute/manager.py:406 +#: cinder/volume/drivers/block_device.py:311 cinder/volume/drivers/lvm.py:300 +#: cinder/volume/drivers/zadara.py:509 cinder/volume/drivers/nexenta/nfs.py:189 #, python-format -msgid "Exception encountered while terminating the instance %s" +msgid "Creating clone of volume: %s" msgstr "" -#: cinder/compute/manager.py:444 -#, python-format -msgid "Instance %s not found." +#: cinder/volume/drivers/block_device.py:380 +msgid "No free disk" msgstr "" -#: cinder/compute/manager.py:480 -msgid "Instance has already been created" +#: cinder/volume/drivers/block_device.py:393 +msgid "No big enough free disk" msgstr "" -#: cinder/compute/manager.py:523 +#: cinder/volume/drivers/coraid.py:84 #, python-format -msgid "" -"image_id=%(image_id)s, image_size_bytes=%(size_bytes)d, " -"allowed_size_bytes=%(allowed_size_bytes)d" +msgid "Invalid ESM url scheme \"%s\". Supported https only." msgstr "" -#: cinder/compute/manager.py:528 +#: cinder/volume/drivers/coraid.py:111 +msgid "Invalid REST handle name. Expected path." +msgstr "" + +#: cinder/volume/drivers/coraid.py:134 #, python-format -msgid "" -"Image '%(image_id)s' size %(size_bytes)d exceeded instance_type allowed " -"size %(allowed_size_bytes)d" +msgid "Call to json.loads() failed: %(ex)s. Response: %(resp)s" msgstr "" -#: cinder/compute/manager.py:538 -msgid "Starting instance..." +#: cinder/volume/drivers/coraid.py:224 +msgid "Session is expired. Relogin on ESM." msgstr "" -#: cinder/compute/manager.py:548 -msgid "Skipping network allocation for instance" +#: cinder/volume/drivers/coraid.py:244 +msgid "Reply is empty." msgstr "" -#: cinder/compute/manager.py:561 -msgid "Instance failed network setup" +#: cinder/volume/drivers/coraid.py:246 +msgid "Error message is empty." msgstr "" -#: cinder/compute/manager.py:565 +#: cinder/volume/drivers/coraid.py:284 #, python-format -msgid "Instance network_info: |%s|" +msgid "Coraid Appliance ping failed: %s" msgstr "" -#: cinder/compute/manager.py:578 -msgid "Instance failed block device setup" +#: cinder/volume/drivers/coraid.py:297 +#, python-format +msgid "Volume \"%(name)s\" created with VSX LUN \"%(lun)s\"" msgstr "" -#: cinder/compute/manager.py:594 -msgid "Instance failed to spawn" +#: cinder/volume/drivers/coraid.py:311 +#, python-format +msgid "Volume \"%s\" deleted." 
msgstr "" -#: cinder/compute/manager.py:615 -msgid "Deallocating network for instance" +#: cinder/volume/drivers/coraid.py:315 +#, python-format +msgid "Resize volume \"%(name)s\" to %(size)s GB." msgstr "" -#: cinder/compute/manager.py:672 +#: cinder/volume/drivers/coraid.py:319 #, python-format -msgid "%(action_str)s instance" +msgid "Repository for volume \"%(name)s\" found: \"%(repo)s\"" msgstr "" -#: cinder/compute/manager.py:699 +#: cinder/volume/drivers/coraid.py:333 #, python-format -msgid "Ignoring DiskNotFound: %s" +msgid "Volume \"%(name)s\" resized. New size is %(size)s GB." msgstr "" -#: cinder/compute/manager.py:708 -#, python-format -msgid "terminating bdm %s" +#: cinder/volume/drivers/coraid.py:385 +msgid "Cannot create clone volume in different repository." msgstr "" -#: cinder/compute/manager.py:742 cinder/compute/manager.py:1328 -#: cinder/compute/manager.py:1416 cinder/compute/manager.py:2501 +#: cinder/volume/drivers/coraid.py:505 #, python-format -msgid "%s. Setting instance vm_state to ERROR" +msgid "Initialize connection %(shelf)s/%(lun)s for %(name)s" msgstr "" -#: cinder/compute/manager.py:811 +#: cinder/volume/drivers/eqlx.py:139 #, python-format msgid "" -"Cannot rebuild instance [%(instance_uuid)s], because the given image does" -" not exist." +"CLI output\n" +"%s" msgstr "" -#: cinder/compute/manager.py:816 -#, python-format -msgid "Cannot rebuild instance [%(instance_uuid)s]: %(exc)s" +#: cinder/volume/drivers/eqlx.py:154 +msgid "Reading CLI MOTD" msgstr "" -#: cinder/compute/manager.py:823 +#: cinder/volume/drivers/eqlx.py:158 #, python-format -msgid "Rebuilding instance %s" +msgid "Setting CLI terminal width: '%s'" msgstr "" -#: cinder/compute/manager.py:876 +#: cinder/volume/drivers/eqlx.py:162 #, python-format -msgid "Rebooting instance %s" +msgid "Sending CLI command: '%s'" msgstr "" -#: cinder/compute/manager.py:891 -#, python-format -msgid "" -"trying to reboot a non-running instance: %(instance_uuid)s (state: " -"%(state)s expected: %(running)s)" +#: cinder/volume/drivers/eqlx.py:169 +msgid "Error executing EQL command" msgstr "" -#: cinder/compute/manager.py:933 +#: cinder/volume/drivers/eqlx.py:199 #, python-format -msgid "instance %s: snapshotting" +msgid "EQL-driver: executing \"%s\"" msgstr "" -#: cinder/compute/manager.py:939 +#: cinder/volume/drivers/eqlx.py:208 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:383 #, python-format -msgid "" -"trying to snapshot a non-running instance: %(instance_uuid)s (state: " -"%(state)s expected: %(running)s)" +msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" msgstr "" -#: cinder/compute/manager.py:995 +#: cinder/volume/drivers/eqlx.py:215 cinder/volume/drivers/san/san.py:149 #, python-format -msgid "Found %(num_images)d images (rotation: %(rotation)d)" +msgid "Error running SSH command: %s" msgstr "" -#: cinder/compute/manager.py:1001 +#: cinder/volume/drivers/eqlx.py:282 #, python-format -msgid "Rotating out %d backups" +msgid "Volume %s does not exist, it may have already been deleted" msgstr "" -#: cinder/compute/manager.py:1005 +#: cinder/volume/drivers/eqlx.py:300 #, python-format -msgid "Deleting image %s" +msgid "EQL-driver: Setup is complete, group IP is %s" msgstr "" -#: cinder/compute/manager.py:1035 -#, python-format -msgid "Failed to set admin password. 
Instance %s is not running" +#: cinder/volume/drivers/eqlx.py:304 +msgid "Failed to setup the Dell EqualLogic driver" msgstr "" -#: cinder/compute/manager.py:1041 +#: cinder/volume/drivers/eqlx.py:320 #, python-format -msgid "Instance %s: Root password set" +msgid "Failed to create volume %s" msgstr "" -#: cinder/compute/manager.py:1050 -msgid "set_admin_password is not implemented by this driver." +#: cinder/volume/drivers/eqlx.py:329 +#, python-format +msgid "Volume %s was not found while trying to delete it" msgstr "" -#: cinder/compute/manager.py:1064 -msgid "Error setting admin password" +#: cinder/volume/drivers/eqlx.py:333 +#, python-format +msgid "Failed to delete volume %s" msgstr "" -#: cinder/compute/manager.py:1079 +#: cinder/volume/drivers/eqlx.py:348 #, python-format -msgid "" -"trying to inject a file into a non-running instance: %(instance_uuid)s " -"(state: %(current_power_state)s expected: %(expected_state)s)" +msgid "Failed to create snapshot of volume %s" msgstr "" -#: cinder/compute/manager.py:1084 +#: cinder/volume/drivers/eqlx.py:361 #, python-format -msgid "instance %(instance_uuid)s: injecting file to %(path)s" +msgid "Failed to create volume from snapshot %s" msgstr "" -#: cinder/compute/manager.py:1098 +#: cinder/volume/drivers/eqlx.py:374 #, python-format -msgid "" -"trying to update agent on a non-running instance: %(instance_uuid)s " -"(state: %(current_power_state)s expected: %(expected_state)s)" +msgid "Failed to create clone of volume %s" msgstr "" -#: cinder/compute/manager.py:1103 +#: cinder/volume/drivers/eqlx.py:384 #, python-format -msgid "instance %(instance_uuid)s: updating agent to %(url)s" +msgid "Failed to delete snapshot %(snap)s of volume %(vol)s" msgstr "" -#: cinder/compute/manager.py:1116 +#: cinder/volume/drivers/eqlx.py:405 #, python-format -msgid "instance %s: rescuing" +msgid "Failed to initialize connection to volume %s" msgstr "" -#: cinder/compute/manager.py:1141 +#: cinder/volume/drivers/eqlx.py:415 #, python-format -msgid "instance %s: unrescuing" +msgid "Failed to terminate connection to volume %s" msgstr "" -#: cinder/compute/manager.py:1270 -msgid "destination same as source!" 
+#: cinder/volume/drivers/eqlx.py:436
+#, python-format
+msgid "Volume %s is not found, it may have been deleted"
 msgstr ""

-#: cinder/compute/manager.py:1287
+#: cinder/volume/drivers/eqlx.py:440
 #, python-format
-msgid "instance %s: migrating"
+msgid "Failed to ensure export of volume %s"
 msgstr ""

-#: cinder/compute/manager.py:1471
+#: cinder/volume/drivers/eqlx.py:459
 #, python-format
-msgid "instance %s: pausing"
+msgid "Failed to extend_volume %(name)s from %(current_size)sGB to %(new_size)sGB"
 msgstr ""

-#: cinder/compute/manager.py:1489
+#: cinder/volume/drivers/glusterfs.py:86
 #, python-format
-msgid "instance %s: unpausing"
+msgid "There's no Gluster config file configured (%s)"
 msgstr ""

-#: cinder/compute/manager.py:1525
+#: cinder/volume/drivers/glusterfs.py:91
 #, python-format
-msgid "instance %s: retrieving diagnostics"
+msgid "Gluster config file at %(config)s doesn't exist"
 msgstr ""

-#: cinder/compute/manager.py:1534
-#, python-format
-msgid "instance %s: suspending"
+#: cinder/volume/drivers/glusterfs.py:103
+msgid "mount.glusterfs is not installed"
 msgstr ""

-#: cinder/compute/manager.py:1556
+#: cinder/volume/drivers/glusterfs.py:161
 #, python-format
-msgid "instance %s: resuming"
+msgid "Cloning volume %(src)s to volume %(dst)s"
 msgstr ""

-#: cinder/compute/manager.py:1579
-#, python-format
-msgid "instance %s: locking"
+#: cinder/volume/drivers/glusterfs.py:166
+msgid "Volume status must be 'available'."
 msgstr ""

-#: cinder/compute/manager.py:1588
+#: cinder/volume/drivers/glusterfs.py:202 cinder/volume/drivers/nfs.py:122
+#: cinder/volume/drivers/netapp/nfs.py:753
 #, python-format
-msgid "instance %s: unlocking"
+msgid "casted to %s"
 msgstr ""

-#: cinder/compute/manager.py:1596
-#, python-format
-msgid "instance %s: getting locked state"
+#: cinder/volume/drivers/glusterfs.py:216
+msgid "Snapshot status must be \"available\" to clone."
 msgstr ""

-#: cinder/compute/manager.py:1606
+#: cinder/volume/drivers/glusterfs.py:238
 #, python-format
-msgid "instance %s: reset network"
+msgid "snapshot: %(snap)s, volume: %(vol)s, volume_size: %(size)s"
 msgstr ""

-#: cinder/compute/manager.py:1614
+#: cinder/volume/drivers/glusterfs.py:257
 #, python-format
-msgid "instance %s: inject network info"
+msgid "will copy from snapshot at %s"
 msgstr ""

-#: cinder/compute/manager.py:1618
+#: cinder/volume/drivers/glusterfs.py:275 cinder/volume/drivers/nfs.py:172
 #, python-format
-msgid "network_info to inject: |%s|"
+msgid "Volume %s does not have provider_location specified, skipping"
 msgstr ""

-#: cinder/compute/manager.py:1655
+#: cinder/volume/drivers/glusterfs.py:373
 #, python-format
-msgid "instance %s: getting vnc console"
+msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)"
 msgstr ""

-#: cinder/compute/manager.py:1685
+#: cinder/volume/drivers/glusterfs.py:403
 #, python-format
-msgid "Booting with volume %(volume_id)s at %(mountpoint)s"
+msgid "nova call result: %s"
 msgstr ""

-#: cinder/compute/manager.py:1703
-#, python-format
-msgid ""
-"instance %(instance_uuid)s: attaching volume %(volume_id)s to "
-"%(mountpoint)s"
+#: cinder/volume/drivers/glusterfs.py:405
+msgid "Call to Nova to create snapshot failed"
 msgstr ""

-#: cinder/compute/manager.py:1705
-#, python-format
-msgid "Attaching volume %(volume_id)s to %(mountpoint)s"
+#: cinder/volume/drivers/glusterfs.py:427
+msgid "Nova returned \"error\" status while creating snapshot."
msgstr "" -#: cinder/compute/manager.py:1714 +#: cinder/volume/drivers/glusterfs.py:431 #, python-format -msgid "instance %(instance_uuid)s: attach failed %(mountpoint)s, removing" +msgid "Status of snapshot %(id)s is now %(status)s" msgstr "" -#: cinder/compute/manager.py:1724 +#: cinder/volume/drivers/glusterfs.py:444 #, python-format -msgid "Attach failed %(mountpoint)s, removing" +msgid "Timed out while waiting for Nova update for creation of snapshot %s." msgstr "" -#: cinder/compute/manager.py:1752 +#: cinder/volume/drivers/glusterfs.py:456 #, python-format -msgid "Detach volume %(volume_id)s from mountpoint %(mp)s" +msgid "create snapshot: %s" msgstr "" -#: cinder/compute/manager.py:1756 +#: cinder/volume/drivers/glusterfs.py:457 #, python-format -msgid "Detaching volume from unknown instance %s" +msgid "volume id: %s" msgstr "" -#: cinder/compute/manager.py:1822 +#: cinder/volume/drivers/glusterfs.py:532 +msgid "'active' must be present when writing snap_info." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:562 #, python-format -msgid "" -"Creating tmpfile %s to notify to other compute nodes that they should " -"mount the same storage." +msgid "deleting snapshot %s" msgstr "" -#: cinder/compute/manager.py:1884 -msgid "Instance has no volume." +#: cinder/volume/drivers/glusterfs.py:566 +msgid "Volume status must be \"available\" or \"in-use\"." msgstr "" -#: cinder/compute/manager.py:1916 +#: cinder/volume/drivers/glusterfs.py:582 #, python-format -msgid "plug_vifs() failed %(cnt)d.Retry up to %(max_retry)d for %(hostname)s." +msgid "" +"Snapshot record for %s is not present, allowing snapshot_delete to " +"proceed." msgstr "" -#: cinder/compute/manager.py:1973 +#: cinder/volume/drivers/glusterfs.py:587 #, python-format -msgid "Pre live migration failed at %(dest)s" +msgid "snapshot_file for this snap is %s" msgstr "" -#: cinder/compute/manager.py:2000 -msgid "post_live_migration() is started.." +#: cinder/volume/drivers/glusterfs.py:608 +#, python-format +msgid "No base file found for %s." msgstr "" -#: cinder/compute/manager.py:2030 -msgid "No floating_ip found" +#: cinder/volume/drivers/glusterfs.py:625 +#, python-format +msgid "No %(base_id)s found for %(file)s" msgstr "" -#: cinder/compute/manager.py:2038 -msgid "No floating_ip found." +#: cinder/volume/drivers/glusterfs.py:680 +#, python-format +msgid "No file found with %s as backing file." msgstr "" -#: cinder/compute/manager.py:2040 +#: cinder/volume/drivers/glusterfs.py:690 #, python-format -msgid "" -"Live migration: Unexpected error: cannot inherit floating ip.\n" -"%(e)s" +msgid "No snap found with %s as backing file." msgstr "" -#: cinder/compute/manager.py:2073 +#: cinder/volume/drivers/glusterfs.py:701 #, python-format -msgid "Migrating instance to %(dest)s finished successfully." +msgid "No file depends on %s." msgstr "" -#: cinder/compute/manager.py:2075 -msgid "" -"You may see the error \"libvirt: QEMU error: Domain not found: no domain " -"with matching name.\" This error can be safely ignored." +#: cinder/volume/drivers/glusterfs.py:727 +#, python-format +msgid "Check condition failed: %s expected to be None." msgstr "" -#: cinder/compute/manager.py:2090 -msgid "Post operation of migration started" +#: cinder/volume/drivers/glusterfs.py:778 +msgid "Call to Nova delete snapshot failed" msgstr "" -#: cinder/compute/manager.py:2226 +#: cinder/volume/drivers/glusterfs.py:796 #, python-format -msgid "Updated the info_cache for instance %s" +msgid "status of snapshot %s is still \"deleting\"... 
waiting" msgstr "" -#: cinder/compute/manager.py:2255 -msgid "Updating bandwidth usage cache" +#: cinder/volume/drivers/glusterfs.py:802 +#, python-format +msgid "Unable to delete snapshot %(id)s, status: %(status)s." msgstr "" -#: cinder/compute/manager.py:2277 -msgid "Updating host status" +#: cinder/volume/drivers/glusterfs.py:815 +#, python-format +msgid "Timed out while waiting for Nova update for deletion of snapshot %(id)s." msgstr "" -#: cinder/compute/manager.py:2305 +#: cinder/volume/drivers/glusterfs.py:904 #, python-format -msgid "" -"Found %(num_db_instances)s in the database and %(num_vm_instances)s on " -"the hypervisor." +msgid "%s must be a valid raw or qcow2 image." msgstr "" -#: cinder/compute/manager.py:2331 -#, python-format -msgid "" -"During the sync_power process the instance %(uuid)s has moved from host " -"%(src)s to host %(dst)s" +#: cinder/volume/drivers/glusterfs.py:967 +msgid "Extend volume is only supported for this driver when no snapshots exist." msgstr "" -#: cinder/compute/manager.py:2344 +#: cinder/volume/drivers/glusterfs.py:975 #, python-format -msgid "" -"Instance %s is in the process of migrating to this host. Wait next " -"sync_power cycle before setting power state to NOSTATE" +msgid "Unrecognized backing format: %s" msgstr "" -#: cinder/compute/manager.py:2350 -msgid "" -"Instance found in database but not known by hypervisor. Setting power " -"state to NOSTATE" +#: cinder/volume/drivers/glusterfs.py:990 +#, python-format +msgid "creating new volume at %s" msgstr "" -#: cinder/compute/manager.py:2380 -msgid "FLAGS.reclaim_instance_interval <= 0, skipping..." +#: cinder/volume/drivers/glusterfs.py:993 +#, python-format +msgid "file already exists at %s" msgstr "" -#: cinder/compute/manager.py:2392 -msgid "Reclaiming deleted instance" +#: cinder/volume/drivers/glusterfs.py:1019 cinder/volume/drivers/nfs.py:159 +#, python-format +msgid "Exception during mounting %s" msgstr "" -#: cinder/compute/manager.py:2458 +#: cinder/volume/drivers/glusterfs.py:1021 #, python-format -msgid "" -"Detected instance with name label '%(name)s' which is marked as DELETED " -"but still present on host." +msgid "Available shares: %s" msgstr "" -#: cinder/compute/manager.py:2465 +#: cinder/volume/drivers/glusterfs.py:1038 #, python-format msgid "" -"Destroying instance with name label '%(name)s' which is marked as DELETED" -" but still present on host." +"GlusterFS share at %(dir)s is not writable by the Cinder volume service. " +"Snapshot operations will not be supported." msgstr "" -#: cinder/compute/manager.py:2472 +#: cinder/volume/drivers/gpfs.py:96 #, python-format -msgid "Unrecognized value '%(action)s' for FLAGS.running_deleted_instance_action" +msgid "GPFS is not active. Detailed output: %s" msgstr "" -#: cinder/compute/manager.py:2542 +#: cinder/volume/drivers/gpfs.py:97 #, python-format -msgid "" -"Aggregate %(aggregate_id)s: unrecoverable state during operation on " -"%(host)s" +msgid "GPFS is not running - state: %s" msgstr "" -#: cinder/compute/utils.py:142 -msgid "v4 subnets are required for legacy nw_info" +#: cinder/volume/drivers/gpfs.py:140 +msgid "Option gpfs_mount_point_base is not set correctly." msgstr "" -#: cinder/console/manager.py:77 cinder/console/vmrc_manager.py:70 -msgid "Adding console" +#: cinder/volume/drivers/gpfs.py:147 +msgid "Option gpfs_images_share_mode is not set correctly." msgstr "" -#: cinder/console/manager.py:97 -#, python-format -msgid "Tried to remove non-existent console %(console_id)s." 
+#: cinder/volume/drivers/gpfs.py:153
+msgid "Option gpfs_images_dir is not set correctly."
 msgstr ""

-#: cinder/console/vmrc_manager.py:122
+#: cinder/volume/drivers/gpfs.py:160
 #, python-format
-msgid "Tried to remove non-existent console %(console_id)s."
+msgid ""
+"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s "
+"belong to different file systems"
 msgstr ""

-#: cinder/console/vmrc_manager.py:125
+#: cinder/volume/drivers/gpfs.py:169
 #, python-format
-msgid "Removing console %(console_id)s."
+msgid ""
+"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in "
+"cluster daemon level %(cur)s - must be at least at level %(min)s."
 msgstr ""

-#: cinder/console/xvp.py:98
-msgid "Rebuilding xvp conf"
+#: cinder/volume/drivers/gpfs.py:183
+#, python-format
+msgid "%s must be an absolute path."
 msgstr ""

-#: cinder/console/xvp.py:116
+#: cinder/volume/drivers/gpfs.py:188
 #, python-format
-msgid "Re-wrote %s"
+msgid "%s is not a directory."
 msgstr ""

-#: cinder/console/xvp.py:121
-msgid "Stopping xvp"
+#: cinder/volume/drivers/gpfs.py:197
+#, python-format
+msgid ""
+"The GPFS filesystem %(fs)s is not at the required release level. Current"
+" level is %(cur)s, must be at least %(min)s."
 msgstr ""

-#: cinder/console/xvp.py:134
-msgid "Starting xvp"
+#: cinder/volume/drivers/gpfs.py:556
+#, python-format
+msgid "Failed to resize volume %(volume_id)s, error: %(error)s"
 msgstr ""

-#: cinder/console/xvp.py:141
+#: cinder/volume/drivers/gpfs.py:604
 #, python-format
-msgid "Error starting xvp: %s"
+msgid "mkfs failed on volume %(vol)s, error message was: %(err)s"
 msgstr ""

-#: cinder/console/xvp.py:144
-msgid "Restarting xvp"
+#: cinder/volume/drivers/gpfs.py:637
+#, python-format
+msgid ""
+"%s cannot be accessed. Verify that GPFS is active and file system is "
+"mounted."
 msgstr ""

-#: cinder/console/xvp.py:146
-msgid "xvp not running..."
+#: cinder/volume/drivers/lvm.py:189
+#, python-format
+msgid "Unable to delete due to existing snapshot for volume: %s"
 msgstr ""

-#: cinder/consoleauth/manager.py:63
+#: cinder/volume/drivers/lvm.py:215
 #, python-format
-msgid "Deleting Expired Token: (%s)"
+msgid "Volume device file path %s does not exist."
 msgstr ""

-#: cinder/consoleauth/manager.py:75
+#: cinder/volume/drivers/lvm.py:221
 #, python-format
-msgid "Received Token: %(token)s, %(token_dict)s)"
+msgid "Size for volume: %s not found, cannot secure delete."
msgstr "" -#: cinder/consoleauth/manager.py:79 +#: cinder/volume/drivers/lvm.py:262 #, python-format -msgid "Checking Token: %(token)s, %(token_valid)s)" +msgid "snapshot: %s not found, skipping delete operations" msgstr "" -#: cinder/db/sqlalchemy/api.py:57 -msgid "Use of empty request context is deprecated" +#: cinder/volume/drivers/lvm.py:361 +#, python-format +msgid "Unable to update stats on non-initialized Volume Group: %s" msgstr "" -#: cinder/db/sqlalchemy/api.py:198 +#: cinder/volume/drivers/lvm.py:462 #, python-format -msgid "Unrecognized read_deleted value '%s'" +msgid "Error creating iSCSI target, retrying creation for target: %s" msgstr "" -#: cinder/db/sqlalchemy/api.py:516 cinder/db/sqlalchemy/api.py:551 +#: cinder/volume/drivers/lvm.py:482 #, python-format -msgid "No ComputeNode for %(host)s" +msgid "volume_info:%s" msgstr "" -#: cinder/db/sqlalchemy/api.py:4019 cinder/db/sqlalchemy/api.py:4045 -#, python-format -msgid "No backend config with id %(sm_backend_id)s" +#: cinder/volume/drivers/lvm.py:518 +msgid "Detected inconsistency in provider_location id" msgstr "" -#: cinder/db/sqlalchemy/api.py:4103 +#: cinder/volume/drivers/lvm.py:519 cinder/volume/drivers/lvm.py:724 +#: cinder/volume/drivers/huawei/rest_common.py:1225 #, python-format -msgid "No sm_flavor called %(sm_flavor)s" +msgid "%s" msgstr "" -#: cinder/db/sqlalchemy/api.py:4147 +#: cinder/volume/drivers/lvm.py:573 #, python-format -msgid "No sm_volume with id %(volume_id)s" +msgid "Symbolic link %s not found" msgstr "" -#: cinder/db/sqlalchemy/migration.py:66 -msgid "python-migrate is not installed. Exiting." +#: cinder/volume/drivers/nfs.py:109 +msgid "Driver specific implementation needs to return mount_point_base." msgstr "" -#: cinder/db/sqlalchemy/migration.py:78 -msgid "version should be an integer" +#: cinder/volume/drivers/nfs.py:263 +#, python-format +msgid "Expected volume size was %d" msgstr "" -#: cinder/db/sqlalchemy/session.py:137 +#: cinder/volume/drivers/nfs.py:264 #, python-format -msgid "SQL connection failed. %s attempts left." +msgid " but size is now %d" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:48 -msgid "interface column not added to networks table" +#: cinder/volume/drivers/nfs.py:361 +#, python-format +msgid "%s is already mounted" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:80 -#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:54 -#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:61 -#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:48 +#: cinder/volume/drivers/nfs.py:421 #, python-format -msgid "Table |%s| not created!" +msgid "There's no NFS config file configured (%s)" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:87 -msgid "VIF column not added to fixed_ips table" +#: cinder/volume/drivers/nfs.py:426 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:97 +#: cinder/volume/drivers/nfs.py:431 #, python-format -msgid "join list for moving mac_addresses |%s|" +msgid "NFS config 'nfs_oversub_ratio' invalid. 
Must be > 0: %s" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:39 -#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:60 -#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:61 -#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:99 -msgid "foreign key constraint couldn't be added" +#: cinder/volume/drivers/nfs.py:439 +#, python-format +msgid "NFS config 'nfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:58 -msgid "foreign key constraint couldn't be dropped" +#: cinder/volume/drivers/nfs.py:493 +#, python-format +msgid "Selected %s as target nfs share." msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/045_add_network_priority.py:34 -msgid "priority column not added to networks table" +#: cinder/volume/drivers/nfs.py:526 +#, python-format +msgid "%s is above nfs_used_ratio" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:41 -#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:42 -#: cinder/db/sqlalchemy/migrate_repo/versions/064_change_instance_id_to_uuid_in_instance_actions.py:56 -#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:68 -msgid "foreign key constraint couldn't be removed" +#: cinder/volume/drivers/nfs.py:529 +#, python-format +msgid "%s is above nfs_oversub_ratio" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/049_add_instances_progress.py:34 -msgid "progress column not added to instances table" +#: cinder/volume/drivers/nfs.py:532 +#, python-format +msgid "%s reserved space is above nfs_oversub_ratio" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/055_convert_flavor_id_to_str.py:97 +#: cinder/volume/drivers/rbd.py:160 #, python-format -msgid "" -"Could not cast flavorid to integer: %s. Set flavorid to an integer-like " -"string to downgrade." +msgid "Invalid argument - whence=%s not supported" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:69 -msgid "instance_info_caches tables not dropped" +#: cinder/volume/drivers/rbd.py:164 +msgid "Invalid argument" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/069_block_migration.py:41 -msgid "progress column not added to compute_nodes table" +#: cinder/volume/drivers/rbd.py:183 +msgid "fileno() not supported by RBD()" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:76 -msgid "dns_domains table not dropped" +#: cinder/volume/drivers/rbd.py:210 +#, python-format +msgid "error opening rbd image %s" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:60 -msgid "quota_classes table not dropped" +#: cinder/volume/drivers/rbd.py:259 +msgid "rados and rbd python libraries not found" msgstr "" -#: cinder/image/glance.py:147 -msgid "Connection error contacting glance server, retrying" +#: cinder/volume/drivers/rbd.py:265 +msgid "error connecting to ceph cluster" msgstr "" -#: cinder/image/glance.py:153 cinder/network/quantum/melange_connection.py:104 -msgid "Maximum attempts reached" +#: cinder/volume/drivers/rbd.py:346 cinder/volume/drivers/sheepdog.py:178 +msgid "error refreshing volume stats" msgstr "" -#: cinder/image/glance.py:278 +#: cinder/volume/drivers/rbd.py:377 #, python-format -msgid "Creating image in Glance. 
Metadata passed in %s" +msgid "clone depth exceeds limit of %s" msgstr "" -#: cinder/image/glance.py:281 +#: cinder/volume/drivers/rbd.py:411 #, python-format -msgid "Metadata after formatting for Glance %s" +msgid "maximum clone depth (%d) has been reached - flattening source volume" msgstr "" -#: cinder/image/glance.py:289 +#: cinder/volume/drivers/rbd.py:423 #, python-format -msgid "Metadata returned from Glance formatted for Base %s" +msgid "flattening source volume %s" msgstr "" -#: cinder/image/glance.py:331 cinder/image/glance.py:335 -msgid "Not the image owner" +#: cinder/volume/drivers/rbd.py:435 +#, python-format +msgid "creating snapshot='%s'" msgstr "" -#: cinder/image/glance.py:410 +#: cinder/volume/drivers/rbd.py:445 #, python-format -msgid "%(timestamp)s does not follow any of the signatures: %(iso_formats)s" +msgid "cloning '%(src_vol)s@%(src_snap)s' to '%(dest)s'" msgstr "" -#: cinder/image/s3.py:309 -#, python-format -msgid "Failed to download %(image_location)s to %(image_path)s" +#: cinder/volume/drivers/rbd.py:459 +msgid "clone created successfully" msgstr "" -#: cinder/image/s3.py:328 +#: cinder/volume/drivers/rbd.py:468 #, python-format -msgid "Failed to decrypt %(image_location)s to %(image_path)s" +msgid "creating volume '%s'" msgstr "" -#: cinder/image/s3.py:340 +#: cinder/volume/drivers/rbd.py:484 #, python-format -msgid "Failed to untar %(image_location)s to %(image_path)s" +msgid "flattening %(pool)s/%(img)s" msgstr "" -#: cinder/image/s3.py:353 +#: cinder/volume/drivers/rbd.py:490 #, python-format -msgid "Failed to upload %(image_location)s to %(image_path)s" +msgid "cloning %(pool)s/%(img)s@%(snap)s to %(dst)s" msgstr "" -#: cinder/image/s3.py:379 -#, python-format -msgid "Failed to decrypt private key: %s" +#: cinder/volume/drivers/rbd.py:527 +msgid "volume has no backup snaps" msgstr "" -#: cinder/image/s3.py:387 +#: cinder/volume/drivers/rbd.py:550 #, python-format -msgid "Failed to decrypt initialization vector: %s" +msgid "volume %s is not a clone" msgstr "" -#: cinder/image/s3.py:398 +#: cinder/volume/drivers/rbd.py:568 #, python-format -msgid "Failed to decrypt image file %(image_file)s: %(err)s" +msgid "deleting parent snapshot %s" msgstr "" -#: cinder/image/s3.py:410 -msgid "Unsafe filenames in image" +#: cinder/volume/drivers/rbd.py:579 +#, python-format +msgid "deleting parent %s" msgstr "" -#: cinder/ipv6/account_identifier.py:38 cinder/ipv6/rfc2462.py:34 +#: cinder/volume/drivers/rbd.py:593 #, python-format -msgid "Bad mac for to_global_ipv6: %s" +msgid "volume %s no longer exists in backend" msgstr "" -#: cinder/ipv6/account_identifier.py:40 cinder/ipv6/rfc2462.py:36 -#, python-format -msgid "Bad prefix for to_global_ipv6: %s" +#: cinder/volume/drivers/rbd.py:609 +msgid "volume has clone snapshot(s)" msgstr "" -#: cinder/ipv6/account_identifier.py:42 +#: cinder/volume/drivers/rbd.py:625 #, python-format -msgid "Bad project_id for to_global_ipv6: %s" +msgid "deleting rbd volume %s" msgstr "" -#: cinder/network/ldapdns.py:321 -msgid "This driver only supports type 'a' entries." +#: cinder/volume/drivers/rbd.py:629 +msgid "" +"ImageBusy error raised while deleting rbd volume. This may have been " +"caused by a connection from a client that has crashed and, if so, may be " +"resolved by retrying the delete after 30 seconds has elapsed." 
msgstr "" -#: cinder/network/linux_net.py:166 -#, python-format -msgid "Attempted to remove chain %s which does not exist" +#: cinder/volume/drivers/rbd.py:642 +msgid "volume is a clone so cleaning references" msgstr "" -#: cinder/network/linux_net.py:192 +#: cinder/volume/drivers/rbd.py:696 #, python-format -msgid "Unknown chain: %r" +msgid "connection data: %s" msgstr "" -#: cinder/network/linux_net.py:215 -#, python-format -msgid "" -"Tried to remove rule that was not there: %(chain)r %(rule)r %(wrap)r " -"%(top)r" +#: cinder/volume/drivers/rbd.py:705 +msgid "Not stored in rbd" msgstr "" -#: cinder/network/linux_net.py:335 -msgid "IPTablesManager.apply completed with success" +#: cinder/volume/drivers/rbd.py:709 +msgid "Blank components" msgstr "" -#: cinder/network/linux_net.py:694 -#, python-format -msgid "Hupping dnsmasq threw %s" +#: cinder/volume/drivers/rbd.py:712 +msgid "Not an rbd snapshot" msgstr "" -#: cinder/network/linux_net.py:696 +#: cinder/volume/drivers/rbd.py:724 #, python-format -msgid "Pid %d is stale, relaunching dnsmasq" +msgid "not cloneable: %s" msgstr "" -#: cinder/network/linux_net.py:756 +#: cinder/volume/drivers/rbd.py:728 #, python-format -msgid "killing radvd threw %s" +msgid "%s is in a different ceph cluster" msgstr "" -#: cinder/network/linux_net.py:758 -#, python-format -msgid "Pid %d is stale, relaunching radvd" +#: cinder/volume/drivers/rbd.py:733 +msgid "rbd image clone requires image format to be 'raw' but image {0} is '{1}'" msgstr "" -#: cinder/network/linux_net.py:967 +#: cinder/volume/drivers/rbd.py:747 #, python-format -msgid "Starting VLAN inteface %s" +msgid "Unable to open image %(loc)s: %(err)s" msgstr "" -#: cinder/network/linux_net.py:999 -#, python-format -msgid "Starting Bridge interface for %s" +#: cinder/volume/drivers/rbd.py:817 +msgid "volume backup complete." msgstr "" -#: cinder/network/linux_net.py:1142 -#, python-format -msgid "Starting bridge %s " +#: cinder/volume/drivers/rbd.py:830 +msgid "volume restore complete." msgstr "" -#: cinder/network/linux_net.py:1149 +#: cinder/volume/drivers/rbd.py:840 cinder/volume/drivers/sheepdog.py:195 #, python-format -msgid "Done starting bridge %s" +msgid "Failed to Extend Volume %(volname)s" msgstr "" -#: cinder/network/linux_net.py:1167 +#: cinder/volume/drivers/rbd.py:845 cinder/volume/drivers/sheepdog.py:200 +#: cinder/volume/drivers/windows/windows.py:223 #, python-format -msgid "Failed unplugging gateway interface '%s'" +msgid "Extend volume from %(old_size)s GB to %(new_size)s GB." 
 msgstr ""

-#: cinder/network/linux_net.py:1170
-#, python-format
-msgid "Unplugged gateway interface '%s'"
+#: cinder/volume/drivers/scality.py:67
+msgid "Value required for 'scality_sofs_config'"
 msgstr ""

-#: cinder/network/manager.py:291
+#: cinder/volume/drivers/scality.py:78
 #, python-format
-msgid "Fixed ip %(fixed_ip_id)s not found"
+msgid "Cannot access 'scality_sofs_config': %s"
 msgstr ""

-#: cinder/network/manager.py:300 cinder/network/manager.py:496
-#, python-format
-msgid "Interface %(interface)s not found"
+#: cinder/volume/drivers/scality.py:84
+msgid "Cannot execute /sbin/mount.sofs"
 msgstr ""

-#: cinder/network/manager.py:315
-#, python-format
-msgid "floating IP allocation for instance |%s|"
+#: cinder/volume/drivers/scality.py:105
+msgid "Cannot mount Scality SOFS, check syslog for errors"
 msgstr ""

-#: cinder/network/manager.py:353
+#: cinder/volume/drivers/scality.py:139
 #, python-format
-msgid "floating IP deallocation for instance |%s|"
+msgid "Cannot find volume dir for Scality SOFS at '%s'"
 msgstr ""

-#: cinder/network/manager.py:386
+#: cinder/volume/drivers/sheepdog.py:59
 #, python-format
-msgid "Address |%(address)s| is not allocated"
+msgid "Sheepdog is not working: %s"
 msgstr ""

-#: cinder/network/manager.py:390
-#, python-format
-msgid "Address |%(address)s| is not allocated to your project |%(project)s|"
+#: cinder/volume/drivers/sheepdog.py:64
+msgid "Sheepdog is not working"
 msgstr ""

-#: cinder/network/manager.py:402
+#: cinder/volume/drivers/solidfire.py:144
 #, python-format
-msgid "Quota exceeded for %s, tried to allocate address"
+msgid "Payload for SolidFire API call: %s"
 msgstr ""

-#: cinder/network/manager.py:614
+#: cinder/volume/drivers/solidfire.py:151
 #, python-format
 msgid ""
-"Database inconsistency: DNS domain |%s| is registered in the Cinder db but "
-"not visible to either the floating or instance DNS driver. It will be "
-"ignored."
+"Failed to make httplib connection to SolidFire Cluster: %s (verify san_ip "
+"settings)"
 msgstr ""

-#: cinder/network/manager.py:660
+#: cinder/volume/drivers/solidfire.py:154
 #, python-format
-msgid "Domain |%(domain)s| already exists, changing zone to |%(av_zone)s|."
+msgid "Failed to make httplib connection: %s"
 msgstr ""

-#: cinder/network/manager.py:670
+#: cinder/volume/drivers/solidfire.py:161
 #, python-format
-msgid "Domain |%(domain)s| already exists, changing project to |%(project)s|."
+msgid ""
+"Request to SolidFire cluster returned bad status: %(status)s / %(reason)s"
+" (check san_login/san_password settings)"
 msgstr ""

-#: cinder/network/manager.py:778
+#: cinder/volume/drivers/solidfire.py:166
 #, python-format
-msgid "Disassociated %s stale fixed ip(s)"
+msgid "HTTP request failed, with status: %(status)s and reason: %(reason)s"
 msgstr ""

-#: cinder/network/manager.py:782
-msgid "setting network host"
+#: cinder/volume/drivers/solidfire.py:177
+#, python-format
+msgid "Call to json.loads() raised an exception: %s"
 msgstr ""

-#: cinder/network/manager.py:896
+#: cinder/volume/drivers/solidfire.py:183
 #, python-format
-msgid "network allocations for instance |%s|"
+msgid "Results of SolidFire API call: %s"
 msgstr ""

-#: cinder/network/manager.py:901
+#: cinder/volume/drivers/solidfire.py:187
 #, python-format
-msgid "networks retrieved for instance |%(instance_id)s|: |%(networks)s|"
+msgid "Clone operation encountered: %s"
 msgstr ""

-#: cinder/network/manager.py:930
+#: cinder/volume/drivers/solidfire.py:189
 #, python-format
-msgid "network deallocation for instance |%s|"
+msgid "Waiting for outstanding operation before retrying snapshot: %s"
 msgstr ""

-#: cinder/network/manager.py:1152
+#: cinder/volume/drivers/solidfire.py:195
 #, python-format
-msgid ""
-"instance-dns-zone is |%(domain)s|, which is in availability zone "
-"|%(zone)s|. Instance |%(instance)s| is in zone |%(zone2)s|. No DNS record"
-" will be created."
+msgid "Detected xDBVersionMismatch, retry %s of 5"
 msgstr ""

-#: cinder/network/manager.py:1227
+#: cinder/volume/drivers/solidfire.py:202
+#: cinder/volume/drivers/solidfire.py:271
+#: cinder/volume/drivers/solidfire.py:366
 #, python-format
-msgid "Unable to release %s because vif doesn't exist."
+msgid "API response: %s"
 msgstr ""

-#: cinder/network/manager.py:1244
+#: cinder/volume/drivers/solidfire.py:222
 #, python-format
-msgid "Leased IP |%(address)s|"
+msgid "Found solidfire account: %s"
 msgstr ""

-#: cinder/network/manager.py:1248
+#: cinder/volume/drivers/solidfire.py:253
 #, python-format
-msgid "IP %s leased that is not associated"
+msgid "solidfire account: %s does not exist, create it..."
 msgstr ""

-#: cinder/network/manager.py:1256
+#: cinder/volume/drivers/solidfire.py:315
 #, python-format
-msgid "IP |%s| leased that isn't allocated"
+msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!"
 msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:398
+msgid "Failed to get model update from clone"
 msgstr ""

-#: cinder/network/manager.py:1261
+#: cinder/volume/drivers/solidfire.py:410
 #, python-format
-msgid "Released IP |%(address)s|"
+msgid "Failed volume create: %s"
 msgstr ""

-#: cinder/network/manager.py:1265
+#: cinder/volume/drivers/solidfire.py:425
 #, python-format
-msgid "IP %s released that is not associated"
+msgid "More than one valid preset was detected, using %s"
 msgstr ""

-#: cinder/network/manager.py:1268
+#: cinder/volume/drivers/solidfire.py:460
 #, python-format
-msgid "IP %s released that was not leased"
+msgid "Failed to get SolidFire Volume: %s"
 msgstr ""

-#: cinder/network/manager.py:1331
-msgid "cidr already in use"
+#: cinder/volume/drivers/solidfire.py:469
+#, python-format
+msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s."
 msgstr ""

-#: cinder/network/manager.py:1334
+#: cinder/volume/drivers/solidfire.py:478
 #, python-format
-msgid "requested cidr (%(cidr)s) conflicts with existing supernet (%(super)s)"
+msgid "Volume %s not found on SF Cluster."
msgstr "" -#: cinder/network/manager.py:1345 +#: cinder/volume/drivers/solidfire.py:481 #, python-format -msgid "" -"requested cidr (%(cidr)s) conflicts with existing smaller cidr " -"(%(smaller)s)" +msgid "Found %(count)s volumes mapped to id: %(uuid)s." msgstr "" -#: cinder/network/manager.py:1404 -msgid "Network already exists!" +#: cinder/volume/drivers/solidfire.py:550 +msgid "Enter SolidFire delete_volume..." msgstr "" -#: cinder/network/manager.py:1423 +#: cinder/volume/drivers/solidfire.py:554 #, python-format -msgid "Network must be disassociated from project %s before delete" +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" msgstr "" -#: cinder/network/manager.py:1832 -msgid "" -"The sum between the number of networks and the vlan start cannot be " -"greater than 4094" +#: cinder/volume/drivers/solidfire.py:556 +msgid "This usually means the volume was never successfully created." msgstr "" -#: cinder/network/manager.py:1839 +#: cinder/volume/drivers/solidfire.py:569 #, python-format -msgid "" -"The network range is not big enough to fit %(num_networks)s. Network size" -" is %(network_size)s" +msgid "Failed to delete SolidFire Volume: %s" msgstr "" -#: cinder/network/minidns.py:65 -msgid "This driver only supports type 'a'" +#: cinder/volume/drivers/solidfire.py:572 +#: cinder/volume/drivers/solidfire.py:646 +#: cinder/volume/drivers/solidfire.py:709 +#: cinder/volume/drivers/solidfire.py:734 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" msgstr "" -#: cinder/network/quantum/client.py:154 -msgid "Tenant ID not set" +#: cinder/volume/drivers/solidfire.py:575 +msgid "Leaving SolidFire delete_volume" msgstr "" -#: cinder/network/quantum/client.py:180 -#, python-format -msgid "Quantum Client Request: %(method)s %(action)s" +#: cinder/volume/drivers/solidfire.py:579 +msgid "Executing SolidFire ensure_export..." msgstr "" -#: cinder/network/quantum/client.py:196 -#, python-format -msgid "Quantum entity not found: %s" +#: cinder/volume/drivers/solidfire.py:587 +msgid "Executing SolidFire create_export..." msgstr "" -#: cinder/network/quantum/client.py:206 -#, python-format -msgid "Server %(status_code)s error: %(data)s" +#: cinder/volume/drivers/solidfire.py:638 +msgid "Entering SolidFire extend_volume..." msgstr "" -#: cinder/network/quantum/client.py:210 -#, python-format -msgid "Unable to connect to server. Got error: %s" +#: cinder/volume/drivers/solidfire.py:660 +msgid "Leaving SolidFire extend_volume" msgstr "" -#: cinder/network/quantum/client.py:228 -#, python-format -msgid "unable to deserialize object of type = '%s'" +#: cinder/volume/drivers/solidfire.py:665 +msgid "Updating cluster status info" msgstr "" -#: cinder/network/quantum/manager.py:162 -msgid "QuantumManager does not use 'multi_host' parameter." +#: cinder/volume/drivers/solidfire.py:673 +msgid "Failed to get updated stats" msgstr "" -#: cinder/network/quantum/manager.py:166 -msgid "QuantumManager requires that only one network is created per call" +#: cinder/volume/drivers/solidfire.py:703 +#: cinder/volume/drivers/solidfire.py:728 +msgid "Entering SolidFire attach_volume..." msgstr "" -#: cinder/network/quantum/manager.py:176 -msgid "QuantumManager does not use 'vlan_start' parameter." +#: cinder/volume/drivers/solidfire.py:773 +msgid "Leaving SolidFire transfer volume" msgstr "" -#: cinder/network/quantum/manager.py:182 -msgid "QuantumManager does not use 'vpn_start' parameter." 
+#: cinder/volume/drivers/zadara.py:236 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" msgstr "" -#: cinder/network/quantum/manager.py:186 -msgid "QuantumManager does not use 'bridge' parameter." +#: cinder/volume/drivers/zadara.py:260 +#, python-format +msgid "Operation completed. %(data)s" msgstr "" -#: cinder/network/quantum/manager.py:190 -msgid "QuantumManager does not use 'bridge_interface' parameter." +#: cinder/volume/drivers/zadara.py:357 +#, python-format +msgid "Pool %(name)s: %(total)sGB total, %(free)sGB free" msgstr "" -#: cinder/network/quantum/manager.py:195 -msgid "QuantumManager requires a valid (.1) gateway address." +#: cinder/volume/drivers/zadara.py:408 cinder/volume/drivers/zadara.py:531 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" msgstr "" -#: cinder/network/quantum/manager.py:204 +#: cinder/volume/drivers/zadara.py:438 #, python-format -msgid "" -"Unable to find existing quantum network for tenant '%(q_tenant_id)s' with" -" net-id '%(quantum_net_id)s'" +msgid "Create snapshot: %s" msgstr "" -#: cinder/network/quantum/manager.py:301 +#: cinder/volume/drivers/zadara.py:445 cinder/volume/drivers/zadara.py:490 +#: cinder/volume/drivers/zadara.py:516 #, python-format -msgid "network allocations for instance %s" +msgid "Volume %(name)s not found" msgstr "" -#: cinder/network/quantum/manager.py:588 +#: cinder/volume/drivers/zadara.py:456 #, python-format -msgid "" -"port deallocation failed for instance: |%(instance_id)s|, port_id: " -"|%(port_id)s|" +msgid "Delete snapshot: %s" msgstr "" -#: cinder/network/quantum/manager.py:606 +#: cinder/volume/drivers/zadara.py:464 #, python-format -msgid "" -"ipam deallocation failed for instance: |%(instance_id)s|, vif_uuid: " -"|%(vif_uuid)s|" +msgid "snapshot: original volume %s not found, skipping delete operation" msgstr "" -#: cinder/network/quantum/melange_connection.py:96 +#: cinder/volume/drivers/zadara.py:472 #, python-format -msgid "Server returned error: %s" +msgid "snapshot: snapshot %s not found, skipping delete operation" msgstr "" -#: cinder/network/quantum/melange_connection.py:98 -msgid "Connection error contacting melange service, retrying" +#: cinder/volume/drivers/zadara.py:483 +#, python-format +msgid "Creating volume from snapshot: %s" msgstr "" -#: cinder/network/quantum/melange_connection.py:108 +#: cinder/volume/drivers/zadara.py:496 #, python-format -msgid "" -"allocate IP on network |%(network_id)s| belonging to " -"|%(network_tenant_id)s| to this vif |%(vif_id)s| with mac " -"|%(mac_address)s| belonging to |%(project_id)s| " +msgid "Snapshot %(name)s not found" msgstr "" -#: cinder/network/quantum/melange_ipam_lib.py:133 -msgid "get_project_and_global_net_ids must be called with a non-null project_id" +#: cinder/volume/drivers/zadara.py:614 +#, python-format +msgid "Attach properties: %(properties)s" msgstr "" -#: cinder/network/quantum/cinder_ipam_lib.py:75 -msgid "Error creating network entry" +#: cinder/volume/drivers/emc/emc_smis_common.py:40 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." msgstr "" -#: cinder/network/quantum/cinder_ipam_lib.py:90 -#, python-format -msgid "No network with net_id = %s" +#: cinder/volume/drivers/emc/emc_smis_common.py:79 +msgid "Entering create_volume." 
msgstr "" -#: cinder/network/quantum/cinder_ipam_lib.py:221 +#: cinder/volume/drivers/emc/emc_smis_common.py:83 #, python-format -msgid "No fixed IPs to deallocate for vif %s" +msgid "Create Volume: %(volume)s Size: %(size)lu" msgstr "" -#: cinder/network/quantum/quantum_connection.py:99 +#: cinder/volume/drivers/emc/emc_smis_common.py:91 #, python-format -msgid "Connecting interface %(interface_id)s to net %(net_id)s for %(tenant_id)s" +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" msgstr "" -#: cinder/network/quantum/quantum_connection.py:113 +#: cinder/volume/drivers/emc/emc_smis_common.py:98 #, python-format -msgid "Deleting port %(port_id)s on net %(net_id)s for %(tenant_id)s" +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" msgstr "" -#: cinder/notifier/api.py:115 +#: cinder/volume/drivers/emc/emc_smis_common.py:107 #, python-format -msgid "%s not in valid priorities" +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." msgstr "" -#: cinder/notifier/api.py:130 +#: cinder/volume/drivers/emc/emc_smis_common.py:115 #, python-format msgid "" -"Problem '%(e)s' attempting to send to notification system. " -"Payload=%(payload)s" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" msgstr "" -#: cinder/notifier/list_notifier.py:65 +#: cinder/volume/drivers/emc/emc_smis_common.py:130 #, python-format -msgid "Problem '%(e)s' attempting to send to notification driver %(driver)s." +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" msgstr "" -#: cinder/notifier/rabbit_notifier.py:46 +#: cinder/volume/drivers/emc/emc_smis_common.py:137 #, python-format -msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" msgstr "" -#: cinder/rpc/amqp.py:146 +#: cinder/volume/drivers/emc/emc_smis_common.py:144 #, python-format -msgid "Returning exception %s to caller" +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" msgstr "" -#: cinder/rpc/amqp.py:188 -#, python-format -msgid "unpacked context: %s" +#: cinder/volume/drivers/emc/emc_smis_common.py:152 +msgid "Entering create_volume_from_snapshot." msgstr "" -#: cinder/rpc/amqp.py:231 +#: cinder/volume/drivers/emc/emc_smis_common.py:157 #, python-format -msgid "received %s" +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" msgstr "" -#: cinder/rpc/amqp.py:236 +#: cinder/volume/drivers/emc/emc_smis_common.py:167 #, python-format -msgid "no method for message: %s" +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." msgstr "" -#: cinder/rpc/amqp.py:237 +#: cinder/volume/drivers/emc/emc_smis_common.py:177 #, python-format -msgid "No method for message: %s" +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." msgstr "" -#: cinder/rpc/amqp.py:321 +#: cinder/volume/drivers/emc/emc_smis_common.py:188 #, python-format -msgid "Making asynchronous call on %s ..." +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. 
Cannot find Replication Service to create volume from " +"snapshot." msgstr "" -#: cinder/rpc/amqp.py:324 +#: cinder/volume/drivers/emc/emc_smis_common.py:197 #, python-format -msgid "MSG_ID is %s" +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" msgstr "" -#: cinder/rpc/amqp.py:346 +#: cinder/volume/drivers/emc/emc_smis_common.py:218 #, python-format -msgid "Making asynchronous cast on %s..." +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" msgstr "" -#: cinder/rpc/amqp.py:354 -msgid "Making asynchronous fanout cast..." +#: cinder/volume/drivers/emc/emc_smis_common.py:230 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." msgstr "" -#: cinder/rpc/amqp.py:379 +#: cinder/volume/drivers/emc/emc_smis_common.py:241 #, python-format -msgid "Sending notification on %s..." +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" msgstr "" -#: cinder/rpc/common.py:54 +#: cinder/volume/drivers/emc/emc_smis_common.py:257 #, python-format msgid "" -"Remote error: %(exc_type)s %(value)s\n" -"%(traceback)s." +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" msgstr "" -#: cinder/rpc/common.py:71 -msgid "Timeout while waiting on RPC response." +#: cinder/volume/drivers/emc/emc_smis_common.py:266 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" msgstr "" -#: cinder/rpc/impl_kombu.py:111 -msgid "Failed to process message... skipping it." +#: cinder/volume/drivers/emc/emc_smis_common.py:278 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." msgstr "" -#: cinder/rpc/impl_kombu.py:407 -#, python-format -msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +#: cinder/volume/drivers/emc/emc_smis_common.py:287 +msgid "Entering create_cloned_volume." msgstr "" -#: cinder/rpc/impl_kombu.py:430 +#: cinder/volume/drivers/emc/emc_smis_common.py:292 #, python-format -msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" msgstr "" -#: cinder/rpc/impl_kombu.py:466 +#: cinder/volume/drivers/emc/emc_smis_common.py:302 #, python-format msgid "" -"Unable to connect to AMQP server on %(hostname)s:%(port)d after " -"%(max_retries)d tries: %(err_str)s" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." msgstr "" -#: cinder/rpc/impl_kombu.py:482 +#: cinder/volume/drivers/emc/emc_smis_common.py:312 #, python-format msgid "" -"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " -"again in %(sleep_time)d seconds." +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. 
Cannot find Replication Service to create cloned volume." msgstr "" -#: cinder/rpc/impl_kombu.py:533 cinder/rpc/impl_qpid.py:385 +#: cinder/volume/drivers/emc/emc_smis_common.py:321 #, python-format -msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" msgstr "" -#: cinder/rpc/impl_kombu.py:551 cinder/rpc/impl_qpid.py:400 +#: cinder/volume/drivers/emc/emc_smis_common.py:342 #, python-format -msgid "Timed out waiting for RPC response: %s" +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. Return code: %(rc)lu.Error: %(error)s" msgstr "" -#: cinder/rpc/impl_kombu.py:555 cinder/rpc/impl_qpid.py:404 +#: cinder/volume/drivers/emc/emc_smis_common.py:354 #, python-format -msgid "Failed to consume message from queue: %s" +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." msgstr "" -#: cinder/rpc/impl_kombu.py:589 cinder/rpc/impl_qpid.py:434 +#: cinder/volume/drivers/emc/emc_smis_common.py:365 #, python-format -msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" msgstr "" -#: cinder/rpc/impl_qpid.py:341 +#: cinder/volume/drivers/emc/emc_smis_common.py:381 #, python-format -msgid "Unable to connect to AMQP server: %s " +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" msgstr "" -#: cinder/rpc/impl_qpid.py:346 +#: cinder/volume/drivers/emc/emc_smis_common.py:390 #, python-format -msgid "Connected to AMQP server on %s" +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" msgstr "" -#: cinder/rpc/impl_qpid.py:354 -msgid "Re-established AMQP queues" +#: cinder/volume/drivers/emc/emc_smis_common.py:402 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." msgstr "" -#: cinder/rpc/impl_qpid.py:412 -msgid "Error processing message. Skipping it." +#: cinder/volume/drivers/emc/emc_smis_common.py:411 +msgid "Entering delete_volume." msgstr "" -#: cinder/scheduler/chance.py:47 cinder/scheduler/simple.py:91 -#: cinder/scheduler/simple.py:143 -msgid "Is the appropriate service running?" +#: cinder/volume/drivers/emc/emc_smis_common.py:413 +#, python-format +msgid "Delete Volume: %(volume)s" msgstr "" -#: cinder/scheduler/chance.py:52 -msgid "Could not find another compute" +#: cinder/volume/drivers/emc/emc_smis_common.py:420 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." msgstr "" -#: cinder/scheduler/driver.py:63 +#: cinder/volume/drivers/emc/emc_smis_common.py:430 #, python-format -msgid "Casted '%(method)s' to volume '%(host)s'" +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." 
msgstr "" -#: cinder/scheduler/driver.py:80 +#: cinder/volume/drivers/emc/emc_smis_common.py:438 #, python-format -msgid "Casted '%(method)s' to compute '%(host)s'" +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" msgstr "" -#: cinder/scheduler/driver.py:89 +#: cinder/volume/drivers/emc/emc_smis_common.py:442 #, python-format -msgid "Casted '%(method)s' to network '%(host)s'" +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" msgstr "" -#: cinder/scheduler/driver.py:107 +#: cinder/volume/drivers/emc/emc_smis_common.py:456 #, python-format -msgid "Casted '%(method)s' to %(topic)s '%(host)s'" -msgstr "" - -#: cinder/scheduler/driver.py:181 -msgid "Must implement a fallback schedule" +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" msgstr "" -#: cinder/scheduler/driver.py:185 -msgid "Driver must implement schedule_prep_resize" +#: cinder/volume/drivers/emc/emc_smis_common.py:465 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" msgstr "" -#: cinder/scheduler/driver.py:190 -msgid "Driver must implement schedule_run_instance" +#: cinder/volume/drivers/emc/emc_smis_common.py:472 +msgid "Entering create_snapshot." msgstr "" -#: cinder/scheduler/driver.py:325 -msgid "Block migration can not be used with shared storage." +#: cinder/volume/drivers/emc/emc_smis_common.py:476 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" msgstr "" -#: cinder/scheduler/driver.py:330 -msgid "Live migration can not be used without shared storage." +#: cinder/volume/drivers/emc/emc_smis_common.py:488 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" msgstr "" -#: cinder/scheduler/driver.py:367 cinder/scheduler/driver.py:457 +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:497 +#: cinder/volume/drivers/emc/emc_smis_common.py:567 #, python-format -msgid "host %(dest)s is not compatible with original host %(src)s." +msgid "Cannot find Replication Service to create snapshot for volume %s." msgstr "" -#: cinder/scheduler/driver.py:416 +#: cinder/volume/drivers/emc/emc_smis_common.py:502 #, python-format msgid "" -"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " -"memory(host:%(avail)s <= instance:%(mem_inst)s)" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." msgstr "" -#: cinder/scheduler/driver.py:472 +#: cinder/volume/drivers/emc/emc_smis_common.py:518 #, python-format msgid "" -"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " -"disk(host:%(available)s <= instance:%(necessary)s)" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" msgstr "" -#: cinder/scheduler/filter_scheduler.py:51 +#: cinder/volume/drivers/emc/emc_smis_common.py:527 #, python-format -msgid "No host selection for %s defined." +msgid "" +"Error Create Snapshot: %(snapshot)s Volume: %(volume)s Error: " +"%(errordesc)s" msgstr "" -#: cinder/scheduler/filter_scheduler.py:64 +#: cinder/volume/drivers/emc/emc_smis_common.py:535 #, python-format -msgid "Attempting to build %(num_instances)d instance(s)" +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." 
msgstr "" -#: cinder/scheduler/filter_scheduler.py:157 -msgid "Scheduler only understands Compute nodes (for now)" +#: cinder/volume/drivers/emc/emc_smis_common.py:541 +msgid "Entering delete_snapshot." msgstr "" -#: cinder/scheduler/filter_scheduler.py:199 +#: cinder/volume/drivers/emc/emc_smis_common.py:545 #, python-format -msgid "Filtered %(hosts)s" +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" msgstr "" -#: cinder/scheduler/filter_scheduler.py:209 +#: cinder/volume/drivers/emc/emc_smis_common.py:551 #, python-format -msgid "Weighted %(weighted_host)s" +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." msgstr "" -#: cinder/scheduler/host_manager.py:144 +#: cinder/volume/drivers/emc/emc_smis_common.py:559 #, python-format -msgid "Host filter fails for ignored host %(host)s" +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." msgstr "" -#: cinder/scheduler/host_manager.py:151 +#: cinder/volume/drivers/emc/emc_smis_common.py:574 #, python-format -msgid "Host filter fails for non-forced host %(host)s" +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." msgstr "" -#: cinder/scheduler/host_manager.py:157 +#: cinder/volume/drivers/emc/emc_smis_common.py:590 #, python-format -msgid "Host filter function %(func)s failed for %(host)s" +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" msgstr "" -#: cinder/scheduler/host_manager.py:163 +#: cinder/volume/drivers/emc/emc_smis_common.py:599 #, python-format -msgid "Host filter passes for %(host)s" +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" msgstr "" -#: cinder/scheduler/host_manager.py:272 +#: cinder/volume/drivers/emc/emc_smis_common.py:611 #, python-format -msgid "Received %(service_name)s service update from %(host)s." +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." msgstr "" -#: cinder/scheduler/host_manager.py:313 -msgid "host_manager only implemented for 'compute'" +#: cinder/volume/drivers/emc/emc_smis_common.py:621 +#, python-format +msgid "Create export: %(volume)s" msgstr "" -#: cinder/scheduler/host_manager.py:323 +#: cinder/volume/drivers/emc/emc_smis_common.py:626 #, python-format -msgid "No service for compute ID %s" +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" msgstr "" -#: cinder/scheduler/manager.py:85 +#: cinder/volume/drivers/emc/emc_smis_common.py:648 #, python-format msgid "" -"Driver Method %(driver_method_name)s missing: %(e)s. Reverting to " -"schedule()" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" msgstr "" -#: cinder/scheduler/manager.py:150 +#: cinder/volume/drivers/emc/emc_smis_common.py:663 #, python-format -msgid "Failed to schedule_%(method)s: %(ex)s" +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" msgstr "" -#: cinder/scheduler/manager.py:159 +#: cinder/volume/drivers/emc/emc_smis_common.py:674 #, python-format -msgid "Setting instance %(instance_uuid)s to %(state)s state." +msgid "Error mapping volume %s." 
msgstr "" -#: cinder/scheduler/scheduler_options.py:66 +#: cinder/volume/drivers/emc/emc_smis_common.py:678 #, python-format -msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgid "ExposePaths for volume %s completed successfully." msgstr "" -#: cinder/scheduler/scheduler_options.py:75 +#: cinder/volume/drivers/emc/emc_smis_common.py:694 #, python-format -msgid "Could not decode scheduler options: '%(e)s'" +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" msgstr "" -#: cinder/scheduler/simple.py:87 -msgid "Not enough allocatable CPU cores remaining" +#: cinder/volume/drivers/emc/emc_smis_common.py:707 +#, python-format +msgid "Error unmapping volume %s." msgstr "" -#: cinder/scheduler/simple.py:137 -msgid "Not enough allocatable volume gigabytes remaining" +#: cinder/volume/drivers/emc/emc_smis_common.py:711 +#, python-format +msgid "HidePaths for volume %s completed successfully." msgstr "" -#: cinder/scheduler/filters/core_filter.py:45 -msgid "VCPUs not set; assuming CPU collection broken" +#: cinder/volume/drivers/emc/emc_smis_common.py:724 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" msgstr "" -#: cinder/tests/fake_utils.py:72 +#: cinder/volume/drivers/emc/emc_smis_common.py:739 #, python-format -msgid "Faking execution of cmd (subprocess): %s" +msgid "Error mapping volume %(vol)s. %(error)s" msgstr "" -#: cinder/tests/fake_utils.py:80 +#: cinder/volume/drivers/emc/emc_smis_common.py:744 #, python-format -msgid "Faked command matched %s" +msgid "AddMembers for volume %s completed successfully." msgstr "" -#: cinder/tests/fake_utils.py:96 +#: cinder/volume/drivers/emc/emc_smis_common.py:757 #, python-format -msgid "Faked command raised an exception %s" +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" msgstr "" -#: cinder/tests/fake_utils.py:101 +#: cinder/volume/drivers/emc/emc_smis_common.py:770 #, python-format -msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgid "Error unmapping volume %(vol)s. %(error)s" msgstr "" -#: cinder/tests/fakelibvirt.py:784 -msgid "Please extend mock libvirt module to support flags" +#: cinder/volume/drivers/emc/emc_smis_common.py:775 +#, python-format +msgid "RemoveMembers for volume %s completed successfully." msgstr "" -#: cinder/tests/fakelibvirt.py:790 -msgid "Please extend fake libvirt module to support this auth method" +#: cinder/volume/drivers/emc/emc_smis_common.py:781 +#, python-format +msgid "Map volume: %(volume)s" msgstr "" -#: cinder/tests/test_compute.py:365 cinder/tests/test_compute.py:1419 +#: cinder/volume/drivers/emc/emc_smis_common.py:790 +#: cinder/volume/drivers/emc/emc_smis_common.py:820 #, python-format -msgid "Running instances: %s" +msgid "Cannot find Controller Configuration Service for storage system %s" msgstr "" -#: cinder/tests/test_compute.py:371 +#: cinder/volume/drivers/emc/emc_smis_common.py:804 #, python-format -msgid "After terminating instances: %s" +msgid "Unmap volume: %(volume)s" msgstr "" -#: cinder/tests/test_compute.py:589 -msgid "Internal error" +#: cinder/volume/drivers/emc/emc_smis_common.py:810 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." 
msgstr "" -#: cinder/tests/test_compute.py:1430 +#: cinder/volume/drivers/emc/emc_smis_common.py:834 #, python-format -msgid "After force-killing instances: %s" +msgid "Initialize connection: %(volume)s" msgstr "" -#: cinder/tests/test_misc.py:92 +#: cinder/volume/drivers/emc/emc_smis_common.py:840 #, python-format -msgid "" -"The following migrations are missing a downgrade:\n" -"\t%s" +msgid "Volume %s is already mapped." msgstr "" -#: cinder/tests/test_cinder_manage.py:169 -msgid "id" +#: cinder/volume/drivers/emc/emc_smis_common.py:852 +#, python-format +msgid "Terminate connection: %(volume)s" msgstr "" -#: cinder/tests/test_cinder_manage.py:170 -msgid "IPv4" +#: cinder/volume/drivers/emc/emc_smis_common.py:884 +#, python-format +msgid "Found Storage Type: %s" msgstr "" -#: cinder/tests/test_cinder_manage.py:171 -msgid "IPv6" +#: cinder/volume/drivers/emc/emc_smis_common.py:887 +msgid "Storage type not found." msgstr "" -#: cinder/tests/test_cinder_manage.py:172 -msgid "start address" +#: cinder/volume/drivers/emc/emc_smis_common.py:903 +#, python-format +msgid "Found Masking View: %s" msgstr "" -#: cinder/tests/test_cinder_manage.py:173 -msgid "DNS1" +#: cinder/volume/drivers/emc/emc_smis_common.py:906 +msgid "Masking View not found." msgstr "" -#: cinder/tests/test_cinder_manage.py:174 -msgid "DNS2" +#: cinder/volume/drivers/emc/emc_smis_common.py:928 +msgid "Ecom user not found." msgstr "" -#: cinder/tests/test_cinder_manage.py:175 -msgid "VlanID" +#: cinder/volume/drivers/emc/emc_smis_common.py:948 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" msgstr "" -#: cinder/tests/test_cinder_manage.py:176 -msgid "project" +#: cinder/volume/drivers/emc/emc_smis_common.py:952 +msgid "Ecom server not found." msgstr "" -#: cinder/tests/test_cinder_manage.py:177 -msgid "uuid" +#: cinder/volume/drivers/emc/emc_smis_common.py:959 +msgid "Cannot connect to ECOM server" msgstr "" -#: cinder/tests/test_volume.py:216 +#: cinder/volume/drivers/emc/emc_smis_common.py:971 #, python-format -msgid "Target %s allocated" +msgid "Found Replication Service: %s" msgstr "" -#: cinder/tests/test_volume.py:468 +#: cinder/volume/drivers/emc/emc_smis_common.py:984 #, python-format -msgid "Cannot confirm exported volume id:%s." +msgid "Found Storage Configuration Service: %s" msgstr "" -#: cinder/tests/test_volume_types.py:58 +#: cinder/volume/drivers/emc/emc_smis_common.py:997 #, python-format -msgid "Given data: %s" +msgid "Found Controller Configuration Service: %s" msgstr "" -#: cinder/tests/test_volume_types.py:59 +#: cinder/volume/drivers/emc/emc_smis_common.py:1010 #, python-format -msgid "Result data: %s" +msgid "Found Storage Hardware ID Management Service: %s" msgstr "" -#: cinder/tests/test_xenapi.py:626 +#: cinder/volume/drivers/emc/emc_smis_common.py:1054 #, python-format -msgid "Creating files in %s to simulate guest agent" +msgid "Pool %(storage_type)s is not found." msgstr "" -#: cinder/tests/test_xenapi.py:637 +#: cinder/volume/drivers/emc/emc_smis_common.py:1060 #, python-format -msgid "Removing simulated guest agent files in %s" +msgid "Storage system not found for pool %(storage_type)s." msgstr "" -#: cinder/tests/api/openstack/compute/test_servers.py:2144 +#: cinder/volume/drivers/emc/emc_smis_common.py:1066 #, python-format -msgid "Quota exceeded: code=%(code)s" +msgid "Pool: %(pool)s SystemName: %(systemname)s." 
msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:57 +#: cinder/volume/drivers/emc/emc_smis_common.py:1082 #, python-format -msgid "_create: %s" +msgid "Pool name: %(poolname)s System name: %(systemname)s." msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:66 +#: cinder/volume/drivers/emc/emc_smis_common.py:1114 #, python-format -msgid "_delete: %s" +msgid "Volume %(volumename)s not found on the array." msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:75 +#: cinder/volume/drivers/emc/emc_smis_common.py:1117 #, python-format -msgid "_get: %s" +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:85 +#: cinder/volume/drivers/emc/emc_smis_common.py:1130 #, python-format -msgid "_get_all: %s" +msgid "Source: %(volumename)s Target: %(snapshotname)s." msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:125 +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 #, python-format -msgid "test_snapshot_create: param=%s" +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:134 +#: cinder/volume/drivers/emc/emc_smis_common.py:1158 #, python-format -msgid "test_snapshot_create: resp_dict=%s" +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:156 +#: cinder/volume/drivers/emc/emc_smis_common.py:1184 #, python-format -msgid "test_snapshot_create_force: param=%s" +msgid "Error finding %s." msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:165 +#: cinder/volume/drivers/emc/emc_smis_common.py:1188 #, python-format -msgid "test_snapshot_create_force: resp_dict=%s" +msgid "Found %(name)s: %(initiator)s." msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:205 +#: cinder/volume/drivers/emc/emc_smis_common.py:1248 #, python-format -msgid "test_snapshot_show: resp=%s" +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:231 +#: cinder/volume/drivers/emc/emc_smis_common.py:1289 #, python-format -msgid "test_snapshot_detail: resp_dict=%s" +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." msgstr "" -#: cinder/tests/integrated/test_login.py:31 +#: cinder/volume/drivers/emc/emc_smis_common.py:1302 #, python-format -msgid "flavor: %s" +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." msgstr "" -#: cinder/tests/integrated/api/client.py:38 +#: cinder/volume/drivers/emc/emc_smis_common.py:1314 #, python-format msgid "" -"%(message)s\n" -"Status Code: %(_status)s\n" -"Body: %(_body)s" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." msgstr "" -#: cinder/tests/integrated/api/client.py:47 -msgid "Authentication error" +#: cinder/volume/drivers/emc/emc_smis_common.py:1326 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." 
msgstr "" -#: cinder/tests/integrated/api/client.py:55 -msgid "Authorization error" +#: cinder/volume/drivers/emc/emc_smis_common.py:1361 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." msgstr "" -#: cinder/tests/integrated/api/client.py:63 -msgid "Item not found" +#: cinder/volume/drivers/emc/emc_smis_common.py:1404 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." msgstr "" -#: cinder/tests/integrated/api/client.py:105 +#: cinder/volume/drivers/emc/emc_smis_common.py:1409 #, python-format -msgid "Doing %(method)s on %(relative_url)s" +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." msgstr "" -#: cinder/tests/integrated/api/client.py:107 +#: cinder/volume/drivers/emc/emc_smis_common.py:1419 #, python-format -msgid "Body: %s" +msgid "Device info: %(data)s." msgstr "" -#: cinder/tests/integrated/api/client.py:125 +#: cinder/volume/drivers/emc/emc_smis_common.py:1441 #, python-format -msgid "%(auth_uri)s => code %(http_status)s" +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." msgstr "" -#: cinder/tests/integrated/api/client.py:151 +#: cinder/volume/drivers/emc/emc_smis_common.py:1463 #, python-format -msgid "%(relative_uri)s => code %(http_status)s" +msgid "Found Storage Processor System: %s" msgstr "" -#: cinder/tests/integrated/api/client.py:161 -msgid "Unexpected status code" +#: cinder/volume/drivers/emc/emc_smis_common.py:1491 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1520 +msgid "Error finding Storage Hardware ID Service." msgstr "" -#: cinder/tests/integrated/api/client.py:168 +#: cinder/volume/drivers/emc/emc_smis_common.py:1526 #, python-format -msgid "Decoding JSON: %s" +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1538 +msgid "Error finding Target WWNs." msgstr "" -#: cinder/tests/rpc/common.py:133 +#: cinder/volume/drivers/emc/emc_smis_common.py:1548 #, python-format -msgid "Nested received %(queue)s, %(value)s" +msgid "Add target WWN: %s." msgstr "" -#: cinder/tests/rpc/common.py:142 +#: cinder/volume/drivers/emc/emc_smis_common.py:1550 #, python-format -msgid "Nested return %s" +msgid "Target WWNs: %s." msgstr "" -#: cinder/tests/rpc/common.py:160 -msgid "RPC backend does not support timeouts" +#: cinder/volume/drivers/emc/emc_smis_common.py:1566 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." 
msgstr "" -#: cinder/tests/rpc/common.py:227 cinder/tests/rpc/common.py:233 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:152 #, python-format -msgid "Received %s" +msgid "Could not find iSCSI export for volume %s" msgstr "" -#: cinder/virt/connection.py:85 -msgid "Failed to open connection to the hypervisor" +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:161 +#, python-format +msgid "Cannot find device number for volume %s" msgstr "" -#: cinder/virt/fake.py:270 cinder/virt/xenapi_conn.py:396 -#: cinder/virt/baremetal/proxy.py:716 cinder/virt/libvirt/connection.py:2045 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:191 #, python-format -msgid "Compute_service record created for %s " +msgid "Found iSCSI endpoint: %s" msgstr "" -#: cinder/virt/fake.py:273 cinder/virt/xenapi_conn.py:399 -#: cinder/virt/baremetal/proxy.py:720 cinder/virt/libvirt/connection.py:2048 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:198 #, python-format -msgid "Compute_service record updated for %s " +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." msgstr "" -#: cinder/virt/firewall.py:130 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:215 #, python-format -msgid "Attempted to unfilter instance %s which is not filtered" +msgid "ISCSI properties: %s" msgstr "" -#: cinder/virt/firewall.py:137 +#: cinder/volume/drivers/hds/hds.py:70 #, python-format -msgid "Filters added to instance %s" +msgid "Range: start LU: %(start)s, end LU: %(end)s" msgstr "" -#: cinder/virt/firewall.py:139 -msgid "Provider Firewall Rules refreshed" +#: cinder/volume/drivers/hds/hds.py:84 +#, python-format +msgid "setting LU upper (end) limit to %s" msgstr "" -#: cinder/virt/firewall.py:291 +#: cinder/volume/drivers/hds/hds.py:92 #, python-format -msgid "Adding security group rule: %r" +msgid "%(element)s: %(val)s" msgstr "" -#: cinder/virt/firewall.py:403 cinder/virt/xenapi/firewall.py:87 +#: cinder/volume/drivers/hds/hds.py:103 cinder/volume/drivers/hds/hds.py:105 #, python-format -msgid "Adding provider rule: %s" +msgid "XML exception reading parameter: %s" msgstr "" -#: cinder/virt/images.py:86 -msgid "'qemu-img info' parsing failed." +#: cinder/volume/drivers/hds/hds.py:178 +#, python-format +msgid "portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s" msgstr "" -#: cinder/virt/images.py:92 +#: cinder/volume/drivers/hds/hds.py:197 #, python-format -msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgid "No configuration found for service: %s" msgstr "" -#: cinder/virt/images.py:104 +#: cinder/volume/drivers/hds/hds.py:250 #, python-format -msgid "Converted to raw, but format is now %s" +msgid "HDP not found: %s" msgstr "" -#: cinder/virt/vmwareapi_conn.py:105 -msgid "" -"Must specify vmwareapi_host_ip,vmwareapi_host_username and " -"vmwareapi_host_password to useconnection_type=vmwareapi" +#: cinder/volume/drivers/hds/hds.py:289 +#, python-format +msgid "iSCSI portal not found for service: %s" msgstr "" -#: cinder/virt/vmwareapi_conn.py:276 +#: cinder/volume/drivers/hds/hds.py:327 #, python-format -msgid "In vmwareapi:_create_session, got this exception: %s" +msgid "LUN %(lun)s of size %(sz)s MB is created." msgstr "" -#: cinder/virt/vmwareapi_conn.py:359 +#: cinder/volume/drivers/hds/hds.py:355 #, python-format -msgid "In vmwareapi:_call_method, got this exception: %s" +msgid "LUN %(lun)s of size %(size)s MB is cloned." 
msgstr "" -#: cinder/virt/vmwareapi_conn.py:398 +#: cinder/volume/drivers/hds/hds.py:372 #, python-format -msgid "Task [%(task_name)s] %(task_ref)s status: success" +msgid "LUN %(lun)s extended to %(size)s GB." msgstr "" -#: cinder/virt/vmwareapi_conn.py:404 +#: cinder/volume/drivers/hds/hds.py:395 #, python-format -msgid "Task [%(task_name)s] %(task_ref)s status: error %(error_info)s" +msgid "delete lun %(lun)s on %(name)s" msgstr "" -#: cinder/virt/vmwareapi_conn.py:409 +#: cinder/volume/drivers/hds/hds.py:480 #, python-format -msgid "In vmwareapi:_poll_task, Got this error %s" +msgid "LUN %(lun)s of size %(sz)s MB is created from snapshot." msgstr "" -#: cinder/virt/xenapi_conn.py:140 -msgid "" -"Must specify xenapi_connection_url, xenapi_connection_username " -"(optionally), and xenapi_connection_password to use " -"connection_type=xenapi" +#: cinder/volume/drivers/hds/hds.py:503 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is created as snapshot." msgstr "" -#: cinder/virt/xenapi_conn.py:329 cinder/virt/libvirt/connection.py:472 -msgid "Could not determine iscsi initiator name" +#: cinder/volume/drivers/hds/hds.py:522 +#, python-format +msgid "LUN %s is deleted." msgstr "" -#: cinder/virt/xenapi_conn.py:460 -msgid "Host startup on XenServer is not supported." +#: cinder/volume/drivers/huawei/__init__.py:57 +msgid "_instantiate_driver: configuration not found." msgstr "" -#: cinder/virt/xenapi_conn.py:489 -msgid "Unable to log in to XenAPI (is the Dom0 disk full?)" +#: cinder/volume/drivers/huawei/__init__.py:64 +#, python-format +msgid "" +"_instantiate_driver: Loading %(protocol)s driver for Huawei OceanStor " +"%(product)s series storage arrays." msgstr "" -#: cinder/virt/xenapi_conn.py:527 -msgid "Host is member of a pool, but DB says otherwise" +#: cinder/volume/drivers/huawei/__init__.py:84 +#, python-format +msgid "" +"\"Product\" or \"Protocol\" is illegal. \"Product\" should be set to " +"either T, Dorado or HVS. \"Protocol\" should be set to either iSCSI or " +"FC. Product: %(product)s Protocol: %(protocol)s" msgstr "" -#: cinder/virt/xenapi_conn.py:599 cinder/virt/xenapi_conn.py:612 +#: cinder/volume/drivers/huawei/huawei_dorado.py:74 #, python-format -msgid "Got exception: %s" +msgid "" +"initialize_connection: volume name: %(vol)s host: %(host)s initiator: " +"%(wwn)s" msgstr "" -#: cinder/virt/baremetal/dom.py:93 -msgid "No domains exist." +#: cinder/volume/drivers/huawei/huawei_dorado.py:92 +#: cinder/volume/drivers/huawei/huawei_t.py:461 +#, python-format +msgid "initialize_connection: Target FC ports WWNS: %s" msgstr "" -#: cinder/virt/baremetal/dom.py:95 +#: cinder/volume/drivers/huawei/huawei_t.py:101 #, python-format -msgid "============= initial domains =========== : %s" +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(ini)s" msgstr "" -#: cinder/virt/baremetal/dom.py:99 -msgid "Building domain: to be removed" +#: cinder/volume/drivers/huawei/huawei_t.py:159 +#: cinder/volume/drivers/huawei/rest_common.py:1278 +#, python-format +msgid "" +"_get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " +"check config file." msgstr "" -#: cinder/virt/baremetal/dom.py:103 -msgid "Not running domain: remove" +#: cinder/volume/drivers/huawei/huawei_t.py:206 +#: cinder/volume/drivers/huawei/rest_common.py:1083 +#, python-format +msgid "_get_tgt_iqn: iSCSI IP is %s." 
msgstr "" -#: cinder/virt/baremetal/dom.py:111 -msgid "domain running on an unknown node: discarded" +#: cinder/volume/drivers/huawei/huawei_t.py:234 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s." msgstr "" -#: cinder/virt/baremetal/dom.py:127 +#: cinder/volume/drivers/huawei/huawei_t.py:248 #, python-format -msgid "No such domain (%s)" +msgid "" +"_get_iscsi_tgt_port_info: Failed to get iSCSI port info. Please make sure" +" the iSCSI port IP %s is configured in array." msgstr "" -#: cinder/virt/baremetal/dom.py:134 +#: cinder/volume/drivers/huawei/huawei_t.py:323 +#: cinder/volume/drivers/huawei/huawei_t.py:552 #, python-format -msgid "Failed power down Bare-metal node %s" +msgid "" +"terminate_connection: volume: %(vol)s, host: %(host)s, connector: " +"%(initiator)s" msgstr "" -#: cinder/virt/baremetal/dom.py:143 -msgid "deactivate -> activate fails" +#: cinder/volume/drivers/huawei/huawei_t.py:351 +#, python-format +msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s." msgstr "" -#: cinder/virt/baremetal/dom.py:153 -msgid "destroy_domain: no such domain" +#: cinder/volume/drivers/huawei/huawei_t.py:436 +msgid "validate_connector: The FC driver requires thewwpns in the connector." msgstr "" -#: cinder/virt/baremetal/dom.py:154 +#: cinder/volume/drivers/huawei/huawei_t.py:443 #, python-format -msgid "No such domain %s" +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(wwn)s" msgstr "" -#: cinder/virt/baremetal/dom.py:161 +#: cinder/volume/drivers/huawei/huawei_t.py:578 #, python-format -msgid "Domains: %s" +msgid "_remove_fc_ports: FC port was not found on host %(hostid)s." msgstr "" -#: cinder/virt/baremetal/dom.py:163 +#: cinder/volume/drivers/huawei/huawei_utils.py:40 #, python-format -msgid "Nodes: %s" +msgid "parse_xml_file: %s" msgstr "" -#: cinder/virt/baremetal/dom.py:166 +#: cinder/volume/drivers/huawei/huawei_utils.py:129 #, python-format -msgid "After storing domains: %s" +msgid "_get_host_os_type: Host %(ip)s OS type is %(os)s." 
msgstr "" -#: cinder/virt/baremetal/dom.py:169 -msgid "deactivation/removing domain failed" +#: cinder/volume/drivers/huawei/rest_common.py:59 +#, python-format +msgid "HVS Request URL: %(url)s" msgstr "" -#: cinder/virt/baremetal/dom.py:176 -msgid "===== Domain is being created =====" +#: cinder/volume/drivers/huawei/rest_common.py:60 +#, python-format +msgid "HVS Request Data: %(data)s" msgstr "" -#: cinder/virt/baremetal/dom.py:179 -msgid "Same domain name already exists" +#: cinder/volume/drivers/huawei/rest_common.py:73 +#, python-format +msgid "HVS Response Data: %(res)s" msgstr "" -#: cinder/virt/baremetal/dom.py:181 -msgid "create_domain: before get_idle_node" +#: cinder/volume/drivers/huawei/rest_common.py:75 +#, python-format +msgid "Bad response from server: %s" msgstr "" -#: cinder/virt/baremetal/dom.py:198 -#, python-format -msgid "Created new domain: %s" +#: cinder/volume/drivers/huawei/rest_common.py:82 +msgid "JSON transfer error" msgstr "" -#: cinder/virt/baremetal/dom.py:213 +#: cinder/volume/drivers/huawei/rest_common.py:102 #, python-format -msgid "Failed to boot Bare-metal node %s" +msgid "Login error, reason is %s" msgstr "" -#: cinder/virt/baremetal/dom.py:222 -msgid "No such domain exists" +#: cinder/volume/drivers/huawei/rest_common.py:166 +#, python-format +msgid "" +"%(err)s\n" +"result: %(res)s" msgstr "" -#: cinder/virt/baremetal/dom.py:226 +#: cinder/volume/drivers/huawei/rest_common.py:173 #, python-format -msgid "change_domain_state: to new state %s" +msgid "%s \"data\" was not in result." msgstr "" -#: cinder/virt/baremetal/dom.py:233 -#, python-format -msgid "Stored fake domains to the file: %s" +#: cinder/volume/drivers/huawei/rest_common.py:208 +msgid "Can't find the Qos policy in array" msgstr "" -#: cinder/virt/baremetal/dom.py:244 -msgid "domain does not exist" +#: cinder/volume/drivers/huawei/rest_common.py:246 +msgid "Can't find lun or lun group in array" msgstr "" -#: cinder/virt/baremetal/nodes.py:42 +#: cinder/volume/drivers/huawei/rest_common.py:280 #, python-format -msgid "Unknown baremetal driver %(d)s" +msgid "Invalid resource pool: %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:148 +#: cinder/volume/drivers/huawei/rest_common.py:298 #, python-format -msgid "Error encountered when destroying instance '%(name)s': %(ex)s" +msgid "Get pool info error, pool name is:%s" msgstr "" -#: cinder/virt/baremetal/proxy.py:162 +#: cinder/volume/drivers/huawei/rest_common.py:327 #, python-format -msgid "instance %(instance_name)s: deleting instance files %(target)s" +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." msgstr "" -#: cinder/virt/baremetal/proxy.py:189 +#: cinder/volume/drivers/huawei/rest_common.py:354 #, python-format -msgid "instance %s: rebooted" +msgid "_stop_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." msgstr "" -#: cinder/virt/baremetal/proxy.py:192 -msgid "_wait_for_reboot failed" +#: cinder/volume/drivers/huawei/rest_common.py:474 +#, python-format +msgid "" +"_mapping_hostgroup_and_lungroup: lun_group: %(lun_group)sview_id: " +"%(view_id)s" msgstr "" -#: cinder/virt/baremetal/proxy.py:222 +#: cinder/volume/drivers/huawei/rest_common.py:511 +#: cinder/volume/drivers/huawei/rest_common.py:543 #, python-format -msgid "instance %s: rescued" +msgid "initiator name:%(initiator_name)s, volume name:%(volume)s." 
msgstr "" -#: cinder/virt/baremetal/proxy.py:225 -msgid "_wait_for_rescue failed" +#: cinder/volume/drivers/huawei/rest_common.py:527 +#, python-format +msgid "host lun id is %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:242 -msgid "<============= spawn of baremetal =============>" +#: cinder/volume/drivers/huawei/rest_common.py:553 +#, python-format +msgid "the free wwns %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:255 +#: cinder/volume/drivers/huawei/rest_common.py:574 #, python-format -msgid "instance %s: is building" +msgid "the fc server properties is:%s" msgstr "" -#: cinder/virt/baremetal/proxy.py:260 -msgid "Key is injected but instance is not running yet" +#: cinder/volume/drivers/huawei/rest_common.py:688 +#, python-format +msgid "JSON transfer data error. %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:265 +#: cinder/volume/drivers/huawei/rest_common.py:874 #, python-format -msgid "instance %s: booted" +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." msgstr "" -#: cinder/virt/baremetal/proxy.py:268 +#: cinder/volume/drivers/huawei/rest_common.py:937 #, python-format -msgid "~~~~~~ current state = %s ~~~~~~" +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(fetchtype)s" msgstr "" -#: cinder/virt/baremetal/proxy.py:269 +#: cinder/volume/drivers/huawei/rest_common.py:964 #, python-format -msgid "instance %s spawned successfully" +msgid "" +"PrefetchType config is wrong. PrefetchType must in 1,2,3,4. fetchtype " +"is:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:970 +msgid "Use default prefetch fetchtype. Prefetch fetchtype:Intelligent." msgstr "" -#: cinder/virt/baremetal/proxy.py:272 +#: cinder/volume/drivers/huawei/rest_common.py:982 #, python-format -msgid "instance %s:not booted" +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal.LUNcopy name: " +"%(luncopyname)s" msgstr "" -#: cinder/virt/baremetal/proxy.py:274 -msgid "Bremetal assignment is overcommitted." +#: cinder/volume/drivers/huawei/rest_common.py:1056 +#, python-format +msgid "" +"_get_iscsi_port_info: Failed to get iscsi port info through config IP " +"%(ip)s, please check config file." msgstr "" -#: cinder/virt/baremetal/proxy.py:354 +#: cinder/volume/drivers/huawei/rest_common.py:1101 #, python-format -msgid "instance %s: Creating image" +msgid "_get_tgt_iqn: iSCSI target iqn is %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:473 +#: cinder/volume/drivers/huawei/rest_common.py:1124 #, python-format -msgid "instance %(inst_name)s: injecting %(injection)s into image %(img_id)s" +msgid "_parse_volume_type: type id: %(type_id)s config parameter is: %(params)s" msgstr "" -#: cinder/virt/baremetal/proxy.py:484 +#: cinder/volume/drivers/huawei/rest_common.py:1157 #, python-format msgid "" -"instance %(inst_name)s: ignoring error injecting data into image " -"%(img_id)s (%(e)s)" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the configuration file " +"%(conf)s." 
msgstr "" -#: cinder/virt/baremetal/proxy.py:529 +#: cinder/volume/drivers/huawei/rest_common.py:1162 #, python-format -msgid "instance %s: starting toXML method" +msgid "The config parameters are: %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:531 +#: cinder/volume/drivers/huawei/rest_common.py:1239 +#: cinder/volume/drivers/huawei/ssh_common.py:118 +#: cinder/volume/drivers/huawei/ssh_common.py:1265 #, python-format -msgid "instance %s: finished toXML method" +msgid "_check_conf_file: Config file invalid. %s must be set." msgstr "" -#: cinder/virt/baremetal/proxy.py:574 cinder/virt/libvirt/connection.py:1815 -msgid "" -"Cannot get the number of cpu, because this function is not implemented " -"for this platform. This error can be safely ignored for now." +#: cinder/volume/drivers/huawei/rest_common.py:1246 +#: cinder/volume/drivers/huawei/ssh_common.py:125 +msgid "_check_conf_file: Config file invalid. StoragePool must be set." msgstr "" -#: cinder/virt/baremetal/proxy.py:714 +#: cinder/volume/drivers/huawei/rest_common.py:1256 #, python-format -msgid "#### RLK: cpu_arch = %s " +msgid "" +"_check_conf_file: Config file invalid. Host OSType invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1300 +msgid "Can not find lun in array" msgstr "" -#: cinder/virt/baremetal/proxy.py:746 -msgid "Updating!" +#: cinder/volume/drivers/huawei/ssh_common.py:54 +#, python-format +msgid "ssh_read: Read SSH timeout. %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:773 cinder/virt/libvirt/connection.py:2609 -#: cinder/virt/xenapi/host.py:129 -msgid "Updating host stats" +#: cinder/volume/drivers/huawei/ssh_common.py:70 +msgid "No response message. Please check system status." msgstr "" -#: cinder/virt/baremetal/tilera.py:185 -msgid "free_node...." +#: cinder/volume/drivers/huawei/ssh_common.py:101 +#: cinder/volume/drivers/huawei/ssh_common.py:1249 +msgid "do_setup" msgstr "" -#: cinder/virt/baremetal/tilera.py:216 +#: cinder/volume/drivers/huawei/ssh_common.py:135 +#: cinder/volume/drivers/huawei/ssh_common.py:1287 #, python-format -msgid "deactivate_node is called for node_id = %(id)s node_ip = %(ip)s" +msgid "" +"_check_conf_file: Config file invalid. Host OSType is invalid.\n" +"The valid values are: %(os_list)s" msgstr "" -#: cinder/virt/baremetal/tilera.py:221 -msgid "status of node is set to 0" +#: cinder/volume/drivers/huawei/ssh_common.py:169 +#, python-format +msgid "_get_login_info: %s" msgstr "" -#: cinder/virt/baremetal/tilera.py:232 -msgid "rootfs is already removed" +#: cinder/volume/drivers/huawei/ssh_common.py:224 +#, python-format +msgid "create_volume: volume name: %s" msgstr "" -#: cinder/virt/baremetal/tilera.py:264 -msgid "Before ping to the bare-metal node" +#: cinder/volume/drivers/huawei/ssh_common.py:242 +#, python-format +msgid "" +"_name_translate: Name in cinder: %(old)s, new name in storage system: " +"%(new)s" msgstr "" -#: cinder/virt/baremetal/tilera.py:275 +#: cinder/volume/drivers/huawei/ssh_common.py:279 #, python-format -msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is ready" +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the element in configuration " +"file %(conf)s." 
msgstr "" -#: cinder/virt/baremetal/tilera.py:279 +#: cinder/volume/drivers/huawei/ssh_common.py:373 +#: cinder/volume/drivers/huawei/ssh_common.py:1451 #, python-format -msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is not ready, out_msg=%(out_msg)s" +msgid "LUNType must be \"Thin\" or \"Thick\". LUNType:%(type)s" msgstr "" -#: cinder/virt/baremetal/tilera.py:292 -msgid "Noting to do for tilera nodes: vmlinux is in CF" +#: cinder/volume/drivers/huawei/ssh_common.py:395 +msgid "" +"_parse_conf_lun_params: Use default prefetch type. Prefetch type: " +"Intelligent" msgstr "" -#: cinder/virt/baremetal/tilera.py:316 -msgid "activate_node" +#: cinder/volume/drivers/huawei/ssh_common.py:421 +#, python-format +msgid "" +"_get_maximum_capacity_pool_id: Failed to get pool id. Please check config" +" file and make sure the StoragePool %s is created in storage array." msgstr "" -#: cinder/virt/baremetal/tilera.py:330 -msgid "Node is unknown error state." +#: cinder/volume/drivers/huawei/ssh_common.py:436 +#, python-format +msgid "CLI command: %s" msgstr "" -#: cinder/virt/disk/api.py:165 -msgid "no capable image handler configured" +#: cinder/volume/drivers/huawei/ssh_common.py:466 +#, python-format +msgid "" +"_execute_cli: Can not connect to IP %(old)s, try to connect to the other " +"IP %(new)s." msgstr "" -#: cinder/virt/disk/api.py:178 +#: cinder/volume/drivers/huawei/ssh_common.py:501 #, python-format -msgid "unknown disk image handler: %s" +msgid "_execute_cli: %s" msgstr "" -#: cinder/virt/disk/api.py:189 -msgid "image already mounted" +#: cinder/volume/drivers/huawei/ssh_common.py:511 +#, python-format +msgid "delete_volume: volume name: %s" msgstr "" -#: cinder/virt/disk/api.py:276 cinder/virt/disk/guestfs.py:64 -#: cinder/virt/disk/guestfs.py:78 cinder/virt/disk/mount.py:100 +#: cinder/volume/drivers/huawei/ssh_common.py:516 #, python-format -msgid "Failed to mount filesystem: %s" +msgid "delete_volume: Volume %(name)s does not exist." msgstr "" -#: cinder/virt/disk/api.py:291 +#: cinder/volume/drivers/huawei/ssh_common.py:570 #, python-format -msgid "Failed to remove container: %s" +msgid "" +"create_volume_from_snapshot: snapshot name: %(snapshot)s, volume name: " +"%(volume)s" msgstr "" -#: cinder/virt/disk/api.py:441 +#: cinder/volume/drivers/huawei/ssh_common.py:580 #, python-format -msgid "User %(username)s not found in password file." +msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." msgstr "" -#: cinder/virt/disk/api.py:457 +#: cinder/volume/drivers/huawei/ssh_common.py:650 #, python-format -msgid "User %(username)s not found in shadow file." +msgid "_wait_for_luncopy: LUNcopy %(luncopyname)s status is %(status)s." msgstr "" -#: cinder/virt/disk/guestfs.py:39 +#: cinder/volume/drivers/huawei/ssh_common.py:688 #, python-format -msgid "unsupported partition: %s" +msgid "create_cloned_volume: src volume: %(src)s, tgt volume: %(tgt)s" msgstr "" -#: cinder/virt/disk/guestfs.py:77 -msgid "unknown guestmount error" +#: cinder/volume/drivers/huawei/ssh_common.py:697 +#, python-format +msgid "Source volume %(name)s does not exist." 
msgstr "" -#: cinder/virt/disk/loop.py:30 +#: cinder/volume/drivers/huawei/ssh_common.py:739 #, python-format -msgid "Could not attach image to loopback: %s" +msgid "" +"extend_volume: extended volume name: %(extended_name)s new added volume " +"name: %(added_name)s new added volume size: %(added_size)s" msgstr "" -#: cinder/virt/disk/mount.py:76 -msgid "no partitions found" +#: cinder/volume/drivers/huawei/ssh_common.py:747 +#, python-format +msgid "extend_volume: volume %s does not exist." msgstr "" -#: cinder/virt/disk/mount.py:77 +#: cinder/volume/drivers/huawei/ssh_common.py:779 #, python-format -msgid "Failed to map partitions: %s" +msgid "create_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" msgstr "" -#: cinder/virt/disk/nbd.py:58 -msgid "nbd unavailable: module not loaded" +#: cinder/volume/drivers/huawei/ssh_common.py:785 +msgid "create_snapshot: Resource pool needs 1GB valid size at least." msgstr "" -#: cinder/virt/disk/nbd.py:63 -msgid "No free nbd devices" +#: cinder/volume/drivers/huawei/ssh_common.py:792 +#, python-format +msgid "create_snapshot: Volume %(name)s does not exist." msgstr "" -#: cinder/virt/disk/nbd.py:81 +#: cinder/volume/drivers/huawei/ssh_common.py:855 #, python-format -msgid "qemu-nbd error: %s" +msgid "delete_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" msgstr "" -#: cinder/virt/disk/nbd.py:93 +#: cinder/volume/drivers/huawei/ssh_common.py:865 #, python-format -msgid "nbd device %s did not show up" +msgid "" +"delete_snapshot: Can not delete snapshot %s for it is a source LUN of " +"LUNCopy." msgstr "" -#: cinder/virt/libvirt/connection.py:265 +#: cinder/volume/drivers/huawei/ssh_common.py:873 #, python-format -msgid "Connecting to libvirt: %s" +msgid "delete_snapshot: Snapshot %(snap)s does not exist." msgstr "" -#: cinder/virt/libvirt/connection.py:286 -msgid "Connection to libvirt broke" +#: cinder/volume/drivers/huawei/ssh_common.py:916 +#, python-format +msgid "" +"%(func)s: %(msg)s\n" +"CLI command: %(cmd)s\n" +"CLI out: %(out)s" msgstr "" -#: cinder/virt/libvirt/connection.py:388 +#: cinder/volume/drivers/huawei/ssh_common.py:933 #, python-format -msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s" +msgid "map_volume: Volume %s was not found." msgstr "" -#: cinder/virt/libvirt/connection.py:400 +#: cinder/volume/drivers/huawei/ssh_common.py:1079 #, python-format -msgid "" -"Error from libvirt during saved instance removal. Code=%(errcode)s " -"Error=%(e)s" +msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s." msgstr "" -#: cinder/virt/libvirt/connection.py:411 +#: cinder/volume/drivers/huawei/ssh_common.py:1102 #, python-format -msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s" +msgid "remove_map: Host %s does not exist." msgstr "" -#: cinder/virt/libvirt/connection.py:423 -msgid "Instance destroyed successfully." +#: cinder/volume/drivers/huawei/ssh_common.py:1106 +#, python-format +msgid "remove_map: Volume %s does not exist." msgstr "" -#: cinder/virt/libvirt/connection.py:435 +#: cinder/volume/drivers/huawei/ssh_common.py:1119 #, python-format -msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s" +msgid "remove_map: No map between host %(host)s and volume %(volume)s." msgstr "" -#: cinder/virt/libvirt/connection.py:461 +#: cinder/volume/drivers/huawei/ssh_common.py:1138 #, python-format -msgid "Deleting instance files %(target)s" +msgid "" +"_delete_map: There are IOs accessing the system. Retry to delete host map" +" %(mapid)s 10s later." 
msgstr "" -#: cinder/virt/libvirt/connection.py:554 -msgid "attaching LXC block device" +#: cinder/volume/drivers/huawei/ssh_common.py:1146 +#, python-format +msgid "" +"_delete_map: Failed to delete host map %(mapid)s.\n" +"CLI out: %(out)s" msgstr "" -#: cinder/virt/libvirt/connection.py:567 -msgid "detaching LXC block device" +#: cinder/volume/drivers/huawei/ssh_common.py:1185 +msgid "_update_volume_stats: Updating volume stats." msgstr "" -#: cinder/virt/libvirt/connection.py:692 -msgid "Instance soft rebooted successfully." +#: cinder/volume/drivers/huawei/ssh_common.py:1277 +msgid "_check_conf_file: Config file invalid. StoragePool must be specified." msgstr "" -#: cinder/virt/libvirt/connection.py:696 -msgid "Failed to soft reboot instance." +#: cinder/volume/drivers/huawei/ssh_common.py:1311 +msgid "" +"_get_device_type: The driver only supports Dorado5100 and Dorado 2100 G2 " +"now." msgstr "" -#: cinder/virt/libvirt/connection.py:725 -msgid "Instance shutdown successfully." +#: cinder/volume/drivers/huawei/ssh_common.py:1389 +#, python-format +msgid "" +"create_volume_from_snapshot: %(device)s does not support create volume " +"from snapshot." msgstr "" -#: cinder/virt/libvirt/connection.py:761 cinder/virt/libvirt/connection.py:905 -msgid "During reboot, instance disappeared." +#: cinder/volume/drivers/huawei/ssh_common.py:1396 +#, python-format +msgid "create_cloned_volume: %(device)s does not support clone volume." msgstr "" -#: cinder/virt/libvirt/connection.py:766 -msgid "Instance rebooted successfully." +#: cinder/volume/drivers/huawei/ssh_common.py:1404 +#, python-format +msgid "extend_volume: %(device)s does not support extend volume." msgstr "" -#: cinder/virt/libvirt/connection.py:867 cinder/virt/xenapi/vmops.py:1358 +#: cinder/volume/drivers/huawei/ssh_common.py:1413 #, python-format -msgid "" -"Found %(migration_count)d unconfirmed migrations older than " -"%(confirm_window)d seconds" +msgid "create_snapshot: %(device)s does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:132 +msgid "enter: do_setup" msgstr "" -#: cinder/virt/libvirt/connection.py:871 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:142 #, python-format -msgid "Automatically confirming migration %d" +msgid "Failed getting details for pool %s" msgstr "" -#: cinder/virt/libvirt/connection.py:896 -msgid "Instance is running" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:179 +msgid "do_setup: No configured nodes." msgstr "" -#: cinder/virt/libvirt/connection.py:910 -msgid "Instance spawned successfully." 
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:182 +msgid "leave: do_setup" msgstr "" -#: cinder/virt/libvirt/connection.py:926 -#, python-format -msgid "data: %(data)r, fpath: %(fpath)r" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:186 +msgid "enter: check_for_setup_error" msgstr "" -#: cinder/virt/libvirt/connection.py:978 -msgid "Guest does not have a console available" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:190 +msgid "Unable to determine system name" msgstr "" -#: cinder/virt/libvirt/connection.py:1020 -#, python-format -msgid "Path '%(path)s' supports direct I/O" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:193 +msgid "Unable to determine system id" msgstr "" -#: cinder/virt/libvirt/connection.py:1024 -#, python-format -msgid "Path '%(path)s' does not support direct I/O: '%(ex)s'" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:196 +msgid "Unable to determine pool extent size" msgstr "" -#: cinder/virt/libvirt/connection.py:1028 cinder/virt/libvirt/connection.py:1032 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:203 +#: cinder/volume/drivers/netapp/iscsi.py:122 +#: cinder/volume/drivers/netapp/nfs.py:639 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:157 #, python-format -msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'" +msgid "%s is not set" msgstr "" -#: cinder/virt/libvirt/connection.py:1153 -msgid "Creating image" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:209 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" msgstr "" -#: cinder/virt/libvirt/connection.py:1339 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:217 #, python-format -msgid "Injecting %(injection)s into image %(img_id)s" +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" msgstr "" -#: cinder/virt/libvirt/connection.py:1349 -#, python-format -msgid "Ignoring error injecting data into image %(img_id)s (%(e)s)" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:225 +msgid "leave: check_for_setup_error" msgstr "" -#: cinder/virt/libvirt/connection.py:1381 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:235 #, python-format -msgid "block_device_list %s" +msgid "ensure_export: Volume %s not found on storage" msgstr "" -#: cinder/virt/libvirt/connection.py:1658 -msgid "Starting toXML method" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:254 +msgid "The connector does not contain the required information." 
msgstr "" -#: cinder/virt/libvirt/connection.py:1662 -msgid "Finished toXML method" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:277 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" msgstr "" -#: cinder/virt/libvirt/connection.py:1679 -#, python-format -msgid "" -"Error from libvirt while looking up %(instance_name)s: [Error Code " -"%(error_code)s] %(ex)s" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:297 +msgid "CHAP secret exists for host but CHAP is disabled" msgstr "" -#: cinder/virt/libvirt/connection.py:1920 -msgid "libvirt version is too old (does not support getVersion)" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:302 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" msgstr "" -#: cinder/virt/libvirt/connection.py:1942 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:314 #, python-format -msgid "'' must be 1, but %d\n" +msgid "Did not find expected column name in lsvdisk: %s" msgstr "" -#: cinder/virt/libvirt/connection.py:1969 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:316 #, python-format -msgid "topology (%(topology)s) must have %(ks)s" +msgid "initialize_connection: Missing volume attribute for volume %s" msgstr "" -#: cinder/virt/libvirt/connection.py:2067 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:333 #, python-format msgid "" -"Instance launched has CPU info:\n" -"%s" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" msgstr "" -#: cinder/virt/libvirt/connection.py:2079 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:342 #, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:365 msgid "" -"CPU doesn't have compatibility.\n" -"\n" -"%(ret)s\n" -"\n" -"Refer to %(u)s" +"Could not get FC connection information for the host-volume connection. " +"Is the host configured properly for FC connections?" msgstr "" -#: cinder/virt/libvirt/connection.py:2136 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:380 #, python-format -msgid "Timeout migrating for %s. nwfilter not found." +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" msgstr "" -#: cinder/virt/libvirt/connection.py:2352 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:385 #, python-format -msgid "skipping %(path)s since it looks like volume" +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" msgstr "" -#: cinder/virt/libvirt/connection.py:2407 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:403 #, python-format -msgid "Getting disk size of %(i_name)s: %(e)s" +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:411 +msgid "terminate_connection: Failed to get host name from connector." msgstr "" -#: cinder/virt/libvirt/connection.py:2458 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:421 #, python-format -msgid "Instance %s: Starting migrate_disk_and_power_off" +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" msgstr "" -#: cinder/virt/libvirt/connection.py:2513 -msgid "During wait running, instance disappeared." +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:447 +msgid "create_volume_from_snapshot: Source and destination size differ." 
msgstr "" -#: cinder/virt/libvirt/connection.py:2518 -msgid "Instance running successfully." +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:459 +msgid "create_cloned_volume: Source and destination size differ." msgstr "" -#: cinder/virt/libvirt/connection.py:2525 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:470 #, python-format -msgid "Instance %s: Starting finish_migration" +msgid "enter: extend_volume: volume %s" msgstr "" -#: cinder/virt/libvirt/connection.py:2565 -#, python-format -msgid "Instance %s: Starting finish_revert_migration" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:474 +msgid "extend_volume: Extending a volume with snapshots is not supported." msgstr "" -#: cinder/virt/libvirt/firewall.py:42 -msgid "" -"Libvirt module could not be loaded. NWFilterFirewall will not work " -"correctly." +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:481 +#, python-format +msgid "leave: extend_volume: volume %s" msgstr "" -#: cinder/virt/libvirt/firewall.py:93 -msgid "Called setup_basic_filtering in nwfilter" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:497 +#, python-format +msgid "enter: migrate_volume: id=%(id)s, host=%(host)s" msgstr "" -#: cinder/virt/libvirt/firewall.py:101 -msgid "Ensuring static filters" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:523 +#, python-format +msgid "leave: migrate_volume: id=%(id)s, host=%(host)s" msgstr "" -#: cinder/virt/libvirt/firewall.py:171 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:540 #, python-format -msgid "The nwfilter(%(instance_filter_name)s) is not found." +msgid "" +"enter: retype: id=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" msgstr "" -#: cinder/virt/libvirt/firewall.py:217 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:581 #, python-format -msgid "The nwfilter(%(instance_filter_name)s) for%(name)s is not found." +msgid "" +"exit: retype: ild=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" msgstr "" -#: cinder/virt/libvirt/firewall.py:233 -msgid "iptables firewall: Setup Basic Filtering" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:622 +msgid "Could not get pool data from the storage" msgstr "" -#: cinder/virt/libvirt/firewall.py:252 -msgid "Attempted to unfilter instance which is not filtered" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:623 +msgid "_update_volume_stats: Could not get storage pool data" msgstr "" -#: cinder/virt/libvirt/imagecache.py:170 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:44 #, python-format -msgid "%s is a valid instance name" +msgid "Could not find key in output of command %(cmd)s: %(out)s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:173 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:64 #, python-format -msgid "%s has a disk file" +msgid "Failed to get code level (%s)." 
msgstr "" -#: cinder/virt/libvirt/imagecache.py:175 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:86 #, python-format -msgid "Instance %(instance)s is backed by %(backing)s" +msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:186 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:143 #, python-format -msgid "" -"Instance %(instance)s is using a backing file %(backing)s which does not " -"appear in the image service" +msgid "WWPN on node %(node)s: %(wwpn)s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:237 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:165 #, python-format -msgid "%(id)s (%(base_file)s): image verification failed" +msgid "Failed to find host %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:247 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:178 #, python-format -msgid "%(id)s (%(base_file)s): image verification skipped, no hash stored" +msgid "enter: get_host_from_connector: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:266 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:207 #, python-format -msgid "Cannot remove %(base_file)s, it does not exist" +msgid "leave: get_host_from_connector: host %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:278 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:218 #, python-format -msgid "Base file too young to remove: %s" +msgid "enter: create_host: host %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:281 -#, python-format -msgid "Removing base file: %s" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:224 +msgid "create_host: Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:235 +msgid "create_host: No initiators or wwpns supplied." msgstr "" -#: cinder/virt/libvirt/imagecache.py:288 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:265 #, python-format -msgid "Failed to remove %(base_file)s, error was %(error)s" +msgid "leave: create_host: host %(host)s - %(host_name)s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:299 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:275 #, python-format -msgid "%(id)s (%(base_file)s): checking" +msgid "enter: map_vol_to_host: volume %(volume_name)s to host %(host_name)s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:318 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:301 #, python-format msgid "" -"%(id)s (%(base_file)s): in use: on this node %(local)d local, %(remote)d " -"on other nodes" +"leave: map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host " +"%(host_name)s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:330 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:311 #, python-format -msgid "" -"%(id)s (%(base_file)s): warning -- an absent base file is in use! " -"instances: %(instance_list)s" +msgid "enter: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:338 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:318 #, python-format -msgid "%(id)s (%(base_file)s): in use on (%(remote)d on other nodes)" +msgid "unmap_vol_from_host: No mapping of volume %(vol_name)s to any host found." msgstr "" -#: cinder/virt/libvirt/imagecache.py:348 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:324 #, python-format -msgid "%(id)s (%(base_file)s): image is not in use" +msgid "" +"unmap_vol_from_host: Multiple mappings of volume %(vol_name)s found, no " +"host specified." 
msgstr "" -#: cinder/virt/libvirt/imagecache.py:354 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:336 #, python-format -msgid "%(id)s (%(base_file)s): image is in use" +msgid "" +"unmap_vol_from_host: No mapping of volume %(vol_name)s to host %(host) " +"found." msgstr "" -#: cinder/virt/libvirt/imagecache.py:377 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:348 #, python-format -msgid "Skipping verification, no base directory at %s" +msgid "leave: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:381 -msgid "Verify base images" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:377 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" msgstr "" -#: cinder/virt/libvirt/imagecache.py:388 -#, python-format -msgid "Image id %(id)s yields fingerprint %(fingerprint)s" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:383 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" msgstr "" -#: cinder/virt/libvirt/imagecache.py:406 -#, python-format -msgid "Unknown base file: %s" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:390 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" msgstr "" -#: cinder/virt/libvirt/imagecache.py:411 -#, python-format -msgid "Active base files: %s" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:397 +msgid "System does not support compression" msgstr "" -#: cinder/virt/libvirt/imagecache.py:414 -#, python-format -msgid "Corrupt base files: %s" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:402 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" msgstr "" -#: cinder/virt/libvirt/imagecache.py:418 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:408 #, python-format -msgid "Removable base files: %s" +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:426 -msgid "Verification complete" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:417 +#, python-format +msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s" msgstr "" -#: cinder/virt/libvirt/utils.py:264 -msgid "Unable to find an open port" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:452 +msgid "Protocol must be specified as ' iSCSI' or ' FC'." msgstr "" -#: cinder/virt/libvirt/vif.py:90 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:495 #, python-format -msgid "Ensuring vlan %(vlan)s and bridge %(bridge)s" +msgid "enter: create_vdisk: vdisk %s " msgstr "" -#: cinder/virt/libvirt/vif.py:99 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:498 #, python-format -msgid "Ensuring bridge %s" +msgid "leave: _create_vdisk: volume %s " msgstr "" -#: cinder/virt/libvirt/vif.py:165 cinder/virt/libvirt/vif.py:220 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:525 #, python-format -msgid "Failed while unplugging vif of instance '%s'" +msgid "" +"Unexecpted mapping status %(status)s for mapping%(id)s. Attributes: " +"%(attr)s" msgstr "" -#: cinder/virt/libvirt/volume.py:163 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:535 #, python-format -msgid "iSCSI device not found at %s" +msgid "" +"Mapping %(id)s prepare failed to complete within theallotted %(to)d " +"seconds timeout. Terminating." 
msgstr "" -#: cinder/virt/libvirt/volume.py:166 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:544 #, python-format msgid "" -"ISCSI volume not yet found at: %(mount_device)s. Will rescan & retry. " -"Try number: %(tries)s" +"enter: run_flashcopy: execute FlashCopy from source %(source)s to target " +"%(target)s" msgstr "" -#: cinder/virt/libvirt/volume.py:178 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:552 #, python-format -msgid "Found iSCSI node %(mount_device)s (after %(tries)s rescans)" +msgid "leave: run_flashcopy: FlashCopy started from %(source)s to %(target)s" msgstr "" -#: cinder/virt/vmwareapi/error_util.py:93 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:572 #, python-format -msgid "Error(s) %s occurred in the call to RetrieveProperties" +msgid "Loopcall: _check_vdisk_fc_mappings(), vdisk %s" msgstr "" -#: cinder/virt/vmwareapi/fake.py:44 cinder/virt/xenapi/fake.py:77 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:595 #, python-format -msgid "%(text)s: _db_content => %(content)s" +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" msgstr "" -#: cinder/virt/vmwareapi/fake.py:131 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:631 #, python-format -msgid "Property %(attr)s not set for the managed object %(objName)s" -msgstr "" - -#: cinder/virt/vmwareapi/fake.py:437 -msgid "There is no VM registered" +msgid "Calling _ensure_vdisk_no_fc_mappings: vdisk %s" msgstr "" -#: cinder/virt/vmwareapi/fake.py:439 cinder/virt/vmwareapi/fake.py:609 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:639 #, python-format -msgid "Virtual Machine with ref %s is not there" +msgid "enter: delete_vdisk: vdisk %s" msgstr "" -#: cinder/virt/vmwareapi/fake.py:502 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:641 #, python-format -msgid "Logging out a session that is invalid or already logged out: %s" +msgid "Tried to delete non-existant vdisk %s." msgstr "" -#: cinder/virt/vmwareapi/fake.py:517 -msgid "Session is faulty" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:645 +#, python-format +msgid "leave: delete_vdisk: vdisk %s" msgstr "" -#: cinder/virt/vmwareapi/fake.py:520 -msgid "Session Invalid" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:649 +#, python-format +msgid "enter: create_copy: snapshot %(src)s to %(tgt)s" msgstr "" -#: cinder/virt/vmwareapi/fake.py:606 -msgid " No Virtual Machine has been registered yet" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:654 +#, python-format +msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist" msgstr "" -#: cinder/virt/vmwareapi/io_util.py:99 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:669 #, python-format -msgid "Glance image %s is in killed state" +msgid "leave: _create_copy: snapshot %(tgt)s from vdisk %(src)s" msgstr "" -#: cinder/virt/vmwareapi/io_util.py:107 -#, python-format -msgid "Glance image %(image_id)s is in unknown state - %(state)s" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:691 +msgid "migrate_volume started without a vdisk copy in the expected pool." 
msgstr "" -#: cinder/virt/vmwareapi/network_utils.py:128 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:743 +#, python-format msgid "" -"ESX SOAP server returned an empty port group for the host system in its " -"response" +"Ignore change IO group as storage code level is %(code_level)s, below " +"then 6.4.0.0" msgstr "" -#: cinder/virt/vmwareapi/network_utils.py:155 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:35 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:211 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:244 #, python-format -msgid "Creating Port Group with name %s on the ESX host" +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/virt/vmwareapi/network_utils.py:169 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:53 #, python-format -msgid "Created Port Group with name %s on the ESX host" +msgid "Expected no output from CLI command %(cmd)s, got %(out)s" msgstr "" -#: cinder/virt/vmwareapi/read_write_util.py:150 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:65 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:256 #, python-format -msgid "Exception during HTTP connection close in VMWareHTTpWrite. Exception is %s" +msgid "" +"Failed to parse CLI output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/virt/vmwareapi/vim.py:84 -msgid "Unable to import suds." +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:142 +msgid "Must pass wwpn or host to lsfabric." msgstr "" -#: cinder/virt/vmwareapi/vim.py:90 -msgid "Must specify vmwareapi_wsdl_loc" +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:158 +#, python-format +msgid "Did not find success message nor error for %(fun)s: %(out)s" msgstr "" -#: cinder/virt/vmwareapi/vim.py:145 -#, python-format -msgid "No such SOAP method '%s' provided by VI SDK" +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:163 +msgid "" +"storwize_svc_multihostmap_enabled is set to False, not allowing multi " +"host mapping." msgstr "" -#: cinder/virt/vmwareapi/vim.py:150 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:347 #, python-format -msgid "httplib error in %s: " +msgid "Did not find expected key %(key)s in %(fun)s: %(raw)s" msgstr "" -#: cinder/virt/vmwareapi/vim.py:157 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:382 #, python-format -msgid "Socket error in %s: " +msgid "" +"Unexpected CLI response: header/row mismatch. header: %(header)s, row: " +"%(row)s" msgstr "" -#: cinder/virt/vmwareapi/vim.py:162 +#: cinder/volume/drivers/netapp/api.py:419 #, python-format -msgid "Type error in %s: " +msgid "No element by given name %s." msgstr "" -#: cinder/virt/vmwareapi/vim.py:166 -#, python-format -msgid "Exception in %s " +#: cinder/volume/drivers/netapp/api.py:440 +msgid "Not a valid value for NaElement." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:66 -msgid "Getting list of instances" +#: cinder/volume/drivers/netapp/api.py:444 +msgid "NaElement name cannot be null." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:82 -#, python-format -msgid "Got total of %s instances" +#: cinder/volume/drivers/netapp/api.py:468 +msgid "Type cannot be converted into NaElement." 
msgstr "" -#: cinder/virt/vmwareapi/vmops.py:126 -msgid "Couldn't get a local Datastore reference" +#: cinder/volume/drivers/netapp/common.py:75 +msgid "Required configuration not found" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:196 +#: cinder/volume/drivers/netapp/common.py:103 #, python-format -msgid "Creating VM with the name %s on the ESX host" +msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:205 +#: cinder/volume/drivers/netapp/common.py:109 #, python-format -msgid "Created VM with the name %s on the ESX host" +msgid "Storage family %s is not supported" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:234 +#: cinder/volume/drivers/netapp/common.py:116 #, python-format -msgid "" -"Creating Virtual Disk of size %(vmdk_file_size_in_kb)s KB and adapter " -"type %(adapter_type)s on the ESX host local store %(data_store_name)s" +msgid "No default storage protocol found for storage family %(storage_family)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:251 +#: cinder/volume/drivers/netapp/common.py:123 #, python-format msgid "" -"Created Virtual Disk of size %(vmdk_file_size_in_kb)s KB on the ESX host " -"local store %(data_store_name)s" +"Protocol %(storage_protocol)s is not supported for storage family " +"%(storage_family)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:260 +#: cinder/volume/drivers/netapp/common.py:130 #, python-format msgid "" -"Deleting the file %(flat_uploaded_vmdk_path)s on the ESX host localstore " -"%(data_store_name)s" +"NetApp driver of family %(storage_family)s and protocol " +"%(storage_protocol)s loaded" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:272 -#, python-format -msgid "" -"Deleted the file %(flat_uploaded_vmdk_path)s on the ESX host local store " -"%(data_store_name)s" +#: cinder/volume/drivers/netapp/common.py:139 +msgid "Only loading netapp drivers supported." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:283 +#: cinder/volume/drivers/netapp/common.py:158 #, python-format msgid "" -"Downloading image file data %(image_ref)s to the ESX data store " -"%(data_store_name)s" +"The configured NetApp driver is deprecated. Please refer the link to " +"resolve the issue '%s'." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:298 +#: cinder/volume/drivers/netapp/iscsi.py:69 #, python-format -msgid "" -"Downloaded image file data %(image_ref)s to the ESX data store " -"%(data_store_name)s" +msgid "No metadata property %(prop)s defined for the LUN %(name)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:315 +#: cinder/volume/drivers/netapp/iscsi.py:105 #, python-format -msgid "Reconfiguring VM instance %s to attach the image disk" +msgid "Using NetApp filer: %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:322 -#, python-format -msgid "Reconfigured VM instance %s to attach the image disk" +#: cinder/volume/drivers/netapp/iscsi.py:150 +msgid "Success getting LUN list from server" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:329 +#: cinder/volume/drivers/netapp/iscsi.py:166 #, python-format -msgid "Powering on the VM instance %s" +msgid "Created LUN with name %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:335 +#: cinder/volume/drivers/netapp/iscsi.py:175 #, python-format -msgid "Powered on the VM instance %s" +msgid "No entry in LUN table for volume/snapshot %(name)s." 
msgstr "" -#: cinder/virt/vmwareapi/vmops.py:381 +#: cinder/volume/drivers/netapp/iscsi.py:191 #, python-format -msgid "Creating Snapshot of the VM instance %s " +msgid "Destroyed LUN %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:391 +#: cinder/volume/drivers/netapp/iscsi.py:227 #, python-format -msgid "Created Snapshot of the VM instance %s " +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:434 +#: cinder/volume/drivers/netapp/iscsi.py:232 #, python-format -msgid "Copying disk data before snapshot of the VM instance %s" +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:447 +#: cinder/volume/drivers/netapp/iscsi.py:238 #, python-format -msgid "Copied disk data before snapshot of the VM instance %s" +msgid "Failed to get LUN target details for the LUN %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:456 +#: cinder/volume/drivers/netapp/iscsi.py:249 #, python-format -msgid "Uploading image %s" +msgid "Failed to get target portal for the LUN %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:469 +#: cinder/volume/drivers/netapp/iscsi.py:252 #, python-format -msgid "Uploaded image %s" +msgid "Failed to get target IQN for the LUN %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:479 +#: cinder/volume/drivers/netapp/iscsi.py:290 #, python-format -msgid "Deleting temporary vmdk file %s" +msgid "Snapshot %s deletion successful" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:488 +#: cinder/volume/drivers/netapp/iscsi.py:310 +#: cinder/volume/drivers/netapp/iscsi.py:565 +#: cinder/volume/drivers/netapp/nfs.py:99 +#: cinder/volume/drivers/netapp/nfs.py:206 #, python-format -msgid "Deleted temporary vmdk file %s" +msgid "Resizing %s failed. Cleaning volume." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:520 -msgid "instance is not powered on" +#: cinder/volume/drivers/netapp/iscsi.py:325 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:527 +#: cinder/volume/drivers/netapp/iscsi.py:412 #, python-format -msgid "Rebooting guest OS of VM %s" +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:530 +#: cinder/volume/drivers/netapp/iscsi.py:431 #, python-format -msgid "Rebooted guest OS of VM %s" +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:511 +msgid "Object is not a NetApp LUN." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:532 +#: cinder/volume/drivers/netapp/iscsi.py:543 #, python-format -msgid "Doing hard reboot of VM %s" +msgid "Message: %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:536 +#: cinder/volume/drivers/netapp/iscsi.py:545 #, python-format -msgid "Did hard reboot of VM %s" +msgid "Error getting lun attribute. Exception: %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:548 +#: cinder/volume/drivers/netapp/iscsi.py:600 #, python-format -msgid "instance - %s not present" +msgid "No need to extend volume %s as it is already the requested new size." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:567 +#: cinder/volume/drivers/netapp/iscsi.py:606 #, python-format -msgid "Powering off the VM %s" +msgid "Resizing lun %s directly to new size." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:572 +#: cinder/volume/drivers/netapp/iscsi.py:633 #, python-format -msgid "Powered off the VM %s" +msgid "Lun %(path)s geometry failed. 
Message - %(msg)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:576 +#: cinder/volume/drivers/netapp/iscsi.py:662 #, python-format -msgid "Unregistering the VM %s" +msgid "Moving lun %(name)s to %(new_name)s." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:579 +#: cinder/volume/drivers/netapp/iscsi.py:677 #, python-format -msgid "Unregistered the VM %s" +msgid "Resizing lun %s using sub clone to new size." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:581 +#: cinder/volume/drivers/netapp/iscsi.py:684 #, python-format -msgid "" -"In vmwareapi:vmops:destroy, got this exception while un-registering the " -"VM: %s" +msgid "%s cannot be sub clone resized as it is hosted on compressed volume" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:592 +#: cinder/volume/drivers/netapp/iscsi.py:690 #, python-format -msgid "Deleting contents of the VM %(name)s from datastore %(datastore_name)s" +msgid "%s cannot be sub clone resized as it contains no blocks." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:602 +#: cinder/volume/drivers/netapp/iscsi.py:707 #, python-format -msgid "Deleted contents of the VM %(name)s from datastore %(datastore_name)s" +msgid "Post clone resize lun %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:607 +#: cinder/volume/drivers/netapp/iscsi.py:718 #, python-format -msgid "" -"In vmwareapi:vmops:destroy, got this exception while deleting the VM " -"contents from the disk: %s" +msgid "Failure staging lun %s to tmp." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:615 -msgid "pause not supported for vmwareapi" +#: cinder/volume/drivers/netapp/iscsi.py:723 +#, python-format +msgid "Failure moving new cloned lun to %s." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:619 -msgid "unpause not supported for vmwareapi" +#: cinder/volume/drivers/netapp/iscsi.py:727 +#, python-format +msgid "Failure deleting staged tmp lun %s." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:633 +#: cinder/volume/drivers/netapp/iscsi.py:730 #, python-format -msgid "Suspending the VM %s " +msgid "Unknown exception in post clone resize lun %s." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:637 +#: cinder/volume/drivers/netapp/iscsi.py:732 #, python-format -msgid "Suspended the VM %s " +msgid "Exception details: %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:640 -msgid "instance is powered off and can not be suspended." +#: cinder/volume/drivers/netapp/iscsi.py:736 +msgid "Getting lun block count." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:643 +#: cinder/volume/drivers/netapp/iscsi.py:741 #, python-format -msgid "VM %s was already in suspended state. So returning without doing anything" +msgid "Failure getting lun info for %s." 
msgstr "" -#: cinder/virt/vmwareapi/vmops.py:656 +#: cinder/volume/drivers/netapp/iscsi.py:785 #, python-format -msgid "Resuming the VM %s" +msgid "Failed to get vol with required size and extra specs for volume: %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:661 +#: cinder/volume/drivers/netapp/iscsi.py:796 #, python-format -msgid "Resumed the VM %s " +msgid "Error provisioning vol %(name)s on %(volume)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:663 -msgid "instance is not in a suspended state" +#: cinder/volume/drivers/netapp/iscsi.py:841 +#, python-format +msgid "No iscsi service found for vserver %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:699 -msgid "get_diagnostics not implemented for vmwareapi" +#: cinder/volume/drivers/netapp/iscsi.py:982 +#, python-format +msgid "Cloned LUN with new name %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:757 +#: cinder/volume/drivers/netapp/iscsi.py:986 #, python-format -msgid "" -"Reconfiguring VM instance %(name)s to set the machine id with ip - " -"%(ip_addr)s" +msgid "No clonned lun named %s found on the filer" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:765 -#, python-format -msgid "" -"Reconfigured VM instance %(name)s to set the machine id with ip - " -"%(ip_addr)s" +#: cinder/volume/drivers/netapp/iscsi.py:1087 +msgid "Cluster ssc is not updated. No volume stats found." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:802 -#, python-format -msgid "Creating directory with path %s" +#: cinder/volume/drivers/netapp/iscsi.py:1149 +#: cinder/volume/drivers/netapp/nfs.py:1080 +msgid "Unsupported ONTAP version. ONTAP version 7.3.1 and above is supported." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:806 -#, python-format -msgid "Created directory with path %s" +#: cinder/volume/drivers/netapp/iscsi.py:1153 +#: cinder/volume/drivers/netapp/nfs.py:1084 +#: cinder/volume/drivers/netapp/utils.py:320 +msgid "Api version could not be determined." msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:89 +#: cinder/volume/drivers/netapp/iscsi.py:1164 #, python-format -msgid "Downloading image %s from glance image server" +msgid "Failed to get vol with required size for volume: %s" msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:103 +#: cinder/volume/drivers/netapp/iscsi.py:1273 #, python-format -msgid "Downloaded image %s from glance image server" +msgid "Error finding luns for volume %s. Verify volume exists." msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:108 +#: cinder/volume/drivers/netapp/iscsi.py:1390 #, python-format -msgid "Uploading image %s to the Glance image server" +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:129 +#: cinder/volume/drivers/netapp/iscsi.py:1393 #, python-format -msgid "Uploaded image %s to the Glance image server" +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:139 -#, python-format -msgid "Getting image size for the image %s" +#: cinder/volume/drivers/netapp/iscsi.py:1456 +msgid "Volume refresh job already running. Returning..." msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:143 +#: cinder/volume/drivers/netapp/iscsi.py:1462 #, python-format -msgid "Got image size of %(size)s for the image %(image)s" +msgid "Error refreshing vol capacity. 
Message: %s" msgstr "" -#: cinder/virt/xenapi/fake.py:553 cinder/virt/xenapi/fake.py:652 -#: cinder/virt/xenapi/fake.py:670 cinder/virt/xenapi/fake.py:732 -msgid "Raising NotImplemented" +#: cinder/volume/drivers/netapp/iscsi.py:1470 +#, python-format +msgid "Refreshing capacity info for %s." msgstr "" -#: cinder/virt/xenapi/fake.py:555 +#: cinder/volume/drivers/netapp/nfs.py:104 +#: cinder/volume/drivers/netapp/nfs.py:211 #, python-format -msgid "xenapi.fake does not have an implementation for %s" +msgid "NFS file %s not discovered." msgstr "" -#: cinder/virt/xenapi/fake.py:589 +#: cinder/volume/drivers/netapp/nfs.py:224 #, python-format -msgid "Calling %(localname)s %(impl)s" +msgid "Copied image to volume %s" msgstr "" -#: cinder/virt/xenapi/fake.py:594 +#: cinder/volume/drivers/netapp/nfs.py:230 #, python-format -msgid "Calling getter %s" +msgid "Registering image in cache %s" msgstr "" -#: cinder/virt/xenapi/fake.py:654 +#: cinder/volume/drivers/netapp/nfs.py:237 #, python-format msgid "" -"xenapi.fake does not have an implementation for %s or it has been called " -"with the wrong number of arguments" +"Exception while registering image %(image_id)s in cache. Exception: " +"%(exc)s" msgstr "" -#: cinder/virt/xenapi/host.py:67 +#: cinder/volume/drivers/netapp/nfs.py:250 #, python-format -msgid "" -"Instance %(name)s running on %(host)s could not be found in the database:" -" assuming it is a worker VM and skipping migration to a new host" +msgid "Found cache file for image %(image_id)s on share %(share)s" msgstr "" -#: cinder/virt/xenapi/host.py:137 +#: cinder/volume/drivers/netapp/nfs.py:263 #, python-format -msgid "Unable to get SR for this host: %s" +msgid "Cloning img from cache for %s" msgstr "" -#: cinder/virt/xenapi/host.py:169 -msgid "Unable to get updated status" +#: cinder/volume/drivers/netapp/nfs.py:271 +msgid "Image cache cleaning in progress. Returning... " msgstr "" -#: cinder/virt/xenapi/host.py:172 -#, python-format -msgid "The call to %(method)s returned an error: %(e)s." +#: cinder/volume/drivers/netapp/nfs.py:282 +msgid "Image cache cleaning in progress." msgstr "" -#: cinder/virt/xenapi/network_utils.py:37 +#: cinder/volume/drivers/netapp/nfs.py:293 #, python-format -msgid "Found non-unique network for name_label %s" +msgid "Cleaning cache for share %s." msgstr "" -#: cinder/virt/xenapi/network_utils.py:55 +#: cinder/volume/drivers/netapp/nfs.py:298 #, python-format -msgid "Found non-unique network for bridge %s" +msgid "Files to be queued for deletion %s" msgstr "" -#: cinder/virt/xenapi/network_utils.py:58 +#: cinder/volume/drivers/netapp/nfs.py:305 #, python-format -msgid "Found no network for bridge %s" +msgid "Exception during cache cleaning %(share)s. Message - %(ex)s" msgstr "" -#: cinder/virt/xenapi/pool.py:111 -#, python-format -msgid "Unable to eject %(host)s from the pool; pool not empty" +#: cinder/volume/drivers/netapp/nfs.py:311 +msgid "Image cache cleaning done." 
msgstr "" -#: cinder/virt/xenapi/pool.py:126 +#: cinder/volume/drivers/netapp/nfs.py:336 #, python-format -msgid "Unable to eject %(host)s from the pool; No master found" +msgid "Bytes to free %s" msgstr "" -#: cinder/virt/xenapi/pool.py:143 +#: cinder/volume/drivers/netapp/nfs.py:343 #, python-format -msgid "Pool-Join failed: %(e)s" +msgid "Delete file path %s" msgstr "" -#: cinder/virt/xenapi/pool.py:146 +#: cinder/volume/drivers/netapp/nfs.py:358 #, python-format -msgid "Unable to join %(host)s in the pool" +msgid "Deleting file at path %s" msgstr "" -#: cinder/virt/xenapi/pool.py:162 +#: cinder/volume/drivers/netapp/nfs.py:363 #, python-format -msgid "Pool-eject failed: %(e)s" +msgid "Exception during deleting %s" msgstr "" -#: cinder/virt/xenapi/pool.py:174 +#: cinder/volume/drivers/netapp/nfs.py:395 #, python-format -msgid "Unable to set up pool: %(e)s." +msgid "Unexpected exception in cloning image %(image_id)s. Message: %(msg)s" msgstr "" -#: cinder/virt/xenapi/pool.py:185 +#: cinder/volume/drivers/netapp/nfs.py:411 #, python-format -msgid "Pool-set_name_label failed: %(e)s" +msgid "Cloning image %s from cache" msgstr "" -#: cinder/virt/xenapi/vif.py:103 +#: cinder/volume/drivers/netapp/nfs.py:415 #, python-format -msgid "Found no PIF for device %s" +msgid "Cache share: %s" msgstr "" -#: cinder/virt/xenapi/vif.py:122 +#: cinder/volume/drivers/netapp/nfs.py:425 #, python-format -msgid "" -"PIF %(pif_rec['uuid'])s for network %(bridge)s has VLAN id %(pif_vlan)d. " -"Expected %(vlan_num)d" +msgid "Unexpected exception during image cloning in share %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:218 -msgid "Created VM" -msgstr "" - -#: cinder/virt/xenapi/vm_utils.py:245 +#: cinder/volume/drivers/netapp/nfs.py:431 #, python-format -msgid "VBD not found in instance %s" +msgid "Cloning image %s directly in share" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:262 +#: cinder/volume/drivers/netapp/nfs.py:436 #, python-format -msgid "VBD %s already detached" +msgid "Share is cloneable %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:265 +#: cinder/volume/drivers/netapp/nfs.py:443 #, python-format -msgid "VBD %(vbd_ref)s detach rejected, attempt %(num_attempt)d/%(max_attempts)d" +msgid "Image is raw %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:270 +#: cinder/volume/drivers/netapp/nfs.py:450 #, python-format -msgid "Unable to unplug VBD %s" +msgid "Image will locally be converted to raw %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:275 +#: cinder/volume/drivers/netapp/nfs.py:457 #, python-format -msgid "Reached maximum number of retries trying to unplug VBD %s" +msgid "Converted to raw, but format is now %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:286 +#: cinder/volume/drivers/netapp/nfs.py:467 #, python-format -msgid "Unable to destroy VBD %s" +msgid "Performing post clone for %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:305 -#, python-format -msgid "Creating %(vbd_type)s-type VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +#: cinder/volume/drivers/netapp/nfs.py:474 +msgid "NFS file could not be discovered." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:308 -#, python-format -msgid "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s." 
+#: cinder/volume/drivers/netapp/nfs.py:478 +msgid "Checking file for resize" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:319 +#: cinder/volume/drivers/netapp/nfs.py:482 #, python-format -msgid "Unable to destroy VDI %s" +msgid "Resizing file to %sG" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:336 -#, python-format -msgid "" -"Created VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s)" -" on %(sr_ref)s." +#: cinder/volume/drivers/netapp/nfs.py:488 +msgid "Resizing image file failed." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:345 -#, python-format -msgid "Copied VDI %(vdi_ref)s from VDI %(vdi_to_copy_ref)s on %(sr_ref)s." +#: cinder/volume/drivers/netapp/nfs.py:510 +msgid "Discover file retries exhausted." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:353 +#: cinder/volume/drivers/netapp/nfs.py:529 #, python-format -msgid "Cloned VDI %(vdi_ref)s from VDI %(vdi_to_clone_ref)s" +msgid "Image location not in the expected format %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:372 +#: cinder/volume/drivers/netapp/nfs.py:557 #, python-format -msgid "No primary VDI found for %(vm_ref)s" +msgid "Found possible share matches %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:379 -#, python-format -msgid "Snapshotting with label '%(label)s'" +#: cinder/volume/drivers/netapp/nfs.py:561 +msgid "Unexpected exception while short listing used share." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:392 +#: cinder/volume/drivers/netapp/nfs.py:599 #, python-format -msgid "Created snapshot %(template_vm_ref)s" +msgid "Extending volume %s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:431 +#: cinder/volume/drivers/netapp/nfs.py:710 #, python-format -msgid "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s" +msgid "Shares on vserver %s will only be used for provisioning." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:583 -#, python-format -msgid "Creating blank HD of size %(req_size)d gigs" +#: cinder/volume/drivers/netapp/nfs.py:714 +#: cinder/volume/drivers/netapp/nfs.py:892 +msgid "No vserver set in config. SSC will be disabled." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:634 +#: cinder/volume/drivers/netapp/nfs.py:757 #, python-format -msgid "" -"Fast cloning is only supported on default local SR of type ext. SR on " -"this system was found to be of type %(sr_type)s. Ignoring the cow flag." +msgid "Exception creating vol %(name)s on share %(share)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:724 +#: cinder/volume/drivers/netapp/nfs.py:765 #, python-format -msgid "" -"download_vhd %(image)s attempt %(attempt_num)d/%(max_attempts)d from " -"%(glance_host)s:%(glance_port)s" +msgid "Volume %s could not be created on shares." 
msgstr "" -#: cinder/virt/xenapi/vm_utils.py:734 +#: cinder/volume/drivers/netapp/nfs.py:815 #, python-format -msgid "download_vhd failed: %r" +msgid "No interface found on cluster for ip %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:750 +#: cinder/volume/drivers/netapp/nfs.py:856 #, python-format -msgid "Asking xapi to fetch vhd image %(image)s" +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " msgstr "" -#: cinder/virt/xenapi/vm_utils.py:760 +#: cinder/volume/drivers/netapp/nfs.py:862 #, python-format msgid "" -"xapi 'download_vhd' returned VDI of type '%(vdi_type)s' with UUID " -"'%(vdi_uuid)s'" +"Cloning with params volume %(volume)s, src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:789 -#, python-format -msgid "vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes=%(vdi_size_bytes)d" +#: cinder/volume/drivers/netapp/nfs.py:924 +msgid "No cluster ssc stats found. Wait for next volume stats update." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:805 -#, python-format -msgid "image_size_bytes=%(size_bytes)d, allowed_size_bytes=%(allowed_size_bytes)d" +#: cinder/volume/drivers/netapp/nfs.py:941 +msgid "No shares found hence skipping ssc refresh." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:809 +#: cinder/volume/drivers/netapp/nfs.py:978 +#: cinder/volume/drivers/netapp/nfs.py:1221 #, python-format -msgid "" -"Image size %(size_bytes)d exceeded instance_type allowed size " -"%(allowed_size_bytes)d" +msgid "Shortlisted del elg files %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:831 +#: cinder/volume/drivers/netapp/nfs.py:983 +#: cinder/volume/drivers/netapp/nfs.py:1226 #, python-format -msgid "Fetching image %(image)s, type %(image_type_str)" +msgid "Getting file usage for %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:844 +#: cinder/volume/drivers/netapp/nfs.py:988 +#: cinder/volume/drivers/netapp/nfs.py:1231 #, python-format -msgid "Size for image %(image)s: %(virtual_size)d" +msgid "file-usage for path %(path)s is %(bytes)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:853 +#: cinder/volume/drivers/netapp/nfs.py:1005 +#: cinder/volume/drivers/netapp/nfs.py:1268 #, python-format -msgid "" -"Kernel/Ramdisk image is too large: %(vdi_size)d bytes, max %(max_size)d " -"bytes" +msgid "Share match found for ip %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:870 +#: cinder/volume/drivers/netapp/nfs.py:1007 +#: cinder/volume/drivers/netapp/nfs.py:1270 #, python-format -msgid "Copying VDI %s to /boot/guest on dom0" +msgid "No share match found for ip %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:884 +#: cinder/volume/drivers/netapp/nfs.py:1038 #, python-format -msgid "Kernel/Ramdisk VDI %s destroyed" +msgid "Found volume %(vol)s for share %(share)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:895 -msgid "Failed to fetch glance image" +#: cinder/volume/drivers/netapp/nfs.py:1129 +#, python-format +msgid "No storage path found for export path %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:934 +#: cinder/volume/drivers/netapp/nfs.py:1139 #, python-format -msgid "Detected %(image_type_str)s format for image %(image_ref)s" +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:955 +#: cinder/volume/drivers/netapp/ssc_utils.py:241 #, python-format -msgid "Looking up vdi %s for PV kernel" +msgid "Unexpected error while creating ssc vol list. 
Message - %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:973 +#: cinder/volume/drivers/netapp/ssc_utils.py:272 #, python-format -msgid "Unknown image format %(disk_image_type)s" +msgid "Exception querying aggr options. %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1016 +#: cinder/volume/drivers/netapp/ssc_utils.py:313 #, python-format -msgid "VDI %s is still available" +msgid "Exception querying sis information. %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1059 +#: cinder/volume/drivers/netapp/ssc_utils.py:347 #, python-format -msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgid "Exception querying mirror information. %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1061 +#: cinder/volume/drivers/netapp/ssc_utils.py:379 #, python-format -msgid "(VM_UTILS) xenapi power_state -> |%s|" +msgid "Exception querying storage disk. %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1088 +#: cinder/volume/drivers/netapp/ssc_utils.py:421 #, python-format -msgid "Unable to parse rrd of %(vm_uuid)s" +msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1108 +#: cinder/volume/drivers/netapp/ssc_utils.py:455 #, python-format -msgid "Re-scanning SR %s" +msgid "Successfully completed stale refresh job for %(server)s and vserver %(vs)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1136 +#: cinder/volume/drivers/netapp/ssc_utils.py:482 #, python-format -msgid "Flag sr_matching_filter '%s' does not respect formatting convention" +msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1154 -msgid "" -"XenAPI is unable to find a Storage Repository to install guest instances " -"on. Please check your configuration and/or configure the flag " -"'sr_matching_filter'" +#: cinder/volume/drivers/netapp/ssc_utils.py:488 +#, python-format +msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1167 -msgid "Cannot find SR of content-type ISO" +#: cinder/volume/drivers/netapp/ssc_utils.py:500 +msgid "Backend not a VolumeDriver." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1175 -#, python-format -msgid "ISO: looking at SR %(sr_rec)s" +#: cinder/volume/drivers/netapp/ssc_utils.py:502 +msgid "Backend server not NaServer." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1177 -msgid "ISO: not iso content" +#: cinder/volume/drivers/netapp/ssc_utils.py:505 +msgid "ssc job in progress. Returning... " msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1180 -msgid "ISO: iso content_type, no 'i18n-key' key" +#: cinder/volume/drivers/netapp/ssc_utils.py:517 +msgid "refresh stale ssc job in progress. Returning... " msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1183 -msgid "ISO: iso content_type, i18n-key value not 'local-storage-iso'" +#: cinder/volume/drivers/netapp/ssc_utils.py:616 +msgid "Fatal error: User not permitted to query NetApp volumes." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1187 -msgid "ISO: SR MATCHing our criteria" +#: cinder/volume/drivers/netapp/ssc_utils.py:623 +#, python-format +msgid "" +"The user does not have access or sufficient privileges to use all ssc " +"apis. The ssc features %s may not work as expected." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1189 -msgid "ISO: ISO, looking to see if it is host local" +#: cinder/volume/drivers/netapp/utils.py:122 +msgid "ems executed successfully." 
msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1192 +#: cinder/volume/drivers/netapp/utils.py:124 #, python-format -msgid "ISO: PBD %(pbd_ref)s disappeared" +msgid "Failed to invoke ems. Message : %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1195 -#, python-format -msgid "ISO: PBD matching, want %(pbd_rec)s, have %(host)s" +#: cinder/volume/drivers/netapp/utils.py:137 +msgid "" +"It is not the recommended way to use drivers by NetApp. Please use " +"NetAppDriver to achieve the functionality." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1198 -msgid "ISO: SR with local PBD" +#: cinder/volume/drivers/netapp/utils.py:160 +msgid "Requires an NaServer instance." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1220 -#, python-format -msgid "" -"Unable to obtain RRD XML for VM %(vm_uuid)s with server details: " -"%(server)s." +#: cinder/volume/drivers/netapp/utils.py:317 +msgid "Unsupported Clustered Data ONTAP version." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1236 +#: cinder/volume/drivers/nexenta/iscsi.py:99 #, python-format -msgid "Unable to obtain RRD XML updates with server details: %(server)s." +msgid "Volume %s does not exist in Nexenta SA" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1290 +#: cinder/volume/drivers/nexenta/iscsi.py:150 #, python-format -msgid "Invalid statistics data from Xenserver: %s" +msgid "Extending volume: %(id)s New size: %(size)s GB" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1343 +#: cinder/volume/drivers/nexenta/iscsi.py:166 #, python-format -msgid "VHD %(vdi_uuid)s has parent %(parent_ref)s" +msgid "Volume %s does not exist, it seems it was already deleted." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1417 +#: cinder/volume/drivers/nexenta/iscsi.py:179 #, python-format -msgid "" -"Parent %(parent_uuid)s doesn't match original parent " -"%(original_parent_uuid)s, waiting for coalesce..." +msgid "Cannot delete snapshot %(origin): %(exc)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1427 +#: cinder/volume/drivers/nexenta/iscsi.py:190 #, python-format -msgid "VHD coalesce attempts exceeded (%(max_attempts)d), giving up..." +msgid "Creating temp snapshot of the original volume: %(volume_name)s@%(name)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1462 +#: cinder/volume/drivers/nexenta/iscsi.py:200 +#: cinder/volume/drivers/nexenta/nfs.py:200 #, python-format -msgid "Timeout waiting for device %s to be created" +msgid "Volume creation failed, deleting created snapshot %(volume_name)s@%(name)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1473 +#: cinder/volume/drivers/nexenta/iscsi.py:205 +#: cinder/volume/drivers/nexenta/nfs.py:205 #, python-format -msgid "Plugging VBD %s ... " +msgid "Failed to delete zfs snapshot %(volume_name)s@%(name)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1476 +#: cinder/volume/drivers/nexenta/iscsi.py:223 #, python-format -msgid "Plugging VBD %s done." +msgid "Enter: migrate_volume: id=%(id)s, host=%(host)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1478 +#: cinder/volume/drivers/nexenta/iscsi.py:250 #, python-format -msgid "VBD %(vbd_ref)s plugged as %(orig_dev)s" +msgid "Remote NexentaStor appliance at %s should be SSH-bound." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1481 +#: cinder/volume/drivers/nexenta/iscsi.py:267 #, python-format -msgid "VBD %(vbd_ref)s plugged into wrong dev, remapping to %(dev)s" +msgid "" +"Cannot send source snapshot %(src)s to destination %(dst)s. 
Reason: " +"%(exc)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1490 +#: cinder/volume/drivers/nexenta/iscsi.py:275 #, python-format -msgid "Destroying VBD for VDI %s ... " +msgid "" +"Cannot delete temporary source snapshot %(src)s on NexentaStor Appliance:" +" %(exc)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1498 +#: cinder/volume/drivers/nexenta/iscsi.py:281 #, python-format -msgid "Destroying VBD for VDI %s done." +msgid "Cannot delete source volume %(volume)s on NexentaStor Appliance: %(exc)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1511 +#: cinder/volume/drivers/nexenta/iscsi.py:318 #, python-format -msgid "Running pygrub against %s" +msgid "Snapshot %s does not exist, it seems it was already deleted." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1518 +#: cinder/volume/drivers/nexenta/iscsi.py:439 +#: cinder/volume/drivers/windows/windows_utils.py:230 #, python-format -msgid "Found Xen kernel %s" -msgstr "" - -#: cinder/virt/xenapi/vm_utils.py:1520 -msgid "No Xen kernel found. Booting HVM." +msgid "Ignored target creation error \"%s\" while ensuring export" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1533 -msgid "Partitions:" +#: cinder/volume/drivers/nexenta/iscsi.py:449 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1539 +#: cinder/volume/drivers/nexenta/iscsi.py:461 #, python-format -msgid " %(num)s: %(ptype)s %(size)d sectors" +msgid "Ignored target group member addition error \"%s\" while ensuring export" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1565 +#: cinder/volume/drivers/nexenta/iscsi.py:471 #, python-format -msgid "" -"Writing partition table %(primary_first)d %(primary_last)d to " -"%(dev_path)s..." +msgid "Ignored LU creation error \"%s\" while ensuring export" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1578 +#: cinder/volume/drivers/nexenta/iscsi.py:481 #, python-format -msgid "Writing partition table %s done." +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1632 +#: cinder/volume/drivers/nexenta/iscsi.py:514 #, python-format msgid "" -"Starting sparse_copy src=%(src_path)s dst=%(dst_path)s " -"virtual_size=%(virtual_size)d block_size=%(block_size)d" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1664 +#: cinder/volume/drivers/nexenta/iscsi.py:522 #, python-format msgid "" -"Finished sparse_copy in %(duration).2f secs, %(compression_pct).2f%% " -"reduction in size" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1714 -msgid "" -"XenServer tools installed in this image are capable of network injection." 
-" Networking files will not bemanipulated" +#: cinder/volume/drivers/nexenta/jsonrpc.py:83 +#, python-format +msgid "Sending JSON data: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1722 -msgid "" -"XenServer tools are present in this image but are not capable of network " -"injection" +#: cinder/volume/drivers/nexenta/jsonrpc.py:88 +msgid "No headers in server response" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1726 -msgid "XenServer tools are not installed in this image" +#: cinder/volume/drivers/nexenta/jsonrpc.py:89 +msgid "Bad response from server" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1742 -msgid "Manipulating interface files directly" +#: cinder/volume/drivers/nexenta/jsonrpc.py:90 +#, python-format +msgid "Auto switching to HTTPS connection to %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1751 +#: cinder/volume/drivers/nexenta/jsonrpc.py:96 #, python-format -msgid "Failed to mount filesystem (expected for non-linux instances): %s" +msgid "Got response: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:131 cinder/virt/xenapi/vmops.py:722 +#: cinder/volume/drivers/nexenta/nfs.py:85 #, python-format -msgid "Updating progress to %(progress)d" +msgid "Volume %s does not exist in Nexenta Store appliance" msgstr "" -#: cinder/virt/xenapi/vmops.py:231 +#: cinder/volume/drivers/nexenta/nfs.py:89 #, python-format -msgid "Attempted to power on non-existent instance bad instance id %s" +msgid "Folder %s does not exist in Nexenta Store appliance" msgstr "" -#: cinder/virt/xenapi/vmops.py:233 -msgid "Starting instance" +#: cinder/volume/drivers/nexenta/nfs.py:114 +#, python-format +msgid "Creating folder on Nexenta Store %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:303 -msgid "Removing kernel/ramdisk files from dom0" +#: cinder/volume/drivers/nexenta/nfs.py:146 +#, python-format +msgid "Cannot destroy created folder: %(vol)s/%(folder)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:358 -msgid "Failed to spawn, rolling back" +#: cinder/volume/drivers/nexenta/nfs.py:176 +#, python-format +msgid "Cannot destroy cloned folder: %(vol)s/%(folder)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:443 -msgid "Detected ISO image type, creating blank VM for install" +#: cinder/volume/drivers/nexenta/nfs.py:227 +#, python-format +msgid "Folder %s does not exist, it was already deleted." msgstr "" -#: cinder/virt/xenapi/vmops.py:462 -msgid "Auto configuring disk, attempting to resize partition..." +#: cinder/volume/drivers/nexenta/nfs.py:237 +#: cinder/volume/drivers/nexenta/nfs.py:268 +#, python-format +msgid "Snapshot %s does not exist, it was already deleted." msgstr "" -#: cinder/virt/xenapi/vmops.py:515 +#: cinder/volume/drivers/nexenta/nfs.py:302 #, python-format -msgid "Invalid value for injected_files: %r" +msgid "Creating regular file: %s.This may take some time." msgstr "" -#: cinder/virt/xenapi/vmops.py:520 +#: cinder/volume/drivers/nexenta/nfs.py:313 #, python-format -msgid "Injecting file path: '%s'" +msgid "Regular file: %s created." 
msgstr "" -#: cinder/virt/xenapi/vmops.py:527 -msgid "Setting admin password" +#: cinder/volume/drivers/nexenta/nfs.py:365 +#, python-format +msgid "Sharing folder %s on Nexenta Store" msgstr "" -#: cinder/virt/xenapi/vmops.py:531 -msgid "Resetting network" +#: cinder/volume/drivers/nexenta/nfs.py:393 +#, python-format +msgid "Shares loaded: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:538 -msgid "Setting VCPU weight" +#: cinder/volume/drivers/nexenta/utils.py:46 +#, python-format +msgid "Invalid value: \"%s\"" msgstr "" -#: cinder/virt/xenapi/vmops.py:544 -msgid "Starting VM" +#: cinder/volume/drivers/san/hp_lefthand.py:93 +#, python-format +msgid "CLIQ command returned %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:551 +#: cinder/volume/drivers/san/hp_lefthand.py:99 #, python-format -msgid "" -"Latest agent build for %(hypervisor)s/%(os)s/%(architecture)s is " -"%(version)s" +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:554 +#: cinder/volume/drivers/san/hp_lefthand.py:107 #, python-format -msgid "No agent build found for %(hypervisor)s/%(os)s/%(architecture)s" +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:561 -msgid "Waiting for instance state to become running" +#: cinder/volume/drivers/san/hp_lefthand.py:137 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:573 -msgid "Querying agent version" +#: cinder/volume/drivers/san/hp_lefthand.py:190 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:576 +#: cinder/volume/drivers/san/hp_lefthand.py:246 #, python-format -msgid "Instance agent version: %s" +msgid "Snapshot info: %(name)s => %(attributes)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:581 -#, python-format -msgid "Updating Agent to %s" +#: cinder/volume/drivers/san/hp_lefthand.py:321 +msgid "local_path not supported" msgstr "" -#: cinder/virt/xenapi/vmops.py:616 -#, python-format -msgid "No opaque_ref could be determined for '%s'." +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" msgstr "" -#: cinder/virt/xenapi/vmops.py:670 -msgid "Finished snapshot and upload for VM" +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:79 +#, python-format +msgid "Cannot parse list-view output: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:677 -msgid "Starting snapshot for VM" +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:686 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:166 #, python-format -msgid "Unable to Snapshot instance: %(exc)s" +msgid "Invalid hp3parclient version. Version %s or greater required." 
msgstr "" -#: cinder/virt/xenapi/vmops.py:702 -msgid "Failed to transfer vhd to new host" +#: cinder/volume/drivers/san/hp/hp_3par_common.py:179 +#, python-format +msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:770 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:193 #, python-format -msgid "Resizing down VDI %(cow_uuid)s from %(old_gb)dGB to %(new_gb)dGB" +msgid "HP3PARCommon %(common_ver)s, hp3parclient %(rest_ver)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:893 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:212 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:494 #, python-format -msgid "Resizing up VDI %(vdi_uuid)s from %(old_gb)dGB to %(new_gb)dGB" +msgid "CPG (%s) doesn't exist on array" msgstr "" -#: cinder/virt/xenapi/vmops.py:901 -msgid "Resize complete" +#: cinder/volume/drivers/san/hp/hp_3par_common.py:228 +#, python-format +msgid "Failed to get domain because CPG (%s) doesn't exist on array." msgstr "" -#: cinder/virt/xenapi/vmops.py:928 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:247 #, python-format -msgid "Failed to query agent version: %(resp)r" +msgid "Error extending volume %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:949 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:347 #, python-format -msgid "domid changed from %(domid)s to %(newdomid)s" +msgid "command %s failed" msgstr "" -#: cinder/virt/xenapi/vmops.py:962 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:390 #, python-format -msgid "Failed to update agent: %(resp)r" +msgid "Error running ssh command: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:983 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:586 #, python-format -msgid "Failed to exchange keys: %(resp)r" +msgid "VV Set %s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:998 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:633 #, python-format -msgid "Failed to update password: %(resp)r" +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." msgstr "" -#: cinder/virt/xenapi/vmops.py:1023 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:684 #, python-format -msgid "Failed to inject file: %(resp)r" +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." msgstr "" -#: cinder/virt/xenapi/vmops.py:1032 -msgid "VM already halted, skipping shutdown..." 
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:752 +#, python-format +msgid "Volume (%s) already exists on array" msgstr "" -#: cinder/virt/xenapi/vmops.py:1036 -msgid "Shutting down VM" +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1004 +#, python-format +msgid "Failure in update_volume_key_value_pair: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1054 -msgid "Unable to find VBD for VM" +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1019 +#, python-format +msgid "Failure in clear_volume_key_value_pair: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1097 -msgid "Using RAW or VHD, skipping kernel and ramdisk deletion" +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1031 +#, python-format +msgid "Error attaching volume %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1104 -msgid "instance has a kernel or ramdisk but not both" +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1039 +#, python-format +msgid "Error detaching volume %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1111 -msgid "kernel/ramdisk files removed" +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:124 +#, python-format +msgid "Invalid IP address format '%s'" msgstr "" -#: cinder/virt/xenapi/vmops.py:1121 -msgid "VM destroyed" +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:158 +#, python-format +msgid "" +"Found invalid iSCSI IP address(es) in configuration option(s) " +"hp3par_iscsi_ips or iscsi_ip_address '%s'." msgstr "" -#: cinder/virt/xenapi/vmops.py:1147 -msgid "Destroying VM" +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:164 +msgid "At least one valid iSCSI IP address must be set." msgstr "" -#: cinder/virt/xenapi/vmops.py:1169 -msgid "VM is not present, skipping destroy..." +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:266 +msgid "Least busy iSCSI port not found, using first iSCSI port in list." msgstr "" -#: cinder/virt/xenapi/vmops.py:1222 +#: cinder/volume/drivers/vmware/api.py:75 #, python-format -msgid "Instance is already in Rescue Mode: %s" +msgid "Failure while invoking function: %(func)s. Error: %(excep)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1296 +#: cinder/volume/drivers/vmware/api.py:162 #, python-format -msgid "Found %(instance_count)d hung reboots older than %(timeout)d seconds" +msgid "Error while terminating session: %s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1300 -msgid "Automatically hard rebooting" +#: cinder/volume/drivers/vmware/api.py:165 +msgid "Successfully established connection to the server." msgstr "" -#: cinder/virt/xenapi/vmops.py:1363 +#: cinder/volume/drivers/vmware/api.py:172 #, python-format -msgid "Setting migration %(migration_id)s to error: %(reason)s" +msgid "Error while logging out the user: %s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1374 +#: cinder/volume/drivers/vmware/api.py:218 #, python-format msgid "" -"Automatically confirming migration %(migration_id)s for instance " -"%(instance_uuid)s" +"Not authenticated error occurred. Will create session and try API call " +"again: %s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1379 +#: cinder/volume/drivers/vmware/api.py:258 #, python-format -msgid "Instance %(instance_uuid)s not found" -msgstr "" - -#: cinder/virt/xenapi/vmops.py:1383 -msgid "In ERROR state" +msgid "Task: %(task)s progress: %(prog)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1389 +#: cinder/volume/drivers/vmware/api.py:262 #, python-format -msgid "In %(task_state)s task_state, not RESIZE_VERIFY" +msgid "Task %s status: success."
msgstr "" -#: cinder/virt/xenapi/vmops.py:1396 +#: cinder/volume/drivers/vmware/api.py:266 +#: cinder/volume/drivers/vmware/api.py:271 #, python-format -msgid "Error auto-confirming resize: %(e)s. Will retry later." -msgstr "" - -#: cinder/virt/xenapi/vmops.py:1418 -msgid "Could not get bandwidth info." +msgid "Task: %(task)s failed with error: %(err)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1469 -msgid "Injecting network info to xenstore" +#: cinder/volume/drivers/vmware/api.py:290 +msgid "Lease is ready." msgstr "" -#: cinder/virt/xenapi/vmops.py:1483 -msgid "Creating vifs" +#: cinder/volume/drivers/vmware/api.py:294 +msgid "Lease initializing..." msgstr "" -#: cinder/virt/xenapi/vmops.py:1492 +#: cinder/volume/drivers/vmware/api.py:304 #, python-format -msgid "Creating VIF for network %(network_ref)s" +msgid "Error: unknown lease state %s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1495 +#: cinder/volume/drivers/vmware/io_util.py:51 #, python-format -msgid "Created VIF %(vif_ref)s, network %(network_ref)s" +msgid "Read %(bytes)s out of %(max)s from ThreadSafePipe." msgstr "" -#: cinder/virt/xenapi/vmops.py:1520 -msgid "Injecting hostname to xenstore" +#: cinder/volume/drivers/vmware/io_util.py:56 +#, python-format +msgid "Completed transfer of size %s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1545 +#: cinder/volume/drivers/vmware/io_util.py:102 #, python-format -msgid "" -"The agent call to %(method)s returned an invalid response: %(ret)r. " -"path=%(path)s; args=%(args)r" +msgid "Initiating image service update on image: %(image)s with meta: %(meta)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1566 +#: cinder/volume/drivers/vmware/io_util.py:117 #, python-format -msgid "TIMEOUT: The call to %(method)s timed out. args=%(args)r" +msgid "Glance image: %s is now active." msgstr "" -#: cinder/virt/xenapi/vmops.py:1570 +#: cinder/volume/drivers/vmware/io_util.py:123 #, python-format -msgid "" -"NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. " -"args=%(args)r" +msgid "Glance image: %s is in killed state." msgstr "" -#: cinder/virt/xenapi/vmops.py:1575 +#: cinder/volume/drivers/vmware/io_util.py:132 #, python-format -msgid "The call to %(method)s returned an error: %(e)s. args=%(args)r" +msgid "Glance image %(id)s is in unknown state - %(state)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1661 +#: cinder/volume/drivers/vmware/read_write_util.py:171 #, python-format -msgid "OpenSSL error: %s" +msgid "" +"Exception during HTTP connection close in VMwareHTTPWrite. Exception is " +"%s." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:52 -msgid "creating sr within volume_utils" +#: cinder/volume/drivers/vmware/read_write_util.py:203 +#: cinder/volume/drivers/vmware/read_write_util.py:292 +msgid "Could not retrieve URL from lease." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:55 cinder/virt/xenapi/volume_utils.py:83 +#: cinder/volume/drivers/vmware/read_write_util.py:206 #, python-format -msgid "type is = %s" +msgid "Opening vmdk url: %s for write." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:58 cinder/virt/xenapi/volume_utils.py:86 +#: cinder/volume/drivers/vmware/read_write_util.py:231 #, python-format -msgid "name = %s" +msgid "Written %s bytes to vmdk." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:71 +#: cinder/volume/drivers/vmware/read_write_util.py:242 +#: cinder/volume/drivers/vmware/read_write_util.py:318 #, python-format -msgid "Created %(label)s as %(sr_ref)s." +msgid "Updating progress to %s percent." 
msgstr "" -#: cinder/virt/xenapi/volume_utils.py:76 cinder/virt/xenapi/volume_utils.py:174 -msgid "Unable to create Storage Repository" +#: cinder/volume/drivers/vmware/read_write_util.py:258 +#: cinder/volume/drivers/vmware/read_write_util.py:334 +msgid "Lease released." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:80 -msgid "introducing sr within volume_utils" +#: cinder/volume/drivers/vmware/read_write_util.py:260 +#: cinder/volume/drivers/vmware/read_write_util.py:336 +#, python-format +msgid "Lease is already in state: %s." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:103 cinder/virt/xenapi/volume_utils.py:170 -#: cinder/virt/xenapi/volumeops.py:156 +#: cinder/volume/drivers/vmware/read_write_util.py:295 #, python-format -msgid "Introduced %(label)s as %(sr_ref)s." +msgid "Opening vmdk url: %s for read." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:106 -msgid "Creating pbd for SR" +#: cinder/volume/drivers/vmware/read_write_util.py:307 +#, python-format +msgid "Read %s bytes from vmdk." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:108 -msgid "Plugging SR" +#: cinder/volume/drivers/vmware/vim.py:150 +#, python-format +msgid "Error(s): %s occurred in the call to RetrievePropertiesEx." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:116 cinder/virt/xenapi/volumeops.py:160 -msgid "Unable to introduce Storage Repository" +#: cinder/volume/drivers/vmware/vim.py:189 +#, python-format +msgid "No such SOAP method %(attr)s. Detailed error: %(excep)s." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:127 cinder/virt/xenapi/volumeops.py:50 -msgid "Unable to get SR using uuid" +#: cinder/volume/drivers/vmware/vim.py:198 +#, python-format +msgid "httplib error in %(attr)s: %(excep)s." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:129 +#: cinder/volume/drivers/vmware/vim.py:209 #, python-format -msgid "Forgetting SR %s..." +msgid "Socket error in %(attr)s: %(excep)s." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:137 -msgid "Unable to forget Storage Repository" +#: cinder/volume/drivers/vmware/vim.py:218 +#, python-format +msgid "Type error in %(attr)s: %(excep)s." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:157 +#: cinder/volume/drivers/vmware/vim.py:225 #, python-format -msgid "Introducing %s..." +msgid "Error in %(attr)s. Detailed error: %(excep)s." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:186 +#: cinder/volume/drivers/vmware/vmdk.py:112 #, python-format -msgid "Unable to find SR from VBD %s" +msgid "Returning spec value %s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:204 +#: cinder/volume/drivers/vmware/vmdk.py:115 #, python-format -msgid "Ignoring exception %(exc)s when getting PBDs for %(sr_ref)s" +msgid "Invalid spec value: %s specified." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:210 +#: cinder/volume/drivers/vmware/vmdk.py:118 #, python-format -msgid "Ignoring exception %(exc)s when unplugging PBD %(pbd)s" +msgid "Returning default spec value: %s." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:234 +#: cinder/volume/drivers/vmware/vmdk.py:169 #, python-format -msgid "Unable to introduce VDI on SR %s" +msgid "%s not set." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:242 +#: cinder/volume/drivers/vmware/vmdk.py:174 #, python-format -msgid "Unable to get record of VDI %s on" +msgid "Successfully setup driver: %(driver)s for server: %(ip)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:219 +msgid "Backing not available, no operation to be performed." 
msgstr "" -#: cinder/virt/xenapi/volume_utils.py:264 +#: cinder/volume/drivers/vmware/vmdk.py:287 #, python-format -msgid "Unable to introduce VDI for SR %s" +msgid "" +"Unable to pick datastore to accommodate %(size)s bytes from the " +"datastores: %(dss)s." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:274 +#: cinder/volume/drivers/vmware/vmdk.py:293 #, python-format -msgid "Error finding vdis in SR %s" +msgid "" +"Selected datastore: %(datastore)s with %(host_count)d connected host(s) " +"for the volume." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:281 +#: cinder/volume/drivers/vmware/vmdk.py:375 #, python-format -msgid "Unable to find vbd for vdi %s" +msgid "" +"Unable to find suitable datastore for volume of size: %(vol)s GB under " +"host: %(host)s. More details: %(excep)s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:315 +#: cinder/volume/drivers/vmware/vmdk.py:385 #, python-format -msgid "Unable to obtain target information %(data)s, %(mountpoint)s" +msgid "Unable to find host to accommodate a disk of size: %s in the inventory." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:341 +#: cinder/volume/drivers/vmware/vmdk.py:412 #, python-format -msgid "Mountpoint cannot be translated: %s" +msgid "" +"Unable to find suitable datastore for volume: %(vol)s under host: " +"%(host)s. More details: %(excep)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:64 -msgid "Could not find VDI ref" +#: cinder/volume/drivers/vmware/vmdk.py:422 +#, python-format +msgid "Unable to create volume: %s in the inventory." msgstr "" -#: cinder/virt/xenapi/volumeops.py:69 +#: cinder/volume/drivers/vmware/vmdk.py:441 #, python-format -msgid "Creating SR %s" +msgid "The instance: %s for which initialize connection is called, exists." msgstr "" -#: cinder/virt/xenapi/volumeops.py:73 -msgid "Could not create SR" +#: cinder/volume/drivers/vmware/vmdk.py:448 +#, python-format +msgid "There is no backing for the volume: %s. Need to create one." msgstr "" -#: cinder/virt/xenapi/volumeops.py:76 -msgid "Could not retrieve SR record" +#: cinder/volume/drivers/vmware/vmdk.py:456 +msgid "The instance for which initialize connection is called, does not exist." msgstr "" -#: cinder/virt/xenapi/volumeops.py:81 +#: cinder/volume/drivers/vmware/vmdk.py:461 #, python-format -msgid "Introducing SR %s" +msgid "Trying to boot from an empty volume: %s." msgstr "" -#: cinder/virt/xenapi/volumeops.py:85 -msgid "SR found in xapi database. No need to introduce" +#: cinder/volume/drivers/vmware/vmdk.py:470 +#, python-format +msgid "" +"Returning connection_info: %(info)s for volume: %(volume)s with " +"connector: %(connector)s." msgstr "" -#: cinder/virt/xenapi/volumeops.py:90 -msgid "Could not introduce SR" +#: cinder/volume/drivers/vmware/vmdk.py:518 +#, python-format +msgid "Snapshot of volume not supported in state: %s." msgstr "" -#: cinder/virt/xenapi/volumeops.py:94 +#: cinder/volume/drivers/vmware/vmdk.py:523 #, python-format -msgid "Checking for SR %s" +msgid "There is no backing, so will not create snapshot: %s." msgstr "" -#: cinder/virt/xenapi/volumeops.py:106 +#: cinder/volume/drivers/vmware/vmdk.py:528 #, python-format -msgid "SR %s not found in the xapi database" +msgid "Successfully created snapshot: %s." msgstr "" -#: cinder/virt/xenapi/volumeops.py:112 -msgid "Could not forget SR" +#: cinder/volume/drivers/vmware/vmdk.py:549 +#, python-format +msgid "Delete snapshot of volume not supported in state: %s." 
msgstr "" -#: cinder/virt/xenapi/volumeops.py:121 +#: cinder/volume/drivers/vmware/vmdk.py:554 #, python-format -msgid "Attach_volume: %(connection_info)s, %(instance_name)s, %(mountpoint)s" +msgid "There is no backing, and so there is no snapshot: %s." msgstr "" -#: cinder/virt/xenapi/volumeops.py:178 +#: cinder/volume/drivers/vmware/vmdk.py:558 #, python-format -msgid "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s" +msgid "Successfully deleted snapshot: %s." msgstr "" -#: cinder/virt/xenapi/volumeops.py:189 +#: cinder/volume/drivers/vmware/vmdk.py:586 #, python-format -msgid "Unable to use SR %(sr_ref)s for instance %(instance_name)s" +msgid "Successfully cloned new backing: %(back)s from source VMDK file: %(vmdk)s." msgstr "" -#: cinder/virt/xenapi/volumeops.py:197 +#: cinder/volume/drivers/vmware/vmdk.py:603 #, python-format -msgid "Unable to attach volume to instance %s" +msgid "" +"There is no backing for the source volume: %(svol)s. Not creating any " +"backing for the volume: %(vol)s." msgstr "" -#: cinder/virt/xenapi/volumeops.py:200 +#: cinder/volume/drivers/vmware/vmdk.py:633 #, python-format -msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s" +msgid "" +"There is no backing for the source snapshot: %(snap)s. Not creating any " +"backing for the volume: %(vol)s." msgstr "" -#: cinder/virt/xenapi/volumeops.py:210 +#: cinder/volume/drivers/vmware/vmdk.py:642 +#: cinder/volume/drivers/vmware/vmdk.py:982 #, python-format -msgid "Detach_volume: %(instance_name)s, %(mountpoint)s" +msgid "" +"There is no snapshot point for the snapshoted volume: %(snap)s. Not " +"creating any backing for the volume: %(vol)s." msgstr "" -#: cinder/virt/xenapi/volumeops.py:219 +#: cinder/volume/drivers/vmware/vmdk.py:678 #, python-format -msgid "Unable to locate volume %s" +msgid "Cannot create image of disk format: %s. Only vmdk disk format is accepted." msgstr "" -#: cinder/virt/xenapi/volumeops.py:227 +#: cinder/volume/drivers/vmware/vmdk.py:713 +#: cinder/volume/drivers/vmware/vmdk.py:771 #, python-format -msgid "Unable to detach volume %s" +msgid "Fetching glance image: %(id)s to server: %(host)s." msgstr "" -#: cinder/virt/xenapi/volumeops.py:232 +#: cinder/volume/drivers/vmware/vmdk.py:722 +#: cinder/volume/drivers/vmware/vmdk.py:792 #, python-format -msgid "Unable to destroy vbd %s" +msgid "Done copying image: %(id)s to volume: %(vol)s." msgstr "" -#: cinder/virt/xenapi/volumeops.py:239 +#: cinder/volume/drivers/vmware/vmdk.py:725 #, python-format -msgid "Error purging SR %s" +msgid "" +"Exception in copy_image_to_volume: %(excep)s. Deleting the backing: " +"%(back)s." msgstr "" -#: cinder/virt/xenapi/volumeops.py:241 +#: cinder/volume/drivers/vmware/vmdk.py:746 #, python-format -msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s" +msgid "Exception in _select_ds_for_volume: %s." msgstr "" -#: cinder/vnc/xvp_proxy.py:98 cinder/vnc/xvp_proxy.py:103 +#: cinder/volume/drivers/vmware/vmdk.py:749 #, python-format -msgid "Error in handshake: %s" +msgid "Selected datastore %(ds)s for new volume of size %(size)s GB." msgstr "" -#: cinder/vnc/xvp_proxy.py:119 +#: cinder/volume/drivers/vmware/vmdk.py:784 #, python-format -msgid "Invalid request: %s" +msgid "Exception in copy_image_to_volume: %s." 
msgstr "" -#: cinder/vnc/xvp_proxy.py:139 +#: cinder/volume/drivers/vmware/vmdk.py:787 #, python-format -msgid "Request: %s" +msgid "Deleting the backing: %s" msgstr "" -#: cinder/vnc/xvp_proxy.py:142 +#: cinder/volume/drivers/vmware/vmdk.py:808 #, python-format -msgid "Request made with missing token: %s" +msgid "Copy glance image: %s to create new volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:842 +msgid "Upload to glance of attached volume is not supported." msgstr "" -#: cinder/vnc/xvp_proxy.py:153 +#: cinder/volume/drivers/vmware/vmdk.py:847 #, python-format -msgid "Request made with invalid token: %s" +msgid "Copy Volume: %s to new image." msgstr "" -#: cinder/vnc/xvp_proxy.py:160 +#: cinder/volume/drivers/vmware/vmdk.py:853 #, python-format -msgid "Unexpected error: %s" +msgid "Backing not found, creating for volume: %s" msgstr "" -#: cinder/vnc/xvp_proxy.py:180 +#: cinder/volume/drivers/vmware/vmdk.py:872 #, python-format -msgid "Starting cinder-xvpvncproxy node (version %s)" +msgid "Done copying volume %(vol)s to a new image %(img)s" msgstr "" -#: cinder/volume/api.py:74 cinder/volume/api.py:220 -msgid "status must be available" +#: cinder/volume/drivers/vmware/vmdk.py:922 +#, python-format +msgid "Relocating volume: %(backing)s to %(ds)s and %(rp)s." msgstr "" -#: cinder/volume/api.py:85 +#: cinder/volume/drivers/vmware/vmdk.py:961 +#: cinder/volume/drivers/vmware/volumeops.py:630 #, python-format -msgid "Quota exceeded for %(pid)s, tried to create %(size)sG volume" +msgid "Successfully created clone: %s." msgstr "" -#: cinder/volume/api.py:137 -msgid "Volume status must be available or error" +#: cinder/volume/drivers/vmware/vmdk.py:974 +#, python-format +msgid "" +"There is no backing for the snapshoted volume: %(snap)s. Not creating any" +" backing for the volume: %(vol)s." msgstr "" -#: cinder/volume/api.py:142 +#: cinder/volume/drivers/vmware/vmdk.py:1010 #, python-format -msgid "Volume still has %d dependent snapshots" +msgid "" +"There is no backing for the source volume: %(src)s. Not creating any " +"backing for volume: %(vol)s." msgstr "" -#: cinder/volume/api.py:223 -msgid "already attached" +#: cinder/volume/drivers/vmware/vmdk.py:1018 +#, python-format +msgid "Linked clone of source volume not supported in state: %s." msgstr "" -#: cinder/volume/api.py:230 -msgid "already detached" +#: cinder/volume/drivers/vmware/vmware_images.py:94 +#, python-format +msgid "Downloading image: %s from glance image server as a flat vmdk file." msgstr "" -#: cinder/volume/api.py:292 -msgid "must be available" +#: cinder/volume/drivers/vmware/vmware_images.py:107 +#: cinder/volume/drivers/vmware/vmware_images.py:126 +#, python-format +msgid "Downloaded image: %s from glance image server." msgstr "" -#: cinder/volume/api.py:325 -msgid "Volume Snapshot status must be available or error" +#: cinder/volume/drivers/vmware/vmware_images.py:113 +#, python-format +msgid "Downloading image: %s from glance image server using HttpNfc import." msgstr "" -#: cinder/volume/driver.py:96 +#: cinder/volume/drivers/vmware/vmware_images.py:132 #, python-format -msgid "Recovering from a failed execute. Try number %s" +msgid "Uploading image: %s to the Glance image server using HttpNfc export." msgstr "" -#: cinder/volume/driver.py:106 +#: cinder/volume/drivers/vmware/vmware_images.py:158 #, python-format -msgid "volume group %s doesn't exist" +msgid "Uploaded image: %s to the Glance image server." 
msgstr "" -#: cinder/volume/driver.py:270 +#: cinder/volume/drivers/vmware/volumeops.py:87 #, python-format -msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %d" +msgid "Did not find any backing with name: %s" msgstr "" -#: cinder/volume/driver.py:318 +#: cinder/volume/drivers/vmware/volumeops.py:94 #, python-format -msgid "Skipping remove_export. No iscsi_target provisioned for volume: %d" +msgid "Deleting the VM backing: %s." msgstr "" -#: cinder/volume/driver.py:327 +#: cinder/volume/drivers/vmware/volumeops.py:97 #, python-format -msgid "" -"Skipping remove_export. No iscsi_target is presently exported for volume:" -" %d" +msgid "Initiated deletion of VM backing: %s." msgstr "" -#: cinder/volume/driver.py:337 -msgid "ISCSI provider_location not stored, using discovery" +#: cinder/volume/drivers/vmware/volumeops.py:99 +#, python-format +msgid "Deleted the VM backing: %s." msgstr "" -#: cinder/volume/driver.py:384 +#: cinder/volume/drivers/vmware/volumeops.py:237 #, python-format -msgid "Could not find iSCSI export for volume %s" +msgid "There are no valid datastores attached to %s." msgstr "" -#: cinder/volume/driver.py:388 +#: cinder/volume/drivers/vmware/volumeops.py:289 #, python-format -msgid "ISCSI Discovery: Found %s" +msgid "" +"Creating folder: %(child_folder_name)s under parent folder: " +"%(parent_folder)s." msgstr "" -#: cinder/volume/driver.py:466 +#: cinder/volume/drivers/vmware/volumeops.py:306 #, python-format -msgid "Cannot confirm exported volume id:%(volume_id)s." +msgid "Child folder already present: %s." msgstr "" -#: cinder/volume/driver.py:493 +#: cinder/volume/drivers/vmware/volumeops.py:314 #, python-format -msgid "FAKE ISCSI: %s" +msgid "Created child folder: %s." msgstr "" -#: cinder/volume/driver.py:505 +#: cinder/volume/drivers/vmware/volumeops.py:365 #, python-format -msgid "rbd has no pool %s" +msgid "Spec for creating the backing: %s." msgstr "" -#: cinder/volume/driver.py:579 +#: cinder/volume/drivers/vmware/volumeops.py:383 #, python-format -msgid "Sheepdog is not working: %s" +msgid "" +"Creating volume backing name: %(name)s disk_type: %(disk_type)s size_kb: " +"%(size_kb)s at folder: %(folder)s resourse pool: %(resource_pool)s " +"datastore name: %(ds_name)s." msgstr "" -#: cinder/volume/driver.py:581 -msgid "Sheepdog is not working" +#: cinder/volume/drivers/vmware/volumeops.py:395 +#, python-format +msgid "Initiated creation of volume backing: %s." msgstr "" -#: cinder/volume/driver.py:680 cinder/volume/driver.py:685 +#: cinder/volume/drivers/vmware/volumeops.py:398 #, python-format -msgid "LoggingVolumeDriver: %s" +msgid "Successfully created volume backing: %s." msgstr "" -#: cinder/volume/manager.py:96 +#: cinder/volume/drivers/vmware/volumeops.py:438 #, python-format -msgid "Re-exporting %s volumes" +msgid "Spec for relocating the backing: %s." msgstr "" -#: cinder/volume/manager.py:101 +#: cinder/volume/drivers/vmware/volumeops.py:452 #, python-format -msgid "volume %s: skipping export" +msgid "" +"Relocating backing: %(backing)s to datastore: %(ds)s and resource pool: " +"%(rp)s." msgstr "" -#: cinder/volume/manager.py:107 +#: cinder/volume/drivers/vmware/volumeops.py:462 #, python-format -msgid "volume %s: creating" +msgid "Initiated relocation of volume backing: %s." 
msgstr "" -#: cinder/volume/manager.py:119 +#: cinder/volume/drivers/vmware/volumeops.py:464 #, python-format -msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgid "" +"Successfully relocated volume backing: %(backing)s to datastore: %(ds)s " +"and resource pool: %(rp)s." msgstr "" -#: cinder/volume/manager.py:131 +#: cinder/volume/drivers/vmware/volumeops.py:474 #, python-format -msgid "volume %s: creating export" +msgid "Moving backing: %(backing)s to folder: %(fol)s." msgstr "" -#: cinder/volume/manager.py:144 +#: cinder/volume/drivers/vmware/volumeops.py:479 #, python-format -msgid "volume %s: created successfully" +msgid "Initiated move of volume backing: %(backing)s into the folder: %(fol)s." msgstr "" -#: cinder/volume/manager.py:153 -msgid "Volume is still attached" +#: cinder/volume/drivers/vmware/volumeops.py:482 +#, python-format +msgid "Successfully moved volume backing: %(backing)s into the folder: %(fol)s." msgstr "" -#: cinder/volume/manager.py:155 -msgid "Volume is not local to this node" +#: cinder/volume/drivers/vmware/volumeops.py:494 +#, python-format +msgid "Snapshoting backing: %(backing)s with name: %(name)s." msgstr "" -#: cinder/volume/manager.py:159 +#: cinder/volume/drivers/vmware/volumeops.py:501 #, python-format -msgid "volume %s: removing export" +msgid "Initiated snapshot of volume backing: %(backing)s named: %(name)s." msgstr "" -#: cinder/volume/manager.py:161 +#: cinder/volume/drivers/vmware/volumeops.py:505 #, python-format -msgid "volume %s: deleting" +msgid "Successfully created snapshot: %(snap)s for volume backing: %(backing)s." msgstr "" -#: cinder/volume/manager.py:164 +#: cinder/volume/drivers/vmware/volumeops.py:553 #, python-format -msgid "volume %s: volume is busy" +msgid "Deleting the snapshot: %(name)s from backing: %(backing)s." msgstr "" -#: cinder/volume/manager.py:176 +#: cinder/volume/drivers/vmware/volumeops.py:558 #, python-format -msgid "volume %s: deleted successfully" +msgid "" +"Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " +"delete anything." msgstr "" -#: cinder/volume/manager.py:183 +#: cinder/volume/drivers/vmware/volumeops.py:565 #, python-format -msgid "snapshot %s: creating" +msgid "Initiated snapshot: %(name)s deletion for backing: %(backing)s." msgstr "" -#: cinder/volume/manager.py:187 +#: cinder/volume/drivers/vmware/volumeops.py:569 #, python-format -msgid "snapshot %(snap_name)s: creating" +msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." msgstr "" -#: cinder/volume/manager.py:202 +#: cinder/volume/drivers/vmware/volumeops.py:597 #, python-format -msgid "snapshot %s: created successfully" +msgid "Spec for cloning the backing: %s." msgstr "" -#: cinder/volume/manager.py:211 +#: cinder/volume/drivers/vmware/volumeops.py:613 #, python-format -msgid "snapshot %s: deleting" +msgid "" +"Creating a clone of backing: %(back)s, named: %(name)s, clone type: " +"%(type)s from snapshot: %(snap)s on datastore: %(ds)s" msgstr "" -#: cinder/volume/manager.py:214 +#: cinder/volume/drivers/vmware/volumeops.py:627 #, python-format -msgid "snapshot %s: snapshot is busy" +msgid "Initiated clone of backing: %s." msgstr "" -#: cinder/volume/manager.py:226 +#: cinder/volume/drivers/vmware/volumeops.py:638 #, python-format -msgid "snapshot %s: deleted successfully" +msgid "Deleting file: %(file)s under datacenter: %(dc)s." 
msgstr "" -#: cinder/volume/manager.py:310 -msgid "Checking volume capabilities" +#: cinder/volume/drivers/vmware/volumeops.py:646 +#, python-format +msgid "Initiated deletion via task: %s." msgstr "" -#: cinder/volume/manager.py:314 +#: cinder/volume/drivers/vmware/volumeops.py:648 #, python-format -msgid "New capabilities found: %s" +msgid "Successfully deleted file: %s." msgstr "" -#: cinder/volume/manager.py:325 -msgid "Clear capabilities" +#: cinder/volume/drivers/vmware/volumeops.py:701 +msgid "Copying disk data before snapshot of the VM" msgstr "" -#: cinder/volume/manager.py:329 +#: cinder/volume/drivers/vmware/volumeops.py:711 #, python-format -msgid "Notification {%s} received" +msgid "Initiated copying disk data via task: %s." msgstr "" -#: cinder/volume/netapp.py:79 +#: cinder/volume/drivers/vmware/volumeops.py:713 #, python-format -msgid "API %(name)sfailed: %(reason)s" +msgid "Successfully copied disk at: %(src)s to: %(dest)s." msgstr "" -#: cinder/volume/netapp.py:109 +#: cinder/volume/drivers/vmware/volumeops.py:722 #, python-format -msgid "%s is not set" +msgid "Deleting vmdk file: %s." msgstr "" -#: cinder/volume/netapp.py:128 -msgid "Connected to DFM server" +#: cinder/volume/drivers/vmware/volumeops.py:729 +#, python-format +msgid "Initiated deleting vmdk file via task: %s." msgstr "" -#: cinder/volume/netapp.py:159 +#: cinder/volume/drivers/vmware/volumeops.py:731 #, python-format -msgid "Job failed: %s" +msgid "Deleted vmdk file: %s." msgstr "" -#: cinder/volume/netapp.py:240 -msgid "Failed to provision dataset member" +#: cinder/volume/drivers/windows/windows.py:102 +#, python-format +msgid "Creating folder %s " msgstr "" -#: cinder/volume/netapp.py:252 -msgid "No LUN was created by the provision job" +#: cinder/volume/drivers/windows/windows_utils.py:47 +#, python-format +msgid "" +"check_for_setup_error: the state of the WT Portal could not be verified. " +"WMI exception: %s" msgstr "" -#: cinder/volume/netapp.py:261 cinder/volume/netapp.py:433 -#, python-format -msgid "Failed to find LUN ID for volume %s" +#: cinder/volume/drivers/windows/windows_utils.py:53 +msgid "check_for_setup_error: there is no ISCSI traffic listening." msgstr "" -#: cinder/volume/netapp.py:280 -msgid "Failed to remove and delete dataset member" +#: cinder/volume/drivers/windows/windows_utils.py:63 +#, python-format +msgid "" +"get_host_information: the state of the WT Portal could not be verified. " +"WMI exception: %s" msgstr "" -#: cinder/volume/netapp.py:603 cinder/volume/netapp.py:657 +#: cinder/volume/drivers/windows/windows_utils.py:73 #, python-format -msgid "No LUN ID for volume %s" +msgid "" +"get_host_information: the ISCSI target information could not be " +"retrieved. WMI exception: %s" msgstr "" -#: cinder/volume/netapp.py:607 cinder/volume/netapp.py:661 +#: cinder/volume/drivers/windows/windows_utils.py:105 #, python-format -msgid "Failed to get LUN details for LUN ID %s" +msgid "" +"associate_initiator_with_iscsi_target: an association between initiator: " +"%(init)s and target name: %(target)s could not be established. WMI " +"exception: %(wmi_exc)s" msgstr "" -#: cinder/volume/netapp.py:614 +#: cinder/volume/drivers/windows/windows_utils.py:123 #, python-format -msgid "Failed to get host details for host ID %s" +msgid "" +"delete_iscsi_target: error when deleting the iscsi target associated with" +" target name: %(target)s . 
WMI exception: %(wmi_exc)s" msgstr "" -#: cinder/volume/netapp.py:620 +#: cinder/volume/drivers/windows/windows_utils.py:139 #, python-format -msgid "Failed to get target portal for filer: %s" +msgid "" +"create_volume: error when creating the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" msgstr "" -#: cinder/volume/netapp.py:625 +#: cinder/volume/drivers/windows/windows_utils.py:157 #, python-format -msgid "Failed to get target IQN for filer: %s" +msgid "" +"delete_volume: error when deleting the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" msgstr "" -#: cinder/volume/san.py:113 cinder/volume/san.py:151 -msgid "Specify san_password or san_private_key" +#: cinder/volume/drivers/windows/windows_utils.py:177 +#, python-format +msgid "" +"create_snapshot: error when creating the snapshot name: %(vol_name)s . " +"WMI exception: %(wmi_exc)s" msgstr "" -#: cinder/volume/san.py:156 -msgid "san_ip must be set" +#: cinder/volume/drivers/windows/windows_utils.py:193 +#, python-format +msgid "" +"create_volume_from_snapshot: error when creating the volume name: " +"%(vol_name)s from snapshot name: %(snap_name)s. WMI exception: " +"%(wmi_exc)s" msgstr "" -#: cinder/volume/san.py:320 +#: cinder/volume/drivers/windows/windows_utils.py:208 #, python-format -msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgid "" +"delete_snapshot: error when deleting the snapshot name: %(snap_name)s . " +"WMI exception: %(wmi_exc)s" msgstr "" -#: cinder/volume/san.py:452 +#: cinder/volume/drivers/windows/windows_utils.py:223 #, python-format -msgid "CLIQ command returned %s" +msgid "" +"create_iscsi_target: error when creating iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" msgstr "" -#: cinder/volume/san.py:458 +#: cinder/volume/drivers/windows/windows_utils.py:240 #, python-format -msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgid "" +"remove_iscsi_target: error when deleting iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" msgstr "" -#: cinder/volume/san.py:466 +#: cinder/volume/drivers/windows/windows_utils.py:255 #, python-format -msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgid "" +"add_disk_to_target: error adding disk associated to volume : %(vol_name)s" +" to the target name: %(tar_name)s . WMI exception: %(wmi_exc)s" msgstr "" -#: cinder/volume/san.py:496 +#: cinder/volume/drivers/windows/windows_utils.py:273 #, python-format msgid "" -"Unexpected number of virtual ips for cluster %(cluster_name)s. " -"Result=%(_xml)s" +"copy_vhd_disk: error when copying disk from source path : %(src_path)s to" +" destination path: %(dest_path)s . 
WMI exception: %(wmi_exc)s" msgstr "" -#: cinder/volume/san.py:549 +#: cinder/volume/drivers/windows/windows_utils.py:290 #, python-format -msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgid "" +"extend: error when extending the volume: %(vol_name)s .WMI exception: " +"%(wmi_exc)s" msgstr "" -#: cinder/volume/san.py:594 -msgid "local_path not supported" +#: cinder/volume/flows/common.py:52 +#, python-format +msgid "Restoring source %(source_volid)s status to %(status)s" msgstr "" -#: cinder/volume/san.py:626 +#: cinder/volume/flows/common.py:58 #, python-format -msgid "Could not determine project for volume %s, can't export" +msgid "" +"Failed setting source volume %(source_volid)s back to its initial " +"%(source_status)s status" msgstr "" -#: cinder/volume/san.py:696 +#: cinder/volume/flows/common.py:83 #, python-format -msgid "Payload for SolidFire API call: %s" +msgid "Updating volume: %(volume_id)s with %(update)s due to: %(reason)s" msgstr "" -#: cinder/volume/san.py:713 +#: cinder/volume/flows/common.py:90 +#: cinder/volume/flows/manager/create_volume.py:676 #, python-format -msgid "Call to json.loads() raised an exception: %s" +msgid "Failed updating volume %(volume_id)s with %(update)s" msgstr "" -#: cinder/volume/san.py:718 +#: cinder/volume/flows/api/create_volume.py:81 #, python-format -msgid "Results of SolidFire API call: %s" +msgid "Originating snapshot status must be one of %s values" msgstr "" -#: cinder/volume/san.py:732 +#: cinder/volume/flows/api/create_volume.py:103 #, python-format -msgid "Found solidfire account: %s" +msgid "" +"Unable to create a volume from an originating source volume when its " +"status is not one of %s values" msgstr "" -#: cinder/volume/san.py:746 +#: cinder/volume/flows/api/create_volume.py:126 #, python-format -msgid "solidfire account: %s does not exist, create it..." +msgid "" +"Volume size %(size)sGB cannot be smaller than the snapshot size " +"%(snap_size)sGB. They must be >= original snapshot size." msgstr "" -#: cinder/volume/san.py:804 -msgid "Enter SolidFire create_volume..." +#: cinder/volume/flows/api/create_volume.py:135 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than original volume size " +"%(source_size)sGB. They must be >= original volume size." msgstr "" -#: cinder/volume/san.py:846 -msgid "Leaving SolidFire create_volume" +#: cinder/volume/flows/api/create_volume.py:144 +#, python-format +msgid "Volume size %(size)s must be an integer and greater than 0" msgstr "" -#: cinder/volume/san.py:861 -msgid "Enter SolidFire delete_volume..." +#: cinder/volume/flows/api/create_volume.py:186 +#, python-format +msgid "" +"Size of specified image %(image_size)sGB is larger than volume size " +"%(volume_size)sGB." msgstr "" -#: cinder/volume/san.py:880 +#: cinder/volume/flows/api/create_volume.py:194 #, python-format -msgid "Deleting volumeID: %s " +msgid "" +"Volume size %(volume_size)sGB cannot be smaller than the image minDisk " +"size %(min_disk)sGB." msgstr "" -#: cinder/volume/san.py:888 -msgid "Leaving SolidFire delete_volume" +#: cinder/volume/flows/api/create_volume.py:212 +#, python-format +msgid "Metadata property key %s greater than 255 characters" msgstr "" -#: cinder/volume/san.py:891 -msgid "Executing SolidFire ensure_export..." +#: cinder/volume/flows/api/create_volume.py:217 +#, python-format +msgid "Metadata property key %s value greater than 255 characters" msgstr "" -#: cinder/volume/san.py:895 -msgid "Executing SolidFire create_export..." 
+#: cinder/volume/flows/api/create_volume.py:254 +#, python-format +msgid "Availability zone '%s' is invalid" msgstr "" -#: cinder/volume/volume_types.py:49 cinder/volume/volume_types.py:108 -msgid "name cannot be None" +#: cinder/volume/flows/api/create_volume.py:267 +msgid "Volume must be in the same availability zone as the snapshot" msgstr "" -#: cinder/volume/volume_types.py:96 -msgid "id cannot be None" +#: cinder/volume/flows/api/create_volume.py:276 +msgid "Volume must be in the same availability zone as the source volume" msgstr "" -#: cinder/volume/xensm.py:55 -#, python-format -msgid "SR name = %s" +#: cinder/volume/flows/api/create_volume.py:315 +msgid "Volume type will be changed to be the same as the source volume." msgstr "" -#: cinder/volume/xensm.py:56 +#: cinder/volume/flows/api/create_volume.py:463 #, python-format -msgid "Params: %s" +msgid "Failed destroying volume entry %s" msgstr "" -#: cinder/volume/xensm.py:60 +#: cinder/volume/flows/api/create_volume.py:546 #, python-format -msgid "Failed to create sr %s...continuing" +msgid "Failed rolling back quota for %s reservations" msgstr "" -#: cinder/volume/xensm.py:62 -msgid "Create failed" +#: cinder/volume/flows/api/create_volume.py:590 +#, python-format +msgid "Failed to update quota for deleting volume: %s" msgstr "" -#: cinder/volume/xensm.py:64 +#: cinder/volume/flows/api/create_volume.py:678 +#: cinder/volume/flows/manager/create_volume.py:192 #, python-format -msgid "SR UUID of new SR is: %s" +msgid "Volume %s: create failed" msgstr "" -#: cinder/volume/xensm.py:71 -msgid "Failed to update db" +#: cinder/volume/flows/api/create_volume.py:682 +msgid "Unexpected build error:" msgstr "" -#: cinder/volume/xensm.py:80 +#: cinder/volume/flows/manager/create_volume.py:105 #, python-format -msgid "Failed to introduce sr %s...continuing" +msgid "" +"Volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d due to " +"%(reason)s" msgstr "" -#: cinder/volume/xensm.py:91 +#: cinder/volume/flows/manager/create_volume.py:124 #, python-format -msgid "Failed to reach backend %d" -msgstr "" - -#: cinder/volume/xensm.py:100 -msgid "XenSMDriver requires xenapi connection" -msgstr "" - -#: cinder/volume/xensm.py:110 -msgid "Failed to initiate session" +msgid "Volume %s: re-scheduled" msgstr "" -#: cinder/volume/xensm.py:142 +#: cinder/volume/flows/manager/create_volume.py:141 #, python-format -msgid "Volume will be created in backend - %d" +msgid "Updating volume %(volume_id)s with %(update)s." msgstr "" -#: cinder/volume/xensm.py:154 -msgid "Failed to update volume in db" +#: cinder/volume/flows/manager/create_volume.py:146 +#, python-format +msgid "Volume %s: resetting 'creating' status failed." msgstr "" -#: cinder/volume/xensm.py:157 -msgid "Unable to create volume" +#: cinder/volume/flows/manager/create_volume.py:165 +#, python-format +msgid "Volume %s: rescheduling failed" msgstr "" -#: cinder/volume/xensm.py:171 -msgid "Failed to delete vdi" +#: cinder/volume/flows/manager/create_volume.py:308 +#, python-format +msgid "" +"Failed notifying about the volume action %(event)s for volume " +"%(volume_id)s" msgstr "" -#: cinder/volume/xensm.py:177 -msgid "Failed to delete volume in db" +#: cinder/volume/flows/manager/create_volume.py:345 +#, python-format +msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." 
msgstr "" -#: cinder/volume/xensm.py:210 -msgid "Failed to find volume in db" +#: cinder/volume/flows/manager/create_volume.py:347 +#, python-format +msgid "" +"Failed updating volume %(vol_id)s metadata using the provided " +"%(src_type)s %(src_id)s metadata" msgstr "" -#: cinder/volume/xensm.py:221 -msgid "Failed to find backend in db" +#: cinder/volume/flows/manager/create_volume.py:405 +#, python-format +msgid "" +"Failed fetching snapshot %(snapshot_id)s bootable flag using the provided" +" glance snapshot %(snapshot_ref_id)s volume reference" msgstr "" -#: cinder/volume/nexenta/__init__.py:27 -msgid "Nexenta SA returned the error" +#: cinder/volume/flows/manager/create_volume.py:418 +#, python-format +msgid "Marking volume %s as bootable." msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:64 +#: cinder/volume/flows/manager/create_volume.py:421 #, python-format -msgid "Sending JSON data: %s" +msgid "Failed updating volume %(volume_id)s bootable flag to true" msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:69 +#: cinder/volume/flows/manager/create_volume.py:448 #, python-format -msgid "Auto switching to HTTPS connection to %s" +msgid "" +"Attempting download of %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s." msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:75 -msgid "No headers in server response" +#: cinder/volume/flows/manager/create_volume.py:455 +#: cinder/volume/flows/manager/create_volume.py:466 +#, python-format +msgid "" +"Failed to copy image %(image_id)s to volume: %(volume_id)s, error: " +"%(error)s" msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:76 -msgid "Bad response from server" +#: cinder/volume/flows/manager/create_volume.py:461 +#, python-format +msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:79 +#: cinder/volume/flows/manager/create_volume.py:475 #, python-format -msgid "Got response: %s" +msgid "" +"Downloaded image %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s successfully." msgstr "" -#: cinder/volume/nexenta/volume.py:96 +#: cinder/volume/flows/manager/create_volume.py:512 #, python-format -msgid "Volume %s does not exist in Nexenta SA" +msgid "" +"Creating volume glance metadata for volume %(volume_id)s backed by image " +"%(image_id)s with: %(vol_metadata)s." msgstr "" -#: cinder/volume/nexenta/volume.py:180 +#: cinder/volume/flows/manager/create_volume.py:526 +#, python-format msgid "" -"Call to local_path should not happen. Verify that use_local_volumes flag " -"is turned off." +"Cloning %(volume_id)s from image %(image_id)s at location " +"%(image_location)s." msgstr "" -#: cinder/volume/nexenta/volume.py:202 +#: cinder/volume/flows/manager/create_volume.py:552 #, python-format -msgid "Ignored target creation error \"%s\" while ensuring export" +msgid "Failed updating volume %(volume_id)s with %(updates)s" msgstr "" -#: cinder/volume/nexenta/volume.py:210 +#: cinder/volume/flows/manager/create_volume.py:574 #, python-format -msgid "Ignored target group creation error \"%s\" while ensuring export" +msgid "Unable to create volume. 
Volume driver %s not initialized" msgstr "" -#: cinder/volume/nexenta/volume.py:219 +#: cinder/volume/flows/manager/create_volume.py:588 #, python-format -msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgid "" +"Volume %(volume_id)s: being created using %(functor)s with specification:" +" %(volume_spec)s" msgstr "" -#: cinder/volume/nexenta/volume.py:227 +#: cinder/volume/flows/manager/create_volume.py:611 #, python-format -msgid "Ignored LU creation error \"%s\" while ensuring export" +msgid "" +"Failed updating model of volume %(volume_id)s with creation provided " +"model %(model)s" msgstr "" -#: cinder/volume/nexenta/volume.py:237 +#: cinder/volume/flows/manager/create_volume.py:619 #, python-format -msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgid "Volume %s: creating export" msgstr "" -#: cinder/volume/nexenta/volume.py:273 +#: cinder/volume/flows/manager/create_volume.py:633 #, python-format msgid "" -"Got error trying to destroy target group %(target_group)s, assuming it is" -" already gone: %(exc)s" +"Failed updating model of volume %(volume_id)s with driver provided model " +"%(model)s" msgstr "" -#: cinder/volume/nexenta/volume.py:280 +#: cinder/volume/flows/manager/create_volume.py:680 #, python-format -msgid "" -"Got error trying to delete target %(target)s, assuming it is already " -"gone: %(exc)s" +msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" msgstr "" diff --git a/cinder/locale/cs/LC_MESSAGES/cinder.po b/cinder/locale/cs/LC_MESSAGES/cinder.po index ebca597162..59699aae6b 100644 --- a/cinder/locale/cs/LC_MESSAGES/cinder.po +++ b/cinder/locale/cs/LC_MESSAGES/cinder.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: cinder\n" "Report-Msgid-Bugs-To: FULL NAME \n" -"POT-Creation-Date: 2012-04-08 23:04+0000\n" +"POT-Creation-Date: 2014-02-03 06:16+0000\n" "PO-Revision-Date: 2012-04-04 20:28+0000\n" "Last-Translator: Zbyněk Schwarz \n" "Language-Team: Czech \n" @@ -16,408 +16,202 @@ msgstr "" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 0.9.6\n" +"Generated-By: Babel 1.3\n" -#: cinder/context.py:59 +#: cinder/context.py:61 #, python-format msgid "Arguments dropped when creating context: %s" msgstr "" -#: cinder/context.py:90 +#: cinder/context.py:102 #, python-format msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" msgstr "" -#: cinder/crypto.py:48 -msgid "Filename of root CA" -msgstr "Jméno souboru kořenové CA" - -#: cinder/crypto.py:51 -msgid "Filename of private key" -msgstr "Jméno souboru se soukromým klíčem" - -#: cinder/crypto.py:54 -msgid "Filename of root Certificate Revocation List" -msgstr "Název souboru seznamu zrušení kořenového certifikátu" - -#: cinder/crypto.py:57 -msgid "Where we keep our keys" -msgstr "Kde uchováváme naše klíče" - -#: cinder/crypto.py:60 -msgid "Where we keep our root CA" -msgstr "Kde uchováváme naši kořenovou CA" +#: cinder/exception.py:66 cinder/brick/exception.py:33 +msgid "An unknown exception occurred." +msgstr "Vyskytla se neočekávaná výjimka." -#: cinder/crypto.py:63 -msgid "Should we use a CA for each project?" -msgstr "Použijeme CA pro každý projekt?" 
+#: cinder/exception.py:88 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" -#: cinder/crypto.py:67 +#: cinder/exception.py:107 #, python-format -msgid "Subject for certificate for users, %s for project, user, timestamp" +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" msgstr "" -"Předmět certifikátu pro uživatele, %s pro projekt, uživatel, časové " -"razítko" -#: cinder/crypto.py:72 +#: cinder/exception.py:112 #, python-format -msgid "Subject for certificate for projects, %s for project, timestamp" -msgstr "Předmět certifikátu pro projekty, %s pro projekt, časové razítko" +msgid "Volume driver reported an error: %(message)s" +msgstr "" -#: cinder/crypto.py:292 +#: cinder/exception.py:116 #, python-format -msgid "Flags path: %s" -msgstr "Cesta příznaků: %s" - -#: cinder/exception.py:56 -msgid "Unexpected error while running command." -msgstr "Při spuštění příkazu došlo k nečekané chybě." +msgid "Backup driver reported an error: %(message)s" +msgstr "" -#: cinder/exception.py:59 +#: cinder/exception.py:120 #, python-format -msgid "" -"%(description)s\n" -"Command: %(cmd)s\n" -"Exit code: %(exit_code)s\n" -"Stdout: %(stdout)r\n" -"Stderr: %(stderr)r" +msgid "Connection to glance failed: %(reason)s" msgstr "" -"%(description)s\n" -"Příkaz: %(cmd)s\n" -"Kód ukončení: %(exit_code)s\n" -"Stdout: %(stdout)r\n" -"Stderr: %(stderr)r" - -#: cinder/exception.py:94 -msgid "DB exception wrapped." -msgstr "Vyjímka DB zabalena." - -#: cinder/exception.py:155 -msgid "An unknown exception occurred." -msgstr "Vyskytla se neočekávaná výjimka." - -#: cinder/exception.py:178 -msgid "Failed to decrypt text" -msgstr "Nelze dešifrovat text" - -#: cinder/exception.py:182 -msgid "Failed to paginate through images from image service" -msgstr "Nelze stránkovat skrze obrázky ze služby obrázků" - -#: cinder/exception.py:186 -msgid "Virtual Interface creation failed" -msgstr "Vytvoření virtuálního rozhraní selhalo" - -#: cinder/exception.py:190 -msgid "5 attempts to create virtual interfacewith unique mac address failed" -msgstr "Selhalo 5 pokusů o vytvoření virtuálního rozhraní s jedinečnou mac adresou" - -#: cinder/exception.py:195 -msgid "Connection to glance failed" -msgstr "Připojení k glance selhalo" - -#: cinder/exception.py:199 -msgid "Connection to melange failed" -msgstr "Připojení k melange selhalo" -#: cinder/exception.py:203 +#: cinder/exception.py:124 msgid "Not authorized." msgstr "Neschváleno." -#: cinder/exception.py:208 +#: cinder/exception.py:129 msgid "User does not have admin privileges" msgstr "Uživatel nemá správcovská oprávnění" -#: cinder/exception.py:212 +#: cinder/exception.py:133 #, python-format msgid "Policy doesn't allow %(action)s to be performed." msgstr "Pravidla nedovolují, aby bylo %(action)s provedeno." -#: cinder/exception.py:216 +#: cinder/exception.py:137 #, fuzzy, python-format msgid "Not authorized for image %(image_id)s." msgstr "Kernel nenalezen v obrazu %(image_id)s." -#: cinder/exception.py:220 +#: cinder/exception.py:141 +msgid "Volume driver not ready." +msgstr "" + +#: cinder/exception.py:145 cinder/brick/exception.py:74 msgid "Unacceptable parameters." msgstr "Nepřijatelné parametry." 
-#: cinder/exception.py:225 -msgid "Invalid snapshot" -msgstr "Neplatný snímek" +#: cinder/exception.py:150 +#, python-format +msgid "Invalid snapshot: %(reason)s" +msgstr "" -#: cinder/exception.py:229 +#: cinder/exception.py:154 #, python-format -msgid "Volume %(volume_id)s is not attached to anything" -msgstr "Svazek %(volume_id)s není k ničemu připojen" +msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." +msgstr "" -#: cinder/exception.py:233 cinder/api/openstack/compute/contrib/keypairs.py:113 -msgid "Keypair data is invalid" -msgstr "Data páru klíčů jsou neplatná" +#: cinder/exception.py:159 +#, fuzzy, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "Svazek %(volume_id)s není k ničemu připojen" -#: cinder/exception.py:237 +#: cinder/exception.py:163 msgid "Failed to load data into json format" msgstr "Nelze načíst data do formátu json" -#: cinder/exception.py:241 -msgid "The request is invalid." +#: cinder/exception.py:167 +#, fuzzy +msgid "The results are invalid." msgstr "Požadavek je neplatný." -#: cinder/exception.py:245 -#, python-format -msgid "Invalid signature %(signature)s for user %(user)s." -msgstr "Neplatný podpis %(signature)s pro uživatele %(user)s." - -#: cinder/exception.py:249 -msgid "Invalid input received" -msgstr "Obdržen neplatný vstup" - -#: cinder/exception.py:253 +#: cinder/exception.py:171 #, python-format -msgid "Invalid instance type %(instance_type)s." -msgstr "Neplatná instance typu %(instance_type)s." - -#: cinder/exception.py:257 -msgid "Invalid volume type" -msgstr "Neplatný typ svazku" - -#: cinder/exception.py:261 -msgid "Invalid volume" -msgstr "Neplatný svazek" +msgid "Invalid input received: %(reason)s" +msgstr "" -#: cinder/exception.py:265 +#: cinder/exception.py:175 #, python-format -msgid "Invalid port range %(from_port)s:%(to_port)s. %(msg)s" -msgstr "Neplatný rozsah portů %(from_port)s:%(to_port)s. %(msg)s" +msgid "Invalid volume type: %(reason)s" +msgstr "" -#: cinder/exception.py:269 +#: cinder/exception.py:179 #, python-format -msgid "Invalid IP protocol %(protocol)s." -msgstr "Neplatný protokol IP %(protocol)s." +msgid "Invalid volume: %(reason)s" +msgstr "" -#: cinder/exception.py:273 +#: cinder/exception.py:183 #, python-format msgid "Invalid content type %(content_type)s." msgstr "Neplatný typ obsahu %(content_type)s." -#: cinder/exception.py:277 +#: cinder/exception.py:187 #, python-format -msgid "Invalid cidr %(cidr)s." -msgstr "Neplatný cidr %(cidr)s." - -#: cinder/exception.py:281 -msgid "Invalid reuse of an RPC connection." -msgstr "Neplatné znovu použití připojení RPC." - -#: cinder/exception.py:285 -msgid "Invalid Parameter: Unicode is not supported by the current database." +msgid "Invalid host: %(reason)s" msgstr "" -#: cinder/exception.py:292 +#: cinder/exception.py:193 cinder/brick/exception.py:81 #, python-format msgid "%(err)s" msgstr "%(err)s" -#: cinder/exception.py:296 +#: cinder/exception.py:197 #, python-format -msgid "" -"Cannot perform action '%(action)s' on aggregate %(aggregate_id)s. Reason:" -" %(reason)s." +msgid "Invalid auth key: %(reason)s" msgstr "" -"Nelze provést činnost '%(action)s' na agregátu %(aggregate_id)s. Důvod: " -"%(reason)s." - -#: cinder/exception.py:301 -#, fuzzy, python-format -msgid "Group not valid. Reason: %(reason)s" -msgstr "Nebyl nalezen žádný platný hostitel. %(reason)s" -#: cinder/exception.py:305 +#: cinder/exception.py:201 #, python-format -msgid "" -"Instance %(instance_uuid)s in %(attr)s %(state)s. 
Cannot %(method)s while" -" the instance is in this state." +msgid "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" msgstr "" -"Instance %(instance_uuid)s v %(attr)s %(state)s. Nelze %(method)s " -"zatímco je instance v tomto stavu." - -#: cinder/exception.py:310 -#, python-format -msgid "Instance %(instance_id)s is not running." -msgstr "Instance %(instance_id)s není spuštěna." -#: cinder/exception.py:314 -#, python-format -msgid "Instance %(instance_id)s is not suspended." -msgstr "Instance %(instance_id)s není pozastavena." - -#: cinder/exception.py:318 -#, python-format -msgid "Instance %(instance_id)s is not in rescue mode" -msgstr "Instance %(instance_id)s není v nouzovém režimu." - -#: cinder/exception.py:322 -msgid "Failed to suspend instance" -msgstr "Nelze pozastavit instanci" - -#: cinder/exception.py:326 -msgid "Failed to resume server" -msgstr "Server nemůže pokračovat" - -#: cinder/exception.py:330 -msgid "Failed to reboot instance" -msgstr "Nelze restartovat instanci" - -#: cinder/exception.py:334 -#, fuzzy -msgid "Failed to terminate instance" -msgstr "Nelze restartovat instanci" - -#: cinder/exception.py:338 +#: cinder/exception.py:206 msgid "Service is unavailable at this time." msgstr "Služba je v tuto chvíli nedostupná." -#: cinder/exception.py:342 -msgid "Volume service is unavailable at this time." -msgstr "Služba svazku je v tuto chvíli nedostupná." - -#: cinder/exception.py:346 -msgid "Compute service is unavailable at this time." -msgstr "Služba výpočtu je v tuto chvíli nedostupná." - -#: cinder/exception.py:350 -#, python-format -msgid "Unable to migrate instance (%(instance_id)s) to current host (%(host)s)." -msgstr "" -"Nelze přesunout instanci (%(instance_id)s) na současného hostitele " -"(%(host)s)." - -#: cinder/exception.py:355 -msgid "Destination compute host is unavailable at this time." -msgstr "Cílový výpočetní hostitel je v současnosti nedostupný." - -#: cinder/exception.py:359 -msgid "Original compute host is unavailable at this time." -msgstr "Původní výpočetní hostitel je v současnosti nedostupný." - -#: cinder/exception.py:363 -msgid "The supplied hypervisor type of is invalid." -msgstr "Zadaný typ hypervizoru je neplatný." - -#: cinder/exception.py:367 -msgid "The instance requires a newer hypervisor version than has been provided." -msgstr "Instance vyžaduje novější verzi hypervizoru, než byla poskytnuta." - -#: cinder/exception.py:372 -#, python-format -msgid "" -"The supplied disk path (%(path)s) already exists, it is expected not to " -"exist." -msgstr "Zadaná cesta disku (%(path)s) již existuje, očekává se, že nebude." - -#: cinder/exception.py:377 -#, python-format -msgid "The supplied device path (%(path)s) is invalid." -msgstr "Zadaná cesta zařízení (%(path)s) je neplatná." - -#: cinder/exception.py:381 -#, fuzzy, python-format -msgid "The supplied device (%(device)s) is busy." -msgstr "Zadaná cesta zařízení (%(path)s) je neplatná." - -#: cinder/exception.py:385 -msgid "Unacceptable CPU info" -msgstr "Nepřijatelné informace o procesoru" - -#: cinder/exception.py:389 -#, python-format -msgid "%(address)s is not a valid IP v4/6 address." -msgstr "%(address)s není platná IP adresa v4/6." - -#: cinder/exception.py:393 -#, python-format -msgid "" -"VLAN tag is not appropriate for the port group %(bridge)s. Expected VLAN " -"tag is %(tag)s, but the one associated with the port group is %(pgroup)s." -msgstr "" -"Značka VLAN není vhodná pro skupinu portů %(bridge)s. 
Očekávaná značka " -"VLAN je %(tag)s, ale značka připojená ke skupině portů je %(pgroup)s." - -#: cinder/exception.py:399 -#, python-format -msgid "" -"vSwitch which contains the port group %(bridge)s is not associated with " -"the desired physical adapter. Expected vSwitch is %(expected)s, but the " -"one associated is %(actual)s." -msgstr "" -"vSwitch který obsahuje skupinu portů %(bridge)s není spojen s požadovaným" -" fyzickým adaptérem. Očekávaný vSwitch je %(expected)s, ale spojený je " -"%(actual)s." - -#: cinder/exception.py:406 -#, python-format -msgid "Disk format %(disk_format)s is not acceptable" -msgstr "Formát disku %(disk_format)s není přijatelný" - -#: cinder/exception.py:410 +#: cinder/exception.py:210 #, python-format msgid "Image %(image_id)s is unacceptable: %(reason)s" msgstr "Obraz %(image_id)s je nepřijatelný: %(reason)s" -#: cinder/exception.py:414 +#: cinder/exception.py:214 #, python-format -msgid "Instance %(instance_id)s is unacceptable: %(reason)s" -msgstr "Instance %(instance_id)s je nepřijatelná: %(reason)s" +msgid "The device in the path %(path)s is unavailable: %(reason)s" +msgstr "" -#: cinder/exception.py:418 +#: cinder/exception.py:218 #, python-format -msgid "Ec2 id %(ec2_id)s is unacceptable." -msgstr "Id Ec2 %(ec2_id)s je nepřijatelné." +msgid "Expected a uuid but received %(uuid)s." +msgstr "" -#: cinder/exception.py:422 +#: cinder/exception.py:222 cinder/brick/exception.py:68 msgid "Resource could not be found." msgstr "Zdroj nemohl být nalezen." -#: cinder/exception.py:427 -#, python-format -msgid "Required flag %(flag)s not set." -msgstr "Požadovaný příznak %(flag)s není nastaven." - -#: cinder/exception.py:431 +#: cinder/exception.py:228 #, python-format msgid "Volume %(volume_id)s could not be found." msgstr "Svazek %(volume_id)s nemohl být nastaven." -#: cinder/exception.py:435 -#, fuzzy, python-format -msgid "Unable to locate account %(account_name)s on Solidfire device" -msgstr "Nelze nalézt účet %(account_name) on zařízení Solidfire" +#: cinder/exception.py:232 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "Svazek %(volume_id)s nemá žádná metadata s klíčem %(metadata_key)s." -#: cinder/exception.py:440 +#: cinder/exception.py:237 #, python-format -msgid "Volume not found for instance %(instance_id)s." -msgstr "Svazek není nalezen v instanci %(instance_id)s." +msgid "" +"Volume %(volume_id)s has no administration metadata with key " +"%(metadata_key)s." +msgstr "" -#: cinder/exception.py:444 +#: cinder/exception.py:242 #, python-format -msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." -msgstr "Svazek %(volume_id)s nemá žádná metadata s klíčem %(metadata_key)s." +msgid "Invalid metadata: %(reason)s" +msgstr "" -#: cinder/exception.py:449 -msgid "Zero volume types found." -msgstr "Nalezeno nula typů svazku." +#: cinder/exception.py:246 +#, python-format +msgid "Invalid metadata size: %(reason)s" +msgstr "" -#: cinder/exception.py:453 +#: cinder/exception.py:250 +#, fuzzy, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "Instance %(instance_id)s nemá žádná metadata s klíčem %(metadata_key)s." + +#: cinder/exception.py:255 #, python-format msgid "Volume type %(volume_type_id)s could not be found." msgstr "Typ svazku %(volume_type_id)s nemohl být nalezen." -#: cinder/exception.py:457 +#: cinder/exception.py:259 #, python-format msgid "Volume type with name %(volume_type_name)s could not be found." 
msgstr "Typ svazku s názvem %(volume_type_name)s nemohl být nalezen." -#: cinder/exception.py:462 +#: cinder/exception.py:264 #, python-format msgid "" "Volume Type %(volume_type_id)s has no extra specs with key " @@ -426,1655 +220,1223 @@ msgstr "" "Typ svazku %(volume_type_id)s nemá žádné dodatečné parametry s klíčem " "%(extra_specs_key)s." -#: cinder/exception.py:467 +#: cinder/exception.py:269 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s deletion is not allowed with volumes " +"present with the type." +msgstr "" + +#: cinder/exception.py:274 #, python-format msgid "Snapshot %(snapshot_id)s could not be found." msgstr "Snímek %(snapshot_id)s nemohl být nalezen." -#: cinder/exception.py:471 +#: cinder/exception.py:278 #, python-format msgid "deleting volume %(volume_name)s that has snapshot" msgstr "mazání svazku %(volume_name)s který má snímek" -#: cinder/exception.py:475 +#: cinder/exception.py:282 #, python-format msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" msgstr "" -#: cinder/exception.py:480 +#: cinder/exception.py:287 #, python-format msgid "No target id found for volume %(volume_id)s." msgstr "Nenalezeno žádné cílové id ve svazku %(volume_id)s." -#: cinder/exception.py:484 -#, python-format -msgid "No disk at %(location)s" -msgstr "Źádný disk ve %(location)s" - -#: cinder/exception.py:488 -#, python-format -msgid "Could not find a handler for %(driver_type)s volume." -msgstr "Nelze najít obslužnou rutinu pro svazek %(driver_type)s." - -#: cinder/exception.py:492 +#: cinder/exception.py:291 #, python-format msgid "Invalid image href %(image_href)s." msgstr "Neplatný href %(image_href)s obrazu." -#: cinder/exception.py:496 -msgid "" -"Some images have been stored via hrefs. This version of the api does not " -"support displaying image hrefs." -msgstr "" -"Některé obrazy byly uloženy pomocí href. Tato verze api nepodporuje " -"zobrazování href obrazů." - -#: cinder/exception.py:501 +#: cinder/exception.py:295 #, python-format msgid "Image %(image_id)s could not be found." msgstr "Obraz %(image_id)s nemohl být nalezen." -#: cinder/exception.py:505 +#: cinder/exception.py:299 #, python-format -msgid "Kernel not found for image %(image_id)s." -msgstr "Kernel nenalezen v obrazu %(image_id)s." +msgid "Service %(service_id)s could not be found." +msgstr "Služba %(service_id)s nemohla být nalezena." -#: cinder/exception.py:509 +#: cinder/exception.py:303 #, python-format -msgid "User %(user_id)s could not be found." -msgstr "Uživatel %(user_id)s nemohl být nalezen." +msgid "Host %(host)s could not be found." +msgstr "Hostitel %(host)s nemohl být nalezen." -#: cinder/exception.py:513 +#: cinder/exception.py:307 #, python-format -msgid "Project %(project_id)s could not be found." -msgstr "Projekt %(project_id)s nemohl být nalezen." +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "Filtr hostitelů plácinderče %(filter_name)s nemohl být nalezen." -#: cinder/exception.py:517 -#, python-format -msgid "User %(user_id)s is not a member of project %(project_id)s." -msgstr "Uživatel %(user_id)s není členem projektu %(project_id)s." +#: cinder/exception.py:311 +#, fuzzy, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "Filtr hostitelů plácinderče %(filter_name)s nemohl být nalezen." -#: cinder/exception.py:521 +#: cinder/exception.py:315 #, python-format -msgid "Role %(role_id)s could not be found." -msgstr "Role %(role_id)s nemohla být nalezena." 
- -#: cinder/exception.py:525 -msgid "Cannot find SR to read/write VDI." -msgstr "Nelze najít SR pro čtení/zápis VDI." +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "Nelze najít binární soubor %(binary)s v hostiteli %(host)s." -#: cinder/exception.py:529 +#: cinder/exception.py:319 #, python-format -msgid "%(req)s is required to create a network." -msgstr "%(req)s je vyžadováno pro vytvoření sítě." +msgid "Invalid reservation expiration %(expire)s." +msgstr "" -#: cinder/exception.py:533 +#: cinder/exception.py:323 #, python-format -msgid "Network %(network_id)s could not be found." -msgstr "Síť %(network_id)s nemohla být nalezena." +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" -#: cinder/exception.py:537 -#, python-format -msgid "Network could not be found for bridge %(bridge)s" -msgstr "Síť nemohla být pro most %(bridge)s nalezena." +#: cinder/exception.py:328 +msgid "Quota could not be found" +msgstr "Kvóta nemohla být nalezena." -#: cinder/exception.py:541 +#: cinder/exception.py:332 #, python-format -msgid "Network could not be found for uuid %(uuid)s" -msgstr "Síť nemohla být pro uuid %(uuid)s nalezena." +msgid "Unknown quota resources %(unknown)s." +msgstr "" -#: cinder/exception.py:545 +#: cinder/exception.py:336 #, python-format -msgid "Network could not be found with cidr %(cidr)s." -msgstr "Síť nemohla být pro cidr %(cidr)s nalezena." +msgid "Quota for project %(project_id)s could not be found." +msgstr "Kvóta pro projekt %(project_id)s nemohla být nalezena." -#: cinder/exception.py:549 -#, python-format -msgid "Network could not be found for instance %(instance_id)s." -msgstr "Síť nemohla být pro instance %(instance_id)s nalezena." +#: cinder/exception.py:340 +#, fuzzy, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "Třída %(class_name)s nemohla být nalezena: %(exception)s" + +#: cinder/exception.py:344 +#, fuzzy, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "Kvóta pro projekt %(project_id)s nemohla být nalezena." -#: cinder/exception.py:553 -msgid "No networks defined." -msgstr "Źádné sítě nejsou určeny." +#: cinder/exception.py:348 +#, fuzzy, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "Uživatel %(user_id)s nemohl být nalezen." -#: cinder/exception.py:557 -#, python-format -msgid "" -"Either Network uuid %(network_uuid)s is not present or is not assigned to" -" the project %(project_id)s." -msgstr "" -"Buď síť uuid %(network_uuid)s není přítomna nebo je přidělena projektu " -"%(project_id)s." +#: cinder/exception.py:352 +#, fuzzy, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "Kvóta překročena" -#: cinder/exception.py:562 +#: cinder/exception.py:356 #, python-format -msgid "Host is not set to the network (%(network_id)s)." -msgstr "Hostitel není v síti (%(network_id)s) nastaven." +msgid "File %(file_path)s could not be found." +msgstr "Soubor %(file_path)s nemohl být nalezen." + +#: cinder/exception.py:365 +#, fuzzy, python-format +msgid "Volume Type %(id)s already exists." +msgstr "Typ svazku %(name)s již existuje." -#: cinder/exception.py:566 +#: cinder/exception.py:369 #, python-format -msgid "Network %(network)s has active ports, cannot delete." +msgid "Volume type encryption for type %(type_id)s already exists." msgstr "" -#: cinder/exception.py:570 -msgid "Could not find the datastore reference(s) which the VM uses." 
-msgstr "Nelze najít odkazy datového úložiště, který VM používá." - -#: cinder/exception.py:574 +#: cinder/exception.py:373 #, python-format -msgid "No fixed IP associated with id %(id)s." -msgstr "Žádná pevná IP není spojena s %(id)s." +msgid "Malformed message body: %(reason)s" +msgstr "Poškozené tělo zprávy: %(reason)s" -#: cinder/exception.py:578 +#: cinder/exception.py:377 #, python-format -msgid "Fixed ip not found for address %(address)s." -msgstr "Pevná ip není pro adresu %(address)s nalezena." +msgid "Could not find config at %(path)s" +msgstr "Nelze najít nastavení v %(path)s" + +#: cinder/exception.py:381 +#, fuzzy, python-format +msgid "Could not find parameter %(param)s" +msgstr "Nelze najít nastavení v %(path)s" -#: cinder/exception.py:582 +#: cinder/exception.py:385 #, python-format -msgid "Instance %(instance_id)s has zero fixed ips." -msgstr "Instance %(instance_id)s má nula pevných ip." +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "Nelze načíst aplikaci vložení '%(name)s' z %(path)s" -#: cinder/exception.py:586 +#: cinder/exception.py:389 #, python-format -msgid "Network host %(host)s has zero fixed ips in network %(network_id)s." -msgstr "Síťový hostitel %(host)s má nula pevných ip v síti %(network_id)s." +msgid "No valid host was found. %(reason)s" +msgstr "Nebyl nalezen žádný platný hostitel. %(reason)s" -#: cinder/exception.py:591 +#: cinder/exception.py:398 #, python-format -msgid "Instance %(instance_id)s doesn't have fixed ip '%(ip)s'." -msgstr "Instance %(instance_id)s nemá pevnou ip '%(ip)s'." +msgid "Host %(host)s is not up or doesn't exist." +msgstr "Hostitel %(host)s není dostupný nebo neexistuje." -#: cinder/exception.py:595 +#: cinder/exception.py:402 #, python-format -msgid "Host %(host)s has zero fixed ips." -msgstr "Hostitel %(host)s má nula pevných ip." +msgid "Quota exceeded: code=%(code)s" +msgstr "" -#: cinder/exception.py:599 +#: cinder/exception.py:409 #, python-format msgid "" -"Fixed IP address (%(address)s) does not exist in network " -"(%(network_uuid)s)." -msgstr "Pevná IP adresa (%(address)s) v síti (%(network_uuid)s) neexistuje." +"Requested volume or snapshot exceeds allowed Gigabytes quota. Requested " +"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." +msgstr "" -#: cinder/exception.py:604 +#: cinder/exception.py:415 #, python-format -msgid "Fixed IP address %(address)s is already in use." -msgstr "Pevná IP adresa (%(address)s) je již používána." +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" -#: cinder/exception.py:608 +#: cinder/exception.py:419 #, python-format -msgid "Fixed IP address %(address)s is invalid." -msgstr "Pevná IP adresa (%(address)s) je neplatná." - -#: cinder/exception.py:612 -msgid "Zero fixed ips available." -msgstr "Je dostupno nula pevných ip." +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" -#: cinder/exception.py:616 -msgid "Zero fixed ips could be found." -msgstr "Bylo nalezeno nula pevných ip." +#: cinder/exception.py:423 +#, fuzzy, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "Zjištěn více než jeden svazek s názvem %(vol_name)" -#: cinder/exception.py:620 +#: cinder/exception.py:427 #, python-format -msgid "Floating ip not found for id %(id)s." -msgstr "Plovoucí ip není nalezena pro id %(id)s." 
+msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" +"Nelze vytvořit typ_svazku s názvem %(name)s a specifikacemi " +"%(extra_specs)s" -#: cinder/exception.py:624 +#: cinder/exception.py:432 #, python-format -msgid "The DNS entry %(name)s already exists in domain %(domain)s." -msgstr "Záznam DNS %(name)s již v doméně %(domain)s existuje." +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" -#: cinder/exception.py:628 -#, python-format -msgid "Floating ip not found for address %(address)s." -msgstr "Plovoucí ip nenalezeno pro adresu %(address)s." +#: cinder/exception.py:436 +#, fuzzy, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "Poškozené tělo zprávy: %(reason)s" -#: cinder/exception.py:632 +#: cinder/exception.py:440 #, python-format -msgid "Floating ip not found for host %(host)s." -msgstr "Plovoucí ip nenalezeno pro hostitele %(host)s." - -#: cinder/exception.py:636 -msgid "Zero floating ips available." -msgstr "Je dostupných nula plovoucích ip." +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" -#: cinder/exception.py:640 +#: cinder/exception.py:444 #, python-format -msgid "Floating ip %(address)s is associated." -msgstr "Plovoucí ip %(address)s je přidružena." +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" -#: cinder/exception.py:644 +#: cinder/exception.py:449 #, python-format -msgid "Floating ip %(address)s is not associated." -msgstr "Plovoucí ip %(address)s není přidružena." - -#: cinder/exception.py:648 -msgid "Zero floating ips exist." -msgstr "Existuje nula plovoucích ip." +msgid "Glance metadata for volume/snapshot %(id)s cannot be found." +msgstr "" -#: cinder/exception.py:652 +#: cinder/exception.py:453 #, python-format -msgid "Interface %(interface)s not found." -msgstr "Rozhraní %(interface)s nenalezeno." +msgid "Failed to export for volume: %(reason)s" +msgstr "" -#: cinder/exception.py:656 +#: cinder/exception.py:457 #, python-format -msgid "Keypair %(name)s not found for user %(user_id)s" -msgstr "Dvojice klíčů %(name)s nenalezena pro uživatele %(user_id)s" +msgid "Failed to create metadata for volume: %(reason)s" +msgstr "" -#: cinder/exception.py:660 +#: cinder/exception.py:461 #, python-format -msgid "Certificate %(certificate_id)s not found." -msgstr "Certifikát %(certificate_id)s nenalezen." +msgid "Failed to update metadata for volume: %(reason)s" +msgstr "" -#: cinder/exception.py:664 +#: cinder/exception.py:465 #, python-format -msgid "Service %(service_id)s could not be found." -msgstr "Služba %(service_id)s nemohla být nalezena." +msgid "Failed to copy metadata to volume: %(reason)s" +msgstr "" -#: cinder/exception.py:668 -#, python-format -msgid "Host %(host)s could not be found." -msgstr "Hostitel %(host)s nemohl být nalezen." +#: cinder/exception.py:469 +#, fuzzy, python-format +msgid "Failed to copy image to volume: %(reason)s" +msgstr "Poškozené tělo zprávy: %(reason)s" -#: cinder/exception.py:672 -#, python-format -msgid "Compute host %(host)s could not be found." -msgstr "Hostitel výpočtu %(host)s nemohl být nalezen." +#: cinder/exception.py:473 +msgid "Invalid Ceph args provided for backup rbd operation" +msgstr "" -#: cinder/exception.py:676 -#, python-format -msgid "Could not find binary %(binary)s on host %(host)s." -msgstr "Nelze najít binární soubor %(binary)s v hostiteli %(host)s." 
+#: cinder/exception.py:477 +msgid "An error has occurred during backup operation" +msgstr "" -#: cinder/exception.py:680 -#, python-format -msgid "Auth token %(token)s could not be found." -msgstr "Známka opeávnění %(token)s nemohla být nalezena." +#: cinder/exception.py:481 +msgid "Backup RBD operation failed" +msgstr "" -#: cinder/exception.py:684 -#, python-format -msgid "Access Key %(access_key)s could not be found." -msgstr "Přístupový klíč %(access_key)s nemohl být nalezen." +#: cinder/exception.py:485 +#, fuzzy, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "Skupina LDAP %(group_id)s nemohla být nalezena." -#: cinder/exception.py:688 -msgid "Quota could not be found" -msgstr "Kvóta nemohla být nalezena." +#: cinder/exception.py:489 +msgid "Failed to identify volume backend." +msgstr "" + +#: cinder/exception.py:493 +#, fuzzy, python-format +msgid "Invalid backup: %(reason)s" +msgstr "Neplatná podpůrná vrstva: %s" -#: cinder/exception.py:692 +#: cinder/exception.py:497 #, python-format -msgid "Quota for project %(project_id)s could not be found." -msgstr "Kvóta pro projekt %(project_id)s nemohla být nalezena." +msgid "Connection to swift failed: %(reason)s" +msgstr "" -#: cinder/exception.py:696 +#: cinder/exception.py:501 #, fuzzy, python-format -msgid "Quota class %(class_name)s could not be found." -msgstr "Třída %(class_name)s nemohla být nalezena: %(exception)s" +msgid "Transfer %(transfer_id)s could not be found." +msgstr "Obraz %(image_id)s nemohl být nalezen." -#: cinder/exception.py:700 +#: cinder/exception.py:505 #, python-format -msgid "Security group %(security_group_id)s not found." -msgstr "Bezpečnostní skupina %(security_group_id)s není nalezena." +msgid "Volume migration failed: %(reason)s" +msgstr "" -#: cinder/exception.py:704 +#: cinder/exception.py:509 #, python-format -msgid "Security group %(security_group_id)s not found for project %(project_id)s." +msgid "SSH command injection detected: %(command)s" msgstr "" -"Bezpečnostní skupina %(security_group_id)s není nalezena v projektu " -"%(project_id)s." -#: cinder/exception.py:709 +#: cinder/exception.py:513 #, python-format -msgid "Security group with rule %(rule_id)s not found." -msgstr "Bezpečnostní skupina s pravidlem %(rule_id)s nenalezena." +msgid "QoS Specs %(specs_id)s already exists." +msgstr "" -#: cinder/exception.py:713 +#: cinder/exception.py:517 #, python-format -msgid "" -"Security group %(security_group_id)s is already associated with the " -"instance %(instance_id)s" +msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." msgstr "" -"Bezpečnostní skupina %(security_group_id)s je již přidružena k instanci " -"%(instance_id)s" -#: cinder/exception.py:718 +#: cinder/exception.py:522 #, python-format -msgid "" -"Security group %(security_group_id)s is not associated with the instance " -"%(instance_id)s" +msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." msgstr "" -"Bezpečnostní skupina %(security_group_id)s není přidružena k instanci " -"%(instance_id)s" -#: cinder/exception.py:723 +#: cinder/exception.py:527 #, python-format -msgid "Migration %(migration_id)s could not be found." -msgstr "Přesun %(migration_id)s nemohl být nalezen." +msgid "No such QoS spec %(specs_id)s." +msgstr "" -#: cinder/exception.py:727 +#: cinder/exception.py:531 #, python-format -msgid "Migration not found for instance %(instance_id)s with status %(status)s." -msgstr "Přesun nenalezen v instanci %(instance_id)s se stavem %(status)s." 
+msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" -#: cinder/exception.py:732 +#: cinder/exception.py:536 #, python-format -msgid "Console pool %(pool_id)s could not be found." -msgstr "Zásoba konzole %(pool_id)s nemohla být nalezena." +msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" -#: cinder/exception.py:736 +#: cinder/exception.py:541 #, python-format -msgid "" -"Console pool of type %(console_type)s for compute host %(compute_host)s " -"on proxy host %(host)s not found." +msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." msgstr "" -"Zásoba konzole typu %(console_type)s pro výpočetního hostitele " -"%(compute_host)s v hostitele proxy %(host)s nemohla být nalezena." -#: cinder/exception.py:742 +#: cinder/exception.py:546 #, python-format -msgid "Console %(console_id)s could not be found." -msgstr "Konzole %(console_id)s nemohla být nalezena." +msgid "Invalid qos specs: %(reason)s" +msgstr "" -#: cinder/exception.py:746 +#: cinder/exception.py:550 #, python-format -msgid "Console for instance %(instance_id)s could not be found." -msgstr "Konzole pro instanci %(instance_id)s nemohla být nalezena." +msgid "QoS Specs %(specs_id)s is still associated with entities." +msgstr "" -#: cinder/exception.py:750 +#: cinder/exception.py:554 #, python-format -msgid "" -"Console for instance %(instance_id)s in pool %(pool_id)s could not be " -"found." +msgid "key manager error: %(reason)s" msgstr "" -"Konzole pro instanci %(instance_id)s v zásobě %(pool_id)s nemohla být " -"nalezena." -#: cinder/exception.py:755 -#, python-format -msgid "Invalid console type %(console_type)s " -msgstr "Neplatná konzole typu %(console_type)s " +#: cinder/exception.py:560 +msgid "Coraid Cinder Driver exception." +msgstr "" -#: cinder/exception.py:759 -msgid "Zero instance types found." -msgstr "Nalezeno nula typů instancí." +#: cinder/exception.py:564 +msgid "Failed to encode json data." +msgstr "" -#: cinder/exception.py:763 -#, python-format -msgid "Instance type %(instance_type_id)s could not be found." -msgstr "Instance typu %(instance_type_id)s nemohla být nalezena." +#: cinder/exception.py:568 +msgid "Login on ESM failed." +msgstr "" -#: cinder/exception.py:767 -#, python-format -msgid "Instance type with name %(instance_type_name)s could not be found." -msgstr "Instance typu s názvem %(instance_type_name)s nemohla být nalezena." +#: cinder/exception.py:572 +msgid "Relogin on ESM failed." +msgstr "" -#: cinder/exception.py:772 +#: cinder/exception.py:576 #, python-format -msgid "Flavor %(flavor_id)s could not be found." -msgstr "Konfigurace %(flavor_id)s nemohla být nalezena." - -#: cinder/exception.py:776 -#, fuzzy, python-format -msgid "Cell %(cell_id)s could not be found." -msgstr "Zóna %(zone_id)s nemohla být nalezena." +msgid "Group with name \"%(group_name)s\" not found." +msgstr "" -#: cinder/exception.py:780 +#: cinder/exception.py:580 #, python-format -msgid "Scheduler Host Filter %(filter_name)s could not be found." -msgstr "Filtr hostitelů plácinderče %(filter_name)s nemohl být nalezen." +msgid "ESM configure request failed: %(message)s." +msgstr "" -#: cinder/exception.py:784 +#: cinder/exception.py:584 #, python-format -msgid "Scheduler cost function %(cost_fn_str)s could not be found." -msgstr "Funkce nákladů plácinderče %(cost_fn_str)s nemohla být nalezena." +msgid "Coraid ESM not available with reason: %(reason)s." 
+msgstr "" -#: cinder/exception.py:789 -#, python-format -msgid "Scheduler weight flag not found: %(flag_name)s" -msgstr "Příznak váhy plácinderče nemohl být nalezen: %(flag_name)s" +#: cinder/exception.py:589 +msgid "Zadara Cinder Driver exception." +msgstr "" -#: cinder/exception.py:793 +#: cinder/exception.py:593 #, python-format -msgid "Instance %(instance_id)s has no metadata with key %(metadata_key)s." -msgstr "Instance %(instance_id)s nemá žádná metadata s klíčem %(metadata_key)s." +msgid "Unable to create server object for initiator %(name)s" +msgstr "" -#: cinder/exception.py:798 +#: cinder/exception.py:597 #, python-format -msgid "" -"Instance Type %(instance_type_id)s has no extra specs with key " -"%(extra_specs_key)s." +msgid "Unable to find server object for initiator %(name)s" msgstr "" -"Instance typu %(instance_type_id)s nemá žádné dodatečné specifikace s " -"klíčem %(extra_specs_key)s." -#: cinder/exception.py:803 -msgid "LDAP object could not be found" -msgstr "Objekt LDAP nemohl být nalezen" +#: cinder/exception.py:601 +msgid "Unable to find any active VPSA controller" +msgstr "" -#: cinder/exception.py:807 +#: cinder/exception.py:605 #, python-format -msgid "LDAP user %(user_id)s could not be found." -msgstr "Uživatel LDAP %(user_id)s nemohl být nalezen." +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" -#: cinder/exception.py:811 +#: cinder/exception.py:609 #, python-format -msgid "LDAP group %(group_id)s could not be found." -msgstr "Skupina LDAP %(group_id)s nemohla být nalezena." +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" -#: cinder/exception.py:815 -#, python-format -msgid "LDAP user %(user_id)s is not a member of group %(group_id)s." -msgstr "Uživatel LDAP %(user_id)s není členem skupiny %(group_id)s." +#: cinder/exception.py:613 +#, fuzzy, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "Neplatný stav serveru: %(status)s" -#: cinder/exception.py:819 -#, python-format -msgid "File %(file_path)s could not be found." -msgstr "Soubor %(file_path)s nemohl být nalezen." +#: cinder/exception.py:618 +msgid "Bad response from SolidFire API" +msgstr "Špatná odpověď od SolidFire API" -#: cinder/exception.py:823 -msgid "Zero files could be found." -msgstr "Nalezeno nula souborů." +#: cinder/exception.py:622 +msgid "SolidFire Cinder Driver exception" +msgstr "" -#: cinder/exception.py:827 +#: cinder/exception.py:626 #, python-format -msgid "Virtual switch associated with the network adapter %(adapter)s not found." -msgstr "Nenalezen virtuální přepínač sdružený se síťovým adaptérem %(adapter)s." +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "Chyba v odpovědi SolidFire API: data=%(data)s" -#: cinder/exception.py:832 -#, python-format -msgid "Network adapter %(adapter)s could not be found." -msgstr "Síťový adaptér %(adapter)s nemohl být nalezen." +#: cinder/exception.py:630 +#, fuzzy, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "Nelze nalézt účet %(account_name) on zařízení Solidfire" -#: cinder/exception.py:836 +#: cinder/exception.py:636 #, python-format -msgid "Class %(class_name)s could not be found: %(exception)s" -msgstr "Třída %(class_name)s nemohla být nalezena: %(exception)s" +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:641 +#, fuzzy +msgid "Unknown NFS exception" +msgstr "Vyskytla se neočekávaná výjimka." -#: cinder/exception.py:840 -msgid "Action not allowed." -msgstr "Činnost není povolena." 
+#: cinder/exception.py:645
+msgid "No mounted NFS shares found"
+msgstr ""

-#: cinder/exception.py:844
+#: cinder/exception.py:649 cinder/exception.py:662
 #, python-format
-msgid "Unable to use global role %(role_id)s"
-msgstr "Nelze použít globální roli %(role_id)s"
+msgid "There is no share which can host %(volume_size)sG"
+msgstr ""

-#: cinder/exception.py:848
-msgid "Rotation is not allowed for snapshots"
-msgstr "Střídání není povoleno pro snímky"
+#: cinder/exception.py:654
+#, fuzzy
+msgid "Unknown Gluster exception"
+msgstr "Vyskytla se neočekávaná výjimka."

-#: cinder/exception.py:852
-msgid "Rotation param is required for backup image_type"
-msgstr "Parametr rotation je vyžadován pro backup image_type"
+#: cinder/exception.py:658
+msgid "No mounted Gluster shares found"
+msgstr ""

-#: cinder/exception.py:861
-#, python-format
-msgid "Key pair %(key_name)s already exists."
-msgstr "Dvojice klíčů %(key_name)s již existuje."
+#: cinder/manager.py:133
+msgid "Notifying Schedulers of capabilities ..."
+msgstr "Oznamování schopností plánovačům ..."

-#: cinder/exception.py:865
-#, python-format
-msgid "User %(user)s already exists."
-msgstr "Uživatel %(user)s již existuje."
+#: cinder/policy.py:30
+msgid "JSON file representing policy"
+msgstr "Soubor JSON představující zásady"

-#: cinder/exception.py:869
-#, python-format
-msgid "LDAP user %(user)s already exists."
-msgstr "LDAP uživatel %(user)s již existuje."
+#: cinder/policy.py:33
+msgid "Rule checked when requested rule is not found"
+msgstr "Kontrolované pravidlo, když požadované není nalezeno"

-#: cinder/exception.py:873
+#: cinder/quota.py:105
 #, python-format
-msgid "LDAP group %(group)s already exists."
-msgstr "LDAP skupina %(group)s již existuje."
+msgid ""
+"Default quota for resource: %(res)s is set by the default quota flag: "
+"quota_%(res)s, it is now deprecated. Please use the the default quota "
+"class for default quota."
+msgstr ""

-#: cinder/exception.py:877
+#: cinder/quota.py:748
 #, python-format
-msgid "User %(uid)s is already a member of the group %(group_dn)s"
-msgstr "Uživatel %(uid)s již je členem skupiny %(group_dn)s"
+msgid "Created reservations %s"
+msgstr ""

-#: cinder/exception.py:882
+#: cinder/quota.py:770
 #, python-format
-msgid "Project %(project)s already exists."
-msgstr "Projekt %(project)s již existuje."
+msgid "Failed to commit reservations %s"
+msgstr ""

-#: cinder/exception.py:886
+#: cinder/quota.py:790
 #, python-format
-msgid "Instance %(name)s already exists."
-msgstr "Instance %(name)s již existuje."
+msgid "Failed to roll back reservations %s"
+msgstr ""

-#: cinder/exception.py:890
-#, python-format
-msgid "Instance Type %(name)s already exists."
-msgstr "Instance typu %(name)s již existuje."
+#: cinder/quota.py:876
+msgid "Cannot register resource"
+msgstr ""

-#: cinder/exception.py:894
-#, python-format
-msgid "Volume Type %(name)s already exists."
-msgstr "Typ svazku %(name)s již existuje."
+#: cinder/quota.py:879 +msgid "Cannot register resources" +msgstr "" -#: cinder/exception.py:898 +#: cinder/quota_utils.py:46 #, python-format -msgid "%(path)s is on shared storage: %(reason)s" -msgstr "%(path)s je ve sdíleném úložišti: %(reason)s" - -#: cinder/exception.py:902 -msgid "Migration error" -msgstr "Chyba přesunu" +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume - " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" -#: cinder/exception.py:906 +#: cinder/quota_utils.py:56 cinder/transfer/api.py:168 +#: cinder/volume/flows/api/create_volume.py:520 #, python-format -msgid "Malformed message body: %(reason)s" -msgstr "Poškozené tělo zprávy: %(reason)s" +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" -#: cinder/exception.py:910 -#, python-format -msgid "Could not find config at %(path)s" -msgstr "Nelze najít nastavení v %(path)s" +#: cinder/service.py:95 +#, fuzzy, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "Začínající uzel %(topic)s (verze %(vcs_string)s)" -#: cinder/exception.py:914 +#: cinder/service.py:108 cinder/openstack/common/rpc/service.py:47 #, python-format -msgid "Could not load paste app '%(name)s' from %(path)s" -msgstr "Nelze načíst aplikaci vložení '%(name)s' z %(path)s" +msgid "Creating Consumer connection for Service %s" +msgstr "Vytváření připojení zákazníka pro službu %s" -#: cinder/exception.py:918 -msgid "When resizing, instances must change size!" -msgstr "Při změně velikosti ji musí instance změnit také!" +#: cinder/service.py:148 +#, python-format +msgid "" +"Report interval must be less than service down time. Current config " +"service_down_time: %(service_down_time)s, report_interval for this: " +"service is: %(report_interval)s. Setting global service_down_time to: " +"%(new_down_time)s" +msgstr "" -#: cinder/exception.py:922 -msgid "Image is larger than instance type allows" -msgstr "Obraz je větší než typ instance povoluje" +#: cinder/service.py:216 +msgid "Service killed that has no database entry" +msgstr "Ukončena služba bez záznamu v databázi" -#: cinder/exception.py:926 -msgid "1 or more Zones could not complete the request" -msgstr "1 nebo více Zón nemohlo požadavek dokončit" +#: cinder/service.py:255 +msgid "The service database object disappeared, Recreating it." +msgstr "Objekt databáze služby zmizel, je znovu vytvářen." -#: cinder/exception.py:930 -msgid "Instance type's memory is too small for requested image." -msgstr "Typ paměti instance je pro požadovaný obraz příliš malý." +#: cinder/service.py:270 +msgid "Recovered model server connection!" +msgstr "Obnoveno připojení modelového serveru!" -#: cinder/exception.py:934 -msgid "Instance type's disk is too small for requested image." -msgstr "Typ disku instance je pro požadovaný obraz příliš malý." +#: cinder/service.py:276 +msgid "model server went away" +msgstr "modelový server je nedostupný" -#: cinder/exception.py:938 +#: cinder/service.py:298 #, python-format -msgid "Insufficient free memory on compute node to start %(uuid)s." -msgstr "Pro spuštění %(uuid)s je v uzlu výpočtu nedostatek volné paměti." +msgid "" +"Value of config option %(name)s_workers must be integer greater than 1. " +"Input value ignored." +msgstr "" -#: cinder/exception.py:942 -msgid "Could not fetch bandwidth/cpu/disk metrics for this host." -msgstr "Nelze získat metriky šířky pásma/procesoru/disku pro tohoto hostitele." 
+#: cinder/service.py:373 +msgid "serve() can only be called once" +msgstr "" -#: cinder/exception.py:946 -#, python-format -msgid "No valid host was found. %(reason)s" -msgstr "Nebyl nalezen žádný platný hostitel. %(reason)s" +#: cinder/service.py:379 cinder/openstack/common/service.py:166 +#: cinder/openstack/common/service.py:384 +#, fuzzy +msgid "Full set of CONF:" +msgstr "Úplná sada PŘÍZNAKŮ:" -#: cinder/exception.py:950 +#: cinder/service.py:387 #, python-format -msgid "Host %(host)s is not up or doesn't exist." -msgstr "Hostitel %(host)s není dostupný nebo neexistuje." - -#: cinder/exception.py:954 -msgid "Quota exceeded" -msgstr "Kvóta překročena" - -#: cinder/exception.py:958 -#, fuzzy, python-format -msgid "" -"Aggregate %(aggregate_id)s: action '%(action)s' caused an error: " -"%(reason)s." -msgstr "Agregát %(aggregate_id)s nemá hostitele %(host)s." +msgid "%s : FLAG SET " +msgstr "" -#: cinder/exception.py:963 +#: cinder/utils.py:96 #, python-format -msgid "Aggregate %(aggregate_id)s could not be found." -msgstr "Agregát %(aggregate_id)s nemohl být nalezen." +msgid "Can not translate %s to integer." +msgstr "" -#: cinder/exception.py:967 +#: cinder/utils.py:127 #, python-format -msgid "Aggregate %(aggregate_name)s already exists." -msgstr "Agregát %(aggregate_name)s již existuje." +msgid "May specify only one of %s" +msgstr "" -#: cinder/exception.py:971 -#, python-format -msgid "Aggregate %(aggregate_id)s has no host %(host)s." -msgstr "Agregát %(aggregate_id)s nemá hostitele %(host)s." +#: cinder/utils.py:212 +msgid "Specify a password or private_key" +msgstr "" -#: cinder/exception.py:975 +#: cinder/utils.py:228 #, python-format -msgid "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s." -msgstr "Agregát %(aggregate_id)s nemá žádná metadata s klíčem %(metadata_key)s." +msgid "Error connecting via ssh: %s" +msgstr "" -#: cinder/exception.py:980 +#: cinder/utils.py:412 #, python-format -msgid "Host %(host)s already member of another aggregate." -msgstr "Hostitel %(host)s již je členem jiného agregátu." +msgid "Invalid backend: %s" +msgstr "Neplatná podpůrná vrstva: %s" -#: cinder/exception.py:984 +#: cinder/utils.py:423 #, python-format -msgid "Aggregate %(aggregate_id)s already has host %(host)s." -msgstr "Agregát %(aggregate_id)s již má hostitele %(host)s." 
- -#: cinder/exception.py:988 -#, fuzzy, python-format -msgid "Detected more than one volume with name %(vol_name)s" -msgstr "Zjištěn více než jeden svazek s názvem %(vol_name)" +msgid "backend %s" +msgstr "podpůrná vrstva: %s" -#: cinder/exception.py:992 +#: cinder/utils.py:698 #, python-format -msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgid "Could not remove tmpdir: %s" msgstr "" -"Nelze vytvořit typ_svazku s názvem %(name)s a specifikacemi " -"%(extra_specs)s" - -#: cinder/exception.py:997 -msgid "Unable to create instance type" -msgstr "Nelze vytvořit typ instance" - -#: cinder/exception.py:1001 -msgid "Bad response from SolidFire API" -msgstr "Špatná odpověď od SolidFire API" -#: cinder/exception.py:1005 +#: cinder/utils.py:759 #, python-format -msgid "Error in SolidFire API response: status=%(status)s" -msgstr "Chyba v odpovědi SolidFire API: stav=%(status)s" +msgid "Volume driver %s not initialized" +msgstr "" -#: cinder/exception.py:1009 +#: cinder/wsgi.py:127 cinder/openstack/common/sslutils.py:50 #, python-format -msgid "Error in SolidFire API response: data=%(data)s" -msgstr "Chyba v odpovědi SolidFire API: data=%(data)s" - -#: cinder/exception.py:1013 -#, fuzzy, python-format -msgid "Detected existing vlan with id %(vlan)d" -msgstr "Zjišťěna existující vlan s id %(vlan)" +msgid "Unable to find cert_file : %s" +msgstr "" -#: cinder/exception.py:1017 +#: cinder/wsgi.py:130 cinder/openstack/common/sslutils.py:53 #, python-format -msgid "Instance %(instance_id)s could not be found." -msgstr "Instance %(instance_id)s nemohla být nastavena." +msgid "Unable to find ca_file : %s" +msgstr "" -#: cinder/exception.py:1021 +#: cinder/wsgi.py:133 cinder/openstack/common/sslutils.py:56 #, python-format -msgid "Invalid id: %(val)s (expecting \"i-...\")." +msgid "Unable to find key_file : %s" msgstr "" -#: cinder/exception.py:1025 -#, fuzzy, python-format -msgid "Could not fetch image %(image)s" -msgstr "Kernel nenalezen v obrazu %(image_id)s." - -#: cinder/log.py:315 -#, python-format -msgid "syslog facility must be one of: %s" -msgstr "zařízení záznamu systému musí být jedno z: %s" +#: cinder/wsgi.py:136 cinder/openstack/common/sslutils.py:59 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" -#: cinder/manager.py:146 +#: cinder/wsgi.py:169 #, python-format -msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" msgstr "" -"Přeskakování %(full_task_name)s, zbývá %(ticks_to_skip)s tiků do dalšího " -"spuštění" -#: cinder/manager.py:152 +#: cinder/wsgi.py:206 #, python-format -msgid "Running periodic task %(full_task_name)s" -msgstr "Spuštění pravidelné úlohy %(full_task_name)s" +msgid "Started %(name)s on %(host)s:%(port)s" +msgstr "" -#: cinder/manager.py:159 -#, python-format -msgid "Error during %(full_task_name)s: %(e)s" -msgstr "Chyba při %(full_task_name)s: %(e)s" +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "Zastavování serveru WSGI." -#: cinder/manager.py:203 -msgid "Notifying Schedulers of capabilities ..." -msgstr "Oznamování schopností plácinderčům ..." +#: cinder/wsgi.py:244 +msgid "WSGI server has stopped." +msgstr "Server WSGI byl zastaven." 
-#: cinder/policy.py:30 -msgid "JSON file representing policy" -msgstr "Soubor JSON představující zásady" +#: cinder/wsgi.py:313 +msgid "You must implement __call__" +msgstr "Musíte zavést __call__" -#: cinder/policy.py:33 -msgid "Rule checked when requested rule is not found" -msgstr "Kontrolované pravidlo, když požadované není nalezeno" +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" -#: cinder/service.py:137 -msgid "SIGTERM received" +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." msgstr "" -#: cinder/service.py:177 -#, python-format -msgid "Starting %(topic)s node (version %(vcs_string)s)" -msgstr "Začínající uzel %(topic)s (verze %(vcs_string)s)" +#: cinder/api/common.py:92 cinder/api/common.py:126 cinder/volume/api.py:266 +msgid "limit param must be an integer" +msgstr "parametr limit musí být celé číslo" -#: cinder/service.py:195 -#, python-format -msgid "Creating Consumer connection for Service %s" -msgstr "Vytváření připojení zákazníka pro službu %s" +#: cinder/api/common.py:95 cinder/api/common.py:130 cinder/volume/api.py:263 +msgid "limit param must be positive" +msgstr "parametr limit musí být kladný" -#: cinder/service.py:282 -msgid "Service killed that has no database entry" -msgstr "Ukončena služba bez záznamu v databázi" +#: cinder/api/common.py:120 +msgid "offset param must be an integer" +msgstr "parametr offset musí být celé číslo" -#: cinder/service.py:319 -msgid "The service database object disappeared, Recreating it." -msgstr "Objekt databáze služby zmizel, je znovu vytvářen." +#: cinder/api/common.py:134 +msgid "offset param must be positive" +msgstr "parametr offset musí být kladný" -#: cinder/service.py:334 -msgid "Recovered model server connection!" -msgstr "Obnoveno připojení modelového serveru!" +#: cinder/api/common.py:162 +#, python-format +msgid "marker [%s] not found" +msgstr "značka [%s] nenalezena" -#: cinder/service.py:340 -msgid "model server went away" -msgstr "modelový server je nedostupný" +#: cinder/api/common.py:189 +#, python-format +msgid "href %s does not contain version" +msgstr "href %s neobsahuje verzi" -#: cinder/service.py:433 -msgid "Full set of FLAGS:" -msgstr "Úplná sada PŘÍZNAKŮ:" +#: cinder/api/extensions.py:182 +msgid "Initializing extension manager." +msgstr "Zavádění správce rozšíření." 
-#: cinder/service.py:440
+#: cinder/api/extensions.py:197
 #, python-format
-msgid "%(flag)s : FLAG SET "
-msgstr "%(flag)s : SADA PŽÍZNAKŮ "
+msgid "Loaded extension: %s"
+msgstr "Načteno rozšíření: %s"

-#: cinder/utils.py:79
+#: cinder/api/extensions.py:235
 #, python-format
-msgid "Inner Exception: %s"
-msgstr "Vnitřní výjimka: %s"
+msgid "Ext name: %s"
+msgstr "Název roz: %s"

-#: cinder/utils.py:165
+#: cinder/api/extensions.py:236
 #, python-format
-msgid "Fetching %s"
-msgstr "Získávání %s"
+msgid "Ext alias: %s"
+msgstr "Přezdívka roz: %s"

-#: cinder/utils.py:210
+#: cinder/api/extensions.py:237
 #, python-format
-msgid "Got unknown keyword args to utils.execute: %r"
-msgstr "Získány neznámé argumenty klíčového slova pro utils.execute: %r"
-
-#: cinder/utils.py:220
-#, python-format
-msgid "Running cmd (subprocess): %s"
-msgstr "Spouštění příkazu (podproces): %s"
-
-#: cinder/utils.py:236 cinder/utils.py:315
-#, python-format
-msgid "Result was %s"
-msgstr "Výsledek byl %s"
-
-#: cinder/utils.py:249
-#, python-format
-msgid "%r failed. Retrying."
-msgstr "%r selhalo. Opakování."
-
-#: cinder/utils.py:291
-#, python-format
-msgid "Running cmd (SSH): %s"
-msgstr "Spouštění příkazu (SSH): %s"
-
-#: cinder/utils.py:293
-msgid "Environment not supported over SSH"
-msgstr "Prostředí není podporováno přes SSH"
-
-#: cinder/utils.py:297
-msgid "process_input not supported over SSH"
-msgstr "process_input není podporován přes SSH"
+msgid "Ext description: %s"
+msgstr "Popis roz: %s"

-#: cinder/utils.py:352
+#: cinder/api/extensions.py:239
 #, python-format
-msgid "debug in callback: %s"
-msgstr "ladění ve zpětném volání: %s"
+msgid "Ext namespace: %s"
+msgstr "Jmenný prostor roz: %s"

-#: cinder/utils.py:534
+#: cinder/api/extensions.py:240
 #, python-format
-msgid "Link Local address is not found.:%s"
-msgstr "Adresa místního spojení nenalezena.: %s"
+msgid "Ext updated: %s"
+msgstr "Roz aktualizováno: %s"

-#: cinder/utils.py:537
+#: cinder/api/extensions.py:242
 #, python-format
-msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s"
-msgstr "Nelze získat IP místního spojení %(interface)s :%(ex)s"
+msgid "Exception loading extension: %s"
+msgstr "Výjimka při načítání rozšíření: %s"

-#: cinder/utils.py:648
+#: cinder/api/extensions.py:256
 #, python-format
-msgid "Invalid backend: %s"
-msgstr "Neplatná podpůrná vrstva: %s"
+msgid "Loading extension %s"
+msgstr "Načítání rozšíření %s"

-#: cinder/utils.py:659
+#: cinder/api/extensions.py:262
 #, python-format
-msgid "backend %s"
-msgstr "podpůrná vrstva: %s"
-
-#: cinder/utils.py:709
-msgid "in looping call"
-msgstr "v opakujícím volání"
-
-#: cinder/utils.py:927
-#, fuzzy, python-format
-msgid "Attempting to grab semaphore \"%(lock)s\" for method \"%(method)s\"..."
-msgstr "Pokus získat semafor \"%(lock)s\" pro zámek metody \"%(method)s\""
-
-#: cinder/utils.py:931
-#, fuzzy, python-format
-msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..."
-msgstr "Získán semafor \"%(lock)s\" pro zámek metody \"%(method)s\""
-
-#: cinder/utils.py:935
-#, fuzzy, python-format
-msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..."
-msgstr "Pokus o získání zámku souboru \"%(lock)s\" pro zámek metody \"%(method)s\""
-
-#: cinder/utils.py:942
-#, fuzzy, python-format
-msgid "Got file lock \"%(lock)s\" for method \"%(method)s\"..."
-msgstr "Získán zámek souboru \"%(lock)s\" pro zámek metody \"%(method)s\"" +msgid "Calling extension factory %s" +msgstr "Volání továrny rozšíření %s" -#: cinder/utils.py:1001 +#: cinder/api/extensions.py:276 #, python-format -msgid "Found sentinel %(filename)s for pid %(pid)s" +msgid "osapi_volume_extension is set to deprecated path: %s" msgstr "" -#: cinder/utils.py:1008 +#: cinder/api/extensions.py:278 #, python-format -msgid "Cleaned sentinel %(filename)s for pid %(pid)s" +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" msgstr "" -#: cinder/utils.py:1023 +#: cinder/api/extensions.py:287 #, python-format -msgid "Found lockfile %(file)s with link count %(count)d" -msgstr "" +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "Nelze načít rozšížení %(ext_factory)s: %(exc)s" -#: cinder/utils.py:1028 +#: cinder/api/extensions.py:356 #, python-format -msgid "Cleaned lockfile %(file)s with link count %(count)d" -msgstr "" +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "Nelze načíst rozšíření %(classpath)s: %(exc)s" -#: cinder/utils.py:1138 +#: cinder/api/extensions.py:381 #, python-format -msgid "Expected object of type: %s" -msgstr "Očekáván objekt typu: %s" +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "Nelze načíst rozšíření %(ext_name)s: %(exc)s" -#: cinder/utils.py:1169 -#, python-format -msgid "Invalid server_string: %s" -msgstr "Neplatný server_string: %s" +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" -#: cinder/utils.py:1298 -#, python-format -msgid "timefunc: '%(name)s' took %(total_time).2f secs" -msgstr "timefunc: '%(name)s' trvalo %(total_time).2f sek" +#: cinder/api/xmlutil.py:266 +msgid "element is not a child" +msgstr "prvek není podřazený" -#: cinder/utils.py:1330 -msgid "Original exception being dropped" -msgstr "Původní výjimka je zahozena" +#: cinder/api/xmlutil.py:463 +msgid "root element selecting a list" +msgstr "kořenový prvek volí seznam" -#: cinder/utils.py:1461 +#: cinder/api/xmlutil.py:786 #, python-format -msgid "Class %(fullname)s is deprecated: %(msg)s" -msgstr "Třída %(fullname)s je zastaralá: %(msg)s" +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" +"Neshoda stromu šablony; přidávání sluhy %(slavetag)s k pánovi " +"%(mastertag)s" -#: cinder/utils.py:1463 -#, python-format -msgid "Class %(fullname)s is deprecated" -msgstr "Třída %(fullname)s je zastaralá" +#: cinder/api/xmlutil.py:907 +msgid "subclasses must implement construct()!" +msgstr "podtřídy musí zavádět construct()!" 
-#: cinder/utils.py:1495
+#: cinder/api/contrib/admin_actions.py:81
 #, python-format
-msgid "Function %(name)s in %(location)s is deprecated: %(msg)s"
-msgstr "Funkce %(name)s v %(location)s je zastaralá: %(msg)s"
+msgid "Updating %(resource)s '%(id)s' with '%(update)r'"
+msgstr ""

-#: cinder/utils.py:1497
+#: cinder/api/contrib/backups.py:128
 #, python-format
-msgid "Function %(name)s in %(location)s is deprecated"
-msgstr "Funkce %(name)s v %(location)s je zastaralá"
+msgid "show called for member %s"
+msgstr ""

-#: cinder/utils.py:1681
+#: cinder/api/contrib/backups.py:140
 #, python-format
-msgid "Could not remove tmpdir: %s"
+msgid "delete called for member %s"
 msgstr ""

-#: cinder/wsgi.py:97
+#: cinder/api/contrib/backups.py:143
 #, python-format
-msgid "Started %(name)s on %(host)s:%(port)s"
-msgstr "%(name)s spuštěno v %(host)s:%(port)s"
-
-#: cinder/wsgi.py:108
-msgid "Stopping WSGI server."
-msgstr "Zastavování serveru WSGI."
-
-#: cinder/wsgi.py:111
-msgid "Stopping raw TCP server."
-msgstr "Zastavování prostého serveru TCP."
+msgid "Delete backup with id: %s"
+msgstr ""

-#: cinder/wsgi.py:117
+#: cinder/api/contrib/backups.py:185
 #, python-format
-msgid "Starting TCP server %(arg0)s on %(host)s:%(port)s"
-msgstr "Spiuštění serveru TCP %(arg0)s na %(host)s:%(port)s"
-
-#: cinder/wsgi.py:133
-msgid "WSGI server has stopped."
-msgstr "Server WSGI byl zastaven."
-
-#: cinder/wsgi.py:211
-msgid "You must implement __call__"
-msgstr "Musíte zavést __call__"
+msgid "Creating new backup %s"
+msgstr ""

-#: cinder/api/direct.py:218
-msgid "not available"
-msgstr "nedostupné"
+#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:227
+#: cinder/api/contrib/volume_transfer.py:157
+#: cinder/api/contrib/volume_transfer.py:193
+msgid "Incorrect request body format"
+msgstr "Nesprávný formát těla požadavku"

-#: cinder/api/direct.py:299
+#: cinder/api/contrib/backups.py:201
 #, python-format
-msgid "Returned non-serializeable type: %s"
-msgstr "Navrácen neserializovatelný typ: %s"
+msgid "Creating backup of volume %(volume_id)s in container %(container)s"
+msgstr ""

-#: cinder/api/sizelimit.py:51
-msgid "Request is too large."
+#: cinder/api/contrib/backups.py:224
+#, python-format
+msgid "Restoring backup %(backup_id)s (%(body)s)"
 msgstr ""

-#: cinder/api/validator.py:142
+#: cinder/api/contrib/backups.py:234
 #, python-format
-msgid "%(key)s with value %(value)s failed validator %(validator)s"
+msgid "Restoring backup %(backup_id)s to volume %(volume_id)s"
 msgstr ""

-#: cinder/api/ec2/__init__.py:73
-#, python-format
-msgid "%(code)s: %(message)s"
-msgstr "%(code)s: %(message)s"
-
-#: cinder/api/ec2/__init__.py:95
-#, python-format
-msgid "FaultWrapper: %s"
-msgstr "Obalovačchyb: %s"
-
-#: cinder/api/ec2/__init__.py:170
-msgid "Too many failed authentications."
-msgstr "Příliš mnoho ověření selhalo."
-
-#: cinder/api/ec2/__init__.py:180
-#, python-format
-msgid ""
-"Access key %(access_key)s has had %(failures)d failed authentications and"
-" will be locked out for %(lock_mins)d minutes."
-msgstr ""
-"Přístupový klíč %(access_key)s %(failures)d krát selhal při ověření a "
-"bude zablokován na %(lock_mins)d minut."
-
-#: cinder/api/ec2/__init__.py:267
-msgid "Signature not provided"
-msgstr "Podpis není zadán"
-
-#: cinder/api/ec2/__init__.py:271
-msgid "Access key not provided"
-msgstr "Přístupový klíč není zadán"
+#: cinder/api/contrib/extended_snapshot_attributes.py:60
+#, fuzzy
+msgid "Snapshot not found."
+msgstr "Server nenalezen."
-#: cinder/api/ec2/__init__.py:306 cinder/api/ec2/__init__.py:319 -msgid "Failure communicating with keystone" -msgstr "Chyba při komunikaci s keystone" +#: cinder/api/contrib/hosts.py:86 cinder/api/openstack/wsgi.py:245 +msgid "cannot understand XML" +msgstr "XML nelze porozumět" -#: cinder/api/ec2/__init__.py:388 -#, python-format -msgid "Authentication Failure: %s" -msgstr "Selhání ověření: %s" +#: cinder/api/contrib/hosts.py:136 +#, fuzzy, python-format +msgid "Host '%s' could not be found." +msgstr "Hostitel %(host)s nemohl být nalezen." -#: cinder/api/ec2/__init__.py:404 +#: cinder/api/contrib/hosts.py:165 #, python-format -msgid "Authenticated Request For %(uname)s:%(pname)s)" -msgstr "Požadavek na ověření pro %(uname)s:%(pname)s)" +msgid "Invalid status: '%s'" +msgstr "Neplatný stav: '%s'" -#: cinder/api/ec2/__init__.py:435 +#: cinder/api/contrib/hosts.py:168 #, python-format -msgid "action: %s" -msgstr "činnost: %s" +msgid "Invalid update setting: '%s'" +msgstr "Neplatné nastavení aktualizace: '%s'" -#: cinder/api/ec2/__init__.py:437 +#: cinder/api/contrib/hosts.py:180 #, python-format -msgid "arg: %(key)s\t\tval: %(value)s" -msgstr "arg: %(key)s\t\thod: %(value)s" +msgid "Setting host %(host)s to %(state)s." +msgstr "Nastavování hostitele %(host)s na %(state)s." -#: cinder/api/ec2/__init__.py:512 -#, python-format -msgid "Unauthorized request for controller=%(controller)s and action=%(action)s" -msgstr "Neoprávnění požadavek pro ovladač=%(controller)s a činnost=%(action)s" +#: cinder/api/contrib/hosts.py:206 +msgid "Describe-resource is admin only functionality" +msgstr "Describe-resource je funkce pouze pro správce" -#: cinder/api/ec2/__init__.py:584 -#, python-format -msgid "InstanceNotFound raised: %s" -msgstr "Vyvoláno InstanceNenalezena: %s" +#: cinder/api/contrib/hosts.py:214 +msgid "Host not found" +msgstr "" -#: cinder/api/ec2/__init__.py:590 -#, python-format -msgid "VolumeNotFound raised: %s" -msgstr "Vyvoláno SvazekNenalezen: %s" +#: cinder/api/contrib/qos_specs_manage.py:111 +msgid "Please specify a name for QoS specs." +msgstr "" -#: cinder/api/ec2/__init__.py:596 -#, python-format -msgid "SnapshotNotFound raised: %s" -msgstr "Vyvoláno SnímekNenalzen: %s" +#: cinder/api/contrib/qos_specs_manage.py:220 +msgid "Failed to disassociate qos specs." +msgstr "" -#: cinder/api/ec2/__init__.py:602 -#, python-format -msgid "NotFound raised: %s" -msgstr "Vyvoláno Nenalezeno: %s" +#: cinder/api/contrib/qos_specs_manage.py:222 +msgid "Qos specs still in use." +msgstr "" -#: cinder/api/ec2/__init__.py:605 -#, python-format -msgid "EC2APIError raised: %s" -msgstr "Vyvoláno ChybaApiEC2: %s" +#: cinder/api/contrib/qos_specs_manage.py:298 +#: cinder/api/contrib/qos_specs_manage.py:351 +msgid "Volume Type id must not be None." +msgstr "" -#: cinder/api/ec2/__init__.py:613 -#, python-format -msgid "KeyPairExists raised: %s" -msgstr "Vyvoláno DvojiceKlíčůExistuje: %s" +#: cinder/api/contrib/quota_classes.py:72 +msgid "Missing required element quota_class_set in request body." +msgstr "" -#: cinder/api/ec2/__init__.py:617 -#, python-format -msgid "InvalidParameterValue raised: %s" -msgstr "Vyvoláno NeplatnáHodnotaParametru: %s" +#: cinder/api/contrib/quota_classes.py:81 +msgid "Quota class limit must be specified as an integer value." +msgstr "" -#: cinder/api/ec2/__init__.py:621 -#, python-format -msgid "InvalidPortRange raised: %s" -msgstr "Vyvoláno NeplatnýROzsahPortů: %s" +#: cinder/api/contrib/quota_classes.py:85 +msgid "Quota class limit must be -1 or greater." 
+msgstr "" -#: cinder/api/ec2/__init__.py:625 -#, python-format -msgid "NotAuthorized raised: %s" -msgstr "Vyvoláno NeníOprávněno: %s" +#: cinder/api/contrib/quotas.py:60 +msgid "Quota limit must be specified as an integer value." +msgstr "" -#: cinder/api/ec2/__init__.py:629 -#, python-format -msgid "InvalidRequest raised: %s" -msgstr "Vyvoláno NeplatnáOperace: %s" +#: cinder/api/contrib/quotas.py:65 +msgid "Quota limit must be -1 or greater." +msgstr "" -#: cinder/api/ec2/__init__.py:633 -#, fuzzy, python-format -msgid "QuotaError raised: %s" -msgstr "Vyvolána neznámá chyba: %s" +#: cinder/api/contrib/quotas.py:100 +msgid "Missing required element quota_set in request body." +msgstr "" -#: cinder/api/ec2/__init__.py:637 +#: cinder/api/contrib/quotas.py:111 #, python-format -msgid "Invalid id: bogus (expecting \"i-...\"): %s" +msgid "Bad key(s) in quota set: %s" msgstr "" -#: cinder/api/ec2/__init__.py:646 -#, python-format -msgid "Unexpected error raised: %s" -msgstr "Vyvolána neznámá chyba: %s" +#: cinder/api/contrib/scheduler_hints.py:36 +msgid "Malformed scheduler_hints attribute" +msgstr "" -#: cinder/api/ec2/__init__.py:647 -#, python-format -msgid "Environment: %s" -msgstr "Prostředí: %s" +#: cinder/api/contrib/services.py:84 +msgid "" +"Query by service parameter is deprecated. Please use binary parameter " +"instead." +msgstr "" -#: cinder/api/ec2/__init__.py:649 cinder/api/metadata/handler.py:248 -msgid "An unknown error has occurred. Please try your request again." -msgstr "Vyskytla se neznámá chyba. Prosím zopakujte Váš požadavek." +#: cinder/api/contrib/snapshot_actions.py:51 +msgid "'status' must be specified." +msgstr "" -#: cinder/api/ec2/apirequest.py:64 +#: cinder/api/contrib/snapshot_actions.py:61 #, python-format -msgid "Unsupported API request: controller = %(controller)s, action = %(action)s" +msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" msgstr "" -"Nepodporovaný požadavek API: ovladač = %(controller)s, činnost = " -"%(action)s" -#: cinder/api/ec2/cloud.py:336 +#: cinder/api/contrib/snapshot_actions.py:67 #, python-format -msgid "Create snapshot of volume %s" -msgstr "Vytvořit snímek svazku %s" - -#: cinder/api/ec2/cloud.py:372 -#, fuzzy, python-format msgid "" -"Value (%s) for KeyName is invalid. Content limited to Alphanumeric " -"character, spaces, dashes, and underscore." +"Provided snapshot status %(provided)s not allowed for snapshot with " +"status %(current)s." msgstr "" -"Hodnota (%s) parametru GroupName je neplatná. Obsah je omezen na " -"alfanumerické znaky, mezery, pomlčky a podtržítka." -#: cinder/api/ec2/cloud.py:378 -#, fuzzy, python-format -msgid "Value (%s) for Keyname is invalid. Length exceeds maximum of 255." +#: cinder/api/contrib/snapshot_actions.py:79 +msgid "progress must be an integer percentage" msgstr "" -"Hodnota (%s) parametru GroupName je neplatná. Délka překračuje maximum " -"255 znaků." 
-
-#: cinder/api/ec2/cloud.py:382
-#, python-format
-msgid "Create key pair %s"
-msgstr "Vytvořit dvojici klíčů %s"
-
-#: cinder/api/ec2/cloud.py:391
-#, python-format
-msgid "Import key %s"
-msgstr "Imprtovat klíč %s"
+#: cinder/api/contrib/types_extra_specs.py:101
+#, fuzzy
+msgid "Request body empty"
+msgstr "Nesprávný formát těla požadavku"

-#: cinder/api/ec2/cloud.py:409
-#, python-format
-msgid "Delete key pair %s"
-msgstr "Smazat dvojici klíčů %s"
+#: cinder/api/contrib/types_extra_specs.py:105
+#: cinder/api/v1/snapshot_metadata.py:75 cinder/api/v1/volume_metadata.py:75
+#: cinder/api/v2/snapshot_metadata.py:75 cinder/api/v2/volume_metadata.py:74
+msgid "Request body and URI mismatch"
+msgstr "Neshoda s tělem požadavku a URI"

-#: cinder/api/ec2/cloud.py:551
-#, fuzzy, python-format
-msgid "Invalid CIDR"
-msgstr "Neplatný cidr %(cidr)s."
+#: cinder/api/contrib/types_extra_specs.py:108
+#: cinder/api/v1/snapshot_metadata.py:79 cinder/api/v1/volume_metadata.py:79
+#: cinder/api/v2/snapshot_metadata.py:79 cinder/api/v2/volume_metadata.py:78
+msgid "Request body contains too many items"
+msgstr "Tělo požadavku obsahuje příliš mnoho položek"

-#: cinder/api/ec2/cloud.py:639 cinder/api/ec2/cloud.py:693
-#: cinder/api/ec2/cloud.py:800
-msgid "Not enough parameters, need group_name or group_id"
+#: cinder/api/contrib/types_extra_specs.py:150
+msgid ""
+"Key names can only contain alphanumeric characters, underscores, periods,"
+" colons and hyphens."
 msgstr ""

-#: cinder/api/ec2/cloud.py:654
-#: cinder/api/openstack/compute/contrib/security_groups.py:517
+#: cinder/api/contrib/volume_actions.py:99
 #, python-format
-msgid "Revoke security group ingress %s"
+msgid ""
+"Invalid request to attach volume to an instance %(instance_uuid)s and a "
+"host %(host_name)s simultaneously"
 msgstr ""

-#: cinder/api/ec2/cloud.py:666 cinder/api/ec2/cloud.py:719
-#, python-format
-msgid "%s Not enough parameters to build a valid rule"
+#: cinder/api/contrib/volume_actions.py:107
+msgid "Invalid request to attach volume to an invalid target"
 msgstr ""

-#: cinder/api/ec2/cloud.py:684 cinder/api/ec2/cloud.py:744
-msgid "No rule for the specified parameters."
-msgstr "Pro zadané parametry není žádné pravidlo."
+#: cinder/api/contrib/volume_actions.py:111
+msgid ""
+"Invalid request to attach volume with an invalid mode. Attaching mode "
+"should be 'rw' or 'ro'"
+msgstr ""

-#: cinder/api/ec2/cloud.py:708
-#: cinder/api/openstack/compute/contrib/security_groups.py:354
-#, python-format
-msgid "Authorize security group ingress %s"
+#: cinder/api/contrib/volume_actions.py:196
+msgid "Unable to fetch connection information from backend."
 msgstr ""

-#: cinder/api/ec2/cloud.py:725
-#, python-format
-msgid "%s - This rule already exists in group"
+#: cinder/api/contrib/volume_actions.py:216
+msgid "Unable to terminate volume connection from backend."
 msgstr ""

-#: cinder/api/ec2/cloud.py:769
-#, python-format
-msgid ""
-"Value (%s) for parameter GroupName is invalid. Content limited to "
-"Alphanumeric characters, spaces, dashes, and underscores."
+#: cinder/api/contrib/volume_actions.py:229
+msgid "No image_name was specified in request."
 msgstr ""
-"Hodnota (%s) parametru GroupName je neplatná. Obsah je omezen na "
-"alfanumerické znaky, mezery, pomlčky a podtržítka."

-#: cinder/api/ec2/cloud.py:776
-#, python-format
-msgid ""
-"Value (%s) for parameter GroupName is invalid. Length exceeds maximum of "
-"255."
-msgstr ""
-"Hodnota (%s) parametru GroupName je neplatná. Délka překračuje maximum "
-"255 znaků."
- -#: cinder/api/ec2/cloud.py:780 -#: cinder/api/openstack/compute/contrib/security_groups.py:292 -#, python-format -msgid "Create Security Group %s" -msgstr "Vytvořit bezpečnostní skupinu %s" - -#: cinder/api/ec2/cloud.py:783 -#, python-format -msgid "group %s already exists" -msgstr "skupina %s již existuje" -#: cinder/api/ec2/cloud.py:815 -#: cinder/api/openstack/compute/contrib/security_groups.py:245 -#, python-format -msgid "Delete security group %s" -msgstr "Smazat bezpečnostní skupinu %s" +#: cinder/api/contrib/volume_actions.py:237 +msgid "Bad value for 'force' parameter." +msgstr "" -#: cinder/api/ec2/cloud.py:823 cinder/compute/manager.py:1630 -#, python-format -msgid "Get console output for instance %s" -msgstr "Získat výstup konzole pro instanci %s" +#: cinder/api/contrib/volume_actions.py:240 +msgid "'force' is not string or bool." +msgstr "" -#: cinder/api/ec2/cloud.py:894 -#, python-format -msgid "Create volume from snapshot %s" -msgstr "Vytvořit svazek ze snímku %s" +#: cinder/api/contrib/volume_actions.py:280 +msgid "New volume size must be specified as an integer." +msgstr "" -#: cinder/api/ec2/cloud.py:898 cinder/api/openstack/compute/contrib/volumes.py:186 -#: cinder/api/openstack/volume/volumes.py:222 -#, python-format -msgid "Create volume of %s GB" -msgstr "Vytvořit svazek o %s GB" +#: cinder/api/contrib/volume_actions.py:299 +msgid "Must specify readonly in request." +msgstr "" -#: cinder/api/ec2/cloud.py:921 -msgid "Delete Failed" +#: cinder/api/contrib/volume_actions.py:307 +msgid "Bad value for 'readonly'" msgstr "" -#: cinder/api/ec2/cloud.py:931 -#, python-format -msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" -msgstr "Připojit svazek %(volume_id)s k instanci %(instance_id)s na %(device)s" +#: cinder/api/contrib/volume_actions.py:311 +msgid "'readonly' not string or bool" +msgstr "" -#: cinder/api/ec2/cloud.py:939 -msgid "Attach Failed." +#: cinder/api/contrib/volume_actions.py:325 +msgid "New volume type must be specified." msgstr "" -#: cinder/api/ec2/cloud.py:952 cinder/api/openstack/compute/contrib/volumes.py:366 -#, python-format -msgid "Detach volume %s" -msgstr "Odpojit svazek %s" +#: cinder/api/contrib/volume_transfer.py:131 +msgid "Listing volume transfers" +msgstr "" -#: cinder/api/ec2/cloud.py:959 +#: cinder/api/contrib/volume_transfer.py:147 #, fuzzy, python-format -msgid "Detach Volume Failed." -msgstr "Odpojit svazek %s" - -#: cinder/api/ec2/cloud.py:984 cinder/api/ec2/cloud.py:1041 -#: cinder/api/ec2/cloud.py:1518 cinder/api/ec2/cloud.py:1533 -#, python-format -msgid "attribute not supported: %s" -msgstr "vlastnost není podporována: %s" - -#: cinder/api/ec2/cloud.py:1107 -#, python-format -msgid "vol = %s\n" -msgstr "svz = %s\n" - -#: cinder/api/ec2/cloud.py:1255 -msgid "Allocate address" -msgstr "Přidělit adresu" - -#: cinder/api/ec2/cloud.py:1267 -#, python-format -msgid "Release address %s" -msgstr "Uvolnit adresu %s" +msgid "Creating new volume transfer %s" +msgstr "Vytvořit snímek svazku %s" -#: cinder/api/ec2/cloud.py:1272 -#, python-format -msgid "Associate address %(public_ip)s to instance %(instance_id)s" -msgstr "Přidělit adresu %(public_ip)s k instanci %(instance_id)s" +#: cinder/api/contrib/volume_transfer.py:162 +#, fuzzy, python-format +msgid "Creating transfer of volume %s" +msgstr "Nenalezeno žádné cílové id ve svazku %(volume_id)s." 
-#: cinder/api/ec2/cloud.py:1282 +#: cinder/api/contrib/volume_transfer.py:183 #, python-format -msgid "Disassociate address %s" -msgstr "Oddělit adresu %s" - -#: cinder/api/ec2/cloud.py:1308 -msgid "Image must be available" -msgstr "Obraz musí být dostupný" +msgid "Accepting volume transfer %s" +msgstr "" -#: cinder/api/ec2/cloud.py:1329 -msgid "Going to start terminating instances" -msgstr "BUde spuštěno ukončování insatncí" +#: cinder/api/contrib/volume_transfer.py:196 +#, fuzzy, python-format +msgid "Accepting transfer %s" +msgstr "Vytvořit snímek svazku %s" -#: cinder/api/ec2/cloud.py:1343 +#: cinder/api/contrib/volume_transfer.py:217 #, python-format -msgid "Reboot instance %r" -msgstr "Restratovat instanci %r" +msgid "Delete transfer with id: %s" +msgstr "" -#: cinder/api/ec2/cloud.py:1354 -msgid "Going to stop instances" -msgstr "Instance budou zastaveny" +#: cinder/api/contrib/volume_type_encryption.py:64 +msgid "key_size must be non-negative" +msgstr "" -#: cinder/api/ec2/cloud.py:1365 -msgid "Going to start instances" -msgstr "Instance budou spuštěny" +#: cinder/api/contrib/volume_type_encryption.py:67 +msgid "key_size must be an integer" +msgstr "" -#: cinder/api/ec2/cloud.py:1455 -#, python-format -msgid "De-registering image %s" -msgstr "Zrušení registrace obrazu %s" +#: cinder/api/contrib/volume_type_encryption.py:73 +msgid "provider must be defined" +msgstr "" -#: cinder/api/ec2/cloud.py:1471 -msgid "imageLocation is required" +#: cinder/api/contrib/volume_type_encryption.py:75 +msgid "control_location must be defined" msgstr "" -#: cinder/api/ec2/cloud.py:1490 +#: cinder/api/contrib/volume_type_encryption.py:83 #, python-format -msgid "Registered image %(image_location)s with id %(image_id)s" -msgstr "Obraz %(image_location)s registrován s id %(image_id)s" - -#: cinder/api/ec2/cloud.py:1536 -msgid "user or group not specified" -msgstr "uživatel nebo skupina nebyly zadány" - -#: cinder/api/ec2/cloud.py:1538 -msgid "only group \"all\" is supported" -msgstr "podporována je pouze skupina \"all\"" - -#: cinder/api/ec2/cloud.py:1540 -msgid "operation_type must be add or remove" -msgstr "operation_type musí být add nebo remove" +msgid "Valid control location are: %s" +msgstr "" -#: cinder/api/ec2/cloud.py:1542 -#, python-format -msgid "Updating image %s publicity" -msgstr "Aktualizace publicity obrazu %s" +#: cinder/api/contrib/volume_type_encryption.py:111 +msgid "Cannot create encryption specs. Volume type in use." +msgstr "" -#: cinder/api/ec2/cloud.py:1555 -#, python-format -msgid "Not allowed to modify attributes for image %s" +#: cinder/api/contrib/volume_type_encryption.py:115 +msgid "Create body is not valid." msgstr "" -#: cinder/api/ec2/cloud.py:1603 -#, python-format -msgid "Couldn't stop instance with in %d sec" -msgstr "Nelze zastavit instanci za %d sek" +#: cinder/api/contrib/volume_type_encryption.py:157 +msgid "Cannot delete encryption specs. Volume type in use." +msgstr "" -#: cinder/api/metadata/handler.py:246 cinder/api/metadata/handler.py:253 -#, python-format -msgid "Failed to get metadata for ip: %s" -msgstr "Nelze získat metadata pro ip: %s" +#: cinder/api/middleware/auth.py:108 +msgid "Invalid service catalog json." 
+msgstr "" -#: cinder/api/openstack/__init__.py:43 +#: cinder/api/middleware/fault.py:44 #, python-format msgid "Caught error: %s" msgstr "Zachycena chyba: %s" -#: cinder/api/openstack/__init__.py:45 cinder/api/openstack/wsgi.py:886 +#: cinder/api/middleware/fault.py:53 cinder/api/openstack/wsgi.py:984 #, python-format msgid "%(url)s returned with HTTP %(status)d" msgstr "%(url)s vrácena s HTTP %(status)d" -#: cinder/api/openstack/__init__.py:94 +#: cinder/api/middleware/fault.py:69 +#, python-format +msgid "%(exception)s: %(explanation)s" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:55 cinder/api/middleware/sizelimit.py:64 +#: cinder/api/middleware/sizelimit.py:78 +msgid "Request is too large." +msgstr "" + +#: cinder/api/openstack/__init__.py:69 msgid "Must specify an ExtensionManager class" msgstr "Musí být určena třída ExtensionManager" -#: cinder/api/openstack/__init__.py:105 +#: cinder/api/openstack/__init__.py:80 #, python-format msgid "Extended resource: %s" msgstr "Rozšířený zdroj: %s" -#: cinder/api/openstack/__init__.py:130 +#: cinder/api/openstack/__init__.py:104 #, python-format msgid "" "Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " "resource" msgstr "Rozšíření %(ext_name)s: nelze rozšířit %(collection)s: Žádný takový zdroj" -#: cinder/api/openstack/__init__.py:135 +#: cinder/api/openstack/__init__.py:110 #, python-format msgid "Extension %(ext_name)s extending resource: %(collection)s" msgstr "Rozšíření %(ext_name)s: rozšiřování zdroje %(collection)s" -#: cinder/api/openstack/auth.py:90 -#, python-format -msgid "%(user_id)s could not be found with token '%(token)s'" -msgstr "%(user_id)s nelze nalézt se známkou '%(token)s'" +#: cinder/api/openstack/__init__.py:126 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" -#: cinder/api/openstack/auth.py:134 -#, python-format -msgid "%(user_id)s must be an admin or a member of %(project_id)s" -msgstr "%(user_id)s musí být správcem nebo členem %(project_id)s" +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." +msgstr "" -#: cinder/api/openstack/auth.py:152 -msgid "Authentication requests must be made against a version root (e.g. /v2)." -msgstr "Požadavky o ověření musí být prováděny proti verzi kořenu (např. /v2)." +#: cinder/api/openstack/wsgi.py:220 cinder/api/openstack/wsgi.py:634 +msgid "cannot understand JSON" +msgstr "JSON nelze porozumět" -#: cinder/api/openstack/auth.py:167 -#, python-format -msgid "Could not find %s in request." -msgstr "Nelze najít %s v požadavku." +#: cinder/api/openstack/wsgi.py:639 +msgid "too many body keys" +msgstr "příliš mnoho klíčů těla" -#: cinder/api/openstack/auth.py:191 -#, python-format -msgid "Successfully authenticated '%s'" -msgstr "'%s' úspěšně ověřeno" - -#: cinder/api/openstack/auth.py:241 -msgid "User not found with provided API key." -msgstr "Uživatel nenalezen pomocí zadaného klíče API." 
- -#: cinder/api/openstack/auth.py:258 -#, python-format -msgid "Provided API key is valid, but not for user '%(username)s'" -msgstr "Zadaný klíč API je platný, ale ne pro uživatele '%(username)s'" - -#: cinder/api/openstack/common.py:133 cinder/api/openstack/common.py:167 -msgid "limit param must be an integer" -msgstr "parametr limit musí být celé číslo" - -#: cinder/api/openstack/common.py:136 cinder/api/openstack/common.py:171 -msgid "limit param must be positive" -msgstr "parametr limit musí být kladný" - -#: cinder/api/openstack/common.py:161 -msgid "offset param must be an integer" -msgstr "parametr offset musí být celé číslo" - -#: cinder/api/openstack/common.py:175 -msgid "offset param must be positive" -msgstr "parametr offset musí být kladný" - -#: cinder/api/openstack/common.py:203 -#, python-format -msgid "marker [%s] not found" -msgstr "značka [%s] nenalezena" - -#: cinder/api/openstack/common.py:243 -#, python-format -msgid "href %s does not contain version" -msgstr "href %s neobsahuje verzi" - -#: cinder/api/openstack/common.py:278 -msgid "Image metadata limit exceeded" -msgstr "Metadata obrazu překračují limit" - -#: cinder/api/openstack/common.py:295 -#, python-format -msgid "Converting nw_info: %s" -msgstr "" - -#: cinder/api/openstack/common.py:305 -#, python-format -msgid "Converted networks: %s" -msgstr "" - -#: cinder/api/openstack/common.py:338 -#, python-format -msgid "Cannot '%(action)s' while instance is in %(attr)s %(state)s" -msgstr "Nelze '%(action)s' zatímco instance je %(attr)s %(state)s" - -#: cinder/api/openstack/common.py:341 -#, python-format -msgid "Instance is in an invalid state for '%(action)s'" -msgstr "Instance je v neplatném stavu pro '%(action)s'" - -#: cinder/api/openstack/common.py:421 -msgid "Rejecting snapshot request, snapshots currently disabled" -msgstr "Odmítnutí žádosti o snímek, snímky jsou nyní zakázány" - -#: cinder/api/openstack/common.py:423 -msgid "Instance snapshots are not permitted at this time." -msgstr "Snímky instance nejsou v současnosti povoleny." 
- -#: cinder/api/openstack/extensions.py:188 -#, python-format -msgid "Loaded extension: %s" -msgstr "Načteno rozšíření: %s" - -#: cinder/api/openstack/extensions.py:225 -#, python-format -msgid "Ext name: %s" -msgstr "Název roz: %s" - -#: cinder/api/openstack/extensions.py:226 -#, python-format -msgid "Ext alias: %s" -msgstr "Přezdívká roz: %s" - -#: cinder/api/openstack/extensions.py:227 -#, python-format -msgid "Ext description: %s" -msgstr "Popis roz: %s" - -#: cinder/api/openstack/extensions.py:229 -#, python-format -msgid "Ext namespace: %s" -msgstr "Jmenný prostor roz: %s" - -#: cinder/api/openstack/extensions.py:230 -#, python-format -msgid "Ext updated: %s" -msgstr "Roz aktualizováno: %s" - -#: cinder/api/openstack/extensions.py:232 -#, python-format -msgid "Exception loading extension: %s" -msgstr "Výjimka při načítání rozšíření: %s" - -#: cinder/api/openstack/extensions.py:246 -#, python-format -msgid "Loading extension %s" -msgstr "Načítání rozšíření %s" - -#: cinder/api/openstack/extensions.py:252 -#, python-format -msgid "Calling extension factory %s" -msgstr "Volání továrny rozšíření %s" - -#: cinder/api/openstack/extensions.py:264 -#, python-format -msgid "Failed to load extension %(ext_factory)s: %(exc)s" -msgstr "Nelze načít rozšížení %(ext_factory)s: %(exc)s" - -#: cinder/api/openstack/extensions.py:344 -#, python-format -msgid "Failed to load extension %(classpath)s: %(exc)s" -msgstr "Nelze načíst rozšíření %(classpath)s: %(exc)s" - -#: cinder/api/openstack/extensions.py:368 -#, python-format -msgid "Failed to load extension %(ext_name)s: %(exc)s" -msgstr "Nelze načíst rozšíření %(ext_name)s: %(exc)s" - -#: cinder/api/openstack/wsgi.py:135 cinder/api/openstack/wsgi.py:538 -msgid "cannot understand JSON" -msgstr "JSON nelze porozumět" - -#: cinder/api/openstack/wsgi.py:159 -#: cinder/api/openstack/compute/contrib/hosts.py:86 -msgid "cannot understand XML" -msgstr "XML nelze porozumět" - -#: cinder/api/openstack/wsgi.py:543 -msgid "too many body keys" -msgstr "příliš mnoho klíčů těla" - -#: cinder/api/openstack/wsgi.py:582 +#: cinder/api/openstack/wsgi.py:677 #, fuzzy, python-format msgid "Exception handling resource: %s" msgstr "Rozšířený zdroj: %s" -#: cinder/api/openstack/wsgi.py:586 +#: cinder/api/openstack/wsgi.py:682 #, python-format msgid "Fault thrown: %s" msgstr "Vyvolána chyba: %s" -#: cinder/api/openstack/wsgi.py:589 +#: cinder/api/openstack/wsgi.py:685 #, python-format msgid "HTTP exception thrown: %s" msgstr "Vyvolána výjimka HTTP: %s" -#: cinder/api/openstack/wsgi.py:697 +#: cinder/api/openstack/wsgi.py:793 +msgid "Empty body provided in request" +msgstr "V požadavku zadáno prázdné tělo" + +#: cinder/api/openstack/wsgi.py:799 msgid "Unrecognized Content-Type provided in request" msgstr "V požadavku zadán nerozpoznaný Content-Type" -#: cinder/api/openstack/wsgi.py:701 +#: cinder/api/openstack/wsgi.py:803 msgid "No Content-Type provided in request" msgstr "V požadavku nezadán Content-Type" -#: cinder/api/openstack/wsgi.py:705 -msgid "Empty body provided in request" -msgstr "V požadavku zadáno prázdné tělo" - -#: cinder/api/openstack/wsgi.py:816 +#: cinder/api/openstack/wsgi.py:914 #, python-format msgid "There is no such action: %s" msgstr "Žádná taková činnost: %s" -#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 -#: cinder/api/openstack/compute/server_metadata.py:58 -#: cinder/api/openstack/compute/server_metadata.py:76 -#: cinder/api/openstack/compute/server_metadata.py:101 -#: cinder/api/openstack/compute/server_metadata.py:126 -#: 
cinder/api/openstack/compute/contrib/admin_actions.py:211 -#: cinder/api/openstack/compute/contrib/console_output.py:52 +#: cinder/api/openstack/wsgi.py:917 cinder/api/openstack/wsgi.py:930 +#: cinder/api/v1/snapshot_metadata.py:53 cinder/api/v1/snapshot_metadata.py:71 +#: cinder/api/v1/snapshot_metadata.py:96 cinder/api/v1/snapshot_metadata.py:121 +#: cinder/api/v1/volume_metadata.py:53 cinder/api/v1/volume_metadata.py:71 +#: cinder/api/v1/volume_metadata.py:96 cinder/api/v1/volume_metadata.py:121 +#: cinder/api/v2/snapshot_metadata.py:53 cinder/api/v2/snapshot_metadata.py:71 +#: cinder/api/v2/snapshot_metadata.py:96 cinder/api/v2/snapshot_metadata.py:121 +#: cinder/api/v2/volume_metadata.py:52 cinder/api/v2/volume_metadata.py:70 +#: cinder/api/v2/volume_metadata.py:95 cinder/api/v2/volume_metadata.py:120 msgid "Malformed request body" msgstr "Poškozené tělo požadavku" -#: cinder/api/openstack/wsgi.py:829 +#: cinder/api/openstack/wsgi.py:927 msgid "Unsupported Content-Type" msgstr "Nepodporovaný Content-Type" -#: cinder/api/openstack/wsgi.py:841 +#: cinder/api/openstack/wsgi.py:939 msgid "Malformed request url" msgstr "Poškozená url požadavku" -#: cinder/api/openstack/wsgi.py:889 +#: cinder/api/openstack/wsgi.py:987 #, python-format msgid "%(url)s returned a fault: %(e)s" msgstr "%(url)s vrátilo chybu: %(e)s" -#: cinder/api/openstack/xmlutil.py:265 -msgid "element is not a child" -msgstr "prvek není podřazený" - -#: cinder/api/openstack/xmlutil.py:414 -msgid "root element selecting a list" -msgstr "kořenový prvek volí seznam" - -#: cinder/api/openstack/xmlutil.py:739 -#, python-format -msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." msgstr "" -"Neshoda stromu šablony; přidávání sluhy %(slavetag)s k pánovi " -"%(mastertag)s" - -#: cinder/api/openstack/xmlutil.py:858 -msgid "subclasses must implement construct()!" -msgstr "podtřídy musí zavádět construct()!" - -#: cinder/api/openstack/compute/extensions.py:29 -#: cinder/api/openstack/volume/extensions.py:29 -msgid "Initializing extension manager." -msgstr "Zavádění správce rozšíření." - -#: cinder/api/openstack/compute/image_metadata.py:40 -#: cinder/api/openstack/compute/images.py:146 -#: cinder/api/openstack/compute/images.py:161 -msgid "Image not found." 
-msgstr "Obraz nenalezen" - -#: cinder/api/openstack/compute/image_metadata.py:79 -msgid "Incorrect request body format" -msgstr "Nsprávný formát těla požadavku" - -#: cinder/api/openstack/compute/image_metadata.py:83 -#: cinder/api/openstack/compute/server_metadata.py:80 -#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:79 -#: cinder/api/openstack/compute/contrib/volumetypes.py:188 -msgid "Request body and URI mismatch" -msgstr "Neshoda s tělem požadavku a URI" - -#: cinder/api/openstack/compute/image_metadata.py:86 -#: cinder/api/openstack/compute/server_metadata.py:84 -#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:82 -#: cinder/api/openstack/compute/contrib/volumetypes.py:191 -msgid "Request body contains too many items" -msgstr "Tělo požadavku obsahuje příliš mnoho položek" -#: cinder/api/openstack/compute/image_metadata.py:111 -msgid "Invalid metadata key" -msgstr "Neplatný klíč metadata" - -#: cinder/api/openstack/compute/ips.py:74 -msgid "Instance does not exist" -msgstr "Instance neexistuje" - -#: cinder/api/openstack/compute/ips.py:97 -msgid "Instance is not a member of specified network" -msgstr "Instance není členem zadané sítě" +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" -#: cinder/api/openstack/compute/limits.py:140 +#: cinder/api/v1/limits.py:139 cinder/api/v2/limits.py:138 #, python-format msgid "" "Only %(value)s %(verb)s request(s) can be made to %(uri)s every " @@ -2083,6169 +1445,9337 @@ msgstr "" "Pouze %(value)s požadavky %(verb)s mohou být provedeny pro %(uri)s " "každých %(unit_string)s." -#: cinder/api/openstack/compute/limits.py:266 +#: cinder/api/v1/limits.py:264 cinder/api/v2/limits.py:261 msgid "This request was rate-limited." msgstr "Tento požadavek má omezen množství." 
-#: cinder/api/openstack/compute/server_metadata.py:38 -#: cinder/api/openstack/compute/server_metadata.py:122 -#: cinder/api/openstack/compute/server_metadata.py:159 -msgid "Server does not exist" -msgstr "Server neexistuje" +#: cinder/api/v1/snapshot_metadata.py:37 cinder/api/v1/snapshot_metadata.py:117 +#: cinder/api/v1/snapshot_metadata.py:156 cinder/api/v2/snapshot_metadata.py:37 +#: cinder/api/v2/snapshot_metadata.py:117 +#: cinder/api/v2/snapshot_metadata.py:156 +#, fuzzy +msgid "snapshot does not exist" +msgstr "Instance neexistuje" -#: cinder/api/openstack/compute/server_metadata.py:141 -#: cinder/api/openstack/compute/server_metadata.py:152 +#: cinder/api/v1/snapshot_metadata.py:139 +#: cinder/api/v1/snapshot_metadata.py:149 cinder/api/v1/volume_metadata.py:139 +#: cinder/api/v1/volume_metadata.py:149 cinder/api/v2/snapshot_metadata.py:139 +#: cinder/api/v2/snapshot_metadata.py:149 cinder/api/v2/volume_metadata.py:138 +#: cinder/api/v2/volume_metadata.py:148 msgid "Metadata item was not found" msgstr "Položka metadat nenalezena" -#: cinder/api/openstack/compute/servers.py:425 +#: cinder/api/v1/snapshots.py:119 cinder/api/v2/snapshots.py:120 #, python-format -msgid "Invalid server status: %(status)s" -msgstr "Neplatný stav serveru: %(status)s" - -#: cinder/api/openstack/compute/servers.py:433 -msgid "Invalid changes-since value" -msgstr "Neplatná hodnota changes-since" - -#: cinder/api/openstack/compute/servers.py:483 -msgid "Personality file limit exceeded" -msgstr "Překročen limit osobnostního souboru" - -#: cinder/api/openstack/compute/servers.py:485 -msgid "Personality file path too long" -msgstr "Cesta osobnostního souboru je příliš dlouhá" - -#: cinder/api/openstack/compute/servers.py:487 -msgid "Personality file content too long" -msgstr "Obsah osobnostního souboru je příliš dlouhý" - -#: cinder/api/openstack/compute/servers.py:501 -msgid "Server name is not a string or unicode" -msgstr "Název serveru není řetězec nebo unicode" +msgid "Delete snapshot with id: %s" +msgstr "" -#: cinder/api/openstack/compute/servers.py:505 -msgid "Server name is an empty string" -msgstr "Název serveru je prázdný řetězec" +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:184 +msgid "'volume_id' must be specified" +msgstr "" -#: cinder/api/openstack/compute/servers.py:509 -msgid "Server name must be less than 256 characters." +#: cinder/api/v1/snapshots.py:182 cinder/api/v2/snapshots.py:193 +#, python-format +msgid "Create snapshot from volume %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:527 +#: cinder/api/v1/snapshots.py:186 cinder/api/v2/snapshots.py:202 #, python-format -msgid "Bad personality format: missing %s" -msgstr "Špatný formát osobnosti: chybí %s" +msgid "Invalid value '%s' for force. 
" +msgstr "" -#: cinder/api/openstack/compute/servers.py:530 -msgid "Bad personality format" -msgstr "Špatný formát osobnosti" +#: cinder/api/v1/volume_metadata.py:37 cinder/api/v1/volume_metadata.py:117 +#: cinder/api/v1/volume_metadata.py:156 cinder/api/v2/volume_metadata.py:36 +#: cinder/api/v2/volume_metadata.py:116 cinder/api/v2/volume_metadata.py:155 +#, fuzzy +msgid "volume does not exist" +msgstr "Server neexistuje" -#: cinder/api/openstack/compute/servers.py:535 +#: cinder/api/v1/volumes.py:111 #, python-format -msgid "Personality content for %s cannot be decoded" -msgstr "Obsah osobnosti pro %s nemůže být dekódován" +msgid "vol=%s" +msgstr "" -#: cinder/api/openstack/compute/servers.py:550 +#: cinder/api/v1/volumes.py:290 cinder/api/v2/volumes.py:228 #, python-format -msgid "Bad networks format: network uuid is not in proper format (%s)" -msgstr "Špatný formát sítí: uuid sítě není ve správném formátu (%s)" +msgid "Delete volume with id: %s" +msgstr "" -#: cinder/api/openstack/compute/servers.py:559 -#, python-format -msgid "Invalid fixed IP address (%s)" -msgstr "Neplatná pevná IP adresa (%s)" +#: cinder/api/v1/volumes.py:344 cinder/api/v1/volumes.py:348 +#: cinder/api/v2/volumes.py:298 cinder/api/v2/volumes.py:302 +msgid "Invalid imageRef provided." +msgstr "Zadáno neplatné imageRef." -#: cinder/api/openstack/compute/servers.py:566 +#: cinder/api/v1/volumes.py:388 cinder/api/v2/volumes.py:354 #, python-format -msgid "Duplicate networks (%s) are not allowed" -msgstr "Duplicitní sítě (%s) nejosu povoleny" +msgid "snapshot id:%s not found" +msgstr "" -#: cinder/api/openstack/compute/servers.py:572 +#: cinder/api/v1/volumes.py:401 #, python-format -msgid "Bad network format: missing %s" -msgstr "Špatný formát sítě: chybí %s" - -#: cinder/api/openstack/compute/servers.py:575 -msgid "Bad networks format" -msgstr "Špatný formát sítě" - -#: cinder/api/openstack/compute/servers.py:587 -msgid "Userdata content cannot be decoded" -msgstr "Obsah uživatelských dat nemůže být dekódován" - -#: cinder/api/openstack/compute/servers.py:594 -msgid "accessIPv4 is not proper IPv4 format" -msgstr "accessIPv4 není ve správném formátu IPv4" - -#: cinder/api/openstack/compute/servers.py:601 -#, fuzzy -msgid "accessIPv6 is not proper IPv6 format" -msgstr "accessIPv4 není ve správném formátu IPv4" - -#: cinder/api/openstack/compute/servers.py:633 -msgid "Server name is not defined" -msgstr "Název serveru není určen" - -#: cinder/api/openstack/compute/servers.py:675 -#: cinder/api/openstack/compute/servers.py:740 -msgid "Invalid flavorRef provided." -msgstr "Zadáno neplatné flavorRef." - -#: cinder/api/openstack/compute/servers.py:737 -msgid "Can not find requested image" -msgstr "Nelze najít požadovaný obraz" +msgid "source vol id:%s not found" +msgstr "" -#: cinder/api/openstack/compute/servers.py:743 -msgid "Invalid key_name provided." -msgstr "Zadán neplatný název_klíče." +#: cinder/api/v1/volumes.py:412 cinder/api/v2/volumes.py:377 +#, python-format +msgid "Create volume of %s GB" +msgstr "Vytvořit svazek o %s GB" -#: cinder/api/openstack/compute/servers.py:829 -#: cinder/api/openstack/compute/servers.py:849 -msgid "Instance has not been resized." -msgstr "Instanci nebyla změněna velikost." 
+#: cinder/api/v1/volumes.py:496 +#, fuzzy, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "Odstraňování voleb '%(unk_opt_str)s' z fronty" -#: cinder/api/openstack/compute/servers.py:835 -#, python-format -msgid "Error in confirm-resize %s" -msgstr "Chyba v confirm-resize %s" +#: cinder/api/v2/snapshots.py:111 cinder/api/v2/snapshots.py:126 +#: cinder/api/v2/snapshots.py:267 +msgid "Snapshot could not be found" +msgstr "" -#: cinder/api/openstack/compute/servers.py:855 +#: cinder/api/v2/snapshots.py:174 cinder/api/v2/snapshots.py:234 +#: cinder/api/v2/volumes.py:313 cinder/api/v2/volumes.py:419 #, python-format -msgid "Error in revert-resize %s" -msgstr "Chyba v revert-resize %s" +msgid "Missing required element '%s' in request body" +msgstr "" -#: cinder/api/openstack/compute/servers.py:868 -msgid "Argument 'type' for reboot is not HARD or SOFT" -msgstr "Argument 'type' pro restart není HARD či SOFT" +#: cinder/api/v2/snapshots.py:190 cinder/api/v2/volumes.py:217 +#: cinder/api/v2/volumes.py:234 cinder/api/v2/volumes.py:449 +msgid "Volume could not be found" +msgstr "" -#: cinder/api/openstack/compute/servers.py:872 -msgid "Missing argument 'type' for reboot" -msgstr "Chybí argument 'type' pro restart" +#: cinder/api/v2/snapshots.py:230 cinder/api/v2/volumes.py:415 +msgid "Missing request body" +msgstr "" -#: cinder/api/openstack/compute/servers.py:885 -#, python-format -msgid "Error in reboot %s" -msgstr "Chyba v restartu %s" +#: cinder/api/v2/types.py:70 +msgid "Volume type not found" +msgstr "" -#: cinder/api/openstack/compute/servers.py:897 -msgid "Unable to locate requested flavor." -msgstr "Nelze najít požadovanou konfiguraci." +#: cinder/api/v2/volumes.py:237 +msgid "Volume cannot be deleted while in attached state" +msgstr "" -#: cinder/api/openstack/compute/servers.py:900 -msgid "Resize requires a change in size." -msgstr "Resize vyžaduje změnu velikosti." +#: cinder/api/v2/volumes.py:343 +msgid "Volume type not found." +msgstr "" -#: cinder/api/openstack/compute/servers.py:924 -msgid "Malformed server entity" -msgstr "Poškozený objekt serveru" +#: cinder/api/v2/volumes.py:366 +#, python-format +msgid "source volume id:%s not found" +msgstr "" -#: cinder/api/openstack/compute/servers.py:931 -msgid "Missing imageRef attribute" -msgstr "Chybí vlastnost imageRef" +#: cinder/api/v2/volumes.py:472 +#, fuzzy, python-format +msgid "Removing options '%s' from query" +msgstr "Odstraňování voleb '%(unk_opt_str)s' z fronty" -#: cinder/api/openstack/compute/servers.py:940 -msgid "Invalid imageRef provided." -msgstr "Zadáno neplatné imageRef." +#: cinder/backup/api.py:66 +#, fuzzy +msgid "Backup status must be available or error" +msgstr "Obraz musí být dostupný" -#: cinder/api/openstack/compute/servers.py:949 -msgid "Missing flavorRef attribute" -msgstr "Chybí vlastnost flavorRef" +#: cinder/backup/api.py:105 +#, fuzzy +msgid "Volume to be backed up must be available" +msgstr "Obraz musí být dostupný" -#: cinder/api/openstack/compute/servers.py:962 -msgid "No adminPass was specified" -msgstr "Nebylo zadáno adminPass" +#: cinder/backup/api.py:140 +#, fuzzy +msgid "Backup status must be available" +msgstr "Obraz musí být dostupný" -#: cinder/api/openstack/compute/servers.py:966 -#: cinder/api/openstack/compute/servers.py:1144 -msgid "Invalid adminPass" -msgstr "Neplatné adminPass" +#: cinder/backup/api.py:145 +msgid "Backup to be restored has invalid size" +msgstr "" -#: cinder/api/openstack/compute/servers.py:980 -msgid "Unable to parse metadata key/value pairs." 
-msgstr "Nelze zpracovat metadata dvojic hodnot/klíčů." +#: cinder/backup/api.py:154 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" -#: cinder/api/openstack/compute/servers.py:993 -msgid "Resize request has invalid 'flavorRef' attribute." -msgstr "Požadavek na změnu velikosti má neplatnou vlastnost 'flavorRef'." +#: cinder/backup/api.py:170 +#, fuzzy +msgid "Volume to be restored to must be available" +msgstr "Obraz musí být dostupný" -#: cinder/api/openstack/compute/servers.py:996 -msgid "Resize requests require 'flavorRef' attribute." -msgstr "Požadavek na změnu velikosti vyžaduje vlastnost 'flavorRef'." +#: cinder/backup/api.py:176 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." +msgstr "" -#: cinder/api/openstack/compute/servers.py:1014 -#: cinder/api/openstack/compute/contrib/aggregates.py:142 -#: cinder/api/openstack/compute/contrib/networks.py:65 -msgid "Invalid request body" -msgstr "Neplatné tělo požadavku" +#: cinder/backup/api.py:181 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" -#: cinder/api/openstack/compute/servers.py:1019 -msgid "Could not parse imageRef from request." -msgstr "Nelze zpracovat imageRef z požadavku." +#: cinder/backup/manager.py:97 +msgid "NULL host not allowed for volume backend lookup." +msgstr "" -#: cinder/api/openstack/compute/servers.py:1071 -msgid "Instance could not be found" -msgstr "Instance nemohla být nalezena" +#: cinder/backup/manager.py:100 +#, python-format +msgid "Checking hostname '%s' for backend info." +msgstr "" -#: cinder/api/openstack/compute/servers.py:1074 -msgid "Cannot find image for rebuild" -msgstr "Nelze najít obraz ke znovu sestavení" +#: cinder/backup/manager.py:107 +#, python-format +msgid "Backend not found in hostname (%s) so using default." +msgstr "" -#: cinder/api/openstack/compute/servers.py:1103 -msgid "createImage entity requires name attribute" -msgstr "Objekt createImage cyžaduje vlastnost name" +#: cinder/backup/manager.py:117 +#, python-format +msgid "Manager requested for volume_backend '%s'." +msgstr "" -#: cinder/api/openstack/compute/servers.py:1112 -#: cinder/api/openstack/compute/contrib/admin_actions.py:238 -msgid "Invalid metadata" -msgstr "Neplatná metadata" +#: cinder/backup/manager.py:120 cinder/backup/manager.py:132 +msgid "Fetching default backend." +msgstr "" -#: cinder/api/openstack/compute/servers.py:1167 +#: cinder/backup/manager.py:123 #, python-format -msgid "Removing options '%(unk_opt_str)s' from query" -msgstr "Odstraňování voleb '%(unk_opt_str)s' z fronty" +msgid "Volume manager for backend '%s' does not exist." +msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:60 +#: cinder/backup/manager.py:129 #, python-format -msgid "Compute.api::pause %s" -msgstr "Compute.api::pauza %s" +msgid "Driver requested for volume_backend '%s'." +msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:77 +#: cinder/backup/manager.py:147 #, python-format -msgid "Compute.api::unpause %s" -msgstr "Compute.api::zruš pauzu %s" +msgid "" +"Registering backend %(backend)s (host=%(host)s " +"backend_name=%(backend_name)s)." +msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:94 +#: cinder/backup/manager.py:154 #, python-format -msgid "compute.api::suspend %s" -msgstr "compute.api::přerušení %s" +msgid "Registering default backend %s." 
+msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:111 +#: cinder/backup/manager.py:158 #, python-format -msgid "compute.api::resume %s" -msgstr "compute.api::pokračování %s" +msgid "Starting volume driver %(driver_name)s (%(version)s)." +msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:127 +#: cinder/backup/manager.py:165 #, python-format -msgid "Error in migrate %s" -msgstr "Chyba v přesunu %s" +msgid "Error encountered during initialization of driver: %(name)s." +msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:141 -#, python-format -msgid "Compute.api::reset_network %s" -msgstr "Compute.api::reset_sítě %s" +#: cinder/backup/manager.py:184 +msgid "Cleaning up incomplete backup operations." +msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:154 -#: cinder/api/openstack/compute/contrib/admin_actions.py:170 -#: cinder/api/openstack/compute/contrib/admin_actions.py:186 -#: cinder/api/openstack/compute/contrib/multinic.py:41 -#: cinder/api/openstack/compute/contrib/rescue.py:44 -msgid "Server not found" -msgstr "Server nenalezen" +#: cinder/backup/manager.py:189 +#, python-format +msgid "Resetting volume %s to available (was backing-up)." +msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:157 +#: cinder/backup/manager.py:194 #, python-format -msgid "Compute.api::inject_network_info %s" -msgstr "Compute.api::vložit_info_sítě %s" +msgid "Resetting volume %s to error_restoring (was restoring-backup)." +msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:173 +#: cinder/backup/manager.py:206 #, python-format -msgid "Compute.api::lock %s" -msgstr "Compute.api::uzamčení %s" +msgid "Resetting backup %s to error (was creating)." +msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:189 +#: cinder/backup/manager.py:212 #, python-format -msgid "Compute.api::unlock %s" -msgstr "Compute.api::odemčení %s" +msgid "Resetting backup %s to available (was restoring)." +msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:219 +#: cinder/backup/manager.py:217 #, python-format -msgid "createBackup entity requires %s attribute" -msgstr "Objekt createBackup vyžaduej vlastnost %s" +msgid "Resuming delete on backup: %s." +msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:223 -msgid "Malformed createBackup entity" -msgstr "Poškozený objekt createBackup" +#: cinder/backup/manager.py:225 +#, python-format +msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:229 -msgid "createBackup attribute 'rotation' must be an integer" -msgstr "Vlastnost createBackup 'rotation' musí být celé číslo" +#: cinder/backup/manager.py:237 +#, python-format +msgid "" +"Create backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s." 
+msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:244 -#: cinder/api/openstack/compute/contrib/console_output.py:47 -#: cinder/api/openstack/compute/contrib/console_output.py:59 -#: cinder/api/openstack/compute/contrib/consoles.py:49 -#: cinder/api/openstack/compute/contrib/consoles.py:60 -#: cinder/api/openstack/compute/contrib/server_action_list.py:49 -#: cinder/api/openstack/compute/contrib/server_diagnostics.py:47 -#: cinder/api/openstack/compute/contrib/server_start_stop.py:38 -msgid "Instance not found" -msgstr "Instance nenalezena" +#: cinder/backup/manager.py:249 +#, python-format +msgid "" +"Create backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:272 -msgid "host and block_migration must be specified." -msgstr "host a block_migration musí být zadány." +#: cinder/backup/manager.py:282 +#, python-format +msgid "Create backup finished. backup: %s." +msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:284 +#: cinder/backup/manager.py:286 #, python-format -msgid "Live migration of instance %(id)s to host %(host)s failed" -msgstr "Migrace za chodu instance %(id)s na hostitele %(host)s selhala" +msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:76 -#, fuzzy, python-format +#: cinder/backup/manager.py:299 +#, python-format msgid "" -"Cannot create aggregate with name %(name)s and availability zone " -"%(avail_zone)s" +"Restore backup aborted: expected volume status %(expected_status)s but " +"got %(actual_status)s." msgstr "" -"Nelze vytvořit typ_svazku s názvem %(name)s a specifikacemi " -"%(extra_specs)s" -#: cinder/api/openstack/compute/contrib/aggregates.py:88 +#: cinder/backup/manager.py:310 #, python-format -msgid "Cannot show aggregate: %(id)s" +msgid "" +"Restore backup aborted: expected backup status %(expected_status)s but " +"got %(actual_status)s." msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:114 +#: cinder/backup/manager.py:329 #, python-format -msgid "Cannot update aggregate: %(id)s" +msgid "" +"Restore backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:126 +#: cinder/backup/manager.py:360 #, python-format -msgid "Cannot delete aggregate: %(id)s" +msgid "" +"Restore backup finished, backup %(backup_id)s restored to volume " +"%(volume_id)s." msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:139 +#: cinder/backup/manager.py:379 #, python-format -msgid "Aggregates does not have %s action" -msgstr "Agregáty nemají činnost %s" +msgid "Delete backup started, backup: %s." +msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:152 -#: cinder/api/openstack/compute/contrib/aggregates.py:158 +#: cinder/backup/manager.py:386 #, python-format -msgid "Cannot add host %(host)s in aggregate %(id)s" +msgid "" +"Delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." 
msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:171 -#: cinder/api/openstack/compute/contrib/aggregates.py:175 +#: cinder/backup/manager.py:399 #, python-format -msgid "Cannot remove host %(host)s in aggregate %(id)s" +msgid "" +"Delete backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:195 +#: cinder/backup/manager.py:422 #, python-format -msgid "Cannot set metadata %(metadata)s in aggregate %(id)s" +msgid "Delete backup finished, backup %s deleted." msgstr "" -#: cinder/api/openstack/compute/contrib/certificates.py:75 -msgid "Only root certificate can be retrieved." -msgstr "Může být získán pouze kořenový certifikát." - -#: cinder/api/openstack/compute/contrib/cloudpipe.py:146 +#: cinder/backup/drivers/ceph.py:116 msgid "" -"Unable to claim IP for VPN instances, ensure it isn't running, and try " -"again in a few minutes" +"rbd striping not supported - ignoring configuration settings for rbd " +"striping" msgstr "" -"Nelze získat IP pro instance VPN, ujistěte se, že není spuštěna a zkuste " -"to znovu za pár minut." -#: cinder/api/openstack/compute/contrib/consoles.py:44 -msgid "Missing type specification" -msgstr "Chybí určení typu" +#: cinder/backup/drivers/ceph.py:147 +#, python-format +msgid "invalid user '%s'" +msgstr "" -#: cinder/api/openstack/compute/contrib/consoles.py:56 -msgid "Invalid type specification" -msgstr "Neplatné určení typu" +#: cinder/backup/drivers/ceph.py:213 +msgid "backup_id required" +msgstr "" -#: cinder/api/openstack/compute/contrib/disk_config.py:44 +#: cinder/backup/drivers/ceph.py:224 #, python-format -msgid "%s must be either 'MANUAL' or 'AUTO'." -msgstr "%s musí být buď 'MANUAL' nebo 'AUTO'." +msgid "discarding %(length)s bytes from offset %(offset)s" +msgstr "" -#: cinder/api/openstack/compute/contrib/extended_server_attributes.py:77 -#: cinder/api/openstack/compute/contrib/extended_status.py:61 -msgid "Server not found." -msgstr "Server nenalezen." 
+#: cinder/backup/drivers/ceph.py:232 +#, python-format +msgid "writing zeroes chunk %d" +msgstr "" + +#: cinder/backup/drivers/ceph.py:246 +#, python-format +msgid "transferring data between '%(src)s' and '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:250 +#, python-format +msgid "%(chunks)s chunks of %(bytes)s bytes to be transferred" +msgstr "" + +#: cinder/backup/drivers/ceph.py:269 +#, python-format +msgid "transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:279 +#, python-format +msgid "transferring remaining %s bytes" +msgstr "" + +#: cinder/backup/drivers/ceph.py:295 +#, python-format +msgid "creating base image '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:322 cinder/backup/drivers/ceph.py:603 +#, python-format +msgid "deleting backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:325 +msgid "no backup snapshot to delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:361 +#, python-format +msgid "trying diff format name format basename='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:369 +#, python-format +msgid "image %s not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:377 +#, python-format +msgid "base image still has %s snapshots so skipping base image delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:382 +#, python-format +msgid "deleting base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:389 +#, python-format +msgid "image busy, retrying %(retries)s more time(s) in %(delay)ss" +msgstr "" + +#: cinder/backup/drivers/ceph.py:394 +msgid "max retries reached - raising error" +msgstr "" + +#: cinder/backup/drivers/ceph.py:397 +#, python-format +msgid "base backup image='%s' deleted)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:407 +#, python-format +msgid "deleting source snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:453 +#, python-format +msgid "performing differential transfer from '%(src)s' to '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:478 +#, python-format +msgid "rbd diff op failed - (ret=%(ret)s stderr=%(stderr)s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:488 +#, python-format +msgid "image '%s' not found - trying diff format name" +msgstr "" + +#: cinder/backup/drivers/ceph.py:493 +#, python-format +msgid "diff format image '%s' not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:528 +#, python-format +msgid "using --from-snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:543 +#, python-format +msgid "source snap '%s' is stale so deleting" +msgstr "" + +#: cinder/backup/drivers/ceph.py:555 +#, python-format +msgid "" +"snap='%(snap)s' does not exist in base image='%(base)s' - aborting " +"incremental backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:566 +#, python-format +msgid "creating backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:586 +#, python-format +msgid "differential backup transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:595 +msgid "differential backup transfer failed" +msgstr "" + +#: cinder/backup/drivers/ceph.py:625 +#, python-format +msgid "creating base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:634 +msgid "copying data" +msgstr "" + +#: cinder/backup/drivers/ceph.py:694 +#, python-format +msgid "looking for snapshot of backup base '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:697 +#, python-format +msgid "backup base '%s' has no snapshots" +msgstr "" + +#: cinder/backup/drivers/ceph.py:704 +#, python-format +msgid 
"backup '%s' has no snapshot" +msgstr "" + +#: cinder/backup/drivers/ceph.py:708 +#, python-format +msgid "backup should only have one snapshot but instead has %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:713 +#, python-format +msgid "found snapshot '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:734 +msgid "need non-zero volume size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:751 +#, python-format +msgid "Starting backup of volume='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:764 +msgid "forcing full backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:776 +#, python-format +msgid "backup '%s' finished." +msgstr "" + +#: cinder/backup/drivers/ceph.py:834 +msgid "adjusting restore vol size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:846 +#, python-format +msgid "trying incremental restore from base='%(base)s' snap='%(snap)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:858 +msgid "differential restore failed, trying full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:869 +#, python-format +msgid "restore transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:916 +#, python-format +msgid "rbd has %s extents" +msgstr "" + +#: cinder/backup/drivers/ceph.py:938 +msgid "dest volume is original volume - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:959 +msgid "destination has extents - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:964 +#, python-format +msgid "no restore point found for backup='%s', forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:995 +msgid "forcing full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1005 +#, python-format +msgid "starting restore from Ceph backup=%(src)s to volume=%(dest)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1016 +msgid "volume_file does not support fileno() so skipping fsync()" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1021 +msgid "restore finished successfully." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:1023 +#, python-format +msgid "restore finished with error - %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1029 +#, python-format +msgid "delete started for backup=%s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1034 +msgid "rbd image not found but continuing anyway so that db entry can be removed" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1037 +#, python-format +msgid "delete '%s' finished with warning" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1039 +#, python-format +msgid "delete '%s' finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:106 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:123 +#, python-format +msgid "single_user auth mode enabled, but %(param)s not set" +msgstr "" + +#: cinder/backup/drivers/swift.py:141 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:146 +#, fuzzy, python-format +msgid "container %s does not exist" +msgstr "Instance neexistuje" + +#: cinder/backup/drivers/swift.py:151 +#, fuzzy, python-format +msgid "container %s exists" +msgstr "Instance neexistuje" + +#: cinder/backup/drivers/swift.py:157 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:173 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:182 +#, fuzzy, python-format +msgid "generated object list: %s" +msgstr "Očekáván objekt typu: %s" + +#: cinder/backup/drivers/swift.py:192 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:209 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/drivers/swift.py:214 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:219 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:224 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/drivers/swift.py:234 +#, fuzzy, python-format +msgid "volume size %d is invalid." +msgstr "Požadavek je neplatný." 
+ +#: cinder/backup/drivers/swift.py:248 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:271 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/drivers/swift.py:278 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:287 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/drivers/swift.py:291 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/drivers/swift.py:297 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:301 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:304 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:312 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/drivers/swift.py:328 cinder/backup/drivers/tsm.py:324 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/drivers/swift.py:345 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/drivers/swift.py:350 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:356 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/drivers/swift.py:362 +#, python-format +msgid "" +"restoring object from swift. backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:378 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/drivers/swift.py:401 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:409 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:423 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:428 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:432 cinder/backup/drivers/tsm.py:378 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:446 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:455 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:458 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:468 cinder/backup/drivers/tsm.py:440 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/backup/drivers/tsm.py:85 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to create device hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:143 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to obtain backup success notification from " +"server.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:173 +#, python-format +msgid "" +"restore: %(vol_id)s Failed.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:199 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a block device." +msgstr "" + +#: cinder/backup/drivers/tsm.py:206 +#, python-format +msgid "backup: %(vol_id)s Failed. Cannot obtain real path to device %(path)s." +msgstr "" + +#: cinder/backup/drivers/tsm.py:213 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a file." +msgstr "" + +#: cinder/backup/drivers/tsm.py:260 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to remove backup hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:286 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to TSM, volume path: " +"%(volume_path)s," +msgstr "" + +#: cinder/backup/drivers/tsm.py:298 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:308 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:338 +#, python-format +msgid "" +"restore: starting restore of backup from TSM to volume %(volume_id)s, " +"backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:352 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:362 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:413 +#, python-format +msgid "" +"delete: %(vol_id)s Failed to run dsmc with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:421 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments with " +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:432 +#, python-format +msgid "" +"delete: %(vol_id)s Failed with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/brick/exception.py:55 +#, python-format +msgid "Exception in string format operation. 
msg='%s'" +msgstr "" + +#: cinder/brick/exception.py:85 +msgid "We are unable to locate any Fibre Channel devices." +msgstr "" + +#: cinder/brick/exception.py:89 +msgid "Unable to find a Fibre Channel volume device." +msgstr "" + +#: cinder/brick/exception.py:93 +#, python-format +msgid "Volume device not found at %(device)s." +msgstr "" + +#: cinder/brick/exception.py:97 +#, python-format +msgid "Unable to find Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:101 +#, python-format +msgid "Failed to create Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:105 +#, fuzzy, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "Nenalezeno žádné cílové id ve svazku %(volume_id)s." + +#: cinder/brick/exception.py:109 +#, fuzzy, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "Nenalezeno žádné cílové id ve svazku %(volume_id)s." + +#: cinder/brick/exception.py:113 +#, fuzzy, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "Nenalezeno žádné cílové id ve svazku %(volume_id)s." + +#: cinder/brick/exception.py:117 +#, python-format +msgid "Connect to volume via protocol %(protocol)s not supported." +msgstr "" + +#: cinder/brick/initiator/connector.py:127 +#, python-format +msgid "Invalid InitiatorConnector protocol specified %(protocol)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:140 +#, python-format +msgid "Failed to access the device on the path %(path)s: %(error)s %(info)s." +msgstr "" + +#: cinder/brick/initiator/connector.py:229 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. Try" +" number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:242 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:317 +#, python-format +msgid "Could not find the iSCSI Initiator File %s" +msgstr "" + +#: cinder/brick/initiator/connector.py:609 +msgid "We are unable to locate any Fibre Channel devices" +msgstr "" + +#: cinder/brick/initiator/connector.py:619 +#, python-format +msgid "Looking for Fibre Channel dev %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:629 +msgid "Fibre Channel volume device not found." +msgstr "" + +#: cinder/brick/initiator/connector.py:633 +#, python-format +msgid "Fibre volume not yet found. Will rescan & retry. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:649 +#, python-format +msgid "Found Fibre Channel volume %(name)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:658 +#, python-format +msgid "Multipath device discovered %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:776 +#, python-format +msgid "AoE volume not yet found at: %(path)s. 
Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:789 +#, python-format +msgid "Found AoE device %(path)s (after %(tries)s rediscover)" +msgstr "" + +#: cinder/brick/initiator/connector.py:815 +#, python-format +msgid "aoe-discover: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:825 +#, python-format +msgid "aoe-revalidate %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:834 +#, python-format +msgid "aoe-flush %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:858 +msgid "" +"Connection details not present. RemoteFsClient may not initialize " +"properly." +msgstr "" + +#: cinder/brick/initiator/connector.py:915 +msgid "Invalid connection_properties specified no device_path attribute" +msgstr "" + +#: cinder/brick/initiator/linuxfc.py:50 cinder/brick/initiator/linuxfc.py:56 +msgid "systool is not installed" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:99 +#: cinder/brick/initiator/linuxscsi.py:107 +#: cinder/brick/initiator/linuxscsi.py:124 +#, python-format +msgid "multipath call failed exit (%(code)s)" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:145 +#, python-format +msgid "Couldn't find multipath device %(line)s" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:149 +#, python-format +msgid "Found multipath device = %(mdev)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:140 +msgid "Attempting recreate of backing lun..." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:158 +#, python-format +msgid "" +"Failed to recover attempt to create iscsi backing lun for volume " +"id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:177 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:184 +#, python-format +msgid "" +"Created volume path %(vp)s,\n" +"content: %(vc)%" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:216 cinder/brick/iscsi/iscsi.py:365 +#, fuzzy, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "Nenalezeno žádné cílové id ve svazku %(volume_id)s." + +#: cinder/brick/iscsi/iscsi.py:227 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:258 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:262 +#, python-format +msgid "Volume path %s does not exist, nothing to remove." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:280 +#, fuzzy, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "Nenalezeno žádné cílové id ve svazku %(volume_id)s." + +#: cinder/brick/iscsi/iscsi.py:290 cinder/brick/iscsi/iscsi.py:550 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:375 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:469 +msgid "cinder-rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:489 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:513 cinder/brick/iscsi/iscsi.py:522 +#, python-format +msgid "Failed to create iscsi target for volume id:%s." 
+msgstr "" + +#: cinder/brick/iscsi/iscsi.py:532 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:542 +#, python-format +msgid "Failed to remove iscsi target for volume id:%s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:571 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 cinder/brick/local_dev/lvm.py:158 +#: cinder/brick/local_dev/lvm.py:474 cinder/brick/local_dev/lvm.py:503 +#: cinder/brick/local_dev/lvm.py:546 cinder/brick/local_dev/lvm.py:609 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 cinder/brick/local_dev/lvm.py:159 +#: cinder/brick/local_dev/lvm.py:475 cinder/brick/local_dev/lvm.py:504 +#: cinder/brick/local_dev/lvm.py:547 cinder/brick/local_dev/lvm.py:610 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 cinder/brick/local_dev/lvm.py:160 +#: cinder/brick/local_dev/lvm.py:476 cinder/brick/local_dev/lvm.py:505 +#: cinder/brick/local_dev/lvm.py:548 cinder/brick/local_dev/lvm.py:611 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, python-format +msgid "Unable to locate Volume Group %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:157 +msgid "Error querying thin pool about data_percent" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:370 +#, python-format +msgid "Unable to find VG: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:420 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:473 +msgid "Error creating Volume" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:489 +#, fuzzy, python-format +msgid "Unable to find LV: %s" +msgstr "Nelze použít globální roli %(role_id)s" + +#: cinder/brick/local_dev/lvm.py:502 +msgid "Error creating snapshot" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:545 +msgid "Error activating LV" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:563 +#, python-format +msgid "Error reported running lvremove: CMD: %(command)s, RESPONSE: %(response)s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:568 +msgid "Attempting udev settle and retry of lvremove..." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:608 +msgid "Error extending Volume" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:39 +msgid "nfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:45 +msgid "glusterfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:86 +#, python-format +msgid "Already mounted: %s" +msgstr "" + +#: cinder/common/config.py:125 +msgid "Deploy v1 of the Cinder API." +msgstr "" + +#: cinder/common/config.py:128 +msgid "Deploy v2 of the Cinder API." +msgstr "" + +#: cinder/common/sqlalchemyutils.py:66 +#: cinder/openstack/common/db/sqlalchemy/utils.py:72 +msgid "Id not in sort_keys; is sort_keys unique?" 
+msgstr "" + +#: cinder/common/sqlalchemyutils.py:114 +#: cinder/openstack/common/db/sqlalchemy/utils.py:120 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/compute/nova.py:97 +#, python-format +msgid "Novaclient connection created using URL: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:63 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:190 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:843 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1842 +#, python-format +msgid "VolumeType %s deletion failed, VolumeType in use." +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2530 +#, python-format +msgid "No backup with id %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2615 +#, fuzzy +msgid "Volume must be available" +msgstr "Obraz musí být dostupný" + +#: cinder/db/sqlalchemy/api.py:2639 +#, python-format +msgid "Volume in unexpected state %s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2662 +#, python-format +msgid "" +"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " +"%(status)s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:37 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:64 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:240 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:269 +msgid "Downgrade from initial Cinder install is unsupported." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:49 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:74 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:105 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:45 +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:48 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:46 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:127 +msgid "Dropping foreign key reservations_ibfk_1 failed." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:133 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:140 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:147 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:60 +msgid "Exception while creating table 'volume_glance_metadata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:75 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:68 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:58 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:61 +msgid "transfers table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:31 +msgid "migrations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:61 +#, python-format +msgid "Table |%s| not created" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:37 +#, python-format +msgid "Exception while dropping table %s." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:100 +#, python-format +msgid "Exception while creating table %s." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:34 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:43 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:49 +#, python-format +msgid "Column |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:92 +msgid "encryption_key_id column not dropped from volumes" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:100 +msgid "encryption_key_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:105 +msgid "volume_type_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:113 +msgid "encryption table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:49 +msgid "Table quality_of_service_specs not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:60 +msgid "Added qos_specs_id column to volume type table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:85 +msgid "Dropping foreign key volume_types_ibfk_1 failed" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:93 +msgid "Dropping qos_specs_id column failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:100 +msgid "Dropping quality_of_service_specs table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:59 +msgid "volume_admin_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:48 +msgid "" +"Found existing 'default' entries in the quota_classes table. 
Skipping " +"insertion of default values." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:72 +msgid "Added default quota class data into the DB." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:74 +msgid "Default quota class data not inserted into the DB." +msgstr "" + +#: cinder/image/glance.py:161 cinder/image/glance.py:169 +#, python-format +msgid "Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:94 cinder/image/image_utils.py:199 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/image/image_utils.py:101 +#, python-format +msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:109 cinder/image/image_utils.py:192 +#, python-format +msgid "" +"Size is %(image_size)dGB and doesn't fit in a volume of size " +"%(volume_size)dGB." +msgstr "" + +#: cinder/image/image_utils.py:157 +#, python-format +msgid "" +"qemu-img is not installed and image is of type %s. Only RAW images can " +"be used if qemu-img is not installed." +msgstr "" + +#: cinder/image/image_utils.py:164 +msgid "" +"qemu-img is not installed and the disk format is not specified. Only RAW" +" images can be used if qemu-img is not installed." +msgstr "" + +#: cinder/image/image_utils.py:178 +#, python-format +msgid "Copying image from %(tmp)s to volume %(dest)s - size: %(size)s" +msgstr "" + +#: cinder/image/image_utils.py:206 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:224 +#, python-format +msgid "Converted to %(vol_format)s, but format is now %(file_format)s" +msgstr "" + +#: cinder/image/image_utils.py:260 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:78 +msgid "" +"config option keymgr.fixed_key has not been defined: some operations may " +"fail unexpectedly" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:80 +msgid "keymgr.fixed_key not defined" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:134 +#, python-format +msgid "Not deleting key %s" +msgstr "" + +#: cinder/openstack/common/eventlet_backdoor.py:140 +#, python-format +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, fuzzy, python-format +msgid "Original exception being dropped: %s" +msgstr "Původní výjimka je zahozena" + +#: cinder/openstack/common/fileutils.py:64 +#, python-format +msgid "Reloading cached file %s" +msgstr "" + +#: cinder/openstack/common/gettextutils.py:252 +msgid "Message objects do not support addition." +msgstr "" + +#: cinder/openstack/common/gettextutils.py:261 +msgid "" +"Message objects do not support str() because they may contain non-ascii " +"characters. Please use unicode() or translate() instead." +msgstr "" + +#: cinder/openstack/common/imageutils.py:96 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:189 +#, fuzzy, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "Získán semafor \"%(lock)s\" pro zámek metody \"%(method)s\"" + +#: cinder/openstack/common/lockutils.py:200 +#, fuzzy, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." 
+msgstr "Pokus o získání zámku souboru \"%(lock)s\" pro zámek metody \"%(method)s\"" + +#: cinder/openstack/common/lockutils.py:227 +#, fuzzy, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "Získán zámek souboru \"%(lock)s\" pro zámek metody \"%(method)s\"" + +#: cinder/openstack/common/lockutils.py:235 +#, fuzzy, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "Získán zámek souboru \"%(lock)s\" pro zámek metody \"%(method)s\"" + +#: cinder/openstack/common/log.py:301 +#, fuzzy, python-format +msgid "Deprecated: %s" +msgstr "obdrženo: %s" + +#: cinder/openstack/common/log.py:402 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:453 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "zařízení záznamu systému musí být jedno z: %s" + +#: cinder/openstack/common/log.py:623 +#, fuzzy, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "Třída %(fullname)s je zastaralá: %(msg)s" + +#: cinder/openstack/common/loopingcall.py:82 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:89 +#: cinder/tests/brick/test_brick_connector.py:466 +#, fuzzy +msgid "in fixed duration looping call" +msgstr "v opakujícím volání" + +#: cinder/openstack/common/loopingcall.py:129 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:136 +#, fuzzy +msgid "in dynamic looping call" +msgstr "v opakujícím volání" + +#: cinder/openstack/common/periodic_task.py:43 +#, python-format +msgid "Unexpected argument for periodic task creation: %(arg)s." +msgstr "" + +#: cinder/openstack/common/periodic_task.py:134 +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:139 +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:177 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "Spuštění pravidelné úlohy %(full_task_name)s" + +#: cinder/openstack/common/periodic_task.py:186 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "Chyba při %(full_task_name)s: %(e)s" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." 
+msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:127 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "Získány neznámé argumenty klíčového slova pro utils.execute: %r" + +#: cinder/openstack/common/processutils.py:142 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "Spouštění příkazu (podproces): %s" + +#: cinder/openstack/common/processutils.py:167 +#: cinder/openstack/common/processutils.py:239 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:345 +#, python-format +msgid "Result was %s" +msgstr "Výsledek byl %s" + +#: cinder/openstack/common/processutils.py:179 +#, python-format +msgid "%r failed. Retrying." +msgstr "%r selhalo. Opakování." + +#: cinder/openstack/common/processutils.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:318 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "Spouštění příkazu (SSH): %s" + +#: cinder/openstack/common/processutils.py:220 +msgid "Environment not supported over SSH" +msgstr "Prostředí není podporováno přes SSH" + +#: cinder/openstack/common/processutils.py:224 +msgid "process_input not supported over SSH" +msgstr "process_input není podporován přes SSH" + +#: cinder/openstack/common/service.py:175 +#: cinder/openstack/common/service.py:269 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:187 +msgid "Exception during rpc cleanup." 
+msgstr "" + +#: cinder/openstack/common/service.py:238 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:275 +#, fuzzy +msgid "Unhandled exception" +msgstr "Vnitřní výjimka: %s" + +#: cinder/openstack/common/service.py:308 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/openstack/common/service.py:327 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/openstack/common/service.py:337 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/openstack/common/service.py:354 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/openstack/common/service.py:358 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/service.py:362 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/openstack/common/service.py:392 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/openstack/common/service.py:410 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/openstack/common/sslutils.py:98 +#, python-format +msgid "Invalid SSL version : %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:86 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/strutils.py:182 +#, python-format +msgid "Invalid string format: %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:189 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/openstack/common/versionutils.py:69 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and " +"may be removed in %(remove_in)s." +msgstr "" + +#: cinder/openstack/common/versionutils.py:73 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s and may be removed in " +"%(remove_in)s. It will not be superseded." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:29 +msgid "An unknown error occurred in crypto utils." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:36 +#, python-format +msgid "Block size of %(given)d is too big, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:45 +#, python-format +msgid "Length of %(given)d is too long, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/db/exception.py:44 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:487 +msgid "DB exception wrapped." +msgstr "Vyjímka DB zabalena." + +#: cinder/openstack/common/db/sqlalchemy/session.py:538 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:610 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/utils.py:33 +msgid "Sort key supplied was not valid." +msgstr "" + +#: cinder/openstack/common/notifier/api.py:129 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:145 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:164 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." 
+msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:105 +#, python-format +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:83 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:216 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:299 +#, python-format +msgid "unpacked context: %s" +msgstr "rozbalený kontext: %s" + +#: cinder/openstack/common/rpc/amqp.py:345 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "received %s" +msgstr "obdrženo: %s" + +#: cinder/openstack/common/rpc/amqp.py:422 +#, python-format +msgid "no method for message: %s" +msgstr "pro zprávu není metoda: %s" + +#: cinder/openstack/common/rpc/amqp.py:423 +#, python-format +msgid "No method for message: %s" +msgstr "Pro zprávu není metoda: %s" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:280 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:459 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:594 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:597 +#, python-format +msgid "MSG_ID is %s" +msgstr "MSG_ID je %s" + +#: cinder/openstack/common/rpc/amqp.py:631 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:640 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:668 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +#, fuzzy +msgid "An unknown RPC related exception occurred." +msgstr "Vyskytla se neočekávaná výjimka." + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "Neplatné znovu použití připojení RPC." 
+ +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "Volajícímu je vrácena výjimka: %s" + +#: cinder/openstack/common/rpc/impl_kombu.py:166 +#: cinder/openstack/common/rpc/impl_qpid.py:164 +msgid "Failed to process message... skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:477 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:499 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:536 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:552 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:606 +#: cinder/openstack/common/rpc/impl_qpid.py:507 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:624 +#: cinder/openstack/common/rpc/impl_qpid.py:522 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:628 +#: cinder/openstack/common/rpc/impl_qpid.py:526 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:667 +#: cinder/openstack/common/rpc/impl_qpid.py:561 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:84 +#, python-format +msgid "Invalid value for qpid_topology_version: %d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:455 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:461 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:474 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:534 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:101 +#, fuzzy, python-format +msgid "Deserializing: %s" +msgstr "Popis roz: %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:136 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:137 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:138 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:146 +msgid "Could not open socket." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:158 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:200 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:205 +msgid "You cannot send on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:267 +#, fuzzy, python-format +msgid "Running func with context: %s" +msgstr "rozbalený kontext: %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:305 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:371 +#, fuzzy +msgid "Registering reactor" +msgstr "Zrušení registrace obrazu %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:387 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:437 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:443 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:475 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:481 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:497 +#, python-format +msgid "Required IPC directory does not exist at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:506 +#, python-format +msgid "Permission denied to IPC directory at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:509 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:543 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:562 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:590 +msgid "Skipping topic registration. Already registered." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:597 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:649 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:662 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:675 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:678 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:681 +#, fuzzy, python-format +msgid "Received message: %s" +msgstr "obdrženo: %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:682 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:691 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:698 +#, fuzzy +msgid "RPC Message Invalid." +msgstr "Požadavek je neplatný." + +#: cinder/openstack/common/rpc/impl_zmq.py:721 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:724 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:728 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:731 +msgid "No match from matchmaker." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:771 +#, python-format +msgid "topic is %s." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:815 +#, python-format +msgid "rpc_zmq_matchmaker = %(orig)s is deprecated; use %(new)s instead" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#: cinder/openstack/common/rpc/matchmaker_ring.py:79 +#: cinder/openstack/common/rpc/matchmaker_ring.py:97 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:54 +#, python-format +msgid "extra_spec requirement '%(req)s' does not match '%(cap)s'" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:67 +#, python-format +msgid "%(host_state)s fails resource_type extra_specs requirements" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:43 +msgid "Re-scheduling is disabled." +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:52 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/scheduler/driver.py:69 +msgid "Must implement host_passes_filters" +msgstr "" + +#: cinder/scheduler/driver.py:74 +msgid "Must implement find_retype_host" +msgstr "" + +#: cinder/scheduler/driver.py:78 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:82 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:98 +#, python-format +msgid "cannot place volume %(id)s on %(host)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:114 +#, python-format +msgid "No valid hosts for volume %(id)s with type %(type)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:125 +#, python-format +msgid "" +"Current host not valid for volume %(id)s with type %(type)s, migration " +"not allowed" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:156 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:174 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:207 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:259 +#, python-format +msgid "Filtered %s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:276 +#, python-format +msgid "Choosing %s" +msgstr "" + +#: cinder/scheduler/host_manager.py:264 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:269 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:294 +#, python-format +msgid "volume service is down or disabled. 
(host: %s)" +msgstr "" + +#: cinder/scheduler/manager.py:63 +msgid "" +"ChanceScheduler and SimpleScheduler have been deprecated due to lack of " +"support for advanced features like: volume types, volume encryption, QoS " +"etc. These two schedulers can be fully replaced by FilterScheduler with " +"certain combination of filters and weighers." +msgstr "" + +#: cinder/scheduler/manager.py:98 cinder/scheduler/manager.py:100 +msgid "Failed to create scheduler manager volume flow" +msgstr "" + +#: cinder/scheduler/manager.py:159 +msgid "New volume type not specified in request_spec." +msgstr "" + +#: cinder/scheduler/manager.py:174 +#, python-format +msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." +msgstr "" + +#: cinder/scheduler/manager.py:192 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:68 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%s'" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:43 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:57 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:53 +msgid "No volume_id provided to populate a request_spec from" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:116 +#, python-format +msgid "Failed to schedule_create_volume: %(cause)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:135 +#, python-format +msgid "Failed notifying on %(topic)s payload %(payload)s" +msgstr "" + +#: cinder/tests/fake_driver.py:57 cinder/volume/driver.py:784 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:76 cinder/volume/driver.py:884 +#, python-format +msgid "FAKE ISER: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:97 +msgid "local_path not implemented" +msgstr "" + +#: cinder/tests/fake_driver.py:124 cinder/tests/fake_driver.py:129 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:70 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:78 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:94 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:97 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:58 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_netapp_nfs.py:360 +#, python-format +msgid "Share %(share)s and file name %(file_name)s" +msgstr "" + +#: cinder/tests/test_rbd.py:768 cinder/volume/drivers/rbd.py:175 +msgid "flush() not supported in this version of librbd" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:260 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1507 +#, python-format +msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1510 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: 
cinder/tests/test_storwize_svc.py:1515 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:60 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:61 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: cinder/tests/test_xiv_ds8k.py:102 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "Svazek pro instanci %(instance_id)s nebyl nalezen." + +#: cinder/tests/api/contrib/test_backups.py:741 +#, fuzzy +msgid "Invalid input" +msgstr "Neplatný snímek" + +#: cinder/tests/integrated/test_login.py:29 +#, fuzzy, python-format +msgid "volume: %s" +msgstr "Odpojit svazek %s" + +#: cinder/tests/integrated/api/client.py:32 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:42 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:50 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:58 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:100 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:103 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:121 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:148 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:159 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:166 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/transfer/api.py:68 +msgid "Volume in unexpected state" +msgstr "" + +#: cinder/transfer/api.py:102 cinder/volume/api.py:367 +msgid "status must be available" +msgstr "" + +#: cinder/transfer/api.py:119 +#, fuzzy, python-format +msgid "Failed to create transfer record for %s" +msgstr "Nelze získat metadata pro ip: %s" + +#: cinder/transfer/api.py:136 +#, python-format +msgid "Attempt to transfer %s with invalid auth key." +msgstr "" + +#: cinder/transfer/api.py:156 cinder/volume/flows/api/create_volume.py:508 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/transfer/api.py:182 +#, python-format +msgid "Failed to update quota donating volumetransfer id %s" +msgstr "" + +#: cinder/transfer/api.py:199 +#, python-format +msgid "Volume %s has been transferred."
+msgstr "" + +#: cinder/volume/api.py:143 +#, python-format +msgid "Unable to query if %s is in the availability zone set" +msgstr "" + +#: cinder/volume/api.py:171 cinder/volume/api.py:173 +msgid "Failed to create api volume flow" +msgstr "" + +#: cinder/volume/api.py:202 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:214 +#, python-format +msgid "Volume status must be available or error, but current status is: %s" +msgstr "" + +#: cinder/volume/api.py:224 +msgid "Volume cannot be deleted while migrating" +msgstr "" + +#: cinder/volume/api.py:229 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:285 cinder/volume/api.py:350 +#: cinder/volume/qos_specs.py:240 cinder/volume/volume_types.py:67 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:370 +msgid "already attached" +msgstr "" -#: cinder/api/openstack/compute/contrib/flavorextradata.py:61 -#: cinder/api/openstack/compute/contrib/flavorextradata.py:91 +#: cinder/volume/api.py:377 +msgid "status must be in-use to detach" +msgstr "" + +#: cinder/volume/api.py:388 #, fuzzy -msgid "Flavor not found." -msgstr "Server nenalezen." +msgid "Volume status must be available to reserve" +msgstr "Obraz musí být dostupný" -#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:49 -#: cinder/api/openstack/compute/contrib/volumetypes.py:158 -msgid "No Request Body" -msgstr "Žádné tělo požadavku" +#: cinder/volume/api.py:464 +msgid "Snapshot cannot be created while volume is migrating" +msgstr "" + +#: cinder/volume/api.py:468 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:490 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:159 +#: cinder/volume/api.py:502 #, python-format -msgid "No more floating ips in pool %s." -msgstr "Žádné další plovoucí ip v zásobníku %s." +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:553 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/api.py:581 cinder/volume/flows/api/create_volume.py:208 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:585 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:589 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:720 cinder/volume/api.py:772 +#, fuzzy +msgid "Volume status must be available/in-use." +msgstr "Obraz musí být dostupný" -#: cinder/api/openstack/compute/contrib/floating_ips.py:161 -msgid "No more floating ips available." -msgstr "Žádné další plovoucí ip nejsou dostupné." +#: cinder/volume/api.py:723 +msgid "Volume status is in-use." +msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:201 -#: cinder/api/openstack/compute/contrib/floating_ips.py:230 -#: cinder/api/openstack/compute/contrib/security_groups.py:571 -#: cinder/api/openstack/compute/contrib/security_groups.py:604 -msgid "Missing parameter dict" -msgstr "Chybí parametr dict" +#: cinder/volume/api.py:752 +msgid "Volume status must be available to extend." 
+msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:204 -#: cinder/api/openstack/compute/contrib/floating_ips.py:233 -msgid "Address not specified" -msgstr "Adresa není určena" +#: cinder/volume/api.py:757 +#, python-format +msgid "" +"New size for extend must be greater than current size. (current: " +"%(size)s, extended: %(new_size)s)" +msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:213 -msgid "No fixed ips associated to instance" -msgstr "K instanci nejsou přidruženy žádné pevné ip" +#: cinder/volume/api.py:778 +msgid "Volume is already part of an active migration" +msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:216 -msgid "Associate floating ip failed" -msgstr "Přidružení plovoucí ip selhalo" +#: cinder/volume/api.py:784 +msgid "volume must not have snapshots" +msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:144 +#: cinder/volume/api.py:797 #, python-format -msgid "Invalid status: '%s'" -msgstr "Neplatný stav: '%s'" +msgid "No available service named %s" +msgstr "" + +#: cinder/volume/api.py:803 +msgid "Destination host must be different than current host" +msgstr "" + +#: cinder/volume/api.py:833 +msgid "Source volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:837 +msgid "Destination volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:842 +#, python-format +msgid "Destination has migration_status %(stat)s, expected %(exp)s." +msgstr "" + +#: cinder/volume/api.py:853 +msgid "Volume status must be available to update readonly flag." +msgstr "" + +#: cinder/volume/api.py:862 +#, python-format +msgid "Unable to update type due to incorrect status on volume: %s" +msgstr "" + +#: cinder/volume/api.py:868 +#, python-format +msgid "Volume %s is already part of an active migration." +msgstr "" + +#: cinder/volume/api.py:874 +#, python-format +msgid "migration_policy must be 'on-demand' or 'never', passed: %s" +msgstr "" + +#: cinder/volume/api.py:887 +#, python-format +msgid "Invalid volume_type passed: %s" +msgstr "" + +#: cinder/volume/api.py:900 +#, python-format +msgid "New volume_type same as original: %s" +msgstr "" + +#: cinder/volume/api.py:915 +msgid "Retype cannot change encryption requirements" +msgstr "" + +#: cinder/volume/api.py:927 +msgid "Retype cannot change front-end qos specs for in-use volumes" +msgstr "" + +#: cinder/volume/driver.py:189 cinder/volume/drivers/netapp/nfs.py:174 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:282 +#, python-format +msgid "copy_data_between_volumes %(src)s -> %(dest)s." +msgstr "" + +#: cinder/volume/driver.py:295 cinder/volume/driver.py:309 +#, python-format +msgid "Failed to attach volume %(vol)s" +msgstr "" + +#: cinder/volume/driver.py:327 +#, python-format +msgid "Failed to copy volume %(src)s to %(dest)d" +msgstr "" + +#: cinder/volume/driver.py:340 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:358 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:394 +#, python-format +msgid "Unable to access the backend storage via the path %(path)s." +msgstr "" + +#: cinder/volume/driver.py:433 +#, python-format +msgid "Creating a new backup for volume %s." +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Restoring backup %(backup)s to volume %(volume)s." 
+msgstr "" + +#: cinder/volume/driver.py:474 +msgid "Extend volume not implemented" +msgstr "" + +#: cinder/volume/driver.py:533 cinder/volume/drivers/emc/emc_smis_iscsi.py:113 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:148 +#: cinder/volume/driver.py:546 +#, python-format +msgid "ISCSI discovery attempt failed for:%s" +msgstr "" + +#: cinder/volume/driver.py:548 +#, python-format +msgid "Error from iscsiadm -m discovery: %s" +msgstr "" + +#: cinder/volume/driver.py:595 #, fuzzy, python-format -msgid "Invalid mode: '%s'" -msgstr "Neplatný stav: '%s'" +msgid "Could not find iSCSI export for volume %s" +msgstr "Nelze najít obslužnou rutinu pro svazek %(driver_type)s." + +#: cinder/volume/driver.py:599 cinder/volume/drivers/emc/emc_smis_iscsi.py:156 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:696 +msgid "The volume driver requires the iSCSI initiator name in the connector." +msgstr "" + +#: cinder/volume/driver.py:726 cinder/volume/driver.py:845 +#: cinder/volume/drivers/eqlx.py:247 cinder/volume/drivers/lvm.py:359 +#: cinder/volume/drivers/zadara.py:650 +#: cinder/volume/drivers/emc/emc_smis_common.py:859 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:235 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:602 +#: cinder/volume/drivers/netapp/iscsi.py:1032 +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/nexenta/iscsi.py:538 +#: cinder/volume/drivers/windows/windows.py:205 +msgid "Updating volume stats" +msgstr "" + +#: cinder/volume/driver.py:924 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:203 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." +msgstr "" + +#: cinder/volume/manager.py:209 +msgid "" +"ThinLVMVolumeDriver is deprecated, please configure LVMISCSIDriver and " +"lvm_type=thin. Continuing with those settings." 
+msgstr "" + +#: cinder/volume/manager.py:228 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)" +msgstr "" + +#: cinder/volume/manager.py:235 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s" +msgstr "" + +#: cinder/volume/manager.py:244 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:257 +#, python-format +msgid "Failed to re-export volume %s: setting to error state" +msgstr "" + +#: cinder/volume/manager.py:264 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:271 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:273 +#, python-format +msgid "" +"Error encountered during re-exporting phase of driver initialization: " +"%(name)s" +msgstr "" + +#: cinder/volume/manager.py:283 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:328 cinder/volume/manager.py:330 +msgid "Failed to create manager volume flow" +msgstr "" + +#: cinder/volume/manager.py:374 cinder/volume/manager.py:391 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:380 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:389 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:394 +#, python-format +msgid "Cannot delete volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:422 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:427 +#, python-format +msgid "volume %s: glance metadata deleted" +msgstr "" + +#: cinder/volume/manager.py:430 +#, python-format +msgid "no glance metadata found for volume %s" +msgstr "" + +#: cinder/volume/manager.py:434 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:451 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:462 +#, python-format +msgid "snapshot %(snap_id)s: creating" +msgstr "" + +#: cinder/volume/manager.py:490 +#, python-format +msgid "" +"Failed updating %(snapshot_id)s metadata using the provided volumes " +"%(volume_id)s metadata" +msgstr "" + +#: cinder/volume/manager.py:496 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:508 cinder/volume/manager.py:518 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:152 +#: cinder/volume/manager.py:526 #, python-format -msgid "Invalid update setting: '%s'" -msgstr "Neplatné nastavení aktualizace: '%s'" +msgid "Cannot delete snapshot %s: snapshot is busy" +msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:170 -#, fuzzy, python-format -msgid "Putting host %(host)s in maintenance mode %(mode)s." -msgstr "Nastavování hostitele %(host)s na %(state)s." +#: cinder/volume/manager.py:556 +msgid "Failed to update usages deleting snapshot" +msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:181 +#: cinder/volume/manager.py:559 #, python-format -msgid "Setting host %(host)s to %(state)s." -msgstr "Nastavování hostitele %(host)s na %(state)s." 
+msgid "snapshot %s: deleted successfully" +msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:230 -msgid "Describe-resource is admin only functionality" -msgstr "Describe-resource je funkce pouze pro správce" +#: cinder/volume/manager.py:579 +msgid "being attached by another instance" +msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:238 -msgid "Host not found" +#: cinder/volume/manager.py:583 +msgid "being attached by another host" msgstr "" -#: cinder/api/openstack/compute/contrib/keypairs.py:70 -msgid "Keypair name contains unsafe characters" +#: cinder/volume/manager.py:587 +msgid "being attached by different mode" msgstr "" -#: cinder/api/openstack/compute/contrib/keypairs.py:95 -msgid "Keypair name must be between 1 and 255 characters long" +#: cinder/volume/manager.py:590 +msgid "status must be available or attaching" msgstr "" -#: cinder/api/openstack/compute/contrib/keypairs.py:100 +#: cinder/volume/manager.py:698 +#, fuzzy, python-format +msgid "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "Připojit svazek %(volume_id)s k instanci %(instance_id)s na %(device)s" + +#: cinder/volume/manager.py:760 #, python-format -msgid "Key pair '%s' already exists." +msgid "Unable to fetch connection information from backend: %(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/multinic.py:52 -msgid "Missing 'networkId' argument for addFixedIp" +#: cinder/volume/manager.py:807 +#, python-format +msgid "Unable to terminate volume connection: %(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/multinic.py:68 -msgid "Missing 'address' argument for removeFixedIp" +#: cinder/volume/manager.py:854 +msgid "failed to create new_volume on destination host" msgstr "" -#: cinder/api/openstack/compute/contrib/multinic.py:77 -#, python-format -msgid "Unable to find address %r" +#: cinder/volume/manager.py:857 +msgid "timeout creating new_volume on destination host" msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:62 +#: cinder/volume/manager.py:880 #, python-format -msgid "Network does not have %s action" +msgid "Failed to copy volume %(vol1)s to %(vol2)s" msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:70 +#: cinder/volume/manager.py:909 #, python-format -msgid "Disassociating network with id %s" +msgid "" +"migrate_volume_completion: completing migration for volume %(vol1)s " +"(temporary volume %(vol2)s" msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:74 -#: cinder/api/openstack/compute/contrib/networks.py:91 -#: cinder/api/openstack/compute/contrib/networks.py:101 -msgid "Network not found" +#: cinder/volume/manager.py:921 +#, python-format +msgid "" +"migrate_volume_completion is cleaning up an error for volume %(vol1)s " +"(temporary volume %(vol2)s" msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:87 +#: cinder/volume/manager.py:940 #, python-format -msgid "Showing network with id %s" +msgid "Failed to delete migration source vol %(vol)s: %(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:97 +#: cinder/volume/manager.py:976 #, python-format -msgid "Deleting network with id %s" +msgid "volume %s: calling driver migrate_volume" msgstr "" -#: cinder/api/openstack/compute/contrib/scheduler_hints.py:41 -msgid "Malformed scheduler_hints attribute" +#: cinder/volume/manager.py:1016 +msgid "Updating volume status" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:222 -msgid "Security group id should be integer" +#: cinder/volume/manager.py:1024 
+#, python-format +msgid "" +"Unable to update stats, %(driver_name)s -%(driver_version)s " +"%(config_group)s driver is uninitialized." msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:243 -msgid "Security group is still in use" +#: cinder/volume/manager.py:1044 +#, python-format +msgid "Notification {%s} received" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:295 +#: cinder/volume/manager.py:1091 #, python-format -msgid "Security group %s already exists" +msgid "" +"Quota exceeded for %(s_pid)s, tried to extend volume by %(s_size)sG, " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:315 +#: cinder/volume/manager.py:1103 #, python-format -msgid "Security group %s is not a string or unicode" +msgid "volume %s: extending" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:318 +#: cinder/volume/manager.py:1105 #, python-format -msgid "Security group %s cannot be empty." +msgid "volume %s: extended successfully" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:321 +#: cinder/volume/manager.py:1107 #, python-format -msgid "Security group %s should not be greater than 255 characters." +msgid "volume %s: Error trying to extend volume" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:348 -msgid "Parent group id is not integer" +#: cinder/volume/manager.py:1169 +msgid "Failed to update usages while retyping volume." msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:351 -#, python-format -msgid "Security group (%s) not found" +#: cinder/volume/manager.py:1170 +msgid "Failed to get old volume type quota reservations" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:369 -msgid "Not enough parameters to build a valid rule." +#: cinder/volume/manager.py:1190 +#, python-format +msgid "Volume %s: retyped succesfully" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:376 +#: cinder/volume/manager.py:1193 #, python-format -msgid "This rule already exists in group %s" +msgid "" +"Volume %s: driver error when trying to retype, falling back to generic " +"mechanism." msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:414 -msgid "Parent or group id is not integer" +#: cinder/volume/manager.py:1204 +msgid "Retype requires migration but is not allowed." msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:507 -msgid "Rule id is not integer" +#: cinder/volume/manager.py:1212 +msgid "Volume must not have snapshots." 
msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:510 +#: cinder/volume/qos_specs.py:57 #, python-format -msgid "Rule (%s) not found" +msgid "Valid consumer of QoS specs are: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:574 -#: cinder/api/openstack/compute/contrib/security_groups.py:607 -msgid "Security group not specified" +#: cinder/volume/qos_specs.py:84 cinder/volume/qos_specs.py:105 +#: cinder/volume/qos_specs.py:155 cinder/volume/qos_specs.py:197 +#: cinder/volume/qos_specs.py:211 cinder/volume/qos_specs.py:225 +#: cinder/volume/volume_types.py:43 +#, python-format +msgid "DB error: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:578 -#: cinder/api/openstack/compute/contrib/security_groups.py:611 -msgid "Security group name cannot be empty" +#: cinder/volume/qos_specs.py:123 cinder/volume/qos_specs.py:140 +#: cinder/volume/qos_specs.py:272 cinder/volume/volume_types.py:52 +#: cinder/volume/volume_types.py:99 +msgid "id cannot be None" msgstr "" -#: cinder/api/openstack/compute/contrib/server_start_stop.py:45 +#: cinder/volume/qos_specs.py:156 #, python-format -msgid "start instance %r" +msgid "Failed to get all associations of qos specs %s" msgstr "" -#: cinder/api/openstack/compute/contrib/server_start_stop.py:54 +#: cinder/volume/qos_specs.py:189 #, python-format -msgid "stop instance %r" +msgid "" +"Type %(type_id)s is already associated with another qos specs: " +"%(qos_specs_id)s" msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:73 -#: cinder/api/openstack/volume/volumes.py:106 +#: cinder/volume/qos_specs.py:198 #, python-format -msgid "vol=%s" +msgid "Failed to associate qos specs %(id)s with type: %(vol_type_id)s" msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:146 -#: cinder/api/openstack/volume/volumes.py:184 +#: cinder/volume/qos_specs.py:212 #, python-format -msgid "Delete volume with id: %s" +msgid "Failed to disassociate qos specs %(id)s with type: %(vol_type_id)s" msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:329 +#: cinder/volume/qos_specs.py:226 #, python-format -msgid "Attach volume %(volume_id)s to instance %(server_id)s at %(device)s" +msgid "Failed to disassociate qos specs %s." msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:481 -#: cinder/api/openstack/volume/snapshots.py:110 +#: cinder/volume/qos_specs.py:284 cinder/volume/volume_types.py:111 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/utils.py:144 #, python-format -msgid "Delete snapshot with id: %s" +msgid "" +"Incorrect value error: %(blocksize)s, it may indicate that " +"'volume_dd_blocksize' was configured incorrectly. Fall back to default." msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:524 -#: cinder/api/openstack/volume/snapshots.py:150 +#: cinder/volume/volume_types.py:130 #, python-format -msgid "Create snapshot from volume %s" +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" msgstr "" -#: cinder/auth/fakeldap.py:33 -msgid "Attempted to instantiate singleton" +#: cinder/volume/drivers/block_device.py:131 +#: cinder/volume/drivers/block_device.py:143 cinder/volume/drivers/lvm.py:654 +#: cinder/volume/drivers/lvm.py:669 +#, python-format +msgid "Skipping remove_export. 
No iscsi_target provisioned for volume: %s" msgstr "" -#: cinder/auth/ldapdriver.py:650 +#: cinder/volume/drivers/block_device.py:157 cinder/volume/drivers/lvm.py:687 #, python-format msgid "" -"Attempted to remove the last member of a group. Deleting the group at %s " -"instead." +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %s" msgstr "" -#: cinder/auth/manager.py:298 +#: cinder/volume/drivers/block_device.py:183 cinder/volume/drivers/lvm.py:483 #, python-format -msgid "Looking up user: %r" +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" msgstr "" -#: cinder/auth/manager.py:302 +#: cinder/volume/drivers/block_device.py:200 cinder/volume/drivers/lvm.py:504 #, python-format -msgid "Failed authorization for access key %s" +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" msgstr "" -#: cinder/auth/manager.py:308 +#: cinder/volume/drivers/block_device.py:272 cinder/volume/drivers/lvm.py:227 #, python-format -msgid "Using project name = user name (%s)" +msgid "Performing secure delete on volume: %s" msgstr "" -#: cinder/auth/manager.py:315 +#: cinder/volume/drivers/block_device.py:287 #, python-format -msgid "failed authorization: no project named %(pjid)s (user=%(uname)s)" +msgid "Error unrecognized volume_clear option: %s" msgstr "" -#: cinder/auth/manager.py:324 -#, python-format -msgid "" -"Failed authorization: user %(uname)s not admin and not member of project " -"%(pjname)s" +#: cinder/volume/drivers/block_device.py:311 cinder/volume/drivers/lvm.py:300 +#: cinder/volume/drivers/zadara.py:509 cinder/volume/drivers/nexenta/nfs.py:189 +#, fuzzy, python-format +msgid "Creating clone of volume: %s" +msgstr "Vytvořit snímek svazku %s" + +#: cinder/volume/drivers/block_device.py:380 +msgid "No free disk" msgstr "" -#: cinder/auth/manager.py:331 cinder/auth/manager.py:343 -#, python-format -msgid "user.secret: %s" +#: cinder/volume/drivers/block_device.py:393 +msgid "No big enough free disk" msgstr "" -#: cinder/auth/manager.py:332 cinder/auth/manager.py:344 +#: cinder/volume/drivers/coraid.py:84 #, python-format -msgid "expected_signature: %s" +msgid "Invalid ESM url scheme \"%s\". Supported https only." msgstr "" -#: cinder/auth/manager.py:333 cinder/auth/manager.py:345 -#, python-format -msgid "signature: %s" +#: cinder/volume/drivers/coraid.py:111 +msgid "Invalid REST handle name. Expected path." msgstr "" -#: cinder/auth/manager.py:335 cinder/auth/manager.py:357 +#: cinder/volume/drivers/coraid.py:134 #, python-format -msgid "Invalid signature for user %s" +msgid "Call to json.loads() failed: %(ex)s. Response: %(resp)s" msgstr "" -#: cinder/auth/manager.py:353 -#, python-format -msgid "host_only_signature: %s" +#: cinder/volume/drivers/coraid.py:224 +msgid "Session is expired. Relogin on ESM." msgstr "" -#: cinder/auth/manager.py:449 -msgid "Must specify project" +#: cinder/volume/drivers/coraid.py:244 +msgid "Reply is empty." msgstr "" -#: cinder/auth/manager.py:490 -#, python-format -msgid "Adding role %(role)s to user %(uid)s in project %(pid)s" +#: cinder/volume/drivers/coraid.py:246 +msgid "Error message is empty." 
msgstr "" -#: cinder/auth/manager.py:493 +#: cinder/volume/drivers/coraid.py:284 #, python-format -msgid "Adding sitewide role %(role)s to user %(uid)s" +msgid "Coraid Appliance ping failed: %s" msgstr "" -#: cinder/auth/manager.py:519 +#: cinder/volume/drivers/coraid.py:297 #, python-format -msgid "Removing role %(role)s from user %(uid)s on project %(pid)s" +msgid "Volume \"%(name)s\" created with VSX LUN \"%(lun)s\"" msgstr "" -#: cinder/auth/manager.py:522 +#: cinder/volume/drivers/coraid.py:311 #, python-format -msgid "Removing sitewide role %(role)s from user %(uid)s" +msgid "Volume \"%s\" deleted." msgstr "" -#: cinder/auth/manager.py:595 +#: cinder/volume/drivers/coraid.py:315 #, python-format -msgid "Created project %(name)s with manager %(manager_user)s" +msgid "Resize volume \"%(name)s\" to %(size)s GB." msgstr "" -#: cinder/auth/manager.py:613 +#: cinder/volume/drivers/coraid.py:319 #, python-format -msgid "modifying project %s" +msgid "Repository for volume \"%(name)s\" found: \"%(repo)s\"" msgstr "" -#: cinder/auth/manager.py:625 +#: cinder/volume/drivers/coraid.py:333 #, python-format -msgid "Adding user %(uid)s to project %(pid)s" +msgid "Volume \"%(name)s\" resized. New size is %(size)s GB." msgstr "" -#: cinder/auth/manager.py:646 -#, python-format -msgid "Remove user %(uid)s from project %(pid)s" +#: cinder/volume/drivers/coraid.py:385 +msgid "Cannot create clone volume in different repository." msgstr "" -#: cinder/auth/manager.py:676 +#: cinder/volume/drivers/coraid.py:505 #, python-format -msgid "Deleting project %s" +msgid "Initialize connection %(shelf)s/%(lun)s for %(name)s" msgstr "" -#: cinder/auth/manager.py:734 +#: cinder/volume/drivers/eqlx.py:139 #, python-format -msgid "Created user %(rvname)s (admin: %(rvadmin)r)" +msgid "" +"CLI output\n" +"%s" msgstr "" -#: cinder/auth/manager.py:743 -#, python-format -msgid "Deleting user %s" +#: cinder/volume/drivers/eqlx.py:154 +msgid "Reading CLI MOTD" msgstr "" -#: cinder/auth/manager.py:753 +#: cinder/volume/drivers/eqlx.py:158 #, python-format -msgid "Access Key change for user %s" +msgid "Setting CLI terminal width: '%s'" msgstr "" -#: cinder/auth/manager.py:755 +#: cinder/volume/drivers/eqlx.py:162 #, python-format -msgid "Secret Key change for user %s" +msgid "Sending CLI command: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:169 +msgid "Error executing EQL command" msgstr "" -#: cinder/auth/manager.py:757 +#: cinder/volume/drivers/eqlx.py:199 #, python-format -msgid "Admin status set to %(admin)r for user %(uid)s" +msgid "EQL-driver: executing \"%s\"" msgstr "" -#: cinder/auth/manager.py:802 +#: cinder/volume/drivers/eqlx.py:208 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:383 #, python-format -msgid "No vpn data for project %s" +msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" msgstr "" -#: cinder/cloudpipe/pipelib.py:46 +#: cinder/volume/drivers/eqlx.py:215 cinder/volume/drivers/san/san.py:149 #, fuzzy, python-format -msgid "Instance type for vpn instances" -msgstr "Neplatná instance typu %(instance_type)s." +msgid "Error running SSH command: %s" +msgstr "Při spuštění příkazu došlo k nečekané chybě." 
-#: cinder/cloudpipe/pipelib.py:49
-msgid "Template for cloudpipe instance boot script"
+#: cinder/volume/drivers/eqlx.py:282
+#, python-format
+msgid "Volume %s does not exist, it may have already been deleted"
 msgstr ""

-#: cinder/cloudpipe/pipelib.py:52
-msgid "Network to push into openvpn config"
+#: cinder/volume/drivers/eqlx.py:300
+#, python-format
+msgid "EQL-driver: Setup is complete, group IP is %s"
 msgstr ""

-#: cinder/cloudpipe/pipelib.py:55
-msgid "Netmask to push into openvpn config"
+#: cinder/volume/drivers/eqlx.py:304
+msgid "Failed to setup the Dell EqualLogic driver"
 msgstr ""

-#: cinder/cloudpipe/pipelib.py:107
+#: cinder/volume/drivers/eqlx.py:320
 #, python-format
-msgid "Launching VPN for %s"
+msgid "Failed to create volume %s"
 msgstr ""

-#: cinder/compute/api.py:141
-msgid "No compute host specified"
+#: cinder/volume/drivers/eqlx.py:329
+#, python-format
+msgid "Volume %s was not found while trying to delete it"
 msgstr ""

-#: cinder/compute/api.py:144
+#: cinder/volume/drivers/eqlx.py:333
 #, python-format
-msgid "Unable to find host for Instance %s"
+msgid "Failed to delete volume %s"
 msgstr ""

-#: cinder/compute/api.py:192
+#: cinder/volume/drivers/eqlx.py:348
 #, python-format
-msgid ""
-"Quota exceeded for %(pid)s, tried to set %(num_metadata)s metadata "
-"properties"
+msgid "Failed to create snapshot of volume %s"
 msgstr ""

-#: cinder/compute/api.py:203
+#: cinder/volume/drivers/eqlx.py:361
 #, python-format
-msgid "Quota exceeded for %(pid)s, metadata property key or value too long"
+msgid "Failed to create volume from snapshot %s"
 msgstr ""

-#: cinder/compute/api.py:257
-msgid "Cannot run any more instances of this type."
+#: cinder/volume/drivers/eqlx.py:374
+#, python-format
+msgid "Failed to create clone of volume %s"
 msgstr ""

-#: cinder/compute/api.py:259
+#: cinder/volume/drivers/eqlx.py:384
 #, python-format
-msgid "Can only run %s more instances of this type."
+msgid "Failed to delete snapshot %(snap)s of volume %(vol)s"
 msgstr ""

-#: cinder/compute/api.py:261
+#: cinder/volume/drivers/eqlx.py:405
 #, python-format
-msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances. "
+msgid "Failed to initialize connection to volume %s"
 msgstr ""

-#: cinder/compute/api.py:310
-msgid "Creating a raw instance"
+#: cinder/volume/drivers/eqlx.py:415
+#, python-format
+msgid "Failed to terminate connection to volume %s"
 msgstr ""

-#: cinder/compute/api.py:312
+#: cinder/volume/drivers/eqlx.py:436
 #, python-format
-msgid "Using Kernel=%(kernel_id)s, Ramdisk=%(ramdisk_id)s"
+msgid "Volume %s is not found; it may have been deleted"
 msgstr ""

-#: cinder/compute/api.py:383
+#: cinder/volume/drivers/eqlx.py:440
 #, python-format
-msgid "Going to run %s instances..."
+msgid "Failed to ensure export of volume %s" msgstr "" -#: cinder/compute/api.py:447 +#: cinder/volume/drivers/eqlx.py:459 #, python-format -msgid "bdm %s" +msgid "Failed to extend_volume %(name)s from %(current_size)sGB to %(new_size)sGB" msgstr "" -#: cinder/compute/api.py:474 +#: cinder/volume/drivers/glusterfs.py:86 #, python-format -msgid "block_device_mapping %s" +msgid "There's no Gluster config file configured (%s)" msgstr "" -#: cinder/compute/api.py:591 +#: cinder/volume/drivers/glusterfs.py:91 #, python-format -msgid "Sending create to scheduler for %(pid)s/%(uid)s's" +msgid "Gluster config file at %(config)s doesn't exist" msgstr "" -#: cinder/compute/api.py:871 -#, fuzzy -msgid "Going to try to soft delete instance" -msgstr "Instance budou spuštěny" +#: cinder/volume/drivers/glusterfs.py:103 +msgid "mount.glusterfs is not installed" +msgstr "" -#: cinder/compute/api.py:891 -msgid "No host for instance, deleting immediately" +#: cinder/volume/drivers/glusterfs.py:161 +#, python-format +msgid "Cloning volume %(src)s to volume %(dst)s" msgstr "" -#: cinder/compute/api.py:939 -#, fuzzy -msgid "Going to try to terminate instance" -msgstr "BUde spuštěno ukončování insatncí" +#: cinder/volume/drivers/glusterfs.py:166 +msgid "Volume status must be 'available'." +msgstr "" -#: cinder/compute/api.py:977 -#, fuzzy -msgid "Going to try to stop instance" -msgstr "Instance budou zastaveny" +#: cinder/volume/drivers/glusterfs.py:202 cinder/volume/drivers/nfs.py:122 +#: cinder/volume/drivers/netapp/nfs.py:753 +#, python-format +msgid "casted to %s" +msgstr "" -#: cinder/compute/api.py:996 -#, fuzzy -msgid "Going to try to start instance" -msgstr "Instance budou spuštěny" +#: cinder/volume/drivers/glusterfs.py:216 +msgid "Snapshot status must be \"available\" to clone." +msgstr "" -#: cinder/compute/api.py:1000 +#: cinder/volume/drivers/glusterfs.py:238 #, python-format -msgid "Instance %(instance_uuid)s is not stopped. (%(vm_state)s" +msgid "snapshot: %(snap)s, volume: %(vol)s, volume_size: %(size)s" msgstr "" -#: cinder/compute/api.py:1071 cinder/volume/api.py:173 -#: cinder/volume/volume_types.py:64 +#: cinder/volume/drivers/glusterfs.py:257 #, python-format -msgid "Searching by: %s" +msgid "will copy from snapshot at %s" msgstr "" -#: cinder/compute/api.py:1201 +#: cinder/volume/drivers/glusterfs.py:275 cinder/volume/drivers/nfs.py:172 #, python-format -msgid "Image type not recognized %s" +msgid "Volume %s does not have provider_location specified, skipping" msgstr "" -#: cinder/compute/api.py:1369 -msgid "flavor_id is None. Assuming migration." +#: cinder/volume/drivers/glusterfs.py:373 +#, python-format +msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" msgstr "" -#: cinder/compute/api.py:1377 +#: cinder/volume/drivers/glusterfs.py:403 #, python-format -msgid "" -"Old instance type %(current_instance_type_name)s, new instance type " -"%(new_instance_type_name)s" +msgid "nova call result: %s" msgstr "" -#: cinder/compute/api.py:1644 -#, python-format -msgid "multiple fixedips exist, using the first: %s" +#: cinder/volume/drivers/glusterfs.py:405 +msgid "Call to Nova to create snapshot failed" msgstr "" -#: cinder/compute/instance_types.py:57 cinder/compute/instance_types.py:65 -msgid "create arguments must be positive integers" +#: cinder/volume/drivers/glusterfs.py:427 +msgid "Nova returned \"error\" status while creating snapshot." 
msgstr "" -#: cinder/compute/instance_types.py:76 cinder/volume/volume_types.py:41 +#: cinder/volume/drivers/glusterfs.py:431 #, python-format -msgid "DB error: %s" +msgid "Status of snapshot %(id)s is now %(status)s" msgstr "" -#: cinder/compute/instance_types.py:86 +#: cinder/volume/drivers/glusterfs.py:444 #, python-format -msgid "Instance type %s not found for deletion" +msgid "Timed out while waiting for Nova update for creation of snapshot %s." msgstr "" -#: cinder/compute/manager.py:138 +#: cinder/volume/drivers/glusterfs.py:456 #, python-format -msgid "check_instance_lock: decorating: |%s|" +msgid "create snapshot: %s" msgstr "" -#: cinder/compute/manager.py:140 +#: cinder/volume/drivers/glusterfs.py:457 #, python-format -msgid "" -"check_instance_lock: arguments: |%(self)s| |%(context)s| " -"|%(instance_uuid)s|" +msgid "volume id: %s" msgstr "" -#: cinder/compute/manager.py:144 +#: cinder/volume/drivers/glusterfs.py:532 +msgid "'active' must be present when writing snap_info." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:562 #, python-format -msgid "check_instance_lock: locked: |%s|" +msgid "deleting snapshot %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:566 +msgid "Volume status must be \"available\" or \"in-use\"." msgstr "" -#: cinder/compute/manager.py:146 +#: cinder/volume/drivers/glusterfs.py:582 #, python-format -msgid "check_instance_lock: admin: |%s|" +msgid "" +"Snapshot record for %s is not present, allowing snapshot_delete to " +"proceed." msgstr "" -#: cinder/compute/manager.py:151 +#: cinder/volume/drivers/glusterfs.py:587 #, python-format -msgid "check_instance_lock: executing: |%s|" +msgid "snapshot_file for this snap is %s" msgstr "" -#: cinder/compute/manager.py:155 +#: cinder/volume/drivers/glusterfs.py:608 #, python-format -msgid "check_instance_lock: not executing |%s|" +msgid "No base file found for %s." msgstr "" -#: cinder/compute/manager.py:201 +#: cinder/volume/drivers/glusterfs.py:625 #, python-format -msgid "Unable to load the virtualization driver: %s" +msgid "No %(base_id)s found for %(file)s" msgstr "" -#: cinder/compute/manager.py:223 +#: cinder/volume/drivers/glusterfs.py:680 #, python-format -msgid "" -"Instance %(instance_uuid)s has been destroyed from under us while trying " -"to set it to ERROR" +msgid "No file found with %s as backing file." msgstr "" -#: cinder/compute/manager.py:240 +#: cinder/volume/drivers/glusterfs.py:690 #, python-format -msgid "Current state is %(drv_state)s, state in DB is %(db_state)s." +msgid "No snap found with %s as backing file." msgstr "" -#: cinder/compute/manager.py:245 -msgid "Rebooting instance after cinder-compute restart." +#: cinder/volume/drivers/glusterfs.py:701 +#, python-format +msgid "No file depends on %s." msgstr "" -#: cinder/compute/manager.py:255 -msgid "Hypervisor driver does not support firewall rules" +#: cinder/volume/drivers/glusterfs.py:727 +#, python-format +msgid "Check condition failed: %s expected to be None." msgstr "" -#: cinder/compute/manager.py:260 -msgid "Checking state" +#: cinder/volume/drivers/glusterfs.py:778 +msgid "Call to Nova delete snapshot failed" msgstr "" -#: cinder/compute/manager.py:329 +#: cinder/volume/drivers/glusterfs.py:796 #, python-format -msgid "Setting up bdm %s" +msgid "status of snapshot %s is still \"deleting\"... waiting" msgstr "" -#: cinder/compute/manager.py:400 +#: cinder/volume/drivers/glusterfs.py:802 #, python-format -msgid "Instance %s already deleted from database. 
Attempting forceful vm deletion" +msgid "Unable to delete snapshot %(id)s, status: %(status)s." msgstr "" -#: cinder/compute/manager.py:406 +#: cinder/volume/drivers/glusterfs.py:815 #, python-format -msgid "Exception encountered while terminating the instance %s" +msgid "Timed out while waiting for Nova update for deletion of snapshot %(id)s." msgstr "" -#: cinder/compute/manager.py:444 +#: cinder/volume/drivers/glusterfs.py:904 #, python-format -msgid "Instance %s not found." +msgid "%s must be a valid raw or qcow2 image." msgstr "" -#: cinder/compute/manager.py:480 -msgid "Instance has already been created" +#: cinder/volume/drivers/glusterfs.py:967 +msgid "Extend volume is only supported for this driver when no snapshots exist." msgstr "" -#: cinder/compute/manager.py:523 +#: cinder/volume/drivers/glusterfs.py:975 #, python-format -msgid "" -"image_id=%(image_id)s, image_size_bytes=%(size_bytes)d, " -"allowed_size_bytes=%(allowed_size_bytes)d" +msgid "Unrecognized backing format: %s" msgstr "" -#: cinder/compute/manager.py:528 +#: cinder/volume/drivers/glusterfs.py:990 #, python-format -msgid "" -"Image '%(image_id)s' size %(size_bytes)d exceeded instance_type allowed " -"size %(allowed_size_bytes)d" +msgid "creating new volume at %s" msgstr "" -#: cinder/compute/manager.py:538 -msgid "Starting instance..." +#: cinder/volume/drivers/glusterfs.py:993 +#, python-format +msgid "file already exists at %s" msgstr "" -#: cinder/compute/manager.py:548 -msgid "Skipping network allocation for instance" +#: cinder/volume/drivers/glusterfs.py:1019 cinder/volume/drivers/nfs.py:159 +#, fuzzy, python-format +msgid "Exception during mounting %s" +msgstr "Výjimka při načítání rozšíření: %s" + +#: cinder/volume/drivers/glusterfs.py:1021 +#, python-format +msgid "Available shares: %s" msgstr "" -#: cinder/compute/manager.py:561 -msgid "Instance failed network setup" +#: cinder/volume/drivers/glusterfs.py:1038 +#, python-format +msgid "" +"GlusterFS share at %(dir)s is not writable by the Cinder volume service. " +"Snapshot operations will not be supported." msgstr "" -#: cinder/compute/manager.py:565 +#: cinder/volume/drivers/gpfs.py:96 #, python-format -msgid "Instance network_info: |%s|" +msgid "GPFS is not active. Detailed output: %s" msgstr "" -#: cinder/compute/manager.py:578 -msgid "Instance failed block device setup" +#: cinder/volume/drivers/gpfs.py:97 +#, python-format +msgid "GPFS is not running - state: %s" msgstr "" -#: cinder/compute/manager.py:594 -msgid "Instance failed to spawn" +#: cinder/volume/drivers/gpfs.py:140 +msgid "Option gpfs_mount_point_base is not set correctly." msgstr "" -#: cinder/compute/manager.py:615 -msgid "Deallocating network for instance" +#: cinder/volume/drivers/gpfs.py:147 +msgid "Option gpfs_images_share_mode is not set correctly." msgstr "" -#: cinder/compute/manager.py:672 -#, python-format -msgid "%(action_str)s instance" +#: cinder/volume/drivers/gpfs.py:153 +msgid "Option gpfs_images_dir is not set correctly." msgstr "" -#: cinder/compute/manager.py:699 +#: cinder/volume/drivers/gpfs.py:160 #, python-format -msgid "Ignoring DiskNotFound: %s" +msgid "" +"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " +"belong to different file systems" msgstr "" -#: cinder/compute/manager.py:708 +#: cinder/volume/drivers/gpfs.py:169 #, python-format -msgid "terminating bdm %s" +msgid "" +"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in " +"cluster daemon level %(cur)s - must be at least at level %(min)s." 
msgstr "" -#: cinder/compute/manager.py:742 cinder/compute/manager.py:1328 -#: cinder/compute/manager.py:1416 cinder/compute/manager.py:2501 +#: cinder/volume/drivers/gpfs.py:183 #, python-format -msgid "%s. Setting instance vm_state to ERROR" +msgid "%s must be an absolute path." msgstr "" -#: cinder/compute/manager.py:811 +#: cinder/volume/drivers/gpfs.py:188 #, python-format -msgid "" -"Cannot rebuild instance [%(instance_uuid)s], because the given image does" -" not exist." +msgid "%s is not a directory." msgstr "" -#: cinder/compute/manager.py:816 +#: cinder/volume/drivers/gpfs.py:197 #, python-format -msgid "Cannot rebuild instance [%(instance_uuid)s]: %(exc)s" +msgid "" +"The GPFS filesystem %(fs)s is not at the required release level. Current" +" level is %(cur)s, must be at least %(min)s." msgstr "" -#: cinder/compute/manager.py:823 +#: cinder/volume/drivers/gpfs.py:556 #, python-format -msgid "Rebuilding instance %s" +msgid "Failed to resize volume %(volume_id)s, error: %(error)s" msgstr "" -#: cinder/compute/manager.py:876 +#: cinder/volume/drivers/gpfs.py:604 #, python-format -msgid "Rebooting instance %s" +msgid "mkfs failed on volume %(vol)s, error message was: %(err)s" msgstr "" -#: cinder/compute/manager.py:891 +#: cinder/volume/drivers/gpfs.py:637 #, python-format msgid "" -"trying to reboot a non-running instance: %(instance_uuid)s (state: " -"%(state)s expected: %(running)s)" +"%s cannot be accessed. Verify that GPFS is active and file system is " +"mounted." msgstr "" -#: cinder/compute/manager.py:933 +#: cinder/volume/drivers/lvm.py:189 #, python-format -msgid "instance %s: snapshotting" +msgid "Unabled to delete due to existing snapshot for volume: %s" msgstr "" -#: cinder/compute/manager.py:939 +#: cinder/volume/drivers/lvm.py:215 #, python-format -msgid "" -"trying to snapshot a non-running instance: %(instance_uuid)s (state: " -"%(state)s expected: %(running)s)" +msgid "Volume device file path %s does not exist." msgstr "" -#: cinder/compute/manager.py:995 +#: cinder/volume/drivers/lvm.py:221 #, python-format -msgid "Found %(num_images)d images (rotation: %(rotation)d)" +msgid "Size for volume: %s not found, cannot secure delete." msgstr "" -#: cinder/compute/manager.py:1001 +#: cinder/volume/drivers/lvm.py:262 #, python-format -msgid "Rotating out %d backups" +msgid "snapshot: %s not found, skipping delete operations" msgstr "" -#: cinder/compute/manager.py:1005 +#: cinder/volume/drivers/lvm.py:361 #, python-format -msgid "Deleting image %s" +msgid "Unable to update stats on non-initialized Volume Group: %s" msgstr "" -#: cinder/compute/manager.py:1035 +#: cinder/volume/drivers/lvm.py:462 #, python-format -msgid "Failed to set admin password. Instance %s is not running" +msgid "Error creating iSCSI target, retrying creation for target: %s" msgstr "" -#: cinder/compute/manager.py:1041 +#: cinder/volume/drivers/lvm.py:482 #, python-format -msgid "Instance %s: Root password set" +msgid "volume_info:%s" msgstr "" -#: cinder/compute/manager.py:1050 -msgid "set_admin_password is not implemented by this driver." 
+#: cinder/volume/drivers/lvm.py:518 +msgid "Detected inconsistency in provider_location id" msgstr "" -#: cinder/compute/manager.py:1064 -msgid "Error setting admin password" +#: cinder/volume/drivers/lvm.py:519 cinder/volume/drivers/lvm.py:724 +#: cinder/volume/drivers/huawei/rest_common.py:1225 +#, python-format +msgid "%s" msgstr "" -#: cinder/compute/manager.py:1079 -#, python-format -msgid "" -"trying to inject a file into a non-running instance: %(instance_uuid)s " -"(state: %(current_power_state)s expected: %(expected_state)s)" +#: cinder/volume/drivers/lvm.py:573 +#, fuzzy, python-format +msgid "Symbolic link %s not found" +msgstr "značka [%s] nenalezena" + +#: cinder/volume/drivers/nfs.py:109 +msgid "Driver specific implementation needs to return mount_point_base." msgstr "" -#: cinder/compute/manager.py:1084 +#: cinder/volume/drivers/nfs.py:263 #, python-format -msgid "instance %(instance_uuid)s: injecting file to %(path)s" +msgid "Expected volume size was %d" msgstr "" -#: cinder/compute/manager.py:1098 +#: cinder/volume/drivers/nfs.py:264 #, python-format -msgid "" -"trying to update agent on a non-running instance: %(instance_uuid)s " -"(state: %(current_power_state)s expected: %(expected_state)s)" +msgid " but size is now %d" msgstr "" -#: cinder/compute/manager.py:1103 +#: cinder/volume/drivers/nfs.py:361 +#, fuzzy, python-format +msgid "%s is already mounted" +msgstr "skupina %s již existuje" + +#: cinder/volume/drivers/nfs.py:421 #, python-format -msgid "instance %(instance_uuid)s: updating agent to %(url)s" +msgid "There's no NFS config file configured (%s)" msgstr "" -#: cinder/compute/manager.py:1116 +#: cinder/volume/drivers/nfs.py:426 #, python-format -msgid "instance %s: rescuing" +msgid "NFS config file at %(config)s doesn't exist" msgstr "" -#: cinder/compute/manager.py:1141 +#: cinder/volume/drivers/nfs.py:431 #, python-format -msgid "instance %s: unrescuing" +msgid "NFS config 'nfs_oversub_ratio' invalid. Must be > 0: %s" msgstr "" -#: cinder/compute/manager.py:1270 -msgid "destination same as source!" +#: cinder/volume/drivers/nfs.py:439 +#, python-format +msgid "NFS config 'nfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" msgstr "" -#: cinder/compute/manager.py:1287 +#: cinder/volume/drivers/nfs.py:493 #, python-format -msgid "instance %s: migrating" +msgid "Selected %s as target nfs share." 
msgstr "" -#: cinder/compute/manager.py:1471 +#: cinder/volume/drivers/nfs.py:526 #, python-format -msgid "instance %s: pausing" +msgid "%s is above nfs_used_ratio" msgstr "" -#: cinder/compute/manager.py:1489 +#: cinder/volume/drivers/nfs.py:529 #, python-format -msgid "instance %s: unpausing" +msgid "%s is above nfs_oversub_ratio" msgstr "" -#: cinder/compute/manager.py:1525 +#: cinder/volume/drivers/nfs.py:532 #, python-format -msgid "instance %s: retrieving diagnostics" +msgid "%s reserved space is above nfs_oversub_ratio" msgstr "" -#: cinder/compute/manager.py:1534 +#: cinder/volume/drivers/rbd.py:160 #, python-format -msgid "instance %s: suspending" +msgid "Invalid argument - whence=%s not supported" msgstr "" -#: cinder/compute/manager.py:1556 -#, python-format -msgid "instance %s: resuming" +#: cinder/volume/drivers/rbd.py:164 +msgid "Invalid argument" msgstr "" -#: cinder/compute/manager.py:1579 -#, python-format -msgid "instance %s: locking" +#: cinder/volume/drivers/rbd.py:183 +msgid "fileno() not supported by RBD()" msgstr "" -#: cinder/compute/manager.py:1588 -#, python-format -msgid "instance %s: unlocking" +#: cinder/volume/drivers/rbd.py:210 +#, fuzzy, python-format +msgid "error opening rbd image %s" +msgstr "Chyba v přesunu %s" + +#: cinder/volume/drivers/rbd.py:259 +msgid "rados and rbd python libraries not found" msgstr "" -#: cinder/compute/manager.py:1596 -#, python-format -msgid "instance %s: getting locked state" +#: cinder/volume/drivers/rbd.py:265 +msgid "error connecting to ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:346 cinder/volume/drivers/sheepdog.py:178 +msgid "error refreshing volume stats" msgstr "" -#: cinder/compute/manager.py:1606 +#: cinder/volume/drivers/rbd.py:377 #, python-format -msgid "instance %s: reset network" +msgid "clone depth exceeds limit of %s" msgstr "" -#: cinder/compute/manager.py:1614 +#: cinder/volume/drivers/rbd.py:411 #, python-format -msgid "instance %s: inject network info" +msgid "maximum clone depth (%d) has been reached - flattening source volume" msgstr "" -#: cinder/compute/manager.py:1618 +#: cinder/volume/drivers/rbd.py:423 #, python-format -msgid "network_info to inject: |%s|" +msgid "flattening source volume %s" msgstr "" -#: cinder/compute/manager.py:1655 +#: cinder/volume/drivers/rbd.py:435 #, python-format -msgid "instance %s: getting vnc console" +msgid "creating snapshot='%s'" msgstr "" -#: cinder/compute/manager.py:1685 +#: cinder/volume/drivers/rbd.py:445 #, python-format -msgid "Booting with volume %(volume_id)s at %(mountpoint)s" +msgid "cloning '%(src_vol)s@%(src_snap)s' to '%(dest)s'" msgstr "" -#: cinder/compute/manager.py:1703 -#, fuzzy, python-format -msgid "" -"instance %(instance_uuid)s: attaching volume %(volume_id)s to " -"%(mountpoint)s" +#: cinder/volume/drivers/rbd.py:459 +msgid "clone created successfully" msgstr "" -"Konzole pro instanci %(instance_id)s v zásobě %(pool_id)s nemohla být " -"nalezena." -#: cinder/compute/manager.py:1705 +#: cinder/volume/drivers/rbd.py:468 #, python-format -msgid "Attaching volume %(volume_id)s to %(mountpoint)s" +msgid "creating volume '%s'" msgstr "" -#: cinder/compute/manager.py:1714 -#, fuzzy, python-format -msgid "instance %(instance_uuid)s: attach failed %(mountpoint)s, removing" -msgstr "Instance %(instance_id)s není v nouzovém režimu." 
- -#: cinder/compute/manager.py:1724 +#: cinder/volume/drivers/rbd.py:484 #, python-format -msgid "Attach failed %(mountpoint)s, removing" +msgid "flattening %(pool)s/%(img)s" msgstr "" -#: cinder/compute/manager.py:1752 +#: cinder/volume/drivers/rbd.py:490 #, python-format -msgid "Detach volume %(volume_id)s from mountpoint %(mp)s" +msgid "cloning %(pool)s/%(img)s@%(snap)s to %(dst)s" msgstr "" -#: cinder/compute/manager.py:1756 -#, python-format -msgid "Detaching volume from unknown instance %s" +#: cinder/volume/drivers/rbd.py:527 +msgid "volume has no backup snaps" msgstr "" -#: cinder/compute/manager.py:1822 +#: cinder/volume/drivers/rbd.py:550 #, python-format -msgid "" -"Creating tmpfile %s to notify to other compute nodes that they should " -"mount the same storage." +msgid "volume %s is not a clone" msgstr "" -#: cinder/compute/manager.py:1884 -msgid "Instance has no volume." +#: cinder/volume/drivers/rbd.py:568 +#, python-format +msgid "deleting parent snapshot %s" msgstr "" -#: cinder/compute/manager.py:1916 +#: cinder/volume/drivers/rbd.py:579 #, python-format -msgid "plug_vifs() failed %(cnt)d.Retry up to %(max_retry)d for %(hostname)s." +msgid "deleting parent %s" msgstr "" -#: cinder/compute/manager.py:1973 +#: cinder/volume/drivers/rbd.py:593 #, python-format -msgid "Pre live migration failed at %(dest)s" +msgid "volume %s no longer exists in backend" msgstr "" -#: cinder/compute/manager.py:2000 -msgid "post_live_migration() is started.." +#: cinder/volume/drivers/rbd.py:609 +msgid "volume has clone snapshot(s)" msgstr "" -#: cinder/compute/manager.py:2030 -msgid "No floating_ip found" +#: cinder/volume/drivers/rbd.py:625 +#, python-format +msgid "deleting rbd volume %s" msgstr "" -#: cinder/compute/manager.py:2038 -msgid "No floating_ip found." +#: cinder/volume/drivers/rbd.py:629 +msgid "" +"ImageBusy error raised while deleting rbd volume. This may have been " +"caused by a connection from a client that has crashed and, if so, may be " +"resolved by retrying the delete after 30 seconds has elapsed." msgstr "" -#: cinder/compute/manager.py:2040 -#, python-format -msgid "" -"Live migration: Unexpected error: cannot inherit floating ip.\n" -"%(e)s" +#: cinder/volume/drivers/rbd.py:642 +msgid "volume is a clone so cleaning references" msgstr "" -#: cinder/compute/manager.py:2073 +#: cinder/volume/drivers/rbd.py:696 #, python-format -msgid "Migrating instance to %(dest)s finished successfully." +msgid "connection data: %s" msgstr "" -#: cinder/compute/manager.py:2075 -msgid "" -"You may see the error \"libvirt: QEMU error: Domain not found: no domain " -"with matching name.\" This error can be safely ignored." 
+#: cinder/volume/drivers/rbd.py:705 +msgid "Not stored in rbd" msgstr "" -#: cinder/compute/manager.py:2090 -msgid "Post operation of migration started" +#: cinder/volume/drivers/rbd.py:709 +msgid "Blank components" msgstr "" -#: cinder/compute/manager.py:2226 +#: cinder/volume/drivers/rbd.py:712 +#, fuzzy +msgid "Not an rbd snapshot" +msgstr "Neplatný snímek" + +#: cinder/volume/drivers/rbd.py:724 +#, fuzzy, python-format +msgid "not cloneable: %s" +msgstr "Název roz: %s" + +#: cinder/volume/drivers/rbd.py:728 #, python-format -msgid "Updated the info_cache for instance %s" +msgid "%s is in a different ceph cluster" msgstr "" -#: cinder/compute/manager.py:2255 -msgid "Updating bandwidth usage cache" +#: cinder/volume/drivers/rbd.py:733 +msgid "rbd image clone requires image format to be 'raw' but image {0} is '{1}'" msgstr "" -#: cinder/compute/manager.py:2277 -msgid "Updating host status" +#: cinder/volume/drivers/rbd.py:747 +#, fuzzy, python-format +msgid "Unable to open image %(loc)s: %(err)s" +msgstr "Poškozené tělo zprávy: %(reason)s" + +#: cinder/volume/drivers/rbd.py:817 +msgid "volume backup complete." msgstr "" -#: cinder/compute/manager.py:2305 -#, python-format -msgid "" -"Found %(num_db_instances)s in the database and %(num_vm_instances)s on " -"the hypervisor." +#: cinder/volume/drivers/rbd.py:830 +msgid "volume restore complete." msgstr "" -#: cinder/compute/manager.py:2331 +#: cinder/volume/drivers/rbd.py:840 cinder/volume/drivers/sheepdog.py:195 #, python-format -msgid "" -"During the sync_power process the instance %(uuid)s has moved from host " -"%(src)s to host %(dst)s" +msgid "Failed to Extend Volume %(volname)s" msgstr "" -#: cinder/compute/manager.py:2344 +#: cinder/volume/drivers/rbd.py:845 cinder/volume/drivers/sheepdog.py:200 +#: cinder/volume/drivers/windows/windows.py:223 #, python-format -msgid "" -"Instance %s is in the process of migrating to this host. Wait next " -"sync_power cycle before setting power state to NOSTATE" +msgid "Extend volume from %(old_size)s GB to %(new_size)s GB." msgstr "" -#: cinder/compute/manager.py:2350 -msgid "" -"Instance found in database but not known by hypervisor. Setting power " -"state to NOSTATE" +#: cinder/volume/drivers/scality.py:67 +msgid "Value required for 'scality_sofs_config'" msgstr "" -#: cinder/compute/manager.py:2380 -msgid "FLAGS.reclaim_instance_interval <= 0, skipping..." +#: cinder/volume/drivers/scality.py:78 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" msgstr "" -#: cinder/compute/manager.py:2392 -msgid "Reclaiming deleted instance" +#: cinder/volume/drivers/scality.py:84 +msgid "Cannot execute /sbin/mount.sofs" msgstr "" -#: cinder/compute/manager.py:2458 -#, python-format -msgid "" -"Detected instance with name label '%(name)s' which is marked as DELETED " -"but still present on host." +#: cinder/volume/drivers/scality.py:105 +msgid "Cannot mount Scality SOFS, check syslog for errors" msgstr "" -#: cinder/compute/manager.py:2465 +#: cinder/volume/drivers/scality.py:139 #, python-format -msgid "" -"Destroying instance with name label '%(name)s' which is marked as DELETED" -" but still present on host." 
+msgid "Cannot find volume dir for Scality SOFS at '%s'" msgstr "" -#: cinder/compute/manager.py:2472 +#: cinder/volume/drivers/sheepdog.py:59 #, python-format -msgid "Unrecognized value '%(action)s' for FLAGS.running_deleted_instance_action" +msgid "Sheepdog is not working: %s" msgstr "" -#: cinder/compute/manager.py:2542 -#, fuzzy, python-format -msgid "" -"Aggregate %(aggregate_id)s: unrecoverable state during operation on " -"%(host)s" -msgstr "Agregát %(aggregate_id)s již má hostitele %(host)s." - -#: cinder/compute/utils.py:142 -msgid "v4 subnets are required for legacy nw_info" +#: cinder/volume/drivers/sheepdog.py:64 +msgid "Sheepdog is not working" msgstr "" -#: cinder/console/manager.py:77 cinder/console/vmrc_manager.py:70 -msgid "Adding console" +#: cinder/volume/drivers/solidfire.py:144 +#, python-format +msgid "Payload for SolidFire API call: %s" msgstr "" -#: cinder/console/manager.py:97 +#: cinder/volume/drivers/solidfire.py:151 #, python-format -msgid "Tried to remove non-existent console %(console_id)s." +msgid "" +"Failed to make httplib connection SolidFire Cluster: %s (verify san_ip " +"settings)" msgstr "" -#: cinder/console/vmrc_manager.py:122 +#: cinder/volume/drivers/solidfire.py:154 #, python-format -msgid "Tried to remove non-existent console %(console_id)s." +msgid "Failed to make httplib connection: %s" msgstr "" -#: cinder/console/vmrc_manager.py:125 +#: cinder/volume/drivers/solidfire.py:161 #, python-format -msgid "Removing console %(console_id)s." +msgid "" +"Request to SolidFire cluster returned bad status: %(status)s / %(reason)s" +" (check san_login/san_password settings)" msgstr "" -#: cinder/console/xvp.py:98 -msgid "Rebuilding xvp conf" +#: cinder/volume/drivers/solidfire.py:166 +#, python-format +msgid "HTTP request failed, with status: %(status)s and reason: %(reason)s" msgstr "" -#: cinder/console/xvp.py:116 +#: cinder/volume/drivers/solidfire.py:177 #, python-format -msgid "Re-wrote %s" +msgid "Call to json.loads() raised an exception: %s" msgstr "" -#: cinder/console/xvp.py:121 -msgid "Stopping xvp" +#: cinder/volume/drivers/solidfire.py:183 +#, python-format +msgid "Results of SolidFire API call: %s" msgstr "" -#: cinder/console/xvp.py:134 -msgid "Starting xvp" +#: cinder/volume/drivers/solidfire.py:187 +#, python-format +msgid "Clone operation encountered: %s" msgstr "" -#: cinder/console/xvp.py:141 +#: cinder/volume/drivers/solidfire.py:189 #, python-format -msgid "Error starting xvp: %s" +msgid "Waiting for outstanding operation before retrying snapshot: %s" msgstr "" -#: cinder/console/xvp.py:144 -msgid "Restarting xvp" +#: cinder/volume/drivers/solidfire.py:195 +#, python-format +msgid "Detected xDBVersionMismatch, retry %s of 5" msgstr "" -#: cinder/console/xvp.py:146 -msgid "xvp not running..." -msgstr "" +#: cinder/volume/drivers/solidfire.py:202 +#: cinder/volume/drivers/solidfire.py:271 +#: cinder/volume/drivers/solidfire.py:366 +#, fuzzy, python-format +msgid "API response: %s" +msgstr "odpověď %s" -#: cinder/consoleauth/manager.py:63 +#: cinder/volume/drivers/solidfire.py:222 #, python-format -msgid "Deleting Expired Token: (%s)" +msgid "Found solidfire account: %s" msgstr "" -#: cinder/consoleauth/manager.py:75 +#: cinder/volume/drivers/solidfire.py:253 #, python-format -msgid "Received Token: %(token)s, %(token_dict)s)" +msgid "solidfire account: %s does not exist, create it..." 
msgstr "" -#: cinder/consoleauth/manager.py:79 +#: cinder/volume/drivers/solidfire.py:315 #, python-format -msgid "Checking Token: %(token)s, %(token_valid)s)" +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" msgstr "" -#: cinder/db/sqlalchemy/api.py:57 -msgid "Use of empty request context is deprecated" -msgstr "" +#: cinder/volume/drivers/solidfire.py:398 +#, fuzzy +msgid "Failed to get model update from clone" +msgstr "Nelze získat metadata pro ip: %s" -#: cinder/db/sqlalchemy/api.py:198 +#: cinder/volume/drivers/solidfire.py:410 #, python-format -msgid "Unrecognized read_deleted value '%s'" +msgid "Failed volume create: %s" msgstr "" -#: cinder/db/sqlalchemy/api.py:516 cinder/db/sqlalchemy/api.py:551 +#: cinder/volume/drivers/solidfire.py:425 #, python-format -msgid "No ComputeNode for %(host)s" +msgid "More than one valid preset was detected, using %s" msgstr "" -#: cinder/db/sqlalchemy/api.py:4019 cinder/db/sqlalchemy/api.py:4045 +#: cinder/volume/drivers/solidfire.py:460 #, python-format -msgid "No backend config with id %(sm_backend_id)s" +msgid "Failed to get SolidFire Volume: %s" msgstr "" -#: cinder/db/sqlalchemy/api.py:4103 +#: cinder/volume/drivers/solidfire.py:469 #, python-format -msgid "No sm_flavor called %(sm_flavor)s" +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." msgstr "" -#: cinder/db/sqlalchemy/api.py:4147 +#: cinder/volume/drivers/solidfire.py:478 #, python-format -msgid "No sm_volume with id %(volume_id)s" +msgid "Volume %s, not found on SF Cluster." msgstr "" -#: cinder/db/sqlalchemy/migration.py:66 -msgid "python-migrate is not installed. Exiting." +#: cinder/volume/drivers/solidfire.py:481 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." msgstr "" -#: cinder/db/sqlalchemy/migration.py:78 -msgid "version should be an integer" +#: cinder/volume/drivers/solidfire.py:550 +msgid "Enter SolidFire delete_volume..." msgstr "" -#: cinder/db/sqlalchemy/session.py:137 +#: cinder/volume/drivers/solidfire.py:554 #, python-format -msgid "SQL connection failed. %s attempts left." +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:48 -msgid "interface column not added to networks table" +#: cinder/volume/drivers/solidfire.py:556 +msgid "This usually means the volume was never successfully created." msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:80 -#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:54 -#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:61 -#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:48 +#: cinder/volume/drivers/solidfire.py:569 #, python-format -msgid "Table |%s| not created!" +msgid "Failed to delete SolidFire Volume: %s" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:87 -msgid "VIF column not added to fixed_ips table" -msgstr "" - -#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:97 +#: cinder/volume/drivers/solidfire.py:572 +#: cinder/volume/drivers/solidfire.py:646 +#: cinder/volume/drivers/solidfire.py:709 +#: cinder/volume/drivers/solidfire.py:734 #, python-format -msgid "join list for moving mac_addresses |%s|" +msgid "Volume ID %s was not found on the SolidFire Cluster!" 
msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:39 -#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:60 -#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:61 -#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:99 -msgid "foreign key constraint couldn't be added" +#: cinder/volume/drivers/solidfire.py:575 +msgid "Leaving SolidFire delete_volume" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:58 -msgid "foreign key constraint couldn't be dropped" +#: cinder/volume/drivers/solidfire.py:579 +msgid "Executing SolidFire ensure_export..." msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/045_add_network_priority.py:34 -msgid "priority column not added to networks table" +#: cinder/volume/drivers/solidfire.py:587 +msgid "Executing SolidFire create_export..." msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:41 -#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:42 -#: cinder/db/sqlalchemy/migrate_repo/versions/064_change_instance_id_to_uuid_in_instance_actions.py:56 -#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:68 -msgid "foreign key constraint couldn't be removed" +#: cinder/volume/drivers/solidfire.py:638 +msgid "Entering SolidFire extend_volume..." msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/049_add_instances_progress.py:34 -msgid "progress column not added to instances table" +#: cinder/volume/drivers/solidfire.py:660 +msgid "Leaving SolidFire extend_volume" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/055_convert_flavor_id_to_str.py:97 -#, python-format -msgid "" -"Could not cast flavorid to integer: %s. Set flavorid to an integer-like " -"string to downgrade." +#: cinder/volume/drivers/solidfire.py:665 +msgid "Updating cluster status info" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:69 -msgid "instance_info_caches tables not dropped" -msgstr "" +#: cinder/volume/drivers/solidfire.py:673 +#, fuzzy +msgid "Failed to get updated stats" +msgstr "Nelze vytvořit typ instance" -#: cinder/db/sqlalchemy/migrate_repo/versions/069_block_migration.py:41 -msgid "progress column not added to compute_nodes table" +#: cinder/volume/drivers/solidfire.py:703 +#: cinder/volume/drivers/solidfire.py:728 +msgid "Entering SolidFire attach_volume..." msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:76 -msgid "dns_domains table not dropped" +#: cinder/volume/drivers/solidfire.py:773 +msgid "Leaving SolidFire transfer volume" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:60 -msgid "quota_classes table not dropped" +#: cinder/volume/drivers/zadara.py:236 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" msgstr "" -#: cinder/image/glance.py:147 -msgid "Connection error contacting glance server, retrying" +#: cinder/volume/drivers/zadara.py:260 +#, python-format +msgid "Operation completed. 
%(data)s" msgstr "" -#: cinder/image/glance.py:153 cinder/network/quantum/melange_connection.py:104 -msgid "Maximum attempts reached" +#: cinder/volume/drivers/zadara.py:357 +#, python-format +msgid "Pool %(name)s: %(total)sGB total, %(free)sGB free" msgstr "" -#: cinder/image/glance.py:278 +#: cinder/volume/drivers/zadara.py:408 cinder/volume/drivers/zadara.py:531 +#, fuzzy, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "Svazek %(volume_id)s nemohl být nastaven." + +#: cinder/volume/drivers/zadara.py:438 #, python-format -msgid "Creating image in Glance. Metadata passed in %s" +msgid "Create snapshot: %s" msgstr "" -#: cinder/image/glance.py:281 +#: cinder/volume/drivers/zadara.py:445 cinder/volume/drivers/zadara.py:490 +#: cinder/volume/drivers/zadara.py:516 #, python-format -msgid "Metadata after formatting for Glance %s" +msgid "Volume %(name)s not found" msgstr "" -#: cinder/image/glance.py:289 +#: cinder/volume/drivers/zadara.py:456 #, python-format -msgid "Metadata returned from Glance formatted for Base %s" +msgid "Delete snapshot: %s" msgstr "" -#: cinder/image/glance.py:331 cinder/image/glance.py:335 -msgid "Not the image owner" +#: cinder/volume/drivers/zadara.py:464 +#, python-format +msgid "snapshot: original volume %s not found, skipping delete operation" msgstr "" -#: cinder/image/glance.py:410 +#: cinder/volume/drivers/zadara.py:472 #, python-format -msgid "%(timestamp)s does not follow any of the signatures: %(iso_formats)s" +msgid "snapshot: snapshot %s not found, skipping delete operation" msgstr "" -#: cinder/image/s3.py:309 +#: cinder/volume/drivers/zadara.py:483 #, python-format -msgid "Failed to download %(image_location)s to %(image_path)s" +msgid "Creating volume from snapshot: %s" msgstr "" -#: cinder/image/s3.py:328 +#: cinder/volume/drivers/zadara.py:496 #, python-format -msgid "Failed to decrypt %(image_location)s to %(image_path)s" +msgid "Snapshot %(name)s not found" msgstr "" -#: cinder/image/s3.py:340 +#: cinder/volume/drivers/zadara.py:614 #, python-format -msgid "Failed to untar %(image_location)s to %(image_path)s" +msgid "Attach properties: %(properties)s" msgstr "" -#: cinder/image/s3.py:353 -#, python-format -msgid "Failed to upload %(image_location)s to %(image_path)s" +#: cinder/volume/drivers/emc/emc_smis_common.py:40 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." msgstr "" -#: cinder/image/s3.py:379 +#: cinder/volume/drivers/emc/emc_smis_common.py:79 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:83 #, python-format -msgid "Failed to decrypt private key: %s" +msgid "Create Volume: %(volume)s Size: %(size)lu" msgstr "" -#: cinder/image/s3.py:387 +#: cinder/volume/drivers/emc/emc_smis_common.py:91 #, python-format -msgid "Failed to decrypt initialization vector: %s" +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" msgstr "" -#: cinder/image/s3.py:398 +#: cinder/volume/drivers/emc/emc_smis_common.py:98 #, python-format -msgid "Failed to decrypt image file %(image_file)s: %(err)s" +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" msgstr "" -#: cinder/image/s3.py:410 -msgid "Unsafe filenames in image" +#: cinder/volume/drivers/emc/emc_smis_common.py:107 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." 
msgstr "" -#: cinder/ipv6/account_identifier.py:38 cinder/ipv6/rfc2462.py:34 +#: cinder/volume/drivers/emc/emc_smis_common.py:115 #, python-format -msgid "Bad mac for to_global_ipv6: %s" +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" msgstr "" -#: cinder/ipv6/account_identifier.py:40 cinder/ipv6/rfc2462.py:36 +#: cinder/volume/drivers/emc/emc_smis_common.py:130 #, python-format -msgid "Bad prefix for to_global_ipv6: %s" +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" msgstr "" -#: cinder/ipv6/account_identifier.py:42 +#: cinder/volume/drivers/emc/emc_smis_common.py:137 #, python-format -msgid "Bad project_id for to_global_ipv6: %s" +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" msgstr "" -#: cinder/network/ldapdns.py:321 -msgid "This driver only supports type 'a' entries." +#: cinder/volume/drivers/emc/emc_smis_common.py:144 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" msgstr "" -#: cinder/network/linux_net.py:166 +#: cinder/volume/drivers/emc/emc_smis_common.py:152 +#, fuzzy +msgid "Entering create_volume_from_snapshot." +msgstr "Vytvořit svazek ze snímku %s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:157 #, python-format -msgid "Attempted to remove chain %s which does not exist" +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" msgstr "" -#: cinder/network/linux_net.py:192 +#: cinder/volume/drivers/emc/emc_smis_common.py:167 #, python-format -msgid "Unknown chain: %r" +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." msgstr "" -#: cinder/network/linux_net.py:215 +#: cinder/volume/drivers/emc/emc_smis_common.py:177 #, python-format msgid "" -"Tried to remove rule that was not there: %(chain)r %(rule)r %(wrap)r " -"%(top)r" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." msgstr "" -#: cinder/network/linux_net.py:335 -msgid "IPTablesManager.apply completed with success" +#: cinder/volume/drivers/emc/emc_smis_common.py:188 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." msgstr "" -#: cinder/network/linux_net.py:694 +#: cinder/volume/drivers/emc/emc_smis_common.py:197 #, python-format -msgid "Hupping dnsmasq threw %s" +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" msgstr "" -#: cinder/network/linux_net.py:696 +#: cinder/volume/drivers/emc/emc_smis_common.py:218 #, python-format -msgid "Pid %d is stale, relaunching dnsmasq" +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" msgstr "" -#: cinder/network/linux_net.py:756 +#: cinder/volume/drivers/emc/emc_smis_common.py:230 #, python-format -msgid "killing radvd threw %s" +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. 
Finding the " +"clone relationship." msgstr "" -#: cinder/network/linux_net.py:758 +#: cinder/volume/drivers/emc/emc_smis_common.py:241 #, python-format -msgid "Pid %d is stale, relaunching radvd" +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" msgstr "" -#: cinder/network/linux_net.py:967 +#: cinder/volume/drivers/emc/emc_smis_common.py:257 #, python-format -msgid "Starting VLAN inteface %s" +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" msgstr "" -#: cinder/network/linux_net.py:999 +#: cinder/volume/drivers/emc/emc_smis_common.py:266 #, python-format -msgid "Starting Bridge interface for %s" +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" msgstr "" -#: cinder/network/linux_net.py:1142 -#, fuzzy, python-format -msgid "Starting bridge %s " -msgstr "Zrušení registrace obrazu %s" +#: cinder/volume/drivers/emc/emc_smis_common.py:278 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" -#: cinder/network/linux_net.py:1149 -#, fuzzy, python-format -msgid "Done starting bridge %s" -msgstr "Zrušení registrace obrazu %s" +#: cinder/volume/drivers/emc/emc_smis_common.py:287 +msgid "Entering create_cloned_volume." +msgstr "" -#: cinder/network/linux_net.py:1167 +#: cinder/volume/drivers/emc/emc_smis_common.py:292 #, python-format -msgid "Failed unplugging gateway interface '%s'" +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" msgstr "" -#: cinder/network/linux_net.py:1170 +#: cinder/volume/drivers/emc/emc_smis_common.py:302 #, python-format -msgid "Unplugged gateway interface '%s'" +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." msgstr "" -#: cinder/network/manager.py:291 -#, fuzzy, python-format -msgid "Fixed ip %(fixed_ip_id)s not found" -msgstr "Certifikát %(certificate_id)s nenalezen." - -#: cinder/network/manager.py:300 cinder/network/manager.py:496 +#: cinder/volume/drivers/emc/emc_smis_common.py:312 #, python-format -msgid "Interface %(interface)s not found" +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." msgstr "" -#: cinder/network/manager.py:315 +#: cinder/volume/drivers/emc/emc_smis_common.py:321 #, python-format -msgid "floating IP allocation for instance |%s|" +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" msgstr "" -#: cinder/network/manager.py:353 +#: cinder/volume/drivers/emc/emc_smis_common.py:342 #, python-format -msgid "floating IP deallocation for instance |%s|" +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. 
Return code: %(rc)lu. Error: %(error)s"
 msgstr ""

-#: cinder/network/manager.py:386
+#: cinder/volume/drivers/emc/emc_smis_common.py:354
 #, python-format
-msgid "Address |%(address)s| is not allocated"
+msgid ""
+"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s."
+" Successfully cloned volume from source volume. Finding the clone "
+"relationship."
 msgstr ""

-#: cinder/network/manager.py:390
+#: cinder/volume/drivers/emc/emc_smis_common.py:365
 #, python-format
-msgid "Address |%(address)s| is not allocated to your project |%(project)s|"
+msgid ""
+"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s."
+" Remove the clone relationship. Method: ModifyReplicaSynchronization "
+"ReplicationService: %(service)s Operation: 8 Synchronization: "
+"%(sync_name)s"
 msgstr ""

-#: cinder/network/manager.py:402
+#: cinder/volume/drivers/emc/emc_smis_common.py:381
 #, python-format
-msgid "Quota exceeded for %s, tried to allocate address"
+msgid ""
+"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s"
+" Return code: %(rc)lu"
 msgstr ""

-#: cinder/network/manager.py:614
+#: cinder/volume/drivers/emc/emc_smis_common.py:390
 #, python-format
 msgid ""
-"Database inconsistency: DNS domain |%s| is registered in the Cinder db but "
-"not visible to either the floating or instance DNS driver. It will be "
-"ignored."
+"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: "
+"%(srcname)s. Return code: %(rc)lu. Error: %(error)s"
 msgstr ""

-#: cinder/network/manager.py:660
+#: cinder/volume/drivers/emc/emc_smis_common.py:402
 #, python-format
-msgid "Domain |%(domain)s| already exists, changing zone to |%(av_zone)s|."
+msgid ""
+"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: "
+"%(srcname)s Return code: %(rc)lu."
 msgstr ""

-#: cinder/network/manager.py:670
-#, python-format
-msgid "Domain |%(domain)s| already exists, changing project to |%(project)s|."
+#: cinder/volume/drivers/emc/emc_smis_common.py:411
+msgid "Entering delete_volume."
 msgstr ""

-#: cinder/network/manager.py:778
+#: cinder/volume/drivers/emc/emc_smis_common.py:413
+#, fuzzy, python-format
+msgid "Delete Volume: %(volume)s"
+msgstr "mazání svazku %(volume_name)s který má snímek"
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:420
 #, python-format
-msgid "Disassociated %s stale fixed ip(s)"
+msgid "Volume %(name)s not found on the array. No volume to delete."
 msgstr ""

-#: cinder/network/manager.py:782
-msgid "setting network host"
-msgstr ""
-
-#: cinder/network/manager.py:896
+#: cinder/volume/drivers/emc/emc_smis_common.py:430
 #, python-format
-msgid "network allocations for instance |%s|"
+msgid ""
+"Error Delete Volume: %(volumename)s. Storage Configuration Service not "
+"found."
 msgstr ""

-#: cinder/network/manager.py:901
+#: cinder/volume/drivers/emc/emc_smis_common.py:438
 #, python-format
-msgid "networks retrieved for instance |%(instance_id)s|: |%(networks)s|"
+msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s"
 msgstr ""

-#: cinder/network/manager.py:930
+#: cinder/volume/drivers/emc/emc_smis_common.py:442
 #, python-format
-msgid "network deallocation for instance |%s|"
+msgid ""
+"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigService: "
+"%(service)s TheElement: %(vol_instance)s"
 msgstr ""

-#: cinder/network/manager.py:1152
+#: cinder/volume/drivers/emc/emc_smis_common.py:456
 #, python-format
 msgid ""
-"instance-dns-zone is |%(domain)s|, which is in availability zone "
-"|%(zone)s|. 
Instance |%(instance)s| is in zone |%(zone2)s|. No DNS record" -" will be created." +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" msgstr "" -#: cinder/network/manager.py:1227 +#: cinder/volume/drivers/emc/emc_smis_common.py:465 #, python-format -msgid "Unable to release %s because vif doesn't exist." +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" msgstr "" -#: cinder/network/manager.py:1244 -#, python-format -msgid "Leased IP |%(address)s|" +#: cinder/volume/drivers/emc/emc_smis_common.py:472 +msgid "Entering create_snapshot." msgstr "" -#: cinder/network/manager.py:1248 +#: cinder/volume/drivers/emc/emc_smis_common.py:476 +#, fuzzy, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "Vytvořit snímek svazku %s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:488 #, python-format -msgid "IP %s leased that is not associated" +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" msgstr "" -#: cinder/network/manager.py:1256 +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:497 +#: cinder/volume/drivers/emc/emc_smis_common.py:567 #, python-format -msgid "IP |%s| leased that isn't allocated" +msgid "Cannot find Replication Service to create snapshot for volume %s." msgstr "" -#: cinder/network/manager.py:1261 +#: cinder/volume/drivers/emc/emc_smis_common.py:502 #, python-format -msgid "Released IP |%(address)s|" +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." msgstr "" -#: cinder/network/manager.py:1265 +#: cinder/volume/drivers/emc/emc_smis_common.py:518 #, python-format -msgid "IP %s released that is not associated" +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" msgstr "" -#: cinder/network/manager.py:1268 +#: cinder/volume/drivers/emc/emc_smis_common.py:527 +#, fuzzy, python-format +msgid "" +"Error Create Snapshot: %(snapshot)s Volume: %(volume)s Error: " +"%(errordesc)s" +msgstr "Vytvořit snímek svazku %s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:535 #, python-format -msgid "IP %s released that was not leased" +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." msgstr "" -#: cinder/network/manager.py:1331 -msgid "cidr already in use" +#: cinder/volume/drivers/emc/emc_smis_common.py:541 +msgid "Entering delete_snapshot." msgstr "" -#: cinder/network/manager.py:1334 +#: cinder/volume/drivers/emc/emc_smis_common.py:545 +#, fuzzy, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "Vytvořit snímek svazku %s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:551 #, python-format -msgid "requested cidr (%(cidr)s) conflicts with existing supernet (%(super)s)" +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSynchronization_SV_SV." msgstr "" -#: cinder/network/manager.py:1345 +#: cinder/volume/drivers/emc/emc_smis_common.py:559 #, python-format msgid "" -"requested cidr (%(cidr)s) conflicts with existing smaller cidr " -"(%(smaller)s)" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." msgstr "" -#: cinder/network/manager.py:1404 -msgid "Network already exists!"
+#: cinder/volume/drivers/emc/emc_smis_common.py:574 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." msgstr "" -#: cinder/network/manager.py:1423 +#: cinder/volume/drivers/emc/emc_smis_common.py:590 #, python-format -msgid "Network must be disassociated from project %s before delete" +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" msgstr "" -#: cinder/network/manager.py:1832 +#: cinder/volume/drivers/emc/emc_smis_common.py:599 +#, python-format msgid "" -"The sum between the number of networks and the vlan start cannot be " -"greater than 4094" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" msgstr "" -#: cinder/network/manager.py:1839 +#: cinder/volume/drivers/emc/emc_smis_common.py:611 #, python-format msgid "" -"The network range is not big enough to fit %(num_networks)s. Network size" -" is %(network_size)s" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." msgstr "" -#: cinder/network/minidns.py:65 -msgid "This driver only supports type 'a'" -msgstr "" +#: cinder/volume/drivers/emc/emc_smis_common.py:621 +#, fuzzy, python-format +msgid "Create export: %(volume)s" +msgstr "Vytvořit snímek svazku %s" -#: cinder/network/quantum/client.py:154 -msgid "Tenant ID not set" -msgstr "" +#: cinder/volume/drivers/emc/emc_smis_common.py:626 +#, fuzzy, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "Připojit svazek %(volume_id)s k instanci %(instance_id)s na %(device)s" -#: cinder/network/quantum/client.py:180 +#: cinder/volume/drivers/emc/emc_smis_common.py:648 #, python-format -msgid "Quantum Client Request: %(method)s %(action)s" +msgid "" +"ExposePaths: %(vol)s ConfigService: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" msgstr "" -#: cinder/network/quantum/client.py:196 +#: cinder/volume/drivers/emc/emc_smis_common.py:663 #, python-format -msgid "Quantum entity not found: %s" +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" msgstr "" -#: cinder/network/quantum/client.py:206 +#: cinder/volume/drivers/emc/emc_smis_common.py:674 +#, fuzzy, python-format +msgid "Error mapping volume %s." +msgstr "Chyba v přesunu %s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:678 #, python-format -msgid "Server %(status_code)s error: %(data)s" +msgid "ExposePaths for volume %s completed successfully." msgstr "" -#: cinder/network/quantum/client.py:210 +#: cinder/volume/drivers/emc/emc_smis_common.py:694 #, python-format -msgid "Unable to connect to server. Got error: %s" +msgid "" +"HidePaths: %(vol)s ConfigService: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" msgstr "" -#: cinder/network/quantum/client.py:228 +#: cinder/volume/drivers/emc/emc_smis_common.py:707 #, python-format -msgid "unable to deserialize object of type = '%s'" +msgid "Error unmapping volume %s." msgstr "" -#: cinder/network/quantum/manager.py:162 -msgid "QuantumManager does not use 'multi_host' parameter." +#: cinder/volume/drivers/emc/emc_smis_common.py:711 +#, python-format +msgid "HidePaths for volume %s completed successfully."
msgstr "" -#: cinder/network/quantum/manager.py:166 -msgid "QuantumManager requires that only one network is created per call" +#: cinder/volume/drivers/emc/emc_smis_common.py:724 +#, python-format +msgid "" +"AddMembers: ConfigService: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" msgstr "" -#: cinder/network/quantum/manager.py:176 -msgid "QuantumManager does not use 'vlan_start' parameter." +#: cinder/volume/drivers/emc/emc_smis_common.py:739 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" msgstr "" -#: cinder/network/quantum/manager.py:182 -msgid "QuantumManager does not use 'vpn_start' parameter." +#: cinder/volume/drivers/emc/emc_smis_common.py:744 +#, python-format +msgid "AddMembers for volume %s completed successfully." msgstr "" -#: cinder/network/quantum/manager.py:186 -msgid "QuantumManager does not use 'bridge' parameter." +#: cinder/volume/drivers/emc/emc_smis_common.py:757 +#, python-format +msgid "" +"RemoveMembers: ConfigService: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" msgstr "" -#: cinder/network/quantum/manager.py:190 -msgid "QuantumManager does not use 'bridge_interface' parameter." +#: cinder/volume/drivers/emc/emc_smis_common.py:770 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" msgstr "" -#: cinder/network/quantum/manager.py:195 -msgid "QuantumManager requires a valid (.1) gateway address." +#: cinder/volume/drivers/emc/emc_smis_common.py:775 +#, python-format +msgid "RemoveMembers for volume %s completed successfully." msgstr "" -#: cinder/network/quantum/manager.py:204 +#: cinder/volume/drivers/emc/emc_smis_common.py:781 #, python-format -msgid "" -"Unable to find existing quantum network for tenant '%(q_tenant_id)s' with" -" net-id '%(quantum_net_id)s'" +msgid "Map volume: %(volume)s" msgstr "" -#: cinder/network/quantum/manager.py:301 +#: cinder/volume/drivers/emc/emc_smis_common.py:790 +#: cinder/volume/drivers/emc/emc_smis_common.py:820 #, python-format -msgid "network allocations for instance %s" +msgid "Cannot find Controller Configuration Service for storage system %s" msgstr "" -#: cinder/network/quantum/manager.py:588 +#: cinder/volume/drivers/emc/emc_smis_common.py:804 #, python-format -msgid "" -"port deallocation failed for instance: |%(instance_id)s|, port_id: " -"|%(port_id)s|" +msgid "Unmap volume: %(volume)s" msgstr "" -#: cinder/network/quantum/manager.py:606 +#: cinder/volume/drivers/emc/emc_smis_common.py:810 #, python-format -msgid "" -"ipam deallocation failed for instance: |%(instance_id)s|, vif_uuid: " -"|%(vif_uuid)s|" +msgid "Volume %s is not mapped. No volume to unmap." msgstr "" -#: cinder/network/quantum/melange_connection.py:96 +#: cinder/volume/drivers/emc/emc_smis_common.py:834 #, python-format -msgid "Server returned error: %s" +msgid "Initialize connection: %(volume)s" msgstr "" -#: cinder/network/quantum/melange_connection.py:98 -msgid "Connection error contacting melange service, retrying" +#: cinder/volume/drivers/emc/emc_smis_common.py:840 +#, python-format +msgid "Volume %s is already mapped."
msgstr "" -#: cinder/network/quantum/melange_connection.py:108 +#: cinder/volume/drivers/emc/emc_smis_common.py:852 #, python-format -msgid "" -"allocate IP on network |%(network_id)s| belonging to " -"|%(network_tenant_id)s| to this vif |%(vif_id)s| with mac " -"|%(mac_address)s| belonging to |%(project_id)s| " +msgid "Terminate connection: %(volume)s" msgstr "" -#: cinder/network/quantum/melange_ipam_lib.py:133 -msgid "get_project_and_global_net_ids must be called with a non-null project_id" +#: cinder/volume/drivers/emc/emc_smis_common.py:884 +#, python-format +msgid "Found Storage Type: %s" msgstr "" -#: cinder/network/quantum/cinder_ipam_lib.py:75 -msgid "Error creating network entry" -msgstr "" +#: cinder/volume/drivers/emc/emc_smis_common.py:887 +#, fuzzy +msgid "Storage type not found." +msgstr "Obraz nenalezen" -#: cinder/network/quantum/cinder_ipam_lib.py:90 +#: cinder/volume/drivers/emc/emc_smis_common.py:903 #, python-format -msgid "No network with net_id = %s" +msgid "Found Masking View: %s" msgstr "" -#: cinder/network/quantum/cinder_ipam_lib.py:221 -#, fuzzy, python-format -msgid "No fixed IPs to deallocate for vif %s" -msgstr "K instanci nejsou přidruženy žádné pevné ip" +#: cinder/volume/drivers/emc/emc_smis_common.py:906 +#, fuzzy +msgid "Masking View not found." +msgstr "Obraz nenalezen" -#: cinder/network/quantum/quantum_connection.py:99 -#, python-format -msgid "Connecting interface %(interface_id)s to net %(net_id)s for %(tenant_id)s" -msgstr "" +#: cinder/volume/drivers/emc/emc_smis_common.py:928 +#, fuzzy +msgid "Ecom user not found." +msgstr "Server nenalezen." -#: cinder/network/quantum/quantum_connection.py:113 +#: cinder/volume/drivers/emc/emc_smis_common.py:948 #, python-format -msgid "Deleting port %(port_id)s on net %(net_id)s for %(tenant_id)s" +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" msgstr "" -#: cinder/notifier/api.py:115 +#: cinder/volume/drivers/emc/emc_smis_common.py:952 +#, fuzzy +msgid "Ecom server not found." +msgstr "Server nenalezen." + +#: cinder/volume/drivers/emc/emc_smis_common.py:959 +#, fuzzy +msgid "Cannot connect to ECOM server" +msgstr "Znovu připojeno k frontě" + +#: cinder/volume/drivers/emc/emc_smis_common.py:971 #, python-format -msgid "%s not in valid priorities" +msgid "Found Replication Service: %s" msgstr "" -#: cinder/notifier/api.py:130 +#: cinder/volume/drivers/emc/emc_smis_common.py:984 #, python-format -msgid "" -"Problem '%(e)s' attempting to send to notification system. " -"Payload=%(payload)s" +msgid "Found Storage Configuration Service: %s" msgstr "" -#: cinder/notifier/list_notifier.py:65 +#: cinder/volume/drivers/emc/emc_smis_common.py:997 #, python-format -msgid "Problem '%(e)s' attempting to send to notification driver %(driver)s." +msgid "Found Controller Configuration Service: %s" msgstr "" -#: cinder/notifier/rabbit_notifier.py:46 +#: cinder/volume/drivers/emc/emc_smis_common.py:1010 #, python-format -msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgid "Found Storage Hardware ID Management Service: %s" msgstr "" -#: cinder/rpc/amqp.py:146 -#, python-format -msgid "Returning exception %s to caller" -msgstr "Volajícímu je vrácena výjimka: %s" +#: cinder/volume/drivers/emc/emc_smis_common.py:1054 +#, fuzzy, python-format +msgid "Pool %(storage_type)s is not found." +msgstr "Role %(role_id)s nemohla být nalezena." 
-#: cinder/rpc/amqp.py:188 +#: cinder/volume/drivers/emc/emc_smis_common.py:1060 #, python-format -msgid "unpacked context: %s" -msgstr "rozbalený kontext: %s" +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" -#: cinder/rpc/amqp.py:231 +#: cinder/volume/drivers/emc/emc_smis_common.py:1066 #, python-format -msgid "received %s" -msgstr "obdrženo: %s" +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" -#: cinder/rpc/amqp.py:236 +#: cinder/volume/drivers/emc/emc_smis_common.py:1082 #, python-format -msgid "no method for message: %s" -msgstr "pro zprávu není metoda: %s" +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" -#: cinder/rpc/amqp.py:237 -#, python-format -msgid "No method for message: %s" -msgstr "Pro zprávu není metoda: %s" +#: cinder/volume/drivers/emc/emc_smis_common.py:1114 +#, fuzzy, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "Svazek %(volume_id)s nemohl být nastaven." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1117 +#, fuzzy, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "Svazek není nalezen v instanci %(instance_id)s." -#: cinder/rpc/amqp.py:321 +#: cinder/volume/drivers/emc/emc_smis_common.py:1130 #, python-format -msgid "Making asynchronous call on %s ..." +msgid "Source: %(volumename)s Target: %(snapshotname)s." msgstr "" -#: cinder/rpc/amqp.py:324 +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 #, python-format -msgid "MSG_ID is %s" -msgstr "MSG_ID je %s" +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" -#: cinder/rpc/amqp.py:346 +#: cinder/volume/drivers/emc/emc_smis_common.py:1158 #, python-format -msgid "Making asynchronous cast on %s..." +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." msgstr "" -#: cinder/rpc/amqp.py:354 -msgid "Making asynchronous fanout cast..." -msgstr "" +#: cinder/volume/drivers/emc/emc_smis_common.py:1184 +#, fuzzy, python-format +msgid "Error finding %s." +msgstr "Chyba v přesunu %s" -#: cinder/rpc/amqp.py:379 +#: cinder/volume/drivers/emc/emc_smis_common.py:1188 #, python-format -msgid "Sending notification on %s..." +msgid "Found %(name)s: %(initiator)s." msgstr "" -#: cinder/rpc/common.py:54 +#: cinder/volume/drivers/emc/emc_smis_common.py:1248 #, python-format msgid "" -"Remote error: %(exc_type)s %(value)s\n" -"%(traceback)s." -msgstr "" - -#: cinder/rpc/common.py:71 -msgid "Timeout while waiting on RPC response." -msgstr "" - -#: cinder/rpc/impl_kombu.py:111 -msgid "Failed to process message... skipping it." +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." msgstr "" -#: cinder/rpc/impl_kombu.py:407 +#: cinder/volume/drivers/emc/emc_smis_common.py:1289 #, python-format -msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." msgstr "" -#: cinder/rpc/impl_kombu.py:430 +#: cinder/volume/drivers/emc/emc_smis_common.py:1302 #, python-format -msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." 
msgstr "" -#: cinder/rpc/impl_kombu.py:466 +#: cinder/volume/drivers/emc/emc_smis_common.py:1314 #, python-format msgid "" -"Unable to connect to AMQP server on %(hostname)s:%(port)d after " -"%(max_retries)d tries: %(err_str)s" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." msgstr "" -#: cinder/rpc/impl_kombu.py:482 +#: cinder/volume/drivers/emc/emc_smis_common.py:1326 #, python-format msgid "" -"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " -"again in %(sleep_time)d seconds." +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." msgstr "" -#: cinder/rpc/impl_kombu.py:533 cinder/rpc/impl_qpid.py:385 +#: cinder/volume/drivers/emc/emc_smis_common.py:1361 #, python-format -msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgid "Available device number on %(storage)s: %(device)s." msgstr "" -#: cinder/rpc/impl_kombu.py:551 cinder/rpc/impl_qpid.py:400 +#: cinder/volume/drivers/emc/emc_smis_common.py:1404 #, python-format -msgid "Timed out waiting for RPC response: %s" +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." msgstr "" -#: cinder/rpc/impl_kombu.py:555 cinder/rpc/impl_qpid.py:404 +#: cinder/volume/drivers/emc/emc_smis_common.py:1409 #, python-format -msgid "Failed to consume message from queue: %s" +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." msgstr "" -#: cinder/rpc/impl_kombu.py:589 cinder/rpc/impl_qpid.py:434 +#: cinder/volume/drivers/emc/emc_smis_common.py:1419 #, python-format -msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgid "Device info: %(data)s." msgstr "" -#: cinder/rpc/impl_qpid.py:341 +#: cinder/volume/drivers/emc/emc_smis_common.py:1441 #, python-format -msgid "Unable to connect to AMQP server: %s " +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." msgstr "" -#: cinder/rpc/impl_qpid.py:346 +#: cinder/volume/drivers/emc/emc_smis_common.py:1463 #, python-format -msgid "Connected to AMQP server on %s" +msgid "Found Storage Processor System: %s" msgstr "" -#: cinder/rpc/impl_qpid.py:354 -msgid "Re-established AMQP queues" +#: cinder/volume/drivers/emc/emc_smis_common.py:1491 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." msgstr "" -#: cinder/rpc/impl_qpid.py:412 -msgid "Error processing message. Skipping it." +#: cinder/volume/drivers/emc/emc_smis_common.py:1520 +msgid "Error finding Storage Hardware ID Service." msgstr "" -#: cinder/scheduler/chance.py:47 cinder/scheduler/simple.py:91 -#: cinder/scheduler/simple.py:143 -msgid "Is the appropriate service running?" +#: cinder/volume/drivers/emc/emc_smis_common.py:1526 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." msgstr "" -#: cinder/scheduler/chance.py:52 -msgid "Could not find another compute" +#: cinder/volume/drivers/emc/emc_smis_common.py:1538 +msgid "Error finding Target WWNs." msgstr "" -#: cinder/scheduler/driver.py:63 +#: cinder/volume/drivers/emc/emc_smis_common.py:1548 #, python-format -msgid "Casted '%(method)s' to volume '%(host)s'" +msgid "Add target WWN: %s." msgstr "" -#: cinder/scheduler/driver.py:80 +#: cinder/volume/drivers/emc/emc_smis_common.py:1550 #, python-format -msgid "Casted '%(method)s' to compute '%(host)s'" +msgid "Target WWNs: %s." 
msgstr "" -#: cinder/scheduler/driver.py:89 +#: cinder/volume/drivers/emc/emc_smis_common.py:1566 #, python-format -msgid "Casted '%(method)s' to network '%(host)s'" +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." msgstr "" -#: cinder/scheduler/driver.py:107 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:152 #, python-format -msgid "Casted '%(method)s' to %(topic)s '%(host)s'" +msgid "Could not find iSCSI export for volume %s" msgstr "" -#: cinder/scheduler/driver.py:181 -msgid "Must implement a fallback schedule" +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:161 +#, python-format +msgid "Cannot find device number for volume %s" msgstr "" -#: cinder/scheduler/driver.py:185 -msgid "Driver must implement schedule_prep_resize" -msgstr "" +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:191 +#, fuzzy, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "Vyvoláno Nenalezeno: %s" -#: cinder/scheduler/driver.py:190 -msgid "Driver must implement schedule_run_instance" +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:198 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." msgstr "" -#: cinder/scheduler/driver.py:325 -msgid "Block migration can not be used with shared storage." +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:215 +#, python-format +msgid "ISCSI properties: %s" msgstr "" -#: cinder/scheduler/driver.py:330 -msgid "Live migration can not be used without shared storage." +#: cinder/volume/drivers/hds/hds.py:70 +#, python-format +msgid "Range: start LU: %(start)s, end LU: %(end)s" msgstr "" -#: cinder/scheduler/driver.py:367 cinder/scheduler/driver.py:457 +#: cinder/volume/drivers/hds/hds.py:84 #, python-format -msgid "host %(dest)s is not compatible with original host %(src)s." +msgid "setting LU upper (end) limit to %s" msgstr "" -#: cinder/scheduler/driver.py:416 +#: cinder/volume/drivers/hds/hds.py:92 #, python-format -msgid "" -"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " -"memory(host:%(avail)s <= instance:%(mem_inst)s)" +msgid "%(element)s: %(val)s" msgstr "" -#: cinder/scheduler/driver.py:472 +#: cinder/volume/drivers/hds/hds.py:103 cinder/volume/drivers/hds/hds.py:105 +#, fuzzy, python-format +msgid "XML exception reading parameter: %s" +msgstr "Výjimka při načítání rozšíření: %s" + +#: cinder/volume/drivers/hds/hds.py:178 #, python-format -msgid "" -"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " -"disk(host:%(available)s <= instance:%(necessary)s)" +msgid "portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s" msgstr "" -#: cinder/scheduler/filter_scheduler.py:51 +#: cinder/volume/drivers/hds/hds.py:197 #, python-format -msgid "No host selection for %s defined." +msgid "No configuration found for service: %s" msgstr "" -#: cinder/scheduler/filter_scheduler.py:64 +#: cinder/volume/drivers/hds/hds.py:250 +#, fuzzy, python-format +msgid "HDP not found: %s" +msgstr "Instance nenalezena" + +#: cinder/volume/drivers/hds/hds.py:289 #, python-format -msgid "Attempting to build %(num_instances)d instance(s)" +msgid "iSCSI portal not found for service: %s" msgstr "" -#: cinder/scheduler/filter_scheduler.py:157 -msgid "Scheduler only understands Compute nodes (for now)" +#: cinder/volume/drivers/hds/hds.py:327 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created." msgstr "" -#: cinder/scheduler/filter_scheduler.py:199 +#: cinder/volume/drivers/hds/hds.py:355 #, python-format -msgid "Filtered %(hosts)s" +msgid "LUN %(lun)s of size %(size)s MB is cloned." 
msgstr "" -#: cinder/scheduler/filter_scheduler.py:209 +#: cinder/volume/drivers/hds/hds.py:372 #, python-format -msgid "Weighted %(weighted_host)s" +msgid "LUN %(lun)s extended to %(size)s GB." msgstr "" -#: cinder/scheduler/host_manager.py:144 +#: cinder/volume/drivers/hds/hds.py:395 +#, fuzzy, python-format +msgid "delete lun %(lun)s on %(name)s" +msgstr "mazání svazku %(volume_name)s který má snímek" + +#: cinder/volume/drivers/hds/hds.py:480 #, python-format -msgid "Host filter fails for ignored host %(host)s" +msgid "LUN %(lun)s of size %(sz)s MB is created from snapshot." msgstr "" -#: cinder/scheduler/host_manager.py:151 +#: cinder/volume/drivers/hds/hds.py:503 #, python-format -msgid "Host filter fails for non-forced host %(host)s" +msgid "LUN %(lun)s of size %(size)s MB is created as snapshot." msgstr "" -#: cinder/scheduler/host_manager.py:157 +#: cinder/volume/drivers/hds/hds.py:522 #, python-format -msgid "Host filter function %(func)s failed for %(host)s" +msgid "LUN %s is deleted." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:57 +msgid "_instantiate_driver: configuration not found." msgstr "" -#: cinder/scheduler/host_manager.py:163 +#: cinder/volume/drivers/huawei/__init__.py:64 #, python-format -msgid "Host filter passes for %(host)s" +msgid "" +"_instantiate_driver: Loading %(protocol)s driver for Huawei OceanStor " +"%(product)s series storage arrays." msgstr "" -#: cinder/scheduler/host_manager.py:272 +#: cinder/volume/drivers/huawei/__init__.py:84 #, python-format -msgid "Received %(service_name)s service update from %(host)s." +msgid "" +"\"Product\" or \"Protocol\" is illegal. \"Product\" should be set to " +"either T, Dorado or HVS. \"Protocol\" should be set to either iSCSI or " +"FC. Product: %(product)s Protocol: %(protocol)s" msgstr "" -#: cinder/scheduler/host_manager.py:313 -msgid "host_manager only implemented for 'compute'" +#: cinder/volume/drivers/huawei/huawei_dorado.py:74 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s host: %(host)s initiator: " +"%(wwn)s" msgstr "" -#: cinder/scheduler/host_manager.py:323 +#: cinder/volume/drivers/huawei/huawei_dorado.py:92 +#: cinder/volume/drivers/huawei/huawei_t.py:461 #, python-format -msgid "No service for compute ID %s" +msgid "initialize_connection: Target FC ports WWNS: %s" msgstr "" -#: cinder/scheduler/manager.py:85 +#: cinder/volume/drivers/huawei/huawei_t.py:101 #, python-format msgid "" -"Driver Method %(driver_method_name)s missing: %(e)s. Reverting to " -"schedule()" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(ini)s" msgstr "" -#: cinder/scheduler/manager.py:150 +#: cinder/volume/drivers/huawei/huawei_t.py:159 +#: cinder/volume/drivers/huawei/rest_common.py:1278 #, python-format -msgid "Failed to schedule_%(method)s: %(ex)s" +msgid "" +"_get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " +"check config file." msgstr "" -#: cinder/scheduler/manager.py:159 -#, fuzzy, python-format -msgid "Setting instance %(instance_uuid)s to %(state)s state." -msgstr "Nastavování hostitele %(host)s na %(state)s." +#: cinder/volume/drivers/huawei/huawei_t.py:206 +#: cinder/volume/drivers/huawei/rest_common.py:1083 +#, python-format +msgid "_get_tgt_iqn: iSCSI IP is %s." +msgstr "" -#: cinder/scheduler/scheduler_options.py:66 +#: cinder/volume/drivers/huawei/huawei_t.py:234 #, python-format -msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgid "_get_tgt_iqn: iSCSI target iqn is %s." 
msgstr "" -#: cinder/scheduler/scheduler_options.py:75 +#: cinder/volume/drivers/huawei/huawei_t.py:248 #, python-format -msgid "Could not decode scheduler options: '%(e)s'" +msgid "" +"_get_iscsi_tgt_port_info: Failed to get iSCSI port info. Please make sure" +" the iSCSI port IP %s is configured in array." msgstr "" -#: cinder/scheduler/simple.py:87 -msgid "Not enough allocatable CPU cores remaining" +#: cinder/volume/drivers/huawei/huawei_t.py:323 +#: cinder/volume/drivers/huawei/huawei_t.py:552 +#, python-format +msgid "" +"terminate_connection: volume: %(vol)s, host: %(host)s, connector: " +"%(initiator)s" msgstr "" -#: cinder/scheduler/simple.py:137 -msgid "Not enough allocatable volume gigabytes remaining" +#: cinder/volume/drivers/huawei/huawei_t.py:351 +#, python-format +msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s." msgstr "" -#: cinder/scheduler/filters/core_filter.py:45 -msgid "VCPUs not set; assuming CPU collection broken" +#: cinder/volume/drivers/huawei/huawei_t.py:436 +msgid "validate_connector: The FC driver requires the wwpns in the connector." msgstr "" -#: cinder/tests/fake_utils.py:72 +#: cinder/volume/drivers/huawei/huawei_t.py:443 #, python-format -msgid "Faking execution of cmd (subprocess): %s" +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(wwn)s" msgstr "" -#: cinder/tests/fake_utils.py:80 +#: cinder/volume/drivers/huawei/huawei_t.py:578 #, python-format -msgid "Faked command matched %s" +msgid "_remove_fc_ports: FC port was not found on host %(hostid)s." msgstr "" -#: cinder/tests/fake_utils.py:96 +#: cinder/volume/drivers/huawei/huawei_utils.py:40 #, python-format -msgid "Faked command raised an exception %s" +msgid "parse_xml_file: %s" msgstr "" -#: cinder/tests/fake_utils.py:101 +#: cinder/volume/drivers/huawei/huawei_utils.py:129 #, python-format -msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgid "_get_host_os_type: Host %(ip)s OS type is %(os)s."
msgstr "" -#: cinder/tests/fakelibvirt.py:784 -msgid "Please extend mock libvirt module to support flags" +#: cinder/volume/drivers/huawei/rest_common.py:59 +#, python-format +msgid "HVS Request URL: %(url)s" msgstr "" -#: cinder/tests/fakelibvirt.py:790 -msgid "Please extend fake libvirt module to support this auth method" +#: cinder/volume/drivers/huawei/rest_common.py:60 +#, python-format +msgid "HVS Request Data: %(data)s" msgstr "" -#: cinder/tests/test_compute.py:365 cinder/tests/test_compute.py:1419 +#: cinder/volume/drivers/huawei/rest_common.py:73 #, python-format -msgid "Running instances: %s" +msgid "HVS Response Data: %(res)s" msgstr "" -#: cinder/tests/test_compute.py:371 +#: cinder/volume/drivers/huawei/rest_common.py:75 #, python-format -msgid "After terminating instances: %s" +msgid "Bad response from server: %s" msgstr "" -#: cinder/tests/test_compute.py:589 -msgid "Internal error" +#: cinder/volume/drivers/huawei/rest_common.py:82 +msgid "JSON transfer error" msgstr "" -#: cinder/tests/test_compute.py:1430 +#: cinder/volume/drivers/huawei/rest_common.py:102 #, python-format -msgid "After force-killing instances: %s" +msgid "Login error, reason is %s" msgstr "" -#: cinder/tests/test_misc.py:92 +#: cinder/volume/drivers/huawei/rest_common.py:166 #, python-format msgid "" -"The following migrations are missing a downgrade:\n" -"\t%s" -msgstr "" - -#: cinder/tests/test_cinder_manage.py:169 -msgid "id" +"%(err)s\n" +"result: %(res)s" msgstr "" -#: cinder/tests/test_cinder_manage.py:170 -msgid "IPv4" +#: cinder/volume/drivers/huawei/rest_common.py:173 +#, python-format +msgid "%s \"data\" was not in result." msgstr "" -#: cinder/tests/test_cinder_manage.py:171 -msgid "IPv6" +#: cinder/volume/drivers/huawei/rest_common.py:208 +msgid "Can't find the Qos policy in array" msgstr "" -#: cinder/tests/test_cinder_manage.py:172 -msgid "start address" +#: cinder/volume/drivers/huawei/rest_common.py:246 +msgid "Can't find lun or lun group in array" msgstr "" -#: cinder/tests/test_cinder_manage.py:173 -msgid "DNS1" +#: cinder/volume/drivers/huawei/rest_common.py:280 +#, python-format +msgid "Invalid resource pool: %s" msgstr "" -#: cinder/tests/test_cinder_manage.py:174 -msgid "DNS2" +#: cinder/volume/drivers/huawei/rest_common.py:298 +#, python-format +msgid "Get pool info error, pool name is:%s" msgstr "" -#: cinder/tests/test_cinder_manage.py:175 -msgid "VlanID" +#: cinder/volume/drivers/huawei/rest_common.py:327 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." msgstr "" -#: cinder/tests/test_cinder_manage.py:176 -msgid "project" +#: cinder/volume/drivers/huawei/rest_common.py:354 +#, python-format +msgid "_stop_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." msgstr "" -#: cinder/tests/test_cinder_manage.py:177 -msgid "uuid" +#: cinder/volume/drivers/huawei/rest_common.py:474 +#, python-format +msgid "" +"_mapping_hostgroup_and_lungroup: lun_group: %(lun_group)sview_id: " +"%(view_id)s" msgstr "" -#: cinder/tests/test_volume.py:216 +#: cinder/volume/drivers/huawei/rest_common.py:511 +#: cinder/volume/drivers/huawei/rest_common.py:543 #, python-format -msgid "Target %s allocated" +msgid "initiator name:%(initiator_name)s, volume name:%(volume)s." msgstr "" -#: cinder/tests/test_volume.py:468 +#: cinder/volume/drivers/huawei/rest_common.py:527 #, python-format -msgid "Cannot confirm exported volume id:%s." 
+msgid "host lun id is %s" msgstr "" -#: cinder/tests/test_volume_types.py:58 +#: cinder/volume/drivers/huawei/rest_common.py:553 #, python-format -msgid "Given data: %s" +msgid "the free wwns %s" msgstr "" -#: cinder/tests/test_volume_types.py:59 +#: cinder/volume/drivers/huawei/rest_common.py:574 #, python-format -msgid "Result data: %s" +msgid "the fc server properties is:%s" msgstr "" -#: cinder/tests/test_xenapi.py:626 +#: cinder/volume/drivers/huawei/rest_common.py:688 #, python-format -msgid "Creating files in %s to simulate guest agent" +msgid "JSON transfer data error. %s" msgstr "" -#: cinder/tests/test_xenapi.py:637 +#: cinder/volume/drivers/huawei/rest_common.py:874 #, python-format -msgid "Removing simulated guest agent files in %s" +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." msgstr "" -#: cinder/tests/api/openstack/compute/test_servers.py:2144 -#, fuzzy, python-format -msgid "Quota exceeded: code=%(code)s" -msgstr "Kvóta překročena" - -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:57 +#: cinder/volume/drivers/huawei/rest_common.py:937 #, python-format -msgid "_create: %s" +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(fetchtype)s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:66 +#: cinder/volume/drivers/huawei/rest_common.py:964 #, python-format -msgid "_delete: %s" +msgid "" +"PrefetchType config is wrong. PrefetchType must in 1,2,3,4. fetchtype " +"is:%(fetchtype)s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:75 -#, python-format -msgid "_get: %s" +#: cinder/volume/drivers/huawei/rest_common.py:970 +msgid "Use default prefetch fetchtype. Prefetch fetchtype:Intelligent." msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:85 +#: cinder/volume/drivers/huawei/rest_common.py:982 #, python-format -msgid "_get_all: %s" +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal.LUNcopy name: " +"%(luncopyname)s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:125 +#: cinder/volume/drivers/huawei/rest_common.py:1056 #, python-format -msgid "test_snapshot_create: param=%s" +msgid "" +"_get_iscsi_port_info: Failed to get iscsi port info through config IP " +"%(ip)s, please check config file." msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:134 +#: cinder/volume/drivers/huawei/rest_common.py:1101 #, python-format -msgid "test_snapshot_create: resp_dict=%s" +msgid "_get_tgt_iqn: iSCSI target iqn is %s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:156 +#: cinder/volume/drivers/huawei/rest_common.py:1124 #, python-format -msgid "test_snapshot_create_force: param=%s" +msgid "_parse_volume_type: type id: %(type_id)s config parameter is: %(params)s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:165 +#: cinder/volume/drivers/huawei/rest_common.py:1157 #, python-format -msgid "test_snapshot_create_force: resp_dict=%s" +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the configuration file " +"%(conf)s." 
msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:205 +#: cinder/volume/drivers/huawei/rest_common.py:1162 #, python-format -msgid "test_snapshot_show: resp=%s" +msgid "The config parameters are: %s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:231 +#: cinder/volume/drivers/huawei/rest_common.py:1239 +#: cinder/volume/drivers/huawei/ssh_common.py:118 +#: cinder/volume/drivers/huawei/ssh_common.py:1265 #, python-format -msgid "test_snapshot_detail: resp_dict=%s" +msgid "_check_conf_file: Config file invalid. %s must be set." msgstr "" -#: cinder/tests/integrated/test_login.py:31 -#, python-format -msgid "flavor: %s" +#: cinder/volume/drivers/huawei/rest_common.py:1246 +#: cinder/volume/drivers/huawei/ssh_common.py:125 +msgid "_check_conf_file: Config file invalid. StoragePool must be set." msgstr "" -#: cinder/tests/integrated/api/client.py:38 +#: cinder/volume/drivers/huawei/rest_common.py:1256 #, python-format msgid "" -"%(message)s\n" -"Status Code: %(_status)s\n" -"Body: %(_body)s" +"_check_conf_file: Config file invalid. Host OSType invalid.\n" +"The valid values are: %(os_list)s" msgstr "" -#: cinder/tests/integrated/api/client.py:47 -msgid "Authentication error" +#: cinder/volume/drivers/huawei/rest_common.py:1300 +msgid "Can not find lun in array" msgstr "" -#: cinder/tests/integrated/api/client.py:55 -msgid "Authorization error" +#: cinder/volume/drivers/huawei/ssh_common.py:54 +#, python-format +msgid "ssh_read: Read SSH timeout. %s" msgstr "" -#: cinder/tests/integrated/api/client.py:63 -msgid "Item not found" +#: cinder/volume/drivers/huawei/ssh_common.py:70 +msgid "No response message. Please check system status." msgstr "" -#: cinder/tests/integrated/api/client.py:105 -#, python-format -msgid "Doing %(method)s on %(relative_url)s" +#: cinder/volume/drivers/huawei/ssh_common.py:101 +#: cinder/volume/drivers/huawei/ssh_common.py:1249 +msgid "do_setup" msgstr "" -#: cinder/tests/integrated/api/client.py:107 +#: cinder/volume/drivers/huawei/ssh_common.py:135 +#: cinder/volume/drivers/huawei/ssh_common.py:1287 #, python-format -msgid "Body: %s" +msgid "" +"_check_conf_file: Config file invalid. Host OSType is invalid.\n" +"The valid values are: %(os_list)s" msgstr "" -#: cinder/tests/integrated/api/client.py:125 +#: cinder/volume/drivers/huawei/ssh_common.py:169 #, python-format -msgid "%(auth_uri)s => code %(http_status)s" +msgid "_get_login_info: %s" msgstr "" -#: cinder/tests/integrated/api/client.py:151 +#: cinder/volume/drivers/huawei/ssh_common.py:224 #, python-format -msgid "%(relative_uri)s => code %(http_status)s" -msgstr "" - -#: cinder/tests/integrated/api/client.py:161 -msgid "Unexpected status code" +msgid "create_volume: volume name: %s" msgstr "" -#: cinder/tests/integrated/api/client.py:168 +#: cinder/volume/drivers/huawei/ssh_common.py:242 #, python-format -msgid "Decoding JSON: %s" +msgid "" +"_name_translate: Name in cinder: %(old)s, new name in storage system: " +"%(new)s" msgstr "" -#: cinder/tests/rpc/common.py:133 +#: cinder/volume/drivers/huawei/ssh_common.py:279 #, python-format -msgid "Nested received %(queue)s, %(value)s" +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the element in configuration " +"file %(conf)s." 
msgstr "" -#: cinder/tests/rpc/common.py:142 +#: cinder/volume/drivers/huawei/ssh_common.py:373 +#: cinder/volume/drivers/huawei/ssh_common.py:1451 #, python-format -msgid "Nested return %s" +msgid "LUNType must be \"Thin\" or \"Thick\". LUNType:%(type)s" msgstr "" -#: cinder/tests/rpc/common.py:160 -msgid "RPC backend does not support timeouts" +#: cinder/volume/drivers/huawei/ssh_common.py:395 +msgid "" +"_parse_conf_lun_params: Use default prefetch type. Prefetch type: " +"Intelligent" msgstr "" -#: cinder/tests/rpc/common.py:227 cinder/tests/rpc/common.py:233 +#: cinder/volume/drivers/huawei/ssh_common.py:421 #, python-format -msgid "Received %s" +msgid "" +"_get_maximum_capacity_pool_id: Failed to get pool id. Please check config" +" file and make sure the StoragePool %s is created in storage array." msgstr "" -#: cinder/virt/connection.py:85 -msgid "Failed to open connection to the hypervisor" +#: cinder/volume/drivers/huawei/ssh_common.py:436 +#, python-format +msgid "CLI command: %s" msgstr "" -#: cinder/virt/fake.py:270 cinder/virt/xenapi_conn.py:396 -#: cinder/virt/baremetal/proxy.py:716 cinder/virt/libvirt/connection.py:2045 +#: cinder/volume/drivers/huawei/ssh_common.py:466 #, python-format -msgid "Compute_service record created for %s " +msgid "" +"_execute_cli: Can not connect to IP %(old)s, try to connect to the other " +"IP %(new)s." msgstr "" -#: cinder/virt/fake.py:273 cinder/virt/xenapi_conn.py:399 -#: cinder/virt/baremetal/proxy.py:720 cinder/virt/libvirt/connection.py:2048 +#: cinder/volume/drivers/huawei/ssh_common.py:501 #, python-format -msgid "Compute_service record updated for %s " +msgid "_execute_cli: %s" msgstr "" -#: cinder/virt/firewall.py:130 +#: cinder/volume/drivers/huawei/ssh_common.py:511 #, python-format -msgid "Attempted to unfilter instance %s which is not filtered" +msgid "delete_volume: volume name: %s" msgstr "" -#: cinder/virt/firewall.py:137 +#: cinder/volume/drivers/huawei/ssh_common.py:516 #, python-format -msgid "Filters added to instance %s" +msgid "delete_volume: Volume %(name)s does not exist." msgstr "" -#: cinder/virt/firewall.py:139 -msgid "Provider Firewall Rules refreshed" +#: cinder/volume/drivers/huawei/ssh_common.py:570 +#, python-format +msgid "" +"create_volume_from_snapshot: snapshot name: %(snapshot)s, volume name: " +"%(volume)s" msgstr "" -#: cinder/virt/firewall.py:291 +#: cinder/volume/drivers/huawei/ssh_common.py:580 #, python-format -msgid "Adding security group rule: %r" +msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." msgstr "" -#: cinder/virt/firewall.py:403 cinder/virt/xenapi/firewall.py:87 +#: cinder/volume/drivers/huawei/ssh_common.py:650 #, python-format -msgid "Adding provider rule: %s" +msgid "_wait_for_luncopy: LUNcopy %(luncopyname)s status is %(status)s." msgstr "" -#: cinder/virt/images.py:86 -msgid "'qemu-img info' parsing failed." +#: cinder/volume/drivers/huawei/ssh_common.py:688 +#, python-format +msgid "create_cloned_volume: src volume: %(src)s, tgt volume: %(tgt)s" msgstr "" -#: cinder/virt/images.py:92 +#: cinder/volume/drivers/huawei/ssh_common.py:697 #, python-format -msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgid "Source volume %(name)s does not exist." 
msgstr "" -#: cinder/virt/images.py:104 +#: cinder/volume/drivers/huawei/ssh_common.py:739 #, python-format -msgid "Converted to raw, but format is now %s" +msgid "" +"extend_volume: extended volume name: %(extended_name)s new added volume " +"name: %(added_name)s new added volume size: %(added_size)s" msgstr "" -#: cinder/virt/vmwareapi_conn.py:105 -msgid "" -"Must specify vmwareapi_host_ip,vmwareapi_host_username and " -"vmwareapi_host_password to useconnection_type=vmwareapi" +#: cinder/volume/drivers/huawei/ssh_common.py:747 +#, python-format +msgid "extend_volume: volume %s does not exist." msgstr "" -#: cinder/virt/vmwareapi_conn.py:276 +#: cinder/volume/drivers/huawei/ssh_common.py:779 #, python-format -msgid "In vmwareapi:_create_session, got this exception: %s" +msgid "create_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" msgstr "" -#: cinder/virt/vmwareapi_conn.py:359 +#: cinder/volume/drivers/huawei/ssh_common.py:785 +msgid "create_snapshot: Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:792 #, python-format -msgid "In vmwareapi:_call_method, got this exception: %s" +msgid "create_snapshot: Volume %(name)s does not exist." msgstr "" -#: cinder/virt/vmwareapi_conn.py:398 +#: cinder/volume/drivers/huawei/ssh_common.py:855 #, python-format -msgid "Task [%(task_name)s] %(task_ref)s status: success" +msgid "delete_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" msgstr "" -#: cinder/virt/vmwareapi_conn.py:404 +#: cinder/volume/drivers/huawei/ssh_common.py:865 #, python-format -msgid "Task [%(task_name)s] %(task_ref)s status: error %(error_info)s" +msgid "" +"delete_snapshot: Can not delete snapshot %s for it is a source LUN of " +"LUNCopy." msgstr "" -#: cinder/virt/vmwareapi_conn.py:409 +#: cinder/volume/drivers/huawei/ssh_common.py:873 #, python-format -msgid "In vmwareapi:_poll_task, Got this error %s" +msgid "delete_snapshot: Snapshot %(snap)s does not exist." msgstr "" -#: cinder/virt/xenapi_conn.py:140 +#: cinder/volume/drivers/huawei/ssh_common.py:916 +#, python-format msgid "" -"Must specify xenapi_connection_url, xenapi_connection_username " -"(optionally), and xenapi_connection_password to use " -"connection_type=xenapi" +"%(func)s: %(msg)s\n" +"CLI command: %(cmd)s\n" +"CLI out: %(out)s" msgstr "" -#: cinder/virt/xenapi_conn.py:329 cinder/virt/libvirt/connection.py:472 -msgid "Could not determine iscsi initiator name" +#: cinder/volume/drivers/huawei/ssh_common.py:933 +#, python-format +msgid "map_volume: Volume %s was not found." msgstr "" -#: cinder/virt/xenapi_conn.py:460 -msgid "Host startup on XenServer is not supported." +#: cinder/volume/drivers/huawei/ssh_common.py:1079 +#, python-format +msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s." msgstr "" -#: cinder/virt/xenapi_conn.py:489 -msgid "Unable to log in to XenAPI (is the Dom0 disk full?)" +#: cinder/volume/drivers/huawei/ssh_common.py:1102 +#, python-format +msgid "remove_map: Host %s does not exist." msgstr "" -#: cinder/virt/xenapi_conn.py:527 -msgid "Host is member of a pool, but DB says otherwise" +#: cinder/volume/drivers/huawei/ssh_common.py:1106 +#, python-format +msgid "remove_map: Volume %s does not exist." msgstr "" -#: cinder/virt/xenapi_conn.py:599 cinder/virt/xenapi_conn.py:612 +#: cinder/volume/drivers/huawei/ssh_common.py:1119 #, python-format -msgid "Got exception: %s" +msgid "remove_map: No map between host %(host)s and volume %(volume)s." 
msgstr "" -#: cinder/virt/baremetal/dom.py:93 -msgid "No domains exist." +#: cinder/volume/drivers/huawei/ssh_common.py:1138 +#, python-format +msgid "" +"_delete_map: There are IOs accessing the system. Retry to delete host map" +" %(mapid)s 10s later." msgstr "" -#: cinder/virt/baremetal/dom.py:95 +#: cinder/volume/drivers/huawei/ssh_common.py:1146 #, python-format -msgid "============= initial domains =========== : %s" +msgid "" +"_delete_map: Failed to delete host map %(mapid)s.\n" +"CLI out: %(out)s" msgstr "" -#: cinder/virt/baremetal/dom.py:99 -msgid "Building domain: to be removed" +#: cinder/volume/drivers/huawei/ssh_common.py:1185 +msgid "_update_volume_stats: Updating volume stats." msgstr "" -#: cinder/virt/baremetal/dom.py:103 -msgid "Not running domain: remove" +#: cinder/volume/drivers/huawei/ssh_common.py:1277 +msgid "_check_conf_file: Config file invalid. StoragePool must be specified." msgstr "" -#: cinder/virt/baremetal/dom.py:111 -msgid "domain running on an unknown node: discarded" +#: cinder/volume/drivers/huawei/ssh_common.py:1311 +msgid "" +"_get_device_type: The driver only supports Dorado5100 and Dorado 2100 G2 " +"now." msgstr "" -#: cinder/virt/baremetal/dom.py:127 +#: cinder/volume/drivers/huawei/ssh_common.py:1389 #, python-format -msgid "No such domain (%s)" +msgid "" +"create_volume_from_snapshot: %(device)s does not support create volume " +"from snapshot." msgstr "" -#: cinder/virt/baremetal/dom.py:134 +#: cinder/volume/drivers/huawei/ssh_common.py:1396 #, python-format -msgid "Failed power down Bare-metal node %s" -msgstr "" - -#: cinder/virt/baremetal/dom.py:143 -msgid "deactivate -> activate fails" +msgid "create_cloned_volume: %(device)s does not support clone volume." msgstr "" -#: cinder/virt/baremetal/dom.py:153 -msgid "destroy_domain: no such domain" +#: cinder/volume/drivers/huawei/ssh_common.py:1404 +#, python-format +msgid "extend_volume: %(device)s does not support extend volume." msgstr "" -#: cinder/virt/baremetal/dom.py:154 +#: cinder/volume/drivers/huawei/ssh_common.py:1413 #, python-format -msgid "No such domain %s" +msgid "create_snapshot: %(device)s does not support snapshot." msgstr "" -#: cinder/virt/baremetal/dom.py:161 -#, python-format -msgid "Domains: %s" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:132 +msgid "enter: do_setup" msgstr "" -#: cinder/virt/baremetal/dom.py:163 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:142 #, python-format -msgid "Nodes: %s" +msgid "Failed getting details for pool %s" msgstr "" -#: cinder/virt/baremetal/dom.py:166 -#, python-format -msgid "After storing domains: %s" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:179 +msgid "do_setup: No configured nodes." 
msgstr "" -#: cinder/virt/baremetal/dom.py:169 -msgid "deactivation/removing domain failed" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:182 +msgid "leave: do_setup" msgstr "" -#: cinder/virt/baremetal/dom.py:176 -msgid "===== Domain is being created =====" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:186 +msgid "enter: check_for_setup_error" msgstr "" -#: cinder/virt/baremetal/dom.py:179 -msgid "Same domain name already exists" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:190 +msgid "Unable to determine system name" msgstr "" -#: cinder/virt/baremetal/dom.py:181 -msgid "create_domain: before get_idle_node" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:193 +msgid "Unable to determine system id" msgstr "" -#: cinder/virt/baremetal/dom.py:198 -#, python-format -msgid "Created new domain: %s" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:196 +msgid "Unable to determine pool extent size" msgstr "" -#: cinder/virt/baremetal/dom.py:213 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:203 +#: cinder/volume/drivers/netapp/iscsi.py:122 +#: cinder/volume/drivers/netapp/nfs.py:639 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:157 #, python-format -msgid "Failed to boot Bare-metal node %s" +msgid "%s is not set" msgstr "" -#: cinder/virt/baremetal/dom.py:222 -msgid "No such domain exists" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:209 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" msgstr "" -#: cinder/virt/baremetal/dom.py:226 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:217 #, python-format -msgid "change_domain_state: to new state %s" +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:225 +msgid "leave: check_for_setup_error" msgstr "" -#: cinder/virt/baremetal/dom.py:233 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:235 #, python-format -msgid "Stored fake domains to the file: %s" +msgid "ensure_export: Volume %s not found on storage" msgstr "" -#: cinder/virt/baremetal/dom.py:244 -msgid "domain does not exist" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:254 +msgid "The connector does not contain the required information." 
msgstr "" -#: cinder/virt/baremetal/nodes.py:42 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:277 #, python-format -msgid "Unknown baremetal driver %(d)s" +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" msgstr "" -#: cinder/virt/baremetal/proxy.py:148 -#, python-format -msgid "Error encountered when destroying instance '%(name)s': %(ex)s" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:297 +msgid "CHAP secret exists for host but CHAP is disabled" msgstr "" -#: cinder/virt/baremetal/proxy.py:162 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:302 #, python-format -msgid "instance %(instance_name)s: deleting instance files %(target)s" +msgid "initialize_connection: Failed to get attributes for volume %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:189 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:314 #, python-format -msgid "instance %s: rebooted" +msgid "Did not find expected column name in lsvdisk: %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:192 -msgid "_wait_for_reboot failed" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:316 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:222 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:333 #, python-format -msgid "instance %s: rescued" +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" msgstr "" -#: cinder/virt/baremetal/proxy.py:225 -msgid "_wait_for_rescue failed" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:342 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:242 -msgid "<============= spawn of baremetal =============>" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:365 +msgid "" +"Could not get FC connection information for the host-volume connection. " +"Is the host configured properly for FC connections?" msgstr "" -#: cinder/virt/baremetal/proxy.py:255 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:380 #, python-format -msgid "instance %s: is building" +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" msgstr "" -#: cinder/virt/baremetal/proxy.py:260 -msgid "Key is injected but instance is not running yet" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:385 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" msgstr "" -#: cinder/virt/baremetal/proxy.py:265 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:403 #, python-format -msgid "instance %s: booted" +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" msgstr "" -#: cinder/virt/baremetal/proxy.py:268 -#, python-format -msgid "~~~~~~ current state = %s ~~~~~~" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:411 +msgid "terminate_connection: Failed to get host name from connector." msgstr "" -#: cinder/virt/baremetal/proxy.py:269 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:421 #, python-format -msgid "instance %s spawned successfully" +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" msgstr "" -#: cinder/virt/baremetal/proxy.py:272 -#, python-format -msgid "instance %s:not booted" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:447 +msgid "create_volume_from_snapshot: Source and destination size differ." 
msgstr "" -#: cinder/virt/baremetal/proxy.py:274 -msgid "Bremetal assignment is overcommitted." +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:459 +msgid "create_cloned_volume: Source and destination size differ." msgstr "" -#: cinder/virt/baremetal/proxy.py:354 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:470 #, python-format -msgid "instance %s: Creating image" +msgid "enter: extend_volume: volume %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:473 -#, python-format -msgid "instance %(inst_name)s: injecting %(injection)s into image %(img_id)s" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:474 +msgid "extend_volume: Extending a volume with snapshots is not supported." msgstr "" -#: cinder/virt/baremetal/proxy.py:484 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:481 #, python-format -msgid "" -"instance %(inst_name)s: ignoring error injecting data into image " -"%(img_id)s (%(e)s)" +msgid "leave: extend_volume: volume %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:529 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:497 #, python-format -msgid "instance %s: starting toXML method" +msgid "enter: migrate_volume: id=%(id)s, host=%(host)s" msgstr "" -#: cinder/virt/baremetal/proxy.py:531 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:523 #, python-format -msgid "instance %s: finished toXML method" +msgid "leave: migrate_volume: id=%(id)s, host=%(host)s" msgstr "" -#: cinder/virt/baremetal/proxy.py:574 cinder/virt/libvirt/connection.py:1815 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:540 +#, python-format msgid "" -"Cannot get the number of cpu, because this function is not implemented " -"for this platform. This error can be safely ignored for now." +"enter: retype: id=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" msgstr "" -#: cinder/virt/baremetal/proxy.py:714 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:581 #, python-format -msgid "#### RLK: cpu_arch = %s " +msgid "" +"exit: retype: id=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" msgstr "" -#: cinder/virt/baremetal/proxy.py:746 -msgid "Updating!" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:622 +msgid "Could not get pool data from the storage" msgstr "" -#: cinder/virt/baremetal/proxy.py:773 cinder/virt/libvirt/connection.py:2609 -#: cinder/virt/xenapi/host.py:129 -msgid "Updating host stats" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:623 +msgid "_update_volume_stats: Could not get storage pool data" msgstr "" -#: cinder/virt/baremetal/tilera.py:185 -msgid "free_node...." +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:44 +#, python-format +msgid "Could not find key in output of command %(cmd)s: %(out)s" msgstr "" -#: cinder/virt/baremetal/tilera.py:216 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:64 #, python-format -msgid "deactivate_node is called for node_id = %(id)s node_ip = %(ip)s" +msgid "Failed to get code level (%s)."
msgstr "" -#: cinder/virt/baremetal/tilera.py:221 -msgid "status of node is set to 0" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:86 +#, python-format +msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s" msgstr "" -#: cinder/virt/baremetal/tilera.py:232 -msgid "rootfs is already removed" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:143 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" msgstr "" -#: cinder/virt/baremetal/tilera.py:264 -msgid "Before ping to the bare-metal node" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:165 +#, python-format +msgid "Failed to find host %s" msgstr "" -#: cinder/virt/baremetal/tilera.py:275 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:178 #, python-format -msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is ready" +msgid "enter: get_host_from_connector: %s" msgstr "" -#: cinder/virt/baremetal/tilera.py:279 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:207 #, python-format -msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is not ready, out_msg=%(out_msg)s" +msgid "leave: get_host_from_connector: host %s" msgstr "" -#: cinder/virt/baremetal/tilera.py:292 -msgid "Noting to do for tilera nodes: vmlinux is in CF" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:218 +#, python-format +msgid "enter: create_host: host %s" msgstr "" -#: cinder/virt/baremetal/tilera.py:316 -msgid "activate_node" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:224 +msgid "create_host: Host name is not unicode or string" msgstr "" -#: cinder/virt/baremetal/tilera.py:330 -msgid "Node is unknown error state." +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:235 +msgid "create_host: No initiators or wwpns supplied." msgstr "" -#: cinder/virt/disk/api.py:165 -msgid "no capable image handler configured" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:265 +#, python-format +msgid "leave: create_host: host %(host)s - %(host_name)s" msgstr "" -#: cinder/virt/disk/api.py:178 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:275 #, python-format -msgid "unknown disk image handler: %s" +msgid "enter: map_vol_to_host: volume %(volume_name)s to host %(host_name)s" msgstr "" -#: cinder/virt/disk/api.py:189 -msgid "image already mounted" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:301 +#, python-format +msgid "" +"leave: map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host " +"%(host_name)s" msgstr "" -#: cinder/virt/disk/api.py:276 cinder/virt/disk/guestfs.py:64 -#: cinder/virt/disk/guestfs.py:78 cinder/virt/disk/mount.py:100 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:311 #, python-format -msgid "Failed to mount filesystem: %s" +msgid "enter: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" msgstr "" -#: cinder/virt/disk/api.py:291 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:318 #, python-format -msgid "Failed to remove container: %s" +msgid "unmap_vol_from_host: No mapping of volume %(vol_name)s to any host found." msgstr "" -#: cinder/virt/disk/api.py:441 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:324 #, python-format -msgid "User %(username)s not found in password file." +msgid "" +"unmap_vol_from_host: Multiple mappings of volume %(vol_name)s found, no " +"host specified." msgstr "" -#: cinder/virt/disk/api.py:457 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:336 #, python-format -msgid "User %(username)s not found in shadow file." 
+msgid ""
+"unmap_vol_from_host: No mapping of volume %(vol_name)s to host %(host)s "
+"found."
msgstr ""

-#: cinder/virt/disk/guestfs.py:39
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:348
#, python-format
-msgid "unsupported partition: %s"
+msgid "leave: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s"
msgstr ""

-#: cinder/virt/disk/guestfs.py:77
-msgid "unknown guestmount error"
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:377
+msgid ""
+"Illegal value specified for storwize_svc_vol_rsize: set to either a "
+"percentage (0-100) or -1"
msgstr ""

-#: cinder/virt/disk/loop.py:30
-#, python-format
-msgid "Could not attach image to loopback: %s"
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:383
+msgid ""
+"Illegal value specified for storwize_svc_vol_warning: set to a percentage"
+" (0-100)"
msgstr ""

-#: cinder/virt/disk/mount.py:76
-msgid "no partitions found"
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:390
+msgid ""
+"Illegal value specified for storwize_svc_vol_grainsize: set to either 32,"
+" 64, 128, or 256"
msgstr ""

-#: cinder/virt/disk/mount.py:77
-#, python-format
-msgid "Failed to map partitions: %s"
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:397
+msgid "System does not support compression"
msgstr ""

-#: cinder/virt/disk/nbd.py:58
-msgid "nbd unavailable: module not loaded"
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:402
+msgid "If compression is set to True, rsize must also be set (not equal to -1)"
msgstr ""

-#: cinder/virt/disk/nbd.py:63
-msgid "No free nbd devices"
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:408
+#, python-format
+msgid ""
+"Illegal value %(prot)s specified for storwize_svc_connection_protocol: "
+"valid values are %(enabled)s"
msgstr ""

-#: cinder/virt/disk/nbd.py:81
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:417
#, python-format
-msgid "qemu-nbd error: %s"
+msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s"
msgstr ""

-#: cinder/virt/disk/nbd.py:93
-#, python-format
-msgid "nbd device %s did not show up"
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:452
+msgid "Protocol must be specified as 'iSCSI' or 'FC'."
msgstr ""

-#: cinder/virt/libvirt/connection.py:265
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:495
#, python-format
-msgid "Connecting to libvirt: %s"
+msgid "enter: create_vdisk: vdisk %s "
msgstr ""

-#: cinder/virt/libvirt/connection.py:286
-msgid "Connection to libvirt broke"
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:498
+#, python-format
+msgid "leave: _create_vdisk: volume %s "
msgstr ""

-#: cinder/virt/libvirt/connection.py:388
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:525
#, python-format
-msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s"
+msgid ""
+"Unexpected mapping status %(status)s for mapping %(id)s. Attributes: "
+"%(attr)s"
msgstr ""

-#: cinder/virt/libvirt/connection.py:400
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:535
+#, python-format
msgid ""
-"Error from libvirt during saved instance removal. Code=%(errcode)s "
-"Error=%(e)s"
+"Mapping %(id)s prepare failed to complete within the allotted %(to)d "
+"seconds timeout. Terminating."
msgstr ""

-#: cinder/virt/libvirt/connection.py:411
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:544
#, python-format
-msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s"
+msgid ""
+"enter: run_flashcopy: execute FlashCopy from source %(source)s to target "
+"%(target)s"
msgstr ""

-#: cinder/virt/libvirt/connection.py:423
-msgid "Instance destroyed successfully."
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:552
+#, python-format
+msgid "leave: run_flashcopy: FlashCopy started from %(source)s to %(target)s"
msgstr ""

-#: cinder/virt/libvirt/connection.py:435
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:572
#, python-format
-msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s"
+msgid "Loopcall: _check_vdisk_fc_mappings(), vdisk %s"
msgstr ""

-#: cinder/virt/libvirt/connection.py:461
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:595
#, python-format
-msgid "Deleting instance files %(target)s"
+msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s"
msgstr ""

-#: cinder/virt/libvirt/connection.py:554
-msgid "attaching LXC block device"
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:631
+#, python-format
+msgid "Calling _ensure_vdisk_no_fc_mappings: vdisk %s"
msgstr ""

-#: cinder/virt/libvirt/connection.py:567
-msgid "detaching LXC block device"
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:639
+#, python-format
+msgid "enter: delete_vdisk: vdisk %s"
msgstr ""

-#: cinder/virt/libvirt/connection.py:692
-msgid "Instance soft rebooted successfully."
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:641
+#, python-format
+msgid "Tried to delete non-existent vdisk %s."
msgstr ""

-#: cinder/virt/libvirt/connection.py:696
-#, fuzzy
-msgid "Failed to soft reboot instance."
-msgstr "Nelze restartovat instanci"
-
-#: cinder/virt/libvirt/connection.py:725
-msgid "Instance shutdown successfully."
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:645
+#, python-format
+msgid "leave: delete_vdisk: vdisk %s"
msgstr ""

-#: cinder/virt/libvirt/connection.py:761 cinder/virt/libvirt/connection.py:905
-msgid "During reboot, instance disappeared."
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:649
+#, python-format
+msgid "enter: create_copy: snapshot %(src)s to %(tgt)s"
msgstr ""

-#: cinder/virt/libvirt/connection.py:766
-msgid "Instance rebooted successfully."
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:654
+#, python-format
+msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist"
msgstr ""

-#: cinder/virt/libvirt/connection.py:867 cinder/virt/xenapi/vmops.py:1358
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:669
#, python-format
msgid ""
-"Found %(migration_count)d unconfirmed migrations older than "
-"%(confirm_window)d seconds"
+msgid "leave: _create_copy: snapshot %(tgt)s from vdisk %(src)s"
msgstr ""

-#: cinder/virt/libvirt/connection.py:871
-#, python-format
-msgid "Automatically confirming migration %d"
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:691
+msgid "migrate_volume started without a vdisk copy in the expected pool."
msgstr ""

-#: cinder/virt/libvirt/connection.py:896
-msgid "Instance is running"
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:743
+#, python-format
+msgid ""
+"Ignore change IO group as storage code level is %(code_level)s, below "
+"6.4.0.0"
msgstr ""

-#: cinder/virt/libvirt/connection.py:910
-msgid "Instance spawned successfully."
+#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:35 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:211 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:244 +#, fuzzy, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" +"%(description)s\n" +"Příkaz: %(cmd)s\n" +"Kód ukončení: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" -#: cinder/virt/libvirt/connection.py:926 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:53 #, python-format -msgid "data: %(data)r, fpath: %(fpath)r" +msgid "Expected no output from CLI command %(cmd)s, got %(out)s" msgstr "" -#: cinder/virt/libvirt/connection.py:978 -#, fuzzy -msgid "Guest does not have a console available" -msgstr "Uživatel nemá správcovská oprávnění" - -#: cinder/virt/libvirt/connection.py:1020 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:65 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:256 #, python-format -msgid "Path '%(path)s' supports direct I/O" +msgid "" +"Failed to parse CLI output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/virt/libvirt/connection.py:1024 -#, python-format -msgid "Path '%(path)s' does not support direct I/O: '%(ex)s'" +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:142 +msgid "Must pass wwpn or host to lsfabric." msgstr "" -#: cinder/virt/libvirt/connection.py:1028 cinder/virt/libvirt/connection.py:1032 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:158 #, python-format -msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'" +msgid "Did not find success message nor error for %(fun)s: %(out)s" msgstr "" -#: cinder/virt/libvirt/connection.py:1153 -msgid "Creating image" +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:163 +msgid "" +"storwize_svc_multihostmap_enabled is set to False, not allowing multi " +"host mapping." msgstr "" -#: cinder/virt/libvirt/connection.py:1339 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:347 #, python-format -msgid "Injecting %(injection)s into image %(img_id)s" +msgid "Did not find expected key %(key)s in %(fun)s: %(raw)s" msgstr "" -#: cinder/virt/libvirt/connection.py:1349 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:382 #, python-format -msgid "Ignoring error injecting data into image %(img_id)s (%(e)s)" +msgid "" +"Unexpected CLI response: header/row mismatch. header: %(header)s, row: " +"%(row)s" msgstr "" -#: cinder/virt/libvirt/connection.py:1381 +#: cinder/volume/drivers/netapp/api.py:419 #, python-format -msgid "block_device_list %s" +msgid "No element by given name %s." msgstr "" -#: cinder/virt/libvirt/connection.py:1658 -msgid "Starting toXML method" +#: cinder/volume/drivers/netapp/api.py:440 +msgid "Not a valid value for NaElement." msgstr "" -#: cinder/virt/libvirt/connection.py:1662 -msgid "Finished toXML method" +#: cinder/volume/drivers/netapp/api.py:444 +msgid "NaElement name cannot be null." msgstr "" -#: cinder/virt/libvirt/connection.py:1679 -#, python-format -msgid "" -"Error from libvirt while looking up %(instance_name)s: [Error Code " -"%(error_code)s] %(ex)s" +#: cinder/volume/drivers/netapp/api.py:468 +msgid "Type cannot be converted into NaElement." 
msgstr ""

-#: cinder/virt/libvirt/connection.py:1920
-msgid "libvirt version is too old (does not support getVersion)"
+#: cinder/volume/drivers/netapp/common.py:75
+msgid "Required configuration not found"
msgstr ""

-#: cinder/virt/libvirt/connection.py:1942
+#: cinder/volume/drivers/netapp/common.py:103
#, python-format
-msgid "'' must be 1, but %d\n"
+msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s"
msgstr ""

-#: cinder/virt/libvirt/connection.py:1969
+#: cinder/volume/drivers/netapp/common.py:109
#, python-format
-msgid "topology (%(topology)s) must have %(ks)s"
+msgid "Storage family %s is not supported"
msgstr ""

-#: cinder/virt/libvirt/connection.py:2067
+#: cinder/volume/drivers/netapp/common.py:116
#, python-format
-msgid ""
-"Instance launched has CPU info:\n"
-"%s"
+msgid "No default storage protocol found for storage family %(storage_family)s"
msgstr ""

-#: cinder/virt/libvirt/connection.py:2079
+#: cinder/volume/drivers/netapp/common.py:123
#, python-format
msgid ""
-"CPU doesn't have compatibility.\n"
-"\n"
-"%(ret)s\n"
-"\n"
-"Refer to %(u)s"
+"Protocol %(storage_protocol)s is not supported for storage family "
+"%(storage_family)s"
msgstr ""

-#: cinder/virt/libvirt/connection.py:2136
+#: cinder/volume/drivers/netapp/common.py:130
#, python-format
-msgid "Timeout migrating for %s. nwfilter not found."
+msgid ""
+"NetApp driver of family %(storage_family)s and protocol "
+"%(storage_protocol)s loaded"
msgstr ""

-#: cinder/virt/libvirt/connection.py:2352
+#: cinder/volume/drivers/netapp/common.py:139
+msgid "Only loading netapp drivers is supported."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/common.py:158
#, python-format
-msgid "skipping %(path)s since it looks like volume"
+msgid ""
+"The configured NetApp driver is deprecated. Please refer to the link to "
+"resolve the issue '%s'."
msgstr ""

-#: cinder/virt/libvirt/connection.py:2407
+#: cinder/volume/drivers/netapp/iscsi.py:69
#, python-format
-msgid "Getting disk size of %(i_name)s: %(e)s"
+msgid "No metadata property %(prop)s defined for the LUN %(name)s"
msgstr ""

-#: cinder/virt/libvirt/connection.py:2458
+#: cinder/volume/drivers/netapp/iscsi.py:105
#, python-format
-msgid "Instance %s: Starting migrate_disk_and_power_off"
+msgid "Using NetApp filer: %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:150
+msgid "Success getting LUN list from server"
msgstr ""

-#: cinder/virt/libvirt/connection.py:2513
-msgid "During wait running, instance disappeared."
+#: cinder/volume/drivers/netapp/iscsi.py:166
+#, python-format
+msgid "Created LUN with name %s"
msgstr ""

-#: cinder/virt/libvirt/connection.py:2518
-msgid "Instance running successfully."
+#: cinder/volume/drivers/netapp/iscsi.py:175
+#, python-format
+msgid "No entry in LUN table for volume/snapshot %(name)s."
msgstr ""

-#: cinder/virt/libvirt/connection.py:2525
+#: cinder/volume/drivers/netapp/iscsi.py:191
#, python-format
-msgid "Instance %s: Starting finish_migration"
+msgid "Destroyed LUN %s"
msgstr ""

-#: cinder/virt/libvirt/connection.py:2565
+#: cinder/volume/drivers/netapp/iscsi.py:227
#, python-format
-msgid "Instance %s: Starting finish_revert_migration"
+msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s"
msgstr ""

-#: cinder/virt/libvirt/firewall.py:42
+#: cinder/volume/drivers/netapp/iscsi.py:232
+#, python-format
msgid ""
-"Libvirt module could not be loaded. NWFilterFirewall will not work "
-"correctly."
+"Successfully fetched target details for LUN %(name)s and initiator "
+"%(initiator_name)s"
msgstr ""

-#: cinder/virt/libvirt/firewall.py:93
-msgid "Called setup_basic_filtering in nwfilter"
+#: cinder/volume/drivers/netapp/iscsi.py:238
+#, fuzzy, python-format
+msgid "Failed to get LUN target details for the LUN %s"
+msgstr "Nelze získat metadata pro ip: %s"
+
+#: cinder/volume/drivers/netapp/iscsi.py:249
+#, fuzzy, python-format
+msgid "Failed to get target portal for the LUN %s"
+msgstr "Nelze získat metadata pro ip: %s"
+
+#: cinder/volume/drivers/netapp/iscsi.py:252
+#, fuzzy, python-format
+msgid "Failed to get target IQN for the LUN %s"
+msgstr "Nelze získat metadata pro ip: %s"
+
+#: cinder/volume/drivers/netapp/iscsi.py:290
+#, python-format
+msgid "Snapshot %s deletion successful"
msgstr ""

-#: cinder/virt/libvirt/firewall.py:101
-msgid "Ensuring static filters"
+#: cinder/volume/drivers/netapp/iscsi.py:310
+#: cinder/volume/drivers/netapp/iscsi.py:565
+#: cinder/volume/drivers/netapp/nfs.py:99
+#: cinder/volume/drivers/netapp/nfs.py:206
+#, python-format
+msgid "Resizing %s failed. Cleaning volume."
msgstr ""

-#: cinder/virt/libvirt/firewall.py:171
+#: cinder/volume/drivers/netapp/iscsi.py:325
#, python-format
-msgid "The nwfilter(%(instance_filter_name)s) is not found."
+msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s"
msgstr ""

-#: cinder/virt/libvirt/firewall.py:217
+#: cinder/volume/drivers/netapp/iscsi.py:412
#, python-format
-msgid "The nwfilter(%(instance_filter_name)s) for%(name)s is not found."
+msgid "Error mapping lun. Code: %(code)s, Message: %(message)s"
msgstr ""

-#: cinder/virt/libvirt/firewall.py:233
-msgid "iptables firewall: Setup Basic Filtering"
+#: cinder/volume/drivers/netapp/iscsi.py:431
+#, python-format
+msgid "Error unmapping lun. Code: %(code)s, Message: %(message)s"
msgstr ""

-#: cinder/virt/libvirt/firewall.py:252
-msgid "Attempted to unfilter instance which is not filtered"
+#: cinder/volume/drivers/netapp/iscsi.py:511
+msgid "Object is not a NetApp LUN."
msgstr ""

-#: cinder/virt/libvirt/imagecache.py:170
+#: cinder/volume/drivers/netapp/iscsi.py:543
#, python-format
-msgid "%s is a valid instance name"
+msgid "Message: %s"
msgstr ""

-#: cinder/virt/libvirt/imagecache.py:173
+#: cinder/volume/drivers/netapp/iscsi.py:545
#, python-format
-msgid "%s has a disk file"
+msgid "Error getting lun attribute. Exception: %s"
msgstr ""

-#: cinder/virt/libvirt/imagecache.py:175
+#: cinder/volume/drivers/netapp/iscsi.py:600
#, python-format
-msgid "Instance %(instance)s is backed by %(backing)s"
+msgid "No need to extend volume %s as it is already the requested new size."
msgstr ""

-#: cinder/virt/libvirt/imagecache.py:186
+#: cinder/volume/drivers/netapp/iscsi.py:606
#, python-format
-msgid ""
-"Instance %(instance)s is using a backing file %(backing)s which does not "
-"appear in the image service"
+msgid "Resizing lun %s directly to new size."
msgstr ""

-#: cinder/virt/libvirt/imagecache.py:237
+#: cinder/volume/drivers/netapp/iscsi.py:633
#, python-format
-msgid "%(id)s (%(base_file)s): image verification failed"
+msgid "Lun %(path)s geometry failed. Message - %(msg)s"
msgstr ""

-#: cinder/virt/libvirt/imagecache.py:247
+#: cinder/volume/drivers/netapp/iscsi.py:662
#, python-format
-msgid "%(id)s (%(base_file)s): image verification skipped, no hash stored"
+msgid "Moving lun %(name)s to %(new_name)s."
msgstr "" -#: cinder/virt/libvirt/imagecache.py:266 +#: cinder/volume/drivers/netapp/iscsi.py:677 #, python-format -msgid "Cannot remove %(base_file)s, it does not exist" +msgid "Resizing lun %s using sub clone to new size." msgstr "" -#: cinder/virt/libvirt/imagecache.py:278 +#: cinder/volume/drivers/netapp/iscsi.py:684 #, python-format -msgid "Base file too young to remove: %s" +msgid "%s cannot be sub clone resized as it is hosted on compressed volume" msgstr "" -#: cinder/virt/libvirt/imagecache.py:281 +#: cinder/volume/drivers/netapp/iscsi.py:690 #, python-format -msgid "Removing base file: %s" +msgid "%s cannot be sub clone resized as it contains no blocks." msgstr "" -#: cinder/virt/libvirt/imagecache.py:288 +#: cinder/volume/drivers/netapp/iscsi.py:707 #, python-format -msgid "Failed to remove %(base_file)s, error was %(error)s" +msgid "Post clone resize lun %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:299 +#: cinder/volume/drivers/netapp/iscsi.py:718 #, python-format -msgid "%(id)s (%(base_file)s): checking" +msgid "Failure staging lun %s to tmp." msgstr "" -#: cinder/virt/libvirt/imagecache.py:318 +#: cinder/volume/drivers/netapp/iscsi.py:723 #, python-format -msgid "" -"%(id)s (%(base_file)s): in use: on this node %(local)d local, %(remote)d " -"on other nodes" +msgid "Failure moving new cloned lun to %s." msgstr "" -#: cinder/virt/libvirt/imagecache.py:330 +#: cinder/volume/drivers/netapp/iscsi.py:727 #, python-format -msgid "" -"%(id)s (%(base_file)s): warning -- an absent base file is in use! " -"instances: %(instance_list)s" +msgid "Failure deleting staged tmp lun %s." msgstr "" -#: cinder/virt/libvirt/imagecache.py:338 +#: cinder/volume/drivers/netapp/iscsi.py:730 #, python-format -msgid "%(id)s (%(base_file)s): in use on (%(remote)d on other nodes)" +msgid "Unknown exception in post clone resize lun %s." msgstr "" -#: cinder/virt/libvirt/imagecache.py:348 +#: cinder/volume/drivers/netapp/iscsi.py:732 #, python-format -msgid "%(id)s (%(base_file)s): image is not in use" +msgid "Exception details: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:354 -#, python-format -msgid "%(id)s (%(base_file)s): image is in use" +#: cinder/volume/drivers/netapp/iscsi.py:736 +msgid "Getting lun block count." msgstr "" -#: cinder/virt/libvirt/imagecache.py:377 +#: cinder/volume/drivers/netapp/iscsi.py:741 #, python-format -msgid "Skipping verification, no base directory at %s" +msgid "Failure getting lun info for %s." 
msgstr ""

-#: cinder/virt/libvirt/imagecache.py:381
-msgid "Verify base images"
+#: cinder/volume/drivers/netapp/iscsi.py:785
+#, python-format
+msgid "Failed to get vol with required size and extra specs for volume: %s"
msgstr ""

-#: cinder/virt/libvirt/imagecache.py:388
+#: cinder/volume/drivers/netapp/iscsi.py:796
#, python-format
-msgid "Image id %(id)s yields fingerprint %(fingerprint)s"
+msgid "Error provisioning vol %(name)s on %(volume)s"
msgstr ""

-#: cinder/virt/libvirt/imagecache.py:406
+#: cinder/volume/drivers/netapp/iscsi.py:841
#, python-format
-msgid "Unknown base file: %s"
+msgid "No iscsi service found for vserver %s"
msgstr ""

-#: cinder/virt/libvirt/imagecache.py:411
+#: cinder/volume/drivers/netapp/iscsi.py:982
#, python-format
-msgid "Active base files: %s"
+msgid "Cloned LUN with new name %s"
msgstr ""

-#: cinder/virt/libvirt/imagecache.py:414
+#: cinder/volume/drivers/netapp/iscsi.py:986
#, python-format
-msgid "Corrupt base files: %s"
+msgid "No cloned lun named %s found on the filer"
msgstr ""

-#: cinder/virt/libvirt/imagecache.py:418
-#, python-format
-msgid "Removable base files: %s"
+#: cinder/volume/drivers/netapp/iscsi.py:1087
+msgid "Cluster ssc is not updated. No volume stats found."
msgstr ""

-#: cinder/virt/libvirt/imagecache.py:426
-msgid "Verification complete"
+#: cinder/volume/drivers/netapp/iscsi.py:1149
+#: cinder/volume/drivers/netapp/nfs.py:1080
+msgid "Unsupported ONTAP version. ONTAP version 7.3.1 and above is supported."
msgstr ""

-#: cinder/virt/libvirt/utils.py:264
-msgid "Unable to find an open port"
+#: cinder/volume/drivers/netapp/iscsi.py:1153
+#: cinder/volume/drivers/netapp/nfs.py:1084
+#: cinder/volume/drivers/netapp/utils.py:320
+msgid "API version could not be determined."
msgstr ""

-#: cinder/virt/libvirt/vif.py:90
+#: cinder/volume/drivers/netapp/iscsi.py:1164
+#, fuzzy, python-format
+msgid "Failed to get vol with required size for volume: %s"
+msgstr "Nelze získat metadata pro ip: %s"
+
+#: cinder/volume/drivers/netapp/iscsi.py:1273
#, python-format
-msgid "Ensuring vlan %(vlan)s and bridge %(bridge)s"
+msgid "Error finding luns for volume %s. Verify volume exists."
msgstr ""

-#: cinder/virt/libvirt/vif.py:99
+#: cinder/volume/drivers/netapp/iscsi.py:1390
#, python-format
-msgid "Ensuring bridge %s"
+msgid "Clone operation with src %(name)s and dest %(new_name)s completed"
msgstr ""

-#: cinder/virt/libvirt/vif.py:165 cinder/virt/libvirt/vif.py:220
+#: cinder/volume/drivers/netapp/iscsi.py:1393
#, python-format
-msgid "Failed while unplugging vif of instance '%s'"
+msgid "Clone operation with src %(name)s and dest %(new_name)s failed"
msgstr ""

-#: cinder/virt/libvirt/volume.py:163
-#, python-format
-msgid "iSCSI device not found at %s"
+#: cinder/volume/drivers/netapp/iscsi.py:1456
+msgid "Volume refresh job already running. Returning..."
msgstr ""

-#: cinder/virt/libvirt/volume.py:166
+#: cinder/volume/drivers/netapp/iscsi.py:1462
#, python-format
msgid ""
-"ISCSI volume not yet found at: %(mount_device)s. Will rescan & retry. "
-"Try number: %(tries)s"
+msgid "Error refreshing vol capacity. Message: %s"
msgstr ""

-#: cinder/virt/libvirt/volume.py:178
+#: cinder/volume/drivers/netapp/iscsi.py:1470
#, python-format
-msgid "Found iSCSI node %(mount_device)s (after %(tries)s rescans)"
+msgid "Refreshing capacity info for %s."
msgstr "" -#: cinder/virt/vmwareapi/error_util.py:93 +#: cinder/volume/drivers/netapp/nfs.py:104 +#: cinder/volume/drivers/netapp/nfs.py:211 #, python-format -msgid "Error(s) %s occurred in the call to RetrieveProperties" +msgid "NFS file %s not discovered." msgstr "" -#: cinder/virt/vmwareapi/fake.py:44 cinder/virt/xenapi/fake.py:77 +#: cinder/volume/drivers/netapp/nfs.py:224 #, python-format -msgid "%(text)s: _db_content => %(content)s" +msgid "Copied image to volume %s" msgstr "" -#: cinder/virt/vmwareapi/fake.py:131 +#: cinder/volume/drivers/netapp/nfs.py:230 #, python-format -msgid "Property %(attr)s not set for the managed object %(objName)s" +msgid "Registering image in cache %s" msgstr "" -#: cinder/virt/vmwareapi/fake.py:437 -msgid "There is no VM registered" +#: cinder/volume/drivers/netapp/nfs.py:237 +#, python-format +msgid "" +"Exception while registering image %(image_id)s in cache. Exception: " +"%(exc)s" msgstr "" -#: cinder/virt/vmwareapi/fake.py:439 cinder/virt/vmwareapi/fake.py:609 +#: cinder/volume/drivers/netapp/nfs.py:250 #, python-format -msgid "Virtual Machine with ref %s is not there" +msgid "Found cache file for image %(image_id)s on share %(share)s" msgstr "" -#: cinder/virt/vmwareapi/fake.py:502 +#: cinder/volume/drivers/netapp/nfs.py:263 #, python-format -msgid "Logging out a session that is invalid or already logged out: %s" +msgid "Cloning img from cache for %s" msgstr "" -#: cinder/virt/vmwareapi/fake.py:517 -msgid "Session is faulty" +#: cinder/volume/drivers/netapp/nfs.py:271 +msgid "Image cache cleaning in progress. Returning... " msgstr "" -#: cinder/virt/vmwareapi/fake.py:520 -msgid "Session Invalid" +#: cinder/volume/drivers/netapp/nfs.py:282 +msgid "Image cache cleaning in progress." msgstr "" -#: cinder/virt/vmwareapi/fake.py:606 -msgid " No Virtual Machine has been registered yet" +#: cinder/volume/drivers/netapp/nfs.py:293 +#, python-format +msgid "Cleaning cache for share %s." msgstr "" -#: cinder/virt/vmwareapi/io_util.py:99 +#: cinder/volume/drivers/netapp/nfs.py:298 #, python-format -msgid "Glance image %s is in killed state" +msgid "Files to be queued for deletion %s" msgstr "" -#: cinder/virt/vmwareapi/io_util.py:107 +#: cinder/volume/drivers/netapp/nfs.py:305 #, python-format -msgid "Glance image %(image_id)s is in unknown state - %(state)s" +msgid "Exception during cache cleaning %(share)s. Message - %(ex)s" msgstr "" -#: cinder/virt/vmwareapi/network_utils.py:128 -msgid "" -"ESX SOAP server returned an empty port group for the host system in its " -"response" +#: cinder/volume/drivers/netapp/nfs.py:311 +msgid "Image cache cleaning done." msgstr "" -#: cinder/virt/vmwareapi/network_utils.py:155 +#: cinder/volume/drivers/netapp/nfs.py:336 #, python-format -msgid "Creating Port Group with name %s on the ESX host" +msgid "Bytes to free %s" msgstr "" -#: cinder/virt/vmwareapi/network_utils.py:169 +#: cinder/volume/drivers/netapp/nfs.py:343 #, python-format -msgid "Created Port Group with name %s on the ESX host" +msgid "Delete file path %s" msgstr "" -#: cinder/virt/vmwareapi/read_write_util.py:150 +#: cinder/volume/drivers/netapp/nfs.py:358 #, python-format -msgid "Exception during HTTP connection close in VMWareHTTpWrite. Exception is %s" -msgstr "" - -#: cinder/virt/vmwareapi/vim.py:84 -msgid "Unable to import suds." 
-msgstr "" - -#: cinder/virt/vmwareapi/vim.py:90 -msgid "Must specify vmwareapi_wsdl_loc" +msgid "Deleting file at path %s" msgstr "" -#: cinder/virt/vmwareapi/vim.py:145 +#: cinder/volume/drivers/netapp/nfs.py:363 #, python-format -msgid "No such SOAP method '%s' provided by VI SDK" +msgid "Exception during deleting %s" msgstr "" -#: cinder/virt/vmwareapi/vim.py:150 +#: cinder/volume/drivers/netapp/nfs.py:395 #, python-format -msgid "httplib error in %s: " +msgid "Unexpected exception in cloning image %(image_id)s. Message: %(msg)s" msgstr "" -#: cinder/virt/vmwareapi/vim.py:157 +#: cinder/volume/drivers/netapp/nfs.py:411 #, python-format -msgid "Socket error in %s: " +msgid "Cloning image %s from cache" msgstr "" -#: cinder/virt/vmwareapi/vim.py:162 +#: cinder/volume/drivers/netapp/nfs.py:415 #, python-format -msgid "Type error in %s: " +msgid "Cache share: %s" msgstr "" -#: cinder/virt/vmwareapi/vim.py:166 +#: cinder/volume/drivers/netapp/nfs.py:425 #, python-format -msgid "Exception in %s " -msgstr "" - -#: cinder/virt/vmwareapi/vmops.py:66 -msgid "Getting list of instances" +msgid "Unexpected exception during image cloning in share %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:82 +#: cinder/volume/drivers/netapp/nfs.py:431 #, python-format -msgid "Got total of %s instances" +msgid "Cloning image %s directly in share" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:126 -msgid "Couldn't get a local Datastore reference" +#: cinder/volume/drivers/netapp/nfs.py:436 +#, python-format +msgid "Share is cloneable %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:196 +#: cinder/volume/drivers/netapp/nfs.py:443 #, python-format -msgid "Creating VM with the name %s on the ESX host" +msgid "Image is raw %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:205 +#: cinder/volume/drivers/netapp/nfs.py:450 #, python-format -msgid "Created VM with the name %s on the ESX host" +msgid "Image will locally be converted to raw %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:234 +#: cinder/volume/drivers/netapp/nfs.py:457 #, python-format -msgid "" -"Creating Virtual Disk of size %(vmdk_file_size_in_kb)s KB and adapter " -"type %(adapter_type)s on the ESX host local store %(data_store_name)s" +msgid "Converted to raw, but format is now %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:251 +#: cinder/volume/drivers/netapp/nfs.py:467 #, python-format -msgid "" -"Created Virtual Disk of size %(vmdk_file_size_in_kb)s KB on the ESX host " -"local store %(data_store_name)s" +msgid "Performing post clone for %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:260 -#, python-format -msgid "" -"Deleting the file %(flat_uploaded_vmdk_path)s on the ESX host localstore " -"%(data_store_name)s" +#: cinder/volume/drivers/netapp/nfs.py:474 +msgid "NFS file could not be discovered." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:272 -#, python-format -msgid "" -"Deleted the file %(flat_uploaded_vmdk_path)s on the ESX host local store " -"%(data_store_name)s" +#: cinder/volume/drivers/netapp/nfs.py:478 +msgid "Checking file for resize" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:283 +#: cinder/volume/drivers/netapp/nfs.py:482 #, python-format -msgid "" -"Downloading image file data %(image_ref)s to the ESX data store " -"%(data_store_name)s" +msgid "Resizing file to %sG" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:298 -#, python-format -msgid "" -"Downloaded image file data %(image_ref)s to the ESX data store " -"%(data_store_name)s" +#: cinder/volume/drivers/netapp/nfs.py:488 +msgid "Resizing image file failed." 
msgstr "" -#: cinder/virt/vmwareapi/vmops.py:315 -#, python-format -msgid "Reconfiguring VM instance %s to attach the image disk" +#: cinder/volume/drivers/netapp/nfs.py:510 +msgid "Discover file retries exhausted." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:322 +#: cinder/volume/drivers/netapp/nfs.py:529 #, python-format -msgid "Reconfigured VM instance %s to attach the image disk" +msgid "Image location not in the expected format %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:329 +#: cinder/volume/drivers/netapp/nfs.py:557 #, python-format -msgid "Powering on the VM instance %s" +msgid "Found possible share matches %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:335 -#, python-format -msgid "Powered on the VM instance %s" +#: cinder/volume/drivers/netapp/nfs.py:561 +msgid "Unexpected exception while short listing used share." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:381 +#: cinder/volume/drivers/netapp/nfs.py:599 #, python-format -msgid "Creating Snapshot of the VM instance %s " +msgid "Extending volume %s." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:391 +#: cinder/volume/drivers/netapp/nfs.py:710 #, python-format -msgid "Created Snapshot of the VM instance %s " +msgid "Shares on vserver %s will only be used for provisioning." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:434 -#, python-format -msgid "Copying disk data before snapshot of the VM instance %s" +#: cinder/volume/drivers/netapp/nfs.py:714 +#: cinder/volume/drivers/netapp/nfs.py:892 +msgid "No vserver set in config. SSC will be disabled." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:447 +#: cinder/volume/drivers/netapp/nfs.py:757 #, python-format -msgid "Copied disk data before snapshot of the VM instance %s" +msgid "Exception creating vol %(name)s on share %(share)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:456 +#: cinder/volume/drivers/netapp/nfs.py:765 #, python-format -msgid "Uploading image %s" +msgid "Volume %s could not be created on shares." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:469 +#: cinder/volume/drivers/netapp/nfs.py:815 #, python-format -msgid "Uploaded image %s" +msgid "No interface found on cluster for ip %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:479 +#: cinder/volume/drivers/netapp/nfs.py:856 #, python-format -msgid "Deleting temporary vmdk file %s" +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " msgstr "" -#: cinder/virt/vmwareapi/vmops.py:488 +#: cinder/volume/drivers/netapp/nfs.py:862 #, python-format -msgid "Deleted temporary vmdk file %s" +msgid "" +"Cloning with params volume %(volume)s, src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:520 -msgid "instance is not powered on" +#: cinder/volume/drivers/netapp/nfs.py:924 +msgid "No cluster ssc stats found. Wait for next volume stats update." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:527 -#, python-format -msgid "Rebooting guest OS of VM %s" +#: cinder/volume/drivers/netapp/nfs.py:941 +msgid "No shares found hence skipping ssc refresh." 
msgstr "" -#: cinder/virt/vmwareapi/vmops.py:530 +#: cinder/volume/drivers/netapp/nfs.py:978 +#: cinder/volume/drivers/netapp/nfs.py:1221 #, python-format -msgid "Rebooted guest OS of VM %s" +msgid "Shortlisted del elg files %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:532 +#: cinder/volume/drivers/netapp/nfs.py:983 +#: cinder/volume/drivers/netapp/nfs.py:1226 #, python-format -msgid "Doing hard reboot of VM %s" +msgid "Getting file usage for %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:536 +#: cinder/volume/drivers/netapp/nfs.py:988 +#: cinder/volume/drivers/netapp/nfs.py:1231 #, python-format -msgid "Did hard reboot of VM %s" +msgid "file-usage for path %(path)s is %(bytes)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:548 +#: cinder/volume/drivers/netapp/nfs.py:1005 +#: cinder/volume/drivers/netapp/nfs.py:1268 #, python-format -msgid "instance - %s not present" +msgid "Share match found for ip %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:567 +#: cinder/volume/drivers/netapp/nfs.py:1007 +#: cinder/volume/drivers/netapp/nfs.py:1270 #, python-format -msgid "Powering off the VM %s" +msgid "No share match found for ip %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:572 +#: cinder/volume/drivers/netapp/nfs.py:1038 #, python-format -msgid "Powered off the VM %s" +msgid "Found volume %(vol)s for share %(share)s." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:576 +#: cinder/volume/drivers/netapp/nfs.py:1129 #, python-format -msgid "Unregistering the VM %s" +msgid "No storage path found for export path %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:579 +#: cinder/volume/drivers/netapp/nfs.py:1139 #, python-format -msgid "Unregistered the VM %s" +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:581 +#: cinder/volume/drivers/netapp/ssc_utils.py:241 #, python-format -msgid "" -"In vmwareapi:vmops:destroy, got this exception while un-registering the " -"VM: %s" +msgid "Unexpected error while creating ssc vol list. Message - %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:592 +#: cinder/volume/drivers/netapp/ssc_utils.py:272 #, python-format -msgid "Deleting contents of the VM %(name)s from datastore %(datastore_name)s" +msgid "Exception querying aggr options. %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:602 +#: cinder/volume/drivers/netapp/ssc_utils.py:313 #, python-format -msgid "Deleted contents of the VM %(name)s from datastore %(datastore_name)s" +msgid "Exception querying sis information. %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:607 +#: cinder/volume/drivers/netapp/ssc_utils.py:347 #, python-format -msgid "" -"In vmwareapi:vmops:destroy, got this exception while deleting the VM " -"contents from the disk: %s" +msgid "Exception querying mirror information. %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:615 -msgid "pause not supported for vmwareapi" -msgstr "" - -#: cinder/virt/vmwareapi/vmops.py:619 -msgid "unpause not supported for vmwareapi" +#: cinder/volume/drivers/netapp/ssc_utils.py:379 +#, python-format +msgid "Exception querying storage disk. 
%s"
msgstr ""

-#: cinder/virt/vmwareapi/vmops.py:633
+#: cinder/volume/drivers/netapp/ssc_utils.py:421
#, python-format
-msgid "Suspending the VM %s "
+msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s"
msgstr ""

-#: cinder/virt/vmwareapi/vmops.py:637
+#: cinder/volume/drivers/netapp/ssc_utils.py:455
#, python-format
-msgid "Suspended the VM %s "
+msgid "Successfully completed stale refresh job for %(server)s and vserver %(vs)s"
msgstr ""

-#: cinder/virt/vmwareapi/vmops.py:640
-msgid "instance is powered off and can not be suspended."
+#: cinder/volume/drivers/netapp/ssc_utils.py:482
+#, python-format
+msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s"
msgstr ""

-#: cinder/virt/vmwareapi/vmops.py:643
+#: cinder/volume/drivers/netapp/ssc_utils.py:488
#, python-format
-msgid "VM %s was already in suspended state. So returning without doing anything"
+msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s"
msgstr ""

-#: cinder/virt/vmwareapi/vmops.py:656
-#, python-format
-msgid "Resuming the VM %s"
+#: cinder/volume/drivers/netapp/ssc_utils.py:500
+msgid "Backend not a VolumeDriver."
msgstr ""

-#: cinder/virt/vmwareapi/vmops.py:661
-#, python-format
-msgid "Resumed the VM %s "
+#: cinder/volume/drivers/netapp/ssc_utils.py:502
+msgid "Backend server not NaServer."
msgstr ""

-#: cinder/virt/vmwareapi/vmops.py:663
-msgid "instance is not in a suspended state"
+#: cinder/volume/drivers/netapp/ssc_utils.py:505
+msgid "ssc job in progress. Returning... "
msgstr ""

-#: cinder/virt/vmwareapi/vmops.py:699
-msgid "get_diagnostics not implemented for vmwareapi"
+#: cinder/volume/drivers/netapp/ssc_utils.py:517
+msgid "refresh stale ssc job in progress. Returning... "
msgstr ""

-#: cinder/virt/vmwareapi/vmops.py:757
-#, python-format
-msgid ""
-"Reconfiguring VM instance %(name)s to set the machine id with ip - "
-"%(ip_addr)s"
+#: cinder/volume/drivers/netapp/ssc_utils.py:616
+msgid "Fatal error: User not permitted to query NetApp volumes."
msgstr ""

-#: cinder/virt/vmwareapi/vmops.py:765
+#: cinder/volume/drivers/netapp/ssc_utils.py:623
#, python-format
msgid ""
-"Reconfigured VM instance %(name)s to set the machine id with ip - "
-"%(ip_addr)s"
+"The user does not have access or sufficient privileges to use all ssc "
+"apis. The ssc features %s may not work as expected."
msgstr ""

-#: cinder/virt/vmwareapi/vmops.py:802
-#, python-format
-msgid "Creating directory with path %s"
+#: cinder/volume/drivers/netapp/utils.py:122
+msgid "ems executed successfully."
msgstr ""

-#: cinder/virt/vmwareapi/vmops.py:806
+#: cinder/volume/drivers/netapp/utils.py:124
#, python-format
-msgid "Created directory with path %s"
+msgid "Failed to invoke ems. Message: %s"
msgstr ""

-#: cinder/virt/vmwareapi/vmware_images.py:89
-#, python-format
-msgid "Downloading image %s from glance image server"
+#: cinder/volume/drivers/netapp/utils.py:137
+msgid ""
+"This is not the recommended way to use NetApp drivers. Please use "
+"NetAppDriver to achieve the functionality."
msgstr ""

-#: cinder/virt/vmwareapi/vmware_images.py:103
-#, python-format
-msgid "Downloaded image %s from glance image server"
+#: cinder/volume/drivers/netapp/utils.py:160
+msgid "Requires an NaServer instance."
msgstr ""

-#: cinder/virt/vmwareapi/vmware_images.py:108
-#, python-format
-msgid "Uploading image %s to the Glance image server"
+#: cinder/volume/drivers/netapp/utils.py:317
+msgid "Unsupported Clustered Data ONTAP version."
msgstr ""

-#: cinder/virt/vmwareapi/vmware_images.py:129
+#: cinder/volume/drivers/nexenta/iscsi.py:99
#, python-format
-msgid "Uploaded image %s to the Glance image server"
+msgid "Volume %s does not exist in Nexenta SA"
msgstr ""

-#: cinder/virt/vmwareapi/vmware_images.py:139
+#: cinder/volume/drivers/nexenta/iscsi.py:150
#, python-format
-msgid "Getting image size for the image %s"
+msgid "Extending volume: %(id)s New size: %(size)s GB"
msgstr ""

-#: cinder/virt/vmwareapi/vmware_images.py:143
+#: cinder/volume/drivers/nexenta/iscsi.py:166
#, python-format
-msgid "Got image size of %(size)s for the image %(image)s"
-msgstr ""
-
-#: cinder/virt/xenapi/fake.py:553 cinder/virt/xenapi/fake.py:652
-#: cinder/virt/xenapi/fake.py:670 cinder/virt/xenapi/fake.py:732
-msgid "Raising NotImplemented"
+msgid "Volume %s does not exist, it seems it was already deleted."
msgstr ""

-#: cinder/virt/xenapi/fake.py:555
+#: cinder/volume/drivers/nexenta/iscsi.py:179
#, python-format
-msgid "xenapi.fake does not have an implementation for %s"
+msgid "Cannot delete snapshot %(origin)s: %(exc)s"
msgstr ""

-#: cinder/virt/xenapi/fake.py:589
+#: cinder/volume/drivers/nexenta/iscsi.py:190
#, python-format
-msgid "Calling %(localname)s %(impl)s"
+msgid "Creating temp snapshot of the original volume: %(volume_name)s@%(name)s"
msgstr ""

-#: cinder/virt/xenapi/fake.py:594
+#: cinder/volume/drivers/nexenta/iscsi.py:200
+#: cinder/volume/drivers/nexenta/nfs.py:200
#, python-format
-msgid "Calling getter %s"
+msgid "Volume creation failed, deleting created snapshot %(volume_name)s@%(name)s"
msgstr ""

-#: cinder/virt/xenapi/fake.py:654
+#: cinder/volume/drivers/nexenta/iscsi.py:205
+#: cinder/volume/drivers/nexenta/nfs.py:205
#, python-format
-msgid ""
-"xenapi.fake does not have an implementation for %s or it has been called "
-"with the wrong number of arguments"
+msgid "Failed to delete zfs snapshot %(volume_name)s@%(name)s"
msgstr ""

-#: cinder/virt/xenapi/host.py:67
+#: cinder/volume/drivers/nexenta/iscsi.py:223
#, python-format
-msgid ""
-"Instance %(name)s running on %(host)s could not be found in the database:"
-" assuming it is a worker VM and skipping migration to a new host"
+msgid "Enter: migrate_volume: id=%(id)s, host=%(host)s"
msgstr ""

-#: cinder/virt/xenapi/host.py:137
+#: cinder/volume/drivers/nexenta/iscsi.py:250
#, python-format
-msgid "Unable to get SR for this host: %s"
+msgid "Remote NexentaStor appliance at %s should be SSH-bound."
msgstr ""

-#: cinder/virt/xenapi/host.py:169
-#, fuzzy
-msgid "Unable to get updated status"
-msgstr "Nelze vytvořit typ instance"
-
-#: cinder/virt/xenapi/host.py:172
+#: cinder/volume/drivers/nexenta/iscsi.py:267
#, python-format
-msgid "The call to %(method)s returned an error: %(e)s."
+msgid ""
+"Cannot send source snapshot %(src)s to destination %(dst)s. Reason: "
+"%(exc)s"
msgstr ""

-#: cinder/virt/xenapi/network_utils.py:37
+#: cinder/volume/drivers/nexenta/iscsi.py:275
#, python-format
-msgid "Found non-unique network for name_label %s"
+msgid ""
+"Cannot delete temporary source snapshot %(src)s on NexentaStor Appliance:"
+" %(exc)s"
msgstr ""

-#: cinder/virt/xenapi/network_utils.py:55
+#: cinder/volume/drivers/nexenta/iscsi.py:281
#, python-format
-msgid "Found non-unique network for bridge %s"
+msgid "Cannot delete source volume %(volume)s on NexentaStor Appliance: %(exc)s"
msgstr ""

-#: cinder/virt/xenapi/network_utils.py:58
+#: cinder/volume/drivers/nexenta/iscsi.py:318
#, python-format
-msgid "Found no network for bridge %s"
+msgid "Snapshot %s does not exist, it seems it was already deleted."
msgstr ""

-#: cinder/virt/xenapi/pool.py:111
+#: cinder/volume/drivers/nexenta/iscsi.py:439
+#: cinder/volume/drivers/windows/windows_utils.py:230
#, python-format
-msgid "Unable to eject %(host)s from the pool; pool not empty"
+msgid "Ignored target creation error \"%s\" while ensuring export"
msgstr ""

-#: cinder/virt/xenapi/pool.py:126
+#: cinder/volume/drivers/nexenta/iscsi.py:449
#, python-format
-msgid "Unable to eject %(host)s from the pool; No master found"
+msgid "Ignored target group creation error \"%s\" while ensuring export"
msgstr ""

-#: cinder/virt/xenapi/pool.py:143
+#: cinder/volume/drivers/nexenta/iscsi.py:461
#, python-format
-msgid "Pool-Join failed: %(e)s"
+msgid "Ignored target group member addition error \"%s\" while ensuring export"
msgstr ""

-#: cinder/virt/xenapi/pool.py:146
+#: cinder/volume/drivers/nexenta/iscsi.py:471
#, python-format
-msgid "Unable to join %(host)s in the pool"
+msgid "Ignored LU creation error \"%s\" while ensuring export"
msgstr ""

-#: cinder/virt/xenapi/pool.py:162
+#: cinder/volume/drivers/nexenta/iscsi.py:481
#, python-format
-msgid "Pool-eject failed: %(e)s"
+msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export"
msgstr ""

-#: cinder/virt/xenapi/pool.py:174
-#, fuzzy, python-format
-msgid "Unable to set up pool: %(e)s."
-msgstr "Nelze použít globální roli %(role_id)s"
-
-#: cinder/virt/xenapi/pool.py:185
+#: cinder/volume/drivers/nexenta/iscsi.py:514
#, python-format
-msgid "Pool-set_name_label failed: %(e)s"
+msgid ""
+"Got error trying to destroy target group %(target_group)s, assuming it is"
+" already gone: %(exc)s"
msgstr ""

-#: cinder/virt/xenapi/vif.py:103
+#: cinder/volume/drivers/nexenta/iscsi.py:522
#, python-format
-msgid "Found no PIF for device %s"
+msgid ""
+"Got error trying to delete target %(target)s, assuming it is already "
+"gone: %(exc)s"
msgstr ""

-#: cinder/virt/xenapi/vif.py:122
+#: cinder/volume/drivers/nexenta/jsonrpc.py:83
#, python-format
msgid ""
-"PIF %(pif_rec['uuid'])s for network %(bridge)s has VLAN id %(pif_vlan)d. "
-"Expected %(vlan_num)d"
+msgid "Sending JSON data: %s"
msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:218
-msgid "Created VM"
+#: cinder/volume/drivers/nexenta/jsonrpc.py:88
+msgid "No headers in server response"
msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:245
+#: cinder/volume/drivers/nexenta/jsonrpc.py:89
+#, fuzzy
+msgid "Bad response from server"
+msgstr "Špatná odpověď od SolidFire API"
+
+#: cinder/volume/drivers/nexenta/jsonrpc.py:90
#, python-format
-msgid "VBD not found in instance %s"
+msgid "Auto switching to HTTPS connection to %s"
msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:262
-#, fuzzy, python-format
-msgid "VBD %s already detached"
-msgstr "skupina %s již existuje"
+#: cinder/volume/drivers/nexenta/jsonrpc.py:96
+#, fuzzy, python-format
+msgid "Got response: %s"
+msgstr "odpověď %s"

-#: cinder/virt/xenapi/vm_utils.py:265
+#: cinder/volume/drivers/nexenta/nfs.py:85
#, python-format
-msgid "VBD %(vbd_ref)s detach rejected, attempt %(num_attempt)d/%(max_attempts)d"
+msgid "Volume %s does not exist in Nexenta Store appliance"
msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:270
+#: cinder/volume/drivers/nexenta/nfs.py:89
#, python-format
-msgid "Unable to unplug VBD %s"
+msgid "Folder %s does not exist in Nexenta Store appliance"
msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:275
+#: cinder/volume/drivers/nexenta/nfs.py:114
#, python-format
-msgid "Reached maximum number of retries trying to unplug VBD %s"
+msgid "Creating folder on Nexenta Store %s"
msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:286
+#: cinder/volume/drivers/nexenta/nfs.py:146
#, python-format
-msgid "Unable to destroy VBD %s"
+msgid "Cannot destroy created folder: %(vol)s/%(folder)s"
msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:305
+#: cinder/volume/drivers/nexenta/nfs.py:176
#, python-format
-msgid "Creating %(vbd_type)s-type VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... "
+msgid "Cannot destroy cloned folder: %(vol)s/%(folder)s"
msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:308
+#: cinder/volume/drivers/nexenta/nfs.py:227
#, python-format
-msgid "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s."
+msgid "Folder %s does not exist, it was already deleted."
msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:319
+#: cinder/volume/drivers/nexenta/nfs.py:237
+#: cinder/volume/drivers/nexenta/nfs.py:268
#, python-format
-msgid "Unable to destroy VDI %s"
+msgid "Snapshot %s does not exist, it was already deleted."
msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:336
+#: cinder/volume/drivers/nexenta/nfs.py:302
#, python-format
-msgid ""
-"Created VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s)"
-" on %(sr_ref)s."
+msgid "Creating regular file: %s. This may take some time."
msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:345
+#: cinder/volume/drivers/nexenta/nfs.py:313
#, python-format
-msgid "Copied VDI %(vdi_ref)s from VDI %(vdi_to_copy_ref)s on %(sr_ref)s."
+msgid "Regular file: %s created."
msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:353
+#: cinder/volume/drivers/nexenta/nfs.py:365
#, python-format
-msgid "Cloned VDI %(vdi_ref)s from VDI %(vdi_to_clone_ref)s"
+msgid "Sharing folder %s on Nexenta Store"
msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:372
-#, fuzzy, python-format
-msgid "No primary VDI found for %(vm_ref)s"
-msgstr "Nenalezeno žádné cílové id ve svazku %(volume_id)s."
- -#: cinder/virt/xenapi/vm_utils.py:379 +#: cinder/volume/drivers/nexenta/nfs.py:393 #, python-format -msgid "Snapshotting with label '%(label)s'" +msgid "Shares loaded: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:392 +#: cinder/volume/drivers/nexenta/utils.py:46 #, python-format -msgid "Created snapshot %(template_vm_ref)s" +msgid "Invalid value: \"%s\"" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:431 +#: cinder/volume/drivers/san/hp_lefthand.py:93 #, python-format -msgid "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s" +msgid "CLIQ command returned %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:583 +#: cinder/volume/drivers/san/hp_lefthand.py:99 #, python-format -msgid "Creating blank HD of size %(req_size)d gigs" +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:634 +#: cinder/volume/drivers/san/hp_lefthand.py:107 #, python-format -msgid "" -"Fast cloning is only supported on default local SR of type ext. SR on " -"this system was found to be of type %(sr_type)s. Ignoring the cow flag." +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:724 +#: cinder/volume/drivers/san/hp_lefthand.py:137 #, python-format msgid "" -"download_vhd %(image)s attempt %(attempt_num)d/%(max_attempts)d from " -"%(glance_host)s:%(glance_port)s" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:734 +#: cinder/volume/drivers/san/hp_lefthand.py:190 #, python-format -msgid "download_vhd failed: %r" +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:750 +#: cinder/volume/drivers/san/hp_lefthand.py:246 #, python-format -msgid "Asking xapi to fetch vhd image %(image)s" +msgid "Snapshot info: %(name)s => %(attributes)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:760 -#, python-format -msgid "" -"xapi 'download_vhd' returned VDI of type '%(vdi_type)s' with UUID " -"'%(vdi_uuid)s'" +#: cinder/volume/drivers/san/hp_lefthand.py:321 +msgid "local_path not supported" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:789 -#, python-format -msgid "vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes=%(vdi_size_bytes)d" +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:805 -#, python-format -msgid "image_size_bytes=%(size_bytes)d, allowed_size_bytes=%(allowed_size_bytes)d" +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:809 +#: cinder/volume/drivers/san/solaris.py:79 #, python-format -msgid "" -"Image size %(size_bytes)d exceeded instance_type allowed size " -"%(allowed_size_bytes)d" +msgid "Cannot parse list-view output: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:831 +#: cinder/volume/drivers/san/solaris.py:174 #, python-format -msgid "Fetching image %(image)s, type %(image_type_str)" +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:844 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:166 #, python-format -msgid "Size for image %(image)s: %(virtual_size)d" +msgid "Invalid hp3parclient version. Version %s or greater required." 
msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:853
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:179
#, python-format
-msgid ""
-"Kernel/Ramdisk image is too large: %(vdi_size)d bytes, max %(max_size)d "
-"bytes"
+msgid "Failed to log in to 3PAR (%(url)s) because %(err)s"
msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:870
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:193
#, python-format
-msgid "Copying VDI %s to /boot/guest on dom0"
+msgid "HP3PARCommon %(common_ver)s, hp3parclient %(rest_ver)s"
msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:884
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:212
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:494
#, python-format
-msgid "Kernel/Ramdisk VDI %s destroyed"
+msgid "CPG (%s) doesn't exist on array"
msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:895
-#, fuzzy
-msgid "Failed to fetch glance image"
-msgstr "Nelze restartovat instanci"
-
-#: cinder/virt/xenapi/vm_utils.py:934
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:228
#, python-format
-msgid "Detected %(image_type_str)s format for image %(image_ref)s"
+msgid "Failed to get domain because CPG (%s) doesn't exist on array."
msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:955
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:247
#, python-format
-msgid "Looking up vdi %s for PV kernel"
+msgid "Error extending volume %s"
msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:973
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:347
#, python-format
-msgid "Unknown image format %(disk_image_type)s"
+msgid "command %s failed"
msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:1016
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:390
+#, fuzzy, python-format
+msgid "Error running ssh command: %s"
+msgstr "Při spuštění příkazu došlo k nečekané chybě."
+
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:586
#, python-format
-msgid "VDI %s is still available"
+msgid "VV Set %s does not exist."
msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:1059
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:633
#, python-format
-msgid "(VM_UTILS) xenserver vm state -> |%s|"
+msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid."
msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:1061
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:684
#, python-format
-msgid "(VM_UTILS) xenapi power_state -> |%s|"
+msgid ""
+"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is "
+"invalid."
msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:1088
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:752
+#, fuzzy, python-format
+msgid "Volume (%s) already exists on array"
+msgstr "skupina %s již existuje"
+
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:1004
#, python-format
-msgid "Unable to parse rrd of %(vm_uuid)s"
+msgid "Failure in update_volume_key_value_pair: %s"
msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:1108
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:1019
#, python-format
-msgid "Re-scanning SR %s"
+msgid "Failure in clear_volume_key_value_pair: %s"
msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:1136
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:1031
#, python-format
-msgid "Flag sr_matching_filter '%s' does not respect formatting convention"
+msgid "Error attaching volume %s"
msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:1154
-msgid ""
-"XenAPI is unable to find a Storage Repository to install guest instances "
-"on. Please check your configuration and/or configure the flag "
-"'sr_matching_filter'"
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:1039
+#, python-format
+msgid "Error detaching volume %s"
msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:1167
-msgid "Cannot find SR of content-type ISO"
+#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:124
+#, python-format
+msgid "Invalid IP address format '%s'"
msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:1175
+#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:158
#, python-format
-msgid "ISO: looking at SR %(sr_rec)s"
+msgid ""
+"Found invalid iSCSI IP address(es) in configuration option(s) "
+"hp3par_iscsi_ips or iscsi_ip_address '%s'."
msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:1177
-msgid "ISO: not iso content"
+#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:164
+msgid "At least one valid iSCSI IP address must be set."
msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:1180
-msgid "ISO: iso content_type, no 'i18n-key' key"
+#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:266
+msgid "Least busy iSCSI port not found, using first iSCSI port in list."
msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:1183
-msgid "ISO: iso content_type, i18n-key value not 'local-storage-iso'"
+#: cinder/volume/drivers/vmware/api.py:75
+#, python-format
+msgid "Failure while invoking function: %(func)s. Error: %(excep)s."
msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:1187
-msgid "ISO: SR MATCHing our criteria"
+#: cinder/volume/drivers/vmware/api.py:162
+#, python-format
+msgid "Error while terminating session: %s."
msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:1189
-msgid "ISO: ISO, looking to see if it is host local"
+#: cinder/volume/drivers/vmware/api.py:165
+msgid "Successfully established connection to the server."
msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:1192
+#: cinder/volume/drivers/vmware/api.py:172
#, python-format
-msgid "ISO: PBD %(pbd_ref)s disappeared"
+msgid "Error while logging out the user: %s."
msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:1195
+#: cinder/volume/drivers/vmware/api.py:218
#, python-format
-msgid "ISO: PBD matching, want %(pbd_rec)s, have %(host)s"
+msgid ""
+"Not authenticated error occurred. Will create session and try API call "
+"again: %s."
msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:1198
-msgid "ISO: SR with local PBD"
+#: cinder/volume/drivers/vmware/api.py:258
+#, python-format
+msgid "Task: %(task)s progress: %(prog)s."
msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:1220
+#: cinder/volume/drivers/vmware/api.py:262
#, python-format
-msgid ""
-"Unable to obtain RRD XML for VM %(vm_uuid)s with server details: "
-"%(server)s."
+msgid "Task %s status: success."
msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:1236
+#: cinder/volume/drivers/vmware/api.py:266
+#: cinder/volume/drivers/vmware/api.py:271
#, python-format
-msgid "Unable to obtain RRD XML updates with server details: %(server)s."
+msgid "Task: %(task)s failed with error: %(err)s."
msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:1290
-#, python-format
-msgid "Invalid statistics data from Xenserver: %s"
+#: cinder/volume/drivers/vmware/api.py:290
+msgid "Lease is ready."
msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:1343
-#, python-format
-msgid "VHD %(vdi_uuid)s has parent %(parent_ref)s"
+#: cinder/volume/drivers/vmware/api.py:294
+msgid "Lease initializing..."
msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:1417
+#: cinder/volume/drivers/vmware/api.py:304
#, python-format
-msgid ""
-"Parent %(parent_uuid)s doesn't match original parent "
-"%(original_parent_uuid)s, waiting for coalesce..."
+msgid "Error: unknown lease state %s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1427 +#: cinder/volume/drivers/vmware/io_util.py:51 #, python-format -msgid "VHD coalesce attempts exceeded (%(max_attempts)d), giving up..." +msgid "Read %(bytes)s out of %(max)s from ThreadSafePipe." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1462 +#: cinder/volume/drivers/vmware/io_util.py:56 #, python-format -msgid "Timeout waiting for device %s to be created" +msgid "Completed transfer of size %s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1473 +#: cinder/volume/drivers/vmware/io_util.py:102 #, python-format -msgid "Plugging VBD %s ... " +msgid "Initiating image service update on image: %(image)s with meta: %(meta)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1476 +#: cinder/volume/drivers/vmware/io_util.py:117 #, python-format -msgid "Plugging VBD %s done." +msgid "Glance image: %s is now active." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1478 +#: cinder/volume/drivers/vmware/io_util.py:123 #, python-format -msgid "VBD %(vbd_ref)s plugged as %(orig_dev)s" +msgid "Glance image: %s is in killed state." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1481 +#: cinder/volume/drivers/vmware/io_util.py:132 #, python-format -msgid "VBD %(vbd_ref)s plugged into wrong dev, remapping to %(dev)s" +msgid "Glance image %(id)s is in unknown state - %(state)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1490 +#: cinder/volume/drivers/vmware/read_write_util.py:171 #, python-format -msgid "Destroying VBD for VDI %s ... " +msgid "" +"Exception during HTTP connection close in VMwareHTTPWrite. Exception is " +"%s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1498 -#, python-format -msgid "Destroying VBD for VDI %s done." +#: cinder/volume/drivers/vmware/read_write_util.py:203 +#: cinder/volume/drivers/vmware/read_write_util.py:292 +msgid "Could not retrieve URL from lease." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1511 +#: cinder/volume/drivers/vmware/read_write_util.py:206 #, python-format -msgid "Running pygrub against %s" +msgid "Opening vmdk url: %s for write." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1518 +#: cinder/volume/drivers/vmware/read_write_util.py:231 #, python-format -msgid "Found Xen kernel %s" +msgid "Written %s bytes to vmdk." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1520 -msgid "No Xen kernel found. Booting HVM." +#: cinder/volume/drivers/vmware/read_write_util.py:242 +#: cinder/volume/drivers/vmware/read_write_util.py:318 +#, python-format +msgid "Updating progress to %s percent." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1533 -msgid "Partitions:" +#: cinder/volume/drivers/vmware/read_write_util.py:258 +#: cinder/volume/drivers/vmware/read_write_util.py:334 +msgid "Lease released." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1539 +#: cinder/volume/drivers/vmware/read_write_util.py:260 +#: cinder/volume/drivers/vmware/read_write_util.py:336 #, python-format -msgid " %(num)s: %(ptype)s %(size)d sectors" +msgid "Lease is already in state: %s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1565 +#: cinder/volume/drivers/vmware/read_write_util.py:295 #, python-format -msgid "" -"Writing partition table %(primary_first)d %(primary_last)d to " -"%(dev_path)s..." +msgid "Opening vmdk url: %s for read." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1578 +#: cinder/volume/drivers/vmware/read_write_util.py:307 #, python-format -msgid "Writing partition table %s done." +msgid "Read %s bytes from vmdk." 
msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1632 +#: cinder/volume/drivers/vmware/vim.py:150 #, python-format -msgid "" -"Starting sparse_copy src=%(src_path)s dst=%(dst_path)s " -"virtual_size=%(virtual_size)d block_size=%(block_size)d" +msgid "Error(s): %s occurred in the call to RetrievePropertiesEx." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1664 +#: cinder/volume/drivers/vmware/vim.py:189 #, python-format -msgid "" -"Finished sparse_copy in %(duration).2f secs, %(compression_pct).2f%% " -"reduction in size" +msgid "No such SOAP method %(attr)s. Detailed error: %(excep)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1714 -msgid "" -"XenServer tools installed in this image are capable of network injection." -" Networking files will not bemanipulated" +#: cinder/volume/drivers/vmware/vim.py:198 +#, python-format +msgid "httplib error in %(attr)s: %(excep)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1722 -msgid "" -"XenServer tools are present in this image but are not capable of network " -"injection" +#: cinder/volume/drivers/vmware/vim.py:209 +#, python-format +msgid "Socket error in %(attr)s: %(excep)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1726 -msgid "XenServer tools are not installed in this image" +#: cinder/volume/drivers/vmware/vim.py:218 +#, python-format +msgid "Type error in %(attr)s: %(excep)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1742 -msgid "Manipulating interface files directly" +#: cinder/volume/drivers/vmware/vim.py:225 +#, python-format +msgid "Error in %(attr)s. Detailed error: %(excep)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1751 +#: cinder/volume/drivers/vmware/vmdk.py:112 #, python-format -msgid "Failed to mount filesystem (expected for non-linux instances): %s" +msgid "Returning spec value %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:131 cinder/virt/xenapi/vmops.py:722 +#: cinder/volume/drivers/vmware/vmdk.py:115 #, python-format -msgid "Updating progress to %(progress)d" +msgid "Invalid spec value: %s specified." msgstr "" -#: cinder/virt/xenapi/vmops.py:231 +#: cinder/volume/drivers/vmware/vmdk.py:118 #, python-format -msgid "Attempted to power on non-existent instance bad instance id %s" +msgid "Returning default spec value: %s." msgstr "" -#: cinder/virt/xenapi/vmops.py:233 -#, fuzzy -msgid "Starting instance" -msgstr "Instance budou spuštěny" - -#: cinder/virt/xenapi/vmops.py:303 -msgid "Removing kernel/ramdisk files from dom0" +#: cinder/volume/drivers/vmware/vmdk.py:169 +#, python-format +msgid "%s not set." msgstr "" -#: cinder/virt/xenapi/vmops.py:358 -#, fuzzy -msgid "Failed to spawn, rolling back" -msgstr "Nelze pozastavit instanci" - -#: cinder/virt/xenapi/vmops.py:443 -msgid "Detected ISO image type, creating blank VM for install" +#: cinder/volume/drivers/vmware/vmdk.py:174 +#, python-format +msgid "Successfully setup driver: %(driver)s for server: %(ip)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:462 -msgid "Auto configuring disk, attempting to resize partition..." +#: cinder/volume/drivers/vmware/vmdk.py:219 +msgid "Backing not available, no operation to be performed." msgstr "" -#: cinder/virt/xenapi/vmops.py:515 +#: cinder/volume/drivers/vmware/vmdk.py:287 #, python-format -msgid "Invalid value for injected_files: %r" +msgid "" +"Unable to pick datastore to accommodate %(size)s bytes from the " +"datastores: %(dss)s." 
msgstr "" -#: cinder/virt/xenapi/vmops.py:520 +#: cinder/volume/drivers/vmware/vmdk.py:293 #, python-format -msgid "Injecting file path: '%s'" +msgid "" +"Selected datastore: %(datastore)s with %(host_count)d connected host(s) " +"for the volume." msgstr "" -#: cinder/virt/xenapi/vmops.py:527 -msgid "Setting admin password" +#: cinder/volume/drivers/vmware/vmdk.py:375 +#, python-format +msgid "" +"Unable to find suitable datastore for volume of size: %(vol)s GB under " +"host: %(host)s. More details: %(excep)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:531 -msgid "Resetting network" +#: cinder/volume/drivers/vmware/vmdk.py:385 +#, python-format +msgid "Unable to find host to accommodate a disk of size: %s in the inventory." msgstr "" -#: cinder/virt/xenapi/vmops.py:538 -msgid "Setting VCPU weight" +#: cinder/volume/drivers/vmware/vmdk.py:412 +#, python-format +msgid "" +"Unable to find suitable datastore for volume: %(vol)s under host: " +"%(host)s. More details: %(excep)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:544 -msgid "Starting VM" +#: cinder/volume/drivers/vmware/vmdk.py:422 +#, python-format +msgid "Unable to create volume: %s in the inventory." msgstr "" -#: cinder/virt/xenapi/vmops.py:551 +#: cinder/volume/drivers/vmware/vmdk.py:441 #, python-format -msgid "" -"Latest agent build for %(hypervisor)s/%(os)s/%(architecture)s is " -"%(version)s" +msgid "The instance: %s for which initialize connection is called, exists." msgstr "" -#: cinder/virt/xenapi/vmops.py:554 +#: cinder/volume/drivers/vmware/vmdk.py:448 #, python-format -msgid "No agent build found for %(hypervisor)s/%(os)s/%(architecture)s" +msgid "There is no backing for the volume: %s. Need to create one." msgstr "" -#: cinder/virt/xenapi/vmops.py:561 -msgid "Waiting for instance state to become running" +#: cinder/volume/drivers/vmware/vmdk.py:456 +msgid "The instance for which initialize connection is called, does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:573 -msgid "Querying agent version" +#: cinder/volume/drivers/vmware/vmdk.py:461 +#, python-format +msgid "Trying to boot from an empty volume: %s." msgstr "" -#: cinder/virt/xenapi/vmops.py:576 +#: cinder/volume/drivers/vmware/vmdk.py:470 #, python-format -msgid "Instance agent version: %s" +msgid "" +"Returning connection_info: %(info)s for volume: %(volume)s with " +"connector: %(connector)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:581 +#: cinder/volume/drivers/vmware/vmdk.py:518 #, python-format -msgid "Updating Agent to %s" +msgid "Snapshot of volume not supported in state: %s." msgstr "" -#: cinder/virt/xenapi/vmops.py:616 +#: cinder/volume/drivers/vmware/vmdk.py:523 #, python-format -msgid "No opaque_ref could be determined for '%s'." +msgid "There is no backing, so will not create snapshot: %s." msgstr "" -#: cinder/virt/xenapi/vmops.py:670 -msgid "Finished snapshot and upload for VM" +#: cinder/volume/drivers/vmware/vmdk.py:528 +#, python-format +msgid "Successfully created snapshot: %s." msgstr "" -#: cinder/virt/xenapi/vmops.py:677 -#, fuzzy, python-format -msgid "Starting snapshot for VM" -msgstr "Vytvořit snímek svazku %s" - -#: cinder/virt/xenapi/vmops.py:686 -#, fuzzy, python-format -msgid "Unable to Snapshot instance: %(exc)s" -msgstr "Nelze vytvořit typ instance" - -#: cinder/virt/xenapi/vmops.py:702 -msgid "Failed to transfer vhd to new host" +#: cinder/volume/drivers/vmware/vmdk.py:549 +#, python-format +msgid "Delete snapshot of volume not supported in state: %s." 
msgstr "" -#: cinder/virt/xenapi/vmops.py:770 +#: cinder/volume/drivers/vmware/vmdk.py:554 #, python-format -msgid "Resizing down VDI %(cow_uuid)s from %(old_gb)dGB to %(new_gb)dGB" +msgid "There is no backing, and so there is no snapshot: %s." msgstr "" -#: cinder/virt/xenapi/vmops.py:893 +#: cinder/volume/drivers/vmware/vmdk.py:558 #, python-format -msgid "Resizing up VDI %(vdi_uuid)s from %(old_gb)dGB to %(new_gb)dGB" +msgid "Successfully deleted snapshot: %s." msgstr "" -#: cinder/virt/xenapi/vmops.py:901 -msgid "Resize complete" +#: cinder/volume/drivers/vmware/vmdk.py:586 +#, python-format +msgid "Successfully cloned new backing: %(back)s from source VMDK file: %(vmdk)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:928 +#: cinder/volume/drivers/vmware/vmdk.py:603 #, python-format -msgid "Failed to query agent version: %(resp)r" +msgid "" +"There is no backing for the source volume: %(svol)s. Not creating any " +"backing for the volume: %(vol)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:949 +#: cinder/volume/drivers/vmware/vmdk.py:633 #, python-format -msgid "domid changed from %(domid)s to %(newdomid)s" +msgid "" +"There is no backing for the source snapshot: %(snap)s. Not creating any " +"backing for the volume: %(vol)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:962 +#: cinder/volume/drivers/vmware/vmdk.py:642 +#: cinder/volume/drivers/vmware/vmdk.py:982 #, python-format -msgid "Failed to update agent: %(resp)r" +msgid "" +"There is no snapshot point for the snapshoted volume: %(snap)s. Not " +"creating any backing for the volume: %(vol)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:983 +#: cinder/volume/drivers/vmware/vmdk.py:678 #, python-format -msgid "Failed to exchange keys: %(resp)r" +msgid "Cannot create image of disk format: %s. Only vmdk disk format is accepted." msgstr "" -#: cinder/virt/xenapi/vmops.py:998 +#: cinder/volume/drivers/vmware/vmdk.py:713 +#: cinder/volume/drivers/vmware/vmdk.py:771 #, python-format -msgid "Failed to update password: %(resp)r" +msgid "Fetching glance image: %(id)s to server: %(host)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1023 +#: cinder/volume/drivers/vmware/vmdk.py:722 +#: cinder/volume/drivers/vmware/vmdk.py:792 #, python-format -msgid "Failed to inject file: %(resp)r" +msgid "Done copying image: %(id)s to volume: %(vol)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1032 -msgid "VM already halted, skipping shutdown..." +#: cinder/volume/drivers/vmware/vmdk.py:725 +#, python-format +msgid "" +"Exception in copy_image_to_volume: %(excep)s. Deleting the backing: " +"%(back)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1036 -msgid "Shutting down VM" +#: cinder/volume/drivers/vmware/vmdk.py:746 +#, python-format +msgid "Exception in _select_ds_for_volume: %s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1054 -msgid "Unable to find VBD for VM" +#: cinder/volume/drivers/vmware/vmdk.py:749 +#, python-format +msgid "Selected datastore %(ds)s for new volume of size %(size)s GB." msgstr "" -#: cinder/virt/xenapi/vmops.py:1097 -msgid "Using RAW or VHD, skipping kernel and ramdisk deletion" +#: cinder/volume/drivers/vmware/vmdk.py:784 +#, python-format +msgid "Exception in copy_image_to_volume: %s." 
msgstr "" -#: cinder/virt/xenapi/vmops.py:1104 -msgid "instance has a kernel or ramdisk but not both" +#: cinder/volume/drivers/vmware/vmdk.py:787 +#, python-format +msgid "Deleting the backing: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1111 -msgid "kernel/ramdisk files removed" +#: cinder/volume/drivers/vmware/vmdk.py:808 +#, python-format +msgid "Copy glance image: %s to create new volume." msgstr "" -#: cinder/virt/xenapi/vmops.py:1121 -msgid "VM destroyed" +#: cinder/volume/drivers/vmware/vmdk.py:842 +msgid "Upload to glance of attached volume is not supported." msgstr "" -#: cinder/virt/xenapi/vmops.py:1147 -msgid "Destroying VM" +#: cinder/volume/drivers/vmware/vmdk.py:847 +#, python-format +msgid "Copy Volume: %s to new image." msgstr "" -#: cinder/virt/xenapi/vmops.py:1169 -msgid "VM is not present, skipping destroy..." +#: cinder/volume/drivers/vmware/vmdk.py:853 +#, python-format +msgid "Backing not found, creating for volume: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1222 +#: cinder/volume/drivers/vmware/vmdk.py:872 #, python-format -msgid "Instance is already in Rescue Mode: %s" +msgid "Done copying volume %(vol)s to a new image %(img)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1296 +#: cinder/volume/drivers/vmware/vmdk.py:922 #, python-format -msgid "Found %(instance_count)d hung reboots older than %(timeout)d seconds" +msgid "Relocating volume: %(backing)s to %(ds)s and %(rp)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1300 -msgid "Automatically hard rebooting" +#: cinder/volume/drivers/vmware/vmdk.py:961 +#: cinder/volume/drivers/vmware/volumeops.py:630 +#, python-format +msgid "Successfully created clone: %s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1363 +#: cinder/volume/drivers/vmware/vmdk.py:974 #, python-format -msgid "Setting migration %(migration_id)s to error: %(reason)s" +msgid "" +"There is no backing for the snapshoted volume: %(snap)s. Not creating any" +" backing for the volume: %(vol)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1374 +#: cinder/volume/drivers/vmware/vmdk.py:1010 #, python-format msgid "" -"Automatically confirming migration %(migration_id)s for instance " -"%(instance_uuid)s" +"There is no backing for the source volume: %(src)s. Not creating any " +"backing for volume: %(vol)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1379 -#, fuzzy, python-format -msgid "Instance %(instance_uuid)s not found" -msgstr "Instance %(instance_id)s nemohla být nastavena." - -#: cinder/virt/xenapi/vmops.py:1383 -msgid "In ERROR state" +#: cinder/volume/drivers/vmware/vmdk.py:1018 +#, python-format +msgid "Linked clone of source volume not supported in state: %s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1389 +#: cinder/volume/drivers/vmware/vmware_images.py:94 #, python-format -msgid "In %(task_state)s task_state, not RESIZE_VERIFY" +msgid "Downloading image: %s from glance image server as a flat vmdk file." msgstr "" -#: cinder/virt/xenapi/vmops.py:1396 +#: cinder/volume/drivers/vmware/vmware_images.py:107 +#: cinder/volume/drivers/vmware/vmware_images.py:126 #, python-format -msgid "Error auto-confirming resize: %(e)s. Will retry later." +msgid "Downloaded image: %s from glance image server." msgstr "" -#: cinder/virt/xenapi/vmops.py:1418 -msgid "Could not get bandwidth info." +#: cinder/volume/drivers/vmware/vmware_images.py:113 +#, python-format +msgid "Downloading image: %s from glance image server using HttpNfc import." 
msgstr "" -#: cinder/virt/xenapi/vmops.py:1469 -msgid "Injecting network info to xenstore" +#: cinder/volume/drivers/vmware/vmware_images.py:132 +#, python-format +msgid "Uploading image: %s to the Glance image server using HttpNfc export." msgstr "" -#: cinder/virt/xenapi/vmops.py:1483 -msgid "Creating vifs" +#: cinder/volume/drivers/vmware/vmware_images.py:158 +#, python-format +msgid "Uploaded image: %s to the Glance image server." msgstr "" -#: cinder/virt/xenapi/vmops.py:1492 +#: cinder/volume/drivers/vmware/volumeops.py:87 #, python-format -msgid "Creating VIF for network %(network_ref)s" +msgid "Did not find any backing with name: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1495 +#: cinder/volume/drivers/vmware/volumeops.py:94 #, python-format -msgid "Created VIF %(vif_ref)s, network %(network_ref)s" +msgid "Deleting the VM backing: %s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1520 -msgid "Injecting hostname to xenstore" +#: cinder/volume/drivers/vmware/volumeops.py:97 +#, python-format +msgid "Initiated deletion of VM backing: %s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1545 +#: cinder/volume/drivers/vmware/volumeops.py:99 #, python-format -msgid "" -"The agent call to %(method)s returned an invalid response: %(ret)r. " -"path=%(path)s; args=%(args)r" +msgid "Deleted the VM backing: %s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1566 +#: cinder/volume/drivers/vmware/volumeops.py:237 #, python-format -msgid "TIMEOUT: The call to %(method)s timed out. args=%(args)r" +msgid "There are no valid datastores attached to %s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1570 +#: cinder/volume/drivers/vmware/volumeops.py:289 #, python-format msgid "" -"NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. " -"args=%(args)r" +"Creating folder: %(child_folder_name)s under parent folder: " +"%(parent_folder)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1575 +#: cinder/volume/drivers/vmware/volumeops.py:306 #, python-format -msgid "The call to %(method)s returned an error: %(e)s. args=%(args)r" +msgid "Child folder already present: %s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1661 +#: cinder/volume/drivers/vmware/volumeops.py:314 #, python-format -msgid "OpenSSL error: %s" +msgid "Created child folder: %s." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:52 -msgid "creating sr within volume_utils" +#: cinder/volume/drivers/vmware/volumeops.py:365 +#, python-format +msgid "Spec for creating the backing: %s." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:55 cinder/virt/xenapi/volume_utils.py:83 +#: cinder/volume/drivers/vmware/volumeops.py:383 #, python-format -msgid "type is = %s" +msgid "" +"Creating volume backing name: %(name)s disk_type: %(disk_type)s size_kb: " +"%(size_kb)s at folder: %(folder)s resourse pool: %(resource_pool)s " +"datastore name: %(ds_name)s." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:58 cinder/virt/xenapi/volume_utils.py:86 +#: cinder/volume/drivers/vmware/volumeops.py:395 #, python-format -msgid "name = %s" +msgid "Initiated creation of volume backing: %s." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:71 +#: cinder/volume/drivers/vmware/volumeops.py:398 #, python-format -msgid "Created %(label)s as %(sr_ref)s." +msgid "Successfully created volume backing: %s." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:76 cinder/virt/xenapi/volume_utils.py:174 -msgid "Unable to create Storage Repository" +#: cinder/volume/drivers/vmware/volumeops.py:438 +#, python-format +msgid "Spec for relocating the backing: %s." 
msgstr "" -#: cinder/virt/xenapi/volume_utils.py:80 -msgid "introducing sr within volume_utils" +#: cinder/volume/drivers/vmware/volumeops.py:452 +#, python-format +msgid "" +"Relocating backing: %(backing)s to datastore: %(ds)s and resource pool: " +"%(rp)s." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:103 cinder/virt/xenapi/volume_utils.py:170 -#: cinder/virt/xenapi/volumeops.py:156 +#: cinder/volume/drivers/vmware/volumeops.py:462 #, python-format -msgid "Introduced %(label)s as %(sr_ref)s." +msgid "Initiated relocation of volume backing: %s." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:106 -msgid "Creating pbd for SR" +#: cinder/volume/drivers/vmware/volumeops.py:464 +#, python-format +msgid "" +"Successfully relocated volume backing: %(backing)s to datastore: %(ds)s " +"and resource pool: %(rp)s." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:108 -msgid "Plugging SR" +#: cinder/volume/drivers/vmware/volumeops.py:474 +#, python-format +msgid "Moving backing: %(backing)s to folder: %(fol)s." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:116 cinder/virt/xenapi/volumeops.py:160 -msgid "Unable to introduce Storage Repository" +#: cinder/volume/drivers/vmware/volumeops.py:479 +#, python-format +msgid "Initiated move of volume backing: %(backing)s into the folder: %(fol)s." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:127 cinder/virt/xenapi/volumeops.py:50 -msgid "Unable to get SR using uuid" +#: cinder/volume/drivers/vmware/volumeops.py:482 +#, python-format +msgid "Successfully moved volume backing: %(backing)s into the folder: %(fol)s." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:129 +#: cinder/volume/drivers/vmware/volumeops.py:494 #, python-format -msgid "Forgetting SR %s..." +msgid "Snapshoting backing: %(backing)s with name: %(name)s." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:137 -msgid "Unable to forget Storage Repository" +#: cinder/volume/drivers/vmware/volumeops.py:501 +#, python-format +msgid "Initiated snapshot of volume backing: %(backing)s named: %(name)s." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:157 +#: cinder/volume/drivers/vmware/volumeops.py:505 #, python-format -msgid "Introducing %s..." +msgid "Successfully created snapshot: %(snap)s for volume backing: %(backing)s." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:186 +#: cinder/volume/drivers/vmware/volumeops.py:553 #, python-format -msgid "Unable to find SR from VBD %s" +msgid "Deleting the snapshot: %(name)s from backing: %(backing)s." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:204 +#: cinder/volume/drivers/vmware/volumeops.py:558 #, python-format -msgid "Ignoring exception %(exc)s when getting PBDs for %(sr_ref)s" +msgid "" +"Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " +"delete anything." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:210 +#: cinder/volume/drivers/vmware/volumeops.py:565 #, python-format -msgid "Ignoring exception %(exc)s when unplugging PBD %(pbd)s" +msgid "Initiated snapshot: %(name)s deletion for backing: %(backing)s." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:234 +#: cinder/volume/drivers/vmware/volumeops.py:569 #, python-format -msgid "Unable to introduce VDI on SR %s" +msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:242 +#: cinder/volume/drivers/vmware/volumeops.py:597 #, python-format -msgid "Unable to get record of VDI %s on" +msgid "Spec for cloning the backing: %s." 
msgstr "" -#: cinder/virt/xenapi/volume_utils.py:264 +#: cinder/volume/drivers/vmware/volumeops.py:613 #, python-format -msgid "Unable to introduce VDI for SR %s" +msgid "" +"Creating a clone of backing: %(back)s, named: %(name)s, clone type: " +"%(type)s from snapshot: %(snap)s on datastore: %(ds)s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:274 +#: cinder/volume/drivers/vmware/volumeops.py:627 #, python-format -msgid "Error finding vdis in SR %s" +msgid "Initiated clone of backing: %s." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:281 +#: cinder/volume/drivers/vmware/volumeops.py:638 #, python-format -msgid "Unable to find vbd for vdi %s" +msgid "Deleting file: %(file)s under datacenter: %(dc)s." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:315 +#: cinder/volume/drivers/vmware/volumeops.py:646 #, python-format -msgid "Unable to obtain target information %(data)s, %(mountpoint)s" +msgid "Initiated deletion via task: %s." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:341 +#: cinder/volume/drivers/vmware/volumeops.py:648 #, python-format -msgid "Mountpoint cannot be translated: %s" +msgid "Successfully deleted file: %s." msgstr "" -#: cinder/virt/xenapi/volumeops.py:64 -msgid "Could not find VDI ref" +#: cinder/volume/drivers/vmware/volumeops.py:701 +msgid "Copying disk data before snapshot of the VM" msgstr "" -#: cinder/virt/xenapi/volumeops.py:69 +#: cinder/volume/drivers/vmware/volumeops.py:711 #, python-format -msgid "Creating SR %s" -msgstr "" - -#: cinder/virt/xenapi/volumeops.py:73 -msgid "Could not create SR" +msgid "Initiated copying disk data via task: %s." msgstr "" -#: cinder/virt/xenapi/volumeops.py:76 -msgid "Could not retrieve SR record" +#: cinder/volume/drivers/vmware/volumeops.py:713 +#, python-format +msgid "Successfully copied disk at: %(src)s to: %(dest)s." msgstr "" -#: cinder/virt/xenapi/volumeops.py:81 +#: cinder/volume/drivers/vmware/volumeops.py:722 #, python-format -msgid "Introducing SR %s" +msgid "Deleting vmdk file: %s." msgstr "" -#: cinder/virt/xenapi/volumeops.py:85 -msgid "SR found in xapi database. No need to introduce" +#: cinder/volume/drivers/vmware/volumeops.py:729 +#, python-format +msgid "Initiated deleting vmdk file via task: %s." msgstr "" -#: cinder/virt/xenapi/volumeops.py:90 -msgid "Could not introduce SR" +#: cinder/volume/drivers/vmware/volumeops.py:731 +#, python-format +msgid "Deleted vmdk file: %s." msgstr "" -#: cinder/virt/xenapi/volumeops.py:94 +#: cinder/volume/drivers/windows/windows.py:102 #, python-format -msgid "Checking for SR %s" +msgid "Creating folder %s " msgstr "" -#: cinder/virt/xenapi/volumeops.py:106 +#: cinder/volume/drivers/windows/windows_utils.py:47 #, python-format -msgid "SR %s not found in the xapi database" +msgid "" +"check_for_setup_error: the state of the WT Portal could not be verified. " +"WMI exception: %s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:112 -msgid "Could not forget SR" +#: cinder/volume/drivers/windows/windows_utils.py:53 +msgid "check_for_setup_error: there is no ISCSI traffic listening." msgstr "" -#: cinder/virt/xenapi/volumeops.py:121 +#: cinder/volume/drivers/windows/windows_utils.py:63 #, python-format -msgid "Attach_volume: %(connection_info)s, %(instance_name)s, %(mountpoint)s" +msgid "" +"get_host_information: the state of the WT Portal could not be verified. 
" +"WMI exception: %s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:178 +#: cinder/volume/drivers/windows/windows_utils.py:73 #, python-format -msgid "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s" +msgid "" +"get_host_information: the ISCSI target information could not be " +"retrieved. WMI exception: %s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:189 +#: cinder/volume/drivers/windows/windows_utils.py:105 #, python-format -msgid "Unable to use SR %(sr_ref)s for instance %(instance_name)s" +msgid "" +"associate_initiator_with_iscsi_target: an association between initiator: " +"%(init)s and target name: %(target)s could not be established. WMI " +"exception: %(wmi_exc)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:197 +#: cinder/volume/drivers/windows/windows_utils.py:123 #, python-format -msgid "Unable to attach volume to instance %s" +msgid "" +"delete_iscsi_target: error when deleting the iscsi target associated with" +" target name: %(target)s . WMI exception: %(wmi_exc)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:200 +#: cinder/volume/drivers/windows/windows_utils.py:139 #, python-format -msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s" +msgid "" +"create_volume: error when creating the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:210 +#: cinder/volume/drivers/windows/windows_utils.py:157 #, python-format -msgid "Detach_volume: %(instance_name)s, %(mountpoint)s" +msgid "" +"delete_volume: error when deleting the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:219 +#: cinder/volume/drivers/windows/windows_utils.py:177 #, python-format -msgid "Unable to locate volume %s" +msgid "" +"create_snapshot: error when creating the snapshot name: %(vol_name)s . " +"WMI exception: %(wmi_exc)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:227 +#: cinder/volume/drivers/windows/windows_utils.py:193 #, python-format -msgid "Unable to detach volume %s" +msgid "" +"create_volume_from_snapshot: error when creating the volume name: " +"%(vol_name)s from snapshot name: %(snap_name)s. WMI exception: " +"%(wmi_exc)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:232 +#: cinder/volume/drivers/windows/windows_utils.py:208 #, python-format -msgid "Unable to destroy vbd %s" +msgid "" +"delete_snapshot: error when deleting the snapshot name: %(snap_name)s . " +"WMI exception: %(wmi_exc)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:239 +#: cinder/volume/drivers/windows/windows_utils.py:223 #, python-format -msgid "Error purging SR %s" +msgid "" +"create_iscsi_target: error when creating iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:241 +#: cinder/volume/drivers/windows/windows_utils.py:240 #, python-format -msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s" +msgid "" +"remove_iscsi_target: error when deleting iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" msgstr "" -#: cinder/vnc/xvp_proxy.py:98 cinder/vnc/xvp_proxy.py:103 +#: cinder/volume/drivers/windows/windows_utils.py:255 #, python-format -msgid "Error in handshake: %s" +msgid "" +"add_disk_to_target: error adding disk associated to volume : %(vol_name)s" +" to the target name: %(tar_name)s . 
WMI exception: %(wmi_exc)s" msgstr "" -#: cinder/vnc/xvp_proxy.py:119 +#: cinder/volume/drivers/windows/windows_utils.py:273 #, python-format -msgid "Invalid request: %s" +msgid "" +"copy_vhd_disk: error when copying disk from source path : %(src_path)s to" +" destination path: %(dest_path)s . WMI exception: %(wmi_exc)s" msgstr "" -#: cinder/vnc/xvp_proxy.py:139 +#: cinder/volume/drivers/windows/windows_utils.py:290 #, python-format -msgid "Request: %s" +msgid "" +"extend: error when extending the volume: %(vol_name)s .WMI exception: " +"%(wmi_exc)s" msgstr "" -#: cinder/vnc/xvp_proxy.py:142 +#: cinder/volume/flows/common.py:52 #, python-format -msgid "Request made with missing token: %s" +msgid "Restoring source %(source_volid)s status to %(status)s" msgstr "" -#: cinder/vnc/xvp_proxy.py:153 +#: cinder/volume/flows/common.py:58 #, python-format -msgid "Request made with invalid token: %s" +msgid "" +"Failed setting source volume %(source_volid)s back to its initial " +"%(source_status)s status" msgstr "" -#: cinder/vnc/xvp_proxy.py:160 +#: cinder/volume/flows/common.py:83 #, python-format -msgid "Unexpected error: %s" +msgid "Updating volume: %(volume_id)s with %(update)s due to: %(reason)s" msgstr "" -#: cinder/vnc/xvp_proxy.py:180 +#: cinder/volume/flows/common.py:90 +#: cinder/volume/flows/manager/create_volume.py:676 #, python-format -msgid "Starting cinder-xvpvncproxy node (version %s)" +msgid "Failed updating volume %(volume_id)s with %(update)s" msgstr "" -#: cinder/volume/api.py:74 cinder/volume/api.py:220 -msgid "status must be available" -msgstr "" - -#: cinder/volume/api.py:85 +#: cinder/volume/flows/api/create_volume.py:81 #, python-format -msgid "Quota exceeded for %(pid)s, tried to create %(size)sG volume" +msgid "Originating snapshot status must be one of %s values" msgstr "" -#: cinder/volume/api.py:137 -#, fuzzy -msgid "Volume status must be available or error" -msgstr "Obraz musí být dostupný" - -#: cinder/volume/api.py:142 +#: cinder/volume/flows/api/create_volume.py:103 #, python-format -msgid "Volume still has %d dependent snapshots" -msgstr "" - -#: cinder/volume/api.py:223 -msgid "already attached" +msgid "" +"Unable to create a volume from an originating source volume when its " +"status is not one of %s values" msgstr "" -#: cinder/volume/api.py:230 -msgid "already detached" +#: cinder/volume/flows/api/create_volume.py:126 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than the snapshot size " +"%(snap_size)sGB. They must be >= original snapshot size." msgstr "" -#: cinder/volume/api.py:292 -msgid "must be available" +#: cinder/volume/flows/api/create_volume.py:135 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than original volume size " +"%(source_size)sGB. They must be >= original volume size." msgstr "" -#: cinder/volume/api.py:325 -msgid "Volume Snapshot status must be available or error" +#: cinder/volume/flows/api/create_volume.py:144 +#, python-format +msgid "Volume size %(size)s must be an integer and greater than 0" msgstr "" -#: cinder/volume/driver.py:96 +#: cinder/volume/flows/api/create_volume.py:186 #, python-format -msgid "Recovering from a failed execute. Try number %s" +msgid "" +"Size of specified image %(image_size)sGB is larger than volume size " +"%(volume_size)sGB." 
msgstr "" -#: cinder/volume/driver.py:106 +#: cinder/volume/flows/api/create_volume.py:194 #, python-format -msgid "volume group %s doesn't exist" +msgid "" +"Volume size %(volume_size)sGB cannot be smaller than the image minDisk " +"size %(min_disk)sGB." msgstr "" -#: cinder/volume/driver.py:270 +#: cinder/volume/flows/api/create_volume.py:212 #, python-format -msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %d" +msgid "Metadata property key %s greater than 255 characters" msgstr "" -#: cinder/volume/driver.py:318 +#: cinder/volume/flows/api/create_volume.py:217 #, python-format -msgid "Skipping remove_export. No iscsi_target provisioned for volume: %d" +msgid "Metadata property key %s value greater than 255 characters" msgstr "" -#: cinder/volume/driver.py:327 +#: cinder/volume/flows/api/create_volume.py:254 #, python-format -msgid "" -"Skipping remove_export. No iscsi_target is presently exported for volume:" -" %d" +msgid "Availability zone '%s' is invalid" msgstr "" -#: cinder/volume/driver.py:337 -msgid "ISCSI provider_location not stored, using discovery" +#: cinder/volume/flows/api/create_volume.py:267 +msgid "Volume must be in the same availability zone as the snapshot" msgstr "" -#: cinder/volume/driver.py:384 -#, python-format -msgid "Could not find iSCSI export for volume %s" +#: cinder/volume/flows/api/create_volume.py:276 +msgid "Volume must be in the same availability zone as the source volume" msgstr "" -#: cinder/volume/driver.py:388 -#, python-format -msgid "ISCSI Discovery: Found %s" +#: cinder/volume/flows/api/create_volume.py:315 +msgid "Volume type will be changed to be the same as the source volume." msgstr "" -#: cinder/volume/driver.py:466 +#: cinder/volume/flows/api/create_volume.py:463 #, python-format -msgid "Cannot confirm exported volume id:%(volume_id)s." +msgid "Failed destroying volume entry %s" msgstr "" -#: cinder/volume/driver.py:493 +#: cinder/volume/flows/api/create_volume.py:546 #, python-format -msgid "FAKE ISCSI: %s" +msgid "Failed rolling back quota for %s reservations" msgstr "" -#: cinder/volume/driver.py:505 +#: cinder/volume/flows/api/create_volume.py:590 #, python-format -msgid "rbd has no pool %s" +msgid "Failed to update quota for deleting volume: %s" msgstr "" -#: cinder/volume/driver.py:579 +#: cinder/volume/flows/api/create_volume.py:678 +#: cinder/volume/flows/manager/create_volume.py:192 #, python-format -msgid "Sheepdog is not working: %s" +msgid "Volume %s: create failed" msgstr "" -#: cinder/volume/driver.py:581 -msgid "Sheepdog is not working" +#: cinder/volume/flows/api/create_volume.py:682 +msgid "Unexpected build error:" msgstr "" -#: cinder/volume/driver.py:680 cinder/volume/driver.py:685 +#: cinder/volume/flows/manager/create_volume.py:105 #, python-format -msgid "LoggingVolumeDriver: %s" +msgid "" +"Volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d due to " +"%(reason)s" msgstr "" -#: cinder/volume/manager.py:96 +#: cinder/volume/flows/manager/create_volume.py:124 #, python-format -msgid "Re-exporting %s volumes" +msgid "Volume %s: re-scheduled" msgstr "" -#: cinder/volume/manager.py:101 +#: cinder/volume/flows/manager/create_volume.py:141 #, python-format -msgid "volume %s: skipping export" +msgid "Updating volume %(volume_id)s with %(update)s." msgstr "" -#: cinder/volume/manager.py:107 +#: cinder/volume/flows/manager/create_volume.py:146 #, python-format -msgid "volume %s: creating" +msgid "Volume %s: resetting 'creating' status failed." 
msgstr "" -#: cinder/volume/manager.py:119 +#: cinder/volume/flows/manager/create_volume.py:165 #, python-format -msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgid "Volume %s: rescheduling failed" msgstr "" -#: cinder/volume/manager.py:131 +#: cinder/volume/flows/manager/create_volume.py:308 #, python-format -msgid "volume %s: creating export" +msgid "" +"Failed notifying about the volume action %(event)s for volume " +"%(volume_id)s" msgstr "" -#: cinder/volume/manager.py:144 +#: cinder/volume/flows/manager/create_volume.py:345 #, python-format -msgid "volume %s: created successfully" +msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." msgstr "" -#: cinder/volume/manager.py:153 -msgid "Volume is still attached" +#: cinder/volume/flows/manager/create_volume.py:347 +#, python-format +msgid "" +"Failed updating volume %(vol_id)s metadata using the provided " +"%(src_type)s %(src_id)s metadata" msgstr "" -#: cinder/volume/manager.py:155 -msgid "Volume is not local to this node" +#: cinder/volume/flows/manager/create_volume.py:405 +#, python-format +msgid "" +"Failed fetching snapshot %(snapshot_id)s bootable flag using the provided" +" glance snapshot %(snapshot_ref_id)s volume reference" msgstr "" -#: cinder/volume/manager.py:159 +#: cinder/volume/flows/manager/create_volume.py:418 #, python-format -msgid "volume %s: removing export" +msgid "Marking volume %s as bootable." msgstr "" -#: cinder/volume/manager.py:161 +#: cinder/volume/flows/manager/create_volume.py:421 #, python-format -msgid "volume %s: deleting" +msgid "Failed updating volume %(volume_id)s bootable flag to true" msgstr "" -#: cinder/volume/manager.py:164 +#: cinder/volume/flows/manager/create_volume.py:448 #, python-format -msgid "volume %s: volume is busy" +msgid "" +"Attempting download of %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s." msgstr "" -#: cinder/volume/manager.py:176 +#: cinder/volume/flows/manager/create_volume.py:455 +#: cinder/volume/flows/manager/create_volume.py:466 #, python-format -msgid "volume %s: deleted successfully" +msgid "" +"Failed to copy image %(image_id)s to volume: %(volume_id)s, error: " +"%(error)s" msgstr "" -#: cinder/volume/manager.py:183 +#: cinder/volume/flows/manager/create_volume.py:461 #, python-format -msgid "snapshot %s: creating" +msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" msgstr "" -#: cinder/volume/manager.py:187 +#: cinder/volume/flows/manager/create_volume.py:475 #, python-format -msgid "snapshot %(snap_name)s: creating" +msgid "" +"Downloaded image %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s successfully." msgstr "" -#: cinder/volume/manager.py:202 +#: cinder/volume/flows/manager/create_volume.py:512 #, python-format -msgid "snapshot %s: created successfully" +msgid "" +"Creating volume glance metadata for volume %(volume_id)s backed by image " +"%(image_id)s with: %(vol_metadata)s." msgstr "" -#: cinder/volume/manager.py:211 +#: cinder/volume/flows/manager/create_volume.py:526 #, python-format -msgid "snapshot %s: deleting" +msgid "" +"Cloning %(volume_id)s from image %(image_id)s at location " +"%(image_location)s." 
msgstr "" -#: cinder/volume/manager.py:214 +#: cinder/volume/flows/manager/create_volume.py:552 #, python-format -msgid "snapshot %s: snapshot is busy" +msgid "Failed updating volume %(volume_id)s with %(updates)s" msgstr "" -#: cinder/volume/manager.py:226 +#: cinder/volume/flows/manager/create_volume.py:574 #, python-format -msgid "snapshot %s: deleted successfully" +msgid "Unable to create volume. Volume driver %s not initialized" msgstr "" -#: cinder/volume/manager.py:310 -msgid "Checking volume capabilities" +#: cinder/volume/flows/manager/create_volume.py:588 +#, python-format +msgid "" +"Volume %(volume_id)s: being created using %(functor)s with specification:" +" %(volume_spec)s" msgstr "" -#: cinder/volume/manager.py:314 +#: cinder/volume/flows/manager/create_volume.py:611 #, python-format -msgid "New capabilities found: %s" +msgid "" +"Failed updating model of volume %(volume_id)s with creation provided " +"model %(model)s" msgstr "" -#: cinder/volume/manager.py:325 -msgid "Clear capabilities" +#: cinder/volume/flows/manager/create_volume.py:619 +#, python-format +msgid "Volume %s: creating export" msgstr "" -#: cinder/volume/manager.py:329 +#: cinder/volume/flows/manager/create_volume.py:633 #, python-format -msgid "Notification {%s} received" +msgid "" +"Failed updating model of volume %(volume_id)s with driver provided model " +"%(model)s" msgstr "" -#: cinder/volume/netapp.py:79 -#, fuzzy, python-format -msgid "API %(name)sfailed: %(reason)s" -msgstr "Obraz %(image_id)s je nepřijatelný: %(reason)s" - -#: cinder/volume/netapp.py:109 +#: cinder/volume/flows/manager/create_volume.py:680 #, python-format -msgid "%s is not set" +msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" msgstr "" -#: cinder/volume/netapp.py:128 -#, fuzzy -msgid "Connected to DFM server" -msgstr "Znovu připojeno k frontě" +#~ msgid "Error retrieving volume status: %s" +#~ msgstr "" -#: cinder/volume/netapp.py:159 -#, fuzzy, python-format -msgid "Job failed: %s" -msgstr "Vyvoláno Nenalezeno: %s" +#~ msgid "_update_volume_status: Could not get system name" +#~ msgstr "" -#: cinder/volume/netapp.py:240 -msgid "Failed to provision dataset member" -msgstr "" +#~ msgid "_update_volume_status: Could not get storage pool data" +#~ msgstr "" + +#~ msgid "Cannot find any Fibre Channel HBAs" +#~ msgstr "" + +#~ msgid "Volume status must be available or error" +#~ msgstr "Obraz musí být dostupný" + +#~ msgid "No backend config with id %s" +#~ msgstr "" + +#~ msgid "No sm_flavor called %s" +#~ msgstr "" + +#~ msgid "No sm_volume with id %s" +#~ msgstr "" + +#~ msgid "Error: %s" +#~ msgstr "Zachycena chyba: %s" + +#~ msgid "Unexpected state while cloning %s" +#~ msgstr "Při spuštění příkazu došlo k nečekané chybě." + +#~ msgid "iSCSI device not found at %s" +#~ msgstr "" + +#~ msgid "Fibre Channel device not found." 
+#~ msgstr "" + +#~ msgid "Uncaught exception" +#~ msgstr "" + +#~ msgid "Out reactor registered" +#~ msgstr "" + +#~ msgid "CONSUMER GOT %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT QUEUED %(data)s" +#~ msgstr "" + +#~ msgid "Could not create IPC directory %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT %(data)s" +#~ msgstr "" + +#~ msgid "May specify only one of snapshot, imageRef or source volume" +#~ msgstr "" + +#~ msgid "Volume size cannot be lesser than the Snapshot size" +#~ msgstr "" + +#~ msgid "Unable to clone volumes that are in an error state" +#~ msgstr "" + +#~ msgid "Clones currently must be >= original volume size." +#~ msgstr "" + +#~ msgid "Volume size '%s' must be an integer and greater than 0" +#~ msgstr "" + +#~ msgid "Size of specified image is larger than volume size." +#~ msgstr "" + +#~ msgid "Image minDisk size is larger than the volume size." +#~ msgstr "" + +#~ msgid "" +#~ msgstr "" + +#~ msgid "Availability zone is invalid" +#~ msgstr "" + +#~ msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +#~ msgstr "" + +#~ msgid "volume %s: creating from snapshot" +#~ msgstr "Vytvořit svazek ze snímku %s" + +#~ msgid "volume %s: creating from existing volume" +#~ msgstr "" + +#~ msgid "volume %s: creating from image" +#~ msgstr "" + +#~ msgid "volume %s: creating" +#~ msgstr "" + +#~ msgid "Setting volume: %s status to error after failed image copy." +#~ msgstr "" + +#~ msgid "Unexpected Error: " +#~ msgstr "" + +#~ msgid "volume %s: creating export" +#~ msgstr "" + +#~ msgid "volume %s: create failed" +#~ msgstr "" + +#~ msgid "volume %s: created successfully" +#~ msgstr "" + +#~ msgid "volume %s: Error trying to reschedule create" +#~ msgstr "" + +#~ msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +#~ msgstr "" + +#~ msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +#~ msgstr "Nenalezeno žádné cílové id ve svazku %(volume_id)s." + +#~ msgid "Downloaded image %(image_id)s to %(volume_id)s successfully." +#~ msgstr "" + +#~ msgid "Array Mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "LUN %(lun)s of size %(size)s MB is created." +#~ msgstr "" + +#~ msgid "Array mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "Failed to attach iser target for volume %(volume_id)s." 
+#~ msgstr "" + +#~ msgid "Fetching %s" +#~ msgstr "Získávání %s" + +#~ msgid "Link Local address is not found.:%s" +#~ msgstr "Adresa místního spojení nenalezena.: %s" + +#~ msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +#~ msgstr "Nelze získat IP místního spojení %(interface)s :%(ex)s" + +#~ msgid "Started %(name)s on %(_host)s:%(_port)s" +#~ msgstr "%(name)s spuštěno v %(host)s:%(port)s" + +#~ msgid "Unable to find a Fibre Channel volume device" +#~ msgstr "" + +#~ msgid "Volume device not found at %s" +#~ msgstr "" + +#~ msgid "Unable to find Volume Group: %s" +#~ msgstr "" + +#~ msgid "Failed to create Volume Group: %s" +#~ msgstr "Nelze získat metadata pro ip: %s" + +#~ msgid "snapshot %(snap_name)s: creating" +#~ msgstr "" + +#~ msgid "Running with CoraidDriver for ESM EtherCLoud" +#~ msgstr "" + +#~ msgid "Update session cookie %(session)s" +#~ msgstr "" + +#~ msgid "Message : %(message)s" +#~ msgstr "%(code)s: %(message)s" + +#~ msgid "Error while trying to set group: %(message)s" +#~ msgstr "" + +#~ msgid "Unable to find group: %(group)s" +#~ msgstr "Nelze použít globální roli %(role_id)s" + +#~ msgid "ESM urlOpen error" +#~ msgstr "" + +#~ msgid "JSON Error" +#~ msgstr "Chyba přesunu" + +#~ msgid "Request without URL" +#~ msgstr "" + +#~ msgid "Configure data : %s" +#~ msgstr "" + +#~ msgid "Configure response : %s" +#~ msgstr "odpověď %s" + +#~ msgid "Unable to retrive volume infos for volume %(volname)s" +#~ msgstr "Nenalezeno žádné cílové id ve svazku %(volume_id)s." + +#~ msgid "Cannot login on Coraid ESM" +#~ msgstr "" + +#~ msgid "Fail to create volume %(volname)s" +#~ msgstr "" + +#~ msgid "Failed to delete volume %(volname)s" +#~ msgstr "" + +#~ msgid "Failed to Create Snapshot %(snapname)s" +#~ msgstr "Nelze restartovat instanci" + +#~ msgid "Failed to Delete Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "Failed to Create Volume from Snapshot %(snapname)s" +#~ msgstr "Vytvořit svazek ze snímku %s" + +#~ msgid "fmt = %(fmt)s backed by: %(backing_file)s" +#~ msgstr "" + +#~ msgid "Expected image to be in raw format, but is %s" +#~ msgstr "" + +#~ msgid "volume group %s doesn't exist" +#~ msgstr "" + +#~ msgid "Error retrieving volume stats: %s" +#~ msgstr "" -#: cinder/volume/netapp.py:252 -msgid "No LUN was created by the provision job" -msgstr "" +#~ msgid "_update_volume_stats: Could not get system name" +#~ msgstr "" -#: cinder/volume/netapp.py:261 cinder/volume/netapp.py:433 -#, python-format -msgid "Failed to find LUN ID for volume %s" -msgstr "" +#~ msgid "CPG (%s) must be in a domain" +#~ msgstr "" -#: cinder/volume/netapp.py:280 -msgid "Failed to remove and delete dataset member" -msgstr "" +#~ msgid "Error populating default encryption types!" +#~ msgstr "" -#: cinder/volume/netapp.py:603 cinder/volume/netapp.py:657 -#, python-format -msgid "No LUN ID for volume %s" -msgstr "" +#~ msgid "Unexpected error while running command." +#~ msgstr "Při spuštění příkazu došlo k nečekané chybě." 
-#: cinder/volume/netapp.py:607 cinder/volume/netapp.py:661 -#, fuzzy, python-format -msgid "Failed to get LUN details for LUN ID %s" -msgstr "Nelze získat metadata pro ip: %s" +#~ msgid "Nexenta SA returned the error" +#~ msgstr "" -#: cinder/volume/netapp.py:614 -#, fuzzy, python-format -msgid "Failed to get host details for host ID %s" -msgstr "Nelze získat metadata pro ip: %s" +#~ msgid "Ignored target group creation error \"%s\" while ensuring export" +#~ msgstr "" -#: cinder/volume/netapp.py:620 -#, fuzzy, python-format -msgid "Failed to get target portal for filer: %s" -msgstr "Nelze získat metadata pro ip: %s" +#~ msgid "Ignored target group member addition error \"%s\" while ensuring export" +#~ msgstr "" -#: cinder/volume/netapp.py:625 -#, fuzzy, python-format -msgid "Failed to get target IQN for filer: %s" -msgstr "Nelze získat metadata pro ip: %s" +#~ msgid "Ignored LU creation error \"%s\" while ensuring export" +#~ msgstr "" -#: cinder/volume/san.py:113 cinder/volume/san.py:151 -msgid "Specify san_password or san_private_key" -msgstr "" +#~ msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +#~ msgstr "" -#: cinder/volume/san.py:156 -msgid "san_ip must be set" -msgstr "" +#~ msgid "Copying metadata from snapshot %(snap_volume_id)s to %(volume_id)s" +#~ msgstr "" -#: cinder/volume/san.py:320 -#, python-format -msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" -msgstr "" +#~ msgid "Connection to glance failed" +#~ msgstr "Připojení k glance selhalo" -#: cinder/volume/san.py:452 -#, python-format -msgid "CLIQ command returned %s" -msgstr "" +#~ msgid "Invalid snapshot" +#~ msgstr "Neplatný snímek" -#: cinder/volume/san.py:458 -#, python-format -msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" -msgstr "" +#~ msgid "Invalid input received" +#~ msgstr "Obdržen neplatný vstup" -#: cinder/volume/san.py:466 -#, python-format -msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" -msgstr "" +#~ msgid "Invalid volume type" +#~ msgstr "Neplatný typ svazku" -#: cinder/volume/san.py:496 -#, python-format -msgid "" -"Unexpected number of virtual ips for cluster %(cluster_name)s. " -"Result=%(_xml)s" -msgstr "" +#~ msgid "Invalid volume" +#~ msgstr "Neplatný svazek" -#: cinder/volume/san.py:549 -#, python-format -msgid "Volume info: %(volume_name)s => %(volume_attributes)s" -msgstr "" +#~ msgid "Invalid host" +#~ msgstr "" -#: cinder/volume/san.py:594 -msgid "local_path not supported" -msgstr "" +#~ msgid "Invalid auth key" +#~ msgstr "Neplatný snímek" -#: cinder/volume/san.py:626 -#, python-format -msgid "Could not determine project for volume %s, can't export" -msgstr "" +#~ msgid "Invalid metadata" +#~ msgstr "Neplatná metadata" -#: cinder/volume/san.py:696 -#, python-format -msgid "Payload for SolidFire API call: %s" -msgstr "" +#~ msgid "Invalid metadata size" +#~ msgstr "Neplatný klíč metadata" -#: cinder/volume/san.py:713 -#, python-format -msgid "Call to json.loads() raised an exception: %s" -msgstr "" +#~ msgid "Migration error" +#~ msgstr "Chyba přesunu" -#: cinder/volume/san.py:718 -#, python-format -msgid "Results of SolidFire API call: %s" -msgstr "" +#~ msgid "Quota exceeded" +#~ msgstr "Kvóta překročena" -#: cinder/volume/san.py:732 -#, python-format -msgid "Found solidfire account: %s" -msgstr "" +#~ msgid "Connection to swift failed" +#~ msgstr "Připojení k glance selhalo" -#: cinder/volume/san.py:746 -#, python-format -msgid "solidfire account: %s does not exist, create it..." 
-msgstr "" +#~ msgid "Volume migration failed" +#~ msgstr "" -#: cinder/volume/san.py:804 -msgid "Enter SolidFire create_volume..." -msgstr "" +#~ msgid "SSH command injection detected" +#~ msgstr "" -#: cinder/volume/san.py:846 -msgid "Leaving SolidFire create_volume" -msgstr "" +#~ msgid "Invalid qos specs" +#~ msgstr "" -#: cinder/volume/san.py:861 -msgid "Enter SolidFire delete_volume..." -msgstr "" +#~ msgid "debug in callback: %s" +#~ msgstr "ladění ve zpětném volání: %s" -#: cinder/volume/san.py:880 -#, python-format -msgid "Deleting volumeID: %s " -msgstr "" +#~ msgid "Expected object of type: %s" +#~ msgstr "Očekáván objekt typu: %s" -#: cinder/volume/san.py:888 -msgid "Leaving SolidFire delete_volume" -msgstr "" +#~ msgid "timefunc: '%(name)s' took %(total_time).2f secs" +#~ msgstr "timefunc: '%(name)s' trvalo %(total_time).2f sek" -#: cinder/volume/san.py:891 -msgid "Executing SolidFire ensure_export..." -msgstr "" +#~ msgid "base image still has %s snapshots so not deleting base image" +#~ msgstr "" -#: cinder/volume/san.py:895 -msgid "Executing SolidFire create_export..." -msgstr "" +#~ msgid "Failed to rename migration destination volume %(vol)s to %(name)s" +#~ msgstr "" -#: cinder/volume/volume_types.py:49 cinder/volume/volume_types.py:108 -msgid "name cannot be None" -msgstr "" +#~ msgid "Resize volume \"%(name)s\" to %(size)s" +#~ msgstr "" -#: cinder/volume/volume_types.py:96 -msgid "id cannot be None" -msgstr "" +#~ msgid "Volume \"%(name)s\" resized. New size is %(size)s" +#~ msgstr "" -#: cinder/volume/xensm.py:55 -#, python-format -msgid "SR name = %s" -msgstr "" +#~ msgid "Invalid snapshot backing file format: %s" +#~ msgstr "" -#: cinder/volume/xensm.py:56 -#, python-format -msgid "Params: %s" -msgstr "" +#~ msgid "Extend volume from %(old_size) to %(new_size)" +#~ msgstr "" -#: cinder/volume/xensm.py:60 -#, python-format -msgid "Failed to create sr %s...continuing" -msgstr "" +#~ msgid "pool %s doesn't exist" +#~ msgstr "Instance neexistuje" -#: cinder/volume/xensm.py:62 -msgid "Create failed" -msgstr "" +#~ msgid "_update_volume_stats: Could not get system name." +#~ msgstr "" -#: cinder/volume/xensm.py:64 -#, python-format -msgid "SR UUID of new SR is: %s" -msgstr "" +#~ msgid "Disk not found: %s" +#~ msgstr "Instance nenalezena" -#: cinder/volume/xensm.py:71 -msgid "Failed to update db" -msgstr "" +#~ msgid "read timed out" +#~ msgstr "" -#: cinder/volume/xensm.py:80 -#, python-format -msgid "Failed to introduce sr %s...continuing" -msgstr "" +#~ msgid "check_for_setup_error." +#~ msgstr "" -#: cinder/volume/xensm.py:91 -#, python-format -msgid "Failed to reach backend %d" -msgstr "" +#~ msgid "check_for_setup_error: Can not get device type." +#~ msgstr "" -#: cinder/volume/xensm.py:100 -msgid "XenSMDriver requires xenapi connection" -msgstr "" +#~ msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +#~ msgstr "" -#: cinder/volume/xensm.py:110 -msgid "Failed to initiate session" -msgstr "" +#~ msgid "_get_device_type: Storage Pool must be configured." +#~ msgstr "" -#: cinder/volume/xensm.py:142 -#, python-format -msgid "Volume will be created in backend - %d" -msgstr "" +#~ msgid "create_volume:volume name: %s." +#~ msgstr "" -#: cinder/volume/xensm.py:154 -msgid "Failed to update volume in db" -msgstr "" +#~ msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." 
+#~ msgstr "" -#: cinder/volume/xensm.py:157 -msgid "Unable to create volume" -msgstr "" +#~ msgid "create_export: volume name:%s" +#~ msgstr "Vytvořit snímek svazku %s" -#: cinder/volume/xensm.py:171 -msgid "Failed to delete vdi" -msgstr "" +#~ msgid "create_export:Volume %(name)s does not exist." +#~ msgstr "" -#: cinder/volume/xensm.py:177 -msgid "Failed to delete volume in db" -msgstr "" +#~ msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +#~ msgstr "" -#: cinder/volume/xensm.py:210 -msgid "Failed to find volume in db" -msgstr "" +#~ msgid "terminate_connection:Host does not exist. Host name:%(host)s." +#~ msgstr "" -#: cinder/volume/xensm.py:221 -msgid "Failed to find backend in db" -msgstr "" +#~ msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +#~ msgstr "" -#: cinder/volume/nexenta/__init__.py:27 -msgid "Nexenta SA returned the error" -msgstr "" +#~ msgid "create_snapshot:Device does not support snapshot." +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:64 -#, python-format -msgid "Sending JSON data: %s" -msgstr "" +#~ msgid "create_snapshot:Resource pool needs 1GB valid size at least." +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:69 -#, python-format -msgid "Auto switching to HTTPS connection to %s" -msgstr "" +#~ msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:75 -msgid "No headers in server response" -msgstr "" +#~ msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s" +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:76 -#, fuzzy -msgid "Bad response from server" -msgstr "Špatná odpověď od SolidFire API" +#~ msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:79 -#, fuzzy, python-format -msgid "Got response: %s" -msgstr "odpověď %s" +#~ msgid "delete_snapshot:Device does not support snapshot." +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:96 -#, python-format -msgid "Volume %s does not exist in Nexenta SA" -msgstr "" +#~ msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:180 -msgid "" -"Call to local_path should not happen. Verify that use_local_volumes flag " -"is turned off." -msgstr "" +#~ msgid "_check_conf_file: %s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:202 -#, python-format -msgid "Ignored target creation error \"%s\" while ensuring export" -msgstr "" +#~ msgid "Write login information to xml error. %s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:210 -#, python-format -msgid "Ignored target group creation error \"%s\" while ensuring export" -msgstr "" +#~ msgid "_get_login_info error. %s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:219 -#, python-format -msgid "Ignored target group member addition error \"%s\" while ensuring export" -msgstr "" +#~ msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." 
+#~ msgstr "" -#: cinder/volume/nexenta/volume.py:227 -#, python-format -msgid "Ignored LU creation error \"%s\" while ensuring export" -msgstr "" +#~ msgid "_get_lun_set_info:%s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:237 -#, python-format -msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" -msgstr "" +#~ msgid "_get_iscsi_info:%s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:273 -#, python-format -msgid "" -"Got error trying to destroy target group %(target_group)s, assuming it is" -" already gone: %(exc)s" -msgstr "" +#~ msgid "CLI command:%s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:280 -#, python-format -msgid "" -"Got error trying to delete target %(target)s, assuming it is already " -"gone: %(exc)s" -msgstr "" +#~ msgid "_execute_cli:%s" +#~ msgstr "" -#~ msgid "Cinder access parameters were not specified." -#~ msgstr "Přístupové parametry Cinder nebyly zadány." +#~ msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +#~ msgstr "" -#~ msgid "Virtual Storage Array %(id)d could not be found." -#~ msgstr "Pole virtuálního úložiště %(id)d nebylo nalezeno." +#~ msgid "_get_tgt_iqn:iSCSI IP is %s." +#~ msgstr "" -#~ msgid "Virtual Storage Array %(name)s could not be found." -#~ msgstr "Pole virtuálního úložiště %(name)s nebylo nalezeno." +#~ msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +#~ msgstr "" -#~ msgid "Parent group id and group id cannot be same" +#~ msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" #~ msgstr "" -#~ msgid "No body provided" +#~ msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" #~ msgstr "" -#~ msgid "Create VSA %(display_name)s of type %(vc_type)s" +#~ msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." #~ msgstr "" -#~ msgid "Delete VSA with id: %s" +#~ msgid "Ignored target creation error while ensuring export" #~ msgstr "" -#~ msgid "Associate address %(ip)s to VSA %(id)s" +#~ msgid "Ignored target group creation error while ensuring export" #~ msgstr "" -#~ msgid "Disassociate address from VSA %(id)s" +#~ msgid "Ignored target group member addition error while ensuring export" #~ msgstr "" -#~ msgid "%(obj)s with ID %(id)s not found" +#~ msgid "Ignored LU creation error while ensuring export" #~ msgstr "" -#~ msgid "" -#~ "%(obj)s with ID %(id)s belongs to " -#~ "VSA %(own_vsa_id)s and not to VSA " -#~ "%(vsa_id)s." +#~ msgid "Ignored LUN mapping entry addition error while ensuring export" #~ msgstr "" -#~ msgid "Index. vsa_id=%(vsa_id)s" +#~ msgid "Invalid source volume %(reason)s." #~ msgstr "" -#~ msgid "Detail. vsa_id=%(vsa_id)s" +#~ msgid "The request is invalid." +#~ msgstr "Požadavek je neplatný." + +#~ msgid "Volume %(volume_id)s persistence file could not be found." +#~ msgstr "Svazek %(volume_id)s nemohl být nastaven." + +#~ msgid "No disk at %(location)s" +#~ msgstr "Źádný disk ve %(location)s" + +#~ msgid "Class %(class_name)s could not be found: %(exception)s" +#~ msgstr "Třída %(class_name)s nemohla být nalezena: %(exception)s" + +#~ msgid "Action not allowed." +#~ msgstr "Činnost není povolena." + +#~ msgid "Key pair %(key_name)s already exists." +#~ msgstr "Dvojice klíčů %(key_name)s již existuje." + +#~ msgid "Migration error: %(reason)s" #~ msgstr "" -#~ msgid "Create. vsa_id=%(vsa_id)s, body=%(body)s" +#~ msgid "Maximum volume/snapshot size exceeded" #~ msgstr "" -#~ msgid "Create volume of %(size)s GB from VSA ID %(vsa_id)s" +#~ msgid "3PAR Host already exists: %(err)s. 
%(info)s" #~ msgstr "" -#~ msgid "Update %(obj)s with id: %(id)s, changes: %(changes)s" +#~ msgid "Backup volume %(volume_id)s type not recognised." #~ msgstr "" -#~ msgid "Delete. vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s" #~ msgstr "" -#~ msgid "Show. vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgid "ssh_read: Read SSH timeout" #~ msgstr "" -#~ msgid "Index instances for VSA %s" +#~ msgid "do_setup." #~ msgstr "" -#~ msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances" +#~ msgid "create_volume: volume name: %s." #~ msgstr "" -#~ msgid "" -#~ "Instance quota exceeded. You cannot run" -#~ " any more instances of this type." +#~ msgid "delete_volume: volume name: %s." #~ msgstr "" -#~ msgid "" -#~ "Instance quota exceeded. You can only" -#~ " run %s more instances of this " -#~ "type." +#~ msgid "create_cloned_volume: src volume: %(src)s tgt volume: %(tgt)s" #~ msgstr "" -#~ msgid "Going to try to soft delete %s" +#~ msgid "create_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" #~ msgstr "" -#~ msgid "No host for instance %s, deleting immediately" +#~ msgid "delete_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" #~ msgstr "" -#~ msgid "Going to try to terminate %s" +#~ msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s" #~ msgstr "" -#~ msgid "Going to try to stop %s" +#~ msgid "_update_volume_stats: Updating volume stats" #~ msgstr "" -#~ msgid "Going to try to start %s" +#~ msgid "restore finished." #~ msgstr "" -#~ msgid "" -#~ "Going to force the deletion of the" -#~ " vm %(instance_uuid)s, even if it is" -#~ " deleted" +#~ msgid "Error encountered during initialization of driver: %s" #~ msgstr "" -#~ msgid "" -#~ "Instance %(instance_uuid)s did not exist " -#~ "in the DB, but I will shut " -#~ "it down anyway using a special " -#~ "context" +#~ msgid "Unabled to update stats, driver is uninitialized" #~ msgstr "" -#~ msgid "exception terminating the instance %(instance_uuid)s" +#~ msgid "Snapshot file at %s does not exist." #~ msgstr "" -#~ msgid "trying to destroy already destroyed instance: %s" +#~ msgid "_create_copy: Source vdisk %s does not exist" #~ msgstr "" -#~ msgid "" -#~ "Instance %(name)s found in database but" -#~ " not known by hypervisor. Setting " -#~ "power state to NOSTATE" +#~ msgid "Login to 3PAR array invalid" #~ msgstr "" -#~ msgid "" -#~ "Detected instance with name label " -#~ "'%(name_label)s' which is marked as " -#~ "DELETED but still present on host." +#~ msgid "There are no datastores present under %s." #~ msgstr "" -#~ msgid "" -#~ "Destroying instance with name label " -#~ "'%(name_label)s' which is marked as " -#~ "DELETED but still present on host." +#~ msgid "Size for volume: %s not found, skipping secure delete." #~ msgstr "" -#~ msgid "SQL connection failed (%(connstring)s). %(attempts)d attempts left." 
+#~ msgid "Could not find attribute for LUN named %s" #~ msgstr "" -#~ msgid "Can't downgrade without losing data" +#~ msgid "Cleaning up incomplete backup operations" #~ msgstr "" -#~ msgid "Instance %(instance_id)s not found" +#~ msgid "Resetting volume %s to available (was backing-up)" #~ msgstr "" -#~ msgid "Network %s has active ports, cannot delete" +#~ msgid "Resetting volume %s to error_restoring (was restoring-backup)" #~ msgstr "" -#~ msgid "No fixed IPs to deallocate for vif %sid" +#~ msgid "Resetting backup %s to error (was creating)" #~ msgstr "" -#~ msgid "" -#~ "AMQP server on %(fl_host)s:%(fl_port)d is " -#~ "unreachable: %(e)s. Trying again in " -#~ "%(fl_intv)d seconds." +#~ msgid "Resetting backup %s to available (was restoring)" #~ msgstr "" -#~ msgid "Unable to connect to AMQP server after %(tries)d tries. Shutting down." +#~ msgid "Resuming delete on backup: %s" #~ msgstr "" -#~ msgid "Failed to fetch message from queue: %s" +#~ msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" #~ msgstr "" -#~ msgid "Initing the Adapter Consumer for %s" +#~ msgid "create_backup finished. backup: %s" #~ msgstr "" -#~ msgid "Created \"%(exchange)s\" fanout exchange with \"%(key)s\" routing key" +#~ msgid "delete_backup started, backup: %s" #~ msgstr "" -#~ msgid "Exception while processing consumer" +#~ msgid "delete_backup finished, backup %s deleted" #~ msgstr "" -#~ msgid "Creating \"%(exchange)s\" fanout exchange" +#~ msgid "JSON transfer Error" #~ msgstr "" -#~ msgid "topic is %s" +#~ msgid "create volume error: %(err)s" #~ msgstr "" -#~ msgid "message %s" -#~ msgstr "zpráva %s" +#~ msgid "Create snapshot error." +#~ msgstr "" -#~ msgid "" -#~ "Cannot confirm tmpfile at %(ipath)s is" -#~ " on same shared storage between " -#~ "%(src)s and %(dest)s." +#~ msgid "Create luncopy error." #~ msgstr "" -#~ msgid "" -#~ "Unable to migrate %(instance_id)s to " -#~ "%(dest)s: Lack of memory(host:%(avail)s <= " -#~ "instance:%(mem_inst)s)" +#~ msgid "_find_host_lun_id transfer data error! " #~ msgstr "" -#~ msgid "" -#~ "Unable to migrate %(instance_id)s to " -#~ "%(dest)s: Lack of disk(host:%(available)s <=" -#~ " instance:%(necessary)s)" +#~ msgid "ssh_read: Read SSH timeout." #~ msgstr "" -#~ msgid "Driver Method %(driver_method)s missing: %(e)s.Reverting to schedule()" +#~ msgid "There are no hosts in the inventory." #~ msgstr "" -#~ msgid "Setting instance %(instance_uuid)s to ERROR state." +#~ msgid "Unable to create volume: %(vol)s on the hosts: %(hosts)s." #~ msgstr "" -#~ msgid "_filter_hosts: %(request_spec)s" +#~ msgid "Successfully cloned new backing: %s." #~ msgstr "" -#~ msgid "Filter hosts for drive type %s" +#~ msgid "Successfully reverted clone: %(clone)s to snapshot: %(snapshot)s." #~ msgstr "" -#~ msgid "Host %s has no free capacity. Skip" +#~ msgid "Copying backing files from %(src)s to %(dest)s." #~ msgstr "" -#~ msgid "Filter hosts: %s" +#~ msgid "Initiated copying of backing via task: %s." #~ msgstr "" -#~ msgid "Must implement host selection mechanism" +#~ msgid "Successfully copied backing to %s." #~ msgstr "" -#~ msgid "Maximum number of hosts selected (%d)" +#~ msgid "Registering backing at path: %s to inventory." #~ msgstr "" -#~ msgid "Selected excessive host %(host)s" +#~ msgid "Initiated registring backing, task: %s." #~ msgstr "" -#~ msgid "Provision volume %(name)s of size %(size)s GB on host %(host)s" +#~ msgid "Successfully registered backing: %s." 
#~ msgstr "" -#~ msgid "volume_params %(volume_params)s" +#~ msgid "Reverting backing to snapshot: %s." #~ msgstr "" -#~ msgid "%(i)d: Volume %(name)s" +#~ msgid "Initiated reverting snapshot via task: %s." #~ msgstr "" -#~ msgid "Attempting to spawn %(num_volumes)d volume(s)" +#~ msgid "Successfully reverted to snapshot: %s." #~ msgstr "" -#~ msgid "Error creating volumes" +#~ msgid "Successfully copied disk data to: %s." #~ msgstr "" -#~ msgid "Non-VSA volume %d" +#~ msgid "Error(s): %s occurred in the call to RetrieveProperties." #~ msgstr "" -#~ msgid "Spawning volume %(volume_id)s with drive type %(drive_type)s" +#~ msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" #~ msgstr "" -#~ msgid "Error creating volume" +#~ msgid "Deploy v1 of the Cinder API. " #~ msgstr "" -#~ msgid "No capability selected for volume of size %(size)s" +#~ msgid "Deploy v2 of the Cinder API. " #~ msgstr "" -#~ msgid "Host %s:" +#~ msgid "_read_xml:%s" #~ msgstr "" -#~ msgid "" -#~ "\tDrive %(qosgrp)-25s: total %(total)2s, " -#~ "used %(used)2s, free %(free)2s. Available " -#~ "capacity %(avail)-5s" +#~ msgid "request ip info is %s." #~ msgstr "" -#~ msgid "" -#~ "\t LeastUsedHost: Best host: %(best_host)s." -#~ " (used capacity %(min_used)s)" +#~ msgid "new str info is %s." #~ msgstr "" -#~ msgid "" -#~ "\t MostAvailCap: Best host: %(best_host)s. " -#~ "(available %(max_avail)s %(type_str)s)" +#~ msgid "Failed to create iser target for volume %(volume_id)s." #~ msgstr "" -#~ msgid "(%(nm)s) publish (key: %(routing_key)s) %(message)s" +#~ msgid "Failed to remove iser target for volume %(volume_id)s." #~ msgstr "" -#~ msgid "Publishing to route %s" +#~ msgid "rtstool is not installed correctly" #~ msgstr "" -#~ msgid "Declaring queue %s" +#~ msgid "Creating iser_target for: %s" #~ msgstr "" -#~ msgid "Declaring exchange %s" +#~ msgid "Failed to create iser target for volume id:%(vol_id)s: %(e)s" #~ msgstr "" -#~ msgid "Binding %(queue)s to %(exchange)s with key %(routing_key)s" +#~ msgid "Removing iser_target for: %s" #~ msgstr "" -#~ msgid "Getting from %(queue)s: %(message)s" +#~ msgid "Failed to remove iser target for volume id:%(vol_id)s: %(e)s" #~ msgstr "" -#~ msgid "Test: Emulate wrong VSA name. Raise" +#~ msgid "Volume %s does not exist, it seems it was already deleted" #~ msgstr "" -#~ msgid "Test: Emulate DB error. Raise" +#~ msgid "Executing zfs send/recv on the appliance" #~ msgstr "" -#~ msgid "Test: user_data = %s" +#~ msgid "zfs send/recv done, new volume %s created" #~ msgstr "" -#~ msgid "_create: param=%s" +#~ msgid "Failed to delete temp snapshot %(volume)s@%(snapshot)s" #~ msgstr "" -#~ msgid "Host %s" +#~ msgid "Failed to delete zfs recv snapshot %(volume)s@%(snapshot)s" #~ msgstr "" -#~ msgid "Test: provision vol %(name)s on host %(host)s" +#~ msgid "rbd export-diff failed - %s" #~ msgstr "" -#~ msgid "\t vol=%(vol)s" +#~ msgid "rbd import-diff failed - %s" #~ msgstr "" -#~ msgid "Test: VSA update request: vsa_id=%(vsa_id)s values=%(values)s" +#~ msgid "%s is not on GPFS. Perhaps GPFS not mounted." #~ msgstr "" -#~ msgid "Test: Volume create: %s" +#~ msgid "Folder %s does not exist, it seems it was already deleted." #~ msgstr "" -#~ msgid "Test: Volume get request: id=%(volume_id)s" +#~ msgid "No 'os-update_readonly_flag' was specified in request." #~ msgstr "" -#~ msgid "Test: Volume update request: id=%(volume_id)s values=%(values)s" +#~ msgid "Volume 'readonly' flag must be specified in request as a boolean." 
#~ msgstr "" -#~ msgid "Test: Volume get: id=%(volume_id)s" +#~ msgid "ISER provider_location not stored, using discovery" #~ msgstr "" -#~ msgid "Task [%(name)s] %(task)s status: success %(result)s" +#~ msgid "Could not find iSER export for volume %s" #~ msgstr "" -#~ msgid "Task [%(name)s] %(task)s status: %(status)s %(error_info)s" +#~ msgid "ISER Discovery: Found %s" #~ msgstr "" -#~ msgid "Unable to get updated status: %s" +#~ msgid "Failed to access the device on the path %(path)s: %(error)s." #~ msgstr "" -#~ msgid "" -#~ "deactivate_node is called for " -#~ "node_id = %(id)s node_ip = %(ip)s" +#~ msgid "iSER device not found at %s" #~ msgstr "" -#~ msgid "virsh said: %r" +#~ msgid "Found iSER node %(host_device)s (after %(tries)s rescans)." #~ msgstr "" -#~ msgid "cool, it's a device" +#~ msgid "Skipping ensure_export. No iser_target provisioned for volume: %s" #~ msgstr "" -#~ msgid "Unable to read LXC console" +#~ msgid "Skipping remove_export. No iser_target provisioned for volume: %s" #~ msgstr "" -#~ msgid "" -#~ "to xml...\n" -#~ ":%s " +#~ msgid "Downloading image: %s from glance image server." #~ msgstr "" -#~ msgid "During wait running, %s disappeared." +#~ msgid "Uploading image: %s to the Glance image server." #~ msgstr "" -#~ msgid "Instance %s running successfully." +#~ msgid "Invalid request body" +#~ msgstr "Neplatné tělo požadavku" + +#~ msgid "enter: _get_host_from_connector: prefix %s" #~ msgstr "" -#~ msgid "The nwfilter(%(instance_secgroup_filter_name)s) is not found." +#~ msgid "Schedule volume flow not retrieved" #~ msgstr "" -#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image verification failed" +#~ msgid "Failed to successfully complete schedule volume using flow: %s" #~ msgstr "" -#~ msgid "" -#~ "%(container_format)s-%(id)s (%(base_file)s): image " -#~ "verification skipped, no hash stored" +#~ msgid "Create volume flow not retrieved" #~ msgstr "" -#~ msgid "%(container_format)s-%(id)s (%(base_file)s): checking" +#~ msgid "Failed to successfully complete create volume workflow" #~ msgstr "" -#~ msgid "" -#~ "%(container_format)s-%(id)s (%(base_file)s): in use:" -#~ " on this node %(local)d local, " -#~ "%(remote)d on other nodes" +#~ msgid "Expected volume result not found" #~ msgstr "" -#~ msgid "" -#~ "%(container_format)s-%(id)s (%(base_file)s): warning " -#~ "-- an absent base file is in " -#~ "use! instances: %(instance_list)s" +#~ msgid "Manager volume flow not retrieved" #~ msgstr "" -#~ msgid "" -#~ "%(container_format)s-%(id)s (%(base_file)s): in: on" -#~ " other nodes (%(remote)d on other " -#~ "nodes)" +#~ msgid "Failed to successfully complete manager volume workflow" #~ msgstr "" -#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image is not in use" +#~ msgid "Unable to update stats, driver is uninitialized" #~ msgstr "" -#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image is in use" +#~ msgid "Bad reponse from server: %s" #~ msgstr "" -#~ msgid "Created VM %s..." +#~ msgid "%(flow)s has moved into state %(state)s from state %(old_state)s" #~ msgstr "" -#~ msgid "Created VM %(instance_name)s as %(vm_ref)s." +#~ msgid "No request spec, will not reschedule" #~ msgstr "" -#~ msgid "Creating a CDROM-specific VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +#~ msgid "No retry filter property or associated retry info, will not reschedule" #~ msgstr "" -#~ msgid "" -#~ "Created a CDROM-specific VBD %(vbd_ref)s" -#~ " for VM %(vm_ref)s, VDI %(vdi_ref)s." 
+#~ msgid "Retry info not present, will not reschedule" #~ msgstr "" -#~ msgid "No primary VDI found for%(vm_ref)s" +#~ msgid "Clear capabilities" #~ msgstr "" -#~ msgid "Snapshotting VM %(vm_ref)s with label '%(label)s'..." +#~ msgid "This usually means the volume was never succesfully created." #~ msgstr "" -#~ msgid "Created snapshot %(template_vm_ref)s from VM %(vm_ref)s." +#~ msgid "setting LU uppper (end) limit to %s" #~ msgstr "" -#~ msgid "Fetching image %(image)s" +#~ msgid "Can't find lun or lun goup in array" #~ msgstr "" -#~ msgid "Image Type: %s" +#~ msgid "Volume to be restored to is smaller than the backup to be restored" #~ msgstr "" -#~ msgid "ISO: Found sr possibly containing the ISO image" +#~ msgid "Volume driver '%(driver)s' not initialized." #~ msgstr "" -#~ msgid "Size for image %(image)s:%(virtual_size)d" +#~ msgid "in looping call" +#~ msgstr "v opakujícím volání" + +#~ msgid "Is the appropriate service running?" #~ msgstr "" -#~ msgid "instance %s: Failed to fetch glance image" +#~ msgid "Could not find another host" +#~ msgstr "Nelze najít %s v požadavku." + +#~ msgid "Not enough allocatable volume gigabytes remaining" #~ msgstr "" -#~ msgid "Creating VBD for VDI %s ... " +#~ msgid "Unable to update stats on non-intialized Volume Group: %s" #~ msgstr "" -#~ msgid "Creating VBD for VDI %s done." +#~ msgid "do_setup: Pool %s does not exist" #~ msgstr "" -#~ msgid "VBD.unplug successful first time." +#~ msgid "migrate_volume started with more than one vdisk copy" #~ msgstr "" -#~ msgid "VBD.unplug rejected: retrying..." +#~ msgid "migrate_volume: Could not get vdisk copy data" #~ msgstr "" -#~ msgid "Not sleeping anymore!" +#~ msgid "Selected datastore: %s for the volume." #~ msgstr "" -#~ msgid "VBD.unplug successful eventually." +#~ msgid "There are no valid datastores present under %s." #~ msgstr "" -#~ msgid "Ignoring XenAPI.Failure in VBD.unplug: %s" +#~ msgid "Unable to create volume, driver not initialized" #~ msgstr "" -#~ msgid "Ignoring XenAPI.Failure %s" +#~ msgid "Migration %(migration_id)s could not be found." +#~ msgstr "Přesun %(migration_id)s nemohl být nalezen." + +#~ msgid "Bad driver response status: %(status)s" #~ msgstr "" -#~ msgid "Starting instance %s" +#~ msgid "Instance %(instance_id)s could not be found." +#~ msgstr "Instance %(instance_id)s nemohla být nastavena." + +#~ msgid "Volume retype failed: %(reason)s" #~ msgstr "" -#~ msgid "instance %s: Failed to spawn" +#~ msgid "SIGTERM received" #~ msgstr "" -#~ msgid "Instance %s failed to spawn - performing clean-up" +#~ msgid "Child %(pid)d exited with status %(code)d" #~ msgstr "" -#~ msgid "instance %s: Failed to spawn - Unable to create VM" +#~ msgid "_wait_child %d" #~ msgstr "" +#~ msgid "wait wrap.failed %s" +#~ msgstr "Vyvoláno Nenalezeno: %s" + #~ msgid "" -#~ "Auto configuring disk for instance " -#~ "%(instance_uuid)s, attempting to resize " -#~ "partition..." +#~ "Report interval must be less than " +#~ "service down time. Current config: " +#~ ". Setting " +#~ "service_down_time to: %(new_service_down_time)s" #~ msgstr "" -#~ msgid "Invalid value for injected_files: '%s'" +#~ msgid "Failed to update iscsi target for volume %(name)s." #~ msgstr "" -#~ msgid "Starting VM %s..." +#~ msgid "Updating iscsi target: %s" #~ msgstr "" -#~ msgid "Spawning VM %(instance_uuid)s created %(vm_ref)s." +#~ msgid "Failed to update iscsi target %(name)s: %(e)s" #~ msgstr "" -#~ msgid "Instance %s: waiting for running" +#~ msgid "Caught '%(exception)s' exception." 
#~ msgstr "" -#~ msgid "Instance %s: running" +#~ msgid "Get code level failed" #~ msgstr "" -#~ msgid "Resources to remove:%s" +#~ msgid "do_setup: Could not get system name" #~ msgstr "" -#~ msgid "Removing VDI %(vdi_ref)s(uuid:%(vdi_to_remove)s)" +#~ msgid "Failed to get license information." #~ msgstr "" -#~ msgid "Skipping VDI destroy for %s" +#~ msgid "do_setup: No configured nodes" #~ msgstr "" -#~ msgid "Finished snapshot and upload for VM %s" +#~ msgid "enter: _get_chap_secret_for_host: host name %s" #~ msgstr "" -#~ msgid "Starting snapshot for VM %s" +#~ msgid "" +#~ "leave: _get_chap_secret_for_host: host name " +#~ "%(host_name)s with secret %(chap_secret)s" #~ msgstr "" -#~ msgid "Unable to Snapshot instance %(instance_uuid)s: %(exc)s" +#~ msgid "" +#~ "_create_host: Cannot clean host name. " +#~ "Host name is not unicode or string" #~ msgstr "" -#~ msgid "Updating instance '%(instance_uuid)s' progress to %(progress)d" +#~ msgid "enter: _get_host_from_connector: %s" #~ msgstr "" -#~ msgid "Resize instance %s complete" +#~ msgid "leave: _get_host_from_connector: host %s" #~ msgstr "" -#~ msgid "domid changed from %(olddomid)s to %(newdomid)s" +#~ msgid "enter: _create_host: host %s" #~ msgstr "" -#~ msgid "VM %(instance_uuid)s already halted,skipping shutdown..." +#~ msgid "_create_host: No connector ports" #~ msgstr "" -#~ msgid "Shutting down VM for Instance %(instance_uuid)s" +#~ msgid "leave: _create_host: host %(host)s - %(host_name)s" +#~ msgstr "Nastavování hostitele %(host)s na %(state)s." + +#~ msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +#~ msgstr "mazání svazku %(volume_name)s který má snímek" + +#~ msgid "" +#~ "storwize_svc_multihostmap_enabled is set to " +#~ "False, Not allow multi host mapping" #~ msgstr "" -#~ msgid "Destroying VDIs for Instance %(instance_uuid)s" +#~ msgid "volume %s mapping to multi host" #~ msgstr "" #~ msgid "" -#~ "Instance %(instance_uuid)s using RAW or " -#~ "VHD, skipping kernel and ramdisk " -#~ "deletion" +#~ "leave: _map_vol_to_host: LUN %(result_lun)s, " +#~ "volume %(volume_name)s, host %(host_name)s" #~ msgstr "" -#~ msgid "Instance %(instance_uuid)s VM destroyed" +#~ msgid "enter: _delete_host: host %s " #~ msgstr "" -#~ msgid "Destroying VM for Instance %(instance_uuid)s" +#~ msgid "leave: _delete_host: host %s " #~ msgstr "" -#~ msgid "Automatically hard rebooting %d" +#~ msgid "_create_host failed to return the host name." #~ msgstr "" -#~ msgid "Instance for migration %d not found, skipping" +#~ msgid "_get_host_from_connector failed to return the host name for connector" #~ msgstr "" -#~ msgid "injecting network info to xs for vm: |%s|" +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to any host found." #~ msgstr "" -#~ msgid "creating vif(s) for vm: |%s|" +#~ msgid "" +#~ "terminate_connection: Multiple mappings of " +#~ "volume %(vol_name)s found, no host " +#~ "specified." #~ msgstr "" -#~ msgid "Creating VIF for VM %(vm_ref)s, network %(network_ref)s." +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to host %(host_name)s found" #~ msgstr "" -#~ msgid "Created VIF %(vif_ref)s for VM %(vm_ref)s, network %(network_ref)s." +#~ msgid "protocol must be specified as ' iSCSI' or ' FC'" #~ msgstr "" -#~ msgid "injecting hostname to xs for vm: |%s|" +#~ msgid "enter: _create_vdisk: vdisk %s " #~ msgstr "" #~ msgid "" -#~ "The agent call to %(method)s returned" -#~ " an invalid response: %(ret)r. 
VM " -#~ "id=%(instance_uuid)s; path=%(path)s; args=%(addl_args)r" +#~ "_create_vdisk %(name)s - did not find success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" #~ msgid "" -#~ "TIMEOUT: The call to %(method)s timed" -#~ " out. VM id=%(instance_uuid)s; args=%(args)r" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find success " +#~ "message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" #~ msgstr "" #~ msgid "" -#~ "NOT IMPLEMENTED: The call to %(method)s" -#~ " is not supported by the agent. " -#~ "VM id=%(instance_uuid)s; args=%(args)r" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find mapping " +#~ "id in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" #~ msgstr "" #~ msgid "" -#~ "The call to %(method)s returned an " -#~ "error: %(e)s. VM id=%(instance_uuid)s; " -#~ "args=%(args)r" +#~ "_prepare_fc_map: Failed to prepare FlashCopy" +#~ " from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" -#~ msgid "Creating VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +#~ msgid "" +#~ "Unexecpted mapping status %(status)s for " +#~ "mapping %(id)s. Attributes: %(attr)s" #~ msgstr "" -#~ msgid "Error destroying VDI" +#~ msgid "" +#~ "Mapping %(id)s prepare failed to " +#~ "complete within the allotted %(to)d " +#~ "seconds timeout. Terminating." #~ msgstr "" -#~ msgid "Volume status must be available" +#~ msgid "" +#~ "_prepare_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s with " +#~ "exception %(ex)s" #~ msgstr "" -#~ msgid "\tVolume %s is NOT VSA volume" +#~ msgid "_prepare_fc_map: %s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s creation - do nothing" +#~ msgid "" +#~ "_start_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" -#~ msgid "VSA BE create_volume for %s failed" +#~ msgid "" +#~ "enter: _run_flashcopy: execute FlashCopy from" +#~ " source %(source)s to target %(target)s" #~ msgstr "" -#~ msgid "VSA BE create_volume for %s succeeded" +#~ msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s deletion - do nothing" +#~ msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" #~ msgstr "" -#~ msgid "VSA BE delete_volume for %s failed" +#~ msgid "_create_copy: Source vdisk %(src_vdisk)s (%(src_id)s) does not exist" #~ msgstr "" -#~ msgid "VSA BE delete_volume for %s suceeded" +#~ msgid "" +#~ "_create_copy: cannot get source vdisk " +#~ "%(src)s capacity from vdisk attributes " +#~ "%(attr)s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s local path call - call discover" +#~ msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s ensure export - do nothing" +#~ msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s create export - do nothing" +#~ msgid "" +#~ "leave: _get_flashcopy_mapping_attributes: mapping " +#~ "%(fc_map_id)s, attributes %(attributes)s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s remove export - do nothing" +#~ msgid "enter: _is_vdisk_defined: vdisk %s " #~ msgstr "" -#~ msgid "VSA BE remove_export for %s failed" +#~ msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " #~ msgstr "" -#~ msgid "Failed to retrieve QoS info" +#~ msgid "enter: _delete_vdisk: vdisk %s" #~ msgstr "" -#~ msgid "invalid 
drive data" +#~ msgid "warning: Tried to delete vdisk %s but it does not exist." #~ msgstr "" -#~ msgid "drive_name not defined" +#~ msgid "leave: _delete_vdisk: vdisk %s" #~ msgstr "" -#~ msgid "invalid drive type name %s" +#~ msgid "" +#~ "_add_vdisk_copy %(name)s - did not find" +#~ " success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" -#~ msgid "*** Experimental VSA code ***" +#~ msgid "_get_vdisk_copy_attrs: Could not get vdisk copy data" #~ msgstr "" -#~ msgid "Requested number of VCs (%d) is too high. Setting to default" +#~ msgid "_get_pool_attrs: Pool %s does not exist" #~ msgstr "" -#~ msgid "Creating VSA: %s" +#~ msgid "enter: _execute_command_and_parse_attributes: command %s" #~ msgstr "" #~ msgid "" -#~ "VSA ID %(vsa_id)d %(vsa_name)s: Create " -#~ "volume %(vol_name)s, %(vol_size)d GB, type " -#~ "%(vol_type_id)s" -#~ msgstr "" - -#~ msgid "VSA ID %(vsa_id)d: Update VSA status to %(status)s" +#~ "leave: _execute_command_and_parse_attributes:\n" +#~ "command: %(cmd)s\n" +#~ "attributes: %(attr)s" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)d: Update VSA call" +#~ msgid "" +#~ "_get_hdr_dic: attribute headers and values do not match.\n" +#~ " Headers: %(header)s\n" +#~ " Values: %(row)s" #~ msgstr "" -#~ msgid "Adding %(add_cnt)s VCs to VSA %(vsa_name)s." +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ "stdout: %(out)s\n" +#~ "stderr: %(err)s\n" #~ msgstr "" -#~ msgid "Deleting %(del_cnt)s VCs from VSA %(vsa_name)s." +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Deleting %(direction)s volume %(vol_name)s" +#~ msgid "Did not find expected column in %(fun)s: %(hdr)s" #~ msgstr "" -#~ msgid "Unable to delete volume %s" +#~ msgid "" +#~ "Volume size %(size)s cannot be lesser" +#~ " than the snapshot size %(snap_size)s. " +#~ "They must be >= original snapshot " +#~ "size." #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Forced delete. %(direction)s volume %(vol_name)s" +#~ msgid "" +#~ "Clones currently disallowed when %(size)s " +#~ "< %(source_size)s. They must be >= " +#~ "original volume size." #~ msgstr "" -#~ msgid "Going to try to terminate VSA ID %s" +#~ msgid "" +#~ "Size of specified image %(image_size)s " +#~ "is larger than volume size " +#~ "%(volume_size)s." #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Delete instance %(name)s" +#~ msgid "" +#~ "Image minDisk size %(min_disk)s is " +#~ "larger than the volume size " +#~ "%(volume_size)s." #~ msgstr "" -#~ msgid "Create call received for VSA %s" +#~ msgid "Updating volume %(volume_id)s with %(update)s" #~ msgstr "" -#~ msgid "Failed to find VSA %(vsa_id)d" +#~ msgid "Volume %s: resetting 'creating' status failed" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Drive %(vol_id)s created. 
Status %(status)s" +#~ msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s" #~ msgstr "" -#~ msgid "Drive %(vol_name)s (%(vol_disp_name)s) still in creating phase - wait" +#~ msgid "Marking volume %s as bootable" #~ msgstr "" #~ msgid "" -#~ "VSA ID %(vsa_id)d: Not all volumes " -#~ "are created (%(cvol_real)d of %(cvol_exp)d)" +#~ "Attempting download of %(image_id)s " +#~ "(%(image_location)s) to volume %(volume_id)s" #~ msgstr "" #~ msgid "" -#~ "VSA ID %(vsa_id)d: Drive %(vol_name)s " -#~ "(%(vol_disp_name)s) is in %(status)s state" +#~ "Downloaded image %(image_id)s (%(image_location)s)" +#~ " to volume %(volume_id)s successfully" #~ msgstr "" -#~ msgid "Failed to update attach status for volume %(vol_name)s. %(ex)s" -#~ msgstr "" - -#~ msgid "VSA ID %(vsa_id)d: Delete all BE volumes" +#~ msgid "" +#~ "Creating volume glance metadata for " +#~ "volume %(volume_id)s backed by image " +#~ "%(image_id)s with: %(vol_metadata)s" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)d: Start %(vc_count)d instances" +#~ msgid "" +#~ "Cloning %(volume_id)s from image %(image_id)s" +#~ " at location %(image_location)s" #~ msgstr "" diff --git a/cinder/locale/da/LC_MESSAGES/cinder.po b/cinder/locale/da/LC_MESSAGES/cinder.po index d3e0765d17..3688a95563 100644 --- a/cinder/locale/da/LC_MESSAGES/cinder.po +++ b/cinder/locale/da/LC_MESSAGES/cinder.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: cinder\n" "Report-Msgid-Bugs-To: FULL NAME \n" -"POT-Creation-Date: 2012-04-08 23:04+0000\n" +"POT-Creation-Date: 2014-02-03 06:16+0000\n" "PO-Revision-Date: 2011-01-15 21:46+0000\n" "Last-Translator: Soren Hansen \n" "Language-Team: Danish \n" @@ -15,8189 +15,10721 @@ msgstr "" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 0.9.6\n" +"Generated-By: Babel 1.3\n" -#: cinder/context.py:59 +#: cinder/context.py:61 #, python-format msgid "Arguments dropped when creating context: %s" msgstr "" -#: cinder/context.py:90 +#: cinder/context.py:102 #, python-format msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" msgstr "" -#: cinder/crypto.py:48 -msgid "Filename of root CA" +#: cinder/exception.py:66 cinder/brick/exception.py:33 +msgid "An unknown exception occurred." msgstr "" -#: cinder/crypto.py:51 -msgid "Filename of private key" -msgstr "Filnavn for privatnøgle" - -#: cinder/crypto.py:54 -msgid "Filename of root Certificate Revocation List" +#: cinder/exception.py:88 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" msgstr "" -#: cinder/crypto.py:57 -msgid "Where we keep our keys" +#: cinder/exception.py:107 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" msgstr "" -#: cinder/crypto.py:60 -msgid "Where we keep our root CA" +#: cinder/exception.py:112 +#, python-format +msgid "Volume driver reported an error: %(message)s" msgstr "" -#: cinder/crypto.py:63 -msgid "Should we use a CA for each project?" +#: cinder/exception.py:116 +#, python-format +msgid "Backup driver reported an error: %(message)s" msgstr "" -#: cinder/crypto.py:67 +#: cinder/exception.py:120 #, python-format -msgid "Subject for certificate for users, %s for project, user, timestamp" +msgid "Connection to glance failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:124 +msgid "Not authorized." 
msgstr "" -#: cinder/crypto.py:72 +#: cinder/exception.py:129 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:133 #, python-format -msgid "Subject for certificate for projects, %s for project, timestamp" +msgid "Policy doesn't allow %(action)s to be performed." msgstr "" -#: cinder/crypto.py:292 +#: cinder/exception.py:137 #, python-format -msgid "Flags path: %s" +msgid "Not authorized for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:141 +msgid "Volume driver not ready." msgstr "" -#: cinder/exception.py:56 -msgid "Unexpected error while running command." +#: cinder/exception.py:145 cinder/brick/exception.py:74 +msgid "Unacceptable parameters." msgstr "" -#: cinder/exception.py:59 +#: cinder/exception.py:150 #, python-format -msgid "" -"%(description)s\n" -"Command: %(cmd)s\n" -"Exit code: %(exit_code)s\n" -"Stdout: %(stdout)r\n" -"Stderr: %(stderr)r" +msgid "Invalid snapshot: %(reason)s" msgstr "" -#: cinder/exception.py:94 -msgid "DB exception wrapped." +#: cinder/exception.py:154 +#, python-format +msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." msgstr "" -#: cinder/exception.py:155 -msgid "An unknown exception occurred." +#: cinder/exception.py:159 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." msgstr "" -#: cinder/exception.py:178 -msgid "Failed to decrypt text" +#: cinder/exception.py:163 +msgid "Failed to load data into json format" msgstr "" -#: cinder/exception.py:182 -msgid "Failed to paginate through images from image service" +#: cinder/exception.py:167 +msgid "The results are invalid." msgstr "" -#: cinder/exception.py:186 -msgid "Virtual Interface creation failed" +#: cinder/exception.py:171 +#, python-format +msgid "Invalid input received: %(reason)s" msgstr "" -#: cinder/exception.py:190 -msgid "5 attempts to create virtual interfacewith unique mac address failed" +#: cinder/exception.py:175 +#, python-format +msgid "Invalid volume type: %(reason)s" msgstr "" -#: cinder/exception.py:195 -msgid "Connection to glance failed" +#: cinder/exception.py:179 +#, python-format +msgid "Invalid volume: %(reason)s" msgstr "" -#: cinder/exception.py:199 -msgid "Connection to melange failed" +#: cinder/exception.py:183 +#, python-format +msgid "Invalid content type %(content_type)s." msgstr "" -#: cinder/exception.py:203 -msgid "Not authorized." +#: cinder/exception.py:187 +#, python-format +msgid "Invalid host: %(reason)s" msgstr "" -#: cinder/exception.py:208 -msgid "User does not have admin privileges" +#: cinder/exception.py:193 cinder/brick/exception.py:81 +#, python-format +msgid "%(err)s" msgstr "" -#: cinder/exception.py:212 +#: cinder/exception.py:197 #, python-format -msgid "Policy doesn't allow %(action)s to be performed." +msgid "Invalid auth key: %(reason)s" msgstr "" -#: cinder/exception.py:216 +#: cinder/exception.py:201 #, python-format -msgid "Not authorized for image %(image_id)s." +msgid "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" msgstr "" -#: cinder/exception.py:220 -msgid "Unacceptable parameters." +#: cinder/exception.py:206 +msgid "Service is unavailable at this time." 
msgstr "" -#: cinder/exception.py:225 -msgid "Invalid snapshot" +#: cinder/exception.py:210 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" msgstr "" -#: cinder/exception.py:229 +#: cinder/exception.py:214 #, python-format -msgid "Volume %(volume_id)s is not attached to anything" +msgid "The device in the path %(path)s is unavailable: %(reason)s" msgstr "" -#: cinder/exception.py:233 cinder/api/openstack/compute/contrib/keypairs.py:113 -msgid "Keypair data is invalid" +#: cinder/exception.py:218 +#, python-format +msgid "Expected a uuid but received %(uuid)s." msgstr "" -#: cinder/exception.py:237 -msgid "Failed to load data into json format" +#: cinder/exception.py:222 cinder/brick/exception.py:68 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:228 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:232 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." msgstr "" -#: cinder/exception.py:241 -msgid "The request is invalid." +#: cinder/exception.py:237 +#, python-format +msgid "" +"Volume %(volume_id)s has no administration metadata with key " +"%(metadata_key)s." msgstr "" -#: cinder/exception.py:245 +#: cinder/exception.py:242 #, python-format -msgid "Invalid signature %(signature)s for user %(user)s." +msgid "Invalid metadata: %(reason)s" msgstr "" -#: cinder/exception.py:249 -msgid "Invalid input received" +#: cinder/exception.py:246 +#, python-format +msgid "Invalid metadata size: %(reason)s" msgstr "" -#: cinder/exception.py:253 +#: cinder/exception.py:250 #, python-format -msgid "Invalid instance type %(instance_type)s." +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." msgstr "" -#: cinder/exception.py:257 -msgid "Invalid volume type" +#: cinder/exception.py:255 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." msgstr "" -#: cinder/exception.py:261 -msgid "Invalid volume" +#: cinder/exception.py:259 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." msgstr "" -#: cinder/exception.py:265 +#: cinder/exception.py:264 #, python-format -msgid "Invalid port range %(from_port)s:%(to_port)s. %(msg)s" +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." msgstr "" #: cinder/exception.py:269 #, python-format -msgid "Invalid IP protocol %(protocol)s." +msgid "" +"Volume Type %(volume_type_id)s deletion is not allowed with volumes " +"present with the type." msgstr "" -#: cinder/exception.py:273 +#: cinder/exception.py:274 #, python-format -msgid "Invalid content type %(content_type)s." +msgid "Snapshot %(snapshot_id)s could not be found." msgstr "" -#: cinder/exception.py:277 +#: cinder/exception.py:278 #, python-format -msgid "Invalid cidr %(cidr)s." +msgid "deleting volume %(volume_name)s that has snapshot" msgstr "" -#: cinder/exception.py:281 -msgid "Invalid reuse of an RPC connection." +#: cinder/exception.py:282 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" msgstr "" -#: cinder/exception.py:285 -msgid "Invalid Parameter: Unicode is not supported by the current database." +#: cinder/exception.py:287 +#, python-format +msgid "No target id found for volume %(volume_id)s." msgstr "" -#: cinder/exception.py:292 +#: cinder/exception.py:291 #, python-format -msgid "%(err)s" +msgid "Invalid image href %(image_href)s." 
msgstr "" -#: cinder/exception.py:296 +#: cinder/exception.py:295 #, python-format -msgid "" -"Cannot perform action '%(action)s' on aggregate %(aggregate_id)s. Reason:" -" %(reason)s." +msgid "Image %(image_id)s could not be found." msgstr "" -#: cinder/exception.py:301 +#: cinder/exception.py:299 #, python-format -msgid "Group not valid. Reason: %(reason)s" +msgid "Service %(service_id)s could not be found." msgstr "" -#: cinder/exception.py:305 +#: cinder/exception.py:303 #, python-format -msgid "" -"Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot %(method)s while" -" the instance is in this state." +msgid "Host %(host)s could not be found." msgstr "" -#: cinder/exception.py:310 +#: cinder/exception.py:307 #, python-format -msgid "Instance %(instance_id)s is not running." +msgid "Scheduler Host Filter %(filter_name)s could not be found." msgstr "" -#: cinder/exception.py:314 +#: cinder/exception.py:311 #, python-format -msgid "Instance %(instance_id)s is not suspended." +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." msgstr "" -#: cinder/exception.py:318 +#: cinder/exception.py:315 #, python-format -msgid "Instance %(instance_id)s is not in rescue mode" +msgid "Could not find binary %(binary)s on host %(host)s." msgstr "" -#: cinder/exception.py:322 -msgid "Failed to suspend instance" +#: cinder/exception.py:319 +#, python-format +msgid "Invalid reservation expiration %(expire)s." msgstr "" -#: cinder/exception.py:326 -msgid "Failed to resume server" +#: cinder/exception.py:323 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" msgstr "" -#: cinder/exception.py:330 -msgid "Failed to reboot instance" +#: cinder/exception.py:328 +msgid "Quota could not be found" msgstr "" -#: cinder/exception.py:334 -msgid "Failed to terminate instance" +#: cinder/exception.py:332 +#, python-format +msgid "Unknown quota resources %(unknown)s." msgstr "" -#: cinder/exception.py:338 -msgid "Service is unavailable at this time." +#: cinder/exception.py:336 +#, python-format +msgid "Quota for project %(project_id)s could not be found." msgstr "" -#: cinder/exception.py:342 -msgid "Volume service is unavailable at this time." +#: cinder/exception.py:340 +#, python-format +msgid "Quota class %(class_name)s could not be found." msgstr "" -#: cinder/exception.py:346 -msgid "Compute service is unavailable at this time." +#: cinder/exception.py:344 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." msgstr "" -#: cinder/exception.py:350 +#: cinder/exception.py:348 #, python-format -msgid "Unable to migrate instance (%(instance_id)s) to current host (%(host)s)." +msgid "Quota reservation %(uuid)s could not be found." msgstr "" -#: cinder/exception.py:355 -msgid "Destination compute host is unavailable at this time." +#: cinder/exception.py:352 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" msgstr "" -#: cinder/exception.py:359 -msgid "Original compute host is unavailable at this time." +#: cinder/exception.py:356 +#, python-format +msgid "File %(file_path)s could not be found." msgstr "" -#: cinder/exception.py:363 -msgid "The supplied hypervisor type of is invalid." +#: cinder/exception.py:365 +#, python-format +msgid "Volume Type %(id)s already exists." msgstr "" -#: cinder/exception.py:367 -msgid "The instance requires a newer hypervisor version than has been provided." 
+#: cinder/exception.py:369 +#, python-format +msgid "Volume type encryption for type %(type_id)s already exists." msgstr "" -#: cinder/exception.py:372 +#: cinder/exception.py:373 #, python-format -msgid "" -"The supplied disk path (%(path)s) already exists, it is expected not to " -"exist." +msgid "Malformed message body: %(reason)s" msgstr "" #: cinder/exception.py:377 #, python-format -msgid "The supplied device path (%(path)s) is invalid." +msgid "Could not find config at %(path)s" msgstr "" #: cinder/exception.py:381 #, python-format -msgid "The supplied device (%(device)s) is busy." +msgid "Could not find parameter %(param)s" msgstr "" #: cinder/exception.py:385 -msgid "Unacceptable CPU info" +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" msgstr "" #: cinder/exception.py:389 #, python-format -msgid "%(address)s is not a valid IP v4/6 address." +msgid "No valid host was found. %(reason)s" msgstr "" -#: cinder/exception.py:393 +#: cinder/exception.py:398 #, python-format -msgid "" -"VLAN tag is not appropriate for the port group %(bridge)s. Expected VLAN " -"tag is %(tag)s, but the one associated with the port group is %(pgroup)s." +msgid "Host %(host)s is not up or doesn't exist." msgstr "" -#: cinder/exception.py:399 +#: cinder/exception.py:402 #, python-format -msgid "" -"vSwitch which contains the port group %(bridge)s is not associated with " -"the desired physical adapter. Expected vSwitch is %(expected)s, but the " -"one associated is %(actual)s." +msgid "Quota exceeded: code=%(code)s" msgstr "" -#: cinder/exception.py:406 +#: cinder/exception.py:409 #, python-format -msgid "Disk format %(disk_format)s is not acceptable" +msgid "" +"Requested volume or snapshot exceeds allowed Gigabytes quota. Requested " +"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." msgstr "" -#: cinder/exception.py:410 +#: cinder/exception.py:415 #, python-format -msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" msgstr "" -#: cinder/exception.py:414 +#: cinder/exception.py:419 #, python-format -msgid "Instance %(instance_id)s is unacceptable: %(reason)s" +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" msgstr "" -#: cinder/exception.py:418 +#: cinder/exception.py:423 #, python-format -msgid "Ec2 id %(ec2_id)s is unacceptable." -msgstr "" - -#: cinder/exception.py:422 -msgid "Resource could not be found." +msgid "Detected more than one volume with name %(vol_name)s" msgstr "" #: cinder/exception.py:427 #, python-format -msgid "Required flag %(flag)s not set." +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" msgstr "" -#: cinder/exception.py:431 +#: cinder/exception.py:432 #, python-format -msgid "Volume %(volume_id)s could not be found." +msgid "Unknown or unsupported command %(cmd)s" msgstr "" -#: cinder/exception.py:435 +#: cinder/exception.py:436 #, python-format -msgid "Unable to locate account %(account_name)s on Solidfire device" +msgid "Malformed response to command %(cmd)s: %(reason)s" msgstr "" #: cinder/exception.py:440 #, python-format -msgid "Volume not found for instance %(instance_id)s." +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" msgstr "" #: cinder/exception.py:444 #, python-format -msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." 
+msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" msgstr "" #: cinder/exception.py:449 -msgid "Zero volume types found." +#, python-format +msgid "Glance metadata for volume/snapshot %(id)s cannot be found." msgstr "" #: cinder/exception.py:453 #, python-format -msgid "Volume type %(volume_type_id)s could not be found." +msgid "Failed to export for volume: %(reason)s" msgstr "" #: cinder/exception.py:457 #, python-format -msgid "Volume type with name %(volume_type_name)s could not be found." +msgid "Failed to create metadata for volume: %(reason)s" msgstr "" -#: cinder/exception.py:462 +#: cinder/exception.py:461 #, python-format -msgid "" -"Volume Type %(volume_type_id)s has no extra specs with key " -"%(extra_specs_key)s." +msgid "Failed to update metadata for volume: %(reason)s" msgstr "" -#: cinder/exception.py:467 +#: cinder/exception.py:465 #, python-format -msgid "Snapshot %(snapshot_id)s could not be found." +msgid "Failed to copy metadata to volume: %(reason)s" msgstr "" -#: cinder/exception.py:471 +#: cinder/exception.py:469 #, python-format -msgid "deleting volume %(volume_name)s that has snapshot" +msgid "Failed to copy image to volume: %(reason)s" msgstr "" -#: cinder/exception.py:475 -#, python-format -msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +#: cinder/exception.py:473 +msgid "Invalid Ceph args provided for backup rbd operation" msgstr "" -#: cinder/exception.py:480 -#, python-format -msgid "No target id found for volume %(volume_id)s." +#: cinder/exception.py:477 +msgid "An error has occurred during backup operation" msgstr "" -#: cinder/exception.py:484 -#, python-format -msgid "No disk at %(location)s" +#: cinder/exception.py:481 +msgid "Backup RBD operation failed" msgstr "" -#: cinder/exception.py:488 +#: cinder/exception.py:485 #, python-format -msgid "Could not find a handler for %(driver_type)s volume." +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:489 +msgid "Failed to identify volume backend." msgstr "" -#: cinder/exception.py:492 +#: cinder/exception.py:493 #, python-format -msgid "Invalid image href %(image_href)s." +msgid "Invalid backup: %(reason)s" msgstr "" -#: cinder/exception.py:496 -msgid "" -"Some images have been stored via hrefs. This version of the api does not " -"support displaying image hrefs." +#: cinder/exception.py:497 +#, python-format +msgid "Connection to swift failed: %(reason)s" msgstr "" #: cinder/exception.py:501 #, python-format -msgid "Image %(image_id)s could not be found." +msgid "Transfer %(transfer_id)s could not be found." msgstr "" #: cinder/exception.py:505 #, python-format -msgid "Kernel not found for image %(image_id)s." +msgid "Volume migration failed: %(reason)s" msgstr "" #: cinder/exception.py:509 #, python-format -msgid "User %(user_id)s could not be found." +msgid "SSH command injection detected: %(command)s" msgstr "" #: cinder/exception.py:513 #, python-format -msgid "Project %(project_id)s could not be found." +msgid "QoS Specs %(specs_id)s already exists." msgstr "" #: cinder/exception.py:517 #, python-format -msgid "User %(user_id)s is not a member of project %(project_id)s." +msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." msgstr "" -#: cinder/exception.py:521 +#: cinder/exception.py:522 #, python-format -msgid "Role %(role_id)s could not be found." -msgstr "" - -#: cinder/exception.py:525 -msgid "Cannot find SR to read/write VDI." 
+msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." msgstr "" -#: cinder/exception.py:529 +#: cinder/exception.py:527 #, python-format -msgid "%(req)s is required to create a network." +msgid "No such QoS spec %(specs_id)s." msgstr "" -#: cinder/exception.py:533 +#: cinder/exception.py:531 #, python-format -msgid "Network %(network_id)s could not be found." +msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." msgstr "" -#: cinder/exception.py:537 +#: cinder/exception.py:536 #, python-format -msgid "Network could not be found for bridge %(bridge)s" +msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." msgstr "" #: cinder/exception.py:541 #, python-format -msgid "Network could not be found for uuid %(uuid)s" +msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." msgstr "" -#: cinder/exception.py:545 +#: cinder/exception.py:546 #, python-format -msgid "Network could not be found with cidr %(cidr)s." +msgid "Invalid qos specs: %(reason)s" msgstr "" -#: cinder/exception.py:549 +#: cinder/exception.py:550 #, python-format -msgid "Network could not be found for instance %(instance_id)s." +msgid "QoS Specs %(specs_id)s is still associated with entities." msgstr "" -#: cinder/exception.py:553 -msgid "No networks defined." +#: cinder/exception.py:554 +#, python-format +msgid "key manager error: %(reason)s" msgstr "" -#: cinder/exception.py:557 -#, python-format -msgid "" -"Either Network uuid %(network_uuid)s is not present or is not assigned to" -" the project %(project_id)s." +#: cinder/exception.py:560 +msgid "Coraid Cinder Driver exception." msgstr "" -#: cinder/exception.py:562 -#, python-format -msgid "Host is not set to the network (%(network_id)s)." +#: cinder/exception.py:564 +msgid "Failed to encode json data." msgstr "" -#: cinder/exception.py:566 -#, python-format -msgid "Network %(network)s has active ports, cannot delete." +#: cinder/exception.py:568 +msgid "Login on ESM failed." msgstr "" -#: cinder/exception.py:570 -msgid "Could not find the datastore reference(s) which the VM uses." +#: cinder/exception.py:572 +msgid "Relogin on ESM failed." msgstr "" -#: cinder/exception.py:574 +#: cinder/exception.py:576 #, python-format -msgid "No fixed IP associated with id %(id)s." +msgid "Group with name \"%(group_name)s\" not found." msgstr "" -#: cinder/exception.py:578 +#: cinder/exception.py:580 #, python-format -msgid "Fixed ip not found for address %(address)s." +msgid "ESM configure request failed: %(message)s." msgstr "" -#: cinder/exception.py:582 +#: cinder/exception.py:584 #, python-format -msgid "Instance %(instance_id)s has zero fixed ips." +msgid "Coraid ESM not available with reason: %(reason)s." msgstr "" -#: cinder/exception.py:586 -#, python-format -msgid "Network host %(host)s has zero fixed ips in network %(network_id)s." +#: cinder/exception.py:589 +msgid "Zadara Cinder Driver exception." msgstr "" -#: cinder/exception.py:591 +#: cinder/exception.py:593 #, python-format -msgid "Instance %(instance_id)s doesn't have fixed ip '%(ip)s'." +msgid "Unable to create server object for initiator %(name)s" msgstr "" -#: cinder/exception.py:595 +#: cinder/exception.py:597 #, python-format -msgid "Host %(host)s has zero fixed ips." +msgid "Unable to find server object for initiator %(name)s" msgstr "" -#: cinder/exception.py:599 -#, python-format -msgid "" -"Fixed IP address (%(address)s) does not exist in network " -"(%(network_uuid)s)." 
+#: cinder/exception.py:601 +msgid "Unable to find any active VPSA controller" msgstr "" -#: cinder/exception.py:604 +#: cinder/exception.py:605 #, python-format -msgid "Fixed IP address %(address)s is already in use." +msgid "Failed to retrieve attachments for volume %(name)s" msgstr "" -#: cinder/exception.py:608 +#: cinder/exception.py:609 #, python-format -msgid "Fixed IP address %(address)s is invalid." +msgid "Invalid attachment info for volume %(name)s: %(reason)s" msgstr "" -#: cinder/exception.py:612 -msgid "Zero fixed ips available." +#: cinder/exception.py:613 +#, python-format +msgid "Bad HTTP response status %(status)s" msgstr "" -#: cinder/exception.py:616 -msgid "Zero fixed ips could be found." +#: cinder/exception.py:618 +msgid "Bad response from SolidFire API" msgstr "" -#: cinder/exception.py:620 -#, python-format -msgid "Floating ip not found for id %(id)s." +#: cinder/exception.py:622 +msgid "SolidFire Cinder Driver exception" msgstr "" -#: cinder/exception.py:624 +#: cinder/exception.py:626 #, python-format -msgid "The DNS entry %(name)s already exists in domain %(domain)s." +msgid "Error in SolidFire API response: data=%(data)s" msgstr "" -#: cinder/exception.py:628 +#: cinder/exception.py:630 #, python-format -msgid "Floating ip not found for address %(address)s." +msgid "Unable to locate account %(account_name)s on Solidfire device" msgstr "" -#: cinder/exception.py:632 +#: cinder/exception.py:636 #, python-format -msgid "Floating ip not found for host %(host)s." +msgid "Invalid 3PAR Domain: %(err)s" msgstr "" -#: cinder/exception.py:636 -msgid "Zero floating ips available." +#: cinder/exception.py:641 +msgid "Unknown NFS exception" msgstr "" -#: cinder/exception.py:640 -#, python-format -msgid "Floating ip %(address)s is associated." +#: cinder/exception.py:645 +msgid "No mounted NFS shares found" msgstr "" -#: cinder/exception.py:644 +#: cinder/exception.py:649 cinder/exception.py:662 #, python-format -msgid "Floating ip %(address)s is not associated." -msgstr "" - -#: cinder/exception.py:648 -msgid "Zero floating ips exist." +msgid "There is no share which can host %(volume_size)sG" msgstr "" -#: cinder/exception.py:652 -#, python-format -msgid "Interface %(interface)s not found." +#: cinder/exception.py:654 +msgid "Unknown Gluster exception" msgstr "" -#: cinder/exception.py:656 -#, python-format -msgid "Keypair %(name)s not found for user %(user_id)s" +#: cinder/exception.py:658 +msgid "No mounted Gluster shares found" msgstr "" -#: cinder/exception.py:660 -#, python-format -msgid "Certificate %(certificate_id)s not found." +#: cinder/manager.py:133 +msgid "Notifying Schedulers of capabilities ..." msgstr "" -#: cinder/exception.py:664 -#, python-format -msgid "Service %(service_id)s could not be found." +#: cinder/policy.py:30 +msgid "JSON file representing policy" msgstr "" -#: cinder/exception.py:668 -#, python-format -msgid "Host %(host)s could not be found." +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" msgstr "" -#: cinder/exception.py:672 +#: cinder/quota.py:105 #, python-format -msgid "Compute host %(host)s could not be found." +msgid "" +"Default quota for resource: %(res)s is set by the default quota flag: " +"quota_%(res)s, it is now deprecated. Please use the default quota " +"class for default quota." msgstr "" -#: cinder/exception.py:676 +#: cinder/quota.py:748 #, python-format -msgid "Could not find binary %(binary)s on host %(host)s."
+msgid "Created reservations %s" msgstr "" -#: cinder/exception.py:680 +#: cinder/quota.py:770 #, python-format -msgid "Auth token %(token)s could not be found." +msgid "Failed to commit reservations %s" msgstr "" -#: cinder/exception.py:684 +#: cinder/quota.py:790 #, python-format -msgid "Access Key %(access_key)s could not be found." +msgid "Failed to roll back reservations %s" msgstr "" -#: cinder/exception.py:688 -msgid "Quota could not be found" +#: cinder/quota.py:876 +msgid "Cannot register resource" msgstr "" -#: cinder/exception.py:692 -#, python-format -msgid "Quota for project %(project_id)s could not be found." +#: cinder/quota.py:879 +msgid "Cannot register resources" msgstr "" -#: cinder/exception.py:696 +#: cinder/quota_utils.py:46 #, python-format -msgid "Quota class %(class_name)s could not be found." +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume - " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" msgstr "" -#: cinder/exception.py:700 +#: cinder/quota_utils.py:56 cinder/transfer/api.py:168 +#: cinder/volume/flows/api/create_volume.py:520 #, python-format -msgid "Security group %(security_group_id)s not found." +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" msgstr "" -#: cinder/exception.py:704 +#: cinder/service.py:95 #, python-format -msgid "Security group %(security_group_id)s not found for project %(project_id)s." +msgid "Starting %(topic)s node (version %(version_string)s)" msgstr "" -#: cinder/exception.py:709 +#: cinder/service.py:108 cinder/openstack/common/rpc/service.py:47 #, python-format -msgid "Security group with rule %(rule_id)s not found." +msgid "Creating Consumer connection for Service %s" msgstr "" -#: cinder/exception.py:713 +#: cinder/service.py:148 #, python-format msgid "" -"Security group %(security_group_id)s is already associated with the " -"instance %(instance_id)s" +"Report interval must be less than service down time. Current config " +"service_down_time: %(service_down_time)s, report_interval for this: " +"service is: %(report_interval)s. Setting global service_down_time to: " +"%(new_down_time)s" msgstr "" -#: cinder/exception.py:718 -#, python-format -msgid "" -"Security group %(security_group_id)s is not associated with the instance " -"%(instance_id)s" +#: cinder/service.py:216 +msgid "Service killed that has no database entry" msgstr "" -#: cinder/exception.py:723 -#, python-format -msgid "Migration %(migration_id)s could not be found." +#: cinder/service.py:255 +msgid "The service database object disappeared, Recreating it." msgstr "" -#: cinder/exception.py:727 -#, python-format -msgid "Migration not found for instance %(instance_id)s with status %(status)s." +#: cinder/service.py:270 +msgid "Recovered model server connection!" msgstr "" -#: cinder/exception.py:732 -#, python-format -msgid "Console pool %(pool_id)s could not be found." +#: cinder/service.py:276 +msgid "model server went away" msgstr "" -#: cinder/exception.py:736 +#: cinder/service.py:298 #, python-format msgid "" -"Console pool of type %(console_type)s for compute host %(compute_host)s " -"on proxy host %(host)s not found." +"Value of config option %(name)s_workers must be integer greater than 1. " +"Input value ignored." msgstr "" -#: cinder/exception.py:742 -#, python-format -msgid "Console %(console_id)s could not be found." 
+#: cinder/service.py:373 +msgid "serve() can only be called once" msgstr "" -#: cinder/exception.py:746 -#, python-format -msgid "Console for instance %(instance_id)s could not be found." +#: cinder/service.py:379 cinder/openstack/common/service.py:166 +#: cinder/openstack/common/service.py:384 +msgid "Full set of CONF:" msgstr "" -#: cinder/exception.py:750 +#: cinder/service.py:387 #, python-format -msgid "" -"Console for instance %(instance_id)s in pool %(pool_id)s could not be " -"found." +msgid "%s : FLAG SET " msgstr "" -#: cinder/exception.py:755 +#: cinder/utils.py:96 #, python-format -msgid "Invalid console type %(console_type)s " +msgid "Can not translate %s to integer." msgstr "" -#: cinder/exception.py:759 -msgid "Zero instance types found." +#: cinder/utils.py:127 +#, python-format +msgid "May specify only one of %s" msgstr "" -#: cinder/exception.py:763 -#, python-format -msgid "Instance type %(instance_type_id)s could not be found." +#: cinder/utils.py:212 +msgid "Specify a password or private_key" msgstr "" -#: cinder/exception.py:767 +#: cinder/utils.py:228 #, python-format -msgid "Instance type with name %(instance_type_name)s could not be found." +msgid "Error connecting via ssh: %s" msgstr "" -#: cinder/exception.py:772 +#: cinder/utils.py:412 #, python-format -msgid "Flavor %(flavor_id)s could not be found." +msgid "Invalid backend: %s" msgstr "" -#: cinder/exception.py:776 +#: cinder/utils.py:423 #, python-format -msgid "Cell %(cell_id)s could not be found." +msgid "backend %s" msgstr "" -#: cinder/exception.py:780 +#: cinder/utils.py:698 #, python-format -msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgid "Could not remove tmpdir: %s" msgstr "" -#: cinder/exception.py:784 +#: cinder/utils.py:759 #, python-format -msgid "Scheduler cost function %(cost_fn_str)s could not be found." +msgid "Volume driver %s not initialized" msgstr "" -#: cinder/exception.py:789 +#: cinder/wsgi.py:127 cinder/openstack/common/sslutils.py:50 #, python-format -msgid "Scheduler weight flag not found: %(flag_name)s" +msgid "Unable to find cert_file : %s" msgstr "" -#: cinder/exception.py:793 +#: cinder/wsgi.py:130 cinder/openstack/common/sslutils.py:53 #, python-format -msgid "Instance %(instance_id)s has no metadata with key %(metadata_key)s." +msgid "Unable to find ca_file : %s" msgstr "" -#: cinder/exception.py:798 +#: cinder/wsgi.py:133 cinder/openstack/common/sslutils.py:56 #, python-format -msgid "" -"Instance Type %(instance_type_id)s has no extra specs with key " -"%(extra_specs_key)s." +msgid "Unable to find key_file : %s" msgstr "" -#: cinder/exception.py:803 -msgid "LDAP object could not be found" +#: cinder/wsgi.py:136 cinder/openstack/common/sslutils.py:59 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" msgstr "" -#: cinder/exception.py:807 +#: cinder/wsgi.py:169 #, python-format -msgid "LDAP user %(user_id)s could not be found." +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" msgstr "" -#: cinder/exception.py:811 +#: cinder/wsgi.py:206 #, python-format -msgid "LDAP group %(group_id)s could not be found." +msgid "Started %(name)s on %(host)s:%(port)s" msgstr "" -#: cinder/exception.py:815 -#, python-format -msgid "LDAP user %(user_id)s is not a member of group %(group_id)s." +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." msgstr "" -#: cinder/exception.py:819 -#, python-format -msgid "File %(file_path)s could not be found." 
+#: cinder/wsgi.py:244 +msgid "WSGI server has stopped." msgstr "" -#: cinder/exception.py:823 -msgid "Zero files could be found." +#: cinder/wsgi.py:313 +msgid "You must implement __call__" msgstr "" -#: cinder/exception.py:827 -#, python-format -msgid "Virtual switch associated with the network adapter %(adapter)s not found." +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." msgstr "" -#: cinder/exception.py:832 -#, python-format -msgid "Network adapter %(adapter)s could not be found." +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." msgstr "" -#: cinder/exception.py:836 -#, python-format -msgid "Class %(class_name)s could not be found: %(exception)s" +#: cinder/api/common.py:92 cinder/api/common.py:126 cinder/volume/api.py:266 +msgid "limit param must be an integer" msgstr "" -#: cinder/exception.py:840 -msgid "Action not allowed." +#: cinder/api/common.py:95 cinder/api/common.py:130 cinder/volume/api.py:263 +msgid "limit param must be positive" msgstr "" -#: cinder/exception.py:844 -#, python-format -msgid "Unable to use global role %(role_id)s" +#: cinder/api/common.py:120 +msgid "offset param must be an integer" msgstr "" -#: cinder/exception.py:848 -msgid "Rotation is not allowed for snapshots" +#: cinder/api/common.py:134 +msgid "offset param must be positive" msgstr "" -#: cinder/exception.py:852 -msgid "Rotation param is required for backup image_type" +#: cinder/api/common.py:162 +#, python-format +msgid "marker [%s] not found" msgstr "" -#: cinder/exception.py:861 +#: cinder/api/common.py:189 #, python-format -msgid "Key pair %(key_name)s already exists." +msgid "href %s does not contain version" msgstr "" -#: cinder/exception.py:865 -#, python-format -msgid "User %(user)s already exists." +#: cinder/api/extensions.py:182 +msgid "Initializing extension manager." msgstr "" -#: cinder/exception.py:869 +#: cinder/api/extensions.py:197 #, python-format -msgid "LDAP user %(user)s already exists." +msgid "Loaded extension: %s" msgstr "" -#: cinder/exception.py:873 +#: cinder/api/extensions.py:235 #, python-format -msgid "LDAP group %(group)s already exists." +msgid "Ext name: %s" msgstr "" -#: cinder/exception.py:877 +#: cinder/api/extensions.py:236 #, python-format -msgid "User %(uid)s is already a member of the group %(group_dn)s" +msgid "Ext alias: %s" msgstr "" -#: cinder/exception.py:882 +#: cinder/api/extensions.py:237 #, python-format -msgid "Project %(project)s already exists." +msgid "Ext description: %s" msgstr "" -#: cinder/exception.py:886 +#: cinder/api/extensions.py:239 #, python-format -msgid "Instance %(name)s already exists." +msgid "Ext namespace: %s" msgstr "" -#: cinder/exception.py:890 +#: cinder/api/extensions.py:240 #, python-format -msgid "Instance Type %(name)s already exists." +msgid "Ext updated: %s" msgstr "" -#: cinder/exception.py:894 +#: cinder/api/extensions.py:242 #, python-format -msgid "Volume Type %(name)s already exists." 
+msgid "Exception loading extension: %s" msgstr "" -#: cinder/exception.py:898 +#: cinder/api/extensions.py:256 #, python-format -msgid "%(path)s is on shared storage: %(reason)s" +msgid "Loading extension %s" msgstr "" -#: cinder/exception.py:902 -msgid "Migration error" +#: cinder/api/extensions.py:262 +#, python-format +msgid "Calling extension factory %s" msgstr "" -#: cinder/exception.py:906 +#: cinder/api/extensions.py:276 #, python-format -msgid "Malformed message body: %(reason)s" +msgid "osapi_volume_extension is set to deprecated path: %s" msgstr "" -#: cinder/exception.py:910 +#: cinder/api/extensions.py:278 #, python-format -msgid "Could not find config at %(path)s" +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" msgstr "" -#: cinder/exception.py:914 +#: cinder/api/extensions.py:287 #, python-format -msgid "Could not load paste app '%(name)s' from %(path)s" +msgid "Failed to load extension %(ext_factory)s: %(exc)s" msgstr "" -#: cinder/exception.py:918 -msgid "When resizing, instances must change size!" +#: cinder/api/extensions.py:356 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" msgstr "" -#: cinder/exception.py:922 -msgid "Image is larger than instance type allows" +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" msgstr "" -#: cinder/exception.py:926 -msgid "1 or more Zones could not complete the request" +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" msgstr "" -#: cinder/exception.py:930 -msgid "Instance type's memory is too small for requested image." +#: cinder/api/xmlutil.py:266 +msgid "element is not a child" msgstr "" -#: cinder/exception.py:934 -msgid "Instance type's disk is too small for requested image." +#: cinder/api/xmlutil.py:463 +msgid "root element selecting a list" msgstr "" -#: cinder/exception.py:938 +#: cinder/api/xmlutil.py:786 #, python-format -msgid "Insufficient free memory on compute node to start %(uuid)s." +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" msgstr "" -#: cinder/exception.py:942 -msgid "Could not fetch bandwidth/cpu/disk metrics for this host." +#: cinder/api/xmlutil.py:907 +msgid "subclasses must implement construct()!" msgstr "" -#: cinder/exception.py:946 +#: cinder/api/contrib/admin_actions.py:81 #, python-format -msgid "No valid host was found. %(reason)s" +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" msgstr "" -#: cinder/exception.py:950 +#: cinder/api/contrib/backups.py:128 #, python-format -msgid "Host %(host)s is not up or doesn't exist." +msgid "show called for member %s" msgstr "" -#: cinder/exception.py:954 -msgid "Quota exceeded" +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" msgstr "" -#: cinder/exception.py:958 +#: cinder/api/contrib/backups.py:143 #, python-format -msgid "" -"Aggregate %(aggregate_id)s: action '%(action)s' caused an error: " -"%(reason)s." +msgid "Delete backup with id: %s" msgstr "" -#: cinder/exception.py:963 +#: cinder/api/contrib/backups.py:185 #, python-format -msgid "Aggregate %(aggregate_id)s could not be found." +msgid "Creating new backup %s" msgstr "" -#: cinder/exception.py:967 -#, python-format -msgid "Aggregate %(aggregate_name)s already exists." 
+#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:227 +#: cinder/api/contrib/volume_transfer.py:157 +#: cinder/api/contrib/volume_transfer.py:193 +msgid "Incorrect request body format" msgstr "" -#: cinder/exception.py:971 +#: cinder/api/contrib/backups.py:201 #, python-format -msgid "Aggregate %(aggregate_id)s has no host %(host)s." +msgid "Creating backup of volume %(volume_id)s in container %(container)s" msgstr "" -#: cinder/exception.py:975 +#: cinder/api/contrib/backups.py:224 #, python-format -msgid "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s." +msgid "Restoring backup %(backup_id)s (%(body)s)" msgstr "" -#: cinder/exception.py:980 +#: cinder/api/contrib/backups.py:234 #, python-format -msgid "Host %(host)s already member of another aggregate." +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" msgstr "" -#: cinder/exception.py:984 -#, python-format -msgid "Aggregate %(aggregate_id)s already has host %(host)s." +#: cinder/api/contrib/extended_snapshot_attributes.py:60 +msgid "Snapshot not found." msgstr "" -#: cinder/exception.py:988 -#, python-format -msgid "Detected more than one volume with name %(vol_name)s" +#: cinder/api/contrib/hosts.py:86 cinder/api/openstack/wsgi.py:245 +msgid "cannot understand XML" msgstr "" -#: cinder/exception.py:992 +#: cinder/api/contrib/hosts.py:136 #, python-format -msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" -msgstr "" - -#: cinder/exception.py:997 -msgid "Unable to create instance type" +msgid "Host '%s' could not be found." msgstr "" -#: cinder/exception.py:1001 -msgid "Bad response from SolidFire API" +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid status: '%s'" msgstr "" -#: cinder/exception.py:1005 +#: cinder/api/contrib/hosts.py:168 #, python-format -msgid "Error in SolidFire API response: status=%(status)s" +msgid "Invalid update setting: '%s'" msgstr "" -#: cinder/exception.py:1009 +#: cinder/api/contrib/hosts.py:180 #, python-format -msgid "Error in SolidFire API response: data=%(data)s" +msgid "Setting host %(host)s to %(state)s." msgstr "" -#: cinder/exception.py:1013 -#, python-format -msgid "Detected existing vlan with id %(vlan)d" +#: cinder/api/contrib/hosts.py:206 +msgid "Describe-resource is admin only functionality" msgstr "" -#: cinder/exception.py:1017 -#, python-format -msgid "Instance %(instance_id)s could not be found." +#: cinder/api/contrib/hosts.py:214 +msgid "Host not found" msgstr "" -#: cinder/exception.py:1021 -#, python-format -msgid "Invalid id: %(val)s (expecting \"i-...\")." +#: cinder/api/contrib/qos_specs_manage.py:111 +msgid "Please specify a name for QoS specs." msgstr "" -#: cinder/exception.py:1025 -#, python-format -msgid "Could not fetch image %(image)s" +#: cinder/api/contrib/qos_specs_manage.py:220 +msgid "Failed to disassociate qos specs." msgstr "" -#: cinder/log.py:315 -#, python-format -msgid "syslog facility must be one of: %s" +#: cinder/api/contrib/qos_specs_manage.py:222 +msgid "Qos specs still in use." msgstr "" -#: cinder/manager.py:146 -#, python-format -msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +#: cinder/api/contrib/qos_specs_manage.py:298 +#: cinder/api/contrib/qos_specs_manage.py:351 +msgid "Volume Type id must not be None." msgstr "" -#: cinder/manager.py:152 -#, python-format -msgid "Running periodic task %(full_task_name)s" +#: cinder/api/contrib/quota_classes.py:72 +msgid "Missing required element quota_class_set in request body." 
msgstr "" -#: cinder/manager.py:159 -#, python-format -msgid "Error during %(full_task_name)s: %(e)s" +#: cinder/api/contrib/quota_classes.py:81 +msgid "Quota class limit must be specified as an integer value." msgstr "" -#: cinder/manager.py:203 -msgid "Notifying Schedulers of capabilities ..." +#: cinder/api/contrib/quota_classes.py:85 +msgid "Quota class limit must be -1 or greater." msgstr "" -#: cinder/policy.py:30 -msgid "JSON file representing policy" +#: cinder/api/contrib/quotas.py:60 +msgid "Quota limit must be specified as an integer value." msgstr "" -#: cinder/policy.py:33 -msgid "Rule checked when requested rule is not found" +#: cinder/api/contrib/quotas.py:65 +msgid "Quota limit must be -1 or greater." msgstr "" -#: cinder/service.py:137 -msgid "SIGTERM received" +#: cinder/api/contrib/quotas.py:100 +msgid "Missing required element quota_set in request body." msgstr "" -#: cinder/service.py:177 +#: cinder/api/contrib/quotas.py:111 #, python-format -msgid "Starting %(topic)s node (version %(vcs_string)s)" +msgid "Bad key(s) in quota set: %s" msgstr "" -#: cinder/service.py:195 -#, python-format -msgid "Creating Consumer connection for Service %s" +#: cinder/api/contrib/scheduler_hints.py:36 +msgid "Malformed scheduler_hints attribute" msgstr "" -#: cinder/service.py:282 -msgid "Service killed that has no database entry" +#: cinder/api/contrib/services.py:84 +msgid "" +"Query by service parameter is deprecated. Please use binary parameter " +"instead." msgstr "" -#: cinder/service.py:319 -msgid "The service database object disappeared, Recreating it." +#: cinder/api/contrib/snapshot_actions.py:51 +msgid "'status' must be specified." msgstr "" -#: cinder/service.py:334 -msgid "Recovered model server connection!" +#: cinder/api/contrib/snapshot_actions.py:61 +#, python-format +msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" msgstr "" -#: cinder/service.py:340 -msgid "model server went away" +#: cinder/api/contrib/snapshot_actions.py:67 +#, python-format +msgid "" +"Provided snapshot status %(provided)s not allowed for snapshot with " +"status %(current)s." msgstr "" -#: cinder/service.py:433 -msgid "Full set of FLAGS:" +#: cinder/api/contrib/snapshot_actions.py:79 +msgid "progress must be an integer percentage" msgstr "" -#: cinder/service.py:440 -#, python-format -msgid "%(flag)s : FLAG SET " +#: cinder/api/contrib/types_extra_specs.py:101 +msgid "Request body empty" msgstr "" -#: cinder/utils.py:79 -#, python-format -msgid "Inner Exception: %s" +#: cinder/api/contrib/types_extra_specs.py:105 +#: cinder/api/v1/snapshot_metadata.py:75 cinder/api/v1/volume_metadata.py:75 +#: cinder/api/v2/snapshot_metadata.py:75 cinder/api/v2/volume_metadata.py:74 +msgid "Request body and URI mismatch" msgstr "" -#: cinder/utils.py:165 -#, python-format -msgid "Fetching %s" +#: cinder/api/contrib/types_extra_specs.py:108 +#: cinder/api/v1/snapshot_metadata.py:79 cinder/api/v1/volume_metadata.py:79 +#: cinder/api/v2/snapshot_metadata.py:79 cinder/api/v2/volume_metadata.py:78 +msgid "Request body contains too many items" msgstr "" -#: cinder/utils.py:210 -#, python-format -msgid "Got unknown keyword args to utils.execute: %r" +#: cinder/api/contrib/types_extra_specs.py:150 +msgid "" +"Key names can only contain alphanumeric characters, underscores, periods," +" colons and hyphens." 
msgstr "" -#: cinder/utils.py:220 +#: cinder/api/contrib/volume_actions.py:99 #, python-format -msgid "Running cmd (subprocess): %s" +msgid "" +"Invalid request to attach volume to an instance %(instance_uuid)s and a " +"host %(host_name)s simultaneously" msgstr "" -#: cinder/utils.py:236 cinder/utils.py:315 -#, python-format -msgid "Result was %s" +#: cinder/api/contrib/volume_actions.py:107 +msgid "Invalid request to attach volume to an invalid target" msgstr "" -#: cinder/utils.py:249 -#, python-format -msgid "%r failed. Retrying." +#: cinder/api/contrib/volume_actions.py:111 +msgid "" +"Invalid request to attach volume with an invalid mode. Attaching mode " +"should be 'rw' or 'ro'" msgstr "" -#: cinder/utils.py:291 -#, python-format -msgid "Running cmd (SSH): %s" +#: cinder/api/contrib/volume_actions.py:196 +msgid "Unable to fetch connection information from backend." msgstr "" -#: cinder/utils.py:293 -msgid "Environment not supported over SSH" +#: cinder/api/contrib/volume_actions.py:216 +msgid "Unable to terminate volume connection from backend." msgstr "" -#: cinder/utils.py:297 -msgid "process_input not supported over SSH" +#: cinder/api/contrib/volume_actions.py:229 +msgid "No image_name was specified in request." msgstr "" -#: cinder/utils.py:352 -#, python-format -msgid "debug in callback: %s" +#: cinder/api/contrib/volume_actions.py:237 +msgid "Bad value for 'force' parameter." msgstr "" -#: cinder/utils.py:534 -#, python-format -msgid "Link Local address is not found.:%s" +#: cinder/api/contrib/volume_actions.py:240 +msgid "'force' is not string or bool." msgstr "" -#: cinder/utils.py:537 -#, python-format -msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +#: cinder/api/contrib/volume_actions.py:280 +msgid "New volume size must be specified as an integer." msgstr "" -#: cinder/utils.py:648 -#, python-format -msgid "Invalid backend: %s" +#: cinder/api/contrib/volume_actions.py:299 +msgid "Must specify readonly in request." msgstr "" -#: cinder/utils.py:659 -#, python-format -msgid "backend %s" +#: cinder/api/contrib/volume_actions.py:307 +msgid "Bad value for 'readonly'" msgstr "" -#: cinder/utils.py:709 -msgid "in looping call" +#: cinder/api/contrib/volume_actions.py:311 +msgid "'readonly' not string or bool" msgstr "" -#: cinder/utils.py:927 -#, python-format -msgid "Attempting to grab semaphore \"%(lock)s\" for method \"%(method)s\"..." +#: cinder/api/contrib/volume_actions.py:325 +msgid "New volume type must be specified." msgstr "" -#: cinder/utils.py:931 -#, python-format -msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +#: cinder/api/contrib/volume_transfer.py:131 +msgid "Listing volume transfers" msgstr "" -#: cinder/utils.py:935 +#: cinder/api/contrib/volume_transfer.py:147 #, python-format -msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgid "Creating new volume transfer %s" msgstr "" -#: cinder/utils.py:942 +#: cinder/api/contrib/volume_transfer.py:162 #, python-format -msgid "Got file lock \"%(lock)s\" for method \"%(method)s\"..." 
+msgid "Creating transfer of volume %s" msgstr "" -#: cinder/utils.py:1001 +#: cinder/api/contrib/volume_transfer.py:183 #, python-format -msgid "Found sentinel %(filename)s for pid %(pid)s" +msgid "Accepting volume transfer %s" msgstr "" -#: cinder/utils.py:1008 +#: cinder/api/contrib/volume_transfer.py:196 #, python-format -msgid "Cleaned sentinel %(filename)s for pid %(pid)s" +msgid "Accepting transfer %s" msgstr "" -#: cinder/utils.py:1023 +#: cinder/api/contrib/volume_transfer.py:217 #, python-format -msgid "Found lockfile %(file)s with link count %(count)d" +msgid "Delete transfer with id: %s" msgstr "" -#: cinder/utils.py:1028 -#, python-format -msgid "Cleaned lockfile %(file)s with link count %(count)d" +#: cinder/api/contrib/volume_type_encryption.py:64 +msgid "key_size must be non-negative" msgstr "" -#: cinder/utils.py:1138 -#, python-format -msgid "Expected object of type: %s" +#: cinder/api/contrib/volume_type_encryption.py:67 +msgid "key_size must be an integer" msgstr "" -#: cinder/utils.py:1169 -#, python-format -msgid "Invalid server_string: %s" +#: cinder/api/contrib/volume_type_encryption.py:73 +msgid "provider must be defined" msgstr "" -#: cinder/utils.py:1298 +#: cinder/api/contrib/volume_type_encryption.py:75 +msgid "control_location must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:83 #, python-format -msgid "timefunc: '%(name)s' took %(total_time).2f secs" +msgid "Valid control location are: %s" msgstr "" -#: cinder/utils.py:1330 -msgid "Original exception being dropped" +#: cinder/api/contrib/volume_type_encryption.py:111 +msgid "Cannot create encryption specs. Volume type in use." msgstr "" -#: cinder/utils.py:1461 -#, python-format -msgid "Class %(fullname)s is deprecated: %(msg)s" +#: cinder/api/contrib/volume_type_encryption.py:115 +msgid "Create body is not valid." msgstr "" -#: cinder/utils.py:1463 -#, python-format -msgid "Class %(fullname)s is deprecated" +#: cinder/api/contrib/volume_type_encryption.py:157 +msgid "Cannot delete encryption specs. Volume type in use." msgstr "" -#: cinder/utils.py:1495 -#, python-format -msgid "Function %(name)s in %(location)s is deprecated: %(msg)s" +#: cinder/api/middleware/auth.py:108 +msgid "Invalid service catalog json." msgstr "" -#: cinder/utils.py:1497 +#: cinder/api/middleware/fault.py:44 #, python-format -msgid "Function %(name)s in %(location)s is deprecated" +msgid "Caught error: %s" msgstr "" -#: cinder/utils.py:1681 +#: cinder/api/middleware/fault.py:53 cinder/api/openstack/wsgi.py:984 #, python-format -msgid "Could not remove tmpdir: %s" +msgid "%(url)s returned with HTTP %(status)d" msgstr "" -#: cinder/wsgi.py:97 +#: cinder/api/middleware/fault.py:69 #, python-format -msgid "Started %(name)s on %(host)s:%(port)s" +msgid "%(exception)s: %(explanation)s" msgstr "" -#: cinder/wsgi.py:108 -msgid "Stopping WSGI server." +#: cinder/api/middleware/sizelimit.py:55 cinder/api/middleware/sizelimit.py:64 +#: cinder/api/middleware/sizelimit.py:78 +msgid "Request is too large." msgstr "" -#: cinder/wsgi.py:111 -msgid "Stopping raw TCP server." +#: cinder/api/openstack/__init__.py:69 +msgid "Must specify an ExtensionManager class" msgstr "" -#: cinder/wsgi.py:117 +#: cinder/api/openstack/__init__.py:80 #, python-format -msgid "Starting TCP server %(arg0)s on %(host)s:%(port)s" +msgid "Extended resource: %s" msgstr "" -#: cinder/wsgi.py:133 -msgid "WSGI server has stopped." 
+#: cinder/api/openstack/__init__.py:104 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" msgstr "" -#: cinder/wsgi.py:211 -msgid "You must implement __call__" +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" msgstr "" -#: cinder/api/direct.py:218 -msgid "not available" +#: cinder/api/openstack/__init__.py:126 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." msgstr "" -#: cinder/api/direct.py:299 -#, python-format -msgid "Returned non-serializeable type: %s" +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." msgstr "" -#: cinder/api/sizelimit.py:51 -msgid "Request is too large." +#: cinder/api/openstack/wsgi.py:220 cinder/api/openstack/wsgi.py:634 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:639 +msgid "too many body keys" msgstr "" -#: cinder/api/validator.py:142 +#: cinder/api/openstack/wsgi.py:677 #, python-format -msgid "%(key)s with value %(value)s failed validator %(validator)s" +msgid "Exception handling resource: %s" msgstr "" -#: cinder/api/ec2/__init__.py:73 +#: cinder/api/openstack/wsgi.py:682 #, python-format -msgid "%(code)s: %(message)s" +msgid "Fault thrown: %s" msgstr "" -#: cinder/api/ec2/__init__.py:95 +#: cinder/api/openstack/wsgi.py:685 #, python-format -msgid "FaultWrapper: %s" +msgid "HTTP exception thrown: %s" msgstr "" -#: cinder/api/ec2/__init__.py:170 -msgid "Too many failed authentications." +#: cinder/api/openstack/wsgi.py:793 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:799 +msgid "Unrecognized Content-Type provided in request" msgstr "" -#: cinder/api/ec2/__init__.py:180 +#: cinder/api/openstack/wsgi.py:803 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:914 #, python-format -msgid "" -"Access key %(access_key)s has had %(failures)d failed authentications and" -" will be locked out for %(lock_mins)d minutes." 
+msgid "There is no such action: %s" msgstr "" -#: cinder/api/ec2/__init__.py:267 -msgid "Signature not provided" +#: cinder/api/openstack/wsgi.py:917 cinder/api/openstack/wsgi.py:930 +#: cinder/api/v1/snapshot_metadata.py:53 cinder/api/v1/snapshot_metadata.py:71 +#: cinder/api/v1/snapshot_metadata.py:96 cinder/api/v1/snapshot_metadata.py:121 +#: cinder/api/v1/volume_metadata.py:53 cinder/api/v1/volume_metadata.py:71 +#: cinder/api/v1/volume_metadata.py:96 cinder/api/v1/volume_metadata.py:121 +#: cinder/api/v2/snapshot_metadata.py:53 cinder/api/v2/snapshot_metadata.py:71 +#: cinder/api/v2/snapshot_metadata.py:96 cinder/api/v2/snapshot_metadata.py:121 +#: cinder/api/v2/volume_metadata.py:52 cinder/api/v2/volume_metadata.py:70 +#: cinder/api/v2/volume_metadata.py:95 cinder/api/v2/volume_metadata.py:120 +msgid "Malformed request body" msgstr "" -#: cinder/api/ec2/__init__.py:271 -msgid "Access key not provided" +#: cinder/api/openstack/wsgi.py:927 +msgid "Unsupported Content-Type" msgstr "" -#: cinder/api/ec2/__init__.py:306 cinder/api/ec2/__init__.py:319 -msgid "Failure communicating with keystone" +#: cinder/api/openstack/wsgi.py:939 +msgid "Malformed request url" msgstr "" -#: cinder/api/ec2/__init__.py:388 +#: cinder/api/openstack/wsgi.py:987 #, python-format -msgid "Authentication Failure: %s" +msgid "%(url)s returned a fault: %(e)s" msgstr "" -#: cinder/api/ec2/__init__.py:404 -#, python-format -msgid "Authenticated Request For %(uname)s:%(pname)s)" +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." msgstr "" -#: cinder/api/ec2/__init__.py:435 -#, python-format -msgid "action: %s" +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." msgstr "" -#: cinder/api/ec2/__init__.py:437 +#: cinder/api/v1/limits.py:139 cinder/api/v2/limits.py:138 #, python-format -msgid "arg: %(key)s\t\tval: %(value)s" +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." msgstr "" -#: cinder/api/ec2/__init__.py:512 -#, python-format -msgid "Unauthorized request for controller=%(controller)s and action=%(action)s" +#: cinder/api/v1/limits.py:264 cinder/api/v2/limits.py:261 +msgid "This request was rate-limited." 
msgstr "" -#: cinder/api/ec2/__init__.py:584 -#, python-format -msgid "InstanceNotFound raised: %s" +#: cinder/api/v1/snapshot_metadata.py:37 cinder/api/v1/snapshot_metadata.py:117 +#: cinder/api/v1/snapshot_metadata.py:156 cinder/api/v2/snapshot_metadata.py:37 +#: cinder/api/v2/snapshot_metadata.py:117 +#: cinder/api/v2/snapshot_metadata.py:156 +msgid "snapshot does not exist" msgstr "" -#: cinder/api/ec2/__init__.py:590 -#, python-format -msgid "VolumeNotFound raised: %s" +#: cinder/api/v1/snapshot_metadata.py:139 +#: cinder/api/v1/snapshot_metadata.py:149 cinder/api/v1/volume_metadata.py:139 +#: cinder/api/v1/volume_metadata.py:149 cinder/api/v2/snapshot_metadata.py:139 +#: cinder/api/v2/snapshot_metadata.py:149 cinder/api/v2/volume_metadata.py:138 +#: cinder/api/v2/volume_metadata.py:148 +msgid "Metadata item was not found" msgstr "" -#: cinder/api/ec2/__init__.py:596 +#: cinder/api/v1/snapshots.py:119 cinder/api/v2/snapshots.py:120 #, python-format -msgid "SnapshotNotFound raised: %s" +msgid "Delete snapshot with id: %s" msgstr "" -#: cinder/api/ec2/__init__.py:602 -#, python-format -msgid "NotFound raised: %s" +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:184 +msgid "'volume_id' must be specified" msgstr "" -#: cinder/api/ec2/__init__.py:605 +#: cinder/api/v1/snapshots.py:182 cinder/api/v2/snapshots.py:193 #, python-format -msgid "EC2APIError raised: %s" +msgid "Create snapshot from volume %s" msgstr "" -#: cinder/api/ec2/__init__.py:613 +#: cinder/api/v1/snapshots.py:186 cinder/api/v2/snapshots.py:202 #, python-format -msgid "KeyPairExists raised: %s" +msgid "Invalid value '%s' for force. " msgstr "" -#: cinder/api/ec2/__init__.py:617 -#, python-format -msgid "InvalidParameterValue raised: %s" +#: cinder/api/v1/volume_metadata.py:37 cinder/api/v1/volume_metadata.py:117 +#: cinder/api/v1/volume_metadata.py:156 cinder/api/v2/volume_metadata.py:36 +#: cinder/api/v2/volume_metadata.py:116 cinder/api/v2/volume_metadata.py:155 +msgid "volume does not exist" msgstr "" -#: cinder/api/ec2/__init__.py:621 +#: cinder/api/v1/volumes.py:111 #, python-format -msgid "InvalidPortRange raised: %s" +msgid "vol=%s" msgstr "" -#: cinder/api/ec2/__init__.py:625 +#: cinder/api/v1/volumes.py:290 cinder/api/v2/volumes.py:228 #, python-format -msgid "NotAuthorized raised: %s" +msgid "Delete volume with id: %s" msgstr "" -#: cinder/api/ec2/__init__.py:629 -#, python-format -msgid "InvalidRequest raised: %s" +#: cinder/api/v1/volumes.py:344 cinder/api/v1/volumes.py:348 +#: cinder/api/v2/volumes.py:298 cinder/api/v2/volumes.py:302 +msgid "Invalid imageRef provided." msgstr "" -#: cinder/api/ec2/__init__.py:633 +#: cinder/api/v1/volumes.py:388 cinder/api/v2/volumes.py:354 #, python-format -msgid "QuotaError raised: %s" +msgid "snapshot id:%s not found" msgstr "" -#: cinder/api/ec2/__init__.py:637 +#: cinder/api/v1/volumes.py:401 #, python-format -msgid "Invalid id: bogus (expecting \"i-...\"): %s" +msgid "source vol id:%s not found" msgstr "" -#: cinder/api/ec2/__init__.py:646 +#: cinder/api/v1/volumes.py:412 cinder/api/v2/volumes.py:377 #, python-format -msgid "Unexpected error raised: %s" +msgid "Create volume of %s GB" msgstr "" -#: cinder/api/ec2/__init__.py:647 +#: cinder/api/v1/volumes.py:496 #, python-format -msgid "Environment: %s" +msgid "Removing options '%(bad_options)s' from query" msgstr "" -#: cinder/api/ec2/__init__.py:649 cinder/api/metadata/handler.py:248 -msgid "An unknown error has occurred. Please try your request again." 
+#: cinder/api/v2/snapshots.py:111 cinder/api/v2/snapshots.py:126 +#: cinder/api/v2/snapshots.py:267 +msgid "Snapshot could not be found" msgstr "" -#: cinder/api/ec2/apirequest.py:64 +#: cinder/api/v2/snapshots.py:174 cinder/api/v2/snapshots.py:234 +#: cinder/api/v2/volumes.py:313 cinder/api/v2/volumes.py:419 #, python-format -msgid "Unsupported API request: controller = %(controller)s, action = %(action)s" +msgid "Missing required element '%s' in request body" msgstr "" -#: cinder/api/ec2/cloud.py:336 -#, python-format -msgid "Create snapshot of volume %s" +#: cinder/api/v2/snapshots.py:190 cinder/api/v2/volumes.py:217 +#: cinder/api/v2/volumes.py:234 cinder/api/v2/volumes.py:449 +msgid "Volume could not be found" msgstr "" -#: cinder/api/ec2/cloud.py:372 -#, python-format -msgid "" -"Value (%s) for KeyName is invalid. Content limited to Alphanumeric " -"character, spaces, dashes, and underscore." +#: cinder/api/v2/snapshots.py:230 cinder/api/v2/volumes.py:415 +msgid "Missing request body" msgstr "" -#: cinder/api/ec2/cloud.py:378 -#, python-format -msgid "Value (%s) for Keyname is invalid. Length exceeds maximum of 255." +#: cinder/api/v2/types.py:70 +msgid "Volume type not found" msgstr "" -#: cinder/api/ec2/cloud.py:382 -#, python-format -msgid "Create key pair %s" +#: cinder/api/v2/volumes.py:237 +msgid "Volume cannot be deleted while in attached state" msgstr "" -#: cinder/api/ec2/cloud.py:391 -#, python-format -msgid "Import key %s" +#: cinder/api/v2/volumes.py:343 +msgid "Volume type not found." msgstr "" -#: cinder/api/ec2/cloud.py:409 +#: cinder/api/v2/volumes.py:366 #, python-format -msgid "Delete key pair %s" +msgid "source volume id:%s not found" msgstr "" -#: cinder/api/ec2/cloud.py:551 -msgid "Invalid CIDR" +#: cinder/api/v2/volumes.py:472 +#, python-format +msgid "Removing options '%s' from query" msgstr "" -#: cinder/api/ec2/cloud.py:639 cinder/api/ec2/cloud.py:693 -#: cinder/api/ec2/cloud.py:800 -msgid "Not enough parameters, need group_name or group_id" +#: cinder/backup/api.py:66 +msgid "Backup status must be available or error" msgstr "" -#: cinder/api/ec2/cloud.py:654 -#: cinder/api/openstack/compute/contrib/security_groups.py:517 -#, python-format -msgid "Revoke security group ingress %s" +#: cinder/backup/api.py:105 +msgid "Volume to be backed up must be available" msgstr "" -#: cinder/api/ec2/cloud.py:666 cinder/api/ec2/cloud.py:719 -#, python-format -msgid "%s Not enough parameters to build a valid rule" +#: cinder/backup/api.py:140 +msgid "Backup status must be available" msgstr "" -#: cinder/api/ec2/cloud.py:684 cinder/api/ec2/cloud.py:744 -msgid "No rule for the specified parameters." +#: cinder/backup/api.py:145 +msgid "Backup to be restored has invalid size" msgstr "" -#: cinder/api/ec2/cloud.py:708 -#: cinder/api/openstack/compute/contrib/security_groups.py:354 +#: cinder/backup/api.py:154 #, python-format -msgid "Authorize security group ingress %s" +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" msgstr "" -#: cinder/api/ec2/cloud.py:725 -#, python-format -msgid "%s - This rule already exists in group" +#: cinder/backup/api.py:170 +msgid "Volume to be restored to must be available" msgstr "" -#: cinder/api/ec2/cloud.py:769 +#: cinder/backup/api.py:176 #, python-format msgid "" -"Value (%s) for parameter GroupName is invalid. Content limited to " -"Alphanumeric characters, spaces, dashes, and underscores." +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." 
msgstr "" -#: cinder/api/ec2/cloud.py:776 +#: cinder/backup/api.py:181 #, python-format -msgid "" -"Value (%s) for parameter GroupName is invalid. Length exceeds maximum of " -"255." +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" msgstr "" -#: cinder/api/ec2/cloud.py:780 -#: cinder/api/openstack/compute/contrib/security_groups.py:292 -#, python-format -msgid "Create Security Group %s" +#: cinder/backup/manager.py:97 +msgid "NULL host not allowed for volume backend lookup." msgstr "" -#: cinder/api/ec2/cloud.py:783 +#: cinder/backup/manager.py:100 #, python-format -msgid "group %s already exists" +msgid "Checking hostname '%s' for backend info." msgstr "" -#: cinder/api/ec2/cloud.py:815 -#: cinder/api/openstack/compute/contrib/security_groups.py:245 +#: cinder/backup/manager.py:107 #, python-format -msgid "Delete security group %s" +msgid "Backend not found in hostname (%s) so using default." msgstr "" -#: cinder/api/ec2/cloud.py:823 cinder/compute/manager.py:1630 +#: cinder/backup/manager.py:117 #, python-format -msgid "Get console output for instance %s" +msgid "Manager requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:120 cinder/backup/manager.py:132 +msgid "Fetching default backend." msgstr "" -#: cinder/api/ec2/cloud.py:894 +#: cinder/backup/manager.py:123 #, python-format -msgid "Create volume from snapshot %s" +msgid "Volume manager for backend '%s' does not exist." msgstr "" -#: cinder/api/ec2/cloud.py:898 cinder/api/openstack/compute/contrib/volumes.py:186 -#: cinder/api/openstack/volume/volumes.py:222 +#: cinder/backup/manager.py:129 #, python-format -msgid "Create volume of %s GB" +msgid "Driver requested for volume_backend '%s'." msgstr "" -#: cinder/api/ec2/cloud.py:921 -msgid "Delete Failed" +#: cinder/backup/manager.py:147 +#, python-format +msgid "" +"Registering backend %(backend)s (host=%(host)s " +"backend_name=%(backend_name)s)." msgstr "" -#: cinder/api/ec2/cloud.py:931 +#: cinder/backup/manager.py:154 #, python-format -msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" +msgid "Registering default backend %s." msgstr "" -#: cinder/api/ec2/cloud.py:939 -msgid "Attach Failed." +#: cinder/backup/manager.py:158 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)." msgstr "" -#: cinder/api/ec2/cloud.py:952 cinder/api/openstack/compute/contrib/volumes.py:366 +#: cinder/backup/manager.py:165 #, python-format -msgid "Detach volume %s" +msgid "Error encountered during initialization of driver: %(name)s." msgstr "" -#: cinder/api/ec2/cloud.py:959 -msgid "Detach Volume Failed." +#: cinder/backup/manager.py:184 +msgid "Cleaning up incomplete backup operations." msgstr "" -#: cinder/api/ec2/cloud.py:984 cinder/api/ec2/cloud.py:1041 -#: cinder/api/ec2/cloud.py:1518 cinder/api/ec2/cloud.py:1533 +#: cinder/backup/manager.py:189 #, python-format -msgid "attribute not supported: %s" +msgid "Resetting volume %s to available (was backing-up)." msgstr "" -#: cinder/api/ec2/cloud.py:1107 +#: cinder/backup/manager.py:194 #, python-format -msgid "vol = %s\n" +msgid "Resetting volume %s to error_restoring (was restoring-backup)." msgstr "" -#: cinder/api/ec2/cloud.py:1255 -msgid "Allocate address" +#: cinder/backup/manager.py:206 +#, python-format +msgid "Resetting backup %s to error (was creating)." msgstr "" -#: cinder/api/ec2/cloud.py:1267 +#: cinder/backup/manager.py:212 #, python-format -msgid "Release address %s" +msgid "Resetting backup %s to available (was restoring)." 
msgstr "" -#: cinder/api/ec2/cloud.py:1272 +#: cinder/backup/manager.py:217 #, python-format -msgid "Associate address %(public_ip)s to instance %(instance_id)s" +msgid "Resuming delete on backup: %s." msgstr "" -#: cinder/api/ec2/cloud.py:1282 +#: cinder/backup/manager.py:225 #, python-format -msgid "Disassociate address %s" +msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." msgstr "" -#: cinder/api/ec2/cloud.py:1308 -msgid "Image must be available" +#: cinder/backup/manager.py:237 +#, python-format +msgid "" +"Create backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s." msgstr "" -#: cinder/api/ec2/cloud.py:1329 -msgid "Going to start terminating instances" +#: cinder/backup/manager.py:249 +#, python-format +msgid "" +"Create backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." msgstr "" -#: cinder/api/ec2/cloud.py:1343 +#: cinder/backup/manager.py:282 #, python-format -msgid "Reboot instance %r" +msgid "Create backup finished. backup: %s." msgstr "" -#: cinder/api/ec2/cloud.py:1354 -msgid "Going to stop instances" +#: cinder/backup/manager.py:286 +#, python-format +msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s." msgstr "" -#: cinder/api/ec2/cloud.py:1365 -msgid "Going to start instances" +#: cinder/backup/manager.py:299 +#, python-format +msgid "" +"Restore backup aborted: expected volume status %(expected_status)s but " +"got %(actual_status)s." msgstr "" -#: cinder/api/ec2/cloud.py:1455 +#: cinder/backup/manager.py:310 #, python-format -msgid "De-registering image %s" +msgid "" +"Restore backup aborted: expected backup status %(expected_status)s but " +"got %(actual_status)s." msgstr "" -#: cinder/api/ec2/cloud.py:1471 -msgid "imageLocation is required" +#: cinder/backup/manager.py:329 +#, python-format +msgid "" +"Restore backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." msgstr "" -#: cinder/api/ec2/cloud.py:1490 +#: cinder/backup/manager.py:360 #, python-format -msgid "Registered image %(image_location)s with id %(image_id)s" +msgid "" +"Restore backup finished, backup %(backup_id)s restored to volume " +"%(volume_id)s." msgstr "" -#: cinder/api/ec2/cloud.py:1536 -msgid "user or group not specified" +#: cinder/backup/manager.py:379 +#, python-format +msgid "Delete backup started, backup: %s." msgstr "" -#: cinder/api/ec2/cloud.py:1538 -msgid "only group \"all\" is supported" +#: cinder/backup/manager.py:386 +#, python-format +msgid "" +"Delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." msgstr "" -#: cinder/api/ec2/cloud.py:1540 -msgid "operation_type must be add or remove" +#: cinder/backup/manager.py:399 +#, python-format +msgid "" +"Delete backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." msgstr "" -#: cinder/api/ec2/cloud.py:1542 +#: cinder/backup/manager.py:422 #, python-format -msgid "Updating image %s publicity" +msgid "Delete backup finished, backup %s deleted." 
msgstr "" -#: cinder/api/ec2/cloud.py:1555 -#, python-format -msgid "Not allowed to modify attributes for image %s" +#: cinder/backup/drivers/ceph.py:116 +msgid "" +"rbd striping not supported - ignoring configuration settings for rbd " +"striping" msgstr "" -#: cinder/api/ec2/cloud.py:1603 +#: cinder/backup/drivers/ceph.py:147 #, python-format -msgid "Couldn't stop instance with in %d sec" +msgid "invalid user '%s'" msgstr "" -#: cinder/api/metadata/handler.py:246 cinder/api/metadata/handler.py:253 -#, python-format -msgid "Failed to get metadata for ip: %s" +#: cinder/backup/drivers/ceph.py:213 +msgid "backup_id required" msgstr "" -#: cinder/api/openstack/__init__.py:43 +#: cinder/backup/drivers/ceph.py:224 #, python-format -msgid "Caught error: %s" +msgid "discarding %(length)s bytes from offset %(offset)s" msgstr "" -#: cinder/api/openstack/__init__.py:45 cinder/api/openstack/wsgi.py:886 +#: cinder/backup/drivers/ceph.py:232 #, python-format -msgid "%(url)s returned with HTTP %(status)d" +msgid "writing zeroes chunk %d" msgstr "" -#: cinder/api/openstack/__init__.py:94 -msgid "Must specify an ExtensionManager class" +#: cinder/backup/drivers/ceph.py:246 +#, python-format +msgid "transferring data between '%(src)s' and '%(dest)s'" msgstr "" -#: cinder/api/openstack/__init__.py:105 +#: cinder/backup/drivers/ceph.py:250 #, python-format -msgid "Extended resource: %s" +msgid "%(chunks)s chunks of %(bytes)s bytes to be transferred" msgstr "" -#: cinder/api/openstack/__init__.py:130 +#: cinder/backup/drivers/ceph.py:269 #, python-format -msgid "" -"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " -"resource" +msgid "transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s)" msgstr "" -#: cinder/api/openstack/__init__.py:135 +#: cinder/backup/drivers/ceph.py:279 #, python-format -msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgid "transferring remaining %s bytes" msgstr "" -#: cinder/api/openstack/auth.py:90 +#: cinder/backup/drivers/ceph.py:295 #, python-format -msgid "%(user_id)s could not be found with token '%(token)s'" +msgid "creating base image '%s'" msgstr "" -#: cinder/api/openstack/auth.py:134 +#: cinder/backup/drivers/ceph.py:322 cinder/backup/drivers/ceph.py:603 #, python-format -msgid "%(user_id)s must be an admin or a member of %(project_id)s" +msgid "deleting backup snapshot='%s'" msgstr "" -#: cinder/api/openstack/auth.py:152 -msgid "Authentication requests must be made against a version root (e.g. /v2)." +#: cinder/backup/drivers/ceph.py:325 +msgid "no backup snapshot to delete" msgstr "" -#: cinder/api/openstack/auth.py:167 +#: cinder/backup/drivers/ceph.py:361 #, python-format -msgid "Could not find %s in request." +msgid "trying diff format name format basename='%s'" msgstr "" -#: cinder/api/openstack/auth.py:191 +#: cinder/backup/drivers/ceph.py:369 #, python-format -msgid "Successfully authenticated '%s'" +msgid "image %s not found" msgstr "" -#: cinder/api/openstack/auth.py:241 -msgid "User not found with provided API key." 
+#: cinder/backup/drivers/ceph.py:377 +#, python-format +msgid "base image still has %s snapshots so skipping base image delete" msgstr "" -#: cinder/api/openstack/auth.py:258 +#: cinder/backup/drivers/ceph.py:382 #, python-format -msgid "Provided API key is valid, but not for user '%(username)s'" +msgid "deleting base image='%s'" msgstr "" -#: cinder/api/openstack/common.py:133 cinder/api/openstack/common.py:167 -msgid "limit param must be an integer" +#: cinder/backup/drivers/ceph.py:389 +#, python-format +msgid "image busy, retrying %(retries)s more time(s) in %(delay)ss" msgstr "" -#: cinder/api/openstack/common.py:136 cinder/api/openstack/common.py:171 -msgid "limit param must be positive" +#: cinder/backup/drivers/ceph.py:394 +msgid "max retries reached - raising error" msgstr "" -#: cinder/api/openstack/common.py:161 -msgid "offset param must be an integer" +#: cinder/backup/drivers/ceph.py:397 +#, python-format +msgid "base backup image='%s' deleted" msgstr "" -#: cinder/api/openstack/common.py:175 -msgid "offset param must be positive" +#: cinder/backup/drivers/ceph.py:407 +#, python-format +msgid "deleting source snap '%s'" msgstr "" -#: cinder/api/openstack/common.py:203 +#: cinder/backup/drivers/ceph.py:453 #, python-format -msgid "marker [%s] not found" +msgid "performing differential transfer from '%(src)s' to '%(dest)s'" msgstr "" -#: cinder/api/openstack/common.py:243 +#: cinder/backup/drivers/ceph.py:478 #, python-format -msgid "href %s does not contain version" +msgid "rbd diff op failed - (ret=%(ret)s stderr=%(stderr)s)" msgstr "" -#: cinder/api/openstack/common.py:278 -msgid "Image metadata limit exceeded" +#: cinder/backup/drivers/ceph.py:488 +#, python-format +msgid "image '%s' not found - trying diff format name" msgstr "" -#: cinder/api/openstack/common.py:295 +#: cinder/backup/drivers/ceph.py:493 #, python-format -msgid "Converting nw_info: %s" +msgid "diff format image '%s' not found" msgstr "" -#: cinder/api/openstack/common.py:305 +#: cinder/backup/drivers/ceph.py:528 #, python-format -msgid "Converted networks: %s" +msgid "using --from-snap '%s'" msgstr "" -#: cinder/api/openstack/common.py:338 +#: cinder/backup/drivers/ceph.py:543 #, python-format -msgid "Cannot '%(action)s' while instance is in %(attr)s %(state)s" +msgid "source snap '%s' is stale so deleting" msgstr "" -#: cinder/api/openstack/common.py:341 +#: cinder/backup/drivers/ceph.py:555 #, python-format -msgid "Instance is in an invalid state for '%(action)s'" +msgid "" +"snap='%(snap)s' does not exist in base image='%(base)s' - aborting " +"incremental backup" msgstr "" -#: cinder/api/openstack/common.py:421 -msgid "Rejecting snapshot request, snapshots currently disabled" +#: cinder/backup/drivers/ceph.py:566 +#, python-format +msgid "creating backup snapshot='%s'" msgstr "" -#: cinder/api/openstack/common.py:423 -msgid "Instance snapshots are not permitted at this time."
+#: cinder/backup/drivers/ceph.py:586 +#, python-format +msgid "differential backup transfer completed in %.4fs" msgstr "" -#: cinder/api/openstack/extensions.py:188 -#, python-format -msgid "Loaded extension: %s" +#: cinder/backup/drivers/ceph.py:595 +msgid "differential backup transfer failed" msgstr "" -#: cinder/api/openstack/extensions.py:225 +#: cinder/backup/drivers/ceph.py:625 #, python-format -msgid "Ext name: %s" +msgid "creating base image='%s'" msgstr "" -#: cinder/api/openstack/extensions.py:226 -#, python-format -msgid "Ext alias: %s" +#: cinder/backup/drivers/ceph.py:634 +msgid "copying data" msgstr "" -#: cinder/api/openstack/extensions.py:227 +#: cinder/backup/drivers/ceph.py:694 #, python-format -msgid "Ext description: %s" +msgid "looking for snapshot of backup base '%s'" msgstr "" -#: cinder/api/openstack/extensions.py:229 +#: cinder/backup/drivers/ceph.py:697 #, python-format -msgid "Ext namespace: %s" +msgid "backup base '%s' has no snapshots" msgstr "" -#: cinder/api/openstack/extensions.py:230 +#: cinder/backup/drivers/ceph.py:704 #, python-format -msgid "Ext updated: %s" +msgid "backup '%s' has no snapshot" msgstr "" -#: cinder/api/openstack/extensions.py:232 +#: cinder/backup/drivers/ceph.py:708 #, python-format -msgid "Exception loading extension: %s" +msgid "backup should only have one snapshot but instead has %s" msgstr "" -#: cinder/api/openstack/extensions.py:246 +#: cinder/backup/drivers/ceph.py:713 #, python-format -msgid "Loading extension %s" +msgid "found snapshot '%s'" msgstr "" -#: cinder/api/openstack/extensions.py:252 -#, python-format -msgid "Calling extension factory %s" +#: cinder/backup/drivers/ceph.py:734 +msgid "need non-zero volume size" msgstr "" -#: cinder/api/openstack/extensions.py:264 +#: cinder/backup/drivers/ceph.py:751 #, python-format -msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgid "Starting backup of volume='%s'" msgstr "" -#: cinder/api/openstack/extensions.py:344 -#, python-format -msgid "Failed to load extension %(classpath)s: %(exc)s" +#: cinder/backup/drivers/ceph.py:764 +msgid "forcing full backup" msgstr "" -#: cinder/api/openstack/extensions.py:368 +#: cinder/backup/drivers/ceph.py:776 #, python-format -msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgid "backup '%s' finished." 
msgstr "" -#: cinder/api/openstack/wsgi.py:135 cinder/api/openstack/wsgi.py:538 -msgid "cannot understand JSON" +#: cinder/backup/drivers/ceph.py:834 +msgid "adjusting restore vol size" msgstr "" -#: cinder/api/openstack/wsgi.py:159 -#: cinder/api/openstack/compute/contrib/hosts.py:86 -msgid "cannot understand XML" +#: cinder/backup/drivers/ceph.py:846 +#, python-format +msgid "trying incremental restore from base='%(base)s' snap='%(snap)s'" msgstr "" -#: cinder/api/openstack/wsgi.py:543 -msgid "too many body keys" +#: cinder/backup/drivers/ceph.py:858 +msgid "differential restore failed, trying full restore" msgstr "" -#: cinder/api/openstack/wsgi.py:582 +#: cinder/backup/drivers/ceph.py:869 #, python-format -msgid "Exception handling resource: %s" +msgid "restore transfer completed in %.4fs" msgstr "" -#: cinder/api/openstack/wsgi.py:586 +#: cinder/backup/drivers/ceph.py:916 #, python-format -msgid "Fault thrown: %s" +msgid "rbd has %s extents" msgstr "" -#: cinder/api/openstack/wsgi.py:589 -#, python-format -msgid "HTTP exception thrown: %s" +#: cinder/backup/drivers/ceph.py:938 +msgid "dest volume is original volume - forcing full copy" msgstr "" -#: cinder/api/openstack/wsgi.py:697 -msgid "Unrecognized Content-Type provided in request" +#: cinder/backup/drivers/ceph.py:959 +msgid "destination has extents - forcing full copy" msgstr "" -#: cinder/api/openstack/wsgi.py:701 -msgid "No Content-Type provided in request" +#: cinder/backup/drivers/ceph.py:964 +#, python-format +msgid "no restore point found for backup='%s', forcing full copy" msgstr "" -#: cinder/api/openstack/wsgi.py:705 -msgid "Empty body provided in request" +#: cinder/backup/drivers/ceph.py:995 +msgid "forcing full restore" msgstr "" -#: cinder/api/openstack/wsgi.py:816 +#: cinder/backup/drivers/ceph.py:1005 #, python-format -msgid "There is no such action: %s" +msgid "starting restore from Ceph backup=%(src)s to volume=%(dest)s" msgstr "" -#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 -#: cinder/api/openstack/compute/server_metadata.py:58 -#: cinder/api/openstack/compute/server_metadata.py:76 -#: cinder/api/openstack/compute/server_metadata.py:101 -#: cinder/api/openstack/compute/server_metadata.py:126 -#: cinder/api/openstack/compute/contrib/admin_actions.py:211 -#: cinder/api/openstack/compute/contrib/console_output.py:52 -msgid "Malformed request body" +#: cinder/backup/drivers/ceph.py:1016 +msgid "volume_file does not support fileno() so skipping fsync()" msgstr "" -#: cinder/api/openstack/wsgi.py:829 -msgid "Unsupported Content-Type" +#: cinder/backup/drivers/ceph.py:1021 +msgid "restore finished successfully." 
msgstr "" -#: cinder/api/openstack/wsgi.py:841 -msgid "Malformed request url" +#: cinder/backup/drivers/ceph.py:1023 +#, python-format +msgid "restore finished with error - %s" msgstr "" -#: cinder/api/openstack/wsgi.py:889 +#: cinder/backup/drivers/ceph.py:1029 #, python-format -msgid "%(url)s returned a fault: %(e)s" +msgid "delete started for backup=%s" msgstr "" -#: cinder/api/openstack/xmlutil.py:265 -msgid "element is not a child" +#: cinder/backup/drivers/ceph.py:1034 +msgid "rbd image not found but continuing anyway so that db entry can be removed" msgstr "" -#: cinder/api/openstack/xmlutil.py:414 -msgid "root element selecting a list" +#: cinder/backup/drivers/ceph.py:1037 +#, python-format +msgid "delete '%s' finished with warning" msgstr "" -#: cinder/api/openstack/xmlutil.py:739 +#: cinder/backup/drivers/ceph.py:1039 #, python-format -msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgid "delete '%s' finished" msgstr "" -#: cinder/api/openstack/xmlutil.py:858 -msgid "subclasses must implement construct()!" +#: cinder/backup/drivers/swift.py:106 +#, python-format +msgid "unsupported compression algorithm: %s" msgstr "" -#: cinder/api/openstack/compute/extensions.py:29 -#: cinder/api/openstack/volume/extensions.py:29 -msgid "Initializing extension manager." +#: cinder/backup/drivers/swift.py:123 +#, python-format +msgid "single_user auth mode enabled, but %(param)s not set" msgstr "" -#: cinder/api/openstack/compute/image_metadata.py:40 -#: cinder/api/openstack/compute/images.py:146 -#: cinder/api/openstack/compute/images.py:161 -msgid "Image not found." +#: cinder/backup/drivers/swift.py:141 +#, python-format +msgid "_check_container_exists: container: %s" msgstr "" -#: cinder/api/openstack/compute/image_metadata.py:79 -msgid "Incorrect request body format" +#: cinder/backup/drivers/swift.py:146 +#, python-format +msgid "container %s does not exist" msgstr "" -#: cinder/api/openstack/compute/image_metadata.py:83 -#: cinder/api/openstack/compute/server_metadata.py:80 -#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:79 -#: cinder/api/openstack/compute/contrib/volumetypes.py:188 -msgid "Request body and URI mismatch" +#: cinder/backup/drivers/swift.py:151 +#, python-format +msgid "container %s exists" msgstr "" -#: cinder/api/openstack/compute/image_metadata.py:86 -#: cinder/api/openstack/compute/server_metadata.py:84 -#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:82 -#: cinder/api/openstack/compute/contrib/volumetypes.py:191 -msgid "Request body contains too many items" +#: cinder/backup/drivers/swift.py:157 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" msgstr "" -#: cinder/api/openstack/compute/image_metadata.py:111 -msgid "Invalid metadata key" +#: cinder/backup/drivers/swift.py:173 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" msgstr "" -#: cinder/api/openstack/compute/ips.py:74 -msgid "Instance does not exist" +#: cinder/backup/drivers/swift.py:182 +#, python-format +msgid "generated object list: %s" msgstr "" -#: cinder/api/openstack/compute/ips.py:97 -msgid "Instance is not a member of specified network" +#: cinder/backup/drivers/swift.py:192 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" msgstr "" -#: cinder/api/openstack/compute/limits.py:140 +#: cinder/backup/drivers/swift.py:209 #, python-format msgid "" -"Only %(value)s %(verb)s request(s) can be made to %(uri)s 
every " -"%(unit_string)s." +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" msgstr "" -#: cinder/api/openstack/compute/limits.py:266 -msgid "This request was rate-limited." +#: cinder/backup/drivers/swift.py:214 +msgid "_write_metadata finished" msgstr "" -#: cinder/api/openstack/compute/server_metadata.py:38 -#: cinder/api/openstack/compute/server_metadata.py:122 -#: cinder/api/openstack/compute/server_metadata.py:159 -msgid "Server does not exist" -msgstr "" - -#: cinder/api/openstack/compute/server_metadata.py:141 -#: cinder/api/openstack/compute/server_metadata.py:152 -msgid "Metadata item was not found" -msgstr "" - -#: cinder/api/openstack/compute/servers.py:425 +#: cinder/backup/drivers/swift.py:219 #, python-format -msgid "Invalid server status: %(status)s" +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:433 -msgid "Invalid changes-since value" +#: cinder/backup/drivers/swift.py:224 +#, python-format +msgid "_read_metadata finished (%s)" msgstr "" -#: cinder/api/openstack/compute/servers.py:483 -msgid "Personality file limit exceeded" +#: cinder/backup/drivers/swift.py:234 +#, python-format +msgid "volume size %d is invalid." msgstr "" -#: cinder/api/openstack/compute/servers.py:485 -msgid "Personality file path too long" +#: cinder/backup/drivers/swift.py:248 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:487 -msgid "Personality file content too long" +#: cinder/backup/drivers/swift.py:271 +msgid "reading chunk of data from volume" msgstr "" -#: cinder/api/openstack/compute/servers.py:501 -msgid "Server name is not a string or unicode" +#: cinder/backup/drivers/swift.py:278 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:505 -msgid "Server name is an empty string" +#: cinder/backup/drivers/swift.py:287 +msgid "not compressing data" msgstr "" -#: cinder/api/openstack/compute/servers.py:509 -msgid "Server name must be less than 256 characters." 
+#: cinder/backup/drivers/swift.py:291 +msgid "About to put_object" msgstr "" -#: cinder/api/openstack/compute/servers.py:527 +#: cinder/backup/drivers/swift.py:297 #, python-format -msgid "Bad personality format: missing %s" +msgid "swift MD5 for %(object_name)s: %(etag)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:530 -msgid "Bad personality format" +#: cinder/backup/drivers/swift.py:301 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:535 +#: cinder/backup/drivers/swift.py:304 #, python-format -msgid "Personality content for %s cannot be decoded" +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:550 -#, python-format -msgid "Bad networks format: network uuid is not in proper format (%s)" +#: cinder/backup/drivers/swift.py:312 +msgid "Calling eventlet.sleep(0)" msgstr "" -#: cinder/api/openstack/compute/servers.py:559 +#: cinder/backup/drivers/swift.py:328 cinder/backup/drivers/tsm.py:324 #, python-format -msgid "Invalid fixed IP address (%s)" +msgid "backup %s finished." msgstr "" -#: cinder/api/openstack/compute/servers.py:566 +#: cinder/backup/drivers/swift.py:345 #, python-format -msgid "Duplicate networks (%s) are not allowed" +msgid "v1 swift volume backup restore of %s started" msgstr "" -#: cinder/api/openstack/compute/servers.py:572 +#: cinder/backup/drivers/swift.py:350 #, python-format -msgid "Bad network format: missing %s" +msgid "metadata_object_names = %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:575 -msgid "Bad networks format" +#: cinder/backup/drivers/swift.py:356 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" msgstr "" -#: cinder/api/openstack/compute/servers.py:587 -msgid "Userdata content cannot be decoded" +#: cinder/backup/drivers/swift.py:362 +#, python-format +msgid "" +"restoring object from swift. backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:594 -msgid "accessIPv4 is not proper IPv4 format" +#: cinder/backup/drivers/swift.py:378 +#, python-format +msgid "decompressing data using %s algorithm" msgstr "" -#: cinder/api/openstack/compute/servers.py:601 -msgid "accessIPv6 is not proper IPv6 format" +#: cinder/backup/drivers/swift.py:401 +#, python-format +msgid "v1 swift volume backup restore of %s finished" msgstr "" -#: cinder/api/openstack/compute/servers.py:633 -msgid "Server name is not defined" +#: cinder/backup/drivers/swift.py:409 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:675 -#: cinder/api/openstack/compute/servers.py:740 -msgid "Invalid flavorRef provided." +#: cinder/backup/drivers/swift.py:423 +#, python-format +msgid "Restoring swift backup version %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:737 -msgid "Can not find requested image" +#: cinder/backup/drivers/swift.py:428 +#, python-format +msgid "No support to restore swift backup version %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:743 -msgid "Invalid key_name provided." 
+#: cinder/backup/drivers/swift.py:432 cinder/backup/drivers/tsm.py:378 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." msgstr "" -#: cinder/api/openstack/compute/servers.py:829 -#: cinder/api/openstack/compute/servers.py:849 -msgid "Instance has not been resized." +#: cinder/backup/drivers/swift.py:446 +msgid "swift error while listing objects, continuing with delete" msgstr "" -#: cinder/api/openstack/compute/servers.py:835 +#: cinder/backup/drivers/swift.py:455 #, python-format -msgid "Error in confirm-resize %s" +msgid "swift error while deleting object %s, continuing with delete" msgstr "" -#: cinder/api/openstack/compute/servers.py:855 +#: cinder/backup/drivers/swift.py:458 #, python-format -msgid "Error in revert-resize %s" +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:868 -msgid "Argument 'type' for reboot is not HARD or SOFT" +#: cinder/backup/drivers/swift.py:468 cinder/backup/drivers/tsm.py:440 +#, python-format +msgid "delete %s finished" msgstr "" -#: cinder/api/openstack/compute/servers.py:872 -msgid "Missing argument 'type' for reboot" +#: cinder/backup/drivers/tsm.py:85 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to create device hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:885 +#: cinder/backup/drivers/tsm.py:143 #, python-format -msgid "Error in reboot %s" +msgid "" +"backup: %(vol_id)s Failed to obtain backup success notification from " +"server.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:897 -msgid "Unable to locate requested flavor." +#: cinder/backup/drivers/tsm.py:173 +#, python-format +msgid "" +"restore: %(vol_id)s Failed.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:900 -msgid "Resize requires a change in size." +#: cinder/backup/drivers/tsm.py:199 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a block device." msgstr "" -#: cinder/api/openstack/compute/servers.py:924 -msgid "Malformed server entity" +#: cinder/backup/drivers/tsm.py:206 +#, python-format +msgid "backup: %(vol_id)s Failed. Cannot obtain real path to device %(path)s." msgstr "" -#: cinder/api/openstack/compute/servers.py:931 -msgid "Missing imageRef attribute" +#: cinder/backup/drivers/tsm.py:213 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a file." msgstr "" -#: cinder/api/openstack/compute/servers.py:940 -msgid "Invalid imageRef provided." 
+#: cinder/backup/drivers/tsm.py:260 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to remove backup hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:949 -msgid "Missing flavorRef attribute" +#: cinder/backup/drivers/tsm.py:286 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to TSM, volume path: " +"%(volume_path)s," msgstr "" -#: cinder/api/openstack/compute/servers.py:962 -msgid "No adminPass was specified" +#: cinder/backup/drivers/tsm.py:298 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:966 -#: cinder/api/openstack/compute/servers.py:1144 -msgid "Invalid adminPass" +#: cinder/backup/drivers/tsm.py:308 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:980 -msgid "Unable to parse metadata key/value pairs." +#: cinder/backup/drivers/tsm.py:338 +#, python-format +msgid "" +"restore: starting restore of backup from TSM to volume %(volume_id)s, " +"backup: %(backup_id)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:993 -msgid "Resize request has invalid 'flavorRef' attribute." +#: cinder/backup/drivers/tsm.py:352 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:996 -msgid "Resize requests require 'flavorRef' attribute." +#: cinder/backup/drivers/tsm.py:362 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:1014 -#: cinder/api/openstack/compute/contrib/aggregates.py:142 -#: cinder/api/openstack/compute/contrib/networks.py:65 -msgid "Invalid request body" +#: cinder/backup/drivers/tsm.py:413 +#, python-format +msgid "" +"delete: %(vol_id)s Failed to run dsmc with stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:1019 -msgid "Could not parse imageRef from request." +#: cinder/backup/drivers/tsm.py:421 +#, python-format +msgid "" +"delete: %(vol_id)s Failed to run dsmc due to invalid arguments with " +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:1071 -msgid "Instance could not be found" +#: cinder/backup/drivers/tsm.py:432 +#, python-format +msgid "" +"delete: %(vol_id)s Failed with stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:1074 -msgid "Cannot find image for rebuild" +#: cinder/brick/exception.py:55 +#, python-format +msgid "Exception in string format operation. msg='%s'" msgstr "" -#: cinder/api/openstack/compute/servers.py:1103 -msgid "createImage entity requires name attribute" +#: cinder/brick/exception.py:85 +msgid "We are unable to locate any Fibre Channel devices." msgstr "" -#: cinder/api/openstack/compute/servers.py:1112 -#: cinder/api/openstack/compute/contrib/admin_actions.py:238 -msgid "Invalid metadata" +#: cinder/brick/exception.py:89 +msgid "Unable to find a Fibre Channel volume device."
msgstr "" -#: cinder/api/openstack/compute/servers.py:1167 +#: cinder/brick/exception.py:93 #, python-format -msgid "Removing options '%(unk_opt_str)s' from query" +msgid "Volume device not found at %(device)s." msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:60 +#: cinder/brick/exception.py:97 #, python-format -msgid "Compute.api::pause %s" +msgid "Unable to find Volume Group: %(vg_name)s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:77 +#: cinder/brick/exception.py:101 #, python-format -msgid "Compute.api::unpause %s" +msgid "Failed to create Volume Group: %(vg_name)s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:94 +#: cinder/brick/exception.py:105 #, python-format -msgid "compute.api::suspend %s" +msgid "Failed to create iscsi target for volume %(volume_id)s." msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:111 +#: cinder/brick/exception.py:109 #, python-format -msgid "compute.api::resume %s" +msgid "Failed to remove iscsi target for volume %(volume_id)s." msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:127 +#: cinder/brick/exception.py:113 #, python-format -msgid "Error in migrate %s" +msgid "Failed to attach iSCSI target for volume %(volume_id)s." msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:141 +#: cinder/brick/exception.py:117 #, python-format -msgid "Compute.api::reset_network %s" -msgstr "" - -#: cinder/api/openstack/compute/contrib/admin_actions.py:154 -#: cinder/api/openstack/compute/contrib/admin_actions.py:170 -#: cinder/api/openstack/compute/contrib/admin_actions.py:186 -#: cinder/api/openstack/compute/contrib/multinic.py:41 -#: cinder/api/openstack/compute/contrib/rescue.py:44 -msgid "Server not found" +msgid "Connect to volume via protocol %(protocol)s not supported." msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:157 +#: cinder/brick/initiator/connector.py:127 #, python-format -msgid "Compute.api::inject_network_info %s" +msgid "Invalid InitiatorConnector protocol specified %(protocol)s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:173 +#: cinder/brick/initiator/connector.py:140 #, python-format -msgid "Compute.api::lock %s" +msgid "Failed to access the device on the path %(path)s: %(error)s %(info)s." msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:189 +#: cinder/brick/initiator/connector.py:229 #, python-format -msgid "Compute.api::unlock %s" +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. 
Try" +" number: %(tries)s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:219 +#: cinder/brick/initiator/connector.py:242 #, python-format -msgid "createBackup entity requires %s attribute" -msgstr "" - -#: cinder/api/openstack/compute/contrib/admin_actions.py:223 -msgid "Malformed createBackup entity" +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:229 -msgid "createBackup attribute 'rotation' must be an integer" -msgstr "" - -#: cinder/api/openstack/compute/contrib/admin_actions.py:244 -#: cinder/api/openstack/compute/contrib/console_output.py:47 -#: cinder/api/openstack/compute/contrib/console_output.py:59 -#: cinder/api/openstack/compute/contrib/consoles.py:49 -#: cinder/api/openstack/compute/contrib/consoles.py:60 -#: cinder/api/openstack/compute/contrib/server_action_list.py:49 -#: cinder/api/openstack/compute/contrib/server_diagnostics.py:47 -#: cinder/api/openstack/compute/contrib/server_start_stop.py:38 -msgid "Instance not found" +#: cinder/brick/initiator/connector.py:317 +#, python-format +msgid "Could not find the iSCSI Initiator File %s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:272 -msgid "host and block_migration must be specified." +#: cinder/brick/initiator/connector.py:609 +msgid "We are unable to locate any Fibre Channel devices" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:284 +#: cinder/brick/initiator/connector.py:619 #, python-format -msgid "Live migration of instance %(id)s to host %(host)s failed" +msgid "Looking for Fibre Channel dev %(device)s" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:76 -#, python-format -msgid "" -"Cannot create aggregate with name %(name)s and availability zone " -"%(avail_zone)s" +#: cinder/brick/initiator/connector.py:629 +msgid "Fibre Channel volume device not found." msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:88 +#: cinder/brick/initiator/connector.py:633 #, python-format -msgid "Cannot show aggregate: %(id)s" +msgid "Fibre volume not yet found. Will rescan & retry. Try number: %(tries)s" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:114 +#: cinder/brick/initiator/connector.py:649 #, python-format -msgid "Cannot update aggregate: %(id)s" +msgid "Found Fibre Channel volume %(name)s (after %(tries)s rescans)" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:126 +#: cinder/brick/initiator/connector.py:658 #, python-format -msgid "Cannot delete aggregate: %(id)s" +msgid "Multipath device discovered %(device)s" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:139 +#: cinder/brick/initiator/connector.py:776 #, python-format -msgid "Aggregates does not have %s action" +msgid "AoE volume not yet found at: %(path)s. 
Try number: %(tries)s" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:152 -#: cinder/api/openstack/compute/contrib/aggregates.py:158 +#: cinder/brick/initiator/connector.py:789 #, python-format -msgid "Cannot add host %(host)s in aggregate %(id)s" +msgid "Found AoE device %(path)s (after %(tries)s rediscover)" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:171 -#: cinder/api/openstack/compute/contrib/aggregates.py:175 +#: cinder/brick/initiator/connector.py:815 #, python-format -msgid "Cannot remove host %(host)s in aggregate %(id)s" +msgid "aoe-discover: stdout=%(out)s stderr%(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:195 +#: cinder/brick/initiator/connector.py:825 #, python-format -msgid "Cannot set metadata %(metadata)s in aggregate %(id)s" +msgid "aoe-revalidate %(dev)s: stdout=%(out)s stderr%(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/certificates.py:75 -msgid "Only root certificate can be retrieved." +#: cinder/brick/initiator/connector.py:834 +#, python-format +msgid "aoe-flush %(dev)s: stdout=%(out)s stderr%(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/cloudpipe.py:146 +#: cinder/brick/initiator/connector.py:858 msgid "" -"Unable to claim IP for VPN instances, ensure it isn't running, and try " -"again in a few minutes" +"Connection details not present. RemoteFsClient may not initialize " +"properly." msgstr "" -#: cinder/api/openstack/compute/contrib/consoles.py:44 -msgid "Missing type specification" +#: cinder/brick/initiator/connector.py:915 +msgid "Invalid connection_properties specified no device_path attribute" msgstr "" -#: cinder/api/openstack/compute/contrib/consoles.py:56 -msgid "Invalid type specification" +#: cinder/brick/initiator/linuxfc.py:50 cinder/brick/initiator/linuxfc.py:56 +msgid "systool is not installed" msgstr "" -#: cinder/api/openstack/compute/contrib/disk_config.py:44 +#: cinder/brick/initiator/linuxscsi.py:99 +#: cinder/brick/initiator/linuxscsi.py:107 +#: cinder/brick/initiator/linuxscsi.py:124 #, python-format -msgid "%s must be either 'MANUAL' or 'AUTO'." +msgid "multipath call failed exit (%(code)s)" msgstr "" -#: cinder/api/openstack/compute/contrib/extended_server_attributes.py:77 -#: cinder/api/openstack/compute/contrib/extended_status.py:61 -msgid "Server not found." +#: cinder/brick/initiator/linuxscsi.py:145 +#, python-format +msgid "Couldn't find multipath device %(line)s" msgstr "" -#: cinder/api/openstack/compute/contrib/flavorextradata.py:61 -#: cinder/api/openstack/compute/contrib/flavorextradata.py:91 -msgid "Flavor not found." +#: cinder/brick/initiator/linuxscsi.py:149 +#, python-format +msgid "Found multipath device = %(mdev)s" msgstr "" -#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:49 -#: cinder/api/openstack/compute/contrib/volumetypes.py:158 -msgid "No Request Body" +#: cinder/brick/iscsi/iscsi.py:140 +msgid "Attempting recreate of backing lun..." msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:159 +#: cinder/brick/iscsi/iscsi.py:158 #, python-format -msgid "No more floating ips in pool %s." -msgstr "" - -#: cinder/api/openstack/compute/contrib/floating_ips.py:161 -msgid "No more floating ips available." 
+msgid "" +"Failed to recover attempt to create iscsi backing lun for volume " +"id:%(vol_id)s: %(e)s" msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:201 -#: cinder/api/openstack/compute/contrib/floating_ips.py:230 -#: cinder/api/openstack/compute/contrib/security_groups.py:571 -#: cinder/api/openstack/compute/contrib/security_groups.py:604 -msgid "Missing parameter dict" +#: cinder/brick/iscsi/iscsi.py:177 +#, python-format +msgid "Creating iscsi_target for: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:204 -#: cinder/api/openstack/compute/contrib/floating_ips.py:233 -msgid "Address not specified" +#: cinder/brick/iscsi/iscsi.py:184 +#, python-format +msgid "" +"Created volume path %(vp)s,\n" +"content: %(vc)%" msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:213 -msgid "No fixed ips associated to instance" +#: cinder/brick/iscsi/iscsi.py:216 cinder/brick/iscsi/iscsi.py:365 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s" msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:216 -msgid "Associate floating ip failed" +#: cinder/brick/iscsi/iscsi.py:227 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:144 +#: cinder/brick/iscsi/iscsi.py:258 #, python-format -msgid "Invalid status: '%s'" +msgid "Removing iscsi_target for: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:148 +#: cinder/brick/iscsi/iscsi.py:262 #, python-format -msgid "Invalid mode: '%s'" +msgid "Volume path %s does not exist, nothing to remove." msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:152 +#: cinder/brick/iscsi/iscsi.py:280 #, python-format -msgid "Invalid update setting: '%s'" +msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:170 -#, python-format -msgid "Putting host %(host)s in maintenance mode %(mode)s." +#: cinder/brick/iscsi/iscsi.py:290 cinder/brick/iscsi/iscsi.py:550 +msgid "valid iqn needed for show_target" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:181 +#: cinder/brick/iscsi/iscsi.py:375 #, python-format -msgid "Setting host %(host)s to %(state)s." +msgid "Removing iscsi_target for volume: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:230 -msgid "Describe-resource is admin only functionality" +#: cinder/brick/iscsi/iscsi.py:469 +msgid "cinder-rtstool is not installed correctly" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:238 -msgid "Host not found" +#: cinder/brick/iscsi/iscsi.py:489 +#, python-format +msgid "Creating iscsi_target for volume: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/keypairs.py:70 -msgid "Keypair name contains unsafe characters" +#: cinder/brick/iscsi/iscsi.py:513 cinder/brick/iscsi/iscsi.py:522 +#, python-format +msgid "Failed to create iscsi target for volume id:%s." msgstr "" -#: cinder/api/openstack/compute/contrib/keypairs.py:95 -msgid "Keypair name must be between 1 and 255 characters long" +#: cinder/brick/iscsi/iscsi.py:532 +#, python-format +msgid "Removing iscsi_target: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/keypairs.py:100 +#: cinder/brick/iscsi/iscsi.py:542 #, python-format -msgid "Key pair '%s' already exists." +msgid "Failed to remove iscsi target for volume id:%s." 
msgstr "" -#: cinder/api/openstack/compute/contrib/multinic.py:52 -msgid "Missing 'networkId' argument for addFixedIp" +#: cinder/brick/iscsi/iscsi.py:571 +#, python-format +msgid "Failed to add initiator iqn %s to target" msgstr "" -#: cinder/api/openstack/compute/contrib/multinic.py:68 -msgid "Missing 'address' argument for removeFixedIp" +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" msgstr "" -#: cinder/api/openstack/compute/contrib/multinic.py:77 +#: cinder/brick/local_dev/lvm.py:76 cinder/brick/local_dev/lvm.py:158 +#: cinder/brick/local_dev/lvm.py:474 cinder/brick/local_dev/lvm.py:503 +#: cinder/brick/local_dev/lvm.py:546 cinder/brick/local_dev/lvm.py:609 #, python-format -msgid "Unable to find address %r" +msgid "Cmd :%s" msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:62 +#: cinder/brick/local_dev/lvm.py:77 cinder/brick/local_dev/lvm.py:159 +#: cinder/brick/local_dev/lvm.py:475 cinder/brick/local_dev/lvm.py:504 +#: cinder/brick/local_dev/lvm.py:547 cinder/brick/local_dev/lvm.py:610 #, python-format -msgid "Network does not have %s action" +msgid "StdOut :%s" msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:70 +#: cinder/brick/local_dev/lvm.py:78 cinder/brick/local_dev/lvm.py:160 +#: cinder/brick/local_dev/lvm.py:476 cinder/brick/local_dev/lvm.py:505 +#: cinder/brick/local_dev/lvm.py:548 cinder/brick/local_dev/lvm.py:611 #, python-format -msgid "Disassociating network with id %s" +msgid "StdErr :%s" msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:74 -#: cinder/api/openstack/compute/contrib/networks.py:91 -#: cinder/api/openstack/compute/contrib/networks.py:101 -msgid "Network not found" +#: cinder/brick/local_dev/lvm.py:82 +#, python-format +msgid "Unable to locate Volume Group %s" msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:87 -#, python-format -msgid "Showing network with id %s" +#: cinder/brick/local_dev/lvm.py:157 +msgid "Error querying thin pool about data_percent" msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:97 +#: cinder/brick/local_dev/lvm.py:370 #, python-format -msgid "Deleting network with id %s" +msgid "Unable to find VG: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/scheduler_hints.py:41 -msgid "Malformed scheduler_hints attribute" +#: cinder/brick/local_dev/lvm.py:420 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:222 -msgid "Security group id should be integer" +#: cinder/brick/local_dev/lvm.py:473 +msgid "Error creating Volume" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:243 -msgid "Security group is still in use" +#: cinder/brick/local_dev/lvm.py:489 +#, python-format +msgid "Unable to find LV: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:295 -#, python-format -msgid "Security group %s already exists" +#: cinder/brick/local_dev/lvm.py:502 +msgid "Error creating snapshot" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:315 -#, python-format -msgid "Security group %s is not a string or unicode" +#: cinder/brick/local_dev/lvm.py:545 +msgid "Error activating LV" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:318 +#: cinder/brick/local_dev/lvm.py:563 #, python-format -msgid "Security group %s cannot be empty." 
+msgid "Error reported running lvremove: CMD: %(command)s, RESPONSE: %(response)s" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:321 -#, python-format -msgid "Security group %s should not be greater than 255 characters." +#: cinder/brick/local_dev/lvm.py:568 +msgid "Attempting udev settle and retry of lvremove..." msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:348 -msgid "Parent group id is not integer" +#: cinder/brick/local_dev/lvm.py:608 +msgid "Error extending Volume" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:351 -#, python-format -msgid "Security group (%s) not found" +#: cinder/brick/remotefs/remotefs.py:39 +msgid "nfs_mount_point_base required" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:369 -msgid "Not enough parameters to build a valid rule." +#: cinder/brick/remotefs/remotefs.py:45 +msgid "glusterfs_mount_point_base required" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:376 +#: cinder/brick/remotefs/remotefs.py:86 #, python-format -msgid "This rule already exists in group %s" +msgid "Already mounted: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:414 -msgid "Parent or group id is not integer" +#: cinder/common/config.py:125 +msgid "Deploy v1 of the Cinder API." msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:507 -msgid "Rule id is not integer" +#: cinder/common/config.py:128 +msgid "Deploy v2 of the Cinder API." msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:510 -#, python-format -msgid "Rule (%s) not found" +#: cinder/common/sqlalchemyutils.py:66 +#: cinder/openstack/common/db/sqlalchemy/utils.py:72 +msgid "Id not in sort_keys; is sort_keys unique?" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:574 -#: cinder/api/openstack/compute/contrib/security_groups.py:607 -msgid "Security group not specified" +#: cinder/common/sqlalchemyutils.py:114 +#: cinder/openstack/common/db/sqlalchemy/utils.py:120 +msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:578 -#: cinder/api/openstack/compute/contrib/security_groups.py:611 -msgid "Security group name cannot be empty" +#: cinder/compute/nova.py:97 +#, python-format +msgid "Novaclient connection created using URL: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/server_start_stop.py:45 -#, python-format -msgid "start instance %r" +#: cinder/db/sqlalchemy/api.py:63 +msgid "Use of empty request context is deprecated" msgstr "" -#: cinder/api/openstack/compute/contrib/server_start_stop.py:54 +#: cinder/db/sqlalchemy/api.py:190 #, python-format -msgid "stop instance %r" +msgid "Unrecognized read_deleted value '%s'" msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:73 -#: cinder/api/openstack/volume/volumes.py:106 +#: cinder/db/sqlalchemy/api.py:843 #, python-format -msgid "vol=%s" +msgid "Change will make usage less than 0 for the following resources: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:146 -#: cinder/api/openstack/volume/volumes.py:184 +#: cinder/db/sqlalchemy/api.py:1842 #, python-format -msgid "Delete volume with id: %s" +msgid "VolumeType %s deletion failed, VolumeType in use." 
msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:329 +#: cinder/db/sqlalchemy/api.py:2530 #, python-format -msgid "Attach volume %(volume_id)s to instance %(server_id)s at %(device)s" +msgid "No backup with id %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2615 +msgid "Volume must be available" msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:481 -#: cinder/api/openstack/volume/snapshots.py:110 +#: cinder/db/sqlalchemy/api.py:2639 #, python-format -msgid "Delete snapshot with id: %s" +msgid "Volume in unexpected state %s, expected awaiting-transfer" msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:524 -#: cinder/api/openstack/volume/snapshots.py:150 +#: cinder/db/sqlalchemy/api.py:2662 #, python-format -msgid "Create snapshot from volume %s" +msgid "" +"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " +"%(status)s, expected awaiting-transfer" msgstr "" -#: cinder/auth/fakeldap.py:33 -msgid "Attempted to instantiate singleton" +#: cinder/db/sqlalchemy/migration.py:37 +msgid "version should be an integer" msgstr "" -#: cinder/auth/ldapdriver.py:650 -#, python-format -msgid "" -"Attempted to remove the last member of a group. Deleting the group at %s " -"instead." +#: cinder/db/sqlalchemy/migration.py:64 +msgid "Upgrade DB using Essex release first." msgstr "" -#: cinder/auth/manager.py:298 -#, python-format -msgid "Looking up user: %r" +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:240 +msgid "Exception while creating table." msgstr "" -#: cinder/auth/manager.py:302 -#, python-format -msgid "Failed authorization for access key %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:269 +msgid "Downgrade from initial Cinder install is unsupported." msgstr "" -#: cinder/auth/manager.py:308 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:49 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:74 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:105 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:45 +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:48 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:46 #, python-format -msgid "Using project name = user name (%s)" +msgid "Table |%s| not created!" msgstr "" -#: cinder/auth/manager.py:315 -#, python-format -msgid "failed authorization: no project named %(pjid)s (user=%(uname)s)" +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:127 +msgid "Dropping foreign key reservations_ibfk_1 failed." 
msgstr "" -#: cinder/auth/manager.py:324 -#, python-format -msgid "" -"Failed authorization: user %(uname)s not admin and not member of project " -"%(pjname)s" +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:133 +msgid "quota_classes table not dropped" msgstr "" -#: cinder/auth/manager.py:331 cinder/auth/manager.py:343 -#, python-format -msgid "user.secret: %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:140 +msgid "quota_usages table not dropped" msgstr "" -#: cinder/auth/manager.py:332 cinder/auth/manager.py:344 -#, python-format -msgid "expected_signature: %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:147 +msgid "reservations table not dropped" msgstr "" -#: cinder/auth/manager.py:333 cinder/auth/manager.py:345 -#, python-format -msgid "signature: %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:60 +msgid "Exception while creating table 'volume_glance_metadata'" msgstr "" -#: cinder/auth/manager.py:335 cinder/auth/manager.py:357 -#, python-format -msgid "Invalid signature for user %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:75 +msgid "volume_glance_metadata table not dropped" msgstr "" -#: cinder/auth/manager.py:353 -#, python-format -msgid "host_only_signature: %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:68 +msgid "backups table not dropped" msgstr "" -#: cinder/auth/manager.py:449 -msgid "Must specify project" +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:58 +msgid "snapshot_metadata table not dropped" msgstr "" -#: cinder/auth/manager.py:490 -#, python-format -msgid "Adding role %(role)s to user %(uid)s in project %(pid)s" +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:61 +msgid "transfers table not dropped" msgstr "" -#: cinder/auth/manager.py:493 -#, python-format -msgid "Adding sitewide role %(role)s to user %(uid)s" +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:31 +msgid "migrations table not dropped" msgstr "" -#: cinder/auth/manager.py:519 +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:61 #, python-format -msgid "Removing role %(role)s from user %(uid)s on project %(pid)s" +msgid "Table |%s| not created" msgstr "" -#: cinder/auth/manager.py:522 +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:37 #, python-format -msgid "Removing sitewide role %(role)s from user %(uid)s" +msgid "Exception while dropping table %s." msgstr "" -#: cinder/auth/manager.py:595 +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:100 #, python-format -msgid "Created project %(name)s with manager %(manager_user)s" +msgid "Exception while creating table %s." msgstr "" -#: cinder/auth/manager.py:613 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:34 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:43 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:49 #, python-format -msgid "modifying project %s" +msgid "Column |%s| not created!" 
msgstr "" -#: cinder/auth/manager.py:625 -#, python-format -msgid "Adding user %(uid)s to project %(pid)s" +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:92 +msgid "encryption_key_id column not dropped from volumes" msgstr "" -#: cinder/auth/manager.py:646 -#, python-format -msgid "Remove user %(uid)s from project %(pid)s" +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:100 +msgid "encryption_key_id column not dropped from snapshots" msgstr "" -#: cinder/auth/manager.py:676 -#, python-format -msgid "Deleting project %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:105 +msgid "volume_type_id column not dropped from snapshots" msgstr "" -#: cinder/auth/manager.py:734 -#, python-format -msgid "Created user %(rvname)s (admin: %(rvadmin)r)" +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:113 +msgid "encryption table not dropped" msgstr "" -#: cinder/auth/manager.py:743 -#, python-format -msgid "Deleting user %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:49 +msgid "Table quality_of_service_specs not created!" msgstr "" -#: cinder/auth/manager.py:753 -#, python-format -msgid "Access Key change for user %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:60 +msgid "Added qos_specs_id column to volume type table failed." msgstr "" -#: cinder/auth/manager.py:755 -#, python-format -msgid "Secret Key change for user %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:85 +msgid "Dropping foreign key volume_types_ibfk_1 failed" msgstr "" -#: cinder/auth/manager.py:757 -#, python-format -msgid "Admin status set to %(admin)r for user %(uid)s" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:93 +msgid "Dropping qos_specs_id column failed." msgstr "" -#: cinder/auth/manager.py:802 -#, python-format -msgid "No vpn data for project %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:100 +msgid "Dropping quality_of_service_specs table failed." msgstr "" -#: cinder/cloudpipe/pipelib.py:46 -msgid "Instance type for vpn instances" +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:59 +msgid "volume_admin_metadata table not dropped" msgstr "" -#: cinder/cloudpipe/pipelib.py:49 -msgid "Template for cloudpipe instance boot script" +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:48 +msgid "" +"Found existing 'default' entries in the quota_classes table. Skipping " +"insertion of default values." msgstr "" -#: cinder/cloudpipe/pipelib.py:52 -msgid "Network to push into openvpn config" +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:72 +msgid "Added default quota class data into the DB." msgstr "" -#: cinder/cloudpipe/pipelib.py:55 -msgid "Netmask to push into openvpn config" +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:74 +msgid "Default quota class data not inserted into the DB." msgstr "" -#: cinder/cloudpipe/pipelib.py:107 +#: cinder/image/glance.py:161 cinder/image/glance.py:169 #, python-format -msgid "Launching VPN for %s" +msgid "Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s." msgstr "" -#: cinder/compute/api.py:141 -msgid "No compute host specified" +#: cinder/image/image_utils.py:94 cinder/image/image_utils.py:199 +msgid "'qemu-img info' parsing failed." 
msgstr "" -#: cinder/compute/api.py:144 +#: cinder/image/image_utils.py:101 #, python-format -msgid "Unable to find host for Instance %s" +msgid "fmt=%(fmt)s backed by: %(backing_file)s" msgstr "" -#: cinder/compute/api.py:192 +#: cinder/image/image_utils.py:109 cinder/image/image_utils.py:192 #, python-format msgid "" -"Quota exceeded for %(pid)s, tried to set %(num_metadata)s metadata " -"properties" +"Size is %(image_size)dGB and doesn't fit in a volume of size " +"%(volume_size)dGB." msgstr "" -#: cinder/compute/api.py:203 +#: cinder/image/image_utils.py:157 #, python-format -msgid "Quota exceeded for %(pid)s, metadata property key or value too long" +msgid "" +"qemu-img is not installed and image is of type %s. Only RAW images can " +"be used if qemu-img is not installed." msgstr "" -#: cinder/compute/api.py:257 -msgid "Cannot run any more instances of this type." +#: cinder/image/image_utils.py:164 +msgid "" +"qemu-img is not installed and the disk format is not specified. Only RAW" +" images can be used if qemu-img is not installed." msgstr "" -#: cinder/compute/api.py:259 +#: cinder/image/image_utils.py:178 #, python-format -msgid "Can only run %s more instances of this type." +msgid "Copying image from %(tmp)s to volume %(dest)s - size: %(size)s" msgstr "" -#: cinder/compute/api.py:261 +#: cinder/image/image_utils.py:206 #, python-format -msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances. " +msgid "fmt=%(fmt)s backed by:%(backing_file)s" msgstr "" -#: cinder/compute/api.py:310 -msgid "Creating a raw instance" +#: cinder/image/image_utils.py:224 +#, python-format +msgid "Converted to %(vol_format)s, but format is now %(file_format)s" msgstr "" -#: cinder/compute/api.py:312 +#: cinder/image/image_utils.py:260 #, python-format -msgid "Using Kernel=%(kernel_id)s, Ramdisk=%(ramdisk_id)s" +msgid "Converted to %(f1)s, but format is now %(f2)s" msgstr "" -#: cinder/compute/api.py:383 -#, python-format -msgid "Going to run %s instances..." +#: cinder/keymgr/conf_key_mgr.py:78 +msgid "" +"config option keymgr.fixed_key has not been defined: some operations may " +"fail unexpectedly" msgstr "" -#: cinder/compute/api.py:447 -#, python-format -msgid "bdm %s" +#: cinder/keymgr/conf_key_mgr.py:80 +msgid "keymgr.fixed_key not defined" msgstr "" -#: cinder/compute/api.py:474 +#: cinder/keymgr/conf_key_mgr.py:134 #, python-format -msgid "block_device_mapping %s" +msgid "Not deleting key %s" msgstr "" -#: cinder/compute/api.py:591 +#: cinder/openstack/common/eventlet_backdoor.py:140 #, python-format -msgid "Sending create to scheduler for %(pid)s/%(uid)s's" +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" msgstr "" -#: cinder/compute/api.py:871 -msgid "Going to try to soft delete instance" +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" msgstr "" -#: cinder/compute/api.py:891 -msgid "No host for instance, deleting immediately" +#: cinder/openstack/common/fileutils.py:64 +#, python-format +msgid "Reloading cached file %s" msgstr "" -#: cinder/compute/api.py:939 -msgid "Going to try to terminate instance" +#: cinder/openstack/common/gettextutils.py:252 +msgid "Message objects do not support addition." msgstr "" -#: cinder/compute/api.py:977 -msgid "Going to try to stop instance" +#: cinder/openstack/common/gettextutils.py:261 +msgid "" +"Message objects do not support str() because they may contain non-ascii " +"characters. Please use unicode() or translate() instead." 
msgstr "" -#: cinder/compute/api.py:996 -msgid "Going to try to start instance" +#: cinder/openstack/common/imageutils.py:96 +msgid "Snapshot list encountered but no header found!" msgstr "" -#: cinder/compute/api.py:1000 +#: cinder/openstack/common/lockutils.py:102 #, python-format -msgid "Instance %(instance_uuid)s is not stopped. (%(vm_state)s" +msgid "Could not release the acquired lock `%s`" msgstr "" -#: cinder/compute/api.py:1071 cinder/volume/api.py:173 -#: cinder/volume/volume_types.py:64 +#: cinder/openstack/common/lockutils.py:189 #, python-format -msgid "Searching by: %s" +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." msgstr "" -#: cinder/compute/api.py:1201 +#: cinder/openstack/common/lockutils.py:200 #, python-format -msgid "Image type not recognized %s" +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." msgstr "" -#: cinder/compute/api.py:1369 -msgid "flavor_id is None. Assuming migration." +#: cinder/openstack/common/lockutils.py:227 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." msgstr "" -#: cinder/compute/api.py:1377 +#: cinder/openstack/common/lockutils.py:235 #, python-format -msgid "" -"Old instance type %(current_instance_type_name)s, new instance type " -"%(new_instance_type_name)s" +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." msgstr "" -#: cinder/compute/api.py:1644 +#: cinder/openstack/common/log.py:301 #, python-format -msgid "multiple fixedips exist, using the first: %s" +msgid "Deprecated: %s" msgstr "" -#: cinder/compute/instance_types.py:57 cinder/compute/instance_types.py:65 -msgid "create arguments must be positive integers" +#: cinder/openstack/common/log.py:402 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" msgstr "" -#: cinder/compute/instance_types.py:76 cinder/volume/volume_types.py:41 +#: cinder/openstack/common/log.py:453 #, python-format -msgid "DB error: %s" +msgid "syslog facility must be one of: %s" msgstr "" -#: cinder/compute/instance_types.py:86 +#: cinder/openstack/common/log.py:623 #, python-format -msgid "Instance type %s not found for deletion" +msgid "Fatal call to deprecated config: %(msg)s" msgstr "" -#: cinder/compute/manager.py:138 +#: cinder/openstack/common/loopingcall.py:82 #, python-format -msgid "check_instance_lock: decorating: |%s|" +msgid "task run outlasted interval by %s sec" msgstr "" -#: cinder/compute/manager.py:140 -#, python-format -msgid "" -"check_instance_lock: arguments: |%(self)s| |%(context)s| " -"|%(instance_uuid)s|" +#: cinder/openstack/common/loopingcall.py:89 +#: cinder/tests/brick/test_brick_connector.py:466 +msgid "in fixed duration looping call" msgstr "" -#: cinder/compute/manager.py:144 +#: cinder/openstack/common/loopingcall.py:129 #, python-format -msgid "check_instance_lock: locked: |%s|" +msgid "Dynamic looping call sleeping for %.02f seconds" msgstr "" -#: cinder/compute/manager.py:146 -#, python-format -msgid "check_instance_lock: admin: |%s|" +#: cinder/openstack/common/loopingcall.py:136 +msgid "in dynamic looping call" msgstr "" -#: cinder/compute/manager.py:151 +#: cinder/openstack/common/periodic_task.py:43 #, python-format -msgid "check_instance_lock: executing: |%s|" +msgid "Unexpected argument for periodic task creation: %(arg)s." 
msgstr "" -#: cinder/compute/manager.py:155 +#: cinder/openstack/common/periodic_task.py:134 #, python-format -msgid "check_instance_lock: not executing |%s|" +msgid "Skipping periodic task %(task)s because its interval is negative" msgstr "" -#: cinder/compute/manager.py:201 +#: cinder/openstack/common/periodic_task.py:139 #, python-format -msgid "Unable to load the virtualization driver: %s" +msgid "Skipping periodic task %(task)s because it is disabled" msgstr "" -#: cinder/compute/manager.py:223 +#: cinder/openstack/common/periodic_task.py:177 #, python-format -msgid "" -"Instance %(instance_uuid)s has been destroyed from under us while trying " -"to set it to ERROR" +msgid "Running periodic task %(full_task_name)s" msgstr "" -#: cinder/compute/manager.py:240 +#: cinder/openstack/common/periodic_task.py:186 #, python-format -msgid "Current state is %(drv_state)s, state in DB is %(db_state)s." +msgid "Error during %(full_task_name)s: %(e)s" msgstr "" -#: cinder/compute/manager.py:245 -msgid "Rebooting instance after cinder-compute restart." +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." msgstr "" -#: cinder/compute/manager.py:255 -msgid "Hypervisor driver does not support firewall rules" +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" msgstr "" -#: cinder/compute/manager.py:260 -msgid "Checking state" +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" msgstr "" -#: cinder/compute/manager.py:329 +#: cinder/openstack/common/policy.py:180 #, python-format -msgid "Setting up bdm %s" +msgid "No handler for matches of kind %s" msgstr "" -#: cinder/compute/manager.py:400 +#: cinder/openstack/common/processutils.py:127 #, python-format -msgid "Instance %s already deleted from database. Attempting forceful vm deletion" +msgid "Got unknown keyword args to utils.execute: %r" msgstr "" -#: cinder/compute/manager.py:406 +#: cinder/openstack/common/processutils.py:142 #, python-format -msgid "Exception encountered while terminating the instance %s" +msgid "Running cmd (subprocess): %s" msgstr "" -#: cinder/compute/manager.py:444 +#: cinder/openstack/common/processutils.py:167 +#: cinder/openstack/common/processutils.py:239 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:345 #, python-format -msgid "Instance %s not found." +msgid "Result was %s" msgstr "" -#: cinder/compute/manager.py:480 -msgid "Instance has already been created" +#: cinder/openstack/common/processutils.py:179 +#, python-format +msgid "%r failed. Retrying." msgstr "" -#: cinder/compute/manager.py:523 +#: cinder/openstack/common/processutils.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:318 #, python-format -msgid "" -"image_id=%(image_id)s, image_size_bytes=%(size_bytes)d, " -"allowed_size_bytes=%(allowed_size_bytes)d" +msgid "Running cmd (SSH): %s" msgstr "" -#: cinder/compute/manager.py:528 -#, python-format -msgid "" -"Image '%(image_id)s' size %(size_bytes)d exceeded instance_type allowed " -"size %(allowed_size_bytes)d" +#: cinder/openstack/common/processutils.py:220 +msgid "Environment not supported over SSH" msgstr "" -#: cinder/compute/manager.py:538 -msgid "Starting instance..." 
+#: cinder/openstack/common/processutils.py:224 +msgid "process_input not supported over SSH" msgstr "" -#: cinder/compute/manager.py:548 -msgid "Skipping network allocation for instance" +#: cinder/openstack/common/service.py:175 +#: cinder/openstack/common/service.py:269 +#, python-format +msgid "Caught %s, exiting" msgstr "" -#: cinder/compute/manager.py:561 -msgid "Instance failed network setup" +#: cinder/openstack/common/service.py:187 +msgid "Exception during rpc cleanup." msgstr "" -#: cinder/compute/manager.py:565 -#, python-format -msgid "Instance network_info: |%s|" +#: cinder/openstack/common/service.py:238 +msgid "Parent process has died unexpectedly, exiting" msgstr "" -#: cinder/compute/manager.py:578 -msgid "Instance failed block device setup" +#: cinder/openstack/common/service.py:275 +msgid "Unhandled exception" msgstr "" -#: cinder/compute/manager.py:594 -msgid "Instance failed to spawn" +#: cinder/openstack/common/service.py:308 +msgid "Forking too fast, sleeping" msgstr "" -#: cinder/compute/manager.py:615 -msgid "Deallocating network for instance" +#: cinder/openstack/common/service.py:327 +#, python-format +msgid "Started child %d" msgstr "" -#: cinder/compute/manager.py:672 +#: cinder/openstack/common/service.py:337 #, python-format -msgid "%(action_str)s instance" +msgid "Starting %d workers" msgstr "" -#: cinder/compute/manager.py:699 +#: cinder/openstack/common/service.py:354 #, python-format -msgid "Ignoring DiskNotFound: %s" +msgid "Child %(pid)d killed by signal %(sig)d" msgstr "" -#: cinder/compute/manager.py:708 +#: cinder/openstack/common/service.py:358 #, python-format -msgid "terminating bdm %s" +msgid "Child %(pid)s exited with status %(code)d" msgstr "" -#: cinder/compute/manager.py:742 cinder/compute/manager.py:1328 -#: cinder/compute/manager.py:1416 cinder/compute/manager.py:2501 +#: cinder/openstack/common/service.py:362 #, python-format -msgid "%s. Setting instance vm_state to ERROR" +msgid "pid %d not in child list" msgstr "" -#: cinder/compute/manager.py:811 +#: cinder/openstack/common/service.py:392 #, python-format -msgid "" -"Cannot rebuild instance [%(instance_uuid)s], because the given image does" -" not exist." 
+msgid "Caught %s, stopping children" msgstr "" -#: cinder/compute/manager.py:816 +#: cinder/openstack/common/service.py:410 #, python-format -msgid "Cannot rebuild instance [%(instance_uuid)s]: %(exc)s" +msgid "Waiting on %d children to exit" msgstr "" -#: cinder/compute/manager.py:823 +#: cinder/openstack/common/sslutils.py:98 #, python-format -msgid "Rebuilding instance %s" +msgid "Invalid SSL version : %s" msgstr "" -#: cinder/compute/manager.py:876 +#: cinder/openstack/common/strutils.py:86 #, python-format -msgid "Rebooting instance %s" +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" msgstr "" -#: cinder/compute/manager.py:891 +#: cinder/openstack/common/strutils.py:182 #, python-format -msgid "" -"trying to reboot a non-running instance: %(instance_uuid)s (state: " -"%(state)s expected: %(running)s)" +msgid "Invalid string format: %s" msgstr "" -#: cinder/compute/manager.py:933 +#: cinder/openstack/common/strutils.py:189 #, python-format -msgid "instance %s: snapshotting" +msgid "Unknown byte multiplier: %s" msgstr "" -#: cinder/compute/manager.py:939 +#: cinder/openstack/common/versionutils.py:69 #, python-format msgid "" -"trying to snapshot a non-running instance: %(instance_uuid)s (state: " -"%(state)s expected: %(running)s)" +"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and " +"may be removed in %(remove_in)s." msgstr "" -#: cinder/compute/manager.py:995 +#: cinder/openstack/common/versionutils.py:73 #, python-format -msgid "Found %(num_images)d images (rotation: %(rotation)d)" +msgid "" +"%(what)s is deprecated as of %(as_of)s and may be removed in " +"%(remove_in)s. It will not be superseded." msgstr "" -#: cinder/compute/manager.py:1001 -#, python-format -msgid "Rotating out %d backups" +#: cinder/openstack/common/crypto/utils.py:29 +msgid "An unknown error occurred in crypto utils." msgstr "" -#: cinder/compute/manager.py:1005 +#: cinder/openstack/common/crypto/utils.py:36 #, python-format -msgid "Deleting image %s" +msgid "Block size of %(given)d is too big, max = %(maximum)d" msgstr "" -#: cinder/compute/manager.py:1035 +#: cinder/openstack/common/crypto/utils.py:45 #, python-format -msgid "Failed to set admin password. Instance %s is not running" +msgid "Length of %(given)d is too long, max = %(maximum)d" msgstr "" -#: cinder/compute/manager.py:1041 -#, python-format -msgid "Instance %s: Root password set" +#: cinder/openstack/common/db/exception.py:44 +msgid "Invalid Parameter: Unicode is not supported by the current database." msgstr "" -#: cinder/compute/manager.py:1050 -msgid "set_admin_password is not implemented by this driver." +#: cinder/openstack/common/db/sqlalchemy/session.py:487 +msgid "DB exception wrapped." msgstr "" -#: cinder/compute/manager.py:1064 -msgid "Error setting admin password" +#: cinder/openstack/common/db/sqlalchemy/session.py:538 +#, python-format +msgid "Got mysql server has gone away: %s" msgstr "" -#: cinder/compute/manager.py:1079 +#: cinder/openstack/common/db/sqlalchemy/session.py:610 #, python-format -msgid "" -"trying to inject a file into a non-running instance: %(instance_uuid)s " -"(state: %(current_power_state)s expected: %(expected_state)s)" +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/utils.py:33 +msgid "Sort key supplied was not valid." 
msgstr "" -#: cinder/compute/manager.py:1084 +#: cinder/openstack/common/notifier/api.py:129 #, python-format -msgid "instance %(instance_uuid)s: injecting file to %(path)s" +msgid "%s not in valid priorities" msgstr "" -#: cinder/compute/manager.py:1098 +#: cinder/openstack/common/notifier/api.py:145 #, python-format msgid "" -"trying to update agent on a non-running instance: %(instance_uuid)s " -"(state: %(current_power_state)s expected: %(expected_state)s)" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" msgstr "" -#: cinder/compute/manager.py:1103 +#: cinder/openstack/common/notifier/api.py:164 #, python-format -msgid "instance %(instance_uuid)s: updating agent to %(url)s" +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead." msgstr "" -#: cinder/compute/manager.py:1116 +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 #, python-format -msgid "instance %s: rescuing" +msgid "Could not send notification to %(topic)s. Payload=%(message)s" msgstr "" -#: cinder/compute/manager.py:1141 +#: cinder/openstack/common/rpc/__init__.py:105 #, python-format -msgid "instance %s: unrescuing" +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." msgstr "" -#: cinder/compute/manager.py:1270 -msgid "destination same as source!" +#: cinder/openstack/common/rpc/amqp.py:83 +msgid "Pool creating new connection" msgstr "" -#: cinder/compute/manager.py:1287 +#: cinder/openstack/common/rpc/amqp.py:208 #, python-format -msgid "instance %s: migrating" +msgid "no calling threads waiting for msg_id : %s, message : %s" msgstr "" -#: cinder/compute/manager.py:1471 +#: cinder/openstack/common/rpc/amqp.py:216 #, python-format -msgid "instance %s: pausing" +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." msgstr "" -#: cinder/compute/manager.py:1489 +#: cinder/openstack/common/rpc/amqp.py:299 #, python-format -msgid "instance %s: unpausing" +msgid "unpacked context: %s" msgstr "" -#: cinder/compute/manager.py:1525 +#: cinder/openstack/common/rpc/amqp.py:345 #, python-format -msgid "instance %s: retrieving diagnostics" +msgid "UNIQUE_ID is %s." 
msgstr "" -#: cinder/compute/manager.py:1534 +#: cinder/openstack/common/rpc/amqp.py:414 #, python-format -msgid "instance %s: suspending" +msgid "received %s" msgstr "" -#: cinder/compute/manager.py:1556 +#: cinder/openstack/common/rpc/amqp.py:422 #, python-format -msgid "instance %s: resuming" +msgid "no method for message: %s" msgstr "" -#: cinder/compute/manager.py:1579 +#: cinder/openstack/common/rpc/amqp.py:423 #, python-format -msgid "instance %s: locking" +msgid "No method for message: %s" msgstr "" -#: cinder/compute/manager.py:1588 +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:280 #, python-format -msgid "instance %s: unlocking" +msgid "Expected exception during message handling (%s)" msgstr "" -#: cinder/compute/manager.py:1596 -#, python-format -msgid "instance %s: getting locked state" +#: cinder/openstack/common/rpc/amqp.py:459 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" msgstr "" -#: cinder/compute/manager.py:1606 +#: cinder/openstack/common/rpc/amqp.py:594 #, python-format -msgid "instance %s: reset network" +msgid "Making synchronous call on %s ..." msgstr "" -#: cinder/compute/manager.py:1614 +#: cinder/openstack/common/rpc/amqp.py:597 #, python-format -msgid "instance %s: inject network info" +msgid "MSG_ID is %s" msgstr "" -#: cinder/compute/manager.py:1618 +#: cinder/openstack/common/rpc/amqp.py:631 #, python-format -msgid "network_info to inject: |%s|" +msgid "Making asynchronous cast on %s..." msgstr "" -#: cinder/compute/manager.py:1655 -#, python-format -msgid "instance %s: getting vnc console" +#: cinder/openstack/common/rpc/amqp.py:640 +msgid "Making asynchronous fanout cast..." msgstr "" -#: cinder/compute/manager.py:1685 +#: cinder/openstack/common/rpc/amqp.py:668 #, python-format -msgid "Booting with volume %(volume_id)s at %(mountpoint)s" +msgid "Sending %(event_type)s on %(topic)s" msgstr "" -#: cinder/compute/manager.py:1703 +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:104 #, python-format msgid "" -"instance %(instance_uuid)s: attaching volume %(volume_id)s to " -"%(mountpoint)s" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." msgstr "" -#: cinder/compute/manager.py:1705 +#: cinder/openstack/common/rpc/common.py:121 #, python-format -msgid "Attaching volume %(volume_id)s to %(mountpoint)s" +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" msgstr "" -#: cinder/compute/manager.py:1714 -#, python-format -msgid "instance %(instance_uuid)s: attach failed %(mountpoint)s, removing" +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" msgstr "" -#: cinder/compute/manager.py:1724 +#: cinder/openstack/common/rpc/common.py:143 #, python-format -msgid "Attach failed %(mountpoint)s, removing" +msgid "Found duplicate message(%(msg_id)s). Skipping it." msgstr "" -#: cinder/compute/manager.py:1752 -#, python-format -msgid "Detach volume %(volume_id)s from mountpoint %(mp)s" +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." msgstr "" -#: cinder/compute/manager.py:1756 +#: cinder/openstack/common/rpc/common.py:151 #, python-format -msgid "Detaching volume from unknown instance %s" +msgid "Specified RPC version, %(version)s, not supported by this endpoint." 
msgstr "" -#: cinder/compute/manager.py:1822 +#: cinder/openstack/common/rpc/common.py:156 #, python-format msgid "" -"Creating tmpfile %s to notify to other compute nodes that they should " -"mount the same storage." +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." msgstr "" -#: cinder/compute/manager.py:1884 -msgid "Instance has no volume." -msgstr "" - -#: cinder/compute/manager.py:1916 +#: cinder/openstack/common/rpc/common.py:280 #, python-format -msgid "plug_vifs() failed %(cnt)d.Retry up to %(max_retry)d for %(hostname)s." +msgid "Failed to sanitize %(item)s. Key error %(err)s" msgstr "" -#: cinder/compute/manager.py:1973 +#: cinder/openstack/common/rpc/common.py:302 #, python-format -msgid "Pre live migration failed at %(dest)s" +msgid "Returning exception %s to caller" msgstr "" -#: cinder/compute/manager.py:2000 -msgid "post_live_migration() is started.." +#: cinder/openstack/common/rpc/impl_kombu.py:166 +#: cinder/openstack/common/rpc/impl_qpid.py:164 +msgid "Failed to process message... skipping it." msgstr "" -#: cinder/compute/manager.py:2030 -msgid "No floating_ip found" +#: cinder/openstack/common/rpc/impl_kombu.py:477 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" msgstr "" -#: cinder/compute/manager.py:2038 -msgid "No floating_ip found." +#: cinder/openstack/common/rpc/impl_kombu.py:499 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" msgstr "" -#: cinder/compute/manager.py:2040 +#: cinder/openstack/common/rpc/impl_kombu.py:536 #, python-format msgid "" -"Live migration: Unexpected error: cannot inherit floating ip.\n" -"%(e)s" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" msgstr "" -#: cinder/compute/manager.py:2073 +#: cinder/openstack/common/rpc/impl_kombu.py:552 #, python-format -msgid "Migrating instance to %(dest)s finished successfully." -msgstr "" - -#: cinder/compute/manager.py:2075 msgid "" -"You may see the error \"libvirt: QEMU error: Domain not found: no domain " -"with matching name.\" This error can be safely ignored." +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." msgstr "" -#: cinder/compute/manager.py:2090 -msgid "Post operation of migration started" +#: cinder/openstack/common/rpc/impl_kombu.py:606 +#: cinder/openstack/common/rpc/impl_qpid.py:507 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" msgstr "" -#: cinder/compute/manager.py:2226 +#: cinder/openstack/common/rpc/impl_kombu.py:624 +#: cinder/openstack/common/rpc/impl_qpid.py:522 #, python-format -msgid "Updated the info_cache for instance %s" +msgid "Timed out waiting for RPC response: %s" msgstr "" -#: cinder/compute/manager.py:2255 -msgid "Updating bandwidth usage cache" +#: cinder/openstack/common/rpc/impl_kombu.py:628 +#: cinder/openstack/common/rpc/impl_qpid.py:526 +#, python-format +msgid "Failed to consume message from queue: %s" msgstr "" -#: cinder/compute/manager.py:2277 -msgid "Updating host status" +#: cinder/openstack/common/rpc/impl_kombu.py:667 +#: cinder/openstack/common/rpc/impl_qpid.py:561 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" msgstr "" -#: cinder/compute/manager.py:2305 +#: cinder/openstack/common/rpc/impl_qpid.py:84 #, python-format -msgid "" -"Found %(num_db_instances)s in the database and %(num_vm_instances)s on " -"the hypervisor." 
+msgid "Invalid value for qpid_topology_version: %d" msgstr "" -#: cinder/compute/manager.py:2331 +#: cinder/openstack/common/rpc/impl_qpid.py:455 #, python-format -msgid "" -"During the sync_power process the instance %(uuid)s has moved from host " -"%(src)s to host %(dst)s" +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" msgstr "" -#: cinder/compute/manager.py:2344 +#: cinder/openstack/common/rpc/impl_qpid.py:461 #, python-format -msgid "" -"Instance %s is in the process of migrating to this host. Wait next " -"sync_power cycle before setting power state to NOSTATE" +msgid "Connected to AMQP server on %s" msgstr "" -#: cinder/compute/manager.py:2350 -msgid "" -"Instance found in database but not known by hypervisor. Setting power " -"state to NOSTATE" +#: cinder/openstack/common/rpc/impl_qpid.py:474 +msgid "Re-established AMQP queues" msgstr "" -#: cinder/compute/manager.py:2380 -msgid "FLAGS.reclaim_instance_interval <= 0, skipping..." +#: cinder/openstack/common/rpc/impl_qpid.py:534 +msgid "Error processing message. Skipping it." msgstr "" -#: cinder/compute/manager.py:2392 -msgid "Reclaiming deleted instance" +#: cinder/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." msgstr "" -#: cinder/compute/manager.py:2458 +#: cinder/openstack/common/rpc/impl_zmq.py:101 #, python-format -msgid "" -"Detected instance with name label '%(name)s' which is marked as DELETED " -"but still present on host." +msgid "Deserializing: %s" msgstr "" -#: cinder/compute/manager.py:2465 +#: cinder/openstack/common/rpc/impl_zmq.py:136 #, python-format -msgid "" -"Destroying instance with name label '%(name)s' which is marked as DELETED" -" but still present on host." +msgid "Connecting to %(addr)s with %(type)s" msgstr "" -#: cinder/compute/manager.py:2472 +#: cinder/openstack/common/rpc/impl_zmq.py:137 #, python-format -msgid "Unrecognized value '%(action)s' for FLAGS.running_deleted_instance_action" +msgid "-> Subscribed to %(subscribe)s" msgstr "" -#: cinder/compute/manager.py:2542 +#: cinder/openstack/common/rpc/impl_zmq.py:138 #, python-format -msgid "" -"Aggregate %(aggregate_id)s: unrecoverable state during operation on " -"%(host)s" +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:146 +msgid "Could not open socket." msgstr "" -#: cinder/compute/utils.py:142 -msgid "v4 subnets are required for legacy nw_info" +#: cinder/openstack/common/rpc/impl_zmq.py:158 +#, python-format +msgid "Subscribing to %s" msgstr "" -#: cinder/console/manager.py:77 cinder/console/vmrc_manager.py:70 -msgid "Adding console" +#: cinder/openstack/common/rpc/impl_zmq.py:200 +msgid "You cannot recv on this socket." msgstr "" -#: cinder/console/manager.py:97 -#, python-format -msgid "Tried to remove non-existent console %(console_id)s." +#: cinder/openstack/common/rpc/impl_zmq.py:205 +msgid "You cannot send on this socket." msgstr "" -#: cinder/console/vmrc_manager.py:122 +#: cinder/openstack/common/rpc/impl_zmq.py:267 #, python-format -msgid "Tried to remove non-existent console %(console_id)s." +msgid "Running func with context: %s" msgstr "" -#: cinder/console/vmrc_manager.py:125 -#, python-format -msgid "Removing console %(console_id)s." +#: cinder/openstack/common/rpc/impl_zmq.py:305 +msgid "Sending reply" msgstr "" -#: cinder/console/xvp.py:98 -msgid "Rebuilding xvp conf" +#: cinder/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." 
msgstr "" -#: cinder/console/xvp.py:116 -#, python-format -msgid "Re-wrote %s" +#: cinder/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" msgstr "" -#: cinder/console/xvp.py:121 -msgid "Stopping xvp" +#: cinder/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" msgstr "" -#: cinder/console/xvp.py:134 -msgid "Starting xvp" +#: cinder/openstack/common/rpc/impl_zmq.py:387 +msgid "Consuming socket" msgstr "" -#: cinder/console/xvp.py:141 +#: cinder/openstack/common/rpc/impl_zmq.py:437 #, python-format -msgid "Error starting xvp: %s" +msgid "Creating proxy for topic: %s" msgstr "" -#: cinder/console/xvp.py:144 -msgid "Restarting xvp" +#: cinder/openstack/common/rpc/impl_zmq.py:443 +msgid "Topic contained dangerous characters." msgstr "" -#: cinder/console/xvp.py:146 -msgid "xvp not running..." +#: cinder/openstack/common/rpc/impl_zmq.py:475 +msgid "Topic socket file creation failed." msgstr "" -#: cinder/consoleauth/manager.py:63 +#: cinder/openstack/common/rpc/impl_zmq.py:481 #, python-format -msgid "Deleting Expired Token: (%s)" +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." msgstr "" -#: cinder/consoleauth/manager.py:75 +#: cinder/openstack/common/rpc/impl_zmq.py:497 #, python-format -msgid "Received Token: %(token)s, %(token_dict)s)" +msgid "Required IPC directory does not exist at %s" msgstr "" -#: cinder/consoleauth/manager.py:79 +#: cinder/openstack/common/rpc/impl_zmq.py:506 #, python-format -msgid "Checking Token: %(token)s, %(token_valid)s)" +msgid "Permission denied to IPC directory at %s" msgstr "" -#: cinder/db/sqlalchemy/api.py:57 -msgid "Use of empty request context is deprecated" +#: cinder/openstack/common/rpc/impl_zmq.py:509 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." msgstr "" -#: cinder/db/sqlalchemy/api.py:198 +#: cinder/openstack/common/rpc/impl_zmq.py:543 #, python-format -msgid "Unrecognized read_deleted value '%s'" +msgid "CONSUMER RECEIVED DATA: %s" msgstr "" -#: cinder/db/sqlalchemy/api.py:516 cinder/db/sqlalchemy/api.py:551 -#, python-format -msgid "No ComputeNode for %(host)s" +#: cinder/openstack/common/rpc/impl_zmq.py:562 +msgid "ZMQ Envelope version unsupported or unknown." msgstr "" -#: cinder/db/sqlalchemy/api.py:4019 cinder/db/sqlalchemy/api.py:4045 -#, python-format -msgid "No backend config with id %(sm_backend_id)s" -msgstr "" - -#: cinder/db/sqlalchemy/api.py:4103 -#, python-format -msgid "No sm_flavor called %(sm_flavor)s" +#: cinder/openstack/common/rpc/impl_zmq.py:590 +msgid "Skipping topic registration. Already registered." msgstr "" -#: cinder/db/sqlalchemy/api.py:4147 +#: cinder/openstack/common/rpc/impl_zmq.py:597 #, python-format -msgid "No sm_volume with id %(volume_id)s" +msgid "Consumer is a zmq.%s" msgstr "" -#: cinder/db/sqlalchemy/migration.py:66 -msgid "python-migrate is not installed. Exiting." +#: cinder/openstack/common/rpc/impl_zmq.py:649 +msgid "Creating payload" msgstr "" -#: cinder/db/sqlalchemy/migration.py:78 -msgid "version should be an integer" +#: cinder/openstack/common/rpc/impl_zmq.py:662 +msgid "Creating queue socket for reply waiter" msgstr "" -#: cinder/db/sqlalchemy/session.py:137 -#, python-format -msgid "SQL connection failed. %s attempts left." 
+#: cinder/openstack/common/rpc/impl_zmq.py:675
+msgid "Sending cast"
 msgstr ""

-#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:48
-msgid "interface column not added to networks table"
+#: cinder/openstack/common/rpc/impl_zmq.py:678
+msgid "Cast sent; Waiting reply"
 msgstr ""

-#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:80
-#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:54
-#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:61
-#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:48
+#: cinder/openstack/common/rpc/impl_zmq.py:681
 #, python-format
-msgid "Table |%s| not created!"
+msgid "Received message: %s"
 msgstr ""

-#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:87
-msgid "VIF column not added to fixed_ips table"
+#: cinder/openstack/common/rpc/impl_zmq.py:682
+msgid "Unpacking response"
 msgstr ""

-#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:97
-#, python-format
-msgid "join list for moving mac_addresses |%s|"
+#: cinder/openstack/common/rpc/impl_zmq.py:691
+msgid "Unsupported or unknown ZMQ envelope returned."
 msgstr ""

-#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:39
-#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:60
-#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:61
-#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:99
-msgid "foreign key constraint couldn't be added"
+#: cinder/openstack/common/rpc/impl_zmq.py:698
+msgid "RPC Message Invalid."
 msgstr ""

-#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:58
-msgid "foreign key constraint couldn't be dropped"
+#: cinder/openstack/common/rpc/impl_zmq.py:721
+#, python-format
+msgid "%(msg)s"
 msgstr ""

-#: cinder/db/sqlalchemy/migrate_repo/versions/045_add_network_priority.py:34
-msgid "priority column not added to networks table"
+#: cinder/openstack/common/rpc/impl_zmq.py:724
+#, python-format
+msgid "Sending message(s) to: %s"
 msgstr ""

-#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:41
-#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:42
-#: cinder/db/sqlalchemy/migrate_repo/versions/064_change_instance_id_to_uuid_in_instance_actions.py:56
-#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:68
-msgid "foreign key constraint couldn't be removed"
+#: cinder/openstack/common/rpc/impl_zmq.py:728
+msgid "No matchmaker results. Not casting."
 msgstr ""

-#: cinder/db/sqlalchemy/migrate_repo/versions/049_add_instances_progress.py:34
-msgid "progress column not added to instances table"
+#: cinder/openstack/common/rpc/impl_zmq.py:731
+msgid "No match from matchmaker."
 msgstr ""

-#: cinder/db/sqlalchemy/migrate_repo/versions/055_convert_flavor_id_to_str.py:97
+#: cinder/openstack/common/rpc/impl_zmq.py:771
 #, python-format
-msgid ""
-"Could not cast flavorid to integer: %s. Set flavorid to an integer-like "
-"string to downgrade."
+msgid "topic is %s."
msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:69 -msgid "instance_info_caches tables not dropped" -msgstr "" - -#: cinder/db/sqlalchemy/migrate_repo/versions/069_block_migration.py:41 -msgid "progress column not added to compute_nodes table" +#: cinder/openstack/common/rpc/impl_zmq.py:815 +#, python-format +msgid "rpc_zmq_matchmaker = %(orig)s is deprecated; use %(new)s instead" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:76 -msgid "dns_domains table not dropped" +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:60 -msgid "quota_classes table not dropped" +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." msgstr "" -#: cinder/image/glance.py:147 -msgid "Connection error contacting glance server, retrying" +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" msgstr "" -#: cinder/image/glance.py:153 cinder/network/quantum/melange_connection.py:104 -msgid "Maximum attempts reached" +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." msgstr "" -#: cinder/image/glance.py:278 +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#: cinder/openstack/common/rpc/matchmaker_ring.py:79 +#: cinder/openstack/common/rpc/matchmaker_ring.py:97 #, python-format -msgid "Creating image in Glance. Metadata passed in %s" +msgid "No key defining hosts for topic '%s', see ringfile" msgstr "" -#: cinder/image/glance.py:281 +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:54 #, python-format -msgid "Metadata after formatting for Glance %s" +msgid "extra_spec requirement '%(req)s' does not match '%(cap)s'" msgstr "" -#: cinder/image/glance.py:289 +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:67 #, python-format -msgid "Metadata returned from Glance formatted for Base %s" +msgid "%(host_state)s fails resource_type extra_specs requirements" msgstr "" -#: cinder/image/glance.py:331 cinder/image/glance.py:335 -msgid "Not the image owner" +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:43 +msgid "Re-scheduling is disabled." msgstr "" -#: cinder/image/glance.py:410 +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:52 #, python-format -msgid "%(timestamp)s does not follow any of the signatures: %(iso_formats)s" +msgid "Host %(host)s %(pass_msg)s. 
 msgstr ""

-#: cinder/image/s3.py:309
-#, python-format
-msgid "Failed to download %(image_location)s to %(image_path)s"
+#: cinder/scheduler/driver.py:69
+msgid "Must implement host_passes_filters"
 msgstr ""

-#: cinder/image/s3.py:328
-#, python-format
-msgid "Failed to decrypt %(image_location)s to %(image_path)s"
+#: cinder/scheduler/driver.py:74
+msgid "Must implement find_retype_host"
 msgstr ""

-#: cinder/image/s3.py:340
-#, python-format
-msgid "Failed to untar %(image_location)s to %(image_path)s"
+#: cinder/scheduler/driver.py:78
+msgid "Must implement a fallback schedule"
 msgstr ""

-#: cinder/image/s3.py:353
-#, python-format
-msgid "Failed to upload %(image_location)s to %(image_path)s"
+#: cinder/scheduler/driver.py:82
+msgid "Must implement schedule_create_volume"
 msgstr ""

-#: cinder/image/s3.py:379
+#: cinder/scheduler/filter_scheduler.py:98
 #, python-format
-msgid "Failed to decrypt private key: %s"
+msgid "cannot place volume %(id)s on %(host)s"
 msgstr ""

-#: cinder/image/s3.py:387
+#: cinder/scheduler/filter_scheduler.py:114
 #, python-format
-msgid "Failed to decrypt initialization vector: %s"
+msgid "No valid hosts for volume %(id)s with type %(type)s"
 msgstr ""

-#: cinder/image/s3.py:398
+#: cinder/scheduler/filter_scheduler.py:125
 #, python-format
-msgid "Failed to decrypt image file %(image_file)s: %(err)s"
+msgid ""
+"Current host not valid for volume %(id)s with type %(type)s, migration "
+"not allowed"
 msgstr ""

-#: cinder/image/s3.py:410
-msgid "Unsafe filenames in image"
+#: cinder/scheduler/filter_scheduler.py:156
+msgid "Invalid value for 'scheduler_max_attempts', must be >=1"
 msgstr ""

-#: cinder/ipv6/account_identifier.py:38 cinder/ipv6/rfc2462.py:34
+#: cinder/scheduler/filter_scheduler.py:174
 #, python-format
-msgid "Bad mac for to_global_ipv6: %s"
+msgid ""
+"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : "
+"%(exc)s"
 msgstr ""

-#: cinder/ipv6/account_identifier.py:40 cinder/ipv6/rfc2462.py:36
+#: cinder/scheduler/filter_scheduler.py:207
 #, python-format
-msgid "Bad prefix for to_global_ipv6: %s"
+msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s"
 msgstr ""

-#: cinder/ipv6/account_identifier.py:42
+#: cinder/scheduler/filter_scheduler.py:259
 #, python-format
-msgid "Bad project_id for to_global_ipv6: %s"
+msgid "Filtered %s"
 msgstr ""

-#: cinder/network/ldapdns.py:321
-msgid "This driver only supports type 'a' entries."
+#: cinder/scheduler/filter_scheduler.py:276
+#, python-format
+msgid "Choosing %s"
 msgstr ""

-#: cinder/network/linux_net.py:166
+#: cinder/scheduler/host_manager.py:264
 #, python-format
-msgid "Attempted to remove chain %s which does not exist"
+msgid "Ignoring %(service_name)s service update from %(host)s"
 msgstr ""

-#: cinder/network/linux_net.py:192
+#: cinder/scheduler/host_manager.py:269
 #, python-format
-msgid "Unknown chain: %r"
+msgid "Received %(service_name)s service update from %(host)s."
 msgstr ""

-#: cinder/network/linux_net.py:215
+#: cinder/scheduler/host_manager.py:294
 #, python-format
-msgid ""
-"Tried to remove rule that was not there: %(chain)r %(rule)r %(wrap)r "
-"%(top)r"
+msgid "volume service is down or disabled. (host: %s)"
 msgstr ""

-#: cinder/network/linux_net.py:335
-msgid "IPTablesManager.apply completed with success"
+#: cinder/scheduler/manager.py:63
+msgid ""
+"ChanceScheduler and SimpleScheduler have been deprecated due to lack of "
+"support for advanced features like: volume types, volume encryption, QoS "
+"etc. These two schedulers can be fully replaced by FilterScheduler with "
+"certain combination of filters and weighers."
 msgstr ""

-#: cinder/network/linux_net.py:694
-#, python-format
-msgid "Hupping dnsmasq threw %s"
+#: cinder/scheduler/manager.py:98 cinder/scheduler/manager.py:100
+msgid "Failed to create scheduler manager volume flow"
 msgstr ""

-#: cinder/network/linux_net.py:696
-#, python-format
-msgid "Pid %d is stale, relaunching dnsmasq"
+#: cinder/scheduler/manager.py:159
+msgid "New volume type not specified in request_spec."
 msgstr ""

-#: cinder/network/linux_net.py:756
+#: cinder/scheduler/manager.py:174
 #, python-format
-msgid "killing radvd threw %s"
+msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s."
 msgstr ""

-#: cinder/network/linux_net.py:758
+#: cinder/scheduler/manager.py:192
 #, python-format
-msgid "Pid %d is stale, relaunching radvd"
+msgid "Failed to schedule_%(method)s: %(ex)s"
 msgstr ""

-#: cinder/network/linux_net.py:967
+#: cinder/scheduler/scheduler_options.py:68
 #, python-format
-msgid "Starting VLAN inteface %s"
+msgid "Could not stat scheduler options file %(filename)s: '%(e)s'"
 msgstr ""

-#: cinder/network/linux_net.py:999
+#: cinder/scheduler/scheduler_options.py:78
 #, python-format
-msgid "Starting Bridge interface for %s"
+msgid "Could not decode scheduler options: '%s'"
 msgstr ""

-#: cinder/network/linux_net.py:1142
-#, python-format
-msgid "Starting bridge %s "
+#: cinder/scheduler/filters/capacity_filter.py:43
+msgid "Free capacity not set: volume node info collection broken."
 msgstr ""

-#: cinder/network/linux_net.py:1149
+#: cinder/scheduler/filters/capacity_filter.py:57
 #, python-format
-msgid "Done starting bridge %s"
+msgid ""
+"Insufficient free space for volume creation (requested / avail): "
+"%(requested)s/%(available)s"
 msgstr ""

-#: cinder/network/linux_net.py:1167
-#, python-format
-msgid "Failed unplugging gateway interface '%s'"
+#: cinder/scheduler/flows/create_volume.py:53
+msgid "No volume_id provided to populate a request_spec from"
 msgstr ""

-#: cinder/network/linux_net.py:1170
+#: cinder/scheduler/flows/create_volume.py:116
 #, python-format
-msgid "Unplugged gateway interface '%s'"
+msgid "Failed to schedule_create_volume: %(cause)s"
 msgstr ""

-#: cinder/network/manager.py:291
+#: cinder/scheduler/flows/create_volume.py:135
 #, python-format
-msgid "Fixed ip %(fixed_ip_id)s not found"
+msgid "Failed notifying on %(topic)s payload %(payload)s"
 msgstr ""

-#: cinder/network/manager.py:300 cinder/network/manager.py:496
+#: cinder/tests/fake_driver.py:57 cinder/volume/driver.py:784
 #, python-format
-msgid "Interface %(interface)s not found"
+msgid "FAKE ISCSI: %s"
 msgstr ""

-#: cinder/network/manager.py:315
+#: cinder/tests/fake_driver.py:76 cinder/volume/driver.py:884
 #, python-format
-msgid "floating IP allocation for instance |%s|"
+msgid "FAKE ISER: %s"
 msgstr ""

-#: cinder/network/manager.py:353
-#, python-format
-msgid "floating IP deallocation for instance |%s|"
+#: cinder/tests/fake_driver.py:97
+msgid "local_path not implemented"
 msgstr ""

-#: cinder/network/manager.py:386
+#: cinder/tests/fake_driver.py:124 cinder/tests/fake_driver.py:129
 #, python-format
-msgid "Address |%(address)s| is not allocated"
+msgid "LoggingVolumeDriver: %s"
 msgstr ""

-#: cinder/network/manager.py:390
+#: cinder/tests/fake_utils.py:70
 #, python-format
-msgid "Address |%(address)s| is not allocated to your project |%(project)s|"
+msgid "Faking execution of cmd (subprocess): %s"
 msgstr ""

-#: cinder/network/manager.py:402
+#: cinder/tests/fake_utils.py:78
 #, python-format
-msgid "Quota exceeded for %s, tried to allocate address"
+msgid "Faked command matched %s"
 msgstr ""

-#: cinder/network/manager.py:614
+#: cinder/tests/fake_utils.py:94
 #, python-format
-msgid ""
-"Database inconsistency: DNS domain |%s| is registered in the Cinder db but "
-"not visible to either the floating or instance DNS driver. It will be "
-"ignored."
+msgid "Faked command raised an exception %s"
 msgstr ""

-#: cinder/network/manager.py:660
+#: cinder/tests/fake_utils.py:97
 #, python-format
-msgid "Domain |%(domain)s| already exists, changing zone to |%(av_zone)s|."
+msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'"
 msgstr ""

-#: cinder/network/manager.py:670
+#: cinder/tests/test_misc.py:58
 #, python-format
-msgid "Domain |%(domain)s| already exists, changing project to |%(project)s|."
+msgid ""
+"The following migrations are missing a downgrade:\n"
+"\t%s"
 msgstr ""

-#: cinder/network/manager.py:778
+#: cinder/tests/test_netapp_nfs.py:360
 #, python-format
-msgid "Disassociated %s stale fixed ip(s)"
+msgid "Share %(share)s and file name %(file_name)s"
 msgstr ""

-#: cinder/network/manager.py:782
-msgid "setting network host"
+#: cinder/tests/test_rbd.py:768 cinder/volume/drivers/rbd.py:175
+msgid "flush() not supported in this version of librbd"
 msgstr ""

-#: cinder/network/manager.py:896
+#: cinder/tests/test_storwize_svc.py:260
 #, python-format
-msgid "network allocations for instance |%s|"
+msgid "unrecognized argument %s"
 msgstr ""

-#: cinder/network/manager.py:901
+#: cinder/tests/test_storwize_svc.py:1507
 #, python-format
-msgid "networks retrieved for instance |%(instance_id)s|: |%(networks)s|"
+msgid "Run CLI command: %s"
 msgstr ""

-#: cinder/network/manager.py:930
+#: cinder/tests/test_storwize_svc.py:1510
 #, python-format
-msgid "network deallocation for instance |%s|"
+msgid ""
+"CLI output:\n"
+" stdout: %(stdout)s\n"
+" stderr: %(stderr)s"
 msgstr ""

-#: cinder/network/manager.py:1152
+#: cinder/tests/test_storwize_svc.py:1515
 #, python-format
 msgid ""
-"instance-dns-zone is |%(domain)s|, which is in availability zone "
-"|%(zone)s|. Instance |%(instance)s| is in zone |%(zone2)s|. No DNS record"
-" will be created."
+"CLI Exception output:\n"
+" stdout: %(out)s\n"
+" stderr: %(err)s"
 msgstr ""

-#: cinder/network/manager.py:1227
+#: cinder/tests/test_volume_types.py:60
 #, python-format
-msgid "Unable to release %s because vif doesn't exist."
+msgid "Given data: %s"
 msgstr ""

-#: cinder/network/manager.py:1244
+#: cinder/tests/test_volume_types.py:61
 #, python-format
-msgid "Leased IP |%(address)s|"
+msgid "Result data: %s"
 msgstr ""

-#: cinder/network/manager.py:1248
+#: cinder/tests/test_xiv_ds8k.py:102
 #, python-format
-msgid "IP %s leased that is not associated"
+msgid "Volume not found for instance %(instance_id)s."
msgstr "" -#: cinder/network/manager.py:1256 -#, python-format -msgid "IP |%s| leased that isn't allocated" +#: cinder/tests/api/contrib/test_backups.py:741 +msgid "Invalid input" msgstr "" -#: cinder/network/manager.py:1261 +#: cinder/tests/integrated/test_login.py:29 #, python-format -msgid "Released IP |%(address)s|" +msgid "volume: %s" msgstr "" -#: cinder/network/manager.py:1265 +#: cinder/tests/integrated/api/client.py:32 #, python-format -msgid "IP %s released that is not associated" +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" msgstr "" -#: cinder/network/manager.py:1268 -#, python-format -msgid "IP %s released that was not leased" +#: cinder/tests/integrated/api/client.py:42 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:50 +msgid "Authorization error" msgstr "" -#: cinder/network/manager.py:1331 -msgid "cidr already in use" +#: cinder/tests/integrated/api/client.py:58 +msgid "Item not found" msgstr "" -#: cinder/network/manager.py:1334 +#: cinder/tests/integrated/api/client.py:100 #, python-format -msgid "requested cidr (%(cidr)s) conflicts with existing supernet (%(super)s)" +msgid "Doing %(method)s on %(relative_url)s" msgstr "" -#: cinder/network/manager.py:1345 +#: cinder/tests/integrated/api/client.py:103 #, python-format -msgid "" -"requested cidr (%(cidr)s) conflicts with existing smaller cidr " -"(%(smaller)s)" +msgid "Body: %s" msgstr "" -#: cinder/network/manager.py:1404 -msgid "Network already exists!" +#: cinder/tests/integrated/api/client.py:121 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" msgstr "" -#: cinder/network/manager.py:1423 +#: cinder/tests/integrated/api/client.py:148 #, python-format -msgid "Network must be disassociated from project %s before delete" +msgid "%(relative_uri)s => code %(http_status)s" msgstr "" -#: cinder/network/manager.py:1832 -msgid "" -"The sum between the number of networks and the vlan start cannot be " -"greater than 4094" +#: cinder/tests/integrated/api/client.py:159 +msgid "Unexpected status code" msgstr "" -#: cinder/network/manager.py:1839 +#: cinder/tests/integrated/api/client.py:166 #, python-format -msgid "" -"The network range is not big enough to fit %(num_networks)s. Network size" -" is %(network_size)s" +msgid "Decoding JSON: %s" msgstr "" -#: cinder/network/minidns.py:65 -msgid "This driver only supports type 'a'" +#: cinder/transfer/api.py:68 +msgid "Volume in unexpected state" msgstr "" -#: cinder/network/quantum/client.py:154 -msgid "Tenant ID not set" +#: cinder/transfer/api.py:102 cinder/volume/api.py:367 +msgid "status must be available" msgstr "" -#: cinder/network/quantum/client.py:180 +#: cinder/transfer/api.py:119 #, python-format -msgid "Quantum Client Request: %(method)s %(action)s" +msgid "Failed to create transfer record for %s" msgstr "" -#: cinder/network/quantum/client.py:196 +#: cinder/transfer/api.py:136 #, python-format -msgid "Quantum entity not found: %s" +msgid "Attempt to transfer %s with invalid auth key." msgstr "" -#: cinder/network/quantum/client.py:206 +#: cinder/transfer/api.py:156 cinder/volume/flows/api/create_volume.py:508 #, python-format -msgid "Server %(status_code)s error: %(data)s" +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" msgstr "" -#: cinder/network/quantum/client.py:210 +#: cinder/transfer/api.py:182 #, python-format -msgid "Unable to connect to server. 
Got error: %s" +msgid "Failed to update quota donating volumetransfer id %s" msgstr "" -#: cinder/network/quantum/client.py:228 +#: cinder/transfer/api.py:199 #, python-format -msgid "unable to deserialize object of type = '%s'" +msgid "Volume %s has been transferred." msgstr "" -#: cinder/network/quantum/manager.py:162 -msgid "QuantumManager does not use 'multi_host' parameter." +#: cinder/volume/api.py:143 +#, python-format +msgid "Unable to query if %s is in the availability zone set" msgstr "" -#: cinder/network/quantum/manager.py:166 -msgid "QuantumManager requires that only one network is created per call" +#: cinder/volume/api.py:171 cinder/volume/api.py:173 +msgid "Failed to create api volume flow" msgstr "" -#: cinder/network/quantum/manager.py:176 -msgid "QuantumManager does not use 'vlan_start' parameter." +#: cinder/volume/api.py:202 +msgid "Failed to update quota for deleting volume" msgstr "" -#: cinder/network/quantum/manager.py:182 -msgid "QuantumManager does not use 'vpn_start' parameter." +#: cinder/volume/api.py:214 +#, python-format +msgid "Volume status must be available or error, but current status is: %s" msgstr "" -#: cinder/network/quantum/manager.py:186 -msgid "QuantumManager does not use 'bridge' parameter." +#: cinder/volume/api.py:224 +msgid "Volume cannot be deleted while migrating" msgstr "" -#: cinder/network/quantum/manager.py:190 -msgid "QuantumManager does not use 'bridge_interface' parameter." +#: cinder/volume/api.py:229 +#, python-format +msgid "Volume still has %d dependent snapshots" msgstr "" -#: cinder/network/quantum/manager.py:195 -msgid "QuantumManager requires a valid (.1) gateway address." +#: cinder/volume/api.py:285 cinder/volume/api.py:350 +#: cinder/volume/qos_specs.py:240 cinder/volume/volume_types.py:67 +#, python-format +msgid "Searching by: %s" msgstr "" -#: cinder/network/quantum/manager.py:204 -#, python-format -msgid "" -"Unable to find existing quantum network for tenant '%(q_tenant_id)s' with" -" net-id '%(quantum_net_id)s'" +#: cinder/volume/api.py:370 +msgid "already attached" msgstr "" -#: cinder/network/quantum/manager.py:301 -#, python-format -msgid "network allocations for instance %s" +#: cinder/volume/api.py:377 +msgid "status must be in-use to detach" msgstr "" -#: cinder/network/quantum/manager.py:588 -#, python-format -msgid "" -"port deallocation failed for instance: |%(instance_id)s|, port_id: " -"|%(port_id)s|" +#: cinder/volume/api.py:388 +msgid "Volume status must be available to reserve" msgstr "" -#: cinder/network/quantum/manager.py:606 -#, python-format -msgid "" -"ipam deallocation failed for instance: |%(instance_id)s|, vif_uuid: " -"|%(vif_uuid)s|" +#: cinder/volume/api.py:464 +msgid "Snapshot cannot be created while volume is migrating" msgstr "" -#: cinder/network/quantum/melange_connection.py:96 -#, python-format -msgid "Server returned error: %s" +#: cinder/volume/api.py:468 +msgid "must be available" msgstr "" -#: cinder/network/quantum/melange_connection.py:98 -msgid "Connection error contacting melange service, retrying" +#: cinder/volume/api.py:490 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" msgstr "" -#: cinder/network/quantum/melange_connection.py:108 +#: cinder/volume/api.py:502 #, python-format msgid "" -"allocate IP on network |%(network_id)s| belonging to " -"|%(network_tenant_id)s| to this vif |%(vif_id)s| with mac " -"|%(mac_address)s| belonging to |%(project_id)s| " +"Quota exceeded 
+"snapshots already consumed)"
 msgstr ""

-#: cinder/network/quantum/melange_ipam_lib.py:133
-msgid "get_project_and_global_net_ids must be called with a non-null project_id"
+#: cinder/volume/api.py:553
+msgid "Volume Snapshot status must be available or error"
 msgstr ""

-#: cinder/network/quantum/cinder_ipam_lib.py:75
-msgid "Error creating network entry"
+#: cinder/volume/api.py:581 cinder/volume/flows/api/create_volume.py:208
+msgid "Metadata property key blank"
 msgstr ""

-#: cinder/network/quantum/cinder_ipam_lib.py:90
-#, python-format
-msgid "No network with net_id = %s"
+#: cinder/volume/api.py:585
+msgid "Metadata property key greater than 255 characters"
 msgstr ""

-#: cinder/network/quantum/cinder_ipam_lib.py:221
-#, python-format
-msgid "No fixed IPs to deallocate for vif %s"
+#: cinder/volume/api.py:589
+msgid "Metadata property value greater than 255 characters"
 msgstr ""

-#: cinder/network/quantum/quantum_connection.py:99
-#, python-format
-msgid "Connecting interface %(interface_id)s to net %(net_id)s for %(tenant_id)s"
+#: cinder/volume/api.py:720 cinder/volume/api.py:772
+msgid "Volume status must be available/in-use."
 msgstr ""

-#: cinder/network/quantum/quantum_connection.py:113
-#, python-format
-msgid "Deleting port %(port_id)s on net %(net_id)s for %(tenant_id)s"
+#: cinder/volume/api.py:723
+msgid "Volume status is in-use."
 msgstr ""

-#: cinder/notifier/api.py:115
-#, python-format
-msgid "%s not in valid priorities"
+#: cinder/volume/api.py:752
+msgid "Volume status must be available to extend."
 msgstr ""

-#: cinder/notifier/api.py:130
+#: cinder/volume/api.py:757
 #, python-format
 msgid ""
-"Problem '%(e)s' attempting to send to notification system. "
-"Payload=%(payload)s"
+"New size for extend must be greater than current size. (current: "
+"%(size)s, extended: %(new_size)s)"
 msgstr ""

-#: cinder/notifier/list_notifier.py:65
-#, python-format
-msgid "Problem '%(e)s' attempting to send to notification driver %(driver)s."
+#: cinder/volume/api.py:778
+msgid "Volume is already part of an active migration"
 msgstr ""

-#: cinder/notifier/rabbit_notifier.py:46
-#, python-format
-msgid "Could not send notification to %(topic)s. Payload=%(message)s"
+#: cinder/volume/api.py:784
+msgid "volume must not have snapshots"
 msgstr ""

-#: cinder/rpc/amqp.py:146
+#: cinder/volume/api.py:797
 #, python-format
-msgid "Returning exception %s to caller"
+msgid "No available service named %s"
 msgstr ""

-#: cinder/rpc/amqp.py:188
-#, python-format
-msgid "unpacked context: %s"
+#: cinder/volume/api.py:803
+msgid "Destination host must be different than current host"
 msgstr ""

-#: cinder/rpc/amqp.py:231
-#, python-format
-msgid "received %s"
+#: cinder/volume/api.py:833
+msgid "Source volume not mid-migration."
 msgstr ""

-#: cinder/rpc/amqp.py:236
-#, python-format
-msgid "no method for message: %s"
+#: cinder/volume/api.py:837
+msgid "Destination volume not mid-migration."
 msgstr ""

-#: cinder/rpc/amqp.py:237
+#: cinder/volume/api.py:842
 #, python-format
-msgid "No method for message: %s"
+msgid "Destination has migration_status %(stat)s, expected %(exp)s."
 msgstr ""

-#: cinder/rpc/amqp.py:321
-#, python-format
-msgid "Making asynchronous call on %s ..."
+#: cinder/volume/api.py:853
+msgid "Volume status must be available to update readonly flag."
msgstr "" -#: cinder/rpc/amqp.py:324 +#: cinder/volume/api.py:862 #, python-format -msgid "MSG_ID is %s" +msgid "Unable to update type due to incorrect status on volume: %s" msgstr "" -#: cinder/rpc/amqp.py:346 +#: cinder/volume/api.py:868 #, python-format -msgid "Making asynchronous cast on %s..." +msgid "Volume %s is already part of an active migration." msgstr "" -#: cinder/rpc/amqp.py:354 -msgid "Making asynchronous fanout cast..." +#: cinder/volume/api.py:874 +#, python-format +msgid "migration_policy must be 'on-demand' or 'never', passed: %s" msgstr "" -#: cinder/rpc/amqp.py:379 +#: cinder/volume/api.py:887 #, python-format -msgid "Sending notification on %s..." +msgid "Invalid volume_type passed: %s" msgstr "" -#: cinder/rpc/common.py:54 +#: cinder/volume/api.py:900 #, python-format -msgid "" -"Remote error: %(exc_type)s %(value)s\n" -"%(traceback)s." +msgid "New volume_type same as original: %s" msgstr "" -#: cinder/rpc/common.py:71 -msgid "Timeout while waiting on RPC response." +#: cinder/volume/api.py:915 +msgid "Retype cannot change encryption requirements" msgstr "" -#: cinder/rpc/impl_kombu.py:111 -msgid "Failed to process message... skipping it." +#: cinder/volume/api.py:927 +msgid "Retype cannot change front-end qos specs for in-use volumes" msgstr "" -#: cinder/rpc/impl_kombu.py:407 +#: cinder/volume/driver.py:189 cinder/volume/drivers/netapp/nfs.py:174 #, python-format -msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgid "Recovering from a failed execute. Try number %s" msgstr "" -#: cinder/rpc/impl_kombu.py:430 +#: cinder/volume/driver.py:282 #, python-format -msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgid "copy_data_between_volumes %(src)s -> %(dest)s." msgstr "" -#: cinder/rpc/impl_kombu.py:466 +#: cinder/volume/driver.py:295 cinder/volume/driver.py:309 #, python-format -msgid "" -"Unable to connect to AMQP server on %(hostname)s:%(port)d after " -"%(max_retries)d tries: %(err_str)s" +msgid "Failed to attach volume %(vol)s" msgstr "" -#: cinder/rpc/impl_kombu.py:482 +#: cinder/volume/driver.py:327 #, python-format -msgid "" -"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " -"again in %(sleep_time)d seconds." +msgid "Failed to copy volume %(src)s to %(dest)d" msgstr "" -#: cinder/rpc/impl_kombu.py:533 cinder/rpc/impl_qpid.py:385 +#: cinder/volume/driver.py:340 #, python-format -msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgid "copy_image_to_volume %s." msgstr "" -#: cinder/rpc/impl_kombu.py:551 cinder/rpc/impl_qpid.py:400 +#: cinder/volume/driver.py:358 #, python-format -msgid "Timed out waiting for RPC response: %s" +msgid "copy_volume_to_image %s." msgstr "" -#: cinder/rpc/impl_kombu.py:555 cinder/rpc/impl_qpid.py:404 +#: cinder/volume/driver.py:394 #, python-format -msgid "Failed to consume message from queue: %s" +msgid "Unable to access the backend storage via the path %(path)s." msgstr "" -#: cinder/rpc/impl_kombu.py:589 cinder/rpc/impl_qpid.py:434 +#: cinder/volume/driver.py:433 #, python-format -msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgid "Creating a new backup for volume %s." msgstr "" -#: cinder/rpc/impl_qpid.py:341 +#: cinder/volume/driver.py:451 #, python-format -msgid "Unable to connect to AMQP server: %s " +msgid "Restoring backup %(backup)s to volume %(volume)s." 
msgstr "" -#: cinder/rpc/impl_qpid.py:346 -#, python-format -msgid "Connected to AMQP server on %s" +#: cinder/volume/driver.py:474 +msgid "Extend volume not implemented" msgstr "" -#: cinder/rpc/impl_qpid.py:354 -msgid "Re-established AMQP queues" +#: cinder/volume/driver.py:533 cinder/volume/drivers/emc/emc_smis_iscsi.py:113 +msgid "ISCSI provider_location not stored, using discovery" msgstr "" -#: cinder/rpc/impl_qpid.py:412 -msgid "Error processing message. Skipping it." +#: cinder/volume/driver.py:546 +#, python-format +msgid "ISCSI discovery attempt failed for:%s" msgstr "" -#: cinder/scheduler/chance.py:47 cinder/scheduler/simple.py:91 -#: cinder/scheduler/simple.py:143 -msgid "Is the appropriate service running?" +#: cinder/volume/driver.py:548 +#, python-format +msgid "Error from iscsiadm -m discovery: %s" msgstr "" -#: cinder/scheduler/chance.py:52 -msgid "Could not find another compute" +#: cinder/volume/driver.py:595 +#, python-format +msgid "Could not find iSCSI export for volume %s" msgstr "" -#: cinder/scheduler/driver.py:63 +#: cinder/volume/driver.py:599 cinder/volume/drivers/emc/emc_smis_iscsi.py:156 #, python-format -msgid "Casted '%(method)s' to volume '%(host)s'" +msgid "ISCSI Discovery: Found %s" msgstr "" -#: cinder/scheduler/driver.py:80 -#, python-format -msgid "Casted '%(method)s' to compute '%(host)s'" +#: cinder/volume/driver.py:696 +msgid "The volume driver requires the iSCSI initiator name in the connector." msgstr "" -#: cinder/scheduler/driver.py:89 -#, python-format -msgid "Casted '%(method)s' to network '%(host)s'" +#: cinder/volume/driver.py:726 cinder/volume/driver.py:845 +#: cinder/volume/drivers/eqlx.py:247 cinder/volume/drivers/lvm.py:359 +#: cinder/volume/drivers/zadara.py:650 +#: cinder/volume/drivers/emc/emc_smis_common.py:859 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:235 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:602 +#: cinder/volume/drivers/netapp/iscsi.py:1032 +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/nexenta/iscsi.py:538 +#: cinder/volume/drivers/windows/windows.py:205 +msgid "Updating volume stats" +msgstr "" + +#: cinder/volume/driver.py:924 +msgid "Driver must implement initialize_connection" msgstr "" -#: cinder/scheduler/driver.py:107 +#: cinder/volume/manager.py:203 #, python-format -msgid "Casted '%(method)s' to %(topic)s '%(host)s'" +msgid "Driver path %s is deprecated, update your configuration to the new path." msgstr "" -#: cinder/scheduler/driver.py:181 -msgid "Must implement a fallback schedule" +#: cinder/volume/manager.py:209 +msgid "" +"ThinLVMVolumeDriver is deprecated, please configure LVMISCSIDriver and " +"lvm_type=thin. Continuing with those settings." msgstr "" -#: cinder/scheduler/driver.py:185 -msgid "Driver must implement schedule_prep_resize" +#: cinder/volume/manager.py:228 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)" msgstr "" -#: cinder/scheduler/driver.py:190 -msgid "Driver must implement schedule_run_instance" +#: cinder/volume/manager.py:235 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s" msgstr "" -#: cinder/scheduler/driver.py:325 -msgid "Block migration can not be used with shared storage." +#: cinder/volume/manager.py:244 +#, python-format +msgid "Re-exporting %s volumes" msgstr "" -#: cinder/scheduler/driver.py:330 -msgid "Live migration can not be used without shared storage." 
+#: cinder/volume/manager.py:257
+#, python-format
+msgid "Failed to re-export volume %s: setting to error state"
 msgstr ""

-#: cinder/scheduler/driver.py:367 cinder/scheduler/driver.py:457
+#: cinder/volume/manager.py:264
 #, python-format
-msgid "host %(dest)s is not compatible with original host %(src)s."
+msgid "volume %s stuck in a downloading state"
 msgstr ""

-#: cinder/scheduler/driver.py:416
+#: cinder/volume/manager.py:271
 #, python-format
-msgid ""
-"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of "
-"memory(host:%(avail)s <= instance:%(mem_inst)s)"
+msgid "volume %s: skipping export"
 msgstr ""

-#: cinder/scheduler/driver.py:472
+#: cinder/volume/manager.py:273
 #, python-format
 msgid ""
-"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of "
-"disk(host:%(available)s <= instance:%(necessary)s)"
+"Error encountered during re-exporting phase of driver initialization: "
+"%(name)s"
 msgstr ""

-#: cinder/scheduler/filter_scheduler.py:51
-#, python-format
-msgid "No host selection for %s defined."
+#: cinder/volume/manager.py:283
+msgid "Resuming any in progress delete operations"
 msgstr ""

-#: cinder/scheduler/filter_scheduler.py:64
+#: cinder/volume/manager.py:286
 #, python-format
-msgid "Attempting to build %(num_instances)d instance(s)"
+msgid "Resuming delete on volume: %s"
 msgstr ""

-#: cinder/scheduler/filter_scheduler.py:157
-msgid "Scheduler only understands Compute nodes (for now)"
+#: cinder/volume/manager.py:328 cinder/volume/manager.py:330
+msgid "Failed to create manager volume flow"
 msgstr ""

-#: cinder/scheduler/filter_scheduler.py:199
+#: cinder/volume/manager.py:374 cinder/volume/manager.py:391
 #, python-format
-msgid "Filtered %(hosts)s"
+msgid "volume %s: deleting"
 msgstr ""

-#: cinder/scheduler/filter_scheduler.py:209
-#, python-format
-msgid "Weighted %(weighted_host)s"
+#: cinder/volume/manager.py:380
+msgid "volume is not local to this node"
 msgstr ""

-#: cinder/scheduler/host_manager.py:144
+#: cinder/volume/manager.py:389
 #, python-format
-msgid "Host filter fails for ignored host %(host)s"
+msgid "volume %s: removing export"
 msgstr ""

-#: cinder/scheduler/host_manager.py:151
+#: cinder/volume/manager.py:394
 #, python-format
-msgid "Host filter fails for non-forced host %(host)s"
+msgid "Cannot delete volume %s: volume is busy"
 msgstr ""

-#: cinder/scheduler/host_manager.py:157
-#, python-format
-msgid "Host filter function %(func)s failed for %(host)s"
+#: cinder/volume/manager.py:422
+msgid "Failed to update usages deleting volume"
 msgstr ""

-#: cinder/scheduler/host_manager.py:163
+#: cinder/volume/manager.py:427
 #, python-format
-msgid "Host filter passes for %(host)s"
+msgid "volume %s: glance metadata deleted"
 msgstr ""

-#: cinder/scheduler/host_manager.py:272
+#: cinder/volume/manager.py:430
 #, python-format
-msgid "Received %(service_name)s service update from %(host)s."
+msgid "no glance metadata found for volume %s" msgstr "" -#: cinder/scheduler/host_manager.py:313 -msgid "host_manager only implemented for 'compute'" +#: cinder/volume/manager.py:434 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "bind %s: slettet" + +#: cinder/volume/manager.py:451 +#, python-format +msgid "snapshot %s: creating" msgstr "" -#: cinder/scheduler/host_manager.py:323 +#: cinder/volume/manager.py:462 #, python-format -msgid "No service for compute ID %s" +msgid "snapshot %(snap_id)s: creating" msgstr "" -#: cinder/scheduler/manager.py:85 +#: cinder/volume/manager.py:490 #, python-format msgid "" -"Driver Method %(driver_method_name)s missing: %(e)s. Reverting to " -"schedule()" +"Failed updating %(snapshot_id)s metadata using the provided volumes " +"%(volume_id)s metadata" msgstr "" -#: cinder/scheduler/manager.py:150 +#: cinder/volume/manager.py:496 #, python-format -msgid "Failed to schedule_%(method)s: %(ex)s" +msgid "snapshot %s: created successfully" msgstr "" -#: cinder/scheduler/manager.py:159 +#: cinder/volume/manager.py:508 cinder/volume/manager.py:518 #, python-format -msgid "Setting instance %(instance_uuid)s to %(state)s state." +msgid "snapshot %s: deleting" msgstr "" -#: cinder/scheduler/scheduler_options.py:66 +#: cinder/volume/manager.py:526 #, python-format -msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgid "Cannot delete snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:556 +msgid "Failed to update usages deleting snapshot" msgstr "" -#: cinder/scheduler/scheduler_options.py:75 +#: cinder/volume/manager.py:559 #, python-format -msgid "Could not decode scheduler options: '%(e)s'" +msgid "snapshot %s: deleted successfully" msgstr "" -#: cinder/scheduler/simple.py:87 -msgid "Not enough allocatable CPU cores remaining" +#: cinder/volume/manager.py:579 +msgid "being attached by another instance" msgstr "" -#: cinder/scheduler/simple.py:137 -msgid "Not enough allocatable volume gigabytes remaining" +#: cinder/volume/manager.py:583 +msgid "being attached by another host" msgstr "" -#: cinder/scheduler/filters/core_filter.py:45 -msgid "VCPUs not set; assuming CPU collection broken" +#: cinder/volume/manager.py:587 +msgid "being attached by different mode" msgstr "" -#: cinder/tests/fake_utils.py:72 -#, python-format -msgid "Faking execution of cmd (subprocess): %s" +#: cinder/volume/manager.py:590 +msgid "status must be available or attaching" msgstr "" -#: cinder/tests/fake_utils.py:80 +#: cinder/volume/manager.py:698 #, python-format -msgid "Faked command matched %s" +msgid "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" msgstr "" -#: cinder/tests/fake_utils.py:96 +#: cinder/volume/manager.py:760 #, python-format -msgid "Faked command raised an exception %s" +msgid "Unable to fetch connection information from backend: %(err)s" msgstr "" -#: cinder/tests/fake_utils.py:101 +#: cinder/volume/manager.py:807 #, python-format -msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgid "Unable to terminate volume connection: %(err)s" msgstr "" -#: cinder/tests/fakelibvirt.py:784 -msgid "Please extend mock libvirt module to support flags" +#: cinder/volume/manager.py:854 +msgid "failed to create new_volume on destination host" msgstr "" -#: cinder/tests/fakelibvirt.py:790 -msgid "Please extend fake libvirt module to support this auth method" +#: cinder/volume/manager.py:857 +msgid "timeout creating new_volume on destination host" msgstr "" -#: 
cinder/tests/test_compute.py:365 cinder/tests/test_compute.py:1419 +#: cinder/volume/manager.py:880 #, python-format -msgid "Running instances: %s" +msgid "Failed to copy volume %(vol1)s to %(vol2)s" msgstr "" -#: cinder/tests/test_compute.py:371 +#: cinder/volume/manager.py:909 #, python-format -msgid "After terminating instances: %s" +msgid "" +"migrate_volume_completion: completing migration for volume %(vol1)s " +"(temporary volume %(vol2)s)" msgstr "" -#: cinder/tests/test_compute.py:589 -msgid "Internal error" +#: cinder/volume/manager.py:921 +#, python-format +msgid "" +"migrate_volume_completion is cleaning up an error for volume %(vol1)s " +"(temporary volume %(vol2)s)" msgstr "" -#: cinder/tests/test_compute.py:1430 +#: cinder/volume/manager.py:940 #, python-format -msgid "After force-killing instances: %s" +msgid "Failed to delete migration source vol %(vol)s: %(err)s" msgstr "" -#: cinder/tests/test_misc.py:92 +#: cinder/volume/manager.py:976 #, python-format -msgid "" -"The following migrations are missing a downgrade:\n" -"\t%s" +msgid "volume %s: calling driver migrate_volume" msgstr "" -#: cinder/tests/test_cinder_manage.py:169 -msgid "id" +#: cinder/volume/manager.py:1016 +msgid "Updating volume status" msgstr "" -#: cinder/tests/test_cinder_manage.py:170 -msgid "IPv4" +#: cinder/volume/manager.py:1024 +#, python-format +msgid "" +"Unable to update stats, %(driver_name)s -%(driver_version)s " +"%(config_group)s driver is uninitialized." msgstr "" -#: cinder/tests/test_cinder_manage.py:171 -msgid "IPv6" +#: cinder/volume/manager.py:1044 +#, python-format +msgid "Notification {%s} received" msgstr "" -#: cinder/tests/test_cinder_manage.py:172 -msgid "start address" +#: cinder/volume/manager.py:1091 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to extend volume by %(s_size)sG " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" msgstr "" -#: cinder/tests/test_cinder_manage.py:173 -msgid "DNS1" +#: cinder/volume/manager.py:1103 +#, python-format +msgid "volume %s: extending" msgstr "" -#: cinder/tests/test_cinder_manage.py:174 -msgid "DNS2" +#: cinder/volume/manager.py:1105 +#, python-format +msgid "volume %s: extended successfully" msgstr "" -#: cinder/tests/test_cinder_manage.py:175 -msgid "VlanID" +#: cinder/volume/manager.py:1107 +#, python-format +msgid "volume %s: Error trying to extend volume" msgstr "" -#: cinder/tests/test_cinder_manage.py:176 -msgid "project" +#: cinder/volume/manager.py:1169 +msgid "Failed to update usages while retyping volume." msgstr "" -#: cinder/tests/test_cinder_manage.py:177 -msgid "uuid" +#: cinder/volume/manager.py:1170 +msgid "Failed to get old volume type quota reservations" msgstr "" -#: cinder/tests/test_volume.py:216 +#: cinder/volume/manager.py:1190 #, python-format -msgid "Target %s allocated" +msgid "Volume %s: retyped successfully" msgstr "" -#: cinder/tests/test_volume.py:468 +#: cinder/volume/manager.py:1193 #, python-format -msgid "Cannot confirm exported volume id:%s." +msgid "" +"Volume %s: driver error when trying to retype, falling back to generic " +"mechanism." msgstr "" -#: cinder/tests/test_volume_types.py:58 -#, python-format -msgid "Given data: %s" +#: cinder/volume/manager.py:1204 +msgid "Retype requires migration but is not allowed." msgstr "" -#: cinder/tests/test_volume_types.py:59 -#, python-format -msgid "Result data: %s" +#: cinder/volume/manager.py:1212 +msgid "Volume must not have snapshots."
msgstr "" -#: cinder/tests/test_xenapi.py:626 +#: cinder/volume/qos_specs.py:57 #, python-format -msgid "Creating files in %s to simulate guest agent" +msgid "Valid consumer of QoS specs are: %s" msgstr "" -#: cinder/tests/test_xenapi.py:637 +#: cinder/volume/qos_specs.py:84 cinder/volume/qos_specs.py:105 +#: cinder/volume/qos_specs.py:155 cinder/volume/qos_specs.py:197 +#: cinder/volume/qos_specs.py:211 cinder/volume/qos_specs.py:225 +#: cinder/volume/volume_types.py:43 #, python-format -msgid "Removing simulated guest agent files in %s" +msgid "DB error: %s" msgstr "" -#: cinder/tests/api/openstack/compute/test_servers.py:2144 -#, python-format -msgid "Quota exceeded: code=%(code)s" +#: cinder/volume/qos_specs.py:123 cinder/volume/qos_specs.py:140 +#: cinder/volume/qos_specs.py:272 cinder/volume/volume_types.py:52 +#: cinder/volume/volume_types.py:99 +msgid "id cannot be None" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:57 +#: cinder/volume/qos_specs.py:156 #, python-format -msgid "_create: %s" +msgid "Failed to get all associations of qos specs %s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:66 +#: cinder/volume/qos_specs.py:189 #, python-format -msgid "_delete: %s" +msgid "" +"Type %(type_id)s is already associated with another qos specs: " +"%(qos_specs_id)s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:75 +#: cinder/volume/qos_specs.py:198 #, python-format -msgid "_get: %s" +msgid "Failed to associate qos specs %(id)s with type: %(vol_type_id)s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:85 +#: cinder/volume/qos_specs.py:212 #, python-format -msgid "_get_all: %s" +msgid "Failed to disassociate qos specs %(id)s with type: %(vol_type_id)s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:125 +#: cinder/volume/qos_specs.py:226 #, python-format -msgid "test_snapshot_create: param=%s" +msgid "Failed to disassociate qos specs %s." msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:134 -#, python-format -msgid "test_snapshot_create: resp_dict=%s" +#: cinder/volume/qos_specs.py:284 cinder/volume/volume_types.py:111 +msgid "name cannot be None" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:156 +#: cinder/volume/utils.py:144 #, python-format -msgid "test_snapshot_create_force: param=%s" +msgid "" +"Incorrect value error: %(blocksize)s, it may indicate that " +"'volume_dd_blocksize' was configured incorrectly. Fall back to default." msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:165 +#: cinder/volume/volume_types.py:130 #, python-format -msgid "test_snapshot_create_force: resp_dict=%s" +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:205 +#: cinder/volume/drivers/block_device.py:131 +#: cinder/volume/drivers/block_device.py:143 cinder/volume/drivers/lvm.py:654 +#: cinder/volume/drivers/lvm.py:669 #, python-format -msgid "test_snapshot_show: resp=%s" +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:231 +#: cinder/volume/drivers/block_device.py:157 cinder/volume/drivers/lvm.py:687 #, python-format -msgid "test_snapshot_detail: resp_dict=%s" +msgid "" +"Skipping remove_export. 
No iscsi_target is presently exported for volume:" +" %s" msgstr "" -#: cinder/tests/integrated/test_login.py:31 +#: cinder/volume/drivers/block_device.py:183 cinder/volume/drivers/lvm.py:483 #, python-format -msgid "flavor: %s" +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" msgstr "" -#: cinder/tests/integrated/api/client.py:38 +#: cinder/volume/drivers/block_device.py:200 cinder/volume/drivers/lvm.py:504 #, python-format -msgid "" -"%(message)s\n" -"Status Code: %(_status)s\n" -"Body: %(_body)s" -msgstr "" - -#: cinder/tests/integrated/api/client.py:47 -msgid "Authentication error" +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" msgstr "" -#: cinder/tests/integrated/api/client.py:55 -msgid "Authorization error" +#: cinder/volume/drivers/block_device.py:272 cinder/volume/drivers/lvm.py:227 +#, python-format +msgid "Performing secure delete on volume: %s" msgstr "" -#: cinder/tests/integrated/api/client.py:63 -msgid "Item not found" +#: cinder/volume/drivers/block_device.py:287 +#, python-format +msgid "Error unrecognized volume_clear option: %s" msgstr "" -#: cinder/tests/integrated/api/client.py:105 +#: cinder/volume/drivers/block_device.py:311 cinder/volume/drivers/lvm.py:300 +#: cinder/volume/drivers/zadara.py:509 cinder/volume/drivers/nexenta/nfs.py:189 #, python-format -msgid "Doing %(method)s on %(relative_url)s" +msgid "Creating clone of volume: %s" msgstr "" -#: cinder/tests/integrated/api/client.py:107 -#, python-format -msgid "Body: %s" +#: cinder/volume/drivers/block_device.py:380 +msgid "No free disk" msgstr "" -#: cinder/tests/integrated/api/client.py:125 -#, python-format -msgid "%(auth_uri)s => code %(http_status)s" +#: cinder/volume/drivers/block_device.py:393 +msgid "No big enough free disk" msgstr "" -#: cinder/tests/integrated/api/client.py:151 +#: cinder/volume/drivers/coraid.py:84 #, python-format -msgid "%(relative_uri)s => code %(http_status)s" +msgid "Invalid ESM URL scheme \"%s\". Only https is supported." msgstr "" -#: cinder/tests/integrated/api/client.py:161 -msgid "Unexpected status code" +#: cinder/volume/drivers/coraid.py:111 +msgid "Invalid REST handle name. Expected path." msgstr "" -#: cinder/tests/integrated/api/client.py:168 +#: cinder/volume/drivers/coraid.py:134 #, python-format -msgid "Decoding JSON: %s" +msgid "Call to json.loads() failed: %(ex)s. Response: %(resp)s" msgstr "" -#: cinder/tests/rpc/common.py:133 -#, python-format -msgid "Nested received %(queue)s, %(value)s" +#: cinder/volume/drivers/coraid.py:224 +msgid "Session has expired. Re-login on ESM." msgstr "" -#: cinder/tests/rpc/common.py:142 -#, python-format -msgid "Nested return %s" +#: cinder/volume/drivers/coraid.py:244 +msgid "Reply is empty." msgstr "" -#: cinder/tests/rpc/common.py:160 -msgid "RPC backend does not support timeouts" +#: cinder/volume/drivers/coraid.py:246 +msgid "Error message is empty."
msgstr "" -#: cinder/tests/rpc/common.py:227 cinder/tests/rpc/common.py:233 +#: cinder/volume/drivers/coraid.py:284 #, python-format -msgid "Received %s" +msgid "Coraid Appliance ping failed: %s" msgstr "" -#: cinder/virt/connection.py:85 -msgid "Failed to open connection to the hypervisor" +#: cinder/volume/drivers/coraid.py:297 +#, python-format +msgid "Volume \"%(name)s\" created with VSX LUN \"%(lun)s\"" msgstr "" -#: cinder/virt/fake.py:270 cinder/virt/xenapi_conn.py:396 -#: cinder/virt/baremetal/proxy.py:716 cinder/virt/libvirt/connection.py:2045 +#: cinder/volume/drivers/coraid.py:311 #, python-format -msgid "Compute_service record created for %s " +msgid "Volume \"%s\" deleted." msgstr "" -#: cinder/virt/fake.py:273 cinder/virt/xenapi_conn.py:399 -#: cinder/virt/baremetal/proxy.py:720 cinder/virt/libvirt/connection.py:2048 +#: cinder/volume/drivers/coraid.py:315 #, python-format -msgid "Compute_service record updated for %s " +msgid "Resize volume \"%(name)s\" to %(size)s GB." msgstr "" -#: cinder/virt/firewall.py:130 +#: cinder/volume/drivers/coraid.py:319 #, python-format -msgid "Attempted to unfilter instance %s which is not filtered" +msgid "Repository for volume \"%(name)s\" found: \"%(repo)s\"" msgstr "" -#: cinder/virt/firewall.py:137 +#: cinder/volume/drivers/coraid.py:333 #, python-format -msgid "Filters added to instance %s" +msgid "Volume \"%(name)s\" resized. New size is %(size)s GB." msgstr "" -#: cinder/virt/firewall.py:139 -msgid "Provider Firewall Rules refreshed" +#: cinder/volume/drivers/coraid.py:385 +msgid "Cannot create clone volume in different repository." msgstr "" -#: cinder/virt/firewall.py:291 +#: cinder/volume/drivers/coraid.py:505 #, python-format -msgid "Adding security group rule: %r" +msgid "Initialize connection %(shelf)s/%(lun)s for %(name)s" msgstr "" -#: cinder/virt/firewall.py:403 cinder/virt/xenapi/firewall.py:87 +#: cinder/volume/drivers/eqlx.py:139 #, python-format -msgid "Adding provider rule: %s" +msgid "" +"CLI output\n" +"%s" msgstr "" -#: cinder/virt/images.py:86 -msgid "'qemu-img info' parsing failed." 
+#: cinder/volume/drivers/eqlx.py:154 +msgid "Reading CLI MOTD" msgstr "" -#: cinder/virt/images.py:92 +#: cinder/volume/drivers/eqlx.py:158 #, python-format -msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgid "Setting CLI terminal width: '%s'" msgstr "" -#: cinder/virt/images.py:104 +#: cinder/volume/drivers/eqlx.py:162 #, python-format -msgid "Converted to raw, but format is now %s" +msgid "Sending CLI command: '%s'" msgstr "" -#: cinder/virt/vmwareapi_conn.py:105 -msgid "" -"Must specify vmwareapi_host_ip,vmwareapi_host_username and " -"vmwareapi_host_password to useconnection_type=vmwareapi" +#: cinder/volume/drivers/eqlx.py:169 +msgid "Error executing EQL command" msgstr "" -#: cinder/virt/vmwareapi_conn.py:276 +#: cinder/volume/drivers/eqlx.py:199 #, python-format -msgid "In vmwareapi:_create_session, got this exception: %s" +msgid "EQL-driver: executing \"%s\"" msgstr "" -#: cinder/virt/vmwareapi_conn.py:359 +#: cinder/volume/drivers/eqlx.py:208 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:383 #, python-format -msgid "In vmwareapi:_call_method, got this exception: %s" +msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" msgstr "" -#: cinder/virt/vmwareapi_conn.py:398 +#: cinder/volume/drivers/eqlx.py:215 cinder/volume/drivers/san/san.py:149 #, python-format -msgid "Task [%(task_name)s] %(task_ref)s status: success" +msgid "Error running SSH command: %s" msgstr "" -#: cinder/virt/vmwareapi_conn.py:404 +#: cinder/volume/drivers/eqlx.py:282 #, python-format -msgid "Task [%(task_name)s] %(task_ref)s status: error %(error_info)s" +msgid "Volume %s does not exist, it may have already been deleted" msgstr "" -#: cinder/virt/vmwareapi_conn.py:409 +#: cinder/volume/drivers/eqlx.py:300 #, python-format -msgid "In vmwareapi:_poll_task, Got this error %s" +msgid "EQL-driver: Setup is complete, group IP is %s" msgstr "" -#: cinder/virt/xenapi_conn.py:140 -msgid "" -"Must specify xenapi_connection_url, xenapi_connection_username " -"(optionally), and xenapi_connection_password to use " -"connection_type=xenapi" +#: cinder/volume/drivers/eqlx.py:304 +msgid "Failed to setup the Dell EqualLogic driver" msgstr "" -#: cinder/virt/xenapi_conn.py:329 cinder/virt/libvirt/connection.py:472 -msgid "Could not determine iscsi initiator name" +#: cinder/volume/drivers/eqlx.py:320 +#, python-format +msgid "Failed to create volume %s" msgstr "" -#: cinder/virt/xenapi_conn.py:460 -msgid "Host startup on XenServer is not supported." +#: cinder/volume/drivers/eqlx.py:329 +#, python-format +msgid "Volume %s was not found while trying to delete it" msgstr "" -#: cinder/virt/xenapi_conn.py:489 -msgid "Unable to log in to XenAPI (is the Dom0 disk full?)" +#: cinder/volume/drivers/eqlx.py:333 +#, python-format +msgid "Failed to delete volume %s" msgstr "" -#: cinder/virt/xenapi_conn.py:527 -msgid "Host is member of a pool, but DB says otherwise" +#: cinder/volume/drivers/eqlx.py:348 +#, python-format +msgid "Failed to create snapshot of volume %s" msgstr "" -#: cinder/virt/xenapi_conn.py:599 cinder/virt/xenapi_conn.py:612 +#: cinder/volume/drivers/eqlx.py:361 #, python-format -msgid "Got exception: %s" +msgid "Failed to create volume from snapshot %s" msgstr "" -#: cinder/virt/baremetal/dom.py:93 -msgid "No domains exist." 
+#: cinder/volume/drivers/eqlx.py:374 +#, python-format +msgid "Failed to create clone of volume %s" msgstr "" -#: cinder/virt/baremetal/dom.py:95 +#: cinder/volume/drivers/eqlx.py:384 #, python-format -msgid "============= initial domains =========== : %s" +msgid "Failed to delete snapshot %(snap)s of volume %(vol)s" msgstr "" -#: cinder/virt/baremetal/dom.py:99 -msgid "Building domain: to be removed" +#: cinder/volume/drivers/eqlx.py:405 +#, python-format +msgid "Failed to initialize connection to volume %s" msgstr "" -#: cinder/virt/baremetal/dom.py:103 -msgid "Not running domain: remove" +#: cinder/volume/drivers/eqlx.py:415 +#, python-format +msgid "Failed to terminate connection to volume %s" msgstr "" -#: cinder/virt/baremetal/dom.py:111 -msgid "domain running on an unknown node: discarded" +#: cinder/volume/drivers/eqlx.py:436 +#, python-format +msgid "Volume %s is not found! It may have been deleted" msgstr "" -#: cinder/virt/baremetal/dom.py:127 +#: cinder/volume/drivers/eqlx.py:440 #, python-format -msgid "No such domain (%s)" +msgid "Failed to ensure export of volume %s" msgstr "" -#: cinder/virt/baremetal/dom.py:134 +#: cinder/volume/drivers/eqlx.py:459 #, python-format -msgid "Failed power down Bare-metal node %s" +msgid "Failed to extend_volume %(name)s from %(current_size)sGB to %(new_size)sGB" msgstr "" -#: cinder/virt/baremetal/dom.py:143 -msgid "deactivate -> activate fails" +#: cinder/volume/drivers/glusterfs.py:86 +#, python-format +msgid "There's no Gluster config file configured (%s)" msgstr "" -#: cinder/virt/baremetal/dom.py:153 -msgid "destroy_domain: no such domain" +#: cinder/volume/drivers/glusterfs.py:91 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" msgstr "" -#: cinder/virt/baremetal/dom.py:154 -#, python-format -msgid "No such domain %s" +#: cinder/volume/drivers/glusterfs.py:103 +msgid "mount.glusterfs is not installed" msgstr "" -#: cinder/virt/baremetal/dom.py:161 +#: cinder/volume/drivers/glusterfs.py:161 #, python-format -msgid "Domains: %s" +msgid "Cloning volume %(src)s to volume %(dst)s" msgstr "" -#: cinder/virt/baremetal/dom.py:163 -#, python-format -msgid "Nodes: %s" +#: cinder/volume/drivers/glusterfs.py:166 +msgid "Volume status must be 'available'." msgstr "" -#: cinder/virt/baremetal/dom.py:166 +#: cinder/volume/drivers/glusterfs.py:202 cinder/volume/drivers/nfs.py:122 +#: cinder/volume/drivers/netapp/nfs.py:753 #, python-format -msgid "After storing domains: %s" +msgid "casted to %s" msgstr "" -#: cinder/virt/baremetal/dom.py:169 -msgid "deactivation/removing domain failed" +#: cinder/volume/drivers/glusterfs.py:216 +msgid "Snapshot status must be \"available\" to clone."
msgstr "" -#: cinder/virt/baremetal/dom.py:176 -msgid "===== Domain is being created =====" +#: cinder/volume/drivers/glusterfs.py:238 +#, python-format +msgid "snapshot: %(snap)s, volume: %(vol)s, volume_size: %(size)s" msgstr "" -#: cinder/virt/baremetal/dom.py:179 -msgid "Same domain name already exists" +#: cinder/volume/drivers/glusterfs.py:257 +#, python-format +msgid "will copy from snapshot at %s" msgstr "" -#: cinder/virt/baremetal/dom.py:181 -msgid "create_domain: before get_idle_node" +#: cinder/volume/drivers/glusterfs.py:275 cinder/volume/drivers/nfs.py:172 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" msgstr "" -#: cinder/virt/baremetal/dom.py:198 +#: cinder/volume/drivers/glusterfs.py:373 #, python-format -msgid "Created new domain: %s" +msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" msgstr "" -#: cinder/virt/baremetal/dom.py:213 +#: cinder/volume/drivers/glusterfs.py:403 #, python-format -msgid "Failed to boot Bare-metal node %s" +msgid "nova call result: %s" msgstr "" -#: cinder/virt/baremetal/dom.py:222 -msgid "No such domain exists" +#: cinder/volume/drivers/glusterfs.py:405 +msgid "Call to Nova to create snapshot failed" msgstr "" -#: cinder/virt/baremetal/dom.py:226 -#, python-format -msgid "change_domain_state: to new state %s" +#: cinder/volume/drivers/glusterfs.py:427 +msgid "Nova returned \"error\" status while creating snapshot." msgstr "" -#: cinder/virt/baremetal/dom.py:233 +#: cinder/volume/drivers/glusterfs.py:431 #, python-format -msgid "Stored fake domains to the file: %s" +msgid "Status of snapshot %(id)s is now %(status)s" msgstr "" -#: cinder/virt/baremetal/dom.py:244 -msgid "domain does not exist" +#: cinder/volume/drivers/glusterfs.py:444 +#, python-format +msgid "Timed out while waiting for Nova update for creation of snapshot %s." msgstr "" -#: cinder/virt/baremetal/nodes.py:42 +#: cinder/volume/drivers/glusterfs.py:456 #, python-format -msgid "Unknown baremetal driver %(d)s" +msgid "create snapshot: %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:148 +#: cinder/volume/drivers/glusterfs.py:457 #, python-format -msgid "Error encountered when destroying instance '%(name)s': %(ex)s" +msgid "volume id: %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:162 -#, python-format -msgid "instance %(instance_name)s: deleting instance files %(target)s" +#: cinder/volume/drivers/glusterfs.py:532 +msgid "'active' must be present when writing snap_info." msgstr "" -#: cinder/virt/baremetal/proxy.py:189 +#: cinder/volume/drivers/glusterfs.py:562 #, python-format -msgid "instance %s: rebooted" +msgid "deleting snapshot %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:192 -msgid "_wait_for_reboot failed" +#: cinder/volume/drivers/glusterfs.py:566 +msgid "Volume status must be \"available\" or \"in-use\"." msgstr "" -#: cinder/virt/baremetal/proxy.py:222 +#: cinder/volume/drivers/glusterfs.py:582 #, python-format -msgid "instance %s: rescued" +msgid "" +"Snapshot record for %s is not present, allowing snapshot_delete to " +"proceed." msgstr "" -#: cinder/virt/baremetal/proxy.py:225 -msgid "_wait_for_rescue failed" +#: cinder/volume/drivers/glusterfs.py:587 +#, python-format +msgid "snapshot_file for this snap is %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:242 -msgid "<============= spawn of baremetal =============>" +#: cinder/volume/drivers/glusterfs.py:608 +#, python-format +msgid "No base file found for %s." 
msgstr "" -#: cinder/virt/baremetal/proxy.py:255 +#: cinder/volume/drivers/glusterfs.py:625 #, python-format -msgid "instance %s: is building" +msgid "No %(base_id)s found for %(file)s" msgstr "" -#: cinder/virt/baremetal/proxy.py:260 -msgid "Key is injected but instance is not running yet" +#: cinder/volume/drivers/glusterfs.py:680 +#, python-format +msgid "No file found with %s as backing file." msgstr "" -#: cinder/virt/baremetal/proxy.py:265 +#: cinder/volume/drivers/glusterfs.py:690 #, python-format -msgid "instance %s: booted" +msgid "No snap found with %s as backing file." msgstr "" -#: cinder/virt/baremetal/proxy.py:268 +#: cinder/volume/drivers/glusterfs.py:701 #, python-format -msgid "~~~~~~ current state = %s ~~~~~~" +msgid "No file depends on %s." msgstr "" -#: cinder/virt/baremetal/proxy.py:269 +#: cinder/volume/drivers/glusterfs.py:727 #, python-format -msgid "instance %s spawned successfully" +msgid "Check condition failed: %s expected to be None." msgstr "" -#: cinder/virt/baremetal/proxy.py:272 -#, python-format -msgid "instance %s:not booted" +#: cinder/volume/drivers/glusterfs.py:778 +msgid "Call to Nova delete snapshot failed" msgstr "" -#: cinder/virt/baremetal/proxy.py:274 -msgid "Bremetal assignment is overcommitted." +#: cinder/volume/drivers/glusterfs.py:796 +#, python-format +msgid "status of snapshot %s is still \"deleting\"... waiting" msgstr "" -#: cinder/virt/baremetal/proxy.py:354 +#: cinder/volume/drivers/glusterfs.py:802 #, python-format -msgid "instance %s: Creating image" +msgid "Unable to delete snapshot %(id)s, status: %(status)s." msgstr "" -#: cinder/virt/baremetal/proxy.py:473 +#: cinder/volume/drivers/glusterfs.py:815 #, python-format -msgid "instance %(inst_name)s: injecting %(injection)s into image %(img_id)s" +msgid "Timed out while waiting for Nova update for deletion of snapshot %(id)s." msgstr "" -#: cinder/virt/baremetal/proxy.py:484 +#: cinder/volume/drivers/glusterfs.py:904 #, python-format -msgid "" -"instance %(inst_name)s: ignoring error injecting data into image " -"%(img_id)s (%(e)s)" +msgid "%s must be a valid raw or qcow2 image." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:967 +msgid "Extend volume is only supported for this driver when no snapshots exist." msgstr "" -#: cinder/virt/baremetal/proxy.py:529 +#: cinder/volume/drivers/glusterfs.py:975 #, python-format -msgid "instance %s: starting toXML method" +msgid "Unrecognized backing format: %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:531 +#: cinder/volume/drivers/glusterfs.py:990 #, python-format -msgid "instance %s: finished toXML method" +msgid "creating new volume at %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:574 cinder/virt/libvirt/connection.py:1815 -msgid "" -"Cannot get the number of cpu, because this function is not implemented " -"for this platform. This error can be safely ignored for now." +#: cinder/volume/drivers/glusterfs.py:993 +#, python-format +msgid "file already exists at %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:714 +#: cinder/volume/drivers/glusterfs.py:1019 cinder/volume/drivers/nfs.py:159 #, python-format -msgid "#### RLK: cpu_arch = %s " +msgid "Exception during mounting %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:746 -msgid "Updating!" 
+#: cinder/volume/drivers/glusterfs.py:1021 +#, python-format +msgid "Available shares: %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:773 cinder/virt/libvirt/connection.py:2609 -#: cinder/virt/xenapi/host.py:129 -msgid "Updating host stats" +#: cinder/volume/drivers/glusterfs.py:1038 +#, python-format +msgid "" +"GlusterFS share at %(dir)s is not writable by the Cinder volume service. " +"Snapshot operations will not be supported." msgstr "" -#: cinder/virt/baremetal/tilera.py:185 -msgid "free_node...." +#: cinder/volume/drivers/gpfs.py:96 +#, python-format +msgid "GPFS is not active. Detailed output: %s" msgstr "" -#: cinder/virt/baremetal/tilera.py:216 +#: cinder/volume/drivers/gpfs.py:97 #, python-format -msgid "deactivate_node is called for node_id = %(id)s node_ip = %(ip)s" +msgid "GPFS is not running - state: %s" msgstr "" -#: cinder/virt/baremetal/tilera.py:221 -msgid "status of node is set to 0" +#: cinder/volume/drivers/gpfs.py:140 +msgid "Option gpfs_mount_point_base is not set correctly." msgstr "" -#: cinder/virt/baremetal/tilera.py:232 -msgid "rootfs is already removed" +#: cinder/volume/drivers/gpfs.py:147 +msgid "Option gpfs_images_share_mode is not set correctly." msgstr "" -#: cinder/virt/baremetal/tilera.py:264 -msgid "Before ping to the bare-metal node" +#: cinder/volume/drivers/gpfs.py:153 +msgid "Option gpfs_images_dir is not set correctly." msgstr "" -#: cinder/virt/baremetal/tilera.py:275 +#: cinder/volume/drivers/gpfs.py:160 #, python-format -msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is ready" +msgid "" +"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " +"belong to different file systems" msgstr "" -#: cinder/virt/baremetal/tilera.py:279 +#: cinder/volume/drivers/gpfs.py:169 #, python-format -msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is not ready, out_msg=%(out_msg)s" +msgid "" +"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in " +"cluster daemon level %(cur)s - must be at least at level %(min)s." msgstr "" -#: cinder/virt/baremetal/tilera.py:292 -msgid "Noting to do for tilera nodes: vmlinux is in CF" +#: cinder/volume/drivers/gpfs.py:183 +#, python-format +msgid "%s must be an absolute path." msgstr "" -#: cinder/virt/baremetal/tilera.py:316 -msgid "activate_node" +#: cinder/volume/drivers/gpfs.py:188 +#, python-format +msgid "%s is not a directory." msgstr "" -#: cinder/virt/baremetal/tilera.py:330 -msgid "Node is unknown error state." +#: cinder/volume/drivers/gpfs.py:197 +#, python-format +msgid "" +"The GPFS filesystem %(fs)s is not at the required release level. Current" +" level is %(cur)s, must be at least %(min)s." msgstr "" -#: cinder/virt/disk/api.py:165 -msgid "no capable image handler configured" +#: cinder/volume/drivers/gpfs.py:556 +#, python-format +msgid "Failed to resize volume %(volume_id)s, error: %(error)s" msgstr "" -#: cinder/virt/disk/api.py:178 +#: cinder/volume/drivers/gpfs.py:604 #, python-format -msgid "unknown disk image handler: %s" +msgid "mkfs failed on volume %(vol)s, error message was: %(err)s" msgstr "" -#: cinder/virt/disk/api.py:189 -msgid "image already mounted" +#: cinder/volume/drivers/gpfs.py:637 +#, python-format +msgid "" +"%s cannot be accessed. Verify that GPFS is active and file system is " +"mounted." 
msgstr "" -#: cinder/virt/disk/api.py:276 cinder/virt/disk/guestfs.py:64 -#: cinder/virt/disk/guestfs.py:78 cinder/virt/disk/mount.py:100 +#: cinder/volume/drivers/lvm.py:189 #, python-format -msgid "Failed to mount filesystem: %s" +msgid "Unabled to delete due to existing snapshot for volume: %s" msgstr "" -#: cinder/virt/disk/api.py:291 +#: cinder/volume/drivers/lvm.py:215 #, python-format -msgid "Failed to remove container: %s" +msgid "Volume device file path %s does not exist." msgstr "" -#: cinder/virt/disk/api.py:441 +#: cinder/volume/drivers/lvm.py:221 #, python-format -msgid "User %(username)s not found in password file." +msgid "Size for volume: %s not found, cannot secure delete." msgstr "" -#: cinder/virt/disk/api.py:457 +#: cinder/volume/drivers/lvm.py:262 #, python-format -msgid "User %(username)s not found in shadow file." +msgid "snapshot: %s not found, skipping delete operations" msgstr "" -#: cinder/virt/disk/guestfs.py:39 +#: cinder/volume/drivers/lvm.py:361 #, python-format -msgid "unsupported partition: %s" +msgid "Unable to update stats on non-initialized Volume Group: %s" msgstr "" -#: cinder/virt/disk/guestfs.py:77 -msgid "unknown guestmount error" +#: cinder/volume/drivers/lvm.py:462 +#, python-format +msgid "Error creating iSCSI target, retrying creation for target: %s" msgstr "" -#: cinder/virt/disk/loop.py:30 +#: cinder/volume/drivers/lvm.py:482 #, python-format -msgid "Could not attach image to loopback: %s" +msgid "volume_info:%s" msgstr "" -#: cinder/virt/disk/mount.py:76 -msgid "no partitions found" +#: cinder/volume/drivers/lvm.py:518 +msgid "Detected inconsistency in provider_location id" msgstr "" -#: cinder/virt/disk/mount.py:77 +#: cinder/volume/drivers/lvm.py:519 cinder/volume/drivers/lvm.py:724 +#: cinder/volume/drivers/huawei/rest_common.py:1225 #, python-format -msgid "Failed to map partitions: %s" +msgid "%s" msgstr "" -#: cinder/virt/disk/nbd.py:58 -msgid "nbd unavailable: module not loaded" +#: cinder/volume/drivers/lvm.py:573 +#, python-format +msgid "Symbolic link %s not found" msgstr "" -#: cinder/virt/disk/nbd.py:63 -msgid "No free nbd devices" +#: cinder/volume/drivers/nfs.py:109 +msgid "Driver specific implementation needs to return mount_point_base." msgstr "" -#: cinder/virt/disk/nbd.py:81 +#: cinder/volume/drivers/nfs.py:263 #, python-format -msgid "qemu-nbd error: %s" +msgid "Expected volume size was %d" msgstr "" -#: cinder/virt/disk/nbd.py:93 +#: cinder/volume/drivers/nfs.py:264 #, python-format -msgid "nbd device %s did not show up" +msgid " but size is now %d" msgstr "" -#: cinder/virt/libvirt/connection.py:265 +#: cinder/volume/drivers/nfs.py:361 #, python-format -msgid "Connecting to libvirt: %s" +msgid "%s is already mounted" msgstr "" -#: cinder/virt/libvirt/connection.py:286 -msgid "Connection to libvirt broke" +#: cinder/volume/drivers/nfs.py:421 +#, python-format +msgid "There's no NFS config file configured (%s)" msgstr "" -#: cinder/virt/libvirt/connection.py:388 +#: cinder/volume/drivers/nfs.py:426 #, python-format -msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s" +msgid "NFS config file at %(config)s doesn't exist" msgstr "" -#: cinder/virt/libvirt/connection.py:400 +#: cinder/volume/drivers/nfs.py:431 #, python-format -msgid "" -"Error from libvirt during saved instance removal. Code=%(errcode)s " -"Error=%(e)s" +msgid "NFS config 'nfs_oversub_ratio' invalid. 
Must be > 0: %s" msgstr "" -#: cinder/virt/libvirt/connection.py:411 +#: cinder/volume/drivers/nfs.py:439 #, python-format -msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s" +msgid "NFS config 'nfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" msgstr "" -#: cinder/virt/libvirt/connection.py:423 -msgid "Instance destroyed successfully." +#: cinder/volume/drivers/nfs.py:493 +#, python-format +msgid "Selected %s as target nfs share." msgstr "" -#: cinder/virt/libvirt/connection.py:435 +#: cinder/volume/drivers/nfs.py:526 #, python-format -msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s" +msgid "%s is above nfs_used_ratio" msgstr "" -#: cinder/virt/libvirt/connection.py:461 +#: cinder/volume/drivers/nfs.py:529 #, python-format -msgid "Deleting instance files %(target)s" +msgid "%s is above nfs_oversub_ratio" msgstr "" -#: cinder/virt/libvirt/connection.py:554 -msgid "attaching LXC block device" +#: cinder/volume/drivers/nfs.py:532 +#, python-format +msgid "%s reserved space is above nfs_oversub_ratio" msgstr "" -#: cinder/virt/libvirt/connection.py:567 -msgid "detaching LXC block device" +#: cinder/volume/drivers/rbd.py:160 +#, python-format +msgid "Invalid argument - whence=%s not supported" msgstr "" -#: cinder/virt/libvirt/connection.py:692 -#, fuzzy, python-format -msgid "Instance soft rebooted successfully." -msgstr "bind %s: slettet" +#: cinder/volume/drivers/rbd.py:164 +msgid "Invalid argument" +msgstr "" + +#: cinder/volume/drivers/rbd.py:183 +msgid "fileno() not supported by RBD()" +msgstr "" -#: cinder/virt/libvirt/connection.py:696 -msgid "Failed to soft reboot instance." +#: cinder/volume/drivers/rbd.py:210 +#, python-format +msgid "error opening rbd image %s" msgstr "" -#: cinder/virt/libvirt/connection.py:725 -msgid "Instance shutdown successfully." +#: cinder/volume/drivers/rbd.py:259 +msgid "rados and rbd python libraries not found" msgstr "" -#: cinder/virt/libvirt/connection.py:761 cinder/virt/libvirt/connection.py:905 -msgid "During reboot, instance disappeared." +#: cinder/volume/drivers/rbd.py:265 +msgid "error connecting to ceph cluster" msgstr "" -#: cinder/virt/libvirt/connection.py:766 -msgid "Instance rebooted successfully." +#: cinder/volume/drivers/rbd.py:346 cinder/volume/drivers/sheepdog.py:178 +msgid "error refreshing volume stats" msgstr "" -#: cinder/virt/libvirt/connection.py:867 cinder/virt/xenapi/vmops.py:1358 +#: cinder/volume/drivers/rbd.py:377 #, python-format -msgid "" -"Found %(migration_count)d unconfirmed migrations older than " -"%(confirm_window)d seconds" +msgid "clone depth exceeds limit of %s" msgstr "" -#: cinder/virt/libvirt/connection.py:871 +#: cinder/volume/drivers/rbd.py:411 #, python-format -msgid "Automatically confirming migration %d" +msgid "maximum clone depth (%d) has been reached - flattening source volume" msgstr "" -#: cinder/virt/libvirt/connection.py:896 -msgid "Instance is running" +#: cinder/volume/drivers/rbd.py:423 +#, python-format +msgid "flattening source volume %s" msgstr "" -#: cinder/virt/libvirt/connection.py:910 -msgid "Instance spawned successfully." 
+#: cinder/volume/drivers/rbd.py:435 +#, python-format +msgid "creating snapshot='%s'" msgstr "" -#: cinder/virt/libvirt/connection.py:926 +#: cinder/volume/drivers/rbd.py:445 #, python-format -msgid "data: %(data)r, fpath: %(fpath)r" +msgid "cloning '%(src_vol)s@%(src_snap)s' to '%(dest)s'" msgstr "" -#: cinder/virt/libvirt/connection.py:978 -msgid "Guest does not have a console available" +#: cinder/volume/drivers/rbd.py:459 +msgid "clone created successfully" msgstr "" -#: cinder/virt/libvirt/connection.py:1020 +#: cinder/volume/drivers/rbd.py:468 #, python-format -msgid "Path '%(path)s' supports direct I/O" +msgid "creating volume '%s'" msgstr "" -#: cinder/virt/libvirt/connection.py:1024 +#: cinder/volume/drivers/rbd.py:484 #, python-format -msgid "Path '%(path)s' does not support direct I/O: '%(ex)s'" +msgid "flattening %(pool)s/%(img)s" msgstr "" -#: cinder/virt/libvirt/connection.py:1028 cinder/virt/libvirt/connection.py:1032 +#: cinder/volume/drivers/rbd.py:490 #, python-format -msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'" +msgid "cloning %(pool)s/%(img)s@%(snap)s to %(dst)s" msgstr "" -#: cinder/virt/libvirt/connection.py:1153 -msgid "Creating image" +#: cinder/volume/drivers/rbd.py:527 +msgid "volume has no backup snaps" msgstr "" -#: cinder/virt/libvirt/connection.py:1339 +#: cinder/volume/drivers/rbd.py:550 #, python-format -msgid "Injecting %(injection)s into image %(img_id)s" +msgid "volume %s is not a clone" msgstr "" -#: cinder/virt/libvirt/connection.py:1349 +#: cinder/volume/drivers/rbd.py:568 #, python-format -msgid "Ignoring error injecting data into image %(img_id)s (%(e)s)" +msgid "deleting parent snapshot %s" msgstr "" -#: cinder/virt/libvirt/connection.py:1381 +#: cinder/volume/drivers/rbd.py:579 #, python-format -msgid "block_device_list %s" +msgid "deleting parent %s" msgstr "" -#: cinder/virt/libvirt/connection.py:1658 -msgid "Starting toXML method" +#: cinder/volume/drivers/rbd.py:593 +#, python-format +msgid "volume %s no longer exists in backend" msgstr "" -#: cinder/virt/libvirt/connection.py:1662 -msgid "Finished toXML method" +#: cinder/volume/drivers/rbd.py:609 +msgid "volume has clone snapshot(s)" msgstr "" -#: cinder/virt/libvirt/connection.py:1679 +#: cinder/volume/drivers/rbd.py:625 #, python-format +msgid "deleting rbd volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:629 msgid "" -"Error from libvirt while looking up %(instance_name)s: [Error Code " -"%(error_code)s] %(ex)s" +"ImageBusy error raised while deleting rbd volume. This may have been " +"caused by a connection from a client that has crashed and, if so, may be " +"resolved by retrying the delete after 30 seconds has elapsed." 
msgstr "" -#: cinder/virt/libvirt/connection.py:1920 -msgid "libvirt version is too old (does not support getVersion)" +#: cinder/volume/drivers/rbd.py:642 +msgid "volume is a clone so cleaning references" msgstr "" -#: cinder/virt/libvirt/connection.py:1942 +#: cinder/volume/drivers/rbd.py:696 #, python-format -msgid "'' must be 1, but %d\n" +msgid "connection data: %s" msgstr "" -#: cinder/virt/libvirt/connection.py:1969 -#, python-format -msgid "topology (%(topology)s) must have %(ks)s" +#: cinder/volume/drivers/rbd.py:705 +msgid "Not stored in rbd" msgstr "" -#: cinder/virt/libvirt/connection.py:2067 -#, python-format -msgid "" -"Instance launched has CPU info:\n" -"%s" +#: cinder/volume/drivers/rbd.py:709 +msgid "Blank components" msgstr "" -#: cinder/virt/libvirt/connection.py:2079 -#, python-format -msgid "" -"CPU doesn't have compatibility.\n" -"\n" -"%(ret)s\n" -"\n" -"Refer to %(u)s" +#: cinder/volume/drivers/rbd.py:712 +msgid "Not an rbd snapshot" msgstr "" -#: cinder/virt/libvirt/connection.py:2136 +#: cinder/volume/drivers/rbd.py:724 #, python-format -msgid "Timeout migrating for %s. nwfilter not found." +msgid "not cloneable: %s" msgstr "" -#: cinder/virt/libvirt/connection.py:2352 +#: cinder/volume/drivers/rbd.py:728 #, python-format -msgid "skipping %(path)s since it looks like volume" +msgid "%s is in a different ceph cluster" msgstr "" -#: cinder/virt/libvirt/connection.py:2407 -#, python-format -msgid "Getting disk size of %(i_name)s: %(e)s" +#: cinder/volume/drivers/rbd.py:733 +msgid "rbd image clone requires image format to be 'raw' but image {0} is '{1}'" msgstr "" -#: cinder/virt/libvirt/connection.py:2458 +#: cinder/volume/drivers/rbd.py:747 #, python-format -msgid "Instance %s: Starting migrate_disk_and_power_off" +msgid "Unable to open image %(loc)s: %(err)s" msgstr "" -#: cinder/virt/libvirt/connection.py:2513 -msgid "During wait running, instance disappeared." +#: cinder/volume/drivers/rbd.py:817 +msgid "volume backup complete." msgstr "" -#: cinder/virt/libvirt/connection.py:2518 -msgid "Instance running successfully." +#: cinder/volume/drivers/rbd.py:830 +msgid "volume restore complete." msgstr "" -#: cinder/virt/libvirt/connection.py:2525 +#: cinder/volume/drivers/rbd.py:840 cinder/volume/drivers/sheepdog.py:195 #, python-format -msgid "Instance %s: Starting finish_migration" +msgid "Failed to Extend Volume %(volname)s" msgstr "" -#: cinder/virt/libvirt/connection.py:2565 +#: cinder/volume/drivers/rbd.py:845 cinder/volume/drivers/sheepdog.py:200 +#: cinder/volume/drivers/windows/windows.py:223 #, python-format -msgid "Instance %s: Starting finish_revert_migration" +msgid "Extend volume from %(old_size)s GB to %(new_size)s GB." msgstr "" -#: cinder/virt/libvirt/firewall.py:42 -msgid "" -"Libvirt module could not be loaded. NWFilterFirewall will not work " -"correctly." +#: cinder/volume/drivers/scality.py:67 +msgid "Value required for 'scality_sofs_config'" msgstr "" -#: cinder/virt/libvirt/firewall.py:93 -msgid "Called setup_basic_filtering in nwfilter" +#: cinder/volume/drivers/scality.py:78 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" msgstr "" -#: cinder/virt/libvirt/firewall.py:101 -msgid "Ensuring static filters" +#: cinder/volume/drivers/scality.py:84 +msgid "Cannot execute /sbin/mount.sofs" msgstr "" -#: cinder/virt/libvirt/firewall.py:171 -#, python-format -msgid "The nwfilter(%(instance_filter_name)s) is not found." 
+#: cinder/volume/drivers/scality.py:105 +msgid "Cannot mount Scality SOFS, check syslog for errors" msgstr "" -#: cinder/virt/libvirt/firewall.py:217 +#: cinder/volume/drivers/scality.py:139 #, python-format -msgid "The nwfilter(%(instance_filter_name)s) for%(name)s is not found." +msgid "Cannot find volume dir for Scality SOFS at '%s'" msgstr "" -#: cinder/virt/libvirt/firewall.py:233 -msgid "iptables firewall: Setup Basic Filtering" +#: cinder/volume/drivers/sheepdog.py:59 +#, python-format +msgid "Sheepdog is not working: %s" msgstr "" -#: cinder/virt/libvirt/firewall.py:252 -msgid "Attempted to unfilter instance which is not filtered" +#: cinder/volume/drivers/sheepdog.py:64 +msgid "Sheepdog is not working" msgstr "" -#: cinder/virt/libvirt/imagecache.py:170 +#: cinder/volume/drivers/solidfire.py:144 #, python-format -msgid "%s is a valid instance name" +msgid "Payload for SolidFire API call: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:173 +#: cinder/volume/drivers/solidfire.py:151 #, python-format -msgid "%s has a disk file" +msgid "" +"Failed to make httplib connection to SolidFire Cluster: %s (verify san_ip " +"settings)" msgstr "" -#: cinder/virt/libvirt/imagecache.py:175 +#: cinder/volume/drivers/solidfire.py:154 #, python-format -msgid "Instance %(instance)s is backed by %(backing)s" +msgid "Failed to make httplib connection: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:186 +#: cinder/volume/drivers/solidfire.py:161 #, python-format msgid "" -"Instance %(instance)s is using a backing file %(backing)s which does not " -"appear in the image service" +"Request to SolidFire cluster returned bad status: %(status)s / %(reason)s" +" (check san_login/san_password settings)" msgstr "" -#: cinder/virt/libvirt/imagecache.py:237 +#: cinder/volume/drivers/solidfire.py:166 #, python-format -msgid "%(id)s (%(base_file)s): image verification failed" +msgid "HTTP request failed with status: %(status)s and reason: %(reason)s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:247 +#: cinder/volume/drivers/solidfire.py:177 #, python-format -msgid "%(id)s (%(base_file)s): image verification skipped, no hash stored" +msgid "Call to json.loads() raised an exception: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:266 +#: cinder/volume/drivers/solidfire.py:183 #, python-format -msgid "Cannot remove %(base_file)s, it does not exist" +msgid "Results of SolidFire API call: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:278 +#: cinder/volume/drivers/solidfire.py:187 #, python-format -msgid "Base file too young to remove: %s" +msgid "Clone operation encountered: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:281 +#: cinder/volume/drivers/solidfire.py:189 #, python-format -msgid "Removing base file: %s" +msgid "Waiting for outstanding operation before retrying snapshot: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:288 +#: cinder/volume/drivers/solidfire.py:195 #, python-format -msgid "Failed to remove %(base_file)s, error was %(error)s" +msgid "Detected xDBVersionMismatch, retry %s of 5" msgstr "" -#: cinder/virt/libvirt/imagecache.py:299 +#: cinder/volume/drivers/solidfire.py:202 +#: cinder/volume/drivers/solidfire.py:271 +#: cinder/volume/drivers/solidfire.py:366 #, python-format -msgid "%(id)s (%(base_file)s): checking" +msgid "API response: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:318 +#: cinder/volume/drivers/solidfire.py:222 #, python-format -msgid "" -"%(id)s (%(base_file)s): in use: on this node %(local)d local, %(remote)d " -"on other nodes" +msgid "Found 
solidfire account: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:330 +#: cinder/volume/drivers/solidfire.py:253 #, python-format -msgid "" -"%(id)s (%(base_file)s): warning -- an absent base file is in use! " -"instances: %(instance_list)s" +msgid "solidfire account: %s does not exist, create it..." msgstr "" -#: cinder/virt/libvirt/imagecache.py:338 +#: cinder/volume/drivers/solidfire.py:315 #, python-format -msgid "%(id)s (%(base_file)s): in use on (%(remote)d on other nodes)" +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" msgstr "" -#: cinder/virt/libvirt/imagecache.py:348 -#, python-format -msgid "%(id)s (%(base_file)s): image is not in use" +#: cinder/volume/drivers/solidfire.py:398 +msgid "Failed to get model update from clone" msgstr "" -#: cinder/virt/libvirt/imagecache.py:354 +#: cinder/volume/drivers/solidfire.py:410 #, python-format -msgid "%(id)s (%(base_file)s): image is in use" +msgid "Failed volume create: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:377 +#: cinder/volume/drivers/solidfire.py:425 #, python-format -msgid "Skipping verification, no base directory at %s" +msgid "More than one valid preset was detected, using %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:381 -msgid "Verify base images" +#: cinder/volume/drivers/solidfire.py:460 +#, python-format +msgid "Failed to get SolidFire Volume: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:388 +#: cinder/volume/drivers/solidfire.py:469 #, python-format -msgid "Image id %(id)s yields fingerprint %(fingerprint)s" +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." msgstr "" -#: cinder/virt/libvirt/imagecache.py:406 +#: cinder/volume/drivers/solidfire.py:478 #, python-format -msgid "Unknown base file: %s" +msgid "Volume %s, not found on SF Cluster." msgstr "" -#: cinder/virt/libvirt/imagecache.py:411 +#: cinder/volume/drivers/solidfire.py:481 #, python-format -msgid "Active base files: %s" +msgid "Found %(count)s volumes mapped to id: %(uuid)s." msgstr "" -#: cinder/virt/libvirt/imagecache.py:414 -#, python-format -msgid "Corrupt base files: %s" +#: cinder/volume/drivers/solidfire.py:550 +msgid "Enter SolidFire delete_volume..." msgstr "" -#: cinder/virt/libvirt/imagecache.py:418 +#: cinder/volume/drivers/solidfire.py:554 #, python-format -msgid "Removable base files: %s" +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" msgstr "" -#: cinder/virt/libvirt/imagecache.py:426 -msgid "Verification complete" +#: cinder/volume/drivers/solidfire.py:556 +msgid "This usually means the volume was never successfully created." msgstr "" -#: cinder/virt/libvirt/utils.py:264 -msgid "Unable to find an open port" +#: cinder/volume/drivers/solidfire.py:569 +#, python-format +msgid "Failed to delete SolidFire Volume: %s" msgstr "" -#: cinder/virt/libvirt/vif.py:90 +#: cinder/volume/drivers/solidfire.py:572 +#: cinder/volume/drivers/solidfire.py:646 +#: cinder/volume/drivers/solidfire.py:709 +#: cinder/volume/drivers/solidfire.py:734 #, python-format -msgid "Ensuring vlan %(vlan)s and bridge %(bridge)s" +msgid "Volume ID %s was not found on the SolidFire Cluster!" 
msgstr "" -#: cinder/virt/libvirt/vif.py:99 -#, python-format -msgid "Ensuring bridge %s" +#: cinder/volume/drivers/solidfire.py:575 +msgid "Leaving SolidFire delete_volume" msgstr "" -#: cinder/virt/libvirt/vif.py:165 cinder/virt/libvirt/vif.py:220 -#, python-format -msgid "Failed while unplugging vif of instance '%s'" +#: cinder/volume/drivers/solidfire.py:579 +msgid "Executing SolidFire ensure_export..." msgstr "" -#: cinder/virt/libvirt/volume.py:163 -#, python-format -msgid "iSCSI device not found at %s" +#: cinder/volume/drivers/solidfire.py:587 +msgid "Executing SolidFire create_export..." msgstr "" -#: cinder/virt/libvirt/volume.py:166 -#, python-format -msgid "" -"ISCSI volume not yet found at: %(mount_device)s. Will rescan & retry. " -"Try number: %(tries)s" +#: cinder/volume/drivers/solidfire.py:638 +msgid "Entering SolidFire extend_volume..." msgstr "" -#: cinder/virt/libvirt/volume.py:178 -#, python-format -msgid "Found iSCSI node %(mount_device)s (after %(tries)s rescans)" +#: cinder/volume/drivers/solidfire.py:660 +msgid "Leaving SolidFire extend_volume" msgstr "" -#: cinder/virt/vmwareapi/error_util.py:93 -#, python-format -msgid "Error(s) %s occurred in the call to RetrieveProperties" +#: cinder/volume/drivers/solidfire.py:665 +msgid "Updating cluster status info" msgstr "" -#: cinder/virt/vmwareapi/fake.py:44 cinder/virt/xenapi/fake.py:77 -#, python-format -msgid "%(text)s: _db_content => %(content)s" +#: cinder/volume/drivers/solidfire.py:673 +msgid "Failed to get updated stats" msgstr "" -#: cinder/virt/vmwareapi/fake.py:131 -#, python-format -msgid "Property %(attr)s not set for the managed object %(objName)s" +#: cinder/volume/drivers/solidfire.py:703 +#: cinder/volume/drivers/solidfire.py:728 +msgid "Entering SolidFire attach_volume..." msgstr "" -#: cinder/virt/vmwareapi/fake.py:437 -msgid "There is no VM registered" +#: cinder/volume/drivers/solidfire.py:773 +msgid "Leaving SolidFire transfer volume" msgstr "" -#: cinder/virt/vmwareapi/fake.py:439 cinder/virt/vmwareapi/fake.py:609 +#: cinder/volume/drivers/zadara.py:236 #, python-format -msgid "Virtual Machine with ref %s is not there" +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" msgstr "" -#: cinder/virt/vmwareapi/fake.py:502 +#: cinder/volume/drivers/zadara.py:260 #, python-format -msgid "Logging out a session that is invalid or already logged out: %s" +msgid "Operation completed. %(data)s" msgstr "" -#: cinder/virt/vmwareapi/fake.py:517 -msgid "Session is faulty" +#: cinder/volume/drivers/zadara.py:357 +#, python-format +msgid "Pool %(name)s: %(total)sGB total, %(free)sGB free" msgstr "" -#: cinder/virt/vmwareapi/fake.py:520 -msgid "Session Invalid" +#: cinder/volume/drivers/zadara.py:408 cinder/volume/drivers/zadara.py:531 +#, python-format +msgid "Volume %(name)s could not be found. 
It might be already deleted" msgstr "" -#: cinder/virt/vmwareapi/fake.py:606 -msgid " No Virtual Machine has been registered yet" +#: cinder/volume/drivers/zadara.py:438 +#, python-format +msgid "Create snapshot: %s" msgstr "" -#: cinder/virt/vmwareapi/io_util.py:99 +#: cinder/volume/drivers/zadara.py:445 cinder/volume/drivers/zadara.py:490 +#: cinder/volume/drivers/zadara.py:516 #, python-format -msgid "Glance image %s is in killed state" +msgid "Volume %(name)s not found" msgstr "" -#: cinder/virt/vmwareapi/io_util.py:107 +#: cinder/volume/drivers/zadara.py:456 #, python-format -msgid "Glance image %(image_id)s is in unknown state - %(state)s" +msgid "Delete snapshot: %s" msgstr "" -#: cinder/virt/vmwareapi/network_utils.py:128 -msgid "" -"ESX SOAP server returned an empty port group for the host system in its " -"response" +#: cinder/volume/drivers/zadara.py:464 +#, python-format +msgid "snapshot: original volume %s not found, skipping delete operation" msgstr "" -#: cinder/virt/vmwareapi/network_utils.py:155 +#: cinder/volume/drivers/zadara.py:472 #, python-format -msgid "Creating Port Group with name %s on the ESX host" +msgid "snapshot: snapshot %s not found, skipping delete operation" msgstr "" -#: cinder/virt/vmwareapi/network_utils.py:169 +#: cinder/volume/drivers/zadara.py:483 #, python-format -msgid "Created Port Group with name %s on the ESX host" +msgid "Creating volume from snapshot: %s" msgstr "" -#: cinder/virt/vmwareapi/read_write_util.py:150 +#: cinder/volume/drivers/zadara.py:496 #, python-format -msgid "Exception during HTTP connection close in VMWareHTTpWrite. Exception is %s" +msgid "Snapshot %(name)s not found" msgstr "" -#: cinder/virt/vmwareapi/vim.py:84 -msgid "Unable to import suds." +#: cinder/volume/drivers/zadara.py:614 +#, python-format +msgid "Attach properties: %(properties)s" msgstr "" -#: cinder/virt/vmwareapi/vim.py:90 -msgid "Must specify vmwareapi_wsdl_loc" +#: cinder/volume/drivers/emc/emc_smis_common.py:40 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." msgstr "" -#: cinder/virt/vmwareapi/vim.py:145 -#, python-format -msgid "No such SOAP method '%s' provided by VI SDK" +#: cinder/volume/drivers/emc/emc_smis_common.py:79 +msgid "Entering create_volume." msgstr "" -#: cinder/virt/vmwareapi/vim.py:150 +#: cinder/volume/drivers/emc/emc_smis_common.py:83 #, python-format -msgid "httplib error in %s: " +msgid "Create Volume: %(volume)s Size: %(size)lu" msgstr "" -#: cinder/virt/vmwareapi/vim.py:157 +#: cinder/volume/drivers/emc/emc_smis_common.py:91 #, python-format -msgid "Socket error in %s: " +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" msgstr "" -#: cinder/virt/vmwareapi/vim.py:162 +#: cinder/volume/drivers/emc/emc_smis_common.py:98 #, python-format -msgid "Type error in %s: " +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" msgstr "" -#: cinder/virt/vmwareapi/vim.py:166 +#: cinder/volume/drivers/emc/emc_smis_common.py:107 #, python-format -msgid "Exception in %s " +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." 
msgstr "" -#: cinder/virt/vmwareapi/vmops.py:66 -msgid "Getting list of instances" +#: cinder/volume/drivers/emc/emc_smis_common.py:115 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:82 +#: cinder/volume/drivers/emc/emc_smis_common.py:130 #, python-format -msgid "Got total of %s instances" +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:126 -msgid "Couldn't get a local Datastore reference" +#: cinder/volume/drivers/emc/emc_smis_common.py:137 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:196 +#: cinder/volume/drivers/emc/emc_smis_common.py:144 #, python-format -msgid "Creating VM with the name %s on the ESX host" +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:205 -#, python-format -msgid "Created VM with the name %s on the ESX host" +#: cinder/volume/drivers/emc/emc_smis_common.py:152 +msgid "Entering create_volume_from_snapshot." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:234 +#: cinder/volume/drivers/emc/emc_smis_common.py:157 #, python-format msgid "" -"Creating Virtual Disk of size %(vmdk_file_size_in_kb)s KB and adapter " -"type %(adapter_type)s on the ESX host local store %(data_store_name)s" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:251 +#: cinder/volume/drivers/emc/emc_smis_common.py:167 #, python-format msgid "" -"Created Virtual Disk of size %(vmdk_file_size_in_kb)s KB on the ESX host " -"local store %(data_store_name)s" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:260 +#: cinder/volume/drivers/emc/emc_smis_common.py:177 #, python-format msgid "" -"Deleting the file %(flat_uploaded_vmdk_path)s on the ESX host localstore " -"%(data_store_name)s" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:272 +#: cinder/volume/drivers/emc/emc_smis_common.py:188 #, python-format msgid "" -"Deleted the file %(flat_uploaded_vmdk_path)s on the ESX host local store " -"%(data_store_name)s" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." 
msgstr "" -#: cinder/virt/vmwareapi/vmops.py:283 +#: cinder/volume/drivers/emc/emc_smis_common.py:197 #, python-format msgid "" -"Downloading image file data %(image_ref)s to the ESX data store " -"%(data_store_name)s" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:298 +#: cinder/volume/drivers/emc/emc_smis_common.py:218 #, python-format msgid "" -"Downloaded image file data %(image_ref)s to the ESX data store " -"%(data_store_name)s" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:315 +#: cinder/volume/drivers/emc/emc_smis_common.py:230 #, python-format -msgid "Reconfiguring VM instance %s to attach the image disk" +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:322 +#: cinder/volume/drivers/emc/emc_smis_common.py:241 #, python-format -msgid "Reconfigured VM instance %s to attach the image disk" +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:329 +#: cinder/volume/drivers/emc/emc_smis_common.py:257 #, python-format -msgid "Powering on the VM instance %s" +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:335 +#: cinder/volume/drivers/emc/emc_smis_common.py:266 #, python-format -msgid "Powered on the VM instance %s" +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:381 +#: cinder/volume/drivers/emc/emc_smis_common.py:278 #, python-format -msgid "Creating Snapshot of the VM instance %s " +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:391 -#, python-format -msgid "Created Snapshot of the VM instance %s " +#: cinder/volume/drivers/emc/emc_smis_common.py:287 +msgid "Entering create_cloned_volume." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:434 +#: cinder/volume/drivers/emc/emc_smis_common.py:292 #, python-format -msgid "Copying disk data before snapshot of the VM instance %s" +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:447 +#: cinder/volume/drivers/emc/emc_smis_common.py:302 #, python-format -msgid "Copied disk data before snapshot of the VM instance %s" +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:456 +#: cinder/volume/drivers/emc/emc_smis_common.py:312 #, python-format -msgid "Uploading image %s" +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. 
Cannot find Replication Service to create cloned volume." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:469 +#: cinder/volume/drivers/emc/emc_smis_common.py:321 #, python-format -msgid "Uploaded image %s" +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:479 +#: cinder/volume/drivers/emc/emc_smis_common.py:342 #, python-format -msgid "Deleting temporary vmdk file %s" +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. Return code: %(rc)lu.Error: %(error)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:488 +#: cinder/volume/drivers/emc/emc_smis_common.py:354 #, python-format -msgid "Deleted temporary vmdk file %s" -msgstr "" - -#: cinder/virt/vmwareapi/vmops.py:520 -msgid "instance is not powered on" +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:527 +#: cinder/volume/drivers/emc/emc_smis_common.py:365 #, python-format -msgid "Rebooting guest OS of VM %s" +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:530 +#: cinder/volume/drivers/emc/emc_smis_common.py:381 #, python-format -msgid "Rebooted guest OS of VM %s" +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:532 +#: cinder/volume/drivers/emc/emc_smis_common.py:390 #, python-format -msgid "Doing hard reboot of VM %s" +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:536 +#: cinder/volume/drivers/emc/emc_smis_common.py:402 #, python-format -msgid "Did hard reboot of VM %s" +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:548 -#, python-format -msgid "instance - %s not present" +#: cinder/volume/drivers/emc/emc_smis_common.py:411 +msgid "Entering delete_volume." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:567 +#: cinder/volume/drivers/emc/emc_smis_common.py:413 #, python-format -msgid "Powering off the VM %s" +msgid "Delete Volume: %(volume)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:572 +#: cinder/volume/drivers/emc/emc_smis_common.py:420 #, python-format -msgid "Powered off the VM %s" +msgid "Volume %(name)s not found on the array. No volume to delete." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:576 +#: cinder/volume/drivers/emc/emc_smis_common.py:430 #, python-format -msgid "Unregistering the VM %s" +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." 
msgstr "" -#: cinder/virt/vmwareapi/vmops.py:579 +#: cinder/volume/drivers/emc/emc_smis_common.py:438 #, python-format -msgid "Unregistered the VM %s" +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:581 +#: cinder/volume/drivers/emc/emc_smis_common.py:442 #, python-format msgid "" -"In vmwareapi:vmops:destroy, got this exception while un-registering the " -"VM: %s" -msgstr "" - -#: cinder/virt/vmwareapi/vmops.py:592 -#, python-format -msgid "Deleting contents of the VM %(name)s from datastore %(datastore_name)s" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:602 +#: cinder/volume/drivers/emc/emc_smis_common.py:456 #, python-format -msgid "Deleted contents of the VM %(name)s from datastore %(datastore_name)s" +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:607 +#: cinder/volume/drivers/emc/emc_smis_common.py:465 #, python-format -msgid "" -"In vmwareapi:vmops:destroy, got this exception while deleting the VM " -"contents from the disk: %s" +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:615 -msgid "pause not supported for vmwareapi" +#: cinder/volume/drivers/emc/emc_smis_common.py:472 +msgid "Entering create_snapshot." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:619 -msgid "unpause not supported for vmwareapi" +#: cinder/volume/drivers/emc/emc_smis_common.py:476 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:633 +#: cinder/volume/drivers/emc/emc_smis_common.py:488 #, python-format -msgid "Suspending the VM %s " +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:637 +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:497 +#: cinder/volume/drivers/emc/emc_smis_common.py:567 #, python-format -msgid "Suspended the VM %s " +msgid "Cannot find Replication Service to create snapshot for volume %s." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:640 -msgid "instance is powered off and can not be suspended." +#: cinder/volume/drivers/emc/emc_smis_common.py:502 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:643 +#: cinder/volume/drivers/emc/emc_smis_common.py:518 #, python-format -msgid "VM %s was already in suspended state. So returning without doing anything" +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:656 +#: cinder/volume/drivers/emc/emc_smis_common.py:527 #, python-format -msgid "Resuming the VM %s" +msgid "" +"Error Create Snapshot: %(snapshot)s Volume: %(volume)s Error: " +"%(errordesc)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:661 +#: cinder/volume/drivers/emc/emc_smis_common.py:535 #, python-format -msgid "Resumed the VM %s " +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." 
msgstr "" -#: cinder/virt/vmwareapi/vmops.py:663 -msgid "instance is not in a suspended state" +#: cinder/volume/drivers/emc/emc_smis_common.py:541 +msgid "Entering delete_snapshot." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:699 -msgid "get_diagnostics not implemented for vmwareapi" +#: cinder/volume/drivers/emc/emc_smis_common.py:545 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:757 +#: cinder/volume/drivers/emc/emc_smis_common.py:551 #, python-format msgid "" -"Reconfiguring VM instance %(name)s to set the machine id with ip - " -"%(ip_addr)s" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:765 +#: cinder/volume/drivers/emc/emc_smis_common.py:559 #, python-format msgid "" -"Reconfigured VM instance %(name)s to set the machine id with ip - " -"%(ip_addr)s" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:802 +#: cinder/volume/drivers/emc/emc_smis_common.py:574 #, python-format -msgid "Creating directory with path %s" +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:806 +#: cinder/volume/drivers/emc/emc_smis_common.py:590 #, python-format -msgid "Created directory with path %s" +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:89 +#: cinder/volume/drivers/emc/emc_smis_common.py:599 #, python-format -msgid "Downloading image %s from glance image server" +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:103 +#: cinder/volume/drivers/emc/emc_smis_common.py:611 #, python-format -msgid "Downloaded image %s from glance image server" +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." 
msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:108 +#: cinder/volume/drivers/emc/emc_smis_common.py:621 #, python-format -msgid "Uploading image %s to the Glance image server" +msgid "Create export: %(volume)s" msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:129 +#: cinder/volume/drivers/emc/emc_smis_common.py:626 #, python-format -msgid "Uploaded image %s to the Glance image server" +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:139 +#: cinder/volume/drivers/emc/emc_smis_common.py:648 #, python-format -msgid "Getting image size for the image %s" +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:143 +#: cinder/volume/drivers/emc/emc_smis_common.py:663 #, python-format -msgid "Got image size of %(size)s for the image %(image)s" +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" msgstr "" -#: cinder/virt/xenapi/fake.py:553 cinder/virt/xenapi/fake.py:652 -#: cinder/virt/xenapi/fake.py:670 cinder/virt/xenapi/fake.py:732 -msgid "Raising NotImplemented" +#: cinder/volume/drivers/emc/emc_smis_common.py:674 +#, python-format +msgid "Error mapping volume %s." msgstr "" -#: cinder/virt/xenapi/fake.py:555 +#: cinder/volume/drivers/emc/emc_smis_common.py:678 +#, fuzzy, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "bind %s: slettet" + +#: cinder/volume/drivers/emc/emc_smis_common.py:694 #, python-format -msgid "xenapi.fake does not have an implementation for %s" +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" msgstr "" -#: cinder/virt/xenapi/fake.py:589 +#: cinder/volume/drivers/emc/emc_smis_common.py:707 #, python-format -msgid "Calling %(localname)s %(impl)s" +msgid "Error unmapping volume %s." msgstr "" -#: cinder/virt/xenapi/fake.py:594 +#: cinder/volume/drivers/emc/emc_smis_common.py:711 +#, fuzzy, python-format +msgid "HidePaths for volume %s completed successfully." +msgstr "bind %s: slettet" + +#: cinder/volume/drivers/emc/emc_smis_common.py:724 #, python-format -msgid "Calling getter %s" +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" msgstr "" -#: cinder/virt/xenapi/fake.py:654 +#: cinder/volume/drivers/emc/emc_smis_common.py:739 #, python-format -msgid "" -"xenapi.fake does not have an implementation for %s or it has been called " -"with the wrong number of arguments" +msgid "Error mapping volume %(vol)s. %(error)s" msgstr "" -#: cinder/virt/xenapi/host.py:67 +#: cinder/volume/drivers/emc/emc_smis_common.py:744 +#, fuzzy, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "bind %s: slettet" + +#: cinder/volume/drivers/emc/emc_smis_common.py:757 #, python-format msgid "" -"Instance %(name)s running on %(host)s could not be found in the database:" -" assuming it is a worker VM and skipping migration to a new host" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" msgstr "" -#: cinder/virt/xenapi/host.py:137 +#: cinder/volume/drivers/emc/emc_smis_common.py:770 #, python-format -msgid "Unable to get SR for this host: %s" +msgid "Error unmapping volume %(vol)s. 
%(error)s" msgstr "" -#: cinder/virt/xenapi/host.py:169 -msgid "Unable to get updated status" -msgstr "" +#: cinder/volume/drivers/emc/emc_smis_common.py:775 +#, fuzzy, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "bind %s: slettet" -#: cinder/virt/xenapi/host.py:172 +#: cinder/volume/drivers/emc/emc_smis_common.py:781 #, python-format -msgid "The call to %(method)s returned an error: %(e)s." +msgid "Map volume: %(volume)s" msgstr "" -#: cinder/virt/xenapi/network_utils.py:37 +#: cinder/volume/drivers/emc/emc_smis_common.py:790 +#: cinder/volume/drivers/emc/emc_smis_common.py:820 #, python-format -msgid "Found non-unique network for name_label %s" +msgid "Cannot find Controller Configuration Service for storage system %s" msgstr "" -#: cinder/virt/xenapi/network_utils.py:55 +#: cinder/volume/drivers/emc/emc_smis_common.py:804 #, python-format -msgid "Found non-unique network for bridge %s" +msgid "Unmap volume: %(volume)s" msgstr "" -#: cinder/virt/xenapi/network_utils.py:58 +#: cinder/volume/drivers/emc/emc_smis_common.py:810 #, python-format -msgid "Found no network for bridge %s" +msgid "Volume %s is not mapped. No volume to unmap." msgstr "" -#: cinder/virt/xenapi/pool.py:111 +#: cinder/volume/drivers/emc/emc_smis_common.py:834 #, python-format -msgid "Unable to eject %(host)s from the pool; pool not empty" +msgid "Initialize connection: %(volume)s" msgstr "" -#: cinder/virt/xenapi/pool.py:126 +#: cinder/volume/drivers/emc/emc_smis_common.py:840 #, python-format -msgid "Unable to eject %(host)s from the pool; No master found" +msgid "Volume %s is already mapped." msgstr "" -#: cinder/virt/xenapi/pool.py:143 +#: cinder/volume/drivers/emc/emc_smis_common.py:852 #, python-format -msgid "Pool-Join failed: %(e)s" +msgid "Terminate connection: %(volume)s" msgstr "" -#: cinder/virt/xenapi/pool.py:146 +#: cinder/volume/drivers/emc/emc_smis_common.py:884 #, python-format -msgid "Unable to join %(host)s in the pool" +msgid "Found Storage Type: %s" msgstr "" -#: cinder/virt/xenapi/pool.py:162 -#, python-format -msgid "Pool-eject failed: %(e)s" +#: cinder/volume/drivers/emc/emc_smis_common.py:887 +msgid "Storage type not found." msgstr "" -#: cinder/virt/xenapi/pool.py:174 +#: cinder/volume/drivers/emc/emc_smis_common.py:903 #, python-format -msgid "Unable to set up pool: %(e)s." +msgid "Found Masking View: %s" msgstr "" -#: cinder/virt/xenapi/pool.py:185 -#, python-format -msgid "Pool-set_name_label failed: %(e)s" +#: cinder/volume/drivers/emc/emc_smis_common.py:906 +msgid "Masking View not found." msgstr "" -#: cinder/virt/xenapi/vif.py:103 -#, python-format -msgid "Found no PIF for device %s" +#: cinder/volume/drivers/emc/emc_smis_common.py:928 +msgid "Ecom user not found." msgstr "" -#: cinder/virt/xenapi/vif.py:122 +#: cinder/volume/drivers/emc/emc_smis_common.py:948 #, python-format -msgid "" -"PIF %(pif_rec['uuid'])s for network %(bridge)s has VLAN id %(pif_vlan)d. " -"Expected %(vlan_num)d" +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:952 +msgid "Ecom server not found." 
msgstr "" -#: cinder/virt/xenapi/vm_utils.py:218 -msgid "Created VM" +#: cinder/volume/drivers/emc/emc_smis_common.py:959 +msgid "Cannot connect to ECOM server" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:245 +#: cinder/volume/drivers/emc/emc_smis_common.py:971 #, python-format -msgid "VBD not found in instance %s" +msgid "Found Replication Service: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:262 +#: cinder/volume/drivers/emc/emc_smis_common.py:984 #, python-format -msgid "VBD %s already detached" +msgid "Found Storage Configuration Service: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:265 +#: cinder/volume/drivers/emc/emc_smis_common.py:997 #, python-format -msgid "VBD %(vbd_ref)s detach rejected, attempt %(num_attempt)d/%(max_attempts)d" +msgid "Found Controller Configuration Service: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:270 +#: cinder/volume/drivers/emc/emc_smis_common.py:1010 #, python-format -msgid "Unable to unplug VBD %s" +msgid "Found Storage Hardware ID Management Service: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:275 +#: cinder/volume/drivers/emc/emc_smis_common.py:1054 #, python-format -msgid "Reached maximum number of retries trying to unplug VBD %s" +msgid "Pool %(storage_type)s is not found." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:286 +#: cinder/volume/drivers/emc/emc_smis_common.py:1060 #, python-format -msgid "Unable to destroy VBD %s" +msgid "Storage system not found for pool %(storage_type)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:305 +#: cinder/volume/drivers/emc/emc_smis_common.py:1066 #, python-format -msgid "Creating %(vbd_type)s-type VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +msgid "Pool: %(pool)s SystemName: %(systemname)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:308 +#: cinder/volume/drivers/emc/emc_smis_common.py:1082 #, python-format -msgid "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s." +msgid "Pool name: %(poolname)s System name: %(systemname)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:319 +#: cinder/volume/drivers/emc/emc_smis_common.py:1114 #, python-format -msgid "Unable to destroy VDI %s" +msgid "Volume %(volumename)s not found on the array." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:336 +#: cinder/volume/drivers/emc/emc_smis_common.py:1117 #, python-format -msgid "" -"Created VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s)" -" on %(sr_ref)s." +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:345 +#: cinder/volume/drivers/emc/emc_smis_common.py:1130 #, python-format -msgid "Copied VDI %(vdi_ref)s from VDI %(vdi_to_copy_ref)s on %(sr_ref)s." +msgid "Source: %(volumename)s Target: %(snapshotname)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:353 +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 #, python-format -msgid "Cloned VDI %(vdi_ref)s from VDI %(vdi_to_clone_ref)s" +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " msgstr "" -#: cinder/virt/xenapi/vm_utils.py:372 +#: cinder/volume/drivers/emc/emc_smis_common.py:1158 #, python-format -msgid "No primary VDI found for %(vm_ref)s" +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:379 +#: cinder/volume/drivers/emc/emc_smis_common.py:1184 #, python-format -msgid "Snapshotting with label '%(label)s'" +msgid "Error finding %s." 
msgstr "" -#: cinder/virt/xenapi/vm_utils.py:392 +#: cinder/volume/drivers/emc/emc_smis_common.py:1188 #, python-format -msgid "Created snapshot %(template_vm_ref)s" +msgid "Found %(name)s: %(initiator)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:431 +#: cinder/volume/drivers/emc/emc_smis_common.py:1248 #, python-format -msgid "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s" +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:583 +#: cinder/volume/drivers/emc/emc_smis_common.py:1289 #, python-format -msgid "Creating blank HD of size %(req_size)d gigs" +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:634 +#: cinder/volume/drivers/emc/emc_smis_common.py:1302 #, python-format msgid "" -"Fast cloning is only supported on default local SR of type ext. SR on " -"this system was found to be of type %(sr_type)s. Ignoring the cow flag." +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:724 +#: cinder/volume/drivers/emc/emc_smis_common.py:1314 #, python-format msgid "" -"download_vhd %(image)s attempt %(attempt_num)d/%(max_attempts)d from " -"%(glance_host)s:%(glance_port)s" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:734 +#: cinder/volume/drivers/emc/emc_smis_common.py:1326 #, python-format -msgid "download_vhd failed: %r" +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:750 +#: cinder/volume/drivers/emc/emc_smis_common.py:1361 #, python-format -msgid "Asking xapi to fetch vhd image %(image)s" +msgid "Available device number on %(storage)s: %(device)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:760 +#: cinder/volume/drivers/emc/emc_smis_common.py:1404 #, python-format -msgid "" -"xapi 'download_vhd' returned VDI of type '%(vdi_type)s' with UUID " -"'%(vdi_uuid)s'" +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:789 +#: cinder/volume/drivers/emc/emc_smis_common.py:1409 #, python-format -msgid "vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes=%(vdi_size_bytes)d" +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:805 +#: cinder/volume/drivers/emc/emc_smis_common.py:1419 #, python-format -msgid "image_size_bytes=%(size_bytes)d, allowed_size_bytes=%(allowed_size_bytes)d" +msgid "Device info: %(data)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:809 +#: cinder/volume/drivers/emc/emc_smis_common.py:1441 #, python-format -msgid "" -"Image size %(size_bytes)d exceeded instance_type allowed size " -"%(allowed_size_bytes)d" +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." 
msgstr "" -#: cinder/virt/xenapi/vm_utils.py:831 +#: cinder/volume/drivers/emc/emc_smis_common.py:1463 #, python-format -msgid "Fetching image %(image)s, type %(image_type_str)" +msgid "Found Storage Processor System: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:844 +#: cinder/volume/drivers/emc/emc_smis_common.py:1491 #, python-format -msgid "Size for image %(image)s: %(virtual_size)d" +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1520 +msgid "Error finding Storage Hardware ID Service." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:853 +#: cinder/volume/drivers/emc/emc_smis_common.py:1526 #, python-format msgid "" -"Kernel/Ramdisk image is too large: %(vdi_size)d bytes, max %(max_size)d " -"bytes" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1538 +msgid "Error finding Target WWNs." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:870 +#: cinder/volume/drivers/emc/emc_smis_common.py:1548 #, python-format -msgid "Copying VDI %s to /boot/guest on dom0" +msgid "Add target WWN: %s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:884 +#: cinder/volume/drivers/emc/emc_smis_common.py:1550 #, python-format -msgid "Kernel/Ramdisk VDI %s destroyed" +msgid "Target WWNs: %s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:895 -msgid "Failed to fetch glance image" +#: cinder/volume/drivers/emc/emc_smis_common.py:1566 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:934 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:152 #, python-format -msgid "Detected %(image_type_str)s format for image %(image_ref)s" +msgid "Could not find iSCSI export for volume %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:955 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:161 #, python-format -msgid "Looking up vdi %s for PV kernel" +msgid "Cannot find device number for volume %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:973 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:191 #, python-format -msgid "Unknown image format %(disk_image_type)s" +msgid "Found iSCSI endpoint: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1016 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:198 #, python-format -msgid "VDI %s is still available" +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." 
msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1059 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:215 #, python-format -msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgid "ISCSI properties: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1061 +#: cinder/volume/drivers/hds/hds.py:70 #, python-format -msgid "(VM_UTILS) xenapi power_state -> |%s|" +msgid "Range: start LU: %(start)s, end LU: %(end)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1088 +#: cinder/volume/drivers/hds/hds.py:84 #, python-format -msgid "Unable to parse rrd of %(vm_uuid)s" +msgid "setting LU upper (end) limit to %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1108 +#: cinder/volume/drivers/hds/hds.py:92 #, python-format -msgid "Re-scanning SR %s" +msgid "%(element)s: %(val)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1136 +#: cinder/volume/drivers/hds/hds.py:103 cinder/volume/drivers/hds/hds.py:105 #, python-format -msgid "Flag sr_matching_filter '%s' does not respect formatting convention" +msgid "XML exception reading parameter: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1154 -msgid "" -"XenAPI is unable to find a Storage Repository to install guest instances " -"on. Please check your configuration and/or configure the flag " -"'sr_matching_filter'" +#: cinder/volume/drivers/hds/hds.py:178 +#, python-format +msgid "portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:197 +#, python-format +msgid "No configuration found for service: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1167 -msgid "Cannot find SR of content-type ISO" +#: cinder/volume/drivers/hds/hds.py:250 +#, python-format +msgid "HDP not found: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1175 +#: cinder/volume/drivers/hds/hds.py:289 #, python-format -msgid "ISO: looking at SR %(sr_rec)s" +msgid "iSCSI portal not found for service: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1177 -msgid "ISO: not iso content" +#: cinder/volume/drivers/hds/hds.py:327 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1180 -msgid "ISO: iso content_type, no 'i18n-key' key" +#: cinder/volume/drivers/hds/hds.py:355 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is cloned." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1183 -msgid "ISO: iso content_type, i18n-key value not 'local-storage-iso'" +#: cinder/volume/drivers/hds/hds.py:372 +#, python-format +msgid "LUN %(lun)s extended to %(size)s GB." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1187 -msgid "ISO: SR MATCHing our criteria" +#: cinder/volume/drivers/hds/hds.py:395 +#, python-format +msgid "delete lun %(lun)s on %(name)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1189 -msgid "ISO: ISO, looking to see if it is host local" +#: cinder/volume/drivers/hds/hds.py:480 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created from snapshot." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1192 +#: cinder/volume/drivers/hds/hds.py:503 #, python-format -msgid "ISO: PBD %(pbd_ref)s disappeared" +msgid "LUN %(lun)s of size %(size)s MB is created as snapshot." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1195 +#: cinder/volume/drivers/hds/hds.py:522 #, python-format -msgid "ISO: PBD matching, want %(pbd_rec)s, have %(host)s" +msgid "LUN %s is deleted." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1198 -msgid "ISO: SR with local PBD" +#: cinder/volume/drivers/huawei/__init__.py:57 +msgid "_instantiate_driver: configuration not found." 
msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1220 +#: cinder/volume/drivers/huawei/__init__.py:64 #, python-format msgid "" -"Unable to obtain RRD XML for VM %(vm_uuid)s with server details: " -"%(server)s." +"_instantiate_driver: Loading %(protocol)s driver for Huawei OceanStor " +"%(product)s series storage arrays." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1236 +#: cinder/volume/drivers/huawei/__init__.py:84 #, python-format -msgid "Unable to obtain RRD XML updates with server details: %(server)s." +msgid "" +"\"Product\" or \"Protocol\" is illegal. \"Product\" should be set to " +"either T, Dorado or HVS. \"Protocol\" should be set to either iSCSI or " +"FC. Product: %(product)s Protocol: %(protocol)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1290 +#: cinder/volume/drivers/huawei/huawei_dorado.py:74 #, python-format -msgid "Invalid statistics data from Xenserver: %s" +msgid "" +"initialize_connection: volume name: %(vol)s host: %(host)s initiator: " +"%(wwn)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1343 +#: cinder/volume/drivers/huawei/huawei_dorado.py:92 +#: cinder/volume/drivers/huawei/huawei_t.py:461 #, python-format -msgid "VHD %(vdi_uuid)s has parent %(parent_ref)s" +msgid "initialize_connection: Target FC ports WWNS: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1417 +#: cinder/volume/drivers/huawei/huawei_t.py:101 #, python-format msgid "" -"Parent %(parent_uuid)s doesn't match original parent " -"%(original_parent_uuid)s, waiting for coalesce..." +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(ini)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1427 +#: cinder/volume/drivers/huawei/huawei_t.py:159 +#: cinder/volume/drivers/huawei/rest_common.py:1278 #, python-format -msgid "VHD coalesce attempts exceeded (%(max_attempts)d), giving up..." +msgid "" +"_get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " +"check config file." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1462 +#: cinder/volume/drivers/huawei/huawei_t.py:206 +#: cinder/volume/drivers/huawei/rest_common.py:1083 #, python-format -msgid "Timeout waiting for device %s to be created" +msgid "_get_tgt_iqn: iSCSI IP is %s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1473 +#: cinder/volume/drivers/huawei/huawei_t.py:234 #, python-format -msgid "Plugging VBD %s ... " +msgid "_get_tgt_iqn: iSCSI target iqn is %s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1476 +#: cinder/volume/drivers/huawei/huawei_t.py:248 #, python-format -msgid "Plugging VBD %s done." +msgid "" +"_get_iscsi_tgt_port_info: Failed to get iSCSI port info. Please make sure" +" the iSCSI port IP %s is configured in array." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1478 +#: cinder/volume/drivers/huawei/huawei_t.py:323 +#: cinder/volume/drivers/huawei/huawei_t.py:552 #, python-format -msgid "VBD %(vbd_ref)s plugged as %(orig_dev)s" +msgid "" +"terminate_connection: volume: %(vol)s, host: %(host)s, connector: " +"%(initiator)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1481 +#: cinder/volume/drivers/huawei/huawei_t.py:351 #, python-format -msgid "VBD %(vbd_ref)s plugged into wrong dev, remapping to %(dev)s" +msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1490 -#, python-format -msgid "Destroying VBD for VDI %s ... " +#: cinder/volume/drivers/huawei/huawei_t.py:436 +msgid "validate_connector: The FC driver requires thewwpns in the connector." 
msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1498 +#: cinder/volume/drivers/huawei/huawei_t.py:443 #, python-format -msgid "Destroying VBD for VDI %s done." +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(wwn)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1511 +#: cinder/volume/drivers/huawei/huawei_t.py:578 #, python-format -msgid "Running pygrub against %s" +msgid "_remove_fc_ports: FC port was not found on host %(hostid)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1518 +#: cinder/volume/drivers/huawei/huawei_utils.py:40 #, python-format -msgid "Found Xen kernel %s" +msgid "parse_xml_file: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1520 -msgid "No Xen kernel found. Booting HVM." +#: cinder/volume/drivers/huawei/huawei_utils.py:129 +#, python-format +msgid "_get_host_os_type: Host %(ip)s OS type is %(os)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1533 -msgid "Partitions:" +#: cinder/volume/drivers/huawei/rest_common.py:59 +#, python-format +msgid "HVS Request URL: %(url)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1539 +#: cinder/volume/drivers/huawei/rest_common.py:60 #, python-format -msgid " %(num)s: %(ptype)s %(size)d sectors" +msgid "HVS Request Data: %(data)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1565 +#: cinder/volume/drivers/huawei/rest_common.py:73 #, python-format -msgid "" -"Writing partition table %(primary_first)d %(primary_last)d to " -"%(dev_path)s..." +msgid "HVS Response Data: %(res)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1578 +#: cinder/volume/drivers/huawei/rest_common.py:75 #, python-format -msgid "Writing partition table %s done." +msgid "Bad response from server: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1632 -#, python-format -msgid "" -"Starting sparse_copy src=%(src_path)s dst=%(dst_path)s " -"virtual_size=%(virtual_size)d block_size=%(block_size)d" +#: cinder/volume/drivers/huawei/rest_common.py:82 +msgid "JSON transfer error" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1664 +#: cinder/volume/drivers/huawei/rest_common.py:102 #, python-format -msgid "" -"Finished sparse_copy in %(duration).2f secs, %(compression_pct).2f%% " -"reduction in size" +msgid "Login error, reason is %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1714 +#: cinder/volume/drivers/huawei/rest_common.py:166 +#, python-format msgid "" -"XenServer tools installed in this image are capable of network injection." -" Networking files will not bemanipulated" +"%(err)s\n" +"result: %(res)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1722 -msgid "" -"XenServer tools are present in this image but are not capable of network " -"injection" +#: cinder/volume/drivers/huawei/rest_common.py:173 +#, python-format +msgid "%s \"data\" was not in result." 
msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1726 -msgid "XenServer tools are not installed in this image" +#: cinder/volume/drivers/huawei/rest_common.py:208 +msgid "Can't find the Qos policy in array" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1742 -msgid "Manipulating interface files directly" +#: cinder/volume/drivers/huawei/rest_common.py:246 +msgid "Can't find lun or lun group in array" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1751 +#: cinder/volume/drivers/huawei/rest_common.py:280 #, python-format -msgid "Failed to mount filesystem (expected for non-linux instances): %s" +msgid "Invalid resource pool: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:131 cinder/virt/xenapi/vmops.py:722 +#: cinder/volume/drivers/huawei/rest_common.py:298 #, python-format -msgid "Updating progress to %(progress)d" +msgid "Get pool info error, pool name is:%s" msgstr "" -#: cinder/virt/xenapi/vmops.py:231 +#: cinder/volume/drivers/huawei/rest_common.py:327 #, python-format -msgid "Attempted to power on non-existent instance bad instance id %s" +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:233 -msgid "Starting instance" +#: cinder/volume/drivers/huawei/rest_common.py:354 +#, python-format +msgid "_stop_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:303 -msgid "Removing kernel/ramdisk files from dom0" +#: cinder/volume/drivers/huawei/rest_common.py:474 +#, python-format +msgid "" +"_mapping_hostgroup_and_lungroup: lun_group: %(lun_group)sview_id: " +"%(view_id)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:358 -msgid "Failed to spawn, rolling back" +#: cinder/volume/drivers/huawei/rest_common.py:511 +#: cinder/volume/drivers/huawei/rest_common.py:543 +#, python-format +msgid "initiator name:%(initiator_name)s, volume name:%(volume)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:443 -msgid "Detected ISO image type, creating blank VM for install" +#: cinder/volume/drivers/huawei/rest_common.py:527 +#, python-format +msgid "host lun id is %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:462 -msgid "Auto configuring disk, attempting to resize partition..." +#: cinder/volume/drivers/huawei/rest_common.py:553 +#, python-format +msgid "the free wwns %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:515 +#: cinder/volume/drivers/huawei/rest_common.py:574 #, python-format -msgid "Invalid value for injected_files: %r" +msgid "the fc server properties is:%s" msgstr "" -#: cinder/virt/xenapi/vmops.py:520 +#: cinder/volume/drivers/huawei/rest_common.py:688 #, python-format -msgid "Injecting file path: '%s'" +msgid "JSON transfer data error. %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:527 -msgid "Setting admin password" +#: cinder/volume/drivers/huawei/rest_common.py:874 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:531 -msgid "Resetting network" +#: cinder/volume/drivers/huawei/rest_common.py:937 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(fetchtype)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:538 -msgid "Setting VCPU weight" +#: cinder/volume/drivers/huawei/rest_common.py:964 +#, python-format +msgid "" +"PrefetchType config is wrong. PrefetchType must in 1,2,3,4. 
fetchtype " +"is:%(fetchtype)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:544 -msgid "Starting VM" +#: cinder/volume/drivers/huawei/rest_common.py:970 +msgid "Use default prefetch fetchtype. Prefetch fetchtype:Intelligent." msgstr "" -#: cinder/virt/xenapi/vmops.py:551 +#: cinder/volume/drivers/huawei/rest_common.py:982 #, python-format msgid "" -"Latest agent build for %(hypervisor)s/%(os)s/%(architecture)s is " -"%(version)s" +"_wait_for_luncopy:LUNcopy status is not normal.LUNcopy name: " +"%(luncopyname)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:554 +#: cinder/volume/drivers/huawei/rest_common.py:1056 #, python-format -msgid "No agent build found for %(hypervisor)s/%(os)s/%(architecture)s" -msgstr "" - -#: cinder/virt/xenapi/vmops.py:561 -msgid "Waiting for instance state to become running" +msgid "" +"_get_iscsi_port_info: Failed to get iscsi port info through config IP " +"%(ip)s, please check config file." msgstr "" -#: cinder/virt/xenapi/vmops.py:573 -msgid "Querying agent version" +#: cinder/volume/drivers/huawei/rest_common.py:1101 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:576 +#: cinder/volume/drivers/huawei/rest_common.py:1124 #, python-format -msgid "Instance agent version: %s" +msgid "_parse_volume_type: type id: %(type_id)s config parameter is: %(params)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:581 +#: cinder/volume/drivers/huawei/rest_common.py:1157 #, python-format -msgid "Updating Agent to %s" +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the configuration file " +"%(conf)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:616 +#: cinder/volume/drivers/huawei/rest_common.py:1162 #, python-format -msgid "No opaque_ref could be determined for '%s'." +msgid "The config parameters are: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:670 -msgid "Finished snapshot and upload for VM" +#: cinder/volume/drivers/huawei/rest_common.py:1239 +#: cinder/volume/drivers/huawei/ssh_common.py:118 +#: cinder/volume/drivers/huawei/ssh_common.py:1265 +#, python-format +msgid "_check_conf_file: Config file invalid. %s must be set." msgstr "" -#: cinder/virt/xenapi/vmops.py:677 -msgid "Starting snapshot for VM" +#: cinder/volume/drivers/huawei/rest_common.py:1246 +#: cinder/volume/drivers/huawei/ssh_common.py:125 +msgid "_check_conf_file: Config file invalid. StoragePool must be set." msgstr "" -#: cinder/virt/xenapi/vmops.py:686 +#: cinder/volume/drivers/huawei/rest_common.py:1256 #, python-format -msgid "Unable to Snapshot instance: %(exc)s" +msgid "" +"_check_conf_file: Config file invalid. Host OSType invalid.\n" +"The valid values are: %(os_list)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:702 -msgid "Failed to transfer vhd to new host" +#: cinder/volume/drivers/huawei/rest_common.py:1300 +msgid "Can not find lun in array" msgstr "" -#: cinder/virt/xenapi/vmops.py:770 +#: cinder/volume/drivers/huawei/ssh_common.py:54 #, python-format -msgid "Resizing down VDI %(cow_uuid)s from %(old_gb)dGB to %(new_gb)dGB" +msgid "ssh_read: Read SSH timeout. %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:893 -#, python-format -msgid "Resizing up VDI %(vdi_uuid)s from %(old_gb)dGB to %(new_gb)dGB" +#: cinder/volume/drivers/huawei/ssh_common.py:70 +msgid "No response message. Please check system status." 
msgstr "" -#: cinder/virt/xenapi/vmops.py:901 -msgid "Resize complete" +#: cinder/volume/drivers/huawei/ssh_common.py:101 +#: cinder/volume/drivers/huawei/ssh_common.py:1249 +msgid "do_setup" msgstr "" -#: cinder/virt/xenapi/vmops.py:928 +#: cinder/volume/drivers/huawei/ssh_common.py:135 +#: cinder/volume/drivers/huawei/ssh_common.py:1287 #, python-format -msgid "Failed to query agent version: %(resp)r" +msgid "" +"_check_conf_file: Config file invalid. Host OSType is invalid.\n" +"The valid values are: %(os_list)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:949 +#: cinder/volume/drivers/huawei/ssh_common.py:169 #, python-format -msgid "domid changed from %(domid)s to %(newdomid)s" +msgid "_get_login_info: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:962 +#: cinder/volume/drivers/huawei/ssh_common.py:224 #, python-format -msgid "Failed to update agent: %(resp)r" +msgid "create_volume: volume name: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:983 +#: cinder/volume/drivers/huawei/ssh_common.py:242 #, python-format -msgid "Failed to exchange keys: %(resp)r" +msgid "" +"_name_translate: Name in cinder: %(old)s, new name in storage system: " +"%(new)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:998 +#: cinder/volume/drivers/huawei/ssh_common.py:279 #, python-format -msgid "Failed to update password: %(resp)r" +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the element in configuration " +"file %(conf)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1023 +#: cinder/volume/drivers/huawei/ssh_common.py:373 +#: cinder/volume/drivers/huawei/ssh_common.py:1451 #, python-format -msgid "Failed to inject file: %(resp)r" +msgid "LUNType must be \"Thin\" or \"Thick\". LUNType:%(type)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1032 -msgid "VM already halted, skipping shutdown..." +#: cinder/volume/drivers/huawei/ssh_common.py:395 +msgid "" +"_parse_conf_lun_params: Use default prefetch type. Prefetch type: " +"Intelligent" msgstr "" -#: cinder/virt/xenapi/vmops.py:1036 -msgid "Shutting down VM" +#: cinder/volume/drivers/huawei/ssh_common.py:421 +#, python-format +msgid "" +"_get_maximum_capacity_pool_id: Failed to get pool id. Please check config" +" file and make sure the StoragePool %s is created in storage array." msgstr "" -#: cinder/virt/xenapi/vmops.py:1054 -msgid "Unable to find VBD for VM" +#: cinder/volume/drivers/huawei/ssh_common.py:436 +#, python-format +msgid "CLI command: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1097 -msgid "Using RAW or VHD, skipping kernel and ramdisk deletion" +#: cinder/volume/drivers/huawei/ssh_common.py:466 +#, python-format +msgid "" +"_execute_cli: Can not connect to IP %(old)s, try to connect to the other " +"IP %(new)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1104 -msgid "instance has a kernel or ramdisk but not both" +#: cinder/volume/drivers/huawei/ssh_common.py:501 +#, python-format +msgid "_execute_cli: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1111 -msgid "kernel/ramdisk files removed" +#: cinder/volume/drivers/huawei/ssh_common.py:511 +#, python-format +msgid "delete_volume: volume name: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1121 -msgid "VM destroyed" +#: cinder/volume/drivers/huawei/ssh_common.py:516 +#, python-format +msgid "delete_volume: Volume %(name)s does not exist." 
msgstr "" -#: cinder/virt/xenapi/vmops.py:1147 -msgid "Destroying VM" +#: cinder/volume/drivers/huawei/ssh_common.py:570 +#, python-format +msgid "" +"create_volume_from_snapshot: snapshot name: %(snapshot)s, volume name: " +"%(volume)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1169 -msgid "VM is not present, skipping destroy..." +#: cinder/volume/drivers/huawei/ssh_common.py:580 +#, python-format +msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:1222 +#: cinder/volume/drivers/huawei/ssh_common.py:650 #, python-format -msgid "Instance is already in Rescue Mode: %s" +msgid "_wait_for_luncopy: LUNcopy %(luncopyname)s status is %(status)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1296 +#: cinder/volume/drivers/huawei/ssh_common.py:688 #, python-format -msgid "Found %(instance_count)d hung reboots older than %(timeout)d seconds" +msgid "create_cloned_volume: src volume: %(src)s, tgt volume: %(tgt)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1300 -msgid "Automatically hard rebooting" +#: cinder/volume/drivers/huawei/ssh_common.py:697 +#, python-format +msgid "Source volume %(name)s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:1363 +#: cinder/volume/drivers/huawei/ssh_common.py:739 #, python-format -msgid "Setting migration %(migration_id)s to error: %(reason)s" +msgid "" +"extend_volume: extended volume name: %(extended_name)s new added volume " +"name: %(added_name)s new added volume size: %(added_size)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1374 +#: cinder/volume/drivers/huawei/ssh_common.py:747 #, python-format -msgid "" -"Automatically confirming migration %(migration_id)s for instance " -"%(instance_uuid)s" +msgid "extend_volume: volume %s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:1379 +#: cinder/volume/drivers/huawei/ssh_common.py:779 #, python-format -msgid "Instance %(instance_uuid)s not found" +msgid "create_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1383 -msgid "In ERROR state" +#: cinder/volume/drivers/huawei/ssh_common.py:785 +msgid "create_snapshot: Resource pool needs 1GB valid size at least." msgstr "" -#: cinder/virt/xenapi/vmops.py:1389 +#: cinder/volume/drivers/huawei/ssh_common.py:792 #, python-format -msgid "In %(task_state)s task_state, not RESIZE_VERIFY" +msgid "create_snapshot: Volume %(name)s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:1396 +#: cinder/volume/drivers/huawei/ssh_common.py:855 #, python-format -msgid "Error auto-confirming resize: %(e)s. Will retry later." +msgid "delete_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1418 -msgid "Could not get bandwidth info." +#: cinder/volume/drivers/huawei/ssh_common.py:865 +#, python-format +msgid "" +"delete_snapshot: Can not delete snapshot %s for it is a source LUN of " +"LUNCopy." msgstr "" -#: cinder/virt/xenapi/vmops.py:1469 -msgid "Injecting network info to xenstore" +#: cinder/volume/drivers/huawei/ssh_common.py:873 +#, python-format +msgid "delete_snapshot: Snapshot %(snap)s does not exist." 
msgstr "" -#: cinder/virt/xenapi/vmops.py:1483 -msgid "Creating vifs" +#: cinder/volume/drivers/huawei/ssh_common.py:916 +#, python-format +msgid "" +"%(func)s: %(msg)s\n" +"CLI command: %(cmd)s\n" +"CLI out: %(out)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1492 +#: cinder/volume/drivers/huawei/ssh_common.py:933 #, python-format -msgid "Creating VIF for network %(network_ref)s" +msgid "map_volume: Volume %s was not found." msgstr "" -#: cinder/virt/xenapi/vmops.py:1495 +#: cinder/volume/drivers/huawei/ssh_common.py:1079 #, python-format -msgid "Created VIF %(vif_ref)s, network %(network_ref)s" +msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1520 -msgid "Injecting hostname to xenstore" +#: cinder/volume/drivers/huawei/ssh_common.py:1102 +#, python-format +msgid "remove_map: Host %s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:1545 +#: cinder/volume/drivers/huawei/ssh_common.py:1106 #, python-format -msgid "" -"The agent call to %(method)s returned an invalid response: %(ret)r. " -"path=%(path)s; args=%(args)r" +msgid "remove_map: Volume %s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:1566 +#: cinder/volume/drivers/huawei/ssh_common.py:1119 #, python-format -msgid "TIMEOUT: The call to %(method)s timed out. args=%(args)r" +msgid "remove_map: No map between host %(host)s and volume %(volume)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1570 +#: cinder/volume/drivers/huawei/ssh_common.py:1138 #, python-format msgid "" -"NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. " -"args=%(args)r" +"_delete_map: There are IOs accessing the system. Retry to delete host map" +" %(mapid)s 10s later." msgstr "" -#: cinder/virt/xenapi/vmops.py:1575 +#: cinder/volume/drivers/huawei/ssh_common.py:1146 #, python-format -msgid "The call to %(method)s returned an error: %(e)s. args=%(args)r" +msgid "" +"_delete_map: Failed to delete host map %(mapid)s.\n" +"CLI out: %(out)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1661 -#, python-format -msgid "OpenSSL error: %s" +#: cinder/volume/drivers/huawei/ssh_common.py:1185 +msgid "_update_volume_stats: Updating volume stats." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:52 -msgid "creating sr within volume_utils" +#: cinder/volume/drivers/huawei/ssh_common.py:1277 +msgid "_check_conf_file: Config file invalid. StoragePool must be specified." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:55 cinder/virt/xenapi/volume_utils.py:83 +#: cinder/volume/drivers/huawei/ssh_common.py:1311 +msgid "" +"_get_device_type: The driver only supports Dorado5100 and Dorado 2100 G2 " +"now." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1389 #, python-format -msgid "type is = %s" +msgid "" +"create_volume_from_snapshot: %(device)s does not support create volume " +"from snapshot." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:58 cinder/virt/xenapi/volume_utils.py:86 +#: cinder/volume/drivers/huawei/ssh_common.py:1396 #, python-format -msgid "name = %s" +msgid "create_cloned_volume: %(device)s does not support clone volume." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:71 +#: cinder/volume/drivers/huawei/ssh_common.py:1404 #, python-format -msgid "Created %(label)s as %(sr_ref)s." +msgid "extend_volume: %(device)s does not support extend volume." 
msgstr "" -#: cinder/virt/xenapi/volume_utils.py:76 cinder/virt/xenapi/volume_utils.py:174 -msgid "Unable to create Storage Repository" +#: cinder/volume/drivers/huawei/ssh_common.py:1413 +#, python-format +msgid "create_snapshot: %(device)s does not support snapshot." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:80 -msgid "introducing sr within volume_utils" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:132 +msgid "enter: do_setup" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:103 cinder/virt/xenapi/volume_utils.py:170 -#: cinder/virt/xenapi/volumeops.py:156 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:142 #, python-format -msgid "Introduced %(label)s as %(sr_ref)s." +msgid "Failed getting details for pool %s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:106 -msgid "Creating pbd for SR" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:179 +msgid "do_setup: No configured nodes." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:108 -msgid "Plugging SR" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:182 +msgid "leave: do_setup" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:116 cinder/virt/xenapi/volumeops.py:160 -msgid "Unable to introduce Storage Repository" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:186 +msgid "enter: check_for_setup_error" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:127 cinder/virt/xenapi/volumeops.py:50 -msgid "Unable to get SR using uuid" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:190 +msgid "Unable to determine system name" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:129 -#, python-format -msgid "Forgetting SR %s..." +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:193 +msgid "Unable to determine system id" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:137 -msgid "Unable to forget Storage Repository" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:196 +msgid "Unable to determine pool extent size" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:157 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:203 +#: cinder/volume/drivers/netapp/iscsi.py:122 +#: cinder/volume/drivers/netapp/nfs.py:639 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:157 #, python-format -msgid "Introducing %s..." +msgid "%s is not set" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:186 -#, python-format -msgid "Unable to find SR from VBD %s" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:209 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:217 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:225 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:235 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:254 +msgid "The connector does not contain the required information." 
msgstr "" -#: cinder/virt/xenapi/volume_utils.py:204 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:277 #, python-format -msgid "Ignoring exception %(exc)s when getting PBDs for %(sr_ref)s" +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:210 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:297 +msgid "CHAP secret exists for host but CHAP is disabled" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:302 #, python-format -msgid "Ignoring exception %(exc)s when unplugging PBD %(pbd)s" +msgid "initialize_connection: Failed to get attributes for volume %s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:234 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:314 #, python-format -msgid "Unable to introduce VDI on SR %s" +msgid "Did not find expected column name in lsvdisk: %s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:242 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:316 #, python-format -msgid "Unable to get record of VDI %s on" +msgid "initialize_connection: Missing volume attribute for volume %s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:264 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:333 #, python-format -msgid "Unable to introduce VDI for SR %s" +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:274 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:342 #, python-format -msgid "Error finding vdis in SR %s" +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:365 +msgid "" +"Could not get FC connection information for the host-volume connection. " +"Is the host configured properly for FC connections?" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:281 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:380 #, python-format -msgid "Unable to find vbd for vdi %s" +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:315 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:385 #, python-format -msgid "Unable to obtain target information %(data)s, %(mountpoint)s" +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:341 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:403 #, python-format -msgid "Mountpoint cannot be translated: %s" +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:64 -msgid "Could not find VDI ref" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:411 +msgid "terminate_connection: Failed to get host name from connector." msgstr "" -#: cinder/virt/xenapi/volumeops.py:69 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:421 #, python-format -msgid "Creating SR %s" +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:447 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:459 +msgid "create_cloned_volume: Source and destination size differ." 
msgstr "" -#: cinder/virt/xenapi/volumeops.py:73 -msgid "Could not create SR" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:470 +#, python-format +msgid "enter: extend_volume: volume %s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:76 -msgid "Could not retrieve SR record" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:474 +msgid "extend_volume: Extending a volume with snapshots is not supported." msgstr "" -#: cinder/virt/xenapi/volumeops.py:81 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:481 #, python-format -msgid "Introducing SR %s" +msgid "leave: extend_volume: volume %s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:85 -msgid "SR found in xapi database. No need to introduce" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:497 +#, python-format +msgid "enter: migrate_volume: id=%(id)s, host=%(host)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:90 -msgid "Could not introduce SR" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:523 +#, python-format +msgid "leave: migrate_volume: id=%(id)s, host=%(host)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:94 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:540 #, python-format -msgid "Checking for SR %s" +msgid "" +"enter: retype: id=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:106 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:581 #, python-format -msgid "SR %s not found in the xapi database" +msgid "" +"exit: retype: ild=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:622 +msgid "Could not get pool data from the storage" msgstr "" -#: cinder/virt/xenapi/volumeops.py:112 -msgid "Could not forget SR" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:623 +msgid "_update_volume_stats: Could not get storage pool data" msgstr "" -#: cinder/virt/xenapi/volumeops.py:121 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:44 #, python-format -msgid "Attach_volume: %(connection_info)s, %(instance_name)s, %(mountpoint)s" +msgid "Could not find key in output of command %(cmd)s: %(out)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:178 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:64 #, python-format -msgid "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s" +msgid "Failed to get code level (%s)." 
msgstr "" -#: cinder/virt/xenapi/volumeops.py:189 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:86 #, python-format -msgid "Unable to use SR %(sr_ref)s for instance %(instance_name)s" +msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:197 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:143 #, python-format -msgid "Unable to attach volume to instance %s" +msgid "WWPN on node %(node)s: %(wwpn)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:200 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:165 #, python-format -msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s" +msgid "Failed to find host %s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:210 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:178 #, python-format -msgid "Detach_volume: %(instance_name)s, %(mountpoint)s" +msgid "enter: get_host_from_connector: %s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:219 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:207 #, python-format -msgid "Unable to locate volume %s" +msgid "leave: get_host_from_connector: host %s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:227 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:218 #, python-format -msgid "Unable to detach volume %s" +msgid "enter: create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:224 +msgid "create_host: Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:235 +msgid "create_host: No initiators or wwpns supplied." msgstr "" -#: cinder/virt/xenapi/volumeops.py:232 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:265 #, python-format -msgid "Unable to destroy vbd %s" +msgid "leave: create_host: host %(host)s - %(host_name)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:239 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:275 #, python-format -msgid "Error purging SR %s" +msgid "enter: map_vol_to_host: volume %(volume_name)s to host %(host_name)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:241 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:301 #, python-format -msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s" +msgid "" +"leave: map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host " +"%(host_name)s" msgstr "" -#: cinder/vnc/xvp_proxy.py:98 cinder/vnc/xvp_proxy.py:103 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:311 #, python-format -msgid "Error in handshake: %s" +msgid "enter: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" msgstr "" -#: cinder/vnc/xvp_proxy.py:119 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:318 #, python-format -msgid "Invalid request: %s" +msgid "unmap_vol_from_host: No mapping of volume %(vol_name)s to any host found." msgstr "" -#: cinder/vnc/xvp_proxy.py:139 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:324 #, python-format -msgid "Request: %s" +msgid "" +"unmap_vol_from_host: Multiple mappings of volume %(vol_name)s found, no " +"host specified." msgstr "" -#: cinder/vnc/xvp_proxy.py:142 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:336 #, python-format -msgid "Request made with missing token: %s" +msgid "" +"unmap_vol_from_host: No mapping of volume %(vol_name)s to host %(host) " +"found." 
msgstr "" -#: cinder/vnc/xvp_proxy.py:153 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:348 #, python-format -msgid "Request made with invalid token: %s" +msgid "leave: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:377 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:383 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:390 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" msgstr "" -#: cinder/vnc/xvp_proxy.py:160 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:397 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:402 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:408 #, python-format -msgid "Unexpected error: %s" +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" msgstr "" -#: cinder/vnc/xvp_proxy.py:180 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:417 #, python-format -msgid "Starting cinder-xvpvncproxy node (version %s)" +msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s" msgstr "" -#: cinder/volume/api.py:74 cinder/volume/api.py:220 -msgid "status must be available" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:452 +msgid "Protocol must be specified as ' iSCSI' or ' FC'." msgstr "" -#: cinder/volume/api.py:85 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:495 #, python-format -msgid "Quota exceeded for %(pid)s, tried to create %(size)sG volume" +msgid "enter: create_vdisk: vdisk %s " msgstr "" -#: cinder/volume/api.py:137 -msgid "Volume status must be available or error" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:498 +#, python-format +msgid "leave: _create_vdisk: volume %s " msgstr "" -#: cinder/volume/api.py:142 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:525 #, python-format -msgid "Volume still has %d dependent snapshots" +msgid "" +"Unexecpted mapping status %(status)s for mapping%(id)s. Attributes: " +"%(attr)s" msgstr "" -#: cinder/volume/api.py:223 -msgid "already attached" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:535 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within theallotted %(to)d " +"seconds timeout. Terminating." 
msgstr "" -#: cinder/volume/api.py:230 -msgid "already detached" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:544 +#, python-format +msgid "" +"enter: run_flashcopy: execute FlashCopy from source %(source)s to target " +"%(target)s" msgstr "" -#: cinder/volume/api.py:292 -msgid "must be available" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:552 +#, python-format +msgid "leave: run_flashcopy: FlashCopy started from %(source)s to %(target)s" msgstr "" -#: cinder/volume/api.py:325 -msgid "Volume Snapshot status must be available or error" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:572 +#, python-format +msgid "Loopcall: _check_vdisk_fc_mappings(), vdisk %s" msgstr "" -#: cinder/volume/driver.py:96 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:595 #, python-format -msgid "Recovering from a failed execute. Try number %s" +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" msgstr "" -#: cinder/volume/driver.py:106 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:631 #, python-format -msgid "volume group %s doesn't exist" +msgid "Calling _ensure_vdisk_no_fc_mappings: vdisk %s" msgstr "" -#: cinder/volume/driver.py:270 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:639 #, python-format -msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %d" +msgid "enter: delete_vdisk: vdisk %s" msgstr "" -#: cinder/volume/driver.py:318 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:641 #, python-format -msgid "Skipping remove_export. No iscsi_target provisioned for volume: %d" +msgid "Tried to delete non-existant vdisk %s." msgstr "" -#: cinder/volume/driver.py:327 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:645 +#, python-format +msgid "leave: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:649 +#, python-format +msgid "enter: create_copy: snapshot %(src)s to %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:654 +#, python-format +msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:669 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt)s from vdisk %(src)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:691 +msgid "migrate_volume started without a vdisk copy in the expected pool." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:743 #, python-format msgid "" -"Skipping remove_export. 
No iscsi_target is presently exported for volume:"
-" %d"
+"Ignore change IO group as storage code level is %(code_level)s, lower "
+"than 6.4.0.0"
 msgstr ""
 
-#: cinder/volume/driver.py:337
-msgid "ISCSI provider_location not stored, using discovery"
+#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:35
+#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:211
+#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:244
+#, python-format
+msgid ""
+"CLI Exception output:\n"
+" command: %(cmd)s\n"
+" stdout: %(out)s\n"
+" stderr: %(err)s"
 msgstr ""
 
-#: cinder/volume/driver.py:384
+#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:53
 #, python-format
-msgid "Could not find iSCSI export for volume %s"
+msgid "Expected no output from CLI command %(cmd)s, got %(out)s"
 msgstr ""
 
-#: cinder/volume/driver.py:388
+#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:65
+#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:256
 #, python-format
-msgid "ISCSI Discovery: Found %s"
+msgid ""
+"Failed to parse CLI output:\n"
+" command: %(cmd)s\n"
+" stdout: %(out)s\n"
+" stderr: %(err)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:142
+msgid "Must pass wwpn or host to lsfabric."
 msgstr ""
 
-#: cinder/volume/driver.py:466
+#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:158
 #, python-format
-msgid "Cannot confirm exported volume id:%(volume_id)s."
+msgid "Did not find a success message or error for %(fun)s: %(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:163
+msgid ""
+"storwize_svc_multihostmap_enabled is set to False, not allowing multi-"
+"host mapping."
 msgstr ""
 
-#: cinder/volume/driver.py:493
+#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:347
 #, python-format
-msgid "FAKE ISCSI: %s"
+msgid "Did not find expected key %(key)s in %(fun)s: %(raw)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:382
+#, python-format
+msgid ""
+"Unexpected CLI response: header/row mismatch. header: %(header)s, row: "
+"%(row)s"
 msgstr ""
 
-#: cinder/volume/driver.py:505
+#: cinder/volume/drivers/netapp/api.py:419
 #, python-format
-msgid "rbd has no pool %s"
+msgid "No element by the given name %s."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/api.py:440
+msgid "Not a valid value for NaElement."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/api.py:444
+msgid "NaElement name cannot be null."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/api.py:468
+msgid "Type cannot be converted into NaElement."
+msgstr "" + +#: cinder/volume/drivers/netapp/common.py:75 +msgid "Required configuration not found" msgstr "" -#: cinder/volume/driver.py:579 +#: cinder/volume/drivers/netapp/common.py:103 #, python-format -msgid "Sheepdog is not working: %s" +msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s" msgstr "" -#: cinder/volume/driver.py:581 -msgid "Sheepdog is not working" +#: cinder/volume/drivers/netapp/common.py:109 +#, python-format +msgid "Storage family %s is not supported" msgstr "" -#: cinder/volume/driver.py:680 cinder/volume/driver.py:685 +#: cinder/volume/drivers/netapp/common.py:116 #, python-format -msgid "LoggingVolumeDriver: %s" +msgid "No default storage protocol found for storage family %(storage_family)s" msgstr "" -#: cinder/volume/manager.py:96 +#: cinder/volume/drivers/netapp/common.py:123 #, python-format -msgid "Re-exporting %s volumes" +msgid "" +"Protocol %(storage_protocol)s is not supported for storage family " +"%(storage_family)s" msgstr "" -#: cinder/volume/manager.py:101 +#: cinder/volume/drivers/netapp/common.py:130 #, python-format -msgid "volume %s: skipping export" +msgid "" +"NetApp driver of family %(storage_family)s and protocol " +"%(storage_protocol)s loaded" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:139 +msgid "Only loading netapp drivers supported." msgstr "" -#: cinder/volume/manager.py:107 +#: cinder/volume/drivers/netapp/common.py:158 #, python-format -msgid "volume %s: creating" +msgid "" +"The configured NetApp driver is deprecated. Please refer the link to " +"resolve the issue '%s'." msgstr "" -#: cinder/volume/manager.py:119 +#: cinder/volume/drivers/netapp/iscsi.py:69 #, python-format -msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgid "No metadata property %(prop)s defined for the LUN %(name)s" msgstr "" -#: cinder/volume/manager.py:131 +#: cinder/volume/drivers/netapp/iscsi.py:105 #, python-format -msgid "volume %s: creating export" +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:150 +msgid "Success getting LUN list from server" msgstr "" -#: cinder/volume/manager.py:144 +#: cinder/volume/drivers/netapp/iscsi.py:166 #, python-format -msgid "volume %s: created successfully" +msgid "Created LUN with name %s" msgstr "" -#: cinder/volume/manager.py:153 -msgid "Volume is still attached" +#: cinder/volume/drivers/netapp/iscsi.py:175 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." 
msgstr "" -#: cinder/volume/manager.py:155 -msgid "Volume is not local to this node" +#: cinder/volume/drivers/netapp/iscsi.py:191 +#, python-format +msgid "Destroyed LUN %s" msgstr "" -#: cinder/volume/manager.py:159 +#: cinder/volume/drivers/netapp/iscsi.py:227 #, python-format -msgid "volume %s: removing export" +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" msgstr "" -#: cinder/volume/manager.py:161 +#: cinder/volume/drivers/netapp/iscsi.py:232 #, python-format -msgid "volume %s: deleting" +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" msgstr "" -#: cinder/volume/manager.py:164 +#: cinder/volume/drivers/netapp/iscsi.py:238 #, python-format -msgid "volume %s: volume is busy" +msgid "Failed to get LUN target details for the LUN %s" msgstr "" -#: cinder/volume/manager.py:176 +#: cinder/volume/drivers/netapp/iscsi.py:249 #, python-format -msgid "volume %s: deleted successfully" +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:252 +#, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:290 +#, fuzzy, python-format +msgid "Snapshot %s deletion successful" msgstr "bind %s: slettet" -#: cinder/volume/manager.py:183 +#: cinder/volume/drivers/netapp/iscsi.py:310 +#: cinder/volume/drivers/netapp/iscsi.py:565 +#: cinder/volume/drivers/netapp/nfs.py:99 +#: cinder/volume/drivers/netapp/nfs.py:206 #, python-format -msgid "snapshot %s: creating" +msgid "Resizing %s failed. Cleaning volume." msgstr "" -#: cinder/volume/manager.py:187 +#: cinder/volume/drivers/netapp/iscsi.py:325 #, python-format -msgid "snapshot %(snap_name)s: creating" +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" msgstr "" -#: cinder/volume/manager.py:202 +#: cinder/volume/drivers/netapp/iscsi.py:412 #, python-format -msgid "snapshot %s: created successfully" +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" msgstr "" -#: cinder/volume/manager.py:211 +#: cinder/volume/drivers/netapp/iscsi.py:431 #, python-format -msgid "snapshot %s: deleting" +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:511 +msgid "Object is not a NetApp LUN." msgstr "" -#: cinder/volume/manager.py:214 +#: cinder/volume/drivers/netapp/iscsi.py:543 #, python-format -msgid "snapshot %s: snapshot is busy" +msgid "Message: %s" msgstr "" -#: cinder/volume/manager.py:226 +#: cinder/volume/drivers/netapp/iscsi.py:545 #, python-format -msgid "snapshot %s: deleted successfully" +msgid "Error getting lun attribute. Exception: %s" msgstr "" -#: cinder/volume/manager.py:310 -msgid "Checking volume capabilities" +#: cinder/volume/drivers/netapp/iscsi.py:600 +#, python-format +msgid "No need to extend volume %s as it is already the requested new size." msgstr "" -#: cinder/volume/manager.py:314 +#: cinder/volume/drivers/netapp/iscsi.py:606 #, python-format -msgid "New capabilities found: %s" +msgid "Resizing lun %s directly to new size." msgstr "" -#: cinder/volume/manager.py:325 -msgid "Clear capabilities" +#: cinder/volume/drivers/netapp/iscsi.py:633 +#, python-format +msgid "Lun %(path)s geometry failed. Message - %(msg)s" msgstr "" -#: cinder/volume/manager.py:329 +#: cinder/volume/drivers/netapp/iscsi.py:662 #, python-format -msgid "Notification {%s} received" +msgid "Moving lun %(name)s to %(new_name)s." 
msgstr "" -#: cinder/volume/netapp.py:79 +#: cinder/volume/drivers/netapp/iscsi.py:677 #, python-format -msgid "API %(name)sfailed: %(reason)s" +msgid "Resizing lun %s using sub clone to new size." msgstr "" -#: cinder/volume/netapp.py:109 +#: cinder/volume/drivers/netapp/iscsi.py:684 #, python-format -msgid "%s is not set" +msgid "%s cannot be sub clone resized as it is hosted on compressed volume" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:690 +#, python-format +msgid "%s cannot be sub clone resized as it contains no blocks." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:707 +#, python-format +msgid "Post clone resize lun %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:718 +#, python-format +msgid "Failure staging lun %s to tmp." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:723 +#, python-format +msgid "Failure moving new cloned lun to %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:727 +#, python-format +msgid "Failure deleting staged tmp lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:730 +#, python-format +msgid "Unknown exception in post clone resize lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:732 +#, python-format +msgid "Exception details: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:736 +msgid "Getting lun block count." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:741 +#, python-format +msgid "Failure getting lun info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:785 +#, python-format +msgid "Failed to get vol with required size and extra specs for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:796 +#, python-format +msgid "Error provisioning vol %(name)s on %(volume)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:841 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:982 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:986 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1087 +msgid "Cluster ssc is not updated. No volume stats found." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1149 +#: cinder/volume/drivers/netapp/nfs.py:1080 +msgid "Unsupported ONTAP version. ONTAP version 7.3.1 and above is supported." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1153 +#: cinder/volume/drivers/netapp/nfs.py:1084 +#: cinder/volume/drivers/netapp/utils.py:320 +msgid "Api version could not be determined." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1164 +#, python-format +msgid "Failed to get vol with required size for volume: %s" msgstr "" -#: cinder/volume/netapp.py:128 -msgid "Connected to DFM server" +#: cinder/volume/drivers/netapp/iscsi.py:1273 +#, python-format +msgid "Error finding luns for volume %s. Verify volume exists." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1390 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1393 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1456 +msgid "Volume refresh job already running. Returning..." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1462 +#, python-format +msgid "Error refreshing vol capacity. 
Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1470 +#, python-format +msgid "Refreshing capacity info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:104 +#: cinder/volume/drivers/netapp/nfs.py:211 +#, python-format +msgid "NFS file %s not discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:224 +#, python-format +msgid "Copied image to volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:230 +#, python-format +msgid "Registering image in cache %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:237 +#, python-format +msgid "" +"Exception while registering image %(image_id)s in cache. Exception: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:250 +#, python-format +msgid "Found cache file for image %(image_id)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:263 +#, python-format +msgid "Cloning img from cache for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:271 +msgid "Image cache cleaning in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:282 +msgid "Image cache cleaning in progress." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:293 +#, python-format +msgid "Cleaning cache for share %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:298 +#, python-format +msgid "Files to be queued for deletion %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:305 +#, python-format +msgid "Exception during cache cleaning %(share)s. Message - %(ex)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:311 +msgid "Image cache cleaning done." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:336 +#, python-format +msgid "Bytes to free %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:343 +#, python-format +msgid "Delete file path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:358 +#, python-format +msgid "Deleting file at path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:363 +#, python-format +msgid "Exception during deleting %s" msgstr "" -#: cinder/volume/netapp.py:159 +#: cinder/volume/drivers/netapp/nfs.py:395 #, python-format -msgid "Job failed: %s" +msgid "Unexpected exception in cloning image %(image_id)s. 
Message: %(msg)s" msgstr "" -#: cinder/volume/netapp.py:240 -msgid "Failed to provision dataset member" +#: cinder/volume/drivers/netapp/nfs.py:411 +#, python-format +msgid "Cloning image %s from cache" msgstr "" -#: cinder/volume/netapp.py:252 -msgid "No LUN was created by the provision job" +#: cinder/volume/drivers/netapp/nfs.py:415 +#, python-format +msgid "Cache share: %s" msgstr "" -#: cinder/volume/netapp.py:261 cinder/volume/netapp.py:433 +#: cinder/volume/drivers/netapp/nfs.py:425 #, python-format -msgid "Failed to find LUN ID for volume %s" +msgid "Unexpected exception during image cloning in share %s" msgstr "" -#: cinder/volume/netapp.py:280 -msgid "Failed to remove and delete dataset member" -msgstr "" +#: cinder/volume/drivers/netapp/nfs.py:431 +#, python-format +msgid "Cloning image %s directly in share" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:436 +#, python-format +msgid "Share is cloneable %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:443 +#, python-format +msgid "Image is raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:450 +#, python-format +msgid "Image will locally be converted to raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:457 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:467 +#, python-format +msgid "Performing post clone for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:474 +msgid "NFS file could not be discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:478 +msgid "Checking file for resize" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:482 +#, python-format +msgid "Resizing file to %sG" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:488 +msgid "Resizing image file failed." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:510 +msgid "Discover file retries exhausted." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:529 +#, python-format +msgid "Image location not in the expected format %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:557 +#, python-format +msgid "Found possible share matches %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:561 +msgid "Unexpected exception while short listing used share." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:599 +#, python-format +msgid "Extending volume %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:710 +#, python-format +msgid "Shares on vserver %s will only be used for provisioning." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:714 +#: cinder/volume/drivers/netapp/nfs.py:892 +msgid "No vserver set in config. SSC will be disabled." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:757 +#, python-format +msgid "Exception creating vol %(name)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:765 +#, python-format +msgid "Volume %s could not be created on shares." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:815 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:856 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:862 +#, python-format +msgid "" +"Cloning with params volume %(volume)s, src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:924 +msgid "No cluster ssc stats found. Wait for next volume stats update." 
+msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:941 +msgid "No shares found hence skipping ssc refresh." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:978 +#: cinder/volume/drivers/netapp/nfs.py:1221 +#, python-format +msgid "Shortlisted del elg files %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:983 +#: cinder/volume/drivers/netapp/nfs.py:1226 +#, python-format +msgid "Getting file usage for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:988 +#: cinder/volume/drivers/netapp/nfs.py:1231 +#, python-format +msgid "file-usage for path %(path)s is %(bytes)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1005 +#: cinder/volume/drivers/netapp/nfs.py:1268 +#, python-format +msgid "Share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1007 +#: cinder/volume/drivers/netapp/nfs.py:1270 +#, python-format +msgid "No share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1038 +#, python-format +msgid "Found volume %(vol)s for share %(share)s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1129 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1139 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:241 +#, python-format +msgid "Unexpected error while creating ssc vol list. Message - %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:272 +#, python-format +msgid "Exception querying aggr options. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:313 +#, python-format +msgid "Exception querying sis information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:347 +#, python-format +msgid "Exception querying mirror information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:379 +#, python-format +msgid "Exception querying storage disk. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:421 +#, python-format +msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:455 +#, python-format +msgid "Successfully completed stale refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:482 +#, python-format +msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:488 +#, python-format +msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:500 +msgid "Backend not a VolumeDriver." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:502 +msgid "Backend server not NaServer." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:505 +msgid "ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:517 +msgid "refresh stale ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:616 +msgid "Fatal error: User not permitted to query NetApp volumes." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:623 +#, python-format +msgid "" +"The user does not have access or sufficient privileges to use all ssc " +"apis. The ssc features %s may not work as expected." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:122 +msgid "ems executed successfully." 
+msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:124 +#, python-format +msgid "Failed to invoke ems. Message : %s" +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:137 +msgid "" +"It is not the recommended way to use drivers by NetApp. Please use " +"NetAppDriver to achieve the functionality." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:160 +msgid "Requires an NaServer instance." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:317 +msgid "Unsupported Clustered Data ONTAP version." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:99 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:150 +#, python-format +msgid "Extending volume: %(id)s New size: %(size)s GB" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:166 +#, python-format +msgid "Volume %s does not exist, it seems it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:179 +#, python-format +msgid "Cannot delete snapshot %(origin): %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:190 +#, python-format +msgid "Creating temp snapshot of the original volume: %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:200 +#: cinder/volume/drivers/nexenta/nfs.py:200 +#, python-format +msgid "Volume creation failed, deleting created snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:205 +#: cinder/volume/drivers/nexenta/nfs.py:205 +#, python-format +msgid "Failed to delete zfs snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:223 +#, python-format +msgid "Enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:250 +#, python-format +msgid "Remote NexentaStor appliance at %s should be SSH-bound." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:267 +#, python-format +msgid "" +"Cannot send source snapshot %(src)s to destination %(dst)s. Reason: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:275 +#, python-format +msgid "" +"Cannot delete temporary source snapshot %(src)s on NexentaStor Appliance:" +" %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:281 +#, python-format +msgid "Cannot delete source volume %(volume)s on NexentaStor Appliance: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:318 +#, python-format +msgid "Snapshot %s does not exist, it seems it was already deleted." 
+msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:439 +#: cinder/volume/drivers/windows/windows_utils.py:230 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:449 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:461 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:471 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:481 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:514 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:522 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:83 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:88 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:89 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:90 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:96 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:85 +#, python-format +msgid "Volume %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:89 +#, python-format +msgid "Folder %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:114 +#, python-format +msgid "Creating folder on Nexenta Store %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:146 +#, python-format +msgid "Cannot destroy created folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:176 +#, python-format +msgid "Cannot destroy cloned folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:227 +#, python-format +msgid "Folder %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:237 +#: cinder/volume/drivers/nexenta/nfs.py:268 +#, python-format +msgid "Snapshot %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:302 +#, python-format +msgid "Creating regular file: %s.This may take some time." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:313 +#, python-format +msgid "Regular file: %s created." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:365 +#, python-format +msgid "Sharing folder %s on Nexenta Store" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:393 +#, python-format +msgid "Shares loaded: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/utils.py:46 +#, python-format +msgid "Invalid value: \"%s\"" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:93 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:99 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:107 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:137 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:190 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:246 +#, python-format +msgid "Snapshot info: %(name)s => %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:321 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:79 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:166 +#, python-format +msgid "Invalid hp3parclient version. Version %s or greater required." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:179 +#, python-format +msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:193 +#, python-format +msgid "HP3PARCommon %(common_ver)s, hp3parclient %(rest_ver)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:212 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:494 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:228 +#, python-format +msgid "Failed to get domain because CPG (%s) doesn't exist on array." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:247 +#, python-format +msgid "Error extending volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:347 +#, python-format +msgid "command %s failed" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:390 +#, python-format +msgid "Error running ssh command: %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:586 +#, python-format +msgid "VV Set %s does not exist." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:633 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:684 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:752 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1004 +#, python-format +msgid "Failure in update_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1019 +#, python-format +msgid "Failure in clear_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1031 +#, python-format +msgid "Error attaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1039 +#, python-format +msgid "Error detaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:124 +#, python-format +msgid "Invalid IP address format '%s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:158 +#, python-format +msgid "" +"Found invalid iSCSI IP address(s) in configuration option(s) " +"hp3par_iscsi_ips or iscsi_ip_address '%s.'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:164 +msgid "At least one valid iSCSI IP address must be set." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:266 +msgid "Least busy iSCSI port not found, using first iSCSI port in list." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:75 +#, python-format +msgid "Failure while invoking function: %(func)s. Error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:162 +#, python-format +msgid "Error while terminating session: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:165 +msgid "Successfully established connection to the server." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:172 +#, python-format +msgid "Error while logging out the user: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:218 +#, python-format +msgid "" +"Not authenticated error occurred. Will create session and try API call " +"again: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:258 +#, python-format +msgid "Task: %(task)s progress: %(prog)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:262 +#, python-format +msgid "Task %s status: success." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:266 +#: cinder/volume/drivers/vmware/api.py:271 +#, python-format +msgid "Task: %(task)s failed with error: %(err)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:290 +msgid "Lease is ready." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:294 +msgid "Lease initializing..." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:304 +#, python-format +msgid "Error: unknown lease state %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:51 +#, python-format +msgid "Read %(bytes)s out of %(max)s from ThreadSafePipe." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:56 +#, python-format +msgid "Completed transfer of size %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:102 +#, python-format +msgid "Initiating image service update on image: %(image)s with meta: %(meta)s" +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:117 +#, python-format +msgid "Glance image: %s is now active." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:123 +#, python-format +msgid "Glance image: %s is in killed state." 
+msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:132 +#, python-format +msgid "Glance image %(id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:171 +#, python-format +msgid "" +"Exception during HTTP connection close in VMwareHTTPWrite. Exception is " +"%s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:203 +#: cinder/volume/drivers/vmware/read_write_util.py:292 +msgid "Could not retrieve URL from lease." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:206 +#, python-format +msgid "Opening vmdk url: %s for write." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:231 +#, python-format +msgid "Written %s bytes to vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:242 +#: cinder/volume/drivers/vmware/read_write_util.py:318 +#, python-format +msgid "Updating progress to %s percent." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:258 +#: cinder/volume/drivers/vmware/read_write_util.py:334 +msgid "Lease released." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:260 +#: cinder/volume/drivers/vmware/read_write_util.py:336 +#, python-format +msgid "Lease is already in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:295 +#, python-format +msgid "Opening vmdk url: %s for read." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:307 +#, python-format +msgid "Read %s bytes from vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:150 +#, python-format +msgid "Error(s): %s occurred in the call to RetrievePropertiesEx." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:189 +#, python-format +msgid "No such SOAP method %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:198 +#, python-format +msgid "httplib error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:209 +#, python-format +msgid "Socket error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:218 +#, python-format +msgid "Type error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:225 +#, python-format +msgid "Error in %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:112 +#, python-format +msgid "Returning spec value %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:115 +#, python-format +msgid "Invalid spec value: %s specified." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:118 +#, python-format +msgid "Returning default spec value: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:169 +#, python-format +msgid "%s not set." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:174 +#, python-format +msgid "Successfully setup driver: %(driver)s for server: %(ip)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:219 +msgid "Backing not available, no operation to be performed." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:287 +#, python-format +msgid "" +"Unable to pick datastore to accommodate %(size)s bytes from the " +"datastores: %(dss)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:293 +#, python-format +msgid "" +"Selected datastore: %(datastore)s with %(host_count)d connected host(s) " +"for the volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:375 +#, python-format +msgid "" +"Unable to find suitable datastore for volume of size: %(vol)s GB under " +"host: %(host)s. 
More details: %(excep)s"
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:385
+#, python-format
+msgid "Unable to find host to accommodate a disk of size: %s in the inventory."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:412
+#, python-format
+msgid ""
+"Unable to find suitable datastore for volume: %(vol)s under host: "
+"%(host)s. More details: %(excep)s"
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:422
+#, python-format
+msgid "Unable to create volume: %s in the inventory."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:441
+#, python-format
+msgid "The instance: %s, for which initialize connection is called, exists."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:448
+#, python-format
+msgid "There is no backing for the volume: %s. Need to create one."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:456
+msgid "The instance for which initialize connection is called does not exist."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:461
+#, python-format
+msgid "Trying to boot from an empty volume: %s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:470
+#, python-format
+msgid ""
+"Returning connection_info: %(info)s for volume: %(volume)s with "
+"connector: %(connector)s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:518
+#, python-format
+msgid "Snapshot of volume not supported in state: %s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:523
+#, python-format
+msgid "There is no backing, so will not create snapshot: %s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:528
+#, python-format
+msgid "Successfully created snapshot: %s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:549
+#, python-format
+msgid "Delete snapshot of volume not supported in state: %s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:554
+#, python-format
+msgid "There is no backing, and so there is no snapshot: %s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:558
+#, python-format
+msgid "Successfully deleted snapshot: %s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:586
+#, python-format
+msgid "Successfully cloned new backing: %(back)s from source VMDK file: %(vmdk)s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:603
+#, python-format
+msgid ""
+"There is no backing for the source volume: %(svol)s. Not creating any "
+"backing for the volume: %(vol)s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:633
+#, python-format
+msgid ""
+"There is no backing for the source snapshot: %(snap)s. Not creating any "
+"backing for the volume: %(vol)s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:642
+#: cinder/volume/drivers/vmware/vmdk.py:982
+#, python-format
+msgid ""
+"There is no snapshot point for the snapshotted volume: %(snap)s. Not "
+"creating any backing for the volume: %(vol)s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:678
+#, python-format
+msgid "Cannot create image of disk format: %s. Only vmdk disk format is accepted."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:713
+#: cinder/volume/drivers/vmware/vmdk.py:771
+#, python-format
+msgid "Fetching glance image: %(id)s to server: %(host)s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:722
+#: cinder/volume/drivers/vmware/vmdk.py:792
+#, python-format
+msgid "Done copying image: %(id)s to volume: %(vol)s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:725
+#, python-format
+msgid ""
+"Exception in copy_image_to_volume: %(excep)s. Deleting the backing: "
+"%(back)s."
+msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:746 +#, python-format +msgid "Exception in _select_ds_for_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:749 +#, python-format +msgid "Selected datastore %(ds)s for new volume of size %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:784 +#, python-format +msgid "Exception in copy_image_to_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:787 +#, python-format +msgid "Deleting the backing: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:808 +#, python-format +msgid "Copy glance image: %s to create new volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:842 +msgid "Upload to glance of attached volume is not supported." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:847 +#, python-format +msgid "Copy Volume: %s to new image." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:853 +#, python-format +msgid "Backing not found, creating for volume: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:872 +#, python-format +msgid "Done copying volume %(vol)s to a new image %(img)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:922 +#, python-format +msgid "Relocating volume: %(backing)s to %(ds)s and %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:961 +#: cinder/volume/drivers/vmware/volumeops.py:630 +#, python-format +msgid "Successfully created clone: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:974 +#, python-format +msgid "" +"There is no backing for the snapshoted volume: %(snap)s. Not creating any" +" backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1010 +#, python-format +msgid "" +"There is no backing for the source volume: %(src)s. Not creating any " +"backing for volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1018 +#, python-format +msgid "Linked clone of source volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:94 +#, python-format +msgid "Downloading image: %s from glance image server as a flat vmdk file." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:107 +#: cinder/volume/drivers/vmware/vmware_images.py:126 +#, python-format +msgid "Downloaded image: %s from glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:113 +#, python-format +msgid "Downloading image: %s from glance image server using HttpNfc import." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:132 +#, python-format +msgid "Uploading image: %s to the Glance image server using HttpNfc export." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:158 +#, python-format +msgid "Uploaded image: %s to the Glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:87 +#, python-format +msgid "Did not find any backing with name: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:94 +#, python-format +msgid "Deleting the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:97 +#, python-format +msgid "Initiated deletion of VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:99 +#, python-format +msgid "Deleted the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:237 +#, python-format +msgid "There are no valid datastores attached to %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:289 +#, python-format +msgid "" +"Creating folder: %(child_folder_name)s under parent folder: " +"%(parent_folder)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:306 +#, python-format +msgid "Child folder already present: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:314 +#, python-format +msgid "Created child folder: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:365 +#, python-format +msgid "Spec for creating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:383 +#, python-format +msgid "" +"Creating volume backing name: %(name)s disk_type: %(disk_type)s size_kb: " +"%(size_kb)s at folder: %(folder)s resourse pool: %(resource_pool)s " +"datastore name: %(ds_name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:395 +#, python-format +msgid "Initiated creation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:398 +#, python-format +msgid "Successfully created volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:438 +#, python-format +msgid "Spec for relocating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:452 +#, python-format +msgid "" +"Relocating backing: %(backing)s to datastore: %(ds)s and resource pool: " +"%(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:462 +#, python-format +msgid "Initiated relocation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:464 +#, python-format +msgid "" +"Successfully relocated volume backing: %(backing)s to datastore: %(ds)s " +"and resource pool: %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:474 +#, python-format +msgid "Moving backing: %(backing)s to folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:479 +#, python-format +msgid "Initiated move of volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:482 +#, python-format +msgid "Successfully moved volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:494 +#, python-format +msgid "Snapshoting backing: %(backing)s with name: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:501 +#, python-format +msgid "Initiated snapshot of volume backing: %(backing)s named: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:505 +#, python-format +msgid "Successfully created snapshot: %(snap)s for volume backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:553 +#, python-format +msgid "Deleting the snapshot: %(name)s from backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:558 +#, python-format +msgid "" +"Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " +"delete anything." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:565 +#, python-format +msgid "Initiated snapshot: %(name)s deletion for backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:569 +#, python-format +msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:597 +#, python-format +msgid "Spec for cloning the backing: %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:613 +#, python-format +msgid "" +"Creating a clone of backing: %(back)s, named: %(name)s, clone type: " +"%(type)s from snapshot: %(snap)s on datastore: %(ds)s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:627 +#, python-format +msgid "Initiated clone of backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:638 +#, python-format +msgid "Deleting file: %(file)s under datacenter: %(dc)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:646 +#, python-format +msgid "Initiated deletion via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:648 +#, python-format +msgid "Successfully deleted file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:701 +msgid "Copying disk data before snapshot of the VM" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:711 +#, python-format +msgid "Initiated copying disk data via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:713 +#, python-format +msgid "Successfully copied disk at: %(src)s to: %(dest)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:722 +#, python-format +msgid "Deleting vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:729 +#, python-format +msgid "Initiated deleting vmdk file via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:731 +#, python-format +msgid "Deleted vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/windows/windows.py:102 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:47 +#, python-format +msgid "" +"check_for_setup_error: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:53 +msgid "check_for_setup_error: there is no ISCSI traffic listening." +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:63 +#, python-format +msgid "" +"get_host_information: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:73 +#, python-format +msgid "" +"get_host_information: the ISCSI target information could not be " +"retrieved. WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:105 +#, python-format +msgid "" +"associate_initiator_with_iscsi_target: an association between initiator: " +"%(init)s and target name: %(target)s could not be established. WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:123 +#, python-format +msgid "" +"delete_iscsi_target: error when deleting the iscsi target associated with" +" target name: %(target)s. WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:139 +#, python-format +msgid "" +"create_volume: error when creating the volume name: %(vol_name)s. WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:157 +#, python-format +msgid "" +"delete_volume: error when deleting the volume name: %(vol_name)s. WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:177 +#, python-format +msgid "" +"create_snapshot: error when creating the snapshot name: %(vol_name)s. " +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:193 +#, python-format +msgid "" +"create_volume_from_snapshot: error when creating the volume name: " +"%(vol_name)s from snapshot name: %(snap_name)s. WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:208 +#, python-format +msgid "" +"delete_snapshot: error when deleting the snapshot name: %(snap_name)s. " +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:223 +#, python-format +msgid "" +"create_iscsi_target: error when creating iscsi target: %(tar_name)s. WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:240 +#, python-format +msgid "" +"remove_iscsi_target: error when deleting iscsi target: %(tar_name)s. WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:255 +#, python-format +msgid "" +"add_disk_to_target: error adding disk associated to volume: %(vol_name)s" +" to the target name: %(tar_name)s. WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:273 +#, python-format +msgid "" +"copy_vhd_disk: error when copying disk from source path: %(src_path)s to" +" destination path: %(dest_path)s. WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:290 +#, python-format +msgid "" +"extend: error when extending the volume: %(vol_name)s. WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/flows/common.py:52 +#, python-format +msgid "Restoring source %(source_volid)s status to %(status)s" +msgstr "" + +#: cinder/volume/flows/common.py:58 +#, python-format +msgid "" +"Failed setting source volume %(source_volid)s back to its initial " +"%(source_status)s status" +msgstr "" + +#: cinder/volume/flows/common.py:83 +#, python-format +msgid "Updating volume: %(volume_id)s with %(update)s due to: %(reason)s" +msgstr "" + +#: cinder/volume/flows/common.py:90 +#: cinder/volume/flows/manager/create_volume.py:676 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(update)s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:81 +#, python-format +msgid "Originating snapshot status must be one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:103 +#, python-format +msgid "" +"Unable to create a volume from an originating source volume when its " +"status is not one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:126 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than the snapshot size " +"%(snap_size)sGB. They must be >= original snapshot size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:135 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than original volume size " +"%(source_size)sGB. They must be >= original volume size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:144 +#, python-format +msgid "Volume size %(size)s must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:186 +#, python-format +msgid "" +"Size of specified image %(image_size)sGB is larger than volume size " +"%(volume_size)sGB." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:194 +#, python-format +msgid "" +"Volume size %(volume_size)sGB cannot be smaller than the image minDisk " +"size %(min_disk)sGB."
+msgstr "" + +#: cinder/volume/flows/api/create_volume.py:212 +#, python-format +msgid "Metadata property key %s greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:217 +#, python-format +msgid "Metadata property key %s value greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:254 +#, python-format +msgid "Availability zone '%s' is invalid" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:267 +msgid "Volume must be in the same availability zone as the snapshot" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:276 +msgid "Volume must be in the same availability zone as the source volume" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:315 +msgid "Volume type will be changed to be the same as the source volume." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:463 +#, python-format +msgid "Failed destroying volume entry %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:546 +#, python-format +msgid "Failed rolling back quota for %s reservations" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:590 +#, python-format +msgid "Failed to update quota for deleting volume: %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:678 +#: cinder/volume/flows/manager/create_volume.py:192 +#, python-format +msgid "Volume %s: create failed" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:682 +msgid "Unexpected build error:" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:105 +#, python-format +msgid "" +"Volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d due to " +"%(reason)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:124 +#, python-format +msgid "Volume %s: re-scheduled" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:141 +#, python-format +msgid "Updating volume %(volume_id)s with %(update)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:146 +#, python-format +msgid "Volume %s: resetting 'creating' status failed." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:165 +#, python-format +msgid "Volume %s: rescheduling failed" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:308 +#, python-format +msgid "" +"Failed notifying about the volume action %(event)s for volume " +"%(volume_id)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:345 +#, python-format +msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:347 +#, python-format +msgid "" +"Failed updating volume %(vol_id)s metadata using the provided " +"%(src_type)s %(src_id)s metadata" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:405 +#, python-format +msgid "" +"Failed fetching snapshot %(snapshot_id)s bootable flag using the provided" +" glance snapshot %(snapshot_ref_id)s volume reference" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:418 +#, python-format +msgid "Marking volume %s as bootable." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:421 +#, python-format +msgid "Failed updating volume %(volume_id)s bootable flag to true" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:448 +#, python-format +msgid "" +"Attempting download of %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s." 
+msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:455 +#: cinder/volume/flows/manager/create_volume.py:466 +#, python-format +msgid "" +"Failed to copy image %(image_id)s to volume: %(volume_id)s, error: " +"%(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:461 +#, python-format +msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:475 +#, python-format +msgid "" +"Downloaded image %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s successfully." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:512 +#, python-format +msgid "" +"Creating volume glance metadata for volume %(volume_id)s backed by image " +"%(image_id)s with: %(vol_metadata)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:526 +#, python-format +msgid "" +"Cloning %(volume_id)s from image %(image_id)s at location " +"%(image_location)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:552 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(updates)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:574 +#, python-format +msgid "Unable to create volume. Volume driver %s not initialized" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:588 +#, python-format +msgid "" +"Volume %(volume_id)s: being created using %(functor)s with specification:" +" %(volume_spec)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:611 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with creation provided " +"model %(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:619 +#, python-format +msgid "Volume %s: creating export" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:633 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with driver provided model " +"%(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:680 +#, python-format +msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" +msgstr "" + +#~ msgid "Error retrieving volume status: %s" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get system name" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get storage pool data" +#~ msgstr "" + +#~ msgid "Cannot find any Fibre Channel HBAs" +#~ msgstr "" + +#~ msgid "Volume status must be available or error" +#~ msgstr "" + +#~ msgid "No backend config with id %s" +#~ msgstr "" + +#~ msgid "No sm_flavor called %s" +#~ msgstr "" + +#~ msgid "No sm_volume with id %s" +#~ msgstr "" + +#~ msgid "Error: %s" +#~ msgstr "" + +#~ msgid "Unexpected state while cloning %s" +#~ msgstr "" + +#~ msgid "iSCSI device not found at %s" +#~ msgstr "" + +#~ msgid "Fibre Channel device not found." +#~ msgstr "" + +#~ msgid "Uncaught exception" +#~ msgstr "" + +#~ msgid "Out reactor registered" +#~ msgstr "" + +#~ msgid "CONSUMER GOT %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT QUEUED %(data)s" +#~ msgstr "" + +#~ msgid "Could not create IPC directory %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT %(data)s" +#~ msgstr "" + +#~ msgid "May specify only one of snapshot, imageRef or source volume" +#~ msgstr "" + +#~ msgid "Volume size cannot be lesser than the Snapshot size" +#~ msgstr "" + +#~ msgid "Unable to clone volumes that are in an error state" +#~ msgstr "" + +#~ msgid "Clones currently must be >= original volume size." 
+#~ msgstr "" + +#~ msgid "Volume size '%s' must be an integer and greater than 0" +#~ msgstr "" + +#~ msgid "Size of specified image is larger than volume size." +#~ msgstr "" + +#~ msgid "Image minDisk size is larger than the volume size." +#~ msgstr "" + +#~ msgid "" +#~ msgstr "" + +#~ msgid "Availability zone is invalid" +#~ msgstr "" + +#~ msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +#~ msgstr "" + +#~ msgid "volume %s: creating from snapshot" +#~ msgstr "" + +#~ msgid "volume %s: creating from existing volume" +#~ msgstr "" + +#~ msgid "volume %s: creating from image" +#~ msgstr "" + +#~ msgid "volume %s: creating" +#~ msgstr "" + +#~ msgid "Setting volume: %s status to error after failed image copy." +#~ msgstr "" + +#~ msgid "Unexpected Error: " +#~ msgstr "" + +#~ msgid "volume %s: creating export" +#~ msgstr "" + +#~ msgid "volume %s: create failed" +#~ msgstr "" + +#~ msgid "volume %s: created successfully" +#~ msgstr "" + +#~ msgid "volume %s: Error trying to reschedule create" +#~ msgstr "" + +#~ msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +#~ msgstr "" + +#~ msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +#~ msgstr "" + +#~ msgid "Downloaded image %(image_id)s to %(volume_id)s successfully." +#~ msgstr "" + +#~ msgid "Array Mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "LUN %(lun)s of size %(size)s MB is created." +#~ msgstr "" + +#~ msgid "Array mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "Failed to attach iser target for volume %(volume_id)s." +#~ msgstr "" + +#~ msgid "Fetching %s" +#~ msgstr "" + +#~ msgid "Link Local address is not found.:%s" +#~ msgstr "" + +#~ msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +#~ msgstr "" + +#~ msgid "Started %(name)s on %(_host)s:%(_port)s" +#~ msgstr "" + +#~ msgid "Unable to find a Fibre Channel volume device" +#~ msgstr "" + +#~ msgid "Volume device not found at %s" +#~ msgstr "" + +#~ msgid "Unable to find Volume Group: %s" +#~ msgstr "" + +#~ msgid "Failed to create Volume Group: %s" +#~ msgstr "" + +#~ msgid "snapshot %(snap_name)s: creating" +#~ msgstr "" + +#~ msgid "Running with CoraidDriver for ESM EtherCLoud" +#~ msgstr "" + +#~ msgid "Update session cookie %(session)s" +#~ msgstr "" + +#~ msgid "Message : %(message)s" +#~ msgstr "" + +#~ msgid "Error while trying to set group: %(message)s" +#~ msgstr "" + +#~ msgid "Unable to find group: %(group)s" +#~ msgstr "" + +#~ msgid "ESM urlOpen error" +#~ msgstr "" + +#~ msgid "JSON Error" +#~ msgstr "" + +#~ msgid "Request without URL" +#~ msgstr "" + +#~ msgid "Configure data : %s" +#~ msgstr "" + +#~ msgid "Configure response : %s" +#~ msgstr "" + +#~ msgid "Unable to retrive volume infos for volume %(volname)s" +#~ msgstr "" + +#~ msgid "Cannot login on Coraid ESM" +#~ msgstr "" + +#~ msgid "Fail to create volume %(volname)s" +#~ msgstr "" + +#~ msgid "Failed to delete volume %(volname)s" +#~ msgstr "" + +#~ msgid "Failed to Create Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "Failed to Delete Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "Failed to Create Volume from Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "fmt = %(fmt)s backed by: %(backing_file)s" +#~ msgstr "" + +#~ msgid "Expected image to be in raw format, but is %s" +#~ msgstr "" + +#~ msgid "volume group %s doesn't exist" +#~ msgstr "" + +#~ msgid "Error retrieving volume stats: %s" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Could not get system name" +#~ msgstr "" + +#~ msgid "CPG (%s) must 
be in a domain" +#~ msgstr "" + +#~ msgid "Error populating default encryption types!" +#~ msgstr "" + +#~ msgid "Unexpected error while running command." +#~ msgstr "" + +#~ msgid "Nexenta SA returned the error" +#~ msgstr "" -#: cinder/volume/netapp.py:603 cinder/volume/netapp.py:657 -#, python-format -msgid "No LUN ID for volume %s" -msgstr "" +#~ msgid "Ignored target group creation error \"%s\" while ensuring export" +#~ msgstr "" -#: cinder/volume/netapp.py:607 cinder/volume/netapp.py:661 -#, python-format -msgid "Failed to get LUN details for LUN ID %s" -msgstr "" +#~ msgid "Ignored target group member addition error \"%s\" while ensuring export" +#~ msgstr "" -#: cinder/volume/netapp.py:614 -#, python-format -msgid "Failed to get host details for host ID %s" -msgstr "" +#~ msgid "Ignored LU creation error \"%s\" while ensuring export" +#~ msgstr "" -#: cinder/volume/netapp.py:620 -#, python-format -msgid "Failed to get target portal for filer: %s" -msgstr "" +#~ msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +#~ msgstr "" -#: cinder/volume/netapp.py:625 -#, python-format -msgid "Failed to get target IQN for filer: %s" -msgstr "" +#~ msgid "Copying metadata from snapshot %(snap_volume_id)s to %(volume_id)s" +#~ msgstr "" -#: cinder/volume/san.py:113 cinder/volume/san.py:151 -msgid "Specify san_password or san_private_key" -msgstr "" +#~ msgid "Connection to glance failed" +#~ msgstr "" -#: cinder/volume/san.py:156 -msgid "san_ip must be set" -msgstr "" +#~ msgid "Invalid snapshot" +#~ msgstr "" -#: cinder/volume/san.py:320 -#, python-format -msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" -msgstr "" +#~ msgid "Invalid input received" +#~ msgstr "" -#: cinder/volume/san.py:452 -#, python-format -msgid "CLIQ command returned %s" -msgstr "" +#~ msgid "Invalid volume type" +#~ msgstr "" -#: cinder/volume/san.py:458 -#, python-format -msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" -msgstr "" +#~ msgid "Invalid volume" +#~ msgstr "" -#: cinder/volume/san.py:466 -#, python-format -msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" -msgstr "" +#~ msgid "Invalid host" +#~ msgstr "" -#: cinder/volume/san.py:496 -#, python-format -msgid "" -"Unexpected number of virtual ips for cluster %(cluster_name)s. 
" -"Result=%(_xml)s" -msgstr "" +#~ msgid "Invalid auth key" +#~ msgstr "" -#: cinder/volume/san.py:549 -#, python-format -msgid "Volume info: %(volume_name)s => %(volume_attributes)s" -msgstr "" +#~ msgid "Invalid metadata" +#~ msgstr "" -#: cinder/volume/san.py:594 -msgid "local_path not supported" -msgstr "" +#~ msgid "Invalid metadata size" +#~ msgstr "" -#: cinder/volume/san.py:626 -#, python-format -msgid "Could not determine project for volume %s, can't export" -msgstr "" +#~ msgid "Migration error" +#~ msgstr "" -#: cinder/volume/san.py:696 -#, python-format -msgid "Payload for SolidFire API call: %s" -msgstr "" +#~ msgid "Quota exceeded" +#~ msgstr "" -#: cinder/volume/san.py:713 -#, python-format -msgid "Call to json.loads() raised an exception: %s" -msgstr "" +#~ msgid "Connection to swift failed" +#~ msgstr "" -#: cinder/volume/san.py:718 -#, python-format -msgid "Results of SolidFire API call: %s" -msgstr "" +#~ msgid "Volume migration failed" +#~ msgstr "" -#: cinder/volume/san.py:732 -#, python-format -msgid "Found solidfire account: %s" -msgstr "" +#~ msgid "SSH command injection detected" +#~ msgstr "" -#: cinder/volume/san.py:746 -#, python-format -msgid "solidfire account: %s does not exist, create it..." -msgstr "" +#~ msgid "Invalid qos specs" +#~ msgstr "" -#: cinder/volume/san.py:804 -msgid "Enter SolidFire create_volume..." -msgstr "" +#~ msgid "debug in callback: %s" +#~ msgstr "" -#: cinder/volume/san.py:846 -msgid "Leaving SolidFire create_volume" -msgstr "" +#~ msgid "Expected object of type: %s" +#~ msgstr "" -#: cinder/volume/san.py:861 -msgid "Enter SolidFire delete_volume..." -msgstr "" +#~ msgid "timefunc: '%(name)s' took %(total_time).2f secs" +#~ msgstr "" -#: cinder/volume/san.py:880 -#, python-format -msgid "Deleting volumeID: %s " -msgstr "" +#~ msgid "base image still has %s snapshots so not deleting base image" +#~ msgstr "" -#: cinder/volume/san.py:888 -msgid "Leaving SolidFire delete_volume" -msgstr "" +#~ msgid "Failed to rename migration destination volume %(vol)s to %(name)s" +#~ msgstr "" -#: cinder/volume/san.py:891 -msgid "Executing SolidFire ensure_export..." -msgstr "" +#~ msgid "Resize volume \"%(name)s\" to %(size)s" +#~ msgstr "" -#: cinder/volume/san.py:895 -msgid "Executing SolidFire create_export..." -msgstr "" +#~ msgid "Volume \"%(name)s\" resized. New size is %(size)s" +#~ msgstr "" -#: cinder/volume/volume_types.py:49 cinder/volume/volume_types.py:108 -msgid "name cannot be None" -msgstr "" +#~ msgid "Invalid snapshot backing file format: %s" +#~ msgstr "" -#: cinder/volume/volume_types.py:96 -msgid "id cannot be None" -msgstr "" +#~ msgid "Extend volume from %(old_size) to %(new_size)" +#~ msgstr "" -#: cinder/volume/xensm.py:55 -#, python-format -msgid "SR name = %s" -msgstr "" +#~ msgid "pool %s doesn't exist" +#~ msgstr "" -#: cinder/volume/xensm.py:56 -#, python-format -msgid "Params: %s" -msgstr "" +#~ msgid "_update_volume_stats: Could not get system name." +#~ msgstr "" -#: cinder/volume/xensm.py:60 -#, python-format -msgid "Failed to create sr %s...continuing" -msgstr "" +#~ msgid "Disk not found: %s" +#~ msgstr "" -#: cinder/volume/xensm.py:62 -msgid "Create failed" -msgstr "" +#~ msgid "read timed out" +#~ msgstr "" -#: cinder/volume/xensm.py:64 -#, python-format -msgid "SR UUID of new SR is: %s" -msgstr "" +#~ msgid "check_for_setup_error." +#~ msgstr "" -#: cinder/volume/xensm.py:71 -msgid "Failed to update db" -msgstr "" +#~ msgid "check_for_setup_error: Can not get device type." 
+#~ msgstr "" -#: cinder/volume/xensm.py:80 -#, python-format -msgid "Failed to introduce sr %s...continuing" -msgstr "" +#~ msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +#~ msgstr "" -#: cinder/volume/xensm.py:91 -#, python-format -msgid "Failed to reach backend %d" -msgstr "" +#~ msgid "_get_device_type: Storage Pool must be configured." +#~ msgstr "" -#: cinder/volume/xensm.py:100 -msgid "XenSMDriver requires xenapi connection" -msgstr "" +#~ msgid "create_volume:volume name: %s." +#~ msgstr "" -#: cinder/volume/xensm.py:110 -msgid "Failed to initiate session" -msgstr "" +#~ msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +#~ msgstr "" -#: cinder/volume/xensm.py:142 -#, python-format -msgid "Volume will be created in backend - %d" -msgstr "" +#~ msgid "create_export: volume name:%s" +#~ msgstr "" -#: cinder/volume/xensm.py:154 -msgid "Failed to update volume in db" -msgstr "" +#~ msgid "create_export:Volume %(name)s does not exist." +#~ msgstr "" -#: cinder/volume/xensm.py:157 -msgid "Unable to create volume" -msgstr "" +#~ msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +#~ msgstr "" -#: cinder/volume/xensm.py:171 -msgid "Failed to delete vdi" -msgstr "" +#~ msgid "terminate_connection:Host does not exist. Host name:%(host)s." +#~ msgstr "" -#: cinder/volume/xensm.py:177 -msgid "Failed to delete volume in db" -msgstr "" +#~ msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +#~ msgstr "" -#: cinder/volume/xensm.py:210 -msgid "Failed to find volume in db" -msgstr "" +#~ msgid "create_snapshot:Device does not support snapshot." +#~ msgstr "" -#: cinder/volume/xensm.py:221 -msgid "Failed to find backend in db" -msgstr "" +#~ msgid "create_snapshot:Resource pool needs 1GB valid size at least." +#~ msgstr "" -#: cinder/volume/nexenta/__init__.py:27 -msgid "Nexenta SA returned the error" -msgstr "" +#~ msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:64 -#, python-format -msgid "Sending JSON data: %s" -msgstr "" +#~ msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s" +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:69 -#, python-format -msgid "Auto switching to HTTPS connection to %s" -msgstr "" +#~ msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:75 -msgid "No headers in server response" -msgstr "" +#~ msgid "delete_snapshot:Device does not support snapshot." +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:76 -msgid "Bad response from server" -msgstr "" +#~ msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:79 -#, python-format -msgid "Got response: %s" -msgstr "" +#~ msgid "_check_conf_file: %s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:96 -#, python-format -msgid "Volume %s does not exist in Nexenta SA" -msgstr "" +#~ msgid "Write login information to xml error. %s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:180 -msgid "" -"Call to local_path should not happen. Verify that use_local_volumes flag " -"is turned off." -msgstr "" +#~ msgid "_get_login_info error. %s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:202 -#, python-format -msgid "Ignored target creation error \"%s\" while ensuring export" -msgstr "" +#~ msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." 
+#~ msgstr "" -#: cinder/volume/nexenta/volume.py:210 -#, python-format -msgid "Ignored target group creation error \"%s\" while ensuring export" -msgstr "" +#~ msgid "_get_lun_set_info:%s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:219 -#, python-format -msgid "Ignored target group member addition error \"%s\" while ensuring export" -msgstr "" +#~ msgid "_get_iscsi_info:%s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:227 -#, python-format -msgid "Ignored LU creation error \"%s\" while ensuring export" -msgstr "" +#~ msgid "CLI command:%s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:237 -#, python-format -msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" -msgstr "" +#~ msgid "_execute_cli:%s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:273 -#, python-format -msgid "" -"Got error trying to destroy target group %(target_group)s, assuming it is" -" already gone: %(exc)s" -msgstr "" +#~ msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:280 -#, python-format -msgid "" -"Got error trying to delete target %(target)s, assuming it is already " -"gone: %(exc)s" -msgstr "" +#~ msgid "_get_tgt_iqn:iSCSI IP is %s." +#~ msgstr "" -#~ msgid "Unable to locate account %(account_name) on Solidfire device" +#~ msgid "_get_tgt_iqn:iSCSI target iqn is:%s" #~ msgstr "" -#~ msgid "Zone %(zone_id)s could not be found." +#~ msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" #~ msgstr "" -#~ msgid "Cinder access parameters were not specified." +#~ msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" #~ msgstr "" -#~ msgid "Virtual Storage Array %(id)d could not be found." +#~ msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." #~ msgstr "" -#~ msgid "Virtual Storage Array %(name)s could not be found." +#~ msgid "Ignored target creation error while ensuring export" #~ msgstr "" -#~ msgid "Detected more than one volume with name %(vol_name)" +#~ msgid "Ignored target group creation error while ensuring export" #~ msgstr "" -#~ msgid "Detected existing vlan with id %(vlan)" +#~ msgid "Ignored target group member addition error while ensuring export" #~ msgstr "" -#~ msgid "" -#~ "Attempting to grab semaphore \"%(lock)s\" " -#~ "for method \"%(method)s\"...lock" +#~ msgid "Ignored LU creation error while ensuring export" #~ msgstr "" -#~ msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgid "Ignored LUN mapping entry addition error while ensuring export" #~ msgstr "" -#~ msgid "" -#~ "Attempting to grab file lock " -#~ "\"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgid "Invalid source volume %(reason)s." #~ msgstr "" -#~ msgid "Got file lock \"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgid "The request is invalid." #~ msgstr "" -#~ msgid "Parent group id and group id cannot be same" +#~ msgid "Volume %(volume_id)s persistence file could not be found." #~ msgstr "" -#~ msgid "No body provided" +#~ msgid "No disk at %(location)s" #~ msgstr "" -#~ msgid "Create VSA %(display_name)s of type %(vc_type)s" +#~ msgid "Class %(class_name)s could not be found: %(exception)s" #~ msgstr "" -#~ msgid "Delete VSA with id: %s" +#~ msgid "Action not allowed." #~ msgstr "" -#~ msgid "Associate address %(ip)s to VSA %(id)s" +#~ msgid "Key pair %(key_name)s already exists." 
#~ msgstr "" -#~ msgid "Disassociate address from VSA %(id)s" +#~ msgid "Migration error: %(reason)s" #~ msgstr "" -#~ msgid "%(obj)s with ID %(id)s not found" +#~ msgid "Maximum volume/snapshot size exceeded" #~ msgstr "" -#~ msgid "" -#~ "%(obj)s with ID %(id)s belongs to " -#~ "VSA %(own_vsa_id)s and not to VSA " -#~ "%(vsa_id)s." +#~ msgid "3PAR Host already exists: %(err)s. %(info)s" #~ msgstr "" -#~ msgid "Index. vsa_id=%(vsa_id)s" +#~ msgid "Backup volume %(volume_id)s type not recognised." #~ msgstr "" -#~ msgid "Detail. vsa_id=%(vsa_id)s" +#~ msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s" #~ msgstr "" -#~ msgid "Create. vsa_id=%(vsa_id)s, body=%(body)s" +#~ msgid "ssh_read: Read SSH timeout" #~ msgstr "" -#~ msgid "Create volume of %(size)s GB from VSA ID %(vsa_id)s" +#~ msgid "do_setup." #~ msgstr "" -#~ msgid "Update %(obj)s with id: %(id)s, changes: %(changes)s" +#~ msgid "create_volume: volume name: %s." #~ msgstr "" -#~ msgid "Delete. vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgid "delete_volume: volume name: %s." #~ msgstr "" -#~ msgid "Show. vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgid "create_cloned_volume: src volume: %(src)s tgt volume: %(tgt)s" #~ msgstr "" -#~ msgid "Index instances for VSA %s" +#~ msgid "create_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" #~ msgstr "" -#~ msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances" +#~ msgid "delete_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" #~ msgstr "" -#~ msgid "" -#~ "Instance quota exceeded. You cannot run" -#~ " any more instances of this type." +#~ msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s" #~ msgstr "" -#~ msgid "" -#~ "Instance quota exceeded. You can only" -#~ " run %s more instances of this " -#~ "type." +#~ msgid "_update_volume_stats: Updating volume stats" #~ msgstr "" -#~ msgid "Going to try to soft delete %s" +#~ msgid "restore finished." #~ msgstr "" -#~ msgid "No host for instance %s, deleting immediately" +#~ msgid "Error encountered during initialization of driver: %s" #~ msgstr "" -#~ msgid "Going to try to terminate %s" +#~ msgid "Unabled to update stats, driver is uninitialized" #~ msgstr "" -#~ msgid "Going to try to stop %s" +#~ msgid "Snapshot file at %s does not exist." #~ msgstr "" -#~ msgid "Going to try to start %s" +#~ msgid "_create_copy: Source vdisk %s does not exist" #~ msgstr "" -#~ msgid "" -#~ "Going to force the deletion of the" -#~ " vm %(instance_uuid)s, even if it is" -#~ " deleted" +#~ msgid "Login to 3PAR array invalid" #~ msgstr "" -#~ msgid "" -#~ "Instance %(instance_uuid)s did not exist " -#~ "in the DB, but I will shut " -#~ "it down anyway using a special " -#~ "context" +#~ msgid "There are no datastores present under %s." #~ msgstr "" -#~ msgid "exception terminating the instance %(instance_uuid)s" +#~ msgid "Size for volume: %s not found, skipping secure delete." #~ msgstr "" -#~ msgid "trying to destroy already destroyed instance: %s" +#~ msgid "Could not find attribute for LUN named %s" #~ msgstr "" -#~ msgid "" -#~ "Instance %(name)s found in database but" -#~ " not known by hypervisor. Setting " -#~ "power state to NOSTATE" +#~ msgid "Cleaning up incomplete backup operations" #~ msgstr "" -#~ msgid "" -#~ "Detected instance with name label " -#~ "'%(name_label)s' which is marked as " -#~ "DELETED but still present on host." 
+#~ msgid "Resetting volume %s to available (was backing-up)" #~ msgstr "" -#~ msgid "" -#~ "Destroying instance with name label " -#~ "'%(name_label)s' which is marked as " -#~ "DELETED but still present on host." +#~ msgid "Resetting volume %s to error_restoring (was restoring-backup)" #~ msgstr "" -#~ msgid "SQL connection failed (%(connstring)s). %(attempts)d attempts left." +#~ msgid "Resetting backup %s to error (was creating)" #~ msgstr "" -#~ msgid "Can't downgrade without losing data" +#~ msgid "Resetting backup %s to available (was restoring)" #~ msgstr "" -#~ msgid "Instance %(instance_id)s not found" +#~ msgid "Resuming delete on backup: %s" #~ msgstr "" -#~ msgid "Network %s has active ports, cannot delete" +#~ msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" #~ msgstr "" -#~ msgid "No fixed IPs to deallocate for vif %sid" +#~ msgid "create_backup finished. backup: %s" #~ msgstr "" -#~ msgid "" -#~ "AMQP server on %(fl_host)s:%(fl_port)d is " -#~ "unreachable: %(e)s. Trying again in " -#~ "%(fl_intv)d seconds." +#~ msgid "delete_backup started, backup: %s" #~ msgstr "" -#~ msgid "Unable to connect to AMQP server after %(tries)d tries. Shutting down." +#~ msgid "delete_backup finished, backup %s deleted" #~ msgstr "" -#~ msgid "Reconnected to queue" +#~ msgid "JSON transfer Error" #~ msgstr "" -#~ msgid "Failed to fetch message from queue: %s" +#~ msgid "create volume error: %(err)s" #~ msgstr "" -#~ msgid "Initing the Adapter Consumer for %s" +#~ msgid "Create snapshot error." #~ msgstr "" -#~ msgid "Created \"%(exchange)s\" fanout exchange with \"%(key)s\" routing key" +#~ msgid "Create luncopy error." #~ msgstr "" -#~ msgid "Exception while processing consumer" +#~ msgid "_find_host_lun_id transfer data error! " #~ msgstr "" -#~ msgid "Creating \"%(exchange)s\" fanout exchange" +#~ msgid "ssh_read: Read SSH timeout." #~ msgstr "" -#~ msgid "response %s" +#~ msgid "There are no hosts in the inventory." #~ msgstr "" -#~ msgid "topic is %s" +#~ msgid "Unable to create volume: %(vol)s on the hosts: %(hosts)s." #~ msgstr "" -#~ msgid "message %s" +#~ msgid "Successfully cloned new backing: %s." #~ msgstr "" -#~ msgid "" -#~ "Cannot confirm tmpfile at %(ipath)s is" -#~ " on same shared storage between " -#~ "%(src)s and %(dest)s." +#~ msgid "Successfully reverted clone: %(clone)s to snapshot: %(snapshot)s." #~ msgstr "" -#~ msgid "" -#~ "Unable to migrate %(instance_id)s to " -#~ "%(dest)s: Lack of memory(host:%(avail)s <= " -#~ "instance:%(mem_inst)s)" +#~ msgid "Copying backing files from %(src)s to %(dest)s." #~ msgstr "" -#~ msgid "" -#~ "Unable to migrate %(instance_id)s to " -#~ "%(dest)s: Lack of disk(host:%(available)s <=" -#~ " instance:%(necessary)s)" +#~ msgid "Initiated copying of backing via task: %s." #~ msgstr "" -#~ msgid "Driver Method %(driver_method)s missing: %(e)s.Reverting to schedule()" +#~ msgid "Successfully copied backing to %s." #~ msgstr "" -#~ msgid "Setting instance %(instance_uuid)s to ERROR state." +#~ msgid "Registering backing at path: %s to inventory." #~ msgstr "" -#~ msgid "_filter_hosts: %(request_spec)s" +#~ msgid "Initiated registring backing, task: %s." #~ msgstr "" -#~ msgid "Filter hosts for drive type %s" +#~ msgid "Successfully registered backing: %s." #~ msgstr "" -#~ msgid "Host %s has no free capacity. Skip" +#~ msgid "Reverting backing to snapshot: %s." #~ msgstr "" -#~ msgid "Filter hosts: %s" +#~ msgid "Initiated reverting snapshot via task: %s." 
#~ msgstr "" -#~ msgid "Must implement host selection mechanism" +#~ msgid "Successfully reverted to snapshot: %s." #~ msgstr "" -#~ msgid "Maximum number of hosts selected (%d)" +#~ msgid "Successfully copied disk data to: %s." #~ msgstr "" -#~ msgid "Selected excessive host %(host)s" +#~ msgid "Error(s): %s occurred in the call to RetrieveProperties." #~ msgstr "" -#~ msgid "Provision volume %(name)s of size %(size)s GB on host %(host)s" +#~ msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" #~ msgstr "" -#~ msgid "volume_params %(volume_params)s" +#~ msgid "Deploy v1 of the Cinder API. " #~ msgstr "" -#~ msgid "%(i)d: Volume %(name)s" +#~ msgid "Deploy v2 of the Cinder API. " #~ msgstr "" -#~ msgid "Attempting to spawn %(num_volumes)d volume(s)" +#~ msgid "_read_xml:%s" #~ msgstr "" -#~ msgid "Error creating volumes" +#~ msgid "request ip info is %s." #~ msgstr "" -#~ msgid "Non-VSA volume %d" +#~ msgid "new str info is %s." #~ msgstr "" -#~ msgid "Spawning volume %(volume_id)s with drive type %(drive_type)s" +#~ msgid "Failed to create iser target for volume %(volume_id)s." #~ msgstr "" -#~ msgid "Error creating volume" +#~ msgid "Failed to remove iser target for volume %(volume_id)s." #~ msgstr "" -#~ msgid "No capability selected for volume of size %(size)s" +#~ msgid "rtstool is not installed correctly" #~ msgstr "" -#~ msgid "Host %s:" +#~ msgid "Creating iser_target for: %s" #~ msgstr "" -#~ msgid "" -#~ "\tDrive %(qosgrp)-25s: total %(total)2s, " -#~ "used %(used)2s, free %(free)2s. Available " -#~ "capacity %(avail)-5s" +#~ msgid "Failed to create iser target for volume id:%(vol_id)s: %(e)s" #~ msgstr "" -#~ msgid "" -#~ "\t LeastUsedHost: Best host: %(best_host)s." -#~ " (used capacity %(min_used)s)" +#~ msgid "Removing iser_target for: %s" #~ msgstr "" -#~ msgid "" -#~ "\t MostAvailCap: Best host: %(best_host)s. " -#~ "(available %(max_avail)s %(type_str)s)" +#~ msgid "Failed to remove iser target for volume id:%(vol_id)s: %(e)s" #~ msgstr "" -#~ msgid "(%(nm)s) publish (key: %(routing_key)s) %(message)s" +#~ msgid "Volume %s does not exist, it seems it was already deleted" #~ msgstr "" -#~ msgid "Publishing to route %s" +#~ msgid "Executing zfs send/recv on the appliance" #~ msgstr "" -#~ msgid "Declaring queue %s" +#~ msgid "zfs send/recv done, new volume %s created" #~ msgstr "" -#~ msgid "Declaring exchange %s" +#~ msgid "Failed to delete temp snapshot %(volume)s@%(snapshot)s" #~ msgstr "" -#~ msgid "Binding %(queue)s to %(exchange)s with key %(routing_key)s" +#~ msgid "Failed to delete zfs recv snapshot %(volume)s@%(snapshot)s" #~ msgstr "" -#~ msgid "Getting from %(queue)s: %(message)s" +#~ msgid "rbd export-diff failed - %s" #~ msgstr "" -#~ msgid "Test: Emulate wrong VSA name. Raise" +#~ msgid "rbd import-diff failed - %s" #~ msgstr "" -#~ msgid "Test: Emulate DB error. Raise" +#~ msgid "%s is not on GPFS. Perhaps GPFS not mounted." #~ msgstr "" -#~ msgid "Test: user_data = %s" +#~ msgid "Folder %s does not exist, it seems it was already deleted." #~ msgstr "" -#~ msgid "_create: param=%s" +#~ msgid "No 'os-update_readonly_flag' was specified in request." #~ msgstr "" -#~ msgid "Host %s" +#~ msgid "Volume 'readonly' flag must be specified in request as a boolean." 
#~ msgstr "" -#~ msgid "Test: provision vol %(name)s on host %(host)s" +#~ msgid "ISER provider_location not stored, using discovery" #~ msgstr "" -#~ msgid "\t vol=%(vol)s" +#~ msgid "Could not find iSER export for volume %s" #~ msgstr "" -#~ msgid "Test: VSA update request: vsa_id=%(vsa_id)s values=%(values)s" +#~ msgid "ISER Discovery: Found %s" #~ msgstr "" -#~ msgid "Test: Volume create: %s" +#~ msgid "Failed to access the device on the path %(path)s: %(error)s." #~ msgstr "" -#~ msgid "Test: Volume get request: id=%(volume_id)s" +#~ msgid "iSER device not found at %s" #~ msgstr "" -#~ msgid "Test: Volume update request: id=%(volume_id)s values=%(values)s" +#~ msgid "Found iSER node %(host_device)s (after %(tries)s rescans)." #~ msgstr "" -#~ msgid "Test: Volume get: id=%(volume_id)s" +#~ msgid "Skipping ensure_export. No iser_target provisioned for volume: %s" #~ msgstr "" -#~ msgid "Task [%(name)s] %(task)s status: success %(result)s" +#~ msgid "Skipping remove_export. No iser_target provisioned for volume: %s" #~ msgstr "" -#~ msgid "Task [%(name)s] %(task)s status: %(status)s %(error_info)s" +#~ msgid "Downloading image: %s from glance image server." #~ msgstr "" -#~ msgid "Unable to get updated status: %s" +#~ msgid "Uploading image: %s to the Glance image server." #~ msgstr "" -#~ msgid "" -#~ "deactivate_node is called for " -#~ "node_id = %(id)s node_ip = %(ip)s" +#~ msgid "Invalid request body" #~ msgstr "" -#~ msgid "virsh said: %r" +#~ msgid "enter: _get_host_from_connector: prefix %s" #~ msgstr "" -#~ msgid "cool, it's a device" +#~ msgid "Schedule volume flow not retrieved" #~ msgstr "" -#~ msgid "Unable to read LXC console" +#~ msgid "Failed to successfully complete schedule volume using flow: %s" #~ msgstr "" -#~ msgid "" -#~ "to xml...\n" -#~ ":%s " +#~ msgid "Create volume flow not retrieved" #~ msgstr "" -#~ msgid "During wait running, %s disappeared." +#~ msgid "Failed to successfully complete create volume workflow" #~ msgstr "" -#~ msgid "Instance %s running successfully." +#~ msgid "Expected volume result not found" #~ msgstr "" -#~ msgid "The nwfilter(%(instance_secgroup_filter_name)s) is not found." +#~ msgid "Manager volume flow not retrieved" #~ msgstr "" -#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image verification failed" +#~ msgid "Failed to successfully complete manager volume workflow" #~ msgstr "" -#~ msgid "" -#~ "%(container_format)s-%(id)s (%(base_file)s): image " -#~ "verification skipped, no hash stored" +#~ msgid "Unable to update stats, driver is uninitialized" #~ msgstr "" -#~ msgid "%(container_format)s-%(id)s (%(base_file)s): checking" +#~ msgid "Bad reponse from server: %s" #~ msgstr "" -#~ msgid "" -#~ "%(container_format)s-%(id)s (%(base_file)s): in use:" -#~ " on this node %(local)d local, " -#~ "%(remote)d on other nodes" +#~ msgid "%(flow)s has moved into state %(state)s from state %(old_state)s" #~ msgstr "" -#~ msgid "" -#~ "%(container_format)s-%(id)s (%(base_file)s): warning " -#~ "-- an absent base file is in " -#~ "use! 
instances: %(instance_list)s" +#~ msgid "No request spec, will not reschedule" #~ msgstr "" -#~ msgid "" -#~ "%(container_format)s-%(id)s (%(base_file)s): in: on" -#~ " other nodes (%(remote)d on other " -#~ "nodes)" +#~ msgid "No retry filter property or associated retry info, will not reschedule" #~ msgstr "" -#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image is not in use" +#~ msgid "Retry info not present, will not reschedule" #~ msgstr "" -#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image is in use" +#~ msgid "Clear capabilities" #~ msgstr "" -#~ msgid "Created VM %s..." +#~ msgid "This usually means the volume was never succesfully created." #~ msgstr "" -#~ msgid "Created VM %(instance_name)s as %(vm_ref)s." +#~ msgid "setting LU uppper (end) limit to %s" #~ msgstr "" -#~ msgid "Creating a CDROM-specific VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +#~ msgid "Can't find lun or lun goup in array" #~ msgstr "" -#~ msgid "" -#~ "Created a CDROM-specific VBD %(vbd_ref)s" -#~ " for VM %(vm_ref)s, VDI %(vdi_ref)s." +#~ msgid "Volume to be restored to is smaller than the backup to be restored" #~ msgstr "" -#~ msgid "No primary VDI found for%(vm_ref)s" +#~ msgid "Volume driver '%(driver)s' not initialized." #~ msgstr "" -#~ msgid "Snapshotting VM %(vm_ref)s with label '%(label)s'..." +#~ msgid "in looping call" #~ msgstr "" -#~ msgid "Created snapshot %(template_vm_ref)s from VM %(vm_ref)s." +#~ msgid "Is the appropriate service running?" #~ msgstr "" -#~ msgid "Fetching image %(image)s" +#~ msgid "Could not find another host" #~ msgstr "" -#~ msgid "Image Type: %s" +#~ msgid "Not enough allocatable volume gigabytes remaining" #~ msgstr "" -#~ msgid "ISO: Found sr possibly containing the ISO image" +#~ msgid "Unable to update stats on non-intialized Volume Group: %s" #~ msgstr "" -#~ msgid "Size for image %(image)s:%(virtual_size)d" +#~ msgid "do_setup: Pool %s does not exist" #~ msgstr "" -#~ msgid "instance %s: Failed to fetch glance image" +#~ msgid "migrate_volume started with more than one vdisk copy" #~ msgstr "" -#~ msgid "Creating VBD for VDI %s ... " +#~ msgid "migrate_volume: Could not get vdisk copy data" #~ msgstr "" -#~ msgid "Creating VBD for VDI %s done." +#~ msgid "Selected datastore: %s for the volume." #~ msgstr "" -#~ msgid "VBD.unplug successful first time." +#~ msgid "There are no valid datastores present under %s." #~ msgstr "" -#~ msgid "VBD.unplug rejected: retrying..." +#~ msgid "Unable to create volume, driver not initialized" #~ msgstr "" -#~ msgid "Not sleeping anymore!" +#~ msgid "Migration %(migration_id)s could not be found." #~ msgstr "" -#~ msgid "VBD.unplug successful eventually." +#~ msgid "Bad driver response status: %(status)s" #~ msgstr "" -#~ msgid "Ignoring XenAPI.Failure in VBD.unplug: %s" +#~ msgid "Instance %(instance_id)s could not be found." #~ msgstr "" -#~ msgid "Ignoring XenAPI.Failure %s" +#~ msgid "Volume retype failed: %(reason)s" #~ msgstr "" -#~ msgid "Starting instance %s" +#~ msgid "SIGTERM received" #~ msgstr "" -#~ msgid "instance %s: Failed to spawn" +#~ msgid "Child %(pid)d exited with status %(code)d" #~ msgstr "" -#~ msgid "Instance %s failed to spawn - performing clean-up" +#~ msgid "_wait_child %d" #~ msgstr "" -#~ msgid "instance %s: Failed to spawn - Unable to create VM" +#~ msgid "wait wrap.failed %s" #~ msgstr "" #~ msgid "" -#~ "Auto configuring disk for instance " -#~ "%(instance_uuid)s, attempting to resize " -#~ "partition..." +#~ "Report interval must be less than " +#~ "service down time. 
Current config: " +#~ ". Setting " +#~ "service_down_time to: %(new_service_down_time)s" #~ msgstr "" -#~ msgid "Invalid value for injected_files: '%s'" +#~ msgid "Failed to update iscsi target for volume %(name)s." #~ msgstr "" -#~ msgid "Starting VM %s..." +#~ msgid "Updating iscsi target: %s" #~ msgstr "" -#~ msgid "Spawning VM %(instance_uuid)s created %(vm_ref)s." +#~ msgid "Failed to update iscsi target %(name)s: %(e)s" #~ msgstr "" -#~ msgid "Instance %s: waiting for running" +#~ msgid "Caught '%(exception)s' exception." #~ msgstr "" -#~ msgid "Instance %s: running" +#~ msgid "Get code level failed" #~ msgstr "" -#~ msgid "Resources to remove:%s" +#~ msgid "do_setup: Could not get system name" #~ msgstr "" -#~ msgid "Removing VDI %(vdi_ref)s(uuid:%(vdi_to_remove)s)" +#~ msgid "Failed to get license information." #~ msgstr "" -#~ msgid "Skipping VDI destroy for %s" +#~ msgid "do_setup: No configured nodes" #~ msgstr "" -#~ msgid "Finished snapshot and upload for VM %s" +#~ msgid "enter: _get_chap_secret_for_host: host name %s" #~ msgstr "" -#~ msgid "Starting snapshot for VM %s" +#~ msgid "" +#~ "leave: _get_chap_secret_for_host: host name " +#~ "%(host_name)s with secret %(chap_secret)s" #~ msgstr "" -#~ msgid "Unable to Snapshot instance %(instance_uuid)s: %(exc)s" +#~ msgid "" +#~ "_create_host: Cannot clean host name. " +#~ "Host name is not unicode or string" #~ msgstr "" -#~ msgid "Updating instance '%(instance_uuid)s' progress to %(progress)d" +#~ msgid "enter: _get_host_from_connector: %s" #~ msgstr "" -#~ msgid "Resize instance %s complete" +#~ msgid "leave: _get_host_from_connector: host %s" #~ msgstr "" -#~ msgid "domid changed from %(olddomid)s to %(newdomid)s" +#~ msgid "enter: _create_host: host %s" #~ msgstr "" -#~ msgid "VM %(instance_uuid)s already halted,skipping shutdown..." +#~ msgid "_create_host: No connector ports" #~ msgstr "" -#~ msgid "Shutting down VM for Instance %(instance_uuid)s" +#~ msgid "leave: _create_host: host %(host)s - %(host_name)s" #~ msgstr "" -#~ msgid "Destroying VDIs for Instance %(instance_uuid)s" +#~ msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" #~ msgstr "" #~ msgid "" -#~ "Instance %(instance_uuid)s using RAW or " -#~ "VHD, skipping kernel and ramdisk " -#~ "deletion" +#~ "storwize_svc_multihostmap_enabled is set to " +#~ "False, Not allow multi host mapping" #~ msgstr "" -#~ msgid "Instance %(instance_uuid)s VM destroyed" +#~ msgid "volume %s mapping to multi host" #~ msgstr "" -#~ msgid "Destroying VM for Instance %(instance_uuid)s" +#~ msgid "" +#~ "leave: _map_vol_to_host: LUN %(result_lun)s, " +#~ "volume %(volume_name)s, host %(host_name)s" #~ msgstr "" -#~ msgid "Automatically hard rebooting %d" +#~ msgid "enter: _delete_host: host %s " #~ msgstr "" -#~ msgid "Instance for migration %d not found, skipping" +#~ msgid "leave: _delete_host: host %s " #~ msgstr "" -#~ msgid "injecting network info to xs for vm: |%s|" +#~ msgid "_create_host failed to return the host name." #~ msgstr "" -#~ msgid "creating vif(s) for vm: |%s|" +#~ msgid "_get_host_from_connector failed to return the host name for connector" #~ msgstr "" -#~ msgid "Creating VIF for VM %(vm_ref)s, network %(network_ref)s." +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to any host found." #~ msgstr "" -#~ msgid "Created VIF %(vif_ref)s for VM %(vm_ref)s, network %(network_ref)s." +#~ msgid "" +#~ "terminate_connection: Multiple mappings of " +#~ "volume %(vol_name)s found, no host " +#~ "specified." 
#~ msgstr "" -#~ msgid "injecting hostname to xs for vm: |%s|" +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to host %(host_name)s found" #~ msgstr "" -#~ msgid "" -#~ "The agent call to %(method)s returned" -#~ " an invalid response: %(ret)r. VM " -#~ "id=%(instance_uuid)s; path=%(path)s; args=%(addl_args)r" +#~ msgid "protocol must be specified as ' iSCSI' or ' FC'" #~ msgstr "" -#~ msgid "" -#~ "TIMEOUT: The call to %(method)s timed" -#~ " out. VM id=%(instance_uuid)s; args=%(args)r" +#~ msgid "enter: _create_vdisk: vdisk %s " #~ msgstr "" #~ msgid "" -#~ "NOT IMPLEMENTED: The call to %(method)s" -#~ " is not supported by the agent. " -#~ "VM id=%(instance_uuid)s; args=%(args)r" +#~ "_create_vdisk %(name)s - did not find success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" #~ msgid "" -#~ "The call to %(method)s returned an " -#~ "error: %(e)s. VM id=%(instance_uuid)s; " -#~ "args=%(args)r" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find success " +#~ "message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" #~ msgstr "" -#~ msgid "Creating VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find mapping " +#~ "id in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" #~ msgstr "" -#~ msgid "Error destroying VDI" +#~ msgid "" +#~ "_prepare_fc_map: Failed to prepare FlashCopy" +#~ " from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" -#~ msgid "Volume status must be available" +#~ msgid "" +#~ "Unexecpted mapping status %(status)s for " +#~ "mapping %(id)s. Attributes: %(attr)s" #~ msgstr "" -#~ msgid "\tVolume %s is NOT VSA volume" +#~ msgid "" +#~ "Mapping %(id)s prepare failed to " +#~ "complete within the allotted %(to)d " +#~ "seconds timeout. Terminating." 
#~ msgstr "" -#~ msgid "\tFE VSA Volume %s creation - do nothing" +#~ msgid "" +#~ "_prepare_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s with " +#~ "exception %(ex)s" #~ msgstr "" -#~ msgid "VSA BE create_volume for %s failed" +#~ msgid "_prepare_fc_map: %s" #~ msgstr "" -#~ msgid "VSA BE create_volume for %s succeeded" +#~ msgid "" +#~ "_start_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s deletion - do nothing" +#~ msgid "" +#~ "enter: _run_flashcopy: execute FlashCopy from" +#~ " source %(source)s to target %(target)s" #~ msgstr "" -#~ msgid "VSA BE delete_volume for %s failed" +#~ msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" #~ msgstr "" -#~ msgid "VSA BE delete_volume for %s suceeded" +#~ msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s local path call - call discover" +#~ msgid "_create_copy: Source vdisk %(src_vdisk)s (%(src_id)s) does not exist" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s ensure export - do nothing" +#~ msgid "" +#~ "_create_copy: cannot get source vdisk " +#~ "%(src)s capacity from vdisk attributes " +#~ "%(attr)s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s create export - do nothing" +#~ msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s remove export - do nothing" +#~ msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" #~ msgstr "" -#~ msgid "VSA BE remove_export for %s failed" +#~ msgid "" +#~ "leave: _get_flashcopy_mapping_attributes: mapping " +#~ "%(fc_map_id)s, attributes %(attributes)s" #~ msgstr "" -#~ msgid "Failed to retrieve QoS info" +#~ msgid "enter: _is_vdisk_defined: vdisk %s " #~ msgstr "" -#~ msgid "invalid drive data" +#~ msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " #~ msgstr "" -#~ msgid "drive_name not defined" +#~ msgid "enter: _delete_vdisk: vdisk %s" #~ msgstr "" -#~ msgid "invalid drive type name %s" +#~ msgid "warning: Tried to delete vdisk %s but it does not exist." #~ msgstr "" -#~ msgid "*** Experimental VSA code ***" +#~ msgid "leave: _delete_vdisk: vdisk %s" #~ msgstr "" -#~ msgid "Requested number of VCs (%d) is too high. Setting to default" +#~ msgid "" +#~ "_add_vdisk_copy %(name)s - did not find" +#~ " success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" -#~ msgid "Creating VSA: %s" +#~ msgid "_get_vdisk_copy_attrs: Could not get vdisk copy data" #~ msgstr "" -#~ msgid "" -#~ "VSA ID %(vsa_id)d %(vsa_name)s: Create " -#~ "volume %(vol_name)s, %(vol_size)d GB, type " -#~ "%(vol_type_id)s" +#~ msgid "_get_pool_attrs: Pool %s does not exist" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)d: Update VSA status to %(status)s" +#~ msgid "enter: _execute_command_and_parse_attributes: command %s" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)d: Update VSA call" +#~ msgid "" +#~ "leave: _execute_command_and_parse_attributes:\n" +#~ "command: %(cmd)s\n" +#~ "attributes: %(attr)s" #~ msgstr "" -#~ msgid "Adding %(add_cnt)s VCs to VSA %(vsa_name)s." +#~ msgid "" +#~ "_get_hdr_dic: attribute headers and values do not match.\n" +#~ " Headers: %(header)s\n" +#~ " Values: %(row)s" #~ msgstr "" -#~ msgid "Deleting %(del_cnt)s VCs from VSA %(vsa_name)s." 
+#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ "stdout: %(out)s\n" +#~ "stderr: %(err)s\n" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Deleting %(direction)s volume %(vol_name)s" +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" -#~ msgid "Unable to delete volume %s" +#~ msgid "Did not find expected column in %(fun)s: %(hdr)s" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Forced delete. %(direction)s volume %(vol_name)s" +#~ msgid "" +#~ "Volume size %(size)s cannot be lesser" +#~ " than the snapshot size %(snap_size)s. " +#~ "They must be >= original snapshot " +#~ "size." #~ msgstr "" -#~ msgid "Going to try to terminate VSA ID %s" +#~ msgid "" +#~ "Clones currently disallowed when %(size)s " +#~ "< %(source_size)s. They must be >= " +#~ "original volume size." #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Delete instance %(name)s" +#~ msgid "" +#~ "Size of specified image %(image_size)s " +#~ "is larger than volume size " +#~ "%(volume_size)s." #~ msgstr "" -#~ msgid "Create call received for VSA %s" +#~ msgid "" +#~ "Image minDisk size %(min_disk)s is " +#~ "larger than the volume size " +#~ "%(volume_size)s." #~ msgstr "" -#~ msgid "Failed to find VSA %(vsa_id)d" +#~ msgid "Updating volume %(volume_id)s with %(update)s" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Drive %(vol_id)s created. Status %(status)s" +#~ msgid "Volume %s: resetting 'creating' status failed" #~ msgstr "" -#~ msgid "Drive %(vol_name)s (%(vol_disp_name)s) still in creating phase - wait" +#~ msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s" #~ msgstr "" -#~ msgid "" -#~ "VSA ID %(vsa_id)d: Not all volumes " -#~ "are created (%(cvol_real)d of %(cvol_exp)d)" +#~ msgid "Marking volume %s as bootable" #~ msgstr "" #~ msgid "" -#~ "VSA ID %(vsa_id)d: Drive %(vol_name)s " -#~ "(%(vol_disp_name)s) is in %(status)s state" +#~ "Attempting download of %(image_id)s " +#~ "(%(image_location)s) to volume %(volume_id)s" #~ msgstr "" -#~ msgid "Failed to update attach status for volume %(vol_name)s. 
%(ex)s" +#~ msgid "" +#~ "Downloaded image %(image_id)s (%(image_location)s)" +#~ " to volume %(volume_id)s successfully" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)d: Delete all BE volumes" +#~ msgid "" +#~ "Creating volume glance metadata for " +#~ "volume %(volume_id)s backed by image " +#~ "%(image_id)s with: %(vol_metadata)s" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)d: Start %(vc_count)d instances" +#~ msgid "" +#~ "Cloning %(volume_id)s from image %(image_id)s" +#~ " at location %(image_location)s" #~ msgstr "" diff --git a/cinder/locale/de/LC_MESSAGES/cinder.po b/cinder/locale/de/LC_MESSAGES/cinder.po index e5b8feee4b..747945fe92 100644 --- a/cinder/locale/de/LC_MESSAGES/cinder.po +++ b/cinder/locale/de/LC_MESSAGES/cinder.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: cinder\n" "Report-Msgid-Bugs-To: FULL NAME \n" -"POT-Creation-Date: 2012-04-08 23:04+0000\n" +"POT-Creation-Date: 2014-02-03 06:16+0000\n" "PO-Revision-Date: 2011-08-23 11:23+0000\n" "Last-Translator: Thierry Carrez \n" "Language-Team: German \n" @@ -15,8194 +15,10728 @@ msgstr "" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 0.9.6\n" +"Generated-By: Babel 1.3\n" -#: cinder/context.py:59 +#: cinder/context.py:61 #, python-format msgid "Arguments dropped when creating context: %s" msgstr "" -#: cinder/context.py:90 +#: cinder/context.py:102 #, python-format msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" msgstr "" -#: cinder/crypto.py:48 -msgid "Filename of root CA" -msgstr "Dateiname der Root CA" - -#: cinder/crypto.py:51 -msgid "Filename of private key" -msgstr "Dateiname des Private Key" - -#: cinder/crypto.py:54 -msgid "Filename of root Certificate Revocation List" +#: cinder/exception.py:66 cinder/brick/exception.py:33 +msgid "An unknown exception occurred." msgstr "" -#: cinder/crypto.py:57 -msgid "Where we keep our keys" -msgstr "Wo wir unsere Schlüssel aufbewahren" - -#: cinder/crypto.py:60 -msgid "Where we keep our root CA" +#: cinder/exception.py:88 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" msgstr "" -#: cinder/crypto.py:63 -msgid "Should we use a CA for each project?" -msgstr "Soll eine eigenständige CA für jedes Projekt verwendet werden?" - -#: cinder/crypto.py:67 +#: cinder/exception.py:107 #, python-format -msgid "Subject for certificate for users, %s for project, user, timestamp" +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" msgstr "" -#: cinder/crypto.py:72 +#: cinder/exception.py:112 #, python-format -msgid "Subject for certificate for projects, %s for project, timestamp" +msgid "Volume driver reported an error: %(message)s" msgstr "" -#: cinder/crypto.py:292 +#: cinder/exception.py:116 #, python-format -msgid "Flags path: %s" +msgid "Backup driver reported an error: %(message)s" msgstr "" -#: cinder/exception.py:56 -msgid "Unexpected error while running command." -msgstr "Unerwarteter Fehler bei Ausführung des Kommandos." - -#: cinder/exception.py:59 +#: cinder/exception.py:120 #, python-format -msgid "" -"%(description)s\n" -"Command: %(cmd)s\n" -"Exit code: %(exit_code)s\n" -"Stdout: %(stdout)r\n" -"Stderr: %(stderr)r" -msgstr "" -"%(description)s\n" -"Befehl: %(cmd)s\n" -"Exit-Code: %(exit_code)s\n" -"Stdout: %(stdout)r\n" -"Stderr: %(stderr)r" - -#: cinder/exception.py:94 -msgid "DB exception wrapped." -msgstr "" - -#: cinder/exception.py:155 -msgid "An unknown exception occurred." 
-msgstr "" - -#: cinder/exception.py:178 -msgid "Failed to decrypt text" -msgstr "" - -#: cinder/exception.py:182 -msgid "Failed to paginate through images from image service" -msgstr "" - -#: cinder/exception.py:186 -msgid "Virtual Interface creation failed" -msgstr "" - -#: cinder/exception.py:190 -msgid "5 attempts to create virtual interfacewith unique mac address failed" -msgstr "" - -#: cinder/exception.py:195 -msgid "Connection to glance failed" +msgid "Connection to glance failed: %(reason)s" msgstr "" -#: cinder/exception.py:199 -msgid "Connection to melange failed" -msgstr "" - -#: cinder/exception.py:203 +#: cinder/exception.py:124 msgid "Not authorized." msgstr "" -#: cinder/exception.py:208 +#: cinder/exception.py:129 msgid "User does not have admin privileges" msgstr "" -#: cinder/exception.py:212 +#: cinder/exception.py:133 #, python-format msgid "Policy doesn't allow %(action)s to be performed." msgstr "" -#: cinder/exception.py:216 +#: cinder/exception.py:137 #, fuzzy, python-format msgid "Not authorized for image %(image_id)s." msgstr "keine Methode für diese Nachricht gefunden: %s" -#: cinder/exception.py:220 +#: cinder/exception.py:141 +msgid "Volume driver not ready." +msgstr "" + +#: cinder/exception.py:145 cinder/brick/exception.py:74 msgid "Unacceptable parameters." msgstr "" -#: cinder/exception.py:225 -msgid "Invalid snapshot" +#: cinder/exception.py:150 +#, python-format +msgid "Invalid snapshot: %(reason)s" msgstr "" -#: cinder/exception.py:229 +#: cinder/exception.py:154 #, python-format -msgid "Volume %(volume_id)s is not attached to anything" +msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." msgstr "" -#: cinder/exception.py:233 cinder/api/openstack/compute/contrib/keypairs.py:113 -msgid "Keypair data is invalid" +#: cinder/exception.py:159 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." msgstr "" -#: cinder/exception.py:237 +#: cinder/exception.py:163 msgid "Failed to load data into json format" msgstr "" -#: cinder/exception.py:241 -msgid "The request is invalid." +#: cinder/exception.py:167 +msgid "The results are invalid." msgstr "" -#: cinder/exception.py:245 +#: cinder/exception.py:171 #, python-format -msgid "Invalid signature %(signature)s for user %(user)s." +msgid "Invalid input received: %(reason)s" msgstr "" -#: cinder/exception.py:249 -msgid "Invalid input received" +#: cinder/exception.py:175 +#, python-format +msgid "Invalid volume type: %(reason)s" msgstr "" -#: cinder/exception.py:253 +#: cinder/exception.py:179 #, python-format -msgid "Invalid instance type %(instance_type)s." +msgid "Invalid volume: %(reason)s" msgstr "" -#: cinder/exception.py:257 -msgid "Invalid volume type" +#: cinder/exception.py:183 +#, python-format +msgid "Invalid content type %(content_type)s." msgstr "" -#: cinder/exception.py:261 -msgid "Invalid volume" +#: cinder/exception.py:187 +#, python-format +msgid "Invalid host: %(reason)s" msgstr "" -#: cinder/exception.py:265 +#: cinder/exception.py:193 cinder/brick/exception.py:81 #, python-format -msgid "Invalid port range %(from_port)s:%(to_port)s. %(msg)s" +msgid "%(err)s" msgstr "" -#: cinder/exception.py:269 +#: cinder/exception.py:197 #, python-format -msgid "Invalid IP protocol %(protocol)s." +msgid "Invalid auth key: %(reason)s" msgstr "" -#: cinder/exception.py:273 +#: cinder/exception.py:201 #, python-format -msgid "Invalid content type %(content_type)s." 
+msgid "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" msgstr "" -#: cinder/exception.py:277 -#, python-format -msgid "Invalid cidr %(cidr)s." +#: cinder/exception.py:206 +msgid "Service is unavailable at this time." msgstr "" -#: cinder/exception.py:281 -msgid "Invalid reuse of an RPC connection." +#: cinder/exception.py:210 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" msgstr "" -#: cinder/exception.py:285 -msgid "Invalid Parameter: Unicode is not supported by the current database." +#: cinder/exception.py:214 +#, python-format +msgid "The device in the path %(path)s is unavailable: %(reason)s" msgstr "" -#: cinder/exception.py:292 +#: cinder/exception.py:218 #, python-format -msgid "%(err)s" +msgid "Expected a uuid but received %(uuid)s." +msgstr "" + +#: cinder/exception.py:222 cinder/brick/exception.py:68 +msgid "Resource could not be found." msgstr "" -#: cinder/exception.py:296 +#: cinder/exception.py:228 #, python-format -msgid "" -"Cannot perform action '%(action)s' on aggregate %(aggregate_id)s. Reason:" -" %(reason)s." +msgid "Volume %(volume_id)s could not be found." msgstr "" -#: cinder/exception.py:301 +#: cinder/exception.py:232 #, python-format -msgid "Group not valid. Reason: %(reason)s" +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." msgstr "" -#: cinder/exception.py:305 +#: cinder/exception.py:237 #, python-format msgid "" -"Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot %(method)s while" -" the instance is in this state." +"Volume %(volume_id)s has no administration metadata with key " +"%(metadata_key)s." msgstr "" -#: cinder/exception.py:310 +#: cinder/exception.py:242 #, python-format -msgid "Instance %(instance_id)s is not running." +msgid "Invalid metadata: %(reason)s" msgstr "" -#: cinder/exception.py:314 +#: cinder/exception.py:246 #, python-format -msgid "Instance %(instance_id)s is not suspended." +msgid "Invalid metadata size: %(reason)s" msgstr "" -#: cinder/exception.py:318 +#: cinder/exception.py:250 #, python-format -msgid "Instance %(instance_id)s is not in rescue mode" +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." msgstr "" -#: cinder/exception.py:322 -msgid "Failed to suspend instance" +#: cinder/exception.py:255 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." msgstr "" -#: cinder/exception.py:326 -msgid "Failed to resume server" +#: cinder/exception.py:259 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." msgstr "" -#: cinder/exception.py:330 -msgid "Failed to reboot instance" +#: cinder/exception.py:264 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." msgstr "" -#: cinder/exception.py:334 -msgid "Failed to terminate instance" +#: cinder/exception.py:269 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s deletion is not allowed with volumes " +"present with the type." msgstr "" -#: cinder/exception.py:338 -msgid "Service is unavailable at this time." +#: cinder/exception.py:274 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." msgstr "" -#: cinder/exception.py:342 -msgid "Volume service is unavailable at this time." +#: cinder/exception.py:278 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" msgstr "" -#: cinder/exception.py:346 -msgid "Compute service is unavailable at this time." 
+#: cinder/exception.py:282 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" msgstr "" -#: cinder/exception.py:350 +#: cinder/exception.py:287 #, python-format -msgid "Unable to migrate instance (%(instance_id)s) to current host (%(host)s)." +msgid "No target id found for volume %(volume_id)s." msgstr "" -#: cinder/exception.py:355 -msgid "Destination compute host is unavailable at this time." +#: cinder/exception.py:291 +#, python-format +msgid "Invalid image href %(image_href)s." msgstr "" -#: cinder/exception.py:359 -msgid "Original compute host is unavailable at this time." +#: cinder/exception.py:295 +#, python-format +msgid "Image %(image_id)s could not be found." msgstr "" -#: cinder/exception.py:363 -msgid "The supplied hypervisor type of is invalid." +#: cinder/exception.py:299 +#, python-format +msgid "Service %(service_id)s could not be found." msgstr "" -#: cinder/exception.py:367 -msgid "The instance requires a newer hypervisor version than has been provided." +#: cinder/exception.py:303 +#, python-format +msgid "Host %(host)s could not be found." msgstr "" -#: cinder/exception.py:372 +#: cinder/exception.py:307 #, python-format -msgid "" -"The supplied disk path (%(path)s) already exists, it is expected not to " -"exist." +msgid "Scheduler Host Filter %(filter_name)s could not be found." msgstr "" -#: cinder/exception.py:377 +#: cinder/exception.py:311 #, python-format -msgid "The supplied device path (%(path)s) is invalid." +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." msgstr "" -#: cinder/exception.py:381 +#: cinder/exception.py:315 #, python-format -msgid "The supplied device (%(device)s) is busy." +msgid "Could not find binary %(binary)s on host %(host)s." msgstr "" -#: cinder/exception.py:385 -msgid "Unacceptable CPU info" +#: cinder/exception.py:319 +#, python-format +msgid "Invalid reservation expiration %(expire)s." msgstr "" -#: cinder/exception.py:389 +#: cinder/exception.py:323 #, python-format -msgid "%(address)s is not a valid IP v4/6 address." +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:328 +msgid "Quota could not be found" msgstr "" -#: cinder/exception.py:393 +#: cinder/exception.py:332 #, python-format -msgid "" -"VLAN tag is not appropriate for the port group %(bridge)s. Expected VLAN " -"tag is %(tag)s, but the one associated with the port group is %(pgroup)s." +msgid "Unknown quota resources %(unknown)s." msgstr "" -#: cinder/exception.py:399 +#: cinder/exception.py:336 #, python-format -msgid "" -"vSwitch which contains the port group %(bridge)s is not associated with " -"the desired physical adapter. Expected vSwitch is %(expected)s, but the " -"one associated is %(actual)s." +msgid "Quota for project %(project_id)s could not be found." msgstr "" -#: cinder/exception.py:406 +#: cinder/exception.py:340 #, python-format -msgid "Disk format %(disk_format)s is not acceptable" +msgid "Quota class %(class_name)s could not be found." msgstr "" -#: cinder/exception.py:410 +#: cinder/exception.py:344 #, python-format -msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgid "Quota usage for project %(project_id)s could not be found." msgstr "" -#: cinder/exception.py:414 +#: cinder/exception.py:348 #, python-format -msgid "Instance %(instance_id)s is unacceptable: %(reason)s" +msgid "Quota reservation %(uuid)s could not be found." 
msgstr "" -#: cinder/exception.py:418 +#: cinder/exception.py:352 #, python-format -msgid "Ec2 id %(ec2_id)s is unacceptable." +msgid "Quota exceeded for resources: %(overs)s" msgstr "" -#: cinder/exception.py:422 -msgid "Resource could not be found." +#: cinder/exception.py:356 +#, python-format +msgid "File %(file_path)s could not be found." msgstr "" -#: cinder/exception.py:427 +#: cinder/exception.py:365 #, python-format -msgid "Required flag %(flag)s not set." +msgid "Volume Type %(id)s already exists." msgstr "" -#: cinder/exception.py:431 +#: cinder/exception.py:369 #, python-format -msgid "Volume %(volume_id)s could not be found." +msgid "Volume type encryption for type %(type_id)s already exists." msgstr "" -#: cinder/exception.py:435 +#: cinder/exception.py:373 #, python-format -msgid "Unable to locate account %(account_name)s on Solidfire device" +msgid "Malformed message body: %(reason)s" msgstr "" -#: cinder/exception.py:440 +#: cinder/exception.py:377 #, python-format -msgid "Volume not found for instance %(instance_id)s." +msgid "Could not find config at %(path)s" msgstr "" -#: cinder/exception.py:444 +#: cinder/exception.py:381 +#, fuzzy, python-format +msgid "Could not find parameter %(param)s" +msgstr "Nicht möglich volume %s zufinden" + +#: cinder/exception.py:385 #, python-format -msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgid "Could not load paste app '%(name)s' from %(path)s" msgstr "" -#: cinder/exception.py:449 -msgid "Zero volume types found." +#: cinder/exception.py:389 +#, python-format +msgid "No valid host was found. %(reason)s" msgstr "" -#: cinder/exception.py:453 +#: cinder/exception.py:398 #, python-format -msgid "Volume type %(volume_type_id)s could not be found." +msgid "Host %(host)s is not up or doesn't exist." msgstr "" -#: cinder/exception.py:457 +#: cinder/exception.py:402 #, python-format -msgid "Volume type with name %(volume_type_name)s could not be found." +msgid "Quota exceeded: code=%(code)s" msgstr "" -#: cinder/exception.py:462 +#: cinder/exception.py:409 #, python-format msgid "" -"Volume Type %(volume_type_id)s has no extra specs with key " -"%(extra_specs_key)s." +"Requested volume or snapshot exceeds allowed Gigabytes quota. Requested " +"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." msgstr "" -#: cinder/exception.py:467 +#: cinder/exception.py:415 #, python-format -msgid "Snapshot %(snapshot_id)s could not be found." +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" msgstr "" -#: cinder/exception.py:471 +#: cinder/exception.py:419 #, python-format -msgid "deleting volume %(volume_name)s that has snapshot" +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" msgstr "" -#: cinder/exception.py:475 +#: cinder/exception.py:423 #, python-format -msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgid "Detected more than one volume with name %(vol_name)s" msgstr "" -#: cinder/exception.py:480 +#: cinder/exception.py:427 #, python-format -msgid "No target id found for volume %(volume_id)s." +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" msgstr "" -#: cinder/exception.py:484 +#: cinder/exception.py:432 #, python-format -msgid "No disk at %(location)s" +msgid "Unknown or unsupported command %(cmd)s" msgstr "" -#: cinder/exception.py:488 +#: cinder/exception.py:436 #, python-format -msgid "Could not find a handler for %(driver_type)s volume." 
+msgid "Malformed response to command %(cmd)s: %(reason)s" msgstr "" -#: cinder/exception.py:492 +#: cinder/exception.py:440 #, python-format -msgid "Invalid image href %(image_href)s." +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" msgstr "" -#: cinder/exception.py:496 +#: cinder/exception.py:444 +#, python-format msgid "" -"Some images have been stored via hrefs. This version of the api does not " -"support displaying image hrefs." +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" msgstr "" -#: cinder/exception.py:501 +#: cinder/exception.py:449 #, python-format -msgid "Image %(image_id)s could not be found." +msgid "Glance metadata for volume/snapshot %(id)s cannot be found." msgstr "" -#: cinder/exception.py:505 +#: cinder/exception.py:453 #, python-format -msgid "Kernel not found for image %(image_id)s." +msgid "Failed to export for volume: %(reason)s" msgstr "" -#: cinder/exception.py:509 +#: cinder/exception.py:457 #, python-format -msgid "User %(user_id)s could not be found." +msgid "Failed to create metadata for volume: %(reason)s" msgstr "" -#: cinder/exception.py:513 +#: cinder/exception.py:461 #, python-format -msgid "Project %(project_id)s could not be found." +msgid "Failed to update metadata for volume: %(reason)s" msgstr "" -#: cinder/exception.py:517 +#: cinder/exception.py:465 #, python-format -msgid "User %(user_id)s is not a member of project %(project_id)s." +msgid "Failed to copy metadata to volume: %(reason)s" msgstr "" -#: cinder/exception.py:521 -#, python-format -msgid "Role %(role_id)s could not be found." -msgstr "" +#: cinder/exception.py:469 +#, fuzzy, python-format +msgid "Failed to copy image to volume: %(reason)s" +msgstr "Nicht möglich volume %s zufinden" -#: cinder/exception.py:525 -msgid "Cannot find SR to read/write VDI." +#: cinder/exception.py:473 +msgid "Invalid Ceph args provided for backup rbd operation" msgstr "" -#: cinder/exception.py:529 -#, python-format -msgid "%(req)s is required to create a network." +#: cinder/exception.py:477 +msgid "An error has occurred during backup operation" msgstr "" -#: cinder/exception.py:533 -#, python-format -msgid "Network %(network_id)s could not be found." +#: cinder/exception.py:481 +msgid "Backup RBD operation failed" msgstr "" -#: cinder/exception.py:537 +#: cinder/exception.py:485 #, python-format -msgid "Network could not be found for bridge %(bridge)s" +msgid "Backup %(backup_id)s could not be found." msgstr "" -#: cinder/exception.py:541 -#, python-format -msgid "Network could not be found for uuid %(uuid)s" +#: cinder/exception.py:489 +msgid "Failed to identify volume backend." msgstr "" -#: cinder/exception.py:545 +#: cinder/exception.py:493 #, python-format -msgid "Network could not be found with cidr %(cidr)s." +msgid "Invalid backup: %(reason)s" msgstr "" -#: cinder/exception.py:549 +#: cinder/exception.py:497 #, python-format -msgid "Network could not be found for instance %(instance_id)s." +msgid "Connection to swift failed: %(reason)s" msgstr "" -#: cinder/exception.py:553 -msgid "No networks defined." +#: cinder/exception.py:501 +#, python-format +msgid "Transfer %(transfer_id)s could not be found." msgstr "" -#: cinder/exception.py:557 +#: cinder/exception.py:505 #, python-format -msgid "" -"Either Network uuid %(network_uuid)s is not present or is not assigned to" -" the project %(project_id)s." 
+msgid "Volume migration failed: %(reason)s" msgstr "" -#: cinder/exception.py:562 +#: cinder/exception.py:509 #, python-format -msgid "Host is not set to the network (%(network_id)s)." +msgid "SSH command injection detected: %(command)s" msgstr "" -#: cinder/exception.py:566 +#: cinder/exception.py:513 #, python-format -msgid "Network %(network)s has active ports, cannot delete." +msgid "QoS Specs %(specs_id)s already exists." msgstr "" -#: cinder/exception.py:570 -msgid "Could not find the datastore reference(s) which the VM uses." +#: cinder/exception.py:517 +#, python-format +msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." msgstr "" -#: cinder/exception.py:574 +#: cinder/exception.py:522 #, python-format -msgid "No fixed IP associated with id %(id)s." +msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." msgstr "" -#: cinder/exception.py:578 +#: cinder/exception.py:527 #, python-format -msgid "Fixed ip not found for address %(address)s." +msgid "No such QoS spec %(specs_id)s." msgstr "" -#: cinder/exception.py:582 +#: cinder/exception.py:531 #, python-format -msgid "Instance %(instance_id)s has zero fixed ips." +msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." msgstr "" -#: cinder/exception.py:586 +#: cinder/exception.py:536 #, python-format -msgid "Network host %(host)s has zero fixed ips in network %(network_id)s." +msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." msgstr "" -#: cinder/exception.py:591 +#: cinder/exception.py:541 #, python-format -msgid "Instance %(instance_id)s doesn't have fixed ip '%(ip)s'." +msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." msgstr "" -#: cinder/exception.py:595 +#: cinder/exception.py:546 #, python-format -msgid "Host %(host)s has zero fixed ips." +msgid "Invalid qos specs: %(reason)s" msgstr "" -#: cinder/exception.py:599 +#: cinder/exception.py:550 #, python-format -msgid "" -"Fixed IP address (%(address)s) does not exist in network " -"(%(network_uuid)s)." +msgid "QoS Specs %(specs_id)s is still associated with entities." msgstr "" -#: cinder/exception.py:604 +#: cinder/exception.py:554 #, python-format -msgid "Fixed IP address %(address)s is already in use." +msgid "key manager error: %(reason)s" msgstr "" -#: cinder/exception.py:608 -#, python-format -msgid "Fixed IP address %(address)s is invalid." +#: cinder/exception.py:560 +msgid "Coraid Cinder Driver exception." msgstr "" -#: cinder/exception.py:612 -msgid "Zero fixed ips available." +#: cinder/exception.py:564 +msgid "Failed to encode json data." msgstr "" -#: cinder/exception.py:616 -msgid "Zero fixed ips could be found." +#: cinder/exception.py:568 +msgid "Login on ESM failed." msgstr "" -#: cinder/exception.py:620 -#, python-format -msgid "Floating ip not found for id %(id)s." +#: cinder/exception.py:572 +msgid "Relogin on ESM failed." msgstr "" -#: cinder/exception.py:624 +#: cinder/exception.py:576 #, python-format -msgid "The DNS entry %(name)s already exists in domain %(domain)s." +msgid "Group with name \"%(group_name)s\" not found." msgstr "" -#: cinder/exception.py:628 +#: cinder/exception.py:580 #, python-format -msgid "Floating ip not found for address %(address)s." +msgid "ESM configure request failed: %(message)s." msgstr "" -#: cinder/exception.py:632 +#: cinder/exception.py:584 #, python-format -msgid "Floating ip not found for host %(host)s." +msgid "Coraid ESM not available with reason: %(reason)s." 
msgstr "" -#: cinder/exception.py:636 -msgid "Zero floating ips available." +#: cinder/exception.py:589 +msgid "Zadara Cinder Driver exception." msgstr "" -#: cinder/exception.py:640 +#: cinder/exception.py:593 #, python-format -msgid "Floating ip %(address)s is associated." +msgid "Unable to create server object for initiator %(name)s" msgstr "" -#: cinder/exception.py:644 +#: cinder/exception.py:597 #, python-format -msgid "Floating ip %(address)s is not associated." +msgid "Unable to find server object for initiator %(name)s" msgstr "" -#: cinder/exception.py:648 -msgid "Zero floating ips exist." +#: cinder/exception.py:601 +msgid "Unable to find any active VPSA controller" msgstr "" -#: cinder/exception.py:652 +#: cinder/exception.py:605 #, python-format -msgid "Interface %(interface)s not found." +msgid "Failed to retrieve attachments for volume %(name)s" msgstr "" -#: cinder/exception.py:656 +#: cinder/exception.py:609 #, python-format -msgid "Keypair %(name)s not found for user %(user_id)s" +msgid "Invalid attachment info for volume %(name)s: %(reason)s" msgstr "" -#: cinder/exception.py:660 +#: cinder/exception.py:613 #, python-format -msgid "Certificate %(certificate_id)s not found." +msgid "Bad HTTP response status %(status)s" msgstr "" -#: cinder/exception.py:664 -#, python-format -msgid "Service %(service_id)s could not be found." +#: cinder/exception.py:618 +msgid "Bad response from SolidFire API" msgstr "" -#: cinder/exception.py:668 -#, python-format -msgid "Host %(host)s could not be found." +#: cinder/exception.py:622 +msgid "SolidFire Cinder Driver exception" msgstr "" -#: cinder/exception.py:672 +#: cinder/exception.py:626 #, python-format -msgid "Compute host %(host)s could not be found." +msgid "Error in SolidFire API response: data=%(data)s" msgstr "" -#: cinder/exception.py:676 +#: cinder/exception.py:630 #, python-format -msgid "Could not find binary %(binary)s on host %(host)s." +msgid "Unable to locate account %(account_name)s on Solidfire device" msgstr "" -#: cinder/exception.py:680 +#: cinder/exception.py:636 #, python-format -msgid "Auth token %(token)s could not be found." +msgid "Invalid 3PAR Domain: %(err)s" msgstr "" -#: cinder/exception.py:684 -#, python-format -msgid "Access Key %(access_key)s could not be found." +#: cinder/exception.py:641 +msgid "Unknown NFS exception" msgstr "" -#: cinder/exception.py:688 -msgid "Quota could not be found" +#: cinder/exception.py:645 +msgid "No mounted NFS shares found" msgstr "" -#: cinder/exception.py:692 +#: cinder/exception.py:649 cinder/exception.py:662 #, python-format -msgid "Quota for project %(project_id)s could not be found." +msgid "There is no share which can host %(volume_size)sG" msgstr "" -#: cinder/exception.py:696 -#, python-format -msgid "Quota class %(class_name)s could not be found." +#: cinder/exception.py:654 +msgid "Unknown Gluster exception" msgstr "" -#: cinder/exception.py:700 -#, python-format -msgid "Security group %(security_group_id)s not found." +#: cinder/exception.py:658 +msgid "No mounted Gluster shares found" msgstr "" -#: cinder/exception.py:704 -#, python-format -msgid "Security group %(security_group_id)s not found for project %(project_id)s." +#: cinder/manager.py:133 +msgid "Notifying Schedulers of capabilities ..." msgstr "" -#: cinder/exception.py:709 -#, python-format -msgid "Security group with rule %(rule_id)s not found." 
+#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" msgstr "" -#: cinder/exception.py:713 +#: cinder/quota.py:105 #, python-format msgid "" -"Security group %(security_group_id)s is already associated with the " -"instance %(instance_id)s" +"Default quota for resource: %(res)s is set by the default quota flag: " +"quota_%(res)s, it is now deprecated. Please use the the default quota " +"class for default quota." msgstr "" -#: cinder/exception.py:718 +#: cinder/quota.py:748 #, python-format -msgid "" -"Security group %(security_group_id)s is not associated with the instance " -"%(instance_id)s" +msgid "Created reservations %s" msgstr "" -#: cinder/exception.py:723 +#: cinder/quota.py:770 #, python-format -msgid "Migration %(migration_id)s could not be found." +msgid "Failed to commit reservations %s" msgstr "" -#: cinder/exception.py:727 +#: cinder/quota.py:790 #, python-format -msgid "Migration not found for instance %(instance_id)s with status %(status)s." +msgid "Failed to roll back reservations %s" +msgstr "" + +#: cinder/quota.py:876 +msgid "Cannot register resource" msgstr "" -#: cinder/exception.py:732 +#: cinder/quota.py:879 +msgid "Cannot register resources" +msgstr "" + +#: cinder/quota_utils.py:46 #, python-format -msgid "Console pool %(pool_id)s could not be found." +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume - " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" msgstr "" -#: cinder/exception.py:736 +#: cinder/quota_utils.py:56 cinder/transfer/api.py:168 +#: cinder/volume/flows/api/create_volume.py:520 #, python-format msgid "" -"Console pool of type %(console_type)s for compute host %(compute_host)s " -"on proxy host %(host)s not found." +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" msgstr "" -#: cinder/exception.py:742 +#: cinder/service.py:95 #, python-format -msgid "Console %(console_id)s could not be found." +msgid "Starting %(topic)s node (version %(version_string)s)" msgstr "" -#: cinder/exception.py:746 +#: cinder/service.py:108 cinder/openstack/common/rpc/service.py:47 #, python-format -msgid "Console for instance %(instance_id)s could not be found." +msgid "Creating Consumer connection for Service %s" msgstr "" -#: cinder/exception.py:750 +#: cinder/service.py:148 #, python-format msgid "" -"Console for instance %(instance_id)s in pool %(pool_id)s could not be " -"found." +"Report interval must be less than service down time. Current config " +"service_down_time: %(service_down_time)s, report_interval for this: " +"service is: %(report_interval)s. Setting global service_down_time to: " +"%(new_down_time)s" msgstr "" -#: cinder/exception.py:755 -#, python-format -msgid "Invalid console type %(console_type)s " +#: cinder/service.py:216 +msgid "Service killed that has no database entry" msgstr "" -#: cinder/exception.py:759 -msgid "Zero instance types found." +#: cinder/service.py:255 +msgid "The service database object disappeared, Recreating it." +msgstr "Das Service-Datenbank-Objekt ist verschwunden, es wird erneut erzeugt." + +#: cinder/service.py:270 +msgid "Recovered model server connection!" msgstr "" -#: cinder/exception.py:763 -#, python-format -msgid "Instance type %(instance_type_id)s could not be found." 
+#: cinder/service.py:276 +msgid "model server went away" msgstr "" -#: cinder/exception.py:767 +#: cinder/service.py:298 #, python-format -msgid "Instance type with name %(instance_type_name)s could not be found." +msgid "" +"Value of config option %(name)s_workers must be integer greater than 1. " +"Input value ignored." msgstr "" -#: cinder/exception.py:772 -#, python-format -msgid "Flavor %(flavor_id)s could not be found." +#: cinder/service.py:373 +msgid "serve() can only be called once" msgstr "" -#: cinder/exception.py:776 +#: cinder/service.py:379 cinder/openstack/common/service.py:166 +#: cinder/openstack/common/service.py:384 +#, fuzzy +msgid "Full set of CONF:" +msgstr "Alle vorhandenen FLAGS:" + +#: cinder/service.py:387 #, python-format -msgid "Cell %(cell_id)s could not be found." +msgid "%s : FLAG SET " msgstr "" -#: cinder/exception.py:780 +#: cinder/utils.py:96 #, python-format -msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgid "Can not translate %s to integer." msgstr "" -#: cinder/exception.py:784 +#: cinder/utils.py:127 #, python-format -msgid "Scheduler cost function %(cost_fn_str)s could not be found." +msgid "May specify only one of %s" msgstr "" -#: cinder/exception.py:789 -#, python-format -msgid "Scheduler weight flag not found: %(flag_name)s" +#: cinder/utils.py:212 +msgid "Specify a password or private_key" msgstr "" -#: cinder/exception.py:793 +#: cinder/utils.py:228 #, python-format -msgid "Instance %(instance_id)s has no metadata with key %(metadata_key)s." +msgid "Error connecting via ssh: %s" msgstr "" -#: cinder/exception.py:798 +#: cinder/utils.py:412 #, python-format -msgid "" -"Instance Type %(instance_type_id)s has no extra specs with key " -"%(extra_specs_key)s." +msgid "Invalid backend: %s" msgstr "" -#: cinder/exception.py:803 -msgid "LDAP object could not be found" +#: cinder/utils.py:423 +#, python-format +msgid "backend %s" msgstr "" -#: cinder/exception.py:807 +#: cinder/utils.py:698 #, python-format -msgid "LDAP user %(user_id)s could not be found." +msgid "Could not remove tmpdir: %s" msgstr "" -#: cinder/exception.py:811 +#: cinder/utils.py:759 #, python-format -msgid "LDAP group %(group_id)s could not be found." +msgid "Volume driver %s not initialized" msgstr "" -#: cinder/exception.py:815 +#: cinder/wsgi.py:127 cinder/openstack/common/sslutils.py:50 #, python-format -msgid "LDAP user %(user_id)s is not a member of group %(group_id)s." +msgid "Unable to find cert_file : %s" msgstr "" -#: cinder/exception.py:819 +#: cinder/wsgi.py:130 cinder/openstack/common/sslutils.py:53 +#, fuzzy, python-format +msgid "Unable to find ca_file : %s" +msgstr "Nicht möglich volume %s zufinden" + +#: cinder/wsgi.py:133 cinder/openstack/common/sslutils.py:56 #, python-format -msgid "File %(file_path)s could not be found." +msgid "Unable to find key_file : %s" msgstr "" -#: cinder/exception.py:823 -msgid "Zero files could be found." +#: cinder/wsgi.py:136 cinder/openstack/common/sslutils.py:59 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" msgstr "" -#: cinder/exception.py:827 +#: cinder/wsgi.py:169 #, python-format -msgid "Virtual switch associated with the network adapter %(adapter)s not found." +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" msgstr "" -#: cinder/exception.py:832 +#: cinder/wsgi.py:206 #, python-format -msgid "Network adapter %(adapter)s could not be found." 
+msgid "Started %(name)s on %(host)s:%(port)s" msgstr "" -#: cinder/exception.py:836 -#, python-format -msgid "Class %(class_name)s could not be found: %(exception)s" +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." msgstr "" -#: cinder/exception.py:840 -msgid "Action not allowed." +#: cinder/wsgi.py:244 +msgid "WSGI server has stopped." msgstr "" -#: cinder/exception.py:844 -#, python-format -msgid "Unable to use global role %(role_id)s" +#: cinder/wsgi.py:313 +msgid "You must implement __call__" msgstr "" -#: cinder/exception.py:848 -msgid "Rotation is not allowed for snapshots" +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." msgstr "" -#: cinder/exception.py:852 -msgid "Rotation param is required for backup image_type" +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." msgstr "" -#: cinder/exception.py:861 -#, python-format -msgid "Key pair %(key_name)s already exists." +#: cinder/api/common.py:92 cinder/api/common.py:126 cinder/volume/api.py:266 +msgid "limit param must be an integer" msgstr "" -#: cinder/exception.py:865 -#, python-format -msgid "User %(user)s already exists." +#: cinder/api/common.py:95 cinder/api/common.py:130 cinder/volume/api.py:263 +msgid "limit param must be positive" msgstr "" -#: cinder/exception.py:869 -#, python-format -msgid "LDAP user %(user)s already exists." +#: cinder/api/common.py:120 +msgid "offset param must be an integer" msgstr "" -#: cinder/exception.py:873 -#, python-format -msgid "LDAP group %(group)s already exists." +#: cinder/api/common.py:134 +msgid "offset param must be positive" msgstr "" -#: cinder/exception.py:877 +#: cinder/api/common.py:162 #, python-format -msgid "User %(uid)s is already a member of the group %(group_dn)s" +msgid "marker [%s] not found" msgstr "" -#: cinder/exception.py:882 +#: cinder/api/common.py:189 #, python-format -msgid "Project %(project)s already exists." +msgid "href %s does not contain version" msgstr "" -#: cinder/exception.py:886 -#, python-format -msgid "Instance %(name)s already exists." +#: cinder/api/extensions.py:182 +msgid "Initializing extension manager." msgstr "" -#: cinder/exception.py:890 +#: cinder/api/extensions.py:197 #, python-format -msgid "Instance Type %(name)s already exists." +msgid "Loaded extension: %s" msgstr "" -#: cinder/exception.py:894 +#: cinder/api/extensions.py:235 #, python-format -msgid "Volume Type %(name)s already exists." +msgid "Ext name: %s" msgstr "" -#: cinder/exception.py:898 +#: cinder/api/extensions.py:236 #, python-format -msgid "%(path)s is on shared storage: %(reason)s" -msgstr "" - -#: cinder/exception.py:902 -msgid "Migration error" +msgid "Ext alias: %s" msgstr "" -#: cinder/exception.py:906 +#: cinder/api/extensions.py:237 #, python-format -msgid "Malformed message body: %(reason)s" +msgid "Ext description: %s" msgstr "" -#: cinder/exception.py:910 +#: cinder/api/extensions.py:239 #, python-format -msgid "Could not find config at %(path)s" +msgid "Ext namespace: %s" msgstr "" -#: cinder/exception.py:914 +#: cinder/api/extensions.py:240 #, python-format -msgid "Could not load paste app '%(name)s' from %(path)s" -msgstr "" - -#: cinder/exception.py:918 -msgid "When resizing, instances must change size!" 
-msgstr "" - -#: cinder/exception.py:922 -msgid "Image is larger than instance type allows" +msgid "Ext updated: %s" msgstr "" -#: cinder/exception.py:926 -msgid "1 or more Zones could not complete the request" +#: cinder/api/extensions.py:242 +#, python-format +msgid "Exception loading extension: %s" msgstr "" -#: cinder/exception.py:930 -msgid "Instance type's memory is too small for requested image." +#: cinder/api/extensions.py:256 +#, python-format +msgid "Loading extension %s" msgstr "" -#: cinder/exception.py:934 -msgid "Instance type's disk is too small for requested image." +#: cinder/api/extensions.py:262 +#, python-format +msgid "Calling extension factory %s" msgstr "" -#: cinder/exception.py:938 +#: cinder/api/extensions.py:276 #, python-format -msgid "Insufficient free memory on compute node to start %(uuid)s." +msgid "osapi_volume_extension is set to deprecated path: %s" msgstr "" -#: cinder/exception.py:942 -msgid "Could not fetch bandwidth/cpu/disk metrics for this host." +#: cinder/api/extensions.py:278 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" msgstr "" -#: cinder/exception.py:946 +#: cinder/api/extensions.py:287 #, python-format -msgid "No valid host was found. %(reason)s" +msgid "Failed to load extension %(ext_factory)s: %(exc)s" msgstr "" -#: cinder/exception.py:950 +#: cinder/api/extensions.py:356 #, python-format -msgid "Host %(host)s is not up or doesn't exist." +msgid "Failed to load extension %(classpath)s: %(exc)s" msgstr "" -#: cinder/exception.py:954 -msgid "Quota exceeded" +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" msgstr "" -#: cinder/exception.py:958 -#, python-format +#: cinder/api/sizelimit.py:25 msgid "" -"Aggregate %(aggregate_id)s: action '%(action)s' caused an error: " -"%(reason)s." +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" msgstr "" -#: cinder/exception.py:963 -#, python-format -msgid "Aggregate %(aggregate_id)s could not be found." +#: cinder/api/xmlutil.py:266 +msgid "element is not a child" msgstr "" -#: cinder/exception.py:967 -#, python-format -msgid "Aggregate %(aggregate_name)s already exists." +#: cinder/api/xmlutil.py:463 +msgid "root element selecting a list" msgstr "" -#: cinder/exception.py:971 +#: cinder/api/xmlutil.py:786 #, python-format -msgid "Aggregate %(aggregate_id)s has no host %(host)s." +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" msgstr "" -#: cinder/exception.py:975 -#, python-format -msgid "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s." +#: cinder/api/xmlutil.py:907 +msgid "subclasses must implement construct()!" msgstr "" -#: cinder/exception.py:980 +#: cinder/api/contrib/admin_actions.py:81 #, python-format -msgid "Host %(host)s already member of another aggregate." +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" msgstr "" -#: cinder/exception.py:984 +#: cinder/api/contrib/backups.py:128 #, python-format -msgid "Aggregate %(aggregate_id)s already has host %(host)s." 
+msgid "show called for member %s" msgstr "" -#: cinder/exception.py:988 +#: cinder/api/contrib/backups.py:140 #, python-format -msgid "Detected more than one volume with name %(vol_name)s" +msgid "delete called for member %s" msgstr "" -#: cinder/exception.py:992 +#: cinder/api/contrib/backups.py:143 #, python-format -msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgid "Delete backup with id: %s" msgstr "" -#: cinder/exception.py:997 -msgid "Unable to create instance type" +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" msgstr "" -#: cinder/exception.py:1001 -msgid "Bad response from SolidFire API" +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:227 +#: cinder/api/contrib/volume_transfer.py:157 +#: cinder/api/contrib/volume_transfer.py:193 +msgid "Incorrect request body format" msgstr "" -#: cinder/exception.py:1005 +#: cinder/api/contrib/backups.py:201 #, python-format -msgid "Error in SolidFire API response: status=%(status)s" +msgid "Creating backup of volume %(volume_id)s in container %(container)s" msgstr "" -#: cinder/exception.py:1009 +#: cinder/api/contrib/backups.py:224 #, python-format -msgid "Error in SolidFire API response: data=%(data)s" +msgid "Restoring backup %(backup_id)s (%(body)s)" msgstr "" -#: cinder/exception.py:1013 +#: cinder/api/contrib/backups.py:234 #, python-format -msgid "Detected existing vlan with id %(vlan)d" +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" msgstr "" -#: cinder/exception.py:1017 -#, python-format -msgid "Instance %(instance_id)s could not be found." +#: cinder/api/contrib/extended_snapshot_attributes.py:60 +msgid "Snapshot not found." msgstr "" -#: cinder/exception.py:1021 -#, python-format -msgid "Invalid id: %(val)s (expecting \"i-...\")." +#: cinder/api/contrib/hosts.py:86 cinder/api/openstack/wsgi.py:245 +msgid "cannot understand XML" msgstr "" -#: cinder/exception.py:1025 +#: cinder/api/contrib/hosts.py:136 #, python-format -msgid "Could not fetch image %(image)s" +msgid "Host '%s' could not be found." msgstr "" -#: cinder/log.py:315 +#: cinder/api/contrib/hosts.py:165 #, python-format -msgid "syslog facility must be one of: %s" +msgid "Invalid status: '%s'" msgstr "" -#: cinder/manager.py:146 +#: cinder/api/contrib/hosts.py:168 #, python-format -msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgid "Invalid update setting: '%s'" msgstr "" -#: cinder/manager.py:152 +#: cinder/api/contrib/hosts.py:180 #, python-format -msgid "Running periodic task %(full_task_name)s" +msgid "Setting host %(host)s to %(state)s." msgstr "" -#: cinder/manager.py:159 -#, python-format -msgid "Error during %(full_task_name)s: %(e)s" +#: cinder/api/contrib/hosts.py:206 +msgid "Describe-resource is admin only functionality" msgstr "" -#: cinder/manager.py:203 -msgid "Notifying Schedulers of capabilities ..." +#: cinder/api/contrib/hosts.py:214 +msgid "Host not found" msgstr "" -#: cinder/policy.py:30 -msgid "JSON file representing policy" +#: cinder/api/contrib/qos_specs_manage.py:111 +msgid "Please specify a name for QoS specs." msgstr "" -#: cinder/policy.py:33 -msgid "Rule checked when requested rule is not found" +#: cinder/api/contrib/qos_specs_manage.py:220 +msgid "Failed to disassociate qos specs." msgstr "" -#: cinder/service.py:137 -msgid "SIGTERM received" +#: cinder/api/contrib/qos_specs_manage.py:222 +msgid "Qos specs still in use." 
msgstr "" -#: cinder/service.py:177 -#, python-format -msgid "Starting %(topic)s node (version %(vcs_string)s)" +#: cinder/api/contrib/qos_specs_manage.py:298 +#: cinder/api/contrib/qos_specs_manage.py:351 +msgid "Volume Type id must not be None." msgstr "" -#: cinder/service.py:195 -#, python-format -msgid "Creating Consumer connection for Service %s" +#: cinder/api/contrib/quota_classes.py:72 +msgid "Missing required element quota_class_set in request body." msgstr "" -#: cinder/service.py:282 -msgid "Service killed that has no database entry" +#: cinder/api/contrib/quota_classes.py:81 +msgid "Quota class limit must be specified as an integer value." msgstr "" -#: cinder/service.py:319 -msgid "The service database object disappeared, Recreating it." -msgstr "Das Service-Datenbank-Objekt ist verschwunden, es wird erneut erzeugt." +#: cinder/api/contrib/quota_classes.py:85 +msgid "Quota class limit must be -1 or greater." +msgstr "" -#: cinder/service.py:334 -msgid "Recovered model server connection!" +#: cinder/api/contrib/quotas.py:60 +msgid "Quota limit must be specified as an integer value." msgstr "" -#: cinder/service.py:340 -msgid "model server went away" +#: cinder/api/contrib/quotas.py:65 +msgid "Quota limit must be -1 or greater." msgstr "" -#: cinder/service.py:433 -msgid "Full set of FLAGS:" -msgstr "Alle vorhandenen FLAGS:" +#: cinder/api/contrib/quotas.py:100 +msgid "Missing required element quota_set in request body." +msgstr "" -#: cinder/service.py:440 +#: cinder/api/contrib/quotas.py:111 #, python-format -msgid "%(flag)s : FLAG SET " +msgid "Bad key(s) in quota set: %s" msgstr "" -#: cinder/utils.py:79 -#, python-format -msgid "Inner Exception: %s" +#: cinder/api/contrib/scheduler_hints.py:36 +msgid "Malformed scheduler_hints attribute" msgstr "" -#: cinder/utils.py:165 -#, python-format -msgid "Fetching %s" -msgstr "Hole %s" +#: cinder/api/contrib/services.py:84 +msgid "" +"Query by service parameter is deprecated. Please use binary parameter " +"instead." +msgstr "" -#: cinder/utils.py:210 -#, python-format -msgid "Got unknown keyword args to utils.execute: %r" +#: cinder/api/contrib/snapshot_actions.py:51 +msgid "'status' must be specified." msgstr "" -#: cinder/utils.py:220 +#: cinder/api/contrib/snapshot_actions.py:61 #, python-format -msgid "Running cmd (subprocess): %s" -msgstr "Führe Kommando (subprocess) aus: %s" +msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" +msgstr "" -#: cinder/utils.py:236 cinder/utils.py:315 +#: cinder/api/contrib/snapshot_actions.py:67 #, python-format -msgid "Result was %s" -msgstr "Ergebnis war %s" +msgid "" +"Provided snapshot status %(provided)s not allowed for snapshot with " +"status %(current)s." +msgstr "" -#: cinder/utils.py:249 -#, python-format -msgid "%r failed. Retrying." 
+#: cinder/api/contrib/snapshot_actions.py:79 +msgid "progress must be an integer percentage" msgstr "" -#: cinder/utils.py:291 -#, python-format -msgid "Running cmd (SSH): %s" +#: cinder/api/contrib/types_extra_specs.py:101 +msgid "Request body empty" msgstr "" -#: cinder/utils.py:293 -msgid "Environment not supported over SSH" +#: cinder/api/contrib/types_extra_specs.py:105 +#: cinder/api/v1/snapshot_metadata.py:75 cinder/api/v1/volume_metadata.py:75 +#: cinder/api/v2/snapshot_metadata.py:75 cinder/api/v2/volume_metadata.py:74 +msgid "Request body and URI mismatch" msgstr "" -#: cinder/utils.py:297 -msgid "process_input not supported over SSH" +#: cinder/api/contrib/types_extra_specs.py:108 +#: cinder/api/v1/snapshot_metadata.py:79 cinder/api/v1/volume_metadata.py:79 +#: cinder/api/v2/snapshot_metadata.py:79 cinder/api/v2/volume_metadata.py:78 +msgid "Request body contains too many items" msgstr "" -#: cinder/utils.py:352 -#, python-format -msgid "debug in callback: %s" +#: cinder/api/contrib/types_extra_specs.py:150 +msgid "" +"Key names can only contain alphanumeric characters, underscores, periods," +" colons and hyphens." msgstr "" -#: cinder/utils.py:534 +#: cinder/api/contrib/volume_actions.py:99 #, python-format -msgid "Link Local address is not found.:%s" +msgid "" +"Invalid request to attach volume to an instance %(instance_uuid)s and a " +"host %(host_name)s simultaneously" msgstr "" -#: cinder/utils.py:537 -#, python-format -msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +#: cinder/api/contrib/volume_actions.py:107 +msgid "Invalid request to attach volume to an invalid target" msgstr "" -#: cinder/utils.py:648 -#, python-format -msgid "Invalid backend: %s" +#: cinder/api/contrib/volume_actions.py:111 +msgid "" +"Invalid request to attach volume with an invalid mode. Attaching mode " +"should be 'rw' or 'ro'" msgstr "" -#: cinder/utils.py:659 -#, python-format -msgid "backend %s" +#: cinder/api/contrib/volume_actions.py:196 +msgid "Unable to fetch connection information from backend." msgstr "" -#: cinder/utils.py:709 -msgid "in looping call" +#: cinder/api/contrib/volume_actions.py:216 +msgid "Unable to terminate volume connection from backend." msgstr "" -#: cinder/utils.py:927 -#, python-format -msgid "Attempting to grab semaphore \"%(lock)s\" for method \"%(method)s\"..." +#: cinder/api/contrib/volume_actions.py:229 +msgid "No image_name was specified in request." msgstr "" -#: cinder/utils.py:931 -#, python-format -msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +#: cinder/api/contrib/volume_actions.py:237 +msgid "Bad value for 'force' parameter." msgstr "" -#: cinder/utils.py:935 -#, python-format -msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +#: cinder/api/contrib/volume_actions.py:240 +msgid "'force' is not string or bool." msgstr "" -#: cinder/utils.py:942 -#, python-format -msgid "Got file lock \"%(lock)s\" for method \"%(method)s\"..." +#: cinder/api/contrib/volume_actions.py:280 +msgid "New volume size must be specified as an integer." msgstr "" -#: cinder/utils.py:1001 -#, python-format -msgid "Found sentinel %(filename)s for pid %(pid)s" +#: cinder/api/contrib/volume_actions.py:299 +msgid "Must specify readonly in request." 
msgstr "" -#: cinder/utils.py:1008 -#, python-format -msgid "Cleaned sentinel %(filename)s for pid %(pid)s" +#: cinder/api/contrib/volume_actions.py:307 +msgid "Bad value for 'readonly'" msgstr "" -#: cinder/utils.py:1023 -#, python-format -msgid "Found lockfile %(file)s with link count %(count)d" +#: cinder/api/contrib/volume_actions.py:311 +msgid "'readonly' not string or bool" msgstr "" -#: cinder/utils.py:1028 -#, python-format -msgid "Cleaned lockfile %(file)s with link count %(count)d" +#: cinder/api/contrib/volume_actions.py:325 +msgid "New volume type must be specified." msgstr "" -#: cinder/utils.py:1138 -#, python-format -msgid "Expected object of type: %s" +#: cinder/api/contrib/volume_transfer.py:131 +msgid "Listing volume transfers" msgstr "" -#: cinder/utils.py:1169 +#: cinder/api/contrib/volume_transfer.py:147 #, python-format -msgid "Invalid server_string: %s" +msgid "Creating new volume transfer %s" msgstr "" -#: cinder/utils.py:1298 +#: cinder/api/contrib/volume_transfer.py:162 +#, fuzzy, python-format +msgid "Creating transfer of volume %s" +msgstr "Nicht möglich volume %s zufinden" + +#: cinder/api/contrib/volume_transfer.py:183 #, python-format -msgid "timefunc: '%(name)s' took %(total_time).2f secs" +msgid "Accepting volume transfer %s" msgstr "" -#: cinder/utils.py:1330 -msgid "Original exception being dropped" +#: cinder/api/contrib/volume_transfer.py:196 +#, python-format +msgid "Accepting transfer %s" msgstr "" -#: cinder/utils.py:1461 +#: cinder/api/contrib/volume_transfer.py:217 #, python-format -msgid "Class %(fullname)s is deprecated: %(msg)s" +msgid "Delete transfer with id: %s" msgstr "" -#: cinder/utils.py:1463 -#, python-format -msgid "Class %(fullname)s is deprecated" +#: cinder/api/contrib/volume_type_encryption.py:64 +msgid "key_size must be non-negative" msgstr "" -#: cinder/utils.py:1495 -#, python-format -msgid "Function %(name)s in %(location)s is deprecated: %(msg)s" +#: cinder/api/contrib/volume_type_encryption.py:67 +msgid "key_size must be an integer" msgstr "" -#: cinder/utils.py:1497 -#, python-format -msgid "Function %(name)s in %(location)s is deprecated" +#: cinder/api/contrib/volume_type_encryption.py:73 +msgid "provider must be defined" msgstr "" -#: cinder/utils.py:1681 -#, python-format -msgid "Could not remove tmpdir: %s" +#: cinder/api/contrib/volume_type_encryption.py:75 +msgid "control_location must be defined" msgstr "" -#: cinder/wsgi.py:97 +#: cinder/api/contrib/volume_type_encryption.py:83 #, python-format -msgid "Started %(name)s on %(host)s:%(port)s" +msgid "Valid control location are: %s" msgstr "" -#: cinder/wsgi.py:108 -msgid "Stopping WSGI server." +#: cinder/api/contrib/volume_type_encryption.py:111 +msgid "Cannot create encryption specs. Volume type in use." msgstr "" -#: cinder/wsgi.py:111 -msgid "Stopping raw TCP server." +#: cinder/api/contrib/volume_type_encryption.py:115 +msgid "Create body is not valid." msgstr "" -#: cinder/wsgi.py:117 -#, python-format -msgid "Starting TCP server %(arg0)s on %(host)s:%(port)s" +#: cinder/api/contrib/volume_type_encryption.py:157 +msgid "Cannot delete encryption specs. Volume type in use." msgstr "" -#: cinder/wsgi.py:133 -msgid "WSGI server has stopped." +#: cinder/api/middleware/auth.py:108 +msgid "Invalid service catalog json." 
msgstr "" -#: cinder/wsgi.py:211 -msgid "You must implement __call__" +#: cinder/api/middleware/fault.py:44 +#, python-format +msgid "Caught error: %s" msgstr "" -#: cinder/api/direct.py:218 -msgid "not available" +#: cinder/api/middleware/fault.py:53 cinder/api/openstack/wsgi.py:984 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" msgstr "" -#: cinder/api/direct.py:299 +#: cinder/api/middleware/fault.py:69 #, python-format -msgid "Returned non-serializeable type: %s" +msgid "%(exception)s: %(explanation)s" msgstr "" -#: cinder/api/sizelimit.py:51 +#: cinder/api/middleware/sizelimit.py:55 cinder/api/middleware/sizelimit.py:64 +#: cinder/api/middleware/sizelimit.py:78 msgid "Request is too large." msgstr "" -#: cinder/api/validator.py:142 -#, python-format -msgid "%(key)s with value %(value)s failed validator %(validator)s" +#: cinder/api/openstack/__init__.py:69 +msgid "Must specify an ExtensionManager class" msgstr "" -#: cinder/api/ec2/__init__.py:73 +#: cinder/api/openstack/__init__.py:80 #, python-format -msgid "%(code)s: %(message)s" +msgid "Extended resource: %s" msgstr "" -#: cinder/api/ec2/__init__.py:95 +#: cinder/api/openstack/__init__.py:104 #, python-format -msgid "FaultWrapper: %s" +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" msgstr "" -#: cinder/api/ec2/__init__.py:170 -msgid "Too many failed authentications." +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" msgstr "" -#: cinder/api/ec2/__init__.py:180 -#, python-format +#: cinder/api/openstack/__init__.py:126 msgid "" -"Access key %(access_key)s has had %(failures)d failed authentications and" -" will be locked out for %(lock_mins)d minutes." +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." msgstr "" -#: cinder/api/ec2/__init__.py:267 -msgid "Signature not provided" +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." 
msgstr "" -#: cinder/api/ec2/__init__.py:271 -msgid "Access key not provided" +#: cinder/api/openstack/wsgi.py:220 cinder/api/openstack/wsgi.py:634 +msgid "cannot understand JSON" msgstr "" -#: cinder/api/ec2/__init__.py:306 cinder/api/ec2/__init__.py:319 -msgid "Failure communicating with keystone" +#: cinder/api/openstack/wsgi.py:639 +msgid "too many body keys" msgstr "" -#: cinder/api/ec2/__init__.py:388 +#: cinder/api/openstack/wsgi.py:677 #, python-format -msgid "Authentication Failure: %s" +msgid "Exception handling resource: %s" msgstr "" -#: cinder/api/ec2/__init__.py:404 +#: cinder/api/openstack/wsgi.py:682 #, python-format -msgid "Authenticated Request For %(uname)s:%(pname)s)" +msgid "Fault thrown: %s" msgstr "" -#: cinder/api/ec2/__init__.py:435 +#: cinder/api/openstack/wsgi.py:685 #, python-format -msgid "action: %s" +msgid "HTTP exception thrown: %s" msgstr "" -#: cinder/api/ec2/__init__.py:437 -#, python-format -msgid "arg: %(key)s\t\tval: %(value)s" +#: cinder/api/openstack/wsgi.py:793 +msgid "Empty body provided in request" msgstr "" -#: cinder/api/ec2/__init__.py:512 -#, python-format -msgid "Unauthorized request for controller=%(controller)s and action=%(action)s" +#: cinder/api/openstack/wsgi.py:799 +msgid "Unrecognized Content-Type provided in request" msgstr "" -#: cinder/api/ec2/__init__.py:584 -#, python-format -msgid "InstanceNotFound raised: %s" +#: cinder/api/openstack/wsgi.py:803 +msgid "No Content-Type provided in request" msgstr "" -#: cinder/api/ec2/__init__.py:590 +#: cinder/api/openstack/wsgi.py:914 #, python-format -msgid "VolumeNotFound raised: %s" +msgid "There is no such action: %s" msgstr "" -#: cinder/api/ec2/__init__.py:596 -#, python-format -msgid "SnapshotNotFound raised: %s" +#: cinder/api/openstack/wsgi.py:917 cinder/api/openstack/wsgi.py:930 +#: cinder/api/v1/snapshot_metadata.py:53 cinder/api/v1/snapshot_metadata.py:71 +#: cinder/api/v1/snapshot_metadata.py:96 cinder/api/v1/snapshot_metadata.py:121 +#: cinder/api/v1/volume_metadata.py:53 cinder/api/v1/volume_metadata.py:71 +#: cinder/api/v1/volume_metadata.py:96 cinder/api/v1/volume_metadata.py:121 +#: cinder/api/v2/snapshot_metadata.py:53 cinder/api/v2/snapshot_metadata.py:71 +#: cinder/api/v2/snapshot_metadata.py:96 cinder/api/v2/snapshot_metadata.py:121 +#: cinder/api/v2/volume_metadata.py:52 cinder/api/v2/volume_metadata.py:70 +#: cinder/api/v2/volume_metadata.py:95 cinder/api/v2/volume_metadata.py:120 +msgid "Malformed request body" msgstr "" -#: cinder/api/ec2/__init__.py:602 -#, python-format -msgid "NotFound raised: %s" +#: cinder/api/openstack/wsgi.py:927 +msgid "Unsupported Content-Type" msgstr "" -#: cinder/api/ec2/__init__.py:605 -#, python-format -msgid "EC2APIError raised: %s" +#: cinder/api/openstack/wsgi.py:939 +msgid "Malformed request url" msgstr "" -#: cinder/api/ec2/__init__.py:613 +#: cinder/api/openstack/wsgi.py:987 #, python-format -msgid "KeyPairExists raised: %s" +msgid "%(url)s returned a fault: %(e)s" msgstr "" -#: cinder/api/ec2/__init__.py:617 -#, python-format -msgid "InvalidParameterValue raised: %s" +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." msgstr "" -#: cinder/api/ec2/__init__.py:621 -#, python-format -msgid "InvalidPortRange raised: %s" +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." 
msgstr "" -#: cinder/api/ec2/__init__.py:625 +#: cinder/api/v1/limits.py:139 cinder/api/v2/limits.py:138 #, python-format -msgid "NotAuthorized raised: %s" +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." msgstr "" -#: cinder/api/ec2/__init__.py:629 -#, python-format -msgid "InvalidRequest raised: %s" +#: cinder/api/v1/limits.py:264 cinder/api/v2/limits.py:261 +msgid "This request was rate-limited." msgstr "" -#: cinder/api/ec2/__init__.py:633 -#, python-format -msgid "QuotaError raised: %s" +#: cinder/api/v1/snapshot_metadata.py:37 cinder/api/v1/snapshot_metadata.py:117 +#: cinder/api/v1/snapshot_metadata.py:156 cinder/api/v2/snapshot_metadata.py:37 +#: cinder/api/v2/snapshot_metadata.py:117 +#: cinder/api/v2/snapshot_metadata.py:156 +msgid "snapshot does not exist" +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:139 +#: cinder/api/v1/snapshot_metadata.py:149 cinder/api/v1/volume_metadata.py:139 +#: cinder/api/v1/volume_metadata.py:149 cinder/api/v2/snapshot_metadata.py:139 +#: cinder/api/v2/snapshot_metadata.py:149 cinder/api/v2/volume_metadata.py:138 +#: cinder/api/v2/volume_metadata.py:148 +msgid "Metadata item was not found" msgstr "" -#: cinder/api/ec2/__init__.py:637 +#: cinder/api/v1/snapshots.py:119 cinder/api/v2/snapshots.py:120 #, python-format -msgid "Invalid id: bogus (expecting \"i-...\"): %s" +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:184 +msgid "'volume_id' must be specified" msgstr "" -#: cinder/api/ec2/__init__.py:646 +#: cinder/api/v1/snapshots.py:182 cinder/api/v2/snapshots.py:193 #, python-format -msgid "Unexpected error raised: %s" +msgid "Create snapshot from volume %s" msgstr "" -#: cinder/api/ec2/__init__.py:647 +#: cinder/api/v1/snapshots.py:186 cinder/api/v2/snapshots.py:202 #, python-format -msgid "Environment: %s" +msgid "Invalid value '%s' for force. " msgstr "" -#: cinder/api/ec2/__init__.py:649 cinder/api/metadata/handler.py:248 -msgid "An unknown error has occurred. Please try your request again." +#: cinder/api/v1/volume_metadata.py:37 cinder/api/v1/volume_metadata.py:117 +#: cinder/api/v1/volume_metadata.py:156 cinder/api/v2/volume_metadata.py:36 +#: cinder/api/v2/volume_metadata.py:116 cinder/api/v2/volume_metadata.py:155 +msgid "volume does not exist" msgstr "" -#: cinder/api/ec2/apirequest.py:64 +#: cinder/api/v1/volumes.py:111 #, python-format -msgid "Unsupported API request: controller = %(controller)s, action = %(action)s" +msgid "vol=%s" msgstr "" -#: cinder/api/ec2/cloud.py:336 +#: cinder/api/v1/volumes.py:290 cinder/api/v2/volumes.py:228 #, python-format -msgid "Create snapshot of volume %s" +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:344 cinder/api/v1/volumes.py:348 +#: cinder/api/v2/volumes.py:298 cinder/api/v2/volumes.py:302 +msgid "Invalid imageRef provided." msgstr "" -#: cinder/api/ec2/cloud.py:372 +#: cinder/api/v1/volumes.py:388 cinder/api/v2/volumes.py:354 #, python-format -msgid "" -"Value (%s) for KeyName is invalid. Content limited to Alphanumeric " -"character, spaces, dashes, and underscore." +msgid "snapshot id:%s not found" msgstr "" -#: cinder/api/ec2/cloud.py:378 +#: cinder/api/v1/volumes.py:401 #, python-format -msgid "Value (%s) for Keyname is invalid. Length exceeds maximum of 255." 
+msgid "source vol id:%s not found" msgstr "" -#: cinder/api/ec2/cloud.py:382 +#: cinder/api/v1/volumes.py:412 cinder/api/v2/volumes.py:377 #, python-format -msgid "Create key pair %s" +msgid "Create volume of %s GB" msgstr "" -#: cinder/api/ec2/cloud.py:391 +#: cinder/api/v1/volumes.py:496 #, python-format -msgid "Import key %s" +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/snapshots.py:111 cinder/api/v2/snapshots.py:126 +#: cinder/api/v2/snapshots.py:267 +msgid "Snapshot could not be found" msgstr "" -#: cinder/api/ec2/cloud.py:409 +#: cinder/api/v2/snapshots.py:174 cinder/api/v2/snapshots.py:234 +#: cinder/api/v2/volumes.py:313 cinder/api/v2/volumes.py:419 #, python-format -msgid "Delete key pair %s" +msgid "Missing required element '%s' in request body" msgstr "" -#: cinder/api/ec2/cloud.py:551 -msgid "Invalid CIDR" +#: cinder/api/v2/snapshots.py:190 cinder/api/v2/volumes.py:217 +#: cinder/api/v2/volumes.py:234 cinder/api/v2/volumes.py:449 +msgid "Volume could not be found" msgstr "" -#: cinder/api/ec2/cloud.py:639 cinder/api/ec2/cloud.py:693 -#: cinder/api/ec2/cloud.py:800 -msgid "Not enough parameters, need group_name or group_id" +#: cinder/api/v2/snapshots.py:230 cinder/api/v2/volumes.py:415 +msgid "Missing request body" msgstr "" -#: cinder/api/ec2/cloud.py:654 -#: cinder/api/openstack/compute/contrib/security_groups.py:517 -#, python-format -msgid "Revoke security group ingress %s" +#: cinder/api/v2/types.py:70 +msgid "Volume type not found" msgstr "" -#: cinder/api/ec2/cloud.py:666 cinder/api/ec2/cloud.py:719 -#, python-format -msgid "%s Not enough parameters to build a valid rule" +#: cinder/api/v2/volumes.py:237 +msgid "Volume cannot be deleted while in attached state" msgstr "" -#: cinder/api/ec2/cloud.py:684 cinder/api/ec2/cloud.py:744 -msgid "No rule for the specified parameters." +#: cinder/api/v2/volumes.py:343 +msgid "Volume type not found." msgstr "" -#: cinder/api/ec2/cloud.py:708 -#: cinder/api/openstack/compute/contrib/security_groups.py:354 +#: cinder/api/v2/volumes.py:366 #, python-format -msgid "Authorize security group ingress %s" +msgid "source volume id:%s not found" msgstr "" -#: cinder/api/ec2/cloud.py:725 +#: cinder/api/v2/volumes.py:472 #, python-format -msgid "%s - This rule already exists in group" +msgid "Removing options '%s' from query" msgstr "" -#: cinder/api/ec2/cloud.py:769 -#, python-format -msgid "" -"Value (%s) for parameter GroupName is invalid. Content limited to " -"Alphanumeric characters, spaces, dashes, and underscores." +#: cinder/backup/api.py:66 +msgid "Backup status must be available or error" msgstr "" -#: cinder/api/ec2/cloud.py:776 -#, python-format -msgid "" -"Value (%s) for parameter GroupName is invalid. Length exceeds maximum of " -"255." 
+#: cinder/backup/api.py:105 +msgid "Volume to be backed up must be available" msgstr "" -#: cinder/api/ec2/cloud.py:780 -#: cinder/api/openstack/compute/contrib/security_groups.py:292 -#, python-format -msgid "Create Security Group %s" +#: cinder/backup/api.py:140 +msgid "Backup status must be available" msgstr "" -#: cinder/api/ec2/cloud.py:783 -#, python-format -msgid "group %s already exists" +#: cinder/backup/api.py:145 +msgid "Backup to be restored has invalid size" msgstr "" -#: cinder/api/ec2/cloud.py:815 -#: cinder/api/openstack/compute/contrib/security_groups.py:245 +#: cinder/backup/api.py:154 #, python-format -msgid "Delete security group %s" +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" msgstr "" -#: cinder/api/ec2/cloud.py:823 cinder/compute/manager.py:1630 -#, python-format -msgid "Get console output for instance %s" +#: cinder/backup/api.py:170 +msgid "Volume to be restored to must be available" msgstr "" -#: cinder/api/ec2/cloud.py:894 +#: cinder/backup/api.py:176 #, python-format -msgid "Create volume from snapshot %s" +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." msgstr "" -#: cinder/api/ec2/cloud.py:898 cinder/api/openstack/compute/contrib/volumes.py:186 -#: cinder/api/openstack/volume/volumes.py:222 +#: cinder/backup/api.py:181 #, python-format -msgid "Create volume of %s GB" +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" msgstr "" -#: cinder/api/ec2/cloud.py:921 -msgid "Delete Failed" +#: cinder/backup/manager.py:97 +msgid "NULL host not allowed for volume backend lookup." msgstr "" -#: cinder/api/ec2/cloud.py:931 +#: cinder/backup/manager.py:100 #, python-format -msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" +msgid "Checking hostname '%s' for backend info." msgstr "" -#: cinder/api/ec2/cloud.py:939 -msgid "Attach Failed." +#: cinder/backup/manager.py:107 +#, python-format +msgid "Backend not found in hostname (%s) so using default." msgstr "" -#: cinder/api/ec2/cloud.py:952 cinder/api/openstack/compute/contrib/volumes.py:366 +#: cinder/backup/manager.py:117 #, python-format -msgid "Detach volume %s" +msgid "Manager requested for volume_backend '%s'." msgstr "" -#: cinder/api/ec2/cloud.py:959 -msgid "Detach Volume Failed." +#: cinder/backup/manager.py:120 cinder/backup/manager.py:132 +msgid "Fetching default backend." msgstr "" -#: cinder/api/ec2/cloud.py:984 cinder/api/ec2/cloud.py:1041 -#: cinder/api/ec2/cloud.py:1518 cinder/api/ec2/cloud.py:1533 +#: cinder/backup/manager.py:123 #, python-format -msgid "attribute not supported: %s" +msgid "Volume manager for backend '%s' does not exist." msgstr "" -#: cinder/api/ec2/cloud.py:1107 +#: cinder/backup/manager.py:129 #, python-format -msgid "vol = %s\n" +msgid "Driver requested for volume_backend '%s'." msgstr "" -#: cinder/api/ec2/cloud.py:1255 -msgid "Allocate address" +#: cinder/backup/manager.py:147 +#, python-format +msgid "" +"Registering backend %(backend)s (host=%(host)s " +"backend_name=%(backend_name)s)." msgstr "" -#: cinder/api/ec2/cloud.py:1267 +#: cinder/backup/manager.py:154 #, python-format -msgid "Release address %s" +msgid "Registering default backend %s." msgstr "" -#: cinder/api/ec2/cloud.py:1272 +#: cinder/backup/manager.py:158 #, python-format -msgid "Associate address %(public_ip)s to instance %(instance_id)s" +msgid "Starting volume driver %(driver_name)s (%(version)s)." 
msgstr "" -#: cinder/api/ec2/cloud.py:1282 +#: cinder/backup/manager.py:165 #, python-format -msgid "Disassociate address %s" +msgid "Error encountered during initialization of driver: %(name)s." msgstr "" -#: cinder/api/ec2/cloud.py:1308 -msgid "Image must be available" +#: cinder/backup/manager.py:184 +msgid "Cleaning up incomplete backup operations." msgstr "" -#: cinder/api/ec2/cloud.py:1329 -msgid "Going to start terminating instances" +#: cinder/backup/manager.py:189 +#, python-format +msgid "Resetting volume %s to available (was backing-up)." msgstr "" -#: cinder/api/ec2/cloud.py:1343 +#: cinder/backup/manager.py:194 #, python-format -msgid "Reboot instance %r" +msgid "Resetting volume %s to error_restoring (was restoring-backup)." msgstr "" -#: cinder/api/ec2/cloud.py:1354 -msgid "Going to stop instances" +#: cinder/backup/manager.py:206 +#, python-format +msgid "Resetting backup %s to error (was creating)." msgstr "" -#: cinder/api/ec2/cloud.py:1365 -msgid "Going to start instances" +#: cinder/backup/manager.py:212 +#, python-format +msgid "Resetting backup %s to available (was restoring)." msgstr "" -#: cinder/api/ec2/cloud.py:1455 +#: cinder/backup/manager.py:217 #, python-format -msgid "De-registering image %s" +msgid "Resuming delete on backup: %s." msgstr "" -#: cinder/api/ec2/cloud.py:1471 -msgid "imageLocation is required" +#: cinder/backup/manager.py:225 +#, python-format +msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." msgstr "" -#: cinder/api/ec2/cloud.py:1490 +#: cinder/backup/manager.py:237 #, python-format -msgid "Registered image %(image_location)s with id %(image_id)s" +msgid "" +"Create backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s." msgstr "" -#: cinder/api/ec2/cloud.py:1536 -msgid "user or group not specified" +#: cinder/backup/manager.py:249 +#, python-format +msgid "" +"Create backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." msgstr "" -#: cinder/api/ec2/cloud.py:1538 -msgid "only group \"all\" is supported" +#: cinder/backup/manager.py:282 +#, python-format +msgid "Create backup finished. backup: %s." msgstr "" -#: cinder/api/ec2/cloud.py:1540 -msgid "operation_type must be add or remove" +#: cinder/backup/manager.py:286 +#, python-format +msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s." msgstr "" -#: cinder/api/ec2/cloud.py:1542 +#: cinder/backup/manager.py:299 #, python-format -msgid "Updating image %s publicity" +msgid "" +"Restore backup aborted: expected volume status %(expected_status)s but " +"got %(actual_status)s." msgstr "" -#: cinder/api/ec2/cloud.py:1555 +#: cinder/backup/manager.py:310 #, python-format -msgid "Not allowed to modify attributes for image %s" +msgid "" +"Restore backup aborted: expected backup status %(expected_status)s but " +"got %(actual_status)s." msgstr "" -#: cinder/api/ec2/cloud.py:1603 +#: cinder/backup/manager.py:329 #, python-format -msgid "Couldn't stop instance with in %d sec" +msgid "" +"Restore backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." msgstr "" -#: cinder/api/metadata/handler.py:246 cinder/api/metadata/handler.py:253 +#: cinder/backup/manager.py:360 #, python-format -msgid "Failed to get metadata for ip: %s" +msgid "" +"Restore backup finished, backup %(backup_id)s restored to volume " +"%(volume_id)s." 
msgstr "" -#: cinder/api/openstack/__init__.py:43 +#: cinder/backup/manager.py:379 #, python-format -msgid "Caught error: %s" +msgid "Delete backup started, backup: %s." msgstr "" -#: cinder/api/openstack/__init__.py:45 cinder/api/openstack/wsgi.py:886 +#: cinder/backup/manager.py:386 #, python-format -msgid "%(url)s returned with HTTP %(status)d" +msgid "" +"Delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." msgstr "" -#: cinder/api/openstack/__init__.py:94 -msgid "Must specify an ExtensionManager class" +#: cinder/backup/manager.py:399 +#, python-format +msgid "" +"Delete backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." msgstr "" -#: cinder/api/openstack/__init__.py:105 +#: cinder/backup/manager.py:422 #, python-format -msgid "Extended resource: %s" +msgid "Delete backup finished, backup %s deleted." msgstr "" -#: cinder/api/openstack/__init__.py:130 -#, python-format +#: cinder/backup/drivers/ceph.py:116 msgid "" -"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " -"resource" +"rbd striping not supported - ignoring configuration settings for rbd " +"striping" msgstr "" -#: cinder/api/openstack/__init__.py:135 +#: cinder/backup/drivers/ceph.py:147 #, python-format -msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgid "invalid user '%s'" msgstr "" -#: cinder/api/openstack/auth.py:90 -#, python-format -msgid "%(user_id)s could not be found with token '%(token)s'" +#: cinder/backup/drivers/ceph.py:213 +msgid "backup_id required" msgstr "" -#: cinder/api/openstack/auth.py:134 +#: cinder/backup/drivers/ceph.py:224 #, python-format -msgid "%(user_id)s must be an admin or a member of %(project_id)s" +msgid "discarding %(length)s bytes from offset %(offset)s" msgstr "" -#: cinder/api/openstack/auth.py:152 -msgid "Authentication requests must be made against a version root (e.g. /v2)." +#: cinder/backup/drivers/ceph.py:232 +#, python-format +msgid "writing zeroes chunk %d" msgstr "" -#: cinder/api/openstack/auth.py:167 +#: cinder/backup/drivers/ceph.py:246 #, python-format -msgid "Could not find %s in request." +msgid "transferring data between '%(src)s' and '%(dest)s'" msgstr "" -#: cinder/api/openstack/auth.py:191 +#: cinder/backup/drivers/ceph.py:250 #, python-format -msgid "Successfully authenticated '%s'" +msgid "%(chunks)s chunks of %(bytes)s bytes to be transferred" msgstr "" -#: cinder/api/openstack/auth.py:241 -msgid "User not found with provided API key." 
+#: cinder/backup/drivers/ceph.py:269 +#, python-format +msgid "transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s)" msgstr "" -#: cinder/api/openstack/auth.py:258 +#: cinder/backup/drivers/ceph.py:279 #, python-format -msgid "Provided API key is valid, but not for user '%(username)s'" +msgid "transferring remaining %s bytes" msgstr "" -#: cinder/api/openstack/common.py:133 cinder/api/openstack/common.py:167 -msgid "limit param must be an integer" +#: cinder/backup/drivers/ceph.py:295 +#, python-format +msgid "creating base image '%s'" msgstr "" -#: cinder/api/openstack/common.py:136 cinder/api/openstack/common.py:171 -msgid "limit param must be positive" +#: cinder/backup/drivers/ceph.py:322 cinder/backup/drivers/ceph.py:603 +#, python-format +msgid "deleting backup snapshot='%s'" msgstr "" -#: cinder/api/openstack/common.py:161 -msgid "offset param must be an integer" +#: cinder/backup/drivers/ceph.py:325 +msgid "no backup snapshot to delete" msgstr "" -#: cinder/api/openstack/common.py:175 -msgid "offset param must be positive" +#: cinder/backup/drivers/ceph.py:361 +#, python-format +msgid "trying diff format name format basename='%s'" msgstr "" -#: cinder/api/openstack/common.py:203 +#: cinder/backup/drivers/ceph.py:369 #, python-format -msgid "marker [%s] not found" +msgid "image %s not found" msgstr "" -#: cinder/api/openstack/common.py:243 +#: cinder/backup/drivers/ceph.py:377 #, python-format -msgid "href %s does not contain version" +msgid "base image still has %s snapshots so skipping base image delete" msgstr "" -#: cinder/api/openstack/common.py:278 -msgid "Image metadata limit exceeded" +#: cinder/backup/drivers/ceph.py:382 +#, python-format +msgid "deleting base image='%s'" msgstr "" -#: cinder/api/openstack/common.py:295 +#: cinder/backup/drivers/ceph.py:389 #, python-format -msgid "Converting nw_info: %s" +msgid "image busy, retrying %(retries)s more time(s) in %(delay)ss" msgstr "" -#: cinder/api/openstack/common.py:305 -#, python-format -msgid "Converted networks: %s" +#: cinder/backup/drivers/ceph.py:394 +msgid "max retries reached - raising error" msgstr "" -#: cinder/api/openstack/common.py:338 +#: cinder/backup/drivers/ceph.py:397 #, python-format -msgid "Cannot '%(action)s' while instance is in %(attr)s %(state)s" +msgid "base backup image='%s' deleted)" msgstr "" -#: cinder/api/openstack/common.py:341 +#: cinder/backup/drivers/ceph.py:407 #, python-format -msgid "Instance is in an invalid state for '%(action)s'" +msgid "deleting source snap '%s'" msgstr "" -#: cinder/api/openstack/common.py:421 -msgid "Rejecting snapshot request, snapshots currently disabled" +#: cinder/backup/drivers/ceph.py:453 +#, python-format +msgid "performing differential transfer from '%(src)s' to '%(dest)s'" msgstr "" -#: cinder/api/openstack/common.py:423 -msgid "Instance snapshots are not permitted at this time." 
+#: cinder/backup/drivers/ceph.py:478 +#, python-format +msgid "rbd diff op failed - (ret=%(ret)s stderr=%(stderr)s)" msgstr "" -#: cinder/api/openstack/extensions.py:188 +#: cinder/backup/drivers/ceph.py:488 #, python-format -msgid "Loaded extension: %s" +msgid "image '%s' not found - trying diff format name" msgstr "" -#: cinder/api/openstack/extensions.py:225 +#: cinder/backup/drivers/ceph.py:493 #, python-format -msgid "Ext name: %s" +msgid "diff format image '%s' not found" msgstr "" -#: cinder/api/openstack/extensions.py:226 +#: cinder/backup/drivers/ceph.py:528 #, python-format -msgid "Ext alias: %s" +msgid "using --from-snap '%s'" msgstr "" -#: cinder/api/openstack/extensions.py:227 +#: cinder/backup/drivers/ceph.py:543 #, python-format -msgid "Ext description: %s" +msgid "source snap '%s' is stale so deleting" msgstr "" -#: cinder/api/openstack/extensions.py:229 +#: cinder/backup/drivers/ceph.py:555 #, python-format -msgid "Ext namespace: %s" +msgid "" +"snap='%(snap)s' does not exist in base image='%(base)s' - aborting " +"incremental backup" msgstr "" -#: cinder/api/openstack/extensions.py:230 +#: cinder/backup/drivers/ceph.py:566 #, python-format -msgid "Ext updated: %s" +msgid "creating backup snapshot='%s'" msgstr "" -#: cinder/api/openstack/extensions.py:232 +#: cinder/backup/drivers/ceph.py:586 #, python-format -msgid "Exception loading extension: %s" +msgid "differential backup transfer completed in %.4fs" msgstr "" -#: cinder/api/openstack/extensions.py:246 -#, python-format -msgid "Loading extension %s" +#: cinder/backup/drivers/ceph.py:595 +msgid "differential backup transfer failed" msgstr "" -#: cinder/api/openstack/extensions.py:252 +#: cinder/backup/drivers/ceph.py:625 #, python-format -msgid "Calling extension factory %s" +msgid "creating base image='%s'" msgstr "" -#: cinder/api/openstack/extensions.py:264 -#, python-format -msgid "Failed to load extension %(ext_factory)s: %(exc)s" +#: cinder/backup/drivers/ceph.py:634 +msgid "copying data" msgstr "" -#: cinder/api/openstack/extensions.py:344 +#: cinder/backup/drivers/ceph.py:694 #, python-format -msgid "Failed to load extension %(classpath)s: %(exc)s" +msgid "looking for snapshot of backup base '%s'" msgstr "" -#: cinder/api/openstack/extensions.py:368 +#: cinder/backup/drivers/ceph.py:697 #, python-format -msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgid "backup base '%s' has no snapshots" msgstr "" -#: cinder/api/openstack/wsgi.py:135 cinder/api/openstack/wsgi.py:538 -msgid "cannot understand JSON" +#: cinder/backup/drivers/ceph.py:704 +#, python-format +msgid "backup '%s' has no snapshot" msgstr "" -#: cinder/api/openstack/wsgi.py:159 -#: cinder/api/openstack/compute/contrib/hosts.py:86 -msgid "cannot understand XML" -msgstr "" - -#: cinder/api/openstack/wsgi.py:543 -msgid "too many body keys" +#: cinder/backup/drivers/ceph.py:708 +#, python-format +msgid "backup should only have one snapshot but instead has %s" msgstr "" -#: cinder/api/openstack/wsgi.py:582 +#: cinder/backup/drivers/ceph.py:713 #, python-format -msgid "Exception handling resource: %s" +msgid "found snapshot '%s'" msgstr "" -#: cinder/api/openstack/wsgi.py:586 -#, python-format -msgid "Fault thrown: %s" +#: cinder/backup/drivers/ceph.py:734 +msgid "need non-zero volume size" msgstr "" -#: cinder/api/openstack/wsgi.py:589 +#: cinder/backup/drivers/ceph.py:751 #, python-format -msgid "HTTP exception thrown: %s" +msgid "Starting backup of volume='%s'" msgstr "" -#: cinder/api/openstack/wsgi.py:697 -msgid "Unrecognized Content-Type 
provided in request" +#: cinder/backup/drivers/ceph.py:764 +msgid "forcing full backup" msgstr "" -#: cinder/api/openstack/wsgi.py:701 -msgid "No Content-Type provided in request" +#: cinder/backup/drivers/ceph.py:776 +#, python-format +msgid "backup '%s' finished." msgstr "" -#: cinder/api/openstack/wsgi.py:705 -msgid "Empty body provided in request" +#: cinder/backup/drivers/ceph.py:834 +msgid "adjusting restore vol size" msgstr "" -#: cinder/api/openstack/wsgi.py:816 +#: cinder/backup/drivers/ceph.py:846 #, python-format -msgid "There is no such action: %s" -msgstr "" - -#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 -#: cinder/api/openstack/compute/server_metadata.py:58 -#: cinder/api/openstack/compute/server_metadata.py:76 -#: cinder/api/openstack/compute/server_metadata.py:101 -#: cinder/api/openstack/compute/server_metadata.py:126 -#: cinder/api/openstack/compute/contrib/admin_actions.py:211 -#: cinder/api/openstack/compute/contrib/console_output.py:52 -msgid "Malformed request body" +msgid "trying incremental restore from base='%(base)s' snap='%(snap)s'" msgstr "" -#: cinder/api/openstack/wsgi.py:829 -msgid "Unsupported Content-Type" +#: cinder/backup/drivers/ceph.py:858 +msgid "differential restore failed, trying full restore" msgstr "" -#: cinder/api/openstack/wsgi.py:841 -msgid "Malformed request url" +#: cinder/backup/drivers/ceph.py:869 +#, python-format +msgid "restore transfer completed in %.4fs" msgstr "" -#: cinder/api/openstack/wsgi.py:889 +#: cinder/backup/drivers/ceph.py:916 #, python-format -msgid "%(url)s returned a fault: %(e)s" +msgid "rbd has %s extents" msgstr "" -#: cinder/api/openstack/xmlutil.py:265 -msgid "element is not a child" +#: cinder/backup/drivers/ceph.py:938 +msgid "dest volume is original volume - forcing full copy" msgstr "" -#: cinder/api/openstack/xmlutil.py:414 -msgid "root element selecting a list" +#: cinder/backup/drivers/ceph.py:959 +msgid "destination has extents - forcing full copy" msgstr "" -#: cinder/api/openstack/xmlutil.py:739 +#: cinder/backup/drivers/ceph.py:964 #, python-format -msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgid "no restore point found for backup='%s', forcing full copy" msgstr "" -#: cinder/api/openstack/xmlutil.py:858 -msgid "subclasses must implement construct()!" -msgstr "" - -#: cinder/api/openstack/compute/extensions.py:29 -#: cinder/api/openstack/volume/extensions.py:29 -msgid "Initializing extension manager." +#: cinder/backup/drivers/ceph.py:995 +msgid "forcing full restore" msgstr "" -#: cinder/api/openstack/compute/image_metadata.py:40 -#: cinder/api/openstack/compute/images.py:146 -#: cinder/api/openstack/compute/images.py:161 -msgid "Image not found." +#: cinder/backup/drivers/ceph.py:1005 +#, python-format +msgid "starting restore from Ceph backup=%(src)s to volume=%(dest)s" msgstr "" -#: cinder/api/openstack/compute/image_metadata.py:79 -msgid "Incorrect request body format" +#: cinder/backup/drivers/ceph.py:1016 +msgid "volume_file does not support fileno() so skipping fsync()" msgstr "" -#: cinder/api/openstack/compute/image_metadata.py:83 -#: cinder/api/openstack/compute/server_metadata.py:80 -#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:79 -#: cinder/api/openstack/compute/contrib/volumetypes.py:188 -msgid "Request body and URI mismatch" +#: cinder/backup/drivers/ceph.py:1021 +msgid "restore finished successfully." 
msgstr "" -#: cinder/api/openstack/compute/image_metadata.py:86 -#: cinder/api/openstack/compute/server_metadata.py:84 -#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:82 -#: cinder/api/openstack/compute/contrib/volumetypes.py:191 -msgid "Request body contains too many items" +#: cinder/backup/drivers/ceph.py:1023 +#, python-format +msgid "restore finished with error - %s" msgstr "" -#: cinder/api/openstack/compute/image_metadata.py:111 -msgid "Invalid metadata key" +#: cinder/backup/drivers/ceph.py:1029 +#, python-format +msgid "delete started for backup=%s" msgstr "" -#: cinder/api/openstack/compute/ips.py:74 -msgid "Instance does not exist" +#: cinder/backup/drivers/ceph.py:1034 +msgid "rbd image not found but continuing anyway so that db entry can be removed" msgstr "" -#: cinder/api/openstack/compute/ips.py:97 -msgid "Instance is not a member of specified network" +#: cinder/backup/drivers/ceph.py:1037 +#, python-format +msgid "delete '%s' finished with warning" msgstr "" -#: cinder/api/openstack/compute/limits.py:140 +#: cinder/backup/drivers/ceph.py:1039 #, python-format -msgid "" -"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " -"%(unit_string)s." +msgid "delete '%s' finished" msgstr "" -#: cinder/api/openstack/compute/limits.py:266 -msgid "This request was rate-limited." +#: cinder/backup/drivers/swift.py:106 +#, python-format +msgid "unsupported compression algorithm: %s" msgstr "" -#: cinder/api/openstack/compute/server_metadata.py:38 -#: cinder/api/openstack/compute/server_metadata.py:122 -#: cinder/api/openstack/compute/server_metadata.py:159 -msgid "Server does not exist" +#: cinder/backup/drivers/swift.py:123 +#, python-format +msgid "single_user auth mode enabled, but %(param)s not set" msgstr "" -#: cinder/api/openstack/compute/server_metadata.py:141 -#: cinder/api/openstack/compute/server_metadata.py:152 -msgid "Metadata item was not found" +#: cinder/backup/drivers/swift.py:141 +#, python-format +msgid "_check_container_exists: container: %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:425 +#: cinder/backup/drivers/swift.py:146 #, python-format -msgid "Invalid server status: %(status)s" +msgid "container %s does not exist" msgstr "" -#: cinder/api/openstack/compute/servers.py:433 -msgid "Invalid changes-since value" +#: cinder/backup/drivers/swift.py:151 +#, python-format +msgid "container %s exists" msgstr "" -#: cinder/api/openstack/compute/servers.py:483 -msgid "Personality file limit exceeded" +#: cinder/backup/drivers/swift.py:157 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:485 -msgid "Personality file path too long" +#: cinder/backup/drivers/swift.py:173 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:487 -msgid "Personality file content too long" +#: cinder/backup/drivers/swift.py:182 +#, python-format +msgid "generated object list: %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:501 -msgid "Server name is not a string or unicode" +#: cinder/backup/drivers/swift.py:192 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:505 -msgid "Server name is an empty string" +#: cinder/backup/drivers/swift.py:209 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] 
is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" msgstr "" -#: cinder/api/openstack/compute/servers.py:509 -msgid "Server name must be less than 256 characters." +#: cinder/backup/drivers/swift.py:214 +msgid "_write_metadata finished" msgstr "" -#: cinder/api/openstack/compute/servers.py:527 +#: cinder/backup/drivers/swift.py:219 #, python-format -msgid "Bad personality format: missing %s" +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:530 -msgid "Bad personality format" +#: cinder/backup/drivers/swift.py:224 +#, python-format +msgid "_read_metadata finished (%s)" msgstr "" -#: cinder/api/openstack/compute/servers.py:535 +#: cinder/backup/drivers/swift.py:234 #, python-format -msgid "Personality content for %s cannot be decoded" +msgid "volume size %d is invalid." msgstr "" -#: cinder/api/openstack/compute/servers.py:550 +#: cinder/backup/drivers/swift.py:248 #, python-format -msgid "Bad networks format: network uuid is not in proper format (%s)" +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:559 -#, python-format -msgid "Invalid fixed IP address (%s)" +#: cinder/backup/drivers/swift.py:271 +msgid "reading chunk of data from volume" msgstr "" -#: cinder/api/openstack/compute/servers.py:566 +#: cinder/backup/drivers/swift.py:278 #, python-format -msgid "Duplicate networks (%s) are not allowed" +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:572 -#, python-format -msgid "Bad network format: missing %s" +#: cinder/backup/drivers/swift.py:287 +msgid "not compressing data" msgstr "" -#: cinder/api/openstack/compute/servers.py:575 -msgid "Bad networks format" +#: cinder/backup/drivers/swift.py:291 +msgid "About to put_object" msgstr "" -#: cinder/api/openstack/compute/servers.py:587 -msgid "Userdata content cannot be decoded" +#: cinder/backup/drivers/swift.py:297 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:594 -msgid "accessIPv4 is not proper IPv4 format" +#: cinder/backup/drivers/swift.py:301 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:601 -msgid "accessIPv6 is not proper IPv6 format" +#: cinder/backup/drivers/swift.py:304 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:633 -msgid "Server name is not defined" +#: cinder/backup/drivers/swift.py:312 +msgid "Calling eventlet.sleep(0)" msgstr "" -#: cinder/api/openstack/compute/servers.py:675 -#: cinder/api/openstack/compute/servers.py:740 -msgid "Invalid flavorRef provided." +#: cinder/backup/drivers/swift.py:328 cinder/backup/drivers/tsm.py:324 +#, python-format +msgid "backup %s finished." msgstr "" -#: cinder/api/openstack/compute/servers.py:737 -msgid "Can not find requested image" +#: cinder/backup/drivers/swift.py:345 +#, python-format +msgid "v1 swift volume backup restore of %s started" msgstr "" -#: cinder/api/openstack/compute/servers.py:743 -msgid "Invalid key_name provided." 
+#: cinder/backup/drivers/swift.py:350 +#, python-format +msgid "metadata_object_names = %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:829 -#: cinder/api/openstack/compute/servers.py:849 -msgid "Instance has not been resized." +#: cinder/backup/drivers/swift.py:356 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" msgstr "" -#: cinder/api/openstack/compute/servers.py:835 +#: cinder/backup/drivers/swift.py:362 #, python-format -msgid "Error in confirm-resize %s" +msgid "" +"restoring object from swift. backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:855 +#: cinder/backup/drivers/swift.py:378 #, python-format -msgid "Error in revert-resize %s" +msgid "decompressing data using %s algorithm" msgstr "" -#: cinder/api/openstack/compute/servers.py:868 -msgid "Argument 'type' for reboot is not HARD or SOFT" +#: cinder/backup/drivers/swift.py:401 +#, python-format +msgid "v1 swift volume backup restore of %s finished" msgstr "" -#: cinder/api/openstack/compute/servers.py:872 -msgid "Missing argument 'type' for reboot" +#: cinder/backup/drivers/swift.py:409 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:885 +#: cinder/backup/drivers/swift.py:423 #, python-format -msgid "Error in reboot %s" +msgid "Restoring swift backup version %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:897 -msgid "Unable to locate requested flavor." +#: cinder/backup/drivers/swift.py:428 +#, python-format +msgid "No support to restore swift backup version %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:900 -msgid "Resize requires a change in size." +#: cinder/backup/drivers/swift.py:432 cinder/backup/drivers/tsm.py:378 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." msgstr "" -#: cinder/api/openstack/compute/servers.py:924 -msgid "Malformed server entity" +#: cinder/backup/drivers/swift.py:446 +msgid "swift error while listing objects, continuing with delete" msgstr "" -#: cinder/api/openstack/compute/servers.py:931 -msgid "Missing imageRef attribute" +#: cinder/backup/drivers/swift.py:455 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" msgstr "" -#: cinder/api/openstack/compute/servers.py:940 -msgid "Invalid imageRef provided." 
+#: cinder/backup/drivers/swift.py:458 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:949 -msgid "Missing flavorRef attribute" +#: cinder/backup/drivers/swift.py:468 cinder/backup/drivers/tsm.py:440 +#, python-format +msgid "delete %s finished" msgstr "" -#: cinder/api/openstack/compute/servers.py:962 -msgid "No adminPass was specified" +#: cinder/backup/drivers/tsm.py:85 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to create device hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:966 -#: cinder/api/openstack/compute/servers.py:1144 -msgid "Invalid adminPass" +#: cinder/backup/drivers/tsm.py:143 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to obtain backup success notification from " +"server.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:980 -msgid "Unable to parse metadata key/value pairs." +#: cinder/backup/drivers/tsm.py:173 +#, python-format +msgid "" +"restore: %(vol_id)s Failed.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:993 -msgid "Resize request has invalid 'flavorRef' attribute." +#: cinder/backup/drivers/tsm.py:199 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a block device." msgstr "" -#: cinder/api/openstack/compute/servers.py:996 -msgid "Resize requests require 'flavorRef' attribute." +#: cinder/backup/drivers/tsm.py:206 +#, python-format +msgid "backup: %(vol_id)s Failed. Cannot obtain real path to device %(path)s." msgstr "" -#: cinder/api/openstack/compute/servers.py:1014 -#: cinder/api/openstack/compute/contrib/aggregates.py:142 -#: cinder/api/openstack/compute/contrib/networks.py:65 -msgid "Invalid request body" +#: cinder/backup/drivers/tsm.py:213 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a file." msgstr "" -#: cinder/api/openstack/compute/servers.py:1019 -msgid "Could not parse imageRef from request." 
+#: cinder/backup/drivers/tsm.py:260 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to remove backup hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:1071 -msgid "Instance could not be found" +#: cinder/backup/drivers/tsm.py:286 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to TSM, volume path: " +"%(volume_path)s," msgstr "" -#: cinder/api/openstack/compute/servers.py:1074 -msgid "Cannot find image for rebuild" +#: cinder/backup/drivers/tsm.py:298 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:1103 -msgid "createImage entity requires name attribute" +#: cinder/backup/drivers/tsm.py:308 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:1112 -#: cinder/api/openstack/compute/contrib/admin_actions.py:238 -msgid "Invalid metadata" +#: cinder/backup/drivers/tsm.py:338 +#, python-format +msgid "" +"restore: starting restore of backup from TSM to volume %(volume_id)s, " +"backup: %(backup_id)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:1167 +#: cinder/backup/drivers/tsm.py:352 #, python-format -msgid "Removing options '%(unk_opt_str)s' from query" +msgid "" +"restore: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:60 +#: cinder/backup/drivers/tsm.py:362 #, python-format -msgid "Compute.api::pause %s" +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:77 +#: cinder/backup/drivers/tsm.py:413 #, python-format -msgid "Compute.api::unpause %s" +msgid "" +"delete: %(vol_id)s Failed to run dsmc with stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:94 +#: cinder/backup/drivers/tsm.py:421 #, python-format -msgid "compute.api::suspend %s" +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments with " +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:111 +#: cinder/backup/drivers/tsm.py:432 #, python-format -msgid "compute.api::resume %s" +msgid "" +"delete: %(vol_id)s Failed with stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:127 +#: cinder/brick/exception.py:55 #, python-format -msgid "Error in migrate %s" +msgid "Exception in string format operation. msg='%s'" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:141 -#, python-format -msgid "Compute.api::reset_network %s" +#: cinder/brick/exception.py:85 +msgid "We are unable to locate any Fibre Channel devices." msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:154 -#: cinder/api/openstack/compute/contrib/admin_actions.py:170 -#: cinder/api/openstack/compute/contrib/admin_actions.py:186 -#: cinder/api/openstack/compute/contrib/multinic.py:41 -#: cinder/api/openstack/compute/contrib/rescue.py:44 -msgid "Server not found" +#: cinder/brick/exception.py:89 +msgid "Unable to find a Fibre Channel volume device." 
msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:157 +#: cinder/brick/exception.py:93 #, python-format -msgid "Compute.api::inject_network_info %s" +msgid "Volume device not found at %(device)s." msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:173 +#: cinder/brick/exception.py:97 #, python-format -msgid "Compute.api::lock %s" +msgid "Unable to find Volume Group: %(vg_name)s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:189 +#: cinder/brick/exception.py:101 #, python-format -msgid "Compute.api::unlock %s" +msgid "Failed to create Volume Group: %(vg_name)s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:219 +#: cinder/brick/exception.py:105 #, python-format -msgid "createBackup entity requires %s attribute" +msgid "Failed to create iscsi target for volume %(volume_id)s." msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:223 -msgid "Malformed createBackup entity" +#: cinder/brick/exception.py:109 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:229 -msgid "createBackup attribute 'rotation' must be an integer" +#: cinder/brick/exception.py:113 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:244 -#: cinder/api/openstack/compute/contrib/console_output.py:47 -#: cinder/api/openstack/compute/contrib/console_output.py:59 -#: cinder/api/openstack/compute/contrib/consoles.py:49 -#: cinder/api/openstack/compute/contrib/consoles.py:60 -#: cinder/api/openstack/compute/contrib/server_action_list.py:49 -#: cinder/api/openstack/compute/contrib/server_diagnostics.py:47 -#: cinder/api/openstack/compute/contrib/server_start_stop.py:38 -msgid "Instance not found" +#: cinder/brick/exception.py:117 +#, python-format +msgid "Connect to volume via protocol %(protocol)s not supported." msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:272 -msgid "host and block_migration must be specified." +#: cinder/brick/initiator/connector.py:127 +#, python-format +msgid "Invalid InitiatorConnector protocol specified %(protocol)s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:284 +#: cinder/brick/initiator/connector.py:140 #, python-format -msgid "Live migration of instance %(id)s to host %(host)s failed" +msgid "Failed to access the device on the path %(path)s: %(error)s %(info)s." msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:76 +#: cinder/brick/initiator/connector.py:229 #, python-format msgid "" -"Cannot create aggregate with name %(name)s and availability zone " -"%(avail_zone)s" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. 
Try" +" number: %(tries)s" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:88 +#: cinder/brick/initiator/connector.py:242 #, python-format -msgid "Cannot show aggregate: %(id)s" +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:114 +#: cinder/brick/initiator/connector.py:317 #, python-format -msgid "Cannot update aggregate: %(id)s" +msgid "Could not find the iSCSI Initiator File %s" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:126 -#, python-format -msgid "Cannot delete aggregate: %(id)s" +#: cinder/brick/initiator/connector.py:609 +msgid "We are unable to locate any Fibre Channel devices" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:139 +#: cinder/brick/initiator/connector.py:619 #, python-format -msgid "Aggregates does not have %s action" +msgid "Looking for Fibre Channel dev %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:629 +msgid "Fibre Channel volume device not found." msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:152 -#: cinder/api/openstack/compute/contrib/aggregates.py:158 +#: cinder/brick/initiator/connector.py:633 #, python-format -msgid "Cannot add host %(host)s in aggregate %(id)s" +msgid "Fibre volume not yet found. Will rescan & retry. Try number: %(tries)s" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:171 -#: cinder/api/openstack/compute/contrib/aggregates.py:175 +#: cinder/brick/initiator/connector.py:649 #, python-format -msgid "Cannot remove host %(host)s in aggregate %(id)s" +msgid "Found Fibre Channel volume %(name)s (after %(tries)s rescans)" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:195 +#: cinder/brick/initiator/connector.py:658 #, python-format -msgid "Cannot set metadata %(metadata)s in aggregate %(id)s" +msgid "Multipath device discovered %(device)s" msgstr "" -#: cinder/api/openstack/compute/contrib/certificates.py:75 -msgid "Only root certificate can be retrieved." +#: cinder/brick/initiator/connector.py:776 +#, python-format +msgid "AoE volume not yet found at: %(path)s. Try number: %(tries)s" msgstr "" -#: cinder/api/openstack/compute/contrib/cloudpipe.py:146 -msgid "" -"Unable to claim IP for VPN instances, ensure it isn't running, and try " -"again in a few minutes" +#: cinder/brick/initiator/connector.py:789 +#, python-format +msgid "Found AoE device %(path)s (after %(tries)s rediscover)" msgstr "" -#: cinder/api/openstack/compute/contrib/consoles.py:44 -msgid "Missing type specification" +#: cinder/brick/initiator/connector.py:815 +#, python-format +msgid "aoe-discover: stdout=%(out)s stderr%(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/consoles.py:56 -msgid "Invalid type specification" +#: cinder/brick/initiator/connector.py:825 +#, python-format +msgid "aoe-revalidate %(dev)s: stdout=%(out)s stderr%(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/disk_config.py:44 +#: cinder/brick/initiator/connector.py:834 #, python-format -msgid "%s must be either 'MANUAL' or 'AUTO'." +msgid "aoe-flush %(dev)s: stdout=%(out)s stderr%(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/extended_server_attributes.py:77 -#: cinder/api/openstack/compute/contrib/extended_status.py:61 -msgid "Server not found." +#: cinder/brick/initiator/connector.py:858 +msgid "" +"Connection details not present. RemoteFsClient may not initialize " +"properly." 
msgstr "" -#: cinder/api/openstack/compute/contrib/flavorextradata.py:61 -#: cinder/api/openstack/compute/contrib/flavorextradata.py:91 -msgid "Flavor not found." +#: cinder/brick/initiator/connector.py:915 +msgid "Invalid connection_properties specified no device_path attribute" msgstr "" -#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:49 -#: cinder/api/openstack/compute/contrib/volumetypes.py:158 -msgid "No Request Body" +#: cinder/brick/initiator/linuxfc.py:50 cinder/brick/initiator/linuxfc.py:56 +msgid "systool is not installed" msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:159 +#: cinder/brick/initiator/linuxscsi.py:99 +#: cinder/brick/initiator/linuxscsi.py:107 +#: cinder/brick/initiator/linuxscsi.py:124 #, python-format -msgid "No more floating ips in pool %s." +msgid "multipath call failed exit (%(code)s)" msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:161 -msgid "No more floating ips available." +#: cinder/brick/initiator/linuxscsi.py:145 +#, python-format +msgid "Couldn't find multipath device %(line)s" msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:201 -#: cinder/api/openstack/compute/contrib/floating_ips.py:230 -#: cinder/api/openstack/compute/contrib/security_groups.py:571 -#: cinder/api/openstack/compute/contrib/security_groups.py:604 -msgid "Missing parameter dict" +#: cinder/brick/initiator/linuxscsi.py:149 +#, python-format +msgid "Found multipath device = %(mdev)s" msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:204 -#: cinder/api/openstack/compute/contrib/floating_ips.py:233 -msgid "Address not specified" +#: cinder/brick/iscsi/iscsi.py:140 +msgid "Attempting recreate of backing lun..." msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:213 -msgid "No fixed ips associated to instance" +#: cinder/brick/iscsi/iscsi.py:158 +#, python-format +msgid "" +"Failed to recover attempt to create iscsi backing lun for volume " +"id:%(vol_id)s: %(e)s" msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:216 -msgid "Associate floating ip failed" +#: cinder/brick/iscsi/iscsi.py:177 +#, python-format +msgid "Creating iscsi_target for: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:144 +#: cinder/brick/iscsi/iscsi.py:184 #, python-format -msgid "Invalid status: '%s'" +msgid "" +"Created volume path %(vp)s,\n" +"content: %(vc)%" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:148 +#: cinder/brick/iscsi/iscsi.py:216 cinder/brick/iscsi/iscsi.py:365 +#, fuzzy, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "Nicht möglich volume %s zufinden" + +#: cinder/brick/iscsi/iscsi.py:227 #, python-format -msgid "Invalid mode: '%s'" +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:152 +#: cinder/brick/iscsi/iscsi.py:258 #, python-format -msgid "Invalid update setting: '%s'" +msgid "Removing iscsi_target for: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:170 +#: cinder/brick/iscsi/iscsi.py:262 #, python-format -msgid "Putting host %(host)s in maintenance mode %(mode)s." +msgid "Volume path %s does not exist, nothing to remove." 
+msgstr "" + +#: cinder/brick/iscsi/iscsi.py:280 +#, fuzzy, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "Nicht möglich volume %s zufinden" + +#: cinder/brick/iscsi/iscsi.py:290 cinder/brick/iscsi/iscsi.py:550 +msgid "valid iqn needed for show_target" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:181 +#: cinder/brick/iscsi/iscsi.py:375 #, python-format -msgid "Setting host %(host)s to %(state)s." +msgid "Removing iscsi_target for volume: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:230 -msgid "Describe-resource is admin only functionality" +#: cinder/brick/iscsi/iscsi.py:469 +msgid "cinder-rtstool is not installed correctly" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:238 -msgid "Host not found" +#: cinder/brick/iscsi/iscsi.py:489 +#, python-format +msgid "Creating iscsi_target for volume: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/keypairs.py:70 -msgid "Keypair name contains unsafe characters" +#: cinder/brick/iscsi/iscsi.py:513 cinder/brick/iscsi/iscsi.py:522 +#, python-format +msgid "Failed to create iscsi target for volume id:%s." msgstr "" -#: cinder/api/openstack/compute/contrib/keypairs.py:95 -msgid "Keypair name must be between 1 and 255 characters long" +#: cinder/brick/iscsi/iscsi.py:532 +#, python-format +msgid "Removing iscsi_target: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/keypairs.py:100 +#: cinder/brick/iscsi/iscsi.py:542 #, python-format -msgid "Key pair '%s' already exists." +msgid "Failed to remove iscsi target for volume id:%s." msgstr "" -#: cinder/api/openstack/compute/contrib/multinic.py:52 -msgid "Missing 'networkId' argument for addFixedIp" +#: cinder/brick/iscsi/iscsi.py:571 +#, python-format +msgid "Failed to add initiator iqn %s to target" msgstr "" -#: cinder/api/openstack/compute/contrib/multinic.py:68 -msgid "Missing 'address' argument for removeFixedIp" +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" msgstr "" -#: cinder/api/openstack/compute/contrib/multinic.py:77 +#: cinder/brick/local_dev/lvm.py:76 cinder/brick/local_dev/lvm.py:158 +#: cinder/brick/local_dev/lvm.py:474 cinder/brick/local_dev/lvm.py:503 +#: cinder/brick/local_dev/lvm.py:546 cinder/brick/local_dev/lvm.py:609 #, python-format -msgid "Unable to find address %r" +msgid "Cmd :%s" msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:62 +#: cinder/brick/local_dev/lvm.py:77 cinder/brick/local_dev/lvm.py:159 +#: cinder/brick/local_dev/lvm.py:475 cinder/brick/local_dev/lvm.py:504 +#: cinder/brick/local_dev/lvm.py:547 cinder/brick/local_dev/lvm.py:610 #, python-format -msgid "Network does not have %s action" +msgid "StdOut :%s" msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:70 +#: cinder/brick/local_dev/lvm.py:78 cinder/brick/local_dev/lvm.py:160 +#: cinder/brick/local_dev/lvm.py:476 cinder/brick/local_dev/lvm.py:505 +#: cinder/brick/local_dev/lvm.py:548 cinder/brick/local_dev/lvm.py:611 #, python-format -msgid "Disassociating network with id %s" +msgid "StdErr :%s" msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:74 -#: cinder/api/openstack/compute/contrib/networks.py:91 -#: cinder/api/openstack/compute/contrib/networks.py:101 -msgid "Network not found" -msgstr "" +#: cinder/brick/local_dev/lvm.py:82 +#, fuzzy, python-format +msgid "Unable to locate Volume Group %s" +msgstr "Nicht möglich volume %s zufinden" -#: cinder/api/openstack/compute/contrib/networks.py:87 -#, python-format -msgid "Showing network with id 
%s" +#: cinder/brick/local_dev/lvm.py:157 +msgid "Error querying thin pool about data_percent" msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:97 +#: cinder/brick/local_dev/lvm.py:370 #, python-format -msgid "Deleting network with id %s" +msgid "Unable to find VG: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/scheduler_hints.py:41 -msgid "Malformed scheduler_hints attribute" +#: cinder/brick/local_dev/lvm.py:420 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:222 -msgid "Security group id should be integer" +#: cinder/brick/local_dev/lvm.py:473 +msgid "Error creating Volume" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:243 -msgid "Security group is still in use" +#: cinder/brick/local_dev/lvm.py:489 +#, python-format +msgid "Unable to find LV: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:295 -#, python-format -msgid "Security group %s already exists" +#: cinder/brick/local_dev/lvm.py:502 +msgid "Error creating snapshot" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:315 -#, python-format -msgid "Security group %s is not a string or unicode" +#: cinder/brick/local_dev/lvm.py:545 +msgid "Error activating LV" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:318 +#: cinder/brick/local_dev/lvm.py:563 #, python-format -msgid "Security group %s cannot be empty." +msgid "Error reported running lvremove: CMD: %(command)s, RESPONSE: %(response)s" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:321 -#, python-format -msgid "Security group %s should not be greater than 255 characters." +#: cinder/brick/local_dev/lvm.py:568 +msgid "Attempting udev settle and retry of lvremove..." msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:348 -msgid "Parent group id is not integer" +#: cinder/brick/local_dev/lvm.py:608 +msgid "Error extending Volume" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:351 -#, python-format -msgid "Security group (%s) not found" +#: cinder/brick/remotefs/remotefs.py:39 +msgid "nfs_mount_point_base required" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:369 -msgid "Not enough parameters to build a valid rule." +#: cinder/brick/remotefs/remotefs.py:45 +msgid "glusterfs_mount_point_base required" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:376 +#: cinder/brick/remotefs/remotefs.py:86 #, python-format -msgid "This rule already exists in group %s" +msgid "Already mounted: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:414 -msgid "Parent or group id is not integer" +#: cinder/common/config.py:125 +msgid "Deploy v1 of the Cinder API." msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:507 -msgid "Rule id is not integer" +#: cinder/common/config.py:128 +msgid "Deploy v2 of the Cinder API." msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:510 -#, python-format -msgid "Rule (%s) not found" +#: cinder/common/sqlalchemyutils.py:66 +#: cinder/openstack/common/db/sqlalchemy/utils.py:72 +msgid "Id not in sort_keys; is sort_keys unique?" 
msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:574 -#: cinder/api/openstack/compute/contrib/security_groups.py:607 -msgid "Security group not specified" +#: cinder/common/sqlalchemyutils.py:114 +#: cinder/openstack/common/db/sqlalchemy/utils.py:120 +msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:578 -#: cinder/api/openstack/compute/contrib/security_groups.py:611 -msgid "Security group name cannot be empty" +#: cinder/compute/nova.py:97 +#, python-format +msgid "Novaclient connection created using URL: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/server_start_stop.py:45 -#, python-format -msgid "start instance %r" +#: cinder/db/sqlalchemy/api.py:63 +msgid "Use of empty request context is deprecated" msgstr "" -#: cinder/api/openstack/compute/contrib/server_start_stop.py:54 +#: cinder/db/sqlalchemy/api.py:190 #, python-format -msgid "stop instance %r" +msgid "Unrecognized read_deleted value '%s'" msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:73 -#: cinder/api/openstack/volume/volumes.py:106 +#: cinder/db/sqlalchemy/api.py:843 #, python-format -msgid "vol=%s" +msgid "Change will make usage less than 0 for the following resources: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:146 -#: cinder/api/openstack/volume/volumes.py:184 +#: cinder/db/sqlalchemy/api.py:1842 #, python-format -msgid "Delete volume with id: %s" +msgid "VolumeType %s deletion failed, VolumeType in use." msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:329 +#: cinder/db/sqlalchemy/api.py:2530 #, python-format -msgid "Attach volume %(volume_id)s to instance %(server_id)s at %(device)s" +msgid "No backup with id %s" msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:481 -#: cinder/api/openstack/volume/snapshots.py:110 +#: cinder/db/sqlalchemy/api.py:2615 +#, fuzzy +msgid "Volume must be available" +msgstr "Volume %s: wird erstellt" + +#: cinder/db/sqlalchemy/api.py:2639 #, python-format -msgid "Delete snapshot with id: %s" +msgid "Volume in unexpected state %s, expected awaiting-transfer" msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:524 -#: cinder/api/openstack/volume/snapshots.py:150 +#: cinder/db/sqlalchemy/api.py:2662 #, python-format -msgid "Create snapshot from volume %s" +msgid "" +"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " +"%(status)s, expected awaiting-transfer" msgstr "" -#: cinder/auth/fakeldap.py:33 -msgid "Attempted to instantiate singleton" +#: cinder/db/sqlalchemy/migration.py:37 +msgid "version should be an integer" msgstr "" -#: cinder/auth/ldapdriver.py:650 -#, python-format -msgid "" -"Attempted to remove the last member of a group. Deleting the group at %s " -"instead." +#: cinder/db/sqlalchemy/migration.py:64 +msgid "Upgrade DB using Essex release first." msgstr "" -#: cinder/auth/manager.py:298 -#, python-format -msgid "Looking up user: %r" +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:240 +msgid "Exception while creating table." msgstr "" -#: cinder/auth/manager.py:302 -#, python-format -msgid "Failed authorization for access key %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:269 +msgid "Downgrade from initial Cinder install is unsupported." 
msgstr "" -#: cinder/auth/manager.py:308 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:49 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:74 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:105 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:45 +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:48 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:46 #, python-format -msgid "Using project name = user name (%s)" -msgstr "Verwende Project-Name = User-Name (%s)" +msgid "Table |%s| not created!" +msgstr "" -#: cinder/auth/manager.py:315 -#, python-format -msgid "failed authorization: no project named %(pjid)s (user=%(uname)s)" +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:127 +msgid "Dropping foreign key reservations_ibfk_1 failed." msgstr "" -#: cinder/auth/manager.py:324 -#, python-format -msgid "" -"Failed authorization: user %(uname)s not admin and not member of project " -"%(pjname)s" +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:133 +msgid "quota_classes table not dropped" msgstr "" -#: cinder/auth/manager.py:331 cinder/auth/manager.py:343 -#, python-format -msgid "user.secret: %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:140 +msgid "quota_usages table not dropped" msgstr "" -#: cinder/auth/manager.py:332 cinder/auth/manager.py:344 -#, python-format -msgid "expected_signature: %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:147 +msgid "reservations table not dropped" msgstr "" -#: cinder/auth/manager.py:333 cinder/auth/manager.py:345 -#, python-format -msgid "signature: %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:60 +msgid "Exception while creating table 'volume_glance_metadata'" msgstr "" -#: cinder/auth/manager.py:335 cinder/auth/manager.py:357 -#, python-format -msgid "Invalid signature for user %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:75 +msgid "volume_glance_metadata table not dropped" msgstr "" -#: cinder/auth/manager.py:353 -#, python-format -msgid "host_only_signature: %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:68 +msgid "backups table not dropped" msgstr "" -#: cinder/auth/manager.py:449 -msgid "Must specify project" +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:58 +msgid "snapshot_metadata table not dropped" msgstr "" -#: cinder/auth/manager.py:490 -#, python-format -msgid "Adding role %(role)s to user %(uid)s in project %(pid)s" +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:61 +msgid "transfers table not dropped" msgstr "" -#: cinder/auth/manager.py:493 -#, python-format -msgid "Adding sitewide role %(role)s to user %(uid)s" +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:31 +msgid "migrations table not dropped" msgstr "" -#: cinder/auth/manager.py:519 +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:61 #, python-format -msgid "Removing role %(role)s from user %(uid)s on project %(pid)s" +msgid "Table |%s| not created" msgstr "" -#: cinder/auth/manager.py:522 +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:37 #, python-format -msgid "Removing sitewide role %(role)s from 
user %(uid)s" +msgid "Exception while dropping table %s." msgstr "" -#: cinder/auth/manager.py:595 +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:100 #, python-format -msgid "Created project %(name)s with manager %(manager_user)s" +msgid "Exception while creating table %s." msgstr "" -#: cinder/auth/manager.py:613 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:34 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:43 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:49 #, python-format -msgid "modifying project %s" +msgid "Column |%s| not created!" msgstr "" -#: cinder/auth/manager.py:625 -#, python-format -msgid "Adding user %(uid)s to project %(pid)s" +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:92 +msgid "encryption_key_id column not dropped from volumes" msgstr "" -#: cinder/auth/manager.py:646 -#, python-format -msgid "Remove user %(uid)s from project %(pid)s" +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:100 +msgid "encryption_key_id column not dropped from snapshots" msgstr "" -#: cinder/auth/manager.py:676 -#, python-format -msgid "Deleting project %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:105 +msgid "volume_type_id column not dropped from snapshots" msgstr "" -#: cinder/auth/manager.py:734 -#, python-format -msgid "Created user %(rvname)s (admin: %(rvadmin)r)" +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:113 +msgid "encryption table not dropped" msgstr "" -#: cinder/auth/manager.py:743 -#, python-format -msgid "Deleting user %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:49 +msgid "Table quality_of_service_specs not created!" msgstr "" -#: cinder/auth/manager.py:753 -#, python-format -msgid "Access Key change for user %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:60 +msgid "Added qos_specs_id column to volume type table failed." msgstr "" -#: cinder/auth/manager.py:755 -#, python-format -msgid "Secret Key change for user %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:85 +msgid "Dropping foreign key volume_types_ibfk_1 failed" msgstr "" -#: cinder/auth/manager.py:757 -#, python-format -msgid "Admin status set to %(admin)r for user %(uid)s" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:93 +msgid "Dropping qos_specs_id column failed." msgstr "" -#: cinder/auth/manager.py:802 -#, python-format -msgid "No vpn data for project %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:100 +msgid "Dropping quality_of_service_specs table failed." msgstr "" -#: cinder/cloudpipe/pipelib.py:46 -msgid "Instance type for vpn instances" +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:59 +msgid "volume_admin_metadata table not dropped" msgstr "" -#: cinder/cloudpipe/pipelib.py:49 -msgid "Template for cloudpipe instance boot script" +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:48 +msgid "" +"Found existing 'default' entries in the quota_classes table. Skipping " +"insertion of default values." msgstr "" -#: cinder/cloudpipe/pipelib.py:52 -msgid "Network to push into openvpn config" +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:72 +msgid "Added default quota class data into the DB." 
msgstr "" -#: cinder/cloudpipe/pipelib.py:55 -msgid "Netmask to push into openvpn config" +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:74 +msgid "Default quota class data not inserted into the DB." msgstr "" -#: cinder/cloudpipe/pipelib.py:107 +#: cinder/image/glance.py:161 cinder/image/glance.py:169 #, python-format -msgid "Launching VPN for %s" +msgid "Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s." msgstr "" -#: cinder/compute/api.py:141 -msgid "No compute host specified" +#: cinder/image/image_utils.py:94 cinder/image/image_utils.py:199 +msgid "'qemu-img info' parsing failed." msgstr "" -#: cinder/compute/api.py:144 +#: cinder/image/image_utils.py:101 #, python-format -msgid "Unable to find host for Instance %s" +msgid "fmt=%(fmt)s backed by: %(backing_file)s" msgstr "" -#: cinder/compute/api.py:192 +#: cinder/image/image_utils.py:109 cinder/image/image_utils.py:192 #, python-format msgid "" -"Quota exceeded for %(pid)s, tried to set %(num_metadata)s metadata " -"properties" +"Size is %(image_size)dGB and doesn't fit in a volume of size " +"%(volume_size)dGB." msgstr "" -#: cinder/compute/api.py:203 +#: cinder/image/image_utils.py:157 #, python-format -msgid "Quota exceeded for %(pid)s, metadata property key or value too long" +msgid "" +"qemu-img is not installed and image is of type %s. Only RAW images can " +"be used if qemu-img is not installed." msgstr "" -#: cinder/compute/api.py:257 -msgid "Cannot run any more instances of this type." +#: cinder/image/image_utils.py:164 +msgid "" +"qemu-img is not installed and the disk format is not specified. Only RAW" +" images can be used if qemu-img is not installed." msgstr "" -#: cinder/compute/api.py:259 +#: cinder/image/image_utils.py:178 #, python-format -msgid "Can only run %s more instances of this type." +msgid "Copying image from %(tmp)s to volume %(dest)s - size: %(size)s" msgstr "" -#: cinder/compute/api.py:261 +#: cinder/image/image_utils.py:206 #, python-format -msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances. " +msgid "fmt=%(fmt)s backed by:%(backing_file)s" msgstr "" -#: cinder/compute/api.py:310 -msgid "Creating a raw instance" +#: cinder/image/image_utils.py:224 +#, python-format +msgid "Converted to %(vol_format)s, but format is now %(file_format)s" msgstr "" -#: cinder/compute/api.py:312 +#: cinder/image/image_utils.py:260 #, python-format -msgid "Using Kernel=%(kernel_id)s, Ramdisk=%(ramdisk_id)s" +msgid "Converted to %(f1)s, but format is now %(f2)s" msgstr "" -#: cinder/compute/api.py:383 -#, python-format -msgid "Going to run %s instances..." 
+#: cinder/keymgr/conf_key_mgr.py:78 +msgid "" +"config option keymgr.fixed_key has not been defined: some operations may " +"fail unexpectedly" msgstr "" -#: cinder/compute/api.py:447 -#, python-format -msgid "bdm %s" +#: cinder/keymgr/conf_key_mgr.py:80 +msgid "keymgr.fixed_key not defined" msgstr "" -#: cinder/compute/api.py:474 +#: cinder/keymgr/conf_key_mgr.py:134 #, python-format -msgid "block_device_mapping %s" +msgid "Not deleting key %s" msgstr "" -#: cinder/compute/api.py:591 +#: cinder/openstack/common/eventlet_backdoor.py:140 #, python-format -msgid "Sending create to scheduler for %(pid)s/%(uid)s's" +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" msgstr "" -#: cinder/compute/api.py:871 -msgid "Going to try to soft delete instance" +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" msgstr "" -#: cinder/compute/api.py:891 -msgid "No host for instance, deleting immediately" +#: cinder/openstack/common/fileutils.py:64 +#, python-format +msgid "Reloading cached file %s" msgstr "" -#: cinder/compute/api.py:939 -msgid "Going to try to terminate instance" +#: cinder/openstack/common/gettextutils.py:252 +msgid "Message objects do not support addition." msgstr "" -#: cinder/compute/api.py:977 -msgid "Going to try to stop instance" +#: cinder/openstack/common/gettextutils.py:261 +msgid "" +"Message objects do not support str() because they may contain non-ascii " +"characters. Please use unicode() or translate() instead." msgstr "" -#: cinder/compute/api.py:996 -msgid "Going to try to start instance" +#: cinder/openstack/common/imageutils.py:96 +msgid "Snapshot list encountered but no header found!" msgstr "" -#: cinder/compute/api.py:1000 +#: cinder/openstack/common/lockutils.py:102 #, python-format -msgid "Instance %(instance_uuid)s is not stopped. (%(vm_state)s" +msgid "Could not release the acquired lock `%s`" msgstr "" -#: cinder/compute/api.py:1071 cinder/volume/api.py:173 -#: cinder/volume/volume_types.py:64 +#: cinder/openstack/common/lockutils.py:189 #, python-format -msgid "Searching by: %s" +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." msgstr "" -#: cinder/compute/api.py:1201 +#: cinder/openstack/common/lockutils.py:200 #, python-format -msgid "Image type not recognized %s" -msgstr "" - -#: cinder/compute/api.py:1369 -msgid "flavor_id is None. Assuming migration." +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." msgstr "" -#: cinder/compute/api.py:1377 +#: cinder/openstack/common/lockutils.py:227 #, python-format -msgid "" -"Old instance type %(current_instance_type_name)s, new instance type " -"%(new_instance_type_name)s" +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." msgstr "" -#: cinder/compute/api.py:1644 +#: cinder/openstack/common/lockutils.py:235 #, python-format -msgid "multiple fixedips exist, using the first: %s" +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." 
msgstr "" -#: cinder/compute/instance_types.py:57 cinder/compute/instance_types.py:65 -msgid "create arguments must be positive integers" +#: cinder/openstack/common/log.py:301 +#, python-format +msgid "Deprecated: %s" msgstr "" -#: cinder/compute/instance_types.py:76 cinder/volume/volume_types.py:41 +#: cinder/openstack/common/log.py:402 #, python-format -msgid "DB error: %s" +msgid "Error loading logging config %(log_config)s: %(err_msg)s" msgstr "" -#: cinder/compute/instance_types.py:86 +#: cinder/openstack/common/log.py:453 #, python-format -msgid "Instance type %s not found for deletion" +msgid "syslog facility must be one of: %s" msgstr "" -#: cinder/compute/manager.py:138 +#: cinder/openstack/common/log.py:623 #, python-format -msgid "check_instance_lock: decorating: |%s|" +msgid "Fatal call to deprecated config: %(msg)s" msgstr "" -#: cinder/compute/manager.py:140 +#: cinder/openstack/common/loopingcall.py:82 #, python-format -msgid "" -"check_instance_lock: arguments: |%(self)s| |%(context)s| " -"|%(instance_uuid)s|" +msgid "task run outlasted interval by %s sec" msgstr "" -#: cinder/compute/manager.py:144 -#, python-format -msgid "check_instance_lock: locked: |%s|" +#: cinder/openstack/common/loopingcall.py:89 +#: cinder/tests/brick/test_brick_connector.py:466 +msgid "in fixed duration looping call" msgstr "" -#: cinder/compute/manager.py:146 +#: cinder/openstack/common/loopingcall.py:129 #, python-format -msgid "check_instance_lock: admin: |%s|" +msgid "Dynamic looping call sleeping for %.02f seconds" msgstr "" -#: cinder/compute/manager.py:151 -#, python-format -msgid "check_instance_lock: executing: |%s|" +#: cinder/openstack/common/loopingcall.py:136 +msgid "in dynamic looping call" msgstr "" -#: cinder/compute/manager.py:155 +#: cinder/openstack/common/periodic_task.py:43 #, python-format -msgid "check_instance_lock: not executing |%s|" +msgid "Unexpected argument for periodic task creation: %(arg)s." msgstr "" -#: cinder/compute/manager.py:201 +#: cinder/openstack/common/periodic_task.py:134 #, python-format -msgid "Unable to load the virtualization driver: %s" +msgid "Skipping periodic task %(task)s because its interval is negative" msgstr "" -#: cinder/compute/manager.py:223 +#: cinder/openstack/common/periodic_task.py:139 #, python-format -msgid "" -"Instance %(instance_uuid)s has been destroyed from under us while trying " -"to set it to ERROR" +msgid "Skipping periodic task %(task)s because it is disabled" msgstr "" -#: cinder/compute/manager.py:240 +#: cinder/openstack/common/periodic_task.py:177 #, python-format -msgid "Current state is %(drv_state)s, state in DB is %(db_state)s." +msgid "Running periodic task %(full_task_name)s" msgstr "" -#: cinder/compute/manager.py:245 -msgid "Rebooting instance after cinder-compute restart." +#: cinder/openstack/common/periodic_task.py:186 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" msgstr "" -#: cinder/compute/manager.py:255 -msgid "Hypervisor driver does not support firewall rules" +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." 
msgstr "" -#: cinder/compute/manager.py:260 -msgid "Checking state" +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" msgstr "" -#: cinder/compute/manager.py:329 +#: cinder/openstack/common/policy.py:173 #, python-format -msgid "Setting up bdm %s" +msgid "Inheritance-based rules are deprecated; update _check_%s" msgstr "" -#: cinder/compute/manager.py:400 +#: cinder/openstack/common/policy.py:180 #, python-format -msgid "Instance %s already deleted from database. Attempting forceful vm deletion" +msgid "No handler for matches of kind %s" msgstr "" -#: cinder/compute/manager.py:406 +#: cinder/openstack/common/processutils.py:127 #, python-format -msgid "Exception encountered while terminating the instance %s" +msgid "Got unknown keyword args to utils.execute: %r" msgstr "" -#: cinder/compute/manager.py:444 +#: cinder/openstack/common/processutils.py:142 #, python-format -msgid "Instance %s not found." -msgstr "" +msgid "Running cmd (subprocess): %s" +msgstr "Führe Kommando (subprocess) aus: %s" -#: cinder/compute/manager.py:480 -msgid "Instance has already been created" -msgstr "Instanz wurde bereits erstellt" +#: cinder/openstack/common/processutils.py:167 +#: cinder/openstack/common/processutils.py:239 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:345 +#, python-format +msgid "Result was %s" +msgstr "Ergebnis war %s" -#: cinder/compute/manager.py:523 +#: cinder/openstack/common/processutils.py:179 #, python-format -msgid "" -"image_id=%(image_id)s, image_size_bytes=%(size_bytes)d, " -"allowed_size_bytes=%(allowed_size_bytes)d" +msgid "%r failed. Retrying." msgstr "" -#: cinder/compute/manager.py:528 +#: cinder/openstack/common/processutils.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:318 #, python-format -msgid "" -"Image '%(image_id)s' size %(size_bytes)d exceeded instance_type allowed " -"size %(allowed_size_bytes)d" +msgid "Running cmd (SSH): %s" msgstr "" -#: cinder/compute/manager.py:538 -msgid "Starting instance..." +#: cinder/openstack/common/processutils.py:220 +msgid "Environment not supported over SSH" msgstr "" -#: cinder/compute/manager.py:548 -msgid "Skipping network allocation for instance" +#: cinder/openstack/common/processutils.py:224 +msgid "process_input not supported over SSH" msgstr "" -#: cinder/compute/manager.py:561 -msgid "Instance failed network setup" +#: cinder/openstack/common/service.py:175 +#: cinder/openstack/common/service.py:269 +#, python-format +msgid "Caught %s, exiting" msgstr "" -#: cinder/compute/manager.py:565 -#, python-format -msgid "Instance network_info: |%s|" +#: cinder/openstack/common/service.py:187 +msgid "Exception during rpc cleanup." 
msgstr "" -#: cinder/compute/manager.py:578 -msgid "Instance failed block device setup" +#: cinder/openstack/common/service.py:238 +msgid "Parent process has died unexpectedly, exiting" msgstr "" -#: cinder/compute/manager.py:594 -msgid "Instance failed to spawn" +#: cinder/openstack/common/service.py:275 +msgid "Unhandled exception" msgstr "" -#: cinder/compute/manager.py:615 -msgid "Deallocating network for instance" +#: cinder/openstack/common/service.py:308 +msgid "Forking too fast, sleeping" msgstr "" -#: cinder/compute/manager.py:672 +#: cinder/openstack/common/service.py:327 #, python-format -msgid "%(action_str)s instance" +msgid "Started child %d" msgstr "" -#: cinder/compute/manager.py:699 +#: cinder/openstack/common/service.py:337 #, python-format -msgid "Ignoring DiskNotFound: %s" +msgid "Starting %d workers" msgstr "" -#: cinder/compute/manager.py:708 +#: cinder/openstack/common/service.py:354 #, python-format -msgid "terminating bdm %s" +msgid "Child %(pid)d killed by signal %(sig)d" msgstr "" -#: cinder/compute/manager.py:742 cinder/compute/manager.py:1328 -#: cinder/compute/manager.py:1416 cinder/compute/manager.py:2501 +#: cinder/openstack/common/service.py:358 #, python-format -msgid "%s. Setting instance vm_state to ERROR" +msgid "Child %(pid)s exited with status %(code)d" msgstr "" -#: cinder/compute/manager.py:811 +#: cinder/openstack/common/service.py:362 #, python-format -msgid "" -"Cannot rebuild instance [%(instance_uuid)s], because the given image does" -" not exist." +msgid "pid %d not in child list" msgstr "" -#: cinder/compute/manager.py:816 +#: cinder/openstack/common/service.py:392 #, python-format -msgid "Cannot rebuild instance [%(instance_uuid)s]: %(exc)s" +msgid "Caught %s, stopping children" msgstr "" -#: cinder/compute/manager.py:823 +#: cinder/openstack/common/service.py:410 #, python-format -msgid "Rebuilding instance %s" +msgid "Waiting on %d children to exit" msgstr "" -#: cinder/compute/manager.py:876 +#: cinder/openstack/common/sslutils.py:98 #, python-format -msgid "Rebooting instance %s" +msgid "Invalid SSL version : %s" msgstr "" -#: cinder/compute/manager.py:891 +#: cinder/openstack/common/strutils.py:86 #, python-format -msgid "" -"trying to reboot a non-running instance: %(instance_uuid)s (state: " -"%(state)s expected: %(running)s)" +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" msgstr "" -#: cinder/compute/manager.py:933 +#: cinder/openstack/common/strutils.py:182 #, python-format -msgid "instance %s: snapshotting" +msgid "Invalid string format: %s" msgstr "" -#: cinder/compute/manager.py:939 +#: cinder/openstack/common/strutils.py:189 #, python-format -msgid "" -"trying to snapshot a non-running instance: %(instance_uuid)s (state: " -"%(state)s expected: %(running)s)" +msgid "Unknown byte multiplier: %s" msgstr "" -#: cinder/compute/manager.py:995 +#: cinder/openstack/common/versionutils.py:69 #, python-format -msgid "Found %(num_images)d images (rotation: %(rotation)d)" +msgid "" +"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and " +"may be removed in %(remove_in)s." msgstr "" -#: cinder/compute/manager.py:1001 +#: cinder/openstack/common/versionutils.py:73 #, python-format -msgid "Rotating out %d backups" +msgid "" +"%(what)s is deprecated as of %(as_of)s and may be removed in " +"%(remove_in)s. It will not be superseded." 
msgstr "" -#: cinder/compute/manager.py:1005 -#, python-format -msgid "Deleting image %s" +#: cinder/openstack/common/crypto/utils.py:29 +msgid "An unknown error occurred in crypto utils." msgstr "" -#: cinder/compute/manager.py:1035 +#: cinder/openstack/common/crypto/utils.py:36 #, python-format -msgid "Failed to set admin password. Instance %s is not running" +msgid "Block size of %(given)d is too big, max = %(maximum)d" msgstr "" -#: cinder/compute/manager.py:1041 +#: cinder/openstack/common/crypto/utils.py:45 #, python-format -msgid "Instance %s: Root password set" +msgid "Length of %(given)d is too long, max = %(maximum)d" msgstr "" -#: cinder/compute/manager.py:1050 -msgid "set_admin_password is not implemented by this driver." +#: cinder/openstack/common/db/exception.py:44 +msgid "Invalid Parameter: Unicode is not supported by the current database." msgstr "" -#: cinder/compute/manager.py:1064 -msgid "Error setting admin password" +#: cinder/openstack/common/db/sqlalchemy/session.py:487 +msgid "DB exception wrapped." msgstr "" -#: cinder/compute/manager.py:1079 +#: cinder/openstack/common/db/sqlalchemy/session.py:538 #, python-format -msgid "" -"trying to inject a file into a non-running instance: %(instance_uuid)s " -"(state: %(current_power_state)s expected: %(expected_state)s)" +msgid "Got mysql server has gone away: %s" msgstr "" -#: cinder/compute/manager.py:1084 +#: cinder/openstack/common/db/sqlalchemy/session.py:610 #, python-format -msgid "instance %(instance_uuid)s: injecting file to %(path)s" +msgid "SQL connection failed. %s attempts left." msgstr "" -#: cinder/compute/manager.py:1098 -#, python-format -msgid "" -"trying to update agent on a non-running instance: %(instance_uuid)s " -"(state: %(current_power_state)s expected: %(expected_state)s)" +#: cinder/openstack/common/db/sqlalchemy/utils.py:33 +msgid "Sort key supplied was not valid." msgstr "" -#: cinder/compute/manager.py:1103 +#: cinder/openstack/common/notifier/api.py:129 #, python-format -msgid "instance %(instance_uuid)s: updating agent to %(url)s" +msgid "%s not in valid priorities" msgstr "" -#: cinder/compute/manager.py:1116 +#: cinder/openstack/common/notifier/api.py:145 #, python-format -msgid "instance %s: rescuing" -msgstr "Instanz %s: Rettung" +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" -#: cinder/compute/manager.py:1141 +#: cinder/openstack/common/notifier/api.py:164 #, python-format -msgid "instance %s: unrescuing" +msgid "Failed to load notifier %s. These notifications will not be sent." msgstr "" -#: cinder/compute/manager.py:1270 -msgid "destination same as source!" +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead." msgstr "" -#: cinder/compute/manager.py:1287 +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 #, python-format -msgid "instance %s: migrating" +msgid "Could not send notification to %(topic)s. Payload=%(message)s" msgstr "" -#: cinder/compute/manager.py:1471 +#: cinder/openstack/common/rpc/__init__.py:105 #, python-format -msgid "instance %s: pausing" -msgstr "Instanz %s pausiert" +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." 
+msgstr "" -#: cinder/compute/manager.py:1489 -#, python-format -msgid "instance %s: unpausing" -msgstr "Instanz %s wird fortgesetzt" +#: cinder/openstack/common/rpc/amqp.py:83 +msgid "Pool creating new connection" +msgstr "" -#: cinder/compute/manager.py:1525 +#: cinder/openstack/common/rpc/amqp.py:208 #, python-format -msgid "instance %s: retrieving diagnostics" +msgid "no calling threads waiting for msg_id : %s, message : %s" msgstr "" -#: cinder/compute/manager.py:1534 +#: cinder/openstack/common/rpc/amqp.py:216 #, python-format -msgid "instance %s: suspending" +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." msgstr "" -#: cinder/compute/manager.py:1556 +#: cinder/openstack/common/rpc/amqp.py:299 #, python-format -msgid "instance %s: resuming" +msgid "unpacked context: %s" msgstr "" -#: cinder/compute/manager.py:1579 +#: cinder/openstack/common/rpc/amqp.py:345 #, python-format -msgid "instance %s: locking" +msgid "UNIQUE_ID is %s." msgstr "" -#: cinder/compute/manager.py:1588 +#: cinder/openstack/common/rpc/amqp.py:414 #, python-format -msgid "instance %s: unlocking" +msgid "received %s" msgstr "" -#: cinder/compute/manager.py:1596 +#: cinder/openstack/common/rpc/amqp.py:422 #, python-format -msgid "instance %s: getting locked state" -msgstr "" +msgid "no method for message: %s" +msgstr "keine Methode für diese Nachricht gefunden: %s" -#: cinder/compute/manager.py:1606 +#: cinder/openstack/common/rpc/amqp.py:423 #, python-format -msgid "instance %s: reset network" -msgstr "" +msgid "No method for message: %s" +msgstr "keine Methode für diese Nachricht gefunden: %s" -#: cinder/compute/manager.py:1614 +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:280 #, python-format -msgid "instance %s: inject network info" +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:459 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" msgstr "" -#: cinder/compute/manager.py:1618 +#: cinder/openstack/common/rpc/amqp.py:594 #, python-format -msgid "network_info to inject: |%s|" +msgid "Making synchronous call on %s ..." msgstr "" -#: cinder/compute/manager.py:1655 +#: cinder/openstack/common/rpc/amqp.py:597 +#, python-format +msgid "MSG_ID is %s" +msgstr "MSG_ID ist %s" + +#: cinder/openstack/common/rpc/amqp.py:631 #, python-format -msgid "instance %s: getting vnc console" +msgid "Making asynchronous cast on %s..." msgstr "" -#: cinder/compute/manager.py:1685 +#: cinder/openstack/common/rpc/amqp.py:640 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:668 #, python-format -msgid "Booting with volume %(volume_id)s at %(mountpoint)s" +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." msgstr "" -#: cinder/compute/manager.py:1703 +#: cinder/openstack/common/rpc/common.py:104 #, python-format msgid "" -"instance %(instance_uuid)s: attaching volume %(volume_id)s to " -"%(mountpoint)s" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." 
msgstr "" -#: cinder/compute/manager.py:1705 +#: cinder/openstack/common/rpc/common.py:121 #, python-format -msgid "Attaching volume %(volume_id)s to %(mountpoint)s" +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" msgstr "" -#: cinder/compute/manager.py:1714 -#, python-format -msgid "instance %(instance_uuid)s: attach failed %(mountpoint)s, removing" +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" msgstr "" -#: cinder/compute/manager.py:1724 +#: cinder/openstack/common/rpc/common.py:143 #, python-format -msgid "Attach failed %(mountpoint)s, removing" +msgid "Found duplicate message(%(msg_id)s). Skipping it." msgstr "" -#: cinder/compute/manager.py:1752 -#, python-format -msgid "Detach volume %(volume_id)s from mountpoint %(mp)s" +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." msgstr "" -#: cinder/compute/manager.py:1756 +#: cinder/openstack/common/rpc/common.py:151 #, python-format -msgid "Detaching volume from unknown instance %s" +msgid "Specified RPC version, %(version)s, not supported by this endpoint." msgstr "" -#: cinder/compute/manager.py:1822 +#: cinder/openstack/common/rpc/common.py:156 #, python-format msgid "" -"Creating tmpfile %s to notify to other compute nodes that they should " -"mount the same storage." -msgstr "" - -#: cinder/compute/manager.py:1884 -msgid "Instance has no volume." +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." msgstr "" -#: cinder/compute/manager.py:1916 +#: cinder/openstack/common/rpc/common.py:280 #, python-format -msgid "plug_vifs() failed %(cnt)d.Retry up to %(max_retry)d for %(hostname)s." +msgid "Failed to sanitize %(item)s. Key error %(err)s" msgstr "" -#: cinder/compute/manager.py:1973 +#: cinder/openstack/common/rpc/common.py:302 #, python-format -msgid "Pre live migration failed at %(dest)s" +msgid "Returning exception %s to caller" msgstr "" -#: cinder/compute/manager.py:2000 -msgid "post_live_migration() is started.." +#: cinder/openstack/common/rpc/impl_kombu.py:166 +#: cinder/openstack/common/rpc/impl_qpid.py:164 +msgid "Failed to process message... skipping it." msgstr "" -#: cinder/compute/manager.py:2030 -msgid "No floating_ip found" +#: cinder/openstack/common/rpc/impl_kombu.py:477 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" msgstr "" -#: cinder/compute/manager.py:2038 -msgid "No floating_ip found." +#: cinder/openstack/common/rpc/impl_kombu.py:499 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" msgstr "" -#: cinder/compute/manager.py:2040 +#: cinder/openstack/common/rpc/impl_kombu.py:536 #, python-format msgid "" -"Live migration: Unexpected error: cannot inherit floating ip.\n" -"%(e)s" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" msgstr "" -#: cinder/compute/manager.py:2073 +#: cinder/openstack/common/rpc/impl_kombu.py:552 #, python-format -msgid "Migrating instance to %(dest)s finished successfully." -msgstr "" - -#: cinder/compute/manager.py:2075 msgid "" -"You may see the error \"libvirt: QEMU error: Domain not found: no domain " -"with matching name.\" This error can be safely ignored." +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." 
msgstr "" -#: cinder/compute/manager.py:2090 -msgid "Post operation of migration started" +#: cinder/openstack/common/rpc/impl_kombu.py:606 +#: cinder/openstack/common/rpc/impl_qpid.py:507 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" msgstr "" -#: cinder/compute/manager.py:2226 +#: cinder/openstack/common/rpc/impl_kombu.py:624 +#: cinder/openstack/common/rpc/impl_qpid.py:522 #, python-format -msgid "Updated the info_cache for instance %s" +msgid "Timed out waiting for RPC response: %s" msgstr "" -#: cinder/compute/manager.py:2255 -msgid "Updating bandwidth usage cache" +#: cinder/openstack/common/rpc/impl_kombu.py:628 +#: cinder/openstack/common/rpc/impl_qpid.py:526 +#, python-format +msgid "Failed to consume message from queue: %s" msgstr "" -#: cinder/compute/manager.py:2277 -msgid "Updating host status" +#: cinder/openstack/common/rpc/impl_kombu.py:667 +#: cinder/openstack/common/rpc/impl_qpid.py:561 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" msgstr "" -#: cinder/compute/manager.py:2305 +#: cinder/openstack/common/rpc/impl_qpid.py:84 #, python-format -msgid "" -"Found %(num_db_instances)s in the database and %(num_vm_instances)s on " -"the hypervisor." +msgid "Invalid value for qpid_topology_version: %d" msgstr "" -#: cinder/compute/manager.py:2331 +#: cinder/openstack/common/rpc/impl_qpid.py:455 #, python-format -msgid "" -"During the sync_power process the instance %(uuid)s has moved from host " -"%(src)s to host %(dst)s" +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" msgstr "" -#: cinder/compute/manager.py:2344 +#: cinder/openstack/common/rpc/impl_qpid.py:461 #, python-format -msgid "" -"Instance %s is in the process of migrating to this host. Wait next " -"sync_power cycle before setting power state to NOSTATE" +msgid "Connected to AMQP server on %s" msgstr "" -#: cinder/compute/manager.py:2350 -msgid "" -"Instance found in database but not known by hypervisor. Setting power " -"state to NOSTATE" +#: cinder/openstack/common/rpc/impl_qpid.py:474 +msgid "Re-established AMQP queues" msgstr "" -#: cinder/compute/manager.py:2380 -msgid "FLAGS.reclaim_instance_interval <= 0, skipping..." +#: cinder/openstack/common/rpc/impl_qpid.py:534 +msgid "Error processing message. Skipping it." msgstr "" -#: cinder/compute/manager.py:2392 -msgid "Reclaiming deleted instance" +#: cinder/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." msgstr "" -#: cinder/compute/manager.py:2458 +#: cinder/openstack/common/rpc/impl_zmq.py:101 #, python-format -msgid "" -"Detected instance with name label '%(name)s' which is marked as DELETED " -"but still present on host." +msgid "Deserializing: %s" msgstr "" -#: cinder/compute/manager.py:2465 +#: cinder/openstack/common/rpc/impl_zmq.py:136 #, python-format -msgid "" -"Destroying instance with name label '%(name)s' which is marked as DELETED" -" but still present on host." 
+msgid "Connecting to %(addr)s with %(type)s" msgstr "" -#: cinder/compute/manager.py:2472 +#: cinder/openstack/common/rpc/impl_zmq.py:137 #, python-format -msgid "Unrecognized value '%(action)s' for FLAGS.running_deleted_instance_action" +msgid "-> Subscribed to %(subscribe)s" msgstr "" -#: cinder/compute/manager.py:2542 +#: cinder/openstack/common/rpc/impl_zmq.py:138 #, python-format -msgid "" -"Aggregate %(aggregate_id)s: unrecoverable state during operation on " -"%(host)s" +msgid "-> bind: %(bind)s" msgstr "" -#: cinder/compute/utils.py:142 -msgid "v4 subnets are required for legacy nw_info" +#: cinder/openstack/common/rpc/impl_zmq.py:146 +msgid "Could not open socket." msgstr "" -#: cinder/console/manager.py:77 cinder/console/vmrc_manager.py:70 -msgid "Adding console" +#: cinder/openstack/common/rpc/impl_zmq.py:158 +#, python-format +msgid "Subscribing to %s" msgstr "" -#: cinder/console/manager.py:97 -#, python-format -msgid "Tried to remove non-existent console %(console_id)s." +#: cinder/openstack/common/rpc/impl_zmq.py:200 +msgid "You cannot recv on this socket." msgstr "" -#: cinder/console/vmrc_manager.py:122 -#, python-format -msgid "Tried to remove non-existent console %(console_id)s." +#: cinder/openstack/common/rpc/impl_zmq.py:205 +msgid "You cannot send on this socket." msgstr "" -#: cinder/console/vmrc_manager.py:125 +#: cinder/openstack/common/rpc/impl_zmq.py:267 #, python-format -msgid "Removing console %(console_id)s." +msgid "Running func with context: %s" msgstr "" -#: cinder/console/xvp.py:98 -msgid "Rebuilding xvp conf" +#: cinder/openstack/common/rpc/impl_zmq.py:305 +msgid "Sending reply" msgstr "" -#: cinder/console/xvp.py:116 -#, python-format -msgid "Re-wrote %s" +#: cinder/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." msgstr "" -#: cinder/console/xvp.py:121 -msgid "Stopping xvp" +#: cinder/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" msgstr "" -#: cinder/console/xvp.py:134 -msgid "Starting xvp" +#: cinder/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" msgstr "" -#: cinder/console/xvp.py:141 -#, python-format -msgid "Error starting xvp: %s" +#: cinder/openstack/common/rpc/impl_zmq.py:387 +msgid "Consuming socket" msgstr "" -#: cinder/console/xvp.py:144 -msgid "Restarting xvp" +#: cinder/openstack/common/rpc/impl_zmq.py:437 +#, python-format +msgid "Creating proxy for topic: %s" msgstr "" -#: cinder/console/xvp.py:146 -msgid "xvp not running..." +#: cinder/openstack/common/rpc/impl_zmq.py:443 +msgid "Topic contained dangerous characters." msgstr "" -#: cinder/consoleauth/manager.py:63 -#, python-format -msgid "Deleting Expired Token: (%s)" +#: cinder/openstack/common/rpc/impl_zmq.py:475 +msgid "Topic socket file creation failed." msgstr "" -#: cinder/consoleauth/manager.py:75 +#: cinder/openstack/common/rpc/impl_zmq.py:481 #, python-format -msgid "Received Token: %(token)s, %(token_dict)s)" +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." 
msgstr "" -#: cinder/consoleauth/manager.py:79 +#: cinder/openstack/common/rpc/impl_zmq.py:497 #, python-format -msgid "Checking Token: %(token)s, %(token_valid)s)" +msgid "Required IPC directory does not exist at %s" msgstr "" -#: cinder/db/sqlalchemy/api.py:57 -msgid "Use of empty request context is deprecated" +#: cinder/openstack/common/rpc/impl_zmq.py:506 +#, python-format +msgid "Permission denied to IPC directory at %s" msgstr "" -#: cinder/db/sqlalchemy/api.py:198 -#, python-format -msgid "Unrecognized read_deleted value '%s'" +#: cinder/openstack/common/rpc/impl_zmq.py:509 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." msgstr "" -#: cinder/db/sqlalchemy/api.py:516 cinder/db/sqlalchemy/api.py:551 +#: cinder/openstack/common/rpc/impl_zmq.py:543 #, python-format -msgid "No ComputeNode for %(host)s" +msgid "CONSUMER RECEIVED DATA: %s" msgstr "" -#: cinder/db/sqlalchemy/api.py:4019 cinder/db/sqlalchemy/api.py:4045 -#, python-format -msgid "No backend config with id %(sm_backend_id)s" +#: cinder/openstack/common/rpc/impl_zmq.py:562 +msgid "ZMQ Envelope version unsupported or unknown." msgstr "" -#: cinder/db/sqlalchemy/api.py:4103 -#, python-format -msgid "No sm_flavor called %(sm_flavor)s" +#: cinder/openstack/common/rpc/impl_zmq.py:590 +msgid "Skipping topic registration. Already registered." msgstr "" -#: cinder/db/sqlalchemy/api.py:4147 +#: cinder/openstack/common/rpc/impl_zmq.py:597 #, python-format -msgid "No sm_volume with id %(volume_id)s" +msgid "Consumer is a zmq.%s" msgstr "" -#: cinder/db/sqlalchemy/migration.py:66 -msgid "python-migrate is not installed. Exiting." +#: cinder/openstack/common/rpc/impl_zmq.py:649 +msgid "Creating payload" msgstr "" -#: cinder/db/sqlalchemy/migration.py:78 -msgid "version should be an integer" +#: cinder/openstack/common/rpc/impl_zmq.py:662 +msgid "Creating queue socket for reply waiter" msgstr "" -#: cinder/db/sqlalchemy/session.py:137 -#, python-format -msgid "SQL connection failed. %s attempts left." +#: cinder/openstack/common/rpc/impl_zmq.py:675 +msgid "Sending cast" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:48 -msgid "interface column not added to networks table" +#: cinder/openstack/common/rpc/impl_zmq.py:678 +msgid "Cast sent; Waiting reply" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:80 -#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:54 -#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:61 -#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:48 -#, python-format -msgid "Table |%s| not created!" -msgstr "" +#: cinder/openstack/common/rpc/impl_zmq.py:681 +#, fuzzy, python-format +msgid "Received message: %s" +msgstr "keine Methode für diese Nachricht gefunden: %s" -#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:87 -msgid "VIF column not added to fixed_ips table" +#: cinder/openstack/common/rpc/impl_zmq.py:682 +msgid "Unpacking response" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:97 -#, python-format -msgid "join list for moving mac_addresses |%s|" +#: cinder/openstack/common/rpc/impl_zmq.py:691 +msgid "Unsupported or unknown ZMQ envelope returned." 
msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:39 -#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:60 -#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:61 -#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:99 -msgid "foreign key constraint couldn't be added" +#: cinder/openstack/common/rpc/impl_zmq.py:698 +msgid "RPC Message Invalid." msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:58 -msgid "foreign key constraint couldn't be dropped" +#: cinder/openstack/common/rpc/impl_zmq.py:721 +#, python-format +msgid "%(msg)s" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/045_add_network_priority.py:34 -msgid "priority column not added to networks table" +#: cinder/openstack/common/rpc/impl_zmq.py:724 +#, python-format +msgid "Sending message(s) to: %s" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:41 -#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:42 -#: cinder/db/sqlalchemy/migrate_repo/versions/064_change_instance_id_to_uuid_in_instance_actions.py:56 -#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:68 -msgid "foreign key constraint couldn't be removed" +#: cinder/openstack/common/rpc/impl_zmq.py:728 +msgid "No matchmaker results. Not casting." msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/049_add_instances_progress.py:34 -msgid "progress column not added to instances table" +#: cinder/openstack/common/rpc/impl_zmq.py:731 +msgid "No match from matchmaker." msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/055_convert_flavor_id_to_str.py:97 +#: cinder/openstack/common/rpc/impl_zmq.py:771 #, python-format -msgid "" -"Could not cast flavorid to integer: %s. Set flavorid to an integer-like " -"string to downgrade." -msgstr "" - -#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:69 -msgid "instance_info_caches tables not dropped" +msgid "topic is %s." msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/069_block_migration.py:41 -msgid "progress column not added to compute_nodes table" +#: cinder/openstack/common/rpc/impl_zmq.py:815 +#, python-format +msgid "rpc_zmq_matchmaker = %(orig)s is deprecated; use %(new)s instead" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:76 -msgid "dns_domains table not dropped" +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:60 -msgid "quota_classes table not dropped" +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." msgstr "" -#: cinder/image/glance.py:147 -msgid "Connection error contacting glance server, retrying" +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" msgstr "" -#: cinder/image/glance.py:153 cinder/network/quantum/melange_connection.py:104 -msgid "Maximum attempts reached" +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." 
msgstr "" -#: cinder/image/glance.py:278 +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#: cinder/openstack/common/rpc/matchmaker_ring.py:79 +#: cinder/openstack/common/rpc/matchmaker_ring.py:97 #, python-format -msgid "Creating image in Glance. Metadata passed in %s" +msgid "No key defining hosts for topic '%s', see ringfile" msgstr "" -#: cinder/image/glance.py:281 +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:54 #, python-format -msgid "Metadata after formatting for Glance %s" +msgid "extra_spec requirement '%(req)s' does not match '%(cap)s'" msgstr "" -#: cinder/image/glance.py:289 +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:67 #, python-format -msgid "Metadata returned from Glance formatted for Base %s" +msgid "%(host_state)s fails resource_type extra_specs requirements" msgstr "" -#: cinder/image/glance.py:331 cinder/image/glance.py:335 -msgid "Not the image owner" +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:43 +msgid "Re-scheduling is disabled." msgstr "" -#: cinder/image/glance.py:410 +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:52 #, python-format -msgid "%(timestamp)s does not follow any of the signatures: %(iso_formats)s" +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" msgstr "" -#: cinder/image/s3.py:309 -#, python-format -msgid "Failed to download %(image_location)s to %(image_path)s" +#: cinder/scheduler/driver.py:69 +msgid "Must implement host_passes_filters" msgstr "" -#: cinder/image/s3.py:328 -#, python-format -msgid "Failed to decrypt %(image_location)s to %(image_path)s" +#: cinder/scheduler/driver.py:74 +msgid "Must implement find_retype_host" msgstr "" -#: cinder/image/s3.py:340 -#, python-format -msgid "Failed to untar %(image_location)s to %(image_path)s" +#: cinder/scheduler/driver.py:78 +msgid "Must implement a fallback schedule" msgstr "" -#: cinder/image/s3.py:353 -#, python-format -msgid "Failed to upload %(image_location)s to %(image_path)s" +#: cinder/scheduler/driver.py:82 +msgid "Must implement schedule_create_volume" msgstr "" -#: cinder/image/s3.py:379 +#: cinder/scheduler/filter_scheduler.py:98 #, python-format -msgid "Failed to decrypt private key: %s" +msgid "cannot place volume %(id)s on %(host)s" msgstr "" -#: cinder/image/s3.py:387 +#: cinder/scheduler/filter_scheduler.py:114 #, python-format -msgid "Failed to decrypt initialization vector: %s" +msgid "No valid hosts for volume %(id)s with type %(type)s" msgstr "" -#: cinder/image/s3.py:398 +#: cinder/scheduler/filter_scheduler.py:125 #, python-format -msgid "Failed to decrypt image file %(image_file)s: %(err)s" +msgid "" +"Current host not valid for volume %(id)s with type %(type)s, migration " +"not allowed" msgstr "" -#: cinder/image/s3.py:410 -msgid "Unsafe filenames in image" +#: cinder/scheduler/filter_scheduler.py:156 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" msgstr "" -#: cinder/ipv6/account_identifier.py:38 cinder/ipv6/rfc2462.py:34 +#: cinder/scheduler/filter_scheduler.py:174 #, python-format -msgid "Bad mac for to_global_ipv6: %s" +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" msgstr "" -#: cinder/ipv6/account_identifier.py:40 cinder/ipv6/rfc2462.py:36 +#: cinder/scheduler/filter_scheduler.py:207 #, python-format -msgid "Bad prefix for to_global_ipv6: %s" +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume 
%(volume_id)s" msgstr "" -#: cinder/ipv6/account_identifier.py:42 +#: cinder/scheduler/filter_scheduler.py:259 #, python-format -msgid "Bad project_id for to_global_ipv6: %s" +msgid "Filtered %s" msgstr "" -#: cinder/network/ldapdns.py:321 -msgid "This driver only supports type 'a' entries." +#: cinder/scheduler/filter_scheduler.py:276 +#, python-format +msgid "Choosing %s" msgstr "" -#: cinder/network/linux_net.py:166 +#: cinder/scheduler/host_manager.py:264 #, python-format -msgid "Attempted to remove chain %s which does not exist" +msgid "Ignoring %(service_name)s service update from %(host)s" msgstr "" -#: cinder/network/linux_net.py:192 +#: cinder/scheduler/host_manager.py:269 #, python-format -msgid "Unknown chain: %r" +msgid "Received %(service_name)s service update from %(host)s." msgstr "" -#: cinder/network/linux_net.py:215 +#: cinder/scheduler/host_manager.py:294 #, python-format -msgid "" -"Tried to remove rule that was not there: %(chain)r %(rule)r %(wrap)r " -"%(top)r" +msgid "volume service is down or disabled. (host: %s)" msgstr "" -#: cinder/network/linux_net.py:335 -msgid "IPTablesManager.apply completed with success" +#: cinder/scheduler/manager.py:63 +msgid "" +"ChanceScheduler and SimpleScheduler have been deprecated due to lack of " +"support for advanced features like: volume types, volume encryption, QoS " +"etc. These two schedulers can be fully replaced by FilterScheduler with " +"certain combination of filters and weighers." msgstr "" -#: cinder/network/linux_net.py:694 -#, python-format -msgid "Hupping dnsmasq threw %s" +#: cinder/scheduler/manager.py:98 cinder/scheduler/manager.py:100 +msgid "Failed to create scheduler manager volume flow" msgstr "" -#: cinder/network/linux_net.py:696 -#, python-format -msgid "Pid %d is stale, relaunching dnsmasq" +#: cinder/scheduler/manager.py:159 +msgid "New volume type not specified in request_spec." msgstr "" -#: cinder/network/linux_net.py:756 +#: cinder/scheduler/manager.py:174 #, python-format -msgid "killing radvd threw %s" +msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." msgstr "" -#: cinder/network/linux_net.py:758 +#: cinder/scheduler/manager.py:192 #, python-format -msgid "Pid %d is stale, relaunching radvd" +msgid "Failed to schedule_%(method)s: %(ex)s" msgstr "" -#: cinder/network/linux_net.py:967 +#: cinder/scheduler/scheduler_options.py:68 #, python-format -msgid "Starting VLAN inteface %s" +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" msgstr "" -#: cinder/network/linux_net.py:999 +#: cinder/scheduler/scheduler_options.py:78 #, python-format -msgid "Starting Bridge interface for %s" +msgid "Could not decode scheduler options: '%s'" msgstr "" -#: cinder/network/linux_net.py:1142 -#, python-format -msgid "Starting bridge %s " +#: cinder/scheduler/filters/capacity_filter.py:43 +msgid "Free capacity not set: volume node info collection broken." 
msgstr "" -#: cinder/network/linux_net.py:1149 +#: cinder/scheduler/filters/capacity_filter.py:57 #, python-format -msgid "Done starting bridge %s" +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" msgstr "" -#: cinder/network/linux_net.py:1167 -#, python-format -msgid "Failed unplugging gateway interface '%s'" +#: cinder/scheduler/flows/create_volume.py:53 +msgid "No volume_id provided to populate a request_spec from" msgstr "" -#: cinder/network/linux_net.py:1170 +#: cinder/scheduler/flows/create_volume.py:116 #, python-format -msgid "Unplugged gateway interface '%s'" +msgid "Failed to schedule_create_volume: %(cause)s" msgstr "" -#: cinder/network/manager.py:291 +#: cinder/scheduler/flows/create_volume.py:135 #, python-format -msgid "Fixed ip %(fixed_ip_id)s not found" +msgid "Failed notifying on %(topic)s payload %(payload)s" msgstr "" -#: cinder/network/manager.py:300 cinder/network/manager.py:496 +#: cinder/tests/fake_driver.py:57 cinder/volume/driver.py:784 #, python-format -msgid "Interface %(interface)s not found" +msgid "FAKE ISCSI: %s" msgstr "" -#: cinder/network/manager.py:315 +#: cinder/tests/fake_driver.py:76 cinder/volume/driver.py:884 #, python-format -msgid "floating IP allocation for instance |%s|" +msgid "FAKE ISER: %s" msgstr "" -#: cinder/network/manager.py:353 -#, python-format -msgid "floating IP deallocation for instance |%s|" +#: cinder/tests/fake_driver.py:97 +msgid "local_path not implemented" msgstr "" -#: cinder/network/manager.py:386 +#: cinder/tests/fake_driver.py:124 cinder/tests/fake_driver.py:129 #, python-format -msgid "Address |%(address)s| is not allocated" +msgid "LoggingVolumeDriver: %s" msgstr "" -#: cinder/network/manager.py:390 +#: cinder/tests/fake_utils.py:70 #, python-format -msgid "Address |%(address)s| is not allocated to your project |%(project)s|" +msgid "Faking execution of cmd (subprocess): %s" msgstr "" -#: cinder/network/manager.py:402 +#: cinder/tests/fake_utils.py:78 #, python-format -msgid "Quota exceeded for %s, tried to allocate address" +msgid "Faked command matched %s" msgstr "" -#: cinder/network/manager.py:614 +#: cinder/tests/fake_utils.py:94 #, python-format -msgid "" -"Database inconsistency: DNS domain |%s| is registered in the Cinder db but " -"not visible to either the floating or instance DNS driver. It will be " -"ignored." +msgid "Faked command raised an exception %s" msgstr "" -#: cinder/network/manager.py:660 +#: cinder/tests/fake_utils.py:97 #, python-format -msgid "Domain |%(domain)s| already exists, changing zone to |%(av_zone)s|." +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" msgstr "" -#: cinder/network/manager.py:670 +#: cinder/tests/test_misc.py:58 #, python-format -msgid "Domain |%(domain)s| already exists, changing project to |%(project)s|." 
+msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" msgstr "" -#: cinder/network/manager.py:778 +#: cinder/tests/test_netapp_nfs.py:360 #, python-format -msgid "Disassociated %s stale fixed ip(s)" +msgid "Share %(share)s and file name %(file_name)s" msgstr "" -#: cinder/network/manager.py:782 -msgid "setting network host" +#: cinder/tests/test_rbd.py:768 cinder/volume/drivers/rbd.py:175 +msgid "flush() not supported in this version of librbd" msgstr "" -#: cinder/network/manager.py:896 +#: cinder/tests/test_storwize_svc.py:260 #, python-format -msgid "network allocations for instance |%s|" +msgid "unrecognized argument %s" msgstr "" -#: cinder/network/manager.py:901 +#: cinder/tests/test_storwize_svc.py:1507 #, python-format -msgid "networks retrieved for instance |%(instance_id)s|: |%(networks)s|" +msgid "Run CLI command: %s" msgstr "" -#: cinder/network/manager.py:930 +#: cinder/tests/test_storwize_svc.py:1510 #, python-format -msgid "network deallocation for instance |%s|" +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" msgstr "" -#: cinder/network/manager.py:1152 +#: cinder/tests/test_storwize_svc.py:1515 #, python-format msgid "" -"instance-dns-zone is |%(domain)s|, which is in availability zone " -"|%(zone)s|. Instance |%(instance)s| is in zone |%(zone2)s|. No DNS record" -" will be created." +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/network/manager.py:1227 +#: cinder/tests/test_volume_types.py:60 #, python-format -msgid "Unable to release %s because vif doesn't exist." +msgid "Given data: %s" msgstr "" -#: cinder/network/manager.py:1244 +#: cinder/tests/test_volume_types.py:61 #, python-format -msgid "Leased IP |%(address)s|" +msgid "Result data: %s" msgstr "" -#: cinder/network/manager.py:1248 +#: cinder/tests/test_xiv_ds8k.py:102 #, python-format -msgid "IP %s leased that is not associated" +msgid "Volume not found for instance %(instance_id)s." msgstr "" -#: cinder/network/manager.py:1256 -#, python-format -msgid "IP |%s| leased that isn't allocated" +#: cinder/tests/api/contrib/test_backups.py:741 +msgid "Invalid input" msgstr "" -#: cinder/network/manager.py:1261 +#: cinder/tests/integrated/test_login.py:29 +#, fuzzy, python-format +msgid "volume: %s" +msgstr "Volume %s: wird entfernt" + +#: cinder/tests/integrated/api/client.py:32 #, python-format -msgid "Released IP |%(address)s|" +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" msgstr "" -#: cinder/network/manager.py:1265 -#, python-format -msgid "IP %s released that is not associated" +#: cinder/tests/integrated/api/client.py:42 +msgid "Authentication error" msgstr "" -#: cinder/network/manager.py:1268 -#, python-format -msgid "IP %s released that was not leased" +#: cinder/tests/integrated/api/client.py:50 +msgid "Authorization error" msgstr "" -#: cinder/network/manager.py:1331 -msgid "cidr already in use" +#: cinder/tests/integrated/api/client.py:58 +msgid "Item not found" msgstr "" -#: cinder/network/manager.py:1334 +#: cinder/tests/integrated/api/client.py:100 #, python-format -msgid "requested cidr (%(cidr)s) conflicts with existing supernet (%(super)s)" +msgid "Doing %(method)s on %(relative_url)s" msgstr "" -#: cinder/network/manager.py:1345 +#: cinder/tests/integrated/api/client.py:103 #, python-format -msgid "" -"requested cidr (%(cidr)s) conflicts with existing smaller cidr " -"(%(smaller)s)" +msgid "Body: %s" msgstr "" -#: cinder/network/manager.py:1404 -msgid "Network already exists!" 
+#: cinder/tests/integrated/api/client.py:121 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" msgstr "" -#: cinder/network/manager.py:1423 +#: cinder/tests/integrated/api/client.py:148 #, python-format -msgid "Network must be disassociated from project %s before delete" +msgid "%(relative_uri)s => code %(http_status)s" msgstr "" -#: cinder/network/manager.py:1832 -msgid "" -"The sum between the number of networks and the vlan start cannot be " -"greater than 4094" +#: cinder/tests/integrated/api/client.py:159 +msgid "Unexpected status code" msgstr "" -#: cinder/network/manager.py:1839 +#: cinder/tests/integrated/api/client.py:166 #, python-format -msgid "" -"The network range is not big enough to fit %(num_networks)s. Network size" -" is %(network_size)s" +msgid "Decoding JSON: %s" msgstr "" -#: cinder/network/minidns.py:65 -msgid "This driver only supports type 'a'" +#: cinder/transfer/api.py:68 +msgid "Volume in unexpected state" msgstr "" -#: cinder/network/quantum/client.py:154 -msgid "Tenant ID not set" +#: cinder/transfer/api.py:102 cinder/volume/api.py:367 +msgid "status must be available" msgstr "" -#: cinder/network/quantum/client.py:180 +#: cinder/transfer/api.py:119 +#, fuzzy, python-format +msgid "Failed to create transfer record for %s" +msgstr "Nicht möglich volume %s zufinden" + +#: cinder/transfer/api.py:136 #, python-format -msgid "Quantum Client Request: %(method)s %(action)s" +msgid "Attempt to transfer %s with invalid auth key." msgstr "" -#: cinder/network/quantum/client.py:196 +#: cinder/transfer/api.py:156 cinder/volume/flows/api/create_volume.py:508 #, python-format -msgid "Quantum entity not found: %s" +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" msgstr "" -#: cinder/network/quantum/client.py:206 +#: cinder/transfer/api.py:182 #, python-format -msgid "Server %(status_code)s error: %(data)s" +msgid "Failed to update quota donating volumetransfer id %s" msgstr "" -#: cinder/network/quantum/client.py:210 +#: cinder/transfer/api.py:199 #, python-format -msgid "Unable to connect to server. Got error: %s" +msgid "Volume %s has been transferred." msgstr "" -#: cinder/network/quantum/client.py:228 +#: cinder/volume/api.py:143 #, python-format -msgid "unable to deserialize object of type = '%s'" +msgid "Unable to query if %s is in the availability zone set" msgstr "" -#: cinder/network/quantum/manager.py:162 -msgid "QuantumManager does not use 'multi_host' parameter." +#: cinder/volume/api.py:171 cinder/volume/api.py:173 +msgid "Failed to create api volume flow" msgstr "" -#: cinder/network/quantum/manager.py:166 -msgid "QuantumManager requires that only one network is created per call" +#: cinder/volume/api.py:202 +msgid "Failed to update quota for deleting volume" msgstr "" -#: cinder/network/quantum/manager.py:176 -msgid "QuantumManager does not use 'vlan_start' parameter." +#: cinder/volume/api.py:214 +#, python-format +msgid "Volume status must be available or error, but current status is: %s" msgstr "" -#: cinder/network/quantum/manager.py:182 -msgid "QuantumManager does not use 'vpn_start' parameter." +#: cinder/volume/api.py:224 +msgid "Volume cannot be deleted while migrating" msgstr "" -#: cinder/network/quantum/manager.py:186 -msgid "QuantumManager does not use 'bridge' parameter." 
+#: cinder/volume/api.py:229 +#, python-format +msgid "Volume still has %d dependent snapshots" msgstr "" -#: cinder/network/quantum/manager.py:190 -msgid "QuantumManager does not use 'bridge_interface' parameter." +#: cinder/volume/api.py:285 cinder/volume/api.py:350 +#: cinder/volume/qos_specs.py:240 cinder/volume/volume_types.py:67 +#, python-format +msgid "Searching by: %s" msgstr "" -#: cinder/network/quantum/manager.py:195 -msgid "QuantumManager requires a valid (.1) gateway address." +#: cinder/volume/api.py:370 +msgid "already attached" msgstr "" -#: cinder/network/quantum/manager.py:204 -#, python-format -msgid "" -"Unable to find existing quantum network for tenant '%(q_tenant_id)s' with" -" net-id '%(quantum_net_id)s'" +#: cinder/volume/api.py:377 +msgid "status must be in-use to detach" msgstr "" -#: cinder/network/quantum/manager.py:301 -#, python-format -msgid "network allocations for instance %s" +#: cinder/volume/api.py:388 +msgid "Volume status must be available to reserve" msgstr "" -#: cinder/network/quantum/manager.py:588 -#, python-format -msgid "" -"port deallocation failed for instance: |%(instance_id)s|, port_id: " -"|%(port_id)s|" +#: cinder/volume/api.py:464 +msgid "Snapshot cannot be created while volume is migrating" msgstr "" -#: cinder/network/quantum/manager.py:606 -#, python-format -msgid "" -"ipam deallocation failed for instance: |%(instance_id)s|, vif_uuid: " -"|%(vif_uuid)s|" +#: cinder/volume/api.py:468 +msgid "must be available" msgstr "" -#: cinder/network/quantum/melange_connection.py:96 +#: cinder/volume/api.py:490 #, python-format -msgid "Server returned error: %s" -msgstr "" - -#: cinder/network/quantum/melange_connection.py:98 -msgid "Connection error contacting melange service, retrying" +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" msgstr "" -#: cinder/network/quantum/melange_connection.py:108 +#: cinder/volume/api.py:502 #, python-format msgid "" -"allocate IP on network |%(network_id)s| belonging to " -"|%(network_tenant_id)s| to this vif |%(vif_id)s| with mac " -"|%(mac_address)s| belonging to |%(project_id)s| " +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" msgstr "" -#: cinder/network/quantum/melange_ipam_lib.py:133 -msgid "get_project_and_global_net_ids must be called with a non-null project_id" +#: cinder/volume/api.py:553 +msgid "Volume Snapshot status must be available or error" msgstr "" -#: cinder/network/quantum/cinder_ipam_lib.py:75 -msgid "Error creating network entry" +#: cinder/volume/api.py:581 cinder/volume/flows/api/create_volume.py:208 +msgid "Metadata property key blank" msgstr "" -#: cinder/network/quantum/cinder_ipam_lib.py:90 -#, python-format -msgid "No network with net_id = %s" +#: cinder/volume/api.py:585 +msgid "Metadata property key greater than 255 characters" msgstr "" -#: cinder/network/quantum/cinder_ipam_lib.py:221 -#, python-format -msgid "No fixed IPs to deallocate for vif %s" +#: cinder/volume/api.py:589 +msgid "Metadata property value greater than 255 characters" msgstr "" -#: cinder/network/quantum/quantum_connection.py:99 -#, python-format -msgid "Connecting interface %(interface_id)s to net %(net_id)s for %(tenant_id)s" +#: cinder/volume/api.py:720 cinder/volume/api.py:772 +msgid "Volume status must be available/in-use." 
msgstr "" -#: cinder/network/quantum/quantum_connection.py:113 -#, python-format -msgid "Deleting port %(port_id)s on net %(net_id)s for %(tenant_id)s" +#: cinder/volume/api.py:723 +msgid "Volume status is in-use." msgstr "" -#: cinder/notifier/api.py:115 -#, python-format -msgid "%s not in valid priorities" +#: cinder/volume/api.py:752 +msgid "Volume status must be available to extend." msgstr "" -#: cinder/notifier/api.py:130 +#: cinder/volume/api.py:757 #, python-format msgid "" -"Problem '%(e)s' attempting to send to notification system. " -"Payload=%(payload)s" +"New size for extend must be greater than current size. (current: " +"%(size)s, extended: %(new_size)s)" msgstr "" -#: cinder/notifier/list_notifier.py:65 -#, python-format -msgid "Problem '%(e)s' attempting to send to notification driver %(driver)s." +#: cinder/volume/api.py:778 +msgid "Volume is already part of an active migration" msgstr "" -#: cinder/notifier/rabbit_notifier.py:46 -#, python-format -msgid "Could not send notification to %(topic)s. Payload=%(message)s" +#: cinder/volume/api.py:784 +msgid "volume must not have snapshots" msgstr "" -#: cinder/rpc/amqp.py:146 +#: cinder/volume/api.py:797 #, python-format -msgid "Returning exception %s to caller" +msgid "No available service named %s" msgstr "" -#: cinder/rpc/amqp.py:188 -#, python-format -msgid "unpacked context: %s" +#: cinder/volume/api.py:803 +msgid "Destination host must be different than current host" msgstr "" -#: cinder/rpc/amqp.py:231 -#, python-format -msgid "received %s" +#: cinder/volume/api.py:833 +msgid "Source volume not mid-migration." msgstr "" -#: cinder/rpc/amqp.py:236 -#, python-format -msgid "no method for message: %s" -msgstr "keine Methode für diese Nachricht gefunden: %s" +#: cinder/volume/api.py:837 +msgid "Destination volume not mid-migration." +msgstr "" -#: cinder/rpc/amqp.py:237 +#: cinder/volume/api.py:842 #, python-format -msgid "No method for message: %s" -msgstr "keine Methode für diese Nachricht gefunden: %s" +msgid "Destination has migration_status %(stat)s, expected %(exp)s." +msgstr "" -#: cinder/rpc/amqp.py:321 -#, python-format -msgid "Making asynchronous call on %s ..." +#: cinder/volume/api.py:853 +msgid "Volume status must be available to update readonly flag." msgstr "" -#: cinder/rpc/amqp.py:324 +#: cinder/volume/api.py:862 #, python-format -msgid "MSG_ID is %s" -msgstr "MSG_ID ist %s" +msgid "Unable to update type due to incorrect status on volume: %s" +msgstr "" -#: cinder/rpc/amqp.py:346 +#: cinder/volume/api.py:868 #, python-format -msgid "Making asynchronous cast on %s..." +msgid "Volume %s is already part of an active migration." msgstr "" -#: cinder/rpc/amqp.py:354 -msgid "Making asynchronous fanout cast..." +#: cinder/volume/api.py:874 +#, python-format +msgid "migration_policy must be 'on-demand' or 'never', passed: %s" msgstr "" -#: cinder/rpc/amqp.py:379 +#: cinder/volume/api.py:887 #, python-format -msgid "Sending notification on %s..." +msgid "Invalid volume_type passed: %s" msgstr "" -#: cinder/rpc/common.py:54 +#: cinder/volume/api.py:900 #, python-format -msgid "" -"Remote error: %(exc_type)s %(value)s\n" -"%(traceback)s." +msgid "New volume_type same as original: %s" msgstr "" -#: cinder/rpc/common.py:71 -msgid "Timeout while waiting on RPC response." +#: cinder/volume/api.py:915 +msgid "Retype cannot change encryption requirements" msgstr "" -#: cinder/rpc/impl_kombu.py:111 -msgid "Failed to process message... skipping it." 
+#: cinder/volume/api.py:927 +msgid "Retype cannot change front-end qos specs for in-use volumes" msgstr "" -#: cinder/rpc/impl_kombu.py:407 +#: cinder/volume/driver.py:189 cinder/volume/drivers/netapp/nfs.py:174 #, python-format -msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgid "Recovering from a failed execute. Try number %s" msgstr "" -#: cinder/rpc/impl_kombu.py:430 +#: cinder/volume/driver.py:282 #, python-format -msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgid "copy_data_between_volumes %(src)s -> %(dest)s." msgstr "" -#: cinder/rpc/impl_kombu.py:466 +#: cinder/volume/driver.py:295 cinder/volume/driver.py:309 #, python-format -msgid "" -"Unable to connect to AMQP server on %(hostname)s:%(port)d after " -"%(max_retries)d tries: %(err_str)s" +msgid "Failed to attach volume %(vol)s" msgstr "" -#: cinder/rpc/impl_kombu.py:482 +#: cinder/volume/driver.py:327 #, python-format -msgid "" -"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " -"again in %(sleep_time)d seconds." +msgid "Failed to copy volume %(src)s to %(dest)d" msgstr "" -#: cinder/rpc/impl_kombu.py:533 cinder/rpc/impl_qpid.py:385 +#: cinder/volume/driver.py:340 #, python-format -msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgid "copy_image_to_volume %s." msgstr "" -#: cinder/rpc/impl_kombu.py:551 cinder/rpc/impl_qpid.py:400 +#: cinder/volume/driver.py:358 #, python-format -msgid "Timed out waiting for RPC response: %s" +msgid "copy_volume_to_image %s." msgstr "" -#: cinder/rpc/impl_kombu.py:555 cinder/rpc/impl_qpid.py:404 +#: cinder/volume/driver.py:394 #, python-format -msgid "Failed to consume message from queue: %s" +msgid "Unable to access the backend storage via the path %(path)s." msgstr "" -#: cinder/rpc/impl_kombu.py:589 cinder/rpc/impl_qpid.py:434 +#: cinder/volume/driver.py:433 #, python-format -msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgid "Creating a new backup for volume %s." msgstr "" -#: cinder/rpc/impl_qpid.py:341 +#: cinder/volume/driver.py:451 #, python-format -msgid "Unable to connect to AMQP server: %s " +msgid "Restoring backup %(backup)s to volume %(volume)s." msgstr "" -#: cinder/rpc/impl_qpid.py:346 -#, python-format -msgid "Connected to AMQP server on %s" +#: cinder/volume/driver.py:474 +msgid "Extend volume not implemented" msgstr "" -#: cinder/rpc/impl_qpid.py:354 -msgid "Re-established AMQP queues" +#: cinder/volume/driver.py:533 cinder/volume/drivers/emc/emc_smis_iscsi.py:113 +msgid "ISCSI provider_location not stored, using discovery" msgstr "" -#: cinder/rpc/impl_qpid.py:412 -msgid "Error processing message. Skipping it." +#: cinder/volume/driver.py:546 +#, python-format +msgid "ISCSI discovery attempt failed for:%s" msgstr "" -#: cinder/scheduler/chance.py:47 cinder/scheduler/simple.py:91 -#: cinder/scheduler/simple.py:143 -msgid "Is the appropriate service running?" 
+#: cinder/volume/driver.py:548 +#, python-format +msgid "Error from iscsiadm -m discovery: %s" msgstr "" -#: cinder/scheduler/chance.py:52 -msgid "Could not find another compute" -msgstr "" +#: cinder/volume/driver.py:595 +#, fuzzy, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "Nicht möglich volume %s zufinden" -#: cinder/scheduler/driver.py:63 +#: cinder/volume/driver.py:599 cinder/volume/drivers/emc/emc_smis_iscsi.py:156 #, python-format -msgid "Casted '%(method)s' to volume '%(host)s'" +msgid "ISCSI Discovery: Found %s" msgstr "" -#: cinder/scheduler/driver.py:80 -#, python-format -msgid "Casted '%(method)s' to compute '%(host)s'" +#: cinder/volume/driver.py:696 +msgid "The volume driver requires the iSCSI initiator name in the connector." msgstr "" -#: cinder/scheduler/driver.py:89 -#, python-format -msgid "Casted '%(method)s' to network '%(host)s'" +#: cinder/volume/driver.py:726 cinder/volume/driver.py:845 +#: cinder/volume/drivers/eqlx.py:247 cinder/volume/drivers/lvm.py:359 +#: cinder/volume/drivers/zadara.py:650 +#: cinder/volume/drivers/emc/emc_smis_common.py:859 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:235 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:602 +#: cinder/volume/drivers/netapp/iscsi.py:1032 +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/nexenta/iscsi.py:538 +#: cinder/volume/drivers/windows/windows.py:205 +msgid "Updating volume stats" +msgstr "" + +#: cinder/volume/driver.py:924 +msgid "Driver must implement initialize_connection" msgstr "" -#: cinder/scheduler/driver.py:107 +#: cinder/volume/manager.py:203 #, python-format -msgid "Casted '%(method)s' to %(topic)s '%(host)s'" +msgid "Driver path %s is deprecated, update your configuration to the new path." msgstr "" -#: cinder/scheduler/driver.py:181 -msgid "Must implement a fallback schedule" +#: cinder/volume/manager.py:209 +msgid "" +"ThinLVMVolumeDriver is deprecated, please configure LVMISCSIDriver and " +"lvm_type=thin. Continuing with those settings." msgstr "" -#: cinder/scheduler/driver.py:185 -msgid "Driver must implement schedule_prep_resize" +#: cinder/volume/manager.py:228 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)" msgstr "" -#: cinder/scheduler/driver.py:190 -msgid "Driver must implement schedule_run_instance" +#: cinder/volume/manager.py:235 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s" msgstr "" -#: cinder/scheduler/driver.py:325 -msgid "Block migration can not be used with shared storage." +#: cinder/volume/manager.py:244 +#, python-format +msgid "Re-exporting %s volumes" msgstr "" -#: cinder/scheduler/driver.py:330 -msgid "Live migration can not be used without shared storage." +#: cinder/volume/manager.py:257 +#, python-format +msgid "Failed to re-export volume %s: setting to error state" msgstr "" -#: cinder/scheduler/driver.py:367 cinder/scheduler/driver.py:457 +#: cinder/volume/manager.py:264 #, python-format -msgid "host %(dest)s is not compatible with original host %(src)s." 
+msgid "volume %s stuck in a downloading state" msgstr "" -#: cinder/scheduler/driver.py:416 +#: cinder/volume/manager.py:271 #, python-format -msgid "" -"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " -"memory(host:%(avail)s <= instance:%(mem_inst)s)" +msgid "volume %s: skipping export" msgstr "" -#: cinder/scheduler/driver.py:472 +#: cinder/volume/manager.py:273 #, python-format msgid "" -"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " -"disk(host:%(available)s <= instance:%(necessary)s)" +"Error encountered during re-exporting phase of driver initialization: " +"%(name)s" msgstr "" -#: cinder/scheduler/filter_scheduler.py:51 -#, python-format -msgid "No host selection for %s defined." +#: cinder/volume/manager.py:283 +msgid "Resuming any in progress delete operations" msgstr "" -#: cinder/scheduler/filter_scheduler.py:64 +#: cinder/volume/manager.py:286 #, python-format -msgid "Attempting to build %(num_instances)d instance(s)" +msgid "Resuming delete on volume: %s" msgstr "" -#: cinder/scheduler/filter_scheduler.py:157 -msgid "Scheduler only understands Compute nodes (for now)" +#: cinder/volume/manager.py:328 cinder/volume/manager.py:330 +msgid "Failed to create manager volume flow" msgstr "" -#: cinder/scheduler/filter_scheduler.py:199 +#: cinder/volume/manager.py:374 cinder/volume/manager.py:391 #, python-format -msgid "Filtered %(hosts)s" +msgid "volume %s: deleting" +msgstr "Volume %s: wird entfernt" + +#: cinder/volume/manager.py:380 +msgid "volume is not local to this node" msgstr "" -#: cinder/scheduler/filter_scheduler.py:209 +#: cinder/volume/manager.py:389 #, python-format -msgid "Weighted %(weighted_host)s" -msgstr "" +msgid "volume %s: removing export" +msgstr "Volume %s: entferne Export" -#: cinder/scheduler/host_manager.py:144 +#: cinder/volume/manager.py:394 #, python-format -msgid "Host filter fails for ignored host %(host)s" +msgid "Cannot delete volume %s: volume is busy" msgstr "" -#: cinder/scheduler/host_manager.py:151 -#, python-format -msgid "Host filter fails for non-forced host %(host)s" +#: cinder/volume/manager.py:422 +msgid "Failed to update usages deleting volume" msgstr "" -#: cinder/scheduler/host_manager.py:157 +#: cinder/volume/manager.py:427 #, python-format -msgid "Host filter function %(func)s failed for %(host)s" +msgid "volume %s: glance metadata deleted" msgstr "" -#: cinder/scheduler/host_manager.py:163 +#: cinder/volume/manager.py:430 #, python-format -msgid "Host filter passes for %(host)s" +msgid "no glance metadata found for volume %s" msgstr "" -#: cinder/scheduler/host_manager.py:272 +#: cinder/volume/manager.py:434 #, python-format -msgid "Received %(service_name)s service update from %(host)s." -msgstr "" +msgid "volume %s: deleted successfully" +msgstr "Volume %s: erfolgreich entfernt" -#: cinder/scheduler/host_manager.py:313 -msgid "host_manager only implemented for 'compute'" +#: cinder/volume/manager.py:451 +#, python-format +msgid "snapshot %s: creating" msgstr "" -#: cinder/scheduler/host_manager.py:323 +#: cinder/volume/manager.py:462 #, python-format -msgid "No service for compute ID %s" +msgid "snapshot %(snap_id)s: creating" msgstr "" -#: cinder/scheduler/manager.py:85 +#: cinder/volume/manager.py:490 #, python-format msgid "" -"Driver Method %(driver_method_name)s missing: %(e)s. 
Reverting to " -"schedule()" +"Failed updating %(snapshot_id)s metadata using the provided volumes " +"%(volume_id)s metadata" msgstr "" -#: cinder/scheduler/manager.py:150 +#: cinder/volume/manager.py:496 #, python-format -msgid "Failed to schedule_%(method)s: %(ex)s" +msgid "snapshot %s: created successfully" msgstr "" -#: cinder/scheduler/manager.py:159 +#: cinder/volume/manager.py:508 cinder/volume/manager.py:518 #, python-format -msgid "Setting instance %(instance_uuid)s to %(state)s state." +msgid "snapshot %s: deleting" msgstr "" -#: cinder/scheduler/scheduler_options.py:66 +#: cinder/volume/manager.py:526 #, python-format -msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgid "Cannot delete snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:556 +msgid "Failed to update usages deleting snapshot" msgstr "" -#: cinder/scheduler/scheduler_options.py:75 +#: cinder/volume/manager.py:559 #, python-format -msgid "Could not decode scheduler options: '%(e)s'" +msgid "snapshot %s: deleted successfully" msgstr "" -#: cinder/scheduler/simple.py:87 -msgid "Not enough allocatable CPU cores remaining" +#: cinder/volume/manager.py:579 +msgid "being attached by another instance" msgstr "" -#: cinder/scheduler/simple.py:137 -msgid "Not enough allocatable volume gigabytes remaining" +#: cinder/volume/manager.py:583 +msgid "being attached by another host" msgstr "" -#: cinder/scheduler/filters/core_filter.py:45 -msgid "VCPUs not set; assuming CPU collection broken" +#: cinder/volume/manager.py:587 +msgid "being attached by different mode" msgstr "" -#: cinder/tests/fake_utils.py:72 -#, python-format -msgid "Faking execution of cmd (subprocess): %s" +#: cinder/volume/manager.py:590 +msgid "status must be available or attaching" msgstr "" -#: cinder/tests/fake_utils.py:80 +#: cinder/volume/manager.py:698 #, python-format -msgid "Faked command matched %s" +msgid "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" msgstr "" -#: cinder/tests/fake_utils.py:96 +#: cinder/volume/manager.py:760 #, python-format -msgid "Faked command raised an exception %s" +msgid "Unable to fetch connection information from backend: %(err)s" msgstr "" -#: cinder/tests/fake_utils.py:101 +#: cinder/volume/manager.py:807 #, python-format -msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgid "Unable to terminate volume connection: %(err)s" msgstr "" -#: cinder/tests/fakelibvirt.py:784 -msgid "Please extend mock libvirt module to support flags" +#: cinder/volume/manager.py:854 +msgid "failed to create new_volume on destination host" msgstr "" -#: cinder/tests/fakelibvirt.py:790 -msgid "Please extend fake libvirt module to support this auth method" +#: cinder/volume/manager.py:857 +msgid "timeout creating new_volume on destination host" msgstr "" -#: cinder/tests/test_compute.py:365 cinder/tests/test_compute.py:1419 +#: cinder/volume/manager.py:880 #, python-format -msgid "Running instances: %s" +msgid "Failed to copy volume %(vol1)s to %(vol2)s" msgstr "" -#: cinder/tests/test_compute.py:371 +#: cinder/volume/manager.py:909 #, python-format -msgid "After terminating instances: %s" +msgid "" +"migrate_volume_completion: completing migration for volume %(vol1)s " +"(temporary volume %(vol2)s" msgstr "" -#: cinder/tests/test_compute.py:589 -msgid "Internal error" +#: cinder/volume/manager.py:921 +#, python-format +msgid "" +"migrate_volume_completion is cleaning up an error for volume %(vol1)s " +"(temporary volume %(vol2)s" msgstr "" -#: 
cinder/tests/test_compute.py:1430 +#: cinder/volume/manager.py:940 #, python-format -msgid "After force-killing instances: %s" +msgid "Failed to delete migration source vol %(vol)s: %(err)s" msgstr "" -#: cinder/tests/test_misc.py:92 +#: cinder/volume/manager.py:976 #, python-format -msgid "" -"The following migrations are missing a downgrade:\n" -"\t%s" +msgid "volume %s: calling driver migrate_volume" msgstr "" -#: cinder/tests/test_cinder_manage.py:169 -msgid "id" +#: cinder/volume/manager.py:1016 +msgid "Updating volume status" msgstr "" -#: cinder/tests/test_cinder_manage.py:170 -msgid "IPv4" +#: cinder/volume/manager.py:1024 +#, python-format +msgid "" +"Unable to update stats, %(driver_name)s -%(driver_version)s " +"%(config_group)s driver is uninitialized." msgstr "" -#: cinder/tests/test_cinder_manage.py:171 -msgid "IPv6" +#: cinder/volume/manager.py:1044 +#, python-format +msgid "Notification {%s} received" msgstr "" -#: cinder/tests/test_cinder_manage.py:172 -msgid "start address" +#: cinder/volume/manager.py:1091 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to extend volume by %(s_size)sG, " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" msgstr "" -#: cinder/tests/test_cinder_manage.py:173 -msgid "DNS1" +#: cinder/volume/manager.py:1103 +#, python-format +msgid "volume %s: extending" msgstr "" -#: cinder/tests/test_cinder_manage.py:174 -msgid "DNS2" +#: cinder/volume/manager.py:1105 +#, python-format +msgid "volume %s: extended successfully" msgstr "" -#: cinder/tests/test_cinder_manage.py:175 -msgid "VlanID" +#: cinder/volume/manager.py:1107 +#, python-format +msgid "volume %s: Error trying to extend volume" msgstr "" -#: cinder/tests/test_cinder_manage.py:176 -msgid "project" +#: cinder/volume/manager.py:1169 +msgid "Failed to update usages while retyping volume." msgstr "" -#: cinder/tests/test_cinder_manage.py:177 -msgid "uuid" +#: cinder/volume/manager.py:1170 +msgid "Failed to get old volume type quota reservations" msgstr "" -#: cinder/tests/test_volume.py:216 +#: cinder/volume/manager.py:1190 #, python-format -msgid "Target %s allocated" +msgid "Volume %s: retyped succesfully" msgstr "" -#: cinder/tests/test_volume.py:468 +#: cinder/volume/manager.py:1193 #, python-format -msgid "Cannot confirm exported volume id:%s." +msgid "" +"Volume %s: driver error when trying to retype, falling back to generic " +"mechanism." msgstr "" -#: cinder/tests/test_volume_types.py:58 -#, python-format -msgid "Given data: %s" +#: cinder/volume/manager.py:1204 +msgid "Retype requires migration but is not allowed." msgstr "" -#: cinder/tests/test_volume_types.py:59 -#, python-format -msgid "Result data: %s" +#: cinder/volume/manager.py:1212 +msgid "Volume must not have snapshots." 
msgstr "" -#: cinder/tests/test_xenapi.py:626 +#: cinder/volume/qos_specs.py:57 #, python-format -msgid "Creating files in %s to simulate guest agent" +msgid "Valid consumer of QoS specs are: %s" msgstr "" -#: cinder/tests/test_xenapi.py:637 +#: cinder/volume/qos_specs.py:84 cinder/volume/qos_specs.py:105 +#: cinder/volume/qos_specs.py:155 cinder/volume/qos_specs.py:197 +#: cinder/volume/qos_specs.py:211 cinder/volume/qos_specs.py:225 +#: cinder/volume/volume_types.py:43 #, python-format -msgid "Removing simulated guest agent files in %s" +msgid "DB error: %s" msgstr "" -#: cinder/tests/api/openstack/compute/test_servers.py:2144 +#: cinder/volume/qos_specs.py:123 cinder/volume/qos_specs.py:140 +#: cinder/volume/qos_specs.py:272 cinder/volume/volume_types.py:52 +#: cinder/volume/volume_types.py:99 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/qos_specs.py:156 #, python-format -msgid "Quota exceeded: code=%(code)s" +msgid "Failed to get all associations of qos specs %s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:57 +#: cinder/volume/qos_specs.py:189 #, python-format -msgid "_create: %s" +msgid "" +"Type %(type_id)s is already associated with another qos specs: " +"%(qos_specs_id)s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:66 +#: cinder/volume/qos_specs.py:198 #, python-format -msgid "_delete: %s" +msgid "Failed to associate qos specs %(id)s with type: %(vol_type_id)s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:75 +#: cinder/volume/qos_specs.py:212 #, python-format -msgid "_get: %s" +msgid "Failed to disassociate qos specs %(id)s with type: %(vol_type_id)s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:85 +#: cinder/volume/qos_specs.py:226 #, python-format -msgid "_get_all: %s" +msgid "Failed to disassociate qos specs %s." +msgstr "" + +#: cinder/volume/qos_specs.py:284 cinder/volume/volume_types.py:111 +msgid "name cannot be None" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:125 +#: cinder/volume/utils.py:144 #, python-format -msgid "test_snapshot_create: param=%s" +msgid "" +"Incorrect value error: %(blocksize)s, it may indicate that " +"'volume_dd_blocksize' was configured incorrectly. Fall back to default." msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:134 +#: cinder/volume/volume_types.py:130 #, python-format -msgid "test_snapshot_create: resp_dict=%s" +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:156 +#: cinder/volume/drivers/block_device.py:131 +#: cinder/volume/drivers/block_device.py:143 cinder/volume/drivers/lvm.py:654 +#: cinder/volume/drivers/lvm.py:669 #, python-format -msgid "test_snapshot_create_force: param=%s" +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:165 +#: cinder/volume/drivers/block_device.py:157 cinder/volume/drivers/lvm.py:687 #, python-format -msgid "test_snapshot_create_force: resp_dict=%s" +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:205 +#: cinder/volume/drivers/block_device.py:183 cinder/volume/drivers/lvm.py:483 #, python-format -msgid "test_snapshot_show: resp=%s" +msgid "Skipping ensure_export. 
No iscsi_target provision for volume: %s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:231 +#: cinder/volume/drivers/block_device.py:200 cinder/volume/drivers/lvm.py:504 #, python-format -msgid "test_snapshot_detail: resp_dict=%s" +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" msgstr "" -#: cinder/tests/integrated/test_login.py:31 +#: cinder/volume/drivers/block_device.py:272 cinder/volume/drivers/lvm.py:227 #, python-format -msgid "flavor: %s" +msgid "Performing secure delete on volume: %s" msgstr "" -#: cinder/tests/integrated/api/client.py:38 +#: cinder/volume/drivers/block_device.py:287 #, python-format -msgid "" -"%(message)s\n" -"Status Code: %(_status)s\n" -"Body: %(_body)s" +msgid "Error unrecognized volume_clear option: %s" msgstr "" -#: cinder/tests/integrated/api/client.py:47 -msgid "Authentication error" +#: cinder/volume/drivers/block_device.py:311 cinder/volume/drivers/lvm.py:300 +#: cinder/volume/drivers/zadara.py:509 cinder/volume/drivers/nexenta/nfs.py:189 +#, python-format +msgid "Creating clone of volume: %s" msgstr "" -#: cinder/tests/integrated/api/client.py:55 -msgid "Authorization error" +#: cinder/volume/drivers/block_device.py:380 +msgid "No free disk" msgstr "" -#: cinder/tests/integrated/api/client.py:63 -msgid "Item not found" +#: cinder/volume/drivers/block_device.py:393 +msgid "No big enough free disk" msgstr "" -#: cinder/tests/integrated/api/client.py:105 +#: cinder/volume/drivers/coraid.py:84 #, python-format -msgid "Doing %(method)s on %(relative_url)s" +msgid "Invalid ESM url scheme \"%s\". Supported https only." msgstr "" -#: cinder/tests/integrated/api/client.py:107 -#, python-format -msgid "Body: %s" +#: cinder/volume/drivers/coraid.py:111 +msgid "Invalid REST handle name. Expected path." msgstr "" -#: cinder/tests/integrated/api/client.py:125 +#: cinder/volume/drivers/coraid.py:134 #, python-format -msgid "%(auth_uri)s => code %(http_status)s" +msgid "Call to json.loads() failed: %(ex)s. Response: %(resp)s" msgstr "" -#: cinder/tests/integrated/api/client.py:151 -#, python-format -msgid "%(relative_uri)s => code %(http_status)s" +#: cinder/volume/drivers/coraid.py:224 +msgid "Session is expired. Relogin on ESM." msgstr "" -#: cinder/tests/integrated/api/client.py:161 -msgid "Unexpected status code" +#: cinder/volume/drivers/coraid.py:244 +msgid "Reply is empty." msgstr "" -#: cinder/tests/integrated/api/client.py:168 -#, python-format -msgid "Decoding JSON: %s" +#: cinder/volume/drivers/coraid.py:246 +msgid "Error message is empty." msgstr "" -#: cinder/tests/rpc/common.py:133 +#: cinder/volume/drivers/coraid.py:284 #, python-format -msgid "Nested received %(queue)s, %(value)s" +msgid "Coraid Appliance ping failed: %s" msgstr "" -#: cinder/tests/rpc/common.py:142 +#: cinder/volume/drivers/coraid.py:297 #, python-format -msgid "Nested return %s" +msgid "Volume \"%(name)s\" created with VSX LUN \"%(lun)s\"" msgstr "" -#: cinder/tests/rpc/common.py:160 -msgid "RPC backend does not support timeouts" +#: cinder/volume/drivers/coraid.py:311 +#, python-format +msgid "Volume \"%s\" deleted." msgstr "" -#: cinder/tests/rpc/common.py:227 cinder/tests/rpc/common.py:233 +#: cinder/volume/drivers/coraid.py:315 #, python-format -msgid "Received %s" +msgid "Resize volume \"%(name)s\" to %(size)s GB." 
msgstr "" -#: cinder/virt/connection.py:85 -msgid "Failed to open connection to the hypervisor" -msgstr "Konnte Verbindung zum Hypervisor nicht öffnen" - -#: cinder/virt/fake.py:270 cinder/virt/xenapi_conn.py:396 -#: cinder/virt/baremetal/proxy.py:716 cinder/virt/libvirt/connection.py:2045 +#: cinder/volume/drivers/coraid.py:319 #, python-format -msgid "Compute_service record created for %s " +msgid "Repository for volume \"%(name)s\" found: \"%(repo)s\"" msgstr "" -#: cinder/virt/fake.py:273 cinder/virt/xenapi_conn.py:399 -#: cinder/virt/baremetal/proxy.py:720 cinder/virt/libvirt/connection.py:2048 +#: cinder/volume/drivers/coraid.py:333 #, python-format -msgid "Compute_service record updated for %s " +msgid "Volume \"%(name)s\" resized. New size is %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:385 +msgid "Cannot create clone volume in different repository." msgstr "" -#: cinder/virt/firewall.py:130 +#: cinder/volume/drivers/coraid.py:505 #, python-format -msgid "Attempted to unfilter instance %s which is not filtered" +msgid "Initialize connection %(shelf)s/%(lun)s for %(name)s" msgstr "" -#: cinder/virt/firewall.py:137 +#: cinder/volume/drivers/eqlx.py:139 #, python-format -msgid "Filters added to instance %s" +msgid "" +"CLI output\n" +"%s" msgstr "" -#: cinder/virt/firewall.py:139 -msgid "Provider Firewall Rules refreshed" +#: cinder/volume/drivers/eqlx.py:154 +msgid "Reading CLI MOTD" msgstr "" -#: cinder/virt/firewall.py:291 +#: cinder/volume/drivers/eqlx.py:158 #, python-format -msgid "Adding security group rule: %r" +msgid "Setting CLI terminal width: '%s'" msgstr "" -#: cinder/virt/firewall.py:403 cinder/virt/xenapi/firewall.py:87 +#: cinder/volume/drivers/eqlx.py:162 #, python-format -msgid "Adding provider rule: %s" +msgid "Sending CLI command: '%s'" msgstr "" -#: cinder/virt/images.py:86 -msgid "'qemu-img info' parsing failed." +#: cinder/volume/drivers/eqlx.py:169 +msgid "Error executing EQL command" msgstr "" -#: cinder/virt/images.py:92 +#: cinder/volume/drivers/eqlx.py:199 #, python-format -msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgid "EQL-driver: executing \"%s\"" msgstr "" -#: cinder/virt/images.py:104 +#: cinder/volume/drivers/eqlx.py:208 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:383 #, python-format -msgid "Converted to raw, but format is now %s" +msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" msgstr "" -#: cinder/virt/vmwareapi_conn.py:105 -msgid "" -"Must specify vmwareapi_host_ip,vmwareapi_host_username and " -"vmwareapi_host_password to useconnection_type=vmwareapi" +#: cinder/volume/drivers/eqlx.py:215 cinder/volume/drivers/san/san.py:149 +#, fuzzy, python-format +msgid "Error running SSH command: %s" +msgstr "Unerwarteter Fehler bei Ausführung des Kommandos." 
+ +#: cinder/volume/drivers/eqlx.py:282 +#, python-format +msgid "Volume %s does not exist, it may have already been deleted" msgstr "" -#: cinder/virt/vmwareapi_conn.py:276 +#: cinder/volume/drivers/eqlx.py:300 #, python-format -msgid "In vmwareapi:_create_session, got this exception: %s" +msgid "EQL-driver: Setup is complete, group IP is %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:304 +msgid "Failed to setup the Dell EqualLogic driver" msgstr "" -#: cinder/virt/vmwareapi_conn.py:359 +#: cinder/volume/drivers/eqlx.py:320 #, python-format -msgid "In vmwareapi:_call_method, got this exception: %s" +msgid "Failed to create volume %s" msgstr "" -#: cinder/virt/vmwareapi_conn.py:398 +#: cinder/volume/drivers/eqlx.py:329 #, python-format -msgid "Task [%(task_name)s] %(task_ref)s status: success" +msgid "Volume %s was not found while trying to delete it" msgstr "" -#: cinder/virt/vmwareapi_conn.py:404 +#: cinder/volume/drivers/eqlx.py:333 #, python-format -msgid "Task [%(task_name)s] %(task_ref)s status: error %(error_info)s" +msgid "Failed to delete volume %s" msgstr "" -#: cinder/virt/vmwareapi_conn.py:409 +#: cinder/volume/drivers/eqlx.py:348 #, python-format -msgid "In vmwareapi:_poll_task, Got this error %s" +msgid "Failed to create snapshot of volume %s" msgstr "" -#: cinder/virt/xenapi_conn.py:140 -msgid "" -"Must specify xenapi_connection_url, xenapi_connection_username " -"(optionally), and xenapi_connection_password to use " -"connection_type=xenapi" +#: cinder/volume/drivers/eqlx.py:361 +#, python-format +msgid "Failed to create volume from snapshot %s" msgstr "" -#: cinder/virt/xenapi_conn.py:329 cinder/virt/libvirt/connection.py:472 -msgid "Could not determine iscsi initiator name" +#: cinder/volume/drivers/eqlx.py:374 +#, python-format +msgid "Failed to create clone of volume %s" msgstr "" -#: cinder/virt/xenapi_conn.py:460 -msgid "Host startup on XenServer is not supported." +#: cinder/volume/drivers/eqlx.py:384 +#, python-format +msgid "Failed to delete snapshot %(snap)s of volume %(vol)s" msgstr "" -#: cinder/virt/xenapi_conn.py:489 -msgid "Unable to log in to XenAPI (is the Dom0 disk full?)" +#: cinder/volume/drivers/eqlx.py:405 +#, python-format +msgid "Failed to initialize connection to volume %s" msgstr "" -#: cinder/virt/xenapi_conn.py:527 -msgid "Host is member of a pool, but DB says otherwise" +#: cinder/volume/drivers/eqlx.py:415 +#, python-format +msgid "Failed to terminate connection to volume %s" msgstr "" -#: cinder/virt/xenapi_conn.py:599 cinder/virt/xenapi_conn.py:612 +#: cinder/volume/drivers/eqlx.py:436 #, python-format -msgid "Got exception: %s" +msgid "Volume %s is not found!, it may have been deleted" msgstr "" -#: cinder/virt/baremetal/dom.py:93 -msgid "No domains exist." 
+#: cinder/volume/drivers/eqlx.py:440 +#, python-format +msgid "Failed to ensure export of volume %s" msgstr "" -#: cinder/virt/baremetal/dom.py:95 +#: cinder/volume/drivers/eqlx.py:459 #, python-format -msgid "============= initial domains =========== : %s" +msgid "Failed to extend_volume %(name)s from %(current_size)sGB to %(new_size)sGB" msgstr "" -#: cinder/virt/baremetal/dom.py:99 -msgid "Building domain: to be removed" +#: cinder/volume/drivers/glusterfs.py:86 +#, python-format +msgid "There's no Gluster config file configured (%s)" msgstr "" -#: cinder/virt/baremetal/dom.py:103 -msgid "Not running domain: remove" +#: cinder/volume/drivers/glusterfs.py:91 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" msgstr "" -#: cinder/virt/baremetal/dom.py:111 -msgid "domain running on an unknown node: discarded" +#: cinder/volume/drivers/glusterfs.py:103 +msgid "mount.glusterfs is not installed" msgstr "" -#: cinder/virt/baremetal/dom.py:127 +#: cinder/volume/drivers/glusterfs.py:161 #, python-format -msgid "No such domain (%s)" +msgid "Cloning volume %(src)s to volume %(dst)s" msgstr "" -#: cinder/virt/baremetal/dom.py:134 -#, python-format -msgid "Failed power down Bare-metal node %s" +#: cinder/volume/drivers/glusterfs.py:166 +msgid "Volume status must be 'available'." msgstr "" -#: cinder/virt/baremetal/dom.py:143 -msgid "deactivate -> activate fails" +#: cinder/volume/drivers/glusterfs.py:202 cinder/volume/drivers/nfs.py:122 +#: cinder/volume/drivers/netapp/nfs.py:753 +#, python-format +msgid "casted to %s" msgstr "" -#: cinder/virt/baremetal/dom.py:153 -msgid "destroy_domain: no such domain" +#: cinder/volume/drivers/glusterfs.py:216 +msgid "Snapshot status must be \"available\" to clone." msgstr "" -#: cinder/virt/baremetal/dom.py:154 +#: cinder/volume/drivers/glusterfs.py:238 #, python-format -msgid "No such domain %s" +msgid "snapshot: %(snap)s, volume: %(vol)s, volume_size: %(size)s" msgstr "" -#: cinder/virt/baremetal/dom.py:161 +#: cinder/volume/drivers/glusterfs.py:257 #, python-format -msgid "Domains: %s" +msgid "will copy from snapshot at %s" msgstr "" -#: cinder/virt/baremetal/dom.py:163 +#: cinder/volume/drivers/glusterfs.py:275 cinder/volume/drivers/nfs.py:172 #, python-format -msgid "Nodes: %s" +msgid "Volume %s does not have provider_location specified, skipping" msgstr "" -#: cinder/virt/baremetal/dom.py:166 +#: cinder/volume/drivers/glusterfs.py:373 #, python-format -msgid "After storing domains: %s" +msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" msgstr "" -#: cinder/virt/baremetal/dom.py:169 -msgid "deactivation/removing domain failed" +#: cinder/volume/drivers/glusterfs.py:403 +#, python-format +msgid "nova call result: %s" msgstr "" -#: cinder/virt/baremetal/dom.py:176 -msgid "===== Domain is being created =====" +#: cinder/volume/drivers/glusterfs.py:405 +msgid "Call to Nova to create snapshot failed" msgstr "" -#: cinder/virt/baremetal/dom.py:179 -msgid "Same domain name already exists" +#: cinder/volume/drivers/glusterfs.py:427 +msgid "Nova returned \"error\" status while creating snapshot." msgstr "" -#: cinder/virt/baremetal/dom.py:181 -msgid "create_domain: before get_idle_node" +#: cinder/volume/drivers/glusterfs.py:431 +#, python-format +msgid "Status of snapshot %(id)s is now %(status)s" msgstr "" -#: cinder/virt/baremetal/dom.py:198 +#: cinder/volume/drivers/glusterfs.py:444 #, python-format -msgid "Created new domain: %s" +msgid "Timed out while waiting for Nova update for creation of snapshot %s." 
msgstr "" -#: cinder/virt/baremetal/dom.py:213 +#: cinder/volume/drivers/glusterfs.py:456 #, python-format -msgid "Failed to boot Bare-metal node %s" +msgid "create snapshot: %s" msgstr "" -#: cinder/virt/baremetal/dom.py:222 -msgid "No such domain exists" +#: cinder/volume/drivers/glusterfs.py:457 +#, python-format +msgid "volume id: %s" msgstr "" -#: cinder/virt/baremetal/dom.py:226 -#, python-format -msgid "change_domain_state: to new state %s" +#: cinder/volume/drivers/glusterfs.py:532 +msgid "'active' must be present when writing snap_info." msgstr "" -#: cinder/virt/baremetal/dom.py:233 +#: cinder/volume/drivers/glusterfs.py:562 #, python-format -msgid "Stored fake domains to the file: %s" +msgid "deleting snapshot %s" msgstr "" -#: cinder/virt/baremetal/dom.py:244 -msgid "domain does not exist" +#: cinder/volume/drivers/glusterfs.py:566 +msgid "Volume status must be \"available\" or \"in-use\"." msgstr "" -#: cinder/virt/baremetal/nodes.py:42 +#: cinder/volume/drivers/glusterfs.py:582 #, python-format -msgid "Unknown baremetal driver %(d)s" +msgid "" +"Snapshot record for %s is not present, allowing snapshot_delete to " +"proceed." msgstr "" -#: cinder/virt/baremetal/proxy.py:148 +#: cinder/volume/drivers/glusterfs.py:587 #, python-format -msgid "Error encountered when destroying instance '%(name)s': %(ex)s" +msgid "snapshot_file for this snap is %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:162 +#: cinder/volume/drivers/glusterfs.py:608 #, python-format -msgid "instance %(instance_name)s: deleting instance files %(target)s" +msgid "No base file found for %s." msgstr "" -#: cinder/virt/baremetal/proxy.py:189 +#: cinder/volume/drivers/glusterfs.py:625 #, python-format -msgid "instance %s: rebooted" -msgstr "" - -#: cinder/virt/baremetal/proxy.py:192 -msgid "_wait_for_reboot failed" +msgid "No %(base_id)s found for %(file)s" msgstr "" -#: cinder/virt/baremetal/proxy.py:222 +#: cinder/volume/drivers/glusterfs.py:680 #, python-format -msgid "instance %s: rescued" +msgid "No file found with %s as backing file." msgstr "" -#: cinder/virt/baremetal/proxy.py:225 -msgid "_wait_for_rescue failed" +#: cinder/volume/drivers/glusterfs.py:690 +#, python-format +msgid "No snap found with %s as backing file." msgstr "" -#: cinder/virt/baremetal/proxy.py:242 -msgid "<============= spawn of baremetal =============>" +#: cinder/volume/drivers/glusterfs.py:701 +#, python-format +msgid "No file depends on %s." msgstr "" -#: cinder/virt/baremetal/proxy.py:255 +#: cinder/volume/drivers/glusterfs.py:727 #, python-format -msgid "instance %s: is building" +msgid "Check condition failed: %s expected to be None." msgstr "" -#: cinder/virt/baremetal/proxy.py:260 -msgid "Key is injected but instance is not running yet" +#: cinder/volume/drivers/glusterfs.py:778 +msgid "Call to Nova delete snapshot failed" msgstr "" -#: cinder/virt/baremetal/proxy.py:265 +#: cinder/volume/drivers/glusterfs.py:796 #, python-format -msgid "instance %s: booted" +msgid "status of snapshot %s is still \"deleting\"... waiting" msgstr "" -#: cinder/virt/baremetal/proxy.py:268 +#: cinder/volume/drivers/glusterfs.py:802 #, python-format -msgid "~~~~~~ current state = %s ~~~~~~" +msgid "Unable to delete snapshot %(id)s, status: %(status)s." msgstr "" -#: cinder/virt/baremetal/proxy.py:269 +#: cinder/volume/drivers/glusterfs.py:815 #, python-format -msgid "instance %s spawned successfully" +msgid "Timed out while waiting for Nova update for deletion of snapshot %(id)s." 
msgstr "" -#: cinder/virt/baremetal/proxy.py:272 +#: cinder/volume/drivers/glusterfs.py:904 #, python-format -msgid "instance %s:not booted" +msgid "%s must be a valid raw or qcow2 image." msgstr "" -#: cinder/virt/baremetal/proxy.py:274 -msgid "Bremetal assignment is overcommitted." +#: cinder/volume/drivers/glusterfs.py:967 +msgid "Extend volume is only supported for this driver when no snapshots exist." msgstr "" -#: cinder/virt/baremetal/proxy.py:354 +#: cinder/volume/drivers/glusterfs.py:975 #, python-format -msgid "instance %s: Creating image" +msgid "Unrecognized backing format: %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:473 +#: cinder/volume/drivers/glusterfs.py:990 #, python-format -msgid "instance %(inst_name)s: injecting %(injection)s into image %(img_id)s" +msgid "creating new volume at %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:484 +#: cinder/volume/drivers/glusterfs.py:993 #, python-format -msgid "" -"instance %(inst_name)s: ignoring error injecting data into image " -"%(img_id)s (%(e)s)" +msgid "file already exists at %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:529 +#: cinder/volume/drivers/glusterfs.py:1019 cinder/volume/drivers/nfs.py:159 #, python-format -msgid "instance %s: starting toXML method" +msgid "Exception during mounting %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:531 +#: cinder/volume/drivers/glusterfs.py:1021 #, python-format -msgid "instance %s: finished toXML method" +msgid "Available shares: %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:574 cinder/virt/libvirt/connection.py:1815 +#: cinder/volume/drivers/glusterfs.py:1038 +#, python-format msgid "" -"Cannot get the number of cpu, because this function is not implemented " -"for this platform. This error can be safely ignored for now." +"GlusterFS share at %(dir)s is not writable by the Cinder volume service. " +"Snapshot operations will not be supported." msgstr "" -#: cinder/virt/baremetal/proxy.py:714 +#: cinder/volume/drivers/gpfs.py:96 #, python-format -msgid "#### RLK: cpu_arch = %s " -msgstr "" - -#: cinder/virt/baremetal/proxy.py:746 -msgid "Updating!" +msgid "GPFS is not active. Detailed output: %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:773 cinder/virt/libvirt/connection.py:2609 -#: cinder/virt/xenapi/host.py:129 -msgid "Updating host stats" +#: cinder/volume/drivers/gpfs.py:97 +#, python-format +msgid "GPFS is not running - state: %s" msgstr "" -#: cinder/virt/baremetal/tilera.py:185 -msgid "free_node...." +#: cinder/volume/drivers/gpfs.py:140 +msgid "Option gpfs_mount_point_base is not set correctly." msgstr "" -#: cinder/virt/baremetal/tilera.py:216 -#, python-format -msgid "deactivate_node is called for node_id = %(id)s node_ip = %(ip)s" +#: cinder/volume/drivers/gpfs.py:147 +msgid "Option gpfs_images_share_mode is not set correctly." msgstr "" -#: cinder/virt/baremetal/tilera.py:221 -msgid "status of node is set to 0" +#: cinder/volume/drivers/gpfs.py:153 +msgid "Option gpfs_images_dir is not set correctly." msgstr "" -#: cinder/virt/baremetal/tilera.py:232 -msgid "rootfs is already removed" +#: cinder/volume/drivers/gpfs.py:160 +#, python-format +msgid "" +"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " +"belong to different file systems" msgstr "" -#: cinder/virt/baremetal/tilera.py:264 -msgid "Before ping to the bare-metal node" +#: cinder/volume/drivers/gpfs.py:169 +#, python-format +msgid "" +"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in " +"cluster daemon level %(cur)s - must be at least at level %(min)s." 
msgstr "" -#: cinder/virt/baremetal/tilera.py:275 +#: cinder/volume/drivers/gpfs.py:183 #, python-format -msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is ready" +msgid "%s must be an absolute path." msgstr "" -#: cinder/virt/baremetal/tilera.py:279 +#: cinder/volume/drivers/gpfs.py:188 #, python-format -msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is not ready, out_msg=%(out_msg)s" +msgid "%s is not a directory." msgstr "" -#: cinder/virt/baremetal/tilera.py:292 -msgid "Noting to do for tilera nodes: vmlinux is in CF" +#: cinder/volume/drivers/gpfs.py:197 +#, python-format +msgid "" +"The GPFS filesystem %(fs)s is not at the required release level. Current" +" level is %(cur)s, must be at least %(min)s." msgstr "" -#: cinder/virt/baremetal/tilera.py:316 -msgid "activate_node" +#: cinder/volume/drivers/gpfs.py:556 +#, python-format +msgid "Failed to resize volume %(volume_id)s, error: %(error)s" msgstr "" -#: cinder/virt/baremetal/tilera.py:330 -msgid "Node is unknown error state." +#: cinder/volume/drivers/gpfs.py:604 +#, python-format +msgid "mkfs failed on volume %(vol)s, error message was: %(err)s" msgstr "" -#: cinder/virt/disk/api.py:165 -msgid "no capable image handler configured" +#: cinder/volume/drivers/gpfs.py:637 +#, python-format +msgid "" +"%s cannot be accessed. Verify that GPFS is active and file system is " +"mounted." msgstr "" -#: cinder/virt/disk/api.py:178 +#: cinder/volume/drivers/lvm.py:189 #, python-format -msgid "unknown disk image handler: %s" +msgid "Unabled to delete due to existing snapshot for volume: %s" msgstr "" -#: cinder/virt/disk/api.py:189 -msgid "image already mounted" +#: cinder/volume/drivers/lvm.py:215 +#, python-format +msgid "Volume device file path %s does not exist." msgstr "" -#: cinder/virt/disk/api.py:276 cinder/virt/disk/guestfs.py:64 -#: cinder/virt/disk/guestfs.py:78 cinder/virt/disk/mount.py:100 +#: cinder/volume/drivers/lvm.py:221 #, python-format -msgid "Failed to mount filesystem: %s" +msgid "Size for volume: %s not found, cannot secure delete." msgstr "" -#: cinder/virt/disk/api.py:291 +#: cinder/volume/drivers/lvm.py:262 #, python-format -msgid "Failed to remove container: %s" +msgid "snapshot: %s not found, skipping delete operations" msgstr "" -#: cinder/virt/disk/api.py:441 +#: cinder/volume/drivers/lvm.py:361 #, python-format -msgid "User %(username)s not found in password file." +msgid "Unable to update stats on non-initialized Volume Group: %s" msgstr "" -#: cinder/virt/disk/api.py:457 +#: cinder/volume/drivers/lvm.py:462 #, python-format -msgid "User %(username)s not found in shadow file." 
+msgid "Error creating iSCSI target, retrying creation for target: %s" msgstr "" -#: cinder/virt/disk/guestfs.py:39 +#: cinder/volume/drivers/lvm.py:482 #, python-format -msgid "unsupported partition: %s" +msgid "volume_info:%s" msgstr "" -#: cinder/virt/disk/guestfs.py:77 -msgid "unknown guestmount error" +#: cinder/volume/drivers/lvm.py:518 +msgid "Detected inconsistency in provider_location id" msgstr "" -#: cinder/virt/disk/loop.py:30 +#: cinder/volume/drivers/lvm.py:519 cinder/volume/drivers/lvm.py:724 +#: cinder/volume/drivers/huawei/rest_common.py:1225 #, python-format -msgid "Could not attach image to loopback: %s" -msgstr "" - -#: cinder/virt/disk/mount.py:76 -msgid "no partitions found" +msgid "%s" msgstr "" -#: cinder/virt/disk/mount.py:77 +#: cinder/volume/drivers/lvm.py:573 #, python-format -msgid "Failed to map partitions: %s" +msgid "Symbolic link %s not found" msgstr "" -#: cinder/virt/disk/nbd.py:58 -msgid "nbd unavailable: module not loaded" +#: cinder/volume/drivers/nfs.py:109 +msgid "Driver specific implementation needs to return mount_point_base." msgstr "" -#: cinder/virt/disk/nbd.py:63 -msgid "No free nbd devices" +#: cinder/volume/drivers/nfs.py:263 +#, python-format +msgid "Expected volume size was %d" msgstr "" -#: cinder/virt/disk/nbd.py:81 +#: cinder/volume/drivers/nfs.py:264 #, python-format -msgid "qemu-nbd error: %s" +msgid " but size is now %d" msgstr "" -#: cinder/virt/disk/nbd.py:93 +#: cinder/volume/drivers/nfs.py:361 #, python-format -msgid "nbd device %s did not show up" +msgid "%s is already mounted" msgstr "" -#: cinder/virt/libvirt/connection.py:265 +#: cinder/volume/drivers/nfs.py:421 #, python-format -msgid "Connecting to libvirt: %s" +msgid "There's no NFS config file configured (%s)" msgstr "" -#: cinder/virt/libvirt/connection.py:286 -msgid "Connection to libvirt broke" +#: cinder/volume/drivers/nfs.py:426 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" msgstr "" -#: cinder/virt/libvirt/connection.py:388 +#: cinder/volume/drivers/nfs.py:431 #, python-format -msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s" +msgid "NFS config 'nfs_oversub_ratio' invalid. Must be > 0: %s" msgstr "" -#: cinder/virt/libvirt/connection.py:400 +#: cinder/volume/drivers/nfs.py:439 #, python-format -msgid "" -"Error from libvirt during saved instance removal. Code=%(errcode)s " -"Error=%(e)s" +msgid "NFS config 'nfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" msgstr "" -#: cinder/virt/libvirt/connection.py:411 +#: cinder/volume/drivers/nfs.py:493 #, python-format -msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s" +msgid "Selected %s as target nfs share." msgstr "" -#: cinder/virt/libvirt/connection.py:423 -msgid "Instance destroyed successfully." +#: cinder/volume/drivers/nfs.py:526 +#, python-format +msgid "%s is above nfs_used_ratio" msgstr "" -#: cinder/virt/libvirt/connection.py:435 +#: cinder/volume/drivers/nfs.py:529 #, python-format -msgid "Error from libvirt during unfilter. 
Code=%(errcode)s Error=%(e)s" +msgid "%s is above nfs_oversub_ratio" msgstr "" -#: cinder/virt/libvirt/connection.py:461 +#: cinder/volume/drivers/nfs.py:532 #, python-format -msgid "Deleting instance files %(target)s" +msgid "%s reserved space is above nfs_oversub_ratio" msgstr "" -#: cinder/virt/libvirt/connection.py:554 -msgid "attaching LXC block device" +#: cinder/volume/drivers/rbd.py:160 +#, python-format +msgid "Invalid argument - whence=%s not supported" msgstr "" -#: cinder/virt/libvirt/connection.py:567 -msgid "detaching LXC block device" +#: cinder/volume/drivers/rbd.py:164 +msgid "Invalid argument" msgstr "" -#: cinder/virt/libvirt/connection.py:692 -#, fuzzy, python-format -msgid "Instance soft rebooted successfully." -msgstr "Volume %s: erfolgreich erstellt" +#: cinder/volume/drivers/rbd.py:183 +msgid "fileno() not supported by RBD()" +msgstr "" -#: cinder/virt/libvirt/connection.py:696 -msgid "Failed to soft reboot instance." +#: cinder/volume/drivers/rbd.py:210 +#, python-format +msgid "error opening rbd image %s" msgstr "" -#: cinder/virt/libvirt/connection.py:725 -msgid "Instance shutdown successfully." +#: cinder/volume/drivers/rbd.py:259 +msgid "rados and rbd python libraries not found" msgstr "" -#: cinder/virt/libvirt/connection.py:761 cinder/virt/libvirt/connection.py:905 -msgid "During reboot, instance disappeared." +#: cinder/volume/drivers/rbd.py:265 +msgid "error connecting to ceph cluster" msgstr "" -#: cinder/virt/libvirt/connection.py:766 -msgid "Instance rebooted successfully." +#: cinder/volume/drivers/rbd.py:346 cinder/volume/drivers/sheepdog.py:178 +msgid "error refreshing volume stats" msgstr "" -#: cinder/virt/libvirt/connection.py:867 cinder/virt/xenapi/vmops.py:1358 +#: cinder/volume/drivers/rbd.py:377 #, python-format -msgid "" -"Found %(migration_count)d unconfirmed migrations older than " -"%(confirm_window)d seconds" +msgid "clone depth exceeds limit of %s" msgstr "" -#: cinder/virt/libvirt/connection.py:871 +#: cinder/volume/drivers/rbd.py:411 #, python-format -msgid "Automatically confirming migration %d" +msgid "maximum clone depth (%d) has been reached - flattening source volume" msgstr "" -#: cinder/virt/libvirt/connection.py:896 -msgid "Instance is running" +#: cinder/volume/drivers/rbd.py:423 +#, python-format +msgid "flattening source volume %s" msgstr "" -#: cinder/virt/libvirt/connection.py:910 -msgid "Instance spawned successfully." 
+#: cinder/volume/drivers/rbd.py:435 +#, python-format +msgid "creating snapshot='%s'" msgstr "" -#: cinder/virt/libvirt/connection.py:926 +#: cinder/volume/drivers/rbd.py:445 #, python-format -msgid "data: %(data)r, fpath: %(fpath)r" +msgid "cloning '%(src_vol)s@%(src_snap)s' to '%(dest)s'" msgstr "" -#: cinder/virt/libvirt/connection.py:978 -msgid "Guest does not have a console available" +#: cinder/volume/drivers/rbd.py:459 +msgid "clone created successfully" msgstr "" -#: cinder/virt/libvirt/connection.py:1020 +#: cinder/volume/drivers/rbd.py:468 #, python-format -msgid "Path '%(path)s' supports direct I/O" +msgid "creating volume '%s'" msgstr "" -#: cinder/virt/libvirt/connection.py:1024 +#: cinder/volume/drivers/rbd.py:484 #, python-format -msgid "Path '%(path)s' does not support direct I/O: '%(ex)s'" +msgid "flattening %(pool)s/%(img)s" msgstr "" -#: cinder/virt/libvirt/connection.py:1028 cinder/virt/libvirt/connection.py:1032 +#: cinder/volume/drivers/rbd.py:490 #, python-format -msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'" +msgid "cloning %(pool)s/%(img)s@%(snap)s to %(dst)s" msgstr "" -#: cinder/virt/libvirt/connection.py:1153 -msgid "Creating image" +#: cinder/volume/drivers/rbd.py:527 +msgid "volume has no backup snaps" msgstr "" -#: cinder/virt/libvirt/connection.py:1339 +#: cinder/volume/drivers/rbd.py:550 #, python-format -msgid "Injecting %(injection)s into image %(img_id)s" +msgid "volume %s is not a clone" msgstr "" -#: cinder/virt/libvirt/connection.py:1349 +#: cinder/volume/drivers/rbd.py:568 #, python-format -msgid "Ignoring error injecting data into image %(img_id)s (%(e)s)" +msgid "deleting parent snapshot %s" msgstr "" -#: cinder/virt/libvirt/connection.py:1381 +#: cinder/volume/drivers/rbd.py:579 #, python-format -msgid "block_device_list %s" +msgid "deleting parent %s" msgstr "" -#: cinder/virt/libvirt/connection.py:1658 -msgid "Starting toXML method" +#: cinder/volume/drivers/rbd.py:593 +#, python-format +msgid "volume %s no longer exists in backend" msgstr "" -#: cinder/virt/libvirt/connection.py:1662 -msgid "Finished toXML method" +#: cinder/volume/drivers/rbd.py:609 +msgid "volume has clone snapshot(s)" msgstr "" -#: cinder/virt/libvirt/connection.py:1679 +#: cinder/volume/drivers/rbd.py:625 #, python-format -msgid "" -"Error from libvirt while looking up %(instance_name)s: [Error Code " -"%(error_code)s] %(ex)s" +msgid "deleting rbd volume %s" msgstr "" -#: cinder/virt/libvirt/connection.py:1920 -msgid "libvirt version is too old (does not support getVersion)" +#: cinder/volume/drivers/rbd.py:629 +msgid "" +"ImageBusy error raised while deleting rbd volume. This may have been " +"caused by a connection from a client that has crashed and, if so, may be " +"resolved by retrying the delete after 30 seconds has elapsed." 
msgstr "" -#: cinder/virt/libvirt/connection.py:1942 -#, python-format -msgid "'' must be 1, but %d\n" +#: cinder/volume/drivers/rbd.py:642 +msgid "volume is a clone so cleaning references" msgstr "" -#: cinder/virt/libvirt/connection.py:1969 +#: cinder/volume/drivers/rbd.py:696 #, python-format -msgid "topology (%(topology)s) must have %(ks)s" +msgid "connection data: %s" msgstr "" -#: cinder/virt/libvirt/connection.py:2067 -#, python-format -msgid "" -"Instance launched has CPU info:\n" -"%s" +#: cinder/volume/drivers/rbd.py:705 +msgid "Not stored in rbd" msgstr "" -#: cinder/virt/libvirt/connection.py:2079 -#, python-format -msgid "" -"CPU doesn't have compatibility.\n" -"\n" -"%(ret)s\n" -"\n" -"Refer to %(u)s" +#: cinder/volume/drivers/rbd.py:709 +msgid "Blank components" msgstr "" -#: cinder/virt/libvirt/connection.py:2136 -#, python-format -msgid "Timeout migrating for %s. nwfilter not found." +#: cinder/volume/drivers/rbd.py:712 +msgid "Not an rbd snapshot" msgstr "" -#: cinder/virt/libvirt/connection.py:2352 +#: cinder/volume/drivers/rbd.py:724 #, python-format -msgid "skipping %(path)s since it looks like volume" +msgid "not cloneable: %s" msgstr "" -#: cinder/virt/libvirt/connection.py:2407 +#: cinder/volume/drivers/rbd.py:728 #, python-format -msgid "Getting disk size of %(i_name)s: %(e)s" +msgid "%s is in a different ceph cluster" msgstr "" -#: cinder/virt/libvirt/connection.py:2458 -#, python-format -msgid "Instance %s: Starting migrate_disk_and_power_off" +#: cinder/volume/drivers/rbd.py:733 +msgid "rbd image clone requires image format to be 'raw' but image {0} is '{1}'" msgstr "" -#: cinder/virt/libvirt/connection.py:2513 -msgid "During wait running, instance disappeared." +#: cinder/volume/drivers/rbd.py:747 +#, fuzzy, python-format +msgid "Unable to open image %(loc)s: %(err)s" +msgstr "Nicht möglich volume %s zufinden" + +#: cinder/volume/drivers/rbd.py:817 +msgid "volume backup complete." msgstr "" -#: cinder/virt/libvirt/connection.py:2518 -msgid "Instance running successfully." +#: cinder/volume/drivers/rbd.py:830 +msgid "volume restore complete." msgstr "" -#: cinder/virt/libvirt/connection.py:2525 +#: cinder/volume/drivers/rbd.py:840 cinder/volume/drivers/sheepdog.py:195 #, python-format -msgid "Instance %s: Starting finish_migration" +msgid "Failed to Extend Volume %(volname)s" msgstr "" -#: cinder/virt/libvirt/connection.py:2565 +#: cinder/volume/drivers/rbd.py:845 cinder/volume/drivers/sheepdog.py:200 +#: cinder/volume/drivers/windows/windows.py:223 #, python-format -msgid "Instance %s: Starting finish_revert_migration" +msgid "Extend volume from %(old_size)s GB to %(new_size)s GB." msgstr "" -#: cinder/virt/libvirt/firewall.py:42 -msgid "" -"Libvirt module could not be loaded. NWFilterFirewall will not work " -"correctly." +#: cinder/volume/drivers/scality.py:67 +msgid "Value required for 'scality_sofs_config'" msgstr "" -#: cinder/virt/libvirt/firewall.py:93 -msgid "Called setup_basic_filtering in nwfilter" +#: cinder/volume/drivers/scality.py:78 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" msgstr "" -#: cinder/virt/libvirt/firewall.py:101 -msgid "Ensuring static filters" +#: cinder/volume/drivers/scality.py:84 +msgid "Cannot execute /sbin/mount.sofs" msgstr "" -#: cinder/virt/libvirt/firewall.py:171 -#, python-format -msgid "The nwfilter(%(instance_filter_name)s) is not found." 
+#: cinder/volume/drivers/scality.py:105 +msgid "Cannot mount Scality SOFS, check syslog for errors" msgstr "" -#: cinder/virt/libvirt/firewall.py:217 +#: cinder/volume/drivers/scality.py:139 #, python-format -msgid "The nwfilter(%(instance_filter_name)s) for%(name)s is not found." +msgid "Cannot find volume dir for Scality SOFS at '%s'" msgstr "" -#: cinder/virt/libvirt/firewall.py:233 -msgid "iptables firewall: Setup Basic Filtering" +#: cinder/volume/drivers/sheepdog.py:59 +#, python-format +msgid "Sheepdog is not working: %s" msgstr "" -#: cinder/virt/libvirt/firewall.py:252 -msgid "Attempted to unfilter instance which is not filtered" +#: cinder/volume/drivers/sheepdog.py:64 +msgid "Sheepdog is not working" msgstr "" -#: cinder/virt/libvirt/imagecache.py:170 +#: cinder/volume/drivers/solidfire.py:144 #, python-format -msgid "%s is a valid instance name" +msgid "Payload for SolidFire API call: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:173 +#: cinder/volume/drivers/solidfire.py:151 #, python-format -msgid "%s has a disk file" +msgid "" +"Failed to make httplib connection to SolidFire Cluster: %s (verify san_ip " +"settings)" msgstr "" -#: cinder/virt/libvirt/imagecache.py:175 +#: cinder/volume/drivers/solidfire.py:154 #, python-format -msgid "Instance %(instance)s is backed by %(backing)s" +msgid "Failed to make httplib connection: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:186 +#: cinder/volume/drivers/solidfire.py:161 #, python-format msgid "" -"Instance %(instance)s is using a backing file %(backing)s which does not " -"appear in the image service" +"Request to SolidFire cluster returned bad status: %(status)s / %(reason)s" +" (check san_login/san_password settings)" msgstr "" -#: cinder/virt/libvirt/imagecache.py:237 +#: cinder/volume/drivers/solidfire.py:166 #, python-format -msgid "%(id)s (%(base_file)s): image verification failed" +msgid "HTTP request failed, with status: %(status)s and reason: %(reason)s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:247 +#: cinder/volume/drivers/solidfire.py:177 #, python-format -msgid "%(id)s (%(base_file)s): image verification skipped, no hash stored" +msgid "Call to json.loads() raised an exception: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:266 +#: cinder/volume/drivers/solidfire.py:183 #, python-format -msgid "Cannot remove %(base_file)s, it does not exist" +msgid "Results of SolidFire API call: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:278 +#: cinder/volume/drivers/solidfire.py:187 #, python-format -msgid "Base file too young to remove: %s" +msgid "Clone operation encountered: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:281 +#: cinder/volume/drivers/solidfire.py:189 #, python-format -msgid "Removing base file: %s" +msgid "Waiting for outstanding operation before retrying snapshot: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:288 +#: cinder/volume/drivers/solidfire.py:195 #, python-format -msgid "Failed to remove %(base_file)s, error was %(error)s" +msgid "Detected xDBVersionMismatch, retry %s of 5" msgstr "" -#: cinder/virt/libvirt/imagecache.py:299 +#: cinder/volume/drivers/solidfire.py:202 +#: cinder/volume/drivers/solidfire.py:271 +#: cinder/volume/drivers/solidfire.py:366 #, python-format -msgid "%(id)s (%(base_file)s): checking" +msgid "API response: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:318 +#: cinder/volume/drivers/solidfire.py:222 #, python-format -msgid "" -"%(id)s (%(base_file)s): in use: on this node %(local)d local, %(remote)d " -"on other nodes" +msgid "Found 
solidfire account: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:330 +#: cinder/volume/drivers/solidfire.py:253 #, python-format -msgid "" -"%(id)s (%(base_file)s): warning -- an absent base file is in use! " -"instances: %(instance_list)s" +msgid "solidfire account: %s does not exist, create it..." msgstr "" -#: cinder/virt/libvirt/imagecache.py:338 +#: cinder/volume/drivers/solidfire.py:315 #, python-format -msgid "%(id)s (%(base_file)s): in use on (%(remote)d on other nodes)" +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" msgstr "" -#: cinder/virt/libvirt/imagecache.py:348 -#, python-format -msgid "%(id)s (%(base_file)s): image is not in use" +#: cinder/volume/drivers/solidfire.py:398 +msgid "Failed to get model update from clone" msgstr "" -#: cinder/virt/libvirt/imagecache.py:354 +#: cinder/volume/drivers/solidfire.py:410 #, python-format -msgid "%(id)s (%(base_file)s): image is in use" +msgid "Failed volume create: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:377 +#: cinder/volume/drivers/solidfire.py:425 #, python-format -msgid "Skipping verification, no base directory at %s" +msgid "More than one valid preset was detected, using %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:381 -msgid "Verify base images" +#: cinder/volume/drivers/solidfire.py:460 +#, python-format +msgid "Failed to get SolidFire Volume: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:388 +#: cinder/volume/drivers/solidfire.py:469 #, python-format -msgid "Image id %(id)s yields fingerprint %(fingerprint)s" +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." msgstr "" -#: cinder/virt/libvirt/imagecache.py:406 +#: cinder/volume/drivers/solidfire.py:478 #, python-format -msgid "Unknown base file: %s" +msgid "Volume %s, not found on SF Cluster." msgstr "" -#: cinder/virt/libvirt/imagecache.py:411 +#: cinder/volume/drivers/solidfire.py:481 #, python-format -msgid "Active base files: %s" +msgid "Found %(count)s volumes mapped to id: %(uuid)s." msgstr "" -#: cinder/virt/libvirt/imagecache.py:414 -#, python-format -msgid "Corrupt base files: %s" +#: cinder/volume/drivers/solidfire.py:550 +msgid "Enter SolidFire delete_volume..." msgstr "" -#: cinder/virt/libvirt/imagecache.py:418 +#: cinder/volume/drivers/solidfire.py:554 #, python-format -msgid "Removable base files: %s" +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" msgstr "" -#: cinder/virt/libvirt/imagecache.py:426 -msgid "Verification complete" +#: cinder/volume/drivers/solidfire.py:556 +msgid "This usually means the volume was never successfully created." msgstr "" -#: cinder/virt/libvirt/utils.py:264 -msgid "Unable to find an open port" +#: cinder/volume/drivers/solidfire.py:569 +#, python-format +msgid "Failed to delete SolidFire Volume: %s" msgstr "" -#: cinder/virt/libvirt/vif.py:90 +#: cinder/volume/drivers/solidfire.py:572 +#: cinder/volume/drivers/solidfire.py:646 +#: cinder/volume/drivers/solidfire.py:709 +#: cinder/volume/drivers/solidfire.py:734 #, python-format -msgid "Ensuring vlan %(vlan)s and bridge %(bridge)s" +msgid "Volume ID %s was not found on the SolidFire Cluster!" 
msgstr "" -#: cinder/virt/libvirt/vif.py:99 -#, python-format -msgid "Ensuring bridge %s" +#: cinder/volume/drivers/solidfire.py:575 +msgid "Leaving SolidFire delete_volume" msgstr "" -#: cinder/virt/libvirt/vif.py:165 cinder/virt/libvirt/vif.py:220 -#, python-format -msgid "Failed while unplugging vif of instance '%s'" +#: cinder/volume/drivers/solidfire.py:579 +msgid "Executing SolidFire ensure_export..." msgstr "" -#: cinder/virt/libvirt/volume.py:163 -#, python-format -msgid "iSCSI device not found at %s" +#: cinder/volume/drivers/solidfire.py:587 +msgid "Executing SolidFire create_export..." msgstr "" -#: cinder/virt/libvirt/volume.py:166 -#, python-format -msgid "" -"ISCSI volume not yet found at: %(mount_device)s. Will rescan & retry. " -"Try number: %(tries)s" +#: cinder/volume/drivers/solidfire.py:638 +msgid "Entering SolidFire extend_volume..." msgstr "" -#: cinder/virt/libvirt/volume.py:178 -#, python-format -msgid "Found iSCSI node %(mount_device)s (after %(tries)s rescans)" +#: cinder/volume/drivers/solidfire.py:660 +msgid "Leaving SolidFire extend_volume" msgstr "" -#: cinder/virt/vmwareapi/error_util.py:93 -#, python-format -msgid "Error(s) %s occurred in the call to RetrieveProperties" +#: cinder/volume/drivers/solidfire.py:665 +msgid "Updating cluster status info" msgstr "" -#: cinder/virt/vmwareapi/fake.py:44 cinder/virt/xenapi/fake.py:77 -#, python-format -msgid "%(text)s: _db_content => %(content)s" +#: cinder/volume/drivers/solidfire.py:673 +msgid "Failed to get updated stats" msgstr "" -#: cinder/virt/vmwareapi/fake.py:131 -#, python-format -msgid "Property %(attr)s not set for the managed object %(objName)s" +#: cinder/volume/drivers/solidfire.py:703 +#: cinder/volume/drivers/solidfire.py:728 +msgid "Entering SolidFire attach_volume..." msgstr "" -#: cinder/virt/vmwareapi/fake.py:437 -msgid "There is no VM registered" +#: cinder/volume/drivers/solidfire.py:773 +msgid "Leaving SolidFire transfer volume" msgstr "" -#: cinder/virt/vmwareapi/fake.py:439 cinder/virt/vmwareapi/fake.py:609 +#: cinder/volume/drivers/zadara.py:236 #, python-format -msgid "Virtual Machine with ref %s is not there" +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" msgstr "" -#: cinder/virt/vmwareapi/fake.py:502 +#: cinder/volume/drivers/zadara.py:260 #, python-format -msgid "Logging out a session that is invalid or already logged out: %s" +msgid "Operation completed. %(data)s" msgstr "" -#: cinder/virt/vmwareapi/fake.py:517 -msgid "Session is faulty" +#: cinder/volume/drivers/zadara.py:357 +#, python-format +msgid "Pool %(name)s: %(total)sGB total, %(free)sGB free" msgstr "" -#: cinder/virt/vmwareapi/fake.py:520 -msgid "Session Invalid" +#: cinder/volume/drivers/zadara.py:408 cinder/volume/drivers/zadara.py:531 +#, python-format +msgid "Volume %(name)s could not be found. 
It might be already deleted" msgstr "" -#: cinder/virt/vmwareapi/fake.py:606 -msgid " No Virtual Machine has been registered yet" +#: cinder/volume/drivers/zadara.py:438 +#, python-format +msgid "Create snapshot: %s" msgstr "" -#: cinder/virt/vmwareapi/io_util.py:99 +#: cinder/volume/drivers/zadara.py:445 cinder/volume/drivers/zadara.py:490 +#: cinder/volume/drivers/zadara.py:516 #, python-format -msgid "Glance image %s is in killed state" +msgid "Volume %(name)s not found" msgstr "" -#: cinder/virt/vmwareapi/io_util.py:107 +#: cinder/volume/drivers/zadara.py:456 #, python-format -msgid "Glance image %(image_id)s is in unknown state - %(state)s" +msgid "Delete snapshot: %s" msgstr "" -#: cinder/virt/vmwareapi/network_utils.py:128 -msgid "" -"ESX SOAP server returned an empty port group for the host system in its " -"response" +#: cinder/volume/drivers/zadara.py:464 +#, python-format +msgid "snapshot: original volume %s not found, skipping delete operation" msgstr "" -#: cinder/virt/vmwareapi/network_utils.py:155 +#: cinder/volume/drivers/zadara.py:472 #, python-format -msgid "Creating Port Group with name %s on the ESX host" +msgid "snapshot: snapshot %s not found, skipping delete operation" msgstr "" -#: cinder/virt/vmwareapi/network_utils.py:169 +#: cinder/volume/drivers/zadara.py:483 #, python-format -msgid "Created Port Group with name %s on the ESX host" +msgid "Creating volume from snapshot: %s" msgstr "" -#: cinder/virt/vmwareapi/read_write_util.py:150 +#: cinder/volume/drivers/zadara.py:496 #, python-format -msgid "Exception during HTTP connection close in VMWareHTTpWrite. Exception is %s" +msgid "Snapshot %(name)s not found" msgstr "" -#: cinder/virt/vmwareapi/vim.py:84 -msgid "Unable to import suds." +#: cinder/volume/drivers/zadara.py:614 +#, python-format +msgid "Attach properties: %(properties)s" msgstr "" -#: cinder/virt/vmwareapi/vim.py:90 -msgid "Must specify vmwareapi_wsdl_loc" +#: cinder/volume/drivers/emc/emc_smis_common.py:40 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." msgstr "" -#: cinder/virt/vmwareapi/vim.py:145 -#, python-format -msgid "No such SOAP method '%s' provided by VI SDK" +#: cinder/volume/drivers/emc/emc_smis_common.py:79 +msgid "Entering create_volume." msgstr "" -#: cinder/virt/vmwareapi/vim.py:150 +#: cinder/volume/drivers/emc/emc_smis_common.py:83 #, python-format -msgid "httplib error in %s: " +msgid "Create Volume: %(volume)s Size: %(size)lu" msgstr "" -#: cinder/virt/vmwareapi/vim.py:157 +#: cinder/volume/drivers/emc/emc_smis_common.py:91 #, python-format -msgid "Socket error in %s: " +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" msgstr "" -#: cinder/virt/vmwareapi/vim.py:162 +#: cinder/volume/drivers/emc/emc_smis_common.py:98 #, python-format -msgid "Type error in %s: " +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" msgstr "" -#: cinder/virt/vmwareapi/vim.py:166 +#: cinder/volume/drivers/emc/emc_smis_common.py:107 #, python-format -msgid "Exception in %s " +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." 
msgstr "" -#: cinder/virt/vmwareapi/vmops.py:66 -msgid "Getting list of instances" +#: cinder/volume/drivers/emc/emc_smis_common.py:115 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:82 +#: cinder/volume/drivers/emc/emc_smis_common.py:130 #, python-format -msgid "Got total of %s instances" +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:126 -msgid "Couldn't get a local Datastore reference" +#: cinder/volume/drivers/emc/emc_smis_common.py:137 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:196 +#: cinder/volume/drivers/emc/emc_smis_common.py:144 #, python-format -msgid "Creating VM with the name %s on the ESX host" +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:205 -#, python-format -msgid "Created VM with the name %s on the ESX host" +#: cinder/volume/drivers/emc/emc_smis_common.py:152 +msgid "Entering create_volume_from_snapshot." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:234 +#: cinder/volume/drivers/emc/emc_smis_common.py:157 #, python-format msgid "" -"Creating Virtual Disk of size %(vmdk_file_size_in_kb)s KB and adapter " -"type %(adapter_type)s on the ESX host local store %(data_store_name)s" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:251 +#: cinder/volume/drivers/emc/emc_smis_common.py:167 #, python-format msgid "" -"Created Virtual Disk of size %(vmdk_file_size_in_kb)s KB on the ESX host " -"local store %(data_store_name)s" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:260 +#: cinder/volume/drivers/emc/emc_smis_common.py:177 #, python-format msgid "" -"Deleting the file %(flat_uploaded_vmdk_path)s on the ESX host localstore " -"%(data_store_name)s" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:272 +#: cinder/volume/drivers/emc/emc_smis_common.py:188 #, python-format msgid "" -"Deleted the file %(flat_uploaded_vmdk_path)s on the ESX host local store " -"%(data_store_name)s" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." 
msgstr "" -#: cinder/virt/vmwareapi/vmops.py:283 +#: cinder/volume/drivers/emc/emc_smis_common.py:197 #, python-format msgid "" -"Downloading image file data %(image_ref)s to the ESX data store " -"%(data_store_name)s" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:298 +#: cinder/volume/drivers/emc/emc_smis_common.py:218 #, python-format msgid "" -"Downloaded image file data %(image_ref)s to the ESX data store " -"%(data_store_name)s" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:315 +#: cinder/volume/drivers/emc/emc_smis_common.py:230 #, python-format -msgid "Reconfiguring VM instance %s to attach the image disk" +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:322 +#: cinder/volume/drivers/emc/emc_smis_common.py:241 #, python-format -msgid "Reconfigured VM instance %s to attach the image disk" +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:329 +#: cinder/volume/drivers/emc/emc_smis_common.py:257 #, python-format -msgid "Powering on the VM instance %s" +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:335 +#: cinder/volume/drivers/emc/emc_smis_common.py:266 #, python-format -msgid "Powered on the VM instance %s" +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:381 +#: cinder/volume/drivers/emc/emc_smis_common.py:278 #, python-format -msgid "Creating Snapshot of the VM instance %s " +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:391 -#, python-format -msgid "Created Snapshot of the VM instance %s " +#: cinder/volume/drivers/emc/emc_smis_common.py:287 +msgid "Entering create_cloned_volume." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:434 +#: cinder/volume/drivers/emc/emc_smis_common.py:292 #, python-format -msgid "Copying disk data before snapshot of the VM instance %s" +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:447 +#: cinder/volume/drivers/emc/emc_smis_common.py:302 #, python-format -msgid "Copied disk data before snapshot of the VM instance %s" +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:456 +#: cinder/volume/drivers/emc/emc_smis_common.py:312 #, python-format -msgid "Uploading image %s" +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. 
Cannot find Replication Service to create cloned volume." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:469 +#: cinder/volume/drivers/emc/emc_smis_common.py:321 #, python-format -msgid "Uploaded image %s" +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:479 +#: cinder/volume/drivers/emc/emc_smis_common.py:342 #, python-format -msgid "Deleting temporary vmdk file %s" +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. Return code: %(rc)lu.Error: %(error)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:488 +#: cinder/volume/drivers/emc/emc_smis_common.py:354 #, python-format -msgid "Deleted temporary vmdk file %s" -msgstr "" - -#: cinder/virt/vmwareapi/vmops.py:520 -msgid "instance is not powered on" +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:527 +#: cinder/volume/drivers/emc/emc_smis_common.py:365 #, python-format -msgid "Rebooting guest OS of VM %s" +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:530 +#: cinder/volume/drivers/emc/emc_smis_common.py:381 #, python-format -msgid "Rebooted guest OS of VM %s" +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:532 +#: cinder/volume/drivers/emc/emc_smis_common.py:390 #, python-format -msgid "Doing hard reboot of VM %s" +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:536 +#: cinder/volume/drivers/emc/emc_smis_common.py:402 #, python-format -msgid "Did hard reboot of VM %s" +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:548 -#, python-format -msgid "instance - %s not present" +#: cinder/volume/drivers/emc/emc_smis_common.py:411 +msgid "Entering delete_volume." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:567 +#: cinder/volume/drivers/emc/emc_smis_common.py:413 #, python-format -msgid "Powering off the VM %s" +msgid "Delete Volume: %(volume)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:572 +#: cinder/volume/drivers/emc/emc_smis_common.py:420 #, python-format -msgid "Powered off the VM %s" +msgid "Volume %(name)s not found on the array. No volume to delete." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:576 +#: cinder/volume/drivers/emc/emc_smis_common.py:430 #, python-format -msgid "Unregistering the VM %s" +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." 
msgstr "" -#: cinder/virt/vmwareapi/vmops.py:579 +#: cinder/volume/drivers/emc/emc_smis_common.py:438 #, python-format -msgid "Unregistered the VM %s" +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:581 +#: cinder/volume/drivers/emc/emc_smis_common.py:442 #, python-format msgid "" -"In vmwareapi:vmops:destroy, got this exception while un-registering the " -"VM: %s" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:592 +#: cinder/volume/drivers/emc/emc_smis_common.py:456 #, python-format -msgid "Deleting contents of the VM %(name)s from datastore %(datastore_name)s" -msgstr "" - -#: cinder/virt/vmwareapi/vmops.py:602 -#, python-format -msgid "Deleted contents of the VM %(name)s from datastore %(datastore_name)s" +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:607 +#: cinder/volume/drivers/emc/emc_smis_common.py:465 #, python-format -msgid "" -"In vmwareapi:vmops:destroy, got this exception while deleting the VM " -"contents from the disk: %s" +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:615 -msgid "pause not supported for vmwareapi" +#: cinder/volume/drivers/emc/emc_smis_common.py:472 +msgid "Entering create_snapshot." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:619 -msgid "unpause not supported for vmwareapi" +#: cinder/volume/drivers/emc/emc_smis_common.py:476 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:633 +#: cinder/volume/drivers/emc/emc_smis_common.py:488 #, python-format -msgid "Suspending the VM %s " +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:637 +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:497 +#: cinder/volume/drivers/emc/emc_smis_common.py:567 #, python-format -msgid "Suspended the VM %s " +msgid "Cannot find Replication Service to create snapshot for volume %s." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:640 -msgid "instance is powered off and can not be suspended." +#: cinder/volume/drivers/emc/emc_smis_common.py:502 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:643 +#: cinder/volume/drivers/emc/emc_smis_common.py:518 #, python-format -msgid "VM %s was already in suspended state. So returning without doing anything" +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:656 +#: cinder/volume/drivers/emc/emc_smis_common.py:527 #, python-format -msgid "Resuming the VM %s" +msgid "" +"Error Create Snapshot: %(snapshot)s Volume: %(volume)s Error: " +"%(errordesc)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:661 +#: cinder/volume/drivers/emc/emc_smis_common.py:535 #, python-format -msgid "Resumed the VM %s " +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." 
msgstr "" -#: cinder/virt/vmwareapi/vmops.py:663 -msgid "instance is not in a suspended state" +#: cinder/volume/drivers/emc/emc_smis_common.py:541 +msgid "Entering delete_snapshot." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:699 -msgid "get_diagnostics not implemented for vmwareapi" +#: cinder/volume/drivers/emc/emc_smis_common.py:545 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:757 +#: cinder/volume/drivers/emc/emc_smis_common.py:551 #, python-format msgid "" -"Reconfiguring VM instance %(name)s to set the machine id with ip - " -"%(ip_addr)s" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:765 +#: cinder/volume/drivers/emc/emc_smis_common.py:559 #, python-format msgid "" -"Reconfigured VM instance %(name)s to set the machine id with ip - " -"%(ip_addr)s" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:802 +#: cinder/volume/drivers/emc/emc_smis_common.py:574 #, python-format -msgid "Creating directory with path %s" +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:806 +#: cinder/volume/drivers/emc/emc_smis_common.py:590 #, python-format -msgid "Created directory with path %s" +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:89 +#: cinder/volume/drivers/emc/emc_smis_common.py:599 #, python-format -msgid "Downloading image %s from glance image server" +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:103 +#: cinder/volume/drivers/emc/emc_smis_common.py:611 #, python-format -msgid "Downloaded image %s from glance image server" +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." 
msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:108 +#: cinder/volume/drivers/emc/emc_smis_common.py:621 #, python-format -msgid "Uploading image %s to the Glance image server" +msgid "Create export: %(volume)s" msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:129 +#: cinder/volume/drivers/emc/emc_smis_common.py:626 #, python-format -msgid "Uploaded image %s to the Glance image server" +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:139 +#: cinder/volume/drivers/emc/emc_smis_common.py:648 #, python-format -msgid "Getting image size for the image %s" +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:143 +#: cinder/volume/drivers/emc/emc_smis_common.py:663 #, python-format -msgid "Got image size of %(size)s for the image %(image)s" +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" msgstr "" -#: cinder/virt/xenapi/fake.py:553 cinder/virt/xenapi/fake.py:652 -#: cinder/virt/xenapi/fake.py:670 cinder/virt/xenapi/fake.py:732 -msgid "Raising NotImplemented" +#: cinder/volume/drivers/emc/emc_smis_common.py:674 +#, python-format +msgid "Error mapping volume %s." msgstr "" -#: cinder/virt/xenapi/fake.py:555 +#: cinder/volume/drivers/emc/emc_smis_common.py:678 +#, fuzzy, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "Volume %s: erfolgreich entfernt" + +#: cinder/volume/drivers/emc/emc_smis_common.py:694 #, python-format -msgid "xenapi.fake does not have an implementation for %s" +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" msgstr "" -#: cinder/virt/xenapi/fake.py:589 +#: cinder/volume/drivers/emc/emc_smis_common.py:707 #, python-format -msgid "Calling %(localname)s %(impl)s" +msgid "Error unmapping volume %s." msgstr "" -#: cinder/virt/xenapi/fake.py:594 +#: cinder/volume/drivers/emc/emc_smis_common.py:711 +#, fuzzy, python-format +msgid "HidePaths for volume %s completed successfully." +msgstr "Volume %s: erfolgreich entfernt" + +#: cinder/volume/drivers/emc/emc_smis_common.py:724 #, python-format -msgid "Calling getter %s" +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" msgstr "" -#: cinder/virt/xenapi/fake.py:654 +#: cinder/volume/drivers/emc/emc_smis_common.py:739 #, python-format -msgid "" -"xenapi.fake does not have an implementation for %s or it has been called " -"with the wrong number of arguments" +msgid "Error mapping volume %(vol)s. %(error)s" msgstr "" -#: cinder/virt/xenapi/host.py:67 +#: cinder/volume/drivers/emc/emc_smis_common.py:744 +#, fuzzy, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "Volume %s: erfolgreich entfernt" + +#: cinder/volume/drivers/emc/emc_smis_common.py:757 #, python-format msgid "" -"Instance %(name)s running on %(host)s could not be found in the database:" -" assuming it is a worker VM and skipping migration to a new host" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" msgstr "" -#: cinder/virt/xenapi/host.py:137 +#: cinder/volume/drivers/emc/emc_smis_common.py:770 #, python-format -msgid "Unable to get SR for this host: %s" +msgid "Error unmapping volume %(vol)s. 
%(error)s" msgstr "" -#: cinder/virt/xenapi/host.py:169 -msgid "Unable to get updated status" -msgstr "" +#: cinder/volume/drivers/emc/emc_smis_common.py:775 +#, fuzzy, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "Volume %s: erfolgreich entfernt" -#: cinder/virt/xenapi/host.py:172 +#: cinder/volume/drivers/emc/emc_smis_common.py:781 #, python-format -msgid "The call to %(method)s returned an error: %(e)s." +msgid "Map volume: %(volume)s" msgstr "" -#: cinder/virt/xenapi/network_utils.py:37 +#: cinder/volume/drivers/emc/emc_smis_common.py:790 +#: cinder/volume/drivers/emc/emc_smis_common.py:820 #, python-format -msgid "Found non-unique network for name_label %s" +msgid "Cannot find Controller Configuration Service for storage system %s" msgstr "" -#: cinder/virt/xenapi/network_utils.py:55 +#: cinder/volume/drivers/emc/emc_smis_common.py:804 #, python-format -msgid "Found non-unique network for bridge %s" +msgid "Unmap volume: %(volume)s" msgstr "" -#: cinder/virt/xenapi/network_utils.py:58 +#: cinder/volume/drivers/emc/emc_smis_common.py:810 #, python-format -msgid "Found no network for bridge %s" +msgid "Volume %s is not mapped. No volume to unmap." msgstr "" -#: cinder/virt/xenapi/pool.py:111 +#: cinder/volume/drivers/emc/emc_smis_common.py:834 #, python-format -msgid "Unable to eject %(host)s from the pool; pool not empty" +msgid "Initialize connection: %(volume)s" msgstr "" -#: cinder/virt/xenapi/pool.py:126 +#: cinder/volume/drivers/emc/emc_smis_common.py:840 #, python-format -msgid "Unable to eject %(host)s from the pool; No master found" +msgid "Volume %s is already mapped." msgstr "" -#: cinder/virt/xenapi/pool.py:143 +#: cinder/volume/drivers/emc/emc_smis_common.py:852 #, python-format -msgid "Pool-Join failed: %(e)s" +msgid "Terminate connection: %(volume)s" msgstr "" -#: cinder/virt/xenapi/pool.py:146 +#: cinder/volume/drivers/emc/emc_smis_common.py:884 #, python-format -msgid "Unable to join %(host)s in the pool" +msgid "Found Storage Type: %s" msgstr "" -#: cinder/virt/xenapi/pool.py:162 -#, python-format -msgid "Pool-eject failed: %(e)s" +#: cinder/volume/drivers/emc/emc_smis_common.py:887 +msgid "Storage type not found." msgstr "" -#: cinder/virt/xenapi/pool.py:174 +#: cinder/volume/drivers/emc/emc_smis_common.py:903 #, python-format -msgid "Unable to set up pool: %(e)s." +msgid "Found Masking View: %s" msgstr "" -#: cinder/virt/xenapi/pool.py:185 -#, python-format -msgid "Pool-set_name_label failed: %(e)s" +#: cinder/volume/drivers/emc/emc_smis_common.py:906 +msgid "Masking View not found." msgstr "" -#: cinder/virt/xenapi/vif.py:103 -#, python-format -msgid "Found no PIF for device %s" +#: cinder/volume/drivers/emc/emc_smis_common.py:928 +msgid "Ecom user not found." msgstr "" -#: cinder/virt/xenapi/vif.py:122 +#: cinder/volume/drivers/emc/emc_smis_common.py:948 #, python-format -msgid "" -"PIF %(pif_rec['uuid'])s for network %(bridge)s has VLAN id %(pif_vlan)d. " -"Expected %(vlan_num)d" +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:218 -msgid "Created VM" +#: cinder/volume/drivers/emc/emc_smis_common.py:952 +msgid "Ecom server not found." 
msgstr "" -#: cinder/virt/xenapi/vm_utils.py:245 -#, python-format -msgid "VBD not found in instance %s" +#: cinder/volume/drivers/emc/emc_smis_common.py:959 +msgid "Cannot connect to ECOM server" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:262 +#: cinder/volume/drivers/emc/emc_smis_common.py:971 #, python-format -msgid "VBD %s already detached" +msgid "Found Replication Service: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:265 +#: cinder/volume/drivers/emc/emc_smis_common.py:984 #, python-format -msgid "VBD %(vbd_ref)s detach rejected, attempt %(num_attempt)d/%(max_attempts)d" +msgid "Found Storage Configuration Service: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:270 +#: cinder/volume/drivers/emc/emc_smis_common.py:997 #, python-format -msgid "Unable to unplug VBD %s" +msgid "Found Controller Configuration Service: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:275 +#: cinder/volume/drivers/emc/emc_smis_common.py:1010 #, python-format -msgid "Reached maximum number of retries trying to unplug VBD %s" +msgid "Found Storage Hardware ID Management Service: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:286 +#: cinder/volume/drivers/emc/emc_smis_common.py:1054 #, python-format -msgid "Unable to destroy VBD %s" +msgid "Pool %(storage_type)s is not found." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:305 +#: cinder/volume/drivers/emc/emc_smis_common.py:1060 #, python-format -msgid "Creating %(vbd_type)s-type VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +msgid "Storage system not found for pool %(storage_type)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:308 +#: cinder/volume/drivers/emc/emc_smis_common.py:1066 #, python-format -msgid "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s." +msgid "Pool: %(pool)s SystemName: %(systemname)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:319 +#: cinder/volume/drivers/emc/emc_smis_common.py:1082 #, python-format -msgid "Unable to destroy VDI %s" +msgid "Pool name: %(poolname)s System name: %(systemname)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:336 +#: cinder/volume/drivers/emc/emc_smis_common.py:1114 #, python-format -msgid "" -"Created VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s)" -" on %(sr_ref)s." +msgid "Volume %(volumename)s not found on the array." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:345 +#: cinder/volume/drivers/emc/emc_smis_common.py:1117 #, python-format -msgid "Copied VDI %(vdi_ref)s from VDI %(vdi_to_copy_ref)s on %(sr_ref)s." +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:353 +#: cinder/volume/drivers/emc/emc_smis_common.py:1130 #, python-format -msgid "Cloned VDI %(vdi_ref)s from VDI %(vdi_to_clone_ref)s" +msgid "Source: %(volumename)s Target: %(snapshotname)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:372 +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 #, python-format -msgid "No primary VDI found for %(vm_ref)s" +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " msgstr "" -#: cinder/virt/xenapi/vm_utils.py:379 +#: cinder/volume/drivers/emc/emc_smis_common.py:1158 #, python-format -msgid "Snapshotting with label '%(label)s'" +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:392 +#: cinder/volume/drivers/emc/emc_smis_common.py:1184 #, python-format -msgid "Created snapshot %(template_vm_ref)s" +msgid "Error finding %s." 
msgstr "" -#: cinder/virt/xenapi/vm_utils.py:431 +#: cinder/volume/drivers/emc/emc_smis_common.py:1188 #, python-format -msgid "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s" +msgid "Found %(name)s: %(initiator)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:583 +#: cinder/volume/drivers/emc/emc_smis_common.py:1248 #, python-format -msgid "Creating blank HD of size %(req_size)d gigs" +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:634 +#: cinder/volume/drivers/emc/emc_smis_common.py:1289 #, python-format msgid "" -"Fast cloning is only supported on default local SR of type ext. SR on " -"this system was found to be of type %(sr_type)s. Ignoring the cow flag." +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:724 +#: cinder/volume/drivers/emc/emc_smis_common.py:1302 #, python-format msgid "" -"download_vhd %(image)s attempt %(attempt_num)d/%(max_attempts)d from " -"%(glance_host)s:%(glance_port)s" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:734 +#: cinder/volume/drivers/emc/emc_smis_common.py:1314 #, python-format -msgid "download_vhd failed: %r" +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:750 +#: cinder/volume/drivers/emc/emc_smis_common.py:1326 #, python-format -msgid "Asking xapi to fetch vhd image %(image)s" +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:760 +#: cinder/volume/drivers/emc/emc_smis_common.py:1361 #, python-format -msgid "" -"xapi 'download_vhd' returned VDI of type '%(vdi_type)s' with UUID " -"'%(vdi_uuid)s'" +msgid "Available device number on %(storage)s: %(device)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:789 +#: cinder/volume/drivers/emc/emc_smis_common.py:1404 #, python-format -msgid "vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes=%(vdi_size_bytes)d" +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:805 +#: cinder/volume/drivers/emc/emc_smis_common.py:1409 #, python-format -msgid "image_size_bytes=%(size_bytes)d, allowed_size_bytes=%(allowed_size_bytes)d" +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:809 +#: cinder/volume/drivers/emc/emc_smis_common.py:1419 #, python-format -msgid "" -"Image size %(size_bytes)d exceeded instance_type allowed size " -"%(allowed_size_bytes)d" +msgid "Device info: %(data)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:831 +#: cinder/volume/drivers/emc/emc_smis_common.py:1441 #, python-format -msgid "Fetching image %(image)s, type %(image_type_str)" +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." 
msgstr "" -#: cinder/virt/xenapi/vm_utils.py:844 +#: cinder/volume/drivers/emc/emc_smis_common.py:1463 #, python-format -msgid "Size for image %(image)s: %(virtual_size)d" +msgid "Found Storage Processor System: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:853 +#: cinder/volume/drivers/emc/emc_smis_common.py:1491 #, python-format msgid "" -"Kernel/Ramdisk image is too large: %(vdi_size)d bytes, max %(max_size)d " -"bytes" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:870 -#, python-format -msgid "Copying VDI %s to /boot/guest on dom0" +#: cinder/volume/drivers/emc/emc_smis_common.py:1520 +msgid "Error finding Storage Hardware ID Service." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:884 +#: cinder/volume/drivers/emc/emc_smis_common.py:1526 #, python-format -msgid "Kernel/Ramdisk VDI %s destroyed" +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:895 -msgid "Failed to fetch glance image" +#: cinder/volume/drivers/emc/emc_smis_common.py:1538 +msgid "Error finding Target WWNs." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:934 +#: cinder/volume/drivers/emc/emc_smis_common.py:1548 #, python-format -msgid "Detected %(image_type_str)s format for image %(image_ref)s" +msgid "Add target WWN: %s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:955 +#: cinder/volume/drivers/emc/emc_smis_common.py:1550 #, python-format -msgid "Looking up vdi %s for PV kernel" +msgid "Target WWNs: %s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:973 +#: cinder/volume/drivers/emc/emc_smis_common.py:1566 #, python-format -msgid "Unknown image format %(disk_image_type)s" +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1016 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:152 #, python-format -msgid "VDI %s is still available" +msgid "Could not find iSCSI export for volume %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1059 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:161 +#, fuzzy, python-format +msgid "Cannot find device number for volume %s" +msgstr "Nicht möglich volume %s zufinden" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:191 #, python-format -msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgid "Found iSCSI endpoint: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1061 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:198 #, python-format -msgid "(VM_UTILS) xenapi power_state -> |%s|" +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1088 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:215 #, python-format -msgid "Unable to parse rrd of %(vm_uuid)s" +msgid "ISCSI properties: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1108 +#: cinder/volume/drivers/hds/hds.py:70 #, python-format -msgid "Re-scanning SR %s" +msgid "Range: start LU: %(start)s, end LU: %(end)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1136 +#: cinder/volume/drivers/hds/hds.py:84 #, python-format -msgid "Flag sr_matching_filter '%s' does not respect formatting convention" +msgid "setting LU upper (end) limit to %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1154 -msgid "" -"XenAPI is unable to find a Storage Repository to install guest instances " -"on. 
Please check your configuration and/or configure the flag " -"'sr_matching_filter'" +#: cinder/volume/drivers/hds/hds.py:92 +#, python-format +msgid "%(element)s: %(val)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1167 -msgid "Cannot find SR of content-type ISO" +#: cinder/volume/drivers/hds/hds.py:103 cinder/volume/drivers/hds/hds.py:105 +#, python-format +msgid "XML exception reading parameter: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1175 +#: cinder/volume/drivers/hds/hds.py:178 #, python-format -msgid "ISO: looking at SR %(sr_rec)s" +msgid "portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1177 -msgid "ISO: not iso content" +#: cinder/volume/drivers/hds/hds.py:197 +#, python-format +msgid "No configuration found for service: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1180 -msgid "ISO: iso content_type, no 'i18n-key' key" +#: cinder/volume/drivers/hds/hds.py:250 +#, python-format +msgid "HDP not found: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1183 -msgid "ISO: iso content_type, i18n-key value not 'local-storage-iso'" +#: cinder/volume/drivers/hds/hds.py:289 +#, python-format +msgid "iSCSI portal not found for service: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1187 -msgid "ISO: SR MATCHing our criteria" +#: cinder/volume/drivers/hds/hds.py:327 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1189 -msgid "ISO: ISO, looking to see if it is host local" +#: cinder/volume/drivers/hds/hds.py:355 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is cloned." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1192 +#: cinder/volume/drivers/hds/hds.py:372 #, python-format -msgid "ISO: PBD %(pbd_ref)s disappeared" +msgid "LUN %(lun)s extended to %(size)s GB." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1195 +#: cinder/volume/drivers/hds/hds.py:395 #, python-format -msgid "ISO: PBD matching, want %(pbd_rec)s, have %(host)s" +msgid "delete lun %(lun)s on %(name)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1198 -msgid "ISO: SR with local PBD" +#: cinder/volume/drivers/hds/hds.py:480 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created from snapshot." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1220 +#: cinder/volume/drivers/hds/hds.py:503 #, python-format -msgid "" -"Unable to obtain RRD XML for VM %(vm_uuid)s with server details: " -"%(server)s." +msgid "LUN %(lun)s of size %(size)s MB is created as snapshot." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1236 +#: cinder/volume/drivers/hds/hds.py:522 #, python-format -msgid "Unable to obtain RRD XML updates with server details: %(server)s." +msgid "LUN %s is deleted." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:57 +msgid "_instantiate_driver: configuration not found." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1290 +#: cinder/volume/drivers/huawei/__init__.py:64 #, python-format -msgid "Invalid statistics data from Xenserver: %s" +msgid "" +"_instantiate_driver: Loading %(protocol)s driver for Huawei OceanStor " +"%(product)s series storage arrays." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1343 +#: cinder/volume/drivers/huawei/__init__.py:84 #, python-format -msgid "VHD %(vdi_uuid)s has parent %(parent_ref)s" +msgid "" +"\"Product\" or \"Protocol\" is illegal. \"Product\" should be set to " +"either T, Dorado or HVS. \"Protocol\" should be set to either iSCSI or " +"FC. 
Product: %(product)s Protocol: %(protocol)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1417 +#: cinder/volume/drivers/huawei/huawei_dorado.py:74 #, python-format msgid "" -"Parent %(parent_uuid)s doesn't match original parent " -"%(original_parent_uuid)s, waiting for coalesce..." +"initialize_connection: volume name: %(vol)s host: %(host)s initiator: " +"%(wwn)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1427 +#: cinder/volume/drivers/huawei/huawei_dorado.py:92 +#: cinder/volume/drivers/huawei/huawei_t.py:461 #, python-format -msgid "VHD coalesce attempts exceeded (%(max_attempts)d), giving up..." +msgid "initialize_connection: Target FC ports WWNS: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1462 +#: cinder/volume/drivers/huawei/huawei_t.py:101 #, python-format -msgid "Timeout waiting for device %s to be created" +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(ini)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1473 +#: cinder/volume/drivers/huawei/huawei_t.py:159 +#: cinder/volume/drivers/huawei/rest_common.py:1278 #, python-format -msgid "Plugging VBD %s ... " +msgid "" +"_get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " +"check config file." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1476 +#: cinder/volume/drivers/huawei/huawei_t.py:206 +#: cinder/volume/drivers/huawei/rest_common.py:1083 #, python-format -msgid "Plugging VBD %s done." +msgid "_get_tgt_iqn: iSCSI IP is %s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1478 +#: cinder/volume/drivers/huawei/huawei_t.py:234 #, python-format -msgid "VBD %(vbd_ref)s plugged as %(orig_dev)s" +msgid "_get_tgt_iqn: iSCSI target iqn is %s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1481 +#: cinder/volume/drivers/huawei/huawei_t.py:248 #, python-format -msgid "VBD %(vbd_ref)s plugged into wrong dev, remapping to %(dev)s" +msgid "" +"_get_iscsi_tgt_port_info: Failed to get iSCSI port info. Please make sure" +" the iSCSI port IP %s is configured in array." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1490 +#: cinder/volume/drivers/huawei/huawei_t.py:323 +#: cinder/volume/drivers/huawei/huawei_t.py:552 #, python-format -msgid "Destroying VBD for VDI %s ... " +msgid "" +"terminate_connection: volume: %(vol)s, host: %(host)s, connector: " +"%(initiator)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1498 +#: cinder/volume/drivers/huawei/huawei_t.py:351 #, python-format -msgid "Destroying VBD for VDI %s done." +msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:436 +msgid "validate_connector: The FC driver requires thewwpns in the connector." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1511 +#: cinder/volume/drivers/huawei/huawei_t.py:443 #, python-format -msgid "Running pygrub against %s" +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(wwn)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1518 +#: cinder/volume/drivers/huawei/huawei_t.py:578 #, python-format -msgid "Found Xen kernel %s" +msgid "_remove_fc_ports: FC port was not found on host %(hostid)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1520 -msgid "No Xen kernel found. Booting HVM." +#: cinder/volume/drivers/huawei/huawei_utils.py:40 +#, python-format +msgid "parse_xml_file: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1533 -msgid "Partitions:" +#: cinder/volume/drivers/huawei/huawei_utils.py:129 +#, python-format +msgid "_get_host_os_type: Host %(ip)s OS type is %(os)s." 
msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1539 +#: cinder/volume/drivers/huawei/rest_common.py:59 #, python-format -msgid " %(num)s: %(ptype)s %(size)d sectors" +msgid "HVS Request URL: %(url)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1565 +#: cinder/volume/drivers/huawei/rest_common.py:60 #, python-format -msgid "" -"Writing partition table %(primary_first)d %(primary_last)d to " -"%(dev_path)s..." +msgid "HVS Request Data: %(data)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1578 +#: cinder/volume/drivers/huawei/rest_common.py:73 #, python-format -msgid "Writing partition table %s done." +msgid "HVS Response Data: %(res)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1632 +#: cinder/volume/drivers/huawei/rest_common.py:75 #, python-format -msgid "" -"Starting sparse_copy src=%(src_path)s dst=%(dst_path)s " -"virtual_size=%(virtual_size)d block_size=%(block_size)d" +msgid "Bad response from server: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:82 +msgid "JSON transfer error" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1664 +#: cinder/volume/drivers/huawei/rest_common.py:102 #, python-format -msgid "" -"Finished sparse_copy in %(duration).2f secs, %(compression_pct).2f%% " -"reduction in size" +msgid "Login error, reason is %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1714 +#: cinder/volume/drivers/huawei/rest_common.py:166 +#, python-format msgid "" -"XenServer tools installed in this image are capable of network injection." -" Networking files will not bemanipulated" +"%(err)s\n" +"result: %(res)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1722 -msgid "" -"XenServer tools are present in this image but are not capable of network " -"injection" +#: cinder/volume/drivers/huawei/rest_common.py:173 +#, python-format +msgid "%s \"data\" was not in result." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:208 +msgid "Can't find the Qos policy in array" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1726 -msgid "XenServer tools are not installed in this image" +#: cinder/volume/drivers/huawei/rest_common.py:246 +msgid "Can't find lun or lun group in array" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1742 -msgid "Manipulating interface files directly" +#: cinder/volume/drivers/huawei/rest_common.py:280 +#, python-format +msgid "Invalid resource pool: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1751 +#: cinder/volume/drivers/huawei/rest_common.py:298 #, python-format -msgid "Failed to mount filesystem (expected for non-linux instances): %s" +msgid "Get pool info error, pool name is:%s" msgstr "" -#: cinder/virt/xenapi/vmops.py:131 cinder/virt/xenapi/vmops.py:722 +#: cinder/volume/drivers/huawei/rest_common.py:327 #, python-format -msgid "Updating progress to %(progress)d" +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:231 +#: cinder/volume/drivers/huawei/rest_common.py:354 #, python-format -msgid "Attempted to power on non-existent instance bad instance id %s" +msgid "_stop_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." 
msgstr "" -#: cinder/virt/xenapi/vmops.py:233 -msgid "Starting instance" +#: cinder/volume/drivers/huawei/rest_common.py:474 +#, python-format +msgid "" +"_mapping_hostgroup_and_lungroup: lun_group: %(lun_group)sview_id: " +"%(view_id)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:303 -msgid "Removing kernel/ramdisk files from dom0" +#: cinder/volume/drivers/huawei/rest_common.py:511 +#: cinder/volume/drivers/huawei/rest_common.py:543 +#, python-format +msgid "initiator name:%(initiator_name)s, volume name:%(volume)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:358 -msgid "Failed to spawn, rolling back" +#: cinder/volume/drivers/huawei/rest_common.py:527 +#, python-format +msgid "host lun id is %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:443 -msgid "Detected ISO image type, creating blank VM for install" +#: cinder/volume/drivers/huawei/rest_common.py:553 +#, python-format +msgid "the free wwns %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:462 -msgid "Auto configuring disk, attempting to resize partition..." +#: cinder/volume/drivers/huawei/rest_common.py:574 +#, python-format +msgid "the fc server properties is:%s" msgstr "" -#: cinder/virt/xenapi/vmops.py:515 +#: cinder/volume/drivers/huawei/rest_common.py:688 #, python-format -msgid "Invalid value for injected_files: %r" +msgid "JSON transfer data error. %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:520 +#: cinder/volume/drivers/huawei/rest_common.py:874 #, python-format -msgid "Injecting file path: '%s'" +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:527 -msgid "Setting admin password" +#: cinder/volume/drivers/huawei/rest_common.py:937 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(fetchtype)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:531 -msgid "Resetting network" +#: cinder/volume/drivers/huawei/rest_common.py:964 +#, python-format +msgid "" +"PrefetchType config is wrong. PrefetchType must in 1,2,3,4. fetchtype " +"is:%(fetchtype)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:538 -msgid "Setting VCPU weight" +#: cinder/volume/drivers/huawei/rest_common.py:970 +msgid "Use default prefetch fetchtype. Prefetch fetchtype:Intelligent." msgstr "" -#: cinder/virt/xenapi/vmops.py:544 -msgid "Starting VM" +#: cinder/volume/drivers/huawei/rest_common.py:982 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal.LUNcopy name: " +"%(luncopyname)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:551 +#: cinder/volume/drivers/huawei/rest_common.py:1056 #, python-format msgid "" -"Latest agent build for %(hypervisor)s/%(os)s/%(architecture)s is " -"%(version)s" +"_get_iscsi_port_info: Failed to get iscsi port info through config IP " +"%(ip)s, please check config file." msgstr "" -#: cinder/virt/xenapi/vmops.py:554 +#: cinder/volume/drivers/huawei/rest_common.py:1101 #, python-format -msgid "No agent build found for %(hypervisor)s/%(os)s/%(architecture)s" +msgid "_get_tgt_iqn: iSCSI target iqn is %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:561 -msgid "Waiting for instance state to become running" +#: cinder/volume/drivers/huawei/rest_common.py:1124 +#, python-format +msgid "_parse_volume_type: type id: %(type_id)s config parameter is: %(params)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:573 -msgid "Querying agent version" +#: cinder/volume/drivers/huawei/rest_common.py:1157 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. 
Please check this key" +" in extra_specs and make it consistent with the configuration file " +"%(conf)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:576 +#: cinder/volume/drivers/huawei/rest_common.py:1162 #, python-format -msgid "Instance agent version: %s" +msgid "The config parameters are: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:581 +#: cinder/volume/drivers/huawei/rest_common.py:1239 +#: cinder/volume/drivers/huawei/ssh_common.py:118 +#: cinder/volume/drivers/huawei/ssh_common.py:1265 #, python-format -msgid "Updating Agent to %s" +msgid "_check_conf_file: Config file invalid. %s must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1246 +#: cinder/volume/drivers/huawei/ssh_common.py:125 +msgid "_check_conf_file: Config file invalid. StoragePool must be set." msgstr "" -#: cinder/virt/xenapi/vmops.py:616 +#: cinder/volume/drivers/huawei/rest_common.py:1256 #, python-format -msgid "No opaque_ref could be determined for '%s'." +msgid "" +"_check_conf_file: Config file invalid. Host OSType invalid.\n" +"The valid values are: %(os_list)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:670 -msgid "Finished snapshot and upload for VM" +#: cinder/volume/drivers/huawei/rest_common.py:1300 +msgid "Can not find lun in array" msgstr "" -#: cinder/virt/xenapi/vmops.py:677 -msgid "Starting snapshot for VM" +#: cinder/volume/drivers/huawei/ssh_common.py:54 +#, python-format +msgid "ssh_read: Read SSH timeout. %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:686 -#, fuzzy, python-format -msgid "Unable to Snapshot instance: %(exc)s" -msgstr "Nicht möglich Volumen zur Instanze %s hinzuzufügen" +#: cinder/volume/drivers/huawei/ssh_common.py:70 +msgid "No response message. Please check system status." +msgstr "" -#: cinder/virt/xenapi/vmops.py:702 -msgid "Failed to transfer vhd to new host" +#: cinder/volume/drivers/huawei/ssh_common.py:101 +#: cinder/volume/drivers/huawei/ssh_common.py:1249 +msgid "do_setup" msgstr "" -#: cinder/virt/xenapi/vmops.py:770 +#: cinder/volume/drivers/huawei/ssh_common.py:135 +#: cinder/volume/drivers/huawei/ssh_common.py:1287 #, python-format -msgid "Resizing down VDI %(cow_uuid)s from %(old_gb)dGB to %(new_gb)dGB" +msgid "" +"_check_conf_file: Config file invalid. Host OSType is invalid.\n" +"The valid values are: %(os_list)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:893 +#: cinder/volume/drivers/huawei/ssh_common.py:169 #, python-format -msgid "Resizing up VDI %(vdi_uuid)s from %(old_gb)dGB to %(new_gb)dGB" +msgid "_get_login_info: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:901 -msgid "Resize complete" +#: cinder/volume/drivers/huawei/ssh_common.py:224 +#, python-format +msgid "create_volume: volume name: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:928 +#: cinder/volume/drivers/huawei/ssh_common.py:242 #, python-format -msgid "Failed to query agent version: %(resp)r" +msgid "" +"_name_translate: Name in cinder: %(old)s, new name in storage system: " +"%(new)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:949 +#: cinder/volume/drivers/huawei/ssh_common.py:279 #, python-format -msgid "domid changed from %(domid)s to %(newdomid)s" +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the element in configuration " +"file %(conf)s." 
msgstr "" -#: cinder/virt/xenapi/vmops.py:962 +#: cinder/volume/drivers/huawei/ssh_common.py:373 +#: cinder/volume/drivers/huawei/ssh_common.py:1451 #, python-format -msgid "Failed to update agent: %(resp)r" +msgid "LUNType must be \"Thin\" or \"Thick\". LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:395 +msgid "" +"_parse_conf_lun_params: Use default prefetch type. Prefetch type: " +"Intelligent" msgstr "" -#: cinder/virt/xenapi/vmops.py:983 +#: cinder/volume/drivers/huawei/ssh_common.py:421 #, python-format -msgid "Failed to exchange keys: %(resp)r" +msgid "" +"_get_maximum_capacity_pool_id: Failed to get pool id. Please check config" +" file and make sure the StoragePool %s is created in storage array." msgstr "" -#: cinder/virt/xenapi/vmops.py:998 +#: cinder/volume/drivers/huawei/ssh_common.py:436 #, python-format -msgid "Failed to update password: %(resp)r" +msgid "CLI command: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1023 +#: cinder/volume/drivers/huawei/ssh_common.py:466 #, python-format -msgid "Failed to inject file: %(resp)r" +msgid "" +"_execute_cli: Can not connect to IP %(old)s, try to connect to the other " +"IP %(new)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1032 -msgid "VM already halted, skipping shutdown..." +#: cinder/volume/drivers/huawei/ssh_common.py:501 +#, python-format +msgid "_execute_cli: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1036 -msgid "Shutting down VM" +#: cinder/volume/drivers/huawei/ssh_common.py:511 +#, python-format +msgid "delete_volume: volume name: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1054 -msgid "Unable to find VBD for VM" +#: cinder/volume/drivers/huawei/ssh_common.py:516 +#, python-format +msgid "delete_volume: Volume %(name)s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:1097 -msgid "Using RAW or VHD, skipping kernel and ramdisk deletion" +#: cinder/volume/drivers/huawei/ssh_common.py:570 +#, python-format +msgid "" +"create_volume_from_snapshot: snapshot name: %(snapshot)s, volume name: " +"%(volume)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1104 -msgid "instance has a kernel or ramdisk but not both" +#: cinder/volume/drivers/huawei/ssh_common.py:580 +#, python-format +msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:1111 -msgid "kernel/ramdisk files removed" +#: cinder/volume/drivers/huawei/ssh_common.py:650 +#, python-format +msgid "_wait_for_luncopy: LUNcopy %(luncopyname)s status is %(status)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1121 -msgid "VM destroyed" +#: cinder/volume/drivers/huawei/ssh_common.py:688 +#, python-format +msgid "create_cloned_volume: src volume: %(src)s, tgt volume: %(tgt)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1147 -msgid "Destroying VM" +#: cinder/volume/drivers/huawei/ssh_common.py:697 +#, python-format +msgid "Source volume %(name)s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:1169 -msgid "VM is not present, skipping destroy..." +#: cinder/volume/drivers/huawei/ssh_common.py:739 +#, python-format +msgid "" +"extend_volume: extended volume name: %(extended_name)s new added volume " +"name: %(added_name)s new added volume size: %(added_size)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1222 +#: cinder/volume/drivers/huawei/ssh_common.py:747 #, python-format -msgid "Instance is already in Rescue Mode: %s" +msgid "extend_volume: volume %s does not exist." 
msgstr "" -#: cinder/virt/xenapi/vmops.py:1296 +#: cinder/volume/drivers/huawei/ssh_common.py:779 #, python-format -msgid "Found %(instance_count)d hung reboots older than %(timeout)d seconds" +msgid "create_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:785 +msgid "create_snapshot: Resource pool needs 1GB valid size at least." msgstr "" -#: cinder/virt/xenapi/vmops.py:1300 -msgid "Automatically hard rebooting" +#: cinder/volume/drivers/huawei/ssh_common.py:792 +#, python-format +msgid "create_snapshot: Volume %(name)s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:1363 +#: cinder/volume/drivers/huawei/ssh_common.py:855 #, python-format -msgid "Setting migration %(migration_id)s to error: %(reason)s" +msgid "delete_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1374 +#: cinder/volume/drivers/huawei/ssh_common.py:865 #, python-format msgid "" -"Automatically confirming migration %(migration_id)s for instance " -"%(instance_uuid)s" +"delete_snapshot: Can not delete snapshot %s for it is a source LUN of " +"LUNCopy." msgstr "" -#: cinder/virt/xenapi/vmops.py:1379 +#: cinder/volume/drivers/huawei/ssh_common.py:873 #, python-format -msgid "Instance %(instance_uuid)s not found" +msgid "delete_snapshot: Snapshot %(snap)s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:1383 -msgid "In ERROR state" +#: cinder/volume/drivers/huawei/ssh_common.py:916 +#, python-format +msgid "" +"%(func)s: %(msg)s\n" +"CLI command: %(cmd)s\n" +"CLI out: %(out)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1389 +#: cinder/volume/drivers/huawei/ssh_common.py:933 #, python-format -msgid "In %(task_state)s task_state, not RESIZE_VERIFY" +msgid "map_volume: Volume %s was not found." msgstr "" -#: cinder/virt/xenapi/vmops.py:1396 +#: cinder/volume/drivers/huawei/ssh_common.py:1079 #, python-format -msgid "Error auto-confirming resize: %(e)s. Will retry later." +msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1418 -msgid "Could not get bandwidth info." +#: cinder/volume/drivers/huawei/ssh_common.py:1102 +#, python-format +msgid "remove_map: Host %s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:1469 -msgid "Injecting network info to xenstore" +#: cinder/volume/drivers/huawei/ssh_common.py:1106 +#, python-format +msgid "remove_map: Volume %s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:1483 -msgid "Creating vifs" +#: cinder/volume/drivers/huawei/ssh_common.py:1119 +#, python-format +msgid "remove_map: No map between host %(host)s and volume %(volume)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1492 +#: cinder/volume/drivers/huawei/ssh_common.py:1138 #, python-format -msgid "Creating VIF for network %(network_ref)s" +msgid "" +"_delete_map: There are IOs accessing the system. Retry to delete host map" +" %(mapid)s 10s later." msgstr "" -#: cinder/virt/xenapi/vmops.py:1495 +#: cinder/volume/drivers/huawei/ssh_common.py:1146 #, python-format -msgid "Created VIF %(vif_ref)s, network %(network_ref)s" +msgid "" +"_delete_map: Failed to delete host map %(mapid)s.\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1185 +msgid "_update_volume_stats: Updating volume stats." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1277 +msgid "_check_conf_file: Config file invalid. StoragePool must be specified." 
msgstr "" -#: cinder/virt/xenapi/vmops.py:1520 -msgid "Injecting hostname to xenstore" +#: cinder/volume/drivers/huawei/ssh_common.py:1311 +msgid "" +"_get_device_type: The driver only supports Dorado5100 and Dorado 2100 G2 " +"now." msgstr "" -#: cinder/virt/xenapi/vmops.py:1545 +#: cinder/volume/drivers/huawei/ssh_common.py:1389 #, python-format msgid "" -"The agent call to %(method)s returned an invalid response: %(ret)r. " -"path=%(path)s; args=%(args)r" +"create_volume_from_snapshot: %(device)s does not support create volume " +"from snapshot." msgstr "" -#: cinder/virt/xenapi/vmops.py:1566 +#: cinder/volume/drivers/huawei/ssh_common.py:1396 #, python-format -msgid "TIMEOUT: The call to %(method)s timed out. args=%(args)r" +msgid "create_cloned_volume: %(device)s does not support clone volume." msgstr "" -#: cinder/virt/xenapi/vmops.py:1570 +#: cinder/volume/drivers/huawei/ssh_common.py:1404 #, python-format -msgid "" -"NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. " -"args=%(args)r" +msgid "extend_volume: %(device)s does not support extend volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1413 +#, python-format +msgid "create_snapshot: %(device)s does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:132 +msgid "enter: do_setup" msgstr "" -#: cinder/virt/xenapi/vmops.py:1575 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:142 #, python-format -msgid "The call to %(method)s returned an error: %(e)s. args=%(args)r" +msgid "Failed getting details for pool %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:179 +msgid "do_setup: No configured nodes." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:182 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:186 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:190 +msgid "Unable to determine system name" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:193 +msgid "Unable to determine system id" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:196 +msgid "Unable to determine pool extent size" msgstr "" -#: cinder/virt/xenapi/vmops.py:1661 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:203 +#: cinder/volume/drivers/netapp/iscsi.py:122 +#: cinder/volume/drivers/netapp/nfs.py:639 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:157 #, python-format -msgid "OpenSSL error: %s" +msgid "%s is not set" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:52 -msgid "creating sr within volume_utils" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:209 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:55 cinder/virt/xenapi/volume_utils.py:83 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:217 #, python-format -msgid "type is = %s" +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:225 +msgid "leave: check_for_setup_error" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:58 cinder/virt/xenapi/volume_utils.py:86 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:235 #, python-format -msgid "name = %s" +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: 
cinder/volume/drivers/ibm/storwize_svc/__init__.py:254 +msgid "The connector does not contain the required information." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:71 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:277 #, python-format -msgid "Created %(label)s as %(sr_ref)s." +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:76 cinder/virt/xenapi/volume_utils.py:174 -msgid "Unable to create Storage Repository" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:297 +msgid "CHAP secret exists for host but CHAP is disabled" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:80 -msgid "introducing sr within volume_utils" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:302 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:103 cinder/virt/xenapi/volume_utils.py:170 -#: cinder/virt/xenapi/volumeops.py:156 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:314 #, python-format -msgid "Introduced %(label)s as %(sr_ref)s." +msgid "Did not find expected column name in lsvdisk: %s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:106 -msgid "Creating pbd for SR" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:316 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:108 -msgid "Plugging SR" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:333 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:116 cinder/virt/xenapi/volumeops.py:160 -msgid "Unable to introduce Storage Repository" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:342 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:127 cinder/virt/xenapi/volumeops.py:50 -msgid "Unable to get SR using uuid" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:365 +msgid "" +"Could not get FC connection information for the host-volume connection. " +"Is the host configured properly for FC connections?" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:129 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:380 #, python-format -msgid "Forgetting SR %s..." +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:137 -msgid "Unable to forget Storage Repository" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:385 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:157 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:403 #, python-format -msgid "Introducing %s..." +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:186 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:411 +msgid "terminate_connection: Failed to get host name from connector." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:421 #, python-format -msgid "Unable to find SR from VBD %s" +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:447 +msgid "create_volume_from_snapshot: Source and destination size differ." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:204 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:459 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:470 #, python-format -msgid "Ignoring exception %(exc)s when getting PBDs for %(sr_ref)s" +msgid "enter: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:474 +msgid "extend_volume: Extending a volume with snapshots is not supported." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:210 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:481 #, python-format -msgid "Ignoring exception %(exc)s when unplugging PBD %(pbd)s" +msgid "leave: extend_volume: volume %s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:234 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:497 #, python-format -msgid "Unable to introduce VDI on SR %s" +msgid "enter: migrate_volume: id=%(id)s, host=%(host)s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:242 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:523 #, python-format -msgid "Unable to get record of VDI %s on" +msgid "leave: migrate_volume: id=%(id)s, host=%(host)s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:264 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:540 #, python-format -msgid "Unable to introduce VDI for SR %s" +msgid "" +"enter: retype: id=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:274 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:581 #, python-format -msgid "Error finding vdis in SR %s" +msgid "" +"exit: retype: ild=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:622 +msgid "Could not get pool data from the storage" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:281 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:623 +msgid "_update_volume_stats: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:44 #, python-format -msgid "Unable to find vbd for vdi %s" +msgid "Could not find key in output of command %(cmd)s: %(out)s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:315 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:64 #, python-format -msgid "Unable to obtain target information %(data)s, %(mountpoint)s" +msgid "Failed to get code level (%s)." 
msgstr "" -#: cinder/virt/xenapi/volume_utils.py:341 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:86 #, python-format -msgid "Mountpoint cannot be translated: %s" +msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:64 -msgid "Could not find VDI ref" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:143 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:69 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:165 #, python-format -msgid "Creating SR %s" +msgid "Failed to find host %s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:73 -msgid "Could not create SR" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:178 +#, python-format +msgid "enter: get_host_from_connector: %s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:76 -msgid "Could not retrieve SR record" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:207 +#, python-format +msgid "leave: get_host_from_connector: host %s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:81 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:218 #, python-format -msgid "Introducing SR %s" +msgid "enter: create_host: host %s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:85 -msgid "SR found in xapi database. No need to introduce" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:224 +msgid "create_host: Host name is not unicode or string" msgstr "" -#: cinder/virt/xenapi/volumeops.py:90 -msgid "Could not introduce SR" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:235 +msgid "create_host: No initiators or wwpns supplied." msgstr "" -#: cinder/virt/xenapi/volumeops.py:94 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:265 #, python-format -msgid "Checking for SR %s" +msgid "leave: create_host: host %(host)s - %(host_name)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:106 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:275 #, python-format -msgid "SR %s not found in the xapi database" +msgid "enter: map_vol_to_host: volume %(volume_name)s to host %(host_name)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:112 -msgid "Could not forget SR" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:301 +#, python-format +msgid "" +"leave: map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host " +"%(host_name)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:121 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:311 #, python-format -msgid "Attach_volume: %(connection_info)s, %(instance_name)s, %(mountpoint)s" +msgid "enter: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:178 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:318 #, python-format -msgid "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s" +msgid "unmap_vol_from_host: No mapping of volume %(vol_name)s to any host found." msgstr "" -#: cinder/virt/xenapi/volumeops.py:189 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:324 #, python-format -msgid "Unable to use SR %(sr_ref)s for instance %(instance_name)s" +msgid "" +"unmap_vol_from_host: Multiple mappings of volume %(vol_name)s found, no " +"host specified." 
msgstr "" -#: cinder/virt/xenapi/volumeops.py:197 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:336 #, python-format -msgid "Unable to attach volume to instance %s" -msgstr "Nicht möglich Volumen zur Instanze %s hinzuzufügen" +msgid "" +"unmap_vol_from_host: No mapping of volume %(vol_name)s to host %(host) " +"found." +msgstr "" -#: cinder/virt/xenapi/volumeops.py:200 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:348 #, python-format -msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s" -msgstr "Einhängepunkt%(mountpoint)s zur Instanze %(instance_name)s hinzugefügt" +msgid "leave: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" -#: cinder/virt/xenapi/volumeops.py:210 -#, python-format -msgid "Detach_volume: %(instance_name)s, %(mountpoint)s" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:377 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:383 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:390 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" msgstr "" -#: cinder/virt/xenapi/volumeops.py:219 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:397 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:402 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:408 #, python-format -msgid "Unable to locate volume %s" -msgstr "Nicht möglich volume %s zufinden" +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" -#: cinder/virt/xenapi/volumeops.py:227 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:417 #, python-format -msgid "Unable to detach volume %s" +msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:452 +msgid "Protocol must be specified as ' iSCSI' or ' FC'." msgstr "" -#: cinder/virt/xenapi/volumeops.py:232 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:495 #, python-format -msgid "Unable to destroy vbd %s" +msgid "enter: create_vdisk: vdisk %s " msgstr "" -#: cinder/virt/xenapi/volumeops.py:239 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:498 #, python-format -msgid "Error purging SR %s" +msgid "leave: _create_vdisk: volume %s " msgstr "" -#: cinder/virt/xenapi/volumeops.py:241 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:525 #, python-format -msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s" +msgid "" +"Unexecpted mapping status %(status)s for mapping%(id)s. Attributes: " +"%(attr)s" msgstr "" -#: cinder/vnc/xvp_proxy.py:98 cinder/vnc/xvp_proxy.py:103 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:535 #, python-format -msgid "Error in handshake: %s" +msgid "" +"Mapping %(id)s prepare failed to complete within theallotted %(to)d " +"seconds timeout. Terminating." 
msgstr "" -#: cinder/vnc/xvp_proxy.py:119 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:544 #, python-format -msgid "Invalid request: %s" +msgid "" +"enter: run_flashcopy: execute FlashCopy from source %(source)s to target " +"%(target)s" msgstr "" -#: cinder/vnc/xvp_proxy.py:139 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:552 #, python-format -msgid "Request: %s" +msgid "leave: run_flashcopy: FlashCopy started from %(source)s to %(target)s" msgstr "" -#: cinder/vnc/xvp_proxy.py:142 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:572 #, python-format -msgid "Request made with missing token: %s" +msgid "Loopcall: _check_vdisk_fc_mappings(), vdisk %s" msgstr "" -#: cinder/vnc/xvp_proxy.py:153 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:595 #, python-format -msgid "Request made with invalid token: %s" +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" msgstr "" -#: cinder/vnc/xvp_proxy.py:160 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:631 #, python-format -msgid "Unexpected error: %s" +msgid "Calling _ensure_vdisk_no_fc_mappings: vdisk %s" msgstr "" -#: cinder/vnc/xvp_proxy.py:180 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:639 #, python-format -msgid "Starting cinder-xvpvncproxy node (version %s)" +msgid "enter: delete_vdisk: vdisk %s" msgstr "" -#: cinder/volume/api.py:74 cinder/volume/api.py:220 -msgid "status must be available" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:641 +#, python-format +msgid "Tried to delete non-existant vdisk %s." msgstr "" -#: cinder/volume/api.py:85 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:645 #, python-format -msgid "Quota exceeded for %(pid)s, tried to create %(size)sG volume" +msgid "leave: delete_vdisk: vdisk %s" msgstr "" -#: cinder/volume/api.py:137 -msgid "Volume status must be available or error" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:649 +#, python-format +msgid "enter: create_copy: snapshot %(src)s to %(tgt)s" msgstr "" -#: cinder/volume/api.py:142 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:654 #, python-format -msgid "Volume still has %d dependent snapshots" +msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist" msgstr "" -#: cinder/volume/api.py:223 -msgid "already attached" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:669 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt)s from vdisk %(src)s" msgstr "" -#: cinder/volume/api.py:230 -msgid "already detached" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:691 +msgid "migrate_volume started without a vdisk copy in the expected pool." msgstr "" -#: cinder/volume/api.py:292 -msgid "must be available" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:743 +#, python-format +msgid "" +"Ignore change IO group as storage code level is %(code_level)s, below " +"then 6.4.0.0" msgstr "" -#: cinder/volume/api.py:325 -msgid "Volume Snapshot status must be available or error" +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:35 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:211 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:244 +#, fuzzy, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" +"%(description)s\n" +"Befehl: %(cmd)s\n" +"Exit-Code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" -#: cinder/volume/driver.py:96 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:53 #, python-format -msgid "Recovering from a failed execute. 
Try number %s" +msgid "Expected no output from CLI command %(cmd)s, got %(out)s" msgstr "" -#: cinder/volume/driver.py:106 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:65 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:256 #, python-format -msgid "volume group %s doesn't exist" +msgid "" +"Failed to parse CLI output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:142 +msgid "Must pass wwpn or host to lsfabric." msgstr "" -#: cinder/volume/driver.py:270 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:158 #, python-format -msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %d" +msgid "Did not find success message nor error for %(fun)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:163 +msgid "" +"storwize_svc_multihostmap_enabled is set to False, not allowing multi " +"host mapping." msgstr "" -#: cinder/volume/driver.py:318 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:347 #, python-format -msgid "Skipping remove_export. No iscsi_target provisioned for volume: %d" +msgid "Did not find expected key %(key)s in %(fun)s: %(raw)s" msgstr "" -#: cinder/volume/driver.py:327 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:382 #, python-format msgid "" -"Skipping remove_export. No iscsi_target is presently exported for volume:" -" %d" +"Unexpected CLI response: header/row mismatch. header: %(header)s, row: " +"%(row)s" msgstr "" -#: cinder/volume/driver.py:337 -msgid "ISCSI provider_location not stored, using discovery" +#: cinder/volume/drivers/netapp/api.py:419 +#, python-format +msgid "No element by given name %s." msgstr "" -#: cinder/volume/driver.py:384 -#, python-format -msgid "Could not find iSCSI export for volume %s" +#: cinder/volume/drivers/netapp/api.py:440 +msgid "Not a valid value for NaElement." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:444 +msgid "NaElement name cannot be null." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:468 +msgid "Type cannot be converted into NaElement." +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:75 +msgid "Required configuration not found" msgstr "" -#: cinder/volume/driver.py:388 +#: cinder/volume/drivers/netapp/common.py:103 #, python-format -msgid "ISCSI Discovery: Found %s" +msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s" msgstr "" -#: cinder/volume/driver.py:466 +#: cinder/volume/drivers/netapp/common.py:109 #, python-format -msgid "Cannot confirm exported volume id:%(volume_id)s." +msgid "Storage family %s is not supported" msgstr "" -#: cinder/volume/driver.py:493 +#: cinder/volume/drivers/netapp/common.py:116 #, python-format -msgid "FAKE ISCSI: %s" +msgid "No default storage protocol found for storage family %(storage_family)s" msgstr "" -#: cinder/volume/driver.py:505 +#: cinder/volume/drivers/netapp/common.py:123 #, python-format -msgid "rbd has no pool %s" +msgid "" +"Protocol %(storage_protocol)s is not supported for storage family " +"%(storage_family)s" msgstr "" -#: cinder/volume/driver.py:579 +#: cinder/volume/drivers/netapp/common.py:130 #, python-format -msgid "Sheepdog is not working: %s" +msgid "" +"NetApp driver of family %(storage_family)s and protocol " +"%(storage_protocol)s loaded" msgstr "" -#: cinder/volume/driver.py:581 -msgid "Sheepdog is not working" +#: cinder/volume/drivers/netapp/common.py:139 +msgid "Only loading netapp drivers supported." 
msgstr "" -#: cinder/volume/driver.py:680 cinder/volume/driver.py:685 +#: cinder/volume/drivers/netapp/common.py:158 #, python-format -msgid "LoggingVolumeDriver: %s" +msgid "" +"The configured NetApp driver is deprecated. Please refer the link to " +"resolve the issue '%s'." msgstr "" -#: cinder/volume/manager.py:96 +#: cinder/volume/drivers/netapp/iscsi.py:69 #, python-format -msgid "Re-exporting %s volumes" +msgid "No metadata property %(prop)s defined for the LUN %(name)s" msgstr "" -#: cinder/volume/manager.py:101 +#: cinder/volume/drivers/netapp/iscsi.py:105 #, python-format -msgid "volume %s: skipping export" +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:150 +msgid "Success getting LUN list from server" msgstr "" -#: cinder/volume/manager.py:107 +#: cinder/volume/drivers/netapp/iscsi.py:166 #, python-format -msgid "volume %s: creating" -msgstr "Volume %s: wird erstellt" +msgid "Created LUN with name %s" +msgstr "" -#: cinder/volume/manager.py:119 +#: cinder/volume/drivers/netapp/iscsi.py:175 #, python-format -msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgid "No entry in LUN table for volume/snapshot %(name)s." msgstr "" -#: cinder/volume/manager.py:131 +#: cinder/volume/drivers/netapp/iscsi.py:191 #, python-format -msgid "volume %s: creating export" -msgstr "Volume %s: erstelle Export" +msgid "Destroyed LUN %s" +msgstr "" -#: cinder/volume/manager.py:144 +#: cinder/volume/drivers/netapp/iscsi.py:227 #, python-format -msgid "volume %s: created successfully" -msgstr "Volume %s: erfolgreich erstellt" +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" -#: cinder/volume/manager.py:153 -msgid "Volume is still attached" +#: cinder/volume/drivers/netapp/iscsi.py:232 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" msgstr "" -#: cinder/volume/manager.py:155 -msgid "Volume is not local to this node" +#: cinder/volume/drivers/netapp/iscsi.py:238 +#, python-format +msgid "Failed to get LUN target details for the LUN %s" msgstr "" -#: cinder/volume/manager.py:159 +#: cinder/volume/drivers/netapp/iscsi.py:249 #, python-format -msgid "volume %s: removing export" -msgstr "Volume %s: entferne Export" +msgid "Failed to get target portal for the LUN %s" +msgstr "" -#: cinder/volume/manager.py:161 +#: cinder/volume/drivers/netapp/iscsi.py:252 #, python-format -msgid "volume %s: deleting" -msgstr "Volume %s: wird entfernt" +msgid "Failed to get target IQN for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:290 +#, fuzzy, python-format +msgid "Snapshot %s deletion successful" +msgstr "Volume %s: erfolgreich entfernt" -#: cinder/volume/manager.py:164 +#: cinder/volume/drivers/netapp/iscsi.py:310 +#: cinder/volume/drivers/netapp/iscsi.py:565 +#: cinder/volume/drivers/netapp/nfs.py:99 +#: cinder/volume/drivers/netapp/nfs.py:206 #, python-format -msgid "volume %s: volume is busy" +msgid "Resizing %s failed. Cleaning volume." msgstr "" -#: cinder/volume/manager.py:176 +#: cinder/volume/drivers/netapp/iscsi.py:325 #, python-format -msgid "volume %s: deleted successfully" -msgstr "Volume %s: erfolgreich entfernt" +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" -#: cinder/volume/manager.py:183 +#: cinder/volume/drivers/netapp/iscsi.py:412 #, python-format -msgid "snapshot %s: creating" +msgid "Error mapping lun. 
Code :%(code)s, Message:%(message)s" msgstr "" -#: cinder/volume/manager.py:187 +#: cinder/volume/drivers/netapp/iscsi.py:431 #, python-format -msgid "snapshot %(snap_name)s: creating" +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" msgstr "" -#: cinder/volume/manager.py:202 +#: cinder/volume/drivers/netapp/iscsi.py:511 +msgid "Object is not a NetApp LUN." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:543 #, python-format -msgid "snapshot %s: created successfully" +msgid "Message: %s" msgstr "" -#: cinder/volume/manager.py:211 +#: cinder/volume/drivers/netapp/iscsi.py:545 #, python-format -msgid "snapshot %s: deleting" +msgid "Error getting lun attribute. Exception: %s" msgstr "" -#: cinder/volume/manager.py:214 +#: cinder/volume/drivers/netapp/iscsi.py:600 #, python-format -msgid "snapshot %s: snapshot is busy" +msgid "No need to extend volume %s as it is already the requested new size." msgstr "" -#: cinder/volume/manager.py:226 +#: cinder/volume/drivers/netapp/iscsi.py:606 #, python-format -msgid "snapshot %s: deleted successfully" +msgid "Resizing lun %s directly to new size." msgstr "" -#: cinder/volume/manager.py:310 -msgid "Checking volume capabilities" +#: cinder/volume/drivers/netapp/iscsi.py:633 +#, python-format +msgid "Lun %(path)s geometry failed. Message - %(msg)s" msgstr "" -#: cinder/volume/manager.py:314 +#: cinder/volume/drivers/netapp/iscsi.py:662 #, python-format -msgid "New capabilities found: %s" +msgid "Moving lun %(name)s to %(new_name)s." msgstr "" -#: cinder/volume/manager.py:325 -msgid "Clear capabilities" +#: cinder/volume/drivers/netapp/iscsi.py:677 +#, python-format +msgid "Resizing lun %s using sub clone to new size." msgstr "" -#: cinder/volume/manager.py:329 +#: cinder/volume/drivers/netapp/iscsi.py:684 #, python-format -msgid "Notification {%s} received" +msgid "%s cannot be sub clone resized as it is hosted on compressed volume" msgstr "" -#: cinder/volume/netapp.py:79 +#: cinder/volume/drivers/netapp/iscsi.py:690 #, python-format -msgid "API %(name)sfailed: %(reason)s" +msgid "%s cannot be sub clone resized as it contains no blocks." msgstr "" -#: cinder/volume/netapp.py:109 +#: cinder/volume/drivers/netapp/iscsi.py:707 #, python-format -msgid "%s is not set" +msgid "Post clone resize lun %s" msgstr "" -#: cinder/volume/netapp.py:128 -msgid "Connected to DFM server" +#: cinder/volume/drivers/netapp/iscsi.py:718 +#, python-format +msgid "Failure staging lun %s to tmp." msgstr "" -#: cinder/volume/netapp.py:159 +#: cinder/volume/drivers/netapp/iscsi.py:723 #, python-format -msgid "Job failed: %s" +msgid "Failure moving new cloned lun to %s." msgstr "" -#: cinder/volume/netapp.py:240 -msgid "Failed to provision dataset member" +#: cinder/volume/drivers/netapp/iscsi.py:727 +#, python-format +msgid "Failure deleting staged tmp lun %s." msgstr "" -#: cinder/volume/netapp.py:252 -msgid "No LUN was created by the provision job" +#: cinder/volume/drivers/netapp/iscsi.py:730 +#, python-format +msgid "Unknown exception in post clone resize lun %s." msgstr "" -#: cinder/volume/netapp.py:261 cinder/volume/netapp.py:433 -#, fuzzy, python-format -msgid "Failed to find LUN ID for volume %s" -msgstr "Nicht möglich volume %s zufinden" +#: cinder/volume/drivers/netapp/iscsi.py:732 +#, python-format +msgid "Exception details: %s" +msgstr "" -#: cinder/volume/netapp.py:280 -msgid "Failed to remove and delete dataset member" +#: cinder/volume/drivers/netapp/iscsi.py:736 +msgid "Getting lun block count." 
msgstr "" -#: cinder/volume/netapp.py:603 cinder/volume/netapp.py:657 -#, fuzzy, python-format -msgid "No LUN ID for volume %s" -msgstr "Nicht möglich volume %s zufinden" +#: cinder/volume/drivers/netapp/iscsi.py:741 +#, python-format +msgid "Failure getting lun info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:785 +#, python-format +msgid "Failed to get vol with required size and extra specs for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:796 +#, python-format +msgid "Error provisioning vol %(name)s on %(volume)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:841 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:982 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:986 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1087 +msgid "Cluster ssc is not updated. No volume stats found." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1149 +#: cinder/volume/drivers/netapp/nfs.py:1080 +msgid "Unsupported ONTAP version. ONTAP version 7.3.1 and above is supported." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1153 +#: cinder/volume/drivers/netapp/nfs.py:1084 +#: cinder/volume/drivers/netapp/utils.py:320 +msgid "Api version could not be determined." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1164 +#, fuzzy, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "Nicht möglich volume %s zufinden" + +#: cinder/volume/drivers/netapp/iscsi.py:1273 +#, python-format +msgid "Error finding luns for volume %s. Verify volume exists." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1390 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1393 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1456 +msgid "Volume refresh job already running. Returning..." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1462 +#, python-format +msgid "Error refreshing vol capacity. Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1470 +#, python-format +msgid "Refreshing capacity info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:104 +#: cinder/volume/drivers/netapp/nfs.py:211 +#, python-format +msgid "NFS file %s not discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:224 +#, python-format +msgid "Copied image to volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:230 +#, python-format +msgid "Registering image in cache %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:237 +#, python-format +msgid "" +"Exception while registering image %(image_id)s in cache. Exception: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:250 +#, python-format +msgid "Found cache file for image %(image_id)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:263 +#, python-format +msgid "Cloning img from cache for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:271 +msgid "Image cache cleaning in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:282 +msgid "Image cache cleaning in progress." 
+msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:293 +#, python-format +msgid "Cleaning cache for share %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:298 +#, python-format +msgid "Files to be queued for deletion %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:305 +#, python-format +msgid "Exception during cache cleaning %(share)s. Message - %(ex)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:311 +msgid "Image cache cleaning done." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:336 +#, python-format +msgid "Bytes to free %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:343 +#, python-format +msgid "Delete file path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:358 +#, python-format +msgid "Deleting file at path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:363 +#, python-format +msgid "Exception during deleting %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:395 +#, python-format +msgid "Unexpected exception in cloning image %(image_id)s. Message: %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:411 +#, python-format +msgid "Cloning image %s from cache" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:415 +#, python-format +msgid "Cache share: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:425 +#, python-format +msgid "Unexpected exception during image cloning in share %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:431 +#, python-format +msgid "Cloning image %s directly in share" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:436 +#, python-format +msgid "Share is cloneable %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:443 +#, python-format +msgid "Image is raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:450 +#, python-format +msgid "Image will locally be converted to raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:457 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:467 +#, python-format +msgid "Performing post clone for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:474 +msgid "NFS file could not be discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:478 +msgid "Checking file for resize" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:482 +#, python-format +msgid "Resizing file to %sG" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:488 +msgid "Resizing image file failed." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:510 +msgid "Discover file retries exhausted." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:529 +#, python-format +msgid "Image location not in the expected format %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:557 +#, python-format +msgid "Found possible share matches %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:561 +msgid "Unexpected exception while short listing used share." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:599 +#, python-format +msgid "Extending volume %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:710 +#, python-format +msgid "Shares on vserver %s will only be used for provisioning." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:714 +#: cinder/volume/drivers/netapp/nfs.py:892 +msgid "No vserver set in config. SSC will be disabled." 
+msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:757 +#, python-format +msgid "Exception creating vol %(name)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:765 +#, python-format +msgid "Volume %s could not be created on shares." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:815 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:856 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:862 +#, python-format +msgid "" +"Cloning with params volume %(volume)s, src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:924 +msgid "No cluster ssc stats found. Wait for next volume stats update." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:941 +msgid "No shares found hence skipping ssc refresh." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:978 +#: cinder/volume/drivers/netapp/nfs.py:1221 +#, python-format +msgid "Shortlisted del elg files %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:983 +#: cinder/volume/drivers/netapp/nfs.py:1226 +#, python-format +msgid "Getting file usage for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:988 +#: cinder/volume/drivers/netapp/nfs.py:1231 +#, python-format +msgid "file-usage for path %(path)s is %(bytes)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1005 +#: cinder/volume/drivers/netapp/nfs.py:1268 +#, python-format +msgid "Share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1007 +#: cinder/volume/drivers/netapp/nfs.py:1270 +#, python-format +msgid "No share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1038 +#, python-format +msgid "Found volume %(vol)s for share %(share)s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1129 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1139 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:241 +#, python-format +msgid "Unexpected error while creating ssc vol list. Message - %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:272 +#, python-format +msgid "Exception querying aggr options. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:313 +#, python-format +msgid "Exception querying sis information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:347 +#, python-format +msgid "Exception querying mirror information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:379 +#, python-format +msgid "Exception querying storage disk. 
%s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:421 +#, python-format +msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:455 +#, python-format +msgid "Successfully completed stale refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:482 +#, python-format +msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:488 +#, python-format +msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:500 +msgid "Backend not a VolumeDriver." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:502 +msgid "Backend server not NaServer." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:505 +msgid "ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:517 +msgid "refresh stale ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:616 +msgid "Fatal error: User not permitted to query NetApp volumes." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:623 +#, python-format +msgid "" +"The user does not have access or sufficient privileges to use all ssc " +"apis. The ssc features %s may not work as expected." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:122 +msgid "ems executed successfully." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:124 +#, python-format +msgid "Failed to invoke ems. Message : %s" +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:137 +msgid "" +"It is not the recommended way to use drivers by NetApp. Please use " +"NetAppDriver to achieve the functionality." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:160 +msgid "Requires an NaServer instance." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:317 +msgid "Unsupported Clustered Data ONTAP version." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:99 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:150 +#, python-format +msgid "Extending volume: %(id)s New size: %(size)s GB" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:166 +#, python-format +msgid "Volume %s does not exist, it seems it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:179 +#, python-format +msgid "Cannot delete snapshot %(origin): %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:190 +#, python-format +msgid "Creating temp snapshot of the original volume: %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:200 +#: cinder/volume/drivers/nexenta/nfs.py:200 +#, python-format +msgid "Volume creation failed, deleting created snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:205 +#: cinder/volume/drivers/nexenta/nfs.py:205 +#, python-format +msgid "Failed to delete zfs snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:223 +#, python-format +msgid "Enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:250 +#, python-format +msgid "Remote NexentaStor appliance at %s should be SSH-bound." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:267 +#, python-format +msgid "" +"Cannot send source snapshot %(src)s to destination %(dst)s. 
Reason: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:275 +#, python-format +msgid "" +"Cannot delete temporary source snapshot %(src)s on NexentaStor Appliance:" +" %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:281 +#, python-format +msgid "Cannot delete source volume %(volume)s on NexentaStor Appliance: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:318 +#, python-format +msgid "Snapshot %s does not exist, it seems it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:439 +#: cinder/volume/drivers/windows/windows_utils.py:230 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:449 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:461 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:471 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:481 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:514 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:522 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:83 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:88 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:89 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:90 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:96 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:85 +#, python-format +msgid "Volume %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:89 +#, python-format +msgid "Folder %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:114 +#, python-format +msgid "Creating folder on Nexenta Store %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:146 +#, python-format +msgid "Cannot destroy created folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:176 +#, python-format +msgid "Cannot destroy cloned folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:227 +#, python-format +msgid "Folder %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:237 +#: cinder/volume/drivers/nexenta/nfs.py:268 +#, python-format +msgid "Snapshot %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:302 +#, python-format +msgid "Creating regular file: %s.This may take some time." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:313 +#, python-format +msgid "Regular file: %s created." 
+msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:365 +#, python-format +msgid "Sharing folder %s on Nexenta Store" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:393 +#, python-format +msgid "Shares loaded: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/utils.py:46 +#, python-format +msgid "Invalid value: \"%s\"" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:93 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:99 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:107 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:137 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:190 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:246 +#, python-format +msgid "Snapshot info: %(name)s => %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:321 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:79 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:166 +#, python-format +msgid "Invalid hp3parclient version. Version %s or greater required." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:179 +#, python-format +msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:193 +#, python-format +msgid "HP3PARCommon %(common_ver)s, hp3parclient %(rest_ver)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:212 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:494 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:228 +#, python-format +msgid "Failed to get domain because CPG (%s) doesn't exist on array." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:247 +#, python-format +msgid "Error extending volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:347 +#, python-format +msgid "command %s failed" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:390 +#, fuzzy, python-format +msgid "Error running ssh command: %s" +msgstr "Unerwarteter Fehler bei Ausführung des Kommandos." + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:586 +#, python-format +msgid "VV Set %s does not exist." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:633 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:684 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:752 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1004 +#, python-format +msgid "Failure in update_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1019 +#, python-format +msgid "Failure in clear_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1031 +#, python-format +msgid "Error attaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1039 +#, python-format +msgid "Error detaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:124 +#, python-format +msgid "Invalid IP address format '%s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:158 +#, python-format +msgid "" +"Found invalid iSCSI IP address(s) in configuration option(s) " +"hp3par_iscsi_ips or iscsi_ip_address '%s.'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:164 +msgid "At least one valid iSCSI IP address must be set." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:266 +msgid "Least busy iSCSI port not found, using first iSCSI port in list." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:75 +#, python-format +msgid "Failure while invoking function: %(func)s. Error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:162 +#, python-format +msgid "Error while terminating session: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:165 +msgid "Successfully established connection to the server." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:172 +#, python-format +msgid "Error while logging out the user: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:218 +#, python-format +msgid "" +"Not authenticated error occurred. Will create session and try API call " +"again: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:258 +#, python-format +msgid "Task: %(task)s progress: %(prog)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:262 +#, python-format +msgid "Task %s status: success." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:266 +#: cinder/volume/drivers/vmware/api.py:271 +#, python-format +msgid "Task: %(task)s failed with error: %(err)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:290 +msgid "Lease is ready." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:294 +msgid "Lease initializing..." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:304 +#, python-format +msgid "Error: unknown lease state %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:51 +#, python-format +msgid "Read %(bytes)s out of %(max)s from ThreadSafePipe." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:56 +#, python-format +msgid "Completed transfer of size %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:102 +#, python-format +msgid "Initiating image service update on image: %(image)s with meta: %(meta)s" +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:117 +#, python-format +msgid "Glance image: %s is now active." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:123 +#, python-format +msgid "Glance image: %s is in killed state." 
+msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:132 +#, python-format +msgid "Glance image %(id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:171 +#, python-format +msgid "" +"Exception during HTTP connection close in VMwareHTTPWrite. Exception is " +"%s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:203 +#: cinder/volume/drivers/vmware/read_write_util.py:292 +msgid "Could not retrieve URL from lease." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:206 +#, python-format +msgid "Opening vmdk url: %s for write." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:231 +#, python-format +msgid "Written %s bytes to vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:242 +#: cinder/volume/drivers/vmware/read_write_util.py:318 +#, python-format +msgid "Updating progress to %s percent." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:258 +#: cinder/volume/drivers/vmware/read_write_util.py:334 +msgid "Lease released." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:260 +#: cinder/volume/drivers/vmware/read_write_util.py:336 +#, python-format +msgid "Lease is already in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:295 +#, python-format +msgid "Opening vmdk url: %s for read." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:307 +#, python-format +msgid "Read %s bytes from vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:150 +#, python-format +msgid "Error(s): %s occurred in the call to RetrievePropertiesEx." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:189 +#, python-format +msgid "No such SOAP method %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:198 +#, python-format +msgid "httplib error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:209 +#, python-format +msgid "Socket error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:218 +#, python-format +msgid "Type error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:225 +#, python-format +msgid "Error in %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:112 +#, python-format +msgid "Returning spec value %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:115 +#, python-format +msgid "Invalid spec value: %s specified." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:118 +#, python-format +msgid "Returning default spec value: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:169 +#, python-format +msgid "%s not set." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:174 +#, python-format +msgid "Successfully setup driver: %(driver)s for server: %(ip)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:219 +msgid "Backing not available, no operation to be performed." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:287 +#, python-format +msgid "" +"Unable to pick datastore to accommodate %(size)s bytes from the " +"datastores: %(dss)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:293 +#, python-format +msgid "" +"Selected datastore: %(datastore)s with %(host_count)d connected host(s) " +"for the volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:375 +#, python-format +msgid "" +"Unable to find suitable datastore for volume of size: %(vol)s GB under " +"host: %(host)s. 
More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:385 +#, python-format +msgid "Unable to find host to accommodate a disk of size: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:412 +#, python-format +msgid "" +"Unable to find suitable datastore for volume: %(vol)s under host: " +"%(host)s. More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:422 +#, python-format +msgid "Unable to create volume: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:441 +#, python-format +msgid "The instance: %s for which initialize connection is called, exists." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:448 +#, python-format +msgid "There is no backing for the volume: %s. Need to create one." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:456 +msgid "The instance for which initialize connection is called, does not exist." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:461 +#, python-format +msgid "Trying to boot from an empty volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:470 +#, python-format +msgid "" +"Returning connection_info: %(info)s for volume: %(volume)s with " +"connector: %(connector)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:518 +#, python-format +msgid "Snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:523 +#, python-format +msgid "There is no backing, so will not create snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:528 +#, python-format +msgid "Successfully created snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:549 +#, python-format +msgid "Delete snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:554 +#, python-format +msgid "There is no backing, and so there is no snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:558 +#, python-format +msgid "Successfully deleted snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:586 +#, python-format +msgid "Successfully cloned new backing: %(back)s from source VMDK file: %(vmdk)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:603 +#, python-format +msgid "" +"There is no backing for the source volume: %(svol)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:633 +#, python-format +msgid "" +"There is no backing for the source snapshot: %(snap)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:642 +#: cinder/volume/drivers/vmware/vmdk.py:982 +#, python-format +msgid "" +"There is no snapshot point for the snapshoted volume: %(snap)s. Not " +"creating any backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:678 +#, python-format +msgid "Cannot create image of disk format: %s. Only vmdk disk format is accepted." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:713 +#: cinder/volume/drivers/vmware/vmdk.py:771 +#, python-format +msgid "Fetching glance image: %(id)s to server: %(host)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:722 +#: cinder/volume/drivers/vmware/vmdk.py:792 +#, python-format +msgid "Done copying image: %(id)s to volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:725 +#, python-format +msgid "" +"Exception in copy_image_to_volume: %(excep)s. Deleting the backing: " +"%(back)s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:746 +#, python-format +msgid "Exception in _select_ds_for_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:749 +#, python-format +msgid "Selected datastore %(ds)s for new volume of size %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:784 +#, python-format +msgid "Exception in copy_image_to_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:787 +#, python-format +msgid "Deleting the backing: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:808 +#, python-format +msgid "Copy glance image: %s to create new volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:842 +msgid "Upload to glance of attached volume is not supported." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:847 +#, python-format +msgid "Copy Volume: %s to new image." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:853 +#, python-format +msgid "Backing not found, creating for volume: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:872 +#, python-format +msgid "Done copying volume %(vol)s to a new image %(img)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:922 +#, python-format +msgid "Relocating volume: %(backing)s to %(ds)s and %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:961 +#: cinder/volume/drivers/vmware/volumeops.py:630 +#, python-format +msgid "Successfully created clone: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:974 +#, python-format +msgid "" +"There is no backing for the snapshoted volume: %(snap)s. Not creating any" +" backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1010 +#, python-format +msgid "" +"There is no backing for the source volume: %(src)s. Not creating any " +"backing for volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1018 +#, python-format +msgid "Linked clone of source volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:94 +#, python-format +msgid "Downloading image: %s from glance image server as a flat vmdk file." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:107 +#: cinder/volume/drivers/vmware/vmware_images.py:126 +#, python-format +msgid "Downloaded image: %s from glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:113 +#, python-format +msgid "Downloading image: %s from glance image server using HttpNfc import." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:132 +#, python-format +msgid "Uploading image: %s to the Glance image server using HttpNfc export." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:158 +#, python-format +msgid "Uploaded image: %s to the Glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:87 +#, python-format +msgid "Did not find any backing with name: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:94 +#, python-format +msgid "Deleting the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:97 +#, python-format +msgid "Initiated deletion of VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:99 +#, python-format +msgid "Deleted the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:237 +#, python-format +msgid "There are no valid datastores attached to %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:289 +#, python-format +msgid "" +"Creating folder: %(child_folder_name)s under parent folder: " +"%(parent_folder)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:306 +#, python-format +msgid "Child folder already present: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:314 +#, python-format +msgid "Created child folder: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:365 +#, python-format +msgid "Spec for creating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:383 +#, python-format +msgid "" +"Creating volume backing name: %(name)s disk_type: %(disk_type)s size_kb: " +"%(size_kb)s at folder: %(folder)s resourse pool: %(resource_pool)s " +"datastore name: %(ds_name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:395 +#, python-format +msgid "Initiated creation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:398 +#, python-format +msgid "Successfully created volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:438 +#, python-format +msgid "Spec for relocating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:452 +#, python-format +msgid "" +"Relocating backing: %(backing)s to datastore: %(ds)s and resource pool: " +"%(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:462 +#, python-format +msgid "Initiated relocation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:464 +#, python-format +msgid "" +"Successfully relocated volume backing: %(backing)s to datastore: %(ds)s " +"and resource pool: %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:474 +#, python-format +msgid "Moving backing: %(backing)s to folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:479 +#, python-format +msgid "Initiated move of volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:482 +#, python-format +msgid "Successfully moved volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:494 +#, python-format +msgid "Snapshoting backing: %(backing)s with name: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:501 +#, python-format +msgid "Initiated snapshot of volume backing: %(backing)s named: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:505 +#, python-format +msgid "Successfully created snapshot: %(snap)s for volume backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:553 +#, python-format +msgid "Deleting the snapshot: %(name)s from backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:558 +#, python-format +msgid "" +"Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " +"delete anything." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:565 +#, python-format +msgid "Initiated snapshot: %(name)s deletion for backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:569 +#, python-format +msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:597 +#, python-format +msgid "Spec for cloning the backing: %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:613 +#, python-format +msgid "" +"Creating a clone of backing: %(back)s, named: %(name)s, clone type: " +"%(type)s from snapshot: %(snap)s on datastore: %(ds)s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:627 +#, python-format +msgid "Initiated clone of backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:638 +#, python-format +msgid "Deleting file: %(file)s under datacenter: %(dc)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:646 +#, python-format +msgid "Initiated deletion via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:648 +#, python-format +msgid "Successfully deleted file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:701 +msgid "Copying disk data before snapshot of the VM" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:711 +#, python-format +msgid "Initiated copying disk data via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:713 +#, python-format +msgid "Successfully copied disk at: %(src)s to: %(dest)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:722 +#, python-format +msgid "Deleting vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:729 +#, python-format +msgid "Initiated deleting vmdk file via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:731 +#, python-format +msgid "Deleted vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/windows/windows.py:102 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:47 +#, python-format +msgid "" +"check_for_setup_error: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:53 +msgid "check_for_setup_error: there is no ISCSI traffic listening." +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:63 +#, python-format +msgid "" +"get_host_information: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:73 +#, python-format +msgid "" +"get_host_information: the ISCSI target information could not be " +"retrieved. WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:105 +#, python-format +msgid "" +"associate_initiator_with_iscsi_target: an association between initiator: " +"%(init)s and target name: %(target)s could not be established. WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:123 +#, python-format +msgid "" +"delete_iscsi_target: error when deleting the iscsi target associated with" +" target name: %(target)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:139 +#, python-format +msgid "" +"create_volume: error when creating the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:157 +#, python-format +msgid "" +"delete_volume: error when deleting the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:177 +#, python-format +msgid "" +"create_snapshot: error when creating the snapshot name: %(vol_name)s . 
" +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:193 +#, python-format +msgid "" +"create_volume_from_snapshot: error when creating the volume name: " +"%(vol_name)s from snapshot name: %(snap_name)s. WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:208 +#, python-format +msgid "" +"delete_snapshot: error when deleting the snapshot name: %(snap_name)s . " +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:223 +#, python-format +msgid "" +"create_iscsi_target: error when creating iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:240 +#, python-format +msgid "" +"remove_iscsi_target: error when deleting iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:255 +#, python-format +msgid "" +"add_disk_to_target: error adding disk associated to volume : %(vol_name)s" +" to the target name: %(tar_name)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:273 +#, python-format +msgid "" +"copy_vhd_disk: error when copying disk from source path : %(src_path)s to" +" destination path: %(dest_path)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:290 +#, python-format +msgid "" +"extend: error when extending the volume: %(vol_name)s .WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/flows/common.py:52 +#, python-format +msgid "Restoring source %(source_volid)s status to %(status)s" +msgstr "" + +#: cinder/volume/flows/common.py:58 +#, python-format +msgid "" +"Failed setting source volume %(source_volid)s back to its initial " +"%(source_status)s status" +msgstr "" + +#: cinder/volume/flows/common.py:83 +#, python-format +msgid "Updating volume: %(volume_id)s with %(update)s due to: %(reason)s" +msgstr "" + +#: cinder/volume/flows/common.py:90 +#: cinder/volume/flows/manager/create_volume.py:676 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(update)s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:81 +#, python-format +msgid "Originating snapshot status must be one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:103 +#, python-format +msgid "" +"Unable to create a volume from an originating source volume when its " +"status is not one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:126 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than the snapshot size " +"%(snap_size)sGB. They must be >= original snapshot size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:135 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than original volume size " +"%(source_size)sGB. They must be >= original volume size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:144 +#, python-format +msgid "Volume size %(size)s must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:186 +#, python-format +msgid "" +"Size of specified image %(image_size)sGB is larger than volume size " +"%(volume_size)sGB." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:194 +#, python-format +msgid "" +"Volume size %(volume_size)sGB cannot be smaller than the image minDisk " +"size %(min_disk)sGB." 
+msgstr "" + +#: cinder/volume/flows/api/create_volume.py:212 +#, python-format +msgid "Metadata property key %s greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:217 +#, python-format +msgid "Metadata property key %s value greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:254 +#, python-format +msgid "Availability zone '%s' is invalid" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:267 +msgid "Volume must be in the same availability zone as the snapshot" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:276 +msgid "Volume must be in the same availability zone as the source volume" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:315 +msgid "Volume type will be changed to be the same as the source volume." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:463 +#, python-format +msgid "Failed destroying volume entry %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:546 +#, python-format +msgid "Failed rolling back quota for %s reservations" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:590 +#, python-format +msgid "Failed to update quota for deleting volume: %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:678 +#: cinder/volume/flows/manager/create_volume.py:192 +#, python-format +msgid "Volume %s: create failed" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:682 +msgid "Unexpected build error:" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:105 +#, python-format +msgid "" +"Volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d due to " +"%(reason)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:124 +#, python-format +msgid "Volume %s: re-scheduled" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:141 +#, python-format +msgid "Updating volume %(volume_id)s with %(update)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:146 +#, python-format +msgid "Volume %s: resetting 'creating' status failed." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:165 +#, python-format +msgid "Volume %s: rescheduling failed" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:308 +#, python-format +msgid "" +"Failed notifying about the volume action %(event)s for volume " +"%(volume_id)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:345 +#, python-format +msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:347 +#, python-format +msgid "" +"Failed updating volume %(vol_id)s metadata using the provided " +"%(src_type)s %(src_id)s metadata" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:405 +#, python-format +msgid "" +"Failed fetching snapshot %(snapshot_id)s bootable flag using the provided" +" glance snapshot %(snapshot_ref_id)s volume reference" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:418 +#, python-format +msgid "Marking volume %s as bootable." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:421 +#, python-format +msgid "Failed updating volume %(volume_id)s bootable flag to true" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:448 +#, python-format +msgid "" +"Attempting download of %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s." 
+msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:455 +#: cinder/volume/flows/manager/create_volume.py:466 +#, python-format +msgid "" +"Failed to copy image %(image_id)s to volume: %(volume_id)s, error: " +"%(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:461 +#, python-format +msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:475 +#, python-format +msgid "" +"Downloaded image %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s successfully." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:512 +#, python-format +msgid "" +"Creating volume glance metadata for volume %(volume_id)s backed by image " +"%(image_id)s with: %(vol_metadata)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:526 +#, python-format +msgid "" +"Cloning %(volume_id)s from image %(image_id)s at location " +"%(image_location)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:552 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(updates)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:574 +#, python-format +msgid "Unable to create volume. Volume driver %s not initialized" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:588 +#, python-format +msgid "" +"Volume %(volume_id)s: being created using %(functor)s with specification:" +" %(volume_spec)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:611 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with creation provided " +"model %(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:619 +#, python-format +msgid "Volume %s: creating export" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:633 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with driver provided model " +"%(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:680 +#, python-format +msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" +msgstr "" + +#~ msgid "Error retrieving volume status: %s" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get system name" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get storage pool data" +#~ msgstr "" + +#~ msgid "Cannot find any Fibre Channel HBAs" +#~ msgstr "" + +#~ msgid "Volume status must be available or error" +#~ msgstr "" + +#~ msgid "No backend config with id %s" +#~ msgstr "" + +#~ msgid "No sm_flavor called %s" +#~ msgstr "" + +#~ msgid "No sm_volume with id %s" +#~ msgstr "" + +#~ msgid "Error: %s" +#~ msgstr "" + +#~ msgid "Unexpected state while cloning %s" +#~ msgstr "Unerwarteter Fehler bei Ausführung des Kommandos." + +#~ msgid "iSCSI device not found at %s" +#~ msgstr "" + +#~ msgid "Fibre Channel device not found." 
+#~ msgstr "" + +#~ msgid "Uncaught exception" +#~ msgstr "" + +#~ msgid "Out reactor registered" +#~ msgstr "" + +#~ msgid "CONSUMER GOT %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT QUEUED %(data)s" +#~ msgstr "" + +#~ msgid "Could not create IPC directory %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT %(data)s" +#~ msgstr "" + +#~ msgid "May specify only one of snapshot, imageRef or source volume" +#~ msgstr "" + +#~ msgid "Volume size cannot be lesser than the Snapshot size" +#~ msgstr "" + +#~ msgid "Unable to clone volumes that are in an error state" +#~ msgstr "" + +#~ msgid "Clones currently must be >= original volume size." +#~ msgstr "" + +#~ msgid "Volume size '%s' must be an integer and greater than 0" +#~ msgstr "" + +#~ msgid "Size of specified image is larger than volume size." +#~ msgstr "" + +#~ msgid "Image minDisk size is larger than the volume size." +#~ msgstr "" + +#~ msgid "" +#~ msgstr "" + +#~ msgid "Availability zone is invalid" +#~ msgstr "" + +#~ msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +#~ msgstr "" + +#~ msgid "volume %s: creating from snapshot" +#~ msgstr "Volume %s: erstelle Export" + +#~ msgid "volume %s: creating from existing volume" +#~ msgstr "Volume %s: erstelle Export" + +#~ msgid "volume %s: creating from image" +#~ msgstr "Volume %s: wird erstellt" + +#~ msgid "volume %s: creating" +#~ msgstr "Volume %s: wird erstellt" + +#~ msgid "Setting volume: %s status to error after failed image copy." +#~ msgstr "" + +#~ msgid "Unexpected Error: " +#~ msgstr "" + +#~ msgid "volume %s: creating export" +#~ msgstr "Volume %s: erstelle Export" + +#~ msgid "volume %s: create failed" +#~ msgstr "Volume %s: wird erstellt" + +#~ msgid "volume %s: created successfully" +#~ msgstr "Volume %s: erfolgreich erstellt" + +#~ msgid "volume %s: Error trying to reschedule create" +#~ msgstr "" + +#~ msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +#~ msgstr "" + +#~ msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +#~ msgstr "" + +#~ msgid "Downloaded image %(image_id)s to %(volume_id)s successfully." +#~ msgstr "" + +#~ msgid "Array Mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "LUN %(lun)s of size %(size)s MB is created." +#~ msgstr "" + +#~ msgid "Array mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "Failed to attach iser target for volume %(volume_id)s." 
+#~ msgstr "" + +#~ msgid "Fetching %s" +#~ msgstr "Hole %s" + +#~ msgid "Link Local address is not found.:%s" +#~ msgstr "" + +#~ msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +#~ msgstr "" + +#~ msgid "Started %(name)s on %(_host)s:%(_port)s" +#~ msgstr "" + +#~ msgid "Unable to find a Fibre Channel volume device" +#~ msgstr "" + +#~ msgid "Volume device not found at %s" +#~ msgstr "" + +#~ msgid "Unable to find Volume Group: %s" +#~ msgstr "Nicht möglich volume %s zufinden" + +#~ msgid "Failed to create Volume Group: %s" +#~ msgstr "Nicht möglich volume %s zufinden" + +#~ msgid "snapshot %(snap_name)s: creating" +#~ msgstr "" + +#~ msgid "Running with CoraidDriver for ESM EtherCLoud" +#~ msgstr "" + +#~ msgid "Update session cookie %(session)s" +#~ msgstr "" + +#~ msgid "Message : %(message)s" +#~ msgstr "" + +#~ msgid "Error while trying to set group: %(message)s" +#~ msgstr "" + +#~ msgid "Unable to find group: %(group)s" +#~ msgstr "" + +#~ msgid "ESM urlOpen error" +#~ msgstr "" + +#~ msgid "JSON Error" +#~ msgstr "" + +#~ msgid "Request without URL" +#~ msgstr "" + +#~ msgid "Configure data : %s" +#~ msgstr "" + +#~ msgid "Configure response : %s" +#~ msgstr "" + +#~ msgid "Unable to retrive volume infos for volume %(volname)s" +#~ msgstr "" + +#~ msgid "Cannot login on Coraid ESM" +#~ msgstr "" + +#~ msgid "Fail to create volume %(volname)s" +#~ msgstr "Nicht möglich volume %s zufinden" + +#~ msgid "Failed to delete volume %(volname)s" +#~ msgstr "Nicht möglich volume %s zufinden" + +#~ msgid "Failed to Create Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "Failed to Delete Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "Failed to Create Volume from Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "fmt = %(fmt)s backed by: %(backing_file)s" +#~ msgstr "" + +#~ msgid "Expected image to be in raw format, but is %s" +#~ msgstr "" + +#~ msgid "volume group %s doesn't exist" +#~ msgstr "" + +#~ msgid "Error retrieving volume stats: %s" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Could not get system name" +#~ msgstr "" + +#~ msgid "CPG (%s) must be in a domain" +#~ msgstr "" + +#~ msgid "Error populating default encryption types!" +#~ msgstr "" + +#~ msgid "Unexpected error while running command." +#~ msgstr "Unerwarteter Fehler bei Ausführung des Kommandos." 
+ +#~ msgid "Nexenta SA returned the error" +#~ msgstr "" + +#~ msgid "Ignored target group creation error \"%s\" while ensuring export" +#~ msgstr "" -#: cinder/volume/netapp.py:607 cinder/volume/netapp.py:661 -#, python-format -msgid "Failed to get LUN details for LUN ID %s" -msgstr "" +#~ msgid "Ignored target group member addition error \"%s\" while ensuring export" +#~ msgstr "" -#: cinder/volume/netapp.py:614 -#, python-format -msgid "Failed to get host details for host ID %s" -msgstr "" +#~ msgid "Ignored LU creation error \"%s\" while ensuring export" +#~ msgstr "" -#: cinder/volume/netapp.py:620 -#, python-format -msgid "Failed to get target portal for filer: %s" -msgstr "" +#~ msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +#~ msgstr "" -#: cinder/volume/netapp.py:625 -#, python-format -msgid "Failed to get target IQN for filer: %s" -msgstr "" +#~ msgid "Copying metadata from snapshot %(snap_volume_id)s to %(volume_id)s" +#~ msgstr "" -#: cinder/volume/san.py:113 cinder/volume/san.py:151 -msgid "Specify san_password or san_private_key" -msgstr "" +#~ msgid "Connection to glance failed" +#~ msgstr "" -#: cinder/volume/san.py:156 -msgid "san_ip must be set" -msgstr "" +#~ msgid "Invalid snapshot" +#~ msgstr "" -#: cinder/volume/san.py:320 -#, python-format -msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" -msgstr "" +#~ msgid "Invalid input received" +#~ msgstr "" -#: cinder/volume/san.py:452 -#, python-format -msgid "CLIQ command returned %s" -msgstr "" +#~ msgid "Invalid volume type" +#~ msgstr "" -#: cinder/volume/san.py:458 -#, python-format -msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" -msgstr "" +#~ msgid "Invalid volume" +#~ msgstr "" -#: cinder/volume/san.py:466 -#, python-format -msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" -msgstr "" +#~ msgid "Invalid host" +#~ msgstr "" -#: cinder/volume/san.py:496 -#, python-format -msgid "" -"Unexpected number of virtual ips for cluster %(cluster_name)s. " -"Result=%(_xml)s" -msgstr "" +#~ msgid "Invalid auth key" +#~ msgstr "" -#: cinder/volume/san.py:549 -#, python-format -msgid "Volume info: %(volume_name)s => %(volume_attributes)s" -msgstr "" +#~ msgid "Invalid metadata" +#~ msgstr "" -#: cinder/volume/san.py:594 -msgid "local_path not supported" -msgstr "" +#~ msgid "Invalid metadata size" +#~ msgstr "" -#: cinder/volume/san.py:626 -#, python-format -msgid "Could not determine project for volume %s, can't export" -msgstr "" +#~ msgid "Migration error" +#~ msgstr "" -#: cinder/volume/san.py:696 -#, python-format -msgid "Payload for SolidFire API call: %s" -msgstr "" +#~ msgid "Quota exceeded" +#~ msgstr "" -#: cinder/volume/san.py:713 -#, python-format -msgid "Call to json.loads() raised an exception: %s" -msgstr "" +#~ msgid "Connection to swift failed" +#~ msgstr "" -#: cinder/volume/san.py:718 -#, python-format -msgid "Results of SolidFire API call: %s" -msgstr "" +#~ msgid "Volume migration failed" +#~ msgstr "" -#: cinder/volume/san.py:732 -#, python-format -msgid "Found solidfire account: %s" -msgstr "" +#~ msgid "SSH command injection detected" +#~ msgstr "" -#: cinder/volume/san.py:746 -#, python-format -msgid "solidfire account: %s does not exist, create it..." -msgstr "" +#~ msgid "Invalid qos specs" +#~ msgstr "" -#: cinder/volume/san.py:804 -msgid "Enter SolidFire create_volume..." 
-msgstr "" +#~ msgid "debug in callback: %s" +#~ msgstr "" -#: cinder/volume/san.py:846 -msgid "Leaving SolidFire create_volume" -msgstr "" +#~ msgid "Expected object of type: %s" +#~ msgstr "" -#: cinder/volume/san.py:861 -msgid "Enter SolidFire delete_volume..." -msgstr "" +#~ msgid "timefunc: '%(name)s' took %(total_time).2f secs" +#~ msgstr "" -#: cinder/volume/san.py:880 -#, python-format -msgid "Deleting volumeID: %s " -msgstr "" +#~ msgid "base image still has %s snapshots so not deleting base image" +#~ msgstr "" -#: cinder/volume/san.py:888 -msgid "Leaving SolidFire delete_volume" -msgstr "" +#~ msgid "Failed to rename migration destination volume %(vol)s to %(name)s" +#~ msgstr "" -#: cinder/volume/san.py:891 -msgid "Executing SolidFire ensure_export..." -msgstr "" +#~ msgid "Resize volume \"%(name)s\" to %(size)s" +#~ msgstr "" -#: cinder/volume/san.py:895 -msgid "Executing SolidFire create_export..." -msgstr "" +#~ msgid "Volume \"%(name)s\" resized. New size is %(size)s" +#~ msgstr "" -#: cinder/volume/volume_types.py:49 cinder/volume/volume_types.py:108 -msgid "name cannot be None" -msgstr "" +#~ msgid "Invalid snapshot backing file format: %s" +#~ msgstr "" -#: cinder/volume/volume_types.py:96 -msgid "id cannot be None" -msgstr "" +#~ msgid "Extend volume from %(old_size) to %(new_size)" +#~ msgstr "" -#: cinder/volume/xensm.py:55 -#, python-format -msgid "SR name = %s" -msgstr "" +#~ msgid "pool %s doesn't exist" +#~ msgstr "" -#: cinder/volume/xensm.py:56 -#, python-format -msgid "Params: %s" -msgstr "" +#~ msgid "_update_volume_stats: Could not get system name." +#~ msgstr "" -#: cinder/volume/xensm.py:60 -#, python-format -msgid "Failed to create sr %s...continuing" -msgstr "" +#~ msgid "Disk not found: %s" +#~ msgstr "" -#: cinder/volume/xensm.py:62 -msgid "Create failed" -msgstr "" +#~ msgid "read timed out" +#~ msgstr "" -#: cinder/volume/xensm.py:64 -#, python-format -msgid "SR UUID of new SR is: %s" -msgstr "" +#~ msgid "check_for_setup_error." +#~ msgstr "" -#: cinder/volume/xensm.py:71 -msgid "Failed to update db" -msgstr "" +#~ msgid "check_for_setup_error: Can not get device type." +#~ msgstr "" -#: cinder/volume/xensm.py:80 -#, python-format -msgid "Failed to introduce sr %s...continuing" -msgstr "" +#~ msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +#~ msgstr "" -#: cinder/volume/xensm.py:91 -#, python-format -msgid "Failed to reach backend %d" -msgstr "" +#~ msgid "_get_device_type: Storage Pool must be configured." +#~ msgstr "" -#: cinder/volume/xensm.py:100 -msgid "XenSMDriver requires xenapi connection" -msgstr "" +#~ msgid "create_volume:volume name: %s." +#~ msgstr "" -#: cinder/volume/xensm.py:110 -msgid "Failed to initiate session" -msgstr "" +#~ msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +#~ msgstr "" -#: cinder/volume/xensm.py:142 -#, python-format -msgid "Volume will be created in backend - %d" -msgstr "" +#~ msgid "create_export: volume name:%s" +#~ msgstr "" -#: cinder/volume/xensm.py:154 -msgid "Failed to update volume in db" -msgstr "" +#~ msgid "create_export:Volume %(name)s does not exist." +#~ msgstr "" -#: cinder/volume/xensm.py:157 -msgid "Unable to create volume" -msgstr "" +#~ msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +#~ msgstr "" -#: cinder/volume/xensm.py:171 -msgid "Failed to delete vdi" -msgstr "" +#~ msgid "terminate_connection:Host does not exist. Host name:%(host)s." 
+#~ msgstr "" -#: cinder/volume/xensm.py:177 -msgid "Failed to delete volume in db" -msgstr "" +#~ msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +#~ msgstr "" -#: cinder/volume/xensm.py:210 -msgid "Failed to find volume in db" -msgstr "" +#~ msgid "create_snapshot:Device does not support snapshot." +#~ msgstr "" -#: cinder/volume/xensm.py:221 -msgid "Failed to find backend in db" -msgstr "" +#~ msgid "create_snapshot:Resource pool needs 1GB valid size at least." +#~ msgstr "" -#: cinder/volume/nexenta/__init__.py:27 -msgid "Nexenta SA returned the error" -msgstr "" +#~ msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:64 -#, python-format -msgid "Sending JSON data: %s" -msgstr "" +#~ msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s" +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:69 -#, python-format -msgid "Auto switching to HTTPS connection to %s" -msgstr "" +#~ msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:75 -msgid "No headers in server response" -msgstr "" +#~ msgid "delete_snapshot:Device does not support snapshot." +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:76 -msgid "Bad response from server" -msgstr "" +#~ msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:79 -#, python-format -msgid "Got response: %s" -msgstr "" +#~ msgid "_check_conf_file: %s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:96 -#, python-format -msgid "Volume %s does not exist in Nexenta SA" -msgstr "" +#~ msgid "Write login information to xml error. %s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:180 -msgid "" -"Call to local_path should not happen. Verify that use_local_volumes flag " -"is turned off." -msgstr "" +#~ msgid "_get_login_info error. %s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:202 -#, python-format -msgid "Ignored target creation error \"%s\" while ensuring export" -msgstr "" +#~ msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:210 -#, python-format -msgid "Ignored target group creation error \"%s\" while ensuring export" -msgstr "" +#~ msgid "_get_lun_set_info:%s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:219 -#, python-format -msgid "Ignored target group member addition error \"%s\" while ensuring export" -msgstr "" +#~ msgid "_get_iscsi_info:%s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:227 -#, python-format -msgid "Ignored LU creation error \"%s\" while ensuring export" -msgstr "" +#~ msgid "CLI command:%s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:237 -#, python-format -msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" -msgstr "" +#~ msgid "_execute_cli:%s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:273 -#, python-format -msgid "" -"Got error trying to destroy target group %(target_group)s, assuming it is" -" already gone: %(exc)s" -msgstr "" +#~ msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:280 -#, python-format -msgid "" -"Got error trying to delete target %(target)s, assuming it is already " -"gone: %(exc)s" -msgstr "" +#~ msgid "_get_tgt_iqn:iSCSI IP is %s." 
+#~ msgstr "" -#~ msgid "Unable to locate account %(account_name) on Solidfire device" +#~ msgid "_get_tgt_iqn:iSCSI target iqn is:%s" #~ msgstr "" -#~ msgid "Zone %(zone_id)s could not be found." +#~ msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" #~ msgstr "" -#~ msgid "Cinder access parameters were not specified." +#~ msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" #~ msgstr "" -#~ msgid "Virtual Storage Array %(id)d could not be found." +#~ msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." #~ msgstr "" -#~ msgid "Virtual Storage Array %(name)s could not be found." +#~ msgid "Ignored target creation error while ensuring export" #~ msgstr "" -#~ msgid "Detected more than one volume with name %(vol_name)" +#~ msgid "Ignored target group creation error while ensuring export" #~ msgstr "" -#~ msgid "Detected existing vlan with id %(vlan)" +#~ msgid "Ignored target group member addition error while ensuring export" #~ msgstr "" -#~ msgid "" -#~ "Attempting to grab semaphore \"%(lock)s\" " -#~ "for method \"%(method)s\"...lock" +#~ msgid "Ignored LU creation error while ensuring export" #~ msgstr "" -#~ msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgid "Ignored LUN mapping entry addition error while ensuring export" #~ msgstr "" -#~ msgid "" -#~ "Attempting to grab file lock " -#~ "\"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgid "Invalid source volume %(reason)s." #~ msgstr "" -#~ msgid "Got file lock \"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgid "The request is invalid." #~ msgstr "" -#~ msgid "Parent group id and group id cannot be same" +#~ msgid "Volume %(volume_id)s persistence file could not be found." #~ msgstr "" -#~ msgid "No body provided" +#~ msgid "No disk at %(location)s" #~ msgstr "" -#~ msgid "Create VSA %(display_name)s of type %(vc_type)s" +#~ msgid "Class %(class_name)s could not be found: %(exception)s" #~ msgstr "" -#~ msgid "Delete VSA with id: %s" +#~ msgid "Action not allowed." #~ msgstr "" -#~ msgid "Associate address %(ip)s to VSA %(id)s" +#~ msgid "Key pair %(key_name)s already exists." #~ msgstr "" -#~ msgid "Disassociate address from VSA %(id)s" +#~ msgid "Migration error: %(reason)s" #~ msgstr "" -#~ msgid "%(obj)s with ID %(id)s not found" +#~ msgid "Maximum volume/snapshot size exceeded" #~ msgstr "" -#~ msgid "" -#~ "%(obj)s with ID %(id)s belongs to " -#~ "VSA %(own_vsa_id)s and not to VSA " -#~ "%(vsa_id)s." +#~ msgid "3PAR Host already exists: %(err)s. %(info)s" #~ msgstr "" -#~ msgid "Index. vsa_id=%(vsa_id)s" +#~ msgid "Backup volume %(volume_id)s type not recognised." #~ msgstr "" -#~ msgid "Detail. vsa_id=%(vsa_id)s" +#~ msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s" #~ msgstr "" -#~ msgid "Create. vsa_id=%(vsa_id)s, body=%(body)s" +#~ msgid "ssh_read: Read SSH timeout" #~ msgstr "" -#~ msgid "Create volume of %(size)s GB from VSA ID %(vsa_id)s" +#~ msgid "do_setup." #~ msgstr "" -#~ msgid "Update %(obj)s with id: %(id)s, changes: %(changes)s" +#~ msgid "create_volume: volume name: %s." #~ msgstr "" -#~ msgid "Delete. vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgid "delete_volume: volume name: %s." #~ msgstr "" -#~ msgid "Show. 
vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgid "create_cloned_volume: src volume: %(src)s tgt volume: %(tgt)s" #~ msgstr "" -#~ msgid "Index instances for VSA %s" +#~ msgid "create_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" #~ msgstr "" -#~ msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances" +#~ msgid "delete_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" #~ msgstr "" -#~ msgid "" -#~ "Instance quota exceeded. You cannot run" -#~ " any more instances of this type." +#~ msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s" #~ msgstr "" -#~ msgid "" -#~ "Instance quota exceeded. You can only" -#~ " run %s more instances of this " -#~ "type." +#~ msgid "_update_volume_stats: Updating volume stats" #~ msgstr "" -#~ msgid "Going to try to soft delete %s" +#~ msgid "restore finished." #~ msgstr "" -#~ msgid "No host for instance %s, deleting immediately" +#~ msgid "Error encountered during initialization of driver: %s" #~ msgstr "" -#~ msgid "Going to try to terminate %s" +#~ msgid "Unabled to update stats, driver is uninitialized" #~ msgstr "" -#~ msgid "Going to try to stop %s" +#~ msgid "Snapshot file at %s does not exist." #~ msgstr "" -#~ msgid "Going to try to start %s" +#~ msgid "_create_copy: Source vdisk %s does not exist" #~ msgstr "" -#~ msgid "" -#~ "Going to force the deletion of the" -#~ " vm %(instance_uuid)s, even if it is" -#~ " deleted" +#~ msgid "Login to 3PAR array invalid" #~ msgstr "" -#~ msgid "" -#~ "Instance %(instance_uuid)s did not exist " -#~ "in the DB, but I will shut " -#~ "it down anyway using a special " -#~ "context" +#~ msgid "There are no datastores present under %s." #~ msgstr "" -#~ msgid "exception terminating the instance %(instance_uuid)s" +#~ msgid "Size for volume: %s not found, skipping secure delete." #~ msgstr "" -#~ msgid "trying to destroy already destroyed instance: %s" +#~ msgid "Could not find attribute for LUN named %s" #~ msgstr "" -#~ msgid "" -#~ "Instance %(name)s found in database but" -#~ " not known by hypervisor. Setting " -#~ "power state to NOSTATE" +#~ msgid "Cleaning up incomplete backup operations" #~ msgstr "" -#~ msgid "" -#~ "Detected instance with name label " -#~ "'%(name_label)s' which is marked as " -#~ "DELETED but still present on host." +#~ msgid "Resetting volume %s to available (was backing-up)" #~ msgstr "" -#~ msgid "" -#~ "Destroying instance with name label " -#~ "'%(name_label)s' which is marked as " -#~ "DELETED but still present on host." +#~ msgid "Resetting volume %s to error_restoring (was restoring-backup)" #~ msgstr "" -#~ msgid "SQL connection failed (%(connstring)s). %(attempts)d attempts left." +#~ msgid "Resetting backup %s to error (was creating)" #~ msgstr "" -#~ msgid "Can't downgrade without losing data" +#~ msgid "Resetting backup %s to available (was restoring)" #~ msgstr "" -#~ msgid "Instance %(instance_id)s not found" +#~ msgid "Resuming delete on backup: %s" #~ msgstr "" -#~ msgid "Network %s has active ports, cannot delete" +#~ msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" #~ msgstr "" -#~ msgid "No fixed IPs to deallocate for vif %sid" +#~ msgid "create_backup finished. backup: %s" #~ msgstr "" -#~ msgid "" -#~ "AMQP server on %(fl_host)s:%(fl_port)d is " -#~ "unreachable: %(e)s. Trying again in " -#~ "%(fl_intv)d seconds." +#~ msgid "delete_backup started, backup: %s" #~ msgstr "" -#~ msgid "Unable to connect to AMQP server after %(tries)d tries. Shutting down." 
+#~ msgid "delete_backup finished, backup %s deleted" #~ msgstr "" -#~ msgid "Reconnected to queue" +#~ msgid "JSON transfer Error" #~ msgstr "" -#~ msgid "Failed to fetch message from queue: %s" +#~ msgid "create volume error: %(err)s" #~ msgstr "" -#~ msgid "Initing the Adapter Consumer for %s" +#~ msgid "Create snapshot error." #~ msgstr "" -#~ msgid "Created \"%(exchange)s\" fanout exchange with \"%(key)s\" routing key" +#~ msgid "Create luncopy error." #~ msgstr "" -#~ msgid "Exception while processing consumer" +#~ msgid "_find_host_lun_id transfer data error! " #~ msgstr "" -#~ msgid "Creating \"%(exchange)s\" fanout exchange" +#~ msgid "ssh_read: Read SSH timeout." #~ msgstr "" -#~ msgid "response %s" +#~ msgid "There are no hosts in the inventory." #~ msgstr "" -#~ msgid "topic is %s" -#~ msgstr "Betreff ist %s" +#~ msgid "Unable to create volume: %(vol)s on the hosts: %(hosts)s." +#~ msgstr "" -#~ msgid "message %s" -#~ msgstr "Nachricht %s" +#~ msgid "Successfully cloned new backing: %s." +#~ msgstr "" -#~ msgid "" -#~ "Cannot confirm tmpfile at %(ipath)s is" -#~ " on same shared storage between " -#~ "%(src)s and %(dest)s." +#~ msgid "Successfully reverted clone: %(clone)s to snapshot: %(snapshot)s." #~ msgstr "" -#~ msgid "" -#~ "Unable to migrate %(instance_id)s to " -#~ "%(dest)s: Lack of memory(host:%(avail)s <= " -#~ "instance:%(mem_inst)s)" +#~ msgid "Copying backing files from %(src)s to %(dest)s." #~ msgstr "" -#~ msgid "" -#~ "Unable to migrate %(instance_id)s to " -#~ "%(dest)s: Lack of disk(host:%(available)s <=" -#~ " instance:%(necessary)s)" +#~ msgid "Initiated copying of backing via task: %s." #~ msgstr "" -#~ msgid "Driver Method %(driver_method)s missing: %(e)s.Reverting to schedule()" +#~ msgid "Successfully copied backing to %s." #~ msgstr "" -#~ msgid "Setting instance %(instance_uuid)s to ERROR state." +#~ msgid "Registering backing at path: %s to inventory." #~ msgstr "" -#~ msgid "_filter_hosts: %(request_spec)s" +#~ msgid "Initiated registring backing, task: %s." #~ msgstr "" -#~ msgid "Filter hosts for drive type %s" +#~ msgid "Successfully registered backing: %s." #~ msgstr "" -#~ msgid "Host %s has no free capacity. Skip" +#~ msgid "Reverting backing to snapshot: %s." #~ msgstr "" -#~ msgid "Filter hosts: %s" +#~ msgid "Initiated reverting snapshot via task: %s." #~ msgstr "" -#~ msgid "Must implement host selection mechanism" +#~ msgid "Successfully reverted to snapshot: %s." #~ msgstr "" -#~ msgid "Maximum number of hosts selected (%d)" +#~ msgid "Successfully copied disk data to: %s." #~ msgstr "" -#~ msgid "Selected excessive host %(host)s" +#~ msgid "Error(s): %s occurred in the call to RetrieveProperties." #~ msgstr "" -#~ msgid "Provision volume %(name)s of size %(size)s GB on host %(host)s" +#~ msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" #~ msgstr "" -#~ msgid "volume_params %(volume_params)s" +#~ msgid "Deploy v1 of the Cinder API. " #~ msgstr "" -#~ msgid "%(i)d: Volume %(name)s" +#~ msgid "Deploy v2 of the Cinder API. " #~ msgstr "" -#~ msgid "Attempting to spawn %(num_volumes)d volume(s)" +#~ msgid "_read_xml:%s" #~ msgstr "" -#~ msgid "Error creating volumes" +#~ msgid "request ip info is %s." #~ msgstr "" -#~ msgid "Non-VSA volume %d" +#~ msgid "new str info is %s." #~ msgstr "" -#~ msgid "Spawning volume %(volume_id)s with drive type %(drive_type)s" +#~ msgid "Failed to create iser target for volume %(volume_id)s." 
#~ msgstr "" -#~ msgid "Error creating volume" +#~ msgid "Failed to remove iser target for volume %(volume_id)s." #~ msgstr "" -#~ msgid "No capability selected for volume of size %(size)s" +#~ msgid "rtstool is not installed correctly" #~ msgstr "" -#~ msgid "Host %s:" +#~ msgid "Creating iser_target for: %s" #~ msgstr "" -#~ msgid "" -#~ "\tDrive %(qosgrp)-25s: total %(total)2s, " -#~ "used %(used)2s, free %(free)2s. Available " -#~ "capacity %(avail)-5s" +#~ msgid "Failed to create iser target for volume id:%(vol_id)s: %(e)s" #~ msgstr "" -#~ msgid "" -#~ "\t LeastUsedHost: Best host: %(best_host)s." -#~ " (used capacity %(min_used)s)" +#~ msgid "Removing iser_target for: %s" #~ msgstr "" -#~ msgid "" -#~ "\t MostAvailCap: Best host: %(best_host)s. " -#~ "(available %(max_avail)s %(type_str)s)" +#~ msgid "Failed to remove iser target for volume id:%(vol_id)s: %(e)s" #~ msgstr "" -#~ msgid "(%(nm)s) publish (key: %(routing_key)s) %(message)s" +#~ msgid "Volume %s does not exist, it seems it was already deleted" #~ msgstr "" -#~ msgid "Publishing to route %s" +#~ msgid "Executing zfs send/recv on the appliance" #~ msgstr "" -#~ msgid "Declaring queue %s" +#~ msgid "zfs send/recv done, new volume %s created" #~ msgstr "" -#~ msgid "Declaring exchange %s" +#~ msgid "Failed to delete temp snapshot %(volume)s@%(snapshot)s" #~ msgstr "" -#~ msgid "Binding %(queue)s to %(exchange)s with key %(routing_key)s" +#~ msgid "Failed to delete zfs recv snapshot %(volume)s@%(snapshot)s" #~ msgstr "" -#~ msgid "Getting from %(queue)s: %(message)s" +#~ msgid "rbd export-diff failed - %s" #~ msgstr "" -#~ msgid "Test: Emulate wrong VSA name. Raise" +#~ msgid "rbd import-diff failed - %s" #~ msgstr "" -#~ msgid "Test: Emulate DB error. Raise" +#~ msgid "%s is not on GPFS. Perhaps GPFS not mounted." #~ msgstr "" -#~ msgid "Test: user_data = %s" +#~ msgid "Folder %s does not exist, it seems it was already deleted." #~ msgstr "" -#~ msgid "_create: param=%s" +#~ msgid "No 'os-update_readonly_flag' was specified in request." #~ msgstr "" -#~ msgid "Host %s" +#~ msgid "Volume 'readonly' flag must be specified in request as a boolean." #~ msgstr "" -#~ msgid "Test: provision vol %(name)s on host %(host)s" +#~ msgid "ISER provider_location not stored, using discovery" #~ msgstr "" -#~ msgid "\t vol=%(vol)s" +#~ msgid "Could not find iSER export for volume %s" #~ msgstr "" -#~ msgid "Test: VSA update request: vsa_id=%(vsa_id)s values=%(values)s" +#~ msgid "ISER Discovery: Found %s" #~ msgstr "" -#~ msgid "Test: Volume create: %s" +#~ msgid "Failed to access the device on the path %(path)s: %(error)s." #~ msgstr "" -#~ msgid "Test: Volume get request: id=%(volume_id)s" +#~ msgid "iSER device not found at %s" #~ msgstr "" -#~ msgid "Test: Volume update request: id=%(volume_id)s values=%(values)s" +#~ msgid "Found iSER node %(host_device)s (after %(tries)s rescans)." #~ msgstr "" -#~ msgid "Test: Volume get: id=%(volume_id)s" +#~ msgid "Skipping ensure_export. No iser_target provisioned for volume: %s" #~ msgstr "" -#~ msgid "Task [%(name)s] %(task)s status: success %(result)s" +#~ msgid "Skipping remove_export. No iser_target provisioned for volume: %s" #~ msgstr "" -#~ msgid "Task [%(name)s] %(task)s status: %(status)s %(error_info)s" +#~ msgid "Downloading image: %s from glance image server." #~ msgstr "" -#~ msgid "Unable to get updated status: %s" +#~ msgid "Uploading image: %s to the Glance image server." 
#~ msgstr "" -#~ msgid "" -#~ "deactivate_node is called for " -#~ "node_id = %(id)s node_ip = %(ip)s" +#~ msgid "Invalid request body" #~ msgstr "" -#~ msgid "virsh said: %r" +#~ msgid "enter: _get_host_from_connector: prefix %s" #~ msgstr "" -#~ msgid "cool, it's a device" +#~ msgid "Schedule volume flow not retrieved" #~ msgstr "" -#~ msgid "Unable to read LXC console" +#~ msgid "Failed to successfully complete schedule volume using flow: %s" #~ msgstr "" -#~ msgid "" -#~ "to xml...\n" -#~ ":%s " +#~ msgid "Create volume flow not retrieved" #~ msgstr "" -#~ msgid "During wait running, %s disappeared." +#~ msgid "Failed to successfully complete create volume workflow" #~ msgstr "" -#~ msgid "Instance %s running successfully." +#~ msgid "Expected volume result not found" #~ msgstr "" -#~ msgid "The nwfilter(%(instance_secgroup_filter_name)s) is not found." +#~ msgid "Manager volume flow not retrieved" #~ msgstr "" -#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image verification failed" +#~ msgid "Failed to successfully complete manager volume workflow" #~ msgstr "" -#~ msgid "" -#~ "%(container_format)s-%(id)s (%(base_file)s): image " -#~ "verification skipped, no hash stored" +#~ msgid "Unable to update stats, driver is uninitialized" #~ msgstr "" -#~ msgid "%(container_format)s-%(id)s (%(base_file)s): checking" +#~ msgid "Bad reponse from server: %s" #~ msgstr "" -#~ msgid "" -#~ "%(container_format)s-%(id)s (%(base_file)s): in use:" -#~ " on this node %(local)d local, " -#~ "%(remote)d on other nodes" +#~ msgid "%(flow)s has moved into state %(state)s from state %(old_state)s" #~ msgstr "" -#~ msgid "" -#~ "%(container_format)s-%(id)s (%(base_file)s): warning " -#~ "-- an absent base file is in " -#~ "use! instances: %(instance_list)s" +#~ msgid "No request spec, will not reschedule" #~ msgstr "" -#~ msgid "" -#~ "%(container_format)s-%(id)s (%(base_file)s): in: on" -#~ " other nodes (%(remote)d on other " -#~ "nodes)" +#~ msgid "No retry filter property or associated retry info, will not reschedule" #~ msgstr "" -#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image is not in use" +#~ msgid "Retry info not present, will not reschedule" #~ msgstr "" -#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image is in use" +#~ msgid "Clear capabilities" #~ msgstr "" -#~ msgid "Created VM %s..." +#~ msgid "This usually means the volume was never succesfully created." #~ msgstr "" -#~ msgid "Created VM %(instance_name)s as %(vm_ref)s." +#~ msgid "setting LU uppper (end) limit to %s" #~ msgstr "" -#~ msgid "Creating a CDROM-specific VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +#~ msgid "Can't find lun or lun goup in array" #~ msgstr "" -#~ msgid "" -#~ "Created a CDROM-specific VBD %(vbd_ref)s" -#~ " for VM %(vm_ref)s, VDI %(vdi_ref)s." +#~ msgid "Volume to be restored to is smaller than the backup to be restored" #~ msgstr "" -#~ msgid "No primary VDI found for%(vm_ref)s" +#~ msgid "Volume driver '%(driver)s' not initialized." #~ msgstr "" -#~ msgid "Snapshotting VM %(vm_ref)s with label '%(label)s'..." +#~ msgid "in looping call" #~ msgstr "" -#~ msgid "Created snapshot %(template_vm_ref)s from VM %(vm_ref)s." +#~ msgid "Is the appropriate service running?" 
#~ msgstr "" -#~ msgid "Fetching image %(image)s" +#~ msgid "Could not find another host" #~ msgstr "" -#~ msgid "Image Type: %s" +#~ msgid "Not enough allocatable volume gigabytes remaining" #~ msgstr "" -#~ msgid "ISO: Found sr possibly containing the ISO image" +#~ msgid "Unable to update stats on non-intialized Volume Group: %s" #~ msgstr "" -#~ msgid "Size for image %(image)s:%(virtual_size)d" +#~ msgid "do_setup: Pool %s does not exist" #~ msgstr "" -#~ msgid "instance %s: Failed to fetch glance image" +#~ msgid "migrate_volume started with more than one vdisk copy" #~ msgstr "" -#~ msgid "Creating VBD for VDI %s ... " +#~ msgid "migrate_volume: Could not get vdisk copy data" #~ msgstr "" -#~ msgid "Creating VBD for VDI %s done." +#~ msgid "Selected datastore: %s for the volume." #~ msgstr "" -#~ msgid "VBD.unplug successful first time." +#~ msgid "There are no valid datastores present under %s." #~ msgstr "" -#~ msgid "VBD.unplug rejected: retrying..." +#~ msgid "Unable to create volume, driver not initialized" #~ msgstr "" -#~ msgid "Not sleeping anymore!" +#~ msgid "Migration %(migration_id)s could not be found." #~ msgstr "" -#~ msgid "VBD.unplug successful eventually." +#~ msgid "Bad driver response status: %(status)s" #~ msgstr "" -#~ msgid "Ignoring XenAPI.Failure in VBD.unplug: %s" +#~ msgid "Instance %(instance_id)s could not be found." #~ msgstr "" -#~ msgid "Ignoring XenAPI.Failure %s" +#~ msgid "Volume retype failed: %(reason)s" #~ msgstr "" -#~ msgid "Starting instance %s" +#~ msgid "SIGTERM received" #~ msgstr "" -#~ msgid "instance %s: Failed to spawn" +#~ msgid "Child %(pid)d exited with status %(code)d" #~ msgstr "" -#~ msgid "Instance %s failed to spawn - performing clean-up" +#~ msgid "_wait_child %d" #~ msgstr "" -#~ msgid "instance %s: Failed to spawn - Unable to create VM" +#~ msgid "wait wrap.failed %s" #~ msgstr "" #~ msgid "" -#~ "Auto configuring disk for instance " -#~ "%(instance_uuid)s, attempting to resize " -#~ "partition..." +#~ "Report interval must be less than " +#~ "service down time. Current config: " +#~ ". Setting " +#~ "service_down_time to: %(new_service_down_time)s" #~ msgstr "" -#~ msgid "Invalid value for injected_files: '%s'" +#~ msgid "Failed to update iscsi target for volume %(name)s." #~ msgstr "" -#~ msgid "Starting VM %s..." +#~ msgid "Updating iscsi target: %s" #~ msgstr "" -#~ msgid "Spawning VM %(instance_uuid)s created %(vm_ref)s." +#~ msgid "Failed to update iscsi target %(name)s: %(e)s" #~ msgstr "" -#~ msgid "Instance %s: waiting for running" +#~ msgid "Caught '%(exception)s' exception." #~ msgstr "" -#~ msgid "Instance %s: running" +#~ msgid "Get code level failed" #~ msgstr "" -#~ msgid "Resources to remove:%s" +#~ msgid "do_setup: Could not get system name" #~ msgstr "" -#~ msgid "Removing VDI %(vdi_ref)s(uuid:%(vdi_to_remove)s)" +#~ msgid "Failed to get license information." #~ msgstr "" -#~ msgid "Skipping VDI destroy for %s" +#~ msgid "do_setup: No configured nodes" #~ msgstr "" -#~ msgid "Finished snapshot and upload for VM %s" +#~ msgid "enter: _get_chap_secret_for_host: host name %s" #~ msgstr "" -#~ msgid "Starting snapshot for VM %s" +#~ msgid "" +#~ "leave: _get_chap_secret_for_host: host name " +#~ "%(host_name)s with secret %(chap_secret)s" #~ msgstr "" -#~ msgid "Unable to Snapshot instance %(instance_uuid)s: %(exc)s" +#~ msgid "" +#~ "_create_host: Cannot clean host name. 
" +#~ "Host name is not unicode or string" #~ msgstr "" -#~ msgid "Updating instance '%(instance_uuid)s' progress to %(progress)d" +#~ msgid "enter: _get_host_from_connector: %s" #~ msgstr "" -#~ msgid "Resize instance %s complete" +#~ msgid "leave: _get_host_from_connector: host %s" #~ msgstr "" -#~ msgid "domid changed from %(olddomid)s to %(newdomid)s" +#~ msgid "enter: _create_host: host %s" #~ msgstr "" -#~ msgid "VM %(instance_uuid)s already halted,skipping shutdown..." +#~ msgid "_create_host: No connector ports" #~ msgstr "" -#~ msgid "Shutting down VM for Instance %(instance_uuid)s" +#~ msgid "leave: _create_host: host %(host)s - %(host_name)s" #~ msgstr "" -#~ msgid "Destroying VDIs for Instance %(instance_uuid)s" +#~ msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" #~ msgstr "" #~ msgid "" -#~ "Instance %(instance_uuid)s using RAW or " -#~ "VHD, skipping kernel and ramdisk " -#~ "deletion" +#~ "storwize_svc_multihostmap_enabled is set to " +#~ "False, Not allow multi host mapping" #~ msgstr "" -#~ msgid "Instance %(instance_uuid)s VM destroyed" +#~ msgid "volume %s mapping to multi host" #~ msgstr "" -#~ msgid "Destroying VM for Instance %(instance_uuid)s" +#~ msgid "" +#~ "leave: _map_vol_to_host: LUN %(result_lun)s, " +#~ "volume %(volume_name)s, host %(host_name)s" #~ msgstr "" -#~ msgid "Automatically hard rebooting %d" +#~ msgid "enter: _delete_host: host %s " #~ msgstr "" -#~ msgid "Instance for migration %d not found, skipping" +#~ msgid "leave: _delete_host: host %s " #~ msgstr "" -#~ msgid "injecting network info to xs for vm: |%s|" +#~ msgid "_create_host failed to return the host name." #~ msgstr "" -#~ msgid "creating vif(s) for vm: |%s|" +#~ msgid "_get_host_from_connector failed to return the host name for connector" #~ msgstr "" -#~ msgid "Creating VIF for VM %(vm_ref)s, network %(network_ref)s." +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to any host found." #~ msgstr "" -#~ msgid "Created VIF %(vif_ref)s for VM %(vm_ref)s, network %(network_ref)s." +#~ msgid "" +#~ "terminate_connection: Multiple mappings of " +#~ "volume %(vol_name)s found, no host " +#~ "specified." #~ msgstr "" -#~ msgid "injecting hostname to xs for vm: |%s|" +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to host %(host_name)s found" #~ msgstr "" -#~ msgid "" -#~ "The agent call to %(method)s returned" -#~ " an invalid response: %(ret)r. VM " -#~ "id=%(instance_uuid)s; path=%(path)s; args=%(addl_args)r" +#~ msgid "protocol must be specified as ' iSCSI' or ' FC'" #~ msgstr "" -#~ msgid "" -#~ "TIMEOUT: The call to %(method)s timed" -#~ " out. VM id=%(instance_uuid)s; args=%(args)r" +#~ msgid "enter: _create_vdisk: vdisk %s " #~ msgstr "" #~ msgid "" -#~ "NOT IMPLEMENTED: The call to %(method)s" -#~ " is not supported by the agent. " -#~ "VM id=%(instance_uuid)s; args=%(args)r" +#~ "_create_vdisk %(name)s - did not find success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" #~ msgid "" -#~ "The call to %(method)s returned an " -#~ "error: %(e)s. VM id=%(instance_uuid)s; " -#~ "args=%(args)r" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find success " +#~ "message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" #~ msgstr "" -#~ msgid "Creating VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... 
" +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find mapping " +#~ "id in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" #~ msgstr "" -#~ msgid "Error destroying VDI" +#~ msgid "" +#~ "_prepare_fc_map: Failed to prepare FlashCopy" +#~ " from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" -#~ msgid "Volume status must be available" +#~ msgid "" +#~ "Unexecpted mapping status %(status)s for " +#~ "mapping %(id)s. Attributes: %(attr)s" #~ msgstr "" -#~ msgid "\tVolume %s is NOT VSA volume" +#~ msgid "" +#~ "Mapping %(id)s prepare failed to " +#~ "complete within the allotted %(to)d " +#~ "seconds timeout. Terminating." #~ msgstr "" -#~ msgid "\tFE VSA Volume %s creation - do nothing" +#~ msgid "" +#~ "_prepare_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s with " +#~ "exception %(ex)s" #~ msgstr "" -#~ msgid "VSA BE create_volume for %s failed" +#~ msgid "_prepare_fc_map: %s" #~ msgstr "" -#~ msgid "VSA BE create_volume for %s succeeded" +#~ msgid "" +#~ "_start_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s deletion - do nothing" +#~ msgid "" +#~ "enter: _run_flashcopy: execute FlashCopy from" +#~ " source %(source)s to target %(target)s" #~ msgstr "" -#~ msgid "VSA BE delete_volume for %s failed" +#~ msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" #~ msgstr "" -#~ msgid "VSA BE delete_volume for %s suceeded" +#~ msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s local path call - call discover" +#~ msgid "_create_copy: Source vdisk %(src_vdisk)s (%(src_id)s) does not exist" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s ensure export - do nothing" +#~ msgid "" +#~ "_create_copy: cannot get source vdisk " +#~ "%(src)s capacity from vdisk attributes " +#~ "%(attr)s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s create export - do nothing" +#~ msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s remove export - do nothing" +#~ msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" #~ msgstr "" -#~ msgid "VSA BE remove_export for %s failed" +#~ msgid "" +#~ "leave: _get_flashcopy_mapping_attributes: mapping " +#~ "%(fc_map_id)s, attributes %(attributes)s" #~ msgstr "" -#~ msgid "Failed to retrieve QoS info" +#~ msgid "enter: _is_vdisk_defined: vdisk %s " #~ msgstr "" -#~ msgid "invalid drive data" +#~ msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " #~ msgstr "" -#~ msgid "drive_name not defined" +#~ msgid "enter: _delete_vdisk: vdisk %s" #~ msgstr "" -#~ msgid "invalid drive type name %s" +#~ msgid "warning: Tried to delete vdisk %s but it does not exist." #~ msgstr "" -#~ msgid "*** Experimental VSA code ***" +#~ msgid "leave: _delete_vdisk: vdisk %s" #~ msgstr "" -#~ msgid "Requested number of VCs (%d) is too high. 
Setting to default" +#~ msgid "" +#~ "_add_vdisk_copy %(name)s - did not find" +#~ " success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" -#~ msgid "Creating VSA: %s" +#~ msgid "_get_vdisk_copy_attrs: Could not get vdisk copy data" #~ msgstr "" -#~ msgid "" -#~ "VSA ID %(vsa_id)d %(vsa_name)s: Create " -#~ "volume %(vol_name)s, %(vol_size)d GB, type " -#~ "%(vol_type_id)s" +#~ msgid "_get_pool_attrs: Pool %s does not exist" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)d: Update VSA status to %(status)s" +#~ msgid "enter: _execute_command_and_parse_attributes: command %s" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)d: Update VSA call" +#~ msgid "" +#~ "leave: _execute_command_and_parse_attributes:\n" +#~ "command: %(cmd)s\n" +#~ "attributes: %(attr)s" #~ msgstr "" -#~ msgid "Adding %(add_cnt)s VCs to VSA %(vsa_name)s." +#~ msgid "" +#~ "_get_hdr_dic: attribute headers and values do not match.\n" +#~ " Headers: %(header)s\n" +#~ " Values: %(row)s" #~ msgstr "" -#~ msgid "Deleting %(del_cnt)s VCs from VSA %(vsa_name)s." +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ "stdout: %(out)s\n" +#~ "stderr: %(err)s\n" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Deleting %(direction)s volume %(vol_name)s" +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" -#~ msgid "Unable to delete volume %s" +#~ msgid "Did not find expected column in %(fun)s: %(hdr)s" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Forced delete. %(direction)s volume %(vol_name)s" +#~ msgid "" +#~ "Volume size %(size)s cannot be lesser" +#~ " than the snapshot size %(snap_size)s. " +#~ "They must be >= original snapshot " +#~ "size." #~ msgstr "" -#~ msgid "Going to try to terminate VSA ID %s" +#~ msgid "" +#~ "Clones currently disallowed when %(size)s " +#~ "< %(source_size)s. They must be >= " +#~ "original volume size." #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Delete instance %(name)s" +#~ msgid "" +#~ "Size of specified image %(image_size)s " +#~ "is larger than volume size " +#~ "%(volume_size)s." #~ msgstr "" -#~ msgid "Create call received for VSA %s" +#~ msgid "" +#~ "Image minDisk size %(min_disk)s is " +#~ "larger than the volume size " +#~ "%(volume_size)s." #~ msgstr "" -#~ msgid "Failed to find VSA %(vsa_id)d" +#~ msgid "Updating volume %(volume_id)s with %(update)s" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Drive %(vol_id)s created. Status %(status)s" +#~ msgid "Volume %s: resetting 'creating' status failed" #~ msgstr "" -#~ msgid "Drive %(vol_name)s (%(vol_disp_name)s) still in creating phase - wait" +#~ msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s" #~ msgstr "" -#~ msgid "" -#~ "VSA ID %(vsa_id)d: Not all volumes " -#~ "are created (%(cvol_real)d of %(cvol_exp)d)" +#~ msgid "Marking volume %s as bootable" #~ msgstr "" #~ msgid "" -#~ "VSA ID %(vsa_id)d: Drive %(vol_name)s " -#~ "(%(vol_disp_name)s) is in %(status)s state" +#~ "Attempting download of %(image_id)s " +#~ "(%(image_location)s) to volume %(volume_id)s" #~ msgstr "" -#~ msgid "Failed to update attach status for volume %(vol_name)s. 
%(ex)s" +#~ msgid "" +#~ "Downloaded image %(image_id)s (%(image_location)s)" +#~ " to volume %(volume_id)s successfully" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)d: Delete all BE volumes" +#~ msgid "" +#~ "Creating volume glance metadata for " +#~ "volume %(volume_id)s backed by image " +#~ "%(image_id)s with: %(vol_metadata)s" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)d: Start %(vc_count)d instances" +#~ msgid "" +#~ "Cloning %(volume_id)s from image %(image_id)s" +#~ " at location %(image_location)s" #~ msgstr "" diff --git a/cinder/locale/en_AU/LC_MESSAGES/cinder.po b/cinder/locale/en_AU/LC_MESSAGES/cinder.po index e23929f197..8f45a5b14a 100644 --- a/cinder/locale/en_AU/LC_MESSAGES/cinder.po +++ b/cinder/locale/en_AU/LC_MESSAGES/cinder.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: cinder\n" "Report-Msgid-Bugs-To: FULL NAME \n" -"POT-Creation-Date: 2012-04-08 23:04+0000\n" +"POT-Creation-Date: 2014-02-03 06:16+0000\n" "PO-Revision-Date: 2011-10-21 11:27+0000\n" "Last-Translator: Tom Fifield \n" "Language-Team: English (Australia) \n" @@ -15,8195 +15,10746 @@ msgstr "" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 0.9.6\n" +"Generated-By: Babel 1.3\n" -#: cinder/context.py:59 +#: cinder/context.py:61 #, python-format msgid "Arguments dropped when creating context: %s" msgstr "" -#: cinder/context.py:90 +#: cinder/context.py:102 #, python-format msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" msgstr "" -#: cinder/crypto.py:48 -msgid "Filename of root CA" -msgstr "Filename of root CA" - -#: cinder/crypto.py:51 -msgid "Filename of private key" -msgstr "Filename of private key" - -#: cinder/crypto.py:54 -msgid "Filename of root Certificate Revocation List" -msgstr "" - -#: cinder/crypto.py:57 -msgid "Where we keep our keys" -msgstr "Where we keep our keys" - -#: cinder/crypto.py:60 -msgid "Where we keep our root CA" -msgstr "Where we keep our root CA" - -#: cinder/crypto.py:63 -msgid "Should we use a CA for each project?" -msgstr "Should we use a CA for each project?" - -#: cinder/crypto.py:67 -#, python-format -msgid "Subject for certificate for users, %s for project, user, timestamp" -msgstr "Subject for certificate for users, %s for project, user, timestamp" - -#: cinder/crypto.py:72 -#, python-format -msgid "Subject for certificate for projects, %s for project, timestamp" -msgstr "Subject for certificate for projects, %s for project, timestamp" - -#: cinder/crypto.py:292 -#, python-format -msgid "Flags path: %s" -msgstr "Flags path: %s" - -#: cinder/exception.py:56 -msgid "Unexpected error while running command." -msgstr "Unexpected error while running command." - -#: cinder/exception.py:59 -#, python-format -msgid "" -"%(description)s\n" -"Command: %(cmd)s\n" -"Exit code: %(exit_code)s\n" -"Stdout: %(stdout)r\n" -"Stderr: %(stderr)r" -msgstr "" -"%(description)s\n" -"Command: %(cmd)s\n" -"Exit code: %(exit_code)s\n" -"Stdout: %(stdout)r\n" -"Stderr: %(stderr)r" - -#: cinder/exception.py:94 -msgid "DB exception wrapped." -msgstr "" - -#: cinder/exception.py:155 +#: cinder/exception.py:66 cinder/brick/exception.py:33 msgid "An unknown exception occurred." 
msgstr "" -#: cinder/exception.py:178 -msgid "Failed to decrypt text" -msgstr "" - -#: cinder/exception.py:182 -msgid "Failed to paginate through images from image service" +#: cinder/exception.py:88 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" msgstr "" -#: cinder/exception.py:186 -msgid "Virtual Interface creation failed" +#: cinder/exception.py:107 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" msgstr "" -#: cinder/exception.py:190 -msgid "5 attempts to create virtual interfacewith unique mac address failed" +#: cinder/exception.py:112 +#, python-format +msgid "Volume driver reported an error: %(message)s" msgstr "" -#: cinder/exception.py:195 -msgid "Connection to glance failed" +#: cinder/exception.py:116 +#, python-format +msgid "Backup driver reported an error: %(message)s" msgstr "" -#: cinder/exception.py:199 -msgid "Connection to melange failed" +#: cinder/exception.py:120 +#, python-format +msgid "Connection to glance failed: %(reason)s" msgstr "" -#: cinder/exception.py:203 +#: cinder/exception.py:124 msgid "Not authorized." msgstr "" -#: cinder/exception.py:208 +#: cinder/exception.py:129 msgid "User does not have admin privileges" msgstr "" -#: cinder/exception.py:212 +#: cinder/exception.py:133 #, python-format msgid "Policy doesn't allow %(action)s to be performed." msgstr "" -#: cinder/exception.py:216 +#: cinder/exception.py:137 #, fuzzy, python-format msgid "Not authorized for image %(image_id)s." -msgstr "no method for message: %s" +msgstr "Not authorized for image %(image_id)s." + +#: cinder/exception.py:141 +msgid "Volume driver not ready." +msgstr "" -#: cinder/exception.py:220 +#: cinder/exception.py:145 cinder/brick/exception.py:74 msgid "Unacceptable parameters." msgstr "" -#: cinder/exception.py:225 -msgid "Invalid snapshot" +#: cinder/exception.py:150 +#, python-format +msgid "Invalid snapshot: %(reason)s" msgstr "" -#: cinder/exception.py:229 +#: cinder/exception.py:154 #, python-format -msgid "Volume %(volume_id)s is not attached to anything" +msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." msgstr "" -#: cinder/exception.py:233 cinder/api/openstack/compute/contrib/keypairs.py:113 -msgid "Keypair data is invalid" +#: cinder/exception.py:159 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." msgstr "" -#: cinder/exception.py:237 +#: cinder/exception.py:163 msgid "Failed to load data into json format" msgstr "" -#: cinder/exception.py:241 -msgid "The request is invalid." +#: cinder/exception.py:167 +msgid "The results are invalid." msgstr "" -#: cinder/exception.py:245 +#: cinder/exception.py:171 #, python-format -msgid "Invalid signature %(signature)s for user %(user)s." +msgid "Invalid input received: %(reason)s" msgstr "" -#: cinder/exception.py:249 -msgid "Invalid input received" +#: cinder/exception.py:175 +#, python-format +msgid "Invalid volume type: %(reason)s" msgstr "" -#: cinder/exception.py:253 +#: cinder/exception.py:179 #, python-format -msgid "Invalid instance type %(instance_type)s." +msgid "Invalid volume: %(reason)s" msgstr "" -#: cinder/exception.py:257 -msgid "Invalid volume type" +#: cinder/exception.py:183 +#, python-format +msgid "Invalid content type %(content_type)s." 
msgstr "" -#: cinder/exception.py:261 -msgid "Invalid volume" +#: cinder/exception.py:187 +#, python-format +msgid "Invalid host: %(reason)s" msgstr "" -#: cinder/exception.py:265 +#: cinder/exception.py:193 cinder/brick/exception.py:81 #, python-format -msgid "Invalid port range %(from_port)s:%(to_port)s. %(msg)s" +msgid "%(err)s" msgstr "" -#: cinder/exception.py:269 +#: cinder/exception.py:197 #, python-format -msgid "Invalid IP protocol %(protocol)s." +msgid "Invalid auth key: %(reason)s" msgstr "" -#: cinder/exception.py:273 +#: cinder/exception.py:201 #, python-format -msgid "Invalid content type %(content_type)s." +msgid "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" msgstr "" -#: cinder/exception.py:277 -#, python-format -msgid "Invalid cidr %(cidr)s." +#: cinder/exception.py:206 +msgid "Service is unavailable at this time." msgstr "" -#: cinder/exception.py:281 -msgid "Invalid reuse of an RPC connection." +#: cinder/exception.py:210 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" msgstr "" -#: cinder/exception.py:285 -msgid "Invalid Parameter: Unicode is not supported by the current database." +#: cinder/exception.py:214 +#, python-format +msgid "The device in the path %(path)s is unavailable: %(reason)s" msgstr "" -#: cinder/exception.py:292 +#: cinder/exception.py:218 #, python-format -msgid "%(err)s" +msgid "Expected a uuid but received %(uuid)s." +msgstr "" + +#: cinder/exception.py:222 cinder/brick/exception.py:68 +msgid "Resource could not be found." msgstr "" -#: cinder/exception.py:296 +#: cinder/exception.py:228 #, python-format -msgid "" -"Cannot perform action '%(action)s' on aggregate %(aggregate_id)s. Reason:" -" %(reason)s." +msgid "Volume %(volume_id)s could not be found." msgstr "" -#: cinder/exception.py:301 +#: cinder/exception.py:232 #, python-format -msgid "Group not valid. Reason: %(reason)s" +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." msgstr "" -#: cinder/exception.py:305 +#: cinder/exception.py:237 #, python-format msgid "" -"Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot %(method)s while" -" the instance is in this state." +"Volume %(volume_id)s has no administration metadata with key " +"%(metadata_key)s." msgstr "" -#: cinder/exception.py:310 +#: cinder/exception.py:242 #, python-format -msgid "Instance %(instance_id)s is not running." +msgid "Invalid metadata: %(reason)s" msgstr "" -#: cinder/exception.py:314 +#: cinder/exception.py:246 #, python-format -msgid "Instance %(instance_id)s is not suspended." +msgid "Invalid metadata size: %(reason)s" msgstr "" -#: cinder/exception.py:318 +#: cinder/exception.py:250 #, python-format -msgid "Instance %(instance_id)s is not in rescue mode" +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." msgstr "" -#: cinder/exception.py:322 -msgid "Failed to suspend instance" +#: cinder/exception.py:255 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." msgstr "" -#: cinder/exception.py:326 -msgid "Failed to resume server" +#: cinder/exception.py:259 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." msgstr "" -#: cinder/exception.py:330 -msgid "Failed to reboot instance" +#: cinder/exception.py:264 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." 
msgstr "" -#: cinder/exception.py:334 -#, fuzzy -msgid "Failed to terminate instance" -msgstr "Going to start terminating instances" - -#: cinder/exception.py:338 -msgid "Service is unavailable at this time." +#: cinder/exception.py:269 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s deletion is not allowed with volumes " +"present with the type." msgstr "" -#: cinder/exception.py:342 -msgid "Volume service is unavailable at this time." +#: cinder/exception.py:274 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." msgstr "" -#: cinder/exception.py:346 -msgid "Compute service is unavailable at this time." +#: cinder/exception.py:278 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" msgstr "" -#: cinder/exception.py:350 +#: cinder/exception.py:282 #, python-format -msgid "Unable to migrate instance (%(instance_id)s) to current host (%(host)s)." +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" msgstr "" -#: cinder/exception.py:355 -msgid "Destination compute host is unavailable at this time." +#: cinder/exception.py:287 +#, python-format +msgid "No target id found for volume %(volume_id)s." msgstr "" -#: cinder/exception.py:359 -msgid "Original compute host is unavailable at this time." +#: cinder/exception.py:291 +#, python-format +msgid "Invalid image href %(image_href)s." msgstr "" -#: cinder/exception.py:363 -msgid "The supplied hypervisor type of is invalid." +#: cinder/exception.py:295 +#, python-format +msgid "Image %(image_id)s could not be found." msgstr "" -#: cinder/exception.py:367 -msgid "The instance requires a newer hypervisor version than has been provided." +#: cinder/exception.py:299 +#, python-format +msgid "Service %(service_id)s could not be found." msgstr "" -#: cinder/exception.py:372 +#: cinder/exception.py:303 #, python-format -msgid "" -"The supplied disk path (%(path)s) already exists, it is expected not to " -"exist." +msgid "Host %(host)s could not be found." msgstr "" -#: cinder/exception.py:377 +#: cinder/exception.py:307 #, python-format -msgid "The supplied device path (%(path)s) is invalid." +msgid "Scheduler Host Filter %(filter_name)s could not be found." msgstr "" -#: cinder/exception.py:381 +#: cinder/exception.py:311 #, python-format -msgid "The supplied device (%(device)s) is busy." +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." msgstr "" -#: cinder/exception.py:385 -msgid "Unacceptable CPU info" +#: cinder/exception.py:315 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." msgstr "" -#: cinder/exception.py:389 +#: cinder/exception.py:319 #, python-format -msgid "%(address)s is not a valid IP v4/6 address." +msgid "Invalid reservation expiration %(expire)s." msgstr "" -#: cinder/exception.py:393 +#: cinder/exception.py:323 #, python-format msgid "" -"VLAN tag is not appropriate for the port group %(bridge)s. Expected VLAN " -"tag is %(tag)s, but the one associated with the port group is %(pgroup)s." +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:328 +msgid "Quota could not be found" msgstr "" -#: cinder/exception.py:399 +#: cinder/exception.py:332 #, python-format -msgid "" -"vSwitch which contains the port group %(bridge)s is not associated with " -"the desired physical adapter. Expected vSwitch is %(expected)s, but the " -"one associated is %(actual)s." +msgid "Unknown quota resources %(unknown)s." 
msgstr "" -#: cinder/exception.py:406 +#: cinder/exception.py:336 #, python-format -msgid "Disk format %(disk_format)s is not acceptable" +msgid "Quota for project %(project_id)s could not be found." msgstr "" -#: cinder/exception.py:410 +#: cinder/exception.py:340 #, python-format -msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgid "Quota class %(class_name)s could not be found." msgstr "" -#: cinder/exception.py:414 +#: cinder/exception.py:344 #, python-format -msgid "Instance %(instance_id)s is unacceptable: %(reason)s" +msgid "Quota usage for project %(project_id)s could not be found." msgstr "" -#: cinder/exception.py:418 +#: cinder/exception.py:348 #, python-format -msgid "Ec2 id %(ec2_id)s is unacceptable." +msgid "Quota reservation %(uuid)s could not be found." msgstr "" -#: cinder/exception.py:422 -msgid "Resource could not be found." +#: cinder/exception.py:352 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" msgstr "" -#: cinder/exception.py:427 +#: cinder/exception.py:356 #, python-format -msgid "Required flag %(flag)s not set." +msgid "File %(file_path)s could not be found." msgstr "" -#: cinder/exception.py:431 +#: cinder/exception.py:365 +#, fuzzy, python-format +msgid "Volume Type %(id)s already exists." +msgstr "Volume Type %(id)s already exists." + +#: cinder/exception.py:369 #, python-format -msgid "Volume %(volume_id)s could not be found." +msgid "Volume type encryption for type %(type_id)s already exists." msgstr "" -#: cinder/exception.py:435 +#: cinder/exception.py:373 #, python-format -msgid "Unable to locate account %(account_name)s on Solidfire device" +msgid "Malformed message body: %(reason)s" msgstr "" -#: cinder/exception.py:440 +#: cinder/exception.py:377 #, python-format -msgid "Volume not found for instance %(instance_id)s." +msgid "Could not find config at %(path)s" msgstr "" -#: cinder/exception.py:444 +#: cinder/exception.py:381 +#, fuzzy, python-format +msgid "Could not find parameter %(param)s" +msgstr "Could not find parameter %(param)s" + +#: cinder/exception.py:385 #, python-format -msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgid "Could not load paste app '%(name)s' from %(path)s" msgstr "" -#: cinder/exception.py:449 -msgid "Zero volume types found." +#: cinder/exception.py:389 +#, python-format +msgid "No valid host was found. %(reason)s" msgstr "" -#: cinder/exception.py:453 +#: cinder/exception.py:398 #, python-format -msgid "Volume type %(volume_type_id)s could not be found." +msgid "Host %(host)s is not up or doesn't exist." msgstr "" -#: cinder/exception.py:457 +#: cinder/exception.py:402 #, python-format -msgid "Volume type with name %(volume_type_name)s could not be found." +msgid "Quota exceeded: code=%(code)s" msgstr "" -#: cinder/exception.py:462 +#: cinder/exception.py:409 #, python-format msgid "" -"Volume Type %(volume_type_id)s has no extra specs with key " -"%(extra_specs_key)s." +"Requested volume or snapshot exceeds allowed Gigabytes quota. Requested " +"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." msgstr "" -#: cinder/exception.py:467 +#: cinder/exception.py:415 #, python-format -msgid "Snapshot %(snapshot_id)s could not be found." 
+msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" msgstr "" -#: cinder/exception.py:471 +#: cinder/exception.py:419 #, python-format -msgid "deleting volume %(volume_name)s that has snapshot" +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" msgstr "" -#: cinder/exception.py:475 +#: cinder/exception.py:423 #, python-format -msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgid "Detected more than one volume with name %(vol_name)s" msgstr "" -#: cinder/exception.py:480 +#: cinder/exception.py:427 #, python-format -msgid "No target id found for volume %(volume_id)s." +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" msgstr "" -#: cinder/exception.py:484 +#: cinder/exception.py:432 #, python-format -msgid "No disk at %(location)s" +msgid "Unknown or unsupported command %(cmd)s" msgstr "" -#: cinder/exception.py:488 +#: cinder/exception.py:436 #, python-format -msgid "Could not find a handler for %(driver_type)s volume." +msgid "Malformed response to command %(cmd)s: %(reason)s" msgstr "" -#: cinder/exception.py:492 +#: cinder/exception.py:440 #, python-format -msgid "Invalid image href %(image_href)s." +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" msgstr "" -#: cinder/exception.py:496 +#: cinder/exception.py:444 +#, python-format msgid "" -"Some images have been stored via hrefs. This version of the api does not " -"support displaying image hrefs." +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" msgstr "" -#: cinder/exception.py:501 +#: cinder/exception.py:449 #, python-format -msgid "Image %(image_id)s could not be found." +msgid "Glance metadata for volume/snapshot %(id)s cannot be found." msgstr "" -#: cinder/exception.py:505 +#: cinder/exception.py:453 #, python-format -msgid "Kernel not found for image %(image_id)s." +msgid "Failed to export for volume: %(reason)s" msgstr "" -#: cinder/exception.py:509 +#: cinder/exception.py:457 #, python-format -msgid "User %(user_id)s could not be found." +msgid "Failed to create metadata for volume: %(reason)s" msgstr "" -#: cinder/exception.py:513 +#: cinder/exception.py:461 #, python-format -msgid "Project %(project_id)s could not be found." +msgid "Failed to update metadata for volume: %(reason)s" msgstr "" -#: cinder/exception.py:517 +#: cinder/exception.py:465 #, python-format -msgid "User %(user_id)s is not a member of project %(project_id)s." +msgid "Failed to copy metadata to volume: %(reason)s" msgstr "" -#: cinder/exception.py:521 -#, python-format -msgid "Role %(role_id)s could not be found." -msgstr "" +#: cinder/exception.py:469 +#, fuzzy, python-format +msgid "Failed to copy image to volume: %(reason)s" +msgstr "Failed to copy image to volume: %(reason)s" -#: cinder/exception.py:525 -msgid "Cannot find SR to read/write VDI." +#: cinder/exception.py:473 +msgid "Invalid Ceph args provided for backup rbd operation" msgstr "" -#: cinder/exception.py:529 -#, python-format -msgid "%(req)s is required to create a network." +#: cinder/exception.py:477 +msgid "An error has occurred during backup operation" msgstr "" -#: cinder/exception.py:533 -#, python-format -msgid "Network %(network_id)s could not be found." +#: cinder/exception.py:481 +msgid "Backup RBD operation failed" msgstr "" -#: cinder/exception.py:537 +#: cinder/exception.py:485 #, python-format -msgid "Network could not be found for bridge %(bridge)s" +msgid "Backup %(backup_id)s could not be found." 
msgstr "" -#: cinder/exception.py:541 -#, python-format -msgid "Network could not be found for uuid %(uuid)s" +#: cinder/exception.py:489 +msgid "Failed to identify volume backend." msgstr "" -#: cinder/exception.py:545 -#, python-format -msgid "Network could not be found with cidr %(cidr)s." -msgstr "" +#: cinder/exception.py:493 +#, fuzzy, python-format +msgid "Invalid backup: %(reason)s" +msgstr "Invalid backup: %(reason)s" -#: cinder/exception.py:549 +#: cinder/exception.py:497 #, python-format -msgid "Network could not be found for instance %(instance_id)s." +msgid "Connection to swift failed: %(reason)s" msgstr "" -#: cinder/exception.py:553 -msgid "No networks defined." +#: cinder/exception.py:501 +#, python-format +msgid "Transfer %(transfer_id)s could not be found." msgstr "" -#: cinder/exception.py:557 +#: cinder/exception.py:505 #, python-format -msgid "" -"Either Network uuid %(network_uuid)s is not present or is not assigned to" -" the project %(project_id)s." +msgid "Volume migration failed: %(reason)s" msgstr "" -#: cinder/exception.py:562 +#: cinder/exception.py:509 #, python-format -msgid "Host is not set to the network (%(network_id)s)." +msgid "SSH command injection detected: %(command)s" msgstr "" -#: cinder/exception.py:566 +#: cinder/exception.py:513 #, python-format -msgid "Network %(network)s has active ports, cannot delete." +msgid "QoS Specs %(specs_id)s already exists." msgstr "" -#: cinder/exception.py:570 -msgid "Could not find the datastore reference(s) which the VM uses." +#: cinder/exception.py:517 +#, python-format +msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." msgstr "" -#: cinder/exception.py:574 +#: cinder/exception.py:522 #, python-format -msgid "No fixed IP associated with id %(id)s." +msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." msgstr "" -#: cinder/exception.py:578 +#: cinder/exception.py:527 #, python-format -msgid "Fixed ip not found for address %(address)s." +msgid "No such QoS spec %(specs_id)s." msgstr "" -#: cinder/exception.py:582 +#: cinder/exception.py:531 #, python-format -msgid "Instance %(instance_id)s has zero fixed ips." +msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." msgstr "" -#: cinder/exception.py:586 +#: cinder/exception.py:536 #, python-format -msgid "Network host %(host)s has zero fixed ips in network %(network_id)s." +msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." msgstr "" -#: cinder/exception.py:591 +#: cinder/exception.py:541 #, python-format -msgid "Instance %(instance_id)s doesn't have fixed ip '%(ip)s'." +msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." msgstr "" -#: cinder/exception.py:595 +#: cinder/exception.py:546 #, python-format -msgid "Host %(host)s has zero fixed ips." +msgid "Invalid qos specs: %(reason)s" msgstr "" -#: cinder/exception.py:599 +#: cinder/exception.py:550 #, python-format -msgid "" -"Fixed IP address (%(address)s) does not exist in network " -"(%(network_uuid)s)." +msgid "QoS Specs %(specs_id)s is still associated with entities." msgstr "" -#: cinder/exception.py:604 +#: cinder/exception.py:554 #, python-format -msgid "Fixed IP address %(address)s is already in use." +msgid "key manager error: %(reason)s" msgstr "" -#: cinder/exception.py:608 -#, python-format -msgid "Fixed IP address %(address)s is invalid." +#: cinder/exception.py:560 +msgid "Coraid Cinder Driver exception." msgstr "" -#: cinder/exception.py:612 -msgid "Zero fixed ips available." 
+#: cinder/exception.py:564 +msgid "Failed to encode json data." msgstr "" -#: cinder/exception.py:616 -msgid "Zero fixed ips could be found." +#: cinder/exception.py:568 +msgid "Login on ESM failed." msgstr "" -#: cinder/exception.py:620 -#, python-format -msgid "Floating ip not found for id %(id)s." +#: cinder/exception.py:572 +msgid "Relogin on ESM failed." msgstr "" -#: cinder/exception.py:624 +#: cinder/exception.py:576 #, python-format -msgid "The DNS entry %(name)s already exists in domain %(domain)s." +msgid "Group with name \"%(group_name)s\" not found." msgstr "" -#: cinder/exception.py:628 +#: cinder/exception.py:580 #, python-format -msgid "Floating ip not found for address %(address)s." +msgid "ESM configure request failed: %(message)s." msgstr "" -#: cinder/exception.py:632 +#: cinder/exception.py:584 #, python-format -msgid "Floating ip not found for host %(host)s." +msgid "Coraid ESM not available with reason: %(reason)s." msgstr "" -#: cinder/exception.py:636 -msgid "Zero floating ips available." +#: cinder/exception.py:589 +msgid "Zadara Cinder Driver exception." msgstr "" -#: cinder/exception.py:640 -#, python-format -msgid "Floating ip %(address)s is associated." -msgstr "" +#: cinder/exception.py:593 +#, fuzzy, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "Unable to create server object for initiator %(name)s" -#: cinder/exception.py:644 +#: cinder/exception.py:597 #, python-format -msgid "Floating ip %(address)s is not associated." +msgid "Unable to find server object for initiator %(name)s" msgstr "" -#: cinder/exception.py:648 -msgid "Zero floating ips exist." +#: cinder/exception.py:601 +msgid "Unable to find any active VPSA controller" msgstr "" -#: cinder/exception.py:652 +#: cinder/exception.py:605 #, python-format -msgid "Interface %(interface)s not found." +msgid "Failed to retrieve attachments for volume %(name)s" msgstr "" -#: cinder/exception.py:656 +#: cinder/exception.py:609 #, python-format -msgid "Keypair %(name)s not found for user %(user_id)s" +msgid "Invalid attachment info for volume %(name)s: %(reason)s" msgstr "" -#: cinder/exception.py:660 +#: cinder/exception.py:613 #, python-format -msgid "Certificate %(certificate_id)s not found." +msgid "Bad HTTP response status %(status)s" msgstr "" -#: cinder/exception.py:664 -#, python-format -msgid "Service %(service_id)s could not be found." +#: cinder/exception.py:618 +msgid "Bad response from SolidFire API" msgstr "" -#: cinder/exception.py:668 -#, python-format -msgid "Host %(host)s could not be found." +#: cinder/exception.py:622 +msgid "SolidFire Cinder Driver exception" msgstr "" -#: cinder/exception.py:672 +#: cinder/exception.py:626 #, python-format -msgid "Compute host %(host)s could not be found." +msgid "Error in SolidFire API response: data=%(data)s" msgstr "" -#: cinder/exception.py:676 +#: cinder/exception.py:630 #, python-format -msgid "Could not find binary %(binary)s on host %(host)s." +msgid "Unable to locate account %(account_name)s on Solidfire device" msgstr "" -#: cinder/exception.py:680 +#: cinder/exception.py:636 #, python-format -msgid "Auth token %(token)s could not be found." +msgid "Invalid 3PAR Domain: %(err)s" msgstr "" -#: cinder/exception.py:684 -#, python-format -msgid "Access Key %(access_key)s could not be found." 
+#: cinder/exception.py:641 +msgid "Unknown NFS exception" msgstr "" -#: cinder/exception.py:688 -msgid "Quota could not be found" +#: cinder/exception.py:645 +msgid "No mounted NFS shares found" msgstr "" -#: cinder/exception.py:692 +#: cinder/exception.py:649 cinder/exception.py:662 #, python-format -msgid "Quota for project %(project_id)s could not be found." +msgid "There is no share which can host %(volume_size)sG" msgstr "" -#: cinder/exception.py:696 -#, python-format -msgid "Quota class %(class_name)s could not be found." +#: cinder/exception.py:654 +msgid "Unknown Gluster exception" msgstr "" -#: cinder/exception.py:700 -#, python-format -msgid "Security group %(security_group_id)s not found." +#: cinder/exception.py:658 +msgid "No mounted Gluster shares found" msgstr "" -#: cinder/exception.py:704 -#, python-format -msgid "Security group %(security_group_id)s not found for project %(project_id)s." +#: cinder/manager.py:133 +msgid "Notifying Schedulers of capabilities ..." msgstr "" -#: cinder/exception.py:709 -#, python-format -msgid "Security group with rule %(rule_id)s not found." +#: cinder/policy.py:30 +msgid "JSON file representing policy" msgstr "" -#: cinder/exception.py:713 -#, python-format -msgid "" -"Security group %(security_group_id)s is already associated with the " -"instance %(instance_id)s" +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" msgstr "" -#: cinder/exception.py:718 +#: cinder/quota.py:105 #, python-format msgid "" -"Security group %(security_group_id)s is not associated with the instance " -"%(instance_id)s" +"Default quota for resource: %(res)s is set by the default quota flag: " +"quota_%(res)s, it is now deprecated. Please use the the default quota " +"class for default quota." msgstr "" -#: cinder/exception.py:723 +#: cinder/quota.py:748 #, python-format -msgid "Migration %(migration_id)s could not be found." +msgid "Created reservations %s" msgstr "" -#: cinder/exception.py:727 +#: cinder/quota.py:770 #, python-format -msgid "Migration not found for instance %(instance_id)s with status %(status)s." +msgid "Failed to commit reservations %s" msgstr "" -#: cinder/exception.py:732 +#: cinder/quota.py:790 #, python-format -msgid "Console pool %(pool_id)s could not be found." +msgid "Failed to roll back reservations %s" +msgstr "" + +#: cinder/quota.py:876 +msgid "Cannot register resource" msgstr "" -#: cinder/exception.py:736 +#: cinder/quota.py:879 +msgid "Cannot register resources" +msgstr "" + +#: cinder/quota_utils.py:46 #, python-format msgid "" -"Console pool of type %(console_type)s for compute host %(compute_host)s " -"on proxy host %(host)s not found." +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume - " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" msgstr "" -#: cinder/exception.py:742 +#: cinder/quota_utils.py:56 cinder/transfer/api.py:168 +#: cinder/volume/flows/api/create_volume.py:520 #, python-format -msgid "Console %(console_id)s could not be found." +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" msgstr "" -#: cinder/exception.py:746 +#: cinder/service.py:95 +#, fuzzy, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "Starting %(topic)s node (version %(version_string)s)" + +#: cinder/service.py:108 cinder/openstack/common/rpc/service.py:47 #, python-format -msgid "Console for instance %(instance_id)s could not be found." 
+msgid "Creating Consumer connection for Service %s" msgstr "" -#: cinder/exception.py:750 +#: cinder/service.py:148 #, python-format msgid "" -"Console for instance %(instance_id)s in pool %(pool_id)s could not be " -"found." +"Report interval must be less than service down time. Current config " +"service_down_time: %(service_down_time)s, report_interval for this: " +"service is: %(report_interval)s. Setting global service_down_time to: " +"%(new_down_time)s" msgstr "" -#: cinder/exception.py:755 +#: cinder/service.py:216 +msgid "Service killed that has no database entry" +msgstr "Service killed that has no database entry" + +#: cinder/service.py:255 +msgid "The service database object disappeared, Recreating it." +msgstr "The service database object disappeared, Recreating it." + +#: cinder/service.py:270 +msgid "Recovered model server connection!" +msgstr "Recovered model server connection!" + +#: cinder/service.py:276 +msgid "model server went away" +msgstr "model server went away" + +#: cinder/service.py:298 #, python-format -msgid "Invalid console type %(console_type)s " +msgid "" +"Value of config option %(name)s_workers must be integer greater than 1. " +"Input value ignored." msgstr "" -#: cinder/exception.py:759 -msgid "Zero instance types found." +#: cinder/service.py:373 +msgid "serve() can only be called once" msgstr "" -#: cinder/exception.py:763 -#, python-format -msgid "Instance type %(instance_type_id)s could not be found." +#: cinder/service.py:379 cinder/openstack/common/service.py:166 +#: cinder/openstack/common/service.py:384 +msgid "Full set of CONF:" msgstr "" -#: cinder/exception.py:767 +#: cinder/service.py:387 #, python-format -msgid "Instance type with name %(instance_type_name)s could not be found." +msgid "%s : FLAG SET " msgstr "" -#: cinder/exception.py:772 +#: cinder/utils.py:96 #, python-format -msgid "Flavor %(flavor_id)s could not be found." +msgid "Can not translate %s to integer." msgstr "" -#: cinder/exception.py:776 +#: cinder/utils.py:127 #, python-format -msgid "Cell %(cell_id)s could not be found." +msgid "May specify only one of %s" msgstr "" -#: cinder/exception.py:780 -#, python-format -msgid "Scheduler Host Filter %(filter_name)s could not be found." +#: cinder/utils.py:212 +msgid "Specify a password or private_key" msgstr "" -#: cinder/exception.py:784 +#: cinder/utils.py:228 +#, fuzzy, python-format +msgid "Error connecting via ssh: %s" +msgstr "Error connecting via ssh: %s" + +#: cinder/utils.py:412 #, python-format -msgid "Scheduler cost function %(cost_fn_str)s could not be found." -msgstr "" +msgid "Invalid backend: %s" +msgstr "Invalid backend: %s" -#: cinder/exception.py:789 +#: cinder/utils.py:423 #, python-format -msgid "Scheduler weight flag not found: %(flag_name)s" -msgstr "" +msgid "backend %s" +msgstr "backend %s" -#: cinder/exception.py:793 +#: cinder/utils.py:698 #, python-format -msgid "Instance %(instance_id)s has no metadata with key %(metadata_key)s." +msgid "Could not remove tmpdir: %s" msgstr "" -#: cinder/exception.py:798 +#: cinder/utils.py:759 #, python-format -msgid "" -"Instance Type %(instance_type_id)s has no extra specs with key " -"%(extra_specs_key)s." 
+msgid "Volume driver %s not initialized" msgstr "" -#: cinder/exception.py:803 -msgid "LDAP object could not be found" +#: cinder/wsgi.py:127 cinder/openstack/common/sslutils.py:50 +#, fuzzy, python-format +msgid "Unable to find cert_file : %s" +msgstr "Unable to find cert_file : %s" + +#: cinder/wsgi.py:130 cinder/openstack/common/sslutils.py:53 +#, fuzzy, python-format +msgid "Unable to find ca_file : %s" +msgstr "Unable to find ca_file : %s" + +#: cinder/wsgi.py:133 cinder/openstack/common/sslutils.py:56 +#, fuzzy, python-format +msgid "Unable to find key_file : %s" +msgstr "Unable to find key_file : %s" + +#: cinder/wsgi.py:136 cinder/openstack/common/sslutils.py:59 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" msgstr "" -#: cinder/exception.py:807 +#: cinder/wsgi.py:169 #, python-format -msgid "LDAP user %(user_id)s could not be found." +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" msgstr "" -#: cinder/exception.py:811 +#: cinder/wsgi.py:206 #, python-format -msgid "LDAP group %(group_id)s could not be found." +msgid "Started %(name)s on %(host)s:%(port)s" msgstr "" -#: cinder/exception.py:815 -#, python-format -msgid "LDAP user %(user_id)s is not a member of group %(group_id)s." +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." msgstr "" -#: cinder/exception.py:819 -#, python-format -msgid "File %(file_path)s could not be found." +#: cinder/wsgi.py:244 +msgid "WSGI server has stopped." msgstr "" -#: cinder/exception.py:823 -msgid "Zero files could be found." +#: cinder/wsgi.py:313 +msgid "You must implement __call__" +msgstr "You must implement __call__" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." msgstr "" -#: cinder/exception.py:827 -#, python-format -msgid "Virtual switch associated with the network adapter %(adapter)s not found." +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." msgstr "" -#: cinder/exception.py:832 -#, python-format -msgid "Network adapter %(adapter)s could not be found." +#: cinder/api/common.py:92 cinder/api/common.py:126 cinder/volume/api.py:266 +msgid "limit param must be an integer" msgstr "" -#: cinder/exception.py:836 -#, python-format -msgid "Class %(class_name)s could not be found: %(exception)s" +#: cinder/api/common.py:95 cinder/api/common.py:130 cinder/volume/api.py:263 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:120 +msgid "offset param must be an integer" msgstr "" -#: cinder/exception.py:840 -msgid "Action not allowed." +#: cinder/api/common.py:134 +msgid "offset param must be positive" msgstr "" -#: cinder/exception.py:844 +#: cinder/api/common.py:162 #, python-format -msgid "Unable to use global role %(role_id)s" +msgid "marker [%s] not found" msgstr "" -#: cinder/exception.py:848 -msgid "Rotation is not allowed for snapshots" +#: cinder/api/common.py:189 +#, python-format +msgid "href %s does not contain version" msgstr "" -#: cinder/exception.py:852 -msgid "Rotation param is required for backup image_type" +#: cinder/api/extensions.py:182 +msgid "Initializing extension manager." msgstr "" -#: cinder/exception.py:861 +#: cinder/api/extensions.py:197 #, python-format -msgid "Key pair %(key_name)s already exists." 
+msgid "Loaded extension: %s" msgstr "" -#: cinder/exception.py:865 +#: cinder/api/extensions.py:235 #, python-format -msgid "User %(user)s already exists." +msgid "Ext name: %s" msgstr "" -#: cinder/exception.py:869 +#: cinder/api/extensions.py:236 #, python-format -msgid "LDAP user %(user)s already exists." +msgid "Ext alias: %s" msgstr "" -#: cinder/exception.py:873 +#: cinder/api/extensions.py:237 #, python-format -msgid "LDAP group %(group)s already exists." +msgid "Ext description: %s" msgstr "" -#: cinder/exception.py:877 +#: cinder/api/extensions.py:239 #, python-format -msgid "User %(uid)s is already a member of the group %(group_dn)s" -msgstr "User %(uid)s is already a member of the group %(group_dn)s" +msgid "Ext namespace: %s" +msgstr "" -#: cinder/exception.py:882 +#: cinder/api/extensions.py:240 #, python-format -msgid "Project %(project)s already exists." +msgid "Ext updated: %s" msgstr "" -#: cinder/exception.py:886 +#: cinder/api/extensions.py:242 #, python-format -msgid "Instance %(name)s already exists." +msgid "Exception loading extension: %s" msgstr "" -#: cinder/exception.py:890 +#: cinder/api/extensions.py:256 #, python-format -msgid "Instance Type %(name)s already exists." +msgid "Loading extension %s" msgstr "" -#: cinder/exception.py:894 +#: cinder/api/extensions.py:262 #, python-format -msgid "Volume Type %(name)s already exists." +msgid "Calling extension factory %s" msgstr "" -#: cinder/exception.py:898 +#: cinder/api/extensions.py:276 #, python-format -msgid "%(path)s is on shared storage: %(reason)s" +msgid "osapi_volume_extension is set to deprecated path: %s" msgstr "" -#: cinder/exception.py:902 -msgid "Migration error" +#: cinder/api/extensions.py:278 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" msgstr "" -#: cinder/exception.py:906 +#: cinder/api/extensions.py:287 #, python-format -msgid "Malformed message body: %(reason)s" +msgid "Failed to load extension %(ext_factory)s: %(exc)s" msgstr "" -#: cinder/exception.py:910 +#: cinder/api/extensions.py:356 #, python-format -msgid "Could not find config at %(path)s" +msgid "Failed to load extension %(classpath)s: %(exc)s" msgstr "" -#: cinder/exception.py:914 +#: cinder/api/extensions.py:381 #, python-format -msgid "Could not load paste app '%(name)s' from %(path)s" +msgid "Failed to load extension %(ext_name)s: %(exc)s" msgstr "" -#: cinder/exception.py:918 -msgid "When resizing, instances must change size!" +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" msgstr "" -#: cinder/exception.py:922 -msgid "Image is larger than instance type allows" +#: cinder/api/xmlutil.py:266 +msgid "element is not a child" msgstr "" -#: cinder/exception.py:926 -msgid "1 or more Zones could not complete the request" +#: cinder/api/xmlutil.py:463 +msgid "root element selecting a list" msgstr "" -#: cinder/exception.py:930 -msgid "Instance type's memory is too small for requested image." +#: cinder/api/xmlutil.py:786 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" msgstr "" -#: cinder/exception.py:934 -msgid "Instance type's disk is too small for requested image." +#: cinder/api/xmlutil.py:907 +msgid "subclasses must implement construct()!" 
msgstr "" -#: cinder/exception.py:938 +#: cinder/api/contrib/admin_actions.py:81 #, python-format -msgid "Insufficient free memory on compute node to start %(uuid)s." +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" msgstr "" -#: cinder/exception.py:942 -msgid "Could not fetch bandwidth/cpu/disk metrics for this host." +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" msgstr "" -#: cinder/exception.py:946 +#: cinder/api/contrib/backups.py:140 +#, fuzzy, python-format +msgid "delete called for member %s" +msgstr "delete called for member %s" + +#: cinder/api/contrib/backups.py:143 #, python-format -msgid "No valid host was found. %(reason)s" +msgid "Delete backup with id: %s" msgstr "" -#: cinder/exception.py:950 +#: cinder/api/contrib/backups.py:185 #, python-format -msgid "Host %(host)s is not up or doesn't exist." +msgid "Creating new backup %s" msgstr "" -#: cinder/exception.py:954 -msgid "Quota exceeded" +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:227 +#: cinder/api/contrib/volume_transfer.py:157 +#: cinder/api/contrib/volume_transfer.py:193 +msgid "Incorrect request body format" msgstr "" -#: cinder/exception.py:958 +#: cinder/api/contrib/backups.py:201 #, python-format -msgid "" -"Aggregate %(aggregate_id)s: action '%(action)s' caused an error: " -"%(reason)s." +msgid "Creating backup of volume %(volume_id)s in container %(container)s" msgstr "" -#: cinder/exception.py:963 +#: cinder/api/contrib/backups.py:224 #, python-format -msgid "Aggregate %(aggregate_id)s could not be found." +msgid "Restoring backup %(backup_id)s (%(body)s)" msgstr "" -#: cinder/exception.py:967 +#: cinder/api/contrib/backups.py:234 #, python-format -msgid "Aggregate %(aggregate_name)s already exists." +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" msgstr "" -#: cinder/exception.py:971 -#, python-format -msgid "Aggregate %(aggregate_id)s has no host %(host)s." +#: cinder/api/contrib/extended_snapshot_attributes.py:60 +msgid "Snapshot not found." msgstr "" -#: cinder/exception.py:975 -#, python-format -msgid "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s." +#: cinder/api/contrib/hosts.py:86 cinder/api/openstack/wsgi.py:245 +msgid "cannot understand XML" msgstr "" -#: cinder/exception.py:980 +#: cinder/api/contrib/hosts.py:136 #, python-format -msgid "Host %(host)s already member of another aggregate." +msgid "Host '%s' could not be found." msgstr "" -#: cinder/exception.py:984 +#: cinder/api/contrib/hosts.py:165 #, python-format -msgid "Aggregate %(aggregate_id)s already has host %(host)s." +msgid "Invalid status: '%s'" msgstr "" -#: cinder/exception.py:988 +#: cinder/api/contrib/hosts.py:168 #, python-format -msgid "Detected more than one volume with name %(vol_name)s" +msgid "Invalid update setting: '%s'" msgstr "" -#: cinder/exception.py:992 +#: cinder/api/contrib/hosts.py:180 #, python-format -msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgid "Setting host %(host)s to %(state)s." 
msgstr "" -#: cinder/exception.py:997 -msgid "Unable to create instance type" +#: cinder/api/contrib/hosts.py:206 +msgid "Describe-resource is admin only functionality" msgstr "" -#: cinder/exception.py:1001 -msgid "Bad response from SolidFire API" +#: cinder/api/contrib/hosts.py:214 +msgid "Host not found" msgstr "" -#: cinder/exception.py:1005 -#, python-format -msgid "Error in SolidFire API response: status=%(status)s" +#: cinder/api/contrib/qos_specs_manage.py:111 +msgid "Please specify a name for QoS specs." msgstr "" -#: cinder/exception.py:1009 -#, python-format -msgid "Error in SolidFire API response: data=%(data)s" +#: cinder/api/contrib/qos_specs_manage.py:220 +msgid "Failed to disassociate qos specs." msgstr "" -#: cinder/exception.py:1013 -#, python-format -msgid "Detected existing vlan with id %(vlan)d" +#: cinder/api/contrib/qos_specs_manage.py:222 +msgid "Qos specs still in use." msgstr "" -#: cinder/exception.py:1017 -#, python-format -msgid "Instance %(instance_id)s could not be found." +#: cinder/api/contrib/qos_specs_manage.py:298 +#: cinder/api/contrib/qos_specs_manage.py:351 +msgid "Volume Type id must not be None." msgstr "" -#: cinder/exception.py:1021 -#, python-format -msgid "Invalid id: %(val)s (expecting \"i-...\")." +#: cinder/api/contrib/quota_classes.py:72 +msgid "Missing required element quota_class_set in request body." msgstr "" -#: cinder/exception.py:1025 -#, fuzzy, python-format -msgid "Could not fetch image %(image)s" -msgstr "Could not attach image to loopback: %s" +#: cinder/api/contrib/quota_classes.py:81 +msgid "Quota class limit must be specified as an integer value." +msgstr "" -#: cinder/log.py:315 -#, python-format -msgid "syslog facility must be one of: %s" +#: cinder/api/contrib/quota_classes.py:85 +msgid "Quota class limit must be -1 or greater." msgstr "" -#: cinder/manager.py:146 -#, python-format -msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +#: cinder/api/contrib/quotas.py:60 +msgid "Quota limit must be specified as an integer value." msgstr "" -#: cinder/manager.py:152 -#, python-format -msgid "Running periodic task %(full_task_name)s" +#: cinder/api/contrib/quotas.py:65 +msgid "Quota limit must be -1 or greater." msgstr "" -#: cinder/manager.py:159 -#, python-format -msgid "Error during %(full_task_name)s: %(e)s" +#: cinder/api/contrib/quotas.py:100 +msgid "Missing required element quota_set in request body." msgstr "" -#: cinder/manager.py:203 -msgid "Notifying Schedulers of capabilities ..." +#: cinder/api/contrib/quotas.py:111 +#, python-format +msgid "Bad key(s) in quota set: %s" msgstr "" -#: cinder/policy.py:30 -msgid "JSON file representing policy" +#: cinder/api/contrib/scheduler_hints.py:36 +msgid "Malformed scheduler_hints attribute" msgstr "" -#: cinder/policy.py:33 -msgid "Rule checked when requested rule is not found" +#: cinder/api/contrib/services.py:84 +msgid "" +"Query by service parameter is deprecated. Please use binary parameter " +"instead." msgstr "" -#: cinder/service.py:137 -msgid "SIGTERM received" +#: cinder/api/contrib/snapshot_actions.py:51 +msgid "'status' must be specified." 
msgstr "" -#: cinder/service.py:177 +#: cinder/api/contrib/snapshot_actions.py:61 #, python-format -msgid "Starting %(topic)s node (version %(vcs_string)s)" -msgstr "Starting %(topic)s node (version %(vcs_string)s)" +msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" +msgstr "" -#: cinder/service.py:195 +#: cinder/api/contrib/snapshot_actions.py:67 #, python-format -msgid "Creating Consumer connection for Service %s" +msgid "" +"Provided snapshot status %(provided)s not allowed for snapshot with " +"status %(current)s." msgstr "" -#: cinder/service.py:282 -msgid "Service killed that has no database entry" -msgstr "Service killed that has no database entry" +#: cinder/api/contrib/snapshot_actions.py:79 +msgid "progress must be an integer percentage" +msgstr "" -#: cinder/service.py:319 -msgid "The service database object disappeared, Recreating it." -msgstr "The service database object disappeared, Recreating it." +#: cinder/api/contrib/types_extra_specs.py:101 +msgid "Request body empty" +msgstr "" -#: cinder/service.py:334 -msgid "Recovered model server connection!" -msgstr "Recovered model server connection!" +#: cinder/api/contrib/types_extra_specs.py:105 +#: cinder/api/v1/snapshot_metadata.py:75 cinder/api/v1/volume_metadata.py:75 +#: cinder/api/v2/snapshot_metadata.py:75 cinder/api/v2/volume_metadata.py:74 +msgid "Request body and URI mismatch" +msgstr "" -#: cinder/service.py:340 -msgid "model server went away" -msgstr "model server went away" +#: cinder/api/contrib/types_extra_specs.py:108 +#: cinder/api/v1/snapshot_metadata.py:79 cinder/api/v1/volume_metadata.py:79 +#: cinder/api/v2/snapshot_metadata.py:79 cinder/api/v2/volume_metadata.py:78 +msgid "Request body contains too many items" +msgstr "" -#: cinder/service.py:433 -msgid "Full set of FLAGS:" +#: cinder/api/contrib/types_extra_specs.py:150 +msgid "" +"Key names can only contain alphanumeric characters, underscores, periods," +" colons and hyphens." msgstr "" -#: cinder/service.py:440 +#: cinder/api/contrib/volume_actions.py:99 #, python-format -msgid "%(flag)s : FLAG SET " +msgid "" +"Invalid request to attach volume to an instance %(instance_uuid)s and a " +"host %(host_name)s simultaneously" msgstr "" -#: cinder/utils.py:79 -#, python-format -msgid "Inner Exception: %s" -msgstr "Inner Exception: %s" +#: cinder/api/contrib/volume_actions.py:107 +msgid "Invalid request to attach volume to an invalid target" +msgstr "" -#: cinder/utils.py:165 -#, python-format -msgid "Fetching %s" -msgstr "Fetching %s" +#: cinder/api/contrib/volume_actions.py:111 +msgid "" +"Invalid request to attach volume with an invalid mode. Attaching mode " +"should be 'rw' or 'ro'" +msgstr "" -#: cinder/utils.py:210 -#, python-format -msgid "Got unknown keyword args to utils.execute: %r" +#: cinder/api/contrib/volume_actions.py:196 +msgid "Unable to fetch connection information from backend." msgstr "" -#: cinder/utils.py:220 -#, python-format -msgid "Running cmd (subprocess): %s" -msgstr "Running cmd (subprocess): %s" +#: cinder/api/contrib/volume_actions.py:216 +msgid "Unable to terminate volume connection from backend." +msgstr "" -#: cinder/utils.py:236 cinder/utils.py:315 -#, python-format -msgid "Result was %s" -msgstr "Result was %s" +#: cinder/api/contrib/volume_actions.py:229 +msgid "No image_name was specified in request." +msgstr "" -#: cinder/utils.py:249 -#, python-format -msgid "%r failed. Retrying." +#: cinder/api/contrib/volume_actions.py:237 +msgid "Bad value for 'force' parameter." 
msgstr "" -#: cinder/utils.py:291 -#, python-format -msgid "Running cmd (SSH): %s" -msgstr "Running cmd (SSH): %s" +#: cinder/api/contrib/volume_actions.py:240 +msgid "'force' is not string or bool." +msgstr "" -#: cinder/utils.py:293 -msgid "Environment not supported over SSH" +#: cinder/api/contrib/volume_actions.py:280 +msgid "New volume size must be specified as an integer." msgstr "" -#: cinder/utils.py:297 -msgid "process_input not supported over SSH" +#: cinder/api/contrib/volume_actions.py:299 +msgid "Must specify readonly in request." msgstr "" -#: cinder/utils.py:352 -#, python-format -msgid "debug in callback: %s" -msgstr "debug in callback: %s" +#: cinder/api/contrib/volume_actions.py:307 +msgid "Bad value for 'readonly'" +msgstr "" -#: cinder/utils.py:534 -#, python-format -msgid "Link Local address is not found.:%s" -msgstr "Link Local address is not found.:%s" +#: cinder/api/contrib/volume_actions.py:311 +msgid "'readonly' not string or bool" +msgstr "" -#: cinder/utils.py:537 -#, python-format -msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" -msgstr "Couldn't get Link Local IP of %(interface)s :%(ex)s" +#: cinder/api/contrib/volume_actions.py:325 +msgid "New volume type must be specified." +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:131 +msgid "Listing volume transfers" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:147 +#, fuzzy, python-format +msgid "Creating new volume transfer %s" +msgstr "Creating new volume transfer %s" + +#: cinder/api/contrib/volume_transfer.py:162 +#, fuzzy, python-format +msgid "Creating transfer of volume %s" +msgstr "Creating transfer of volume %s" -#: cinder/utils.py:648 +#: cinder/api/contrib/volume_transfer.py:183 #, python-format -msgid "Invalid backend: %s" -msgstr "Invalid backend: %s" +msgid "Accepting volume transfer %s" +msgstr "" -#: cinder/utils.py:659 +#: cinder/api/contrib/volume_transfer.py:196 +#, fuzzy, python-format +msgid "Accepting transfer %s" +msgstr "Accepting transfer %s" + +#: cinder/api/contrib/volume_transfer.py:217 #, python-format -msgid "backend %s" -msgstr "backend %s" +msgid "Delete transfer with id: %s" +msgstr "" -#: cinder/utils.py:709 -msgid "in looping call" +#: cinder/api/contrib/volume_type_encryption.py:64 +msgid "key_size must be non-negative" msgstr "" -#: cinder/utils.py:927 -#, python-format -msgid "Attempting to grab semaphore \"%(lock)s\" for method \"%(method)s\"..." +#: cinder/api/contrib/volume_type_encryption.py:67 +msgid "key_size must be an integer" msgstr "" -#: cinder/utils.py:931 -#, python-format -msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +#: cinder/api/contrib/volume_type_encryption.py:73 +msgid "provider must be defined" msgstr "" -#: cinder/utils.py:935 -#, python-format -msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +#: cinder/api/contrib/volume_type_encryption.py:75 +msgid "control_location must be defined" msgstr "" -#: cinder/utils.py:942 +#: cinder/api/contrib/volume_type_encryption.py:83 #, python-format -msgid "Got file lock \"%(lock)s\" for method \"%(method)s\"..." +msgid "Valid control location are: %s" msgstr "" -#: cinder/utils.py:1001 -#, python-format -msgid "Found sentinel %(filename)s for pid %(pid)s" +#: cinder/api/contrib/volume_type_encryption.py:111 +msgid "Cannot create encryption specs. Volume type in use." 
msgstr "" -#: cinder/utils.py:1008 -#, python-format -msgid "Cleaned sentinel %(filename)s for pid %(pid)s" +#: cinder/api/contrib/volume_type_encryption.py:115 +msgid "Create body is not valid." msgstr "" -#: cinder/utils.py:1023 -#, python-format -msgid "Found lockfile %(file)s with link count %(count)d" +#: cinder/api/contrib/volume_type_encryption.py:157 +msgid "Cannot delete encryption specs. Volume type in use." msgstr "" -#: cinder/utils.py:1028 -#, python-format -msgid "Cleaned lockfile %(file)s with link count %(count)d" +#: cinder/api/middleware/auth.py:108 +msgid "Invalid service catalog json." msgstr "" -#: cinder/utils.py:1138 +#: cinder/api/middleware/fault.py:44 #, python-format -msgid "Expected object of type: %s" -msgstr "" +msgid "Caught error: %s" +msgstr "Caught error: %s" -#: cinder/utils.py:1169 +#: cinder/api/middleware/fault.py:53 cinder/api/openstack/wsgi.py:984 #, python-format -msgid "Invalid server_string: %s" +msgid "%(url)s returned with HTTP %(status)d" msgstr "" -#: cinder/utils.py:1298 +#: cinder/api/middleware/fault.py:69 #, python-format -msgid "timefunc: '%(name)s' took %(total_time).2f secs" +msgid "%(exception)s: %(explanation)s" msgstr "" -#: cinder/utils.py:1330 -msgid "Original exception being dropped" +#: cinder/api/middleware/sizelimit.py:55 cinder/api/middleware/sizelimit.py:64 +#: cinder/api/middleware/sizelimit.py:78 +msgid "Request is too large." msgstr "" -#: cinder/utils.py:1461 -#, python-format -msgid "Class %(fullname)s is deprecated: %(msg)s" +#: cinder/api/openstack/__init__.py:69 +msgid "Must specify an ExtensionManager class" msgstr "" -#: cinder/utils.py:1463 +#: cinder/api/openstack/__init__.py:80 #, python-format -msgid "Class %(fullname)s is deprecated" +msgid "Extended resource: %s" msgstr "" -#: cinder/utils.py:1495 +#: cinder/api/openstack/__init__.py:104 #, python-format -msgid "Function %(name)s in %(location)s is deprecated: %(msg)s" +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" msgstr "" -#: cinder/utils.py:1497 +#: cinder/api/openstack/__init__.py:110 #, python-format -msgid "Function %(name)s in %(location)s is deprecated" +msgid "Extension %(ext_name)s extending resource: %(collection)s" msgstr "" -#: cinder/utils.py:1681 -#, python-format -msgid "Could not remove tmpdir: %s" +#: cinder/api/openstack/__init__.py:126 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." msgstr "" -#: cinder/wsgi.py:97 -#, python-format -msgid "Started %(name)s on %(host)s:%(port)s" +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." msgstr "" -#: cinder/wsgi.py:108 -msgid "Stopping WSGI server." +#: cinder/api/openstack/wsgi.py:220 cinder/api/openstack/wsgi.py:634 +msgid "cannot understand JSON" msgstr "" -#: cinder/wsgi.py:111 -msgid "Stopping raw TCP server." +#: cinder/api/openstack/wsgi.py:639 +msgid "too many body keys" msgstr "" -#: cinder/wsgi.py:117 +#: cinder/api/openstack/wsgi.py:677 #, python-format -msgid "Starting TCP server %(arg0)s on %(host)s:%(port)s" +msgid "Exception handling resource: %s" msgstr "" -#: cinder/wsgi.py:133 -msgid "WSGI server has stopped." 
+#: cinder/api/openstack/wsgi.py:682 +#, python-format +msgid "Fault thrown: %s" msgstr "" -#: cinder/wsgi.py:211 -msgid "You must implement __call__" -msgstr "You must implement __call__" +#: cinder/api/openstack/wsgi.py:685 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" -#: cinder/api/direct.py:218 -msgid "not available" -msgstr "not available" +#: cinder/api/openstack/wsgi.py:793 +msgid "Empty body provided in request" +msgstr "" -#: cinder/api/direct.py:299 -#, python-format -msgid "Returned non-serializeable type: %s" +#: cinder/api/openstack/wsgi.py:799 +msgid "Unrecognized Content-Type provided in request" msgstr "" -#: cinder/api/sizelimit.py:51 -msgid "Request is too large." +#: cinder/api/openstack/wsgi.py:803 +msgid "No Content-Type provided in request" msgstr "" -#: cinder/api/validator.py:142 +#: cinder/api/openstack/wsgi.py:914 #, python-format -msgid "%(key)s with value %(value)s failed validator %(validator)s" +msgid "There is no such action: %s" msgstr "" -#: cinder/api/ec2/__init__.py:73 -#, python-format -msgid "%(code)s: %(message)s" +#: cinder/api/openstack/wsgi.py:917 cinder/api/openstack/wsgi.py:930 +#: cinder/api/v1/snapshot_metadata.py:53 cinder/api/v1/snapshot_metadata.py:71 +#: cinder/api/v1/snapshot_metadata.py:96 cinder/api/v1/snapshot_metadata.py:121 +#: cinder/api/v1/volume_metadata.py:53 cinder/api/v1/volume_metadata.py:71 +#: cinder/api/v1/volume_metadata.py:96 cinder/api/v1/volume_metadata.py:121 +#: cinder/api/v2/snapshot_metadata.py:53 cinder/api/v2/snapshot_metadata.py:71 +#: cinder/api/v2/snapshot_metadata.py:96 cinder/api/v2/snapshot_metadata.py:121 +#: cinder/api/v2/volume_metadata.py:52 cinder/api/v2/volume_metadata.py:70 +#: cinder/api/v2/volume_metadata.py:95 cinder/api/v2/volume_metadata.py:120 +msgid "Malformed request body" msgstr "" -#: cinder/api/ec2/__init__.py:95 -#, python-format -msgid "FaultWrapper: %s" +#: cinder/api/openstack/wsgi.py:927 +msgid "Unsupported Content-Type" msgstr "" -#: cinder/api/ec2/__init__.py:170 -msgid "Too many failed authentications." -msgstr "Too many failed authentications." +#: cinder/api/openstack/wsgi.py:939 +msgid "Malformed request url" +msgstr "" -#: cinder/api/ec2/__init__.py:180 +#: cinder/api/openstack/wsgi.py:987 #, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 msgid "" -"Access key %(access_key)s has had %(failures)d failed authentications and" -" will be locked out for %(lock_mins)d minutes." +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." msgstr "" -"Access key %(access_key)s has had %(failures)d failed authentications and" -" will be locked out for %(lock_mins)d minutes." -#: cinder/api/ec2/__init__.py:267 -msgid "Signature not provided" +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:139 cinder/api/v2/limits.py:138 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." msgstr "" -#: cinder/api/ec2/__init__.py:271 -msgid "Access key not provided" +#: cinder/api/v1/limits.py:264 cinder/api/v2/limits.py:261 +msgid "This request was rate-limited." 
msgstr "" -#: cinder/api/ec2/__init__.py:306 cinder/api/ec2/__init__.py:319 -msgid "Failure communicating with keystone" +#: cinder/api/v1/snapshot_metadata.py:37 cinder/api/v1/snapshot_metadata.py:117 +#: cinder/api/v1/snapshot_metadata.py:156 cinder/api/v2/snapshot_metadata.py:37 +#: cinder/api/v2/snapshot_metadata.py:117 +#: cinder/api/v2/snapshot_metadata.py:156 +#, fuzzy +msgid "snapshot does not exist" +msgstr "snapshot does not exist" + +#: cinder/api/v1/snapshot_metadata.py:139 +#: cinder/api/v1/snapshot_metadata.py:149 cinder/api/v1/volume_metadata.py:139 +#: cinder/api/v1/volume_metadata.py:149 cinder/api/v2/snapshot_metadata.py:139 +#: cinder/api/v2/snapshot_metadata.py:149 cinder/api/v2/volume_metadata.py:138 +#: cinder/api/v2/volume_metadata.py:148 +msgid "Metadata item was not found" msgstr "" -#: cinder/api/ec2/__init__.py:388 +#: cinder/api/v1/snapshots.py:119 cinder/api/v2/snapshots.py:120 #, python-format -msgid "Authentication Failure: %s" -msgstr "Authentication Failure: %s" +msgid "Delete snapshot with id: %s" +msgstr "" -#: cinder/api/ec2/__init__.py:404 -#, python-format -msgid "Authenticated Request For %(uname)s:%(pname)s)" -msgstr "Authenticated Request For %(uname)s:%(pname)s)" +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:184 +msgid "'volume_id' must be specified" +msgstr "" -#: cinder/api/ec2/__init__.py:435 +#: cinder/api/v1/snapshots.py:182 cinder/api/v2/snapshots.py:193 #, python-format -msgid "action: %s" -msgstr "action: %s" +msgid "Create snapshot from volume %s" +msgstr "" -#: cinder/api/ec2/__init__.py:437 +#: cinder/api/v1/snapshots.py:186 cinder/api/v2/snapshots.py:202 #, python-format -msgid "arg: %(key)s\t\tval: %(value)s" -msgstr "arg: %(key)s\t\tval: %(value)s" +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:37 cinder/api/v1/volume_metadata.py:117 +#: cinder/api/v1/volume_metadata.py:156 cinder/api/v2/volume_metadata.py:36 +#: cinder/api/v2/volume_metadata.py:116 cinder/api/v2/volume_metadata.py:155 +#, fuzzy +msgid "volume does not exist" +msgstr "volume does not exist" -#: cinder/api/ec2/__init__.py:512 +#: cinder/api/v1/volumes.py:111 #, python-format -msgid "Unauthorized request for controller=%(controller)s and action=%(action)s" -msgstr "Unauthorised request for controller=%(controller)s and action=%(action)s" +msgid "vol=%s" +msgstr "" -#: cinder/api/ec2/__init__.py:584 +#: cinder/api/v1/volumes.py:290 cinder/api/v2/volumes.py:228 #, python-format -msgid "InstanceNotFound raised: %s" -msgstr "InstanceNotFound raised: %s" +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:344 cinder/api/v1/volumes.py:348 +#: cinder/api/v2/volumes.py:298 cinder/api/v2/volumes.py:302 +msgid "Invalid imageRef provided." 
+msgstr "" -#: cinder/api/ec2/__init__.py:590 +#: cinder/api/v1/volumes.py:388 cinder/api/v2/volumes.py:354 #, python-format -msgid "VolumeNotFound raised: %s" -msgstr "VolumeNotFound raised: %s" +msgid "snapshot id:%s not found" +msgstr "" -#: cinder/api/ec2/__init__.py:596 +#: cinder/api/v1/volumes.py:401 #, python-format -msgid "SnapshotNotFound raised: %s" +msgid "source vol id:%s not found" msgstr "" -#: cinder/api/ec2/__init__.py:602 +#: cinder/api/v1/volumes.py:412 cinder/api/v2/volumes.py:377 #, python-format -msgid "NotFound raised: %s" -msgstr "NotFound raised: %s" +msgid "Create volume of %s GB" +msgstr "Create volume of %s GB" -#: cinder/api/ec2/__init__.py:605 +#: cinder/api/v1/volumes.py:496 #, python-format -msgid "EC2APIError raised: %s" +msgid "Removing options '%(bad_options)s' from query" msgstr "" -#: cinder/api/ec2/__init__.py:613 -#, python-format -msgid "KeyPairExists raised: %s" +#: cinder/api/v2/snapshots.py:111 cinder/api/v2/snapshots.py:126 +#: cinder/api/v2/snapshots.py:267 +msgid "Snapshot could not be found" msgstr "" -#: cinder/api/ec2/__init__.py:617 +#: cinder/api/v2/snapshots.py:174 cinder/api/v2/snapshots.py:234 +#: cinder/api/v2/volumes.py:313 cinder/api/v2/volumes.py:419 #, python-format -msgid "InvalidParameterValue raised: %s" +msgid "Missing required element '%s' in request body" msgstr "" -#: cinder/api/ec2/__init__.py:621 -#, python-format -msgid "InvalidPortRange raised: %s" +#: cinder/api/v2/snapshots.py:190 cinder/api/v2/volumes.py:217 +#: cinder/api/v2/volumes.py:234 cinder/api/v2/volumes.py:449 +msgid "Volume could not be found" msgstr "" -#: cinder/api/ec2/__init__.py:625 -#, python-format -msgid "NotAuthorized raised: %s" +#: cinder/api/v2/snapshots.py:230 cinder/api/v2/volumes.py:415 +msgid "Missing request body" msgstr "" -#: cinder/api/ec2/__init__.py:629 -#, python-format -msgid "InvalidRequest raised: %s" +#: cinder/api/v2/types.py:70 +msgid "Volume type not found" msgstr "" -#: cinder/api/ec2/__init__.py:633 -#, fuzzy, python-format -msgid "QuotaError raised: %s" -msgstr "Unexpected error raised: %s" +#: cinder/api/v2/volumes.py:237 +msgid "Volume cannot be deleted while in attached state" +msgstr "" -#: cinder/api/ec2/__init__.py:637 -#, python-format -msgid "Invalid id: bogus (expecting \"i-...\"): %s" +#: cinder/api/v2/volumes.py:343 +msgid "Volume type not found." msgstr "" -#: cinder/api/ec2/__init__.py:646 +#: cinder/api/v2/volumes.py:366 #, python-format -msgid "Unexpected error raised: %s" -msgstr "Unexpected error raised: %s" +msgid "source volume id:%s not found" +msgstr "" -#: cinder/api/ec2/__init__.py:647 +#: cinder/api/v2/volumes.py:472 #, python-format -msgid "Environment: %s" +msgid "Removing options '%s' from query" msgstr "" -#: cinder/api/ec2/__init__.py:649 cinder/api/metadata/handler.py:248 -msgid "An unknown error has occurred. Please try your request again." -msgstr "An unknown error has occurred. Please try your request again." 
+#: cinder/backup/api.py:66 +#, fuzzy +msgid "Backup status must be available or error" +msgstr "Backup status must be available or error" -#: cinder/api/ec2/apirequest.py:64 -#, python-format -msgid "Unsupported API request: controller = %(controller)s, action = %(action)s" -msgstr "Unsupported API request: controller = %(controller)s, action = %(action)s" +#: cinder/backup/api.py:105 +#, fuzzy +msgid "Volume to be backed up must be available" +msgstr "Volume to be backed up must be available" + +#: cinder/backup/api.py:140 +#, fuzzy +msgid "Backup status must be available" +msgstr "Backup status must be available" + +#: cinder/backup/api.py:145 +msgid "Backup to be restored has invalid size" +msgstr "" -#: cinder/api/ec2/cloud.py:336 +#: cinder/backup/api.py:154 #, python-format -msgid "Create snapshot of volume %s" +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" msgstr "" -#: cinder/api/ec2/cloud.py:372 +#: cinder/backup/api.py:170 +#, fuzzy +msgid "Volume to be restored to must be available" +msgstr "Volume to be restored to must be available" + +#: cinder/backup/api.py:176 #, python-format msgid "" -"Value (%s) for KeyName is invalid. Content limited to Alphanumeric " -"character, spaces, dashes, and underscore." +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." msgstr "" -#: cinder/api/ec2/cloud.py:378 +#: cinder/backup/api.py:181 #, python-format -msgid "Value (%s) for Keyname is invalid. Length exceeds maximum of 255." +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:97 +msgid "NULL host not allowed for volume backend lookup." msgstr "" -#: cinder/api/ec2/cloud.py:382 +#: cinder/backup/manager.py:100 #, python-format -msgid "Create key pair %s" -msgstr "Create key pair %s" +msgid "Checking hostname '%s' for backend info." +msgstr "" -#: cinder/api/ec2/cloud.py:391 +#: cinder/backup/manager.py:107 #, python-format -msgid "Import key %s" +msgid "Backend not found in hostname (%s) so using default." msgstr "" -#: cinder/api/ec2/cloud.py:409 +#: cinder/backup/manager.py:117 #, python-format -msgid "Delete key pair %s" -msgstr "Delete key pair %s" +msgid "Manager requested for volume_backend '%s'." +msgstr "" -#: cinder/api/ec2/cloud.py:551 -msgid "Invalid CIDR" +#: cinder/backup/manager.py:120 cinder/backup/manager.py:132 +msgid "Fetching default backend." msgstr "" -#: cinder/api/ec2/cloud.py:639 cinder/api/ec2/cloud.py:693 -#: cinder/api/ec2/cloud.py:800 -msgid "Not enough parameters, need group_name or group_id" +#: cinder/backup/manager.py:123 +#, python-format +msgid "Volume manager for backend '%s' does not exist." msgstr "" -#: cinder/api/ec2/cloud.py:654 -#: cinder/api/openstack/compute/contrib/security_groups.py:517 +#: cinder/backup/manager.py:129 #, python-format -msgid "Revoke security group ingress %s" -msgstr "Revoke security group ingress %s" +msgid "Driver requested for volume_backend '%s'." +msgstr "" -#: cinder/api/ec2/cloud.py:666 cinder/api/ec2/cloud.py:719 -#, fuzzy, python-format -msgid "%s Not enough parameters to build a valid rule" -msgstr "Not enough parameters to build a valid rule." - -#: cinder/api/ec2/cloud.py:684 cinder/api/ec2/cloud.py:744 -msgid "No rule for the specified parameters." -msgstr "No rule for the specified parameters." 
- -#: cinder/api/ec2/cloud.py:708 -#: cinder/api/openstack/compute/contrib/security_groups.py:354 -#, python-format -msgid "Authorize security group ingress %s" -msgstr "Authorise security group ingress %s" - -#: cinder/api/ec2/cloud.py:725 -#, fuzzy, python-format -msgid "%s - This rule already exists in group" -msgstr "This rule already exists in group %s" - -#: cinder/api/ec2/cloud.py:769 +#: cinder/backup/manager.py:147 #, python-format msgid "" -"Value (%s) for parameter GroupName is invalid. Content limited to " -"Alphanumeric characters, spaces, dashes, and underscores." +"Registering backend %(backend)s (host=%(host)s " +"backend_name=%(backend_name)s)." msgstr "" -#: cinder/api/ec2/cloud.py:776 +#: cinder/backup/manager.py:154 #, python-format -msgid "" -"Value (%s) for parameter GroupName is invalid. Length exceeds maximum of " -"255." +msgid "Registering default backend %s." msgstr "" -#: cinder/api/ec2/cloud.py:780 -#: cinder/api/openstack/compute/contrib/security_groups.py:292 +#: cinder/backup/manager.py:158 #, python-format -msgid "Create Security Group %s" -msgstr "Create Security Group %s" +msgid "Starting volume driver %(driver_name)s (%(version)s)." +msgstr "" -#: cinder/api/ec2/cloud.py:783 +#: cinder/backup/manager.py:165 #, python-format -msgid "group %s already exists" -msgstr "group %s already exists" +msgid "Error encountered during initialization of driver: %(name)s." +msgstr "" -#: cinder/api/ec2/cloud.py:815 -#: cinder/api/openstack/compute/contrib/security_groups.py:245 -#, python-format -msgid "Delete security group %s" -msgstr "Delete security group %s" +#: cinder/backup/manager.py:184 +msgid "Cleaning up incomplete backup operations." +msgstr "" -#: cinder/api/ec2/cloud.py:823 cinder/compute/manager.py:1630 +#: cinder/backup/manager.py:189 #, python-format -msgid "Get console output for instance %s" -msgstr "Get console output for instance %s" +msgid "Resetting volume %s to available (was backing-up)." +msgstr "" -#: cinder/api/ec2/cloud.py:894 +#: cinder/backup/manager.py:194 #, python-format -msgid "Create volume from snapshot %s" +msgid "Resetting volume %s to error_restoring (was restoring-backup)." msgstr "" -#: cinder/api/ec2/cloud.py:898 cinder/api/openstack/compute/contrib/volumes.py:186 -#: cinder/api/openstack/volume/volumes.py:222 +#: cinder/backup/manager.py:206 #, python-format -msgid "Create volume of %s GB" -msgstr "Create volume of %s GB" - -#: cinder/api/ec2/cloud.py:921 -msgid "Delete Failed" +msgid "Resetting backup %s to error (was creating)." msgstr "" -#: cinder/api/ec2/cloud.py:931 +#: cinder/backup/manager.py:212 #, python-format -msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" -msgstr "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" - -#: cinder/api/ec2/cloud.py:939 -msgid "Attach Failed." +msgid "Resetting backup %s to available (was restoring)." msgstr "" -#: cinder/api/ec2/cloud.py:952 cinder/api/openstack/compute/contrib/volumes.py:366 +#: cinder/backup/manager.py:217 #, python-format -msgid "Detach volume %s" -msgstr "Detach volume %s" - -#: cinder/api/ec2/cloud.py:959 -#, fuzzy, python-format -msgid "Detach Volume Failed." -msgstr "Detach volume %s" +msgid "Resuming delete on backup: %s." 
+msgstr "" -#: cinder/api/ec2/cloud.py:984 cinder/api/ec2/cloud.py:1041 -#: cinder/api/ec2/cloud.py:1518 cinder/api/ec2/cloud.py:1533 +#: cinder/backup/manager.py:225 #, python-format -msgid "attribute not supported: %s" -msgstr "attribute not supported: %s" +msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" -#: cinder/api/ec2/cloud.py:1107 +#: cinder/backup/manager.py:237 #, python-format -msgid "vol = %s\n" +msgid "" +"Create backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s." msgstr "" -#: cinder/api/ec2/cloud.py:1255 -msgid "Allocate address" -msgstr "Allocate address" - -#: cinder/api/ec2/cloud.py:1267 +#: cinder/backup/manager.py:249 #, python-format -msgid "Release address %s" -msgstr "Release address %s" +msgid "" +"Create backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" -#: cinder/api/ec2/cloud.py:1272 +#: cinder/backup/manager.py:282 #, python-format -msgid "Associate address %(public_ip)s to instance %(instance_id)s" -msgstr "Associate address %(public_ip)s to instance %(instance_id)s" +msgid "Create backup finished. backup: %s." +msgstr "" -#: cinder/api/ec2/cloud.py:1282 +#: cinder/backup/manager.py:286 #, python-format -msgid "Disassociate address %s" -msgstr "Disassociate address %s" - -#: cinder/api/ec2/cloud.py:1308 -msgid "Image must be available" +msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s." msgstr "" -#: cinder/api/ec2/cloud.py:1329 -msgid "Going to start terminating instances" -msgstr "Going to start terminating instances" - -#: cinder/api/ec2/cloud.py:1343 +#: cinder/backup/manager.py:299 #, python-format -msgid "Reboot instance %r" -msgstr "Reboot instance %r" - -#: cinder/api/ec2/cloud.py:1354 -msgid "Going to stop instances" +msgid "" +"Restore backup aborted: expected volume status %(expected_status)s but " +"got %(actual_status)s." msgstr "" -#: cinder/api/ec2/cloud.py:1365 -msgid "Going to start instances" +#: cinder/backup/manager.py:310 +#, python-format +msgid "" +"Restore backup aborted: expected backup status %(expected_status)s but " +"got %(actual_status)s." msgstr "" -#: cinder/api/ec2/cloud.py:1455 +#: cinder/backup/manager.py:329 #, python-format -msgid "De-registering image %s" -msgstr "De-registering image %s" - -#: cinder/api/ec2/cloud.py:1471 -msgid "imageLocation is required" +msgid "" +"Restore backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." msgstr "" -#: cinder/api/ec2/cloud.py:1490 +#: cinder/backup/manager.py:360 #, python-format -msgid "Registered image %(image_location)s with id %(image_id)s" -msgstr "Registered image %(image_location)s with id %(image_id)s" - -#: cinder/api/ec2/cloud.py:1536 -msgid "user or group not specified" -msgstr "user or group not specified" - -#: cinder/api/ec2/cloud.py:1538 -msgid "only group \"all\" is supported" -msgstr "only group \"all\" is supported" - -#: cinder/api/ec2/cloud.py:1540 -msgid "operation_type must be add or remove" -msgstr "operation_type must be add or remove" +msgid "" +"Restore backup finished, backup %(backup_id)s restored to volume " +"%(volume_id)s." +msgstr "" -#: cinder/api/ec2/cloud.py:1542 +#: cinder/backup/manager.py:379 #, python-format -msgid "Updating image %s publicity" -msgstr "Updating image %s publicity" +msgid "Delete backup started, backup: %s." 
+msgstr "" -#: cinder/api/ec2/cloud.py:1555 +#: cinder/backup/manager.py:386 #, python-format -msgid "Not allowed to modify attributes for image %s" +msgid "" +"Delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." msgstr "" -#: cinder/api/ec2/cloud.py:1603 +#: cinder/backup/manager.py:399 #, python-format -msgid "Couldn't stop instance with in %d sec" +msgid "" +"Delete backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." msgstr "" -#: cinder/api/metadata/handler.py:246 cinder/api/metadata/handler.py:253 +#: cinder/backup/manager.py:422 #, python-format -msgid "Failed to get metadata for ip: %s" -msgstr "Failed to get metadata for ip: %s" +msgid "Delete backup finished, backup %s deleted." +msgstr "" -#: cinder/api/openstack/__init__.py:43 -#, python-format -msgid "Caught error: %s" -msgstr "Caught error: %s" +#: cinder/backup/drivers/ceph.py:116 +msgid "" +"rbd striping not supported - ignoring configuration settings for rbd " +"striping" +msgstr "" -#: cinder/api/openstack/__init__.py:45 cinder/api/openstack/wsgi.py:886 +#: cinder/backup/drivers/ceph.py:147 #, python-format -msgid "%(url)s returned with HTTP %(status)d" +msgid "invalid user '%s'" msgstr "" -#: cinder/api/openstack/__init__.py:94 -msgid "Must specify an ExtensionManager class" +#: cinder/backup/drivers/ceph.py:213 +msgid "backup_id required" msgstr "" -#: cinder/api/openstack/__init__.py:105 +#: cinder/backup/drivers/ceph.py:224 #, python-format -msgid "Extended resource: %s" +msgid "discarding %(length)s bytes from offset %(offset)s" msgstr "" -#: cinder/api/openstack/__init__.py:130 +#: cinder/backup/drivers/ceph.py:232 #, python-format -msgid "" -"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " -"resource" +msgid "writing zeroes chunk %d" msgstr "" -#: cinder/api/openstack/__init__.py:135 +#: cinder/backup/drivers/ceph.py:246 #, python-format -msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgid "transferring data between '%(src)s' and '%(dest)s'" msgstr "" -#: cinder/api/openstack/auth.py:90 +#: cinder/backup/drivers/ceph.py:250 #, python-format -msgid "%(user_id)s could not be found with token '%(token)s'" +msgid "%(chunks)s chunks of %(bytes)s bytes to be transferred" msgstr "" -#: cinder/api/openstack/auth.py:134 +#: cinder/backup/drivers/ceph.py:269 #, python-format -msgid "%(user_id)s must be an admin or a member of %(project_id)s" +msgid "transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s)" msgstr "" -#: cinder/api/openstack/auth.py:152 -msgid "Authentication requests must be made against a version root (e.g. /v2)." +#: cinder/backup/drivers/ceph.py:279 +#, python-format +msgid "transferring remaining %s bytes" msgstr "" -#: cinder/api/openstack/auth.py:167 +#: cinder/backup/drivers/ceph.py:295 #, python-format -msgid "Could not find %s in request." +msgid "creating base image '%s'" msgstr "" -#: cinder/api/openstack/auth.py:191 +#: cinder/backup/drivers/ceph.py:322 cinder/backup/drivers/ceph.py:603 #, python-format -msgid "Successfully authenticated '%s'" +msgid "deleting backup snapshot='%s'" msgstr "" -#: cinder/api/openstack/auth.py:241 -msgid "User not found with provided API key." 
+#: cinder/backup/drivers/ceph.py:325
+msgid "no backup snapshot to delete"
 msgstr ""

-#: cinder/api/openstack/auth.py:258
+#: cinder/backup/drivers/ceph.py:361
 #, python-format
-msgid "Provided API key is valid, but not for user '%(username)s'"
+msgid "trying diff format name format basename='%s'"
 msgstr ""

-#: cinder/api/openstack/common.py:133 cinder/api/openstack/common.py:167
-msgid "limit param must be an integer"
+#: cinder/backup/drivers/ceph.py:369
+#, python-format
+msgid "image %s not found"
 msgstr ""

-#: cinder/api/openstack/common.py:136 cinder/api/openstack/common.py:171
-msgid "limit param must be positive"
+#: cinder/backup/drivers/ceph.py:377
+#, python-format
+msgid "base image still has %s snapshots so skipping base image delete"
 msgstr ""

-#: cinder/api/openstack/common.py:161
-msgid "offset param must be an integer"
+#: cinder/backup/drivers/ceph.py:382
+#, python-format
+msgid "deleting base image='%s'"
 msgstr ""

-#: cinder/api/openstack/common.py:175
-msgid "offset param must be positive"
+#: cinder/backup/drivers/ceph.py:389
+#, python-format
+msgid "image busy, retrying %(retries)s more time(s) in %(delay)ss"
 msgstr ""

-#: cinder/api/openstack/common.py:203
-#, python-format
-msgid "marker [%s] not found"
+#: cinder/backup/drivers/ceph.py:394
+msgid "max retries reached - raising error"
 msgstr ""

-#: cinder/api/openstack/common.py:243
+#: cinder/backup/drivers/ceph.py:397
 #, python-format
-msgid "href %s does not contain version"
+msgid "base backup image='%s' deleted"
 msgstr ""

-#: cinder/api/openstack/common.py:278
-msgid "Image metadata limit exceeded"
+#: cinder/backup/drivers/ceph.py:407
+#, python-format
+msgid "deleting source snap '%s'"
 msgstr ""

-#: cinder/api/openstack/common.py:295
+#: cinder/backup/drivers/ceph.py:453
 #, python-format
-msgid "Converting nw_info: %s"
+msgid "performing differential transfer from '%(src)s' to '%(dest)s'"
 msgstr ""

-#: cinder/api/openstack/common.py:305
+#: cinder/backup/drivers/ceph.py:478
 #, python-format
-msgid "Converted networks: %s"
+msgid "rbd diff op failed - (ret=%(ret)s stderr=%(stderr)s)"
 msgstr ""

-#: cinder/api/openstack/common.py:338
+#: cinder/backup/drivers/ceph.py:488
 #, python-format
-msgid "Cannot '%(action)s' while instance is in %(attr)s %(state)s"
+msgid "image '%s' not found - trying diff format name"
 msgstr ""

-#: cinder/api/openstack/common.py:341
+#: cinder/backup/drivers/ceph.py:493
 #, python-format
-msgid "Instance is in an invalid state for '%(action)s'"
+msgid "diff format image '%s' not found"
 msgstr ""

-#: cinder/api/openstack/common.py:421
-msgid "Rejecting snapshot request, snapshots currently disabled"
+#: cinder/backup/drivers/ceph.py:528
+#, python-format
+msgid "using --from-snap '%s'"
 msgstr ""

-#: cinder/api/openstack/common.py:423
-msgid "Instance snapshots are not permitted at this time."
+#: cinder/backup/drivers/ceph.py:543 +#, python-format +msgid "source snap '%s' is stale so deleting" msgstr "" -#: cinder/api/openstack/extensions.py:188 +#: cinder/backup/drivers/ceph.py:555 #, python-format -msgid "Loaded extension: %s" +msgid "" +"snap='%(snap)s' does not exist in base image='%(base)s' - aborting " +"incremental backup" msgstr "" -#: cinder/api/openstack/extensions.py:225 +#: cinder/backup/drivers/ceph.py:566 #, python-format -msgid "Ext name: %s" +msgid "creating backup snapshot='%s'" msgstr "" -#: cinder/api/openstack/extensions.py:226 +#: cinder/backup/drivers/ceph.py:586 #, python-format -msgid "Ext alias: %s" +msgid "differential backup transfer completed in %.4fs" msgstr "" -#: cinder/api/openstack/extensions.py:227 -#, python-format -msgid "Ext description: %s" +#: cinder/backup/drivers/ceph.py:595 +msgid "differential backup transfer failed" msgstr "" -#: cinder/api/openstack/extensions.py:229 +#: cinder/backup/drivers/ceph.py:625 #, python-format -msgid "Ext namespace: %s" +msgid "creating base image='%s'" msgstr "" -#: cinder/api/openstack/extensions.py:230 -#, python-format -msgid "Ext updated: %s" +#: cinder/backup/drivers/ceph.py:634 +msgid "copying data" msgstr "" -#: cinder/api/openstack/extensions.py:232 +#: cinder/backup/drivers/ceph.py:694 #, python-format -msgid "Exception loading extension: %s" +msgid "looking for snapshot of backup base '%s'" msgstr "" -#: cinder/api/openstack/extensions.py:246 +#: cinder/backup/drivers/ceph.py:697 #, python-format -msgid "Loading extension %s" +msgid "backup base '%s' has no snapshots" msgstr "" -#: cinder/api/openstack/extensions.py:252 +#: cinder/backup/drivers/ceph.py:704 #, python-format -msgid "Calling extension factory %s" +msgid "backup '%s' has no snapshot" msgstr "" -#: cinder/api/openstack/extensions.py:264 +#: cinder/backup/drivers/ceph.py:708 #, python-format -msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgid "backup should only have one snapshot but instead has %s" msgstr "" -#: cinder/api/openstack/extensions.py:344 +#: cinder/backup/drivers/ceph.py:713 #, python-format -msgid "Failed to load extension %(classpath)s: %(exc)s" +msgid "found snapshot '%s'" msgstr "" -#: cinder/api/openstack/extensions.py:368 +#: cinder/backup/drivers/ceph.py:734 +msgid "need non-zero volume size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:751 #, python-format -msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgid "Starting backup of volume='%s'" msgstr "" -#: cinder/api/openstack/wsgi.py:135 cinder/api/openstack/wsgi.py:538 -msgid "cannot understand JSON" +#: cinder/backup/drivers/ceph.py:764 +msgid "forcing full backup" msgstr "" -#: cinder/api/openstack/wsgi.py:159 -#: cinder/api/openstack/compute/contrib/hosts.py:86 -msgid "cannot understand XML" +#: cinder/backup/drivers/ceph.py:776 +#, python-format +msgid "backup '%s' finished." 
msgstr "" -#: cinder/api/openstack/wsgi.py:543 -msgid "too many body keys" +#: cinder/backup/drivers/ceph.py:834 +msgid "adjusting restore vol size" msgstr "" -#: cinder/api/openstack/wsgi.py:582 +#: cinder/backup/drivers/ceph.py:846 #, python-format -msgid "Exception handling resource: %s" +msgid "trying incremental restore from base='%(base)s' snap='%(snap)s'" msgstr "" -#: cinder/api/openstack/wsgi.py:586 -#, python-format -msgid "Fault thrown: %s" +#: cinder/backup/drivers/ceph.py:858 +msgid "differential restore failed, trying full restore" msgstr "" -#: cinder/api/openstack/wsgi.py:589 +#: cinder/backup/drivers/ceph.py:869 #, python-format -msgid "HTTP exception thrown: %s" +msgid "restore transfer completed in %.4fs" msgstr "" -#: cinder/api/openstack/wsgi.py:697 -msgid "Unrecognized Content-Type provided in request" +#: cinder/backup/drivers/ceph.py:916 +#, python-format +msgid "rbd has %s extents" msgstr "" -#: cinder/api/openstack/wsgi.py:701 -msgid "No Content-Type provided in request" +#: cinder/backup/drivers/ceph.py:938 +msgid "dest volume is original volume - forcing full copy" msgstr "" -#: cinder/api/openstack/wsgi.py:705 -msgid "Empty body provided in request" +#: cinder/backup/drivers/ceph.py:959 +msgid "destination has extents - forcing full copy" msgstr "" -#: cinder/api/openstack/wsgi.py:816 +#: cinder/backup/drivers/ceph.py:964 #, python-format -msgid "There is no such action: %s" +msgid "no restore point found for backup='%s', forcing full copy" msgstr "" -#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 -#: cinder/api/openstack/compute/server_metadata.py:58 -#: cinder/api/openstack/compute/server_metadata.py:76 -#: cinder/api/openstack/compute/server_metadata.py:101 -#: cinder/api/openstack/compute/server_metadata.py:126 -#: cinder/api/openstack/compute/contrib/admin_actions.py:211 -#: cinder/api/openstack/compute/contrib/console_output.py:52 -msgid "Malformed request body" +#: cinder/backup/drivers/ceph.py:995 +msgid "forcing full restore" msgstr "" -#: cinder/api/openstack/wsgi.py:829 -msgid "Unsupported Content-Type" +#: cinder/backup/drivers/ceph.py:1005 +#, python-format +msgid "starting restore from Ceph backup=%(src)s to volume=%(dest)s" msgstr "" -#: cinder/api/openstack/wsgi.py:841 -msgid "Malformed request url" +#: cinder/backup/drivers/ceph.py:1016 +msgid "volume_file does not support fileno() so skipping fsync()" msgstr "" -#: cinder/api/openstack/wsgi.py:889 +#: cinder/backup/drivers/ceph.py:1021 +msgid "restore finished successfully." +msgstr "" + +#: cinder/backup/drivers/ceph.py:1023 #, python-format -msgid "%(url)s returned a fault: %(e)s" +msgid "restore finished with error - %s" msgstr "" -#: cinder/api/openstack/xmlutil.py:265 -msgid "element is not a child" +#: cinder/backup/drivers/ceph.py:1029 +#, python-format +msgid "delete started for backup=%s" msgstr "" -#: cinder/api/openstack/xmlutil.py:414 -msgid "root element selecting a list" +#: cinder/backup/drivers/ceph.py:1034 +msgid "rbd image not found but continuing anyway so that db entry can be removed" msgstr "" -#: cinder/api/openstack/xmlutil.py:739 +#: cinder/backup/drivers/ceph.py:1037 #, python-format -msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgid "delete '%s' finished with warning" msgstr "" -#: cinder/api/openstack/xmlutil.py:858 -msgid "subclasses must implement construct()!" 
+#: cinder/backup/drivers/ceph.py:1039
+#, python-format
+msgid "delete '%s' finished"
 msgstr ""

-#: cinder/api/openstack/compute/extensions.py:29
-#: cinder/api/openstack/volume/extensions.py:29
-msgid "Initializing extension manager."
+#: cinder/backup/drivers/swift.py:106
+#, python-format
+msgid "unsupported compression algorithm: %s"
 msgstr ""

-#: cinder/api/openstack/compute/image_metadata.py:40
-#: cinder/api/openstack/compute/images.py:146
-#: cinder/api/openstack/compute/images.py:161
-msgid "Image not found."
+#: cinder/backup/drivers/swift.py:123
+#, python-format
+msgid "single_user auth mode enabled, but %(param)s not set"
 msgstr ""

-#: cinder/api/openstack/compute/image_metadata.py:79
-msgid "Incorrect request body format"
+#: cinder/backup/drivers/swift.py:141
+#, python-format
+msgid "_check_container_exists: container: %s"
 msgstr ""

-#: cinder/api/openstack/compute/image_metadata.py:83
-#: cinder/api/openstack/compute/server_metadata.py:80
-#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:79
-#: cinder/api/openstack/compute/contrib/volumetypes.py:188
-msgid "Request body and URI mismatch"
-msgstr ""
+#: cinder/backup/drivers/swift.py:146
+#, fuzzy, python-format
+msgid "container %s does not exist"
+msgstr "container %s does not exist"

-#: cinder/api/openstack/compute/image_metadata.py:86
-#: cinder/api/openstack/compute/server_metadata.py:84
-#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:82
-#: cinder/api/openstack/compute/contrib/volumetypes.py:191
-msgid "Request body contains too many items"
+#: cinder/backup/drivers/swift.py:151
+#, python-format
+msgid "container %s exists"
 msgstr ""

-#: cinder/api/openstack/compute/image_metadata.py:111
-msgid "Invalid metadata key"
+#: cinder/backup/drivers/swift.py:157
+#, python-format
+msgid "_create_container started, container: %(container)s, backup: %(backup_id)s"
 msgstr ""

-#: cinder/api/openstack/compute/ips.py:74
-msgid "Instance does not exist"
+#: cinder/backup/drivers/swift.py:173
+#, python-format
+msgid "_generate_swift_object_name_prefix: %s"
 msgstr ""

-#: cinder/api/openstack/compute/ips.py:97
-msgid "Instance is not a member of specified network"
+#: cinder/backup/drivers/swift.py:182
+#, python-format
+msgid "generated object list: %s"
 msgstr ""

-#: cinder/api/openstack/compute/limits.py:140
+#: cinder/backup/drivers/swift.py:192
 #, python-format
 msgid ""
-"Only %(value)s %(verb)s request(s) can be made to %(uri)s every "
-"%(unit_string)s."
+"_write_metadata started, container name: %(container)s, metadata "
+"filename: %(filename)s"
 msgstr ""

-#: cinder/api/openstack/compute/limits.py:266
-msgid "This request was rate-limited."
-msgstr "" - -#: cinder/api/openstack/compute/server_metadata.py:38 -#: cinder/api/openstack/compute/server_metadata.py:122 -#: cinder/api/openstack/compute/server_metadata.py:159 -msgid "Server does not exist" +#: cinder/backup/drivers/swift.py:209 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" msgstr "" -#: cinder/api/openstack/compute/server_metadata.py:141 -#: cinder/api/openstack/compute/server_metadata.py:152 -msgid "Metadata item was not found" +#: cinder/backup/drivers/swift.py:214 +msgid "_write_metadata finished" msgstr "" -#: cinder/api/openstack/compute/servers.py:425 +#: cinder/backup/drivers/swift.py:219 #, python-format -msgid "Invalid server status: %(status)s" +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:433 -msgid "Invalid changes-since value" +#: cinder/backup/drivers/swift.py:224 +#, python-format +msgid "_read_metadata finished (%s)" msgstr "" -#: cinder/api/openstack/compute/servers.py:483 -msgid "Personality file limit exceeded" +#: cinder/backup/drivers/swift.py:234 +#, python-format +msgid "volume size %d is invalid." msgstr "" -#: cinder/api/openstack/compute/servers.py:485 -msgid "Personality file path too long" +#: cinder/backup/drivers/swift.py:248 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:487 -msgid "Personality file content too long" +#: cinder/backup/drivers/swift.py:271 +msgid "reading chunk of data from volume" msgstr "" -#: cinder/api/openstack/compute/servers.py:501 -msgid "Server name is not a string or unicode" +#: cinder/backup/drivers/swift.py:278 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:505 -msgid "Server name is an empty string" +#: cinder/backup/drivers/swift.py:287 +msgid "not compressing data" msgstr "" -#: cinder/api/openstack/compute/servers.py:509 -msgid "Server name must be less than 256 characters." 
+#: cinder/backup/drivers/swift.py:291 +msgid "About to put_object" msgstr "" -#: cinder/api/openstack/compute/servers.py:527 +#: cinder/backup/drivers/swift.py:297 #, python-format -msgid "Bad personality format: missing %s" +msgid "swift MD5 for %(object_name)s: %(etag)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:530 -msgid "Bad personality format" +#: cinder/backup/drivers/swift.py:301 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:535 +#: cinder/backup/drivers/swift.py:304 #, python-format -msgid "Personality content for %s cannot be decoded" +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:550 -#, python-format -msgid "Bad networks format: network uuid is not in proper format (%s)" +#: cinder/backup/drivers/swift.py:312 +msgid "Calling eventlet.sleep(0)" msgstr "" -#: cinder/api/openstack/compute/servers.py:559 +#: cinder/backup/drivers/swift.py:328 cinder/backup/drivers/tsm.py:324 #, python-format -msgid "Invalid fixed IP address (%s)" +msgid "backup %s finished." msgstr "" -#: cinder/api/openstack/compute/servers.py:566 +#: cinder/backup/drivers/swift.py:345 #, python-format -msgid "Duplicate networks (%s) are not allowed" +msgid "v1 swift volume backup restore of %s started" msgstr "" -#: cinder/api/openstack/compute/servers.py:572 +#: cinder/backup/drivers/swift.py:350 #, python-format -msgid "Bad network format: missing %s" +msgid "metadata_object_names = %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:575 -msgid "Bad networks format" +#: cinder/backup/drivers/swift.py:356 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" msgstr "" -#: cinder/api/openstack/compute/servers.py:587 -msgid "Userdata content cannot be decoded" +#: cinder/backup/drivers/swift.py:362 +#, python-format +msgid "" +"restoring object from swift. backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:594 -msgid "accessIPv4 is not proper IPv4 format" +#: cinder/backup/drivers/swift.py:378 +#, python-format +msgid "decompressing data using %s algorithm" msgstr "" -#: cinder/api/openstack/compute/servers.py:601 -msgid "accessIPv6 is not proper IPv6 format" +#: cinder/backup/drivers/swift.py:401 +#, python-format +msgid "v1 swift volume backup restore of %s finished" msgstr "" -#: cinder/api/openstack/compute/servers.py:633 -msgid "Server name is not defined" +#: cinder/backup/drivers/swift.py:409 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:675 -#: cinder/api/openstack/compute/servers.py:740 -msgid "Invalid flavorRef provided." +#: cinder/backup/drivers/swift.py:423 +#, python-format +msgid "Restoring swift backup version %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:737 -msgid "Can not find requested image" +#: cinder/backup/drivers/swift.py:428 +#, python-format +msgid "No support to restore swift backup version %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:743 -msgid "Invalid key_name provided." 
+#: cinder/backup/drivers/swift.py:432 cinder/backup/drivers/tsm.py:378 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." msgstr "" -#: cinder/api/openstack/compute/servers.py:829 -#: cinder/api/openstack/compute/servers.py:849 -msgid "Instance has not been resized." +#: cinder/backup/drivers/swift.py:446 +msgid "swift error while listing objects, continuing with delete" msgstr "" -#: cinder/api/openstack/compute/servers.py:835 +#: cinder/backup/drivers/swift.py:455 #, python-format -msgid "Error in confirm-resize %s" +msgid "swift error while deleting object %s, continuing with delete" msgstr "" -#: cinder/api/openstack/compute/servers.py:855 +#: cinder/backup/drivers/swift.py:458 #, python-format -msgid "Error in revert-resize %s" +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:868 -msgid "Argument 'type' for reboot is not HARD or SOFT" +#: cinder/backup/drivers/swift.py:468 cinder/backup/drivers/tsm.py:440 +#, python-format +msgid "delete %s finished" msgstr "" -#: cinder/api/openstack/compute/servers.py:872 -msgid "Missing argument 'type' for reboot" +#: cinder/backup/drivers/tsm.py:85 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to create device hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:885 +#: cinder/backup/drivers/tsm.py:143 #, python-format -msgid "Error in reboot %s" +msgid "" +"backup: %(vol_id)s Failed to obtain backup success notification from " +"server.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:897 -msgid "Unable to locate requested flavor." +#: cinder/backup/drivers/tsm.py:173 +#, python-format +msgid "" +"restore: %(vol_id)s Failed.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:900 -msgid "Resize requires a change in size." +#: cinder/backup/drivers/tsm.py:199 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a block device." msgstr "" -#: cinder/api/openstack/compute/servers.py:924 -msgid "Malformed server entity" +#: cinder/backup/drivers/tsm.py:206 +#, python-format +msgid "backup: %(vol_id)s Failed. Cannot obtain real path to device %(path)s." msgstr "" -#: cinder/api/openstack/compute/servers.py:931 -msgid "Missing imageRef attribute" +#: cinder/backup/drivers/tsm.py:213 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a file." msgstr "" -#: cinder/api/openstack/compute/servers.py:940 -msgid "Invalid imageRef provided." 
+#: cinder/backup/drivers/tsm.py:260 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to remove backup hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:949 -msgid "Missing flavorRef attribute" +#: cinder/backup/drivers/tsm.py:286 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to TSM, volume path: " +"%(volume_path)s," msgstr "" -#: cinder/api/openstack/compute/servers.py:962 -msgid "No adminPass was specified" +#: cinder/backup/drivers/tsm.py:298 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:966 -#: cinder/api/openstack/compute/servers.py:1144 -msgid "Invalid adminPass" +#: cinder/backup/drivers/tsm.py:308 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:980 -msgid "Unable to parse metadata key/value pairs." +#: cinder/backup/drivers/tsm.py:338 +#, python-format +msgid "" +"restore: starting restore of backup from TSM to volume %(volume_id)s, " +"backup: %(backup_id)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:993 -msgid "Resize request has invalid 'flavorRef' attribute." +#: cinder/backup/drivers/tsm.py:352 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:996 -msgid "Resize requests require 'flavorRef' attribute." +#: cinder/backup/drivers/tsm.py:362 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:1014 -#: cinder/api/openstack/compute/contrib/aggregates.py:142 -#: cinder/api/openstack/compute/contrib/networks.py:65 -msgid "Invalid request body" +#: cinder/backup/drivers/tsm.py:413 +#, python-format +msgid "" +"delete: %(vol_id)s Failed to run dsmc with stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:1019 -msgid "Could not parse imageRef from request." +#: cinder/backup/drivers/tsm.py:421 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments with " +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:1071 -msgid "Instance could not be found" +#: cinder/backup/drivers/tsm.py:432 +#, python-format +msgid "" +"delete: %(vol_id)s Failed with stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:1074 -msgid "Cannot find image for rebuild" +#: cinder/brick/exception.py:55 +#, python-format +msgid "Exception in string format operation. msg='%s'" msgstr "" -#: cinder/api/openstack/compute/servers.py:1103 -msgid "createImage entity requires name attribute" +#: cinder/brick/exception.py:85 +msgid "We are unable to locate any Fibre Channel devices." msgstr "" -#: cinder/api/openstack/compute/servers.py:1112 -#: cinder/api/openstack/compute/contrib/admin_actions.py:238 -msgid "Invalid metadata" +#: cinder/brick/exception.py:89 +msgid "Unable to find a Fibre Channel volume device." 
msgstr "" -#: cinder/api/openstack/compute/servers.py:1167 +#: cinder/brick/exception.py:93 #, python-format -msgid "Removing options '%(unk_opt_str)s' from query" +msgid "Volume device not found at %(device)s." msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:60 +#: cinder/brick/exception.py:97 #, python-format -msgid "Compute.api::pause %s" -msgstr "Compute.api::pause %s" +msgid "Unable to find Volume Group: %(vg_name)s" +msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:77 +#: cinder/brick/exception.py:101 #, python-format -msgid "Compute.api::unpause %s" -msgstr "Compute.api::unpause %s" +msgid "Failed to create Volume Group: %(vg_name)s" +msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:94 -#, python-format -msgid "compute.api::suspend %s" -msgstr "compute.api::suspend %s" +#: cinder/brick/exception.py:105 +#, fuzzy, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "Failed to create iscsi target for volume %(volume_id)s." -#: cinder/api/openstack/compute/contrib/admin_actions.py:111 -#, python-format -msgid "compute.api::resume %s" -msgstr "compute.api::resume %s" +#: cinder/brick/exception.py:109 +#, fuzzy, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "Failed to remove iscsi target for volume %(volume_id)s." -#: cinder/api/openstack/compute/contrib/admin_actions.py:127 +#: cinder/brick/exception.py:113 #, python-format -msgid "Error in migrate %s" +msgid "Failed to attach iSCSI target for volume %(volume_id)s." msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:141 +#: cinder/brick/exception.py:117 #, python-format -msgid "Compute.api::reset_network %s" -msgstr "Compute.api::reset_network %s" - -#: cinder/api/openstack/compute/contrib/admin_actions.py:154 -#: cinder/api/openstack/compute/contrib/admin_actions.py:170 -#: cinder/api/openstack/compute/contrib/admin_actions.py:186 -#: cinder/api/openstack/compute/contrib/multinic.py:41 -#: cinder/api/openstack/compute/contrib/rescue.py:44 -msgid "Server not found" +msgid "Connect to volume via protocol %(protocol)s not supported." msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:157 +#: cinder/brick/initiator/connector.py:127 #, python-format -msgid "Compute.api::inject_network_info %s" +msgid "Invalid InitiatorConnector protocol specified %(protocol)s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:173 +#: cinder/brick/initiator/connector.py:140 #, python-format -msgid "Compute.api::lock %s" -msgstr "Compute.api::lock %s" - -#: cinder/api/openstack/compute/contrib/admin_actions.py:189 -#, python-format -msgid "Compute.api::unlock %s" -msgstr "Compute.api::unlock %s" - -#: cinder/api/openstack/compute/contrib/admin_actions.py:219 -#, python-format -msgid "createBackup entity requires %s attribute" +msgid "Failed to access the device on the path %(path)s: %(error)s %(info)s." msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:223 -msgid "Malformed createBackup entity" +#: cinder/brick/initiator/connector.py:229 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. 
Try" +" number: %(tries)s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:229 -msgid "createBackup attribute 'rotation' must be an integer" +#: cinder/brick/initiator/connector.py:242 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:244 -#: cinder/api/openstack/compute/contrib/console_output.py:47 -#: cinder/api/openstack/compute/contrib/console_output.py:59 -#: cinder/api/openstack/compute/contrib/consoles.py:49 -#: cinder/api/openstack/compute/contrib/consoles.py:60 -#: cinder/api/openstack/compute/contrib/server_action_list.py:49 -#: cinder/api/openstack/compute/contrib/server_diagnostics.py:47 -#: cinder/api/openstack/compute/contrib/server_start_stop.py:38 -msgid "Instance not found" +#: cinder/brick/initiator/connector.py:317 +#, python-format +msgid "Could not find the iSCSI Initiator File %s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:272 -msgid "host and block_migration must be specified." +#: cinder/brick/initiator/connector.py:609 +msgid "We are unable to locate any Fibre Channel devices" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:284 +#: cinder/brick/initiator/connector.py:619 #, python-format -msgid "Live migration of instance %(id)s to host %(host)s failed" +msgid "Looking for Fibre Channel dev %(device)s" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:76 -#, python-format -msgid "" -"Cannot create aggregate with name %(name)s and availability zone " -"%(avail_zone)s" +#: cinder/brick/initiator/connector.py:629 +msgid "Fibre Channel volume device not found." msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:88 +#: cinder/brick/initiator/connector.py:633 #, python-format -msgid "Cannot show aggregate: %(id)s" +msgid "Fibre volume not yet found. Will rescan & retry. Try number: %(tries)s" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:114 +#: cinder/brick/initiator/connector.py:649 #, python-format -msgid "Cannot update aggregate: %(id)s" +msgid "Found Fibre Channel volume %(name)s (after %(tries)s rescans)" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:126 +#: cinder/brick/initiator/connector.py:658 #, python-format -msgid "Cannot delete aggregate: %(id)s" +msgid "Multipath device discovered %(device)s" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:139 +#: cinder/brick/initiator/connector.py:776 #, python-format -msgid "Aggregates does not have %s action" +msgid "AoE volume not yet found at: %(path)s. 
Try number: %(tries)s" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:152 -#: cinder/api/openstack/compute/contrib/aggregates.py:158 +#: cinder/brick/initiator/connector.py:789 #, python-format -msgid "Cannot add host %(host)s in aggregate %(id)s" +msgid "Found AoE device %(path)s (after %(tries)s rediscover)" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:171 -#: cinder/api/openstack/compute/contrib/aggregates.py:175 +#: cinder/brick/initiator/connector.py:815 #, python-format -msgid "Cannot remove host %(host)s in aggregate %(id)s" +msgid "aoe-discover: stdout=%(out)s stderr%(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:195 +#: cinder/brick/initiator/connector.py:825 #, python-format -msgid "Cannot set metadata %(metadata)s in aggregate %(id)s" +msgid "aoe-revalidate %(dev)s: stdout=%(out)s stderr%(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/certificates.py:75 -msgid "Only root certificate can be retrieved." +#: cinder/brick/initiator/connector.py:834 +#, python-format +msgid "aoe-flush %(dev)s: stdout=%(out)s stderr%(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/cloudpipe.py:146 +#: cinder/brick/initiator/connector.py:858 msgid "" -"Unable to claim IP for VPN instances, ensure it isn't running, and try " -"again in a few minutes" +"Connection details not present. RemoteFsClient may not initialize " +"properly." msgstr "" -#: cinder/api/openstack/compute/contrib/consoles.py:44 -msgid "Missing type specification" +#: cinder/brick/initiator/connector.py:915 +msgid "Invalid connection_properties specified no device_path attribute" msgstr "" -#: cinder/api/openstack/compute/contrib/consoles.py:56 -msgid "Invalid type specification" +#: cinder/brick/initiator/linuxfc.py:50 cinder/brick/initiator/linuxfc.py:56 +msgid "systool is not installed" msgstr "" -#: cinder/api/openstack/compute/contrib/disk_config.py:44 +#: cinder/brick/initiator/linuxscsi.py:99 +#: cinder/brick/initiator/linuxscsi.py:107 +#: cinder/brick/initiator/linuxscsi.py:124 #, python-format -msgid "%s must be either 'MANUAL' or 'AUTO'." +msgid "multipath call failed exit (%(code)s)" msgstr "" -#: cinder/api/openstack/compute/contrib/extended_server_attributes.py:77 -#: cinder/api/openstack/compute/contrib/extended_status.py:61 -msgid "Server not found." +#: cinder/brick/initiator/linuxscsi.py:145 +#, python-format +msgid "Couldn't find multipath device %(line)s" msgstr "" -#: cinder/api/openstack/compute/contrib/flavorextradata.py:61 -#: cinder/api/openstack/compute/contrib/flavorextradata.py:91 -msgid "Flavor not found." +#: cinder/brick/initiator/linuxscsi.py:149 +#, python-format +msgid "Found multipath device = %(mdev)s" msgstr "" -#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:49 -#: cinder/api/openstack/compute/contrib/volumetypes.py:158 -msgid "No Request Body" +#: cinder/brick/iscsi/iscsi.py:140 +msgid "Attempting recreate of backing lun..." msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:159 +#: cinder/brick/iscsi/iscsi.py:158 #, python-format -msgid "No more floating ips in pool %s." +msgid "" +"Failed to recover attempt to create iscsi backing lun for volume " +"id:%(vol_id)s: %(e)s" msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:161 -msgid "No more floating ips available." 
+#: cinder/brick/iscsi/iscsi.py:177 +#, python-format +msgid "Creating iscsi_target for: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:201 -#: cinder/api/openstack/compute/contrib/floating_ips.py:230 -#: cinder/api/openstack/compute/contrib/security_groups.py:571 -#: cinder/api/openstack/compute/contrib/security_groups.py:604 -msgid "Missing parameter dict" +#: cinder/brick/iscsi/iscsi.py:184 +#, python-format +msgid "" +"Created volume path %(vp)s,\n" +"content: %(vc)%" msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:204 -#: cinder/api/openstack/compute/contrib/floating_ips.py:233 -msgid "Address not specified" -msgstr "" +#: cinder/brick/iscsi/iscsi.py:216 cinder/brick/iscsi/iscsi.py:365 +#, fuzzy, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s" -#: cinder/api/openstack/compute/contrib/floating_ips.py:213 -msgid "No fixed ips associated to instance" +#: cinder/brick/iscsi/iscsi.py:227 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:216 -msgid "Associate floating ip failed" +#: cinder/brick/iscsi/iscsi.py:258 +#, python-format +msgid "Removing iscsi_target for: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:144 +#: cinder/brick/iscsi/iscsi.py:262 #, python-format -msgid "Invalid status: '%s'" +msgid "Volume path %s does not exist, nothing to remove." msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:148 +#: cinder/brick/iscsi/iscsi.py:280 #, fuzzy, python-format -msgid "Invalid mode: '%s'" -msgstr "Invalid backend: %s" +msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s" -#: cinder/api/openstack/compute/contrib/hosts.py:152 -#, python-format -msgid "Invalid update setting: '%s'" -msgstr "" - -#: cinder/api/openstack/compute/contrib/hosts.py:170 -#, python-format -msgid "Putting host %(host)s in maintenance mode %(mode)s." +#: cinder/brick/iscsi/iscsi.py:290 cinder/brick/iscsi/iscsi.py:550 +msgid "valid iqn needed for show_target" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:181 +#: cinder/brick/iscsi/iscsi.py:375 #, python-format -msgid "Setting host %(host)s to %(state)s." +msgid "Removing iscsi_target for volume: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:230 -msgid "Describe-resource is admin only functionality" +#: cinder/brick/iscsi/iscsi.py:469 +msgid "cinder-rtstool is not installed correctly" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:238 -msgid "Host not found" +#: cinder/brick/iscsi/iscsi.py:489 +#, python-format +msgid "Creating iscsi_target for volume: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/keypairs.py:70 -msgid "Keypair name contains unsafe characters" +#: cinder/brick/iscsi/iscsi.py:513 cinder/brick/iscsi/iscsi.py:522 +#, python-format +msgid "Failed to create iscsi target for volume id:%s." 
msgstr "" -#: cinder/api/openstack/compute/contrib/keypairs.py:95 -msgid "Keypair name must be between 1 and 255 characters long" -msgstr "" +#: cinder/brick/iscsi/iscsi.py:532 +#, fuzzy, python-format +msgid "Removing iscsi_target: %s" +msgstr "Removing iscsi_target: %s" -#: cinder/api/openstack/compute/contrib/keypairs.py:100 +#: cinder/brick/iscsi/iscsi.py:542 #, python-format -msgid "Key pair '%s' already exists." +msgid "Failed to remove iscsi target for volume id:%s." msgstr "" -#: cinder/api/openstack/compute/contrib/multinic.py:52 -msgid "Missing 'networkId' argument for addFixedIp" +#: cinder/brick/iscsi/iscsi.py:571 +#, python-format +msgid "Failed to add initiator iqn %s to target" msgstr "" -#: cinder/api/openstack/compute/contrib/multinic.py:68 -msgid "Missing 'address' argument for removeFixedIp" +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" msgstr "" -#: cinder/api/openstack/compute/contrib/multinic.py:77 +#: cinder/brick/local_dev/lvm.py:76 cinder/brick/local_dev/lvm.py:158 +#: cinder/brick/local_dev/lvm.py:474 cinder/brick/local_dev/lvm.py:503 +#: cinder/brick/local_dev/lvm.py:546 cinder/brick/local_dev/lvm.py:609 #, python-format -msgid "Unable to find address %r" +msgid "Cmd :%s" msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:62 +#: cinder/brick/local_dev/lvm.py:77 cinder/brick/local_dev/lvm.py:159 +#: cinder/brick/local_dev/lvm.py:475 cinder/brick/local_dev/lvm.py:504 +#: cinder/brick/local_dev/lvm.py:547 cinder/brick/local_dev/lvm.py:610 #, python-format -msgid "Network does not have %s action" +msgid "StdOut :%s" msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:70 +#: cinder/brick/local_dev/lvm.py:78 cinder/brick/local_dev/lvm.py:160 +#: cinder/brick/local_dev/lvm.py:476 cinder/brick/local_dev/lvm.py:505 +#: cinder/brick/local_dev/lvm.py:548 cinder/brick/local_dev/lvm.py:611 #, python-format -msgid "Disassociating network with id %s" +msgid "StdErr :%s" msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:74 -#: cinder/api/openstack/compute/contrib/networks.py:91 -#: cinder/api/openstack/compute/contrib/networks.py:101 -msgid "Network not found" -msgstr "" +#: cinder/brick/local_dev/lvm.py:82 +#, fuzzy, python-format +msgid "Unable to locate Volume Group %s" +msgstr "Unable to locate Volume Group %s" -#: cinder/api/openstack/compute/contrib/networks.py:87 -#, python-format -msgid "Showing network with id %s" +#: cinder/brick/local_dev/lvm.py:157 +msgid "Error querying thin pool about data_percent" msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:97 -#, python-format -msgid "Deleting network with id %s" -msgstr "" +#: cinder/brick/local_dev/lvm.py:370 +#, fuzzy, python-format +msgid "Unable to find VG: %s" +msgstr "Unable to find VG: %s" -#: cinder/api/openstack/compute/contrib/scheduler_hints.py:41 -msgid "Malformed scheduler_hints attribute" +#: cinder/brick/local_dev/lvm.py:420 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." 
msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:222 -msgid "Security group id should be integer" +#: cinder/brick/local_dev/lvm.py:473 +msgid "Error creating Volume" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:243 +#: cinder/brick/local_dev/lvm.py:489 #, fuzzy, python-format -msgid "Security group is still in use" -msgstr "Revoke security group ingress %s" +msgid "Unable to find LV: %s" +msgstr "Unable to find LV: %s" -#: cinder/api/openstack/compute/contrib/security_groups.py:295 -#, python-format -msgid "Security group %s already exists" +#: cinder/brick/local_dev/lvm.py:502 +msgid "Error creating snapshot" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:315 -#, python-format -msgid "Security group %s is not a string or unicode" +#: cinder/brick/local_dev/lvm.py:545 +msgid "Error activating LV" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:318 +#: cinder/brick/local_dev/lvm.py:563 #, python-format -msgid "Security group %s cannot be empty." +msgid "Error reported running lvremove: CMD: %(command)s, RESPONSE: %(response)s" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:321 -#, python-format -msgid "Security group %s should not be greater than 255 characters." +#: cinder/brick/local_dev/lvm.py:568 +msgid "Attempting udev settle and retry of lvremove..." msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:348 -msgid "Parent group id is not integer" +#: cinder/brick/local_dev/lvm.py:608 +msgid "Error extending Volume" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:351 -#, python-format -msgid "Security group (%s) not found" +#: cinder/brick/remotefs/remotefs.py:39 +msgid "nfs_mount_point_base required" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:369 -msgid "Not enough parameters to build a valid rule." -msgstr "Not enough parameters to build a valid rule." +#: cinder/brick/remotefs/remotefs.py:45 +msgid "glusterfs_mount_point_base required" +msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:376 +#: cinder/brick/remotefs/remotefs.py:86 #, python-format -msgid "This rule already exists in group %s" -msgstr "This rule already exists in group %s" - -#: cinder/api/openstack/compute/contrib/security_groups.py:414 -msgid "Parent or group id is not integer" +msgid "Already mounted: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:507 -msgid "Rule id is not integer" +#: cinder/common/config.py:125 +msgid "Deploy v1 of the Cinder API." msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:510 -#, python-format -msgid "Rule (%s) not found" +#: cinder/common/config.py:128 +msgid "Deploy v2 of the Cinder API." msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:574 -#: cinder/api/openstack/compute/contrib/security_groups.py:607 -msgid "Security group not specified" +#: cinder/common/sqlalchemyutils.py:66 +#: cinder/openstack/common/db/sqlalchemy/utils.py:72 +msgid "Id not in sort_keys; is sort_keys unique?" 
msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:578 -#: cinder/api/openstack/compute/contrib/security_groups.py:611 -msgid "Security group name cannot be empty" +#: cinder/common/sqlalchemyutils.py:114 +#: cinder/openstack/common/db/sqlalchemy/utils.py:120 +msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "" -#: cinder/api/openstack/compute/contrib/server_start_stop.py:45 +#: cinder/compute/nova.py:97 #, python-format -msgid "start instance %r" +msgid "Novaclient connection created using URL: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/server_start_stop.py:54 -#, python-format -msgid "stop instance %r" -msgstr "" +#: cinder/db/sqlalchemy/api.py:63 +msgid "Use of empty request context is deprecated" +msgstr "Use of empty request context is deprecated" -#: cinder/api/openstack/compute/contrib/volumes.py:73 -#: cinder/api/openstack/volume/volumes.py:106 +#: cinder/db/sqlalchemy/api.py:190 #, python-format -msgid "vol=%s" +msgid "Unrecognized read_deleted value '%s'" msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:146 -#: cinder/api/openstack/volume/volumes.py:184 +#: cinder/db/sqlalchemy/api.py:843 #, python-format -msgid "Delete volume with id: %s" +msgid "Change will make usage less than 0 for the following resources: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:329 +#: cinder/db/sqlalchemy/api.py:1842 #, python-format -msgid "Attach volume %(volume_id)s to instance %(server_id)s at %(device)s" +msgid "VolumeType %s deletion failed, VolumeType in use." msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:481 -#: cinder/api/openstack/volume/snapshots.py:110 +#: cinder/db/sqlalchemy/api.py:2530 #, python-format -msgid "Delete snapshot with id: %s" +msgid "No backup with id %s" msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:524 -#: cinder/api/openstack/volume/snapshots.py:150 +#: cinder/db/sqlalchemy/api.py:2615 +#, fuzzy +msgid "Volume must be available" +msgstr "Volume must be available" + +#: cinder/db/sqlalchemy/api.py:2639 #, python-format -msgid "Create snapshot from volume %s" +msgid "Volume in unexpected state %s, expected awaiting-transfer" msgstr "" -#: cinder/auth/fakeldap.py:33 -msgid "Attempted to instantiate singleton" -msgstr "Attempted to instantiate singleton" - -#: cinder/auth/ldapdriver.py:650 +#: cinder/db/sqlalchemy/api.py:2662 #, python-format msgid "" -"Attempted to remove the last member of a group. Deleting the group at %s " -"instead." +"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " +"%(status)s, expected awaiting-transfer" msgstr "" -"Attempted to remove the last member of a group. Deleting the group at %s " -"instead." -#: cinder/auth/manager.py:298 -#, python-format -msgid "Looking up user: %r" -msgstr "Looking up user: %r" +#: cinder/db/sqlalchemy/migration.py:37 +msgid "version should be an integer" +msgstr "" -#: cinder/auth/manager.py:302 -#, python-format -msgid "Failed authorization for access key %s" -msgstr "Failed authorisation for access key %s" +#: cinder/db/sqlalchemy/migration.py:64 +msgid "Upgrade DB using Essex release first." +msgstr "" -#: cinder/auth/manager.py:308 -#, python-format -msgid "Using project name = user name (%s)" -msgstr "Using project name = user name (%s)" +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:240 +msgid "Exception while creating table." 
+msgstr "" -#: cinder/auth/manager.py:315 -#, python-format -msgid "failed authorization: no project named %(pjid)s (user=%(uname)s)" -msgstr "failed authorisation: no project named %(pjid)s (user=%(uname)s)" +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:269 +msgid "Downgrade from initial Cinder install is unsupported." +msgstr "" -#: cinder/auth/manager.py:324 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:49 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:74 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:105 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:45 +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:48 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:46 #, python-format -msgid "" -"Failed authorization: user %(uname)s not admin and not member of project " -"%(pjname)s" +msgid "Table |%s| not created!" msgstr "" -"Failed authorisation: user %(uname)s not admin and not member of project " -"%(pjname)s" -#: cinder/auth/manager.py:331 cinder/auth/manager.py:343 -#, python-format -msgid "user.secret: %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:127 +msgid "Dropping foreign key reservations_ibfk_1 failed." msgstr "" -#: cinder/auth/manager.py:332 cinder/auth/manager.py:344 -#, python-format -msgid "expected_signature: %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:133 +msgid "quota_classes table not dropped" msgstr "" -#: cinder/auth/manager.py:333 cinder/auth/manager.py:345 -#, python-format -msgid "signature: %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:140 +msgid "quota_usages table not dropped" msgstr "" -#: cinder/auth/manager.py:335 cinder/auth/manager.py:357 -#, python-format -msgid "Invalid signature for user %s" -msgstr "Invalid signature for user %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:147 +msgid "reservations table not dropped" +msgstr "" -#: cinder/auth/manager.py:353 -#, python-format -msgid "host_only_signature: %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:60 +msgid "Exception while creating table 'volume_glance_metadata'" msgstr "" -#: cinder/auth/manager.py:449 -msgid "Must specify project" -msgstr "Must specify project" +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:75 +msgid "volume_glance_metadata table not dropped" +msgstr "" -#: cinder/auth/manager.py:490 -#, python-format -msgid "Adding role %(role)s to user %(uid)s in project %(pid)s" -msgstr "Adding role %(role)s to user %(uid)s in project %(pid)s" +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:68 +msgid "backups table not dropped" +msgstr "" -#: cinder/auth/manager.py:493 -#, python-format -msgid "Adding sitewide role %(role)s to user %(uid)s" -msgstr "Adding sitewide role %(role)s to user %(uid)s" +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:58 +msgid "snapshot_metadata table not dropped" +msgstr "" -#: cinder/auth/manager.py:519 -#, python-format -msgid "Removing role %(role)s from user %(uid)s on project %(pid)s" -msgstr "Removing role %(role)s from user %(uid)s on project %(pid)s" +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:61 +msgid "transfers table not 
dropped" +msgstr "" -#: cinder/auth/manager.py:522 -#, python-format -msgid "Removing sitewide role %(role)s from user %(uid)s" -msgstr "Removing sitewide role %(role)s from user %(uid)s" +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:31 +msgid "migrations table not dropped" +msgstr "" -#: cinder/auth/manager.py:595 +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:61 #, python-format -msgid "Created project %(name)s with manager %(manager_user)s" -msgstr "Created project %(name)s with manager %(manager_user)s" +msgid "Table |%s| not created" +msgstr "" -#: cinder/auth/manager.py:613 +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:37 #, python-format -msgid "modifying project %s" -msgstr "modifying project %s" +msgid "Exception while dropping table %s." +msgstr "" -#: cinder/auth/manager.py:625 +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:100 #, python-format -msgid "Adding user %(uid)s to project %(pid)s" -msgstr "Adding user %(uid)s to project %(pid)s" +msgid "Exception while creating table %s." +msgstr "" -#: cinder/auth/manager.py:646 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:34 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:43 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:49 #, python-format -msgid "Remove user %(uid)s from project %(pid)s" -msgstr "Remove user %(uid)s from project %(pid)s" +msgid "Column |%s| not created!" +msgstr "" -#: cinder/auth/manager.py:676 -#, python-format -msgid "Deleting project %s" -msgstr "Deleting project %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:92 +msgid "encryption_key_id column not dropped from volumes" +msgstr "" -#: cinder/auth/manager.py:734 -#, python-format -msgid "Created user %(rvname)s (admin: %(rvadmin)r)" -msgstr "Created user %(rvname)s (admin: %(rvadmin)r)" +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:100 +msgid "encryption_key_id column not dropped from snapshots" +msgstr "" -#: cinder/auth/manager.py:743 -#, python-format -msgid "Deleting user %s" -msgstr "Deleting user %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:105 +msgid "volume_type_id column not dropped from snapshots" +msgstr "" -#: cinder/auth/manager.py:753 -#, python-format -msgid "Access Key change for user %s" -msgstr "Access Key change for user %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:113 +msgid "encryption table not dropped" +msgstr "" -#: cinder/auth/manager.py:755 -#, python-format -msgid "Secret Key change for user %s" -msgstr "Secret Key change for user %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:49 +msgid "Table quality_of_service_specs not created!" +msgstr "" -#: cinder/auth/manager.py:757 -#, python-format -msgid "Admin status set to %(admin)r for user %(uid)s" -msgstr "Admin status set to %(admin)r for user %(uid)s" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:60 +msgid "Added qos_specs_id column to volume type table failed." 
+msgstr "" -#: cinder/auth/manager.py:802 -#, python-format -msgid "No vpn data for project %s" -msgstr "No vpn data for project %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:85 +msgid "Dropping foreign key volume_types_ibfk_1 failed" +msgstr "" -#: cinder/cloudpipe/pipelib.py:46 -#, fuzzy, python-format -msgid "Instance type for vpn instances" -msgstr "Get console output for instance %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:93 +msgid "Dropping qos_specs_id column failed." +msgstr "" -#: cinder/cloudpipe/pipelib.py:49 -msgid "Template for cloudpipe instance boot script" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:100 +msgid "Dropping quality_of_service_specs table failed." msgstr "" -#: cinder/cloudpipe/pipelib.py:52 -msgid "Network to push into openvpn config" -msgstr "Network to push into openvpn config" +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:59 +msgid "volume_admin_metadata table not dropped" +msgstr "" -#: cinder/cloudpipe/pipelib.py:55 -msgid "Netmask to push into openvpn config" -msgstr "Netmask to push into openvpn config" +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:48 +msgid "" +"Found existing 'default' entries in the quota_classes table. Skipping " +"insertion of default values." +msgstr "" -#: cinder/cloudpipe/pipelib.py:107 -#, python-format -msgid "Launching VPN for %s" -msgstr "Launching VPN for %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:72 +msgid "Added default quota class data into the DB." +msgstr "" -#: cinder/compute/api.py:141 -msgid "No compute host specified" +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:74 +msgid "Default quota class data not inserted into the DB." msgstr "" -#: cinder/compute/api.py:144 +#: cinder/image/glance.py:161 cinder/image/glance.py:169 #, python-format -msgid "Unable to find host for Instance %s" +msgid "Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s." msgstr "" -#: cinder/compute/api.py:192 -#, python-format -msgid "" -"Quota exceeded for %(pid)s, tried to set %(num_metadata)s metadata " -"properties" +#: cinder/image/image_utils.py:94 cinder/image/image_utils.py:199 +msgid "'qemu-img info' parsing failed." msgstr "" -#: cinder/compute/api.py:203 +#: cinder/image/image_utils.py:101 #, python-format -msgid "Quota exceeded for %(pid)s, metadata property key or value too long" +msgid "fmt=%(fmt)s backed by: %(backing_file)s" msgstr "" -#: cinder/compute/api.py:257 -#, fuzzy, python-format -msgid "Cannot run any more instances of this type." -msgstr "Instance quota exceeded. You can only run %s more instances of this type." - -#: cinder/compute/api.py:259 -#, fuzzy, python-format -msgid "Can only run %s more instances of this type." -msgstr "Instance quota exceeded. You can only run %s more instances of this type." - -#: cinder/compute/api.py:261 +#: cinder/image/image_utils.py:109 cinder/image/image_utils.py:192 #, python-format -msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances. " +msgid "" +"Size is %(image_size)dGB and doesn't fit in a volume of size " +"%(volume_size)dGB." 
msgstr "" -#: cinder/compute/api.py:310 -msgid "Creating a raw instance" -msgstr "Creating a raw instance" - -#: cinder/compute/api.py:312 +#: cinder/image/image_utils.py:157 #, python-format -msgid "Using Kernel=%(kernel_id)s, Ramdisk=%(ramdisk_id)s" +msgid "" +"qemu-img is not installed and image is of type %s. Only RAW images can " +"be used if qemu-img is not installed." msgstr "" -#: cinder/compute/api.py:383 -#, python-format -msgid "Going to run %s instances..." -msgstr "Going to run %s instances..." +#: cinder/image/image_utils.py:164 +msgid "" +"qemu-img is not installed and the disk format is not specified. Only RAW" +" images can be used if qemu-img is not installed." +msgstr "" -#: cinder/compute/api.py:447 +#: cinder/image/image_utils.py:178 #, python-format -msgid "bdm %s" +msgid "Copying image from %(tmp)s to volume %(dest)s - size: %(size)s" msgstr "" -#: cinder/compute/api.py:474 +#: cinder/image/image_utils.py:206 #, python-format -msgid "block_device_mapping %s" +msgid "fmt=%(fmt)s backed by:%(backing_file)s" msgstr "" -#: cinder/compute/api.py:591 +#: cinder/image/image_utils.py:224 #, python-format -msgid "Sending create to scheduler for %(pid)s/%(uid)s's" +msgid "Converted to %(vol_format)s, but format is now %(file_format)s" msgstr "" -#: cinder/compute/api.py:871 -#, fuzzy, python-format -msgid "Going to try to soft delete instance" -msgstr "Going to try to terminate %s" - -#: cinder/compute/api.py:891 -msgid "No host for instance, deleting immediately" +#: cinder/image/image_utils.py:260 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" msgstr "" -#: cinder/compute/api.py:939 -#, fuzzy, python-format -msgid "Going to try to terminate instance" -msgstr "Going to try to terminate %s" +#: cinder/keymgr/conf_key_mgr.py:78 +msgid "" +"config option keymgr.fixed_key has not been defined: some operations may " +"fail unexpectedly" +msgstr "" -#: cinder/compute/api.py:977 -#, fuzzy, python-format -msgid "Going to try to stop instance" -msgstr "Going to try to terminate %s" +#: cinder/keymgr/conf_key_mgr.py:80 +msgid "keymgr.fixed_key not defined" +msgstr "" -#: cinder/compute/api.py:996 -#, fuzzy, python-format -msgid "Going to try to start instance" -msgstr "Going to try to terminate %s" +#: cinder/keymgr/conf_key_mgr.py:134 +#, python-format +msgid "Not deleting key %s" +msgstr "" -#: cinder/compute/api.py:1000 +#: cinder/openstack/common/eventlet_backdoor.py:140 #, python-format -msgid "Instance %(instance_uuid)s is not stopped. (%(vm_state)s" +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" msgstr "" -#: cinder/compute/api.py:1071 cinder/volume/api.py:173 -#: cinder/volume/volume_types.py:64 +#: cinder/openstack/common/excutils.py:48 #, python-format -msgid "Searching by: %s" +msgid "Original exception being dropped: %s" msgstr "" -#: cinder/compute/api.py:1201 +#: cinder/openstack/common/fileutils.py:64 #, python-format -msgid "Image type not recognized %s" +msgid "Reloading cached file %s" msgstr "" -#: cinder/compute/api.py:1369 -msgid "flavor_id is None. Assuming migration." +#: cinder/openstack/common/gettextutils.py:252 +msgid "Message objects do not support addition." msgstr "" -#: cinder/compute/api.py:1377 -#, python-format +#: cinder/openstack/common/gettextutils.py:261 msgid "" -"Old instance type %(current_instance_type_name)s, new instance type " -"%(new_instance_type_name)s" +"Message objects do not support str() because they may contain non-ascii " +"characters. Please use unicode() or translate() instead." 
+msgstr "" + +#: cinder/openstack/common/imageutils.py:96 +msgid "Snapshot list encountered but no header found!" msgstr "" -#: cinder/compute/api.py:1644 +#: cinder/openstack/common/lockutils.py:102 #, python-format -msgid "multiple fixedips exist, using the first: %s" +msgid "Could not release the acquired lock `%s`" msgstr "" -#: cinder/compute/instance_types.py:57 cinder/compute/instance_types.py:65 -msgid "create arguments must be positive integers" +#: cinder/openstack/common/lockutils.py:189 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." msgstr "" -#: cinder/compute/instance_types.py:76 cinder/volume/volume_types.py:41 +#: cinder/openstack/common/lockutils.py:200 #, python-format -msgid "DB error: %s" +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." msgstr "" -#: cinder/compute/instance_types.py:86 +#: cinder/openstack/common/lockutils.py:227 #, python-format -msgid "Instance type %s not found for deletion" +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." msgstr "" -#: cinder/compute/manager.py:138 +#: cinder/openstack/common/lockutils.py:235 #, python-format -msgid "check_instance_lock: decorating: |%s|" -msgstr "check_instance_lock: decorating: |%s|" +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" -#: cinder/compute/manager.py:140 +#: cinder/openstack/common/log.py:301 +#, fuzzy, python-format +msgid "Deprecated: %s" +msgstr "received %s" + +#: cinder/openstack/common/log.py:402 #, python-format -msgid "" -"check_instance_lock: arguments: |%(self)s| |%(context)s| " -"|%(instance_uuid)s|" +msgid "Error loading logging config %(log_config)s: %(err_msg)s" msgstr "" -#: cinder/compute/manager.py:144 +#: cinder/openstack/common/log.py:453 #, python-format -msgid "check_instance_lock: locked: |%s|" -msgstr "check_instance_lock: locked: |%s|" +msgid "syslog facility must be one of: %s" +msgstr "" -#: cinder/compute/manager.py:146 +#: cinder/openstack/common/log.py:623 #, python-format -msgid "check_instance_lock: admin: |%s|" -msgstr "check_instance_lock: admin: |%s|" +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" -#: cinder/compute/manager.py:151 +#: cinder/openstack/common/loopingcall.py:82 #, python-format -msgid "check_instance_lock: executing: |%s|" -msgstr "check_instance_lock: executing: |%s|" +msgid "task run outlasted interval by %s sec" +msgstr "" -#: cinder/compute/manager.py:155 +#: cinder/openstack/common/loopingcall.py:89 +#: cinder/tests/brick/test_brick_connector.py:466 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:129 #, python-format -msgid "check_instance_lock: not executing |%s|" -msgstr "check_instance_lock: not executing |%s|" +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:136 +msgid "in dynamic looping call" +msgstr "" -#: cinder/compute/manager.py:201 +#: cinder/openstack/common/periodic_task.py:43 #, python-format -msgid "Unable to load the virtualization driver: %s" +msgid "Unexpected argument for periodic task creation: %(arg)s." 
msgstr "" -#: cinder/compute/manager.py:223 +#: cinder/openstack/common/periodic_task.py:134 #, python-format -msgid "" -"Instance %(instance_uuid)s has been destroyed from under us while trying " -"to set it to ERROR" +msgid "Skipping periodic task %(task)s because its interval is negative" msgstr "" -#: cinder/compute/manager.py:240 +#: cinder/openstack/common/periodic_task.py:139 #, python-format -msgid "Current state is %(drv_state)s, state in DB is %(db_state)s." +msgid "Skipping periodic task %(task)s because it is disabled" msgstr "" -#: cinder/compute/manager.py:245 -msgid "Rebooting instance after cinder-compute restart." +#: cinder/openstack/common/periodic_task.py:177 +#, python-format +msgid "Running periodic task %(full_task_name)s" msgstr "" -#: cinder/compute/manager.py:255 -msgid "Hypervisor driver does not support firewall rules" +#: cinder/openstack/common/periodic_task.py:186 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" msgstr "" -#: cinder/compute/manager.py:260 -msgid "Checking state" +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." msgstr "" -#: cinder/compute/manager.py:329 +#: cinder/openstack/common/policy.py:163 #, python-format -msgid "Setting up bdm %s" +msgid "Failed to understand rule %(match)r" msgstr "" -#: cinder/compute/manager.py:400 +#: cinder/openstack/common/policy.py:173 #, python-format -msgid "Instance %s already deleted from database. Attempting forceful vm deletion" +msgid "Inheritance-based rules are deprecated; update _check_%s" msgstr "" -#: cinder/compute/manager.py:406 -#, fuzzy, python-format -msgid "Exception encountered while terminating the instance %s" -msgstr "After terminating instances: %s" +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" -#: cinder/compute/manager.py:444 +#: cinder/openstack/common/processutils.py:127 #, python-format -msgid "Instance %s not found." +msgid "Got unknown keyword args to utils.execute: %r" msgstr "" -#: cinder/compute/manager.py:480 -msgid "Instance has already been created" -msgstr "Instance has already been created" +#: cinder/openstack/common/processutils.py:142 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "Running cmd (subprocess): %s" -#: cinder/compute/manager.py:523 +#: cinder/openstack/common/processutils.py:167 +#: cinder/openstack/common/processutils.py:239 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:345 #, python-format -msgid "" -"image_id=%(image_id)s, image_size_bytes=%(size_bytes)d, " -"allowed_size_bytes=%(allowed_size_bytes)d" -msgstr "" +msgid "Result was %s" +msgstr "Result was %s" -#: cinder/compute/manager.py:528 +#: cinder/openstack/common/processutils.py:179 #, python-format -msgid "" -"Image '%(image_id)s' size %(size_bytes)d exceeded instance_type allowed " -"size %(allowed_size_bytes)d" +msgid "%r failed. Retrying." msgstr "" -#: cinder/compute/manager.py:538 -msgid "Starting instance..." 
-msgstr "" +#: cinder/openstack/common/processutils.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:318 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "Running cmd (SSH): %s" -#: cinder/compute/manager.py:548 -msgid "Skipping network allocation for instance" +#: cinder/openstack/common/processutils.py:220 +msgid "Environment not supported over SSH" msgstr "" -#: cinder/compute/manager.py:561 -msgid "Instance failed network setup" +#: cinder/openstack/common/processutils.py:224 +msgid "process_input not supported over SSH" msgstr "" -#: cinder/compute/manager.py:565 +#: cinder/openstack/common/service.py:175 +#: cinder/openstack/common/service.py:269 #, python-format -msgid "Instance network_info: |%s|" +msgid "Caught %s, exiting" msgstr "" -#: cinder/compute/manager.py:578 -msgid "Instance failed block device setup" +#: cinder/openstack/common/service.py:187 +msgid "Exception during rpc cleanup." msgstr "" -#: cinder/compute/manager.py:594 -msgid "Instance failed to spawn" +#: cinder/openstack/common/service.py:238 +msgid "Parent process has died unexpectedly, exiting" msgstr "" -#: cinder/compute/manager.py:615 -msgid "Deallocating network for instance" +#: cinder/openstack/common/service.py:275 +#, fuzzy +msgid "Unhandled exception" +msgstr "Unhandled exception" + +#: cinder/openstack/common/service.py:308 +msgid "Forking too fast, sleeping" msgstr "" -#: cinder/compute/manager.py:672 +#: cinder/openstack/common/service.py:327 #, python-format -msgid "%(action_str)s instance" +msgid "Started child %d" msgstr "" -#: cinder/compute/manager.py:699 +#: cinder/openstack/common/service.py:337 +#, fuzzy, python-format +msgid "Starting %d workers" +msgstr "Starting %d workers" + +#: cinder/openstack/common/service.py:354 #, python-format -msgid "Ignoring DiskNotFound: %s" +msgid "Child %(pid)d killed by signal %(sig)d" msgstr "" -#: cinder/compute/manager.py:708 +#: cinder/openstack/common/service.py:358 #, python-format -msgid "terminating bdm %s" +msgid "Child %(pid)s exited with status %(code)d" msgstr "" -#: cinder/compute/manager.py:742 cinder/compute/manager.py:1328 -#: cinder/compute/manager.py:1416 cinder/compute/manager.py:2501 +#: cinder/openstack/common/service.py:362 #, python-format -msgid "%s. Setting instance vm_state to ERROR" +msgid "pid %d not in child list" msgstr "" -#: cinder/compute/manager.py:811 +#: cinder/openstack/common/service.py:392 #, python-format -msgid "" -"Cannot rebuild instance [%(instance_uuid)s], because the given image does" -" not exist." 
+msgid "Caught %s, stopping children" msgstr "" -#: cinder/compute/manager.py:816 +#: cinder/openstack/common/service.py:410 #, python-format -msgid "Cannot rebuild instance [%(instance_uuid)s]: %(exc)s" +msgid "Waiting on %d children to exit" msgstr "" -#: cinder/compute/manager.py:823 +#: cinder/openstack/common/sslutils.py:98 #, python-format -msgid "Rebuilding instance %s" +msgid "Invalid SSL version : %s" msgstr "" -#: cinder/compute/manager.py:876 +#: cinder/openstack/common/strutils.py:86 #, python-format -msgid "Rebooting instance %s" -msgstr "Rebooting instance %s" +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" -#: cinder/compute/manager.py:891 +#: cinder/openstack/common/strutils.py:182 #, python-format -msgid "" -"trying to reboot a non-running instance: %(instance_uuid)s (state: " -"%(state)s expected: %(running)s)" +msgid "Invalid string format: %s" msgstr "" -#: cinder/compute/manager.py:933 +#: cinder/openstack/common/strutils.py:189 #, python-format -msgid "instance %s: snapshotting" -msgstr "instance %s: snapshotting" +msgid "Unknown byte multiplier: %s" +msgstr "" -#: cinder/compute/manager.py:939 +#: cinder/openstack/common/versionutils.py:69 #, python-format msgid "" -"trying to snapshot a non-running instance: %(instance_uuid)s (state: " -"%(state)s expected: %(running)s)" +"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and " +"may be removed in %(remove_in)s." msgstr "" -#: cinder/compute/manager.py:995 +#: cinder/openstack/common/versionutils.py:73 #, python-format -msgid "Found %(num_images)d images (rotation: %(rotation)d)" +msgid "" +"%(what)s is deprecated as of %(as_of)s and may be removed in " +"%(remove_in)s. It will not be superseded." msgstr "" -#: cinder/compute/manager.py:1001 -#, python-format -msgid "Rotating out %d backups" +#: cinder/openstack/common/crypto/utils.py:29 +msgid "An unknown error occurred in crypto utils." msgstr "" -#: cinder/compute/manager.py:1005 +#: cinder/openstack/common/crypto/utils.py:36 #, python-format -msgid "Deleting image %s" +msgid "Block size of %(given)d is too big, max = %(maximum)d" msgstr "" -#: cinder/compute/manager.py:1035 +#: cinder/openstack/common/crypto/utils.py:45 #, python-format -msgid "Failed to set admin password. Instance %s is not running" +msgid "Length of %(given)d is too long, max = %(maximum)d" msgstr "" -#: cinder/compute/manager.py:1041 -#, python-format -msgid "Instance %s: Root password set" +#: cinder/openstack/common/db/exception.py:44 +msgid "Invalid Parameter: Unicode is not supported by the current database." msgstr "" -#: cinder/compute/manager.py:1050 -msgid "set_admin_password is not implemented by this driver." +#: cinder/openstack/common/db/sqlalchemy/session.py:487 +msgid "DB exception wrapped." msgstr "" -#: cinder/compute/manager.py:1064 -msgid "Error setting admin password" +#: cinder/openstack/common/db/sqlalchemy/session.py:538 +#, python-format +msgid "Got mysql server has gone away: %s" msgstr "" -#: cinder/compute/manager.py:1079 +#: cinder/openstack/common/db/sqlalchemy/session.py:610 #, python-format -msgid "" -"trying to inject a file into a non-running instance: %(instance_uuid)s " -"(state: %(current_power_state)s expected: %(expected_state)s)" +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/utils.py:33 +msgid "Sort key supplied was not valid." 
msgstr ""

-#: cinder/compute/manager.py:1084
+#: cinder/openstack/common/notifier/api.py:129
 #, python-format
-msgid "instance %(instance_uuid)s: injecting file to %(path)s"
+msgid "%s not in valid priorities"
 msgstr ""

-#: cinder/compute/manager.py:1098
+#: cinder/openstack/common/notifier/api.py:145
 #, python-format
 msgid ""
-"trying to update agent on a non-running instance: %(instance_uuid)s "
-"(state: %(current_power_state)s expected: %(expected_state)s)"
+"Problem '%(e)s' attempting to send to notification system. "
+"Payload=%(payload)s"
 msgstr ""

-#: cinder/compute/manager.py:1103
+#: cinder/openstack/common/notifier/api.py:164
 #, python-format
-msgid "instance %(instance_uuid)s: updating agent to %(url)s"
+msgid "Failed to load notifier %s. These notifications will not be sent."
 msgstr ""

-#: cinder/compute/manager.py:1116
+#: cinder/openstack/common/notifier/rabbit_notifier.py:27
+msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead."
+msgstr ""
+
+#: cinder/openstack/common/notifier/rpc_notifier.py:45
+#: cinder/openstack/common/notifier/rpc_notifier2.py:51
 #, python-format
-msgid "instance %s: rescuing"
-msgstr "instance %s: rescuing"
+msgid "Could not send notification to %(topic)s. Payload=%(message)s"
+msgstr ""

-#: cinder/compute/manager.py:1141
+#: cinder/openstack/common/rpc/__init__.py:105
 #, python-format
-msgid "instance %s: unrescuing"
-msgstr "instance %s: unrescuing"
+msgid ""
+"A RPC is being made while holding a lock. The locks currently held are "
+"%(locks)s. This is probably a bug. Please report it. Include the "
+"following: [%(stack)s]."
+msgstr ""

-#: cinder/compute/manager.py:1270
-msgid "destination same as source!"
+#: cinder/openstack/common/rpc/amqp.py:83
+msgid "Pool creating new connection"
 msgstr ""

-#: cinder/compute/manager.py:1287
+#: cinder/openstack/common/rpc/amqp.py:208
 #, python-format
-msgid "instance %s: migrating"
+msgid "no calling threads waiting for msg_id : %s, message : %s"
 msgstr ""

-#: cinder/compute/manager.py:1471
+#: cinder/openstack/common/rpc/amqp.py:216
 #, python-format
-msgid "instance %s: pausing"
-msgstr "instance %s: pausing"
+msgid ""
+"Number of call waiters is greater than warning threshold: %d. There "
+"could be a MulticallProxyWaiter leak."
+msgstr ""

-#: cinder/compute/manager.py:1489
+#: cinder/openstack/common/rpc/amqp.py:299
 #, python-format
-msgid "instance %s: unpausing"
-msgstr "instance %s: unpausing"
+msgid "unpacked context: %s"
+msgstr "unpacked context: %s"

-#: cinder/compute/manager.py:1525
+#: cinder/openstack/common/rpc/amqp.py:345
 #, python-format
-msgid "instance %s: retrieving diagnostics"
-msgstr "instance %s: retrieving diagnostics"
+msgid "UNIQUE_ID is %s."
+msgstr "" -#: cinder/compute/manager.py:1534 +#: cinder/openstack/common/rpc/amqp.py:414 #, python-format -msgid "instance %s: suspending" -msgstr "instance %s: suspending" +msgid "received %s" +msgstr "received %s" -#: cinder/compute/manager.py:1556 +#: cinder/openstack/common/rpc/amqp.py:422 #, python-format -msgid "instance %s: resuming" -msgstr "instance %s: resuming" +msgid "no method for message: %s" +msgstr "no method for message: %s" -#: cinder/compute/manager.py:1579 +#: cinder/openstack/common/rpc/amqp.py:423 #, python-format -msgid "instance %s: locking" -msgstr "instance %s: locking" +msgid "No method for message: %s" +msgstr "No method for message: %s" -#: cinder/compute/manager.py:1588 +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:280 #, python-format -msgid "instance %s: unlocking" -msgstr "instance %s: unlocking" +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:459 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" +msgstr "" -#: cinder/compute/manager.py:1596 +#: cinder/openstack/common/rpc/amqp.py:594 #, python-format -msgid "instance %s: getting locked state" -msgstr "instance %s: getting locked state" +msgid "Making synchronous call on %s ..." +msgstr "" -#: cinder/compute/manager.py:1606 +#: cinder/openstack/common/rpc/amqp.py:597 #, python-format -msgid "instance %s: reset network" -msgstr "instance %s: reset network" +msgid "MSG_ID is %s" +msgstr "MSG_ID is %s" -#: cinder/compute/manager.py:1614 +#: cinder/openstack/common/rpc/amqp.py:631 #, python-format -msgid "instance %s: inject network info" +msgid "Making asynchronous cast on %s..." msgstr "" -#: cinder/compute/manager.py:1618 -#, python-format -msgid "network_info to inject: |%s|" +#: cinder/openstack/common/rpc/amqp.py:640 +msgid "Making asynchronous fanout cast..." msgstr "" -#: cinder/compute/manager.py:1655 +#: cinder/openstack/common/rpc/amqp.py:668 #, python-format -msgid "instance %s: getting vnc console" +msgid "Sending %(event_type)s on %(topic)s" msgstr "" -#: cinder/compute/manager.py:1685 -#, python-format -msgid "Booting with volume %(volume_id)s at %(mountpoint)s" +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." msgstr "" -#: cinder/compute/manager.py:1703 +#: cinder/openstack/common/rpc/common.py:104 #, python-format msgid "" -"instance %(instance_uuid)s: attaching volume %(volume_id)s to " -"%(mountpoint)s" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." msgstr "" -#: cinder/compute/manager.py:1705 +#: cinder/openstack/common/rpc/common.py:121 #, python-format -msgid "Attaching volume %(volume_id)s to %(mountpoint)s" +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" msgstr "" -#: cinder/compute/manager.py:1714 -#, python-format -msgid "instance %(instance_uuid)s: attach failed %(mountpoint)s, removing" +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" msgstr "" -#: cinder/compute/manager.py:1724 +#: cinder/openstack/common/rpc/common.py:143 #, python-format -msgid "Attach failed %(mountpoint)s, removing" +msgid "Found duplicate message(%(msg_id)s). Skipping it." 
msgstr "" -#: cinder/compute/manager.py:1752 -#, python-format -msgid "Detach volume %(volume_id)s from mountpoint %(mp)s" +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." msgstr "" -#: cinder/compute/manager.py:1756 +#: cinder/openstack/common/rpc/common.py:151 #, python-format -msgid "Detaching volume from unknown instance %s" -msgstr "Detaching volume from unknown instance %s" +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" -#: cinder/compute/manager.py:1822 +#: cinder/openstack/common/rpc/common.py:156 #, python-format msgid "" -"Creating tmpfile %s to notify to other compute nodes that they should " -"mount the same storage." -msgstr "" - -#: cinder/compute/manager.py:1884 -msgid "Instance has no volume." +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." msgstr "" -#: cinder/compute/manager.py:1916 +#: cinder/openstack/common/rpc/common.py:280 #, python-format -msgid "plug_vifs() failed %(cnt)d.Retry up to %(max_retry)d for %(hostname)s." +msgid "Failed to sanitize %(item)s. Key error %(err)s" msgstr "" -#: cinder/compute/manager.py:1973 +#: cinder/openstack/common/rpc/common.py:302 #, python-format -msgid "Pre live migration failed at %(dest)s" -msgstr "" +msgid "Returning exception %s to caller" +msgstr "Returning exception %s to caller" -#: cinder/compute/manager.py:2000 -msgid "post_live_migration() is started.." +#: cinder/openstack/common/rpc/impl_kombu.py:166 +#: cinder/openstack/common/rpc/impl_qpid.py:164 +msgid "Failed to process message... skipping it." msgstr "" -#: cinder/compute/manager.py:2030 -msgid "No floating_ip found" +#: cinder/openstack/common/rpc/impl_kombu.py:477 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" msgstr "" -#: cinder/compute/manager.py:2038 -msgid "No floating_ip found." +#: cinder/openstack/common/rpc/impl_kombu.py:499 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" msgstr "" -#: cinder/compute/manager.py:2040 +#: cinder/openstack/common/rpc/impl_kombu.py:536 #, python-format msgid "" -"Live migration: Unexpected error: cannot inherit floating ip.\n" -"%(e)s" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" msgstr "" -#: cinder/compute/manager.py:2073 +#: cinder/openstack/common/rpc/impl_kombu.py:552 #, python-format -msgid "Migrating instance to %(dest)s finished successfully." -msgstr "" - -#: cinder/compute/manager.py:2075 msgid "" -"You may see the error \"libvirt: QEMU error: Domain not found: no domain " -"with matching name.\" This error can be safely ignored." +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." 
msgstr "" -#: cinder/compute/manager.py:2090 -msgid "Post operation of migration started" +#: cinder/openstack/common/rpc/impl_kombu.py:606 +#: cinder/openstack/common/rpc/impl_qpid.py:507 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" msgstr "" -#: cinder/compute/manager.py:2226 +#: cinder/openstack/common/rpc/impl_kombu.py:624 +#: cinder/openstack/common/rpc/impl_qpid.py:522 #, python-format -msgid "Updated the info_cache for instance %s" +msgid "Timed out waiting for RPC response: %s" msgstr "" -#: cinder/compute/manager.py:2255 -msgid "Updating bandwidth usage cache" +#: cinder/openstack/common/rpc/impl_kombu.py:628 +#: cinder/openstack/common/rpc/impl_qpid.py:526 +#, python-format +msgid "Failed to consume message from queue: %s" msgstr "" -#: cinder/compute/manager.py:2277 -msgid "Updating host status" +#: cinder/openstack/common/rpc/impl_kombu.py:667 +#: cinder/openstack/common/rpc/impl_qpid.py:561 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" msgstr "" -#: cinder/compute/manager.py:2305 +#: cinder/openstack/common/rpc/impl_qpid.py:84 #, python-format -msgid "" -"Found %(num_db_instances)s in the database and %(num_vm_instances)s on " -"the hypervisor." +msgid "Invalid value for qpid_topology_version: %d" msgstr "" -#: cinder/compute/manager.py:2331 +#: cinder/openstack/common/rpc/impl_qpid.py:455 #, python-format -msgid "" -"During the sync_power process the instance %(uuid)s has moved from host " -"%(src)s to host %(dst)s" +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" msgstr "" -#: cinder/compute/manager.py:2344 +#: cinder/openstack/common/rpc/impl_qpid.py:461 #, python-format -msgid "" -"Instance %s is in the process of migrating to this host. Wait next " -"sync_power cycle before setting power state to NOSTATE" +msgid "Connected to AMQP server on %s" msgstr "" -#: cinder/compute/manager.py:2350 -msgid "" -"Instance found in database but not known by hypervisor. Setting power " -"state to NOSTATE" +#: cinder/openstack/common/rpc/impl_qpid.py:474 +msgid "Re-established AMQP queues" msgstr "" -#: cinder/compute/manager.py:2380 -msgid "FLAGS.reclaim_instance_interval <= 0, skipping..." +#: cinder/openstack/common/rpc/impl_qpid.py:534 +msgid "Error processing message. Skipping it." msgstr "" -#: cinder/compute/manager.py:2392 -msgid "Reclaiming deleted instance" +#: cinder/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." msgstr "" -#: cinder/compute/manager.py:2458 +#: cinder/openstack/common/rpc/impl_zmq.py:101 #, python-format -msgid "" -"Detected instance with name label '%(name)s' which is marked as DELETED " -"but still present on host." +msgid "Deserializing: %s" msgstr "" -#: cinder/compute/manager.py:2465 +#: cinder/openstack/common/rpc/impl_zmq.py:136 #, python-format -msgid "" -"Destroying instance with name label '%(name)s' which is marked as DELETED" -" but still present on host." 
+msgid "Connecting to %(addr)s with %(type)s" msgstr "" -#: cinder/compute/manager.py:2472 +#: cinder/openstack/common/rpc/impl_zmq.py:137 #, python-format -msgid "Unrecognized value '%(action)s' for FLAGS.running_deleted_instance_action" +msgid "-> Subscribed to %(subscribe)s" msgstr "" -#: cinder/compute/manager.py:2542 +#: cinder/openstack/common/rpc/impl_zmq.py:138 #, python-format -msgid "" -"Aggregate %(aggregate_id)s: unrecoverable state during operation on " -"%(host)s" +msgid "-> bind: %(bind)s" msgstr "" -#: cinder/compute/utils.py:142 -msgid "v4 subnets are required for legacy nw_info" +#: cinder/openstack/common/rpc/impl_zmq.py:146 +msgid "Could not open socket." msgstr "" -#: cinder/console/manager.py:77 cinder/console/vmrc_manager.py:70 -msgid "Adding console" -msgstr "Adding console" - -#: cinder/console/manager.py:97 +#: cinder/openstack/common/rpc/impl_zmq.py:158 #, python-format -msgid "Tried to remove non-existent console %(console_id)s." -msgstr "Tried to remove non-existent console %(console_id)s." - -#: cinder/console/vmrc_manager.py:122 -#, python-format -msgid "Tried to remove non-existent console %(console_id)s." +msgid "Subscribing to %s" msgstr "" -#: cinder/console/vmrc_manager.py:125 -#, python-format -msgid "Removing console %(console_id)s." +#: cinder/openstack/common/rpc/impl_zmq.py:200 +msgid "You cannot recv on this socket." msgstr "" -#: cinder/console/xvp.py:98 -msgid "Rebuilding xvp conf" -msgstr "Rebuilding xvp conf" - -#: cinder/console/xvp.py:116 -#, python-format -msgid "Re-wrote %s" -msgstr "Re-wrote %s" +#: cinder/openstack/common/rpc/impl_zmq.py:205 +msgid "You cannot send on this socket." +msgstr "" -#: cinder/console/xvp.py:121 -msgid "Stopping xvp" -msgstr "Stopping xvp" +#: cinder/openstack/common/rpc/impl_zmq.py:267 +#, fuzzy, python-format +msgid "Running func with context: %s" +msgstr "Running func with context: %s" -#: cinder/console/xvp.py:134 -msgid "Starting xvp" -msgstr "Starting xvp" +#: cinder/openstack/common/rpc/impl_zmq.py:305 +msgid "Sending reply" +msgstr "" -#: cinder/console/xvp.py:141 -#, python-format -msgid "Error starting xvp: %s" -msgstr "Error starting xvp: %s" +#: cinder/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." +msgstr "" -#: cinder/console/xvp.py:144 -msgid "Restarting xvp" -msgstr "Restarting xvp" +#: cinder/openstack/common/rpc/impl_zmq.py:371 +#, fuzzy +msgid "Registering reactor" +msgstr "Registering reactor" -#: cinder/console/xvp.py:146 -msgid "xvp not running..." -msgstr "xvp not running..." +#: cinder/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" +msgstr "" -#: cinder/consoleauth/manager.py:63 -#, python-format -msgid "Deleting Expired Token: (%s)" +#: cinder/openstack/common/rpc/impl_zmq.py:387 +msgid "Consuming socket" msgstr "" -#: cinder/consoleauth/manager.py:75 +#: cinder/openstack/common/rpc/impl_zmq.py:437 #, python-format -msgid "Received Token: %(token)s, %(token_dict)s)" +msgid "Creating proxy for topic: %s" msgstr "" -#: cinder/consoleauth/manager.py:79 -#, python-format -msgid "Checking Token: %(token)s, %(token_valid)s)" +#: cinder/openstack/common/rpc/impl_zmq.py:443 +msgid "Topic contained dangerous characters." msgstr "" -#: cinder/db/sqlalchemy/api.py:57 -msgid "Use of empty request context is deprecated" -msgstr "Use of empty request context is deprecated" +#: cinder/openstack/common/rpc/impl_zmq.py:475 +msgid "Topic socket file creation failed." 
+msgstr "" -#: cinder/db/sqlalchemy/api.py:198 +#: cinder/openstack/common/rpc/impl_zmq.py:481 #, python-format -msgid "Unrecognized read_deleted value '%s'" +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." msgstr "" -#: cinder/db/sqlalchemy/api.py:516 cinder/db/sqlalchemy/api.py:551 +#: cinder/openstack/common/rpc/impl_zmq.py:497 #, python-format -msgid "No ComputeNode for %(host)s" +msgid "Required IPC directory does not exist at %s" msgstr "" -#: cinder/db/sqlalchemy/api.py:4019 cinder/db/sqlalchemy/api.py:4045 +#: cinder/openstack/common/rpc/impl_zmq.py:506 #, python-format -msgid "No backend config with id %(sm_backend_id)s" +msgid "Permission denied to IPC directory at %s" msgstr "" -#: cinder/db/sqlalchemy/api.py:4103 -#, python-format -msgid "No sm_flavor called %(sm_flavor)s" +#: cinder/openstack/common/rpc/impl_zmq.py:509 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." msgstr "" -#: cinder/db/sqlalchemy/api.py:4147 +#: cinder/openstack/common/rpc/impl_zmq.py:543 #, python-format -msgid "No sm_volume with id %(volume_id)s" +msgid "CONSUMER RECEIVED DATA: %s" msgstr "" -#: cinder/db/sqlalchemy/migration.py:66 -msgid "python-migrate is not installed. Exiting." -msgstr "python-migrate is not installed. Exiting." +#: cinder/openstack/common/rpc/impl_zmq.py:562 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" -#: cinder/db/sqlalchemy/migration.py:78 -msgid "version should be an integer" +#: cinder/openstack/common/rpc/impl_zmq.py:590 +msgid "Skipping topic registration. Already registered." msgstr "" -#: cinder/db/sqlalchemy/session.py:137 +#: cinder/openstack/common/rpc/impl_zmq.py:597 #, python-format -msgid "SQL connection failed. %s attempts left." +msgid "Consumer is a zmq.%s" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:48 -msgid "interface column not added to networks table" +#: cinder/openstack/common/rpc/impl_zmq.py:649 +msgid "Creating payload" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:80 -#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:54 -#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:61 -#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:48 -#, python-format -msgid "Table |%s| not created!" 
+#: cinder/openstack/common/rpc/impl_zmq.py:662 +msgid "Creating queue socket for reply waiter" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:87 -msgid "VIF column not added to fixed_ips table" +#: cinder/openstack/common/rpc/impl_zmq.py:675 +msgid "Sending cast" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:97 -#, python-format -msgid "join list for moving mac_addresses |%s|" +#: cinder/openstack/common/rpc/impl_zmq.py:678 +msgid "Cast sent; Waiting reply" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:39 -#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:60 -#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:61 -#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:99 -msgid "foreign key constraint couldn't be added" -msgstr "" +#: cinder/openstack/common/rpc/impl_zmq.py:681 +#, fuzzy, python-format +msgid "Received message: %s" +msgstr "Received message: %s" -#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:58 -msgid "foreign key constraint couldn't be dropped" +#: cinder/openstack/common/rpc/impl_zmq.py:682 +msgid "Unpacking response" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/045_add_network_priority.py:34 -msgid "priority column not added to networks table" +#: cinder/openstack/common/rpc/impl_zmq.py:691 +msgid "Unsupported or unknown ZMQ envelope returned." msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:41 -#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:42 -#: cinder/db/sqlalchemy/migrate_repo/versions/064_change_instance_id_to_uuid_in_instance_actions.py:56 -#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:68 -msgid "foreign key constraint couldn't be removed" +#: cinder/openstack/common/rpc/impl_zmq.py:698 +msgid "RPC Message Invalid." msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/049_add_instances_progress.py:34 -msgid "progress column not added to instances table" +#: cinder/openstack/common/rpc/impl_zmq.py:721 +#, python-format +msgid "%(msg)s" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/055_convert_flavor_id_to_str.py:97 +#: cinder/openstack/common/rpc/impl_zmq.py:724 #, python-format -msgid "" -"Could not cast flavorid to integer: %s. Set flavorid to an integer-like " -"string to downgrade." +msgid "Sending message(s) to: %s" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:69 -msgid "instance_info_caches tables not dropped" +#: cinder/openstack/common/rpc/impl_zmq.py:728 +msgid "No matchmaker results. Not casting." msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/069_block_migration.py:41 -msgid "progress column not added to compute_nodes table" +#: cinder/openstack/common/rpc/impl_zmq.py:731 +msgid "No match from matchmaker." msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:76 -msgid "dns_domains table not dropped" +#: cinder/openstack/common/rpc/impl_zmq.py:771 +#, python-format +msgid "topic is %s." 
msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:60 -msgid "quota_classes table not dropped" +#: cinder/openstack/common/rpc/impl_zmq.py:815 +#, python-format +msgid "rpc_zmq_matchmaker = %(orig)s is deprecated; use %(new)s instead" msgstr "" -#: cinder/image/glance.py:147 -msgid "Connection error contacting glance server, retrying" +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." msgstr "" -#: cinder/image/glance.py:153 cinder/network/quantum/melange_connection.py:104 -msgid "Maximum attempts reached" -msgstr "" +#: cinder/openstack/common/rpc/matchmaker.py:89 +#, fuzzy +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "Matchmaker does not implement registration or heartbeat." -#: cinder/image/glance.py:278 +#: cinder/openstack/common/rpc/matchmaker.py:239 #, python-format -msgid "Creating image in Glance. Metadata passed in %s" +msgid "Matchmaker unregistered: %s, %s" msgstr "" -#: cinder/image/glance.py:281 -#, python-format -msgid "Metadata after formatting for Glance %s" +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." msgstr "" -#: cinder/image/glance.py:289 +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#: cinder/openstack/common/rpc/matchmaker_ring.py:79 +#: cinder/openstack/common/rpc/matchmaker_ring.py:97 #, python-format -msgid "Metadata returned from Glance formatted for Base %s" +msgid "No key defining hosts for topic '%s', see ringfile" msgstr "" -#: cinder/image/glance.py:331 cinder/image/glance.py:335 -msgid "Not the image owner" +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:54 +#, python-format +msgid "extra_spec requirement '%(req)s' does not match '%(cap)s'" msgstr "" -#: cinder/image/glance.py:410 +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:67 #, python-format -msgid "%(timestamp)s does not follow any of the signatures: %(iso_formats)s" +msgid "%(host_state)s fails resource_type extra_specs requirements" msgstr "" -#: cinder/image/s3.py:309 -#, python-format -msgid "Failed to download %(image_location)s to %(image_path)s" +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:43 +msgid "Re-scheduling is disabled." msgstr "" -#: cinder/image/s3.py:328 +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:52 #, python-format -msgid "Failed to decrypt %(image_location)s to %(image_path)s" +msgid "Host %(host)s %(pass_msg)s. 
Previously tried hosts: %(hosts)s" msgstr "" -#: cinder/image/s3.py:340 -#, python-format -msgid "Failed to untar %(image_location)s to %(image_path)s" +#: cinder/scheduler/driver.py:69 +msgid "Must implement host_passes_filters" msgstr "" -#: cinder/image/s3.py:353 -#, python-format -msgid "Failed to upload %(image_location)s to %(image_path)s" +#: cinder/scheduler/driver.py:74 +msgid "Must implement find_retype_host" msgstr "" -#: cinder/image/s3.py:379 -#, python-format -msgid "Failed to decrypt private key: %s" -msgstr "Failed to decrypt private key: %s" +#: cinder/scheduler/driver.py:78 +msgid "Must implement a fallback schedule" +msgstr "Must implement a fallback schedule" + +#: cinder/scheduler/driver.py:82 +#, fuzzy +msgid "Must implement schedule_create_volume" +msgstr "Must implement schedule_create_volume" -#: cinder/image/s3.py:387 +#: cinder/scheduler/filter_scheduler.py:98 #, python-format -msgid "Failed to decrypt initialization vector: %s" -msgstr "Failed to decrypt initialisation vector: %s" +msgid "cannot place volume %(id)s on %(host)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:114 +#, python-format +msgid "No valid hosts for volume %(id)s with type %(type)s" +msgstr "" -#: cinder/image/s3.py:398 +#: cinder/scheduler/filter_scheduler.py:125 #, python-format -msgid "Failed to decrypt image file %(image_file)s: %(err)s" -msgstr "Failed to decrypt image file %(image_file)s: %(err)s" +msgid "" +"Current host not valid for volume %(id)s with type %(type)s, migration " +"not allowed" +msgstr "" -#: cinder/image/s3.py:410 -msgid "Unsafe filenames in image" +#: cinder/scheduler/filter_scheduler.py:156 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" msgstr "" -#: cinder/ipv6/account_identifier.py:38 cinder/ipv6/rfc2462.py:34 +#: cinder/scheduler/filter_scheduler.py:174 #, python-format -msgid "Bad mac for to_global_ipv6: %s" +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" msgstr "" -#: cinder/ipv6/account_identifier.py:40 cinder/ipv6/rfc2462.py:36 +#: cinder/scheduler/filter_scheduler.py:207 #, python-format -msgid "Bad prefix for to_global_ipv6: %s" +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" msgstr "" -#: cinder/ipv6/account_identifier.py:42 +#: cinder/scheduler/filter_scheduler.py:259 #, python-format -msgid "Bad project_id for to_global_ipv6: %s" +msgid "Filtered %s" msgstr "" -#: cinder/network/ldapdns.py:321 -msgid "This driver only supports type 'a' entries." +#: cinder/scheduler/filter_scheduler.py:276 +#, python-format +msgid "Choosing %s" msgstr "" -#: cinder/network/linux_net.py:166 +#: cinder/scheduler/host_manager.py:264 #, python-format -msgid "Attempted to remove chain %s which does not exist" +msgid "Ignoring %(service_name)s service update from %(host)s" msgstr "" -#: cinder/network/linux_net.py:192 +#: cinder/scheduler/host_manager.py:269 #, python-format -msgid "Unknown chain: %r" +msgid "Received %(service_name)s service update from %(host)s." msgstr "" -#: cinder/network/linux_net.py:215 +#: cinder/scheduler/host_manager.py:294 #, python-format -msgid "" -"Tried to remove rule that was not there: %(chain)r %(rule)r %(wrap)r " -"%(top)r" +msgid "volume service is down or disabled. 
(host: %s)" msgstr "" -#: cinder/network/linux_net.py:335 -msgid "IPTablesManager.apply completed with success" +#: cinder/scheduler/manager.py:63 +msgid "" +"ChanceScheduler and SimpleScheduler have been deprecated due to lack of " +"support for advanced features like: volume types, volume encryption, QoS " +"etc. These two schedulers can be fully replaced by FilterScheduler with " +"certain combination of filters and weighers." msgstr "" -#: cinder/network/linux_net.py:694 -#, python-format -msgid "Hupping dnsmasq threw %s" -msgstr "Hupping dnsmasq threw %s" +#: cinder/scheduler/manager.py:98 cinder/scheduler/manager.py:100 +msgid "Failed to create scheduler manager volume flow" +msgstr "" -#: cinder/network/linux_net.py:696 -#, python-format -msgid "Pid %d is stale, relaunching dnsmasq" -msgstr "Pid %d is stale, relaunching dnsmasq" +#: cinder/scheduler/manager.py:159 +msgid "New volume type not specified in request_spec." +msgstr "" -#: cinder/network/linux_net.py:756 +#: cinder/scheduler/manager.py:174 #, python-format -msgid "killing radvd threw %s" -msgstr "killing radvd threw %s" +msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." +msgstr "" -#: cinder/network/linux_net.py:758 +#: cinder/scheduler/manager.py:192 #, python-format -msgid "Pid %d is stale, relaunching radvd" -msgstr "Pid %d is stale, relaunching radvd" +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" -#: cinder/network/linux_net.py:967 +#: cinder/scheduler/scheduler_options.py:68 #, python-format -msgid "Starting VLAN inteface %s" -msgstr "Starting VLAN inteface %s" +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" -#: cinder/network/linux_net.py:999 +#: cinder/scheduler/scheduler_options.py:78 #, python-format -msgid "Starting Bridge interface for %s" -msgstr "Starting Bridge interface for %s" - -#: cinder/network/linux_net.py:1142 -#, fuzzy, python-format -msgid "Starting bridge %s " -msgstr "Starting Bridge interface for %s" +msgid "Could not decode scheduler options: '%s'" +msgstr "" -#: cinder/network/linux_net.py:1149 -#, fuzzy, python-format -msgid "Done starting bridge %s" -msgstr "Error starting xvp: %s" +#: cinder/scheduler/filters/capacity_filter.py:43 +msgid "Free capacity not set: volume node info collection broken." 
+msgstr "" -#: cinder/network/linux_net.py:1167 +#: cinder/scheduler/filters/capacity_filter.py:57 #, python-format -msgid "Failed unplugging gateway interface '%s'" +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" msgstr "" -#: cinder/network/linux_net.py:1170 -#, python-format -msgid "Unplugged gateway interface '%s'" +#: cinder/scheduler/flows/create_volume.py:53 +msgid "No volume_id provided to populate a request_spec from" msgstr "" -#: cinder/network/manager.py:291 +#: cinder/scheduler/flows/create_volume.py:116 #, python-format -msgid "Fixed ip %(fixed_ip_id)s not found" +msgid "Failed to schedule_create_volume: %(cause)s" msgstr "" -#: cinder/network/manager.py:300 cinder/network/manager.py:496 +#: cinder/scheduler/flows/create_volume.py:135 #, python-format -msgid "Interface %(interface)s not found" +msgid "Failed notifying on %(topic)s payload %(payload)s" msgstr "" -#: cinder/network/manager.py:315 +#: cinder/tests/fake_driver.py:57 cinder/volume/driver.py:784 #, python-format -msgid "floating IP allocation for instance |%s|" -msgstr "" +msgid "FAKE ISCSI: %s" +msgstr "FAKE ISCSI: %s" -#: cinder/network/manager.py:353 +#: cinder/tests/fake_driver.py:76 cinder/volume/driver.py:884 #, python-format -msgid "floating IP deallocation for instance |%s|" +msgid "FAKE ISER: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:97 +msgid "local_path not implemented" msgstr "" -#: cinder/network/manager.py:386 +#: cinder/tests/fake_driver.py:124 cinder/tests/fake_driver.py:129 #, python-format -msgid "Address |%(address)s| is not allocated" +msgid "LoggingVolumeDriver: %s" msgstr "" -#: cinder/network/manager.py:390 +#: cinder/tests/fake_utils.py:70 #, python-format -msgid "Address |%(address)s| is not allocated to your project |%(project)s|" +msgid "Faking execution of cmd (subprocess): %s" msgstr "" -#: cinder/network/manager.py:402 +#: cinder/tests/fake_utils.py:78 #, python-format -msgid "Quota exceeded for %s, tried to allocate address" +msgid "Faked command matched %s" msgstr "" -#: cinder/network/manager.py:614 +#: cinder/tests/fake_utils.py:94 #, python-format -msgid "" -"Database inconsistency: DNS domain |%s| is registered in the Cinder db but " -"not visible to either the floating or instance DNS driver. It will be " -"ignored." +msgid "Faked command raised an exception %s" msgstr "" -#: cinder/network/manager.py:660 +#: cinder/tests/fake_utils.py:97 #, python-format -msgid "Domain |%(domain)s| already exists, changing zone to |%(av_zone)s|." +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" msgstr "" -#: cinder/network/manager.py:670 +#: cinder/tests/test_misc.py:58 #, python-format -msgid "Domain |%(domain)s| already exists, changing project to |%(project)s|." 
+msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" msgstr "" -#: cinder/network/manager.py:778 +#: cinder/tests/test_netapp_nfs.py:360 #, python-format -msgid "Disassociated %s stale fixed ip(s)" +msgid "Share %(share)s and file name %(file_name)s" msgstr "" -#: cinder/network/manager.py:782 -msgid "setting network host" -msgstr "setting network host" +#: cinder/tests/test_rbd.py:768 cinder/volume/drivers/rbd.py:175 +msgid "flush() not supported in this version of librbd" +msgstr "" -#: cinder/network/manager.py:896 +#: cinder/tests/test_storwize_svc.py:260 #, python-format -msgid "network allocations for instance |%s|" +msgid "unrecognized argument %s" msgstr "" -#: cinder/network/manager.py:901 +#: cinder/tests/test_storwize_svc.py:1507 #, python-format -msgid "networks retrieved for instance |%(instance_id)s|: |%(networks)s|" +msgid "Run CLI command: %s" msgstr "" -#: cinder/network/manager.py:930 +#: cinder/tests/test_storwize_svc.py:1510 #, python-format -msgid "network deallocation for instance |%s|" +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" msgstr "" -#: cinder/network/manager.py:1152 +#: cinder/tests/test_storwize_svc.py:1515 #, python-format msgid "" -"instance-dns-zone is |%(domain)s|, which is in availability zone " -"|%(zone)s|. Instance |%(instance)s| is in zone |%(zone2)s|. No DNS record" -" will be created." +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/network/manager.py:1227 +#: cinder/tests/test_volume_types.py:60 #, python-format -msgid "Unable to release %s because vif doesn't exist." +msgid "Given data: %s" msgstr "" -#: cinder/network/manager.py:1244 +#: cinder/tests/test_volume_types.py:61 #, python-format -msgid "Leased IP |%(address)s|" +msgid "Result data: %s" msgstr "" -#: cinder/network/manager.py:1248 +#: cinder/tests/test_xiv_ds8k.py:102 #, python-format -msgid "IP %s leased that is not associated" +msgid "Volume not found for instance %(instance_id)s." 
msgstr "" -#: cinder/network/manager.py:1256 -#, python-format -msgid "IP |%s| leased that isn't allocated" +#: cinder/tests/api/contrib/test_backups.py:741 +msgid "Invalid input" msgstr "" -#: cinder/network/manager.py:1261 +#: cinder/tests/integrated/test_login.py:29 +#, fuzzy, python-format +msgid "volume: %s" +msgstr "volume: %s" + +#: cinder/tests/integrated/api/client.py:32 #, python-format -msgid "Released IP |%(address)s|" +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" msgstr "" -#: cinder/network/manager.py:1265 -#, python-format -msgid "IP %s released that is not associated" +#: cinder/tests/integrated/api/client.py:42 +msgid "Authentication error" msgstr "" -#: cinder/network/manager.py:1268 -#, python-format -msgid "IP %s released that was not leased" -msgstr "IP %s released that was not leased" +#: cinder/tests/integrated/api/client.py:50 +msgid "Authorization error" +msgstr "" -#: cinder/network/manager.py:1331 -msgid "cidr already in use" +#: cinder/tests/integrated/api/client.py:58 +msgid "Item not found" msgstr "" -#: cinder/network/manager.py:1334 +#: cinder/tests/integrated/api/client.py:100 #, python-format -msgid "requested cidr (%(cidr)s) conflicts with existing supernet (%(super)s)" +msgid "Doing %(method)s on %(relative_url)s" msgstr "" -#: cinder/network/manager.py:1345 +#: cinder/tests/integrated/api/client.py:103 #, python-format -msgid "" -"requested cidr (%(cidr)s) conflicts with existing smaller cidr " -"(%(smaller)s)" +msgid "Body: %s" msgstr "" -#: cinder/network/manager.py:1404 -msgid "Network already exists!" +#: cinder/tests/integrated/api/client.py:121 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" msgstr "" -#: cinder/network/manager.py:1423 +#: cinder/tests/integrated/api/client.py:148 #, python-format -msgid "Network must be disassociated from project %s before delete" +msgid "%(relative_uri)s => code %(http_status)s" msgstr "" -#: cinder/network/manager.py:1832 -msgid "" -"The sum between the number of networks and the vlan start cannot be " -"greater than 4094" +#: cinder/tests/integrated/api/client.py:159 +msgid "Unexpected status code" msgstr "" -"The sum between the number of networks and the vlan start cannot be " -"greater than 4094" -#: cinder/network/manager.py:1839 +#: cinder/tests/integrated/api/client.py:166 #, python-format -msgid "" -"The network range is not big enough to fit %(num_networks)s. Network size" -" is %(network_size)s" +msgid "Decoding JSON: %s" msgstr "" -#: cinder/network/minidns.py:65 -msgid "This driver only supports type 'a'" +#: cinder/transfer/api.py:68 +msgid "Volume in unexpected state" msgstr "" -#: cinder/network/quantum/client.py:154 -msgid "Tenant ID not set" +#: cinder/transfer/api.py:102 cinder/volume/api.py:367 +msgid "status must be available" msgstr "" -#: cinder/network/quantum/client.py:180 +#: cinder/transfer/api.py:119 +#, fuzzy, python-format +msgid "Failed to create transfer record for %s" +msgstr "Failed to create transfer record for %s" + +#: cinder/transfer/api.py:136 #, python-format -msgid "Quantum Client Request: %(method)s %(action)s" +msgid "Attempt to transfer %s with invalid auth key." 
msgstr ""

-#: cinder/network/quantum/client.py:196
+#: cinder/transfer/api.py:156 cinder/volume/flows/api/create_volume.py:508
 #, python-format
-msgid "Quantum entity not found: %s"
+msgid ""
+"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume "
+"(%(d_consumed)dG of %(d_quota)dG already consumed)"
 msgstr ""

-#: cinder/network/quantum/client.py:206
+#: cinder/transfer/api.py:182
 #, python-format
-msgid "Server %(status_code)s error: %(data)s"
+msgid "Failed to update quota donating volume transfer id %s"
 msgstr ""

-#: cinder/network/quantum/client.py:210
+#: cinder/transfer/api.py:199
 #, python-format
-msgid "Unable to connect to server. Got error: %s"
+msgid "Volume %s has been transferred."
 msgstr ""

-#: cinder/network/quantum/client.py:228
+#: cinder/volume/api.py:143
 #, python-format
-msgid "unable to deserialize object of type = '%s'"
+msgid "Unable to query if %s is in the availability zone set"
+msgstr ""
+
+#: cinder/volume/api.py:171 cinder/volume/api.py:173
+msgid "Failed to create api volume flow"
 msgstr ""

-#: cinder/network/quantum/manager.py:162
-msgid "QuantumManager does not use 'multi_host' parameter."
+#: cinder/volume/api.py:202
+msgid "Failed to update quota for deleting volume"
 msgstr ""

-#: cinder/network/quantum/manager.py:166
-msgid "QuantumManager requires that only one network is created per call"
+#: cinder/volume/api.py:214
+#, python-format
+msgid "Volume status must be available or error, but current status is: %s"
 msgstr ""

-#: cinder/network/quantum/manager.py:176
-msgid "QuantumManager does not use 'vlan_start' parameter."
+#: cinder/volume/api.py:224
+msgid "Volume cannot be deleted while migrating"
 msgstr ""

-#: cinder/network/quantum/manager.py:182
-msgid "QuantumManager does not use 'vpn_start' parameter."
+#: cinder/volume/api.py:229
+#, python-format
+msgid "Volume still has %d dependent snapshots"
 msgstr ""

-#: cinder/network/quantum/manager.py:186
-msgid "QuantumManager does not use 'bridge' parameter."
+#: cinder/volume/api.py:285 cinder/volume/api.py:350
+#: cinder/volume/qos_specs.py:240 cinder/volume/volume_types.py:67
+#, python-format
+msgid "Searching by: %s"
 msgstr ""

-#: cinder/network/quantum/manager.py:190
-msgid "QuantumManager does not use 'bridge_interface' parameter."
+#: cinder/volume/api.py:370
+msgid "already attached"
 msgstr ""

-#: cinder/network/quantum/manager.py:195
-msgid "QuantumManager requires a valid (.1) gateway address."
+#: cinder/volume/api.py:377 +msgid "status must be in-use to detach" msgstr "" -#: cinder/network/quantum/manager.py:204 -#, python-format -msgid "" -"Unable to find existing quantum network for tenant '%(q_tenant_id)s' with" -" net-id '%(quantum_net_id)s'" +#: cinder/volume/api.py:388 +#, fuzzy +msgid "Volume status must be available to reserve" +msgstr "Volume status must be available to reserve" + +#: cinder/volume/api.py:464 +msgid "Snapshot cannot be created while volume is migrating" msgstr "" -#: cinder/network/quantum/manager.py:301 -#, python-format -msgid "network allocations for instance %s" +#: cinder/volume/api.py:468 +msgid "must be available" msgstr "" -#: cinder/network/quantum/manager.py:588 +#: cinder/volume/api.py:490 #, python-format msgid "" -"port deallocation failed for instance: |%(instance_id)s|, port_id: " -"|%(port_id)s|" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" msgstr "" -#: cinder/network/quantum/manager.py:606 +#: cinder/volume/api.py:502 #, python-format msgid "" -"ipam deallocation failed for instance: |%(instance_id)s|, vif_uuid: " -"|%(vif_uuid)s|" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" msgstr "" -#: cinder/network/quantum/melange_connection.py:96 -#, python-format -msgid "Server returned error: %s" -msgstr "" +#: cinder/volume/api.py:553 +#, fuzzy +msgid "Volume Snapshot status must be available or error" +msgstr "Volume Snapshot status must be available or error" -#: cinder/network/quantum/melange_connection.py:98 -msgid "Connection error contacting melange service, retrying" +#: cinder/volume/api.py:581 cinder/volume/flows/api/create_volume.py:208 +msgid "Metadata property key blank" msgstr "" -#: cinder/network/quantum/melange_connection.py:108 -#, python-format -msgid "" -"allocate IP on network |%(network_id)s| belonging to " -"|%(network_tenant_id)s| to this vif |%(vif_id)s| with mac " -"|%(mac_address)s| belonging to |%(project_id)s| " +#: cinder/volume/api.py:585 +msgid "Metadata property key greater than 255 characters" msgstr "" -#: cinder/network/quantum/melange_ipam_lib.py:133 -msgid "get_project_and_global_net_ids must be called with a non-null project_id" +#: cinder/volume/api.py:589 +msgid "Metadata property value greater than 255 characters" msgstr "" -#: cinder/network/quantum/cinder_ipam_lib.py:75 -msgid "Error creating network entry" +#: cinder/volume/api.py:720 cinder/volume/api.py:772 +#, fuzzy +msgid "Volume status must be available/in-use." +msgstr "Volume status must be available/in-use." + +#: cinder/volume/api.py:723 +msgid "Volume status is in-use." msgstr "" -#: cinder/network/quantum/cinder_ipam_lib.py:90 -#, python-format -msgid "No network with net_id = %s" +#: cinder/volume/api.py:752 +msgid "Volume status must be available to extend." msgstr "" -#: cinder/network/quantum/cinder_ipam_lib.py:221 +#: cinder/volume/api.py:757 #, python-format -msgid "No fixed IPs to deallocate for vif %s" +msgid "" +"New size for extend must be greater than current size. 
(current: " +"%(size)s, extended: %(new_size)s)" msgstr "" -#: cinder/network/quantum/quantum_connection.py:99 -#, python-format -msgid "Connecting interface %(interface_id)s to net %(net_id)s for %(tenant_id)s" +#: cinder/volume/api.py:778 +msgid "Volume is already part of an active migration" msgstr "" -#: cinder/network/quantum/quantum_connection.py:113 -#, python-format -msgid "Deleting port %(port_id)s on net %(net_id)s for %(tenant_id)s" +#: cinder/volume/api.py:784 +msgid "volume must not have snapshots" msgstr "" -#: cinder/notifier/api.py:115 +#: cinder/volume/api.py:797 #, python-format -msgid "%s not in valid priorities" +msgid "No available service named %s" msgstr "" -#: cinder/notifier/api.py:130 -#, python-format -msgid "" -"Problem '%(e)s' attempting to send to notification system. " -"Payload=%(payload)s" +#: cinder/volume/api.py:803 +msgid "Destination host must be different than current host" msgstr "" -#: cinder/notifier/list_notifier.py:65 -#, python-format -msgid "Problem '%(e)s' attempting to send to notification driver %(driver)s." +#: cinder/volume/api.py:833 +msgid "Source volume not mid-migration." msgstr "" -#: cinder/notifier/rabbit_notifier.py:46 -#, python-format -msgid "Could not send notification to %(topic)s. Payload=%(message)s" +#: cinder/volume/api.py:837 +msgid "Destination volume not mid-migration." msgstr "" -#: cinder/rpc/amqp.py:146 +#: cinder/volume/api.py:842 #, python-format -msgid "Returning exception %s to caller" -msgstr "Returning exception %s to caller" +msgid "Destination has migration_status %(stat)s, expected %(exp)s." +msgstr "" -#: cinder/rpc/amqp.py:188 -#, python-format -msgid "unpacked context: %s" -msgstr "unpacked context: %s" +#: cinder/volume/api.py:853 +msgid "Volume status must be available to update readonly flag." +msgstr "" -#: cinder/rpc/amqp.py:231 +#: cinder/volume/api.py:862 #, python-format -msgid "received %s" -msgstr "received %s" +msgid "Unable to update type due to incorrect status on volume: %s" +msgstr "" -#: cinder/rpc/amqp.py:236 +#: cinder/volume/api.py:868 #, python-format -msgid "no method for message: %s" -msgstr "no method for message: %s" +msgid "Volume %s is already part of an active migration." +msgstr "" -#: cinder/rpc/amqp.py:237 +#: cinder/volume/api.py:874 #, python-format -msgid "No method for message: %s" -msgstr "No method for message: %s" +msgid "migration_policy must be 'on-demand' or 'never', passed: %s" +msgstr "" -#: cinder/rpc/amqp.py:321 +#: cinder/volume/api.py:887 #, python-format -msgid "Making asynchronous call on %s ..." +msgid "Invalid volume_type passed: %s" msgstr "" -#: cinder/rpc/amqp.py:324 +#: cinder/volume/api.py:900 #, python-format -msgid "MSG_ID is %s" -msgstr "MSG_ID is %s" +msgid "New volume_type same as original: %s" +msgstr "" -#: cinder/rpc/amqp.py:346 -#, python-format -msgid "Making asynchronous cast on %s..." +#: cinder/volume/api.py:915 +msgid "Retype cannot change encryption requirements" msgstr "" -#: cinder/rpc/amqp.py:354 -msgid "Making asynchronous fanout cast..." +#: cinder/volume/api.py:927 +msgid "Retype cannot change front-end qos specs for in-use volumes" msgstr "" -#: cinder/rpc/amqp.py:379 +#: cinder/volume/driver.py:189 cinder/volume/drivers/netapp/nfs.py:174 #, python-format -msgid "Sending notification on %s..." -msgstr "" +msgid "Recovering from a failed execute. Try number %s" +msgstr "Recovering from a failed execute. 
Try number %s" -#: cinder/rpc/common.py:54 +#: cinder/volume/driver.py:282 #, python-format -msgid "" -"Remote error: %(exc_type)s %(value)s\n" -"%(traceback)s." +msgid "copy_data_between_volumes %(src)s -> %(dest)s." msgstr "" -#: cinder/rpc/common.py:71 -msgid "Timeout while waiting on RPC response." +#: cinder/volume/driver.py:295 cinder/volume/driver.py:309 +#, python-format +msgid "Failed to attach volume %(vol)s" msgstr "" -#: cinder/rpc/impl_kombu.py:111 -msgid "Failed to process message... skipping it." +#: cinder/volume/driver.py:327 +#, python-format +msgid "Failed to copy volume %(src)s to %(dest)d" msgstr "" -#: cinder/rpc/impl_kombu.py:407 +#: cinder/volume/driver.py:340 #, python-format -msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgid "copy_image_to_volume %s." msgstr "" -#: cinder/rpc/impl_kombu.py:430 +#: cinder/volume/driver.py:358 #, python-format -msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgid "copy_volume_to_image %s." msgstr "" -#: cinder/rpc/impl_kombu.py:466 +#: cinder/volume/driver.py:394 #, python-format -msgid "" -"Unable to connect to AMQP server on %(hostname)s:%(port)d after " -"%(max_retries)d tries: %(err_str)s" +msgid "Unable to access the backend storage via the path %(path)s." msgstr "" -#: cinder/rpc/impl_kombu.py:482 +#: cinder/volume/driver.py:433 #, python-format -msgid "" -"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " -"again in %(sleep_time)d seconds." +msgid "Creating a new backup for volume %s." msgstr "" -#: cinder/rpc/impl_kombu.py:533 cinder/rpc/impl_qpid.py:385 +#: cinder/volume/driver.py:451 #, python-format -msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgid "Restoring backup %(backup)s to volume %(volume)s." msgstr "" -#: cinder/rpc/impl_kombu.py:551 cinder/rpc/impl_qpid.py:400 -#, python-format -msgid "Timed out waiting for RPC response: %s" +#: cinder/volume/driver.py:474 +msgid "Extend volume not implemented" msgstr "" -#: cinder/rpc/impl_kombu.py:555 cinder/rpc/impl_qpid.py:404 -#, python-format -msgid "Failed to consume message from queue: %s" +#: cinder/volume/driver.py:533 cinder/volume/drivers/emc/emc_smis_iscsi.py:113 +msgid "ISCSI provider_location not stored, using discovery" msgstr "" -#: cinder/rpc/impl_kombu.py:589 cinder/rpc/impl_qpid.py:434 +#: cinder/volume/driver.py:546 #, python-format -msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgid "ISCSI discovery attempt failed for:%s" msgstr "" -#: cinder/rpc/impl_qpid.py:341 +#: cinder/volume/driver.py:548 #, python-format -msgid "Unable to connect to AMQP server: %s " +msgid "Error from iscsiadm -m discovery: %s" msgstr "" -#: cinder/rpc/impl_qpid.py:346 +#: cinder/volume/driver.py:595 +#, fuzzy, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "Could not find iSCSI export for volume %s" + +#: cinder/volume/driver.py:599 cinder/volume/drivers/emc/emc_smis_iscsi.py:156 #, python-format -msgid "Connected to AMQP server on %s" +msgid "ISCSI Discovery: Found %s" msgstr "" -#: cinder/rpc/impl_qpid.py:354 -msgid "Re-established AMQP queues" +#: cinder/volume/driver.py:696 +msgid "The volume driver requires the iSCSI initiator name in the connector." msgstr "" -#: cinder/rpc/impl_qpid.py:412 -msgid "Error processing message. Skipping it." 
+#: cinder/volume/driver.py:726 cinder/volume/driver.py:845 +#: cinder/volume/drivers/eqlx.py:247 cinder/volume/drivers/lvm.py:359 +#: cinder/volume/drivers/zadara.py:650 +#: cinder/volume/drivers/emc/emc_smis_common.py:859 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:235 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:602 +#: cinder/volume/drivers/netapp/iscsi.py:1032 +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/nexenta/iscsi.py:538 +#: cinder/volume/drivers/windows/windows.py:205 +msgid "Updating volume stats" +msgstr "" + +#: cinder/volume/driver.py:924 +msgid "Driver must implement initialize_connection" msgstr "" -#: cinder/scheduler/chance.py:47 cinder/scheduler/simple.py:91 -#: cinder/scheduler/simple.py:143 -msgid "Is the appropriate service running?" +#: cinder/volume/manager.py:203 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." msgstr "" -#: cinder/scheduler/chance.py:52 -msgid "Could not find another compute" +#: cinder/volume/manager.py:209 +msgid "" +"ThinLVMVolumeDriver is deprecated, please configure LVMISCSIDriver and " +"lvm_type=thin. Continuing with those settings." msgstr "" -#: cinder/scheduler/driver.py:63 +#: cinder/volume/manager.py:228 #, python-format -msgid "Casted '%(method)s' to volume '%(host)s'" +msgid "Starting volume driver %(driver_name)s (%(version)s)" msgstr "" -#: cinder/scheduler/driver.py:80 +#: cinder/volume/manager.py:235 #, python-format -msgid "Casted '%(method)s' to compute '%(host)s'" +msgid "Error encountered during initialization of driver: %(name)s" msgstr "" -#: cinder/scheduler/driver.py:89 +#: cinder/volume/manager.py:244 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "Re-exporting %s volumes" + +#: cinder/volume/manager.py:257 #, python-format -msgid "Casted '%(method)s' to network '%(host)s'" +msgid "Failed to re-export volume %s: setting to error state" msgstr "" -#: cinder/scheduler/driver.py:107 +#: cinder/volume/manager.py:264 #, python-format -msgid "Casted '%(method)s' to %(topic)s '%(host)s'" +msgid "volume %s stuck in a downloading state" msgstr "" -#: cinder/scheduler/driver.py:181 -msgid "Must implement a fallback schedule" -msgstr "Must implement a fallback schedule" +#: cinder/volume/manager.py:271 +#, python-format +msgid "volume %s: skipping export" +msgstr "volume %s: skipping export" -#: cinder/scheduler/driver.py:185 -msgid "Driver must implement schedule_prep_resize" +#: cinder/volume/manager.py:273 +#, python-format +msgid "" +"Error encountered during re-exporting phase of driver initialization: " +"%(name)s" msgstr "" -#: cinder/scheduler/driver.py:190 -msgid "Driver must implement schedule_run_instance" +#: cinder/volume/manager.py:283 +msgid "Resuming any in progress delete operations" msgstr "" -#: cinder/scheduler/driver.py:325 -msgid "Block migration can not be used with shared storage." +#: cinder/volume/manager.py:286 +#, python-format +msgid "Resuming delete on volume: %s" msgstr "" -#: cinder/scheduler/driver.py:330 -msgid "Live migration can not be used without shared storage." +#: cinder/volume/manager.py:328 cinder/volume/manager.py:330 +msgid "Failed to create manager volume flow" msgstr "" -#: cinder/scheduler/driver.py:367 cinder/scheduler/driver.py:457 +#: cinder/volume/manager.py:374 cinder/volume/manager.py:391 #, python-format -msgid "host %(dest)s is not compatible with original host %(src)s." 
-msgstr "" +msgid "volume %s: deleting" +msgstr "volume %s: deleting" + +#: cinder/volume/manager.py:380 +#, fuzzy +msgid "volume is not local to this node" +msgstr "Volume is not local to this node" -#: cinder/scheduler/driver.py:416 +#: cinder/volume/manager.py:389 #, python-format -msgid "" -"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " -"memory(host:%(avail)s <= instance:%(mem_inst)s)" -msgstr "" +msgid "volume %s: removing export" +msgstr "volume %s: removing export" -#: cinder/scheduler/driver.py:472 +#: cinder/volume/manager.py:394 #, python-format -msgid "" -"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " -"disk(host:%(available)s <= instance:%(necessary)s)" +msgid "Cannot delete volume %s: volume is busy" msgstr "" -#: cinder/scheduler/filter_scheduler.py:51 -#, python-format -msgid "No host selection for %s defined." +#: cinder/volume/manager.py:422 +msgid "Failed to update usages deleting volume" msgstr "" -#: cinder/scheduler/filter_scheduler.py:64 +#: cinder/volume/manager.py:427 #, python-format -msgid "Attempting to build %(num_instances)d instance(s)" +msgid "volume %s: glance metadata deleted" msgstr "" -#: cinder/scheduler/filter_scheduler.py:157 -msgid "Scheduler only understands Compute nodes (for now)" +#: cinder/volume/manager.py:430 +#, python-format +msgid "no glance metadata found for volume %s" msgstr "" -#: cinder/scheduler/filter_scheduler.py:199 +#: cinder/volume/manager.py:434 #, python-format -msgid "Filtered %(hosts)s" -msgstr "" +msgid "volume %s: deleted successfully" +msgstr "volume %s: deleted successfully" -#: cinder/scheduler/filter_scheduler.py:209 +#: cinder/volume/manager.py:451 #, python-format -msgid "Weighted %(weighted_host)s" +msgid "snapshot %s: creating" msgstr "" -#: cinder/scheduler/host_manager.py:144 +#: cinder/volume/manager.py:462 #, python-format -msgid "Host filter fails for ignored host %(host)s" +msgid "snapshot %(snap_id)s: creating" msgstr "" -#: cinder/scheduler/host_manager.py:151 +#: cinder/volume/manager.py:490 #, python-format -msgid "Host filter fails for non-forced host %(host)s" +msgid "" +"Failed updating %(snapshot_id)s metadata using the provided volumes " +"%(volume_id)s metadata" msgstr "" -#: cinder/scheduler/host_manager.py:157 +#: cinder/volume/manager.py:496 #, python-format -msgid "Host filter function %(func)s failed for %(host)s" +msgid "snapshot %s: created successfully" msgstr "" -#: cinder/scheduler/host_manager.py:163 +#: cinder/volume/manager.py:508 cinder/volume/manager.py:518 #, python-format -msgid "Host filter passes for %(host)s" +msgid "snapshot %s: deleting" msgstr "" -#: cinder/scheduler/host_manager.py:272 +#: cinder/volume/manager.py:526 #, python-format -msgid "Received %(service_name)s service update from %(host)s." +msgid "Cannot delete snapshot %s: snapshot is busy" msgstr "" -#: cinder/scheduler/host_manager.py:313 -msgid "host_manager only implemented for 'compute'" +#: cinder/volume/manager.py:556 +msgid "Failed to update usages deleting snapshot" msgstr "" -#: cinder/scheduler/host_manager.py:323 +#: cinder/volume/manager.py:559 #, python-format -msgid "No service for compute ID %s" +msgid "snapshot %s: deleted successfully" msgstr "" -#: cinder/scheduler/manager.py:85 -#, python-format -msgid "" -"Driver Method %(driver_method_name)s missing: %(e)s. 
Reverting to " -"schedule()" +#: cinder/volume/manager.py:579 +msgid "being attached by another instance" msgstr "" -#: cinder/scheduler/manager.py:150 -#, python-format -msgid "Failed to schedule_%(method)s: %(ex)s" +#: cinder/volume/manager.py:583 +msgid "being attached by another host" msgstr "" -#: cinder/scheduler/manager.py:159 -#, python-format -msgid "Setting instance %(instance_uuid)s to %(state)s state." +#: cinder/volume/manager.py:587 +msgid "being attached by different mode" +msgstr "" + +#: cinder/volume/manager.py:590 +msgid "status must be available or attaching" msgstr "" -#: cinder/scheduler/scheduler_options.py:66 +#: cinder/volume/manager.py:698 +#, fuzzy, python-format +msgid "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" + +#: cinder/volume/manager.py:760 #, python-format -msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgid "Unable to fetch connection information from backend: %(err)s" msgstr "" -#: cinder/scheduler/scheduler_options.py:75 +#: cinder/volume/manager.py:807 #, python-format -msgid "Could not decode scheduler options: '%(e)s'" +msgid "Unable to terminate volume connection: %(err)s" msgstr "" -#: cinder/scheduler/simple.py:87 -msgid "Not enough allocatable CPU cores remaining" +#: cinder/volume/manager.py:854 +msgid "failed to create new_volume on destination host" msgstr "" -#: cinder/scheduler/simple.py:137 -msgid "Not enough allocatable volume gigabytes remaining" +#: cinder/volume/manager.py:857 +msgid "timeout creating new_volume on destination host" msgstr "" -#: cinder/scheduler/filters/core_filter.py:45 -msgid "VCPUs not set; assuming CPU collection broken" +#: cinder/volume/manager.py:880 +#, python-format +msgid "Failed to copy volume %(vol1)s to %(vol2)s" msgstr "" -#: cinder/tests/fake_utils.py:72 +#: cinder/volume/manager.py:909 #, python-format -msgid "Faking execution of cmd (subprocess): %s" +msgid "" +"migrate_volume_completion: completing migration for volume %(vol1)s " +"(temporary volume %(vol2)s" msgstr "" -#: cinder/tests/fake_utils.py:80 +#: cinder/volume/manager.py:921 #, python-format -msgid "Faked command matched %s" +msgid "" +"migrate_volume_completion is cleaning up an error for volume %(vol1)s " +"(temporary volume %(vol2)s" msgstr "" -#: cinder/tests/fake_utils.py:96 +#: cinder/volume/manager.py:940 #, python-format -msgid "Faked command raised an exception %s" +msgid "Failed to delete migration source vol %(vol)s: %(err)s" msgstr "" -#: cinder/tests/fake_utils.py:101 +#: cinder/volume/manager.py:976 #, python-format -msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgid "volume %s: calling driver migrate_volume" msgstr "" -#: cinder/tests/fakelibvirt.py:784 -msgid "Please extend mock libvirt module to support flags" +#: cinder/volume/manager.py:1016 +msgid "Updating volume status" msgstr "" -#: cinder/tests/fakelibvirt.py:790 -msgid "Please extend fake libvirt module to support this auth method" +#: cinder/volume/manager.py:1024 +#, python-format +msgid "" +"Unable to update stats, %(driver_name)s -%(driver_version)s " +"%(config_group)s driver is uninitialized." 
msgstr "" -#: cinder/tests/test_compute.py:365 cinder/tests/test_compute.py:1419 +#: cinder/volume/manager.py:1044 #, python-format -msgid "Running instances: %s" -msgstr "Running instances: %s" +msgid "Notification {%s} received" +msgstr "" -#: cinder/tests/test_compute.py:371 +#: cinder/volume/manager.py:1091 #, python-format -msgid "After terminating instances: %s" -msgstr "After terminating instances: %s" +msgid "" +"Quota exceeded for %(s_pid)s, tried to extend volume by %(s_size)sG, " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" -#: cinder/tests/test_compute.py:589 -msgid "Internal error" +#: cinder/volume/manager.py:1103 +#, python-format +msgid "volume %s: extending" msgstr "" -#: cinder/tests/test_compute.py:1430 +#: cinder/volume/manager.py:1105 #, python-format -msgid "After force-killing instances: %s" +msgid "volume %s: extended successfully" msgstr "" -#: cinder/tests/test_misc.py:92 +#: cinder/volume/manager.py:1107 #, python-format -msgid "" -"The following migrations are missing a downgrade:\n" -"\t%s" +msgid "volume %s: Error trying to extend volume" msgstr "" -#: cinder/tests/test_cinder_manage.py:169 -msgid "id" +#: cinder/volume/manager.py:1169 +msgid "Failed to update usages while retyping volume." msgstr "" -#: cinder/tests/test_cinder_manage.py:170 -msgid "IPv4" +#: cinder/volume/manager.py:1170 +msgid "Failed to get old volume type quota reservations" msgstr "" -#: cinder/tests/test_cinder_manage.py:171 -msgid "IPv6" +#: cinder/volume/manager.py:1190 +#, python-format +msgid "Volume %s: retyped succesfully" msgstr "" -#: cinder/tests/test_cinder_manage.py:172 -msgid "start address" -msgstr "start address" +#: cinder/volume/manager.py:1193 +#, python-format +msgid "" +"Volume %s: driver error when trying to retype, falling back to generic " +"mechanism." +msgstr "" -#: cinder/tests/test_cinder_manage.py:173 -msgid "DNS1" +#: cinder/volume/manager.py:1204 +msgid "Retype requires migration but is not allowed." msgstr "" -#: cinder/tests/test_cinder_manage.py:174 -msgid "DNS2" +#: cinder/volume/manager.py:1212 +msgid "Volume must not have snapshots." msgstr "" -#: cinder/tests/test_cinder_manage.py:175 -msgid "VlanID" +#: cinder/volume/qos_specs.py:57 +#, python-format +msgid "Valid consumer of QoS specs are: %s" msgstr "" -#: cinder/tests/test_cinder_manage.py:176 -msgid "project" +#: cinder/volume/qos_specs.py:84 cinder/volume/qos_specs.py:105 +#: cinder/volume/qos_specs.py:155 cinder/volume/qos_specs.py:197 +#: cinder/volume/qos_specs.py:211 cinder/volume/qos_specs.py:225 +#: cinder/volume/volume_types.py:43 +#, python-format +msgid "DB error: %s" msgstr "" -#: cinder/tests/test_cinder_manage.py:177 -msgid "uuid" +#: cinder/volume/qos_specs.py:123 cinder/volume/qos_specs.py:140 +#: cinder/volume/qos_specs.py:272 cinder/volume/volume_types.py:52 +#: cinder/volume/volume_types.py:99 +msgid "id cannot be None" msgstr "" -#: cinder/tests/test_volume.py:216 +#: cinder/volume/qos_specs.py:156 #, python-format -msgid "Target %s allocated" -msgstr "Target %s allocated" +msgid "Failed to get all associations of qos specs %s" +msgstr "" -#: cinder/tests/test_volume.py:468 +#: cinder/volume/qos_specs.py:189 #, python-format -msgid "Cannot confirm exported volume id:%s." 
+msgid "" +"Type %(type_id)s is already associated with another qos specs: " +"%(qos_specs_id)s" msgstr "" -#: cinder/tests/test_volume_types.py:58 +#: cinder/volume/qos_specs.py:198 #, python-format -msgid "Given data: %s" +msgid "Failed to associate qos specs %(id)s with type: %(vol_type_id)s" msgstr "" -#: cinder/tests/test_volume_types.py:59 +#: cinder/volume/qos_specs.py:212 #, python-format -msgid "Result data: %s" +msgid "Failed to disassociate qos specs %(id)s with type: %(vol_type_id)s" msgstr "" -#: cinder/tests/test_xenapi.py:626 +#: cinder/volume/qos_specs.py:226 #, python-format -msgid "Creating files in %s to simulate guest agent" +msgid "Failed to disassociate qos specs %s." msgstr "" -#: cinder/tests/test_xenapi.py:637 -#, python-format -msgid "Removing simulated guest agent files in %s" +#: cinder/volume/qos_specs.py:284 cinder/volume/volume_types.py:111 +msgid "name cannot be None" msgstr "" -#: cinder/tests/api/openstack/compute/test_servers.py:2144 +#: cinder/volume/utils.py:144 #, python-format -msgid "Quota exceeded: code=%(code)s" +msgid "" +"Incorrect value error: %(blocksize)s, it may indicate that " +"'volume_dd_blocksize' was configured incorrectly. Fall back to default." msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:57 +#: cinder/volume/volume_types.py:130 #, python-format -msgid "_create: %s" +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:66 +#: cinder/volume/drivers/block_device.py:131 +#: cinder/volume/drivers/block_device.py:143 cinder/volume/drivers/lvm.py:654 +#: cinder/volume/drivers/lvm.py:669 #, python-format -msgid "_delete: %s" +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:75 +#: cinder/volume/drivers/block_device.py:157 cinder/volume/drivers/lvm.py:687 #, python-format -msgid "_get: %s" +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:85 +#: cinder/volume/drivers/block_device.py:183 cinder/volume/drivers/lvm.py:483 #, python-format -msgid "_get_all: %s" +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:125 +#: cinder/volume/drivers/block_device.py:200 cinder/volume/drivers/lvm.py:504 #, python-format -msgid "test_snapshot_create: param=%s" +msgid "Skipping ensure_export. 
No iscsi_target provisioned for volume: %s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:134 +#: cinder/volume/drivers/block_device.py:272 cinder/volume/drivers/lvm.py:227 #, python-format -msgid "test_snapshot_create: resp_dict=%s" +msgid "Performing secure delete on volume: %s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:156 +#: cinder/volume/drivers/block_device.py:287 #, python-format -msgid "test_snapshot_create_force: param=%s" +msgid "Error unrecognized volume_clear option: %s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:165 -#, python-format -msgid "test_snapshot_create_force: resp_dict=%s" +#: cinder/volume/drivers/block_device.py:311 cinder/volume/drivers/lvm.py:300 +#: cinder/volume/drivers/zadara.py:509 cinder/volume/drivers/nexenta/nfs.py:189 +#, fuzzy, python-format +msgid "Creating clone of volume: %s" +msgstr "Creating clone of volume: %s" + +#: cinder/volume/drivers/block_device.py:380 +msgid "No free disk" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:205 -#, python-format -msgid "test_snapshot_show: resp=%s" +#: cinder/volume/drivers/block_device.py:393 +msgid "No big enough free disk" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:231 +#: cinder/volume/drivers/coraid.py:84 #, python-format -msgid "test_snapshot_detail: resp_dict=%s" +msgid "Invalid ESM url scheme \"%s\". Supported https only." msgstr "" -#: cinder/tests/integrated/test_login.py:31 -#, python-format -msgid "flavor: %s" +#: cinder/volume/drivers/coraid.py:111 +msgid "Invalid REST handle name. Expected path." msgstr "" -#: cinder/tests/integrated/api/client.py:38 +#: cinder/volume/drivers/coraid.py:134 #, python-format -msgid "" -"%(message)s\n" -"Status Code: %(_status)s\n" -"Body: %(_body)s" +msgid "Call to json.loads() failed: %(ex)s. Response: %(resp)s" msgstr "" -#: cinder/tests/integrated/api/client.py:47 -msgid "Authentication error" +#: cinder/volume/drivers/coraid.py:224 +msgid "Session is expired. Relogin on ESM." msgstr "" -#: cinder/tests/integrated/api/client.py:55 -msgid "Authorization error" +#: cinder/volume/drivers/coraid.py:244 +msgid "Reply is empty." msgstr "" -#: cinder/tests/integrated/api/client.py:63 -msgid "Item not found" +#: cinder/volume/drivers/coraid.py:246 +msgid "Error message is empty." msgstr "" -#: cinder/tests/integrated/api/client.py:105 +#: cinder/volume/drivers/coraid.py:284 #, python-format -msgid "Doing %(method)s on %(relative_url)s" +msgid "Coraid Appliance ping failed: %s" msgstr "" -#: cinder/tests/integrated/api/client.py:107 +#: cinder/volume/drivers/coraid.py:297 #, python-format -msgid "Body: %s" +msgid "Volume \"%(name)s\" created with VSX LUN \"%(lun)s\"" msgstr "" -#: cinder/tests/integrated/api/client.py:125 +#: cinder/volume/drivers/coraid.py:311 #, python-format -msgid "%(auth_uri)s => code %(http_status)s" +msgid "Volume \"%s\" deleted." msgstr "" -#: cinder/tests/integrated/api/client.py:151 +#: cinder/volume/drivers/coraid.py:315 #, python-format -msgid "%(relative_uri)s => code %(http_status)s" -msgstr "" - -#: cinder/tests/integrated/api/client.py:161 -msgid "Unexpected status code" +msgid "Resize volume \"%(name)s\" to %(size)s GB." 
msgstr "" -#: cinder/tests/integrated/api/client.py:168 +#: cinder/volume/drivers/coraid.py:319 #, python-format -msgid "Decoding JSON: %s" +msgid "Repository for volume \"%(name)s\" found: \"%(repo)s\"" msgstr "" -#: cinder/tests/rpc/common.py:133 -#, python-format -msgid "Nested received %(queue)s, %(value)s" -msgstr "Nested received %(queue)s, %(value)s" - -#: cinder/tests/rpc/common.py:142 +#: cinder/volume/drivers/coraid.py:333 #, python-format -msgid "Nested return %s" -msgstr "Nested return %s" +msgid "Volume \"%(name)s\" resized. New size is %(size)s GB." +msgstr "" -#: cinder/tests/rpc/common.py:160 -msgid "RPC backend does not support timeouts" +#: cinder/volume/drivers/coraid.py:385 +msgid "Cannot create clone volume in different repository." msgstr "" -#: cinder/tests/rpc/common.py:227 cinder/tests/rpc/common.py:233 +#: cinder/volume/drivers/coraid.py:505 #, python-format -msgid "Received %s" -msgstr "Received %s" - -#: cinder/virt/connection.py:85 -msgid "Failed to open connection to the hypervisor" -msgstr "Failed to open connection to the hypervisor" +msgid "Initialize connection %(shelf)s/%(lun)s for %(name)s" +msgstr "" -#: cinder/virt/fake.py:270 cinder/virt/xenapi_conn.py:396 -#: cinder/virt/baremetal/proxy.py:716 cinder/virt/libvirt/connection.py:2045 +#: cinder/volume/drivers/eqlx.py:139 #, python-format -msgid "Compute_service record created for %s " +msgid "" +"CLI output\n" +"%s" msgstr "" -#: cinder/virt/fake.py:273 cinder/virt/xenapi_conn.py:399 -#: cinder/virt/baremetal/proxy.py:720 cinder/virt/libvirt/connection.py:2048 -#, python-format -msgid "Compute_service record updated for %s " +#: cinder/volume/drivers/eqlx.py:154 +msgid "Reading CLI MOTD" msgstr "" -#: cinder/virt/firewall.py:130 +#: cinder/volume/drivers/eqlx.py:158 #, python-format -msgid "Attempted to unfilter instance %s which is not filtered" -msgstr "Attempted to unfilter instance %s which is not filtered" +msgid "Setting CLI terminal width: '%s'" +msgstr "" -#: cinder/virt/firewall.py:137 +#: cinder/volume/drivers/eqlx.py:162 #, python-format -msgid "Filters added to instance %s" +msgid "Sending CLI command: '%s'" msgstr "" -#: cinder/virt/firewall.py:139 -msgid "Provider Firewall Rules refreshed" +#: cinder/volume/drivers/eqlx.py:169 +msgid "Error executing EQL command" msgstr "" -#: cinder/virt/firewall.py:291 +#: cinder/volume/drivers/eqlx.py:199 #, python-format -msgid "Adding security group rule: %r" +msgid "EQL-driver: executing \"%s\"" msgstr "" -#: cinder/virt/firewall.py:403 cinder/virt/xenapi/firewall.py:87 +#: cinder/volume/drivers/eqlx.py:208 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:383 #, python-format -msgid "Adding provider rule: %s" +msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" msgstr "" -#: cinder/virt/images.py:86 -msgid "'qemu-img info' parsing failed." 
-msgstr "" +#: cinder/volume/drivers/eqlx.py:215 cinder/volume/drivers/san/san.py:149 +#, fuzzy, python-format +msgid "Error running SSH command: %s" +msgstr "Error running SSH command: %s" -#: cinder/virt/images.py:92 +#: cinder/volume/drivers/eqlx.py:282 #, python-format -msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgid "Volume %s does not exist, it may have already been deleted" msgstr "" -#: cinder/virt/images.py:104 +#: cinder/volume/drivers/eqlx.py:300 #, python-format -msgid "Converted to raw, but format is now %s" +msgid "EQL-driver: Setup is complete, group IP is %s" msgstr "" -#: cinder/virt/vmwareapi_conn.py:105 -msgid "" -"Must specify vmwareapi_host_ip,vmwareapi_host_username and " -"vmwareapi_host_password to useconnection_type=vmwareapi" +#: cinder/volume/drivers/eqlx.py:304 +msgid "Failed to setup the Dell EqualLogic driver" msgstr "" -#: cinder/virt/vmwareapi_conn.py:276 +#: cinder/volume/drivers/eqlx.py:320 #, python-format -msgid "In vmwareapi:_create_session, got this exception: %s" +msgid "Failed to create volume %s" msgstr "" -#: cinder/virt/vmwareapi_conn.py:359 +#: cinder/volume/drivers/eqlx.py:329 #, python-format -msgid "In vmwareapi:_call_method, got this exception: %s" +msgid "Volume %s was not found while trying to delete it" msgstr "" -#: cinder/virt/vmwareapi_conn.py:398 +#: cinder/volume/drivers/eqlx.py:333 #, python-format -msgid "Task [%(task_name)s] %(task_ref)s status: success" +msgid "Failed to delete volume %s" msgstr "" -#: cinder/virt/vmwareapi_conn.py:404 +#: cinder/volume/drivers/eqlx.py:348 #, python-format -msgid "Task [%(task_name)s] %(task_ref)s status: error %(error_info)s" +msgid "Failed to create snapshot of volume %s" msgstr "" -#: cinder/virt/vmwareapi_conn.py:409 +#: cinder/volume/drivers/eqlx.py:361 #, python-format -msgid "In vmwareapi:_poll_task, Got this error %s" +msgid "Failed to create volume from snapshot %s" msgstr "" -#: cinder/virt/xenapi_conn.py:140 -msgid "" -"Must specify xenapi_connection_url, xenapi_connection_username " -"(optionally), and xenapi_connection_password to use " -"connection_type=xenapi" +#: cinder/volume/drivers/eqlx.py:374 +#, python-format +msgid "Failed to create clone of volume %s" msgstr "" -"Must specify xenapi_connection_url, xenapi_connection_username " -"(optionally), and xenapi_connection_password to use " -"connection_type=xenapi" -#: cinder/virt/xenapi_conn.py:329 cinder/virt/libvirt/connection.py:472 -msgid "Could not determine iscsi initiator name" +#: cinder/volume/drivers/eqlx.py:384 +#, python-format +msgid "Failed to delete snapshot %(snap)s of volume %(vol)s" msgstr "" -#: cinder/virt/xenapi_conn.py:460 -msgid "Host startup on XenServer is not supported." +#: cinder/volume/drivers/eqlx.py:405 +#, python-format +msgid "Failed to initialize connection to volume %s" msgstr "" -#: cinder/virt/xenapi_conn.py:489 -msgid "Unable to log in to XenAPI (is the Dom0 disk full?)" +#: cinder/volume/drivers/eqlx.py:415 +#, python-format +msgid "Failed to terminate connection to volume %s" msgstr "" -#: cinder/virt/xenapi_conn.py:527 -msgid "Host is member of a pool, but DB says otherwise" +#: cinder/volume/drivers/eqlx.py:436 +#, python-format +msgid "Volume %s is not found!, it may have been deleted" msgstr "" -#: cinder/virt/xenapi_conn.py:599 cinder/virt/xenapi_conn.py:612 +#: cinder/volume/drivers/eqlx.py:440 #, python-format -msgid "Got exception: %s" -msgstr "Got exception: %s" - -#: cinder/virt/baremetal/dom.py:93 -msgid "No domains exist." 
+msgid "Failed to ensure export of volume %s" msgstr "" -#: cinder/virt/baremetal/dom.py:95 +#: cinder/volume/drivers/eqlx.py:459 #, python-format -msgid "============= initial domains =========== : %s" +msgid "Failed to extend_volume %(name)s from %(current_size)sGB to %(new_size)sGB" msgstr "" -#: cinder/virt/baremetal/dom.py:99 -msgid "Building domain: to be removed" +#: cinder/volume/drivers/glusterfs.py:86 +#, python-format +msgid "There's no Gluster config file configured (%s)" msgstr "" -#: cinder/virt/baremetal/dom.py:103 -msgid "Not running domain: remove" +#: cinder/volume/drivers/glusterfs.py:91 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" msgstr "" -#: cinder/virt/baremetal/dom.py:111 -msgid "domain running on an unknown node: discarded" +#: cinder/volume/drivers/glusterfs.py:103 +msgid "mount.glusterfs is not installed" msgstr "" -#: cinder/virt/baremetal/dom.py:127 +#: cinder/volume/drivers/glusterfs.py:161 #, python-format -msgid "No such domain (%s)" +msgid "Cloning volume %(src)s to volume %(dst)s" msgstr "" -#: cinder/virt/baremetal/dom.py:134 -#, python-format -msgid "Failed power down Bare-metal node %s" +#: cinder/volume/drivers/glusterfs.py:166 +msgid "Volume status must be 'available'." msgstr "" -#: cinder/virt/baremetal/dom.py:143 -msgid "deactivate -> activate fails" -msgstr "" +#: cinder/volume/drivers/glusterfs.py:202 cinder/volume/drivers/nfs.py:122 +#: cinder/volume/drivers/netapp/nfs.py:753 +#, fuzzy, python-format +msgid "casted to %s" +msgstr "casted to %s" -#: cinder/virt/baremetal/dom.py:153 -msgid "destroy_domain: no such domain" +#: cinder/volume/drivers/glusterfs.py:216 +msgid "Snapshot status must be \"available\" to clone." msgstr "" -#: cinder/virt/baremetal/dom.py:154 +#: cinder/volume/drivers/glusterfs.py:238 #, python-format -msgid "No such domain %s" +msgid "snapshot: %(snap)s, volume: %(vol)s, volume_size: %(size)s" msgstr "" -#: cinder/virt/baremetal/dom.py:161 +#: cinder/volume/drivers/glusterfs.py:257 #, python-format -msgid "Domains: %s" +msgid "will copy from snapshot at %s" msgstr "" -#: cinder/virt/baremetal/dom.py:163 +#: cinder/volume/drivers/glusterfs.py:275 cinder/volume/drivers/nfs.py:172 #, python-format -msgid "Nodes: %s" +msgid "Volume %s does not have provider_location specified, skipping" msgstr "" -#: cinder/virt/baremetal/dom.py:166 +#: cinder/volume/drivers/glusterfs.py:373 #, python-format -msgid "After storing domains: %s" +msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" msgstr "" -#: cinder/virt/baremetal/dom.py:169 -msgid "deactivation/removing domain failed" +#: cinder/volume/drivers/glusterfs.py:403 +#, python-format +msgid "nova call result: %s" msgstr "" -#: cinder/virt/baremetal/dom.py:176 -msgid "===== Domain is being created =====" +#: cinder/volume/drivers/glusterfs.py:405 +msgid "Call to Nova to create snapshot failed" msgstr "" -#: cinder/virt/baremetal/dom.py:179 -msgid "Same domain name already exists" +#: cinder/volume/drivers/glusterfs.py:427 +msgid "Nova returned \"error\" status while creating snapshot." msgstr "" -#: cinder/virt/baremetal/dom.py:181 -msgid "create_domain: before get_idle_node" +#: cinder/volume/drivers/glusterfs.py:431 +#, python-format +msgid "Status of snapshot %(id)s is now %(status)s" msgstr "" -#: cinder/virt/baremetal/dom.py:198 +#: cinder/volume/drivers/glusterfs.py:444 #, python-format -msgid "Created new domain: %s" +msgid "Timed out while waiting for Nova update for creation of snapshot %s." 
msgstr "" -#: cinder/virt/baremetal/dom.py:213 +#: cinder/volume/drivers/glusterfs.py:456 #, python-format -msgid "Failed to boot Bare-metal node %s" +msgid "create snapshot: %s" msgstr "" -#: cinder/virt/baremetal/dom.py:222 -msgid "No such domain exists" +#: cinder/volume/drivers/glusterfs.py:457 +#, python-format +msgid "volume id: %s" msgstr "" -#: cinder/virt/baremetal/dom.py:226 -#, python-format -msgid "change_domain_state: to new state %s" +#: cinder/volume/drivers/glusterfs.py:532 +msgid "'active' must be present when writing snap_info." msgstr "" -#: cinder/virt/baremetal/dom.py:233 +#: cinder/volume/drivers/glusterfs.py:562 #, python-format -msgid "Stored fake domains to the file: %s" +msgid "deleting snapshot %s" msgstr "" -#: cinder/virt/baremetal/dom.py:244 -msgid "domain does not exist" +#: cinder/volume/drivers/glusterfs.py:566 +msgid "Volume status must be \"available\" or \"in-use\"." msgstr "" -#: cinder/virt/baremetal/nodes.py:42 +#: cinder/volume/drivers/glusterfs.py:582 #, python-format -msgid "Unknown baremetal driver %(d)s" +msgid "" +"Snapshot record for %s is not present, allowing snapshot_delete to " +"proceed." msgstr "" -#: cinder/virt/baremetal/proxy.py:148 +#: cinder/volume/drivers/glusterfs.py:587 #, python-format -msgid "Error encountered when destroying instance '%(name)s': %(ex)s" +msgid "snapshot_file for this snap is %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:162 +#: cinder/volume/drivers/glusterfs.py:608 #, python-format -msgid "instance %(instance_name)s: deleting instance files %(target)s" -msgstr "instance %(instance_name)s: deleting instance files %(target)s" +msgid "No base file found for %s." +msgstr "" -#: cinder/virt/baremetal/proxy.py:189 +#: cinder/volume/drivers/glusterfs.py:625 #, python-format -msgid "instance %s: rebooted" -msgstr "instance %s: rebooted" - -#: cinder/virt/baremetal/proxy.py:192 -msgid "_wait_for_reboot failed" +msgid "No %(base_id)s found for %(file)s" msgstr "" -#: cinder/virt/baremetal/proxy.py:222 +#: cinder/volume/drivers/glusterfs.py:680 #, python-format -msgid "instance %s: rescued" -msgstr "instance %s: rescued" +msgid "No file found with %s as backing file." +msgstr "" -#: cinder/virt/baremetal/proxy.py:225 -msgid "_wait_for_rescue failed" +#: cinder/volume/drivers/glusterfs.py:690 +#, python-format +msgid "No snap found with %s as backing file." msgstr "" -#: cinder/virt/baremetal/proxy.py:242 -msgid "<============= spawn of baremetal =============>" +#: cinder/volume/drivers/glusterfs.py:701 +#, python-format +msgid "No file depends on %s." msgstr "" -#: cinder/virt/baremetal/proxy.py:255 +#: cinder/volume/drivers/glusterfs.py:727 #, python-format -msgid "instance %s: is building" +msgid "Check condition failed: %s expected to be None." msgstr "" -#: cinder/virt/baremetal/proxy.py:260 -msgid "Key is injected but instance is not running yet" +#: cinder/volume/drivers/glusterfs.py:778 +msgid "Call to Nova delete snapshot failed" msgstr "" -#: cinder/virt/baremetal/proxy.py:265 +#: cinder/volume/drivers/glusterfs.py:796 #, python-format -msgid "instance %s: booted" -msgstr "instance %s: booted" +msgid "status of snapshot %s is still \"deleting\"... waiting" +msgstr "" -#: cinder/virt/baremetal/proxy.py:268 +#: cinder/volume/drivers/glusterfs.py:802 #, python-format -msgid "~~~~~~ current state = %s ~~~~~~" +msgid "Unable to delete snapshot %(id)s, status: %(status)s." 
msgstr "" -#: cinder/virt/baremetal/proxy.py:269 +#: cinder/volume/drivers/glusterfs.py:815 #, python-format -msgid "instance %s spawned successfully" +msgid "Timed out while waiting for Nova update for deletion of snapshot %(id)s." msgstr "" -#: cinder/virt/baremetal/proxy.py:272 +#: cinder/volume/drivers/glusterfs.py:904 #, python-format -msgid "instance %s:not booted" +msgid "%s must be a valid raw or qcow2 image." msgstr "" -#: cinder/virt/baremetal/proxy.py:274 -msgid "Bremetal assignment is overcommitted." +#: cinder/volume/drivers/glusterfs.py:967 +msgid "Extend volume is only supported for this driver when no snapshots exist." msgstr "" -#: cinder/virt/baremetal/proxy.py:354 +#: cinder/volume/drivers/glusterfs.py:975 #, python-format -msgid "instance %s: Creating image" -msgstr "instance %s: Creating image" +msgid "Unrecognized backing format: %s" +msgstr "" -#: cinder/virt/baremetal/proxy.py:473 +#: cinder/volume/drivers/glusterfs.py:990 #, python-format -msgid "instance %(inst_name)s: injecting %(injection)s into image %(img_id)s" +msgid "creating new volume at %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:484 +#: cinder/volume/drivers/glusterfs.py:993 #, python-format -msgid "" -"instance %(inst_name)s: ignoring error injecting data into image " -"%(img_id)s (%(e)s)" +msgid "file already exists at %s" msgstr "" -"instance %(inst_name)s: ignoring error injecting data into image " -"%(img_id)s (%(e)s)" -#: cinder/virt/baremetal/proxy.py:529 +#: cinder/volume/drivers/glusterfs.py:1019 cinder/volume/drivers/nfs.py:159 #, python-format -msgid "instance %s: starting toXML method" -msgstr "instance %s: starting toXML method" +msgid "Exception during mounting %s" +msgstr "" -#: cinder/virt/baremetal/proxy.py:531 +#: cinder/volume/drivers/glusterfs.py:1021 #, python-format -msgid "instance %s: finished toXML method" -msgstr "instance %s: finished toXML method" +msgid "Available shares: %s" +msgstr "" -#: cinder/virt/baremetal/proxy.py:574 cinder/virt/libvirt/connection.py:1815 +#: cinder/volume/drivers/glusterfs.py:1038 +#, python-format msgid "" -"Cannot get the number of cpu, because this function is not implemented " -"for this platform. This error can be safely ignored for now." +"GlusterFS share at %(dir)s is not writable by the Cinder volume service. " +"Snapshot operations will not be supported." msgstr "" -#: cinder/virt/baremetal/proxy.py:714 +#: cinder/volume/drivers/gpfs.py:96 #, python-format -msgid "#### RLK: cpu_arch = %s " +msgid "GPFS is not active. Detailed output: %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:746 -msgid "Updating!" +#: cinder/volume/drivers/gpfs.py:97 +#, python-format +msgid "GPFS is not running - state: %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:773 cinder/virt/libvirt/connection.py:2609 -#: cinder/virt/xenapi/host.py:129 -msgid "Updating host stats" +#: cinder/volume/drivers/gpfs.py:140 +msgid "Option gpfs_mount_point_base is not set correctly." msgstr "" -#: cinder/virt/baremetal/tilera.py:185 -msgid "free_node...." +#: cinder/volume/drivers/gpfs.py:147 +msgid "Option gpfs_images_share_mode is not set correctly." msgstr "" -#: cinder/virt/baremetal/tilera.py:216 -#, python-format -msgid "deactivate_node is called for node_id = %(id)s node_ip = %(ip)s" +#: cinder/volume/drivers/gpfs.py:153 +msgid "Option gpfs_images_dir is not set correctly." 
msgstr "" -#: cinder/virt/baremetal/tilera.py:221 -msgid "status of node is set to 0" +#: cinder/volume/drivers/gpfs.py:160 +#, python-format +msgid "" +"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " +"belong to different file systems" msgstr "" -#: cinder/virt/baremetal/tilera.py:232 -msgid "rootfs is already removed" +#: cinder/volume/drivers/gpfs.py:169 +#, python-format +msgid "" +"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in " +"cluster daemon level %(cur)s - must be at least at level %(min)s." msgstr "" -#: cinder/virt/baremetal/tilera.py:264 -msgid "Before ping to the bare-metal node" +#: cinder/volume/drivers/gpfs.py:183 +#, python-format +msgid "%s must be an absolute path." msgstr "" -#: cinder/virt/baremetal/tilera.py:275 +#: cinder/volume/drivers/gpfs.py:188 #, python-format -msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is ready" +msgid "%s is not a directory." msgstr "" -#: cinder/virt/baremetal/tilera.py:279 +#: cinder/volume/drivers/gpfs.py:197 #, python-format -msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is not ready, out_msg=%(out_msg)s" +msgid "" +"The GPFS filesystem %(fs)s is not at the required release level. Current" +" level is %(cur)s, must be at least %(min)s." msgstr "" -#: cinder/virt/baremetal/tilera.py:292 -msgid "Noting to do for tilera nodes: vmlinux is in CF" +#: cinder/volume/drivers/gpfs.py:556 +#, python-format +msgid "Failed to resize volume %(volume_id)s, error: %(error)s" msgstr "" -#: cinder/virt/baremetal/tilera.py:316 -msgid "activate_node" +#: cinder/volume/drivers/gpfs.py:604 +#, python-format +msgid "mkfs failed on volume %(vol)s, error message was: %(err)s" msgstr "" -#: cinder/virt/baremetal/tilera.py:330 -msgid "Node is unknown error state." +#: cinder/volume/drivers/gpfs.py:637 +#, python-format +msgid "" +"%s cannot be accessed. Verify that GPFS is active and file system is " +"mounted." msgstr "" -#: cinder/virt/disk/api.py:165 -msgid "no capable image handler configured" +#: cinder/volume/drivers/lvm.py:189 +#, python-format +msgid "Unabled to delete due to existing snapshot for volume: %s" msgstr "" -#: cinder/virt/disk/api.py:178 +#: cinder/volume/drivers/lvm.py:215 #, python-format -msgid "unknown disk image handler: %s" +msgid "Volume device file path %s does not exist." msgstr "" -#: cinder/virt/disk/api.py:189 -msgid "image already mounted" +#: cinder/volume/drivers/lvm.py:221 +#, python-format +msgid "Size for volume: %s not found, cannot secure delete." msgstr "" -#: cinder/virt/disk/api.py:276 cinder/virt/disk/guestfs.py:64 -#: cinder/virt/disk/guestfs.py:78 cinder/virt/disk/mount.py:100 +#: cinder/volume/drivers/lvm.py:262 #, python-format -msgid "Failed to mount filesystem: %s" -msgstr "Failed to mount filesystem: %s" +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" -#: cinder/virt/disk/api.py:291 +#: cinder/volume/drivers/lvm.py:361 #, python-format -msgid "Failed to remove container: %s" +msgid "Unable to update stats on non-initialized Volume Group: %s" msgstr "" -#: cinder/virt/disk/api.py:441 +#: cinder/volume/drivers/lvm.py:462 #, python-format -msgid "User %(username)s not found in password file." +msgid "Error creating iSCSI target, retrying creation for target: %s" msgstr "" -#: cinder/virt/disk/api.py:457 +#: cinder/volume/drivers/lvm.py:482 #, python-format -msgid "User %(username)s not found in shadow file." 
+msgid "volume_info:%s" msgstr "" -#: cinder/virt/disk/guestfs.py:39 -#, python-format -msgid "unsupported partition: %s" +#: cinder/volume/drivers/lvm.py:518 +msgid "Detected inconsistency in provider_location id" msgstr "" -#: cinder/virt/disk/guestfs.py:77 -msgid "unknown guestmount error" +#: cinder/volume/drivers/lvm.py:519 cinder/volume/drivers/lvm.py:724 +#: cinder/volume/drivers/huawei/rest_common.py:1225 +#, python-format +msgid "%s" msgstr "" -#: cinder/virt/disk/loop.py:30 +#: cinder/volume/drivers/lvm.py:573 #, python-format -msgid "Could not attach image to loopback: %s" -msgstr "Could not attach image to loopback: %s" +msgid "Symbolic link %s not found" +msgstr "" -#: cinder/virt/disk/mount.py:76 -msgid "no partitions found" +#: cinder/volume/drivers/nfs.py:109 +msgid "Driver specific implementation needs to return mount_point_base." msgstr "" -#: cinder/virt/disk/mount.py:77 +#: cinder/volume/drivers/nfs.py:263 #, python-format -msgid "Failed to map partitions: %s" +msgid "Expected volume size was %d" msgstr "" -#: cinder/virt/disk/nbd.py:58 -msgid "nbd unavailable: module not loaded" +#: cinder/volume/drivers/nfs.py:264 +#, python-format +msgid " but size is now %d" msgstr "" -#: cinder/virt/disk/nbd.py:63 -msgid "No free nbd devices" -msgstr "No free nbd devices" +#: cinder/volume/drivers/nfs.py:361 +#, fuzzy, python-format +msgid "%s is already mounted" +msgstr "%s is already mounted" -#: cinder/virt/disk/nbd.py:81 +#: cinder/volume/drivers/nfs.py:421 #, python-format -msgid "qemu-nbd error: %s" +msgid "There's no NFS config file configured (%s)" msgstr "" -#: cinder/virt/disk/nbd.py:93 +#: cinder/volume/drivers/nfs.py:426 #, python-format -msgid "nbd device %s did not show up" -msgstr "nbd device %s did not show up" +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" -#: cinder/virt/libvirt/connection.py:265 +#: cinder/volume/drivers/nfs.py:431 #, python-format -msgid "Connecting to libvirt: %s" -msgstr "Connecting to libvirt: %s" - -#: cinder/virt/libvirt/connection.py:286 -msgid "Connection to libvirt broke" -msgstr "Connection to libvirt broke" +msgid "NFS config 'nfs_oversub_ratio' invalid. Must be > 0: %s" +msgstr "" -#: cinder/virt/libvirt/connection.py:388 +#: cinder/volume/drivers/nfs.py:439 #, python-format -msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s" +msgid "NFS config 'nfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" msgstr "" -#: cinder/virt/libvirt/connection.py:400 +#: cinder/volume/drivers/nfs.py:493 #, python-format -msgid "" -"Error from libvirt during saved instance removal. Code=%(errcode)s " -"Error=%(e)s" +msgid "Selected %s as target nfs share." msgstr "" -#: cinder/virt/libvirt/connection.py:411 +#: cinder/volume/drivers/nfs.py:526 #, python-format -msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s" +msgid "%s is above nfs_used_ratio" msgstr "" -#: cinder/virt/libvirt/connection.py:423 -msgid "Instance destroyed successfully." +#: cinder/volume/drivers/nfs.py:529 +#, python-format +msgid "%s is above nfs_oversub_ratio" msgstr "" -#: cinder/virt/libvirt/connection.py:435 +#: cinder/volume/drivers/nfs.py:532 #, python-format -msgid "Error from libvirt during unfilter. 
Code=%(errcode)s Error=%(e)s" +msgid "%s reserved space is above nfs_oversub_ratio" msgstr "" -#: cinder/virt/libvirt/connection.py:461 +#: cinder/volume/drivers/rbd.py:160 #, python-format -msgid "Deleting instance files %(target)s" +msgid "Invalid argument - whence=%s not supported" msgstr "" -#: cinder/virt/libvirt/connection.py:554 -msgid "attaching LXC block device" +#: cinder/volume/drivers/rbd.py:164 +msgid "Invalid argument" msgstr "" -#: cinder/virt/libvirt/connection.py:567 -msgid "detaching LXC block device" +#: cinder/volume/drivers/rbd.py:183 +msgid "fileno() not supported by RBD()" msgstr "" -#: cinder/virt/libvirt/connection.py:692 +#: cinder/volume/drivers/rbd.py:210 #, fuzzy, python-format -msgid "Instance soft rebooted successfully." -msgstr "instance %s: rebooted" - -#: cinder/virt/libvirt/connection.py:696 -msgid "Failed to soft reboot instance." -msgstr "" +msgid "error opening rbd image %s" +msgstr "error opening rbd image %s" -#: cinder/virt/libvirt/connection.py:725 -msgid "Instance shutdown successfully." +#: cinder/volume/drivers/rbd.py:259 +msgid "rados and rbd python libraries not found" msgstr "" -#: cinder/virt/libvirt/connection.py:761 cinder/virt/libvirt/connection.py:905 -msgid "During reboot, instance disappeared." -msgstr "" +#: cinder/volume/drivers/rbd.py:265 +#, fuzzy +msgid "error connecting to ceph cluster" +msgstr "error connecting to ceph cluster" -#: cinder/virt/libvirt/connection.py:766 -msgid "Instance rebooted successfully." +#: cinder/volume/drivers/rbd.py:346 cinder/volume/drivers/sheepdog.py:178 +msgid "error refreshing volume stats" msgstr "" -#: cinder/virt/libvirt/connection.py:867 cinder/virt/xenapi/vmops.py:1358 +#: cinder/volume/drivers/rbd.py:377 #, python-format -msgid "" -"Found %(migration_count)d unconfirmed migrations older than " -"%(confirm_window)d seconds" +msgid "clone depth exceeds limit of %s" msgstr "" -#: cinder/virt/libvirt/connection.py:871 +#: cinder/volume/drivers/rbd.py:411 #, python-format -msgid "Automatically confirming migration %d" +msgid "maximum clone depth (%d) has been reached - flattening source volume" msgstr "" -#: cinder/virt/libvirt/connection.py:896 -msgid "Instance is running" +#: cinder/volume/drivers/rbd.py:423 +#, python-format +msgid "flattening source volume %s" msgstr "" -#: cinder/virt/libvirt/connection.py:910 -msgid "Instance spawned successfully." 
+#: cinder/volume/drivers/rbd.py:435 +#, python-format +msgid "creating snapshot='%s'" msgstr "" -#: cinder/virt/libvirt/connection.py:926 +#: cinder/volume/drivers/rbd.py:445 #, python-format -msgid "data: %(data)r, fpath: %(fpath)r" -msgstr "data: %(data)r, fpath: %(fpath)r" +msgid "cloning '%(src_vol)s@%(src_snap)s' to '%(dest)s'" +msgstr "" -#: cinder/virt/libvirt/connection.py:978 -msgid "Guest does not have a console available" +#: cinder/volume/drivers/rbd.py:459 +msgid "clone created successfully" msgstr "" -#: cinder/virt/libvirt/connection.py:1020 +#: cinder/volume/drivers/rbd.py:468 #, python-format -msgid "Path '%(path)s' supports direct I/O" +msgid "creating volume '%s'" msgstr "" -#: cinder/virt/libvirt/connection.py:1024 +#: cinder/volume/drivers/rbd.py:484 #, python-format -msgid "Path '%(path)s' does not support direct I/O: '%(ex)s'" +msgid "flattening %(pool)s/%(img)s" msgstr "" -#: cinder/virt/libvirt/connection.py:1028 cinder/virt/libvirt/connection.py:1032 +#: cinder/volume/drivers/rbd.py:490 #, python-format -msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'" +msgid "cloning %(pool)s/%(img)s@%(snap)s to %(dst)s" msgstr "" -#: cinder/virt/libvirt/connection.py:1153 -msgid "Creating image" +#: cinder/volume/drivers/rbd.py:527 +msgid "volume has no backup snaps" msgstr "" -#: cinder/virt/libvirt/connection.py:1339 +#: cinder/volume/drivers/rbd.py:550 #, python-format -msgid "Injecting %(injection)s into image %(img_id)s" +msgid "volume %s is not a clone" msgstr "" -#: cinder/virt/libvirt/connection.py:1349 +#: cinder/volume/drivers/rbd.py:568 #, python-format -msgid "Ignoring error injecting data into image %(img_id)s (%(e)s)" +msgid "deleting parent snapshot %s" msgstr "" -#: cinder/virt/libvirt/connection.py:1381 +#: cinder/volume/drivers/rbd.py:579 #, python-format -msgid "block_device_list %s" +msgid "deleting parent %s" msgstr "" -#: cinder/virt/libvirt/connection.py:1658 -msgid "Starting toXML method" +#: cinder/volume/drivers/rbd.py:593 +#, python-format +msgid "volume %s no longer exists in backend" msgstr "" -#: cinder/virt/libvirt/connection.py:1662 -msgid "Finished toXML method" +#: cinder/volume/drivers/rbd.py:609 +msgid "volume has clone snapshot(s)" msgstr "" -#: cinder/virt/libvirt/connection.py:1679 +#: cinder/volume/drivers/rbd.py:625 #, python-format -msgid "" -"Error from libvirt while looking up %(instance_name)s: [Error Code " -"%(error_code)s] %(ex)s" +msgid "deleting rbd volume %s" msgstr "" -#: cinder/virt/libvirt/connection.py:1920 -msgid "libvirt version is too old (does not support getVersion)" +#: cinder/volume/drivers/rbd.py:629 +msgid "" +"ImageBusy error raised while deleting rbd volume. This may have been " +"caused by a connection from a client that has crashed and, if so, may be " +"resolved by retrying the delete after 30 seconds has elapsed." 
msgstr "" -#: cinder/virt/libvirt/connection.py:1942 -#, python-format -msgid "'' must be 1, but %d\n" +#: cinder/volume/drivers/rbd.py:642 +msgid "volume is a clone so cleaning references" msgstr "" -#: cinder/virt/libvirt/connection.py:1969 -#, python-format -msgid "topology (%(topology)s) must have %(ks)s" -msgstr "" +#: cinder/volume/drivers/rbd.py:696 +#, fuzzy, python-format +msgid "connection data: %s" +msgstr "connection data: %s" -#: cinder/virt/libvirt/connection.py:2067 -#, python-format -msgid "" -"Instance launched has CPU info:\n" -"%s" +#: cinder/volume/drivers/rbd.py:705 +msgid "Not stored in rbd" msgstr "" -#: cinder/virt/libvirt/connection.py:2079 -#, python-format -msgid "" -"CPU doesn't have compatibility.\n" -"\n" -"%(ret)s\n" -"\n" -"Refer to %(u)s" +#: cinder/volume/drivers/rbd.py:709 +msgid "Blank components" msgstr "" -#: cinder/virt/libvirt/connection.py:2136 -#, python-format -msgid "Timeout migrating for %s. nwfilter not found." -msgstr "" +#: cinder/volume/drivers/rbd.py:712 +#, fuzzy +msgid "Not an rbd snapshot" +msgstr "Not an rbd snapshot" -#: cinder/virt/libvirt/connection.py:2352 -#, python-format -msgid "skipping %(path)s since it looks like volume" -msgstr "" +#: cinder/volume/drivers/rbd.py:724 +#, fuzzy, python-format +msgid "not cloneable: %s" +msgstr "not cloneable: %s" -#: cinder/virt/libvirt/connection.py:2407 +#: cinder/volume/drivers/rbd.py:728 #, python-format -msgid "Getting disk size of %(i_name)s: %(e)s" +msgid "%s is in a different ceph cluster" msgstr "" -#: cinder/virt/libvirt/connection.py:2458 -#, python-format -msgid "Instance %s: Starting migrate_disk_and_power_off" +#: cinder/volume/drivers/rbd.py:733 +msgid "rbd image clone requires image format to be 'raw' but image {0} is '{1}'" msgstr "" -#: cinder/virt/libvirt/connection.py:2513 -msgid "During wait running, instance disappeared." +#: cinder/volume/drivers/rbd.py:747 +#, fuzzy, python-format +msgid "Unable to open image %(loc)s: %(err)s" +msgstr "Unable to open image %(loc)s: %(err)s" + +#: cinder/volume/drivers/rbd.py:817 +msgid "volume backup complete." msgstr "" -#: cinder/virt/libvirt/connection.py:2518 -msgid "Instance running successfully." +#: cinder/volume/drivers/rbd.py:830 +msgid "volume restore complete." msgstr "" -#: cinder/virt/libvirt/connection.py:2525 +#: cinder/volume/drivers/rbd.py:840 cinder/volume/drivers/sheepdog.py:195 #, python-format -msgid "Instance %s: Starting finish_migration" +msgid "Failed to Extend Volume %(volname)s" msgstr "" -#: cinder/virt/libvirt/connection.py:2565 +#: cinder/volume/drivers/rbd.py:845 cinder/volume/drivers/sheepdog.py:200 +#: cinder/volume/drivers/windows/windows.py:223 #, python-format -msgid "Instance %s: Starting finish_revert_migration" +msgid "Extend volume from %(old_size)s GB to %(new_size)s GB." msgstr "" -#: cinder/virt/libvirt/firewall.py:42 -msgid "" -"Libvirt module could not be loaded. NWFilterFirewall will not work " -"correctly." +#: cinder/volume/drivers/scality.py:67 +msgid "Value required for 'scality_sofs_config'" msgstr "" -#: cinder/virt/libvirt/firewall.py:93 -msgid "Called setup_basic_filtering in nwfilter" +#: cinder/volume/drivers/scality.py:78 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" msgstr "" -#: cinder/virt/libvirt/firewall.py:101 -msgid "Ensuring static filters" +#: cinder/volume/drivers/scality.py:84 +msgid "Cannot execute /sbin/mount.sofs" msgstr "" -#: cinder/virt/libvirt/firewall.py:171 -#, python-format -msgid "The nwfilter(%(instance_filter_name)s) is not found." 
+#: cinder/volume/drivers/scality.py:105 +msgid "Cannot mount Scality SOFS, check syslog for errors" msgstr "" -#: cinder/virt/libvirt/firewall.py:217 +#: cinder/volume/drivers/scality.py:139 #, python-format -msgid "The nwfilter(%(instance_filter_name)s) for%(name)s is not found." +msgid "Cannot find volume dir for Scality SOFS at '%s'" msgstr "" -#: cinder/virt/libvirt/firewall.py:233 -msgid "iptables firewall: Setup Basic Filtering" -msgstr "" +#: cinder/volume/drivers/sheepdog.py:59 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "Sheepdog is not working: %s" -#: cinder/virt/libvirt/firewall.py:252 -msgid "Attempted to unfilter instance which is not filtered" -msgstr "" +#: cinder/volume/drivers/sheepdog.py:64 +msgid "Sheepdog is not working" +msgstr "Sheepdog is not working" -#: cinder/virt/libvirt/imagecache.py:170 +#: cinder/volume/drivers/solidfire.py:144 #, python-format -msgid "%s is a valid instance name" +msgid "Payload for SolidFire API call: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:173 +#: cinder/volume/drivers/solidfire.py:151 #, python-format -msgid "%s has a disk file" +msgid "" +"Failed to make httplib connection SolidFire Cluster: %s (verify san_ip " +"settings)" msgstr "" -#: cinder/virt/libvirt/imagecache.py:175 +#: cinder/volume/drivers/solidfire.py:154 #, python-format -msgid "Instance %(instance)s is backed by %(backing)s" +msgid "Failed to make httplib connection: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:186 +#: cinder/volume/drivers/solidfire.py:161 #, python-format msgid "" -"Instance %(instance)s is using a backing file %(backing)s which does not " -"appear in the image service" +"Request to SolidFire cluster returned bad status: %(status)s / %(reason)s" +" (check san_login/san_password settings)" msgstr "" -#: cinder/virt/libvirt/imagecache.py:237 +#: cinder/volume/drivers/solidfire.py:166 #, python-format -msgid "%(id)s (%(base_file)s): image verification failed" +msgid "HTTP request failed, with status: %(status)s and reason: %(reason)s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:247 +#: cinder/volume/drivers/solidfire.py:177 #, python-format -msgid "%(id)s (%(base_file)s): image verification skipped, no hash stored" +msgid "Call to json.loads() raised an exception: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:266 +#: cinder/volume/drivers/solidfire.py:183 #, python-format -msgid "Cannot remove %(base_file)s, it does not exist" +msgid "Results of SolidFire API call: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:278 +#: cinder/volume/drivers/solidfire.py:187 #, python-format -msgid "Base file too young to remove: %s" +msgid "Clone operation encountered: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:281 +#: cinder/volume/drivers/solidfire.py:189 #, python-format -msgid "Removing base file: %s" +msgid "Waiting for outstanding operation before retrying snapshot: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:288 +#: cinder/volume/drivers/solidfire.py:195 #, python-format -msgid "Failed to remove %(base_file)s, error was %(error)s" +msgid "Detected xDBVersionMismatch, retry %s of 5" msgstr "" -#: cinder/virt/libvirt/imagecache.py:299 +#: cinder/volume/drivers/solidfire.py:202 +#: cinder/volume/drivers/solidfire.py:271 +#: cinder/volume/drivers/solidfire.py:366 +#, fuzzy, python-format +msgid "API response: %s" +msgstr "API response: %s" + +#: cinder/volume/drivers/solidfire.py:222 #, python-format -msgid "%(id)s (%(base_file)s): checking" +msgid "Found solidfire account: %s" msgstr "" -#: 
cinder/virt/libvirt/imagecache.py:318 +#: cinder/volume/drivers/solidfire.py:253 #, python-format -msgid "" -"%(id)s (%(base_file)s): in use: on this node %(local)d local, %(remote)d " -"on other nodes" +msgid "solidfire account: %s does not exist, create it..." msgstr "" -#: cinder/virt/libvirt/imagecache.py:330 +#: cinder/volume/drivers/solidfire.py:315 #, python-format -msgid "" -"%(id)s (%(base_file)s): warning -- an absent base file is in use! " -"instances: %(instance_list)s" +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" msgstr "" -#: cinder/virt/libvirt/imagecache.py:338 +#: cinder/volume/drivers/solidfire.py:398 +#, fuzzy +msgid "Failed to get model update from clone" +msgstr "Failed to get model update from clone" + +#: cinder/volume/drivers/solidfire.py:410 #, python-format -msgid "%(id)s (%(base_file)s): in use on (%(remote)d on other nodes)" +msgid "Failed volume create: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:348 +#: cinder/volume/drivers/solidfire.py:425 #, python-format -msgid "%(id)s (%(base_file)s): image is not in use" +msgid "More than one valid preset was detected, using %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:354 +#: cinder/volume/drivers/solidfire.py:460 #, python-format -msgid "%(id)s (%(base_file)s): image is in use" +msgid "Failed to get SolidFire Volume: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:377 +#: cinder/volume/drivers/solidfire.py:469 #, python-format -msgid "Skipping verification, no base directory at %s" +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." msgstr "" -#: cinder/virt/libvirt/imagecache.py:381 -msgid "Verify base images" +#: cinder/volume/drivers/solidfire.py:478 +#, python-format +msgid "Volume %s, not found on SF Cluster." msgstr "" -#: cinder/virt/libvirt/imagecache.py:388 +#: cinder/volume/drivers/solidfire.py:481 #, python-format -msgid "Image id %(id)s yields fingerprint %(fingerprint)s" +msgid "Found %(count)s volumes mapped to id: %(uuid)s." msgstr "" -#: cinder/virt/libvirt/imagecache.py:406 -#, python-format -msgid "Unknown base file: %s" +#: cinder/volume/drivers/solidfire.py:550 +msgid "Enter SolidFire delete_volume..." msgstr "" -#: cinder/virt/libvirt/imagecache.py:411 +#: cinder/volume/drivers/solidfire.py:554 #, python-format -msgid "Active base files: %s" +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:556 +msgid "This usually means the volume was never successfully created." msgstr "" -#: cinder/virt/libvirt/imagecache.py:414 +#: cinder/volume/drivers/solidfire.py:569 #, python-format -msgid "Corrupt base files: %s" +msgid "Failed to delete SolidFire Volume: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:418 +#: cinder/volume/drivers/solidfire.py:572 +#: cinder/volume/drivers/solidfire.py:646 +#: cinder/volume/drivers/solidfire.py:709 +#: cinder/volume/drivers/solidfire.py:734 #, python-format -msgid "Removable base files: %s" +msgid "Volume ID %s was not found on the SolidFire Cluster!" msgstr "" -#: cinder/virt/libvirt/imagecache.py:426 -msgid "Verification complete" +#: cinder/volume/drivers/solidfire.py:575 +msgid "Leaving SolidFire delete_volume" msgstr "" -#: cinder/virt/libvirt/utils.py:264 -msgid "Unable to find an open port" -msgstr "Unable to find an open port" +#: cinder/volume/drivers/solidfire.py:579 +msgid "Executing SolidFire ensure_export..." 
+msgstr "" -#: cinder/virt/libvirt/vif.py:90 -#, python-format -msgid "Ensuring vlan %(vlan)s and bridge %(bridge)s" +#: cinder/volume/drivers/solidfire.py:587 +msgid "Executing SolidFire create_export..." msgstr "" -#: cinder/virt/libvirt/vif.py:99 -#, python-format -msgid "Ensuring bridge %s" +#: cinder/volume/drivers/solidfire.py:638 +msgid "Entering SolidFire extend_volume..." msgstr "" -#: cinder/virt/libvirt/vif.py:165 cinder/virt/libvirt/vif.py:220 -#, python-format -msgid "Failed while unplugging vif of instance '%s'" +#: cinder/volume/drivers/solidfire.py:660 +msgid "Leaving SolidFire extend_volume" msgstr "" -#: cinder/virt/libvirt/volume.py:163 -#, python-format -msgid "iSCSI device not found at %s" +#: cinder/volume/drivers/solidfire.py:665 +msgid "Updating cluster status info" msgstr "" -#: cinder/virt/libvirt/volume.py:166 -#, python-format -msgid "" -"ISCSI volume not yet found at: %(mount_device)s. Will rescan & retry. " -"Try number: %(tries)s" +#: cinder/volume/drivers/solidfire.py:673 +#, fuzzy +msgid "Failed to get updated stats" +msgstr "Failed to get updated stats" + +#: cinder/volume/drivers/solidfire.py:703 +#: cinder/volume/drivers/solidfire.py:728 +msgid "Entering SolidFire attach_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:773 +msgid "Leaving SolidFire transfer volume" msgstr "" -#: cinder/virt/libvirt/volume.py:178 +#: cinder/volume/drivers/zadara.py:236 #, python-format -msgid "Found iSCSI node %(mount_device)s (after %(tries)s rescans)" +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" msgstr "" -#: cinder/virt/vmwareapi/error_util.py:93 +#: cinder/volume/drivers/zadara.py:260 #, python-format -msgid "Error(s) %s occurred in the call to RetrieveProperties" +msgid "Operation completed. %(data)s" msgstr "" -#: cinder/virt/vmwareapi/fake.py:44 cinder/virt/xenapi/fake.py:77 +#: cinder/volume/drivers/zadara.py:357 #, python-format -msgid "%(text)s: _db_content => %(content)s" -msgstr "%(text)s: _db_content => %(content)s" +msgid "Pool %(name)s: %(total)sGB total, %(free)sGB free" +msgstr "" -#: cinder/virt/vmwareapi/fake.py:131 +#: cinder/volume/drivers/zadara.py:408 cinder/volume/drivers/zadara.py:531 #, python-format -msgid "Property %(attr)s not set for the managed object %(objName)s" +msgid "Volume %(name)s could not be found. 
It might be already deleted" msgstr "" -#: cinder/virt/vmwareapi/fake.py:437 -msgid "There is no VM registered" +#: cinder/volume/drivers/zadara.py:438 +#, python-format +msgid "Create snapshot: %s" msgstr "" -#: cinder/virt/vmwareapi/fake.py:439 cinder/virt/vmwareapi/fake.py:609 +#: cinder/volume/drivers/zadara.py:445 cinder/volume/drivers/zadara.py:490 +#: cinder/volume/drivers/zadara.py:516 #, python-format -msgid "Virtual Machine with ref %s is not there" +msgid "Volume %(name)s not found" msgstr "" -#: cinder/virt/vmwareapi/fake.py:502 +#: cinder/volume/drivers/zadara.py:456 #, python-format -msgid "Logging out a session that is invalid or already logged out: %s" +msgid "Delete snapshot: %s" msgstr "" -#: cinder/virt/vmwareapi/fake.py:517 -msgid "Session is faulty" +#: cinder/volume/drivers/zadara.py:464 +#, python-format +msgid "snapshot: original volume %s not found, skipping delete operation" msgstr "" -#: cinder/virt/vmwareapi/fake.py:520 -msgid "Session Invalid" +#: cinder/volume/drivers/zadara.py:472 +#, python-format +msgid "snapshot: snapshot %s not found, skipping delete operation" msgstr "" -#: cinder/virt/vmwareapi/fake.py:606 -msgid " No Virtual Machine has been registered yet" +#: cinder/volume/drivers/zadara.py:483 +#, python-format +msgid "Creating volume from snapshot: %s" msgstr "" -#: cinder/virt/vmwareapi/io_util.py:99 +#: cinder/volume/drivers/zadara.py:496 #, python-format -msgid "Glance image %s is in killed state" +msgid "Snapshot %(name)s not found" msgstr "" -#: cinder/virt/vmwareapi/io_util.py:107 +#: cinder/volume/drivers/zadara.py:614 #, python-format -msgid "Glance image %(image_id)s is in unknown state - %(state)s" +msgid "Attach properties: %(properties)s" msgstr "" -#: cinder/virt/vmwareapi/network_utils.py:128 +#: cinder/volume/drivers/emc/emc_smis_common.py:40 msgid "" -"ESX SOAP server returned an empty port group for the host system in its " -"response" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." msgstr "" -#: cinder/virt/vmwareapi/network_utils.py:155 -#, python-format -msgid "Creating Port Group with name %s on the ESX host" +#: cinder/volume/drivers/emc/emc_smis_common.py:79 +msgid "Entering create_volume." msgstr "" -#: cinder/virt/vmwareapi/network_utils.py:169 -#, python-format -msgid "Created Port Group with name %s on the ESX host" -msgstr "" +#: cinder/volume/drivers/emc/emc_smis_common.py:83 +#, fuzzy, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "Create Volume: %(volume)s Size: %(size)lu" -#: cinder/virt/vmwareapi/read_write_util.py:150 +#: cinder/volume/drivers/emc/emc_smis_common.py:91 #, python-format -msgid "Exception during HTTP connection close in VMWareHTTpWrite. Exception is %s" -msgstr "" - -#: cinder/virt/vmwareapi/vim.py:84 -msgid "Unable to import suds." +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" msgstr "" -#: cinder/virt/vmwareapi/vim.py:90 -msgid "Must specify vmwareapi_wsdl_loc" +#: cinder/volume/drivers/emc/emc_smis_common.py:98 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" msgstr "" -#: cinder/virt/vmwareapi/vim.py:145 +#: cinder/volume/drivers/emc/emc_smis_common.py:107 #, python-format -msgid "No such SOAP method '%s' provided by VI SDK" +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." 
msgstr "" -#: cinder/virt/vmwareapi/vim.py:150 +#: cinder/volume/drivers/emc/emc_smis_common.py:115 #, python-format -msgid "httplib error in %s: " +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" msgstr "" -#: cinder/virt/vmwareapi/vim.py:157 +#: cinder/volume/drivers/emc/emc_smis_common.py:130 #, python-format -msgid "Socket error in %s: " +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vim.py:162 +#: cinder/volume/drivers/emc/emc_smis_common.py:137 #, python-format -msgid "Type error in %s: " +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" msgstr "" -#: cinder/virt/vmwareapi/vim.py:166 +#: cinder/volume/drivers/emc/emc_smis_common.py:144 #, python-format -msgid "Exception in %s " +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:66 -msgid "Getting list of instances" +#: cinder/volume/drivers/emc/emc_smis_common.py:152 +msgid "Entering create_volume_from_snapshot." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:82 +#: cinder/volume/drivers/emc/emc_smis_common.py:157 #, python-format -msgid "Got total of %s instances" +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:126 -msgid "Couldn't get a local Datastore reference" +#: cinder/volume/drivers/emc/emc_smis_common.py:167 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:196 +#: cinder/volume/drivers/emc/emc_smis_common.py:177 #, python-format -msgid "Creating VM with the name %s on the ESX host" +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:205 +#: cinder/volume/drivers/emc/emc_smis_common.py:188 #, python-format -msgid "Created VM with the name %s on the ESX host" +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:234 +#: cinder/volume/drivers/emc/emc_smis_common.py:197 #, python-format msgid "" -"Creating Virtual Disk of size %(vmdk_file_size_in_kb)s KB and adapter " -"type %(adapter_type)s on the ESX host local store %(data_store_name)s" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:251 +#: cinder/volume/drivers/emc/emc_smis_common.py:218 #, python-format msgid "" -"Created Virtual Disk of size %(vmdk_file_size_in_kb)s KB on the ESX host " -"local store %(data_store_name)s" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. 
Return code: %(rc)lu.Error: %(error)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:260 +#: cinder/volume/drivers/emc/emc_smis_common.py:230 #, python-format msgid "" -"Deleting the file %(flat_uploaded_vmdk_path)s on the ESX host localstore " -"%(data_store_name)s" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:272 +#: cinder/volume/drivers/emc/emc_smis_common.py:241 #, python-format msgid "" -"Deleted the file %(flat_uploaded_vmdk_path)s on the ESX host local store " -"%(data_store_name)s" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:283 +#: cinder/volume/drivers/emc/emc_smis_common.py:257 #, python-format msgid "" -"Downloading image file data %(image_ref)s to the ESX data store " -"%(data_store_name)s" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:298 +#: cinder/volume/drivers/emc/emc_smis_common.py:266 #, python-format msgid "" -"Downloaded image file data %(image_ref)s to the ESX data store " -"%(data_store_name)s" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:315 +#: cinder/volume/drivers/emc/emc_smis_common.py:278 #, python-format -msgid "Reconfiguring VM instance %s to attach the image disk" +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:322 -#, python-format -msgid "Reconfigured VM instance %s to attach the image disk" +#: cinder/volume/drivers/emc/emc_smis_common.py:287 +msgid "Entering create_cloned_volume." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:329 +#: cinder/volume/drivers/emc/emc_smis_common.py:292 #, python-format -msgid "Powering on the VM instance %s" +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:335 +#: cinder/volume/drivers/emc/emc_smis_common.py:302 #, python-format -msgid "Powered on the VM instance %s" +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:381 +#: cinder/volume/drivers/emc/emc_smis_common.py:312 #, python-format -msgid "Creating Snapshot of the VM instance %s " +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." 
msgstr "" -#: cinder/virt/vmwareapi/vmops.py:391 +#: cinder/volume/drivers/emc/emc_smis_common.py:321 #, python-format -msgid "Created Snapshot of the VM instance %s " +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:434 +#: cinder/volume/drivers/emc/emc_smis_common.py:342 #, python-format -msgid "Copying disk data before snapshot of the VM instance %s" +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. Return code: %(rc)lu.Error: %(error)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:447 +#: cinder/volume/drivers/emc/emc_smis_common.py:354 #, python-format -msgid "Copied disk data before snapshot of the VM instance %s" +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:456 +#: cinder/volume/drivers/emc/emc_smis_common.py:365 #, python-format -msgid "Uploading image %s" +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:469 +#: cinder/volume/drivers/emc/emc_smis_common.py:381 #, python-format -msgid "Uploaded image %s" +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:479 +#: cinder/volume/drivers/emc/emc_smis_common.py:390 #, python-format -msgid "Deleting temporary vmdk file %s" +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:488 +#: cinder/volume/drivers/emc/emc_smis_common.py:402 #, python-format -msgid "Deleted temporary vmdk file %s" +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:520 -msgid "instance is not powered on" +#: cinder/volume/drivers/emc/emc_smis_common.py:411 +msgid "Entering delete_volume." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:527 +#: cinder/volume/drivers/emc/emc_smis_common.py:413 #, python-format -msgid "Rebooting guest OS of VM %s" +msgid "Delete Volume: %(volume)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:530 +#: cinder/volume/drivers/emc/emc_smis_common.py:420 #, python-format -msgid "Rebooted guest OS of VM %s" +msgid "Volume %(name)s not found on the array. No volume to delete." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:532 +#: cinder/volume/drivers/emc/emc_smis_common.py:430 #, python-format -msgid "Doing hard reboot of VM %s" +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." 
msgstr "" -#: cinder/virt/vmwareapi/vmops.py:536 +#: cinder/volume/drivers/emc/emc_smis_common.py:438 #, python-format -msgid "Did hard reboot of VM %s" +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:548 +#: cinder/volume/drivers/emc/emc_smis_common.py:442 #, python-format -msgid "instance - %s not present" +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:567 +#: cinder/volume/drivers/emc/emc_smis_common.py:456 #, python-format -msgid "Powering off the VM %s" +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:572 +#: cinder/volume/drivers/emc/emc_smis_common.py:465 #, python-format -msgid "Powered off the VM %s" +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:576 -#, python-format -msgid "Unregistering the VM %s" +#: cinder/volume/drivers/emc/emc_smis_common.py:472 +msgid "Entering create_snapshot." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:579 +#: cinder/volume/drivers/emc/emc_smis_common.py:476 #, python-format -msgid "Unregistered the VM %s" +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:581 +#: cinder/volume/drivers/emc/emc_smis_common.py:488 #, python-format -msgid "" -"In vmwareapi:vmops:destroy, got this exception while un-registering the " -"VM: %s" +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:592 +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:497 +#: cinder/volume/drivers/emc/emc_smis_common.py:567 #, python-format -msgid "Deleting contents of the VM %(name)s from datastore %(datastore_name)s" +msgid "Cannot find Replication Service to create snapshot for volume %s." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:602 +#: cinder/volume/drivers/emc/emc_smis_common.py:502 #, python-format -msgid "Deleted contents of the VM %(name)s from datastore %(datastore_name)s" +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:607 +#: cinder/volume/drivers/emc/emc_smis_common.py:518 #, python-format msgid "" -"In vmwareapi:vmops:destroy, got this exception while deleting the VM " -"contents from the disk: %s" -msgstr "" - -#: cinder/virt/vmwareapi/vmops.py:615 -msgid "pause not supported for vmwareapi" -msgstr "" - -#: cinder/virt/vmwareapi/vmops.py:619 -msgid "unpause not supported for vmwareapi" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:633 +#: cinder/volume/drivers/emc/emc_smis_common.py:527 #, python-format -msgid "Suspending the VM %s " +msgid "" +"Error Create Snapshot: %(snapshot)s Volume: %(volume)s Error: " +"%(errordesc)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:637 +#: cinder/volume/drivers/emc/emc_smis_common.py:535 #, python-format -msgid "Suspended the VM %s " +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:640 -msgid "instance is powered off and can not be suspended." 
+#: cinder/volume/drivers/emc/emc_smis_common.py:541 +msgid "Entering delete_snapshot." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:643 +#: cinder/volume/drivers/emc/emc_smis_common.py:545 #, python-format -msgid "VM %s was already in suspended state. So returning without doing anything" +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:656 +#: cinder/volume/drivers/emc/emc_smis_common.py:551 #, python-format -msgid "Resuming the VM %s" +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:661 +#: cinder/volume/drivers/emc/emc_smis_common.py:559 #, python-format -msgid "Resumed the VM %s " -msgstr "" - -#: cinder/virt/vmwareapi/vmops.py:663 -msgid "instance is not in a suspended state" -msgstr "" - -#: cinder/virt/vmwareapi/vmops.py:699 -msgid "get_diagnostics not implemented for vmwareapi" +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:757 +#: cinder/volume/drivers/emc/emc_smis_common.py:574 #, python-format msgid "" -"Reconfiguring VM instance %(name)s to set the machine id with ip - " -"%(ip_addr)s" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:765 +#: cinder/volume/drivers/emc/emc_smis_common.py:590 #, python-format msgid "" -"Reconfigured VM instance %(name)s to set the machine id with ip - " -"%(ip_addr)s" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:802 +#: cinder/volume/drivers/emc/emc_smis_common.py:599 #, python-format -msgid "Creating directory with path %s" +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:806 +#: cinder/volume/drivers/emc/emc_smis_common.py:611 #, python-format -msgid "Created directory with path %s" +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:89 +#: cinder/volume/drivers/emc/emc_smis_common.py:621 +#, fuzzy, python-format +msgid "Create export: %(volume)s" +msgstr "Create export: %(volume)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:626 +#, fuzzy, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "create_export: Volume: %(volume)s Device ID: %(device_id)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:648 #, python-format -msgid "Downloading image %s from glance image server" +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:103 +#: cinder/volume/drivers/emc/emc_smis_common.py:663 #, python-format -msgid "Downloaded image %s from glance image server" +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:108 +#: cinder/volume/drivers/emc/emc_smis_common.py:674 +#, fuzzy, python-format +msgid "Error mapping volume %s." +msgstr "Error mapping volume %s." 
+ +#: cinder/volume/drivers/emc/emc_smis_common.py:678 +#, fuzzy, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "ExposePaths for volume %s completed successfully." + +#: cinder/volume/drivers/emc/emc_smis_common.py:694 #, python-format -msgid "Uploading image %s to the Glance image server" +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:129 +#: cinder/volume/drivers/emc/emc_smis_common.py:707 #, python-format -msgid "Uploaded image %s to the Glance image server" +msgid "Error unmapping volume %s." msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:139 +#: cinder/volume/drivers/emc/emc_smis_common.py:711 +#, fuzzy, python-format +msgid "HidePaths for volume %s completed successfully." +msgstr "HidePaths for volume %s completed successfully." + +#: cinder/volume/drivers/emc/emc_smis_common.py:724 #, python-format -msgid "Getting image size for the image %s" +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:143 +#: cinder/volume/drivers/emc/emc_smis_common.py:739 #, python-format -msgid "Got image size of %(size)s for the image %(image)s" +msgid "Error mapping volume %(vol)s. %(error)s" msgstr "" -#: cinder/virt/xenapi/fake.py:553 cinder/virt/xenapi/fake.py:652 -#: cinder/virt/xenapi/fake.py:670 cinder/virt/xenapi/fake.py:732 -msgid "Raising NotImplemented" -msgstr "Raising NotImplemented" +#: cinder/volume/drivers/emc/emc_smis_common.py:744 +#, fuzzy, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "AddMembers for volume %s completed successfully." -#: cinder/virt/xenapi/fake.py:555 +#: cinder/volume/drivers/emc/emc_smis_common.py:757 #, python-format -msgid "xenapi.fake does not have an implementation for %s" -msgstr "xenapi.fake does not have an implementation for %s" +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" -#: cinder/virt/xenapi/fake.py:589 +#: cinder/volume/drivers/emc/emc_smis_common.py:770 #, python-format -msgid "Calling %(localname)s %(impl)s" -msgstr "Calling %(localname)s %(impl)s" +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" -#: cinder/virt/xenapi/fake.py:594 -#, python-format -msgid "Calling getter %s" -msgstr "Calling getter %s" +#: cinder/volume/drivers/emc/emc_smis_common.py:775 +#, fuzzy, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "RemoveMembers for volume %s completed successfully." 
-#: cinder/virt/xenapi/fake.py:654 +#: cinder/volume/drivers/emc/emc_smis_common.py:781 #, python-format -msgid "" -"xenapi.fake does not have an implementation for %s or it has been called " -"with the wrong number of arguments" +msgid "Map volume: %(volume)s" msgstr "" -"xenapi.fake does not have an implementation for %s or it has been called " -"with the wrong number of arguments" -#: cinder/virt/xenapi/host.py:67 +#: cinder/volume/drivers/emc/emc_smis_common.py:790 +#: cinder/volume/drivers/emc/emc_smis_common.py:820 #, python-format -msgid "" -"Instance %(name)s running on %(host)s could not be found in the database:" -" assuming it is a worker VM and skipping migration to a new host" +msgid "Cannot find Controller Configuration Service for storage system %s" msgstr "" -#: cinder/virt/xenapi/host.py:137 +#: cinder/volume/drivers/emc/emc_smis_common.py:804 #, python-format -msgid "Unable to get SR for this host: %s" +msgid "Unmap volume: %(volume)s" msgstr "" -#: cinder/virt/xenapi/host.py:169 -msgid "Unable to get updated status" +#: cinder/volume/drivers/emc/emc_smis_common.py:810 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." msgstr "" -#: cinder/virt/xenapi/host.py:172 +#: cinder/volume/drivers/emc/emc_smis_common.py:834 #, python-format -msgid "The call to %(method)s returned an error: %(e)s." +msgid "Initialize connection: %(volume)s" msgstr "" -#: cinder/virt/xenapi/network_utils.py:37 +#: cinder/volume/drivers/emc/emc_smis_common.py:840 #, python-format -msgid "Found non-unique network for name_label %s" +msgid "Volume %s is already mapped." msgstr "" -#: cinder/virt/xenapi/network_utils.py:55 +#: cinder/volume/drivers/emc/emc_smis_common.py:852 #, python-format -msgid "Found non-unique network for bridge %s" -msgstr "Found non-unique network for bridge %s" +msgid "Terminate connection: %(volume)s" +msgstr "" -#: cinder/virt/xenapi/network_utils.py:58 +#: cinder/volume/drivers/emc/emc_smis_common.py:884 #, python-format -msgid "Found no network for bridge %s" -msgstr "Found no network for bridge %s" +msgid "Found Storage Type: %s" +msgstr "" -#: cinder/virt/xenapi/pool.py:111 -#, python-format -msgid "Unable to eject %(host)s from the pool; pool not empty" +#: cinder/volume/drivers/emc/emc_smis_common.py:887 +msgid "Storage type not found." msgstr "" -#: cinder/virt/xenapi/pool.py:126 +#: cinder/volume/drivers/emc/emc_smis_common.py:903 #, python-format -msgid "Unable to eject %(host)s from the pool; No master found" +msgid "Found Masking View: %s" msgstr "" -#: cinder/virt/xenapi/pool.py:143 -#, python-format -msgid "Pool-Join failed: %(e)s" +#: cinder/volume/drivers/emc/emc_smis_common.py:906 +msgid "Masking View not found." msgstr "" -#: cinder/virt/xenapi/pool.py:146 -#, python-format -msgid "Unable to join %(host)s in the pool" +#: cinder/volume/drivers/emc/emc_smis_common.py:928 +msgid "Ecom user not found." msgstr "" -#: cinder/virt/xenapi/pool.py:162 +#: cinder/volume/drivers/emc/emc_smis_common.py:948 #, python-format -msgid "Pool-eject failed: %(e)s" +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" msgstr "" -#: cinder/virt/xenapi/pool.py:174 -#, fuzzy, python-format -msgid "Unable to set up pool: %(e)s." -msgstr "Unable to detach volume %s" +#: cinder/volume/drivers/emc/emc_smis_common.py:952 +msgid "Ecom server not found." 
+msgstr "" -#: cinder/virt/xenapi/pool.py:185 +#: cinder/volume/drivers/emc/emc_smis_common.py:959 +#, fuzzy +msgid "Cannot connect to ECOM server" +msgstr "Cannot connect to ECOM server" + +#: cinder/volume/drivers/emc/emc_smis_common.py:971 #, python-format -msgid "Pool-set_name_label failed: %(e)s" +msgid "Found Replication Service: %s" msgstr "" -#: cinder/virt/xenapi/vif.py:103 +#: cinder/volume/drivers/emc/emc_smis_common.py:984 #, python-format -msgid "Found no PIF for device %s" +msgid "Found Storage Configuration Service: %s" msgstr "" -#: cinder/virt/xenapi/vif.py:122 +#: cinder/volume/drivers/emc/emc_smis_common.py:997 #, python-format -msgid "" -"PIF %(pif_rec['uuid'])s for network %(bridge)s has VLAN id %(pif_vlan)d. " -"Expected %(vlan_num)d" +msgid "Found Controller Configuration Service: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:218 -msgid "Created VM" +#: cinder/volume/drivers/emc/emc_smis_common.py:1010 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:245 +#: cinder/volume/drivers/emc/emc_smis_common.py:1054 #, python-format -msgid "VBD not found in instance %s" -msgstr "VBD not found in instance %s" - -#: cinder/virt/xenapi/vm_utils.py:262 -#, fuzzy, python-format -msgid "VBD %s already detached" -msgstr "group %s already exists" +msgid "Pool %(storage_type)s is not found." +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:265 +#: cinder/volume/drivers/emc/emc_smis_common.py:1060 #, python-format -msgid "VBD %(vbd_ref)s detach rejected, attempt %(num_attempt)d/%(max_attempts)d" +msgid "Storage system not found for pool %(storage_type)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:270 +#: cinder/volume/drivers/emc/emc_smis_common.py:1066 #, python-format -msgid "Unable to unplug VBD %s" -msgstr "Unable to unplug VBD %s" +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:275 +#: cinder/volume/drivers/emc/emc_smis_common.py:1082 #, python-format -msgid "Reached maximum number of retries trying to unplug VBD %s" +msgid "Pool name: %(poolname)s System name: %(systemname)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:286 +#: cinder/volume/drivers/emc/emc_smis_common.py:1114 #, python-format -msgid "Unable to destroy VBD %s" -msgstr "Unable to destroy VBD %s" +msgid "Volume %(volumename)s not found on the array." +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:305 +#: cinder/volume/drivers/emc/emc_smis_common.py:1117 #, fuzzy, python-format -msgid "Creating %(vbd_type)s-type VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " -msgstr "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s." +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "Volume name: %(volumename)s Volume instance: %(vol_instance)s." -#: cinder/virt/xenapi/vm_utils.py:308 +#: cinder/volume/drivers/emc/emc_smis_common.py:1130 #, python-format -msgid "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s." -msgstr "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s." +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:319 +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 #, python-format -msgid "Unable to destroy VDI %s" +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. 
" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:336 +#: cinder/volume/drivers/emc/emc_smis_common.py:1158 #, python-format msgid "" -"Created VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s)" -" on %(sr_ref)s." +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." msgstr "" -"Created VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s)" -" on %(sr_ref)s." -#: cinder/virt/xenapi/vm_utils.py:345 +#: cinder/volume/drivers/emc/emc_smis_common.py:1184 #, python-format -msgid "Copied VDI %(vdi_ref)s from VDI %(vdi_to_copy_ref)s on %(sr_ref)s." +msgid "Error finding %s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:353 +#: cinder/volume/drivers/emc/emc_smis_common.py:1188 #, python-format -msgid "Cloned VDI %(vdi_ref)s from VDI %(vdi_to_clone_ref)s" +msgid "Found %(name)s: %(initiator)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:372 +#: cinder/volume/drivers/emc/emc_smis_common.py:1248 #, python-format -msgid "No primary VDI found for %(vm_ref)s" +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:379 -#, fuzzy, python-format -msgid "Snapshotting with label '%(label)s'" -msgstr "Snapshotting VM %(vm_ref)s with label '%(label)s'..." - -#: cinder/virt/xenapi/vm_utils.py:392 -#, fuzzy, python-format -msgid "Created snapshot %(template_vm_ref)s" -msgstr "Created snapshot %(template_vm_ref)s from VM %(vm_ref)s." - -#: cinder/virt/xenapi/vm_utils.py:431 +#: cinder/volume/drivers/emc/emc_smis_common.py:1289 #, python-format -msgid "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s" -msgstr "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s" +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:583 +#: cinder/volume/drivers/emc/emc_smis_common.py:1302 #, python-format -msgid "Creating blank HD of size %(req_size)d gigs" +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:634 +#: cinder/volume/drivers/emc/emc_smis_common.py:1314 #, python-format msgid "" -"Fast cloning is only supported on default local SR of type ext. SR on " -"this system was found to be of type %(sr_type)s. Ignoring the cow flag." +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:724 +#: cinder/volume/drivers/emc/emc_smis_common.py:1326 #, python-format msgid "" -"download_vhd %(image)s attempt %(attempt_num)d/%(max_attempts)d from " -"%(glance_host)s:%(glance_port)s" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:734 +#: cinder/volume/drivers/emc/emc_smis_common.py:1361 #, python-format -msgid "download_vhd failed: %r" +msgid "Available device number on %(storage)s: %(device)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:750 +#: cinder/volume/drivers/emc/emc_smis_common.py:1404 #, python-format -msgid "Asking xapi to fetch vhd image %(image)s" +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." 
msgstr "" -#: cinder/virt/xenapi/vm_utils.py:760 +#: cinder/volume/drivers/emc/emc_smis_common.py:1409 #, python-format -msgid "" -"xapi 'download_vhd' returned VDI of type '%(vdi_type)s' with UUID " -"'%(vdi_uuid)s'" +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:789 +#: cinder/volume/drivers/emc/emc_smis_common.py:1419 #, python-format -msgid "vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes=%(vdi_size_bytes)d" +msgid "Device info: %(data)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:805 +#: cinder/volume/drivers/emc/emc_smis_common.py:1441 #, python-format -msgid "image_size_bytes=%(size_bytes)d, allowed_size_bytes=%(allowed_size_bytes)d" +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:809 +#: cinder/volume/drivers/emc/emc_smis_common.py:1463 #, python-format -msgid "" -"Image size %(size_bytes)d exceeded instance_type allowed size " -"%(allowed_size_bytes)d" +msgid "Found Storage Processor System: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:831 +#: cinder/volume/drivers/emc/emc_smis_common.py:1491 #, python-format -msgid "Fetching image %(image)s, type %(image_type_str)" +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:844 -#, fuzzy, python-format -msgid "Size for image %(image)s: %(virtual_size)d" -msgstr "Size for image %(image)s:%(virtual_size)d" +#: cinder/volume/drivers/emc/emc_smis_common.py:1520 +msgid "Error finding Storage Hardware ID Service." +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:853 +#: cinder/volume/drivers/emc/emc_smis_common.py:1526 #, python-format msgid "" -"Kernel/Ramdisk image is too large: %(vdi_size)d bytes, max %(max_size)d " -"bytes" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:870 -#, python-format -msgid "Copying VDI %s to /boot/guest on dom0" -msgstr "Copying VDI %s to /boot/guest on dom0" +#: cinder/volume/drivers/emc/emc_smis_common.py:1538 +msgid "Error finding Target WWNs." +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:884 +#: cinder/volume/drivers/emc/emc_smis_common.py:1548 #, python-format -msgid "Kernel/Ramdisk VDI %s destroyed" -msgstr "Kernel/Ramdisk VDI %s destroyed" +msgid "Add target WWN: %s." +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:895 -msgid "Failed to fetch glance image" +#: cinder/volume/drivers/emc/emc_smis_common.py:1550 +#, python-format +msgid "Target WWNs: %s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:934 +#: cinder/volume/drivers/emc/emc_smis_common.py:1566 #, python-format -msgid "Detected %(image_type_str)s format for image %(image_ref)s" +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." 
msgstr "" -#: cinder/virt/xenapi/vm_utils.py:955 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:152 #, python-format -msgid "Looking up vdi %s for PV kernel" -msgstr "Looking up vdi %s for PV kernel" +msgid "Could not find iSCSI export for volume %s" +msgstr "Could not find iSCSI export for volume %s" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:161 +#, fuzzy, python-format +msgid "Cannot find device number for volume %s" +msgstr "Cannot find device number for volume %s" -#: cinder/virt/xenapi/vm_utils.py:973 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:191 +#, fuzzy, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "Found iSCSI endpoint: %s" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:198 #, python-format -msgid "Unknown image format %(disk_image_type)s" +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1016 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:215 #, python-format -msgid "VDI %s is still available" -msgstr "VDI %s is still available" +msgid "ISCSI properties: %s" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1059 +#: cinder/volume/drivers/hds/hds.py:70 #, python-format -msgid "(VM_UTILS) xenserver vm state -> |%s|" -msgstr "(VM_UTILS) xenserver vm state -> |%s|" +msgid "Range: start LU: %(start)s, end LU: %(end)s" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1061 +#: cinder/volume/drivers/hds/hds.py:84 #, python-format -msgid "(VM_UTILS) xenapi power_state -> |%s|" -msgstr "(VM_UTILS) xenapi power_state -> |%s|" +msgid "setting LU upper (end) limit to %s" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1088 +#: cinder/volume/drivers/hds/hds.py:92 #, python-format -msgid "Unable to parse rrd of %(vm_uuid)s" +msgid "%(element)s: %(val)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1108 +#: cinder/volume/drivers/hds/hds.py:103 cinder/volume/drivers/hds/hds.py:105 #, python-format -msgid "Re-scanning SR %s" -msgstr "Re-scanning SR %s" +msgid "XML exception reading parameter: %s" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1136 +#: cinder/volume/drivers/hds/hds.py:178 #, python-format -msgid "Flag sr_matching_filter '%s' does not respect formatting convention" +msgid "portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1154 -msgid "" -"XenAPI is unable to find a Storage Repository to install guest instances " -"on. Please check your configuration and/or configure the flag " -"'sr_matching_filter'" +#: cinder/volume/drivers/hds/hds.py:197 +#, python-format +msgid "No configuration found for service: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1167 -msgid "Cannot find SR of content-type ISO" -msgstr "" +#: cinder/volume/drivers/hds/hds.py:250 +#, fuzzy, python-format +msgid "HDP not found: %s" +msgstr "HDP not found: %s" -#: cinder/virt/xenapi/vm_utils.py:1175 +#: cinder/volume/drivers/hds/hds.py:289 #, python-format -msgid "ISO: looking at SR %(sr_rec)s" +msgid "iSCSI portal not found for service: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1177 -msgid "ISO: not iso content" +#: cinder/volume/drivers/hds/hds.py:327 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1180 -msgid "ISO: iso content_type, no 'i18n-key' key" +#: cinder/volume/drivers/hds/hds.py:355 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is cloned." 
msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1183 -msgid "ISO: iso content_type, i18n-key value not 'local-storage-iso'" +#: cinder/volume/drivers/hds/hds.py:372 +#, python-format +msgid "LUN %(lun)s extended to %(size)s GB." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1187 -msgid "ISO: SR MATCHing our criteria" +#: cinder/volume/drivers/hds/hds.py:395 +#, python-format +msgid "delete lun %(lun)s on %(name)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1189 -msgid "ISO: ISO, looking to see if it is host local" +#: cinder/volume/drivers/hds/hds.py:480 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created from snapshot." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1192 +#: cinder/volume/drivers/hds/hds.py:503 #, python-format -msgid "ISO: PBD %(pbd_ref)s disappeared" +msgid "LUN %(lun)s of size %(size)s MB is created as snapshot." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1195 +#: cinder/volume/drivers/hds/hds.py:522 #, python-format -msgid "ISO: PBD matching, want %(pbd_rec)s, have %(host)s" +msgid "LUN %s is deleted." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1198 -msgid "ISO: SR with local PBD" +#: cinder/volume/drivers/huawei/__init__.py:57 +msgid "_instantiate_driver: configuration not found." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1220 +#: cinder/volume/drivers/huawei/__init__.py:64 #, python-format msgid "" -"Unable to obtain RRD XML for VM %(vm_uuid)s with server details: " -"%(server)s." +"_instantiate_driver: Loading %(protocol)s driver for Huawei OceanStor " +"%(product)s series storage arrays." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1236 +#: cinder/volume/drivers/huawei/__init__.py:84 #, python-format -msgid "Unable to obtain RRD XML updates with server details: %(server)s." +msgid "" +"\"Product\" or \"Protocol\" is illegal. \"Product\" should be set to " +"either T, Dorado or HVS. \"Protocol\" should be set to either iSCSI or " +"FC. Product: %(product)s Protocol: %(protocol)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1290 +#: cinder/volume/drivers/huawei/huawei_dorado.py:74 #, python-format -msgid "Invalid statistics data from Xenserver: %s" +msgid "" +"initialize_connection: volume name: %(vol)s host: %(host)s initiator: " +"%(wwn)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1343 +#: cinder/volume/drivers/huawei/huawei_dorado.py:92 +#: cinder/volume/drivers/huawei/huawei_t.py:461 #, python-format -msgid "VHD %(vdi_uuid)s has parent %(parent_ref)s" -msgstr "VHD %(vdi_uuid)s has parent %(parent_ref)s" +msgid "initialize_connection: Target FC ports WWNS: %s" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1417 +#: cinder/volume/drivers/huawei/huawei_t.py:101 #, python-format msgid "" -"Parent %(parent_uuid)s doesn't match original parent " -"%(original_parent_uuid)s, waiting for coalesce..." +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(ini)s" msgstr "" -"Parent %(parent_uuid)s doesn't match original parent " -"%(original_parent_uuid)s, waiting for coalesce..." -#: cinder/virt/xenapi/vm_utils.py:1427 +#: cinder/volume/drivers/huawei/huawei_t.py:159 +#: cinder/volume/drivers/huawei/rest_common.py:1278 #, python-format -msgid "VHD coalesce attempts exceeded (%(max_attempts)d), giving up..." +msgid "" +"_get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " +"check config file." 
msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1462 +#: cinder/volume/drivers/huawei/huawei_t.py:206 +#: cinder/volume/drivers/huawei/rest_common.py:1083 #, python-format -msgid "Timeout waiting for device %s to be created" +msgid "_get_tgt_iqn: iSCSI IP is %s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1473 +#: cinder/volume/drivers/huawei/huawei_t.py:234 #, python-format -msgid "Plugging VBD %s ... " -msgstr "Plugging VBD %s ... " +msgid "_get_tgt_iqn: iSCSI target iqn is %s." +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1476 +#: cinder/volume/drivers/huawei/huawei_t.py:248 #, python-format -msgid "Plugging VBD %s done." -msgstr "Plugging VBD %s done." +msgid "" +"_get_iscsi_tgt_port_info: Failed to get iSCSI port info. Please make sure" +" the iSCSI port IP %s is configured in array." +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1478 +#: cinder/volume/drivers/huawei/huawei_t.py:323 +#: cinder/volume/drivers/huawei/huawei_t.py:552 #, python-format -msgid "VBD %(vbd_ref)s plugged as %(orig_dev)s" +msgid "" +"terminate_connection: volume: %(vol)s, host: %(host)s, connector: " +"%(initiator)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1481 +#: cinder/volume/drivers/huawei/huawei_t.py:351 #, python-format -msgid "VBD %(vbd_ref)s plugged into wrong dev, remapping to %(dev)s" +msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1490 -#, python-format -msgid "Destroying VBD for VDI %s ... " -msgstr "Destroying VBD for VDI %s ... " +#: cinder/volume/drivers/huawei/huawei_t.py:436 +msgid "validate_connector: The FC driver requires thewwpns in the connector." +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1498 +#: cinder/volume/drivers/huawei/huawei_t.py:443 #, python-format -msgid "Destroying VBD for VDI %s done." -msgstr "Destroying VBD for VDI %s done." +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(wwn)s" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1511 +#: cinder/volume/drivers/huawei/huawei_t.py:578 #, python-format -msgid "Running pygrub against %s" -msgstr "Running pygrub against %s" +msgid "_remove_fc_ports: FC port was not found on host %(hostid)s." +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1518 +#: cinder/volume/drivers/huawei/huawei_utils.py:40 #, python-format -msgid "Found Xen kernel %s" -msgstr "Found Xen kernel %s" - -#: cinder/virt/xenapi/vm_utils.py:1520 -msgid "No Xen kernel found. Booting HVM." -msgstr "No Xen kernel found. Booting HVM." +msgid "parse_xml_file: %s" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1533 -msgid "Partitions:" +#: cinder/volume/drivers/huawei/huawei_utils.py:129 +#, python-format +msgid "_get_host_os_type: Host %(ip)s OS type is %(os)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1539 +#: cinder/volume/drivers/huawei/rest_common.py:59 #, python-format -msgid " %(num)s: %(ptype)s %(size)d sectors" +msgid "HVS Request URL: %(url)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1565 +#: cinder/volume/drivers/huawei/rest_common.py:60 #, python-format -msgid "" -"Writing partition table %(primary_first)d %(primary_last)d to " -"%(dev_path)s..." +msgid "HVS Request Data: %(data)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1578 +#: cinder/volume/drivers/huawei/rest_common.py:73 #, python-format -msgid "Writing partition table %s done." -msgstr "Writing partition table %s done." 
+msgid "HVS Response Data: %(res)s" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1632 +#: cinder/volume/drivers/huawei/rest_common.py:75 #, python-format -msgid "" -"Starting sparse_copy src=%(src_path)s dst=%(dst_path)s " -"virtual_size=%(virtual_size)d block_size=%(block_size)d" +msgid "Bad response from server: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:82 +msgid "JSON transfer error" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1664 +#: cinder/volume/drivers/huawei/rest_common.py:102 #, python-format -msgid "" -"Finished sparse_copy in %(duration).2f secs, %(compression_pct).2f%% " -"reduction in size" +msgid "Login error, reason is %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1714 +#: cinder/volume/drivers/huawei/rest_common.py:166 +#, python-format msgid "" -"XenServer tools installed in this image are capable of network injection." -" Networking files will not bemanipulated" +"%(err)s\n" +"result: %(res)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1722 -msgid "" -"XenServer tools are present in this image but are not capable of network " -"injection" +#: cinder/volume/drivers/huawei/rest_common.py:173 +#, python-format +msgid "%s \"data\" was not in result." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1726 -msgid "XenServer tools are not installed in this image" +#: cinder/volume/drivers/huawei/rest_common.py:208 +msgid "Can't find the Qos policy in array" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1742 -msgid "Manipulating interface files directly" +#: cinder/volume/drivers/huawei/rest_common.py:246 +msgid "Can't find lun or lun group in array" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1751 +#: cinder/volume/drivers/huawei/rest_common.py:280 #, python-format -msgid "Failed to mount filesystem (expected for non-linux instances): %s" +msgid "Invalid resource pool: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:131 cinder/virt/xenapi/vmops.py:722 +#: cinder/volume/drivers/huawei/rest_common.py:298 #, python-format -msgid "Updating progress to %(progress)d" +msgid "Get pool info error, pool name is:%s" msgstr "" -#: cinder/virt/xenapi/vmops.py:231 +#: cinder/volume/drivers/huawei/rest_common.py:327 #, python-format -msgid "Attempted to power on non-existent instance bad instance id %s" +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:233 -#, fuzzy -msgid "Starting instance" -msgstr "Creating a raw instance" +#: cinder/volume/drivers/huawei/rest_common.py:354 +#, python-format +msgid "_stop_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" -#: cinder/virt/xenapi/vmops.py:303 -msgid "Removing kernel/ramdisk files from dom0" +#: cinder/volume/drivers/huawei/rest_common.py:474 +#, python-format +msgid "" +"_mapping_hostgroup_and_lungroup: lun_group: %(lun_group)sview_id: " +"%(view_id)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:358 -msgid "Failed to spawn, rolling back" +#: cinder/volume/drivers/huawei/rest_common.py:511 +#: cinder/volume/drivers/huawei/rest_common.py:543 +#, python-format +msgid "initiator name:%(initiator_name)s, volume name:%(volume)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:443 -msgid "Detected ISO image type, creating blank VM for install" +#: cinder/volume/drivers/huawei/rest_common.py:527 +#, python-format +msgid "host lun id is %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:462 -msgid "Auto configuring disk, attempting to resize partition..." 
+#: cinder/volume/drivers/huawei/rest_common.py:553 +#, python-format +msgid "the free wwns %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:515 +#: cinder/volume/drivers/huawei/rest_common.py:574 #, python-format -msgid "Invalid value for injected_files: %r" +msgid "the fc server properties is:%s" msgstr "" -#: cinder/virt/xenapi/vmops.py:520 +#: cinder/volume/drivers/huawei/rest_common.py:688 #, python-format -msgid "Injecting file path: '%s'" -msgstr "Injecting file path: '%s'" +msgid "JSON transfer data error. %s" +msgstr "" -#: cinder/virt/xenapi/vmops.py:527 -msgid "Setting admin password" +#: cinder/volume/drivers/huawei/rest_common.py:874 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:531 -msgid "Resetting network" +#: cinder/volume/drivers/huawei/rest_common.py:937 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(fetchtype)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:538 -msgid "Setting VCPU weight" +#: cinder/volume/drivers/huawei/rest_common.py:964 +#, python-format +msgid "" +"PrefetchType config is wrong. PrefetchType must in 1,2,3,4. fetchtype " +"is:%(fetchtype)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:544 -#, fuzzy -msgid "Starting VM" -msgstr "Restarting xvp" +#: cinder/volume/drivers/huawei/rest_common.py:970 +msgid "Use default prefetch fetchtype. Prefetch fetchtype:Intelligent." +msgstr "" -#: cinder/virt/xenapi/vmops.py:551 +#: cinder/volume/drivers/huawei/rest_common.py:982 #, python-format msgid "" -"Latest agent build for %(hypervisor)s/%(os)s/%(architecture)s is " -"%(version)s" +"_wait_for_luncopy:LUNcopy status is not normal.LUNcopy name: " +"%(luncopyname)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:554 +#: cinder/volume/drivers/huawei/rest_common.py:1056 #, python-format -msgid "No agent build found for %(hypervisor)s/%(os)s/%(architecture)s" +msgid "" +"_get_iscsi_port_info: Failed to get iscsi port info through config IP " +"%(ip)s, please check config file." msgstr "" -#: cinder/virt/xenapi/vmops.py:561 -msgid "Waiting for instance state to become running" +#: cinder/volume/drivers/huawei/rest_common.py:1101 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:573 -msgid "Querying agent version" +#: cinder/volume/drivers/huawei/rest_common.py:1124 +#, python-format +msgid "_parse_volume_type: type id: %(type_id)s config parameter is: %(params)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:576 +#: cinder/volume/drivers/huawei/rest_common.py:1157 #, python-format -msgid "Instance agent version: %s" +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the configuration file " +"%(conf)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:581 +#: cinder/volume/drivers/huawei/rest_common.py:1162 #, python-format -msgid "Updating Agent to %s" +msgid "The config parameters are: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:616 +#: cinder/volume/drivers/huawei/rest_common.py:1239 +#: cinder/volume/drivers/huawei/ssh_common.py:118 +#: cinder/volume/drivers/huawei/ssh_common.py:1265 #, python-format -msgid "No opaque_ref could be determined for '%s'." +msgid "_check_conf_file: Config file invalid. %s must be set." 
msgstr "" -#: cinder/virt/xenapi/vmops.py:670 -#, fuzzy, python-format -msgid "Finished snapshot and upload for VM" -msgstr "Finished snapshot and upload for VM %s" - -#: cinder/virt/xenapi/vmops.py:677 -#, fuzzy, python-format -msgid "Starting snapshot for VM" -msgstr "Starting snapshot for VM %s" +#: cinder/volume/drivers/huawei/rest_common.py:1246 +#: cinder/volume/drivers/huawei/ssh_common.py:125 +msgid "_check_conf_file: Config file invalid. StoragePool must be set." +msgstr "" -#: cinder/virt/xenapi/vmops.py:686 -#, fuzzy, python-format -msgid "Unable to Snapshot instance: %(exc)s" -msgstr "Unable to attach volume to instance %s" +#: cinder/volume/drivers/huawei/rest_common.py:1256 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" -#: cinder/virt/xenapi/vmops.py:702 -msgid "Failed to transfer vhd to new host" +#: cinder/volume/drivers/huawei/rest_common.py:1300 +msgid "Can not find lun in array" msgstr "" -#: cinder/virt/xenapi/vmops.py:770 +#: cinder/volume/drivers/huawei/ssh_common.py:54 #, python-format -msgid "Resizing down VDI %(cow_uuid)s from %(old_gb)dGB to %(new_gb)dGB" +msgid "ssh_read: Read SSH timeout. %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:893 -#, python-format -msgid "Resizing up VDI %(vdi_uuid)s from %(old_gb)dGB to %(new_gb)dGB" +#: cinder/volume/drivers/huawei/ssh_common.py:70 +msgid "No response message. Please check system status." msgstr "" -#: cinder/virt/xenapi/vmops.py:901 -msgid "Resize complete" +#: cinder/volume/drivers/huawei/ssh_common.py:101 +#: cinder/volume/drivers/huawei/ssh_common.py:1249 +msgid "do_setup" msgstr "" -#: cinder/virt/xenapi/vmops.py:928 +#: cinder/volume/drivers/huawei/ssh_common.py:135 +#: cinder/volume/drivers/huawei/ssh_common.py:1287 #, python-format -msgid "Failed to query agent version: %(resp)r" +msgid "" +"_check_conf_file: Config file invalid. Host OSType is invalid.\n" +"The valid values are: %(os_list)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:949 +#: cinder/volume/drivers/huawei/ssh_common.py:169 #, python-format -msgid "domid changed from %(domid)s to %(newdomid)s" +msgid "_get_login_info: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:962 +#: cinder/volume/drivers/huawei/ssh_common.py:224 #, python-format -msgid "Failed to update agent: %(resp)r" +msgid "create_volume: volume name: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:983 +#: cinder/volume/drivers/huawei/ssh_common.py:242 #, python-format -msgid "Failed to exchange keys: %(resp)r" +msgid "" +"_name_translate: Name in cinder: %(old)s, new name in storage system: " +"%(new)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:998 +#: cinder/volume/drivers/huawei/ssh_common.py:279 #, python-format -msgid "Failed to update password: %(resp)r" +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the element in configuration " +"file %(conf)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1023 +#: cinder/volume/drivers/huawei/ssh_common.py:373 +#: cinder/volume/drivers/huawei/ssh_common.py:1451 #, python-format -msgid "Failed to inject file: %(resp)r" +msgid "LUNType must be \"Thin\" or \"Thick\". LUNType:%(type)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1032 -msgid "VM already halted, skipping shutdown..." +#: cinder/volume/drivers/huawei/ssh_common.py:395 +msgid "" +"_parse_conf_lun_params: Use default prefetch type. 
Prefetch type: " +"Intelligent" msgstr "" -#: cinder/virt/xenapi/vmops.py:1036 -msgid "Shutting down VM" +#: cinder/volume/drivers/huawei/ssh_common.py:421 +#, python-format +msgid "" +"_get_maximum_capacity_pool_id: Failed to get pool id. Please check config" +" file and make sure the StoragePool %s is created in storage array." msgstr "" -#: cinder/virt/xenapi/vmops.py:1054 -msgid "Unable to find VBD for VM" +#: cinder/volume/drivers/huawei/ssh_common.py:436 +#, python-format +msgid "CLI command: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1097 -msgid "Using RAW or VHD, skipping kernel and ramdisk deletion" +#: cinder/volume/drivers/huawei/ssh_common.py:466 +#, python-format +msgid "" +"_execute_cli: Can not connect to IP %(old)s, try to connect to the other " +"IP %(new)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1104 -msgid "instance has a kernel or ramdisk but not both" +#: cinder/volume/drivers/huawei/ssh_common.py:501 +#, python-format +msgid "_execute_cli: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1111 -msgid "kernel/ramdisk files removed" -msgstr "kernel/ramdisk files removed" - -#: cinder/virt/xenapi/vmops.py:1121 -msgid "VM destroyed" +#: cinder/volume/drivers/huawei/ssh_common.py:511 +#, python-format +msgid "delete_volume: volume name: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1147 -#, fuzzy -msgid "Destroying VM" -msgstr "Restarting xvp" +#: cinder/volume/drivers/huawei/ssh_common.py:516 +#, python-format +msgid "delete_volume: Volume %(name)s does not exist." +msgstr "" -#: cinder/virt/xenapi/vmops.py:1169 -msgid "VM is not present, skipping destroy..." +#: cinder/volume/drivers/huawei/ssh_common.py:570 +#, python-format +msgid "" +"create_volume_from_snapshot: snapshot name: %(snapshot)s, volume name: " +"%(volume)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1222 +#: cinder/volume/drivers/huawei/ssh_common.py:580 #, python-format -msgid "Instance is already in Rescue Mode: %s" +msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:1296 +#: cinder/volume/drivers/huawei/ssh_common.py:650 #, python-format -msgid "Found %(instance_count)d hung reboots older than %(timeout)d seconds" +msgid "_wait_for_luncopy: LUNcopy %(luncopyname)s status is %(status)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1300 -msgid "Automatically hard rebooting" +#: cinder/volume/drivers/huawei/ssh_common.py:688 +#, python-format +msgid "create_cloned_volume: src volume: %(src)s, tgt volume: %(tgt)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1363 +#: cinder/volume/drivers/huawei/ssh_common.py:697 #, python-format -msgid "Setting migration %(migration_id)s to error: %(reason)s" +msgid "Source volume %(name)s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:1374 +#: cinder/volume/drivers/huawei/ssh_common.py:739 #, python-format msgid "" -"Automatically confirming migration %(migration_id)s for instance " -"%(instance_uuid)s" +"extend_volume: extended volume name: %(extended_name)s new added volume " +"name: %(added_name)s new added volume size: %(added_size)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:747 +#, python-format +msgid "extend_volume: volume %s does not exist." 
msgstr "" -#: cinder/virt/xenapi/vmops.py:1379 +#: cinder/volume/drivers/huawei/ssh_common.py:779 #, python-format -msgid "Instance %(instance_uuid)s not found" +msgid "create_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1383 -msgid "In ERROR state" +#: cinder/volume/drivers/huawei/ssh_common.py:785 +msgid "create_snapshot: Resource pool needs 1GB valid size at least." msgstr "" -#: cinder/virt/xenapi/vmops.py:1389 +#: cinder/volume/drivers/huawei/ssh_common.py:792 #, python-format -msgid "In %(task_state)s task_state, not RESIZE_VERIFY" +msgid "create_snapshot: Volume %(name)s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:1396 +#: cinder/volume/drivers/huawei/ssh_common.py:855 #, python-format -msgid "Error auto-confirming resize: %(e)s. Will retry later." +msgid "delete_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1418 -msgid "Could not get bandwidth info." +#: cinder/volume/drivers/huawei/ssh_common.py:865 +#, python-format +msgid "" +"delete_snapshot: Can not delete snapshot %s for it is a source LUN of " +"LUNCopy." msgstr "" -#: cinder/virt/xenapi/vmops.py:1469 -#, fuzzy -msgid "Injecting network info to xenstore" -msgstr "setting network host" +#: cinder/volume/drivers/huawei/ssh_common.py:873 +#, python-format +msgid "delete_snapshot: Snapshot %(snap)s does not exist." +msgstr "" -#: cinder/virt/xenapi/vmops.py:1483 -msgid "Creating vifs" +#: cinder/volume/drivers/huawei/ssh_common.py:916 +#, python-format +msgid "" +"%(func)s: %(msg)s\n" +"CLI command: %(cmd)s\n" +"CLI out: %(out)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1492 -#, fuzzy, python-format -msgid "Creating VIF for network %(network_ref)s" -msgstr "Creating VIF for VM %(vm_ref)s, network %(network_ref)s." +#: cinder/volume/drivers/huawei/ssh_common.py:933 +#, python-format +msgid "map_volume: Volume %s was not found." +msgstr "" -#: cinder/virt/xenapi/vmops.py:1495 -#, fuzzy, python-format -msgid "Created VIF %(vif_ref)s, network %(network_ref)s" -msgstr "Creating VIF for VM %(vm_ref)s, network %(network_ref)s." +#: cinder/volume/drivers/huawei/ssh_common.py:1079 +#, python-format +msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s." +msgstr "" -#: cinder/virt/xenapi/vmops.py:1520 -msgid "Injecting hostname to xenstore" +#: cinder/volume/drivers/huawei/ssh_common.py:1102 +#, python-format +msgid "remove_map: Host %s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:1545 +#: cinder/volume/drivers/huawei/ssh_common.py:1106 #, python-format -msgid "" -"The agent call to %(method)s returned an invalid response: %(ret)r. " -"path=%(path)s; args=%(args)r" +msgid "remove_map: Volume %s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:1566 +#: cinder/volume/drivers/huawei/ssh_common.py:1119 #, python-format -msgid "TIMEOUT: The call to %(method)s timed out. args=%(args)r" +msgid "remove_map: No map between host %(host)s and volume %(volume)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1570 +#: cinder/volume/drivers/huawei/ssh_common.py:1138 #, python-format msgid "" -"NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. " -"args=%(args)r" +"_delete_map: There are IOs accessing the system. Retry to delete host map" +" %(mapid)s 10s later." msgstr "" -#: cinder/virt/xenapi/vmops.py:1575 +#: cinder/volume/drivers/huawei/ssh_common.py:1146 #, python-format -msgid "The call to %(method)s returned an error: %(e)s. 
args=%(args)r" +msgid "" +"_delete_map: Failed to delete host map %(mapid)s.\n" +"CLI out: %(out)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1661 -#, python-format -msgid "OpenSSL error: %s" -msgstr "OpenSSL error: %s" +#: cinder/volume/drivers/huawei/ssh_common.py:1185 +msgid "_update_volume_stats: Updating volume stats." +msgstr "" -#: cinder/virt/xenapi/volume_utils.py:52 -msgid "creating sr within volume_utils" +#: cinder/volume/drivers/huawei/ssh_common.py:1277 +msgid "_check_conf_file: Config file invalid. StoragePool must be specified." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:55 cinder/virt/xenapi/volume_utils.py:83 +#: cinder/volume/drivers/huawei/ssh_common.py:1311 +msgid "" +"_get_device_type: The driver only supports Dorado5100 and Dorado 2100 G2 " +"now." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1389 #, python-format -msgid "type is = %s" +msgid "" +"create_volume_from_snapshot: %(device)s does not support create volume " +"from snapshot." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:58 cinder/virt/xenapi/volume_utils.py:86 +#: cinder/volume/drivers/huawei/ssh_common.py:1396 #, python-format -msgid "name = %s" +msgid "create_cloned_volume: %(device)s does not support clone volume." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:71 +#: cinder/volume/drivers/huawei/ssh_common.py:1404 #, python-format -msgid "Created %(label)s as %(sr_ref)s." +msgid "extend_volume: %(device)s does not support extend volume." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:76 cinder/virt/xenapi/volume_utils.py:174 -msgid "Unable to create Storage Repository" -msgstr "Unable to create Storage Repository" +#: cinder/volume/drivers/huawei/ssh_common.py:1413 +#, python-format +msgid "create_snapshot: %(device)s does not support snapshot." +msgstr "" -#: cinder/virt/xenapi/volume_utils.py:80 -msgid "introducing sr within volume_utils" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:132 +msgid "enter: do_setup" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:103 cinder/virt/xenapi/volume_utils.py:170 -#: cinder/virt/xenapi/volumeops.py:156 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:142 #, python-format -msgid "Introduced %(label)s as %(sr_ref)s." -msgstr "Introduced %(label)s as %(sr_ref)s." +msgid "Failed getting details for pool %s" +msgstr "" -#: cinder/virt/xenapi/volume_utils.py:106 -msgid "Creating pbd for SR" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:179 +msgid "do_setup: No configured nodes." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:108 -msgid "Plugging SR" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:182 +msgid "leave: do_setup" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:116 cinder/virt/xenapi/volumeops.py:160 -msgid "Unable to introduce Storage Repository" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:186 +msgid "enter: check_for_setup_error" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:127 cinder/virt/xenapi/volumeops.py:50 -msgid "Unable to get SR using uuid" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:190 +msgid "Unable to determine system name" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:129 -#, python-format -msgid "Forgetting SR %s..." 
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:193 +msgid "Unable to determine system id" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:137 -msgid "Unable to forget Storage Repository" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:196 +msgid "Unable to determine pool extent size" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:157 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:203 +#: cinder/volume/drivers/netapp/iscsi.py:122 +#: cinder/volume/drivers/netapp/nfs.py:639 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:157 #, python-format -msgid "Introducing %s..." -msgstr "Introducing %s..." +msgid "%s is not set" +msgstr "" -#: cinder/virt/xenapi/volume_utils.py:186 -#, python-format -msgid "Unable to find SR from VBD %s" -msgstr "Unable to find SR from VBD %s" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:209 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" -#: cinder/virt/xenapi/volume_utils.py:204 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:217 #, python-format -msgid "Ignoring exception %(exc)s when getting PBDs for %(sr_ref)s" -msgstr "Ignoring exception %(exc)s when getting PBDs for %(sr_ref)s" +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" -#: cinder/virt/xenapi/volume_utils.py:210 -#, python-format -msgid "Ignoring exception %(exc)s when unplugging PBD %(pbd)s" -msgstr "Ignoring exception %(exc)s when unplugging PBD %(pbd)s" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:225 +msgid "leave: check_for_setup_error" +msgstr "" -#: cinder/virt/xenapi/volume_utils.py:234 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:235 #, python-format -msgid "Unable to introduce VDI on SR %s" -msgstr "Unable to introduce VDI on SR %s" +msgid "ensure_export: Volume %s not found on storage" +msgstr "" -#: cinder/virt/xenapi/volume_utils.py:242 -#, python-format -msgid "Unable to get record of VDI %s on" -msgstr "Unable to get record of VDI %s on" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:254 +msgid "The connector does not contain the required information." 
+msgstr "" -#: cinder/virt/xenapi/volume_utils.py:264 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:277 #, python-format -msgid "Unable to introduce VDI for SR %s" -msgstr "Unable to introduce VDI for SR %s" +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" -#: cinder/virt/xenapi/volume_utils.py:274 -#, python-format -msgid "Error finding vdis in SR %s" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:297 +msgid "CHAP secret exists for host but CHAP is disabled" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:281 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:302 #, python-format -msgid "Unable to find vbd for vdi %s" +msgid "initialize_connection: Failed to get attributes for volume %s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:315 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:314 #, python-format -msgid "Unable to obtain target information %(data)s, %(mountpoint)s" +msgid "Did not find expected column name in lsvdisk: %s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:341 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:316 #, python-format -msgid "Mountpoint cannot be translated: %s" -msgstr "Mountpoint cannot be translated: %s" +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" -#: cinder/virt/xenapi/volumeops.py:64 -msgid "Could not find VDI ref" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:333 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:69 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:342 #, python-format -msgid "Creating SR %s" +msgid "initialize_connection: Did not find a preferred node for volume %s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:73 -msgid "Could not create SR" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:365 +msgid "" +"Could not get FC connection information for the host-volume connection. " +"Is the host configured properly for FC connections?" msgstr "" -#: cinder/virt/xenapi/volumeops.py:76 -msgid "Could not retrieve SR record" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:380 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" msgstr "" -#: cinder/virt/xenapi/volumeops.py:81 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:385 #, python-format -msgid "Introducing SR %s" +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:85 -msgid "SR found in xapi database. No need to introduce" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:403 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:90 -msgid "Could not introduce SR" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:411 +msgid "terminate_connection: Failed to get host name from connector." 
msgstr "" -#: cinder/virt/xenapi/volumeops.py:94 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:421 #, python-format -msgid "Checking for SR %s" +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:106 -#, python-format -msgid "SR %s not found in the xapi database" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:447 +msgid "create_volume_from_snapshot: Source and destination size differ." msgstr "" -#: cinder/virt/xenapi/volumeops.py:112 -msgid "Could not forget SR" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:459 +msgid "create_cloned_volume: Source and destination size differ." msgstr "" -#: cinder/virt/xenapi/volumeops.py:121 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:470 #, python-format -msgid "Attach_volume: %(connection_info)s, %(instance_name)s, %(mountpoint)s" +msgid "enter: extend_volume: volume %s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:178 -#, python-format -msgid "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s" -msgstr "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:474 +msgid "extend_volume: Extending a volume with snapshots is not supported." +msgstr "" -#: cinder/virt/xenapi/volumeops.py:189 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:481 #, python-format -msgid "Unable to use SR %(sr_ref)s for instance %(instance_name)s" -msgstr "Unable to use SR %(sr_ref)s for instance %(instance_name)s" +msgid "leave: extend_volume: volume %s" +msgstr "" -#: cinder/virt/xenapi/volumeops.py:197 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:497 #, python-format -msgid "Unable to attach volume to instance %s" -msgstr "Unable to attach volume to instance %s" +msgid "enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" -#: cinder/virt/xenapi/volumeops.py:200 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:523 #, python-format -msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s" -msgstr "Mountpoint %(mountpoint)s attached to instance %(instance_name)s" +msgid "leave: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" -#: cinder/virt/xenapi/volumeops.py:210 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:540 #, python-format -msgid "Detach_volume: %(instance_name)s, %(mountpoint)s" -msgstr "Detach_volume: %(instance_name)s, %(mountpoint)s" +msgid "" +"enter: retype: id=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" -#: cinder/virt/xenapi/volumeops.py:219 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:581 #, python-format -msgid "Unable to locate volume %s" -msgstr "Unable to locate volume %s" +msgid "" +"exit: retype: ild=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" -#: cinder/virt/xenapi/volumeops.py:227 -#, python-format -msgid "Unable to detach volume %s" -msgstr "Unable to detach volume %s" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:622 +msgid "Could not get pool data from the storage" +msgstr "" -#: cinder/virt/xenapi/volumeops.py:232 -#, python-format -msgid "Unable to destroy vbd %s" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:623 +msgid "_update_volume_stats: Could not get storage pool data" msgstr "" -#: cinder/virt/xenapi/volumeops.py:239 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:44 #, python-format -msgid "Error purging SR %s" +msgid "Could not find key in output of command %(cmd)s: %(out)s" msgstr "" -#: 
cinder/virt/xenapi/volumeops.py:241 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:64 #, python-format -msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s" -msgstr "Mountpoint %(mountpoint)s detached from instance %(instance_name)s" +msgid "Failed to get code level (%s)." +msgstr "" -#: cinder/vnc/xvp_proxy.py:98 cinder/vnc/xvp_proxy.py:103 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:86 #, python-format -msgid "Error in handshake: %s" +msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s" msgstr "" -#: cinder/vnc/xvp_proxy.py:119 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:143 #, python-format -msgid "Invalid request: %s" +msgid "WWPN on node %(node)s: %(wwpn)s" msgstr "" -#: cinder/vnc/xvp_proxy.py:139 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:165 #, python-format -msgid "Request: %s" +msgid "Failed to find host %s" msgstr "" -#: cinder/vnc/xvp_proxy.py:142 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:178 #, python-format -msgid "Request made with missing token: %s" +msgid "enter: get_host_from_connector: %s" msgstr "" -#: cinder/vnc/xvp_proxy.py:153 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:207 #, python-format -msgid "Request made with invalid token: %s" +msgid "leave: get_host_from_connector: host %s" msgstr "" -#: cinder/vnc/xvp_proxy.py:160 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:218 #, python-format -msgid "Unexpected error: %s" +msgid "enter: create_host: host %s" msgstr "" -#: cinder/vnc/xvp_proxy.py:180 -#, python-format -msgid "Starting cinder-xvpvncproxy node (version %s)" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:224 +msgid "create_host: Host name is not unicode or string" msgstr "" -#: cinder/volume/api.py:74 cinder/volume/api.py:220 -msgid "status must be available" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:235 +msgid "create_host: No initiators or wwpns supplied." msgstr "" -#: cinder/volume/api.py:85 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:265 #, python-format -msgid "Quota exceeded for %(pid)s, tried to create %(size)sG volume" +msgid "leave: create_host: host %(host)s - %(host_name)s" msgstr "" -#: cinder/volume/api.py:137 -#, fuzzy -msgid "Volume status must be available or error" -msgstr "Volume status must be available" - -#: cinder/volume/api.py:142 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:275 #, python-format -msgid "Volume still has %d dependent snapshots" +msgid "enter: map_vol_to_host: volume %(volume_name)s to host %(host_name)s" msgstr "" -#: cinder/volume/api.py:223 -msgid "already attached" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:301 +#, python-format +msgid "" +"leave: map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host " +"%(host_name)s" msgstr "" -#: cinder/volume/api.py:230 -msgid "already detached" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:311 +#, python-format +msgid "enter: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" msgstr "" -#: cinder/volume/api.py:292 -msgid "must be available" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:318 +#, python-format +msgid "unmap_vol_from_host: No mapping of volume %(vol_name)s to any host found." 
msgstr "" -#: cinder/volume/api.py:325 -#, fuzzy -msgid "Volume Snapshot status must be available or error" -msgstr "Volume status must be available" - -#: cinder/volume/driver.py:96 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:324 #, python-format -msgid "Recovering from a failed execute. Try number %s" -msgstr "Recovering from a failed execute. Try number %s" +msgid "" +"unmap_vol_from_host: Multiple mappings of volume %(vol_name)s found, no " +"host specified." +msgstr "" -#: cinder/volume/driver.py:106 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:336 #, python-format -msgid "volume group %s doesn't exist" -msgstr "volume group %s doesn't exist" +msgid "" +"unmap_vol_from_host: No mapping of volume %(vol_name)s to host %(host) " +"found." +msgstr "" -#: cinder/volume/driver.py:270 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:348 #, python-format -msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %d" +msgid "leave: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" msgstr "" -#: cinder/volume/driver.py:318 -#, python-format -msgid "Skipping remove_export. No iscsi_target provisioned for volume: %d" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:377 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" msgstr "" -#: cinder/volume/driver.py:327 -#, python-format +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:383 msgid "" -"Skipping remove_export. No iscsi_target is presently exported for volume:" -" %d" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" msgstr "" -#: cinder/volume/driver.py:337 -msgid "ISCSI provider_location not stored, using discovery" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:390 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" msgstr "" -#: cinder/volume/driver.py:384 -#, python-format -msgid "Could not find iSCSI export for volume %s" -msgstr "Could not find iSCSI export for volume %s" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:397 +msgid "System does not support compression" +msgstr "" -#: cinder/volume/driver.py:388 -#, python-format -msgid "ISCSI Discovery: Found %s" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:402 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" msgstr "" -#: cinder/volume/driver.py:466 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:408 #, python-format -msgid "Cannot confirm exported volume id:%(volume_id)s." +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" msgstr "" -#: cinder/volume/driver.py:493 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:417 #, python-format -msgid "FAKE ISCSI: %s" -msgstr "FAKE ISCSI: %s" +msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s" +msgstr "" -#: cinder/volume/driver.py:505 -#, python-format -msgid "rbd has no pool %s" -msgstr "rbd has no pool %s" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:452 +msgid "Protocol must be specified as ' iSCSI' or ' FC'." 
+msgstr "" -#: cinder/volume/driver.py:579 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:495 #, python-format -msgid "Sheepdog is not working: %s" -msgstr "Sheepdog is not working: %s" - -#: cinder/volume/driver.py:581 -msgid "Sheepdog is not working" -msgstr "Sheepdog is not working" +msgid "enter: create_vdisk: vdisk %s " +msgstr "" -#: cinder/volume/driver.py:680 cinder/volume/driver.py:685 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:498 #, python-format -msgid "LoggingVolumeDriver: %s" +msgid "leave: _create_vdisk: volume %s " msgstr "" -#: cinder/volume/manager.py:96 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:525 #, python-format -msgid "Re-exporting %s volumes" -msgstr "Re-exporting %s volumes" +msgid "" +"Unexecpted mapping status %(status)s for mapping%(id)s. Attributes: " +"%(attr)s" +msgstr "" -#: cinder/volume/manager.py:101 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:535 #, python-format -msgid "volume %s: skipping export" -msgstr "volume %s: skipping export" +msgid "" +"Mapping %(id)s prepare failed to complete within theallotted %(to)d " +"seconds timeout. Terminating." +msgstr "" -#: cinder/volume/manager.py:107 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:544 #, python-format -msgid "volume %s: creating" -msgstr "volume %s: creating" +msgid "" +"enter: run_flashcopy: execute FlashCopy from source %(source)s to target " +"%(target)s" +msgstr "" -#: cinder/volume/manager.py:119 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:552 #, python-format -msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" -msgstr "volume %(vol_name)s: creating logical volume of size %(vol_size)sG" +msgid "leave: run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" -#: cinder/volume/manager.py:131 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:572 #, python-format -msgid "volume %s: creating export" -msgstr "volume %s: creating export" +msgid "Loopcall: _check_vdisk_fc_mappings(), vdisk %s" +msgstr "" -#: cinder/volume/manager.py:144 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:595 #, python-format -msgid "volume %s: created successfully" -msgstr "volume %s: created successfully" - -#: cinder/volume/manager.py:153 -msgid "Volume is still attached" -msgstr "Volume is still attached" - -#: cinder/volume/manager.py:155 -msgid "Volume is not local to this node" -msgstr "Volume is not local to this node" +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" -#: cinder/volume/manager.py:159 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:631 #, python-format -msgid "volume %s: removing export" -msgstr "volume %s: removing export" +msgid "Calling _ensure_vdisk_no_fc_mappings: vdisk %s" +msgstr "" -#: cinder/volume/manager.py:161 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:639 #, python-format -msgid "volume %s: deleting" -msgstr "volume %s: deleting" +msgid "enter: delete_vdisk: vdisk %s" +msgstr "" -#: cinder/volume/manager.py:164 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:641 #, python-format -msgid "volume %s: volume is busy" +msgid "Tried to delete non-existant vdisk %s." 
msgstr "" -#: cinder/volume/manager.py:176 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:645 #, python-format -msgid "volume %s: deleted successfully" -msgstr "volume %s: deleted successfully" +msgid "leave: delete_vdisk: vdisk %s" +msgstr "" -#: cinder/volume/manager.py:183 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:649 #, python-format -msgid "snapshot %s: creating" +msgid "enter: create_copy: snapshot %(src)s to %(tgt)s" msgstr "" -#: cinder/volume/manager.py:187 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:654 #, python-format -msgid "snapshot %(snap_name)s: creating" +msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist" msgstr "" -#: cinder/volume/manager.py:202 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:669 #, python-format -msgid "snapshot %s: created successfully" +msgid "leave: _create_copy: snapshot %(tgt)s from vdisk %(src)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:691 +msgid "migrate_volume started without a vdisk copy in the expected pool." msgstr "" -#: cinder/volume/manager.py:211 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:743 #, python-format -msgid "snapshot %s: deleting" +msgid "" +"Ignore change IO group as storage code level is %(code_level)s, below " +"then 6.4.0.0" msgstr "" -#: cinder/volume/manager.py:214 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:35 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:211 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:244 #, fuzzy, python-format -msgid "snapshot %s: snapshot is busy" -msgstr "instance %s: snapshotting" - -#: cinder/volume/manager.py:226 -#, python-format -msgid "snapshot %s: deleted successfully" +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" -#: cinder/volume/manager.py:310 -msgid "Checking volume capabilities" +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:53 +#, python-format +msgid "Expected no output from CLI command %(cmd)s, got %(out)s" msgstr "" -#: cinder/volume/manager.py:314 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:65 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:256 #, python-format -msgid "New capabilities found: %s" +msgid "" +"Failed to parse CLI output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/volume/manager.py:325 -msgid "Clear capabilities" +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:142 +msgid "Must pass wwpn or host to lsfabric." msgstr "" -#: cinder/volume/manager.py:329 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:158 #, python-format -msgid "Notification {%s} received" +msgid "Did not find success message nor error for %(fun)s: %(out)s" msgstr "" -#: cinder/volume/netapp.py:79 -#, python-format -msgid "API %(name)sfailed: %(reason)s" +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:163 +msgid "" +"storwize_svc_multihostmap_enabled is set to False, not allowing multi " +"host mapping." msgstr "" -#: cinder/volume/netapp.py:109 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:347 #, python-format -msgid "%s is not set" +msgid "Did not find expected key %(key)s in %(fun)s: %(raw)s" msgstr "" -#: cinder/volume/netapp.py:128 -#, fuzzy -msgid "Connected to DFM server" -msgstr "Reconnected to queue" +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:382 +#, python-format +msgid "" +"Unexpected CLI response: header/row mismatch. 
+"%(row)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/api.py:419
+#, python-format
+msgid "No element by given name %s."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/api.py:440
+msgid "Not a valid value for NaElement."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/api.py:444
+msgid "NaElement name cannot be null."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/api.py:468
+msgid "Type cannot be converted into NaElement."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/common.py:75
+msgid "Required configuration not found"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/common.py:103
+#, python-format
+msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/common.py:109
+#, python-format
+msgid "Storage family %s is not supported"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/common.py:116
+#, python-format
+msgid "No default storage protocol found for storage family %(storage_family)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/common.py:123
+#, python-format
+msgid ""
+"Protocol %(storage_protocol)s is not supported for storage family "
+"%(storage_family)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/common.py:130
+#, python-format
+msgid ""
+"NetApp driver of family %(storage_family)s and protocol "
+"%(storage_protocol)s loaded"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/common.py:139
+msgid "Only loading netapp drivers supported."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/common.py:158
+#, python-format
+msgid ""
+"The configured NetApp driver is deprecated. Please refer to the link to "
+"resolve the issue '%s'."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:69
+#, python-format
+msgid "No metadata property %(prop)s defined for the LUN %(name)s"
+msgstr ""

-#: cinder/volume/netapp.py:159
+#: cinder/volume/drivers/netapp/iscsi.py:105
#, fuzzy, python-format
-msgid "Job failed: %s"
-msgstr "NotFound raised: %s"
+msgid "Using NetApp filer: %s"
+msgstr "Using NetApp filer: %s"
+
+#: cinder/volume/drivers/netapp/iscsi.py:150
+msgid "Success getting LUN list from server"
+msgstr ""

-#: cinder/volume/netapp.py:240
-msgid "Failed to provision dataset member"
+#: cinder/volume/drivers/netapp/iscsi.py:166
+#, python-format
+msgid "Created LUN with name %s"
msgstr ""

-#: cinder/volume/netapp.py:252
-msgid "No LUN was created by the provision job"
+#: cinder/volume/drivers/netapp/iscsi.py:175
+#, python-format
+msgid "No entry in LUN table for volume/snapshot %(name)s."
msgstr "" -#: cinder/volume/netapp.py:261 cinder/volume/netapp.py:433 +#: cinder/volume/drivers/netapp/iscsi.py:191 #, fuzzy, python-format -msgid "Failed to find LUN ID for volume %s" -msgstr "Could not find iSCSI export for volume %s" +msgid "Destroyed LUN %s" +msgstr "Destroyed LUN %s" + +#: cinder/volume/drivers/netapp/iscsi.py:227 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" -#: cinder/volume/netapp.py:280 -msgid "Failed to remove and delete dataset member" +#: cinder/volume/drivers/netapp/iscsi.py:232 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" msgstr "" -#: cinder/volume/netapp.py:603 cinder/volume/netapp.py:657 +#: cinder/volume/drivers/netapp/iscsi.py:238 #, fuzzy, python-format -msgid "No LUN ID for volume %s" -msgstr "Could not find iSCSI export for volume %s" +msgid "Failed to get LUN target details for the LUN %s" +msgstr "Failed to get LUN target details for the LUN %s" -#: cinder/volume/netapp.py:607 cinder/volume/netapp.py:661 +#: cinder/volume/drivers/netapp/iscsi.py:249 #, fuzzy, python-format -msgid "Failed to get LUN details for LUN ID %s" -msgstr "Failed to get metadata for ip: %s" +msgid "Failed to get target portal for the LUN %s" +msgstr "Failed to get target portal for the LUN %s" -#: cinder/volume/netapp.py:614 +#: cinder/volume/drivers/netapp/iscsi.py:252 #, fuzzy, python-format -msgid "Failed to get host details for host ID %s" -msgstr "Failed to get metadata for ip: %s" +msgid "Failed to get target IQN for the LUN %s" +msgstr "Failed to get target IQN for the LUN %s" -#: cinder/volume/netapp.py:620 +#: cinder/volume/drivers/netapp/iscsi.py:290 #, fuzzy, python-format -msgid "Failed to get target portal for filer: %s" -msgstr "Failed to get metadata for ip: %s" +msgid "Snapshot %s deletion successful" +msgstr "Snapshot %s deletion successful" + +#: cinder/volume/drivers/netapp/iscsi.py:310 +#: cinder/volume/drivers/netapp/iscsi.py:565 +#: cinder/volume/drivers/netapp/nfs.py:99 +#: cinder/volume/drivers/netapp/nfs.py:206 +#, python-format +msgid "Resizing %s failed. Cleaning volume." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:325 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:412 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:431 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:511 +msgid "Object is not a NetApp LUN." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:543 +#, python-format +msgid "Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:545 +#, python-format +msgid "Error getting lun attribute. Exception: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:600 +#, python-format +msgid "No need to extend volume %s as it is already the requested new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:606 +#, python-format +msgid "Resizing lun %s directly to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:633 +#, python-format +msgid "Lun %(path)s geometry failed. Message - %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:662 +#, python-format +msgid "Moving lun %(name)s to %(new_name)s." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:677 +#, python-format +msgid "Resizing lun %s using sub clone to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:684 +#, python-format +msgid "%s cannot be sub clone resized as it is hosted on compressed volume" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:690 +#, python-format +msgid "%s cannot be sub clone resized as it contains no blocks." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:707 +#, python-format +msgid "Post clone resize lun %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:718 +#, python-format +msgid "Failure staging lun %s to tmp." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:723 +#, python-format +msgid "Failure moving new cloned lun to %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:727 +#, python-format +msgid "Failure deleting staged tmp lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:730 +#, python-format +msgid "Unknown exception in post clone resize lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:732 +#, python-format +msgid "Exception details: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:736 +msgid "Getting lun block count." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:741 +#, python-format +msgid "Failure getting lun info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:785 +#, python-format +msgid "Failed to get vol with required size and extra specs for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:796 +#, python-format +msgid "Error provisioning vol %(name)s on %(volume)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:841 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:982 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:986 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1087 +msgid "Cluster ssc is not updated. No volume stats found." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1149 +#: cinder/volume/drivers/netapp/nfs.py:1080 +msgid "Unsupported ONTAP version. ONTAP version 7.3.1 and above is supported." +msgstr "" -#: cinder/volume/netapp.py:625 +#: cinder/volume/drivers/netapp/iscsi.py:1153 +#: cinder/volume/drivers/netapp/nfs.py:1084 +#: cinder/volume/drivers/netapp/utils.py:320 +msgid "Api version could not be determined." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1164 #, fuzzy, python-format -msgid "Failed to get target IQN for filer: %s" -msgstr "Failed to get metadata for ip: %s" +msgid "Failed to get vol with required size for volume: %s" +msgstr "Failed to get vol with required size for volume: %s" -#: cinder/volume/san.py:113 cinder/volume/san.py:151 -msgid "Specify san_password or san_private_key" +#: cinder/volume/drivers/netapp/iscsi.py:1273 +#, python-format +msgid "Error finding luns for volume %s. Verify volume exists." msgstr "" -#: cinder/volume/san.py:156 -msgid "san_ip must be set" +#: cinder/volume/drivers/netapp/iscsi.py:1390 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" msgstr "" -#: cinder/volume/san.py:320 +#: cinder/volume/drivers/netapp/iscsi.py:1393 #, python-format -msgid "LUID not found for %(zfs_poolname)s. 
Output=%(out)s" +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1456 +msgid "Volume refresh job already running. Returning..." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1462 +#, python-format +msgid "Error refreshing vol capacity. Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1470 +#, python-format +msgid "Refreshing capacity info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:104 +#: cinder/volume/drivers/netapp/nfs.py:211 +#, python-format +msgid "NFS file %s not discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:224 +#, python-format +msgid "Copied image to volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:230 +#, python-format +msgid "Registering image in cache %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:237 +#, python-format +msgid "" +"Exception while registering image %(image_id)s in cache. Exception: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:250 +#, python-format +msgid "Found cache file for image %(image_id)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:263 +#, python-format +msgid "Cloning img from cache for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:271 +msgid "Image cache cleaning in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:282 +msgid "Image cache cleaning in progress." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:293 +#, python-format +msgid "Cleaning cache for share %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:298 +#, python-format +msgid "Files to be queued for deletion %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:305 +#, python-format +msgid "Exception during cache cleaning %(share)s. Message - %(ex)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:311 +msgid "Image cache cleaning done." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:336 +#, python-format +msgid "Bytes to free %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:343 +#, python-format +msgid "Delete file path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:358 +#, python-format +msgid "Deleting file at path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:363 +#, python-format +msgid "Exception during deleting %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:395 +#, python-format +msgid "Unexpected exception in cloning image %(image_id)s. 
Message: %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:411 +#, python-format +msgid "Cloning image %s from cache" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:415 +#, python-format +msgid "Cache share: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:425 +#, python-format +msgid "Unexpected exception during image cloning in share %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:431 +#, python-format +msgid "Cloning image %s directly in share" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:436 +#, python-format +msgid "Share is cloneable %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:443 +#, python-format +msgid "Image is raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:450 +#, python-format +msgid "Image will locally be converted to raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:457 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:467 +#, python-format +msgid "Performing post clone for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:474 +msgid "NFS file could not be discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:478 +msgid "Checking file for resize" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:482 +#, python-format +msgid "Resizing file to %sG" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:488 +msgid "Resizing image file failed." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:510 +msgid "Discover file retries exhausted." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:529 +#, python-format +msgid "Image location not in the expected format %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:557 +#, python-format +msgid "Found possible share matches %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:561 +msgid "Unexpected exception while short listing used share." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:599 +#, python-format +msgid "Extending volume %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:710 +#, python-format +msgid "Shares on vserver %s will only be used for provisioning." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:714 +#: cinder/volume/drivers/netapp/nfs.py:892 +msgid "No vserver set in config. SSC will be disabled." msgstr "" -#: cinder/volume/san.py:452 -#, python-format -msgid "CLIQ command returned %s" -msgstr "" +#: cinder/volume/drivers/netapp/nfs.py:757 +#, python-format +msgid "Exception creating vol %(name)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:765 +#, python-format +msgid "Volume %s could not be created on shares." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:815 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:856 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:862 +#, python-format +msgid "" +"Cloning with params volume %(volume)s, src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:924 +msgid "No cluster ssc stats found. Wait for next volume stats update." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:941 +msgid "No shares found hence skipping ssc refresh." 
+msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:978 +#: cinder/volume/drivers/netapp/nfs.py:1221 +#, python-format +msgid "Shortlisted del elg files %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:983 +#: cinder/volume/drivers/netapp/nfs.py:1226 +#, python-format +msgid "Getting file usage for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:988 +#: cinder/volume/drivers/netapp/nfs.py:1231 +#, python-format +msgid "file-usage for path %(path)s is %(bytes)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1005 +#: cinder/volume/drivers/netapp/nfs.py:1268 +#, python-format +msgid "Share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1007 +#: cinder/volume/drivers/netapp/nfs.py:1270 +#, python-format +msgid "No share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1038 +#, python-format +msgid "Found volume %(vol)s for share %(share)s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1129 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1139 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:241 +#, python-format +msgid "Unexpected error while creating ssc vol list. Message - %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:272 +#, python-format +msgid "Exception querying aggr options. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:313 +#, python-format +msgid "Exception querying sis information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:347 +#, python-format +msgid "Exception querying mirror information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:379 +#, python-format +msgid "Exception querying storage disk. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:421 +#, python-format +msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:455 +#, python-format +msgid "Successfully completed stale refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:482 +#, python-format +msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:488 +#, python-format +msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:500 +msgid "Backend not a VolumeDriver." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:502 +msgid "Backend server not NaServer." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:505 +msgid "ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:517 +msgid "refresh stale ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:616 +msgid "Fatal error: User not permitted to query NetApp volumes." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:623 +#, python-format +msgid "" +"The user does not have access or sufficient privileges to use all ssc " +"apis. The ssc features %s may not work as expected." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:122 +msgid "ems executed successfully." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:124 +#, python-format +msgid "Failed to invoke ems. 
Message : %s" +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:137 +msgid "" +"It is not the recommended way to use drivers by NetApp. Please use " +"NetAppDriver to achieve the functionality." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:160 +msgid "Requires an NaServer instance." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:317 +msgid "Unsupported Clustered Data ONTAP version." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:99 +#, fuzzy, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "Volume %s does not exist in Nexenta SA" + +#: cinder/volume/drivers/nexenta/iscsi.py:150 +#, python-format +msgid "Extending volume: %(id)s New size: %(size)s GB" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:166 +#, python-format +msgid "Volume %s does not exist, it seems it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:179 +#, python-format +msgid "Cannot delete snapshot %(origin): %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:190 +#, python-format +msgid "Creating temp snapshot of the original volume: %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:200 +#: cinder/volume/drivers/nexenta/nfs.py:200 +#, python-format +msgid "Volume creation failed, deleting created snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:205 +#: cinder/volume/drivers/nexenta/nfs.py:205 +#, python-format +msgid "Failed to delete zfs snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:223 +#, python-format +msgid "Enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:250 +#, python-format +msgid "Remote NexentaStor appliance at %s should be SSH-bound." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:267 +#, python-format +msgid "" +"Cannot send source snapshot %(src)s to destination %(dst)s. Reason: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:275 +#, python-format +msgid "" +"Cannot delete temporary source snapshot %(src)s on NexentaStor Appliance:" +" %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:281 +#, python-format +msgid "Cannot delete source volume %(volume)s on NexentaStor Appliance: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:318 +#, python-format +msgid "Snapshot %s does not exist, it seems it was already deleted." 
+msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:439 +#: cinder/volume/drivers/windows/windows_utils.py:230 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:449 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:461 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:471 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:481 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:514 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:522 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:83 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:88 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:89 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:90 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:96 +#, fuzzy, python-format +msgid "Got response: %s" +msgstr "Got response: %s" + +#: cinder/volume/drivers/nexenta/nfs.py:85 +#, python-format +msgid "Volume %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:89 +#, python-format +msgid "Folder %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:114 +#, python-format +msgid "Creating folder on Nexenta Store %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:146 +#, python-format +msgid "Cannot destroy created folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:176 +#, python-format +msgid "Cannot destroy cloned folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:227 +#, python-format +msgid "Folder %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:237 +#: cinder/volume/drivers/nexenta/nfs.py:268 +#, python-format +msgid "Snapshot %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:302 +#, python-format +msgid "Creating regular file: %s.This may take some time." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:313 +#, python-format +msgid "Regular file: %s created." 
+msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:365 +#, python-format +msgid "Sharing folder %s on Nexenta Store" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:393 +#, python-format +msgid "Shares loaded: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/utils.py:46 +#, python-format +msgid "Invalid value: \"%s\"" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:93 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:99 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:107 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:137 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:190 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:246 +#, python-format +msgid "Snapshot info: %(name)s => %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:321 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:79 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:166 +#, python-format +msgid "Invalid hp3parclient version. Version %s or greater required." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:179 +#, python-format +msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:193 +#, python-format +msgid "HP3PARCommon %(common_ver)s, hp3parclient %(rest_ver)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:212 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:494 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:228 +#, python-format +msgid "Failed to get domain because CPG (%s) doesn't exist on array." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:247 +#, python-format +msgid "Error extending volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:347 +#, python-format +msgid "command %s failed" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:390 +#, fuzzy, python-format +msgid "Error running ssh command: %s" +msgstr "Error running ssh command: %s" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:586 +#, python-format +msgid "VV Set %s does not exist." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:633 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:684 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:752 +#, fuzzy, python-format +msgid "Volume (%s) already exists on array" +msgstr "Volume (%s) already exists on array" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1004 +#, python-format +msgid "Failure in update_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1019 +#, python-format +msgid "Failure in clear_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1031 +#, python-format +msgid "Error attaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1039 +#, python-format +msgid "Error detaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:124 +#, python-format +msgid "Invalid IP address format '%s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:158 +#, python-format +msgid "" +"Found invalid iSCSI IP address(s) in configuration option(s) " +"hp3par_iscsi_ips or iscsi_ip_address '%s.'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:164 +msgid "At least one valid iSCSI IP address must be set." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:266 +msgid "Least busy iSCSI port not found, using first iSCSI port in list." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:75 +#, python-format +msgid "Failure while invoking function: %(func)s. Error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:162 +#, python-format +msgid "Error while terminating session: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:165 +msgid "Successfully established connection to the server." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:172 +#, python-format +msgid "Error while logging out the user: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:218 +#, python-format +msgid "" +"Not authenticated error occurred. Will create session and try API call " +"again: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:258 +#, python-format +msgid "Task: %(task)s progress: %(prog)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:262 +#, python-format +msgid "Task %s status: success." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:266 +#: cinder/volume/drivers/vmware/api.py:271 +#, python-format +msgid "Task: %(task)s failed with error: %(err)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:290 +msgid "Lease is ready." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:294 +msgid "Lease initializing..." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:304 +#, python-format +msgid "Error: unknown lease state %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:51 +#, python-format +msgid "Read %(bytes)s out of %(max)s from ThreadSafePipe." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:56 +#, python-format +msgid "Completed transfer of size %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:102 +#, python-format +msgid "Initiating image service update on image: %(image)s with meta: %(meta)s" +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:117 +#, python-format +msgid "Glance image: %s is now active." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:123 +#, python-format +msgid "Glance image: %s is in killed state." 
+msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:132 +#, python-format +msgid "Glance image %(id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:171 +#, python-format +msgid "" +"Exception during HTTP connection close in VMwareHTTPWrite. Exception is " +"%s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:203 +#: cinder/volume/drivers/vmware/read_write_util.py:292 +msgid "Could not retrieve URL from lease." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:206 +#, python-format +msgid "Opening vmdk url: %s for write." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:231 +#, python-format +msgid "Written %s bytes to vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:242 +#: cinder/volume/drivers/vmware/read_write_util.py:318 +#, python-format +msgid "Updating progress to %s percent." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:258 +#: cinder/volume/drivers/vmware/read_write_util.py:334 +msgid "Lease released." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:260 +#: cinder/volume/drivers/vmware/read_write_util.py:336 +#, python-format +msgid "Lease is already in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:295 +#, python-format +msgid "Opening vmdk url: %s for read." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:307 +#, python-format +msgid "Read %s bytes from vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:150 +#, python-format +msgid "Error(s): %s occurred in the call to RetrievePropertiesEx." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:189 +#, python-format +msgid "No such SOAP method %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:198 +#, python-format +msgid "httplib error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:209 +#, python-format +msgid "Socket error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:218 +#, python-format +msgid "Type error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:225 +#, python-format +msgid "Error in %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:112 +#, python-format +msgid "Returning spec value %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:115 +#, python-format +msgid "Invalid spec value: %s specified." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:118 +#, python-format +msgid "Returning default spec value: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:169 +#, python-format +msgid "%s not set." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:174 +#, python-format +msgid "Successfully setup driver: %(driver)s for server: %(ip)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:219 +msgid "Backing not available, no operation to be performed." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:287 +#, python-format +msgid "" +"Unable to pick datastore to accommodate %(size)s bytes from the " +"datastores: %(dss)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:293 +#, python-format +msgid "" +"Selected datastore: %(datastore)s with %(host_count)d connected host(s) " +"for the volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:375 +#, python-format +msgid "" +"Unable to find suitable datastore for volume of size: %(vol)s GB under " +"host: %(host)s. 
More details: %(excep)s"
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:385
+#, python-format
+msgid "Unable to find host to accommodate a disk of size: %s in the inventory."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:412
+#, python-format
+msgid ""
+"Unable to find suitable datastore for volume: %(vol)s under host: "
+"%(host)s. More details: %(excep)s"
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:422
+#, python-format
+msgid "Unable to create volume: %s in the inventory."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:441
+#, python-format
+msgid "The instance: %s for which initialize connection is called, exists."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:448
+#, python-format
+msgid "There is no backing for the volume: %s. Need to create one."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:456
+msgid "The instance for which initialize connection is called, does not exist."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:461
+#, python-format
+msgid "Trying to boot from an empty volume: %s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:470
+#, python-format
+msgid ""
+"Returning connection_info: %(info)s for volume: %(volume)s with "
+"connector: %(connector)s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:518
+#, python-format
+msgid "Snapshot of volume not supported in state: %s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:523
+#, python-format
+msgid "There is no backing, so will not create snapshot: %s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:528
+#, python-format
+msgid "Successfully created snapshot: %s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:549
+#, python-format
+msgid "Delete snapshot of volume not supported in state: %s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:554
+#, python-format
+msgid "There is no backing, and so there is no snapshot: %s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:558
+#, python-format
+msgid "Successfully deleted snapshot: %s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:586
+#, python-format
+msgid "Successfully cloned new backing: %(back)s from source VMDK file: %(vmdk)s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:603
+#, python-format
+msgid ""
+"There is no backing for the source volume: %(svol)s. Not creating any "
+"backing for the volume: %(vol)s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:633
+#, python-format
+msgid ""
+"There is no backing for the source snapshot: %(snap)s. Not creating any "
+"backing for the volume: %(vol)s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:642
+#: cinder/volume/drivers/vmware/vmdk.py:982
+#, python-format
+msgid ""
+"There is no snapshot point for the snapshotted volume: %(snap)s. Not "
+"creating any backing for the volume: %(vol)s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:678
+#, python-format
+msgid "Cannot create image of disk format: %s. Only vmdk disk format is accepted."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:713
+#: cinder/volume/drivers/vmware/vmdk.py:771
+#, python-format
+msgid "Fetching glance image: %(id)s to server: %(host)s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:722
+#: cinder/volume/drivers/vmware/vmdk.py:792
+#, python-format
+msgid "Done copying image: %(id)s to volume: %(vol)s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:725
+#, python-format
+msgid ""
+"Exception in copy_image_to_volume: %(excep)s. Deleting the backing: "
+"%(back)s."
+msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:746 +#, python-format +msgid "Exception in _select_ds_for_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:749 +#, python-format +msgid "Selected datastore %(ds)s for new volume of size %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:784 +#, python-format +msgid "Exception in copy_image_to_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:787 +#, python-format +msgid "Deleting the backing: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:808 +#, python-format +msgid "Copy glance image: %s to create new volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:842 +msgid "Upload to glance of attached volume is not supported." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:847 +#, python-format +msgid "Copy Volume: %s to new image." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:853 +#, python-format +msgid "Backing not found, creating for volume: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:872 +#, python-format +msgid "Done copying volume %(vol)s to a new image %(img)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:922 +#, python-format +msgid "Relocating volume: %(backing)s to %(ds)s and %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:961 +#: cinder/volume/drivers/vmware/volumeops.py:630 +#, python-format +msgid "Successfully created clone: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:974 +#, python-format +msgid "" +"There is no backing for the snapshoted volume: %(snap)s. Not creating any" +" backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1010 +#, python-format +msgid "" +"There is no backing for the source volume: %(src)s. Not creating any " +"backing for volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1018 +#, python-format +msgid "Linked clone of source volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:94 +#, python-format +msgid "Downloading image: %s from glance image server as a flat vmdk file." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:107 +#: cinder/volume/drivers/vmware/vmware_images.py:126 +#, python-format +msgid "Downloaded image: %s from glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:113 +#, python-format +msgid "Downloading image: %s from glance image server using HttpNfc import." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:132 +#, python-format +msgid "Uploading image: %s to the Glance image server using HttpNfc export." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:158 +#, python-format +msgid "Uploaded image: %s to the Glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:87 +#, python-format +msgid "Did not find any backing with name: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:94 +#, python-format +msgid "Deleting the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:97 +#, python-format +msgid "Initiated deletion of VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:99 +#, python-format +msgid "Deleted the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:237 +#, python-format +msgid "There are no valid datastores attached to %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:289 +#, python-format +msgid "" +"Creating folder: %(child_folder_name)s under parent folder: " +"%(parent_folder)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:306 +#, python-format +msgid "Child folder already present: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:314 +#, python-format +msgid "Created child folder: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:365 +#, python-format +msgid "Spec for creating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:383 +#, python-format +msgid "" +"Creating volume backing name: %(name)s disk_type: %(disk_type)s size_kb: " +"%(size_kb)s at folder: %(folder)s resourse pool: %(resource_pool)s " +"datastore name: %(ds_name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:395 +#, python-format +msgid "Initiated creation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:398 +#, python-format +msgid "Successfully created volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:438 +#, python-format +msgid "Spec for relocating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:452 +#, python-format +msgid "" +"Relocating backing: %(backing)s to datastore: %(ds)s and resource pool: " +"%(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:462 +#, python-format +msgid "Initiated relocation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:464 +#, python-format +msgid "" +"Successfully relocated volume backing: %(backing)s to datastore: %(ds)s " +"and resource pool: %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:474 +#, python-format +msgid "Moving backing: %(backing)s to folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:479 +#, python-format +msgid "Initiated move of volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:482 +#, python-format +msgid "Successfully moved volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:494 +#, python-format +msgid "Snapshoting backing: %(backing)s with name: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:501 +#, python-format +msgid "Initiated snapshot of volume backing: %(backing)s named: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:505 +#, python-format +msgid "Successfully created snapshot: %(snap)s for volume backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:553 +#, python-format +msgid "Deleting the snapshot: %(name)s from backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:558 +#, python-format +msgid "" +"Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " +"delete anything." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:565 +#, python-format +msgid "Initiated snapshot: %(name)s deletion for backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:569 +#, python-format +msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:597 +#, python-format +msgid "Spec for cloning the backing: %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:613 +#, python-format +msgid "" +"Creating a clone of backing: %(back)s, named: %(name)s, clone type: " +"%(type)s from snapshot: %(snap)s on datastore: %(ds)s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:627 +#, python-format +msgid "Initiated clone of backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:638 +#, python-format +msgid "Deleting file: %(file)s under datacenter: %(dc)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:646 +#, python-format +msgid "Initiated deletion via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:648 +#, python-format +msgid "Successfully deleted file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:701 +msgid "Copying disk data before snapshot of the VM" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:711 +#, python-format +msgid "Initiated copying disk data via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:713 +#, python-format +msgid "Successfully copied disk at: %(src)s to: %(dest)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:722 +#, python-format +msgid "Deleting vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:729 +#, python-format +msgid "Initiated deleting vmdk file via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:731 +#, python-format +msgid "Deleted vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/windows/windows.py:102 +#, fuzzy, python-format +msgid "Creating folder %s " +msgstr "Creating folder %s " + +#: cinder/volume/drivers/windows/windows_utils.py:47 +#, python-format +msgid "" +"check_for_setup_error: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:53 +msgid "check_for_setup_error: there is no ISCSI traffic listening." +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:63 +#, python-format +msgid "" +"get_host_information: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:73 +#, python-format +msgid "" +"get_host_information: the ISCSI target information could not be " +"retrieved. WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:105 +#, python-format +msgid "" +"associate_initiator_with_iscsi_target: an association between initiator: " +"%(init)s and target name: %(target)s could not be established. WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:123 +#, python-format +msgid "" +"delete_iscsi_target: error when deleting the iscsi target associated with" +" target name: %(target)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:139 +#, python-format +msgid "" +"create_volume: error when creating the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:157 +#, python-format +msgid "" +"delete_volume: error when deleting the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:177 +#, python-format +msgid "" +"create_snapshot: error when creating the snapshot name: %(vol_name)s . 
" +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:193 +#, python-format +msgid "" +"create_volume_from_snapshot: error when creating the volume name: " +"%(vol_name)s from snapshot name: %(snap_name)s. WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:208 +#, python-format +msgid "" +"delete_snapshot: error when deleting the snapshot name: %(snap_name)s . " +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:223 +#, python-format +msgid "" +"create_iscsi_target: error when creating iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:240 +#, python-format +msgid "" +"remove_iscsi_target: error when deleting iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:255 +#, python-format +msgid "" +"add_disk_to_target: error adding disk associated to volume : %(vol_name)s" +" to the target name: %(tar_name)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:273 +#, python-format +msgid "" +"copy_vhd_disk: error when copying disk from source path : %(src_path)s to" +" destination path: %(dest_path)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:290 +#, python-format +msgid "" +"extend: error when extending the volume: %(vol_name)s .WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/flows/common.py:52 +#, python-format +msgid "Restoring source %(source_volid)s status to %(status)s" +msgstr "" + +#: cinder/volume/flows/common.py:58 +#, python-format +msgid "" +"Failed setting source volume %(source_volid)s back to its initial " +"%(source_status)s status" +msgstr "" + +#: cinder/volume/flows/common.py:83 +#, python-format +msgid "Updating volume: %(volume_id)s with %(update)s due to: %(reason)s" +msgstr "" + +#: cinder/volume/flows/common.py:90 +#: cinder/volume/flows/manager/create_volume.py:676 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(update)s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:81 +#, python-format +msgid "Originating snapshot status must be one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:103 +#, python-format +msgid "" +"Unable to create a volume from an originating source volume when its " +"status is not one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:126 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than the snapshot size " +"%(snap_size)sGB. They must be >= original snapshot size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:135 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than original volume size " +"%(source_size)sGB. They must be >= original volume size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:144 +#, python-format +msgid "Volume size %(size)s must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:186 +#, python-format +msgid "" +"Size of specified image %(image_size)sGB is larger than volume size " +"%(volume_size)sGB." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:194 +#, python-format +msgid "" +"Volume size %(volume_size)sGB cannot be smaller than the image minDisk " +"size %(min_disk)sGB." 
+msgstr "" + +#: cinder/volume/flows/api/create_volume.py:212 +#, python-format +msgid "Metadata property key %s greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:217 +#, python-format +msgid "Metadata property key %s value greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:254 +#, python-format +msgid "Availability zone '%s' is invalid" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:267 +msgid "Volume must be in the same availability zone as the snapshot" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:276 +msgid "Volume must be in the same availability zone as the source volume" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:315 +msgid "Volume type will be changed to be the same as the source volume." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:463 +#, python-format +msgid "Failed destroying volume entry %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:546 +#, python-format +msgid "Failed rolling back quota for %s reservations" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:590 +#, python-format +msgid "Failed to update quota for deleting volume: %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:678 +#: cinder/volume/flows/manager/create_volume.py:192 +#, python-format +msgid "Volume %s: create failed" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:682 +msgid "Unexpected build error:" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:105 +#, python-format +msgid "" +"Volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d due to " +"%(reason)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:124 +#, python-format +msgid "Volume %s: re-scheduled" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:141 +#, python-format +msgid "Updating volume %(volume_id)s with %(update)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:146 +#, python-format +msgid "Volume %s: resetting 'creating' status failed." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:165 +#, python-format +msgid "Volume %s: rescheduling failed" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:308 +#, python-format +msgid "" +"Failed notifying about the volume action %(event)s for volume " +"%(volume_id)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:345 +#, python-format +msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:347 +#, python-format +msgid "" +"Failed updating volume %(vol_id)s metadata using the provided " +"%(src_type)s %(src_id)s metadata" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:405 +#, python-format +msgid "" +"Failed fetching snapshot %(snapshot_id)s bootable flag using the provided" +" glance snapshot %(snapshot_ref_id)s volume reference" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:418 +#, python-format +msgid "Marking volume %s as bootable." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:421 +#, python-format +msgid "Failed updating volume %(volume_id)s bootable flag to true" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:448 +#, python-format +msgid "" +"Attempting download of %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s." 
+msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:455 +#: cinder/volume/flows/manager/create_volume.py:466 +#, python-format +msgid "" +"Failed to copy image %(image_id)s to volume: %(volume_id)s, error: " +"%(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:461 +#, python-format +msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:475 +#, python-format +msgid "" +"Downloaded image %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s successfully." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:512 +#, python-format +msgid "" +"Creating volume glance metadata for volume %(volume_id)s backed by image " +"%(image_id)s with: %(vol_metadata)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:526 +#, python-format +msgid "" +"Cloning %(volume_id)s from image %(image_id)s at location " +"%(image_location)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:552 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(updates)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:574 +#, python-format +msgid "Unable to create volume. Volume driver %s not initialized" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:588 +#, python-format +msgid "" +"Volume %(volume_id)s: being created using %(functor)s with specification:" +" %(volume_spec)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:611 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with creation provided " +"model %(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:619 +#, python-format +msgid "Volume %s: creating export" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:633 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with driver provided model " +"%(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:680 +#, python-format +msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" +msgstr "" + +#~ msgid "Error retrieving volume status: %s" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get system name" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get storage pool data" +#~ msgstr "" + +#~ msgid "Cannot find any Fibre Channel HBAs" +#~ msgstr "" + +#~ msgid "Volume status must be available or error" +#~ msgstr "Volume status must be available" + +#~ msgid "No backend config with id %s" +#~ msgstr "" + +#~ msgid "No sm_flavor called %s" +#~ msgstr "" + +#~ msgid "No sm_volume with id %s" +#~ msgstr "" + +#~ msgid "Error: %s" +#~ msgstr "Caught error: %s" + +#~ msgid "Unexpected state while cloning %s" +#~ msgstr "Unexpected error while running command." + +#~ msgid "iSCSI device not found at %s" +#~ msgstr "" + +#~ msgid "Fibre Channel device not found." 
+#~ msgstr "" + +#~ msgid "Uncaught exception" +#~ msgstr "Got exception: %s" + +#~ msgid "Out reactor registered" +#~ msgstr "" + +#~ msgid "CONSUMER GOT %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT QUEUED %(data)s" +#~ msgstr "" + +#~ msgid "Could not create IPC directory %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT %(data)s" +#~ msgstr "" + +#~ msgid "May specify only one of snapshot, imageRef or source volume" +#~ msgstr "" + +#~ msgid "Volume size cannot be lesser than the Snapshot size" +#~ msgstr "" + +#~ msgid "Unable to clone volumes that are in an error state" +#~ msgstr "" + +#~ msgid "Clones currently must be >= original volume size." +#~ msgstr "" + +#~ msgid "Volume size '%s' must be an integer and greater than 0" +#~ msgstr "" + +#~ msgid "Size of specified image is larger than volume size." +#~ msgstr "" + +#~ msgid "Image minDisk size is larger than the volume size." +#~ msgstr "" + +#~ msgid "" +#~ msgstr "" + +#~ msgid "Availability zone is invalid" +#~ msgstr "" + +#~ msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +#~ msgstr "volume %(vol_name)s: creating logical volume of size %(vol_size)sG" + +#~ msgid "volume %s: creating from snapshot" +#~ msgstr "volume %s: creating export" + +#~ msgid "volume %s: creating from existing volume" +#~ msgstr "volume %s: creating export" + +#~ msgid "volume %s: creating from image" +#~ msgstr "volume %s: creating" + +#~ msgid "volume %s: creating" +#~ msgstr "volume %s: creating" + +#~ msgid "Setting volume: %s status to error after failed image copy." +#~ msgstr "" + +#~ msgid "Unexpected Error: " +#~ msgstr "" + +#~ msgid "volume %s: creating export" +#~ msgstr "volume %s: creating export" + +#~ msgid "volume %s: create failed" +#~ msgstr "volume %s: creating" + +#~ msgid "volume %s: created successfully" +#~ msgstr "volume %s: created successfully" + +#~ msgid "volume %s: Error trying to reschedule create" +#~ msgstr "" + +#~ msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +#~ msgstr "" + +#~ msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +#~ msgstr "Tried to remove non-existent console %(console_id)s." + +#~ msgid "Downloaded image %(image_id)s to %(volume_id)s successfully." +#~ msgstr "" + +#~ msgid "Array Mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "LUN %(lun)s of size %(size)s MB is created." +#~ msgstr "" + +#~ msgid "Array mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "Failed to attach iser target for volume %(volume_id)s." 
+#~ msgstr "" + +#~ msgid "Fetching %s" +#~ msgstr "Fetching %s" + +#~ msgid "Link Local address is not found.:%s" +#~ msgstr "Link Local address is not found.:%s" + +#~ msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +#~ msgstr "Couldn't get Link Local IP of %(interface)s :%(ex)s" + +#~ msgid "Started %(name)s on %(_host)s:%(_port)s" +#~ msgstr "" + +#~ msgid "Unable to find a Fibre Channel volume device" +#~ msgstr "" + +#~ msgid "Volume device not found at %s" +#~ msgstr "" + +#~ msgid "Unable to find Volume Group: %s" +#~ msgstr "Unable to find Volume Group: %s" + +#~ msgid "Failed to create Volume Group: %s" +#~ msgstr "Failed to create Volume Group: %s" + +#~ msgid "snapshot %(snap_name)s: creating" +#~ msgstr "" + +#~ msgid "Running with CoraidDriver for ESM EtherCLoud" +#~ msgstr "" + +#~ msgid "Update session cookie %(session)s" +#~ msgstr "" + +#~ msgid "Message : %(message)s" +#~ msgstr "" + +#~ msgid "Error while trying to set group: %(message)s" +#~ msgstr "" + +#~ msgid "Unable to find group: %(group)s" +#~ msgstr "Unable to find group: %(group)s" + +#~ msgid "ESM urlOpen error" +#~ msgstr "" + +#~ msgid "JSON Error" +#~ msgstr "" + +#~ msgid "Request without URL" +#~ msgstr "" + +#~ msgid "Configure data : %s" +#~ msgstr "" + +#~ msgid "Configure response : %s" +#~ msgstr "Configure response : %s" + +#~ msgid "Unable to retrive volume infos for volume %(volname)s" +#~ msgstr "" + +#~ msgid "Cannot login on Coraid ESM" +#~ msgstr "" + +#~ msgid "Fail to create volume %(volname)s" +#~ msgstr "Fail to create volume %(volname)s" + +#~ msgid "Failed to delete volume %(volname)s" +#~ msgstr "Failed to delete volume %(volname)s" + +#~ msgid "Failed to Create Snapshot %(snapname)s" +#~ msgstr "Failed to Create Snapshot %(snapname)s" + +#~ msgid "Failed to Delete Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "Failed to Create Volume from Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "fmt = %(fmt)s backed by: %(backing_file)s" +#~ msgstr "" + +#~ msgid "Expected image to be in raw format, but is %s" +#~ msgstr "" + +#~ msgid "volume group %s doesn't exist" +#~ msgstr "volume group %s doesn't exist" + +#~ msgid "Error retrieving volume stats: %s" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Could not get system name" +#~ msgstr "" + +#~ msgid "CPG (%s) must be in a domain" +#~ msgstr "" + +#~ msgid "Error populating default encryption types!" +#~ msgstr "" + +#~ msgid "Unexpected error while running command." +#~ msgstr "Unexpected error while running command." 
+ +#~ msgid "Nexenta SA returned the error" +#~ msgstr "" + +#~ msgid "Ignored target group creation error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group member addition error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LU creation error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Copying metadata from snapshot %(snap_volume_id)s to %(volume_id)s" +#~ msgstr "" + +#~ msgid "Connection to glance failed" +#~ msgstr "" + +#~ msgid "Invalid snapshot" +#~ msgstr "" + +#~ msgid "Invalid input received" +#~ msgstr "" + +#~ msgid "Invalid volume type" +#~ msgstr "" + +#~ msgid "Invalid volume" +#~ msgstr "" + +#~ msgid "Invalid host" +#~ msgstr "" + +#~ msgid "Invalid auth key" +#~ msgstr "" + +#~ msgid "Invalid metadata" +#~ msgstr "" + +#~ msgid "Invalid metadata size" +#~ msgstr "" -#: cinder/volume/san.py:458 -#, python-format -msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" -msgstr "" +#~ msgid "Migration error" +#~ msgstr "" -#: cinder/volume/san.py:466 -#, python-format -msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" -msgstr "" +#~ msgid "Quota exceeded" +#~ msgstr "" -#: cinder/volume/san.py:496 -#, python-format -msgid "" -"Unexpected number of virtual ips for cluster %(cluster_name)s. " -"Result=%(_xml)s" -msgstr "" +#~ msgid "Connection to swift failed" +#~ msgstr "Connection to swift failed" -#: cinder/volume/san.py:549 -#, python-format -msgid "Volume info: %(volume_name)s => %(volume_attributes)s" -msgstr "" +#~ msgid "Volume migration failed" +#~ msgstr "" -#: cinder/volume/san.py:594 -msgid "local_path not supported" -msgstr "" +#~ msgid "SSH command injection detected" +#~ msgstr "" -#: cinder/volume/san.py:626 -#, python-format -msgid "Could not determine project for volume %s, can't export" -msgstr "" +#~ msgid "Invalid qos specs" +#~ msgstr "" -#: cinder/volume/san.py:696 -#, python-format -msgid "Payload for SolidFire API call: %s" -msgstr "" +#~ msgid "debug in callback: %s" +#~ msgstr "debug in callback: %s" -#: cinder/volume/san.py:713 -#, python-format -msgid "Call to json.loads() raised an exception: %s" -msgstr "" +#~ msgid "Expected object of type: %s" +#~ msgstr "" -#: cinder/volume/san.py:718 -#, python-format -msgid "Results of SolidFire API call: %s" -msgstr "" +#~ msgid "timefunc: '%(name)s' took %(total_time).2f secs" +#~ msgstr "" -#: cinder/volume/san.py:732 -#, python-format -msgid "Found solidfire account: %s" -msgstr "" +#~ msgid "base image still has %s snapshots so not deleting base image" +#~ msgstr "" -#: cinder/volume/san.py:746 -#, python-format -msgid "solidfire account: %s does not exist, create it..." -msgstr "" +#~ msgid "Failed to rename migration destination volume %(vol)s to %(name)s" +#~ msgstr "" -#: cinder/volume/san.py:804 -msgid "Enter SolidFire create_volume..." -msgstr "" +#~ msgid "Resize volume \"%(name)s\" to %(size)s" +#~ msgstr "" -#: cinder/volume/san.py:846 -msgid "Leaving SolidFire create_volume" -msgstr "" +#~ msgid "Volume \"%(name)s\" resized. New size is %(size)s" +#~ msgstr "" -#: cinder/volume/san.py:861 -msgid "Enter SolidFire delete_volume..." 
-msgstr "" +#~ msgid "Invalid snapshot backing file format: %s" +#~ msgstr "" -#: cinder/volume/san.py:880 -#, python-format -msgid "Deleting volumeID: %s " -msgstr "" +#~ msgid "Extend volume from %(old_size) to %(new_size)" +#~ msgstr "" -#: cinder/volume/san.py:888 -msgid "Leaving SolidFire delete_volume" -msgstr "" +#~ msgid "pool %s doesn't exist" +#~ msgstr "pool %s doesn't exist" -#: cinder/volume/san.py:891 -msgid "Executing SolidFire ensure_export..." -msgstr "" +#~ msgid "_update_volume_stats: Could not get system name." +#~ msgstr "" -#: cinder/volume/san.py:895 -msgid "Executing SolidFire create_export..." -msgstr "" +#~ msgid "Disk not found: %s" +#~ msgstr "Disk not found: %s" -#: cinder/volume/volume_types.py:49 cinder/volume/volume_types.py:108 -msgid "name cannot be None" -msgstr "" +#~ msgid "read timed out" +#~ msgstr "" -#: cinder/volume/volume_types.py:96 -msgid "id cannot be None" -msgstr "" +#~ msgid "check_for_setup_error." +#~ msgstr "" -#: cinder/volume/xensm.py:55 -#, python-format -msgid "SR name = %s" -msgstr "" +#~ msgid "check_for_setup_error: Can not get device type." +#~ msgstr "" -#: cinder/volume/xensm.py:56 -#, python-format -msgid "Params: %s" -msgstr "" +#~ msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +#~ msgstr "" -#: cinder/volume/xensm.py:60 -#, python-format -msgid "Failed to create sr %s...continuing" -msgstr "" +#~ msgid "_get_device_type: Storage Pool must be configured." +#~ msgstr "" -#: cinder/volume/xensm.py:62 -msgid "Create failed" -msgstr "" +#~ msgid "create_volume:volume name: %s." +#~ msgstr "" -#: cinder/volume/xensm.py:64 -#, python-format -msgid "SR UUID of new SR is: %s" -msgstr "" +#~ msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +#~ msgstr "" -#: cinder/volume/xensm.py:71 -msgid "Failed to update db" -msgstr "" +#~ msgid "create_export: volume name:%s" +#~ msgstr "" -#: cinder/volume/xensm.py:80 -#, python-format -msgid "Failed to introduce sr %s...continuing" -msgstr "" +#~ msgid "create_export:Volume %(name)s does not exist." +#~ msgstr "" -#: cinder/volume/xensm.py:91 -#, python-format -msgid "Failed to reach backend %d" -msgstr "" +#~ msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +#~ msgstr "" -#: cinder/volume/xensm.py:100 -msgid "XenSMDriver requires xenapi connection" -msgstr "" +#~ msgid "terminate_connection:Host does not exist. Host name:%(host)s." +#~ msgstr "" -#: cinder/volume/xensm.py:110 -msgid "Failed to initiate session" -msgstr "" +#~ msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +#~ msgstr "" -#: cinder/volume/xensm.py:142 -#, python-format -msgid "Volume will be created in backend - %d" -msgstr "" +#~ msgid "create_snapshot:Device does not support snapshot." +#~ msgstr "" -#: cinder/volume/xensm.py:154 -msgid "Failed to update volume in db" -msgstr "" +#~ msgid "create_snapshot:Resource pool needs 1GB valid size at least." +#~ msgstr "" -#: cinder/volume/xensm.py:157 -msgid "Unable to create volume" -msgstr "" +#~ msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +#~ msgstr "" -#: cinder/volume/xensm.py:171 -msgid "Failed to delete vdi" -msgstr "" +#~ msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s" +#~ msgstr "" -#: cinder/volume/xensm.py:177 -msgid "Failed to delete volume in db" -msgstr "" +#~ msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." 
+#~ msgstr "" -#: cinder/volume/xensm.py:210 -msgid "Failed to find volume in db" -msgstr "" +#~ msgid "delete_snapshot:Device does not support snapshot." +#~ msgstr "" -#: cinder/volume/xensm.py:221 -msgid "Failed to find backend in db" -msgstr "" +#~ msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +#~ msgstr "" -#: cinder/volume/nexenta/__init__.py:27 -msgid "Nexenta SA returned the error" -msgstr "" +#~ msgid "_check_conf_file: %s" +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:64 -#, python-format -msgid "Sending JSON data: %s" -msgstr "" +#~ msgid "Write login information to xml error. %s" +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:69 -#, python-format -msgid "Auto switching to HTTPS connection to %s" -msgstr "" +#~ msgid "_get_login_info error. %s" +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:75 -msgid "No headers in server response" -msgstr "" +#~ msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:76 -msgid "Bad response from server" -msgstr "" +#~ msgid "_get_lun_set_info:%s" +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:79 -#, fuzzy, python-format -msgid "Got response: %s" -msgstr "response %s" +#~ msgid "_get_iscsi_info:%s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:96 -#, fuzzy, python-format -msgid "Volume %s does not exist in Nexenta SA" -msgstr "volume group %s doesn't exist" +#~ msgid "CLI command:%s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:180 -msgid "" -"Call to local_path should not happen. Verify that use_local_volumes flag " -"is turned off." -msgstr "" +#~ msgid "_execute_cli:%s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:202 -#, python-format -msgid "Ignored target creation error \"%s\" while ensuring export" -msgstr "" +#~ msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:210 -#, python-format -msgid "Ignored target group creation error \"%s\" while ensuring export" -msgstr "" +#~ msgid "_get_tgt_iqn:iSCSI IP is %s." +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:219 -#, python-format -msgid "Ignored target group member addition error \"%s\" while ensuring export" -msgstr "" +#~ msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:227 -#, python-format -msgid "Ignored LU creation error \"%s\" while ensuring export" -msgstr "" +#~ msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:237 -#, python-format -msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" -msgstr "" +#~ msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:273 -#, python-format -msgid "" -"Got error trying to destroy target group %(target_group)s, assuming it is" -" already gone: %(exc)s" -msgstr "" +#~ msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:280 -#, python-format -msgid "" -"Got error trying to delete target %(target)s, assuming it is already " -"gone: %(exc)s" -msgstr "" +#~ msgid "Ignored target creation error while ensuring export" +#~ msgstr "" -#~ msgid "Unable to locate account %(account_name) on Solidfire device" +#~ msgid "Ignored target group creation error while ensuring export" #~ msgstr "" -#~ msgid "Zone %(zone_id)s could not be found." 
+#~ msgid "Ignored target group member addition error while ensuring export" #~ msgstr "" -#~ msgid "Cinder access parameters were not specified." +#~ msgid "Ignored LU creation error while ensuring export" #~ msgstr "" -#~ msgid "Virtual Storage Array %(id)d could not be found." +#~ msgid "Ignored LUN mapping entry addition error while ensuring export" #~ msgstr "" -#~ msgid "Virtual Storage Array %(name)s could not be found." +#~ msgid "Invalid source volume %(reason)s." #~ msgstr "" -#~ msgid "Detected more than one volume with name %(vol_name)" +#~ msgid "The request is invalid." #~ msgstr "" -#~ msgid "Detected existing vlan with id %(vlan)" +#~ msgid "Volume %(volume_id)s persistence file could not be found." #~ msgstr "" -#~ msgid "" -#~ "Attempting to grab semaphore \"%(lock)s\" " -#~ "for method \"%(method)s\"...lock" +#~ msgid "No disk at %(location)s" #~ msgstr "" -#~ msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgid "Class %(class_name)s could not be found: %(exception)s" #~ msgstr "" -#~ msgid "" -#~ "Attempting to grab file lock " -#~ "\"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgid "Action not allowed." #~ msgstr "" -#~ msgid "Got file lock \"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgid "Key pair %(key_name)s already exists." #~ msgstr "" -#~ msgid "Parent group id and group id cannot be same" +#~ msgid "Migration error: %(reason)s" #~ msgstr "" -#~ msgid "No body provided" +#~ msgid "Maximum volume/snapshot size exceeded" #~ msgstr "" -#~ msgid "Create VSA %(display_name)s of type %(vc_type)s" +#~ msgid "3PAR Host already exists: %(err)s. %(info)s" #~ msgstr "" -#~ msgid "Delete VSA with id: %s" +#~ msgid "Backup volume %(volume_id)s type not recognised." #~ msgstr "" -#~ msgid "Associate address %(ip)s to VSA %(id)s" +#~ msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s" #~ msgstr "" -#~ msgid "Disassociate address from VSA %(id)s" +#~ msgid "ssh_read: Read SSH timeout" #~ msgstr "" -#~ msgid "%(obj)s with ID %(id)s not found" +#~ msgid "do_setup." #~ msgstr "" -#~ msgid "" -#~ "%(obj)s with ID %(id)s belongs to " -#~ "VSA %(own_vsa_id)s and not to VSA " -#~ "%(vsa_id)s." +#~ msgid "create_volume: volume name: %s." #~ msgstr "" -#~ msgid "Index. vsa_id=%(vsa_id)s" +#~ msgid "delete_volume: volume name: %s." #~ msgstr "" -#~ msgid "Detail. vsa_id=%(vsa_id)s" +#~ msgid "create_cloned_volume: src volume: %(src)s tgt volume: %(tgt)s" #~ msgstr "" -#~ msgid "Create. vsa_id=%(vsa_id)s, body=%(body)s" +#~ msgid "create_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" #~ msgstr "" -#~ msgid "Create volume of %(size)s GB from VSA ID %(vsa_id)s" +#~ msgid "delete_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" #~ msgstr "" -#~ msgid "Update %(obj)s with id: %(id)s, changes: %(changes)s" +#~ msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s" #~ msgstr "" -#~ msgid "Delete. vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgid "_update_volume_stats: Updating volume stats" #~ msgstr "" -#~ msgid "Show. vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgid "restore finished." #~ msgstr "" -#~ msgid "Index instances for VSA %s" +#~ msgid "Error encountered during initialization of driver: %s" #~ msgstr "" -#~ msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances" +#~ msgid "Unabled to update stats, driver is uninitialized" #~ msgstr "" -#~ msgid "" -#~ "Instance quota exceeded. You cannot run" -#~ " any more instances of this type." +#~ msgid "Snapshot file at %s does not exist." 
#~ msgstr "" -#~ msgid "Going to try to soft delete %s" +#~ msgid "_create_copy: Source vdisk %s does not exist" #~ msgstr "" -#~ msgid "No host for instance %s, deleting immediately" +#~ msgid "Login to 3PAR array invalid" #~ msgstr "" -#~ msgid "Going to try to stop %s" +#~ msgid "There are no datastores present under %s." #~ msgstr "" -#~ msgid "Going to try to start %s" +#~ msgid "Size for volume: %s not found, skipping secure delete." #~ msgstr "" -#~ msgid "" -#~ "Going to force the deletion of the" -#~ " vm %(instance_uuid)s, even if it is" -#~ " deleted" +#~ msgid "Could not find attribute for LUN named %s" +#~ msgstr "Could not find attribute for LUN named %s" + +#~ msgid "Cleaning up incomplete backup operations" #~ msgstr "" -#~ msgid "" -#~ "Instance %(instance_uuid)s did not exist " -#~ "in the DB, but I will shut " -#~ "it down anyway using a special " -#~ "context" +#~ msgid "Resetting volume %s to available (was backing-up)" #~ msgstr "" -#~ msgid "exception terminating the instance %(instance_uuid)s" +#~ msgid "Resetting volume %s to error_restoring (was restoring-backup)" #~ msgstr "" -#~ msgid "trying to destroy already destroyed instance: %s" -#~ msgstr "trying to destroy already destroyed instance: %s" +#~ msgid "Resetting backup %s to error (was creating)" +#~ msgstr "" -#~ msgid "" -#~ "Instance %(name)s found in database but" -#~ " not known by hypervisor. Setting " -#~ "power state to NOSTATE" +#~ msgid "Resetting backup %s to available (was restoring)" #~ msgstr "" -#~ msgid "" -#~ "Detected instance with name label " -#~ "'%(name_label)s' which is marked as " -#~ "DELETED but still present on host." +#~ msgid "Resuming delete on backup: %s" #~ msgstr "" -#~ msgid "" -#~ "Destroying instance with name label " -#~ "'%(name_label)s' which is marked as " -#~ "DELETED but still present on host." +#~ msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" #~ msgstr "" -#~ msgid "SQL connection failed (%(connstring)s). %(attempts)d attempts left." +#~ msgid "create_backup finished. backup: %s" #~ msgstr "" -#~ msgid "Can't downgrade without losing data" +#~ msgid "delete_backup started, backup: %s" #~ msgstr "" -#~ msgid "Instance %(instance_id)s not found" +#~ msgid "delete_backup finished, backup %s deleted" #~ msgstr "" -#~ msgid "Network %s has active ports, cannot delete" +#~ msgid "JSON transfer Error" #~ msgstr "" -#~ msgid "No fixed IPs to deallocate for vif %sid" +#~ msgid "create volume error: %(err)s" #~ msgstr "" -#~ msgid "" -#~ "AMQP server on %(fl_host)s:%(fl_port)d is " -#~ "unreachable: %(e)s. Trying again in " -#~ "%(fl_intv)d seconds." +#~ msgid "Create snapshot error." #~ msgstr "" -#~ msgid "Unable to connect to AMQP server after %(tries)d tries. Shutting down." +#~ msgid "Create luncopy error." #~ msgstr "" -#~ msgid "Failed to fetch message from queue: %s" +#~ msgid "_find_host_lun_id transfer data error! " #~ msgstr "" -#~ msgid "Initing the Adapter Consumer for %s" -#~ msgstr "Initing the Adapter Consumer for %s" +#~ msgid "ssh_read: Read SSH timeout." +#~ msgstr "" -#~ msgid "Created \"%(exchange)s\" fanout exchange with \"%(key)s\" routing key" +#~ msgid "There are no hosts in the inventory." #~ msgstr "" -#~ msgid "Exception while processing consumer" +#~ msgid "Unable to create volume: %(vol)s on the hosts: %(hosts)s." #~ msgstr "" -#~ msgid "Creating \"%(exchange)s\" fanout exchange" +#~ msgid "Successfully cloned new backing: %s." 
#~ msgstr "" -#~ msgid "topic is %s" -#~ msgstr "topic is %s" +#~ msgid "Successfully reverted clone: %(clone)s to snapshot: %(snapshot)s." +#~ msgstr "" -#~ msgid "message %s" -#~ msgstr "message %s" +#~ msgid "Copying backing files from %(src)s to %(dest)s." +#~ msgstr "" -#~ msgid "" -#~ "Cannot confirm tmpfile at %(ipath)s is" -#~ " on same shared storage between " -#~ "%(src)s and %(dest)s." +#~ msgid "Initiated copying of backing via task: %s." #~ msgstr "" -#~ msgid "" -#~ "Unable to migrate %(instance_id)s to " -#~ "%(dest)s: Lack of memory(host:%(avail)s <= " -#~ "instance:%(mem_inst)s)" +#~ msgid "Successfully copied backing to %s." #~ msgstr "" -#~ msgid "" -#~ "Unable to migrate %(instance_id)s to " -#~ "%(dest)s: Lack of disk(host:%(available)s <=" -#~ " instance:%(necessary)s)" +#~ msgid "Registering backing at path: %s to inventory." #~ msgstr "" -#~ msgid "Driver Method %(driver_method)s missing: %(e)s.Reverting to schedule()" +#~ msgid "Initiated registring backing, task: %s." #~ msgstr "" -#~ msgid "Setting instance %(instance_uuid)s to ERROR state." +#~ msgid "Successfully registered backing: %s." #~ msgstr "" -#~ msgid "_filter_hosts: %(request_spec)s" +#~ msgid "Reverting backing to snapshot: %s." #~ msgstr "" -#~ msgid "Filter hosts for drive type %s" +#~ msgid "Initiated reverting snapshot via task: %s." #~ msgstr "" -#~ msgid "Host %s has no free capacity. Skip" +#~ msgid "Successfully reverted to snapshot: %s." #~ msgstr "" -#~ msgid "Filter hosts: %s" +#~ msgid "Successfully copied disk data to: %s." #~ msgstr "" -#~ msgid "Must implement host selection mechanism" +#~ msgid "Error(s): %s occurred in the call to RetrieveProperties." #~ msgstr "" -#~ msgid "Maximum number of hosts selected (%d)" +#~ msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" #~ msgstr "" -#~ msgid "Selected excessive host %(host)s" +#~ msgid "Deploy v1 of the Cinder API. " #~ msgstr "" -#~ msgid "Provision volume %(name)s of size %(size)s GB on host %(host)s" +#~ msgid "Deploy v2 of the Cinder API. " #~ msgstr "" -#~ msgid "volume_params %(volume_params)s" +#~ msgid "_read_xml:%s" #~ msgstr "" -#~ msgid "%(i)d: Volume %(name)s" +#~ msgid "request ip info is %s." #~ msgstr "" -#~ msgid "Attempting to spawn %(num_volumes)d volume(s)" +#~ msgid "new str info is %s." #~ msgstr "" -#~ msgid "Error creating volumes" +#~ msgid "Failed to create iser target for volume %(volume_id)s." #~ msgstr "" -#~ msgid "Non-VSA volume %d" +#~ msgid "Failed to remove iser target for volume %(volume_id)s." #~ msgstr "" -#~ msgid "Spawning volume %(volume_id)s with drive type %(drive_type)s" +#~ msgid "rtstool is not installed correctly" #~ msgstr "" -#~ msgid "Error creating volume" +#~ msgid "Creating iser_target for: %s" #~ msgstr "" -#~ msgid "No capability selected for volume of size %(size)s" +#~ msgid "Failed to create iser target for volume id:%(vol_id)s: %(e)s" #~ msgstr "" -#~ msgid "Host %s:" +#~ msgid "Removing iser_target for: %s" #~ msgstr "" -#~ msgid "" -#~ "\tDrive %(qosgrp)-25s: total %(total)2s, " -#~ "used %(used)2s, free %(free)2s. Available " -#~ "capacity %(avail)-5s" +#~ msgid "Failed to remove iser target for volume id:%(vol_id)s: %(e)s" #~ msgstr "" -#~ msgid "" -#~ "\t LeastUsedHost: Best host: %(best_host)s." -#~ " (used capacity %(min_used)s)" +#~ msgid "Volume %s does not exist, it seems it was already deleted" #~ msgstr "" -#~ msgid "" -#~ "\t MostAvailCap: Best host: %(best_host)s. 
" -#~ "(available %(max_avail)s %(type_str)s)" +#~ msgid "Executing zfs send/recv on the appliance" #~ msgstr "" -#~ msgid "(%(nm)s) publish (key: %(routing_key)s) %(message)s" -#~ msgstr "(%(nm)s) publish (key: %(routing_key)s) %(message)s" +#~ msgid "zfs send/recv done, new volume %s created" +#~ msgstr "" -#~ msgid "Publishing to route %s" -#~ msgstr "Publishing to route %s" +#~ msgid "Failed to delete temp snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" -#~ msgid "Declaring queue %s" -#~ msgstr "Declaring queue %s" +#~ msgid "Failed to delete zfs recv snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" -#~ msgid "Declaring exchange %s" -#~ msgstr "Declaring exchange %s" +#~ msgid "rbd export-diff failed - %s" +#~ msgstr "" -#~ msgid "Binding %(queue)s to %(exchange)s with key %(routing_key)s" -#~ msgstr "Binding %(queue)s to %(exchange)s with key %(routing_key)s" +#~ msgid "rbd import-diff failed - %s" +#~ msgstr "" -#~ msgid "Getting from %(queue)s: %(message)s" -#~ msgstr "Getting from %(queue)s: %(message)s" +#~ msgid "%s is not on GPFS. Perhaps GPFS not mounted." +#~ msgstr "" -#~ msgid "Test: Emulate wrong VSA name. Raise" +#~ msgid "Folder %s does not exist, it seems it was already deleted." #~ msgstr "" -#~ msgid "Test: Emulate DB error. Raise" +#~ msgid "No 'os-update_readonly_flag' was specified in request." #~ msgstr "" -#~ msgid "Test: user_data = %s" +#~ msgid "Volume 'readonly' flag must be specified in request as a boolean." #~ msgstr "" -#~ msgid "_create: param=%s" +#~ msgid "ISER provider_location not stored, using discovery" #~ msgstr "" -#~ msgid "Host %s" +#~ msgid "Could not find iSER export for volume %s" #~ msgstr "" -#~ msgid "Test: provision vol %(name)s on host %(host)s" +#~ msgid "ISER Discovery: Found %s" #~ msgstr "" -#~ msgid "\t vol=%(vol)s" +#~ msgid "Failed to access the device on the path %(path)s: %(error)s." #~ msgstr "" -#~ msgid "Test: VSA update request: vsa_id=%(vsa_id)s values=%(values)s" +#~ msgid "iSER device not found at %s" #~ msgstr "" -#~ msgid "Test: Volume create: %s" +#~ msgid "Found iSER node %(host_device)s (after %(tries)s rescans)." #~ msgstr "" -#~ msgid "Test: Volume get request: id=%(volume_id)s" +#~ msgid "Skipping ensure_export. No iser_target provisioned for volume: %s" #~ msgstr "" -#~ msgid "Test: Volume update request: id=%(volume_id)s values=%(values)s" +#~ msgid "Skipping remove_export. No iser_target provisioned for volume: %s" #~ msgstr "" -#~ msgid "Test: Volume get: id=%(volume_id)s" +#~ msgid "Downloading image: %s from glance image server." #~ msgstr "" -#~ msgid "Task [%(name)s] %(task)s status: success %(result)s" -#~ msgstr "Task [%(name)s] %(task)s status: success %(result)s" +#~ msgid "Uploading image: %s to the Glance image server." 
+#~ msgstr "" -#~ msgid "Task [%(name)s] %(task)s status: %(status)s %(error_info)s" -#~ msgstr "Task [%(name)s] %(task)s status: %(status)s %(error_info)s" +#~ msgid "Invalid request body" +#~ msgstr "" -#~ msgid "Unable to get updated status: %s" +#~ msgid "enter: _get_host_from_connector: prefix %s" #~ msgstr "" -#~ msgid "" -#~ "deactivate_node is called for " -#~ "node_id = %(id)s node_ip = %(ip)s" +#~ msgid "Schedule volume flow not retrieved" #~ msgstr "" -#~ msgid "virsh said: %r" -#~ msgstr "virsh said: %r" +#~ msgid "Failed to successfully complete schedule volume using flow: %s" +#~ msgstr "" -#~ msgid "cool, it's a device" -#~ msgstr "cool, it's a device" +#~ msgid "Create volume flow not retrieved" +#~ msgstr "" -#~ msgid "Unable to read LXC console" +#~ msgid "Failed to successfully complete create volume workflow" #~ msgstr "" -#~ msgid "" -#~ "to xml...\n" -#~ ":%s " +#~ msgid "Expected volume result not found" #~ msgstr "" -#~ msgid "During wait running, %s disappeared." +#~ msgid "Manager volume flow not retrieved" #~ msgstr "" -#~ msgid "Instance %s running successfully." +#~ msgid "Failed to successfully complete manager volume workflow" #~ msgstr "" -#~ msgid "The nwfilter(%(instance_secgroup_filter_name)s) is not found." +#~ msgid "Unable to update stats, driver is uninitialized" #~ msgstr "" -#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image verification failed" +#~ msgid "Bad reponse from server: %s" #~ msgstr "" -#~ msgid "" -#~ "%(container_format)s-%(id)s (%(base_file)s): image " -#~ "verification skipped, no hash stored" +#~ msgid "%(flow)s has moved into state %(state)s from state %(old_state)s" #~ msgstr "" -#~ msgid "%(container_format)s-%(id)s (%(base_file)s): checking" +#~ msgid "No request spec, will not reschedule" #~ msgstr "" -#~ msgid "" -#~ "%(container_format)s-%(id)s (%(base_file)s): in use:" -#~ " on this node %(local)d local, " -#~ "%(remote)d on other nodes" +#~ msgid "No retry filter property or associated retry info, will not reschedule" #~ msgstr "" -#~ msgid "" -#~ "%(container_format)s-%(id)s (%(base_file)s): warning " -#~ "-- an absent base file is in " -#~ "use! instances: %(instance_list)s" +#~ msgid "Retry info not present, will not reschedule" #~ msgstr "" -#~ msgid "" -#~ "%(container_format)s-%(id)s (%(base_file)s): in: on" -#~ " other nodes (%(remote)d on other " -#~ "nodes)" +#~ msgid "Clear capabilities" #~ msgstr "" -#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image is not in use" +#~ msgid "This usually means the volume was never succesfully created." #~ msgstr "" -#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image is in use" +#~ msgid "setting LU uppper (end) limit to %s" #~ msgstr "" -#~ msgid "Created VM %s..." -#~ msgstr "Created VM %s..." +#~ msgid "Can't find lun or lun goup in array" +#~ msgstr "" -#~ msgid "Created VM %(instance_name)s as %(vm_ref)s." -#~ msgstr "Created VM %(instance_name)s as %(vm_ref)s." +#~ msgid "Volume to be restored to is smaller than the backup to be restored" +#~ msgstr "" -#~ msgid "Creating a CDROM-specific VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +#~ msgid "Volume driver '%(driver)s' not initialized." #~ msgstr "" -#~ msgid "" -#~ "Created a CDROM-specific VBD %(vbd_ref)s" -#~ " for VM %(vm_ref)s, VDI %(vdi_ref)s." +#~ msgid "in looping call" #~ msgstr "" -#~ msgid "No primary VDI found for%(vm_ref)s" +#~ msgid "Is the appropriate service running?" 
#~ msgstr "" -#~ msgid "Fetching image %(image)s" +#~ msgid "Could not find another host" #~ msgstr "" -#~ msgid "Image Type: %s" +#~ msgid "Not enough allocatable volume gigabytes remaining" #~ msgstr "" -#~ msgid "ISO: Found sr possibly containing the ISO image" +#~ msgid "Unable to update stats on non-intialized Volume Group: %s" #~ msgstr "" -#~ msgid "instance %s: Failed to fetch glance image" +#~ msgid "do_setup: Pool %s does not exist" #~ msgstr "" -#~ msgid "Creating VBD for VDI %s ... " -#~ msgstr "Creating VBD for VDI %s ... " +#~ msgid "migrate_volume started with more than one vdisk copy" +#~ msgstr "" -#~ msgid "Creating VBD for VDI %s done." -#~ msgstr "Creating VBD for VDI %s done." +#~ msgid "migrate_volume: Could not get vdisk copy data" +#~ msgstr "" -#~ msgid "VBD.unplug successful first time." -#~ msgstr "VBD.unplug successful first time." +#~ msgid "Selected datastore: %s for the volume." +#~ msgstr "" -#~ msgid "VBD.unplug rejected: retrying..." -#~ msgstr "VBD.unplug rejected: retrying..." +#~ msgid "There are no valid datastores present under %s." +#~ msgstr "" -#~ msgid "Not sleeping anymore!" +#~ msgid "Unable to create volume, driver not initialized" #~ msgstr "" -#~ msgid "VBD.unplug successful eventually." -#~ msgstr "VBD.unplug successful eventually." +#~ msgid "Migration %(migration_id)s could not be found." +#~ msgstr "" -#~ msgid "Ignoring XenAPI.Failure in VBD.unplug: %s" -#~ msgstr "Ignoring XenAPI.Failure in VBD.unplug: %s" +#~ msgid "Bad driver response status: %(status)s" +#~ msgstr "" -#~ msgid "Ignoring XenAPI.Failure %s" -#~ msgstr "Ignoring XenAPI.Failure %s" +#~ msgid "Instance %(instance_id)s could not be found." +#~ msgstr "" -#~ msgid "Starting instance %s" +#~ msgid "Volume retype failed: %(reason)s" #~ msgstr "" -#~ msgid "instance %s: Failed to spawn" -#~ msgstr "instance %s: Failed to spawn" +#~ msgid "SIGTERM received" +#~ msgstr "" -#~ msgid "Instance %s failed to spawn - performing clean-up" +#~ msgid "Child %(pid)d exited with status %(code)d" #~ msgstr "" -#~ msgid "instance %s: Failed to spawn - Unable to create VM" +#~ msgid "_wait_child %d" #~ msgstr "" +#~ msgid "wait wrap.failed %s" +#~ msgstr "wait wrap.failed %s" + #~ msgid "" -#~ "Auto configuring disk for instance " -#~ "%(instance_uuid)s, attempting to resize " -#~ "partition..." +#~ "Report interval must be less than " +#~ "service down time. Current config: " +#~ ". Setting " +#~ "service_down_time to: %(new_service_down_time)s" #~ msgstr "" -#~ msgid "Invalid value for injected_files: '%s'" +#~ msgid "Failed to update iscsi target for volume %(name)s." #~ msgstr "" -#~ msgid "Starting VM %s..." -#~ msgstr "Starting VM %s..." - -#~ msgid "Spawning VM %(instance_uuid)s created %(vm_ref)s." +#~ msgid "Updating iscsi target: %s" #~ msgstr "" -#~ msgid "Instance %s: waiting for running" +#~ msgid "Failed to update iscsi target %(name)s: %(e)s" #~ msgstr "" -#~ msgid "Instance %s: running" +#~ msgid "Caught '%(exception)s' exception." #~ msgstr "" -#~ msgid "Resources to remove:%s" +#~ msgid "Get code level failed" #~ msgstr "" -#~ msgid "Removing VDI %(vdi_ref)s(uuid:%(vdi_to_remove)s)" +#~ msgid "do_setup: Could not get system name" #~ msgstr "" -#~ msgid "Skipping VDI destroy for %s" +#~ msgid "Failed to get license information." 
#~ msgstr "" -#~ msgid "Unable to Snapshot instance %(instance_uuid)s: %(exc)s" +#~ msgid "do_setup: No configured nodes" #~ msgstr "" -#~ msgid "Updating instance '%(instance_uuid)s' progress to %(progress)d" +#~ msgid "enter: _get_chap_secret_for_host: host name %s" #~ msgstr "" -#~ msgid "Resize instance %s complete" +#~ msgid "" +#~ "leave: _get_chap_secret_for_host: host name " +#~ "%(host_name)s with secret %(chap_secret)s" #~ msgstr "" -#~ msgid "domid changed from %(olddomid)s to %(newdomid)s" +#~ msgid "" +#~ "_create_host: Cannot clean host name. " +#~ "Host name is not unicode or string" #~ msgstr "" -#~ msgid "VM %(instance_uuid)s already halted,skipping shutdown..." +#~ msgid "enter: _get_host_from_connector: %s" #~ msgstr "" -#~ msgid "Shutting down VM for Instance %(instance_uuid)s" +#~ msgid "leave: _get_host_from_connector: host %s" #~ msgstr "" -#~ msgid "Destroying VDIs for Instance %(instance_uuid)s" +#~ msgid "enter: _create_host: host %s" #~ msgstr "" -#~ msgid "" -#~ "Instance %(instance_uuid)s using RAW or " -#~ "VHD, skipping kernel and ramdisk " -#~ "deletion" +#~ msgid "_create_host: No connector ports" #~ msgstr "" -#~ msgid "Instance %(instance_uuid)s VM destroyed" +#~ msgid "leave: _create_host: host %(host)s - %(host_name)s" #~ msgstr "" -#~ msgid "Destroying VM for Instance %(instance_uuid)s" +#~ msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" #~ msgstr "" -#~ msgid "Automatically hard rebooting %d" +#~ msgid "" +#~ "storwize_svc_multihostmap_enabled is set to " +#~ "False, Not allow multi host mapping" #~ msgstr "" -#~ msgid "Instance for migration %d not found, skipping" +#~ msgid "volume %s mapping to multi host" +#~ msgstr "volume %s mapping to multi host" + +#~ msgid "" +#~ "leave: _map_vol_to_host: LUN %(result_lun)s, " +#~ "volume %(volume_name)s, host %(host_name)s" #~ msgstr "" -#~ msgid "injecting network info to xs for vm: |%s|" +#~ msgid "enter: _delete_host: host %s " #~ msgstr "" -#~ msgid "creating vif(s) for vm: |%s|" +#~ msgid "leave: _delete_host: host %s " #~ msgstr "" -#~ msgid "Created VIF %(vif_ref)s for VM %(vm_ref)s, network %(network_ref)s." -#~ msgstr "Created VIF %(vif_ref)s for VM %(vm_ref)s, network %(network_ref)s." +#~ msgid "_create_host failed to return the host name." +#~ msgstr "" -#~ msgid "injecting hostname to xs for vm: |%s|" +#~ msgid "_get_host_from_connector failed to return the host name for connector" #~ msgstr "" #~ msgid "" -#~ "The agent call to %(method)s returned" -#~ " an invalid response: %(ret)r. VM " -#~ "id=%(instance_uuid)s; path=%(path)s; args=%(addl_args)r" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to any host found." #~ msgstr "" #~ msgid "" -#~ "TIMEOUT: The call to %(method)s timed" -#~ " out. VM id=%(instance_uuid)s; args=%(args)r" +#~ "terminate_connection: Multiple mappings of " +#~ "volume %(vol_name)s found, no host " +#~ "specified." #~ msgstr "" #~ msgid "" -#~ "NOT IMPLEMENTED: The call to %(method)s" -#~ " is not supported by the agent. " -#~ "VM id=%(instance_uuid)s; args=%(args)r" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to host %(host_name)s found" +#~ msgstr "" + +#~ msgid "protocol must be specified as ' iSCSI' or ' FC'" +#~ msgstr "" + +#~ msgid "enter: _create_vdisk: vdisk %s " #~ msgstr "" #~ msgid "" -#~ "The call to %(method)s returned an " -#~ "error: %(e)s. 
VM id=%(instance_uuid)s; " -#~ "args=%(args)r" +#~ "_create_vdisk %(name)s - did not find success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" -#~ msgid "Creating VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " -#~ msgstr "Creating VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find success " +#~ "message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" -#~ msgid "Error destroying VDI" +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find mapping " +#~ "id in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" #~ msgstr "" -#~ msgid "\tVolume %s is NOT VSA volume" +#~ msgid "" +#~ "_prepare_fc_map: Failed to prepare FlashCopy" +#~ " from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s creation - do nothing" +#~ msgid "" +#~ "Unexecpted mapping status %(status)s for " +#~ "mapping %(id)s. Attributes: %(attr)s" #~ msgstr "" -#~ msgid "VSA BE create_volume for %s failed" +#~ msgid "" +#~ "Mapping %(id)s prepare failed to " +#~ "complete within the allotted %(to)d " +#~ "seconds timeout. Terminating." #~ msgstr "" -#~ msgid "VSA BE create_volume for %s succeeded" +#~ msgid "" +#~ "_prepare_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s with " +#~ "exception %(ex)s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s deletion - do nothing" +#~ msgid "_prepare_fc_map: %s" #~ msgstr "" -#~ msgid "VSA BE delete_volume for %s failed" +#~ msgid "" +#~ "_start_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" -#~ msgid "VSA BE delete_volume for %s suceeded" +#~ msgid "" +#~ "enter: _run_flashcopy: execute FlashCopy from" +#~ " source %(source)s to target %(target)s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s local path call - call discover" +#~ msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s ensure export - do nothing" +#~ msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s create export - do nothing" +#~ msgid "_create_copy: Source vdisk %(src_vdisk)s (%(src_id)s) does not exist" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s remove export - do nothing" +#~ msgid "" +#~ "_create_copy: cannot get source vdisk " +#~ "%(src)s capacity from vdisk attributes " +#~ "%(attr)s" #~ msgstr "" -#~ msgid "VSA BE remove_export for %s failed" +#~ msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" #~ msgstr "" -#~ msgid "Failed to retrieve QoS info" +#~ msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" #~ msgstr "" -#~ msgid "invalid drive data" +#~ msgid "" +#~ "leave: _get_flashcopy_mapping_attributes: mapping " +#~ "%(fc_map_id)s, attributes %(attributes)s" #~ msgstr "" -#~ msgid "drive_name not defined" +#~ msgid "enter: _is_vdisk_defined: vdisk %s " #~ msgstr "" -#~ msgid "invalid drive type name %s" +#~ msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " #~ msgstr "" -#~ msgid "*** Experimental VSA code ***" +#~ msgid "enter: _delete_vdisk: vdisk %s" #~ msgstr "" -#~ msgid "Requested number of VCs (%d) is too high. Setting to default" +#~ msgid "warning: Tried to delete vdisk %s but it does not exist." 
#~ msgstr "" -#~ msgid "Creating VSA: %s" +#~ msgid "leave: _delete_vdisk: vdisk %s" #~ msgstr "" #~ msgid "" -#~ "VSA ID %(vsa_id)d %(vsa_name)s: Create " -#~ "volume %(vol_name)s, %(vol_size)d GB, type " -#~ "%(vol_type_id)s" +#~ "_add_vdisk_copy %(name)s - did not find" +#~ " success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)d: Update VSA status to %(status)s" +#~ msgid "_get_vdisk_copy_attrs: Could not get vdisk copy data" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)d: Update VSA call" +#~ msgid "_get_pool_attrs: Pool %s does not exist" #~ msgstr "" -#~ msgid "Adding %(add_cnt)s VCs to VSA %(vsa_name)s." +#~ msgid "enter: _execute_command_and_parse_attributes: command %s" #~ msgstr "" -#~ msgid "Deleting %(del_cnt)s VCs from VSA %(vsa_name)s." +#~ msgid "" +#~ "leave: _execute_command_and_parse_attributes:\n" +#~ "command: %(cmd)s\n" +#~ "attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "_get_hdr_dic: attribute headers and values do not match.\n" +#~ " Headers: %(header)s\n" +#~ " Values: %(row)s" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Deleting %(direction)s volume %(vol_name)s" +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ "stdout: %(out)s\n" +#~ "stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" -#~ msgid "Unable to delete volume %s" +#~ msgid "Did not find expected column in %(fun)s: %(hdr)s" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Forced delete. %(direction)s volume %(vol_name)s" +#~ msgid "" +#~ "Volume size %(size)s cannot be lesser" +#~ " than the snapshot size %(snap_size)s. " +#~ "They must be >= original snapshot " +#~ "size." #~ msgstr "" -#~ msgid "Going to try to terminate VSA ID %s" +#~ msgid "" +#~ "Clones currently disallowed when %(size)s " +#~ "< %(source_size)s. They must be >= " +#~ "original volume size." #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Delete instance %(name)s" +#~ msgid "" +#~ "Size of specified image %(image_size)s " +#~ "is larger than volume size " +#~ "%(volume_size)s." #~ msgstr "" -#~ msgid "Create call received for VSA %s" +#~ msgid "" +#~ "Image minDisk size %(min_disk)s is " +#~ "larger than the volume size " +#~ "%(volume_size)s." #~ msgstr "" -#~ msgid "Failed to find VSA %(vsa_id)d" +#~ msgid "Updating volume %(volume_id)s with %(update)s" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Drive %(vol_id)s created. Status %(status)s" +#~ msgid "Volume %s: resetting 'creating' status failed" #~ msgstr "" -#~ msgid "Drive %(vol_name)s (%(vol_disp_name)s) still in creating phase - wait" +#~ msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s" #~ msgstr "" -#~ msgid "" -#~ "VSA ID %(vsa_id)d: Not all volumes " -#~ "are created (%(cvol_real)d of %(cvol_exp)d)" +#~ msgid "Marking volume %s as bootable" #~ msgstr "" #~ msgid "" -#~ "VSA ID %(vsa_id)d: Drive %(vol_name)s " -#~ "(%(vol_disp_name)s) is in %(status)s state" +#~ "Attempting download of %(image_id)s " +#~ "(%(image_location)s) to volume %(volume_id)s" #~ msgstr "" -#~ msgid "Failed to update attach status for volume %(vol_name)s. 
%(ex)s" +#~ msgid "" +#~ "Downloaded image %(image_id)s (%(image_location)s)" +#~ " to volume %(volume_id)s successfully" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)d: Delete all BE volumes" +#~ msgid "" +#~ "Creating volume glance metadata for " +#~ "volume %(volume_id)s backed by image " +#~ "%(image_id)s with: %(vol_metadata)s" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)d: Start %(vc_count)d instances" +#~ msgid "" +#~ "Cloning %(volume_id)s from image %(image_id)s" +#~ " at location %(image_location)s" #~ msgstr "" diff --git a/cinder/locale/en_GB/LC_MESSAGES/cinder.po b/cinder/locale/en_GB/LC_MESSAGES/cinder.po index e816526c83..b687c02237 100644 --- a/cinder/locale/en_GB/LC_MESSAGES/cinder.po +++ b/cinder/locale/en_GB/LC_MESSAGES/cinder.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: cinder\n" "Report-Msgid-Bugs-To: FULL NAME \n" -"POT-Creation-Date: 2012-04-08 23:04+0000\n" +"POT-Creation-Date: 2014-02-03 06:16+0000\n" "PO-Revision-Date: 2012-03-30 11:10+0000\n" "Last-Translator: Anthony Harrington \n" "Language-Team: English (United Kingdom) \n" @@ -15,8195 +15,10747 @@ msgstr "" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 0.9.6\n" +"Generated-By: Babel 1.3\n" -#: cinder/context.py:59 +#: cinder/context.py:61 #, python-format msgid "Arguments dropped when creating context: %s" msgstr "" -#: cinder/context.py:90 +#: cinder/context.py:102 #, python-format msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" msgstr "" -#: cinder/crypto.py:48 -msgid "Filename of root CA" -msgstr "Filename of root CA" - -#: cinder/crypto.py:51 -msgid "Filename of private key" -msgstr "Filename of private key" - -#: cinder/crypto.py:54 -msgid "Filename of root Certificate Revocation List" -msgstr "Filename of root Certificate Revocation List" - -#: cinder/crypto.py:57 -msgid "Where we keep our keys" -msgstr "Where we keep our keys" - -#: cinder/crypto.py:60 -msgid "Where we keep our root CA" -msgstr "Where we keep our root CA" +#: cinder/exception.py:66 cinder/brick/exception.py:33 +msgid "An unknown exception occurred." +msgstr "An unknown exception occurred." -#: cinder/crypto.py:63 -msgid "Should we use a CA for each project?" -msgstr "Should we use a CA for each project?" +#: cinder/exception.py:88 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" -#: cinder/crypto.py:67 +#: cinder/exception.py:107 #, python-format -msgid "Subject for certificate for users, %s for project, user, timestamp" -msgstr "Subject for certificate for users, %s for project, user, timestamp" +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" -#: cinder/crypto.py:72 +#: cinder/exception.py:112 #, python-format -msgid "Subject for certificate for projects, %s for project, timestamp" -msgstr "Subject for certificate for projects, %s for project, timestamp" +msgid "Volume driver reported an error: %(message)s" +msgstr "" -#: cinder/crypto.py:292 +#: cinder/exception.py:116 #, python-format -msgid "Flags path: %s" -msgstr "Flags path: %s" - -#: cinder/exception.py:56 -msgid "Unexpected error while running command." -msgstr "Unexpected error while running command." 
+msgid "Backup driver reported an error: %(message)s" +msgstr "" -#: cinder/exception.py:59 +#: cinder/exception.py:120 #, python-format -msgid "" -"%(description)s\n" -"Command: %(cmd)s\n" -"Exit code: %(exit_code)s\n" -"Stdout: %(stdout)r\n" -"Stderr: %(stderr)r" +msgid "Connection to glance failed: %(reason)s" msgstr "" -"%(description)s\n" -"Command: %(cmd)s\n" -"Exit code: %(exit_code)s\n" -"Stdout: %(stdout)r\n" -"Stderr: %(stderr)r" - -#: cinder/exception.py:94 -msgid "DB exception wrapped." -msgstr "DB exception wrapped." - -#: cinder/exception.py:155 -msgid "An unknown exception occurred." -msgstr "An unknown exception occurred." - -#: cinder/exception.py:178 -msgid "Failed to decrypt text" -msgstr "Failed to decrypt text" - -#: cinder/exception.py:182 -msgid "Failed to paginate through images from image service" -msgstr "Failed to paginate through images from image service" - -#: cinder/exception.py:186 -msgid "Virtual Interface creation failed" -msgstr "Virtual Interface creation failed" - -#: cinder/exception.py:190 -msgid "5 attempts to create virtual interfacewith unique mac address failed" -msgstr "5 attempts to create virtual interface with unique mac address failed" -#: cinder/exception.py:195 -msgid "Connection to glance failed" -msgstr "Connection to glance failed" - -#: cinder/exception.py:199 -msgid "Connection to melange failed" -msgstr "Connection to melange failed" - -#: cinder/exception.py:203 +#: cinder/exception.py:124 msgid "Not authorized." msgstr "Not authorised." -#: cinder/exception.py:208 +#: cinder/exception.py:129 msgid "User does not have admin privileges" msgstr "User does not have admin privileges" -#: cinder/exception.py:212 +#: cinder/exception.py:133 #, python-format msgid "Policy doesn't allow %(action)s to be performed." msgstr "Policy doesn't allow %(action)s to be performed." -#: cinder/exception.py:216 +#: cinder/exception.py:137 #, python-format msgid "Not authorized for image %(image_id)s." msgstr "" -#: cinder/exception.py:220 +#: cinder/exception.py:141 +msgid "Volume driver not ready." +msgstr "" + +#: cinder/exception.py:145 cinder/brick/exception.py:74 msgid "Unacceptable parameters." msgstr "Unacceptable parameters." -#: cinder/exception.py:225 -msgid "Invalid snapshot" -msgstr "Invalid snapshot" +#: cinder/exception.py:150 +#, python-format +msgid "Invalid snapshot: %(reason)s" +msgstr "" -#: cinder/exception.py:229 +#: cinder/exception.py:154 #, python-format -msgid "Volume %(volume_id)s is not attached to anything" -msgstr "Volume %(volume_id)s is not attached to anything" +msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." +msgstr "" -#: cinder/exception.py:233 cinder/api/openstack/compute/contrib/keypairs.py:113 -msgid "Keypair data is invalid" -msgstr "Keypair data is invalid" +#: cinder/exception.py:159 +#, fuzzy, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "Volume %(volume_id)s is still attached, detach volume first." -#: cinder/exception.py:237 +#: cinder/exception.py:163 msgid "Failed to load data into json format" msgstr "Failed to load data into json format" -#: cinder/exception.py:241 -msgid "The request is invalid." +#: cinder/exception.py:167 +#, fuzzy +msgid "The results are invalid." msgstr "The request is invalid." -#: cinder/exception.py:245 -#, python-format -msgid "Invalid signature %(signature)s for user %(user)s." -msgstr "Invalid signature %(signature)s for user %(user)s." 
- -#: cinder/exception.py:249 -msgid "Invalid input received" -msgstr "Invalid input received" - -#: cinder/exception.py:253 +#: cinder/exception.py:171 #, python-format -msgid "Invalid instance type %(instance_type)s." -msgstr "Invalid instance type %(instance_type)s." - -#: cinder/exception.py:257 -msgid "Invalid volume type" -msgstr "Invalid volume type" - -#: cinder/exception.py:261 -msgid "Invalid volume" -msgstr "Invalid volume" +msgid "Invalid input received: %(reason)s" +msgstr "" -#: cinder/exception.py:265 +#: cinder/exception.py:175 #, python-format -msgid "Invalid port range %(from_port)s:%(to_port)s. %(msg)s" -msgstr "Invalid port range %(from_port)s:%(to_port)s. %(msg)s" +msgid "Invalid volume type: %(reason)s" +msgstr "" -#: cinder/exception.py:269 +#: cinder/exception.py:179 #, python-format -msgid "Invalid IP protocol %(protocol)s." -msgstr "Invalid IP protocol %(protocol)s." +msgid "Invalid volume: %(reason)s" +msgstr "" -#: cinder/exception.py:273 +#: cinder/exception.py:183 #, python-format msgid "Invalid content type %(content_type)s." msgstr "Invalid content type %(content_type)s." -#: cinder/exception.py:277 +#: cinder/exception.py:187 #, python-format -msgid "Invalid cidr %(cidr)s." -msgstr "Invalid cidr %(cidr)s." - -#: cinder/exception.py:281 -msgid "Invalid reuse of an RPC connection." -msgstr "Invalid reuse of an RPC connection." - -#: cinder/exception.py:285 -msgid "Invalid Parameter: Unicode is not supported by the current database." +msgid "Invalid host: %(reason)s" msgstr "" -#: cinder/exception.py:292 +#: cinder/exception.py:193 cinder/brick/exception.py:81 #, python-format msgid "%(err)s" msgstr "%(err)s" -#: cinder/exception.py:296 +#: cinder/exception.py:197 #, python-format -msgid "" -"Cannot perform action '%(action)s' on aggregate %(aggregate_id)s. Reason:" -" %(reason)s." +msgid "Invalid auth key: %(reason)s" msgstr "" -"Cannot perform action '%(action)s' on aggregate %(aggregate_id)s. Reason:" -" %(reason)s." -#: cinder/exception.py:301 +#: cinder/exception.py:201 #, python-format -msgid "Group not valid. Reason: %(reason)s" +msgid "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" msgstr "" -#: cinder/exception.py:305 -#, python-format -msgid "" -"Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot %(method)s while" -" the instance is in this state." +#: cinder/exception.py:206 +msgid "Service is unavailable at this time." msgstr "" -"Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot %(method)s while" -" the instance is in this state." -#: cinder/exception.py:310 +#: cinder/exception.py:210 #, python-format -msgid "Instance %(instance_id)s is not running." -msgstr "Instance %(instance_id)s is not running." +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" -#: cinder/exception.py:314 +#: cinder/exception.py:214 #, python-format -msgid "Instance %(instance_id)s is not suspended." +msgid "The device in the path %(path)s is unavailable: %(reason)s" msgstr "" -#: cinder/exception.py:318 +#: cinder/exception.py:218 #, python-format -msgid "Instance %(instance_id)s is not in rescue mode" +msgid "Expected a uuid but received %(uuid)s." msgstr "" -#: cinder/exception.py:322 -msgid "Failed to suspend instance" +#: cinder/exception.py:222 cinder/brick/exception.py:68 +msgid "Resource could not be found." msgstr "" -#: cinder/exception.py:326 -msgid "Failed to resume server" +#: cinder/exception.py:228 +#, python-format +msgid "Volume %(volume_id)s could not be found." 
msgstr "" -#: cinder/exception.py:330 -msgid "Failed to reboot instance" +#: cinder/exception.py:232 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." msgstr "" -#: cinder/exception.py:334 -msgid "Failed to terminate instance" +#: cinder/exception.py:237 +#, python-format +msgid "" +"Volume %(volume_id)s has no administration metadata with key " +"%(metadata_key)s." msgstr "" -#: cinder/exception.py:338 -msgid "Service is unavailable at this time." +#: cinder/exception.py:242 +#, python-format +msgid "Invalid metadata: %(reason)s" msgstr "" -#: cinder/exception.py:342 -msgid "Volume service is unavailable at this time." +#: cinder/exception.py:246 +#, python-format +msgid "Invalid metadata size: %(reason)s" msgstr "" -#: cinder/exception.py:346 -msgid "Compute service is unavailable at this time." +#: cinder/exception.py:250 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." msgstr "" -#: cinder/exception.py:350 +#: cinder/exception.py:255 #, python-format -msgid "Unable to migrate instance (%(instance_id)s) to current host (%(host)s)." +msgid "Volume type %(volume_type_id)s could not be found." msgstr "" -#: cinder/exception.py:355 -msgid "Destination compute host is unavailable at this time." +#: cinder/exception.py:259 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." msgstr "" -#: cinder/exception.py:359 -msgid "Original compute host is unavailable at this time." +#: cinder/exception.py:264 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." msgstr "" -#: cinder/exception.py:363 -msgid "The supplied hypervisor type of is invalid." +#: cinder/exception.py:269 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s deletion is not allowed with volumes " +"present with the type." msgstr "" -#: cinder/exception.py:367 -msgid "The instance requires a newer hypervisor version than has been provided." +#: cinder/exception.py:274 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." msgstr "" -#: cinder/exception.py:372 +#: cinder/exception.py:278 #, python-format -msgid "" -"The supplied disk path (%(path)s) already exists, it is expected not to " -"exist." +msgid "deleting volume %(volume_name)s that has snapshot" msgstr "" -#: cinder/exception.py:377 +#: cinder/exception.py:282 #, python-format -msgid "The supplied device path (%(path)s) is invalid." +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" msgstr "" -#: cinder/exception.py:381 +#: cinder/exception.py:287 #, python-format -msgid "The supplied device (%(device)s) is busy." +msgid "No target id found for volume %(volume_id)s." msgstr "" -#: cinder/exception.py:385 -msgid "Unacceptable CPU info" +#: cinder/exception.py:291 +#, python-format +msgid "Invalid image href %(image_href)s." msgstr "" -#: cinder/exception.py:389 +#: cinder/exception.py:295 #, python-format -msgid "%(address)s is not a valid IP v4/6 address." +msgid "Image %(image_id)s could not be found." msgstr "" -#: cinder/exception.py:393 +#: cinder/exception.py:299 #, python-format -msgid "" -"VLAN tag is not appropriate for the port group %(bridge)s. Expected VLAN " -"tag is %(tag)s, but the one associated with the port group is %(pgroup)s." +msgid "Service %(service_id)s could not be found." 
msgstr "" -#: cinder/exception.py:399 +#: cinder/exception.py:303 #, python-format -msgid "" -"vSwitch which contains the port group %(bridge)s is not associated with " -"the desired physical adapter. Expected vSwitch is %(expected)s, but the " -"one associated is %(actual)s." +msgid "Host %(host)s could not be found." msgstr "" -#: cinder/exception.py:406 +#: cinder/exception.py:307 #, python-format -msgid "Disk format %(disk_format)s is not acceptable" +msgid "Scheduler Host Filter %(filter_name)s could not be found." msgstr "" -#: cinder/exception.py:410 +#: cinder/exception.py:311 #, python-format -msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." msgstr "" -#: cinder/exception.py:414 +#: cinder/exception.py:315 #, python-format -msgid "Instance %(instance_id)s is unacceptable: %(reason)s" +msgid "Could not find binary %(binary)s on host %(host)s." msgstr "" -#: cinder/exception.py:418 +#: cinder/exception.py:319 #, python-format -msgid "Ec2 id %(ec2_id)s is unacceptable." +msgid "Invalid reservation expiration %(expire)s." msgstr "" -#: cinder/exception.py:422 -msgid "Resource could not be found." +#: cinder/exception.py:323 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" msgstr "" -#: cinder/exception.py:427 -#, python-format -msgid "Required flag %(flag)s not set." +#: cinder/exception.py:328 +msgid "Quota could not be found" msgstr "" -#: cinder/exception.py:431 +#: cinder/exception.py:332 #, python-format -msgid "Volume %(volume_id)s could not be found." +msgid "Unknown quota resources %(unknown)s." msgstr "" -#: cinder/exception.py:435 +#: cinder/exception.py:336 #, python-format -msgid "Unable to locate account %(account_name)s on Solidfire device" +msgid "Quota for project %(project_id)s could not be found." msgstr "" -#: cinder/exception.py:440 +#: cinder/exception.py:340 #, python-format -msgid "Volume not found for instance %(instance_id)s." +msgid "Quota class %(class_name)s could not be found." msgstr "" -#: cinder/exception.py:444 +#: cinder/exception.py:344 #, python-format -msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgid "Quota usage for project %(project_id)s could not be found." msgstr "" -#: cinder/exception.py:449 -msgid "Zero volume types found." +#: cinder/exception.py:348 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." msgstr "" -#: cinder/exception.py:453 +#: cinder/exception.py:352 #, python-format -msgid "Volume type %(volume_type_id)s could not be found." +msgid "Quota exceeded for resources: %(overs)s" msgstr "" -#: cinder/exception.py:457 +#: cinder/exception.py:356 #, python-format -msgid "Volume type with name %(volume_type_name)s could not be found." +msgid "File %(file_path)s could not be found." msgstr "" -#: cinder/exception.py:462 +#: cinder/exception.py:365 #, python-format -msgid "" -"Volume Type %(volume_type_id)s has no extra specs with key " -"%(extra_specs_key)s." +msgid "Volume Type %(id)s already exists." msgstr "" -#: cinder/exception.py:467 +#: cinder/exception.py:369 #, python-format -msgid "Snapshot %(snapshot_id)s could not be found." +msgid "Volume type encryption for type %(type_id)s already exists." 
msgstr "" -#: cinder/exception.py:471 +#: cinder/exception.py:373 #, python-format -msgid "deleting volume %(volume_name)s that has snapshot" +msgid "Malformed message body: %(reason)s" msgstr "" -#: cinder/exception.py:475 +#: cinder/exception.py:377 #, python-format -msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgid "Could not find config at %(path)s" msgstr "" -#: cinder/exception.py:480 +#: cinder/exception.py:381 +#, fuzzy, python-format +msgid "Could not find parameter %(param)s" +msgstr "Could not find parameter %(param)s" + +#: cinder/exception.py:385 #, python-format -msgid "No target id found for volume %(volume_id)s." +msgid "Could not load paste app '%(name)s' from %(path)s" msgstr "" -#: cinder/exception.py:484 +#: cinder/exception.py:389 #, python-format -msgid "No disk at %(location)s" +msgid "No valid host was found. %(reason)s" msgstr "" -#: cinder/exception.py:488 +#: cinder/exception.py:398 #, python-format -msgid "Could not find a handler for %(driver_type)s volume." +msgid "Host %(host)s is not up or doesn't exist." msgstr "" -#: cinder/exception.py:492 +#: cinder/exception.py:402 #, python-format -msgid "Invalid image href %(image_href)s." +msgid "Quota exceeded: code=%(code)s" msgstr "" -#: cinder/exception.py:496 +#: cinder/exception.py:409 +#, python-format msgid "" -"Some images have been stored via hrefs. This version of the api does not " -"support displaying image hrefs." +"Requested volume or snapshot exceeds allowed Gigabytes quota. Requested " +"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." msgstr "" -#: cinder/exception.py:501 +#: cinder/exception.py:415 #, python-format -msgid "Image %(image_id)s could not be found." +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" msgstr "" -#: cinder/exception.py:505 +#: cinder/exception.py:419 #, python-format -msgid "Kernel not found for image %(image_id)s." +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" msgstr "" -#: cinder/exception.py:509 +#: cinder/exception.py:423 #, python-format -msgid "User %(user_id)s could not be found." +msgid "Detected more than one volume with name %(vol_name)s" msgstr "" -#: cinder/exception.py:513 +#: cinder/exception.py:427 #, python-format -msgid "Project %(project_id)s could not be found." +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" msgstr "" -#: cinder/exception.py:517 +#: cinder/exception.py:432 #, python-format -msgid "User %(user_id)s is not a member of project %(project_id)s." +msgid "Unknown or unsupported command %(cmd)s" msgstr "" -#: cinder/exception.py:521 +#: cinder/exception.py:436 #, python-format -msgid "Role %(role_id)s could not be found." +msgid "Malformed response to command %(cmd)s: %(reason)s" msgstr "" -#: cinder/exception.py:525 -msgid "Cannot find SR to read/write VDI." +#: cinder/exception.py:440 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" msgstr "" -#: cinder/exception.py:529 +#: cinder/exception.py:444 #, python-format -msgid "%(req)s is required to create a network." +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" msgstr "" -#: cinder/exception.py:533 +#: cinder/exception.py:449 #, python-format -msgid "Network %(network_id)s could not be found." +msgid "Glance metadata for volume/snapshot %(id)s cannot be found." 
msgstr "" -#: cinder/exception.py:537 +#: cinder/exception.py:453 #, python-format -msgid "Network could not be found for bridge %(bridge)s" +msgid "Failed to export for volume: %(reason)s" msgstr "" -#: cinder/exception.py:541 +#: cinder/exception.py:457 #, python-format -msgid "Network could not be found for uuid %(uuid)s" +msgid "Failed to create metadata for volume: %(reason)s" msgstr "" -#: cinder/exception.py:545 +#: cinder/exception.py:461 #, python-format -msgid "Network could not be found with cidr %(cidr)s." +msgid "Failed to update metadata for volume: %(reason)s" msgstr "" -#: cinder/exception.py:549 +#: cinder/exception.py:465 #, python-format -msgid "Network could not be found for instance %(instance_id)s." +msgid "Failed to copy metadata to volume: %(reason)s" msgstr "" -#: cinder/exception.py:553 -msgid "No networks defined." +#: cinder/exception.py:469 +#, fuzzy, python-format +msgid "Failed to copy image to volume: %(reason)s" +msgstr "Failed to copy image to volume: %(reason)s" + +#: cinder/exception.py:473 +msgid "Invalid Ceph args provided for backup rbd operation" msgstr "" -#: cinder/exception.py:557 -#, python-format -msgid "" -"Either Network uuid %(network_uuid)s is not present or is not assigned to" -" the project %(project_id)s." +#: cinder/exception.py:477 +msgid "An error has occurred during backup operation" msgstr "" -#: cinder/exception.py:562 -#, python-format -msgid "Host is not set to the network (%(network_id)s)." +#: cinder/exception.py:481 +msgid "Backup RBD operation failed" msgstr "" -#: cinder/exception.py:566 +#: cinder/exception.py:485 #, python-format -msgid "Network %(network)s has active ports, cannot delete." +msgid "Backup %(backup_id)s could not be found." msgstr "" -#: cinder/exception.py:570 -msgid "Could not find the datastore reference(s) which the VM uses." +#: cinder/exception.py:489 +msgid "Failed to identify volume backend." msgstr "" -#: cinder/exception.py:574 -#, python-format -msgid "No fixed IP associated with id %(id)s." -msgstr "" +#: cinder/exception.py:493 +#, fuzzy, python-format +msgid "Invalid backup: %(reason)s" +msgstr "Invalid backup: %(reason)s" -#: cinder/exception.py:578 +#: cinder/exception.py:497 #, python-format -msgid "Fixed ip not found for address %(address)s." +msgid "Connection to swift failed: %(reason)s" msgstr "" -#: cinder/exception.py:582 +#: cinder/exception.py:501 #, python-format -msgid "Instance %(instance_id)s has zero fixed ips." +msgid "Transfer %(transfer_id)s could not be found." msgstr "" -#: cinder/exception.py:586 +#: cinder/exception.py:505 #, python-format -msgid "Network host %(host)s has zero fixed ips in network %(network_id)s." +msgid "Volume migration failed: %(reason)s" msgstr "" -#: cinder/exception.py:591 +#: cinder/exception.py:509 #, python-format -msgid "Instance %(instance_id)s doesn't have fixed ip '%(ip)s'." +msgid "SSH command injection detected: %(command)s" msgstr "" -#: cinder/exception.py:595 +#: cinder/exception.py:513 #, python-format -msgid "Host %(host)s has zero fixed ips." +msgid "QoS Specs %(specs_id)s already exists." msgstr "" -#: cinder/exception.py:599 +#: cinder/exception.py:517 #, python-format -msgid "" -"Fixed IP address (%(address)s) does not exist in network " -"(%(network_uuid)s)." +msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." msgstr "" -#: cinder/exception.py:604 +#: cinder/exception.py:522 #, python-format -msgid "Fixed IP address %(address)s is already in use." 
+msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." msgstr "" -#: cinder/exception.py:608 +#: cinder/exception.py:527 #, python-format -msgid "Fixed IP address %(address)s is invalid." +msgid "No such QoS spec %(specs_id)s." msgstr "" -#: cinder/exception.py:612 -msgid "Zero fixed ips available." +#: cinder/exception.py:531 +#, python-format +msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." msgstr "" -#: cinder/exception.py:616 -msgid "Zero fixed ips could be found." +#: cinder/exception.py:536 +#, python-format +msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." msgstr "" -#: cinder/exception.py:620 +#: cinder/exception.py:541 #, python-format -msgid "Floating ip not found for id %(id)s." +msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." msgstr "" -#: cinder/exception.py:624 +#: cinder/exception.py:546 #, python-format -msgid "The DNS entry %(name)s already exists in domain %(domain)s." +msgid "Invalid qos specs: %(reason)s" msgstr "" -#: cinder/exception.py:628 +#: cinder/exception.py:550 #, python-format -msgid "Floating ip not found for address %(address)s." +msgid "QoS Specs %(specs_id)s is still associated with entities." msgstr "" -#: cinder/exception.py:632 +#: cinder/exception.py:554 #, python-format -msgid "Floating ip not found for host %(host)s." +msgid "key manager error: %(reason)s" msgstr "" -#: cinder/exception.py:636 -msgid "Zero floating ips available." +#: cinder/exception.py:560 +msgid "Coraid Cinder Driver exception." msgstr "" -#: cinder/exception.py:640 -#, python-format -msgid "Floating ip %(address)s is associated." +#: cinder/exception.py:564 +msgid "Failed to encode json data." msgstr "" -#: cinder/exception.py:644 -#, python-format -msgid "Floating ip %(address)s is not associated." +#: cinder/exception.py:568 +msgid "Login on ESM failed." msgstr "" -#: cinder/exception.py:648 -msgid "Zero floating ips exist." +#: cinder/exception.py:572 +msgid "Relogin on ESM failed." msgstr "" -#: cinder/exception.py:652 +#: cinder/exception.py:576 #, python-format -msgid "Interface %(interface)s not found." +msgid "Group with name \"%(group_name)s\" not found." msgstr "" -#: cinder/exception.py:656 +#: cinder/exception.py:580 #, python-format -msgid "Keypair %(name)s not found for user %(user_id)s" +msgid "ESM configure request failed: %(message)s." msgstr "" -#: cinder/exception.py:660 +#: cinder/exception.py:584 #, python-format -msgid "Certificate %(certificate_id)s not found." +msgid "Coraid ESM not available with reason: %(reason)s." msgstr "" -#: cinder/exception.py:664 -#, python-format -msgid "Service %(service_id)s could not be found." +#: cinder/exception.py:589 +msgid "Zadara Cinder Driver exception." msgstr "" -#: cinder/exception.py:668 +#: cinder/exception.py:593 +#, fuzzy, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "Unable to create server object for initiator %(name)s" + +#: cinder/exception.py:597 #, python-format -msgid "Host %(host)s could not be found." +msgid "Unable to find server object for initiator %(name)s" msgstr "" -#: cinder/exception.py:672 -#, python-format -msgid "Compute host %(host)s could not be found." +#: cinder/exception.py:601 +msgid "Unable to find any active VPSA controller" msgstr "" -#: cinder/exception.py:676 +#: cinder/exception.py:605 #, python-format -msgid "Could not find binary %(binary)s on host %(host)s." 
+msgid "Failed to retrieve attachments for volume %(name)s" msgstr "" -#: cinder/exception.py:680 +#: cinder/exception.py:609 #, python-format -msgid "Auth token %(token)s could not be found." +msgid "Invalid attachment info for volume %(name)s: %(reason)s" msgstr "" -#: cinder/exception.py:684 +#: cinder/exception.py:613 #, python-format -msgid "Access Key %(access_key)s could not be found." +msgid "Bad HTTP response status %(status)s" msgstr "" -#: cinder/exception.py:688 -msgid "Quota could not be found" +#: cinder/exception.py:618 +msgid "Bad response from SolidFire API" msgstr "" -#: cinder/exception.py:692 -#, python-format -msgid "Quota for project %(project_id)s could not be found." +#: cinder/exception.py:622 +msgid "SolidFire Cinder Driver exception" msgstr "" -#: cinder/exception.py:696 +#: cinder/exception.py:626 #, python-format -msgid "Quota class %(class_name)s could not be found." +msgid "Error in SolidFire API response: data=%(data)s" msgstr "" -#: cinder/exception.py:700 +#: cinder/exception.py:630 #, python-format -msgid "Security group %(security_group_id)s not found." +msgid "Unable to locate account %(account_name)s on Solidfire device" msgstr "" -#: cinder/exception.py:704 +#: cinder/exception.py:636 #, python-format -msgid "Security group %(security_group_id)s not found for project %(project_id)s." +msgid "Invalid 3PAR Domain: %(err)s" msgstr "" -#: cinder/exception.py:709 -#, python-format -msgid "Security group with rule %(rule_id)s not found." +#: cinder/exception.py:641 +#, fuzzy +msgid "Unknown NFS exception" +msgstr "Unknown NFS exception" + +#: cinder/exception.py:645 +msgid "No mounted NFS shares found" msgstr "" -#: cinder/exception.py:713 +#: cinder/exception.py:649 cinder/exception.py:662 #, python-format -msgid "" -"Security group %(security_group_id)s is already associated with the " -"instance %(instance_id)s" +msgid "There is no share which can host %(volume_size)sG" msgstr "" -#: cinder/exception.py:718 -#, python-format -msgid "" -"Security group %(security_group_id)s is not associated with the instance " -"%(instance_id)s" +#: cinder/exception.py:654 +#, fuzzy +msgid "Unknown Gluster exception" +msgstr "Unknown Gluster exception" + +#: cinder/exception.py:658 +msgid "No mounted Gluster shares found" msgstr "" -#: cinder/exception.py:723 -#, python-format -msgid "Migration %(migration_id)s could not be found." +#: cinder/manager.py:133 +msgid "Notifying Schedulers of capabilities ..." msgstr "" -#: cinder/exception.py:727 -#, python-format -msgid "Migration not found for instance %(instance_id)s with status %(status)s." +#: cinder/policy.py:30 +msgid "JSON file representing policy" msgstr "" -#: cinder/exception.py:732 -#, python-format -msgid "Console pool %(pool_id)s could not be found." +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" msgstr "" -#: cinder/exception.py:736 +#: cinder/quota.py:105 #, python-format msgid "" -"Console pool of type %(console_type)s for compute host %(compute_host)s " -"on proxy host %(host)s not found." +"Default quota for resource: %(res)s is set by the default quota flag: " +"quota_%(res)s, it is now deprecated. Please use the the default quota " +"class for default quota." msgstr "" -#: cinder/exception.py:742 +#: cinder/quota.py:748 #, python-format -msgid "Console %(console_id)s could not be found." +msgid "Created reservations %s" msgstr "" -#: cinder/exception.py:746 +#: cinder/quota.py:770 #, python-format -msgid "Console for instance %(instance_id)s could not be found." 
+msgid "Failed to commit reservations %s" msgstr "" -#: cinder/exception.py:750 +#: cinder/quota.py:790 #, python-format -msgid "" -"Console for instance %(instance_id)s in pool %(pool_id)s could not be " -"found." +msgid "Failed to roll back reservations %s" msgstr "" -#: cinder/exception.py:755 -#, python-format -msgid "Invalid console type %(console_type)s " +#: cinder/quota.py:876 +msgid "Cannot register resource" msgstr "" -#: cinder/exception.py:759 -msgid "Zero instance types found." +#: cinder/quota.py:879 +msgid "Cannot register resources" msgstr "" -#: cinder/exception.py:763 +#: cinder/quota_utils.py:46 #, python-format -msgid "Instance type %(instance_type_id)s could not be found." +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume - " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" msgstr "" -#: cinder/exception.py:767 +#: cinder/quota_utils.py:56 cinder/transfer/api.py:168 +#: cinder/volume/flows/api/create_volume.py:520 #, python-format -msgid "Instance type with name %(instance_type_name)s could not be found." +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" msgstr "" -#: cinder/exception.py:772 +#: cinder/service.py:95 #, python-format -msgid "Flavor %(flavor_id)s could not be found." +msgid "Starting %(topic)s node (version %(version_string)s)" msgstr "" -#: cinder/exception.py:776 +#: cinder/service.py:108 cinder/openstack/common/rpc/service.py:47 #, python-format -msgid "Cell %(cell_id)s could not be found." +msgid "Creating Consumer connection for Service %s" msgstr "" -#: cinder/exception.py:780 +#: cinder/service.py:148 #, python-format -msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgid "" +"Report interval must be less than service down time. Current config " +"service_down_time: %(service_down_time)s, report_interval for this: " +"service is: %(report_interval)s. Setting global service_down_time to: " +"%(new_down_time)s" msgstr "" -#: cinder/exception.py:784 -#, python-format -msgid "Scheduler cost function %(cost_fn_str)s could not be found." +#: cinder/service.py:216 +msgid "Service killed that has no database entry" msgstr "" -#: cinder/exception.py:789 -#, python-format -msgid "Scheduler weight flag not found: %(flag_name)s" +#: cinder/service.py:255 +msgid "The service database object disappeared, Recreating it." msgstr "" -#: cinder/exception.py:793 -#, python-format -msgid "Instance %(instance_id)s has no metadata with key %(metadata_key)s." +#: cinder/service.py:270 +msgid "Recovered model server connection!" msgstr "" -#: cinder/exception.py:798 +#: cinder/service.py:276 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:298 #, python-format msgid "" -"Instance Type %(instance_type_id)s has no extra specs with key " -"%(extra_specs_key)s." +"Value of config option %(name)s_workers must be integer greater than 1. " +"Input value ignored." msgstr "" -#: cinder/exception.py:803 -msgid "LDAP object could not be found" +#: cinder/service.py:373 +msgid "serve() can only be called once" msgstr "" -#: cinder/exception.py:807 -#, python-format -msgid "LDAP user %(user_id)s could not be found." -msgstr "" +#: cinder/service.py:379 cinder/openstack/common/service.py:166 +#: cinder/openstack/common/service.py:384 +#, fuzzy +msgid "Full set of CONF:" +msgstr "Full set of CONF:" -#: cinder/exception.py:811 +#: cinder/service.py:387 #, python-format -msgid "LDAP group %(group_id)s could not be found." 
+msgid "%s : FLAG SET " msgstr "" -#: cinder/exception.py:815 +#: cinder/utils.py:96 #, python-format -msgid "LDAP user %(user_id)s is not a member of group %(group_id)s." +msgid "Can not translate %s to integer." msgstr "" -#: cinder/exception.py:819 +#: cinder/utils.py:127 #, python-format -msgid "File %(file_path)s could not be found." +msgid "May specify only one of %s" msgstr "" -#: cinder/exception.py:823 -msgid "Zero files could be found." +#: cinder/utils.py:212 +msgid "Specify a password or private_key" msgstr "" -#: cinder/exception.py:827 +#: cinder/utils.py:228 #, python-format -msgid "Virtual switch associated with the network adapter %(adapter)s not found." +msgid "Error connecting via ssh: %s" msgstr "" -#: cinder/exception.py:832 +#: cinder/utils.py:412 #, python-format -msgid "Network adapter %(adapter)s could not be found." -msgstr "" +msgid "Invalid backend: %s" +msgstr "Invalid backend: %s" -#: cinder/exception.py:836 +#: cinder/utils.py:423 #, python-format -msgid "Class %(class_name)s could not be found: %(exception)s" -msgstr "" +msgid "backend %s" +msgstr "backend %s" -#: cinder/exception.py:840 -msgid "Action not allowed." +#: cinder/utils.py:698 +#, python-format +msgid "Could not remove tmpdir: %s" msgstr "" -#: cinder/exception.py:844 +#: cinder/utils.py:759 #, python-format -msgid "Unable to use global role %(role_id)s" +msgid "Volume driver %s not initialized" msgstr "" -#: cinder/exception.py:848 -msgid "Rotation is not allowed for snapshots" -msgstr "" +#: cinder/wsgi.py:127 cinder/openstack/common/sslutils.py:50 +#, fuzzy, python-format +msgid "Unable to find cert_file : %s" +msgstr "Unable to find cert_file : %s" -#: cinder/exception.py:852 -msgid "Rotation param is required for backup image_type" -msgstr "" +#: cinder/wsgi.py:130 cinder/openstack/common/sslutils.py:53 +#, fuzzy, python-format +msgid "Unable to find ca_file : %s" +msgstr "Unable to find ca_file : %s" -#: cinder/exception.py:861 -#, python-format -msgid "Key pair %(key_name)s already exists." -msgstr "" +#: cinder/wsgi.py:133 cinder/openstack/common/sslutils.py:56 +#, fuzzy, python-format +msgid "Unable to find key_file : %s" +msgstr "Unable to find key_file : %s" -#: cinder/exception.py:865 -#, python-format -msgid "User %(user)s already exists." +#: cinder/wsgi.py:136 cinder/openstack/common/sslutils.py:59 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" msgstr "" -#: cinder/exception.py:869 +#: cinder/wsgi.py:169 #, python-format -msgid "LDAP user %(user)s already exists." +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" msgstr "" -#: cinder/exception.py:873 +#: cinder/wsgi.py:206 #, python-format -msgid "LDAP group %(group)s already exists." +msgid "Started %(name)s on %(host)s:%(port)s" msgstr "" -#: cinder/exception.py:877 -#, python-format -msgid "User %(uid)s is already a member of the group %(group_dn)s" +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." msgstr "" -#: cinder/exception.py:882 -#, python-format -msgid "Project %(project)s already exists." +#: cinder/wsgi.py:244 +msgid "WSGI server has stopped." msgstr "" -#: cinder/exception.py:886 -#, python-format -msgid "Instance %(name)s already exists." +#: cinder/wsgi.py:313 +msgid "You must implement __call__" msgstr "" -#: cinder/exception.py:890 -#, python-format -msgid "Instance Type %(name)s already exists." +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. 
Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." msgstr "" -#: cinder/exception.py:894 -#, python-format -msgid "Volume Type %(name)s already exists." +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." msgstr "" -#: cinder/exception.py:898 -#, python-format -msgid "%(path)s is on shared storage: %(reason)s" +#: cinder/api/common.py:92 cinder/api/common.py:126 cinder/volume/api.py:266 +msgid "limit param must be an integer" msgstr "" -#: cinder/exception.py:902 -msgid "Migration error" +#: cinder/api/common.py:95 cinder/api/common.py:130 cinder/volume/api.py:263 +msgid "limit param must be positive" msgstr "" -#: cinder/exception.py:906 -#, python-format -msgid "Malformed message body: %(reason)s" +#: cinder/api/common.py:120 +msgid "offset param must be an integer" msgstr "" -#: cinder/exception.py:910 -#, python-format -msgid "Could not find config at %(path)s" +#: cinder/api/common.py:134 +msgid "offset param must be positive" msgstr "" -#: cinder/exception.py:914 +#: cinder/api/common.py:162 #, python-format -msgid "Could not load paste app '%(name)s' from %(path)s" -msgstr "" - -#: cinder/exception.py:918 -msgid "When resizing, instances must change size!" -msgstr "" - -#: cinder/exception.py:922 -msgid "Image is larger than instance type allows" +msgid "marker [%s] not found" msgstr "" -#: cinder/exception.py:926 -msgid "1 or more Zones could not complete the request" +#: cinder/api/common.py:189 +#, python-format +msgid "href %s does not contain version" msgstr "" -#: cinder/exception.py:930 -msgid "Instance type's memory is too small for requested image." +#: cinder/api/extensions.py:182 +msgid "Initializing extension manager." msgstr "" -#: cinder/exception.py:934 -msgid "Instance type's disk is too small for requested image." +#: cinder/api/extensions.py:197 +#, python-format +msgid "Loaded extension: %s" msgstr "" -#: cinder/exception.py:938 +#: cinder/api/extensions.py:235 #, python-format -msgid "Insufficient free memory on compute node to start %(uuid)s." +msgid "Ext name: %s" msgstr "" -#: cinder/exception.py:942 -msgid "Could not fetch bandwidth/cpu/disk metrics for this host." +#: cinder/api/extensions.py:236 +#, python-format +msgid "Ext alias: %s" msgstr "" -#: cinder/exception.py:946 +#: cinder/api/extensions.py:237 #, python-format -msgid "No valid host was found. %(reason)s" +msgid "Ext description: %s" msgstr "" -#: cinder/exception.py:950 +#: cinder/api/extensions.py:239 #, python-format -msgid "Host %(host)s is not up or doesn't exist." +msgid "Ext namespace: %s" msgstr "" -#: cinder/exception.py:954 -msgid "Quota exceeded" +#: cinder/api/extensions.py:240 +#, python-format +msgid "Ext updated: %s" msgstr "" -#: cinder/exception.py:958 +#: cinder/api/extensions.py:242 #, python-format -msgid "" -"Aggregate %(aggregate_id)s: action '%(action)s' caused an error: " -"%(reason)s." +msgid "Exception loading extension: %s" msgstr "" -#: cinder/exception.py:963 +#: cinder/api/extensions.py:256 #, python-format -msgid "Aggregate %(aggregate_id)s could not be found." +msgid "Loading extension %s" msgstr "" -#: cinder/exception.py:967 +#: cinder/api/extensions.py:262 #, python-format -msgid "Aggregate %(aggregate_name)s already exists." +msgid "Calling extension factory %s" msgstr "" -#: cinder/exception.py:971 +#: cinder/api/extensions.py:276 #, python-format -msgid "Aggregate %(aggregate_id)s has no host %(host)s." 
+msgid "osapi_volume_extension is set to deprecated path: %s" msgstr "" -#: cinder/exception.py:975 +#: cinder/api/extensions.py:278 #, python-format -msgid "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s." +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" msgstr "" -#: cinder/exception.py:980 +#: cinder/api/extensions.py:287 #, python-format -msgid "Host %(host)s already member of another aggregate." +msgid "Failed to load extension %(ext_factory)s: %(exc)s" msgstr "" -#: cinder/exception.py:984 +#: cinder/api/extensions.py:356 #, python-format -msgid "Aggregate %(aggregate_id)s already has host %(host)s." +msgid "Failed to load extension %(classpath)s: %(exc)s" msgstr "" -#: cinder/exception.py:988 +#: cinder/api/extensions.py:381 #, python-format -msgid "Detected more than one volume with name %(vol_name)s" +msgid "Failed to load extension %(ext_name)s: %(exc)s" msgstr "" -#: cinder/exception.py:992 -#, python-format -msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" msgstr "" -#: cinder/exception.py:997 -msgid "Unable to create instance type" +#: cinder/api/xmlutil.py:266 +msgid "element is not a child" msgstr "" -#: cinder/exception.py:1001 -msgid "Bad response from SolidFire API" +#: cinder/api/xmlutil.py:463 +msgid "root element selecting a list" msgstr "" -#: cinder/exception.py:1005 +#: cinder/api/xmlutil.py:786 #, python-format -msgid "Error in SolidFire API response: status=%(status)s" +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" msgstr "" -#: cinder/exception.py:1009 -#, python-format -msgid "Error in SolidFire API response: data=%(data)s" +#: cinder/api/xmlutil.py:907 +msgid "subclasses must implement construct()!" msgstr "" -#: cinder/exception.py:1013 +#: cinder/api/contrib/admin_actions.py:81 #, python-format -msgid "Detected existing vlan with id %(vlan)d" +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" msgstr "" -#: cinder/exception.py:1017 +#: cinder/api/contrib/backups.py:128 #, python-format -msgid "Instance %(instance_id)s could not be found." +msgid "show called for member %s" msgstr "" -#: cinder/exception.py:1021 +#: cinder/api/contrib/backups.py:140 #, python-format -msgid "Invalid id: %(val)s (expecting \"i-...\")." 
+msgid "delete called for member %s" msgstr "" -#: cinder/exception.py:1025 +#: cinder/api/contrib/backups.py:143 #, python-format -msgid "Could not fetch image %(image)s" +msgid "Delete backup with id: %s" msgstr "" -#: cinder/log.py:315 +#: cinder/api/contrib/backups.py:185 #, python-format -msgid "syslog facility must be one of: %s" +msgid "Creating new backup %s" msgstr "" -#: cinder/manager.py:146 -#, python-format -msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:227 +#: cinder/api/contrib/volume_transfer.py:157 +#: cinder/api/contrib/volume_transfer.py:193 +msgid "Incorrect request body format" msgstr "" -#: cinder/manager.py:152 +#: cinder/api/contrib/backups.py:201 #, python-format -msgid "Running periodic task %(full_task_name)s" +msgid "Creating backup of volume %(volume_id)s in container %(container)s" msgstr "" -#: cinder/manager.py:159 +#: cinder/api/contrib/backups.py:224 #, python-format -msgid "Error during %(full_task_name)s: %(e)s" +msgid "Restoring backup %(backup_id)s (%(body)s)" msgstr "" -#: cinder/manager.py:203 -msgid "Notifying Schedulers of capabilities ..." +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" msgstr "" -#: cinder/policy.py:30 -msgid "JSON file representing policy" +#: cinder/api/contrib/extended_snapshot_attributes.py:60 +msgid "Snapshot not found." msgstr "" -#: cinder/policy.py:33 -msgid "Rule checked when requested rule is not found" +#: cinder/api/contrib/hosts.py:86 cinder/api/openstack/wsgi.py:245 +msgid "cannot understand XML" msgstr "" -#: cinder/service.py:137 -msgid "SIGTERM received" +#: cinder/api/contrib/hosts.py:136 +#, python-format +msgid "Host '%s' could not be found." msgstr "" -#: cinder/service.py:177 +#: cinder/api/contrib/hosts.py:165 #, python-format -msgid "Starting %(topic)s node (version %(vcs_string)s)" +msgid "Invalid status: '%s'" msgstr "" -#: cinder/service.py:195 +#: cinder/api/contrib/hosts.py:168 #, python-format -msgid "Creating Consumer connection for Service %s" +msgid "Invalid update setting: '%s'" msgstr "" -#: cinder/service.py:282 -msgid "Service killed that has no database entry" +#: cinder/api/contrib/hosts.py:180 +#, python-format +msgid "Setting host %(host)s to %(state)s." msgstr "" -#: cinder/service.py:319 -msgid "The service database object disappeared, Recreating it." +#: cinder/api/contrib/hosts.py:206 +msgid "Describe-resource is admin only functionality" msgstr "" -#: cinder/service.py:334 -msgid "Recovered model server connection!" +#: cinder/api/contrib/hosts.py:214 +msgid "Host not found" msgstr "" -#: cinder/service.py:340 -msgid "model server went away" +#: cinder/api/contrib/qos_specs_manage.py:111 +msgid "Please specify a name for QoS specs." msgstr "" -#: cinder/service.py:433 -msgid "Full set of FLAGS:" -msgstr "Full set of FLAGS:" - -#: cinder/service.py:440 -#, python-format -msgid "%(flag)s : FLAG SET " +#: cinder/api/contrib/qos_specs_manage.py:220 +msgid "Failed to disassociate qos specs." msgstr "" -#: cinder/utils.py:79 -#, python-format -msgid "Inner Exception: %s" -msgstr "Inner Exception: %s" - -#: cinder/utils.py:165 -#, python-format -msgid "Fetching %s" -msgstr "Fetching %s" +#: cinder/api/contrib/qos_specs_manage.py:222 +msgid "Qos specs still in use." 
+msgstr "" -#: cinder/utils.py:210 -#, python-format -msgid "Got unknown keyword args to utils.execute: %r" +#: cinder/api/contrib/qos_specs_manage.py:298 +#: cinder/api/contrib/qos_specs_manage.py:351 +msgid "Volume Type id must not be None." msgstr "" -#: cinder/utils.py:220 -#, python-format -msgid "Running cmd (subprocess): %s" -msgstr "Running cmd (subprocess): %s" +#: cinder/api/contrib/quota_classes.py:72 +msgid "Missing required element quota_class_set in request body." +msgstr "" -#: cinder/utils.py:236 cinder/utils.py:315 -#, python-format -msgid "Result was %s" -msgstr "Result was %s" +#: cinder/api/contrib/quota_classes.py:81 +msgid "Quota class limit must be specified as an integer value." +msgstr "" -#: cinder/utils.py:249 -#, python-format -msgid "%r failed. Retrying." +#: cinder/api/contrib/quota_classes.py:85 +msgid "Quota class limit must be -1 or greater." msgstr "" -#: cinder/utils.py:291 -#, python-format -msgid "Running cmd (SSH): %s" -msgstr "Running cmd (SSH): %s" +#: cinder/api/contrib/quotas.py:60 +msgid "Quota limit must be specified as an integer value." +msgstr "" -#: cinder/utils.py:293 -msgid "Environment not supported over SSH" +#: cinder/api/contrib/quotas.py:65 +msgid "Quota limit must be -1 or greater." msgstr "" -#: cinder/utils.py:297 -msgid "process_input not supported over SSH" +#: cinder/api/contrib/quotas.py:100 +msgid "Missing required element quota_set in request body." msgstr "" -#: cinder/utils.py:352 +#: cinder/api/contrib/quotas.py:111 #, python-format -msgid "debug in callback: %s" -msgstr "debug in callback: %s" +msgid "Bad key(s) in quota set: %s" +msgstr "" -#: cinder/utils.py:534 -#, python-format -msgid "Link Local address is not found.:%s" -msgstr "Link Local address is not found.:%s" +#: cinder/api/contrib/scheduler_hints.py:36 +msgid "Malformed scheduler_hints attribute" +msgstr "" -#: cinder/utils.py:537 -#, python-format -msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" -msgstr "Couldn't get Link Local IP of %(interface)s :%(ex)s" +#: cinder/api/contrib/services.py:84 +msgid "" +"Query by service parameter is deprecated. Please use binary parameter " +"instead." +msgstr "" -#: cinder/utils.py:648 -#, python-format -msgid "Invalid backend: %s" -msgstr "Invalid backend: %s" +#: cinder/api/contrib/snapshot_actions.py:51 +msgid "'status' must be specified." +msgstr "" -#: cinder/utils.py:659 +#: cinder/api/contrib/snapshot_actions.py:61 #, python-format -msgid "backend %s" -msgstr "backend %s" - -#: cinder/utils.py:709 -msgid "in looping call" +msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" msgstr "" -#: cinder/utils.py:927 +#: cinder/api/contrib/snapshot_actions.py:67 #, python-format -msgid "Attempting to grab semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgid "" +"Provided snapshot status %(provided)s not allowed for snapshot with " +"status %(current)s." msgstr "" -#: cinder/utils.py:931 -#, python-format -msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +#: cinder/api/contrib/snapshot_actions.py:79 +msgid "progress must be an integer percentage" msgstr "" -#: cinder/utils.py:935 -#, python-format -msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +#: cinder/api/contrib/types_extra_specs.py:101 +msgid "Request body empty" msgstr "" -#: cinder/utils.py:942 -#, python-format -msgid "Got file lock \"%(lock)s\" for method \"%(method)s\"..." 
+#: cinder/api/contrib/types_extra_specs.py:105
+#: cinder/api/v1/snapshot_metadata.py:75 cinder/api/v1/volume_metadata.py:75
+#: cinder/api/v2/snapshot_metadata.py:75 cinder/api/v2/volume_metadata.py:74
+msgid "Request body and URI mismatch"
 msgstr ""
-#: cinder/utils.py:1001
-#, python-format
-msgid "Found sentinel %(filename)s for pid %(pid)s"
+#: cinder/api/contrib/types_extra_specs.py:108
+#: cinder/api/v1/snapshot_metadata.py:79 cinder/api/v1/volume_metadata.py:79
+#: cinder/api/v2/snapshot_metadata.py:79 cinder/api/v2/volume_metadata.py:78
+msgid "Request body contains too many items"
 msgstr ""
-#: cinder/utils.py:1008
-#, python-format
-msgid "Cleaned sentinel %(filename)s for pid %(pid)s"
+#: cinder/api/contrib/types_extra_specs.py:150
+msgid ""
+"Key names can only contain alphanumeric characters, underscores, periods,"
+" colons and hyphens."
 msgstr ""
-#: cinder/utils.py:1023
+#: cinder/api/contrib/volume_actions.py:99
 #, python-format
-msgid "Found lockfile %(file)s with link count %(count)d"
+msgid ""
+"Invalid request to attach volume to an instance %(instance_uuid)s and a "
+"host %(host_name)s simultaneously"
 msgstr ""
-#: cinder/utils.py:1028
-#, python-format
-msgid "Cleaned lockfile %(file)s with link count %(count)d"
+#: cinder/api/contrib/volume_actions.py:107
+msgid "Invalid request to attach volume to an invalid target"
 msgstr ""
-#: cinder/utils.py:1138
-#, python-format
-msgid "Expected object of type: %s"
+#: cinder/api/contrib/volume_actions.py:111
+msgid ""
+"Invalid request to attach volume with an invalid mode. Attaching mode "
+"should be 'rw' or 'ro'"
 msgstr ""
-#: cinder/utils.py:1169
-#, python-format
-msgid "Invalid server_string: %s"
+#: cinder/api/contrib/volume_actions.py:196
+msgid "Unable to fetch connection information from backend."
 msgstr ""
-#: cinder/utils.py:1298
-#, python-format
-msgid "timefunc: '%(name)s' took %(total_time).2f secs"
+#: cinder/api/contrib/volume_actions.py:216
+msgid "Unable to terminate volume connection from backend."
 msgstr ""
-#: cinder/utils.py:1330
-msgid "Original exception being dropped"
+#: cinder/api/contrib/volume_actions.py:229
+msgid "No image_name was specified in request."
 msgstr ""
-#: cinder/utils.py:1461
-#, python-format
-msgid "Class %(fullname)s is deprecated: %(msg)s"
+#: cinder/api/contrib/volume_actions.py:237
+msgid "Bad value for 'force' parameter."
 msgstr ""
-#: cinder/utils.py:1463
-#, python-format
-msgid "Class %(fullname)s is deprecated"
+#: cinder/api/contrib/volume_actions.py:240
+msgid "'force' is not string or bool."
 msgstr ""
-#: cinder/utils.py:1495
-#, python-format
-msgid "Function %(name)s in %(location)s is deprecated: %(msg)s"
+#: cinder/api/contrib/volume_actions.py:280
+msgid "New volume size must be specified as an integer."
 msgstr ""
-#: cinder/utils.py:1497
-#, python-format
-msgid "Function %(name)s in %(location)s is deprecated"
+#: cinder/api/contrib/volume_actions.py:299
+msgid "Must specify readonly in request."
 msgstr ""
-#: cinder/utils.py:1681
-#, python-format
-msgid "Could not remove tmpdir: %s"
+#: cinder/api/contrib/volume_actions.py:307
+msgid "Bad value for 'readonly'"
 msgstr ""
-#: cinder/wsgi.py:97
-#, python-format
-msgid "Started %(name)s on %(host)s:%(port)s"
+#: cinder/api/contrib/volume_actions.py:311
+msgid "'readonly' not string or bool"
 msgstr ""
-#: cinder/wsgi.py:108
-msgid "Stopping WSGI server."
+#: cinder/api/contrib/volume_actions.py:325
+msgid "New volume type must be specified."
 msgstr ""
-#: cinder/wsgi.py:111
-msgid "Stopping raw TCP server."
+#: cinder/api/contrib/volume_transfer.py:131
+msgid "Listing volume transfers"
 msgstr ""
-#: cinder/wsgi.py:117
+#: cinder/api/contrib/volume_transfer.py:147
 #, python-format
-msgid "Starting TCP server %(arg0)s on %(host)s:%(port)s"
+msgid "Creating new volume transfer %s"
 msgstr ""
-#: cinder/wsgi.py:133
-msgid "WSGI server has stopped."
-msgstr ""
+#: cinder/api/contrib/volume_transfer.py:162
+#, fuzzy, python-format
+msgid "Creating transfer of volume %s"
+msgstr "Creating transfer of volume %s"
-#: cinder/wsgi.py:211
-msgid "You must implement __call__"
+#: cinder/api/contrib/volume_transfer.py:183
+#, python-format
+msgid "Accepting volume transfer %s"
 msgstr ""
-#: cinder/api/direct.py:218
-msgid "not available"
+#: cinder/api/contrib/volume_transfer.py:196
+#, python-format
+msgid "Accepting transfer %s"
 msgstr ""
-#: cinder/api/direct.py:299
+#: cinder/api/contrib/volume_transfer.py:217
 #, python-format
-msgid "Returned non-serializeable type: %s"
+msgid "Delete transfer with id: %s"
 msgstr ""
-#: cinder/api/sizelimit.py:51
-msgid "Request is too large."
+#: cinder/api/contrib/volume_type_encryption.py:64
+msgid "key_size must be non-negative"
 msgstr ""
-#: cinder/api/validator.py:142
-#, python-format
-msgid "%(key)s with value %(value)s failed validator %(validator)s"
+#: cinder/api/contrib/volume_type_encryption.py:67
+msgid "key_size must be an integer"
 msgstr ""
-#: cinder/api/ec2/__init__.py:73
-#, python-format
-msgid "%(code)s: %(message)s"
+#: cinder/api/contrib/volume_type_encryption.py:73
+msgid "provider must be defined"
 msgstr ""
-#: cinder/api/ec2/__init__.py:95
-#, python-format
-msgid "FaultWrapper: %s"
+#: cinder/api/contrib/volume_type_encryption.py:75
+msgid "control_location must be defined"
 msgstr ""
-#: cinder/api/ec2/__init__.py:170
-msgid "Too many failed authentications."
+#: cinder/api/contrib/volume_type_encryption.py:83
+#, python-format
+msgid "Valid control location are: %s"
 msgstr ""
-#: cinder/api/ec2/__init__.py:180
-#, python-format
-msgid ""
-"Access key %(access_key)s has had %(failures)d failed authentications and"
-" will be locked out for %(lock_mins)d minutes."
+#: cinder/api/contrib/volume_type_encryption.py:111
+msgid "Cannot create encryption specs. Volume type in use."
 msgstr ""
-#: cinder/api/ec2/__init__.py:267
-msgid "Signature not provided"
+#: cinder/api/contrib/volume_type_encryption.py:115
+msgid "Create body is not valid."
 msgstr ""
-#: cinder/api/ec2/__init__.py:271
-msgid "Access key not provided"
+#: cinder/api/contrib/volume_type_encryption.py:157
+msgid "Cannot delete encryption specs. Volume type in use."
 msgstr ""
-#: cinder/api/ec2/__init__.py:306 cinder/api/ec2/__init__.py:319
-msgid "Failure communicating with keystone"
+#: cinder/api/middleware/auth.py:108
+msgid "Invalid service catalog json."
msgstr "" -#: cinder/api/ec2/__init__.py:388 +#: cinder/api/middleware/fault.py:44 #, python-format -msgid "Authentication Failure: %s" +msgid "Caught error: %s" msgstr "" -#: cinder/api/ec2/__init__.py:404 +#: cinder/api/middleware/fault.py:53 cinder/api/openstack/wsgi.py:984 #, python-format -msgid "Authenticated Request For %(uname)s:%(pname)s)" +msgid "%(url)s returned with HTTP %(status)d" msgstr "" -#: cinder/api/ec2/__init__.py:435 +#: cinder/api/middleware/fault.py:69 #, python-format -msgid "action: %s" +msgid "%(exception)s: %(explanation)s" msgstr "" -#: cinder/api/ec2/__init__.py:437 -#, python-format -msgid "arg: %(key)s\t\tval: %(value)s" +#: cinder/api/middleware/sizelimit.py:55 cinder/api/middleware/sizelimit.py:64 +#: cinder/api/middleware/sizelimit.py:78 +msgid "Request is too large." msgstr "" -#: cinder/api/ec2/__init__.py:512 -#, python-format -msgid "Unauthorized request for controller=%(controller)s and action=%(action)s" +#: cinder/api/openstack/__init__.py:69 +msgid "Must specify an ExtensionManager class" msgstr "" -#: cinder/api/ec2/__init__.py:584 +#: cinder/api/openstack/__init__.py:80 #, python-format -msgid "InstanceNotFound raised: %s" +msgid "Extended resource: %s" msgstr "" -#: cinder/api/ec2/__init__.py:590 +#: cinder/api/openstack/__init__.py:104 #, python-format -msgid "VolumeNotFound raised: %s" +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" msgstr "" -#: cinder/api/ec2/__init__.py:596 +#: cinder/api/openstack/__init__.py:110 #, python-format -msgid "SnapshotNotFound raised: %s" +msgid "Extension %(ext_name)s extending resource: %(collection)s" msgstr "" -#: cinder/api/ec2/__init__.py:602 -#, python-format -msgid "NotFound raised: %s" +#: cinder/api/openstack/__init__.py:126 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." msgstr "" -#: cinder/api/ec2/__init__.py:605 -#, python-format -msgid "EC2APIError raised: %s" +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." 
msgstr "" -#: cinder/api/ec2/__init__.py:613 -#, python-format -msgid "KeyPairExists raised: %s" +#: cinder/api/openstack/wsgi.py:220 cinder/api/openstack/wsgi.py:634 +msgid "cannot understand JSON" msgstr "" -#: cinder/api/ec2/__init__.py:617 -#, python-format -msgid "InvalidParameterValue raised: %s" +#: cinder/api/openstack/wsgi.py:639 +msgid "too many body keys" msgstr "" -#: cinder/api/ec2/__init__.py:621 +#: cinder/api/openstack/wsgi.py:677 #, python-format -msgid "InvalidPortRange raised: %s" +msgid "Exception handling resource: %s" msgstr "" -#: cinder/api/ec2/__init__.py:625 +#: cinder/api/openstack/wsgi.py:682 #, python-format -msgid "NotAuthorized raised: %s" +msgid "Fault thrown: %s" msgstr "" -#: cinder/api/ec2/__init__.py:629 +#: cinder/api/openstack/wsgi.py:685 #, python-format -msgid "InvalidRequest raised: %s" +msgid "HTTP exception thrown: %s" msgstr "" -#: cinder/api/ec2/__init__.py:633 -#, python-format -msgid "QuotaError raised: %s" +#: cinder/api/openstack/wsgi.py:793 +msgid "Empty body provided in request" msgstr "" -#: cinder/api/ec2/__init__.py:637 -#, python-format -msgid "Invalid id: bogus (expecting \"i-...\"): %s" +#: cinder/api/openstack/wsgi.py:799 +msgid "Unrecognized Content-Type provided in request" msgstr "" -#: cinder/api/ec2/__init__.py:646 -#, python-format -msgid "Unexpected error raised: %s" +#: cinder/api/openstack/wsgi.py:803 +msgid "No Content-Type provided in request" msgstr "" -#: cinder/api/ec2/__init__.py:647 +#: cinder/api/openstack/wsgi.py:914 #, python-format -msgid "Environment: %s" +msgid "There is no such action: %s" msgstr "" -#: cinder/api/ec2/__init__.py:649 cinder/api/metadata/handler.py:248 -msgid "An unknown error has occurred. Please try your request again." +#: cinder/api/openstack/wsgi.py:917 cinder/api/openstack/wsgi.py:930 +#: cinder/api/v1/snapshot_metadata.py:53 cinder/api/v1/snapshot_metadata.py:71 +#: cinder/api/v1/snapshot_metadata.py:96 cinder/api/v1/snapshot_metadata.py:121 +#: cinder/api/v1/volume_metadata.py:53 cinder/api/v1/volume_metadata.py:71 +#: cinder/api/v1/volume_metadata.py:96 cinder/api/v1/volume_metadata.py:121 +#: cinder/api/v2/snapshot_metadata.py:53 cinder/api/v2/snapshot_metadata.py:71 +#: cinder/api/v2/snapshot_metadata.py:96 cinder/api/v2/snapshot_metadata.py:121 +#: cinder/api/v2/volume_metadata.py:52 cinder/api/v2/volume_metadata.py:70 +#: cinder/api/v2/volume_metadata.py:95 cinder/api/v2/volume_metadata.py:120 +msgid "Malformed request body" msgstr "" -#: cinder/api/ec2/apirequest.py:64 -#, python-format -msgid "Unsupported API request: controller = %(controller)s, action = %(action)s" +#: cinder/api/openstack/wsgi.py:927 +msgid "Unsupported Content-Type" msgstr "" -#: cinder/api/ec2/cloud.py:336 -#, python-format -msgid "Create snapshot of volume %s" +#: cinder/api/openstack/wsgi.py:939 +msgid "Malformed request url" msgstr "" -#: cinder/api/ec2/cloud.py:372 +#: cinder/api/openstack/wsgi.py:987 #, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 msgid "" -"Value (%s) for KeyName is invalid. Content limited to Alphanumeric " -"character, spaces, dashes, and underscore." +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." msgstr "" -#: cinder/api/ec2/cloud.py:378 -#, python-format -msgid "Value (%s) for Keyname is invalid. Length exceeds maximum of 255." +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. 
Please use " +"cinder.api.versions.Versions instead." msgstr "" -#: cinder/api/ec2/cloud.py:382 +#: cinder/api/v1/limits.py:139 cinder/api/v2/limits.py:138 #, python-format -msgid "Create key pair %s" +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." msgstr "" -#: cinder/api/ec2/cloud.py:391 -#, python-format -msgid "Import key %s" +#: cinder/api/v1/limits.py:264 cinder/api/v2/limits.py:261 +msgid "This request was rate-limited." msgstr "" -#: cinder/api/ec2/cloud.py:409 -#, python-format -msgid "Delete key pair %s" +#: cinder/api/v1/snapshot_metadata.py:37 cinder/api/v1/snapshot_metadata.py:117 +#: cinder/api/v1/snapshot_metadata.py:156 cinder/api/v2/snapshot_metadata.py:37 +#: cinder/api/v2/snapshot_metadata.py:117 +#: cinder/api/v2/snapshot_metadata.py:156 +#, fuzzy +msgid "snapshot does not exist" +msgstr "snapshot does not exist" + +#: cinder/api/v1/snapshot_metadata.py:139 +#: cinder/api/v1/snapshot_metadata.py:149 cinder/api/v1/volume_metadata.py:139 +#: cinder/api/v1/volume_metadata.py:149 cinder/api/v2/snapshot_metadata.py:139 +#: cinder/api/v2/snapshot_metadata.py:149 cinder/api/v2/volume_metadata.py:138 +#: cinder/api/v2/volume_metadata.py:148 +msgid "Metadata item was not found" msgstr "" -#: cinder/api/ec2/cloud.py:551 -#, fuzzy, python-format -msgid "Invalid CIDR" -msgstr "Invalid cidr %(cidr)s." +#: cinder/api/v1/snapshots.py:119 cinder/api/v2/snapshots.py:120 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" -#: cinder/api/ec2/cloud.py:639 cinder/api/ec2/cloud.py:693 -#: cinder/api/ec2/cloud.py:800 -msgid "Not enough parameters, need group_name or group_id" +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:184 +msgid "'volume_id' must be specified" msgstr "" -#: cinder/api/ec2/cloud.py:654 -#: cinder/api/openstack/compute/contrib/security_groups.py:517 +#: cinder/api/v1/snapshots.py:182 cinder/api/v2/snapshots.py:193 #, python-format -msgid "Revoke security group ingress %s" +msgid "Create snapshot from volume %s" msgstr "" -#: cinder/api/ec2/cloud.py:666 cinder/api/ec2/cloud.py:719 +#: cinder/api/v1/snapshots.py:186 cinder/api/v2/snapshots.py:202 #, python-format -msgid "%s Not enough parameters to build a valid rule" +msgid "Invalid value '%s' for force. " msgstr "" -#: cinder/api/ec2/cloud.py:684 cinder/api/ec2/cloud.py:744 -msgid "No rule for the specified parameters." +#: cinder/api/v1/volume_metadata.py:37 cinder/api/v1/volume_metadata.py:117 +#: cinder/api/v1/volume_metadata.py:156 cinder/api/v2/volume_metadata.py:36 +#: cinder/api/v2/volume_metadata.py:116 cinder/api/v2/volume_metadata.py:155 +msgid "volume does not exist" msgstr "" -#: cinder/api/ec2/cloud.py:708 -#: cinder/api/openstack/compute/contrib/security_groups.py:354 +#: cinder/api/v1/volumes.py:111 #, python-format -msgid "Authorize security group ingress %s" +msgid "vol=%s" msgstr "" -#: cinder/api/ec2/cloud.py:725 +#: cinder/api/v1/volumes.py:290 cinder/api/v2/volumes.py:228 #, python-format -msgid "%s - This rule already exists in group" +msgid "Delete volume with id: %s" msgstr "" -#: cinder/api/ec2/cloud.py:769 -#, python-format -msgid "" -"Value (%s) for parameter GroupName is invalid. Content limited to " -"Alphanumeric characters, spaces, dashes, and underscores." +#: cinder/api/v1/volumes.py:344 cinder/api/v1/volumes.py:348 +#: cinder/api/v2/volumes.py:298 cinder/api/v2/volumes.py:302 +msgid "Invalid imageRef provided." 
msgstr "" -#: cinder/api/ec2/cloud.py:776 +#: cinder/api/v1/volumes.py:388 cinder/api/v2/volumes.py:354 #, python-format -msgid "" -"Value (%s) for parameter GroupName is invalid. Length exceeds maximum of " -"255." +msgid "snapshot id:%s not found" msgstr "" -#: cinder/api/ec2/cloud.py:780 -#: cinder/api/openstack/compute/contrib/security_groups.py:292 +#: cinder/api/v1/volumes.py:401 #, python-format -msgid "Create Security Group %s" +msgid "source vol id:%s not found" msgstr "" -#: cinder/api/ec2/cloud.py:783 +#: cinder/api/v1/volumes.py:412 cinder/api/v2/volumes.py:377 #, python-format -msgid "group %s already exists" +msgid "Create volume of %s GB" msgstr "" -#: cinder/api/ec2/cloud.py:815 -#: cinder/api/openstack/compute/contrib/security_groups.py:245 +#: cinder/api/v1/volumes.py:496 #, python-format -msgid "Delete security group %s" +msgid "Removing options '%(bad_options)s' from query" msgstr "" -#: cinder/api/ec2/cloud.py:823 cinder/compute/manager.py:1630 -#, python-format -msgid "Get console output for instance %s" -msgstr "Get console output for instance %s" - -#: cinder/api/ec2/cloud.py:894 -#, python-format -msgid "Create volume from snapshot %s" +#: cinder/api/v2/snapshots.py:111 cinder/api/v2/snapshots.py:126 +#: cinder/api/v2/snapshots.py:267 +msgid "Snapshot could not be found" msgstr "" -#: cinder/api/ec2/cloud.py:898 cinder/api/openstack/compute/contrib/volumes.py:186 -#: cinder/api/openstack/volume/volumes.py:222 +#: cinder/api/v2/snapshots.py:174 cinder/api/v2/snapshots.py:234 +#: cinder/api/v2/volumes.py:313 cinder/api/v2/volumes.py:419 #, python-format -msgid "Create volume of %s GB" +msgid "Missing required element '%s' in request body" msgstr "" -#: cinder/api/ec2/cloud.py:921 -msgid "Delete Failed" +#: cinder/api/v2/snapshots.py:190 cinder/api/v2/volumes.py:217 +#: cinder/api/v2/volumes.py:234 cinder/api/v2/volumes.py:449 +msgid "Volume could not be found" msgstr "" -#: cinder/api/ec2/cloud.py:931 -#, python-format -msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" +#: cinder/api/v2/snapshots.py:230 cinder/api/v2/volumes.py:415 +msgid "Missing request body" msgstr "" -#: cinder/api/ec2/cloud.py:939 -msgid "Attach Failed." +#: cinder/api/v2/types.py:70 +msgid "Volume type not found" msgstr "" -#: cinder/api/ec2/cloud.py:952 cinder/api/openstack/compute/contrib/volumes.py:366 -#, python-format -msgid "Detach volume %s" +#: cinder/api/v2/volumes.py:237 +msgid "Volume cannot be deleted while in attached state" msgstr "" -#: cinder/api/ec2/cloud.py:959 -msgid "Detach Volume Failed." +#: cinder/api/v2/volumes.py:343 +msgid "Volume type not found." 
msgstr "" -#: cinder/api/ec2/cloud.py:984 cinder/api/ec2/cloud.py:1041 -#: cinder/api/ec2/cloud.py:1518 cinder/api/ec2/cloud.py:1533 +#: cinder/api/v2/volumes.py:366 #, python-format -msgid "attribute not supported: %s" +msgid "source volume id:%s not found" msgstr "" -#: cinder/api/ec2/cloud.py:1107 +#: cinder/api/v2/volumes.py:472 #, python-format -msgid "vol = %s\n" +msgid "Removing options '%s' from query" msgstr "" -#: cinder/api/ec2/cloud.py:1255 -msgid "Allocate address" +#: cinder/backup/api.py:66 +#, fuzzy +msgid "Backup status must be available or error" +msgstr "Backup status must be available or error" + +#: cinder/backup/api.py:105 +#, fuzzy +msgid "Volume to be backed up must be available" +msgstr "Volume to be backed up must be available" + +#: cinder/backup/api.py:140 +#, fuzzy +msgid "Backup status must be available" +msgstr "Backup status must be available" + +#: cinder/backup/api.py:145 +msgid "Backup to be restored has invalid size" msgstr "" -#: cinder/api/ec2/cloud.py:1267 +#: cinder/backup/api.py:154 #, python-format -msgid "Release address %s" +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" msgstr "" -#: cinder/api/ec2/cloud.py:1272 +#: cinder/backup/api.py:170 +#, fuzzy +msgid "Volume to be restored to must be available" +msgstr "Volume to be restored to must be available" + +#: cinder/backup/api.py:176 #, python-format -msgid "Associate address %(public_ip)s to instance %(instance_id)s" +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." msgstr "" -#: cinder/api/ec2/cloud.py:1282 +#: cinder/backup/api.py:181 #, python-format -msgid "Disassociate address %s" +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" msgstr "" -#: cinder/api/ec2/cloud.py:1308 -msgid "Image must be available" +#: cinder/backup/manager.py:97 +msgid "NULL host not allowed for volume backend lookup." msgstr "" -#: cinder/api/ec2/cloud.py:1329 -msgid "Going to start terminating instances" +#: cinder/backup/manager.py:100 +#, python-format +msgid "Checking hostname '%s' for backend info." msgstr "" -#: cinder/api/ec2/cloud.py:1343 +#: cinder/backup/manager.py:107 #, python-format -msgid "Reboot instance %r" +msgid "Backend not found in hostname (%s) so using default." msgstr "" -#: cinder/api/ec2/cloud.py:1354 -msgid "Going to stop instances" +#: cinder/backup/manager.py:117 +#, python-format +msgid "Manager requested for volume_backend '%s'." msgstr "" -#: cinder/api/ec2/cloud.py:1365 -msgid "Going to start instances" +#: cinder/backup/manager.py:120 cinder/backup/manager.py:132 +msgid "Fetching default backend." msgstr "" -#: cinder/api/ec2/cloud.py:1455 +#: cinder/backup/manager.py:123 #, python-format -msgid "De-registering image %s" +msgid "Volume manager for backend '%s' does not exist." msgstr "" -#: cinder/api/ec2/cloud.py:1471 -msgid "imageLocation is required" +#: cinder/backup/manager.py:129 +#, python-format +msgid "Driver requested for volume_backend '%s'." msgstr "" -#: cinder/api/ec2/cloud.py:1490 +#: cinder/backup/manager.py:147 #, python-format -msgid "Registered image %(image_location)s with id %(image_id)s" +msgid "" +"Registering backend %(backend)s (host=%(host)s " +"backend_name=%(backend_name)s)." msgstr "" -#: cinder/api/ec2/cloud.py:1536 -msgid "user or group not specified" +#: cinder/backup/manager.py:154 +#, python-format +msgid "Registering default backend %s." 
msgstr "" -#: cinder/api/ec2/cloud.py:1538 -msgid "only group \"all\" is supported" +#: cinder/backup/manager.py:158 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)." msgstr "" -#: cinder/api/ec2/cloud.py:1540 -msgid "operation_type must be add or remove" +#: cinder/backup/manager.py:165 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s." msgstr "" -#: cinder/api/ec2/cloud.py:1542 -#, python-format -msgid "Updating image %s publicity" +#: cinder/backup/manager.py:184 +msgid "Cleaning up incomplete backup operations." msgstr "" -#: cinder/api/ec2/cloud.py:1555 +#: cinder/backup/manager.py:189 #, python-format -msgid "Not allowed to modify attributes for image %s" +msgid "Resetting volume %s to available (was backing-up)." msgstr "" -#: cinder/api/ec2/cloud.py:1603 +#: cinder/backup/manager.py:194 #, python-format -msgid "Couldn't stop instance with in %d sec" +msgid "Resetting volume %s to error_restoring (was restoring-backup)." msgstr "" -#: cinder/api/metadata/handler.py:246 cinder/api/metadata/handler.py:253 +#: cinder/backup/manager.py:206 #, python-format -msgid "Failed to get metadata for ip: %s" +msgid "Resetting backup %s to error (was creating)." msgstr "" -#: cinder/api/openstack/__init__.py:43 +#: cinder/backup/manager.py:212 #, python-format -msgid "Caught error: %s" +msgid "Resetting backup %s to available (was restoring)." msgstr "" -#: cinder/api/openstack/__init__.py:45 cinder/api/openstack/wsgi.py:886 +#: cinder/backup/manager.py:217 #, python-format -msgid "%(url)s returned with HTTP %(status)d" +msgid "Resuming delete on backup: %s." msgstr "" -#: cinder/api/openstack/__init__.py:94 -msgid "Must specify an ExtensionManager class" +#: cinder/backup/manager.py:225 +#, python-format +msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." msgstr "" -#: cinder/api/openstack/__init__.py:105 +#: cinder/backup/manager.py:237 #, python-format -msgid "Extended resource: %s" +msgid "" +"Create backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s." msgstr "" -#: cinder/api/openstack/__init__.py:130 +#: cinder/backup/manager.py:249 #, python-format msgid "" -"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " -"resource" +"Create backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." msgstr "" -#: cinder/api/openstack/__init__.py:135 +#: cinder/backup/manager.py:282 #, python-format -msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgid "Create backup finished. backup: %s." msgstr "" -#: cinder/api/openstack/auth.py:90 +#: cinder/backup/manager.py:286 #, python-format -msgid "%(user_id)s could not be found with token '%(token)s'" +msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s." msgstr "" -#: cinder/api/openstack/auth.py:134 +#: cinder/backup/manager.py:299 #, python-format -msgid "%(user_id)s must be an admin or a member of %(project_id)s" +msgid "" +"Restore backup aborted: expected volume status %(expected_status)s but " +"got %(actual_status)s." msgstr "" -#: cinder/api/openstack/auth.py:152 -msgid "Authentication requests must be made against a version root (e.g. /v2)." +#: cinder/backup/manager.py:310 +#, python-format +msgid "" +"Restore backup aborted: expected backup status %(expected_status)s but " +"got %(actual_status)s." 
msgstr "" -#: cinder/api/openstack/auth.py:167 +#: cinder/backup/manager.py:329 #, python-format -msgid "Could not find %s in request." +msgid "" +"Restore backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." msgstr "" -#: cinder/api/openstack/auth.py:191 +#: cinder/backup/manager.py:360 #, python-format -msgid "Successfully authenticated '%s'" +msgid "" +"Restore backup finished, backup %(backup_id)s restored to volume " +"%(volume_id)s." msgstr "" -#: cinder/api/openstack/auth.py:241 -msgid "User not found with provided API key." +#: cinder/backup/manager.py:379 +#, python-format +msgid "Delete backup started, backup: %s." msgstr "" -#: cinder/api/openstack/auth.py:258 +#: cinder/backup/manager.py:386 #, python-format -msgid "Provided API key is valid, but not for user '%(username)s'" +msgid "" +"Delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." msgstr "" -#: cinder/api/openstack/common.py:133 cinder/api/openstack/common.py:167 -msgid "limit param must be an integer" +#: cinder/backup/manager.py:399 +#, python-format +msgid "" +"Delete backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." msgstr "" -#: cinder/api/openstack/common.py:136 cinder/api/openstack/common.py:171 -msgid "limit param must be positive" +#: cinder/backup/manager.py:422 +#, python-format +msgid "Delete backup finished, backup %s deleted." msgstr "" -#: cinder/api/openstack/common.py:161 -msgid "offset param must be an integer" +#: cinder/backup/drivers/ceph.py:116 +msgid "" +"rbd striping not supported - ignoring configuration settings for rbd " +"striping" msgstr "" -#: cinder/api/openstack/common.py:175 -msgid "offset param must be positive" +#: cinder/backup/drivers/ceph.py:147 +#, python-format +msgid "invalid user '%s'" msgstr "" -#: cinder/api/openstack/common.py:203 -#, python-format -msgid "marker [%s] not found" +#: cinder/backup/drivers/ceph.py:213 +msgid "backup_id required" msgstr "" -#: cinder/api/openstack/common.py:243 +#: cinder/backup/drivers/ceph.py:224 #, python-format -msgid "href %s does not contain version" +msgid "discarding %(length)s bytes from offset %(offset)s" msgstr "" -#: cinder/api/openstack/common.py:278 -msgid "Image metadata limit exceeded" +#: cinder/backup/drivers/ceph.py:232 +#, python-format +msgid "writing zeroes chunk %d" msgstr "" -#: cinder/api/openstack/common.py:295 +#: cinder/backup/drivers/ceph.py:246 #, python-format -msgid "Converting nw_info: %s" +msgid "transferring data between '%(src)s' and '%(dest)s'" msgstr "" -#: cinder/api/openstack/common.py:305 +#: cinder/backup/drivers/ceph.py:250 #, python-format -msgid "Converted networks: %s" +msgid "%(chunks)s chunks of %(bytes)s bytes to be transferred" msgstr "" -#: cinder/api/openstack/common.py:338 +#: cinder/backup/drivers/ceph.py:269 #, python-format -msgid "Cannot '%(action)s' while instance is in %(attr)s %(state)s" +msgid "transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s)" msgstr "" -#: cinder/api/openstack/common.py:341 +#: cinder/backup/drivers/ceph.py:279 #, python-format -msgid "Instance is in an invalid state for '%(action)s'" +msgid "transferring remaining %s bytes" msgstr "" -#: cinder/api/openstack/common.py:421 -msgid "Rejecting snapshot request, snapshots currently disabled" +#: cinder/backup/drivers/ceph.py:295 
+#, python-format
+msgid "creating base image '%s'"
 msgstr ""
-#: cinder/api/openstack/common.py:423
-msgid "Instance snapshots are not permitted at this time."
+#: cinder/backup/drivers/ceph.py:322 cinder/backup/drivers/ceph.py:603
+#, python-format
+msgid "deleting backup snapshot='%s'"
 msgstr ""
-#: cinder/api/openstack/extensions.py:188
-#, python-format
-msgid "Loaded extension: %s"
+#: cinder/backup/drivers/ceph.py:325
+msgid "no backup snapshot to delete"
 msgstr ""
-#: cinder/api/openstack/extensions.py:225
+#: cinder/backup/drivers/ceph.py:361
 #, python-format
-msgid "Ext name: %s"
+msgid "trying diff format name format basename='%s'"
 msgstr ""
-#: cinder/api/openstack/extensions.py:226
+#: cinder/backup/drivers/ceph.py:369
 #, python-format
-msgid "Ext alias: %s"
+msgid "image %s not found"
 msgstr ""
-#: cinder/api/openstack/extensions.py:227
+#: cinder/backup/drivers/ceph.py:377
 #, python-format
-msgid "Ext description: %s"
+msgid "base image still has %s snapshots so skipping base image delete"
 msgstr ""
-#: cinder/api/openstack/extensions.py:229
+#: cinder/backup/drivers/ceph.py:382
 #, python-format
-msgid "Ext namespace: %s"
+msgid "deleting base image='%s'"
 msgstr ""
-#: cinder/api/openstack/extensions.py:230
+#: cinder/backup/drivers/ceph.py:389
 #, python-format
-msgid "Ext updated: %s"
+msgid "image busy, retrying %(retries)s more time(s) in %(delay)ss"
 msgstr ""
-#: cinder/api/openstack/extensions.py:232
-#, python-format
-msgid "Exception loading extension: %s"
+#: cinder/backup/drivers/ceph.py:394
+msgid "max retries reached - raising error"
 msgstr ""
-#: cinder/api/openstack/extensions.py:246
+#: cinder/backup/drivers/ceph.py:397
 #, python-format
-msgid "Loading extension %s"
+msgid "base backup image='%s' deleted)"
 msgstr ""
-#: cinder/api/openstack/extensions.py:252
+#: cinder/backup/drivers/ceph.py:407
 #, python-format
-msgid "Calling extension factory %s"
+msgid "deleting source snap '%s'"
 msgstr ""
-#: cinder/api/openstack/extensions.py:264
+#: cinder/backup/drivers/ceph.py:453
 #, python-format
-msgid "Failed to load extension %(ext_factory)s: %(exc)s"
+msgid "performing differential transfer from '%(src)s' to '%(dest)s'"
 msgstr ""
-#: cinder/api/openstack/extensions.py:344
+#: cinder/backup/drivers/ceph.py:478
 #, python-format
-msgid "Failed to load extension %(classpath)s: %(exc)s"
+msgid "rbd diff op failed - (ret=%(ret)s stderr=%(stderr)s)"
 msgstr ""
-#: cinder/api/openstack/extensions.py:368
+#: cinder/backup/drivers/ceph.py:488
 #, python-format
-msgid "Failed to load extension %(ext_name)s: %(exc)s"
+msgid "image '%s' not found - trying diff format name"
 msgstr ""
-#: cinder/api/openstack/wsgi.py:135 cinder/api/openstack/wsgi.py:538
-msgid "cannot understand JSON"
+#: cinder/backup/drivers/ceph.py:493
+#, python-format
+msgid "diff format image '%s' not found"
 msgstr ""
-#: cinder/api/openstack/wsgi.py:159
-#: cinder/api/openstack/compute/contrib/hosts.py:86
-msgid "cannot understand XML"
+#: cinder/backup/drivers/ceph.py:528
+#, python-format
+msgid "using --from-snap '%s'"
 msgstr ""
-#: cinder/api/openstack/wsgi.py:543
-msgid "too many body keys"
+#: cinder/backup/drivers/ceph.py:543
+#, python-format
+msgid "source snap '%s' is stale so deleting"
 msgstr ""
-#: cinder/api/openstack/wsgi.py:582
+#: cinder/backup/drivers/ceph.py:555
 #, python-format
-msgid "Exception handling resource: %s"
+msgid ""
+"snap='%(snap)s' does not exist in base image='%(base)s' - aborting "
+"incremental backup"
 msgstr ""
-#: cinder/api/openstack/wsgi.py:586
+#: cinder/backup/drivers/ceph.py:566
 #, python-format
-msgid "Fault thrown: %s"
+msgid "creating backup snapshot='%s'"
 msgstr ""
-#: cinder/api/openstack/wsgi.py:589
+#: cinder/backup/drivers/ceph.py:586
 #, python-format
-msgid "HTTP exception thrown: %s"
+msgid "differential backup transfer completed in %.4fs"
 msgstr ""
-#: cinder/api/openstack/wsgi.py:697
-msgid "Unrecognized Content-Type provided in request"
+#: cinder/backup/drivers/ceph.py:595
+msgid "differential backup transfer failed"
 msgstr ""
-#: cinder/api/openstack/wsgi.py:701
-msgid "No Content-Type provided in request"
+#: cinder/backup/drivers/ceph.py:625
+#, python-format
+msgid "creating base image='%s'"
 msgstr ""
-#: cinder/api/openstack/wsgi.py:705
-msgid "Empty body provided in request"
+#: cinder/backup/drivers/ceph.py:634
+msgid "copying data"
 msgstr ""
-#: cinder/api/openstack/wsgi.py:816
+#: cinder/backup/drivers/ceph.py:694
 #, python-format
-msgid "There is no such action: %s"
+msgid "looking for snapshot of backup base '%s'"
 msgstr ""
-#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832
-#: cinder/api/openstack/compute/server_metadata.py:58
-#: cinder/api/openstack/compute/server_metadata.py:76
-#: cinder/api/openstack/compute/server_metadata.py:101
-#: cinder/api/openstack/compute/server_metadata.py:126
-#: cinder/api/openstack/compute/contrib/admin_actions.py:211
-#: cinder/api/openstack/compute/contrib/console_output.py:52
-msgid "Malformed request body"
-msgstr ""
-
-#: cinder/api/openstack/wsgi.py:829
-msgid "Unsupported Content-Type"
+#: cinder/backup/drivers/ceph.py:697
+#, python-format
+msgid "backup base '%s' has no snapshots"
 msgstr ""
-#: cinder/api/openstack/wsgi.py:841
-msgid "Malformed request url"
+#: cinder/backup/drivers/ceph.py:704
+#, python-format
+msgid "backup '%s' has no snapshot"
 msgstr ""
-#: cinder/api/openstack/wsgi.py:889
+#: cinder/backup/drivers/ceph.py:708
 #, python-format
-msgid "%(url)s returned a fault: %(e)s"
+msgid "backup should only have one snapshot but instead has %s"
 msgstr ""
-#: cinder/api/openstack/xmlutil.py:265
-msgid "element is not a child"
+#: cinder/backup/drivers/ceph.py:713
+#, python-format
+msgid "found snapshot '%s'"
 msgstr ""
-#: cinder/api/openstack/xmlutil.py:414
-msgid "root element selecting a list"
+#: cinder/backup/drivers/ceph.py:734
+msgid "need non-zero volume size"
 msgstr ""
-#: cinder/api/openstack/xmlutil.py:739
+#: cinder/backup/drivers/ceph.py:751
 #, python-format
-msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s"
+msgid "Starting backup of volume='%s'"
 msgstr ""
-#: cinder/api/openstack/xmlutil.py:858
-msgid "subclasses must implement construct()!"
+#: cinder/backup/drivers/ceph.py:764
+msgid "forcing full backup"
 msgstr ""
-#: cinder/api/openstack/compute/extensions.py:29
-#: cinder/api/openstack/volume/extensions.py:29
-msgid "Initializing extension manager."
+#: cinder/backup/drivers/ceph.py:776
+#, python-format
+msgid "backup '%s' finished."
 msgstr ""
-#: cinder/api/openstack/compute/image_metadata.py:40
-#: cinder/api/openstack/compute/images.py:146
-#: cinder/api/openstack/compute/images.py:161
-msgid "Image not found."
+#: cinder/backup/drivers/ceph.py:834
+msgid "adjusting restore vol size"
 msgstr ""
-#: cinder/api/openstack/compute/image_metadata.py:79
-msgid "Incorrect request body format"
+#: cinder/backup/drivers/ceph.py:846
+#, python-format
+msgid "trying incremental restore from base='%(base)s' snap='%(snap)s'"
 msgstr ""
-#: cinder/api/openstack/compute/image_metadata.py:83
-#: cinder/api/openstack/compute/server_metadata.py:80
-#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:79
-#: cinder/api/openstack/compute/contrib/volumetypes.py:188
-msgid "Request body and URI mismatch"
+#: cinder/backup/drivers/ceph.py:858
+msgid "differential restore failed, trying full restore"
 msgstr ""
-#: cinder/api/openstack/compute/image_metadata.py:86
-#: cinder/api/openstack/compute/server_metadata.py:84
-#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:82
-#: cinder/api/openstack/compute/contrib/volumetypes.py:191
-msgid "Request body contains too many items"
+#: cinder/backup/drivers/ceph.py:869
+#, python-format
+msgid "restore transfer completed in %.4fs"
 msgstr ""
-#: cinder/api/openstack/compute/image_metadata.py:111
-msgid "Invalid metadata key"
+#: cinder/backup/drivers/ceph.py:916
+#, python-format
+msgid "rbd has %s extents"
 msgstr ""
-#: cinder/api/openstack/compute/ips.py:74
-msgid "Instance does not exist"
+#: cinder/backup/drivers/ceph.py:938
+msgid "dest volume is original volume - forcing full copy"
 msgstr ""
-#: cinder/api/openstack/compute/ips.py:97
-msgid "Instance is not a member of specified network"
+#: cinder/backup/drivers/ceph.py:959
+msgid "destination has extents - forcing full copy"
 msgstr ""
-#: cinder/api/openstack/compute/limits.py:140
+#: cinder/backup/drivers/ceph.py:964
 #, python-format
-msgid ""
-"Only %(value)s %(verb)s request(s) can be made to %(uri)s every "
-"%(unit_string)s."
+msgid "no restore point found for backup='%s', forcing full copy"
 msgstr ""
-#: cinder/api/openstack/compute/limits.py:266
-msgid "This request was rate-limited."
+#: cinder/backup/drivers/ceph.py:995
+msgid "forcing full restore"
 msgstr ""
-#: cinder/api/openstack/compute/server_metadata.py:38
-#: cinder/api/openstack/compute/server_metadata.py:122
-#: cinder/api/openstack/compute/server_metadata.py:159
-msgid "Server does not exist"
+#: cinder/backup/drivers/ceph.py:1005
+#, python-format
+msgid "starting restore from Ceph backup=%(src)s to volume=%(dest)s"
 msgstr ""
-#: cinder/api/openstack/compute/server_metadata.py:141
-#: cinder/api/openstack/compute/server_metadata.py:152
-msgid "Metadata item was not found"
+#: cinder/backup/drivers/ceph.py:1016
+msgid "volume_file does not support fileno() so skipping fsync()"
 msgstr ""
-#: cinder/api/openstack/compute/servers.py:425
-#, python-format
-msgid "Invalid server status: %(status)s"
+#: cinder/backup/drivers/ceph.py:1021
+msgid "restore finished successfully."
msgstr "" -#: cinder/api/openstack/compute/servers.py:433 -msgid "Invalid changes-since value" +#: cinder/backup/drivers/ceph.py:1023 +#, python-format +msgid "restore finished with error - %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:483 -msgid "Personality file limit exceeded" +#: cinder/backup/drivers/ceph.py:1029 +#, python-format +msgid "delete started for backup=%s" msgstr "" -#: cinder/api/openstack/compute/servers.py:485 -msgid "Personality file path too long" +#: cinder/backup/drivers/ceph.py:1034 +msgid "rbd image not found but continuing anyway so that db entry can be removed" msgstr "" -#: cinder/api/openstack/compute/servers.py:487 -msgid "Personality file content too long" +#: cinder/backup/drivers/ceph.py:1037 +#, python-format +msgid "delete '%s' finished with warning" msgstr "" -#: cinder/api/openstack/compute/servers.py:501 -msgid "Server name is not a string or unicode" +#: cinder/backup/drivers/ceph.py:1039 +#, python-format +msgid "delete '%s' finished" msgstr "" -#: cinder/api/openstack/compute/servers.py:505 -msgid "Server name is an empty string" +#: cinder/backup/drivers/swift.py:106 +#, python-format +msgid "unsupported compression algorithm: %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:509 -msgid "Server name must be less than 256 characters." +#: cinder/backup/drivers/swift.py:123 +#, python-format +msgid "single_user auth mode enabled, but %(param)s not set" msgstr "" -#: cinder/api/openstack/compute/servers.py:527 +#: cinder/backup/drivers/swift.py:141 #, python-format -msgid "Bad personality format: missing %s" +msgid "_check_container_exists: container: %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:530 -msgid "Bad personality format" +#: cinder/backup/drivers/swift.py:146 +#, python-format +msgid "container %s does not exist" msgstr "" -#: cinder/api/openstack/compute/servers.py:535 +#: cinder/backup/drivers/swift.py:151 #, python-format -msgid "Personality content for %s cannot be decoded" +msgid "container %s exists" msgstr "" -#: cinder/api/openstack/compute/servers.py:550 +#: cinder/backup/drivers/swift.py:157 #, python-format -msgid "Bad networks format: network uuid is not in proper format (%s)" +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:559 +#: cinder/backup/drivers/swift.py:173 #, python-format -msgid "Invalid fixed IP address (%s)" +msgid "_generate_swift_object_name_prefix: %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:566 +#: cinder/backup/drivers/swift.py:182 #, python-format -msgid "Duplicate networks (%s) are not allowed" +msgid "generated object list: %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:572 +#: cinder/backup/drivers/swift.py:192 #, python-format -msgid "Bad network format: missing %s" +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:575 -msgid "Bad networks format" +#: cinder/backup/drivers/swift.py:209 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" msgstr "" -#: cinder/api/openstack/compute/servers.py:587 -msgid "Userdata content cannot be decoded" +#: cinder/backup/drivers/swift.py:214 +msgid "_write_metadata finished" msgstr "" -#: cinder/api/openstack/compute/servers.py:594 -msgid "accessIPv4 is not proper IPv4 format" +#: 
cinder/backup/drivers/swift.py:219 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:601 -msgid "accessIPv6 is not proper IPv6 format" +#: cinder/backup/drivers/swift.py:224 +#, python-format +msgid "_read_metadata finished (%s)" msgstr "" -#: cinder/api/openstack/compute/servers.py:633 -msgid "Server name is not defined" +#: cinder/backup/drivers/swift.py:234 +#, fuzzy, python-format +msgid "volume size %d is invalid." +msgstr "volume size %d is invalid." + +#: cinder/backup/drivers/swift.py:248 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:675 -#: cinder/api/openstack/compute/servers.py:740 -msgid "Invalid flavorRef provided." +#: cinder/backup/drivers/swift.py:271 +msgid "reading chunk of data from volume" msgstr "" -#: cinder/api/openstack/compute/servers.py:737 -msgid "Can not find requested image" +#: cinder/backup/drivers/swift.py:278 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:743 -msgid "Invalid key_name provided." +#: cinder/backup/drivers/swift.py:287 +msgid "not compressing data" msgstr "" -#: cinder/api/openstack/compute/servers.py:829 -#: cinder/api/openstack/compute/servers.py:849 -msgid "Instance has not been resized." +#: cinder/backup/drivers/swift.py:291 +msgid "About to put_object" msgstr "" -#: cinder/api/openstack/compute/servers.py:835 +#: cinder/backup/drivers/swift.py:297 #, python-format -msgid "Error in confirm-resize %s" +msgid "swift MD5 for %(object_name)s: %(etag)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:855 +#: cinder/backup/drivers/swift.py:301 #, python-format -msgid "Error in revert-resize %s" +msgid "backup MD5 for %(object_name)s: %(md5)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:868 -msgid "Argument 'type' for reboot is not HARD or SOFT" +#: cinder/backup/drivers/swift.py:304 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:872 -msgid "Missing argument 'type' for reboot" +#: cinder/backup/drivers/swift.py:312 +msgid "Calling eventlet.sleep(0)" msgstr "" -#: cinder/api/openstack/compute/servers.py:885 +#: cinder/backup/drivers/swift.py:328 cinder/backup/drivers/tsm.py:324 #, python-format -msgid "Error in reboot %s" +msgid "backup %s finished." msgstr "" -#: cinder/api/openstack/compute/servers.py:897 -msgid "Unable to locate requested flavor." +#: cinder/backup/drivers/swift.py:345 +#, python-format +msgid "v1 swift volume backup restore of %s started" msgstr "" -#: cinder/api/openstack/compute/servers.py:900 -msgid "Resize requires a change in size." 
+#: cinder/backup/drivers/swift.py:350
+#, python-format
+msgid "metadata_object_names = %s"
 msgstr ""
-#: cinder/api/openstack/compute/servers.py:924
-msgid "Malformed server entity"
+#: cinder/backup/drivers/swift.py:356
+msgid ""
+"restore_backup aborted, actual swift object list in swift does not match "
+"object list stored in metadata"
 msgstr ""
-#: cinder/api/openstack/compute/servers.py:931
-msgid "Missing imageRef attribute"
+#: cinder/backup/drivers/swift.py:362
+#, python-format
+msgid ""
+"restoring object from swift. backup: %(backup_id)s, container: "
+"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s"
 msgstr ""
-#: cinder/api/openstack/compute/servers.py:940
-msgid "Invalid imageRef provided."
+#: cinder/backup/drivers/swift.py:378
+#, python-format
+msgid "decompressing data using %s algorithm"
 msgstr ""
-#: cinder/api/openstack/compute/servers.py:949
-msgid "Missing flavorRef attribute"
+#: cinder/backup/drivers/swift.py:401
+#, python-format
+msgid "v1 swift volume backup restore of %s finished"
 msgstr ""
-#: cinder/api/openstack/compute/servers.py:962
-msgid "No adminPass was specified"
+#: cinder/backup/drivers/swift.py:409
+#, python-format
+msgid ""
+"starting restore of backup %(object_prefix)s from swift container: "
+"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s"
 msgstr ""
-#: cinder/api/openstack/compute/servers.py:966
-#: cinder/api/openstack/compute/servers.py:1144
-msgid "Invalid adminPass"
+#: cinder/backup/drivers/swift.py:423
+#, python-format
+msgid "Restoring swift backup version %s"
 msgstr ""
-#: cinder/api/openstack/compute/servers.py:980
-msgid "Unable to parse metadata key/value pairs."
+#: cinder/backup/drivers/swift.py:428
+#, python-format
+msgid "No support to restore swift backup version %s"
 msgstr ""
-#: cinder/api/openstack/compute/servers.py:993
-msgid "Resize request has invalid 'flavorRef' attribute."
+#: cinder/backup/drivers/swift.py:432 cinder/backup/drivers/tsm.py:378
+#, python-format
+msgid "restore %(backup_id)s to %(volume_id)s finished."
 msgstr ""
-#: cinder/api/openstack/compute/servers.py:996
-msgid "Resize requests require 'flavorRef' attribute."
+#: cinder/backup/drivers/swift.py:446
+msgid "swift error while listing objects, continuing with delete"
 msgstr ""
-#: cinder/api/openstack/compute/servers.py:1014
-#: cinder/api/openstack/compute/contrib/aggregates.py:142
-#: cinder/api/openstack/compute/contrib/networks.py:65
-msgid "Invalid request body"
+#: cinder/backup/drivers/swift.py:455
+#, python-format
+msgid "swift error while deleting object %s, continuing with delete"
 msgstr ""
-#: cinder/api/openstack/compute/servers.py:1019
-msgid "Could not parse imageRef from request."
+#: cinder/backup/drivers/swift.py:458
+#, python-format
+msgid "deleted swift object: %(swift_object_name)s in container: %(container)s"
 msgstr ""
-#: cinder/api/openstack/compute/servers.py:1071
-msgid "Instance could not be found"
+#: cinder/backup/drivers/swift.py:468 cinder/backup/drivers/tsm.py:440
+#, python-format
+msgid "delete %s finished"
 msgstr ""
-#: cinder/api/openstack/compute/servers.py:1074
-msgid "Cannot find image for rebuild"
+#: cinder/backup/drivers/tsm.py:85
+#, python-format
+msgid ""
+"backup: %(vol_id)s Failed to create device hardlink from %(vpath)s to "
+"%(bpath)s.\n"
+"stdout: %(out)s\n"
+" stderr: %(err)s"
 msgstr ""
-#: cinder/api/openstack/compute/servers.py:1103
-msgid "createImage entity requires name attribute"
+#: cinder/backup/drivers/tsm.py:143
+#, python-format
+msgid ""
+"backup: %(vol_id)s Failed to obtain backup success notification from "
+"server.\n"
+"stdout: %(out)s\n"
+" stderr: %(err)s"
 msgstr ""
-#: cinder/api/openstack/compute/servers.py:1112
-#: cinder/api/openstack/compute/contrib/admin_actions.py:238
-msgid "Invalid metadata"
+#: cinder/backup/drivers/tsm.py:173
+#, python-format
+msgid ""
+"restore: %(vol_id)s Failed.\n"
+"stdout: %(out)s\n"
+" stderr: %(err)s"
 msgstr ""
-#: cinder/api/openstack/compute/servers.py:1167
+#: cinder/backup/drivers/tsm.py:199
 #, python-format
-msgid "Removing options '%(unk_opt_str)s' from query"
+msgid "backup: %(vol_id)s Failed. %(path)s is not a block device."
 msgstr ""
-#: cinder/api/openstack/compute/contrib/admin_actions.py:60
+#: cinder/backup/drivers/tsm.py:206
 #, python-format
-msgid "Compute.api::pause %s"
-msgstr "Compute.api::pause %s"
+msgid "backup: %(vol_id)s Failed. Cannot obtain real path to device %(path)s."
+msgstr ""
-#: cinder/api/openstack/compute/contrib/admin_actions.py:77
+#: cinder/backup/drivers/tsm.py:213
 #, python-format
-msgid "Compute.api::unpause %s"
-msgstr "Compute.api::unpause %s"
+msgid "backup: %(vol_id)s Failed. %(path)s is not a file."
+msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:94 +#: cinder/backup/drivers/tsm.py:260 #, python-format -msgid "compute.api::suspend %s" -msgstr "compute.api::suspend %s" +msgid "" +"backup: %(vol_id)s Failed to remove backup hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:111 +#: cinder/backup/drivers/tsm.py:286 #, python-format -msgid "compute.api::resume %s" -msgstr "compute.api::resume %s" +msgid "" +"starting backup of volume: %(volume_id)s to TSM, volume path: " +"%(volume_path)s," +msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:127 +#: cinder/backup/drivers/tsm.py:298 #, python-format -msgid "Error in migrate %s" +msgid "" +"backup: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:141 +#: cinder/backup/drivers/tsm.py:308 #, python-format -msgid "Compute.api::reset_network %s" -msgstr "Compute.api::reset_network %s" - -#: cinder/api/openstack/compute/contrib/admin_actions.py:154 -#: cinder/api/openstack/compute/contrib/admin_actions.py:170 -#: cinder/api/openstack/compute/contrib/admin_actions.py:186 -#: cinder/api/openstack/compute/contrib/multinic.py:41 -#: cinder/api/openstack/compute/contrib/rescue.py:44 -msgid "Server not found" +msgid "" +"backup: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:157 +#: cinder/backup/drivers/tsm.py:338 #, python-format -msgid "Compute.api::inject_network_info %s" +msgid "" +"restore: starting restore of backup from TSM to volume %(volume_id)s, " +"backup: %(backup_id)s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:173 +#: cinder/backup/drivers/tsm.py:352 #, python-format -msgid "Compute.api::lock %s" -msgstr "Compute.api::lock %s" +msgid "" +"restore: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:189 +#: cinder/backup/drivers/tsm.py:362 #, python-format -msgid "Compute.api::unlock %s" -msgstr "Compute.api::unlock %s" +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:219 +#: cinder/backup/drivers/tsm.py:413 #, python-format -msgid "createBackup entity requires %s attribute" +msgid "" +"delete: %(vol_id)s Failed to run dsmc with stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:223 -msgid "Malformed createBackup entity" +#: cinder/backup/drivers/tsm.py:421 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments with " +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:229 -msgid "createBackup attribute 'rotation' must be an integer" +#: cinder/backup/drivers/tsm.py:432 +#, python-format +msgid "" +"delete: %(vol_id)s Failed with stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:244 -#: cinder/api/openstack/compute/contrib/console_output.py:47 -#: cinder/api/openstack/compute/contrib/console_output.py:59 -#: cinder/api/openstack/compute/contrib/consoles.py:49 -#: 
cinder/api/openstack/compute/contrib/consoles.py:60 -#: cinder/api/openstack/compute/contrib/server_action_list.py:49 -#: cinder/api/openstack/compute/contrib/server_diagnostics.py:47 -#: cinder/api/openstack/compute/contrib/server_start_stop.py:38 -msgid "Instance not found" +#: cinder/brick/exception.py:55 +#, python-format +msgid "Exception in string format operation. msg='%s'" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:272 -msgid "host and block_migration must be specified." +#: cinder/brick/exception.py:85 +msgid "We are unable to locate any Fibre Channel devices." msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:284 -#, python-format -msgid "Live migration of instance %(id)s to host %(host)s failed" +#: cinder/brick/exception.py:89 +msgid "Unable to find a Fibre Channel volume device." msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:76 +#: cinder/brick/exception.py:93 #, python-format -msgid "" -"Cannot create aggregate with name %(name)s and availability zone " -"%(avail_zone)s" +msgid "Volume device not found at %(device)s." msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:88 +#: cinder/brick/exception.py:97 #, python-format -msgid "Cannot show aggregate: %(id)s" +msgid "Unable to find Volume Group: %(vg_name)s" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:114 +#: cinder/brick/exception.py:101 #, python-format -msgid "Cannot update aggregate: %(id)s" +msgid "Failed to create Volume Group: %(vg_name)s" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:126 +#: cinder/brick/exception.py:105 #, python-format -msgid "Cannot delete aggregate: %(id)s" +msgid "Failed to create iscsi target for volume %(volume_id)s." msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:139 +#: cinder/brick/exception.py:109 #, python-format -msgid "Aggregates does not have %s action" +msgid "Failed to remove iscsi target for volume %(volume_id)s." msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:152 -#: cinder/api/openstack/compute/contrib/aggregates.py:158 +#: cinder/brick/exception.py:113 #, python-format -msgid "Cannot add host %(host)s in aggregate %(id)s" +msgid "Failed to attach iSCSI target for volume %(volume_id)s." msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:171 -#: cinder/api/openstack/compute/contrib/aggregates.py:175 +#: cinder/brick/exception.py:117 #, python-format -msgid "Cannot remove host %(host)s in aggregate %(id)s" +msgid "Connect to volume via protocol %(protocol)s not supported." msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:195 +#: cinder/brick/initiator/connector.py:127 #, python-format -msgid "Cannot set metadata %(metadata)s in aggregate %(id)s" +msgid "Invalid InitiatorConnector protocol specified %(protocol)s" msgstr "" -#: cinder/api/openstack/compute/contrib/certificates.py:75 -msgid "Only root certificate can be retrieved." +#: cinder/brick/initiator/connector.py:140 +#, python-format +msgid "Failed to access the device on the path %(path)s: %(error)s %(info)s." 
msgstr "" -#: cinder/api/openstack/compute/contrib/cloudpipe.py:146 +#: cinder/brick/initiator/connector.py:229 +#, python-format msgid "" -"Unable to claim IP for VPN instances, ensure it isn't running, and try " -"again in a few minutes" -msgstr "" - -#: cinder/api/openstack/compute/contrib/consoles.py:44 -msgid "Missing type specification" -msgstr "" - -#: cinder/api/openstack/compute/contrib/consoles.py:56 -msgid "Invalid type specification" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. Try" +" number: %(tries)s" msgstr "" -#: cinder/api/openstack/compute/contrib/disk_config.py:44 +#: cinder/brick/initiator/connector.py:242 #, python-format -msgid "%s must be either 'MANUAL' or 'AUTO'." -msgstr "" - -#: cinder/api/openstack/compute/contrib/extended_server_attributes.py:77 -#: cinder/api/openstack/compute/contrib/extended_status.py:61 -msgid "Server not found." +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" msgstr "" -#: cinder/api/openstack/compute/contrib/flavorextradata.py:61 -#: cinder/api/openstack/compute/contrib/flavorextradata.py:91 -msgid "Flavor not found." +#: cinder/brick/initiator/connector.py:317 +#, python-format +msgid "Could not find the iSCSI Initiator File %s" msgstr "" -#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:49 -#: cinder/api/openstack/compute/contrib/volumetypes.py:158 -msgid "No Request Body" +#: cinder/brick/initiator/connector.py:609 +msgid "We are unable to locate any Fibre Channel devices" msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:159 +#: cinder/brick/initiator/connector.py:619 #, python-format -msgid "No more floating ips in pool %s." +msgid "Looking for Fibre Channel dev %(device)s" msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:161 -msgid "No more floating ips available." +#: cinder/brick/initiator/connector.py:629 +msgid "Fibre Channel volume device not found." msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:201 -#: cinder/api/openstack/compute/contrib/floating_ips.py:230 -#: cinder/api/openstack/compute/contrib/security_groups.py:571 -#: cinder/api/openstack/compute/contrib/security_groups.py:604 -msgid "Missing parameter dict" +#: cinder/brick/initiator/connector.py:633 +#, python-format +msgid "Fibre volume not yet found. Will rescan & retry. Try number: %(tries)s" msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:204 -#: cinder/api/openstack/compute/contrib/floating_ips.py:233 -msgid "Address not specified" +#: cinder/brick/initiator/connector.py:649 +#, python-format +msgid "Found Fibre Channel volume %(name)s (after %(tries)s rescans)" msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:213 -msgid "No fixed ips associated to instance" +#: cinder/brick/initiator/connector.py:658 +#, python-format +msgid "Multipath device discovered %(device)s" msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:216 -msgid "Associate floating ip failed" +#: cinder/brick/initiator/connector.py:776 +#, python-format +msgid "AoE volume not yet found at: %(path)s. 
Try number: %(tries)s"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/hosts.py:144
+#: cinder/brick/initiator/connector.py:789
 #, python-format
-msgid "Invalid status: '%s'"
+msgid "Found AoE device %(path)s (after %(tries)s rediscover)"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/hosts.py:148
-#, fuzzy, python-format
-msgid "Invalid mode: '%s'"
-msgstr "Invalid backend: %s"
-
-#: cinder/api/openstack/compute/contrib/hosts.py:152
+#: cinder/brick/initiator/connector.py:815
 #, python-format
-msgid "Invalid update setting: '%s'"
+msgid "aoe-discover: stdout=%(out)s stderr=%(err)s"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/hosts.py:170
+#: cinder/brick/initiator/connector.py:825
 #, python-format
-msgid "Putting host %(host)s in maintenance mode %(mode)s."
+msgid "aoe-revalidate %(dev)s: stdout=%(out)s stderr=%(err)s"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/hosts.py:181
+#: cinder/brick/initiator/connector.py:834
 #, python-format
-msgid "Setting host %(host)s to %(state)s."
+msgid "aoe-flush %(dev)s: stdout=%(out)s stderr=%(err)s"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/hosts.py:230
-msgid "Describe-resource is admin only functionality"
+#: cinder/brick/initiator/connector.py:858
+msgid ""
+"Connection details not present. RemoteFsClient may not initialize "
+"properly."
 msgstr ""

-#: cinder/api/openstack/compute/contrib/hosts.py:238
-msgid "Host not found"
+#: cinder/brick/initiator/connector.py:915
+msgid "Invalid connection_properties specified, no device_path attribute"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/keypairs.py:70
-msgid "Keypair name contains unsafe characters"
+#: cinder/brick/initiator/linuxfc.py:50 cinder/brick/initiator/linuxfc.py:56
+msgid "systool is not installed"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/keypairs.py:95
-msgid "Keypair name must be between 1 and 255 characters long"
+#: cinder/brick/initiator/linuxscsi.py:99
+#: cinder/brick/initiator/linuxscsi.py:107
+#: cinder/brick/initiator/linuxscsi.py:124
+#, python-format
+msgid "multipath call failed exit (%(code)s)"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/keypairs.py:100
+#: cinder/brick/initiator/linuxscsi.py:145
 #, python-format
-msgid "Key pair '%s' already exists."
+msgid "Couldn't find multipath device %(line)s"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/multinic.py:52
-msgid "Missing 'networkId' argument for addFixedIp"
+#: cinder/brick/initiator/linuxscsi.py:149
+#, python-format
+msgid "Found multipath device = %(mdev)s"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/multinic.py:68
-msgid "Missing 'address' argument for removeFixedIp"
+#: cinder/brick/iscsi/iscsi.py:140
+msgid "Attempting recreate of backing lun..."
msgstr ""

-#: cinder/api/openstack/compute/contrib/multinic.py:77
+#: cinder/brick/iscsi/iscsi.py:158
 #, python-format
-msgid "Unable to find address %r"
+msgid ""
+"Failed to recover attempt to create iscsi backing lun for volume "
+"id:%(vol_id)s: %(e)s"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/networks.py:62
+#: cinder/brick/iscsi/iscsi.py:177
 #, python-format
-msgid "Network does not have %s action"
+msgid "Creating iscsi_target for: %s"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/networks.py:70
+#: cinder/brick/iscsi/iscsi.py:184
 #, python-format
-msgid "Disassociating network with id %s"
+msgid ""
+"Created volume path %(vp)s,\n"
+"content: %(vc)s"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/networks.py:74
-#: cinder/api/openstack/compute/contrib/networks.py:91
-#: cinder/api/openstack/compute/contrib/networks.py:101
-msgid "Network not found"
+#: cinder/brick/iscsi/iscsi.py:216 cinder/brick/iscsi/iscsi.py:365
+#, fuzzy, python-format
+msgid "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s"
+msgstr "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s"
+
+#: cinder/brick/iscsi/iscsi.py:227
+#, python-format
+msgid ""
+"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure "
+"your tgtd config file contains 'include %(volumes_dir)s/*'"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/networks.py:87
+#: cinder/brick/iscsi/iscsi.py:258
 #, python-format
-msgid "Showing network with id %s"
+msgid "Removing iscsi_target for: %s"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/networks.py:97
+#: cinder/brick/iscsi/iscsi.py:262
 #, python-format
-msgid "Deleting network with id %s"
+msgid "Volume path %s does not exist, nothing to remove."
 msgstr ""

-#: cinder/api/openstack/compute/contrib/scheduler_hints.py:41
-msgid "Malformed scheduler_hints attribute"
+#: cinder/brick/iscsi/iscsi.py:280
+#, fuzzy, python-format
+msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s"
+msgstr "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s"
+
+#: cinder/brick/iscsi/iscsi.py:290 cinder/brick/iscsi/iscsi.py:550
+msgid "valid iqn needed for show_target"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/security_groups.py:222
-msgid "Security group id should be integer"
+#: cinder/brick/iscsi/iscsi.py:375
+#, python-format
+msgid "Removing iscsi_target for volume: %s"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/security_groups.py:243
-msgid "Security group is still in use"
+#: cinder/brick/iscsi/iscsi.py:469
+msgid "cinder-rtstool is not installed correctly"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/security_groups.py:295
+#: cinder/brick/iscsi/iscsi.py:489
 #, python-format
-msgid "Security group %s already exists"
+msgid "Creating iscsi_target for volume: %s"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/security_groups.py:315
+#: cinder/brick/iscsi/iscsi.py:513 cinder/brick/iscsi/iscsi.py:522
 #, python-format
-msgid "Security group %s is not a string or unicode"
+msgid "Failed to create iscsi target for volume id:%s."
 msgstr ""

-#: cinder/api/openstack/compute/contrib/security_groups.py:318
+#: cinder/brick/iscsi/iscsi.py:532
+#, fuzzy, python-format
+msgid "Removing iscsi_target: %s"
+msgstr "Removing iscsi_target: %s"
+
+#: cinder/brick/iscsi/iscsi.py:542
 #, python-format
-msgid "Security group %s cannot be empty."
+msgid "Failed to remove iscsi target for volume id:%s."
msgstr ""

-#: cinder/api/openstack/compute/contrib/security_groups.py:321
+#: cinder/brick/iscsi/iscsi.py:571
 #, python-format
-msgid "Security group %s should not be greater than 255 characters."
+msgid "Failed to add initiator iqn %s to target"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/security_groups.py:348
-msgid "Parent group id is not integer"
+#: cinder/brick/local_dev/lvm.py:75
+msgid "Error creating Volume Group"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/security_groups.py:351
+#: cinder/brick/local_dev/lvm.py:76 cinder/brick/local_dev/lvm.py:158
+#: cinder/brick/local_dev/lvm.py:474 cinder/brick/local_dev/lvm.py:503
+#: cinder/brick/local_dev/lvm.py:546 cinder/brick/local_dev/lvm.py:609
 #, python-format
-msgid "Security group (%s) not found"
+msgid "Cmd :%s"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/security_groups.py:369
-msgid "Not enough parameters to build a valid rule."
+#: cinder/brick/local_dev/lvm.py:77 cinder/brick/local_dev/lvm.py:159
+#: cinder/brick/local_dev/lvm.py:475 cinder/brick/local_dev/lvm.py:504
+#: cinder/brick/local_dev/lvm.py:547 cinder/brick/local_dev/lvm.py:610
+#, python-format
+msgid "StdOut :%s"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/security_groups.py:376
+#: cinder/brick/local_dev/lvm.py:78 cinder/brick/local_dev/lvm.py:160
+#: cinder/brick/local_dev/lvm.py:476 cinder/brick/local_dev/lvm.py:505
+#: cinder/brick/local_dev/lvm.py:548 cinder/brick/local_dev/lvm.py:611
 #, python-format
-msgid "This rule already exists in group %s"
+msgid "StdErr :%s"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/security_groups.py:414
-msgid "Parent or group id is not integer"
+#: cinder/brick/local_dev/lvm.py:82
+#, fuzzy, python-format
+msgid "Unable to locate Volume Group %s"
+msgstr "Unable to locate Volume Group %s"
+
+#: cinder/brick/local_dev/lvm.py:157
+msgid "Error querying thin pool about data_percent"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/security_groups.py:507
-msgid "Rule id is not integer"
+#: cinder/brick/local_dev/lvm.py:370
+#, fuzzy, python-format
+msgid "Unable to find VG: %s"
+msgstr "Unable to find VG: %s"
+
+#: cinder/brick/local_dev/lvm.py:420
+msgid ""
+"Requested to set up thin provisioning, however the current LVM version "
+"does not support it."
 msgstr ""

-#: cinder/api/openstack/compute/contrib/security_groups.py:510
-#, python-format
-msgid "Rule (%s) not found"
+#: cinder/brick/local_dev/lvm.py:473
+msgid "Error creating Volume"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/security_groups.py:574
-#: cinder/api/openstack/compute/contrib/security_groups.py:607
-msgid "Security group not specified"
+#: cinder/brick/local_dev/lvm.py:489
+#, fuzzy, python-format
+msgid "Unable to find LV: %s"
+msgstr "Unable to find LV: %s"
+
+#: cinder/brick/local_dev/lvm.py:502
+msgid "Error creating snapshot"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/security_groups.py:578
-#: cinder/api/openstack/compute/contrib/security_groups.py:611
-msgid "Security group name cannot be empty"
+#: cinder/brick/local_dev/lvm.py:545
+msgid "Error activating LV"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/server_start_stop.py:45
+#: cinder/brick/local_dev/lvm.py:563
 #, python-format
-msgid "start instance %r"
+msgid "Error reported running lvremove: CMD: %(command)s, RESPONSE: %(response)s"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/server_start_stop.py:54
-#, python-format
-msgid "stop instance %r"
+#: cinder/brick/local_dev/lvm.py:568
+msgid "Attempting udev settle and retry of lvremove..."
msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:73 -#: cinder/api/openstack/volume/volumes.py:106 -#, python-format -msgid "vol=%s" +#: cinder/brick/local_dev/lvm.py:608 +msgid "Error extending Volume" msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:146 -#: cinder/api/openstack/volume/volumes.py:184 -#, python-format -msgid "Delete volume with id: %s" +#: cinder/brick/remotefs/remotefs.py:39 +msgid "nfs_mount_point_base required" msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:329 -#, python-format -msgid "Attach volume %(volume_id)s to instance %(server_id)s at %(device)s" +#: cinder/brick/remotefs/remotefs.py:45 +msgid "glusterfs_mount_point_base required" msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:481 -#: cinder/api/openstack/volume/snapshots.py:110 +#: cinder/brick/remotefs/remotefs.py:86 #, python-format -msgid "Delete snapshot with id: %s" +msgid "Already mounted: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:524 -#: cinder/api/openstack/volume/snapshots.py:150 -#, python-format -msgid "Create snapshot from volume %s" +#: cinder/common/config.py:125 +msgid "Deploy v1 of the Cinder API." msgstr "" -#: cinder/auth/fakeldap.py:33 -msgid "Attempted to instantiate singleton" +#: cinder/common/config.py:128 +msgid "Deploy v2 of the Cinder API." msgstr "" -#: cinder/auth/ldapdriver.py:650 -#, python-format -msgid "" -"Attempted to remove the last member of a group. Deleting the group at %s " -"instead." +#: cinder/common/sqlalchemyutils.py:66 +#: cinder/openstack/common/db/sqlalchemy/utils.py:72 +msgid "Id not in sort_keys; is sort_keys unique?" msgstr "" -#: cinder/auth/manager.py:298 -#, python-format -msgid "Looking up user: %r" +#: cinder/common/sqlalchemyutils.py:114 +#: cinder/openstack/common/db/sqlalchemy/utils.py:120 +msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "" -#: cinder/auth/manager.py:302 +#: cinder/compute/nova.py:97 #, python-format -msgid "Failed authorization for access key %s" +msgid "Novaclient connection created using URL: %s" msgstr "" -#: cinder/auth/manager.py:308 +#: cinder/db/sqlalchemy/api.py:63 +msgid "Use of empty request context is deprecated" +msgstr "Use of empty request context is deprecated" + +#: cinder/db/sqlalchemy/api.py:190 #, python-format -msgid "Using project name = user name (%s)" +msgid "Unrecognized read_deleted value '%s'" msgstr "" -#: cinder/auth/manager.py:315 +#: cinder/db/sqlalchemy/api.py:843 #, python-format -msgid "failed authorization: no project named %(pjid)s (user=%(uname)s)" +msgid "Change will make usage less than 0 for the following resources: %s" msgstr "" -#: cinder/auth/manager.py:324 +#: cinder/db/sqlalchemy/api.py:1842 #, python-format -msgid "" -"Failed authorization: user %(uname)s not admin and not member of project " -"%(pjname)s" +msgid "VolumeType %s deletion failed, VolumeType in use." 
msgstr "" -#: cinder/auth/manager.py:331 cinder/auth/manager.py:343 +#: cinder/db/sqlalchemy/api.py:2530 #, python-format -msgid "user.secret: %s" +msgid "No backup with id %s" msgstr "" -#: cinder/auth/manager.py:332 cinder/auth/manager.py:344 +#: cinder/db/sqlalchemy/api.py:2615 +#, fuzzy +msgid "Volume must be available" +msgstr "Volume must be available" + +#: cinder/db/sqlalchemy/api.py:2639 #, python-format -msgid "expected_signature: %s" +msgid "Volume in unexpected state %s, expected awaiting-transfer" msgstr "" -#: cinder/auth/manager.py:333 cinder/auth/manager.py:345 +#: cinder/db/sqlalchemy/api.py:2662 #, python-format -msgid "signature: %s" +msgid "" +"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " +"%(status)s, expected awaiting-transfer" msgstr "" -#: cinder/auth/manager.py:335 cinder/auth/manager.py:357 -#, python-format -msgid "Invalid signature for user %s" +#: cinder/db/sqlalchemy/migration.py:37 +msgid "version should be an integer" msgstr "" -#: cinder/auth/manager.py:353 -#, python-format -msgid "host_only_signature: %s" +#: cinder/db/sqlalchemy/migration.py:64 +msgid "Upgrade DB using Essex release first." msgstr "" -#: cinder/auth/manager.py:449 -msgid "Must specify project" +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:240 +msgid "Exception while creating table." msgstr "" -#: cinder/auth/manager.py:490 -#, python-format -msgid "Adding role %(role)s to user %(uid)s in project %(pid)s" +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:269 +msgid "Downgrade from initial Cinder install is unsupported." msgstr "" -#: cinder/auth/manager.py:493 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:49 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:74 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:105 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:45 +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:48 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:46 #, python-format -msgid "Adding sitewide role %(role)s to user %(uid)s" +msgid "Table |%s| not created!" msgstr "" -#: cinder/auth/manager.py:519 -#, python-format -msgid "Removing role %(role)s from user %(uid)s on project %(pid)s" +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:127 +msgid "Dropping foreign key reservations_ibfk_1 failed." 
msgstr "" -#: cinder/auth/manager.py:522 -#, python-format -msgid "Removing sitewide role %(role)s from user %(uid)s" +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:133 +msgid "quota_classes table not dropped" msgstr "" -#: cinder/auth/manager.py:595 -#, python-format -msgid "Created project %(name)s with manager %(manager_user)s" +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:140 +msgid "quota_usages table not dropped" msgstr "" -#: cinder/auth/manager.py:613 -#, python-format -msgid "modifying project %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:147 +msgid "reservations table not dropped" msgstr "" -#: cinder/auth/manager.py:625 -#, python-format -msgid "Adding user %(uid)s to project %(pid)s" +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:60 +msgid "Exception while creating table 'volume_glance_metadata'" msgstr "" -#: cinder/auth/manager.py:646 -#, python-format -msgid "Remove user %(uid)s from project %(pid)s" +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:75 +msgid "volume_glance_metadata table not dropped" msgstr "" -#: cinder/auth/manager.py:676 -#, python-format -msgid "Deleting project %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:68 +msgid "backups table not dropped" msgstr "" -#: cinder/auth/manager.py:734 -#, python-format -msgid "Created user %(rvname)s (admin: %(rvadmin)r)" +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:58 +msgid "snapshot_metadata table not dropped" msgstr "" -#: cinder/auth/manager.py:743 -#, python-format -msgid "Deleting user %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:61 +msgid "transfers table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:31 +msgid "migrations table not dropped" msgstr "" -#: cinder/auth/manager.py:753 +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:61 #, python-format -msgid "Access Key change for user %s" +msgid "Table |%s| not created" msgstr "" -#: cinder/auth/manager.py:755 +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:37 #, python-format -msgid "Secret Key change for user %s" +msgid "Exception while dropping table %s." msgstr "" -#: cinder/auth/manager.py:757 +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:100 #, python-format -msgid "Admin status set to %(admin)r for user %(uid)s" +msgid "Exception while creating table %s." msgstr "" -#: cinder/auth/manager.py:802 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:34 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:43 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:49 #, python-format -msgid "No vpn data for project %s" +msgid "Column |%s| not created!" msgstr "" -#: cinder/cloudpipe/pipelib.py:46 -#, fuzzy, python-format -msgid "Instance type for vpn instances" -msgstr "Invalid instance type %(instance_type)s." 
+#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:92
+msgid "encryption_key_id column not dropped from volumes"
+msgstr ""

-#: cinder/cloudpipe/pipelib.py:49
-msgid "Template for cloudpipe instance boot script"
+#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:100
+msgid "encryption_key_id column not dropped from snapshots"
 msgstr ""

-#: cinder/cloudpipe/pipelib.py:52
-msgid "Network to push into openvpn config"
+#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:105
+msgid "volume_type_id column not dropped from snapshots"
 msgstr ""

-#: cinder/cloudpipe/pipelib.py:55
-msgid "Netmask to push into openvpn config"
+#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:113
+msgid "encryption table not dropped"
 msgstr ""

-#: cinder/cloudpipe/pipelib.py:107
-#, python-format
-msgid "Launching VPN for %s"
+#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:49
+msgid "Table quality_of_service_specs not created!"
 msgstr ""

-#: cinder/compute/api.py:141
-msgid "No compute host specified"
+#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:60
+msgid "Adding qos_specs_id column to volume type table failed."
 msgstr ""

-#: cinder/compute/api.py:144
-#, python-format
-msgid "Unable to find host for Instance %s"
+#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:85
+msgid "Dropping foreign key volume_types_ibfk_1 failed"
 msgstr ""

-#: cinder/compute/api.py:192
-#, python-format
-msgid ""
-"Quota exceeded for %(pid)s, tried to set %(num_metadata)s metadata "
-"properties"
+#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:93
+msgid "Dropping qos_specs_id column failed."
 msgstr ""

-#: cinder/compute/api.py:203
-#, python-format
-msgid "Quota exceeded for %(pid)s, metadata property key or value too long"
+#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:100
+msgid "Dropping quality_of_service_specs table failed."
 msgstr ""

-#: cinder/compute/api.py:257
-msgid "Cannot run any more instances of this type."
+#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:59
+msgid "volume_admin_metadata table not dropped"
 msgstr ""

-#: cinder/compute/api.py:259
-#, python-format
-msgid "Can only run %s more instances of this type."
+#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:48
+msgid ""
+"Found existing 'default' entries in the quota_classes table. Skipping "
+"insertion of default values."
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:72
+msgid "Added default quota class data into the DB."
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:74
+msgid "Default quota class data not inserted into the DB."
 msgstr ""

-#: cinder/compute/api.py:261
+#: cinder/image/glance.py:161 cinder/image/glance.py:169
 #, python-format
-msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances. "
+msgid "Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s."
 msgstr ""

-#: cinder/compute/api.py:310
-msgid "Creating a raw instance"
+#: cinder/image/image_utils.py:94 cinder/image/image_utils.py:199
+msgid "'qemu-img info' parsing failed."
msgstr "" -#: cinder/compute/api.py:312 +#: cinder/image/image_utils.py:101 #, python-format -msgid "Using Kernel=%(kernel_id)s, Ramdisk=%(ramdisk_id)s" +msgid "fmt=%(fmt)s backed by: %(backing_file)s" msgstr "" -#: cinder/compute/api.py:383 +#: cinder/image/image_utils.py:109 cinder/image/image_utils.py:192 #, python-format -msgid "Going to run %s instances..." +msgid "" +"Size is %(image_size)dGB and doesn't fit in a volume of size " +"%(volume_size)dGB." msgstr "" -#: cinder/compute/api.py:447 +#: cinder/image/image_utils.py:157 #, python-format -msgid "bdm %s" +msgid "" +"qemu-img is not installed and image is of type %s. Only RAW images can " +"be used if qemu-img is not installed." +msgstr "" + +#: cinder/image/image_utils.py:164 +msgid "" +"qemu-img is not installed and the disk format is not specified. Only RAW" +" images can be used if qemu-img is not installed." msgstr "" -#: cinder/compute/api.py:474 +#: cinder/image/image_utils.py:178 #, python-format -msgid "block_device_mapping %s" +msgid "Copying image from %(tmp)s to volume %(dest)s - size: %(size)s" msgstr "" -#: cinder/compute/api.py:591 +#: cinder/image/image_utils.py:206 #, python-format -msgid "Sending create to scheduler for %(pid)s/%(uid)s's" +msgid "fmt=%(fmt)s backed by:%(backing_file)s" msgstr "" -#: cinder/compute/api.py:871 -msgid "Going to try to soft delete instance" +#: cinder/image/image_utils.py:224 +#, python-format +msgid "Converted to %(vol_format)s, but format is now %(file_format)s" msgstr "" -#: cinder/compute/api.py:891 -msgid "No host for instance, deleting immediately" +#: cinder/image/image_utils.py:260 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" msgstr "" -#: cinder/compute/api.py:939 -msgid "Going to try to terminate instance" +#: cinder/keymgr/conf_key_mgr.py:78 +msgid "" +"config option keymgr.fixed_key has not been defined: some operations may " +"fail unexpectedly" msgstr "" -#: cinder/compute/api.py:977 -msgid "Going to try to stop instance" +#: cinder/keymgr/conf_key_mgr.py:80 +msgid "keymgr.fixed_key not defined" msgstr "" -#: cinder/compute/api.py:996 -msgid "Going to try to start instance" +#: cinder/keymgr/conf_key_mgr.py:134 +#, python-format +msgid "Not deleting key %s" msgstr "" -#: cinder/compute/api.py:1000 +#: cinder/openstack/common/eventlet_backdoor.py:140 #, python-format -msgid "Instance %(instance_uuid)s is not stopped. (%(vm_state)s" +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" msgstr "" -#: cinder/compute/api.py:1071 cinder/volume/api.py:173 -#: cinder/volume/volume_types.py:64 +#: cinder/openstack/common/excutils.py:48 #, python-format -msgid "Searching by: %s" +msgid "Original exception being dropped: %s" msgstr "" -#: cinder/compute/api.py:1201 +#: cinder/openstack/common/fileutils.py:64 #, python-format -msgid "Image type not recognized %s" +msgid "Reloading cached file %s" msgstr "" -#: cinder/compute/api.py:1369 -msgid "flavor_id is None. Assuming migration." +#: cinder/openstack/common/gettextutils.py:252 +msgid "Message objects do not support addition." msgstr "" -#: cinder/compute/api.py:1377 -#, python-format +#: cinder/openstack/common/gettextutils.py:261 msgid "" -"Old instance type %(current_instance_type_name)s, new instance type " -"%(new_instance_type_name)s" +"Message objects do not support str() because they may contain non-ascii " +"characters. Please use unicode() or translate() instead." 
msgstr "" -#: cinder/compute/api.py:1644 -#, python-format -msgid "multiple fixedips exist, using the first: %s" +#: cinder/openstack/common/imageutils.py:96 +msgid "Snapshot list encountered but no header found!" msgstr "" -#: cinder/compute/instance_types.py:57 cinder/compute/instance_types.py:65 -msgid "create arguments must be positive integers" +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" msgstr "" -#: cinder/compute/instance_types.py:76 cinder/volume/volume_types.py:41 +#: cinder/openstack/common/lockutils.py:189 #, python-format -msgid "DB error: %s" +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." msgstr "" -#: cinder/compute/instance_types.py:86 +#: cinder/openstack/common/lockutils.py:200 #, python-format -msgid "Instance type %s not found for deletion" +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." msgstr "" -#: cinder/compute/manager.py:138 +#: cinder/openstack/common/lockutils.py:227 #, python-format -msgid "check_instance_lock: decorating: |%s|" -msgstr "check_instance_lock: decorating: |%s|" +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" -#: cinder/compute/manager.py:140 +#: cinder/openstack/common/lockutils.py:235 #, python-format -msgid "" -"check_instance_lock: arguments: |%(self)s| |%(context)s| " -"|%(instance_uuid)s|" +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." msgstr "" -#: cinder/compute/manager.py:144 +#: cinder/openstack/common/log.py:301 #, python-format -msgid "check_instance_lock: locked: |%s|" -msgstr "check_instance_lock: locked: |%s|" +msgid "Deprecated: %s" +msgstr "" -#: cinder/compute/manager.py:146 +#: cinder/openstack/common/log.py:402 #, python-format -msgid "check_instance_lock: admin: |%s|" -msgstr "check_instance_lock: admin: |%s|" +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" -#: cinder/compute/manager.py:151 +#: cinder/openstack/common/log.py:453 #, python-format -msgid "check_instance_lock: executing: |%s|" -msgstr "check_instance_lock: executing: |%s|" - -#: cinder/compute/manager.py:155 -#, python-format -msgid "check_instance_lock: not executing |%s|" -msgstr "check_instance_lock: not executing |%s|" - -#: cinder/compute/manager.py:201 -#, python-format -msgid "Unable to load the virtualization driver: %s" +msgid "syslog facility must be one of: %s" msgstr "" -#: cinder/compute/manager.py:223 +#: cinder/openstack/common/log.py:623 #, python-format -msgid "" -"Instance %(instance_uuid)s has been destroyed from under us while trying " -"to set it to ERROR" +msgid "Fatal call to deprecated config: %(msg)s" msgstr "" -#: cinder/compute/manager.py:240 +#: cinder/openstack/common/loopingcall.py:82 #, python-format -msgid "Current state is %(drv_state)s, state in DB is %(db_state)s." +msgid "task run outlasted interval by %s sec" msgstr "" -#: cinder/compute/manager.py:245 -msgid "Rebooting instance after cinder-compute restart." 
+#: cinder/openstack/common/loopingcall.py:89 +#: cinder/tests/brick/test_brick_connector.py:466 +msgid "in fixed duration looping call" msgstr "" -#: cinder/compute/manager.py:255 -msgid "Hypervisor driver does not support firewall rules" +#: cinder/openstack/common/loopingcall.py:129 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" msgstr "" -#: cinder/compute/manager.py:260 -msgid "Checking state" +#: cinder/openstack/common/loopingcall.py:136 +msgid "in dynamic looping call" msgstr "" -#: cinder/compute/manager.py:329 +#: cinder/openstack/common/periodic_task.py:43 #, python-format -msgid "Setting up bdm %s" +msgid "Unexpected argument for periodic task creation: %(arg)s." msgstr "" -#: cinder/compute/manager.py:400 +#: cinder/openstack/common/periodic_task.py:134 #, python-format -msgid "Instance %s already deleted from database. Attempting forceful vm deletion" +msgid "Skipping periodic task %(task)s because its interval is negative" msgstr "" -#: cinder/compute/manager.py:406 +#: cinder/openstack/common/periodic_task.py:139 #, python-format -msgid "Exception encountered while terminating the instance %s" +msgid "Skipping periodic task %(task)s because it is disabled" msgstr "" -#: cinder/compute/manager.py:444 +#: cinder/openstack/common/periodic_task.py:177 #, python-format -msgid "Instance %s not found." +msgid "Running periodic task %(full_task_name)s" msgstr "" -#: cinder/compute/manager.py:480 -msgid "Instance has already been created" -msgstr "Instance has already been created" - -#: cinder/compute/manager.py:523 +#: cinder/openstack/common/periodic_task.py:186 #, python-format -msgid "" -"image_id=%(image_id)s, image_size_bytes=%(size_bytes)d, " -"allowed_size_bytes=%(allowed_size_bytes)d" +msgid "Error during %(full_task_name)s: %(e)s" msgstr "" -#: cinder/compute/manager.py:528 +#: cinder/openstack/common/policy.py:149 #, python-format msgid "" -"Image '%(image_id)s' size %(size_bytes)d exceeded instance_type allowed " -"size %(allowed_size_bytes)d" -msgstr "" - -#: cinder/compute/manager.py:538 -msgid "Starting instance..." +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." 
msgstr "" -#: cinder/compute/manager.py:548 -msgid "Skipping network allocation for instance" -msgstr "" - -#: cinder/compute/manager.py:561 -msgid "Instance failed network setup" +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" msgstr "" -#: cinder/compute/manager.py:565 +#: cinder/openstack/common/policy.py:173 #, python-format -msgid "Instance network_info: |%s|" +msgid "Inheritance-based rules are deprecated; update _check_%s" msgstr "" -#: cinder/compute/manager.py:578 -msgid "Instance failed block device setup" +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" msgstr "" -#: cinder/compute/manager.py:594 -msgid "Instance failed to spawn" +#: cinder/openstack/common/processutils.py:127 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" msgstr "" -#: cinder/compute/manager.py:615 -msgid "Deallocating network for instance" -msgstr "" +#: cinder/openstack/common/processutils.py:142 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "Running cmd (subprocess): %s" -#: cinder/compute/manager.py:672 +#: cinder/openstack/common/processutils.py:167 +#: cinder/openstack/common/processutils.py:239 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:345 #, python-format -msgid "%(action_str)s instance" -msgstr "" +msgid "Result was %s" +msgstr "Result was %s" -#: cinder/compute/manager.py:699 +#: cinder/openstack/common/processutils.py:179 #, python-format -msgid "Ignoring DiskNotFound: %s" +msgid "%r failed. Retrying." msgstr "" -#: cinder/compute/manager.py:708 +#: cinder/openstack/common/processutils.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:318 #, python-format -msgid "terminating bdm %s" +msgid "Running cmd (SSH): %s" +msgstr "Running cmd (SSH): %s" + +#: cinder/openstack/common/processutils.py:220 +msgid "Environment not supported over SSH" msgstr "" -#: cinder/compute/manager.py:742 cinder/compute/manager.py:1328 -#: cinder/compute/manager.py:1416 cinder/compute/manager.py:2501 -#, python-format -msgid "%s. Setting instance vm_state to ERROR" +#: cinder/openstack/common/processutils.py:224 +msgid "process_input not supported over SSH" msgstr "" -#: cinder/compute/manager.py:811 +#: cinder/openstack/common/service.py:175 +#: cinder/openstack/common/service.py:269 #, python-format -msgid "" -"Cannot rebuild instance [%(instance_uuid)s], because the given image does" -" not exist." +msgid "Caught %s, exiting" msgstr "" -#: cinder/compute/manager.py:816 -#, python-format -msgid "Cannot rebuild instance [%(instance_uuid)s]: %(exc)s" +#: cinder/openstack/common/service.py:187 +msgid "Exception during rpc cleanup." 
msgstr "" -#: cinder/compute/manager.py:823 -#, python-format -msgid "Rebuilding instance %s" +#: cinder/openstack/common/service.py:238 +msgid "Parent process has died unexpectedly, exiting" msgstr "" -#: cinder/compute/manager.py:876 -#, python-format -msgid "Rebooting instance %s" -msgstr "Rebooting instance %s" +#: cinder/openstack/common/service.py:275 +#, fuzzy +msgid "Unhandled exception" +msgstr "Unhandled exception" -#: cinder/compute/manager.py:891 -#, python-format -msgid "" -"trying to reboot a non-running instance: %(instance_uuid)s (state: " -"%(state)s expected: %(running)s)" +#: cinder/openstack/common/service.py:308 +msgid "Forking too fast, sleeping" msgstr "" -#: cinder/compute/manager.py:933 +#: cinder/openstack/common/service.py:327 #, python-format -msgid "instance %s: snapshotting" -msgstr "instance %s: snapshotting" +msgid "Started child %d" +msgstr "" -#: cinder/compute/manager.py:939 +#: cinder/openstack/common/service.py:337 #, python-format -msgid "" -"trying to snapshot a non-running instance: %(instance_uuid)s (state: " -"%(state)s expected: %(running)s)" +msgid "Starting %d workers" msgstr "" -#: cinder/compute/manager.py:995 +#: cinder/openstack/common/service.py:354 #, python-format -msgid "Found %(num_images)d images (rotation: %(rotation)d)" +msgid "Child %(pid)d killed by signal %(sig)d" msgstr "" -#: cinder/compute/manager.py:1001 +#: cinder/openstack/common/service.py:358 #, python-format -msgid "Rotating out %d backups" +msgid "Child %(pid)s exited with status %(code)d" msgstr "" -#: cinder/compute/manager.py:1005 +#: cinder/openstack/common/service.py:362 #, python-format -msgid "Deleting image %s" +msgid "pid %d not in child list" msgstr "" -#: cinder/compute/manager.py:1035 +#: cinder/openstack/common/service.py:392 #, python-format -msgid "Failed to set admin password. Instance %s is not running" +msgid "Caught %s, stopping children" msgstr "" -#: cinder/compute/manager.py:1041 +#: cinder/openstack/common/service.py:410 #, python-format -msgid "Instance %s: Root password set" +msgid "Waiting on %d children to exit" msgstr "" -#: cinder/compute/manager.py:1050 -msgid "set_admin_password is not implemented by this driver." +#: cinder/openstack/common/sslutils.py:98 +#, python-format +msgid "Invalid SSL version : %s" msgstr "" -#: cinder/compute/manager.py:1064 -msgid "Error setting admin password" +#: cinder/openstack/common/strutils.py:86 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" msgstr "" -#: cinder/compute/manager.py:1079 +#: cinder/openstack/common/strutils.py:182 #, python-format -msgid "" -"trying to inject a file into a non-running instance: %(instance_uuid)s " -"(state: %(current_power_state)s expected: %(expected_state)s)" +msgid "Invalid string format: %s" msgstr "" -#: cinder/compute/manager.py:1084 +#: cinder/openstack/common/strutils.py:189 #, python-format -msgid "instance %(instance_uuid)s: injecting file to %(path)s" +msgid "Unknown byte multiplier: %s" msgstr "" -#: cinder/compute/manager.py:1098 +#: cinder/openstack/common/versionutils.py:69 #, python-format msgid "" -"trying to update agent on a non-running instance: %(instance_uuid)s " -"(state: %(current_power_state)s expected: %(expected_state)s)" +"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and " +"may be removed in %(remove_in)s." 
msgstr "" -#: cinder/compute/manager.py:1103 +#: cinder/openstack/common/versionutils.py:73 #, python-format -msgid "instance %(instance_uuid)s: updating agent to %(url)s" +msgid "" +"%(what)s is deprecated as of %(as_of)s and may be removed in " +"%(remove_in)s. It will not be superseded." msgstr "" -#: cinder/compute/manager.py:1116 -#, python-format -msgid "instance %s: rescuing" -msgstr "instance %s: rescuing" +#: cinder/openstack/common/crypto/utils.py:29 +msgid "An unknown error occurred in crypto utils." +msgstr "" -#: cinder/compute/manager.py:1141 +#: cinder/openstack/common/crypto/utils.py:36 #, python-format -msgid "instance %s: unrescuing" +msgid "Block size of %(given)d is too big, max = %(maximum)d" msgstr "" -#: cinder/compute/manager.py:1270 -msgid "destination same as source!" +#: cinder/openstack/common/crypto/utils.py:45 +#, python-format +msgid "Length of %(given)d is too long, max = %(maximum)d" msgstr "" -#: cinder/compute/manager.py:1287 -#, python-format -msgid "instance %s: migrating" +#: cinder/openstack/common/db/exception.py:44 +msgid "Invalid Parameter: Unicode is not supported by the current database." msgstr "" -#: cinder/compute/manager.py:1471 -#, python-format -msgid "instance %s: pausing" -msgstr "instance %s: pausing" +#: cinder/openstack/common/db/sqlalchemy/session.py:487 +msgid "DB exception wrapped." +msgstr "DB exception wrapped." -#: cinder/compute/manager.py:1489 +#: cinder/openstack/common/db/sqlalchemy/session.py:538 #, python-format -msgid "instance %s: unpausing" +msgid "Got mysql server has gone away: %s" msgstr "" -#: cinder/compute/manager.py:1525 -#, python-format -msgid "instance %s: retrieving diagnostics" -msgstr "instance %s: retrieving diagnostics" - -#: cinder/compute/manager.py:1534 +#: cinder/openstack/common/db/sqlalchemy/session.py:610 #, python-format -msgid "instance %s: suspending" -msgstr "instance %s: suspending" +msgid "SQL connection failed. %s attempts left." +msgstr "" -#: cinder/compute/manager.py:1556 -#, python-format -msgid "instance %s: resuming" -msgstr "instance %s: resuming" +#: cinder/openstack/common/db/sqlalchemy/utils.py:33 +msgid "Sort key supplied was not valid." +msgstr "" -#: cinder/compute/manager.py:1579 +#: cinder/openstack/common/notifier/api.py:129 #, python-format -msgid "instance %s: locking" -msgstr "instance %s: locking" +msgid "%s not in valid priorities" +msgstr "" -#: cinder/compute/manager.py:1588 +#: cinder/openstack/common/notifier/api.py:145 #, python-format -msgid "instance %s: unlocking" -msgstr "instance %s: unlocking" +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" -#: cinder/compute/manager.py:1596 +#: cinder/openstack/common/notifier/api.py:164 #, python-format -msgid "instance %s: getting locked state" -msgstr "instance %s: getting locked state" +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" -#: cinder/compute/manager.py:1606 -#, python-format -msgid "instance %s: reset network" -msgstr "instance %s: reset network" +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead." +msgstr "" -#: cinder/compute/manager.py:1614 +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 #, python-format -msgid "instance %s: inject network info" +msgid "Could not send notification to %(topic)s. 
Payload=%(message)s"
 msgstr ""

-#: cinder/compute/manager.py:1618
+#: cinder/openstack/common/rpc/__init__.py:105
 #, python-format
-msgid "network_info to inject: |%s|"
+msgid ""
+"An RPC is being made while holding a lock. The locks currently held are "
+"%(locks)s. This is probably a bug. Please report it. Include the "
+"following: [%(stack)s]."
 msgstr ""

-#: cinder/compute/manager.py:1655
-#, python-format
-msgid "instance %s: getting vnc console"
+#: cinder/openstack/common/rpc/amqp.py:83
+msgid "Pool creating new connection"
 msgstr ""

-#: cinder/compute/manager.py:1685
+#: cinder/openstack/common/rpc/amqp.py:208
 #, python-format
-msgid "Booting with volume %(volume_id)s at %(mountpoint)s"
+msgid "no calling threads waiting for msg_id : %s, message : %s"
 msgstr ""

-#: cinder/compute/manager.py:1703
+#: cinder/openstack/common/rpc/amqp.py:216
 #, python-format
 msgid ""
-"instance %(instance_uuid)s: attaching volume %(volume_id)s to "
-"%(mountpoint)s"
+"Number of call waiters is greater than warning threshold: %d. There "
+"could be a MulticallProxyWaiter leak."
 msgstr ""

-#: cinder/compute/manager.py:1705
+#: cinder/openstack/common/rpc/amqp.py:299
 #, python-format
-msgid "Attaching volume %(volume_id)s to %(mountpoint)s"
+msgid "unpacked context: %s"
 msgstr ""

-#: cinder/compute/manager.py:1714
+#: cinder/openstack/common/rpc/amqp.py:345
 #, python-format
-msgid "instance %(instance_uuid)s: attach failed %(mountpoint)s, removing"
+msgid "UNIQUE_ID is %s."
 msgstr ""

-#: cinder/compute/manager.py:1724
+#: cinder/openstack/common/rpc/amqp.py:414
 #, python-format
-msgid "Attach failed %(mountpoint)s, removing"
+msgid "received %s"
 msgstr ""

-#: cinder/compute/manager.py:1752
+#: cinder/openstack/common/rpc/amqp.py:422
 #, python-format
-msgid "Detach volume %(volume_id)s from mountpoint %(mp)s"
+msgid "no method for message: %s"
 msgstr ""

-#: cinder/compute/manager.py:1756
+#: cinder/openstack/common/rpc/amqp.py:423
 #, python-format
-msgid "Detaching volume from unknown instance %s"
-msgstr "Detaching volume from unknown instance %s"
+msgid "No method for message: %s"
+msgstr ""

-#: cinder/compute/manager.py:1822
+#: cinder/openstack/common/rpc/amqp.py:451
+#: cinder/openstack/common/rpc/impl_zmq.py:280
 #, python-format
-msgid ""
-"Creating tmpfile %s to notify to other compute nodes that they should "
-"mount the same storage."
+msgid "Expected exception during message handling (%s)"
 msgstr ""

-#: cinder/compute/manager.py:1884
-msgid "Instance has no volume."
+#: cinder/openstack/common/rpc/amqp.py:459
+#: cinder/openstack/common/rpc/impl_zmq.py:286
+msgid "Exception during message handling"
 msgstr ""

-#: cinder/compute/manager.py:1916
+#: cinder/openstack/common/rpc/amqp.py:594
 #, python-format
-msgid "plug_vifs() failed %(cnt)d.Retry up to %(max_retry)d for %(hostname)s."
+msgid "Making synchronous call on %s ..."
 msgstr ""

-#: cinder/compute/manager.py:1973
+#: cinder/openstack/common/rpc/amqp.py:597
 #, python-format
-msgid "Pre live migration failed at %(dest)s"
+msgid "MSG_ID is %s"
 msgstr ""

-#: cinder/compute/manager.py:2000
-msgid "post_live_migration() is started.."
+#: cinder/openstack/common/rpc/amqp.py:631
+#, python-format
+msgid "Making asynchronous cast on %s..."
 msgstr ""

-#: cinder/compute/manager.py:2030
-msgid "No floating_ip found"
+#: cinder/openstack/common/rpc/amqp.py:640
+msgid "Making asynchronous fanout cast..."
 msgstr ""

-#: cinder/compute/manager.py:2038
-msgid "No floating_ip found."
+#: cinder/openstack/common/rpc/amqp.py:668 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" msgstr "" -#: cinder/compute/manager.py:2040 +#: cinder/openstack/common/rpc/common.py:74 +#, fuzzy +msgid "An unknown RPC related exception occurred." +msgstr "An unknown RPC related exception occurred" + +#: cinder/openstack/common/rpc/common.py:104 #, python-format msgid "" -"Live migration: Unexpected error: cannot inherit floating ip.\n" -"%(e)s" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." msgstr "" -#: cinder/compute/manager.py:2073 +#: cinder/openstack/common/rpc/common.py:121 #, python-format -msgid "Migrating instance to %(dest)s finished successfully." -msgstr "" - -#: cinder/compute/manager.py:2075 msgid "" -"You may see the error \"libvirt: QEMU error: Domain not found: no domain " -"with matching name.\" This error can be safely ignored." +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" msgstr "" -#: cinder/compute/manager.py:2090 -msgid "Post operation of migration started" +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" msgstr "" -#: cinder/compute/manager.py:2226 +#: cinder/openstack/common/rpc/common.py:143 #, python-format -msgid "Updated the info_cache for instance %s" +msgid "Found duplicate message(%(msg_id)s). Skipping it." msgstr "" -#: cinder/compute/manager.py:2255 -msgid "Updating bandwidth usage cache" -msgstr "" +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "Invalid reuse of an RPC connection." -#: cinder/compute/manager.py:2277 -msgid "Updating host status" +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." msgstr "" -#: cinder/compute/manager.py:2305 +#: cinder/openstack/common/rpc/common.py:156 #, python-format msgid "" -"Found %(num_db_instances)s in the database and %(num_vm_instances)s on " -"the hypervisor." +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." msgstr "" -#: cinder/compute/manager.py:2331 +#: cinder/openstack/common/rpc/common.py:280 #, python-format -msgid "" -"During the sync_power process the instance %(uuid)s has moved from host " -"%(src)s to host %(dst)s" +msgid "Failed to sanitize %(item)s. Key error %(err)s" msgstr "" -#: cinder/compute/manager.py:2344 +#: cinder/openstack/common/rpc/common.py:302 #, python-format -msgid "" -"Instance %s is in the process of migrating to this host. Wait next " -"sync_power cycle before setting power state to NOSTATE" +msgid "Returning exception %s to caller" msgstr "" -#: cinder/compute/manager.py:2350 -msgid "" -"Instance found in database but not known by hypervisor. Setting power " -"state to NOSTATE" +#: cinder/openstack/common/rpc/impl_kombu.py:166 +#: cinder/openstack/common/rpc/impl_qpid.py:164 +msgid "Failed to process message... skipping it." msgstr "" -#: cinder/compute/manager.py:2380 -msgid "FLAGS.reclaim_instance_interval <= 0, skipping..." 
+#: cinder/openstack/common/rpc/impl_kombu.py:477 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" msgstr "" -#: cinder/compute/manager.py:2392 -msgid "Reclaiming deleted instance" +#: cinder/openstack/common/rpc/impl_kombu.py:499 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" msgstr "" -#: cinder/compute/manager.py:2458 +#: cinder/openstack/common/rpc/impl_kombu.py:536 #, python-format msgid "" -"Detected instance with name label '%(name)s' which is marked as DELETED " -"but still present on host." +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" msgstr "" -#: cinder/compute/manager.py:2465 +#: cinder/openstack/common/rpc/impl_kombu.py:552 #, python-format msgid "" -"Destroying instance with name label '%(name)s' which is marked as DELETED" -" but still present on host." +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." msgstr "" -#: cinder/compute/manager.py:2472 +#: cinder/openstack/common/rpc/impl_kombu.py:606 +#: cinder/openstack/common/rpc/impl_qpid.py:507 #, python-format -msgid "Unrecognized value '%(action)s' for FLAGS.running_deleted_instance_action" +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" msgstr "" -#: cinder/compute/manager.py:2542 +#: cinder/openstack/common/rpc/impl_kombu.py:624 +#: cinder/openstack/common/rpc/impl_qpid.py:522 #, python-format -msgid "" -"Aggregate %(aggregate_id)s: unrecoverable state during operation on " -"%(host)s" -msgstr "" - -#: cinder/compute/utils.py:142 -msgid "v4 subnets are required for legacy nw_info" +msgid "Timed out waiting for RPC response: %s" msgstr "" -#: cinder/console/manager.py:77 cinder/console/vmrc_manager.py:70 -msgid "Adding console" +#: cinder/openstack/common/rpc/impl_kombu.py:628 +#: cinder/openstack/common/rpc/impl_qpid.py:526 +#, python-format +msgid "Failed to consume message from queue: %s" msgstr "" -#: cinder/console/manager.py:97 +#: cinder/openstack/common/rpc/impl_kombu.py:667 +#: cinder/openstack/common/rpc/impl_qpid.py:561 #, python-format -msgid "Tried to remove non-existent console %(console_id)s." +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" msgstr "" -#: cinder/console/vmrc_manager.py:122 +#: cinder/openstack/common/rpc/impl_qpid.py:84 #, python-format -msgid "Tried to remove non-existent console %(console_id)s." +msgid "Invalid value for qpid_topology_version: %d" msgstr "" -#: cinder/console/vmrc_manager.py:125 +#: cinder/openstack/common/rpc/impl_qpid.py:455 #, python-format -msgid "Removing console %(console_id)s." +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" msgstr "" -#: cinder/console/xvp.py:98 -msgid "Rebuilding xvp conf" +#: cinder/openstack/common/rpc/impl_qpid.py:461 +#, python-format +msgid "Connected to AMQP server on %s" msgstr "" -#: cinder/console/xvp.py:116 -#, python-format -msgid "Re-wrote %s" +#: cinder/openstack/common/rpc/impl_qpid.py:474 +msgid "Re-established AMQP queues" msgstr "" -#: cinder/console/xvp.py:121 -msgid "Stopping xvp" +#: cinder/openstack/common/rpc/impl_qpid.py:534 +msgid "Error processing message. Skipping it." msgstr "" -#: cinder/console/xvp.py:134 -msgid "Starting xvp" +#: cinder/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." 
msgstr "" -#: cinder/console/xvp.py:141 +#: cinder/openstack/common/rpc/impl_zmq.py:101 #, python-format -msgid "Error starting xvp: %s" +msgid "Deserializing: %s" msgstr "" -#: cinder/console/xvp.py:144 -msgid "Restarting xvp" +#: cinder/openstack/common/rpc/impl_zmq.py:136 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" msgstr "" -#: cinder/console/xvp.py:146 -msgid "xvp not running..." +#: cinder/openstack/common/rpc/impl_zmq.py:137 +#, python-format +msgid "-> Subscribed to %(subscribe)s" msgstr "" -#: cinder/consoleauth/manager.py:63 +#: cinder/openstack/common/rpc/impl_zmq.py:138 #, python-format -msgid "Deleting Expired Token: (%s)" +msgid "-> bind: %(bind)s" msgstr "" -#: cinder/consoleauth/manager.py:75 -#, python-format -msgid "Received Token: %(token)s, %(token_dict)s)" +#: cinder/openstack/common/rpc/impl_zmq.py:146 +msgid "Could not open socket." msgstr "" -#: cinder/consoleauth/manager.py:79 +#: cinder/openstack/common/rpc/impl_zmq.py:158 #, python-format -msgid "Checking Token: %(token)s, %(token_valid)s)" +msgid "Subscribing to %s" msgstr "" -#: cinder/db/sqlalchemy/api.py:57 -msgid "Use of empty request context is deprecated" -msgstr "Use of empty request context is deprecated" +#: cinder/openstack/common/rpc/impl_zmq.py:200 +msgid "You cannot recv on this socket." +msgstr "" -#: cinder/db/sqlalchemy/api.py:198 -#, python-format -msgid "Unrecognized read_deleted value '%s'" +#: cinder/openstack/common/rpc/impl_zmq.py:205 +msgid "You cannot send on this socket." msgstr "" -#: cinder/db/sqlalchemy/api.py:516 cinder/db/sqlalchemy/api.py:551 +#: cinder/openstack/common/rpc/impl_zmq.py:267 #, python-format -msgid "No ComputeNode for %(host)s" +msgid "Running func with context: %s" msgstr "" -#: cinder/db/sqlalchemy/api.py:4019 cinder/db/sqlalchemy/api.py:4045 -#, python-format -msgid "No backend config with id %(sm_backend_id)s" +#: cinder/openstack/common/rpc/impl_zmq.py:305 +msgid "Sending reply" msgstr "" -#: cinder/db/sqlalchemy/api.py:4103 -#, python-format -msgid "No sm_flavor called %(sm_flavor)s" +#: cinder/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." msgstr "" -#: cinder/db/sqlalchemy/api.py:4147 -#, python-format -msgid "No sm_volume with id %(volume_id)s" +#: cinder/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" msgstr "" -#: cinder/db/sqlalchemy/migration.py:66 -msgid "python-migrate is not installed. Exiting." +#: cinder/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" msgstr "" -#: cinder/db/sqlalchemy/migration.py:78 -msgid "version should be an integer" +#: cinder/openstack/common/rpc/impl_zmq.py:387 +msgid "Consuming socket" msgstr "" -#: cinder/db/sqlalchemy/session.py:137 +#: cinder/openstack/common/rpc/impl_zmq.py:437 #, python-format -msgid "SQL connection failed. %s attempts left." +msgid "Creating proxy for topic: %s" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:48 -msgid "interface column not added to networks table" +#: cinder/openstack/common/rpc/impl_zmq.py:443 +msgid "Topic contained dangerous characters." msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:80 -#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:54 -#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:61 -#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:48 -#, python-format -msgid "Table |%s| not created!" 
+#: cinder/openstack/common/rpc/impl_zmq.py:475 +msgid "Topic socket file creation failed." msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:87 -msgid "VIF column not added to fixed_ips table" +#: cinder/openstack/common/rpc/impl_zmq.py:481 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:97 +#: cinder/openstack/common/rpc/impl_zmq.py:497 #, python-format -msgid "join list for moving mac_addresses |%s|" +msgid "Required IPC directory does not exist at %s" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:39 -#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:60 -#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:61 -#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:99 -msgid "foreign key constraint couldn't be added" +#: cinder/openstack/common/rpc/impl_zmq.py:506 +#, python-format +msgid "Permission denied to IPC directory at %s" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:58 -msgid "foreign key constraint couldn't be dropped" +#: cinder/openstack/common/rpc/impl_zmq.py:509 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/045_add_network_priority.py:34 -msgid "priority column not added to networks table" +#: cinder/openstack/common/rpc/impl_zmq.py:543 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:41 -#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:42 -#: cinder/db/sqlalchemy/migrate_repo/versions/064_change_instance_id_to_uuid_in_instance_actions.py:56 -#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:68 -msgid "foreign key constraint couldn't be removed" +#: cinder/openstack/common/rpc/impl_zmq.py:562 +msgid "ZMQ Envelope version unsupported or unknown." msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/049_add_instances_progress.py:34 -msgid "progress column not added to instances table" +#: cinder/openstack/common/rpc/impl_zmq.py:590 +msgid "Skipping topic registration. Already registered." msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/055_convert_flavor_id_to_str.py:97 +#: cinder/openstack/common/rpc/impl_zmq.py:597 #, python-format -msgid "" -"Could not cast flavorid to integer: %s. Set flavorid to an integer-like " -"string to downgrade." 
+msgid "Consumer is a zmq.%s" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:69 -msgid "instance_info_caches tables not dropped" +#: cinder/openstack/common/rpc/impl_zmq.py:649 +msgid "Creating payload" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/069_block_migration.py:41 -msgid "progress column not added to compute_nodes table" +#: cinder/openstack/common/rpc/impl_zmq.py:662 +msgid "Creating queue socket for reply waiter" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:76 -msgid "dns_domains table not dropped" +#: cinder/openstack/common/rpc/impl_zmq.py:675 +msgid "Sending cast" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:60 -msgid "quota_classes table not dropped" +#: cinder/openstack/common/rpc/impl_zmq.py:678 +msgid "Cast sent; Waiting reply" msgstr "" -#: cinder/image/glance.py:147 -msgid "Connection error contacting glance server, retrying" +#: cinder/openstack/common/rpc/impl_zmq.py:681 +#, fuzzy, python-format +msgid "Received message: %s" +msgstr "Received message: %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:682 +msgid "Unpacking response" msgstr "" -#: cinder/image/glance.py:153 cinder/network/quantum/melange_connection.py:104 -msgid "Maximum attempts reached" +#: cinder/openstack/common/rpc/impl_zmq.py:691 +msgid "Unsupported or unknown ZMQ envelope returned." msgstr "" -#: cinder/image/glance.py:278 +#: cinder/openstack/common/rpc/impl_zmq.py:698 +#, fuzzy +msgid "RPC Message Invalid." +msgstr "RPC Message Invalid." + +#: cinder/openstack/common/rpc/impl_zmq.py:721 #, python-format -msgid "Creating image in Glance. Metadata passed in %s" +msgid "%(msg)s" msgstr "" -#: cinder/image/glance.py:281 +#: cinder/openstack/common/rpc/impl_zmq.py:724 #, python-format -msgid "Metadata after formatting for Glance %s" +msgid "Sending message(s) to: %s" msgstr "" -#: cinder/image/glance.py:289 -#, python-format -msgid "Metadata returned from Glance formatted for Base %s" +#: cinder/openstack/common/rpc/impl_zmq.py:728 +msgid "No matchmaker results. Not casting." msgstr "" -#: cinder/image/glance.py:331 cinder/image/glance.py:335 -msgid "Not the image owner" +#: cinder/openstack/common/rpc/impl_zmq.py:731 +msgid "No match from matchmaker." msgstr "" -#: cinder/image/glance.py:410 +#: cinder/openstack/common/rpc/impl_zmq.py:771 #, python-format -msgid "%(timestamp)s does not follow any of the signatures: %(iso_formats)s" +msgid "topic is %s." msgstr "" -#: cinder/image/s3.py:309 +#: cinder/openstack/common/rpc/impl_zmq.py:815 #, python-format -msgid "Failed to download %(image_location)s to %(image_path)s" +msgid "rpc_zmq_matchmaker = %(orig)s is deprecated; use %(new)s instead" msgstr "" -#: cinder/image/s3.py:328 -#, python-format -msgid "Failed to decrypt %(image_location)s to %(image_path)s" +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." msgstr "" -#: cinder/image/s3.py:340 +#: cinder/openstack/common/rpc/matchmaker.py:89 +#, fuzzy +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "Matchmaker does not implement registration or heartbeat." 
+ +#: cinder/openstack/common/rpc/matchmaker.py:239 #, python-format -msgid "Failed to untar %(image_location)s to %(image_path)s" +msgid "Matchmaker unregistered: %s, %s" msgstr "" -#: cinder/image/s3.py:353 -#, python-format -msgid "Failed to upload %(image_location)s to %(image_path)s" +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." msgstr "" -#: cinder/image/s3.py:379 +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#: cinder/openstack/common/rpc/matchmaker_ring.py:79 +#: cinder/openstack/common/rpc/matchmaker_ring.py:97 #, python-format -msgid "Failed to decrypt private key: %s" +msgid "No key defining hosts for topic '%s', see ringfile" msgstr "" -#: cinder/image/s3.py:387 +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:54 #, python-format -msgid "Failed to decrypt initialization vector: %s" +msgid "extra_spec requirement '%(req)s' does not match '%(cap)s'" msgstr "" -#: cinder/image/s3.py:398 +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:67 #, python-format -msgid "Failed to decrypt image file %(image_file)s: %(err)s" +msgid "%(host_state)s fails resource_type extra_specs requirements" msgstr "" -#: cinder/image/s3.py:410 -msgid "Unsafe filenames in image" +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:43 +msgid "Re-scheduling is disabled." msgstr "" -#: cinder/ipv6/account_identifier.py:38 cinder/ipv6/rfc2462.py:34 +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:52 #, python-format -msgid "Bad mac for to_global_ipv6: %s" +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" msgstr "" -#: cinder/ipv6/account_identifier.py:40 cinder/ipv6/rfc2462.py:36 -#, python-format -msgid "Bad prefix for to_global_ipv6: %s" +#: cinder/scheduler/driver.py:69 +msgid "Must implement host_passes_filters" msgstr "" -#: cinder/ipv6/account_identifier.py:42 -#, python-format -msgid "Bad project_id for to_global_ipv6: %s" +#: cinder/scheduler/driver.py:74 +msgid "Must implement find_retype_host" +msgstr "" + +#: cinder/scheduler/driver.py:78 +msgid "Must implement a fallback schedule" msgstr "" -#: cinder/network/ldapdns.py:321 -msgid "This driver only supports type 'a' entries." 
+#: cinder/scheduler/driver.py:82 +msgid "Must implement schedule_create_volume" msgstr "" -#: cinder/network/linux_net.py:166 +#: cinder/scheduler/filter_scheduler.py:98 #, python-format -msgid "Attempted to remove chain %s which does not exist" +msgid "cannot place volume %(id)s on %(host)s" msgstr "" -#: cinder/network/linux_net.py:192 +#: cinder/scheduler/filter_scheduler.py:114 #, python-format -msgid "Unknown chain: %r" +msgid "No valid hosts for volume %(id)s with type %(type)s" msgstr "" -#: cinder/network/linux_net.py:215 +#: cinder/scheduler/filter_scheduler.py:125 #, python-format msgid "" -"Tried to remove rule that was not there: %(chain)r %(rule)r %(wrap)r " -"%(top)r" +"Current host not valid for volume %(id)s with type %(type)s, migration " +"not allowed" msgstr "" -#: cinder/network/linux_net.py:335 -msgid "IPTablesManager.apply completed with success" +#: cinder/scheduler/filter_scheduler.py:156 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" msgstr "" -#: cinder/network/linux_net.py:694 +#: cinder/scheduler/filter_scheduler.py:174 #, python-format -msgid "Hupping dnsmasq threw %s" -msgstr "Hupping dnsmasq threw %s" - -#: cinder/network/linux_net.py:696 -#, python-format -msgid "Pid %d is stale, relaunching dnsmasq" -msgstr "Pid %d is stale, relaunching dnsmasq" +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" -#: cinder/network/linux_net.py:756 +#: cinder/scheduler/filter_scheduler.py:207 #, python-format -msgid "killing radvd threw %s" -msgstr "killing radvd threw %s" +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" -#: cinder/network/linux_net.py:758 +#: cinder/scheduler/filter_scheduler.py:259 #, python-format -msgid "Pid %d is stale, relaunching radvd" -msgstr "Pid %d is stale, relaunching radvd" +msgid "Filtered %s" +msgstr "" -#: cinder/network/linux_net.py:967 +#: cinder/scheduler/filter_scheduler.py:276 #, python-format -msgid "Starting VLAN inteface %s" -msgstr "Starting VLAN inteface %s" +msgid "Choosing %s" +msgstr "" -#: cinder/network/linux_net.py:999 +#: cinder/scheduler/host_manager.py:264 #, python-format -msgid "Starting Bridge interface for %s" -msgstr "Starting Bridge interface for %s" - -#: cinder/network/linux_net.py:1142 -#, fuzzy, python-format -msgid "Starting bridge %s " -msgstr "Starting Bridge interface for %s" +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" -#: cinder/network/linux_net.py:1149 +#: cinder/scheduler/host_manager.py:269 #, python-format -msgid "Done starting bridge %s" +msgid "Received %(service_name)s service update from %(host)s." msgstr "" -#: cinder/network/linux_net.py:1167 +#: cinder/scheduler/host_manager.py:294 #, python-format -msgid "Failed unplugging gateway interface '%s'" +msgid "volume service is down or disabled. (host: %s)" msgstr "" -#: cinder/network/linux_net.py:1170 -#, python-format -msgid "Unplugged gateway interface '%s'" +#: cinder/scheduler/manager.py:63 +msgid "" +"ChanceScheduler and SimpleScheduler have been deprecated due to lack of " +"support for advanced features like: volume types, volume encryption, QoS " +"etc. These two schedulers can be fully replaced by FilterScheduler with " +"certain combination of filters and weighers." 
msgstr "" -#: cinder/network/manager.py:291 -#, python-format -msgid "Fixed ip %(fixed_ip_id)s not found" +#: cinder/scheduler/manager.py:98 cinder/scheduler/manager.py:100 +msgid "Failed to create scheduler manager volume flow" msgstr "" -#: cinder/network/manager.py:300 cinder/network/manager.py:496 -#, python-format -msgid "Interface %(interface)s not found" +#: cinder/scheduler/manager.py:159 +msgid "New volume type not specified in request_spec." msgstr "" -#: cinder/network/manager.py:315 +#: cinder/scheduler/manager.py:174 #, python-format -msgid "floating IP allocation for instance |%s|" +msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." msgstr "" -#: cinder/network/manager.py:353 +#: cinder/scheduler/manager.py:192 #, python-format -msgid "floating IP deallocation for instance |%s|" +msgid "Failed to schedule_%(method)s: %(ex)s" msgstr "" -#: cinder/network/manager.py:386 +#: cinder/scheduler/scheduler_options.py:68 #, python-format -msgid "Address |%(address)s| is not allocated" +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" msgstr "" -#: cinder/network/manager.py:390 +#: cinder/scheduler/scheduler_options.py:78 #, python-format -msgid "Address |%(address)s| is not allocated to your project |%(project)s|" +msgid "Could not decode scheduler options: '%s'" msgstr "" -#: cinder/network/manager.py:402 -#, python-format -msgid "Quota exceeded for %s, tried to allocate address" +#: cinder/scheduler/filters/capacity_filter.py:43 +msgid "Free capacity not set: volume node info collection broken." msgstr "" -#: cinder/network/manager.py:614 +#: cinder/scheduler/filters/capacity_filter.py:57 #, python-format msgid "" -"Database inconsistency: DNS domain |%s| is registered in the Cinder db but " -"not visible to either the floating or instance DNS driver. It will be " -"ignored." +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" msgstr "" -#: cinder/network/manager.py:660 -#, python-format -msgid "Domain |%(domain)s| already exists, changing zone to |%(av_zone)s|." +#: cinder/scheduler/flows/create_volume.py:53 +msgid "No volume_id provided to populate a request_spec from" msgstr "" -#: cinder/network/manager.py:670 +#: cinder/scheduler/flows/create_volume.py:116 #, python-format -msgid "Domain |%(domain)s| already exists, changing project to |%(project)s|." 
+msgid "Failed to schedule_create_volume: %(cause)s" msgstr "" -#: cinder/network/manager.py:778 +#: cinder/scheduler/flows/create_volume.py:135 #, python-format -msgid "Disassociated %s stale fixed ip(s)" +msgid "Failed notifying on %(topic)s payload %(payload)s" msgstr "" -#: cinder/network/manager.py:782 -msgid "setting network host" +#: cinder/tests/fake_driver.py:57 cinder/volume/driver.py:784 +#, python-format +msgid "FAKE ISCSI: %s" msgstr "" -#: cinder/network/manager.py:896 +#: cinder/tests/fake_driver.py:76 cinder/volume/driver.py:884 #, python-format -msgid "network allocations for instance |%s|" +msgid "FAKE ISER: %s" msgstr "" -#: cinder/network/manager.py:901 -#, python-format -msgid "networks retrieved for instance |%(instance_id)s|: |%(networks)s|" +#: cinder/tests/fake_driver.py:97 +msgid "local_path not implemented" msgstr "" -#: cinder/network/manager.py:930 +#: cinder/tests/fake_driver.py:124 cinder/tests/fake_driver.py:129 #, python-format -msgid "network deallocation for instance |%s|" +msgid "LoggingVolumeDriver: %s" msgstr "" -#: cinder/network/manager.py:1152 +#: cinder/tests/fake_utils.py:70 #, python-format -msgid "" -"instance-dns-zone is |%(domain)s|, which is in availability zone " -"|%(zone)s|. Instance |%(instance)s| is in zone |%(zone2)s|. No DNS record" -" will be created." +msgid "Faking execution of cmd (subprocess): %s" msgstr "" -#: cinder/network/manager.py:1227 +#: cinder/tests/fake_utils.py:78 #, python-format -msgid "Unable to release %s because vif doesn't exist." +msgid "Faked command matched %s" msgstr "" -#: cinder/network/manager.py:1244 +#: cinder/tests/fake_utils.py:94 #, python-format -msgid "Leased IP |%(address)s|" +msgid "Faked command raised an exception %s" msgstr "" -#: cinder/network/manager.py:1248 +#: cinder/tests/fake_utils.py:97 #, python-format -msgid "IP %s leased that is not associated" +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" msgstr "" -#: cinder/network/manager.py:1256 +#: cinder/tests/test_misc.py:58 #, python-format -msgid "IP |%s| leased that isn't allocated" +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" msgstr "" -#: cinder/network/manager.py:1261 +#: cinder/tests/test_netapp_nfs.py:360 #, python-format -msgid "Released IP |%(address)s|" +msgid "Share %(share)s and file name %(file_name)s" msgstr "" -#: cinder/network/manager.py:1265 -#, python-format -msgid "IP %s released that is not associated" +#: cinder/tests/test_rbd.py:768 cinder/volume/drivers/rbd.py:175 +msgid "flush() not supported in this version of librbd" msgstr "" -#: cinder/network/manager.py:1268 +#: cinder/tests/test_storwize_svc.py:260 #, python-format -msgid "IP %s released that was not leased" +msgid "unrecognized argument %s" msgstr "" -#: cinder/network/manager.py:1331 -msgid "cidr already in use" +#: cinder/tests/test_storwize_svc.py:1507 +#, python-format +msgid "Run CLI command: %s" msgstr "" -#: cinder/network/manager.py:1334 +#: cinder/tests/test_storwize_svc.py:1510 #, python-format -msgid "requested cidr (%(cidr)s) conflicts with existing supernet (%(super)s)" +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" msgstr "" -#: cinder/network/manager.py:1345 +#: cinder/tests/test_storwize_svc.py:1515 #, python-format msgid "" -"requested cidr (%(cidr)s) conflicts with existing smaller cidr " -"(%(smaller)s)" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/network/manager.py:1404 -msgid "Network already exists!" 
+#: cinder/tests/test_volume_types.py:60 +#, python-format +msgid "Given data: %s" msgstr "" -#: cinder/network/manager.py:1423 +#: cinder/tests/test_volume_types.py:61 #, python-format -msgid "Network must be disassociated from project %s before delete" +msgid "Result data: %s" msgstr "" -#: cinder/network/manager.py:1832 -msgid "" -"The sum between the number of networks and the vlan start cannot be " -"greater than 4094" +#: cinder/tests/test_xiv_ds8k.py:102 +#, python-format +msgid "Volume not found for instance %(instance_id)s." msgstr "" -#: cinder/network/manager.py:1839 +#: cinder/tests/api/contrib/test_backups.py:741 +#, fuzzy +msgid "Invalid input" +msgstr "Invalid input" + +#: cinder/tests/integrated/test_login.py:29 +#, fuzzy, python-format +msgid "volume: %s" +msgstr "volume: %s" + +#: cinder/tests/integrated/api/client.py:32 #, python-format msgid "" -"The network range is not big enough to fit %(num_networks)s. Network size" -" is %(network_size)s" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" msgstr "" -#: cinder/network/minidns.py:65 -msgid "This driver only supports type 'a'" +#: cinder/tests/integrated/api/client.py:42 +msgid "Authentication error" msgstr "" -#: cinder/network/quantum/client.py:154 -msgid "Tenant ID not set" +#: cinder/tests/integrated/api/client.py:50 +msgid "Authorization error" msgstr "" -#: cinder/network/quantum/client.py:180 -#, python-format -msgid "Quantum Client Request: %(method)s %(action)s" +#: cinder/tests/integrated/api/client.py:58 +msgid "Item not found" msgstr "" -#: cinder/network/quantum/client.py:196 +#: cinder/tests/integrated/api/client.py:100 #, python-format -msgid "Quantum entity not found: %s" +msgid "Doing %(method)s on %(relative_url)s" msgstr "" -#: cinder/network/quantum/client.py:206 +#: cinder/tests/integrated/api/client.py:103 #, python-format -msgid "Server %(status_code)s error: %(data)s" +msgid "Body: %s" msgstr "" -#: cinder/network/quantum/client.py:210 +#: cinder/tests/integrated/api/client.py:121 #, python-format -msgid "Unable to connect to server. Got error: %s" +msgid "%(auth_uri)s => code %(http_status)s" msgstr "" -#: cinder/network/quantum/client.py:228 +#: cinder/tests/integrated/api/client.py:148 #, python-format -msgid "unable to deserialize object of type = '%s'" -msgstr "" - -#: cinder/network/quantum/manager.py:162 -msgid "QuantumManager does not use 'multi_host' parameter." +msgid "%(relative_uri)s => code %(http_status)s" msgstr "" -#: cinder/network/quantum/manager.py:166 -msgid "QuantumManager requires that only one network is created per call" +#: cinder/tests/integrated/api/client.py:159 +msgid "Unexpected status code" msgstr "" -#: cinder/network/quantum/manager.py:176 -msgid "QuantumManager does not use 'vlan_start' parameter." +#: cinder/tests/integrated/api/client.py:166 +#, python-format +msgid "Decoding JSON: %s" msgstr "" -#: cinder/network/quantum/manager.py:182 -msgid "QuantumManager does not use 'vpn_start' parameter." +#: cinder/transfer/api.py:68 +msgid "Volume in unexpected state" msgstr "" -#: cinder/network/quantum/manager.py:186 -msgid "QuantumManager does not use 'bridge' parameter." +#: cinder/transfer/api.py:102 cinder/volume/api.py:367 +msgid "status must be available" msgstr "" -#: cinder/network/quantum/manager.py:190 -msgid "QuantumManager does not use 'bridge_interface' parameter." 
-msgstr "" +#: cinder/transfer/api.py:119 +#, fuzzy, python-format +msgid "Failed to create transfer record for %s" +msgstr "Failed to create transfer record for %s" -#: cinder/network/quantum/manager.py:195 -msgid "QuantumManager requires a valid (.1) gateway address." +#: cinder/transfer/api.py:136 +#, python-format +msgid "Attempt to transfer %s with invalid auth key." msgstr "" -#: cinder/network/quantum/manager.py:204 +#: cinder/transfer/api.py:156 cinder/volume/flows/api/create_volume.py:508 #, python-format msgid "" -"Unable to find existing quantum network for tenant '%(q_tenant_id)s' with" -" net-id '%(quantum_net_id)s'" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" msgstr "" -#: cinder/network/quantum/manager.py:301 +#: cinder/transfer/api.py:182 #, python-format -msgid "network allocations for instance %s" +msgid "Failed to update quota donating volumetransfer id %s" msgstr "" -#: cinder/network/quantum/manager.py:588 +#: cinder/transfer/api.py:199 #, python-format -msgid "" -"port deallocation failed for instance: |%(instance_id)s|, port_id: " -"|%(port_id)s|" +msgid "Volume %s has been transferred." msgstr "" -#: cinder/network/quantum/manager.py:606 +#: cinder/volume/api.py:143 #, python-format -msgid "" -"ipam deallocation failed for instance: |%(instance_id)s|, vif_uuid: " -"|%(vif_uuid)s|" +msgid "Unable to query if %s is in the availability zone set" msgstr "" -#: cinder/network/quantum/melange_connection.py:96 -#, python-format -msgid "Server returned error: %s" +#: cinder/volume/api.py:171 cinder/volume/api.py:173 +msgid "Failed to create api volume flow" msgstr "" -#: cinder/network/quantum/melange_connection.py:98 -msgid "Connection error contacting melange service, retrying" +#: cinder/volume/api.py:202 +msgid "Failed to update quota for deleting volume" msgstr "" -#: cinder/network/quantum/melange_connection.py:108 +#: cinder/volume/api.py:214 #, python-format -msgid "" -"allocate IP on network |%(network_id)s| belonging to " -"|%(network_tenant_id)s| to this vif |%(vif_id)s| with mac " -"|%(mac_address)s| belonging to |%(project_id)s| " +msgid "Volume status must be available or error, but current status is: %s" msgstr "" -#: cinder/network/quantum/melange_ipam_lib.py:133 -msgid "get_project_and_global_net_ids must be called with a non-null project_id" +#: cinder/volume/api.py:224 +msgid "Volume cannot be deleted while migrating" msgstr "" -#: cinder/network/quantum/cinder_ipam_lib.py:75 -msgid "Error creating network entry" +#: cinder/volume/api.py:229 +#, python-format +msgid "Volume still has %d dependent snapshots" msgstr "" -#: cinder/network/quantum/cinder_ipam_lib.py:90 +#: cinder/volume/api.py:285 cinder/volume/api.py:350 +#: cinder/volume/qos_specs.py:240 cinder/volume/volume_types.py:67 #, python-format -msgid "No network with net_id = %s" +msgid "Searching by: %s" msgstr "" -#: cinder/network/quantum/cinder_ipam_lib.py:221 -#, python-format -msgid "No fixed IPs to deallocate for vif %s" +#: cinder/volume/api.py:370 +msgid "already attached" msgstr "" -#: cinder/network/quantum/quantum_connection.py:99 -#, python-format -msgid "Connecting interface %(interface_id)s to net %(net_id)s for %(tenant_id)s" +#: cinder/volume/api.py:377 +msgid "status must be in-use to detach" msgstr "" -#: cinder/network/quantum/quantum_connection.py:113 -#, python-format -msgid "Deleting port %(port_id)s on net %(net_id)s for %(tenant_id)s" +#: cinder/volume/api.py:388 +#, fuzzy +msgid "Volume status 
must be available to reserve" +msgstr "Volume status must be available to reserve" + +#: cinder/volume/api.py:464 +msgid "Snapshot cannot be created while volume is migrating" msgstr "" -#: cinder/notifier/api.py:115 -#, python-format -msgid "%s not in valid priorities" +#: cinder/volume/api.py:468 +msgid "must be available" msgstr "" -#: cinder/notifier/api.py:130 +#: cinder/volume/api.py:490 #, python-format msgid "" -"Problem '%(e)s' attempting to send to notification system. " -"Payload=%(payload)s" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" msgstr "" -#: cinder/notifier/list_notifier.py:65 +#: cinder/volume/api.py:502 #, python-format -msgid "Problem '%(e)s' attempting to send to notification driver %(driver)s." +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" msgstr "" -#: cinder/notifier/rabbit_notifier.py:46 -#, python-format -msgid "Could not send notification to %(topic)s. Payload=%(message)s" +#: cinder/volume/api.py:553 +#, fuzzy +msgid "Volume Snapshot status must be available or error" +msgstr "Volume Snapshot status must be available or error" + +#: cinder/volume/api.py:581 cinder/volume/flows/api/create_volume.py:208 +msgid "Metadata property key blank" msgstr "" -#: cinder/rpc/amqp.py:146 -#, python-format -msgid "Returning exception %s to caller" +#: cinder/volume/api.py:585 +msgid "Metadata property key greater than 255 characters" msgstr "" -#: cinder/rpc/amqp.py:188 -#, python-format -msgid "unpacked context: %s" +#: cinder/volume/api.py:589 +msgid "Metadata property value greater than 255 characters" msgstr "" -#: cinder/rpc/amqp.py:231 -#, python-format -msgid "received %s" +#: cinder/volume/api.py:720 cinder/volume/api.py:772 +#, fuzzy +msgid "Volume status must be available/in-use." +msgstr "Volume status must be available/in-use." + +#: cinder/volume/api.py:723 +msgid "Volume status is in-use." msgstr "" -#: cinder/rpc/amqp.py:236 -#, python-format -msgid "no method for message: %s" +#: cinder/volume/api.py:752 +msgid "Volume status must be available to extend." msgstr "" -#: cinder/rpc/amqp.py:237 +#: cinder/volume/api.py:757 #, python-format -msgid "No method for message: %s" +msgid "" +"New size for extend must be greater than current size. (current: " +"%(size)s, extended: %(new_size)s)" msgstr "" -#: cinder/rpc/amqp.py:321 -#, python-format -msgid "Making asynchronous call on %s ..." +#: cinder/volume/api.py:778 +msgid "Volume is already part of an active migration" msgstr "" -#: cinder/rpc/amqp.py:324 -#, python-format -msgid "MSG_ID is %s" +#: cinder/volume/api.py:784 +msgid "volume must not have snapshots" msgstr "" -#: cinder/rpc/amqp.py:346 +#: cinder/volume/api.py:797 #, python-format -msgid "Making asynchronous cast on %s..." +msgid "No available service named %s" msgstr "" -#: cinder/rpc/amqp.py:354 -msgid "Making asynchronous fanout cast..." +#: cinder/volume/api.py:803 +msgid "Destination host must be different than current host" msgstr "" -#: cinder/rpc/amqp.py:379 -#, python-format -msgid "Sending notification on %s..." +#: cinder/volume/api.py:833 +msgid "Source volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:837 +msgid "Destination volume not mid-migration." msgstr "" -#: cinder/rpc/common.py:54 +#: cinder/volume/api.py:842 #, python-format -msgid "" -"Remote error: %(exc_type)s %(value)s\n" -"%(traceback)s." +msgid "Destination has migration_status %(stat)s, expected %(exp)s." 
msgstr "" -#: cinder/rpc/common.py:71 -msgid "Timeout while waiting on RPC response." +#: cinder/volume/api.py:853 +msgid "Volume status must be available to update readonly flag." msgstr "" -#: cinder/rpc/impl_kombu.py:111 -msgid "Failed to process message... skipping it." +#: cinder/volume/api.py:862 +#, python-format +msgid "Unable to update type due to incorrect status on volume: %s" msgstr "" -#: cinder/rpc/impl_kombu.py:407 +#: cinder/volume/api.py:868 #, python-format -msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgid "Volume %s is already part of an active migration." msgstr "" -#: cinder/rpc/impl_kombu.py:430 +#: cinder/volume/api.py:874 #, python-format -msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgid "migration_policy must be 'on-demand' or 'never', passed: %s" msgstr "" -#: cinder/rpc/impl_kombu.py:466 +#: cinder/volume/api.py:887 #, python-format -msgid "" -"Unable to connect to AMQP server on %(hostname)s:%(port)d after " -"%(max_retries)d tries: %(err_str)s" +msgid "Invalid volume_type passed: %s" msgstr "" -#: cinder/rpc/impl_kombu.py:482 +#: cinder/volume/api.py:900 #, python-format -msgid "" -"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " -"again in %(sleep_time)d seconds." +msgid "New volume_type same as original: %s" +msgstr "" + +#: cinder/volume/api.py:915 +msgid "Retype cannot change encryption requirements" msgstr "" -#: cinder/rpc/impl_kombu.py:533 cinder/rpc/impl_qpid.py:385 +#: cinder/volume/api.py:927 +msgid "Retype cannot change front-end qos specs for in-use volumes" +msgstr "" + +#: cinder/volume/driver.py:189 cinder/volume/drivers/netapp/nfs.py:174 #, python-format -msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgid "Recovering from a failed execute. Try number %s" msgstr "" -#: cinder/rpc/impl_kombu.py:551 cinder/rpc/impl_qpid.py:400 +#: cinder/volume/driver.py:282 #, python-format -msgid "Timed out waiting for RPC response: %s" +msgid "copy_data_between_volumes %(src)s -> %(dest)s." msgstr "" -#: cinder/rpc/impl_kombu.py:555 cinder/rpc/impl_qpid.py:404 +#: cinder/volume/driver.py:295 cinder/volume/driver.py:309 #, python-format -msgid "Failed to consume message from queue: %s" +msgid "Failed to attach volume %(vol)s" msgstr "" -#: cinder/rpc/impl_kombu.py:589 cinder/rpc/impl_qpid.py:434 +#: cinder/volume/driver.py:327 #, python-format -msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgid "Failed to copy volume %(src)s to %(dest)d" msgstr "" -#: cinder/rpc/impl_qpid.py:341 +#: cinder/volume/driver.py:340 #, python-format -msgid "Unable to connect to AMQP server: %s " +msgid "copy_image_to_volume %s." msgstr "" -#: cinder/rpc/impl_qpid.py:346 +#: cinder/volume/driver.py:358 #, python-format -msgid "Connected to AMQP server on %s" +msgid "copy_volume_to_image %s." msgstr "" -#: cinder/rpc/impl_qpid.py:354 -msgid "Re-established AMQP queues" +#: cinder/volume/driver.py:394 +#, python-format +msgid "Unable to access the backend storage via the path %(path)s." msgstr "" -#: cinder/rpc/impl_qpid.py:412 -msgid "Error processing message. Skipping it." +#: cinder/volume/driver.py:433 +#, python-format +msgid "Creating a new backup for volume %s." msgstr "" -#: cinder/scheduler/chance.py:47 cinder/scheduler/simple.py:91 -#: cinder/scheduler/simple.py:143 -msgid "Is the appropriate service running?" +#: cinder/volume/driver.py:451 +#, python-format +msgid "Restoring backup %(backup)s to volume %(volume)s." 
msgstr "" -#: cinder/scheduler/chance.py:52 -msgid "Could not find another compute" +#: cinder/volume/driver.py:474 +msgid "Extend volume not implemented" msgstr "" -#: cinder/scheduler/driver.py:63 -#, python-format -msgid "Casted '%(method)s' to volume '%(host)s'" +#: cinder/volume/driver.py:533 cinder/volume/drivers/emc/emc_smis_iscsi.py:113 +msgid "ISCSI provider_location not stored, using discovery" msgstr "" -#: cinder/scheduler/driver.py:80 +#: cinder/volume/driver.py:546 #, python-format -msgid "Casted '%(method)s' to compute '%(host)s'" +msgid "ISCSI discovery attempt failed for:%s" msgstr "" -#: cinder/scheduler/driver.py:89 +#: cinder/volume/driver.py:548 #, python-format -msgid "Casted '%(method)s' to network '%(host)s'" +msgid "Error from iscsiadm -m discovery: %s" msgstr "" -#: cinder/scheduler/driver.py:107 +#: cinder/volume/driver.py:595 +#, fuzzy, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "Could not find iSCSI export for volume %s" + +#: cinder/volume/driver.py:599 cinder/volume/drivers/emc/emc_smis_iscsi.py:156 #, python-format -msgid "Casted '%(method)s' to %(topic)s '%(host)s'" +msgid "ISCSI Discovery: Found %s" msgstr "" -#: cinder/scheduler/driver.py:181 -msgid "Must implement a fallback schedule" +#: cinder/volume/driver.py:696 +msgid "The volume driver requires the iSCSI initiator name in the connector." msgstr "" -#: cinder/scheduler/driver.py:185 -msgid "Driver must implement schedule_prep_resize" +#: cinder/volume/driver.py:726 cinder/volume/driver.py:845 +#: cinder/volume/drivers/eqlx.py:247 cinder/volume/drivers/lvm.py:359 +#: cinder/volume/drivers/zadara.py:650 +#: cinder/volume/drivers/emc/emc_smis_common.py:859 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:235 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:602 +#: cinder/volume/drivers/netapp/iscsi.py:1032 +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/nexenta/iscsi.py:538 +#: cinder/volume/drivers/windows/windows.py:205 +msgid "Updating volume stats" msgstr "" -#: cinder/scheduler/driver.py:190 -msgid "Driver must implement schedule_run_instance" +#: cinder/volume/driver.py:924 +msgid "Driver must implement initialize_connection" msgstr "" -#: cinder/scheduler/driver.py:325 -msgid "Block migration can not be used with shared storage." +#: cinder/volume/manager.py:203 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." msgstr "" -#: cinder/scheduler/driver.py:330 -msgid "Live migration can not be used without shared storage." +#: cinder/volume/manager.py:209 +msgid "" +"ThinLVMVolumeDriver is deprecated, please configure LVMISCSIDriver and " +"lvm_type=thin. Continuing with those settings." msgstr "" -#: cinder/scheduler/driver.py:367 cinder/scheduler/driver.py:457 +#: cinder/volume/manager.py:228 #, python-format -msgid "host %(dest)s is not compatible with original host %(src)s." 
+msgid "Starting volume driver %(driver_name)s (%(version)s)" msgstr "" -#: cinder/scheduler/driver.py:416 +#: cinder/volume/manager.py:235 #, python-format -msgid "" -"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " -"memory(host:%(avail)s <= instance:%(mem_inst)s)" +msgid "Error encountered during initialization of driver: %(name)s" msgstr "" -#: cinder/scheduler/driver.py:472 +#: cinder/volume/manager.py:244 #, python-format -msgid "" -"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " -"disk(host:%(available)s <= instance:%(necessary)s)" +msgid "Re-exporting %s volumes" +msgstr "Re-exporting %s volumes" + +#: cinder/volume/manager.py:257 +#, python-format +msgid "Failed to re-export volume %s: setting to error state" msgstr "" -#: cinder/scheduler/filter_scheduler.py:51 +#: cinder/volume/manager.py:264 #, python-format -msgid "No host selection for %s defined." +msgid "volume %s stuck in a downloading state" msgstr "" -#: cinder/scheduler/filter_scheduler.py:64 +#: cinder/volume/manager.py:271 #, python-format -msgid "Attempting to build %(num_instances)d instance(s)" +msgid "volume %s: skipping export" +msgstr "volume %s: skipping export" + +#: cinder/volume/manager.py:273 +#, python-format +msgid "" +"Error encountered during re-exporting phase of driver initialization: " +"%(name)s" msgstr "" -#: cinder/scheduler/filter_scheduler.py:157 -msgid "Scheduler only understands Compute nodes (for now)" +#: cinder/volume/manager.py:283 +msgid "Resuming any in progress delete operations" msgstr "" -#: cinder/scheduler/filter_scheduler.py:199 +#: cinder/volume/manager.py:286 #, python-format -msgid "Filtered %(hosts)s" +msgid "Resuming delete on volume: %s" msgstr "" -#: cinder/scheduler/filter_scheduler.py:209 -#, python-format -msgid "Weighted %(weighted_host)s" +#: cinder/volume/manager.py:328 cinder/volume/manager.py:330 +msgid "Failed to create manager volume flow" msgstr "" -#: cinder/scheduler/host_manager.py:144 +#: cinder/volume/manager.py:374 cinder/volume/manager.py:391 #, python-format -msgid "Host filter fails for ignored host %(host)s" -msgstr "" +msgid "volume %s: deleting" +msgstr "volume %s: deleting" + +#: cinder/volume/manager.py:380 +#, fuzzy +msgid "volume is not local to this node" +msgstr "Volume is not local to this node" -#: cinder/scheduler/host_manager.py:151 +#: cinder/volume/manager.py:389 #, python-format -msgid "Host filter fails for non-forced host %(host)s" -msgstr "" +msgid "volume %s: removing export" +msgstr "volume %s: removing export" -#: cinder/scheduler/host_manager.py:157 +#: cinder/volume/manager.py:394 #, python-format -msgid "Host filter function %(func)s failed for %(host)s" +msgid "Cannot delete volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:422 +msgid "Failed to update usages deleting volume" msgstr "" -#: cinder/scheduler/host_manager.py:163 +#: cinder/volume/manager.py:427 #, python-format -msgid "Host filter passes for %(host)s" +msgid "volume %s: glance metadata deleted" msgstr "" -#: cinder/scheduler/host_manager.py:272 +#: cinder/volume/manager.py:430 #, python-format -msgid "Received %(service_name)s service update from %(host)s." 
+msgid "no glance metadata found for volume %s" msgstr "" -#: cinder/scheduler/host_manager.py:313 -msgid "host_manager only implemented for 'compute'" +#: cinder/volume/manager.py:434 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "volume %s: deleted successfully" + +#: cinder/volume/manager.py:451 +#, python-format +msgid "snapshot %s: creating" msgstr "" -#: cinder/scheduler/host_manager.py:323 +#: cinder/volume/manager.py:462 #, python-format -msgid "No service for compute ID %s" +msgid "snapshot %(snap_id)s: creating" msgstr "" -#: cinder/scheduler/manager.py:85 +#: cinder/volume/manager.py:490 #, python-format msgid "" -"Driver Method %(driver_method_name)s missing: %(e)s. Reverting to " -"schedule()" +"Failed updating %(snapshot_id)s metadata using the provided volumes " +"%(volume_id)s metadata" msgstr "" -#: cinder/scheduler/manager.py:150 +#: cinder/volume/manager.py:496 #, python-format -msgid "Failed to schedule_%(method)s: %(ex)s" +msgid "snapshot %s: created successfully" msgstr "" -#: cinder/scheduler/manager.py:159 +#: cinder/volume/manager.py:508 cinder/volume/manager.py:518 #, python-format -msgid "Setting instance %(instance_uuid)s to %(state)s state." +msgid "snapshot %s: deleting" msgstr "" -#: cinder/scheduler/scheduler_options.py:66 +#: cinder/volume/manager.py:526 #, python-format -msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgid "Cannot delete snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:556 +msgid "Failed to update usages deleting snapshot" msgstr "" -#: cinder/scheduler/scheduler_options.py:75 +#: cinder/volume/manager.py:559 #, python-format -msgid "Could not decode scheduler options: '%(e)s'" +msgid "snapshot %s: deleted successfully" msgstr "" -#: cinder/scheduler/simple.py:87 -msgid "Not enough allocatable CPU cores remaining" +#: cinder/volume/manager.py:579 +msgid "being attached by another instance" msgstr "" -#: cinder/scheduler/simple.py:137 -msgid "Not enough allocatable volume gigabytes remaining" +#: cinder/volume/manager.py:583 +msgid "being attached by another host" msgstr "" -#: cinder/scheduler/filters/core_filter.py:45 -msgid "VCPUs not set; assuming CPU collection broken" +#: cinder/volume/manager.py:587 +msgid "being attached by different mode" msgstr "" -#: cinder/tests/fake_utils.py:72 -#, python-format -msgid "Faking execution of cmd (subprocess): %s" +#: cinder/volume/manager.py:590 +msgid "status must be available or attaching" msgstr "" -#: cinder/tests/fake_utils.py:80 +#: cinder/volume/manager.py:698 #, python-format -msgid "Faked command matched %s" +msgid "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" msgstr "" -#: cinder/tests/fake_utils.py:96 +#: cinder/volume/manager.py:760 #, python-format -msgid "Faked command raised an exception %s" +msgid "Unable to fetch connection information from backend: %(err)s" msgstr "" -#: cinder/tests/fake_utils.py:101 +#: cinder/volume/manager.py:807 #, python-format -msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgid "Unable to terminate volume connection: %(err)s" msgstr "" -#: cinder/tests/fakelibvirt.py:784 -msgid "Please extend mock libvirt module to support flags" +#: cinder/volume/manager.py:854 +msgid "failed to create new_volume on destination host" msgstr "" -#: cinder/tests/fakelibvirt.py:790 -msgid "Please extend fake libvirt module to support this auth method" +#: cinder/volume/manager.py:857 +msgid "timeout creating new_volume on destination host" msgstr "" -#: 
cinder/tests/test_compute.py:365 cinder/tests/test_compute.py:1419
+#: cinder/volume/manager.py:880
#, python-format
-msgid "Running instances: %s"
+msgid "Failed to copy volume %(vol1)s to %(vol2)s"
msgstr ""

-#: cinder/tests/test_compute.py:371
+#: cinder/volume/manager.py:909
#, python-format
-msgid "After terminating instances: %s"
+msgid ""
+"migrate_volume_completion: completing migration for volume %(vol1)s "
+"(temporary volume %(vol2)s"
msgstr ""

-#: cinder/tests/test_compute.py:589
-msgid "Internal error"
+#: cinder/volume/manager.py:921
+#, python-format
+msgid ""
+"migrate_volume_completion is cleaning up an error for volume %(vol1)s "
+"(temporary volume %(vol2)s"
msgstr ""

-#: cinder/tests/test_compute.py:1430
+#: cinder/volume/manager.py:940
#, python-format
-msgid "After force-killing instances: %s"
+msgid "Failed to delete migration source vol %(vol)s: %(err)s"
msgstr ""

-#: cinder/tests/test_misc.py:92
+#: cinder/volume/manager.py:976
#, python-format
-msgid ""
-"The following migrations are missing a downgrade:\n"
-"\t%s"
+msgid "volume %s: calling driver migrate_volume"
msgstr ""

-#: cinder/tests/test_cinder_manage.py:169
-msgid "id"
+#: cinder/volume/manager.py:1016
+msgid "Updating volume status"
msgstr ""

-#: cinder/tests/test_cinder_manage.py:170
-msgid "IPv4"
+#: cinder/volume/manager.py:1024
+#, python-format
+msgid ""
+"Unable to update stats, %(driver_name)s -%(driver_version)s "
+"%(config_group)s driver is uninitialized."
msgstr ""

-#: cinder/tests/test_cinder_manage.py:171
-msgid "IPv6"
+#: cinder/volume/manager.py:1044
+#, python-format
+msgid "Notification {%s} received"
msgstr ""

-#: cinder/tests/test_cinder_manage.py:172
-msgid "start address"
+#: cinder/volume/manager.py:1091
+#, python-format
+msgid ""
+"Quota exceeded for %(s_pid)s, tried to extend volume by %(s_size)sG, "
+"(%(d_consumed)dG of %(d_quota)dG already consumed)"
msgstr ""

-#: cinder/tests/test_cinder_manage.py:173
-msgid "DNS1"
+#: cinder/volume/manager.py:1103
+#, python-format
+msgid "volume %s: extending"
msgstr ""

-#: cinder/tests/test_cinder_manage.py:174
-msgid "DNS2"
+#: cinder/volume/manager.py:1105
+#, python-format
+msgid "volume %s: extended successfully"
msgstr ""

-#: cinder/tests/test_cinder_manage.py:175
-msgid "VlanID"
+#: cinder/volume/manager.py:1107
+#, python-format
+msgid "volume %s: Error trying to extend volume"
msgstr ""

-#: cinder/tests/test_cinder_manage.py:176
-msgid "project"
+#: cinder/volume/manager.py:1169
+msgid "Failed to update usages while retyping volume."
msgstr ""

-#: cinder/tests/test_cinder_manage.py:177
-msgid "uuid"
+#: cinder/volume/manager.py:1170
+msgid "Failed to get old volume type quota reservations"
msgstr ""

-#: cinder/tests/test_volume.py:216
+#: cinder/volume/manager.py:1190
#, python-format
-msgid "Target %s allocated"
+msgid "Volume %s: retyped successfully"
msgstr ""

-#: cinder/tests/test_volume.py:468
+#: cinder/volume/manager.py:1193
#, python-format
-msgid "Cannot confirm exported volume id:%s."
+msgid ""
+"Volume %s: driver error when trying to retype, falling back to generic "
+"mechanism."
msgstr ""

-#: cinder/tests/test_volume_types.py:58
-#, python-format
-msgid "Given data: %s"
+#: cinder/volume/manager.py:1204
+msgid "Retype requires migration but is not allowed."
msgstr ""

-#: cinder/tests/test_volume_types.py:59
-#, python-format
-msgid "Result data: %s"
+#: cinder/volume/manager.py:1212
+msgid "Volume must not have snapshots."
msgstr "" -#: cinder/tests/test_xenapi.py:626 +#: cinder/volume/qos_specs.py:57 #, python-format -msgid "Creating files in %s to simulate guest agent" +msgid "Valid consumer of QoS specs are: %s" msgstr "" -#: cinder/tests/test_xenapi.py:637 +#: cinder/volume/qos_specs.py:84 cinder/volume/qos_specs.py:105 +#: cinder/volume/qos_specs.py:155 cinder/volume/qos_specs.py:197 +#: cinder/volume/qos_specs.py:211 cinder/volume/qos_specs.py:225 +#: cinder/volume/volume_types.py:43 #, python-format -msgid "Removing simulated guest agent files in %s" +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:123 cinder/volume/qos_specs.py:140 +#: cinder/volume/qos_specs.py:272 cinder/volume/volume_types.py:52 +#: cinder/volume/volume_types.py:99 +msgid "id cannot be None" msgstr "" -#: cinder/tests/api/openstack/compute/test_servers.py:2144 +#: cinder/volume/qos_specs.py:156 #, python-format -msgid "Quota exceeded: code=%(code)s" +msgid "Failed to get all associations of qos specs %s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:57 +#: cinder/volume/qos_specs.py:189 #, python-format -msgid "_create: %s" +msgid "" +"Type %(type_id)s is already associated with another qos specs: " +"%(qos_specs_id)s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:66 +#: cinder/volume/qos_specs.py:198 #, python-format -msgid "_delete: %s" +msgid "Failed to associate qos specs %(id)s with type: %(vol_type_id)s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:75 +#: cinder/volume/qos_specs.py:212 #, python-format -msgid "_get: %s" +msgid "Failed to disassociate qos specs %(id)s with type: %(vol_type_id)s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:85 +#: cinder/volume/qos_specs.py:226 #, python-format -msgid "_get_all: %s" +msgid "Failed to disassociate qos specs %s." +msgstr "" + +#: cinder/volume/qos_specs.py:284 cinder/volume/volume_types.py:111 +msgid "name cannot be None" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:125 +#: cinder/volume/utils.py:144 #, python-format -msgid "test_snapshot_create: param=%s" +msgid "" +"Incorrect value error: %(blocksize)s, it may indicate that " +"'volume_dd_blocksize' was configured incorrectly. Fall back to default." msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:134 +#: cinder/volume/volume_types.py:130 #, python-format -msgid "test_snapshot_create: resp_dict=%s" +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:156 +#: cinder/volume/drivers/block_device.py:131 +#: cinder/volume/drivers/block_device.py:143 cinder/volume/drivers/lvm.py:654 +#: cinder/volume/drivers/lvm.py:669 #, python-format -msgid "test_snapshot_create_force: param=%s" +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:165 +#: cinder/volume/drivers/block_device.py:157 cinder/volume/drivers/lvm.py:687 #, python-format -msgid "test_snapshot_create_force: resp_dict=%s" +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:205 +#: cinder/volume/drivers/block_device.py:183 cinder/volume/drivers/lvm.py:483 #, python-format -msgid "test_snapshot_show: resp=%s" +msgid "Skipping ensure_export. 
No iscsi_target provision for volume: %s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:231 +#: cinder/volume/drivers/block_device.py:200 cinder/volume/drivers/lvm.py:504 #, python-format -msgid "test_snapshot_detail: resp_dict=%s" +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" msgstr "" -#: cinder/tests/integrated/test_login.py:31 +#: cinder/volume/drivers/block_device.py:272 cinder/volume/drivers/lvm.py:227 #, python-format -msgid "flavor: %s" +msgid "Performing secure delete on volume: %s" msgstr "" -#: cinder/tests/integrated/api/client.py:38 +#: cinder/volume/drivers/block_device.py:287 #, python-format -msgid "" -"%(message)s\n" -"Status Code: %(_status)s\n" -"Body: %(_body)s" +msgid "Error unrecognized volume_clear option: %s" msgstr "" -#: cinder/tests/integrated/api/client.py:47 -msgid "Authentication error" +#: cinder/volume/drivers/block_device.py:311 cinder/volume/drivers/lvm.py:300 +#: cinder/volume/drivers/zadara.py:509 cinder/volume/drivers/nexenta/nfs.py:189 +#, python-format +msgid "Creating clone of volume: %s" msgstr "" -#: cinder/tests/integrated/api/client.py:55 -msgid "Authorization error" +#: cinder/volume/drivers/block_device.py:380 +msgid "No free disk" msgstr "" -#: cinder/tests/integrated/api/client.py:63 -msgid "Item not found" +#: cinder/volume/drivers/block_device.py:393 +msgid "No big enough free disk" msgstr "" -#: cinder/tests/integrated/api/client.py:105 +#: cinder/volume/drivers/coraid.py:84 #, python-format -msgid "Doing %(method)s on %(relative_url)s" +msgid "Invalid ESM url scheme \"%s\". Supported https only." msgstr "" -#: cinder/tests/integrated/api/client.py:107 -#, python-format -msgid "Body: %s" +#: cinder/volume/drivers/coraid.py:111 +msgid "Invalid REST handle name. Expected path." msgstr "" -#: cinder/tests/integrated/api/client.py:125 +#: cinder/volume/drivers/coraid.py:134 #, python-format -msgid "%(auth_uri)s => code %(http_status)s" +msgid "Call to json.loads() failed: %(ex)s. Response: %(resp)s" msgstr "" -#: cinder/tests/integrated/api/client.py:151 -#, python-format -msgid "%(relative_uri)s => code %(http_status)s" +#: cinder/volume/drivers/coraid.py:224 +msgid "Session is expired. Relogin on ESM." msgstr "" -#: cinder/tests/integrated/api/client.py:161 -msgid "Unexpected status code" +#: cinder/volume/drivers/coraid.py:244 +msgid "Reply is empty." msgstr "" -#: cinder/tests/integrated/api/client.py:168 -#, python-format -msgid "Decoding JSON: %s" +#: cinder/volume/drivers/coraid.py:246 +msgid "Error message is empty." msgstr "" -#: cinder/tests/rpc/common.py:133 +#: cinder/volume/drivers/coraid.py:284 #, python-format -msgid "Nested received %(queue)s, %(value)s" -msgstr "Nested received %(queue)s, %(value)s" +msgid "Coraid Appliance ping failed: %s" +msgstr "" -#: cinder/tests/rpc/common.py:142 +#: cinder/volume/drivers/coraid.py:297 #, python-format -msgid "Nested return %s" -msgstr "Nested return %s" - -#: cinder/tests/rpc/common.py:160 -msgid "RPC backend does not support timeouts" +msgid "Volume \"%(name)s\" created with VSX LUN \"%(lun)s\"" msgstr "" -#: cinder/tests/rpc/common.py:227 cinder/tests/rpc/common.py:233 +#: cinder/volume/drivers/coraid.py:311 #, python-format -msgid "Received %s" -msgstr "Received %s" +msgid "Volume \"%s\" deleted." 
+msgstr "" -#: cinder/virt/connection.py:85 -msgid "Failed to open connection to the hypervisor" -msgstr "Failed to open connection to the hypervisor" +#: cinder/volume/drivers/coraid.py:315 +#, python-format +msgid "Resize volume \"%(name)s\" to %(size)s GB." +msgstr "" -#: cinder/virt/fake.py:270 cinder/virt/xenapi_conn.py:396 -#: cinder/virt/baremetal/proxy.py:716 cinder/virt/libvirt/connection.py:2045 +#: cinder/volume/drivers/coraid.py:319 #, python-format -msgid "Compute_service record created for %s " +msgid "Repository for volume \"%(name)s\" found: \"%(repo)s\"" msgstr "" -#: cinder/virt/fake.py:273 cinder/virt/xenapi_conn.py:399 -#: cinder/virt/baremetal/proxy.py:720 cinder/virt/libvirt/connection.py:2048 +#: cinder/volume/drivers/coraid.py:333 #, python-format -msgid "Compute_service record updated for %s " +msgid "Volume \"%(name)s\" resized. New size is %(size)s GB." msgstr "" -#: cinder/virt/firewall.py:130 +#: cinder/volume/drivers/coraid.py:385 +msgid "Cannot create clone volume in different repository." +msgstr "" + +#: cinder/volume/drivers/coraid.py:505 #, python-format -msgid "Attempted to unfilter instance %s which is not filtered" +msgid "Initialize connection %(shelf)s/%(lun)s for %(name)s" msgstr "" -#: cinder/virt/firewall.py:137 +#: cinder/volume/drivers/eqlx.py:139 #, python-format -msgid "Filters added to instance %s" +msgid "" +"CLI output\n" +"%s" msgstr "" -#: cinder/virt/firewall.py:139 -msgid "Provider Firewall Rules refreshed" +#: cinder/volume/drivers/eqlx.py:154 +msgid "Reading CLI MOTD" msgstr "" -#: cinder/virt/firewall.py:291 +#: cinder/volume/drivers/eqlx.py:158 #, python-format -msgid "Adding security group rule: %r" +msgid "Setting CLI terminal width: '%s'" msgstr "" -#: cinder/virt/firewall.py:403 cinder/virt/xenapi/firewall.py:87 +#: cinder/volume/drivers/eqlx.py:162 #, python-format -msgid "Adding provider rule: %s" +msgid "Sending CLI command: '%s'" msgstr "" -#: cinder/virt/images.py:86 -msgid "'qemu-img info' parsing failed." 
+#: cinder/volume/drivers/eqlx.py:169 +msgid "Error executing EQL command" msgstr "" -#: cinder/virt/images.py:92 +#: cinder/volume/drivers/eqlx.py:199 #, python-format -msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgid "EQL-driver: executing \"%s\"" msgstr "" -#: cinder/virt/images.py:104 +#: cinder/volume/drivers/eqlx.py:208 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:383 #, python-format -msgid "Converted to raw, but format is now %s" +msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" msgstr "" -#: cinder/virt/vmwareapi_conn.py:105 -msgid "" -"Must specify vmwareapi_host_ip,vmwareapi_host_username and " -"vmwareapi_host_password to useconnection_type=vmwareapi" -msgstr "" +#: cinder/volume/drivers/eqlx.py:215 cinder/volume/drivers/san/san.py:149 +#, fuzzy, python-format +msgid "Error running SSH command: %s" +msgstr "Error running SSH command: %s" -#: cinder/virt/vmwareapi_conn.py:276 +#: cinder/volume/drivers/eqlx.py:282 #, python-format -msgid "In vmwareapi:_create_session, got this exception: %s" +msgid "Volume %s does not exist, it may have already been deleted" msgstr "" -#: cinder/virt/vmwareapi_conn.py:359 +#: cinder/volume/drivers/eqlx.py:300 #, python-format -msgid "In vmwareapi:_call_method, got this exception: %s" +msgid "EQL-driver: Setup is complete, group IP is %s" msgstr "" -#: cinder/virt/vmwareapi_conn.py:398 -#, python-format -msgid "Task [%(task_name)s] %(task_ref)s status: success" +#: cinder/volume/drivers/eqlx.py:304 +msgid "Failed to setup the Dell EqualLogic driver" msgstr "" -#: cinder/virt/vmwareapi_conn.py:404 +#: cinder/volume/drivers/eqlx.py:320 #, python-format -msgid "Task [%(task_name)s] %(task_ref)s status: error %(error_info)s" +msgid "Failed to create volume %s" msgstr "" -#: cinder/virt/vmwareapi_conn.py:409 +#: cinder/volume/drivers/eqlx.py:329 #, python-format -msgid "In vmwareapi:_poll_task, Got this error %s" +msgid "Volume %s was not found while trying to delete it" msgstr "" -#: cinder/virt/xenapi_conn.py:140 -msgid "" -"Must specify xenapi_connection_url, xenapi_connection_username " -"(optionally), and xenapi_connection_password to use " -"connection_type=xenapi" +#: cinder/volume/drivers/eqlx.py:333 +#, python-format +msgid "Failed to delete volume %s" msgstr "" -#: cinder/virt/xenapi_conn.py:329 cinder/virt/libvirt/connection.py:472 -msgid "Could not determine iscsi initiator name" +#: cinder/volume/drivers/eqlx.py:348 +#, python-format +msgid "Failed to create snapshot of volume %s" msgstr "" -#: cinder/virt/xenapi_conn.py:460 -msgid "Host startup on XenServer is not supported." +#: cinder/volume/drivers/eqlx.py:361 +#, python-format +msgid "Failed to create volume from snapshot %s" msgstr "" -#: cinder/virt/xenapi_conn.py:489 -msgid "Unable to log in to XenAPI (is the Dom0 disk full?)" +#: cinder/volume/drivers/eqlx.py:374 +#, python-format +msgid "Failed to create clone of volume %s" msgstr "" -#: cinder/virt/xenapi_conn.py:527 -msgid "Host is member of a pool, but DB says otherwise" +#: cinder/volume/drivers/eqlx.py:384 +#, python-format +msgid "Failed to delete snapshot %(snap)s of volume %(vol)s" msgstr "" -#: cinder/virt/xenapi_conn.py:599 cinder/virt/xenapi_conn.py:612 +#: cinder/volume/drivers/eqlx.py:405 #, python-format -msgid "Got exception: %s" +msgid "Failed to initialize connection to volume %s" msgstr "" -#: cinder/virt/baremetal/dom.py:93 -msgid "No domains exist." 
+#: cinder/volume/drivers/eqlx.py:415 +#, python-format +msgid "Failed to terminate connection to volume %s" msgstr "" -#: cinder/virt/baremetal/dom.py:95 +#: cinder/volume/drivers/eqlx.py:436 #, python-format -msgid "============= initial domains =========== : %s" +msgid "Volume %s is not found!, it may have been deleted" msgstr "" -#: cinder/virt/baremetal/dom.py:99 -msgid "Building domain: to be removed" +#: cinder/volume/drivers/eqlx.py:440 +#, python-format +msgid "Failed to ensure export of volume %s" msgstr "" -#: cinder/virt/baremetal/dom.py:103 -msgid "Not running domain: remove" +#: cinder/volume/drivers/eqlx.py:459 +#, python-format +msgid "Failed to extend_volume %(name)s from %(current_size)sGB to %(new_size)sGB" msgstr "" -#: cinder/virt/baremetal/dom.py:111 -msgid "domain running on an unknown node: discarded" +#: cinder/volume/drivers/glusterfs.py:86 +#, python-format +msgid "There's no Gluster config file configured (%s)" msgstr "" -#: cinder/virt/baremetal/dom.py:127 +#: cinder/volume/drivers/glusterfs.py:91 #, python-format -msgid "No such domain (%s)" +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:103 +msgid "mount.glusterfs is not installed" msgstr "" -#: cinder/virt/baremetal/dom.py:134 +#: cinder/volume/drivers/glusterfs.py:161 #, python-format -msgid "Failed power down Bare-metal node %s" +msgid "Cloning volume %(src)s to volume %(dst)s" msgstr "" -#: cinder/virt/baremetal/dom.py:143 -msgid "deactivate -> activate fails" +#: cinder/volume/drivers/glusterfs.py:166 +msgid "Volume status must be 'available'." msgstr "" -#: cinder/virt/baremetal/dom.py:153 -msgid "destroy_domain: no such domain" +#: cinder/volume/drivers/glusterfs.py:202 cinder/volume/drivers/nfs.py:122 +#: cinder/volume/drivers/netapp/nfs.py:753 +#, fuzzy, python-format +msgid "casted to %s" +msgstr "casted to %s" + +#: cinder/volume/drivers/glusterfs.py:216 +msgid "Snapshot status must be \"available\" to clone." msgstr "" -#: cinder/virt/baremetal/dom.py:154 +#: cinder/volume/drivers/glusterfs.py:238 #, python-format -msgid "No such domain %s" +msgid "snapshot: %(snap)s, volume: %(vol)s, volume_size: %(size)s" msgstr "" -#: cinder/virt/baremetal/dom.py:161 +#: cinder/volume/drivers/glusterfs.py:257 #, python-format -msgid "Domains: %s" +msgid "will copy from snapshot at %s" msgstr "" -#: cinder/virt/baremetal/dom.py:163 +#: cinder/volume/drivers/glusterfs.py:275 cinder/volume/drivers/nfs.py:172 #, python-format -msgid "Nodes: %s" +msgid "Volume %s does not have provider_location specified, skipping" msgstr "" -#: cinder/virt/baremetal/dom.py:166 +#: cinder/volume/drivers/glusterfs.py:373 #, python-format -msgid "After storing domains: %s" +msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" msgstr "" -#: cinder/virt/baremetal/dom.py:169 -msgid "deactivation/removing domain failed" +#: cinder/volume/drivers/glusterfs.py:403 +#, python-format +msgid "nova call result: %s" msgstr "" -#: cinder/virt/baremetal/dom.py:176 -msgid "===== Domain is being created =====" +#: cinder/volume/drivers/glusterfs.py:405 +msgid "Call to Nova to create snapshot failed" msgstr "" -#: cinder/virt/baremetal/dom.py:179 -msgid "Same domain name already exists" +#: cinder/volume/drivers/glusterfs.py:427 +msgid "Nova returned \"error\" status while creating snapshot." 
msgstr "" -#: cinder/virt/baremetal/dom.py:181 -msgid "create_domain: before get_idle_node" +#: cinder/volume/drivers/glusterfs.py:431 +#, python-format +msgid "Status of snapshot %(id)s is now %(status)s" msgstr "" -#: cinder/virt/baremetal/dom.py:198 +#: cinder/volume/drivers/glusterfs.py:444 #, python-format -msgid "Created new domain: %s" +msgid "Timed out while waiting for Nova update for creation of snapshot %s." msgstr "" -#: cinder/virt/baremetal/dom.py:213 +#: cinder/volume/drivers/glusterfs.py:456 #, python-format -msgid "Failed to boot Bare-metal node %s" +msgid "create snapshot: %s" msgstr "" -#: cinder/virt/baremetal/dom.py:222 -msgid "No such domain exists" +#: cinder/volume/drivers/glusterfs.py:457 +#, python-format +msgid "volume id: %s" msgstr "" -#: cinder/virt/baremetal/dom.py:226 -#, python-format -msgid "change_domain_state: to new state %s" +#: cinder/volume/drivers/glusterfs.py:532 +msgid "'active' must be present when writing snap_info." msgstr "" -#: cinder/virt/baremetal/dom.py:233 +#: cinder/volume/drivers/glusterfs.py:562 #, python-format -msgid "Stored fake domains to the file: %s" +msgid "deleting snapshot %s" msgstr "" -#: cinder/virt/baremetal/dom.py:244 -msgid "domain does not exist" +#: cinder/volume/drivers/glusterfs.py:566 +msgid "Volume status must be \"available\" or \"in-use\"." msgstr "" -#: cinder/virt/baremetal/nodes.py:42 +#: cinder/volume/drivers/glusterfs.py:582 #, python-format -msgid "Unknown baremetal driver %(d)s" +msgid "" +"Snapshot record for %s is not present, allowing snapshot_delete to " +"proceed." msgstr "" -#: cinder/virt/baremetal/proxy.py:148 +#: cinder/volume/drivers/glusterfs.py:587 #, python-format -msgid "Error encountered when destroying instance '%(name)s': %(ex)s" +msgid "snapshot_file for this snap is %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:162 +#: cinder/volume/drivers/glusterfs.py:608 #, python-format -msgid "instance %(instance_name)s: deleting instance files %(target)s" +msgid "No base file found for %s." msgstr "" -#: cinder/virt/baremetal/proxy.py:189 +#: cinder/volume/drivers/glusterfs.py:625 #, python-format -msgid "instance %s: rebooted" -msgstr "" - -#: cinder/virt/baremetal/proxy.py:192 -msgid "_wait_for_reboot failed" +msgid "No %(base_id)s found for %(file)s" msgstr "" -#: cinder/virt/baremetal/proxy.py:222 +#: cinder/volume/drivers/glusterfs.py:680 #, python-format -msgid "instance %s: rescued" +msgid "No file found with %s as backing file." msgstr "" -#: cinder/virt/baremetal/proxy.py:225 -msgid "_wait_for_rescue failed" +#: cinder/volume/drivers/glusterfs.py:690 +#, python-format +msgid "No snap found with %s as backing file." msgstr "" -#: cinder/virt/baremetal/proxy.py:242 -msgid "<============= spawn of baremetal =============>" +#: cinder/volume/drivers/glusterfs.py:701 +#, python-format +msgid "No file depends on %s." msgstr "" -#: cinder/virt/baremetal/proxy.py:255 +#: cinder/volume/drivers/glusterfs.py:727 #, python-format -msgid "instance %s: is building" +msgid "Check condition failed: %s expected to be None." msgstr "" -#: cinder/virt/baremetal/proxy.py:260 -msgid "Key is injected but instance is not running yet" +#: cinder/volume/drivers/glusterfs.py:778 +msgid "Call to Nova delete snapshot failed" msgstr "" -#: cinder/virt/baremetal/proxy.py:265 +#: cinder/volume/drivers/glusterfs.py:796 #, python-format -msgid "instance %s: booted" +msgid "status of snapshot %s is still \"deleting\"... 
waiting" msgstr "" -#: cinder/virt/baremetal/proxy.py:268 +#: cinder/volume/drivers/glusterfs.py:802 #, python-format -msgid "~~~~~~ current state = %s ~~~~~~" +msgid "Unable to delete snapshot %(id)s, status: %(status)s." msgstr "" -#: cinder/virt/baremetal/proxy.py:269 +#: cinder/volume/drivers/glusterfs.py:815 #, python-format -msgid "instance %s spawned successfully" +msgid "Timed out while waiting for Nova update for deletion of snapshot %(id)s." msgstr "" -#: cinder/virt/baremetal/proxy.py:272 +#: cinder/volume/drivers/glusterfs.py:904 #, python-format -msgid "instance %s:not booted" +msgid "%s must be a valid raw or qcow2 image." msgstr "" -#: cinder/virt/baremetal/proxy.py:274 -msgid "Bremetal assignment is overcommitted." +#: cinder/volume/drivers/glusterfs.py:967 +msgid "Extend volume is only supported for this driver when no snapshots exist." msgstr "" -#: cinder/virt/baremetal/proxy.py:354 +#: cinder/volume/drivers/glusterfs.py:975 #, python-format -msgid "instance %s: Creating image" +msgid "Unrecognized backing format: %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:473 +#: cinder/volume/drivers/glusterfs.py:990 #, python-format -msgid "instance %(inst_name)s: injecting %(injection)s into image %(img_id)s" +msgid "creating new volume at %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:484 +#: cinder/volume/drivers/glusterfs.py:993 #, python-format -msgid "" -"instance %(inst_name)s: ignoring error injecting data into image " -"%(img_id)s (%(e)s)" +msgid "file already exists at %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:529 +#: cinder/volume/drivers/glusterfs.py:1019 cinder/volume/drivers/nfs.py:159 #, python-format -msgid "instance %s: starting toXML method" +msgid "Exception during mounting %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:531 +#: cinder/volume/drivers/glusterfs.py:1021 #, python-format -msgid "instance %s: finished toXML method" +msgid "Available shares: %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:574 cinder/virt/libvirt/connection.py:1815 +#: cinder/volume/drivers/glusterfs.py:1038 +#, python-format msgid "" -"Cannot get the number of cpu, because this function is not implemented " -"for this platform. This error can be safely ignored for now." +"GlusterFS share at %(dir)s is not writable by the Cinder volume service. " +"Snapshot operations will not be supported." msgstr "" -#: cinder/virt/baremetal/proxy.py:714 +#: cinder/volume/drivers/gpfs.py:96 #, python-format -msgid "#### RLK: cpu_arch = %s " +msgid "GPFS is not active. Detailed output: %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:746 -msgid "Updating!" -msgstr "" - -#: cinder/virt/baremetal/proxy.py:773 cinder/virt/libvirt/connection.py:2609 -#: cinder/virt/xenapi/host.py:129 -msgid "Updating host stats" +#: cinder/volume/drivers/gpfs.py:97 +#, python-format +msgid "GPFS is not running - state: %s" msgstr "" -#: cinder/virt/baremetal/tilera.py:185 -msgid "free_node...." +#: cinder/volume/drivers/gpfs.py:140 +msgid "Option gpfs_mount_point_base is not set correctly." msgstr "" -#: cinder/virt/baremetal/tilera.py:216 -#, python-format -msgid "deactivate_node is called for node_id = %(id)s node_ip = %(ip)s" +#: cinder/volume/drivers/gpfs.py:147 +msgid "Option gpfs_images_share_mode is not set correctly." msgstr "" -#: cinder/virt/baremetal/tilera.py:221 -msgid "status of node is set to 0" +#: cinder/volume/drivers/gpfs.py:153 +msgid "Option gpfs_images_dir is not set correctly." 
msgstr "" -#: cinder/virt/baremetal/tilera.py:232 -msgid "rootfs is already removed" +#: cinder/volume/drivers/gpfs.py:160 +#, python-format +msgid "" +"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " +"belong to different file systems" msgstr "" -#: cinder/virt/baremetal/tilera.py:264 -msgid "Before ping to the bare-metal node" +#: cinder/volume/drivers/gpfs.py:169 +#, python-format +msgid "" +"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in " +"cluster daemon level %(cur)s - must be at least at level %(min)s." msgstr "" -#: cinder/virt/baremetal/tilera.py:275 +#: cinder/volume/drivers/gpfs.py:183 #, python-format -msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is ready" +msgid "%s must be an absolute path." msgstr "" -#: cinder/virt/baremetal/tilera.py:279 +#: cinder/volume/drivers/gpfs.py:188 #, python-format -msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is not ready, out_msg=%(out_msg)s" +msgid "%s is not a directory." msgstr "" -#: cinder/virt/baremetal/tilera.py:292 -msgid "Noting to do for tilera nodes: vmlinux is in CF" +#: cinder/volume/drivers/gpfs.py:197 +#, python-format +msgid "" +"The GPFS filesystem %(fs)s is not at the required release level. Current" +" level is %(cur)s, must be at least %(min)s." msgstr "" -#: cinder/virt/baremetal/tilera.py:316 -msgid "activate_node" +#: cinder/volume/drivers/gpfs.py:556 +#, python-format +msgid "Failed to resize volume %(volume_id)s, error: %(error)s" msgstr "" -#: cinder/virt/baremetal/tilera.py:330 -msgid "Node is unknown error state." +#: cinder/volume/drivers/gpfs.py:604 +#, python-format +msgid "mkfs failed on volume %(vol)s, error message was: %(err)s" msgstr "" -#: cinder/virt/disk/api.py:165 -msgid "no capable image handler configured" +#: cinder/volume/drivers/gpfs.py:637 +#, python-format +msgid "" +"%s cannot be accessed. Verify that GPFS is active and file system is " +"mounted." msgstr "" -#: cinder/virt/disk/api.py:178 +#: cinder/volume/drivers/lvm.py:189 #, python-format -msgid "unknown disk image handler: %s" +msgid "Unabled to delete due to existing snapshot for volume: %s" msgstr "" -#: cinder/virt/disk/api.py:189 -msgid "image already mounted" +#: cinder/volume/drivers/lvm.py:215 +#, python-format +msgid "Volume device file path %s does not exist." msgstr "" -#: cinder/virt/disk/api.py:276 cinder/virt/disk/guestfs.py:64 -#: cinder/virt/disk/guestfs.py:78 cinder/virt/disk/mount.py:100 +#: cinder/volume/drivers/lvm.py:221 #, python-format -msgid "Failed to mount filesystem: %s" +msgid "Size for volume: %s not found, cannot secure delete." msgstr "" -#: cinder/virt/disk/api.py:291 +#: cinder/volume/drivers/lvm.py:262 #, python-format -msgid "Failed to remove container: %s" +msgid "snapshot: %s not found, skipping delete operations" msgstr "" -#: cinder/virt/disk/api.py:441 +#: cinder/volume/drivers/lvm.py:361 #, python-format -msgid "User %(username)s not found in password file." +msgid "Unable to update stats on non-initialized Volume Group: %s" msgstr "" -#: cinder/virt/disk/api.py:457 +#: cinder/volume/drivers/lvm.py:462 #, python-format -msgid "User %(username)s not found in shadow file." 
+msgid "Error creating iSCSI target, retrying creation for target: %s" msgstr "" -#: cinder/virt/disk/guestfs.py:39 +#: cinder/volume/drivers/lvm.py:482 #, python-format -msgid "unsupported partition: %s" +msgid "volume_info:%s" msgstr "" -#: cinder/virt/disk/guestfs.py:77 -msgid "unknown guestmount error" +#: cinder/volume/drivers/lvm.py:518 +msgid "Detected inconsistency in provider_location id" msgstr "" -#: cinder/virt/disk/loop.py:30 +#: cinder/volume/drivers/lvm.py:519 cinder/volume/drivers/lvm.py:724 +#: cinder/volume/drivers/huawei/rest_common.py:1225 #, python-format -msgid "Could not attach image to loopback: %s" -msgstr "" - -#: cinder/virt/disk/mount.py:76 -msgid "no partitions found" +msgid "%s" msgstr "" -#: cinder/virt/disk/mount.py:77 +#: cinder/volume/drivers/lvm.py:573 #, python-format -msgid "Failed to map partitions: %s" +msgid "Symbolic link %s not found" msgstr "" -#: cinder/virt/disk/nbd.py:58 -msgid "nbd unavailable: module not loaded" +#: cinder/volume/drivers/nfs.py:109 +msgid "Driver specific implementation needs to return mount_point_base." msgstr "" -#: cinder/virt/disk/nbd.py:63 -msgid "No free nbd devices" +#: cinder/volume/drivers/nfs.py:263 +#, python-format +msgid "Expected volume size was %d" msgstr "" -#: cinder/virt/disk/nbd.py:81 +#: cinder/volume/drivers/nfs.py:264 #, python-format -msgid "qemu-nbd error: %s" +msgid " but size is now %d" msgstr "" -#: cinder/virt/disk/nbd.py:93 +#: cinder/volume/drivers/nfs.py:361 #, python-format -msgid "nbd device %s did not show up" +msgid "%s is already mounted" msgstr "" -#: cinder/virt/libvirt/connection.py:265 +#: cinder/volume/drivers/nfs.py:421 #, python-format -msgid "Connecting to libvirt: %s" +msgid "There's no NFS config file configured (%s)" msgstr "" -#: cinder/virt/libvirt/connection.py:286 -msgid "Connection to libvirt broke" +#: cinder/volume/drivers/nfs.py:426 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" msgstr "" -#: cinder/virt/libvirt/connection.py:388 +#: cinder/volume/drivers/nfs.py:431 #, python-format -msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s" +msgid "NFS config 'nfs_oversub_ratio' invalid. Must be > 0: %s" msgstr "" -#: cinder/virt/libvirt/connection.py:400 +#: cinder/volume/drivers/nfs.py:439 #, python-format -msgid "" -"Error from libvirt during saved instance removal. Code=%(errcode)s " -"Error=%(e)s" +msgid "NFS config 'nfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" msgstr "" -#: cinder/virt/libvirt/connection.py:411 +#: cinder/volume/drivers/nfs.py:493 #, python-format -msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s" +msgid "Selected %s as target nfs share." msgstr "" -#: cinder/virt/libvirt/connection.py:423 -msgid "Instance destroyed successfully." +#: cinder/volume/drivers/nfs.py:526 +#, python-format +msgid "%s is above nfs_used_ratio" msgstr "" -#: cinder/virt/libvirt/connection.py:435 +#: cinder/volume/drivers/nfs.py:529 #, python-format -msgid "Error from libvirt during unfilter. 
Code=%(errcode)s Error=%(e)s" +msgid "%s is above nfs_oversub_ratio" msgstr "" -#: cinder/virt/libvirt/connection.py:461 +#: cinder/volume/drivers/nfs.py:532 #, python-format -msgid "Deleting instance files %(target)s" +msgid "%s reserved space is above nfs_oversub_ratio" msgstr "" -#: cinder/virt/libvirt/connection.py:554 -msgid "attaching LXC block device" +#: cinder/volume/drivers/rbd.py:160 +#, python-format +msgid "Invalid argument - whence=%s not supported" msgstr "" -#: cinder/virt/libvirt/connection.py:567 -msgid "detaching LXC block device" +#: cinder/volume/drivers/rbd.py:164 +msgid "Invalid argument" msgstr "" -#: cinder/virt/libvirt/connection.py:692 -#, fuzzy, python-format -msgid "Instance soft rebooted successfully." -msgstr "volume %s: created successfully" +#: cinder/volume/drivers/rbd.py:183 +msgid "fileno() not supported by RBD()" +msgstr "" -#: cinder/virt/libvirt/connection.py:696 -msgid "Failed to soft reboot instance." +#: cinder/volume/drivers/rbd.py:210 +#, python-format +msgid "error opening rbd image %s" msgstr "" -#: cinder/virt/libvirt/connection.py:725 -msgid "Instance shutdown successfully." +#: cinder/volume/drivers/rbd.py:259 +msgid "rados and rbd python libraries not found" msgstr "" -#: cinder/virt/libvirt/connection.py:761 cinder/virt/libvirt/connection.py:905 -msgid "During reboot, instance disappeared." +#: cinder/volume/drivers/rbd.py:265 +msgid "error connecting to ceph cluster" msgstr "" -#: cinder/virt/libvirt/connection.py:766 -msgid "Instance rebooted successfully." +#: cinder/volume/drivers/rbd.py:346 cinder/volume/drivers/sheepdog.py:178 +msgid "error refreshing volume stats" msgstr "" -#: cinder/virt/libvirt/connection.py:867 cinder/virt/xenapi/vmops.py:1358 +#: cinder/volume/drivers/rbd.py:377 #, python-format -msgid "" -"Found %(migration_count)d unconfirmed migrations older than " -"%(confirm_window)d seconds" +msgid "clone depth exceeds limit of %s" msgstr "" -#: cinder/virt/libvirt/connection.py:871 +#: cinder/volume/drivers/rbd.py:411 #, python-format -msgid "Automatically confirming migration %d" +msgid "maximum clone depth (%d) has been reached - flattening source volume" msgstr "" -#: cinder/virt/libvirt/connection.py:896 -msgid "Instance is running" +#: cinder/volume/drivers/rbd.py:423 +#, python-format +msgid "flattening source volume %s" msgstr "" -#: cinder/virt/libvirt/connection.py:910 -msgid "Instance spawned successfully." 
+#: cinder/volume/drivers/rbd.py:435 +#, python-format +msgid "creating snapshot='%s'" msgstr "" -#: cinder/virt/libvirt/connection.py:926 +#: cinder/volume/drivers/rbd.py:445 #, python-format -msgid "data: %(data)r, fpath: %(fpath)r" +msgid "cloning '%(src_vol)s@%(src_snap)s' to '%(dest)s'" msgstr "" -#: cinder/virt/libvirt/connection.py:978 -#, fuzzy -msgid "Guest does not have a console available" -msgstr "User does not have admin privileges" +#: cinder/volume/drivers/rbd.py:459 +msgid "clone created successfully" +msgstr "" -#: cinder/virt/libvirt/connection.py:1020 +#: cinder/volume/drivers/rbd.py:468 #, python-format -msgid "Path '%(path)s' supports direct I/O" +msgid "creating volume '%s'" msgstr "" -#: cinder/virt/libvirt/connection.py:1024 +#: cinder/volume/drivers/rbd.py:484 #, python-format -msgid "Path '%(path)s' does not support direct I/O: '%(ex)s'" +msgid "flattening %(pool)s/%(img)s" msgstr "" -#: cinder/virt/libvirt/connection.py:1028 cinder/virt/libvirt/connection.py:1032 +#: cinder/volume/drivers/rbd.py:490 #, python-format -msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'" +msgid "cloning %(pool)s/%(img)s@%(snap)s to %(dst)s" msgstr "" -#: cinder/virt/libvirt/connection.py:1153 -msgid "Creating image" +#: cinder/volume/drivers/rbd.py:527 +msgid "volume has no backup snaps" msgstr "" -#: cinder/virt/libvirt/connection.py:1339 +#: cinder/volume/drivers/rbd.py:550 #, python-format -msgid "Injecting %(injection)s into image %(img_id)s" +msgid "volume %s is not a clone" msgstr "" -#: cinder/virt/libvirt/connection.py:1349 +#: cinder/volume/drivers/rbd.py:568 #, python-format -msgid "Ignoring error injecting data into image %(img_id)s (%(e)s)" +msgid "deleting parent snapshot %s" msgstr "" -#: cinder/virt/libvirt/connection.py:1381 +#: cinder/volume/drivers/rbd.py:579 #, python-format -msgid "block_device_list %s" +msgid "deleting parent %s" msgstr "" -#: cinder/virt/libvirt/connection.py:1658 -msgid "Starting toXML method" +#: cinder/volume/drivers/rbd.py:593 +#, python-format +msgid "volume %s no longer exists in backend" msgstr "" -#: cinder/virt/libvirt/connection.py:1662 -msgid "Finished toXML method" +#: cinder/volume/drivers/rbd.py:609 +msgid "volume has clone snapshot(s)" msgstr "" -#: cinder/virt/libvirt/connection.py:1679 +#: cinder/volume/drivers/rbd.py:625 #, python-format -msgid "" -"Error from libvirt while looking up %(instance_name)s: [Error Code " -"%(error_code)s] %(ex)s" +msgid "deleting rbd volume %s" msgstr "" -#: cinder/virt/libvirt/connection.py:1920 -msgid "libvirt version is too old (does not support getVersion)" +#: cinder/volume/drivers/rbd.py:629 +msgid "" +"ImageBusy error raised while deleting rbd volume. This may have been " +"caused by a connection from a client that has crashed and, if so, may be " +"resolved by retrying the delete after 30 seconds has elapsed." 
msgstr "" -#: cinder/virt/libvirt/connection.py:1942 -#, python-format -msgid "'' must be 1, but %d\n" +#: cinder/volume/drivers/rbd.py:642 +msgid "volume is a clone so cleaning references" msgstr "" -#: cinder/virt/libvirt/connection.py:1969 +#: cinder/volume/drivers/rbd.py:696 #, python-format -msgid "topology (%(topology)s) must have %(ks)s" +msgid "connection data: %s" msgstr "" -#: cinder/virt/libvirt/connection.py:2067 -#, python-format -msgid "" -"Instance launched has CPU info:\n" -"%s" +#: cinder/volume/drivers/rbd.py:705 +msgid "Not stored in rbd" msgstr "" -#: cinder/virt/libvirt/connection.py:2079 -#, python-format -msgid "" -"CPU doesn't have compatibility.\n" -"\n" -"%(ret)s\n" -"\n" -"Refer to %(u)s" +#: cinder/volume/drivers/rbd.py:709 +msgid "Blank components" msgstr "" -#: cinder/virt/libvirt/connection.py:2136 -#, python-format -msgid "Timeout migrating for %s. nwfilter not found." -msgstr "" +#: cinder/volume/drivers/rbd.py:712 +#, fuzzy +msgid "Not an rbd snapshot" +msgstr "Not an rbd snapshot" -#: cinder/virt/libvirt/connection.py:2352 +#: cinder/volume/drivers/rbd.py:724 #, python-format -msgid "skipping %(path)s since it looks like volume" +msgid "not cloneable: %s" msgstr "" -#: cinder/virt/libvirt/connection.py:2407 +#: cinder/volume/drivers/rbd.py:728 #, python-format -msgid "Getting disk size of %(i_name)s: %(e)s" +msgid "%s is in a different ceph cluster" msgstr "" -#: cinder/virt/libvirt/connection.py:2458 -#, python-format -msgid "Instance %s: Starting migrate_disk_and_power_off" +#: cinder/volume/drivers/rbd.py:733 +msgid "rbd image clone requires image format to be 'raw' but image {0} is '{1}'" msgstr "" -#: cinder/virt/libvirt/connection.py:2513 -msgid "During wait running, instance disappeared." +#: cinder/volume/drivers/rbd.py:747 +#, fuzzy, python-format +msgid "Unable to open image %(loc)s: %(err)s" +msgstr "Unable to open image %(loc)s: %(err)s" + +#: cinder/volume/drivers/rbd.py:817 +msgid "volume backup complete." msgstr "" -#: cinder/virt/libvirt/connection.py:2518 -msgid "Instance running successfully." +#: cinder/volume/drivers/rbd.py:830 +msgid "volume restore complete." msgstr "" -#: cinder/virt/libvirt/connection.py:2525 +#: cinder/volume/drivers/rbd.py:840 cinder/volume/drivers/sheepdog.py:195 #, python-format -msgid "Instance %s: Starting finish_migration" +msgid "Failed to Extend Volume %(volname)s" msgstr "" -#: cinder/virt/libvirt/connection.py:2565 +#: cinder/volume/drivers/rbd.py:845 cinder/volume/drivers/sheepdog.py:200 +#: cinder/volume/drivers/windows/windows.py:223 #, python-format -msgid "Instance %s: Starting finish_revert_migration" +msgid "Extend volume from %(old_size)s GB to %(new_size)s GB." msgstr "" -#: cinder/virt/libvirt/firewall.py:42 -msgid "" -"Libvirt module could not be loaded. NWFilterFirewall will not work " -"correctly." +#: cinder/volume/drivers/scality.py:67 +msgid "Value required for 'scality_sofs_config'" msgstr "" -#: cinder/virt/libvirt/firewall.py:93 -msgid "Called setup_basic_filtering in nwfilter" +#: cinder/volume/drivers/scality.py:78 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" msgstr "" -#: cinder/virt/libvirt/firewall.py:101 -msgid "Ensuring static filters" +#: cinder/volume/drivers/scality.py:84 +msgid "Cannot execute /sbin/mount.sofs" msgstr "" -#: cinder/virt/libvirt/firewall.py:171 -#, python-format -msgid "The nwfilter(%(instance_filter_name)s) is not found." 
+#: cinder/volume/drivers/scality.py:105
+msgid "Cannot mount Scality SOFS, check syslog for errors"
msgstr ""

-#: cinder/virt/libvirt/firewall.py:217
+#: cinder/volume/drivers/scality.py:139
#, python-format
-msgid "The nwfilter(%(instance_filter_name)s) for%(name)s is not found."
+msgid "Cannot find volume dir for Scality SOFS at '%s'"
msgstr ""

-#: cinder/virt/libvirt/firewall.py:233
-msgid "iptables firewall: Setup Basic Filtering"
+#: cinder/volume/drivers/sheepdog.py:59
+#, python-format
+msgid "Sheepdog is not working: %s"
msgstr ""

-#: cinder/virt/libvirt/firewall.py:252
-msgid "Attempted to unfilter instance which is not filtered"
+#: cinder/volume/drivers/sheepdog.py:64
+msgid "Sheepdog is not working"
msgstr ""

-#: cinder/virt/libvirt/imagecache.py:170
+#: cinder/volume/drivers/solidfire.py:144
#, python-format
-msgid "%s is a valid instance name"
+msgid "Payload for SolidFire API call: %s"
msgstr ""

-#: cinder/virt/libvirt/imagecache.py:173
+#: cinder/volume/drivers/solidfire.py:151
#, python-format
-msgid "%s has a disk file"
+msgid ""
+"Failed to make httplib connection to SolidFire Cluster: %s (verify san_ip "
+"settings)"
msgstr ""

-#: cinder/virt/libvirt/imagecache.py:175
+#: cinder/volume/drivers/solidfire.py:154
#, python-format
-msgid "Instance %(instance)s is backed by %(backing)s"
+msgid "Failed to make httplib connection: %s"
msgstr ""

-#: cinder/virt/libvirt/imagecache.py:186
+#: cinder/volume/drivers/solidfire.py:161
#, python-format
msgid ""
-"Instance %(instance)s is using a backing file %(backing)s which does not "
-"appear in the image service"
+"Request to SolidFire cluster returned bad status: %(status)s / %(reason)s"
+" (check san_login/san_password settings)"
msgstr ""

-#: cinder/virt/libvirt/imagecache.py:237
+#: cinder/volume/drivers/solidfire.py:166
#, python-format
-msgid "%(id)s (%(base_file)s): image verification failed"
+msgid "HTTP request failed, with status: %(status)s and reason: %(reason)s"
msgstr ""

-#: cinder/virt/libvirt/imagecache.py:247
+#: cinder/volume/drivers/solidfire.py:177
#, python-format
-msgid "%(id)s (%(base_file)s): image verification skipped, no hash stored"
+msgid "Call to json.loads() raised an exception: %s"
msgstr ""

-#: cinder/virt/libvirt/imagecache.py:266
+#: cinder/volume/drivers/solidfire.py:183
#, python-format
-msgid "Cannot remove %(base_file)s, it does not exist"
+msgid "Results of SolidFire API call: %s"
msgstr ""

-#: cinder/virt/libvirt/imagecache.py:278
+#: cinder/volume/drivers/solidfire.py:187
#, python-format
-msgid "Base file too young to remove: %s"
+msgid "Clone operation encountered: %s"
msgstr ""

-#: cinder/virt/libvirt/imagecache.py:281
+#: cinder/volume/drivers/solidfire.py:189
#, python-format
-msgid "Removing base file: %s"
+msgid "Waiting for outstanding operation before retrying snapshot: %s"
msgstr ""

-#: cinder/virt/libvirt/imagecache.py:288
+#: cinder/volume/drivers/solidfire.py:195
#, python-format
-msgid "Failed to remove %(base_file)s, error was %(error)s"
+msgid "Detected xDBVersionMismatch, retry %s of 5"
msgstr ""

-#: cinder/virt/libvirt/imagecache.py:299
+#: cinder/volume/drivers/solidfire.py:202
+#: cinder/volume/drivers/solidfire.py:271
+#: cinder/volume/drivers/solidfire.py:366
#, python-format
-msgid "%(id)s (%(base_file)s): checking"
+msgid "API response: %s"
msgstr ""

-#: cinder/virt/libvirt/imagecache.py:318
+#: cinder/volume/drivers/solidfire.py:222
#, python-format
-msgid ""
-"%(id)s (%(base_file)s): in use: on this node %(local)d local, %(remote)d "
-"on other nodes"
+msgid "Found 
solidfire account: %s"
msgstr ""

-#: cinder/virt/libvirt/imagecache.py:330
+#: cinder/volume/drivers/solidfire.py:253
#, python-format
-msgid ""
-"%(id)s (%(base_file)s): warning -- an absent base file is in use! "
-"instances: %(instance_list)s"
+msgid "solidfire account: %s does not exist, creating it..."
msgstr ""

-#: cinder/virt/libvirt/imagecache.py:338
+#: cinder/volume/drivers/solidfire.py:315
#, python-format
-msgid "%(id)s (%(base_file)s): in use on (%(remote)d on other nodes)"
+msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!"
msgstr ""

-#: cinder/virt/libvirt/imagecache.py:348
-#, python-format
-msgid "%(id)s (%(base_file)s): image is not in use"
+#: cinder/volume/drivers/solidfire.py:398
+msgid "Failed to get model update from clone"
msgstr ""

-#: cinder/virt/libvirt/imagecache.py:354
+#: cinder/volume/drivers/solidfire.py:410
#, python-format
-msgid "%(id)s (%(base_file)s): image is in use"
+msgid "Failed volume create: %s"
msgstr ""

-#: cinder/virt/libvirt/imagecache.py:377
+#: cinder/volume/drivers/solidfire.py:425
#, python-format
-msgid "Skipping verification, no base directory at %s"
+msgid "More than one valid preset was detected, using %s"
msgstr ""

-#: cinder/virt/libvirt/imagecache.py:381
-msgid "Verify base images"
+#: cinder/volume/drivers/solidfire.py:460
+#, python-format
+msgid "Failed to get SolidFire Volume: %s"
msgstr ""

-#: cinder/virt/libvirt/imagecache.py:388
+#: cinder/volume/drivers/solidfire.py:469
#, python-format
-msgid "Image id %(id)s yields fingerprint %(fingerprint)s"
+msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s."
msgstr ""

-#: cinder/virt/libvirt/imagecache.py:406
+#: cinder/volume/drivers/solidfire.py:478
#, python-format
-msgid "Unknown base file: %s"
+msgid "Volume %s not found on SF Cluster."
msgstr ""

-#: cinder/virt/libvirt/imagecache.py:411
+#: cinder/volume/drivers/solidfire.py:481
#, python-format
-msgid "Active base files: %s"
+msgid "Found %(count)s volumes mapped to id: %(uuid)s."
msgstr ""

-#: cinder/virt/libvirt/imagecache.py:414
-#, python-format
-msgid "Corrupt base files: %s"
+#: cinder/volume/drivers/solidfire.py:550
+msgid "Enter SolidFire delete_volume..."
msgstr ""

-#: cinder/virt/libvirt/imagecache.py:418
+#: cinder/volume/drivers/solidfire.py:554
#, python-format
-msgid "Removable base files: %s"
+msgid "Account for Volume ID %s was not found on the SolidFire Cluster!"
msgstr ""

-#: cinder/virt/libvirt/imagecache.py:426
-msgid "Verification complete"
+#: cinder/volume/drivers/solidfire.py:556
+msgid "This usually means the volume was never successfully created."
msgstr ""

-#: cinder/virt/libvirt/utils.py:264
-msgid "Unable to find an open port"
+#: cinder/volume/drivers/solidfire.py:569
+#, python-format
+msgid "Failed to delete SolidFire Volume: %s"
msgstr ""

-#: cinder/virt/libvirt/vif.py:90
+#: cinder/volume/drivers/solidfire.py:572
+#: cinder/volume/drivers/solidfire.py:646
+#: cinder/volume/drivers/solidfire.py:709
+#: cinder/volume/drivers/solidfire.py:734
#, python-format
-msgid "Ensuring vlan %(vlan)s and bridge %(bridge)s"
+msgid "Volume ID %s was not found on the SolidFire Cluster!"
msgstr "" -#: cinder/virt/libvirt/vif.py:99 -#, python-format -msgid "Ensuring bridge %s" +#: cinder/volume/drivers/solidfire.py:575 +msgid "Leaving SolidFire delete_volume" msgstr "" -#: cinder/virt/libvirt/vif.py:165 cinder/virt/libvirt/vif.py:220 -#, python-format -msgid "Failed while unplugging vif of instance '%s'" +#: cinder/volume/drivers/solidfire.py:579 +msgid "Executing SolidFire ensure_export..." msgstr "" -#: cinder/virt/libvirt/volume.py:163 -#, python-format -msgid "iSCSI device not found at %s" +#: cinder/volume/drivers/solidfire.py:587 +msgid "Executing SolidFire create_export..." msgstr "" -#: cinder/virt/libvirt/volume.py:166 -#, python-format -msgid "" -"ISCSI volume not yet found at: %(mount_device)s. Will rescan & retry. " -"Try number: %(tries)s" +#: cinder/volume/drivers/solidfire.py:638 +msgid "Entering SolidFire extend_volume..." msgstr "" -#: cinder/virt/libvirt/volume.py:178 -#, python-format -msgid "Found iSCSI node %(mount_device)s (after %(tries)s rescans)" +#: cinder/volume/drivers/solidfire.py:660 +msgid "Leaving SolidFire extend_volume" msgstr "" -#: cinder/virt/vmwareapi/error_util.py:93 -#, python-format -msgid "Error(s) %s occurred in the call to RetrieveProperties" +#: cinder/volume/drivers/solidfire.py:665 +msgid "Updating cluster status info" msgstr "" -#: cinder/virt/vmwareapi/fake.py:44 cinder/virt/xenapi/fake.py:77 -#, python-format -msgid "%(text)s: _db_content => %(content)s" -msgstr "%(text)s: _db_content => %(content)s" +#: cinder/volume/drivers/solidfire.py:673 +#, fuzzy +msgid "Failed to get updated stats" +msgstr "Failed to get updated stats" -#: cinder/virt/vmwareapi/fake.py:131 -#, python-format -msgid "Property %(attr)s not set for the managed object %(objName)s" +#: cinder/volume/drivers/solidfire.py:703 +#: cinder/volume/drivers/solidfire.py:728 +msgid "Entering SolidFire attach_volume..." msgstr "" -#: cinder/virt/vmwareapi/fake.py:437 -msgid "There is no VM registered" +#: cinder/volume/drivers/solidfire.py:773 +msgid "Leaving SolidFire transfer volume" msgstr "" -#: cinder/virt/vmwareapi/fake.py:439 cinder/virt/vmwareapi/fake.py:609 +#: cinder/volume/drivers/zadara.py:236 #, python-format -msgid "Virtual Machine with ref %s is not there" +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" msgstr "" -#: cinder/virt/vmwareapi/fake.py:502 +#: cinder/volume/drivers/zadara.py:260 #, python-format -msgid "Logging out a session that is invalid or already logged out: %s" +msgid "Operation completed. %(data)s" msgstr "" -#: cinder/virt/vmwareapi/fake.py:517 -msgid "Session is faulty" +#: cinder/volume/drivers/zadara.py:357 +#, python-format +msgid "Pool %(name)s: %(total)sGB total, %(free)sGB free" msgstr "" -#: cinder/virt/vmwareapi/fake.py:520 -msgid "Session Invalid" +#: cinder/volume/drivers/zadara.py:408 cinder/volume/drivers/zadara.py:531 +#, python-format +msgid "Volume %(name)s could not be found. 
It might already be deleted"
msgstr ""

-#: cinder/virt/vmwareapi/fake.py:606
-msgid " No Virtual Machine has been registered yet"
+#: cinder/volume/drivers/zadara.py:438
+#, python-format
+msgid "Create snapshot: %s"
msgstr ""

-#: cinder/virt/vmwareapi/io_util.py:99
+#: cinder/volume/drivers/zadara.py:445 cinder/volume/drivers/zadara.py:490
+#: cinder/volume/drivers/zadara.py:516
#, python-format
-msgid "Glance image %s is in killed state"
+msgid "Volume %(name)s not found"
msgstr ""

-#: cinder/virt/vmwareapi/io_util.py:107
+#: cinder/volume/drivers/zadara.py:456
#, python-format
-msgid "Glance image %(image_id)s is in unknown state - %(state)s"
+msgid "Delete snapshot: %s"
msgstr ""

-#: cinder/virt/vmwareapi/network_utils.py:128
-msgid ""
-"ESX SOAP server returned an empty port group for the host system in its "
-"response"
+#: cinder/volume/drivers/zadara.py:464
+#, python-format
+msgid "snapshot: original volume %s not found, skipping delete operation"
+msgstr ""
+
+#: cinder/volume/drivers/zadara.py:472
+#, python-format
+msgid "snapshot: snapshot %s not found, skipping delete operation"
msgstr ""

-#: cinder/virt/vmwareapi/network_utils.py:155
+#: cinder/volume/drivers/zadara.py:483
#, python-format
-msgid "Creating Port Group with name %s on the ESX host"
+msgid "Creating volume from snapshot: %s"
msgstr ""

-#: cinder/virt/vmwareapi/network_utils.py:169
+#: cinder/volume/drivers/zadara.py:496
#, python-format
-msgid "Created Port Group with name %s on the ESX host"
+msgid "Snapshot %(name)s not found"
msgstr ""

-#: cinder/virt/vmwareapi/read_write_util.py:150
+#: cinder/volume/drivers/zadara.py:614
#, python-format
-msgid "Exception during HTTP connection close in VMWareHTTpWrite. Exception is %s"
+msgid "Attach properties: %(properties)s"
msgstr ""

-#: cinder/virt/vmwareapi/vim.py:84
-msgid "Unable to import suds."
+#: cinder/volume/drivers/emc/emc_smis_common.py:40
+msgid ""
+"Module PyWBEM not installed. Install PyWBEM using the python-pywbem "
+"package."
msgstr ""

-#: cinder/virt/vmwareapi/vim.py:90
-msgid "Must specify vmwareapi_wsdl_loc"
+#: cinder/volume/drivers/emc/emc_smis_common.py:79
+msgid "Entering create_volume."
msgstr ""

-#: cinder/virt/vmwareapi/vim.py:145
+#: cinder/volume/drivers/emc/emc_smis_common.py:83
+#, fuzzy, python-format
+msgid "Create Volume: %(volume)s Size: %(size)lu"
+msgstr "Create Volume: %(volume)s Size: %(size)lu"
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:91
#, python-format
-msgid "No such SOAP method '%s' provided by VI SDK"
+msgid "Create Volume: %(volume)s Storage type: %(storage_type)s"
msgstr ""

-#: cinder/virt/vmwareapi/vim.py:150
+#: cinder/volume/drivers/emc/emc_smis_common.py:98
#, python-format
-msgid "httplib error in %s: "
+msgid ""
+"Create Volume: %(volume)s Pool: %(pool)s Storage System: "
+"%(storage_system)s"
msgstr ""

-#: cinder/virt/vmwareapi/vim.py:157
+#: cinder/volume/drivers/emc/emc_smis_common.py:107
#, python-format
-msgid "Socket error in %s: "
+msgid ""
+"Error Create Volume: %(volumename)s. Storage Configuration Service not "
+"found for pool %(storage_type)s."
msgstr "" -#: cinder/virt/vmwareapi/vim.py:162 +#: cinder/volume/drivers/emc/emc_smis_common.py:115 #, python-format -msgid "Type error in %s: " +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" msgstr "" -#: cinder/virt/vmwareapi/vim.py:166 +#: cinder/volume/drivers/emc/emc_smis_common.py:130 #, python-format -msgid "Exception in %s " +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:66 -msgid "Getting list of instances" +#: cinder/volume/drivers/emc/emc_smis_common.py:137 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:82 +#: cinder/volume/drivers/emc/emc_smis_common.py:144 #, python-format -msgid "Got total of %s instances" +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:126 -msgid "Couldn't get a local Datastore reference" +#: cinder/volume/drivers/emc/emc_smis_common.py:152 +msgid "Entering create_volume_from_snapshot." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:196 +#: cinder/volume/drivers/emc/emc_smis_common.py:157 #, python-format -msgid "Creating VM with the name %s on the ESX host" +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:205 +#: cinder/volume/drivers/emc/emc_smis_common.py:167 #, python-format -msgid "Created VM with the name %s on the ESX host" +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:234 +#: cinder/volume/drivers/emc/emc_smis_common.py:177 #, python-format msgid "" -"Creating Virtual Disk of size %(vmdk_file_size_in_kb)s KB and adapter " -"type %(adapter_type)s on the ESX host local store %(data_store_name)s" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:251 +#: cinder/volume/drivers/emc/emc_smis_common.py:188 #, python-format msgid "" -"Created Virtual Disk of size %(vmdk_file_size_in_kb)s KB on the ESX host " -"local store %(data_store_name)s" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:260 +#: cinder/volume/drivers/emc/emc_smis_common.py:197 #, python-format msgid "" -"Deleting the file %(flat_uploaded_vmdk_path)s on the ESX host localstore " -"%(data_store_name)s" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:272 +#: cinder/volume/drivers/emc/emc_smis_common.py:218 #, python-format msgid "" -"Deleted the file %(flat_uploaded_vmdk_path)s on the ESX host local store " -"%(data_store_name)s" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. 
Return code: %(rc)lu. Error: %(error)s"
msgstr ""

-#: cinder/virt/vmwareapi/vmops.py:283
+#: cinder/volume/drivers/emc/emc_smis_common.py:230
#, python-format
msgid ""
-"Downloading image file data %(image_ref)s to the ESX data store "
-"%(data_store_name)s"
+"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: "
+"%(snapshotname)s. Successfully cloned volume from snapshot. Finding the "
+"clone relationship."
msgstr ""

-#: cinder/virt/vmwareapi/vmops.py:298
+#: cinder/volume/drivers/emc/emc_smis_common.py:241
#, python-format
msgid ""
-"Downloaded image file data %(image_ref)s to the ESX data store "
-"%(data_store_name)s"
+"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: "
+"%(snapshotname)s. Remove the clone relationship. Method: "
+"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: "
+"8 Synchronization: %(sync_name)s"
msgstr ""

-#: cinder/virt/vmwareapi/vmops.py:315
+#: cinder/volume/drivers/emc/emc_smis_common.py:257
#, python-format
-msgid "Reconfiguring VM instance %s to attach the image disk"
+msgid ""
+"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: "
+"%(snapshotname)s Return code: %(rc)lu"
msgstr ""

-#: cinder/virt/vmwareapi/vmops.py:322
+#: cinder/volume/drivers/emc/emc_smis_common.py:266
#, python-format
-msgid "Reconfigured VM instance %s to attach the image disk"
+msgid ""
+"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: "
+"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s"
msgstr ""

-#: cinder/virt/vmwareapi/vmops.py:329
+#: cinder/volume/drivers/emc/emc_smis_common.py:278
#, python-format
-msgid "Powering on the VM instance %s"
+msgid ""
+"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: "
+"%(snapshotname)s Return code: %(rc)lu."
msgstr ""

-#: cinder/virt/vmwareapi/vmops.py:335
-#, python-format
-msgid "Powered on the VM instance %s"
+#: cinder/volume/drivers/emc/emc_smis_common.py:287
+msgid "Entering create_cloned_volume."
msgstr ""

-#: cinder/virt/vmwareapi/vmops.py:381
+#: cinder/volume/drivers/emc/emc_smis_common.py:292
#, python-format
-msgid "Creating Snapshot of the VM instance %s "
+msgid ""
+"Create a Clone from Volume: Volume: %(volumename)s Source Volume: "
+"%(srcname)s"
msgstr ""

-#: cinder/virt/vmwareapi/vmops.py:391
+#: cinder/volume/drivers/emc/emc_smis_common.py:302
#, python-format
-msgid "Created Snapshot of the VM instance %s "
+msgid ""
+"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s"
+" Source Instance: %(src_instance)s Storage System: %(storage_system)s."
msgstr ""

-#: cinder/virt/vmwareapi/vmops.py:434
+#: cinder/volume/drivers/emc/emc_smis_common.py:312
#, python-format
-msgid "Copying disk data before snapshot of the VM instance %s"
+msgid ""
+"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: "
+"%(srcname)s. Cannot find Replication Service to create cloned volume."
msgstr "" -#: cinder/virt/vmwareapi/vmops.py:447 +#: cinder/volume/drivers/emc/emc_smis_common.py:321 #, python-format -msgid "Copied disk data before snapshot of the VM instance %s" +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:456 +#: cinder/volume/drivers/emc/emc_smis_common.py:342 #, python-format -msgid "Uploading image %s" +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. Return code: %(rc)lu.Error: %(error)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:469 +#: cinder/volume/drivers/emc/emc_smis_common.py:354 #, python-format -msgid "Uploaded image %s" +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:479 +#: cinder/volume/drivers/emc/emc_smis_common.py:365 #, python-format -msgid "Deleting temporary vmdk file %s" +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:488 +#: cinder/volume/drivers/emc/emc_smis_common.py:381 #, python-format -msgid "Deleted temporary vmdk file %s" +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:520 -msgid "instance is not powered on" +#: cinder/volume/drivers/emc/emc_smis_common.py:390 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:527 +#: cinder/volume/drivers/emc/emc_smis_common.py:402 #, python-format -msgid "Rebooting guest OS of VM %s" +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:530 -#, python-format -msgid "Rebooted guest OS of VM %s" +#: cinder/volume/drivers/emc/emc_smis_common.py:411 +msgid "Entering delete_volume." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:532 +#: cinder/volume/drivers/emc/emc_smis_common.py:413 #, python-format -msgid "Doing hard reboot of VM %s" +msgid "Delete Volume: %(volume)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:536 +#: cinder/volume/drivers/emc/emc_smis_common.py:420 #, python-format -msgid "Did hard reboot of VM %s" +msgid "Volume %(name)s not found on the array. No volume to delete." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:548 +#: cinder/volume/drivers/emc/emc_smis_common.py:430 #, python-format -msgid "instance - %s not present" +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." 
msgstr "" -#: cinder/virt/vmwareapi/vmops.py:567 +#: cinder/volume/drivers/emc/emc_smis_common.py:438 #, python-format -msgid "Powering off the VM %s" +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:572 +#: cinder/volume/drivers/emc/emc_smis_common.py:442 #, python-format -msgid "Powered off the VM %s" +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:576 +#: cinder/volume/drivers/emc/emc_smis_common.py:456 #, python-format -msgid "Unregistering the VM %s" +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:579 +#: cinder/volume/drivers/emc/emc_smis_common.py:465 #, python-format -msgid "Unregistered the VM %s" +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:581 -#, python-format -msgid "" -"In vmwareapi:vmops:destroy, got this exception while un-registering the " -"VM: %s" +#: cinder/volume/drivers/emc/emc_smis_common.py:472 +msgid "Entering create_snapshot." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:592 +#: cinder/volume/drivers/emc/emc_smis_common.py:476 #, python-format -msgid "Deleting contents of the VM %(name)s from datastore %(datastore_name)s" +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:602 +#: cinder/volume/drivers/emc/emc_smis_common.py:488 #, python-format -msgid "Deleted contents of the VM %(name)s from datastore %(datastore_name)s" +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:607 +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:497 +#: cinder/volume/drivers/emc/emc_smis_common.py:567 #, python-format -msgid "" -"In vmwareapi:vmops:destroy, got this exception while deleting the VM " -"contents from the disk: %s" +msgid "Cannot find Replication Service to create snapshot for volume %s." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:615 -msgid "pause not supported for vmwareapi" +#: cinder/volume/drivers/emc/emc_smis_common.py:502 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:619 -msgid "unpause not supported for vmwareapi" +#: cinder/volume/drivers/emc/emc_smis_common.py:518 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:633 +#: cinder/volume/drivers/emc/emc_smis_common.py:527 #, python-format -msgid "Suspending the VM %s " +msgid "" +"Error Create Snapshot: %(snapshot)s Volume: %(volume)s Error: " +"%(errordesc)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:637 +#: cinder/volume/drivers/emc/emc_smis_common.py:535 #, python-format -msgid "Suspended the VM %s " +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:640 -msgid "instance is powered off and can not be suspended." +#: cinder/volume/drivers/emc/emc_smis_common.py:541 +msgid "Entering delete_snapshot." 
msgstr "" -#: cinder/virt/vmwareapi/vmops.py:643 +#: cinder/volume/drivers/emc/emc_smis_common.py:545 #, python-format -msgid "VM %s was already in suspended state. So returning without doing anything" +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:656 +#: cinder/volume/drivers/emc/emc_smis_common.py:551 #, python-format -msgid "Resuming the VM %s" +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:661 +#: cinder/volume/drivers/emc/emc_smis_common.py:559 #, python-format -msgid "Resumed the VM %s " +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:663 -msgid "instance is not in a suspended state" +#: cinder/volume/drivers/emc/emc_smis_common.py:574 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:699 -msgid "get_diagnostics not implemented for vmwareapi" +#: cinder/volume/drivers/emc/emc_smis_common.py:590 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:757 +#: cinder/volume/drivers/emc/emc_smis_common.py:599 #, python-format msgid "" -"Reconfiguring VM instance %(name)s to set the machine id with ip - " -"%(ip_addr)s" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:765 +#: cinder/volume/drivers/emc/emc_smis_common.py:611 #, python-format msgid "" -"Reconfigured VM instance %(name)s to set the machine id with ip - " -"%(ip_addr)s" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:802 +#: cinder/volume/drivers/emc/emc_smis_common.py:621 +#, fuzzy, python-format +msgid "Create export: %(volume)s" +msgstr "Create export: %(volume)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:626 #, python-format -msgid "Creating directory with path %s" +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:806 +#: cinder/volume/drivers/emc/emc_smis_common.py:648 #, python-format -msgid "Created directory with path %s" +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:89 +#: cinder/volume/drivers/emc/emc_smis_common.py:663 #, python-format -msgid "Downloading image %s from glance image server" +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:103 +#: cinder/volume/drivers/emc/emc_smis_common.py:674 #, python-format -msgid "Downloaded image %s from glance image server" +msgid "Error mapping volume %s." msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:108 +#: cinder/volume/drivers/emc/emc_smis_common.py:678 +#, fuzzy, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "ExposePaths for volume %s completed successfully." 
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:694
#, python-format
-msgid "Uploading image %s to the Glance image server"
+msgid ""
+"HidePaths: %(vol)s ConfigService: %(service)s LUNames: %(device_id)s "
+"LunMaskingSCSIProtocolController: %(lunmasking)s"
msgstr ""

-#: cinder/virt/vmwareapi/vmware_images.py:129
+#: cinder/volume/drivers/emc/emc_smis_common.py:707
#, python-format
-msgid "Uploaded image %s to the Glance image server"
+msgid "Error unmapping volume %s."
msgstr ""

-#: cinder/virt/vmwareapi/vmware_images.py:139
+#: cinder/volume/drivers/emc/emc_smis_common.py:711
+#, fuzzy, python-format
+msgid "HidePaths for volume %s completed successfully."
+msgstr "HidePaths for volume %s completed successfully."
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:724
#, python-format
-msgid "Getting image size for the image %s"
+msgid ""
+"AddMembers: ConfigService: %(service)s MaskingGroup: %(masking_group)s"
+" Members: %(vol)s"
msgstr ""

-#: cinder/virt/vmwareapi/vmware_images.py:143
+#: cinder/volume/drivers/emc/emc_smis_common.py:739
#, python-format
-msgid "Got image size of %(size)s for the image %(image)s"
+msgid "Error mapping volume %(vol)s. %(error)s"
msgstr ""

-#: cinder/virt/xenapi/fake.py:553 cinder/virt/xenapi/fake.py:652
-#: cinder/virt/xenapi/fake.py:670 cinder/virt/xenapi/fake.py:732
-msgid "Raising NotImplemented"
-msgstr "Raising NotImplemented"
+#: cinder/volume/drivers/emc/emc_smis_common.py:744
+#, fuzzy, python-format
+msgid "AddMembers for volume %s completed successfully."
+msgstr "AddMembers for volume %s completed successfully."

-#: cinder/virt/xenapi/fake.py:555
+#: cinder/volume/drivers/emc/emc_smis_common.py:757
#, python-format
-msgid "xenapi.fake does not have an implementation for %s"
-msgstr "xenapi.fake does not have an implementation for %s"
+msgid ""
+"RemoveMembers: ConfigService: %(service)s MaskingGroup: "
+"%(masking_group)s Members: %(vol)s"
+msgstr ""

-#: cinder/virt/xenapi/fake.py:589
+#: cinder/volume/drivers/emc/emc_smis_common.py:770
#, python-format
-msgid "Calling %(localname)s %(impl)s"
-msgstr "Calling %(localname)s %(impl)s"
+msgid "Error unmapping volume %(vol)s. %(error)s"
+msgstr ""

-#: cinder/virt/xenapi/fake.py:594
-#, python-format
-msgid "Calling getter %s"
-msgstr "Calling getter %s"
+#: cinder/volume/drivers/emc/emc_smis_common.py:775
+#, fuzzy, python-format
+msgid "RemoveMembers for volume %s completed successfully."
+msgstr "RemoveMembers for volume %s completed successfully."
-#: cinder/virt/xenapi/fake.py:654 +#: cinder/volume/drivers/emc/emc_smis_common.py:781 #, python-format -msgid "" -"xenapi.fake does not have an implementation for %s or it has been called " -"with the wrong number of arguments" +msgid "Map volume: %(volume)s" msgstr "" -"xenapi.fake does not have an implementation for %s or it has been called " -"with the wrong number of arguments" -#: cinder/virt/xenapi/host.py:67 +#: cinder/volume/drivers/emc/emc_smis_common.py:790 +#: cinder/volume/drivers/emc/emc_smis_common.py:820 #, python-format -msgid "" -"Instance %(name)s running on %(host)s could not be found in the database:" -" assuming it is a worker VM and skipping migration to a new host" +msgid "Cannot find Controller Configuration Service for storage system %s" msgstr "" -#: cinder/virt/xenapi/host.py:137 +#: cinder/volume/drivers/emc/emc_smis_common.py:804 #, python-format -msgid "Unable to get SR for this host: %s" +msgid "Unmap volume: %(volume)s" msgstr "" -#: cinder/virt/xenapi/host.py:169 -msgid "Unable to get updated status" +#: cinder/volume/drivers/emc/emc_smis_common.py:810 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." msgstr "" -#: cinder/virt/xenapi/host.py:172 +#: cinder/volume/drivers/emc/emc_smis_common.py:834 #, python-format -msgid "The call to %(method)s returned an error: %(e)s." +msgid "Initialize connection: %(volume)s" msgstr "" -#: cinder/virt/xenapi/network_utils.py:37 +#: cinder/volume/drivers/emc/emc_smis_common.py:840 #, python-format -msgid "Found non-unique network for name_label %s" +msgid "Volume %s is already mapped." msgstr "" -#: cinder/virt/xenapi/network_utils.py:55 +#: cinder/volume/drivers/emc/emc_smis_common.py:852 #, python-format -msgid "Found non-unique network for bridge %s" +msgid "Terminate connection: %(volume)s" msgstr "" -#: cinder/virt/xenapi/network_utils.py:58 +#: cinder/volume/drivers/emc/emc_smis_common.py:884 #, python-format -msgid "Found no network for bridge %s" +msgid "Found Storage Type: %s" msgstr "" -#: cinder/virt/xenapi/pool.py:111 -#, python-format -msgid "Unable to eject %(host)s from the pool; pool not empty" +#: cinder/volume/drivers/emc/emc_smis_common.py:887 +msgid "Storage type not found." msgstr "" -#: cinder/virt/xenapi/pool.py:126 +#: cinder/volume/drivers/emc/emc_smis_common.py:903 #, python-format -msgid "Unable to eject %(host)s from the pool; No master found" +msgid "Found Masking View: %s" msgstr "" -#: cinder/virt/xenapi/pool.py:143 -#, python-format -msgid "Pool-Join failed: %(e)s" +#: cinder/volume/drivers/emc/emc_smis_common.py:906 +msgid "Masking View not found." msgstr "" -#: cinder/virt/xenapi/pool.py:146 -#, python-format -msgid "Unable to join %(host)s in the pool" +#: cinder/volume/drivers/emc/emc_smis_common.py:928 +msgid "Ecom user not found." msgstr "" -#: cinder/virt/xenapi/pool.py:162 +#: cinder/volume/drivers/emc/emc_smis_common.py:948 #, python-format -msgid "Pool-eject failed: %(e)s" +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" msgstr "" -#: cinder/virt/xenapi/pool.py:174 -#, fuzzy, python-format -msgid "Unable to set up pool: %(e)s." -msgstr "Unable to detach volume %s" +#: cinder/volume/drivers/emc/emc_smis_common.py:952 +msgid "Ecom server not found." 
+msgstr "" -#: cinder/virt/xenapi/pool.py:185 -#, python-format -msgid "Pool-set_name_label failed: %(e)s" +#: cinder/volume/drivers/emc/emc_smis_common.py:959 +msgid "Cannot connect to ECOM server" msgstr "" -#: cinder/virt/xenapi/vif.py:103 +#: cinder/volume/drivers/emc/emc_smis_common.py:971 #, python-format -msgid "Found no PIF for device %s" +msgid "Found Replication Service: %s" msgstr "" -#: cinder/virt/xenapi/vif.py:122 +#: cinder/volume/drivers/emc/emc_smis_common.py:984 #, python-format -msgid "" -"PIF %(pif_rec['uuid'])s for network %(bridge)s has VLAN id %(pif_vlan)d. " -"Expected %(vlan_num)d" +msgid "Found Storage Configuration Service: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:218 -msgid "Created VM" +#: cinder/volume/drivers/emc/emc_smis_common.py:997 +#, python-format +msgid "Found Controller Configuration Service: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:245 +#: cinder/volume/drivers/emc/emc_smis_common.py:1010 #, python-format -msgid "VBD not found in instance %s" -msgstr "VBD not found in instance %s" +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:262 +#: cinder/volume/drivers/emc/emc_smis_common.py:1054 #, python-format -msgid "VBD %s already detached" +msgid "Pool %(storage_type)s is not found." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:265 +#: cinder/volume/drivers/emc/emc_smis_common.py:1060 #, python-format -msgid "VBD %(vbd_ref)s detach rejected, attempt %(num_attempt)d/%(max_attempts)d" +msgid "Storage system not found for pool %(storage_type)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:270 +#: cinder/volume/drivers/emc/emc_smis_common.py:1066 #, python-format -msgid "Unable to unplug VBD %s" -msgstr "Unable to unplug VBD %s" +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:275 +#: cinder/volume/drivers/emc/emc_smis_common.py:1082 #, python-format -msgid "Reached maximum number of retries trying to unplug VBD %s" +msgid "Pool name: %(poolname)s System name: %(systemname)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:286 +#: cinder/volume/drivers/emc/emc_smis_common.py:1114 #, python-format -msgid "Unable to destroy VBD %s" -msgstr "Unable to destroy VBD %s" - -#: cinder/virt/xenapi/vm_utils.py:305 -#, fuzzy, python-format -msgid "Creating %(vbd_type)s-type VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " -msgstr "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s." +msgid "Volume %(volumename)s not found on the array." +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:308 +#: cinder/volume/drivers/emc/emc_smis_common.py:1117 #, python-format -msgid "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s." -msgstr "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s." +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:319 +#: cinder/volume/drivers/emc/emc_smis_common.py:1130 #, python-format -msgid "Unable to destroy VDI %s" +msgid "Source: %(volumename)s Target: %(snapshotname)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:336 +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 #, python-format msgid "" -"Created VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s)" -" on %(sr_ref)s." +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " msgstr "" -"Created VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s)" -" on %(sr_ref)s." 
-#: cinder/virt/xenapi/vm_utils.py:345 +#: cinder/volume/drivers/emc/emc_smis_common.py:1158 #, python-format -msgid "Copied VDI %(vdi_ref)s from VDI %(vdi_to_copy_ref)s on %(sr_ref)s." +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:353 +#: cinder/volume/drivers/emc/emc_smis_common.py:1184 #, python-format -msgid "Cloned VDI %(vdi_ref)s from VDI %(vdi_to_clone_ref)s" +msgid "Error finding %s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:372 +#: cinder/volume/drivers/emc/emc_smis_common.py:1188 #, python-format -msgid "No primary VDI found for %(vm_ref)s" +msgid "Found %(name)s: %(initiator)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:379 -#, fuzzy, python-format -msgid "Snapshotting with label '%(label)s'" -msgstr "Snapshotting VM %(vm_ref)s with label '%(label)s'..." - -#: cinder/virt/xenapi/vm_utils.py:392 -#, fuzzy, python-format -msgid "Created snapshot %(template_vm_ref)s" -msgstr "Created snapshot %(template_vm_ref)s from VM %(vm_ref)s." +#: cinder/volume/drivers/emc/emc_smis_common.py:1248 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:431 +#: cinder/volume/drivers/emc/emc_smis_common.py:1289 #, python-format -msgid "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s" -msgstr "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s" +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:583 +#: cinder/volume/drivers/emc/emc_smis_common.py:1302 #, python-format -msgid "Creating blank HD of size %(req_size)d gigs" +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:634 +#: cinder/volume/drivers/emc/emc_smis_common.py:1314 #, python-format msgid "" -"Fast cloning is only supported on default local SR of type ext. SR on " -"this system was found to be of type %(sr_type)s. Ignoring the cow flag." +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:724 +#: cinder/volume/drivers/emc/emc_smis_common.py:1326 #, python-format msgid "" -"download_vhd %(image)s attempt %(attempt_num)d/%(max_attempts)d from " -"%(glance_host)s:%(glance_port)s" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:734 +#: cinder/volume/drivers/emc/emc_smis_common.py:1361 #, python-format -msgid "download_vhd failed: %r" +msgid "Available device number on %(storage)s: %(device)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:750 +#: cinder/volume/drivers/emc/emc_smis_common.py:1404 #, python-format -msgid "Asking xapi to fetch vhd image %(image)s" +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:760 +#: cinder/volume/drivers/emc/emc_smis_common.py:1409 #, python-format -msgid "" -"xapi 'download_vhd' returned VDI of type '%(vdi_type)s' with UUID " -"'%(vdi_uuid)s'" +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." 
msgstr "" -#: cinder/virt/xenapi/vm_utils.py:789 +#: cinder/volume/drivers/emc/emc_smis_common.py:1419 #, python-format -msgid "vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes=%(vdi_size_bytes)d" +msgid "Device info: %(data)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:805 +#: cinder/volume/drivers/emc/emc_smis_common.py:1441 #, python-format -msgid "image_size_bytes=%(size_bytes)d, allowed_size_bytes=%(allowed_size_bytes)d" +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:809 +#: cinder/volume/drivers/emc/emc_smis_common.py:1463 #, python-format -msgid "" -"Image size %(size_bytes)d exceeded instance_type allowed size " -"%(allowed_size_bytes)d" +msgid "Found Storage Processor System: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:831 +#: cinder/volume/drivers/emc/emc_smis_common.py:1491 #, python-format -msgid "Fetching image %(image)s, type %(image_type_str)" +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:844 -#, fuzzy, python-format -msgid "Size for image %(image)s: %(virtual_size)d" -msgstr "Size for image %(image)s:%(virtual_size)d" +#: cinder/volume/drivers/emc/emc_smis_common.py:1520 +msgid "Error finding Storage Hardware ID Service." +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:853 +#: cinder/volume/drivers/emc/emc_smis_common.py:1526 #, python-format msgid "" -"Kernel/Ramdisk image is too large: %(vdi_size)d bytes, max %(max_size)d " -"bytes" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:870 -#, python-format -msgid "Copying VDI %s to /boot/guest on dom0" -msgstr "Copying VDI %s to /boot/guest on dom0" +#: cinder/volume/drivers/emc/emc_smis_common.py:1538 +msgid "Error finding Target WWNs." +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:884 +#: cinder/volume/drivers/emc/emc_smis_common.py:1548 #, python-format -msgid "Kernel/Ramdisk VDI %s destroyed" -msgstr "Kernel/Ramdisk VDI %s destroyed" +msgid "Add target WWN: %s." +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:895 -msgid "Failed to fetch glance image" +#: cinder/volume/drivers/emc/emc_smis_common.py:1550 +#, python-format +msgid "Target WWNs: %s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:934 +#: cinder/volume/drivers/emc/emc_smis_common.py:1566 #, python-format -msgid "Detected %(image_type_str)s format for image %(image_ref)s" +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:955 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:152 #, python-format -msgid "Looking up vdi %s for PV kernel" -msgstr "Looking up vdi %s for PV kernel" +msgid "Could not find iSCSI export for volume %s" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:973 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:161 +#, fuzzy, python-format +msgid "Cannot find device number for volume %s" +msgstr "Cannot find device number for volume %s" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:191 #, python-format -msgid "Unknown image format %(disk_image_type)s" +msgid "Found iSCSI endpoint: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1016 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:198 #, python-format -msgid "VDI %s is still available" -msgstr "VDI %s is still available" +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." 
+msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1059 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:215 #, python-format -msgid "(VM_UTILS) xenserver vm state -> |%s|" -msgstr "(VM_UTILS) xenserver vm state -> |%s|" +msgid "ISCSI properties: %s" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1061 +#: cinder/volume/drivers/hds/hds.py:70 #, python-format -msgid "(VM_UTILS) xenapi power_state -> |%s|" -msgstr "(VM_UTILS) xenapi power_state -> |%s|" +msgid "Range: start LU: %(start)s, end LU: %(end)s" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1088 +#: cinder/volume/drivers/hds/hds.py:84 #, python-format -msgid "Unable to parse rrd of %(vm_uuid)s" +msgid "setting LU upper (end) limit to %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1108 +#: cinder/volume/drivers/hds/hds.py:92 #, python-format -msgid "Re-scanning SR %s" -msgstr "Re-scanning SR %s" +msgid "%(element)s: %(val)s" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1136 +#: cinder/volume/drivers/hds/hds.py:103 cinder/volume/drivers/hds/hds.py:105 #, python-format -msgid "Flag sr_matching_filter '%s' does not respect formatting convention" +msgid "XML exception reading parameter: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1154 -msgid "" -"XenAPI is unable to find a Storage Repository to install guest instances " -"on. Please check your configuration and/or configure the flag " -"'sr_matching_filter'" +#: cinder/volume/drivers/hds/hds.py:178 +#, python-format +msgid "portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1167 -msgid "Cannot find SR of content-type ISO" +#: cinder/volume/drivers/hds/hds.py:197 +#, python-format +msgid "No configuration found for service: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1175 +#: cinder/volume/drivers/hds/hds.py:250 +#, fuzzy, python-format +msgid "HDP not found: %s" +msgstr "HDP not found: %s" + +#: cinder/volume/drivers/hds/hds.py:289 #, python-format -msgid "ISO: looking at SR %(sr_rec)s" +msgid "iSCSI portal not found for service: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1177 -msgid "ISO: not iso content" +#: cinder/volume/drivers/hds/hds.py:327 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1180 -msgid "ISO: iso content_type, no 'i18n-key' key" +#: cinder/volume/drivers/hds/hds.py:355 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is cloned." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1183 -msgid "ISO: iso content_type, i18n-key value not 'local-storage-iso'" +#: cinder/volume/drivers/hds/hds.py:372 +#, python-format +msgid "LUN %(lun)s extended to %(size)s GB." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1187 -msgid "ISO: SR MATCHing our criteria" +#: cinder/volume/drivers/hds/hds.py:395 +#, python-format +msgid "delete lun %(lun)s on %(name)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1189 -msgid "ISO: ISO, looking to see if it is host local" +#: cinder/volume/drivers/hds/hds.py:480 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created from snapshot." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1192 +#: cinder/volume/drivers/hds/hds.py:503 #, python-format -msgid "ISO: PBD %(pbd_ref)s disappeared" +msgid "LUN %(lun)s of size %(size)s MB is created as snapshot." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1195 +#: cinder/volume/drivers/hds/hds.py:522 #, python-format -msgid "ISO: PBD matching, want %(pbd_rec)s, have %(host)s" +msgid "LUN %s is deleted." 
msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1198 -msgid "ISO: SR with local PBD" +#: cinder/volume/drivers/huawei/__init__.py:57 +msgid "_instantiate_driver: configuration not found." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1220 +#: cinder/volume/drivers/huawei/__init__.py:64 #, python-format msgid "" -"Unable to obtain RRD XML for VM %(vm_uuid)s with server details: " -"%(server)s." +"_instantiate_driver: Loading %(protocol)s driver for Huawei OceanStor " +"%(product)s series storage arrays." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1236 +#: cinder/volume/drivers/huawei/__init__.py:84 #, python-format -msgid "Unable to obtain RRD XML updates with server details: %(server)s." +msgid "" +"\"Product\" or \"Protocol\" is illegal. \"Product\" should be set to " +"either T, Dorado or HVS. \"Protocol\" should be set to either iSCSI or " +"FC. Product: %(product)s Protocol: %(protocol)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1290 +#: cinder/volume/drivers/huawei/huawei_dorado.py:74 #, python-format -msgid "Invalid statistics data from Xenserver: %s" +msgid "" +"initialize_connection: volume name: %(vol)s host: %(host)s initiator: " +"%(wwn)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1343 +#: cinder/volume/drivers/huawei/huawei_dorado.py:92 +#: cinder/volume/drivers/huawei/huawei_t.py:461 #, python-format -msgid "VHD %(vdi_uuid)s has parent %(parent_ref)s" -msgstr "VHD %(vdi_uuid)s has parent %(parent_ref)s" +msgid "initialize_connection: Target FC ports WWNS: %s" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1417 +#: cinder/volume/drivers/huawei/huawei_t.py:101 #, python-format msgid "" -"Parent %(parent_uuid)s doesn't match original parent " -"%(original_parent_uuid)s, waiting for coalesce..." +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(ini)s" msgstr "" -"Parent %(parent_uuid)s doesn't match original parent " -"%(original_parent_uuid)s, waiting for coalesce..." -#: cinder/virt/xenapi/vm_utils.py:1427 +#: cinder/volume/drivers/huawei/huawei_t.py:159 +#: cinder/volume/drivers/huawei/rest_common.py:1278 #, python-format -msgid "VHD coalesce attempts exceeded (%(max_attempts)d), giving up..." +msgid "" +"_get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " +"check config file." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1462 +#: cinder/volume/drivers/huawei/huawei_t.py:206 +#: cinder/volume/drivers/huawei/rest_common.py:1083 #, python-format -msgid "Timeout waiting for device %s to be created" +msgid "_get_tgt_iqn: iSCSI IP is %s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1473 +#: cinder/volume/drivers/huawei/huawei_t.py:234 #, python-format -msgid "Plugging VBD %s ... " -msgstr "Plugging VBD %s ... " +msgid "_get_tgt_iqn: iSCSI target iqn is %s." +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1476 +#: cinder/volume/drivers/huawei/huawei_t.py:248 #, python-format -msgid "Plugging VBD %s done." -msgstr "Plugging VBD %s done." +msgid "" +"_get_iscsi_tgt_port_info: Failed to get iSCSI port info. Please make sure" +" the iSCSI port IP %s is configured in array." 
+msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1478 +#: cinder/volume/drivers/huawei/huawei_t.py:323 +#: cinder/volume/drivers/huawei/huawei_t.py:552 #, python-format -msgid "VBD %(vbd_ref)s plugged as %(orig_dev)s" +msgid "" +"terminate_connection: volume: %(vol)s, host: %(host)s, connector: " +"%(initiator)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1481 +#: cinder/volume/drivers/huawei/huawei_t.py:351 #, python-format -msgid "VBD %(vbd_ref)s plugged into wrong dev, remapping to %(dev)s" +msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1490 -#, python-format -msgid "Destroying VBD for VDI %s ... " -msgstr "Destroying VBD for VDI %s ... " +#: cinder/volume/drivers/huawei/huawei_t.py:436 +msgid "validate_connector: The FC driver requires thewwpns in the connector." +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1498 +#: cinder/volume/drivers/huawei/huawei_t.py:443 #, python-format -msgid "Destroying VBD for VDI %s done." -msgstr "Destroying VBD for VDI %s done." +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(wwn)s" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1511 +#: cinder/volume/drivers/huawei/huawei_t.py:578 #, python-format -msgid "Running pygrub against %s" -msgstr "Running pygrub against %s" +msgid "_remove_fc_ports: FC port was not found on host %(hostid)s." +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1518 +#: cinder/volume/drivers/huawei/huawei_utils.py:40 #, python-format -msgid "Found Xen kernel %s" -msgstr "Found Xen kernel %s" - -#: cinder/virt/xenapi/vm_utils.py:1520 -msgid "No Xen kernel found. Booting HVM." -msgstr "No Xen kernel found. Booting HVM." +msgid "parse_xml_file: %s" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1533 -msgid "Partitions:" +#: cinder/volume/drivers/huawei/huawei_utils.py:129 +#, python-format +msgid "_get_host_os_type: Host %(ip)s OS type is %(os)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1539 +#: cinder/volume/drivers/huawei/rest_common.py:59 #, python-format -msgid " %(num)s: %(ptype)s %(size)d sectors" +msgid "HVS Request URL: %(url)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1565 +#: cinder/volume/drivers/huawei/rest_common.py:60 #, python-format -msgid "" -"Writing partition table %(primary_first)d %(primary_last)d to " -"%(dev_path)s..." +msgid "HVS Request Data: %(data)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1578 +#: cinder/volume/drivers/huawei/rest_common.py:73 #, python-format -msgid "Writing partition table %s done." -msgstr "Writing partition table %s done." +msgid "HVS Response Data: %(res)s" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1632 +#: cinder/volume/drivers/huawei/rest_common.py:75 #, python-format -msgid "" -"Starting sparse_copy src=%(src_path)s dst=%(dst_path)s " -"virtual_size=%(virtual_size)d block_size=%(block_size)d" +msgid "Bad response from server: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:82 +msgid "JSON transfer error" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1664 +#: cinder/volume/drivers/huawei/rest_common.py:102 #, python-format -msgid "" -"Finished sparse_copy in %(duration).2f secs, %(compression_pct).2f%% " -"reduction in size" +msgid "Login error, reason is %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1714 +#: cinder/volume/drivers/huawei/rest_common.py:166 +#, python-format msgid "" -"XenServer tools installed in this image are capable of network injection." 
-" Networking files will not bemanipulated" +"%(err)s\n" +"result: %(res)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1722 -msgid "" -"XenServer tools are present in this image but are not capable of network " -"injection" +#: cinder/volume/drivers/huawei/rest_common.py:173 +#, python-format +msgid "%s \"data\" was not in result." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1726 -msgid "XenServer tools are not installed in this image" +#: cinder/volume/drivers/huawei/rest_common.py:208 +msgid "Can't find the Qos policy in array" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1742 -msgid "Manipulating interface files directly" +#: cinder/volume/drivers/huawei/rest_common.py:246 +msgid "Can't find lun or lun group in array" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1751 +#: cinder/volume/drivers/huawei/rest_common.py:280 #, python-format -msgid "Failed to mount filesystem (expected for non-linux instances): %s" +msgid "Invalid resource pool: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:131 cinder/virt/xenapi/vmops.py:722 +#: cinder/volume/drivers/huawei/rest_common.py:298 #, python-format -msgid "Updating progress to %(progress)d" +msgid "Get pool info error, pool name is:%s" msgstr "" -#: cinder/virt/xenapi/vmops.py:231 +#: cinder/volume/drivers/huawei/rest_common.py:327 #, python-format -msgid "Attempted to power on non-existent instance bad instance id %s" +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:233 -#, fuzzy, python-format -msgid "Starting instance" -msgstr "Rebooting instance %s" +#: cinder/volume/drivers/huawei/rest_common.py:354 +#, python-format +msgid "_stop_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" -#: cinder/virt/xenapi/vmops.py:303 -msgid "Removing kernel/ramdisk files from dom0" +#: cinder/volume/drivers/huawei/rest_common.py:474 +#, python-format +msgid "" +"_mapping_hostgroup_and_lungroup: lun_group: %(lun_group)sview_id: " +"%(view_id)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:358 -msgid "Failed to spawn, rolling back" +#: cinder/volume/drivers/huawei/rest_common.py:511 +#: cinder/volume/drivers/huawei/rest_common.py:543 +#, python-format +msgid "initiator name:%(initiator_name)s, volume name:%(volume)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:443 -msgid "Detected ISO image type, creating blank VM for install" +#: cinder/volume/drivers/huawei/rest_common.py:527 +#, python-format +msgid "host lun id is %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:462 -msgid "Auto configuring disk, attempting to resize partition..." +#: cinder/volume/drivers/huawei/rest_common.py:553 +#, python-format +msgid "the free wwns %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:515 +#: cinder/volume/drivers/huawei/rest_common.py:574 #, python-format -msgid "Invalid value for injected_files: %r" +msgid "the fc server properties is:%s" msgstr "" -#: cinder/virt/xenapi/vmops.py:520 +#: cinder/volume/drivers/huawei/rest_common.py:688 #, python-format -msgid "Injecting file path: '%s'" +msgid "JSON transfer data error. %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:527 -msgid "Setting admin password" +#: cinder/volume/drivers/huawei/rest_common.py:874 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:531 -msgid "Resetting network" +#: cinder/volume/drivers/huawei/rest_common.py:937 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". 
" +"LUNType:%(fetchtype)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:538 -msgid "Setting VCPU weight" +#: cinder/volume/drivers/huawei/rest_common.py:964 +#, python-format +msgid "" +"PrefetchType config is wrong. PrefetchType must in 1,2,3,4. fetchtype " +"is:%(fetchtype)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:544 -msgid "Starting VM" +#: cinder/volume/drivers/huawei/rest_common.py:970 +msgid "Use default prefetch fetchtype. Prefetch fetchtype:Intelligent." msgstr "" -#: cinder/virt/xenapi/vmops.py:551 +#: cinder/volume/drivers/huawei/rest_common.py:982 #, python-format msgid "" -"Latest agent build for %(hypervisor)s/%(os)s/%(architecture)s is " -"%(version)s" +"_wait_for_luncopy:LUNcopy status is not normal.LUNcopy name: " +"%(luncopyname)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:554 +#: cinder/volume/drivers/huawei/rest_common.py:1056 #, python-format -msgid "No agent build found for %(hypervisor)s/%(os)s/%(architecture)s" +msgid "" +"_get_iscsi_port_info: Failed to get iscsi port info through config IP " +"%(ip)s, please check config file." msgstr "" -#: cinder/virt/xenapi/vmops.py:561 -msgid "Waiting for instance state to become running" +#: cinder/volume/drivers/huawei/rest_common.py:1101 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:573 -msgid "Querying agent version" +#: cinder/volume/drivers/huawei/rest_common.py:1124 +#, python-format +msgid "_parse_volume_type: type id: %(type_id)s config parameter is: %(params)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:576 +#: cinder/volume/drivers/huawei/rest_common.py:1157 #, python-format -msgid "Instance agent version: %s" +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the configuration file " +"%(conf)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:581 +#: cinder/volume/drivers/huawei/rest_common.py:1162 #, python-format -msgid "Updating Agent to %s" +msgid "The config parameters are: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:616 +#: cinder/volume/drivers/huawei/rest_common.py:1239 +#: cinder/volume/drivers/huawei/ssh_common.py:118 +#: cinder/volume/drivers/huawei/ssh_common.py:1265 #, python-format -msgid "No opaque_ref could be determined for '%s'." +msgid "_check_conf_file: Config file invalid. %s must be set." msgstr "" -#: cinder/virt/xenapi/vmops.py:670 -msgid "Finished snapshot and upload for VM" +#: cinder/volume/drivers/huawei/rest_common.py:1246 +#: cinder/volume/drivers/huawei/ssh_common.py:125 +msgid "_check_conf_file: Config file invalid. StoragePool must be set." msgstr "" -#: cinder/virt/xenapi/vmops.py:677 -msgid "Starting snapshot for VM" +#: cinder/volume/drivers/huawei/rest_common.py:1256 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType invalid.\n" +"The valid values are: %(os_list)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:686 -#, fuzzy, python-format -msgid "Unable to Snapshot instance: %(exc)s" -msgstr "Unable to attach volume to instance %s" - -#: cinder/virt/xenapi/vmops.py:702 -msgid "Failed to transfer vhd to new host" +#: cinder/volume/drivers/huawei/rest_common.py:1300 +msgid "Can not find lun in array" msgstr "" -#: cinder/virt/xenapi/vmops.py:770 +#: cinder/volume/drivers/huawei/ssh_common.py:54 #, python-format -msgid "Resizing down VDI %(cow_uuid)s from %(old_gb)dGB to %(new_gb)dGB" +msgid "ssh_read: Read SSH timeout. 
%s" msgstr "" -#: cinder/virt/xenapi/vmops.py:893 -#, python-format -msgid "Resizing up VDI %(vdi_uuid)s from %(old_gb)dGB to %(new_gb)dGB" +#: cinder/volume/drivers/huawei/ssh_common.py:70 +msgid "No response message. Please check system status." msgstr "" -#: cinder/virt/xenapi/vmops.py:901 -msgid "Resize complete" +#: cinder/volume/drivers/huawei/ssh_common.py:101 +#: cinder/volume/drivers/huawei/ssh_common.py:1249 +msgid "do_setup" msgstr "" -#: cinder/virt/xenapi/vmops.py:928 +#: cinder/volume/drivers/huawei/ssh_common.py:135 +#: cinder/volume/drivers/huawei/ssh_common.py:1287 #, python-format -msgid "Failed to query agent version: %(resp)r" +msgid "" +"_check_conf_file: Config file invalid. Host OSType is invalid.\n" +"The valid values are: %(os_list)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:949 +#: cinder/volume/drivers/huawei/ssh_common.py:169 #, python-format -msgid "domid changed from %(domid)s to %(newdomid)s" +msgid "_get_login_info: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:962 +#: cinder/volume/drivers/huawei/ssh_common.py:224 #, python-format -msgid "Failed to update agent: %(resp)r" +msgid "create_volume: volume name: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:983 +#: cinder/volume/drivers/huawei/ssh_common.py:242 #, python-format -msgid "Failed to exchange keys: %(resp)r" +msgid "" +"_name_translate: Name in cinder: %(old)s, new name in storage system: " +"%(new)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:998 +#: cinder/volume/drivers/huawei/ssh_common.py:279 #, python-format -msgid "Failed to update password: %(resp)r" +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the element in configuration " +"file %(conf)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1023 +#: cinder/volume/drivers/huawei/ssh_common.py:373 +#: cinder/volume/drivers/huawei/ssh_common.py:1451 #, python-format -msgid "Failed to inject file: %(resp)r" +msgid "LUNType must be \"Thin\" or \"Thick\". LUNType:%(type)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1032 -msgid "VM already halted, skipping shutdown..." +#: cinder/volume/drivers/huawei/ssh_common.py:395 +msgid "" +"_parse_conf_lun_params: Use default prefetch type. Prefetch type: " +"Intelligent" msgstr "" -#: cinder/virt/xenapi/vmops.py:1036 -msgid "Shutting down VM" +#: cinder/volume/drivers/huawei/ssh_common.py:421 +#, python-format +msgid "" +"_get_maximum_capacity_pool_id: Failed to get pool id. Please check config" +" file and make sure the StoragePool %s is created in storage array." msgstr "" -#: cinder/virt/xenapi/vmops.py:1054 -msgid "Unable to find VBD for VM" +#: cinder/volume/drivers/huawei/ssh_common.py:436 +#, python-format +msgid "CLI command: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1097 -msgid "Using RAW or VHD, skipping kernel and ramdisk deletion" +#: cinder/volume/drivers/huawei/ssh_common.py:466 +#, python-format +msgid "" +"_execute_cli: Can not connect to IP %(old)s, try to connect to the other " +"IP %(new)s." 
msgstr "" -#: cinder/virt/xenapi/vmops.py:1104 -msgid "instance has a kernel or ramdisk but not both" +#: cinder/volume/drivers/huawei/ssh_common.py:501 +#, python-format +msgid "_execute_cli: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1111 -msgid "kernel/ramdisk files removed" +#: cinder/volume/drivers/huawei/ssh_common.py:511 +#, python-format +msgid "delete_volume: volume name: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1121 -msgid "VM destroyed" +#: cinder/volume/drivers/huawei/ssh_common.py:516 +#, python-format +msgid "delete_volume: Volume %(name)s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:1147 -msgid "Destroying VM" +#: cinder/volume/drivers/huawei/ssh_common.py:570 +#, python-format +msgid "" +"create_volume_from_snapshot: snapshot name: %(snapshot)s, volume name: " +"%(volume)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1169 -msgid "VM is not present, skipping destroy..." +#: cinder/volume/drivers/huawei/ssh_common.py:580 +#, python-format +msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:1222 +#: cinder/volume/drivers/huawei/ssh_common.py:650 #, python-format -msgid "Instance is already in Rescue Mode: %s" +msgid "_wait_for_luncopy: LUNcopy %(luncopyname)s status is %(status)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1296 +#: cinder/volume/drivers/huawei/ssh_common.py:688 #, python-format -msgid "Found %(instance_count)d hung reboots older than %(timeout)d seconds" +msgid "create_cloned_volume: src volume: %(src)s, tgt volume: %(tgt)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1300 -msgid "Automatically hard rebooting" +#: cinder/volume/drivers/huawei/ssh_common.py:697 +#, python-format +msgid "Source volume %(name)s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:1363 +#: cinder/volume/drivers/huawei/ssh_common.py:739 #, python-format -msgid "Setting migration %(migration_id)s to error: %(reason)s" +msgid "" +"extend_volume: extended volume name: %(extended_name)s new added volume " +"name: %(added_name)s new added volume size: %(added_size)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1374 +#: cinder/volume/drivers/huawei/ssh_common.py:747 #, python-format -msgid "" -"Automatically confirming migration %(migration_id)s for instance " -"%(instance_uuid)s" +msgid "extend_volume: volume %s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:1379 -#, fuzzy, python-format -msgid "Instance %(instance_uuid)s not found" -msgstr "Instance %(instance_id)s is not running." +#: cinder/volume/drivers/huawei/ssh_common.py:779 +#, python-format +msgid "create_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" -#: cinder/virt/xenapi/vmops.py:1383 -msgid "In ERROR state" +#: cinder/volume/drivers/huawei/ssh_common.py:785 +msgid "create_snapshot: Resource pool needs 1GB valid size at least." msgstr "" -#: cinder/virt/xenapi/vmops.py:1389 +#: cinder/volume/drivers/huawei/ssh_common.py:792 #, python-format -msgid "In %(task_state)s task_state, not RESIZE_VERIFY" +msgid "create_snapshot: Volume %(name)s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:1396 +#: cinder/volume/drivers/huawei/ssh_common.py:855 #, python-format -msgid "Error auto-confirming resize: %(e)s. Will retry later." +msgid "delete_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1418 -msgid "Could not get bandwidth info." 
+#: cinder/volume/drivers/huawei/ssh_common.py:865 +#, python-format +msgid "" +"delete_snapshot: Can not delete snapshot %s for it is a source LUN of " +"LUNCopy." msgstr "" -#: cinder/virt/xenapi/vmops.py:1469 -msgid "Injecting network info to xenstore" +#: cinder/volume/drivers/huawei/ssh_common.py:873 +#, python-format +msgid "delete_snapshot: Snapshot %(snap)s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:1483 -msgid "Creating vifs" +#: cinder/volume/drivers/huawei/ssh_common.py:916 +#, python-format +msgid "" +"%(func)s: %(msg)s\n" +"CLI command: %(cmd)s\n" +"CLI out: %(out)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1492 -#, fuzzy, python-format -msgid "Creating VIF for network %(network_ref)s" -msgstr "Creating VIF for VM %(vm_ref)s, network %(network_ref)s." +#: cinder/volume/drivers/huawei/ssh_common.py:933 +#, python-format +msgid "map_volume: Volume %s was not found." +msgstr "" -#: cinder/virt/xenapi/vmops.py:1495 -#, fuzzy, python-format -msgid "Created VIF %(vif_ref)s, network %(network_ref)s" -msgstr "Creating VIF for VM %(vm_ref)s, network %(network_ref)s." +#: cinder/volume/drivers/huawei/ssh_common.py:1079 +#, python-format +msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s." +msgstr "" -#: cinder/virt/xenapi/vmops.py:1520 -msgid "Injecting hostname to xenstore" +#: cinder/volume/drivers/huawei/ssh_common.py:1102 +#, python-format +msgid "remove_map: Host %s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:1545 +#: cinder/volume/drivers/huawei/ssh_common.py:1106 #, python-format -msgid "" -"The agent call to %(method)s returned an invalid response: %(ret)r. " -"path=%(path)s; args=%(args)r" +msgid "remove_map: Volume %s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:1566 +#: cinder/volume/drivers/huawei/ssh_common.py:1119 #, python-format -msgid "TIMEOUT: The call to %(method)s timed out. args=%(args)r" +msgid "remove_map: No map between host %(host)s and volume %(volume)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1570 +#: cinder/volume/drivers/huawei/ssh_common.py:1138 #, python-format msgid "" -"NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. " -"args=%(args)r" +"_delete_map: There are IOs accessing the system. Retry to delete host map" +" %(mapid)s 10s later." msgstr "" -#: cinder/virt/xenapi/vmops.py:1575 +#: cinder/volume/drivers/huawei/ssh_common.py:1146 #, python-format -msgid "The call to %(method)s returned an error: %(e)s. args=%(args)r" +msgid "" +"_delete_map: Failed to delete host map %(mapid)s.\n" +"CLI out: %(out)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1661 -#, python-format -msgid "OpenSSL error: %s" +#: cinder/volume/drivers/huawei/ssh_common.py:1185 +msgid "_update_volume_stats: Updating volume stats." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1277 +msgid "_check_conf_file: Config file invalid. StoragePool must be specified." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:52 -msgid "creating sr within volume_utils" +#: cinder/volume/drivers/huawei/ssh_common.py:1311 +msgid "" +"_get_device_type: The driver only supports Dorado5100 and Dorado 2100 G2 " +"now." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:55 cinder/virt/xenapi/volume_utils.py:83 +#: cinder/volume/drivers/huawei/ssh_common.py:1389 #, python-format -msgid "type is = %s" +msgid "" +"create_volume_from_snapshot: %(device)s does not support create volume " +"from snapshot." 
msgstr "" -#: cinder/virt/xenapi/volume_utils.py:58 cinder/virt/xenapi/volume_utils.py:86 +#: cinder/volume/drivers/huawei/ssh_common.py:1396 #, python-format -msgid "name = %s" +msgid "create_cloned_volume: %(device)s does not support clone volume." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:71 +#: cinder/volume/drivers/huawei/ssh_common.py:1404 #, python-format -msgid "Created %(label)s as %(sr_ref)s." +msgid "extend_volume: %(device)s does not support extend volume." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:76 cinder/virt/xenapi/volume_utils.py:174 -msgid "Unable to create Storage Repository" +#: cinder/volume/drivers/huawei/ssh_common.py:1413 +#, python-format +msgid "create_snapshot: %(device)s does not support snapshot." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:80 -msgid "introducing sr within volume_utils" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:132 +msgid "enter: do_setup" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:103 cinder/virt/xenapi/volume_utils.py:170 -#: cinder/virt/xenapi/volumeops.py:156 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:142 #, python-format -msgid "Introduced %(label)s as %(sr_ref)s." +msgid "Failed getting details for pool %s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:106 -msgid "Creating pbd for SR" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:179 +msgid "do_setup: No configured nodes." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:108 -msgid "Plugging SR" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:182 +msgid "leave: do_setup" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:116 cinder/virt/xenapi/volumeops.py:160 -msgid "Unable to introduce Storage Repository" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:186 +msgid "enter: check_for_setup_error" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:127 cinder/virt/xenapi/volumeops.py:50 -msgid "Unable to get SR using uuid" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:190 +msgid "Unable to determine system name" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:129 -#, python-format -msgid "Forgetting SR %s..." +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:193 +msgid "Unable to determine system id" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:137 -msgid "Unable to forget Storage Repository" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:196 +msgid "Unable to determine pool extent size" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:157 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:203 +#: cinder/volume/drivers/netapp/iscsi.py:122 +#: cinder/volume/drivers/netapp/nfs.py:639 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:157 #, python-format -msgid "Introducing %s..." 
+msgid "%s is not set" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:186 -#, python-format -msgid "Unable to find SR from VBD %s" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:209 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:204 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:217 #, python-format -msgid "Ignoring exception %(exc)s when getting PBDs for %(sr_ref)s" +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:210 -#, python-format -msgid "Ignoring exception %(exc)s when unplugging PBD %(pbd)s" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:225 +msgid "leave: check_for_setup_error" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:234 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:235 #, python-format -msgid "Unable to introduce VDI on SR %s" +msgid "ensure_export: Volume %s not found on storage" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:242 -#, python-format -msgid "Unable to get record of VDI %s on" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:254 +msgid "The connector does not contain the required information." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:264 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:277 #, python-format -msgid "Unable to introduce VDI for SR %s" +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:274 -#, python-format -msgid "Error finding vdis in SR %s" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:297 +msgid "CHAP secret exists for host but CHAP is disabled" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:281 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:302 #, python-format -msgid "Unable to find vbd for vdi %s" +msgid "initialize_connection: Failed to get attributes for volume %s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:315 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:314 #, python-format -msgid "Unable to obtain target information %(data)s, %(mountpoint)s" +msgid "Did not find expected column name in lsvdisk: %s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:341 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:316 #, python-format -msgid "Mountpoint cannot be translated: %s" +msgid "initialize_connection: Missing volume attribute for volume %s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:64 -msgid "Could not find VDI ref" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:333 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:69 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:342 #, python-format -msgid "Creating SR %s" +msgid "initialize_connection: Did not find a preferred node for volume %s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:73 -msgid "Could not create SR" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:365 +msgid "" +"Could not get FC connection information for the host-volume connection. " +"Is the host configured properly for FC connections?" 
msgstr "" -#: cinder/virt/xenapi/volumeops.py:76 -msgid "Could not retrieve SR record" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:380 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" msgstr "" -#: cinder/virt/xenapi/volumeops.py:81 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:385 #, python-format -msgid "Introducing SR %s" +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:85 -msgid "SR found in xapi database. No need to introduce" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:403 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:90 -msgid "Could not introduce SR" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:411 +msgid "terminate_connection: Failed to get host name from connector." msgstr "" -#: cinder/virt/xenapi/volumeops.py:94 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:421 #, python-format -msgid "Checking for SR %s" +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:106 -#, python-format -msgid "SR %s not found in the xapi database" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:447 +msgid "create_volume_from_snapshot: Source and destination size differ." msgstr "" -#: cinder/virt/xenapi/volumeops.py:112 -msgid "Could not forget SR" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:459 +msgid "create_cloned_volume: Source and destination size differ." msgstr "" -#: cinder/virt/xenapi/volumeops.py:121 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:470 #, python-format -msgid "Attach_volume: %(connection_info)s, %(instance_name)s, %(mountpoint)s" +msgid "enter: extend_volume: volume %s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:178 -#, python-format -msgid "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s" -msgstr "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s" - -#: cinder/virt/xenapi/volumeops.py:189 -#, python-format -msgid "Unable to use SR %(sr_ref)s for instance %(instance_name)s" -msgstr "Unable to use SR %(sr_ref)s for instance %(instance_name)s" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:474 +msgid "extend_volume: Extending a volume with snapshots is not supported." 
+msgstr "" -#: cinder/virt/xenapi/volumeops.py:197 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:481 #, python-format -msgid "Unable to attach volume to instance %s" -msgstr "Unable to attach volume to instance %s" +msgid "leave: extend_volume: volume %s" +msgstr "" -#: cinder/virt/xenapi/volumeops.py:200 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:497 #, python-format -msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s" -msgstr "Mountpoint %(mountpoint)s attached to instance %(instance_name)s" +msgid "enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" -#: cinder/virt/xenapi/volumeops.py:210 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:523 #, python-format -msgid "Detach_volume: %(instance_name)s, %(mountpoint)s" -msgstr "Detach_volume: %(instance_name)s, %(mountpoint)s" +msgid "leave: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" -#: cinder/virt/xenapi/volumeops.py:219 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:540 #, python-format -msgid "Unable to locate volume %s" -msgstr "Unable to locate volume %s" +msgid "" +"enter: retype: id=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" -#: cinder/virt/xenapi/volumeops.py:227 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:581 #, python-format -msgid "Unable to detach volume %s" -msgstr "Unable to detach volume %s" +msgid "" +"exit: retype: ild=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" -#: cinder/virt/xenapi/volumeops.py:232 -#, python-format -msgid "Unable to destroy vbd %s" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:622 +msgid "Could not get pool data from the storage" msgstr "" -#: cinder/virt/xenapi/volumeops.py:239 -#, python-format -msgid "Error purging SR %s" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:623 +msgid "_update_volume_stats: Could not get storage pool data" msgstr "" -#: cinder/virt/xenapi/volumeops.py:241 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:44 #, python-format -msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s" -msgstr "Mountpoint %(mountpoint)s detached from instance %(instance_name)s" +msgid "Could not find key in output of command %(cmd)s: %(out)s" +msgstr "" -#: cinder/vnc/xvp_proxy.py:98 cinder/vnc/xvp_proxy.py:103 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:64 #, python-format -msgid "Error in handshake: %s" +msgid "Failed to get code level (%s)." 
msgstr "" -#: cinder/vnc/xvp_proxy.py:119 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:86 #, python-format -msgid "Invalid request: %s" +msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s" msgstr "" -#: cinder/vnc/xvp_proxy.py:139 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:143 #, python-format -msgid "Request: %s" +msgid "WWPN on node %(node)s: %(wwpn)s" msgstr "" -#: cinder/vnc/xvp_proxy.py:142 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:165 #, python-format -msgid "Request made with missing token: %s" +msgid "Failed to find host %s" msgstr "" -#: cinder/vnc/xvp_proxy.py:153 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:178 #, python-format -msgid "Request made with invalid token: %s" +msgid "enter: get_host_from_connector: %s" msgstr "" -#: cinder/vnc/xvp_proxy.py:160 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:207 #, python-format -msgid "Unexpected error: %s" +msgid "leave: get_host_from_connector: host %s" msgstr "" -#: cinder/vnc/xvp_proxy.py:180 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:218 #, python-format -msgid "Starting cinder-xvpvncproxy node (version %s)" +msgid "enter: create_host: host %s" msgstr "" -#: cinder/volume/api.py:74 cinder/volume/api.py:220 -msgid "status must be available" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:224 +msgid "create_host: Host name is not unicode or string" msgstr "" -#: cinder/volume/api.py:85 -#, python-format -msgid "Quota exceeded for %(pid)s, tried to create %(size)sG volume" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:235 +msgid "create_host: No initiators or wwpns supplied." msgstr "" -#: cinder/volume/api.py:137 -#, fuzzy -msgid "Volume status must be available or error" -msgstr "Volume status must be available" - -#: cinder/volume/api.py:142 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:265 #, python-format -msgid "Volume still has %d dependent snapshots" +msgid "leave: create_host: host %(host)s - %(host_name)s" msgstr "" -#: cinder/volume/api.py:223 -msgid "already attached" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:275 +#, python-format +msgid "enter: map_vol_to_host: volume %(volume_name)s to host %(host_name)s" msgstr "" -#: cinder/volume/api.py:230 -msgid "already detached" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:301 +#, python-format +msgid "" +"leave: map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host " +"%(host_name)s" msgstr "" -#: cinder/volume/api.py:292 -msgid "must be available" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:311 +#, python-format +msgid "enter: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" msgstr "" -#: cinder/volume/api.py:325 -#, fuzzy -msgid "Volume Snapshot status must be available or error" -msgstr "Volume status must be available" - -#: cinder/volume/driver.py:96 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:318 #, python-format -msgid "Recovering from a failed execute. Try number %s" +msgid "unmap_vol_from_host: No mapping of volume %(vol_name)s to any host found." msgstr "" -#: cinder/volume/driver.py:106 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:324 #, python-format -msgid "volume group %s doesn't exist" +msgid "" +"unmap_vol_from_host: Multiple mappings of volume %(vol_name)s found, no " +"host specified." msgstr "" -#: cinder/volume/driver.py:270 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:336 #, python-format -msgid "Skipping ensure_export. 
No iscsi_target provisioned for volume: %d" +msgid "" +"unmap_vol_from_host: No mapping of volume %(vol_name)s to host %(host) " +"found." msgstr "" -#: cinder/volume/driver.py:318 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:348 #, python-format -msgid "Skipping remove_export. No iscsi_target provisioned for volume: %d" +msgid "leave: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" msgstr "" -#: cinder/volume/driver.py:327 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:377 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:383 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:390 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:397 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:402 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:408 #, python-format msgid "" -"Skipping remove_export. No iscsi_target is presently exported for volume:" -" %d" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" msgstr "" -#: cinder/volume/driver.py:337 -msgid "ISCSI provider_location not stored, using discovery" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:417 +#, python-format +msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:452 +msgid "Protocol must be specified as ' iSCSI' or ' FC'." msgstr "" -#: cinder/volume/driver.py:384 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:495 #, python-format -msgid "Could not find iSCSI export for volume %s" +msgid "enter: create_vdisk: vdisk %s " msgstr "" -#: cinder/volume/driver.py:388 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:498 #, python-format -msgid "ISCSI Discovery: Found %s" +msgid "leave: _create_vdisk: volume %s " msgstr "" -#: cinder/volume/driver.py:466 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:525 #, python-format -msgid "Cannot confirm exported volume id:%(volume_id)s." +msgid "" +"Unexecpted mapping status %(status)s for mapping%(id)s. Attributes: " +"%(attr)s" msgstr "" -#: cinder/volume/driver.py:493 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:535 #, python-format -msgid "FAKE ISCSI: %s" +msgid "" +"Mapping %(id)s prepare failed to complete within theallotted %(to)d " +"seconds timeout. Terminating." 
msgstr "" -#: cinder/volume/driver.py:505 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:544 #, python-format -msgid "rbd has no pool %s" +msgid "" +"enter: run_flashcopy: execute FlashCopy from source %(source)s to target " +"%(target)s" msgstr "" -#: cinder/volume/driver.py:579 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:552 #, python-format -msgid "Sheepdog is not working: %s" +msgid "leave: run_flashcopy: FlashCopy started from %(source)s to %(target)s" msgstr "" -#: cinder/volume/driver.py:581 -msgid "Sheepdog is not working" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:572 +#, python-format +msgid "Loopcall: _check_vdisk_fc_mappings(), vdisk %s" msgstr "" -#: cinder/volume/driver.py:680 cinder/volume/driver.py:685 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:595 #, python-format -msgid "LoggingVolumeDriver: %s" +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" msgstr "" -#: cinder/volume/manager.py:96 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:631 #, python-format -msgid "Re-exporting %s volumes" -msgstr "Re-exporting %s volumes" +msgid "Calling _ensure_vdisk_no_fc_mappings: vdisk %s" +msgstr "" -#: cinder/volume/manager.py:101 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:639 #, python-format -msgid "volume %s: skipping export" -msgstr "volume %s: skipping export" +msgid "enter: delete_vdisk: vdisk %s" +msgstr "" -#: cinder/volume/manager.py:107 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:641 #, python-format -msgid "volume %s: creating" -msgstr "volume %s: creating" +msgid "Tried to delete non-existant vdisk %s." +msgstr "" -#: cinder/volume/manager.py:119 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:645 #, python-format -msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" -msgstr "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgid "leave: delete_vdisk: vdisk %s" +msgstr "" -#: cinder/volume/manager.py:131 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:649 #, python-format -msgid "volume %s: creating export" -msgstr "volume %s: creating export" +msgid "enter: create_copy: snapshot %(src)s to %(tgt)s" +msgstr "" -#: cinder/volume/manager.py:144 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:654 #, python-format -msgid "volume %s: created successfully" -msgstr "volume %s: created successfully" +msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist" +msgstr "" -#: cinder/volume/manager.py:153 -msgid "Volume is still attached" -msgstr "Volume is still attached" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:669 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt)s from vdisk %(src)s" +msgstr "" -#: cinder/volume/manager.py:155 -msgid "Volume is not local to this node" -msgstr "Volume is not local to this node" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:691 +msgid "migrate_volume started without a vdisk copy in the expected pool." 
+msgstr "" -#: cinder/volume/manager.py:159 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:743 #, python-format -msgid "volume %s: removing export" -msgstr "volume %s: removing export" +msgid "" +"Ignore change IO group as storage code level is %(code_level)s, below " +"then 6.4.0.0" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:35 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:211 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:244 +#, fuzzy, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" -#: cinder/volume/manager.py:161 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:53 #, python-format -msgid "volume %s: deleting" -msgstr "volume %s: deleting" +msgid "Expected no output from CLI command %(cmd)s, got %(out)s" +msgstr "" -#: cinder/volume/manager.py:164 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:65 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:256 #, python-format -msgid "volume %s: volume is busy" +msgid "" +"Failed to parse CLI output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/volume/manager.py:176 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:142 +msgid "Must pass wwpn or host to lsfabric." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:158 #, python-format -msgid "volume %s: deleted successfully" -msgstr "volume %s: deleted successfully" +msgid "Did not find success message nor error for %(fun)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:163 +msgid "" +"storwize_svc_multihostmap_enabled is set to False, not allowing multi " +"host mapping." +msgstr "" -#: cinder/volume/manager.py:183 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:347 #, python-format -msgid "snapshot %s: creating" +msgid "Did not find expected key %(key)s in %(fun)s: %(raw)s" msgstr "" -#: cinder/volume/manager.py:187 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:382 #, python-format -msgid "snapshot %(snap_name)s: creating" +msgid "" +"Unexpected CLI response: header/row mismatch. header: %(header)s, row: " +"%(row)s" msgstr "" -#: cinder/volume/manager.py:202 +#: cinder/volume/drivers/netapp/api.py:419 #, python-format -msgid "snapshot %s: created successfully" +msgid "No element by given name %s." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:440 +msgid "Not a valid value for NaElement." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:444 +msgid "NaElement name cannot be null." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:468 +msgid "Type cannot be converted into NaElement." 
+msgstr "" + +#: cinder/volume/drivers/netapp/common.py:75 +msgid "Required configuration not found" msgstr "" -#: cinder/volume/manager.py:211 +#: cinder/volume/drivers/netapp/common.py:103 #, python-format -msgid "snapshot %s: deleting" +msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s" msgstr "" -#: cinder/volume/manager.py:214 -#, fuzzy, python-format -msgid "snapshot %s: snapshot is busy" -msgstr "instance %s: snapshotting" +#: cinder/volume/drivers/netapp/common.py:109 +#, python-format +msgid "Storage family %s is not supported" +msgstr "" -#: cinder/volume/manager.py:226 +#: cinder/volume/drivers/netapp/common.py:116 #, python-format -msgid "snapshot %s: deleted successfully" +msgid "No default storage protocol found for storage family %(storage_family)s" msgstr "" -#: cinder/volume/manager.py:310 -msgid "Checking volume capabilities" +#: cinder/volume/drivers/netapp/common.py:123 +#, python-format +msgid "" +"Protocol %(storage_protocol)s is not supported for storage family " +"%(storage_family)s" msgstr "" -#: cinder/volume/manager.py:314 +#: cinder/volume/drivers/netapp/common.py:130 #, python-format -msgid "New capabilities found: %s" +msgid "" +"NetApp driver of family %(storage_family)s and protocol " +"%(storage_protocol)s loaded" msgstr "" -#: cinder/volume/manager.py:325 -msgid "Clear capabilities" +#: cinder/volume/drivers/netapp/common.py:139 +msgid "Only loading netapp drivers supported." msgstr "" -#: cinder/volume/manager.py:329 +#: cinder/volume/drivers/netapp/common.py:158 #, python-format -msgid "Notification {%s} received" +msgid "" +"The configured NetApp driver is deprecated. Please refer the link to " +"resolve the issue '%s'." msgstr "" -#: cinder/volume/netapp.py:79 +#: cinder/volume/drivers/netapp/iscsi.py:69 #, python-format -msgid "API %(name)sfailed: %(reason)s" +msgid "No metadata property %(prop)s defined for the LUN %(name)s" msgstr "" -#: cinder/volume/netapp.py:109 +#: cinder/volume/drivers/netapp/iscsi.py:105 #, python-format -msgid "%s is not set" +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:150 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:166 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:175 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:191 +#, fuzzy, python-format +msgid "Destroyed LUN %s" +msgstr "Destroyed LUN %s" + +#: cinder/volume/drivers/netapp/iscsi.py:227 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" msgstr "" -#: cinder/volume/netapp.py:128 -msgid "Connected to DFM server" +#: cinder/volume/drivers/netapp/iscsi.py:232 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" msgstr "" -#: cinder/volume/netapp.py:159 +#: cinder/volume/drivers/netapp/iscsi.py:238 #, python-format -msgid "Job failed: %s" +msgid "Failed to get LUN target details for the LUN %s" msgstr "" -#: cinder/volume/netapp.py:240 -msgid "Failed to provision dataset member" +#: cinder/volume/drivers/netapp/iscsi.py:249 +#, python-format +msgid "Failed to get target portal for the LUN %s" msgstr "" -#: cinder/volume/netapp.py:252 -msgid "No LUN was created by the provision job" +#: cinder/volume/drivers/netapp/iscsi.py:252 +#, python-format +msgid "Failed to get target IQN for the LUN %s" msgstr "" -#: cinder/volume/netapp.py:261 cinder/volume/netapp.py:433 +#: cinder/volume/drivers/netapp/iscsi.py:290 #, fuzzy, python-format -msgid "Failed to find LUN ID for volume %s" -msgstr "Unable to locate volume %s" +msgid "Snapshot %s deletion successful" +msgstr "Snapshot %s deletion successful" + +#: cinder/volume/drivers/netapp/iscsi.py:310 +#: cinder/volume/drivers/netapp/iscsi.py:565 +#: cinder/volume/drivers/netapp/nfs.py:99 +#: cinder/volume/drivers/netapp/nfs.py:206 +#, python-format +msgid "Resizing %s failed. Cleaning volume." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:325 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:412 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:431 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:511 +msgid "Object is not a NetApp LUN." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:543 +#, python-format +msgid "Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:545 +#, python-format +msgid "Error getting lun attribute. Exception: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:600 +#, python-format +msgid "No need to extend volume %s as it is already the requested new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:606 +#, python-format +msgid "Resizing lun %s directly to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:633 +#, python-format +msgid "Lun %(path)s geometry failed. Message - %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:662 +#, python-format +msgid "Moving lun %(name)s to %(new_name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:677 +#, python-format +msgid "Resizing lun %s using sub clone to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:684 +#, python-format +msgid "%s cannot be sub clone resized as it is hosted on compressed volume" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:690 +#, python-format +msgid "%s cannot be sub clone resized as it contains no blocks." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:707 +#, python-format +msgid "Post clone resize lun %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:718 +#, python-format +msgid "Failure staging lun %s to tmp." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:723 +#, python-format +msgid "Failure moving new cloned lun to %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:727 +#, python-format +msgid "Failure deleting staged tmp lun %s." +msgstr "" -#: cinder/volume/netapp.py:280 -msgid "Failed to remove and delete dataset member" +#: cinder/volume/drivers/netapp/iscsi.py:730 +#, python-format +msgid "Unknown exception in post clone resize lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:732 +#, python-format +msgid "Exception details: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:736 +msgid "Getting lun block count." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:741 +#, python-format +msgid "Failure getting lun info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:785 +#, python-format +msgid "Failed to get vol with required size and extra specs for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:796 +#, python-format +msgid "Error provisioning vol %(name)s on %(volume)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:841 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:982 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:986 +#, python-format +msgid "No clonned lun named %s found on the filer" msgstr "" -#: cinder/volume/netapp.py:603 cinder/volume/netapp.py:657 +#: cinder/volume/drivers/netapp/iscsi.py:1087 +msgid "Cluster ssc is not updated. No volume stats found." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1149 +#: cinder/volume/drivers/netapp/nfs.py:1080 +msgid "Unsupported ONTAP version. ONTAP version 7.3.1 and above is supported." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1153 +#: cinder/volume/drivers/netapp/nfs.py:1084 +#: cinder/volume/drivers/netapp/utils.py:320 +msgid "Api version could not be determined." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1164 #, fuzzy, python-format -msgid "No LUN ID for volume %s" -msgstr "Unable to locate volume %s" +msgid "Failed to get vol with required size for volume: %s" +msgstr "Failed to get vol with required size for volume: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1273 +#, python-format +msgid "Error finding luns for volume %s. Verify volume exists." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1390 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1393 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1456 +msgid "Volume refresh job already running. Returning..." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1462 +#, python-format +msgid "Error refreshing vol capacity. Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1470 +#, python-format +msgid "Refreshing capacity info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:104 +#: cinder/volume/drivers/netapp/nfs.py:211 +#, python-format +msgid "NFS file %s not discovered." 
+msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:224 +#, python-format +msgid "Copied image to volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:230 +#, python-format +msgid "Registering image in cache %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:237 +#, python-format +msgid "" +"Exception while registering image %(image_id)s in cache. Exception: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:250 +#, python-format +msgid "Found cache file for image %(image_id)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:263 +#, python-format +msgid "Cloning img from cache for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:271 +msgid "Image cache cleaning in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:282 +msgid "Image cache cleaning in progress." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:293 +#, python-format +msgid "Cleaning cache for share %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:298 +#, python-format +msgid "Files to be queued for deletion %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:305 +#, python-format +msgid "Exception during cache cleaning %(share)s. Message - %(ex)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:311 +msgid "Image cache cleaning done." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:336 +#, python-format +msgid "Bytes to free %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:343 +#, python-format +msgid "Delete file path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:358 +#, python-format +msgid "Deleting file at path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:363 +#, python-format +msgid "Exception during deleting %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:395 +#, python-format +msgid "Unexpected exception in cloning image %(image_id)s. Message: %(msg)s" +msgstr "" -#: cinder/volume/netapp.py:607 cinder/volume/netapp.py:661 +#: cinder/volume/drivers/netapp/nfs.py:411 #, python-format -msgid "Failed to get LUN details for LUN ID %s" +msgid "Cloning image %s from cache" msgstr "" -#: cinder/volume/netapp.py:614 +#: cinder/volume/drivers/netapp/nfs.py:415 #, python-format -msgid "Failed to get host details for host ID %s" +msgid "Cache share: %s" msgstr "" -#: cinder/volume/netapp.py:620 -#, python-format -msgid "Failed to get target portal for filer: %s" -msgstr "" +#: cinder/volume/drivers/netapp/nfs.py:425 +#, python-format +msgid "Unexpected exception during image cloning in share %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:431 +#, python-format +msgid "Cloning image %s directly in share" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:436 +#, python-format +msgid "Share is cloneable %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:443 +#, python-format +msgid "Image is raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:450 +#, python-format +msgid "Image will locally be converted to raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:457 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:467 +#, python-format +msgid "Performing post clone for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:474 +msgid "NFS file could not be discovered." 
+msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:478 +msgid "Checking file for resize" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:482 +#, python-format +msgid "Resizing file to %sG" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:488 +msgid "Resizing image file failed." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:510 +msgid "Discover file retries exhausted." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:529 +#, python-format +msgid "Image location not in the expected format %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:557 +#, python-format +msgid "Found possible share matches %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:561 +msgid "Unexpected exception while short listing used share." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:599 +#, python-format +msgid "Extending volume %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:710 +#, python-format +msgid "Shares on vserver %s will only be used for provisioning." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:714 +#: cinder/volume/drivers/netapp/nfs.py:892 +msgid "No vserver set in config. SSC will be disabled." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:757 +#, python-format +msgid "Exception creating vol %(name)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:765 +#, python-format +msgid "Volume %s could not be created on shares." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:815 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:856 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:862 +#, python-format +msgid "" +"Cloning with params volume %(volume)s, src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:924 +msgid "No cluster ssc stats found. Wait for next volume stats update." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:941 +msgid "No shares found hence skipping ssc refresh." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:978 +#: cinder/volume/drivers/netapp/nfs.py:1221 +#, python-format +msgid "Shortlisted del elg files %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:983 +#: cinder/volume/drivers/netapp/nfs.py:1226 +#, python-format +msgid "Getting file usage for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:988 +#: cinder/volume/drivers/netapp/nfs.py:1231 +#, python-format +msgid "file-usage for path %(path)s is %(bytes)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1005 +#: cinder/volume/drivers/netapp/nfs.py:1268 +#, python-format +msgid "Share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1007 +#: cinder/volume/drivers/netapp/nfs.py:1270 +#, python-format +msgid "No share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1038 +#, python-format +msgid "Found volume %(vol)s for share %(share)s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1129 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1139 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:241 +#, python-format +msgid "Unexpected error while creating ssc vol list. 
Message - %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:272 +#, python-format +msgid "Exception querying aggr options. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:313 +#, python-format +msgid "Exception querying sis information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:347 +#, python-format +msgid "Exception querying mirror information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:379 +#, python-format +msgid "Exception querying storage disk. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:421 +#, python-format +msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:455 +#, python-format +msgid "Successfully completed stale refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:482 +#, python-format +msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:488 +#, python-format +msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:500 +msgid "Backend not a VolumeDriver." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:502 +msgid "Backend server not NaServer." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:505 +msgid "ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:517 +msgid "refresh stale ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:616 +msgid "Fatal error: User not permitted to query NetApp volumes." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:623 +#, python-format +msgid "" +"The user does not have access or sufficient privileges to use all ssc " +"apis. The ssc features %s may not work as expected." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:122 +msgid "ems executed successfully." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:124 +#, python-format +msgid "Failed to invoke ems. Message : %s" +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:137 +msgid "" +"It is not the recommended way to use drivers by NetApp. Please use " +"NetAppDriver to achieve the functionality." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:160 +msgid "Requires an NaServer instance." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:317 +msgid "Unsupported Clustered Data ONTAP version." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:99 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:150 +#, python-format +msgid "Extending volume: %(id)s New size: %(size)s GB" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:166 +#, python-format +msgid "Volume %s does not exist, it seems it was already deleted." 
+msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:179 +#, python-format +msgid "Cannot delete snapshot %(origin): %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:190 +#, python-format +msgid "Creating temp snapshot of the original volume: %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:200 +#: cinder/volume/drivers/nexenta/nfs.py:200 +#, python-format +msgid "Volume creation failed, deleting created snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:205 +#: cinder/volume/drivers/nexenta/nfs.py:205 +#, python-format +msgid "Failed to delete zfs snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:223 +#, python-format +msgid "Enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:250 +#, python-format +msgid "Remote NexentaStor appliance at %s should be SSH-bound." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:267 +#, python-format +msgid "" +"Cannot send source snapshot %(src)s to destination %(dst)s. Reason: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:275 +#, python-format +msgid "" +"Cannot delete temporary source snapshot %(src)s on NexentaStor Appliance:" +" %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:281 +#, python-format +msgid "Cannot delete source volume %(volume)s on NexentaStor Appliance: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:318 +#, python-format +msgid "Snapshot %s does not exist, it seems it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:439 +#: cinder/volume/drivers/windows/windows_utils.py:230 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:449 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:461 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:471 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:481 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:514 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:522 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:83 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:88 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:89 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:90 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:96 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:85 +#, python-format +msgid "Volume %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:89 +#, python-format +msgid 
"Folder %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:114 +#, python-format +msgid "Creating folder on Nexenta Store %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:146 +#, python-format +msgid "Cannot destroy created folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:176 +#, python-format +msgid "Cannot destroy cloned folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:227 +#, python-format +msgid "Folder %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:237 +#: cinder/volume/drivers/nexenta/nfs.py:268 +#, python-format +msgid "Snapshot %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:302 +#, python-format +msgid "Creating regular file: %s.This may take some time." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:313 +#, python-format +msgid "Regular file: %s created." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:365 +#, python-format +msgid "Sharing folder %s on Nexenta Store" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:393 +#, python-format +msgid "Shares loaded: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/utils.py:46 +#, python-format +msgid "Invalid value: \"%s\"" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:93 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:99 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:107 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:137 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:190 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:246 +#, python-format +msgid "Snapshot info: %(name)s => %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:321 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:79 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:166 +#, python-format +msgid "Invalid hp3parclient version. Version %s or greater required." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:179 +#, python-format +msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:193 +#, python-format +msgid "HP3PARCommon %(common_ver)s, hp3parclient %(rest_ver)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:212 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:494 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:228 +#, python-format +msgid "Failed to get domain because CPG (%s) doesn't exist on array." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:247 +#, python-format +msgid "Error extending volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:347 +#, python-format +msgid "command %s failed" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:390 +#, fuzzy, python-format +msgid "Error running ssh command: %s" +msgstr "Error running ssh command: %s" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:586 +#, python-format +msgid "VV Set %s does not exist." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:633 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:684 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:752 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1004 +#, python-format +msgid "Failure in update_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1019 +#, python-format +msgid "Failure in clear_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1031 +#, python-format +msgid "Error attaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1039 +#, python-format +msgid "Error detaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:124 +#, python-format +msgid "Invalid IP address format '%s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:158 +#, python-format +msgid "" +"Found invalid iSCSI IP address(s) in configuration option(s) " +"hp3par_iscsi_ips or iscsi_ip_address '%s.'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:164 +msgid "At least one valid iSCSI IP address must be set." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:266 +msgid "Least busy iSCSI port not found, using first iSCSI port in list." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:75 +#, python-format +msgid "Failure while invoking function: %(func)s. Error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:162 +#, python-format +msgid "Error while terminating session: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:165 +msgid "Successfully established connection to the server." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:172 +#, python-format +msgid "Error while logging out the user: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:218 +#, python-format +msgid "" +"Not authenticated error occurred. Will create session and try API call " +"again: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:258 +#, python-format +msgid "Task: %(task)s progress: %(prog)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:262 +#, python-format +msgid "Task %s status: success." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:266 +#: cinder/volume/drivers/vmware/api.py:271 +#, python-format +msgid "Task: %(task)s failed with error: %(err)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:290 +msgid "Lease is ready." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:294 +msgid "Lease initializing..." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:304 +#, python-format +msgid "Error: unknown lease state %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:51 +#, python-format +msgid "Read %(bytes)s out of %(max)s from ThreadSafePipe." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:56 +#, python-format +msgid "Completed transfer of size %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:102 +#, python-format +msgid "Initiating image service update on image: %(image)s with meta: %(meta)s" +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:117 +#, python-format +msgid "Glance image: %s is now active." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:123 +#, python-format +msgid "Glance image: %s is in killed state." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:132 +#, python-format +msgid "Glance image %(id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:171 +#, python-format +msgid "" +"Exception during HTTP connection close in VMwareHTTPWrite. Exception is " +"%s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:203 +#: cinder/volume/drivers/vmware/read_write_util.py:292 +msgid "Could not retrieve URL from lease." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:206 +#, python-format +msgid "Opening vmdk url: %s for write." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:231 +#, python-format +msgid "Written %s bytes to vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:242 +#: cinder/volume/drivers/vmware/read_write_util.py:318 +#, python-format +msgid "Updating progress to %s percent." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:258 +#: cinder/volume/drivers/vmware/read_write_util.py:334 +msgid "Lease released." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:260 +#: cinder/volume/drivers/vmware/read_write_util.py:336 +#, python-format +msgid "Lease is already in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:295 +#, python-format +msgid "Opening vmdk url: %s for read." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:307 +#, python-format +msgid "Read %s bytes from vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:150 +#, python-format +msgid "Error(s): %s occurred in the call to RetrievePropertiesEx." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:189 +#, python-format +msgid "No such SOAP method %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:198 +#, python-format +msgid "httplib error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:209 +#, python-format +msgid "Socket error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:218 +#, python-format +msgid "Type error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:225 +#, python-format +msgid "Error in %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:112 +#, python-format +msgid "Returning spec value %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:115 +#, python-format +msgid "Invalid spec value: %s specified." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:118 +#, python-format +msgid "Returning default spec value: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:169 +#, python-format +msgid "%s not set." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:174 +#, python-format +msgid "Successfully setup driver: %(driver)s for server: %(ip)s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:219 +msgid "Backing not available, no operation to be performed." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:287 +#, python-format +msgid "" +"Unable to pick datastore to accommodate %(size)s bytes from the " +"datastores: %(dss)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:293 +#, python-format +msgid "" +"Selected datastore: %(datastore)s with %(host_count)d connected host(s) " +"for the volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:375 +#, python-format +msgid "" +"Unable to find suitable datastore for volume of size: %(vol)s GB under " +"host: %(host)s. More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:385 +#, python-format +msgid "Unable to find host to accommodate a disk of size: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:412 +#, python-format +msgid "" +"Unable to find suitable datastore for volume: %(vol)s under host: " +"%(host)s. More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:422 +#, python-format +msgid "Unable to create volume: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:441 +#, python-format +msgid "The instance: %s for which initialize connection is called, exists." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:448 +#, python-format +msgid "There is no backing for the volume: %s. Need to create one." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:456 +msgid "The instance for which initialize connection is called, does not exist." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:461 +#, python-format +msgid "Trying to boot from an empty volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:470 +#, python-format +msgid "" +"Returning connection_info: %(info)s for volume: %(volume)s with " +"connector: %(connector)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:518 +#, python-format +msgid "Snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:523 +#, python-format +msgid "There is no backing, so will not create snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:528 +#, python-format +msgid "Successfully created snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:549 +#, python-format +msgid "Delete snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:554 +#, python-format +msgid "There is no backing, and so there is no snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:558 +#, python-format +msgid "Successfully deleted snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:586 +#, python-format +msgid "Successfully cloned new backing: %(back)s from source VMDK file: %(vmdk)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:603 +#, python-format +msgid "" +"There is no backing for the source volume: %(svol)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:633 +#, python-format +msgid "" +"There is no backing for the source snapshot: %(snap)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:642 +#: cinder/volume/drivers/vmware/vmdk.py:982 +#, python-format +msgid "" +"There is no snapshot point for the snapshoted volume: %(snap)s. Not " +"creating any backing for the volume: %(vol)s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:678 +#, python-format +msgid "Cannot create image of disk format: %s. Only vmdk disk format is accepted." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:713 +#: cinder/volume/drivers/vmware/vmdk.py:771 +#, python-format +msgid "Fetching glance image: %(id)s to server: %(host)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:722 +#: cinder/volume/drivers/vmware/vmdk.py:792 +#, python-format +msgid "Done copying image: %(id)s to volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:725 +#, python-format +msgid "" +"Exception in copy_image_to_volume: %(excep)s. Deleting the backing: " +"%(back)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:746 +#, python-format +msgid "Exception in _select_ds_for_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:749 +#, python-format +msgid "Selected datastore %(ds)s for new volume of size %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:784 +#, python-format +msgid "Exception in copy_image_to_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:787 +#, python-format +msgid "Deleting the backing: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:808 +#, python-format +msgid "Copy glance image: %s to create new volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:842 +msgid "Upload to glance of attached volume is not supported." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:847 +#, python-format +msgid "Copy Volume: %s to new image." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:853 +#, python-format +msgid "Backing not found, creating for volume: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:872 +#, python-format +msgid "Done copying volume %(vol)s to a new image %(img)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:922 +#, python-format +msgid "Relocating volume: %(backing)s to %(ds)s and %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:961 +#: cinder/volume/drivers/vmware/volumeops.py:630 +#, python-format +msgid "Successfully created clone: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:974 +#, python-format +msgid "" +"There is no backing for the snapshoted volume: %(snap)s. Not creating any" +" backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1010 +#, python-format +msgid "" +"There is no backing for the source volume: %(src)s. Not creating any " +"backing for volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1018 +#, python-format +msgid "Linked clone of source volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:94 +#, python-format +msgid "Downloading image: %s from glance image server as a flat vmdk file." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:107 +#: cinder/volume/drivers/vmware/vmware_images.py:126 +#, python-format +msgid "Downloaded image: %s from glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:113 +#, python-format +msgid "Downloading image: %s from glance image server using HttpNfc import." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:132 +#, python-format +msgid "Uploading image: %s to the Glance image server using HttpNfc export." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:158 +#, python-format +msgid "Uploaded image: %s to the Glance image server." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:87 +#, python-format +msgid "Did not find any backing with name: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:94 +#, python-format +msgid "Deleting the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:97 +#, python-format +msgid "Initiated deletion of VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:99 +#, python-format +msgid "Deleted the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:237 +#, python-format +msgid "There are no valid datastores attached to %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:289 +#, python-format +msgid "" +"Creating folder: %(child_folder_name)s under parent folder: " +"%(parent_folder)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:306 +#, python-format +msgid "Child folder already present: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:314 +#, python-format +msgid "Created child folder: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:365 +#, python-format +msgid "Spec for creating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:383 +#, python-format +msgid "" +"Creating volume backing name: %(name)s disk_type: %(disk_type)s size_kb: " +"%(size_kb)s at folder: %(folder)s resourse pool: %(resource_pool)s " +"datastore name: %(ds_name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:395 +#, python-format +msgid "Initiated creation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:398 +#, python-format +msgid "Successfully created volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:438 +#, python-format +msgid "Spec for relocating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:452 +#, python-format +msgid "" +"Relocating backing: %(backing)s to datastore: %(ds)s and resource pool: " +"%(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:462 +#, python-format +msgid "Initiated relocation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:464 +#, python-format +msgid "" +"Successfully relocated volume backing: %(backing)s to datastore: %(ds)s " +"and resource pool: %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:474 +#, python-format +msgid "Moving backing: %(backing)s to folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:479 +#, python-format +msgid "Initiated move of volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:482 +#, python-format +msgid "Successfully moved volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:494 +#, python-format +msgid "Snapshoting backing: %(backing)s with name: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:501 +#, python-format +msgid "Initiated snapshot of volume backing: %(backing)s named: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:505 +#, python-format +msgid "Successfully created snapshot: %(snap)s for volume backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:553 +#, python-format +msgid "Deleting the snapshot: %(name)s from backing: %(backing)s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:558 +#, python-format +msgid "" +"Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " +"delete anything." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:565 +#, python-format +msgid "Initiated snapshot: %(name)s deletion for backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:569 +#, python-format +msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:597 +#, python-format +msgid "Spec for cloning the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:613 +#, python-format +msgid "" +"Creating a clone of backing: %(back)s, named: %(name)s, clone type: " +"%(type)s from snapshot: %(snap)s on datastore: %(ds)s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:627 +#, python-format +msgid "Initiated clone of backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:638 +#, python-format +msgid "Deleting file: %(file)s under datacenter: %(dc)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:646 +#, python-format +msgid "Initiated deletion via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:648 +#, python-format +msgid "Successfully deleted file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:701 +msgid "Copying disk data before snapshot of the VM" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:711 +#, python-format +msgid "Initiated copying disk data via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:713 +#, python-format +msgid "Successfully copied disk at: %(src)s to: %(dest)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:722 +#, python-format +msgid "Deleting vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:729 +#, python-format +msgid "Initiated deleting vmdk file via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:731 +#, python-format +msgid "Deleted vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/windows/windows.py:102 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:47 +#, python-format +msgid "" +"check_for_setup_error: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:53 +msgid "check_for_setup_error: there is no ISCSI traffic listening." +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:63 +#, python-format +msgid "" +"get_host_information: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:73 +#, python-format +msgid "" +"get_host_information: the ISCSI target information could not be " +"retrieved. WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:105 +#, python-format +msgid "" +"associate_initiator_with_iscsi_target: an association between initiator: " +"%(init)s and target name: %(target)s could not be established. WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:123 +#, python-format +msgid "" +"delete_iscsi_target: error when deleting the iscsi target associated with" +" target name: %(target)s . 
WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:139 +#, python-format +msgid "" +"create_volume: error when creating the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:157 +#, python-format +msgid "" +"delete_volume: error when deleting the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:177 +#, python-format +msgid "" +"create_snapshot: error when creating the snapshot name: %(vol_name)s . " +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:193 +#, python-format +msgid "" +"create_volume_from_snapshot: error when creating the volume name: " +"%(vol_name)s from snapshot name: %(snap_name)s. WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:208 +#, python-format +msgid "" +"delete_snapshot: error when deleting the snapshot name: %(snap_name)s . " +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:223 +#, python-format +msgid "" +"create_iscsi_target: error when creating iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:240 +#, python-format +msgid "" +"remove_iscsi_target: error when deleting iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:255 +#, python-format +msgid "" +"add_disk_to_target: error adding disk associated to volume : %(vol_name)s" +" to the target name: %(tar_name)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:273 +#, python-format +msgid "" +"copy_vhd_disk: error when copying disk from source path : %(src_path)s to" +" destination path: %(dest_path)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:290 +#, python-format +msgid "" +"extend: error when extending the volume: %(vol_name)s .WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/flows/common.py:52 +#, python-format +msgid "Restoring source %(source_volid)s status to %(status)s" +msgstr "" + +#: cinder/volume/flows/common.py:58 +#, python-format +msgid "" +"Failed setting source volume %(source_volid)s back to its initial " +"%(source_status)s status" +msgstr "" + +#: cinder/volume/flows/common.py:83 +#, python-format +msgid "Updating volume: %(volume_id)s with %(update)s due to: %(reason)s" +msgstr "" + +#: cinder/volume/flows/common.py:90 +#: cinder/volume/flows/manager/create_volume.py:676 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(update)s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:81 +#, python-format +msgid "Originating snapshot status must be one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:103 +#, python-format +msgid "" +"Unable to create a volume from an originating source volume when its " +"status is not one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:126 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than the snapshot size " +"%(snap_size)sGB. They must be >= original snapshot size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:135 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than original volume size " +"%(source_size)sGB. They must be >= original volume size." 
+msgstr "" + +#: cinder/volume/flows/api/create_volume.py:144 +#, python-format +msgid "Volume size %(size)s must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:186 +#, python-format +msgid "" +"Size of specified image %(image_size)sGB is larger than volume size " +"%(volume_size)sGB." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:194 +#, python-format +msgid "" +"Volume size %(volume_size)sGB cannot be smaller than the image minDisk " +"size %(min_disk)sGB." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:212 +#, python-format +msgid "Metadata property key %s greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:217 +#, python-format +msgid "Metadata property key %s value greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:254 +#, python-format +msgid "Availability zone '%s' is invalid" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:267 +msgid "Volume must be in the same availability zone as the snapshot" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:276 +msgid "Volume must be in the same availability zone as the source volume" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:315 +msgid "Volume type will be changed to be the same as the source volume." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:463 +#, python-format +msgid "Failed destroying volume entry %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:546 +#, python-format +msgid "Failed rolling back quota for %s reservations" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:590 +#, python-format +msgid "Failed to update quota for deleting volume: %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:678 +#: cinder/volume/flows/manager/create_volume.py:192 +#, python-format +msgid "Volume %s: create failed" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:682 +msgid "Unexpected build error:" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:105 +#, python-format +msgid "" +"Volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d due to " +"%(reason)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:124 +#, python-format +msgid "Volume %s: re-scheduled" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:141 +#, python-format +msgid "Updating volume %(volume_id)s with %(update)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:146 +#, python-format +msgid "Volume %s: resetting 'creating' status failed." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:165 +#, python-format +msgid "Volume %s: rescheduling failed" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:308 +#, python-format +msgid "" +"Failed notifying about the volume action %(event)s for volume " +"%(volume_id)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:345 +#, python-format +msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." 
+msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:347 +#, python-format +msgid "" +"Failed updating volume %(vol_id)s metadata using the provided " +"%(src_type)s %(src_id)s metadata" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:405 +#, python-format +msgid "" +"Failed fetching snapshot %(snapshot_id)s bootable flag using the provided" +" glance snapshot %(snapshot_ref_id)s volume reference" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:418 +#, python-format +msgid "Marking volume %s as bootable." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:421 +#, python-format +msgid "Failed updating volume %(volume_id)s bootable flag to true" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:448 +#, python-format +msgid "" +"Attempting download of %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:455 +#: cinder/volume/flows/manager/create_volume.py:466 +#, python-format +msgid "" +"Failed to copy image %(image_id)s to volume: %(volume_id)s, error: " +"%(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:461 +#, python-format +msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:475 +#, python-format +msgid "" +"Downloaded image %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s successfully." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:512 +#, python-format +msgid "" +"Creating volume glance metadata for volume %(volume_id)s backed by image " +"%(image_id)s with: %(vol_metadata)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:526 +#, python-format +msgid "" +"Cloning %(volume_id)s from image %(image_id)s at location " +"%(image_location)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:552 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(updates)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:574 +#, python-format +msgid "Unable to create volume. 
Volume driver %s not initialized" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:588 +#, python-format +msgid "" +"Volume %(volume_id)s: being created using %(functor)s with specification:" +" %(volume_spec)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:611 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with creation provided " +"model %(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:619 +#, python-format +msgid "Volume %s: creating export" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:633 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with driver provided model " +"%(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:680 +#, python-format +msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" +msgstr "" + +#~ msgid "Error retrieving volume status: %s" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get system name" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get storage pool data" +#~ msgstr "" + +#~ msgid "Cannot find any Fibre Channel HBAs" +#~ msgstr "" + +#~ msgid "Volume status must be available or error" +#~ msgstr "Volume status must be available" + +#~ msgid "No backend config with id %s" +#~ msgstr "" + +#~ msgid "No sm_flavor called %s" +#~ msgstr "" + +#~ msgid "No sm_volume with id %s" +#~ msgstr "" + +#~ msgid "Error: %s" +#~ msgstr "" + +#~ msgid "Unexpected state while cloning %s" +#~ msgstr "Unexpected error while running command." + +#~ msgid "iSCSI device not found at %s" +#~ msgstr "" + +#~ msgid "Fibre Channel device not found." +#~ msgstr "" + +#~ msgid "Uncaught exception" +#~ msgstr "" + +#~ msgid "Out reactor registered" +#~ msgstr "" + +#~ msgid "CONSUMER GOT %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT QUEUED %(data)s" +#~ msgstr "" + +#~ msgid "Could not create IPC directory %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT %(data)s" +#~ msgstr "" + +#~ msgid "May specify only one of snapshot, imageRef or source volume" +#~ msgstr "" + +#~ msgid "Volume size cannot be lesser than the Snapshot size" +#~ msgstr "" + +#~ msgid "Unable to clone volumes that are in an error state" +#~ msgstr "" + +#~ msgid "Clones currently must be >= original volume size." +#~ msgstr "" + +#~ msgid "Volume size '%s' must be an integer and greater than 0" +#~ msgstr "" + +#~ msgid "Size of specified image is larger than volume size." +#~ msgstr "" + +#~ msgid "Image minDisk size is larger than the volume size." +#~ msgstr "" + +#~ msgid "" +#~ msgstr "" + +#~ msgid "Availability zone is invalid" +#~ msgstr "" + +#~ msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +#~ msgstr "volume %(vol_name)s: creating lv of size %(vol_size)sG" + +#~ msgid "volume %s: creating from snapshot" +#~ msgstr "volume %s: creating export" + +#~ msgid "volume %s: creating from existing volume" +#~ msgstr "volume %s: creating export" + +#~ msgid "volume %s: creating from image" +#~ msgstr "volume %s: creating" + +#~ msgid "volume %s: creating" +#~ msgstr "volume %s: creating" + +#~ msgid "Setting volume: %s status to error after failed image copy." 
+#~ msgstr "" + +#~ msgid "Unexpected Error: " +#~ msgstr "" + +#~ msgid "volume %s: creating export" +#~ msgstr "volume %s: creating export" + +#~ msgid "volume %s: create failed" +#~ msgstr "volume %s: creating" + +#~ msgid "volume %s: created successfully" +#~ msgstr "volume %s: created successfully" + +#~ msgid "volume %s: Error trying to reschedule create" +#~ msgstr "" + +#~ msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +#~ msgstr "" + +#~ msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +#~ msgstr "" + +#~ msgid "Downloaded image %(image_id)s to %(volume_id)s successfully." +#~ msgstr "" + +#~ msgid "Array Mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "LUN %(lun)s of size %(size)s MB is created." +#~ msgstr "" + +#~ msgid "Array mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "Failed to attach iser target for volume %(volume_id)s." +#~ msgstr "" + +#~ msgid "Fetching %s" +#~ msgstr "Fetching %s" + +#~ msgid "Link Local address is not found.:%s" +#~ msgstr "Link Local address is not found.:%s" + +#~ msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +#~ msgstr "Couldn't get Link Local IP of %(interface)s :%(ex)s" + +#~ msgid "Started %(name)s on %(_host)s:%(_port)s" +#~ msgstr "" + +#~ msgid "Unable to find a Fibre Channel volume device" +#~ msgstr "" + +#~ msgid "Volume device not found at %s" +#~ msgstr "" + +#~ msgid "Unable to find Volume Group: %s" +#~ msgstr "Unable to find Volume Group: %s" + +#~ msgid "Failed to create Volume Group: %s" +#~ msgstr "Failed to create Volume Group: %s" + +#~ msgid "snapshot %(snap_name)s: creating" +#~ msgstr "" + +#~ msgid "Running with CoraidDriver for ESM EtherCLoud" +#~ msgstr "" + +#~ msgid "Update session cookie %(session)s" +#~ msgstr "" + +#~ msgid "Message : %(message)s" +#~ msgstr "" + +#~ msgid "Error while trying to set group: %(message)s" +#~ msgstr "" + +#~ msgid "Unable to find group: %(group)s" +#~ msgstr "Unable to find group: %(group)s" + +#~ msgid "ESM urlOpen error" +#~ msgstr "" + +#~ msgid "JSON Error" +#~ msgstr "" + +#~ msgid "Request without URL" +#~ msgstr "" + +#~ msgid "Configure data : %s" +#~ msgstr "" + +#~ msgid "Configure response : %s" +#~ msgstr "" + +#~ msgid "Unable to retrive volume infos for volume %(volname)s" +#~ msgstr "" + +#~ msgid "Cannot login on Coraid ESM" +#~ msgstr "" + +#~ msgid "Fail to create volume %(volname)s" +#~ msgstr "Unable to locate volume %s" + +#~ msgid "Failed to delete volume %(volname)s" +#~ msgstr "Failed to delete volume %(volname)s" + +#~ msgid "Failed to Create Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "Failed to Delete Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "Failed to Create Volume from Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "fmt = %(fmt)s backed by: %(backing_file)s" +#~ msgstr "" + +#~ msgid "Expected image to be in raw format, but is %s" +#~ msgstr "" + +#~ msgid "volume group %s doesn't exist" +#~ msgstr "" + +#~ msgid "Error retrieving volume stats: %s" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Could not get system name" +#~ msgstr "" + +#~ msgid "CPG (%s) must be in a domain" +#~ msgstr "" + +#~ msgid "Error populating default encryption types!" +#~ msgstr "" + +#~ msgid "Unexpected error while running command." +#~ msgstr "Unexpected error while running command." 
+ +#~ msgid "Nexenta SA returned the error" +#~ msgstr "" + +#~ msgid "Ignored target group creation error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group member addition error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LU creation error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Copying metadata from snapshot %(snap_volume_id)s to %(volume_id)s" +#~ msgstr "" + +#~ msgid "Connection to glance failed" +#~ msgstr "Connection to glance failed" -#: cinder/volume/netapp.py:625 -#, python-format -msgid "Failed to get target IQN for filer: %s" -msgstr "" +#~ msgid "Invalid snapshot" +#~ msgstr "Invalid snapshot" -#: cinder/volume/san.py:113 cinder/volume/san.py:151 -msgid "Specify san_password or san_private_key" -msgstr "" +#~ msgid "Invalid input received" +#~ msgstr "Invalid input received" -#: cinder/volume/san.py:156 -msgid "san_ip must be set" -msgstr "" +#~ msgid "Invalid volume type" +#~ msgstr "Invalid volume type" -#: cinder/volume/san.py:320 -#, python-format -msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" -msgstr "" +#~ msgid "Invalid volume" +#~ msgstr "Invalid volume" -#: cinder/volume/san.py:452 -#, python-format -msgid "CLIQ command returned %s" -msgstr "" +#~ msgid "Invalid host" +#~ msgstr "" -#: cinder/volume/san.py:458 -#, python-format -msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" -msgstr "" +#~ msgid "Invalid auth key" +#~ msgstr "Invalid auth key" -#: cinder/volume/san.py:466 -#, python-format -msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" -msgstr "" +#~ msgid "Invalid metadata" +#~ msgstr "" -#: cinder/volume/san.py:496 -#, python-format -msgid "" -"Unexpected number of virtual ips for cluster %(cluster_name)s. " -"Result=%(_xml)s" -msgstr "" +#~ msgid "Invalid metadata size" +#~ msgstr "" -#: cinder/volume/san.py:549 -#, python-format -msgid "Volume info: %(volume_name)s => %(volume_attributes)s" -msgstr "" +#~ msgid "Migration error" +#~ msgstr "" -#: cinder/volume/san.py:594 -msgid "local_path not supported" -msgstr "" +#~ msgid "Quota exceeded" +#~ msgstr "" -#: cinder/volume/san.py:626 -#, python-format -msgid "Could not determine project for volume %s, can't export" -msgstr "" +#~ msgid "Connection to swift failed" +#~ msgstr "Connection to swift failed" -#: cinder/volume/san.py:696 -#, python-format -msgid "Payload for SolidFire API call: %s" -msgstr "" +#~ msgid "Volume migration failed" +#~ msgstr "" -#: cinder/volume/san.py:713 -#, python-format -msgid "Call to json.loads() raised an exception: %s" -msgstr "" +#~ msgid "SSH command injection detected" +#~ msgstr "" -#: cinder/volume/san.py:718 -#, python-format -msgid "Results of SolidFire API call: %s" -msgstr "" +#~ msgid "Invalid qos specs" +#~ msgstr "" -#: cinder/volume/san.py:732 -#, python-format -msgid "Found solidfire account: %s" -msgstr "" +#~ msgid "debug in callback: %s" +#~ msgstr "debug in callback: %s" -#: cinder/volume/san.py:746 -#, python-format -msgid "solidfire account: %s does not exist, create it..." -msgstr "" +#~ msgid "Expected object of type: %s" +#~ msgstr "" -#: cinder/volume/san.py:804 -msgid "Enter SolidFire create_volume..." 
-msgstr "" +#~ msgid "timefunc: '%(name)s' took %(total_time).2f secs" +#~ msgstr "" -#: cinder/volume/san.py:846 -msgid "Leaving SolidFire create_volume" -msgstr "" +#~ msgid "base image still has %s snapshots so not deleting base image" +#~ msgstr "" -#: cinder/volume/san.py:861 -msgid "Enter SolidFire delete_volume..." -msgstr "" +#~ msgid "Failed to rename migration destination volume %(vol)s to %(name)s" +#~ msgstr "" -#: cinder/volume/san.py:880 -#, python-format -msgid "Deleting volumeID: %s " -msgstr "" +#~ msgid "Resize volume \"%(name)s\" to %(size)s" +#~ msgstr "" -#: cinder/volume/san.py:888 -msgid "Leaving SolidFire delete_volume" -msgstr "" +#~ msgid "Volume \"%(name)s\" resized. New size is %(size)s" +#~ msgstr "" -#: cinder/volume/san.py:891 -msgid "Executing SolidFire ensure_export..." -msgstr "" +#~ msgid "Invalid snapshot backing file format: %s" +#~ msgstr "" -#: cinder/volume/san.py:895 -msgid "Executing SolidFire create_export..." -msgstr "" +#~ msgid "Extend volume from %(old_size) to %(new_size)" +#~ msgstr "" -#: cinder/volume/volume_types.py:49 cinder/volume/volume_types.py:108 -msgid "name cannot be None" -msgstr "" +#~ msgid "pool %s doesn't exist" +#~ msgstr "" -#: cinder/volume/volume_types.py:96 -msgid "id cannot be None" -msgstr "" +#~ msgid "_update_volume_stats: Could not get system name." +#~ msgstr "" -#: cinder/volume/xensm.py:55 -#, python-format -msgid "SR name = %s" -msgstr "" +#~ msgid "Disk not found: %s" +#~ msgstr "Disk not found: %s" -#: cinder/volume/xensm.py:56 -#, python-format -msgid "Params: %s" -msgstr "" +#~ msgid "read timed out" +#~ msgstr "" -#: cinder/volume/xensm.py:60 -#, python-format -msgid "Failed to create sr %s...continuing" -msgstr "" +#~ msgid "check_for_setup_error." +#~ msgstr "" -#: cinder/volume/xensm.py:62 -msgid "Create failed" -msgstr "" +#~ msgid "check_for_setup_error: Can not get device type." +#~ msgstr "" -#: cinder/volume/xensm.py:64 -#, python-format -msgid "SR UUID of new SR is: %s" -msgstr "" +#~ msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +#~ msgstr "" -#: cinder/volume/xensm.py:71 -msgid "Failed to update db" -msgstr "" +#~ msgid "_get_device_type: Storage Pool must be configured." +#~ msgstr "" -#: cinder/volume/xensm.py:80 -#, python-format -msgid "Failed to introduce sr %s...continuing" -msgstr "" +#~ msgid "create_volume:volume name: %s." +#~ msgstr "" -#: cinder/volume/xensm.py:91 -#, python-format -msgid "Failed to reach backend %d" -msgstr "" +#~ msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +#~ msgstr "" -#: cinder/volume/xensm.py:100 -msgid "XenSMDriver requires xenapi connection" -msgstr "" +#~ msgid "create_export: volume name:%s" +#~ msgstr "" -#: cinder/volume/xensm.py:110 -msgid "Failed to initiate session" -msgstr "" +#~ msgid "create_export:Volume %(name)s does not exist." +#~ msgstr "" -#: cinder/volume/xensm.py:142 -#, python-format -msgid "Volume will be created in backend - %d" -msgstr "" +#~ msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +#~ msgstr "" -#: cinder/volume/xensm.py:154 -msgid "Failed to update volume in db" -msgstr "" +#~ msgid "terminate_connection:Host does not exist. Host name:%(host)s." +#~ msgstr "" -#: cinder/volume/xensm.py:157 -msgid "Unable to create volume" -msgstr "" +#~ msgid "terminate_connection:volume does not exist. 
volume name:%(volume)s" +#~ msgstr "" -#: cinder/volume/xensm.py:171 -msgid "Failed to delete vdi" -msgstr "" +#~ msgid "create_snapshot:Device does not support snapshot." +#~ msgstr "" -#: cinder/volume/xensm.py:177 -msgid "Failed to delete volume in db" -msgstr "" +#~ msgid "create_snapshot:Resource pool needs 1GB valid size at least." +#~ msgstr "" -#: cinder/volume/xensm.py:210 -msgid "Failed to find volume in db" -msgstr "" +#~ msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +#~ msgstr "" -#: cinder/volume/xensm.py:221 -msgid "Failed to find backend in db" -msgstr "" +#~ msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s" +#~ msgstr "" -#: cinder/volume/nexenta/__init__.py:27 -msgid "Nexenta SA returned the error" -msgstr "" +#~ msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:64 -#, python-format -msgid "Sending JSON data: %s" -msgstr "" +#~ msgid "delete_snapshot:Device does not support snapshot." +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:69 -#, python-format -msgid "Auto switching to HTTPS connection to %s" -msgstr "" +#~ msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:75 -msgid "No headers in server response" -msgstr "" +#~ msgid "_check_conf_file: %s" +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:76 -msgid "Bad response from server" -msgstr "" +#~ msgid "Write login information to xml error. %s" +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:79 -#, python-format -msgid "Got response: %s" -msgstr "" +#~ msgid "_get_login_info error. %s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:96 -#, python-format -msgid "Volume %s does not exist in Nexenta SA" -msgstr "" +#~ msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:180 -msgid "" -"Call to local_path should not happen. Verify that use_local_volumes flag " -"is turned off." -msgstr "" +#~ msgid "_get_lun_set_info:%s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:202 -#, python-format -msgid "Ignored target creation error \"%s\" while ensuring export" -msgstr "" +#~ msgid "_get_iscsi_info:%s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:210 -#, python-format -msgid "Ignored target group creation error \"%s\" while ensuring export" -msgstr "" +#~ msgid "CLI command:%s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:219 -#, python-format -msgid "Ignored target group member addition error \"%s\" while ensuring export" -msgstr "" +#~ msgid "_execute_cli:%s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:227 -#, python-format -msgid "Ignored LU creation error \"%s\" while ensuring export" -msgstr "" +#~ msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:237 -#, python-format -msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" -msgstr "" +#~ msgid "_get_tgt_iqn:iSCSI IP is %s." 
+#~ msgstr "" -#: cinder/volume/nexenta/volume.py:273 -#, python-format -msgid "" -"Got error trying to destroy target group %(target_group)s, assuming it is" -" already gone: %(exc)s" -msgstr "" +#~ msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:280 -#, python-format -msgid "" -"Got error trying to delete target %(target)s, assuming it is already " -"gone: %(exc)s" -msgstr "" +#~ msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +#~ msgstr "" -#~ msgid "Unable to locate account %(account_name) on Solidfire device" +#~ msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" #~ msgstr "" -#~ msgid "Zone %(zone_id)s could not be found." +#~ msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." #~ msgstr "" -#~ msgid "Cinder access parameters were not specified." +#~ msgid "Ignored target creation error while ensuring export" #~ msgstr "" -#~ msgid "Virtual Storage Array %(id)d could not be found." +#~ msgid "Ignored target group creation error while ensuring export" #~ msgstr "" -#~ msgid "Virtual Storage Array %(name)s could not be found." +#~ msgid "Ignored target group member addition error while ensuring export" #~ msgstr "" -#~ msgid "Detected more than one volume with name %(vol_name)" +#~ msgid "Ignored LU creation error while ensuring export" #~ msgstr "" -#~ msgid "Detected existing vlan with id %(vlan)" +#~ msgid "Ignored LUN mapping entry addition error while ensuring export" #~ msgstr "" -#~ msgid "" -#~ "Attempting to grab semaphore \"%(lock)s\" " -#~ "for method \"%(method)s\"...lock" +#~ msgid "Invalid source volume %(reason)s." #~ msgstr "" -#~ msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgid "The request is invalid." +#~ msgstr "The request is invalid." + +#~ msgid "Volume %(volume_id)s persistence file could not be found." #~ msgstr "" -#~ msgid "" -#~ "Attempting to grab file lock " -#~ "\"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgid "No disk at %(location)s" #~ msgstr "" -#~ msgid "Got file lock \"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgid "Class %(class_name)s could not be found: %(exception)s" #~ msgstr "" -#~ msgid "Parent group id and group id cannot be same" +#~ msgid "Action not allowed." #~ msgstr "" -#~ msgid "No body provided" +#~ msgid "Key pair %(key_name)s already exists." #~ msgstr "" -#~ msgid "Create VSA %(display_name)s of type %(vc_type)s" +#~ msgid "Migration error: %(reason)s" #~ msgstr "" -#~ msgid "Delete VSA with id: %s" +#~ msgid "Maximum volume/snapshot size exceeded" #~ msgstr "" -#~ msgid "Associate address %(ip)s to VSA %(id)s" +#~ msgid "3PAR Host already exists: %(err)s. %(info)s" #~ msgstr "" -#~ msgid "Disassociate address from VSA %(id)s" +#~ msgid "Backup volume %(volume_id)s type not recognised." #~ msgstr "" -#~ msgid "%(obj)s with ID %(id)s not found" +#~ msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s" #~ msgstr "" -#~ msgid "" -#~ "%(obj)s with ID %(id)s belongs to " -#~ "VSA %(own_vsa_id)s and not to VSA " -#~ "%(vsa_id)s." +#~ msgid "ssh_read: Read SSH timeout" #~ msgstr "" -#~ msgid "Index. vsa_id=%(vsa_id)s" +#~ msgid "do_setup." #~ msgstr "" -#~ msgid "Detail. vsa_id=%(vsa_id)s" +#~ msgid "create_volume: volume name: %s." #~ msgstr "" -#~ msgid "Create. vsa_id=%(vsa_id)s, body=%(body)s" +#~ msgid "delete_volume: volume name: %s." 
#~ msgstr "" -#~ msgid "Create volume of %(size)s GB from VSA ID %(vsa_id)s" +#~ msgid "create_cloned_volume: src volume: %(src)s tgt volume: %(tgt)s" #~ msgstr "" -#~ msgid "Update %(obj)s with id: %(id)s, changes: %(changes)s" +#~ msgid "create_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" #~ msgstr "" -#~ msgid "Delete. vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgid "delete_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" #~ msgstr "" -#~ msgid "Show. vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s" #~ msgstr "" -#~ msgid "Index instances for VSA %s" +#~ msgid "_update_volume_stats: Updating volume stats" #~ msgstr "" -#~ msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances" +#~ msgid "restore finished." #~ msgstr "" -#~ msgid "" -#~ "Instance quota exceeded. You cannot run" -#~ " any more instances of this type." +#~ msgid "Error encountered during initialization of driver: %s" #~ msgstr "" -#~ msgid "" -#~ "Instance quota exceeded. You can only" -#~ " run %s more instances of this " -#~ "type." +#~ msgid "Unabled to update stats, driver is uninitialized" #~ msgstr "" -#~ msgid "Going to try to soft delete %s" +#~ msgid "Snapshot file at %s does not exist." #~ msgstr "" -#~ msgid "No host for instance %s, deleting immediately" +#~ msgid "_create_copy: Source vdisk %s does not exist" #~ msgstr "" -#~ msgid "Going to try to terminate %s" +#~ msgid "Login to 3PAR array invalid" #~ msgstr "" -#~ msgid "Going to try to stop %s" +#~ msgid "There are no datastores present under %s." #~ msgstr "" -#~ msgid "Going to try to start %s" +#~ msgid "Size for volume: %s not found, skipping secure delete." #~ msgstr "" -#~ msgid "" -#~ "Going to force the deletion of the" -#~ " vm %(instance_uuid)s, even if it is" -#~ " deleted" +#~ msgid "Could not find attribute for LUN named %s" #~ msgstr "" -#~ msgid "" -#~ "Instance %(instance_uuid)s did not exist " -#~ "in the DB, but I will shut " -#~ "it down anyway using a special " -#~ "context" +#~ msgid "Cleaning up incomplete backup operations" #~ msgstr "" -#~ msgid "exception terminating the instance %(instance_uuid)s" +#~ msgid "Resetting volume %s to available (was backing-up)" #~ msgstr "" -#~ msgid "trying to destroy already destroyed instance: %s" -#~ msgstr "trying to destroy already destroyed instance: %s" +#~ msgid "Resetting volume %s to error_restoring (was restoring-backup)" +#~ msgstr "" -#~ msgid "" -#~ "Instance %(name)s found in database but" -#~ " not known by hypervisor. Setting " -#~ "power state to NOSTATE" +#~ msgid "Resetting backup %s to error (was creating)" #~ msgstr "" -#~ msgid "" -#~ "Detected instance with name label " -#~ "'%(name_label)s' which is marked as " -#~ "DELETED but still present on host." +#~ msgid "Resetting backup %s to available (was restoring)" #~ msgstr "" -#~ msgid "" -#~ "Destroying instance with name label " -#~ "'%(name_label)s' which is marked as " -#~ "DELETED but still present on host." +#~ msgid "Resuming delete on backup: %s" #~ msgstr "" -#~ msgid "SQL connection failed (%(connstring)s). %(attempts)d attempts left." +#~ msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" #~ msgstr "" -#~ msgid "Can't downgrade without losing data" +#~ msgid "create_backup finished. 
backup: %s" #~ msgstr "" -#~ msgid "Instance %(instance_id)s not found" +#~ msgid "delete_backup started, backup: %s" #~ msgstr "" -#~ msgid "Network %s has active ports, cannot delete" +#~ msgid "delete_backup finished, backup %s deleted" #~ msgstr "" -#~ msgid "No fixed IPs to deallocate for vif %sid" +#~ msgid "JSON transfer Error" #~ msgstr "" -#~ msgid "" -#~ "AMQP server on %(fl_host)s:%(fl_port)d is " -#~ "unreachable: %(e)s. Trying again in " -#~ "%(fl_intv)d seconds." +#~ msgid "create volume error: %(err)s" #~ msgstr "" -#~ msgid "Unable to connect to AMQP server after %(tries)d tries. Shutting down." +#~ msgid "Create snapshot error." #~ msgstr "" -#~ msgid "Reconnected to queue" +#~ msgid "Create luncopy error." #~ msgstr "" -#~ msgid "Failed to fetch message from queue: %s" +#~ msgid "_find_host_lun_id transfer data error! " #~ msgstr "" -#~ msgid "Initing the Adapter Consumer for %s" +#~ msgid "ssh_read: Read SSH timeout." #~ msgstr "" -#~ msgid "Created \"%(exchange)s\" fanout exchange with \"%(key)s\" routing key" +#~ msgid "There are no hosts in the inventory." #~ msgstr "" -#~ msgid "Exception while processing consumer" +#~ msgid "Unable to create volume: %(vol)s on the hosts: %(hosts)s." #~ msgstr "" -#~ msgid "Creating \"%(exchange)s\" fanout exchange" +#~ msgid "Successfully cloned new backing: %s." #~ msgstr "" -#~ msgid "response %s" +#~ msgid "Successfully reverted clone: %(clone)s to snapshot: %(snapshot)s." #~ msgstr "" -#~ msgid "topic is %s" +#~ msgid "Copying backing files from %(src)s to %(dest)s." #~ msgstr "" -#~ msgid "message %s" +#~ msgid "Initiated copying of backing via task: %s." #~ msgstr "" -#~ msgid "" -#~ "Cannot confirm tmpfile at %(ipath)s is" -#~ " on same shared storage between " -#~ "%(src)s and %(dest)s." +#~ msgid "Successfully copied backing to %s." #~ msgstr "" -#~ msgid "" -#~ "Unable to migrate %(instance_id)s to " -#~ "%(dest)s: Lack of memory(host:%(avail)s <= " -#~ "instance:%(mem_inst)s)" +#~ msgid "Registering backing at path: %s to inventory." #~ msgstr "" -#~ msgid "" -#~ "Unable to migrate %(instance_id)s to " -#~ "%(dest)s: Lack of disk(host:%(available)s <=" -#~ " instance:%(necessary)s)" +#~ msgid "Initiated registring backing, task: %s." #~ msgstr "" -#~ msgid "Driver Method %(driver_method)s missing: %(e)s.Reverting to schedule()" +#~ msgid "Successfully registered backing: %s." #~ msgstr "" -#~ msgid "Setting instance %(instance_uuid)s to ERROR state." +#~ msgid "Reverting backing to snapshot: %s." #~ msgstr "" -#~ msgid "_filter_hosts: %(request_spec)s" +#~ msgid "Initiated reverting snapshot via task: %s." #~ msgstr "" -#~ msgid "Filter hosts for drive type %s" +#~ msgid "Successfully reverted to snapshot: %s." #~ msgstr "" -#~ msgid "Host %s has no free capacity. Skip" +#~ msgid "Successfully copied disk data to: %s." #~ msgstr "" -#~ msgid "Filter hosts: %s" +#~ msgid "Error(s): %s occurred in the call to RetrieveProperties." #~ msgstr "" -#~ msgid "Must implement host selection mechanism" +#~ msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" #~ msgstr "" -#~ msgid "Maximum number of hosts selected (%d)" +#~ msgid "Deploy v1 of the Cinder API. " #~ msgstr "" -#~ msgid "Selected excessive host %(host)s" +#~ msgid "Deploy v2 of the Cinder API. " #~ msgstr "" -#~ msgid "Provision volume %(name)s of size %(size)s GB on host %(host)s" +#~ msgid "_read_xml:%s" #~ msgstr "" -#~ msgid "volume_params %(volume_params)s" +#~ msgid "request ip info is %s." 
#~ msgstr "" -#~ msgid "%(i)d: Volume %(name)s" +#~ msgid "new str info is %s." #~ msgstr "" -#~ msgid "Attempting to spawn %(num_volumes)d volume(s)" +#~ msgid "Failed to create iser target for volume %(volume_id)s." #~ msgstr "" -#~ msgid "Error creating volumes" +#~ msgid "Failed to remove iser target for volume %(volume_id)s." #~ msgstr "" -#~ msgid "Non-VSA volume %d" +#~ msgid "rtstool is not installed correctly" #~ msgstr "" -#~ msgid "Spawning volume %(volume_id)s with drive type %(drive_type)s" +#~ msgid "Creating iser_target for: %s" #~ msgstr "" -#~ msgid "Error creating volume" +#~ msgid "Failed to create iser target for volume id:%(vol_id)s: %(e)s" #~ msgstr "" -#~ msgid "No capability selected for volume of size %(size)s" +#~ msgid "Removing iser_target for: %s" #~ msgstr "" -#~ msgid "Host %s:" +#~ msgid "Failed to remove iser target for volume id:%(vol_id)s: %(e)s" #~ msgstr "" -#~ msgid "" -#~ "\tDrive %(qosgrp)-25s: total %(total)2s, " -#~ "used %(used)2s, free %(free)2s. Available " -#~ "capacity %(avail)-5s" +#~ msgid "Volume %s does not exist, it seems it was already deleted" #~ msgstr "" -#~ msgid "" -#~ "\t LeastUsedHost: Best host: %(best_host)s." -#~ " (used capacity %(min_used)s)" +#~ msgid "Executing zfs send/recv on the appliance" #~ msgstr "" -#~ msgid "" -#~ "\t MostAvailCap: Best host: %(best_host)s. " -#~ "(available %(max_avail)s %(type_str)s)" +#~ msgid "zfs send/recv done, new volume %s created" #~ msgstr "" -#~ msgid "(%(nm)s) publish (key: %(routing_key)s) %(message)s" -#~ msgstr "(%(nm)s) publish (key: %(routing_key)s) %(message)s" +#~ msgid "Failed to delete temp snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" -#~ msgid "Publishing to route %s" -#~ msgstr "Publishing to route %s" +#~ msgid "Failed to delete zfs recv snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" -#~ msgid "Declaring queue %s" -#~ msgstr "Declaring queue %s" +#~ msgid "rbd export-diff failed - %s" +#~ msgstr "" -#~ msgid "Declaring exchange %s" -#~ msgstr "Declaring exchange %s" +#~ msgid "rbd import-diff failed - %s" +#~ msgstr "" -#~ msgid "Binding %(queue)s to %(exchange)s with key %(routing_key)s" -#~ msgstr "Binding %(queue)s to %(exchange)s with key %(routing_key)s" +#~ msgid "%s is not on GPFS. Perhaps GPFS not mounted." +#~ msgstr "" -#~ msgid "Getting from %(queue)s: %(message)s" -#~ msgstr "Getting from %(queue)s: %(message)s" +#~ msgid "Folder %s does not exist, it seems it was already deleted." +#~ msgstr "" -#~ msgid "Test: Emulate wrong VSA name. Raise" +#~ msgid "No 'os-update_readonly_flag' was specified in request." #~ msgstr "" -#~ msgid "Test: Emulate DB error. Raise" +#~ msgid "Volume 'readonly' flag must be specified in request as a boolean." #~ msgstr "" -#~ msgid "Test: user_data = %s" +#~ msgid "ISER provider_location not stored, using discovery" #~ msgstr "" -#~ msgid "_create: param=%s" +#~ msgid "Could not find iSER export for volume %s" #~ msgstr "" -#~ msgid "Host %s" +#~ msgid "ISER Discovery: Found %s" #~ msgstr "" -#~ msgid "Test: provision vol %(name)s on host %(host)s" +#~ msgid "Failed to access the device on the path %(path)s: %(error)s." #~ msgstr "" -#~ msgid "\t vol=%(vol)s" +#~ msgid "iSER device not found at %s" #~ msgstr "" -#~ msgid "Test: VSA update request: vsa_id=%(vsa_id)s values=%(values)s" +#~ msgid "Found iSER node %(host_device)s (after %(tries)s rescans)." #~ msgstr "" -#~ msgid "Test: Volume create: %s" +#~ msgid "Skipping ensure_export. 
No iser_target provisioned for volume: %s" #~ msgstr "" -#~ msgid "Test: Volume get request: id=%(volume_id)s" +#~ msgid "Skipping remove_export. No iser_target provisioned for volume: %s" #~ msgstr "" -#~ msgid "Test: Volume update request: id=%(volume_id)s values=%(values)s" +#~ msgid "Downloading image: %s from glance image server." #~ msgstr "" -#~ msgid "Test: Volume get: id=%(volume_id)s" +#~ msgid "Uploading image: %s to the Glance image server." #~ msgstr "" -#~ msgid "Task [%(name)s] %(task)s status: success %(result)s" +#~ msgid "Invalid request body" #~ msgstr "" -#~ msgid "Task [%(name)s] %(task)s status: %(status)s %(error_info)s" +#~ msgid "enter: _get_host_from_connector: prefix %s" #~ msgstr "" -#~ msgid "Unable to get updated status: %s" +#~ msgid "Schedule volume flow not retrieved" #~ msgstr "" -#~ msgid "" -#~ "deactivate_node is called for " -#~ "node_id = %(id)s node_ip = %(ip)s" +#~ msgid "Failed to successfully complete schedule volume using flow: %s" #~ msgstr "" -#~ msgid "virsh said: %r" +#~ msgid "Create volume flow not retrieved" #~ msgstr "" -#~ msgid "cool, it's a device" +#~ msgid "Failed to successfully complete create volume workflow" #~ msgstr "" -#~ msgid "Unable to read LXC console" +#~ msgid "Expected volume result not found" #~ msgstr "" -#~ msgid "" -#~ "to xml...\n" -#~ ":%s " +#~ msgid "Manager volume flow not retrieved" #~ msgstr "" -#~ msgid "During wait running, %s disappeared." +#~ msgid "Failed to successfully complete manager volume workflow" #~ msgstr "" -#~ msgid "Instance %s running successfully." +#~ msgid "Unable to update stats, driver is uninitialized" #~ msgstr "" -#~ msgid "The nwfilter(%(instance_secgroup_filter_name)s) is not found." +#~ msgid "Bad reponse from server: %s" #~ msgstr "" -#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image verification failed" +#~ msgid "%(flow)s has moved into state %(state)s from state %(old_state)s" #~ msgstr "" -#~ msgid "" -#~ "%(container_format)s-%(id)s (%(base_file)s): image " -#~ "verification skipped, no hash stored" +#~ msgid "No request spec, will not reschedule" #~ msgstr "" -#~ msgid "%(container_format)s-%(id)s (%(base_file)s): checking" +#~ msgid "No retry filter property or associated retry info, will not reschedule" #~ msgstr "" -#~ msgid "" -#~ "%(container_format)s-%(id)s (%(base_file)s): in use:" -#~ " on this node %(local)d local, " -#~ "%(remote)d on other nodes" +#~ msgid "Retry info not present, will not reschedule" #~ msgstr "" -#~ msgid "" -#~ "%(container_format)s-%(id)s (%(base_file)s): warning " -#~ "-- an absent base file is in " -#~ "use! instances: %(instance_list)s" +#~ msgid "Clear capabilities" #~ msgstr "" -#~ msgid "" -#~ "%(container_format)s-%(id)s (%(base_file)s): in: on" -#~ " other nodes (%(remote)d on other " -#~ "nodes)" +#~ msgid "This usually means the volume was never succesfully created." #~ msgstr "" -#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image is not in use" +#~ msgid "setting LU uppper (end) limit to %s" #~ msgstr "" -#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image is in use" +#~ msgid "Can't find lun or lun goup in array" #~ msgstr "" -#~ msgid "Created VM %s..." -#~ msgstr "Created VM %s..." +#~ msgid "Volume to be restored to is smaller than the backup to be restored" +#~ msgstr "" -#~ msgid "Created VM %(instance_name)s as %(vm_ref)s." -#~ msgstr "Created VM %(instance_name)s as %(vm_ref)s." +#~ msgid "Volume driver '%(driver)s' not initialized." 
+#~ msgstr "" -#~ msgid "Creating a CDROM-specific VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +#~ msgid "in looping call" #~ msgstr "" -#~ msgid "" -#~ "Created a CDROM-specific VBD %(vbd_ref)s" -#~ " for VM %(vm_ref)s, VDI %(vdi_ref)s." +#~ msgid "Is the appropriate service running?" #~ msgstr "" -#~ msgid "No primary VDI found for%(vm_ref)s" +#~ msgid "Could not find another host" #~ msgstr "" -#~ msgid "Fetching image %(image)s" +#~ msgid "Not enough allocatable volume gigabytes remaining" #~ msgstr "" -#~ msgid "Image Type: %s" +#~ msgid "Unable to update stats on non-intialized Volume Group: %s" #~ msgstr "" -#~ msgid "ISO: Found sr possibly containing the ISO image" +#~ msgid "do_setup: Pool %s does not exist" #~ msgstr "" -#~ msgid "instance %s: Failed to fetch glance image" +#~ msgid "migrate_volume started with more than one vdisk copy" #~ msgstr "" -#~ msgid "Creating VBD for VDI %s ... " -#~ msgstr "Creating VBD for VDI %s ... " +#~ msgid "migrate_volume: Could not get vdisk copy data" +#~ msgstr "" -#~ msgid "Creating VBD for VDI %s done." -#~ msgstr "Creating VBD for VDI %s done." +#~ msgid "Selected datastore: %s for the volume." +#~ msgstr "" -#~ msgid "VBD.unplug successful first time." -#~ msgstr "VBD.unplug successful first time." +#~ msgid "There are no valid datastores present under %s." +#~ msgstr "" -#~ msgid "VBD.unplug rejected: retrying..." -#~ msgstr "VBD.unplug rejected: retrying..." +#~ msgid "Unable to create volume, driver not initialized" +#~ msgstr "" -#~ msgid "Not sleeping anymore!" +#~ msgid "Migration %(migration_id)s could not be found." #~ msgstr "" -#~ msgid "VBD.unplug successful eventually." -#~ msgstr "VBD.unplug successful eventually." +#~ msgid "Bad driver response status: %(status)s" +#~ msgstr "" -#~ msgid "Ignoring XenAPI.Failure in VBD.unplug: %s" -#~ msgstr "Ignoring XenAPI.Failure in VBD.unplug: %s" +#~ msgid "Instance %(instance_id)s could not be found." +#~ msgstr "" -#~ msgid "Ignoring XenAPI.Failure %s" -#~ msgstr "Ignoring XenAPI.Failure %s" +#~ msgid "Volume retype failed: %(reason)s" +#~ msgstr "" -#~ msgid "Starting instance %s" +#~ msgid "SIGTERM received" #~ msgstr "" -#~ msgid "instance %s: Failed to spawn" -#~ msgstr "instance %s: Failed to spawn" +#~ msgid "Child %(pid)d exited with status %(code)d" +#~ msgstr "" -#~ msgid "Instance %s failed to spawn - performing clean-up" +#~ msgid "_wait_child %d" #~ msgstr "" -#~ msgid "instance %s: Failed to spawn - Unable to create VM" +#~ msgid "wait wrap.failed %s" #~ msgstr "" #~ msgid "" -#~ "Auto configuring disk for instance " -#~ "%(instance_uuid)s, attempting to resize " -#~ "partition..." +#~ "Report interval must be less than " +#~ "service down time. Current config: " +#~ ". Setting " +#~ "service_down_time to: %(new_service_down_time)s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target for volume %(name)s." #~ msgstr "" -#~ msgid "Invalid value for injected_files: '%s'" +#~ msgid "Updating iscsi target: %s" #~ msgstr "" -#~ msgid "Starting VM %s..." +#~ msgid "Failed to update iscsi target %(name)s: %(e)s" #~ msgstr "" -#~ msgid "Spawning VM %(instance_uuid)s created %(vm_ref)s." +#~ msgid "Caught '%(exception)s' exception." #~ msgstr "" -#~ msgid "Instance %s: waiting for running" +#~ msgid "Get code level failed" #~ msgstr "" -#~ msgid "Instance %s: running" +#~ msgid "do_setup: Could not get system name" #~ msgstr "" -#~ msgid "Resources to remove:%s" +#~ msgid "Failed to get license information." 
#~ msgstr "" -#~ msgid "Removing VDI %(vdi_ref)s(uuid:%(vdi_to_remove)s)" +#~ msgid "do_setup: No configured nodes" #~ msgstr "" -#~ msgid "Skipping VDI destroy for %s" +#~ msgid "enter: _get_chap_secret_for_host: host name %s" #~ msgstr "" -#~ msgid "Finished snapshot and upload for VM %s" +#~ msgid "" +#~ "leave: _get_chap_secret_for_host: host name " +#~ "%(host_name)s with secret %(chap_secret)s" #~ msgstr "" -#~ msgid "Starting snapshot for VM %s" +#~ msgid "" +#~ "_create_host: Cannot clean host name. " +#~ "Host name is not unicode or string" #~ msgstr "" -#~ msgid "Unable to Snapshot instance %(instance_uuid)s: %(exc)s" +#~ msgid "enter: _get_host_from_connector: %s" #~ msgstr "" -#~ msgid "Updating instance '%(instance_uuid)s' progress to %(progress)d" +#~ msgid "leave: _get_host_from_connector: host %s" #~ msgstr "" -#~ msgid "Resize instance %s complete" +#~ msgid "enter: _create_host: host %s" #~ msgstr "" -#~ msgid "domid changed from %(olddomid)s to %(newdomid)s" +#~ msgid "_create_host: No connector ports" #~ msgstr "" -#~ msgid "VM %(instance_uuid)s already halted,skipping shutdown..." +#~ msgid "leave: _create_host: host %(host)s - %(host_name)s" #~ msgstr "" -#~ msgid "Shutting down VM for Instance %(instance_uuid)s" +#~ msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" #~ msgstr "" -#~ msgid "Destroying VDIs for Instance %(instance_uuid)s" +#~ msgid "" +#~ "storwize_svc_multihostmap_enabled is set to " +#~ "False, Not allow multi host mapping" #~ msgstr "" +#~ msgid "volume %s mapping to multi host" +#~ msgstr "volume %s mapping to multi host" + #~ msgid "" -#~ "Instance %(instance_uuid)s using RAW or " -#~ "VHD, skipping kernel and ramdisk " -#~ "deletion" +#~ "leave: _map_vol_to_host: LUN %(result_lun)s, " +#~ "volume %(volume_name)s, host %(host_name)s" #~ msgstr "" -#~ msgid "Instance %(instance_uuid)s VM destroyed" +#~ msgid "enter: _delete_host: host %s " #~ msgstr "" -#~ msgid "Destroying VM for Instance %(instance_uuid)s" +#~ msgid "leave: _delete_host: host %s " #~ msgstr "" -#~ msgid "Automatically hard rebooting %d" +#~ msgid "_create_host failed to return the host name." #~ msgstr "" -#~ msgid "Instance for migration %d not found, skipping" +#~ msgid "_get_host_from_connector failed to return the host name for connector" #~ msgstr "" -#~ msgid "injecting network info to xs for vm: |%s|" +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to any host found." +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: Multiple mappings of " +#~ "volume %(vol_name)s found, no host " +#~ "specified." #~ msgstr "" -#~ msgid "creating vif(s) for vm: |%s|" +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to host %(host_name)s found" #~ msgstr "" -#~ msgid "Created VIF %(vif_ref)s for VM %(vm_ref)s, network %(network_ref)s." -#~ msgstr "Created VIF %(vif_ref)s for VM %(vm_ref)s, network %(network_ref)s." +#~ msgid "protocol must be specified as ' iSCSI' or ' FC'" +#~ msgstr "" -#~ msgid "injecting hostname to xs for vm: |%s|" +#~ msgid "enter: _create_vdisk: vdisk %s " #~ msgstr "" #~ msgid "" -#~ "The agent call to %(method)s returned" -#~ " an invalid response: %(ret)r. VM " -#~ "id=%(instance_uuid)s; path=%(path)s; args=%(addl_args)r" +#~ "_create_vdisk %(name)s - did not find success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" #~ msgid "" -#~ "TIMEOUT: The call to %(method)s timed" -#~ " out. 
VM id=%(instance_uuid)s; args=%(args)r" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find success " +#~ "message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" #~ msgstr "" #~ msgid "" -#~ "NOT IMPLEMENTED: The call to %(method)s" -#~ " is not supported by the agent. " -#~ "VM id=%(instance_uuid)s; args=%(args)r" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find mapping " +#~ "id in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" #~ msgstr "" #~ msgid "" -#~ "The call to %(method)s returned an " -#~ "error: %(e)s. VM id=%(instance_uuid)s; " -#~ "args=%(args)r" +#~ "_prepare_fc_map: Failed to prepare FlashCopy" +#~ " from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" -#~ msgid "Creating VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " -#~ msgstr "Creating VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " - -#~ msgid "Error destroying VDI" +#~ msgid "" +#~ "Unexecpted mapping status %(status)s for " +#~ "mapping %(id)s. Attributes: %(attr)s" #~ msgstr "" -#~ msgid "\tVolume %s is NOT VSA volume" +#~ msgid "" +#~ "Mapping %(id)s prepare failed to " +#~ "complete within the allotted %(to)d " +#~ "seconds timeout. Terminating." #~ msgstr "" -#~ msgid "\tFE VSA Volume %s creation - do nothing" +#~ msgid "" +#~ "_prepare_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s with " +#~ "exception %(ex)s" #~ msgstr "" -#~ msgid "VSA BE create_volume for %s failed" +#~ msgid "_prepare_fc_map: %s" #~ msgstr "" -#~ msgid "VSA BE create_volume for %s succeeded" +#~ msgid "" +#~ "_start_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s deletion - do nothing" +#~ msgid "" +#~ "enter: _run_flashcopy: execute FlashCopy from" +#~ " source %(source)s to target %(target)s" #~ msgstr "" -#~ msgid "VSA BE delete_volume for %s failed" +#~ msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" #~ msgstr "" -#~ msgid "VSA BE delete_volume for %s suceeded" +#~ msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s local path call - call discover" +#~ msgid "_create_copy: Source vdisk %(src_vdisk)s (%(src_id)s) does not exist" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s ensure export - do nothing" +#~ msgid "" +#~ "_create_copy: cannot get source vdisk " +#~ "%(src)s capacity from vdisk attributes " +#~ "%(attr)s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s create export - do nothing" +#~ msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s remove export - do nothing" +#~ msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" #~ msgstr "" -#~ msgid "VSA BE remove_export for %s failed" +#~ msgid "" +#~ "leave: _get_flashcopy_mapping_attributes: mapping " +#~ "%(fc_map_id)s, attributes %(attributes)s" #~ msgstr "" -#~ msgid "Failed to retrieve QoS info" +#~ msgid "enter: _is_vdisk_defined: vdisk %s " #~ msgstr "" -#~ msgid "invalid drive data" +#~ msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " #~ msgstr "" -#~ msgid "drive_name not defined" +#~ msgid "enter: _delete_vdisk: vdisk %s" #~ msgstr "" -#~ msgid "invalid drive type name %s" +#~ msgid "warning: Tried to delete vdisk %s but it does not exist." 
#~ msgstr "" -#~ msgid "*** Experimental VSA code ***" +#~ msgid "leave: _delete_vdisk: vdisk %s" #~ msgstr "" -#~ msgid "Requested number of VCs (%d) is too high. Setting to default" +#~ msgid "" +#~ "_add_vdisk_copy %(name)s - did not find" +#~ " success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" -#~ msgid "Creating VSA: %s" +#~ msgid "_get_vdisk_copy_attrs: Could not get vdisk copy data" #~ msgstr "" -#~ msgid "" -#~ "VSA ID %(vsa_id)d %(vsa_name)s: Create " -#~ "volume %(vol_name)s, %(vol_size)d GB, type " -#~ "%(vol_type_id)s" +#~ msgid "_get_pool_attrs: Pool %s does not exist" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)d: Update VSA status to %(status)s" +#~ msgid "enter: _execute_command_and_parse_attributes: command %s" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)d: Update VSA call" +#~ msgid "" +#~ "leave: _execute_command_and_parse_attributes:\n" +#~ "command: %(cmd)s\n" +#~ "attributes: %(attr)s" #~ msgstr "" -#~ msgid "Adding %(add_cnt)s VCs to VSA %(vsa_name)s." +#~ msgid "" +#~ "_get_hdr_dic: attribute headers and values do not match.\n" +#~ " Headers: %(header)s\n" +#~ " Values: %(row)s" #~ msgstr "" -#~ msgid "Deleting %(del_cnt)s VCs from VSA %(vsa_name)s." +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ "stdout: %(out)s\n" +#~ "stderr: %(err)s\n" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Deleting %(direction)s volume %(vol_name)s" +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" -#~ msgid "Unable to delete volume %s" +#~ msgid "Did not find expected column in %(fun)s: %(hdr)s" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Forced delete. %(direction)s volume %(vol_name)s" +#~ msgid "" +#~ "Volume size %(size)s cannot be lesser" +#~ " than the snapshot size %(snap_size)s. " +#~ "They must be >= original snapshot " +#~ "size." #~ msgstr "" -#~ msgid "Going to try to terminate VSA ID %s" +#~ msgid "" +#~ "Clones currently disallowed when %(size)s " +#~ "< %(source_size)s. They must be >= " +#~ "original volume size." #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Delete instance %(name)s" +#~ msgid "" +#~ "Size of specified image %(image_size)s " +#~ "is larger than volume size " +#~ "%(volume_size)s." #~ msgstr "" -#~ msgid "Create call received for VSA %s" +#~ msgid "" +#~ "Image minDisk size %(min_disk)s is " +#~ "larger than the volume size " +#~ "%(volume_size)s." #~ msgstr "" -#~ msgid "Failed to find VSA %(vsa_id)d" +#~ msgid "Updating volume %(volume_id)s with %(update)s" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Drive %(vol_id)s created. Status %(status)s" +#~ msgid "Volume %s: resetting 'creating' status failed" #~ msgstr "" -#~ msgid "Drive %(vol_name)s (%(vol_disp_name)s) still in creating phase - wait" +#~ msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s" #~ msgstr "" -#~ msgid "" -#~ "VSA ID %(vsa_id)d: Not all volumes " -#~ "are created (%(cvol_real)d of %(cvol_exp)d)" +#~ msgid "Marking volume %s as bootable" #~ msgstr "" #~ msgid "" -#~ "VSA ID %(vsa_id)d: Drive %(vol_name)s " -#~ "(%(vol_disp_name)s) is in %(status)s state" +#~ "Attempting download of %(image_id)s " +#~ "(%(image_location)s) to volume %(volume_id)s" #~ msgstr "" -#~ msgid "Failed to update attach status for volume %(vol_name)s. 
%(ex)s" +#~ msgid "" +#~ "Downloaded image %(image_id)s (%(image_location)s)" +#~ " to volume %(volume_id)s successfully" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)d: Delete all BE volumes" +#~ msgid "" +#~ "Creating volume glance metadata for " +#~ "volume %(volume_id)s backed by image " +#~ "%(image_id)s with: %(vol_metadata)s" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)d: Start %(vc_count)d instances" +#~ msgid "" +#~ "Cloning %(volume_id)s from image %(image_id)s" +#~ " at location %(image_location)s" #~ msgstr "" diff --git a/cinder/locale/en_US/LC_MESSAGES/cinder.po b/cinder/locale/en_US/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..c39f8754a7 --- /dev/null +++ b/cinder/locale/en_US/LC_MESSAGES/cinder.po @@ -0,0 +1,11037 @@ +# English (United States) translations for cinder. +# Copyright (C) 2013 ORGANIZATION +# This file is distributed under the same license as the cinder project. +# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Cinder\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-02-03 06:16+0000\n" +"PO-Revision-Date: 2013-05-08 11:44+0000\n" +"Last-Translator: markmc \n" +"Language-Team: en_US \n" +"Plural-Forms: nplurals=2; plural=(n != 1)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "Arguments dropped when creating context: %s" + +#: cinder/context.py:102 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "read_deleted can only be one of 'no', 'yes' or 'only', not %r" + +#: cinder/exception.py:66 cinder/brick/exception.py:33 +msgid "An unknown exception occurred." +msgstr "An unknown exception occurred." + +#: cinder/exception.py:88 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "Exception in string format operation" + +#: cinder/exception.py:107 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "Bad or unexpected response from the storage volume backend API: %(data)s" + +#: cinder/exception.py:112 +#, python-format +msgid "Volume driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:116 +#, python-format +msgid "Backup driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:120 +#, python-format +msgid "Connection to glance failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:124 +msgid "Not authorized." +msgstr "Not authorized." + +#: cinder/exception.py:129 +msgid "User does not have admin privileges" +msgstr "User does not have admin privileges" + +#: cinder/exception.py:133 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "Policy doesn't allow %(action)s to be performed." + +#: cinder/exception.py:137 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "Not authorized for image %(image_id)s." + +#: cinder/exception.py:141 +msgid "Volume driver not ready." +msgstr "" + +#: cinder/exception.py:145 cinder/brick/exception.py:74 +msgid "Unacceptable parameters." +msgstr "Unacceptable parameters." + +#: cinder/exception.py:150 +#, python-format +msgid "Invalid snapshot: %(reason)s" +msgstr "" + +#: cinder/exception.py:154 +#, python-format +msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." 
+msgstr "" + +#: cinder/exception.py:159 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "Volume %(volume_id)s is still attached, detach volume first." + +#: cinder/exception.py:163 +msgid "Failed to load data into json format" +msgstr "Failed to load data into json format" + +#: cinder/exception.py:167 +msgid "The results are invalid." +msgstr "The results are invalid." + +#: cinder/exception.py:171 +#, python-format +msgid "Invalid input received: %(reason)s" +msgstr "" + +#: cinder/exception.py:175 +#, python-format +msgid "Invalid volume type: %(reason)s" +msgstr "" + +#: cinder/exception.py:179 +#, python-format +msgid "Invalid volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:183 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "Invalid content type %(content_type)s." + +#: cinder/exception.py:187 +#, python-format +msgid "Invalid host: %(reason)s" +msgstr "" + +#: cinder/exception.py:193 cinder/brick/exception.py:81 +#, python-format +msgid "%(err)s" +msgstr "%(err)s" + +#: cinder/exception.py:197 +#, python-format +msgid "Invalid auth key: %(reason)s" +msgstr "" + +#: cinder/exception.py:201 +#, python-format +msgid "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" +msgstr "" + +#: cinder/exception.py:206 +msgid "Service is unavailable at this time." +msgstr "Service is unavailable at this time." + +#: cinder/exception.py:210 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "Image %(image_id)s is unacceptable: %(reason)s" + +#: cinder/exception.py:214 +#, python-format +msgid "The device in the path %(path)s is unavailable: %(reason)s" +msgstr "" + +#: cinder/exception.py:218 +#, fuzzy, python-format +msgid "Expected a uuid but received %(uuid)s." +msgstr "Expected a uuid but received %(uuid)s." + +#: cinder/exception.py:222 cinder/brick/exception.py:68 +msgid "Resource could not be found." +msgstr "Resource could not be found." + +#: cinder/exception.py:228 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "Volume %(volume_id)s could not be found." + +#: cinder/exception.py:232 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "Volume %(volume_id)s has no metadata with key %(metadata_key)s." + +#: cinder/exception.py:237 +#, python-format +msgid "" +"Volume %(volume_id)s has no administration metadata with key " +"%(metadata_key)s." +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Invalid metadata: %(reason)s" +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Invalid metadata size: %(reason)s" +msgstr "" + +#: cinder/exception.py:250 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." + +#: cinder/exception.py:255 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "Volume type %(volume_type_id)s could not be found." + +#: cinder/exception.py:259 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "Volume type with name %(volume_type_name)s could not be found." + +#: cinder/exception.py:264 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." 
+ +#: cinder/exception.py:269 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s deletion is not allowed with volumes " +"present with the type." +msgstr "" + +#: cinder/exception.py:274 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "Snapshot %(snapshot_id)s could not be found." + +#: cinder/exception.py:278 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "deleting volume %(volume_name)s that has snapshot" + +#: cinder/exception.py:282 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "deleting snapshot %(snapshot_name)s that has dependent volumes" + +#: cinder/exception.py:287 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "No target id found for volume %(volume_id)s." + +#: cinder/exception.py:291 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "Invalid image href %(image_href)s." + +#: cinder/exception.py:295 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "Image %(image_id)s could not be found." + +#: cinder/exception.py:299 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "Service %(service_id)s could not be found." + +#: cinder/exception.py:303 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "Host %(host)s could not be found." + +#: cinder/exception.py:307 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "Scheduler Host Filter %(filter_name)s could not be found." + +#: cinder/exception.py:311 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "Scheduler Host Weigher %(weigher_name)s could not be found." + +#: cinder/exception.py:315 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "Could not find binary %(binary)s on host %(host)s." + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "Invalid reservation expiration %(expire)s." + +#: cinder/exception.py:323 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" + +#: cinder/exception.py:328 +msgid "Quota could not be found" +msgstr "Quota could not be found" + +#: cinder/exception.py:332 +#, python-format +msgid "Unknown quota resources %(unknown)s." +msgstr "Unknown quota resources %(unknown)s." + +#: cinder/exception.py:336 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "Quota for project %(project_id)s could not be found." + +#: cinder/exception.py:340 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "Quota class %(class_name)s could not be found." + +#: cinder/exception.py:344 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "Quota usage for project %(project_id)s could not be found." + +#: cinder/exception.py:348 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "Quota reservation %(uuid)s could not be found." + +#: cinder/exception.py:352 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "Quota exceeded for resources: %(overs)s" + +#: cinder/exception.py:356 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "File %(file_path)s could not be found." 
+ +#: cinder/exception.py:365 +#, python-format +msgid "Volume Type %(id)s already exists." +msgstr "Volume Type %(id)s already exists." + +#: cinder/exception.py:369 +#, python-format +msgid "Volume type encryption for type %(type_id)s already exists." +msgstr "" + +#: cinder/exception.py:373 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "Malformed message body: %(reason)s" + +#: cinder/exception.py:377 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "Could not find config at %(path)s" + +#: cinder/exception.py:381 +#, fuzzy, python-format +msgid "Could not find parameter %(param)s" +msgstr "Could not find config at %(param)s" + +#: cinder/exception.py:385 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "Could not load paste app '%(name)s' from %(path)s" + +#: cinder/exception.py:389 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "No valid host was found. %(reason)s" + +#: cinder/exception.py:398 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "Host %(host)s is not up or doesn't exist." + +#: cinder/exception.py:402 +#, python-format +msgid "Quota exceeded: code=%(code)s" +msgstr "" + +#: cinder/exception.py:409 +#, python-format +msgid "" +"Requested volume or snapshot exceeds allowed Gigabytes quota. Requested " +"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." +msgstr "" + +#: cinder/exception.py:415 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "Maximum number of volumes allowed (%(allowed)d) exceeded" + +#: cinder/exception.py:419 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "Maximum number of snapshots allowed (%(allowed)d) exceeded" + +#: cinder/exception.py:423 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "Detected more than one volume with name %(vol_name)s" + +#: cinder/exception.py:427 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" + +#: cinder/exception.py:432 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "Unknown or unsupported command %(cmd)s" + +#: cinder/exception.py:436 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "Malformed response to command %(cmd)s: %(reason)s" + +#: cinder/exception.py:440 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "Operation failed with status=%(status)s. Full dump: %(data)s" + +#: cinder/exception.py:444 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" + +#: cinder/exception.py:449 +#, python-format +msgid "Glance metadata for volume/snapshot %(id)s cannot be found." 
+msgstr "" + +#: cinder/exception.py:453 +#, python-format +msgid "Failed to export for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:457 +#, python-format +msgid "Failed to create metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:461 +#, python-format +msgid "Failed to update metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:465 +#, python-format +msgid "Failed to copy metadata to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:469 +#, fuzzy, python-format +msgid "Failed to copy image to volume: %(reason)s" +msgstr "Failed to copy image to volume: %(reason)s" + +#: cinder/exception.py:473 +msgid "Invalid Ceph args provided for backup rbd operation" +msgstr "" + +#: cinder/exception.py:477 +msgid "An error has occurred during backup operation" +msgstr "" + +#: cinder/exception.py:481 +msgid "Backup RBD operation failed" +msgstr "" + +#: cinder/exception.py:485 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "Backup %(backup_id)s could not be found." + +#: cinder/exception.py:489 +msgid "Failed to identify volume backend." +msgstr "" + +#: cinder/exception.py:493 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "Invalid backup: %(reason)s" + +#: cinder/exception.py:497 +#, python-format +msgid "Connection to swift failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:501 +#, fuzzy, python-format +msgid "Transfer %(transfer_id)s could not be found." +msgstr "Transfer %(transfer_id)s could not be found." + +#: cinder/exception.py:505 +#, python-format +msgid "Volume migration failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:509 +#, python-format +msgid "SSH command injection detected: %(command)s" +msgstr "" + +#: cinder/exception.py:513 +#, python-format +msgid "QoS Specs %(specs_id)s already exists." +msgstr "" + +#: cinder/exception.py:517 +#, python-format +msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:522 +#, python-format +msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "No such QoS spec %(specs_id)s." +msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:536 +#, python-format +msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:541 +#, python-format +msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." +msgstr "" + +#: cinder/exception.py:546 +#, python-format +msgid "Invalid qos specs: %(reason)s" +msgstr "" + +#: cinder/exception.py:550 +#, python-format +msgid "QoS Specs %(specs_id)s is still associated with entities." +msgstr "" + +#: cinder/exception.py:554 +#, python-format +msgid "key manager error: %(reason)s" +msgstr "" + +#: cinder/exception.py:560 +msgid "Coraid Cinder Driver exception." +msgstr "" + +#: cinder/exception.py:564 +msgid "Failed to encode json data." +msgstr "" + +#: cinder/exception.py:568 +msgid "Login on ESM failed." +msgstr "" + +#: cinder/exception.py:572 +msgid "Relogin on ESM failed." +msgstr "" + +#: cinder/exception.py:576 +#, python-format +msgid "Group with name \"%(group_name)s\" not found." +msgstr "" + +#: cinder/exception.py:580 +#, python-format +msgid "ESM configure request failed: %(message)s." 
+msgstr "" + +#: cinder/exception.py:584 +#, python-format +msgid "Coraid ESM not available with reason: %(reason)s." +msgstr "" + +#: cinder/exception.py:589 +msgid "Zadara Cinder Driver exception." +msgstr "" + +#: cinder/exception.py:593 +#, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "Unable to create server object for initiator %(name)s" + +#: cinder/exception.py:597 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "Unable to find server object for initiator %(name)s" + +#: cinder/exception.py:601 +msgid "Unable to find any active VPSA controller" +msgstr "Unable to find any active VPSA controller" + +#: cinder/exception.py:605 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "Failed to retrieve attachments for volume %(name)s" + +#: cinder/exception.py:609 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "Invalid attachment info for volume %(name)s: %(reason)s" + +#: cinder/exception.py:613 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "Bad HTTP response status %(status)s" + +#: cinder/exception.py:618 +msgid "Bad response from SolidFire API" +msgstr "Bad response from SolidFire API" + +#: cinder/exception.py:622 +msgid "SolidFire Cinder Driver exception" +msgstr "" + +#: cinder/exception.py:626 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "Error in SolidFire API response: data=%(data)s" + +#: cinder/exception.py:630 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "Unable to locate account %(account_name)s on Solidfire device" + +#: cinder/exception.py:636 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "Invalid 3PAR Domain: %(err)s" + +#: cinder/exception.py:641 +msgid "Unknown NFS exception" +msgstr "Unknown NFS exception" + +#: cinder/exception.py:645 +msgid "No mounted NFS shares found" +msgstr "No mounted NFS shares found" + +#: cinder/exception.py:649 cinder/exception.py:662 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "There is no share which can host %(volume_size)sG" + +#: cinder/exception.py:654 +msgid "Unknown Gluster exception" +msgstr "Unknown Gluster exception" + +#: cinder/exception.py:658 +msgid "No mounted Gluster shares found" +msgstr "No mounted Gluster shares found" + +#: cinder/manager.py:133 +msgid "Notifying Schedulers of capabilities ..." +msgstr "Notifying Schedulers of capabilities ..." + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "JSON file representing policy" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "Rule checked when requested rule is not found" + +#: cinder/quota.py:105 +#, python-format +msgid "" +"Default quota for resource: %(res)s is set by the default quota flag: " +"quota_%(res)s, it is now deprecated. Please use the the default quota " +"class for default quota." 
+msgstr "" + +#: cinder/quota.py:748 +#, python-format +msgid "Created reservations %s" +msgstr "" + +#: cinder/quota.py:770 +#, python-format +msgid "Failed to commit reservations %s" +msgstr "" + +#: cinder/quota.py:790 +#, python-format +msgid "Failed to roll back reservations %s" +msgstr "" + +#: cinder/quota.py:876 +msgid "Cannot register resource" +msgstr "" + +#: cinder/quota.py:879 +msgid "Cannot register resources" +msgstr "" + +#: cinder/quota_utils.py:46 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume - " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/quota_utils.py:56 cinder/transfer/api.py:168 +#: cinder/volume/flows/api/create_volume.py:520 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" + +#: cinder/service.py:95 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "Starting %(topic)s node (version %(version_string)s)" + +#: cinder/service.py:108 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "Creating Consumer connection for Service %s" + +#: cinder/service.py:148 +#, python-format +msgid "" +"Report interval must be less than service down time. Current config " +"service_down_time: %(service_down_time)s, report_interval for this: " +"service is: %(report_interval)s. Setting global service_down_time to: " +"%(new_down_time)s" +msgstr "" + +#: cinder/service.py:216 +msgid "Service killed that has no database entry" +msgstr "Service killed that has no database entry" + +#: cinder/service.py:255 +msgid "The service database object disappeared, Recreating it." +msgstr "The service database object disappeared, Recreating it." + +#: cinder/service.py:270 +msgid "Recovered model server connection!" +msgstr "Recovered model server connection!" + +#: cinder/service.py:276 +msgid "model server went away" +msgstr "model server went away" + +#: cinder/service.py:298 +#, python-format +msgid "" +"Value of config option %(name)s_workers must be integer greater than 1. " +"Input value ignored." +msgstr "" + +#: cinder/service.py:373 +msgid "serve() can only be called once" +msgstr "" + +#: cinder/service.py:379 cinder/openstack/common/service.py:166 +#: cinder/openstack/common/service.py:384 +msgid "Full set of CONF:" +msgstr "Full set of CONF:" + +#: cinder/service.py:387 +#, python-format +msgid "%s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Can not translate %s to integer." 
+msgstr "" + +#: cinder/utils.py:127 +#, python-format +msgid "May specify only one of %s" +msgstr "" + +#: cinder/utils.py:212 +msgid "Specify a password or private_key" +msgstr "Specify a password or private_key" + +#: cinder/utils.py:228 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "Error connecting via ssh: %s" + +#: cinder/utils.py:412 +#, python-format +msgid "Invalid backend: %s" +msgstr "Invalid backend: %s" + +#: cinder/utils.py:423 +#, python-format +msgid "backend %s" +msgstr "backend %s" + +#: cinder/utils.py:698 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "Could not remove tmpdir: %s" + +#: cinder/utils.py:759 +#, python-format +msgid "Volume driver %s not initialized" +msgstr "" + +#: cinder/wsgi.py:127 cinder/openstack/common/sslutils.py:50 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "Unable to find cert_file : %s" + +#: cinder/wsgi.py:130 cinder/openstack/common/sslutils.py:53 +#, python-format +msgid "Unable to find ca_file : %s" +msgstr "Unable to find ca_file : %s" + +#: cinder/wsgi.py:133 cinder/openstack/common/sslutils.py:56 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "Unable to find key_file : %s" + +#: cinder/wsgi.py:136 cinder/openstack/common/sslutils.py:59 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" + +#: cinder/wsgi.py:169 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "Could not bind to %(host)s:%(port)s after trying for 30 seconds" + +#: cinder/wsgi.py:206 +#, python-format +msgid "Started %(name)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "Stopping WSGI server." + +#: cinder/wsgi.py:244 +msgid "WSGI server has stopped." +msgstr "WSGI server has stopped." + +#: cinder/wsgi.py:313 +msgid "You must implement __call__" +msgstr "You must implement __call__" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." +msgstr "" + +#: cinder/api/common.py:92 cinder/api/common.py:126 cinder/volume/api.py:266 +msgid "limit param must be an integer" +msgstr "limit param must be an integer" + +#: cinder/api/common.py:95 cinder/api/common.py:130 cinder/volume/api.py:263 +msgid "limit param must be positive" +msgstr "limit param must be positive" + +#: cinder/api/common.py:120 +msgid "offset param must be an integer" +msgstr "offset param must be an integer" + +#: cinder/api/common.py:134 +msgid "offset param must be positive" +msgstr "offset param must be positive" + +#: cinder/api/common.py:162 +#, python-format +msgid "marker [%s] not found" +msgstr "marker [%s] not found" + +#: cinder/api/common.py:189 +#, python-format +msgid "href %s does not contain version" +msgstr "href %s does not contain version" + +#: cinder/api/extensions.py:182 +msgid "Initializing extension manager." +msgstr "Initializing extension manager." 
+ +#: cinder/api/extensions.py:197 +#, python-format +msgid "Loaded extension: %s" +msgstr "Loaded extension: %s" + +#: cinder/api/extensions.py:235 +#, python-format +msgid "Ext name: %s" +msgstr "Ext name: %s" + +#: cinder/api/extensions.py:236 +#, python-format +msgid "Ext alias: %s" +msgstr "Ext alias: %s" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext description: %s" +msgstr "Ext description: %s" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext namespace: %s" +msgstr "Ext namespace: %s" + +#: cinder/api/extensions.py:240 +#, python-format +msgid "Ext updated: %s" +msgstr "Ext updated: %s" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Exception loading extension: %s" +msgstr "Exception loading extension: %s" + +#: cinder/api/extensions.py:256 +#, python-format +msgid "Loading extension %s" +msgstr "Loading extension %s" + +#: cinder/api/extensions.py:262 +#, python-format +msgid "Calling extension factory %s" +msgstr "Calling extension factory %s" + +#: cinder/api/extensions.py:276 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "osapi_volume_extension is set to deprecated path: %s" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" + +#: cinder/api/extensions.py:287 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "Failed to load extension %(ext_factory)s: %(exc)s" + +#: cinder/api/extensions.py:356 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "Failed to load extension %(classpath)s: %(exc)s" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "Failed to load extension %(ext_name)s: %(exc)s" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:266 +msgid "element is not a child" +msgstr "element is not a child" + +#: cinder/api/xmlutil.py:463 +msgid "root element selecting a list" +msgstr "root element selecting a list" + +#: cinder/api/xmlutil.py:786 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" + +#: cinder/api/xmlutil.py:907 +msgid "subclasses must implement construct()!" +msgstr "subclasses must implement construct()!" 
+ +#: cinder/api/contrib/admin_actions.py:81 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "Updating %(resource)s '%(id)s' with '%(update)r'" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "show called for member %s" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "delete called for member %s" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "Delete backup with id: %s" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "Creating new backup %s" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:227 +#: cinder/api/contrib/volume_transfer.py:157 +#: cinder/api/contrib/volume_transfer.py:193 +msgid "Incorrect request body format" +msgstr "Incorrect request body format" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "Creating backup of volume %(volume_id)s in container %(container)s" + +#: cinder/api/contrib/backups.py:224 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "Restoring backup %(backup_id)s (%(body)s)" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "Restoring backup %(backup_id)s to volume %(volume_id)s" + +#: cinder/api/contrib/extended_snapshot_attributes.py:60 +msgid "Snapshot not found." +msgstr "Snapshot not found." + +#: cinder/api/contrib/hosts.py:86 cinder/api/openstack/wsgi.py:245 +msgid "cannot understand XML" +msgstr "cannot understand XML" + +#: cinder/api/contrib/hosts.py:136 +#, python-format +msgid "Host '%s' could not be found." +msgstr "Host '%s' could not be found." + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid status: '%s'" +msgstr "Invalid status: '%s'" + +#: cinder/api/contrib/hosts.py:168 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "Invalid update setting: '%s'" + +#: cinder/api/contrib/hosts.py:180 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "Setting host %(host)s to %(state)s." + +#: cinder/api/contrib/hosts.py:206 +msgid "Describe-resource is admin only functionality" +msgstr "Describe-resource is admin only functionality" + +#: cinder/api/contrib/hosts.py:214 +msgid "Host not found" +msgstr "Host not found" + +#: cinder/api/contrib/qos_specs_manage.py:111 +msgid "Please specify a name for QoS specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:220 +msgid "Failed to disassociate qos specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:222 +msgid "Qos specs still in use." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:298 +#: cinder/api/contrib/qos_specs_manage.py:351 +msgid "Volume Type id must not be None." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:72 +msgid "Missing required element quota_class_set in request body." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:81 +msgid "Quota class limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:85 +msgid "Quota class limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:60 +msgid "Quota limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quotas.py:65 +msgid "Quota limit must be -1 or greater." 
+msgstr "Quota limit must be -1 or greater." + +#: cinder/api/contrib/quotas.py:100 +msgid "Missing required element quota_set in request body." +msgstr "" + +#: cinder/api/contrib/quotas.py:111 +#, python-format +msgid "Bad key(s) in quota set: %s" +msgstr "" + +#: cinder/api/contrib/scheduler_hints.py:36 +msgid "Malformed scheduler_hints attribute" +msgstr "" + +#: cinder/api/contrib/services.py:84 +msgid "" +"Query by service parameter is deprecated. Please use binary parameter " +"instead." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:51 +msgid "'status' must be specified." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:61 +#, python-format +msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:67 +#, python-format +msgid "" +"Provided snapshot status %(provided)s not allowed for snapshot with " +"status %(current)s." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:79 +msgid "progress must be an integer percentage" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:101 +msgid "Request body empty" +msgstr "Request body empty" + +#: cinder/api/contrib/types_extra_specs.py:105 +#: cinder/api/v1/snapshot_metadata.py:75 cinder/api/v1/volume_metadata.py:75 +#: cinder/api/v2/snapshot_metadata.py:75 cinder/api/v2/volume_metadata.py:74 +msgid "Request body and URI mismatch" +msgstr "Request body and URI mismatch" + +#: cinder/api/contrib/types_extra_specs.py:108 +#: cinder/api/v1/snapshot_metadata.py:79 cinder/api/v1/volume_metadata.py:79 +#: cinder/api/v2/snapshot_metadata.py:79 cinder/api/v2/volume_metadata.py:78 +msgid "Request body contains too many items" +msgstr "Request body contains too many items" + +#: cinder/api/contrib/types_extra_specs.py:150 +msgid "" +"Key names can only contain alphanumeric characters, underscores, periods," +" colons and hyphens." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:99 +#, python-format +msgid "" +"Invalid request to attach volume to an instance %(instance_uuid)s and a " +"host %(host_name)s simultaneously" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:107 +msgid "Invalid request to attach volume to an invalid target" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:111 +msgid "" +"Invalid request to attach volume with an invalid mode. Attaching mode " +"should be 'rw' or 'ro'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:196 +msgid "Unable to fetch connection information from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:216 +msgid "Unable to terminate volume connection from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:229 +msgid "No image_name was specified in request." +msgstr "No image_name was specified in request." + +#: cinder/api/contrib/volume_actions.py:237 +msgid "Bad value for 'force' parameter." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:240 +msgid "'force' is not string or bool." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:280 +msgid "New volume size must be specified as an integer." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:299 +msgid "Must specify readonly in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:307 +msgid "Bad value for 'readonly'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:311 +msgid "'readonly' not string or bool" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:325 +msgid "New volume type must be specified." 
+msgstr "" + +#: cinder/api/contrib/volume_transfer.py:131 +#, fuzzy +msgid "Listing volume transfers" +msgstr "Listing volume transfers" + +#: cinder/api/contrib/volume_transfer.py:147 +#, fuzzy, python-format +msgid "Creating new volume transfer %s" +msgstr "Creating new volume transfer %s" + +#: cinder/api/contrib/volume_transfer.py:162 +#, fuzzy, python-format +msgid "Creating transfer of volume %s" +msgstr "Creating transfer of volume %s" + +#: cinder/api/contrib/volume_transfer.py:183 +#, fuzzy, python-format +msgid "Accepting volume transfer %s" +msgstr "Accepting volume transfer %s" + +#: cinder/api/contrib/volume_transfer.py:196 +#, fuzzy, python-format +msgid "Accepting transfer %s" +msgstr "Accepting transfer %s" + +#: cinder/api/contrib/volume_transfer.py:217 +#, fuzzy, python-format +msgid "Delete transfer with id: %s" +msgstr "Delete transfer with id: %s" + +#: cinder/api/contrib/volume_type_encryption.py:64 +msgid "key_size must be non-negative" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:67 +msgid "key_size must be an integer" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:73 +msgid "provider must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:75 +msgid "control_location must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:83 +#, python-format +msgid "Valid control location are: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:111 +msgid "Cannot create encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:115 +msgid "Create body is not valid." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:157 +msgid "Cannot delete encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/middleware/auth.py:108 +msgid "Invalid service catalog json." +msgstr "" + +#: cinder/api/middleware/fault.py:44 +#, python-format +msgid "Caught error: %s" +msgstr "Caught error: %s" + +#: cinder/api/middleware/fault.py:53 cinder/api/openstack/wsgi.py:984 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "%(url)s returned with HTTP %(status)d" + +#: cinder/api/middleware/fault.py:69 +#, python-format +msgid "%(exception)s: %(explanation)s" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:55 cinder/api/middleware/sizelimit.py:64 +#: cinder/api/middleware/sizelimit.py:78 +msgid "Request is too large." +msgstr "Request is too large." + +#: cinder/api/openstack/__init__.py:69 +msgid "Must specify an ExtensionManager class" +msgstr "Must specify an ExtensionManager class" + +#: cinder/api/openstack/__init__.py:80 +#, python-format +msgid "Extended resource: %s" +msgstr "Extended resource: %s" + +#: cinder/api/openstack/__init__.py:104 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "Extension %(ext_name)s extending resource: %(collection)s" + +#: cinder/api/openstack/__init__.py:126 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." 
+msgstr "" + +#: cinder/api/openstack/wsgi.py:220 cinder/api/openstack/wsgi.py:634 +msgid "cannot understand JSON" +msgstr "cannot understand JSON" + +#: cinder/api/openstack/wsgi.py:639 +msgid "too many body keys" +msgstr "too many body keys" + +#: cinder/api/openstack/wsgi.py:677 +#, python-format +msgid "Exception handling resource: %s" +msgstr "Exception handling resource: %s" + +#: cinder/api/openstack/wsgi.py:682 +#, python-format +msgid "Fault thrown: %s" +msgstr "Fault thrown: %s" + +#: cinder/api/openstack/wsgi.py:685 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "HTTP exception thrown: %s" + +#: cinder/api/openstack/wsgi.py:793 +msgid "Empty body provided in request" +msgstr "Empty body provided in request" + +#: cinder/api/openstack/wsgi.py:799 +msgid "Unrecognized Content-Type provided in request" +msgstr "Unrecognized Content-Type provided in request" + +#: cinder/api/openstack/wsgi.py:803 +msgid "No Content-Type provided in request" +msgstr "No Content-Type provided in request" + +#: cinder/api/openstack/wsgi.py:914 +#, python-format +msgid "There is no such action: %s" +msgstr "There is no such action: %s" + +#: cinder/api/openstack/wsgi.py:917 cinder/api/openstack/wsgi.py:930 +#: cinder/api/v1/snapshot_metadata.py:53 cinder/api/v1/snapshot_metadata.py:71 +#: cinder/api/v1/snapshot_metadata.py:96 cinder/api/v1/snapshot_metadata.py:121 +#: cinder/api/v1/volume_metadata.py:53 cinder/api/v1/volume_metadata.py:71 +#: cinder/api/v1/volume_metadata.py:96 cinder/api/v1/volume_metadata.py:121 +#: cinder/api/v2/snapshot_metadata.py:53 cinder/api/v2/snapshot_metadata.py:71 +#: cinder/api/v2/snapshot_metadata.py:96 cinder/api/v2/snapshot_metadata.py:121 +#: cinder/api/v2/volume_metadata.py:52 cinder/api/v2/volume_metadata.py:70 +#: cinder/api/v2/volume_metadata.py:95 cinder/api/v2/volume_metadata.py:120 +msgid "Malformed request body" +msgstr "Malformed request body" + +#: cinder/api/openstack/wsgi.py:927 +msgid "Unsupported Content-Type" +msgstr "Unsupported Content-Type" + +#: cinder/api/openstack/wsgi.py:939 +msgid "Malformed request url" +msgstr "Malformed request url" + +#: cinder/api/openstack/wsgi.py:987 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "%(url)s returned a fault: %(e)s" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:139 cinder/api/v2/limits.py:138 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." + +#: cinder/api/v1/limits.py:264 cinder/api/v2/limits.py:261 +msgid "This request was rate-limited." +msgstr "This request was rate-limited." 
+ +#: cinder/api/v1/snapshot_metadata.py:37 cinder/api/v1/snapshot_metadata.py:117 +#: cinder/api/v1/snapshot_metadata.py:156 cinder/api/v2/snapshot_metadata.py:37 +#: cinder/api/v2/snapshot_metadata.py:117 +#: cinder/api/v2/snapshot_metadata.py:156 +msgid "snapshot does not exist" +msgstr "snapshot does not exist" + +#: cinder/api/v1/snapshot_metadata.py:139 +#: cinder/api/v1/snapshot_metadata.py:149 cinder/api/v1/volume_metadata.py:139 +#: cinder/api/v1/volume_metadata.py:149 cinder/api/v2/snapshot_metadata.py:139 +#: cinder/api/v2/snapshot_metadata.py:149 cinder/api/v2/volume_metadata.py:138 +#: cinder/api/v2/volume_metadata.py:148 +msgid "Metadata item was not found" +msgstr "Metadata item was not found" + +#: cinder/api/v1/snapshots.py:119 cinder/api/v2/snapshots.py:120 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "Delete snapshot with id: %s" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:184 +msgid "'volume_id' must be specified" +msgstr "" + +#: cinder/api/v1/snapshots.py:182 cinder/api/v2/snapshots.py:193 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "Create snapshot from volume %s" + +#: cinder/api/v1/snapshots.py:186 cinder/api/v2/snapshots.py:202 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "Invalid value '%s' for force. " + +#: cinder/api/v1/volume_metadata.py:37 cinder/api/v1/volume_metadata.py:117 +#: cinder/api/v1/volume_metadata.py:156 cinder/api/v2/volume_metadata.py:36 +#: cinder/api/v2/volume_metadata.py:116 cinder/api/v2/volume_metadata.py:155 +msgid "volume does not exist" +msgstr "volume does not exist" + +#: cinder/api/v1/volumes.py:111 +#, python-format +msgid "vol=%s" +msgstr "vol=%s" + +#: cinder/api/v1/volumes.py:290 cinder/api/v2/volumes.py:228 +#, python-format +msgid "Delete volume with id: %s" +msgstr "Delete volume with id: %s" + +#: cinder/api/v1/volumes.py:344 cinder/api/v1/volumes.py:348 +#: cinder/api/v2/volumes.py:298 cinder/api/v2/volumes.py:302 +msgid "Invalid imageRef provided." +msgstr "Invalid imageRef provided." + +#: cinder/api/v1/volumes.py:388 cinder/api/v2/volumes.py:354 +#, python-format +msgid "snapshot id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:401 +#, python-format +msgid "source vol id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:412 cinder/api/v2/volumes.py:377 +#, python-format +msgid "Create volume of %s GB" +msgstr "Create volume of %s GB" + +#: cinder/api/v1/volumes.py:496 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "Removing options '%(bad_options)s' from query" + +#: cinder/api/v2/snapshots.py:111 cinder/api/v2/snapshots.py:126 +#: cinder/api/v2/snapshots.py:267 +msgid "Snapshot could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:174 cinder/api/v2/snapshots.py:234 +#: cinder/api/v2/volumes.py:313 cinder/api/v2/volumes.py:419 +#, python-format +msgid "Missing required element '%s' in request body" +msgstr "" + +#: cinder/api/v2/snapshots.py:190 cinder/api/v2/volumes.py:217 +#: cinder/api/v2/volumes.py:234 cinder/api/v2/volumes.py:449 +msgid "Volume could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:230 cinder/api/v2/volumes.py:415 +msgid "Missing request body" +msgstr "" + +#: cinder/api/v2/types.py:70 +msgid "Volume type not found" +msgstr "" + +#: cinder/api/v2/volumes.py:237 +msgid "Volume cannot be deleted while in attached state" +msgstr "" + +#: cinder/api/v2/volumes.py:343 +msgid "Volume type not found." 
+msgstr "" + +#: cinder/api/v2/volumes.py:366 +#, python-format +msgid "source volume id:%s not found" +msgstr "" + +#: cinder/api/v2/volumes.py:472 +#, python-format +msgid "Removing options '%s' from query" +msgstr "Removing options '%s' from query" + +#: cinder/backup/api.py:66 +msgid "Backup status must be available or error" +msgstr "Backup status must be available or error" + +#: cinder/backup/api.py:105 +msgid "Volume to be backed up must be available" +msgstr "Volume to be backed up must be available" + +#: cinder/backup/api.py:140 +msgid "Backup status must be available" +msgstr "Backup status must be available" + +#: cinder/backup/api.py:145 +msgid "Backup to be restored has invalid size" +msgstr "Backup to be restored has invalid size" + +#: cinder/backup/api.py:154 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "Creating volume of %(size)s GB for restore of backup %(backup_id)s" + +#: cinder/backup/api.py:170 +msgid "Volume to be restored to must be available" +msgstr "Volume to be restored to must be available" + +#: cinder/backup/api.py:176 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." +msgstr "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." + +#: cinder/backup/api.py:181 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" + +#: cinder/backup/manager.py:97 +msgid "NULL host not allowed for volume backend lookup." +msgstr "" + +#: cinder/backup/manager.py:100 +#, python-format +msgid "Checking hostname '%s' for backend info." +msgstr "" + +#: cinder/backup/manager.py:107 +#, python-format +msgid "Backend not found in hostname (%s) so using default." +msgstr "" + +#: cinder/backup/manager.py:117 +#, python-format +msgid "Manager requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:120 cinder/backup/manager.py:132 +msgid "Fetching default backend." +msgstr "" + +#: cinder/backup/manager.py:123 +#, python-format +msgid "Volume manager for backend '%s' does not exist." +msgstr "" + +#: cinder/backup/manager.py:129 +#, python-format +msgid "Driver requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:147 +#, python-format +msgid "" +"Registering backend %(backend)s (host=%(host)s " +"backend_name=%(backend_name)s)." +msgstr "" + +#: cinder/backup/manager.py:154 +#, python-format +msgid "Registering default backend %s." +msgstr "" + +#: cinder/backup/manager.py:158 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)." +msgstr "" + +#: cinder/backup/manager.py:165 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s." +msgstr "" + +#: cinder/backup/manager.py:184 +msgid "Cleaning up incomplete backup operations." +msgstr "" + +#: cinder/backup/manager.py:189 +#, python-format +msgid "Resetting volume %s to available (was backing-up)." +msgstr "" + +#: cinder/backup/manager.py:194 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)." +msgstr "" + +#: cinder/backup/manager.py:206 +#, python-format +msgid "Resetting backup %s to error (was creating)." +msgstr "" + +#: cinder/backup/manager.py:212 +#, python-format +msgid "Resetting backup %s to available (was restoring)." 
+msgstr "" + +#: cinder/backup/manager.py:217 +#, python-format +msgid "Resuming delete on backup: %s." +msgstr "" + +#: cinder/backup/manager.py:225 +#, python-format +msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:237 +#, python-format +msgid "" +"Create backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:249 +#, python-format +msgid "" +"Create backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:282 +#, python-format +msgid "Create backup finished. backup: %s." +msgstr "" + +#: cinder/backup/manager.py:286 +#, python-format +msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:299 +#, python-format +msgid "" +"Restore backup aborted: expected volume status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:310 +#, python-format +msgid "" +"Restore backup aborted: expected backup status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:329 +#, python-format +msgid "" +"Restore backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:360 +#, python-format +msgid "" +"Restore backup finished, backup %(backup_id)s restored to volume " +"%(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:379 +#, python-format +msgid "Delete backup started, backup: %s." +msgstr "" + +#: cinder/backup/manager.py:386 +#, python-format +msgid "" +"Delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:399 +#, python-format +msgid "" +"Delete backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:422 +#, python-format +msgid "Delete backup finished, backup %s deleted." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:116 +msgid "" +"rbd striping not supported - ignoring configuration settings for rbd " +"striping" +msgstr "" + +#: cinder/backup/drivers/ceph.py:147 +#, python-format +msgid "invalid user '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:213 +msgid "backup_id required" +msgstr "" + +#: cinder/backup/drivers/ceph.py:224 +#, python-format +msgid "discarding %(length)s bytes from offset %(offset)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:232 +#, python-format +msgid "writing zeroes chunk %d" +msgstr "" + +#: cinder/backup/drivers/ceph.py:246 +#, python-format +msgid "transferring data between '%(src)s' and '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:250 +#, python-format +msgid "%(chunks)s chunks of %(bytes)s bytes to be transferred" +msgstr "" + +#: cinder/backup/drivers/ceph.py:269 +#, python-format +msgid "transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:279 +#, python-format +msgid "transferring remaining %s bytes" +msgstr "" + +#: cinder/backup/drivers/ceph.py:295 +#, python-format +msgid "creating base image '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:322 cinder/backup/drivers/ceph.py:603 +#, python-format +msgid "deleting backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:325 +msgid "no backup snapshot to delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:361 +#, python-format +msgid "trying diff format name format basename='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:369 +#, python-format +msgid "image %s not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:377 +#, python-format +msgid "base image still has %s snapshots so skipping base image delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:382 +#, python-format +msgid "deleting base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:389 +#, python-format +msgid "image busy, retrying %(retries)s more time(s) in %(delay)ss" +msgstr "" + +#: cinder/backup/drivers/ceph.py:394 +msgid "max retries reached - raising error" +msgstr "" + +#: cinder/backup/drivers/ceph.py:397 +#, python-format +msgid "base backup image='%s' deleted)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:407 +#, python-format +msgid "deleting source snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:453 +#, python-format +msgid "performing differential transfer from '%(src)s' to '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:478 +#, python-format +msgid "rbd diff op failed - (ret=%(ret)s stderr=%(stderr)s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:488 +#, python-format +msgid "image '%s' not found - trying diff format name" +msgstr "" + +#: cinder/backup/drivers/ceph.py:493 +#, python-format +msgid "diff format image '%s' not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:528 +#, python-format +msgid "using --from-snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:543 +#, python-format +msgid "source snap '%s' is stale so deleting" +msgstr "" + +#: cinder/backup/drivers/ceph.py:555 +#, python-format +msgid "" +"snap='%(snap)s' does not exist in base image='%(base)s' - aborting " +"incremental backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:566 +#, python-format +msgid "creating backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:586 +#, python-format +msgid "differential backup transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:595 +msgid "differential backup transfer failed" +msgstr "" + +#: 
cinder/backup/drivers/ceph.py:625 +#, python-format +msgid "creating base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:634 +msgid "copying data" +msgstr "" + +#: cinder/backup/drivers/ceph.py:694 +#, python-format +msgid "looking for snapshot of backup base '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:697 +#, python-format +msgid "backup base '%s' has no snapshots" +msgstr "" + +#: cinder/backup/drivers/ceph.py:704 +#, python-format +msgid "backup '%s' has no snapshot" +msgstr "" + +#: cinder/backup/drivers/ceph.py:708 +#, python-format +msgid "backup should only have one snapshot but instead has %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:713 +#, python-format +msgid "found snapshot '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:734 +msgid "need non-zero volume size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:751 +#, python-format +msgid "Starting backup of volume='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:764 +msgid "forcing full backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:776 +#, python-format +msgid "backup '%s' finished." +msgstr "" + +#: cinder/backup/drivers/ceph.py:834 +msgid "adjusting restore vol size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:846 +#, python-format +msgid "trying incremental restore from base='%(base)s' snap='%(snap)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:858 +msgid "differential restore failed, trying full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:869 +#, python-format +msgid "restore transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:916 +#, python-format +msgid "rbd has %s extents" +msgstr "" + +#: cinder/backup/drivers/ceph.py:938 +msgid "dest volume is original volume - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:959 +msgid "destination has extents - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:964 +#, python-format +msgid "no restore point found for backup='%s', forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:995 +msgid "forcing full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1005 +#, python-format +msgid "starting restore from Ceph backup=%(src)s to volume=%(dest)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1016 +msgid "volume_file does not support fileno() so skipping fsync()" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1021 +msgid "restore finished successfully." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:1023 +#, python-format +msgid "restore finished with error - %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1029 +#, python-format +msgid "delete started for backup=%s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1034 +msgid "rbd image not found but continuing anyway so that db entry can be removed" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1037 +#, python-format +msgid "delete '%s' finished with warning" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1039 +#, python-format +msgid "delete '%s' finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:106 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "unsupported compression algorithm: %s" + +#: cinder/backup/drivers/swift.py:123 +#, python-format +msgid "single_user auth mode enabled, but %(param)s not set" +msgstr "" + +#: cinder/backup/drivers/swift.py:141 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "_check_container_exists: container: %s" + +#: cinder/backup/drivers/swift.py:146 +#, python-format +msgid "container %s does not exist" +msgstr "container %s does not exist" + +#: cinder/backup/drivers/swift.py:151 +#, python-format +msgid "container %s exists" +msgstr "container %s exists" + +#: cinder/backup/drivers/swift.py:157 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "_create_container started, container: %(container)s,backup: %(backup_id)s" + +#: cinder/backup/drivers/swift.py:173 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "_generate_swift_object_name_prefix: %s" + +#: cinder/backup/drivers/swift.py:182 +#, python-format +msgid "generated object list: %s" +msgstr "generated object list: %s" + +#: cinder/backup/drivers/swift.py:192 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" + +#: cinder/backup/drivers/swift.py:209 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" + +#: cinder/backup/drivers/swift.py:214 +msgid "_write_metadata finished" +msgstr "_write_metadata finished" + +#: cinder/backup/drivers/swift.py:219 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" + +#: cinder/backup/drivers/swift.py:224 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "_read_metadata finished (%s)" + +#: cinder/backup/drivers/swift.py:234 +#, python-format +msgid "volume size %d is invalid." +msgstr "volume size %d is invalid." 
+ +#: cinder/backup/drivers/swift.py:248 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" + +#: cinder/backup/drivers/swift.py:271 +msgid "reading chunk of data from volume" +msgstr "reading chunk of data from volume" + +#: cinder/backup/drivers/swift.py:278 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" + +#: cinder/backup/drivers/swift.py:287 +msgid "not compressing data" +msgstr "not compressing data" + +#: cinder/backup/drivers/swift.py:291 +msgid "About to put_object" +msgstr "About to put_object" + +#: cinder/backup/drivers/swift.py:297 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "swift MD5 for %(object_name)s: %(etag)s" + +#: cinder/backup/drivers/swift.py:301 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "backup MD5 for %(object_name)s: %(md5)s" + +#: cinder/backup/drivers/swift.py:304 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" + +#: cinder/backup/drivers/swift.py:312 +msgid "Calling eventlet.sleep(0)" +msgstr "Calling eventlet.sleep(0)" + +#: cinder/backup/drivers/swift.py:328 cinder/backup/drivers/tsm.py:324 +#, python-format +msgid "backup %s finished." +msgstr "backup %s finished." + +#: cinder/backup/drivers/swift.py:345 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "v1 swift volume backup restore of %s started" + +#: cinder/backup/drivers/swift.py:350 +#, python-format +msgid "metadata_object_names = %s" +msgstr "metadata_object_names = %s" + +#: cinder/backup/drivers/swift.py:356 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" + +#: cinder/backup/drivers/swift.py:362 +#, python-format +msgid "" +"restoring object from swift. backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" +"restoring object from swift. 
backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" + +#: cinder/backup/drivers/swift.py:378 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "decompressing data using %s algorithm" + +#: cinder/backup/drivers/swift.py:401 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "v1 swift volume backup restore of %s finished" + +#: cinder/backup/drivers/swift.py:409 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" + +#: cinder/backup/drivers/swift.py:423 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "Restoring swift backup version %s" + +#: cinder/backup/drivers/swift.py:428 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "No support to restore swift backup version %s" + +#: cinder/backup/drivers/swift.py:432 cinder/backup/drivers/tsm.py:378 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." +msgstr "restore %(backup_id)s to %(volume_id)s finished." + +#: cinder/backup/drivers/swift.py:446 +msgid "swift error while listing objects, continuing with delete" +msgstr "swift error while listing objects, continuing with delete" + +#: cinder/backup/drivers/swift.py:455 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "swift error while deleting object %s, continuing with delete" + +#: cinder/backup/drivers/swift.py:458 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "deleted swift object: %(swift_object_name)s in container: %(container)s" + +#: cinder/backup/drivers/swift.py:468 cinder/backup/drivers/tsm.py:440 +#, python-format +msgid "delete %s finished" +msgstr "delete %s finished" + +#: cinder/backup/drivers/tsm.py:85 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to create device hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:143 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to obtain backup success notification from " +"server.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:173 +#, python-format +msgid "" +"restore: %(vol_id)s Failed.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:199 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a block device." +msgstr "" + +#: cinder/backup/drivers/tsm.py:206 +#, python-format +msgid "backup: %(vol_id)s Failed. Cannot obtain real path to device %(path)s." +msgstr "" + +#: cinder/backup/drivers/tsm.py:213 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a file." 
+msgstr "" + +#: cinder/backup/drivers/tsm.py:260 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to remove backup hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:286 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to TSM, volume path: " +"%(volume_path)s," +msgstr "" + +#: cinder/backup/drivers/tsm.py:298 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:308 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:338 +#, python-format +msgid "" +"restore: starting restore of backup from TSM to volume %(volume_id)s, " +"backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:352 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:362 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:413 +#, python-format +msgid "" +"delete: %(vol_id)s Failed to run dsmc with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:421 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments with " +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:432 +#, python-format +msgid "" +"delete: %(vol_id)s Failed with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/brick/exception.py:55 +#, python-format +msgid "Exception in string format operation. msg='%s'" +msgstr "" + +#: cinder/brick/exception.py:85 +msgid "We are unable to locate any Fibre Channel devices." +msgstr "" + +#: cinder/brick/exception.py:89 +msgid "Unable to find a Fibre Channel volume device." +msgstr "" + +#: cinder/brick/exception.py:93 +#, python-format +msgid "Volume device not found at %(device)s." +msgstr "" + +#: cinder/brick/exception.py:97 +#, python-format +msgid "Unable to find Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:101 +#, python-format +msgid "Failed to create Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:105 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "Failed to create iscsi target for volume %(volume_id)s." + +#: cinder/brick/exception.py:109 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "Failed to remove iscsi target for volume %(volume_id)s." + +#: cinder/brick/exception.py:113 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "Failed to attach iSCSI target for volume %(volume_id)s." + +#: cinder/brick/exception.py:117 +#, python-format +msgid "Connect to volume via protocol %(protocol)s not supported." +msgstr "" + +#: cinder/brick/initiator/connector.py:127 +#, python-format +msgid "Invalid InitiatorConnector protocol specified %(protocol)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:140 +#, python-format +msgid "Failed to access the device on the path %(path)s: %(error)s %(info)s." 
+msgstr "" + +#: cinder/brick/initiator/connector.py:229 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. Try" +" number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:242 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:317 +#, python-format +msgid "Could not find the iSCSI Initiator File %s" +msgstr "" + +#: cinder/brick/initiator/connector.py:609 +msgid "We are unable to locate any Fibre Channel devices" +msgstr "" + +#: cinder/brick/initiator/connector.py:619 +#, python-format +msgid "Looking for Fibre Channel dev %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:629 +msgid "Fibre Channel volume device not found." +msgstr "" + +#: cinder/brick/initiator/connector.py:633 +#, python-format +msgid "Fibre volume not yet found. Will rescan & retry. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:649 +#, python-format +msgid "Found Fibre Channel volume %(name)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:658 +#, python-format +msgid "Multipath device discovered %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:776 +#, python-format +msgid "AoE volume not yet found at: %(path)s. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:789 +#, python-format +msgid "Found AoE device %(path)s (after %(tries)s rediscover)" +msgstr "" + +#: cinder/brick/initiator/connector.py:815 +#, python-format +msgid "aoe-discover: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:825 +#, python-format +msgid "aoe-revalidate %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:834 +#, python-format +msgid "aoe-flush %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:858 +msgid "" +"Connection details not present. RemoteFsClient may not initialize " +"properly." +msgstr "" + +#: cinder/brick/initiator/connector.py:915 +msgid "Invalid connection_properties specified no device_path attribute" +msgstr "" + +#: cinder/brick/initiator/linuxfc.py:50 cinder/brick/initiator/linuxfc.py:56 +msgid "systool is not installed" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:99 +#: cinder/brick/initiator/linuxscsi.py:107 +#: cinder/brick/initiator/linuxscsi.py:124 +#, python-format +msgid "multipath call failed exit (%(code)s)" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:145 +#, python-format +msgid "Couldn't find multipath device %(line)s" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:149 +#, python-format +msgid "Found multipath device = %(mdev)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:140 +msgid "Attempting recreate of backing lun..." 
+msgstr "" + +#: cinder/brick/iscsi/iscsi.py:158 +#, python-format +msgid "" +"Failed to recover attempt to create iscsi backing lun for volume " +"id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:177 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "Creating iscsi_target for: %s" + +#: cinder/brick/iscsi/iscsi.py:184 +#, python-format +msgid "" +"Created volume path %(vp)s,\n" +"content: %(vc)%" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:216 cinder/brick/iscsi/iscsi.py:365 +#, fuzzy, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s" + +#: cinder/brick/iscsi/iscsi.py:227 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" + +#: cinder/brick/iscsi/iscsi.py:258 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "Removing iscsi_target for: %s" + +#: cinder/brick/iscsi/iscsi.py:262 +#, python-format +msgid "Volume path %s does not exist, nothing to remove." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:280 +#, fuzzy, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s" + +#: cinder/brick/iscsi/iscsi.py:290 cinder/brick/iscsi/iscsi.py:550 +msgid "valid iqn needed for show_target" +msgstr "valid iqn needed for show_target" + +#: cinder/brick/iscsi/iscsi.py:375 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "Removing iscsi_target for volume: %s" + +#: cinder/brick/iscsi/iscsi.py:469 +msgid "cinder-rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:489 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "Creating iscsi_target for volume: %s" + +#: cinder/brick/iscsi/iscsi.py:513 cinder/brick/iscsi/iscsi.py:522 +#, python-format +msgid "Failed to create iscsi target for volume id:%s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:532 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "Removing iscsi_target: %s" + +#: cinder/brick/iscsi/iscsi.py:542 +#, python-format +msgid "Failed to remove iscsi target for volume id:%s." 
+msgstr "" + +#: cinder/brick/iscsi/iscsi.py:571 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "Failed to add initiator iqn %s to target" + +#: cinder/brick/local_dev/lvm.py:75 +#, fuzzy +msgid "Error creating Volume Group" +msgstr "Error creating Volume Group" + +#: cinder/brick/local_dev/lvm.py:76 cinder/brick/local_dev/lvm.py:158 +#: cinder/brick/local_dev/lvm.py:474 cinder/brick/local_dev/lvm.py:503 +#: cinder/brick/local_dev/lvm.py:546 cinder/brick/local_dev/lvm.py:609 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 cinder/brick/local_dev/lvm.py:159 +#: cinder/brick/local_dev/lvm.py:475 cinder/brick/local_dev/lvm.py:504 +#: cinder/brick/local_dev/lvm.py:547 cinder/brick/local_dev/lvm.py:610 +#, fuzzy, python-format +msgid "StdOut :%s" +msgstr "StdOut :%s" + +#: cinder/brick/local_dev/lvm.py:78 cinder/brick/local_dev/lvm.py:160 +#: cinder/brick/local_dev/lvm.py:476 cinder/brick/local_dev/lvm.py:505 +#: cinder/brick/local_dev/lvm.py:548 cinder/brick/local_dev/lvm.py:611 +#, fuzzy, python-format +msgid "StdErr :%s" +msgstr "StdErr :%s" + +#: cinder/brick/local_dev/lvm.py:82 +#, fuzzy, python-format +msgid "Unable to locate Volume Group %s" +msgstr "Unable to locate Volume Group %s" + +#: cinder/brick/local_dev/lvm.py:157 +msgid "Error querying thin pool about data_percent" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:370 +#, fuzzy, python-format +msgid "Unable to find VG: %s" +msgstr "Unable to find VG: %s" + +#: cinder/brick/local_dev/lvm.py:420 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:473 +msgid "Error creating Volume" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:489 +#, fuzzy, python-format +msgid "Unable to find LV: %s" +msgstr "Unable to find LV: %s" + +#: cinder/brick/local_dev/lvm.py:502 +msgid "Error creating snapshot" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:545 +msgid "Error activating LV" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:563 +#, python-format +msgid "Error reported running lvremove: CMD: %(command)s, RESPONSE: %(response)s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:568 +msgid "Attempting udev settle and retry of lvremove..." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:608 +msgid "Error extending Volume" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:39 +msgid "nfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:45 +msgid "glusterfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:86 +#, python-format +msgid "Already mounted: %s" +msgstr "" + +#: cinder/common/config.py:125 +msgid "Deploy v1 of the Cinder API." +msgstr "" + +#: cinder/common/config.py:128 +msgid "Deploy v2 of the Cinder API." +msgstr "" + +#: cinder/common/sqlalchemyutils.py:66 +#: cinder/openstack/common/db/sqlalchemy/utils.py:72 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "Id not in sort_keys; is sort_keys unique?" 
+ +#: cinder/common/sqlalchemyutils.py:114 +#: cinder/openstack/common/db/sqlalchemy/utils.py:120 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "Unknown sort direction, must be 'desc' or 'asc'" + +#: cinder/compute/nova.py:97 +#, python-format +msgid "Novaclient connection created using URL: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:63 +msgid "Use of empty request context is deprecated" +msgstr "Use of empty request context is deprecated" + +#: cinder/db/sqlalchemy/api.py:190 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "Unrecognized read_deleted value '%s'" + +#: cinder/db/sqlalchemy/api.py:843 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1842 +#, python-format +msgid "VolumeType %s deletion failed, VolumeType in use." +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2530 +#, python-format +msgid "No backup with id %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2615 +#, fuzzy +msgid "Volume must be available" +msgstr "Volume must be available" + +#: cinder/db/sqlalchemy/api.py:2639 +#, python-format +msgid "Volume in unexpected state %s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2662 +#, python-format +msgid "" +"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " +"%(status)s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:37 +msgid "version should be an integer" +msgstr "version should be an integer" + +#: cinder/db/sqlalchemy/migration.py:64 +msgid "Upgrade DB using Essex release first." +msgstr "Upgrade DB using Essex release first." + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:240 +msgid "Exception while creating table." +msgstr "Exception while creating table." + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:269 +msgid "Downgrade from initial Cinder install is unsupported." +msgstr "Downgrade from initial Cinder install is unsupported." + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:49 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:74 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:105 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:45 +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:48 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:46 +#, python-format +msgid "Table |%s| not created!" +msgstr "Table |%s| not created!" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:127 +msgid "Dropping foreign key reservations_ibfk_1 failed." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:133 +msgid "quota_classes table not dropped" +msgstr "quota_classes table not dropped" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:140 +msgid "quota_usages table not dropped" +msgstr "quota_usages table not dropped" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:147 +msgid "reservations table not dropped" +msgstr "reservations table not dropped" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:60 +#, fuzzy +msgid "Exception while creating table 'volume_glance_metadata'" +msgstr "Exception while creating table 'volume_glance_metedata'" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:75 +msgid "volume_glance_metadata table not dropped" +msgstr "volume_glance_metadata table not dropped" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:68 +msgid "backups table not dropped" +msgstr "backups table not dropped" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:58 +msgid "snapshot_metadata table not dropped" +msgstr "snapshot_metadata table not dropped" + +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:61 +#, fuzzy +msgid "transfers table not dropped" +msgstr "transfers table not dropped" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:31 +msgid "migrations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:61 +#, python-format +msgid "Table |%s| not created" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:37 +#, python-format +msgid "Exception while dropping table %s." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:100 +#, python-format +msgid "Exception while creating table %s." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:34 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:43 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:49 +#, python-format +msgid "Column |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:92 +msgid "encryption_key_id column not dropped from volumes" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:100 +msgid "encryption_key_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:105 +msgid "volume_type_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:113 +msgid "encryption table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:49 +msgid "Table quality_of_service_specs not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:60 +msgid "Added qos_specs_id column to volume type table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:85 +msgid "Dropping foreign key volume_types_ibfk_1 failed" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:93 +msgid "Dropping qos_specs_id column failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:100 +msgid "Dropping quality_of_service_specs table failed." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:59 +msgid "volume_admin_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:48 +msgid "" +"Found existing 'default' entries in the quota_classes table. Skipping " +"insertion of default values." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:72 +msgid "Added default quota class data into the DB." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:74 +msgid "Default quota class data not inserted into the DB." +msgstr "" + +#: cinder/image/glance.py:161 cinder/image/glance.py:169 +#, fuzzy, python-format +msgid "Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s." +msgstr "" +"Error contacting glance server '%(host)s:%(port)s' for '%(method)s', " +"%(extra)s." + +#: cinder/image/image_utils.py:94 cinder/image/image_utils.py:199 +msgid "'qemu-img info' parsing failed." +msgstr "'qemu-img info' parsing failed." + +#: cinder/image/image_utils.py:101 +#, python-format +msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:109 cinder/image/image_utils.py:192 +#, python-format +msgid "" +"Size is %(image_size)dGB and doesn't fit in a volume of size " +"%(volume_size)dGB." +msgstr "" + +#: cinder/image/image_utils.py:157 +#, python-format +msgid "" +"qemu-img is not installed and image is of type %s. Only RAW images can " +"be used if qemu-img is not installed." +msgstr "" + +#: cinder/image/image_utils.py:164 +msgid "" +"qemu-img is not installed and the disk format is not specified. Only RAW" +" images can be used if qemu-img is not installed." +msgstr "" + +#: cinder/image/image_utils.py:178 +#, python-format +msgid "Copying image from %(tmp)s to volume %(dest)s - size: %(size)s" +msgstr "" + +#: cinder/image/image_utils.py:206 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "fmt=%(fmt)s backed by:%(backing_file)s" + +#: cinder/image/image_utils.py:224 +#, python-format +msgid "Converted to %(vol_format)s, but format is now %(file_format)s" +msgstr "" + +#: cinder/image/image_utils.py:260 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "Converted to %(f1)s, but format is now %(f2)s" + +#: cinder/keymgr/conf_key_mgr.py:78 +msgid "" +"config option keymgr.fixed_key has not been defined: some operations may " +"fail unexpectedly" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:80 +msgid "keymgr.fixed_key not defined" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:134 +#, python-format +msgid "Not deleting key %s" +msgstr "" + +#: cinder/openstack/common/eventlet_backdoor.py:140 +#, python-format +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "Original exception being dropped: %s" + +#: cinder/openstack/common/fileutils.py:64 +#, python-format +msgid "Reloading cached file %s" +msgstr "" + +#: cinder/openstack/common/gettextutils.py:252 +msgid "Message objects do not support addition." +msgstr "" + +#: cinder/openstack/common/gettextutils.py:261 +msgid "" +"Message objects do not support str() because they may contain non-ascii " +"characters. Please use unicode() or translate() instead." 
+msgstr "" + +#: cinder/openstack/common/imageutils.py:96 +msgid "Snapshot list encountered but no header found!" +msgstr "Snapshot list encountered but no header found!" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "Could not release the acquired lock `%s`" + +#: cinder/openstack/common/lockutils.py:189 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." + +#: cinder/openstack/common/lockutils.py:200 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." + +#: cinder/openstack/common/lockutils.py:227 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." + +#: cinder/openstack/common/lockutils.py:235 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." + +#: cinder/openstack/common/log.py:301 +#, python-format +msgid "Deprecated: %s" +msgstr "Deprecated: %s" + +#: cinder/openstack/common/log.py:402 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "Error loading logging config %(log_config)s: %(err_msg)s" + +#: cinder/openstack/common/log.py:453 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "syslog facility must be one of: %s" + +#: cinder/openstack/common/log.py:623 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "Fatal call to deprecated config: %(msg)s" + +#: cinder/openstack/common/loopingcall.py:82 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "task run outlasted interval by %s sec" + +#: cinder/openstack/common/loopingcall.py:89 +#: cinder/tests/brick/test_brick_connector.py:466 +msgid "in fixed duration looping call" +msgstr "in fixed duration looping call" + +#: cinder/openstack/common/loopingcall.py:129 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "Dynamic looping call sleeping for %.02f seconds" + +#: cinder/openstack/common/loopingcall.py:136 +msgid "in dynamic looping call" +msgstr "in dynamic looping call" + +#: cinder/openstack/common/periodic_task.py:43 +#, python-format +msgid "Unexpected argument for periodic task creation: %(arg)s." +msgstr "" + +#: cinder/openstack/common/periodic_task.py:134 +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:139 +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:177 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "Running periodic task %(full_task_name)s" + +#: cinder/openstack/common/periodic_task.py:186 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "Error during %(full_task_name)s: %(e)s" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." +msgstr "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." 
+ +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "Failed to understand rule %(match)r" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "Inheritance-based rules are deprecated; update _check_%s" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "No handler for matches of kind %s" + +#: cinder/openstack/common/processutils.py:127 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "Got unknown keyword args to utils.execute: %r" + +#: cinder/openstack/common/processutils.py:142 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "Running cmd (subprocess): %s" + +#: cinder/openstack/common/processutils.py:167 +#: cinder/openstack/common/processutils.py:239 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:345 +#, python-format +msgid "Result was %s" +msgstr "Result was %s" + +#: cinder/openstack/common/processutils.py:179 +#, python-format +msgid "%r failed. Retrying." +msgstr "%r failed. Retrying." + +#: cinder/openstack/common/processutils.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:318 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "Running cmd (SSH): %s" + +#: cinder/openstack/common/processutils.py:220 +msgid "Environment not supported over SSH" +msgstr "Environment not supported over SSH" + +#: cinder/openstack/common/processutils.py:224 +msgid "process_input not supported over SSH" +msgstr "process_input not supported over SSH" + +#: cinder/openstack/common/service.py:175 +#: cinder/openstack/common/service.py:269 +#, python-format +msgid "Caught %s, exiting" +msgstr "Caught %s, exiting" + +#: cinder/openstack/common/service.py:187 +msgid "Exception during rpc cleanup." 
+msgstr "" + +#: cinder/openstack/common/service.py:238 +msgid "Parent process has died unexpectedly, exiting" +msgstr "Parent process has died unexpectedly, exiting" + +#: cinder/openstack/common/service.py:275 +msgid "Unhandled exception" +msgstr "Unhandled exception" + +#: cinder/openstack/common/service.py:308 +msgid "Forking too fast, sleeping" +msgstr "Forking too fast, sleeping" + +#: cinder/openstack/common/service.py:327 +#, python-format +msgid "Started child %d" +msgstr "Started child %d" + +#: cinder/openstack/common/service.py:337 +#, python-format +msgid "Starting %d workers" +msgstr "Starting %d workers" + +#: cinder/openstack/common/service.py:354 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "Child %(pid)d killed by signal %(sig)d" + +#: cinder/openstack/common/service.py:358 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "Child %(pid)s exited with status %(code)d" + +#: cinder/openstack/common/service.py:362 +#, python-format +msgid "pid %d not in child list" +msgstr "pid %d not in child list" + +#: cinder/openstack/common/service.py:392 +#, python-format +msgid "Caught %s, stopping children" +msgstr "Caught %s, stopping children" + +#: cinder/openstack/common/service.py:410 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "Waiting on %d children to exit" + +#: cinder/openstack/common/sslutils.py:98 +#, python-format +msgid "Invalid SSL version : %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:86 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/strutils.py:182 +#, python-format +msgid "Invalid string format: %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:189 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "Unknown byte multiplier: %s" + +#: cinder/openstack/common/versionutils.py:69 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and " +"may be removed in %(remove_in)s." +msgstr "" + +#: cinder/openstack/common/versionutils.py:73 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s and may be removed in " +"%(remove_in)s. It will not be superseded." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:29 +msgid "An unknown error occurred in crypto utils." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:36 +#, python-format +msgid "Block size of %(given)d is too big, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:45 +#, python-format +msgid "Length of %(given)d is too long, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/db/exception.py:44 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "Invalid Parameter: Unicode is not supported by the current database." + +#: cinder/openstack/common/db/sqlalchemy/session.py:487 +msgid "DB exception wrapped." +msgstr "DB exception wrapped." + +#: cinder/openstack/common/db/sqlalchemy/session.py:538 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "Got mysql server has gone away: %s" + +#: cinder/openstack/common/db/sqlalchemy/session.py:610 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "SQL connection failed. %s attempts left." + +#: cinder/openstack/common/db/sqlalchemy/utils.py:33 +msgid "Sort key supplied was not valid." 
+msgstr "" + +#: cinder/openstack/common/notifier/api.py:129 +#, python-format +msgid "%s not in valid priorities" +msgstr "%s not in valid priorities" + +#: cinder/openstack/common/notifier/api.py:145 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" + +#: cinder/openstack/common/notifier/api.py:164 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "Failed to load notifier %s. These notifications will not be sent." + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead." +msgstr "The rabbit_notifier is now deprecated. Please use rpc_notifier instead." + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "Could not send notification to %(topic)s. Payload=%(message)s" + +#: cinder/openstack/common/rpc/__init__.py:105 +#, python-format +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." +msgstr "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." + +#: cinder/openstack/common/rpc/amqp.py:83 +msgid "Pool creating new connection" +msgstr "Pool creating new connection" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "no calling threads waiting for msg_id : %s, message : %s" + +#: cinder/openstack/common/rpc/amqp.py:216 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." +msgstr "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." + +#: cinder/openstack/common/rpc/amqp.py:299 +#, python-format +msgid "unpacked context: %s" +msgstr "unpacked context: %s" + +#: cinder/openstack/common/rpc/amqp.py:345 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "UNIQUE_ID is %s." + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "received %s" +msgstr "received %s" + +#: cinder/openstack/common/rpc/amqp.py:422 +#, python-format +msgid "no method for message: %s" +msgstr "no method for message: %s" + +#: cinder/openstack/common/rpc/amqp.py:423 +#, python-format +msgid "No method for message: %s" +msgstr "No method for message: %s" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:280 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "Expected exception during message handling (%s)" + +#: cinder/openstack/common/rpc/amqp.py:459 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" +msgstr "Exception during message handling" + +#: cinder/openstack/common/rpc/amqp.py:594 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "Making synchronous call on %s ..." 
+ +#: cinder/openstack/common/rpc/amqp.py:597 +#, python-format +msgid "MSG_ID is %s" +msgstr "MSG_ID is %s" + +#: cinder/openstack/common/rpc/amqp.py:631 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "Making asynchronous cast on %s..." + +#: cinder/openstack/common/rpc/amqp.py:640 +msgid "Making asynchronous fanout cast..." +msgstr "Making asynchronous fanout cast..." + +#: cinder/openstack/common/rpc/amqp.py:668 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "Sending %(event_type)s on %(topic)s" + +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." +msgstr "An unknown RPC related exception occurred." + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "Found duplicate message(%(msg_id)s). Skipping it." + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "Invalid reuse of an RPC connection." + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "Specified RPC version, %(version)s, not supported by this endpoint." + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." +msgstr "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "Failed to sanitize %(item)s. Key error %(err)s" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "Returning exception %s to caller" + +#: cinder/openstack/common/rpc/impl_kombu.py:166 +#: cinder/openstack/common/rpc/impl_qpid.py:164 +msgid "Failed to process message... skipping it." +msgstr "Failed to process message... skipping it." 
+ +#: cinder/openstack/common/rpc/impl_kombu.py:477 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "Reconnecting to AMQP server on %(hostname)s:%(port)d" + +#: cinder/openstack/common/rpc/impl_kombu.py:499 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "Connected to AMQP server on %(hostname)s:%(port)d" + +#: cinder/openstack/common/rpc/impl_kombu.py:536 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" + +#: cinder/openstack/common/rpc/impl_kombu.py:552 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." + +#: cinder/openstack/common/rpc/impl_kombu.py:606 +#: cinder/openstack/common/rpc/impl_qpid.py:507 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "Failed to declare consumer for topic '%(topic)s': %(err_str)s" + +#: cinder/openstack/common/rpc/impl_kombu.py:624 +#: cinder/openstack/common/rpc/impl_qpid.py:522 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "Timed out waiting for RPC response: %s" + +#: cinder/openstack/common/rpc/impl_kombu.py:628 +#: cinder/openstack/common/rpc/impl_qpid.py:526 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "Failed to consume message from queue: %s" + +#: cinder/openstack/common/rpc/impl_kombu.py:667 +#: cinder/openstack/common/rpc/impl_qpid.py:561 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "Failed to publish message to topic '%(topic)s': %(err_str)s" + +#: cinder/openstack/common/rpc/impl_qpid.py:84 +#, python-format +msgid "Invalid value for qpid_topology_version: %d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:455 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" + +#: cinder/openstack/common/rpc/impl_qpid.py:461 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "Connected to AMQP server on %s" + +#: cinder/openstack/common/rpc/impl_qpid.py:474 +msgid "Re-established AMQP queues" +msgstr "Re-established AMQP queues" + +#: cinder/openstack/common/rpc/impl_qpid.py:534 +msgid "Error processing message. Skipping it." +msgstr "Error processing message. Skipping it." + +#: cinder/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." +msgstr "JSON serialization failed." + +#: cinder/openstack/common/rpc/impl_zmq.py:101 +#, python-format +msgid "Deserializing: %s" +msgstr "Deserializing: %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:136 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "Connecting to %(addr)s with %(type)s" + +#: cinder/openstack/common/rpc/impl_zmq.py:137 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "-> Subscribed to %(subscribe)s" + +#: cinder/openstack/common/rpc/impl_zmq.py:138 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "-> bind: %(bind)s" + +#: cinder/openstack/common/rpc/impl_zmq.py:146 +msgid "Could not open socket." +msgstr "Could not open socket." 
+ +#: cinder/openstack/common/rpc/impl_zmq.py:158 +#, python-format +msgid "Subscribing to %s" +msgstr "Subscribing to %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:200 +msgid "You cannot recv on this socket." +msgstr "You cannot recv on this socket." + +#: cinder/openstack/common/rpc/impl_zmq.py:205 +msgid "You cannot send on this socket." +msgstr "You cannot send on this socket." + +#: cinder/openstack/common/rpc/impl_zmq.py:267 +#, python-format +msgid "Running func with context: %s" +msgstr "Running func with context: %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:305 +msgid "Sending reply" +msgstr "Sending reply" + +#: cinder/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." +msgstr "RPC message did not include method." + +#: cinder/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" +msgstr "Registering reactor" + +#: cinder/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" +msgstr "In reactor registered" + +#: cinder/openstack/common/rpc/impl_zmq.py:387 +msgid "Consuming socket" +msgstr "Consuming socket" + +#: cinder/openstack/common/rpc/impl_zmq.py:437 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "Creating proxy for topic: %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:443 +msgid "Topic contained dangerous characters." +msgstr "Topic contained dangerous characters." + +#: cinder/openstack/common/rpc/impl_zmq.py:475 +msgid "Topic socket file creation failed." +msgstr "Topic socket file creation failed." + +#: cinder/openstack/common/rpc/impl_zmq.py:481 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." + +#: cinder/openstack/common/rpc/impl_zmq.py:497 +#, python-format +msgid "Required IPC directory does not exist at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:506 +#, python-format +msgid "Permission denied to IPC directory at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:509 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "Could not create ZeroMQ receiver daemon. Socket may already be in use." + +#: cinder/openstack/common/rpc/impl_zmq.py:543 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "CONSUMER RECEIVED DATA: %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:562 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "ZMQ Envelope version unsupported or unknown." + +#: cinder/openstack/common/rpc/impl_zmq.py:590 +msgid "Skipping topic registration. Already registered." +msgstr "Skipping topic registration. Already registered." 
+ +#: cinder/openstack/common/rpc/impl_zmq.py:597 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "Consumer is a zmq.%s" + +#: cinder/openstack/common/rpc/impl_zmq.py:649 +msgid "Creating payload" +msgstr "Creating payload" + +#: cinder/openstack/common/rpc/impl_zmq.py:662 +msgid "Creating queue socket for reply waiter" +msgstr "Creating queue socket for reply waiter" + +#: cinder/openstack/common/rpc/impl_zmq.py:675 +msgid "Sending cast" +msgstr "Sending cast" + +#: cinder/openstack/common/rpc/impl_zmq.py:678 +msgid "Cast sent; Waiting reply" +msgstr "Cast sent; Waiting reply" + +#: cinder/openstack/common/rpc/impl_zmq.py:681 +#, python-format +msgid "Received message: %s" +msgstr "Received message: %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:682 +msgid "Unpacking response" +msgstr "Unpacking response" + +#: cinder/openstack/common/rpc/impl_zmq.py:691 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "Unsupported or unknown ZMQ envelope returned." + +#: cinder/openstack/common/rpc/impl_zmq.py:698 +msgid "RPC Message Invalid." +msgstr "RPC Message Invalid." + +#: cinder/openstack/common/rpc/impl_zmq.py:721 +#, python-format +msgid "%(msg)s" +msgstr "%(msg)s" + +#: cinder/openstack/common/rpc/impl_zmq.py:724 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "Sending message(s) to: %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:728 +msgid "No matchmaker results. Not casting." +msgstr "No matchmaker results. Not casting." + +#: cinder/openstack/common/rpc/impl_zmq.py:731 +msgid "No match from matchmaker." +msgstr "No match from matchmaker." + +#: cinder/openstack/common/rpc/impl_zmq.py:771 +#, python-format +msgid "topic is %s." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:815 +#, python-format +msgid "rpc_zmq_matchmaker = %(orig)s is deprecated; use %(new)s instead" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." +msgstr "Match not found by MatchMaker." + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "Matchmaker does not implement registration or heartbeat." + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "Matchmaker unregistered: %s, %s" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." +msgstr "Register before starting heartbeat." + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#: cinder/openstack/common/rpc/matchmaker_ring.py:79 +#: cinder/openstack/common/rpc/matchmaker_ring.py:97 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "No key defining hosts for topic '%s', see ringfile" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:54 +#, python-format +msgid "extra_spec requirement '%(req)s' does not match '%(cap)s'" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:67 +#, python-format +msgid "%(host_state)s fails resource_type extra_specs requirements" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:43 +msgid "Re-scheduling is disabled." +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:52 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" +msgstr "Host %(host)s %(pass_msg)s. 
Previously tried hosts: %(hosts)s" + +#: cinder/scheduler/driver.py:69 +msgid "Must implement host_passes_filters" +msgstr "" + +#: cinder/scheduler/driver.py:74 +msgid "Must implement find_retype_host" +msgstr "" + +#: cinder/scheduler/driver.py:78 +msgid "Must implement a fallback schedule" +msgstr "Must implement a fallback schedule" + +#: cinder/scheduler/driver.py:82 +msgid "Must implement schedule_create_volume" +msgstr "Must implement schedule_create_volume" + +#: cinder/scheduler/filter_scheduler.py:98 +#, python-format +msgid "cannot place volume %(id)s on %(host)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:114 +#, python-format +msgid "No valid hosts for volume %(id)s with type %(type)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:125 +#, python-format +msgid "" +"Current host not valid for volume %(id)s with type %(type)s, migration " +"not allowed" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:156 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "Invalid value for 'scheduler_max_attempts', must be >=1" + +#: cinder/scheduler/filter_scheduler.py:174 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" + +#: cinder/scheduler/filter_scheduler.py:207 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" + +#: cinder/scheduler/filter_scheduler.py:259 +#, python-format +msgid "Filtered %s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:276 +#, python-format +msgid "Choosing %s" +msgstr "" + +#: cinder/scheduler/host_manager.py:264 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "Ignoring %(service_name)s service update from %(host)s" + +#: cinder/scheduler/host_manager.py:269 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "Received %(service_name)s service update from %(host)s." + +#: cinder/scheduler/host_manager.py:294 +#, fuzzy, python-format +msgid "volume service is down or disabled. (host: %s)" +msgstr "volume service is down or disabled. (host: %s)" + +#: cinder/scheduler/manager.py:63 +msgid "" +"ChanceScheduler and SimpleScheduler have been deprecated due to lack of " +"support for advanced features like: volume types, volume encryption, QoS " +"etc. These two schedulers can be fully replaced by FilterScheduler with " +"certain combination of filters and weighers." +msgstr "" + +#: cinder/scheduler/manager.py:98 cinder/scheduler/manager.py:100 +msgid "Failed to create scheduler manager volume flow" +msgstr "" + +#: cinder/scheduler/manager.py:159 +msgid "New volume type not specified in request_spec." +msgstr "" + +#: cinder/scheduler/manager.py:174 +#, python-format +msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." 
+msgstr "" + +#: cinder/scheduler/manager.py:192 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "Failed to schedule_%(method)s: %(ex)s" + +#: cinder/scheduler/scheduler_options.py:68 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "Could not stat scheduler options file %(filename)s: '%(e)s'" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%s'" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:43 +msgid "Free capacity not set: volume node info collection broken." +msgstr "Free capacity not set: volume node info collection broken." + +#: cinder/scheduler/filters/capacity_filter.py:57 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" + +#: cinder/scheduler/flows/create_volume.py:53 +msgid "No volume_id provided to populate a request_spec from" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:116 +#, python-format +msgid "Failed to schedule_create_volume: %(cause)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:135 +#, python-format +msgid "Failed notifying on %(topic)s payload %(payload)s" +msgstr "" + +#: cinder/tests/fake_driver.py:57 cinder/volume/driver.py:784 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "FAKE ISCSI: %s" + +#: cinder/tests/fake_driver.py:76 cinder/volume/driver.py:884 +#, python-format +msgid "FAKE ISER: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:97 +msgid "local_path not implemented" +msgstr "" + +#: cinder/tests/fake_driver.py:124 cinder/tests/fake_driver.py:129 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "LoggingVolumeDriver: %s" + +#: cinder/tests/fake_utils.py:70 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "Faking execution of cmd (subprocess): %s" + +#: cinder/tests/fake_utils.py:78 +#, python-format +msgid "Faked command matched %s" +msgstr "Faked command matched %s" + +#: cinder/tests/fake_utils.py:94 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "Faked command raised an exception %s" + +#: cinder/tests/fake_utils.py:97 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" + +#: cinder/tests/test_misc.py:58 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" +"The following migrations are missing a downgrade:\n" +"\t%s" + +#: cinder/tests/test_netapp_nfs.py:360 +#, python-format +msgid "Share %(share)s and file name %(file_name)s" +msgstr "" + +#: cinder/tests/test_rbd.py:768 cinder/volume/drivers/rbd.py:175 +msgid "flush() not supported in this version of librbd" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:260 +#, python-format +msgid "unrecognized argument %s" +msgstr "unrecognized argument %s" + +#: cinder/tests/test_storwize_svc.py:1507 +#, python-format +msgid "Run CLI command: %s" +msgstr "Run CLI command: %s" + +#: cinder/tests/test_storwize_svc.py:1510 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" + +#: cinder/tests/test_storwize_svc.py:1515 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" 
+msgstr "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" + +#: cinder/tests/test_volume_types.py:60 +#, python-format +msgid "Given data: %s" +msgstr "Given data: %s" + +#: cinder/tests/test_volume_types.py:61 +#, python-format +msgid "Result data: %s" +msgstr "Result data: %s" + +#: cinder/tests/test_xiv_ds8k.py:102 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "Volume not found for instance %(instance_id)s." + +#: cinder/tests/api/contrib/test_backups.py:741 +msgid "Invalid input" +msgstr "Invalid input" + +#: cinder/tests/integrated/test_login.py:29 +#, python-format +msgid "volume: %s" +msgstr "volume: %s" + +#: cinder/tests/integrated/api/client.py:32 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" + +#: cinder/tests/integrated/api/client.py:42 +msgid "Authentication error" +msgstr "Authentication error" + +#: cinder/tests/integrated/api/client.py:50 +msgid "Authorization error" +msgstr "Authorization error" + +#: cinder/tests/integrated/api/client.py:58 +msgid "Item not found" +msgstr "Item not found" + +#: cinder/tests/integrated/api/client.py:100 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "Doing %(method)s on %(relative_url)s" + +#: cinder/tests/integrated/api/client.py:103 +#, python-format +msgid "Body: %s" +msgstr "Body: %s" + +#: cinder/tests/integrated/api/client.py:121 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "%(auth_uri)s => code %(http_status)s" + +#: cinder/tests/integrated/api/client.py:148 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "%(relative_uri)s => code %(http_status)s" + +#: cinder/tests/integrated/api/client.py:159 +msgid "Unexpected status code" +msgstr "Unexpected status code" + +#: cinder/tests/integrated/api/client.py:166 +#, python-format +msgid "Decoding JSON: %s" +msgstr "Decoding JSON: %s" + +#: cinder/transfer/api.py:68 +#, fuzzy +msgid "Volume in unexpected state" +msgstr "Volume in unexpected state" + +#: cinder/transfer/api.py:102 cinder/volume/api.py:367 +msgid "status must be available" +msgstr "status must be available" + +#: cinder/transfer/api.py:119 +#, fuzzy, python-format +msgid "Failed to create transfer record for %s" +msgstr "Failed to create transfer record for %s" + +#: cinder/transfer/api.py:136 +#, python-format +msgid "Attempt to transfer %s with invalid auth key." +msgstr "" + +#: cinder/transfer/api.py:156 cinder/volume/flows/api/create_volume.py:508 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" + +#: cinder/transfer/api.py:182 +#, fuzzy, python-format +msgid "Failed to update quota donating volumetransfer id %s" +msgstr "Failed to update quota donating volumetransfer id %s" + +#: cinder/transfer/api.py:199 +#, python-format +msgid "Volume %s has been transferred." 
+msgstr "" + +#: cinder/volume/api.py:143 +#, python-format +msgid "Unable to query if %s is in the availability zone set" +msgstr "" + +#: cinder/volume/api.py:171 cinder/volume/api.py:173 +msgid "Failed to create api volume flow" +msgstr "" + +#: cinder/volume/api.py:202 +msgid "Failed to update quota for deleting volume" +msgstr "Failed to update quota for deleting volume" + +#: cinder/volume/api.py:214 +#, python-format +msgid "Volume status must be available or error, but current status is: %s" +msgstr "" + +#: cinder/volume/api.py:224 +msgid "Volume cannot be deleted while migrating" +msgstr "" + +#: cinder/volume/api.py:229 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "Volume still has %d dependent snapshots" + +#: cinder/volume/api.py:285 cinder/volume/api.py:350 +#: cinder/volume/qos_specs.py:240 cinder/volume/volume_types.py:67 +#, python-format +msgid "Searching by: %s" +msgstr "Searching by: %s" + +#: cinder/volume/api.py:370 +msgid "already attached" +msgstr "already attached" + +#: cinder/volume/api.py:377 +msgid "status must be in-use to detach" +msgstr "" + +#: cinder/volume/api.py:388 +msgid "Volume status must be available to reserve" +msgstr "Volume status must be available to reserve" + +#: cinder/volume/api.py:464 +msgid "Snapshot cannot be created while volume is migrating" +msgstr "" + +#: cinder/volume/api.py:468 +msgid "must be available" +msgstr "must be available" + +#: cinder/volume/api.py:490 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" + +#: cinder/volume/api.py:502 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" + +#: cinder/volume/api.py:553 +msgid "Volume Snapshot status must be available or error" +msgstr "Volume Snapshot status must be available or error" + +#: cinder/volume/api.py:581 cinder/volume/flows/api/create_volume.py:208 +msgid "Metadata property key blank" +msgstr "Metadata property key blank" + +#: cinder/volume/api.py:585 +msgid "Metadata property key greater than 255 characters" +msgstr "Metadata property key greater than 255 characters" + +#: cinder/volume/api.py:589 +msgid "Metadata property value greater than 255 characters" +msgstr "Metadata property value greater than 255 characters" + +#: cinder/volume/api.py:720 cinder/volume/api.py:772 +msgid "Volume status must be available/in-use." +msgstr "Volume status must be available/in-use." + +#: cinder/volume/api.py:723 +msgid "Volume status is in-use." +msgstr "Volume status is in-use." + +#: cinder/volume/api.py:752 +msgid "Volume status must be available to extend." +msgstr "" + +#: cinder/volume/api.py:757 +#, python-format +msgid "" +"New size for extend must be greater than current size. 
(current: " +"%(size)s, extended: %(new_size)s)" +msgstr "" + +#: cinder/volume/api.py:778 +msgid "Volume is already part of an active migration" +msgstr "" + +#: cinder/volume/api.py:784 +msgid "volume must not have snapshots" +msgstr "" + +#: cinder/volume/api.py:797 +#, python-format +msgid "No available service named %s" +msgstr "" + +#: cinder/volume/api.py:803 +msgid "Destination host must be different than current host" +msgstr "" + +#: cinder/volume/api.py:833 +msgid "Source volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:837 +msgid "Destination volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:842 +#, python-format +msgid "Destination has migration_status %(stat)s, expected %(exp)s." +msgstr "" + +#: cinder/volume/api.py:853 +msgid "Volume status must be available to update readonly flag." +msgstr "" + +#: cinder/volume/api.py:862 +#, python-format +msgid "Unable to update type due to incorrect status on volume: %s" +msgstr "" + +#: cinder/volume/api.py:868 +#, python-format +msgid "Volume %s is already part of an active migration." +msgstr "" + +#: cinder/volume/api.py:874 +#, python-format +msgid "migration_policy must be 'on-demand' or 'never', passed: %s" +msgstr "" + +#: cinder/volume/api.py:887 +#, python-format +msgid "Invalid volume_type passed: %s" +msgstr "" + +#: cinder/volume/api.py:900 +#, python-format +msgid "New volume_type same as original: %s" +msgstr "" + +#: cinder/volume/api.py:915 +msgid "Retype cannot change encryption requirements" +msgstr "" + +#: cinder/volume/api.py:927 +msgid "Retype cannot change front-end qos specs for in-use volumes" +msgstr "" + +#: cinder/volume/driver.py:189 cinder/volume/drivers/netapp/nfs.py:174 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "Recovering from a failed execute. Try number %s" + +#: cinder/volume/driver.py:282 +#, python-format +msgid "copy_data_between_volumes %(src)s -> %(dest)s." +msgstr "" + +#: cinder/volume/driver.py:295 cinder/volume/driver.py:309 +#, python-format +msgid "Failed to attach volume %(vol)s" +msgstr "" + +#: cinder/volume/driver.py:327 +#, python-format +msgid "Failed to copy volume %(src)s to %(dest)d" +msgstr "" + +#: cinder/volume/driver.py:340 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "copy_image_to_volume %s." + +#: cinder/volume/driver.py:358 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "copy_volume_to_image %s." + +#: cinder/volume/driver.py:394 +#, python-format +msgid "Unable to access the backend storage via the path %(path)s." +msgstr "" + +#: cinder/volume/driver.py:433 +#, python-format +msgid "Creating a new backup for volume %s." +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Restoring backup %(backup)s to volume %(volume)s." 
+msgstr "" + +#: cinder/volume/driver.py:474 +msgid "Extend volume not implemented" +msgstr "" + +#: cinder/volume/driver.py:533 cinder/volume/drivers/emc/emc_smis_iscsi.py:113 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "ISCSI provider_location not stored, using discovery" + +#: cinder/volume/driver.py:546 +#, python-format +msgid "ISCSI discovery attempt failed for:%s" +msgstr "" + +#: cinder/volume/driver.py:548 +#, python-format +msgid "Error from iscsiadm -m discovery: %s" +msgstr "" + +#: cinder/volume/driver.py:595 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "Could not find iSCSI export for volume %s" + +#: cinder/volume/driver.py:599 cinder/volume/drivers/emc/emc_smis_iscsi.py:156 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "ISCSI Discovery: Found %s" + +#: cinder/volume/driver.py:696 +msgid "The volume driver requires the iSCSI initiator name in the connector." +msgstr "" + +#: cinder/volume/driver.py:726 cinder/volume/driver.py:845 +#: cinder/volume/drivers/eqlx.py:247 cinder/volume/drivers/lvm.py:359 +#: cinder/volume/drivers/zadara.py:650 +#: cinder/volume/drivers/emc/emc_smis_common.py:859 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:235 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:602 +#: cinder/volume/drivers/netapp/iscsi.py:1032 +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/nexenta/iscsi.py:538 +#: cinder/volume/drivers/windows/windows.py:205 +msgid "Updating volume stats" +msgstr "" + +#: cinder/volume/driver.py:924 +msgid "Driver must implement initialize_connection" +msgstr "Driver must implement initialize_connection" + +#: cinder/volume/manager.py:203 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." +msgstr "Driver path %s is deprecated, update your configuration to the new path." + +#: cinder/volume/manager.py:209 +msgid "" +"ThinLVMVolumeDriver is deprecated, please configure LVMISCSIDriver and " +"lvm_type=thin. Continuing with those settings." 
+msgstr "" + +#: cinder/volume/manager.py:228 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)" +msgstr "" + +#: cinder/volume/manager.py:235 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s" +msgstr "" + +#: cinder/volume/manager.py:244 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "Re-exporting %s volumes" + +#: cinder/volume/manager.py:257 +#, python-format +msgid "Failed to re-export volume %s: setting to error state" +msgstr "" + +#: cinder/volume/manager.py:264 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "volume %s stuck in a downloading state" + +#: cinder/volume/manager.py:271 +#, python-format +msgid "volume %s: skipping export" +msgstr "volume %s: skipping export" + +#: cinder/volume/manager.py:273 +#, python-format +msgid "" +"Error encountered during re-exporting phase of driver initialization: " +"%(name)s" +msgstr "" + +#: cinder/volume/manager.py:283 +msgid "Resuming any in progress delete operations" +msgstr "Resuming any in progress delete operations" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "Resuming delete on volume: %s" + +#: cinder/volume/manager.py:328 cinder/volume/manager.py:330 +msgid "Failed to create manager volume flow" +msgstr "" + +#: cinder/volume/manager.py:374 cinder/volume/manager.py:391 +#, python-format +msgid "volume %s: deleting" +msgstr "volume %s: deleting" + +#: cinder/volume/manager.py:380 +msgid "volume is not local to this node" +msgstr "volume is not local to this node" + +#: cinder/volume/manager.py:389 +#, python-format +msgid "volume %s: removing export" +msgstr "volume %s: removing export" + +#: cinder/volume/manager.py:394 +#, python-format +msgid "Cannot delete volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:422 +msgid "Failed to update usages deleting volume" +msgstr "Failed to update usages deleting volume" + +#: cinder/volume/manager.py:427 +#, python-format +msgid "volume %s: glance metadata deleted" +msgstr "" + +#: cinder/volume/manager.py:430 +#, python-format +msgid "no glance metadata found for volume %s" +msgstr "" + +#: cinder/volume/manager.py:434 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "volume %s: deleted successfully" + +#: cinder/volume/manager.py:451 +#, python-format +msgid "snapshot %s: creating" +msgstr "snapshot %s: creating" + +#: cinder/volume/manager.py:462 +#, python-format +msgid "snapshot %(snap_id)s: creating" +msgstr "" + +#: cinder/volume/manager.py:490 +#, python-format +msgid "" +"Failed updating %(snapshot_id)s metadata using the provided volumes " +"%(volume_id)s metadata" +msgstr "" + +#: cinder/volume/manager.py:496 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "snapshot %s: created successfully" + +#: cinder/volume/manager.py:508 cinder/volume/manager.py:518 +#, python-format +msgid "snapshot %s: deleting" +msgstr "snapshot %s: deleting" + +#: cinder/volume/manager.py:526 +#, python-format +msgid "Cannot delete snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:556 +msgid "Failed to update usages deleting snapshot" +msgstr "Failed to update usages deleting snapshot" + +#: cinder/volume/manager.py:559 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "snapshot %s: deleted successfully" + +#: cinder/volume/manager.py:579 +msgid "being attached by another instance" +msgstr "being attached by another instance" + +#: 
cinder/volume/manager.py:583 +msgid "being attached by another host" +msgstr "" + +#: cinder/volume/manager.py:587 +msgid "being attached by different mode" +msgstr "" + +#: cinder/volume/manager.py:590 +msgid "status must be available or attaching" +msgstr "" + +#: cinder/volume/manager.py:698 +#, python-format +msgid "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" + +#: cinder/volume/manager.py:760 +#, python-format +msgid "Unable to fetch connection information from backend: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:807 +#, python-format +msgid "Unable to terminate volume connection: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:854 +msgid "failed to create new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:857 +msgid "timeout creating new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:880 +#, python-format +msgid "Failed to copy volume %(vol1)s to %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:909 +#, python-format +msgid "" +"migrate_volume_completion: completing migration for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:921 +#, python-format +msgid "" +"migrate_volume_completion is cleaning up an error for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:940 +#, python-format +msgid "Failed to delete migration source vol %(vol)s: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:976 +#, python-format +msgid "volume %s: calling driver migrate_volume" +msgstr "" + +#: cinder/volume/manager.py:1016 +msgid "Updating volume status" +msgstr "Updating volume status" + +#: cinder/volume/manager.py:1024 +#, python-format +msgid "" +"Unable to update stats, %(driver_name)s -%(driver_version)s " +"%(config_group)s driver is uninitialized." +msgstr "" + +#: cinder/volume/manager.py:1044 +#, python-format +msgid "Notification {%s} received" +msgstr "Notification {%s} received" + +#: cinder/volume/manager.py:1091 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to extend volume by %(s_size)sG, " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/manager.py:1103 +#, python-format +msgid "volume %s: extending" +msgstr "" + +#: cinder/volume/manager.py:1105 +#, python-format +msgid "volume %s: extended successfully" +msgstr "" + +#: cinder/volume/manager.py:1107 +#, python-format +msgid "volume %s: Error trying to extend volume" +msgstr "" + +#: cinder/volume/manager.py:1169 +msgid "Failed to update usages while retyping volume." +msgstr "" + +#: cinder/volume/manager.py:1170 +msgid "Failed to get old volume type quota reservations" +msgstr "" + +#: cinder/volume/manager.py:1190 +#, python-format +msgid "Volume %s: retyped succesfully" +msgstr "" + +#: cinder/volume/manager.py:1193 +#, python-format +msgid "" +"Volume %s: driver error when trying to retype, falling back to generic " +"mechanism." +msgstr "" + +#: cinder/volume/manager.py:1204 +msgid "Retype requires migration but is not allowed." +msgstr "" + +#: cinder/volume/manager.py:1212 +msgid "Volume must not have snapshots." 
+msgstr "" + +#: cinder/volume/qos_specs.py:57 +#, python-format +msgid "Valid consumer of QoS specs are: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:84 cinder/volume/qos_specs.py:105 +#: cinder/volume/qos_specs.py:155 cinder/volume/qos_specs.py:197 +#: cinder/volume/qos_specs.py:211 cinder/volume/qos_specs.py:225 +#: cinder/volume/volume_types.py:43 +#, python-format +msgid "DB error: %s" +msgstr "DB error: %s" + +#: cinder/volume/qos_specs.py:123 cinder/volume/qos_specs.py:140 +#: cinder/volume/qos_specs.py:272 cinder/volume/volume_types.py:52 +#: cinder/volume/volume_types.py:99 +msgid "id cannot be None" +msgstr "id cannot be None" + +#: cinder/volume/qos_specs.py:156 +#, python-format +msgid "Failed to get all associations of qos specs %s" +msgstr "" + +#: cinder/volume/qos_specs.py:189 +#, python-format +msgid "" +"Type %(type_id)s is already associated with another qos specs: " +"%(qos_specs_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:198 +#, python-format +msgid "Failed to associate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:212 +#, python-format +msgid "Failed to disassociate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:226 +#, python-format +msgid "Failed to disassociate qos specs %s." +msgstr "" + +#: cinder/volume/qos_specs.py:284 cinder/volume/volume_types.py:111 +msgid "name cannot be None" +msgstr "name cannot be None" + +#: cinder/volume/utils.py:144 +#, python-format +msgid "" +"Incorrect value error: %(blocksize)s, it may indicate that " +"'volume_dd_blocksize' was configured incorrectly. Fall back to default." +msgstr "" + +#: cinder/volume/volume_types.py:130 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" +"Default volume type is not found, please check default_volume_type " +"config: %s" + +#: cinder/volume/drivers/block_device.py:131 +#: cinder/volume/drivers/block_device.py:143 cinder/volume/drivers/lvm.py:654 +#: cinder/volume/drivers/lvm.py:669 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "Skipping remove_export. No iscsi_target provisioned for volume: %s" + +#: cinder/volume/drivers/block_device.py:157 cinder/volume/drivers/lvm.py:687 +#, python-format +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %s" +msgstr "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %s" + +#: cinder/volume/drivers/block_device.py:183 cinder/volume/drivers/lvm.py:483 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "Skipping ensure_export. No iscsi_target provision for volume: %s" + +#: cinder/volume/drivers/block_device.py:200 cinder/volume/drivers/lvm.py:504 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "Skipping ensure_export. 
No iscsi_target provisioned for volume: %s" + +#: cinder/volume/drivers/block_device.py:272 cinder/volume/drivers/lvm.py:227 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "Performing secure delete on volume: %s" + +#: cinder/volume/drivers/block_device.py:287 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "Error unrecognized volume_clear option: %s" + +#: cinder/volume/drivers/block_device.py:311 cinder/volume/drivers/lvm.py:300 +#: cinder/volume/drivers/zadara.py:509 cinder/volume/drivers/nexenta/nfs.py:189 +#, python-format +msgid "Creating clone of volume: %s" +msgstr "Creating clone of volume: %s" + +#: cinder/volume/drivers/block_device.py:380 +msgid "No free disk" +msgstr "" + +#: cinder/volume/drivers/block_device.py:393 +msgid "No big enough free disk" +msgstr "" + +#: cinder/volume/drivers/coraid.py:84 +#, python-format +msgid "Invalid ESM url scheme \"%s\". Supported https only." +msgstr "" + +#: cinder/volume/drivers/coraid.py:111 +msgid "Invalid REST handle name. Expected path." +msgstr "" + +#: cinder/volume/drivers/coraid.py:134 +#, python-format +msgid "Call to json.loads() failed: %(ex)s. Response: %(resp)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:224 +msgid "Session is expired. Relogin on ESM." +msgstr "" + +#: cinder/volume/drivers/coraid.py:244 +msgid "Reply is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:246 +msgid "Error message is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:284 +#, python-format +msgid "Coraid Appliance ping failed: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:297 +#, python-format +msgid "Volume \"%(name)s\" created with VSX LUN \"%(lun)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:311 +#, python-format +msgid "Volume \"%s\" deleted." +msgstr "" + +#: cinder/volume/drivers/coraid.py:315 +#, python-format +msgid "Resize volume \"%(name)s\" to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:319 +#, python-format +msgid "Repository for volume \"%(name)s\" found: \"%(repo)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:333 +#, python-format +msgid "Volume \"%(name)s\" resized. New size is %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:385 +msgid "Cannot create clone volume in different repository." 
+msgstr "" + +#: cinder/volume/drivers/coraid.py:505 +#, python-format +msgid "Initialize connection %(shelf)s/%(lun)s for %(name)s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:139 +#, python-format +msgid "" +"CLI output\n" +"%s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:154 +msgid "Reading CLI MOTD" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:158 +#, python-format +msgid "Setting CLI terminal width: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:162 +#, python-format +msgid "Sending CLI command: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:169 +msgid "Error executing EQL command" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:199 +#, python-format +msgid "EQL-driver: executing \"%s\"" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:208 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:383 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:215 cinder/volume/drivers/san/san.py:149 +#, python-format +msgid "Error running SSH command: %s" +msgstr "Error running SSH command: %s" + +#: cinder/volume/drivers/eqlx.py:282 +#, python-format +msgid "Volume %s does not exist, it may have already been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:300 +#, python-format +msgid "EQL-driver: Setup is complete, group IP is %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:304 +msgid "Failed to setup the Dell EqualLogic driver" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:320 +#, python-format +msgid "Failed to create volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:329 +#, python-format +msgid "Volume %s was not found while trying to delete it" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:333 +#, python-format +msgid "Failed to delete volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:348 +#, python-format +msgid "Failed to create snapshot of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:361 +#, python-format +msgid "Failed to create volume from snapshot %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:374 +#, python-format +msgid "Failed to create clone of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:384 +#, python-format +msgid "Failed to delete snapshot %(snap)s of volume %(vol)s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:405 +#, python-format +msgid "Failed to initialize connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:415 +#, python-format +msgid "Failed to terminate connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:436 +#, python-format +msgid "Volume %s is not found!, it may have been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:440 +#, python-format +msgid "Failed to ensure export of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:459 +#, python-format +msgid "Failed to extend_volume %(name)s from %(current_size)sGB to %(new_size)sGB" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:86 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "There's no Gluster config file configured (%s)" + +#: cinder/volume/drivers/glusterfs.py:91 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "Gluster config file at %(config)s doesn't exist" + +#: cinder/volume/drivers/glusterfs.py:103 +msgid "mount.glusterfs is not installed" +msgstr "mount.glusterfs is not installed" + +#: cinder/volume/drivers/glusterfs.py:161 +#, python-format +msgid "Cloning volume %(src)s to volume %(dst)s" +msgstr "" + +#: 
cinder/volume/drivers/glusterfs.py:166 +msgid "Volume status must be 'available'." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:202 cinder/volume/drivers/nfs.py:122 +#: cinder/volume/drivers/netapp/nfs.py:753 +#, python-format +msgid "casted to %s" +msgstr "cast to %s" + +#: cinder/volume/drivers/glusterfs.py:216 +msgid "Snapshot status must be \"available\" to clone." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:238 +#, python-format +msgid "snapshot: %(snap)s, volume: %(vol)s, volume_size: %(size)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:257 +#, python-format +msgid "will copy from snapshot at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:275 cinder/volume/drivers/nfs.py:172 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "Volume %s does not have provider_location specified, skipping" + +#: cinder/volume/drivers/glusterfs.py:373 +#, python-format +msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:403 +#, python-format +msgid "nova call result: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:405 +msgid "Call to Nova to create snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:427 +msgid "Nova returned \"error\" status while creating snapshot." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:431 +#, python-format +msgid "Status of snapshot %(id)s is now %(status)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:444 +#, python-format +msgid "Timed out while waiting for Nova update for creation of snapshot %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:456 +#, python-format +msgid "create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:457 +#, python-format +msgid "volume id: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:532 +msgid "'active' must be present when writing snap_info." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:562 +#, python-format +msgid "deleting snapshot %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:566 +msgid "Volume status must be \"available\" or \"in-use\"." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:582 +#, python-format +msgid "" +"Snapshot record for %s is not present, allowing snapshot_delete to " +"proceed." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:587 +#, python-format +msgid "snapshot_file for this snap is %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:608 +#, python-format +msgid "No base file found for %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:625 +#, python-format +msgid "No %(base_id)s found for %(file)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:680 +#, python-format +msgid "No file found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:690 +#, python-format +msgid "No snap found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:701 +#, python-format +msgid "No file depends on %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:727 +#, python-format +msgid "Check condition failed: %s expected to be None." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:778 +msgid "Call to Nova delete snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:796 +#, python-format +msgid "status of snapshot %s is still \"deleting\"... waiting" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:802 +#, python-format +msgid "Unable to delete snapshot %(id)s, status: %(status)s."
+msgstr "" + +#: cinder/volume/drivers/glusterfs.py:815 +#, python-format +msgid "Timed out while waiting for Nova update for deletion of snapshot %(id)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:904 +#, python-format +msgid "%s must be a valid raw or qcow2 image." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:967 +msgid "Extend volume is only supported for this driver when no snapshots exist." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:975 +#, python-format +msgid "Unrecognized backing format: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:990 +#, python-format +msgid "creating new volume at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:993 +#, python-format +msgid "file already exists at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1019 cinder/volume/drivers/nfs.py:159 +#, python-format +msgid "Exception during mounting %s" +msgstr "Exception during mounting %s" + +#: cinder/volume/drivers/glusterfs.py:1021 +#, python-format +msgid "Available shares: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1038 +#, python-format +msgid "" +"GlusterFS share at %(dir)s is not writable by the Cinder volume service. " +"Snapshot operations will not be supported." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:96 +#, python-format +msgid "GPFS is not active. Detailed output: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:97 +#, python-format +msgid "GPFS is not running - state: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:140 +msgid "Option gpfs_mount_point_base is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:147 +msgid "Option gpfs_images_share_mode is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:153 +msgid "Option gpfs_images_dir is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:160 +#, python-format +msgid "" +"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " +"belong to different file systems" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:169 +#, python-format +msgid "" +"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in " +"cluster daemon level %(cur)s - must be at least at level %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:183 +#, python-format +msgid "%s must be an absolute path." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:188 +#, python-format +msgid "%s is not a directory." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:197 +#, python-format +msgid "" +"The GPFS filesystem %(fs)s is not at the required release level. Current" +" level is %(cur)s, must be at least %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:556 +#, python-format +msgid "Failed to resize volume %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:604 +#, python-format +msgid "mkfs failed on volume %(vol)s, error message was: %(err)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:637 +#, python-format +msgid "" +"%s cannot be accessed. Verify that GPFS is active and file system is " +"mounted." +msgstr "" + +#: cinder/volume/drivers/lvm.py:189 +#, python-format +msgid "Unabled to delete due to existing snapshot for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:215 +#, python-format +msgid "Volume device file path %s does not exist." +msgstr "" + +#: cinder/volume/drivers/lvm.py:221 +#, python-format +msgid "Size for volume: %s not found, cannot secure delete." 
+msgstr "" + +#: cinder/volume/drivers/lvm.py:262 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "snapshot: %s not found, skipping delete operations" + +#: cinder/volume/drivers/lvm.py:361 +#, python-format +msgid "Unable to update stats on non-initialized Volume Group: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:462 +#, python-format +msgid "Error creating iSCSI target, retrying creation for target: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:482 +#, python-format +msgid "volume_info:%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:518 +msgid "Detected inconsistency in provider_location id" +msgstr "Detected inconsistency in provider_location id" + +#: cinder/volume/drivers/lvm.py:519 cinder/volume/drivers/lvm.py:724 +#: cinder/volume/drivers/huawei/rest_common.py:1225 +#, python-format +msgid "%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:573 +#, python-format +msgid "Symbolic link %s not found" +msgstr "Symbolic link %s not found" + +#: cinder/volume/drivers/nfs.py:109 +msgid "Driver specific implementation needs to return mount_point_base." +msgstr "" + +#: cinder/volume/drivers/nfs.py:263 +#, python-format +msgid "Expected volume size was %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:264 +#, python-format +msgid " but size is now %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:361 +#, python-format +msgid "%s is already mounted" +msgstr "%s is already mounted" + +#: cinder/volume/drivers/nfs.py:421 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "There's no NFS config file configured (%s)" + +#: cinder/volume/drivers/nfs.py:426 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "NFS config file at %(config)s doesn't exist" + +#: cinder/volume/drivers/nfs.py:431 +#, python-format +msgid "NFS config 'nfs_oversub_ratio' invalid. Must be > 0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:439 +#, python-format +msgid "NFS config 'nfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:493 +#, python-format +msgid "Selected %s as target nfs share." 
+msgstr "" + +#: cinder/volume/drivers/nfs.py:526 +#, python-format +msgid "%s is above nfs_used_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:529 +#, python-format +msgid "%s is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:532 +#, python-format +msgid "%s reserved space is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/rbd.py:160 +#, python-format +msgid "Invalid argument - whence=%s not supported" +msgstr "" + +#: cinder/volume/drivers/rbd.py:164 +msgid "Invalid argument" +msgstr "" + +#: cinder/volume/drivers/rbd.py:183 +msgid "fileno() not supported by RBD()" +msgstr "" + +#: cinder/volume/drivers/rbd.py:210 +#, fuzzy, python-format +msgid "error opening rbd image %s" +msgstr "error opening rbd image %s" + +#: cinder/volume/drivers/rbd.py:259 +msgid "rados and rbd python libraries not found" +msgstr "" + +#: cinder/volume/drivers/rbd.py:265 +#, fuzzy +msgid "error connecting to ceph cluster" +msgstr "error connecting to ceph cluster" + +#: cinder/volume/drivers/rbd.py:346 cinder/volume/drivers/sheepdog.py:178 +msgid "error refreshing volume stats" +msgstr "error refreshing volume stats" + +#: cinder/volume/drivers/rbd.py:377 +#, python-format +msgid "clone depth exceeds limit of %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:411 +#, python-format +msgid "maximum clone depth (%d) has been reached - flattening source volume" +msgstr "" + +#: cinder/volume/drivers/rbd.py:423 +#, python-format +msgid "flattening source volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:435 +#, python-format +msgid "creating snapshot='%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:445 +#, python-format +msgid "cloning '%(src_vol)s@%(src_snap)s' to '%(dest)s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:459 +msgid "clone created successfully" +msgstr "" + +#: cinder/volume/drivers/rbd.py:468 +#, python-format +msgid "creating volume '%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:484 +#, python-format +msgid "flattening %(pool)s/%(img)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:490 +#, python-format +msgid "cloning %(pool)s/%(img)s@%(snap)s to %(dst)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:527 +msgid "volume has no backup snaps" +msgstr "" + +#: cinder/volume/drivers/rbd.py:550 +#, python-format +msgid "volume %s is not a clone" +msgstr "" + +#: cinder/volume/drivers/rbd.py:568 +#, python-format +msgid "deleting parent snapshot %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:579 +#, python-format +msgid "deleting parent %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:593 +#, python-format +msgid "volume %s no longer exists in backend" +msgstr "" + +#: cinder/volume/drivers/rbd.py:609 +msgid "volume has clone snapshot(s)" +msgstr "" + +#: cinder/volume/drivers/rbd.py:625 +#, python-format +msgid "deleting rbd volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:629 +msgid "" +"ImageBusy error raised while deleting rbd volume. This may have been " +"caused by a connection from a client that has crashed and, if so, may be " +"resolved by retrying the delete after 30 seconds has elapsed." 
+msgstr "" + +#: cinder/volume/drivers/rbd.py:642 +msgid "volume is a clone so cleaning references" +msgstr "" + +#: cinder/volume/drivers/rbd.py:696 +#, fuzzy, python-format +msgid "connection data: %s" +msgstr "connection data: %s" + +#: cinder/volume/drivers/rbd.py:705 +msgid "Not stored in rbd" +msgstr "Not stored in rbd" + +#: cinder/volume/drivers/rbd.py:709 +msgid "Blank components" +msgstr "Blank components" + +#: cinder/volume/drivers/rbd.py:712 +msgid "Not an rbd snapshot" +msgstr "Not an rbd snapshot" + +#: cinder/volume/drivers/rbd.py:724 +#, fuzzy, python-format +msgid "not cloneable: %s" +msgstr "not cloneable: %s" + +#: cinder/volume/drivers/rbd.py:728 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "%s is in a different ceph cluster" + +#: cinder/volume/drivers/rbd.py:733 +msgid "rbd image clone requires image format to be 'raw' but image {0} is '{1}'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:747 +#, fuzzy, python-format +msgid "Unable to open image %(loc)s: %(err)s" +msgstr "Unable to open image %(loc)s: %(err)s" + +#: cinder/volume/drivers/rbd.py:817 +msgid "volume backup complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:830 +msgid "volume restore complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:840 cinder/volume/drivers/sheepdog.py:195 +#, python-format +msgid "Failed to Extend Volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:845 cinder/volume/drivers/sheepdog.py:200 +#: cinder/volume/drivers/windows/windows.py:223 +#, python-format +msgid "Extend volume from %(old_size)s GB to %(new_size)s GB." +msgstr "" + +#: cinder/volume/drivers/scality.py:67 +msgid "Value required for 'scality_sofs_config'" +msgstr "Value required for 'scality_sofs_config'" + +#: cinder/volume/drivers/scality.py:78 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "Cannot access 'scality_sofs_config': %s" + +#: cinder/volume/drivers/scality.py:84 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "Cannot execute /sbin/mount.sofs" + +#: cinder/volume/drivers/scality.py:105 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "Cannot mount Scality SOFS, check syslog for errors" + +#: cinder/volume/drivers/scality.py:139 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "Cannot find volume dir for Scality SOFS at '%s'" + +#: cinder/volume/drivers/sheepdog.py:59 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "Sheepdog is not working: %s" + +#: cinder/volume/drivers/sheepdog.py:64 +msgid "Sheepdog is not working" +msgstr "Sheepdog is not working" + +#: cinder/volume/drivers/solidfire.py:144 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "Payload for SolidFire API call: %s" + +#: cinder/volume/drivers/solidfire.py:151 +#, python-format +msgid "" +"Failed to make httplib connection SolidFire Cluster: %s (verify san_ip " +"settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:154 +#, python-format +msgid "Failed to make httplib connection: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:161 +#, python-format +msgid "" +"Request to SolidFire cluster returned bad status: %(status)s / %(reason)s" +" (check san_login/san_password settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:166 +#, python-format +msgid "HTTP request failed, with status: %(status)s and reason: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:177 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "Call 
to json.loads() raised an exception: %s" + +#: cinder/volume/drivers/solidfire.py:183 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "Results of SolidFire API call: %s" + +#: cinder/volume/drivers/solidfire.py:187 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "Clone operation encountered: %s" + +#: cinder/volume/drivers/solidfire.py:189 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "Waiting for outstanding operation before retrying snapshot: %s" + +#: cinder/volume/drivers/solidfire.py:195 +#, python-format +msgid "Detected xDBVersionMismatch, retry %s of 5" +msgstr "Detected xDBVersionMismatch, retry %s of 5" + +#: cinder/volume/drivers/solidfire.py:202 +#: cinder/volume/drivers/solidfire.py:271 +#: cinder/volume/drivers/solidfire.py:366 +#, python-format +msgid "API response: %s" +msgstr "API response: %s" + +#: cinder/volume/drivers/solidfire.py:222 +#, python-format +msgid "Found solidfire account: %s" +msgstr "Found solidfire account: %s" + +#: cinder/volume/drivers/solidfire.py:253 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "solidfire account: %s does not exist, create it..." + +#: cinder/volume/drivers/solidfire.py:315 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" + +#: cinder/volume/drivers/solidfire.py:398 +msgid "Failed to get model update from clone" +msgstr "Failed to get model update from clone" + +#: cinder/volume/drivers/solidfire.py:410 +#, python-format +msgid "Failed volume create: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:425 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "More than one valid preset was detected, using %s" + +#: cinder/volume/drivers/solidfire.py:460 +#, python-format +msgid "Failed to get SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:469 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." + +#: cinder/volume/drivers/solidfire.py:478 +#, python-format +msgid "Volume %s, not found on SF Cluster." +msgstr "Volume %s, not found on SF Cluster." + +#: cinder/volume/drivers/solidfire.py:481 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "Found %(count)s volumes mapped to id: %(uuid)s." + +#: cinder/volume/drivers/solidfire.py:550 +msgid "Enter SolidFire delete_volume..." +msgstr "Enter SolidFire delete_volume..." + +#: cinder/volume/drivers/solidfire.py:554 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "Account for Volume ID %s was not found on the SolidFire Cluster!" + +#: cinder/volume/drivers/solidfire.py:556 +msgid "This usually means the volume was never successfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:569 +#, python-format +msgid "Failed to delete SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:572 +#: cinder/volume/drivers/solidfire.py:646 +#: cinder/volume/drivers/solidfire.py:709 +#: cinder/volume/drivers/solidfire.py:734 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "Volume ID %s was not found on the SolidFire Cluster!" 
+ +#: cinder/volume/drivers/solidfire.py:575 +msgid "Leaving SolidFire delete_volume" +msgstr "Leaving SolidFire delete_volume" + +#: cinder/volume/drivers/solidfire.py:579 +msgid "Executing SolidFire ensure_export..." +msgstr "Executing SolidFire ensure_export..." + +#: cinder/volume/drivers/solidfire.py:587 +msgid "Executing SolidFire create_export..." +msgstr "Executing SolidFire create_export..." + +#: cinder/volume/drivers/solidfire.py:638 +msgid "Entering SolidFire extend_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:660 +msgid "Leaving SolidFire extend_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:665 +msgid "Updating cluster status info" +msgstr "Updating cluster status info" + +#: cinder/volume/drivers/solidfire.py:673 +msgid "Failed to get updated stats" +msgstr "Failed to get updated stats" + +#: cinder/volume/drivers/solidfire.py:703 +#: cinder/volume/drivers/solidfire.py:728 +msgid "Entering SolidFire attach_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:773 +msgid "Leaving SolidFire transfer volume" +msgstr "" + +#: cinder/volume/drivers/zadara.py:236 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "Sending %(method)s to %(url)s. Body \"%(body)s\"" + +#: cinder/volume/drivers/zadara.py:260 +#, python-format +msgid "Operation completed. %(data)s" +msgstr "Operation completed. %(data)s" + +#: cinder/volume/drivers/zadara.py:357 +#, python-format +msgid "Pool %(name)s: %(total)sGB total, %(free)sGB free" +msgstr "" + +#: cinder/volume/drivers/zadara.py:408 cinder/volume/drivers/zadara.py:531 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "Volume %(name)s could not be found. It might be already deleted" + +#: cinder/volume/drivers/zadara.py:438 +#, python-format +msgid "Create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:445 cinder/volume/drivers/zadara.py:490 +#: cinder/volume/drivers/zadara.py:516 +#, python-format +msgid "Volume %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:456 +#, python-format +msgid "Delete snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:464 +#, python-format +msgid "snapshot: original volume %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:472 +#, python-format +msgid "snapshot: snapshot %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:483 +#, python-format +msgid "Creating volume from snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:496 +#, python-format +msgid "Snapshot %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:614 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "Attach properties: %(properties)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:40 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." + +#: cinder/volume/drivers/emc/emc_smis_common.py:79 +msgid "Entering create_volume." +msgstr "Entering create_volume." 
+ +#: cinder/volume/drivers/emc/emc_smis_common.py:83 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "Create Volume: %(volume)s Size: %(size)lu" + +#: cinder/volume/drivers/emc/emc_smis_common.py:91 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "Create Volume: %(volume)s Storage type: %(storage_type)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:98 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:107 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." +msgstr "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:115 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" + +#: cinder/volume/drivers/emc/emc_smis_common.py:130 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "Create Volume: %(volumename)s Return code: %(rc)lu" + +#: cinder/volume/drivers/emc/emc_smis_common.py:137 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:144 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "Leaving create_volume: %(volumename)s Return code: %(rc)lu" + +#: cinder/volume/drivers/emc/emc_smis_common.py:152 +msgid "Entering create_volume_from_snapshot." +msgstr "Entering create_volume_from_snapshot." + +#: cinder/volume/drivers/emc/emc_smis_common.py:157 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:167 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." +msgstr "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:177 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." + +#: cinder/volume/drivers/emc/emc_smis_common.py:188 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." 
+msgstr "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." + +#: cinder/volume/drivers/emc/emc_smis_common.py:197 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:218 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:230 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." + +#: cinder/volume/drivers/emc/emc_smis_common.py:241 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:257 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" + +#: cinder/volume/drivers/emc/emc_smis_common.py:266 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:278 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." + +#: cinder/volume/drivers/emc/emc_smis_common.py:287 +msgid "Entering create_cloned_volume." +msgstr "Entering create_cloned_volume." 
+ +#: cinder/volume/drivers/emc/emc_smis_common.py:292 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:302 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:312 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." + +#: cinder/volume/drivers/emc/emc_smis_common.py:321 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:342 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. Return code: %(rc)lu.Error: %(error)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:354 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." + +#: cinder/volume/drivers/emc/emc_smis_common.py:365 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:381 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" + +#: cinder/volume/drivers/emc/emc_smis_common.py:390 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. 
Error: %(error)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:402 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." + +#: cinder/volume/drivers/emc/emc_smis_common.py:411 +msgid "Entering delete_volume." +msgstr "Entering delete_volume." + +#: cinder/volume/drivers/emc/emc_smis_common.py:413 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "Delete Volume: %(volume)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:420 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "Volume %(name)s not found on the array. No volume to delete." + +#: cinder/volume/drivers/emc/emc_smis_common.py:430 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." +msgstr "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." + +#: cinder/volume/drivers/emc/emc_smis_common.py:438 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "Delete Volume: %(name)s DeviceID: %(deviceid)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:442 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:456 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:465 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" + +#: cinder/volume/drivers/emc/emc_smis_common.py:472 +msgid "Entering create_snapshot." +msgstr "Entering create_snapshot." + +#: cinder/volume/drivers/emc/emc_smis_common.py:476 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "Create snapshot: %(snapshot)s: volume: %(volume)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:488 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:497 +#: cinder/volume/drivers/emc/emc_smis_common.py:567 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." +msgstr "Cannot find Replication Service to create snapshot for volume %s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:502 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." +msgstr "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." 
+ +#: cinder/volume/drivers/emc/emc_smis_common.py:518 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" + +#: cinder/volume/drivers/emc/emc_smis_common.py:527 +#, fuzzy, python-format +msgid "" +"Error Create Snapshot: %(snapshot)s Volume: %(volume)s Error: " +"%(errordesc)s" +msgstr "Error Create Snapshot: %(snapshot)s Volume: %(volume)s Error: %(errordesc)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:535 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." + +#: cinder/volume/drivers/emc/emc_smis_common.py:541 +msgid "Entering delete_snapshot." +msgstr "Entering delete_snapshot." + +#: cinder/volume/drivers/emc/emc_smis_common.py:545 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "Delete Snapshot: %(snapshot)s: volume: %(volume)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:551 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." + +#: cinder/volume/drivers/emc/emc_smis_common.py:559 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." + +#: cinder/volume/drivers/emc/emc_smis_common.py:574 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:590 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" + +#: cinder/volume/drivers/emc/emc_smis_common.py:599 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:611 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu."
+ +#: cinder/volume/drivers/emc/emc_smis_common.py:621 +#, python-format +msgid "Create export: %(volume)s" +msgstr "Create export: %(volume)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:626 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "create_export: Volume: %(volume)s Device ID: %(device_id)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:648 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" + +#: cinder/volume/drivers/emc/emc_smis_common.py:663 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:674 +#, python-format +msgid "Error mapping volume %s." +msgstr "Error mapping volume %s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:678 +#, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "ExposePaths for volume %s completed successfully." + +#: cinder/volume/drivers/emc/emc_smis_common.py:694 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:707 +#, python-format +msgid "Error unmapping volume %s." +msgstr "Error unmapping volume %s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:711 +#, python-format +msgid "HidePaths for volume %s completed successfully." +msgstr "HidePaths for volume %s completed successfully." + +#: cinder/volume/drivers/emc/emc_smis_common.py:724 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:739 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "Error mapping volume %(vol)s. %(error)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:744 +#, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "AddMembers for volume %s completed successfully." + +#: cinder/volume/drivers/emc/emc_smis_common.py:757 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:770 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "Error unmapping volume %(vol)s. %(error)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:775 +#, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "RemoveMembers for volume %s completed successfully." 
+ +#: cinder/volume/drivers/emc/emc_smis_common.py:781 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "Map volume: %(volume)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:790 +#: cinder/volume/drivers/emc/emc_smis_common.py:820 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "Cannot find Controller Configuration Service for storage system %s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:804 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "Unmap volume: %(volume)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:810 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "Volume %s is not mapped. No volume to unmap." + +#: cinder/volume/drivers/emc/emc_smis_common.py:834 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "Initialize connection: %(volume)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:840 +#, python-format +msgid "Volume %s is already mapped." +msgstr "Volume %s is already mapped." + +#: cinder/volume/drivers/emc/emc_smis_common.py:852 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "Terminate connection: %(volume)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:884 +#, python-format +msgid "Found Storage Type: %s" +msgstr "Found Storage Type: %s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:887 +msgid "Storage type not found." +msgstr "Storage type not found." + +#: cinder/volume/drivers/emc/emc_smis_common.py:903 +#, python-format +msgid "Found Masking View: %s" +msgstr "Found Masking View: %s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:906 +msgid "Masking View not found." +msgstr "Masking View not found." + +#: cinder/volume/drivers/emc/emc_smis_common.py:928 +msgid "Ecom user not found." +msgstr "Ecom user not found." + +#: cinder/volume/drivers/emc/emc_smis_common.py:948 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:952 +msgid "Ecom server not found." +msgstr "Ecom server not found." + +#: cinder/volume/drivers/emc/emc_smis_common.py:959 +msgid "Cannot connect to ECOM server" +msgstr "Cannot connect to ECOM server" + +#: cinder/volume/drivers/emc/emc_smis_common.py:971 +#, python-format +msgid "Found Replication Service: %s" +msgstr "Found Replication Service: %s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:984 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "Found Storage Configuration Service: %s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:997 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "Found Controller Configuration Service: %s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1010 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "Found Storage Hardware ID Management Service: %s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1054 +#, python-format +msgid "Pool %(storage_type)s is not found." +msgstr "Pool %(storage_type)s is not found." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1060 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "Storage system not found for pool %(storage_type)s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1066 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "Pool: %(pool)s SystemName: %(systemname)s." 
+ +#: cinder/volume/drivers/emc/emc_smis_common.py:1082 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "Pool name: %(poolname)s System name: %(systemname)s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1114 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "Volume %(volumename)s not found on the array." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1117 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "Volume name: %(volumename)s Volume instance: %(vol_instance)s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1130 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "Source: %(volumename)s Target: %(snapshotname)s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " + +#: cinder/volume/drivers/emc/emc_smis_common.py:1158 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1184 +#, python-format +msgid "Error finding %s." +msgstr "Error finding %s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1188 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "Found %(name)s: %(initiator)s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1248 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1289 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1302 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1314 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1326 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1361 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "Available device number on %(storage)s: %(device)s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1404 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "Device number not found for volume %(volumename)s %(vol_instance)s." 
+ +#: cinder/volume/drivers/emc/emc_smis_common.py:1409 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1419 +#, python-format +msgid "Device info: %(data)s." +msgstr "Device info: %(data)s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1441 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1463 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "Found Storage Processor System: %s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1491 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." +msgstr "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1520 +msgid "Error finding Storage Hardware ID Service." +msgstr "Error finding Storage Hardware ID Service." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1526 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1538 +msgid "Error finding Target WWNs." +msgstr "Error finding Target WWNs." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1548 +#, python-format +msgid "Add target WWN: %s." +msgstr "Add target WWN: %s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1550 +#, python-format +msgid "Target WWNs: %s." +msgstr "Target WWNs: %s." + +#: cinder/volume/drivers/emc/emc_smis_common.py:1566 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:152 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "Could not find iSCSI export for volume %s" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:161 +#, python-format +msgid "Cannot find device number for volume %s" +msgstr "Cannot find device number for volume %s" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:191 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "Found iSCSI endpoint: %s" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:198 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." 
+ +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:215 +#, python-format +msgid "ISCSI properties: %s" +msgstr "ISCSI properties: %s" + +#: cinder/volume/drivers/hds/hds.py:70 +#, python-format +msgid "Range: start LU: %(start)s, end LU: %(end)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:84 +#, python-format +msgid "setting LU upper (end) limit to %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:92 +#, python-format +msgid "%(element)s: %(val)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:103 cinder/volume/drivers/hds/hds.py:105 +#, fuzzy, python-format +msgid "XML exception reading parameter: %s" +msgstr "XML exception reading parameter: %s" + +#: cinder/volume/drivers/hds/hds.py:178 +#, python-format +msgid "portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:197 +#, fuzzy, python-format +msgid "No configuration found for service: %s" +msgstr "No configuration found for service: %s" + +#: cinder/volume/drivers/hds/hds.py:250 +#, fuzzy, python-format +msgid "HDP not found: %s" +msgstr "HDP not found: %s" + +#: cinder/volume/drivers/hds/hds.py:289 +#, fuzzy, python-format +msgid "iSCSI portal not found for service: %s" +msgstr "iSCSI portal not found for service: %s" + +#: cinder/volume/drivers/hds/hds.py:327 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:355 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is cloned." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:372 +#, python-format +msgid "LUN %(lun)s extended to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:395 +#, fuzzy, python-format +msgid "delete lun %(lun)s on %(name)s" +msgstr "delete lun %(lun)s on %(name)s" + +#: cinder/volume/drivers/hds/hds.py:480 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created from snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:503 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is created as snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:522 +#, fuzzy, python-format +msgid "LUN %s is deleted." +msgstr "LUN %s is deleted." + +#: cinder/volume/drivers/huawei/__init__.py:57 +msgid "_instantiate_driver: configuration not found." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:64 +#, python-format +msgid "" +"_instantiate_driver: Loading %(protocol)s driver for Huawei OceanStor " +"%(product)s series storage arrays." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:84 +#, python-format +msgid "" +"\"Product\" or \"Protocol\" is illegal. \"Product\" should be set to " +"either T, Dorado or HVS. \"Protocol\" should be set to either iSCSI or " +"FC. Product: %(product)s Protocol: %(protocol)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:74 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s host: %(host)s initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:92 +#: cinder/volume/drivers/huawei/huawei_t.py:461 +#, python-format +msgid "initialize_connection: Target FC ports WWNS: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:101 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(ini)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:159 +#: cinder/volume/drivers/huawei/rest_common.py:1278 +#, python-format +msgid "" +"_get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " +"check config file." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:206 +#: cinder/volume/drivers/huawei/rest_common.py:1083 +#, python-format +msgid "_get_tgt_iqn: iSCSI IP is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:234 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:248 +#, python-format +msgid "" +"_get_iscsi_tgt_port_info: Failed to get iSCSI port info. Please make sure" +" the iSCSI port IP %s is configured in array." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:323 +#: cinder/volume/drivers/huawei/huawei_t.py:552 +#, python-format +msgid "" +"terminate_connection: volume: %(vol)s, host: %(host)s, connector: " +"%(initiator)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:351 +#, python-format +msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:436 +msgid "validate_connector: The FC driver requires thewwpns in the connector." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:443 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:578 +#, python-format +msgid "_remove_fc_ports: FC port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:40 +#, python-format +msgid "parse_xml_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:129 +#, python-format +msgid "_get_host_os_type: Host %(ip)s OS type is %(os)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:59 +#, python-format +msgid "HVS Request URL: %(url)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:60 +#, python-format +msgid "HVS Request Data: %(data)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:73 +#, python-format +msgid "HVS Response Data: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:75 +#, python-format +msgid "Bad response from server: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:82 +msgid "JSON transfer error" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:102 +#, python-format +msgid "Login error, reason is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:166 +#, python-format +msgid "" +"%(err)s\n" +"result: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:173 +#, python-format +msgid "%s \"data\" was not in result." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:208 +msgid "Can't find the Qos policy in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:246 +msgid "Can't find lun or lun group in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:280 +#, python-format +msgid "Invalid resource pool: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:298 +#, python-format +msgid "Get pool info error, pool name is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:327 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." + +#: cinder/volume/drivers/huawei/rest_common.py:354 +#, python-format +msgid "_stop_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:474 +#, python-format +msgid "" +"_mapping_hostgroup_and_lungroup: lun_group: %(lun_group)sview_id: " +"%(view_id)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:511 +#: cinder/volume/drivers/huawei/rest_common.py:543 +#, python-format +msgid "initiator name:%(initiator_name)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:527 +#, python-format +msgid "host lun id is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:553 +#, python-format +msgid "the free wwns %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:574 +#, python-format +msgid "the fc server properties is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:688 +#, python-format +msgid "JSON transfer data error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:874 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." + +#: cinder/volume/drivers/huawei/rest_common.py:937 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:964 +#, python-format +msgid "" +"PrefetchType config is wrong. PrefetchType must in 1,2,3,4. fetchtype " +"is:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:970 +msgid "Use default prefetch fetchtype. Prefetch fetchtype:Intelligent." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:982 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal.LUNcopy name: " +"%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1056 +#, python-format +msgid "" +"_get_iscsi_port_info: Failed to get iscsi port info through config IP " +"%(ip)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1101 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1124 +#, python-format +msgid "_parse_volume_type: type id: %(type_id)s config parameter is: %(params)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1157 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the configuration file " +"%(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1162 +#, python-format +msgid "The config parameters are: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1239 +#: cinder/volume/drivers/huawei/ssh_common.py:118 +#: cinder/volume/drivers/huawei/ssh_common.py:1265 +#, python-format +msgid "_check_conf_file: Config file invalid. %s must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1246 +#: cinder/volume/drivers/huawei/ssh_common.py:125 +msgid "_check_conf_file: Config file invalid. StoragePool must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1256 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1300 +msgid "Can not find lun in array" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:54 +#, python-format +msgid "ssh_read: Read SSH timeout. 
%s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:70 +msgid "No response message. Please check system status." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:101 +#: cinder/volume/drivers/huawei/ssh_common.py:1249 +msgid "do_setup" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:135 +#: cinder/volume/drivers/huawei/ssh_common.py:1287 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType is invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:169 +#, python-format +msgid "_get_login_info: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:224 +#, python-format +msgid "create_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:242 +#, python-format +msgid "" +"_name_translate: Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:279 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the element in configuration " +"file %(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:373 +#: cinder/volume/drivers/huawei/ssh_common.py:1451 +#, python-format +msgid "LUNType must be \"Thin\" or \"Thick\". LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:395 +msgid "" +"_parse_conf_lun_params: Use default prefetch type. Prefetch type: " +"Intelligent" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:421 +#, python-format +msgid "" +"_get_maximum_capacity_pool_id: Failed to get pool id. Please check config" +" file and make sure the StoragePool %s is created in storage array." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:436 +#, python-format +msgid "CLI command: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:466 +#, python-format +msgid "" +"_execute_cli: Can not connect to IP %(old)s, try to connect to the other " +"IP %(new)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:501 +#, python-format +msgid "_execute_cli: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:511 +#, python-format +msgid "delete_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:516 +#, python-format +msgid "delete_volume: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:570 +#, python-format +msgid "" +"create_volume_from_snapshot: snapshot name: %(snapshot)s, volume name: " +"%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:580 +#, python-format +msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:650 +#, python-format +msgid "_wait_for_luncopy: LUNcopy %(luncopyname)s status is %(status)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:688 +#, python-format +msgid "create_cloned_volume: src volume: %(src)s, tgt volume: %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:697 +#, python-format +msgid "Source volume %(name)s does not exist." 
+msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:739 +#, python-format +msgid "" +"extend_volume: extended volume name: %(extended_name)s new added volume " +"name: %(added_name)s new added volume size: %(added_size)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:747 +#, python-format +msgid "extend_volume: volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:779 +#, python-format +msgid "create_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:785 +msgid "create_snapshot: Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:792 +#, python-format +msgid "create_snapshot: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:855 +#, python-format +msgid "delete_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:865 +#, python-format +msgid "" +"delete_snapshot: Can not delete snapshot %s for it is a source LUN of " +"LUNCopy." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:873 +#, python-format +msgid "delete_snapshot: Snapshot %(snap)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:916 +#, python-format +msgid "" +"%(func)s: %(msg)s\n" +"CLI command: %(cmd)s\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:933 +#, python-format +msgid "map_volume: Volume %s was not found." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1079 +#, python-format +msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1102 +#, python-format +msgid "remove_map: Host %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1106 +#, python-format +msgid "remove_map: Volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1119 +#, python-format +msgid "remove_map: No map between host %(host)s and volume %(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1138 +#, python-format +msgid "" +"_delete_map: There are IOs accessing the system. Retry to delete host map" +" %(mapid)s 10s later." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1146 +#, python-format +msgid "" +"_delete_map: Failed to delete host map %(mapid)s.\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1185 +msgid "_update_volume_stats: Updating volume stats." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1277 +msgid "_check_conf_file: Config file invalid. StoragePool must be specified." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1311 +msgid "" +"_get_device_type: The driver only supports Dorado5100 and Dorado 2100 G2 " +"now." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1389 +#, python-format +msgid "" +"create_volume_from_snapshot: %(device)s does not support create volume " +"from snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1396 +#, python-format +msgid "create_cloned_volume: %(device)s does not support clone volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1404 +#, python-format +msgid "extend_volume: %(device)s does not support extend volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1413 +#, python-format +msgid "create_snapshot: %(device)s does not support snapshot." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:132 +msgid "enter: do_setup" +msgstr "enter: do_setup" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:142 +#, python-format +msgid "Failed getting details for pool %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:179 +msgid "do_setup: No configured nodes." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:182 +msgid "leave: do_setup" +msgstr "leave: do_setup" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:186 +msgid "enter: check_for_setup_error" +msgstr "enter: check_for_setup_error" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:190 +msgid "Unable to determine system name" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:193 +msgid "Unable to determine system id" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:196 +msgid "Unable to determine pool extent size" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:203 +#: cinder/volume/drivers/netapp/iscsi.py:122 +#: cinder/volume/drivers/netapp/nfs.py:639 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:157 +#, python-format +msgid "%s is not set" +msgstr "%s is not set" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:209 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:217 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:225 +msgid "leave: check_for_setup_error" +msgstr "leave: check_for_setup_error" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:235 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "ensure_export: Volume %s not found on storage" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:254 +msgid "The connector does not contain the required information." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:277 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "enter: initialize_connection: volume %(vol)s with connector %(conn)s" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:297 +msgid "CHAP secret exists for host but CHAP is disabled" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:302 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "initialize_connection: Failed to get attributes for volume %s" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:314 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "Did not find expected column name in lsvdisk: %s" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:316 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "initialize_connection: Missing volume attribute for volume %s" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:333 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:342 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "initialize_connection: Did not find a preferred node for volume %s" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:365 +msgid "" +"Could not get FC connection information for the host-volume connection. " +"Is the host configured properly for FC connections?" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:380 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:385 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:403 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "enter: terminate_connection: volume %(vol)s with connector %(conn)s" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:411 +msgid "terminate_connection: Failed to get host name from connector." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:421 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "leave: terminate_connection: volume %(vol)s with connector %(conn)s" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:447 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "create_volume_from_snapshot: Source and destination size differ." + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:459 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "create_cloned_volume: Source and destination size differ." 
+
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:470
+#, python-format
+msgid "enter: extend_volume: volume %s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:474
+msgid "extend_volume: Extending a volume with snapshots is not supported."
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:481
+#, python-format
+msgid "leave: extend_volume: volume %s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:497
+#, python-format
+msgid "enter: migrate_volume: id=%(id)s, host=%(host)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:523
+#, python-format
+msgid "leave: migrate_volume: id=%(id)s, host=%(host)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:540
+#, python-format
+msgid ""
+"enter: retype: id=%(id)s, new_type=%(new_type)s, diff=%(diff)s, "
+"host=%(host)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:581
+#, python-format
+msgid ""
+"exit: retype: id=%(id)s, new_type=%(new_type)s, diff=%(diff)s, "
+"host=%(host)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:622
+msgid "Could not get pool data from the storage"
+msgstr "Could not get pool data from the storage"
+
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:623
+msgid "_update_volume_stats: Could not get storage pool data"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:44
+#, python-format
+msgid "Could not find key in output of command %(cmd)s: %(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:64
+#, python-format
+msgid "Failed to get code level (%s)."
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:86
+#, python-format
+msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:143
+#, python-format
+msgid "WWPN on node %(node)s: %(wwpn)s"
+msgstr "WWPN on node %(node)s: %(wwpn)s"
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:165
+#, python-format
+msgid "Failed to find host %s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:178
+#, python-format
+msgid "enter: get_host_from_connector: %s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:207
+#, python-format
+msgid "leave: get_host_from_connector: host %s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:218
+#, python-format
+msgid "enter: create_host: host %s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:224
+msgid "create_host: Host name is not unicode or string"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:235
+msgid "create_host: No initiators or wwpns supplied."
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:265
+#, python-format
+msgid "leave: create_host: host %(host)s - %(host_name)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:275
+#, python-format
+msgid "enter: map_vol_to_host: volume %(volume_name)s to host %(host_name)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:301
+#, python-format
+msgid ""
+"leave: map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host "
+"%(host_name)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:311
+#, python-format
+msgid "enter: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:318
+#, python-format
+msgid "unmap_vol_from_host: No mapping of volume %(vol_name)s to any host found."
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:324
+#, python-format
+msgid ""
+"unmap_vol_from_host: Multiple mappings of volume %(vol_name)s found, no "
+"host specified."
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:336
+#, python-format
+msgid ""
+"unmap_vol_from_host: No mapping of volume %(vol_name)s to host %(host)s "
+"found."
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:348
+#, python-format
+msgid "leave: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:377
+msgid ""
+"Illegal value specified for storwize_svc_vol_rsize: set to either a "
+"percentage (0-100) or -1"
+msgstr ""
+"Illegal value specified for storwize_svc_vol_rsize: set to either a "
+"percentage (0-100) or -1"
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:383
+msgid ""
+"Illegal value specified for storwize_svc_vol_warning: set to a percentage"
+" (0-100)"
+msgstr ""
+"Illegal value specified for storwize_svc_vol_warning: set to a percentage"
+" (0-100)"
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:390
+msgid ""
+"Illegal value specified for storwize_svc_vol_grainsize: set to either 32,"
+" 64, 128, or 256"
+msgstr ""
+"Illegal value specified for storwize_svc_vol_grainsize: set to either 32,"
+" 64, 128, or 256"
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:397
+msgid "System does not support compression"
+msgstr "System does not support compression"
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:402
+msgid "If compression is set to True, rsize must also be set (not equal to -1)"
+msgstr "If compression is set to True, rsize must also be set (not equal to -1)"
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:408
+#, python-format
+msgid ""
+"Illegal value %(prot)s specified for storwize_svc_connection_protocol: "
+"valid values are %(enabled)s"
+msgstr ""
+"Illegal value %(prot)s specified for storwize_svc_connection_protocol: "
+"valid values are %(enabled)s"
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:417
+#, python-format
+msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:452
+msgid "Protocol must be specified as 'iSCSI' or 'FC'."
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:495
+#, python-format
+msgid "enter: create_vdisk: vdisk %s "
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:498
+#, python-format
+msgid "leave: _create_vdisk: volume %s "
+msgstr "leave: _create_vdisk: volume %s "
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:525
+#, python-format
+msgid ""
+"Unexpected mapping status %(status)s for mapping %(id)s. Attributes: "
+"%(attr)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:535
+#, python-format
+msgid ""
+"Mapping %(id)s prepare failed to complete within the allotted %(to)d "
+"seconds timeout. Terminating."
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:544
+#, python-format
+msgid ""
+"enter: run_flashcopy: execute FlashCopy from source %(source)s to target "
+"%(target)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:552
+#, python-format
+msgid "leave: run_flashcopy: FlashCopy started from %(source)s to %(target)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:572
+#, python-format
+msgid "Loopcall: _check_vdisk_fc_mappings(), vdisk %s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:595
+#, python-format
+msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s"
+msgstr "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s"
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:631
+#, python-format
+msgid "Calling _ensure_vdisk_no_fc_mappings: vdisk %s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:639
+#, python-format
+msgid "enter: delete_vdisk: vdisk %s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:641
+#, python-format
+msgid "Tried to delete non-existent vdisk %s."
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:645
+#, python-format
+msgid "leave: delete_vdisk: vdisk %s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:649
+#, python-format
+msgid "enter: create_copy: snapshot %(src)s to %(tgt)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:654
+#, python-format
+msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:669
+#, python-format
+msgid "leave: _create_copy: snapshot %(tgt)s from vdisk %(src)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:691
+msgid "migrate_volume started without a vdisk copy in the expected pool."
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:743
+#, python-format
+msgid ""
+"Ignoring IO group change as storage code level is %(code_level)s, below "
+"6.4.0.0"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:35
+#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:211
+#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:244
+#, python-format
+msgid ""
+"CLI Exception output:\n"
+" command: %(cmd)s\n"
+" stdout: %(out)s\n"
+" stderr: %(err)s"
+msgstr ""
+"CLI Exception output:\n"
+" command: %(cmd)s\n"
+" stdout: %(out)s\n"
+" stderr: %(err)s"
+
+#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:53
+#, python-format
+msgid "Expected no output from CLI command %(cmd)s, got %(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:65
+#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:256
+#, python-format
+msgid ""
+"Failed to parse CLI output:\n"
+" command: %(cmd)s\n"
+" stdout: %(out)s\n"
+" stderr: %(err)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:142
+msgid "Must pass wwpn or host to lsfabric."
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:158
+#, python-format
+msgid "Did not find success message nor error for %(fun)s: %(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:163
+msgid ""
+"storwize_svc_multihostmap_enabled is set to False, not allowing multi "
+"host mapping."
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:347
+#, python-format
+msgid "Did not find expected key %(key)s in %(fun)s: %(raw)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:382
+#, python-format
+msgid ""
+"Unexpected CLI response: header/row mismatch. header: %(header)s, row: "
+"%(row)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/api.py:419
+#, python-format
+msgid "No element by given name %s."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/api.py:440
+msgid "Not a valid value for NaElement."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/api.py:444
+msgid "NaElement name cannot be null."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/api.py:468
+msgid "Type cannot be converted into NaElement."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/common.py:75
+msgid "Required configuration not found"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/common.py:103
+#, python-format
+msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/common.py:109
+#, python-format
+msgid "Storage family %s is not supported"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/common.py:116
+#, python-format
+msgid "No default storage protocol found for storage family %(storage_family)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/common.py:123
+#, python-format
+msgid ""
+"Protocol %(storage_protocol)s is not supported for storage family "
+"%(storage_family)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/common.py:130
+#, python-format
+msgid ""
+"NetApp driver of family %(storage_family)s and protocol "
+"%(storage_protocol)s loaded"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/common.py:139
+msgid "Only loading netapp drivers supported."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/common.py:158
+#, python-format
+msgid ""
+"The configured NetApp driver is deprecated. Please refer to the link to "
+"resolve the issue '%s'."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:69
+#, python-format
+msgid "No metadata property %(prop)s defined for the LUN %(name)s"
+msgstr "No metadata property %(prop)s defined for the LUN %(name)s"
+
+#: cinder/volume/drivers/netapp/iscsi.py:105
+#, python-format
+msgid "Using NetApp filer: %s"
+msgstr "Using NetApp filer: %s"
+
+#: cinder/volume/drivers/netapp/iscsi.py:150
+msgid "Success getting LUN list from server"
+msgstr "Success getting LUN list from server"
+
+#: cinder/volume/drivers/netapp/iscsi.py:166
+#, python-format
+msgid "Created LUN with name %s"
+msgstr "Created LUN with name %s"
+
+#: cinder/volume/drivers/netapp/iscsi.py:175
+#, python-format
+msgid "No entry in LUN table for volume/snapshot %(name)s."
+msgstr "No entry in LUN table for volume/snapshot %(name)s."
+
+#: cinder/volume/drivers/netapp/iscsi.py:191
+#, python-format
+msgid "Destroyed LUN %s"
+msgstr "Destroyed LUN %s"
+
+#: cinder/volume/drivers/netapp/iscsi.py:227
+#, python-format
+msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s"
+msgstr "Mapped LUN %(name)s to the initiator %(initiator_name)s"
+
+#: cinder/volume/drivers/netapp/iscsi.py:232
+#, python-format
+msgid ""
+"Successfully fetched target details for LUN %(name)s and initiator "
+"%(initiator_name)s"
+msgstr ""
+"Successfully fetched target details for LUN %(name)s and initiator "
+"%(initiator_name)s"
+
+#: cinder/volume/drivers/netapp/iscsi.py:238
+#, python-format
+msgid "Failed to get LUN target details for the LUN %s"
+msgstr "Failed to get LUN target details for the LUN %s"
+
+#: cinder/volume/drivers/netapp/iscsi.py:249
+#, python-format
+msgid "Failed to get target portal for the LUN %s"
+msgstr "Failed to get target portal for the LUN %s"
+
+#: cinder/volume/drivers/netapp/iscsi.py:252
+#, python-format
+msgid "Failed to get target IQN for the LUN %s"
+msgstr "Failed to get target IQN for the LUN %s"
+
+#: cinder/volume/drivers/netapp/iscsi.py:290
+#, python-format
+msgid "Snapshot %s deletion successful"
+msgstr "Snapshot %s deletion successful"
+
+#: cinder/volume/drivers/netapp/iscsi.py:310
+#: cinder/volume/drivers/netapp/iscsi.py:565
+#: cinder/volume/drivers/netapp/nfs.py:99
+#: cinder/volume/drivers/netapp/nfs.py:206
+#, python-format
+msgid "Resizing %s failed. Cleaning volume."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:325
+#, python-format
+msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s"
+msgstr "Unmapped LUN %(name)s from the initiator %(initiator_name)s"
+
+#: cinder/volume/drivers/netapp/iscsi.py:412
+#, python-format
+msgid "Error mapping lun. Code: %(code)s, Message: %(message)s"
+msgstr "Error mapping lun. Code: %(code)s, Message: %(message)s"
+
+#: cinder/volume/drivers/netapp/iscsi.py:431
+#, python-format
+msgid "Error unmapping lun. Code: %(code)s, Message: %(message)s"
+msgstr "Error unmapping lun. Code: %(code)s, Message: %(message)s"
+
+#: cinder/volume/drivers/netapp/iscsi.py:511
+msgid "Object is not a NetApp LUN."
+msgstr "Object is not a NetApp LUN."
+
+#: cinder/volume/drivers/netapp/iscsi.py:543
+#, python-format
+msgid "Message: %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:545
+#, python-format
+msgid "Error getting lun attribute. Exception: %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:600
+#, python-format
+msgid "No need to extend volume %s as it is already the requested new size."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:606
+#, python-format
+msgid "Resizing lun %s directly to new size."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:633
+#, python-format
+msgid "Lun %(path)s geometry failed. Message - %(msg)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:662
+#, python-format
+msgid "Moving lun %(name)s to %(new_name)s."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:677
+#, python-format
+msgid "Resizing lun %s using sub clone to new size."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:684
+#, python-format
+msgid "%s cannot be sub clone resized as it is hosted on a compressed volume"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:690
+#, python-format
+msgid "%s cannot be sub clone resized as it contains no blocks."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:707
+#, python-format
+msgid "Post clone resize lun %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:718
+#, python-format
+msgid "Failure staging lun %s to tmp."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:723
+#, python-format
+msgid "Failure moving new cloned lun to %s."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:727
+#, python-format
+msgid "Failure deleting staged tmp lun %s."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:730
+#, python-format
+msgid "Unknown exception in post clone resize lun %s."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:732
+#, python-format
+msgid "Exception details: %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:736
+msgid "Getting lun block count."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:741
+#, python-format
+msgid "Failure getting lun info for %s."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:785
+#, python-format
+msgid "Failed to get vol with required size and extra specs for volume: %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:796
+#, python-format
+msgid "Error provisioning vol %(name)s on %(volume)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:841
+#, python-format
+msgid "No iscsi service found for vserver %s"
+msgstr "No iscsi service found for vserver %s"
+
+#: cinder/volume/drivers/netapp/iscsi.py:982
+#, python-format
+msgid "Cloned LUN with new name %s"
+msgstr "Cloned LUN with new name %s"
+
+#: cinder/volume/drivers/netapp/iscsi.py:986
+#, python-format
+msgid "No cloned lun named %s found on the filer"
+msgstr "No cloned lun named %s found on the filer"
+
+#: cinder/volume/drivers/netapp/iscsi.py:1087
+msgid "Cluster ssc is not updated. No volume stats found."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1149
+#: cinder/volume/drivers/netapp/nfs.py:1080
+msgid "Unsupported ONTAP version. ONTAP version 7.3.1 and above is supported."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1153
+#: cinder/volume/drivers/netapp/nfs.py:1084
+#: cinder/volume/drivers/netapp/utils.py:320
+msgid "API version could not be determined."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1164
+#, python-format
+msgid "Failed to get vol with required size for volume: %s"
+msgstr "Failed to get vol with required size for volume: %s"
+
+#: cinder/volume/drivers/netapp/iscsi.py:1273
+#, python-format
+msgid "Error finding luns for volume %s. Verify volume exists."
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1390 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "Clone operation with src %(name)s and dest %(new_name)s completed" + +#: cinder/volume/drivers/netapp/iscsi.py:1393 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "Clone operation with src %(name)s and dest %(new_name)s failed" + +#: cinder/volume/drivers/netapp/iscsi.py:1456 +msgid "Volume refresh job already running. Returning..." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1462 +#, python-format +msgid "Error refreshing vol capacity. Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1470 +#, python-format +msgid "Refreshing capacity info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:104 +#: cinder/volume/drivers/netapp/nfs.py:211 +#, python-format +msgid "NFS file %s not discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:224 +#, python-format +msgid "Copied image to volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:230 +#, python-format +msgid "Registering image in cache %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:237 +#, python-format +msgid "" +"Exception while registering image %(image_id)s in cache. Exception: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:250 +#, python-format +msgid "Found cache file for image %(image_id)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:263 +#, python-format +msgid "Cloning img from cache for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:271 +msgid "Image cache cleaning in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:282 +msgid "Image cache cleaning in progress." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:293 +#, python-format +msgid "Cleaning cache for share %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:298 +#, python-format +msgid "Files to be queued for deletion %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:305 +#, python-format +msgid "Exception during cache cleaning %(share)s. Message - %(ex)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:311 +msgid "Image cache cleaning done." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:336 +#, python-format +msgid "Bytes to free %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:343 +#, python-format +msgid "Delete file path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:358 +#, python-format +msgid "Deleting file at path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:363 +#, python-format +msgid "Exception during deleting %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:395 +#, python-format +msgid "Unexpected exception in cloning image %(image_id)s. 
Message: %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:411 +#, python-format +msgid "Cloning image %s from cache" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:415 +#, python-format +msgid "Cache share: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:425 +#, python-format +msgid "Unexpected exception during image cloning in share %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:431 +#, python-format +msgid "Cloning image %s directly in share" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:436 +#, python-format +msgid "Share is cloneable %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:443 +#, python-format +msgid "Image is raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:450 +#, python-format +msgid "Image will locally be converted to raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:457 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "Converted to raw, but format is now %s" + +#: cinder/volume/drivers/netapp/nfs.py:467 +#, python-format +msgid "Performing post clone for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:474 +msgid "NFS file could not be discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:478 +msgid "Checking file for resize" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:482 +#, python-format +msgid "Resizing file to %sG" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:488 +msgid "Resizing image file failed." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:510 +msgid "Discover file retries exhausted." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:529 +#, python-format +msgid "Image location not in the expected format %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:557 +#, python-format +msgid "Found possible share matches %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:561 +msgid "Unexpected exception while short listing used share." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:599 +#, python-format +msgid "Extending volume %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:710 +#, python-format +msgid "Shares on vserver %s will only be used for provisioning." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:714 +#: cinder/volume/drivers/netapp/nfs.py:892 +msgid "No vserver set in config. SSC will be disabled." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:757 +#, python-format +msgid "Exception creating vol %(name)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:765 +#, python-format +msgid "Volume %s could not be created on shares." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:815 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "No interface found on cluster for ip %s" + +#: cinder/volume/drivers/netapp/nfs.py:856 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:862 +#, python-format +msgid "" +"Cloning with params volume %(volume)s, src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:924 +msgid "No cluster ssc stats found. Wait for next volume stats update." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:941 +msgid "No shares found hence skipping ssc refresh." 
+msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:978 +#: cinder/volume/drivers/netapp/nfs.py:1221 +#, python-format +msgid "Shortlisted del elg files %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:983 +#: cinder/volume/drivers/netapp/nfs.py:1226 +#, python-format +msgid "Getting file usage for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:988 +#: cinder/volume/drivers/netapp/nfs.py:1231 +#, python-format +msgid "file-usage for path %(path)s is %(bytes)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1005 +#: cinder/volume/drivers/netapp/nfs.py:1268 +#, python-format +msgid "Share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1007 +#: cinder/volume/drivers/netapp/nfs.py:1270 +#, python-format +msgid "No share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1038 +#, python-format +msgid "Found volume %(vol)s for share %(share)s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1129 +#, python-format +msgid "No storage path found for export path %s" +msgstr "No storage path found for export path %s" + +#: cinder/volume/drivers/netapp/nfs.py:1139 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "Cloning with src %(src_path)s, dest %(dest_path)s" + +#: cinder/volume/drivers/netapp/ssc_utils.py:241 +#, python-format +msgid "Unexpected error while creating ssc vol list. Message - %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:272 +#, python-format +msgid "Exception querying aggr options. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:313 +#, python-format +msgid "Exception querying sis information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:347 +#, python-format +msgid "Exception querying mirror information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:379 +#, python-format +msgid "Exception querying storage disk. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:421 +#, python-format +msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:455 +#, python-format +msgid "Successfully completed stale refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:482 +#, python-format +msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:488 +#, python-format +msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:500 +msgid "Backend not a VolumeDriver." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:502 +msgid "Backend server not NaServer." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:505 +msgid "ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:517 +msgid "refresh stale ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:616 +msgid "Fatal error: User not permitted to query NetApp volumes." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:623 +#, python-format +msgid "" +"The user does not have access or sufficient privileges to use all ssc " +"apis. The ssc features %s may not work as expected." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:122 +msgid "ems executed successfully." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:124 +#, python-format +msgid "Failed to invoke ems. 
Message: %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/utils.py:137
+msgid ""
+"This is not the recommended way to use NetApp drivers. Please use "
+"NetAppDriver to achieve the functionality."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/utils.py:160
+msgid "Requires an NaServer instance."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/utils.py:317
+msgid "Unsupported Clustered Data ONTAP version."
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:99
+#, python-format
+msgid "Volume %s does not exist in Nexenta SA"
+msgstr "Volume %s does not exist in Nexenta SA"
+
+#: cinder/volume/drivers/nexenta/iscsi.py:150
+#, python-format
+msgid "Extending volume: %(id)s New size: %(size)s GB"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:166
+#, python-format
+msgid "Volume %s does not exist, it seems it was already deleted."
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:179
+#, python-format
+msgid "Cannot delete snapshot %(origin)s: %(exc)s"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:190
+#, python-format
+msgid "Creating temp snapshot of the original volume: %(volume_name)s@%(name)s"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:200
+#: cinder/volume/drivers/nexenta/nfs.py:200
+#, python-format
+msgid "Volume creation failed, deleting created snapshot %(volume_name)s@%(name)s"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:205
+#: cinder/volume/drivers/nexenta/nfs.py:205
+#, python-format
+msgid "Failed to delete zfs snapshot %(volume_name)s@%(name)s"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:223
+#, python-format
+msgid "Enter: migrate_volume: id=%(id)s, host=%(host)s"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:250
+#, python-format
+msgid "Remote NexentaStor appliance at %s should be SSH-bound."
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:267
+#, python-format
+msgid ""
+"Cannot send source snapshot %(src)s to destination %(dst)s. Reason: "
+"%(exc)s"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:275
+#, python-format
+msgid ""
+"Cannot delete temporary source snapshot %(src)s on NexentaStor Appliance:"
+" %(exc)s"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:281
+#, python-format
+msgid "Cannot delete source volume %(volume)s on NexentaStor Appliance: %(exc)s"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:318
+#, python-format
+msgid "Snapshot %s does not exist, it seems it was already deleted."
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:439
+#: cinder/volume/drivers/windows/windows_utils.py:230
+#, python-format
+msgid "Ignored target creation error \"%s\" while ensuring export"
+msgstr "Ignored target creation error \"%s\" while ensuring export"
+
+#: cinder/volume/drivers/nexenta/iscsi.py:449
+#, python-format
+msgid "Ignored target group creation error \"%s\" while ensuring export"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:461
+#, python-format
+msgid "Ignored target group member addition error \"%s\" while ensuring export"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:471
+#, python-format
+msgid "Ignored LU creation error \"%s\" while ensuring export"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:481
+#, python-format
+msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:514
+#, python-format
+msgid ""
+"Got error trying to destroy target group %(target_group)s, assuming it is"
+" already gone: %(exc)s"
+msgstr ""
+"Got error trying to destroy target group %(target_group)s, assuming it is"
+" already gone: %(exc)s"
+
+#: cinder/volume/drivers/nexenta/iscsi.py:522
+#, python-format
+msgid ""
+"Got error trying to delete target %(target)s, assuming it is already "
+"gone: %(exc)s"
+msgstr ""
+"Got error trying to delete target %(target)s, assuming it is already "
+"gone: %(exc)s"
+
+#: cinder/volume/drivers/nexenta/jsonrpc.py:83
+#, python-format
+msgid "Sending JSON data: %s"
+msgstr "Sending JSON data: %s"
+
+#: cinder/volume/drivers/nexenta/jsonrpc.py:88
+msgid "No headers in server response"
+msgstr "No headers in server response"
+
+#: cinder/volume/drivers/nexenta/jsonrpc.py:89
+msgid "Bad response from server"
+msgstr "Bad response from server"
+
+#: cinder/volume/drivers/nexenta/jsonrpc.py:90
+#, python-format
+msgid "Auto switching to HTTPS connection to %s"
+msgstr "Auto switching to HTTPS connection to %s"
+
+#: cinder/volume/drivers/nexenta/jsonrpc.py:96
+#, python-format
+msgid "Got response: %s"
+msgstr "Got response: %s"
+
+#: cinder/volume/drivers/nexenta/nfs.py:85
+#, python-format
+msgid "Volume %s does not exist in Nexenta Store appliance"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/nfs.py:89
+#, python-format
+msgid "Folder %s does not exist in Nexenta Store appliance"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/nfs.py:114
+#, python-format
+msgid "Creating folder on Nexenta Store %s"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/nfs.py:146
+#, python-format
+msgid "Cannot destroy created folder: %(vol)s/%(folder)s"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/nfs.py:176
+#, python-format
+msgid "Cannot destroy cloned folder: %(vol)s/%(folder)s"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/nfs.py:227
+#, python-format
+msgid "Folder %s does not exist, it was already deleted."
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/nfs.py:237
+#: cinder/volume/drivers/nexenta/nfs.py:268
+#, python-format
+msgid "Snapshot %s does not exist, it was already deleted."
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/nfs.py:302
+#, python-format
+msgid "Creating regular file: %s. This may take some time."
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/nfs.py:313
+#, python-format
+msgid "Regular file: %s created."
+msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:365 +#, python-format +msgid "Sharing folder %s on Nexenta Store" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:393 +#, python-format +msgid "Shares loaded: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/utils.py:46 +#, python-format +msgid "Invalid value: \"%s\"" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:93 +#, python-format +msgid "CLIQ command returned %s" +msgstr "CLIQ command returned %s" + +#: cinder/volume/drivers/san/hp_lefthand.py:99 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" + +#: cinder/volume/drivers/san/hp_lefthand.py:107 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" + +#: cinder/volume/drivers/san/hp_lefthand.py:137 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" + +#: cinder/volume/drivers/san/hp_lefthand.py:190 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "Volume info: %(volume_name)s => %(volume_attributes)s" + +#: cinder/volume/drivers/san/hp_lefthand.py:246 +#, python-format +msgid "Snapshot info: %(name)s => %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:321 +msgid "local_path not supported" +msgstr "local_path not supported" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "Specify san_password or san_private_key" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "san_ip must be set" + +#: cinder/volume/drivers/san/solaris.py:79 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "Cannot parse list-view output: %s" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "LUID not found for %(zfs_poolname)s. Output=%(out)s" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:166 +#, python-format +msgid "Invalid hp3parclient version. Version %s or greater required." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:179 +#, python-format +msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:193 +#, python-format +msgid "HP3PARCommon %(common_ver)s, hp3parclient %(rest_ver)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:212 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:494 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "CPG (%s) doesn't exist on array" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:228 +#, python-format +msgid "Failed to get domain because CPG (%s) doesn't exist on array." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:247 +#, python-format +msgid "Error extending volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:347 +#, python-format +msgid "command %s failed" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:390 +#, python-format +msgid "Error running ssh command: %s" +msgstr "Error running ssh command: %s" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:586 +#, python-format +msgid "VV Set %s does not exist." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:633 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:684 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." +msgstr "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:752 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "Volume (%s) already exists on array" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1004 +#, python-format +msgid "Failure in update_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1019 +#, python-format +msgid "Failure in clear_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1031 +#, python-format +msgid "Error attaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1039 +#, python-format +msgid "Error detaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:124 +#, python-format +msgid "Invalid IP address format '%s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:158 +#, python-format +msgid "" +"Found invalid iSCSI IP address(s) in configuration option(s) " +"hp3par_iscsi_ips or iscsi_ip_address '%s.'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:164 +msgid "At least one valid iSCSI IP address must be set." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:266 +msgid "Least busy iSCSI port not found, using first iSCSI port in list." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:75 +#, python-format +msgid "Failure while invoking function: %(func)s. Error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:162 +#, python-format +msgid "Error while terminating session: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:165 +msgid "Successfully established connection to the server." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:172 +#, python-format +msgid "Error while logging out the user: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:218 +#, python-format +msgid "" +"Not authenticated error occurred. Will create session and try API call " +"again: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:258 +#, python-format +msgid "Task: %(task)s progress: %(prog)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:262 +#, python-format +msgid "Task %s status: success." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:266 +#: cinder/volume/drivers/vmware/api.py:271 +#, python-format +msgid "Task: %(task)s failed with error: %(err)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:290 +msgid "Lease is ready." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:294 +msgid "Lease initializing..." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:304 +#, python-format +msgid "Error: unknown lease state %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:51 +#, python-format +msgid "Read %(bytes)s out of %(max)s from ThreadSafePipe." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:56 +#, python-format +msgid "Completed transfer of size %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:102 +#, python-format +msgid "Initiating image service update on image: %(image)s with meta: %(meta)s" +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:117 +#, python-format +msgid "Glance image: %s is now active." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:123 +#, python-format +msgid "Glance image: %s is in killed state." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:132 +#, python-format +msgid "Glance image %(id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:171 +#, python-format +msgid "" +"Exception during HTTP connection close in VMwareHTTPWrite. Exception is " +"%s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:203 +#: cinder/volume/drivers/vmware/read_write_util.py:292 +msgid "Could not retrieve URL from lease." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:206 +#, python-format +msgid "Opening vmdk url: %s for write." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:231 +#, python-format +msgid "Written %s bytes to vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:242 +#: cinder/volume/drivers/vmware/read_write_util.py:318 +#, python-format +msgid "Updating progress to %s percent." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:258 +#: cinder/volume/drivers/vmware/read_write_util.py:334 +msgid "Lease released." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:260 +#: cinder/volume/drivers/vmware/read_write_util.py:336 +#, python-format +msgid "Lease is already in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:295 +#, python-format +msgid "Opening vmdk url: %s for read." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:307 +#, python-format +msgid "Read %s bytes from vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:150 +#, python-format +msgid "Error(s): %s occurred in the call to RetrievePropertiesEx." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:189 +#, python-format +msgid "No such SOAP method %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:198 +#, python-format +msgid "httplib error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:209 +#, python-format +msgid "Socket error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:218 +#, python-format +msgid "Type error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:225 +#, python-format +msgid "Error in %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:112 +#, python-format +msgid "Returning spec value %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:115 +#, python-format +msgid "Invalid spec value: %s specified." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:118 +#, python-format +msgid "Returning default spec value: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:169 +#, python-format +msgid "%s not set." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:174 +#, python-format +msgid "Successfully setup driver: %(driver)s for server: %(ip)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:219 +msgid "Backing not available, no operation to be performed." 
+msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:287 +#, python-format +msgid "" +"Unable to pick datastore to accommodate %(size)s bytes from the " +"datastores: %(dss)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:293 +#, python-format +msgid "" +"Selected datastore: %(datastore)s with %(host_count)d connected host(s) " +"for the volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:375 +#, python-format +msgid "" +"Unable to find suitable datastore for volume of size: %(vol)s GB under " +"host: %(host)s. More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:385 +#, python-format +msgid "Unable to find host to accommodate a disk of size: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:412 +#, python-format +msgid "" +"Unable to find suitable datastore for volume: %(vol)s under host: " +"%(host)s. More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:422 +#, python-format +msgid "Unable to create volume: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:441 +#, python-format +msgid "The instance: %s for which initialize connection is called, exists." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:448 +#, python-format +msgid "There is no backing for the volume: %s. Need to create one." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:456 +msgid "The instance for which initialize connection is called, does not exist." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:461 +#, python-format +msgid "Trying to boot from an empty volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:470 +#, python-format +msgid "" +"Returning connection_info: %(info)s for volume: %(volume)s with " +"connector: %(connector)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:518 +#, python-format +msgid "Snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:523 +#, python-format +msgid "There is no backing, so will not create snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:528 +#, python-format +msgid "Successfully created snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:549 +#, python-format +msgid "Delete snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:554 +#, python-format +msgid "There is no backing, and so there is no snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:558 +#, python-format +msgid "Successfully deleted snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:586 +#, python-format +msgid "Successfully cloned new backing: %(back)s from source VMDK file: %(vmdk)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:603 +#, python-format +msgid "" +"There is no backing for the source volume: %(svol)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:633 +#, python-format +msgid "" +"There is no backing for the source snapshot: %(snap)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:642 +#: cinder/volume/drivers/vmware/vmdk.py:982 +#, python-format +msgid "" +"There is no snapshot point for the snapshoted volume: %(snap)s. Not " +"creating any backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:678 +#, python-format +msgid "Cannot create image of disk format: %s. Only vmdk disk format is accepted." 
+msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:713 +#: cinder/volume/drivers/vmware/vmdk.py:771 +#, python-format +msgid "Fetching glance image: %(id)s to server: %(host)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:722 +#: cinder/volume/drivers/vmware/vmdk.py:792 +#, python-format +msgid "Done copying image: %(id)s to volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:725 +#, python-format +msgid "" +"Exception in copy_image_to_volume: %(excep)s. Deleting the backing: " +"%(back)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:746 +#, python-format +msgid "Exception in _select_ds_for_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:749 +#, python-format +msgid "Selected datastore %(ds)s for new volume of size %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:784 +#, python-format +msgid "Exception in copy_image_to_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:787 +#, python-format +msgid "Deleting the backing: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:808 +#, python-format +msgid "Copy glance image: %s to create new volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:842 +msgid "Upload to glance of attached volume is not supported." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:847 +#, python-format +msgid "Copy Volume: %s to new image." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:853 +#, python-format +msgid "Backing not found, creating for volume: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:872 +#, python-format +msgid "Done copying volume %(vol)s to a new image %(img)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:922 +#, python-format +msgid "Relocating volume: %(backing)s to %(ds)s and %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:961 +#: cinder/volume/drivers/vmware/volumeops.py:630 +#, python-format +msgid "Successfully created clone: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:974 +#, python-format +msgid "" +"There is no backing for the snapshoted volume: %(snap)s. Not creating any" +" backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1010 +#, python-format +msgid "" +"There is no backing for the source volume: %(src)s. Not creating any " +"backing for volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1018 +#, python-format +msgid "Linked clone of source volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:94 +#, python-format +msgid "Downloading image: %s from glance image server as a flat vmdk file." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:107 +#: cinder/volume/drivers/vmware/vmware_images.py:126 +#, python-format +msgid "Downloaded image: %s from glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:113 +#, python-format +msgid "Downloading image: %s from glance image server using HttpNfc import." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:132 +#, python-format +msgid "Uploading image: %s to the Glance image server using HttpNfc export." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:158 +#, python-format +msgid "Uploaded image: %s to the Glance image server." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:87 +#, python-format +msgid "Did not find any backing with name: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:94 +#, python-format +msgid "Deleting the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:97 +#, python-format +msgid "Initiated deletion of VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:99 +#, python-format +msgid "Deleted the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:237 +#, python-format +msgid "There are no valid datastores attached to %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:289 +#, python-format +msgid "" +"Creating folder: %(child_folder_name)s under parent folder: " +"%(parent_folder)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:306 +#, python-format +msgid "Child folder already present: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:314 +#, python-format +msgid "Created child folder: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:365 +#, python-format +msgid "Spec for creating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:383 +#, python-format +msgid "" +"Creating volume backing name: %(name)s disk_type: %(disk_type)s size_kb: " +"%(size_kb)s at folder: %(folder)s resourse pool: %(resource_pool)s " +"datastore name: %(ds_name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:395 +#, python-format +msgid "Initiated creation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:398 +#, python-format +msgid "Successfully created volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:438 +#, python-format +msgid "Spec for relocating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:452 +#, python-format +msgid "" +"Relocating backing: %(backing)s to datastore: %(ds)s and resource pool: " +"%(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:462 +#, python-format +msgid "Initiated relocation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:464 +#, python-format +msgid "" +"Successfully relocated volume backing: %(backing)s to datastore: %(ds)s " +"and resource pool: %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:474 +#, python-format +msgid "Moving backing: %(backing)s to folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:479 +#, python-format +msgid "Initiated move of volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:482 +#, python-format +msgid "Successfully moved volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:494 +#, python-format +msgid "Snapshoting backing: %(backing)s with name: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:501 +#, python-format +msgid "Initiated snapshot of volume backing: %(backing)s named: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:505 +#, python-format +msgid "Successfully created snapshot: %(snap)s for volume backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:553 +#, python-format +msgid "Deleting the snapshot: %(name)s from backing: %(backing)s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:558 +#, python-format +msgid "" +"Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " +"delete anything." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:565 +#, python-format +msgid "Initiated snapshot: %(name)s deletion for backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:569 +#, python-format +msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:597 +#, python-format +msgid "Spec for cloning the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:613 +#, python-format +msgid "" +"Creating a clone of backing: %(back)s, named: %(name)s, clone type: " +"%(type)s from snapshot: %(snap)s on datastore: %(ds)s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:627 +#, python-format +msgid "Initiated clone of backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:638 +#, python-format +msgid "Deleting file: %(file)s under datacenter: %(dc)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:646 +#, python-format +msgid "Initiated deletion via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:648 +#, python-format +msgid "Successfully deleted file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:701 +msgid "Copying disk data before snapshot of the VM" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:711 +#, python-format +msgid "Initiated copying disk data via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:713 +#, python-format +msgid "Successfully copied disk at: %(src)s to: %(dest)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:722 +#, python-format +msgid "Deleting vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:729 +#, python-format +msgid "Initiated deleting vmdk file via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:731 +#, python-format +msgid "Deleted vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/windows/windows.py:102 +#, python-format +msgid "Creating folder %s " +msgstr "Creating folder %s " + +#: cinder/volume/drivers/windows/windows_utils.py:47 +#, python-format +msgid "" +"check_for_setup_error: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:53 +msgid "check_for_setup_error: there is no ISCSI traffic listening." +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:63 +#, python-format +msgid "" +"get_host_information: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:73 +#, python-format +msgid "" +"get_host_information: the ISCSI target information could not be " +"retrieved. WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:105 +#, python-format +msgid "" +"associate_initiator_with_iscsi_target: an association between initiator: " +"%(init)s and target name: %(target)s could not be established. WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:123 +#, python-format +msgid "" +"delete_iscsi_target: error when deleting the iscsi target associated with" +" target name: %(target)s . 
WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:139 +#, python-format +msgid "" +"create_volume: error when creating the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:157 +#, python-format +msgid "" +"delete_volume: error when deleting the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:177 +#, python-format +msgid "" +"create_snapshot: error when creating the snapshot name: %(vol_name)s . " +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:193 +#, python-format +msgid "" +"create_volume_from_snapshot: error when creating the volume name: " +"%(vol_name)s from snapshot name: %(snap_name)s. WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:208 +#, python-format +msgid "" +"delete_snapshot: error when deleting the snapshot name: %(snap_name)s . " +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:223 +#, python-format +msgid "" +"create_iscsi_target: error when creating iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:240 +#, python-format +msgid "" +"remove_iscsi_target: error when deleting iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:255 +#, python-format +msgid "" +"add_disk_to_target: error adding disk associated to volume : %(vol_name)s" +" to the target name: %(tar_name)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:273 +#, python-format +msgid "" +"copy_vhd_disk: error when copying disk from source path : %(src_path)s to" +" destination path: %(dest_path)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:290 +#, python-format +msgid "" +"extend: error when extending the volume: %(vol_name)s .WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/flows/common.py:52 +#, python-format +msgid "Restoring source %(source_volid)s status to %(status)s" +msgstr "" + +#: cinder/volume/flows/common.py:58 +#, python-format +msgid "" +"Failed setting source volume %(source_volid)s back to its initial " +"%(source_status)s status" +msgstr "" + +#: cinder/volume/flows/common.py:83 +#, python-format +msgid "Updating volume: %(volume_id)s with %(update)s due to: %(reason)s" +msgstr "" + +#: cinder/volume/flows/common.py:90 +#: cinder/volume/flows/manager/create_volume.py:676 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(update)s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:81 +#, python-format +msgid "Originating snapshot status must be one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:103 +#, python-format +msgid "" +"Unable to create a volume from an originating source volume when its " +"status is not one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:126 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than the snapshot size " +"%(snap_size)sGB. They must be >= original snapshot size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:135 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than original volume size " +"%(source_size)sGB. They must be >= original volume size." 
+msgstr "" + +#: cinder/volume/flows/api/create_volume.py:144 +#, python-format +msgid "Volume size %(size)s must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:186 +#, python-format +msgid "" +"Size of specified image %(image_size)sGB is larger than volume size " +"%(volume_size)sGB." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:194 +#, python-format +msgid "" +"Volume size %(volume_size)sGB cannot be smaller than the image minDisk " +"size %(min_disk)sGB." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:212 +#, python-format +msgid "Metadata property key %s greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:217 +#, python-format +msgid "Metadata property key %s value greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:254 +#, python-format +msgid "Availability zone '%s' is invalid" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:267 +msgid "Volume must be in the same availability zone as the snapshot" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:276 +msgid "Volume must be in the same availability zone as the source volume" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:315 +msgid "Volume type will be changed to be the same as the source volume." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:463 +#, python-format +msgid "Failed destroying volume entry %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:546 +#, python-format +msgid "Failed rolling back quota for %s reservations" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:590 +#, python-format +msgid "Failed to update quota for deleting volume: %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:678 +#: cinder/volume/flows/manager/create_volume.py:192 +#, python-format +msgid "Volume %s: create failed" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:682 +msgid "Unexpected build error:" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:105 +#, python-format +msgid "" +"Volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d due to " +"%(reason)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:124 +#, python-format +msgid "Volume %s: re-scheduled" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:141 +#, python-format +msgid "Updating volume %(volume_id)s with %(update)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:146 +#, python-format +msgid "Volume %s: resetting 'creating' status failed." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:165 +#, python-format +msgid "Volume %s: rescheduling failed" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:308 +#, python-format +msgid "" +"Failed notifying about the volume action %(event)s for volume " +"%(volume_id)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:345 +#, python-format +msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." 
+msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:347 +#, python-format +msgid "" +"Failed updating volume %(vol_id)s metadata using the provided " +"%(src_type)s %(src_id)s metadata" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:405 +#, python-format +msgid "" +"Failed fetching snapshot %(snapshot_id)s bootable flag using the provided" +" glance snapshot %(snapshot_ref_id)s volume reference" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:418 +#, python-format +msgid "Marking volume %s as bootable." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:421 +#, python-format +msgid "Failed updating volume %(volume_id)s bootable flag to true" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:448 +#, python-format +msgid "" +"Attempting download of %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:455 +#: cinder/volume/flows/manager/create_volume.py:466 +#, python-format +msgid "" +"Failed to copy image %(image_id)s to volume: %(volume_id)s, error: " +"%(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:461 +#, python-format +msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:475 +#, python-format +msgid "" +"Downloaded image %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s successfully." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:512 +#, python-format +msgid "" +"Creating volume glance metadata for volume %(volume_id)s backed by image " +"%(image_id)s with: %(vol_metadata)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:526 +#, python-format +msgid "" +"Cloning %(volume_id)s from image %(image_id)s at location " +"%(image_location)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:552 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(updates)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:574 +#, python-format +msgid "Unable to create volume. 
Volume driver %s not initialized" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:588 +#, python-format +msgid "" +"Volume %(volume_id)s: being created using %(functor)s with specification:" +" %(volume_spec)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:611 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with creation provided " +"model %(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:619 +#, python-format +msgid "Volume %s: creating export" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:633 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with driver provided model " +"%(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:680 +#, python-format +msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" +msgstr "" + +#~ msgid "Error retrieving volume status: %s" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get system name" +#~ msgstr "_update_volume_status: Could not get system name" + +#~ msgid "_update_volume_status: Could not get storage pool data" +#~ msgstr "_update_volume_status: Could not get storage pool data" + +#~ msgid "Cannot find any Fibre Channel HBAs" +#~ msgstr "" + +#~ msgid "Volume status must be available or error" +#~ msgstr "Volume status must be available or error" + +#~ msgid "No backend config with id %s" +#~ msgstr "" + +#~ msgid "No sm_flavor called %s" +#~ msgstr "" + +#~ msgid "No sm_volume with id %s" +#~ msgstr "" + +#~ msgid "Error: %s" +#~ msgstr "Error: %s" + +#~ msgid "Unexpected state while cloning %s" +#~ msgstr "Unexpected state while cloning %s" + +#~ msgid "iSCSI device not found at %s" +#~ msgstr "iSCSI device not found at %s" + +#~ msgid "Fibre Channel device not found." +#~ msgstr "" + +#~ msgid "Uncaught exception" +#~ msgstr "Uncaught exception" + +#~ msgid "Out reactor registered" +#~ msgstr "Out reactor registered" + +#~ msgid "CONSUMER GOT %s" +#~ msgstr "CONSUMER GOT %s" + +#~ msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +#~ msgstr "ROUTER RELAY-OUT SUCCEEDED %(data)s" + +#~ msgid "ROUTER RELAY-OUT QUEUED %(data)s" +#~ msgstr "ROUTER RELAY-OUT QUEUED %(data)s" + +#~ msgid "Could not create IPC directory %s" +#~ msgstr "Could not create IPC directory %s" + +#~ msgid "ROUTER RELAY-OUT %(data)s" +#~ msgstr "ROUTER RELAY-OUT %(data)s" + +#~ msgid "May specify only one of snapshot, imageRef or source volume" +#~ msgstr "May specify only one of snapshot, imageRef or source volume" + +#~ msgid "Volume size cannot be lesser than the Snapshot size" +#~ msgstr "Volume size cannot be lesser than the Snapshot size" + +#~ msgid "Unable to clone volumes that are in an error state" +#~ msgstr "Unable to clone volumes that are in an error state" + +#~ msgid "Clones currently must be >= original volume size." +#~ msgstr "Clones currently must be >= original volume size." + +#~ msgid "Volume size '%s' must be an integer and greater than 0" +#~ msgstr "Volume size '%s' must be an integer and greater than 0" + +#~ msgid "Size of specified image is larger than volume size." +#~ msgstr "Size of specified image is larger than volume size." + +#~ msgid "Image minDisk size is larger than the volume size." +#~ msgstr "Image minDisk size is larger than the volume size." 
+ +#~ msgid "" +#~ msgstr "" + +#~ msgid "Availability zone is invalid" +#~ msgstr "" + +#~ msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +#~ msgstr "volume %(vol_name)s: creating lv of size %(vol_size)sG" + +#~ msgid "volume %s: creating from snapshot" +#~ msgstr "volume %s: creating from snapshot" + +#~ msgid "volume %s: creating from existing volume" +#~ msgstr "volume %s: creating from existing volume" + +#~ msgid "volume %s: creating from image" +#~ msgstr "volume %s: creating from image" + +#~ msgid "volume %s: creating" +#~ msgstr "volume %s: creating" + +#~ msgid "Setting volume: %s status to error after failed image copy." +#~ msgstr "" + +#~ msgid "Unexpected Error: " +#~ msgstr "" + +#~ msgid "volume %s: creating export" +#~ msgstr "volume %s: creating export" + +#~ msgid "volume %s: create failed" +#~ msgstr "volume %s: create failed" + +#~ msgid "volume %s: created successfully" +#~ msgstr "volume %s: created successfully" + +#~ msgid "volume %s: Error trying to reschedule create" +#~ msgstr "volume %s: Error trying to reschedule create" + +#~ msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +#~ msgstr "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" + +#~ msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +#~ msgstr "Failed to copy image to volume" + +#~ msgid "Downloaded image %(image_id)s to %(volume_id)s successfully." +#~ msgstr "Downloaded image %(image_id)s to %(volume_id)s successfully" + +#~ msgid "Array Mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "LUN %(lun)s of size %(size)s MB is created." +#~ msgstr "" + +#~ msgid "Array mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "Failed to attach iser target for volume %(volume_id)s." +#~ msgstr "" + +#~ msgid "Fetching %s" +#~ msgstr "Fetching %s" + +#~ msgid "Link Local address is not found.:%s" +#~ msgstr "Link Local address is not found.:%s" + +#~ msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +#~ msgstr "Couldn't get Link Local IP of %(interface)s :%(ex)s" + +#~ msgid "Started %(name)s on %(_host)s:%(_port)s" +#~ msgstr "Started %(name)s on %(_host)s:%(_port)s" + +#~ msgid "Unable to find a Fibre Channel volume device" +#~ msgstr "" + +#~ msgid "Volume device not found at %s" +#~ msgstr "" + +#~ msgid "Unable to find Volume Group: %s" +#~ msgstr "Unable to find Volume Group: %s" + +#~ msgid "Failed to create Volume Group: %s" +#~ msgstr "Failed to create Volume Group: %s" + +#~ msgid "snapshot %(snap_name)s: creating" +#~ msgstr "snapshot %(snap_name)s: creating" + +#~ msgid "Running with CoraidDriver for ESM EtherCLoud" +#~ msgstr "Running with CoraidDriver for ESM EtherCLoud" + +#~ msgid "Update session cookie %(session)s" +#~ msgstr "Update session cookie %(session)s" + +#~ msgid "Message : %(message)s" +#~ msgstr "Message : %(message)s" + +#~ msgid "Error while trying to set group: %(message)s" +#~ msgstr "Error while trying to set group: %(message)s" + +#~ msgid "Unable to find group: %(group)s" +#~ msgstr "Unable to find group: %(group)s" + +#~ msgid "ESM urlOpen error" +#~ msgstr "ESM urlOpen error" + +#~ msgid "JSON Error" +#~ msgstr "JSON Error" + +#~ msgid "Request without URL" +#~ msgstr "Request without URL" + +#~ msgid "Configure data : %s" +#~ msgstr "Configure data : %s" + +#~ msgid "Configure response : %s" +#~ msgstr "Configure response : %s" + +#~ msgid "Unable to retrive volume infos for volume %(volname)s" +#~ msgstr "Unable to retrive volume infos for volume %(volname)s" + +#~ msgid "Cannot 
login on Coraid ESM" +#~ msgstr "Cannot login on Coraid ESM" + +#~ msgid "Fail to create volume %(volname)s" +#~ msgstr "Fail to create volume %(volname)s" + +#~ msgid "Failed to delete volume %(volname)s" +#~ msgstr "Failed to delete volume %(volname)s" + +#~ msgid "Failed to Create Snapshot %(snapname)s" +#~ msgstr "Failed to Create Snapshot %(snapname)s" + +#~ msgid "Failed to Delete Snapshot %(snapname)s" +#~ msgstr "Failed to Delete Snapshot %(snapname)s" + +#~ msgid "Failed to Create Volume from Snapshot %(snapname)s" +#~ msgstr "Failed to Create Volume from Snapshot %(snapname)s" + +#~ msgid "fmt = %(fmt)s backed by: %(backing_file)s" +#~ msgstr "" + +#~ msgid "Expected image to be in raw format, but is %s" +#~ msgstr "" + +#~ msgid "volume group %s doesn't exist" +#~ msgstr "volume group %s doesn't exist" + +#~ msgid "Error retrieving volume stats: %s" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Could not get system name" +#~ msgstr "" + +#~ msgid "CPG (%s) must be in a domain" +#~ msgstr "" + +#~ msgid "Error populating default encryption types!" +#~ msgstr "" + +#~ msgid "Unexpected error while running command." +#~ msgstr "Unexpected error while running command." + +#~ msgid "Nexenta SA returned the error" +#~ msgstr "Nexenta SA returned the error" + +#~ msgid "Ignored target group creation error \"%s\" while ensuring export" +#~ msgstr "Ignored target group creation error \"%s\" while ensuring export" + +#~ msgid "Ignored target group member addition error \"%s\" while ensuring export" +#~ msgstr "Ignored target group member addition error \"%s\" while ensuring export" + +#~ msgid "Ignored LU creation error \"%s\" while ensuring export" +#~ msgstr "Ignored LU creation error \"%s\" while ensuring export" + +#~ msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +#~ msgstr "Ignored LUN mapping entry addition error \"%s\" while ensuring export" + +#~ msgid "Copying metadata from snapshot %(snap_volume_id)s to %(volume_id)s" +#~ msgstr "" + +#~ msgid "Connection to glance failed" +#~ msgstr "Connection to glance failed" + +#~ msgid "Invalid snapshot" +#~ msgstr "Invalid snapshot" + +#~ msgid "Invalid input received" +#~ msgstr "Invalid input received" + +#~ msgid "Invalid volume type" +#~ msgstr "Invalid volume type" + +#~ msgid "Invalid volume" +#~ msgstr "Invalid volume" + +#~ msgid "Invalid host" +#~ msgstr "" + +#~ msgid "Invalid auth key" +#~ msgstr "Invalid input" + +#~ msgid "Invalid metadata" +#~ msgstr "Invalid metadata" + +#~ msgid "Invalid metadata size" +#~ msgstr "Invalid metadata size" + +#~ msgid "Migration error" +#~ msgstr "Migration error" + +#~ msgid "Quota exceeded" +#~ msgstr "Quota exceeded" + +#~ msgid "Connection to swift failed" +#~ msgstr "Connection to swift failed" + +#~ msgid "Volume migration failed" +#~ msgstr "" + +#~ msgid "SSH command injection detected" +#~ msgstr "" + +#~ msgid "Invalid qos specs" +#~ msgstr "" + +#~ msgid "debug in callback: %s" +#~ msgstr "debug in callback: %s" + +#~ msgid "Expected object of type: %s" +#~ msgstr "Expected object of type: %s" + +#~ msgid "timefunc: '%(name)s' took %(total_time).2f secs" +#~ msgstr "timefunc: '%(name)s' took %(total_time).2f secs" + +#~ msgid "base image still has %s snapshots so not deleting base image" +#~ msgstr "" + +#~ msgid "Failed to rename migration destination volume %(vol)s to %(name)s" +#~ msgstr "" + +#~ msgid "Resize volume \"%(name)s\" to %(size)s" +#~ msgstr "" + +#~ msgid "Volume \"%(name)s\" resized. 
New size is %(size)s" +#~ msgstr "" + +#~ msgid "Invalid snapshot backing file format: %s" +#~ msgstr "" + +#~ msgid "Extend volume from %(old_size) to %(new_size)" +#~ msgstr "" + +#~ msgid "pool %s doesn't exist" +#~ msgstr "pool %s doesn't exist" + +#~ msgid "_update_volume_stats: Could not get system name." +#~ msgstr "" + +#~ msgid "Disk not found: %s" +#~ msgstr "Disk not found: %s" + +#~ msgid "read timed out" +#~ msgstr "read timed out" + +#~ msgid "check_for_setup_error." +#~ msgstr "check_for_setup_error." + +#~ msgid "check_for_setup_error: Can not get device type." +#~ msgstr "check_for_setup_error: Can not get device type." + +#~ msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +#~ msgstr "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." + +#~ msgid "_get_device_type: Storage Pool must be configured." +#~ msgstr "_get_device_type: Storage Pool must be configured." + +#~ msgid "create_volume:volume name: %s." +#~ msgstr "create_volume:volume name: %s." + +#~ msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +#~ msgstr "delete_volume:No need to delete volume. Volume %(name)s does not exist." + +#~ msgid "create_export: volume name:%s" +#~ msgstr "create_export: volume name:%s" + +#~ msgid "create_export:Volume %(name)s does not exist." +#~ msgstr "create_export:Volume %(name)s does not exist." + +#~ msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +#~ msgstr "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" + +#~ msgid "terminate_connection:Host does not exist. Host name:%(host)s." +#~ msgstr "terminate_connection:Host does not exist. Host name:%(host)s." + +#~ msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +#~ msgstr "terminate_connection:volume does not exist. volume name:%(volume)s" + +#~ msgid "create_snapshot:Device does not support snapshot." +#~ msgstr "create_snapshot:Device does not support snapshot." + +#~ msgid "create_snapshot:Resource pool needs 1GB valid size at least." +#~ msgstr "create_snapshot:Resource pool needs 1GB valid size at least." + +#~ msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +#~ msgstr "create_snapshot:Volume does not exist. Volume name:%(name)s" + +#~ msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s" +#~ msgstr "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s" + +#~ msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +#~ msgstr "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." + +#~ msgid "delete_snapshot:Device does not support snapshot." +#~ msgstr "delete_snapshot:Device does not support snapshot." + +#~ msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +#~ msgstr "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" + +#~ msgid "_check_conf_file: %s" +#~ msgstr "_check_conf_file: %s" + +#~ msgid "Write login information to xml error. %s" +#~ msgstr "Write login information to xml error. %s" + +#~ msgid "_get_login_info error. %s" +#~ msgstr "_get_login_info error. %s" + +#~ msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +#~ msgstr "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." 
+ +#~ msgid "_get_lun_set_info:%s" +#~ msgstr "_get_lun_set_info:%s" + +#~ msgid "_get_iscsi_info:%s" +#~ msgstr "_get_iscsi_info:%s" + +#~ msgid "CLI command:%s" +#~ msgstr "CLI command:%s" + +#~ msgid "_execute_cli:%s" +#~ msgstr "_execute_cli:%s" + +#~ msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +#~ msgstr "_delete_hostport:Failed to delete host port. port id:%(portid)s" + +#~ msgid "_get_tgt_iqn:iSCSI IP is %s." +#~ msgstr "_get_tgt_iqn:iSCSI IP is %s." + +#~ msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +#~ msgstr "_get_tgt_iqn:iSCSI target iqn is:%s" + +#~ msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +#~ msgstr "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" + +#~ msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +#~ msgstr "_delete_host: Failed delete host. host id:%(hostid)s.out:%(out)s" + +#~ msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." +#~ msgstr "_is_resource_pool_enough:Resource pool for snapshot not be added." + +#~ msgid "Ignored target creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group member addition error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LU creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LUN mapping entry addition error while ensuring export" +#~ msgstr "" + +#~ msgid "Invalid source volume %(reason)s." +#~ msgstr "" + +#~ msgid "The request is invalid." +#~ msgstr "The request is invalid." + +#~ msgid "Volume %(volume_id)s persistence file could not be found." +#~ msgstr "Volume %(volume_id)s persistence file could not be found." + +#~ msgid "No disk at %(location)s" +#~ msgstr "No disk at %(location)s" + +#~ msgid "Class %(class_name)s could not be found: %(exception)s" +#~ msgstr "Class %(class_name)s could not be found: %(exception)s" + +#~ msgid "Action not allowed." +#~ msgstr "Action not allowed." + +#~ msgid "Key pair %(key_name)s already exists." +#~ msgstr "Key pair %(key_name)s already exists." + +#~ msgid "Migration error: %(reason)s" +#~ msgstr "" + +#~ msgid "Maximum volume/snapshot size exceeded" +#~ msgstr "Maximum volume/snapshot size exceeded" + +#~ msgid "3PAR Host already exists: %(err)s. %(info)s" +#~ msgstr "3PAR Host already exists: %(err)s. %(info)s" + +#~ msgid "Backup volume %(volume_id)s type not recognised." +#~ msgstr "" + +#~ msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s" +#~ msgstr "" + +#~ msgid "ssh_read: Read SSH timeout" +#~ msgstr "" + +#~ msgid "do_setup." +#~ msgstr "do_setup." + +#~ msgid "create_volume: volume name: %s." +#~ msgstr "" + +#~ msgid "delete_volume: volume name: %s." +#~ msgstr "delete_volume: volume name: %s." + +#~ msgid "create_cloned_volume: src volume: %(src)s tgt volume: %(tgt)s" +#~ msgstr "" + +#~ msgid "create_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" +#~ msgstr "" + +#~ msgid "delete_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" +#~ msgstr "" + +#~ msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Updating volume stats" +#~ msgstr "" + +#~ msgid "restore finished." +#~ msgstr "" + +#~ msgid "Error encountered during initialization of driver: %s" +#~ msgstr "" + +#~ msgid "Unabled to update stats, driver is uninitialized" +#~ msgstr "" + +#~ msgid "Snapshot file at %s does not exist." 
+#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %s does not exist" +#~ msgstr "_create_copy: Source vdisk %s does not exist" + +#~ msgid "Login to 3PAR array invalid" +#~ msgstr "Login to 3PAR array invalid" + +#~ msgid "There are no datastores present under %s." +#~ msgstr "" + +#~ msgid "Size for volume: %s not found, skipping secure delete." +#~ msgstr "Size for volume: %s not found, skipping secure delete." + +#~ msgid "Could not find attribute for LUN named %s" +#~ msgstr "Could not find attribute for LUN named %s" + +#~ msgid "Cleaning up incomplete backup operations" +#~ msgstr "Cleaning up incomplete backup operations" + +#~ msgid "Resetting volume %s to available (was backing-up)" +#~ msgstr "Resetting volume %s to available (was backing-up)" + +#~ msgid "Resetting volume %s to error_restoring (was restoring-backup)" +#~ msgstr "Resetting volume %s to error_restoring (was restoring-backup)" + +#~ msgid "Resetting backup %s to error (was creating)" +#~ msgstr "Resetting backup %s to error (was creating)" + +#~ msgid "Resetting backup %s to available (was restoring)" +#~ msgstr "Resetting backup %s to available (was restoring)" + +#~ msgid "Resuming delete on backup: %s" +#~ msgstr "Resuming delete on backup: %s" + +#~ msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +#~ msgstr "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" + +#~ msgid "create_backup finished. backup: %s" +#~ msgstr "create_backup finished. backup: %s" + +#~ msgid "delete_backup started, backup: %s" +#~ msgstr "delete_backup started, backup: %s" + +#~ msgid "delete_backup finished, backup %s deleted" +#~ msgstr "delete_backup finished, backup %s deleted" + +#~ msgid "JSON transfer Error" +#~ msgstr "" + +#~ msgid "create volume error: %(err)s" +#~ msgstr "" + +#~ msgid "Create snapshot error." +#~ msgstr "" + +#~ msgid "Create luncopy error." +#~ msgstr "" + +#~ msgid "_find_host_lun_id transfer data error! " +#~ msgstr "" + +#~ msgid "ssh_read: Read SSH timeout." +#~ msgstr "" + +#~ msgid "There are no hosts in the inventory." +#~ msgstr "" + +#~ msgid "Unable to create volume: %(vol)s on the hosts: %(hosts)s." +#~ msgstr "" + +#~ msgid "Successfully cloned new backing: %s." +#~ msgstr "" + +#~ msgid "Successfully reverted clone: %(clone)s to snapshot: %(snapshot)s." +#~ msgstr "" + +#~ msgid "Copying backing files from %(src)s to %(dest)s." +#~ msgstr "" + +#~ msgid "Initiated copying of backing via task: %s." +#~ msgstr "" + +#~ msgid "Successfully copied backing to %s." +#~ msgstr "" + +#~ msgid "Registering backing at path: %s to inventory." +#~ msgstr "" + +#~ msgid "Initiated registring backing, task: %s." +#~ msgstr "" + +#~ msgid "Successfully registered backing: %s." +#~ msgstr "" + +#~ msgid "Reverting backing to snapshot: %s." +#~ msgstr "" + +#~ msgid "Initiated reverting snapshot via task: %s." +#~ msgstr "" + +#~ msgid "Successfully reverted to snapshot: %s." +#~ msgstr "" + +#~ msgid "Successfully copied disk data to: %s." +#~ msgstr "" + +#~ msgid "Error(s): %s occurred in the call to RetrieveProperties." +#~ msgstr "" + +#~ msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +#~ msgstr "Requested volume or snapshot exceeds allowed Gigabytes quota" + +#~ msgid "Deploy v1 of the Cinder API. " +#~ msgstr "Deploy v1 of the Cinder API. " + +#~ msgid "Deploy v2 of the Cinder API. " +#~ msgstr "Deploy v2 of the Cinder API. " + +#~ msgid "_read_xml:%s" +#~ msgstr "_read_xml:%s" + +#~ msgid "request ip info is %s." 
+#~ msgstr "" + +#~ msgid "new str info is %s." +#~ msgstr "" + +#~ msgid "Failed to create iser target for volume %(volume_id)s." +#~ msgstr "" + +#~ msgid "Failed to remove iser target for volume %(volume_id)s." +#~ msgstr "" + +#~ msgid "rtstool is not installed correctly" +#~ msgstr "rtstool is not installed correctly" + +#~ msgid "Creating iser_target for: %s" +#~ msgstr "" + +#~ msgid "Failed to create iser target for volume id:%(vol_id)s: %(e)s" +#~ msgstr "" + +#~ msgid "Removing iser_target for: %s" +#~ msgstr "" + +#~ msgid "Failed to remove iser target for volume id:%(vol_id)s: %(e)s" +#~ msgstr "" + +#~ msgid "Volume %s does not exist, it seems it was already deleted" +#~ msgstr "" + +#~ msgid "Executing zfs send/recv on the appliance" +#~ msgstr "" + +#~ msgid "zfs send/recv done, new volume %s created" +#~ msgstr "" + +#~ msgid "Failed to delete temp snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" + +#~ msgid "Failed to delete zfs recv snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" + +#~ msgid "rbd export-diff failed - %s" +#~ msgstr "" + +#~ msgid "rbd import-diff failed - %s" +#~ msgstr "" + +#~ msgid "%s is not on GPFS. Perhaps GPFS not mounted." +#~ msgstr "" + +#~ msgid "Folder %s does not exist, it seems it was already deleted." +#~ msgstr "" + +#~ msgid "No 'os-update_readonly_flag' was specified in request." +#~ msgstr "" + +#~ msgid "Volume 'readonly' flag must be specified in request as a boolean." +#~ msgstr "" + +#~ msgid "ISER provider_location not stored, using discovery" +#~ msgstr "" + +#~ msgid "Could not find iSER export for volume %s" +#~ msgstr "" + +#~ msgid "ISER Discovery: Found %s" +#~ msgstr "" + +#~ msgid "Failed to access the device on the path %(path)s: %(error)s." +#~ msgstr "" + +#~ msgid "iSER device not found at %s" +#~ msgstr "" + +#~ msgid "Found iSER node %(host_device)s (after %(tries)s rescans)." +#~ msgstr "" + +#~ msgid "Skipping ensure_export. No iser_target provisioned for volume: %s" +#~ msgstr "" + +#~ msgid "Skipping remove_export. No iser_target provisioned for volume: %s" +#~ msgstr "" + +#~ msgid "Downloading image: %s from glance image server." +#~ msgstr "" + +#~ msgid "Uploading image: %s to the Glance image server." 
+#~ msgstr "" + +#~ msgid "Invalid request body" +#~ msgstr "Invalid request body" + +#~ msgid "enter: _get_host_from_connector: prefix %s" +#~ msgstr "enter: _get_host_from_connector: prefix %s" + +#~ msgid "Schedule volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete schedule volume using flow: %s" +#~ msgstr "" + +#~ msgid "Create volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete create volume workflow" +#~ msgstr "" + +#~ msgid "Expected volume result not found" +#~ msgstr "" + +#~ msgid "Manager volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete manager volume workflow" +#~ msgstr "" + +#~ msgid "Unable to update stats, driver is uninitialized" +#~ msgstr "" + +#~ msgid "Bad reponse from server: %s" +#~ msgstr "" + +#~ msgid "%(flow)s has moved into state %(state)s from state %(old_state)s" +#~ msgstr "" + +#~ msgid "No request spec, will not reschedule" +#~ msgstr "No request spec, will not reschedule" + +#~ msgid "No retry filter property or associated retry info, will not reschedule" +#~ msgstr "" + +#~ msgid "Retry info not present, will not reschedule" +#~ msgstr "Retry info not present, will not reschedule" + +#~ msgid "Clear capabilities" +#~ msgstr "Clear capabilities" + +#~ msgid "This usually means the volume was never succesfully created." +#~ msgstr "This usually means the volume was never succesfully created." + +#~ msgid "setting LU uppper (end) limit to %s" +#~ msgstr "" + +#~ msgid "Can't find lun or lun goup in array" +#~ msgstr "" + +#~ msgid "Volume to be restored to is smaller than the backup to be restored" +#~ msgstr "Volume to be restored to is smaller than the backup to be restored" + +#~ msgid "Volume driver '%(driver)s' not initialized." +#~ msgstr "" + +#~ msgid "in looping call" +#~ msgstr "in looping call" + +#~ msgid "Is the appropriate service running?" +#~ msgstr "Is the appropriate service running?" + +#~ msgid "Could not find another host" +#~ msgstr "Could not find another host" + +#~ msgid "Not enough allocatable volume gigabytes remaining" +#~ msgstr "Not enough allocatable volume gigabytes remaining" + +#~ msgid "Unable to update stats on non-intialized Volume Group: %s" +#~ msgstr "" + +#~ msgid "do_setup: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "migrate_volume started with more than one vdisk copy" +#~ msgstr "" + +#~ msgid "migrate_volume: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "Selected datastore: %s for the volume." +#~ msgstr "" + +#~ msgid "There are no valid datastores present under %s." +#~ msgstr "" + +#~ msgid "Unable to create volume, driver not initialized" +#~ msgstr "" + +#~ msgid "Migration %(migration_id)s could not be found." +#~ msgstr "Migration %(migration_id)s could not be found." + +#~ msgid "Bad driver response status: %(status)s" +#~ msgstr "" + +#~ msgid "Instance %(instance_id)s could not be found." +#~ msgstr "Instance %(instance_id)s could not be found." + +#~ msgid "Volume retype failed: %(reason)s" +#~ msgstr "" + +#~ msgid "SIGTERM received" +#~ msgstr "SIGTERM received" + +#~ msgid "Child %(pid)d exited with status %(code)d" +#~ msgstr "Child %(pid)d exited with status %(code)d" + +#~ msgid "_wait_child %d" +#~ msgstr "_wait_child %d" + +#~ msgid "wait wrap.failed %s" +#~ msgstr "wait wrap.failed %s" + +#~ msgid "" +#~ "Report interval must be less than " +#~ "service down time. Current config: " +#~ ". 
Setting " +#~ "service_down_time to: %(new_service_down_time)s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target for volume %(name)s." +#~ msgstr "" + +#~ msgid "Updating iscsi target: %s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target %(name)s: %(e)s" +#~ msgstr "" + +#~ msgid "Caught '%(exception)s' exception." +#~ msgstr "" + +#~ msgid "Get code level failed" +#~ msgstr "" + +#~ msgid "do_setup: Could not get system name" +#~ msgstr "" + +#~ msgid "Failed to get license information." +#~ msgstr "Failed to get license information." + +#~ msgid "do_setup: No configured nodes" +#~ msgstr "do_setup: No configured nodes" + +#~ msgid "enter: _get_chap_secret_for_host: host name %s" +#~ msgstr "enter: _get_chap_secret_for_host: host name %s" + +#~ msgid "" +#~ "leave: _get_chap_secret_for_host: host name " +#~ "%(host_name)s with secret %(chap_secret)s" +#~ msgstr "" +#~ "leave: _get_chap_secret_for_host: host name " +#~ "%(host_name)s with secret %(chap_secret)s" + +#~ msgid "" +#~ "_create_host: Cannot clean host name. " +#~ "Host name is not unicode or string" +#~ msgstr "" +#~ "_create_host: Cannot clean host name. " +#~ "Host name is not unicode or string" + +#~ msgid "enter: _get_host_from_connector: %s" +#~ msgstr "" + +#~ msgid "leave: _get_host_from_connector: host %s" +#~ msgstr "leave: _get_host_from_connector: host %s" + +#~ msgid "enter: _create_host: host %s" +#~ msgstr "enter: _create_host: host %s" + +#~ msgid "_create_host: No connector ports" +#~ msgstr "_create_host: No connector ports" + +#~ msgid "leave: _create_host: host %(host)s - %(host_name)s" +#~ msgstr "leave: _create_host: host %(host)s - %(host_name)s" + +#~ msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +#~ msgstr "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" + +#~ msgid "" +#~ "storwize_svc_multihostmap_enabled is set to " +#~ "False, Not allow multi host mapping" +#~ msgstr "" + +#~ msgid "volume %s mapping to multi host" +#~ msgstr "volume %s mapping to multi host" + +#~ msgid "" +#~ "leave: _map_vol_to_host: LUN %(result_lun)s, " +#~ "volume %(volume_name)s, host %(host_name)s" +#~ msgstr "" +#~ "leave: _map_vol_to_host: LUN %(result_lun)s, " +#~ "volume %(volume_name)s, host %(host_name)s" + +#~ msgid "enter: _delete_host: host %s " +#~ msgstr "enter: _delete_host: host %s " + +#~ msgid "leave: _delete_host: host %s " +#~ msgstr "leave: _delete_host: host %s " + +#~ msgid "_create_host failed to return the host name." +#~ msgstr "_create_host failed to return the host name." + +#~ msgid "_get_host_from_connector failed to return the host name for connector" +#~ msgstr "_get_host_from_connector failed to return the host name for connector" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to any host found." +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: Multiple mappings of " +#~ "volume %(vol_name)s found, no host " +#~ "specified." 
+#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to host %(host_name)s found" +#~ msgstr "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to host %(host_name)s found" + +#~ msgid "protocol must be specified as ' iSCSI' or ' FC'" +#~ msgstr "protocol must be specified as ' iSCSI' or ' FC'" + +#~ msgid "enter: _create_vdisk: vdisk %s " +#~ msgstr "enter: _create_vdisk: vdisk %s " + +#~ msgid "" +#~ "_create_vdisk %(name)s - did not find success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" +#~ "_create_vdisk %(name)s - did not find success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find success " +#~ "message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find success " +#~ "message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find mapping " +#~ "id in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find mapping " +#~ "id in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to prepare FlashCopy" +#~ " from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" +#~ "_prepare_fc_map: Failed to prepare FlashCopy" +#~ " from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" + +#~ msgid "" +#~ "Unexecpted mapping status %(status)s for " +#~ "mapping %(id)s. Attributes: %(attr)s" +#~ msgstr "" +#~ "Unexecpted mapping status %(status)s for " +#~ "mapping %(id)s. Attributes: %(attr)s" + +#~ msgid "" +#~ "Mapping %(id)s prepare failed to " +#~ "complete within the allotted %(to)d " +#~ "seconds timeout. Terminating." +#~ msgstr "" +#~ "Mapping %(id)s prepare failed to " +#~ "complete within the allotted %(to)d " +#~ "seconds timeout. Terminating." 
+ +#~ msgid "" +#~ "_prepare_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s with " +#~ "exception %(ex)s" +#~ msgstr "" +#~ "_prepare_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s with " +#~ "exception %(ex)s" + +#~ msgid "_prepare_fc_map: %s" +#~ msgstr "_prepare_fc_map: %s" + +#~ msgid "" +#~ "_start_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" +#~ "_start_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" + +#~ msgid "" +#~ "enter: _run_flashcopy: execute FlashCopy from" +#~ " source %(source)s to target %(target)s" +#~ msgstr "" +#~ "enter: _run_flashcopy: execute FlashCopy from" +#~ " source %(source)s to target %(target)s" + +#~ msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +#~ msgstr "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" + +#~ msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" + +#~ msgid "_create_copy: Source vdisk %(src_vdisk)s (%(src_id)s) does not exist" +#~ msgstr "" + +#~ msgid "" +#~ "_create_copy: cannot get source vdisk " +#~ "%(src)s capacity from vdisk attributes " +#~ "%(attr)s" +#~ msgstr "" +#~ "_create_copy: cannot get source vdisk " +#~ "%(src)s capacity from vdisk attributes " +#~ "%(attr)s" + +#~ msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" + +#~ msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +#~ msgstr "enter: _get_flashcopy_mapping_attributes: mapping %s" + +#~ msgid "" +#~ "leave: _get_flashcopy_mapping_attributes: mapping " +#~ "%(fc_map_id)s, attributes %(attributes)s" +#~ msgstr "" +#~ "leave: _get_flashcopy_mapping_attributes: mapping " +#~ "%(fc_map_id)s, attributes %(attributes)s" + +#~ msgid "enter: _is_vdisk_defined: vdisk %s " +#~ msgstr "enter: _is_vdisk_defined: vdisk %s " + +#~ msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +#~ msgstr "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " + +#~ msgid "enter: _delete_vdisk: vdisk %s" +#~ msgstr "enter: _delete_vdisk: vdisk %s" + +#~ msgid "warning: Tried to delete vdisk %s but it does not exist." +#~ msgstr "warning: Tried to delete vdisk %s but it does not exist." 
+ +#~ msgid "leave: _delete_vdisk: vdisk %s" +#~ msgstr "leave: _delete_vdisk: vdisk %s" + +#~ msgid "" +#~ "_add_vdisk_copy %(name)s - did not find" +#~ " success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "_get_vdisk_copy_attrs: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "_get_pool_attrs: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "enter: _execute_command_and_parse_attributes: command %s" +#~ msgstr "enter: _execute_command_and_parse_attributes: command %s" + +#~ msgid "" +#~ "leave: _execute_command_and_parse_attributes:\n" +#~ "command: %(cmd)s\n" +#~ "attributes: %(attr)s" +#~ msgstr "" +#~ "leave: _execute_command_and_parse_attributes:\n" +#~ "command: %(cmd)s\n" +#~ "attributes: %(attr)s" + +#~ msgid "" +#~ "_get_hdr_dic: attribute headers and values do not match.\n" +#~ " Headers: %(header)s\n" +#~ " Values: %(row)s" +#~ msgstr "" +#~ "_get_hdr_dic: attribute headers and values do not match.\n" +#~ " Headers: %(header)s\n" +#~ " Values: %(row)s" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ "stdout: %(out)s\n" +#~ "stderr: %(err)s\n" +#~ msgstr "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ "stdout: %(out)s\n" +#~ "stderr: %(err)s\n" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" + +#~ msgid "Did not find expected column in %(fun)s: %(hdr)s" +#~ msgstr "Did not find expected column in %(fun)s: %(hdr)s" + +#~ msgid "" +#~ "Volume size %(size)s cannot be lesser" +#~ " than the snapshot size %(snap_size)s. " +#~ "They must be >= original snapshot " +#~ "size." +#~ msgstr "" + +#~ msgid "" +#~ "Clones currently disallowed when %(size)s " +#~ "< %(source_size)s. They must be >= " +#~ "original volume size." +#~ msgstr "" + +#~ msgid "" +#~ "Size of specified image %(image_size)s " +#~ "is larger than volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "" +#~ "Image minDisk size %(min_disk)s is " +#~ "larger than the volume size " +#~ "%(volume_size)s." 
+#~ msgstr "" + +#~ msgid "Updating volume %(volume_id)s with %(update)s" +#~ msgstr "" + +#~ msgid "Volume %s: resetting 'creating' status failed" +#~ msgstr "" + +#~ msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s" +#~ msgstr "" + +#~ msgid "Marking volume %s as bootable" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting download of %(image_id)s " +#~ "(%(image_location)s) to volume %(volume_id)s" +#~ msgstr "" + +#~ msgid "" +#~ "Downloaded image %(image_id)s (%(image_location)s)" +#~ " to volume %(volume_id)s successfully" +#~ msgstr "" + +#~ msgid "" +#~ "Creating volume glance metadata for " +#~ "volume %(volume_id)s backed by image " +#~ "%(image_id)s with: %(vol_metadata)s" +#~ msgstr "" + +#~ msgid "" +#~ "Cloning %(volume_id)s from image %(image_id)s" +#~ " at location %(image_location)s" +#~ msgstr "" + diff --git a/cinder/locale/es/LC_MESSAGES/cinder.po b/cinder/locale/es/LC_MESSAGES/cinder.po index 0822c7572a..7b4327601c 100644 --- a/cinder/locale/es/LC_MESSAGES/cinder.po +++ b/cinder/locale/es/LC_MESSAGES/cinder.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: cinder\n" "Report-Msgid-Bugs-To: FULL NAME \n" -"POT-Creation-Date: 2012-04-08 23:04+0000\n" +"POT-Creation-Date: 2014-02-03 06:16+0000\n" "PO-Revision-Date: 2012-03-10 06:08+0000\n" "Last-Translator: Oscar Rosario \n" "Language-Team: Spanish \n" @@ -15,8206 +15,10755 @@ msgstr "" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 0.9.6\n" +"Generated-By: Babel 1.3\n" -#: cinder/context.py:59 +#: cinder/context.py:61 #, python-format msgid "Arguments dropped when creating context: %s" msgstr "" -#: cinder/context.py:90 +#: cinder/context.py:102 #, python-format msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" msgstr "" -#: cinder/crypto.py:48 -msgid "Filename of root CA" -msgstr "Nombre de fichero de la CA raíz" - -#: cinder/crypto.py:51 -msgid "Filename of private key" -msgstr "Nombre de fichero de la clave privada" +#: cinder/exception.py:66 cinder/brick/exception.py:33 +msgid "An unknown exception occurred." +msgstr "Una excepcion desconocida ha ocurrido" -#: cinder/crypto.py:54 -msgid "Filename of root Certificate Revocation List" +#: cinder/exception.py:88 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" msgstr "" -#: cinder/crypto.py:57 -msgid "Where we keep our keys" -msgstr "Donde guardamos nuestras claves" - -#: cinder/crypto.py:60 -msgid "Where we keep our root CA" -msgstr "Dónde guardamos nuestra CA raíz" - -#: cinder/crypto.py:63 -msgid "Should we use a CA for each project?" -msgstr "¿Deberíamos usar una CA para cada proyecto?" - -#: cinder/crypto.py:67 +#: cinder/exception.py:107 #, python-format -msgid "Subject for certificate for users, %s for project, user, timestamp" +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" msgstr "" -"Sujeto (Subject) para el certificado de usuarios, %s para el proyecto, " -"usuario, marca de tiempo" -#: cinder/crypto.py:72 +#: cinder/exception.py:112 #, python-format -msgid "Subject for certificate for projects, %s for project, timestamp" +msgid "Volume driver reported an error: %(message)s" msgstr "" -"Sujeto (Subject) para el certificado del proyecto, %s para el proyecto, " -"marca de tiempo" - -#: cinder/crypto.py:292 -#, python-format -msgid "Flags path: %s" -msgstr "Ruta a las opciones: %s" -#: cinder/exception.py:56 -msgid "Unexpected error while running command." 
-msgstr "Error inesperado mientras el comando se ejecutaba" - -#: cinder/exception.py:59 +#: cinder/exception.py:116 #, python-format -msgid "" -"%(description)s\n" -"Command: %(cmd)s\n" -"Exit code: %(exit_code)s\n" -"Stdout: %(stdout)r\n" -"Stderr: %(stderr)r" -msgstr "" -"%(description)s\n" -"Comando: %(cmd)s\n" -"Código de salida: %(exit_code)s\n" -"Stdout: %(stdout)r\n" -"Stderr: %(stderr)r" - -#: cinder/exception.py:94 -msgid "DB exception wrapped." -msgstr "" - -#: cinder/exception.py:155 -msgid "An unknown exception occurred." -msgstr "Una excepcion desconocida ha ocurrido" - -#: cinder/exception.py:178 -msgid "Failed to decrypt text" -msgstr "Fallo al desencriptar el texto" - -#: cinder/exception.py:182 -msgid "Failed to paginate through images from image service" +msgid "Backup driver reported an error: %(message)s" msgstr "" -#: cinder/exception.py:186 -msgid "Virtual Interface creation failed" -msgstr "Creacion de interfaz virtual fallida" - -#: cinder/exception.py:190 -msgid "5 attempts to create virtual interfacewith unique mac address failed" -msgstr "" - -#: cinder/exception.py:195 -msgid "Connection to glance failed" -msgstr "Coneccion con glance fallida" - -#: cinder/exception.py:199 -msgid "Connection to melange failed" +#: cinder/exception.py:120 +#, python-format +msgid "Connection to glance failed: %(reason)s" msgstr "" -#: cinder/exception.py:203 +#: cinder/exception.py:124 msgid "Not authorized." msgstr "No Autorizado" -#: cinder/exception.py:208 +#: cinder/exception.py:129 msgid "User does not have admin privileges" msgstr "El usuario no tiene privilegios de administrador" -#: cinder/exception.py:212 +#: cinder/exception.py:133 #, python-format msgid "Policy doesn't allow %(action)s to be performed." msgstr "" -#: cinder/exception.py:216 +#: cinder/exception.py:137 #, fuzzy, python-format msgid "Not authorized for image %(image_id)s." msgstr "no hay método para el mensaje: %s" -#: cinder/exception.py:220 +#: cinder/exception.py:141 +msgid "Volume driver not ready." +msgstr "" + +#: cinder/exception.py:145 cinder/brick/exception.py:74 msgid "Unacceptable parameters." msgstr "Parametros inaceptables" -#: cinder/exception.py:225 -msgid "Invalid snapshot" -msgstr "Captura no valida" +#: cinder/exception.py:150 +#, python-format +msgid "Invalid snapshot: %(reason)s" +msgstr "" -#: cinder/exception.py:229 +#: cinder/exception.py:154 #, python-format -msgid "Volume %(volume_id)s is not attached to anything" +msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." msgstr "" -#: cinder/exception.py:233 cinder/api/openstack/compute/contrib/keypairs.py:113 -msgid "Keypair data is invalid" +#: cinder/exception.py:159 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." msgstr "" -#: cinder/exception.py:237 +#: cinder/exception.py:163 msgid "Failed to load data into json format" msgstr "Fallo al ingresar informacion en formato json" -#: cinder/exception.py:241 -msgid "The request is invalid." +#: cinder/exception.py:167 +#, fuzzy +msgid "The results are invalid." msgstr "La petición es inválida." -#: cinder/exception.py:245 -#, python-format -msgid "Invalid signature %(signature)s for user %(user)s." -msgstr "Firma invalida %(signature)s para el usuario %(user)s" - -#: cinder/exception.py:249 -msgid "Invalid input received" -msgstr "Entrada invalida recibida" - -#: cinder/exception.py:253 +#: cinder/exception.py:171 #, python-format -msgid "Invalid instance type %(instance_type)s." 
-msgstr "" - -#: cinder/exception.py:257 -msgid "Invalid volume type" -msgstr "" - -#: cinder/exception.py:261 -msgid "Invalid volume" +msgid "Invalid input received: %(reason)s" msgstr "" -#: cinder/exception.py:265 +#: cinder/exception.py:175 #, python-format -msgid "Invalid port range %(from_port)s:%(to_port)s. %(msg)s" -msgstr "Rango de puertos invalido %(from_port)s:%(to_port)s. %(msg)s" +msgid "Invalid volume type: %(reason)s" +msgstr "" -#: cinder/exception.py:269 +#: cinder/exception.py:179 #, python-format -msgid "Invalid IP protocol %(protocol)s." -msgstr "Protocolo IP invalido %(protocol)s" +msgid "Invalid volume: %(reason)s" +msgstr "" -#: cinder/exception.py:273 +#: cinder/exception.py:183 #, python-format msgid "Invalid content type %(content_type)s." msgstr "Tipo de contenido invalido %(content_type)s." -#: cinder/exception.py:277 +#: cinder/exception.py:187 #, python-format -msgid "Invalid cidr %(cidr)s." -msgstr "Cidr %(cidr)s invalido" - -#: cinder/exception.py:281 -msgid "Invalid reuse of an RPC connection." -msgstr "Reuso invalido de una coneccion RPC" - -#: cinder/exception.py:285 -msgid "Invalid Parameter: Unicode is not supported by the current database." +msgid "Invalid host: %(reason)s" msgstr "" -#: cinder/exception.py:292 +#: cinder/exception.py:193 cinder/brick/exception.py:81 #, python-format msgid "%(err)s" msgstr "" -#: cinder/exception.py:296 +#: cinder/exception.py:197 #, python-format -msgid "" -"Cannot perform action '%(action)s' on aggregate %(aggregate_id)s. Reason:" -" %(reason)s." -msgstr "" - -#: cinder/exception.py:301 -#, python-format -msgid "Group not valid. Reason: %(reason)s" +msgid "Invalid auth key: %(reason)s" msgstr "" -#: cinder/exception.py:305 +#: cinder/exception.py:201 #, python-format -msgid "" -"Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot %(method)s while" -" the instance is in this state." +msgid "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" msgstr "" -#: cinder/exception.py:310 -#, python-format -msgid "Instance %(instance_id)s is not running." -msgstr "La instacia %(instance_id)s no se esta ejecutando" - -#: cinder/exception.py:314 -#, python-format -msgid "Instance %(instance_id)s is not suspended." -msgstr "La instacia %(instance_id)s no esta suspendida" - -#: cinder/exception.py:318 -#, python-format -msgid "Instance %(instance_id)s is not in rescue mode" -msgstr "La instancia %(instance_id)s no esta en modo de rescate" - -#: cinder/exception.py:322 -msgid "Failed to suspend instance" -msgstr "Fallo al suspender la instancia" - -#: cinder/exception.py:326 -msgid "Failed to resume server" -msgstr "Fallo al resumir el servidor" - -#: cinder/exception.py:330 -msgid "Failed to reboot instance" -msgstr "Fallo a reinicia la instancia" - -#: cinder/exception.py:334 -#, fuzzy -msgid "Failed to terminate instance" -msgstr "Fallo a reinicia la instancia" - -#: cinder/exception.py:338 +#: cinder/exception.py:206 msgid "Service is unavailable at this time." msgstr "El servicio no esta disponible en este momento" -#: cinder/exception.py:342 -msgid "Volume service is unavailable at this time." -msgstr "" - -#: cinder/exception.py:346 -msgid "Compute service is unavailable at this time." -msgstr "El servicio de computo no esta disponible en este momento" - -#: cinder/exception.py:350 -#, python-format -msgid "Unable to migrate instance (%(instance_id)s) to current host (%(host)s)." 
-msgstr "" -"Incapaz de emigrar la instancia %(instance_id)s al actual anfitrion " -"(%(host)s)" - -#: cinder/exception.py:355 -msgid "Destination compute host is unavailable at this time." -msgstr "El destino del anfitrion de computo no esta disponible en este momento" - -#: cinder/exception.py:359 -msgid "Original compute host is unavailable at this time." -msgstr "El anfitrion de computo no esta disponible en este momento" - -#: cinder/exception.py:363 -msgid "The supplied hypervisor type of is invalid." -msgstr "" - -#: cinder/exception.py:367 -msgid "The instance requires a newer hypervisor version than has been provided." -msgstr "" - -#: cinder/exception.py:372 -#, python-format -msgid "" -"The supplied disk path (%(path)s) already exists, it is expected not to " -"exist." -msgstr "" - -#: cinder/exception.py:377 -#, python-format -msgid "The supplied device path (%(path)s) is invalid." -msgstr "" - -#: cinder/exception.py:381 -#, python-format -msgid "The supplied device (%(device)s) is busy." -msgstr "" - -#: cinder/exception.py:385 -msgid "Unacceptable CPU info" -msgstr "Informacion del CPU inaceptable" - -#: cinder/exception.py:389 -#, python-format -msgid "%(address)s is not a valid IP v4/6 address." -msgstr "%(address)s no es una direccion IP v4/6 valida" - -#: cinder/exception.py:393 -#, python-format -msgid "" -"VLAN tag is not appropriate for the port group %(bridge)s. Expected VLAN " -"tag is %(tag)s, but the one associated with the port group is %(pgroup)s." -msgstr "" - -#: cinder/exception.py:399 -#, python-format -msgid "" -"vSwitch which contains the port group %(bridge)s is not associated with " -"the desired physical adapter. Expected vSwitch is %(expected)s, but the " -"one associated is %(actual)s." -msgstr "" - -#: cinder/exception.py:406 -#, python-format -msgid "Disk format %(disk_format)s is not acceptable" -msgstr "Formato de disco %(disk_format)s no es aceptable" - -#: cinder/exception.py:410 +#: cinder/exception.py:210 #, python-format msgid "Image %(image_id)s is unacceptable: %(reason)s" msgstr "" -#: cinder/exception.py:414 +#: cinder/exception.py:214 #, python-format -msgid "Instance %(instance_id)s is unacceptable: %(reason)s" +msgid "The device in the path %(path)s is unavailable: %(reason)s" msgstr "" -#: cinder/exception.py:418 +#: cinder/exception.py:218 #, python-format -msgid "Ec2 id %(ec2_id)s is unacceptable." +msgid "Expected a uuid but received %(uuid)s." msgstr "" -#: cinder/exception.py:422 +#: cinder/exception.py:222 cinder/brick/exception.py:68 msgid "Resource could not be found." msgstr "" -#: cinder/exception.py:427 +#: cinder/exception.py:228 #, python-format -msgid "Required flag %(flag)s not set." +msgid "Volume %(volume_id)s could not be found." msgstr "" -#: cinder/exception.py:431 +#: cinder/exception.py:232 #, python-format -msgid "Volume %(volume_id)s could not be found." +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." msgstr "" -#: cinder/exception.py:435 +#: cinder/exception.py:237 #, python-format -msgid "Unable to locate account %(account_name)s on Solidfire device" +msgid "" +"Volume %(volume_id)s has no administration metadata with key " +"%(metadata_key)s." msgstr "" -#: cinder/exception.py:440 +#: cinder/exception.py:242 #, python-format -msgid "Volume not found for instance %(instance_id)s." +msgid "Invalid metadata: %(reason)s" msgstr "" -#: cinder/exception.py:444 +#: cinder/exception.py:246 #, python-format -msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." 
+msgid "Invalid metadata size: %(reason)s" msgstr "" -#: cinder/exception.py:449 -msgid "Zero volume types found." +#: cinder/exception.py:250 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." msgstr "" -#: cinder/exception.py:453 +#: cinder/exception.py:255 #, python-format msgid "Volume type %(volume_type_id)s could not be found." msgstr "" -#: cinder/exception.py:457 +#: cinder/exception.py:259 #, python-format msgid "Volume type with name %(volume_type_name)s could not be found." msgstr "" -#: cinder/exception.py:462 +#: cinder/exception.py:264 #, python-format msgid "" "Volume Type %(volume_type_id)s has no extra specs with key " "%(extra_specs_key)s." msgstr "" -#: cinder/exception.py:467 +#: cinder/exception.py:269 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s deletion is not allowed with volumes " +"present with the type." +msgstr "" + +#: cinder/exception.py:274 #, python-format msgid "Snapshot %(snapshot_id)s could not be found." msgstr "" -#: cinder/exception.py:471 +#: cinder/exception.py:278 #, python-format msgid "deleting volume %(volume_name)s that has snapshot" msgstr "" -#: cinder/exception.py:475 +#: cinder/exception.py:282 #, python-format msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" msgstr "" -#: cinder/exception.py:480 +#: cinder/exception.py:287 #, python-format msgid "No target id found for volume %(volume_id)s." msgstr "" -#: cinder/exception.py:484 +#: cinder/exception.py:291 #, python-format -msgid "No disk at %(location)s" +msgid "Invalid image href %(image_href)s." msgstr "" -#: cinder/exception.py:488 +#: cinder/exception.py:295 #, python-format -msgid "Could not find a handler for %(driver_type)s volume." +msgid "Image %(image_id)s could not be found." msgstr "" -#: cinder/exception.py:492 +#: cinder/exception.py:299 #, python-format -msgid "Invalid image href %(image_href)s." -msgstr "" - -#: cinder/exception.py:496 -msgid "" -"Some images have been stored via hrefs. This version of the api does not " -"support displaying image hrefs." +msgid "Service %(service_id)s could not be found." msgstr "" -#: cinder/exception.py:501 +#: cinder/exception.py:303 #, python-format -msgid "Image %(image_id)s could not be found." +msgid "Host %(host)s could not be found." msgstr "" -#: cinder/exception.py:505 +#: cinder/exception.py:307 #, python-format -msgid "Kernel not found for image %(image_id)s." +msgid "Scheduler Host Filter %(filter_name)s could not be found." msgstr "" -#: cinder/exception.py:509 +#: cinder/exception.py:311 #, python-format -msgid "User %(user_id)s could not be found." +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." msgstr "" -#: cinder/exception.py:513 +#: cinder/exception.py:315 #, python-format -msgid "Project %(project_id)s could not be found." +msgid "Could not find binary %(binary)s on host %(host)s." msgstr "" -#: cinder/exception.py:517 +#: cinder/exception.py:319 #, python-format -msgid "User %(user_id)s is not a member of project %(project_id)s." +msgid "Invalid reservation expiration %(expire)s." msgstr "" -#: cinder/exception.py:521 +#: cinder/exception.py:323 #, python-format -msgid "Role %(role_id)s could not be found." +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" msgstr "" -#: cinder/exception.py:525 -msgid "Cannot find SR to read/write VDI." 
+#: cinder/exception.py:328 +msgid "Quota could not be found" msgstr "" -#: cinder/exception.py:529 +#: cinder/exception.py:332 #, python-format -msgid "%(req)s is required to create a network." +msgid "Unknown quota resources %(unknown)s." msgstr "" -#: cinder/exception.py:533 +#: cinder/exception.py:336 #, python-format -msgid "Network %(network_id)s could not be found." +msgid "Quota for project %(project_id)s could not be found." msgstr "" -#: cinder/exception.py:537 +#: cinder/exception.py:340 #, python-format -msgid "Network could not be found for bridge %(bridge)s" +msgid "Quota class %(class_name)s could not be found." msgstr "" -#: cinder/exception.py:541 +#: cinder/exception.py:344 #, python-format -msgid "Network could not be found for uuid %(uuid)s" +msgid "Quota usage for project %(project_id)s could not be found." msgstr "" -#: cinder/exception.py:545 +#: cinder/exception.py:348 #, python-format -msgid "Network could not be found with cidr %(cidr)s." +msgid "Quota reservation %(uuid)s could not be found." msgstr "" -#: cinder/exception.py:549 +#: cinder/exception.py:352 #, python-format -msgid "Network could not be found for instance %(instance_id)s." -msgstr "" - -#: cinder/exception.py:553 -msgid "No networks defined." +msgid "Quota exceeded for resources: %(overs)s" msgstr "" -#: cinder/exception.py:557 +#: cinder/exception.py:356 #, python-format -msgid "" -"Either Network uuid %(network_uuid)s is not present or is not assigned to" -" the project %(project_id)s." +msgid "File %(file_path)s could not be found." msgstr "" -#: cinder/exception.py:562 -#, python-format -msgid "Host is not set to the network (%(network_id)s)." -msgstr "" +#: cinder/exception.py:365 +#, fuzzy, python-format +msgid "Volume Type %(id)s already exists." +msgstr "el grupo %s ya existe" -#: cinder/exception.py:566 +#: cinder/exception.py:369 #, python-format -msgid "Network %(network)s has active ports, cannot delete." -msgstr "" - -#: cinder/exception.py:570 -msgid "Could not find the datastore reference(s) which the VM uses." +msgid "Volume type encryption for type %(type_id)s already exists." msgstr "" -#: cinder/exception.py:574 +#: cinder/exception.py:373 #, python-format -msgid "No fixed IP associated with id %(id)s." +msgid "Malformed message body: %(reason)s" msgstr "" -#: cinder/exception.py:578 +#: cinder/exception.py:377 #, python-format -msgid "Fixed ip not found for address %(address)s." +msgid "Could not find config at %(path)s" msgstr "" -#: cinder/exception.py:582 +#: cinder/exception.py:381 +#, fuzzy, python-format +msgid "Could not find parameter %(param)s" +msgstr "Imposible encontrar SR en VBD %s" + +#: cinder/exception.py:385 #, python-format -msgid "Instance %(instance_id)s has zero fixed ips." +msgid "Could not load paste app '%(name)s' from %(path)s" msgstr "" -#: cinder/exception.py:586 +#: cinder/exception.py:389 #, python-format -msgid "Network host %(host)s has zero fixed ips in network %(network_id)s." +msgid "No valid host was found. %(reason)s" msgstr "" -#: cinder/exception.py:591 +#: cinder/exception.py:398 #, python-format -msgid "Instance %(instance_id)s doesn't have fixed ip '%(ip)s'." +msgid "Host %(host)s is not up or doesn't exist." msgstr "" -#: cinder/exception.py:595 +#: cinder/exception.py:402 #, python-format -msgid "Host %(host)s has zero fixed ips." 
+msgid "Quota exceeded: code=%(code)s" msgstr "" -#: cinder/exception.py:599 +#: cinder/exception.py:409 #, python-format msgid "" -"Fixed IP address (%(address)s) does not exist in network " -"(%(network_uuid)s)." +"Requested volume or snapshot exceeds allowed Gigabytes quota. Requested " +"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." msgstr "" -#: cinder/exception.py:604 +#: cinder/exception.py:415 #, python-format -msgid "Fixed IP address %(address)s is already in use." +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" msgstr "" -#: cinder/exception.py:608 +#: cinder/exception.py:419 #, python-format -msgid "Fixed IP address %(address)s is invalid." +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" msgstr "" -#: cinder/exception.py:612 -msgid "Zero fixed ips available." -msgstr "" - -#: cinder/exception.py:616 -msgid "Zero fixed ips could be found." -msgstr "" - -#: cinder/exception.py:620 +#: cinder/exception.py:423 #, python-format -msgid "Floating ip not found for id %(id)s." +msgid "Detected more than one volume with name %(vol_name)s" msgstr "" -#: cinder/exception.py:624 +#: cinder/exception.py:427 #, python-format -msgid "The DNS entry %(name)s already exists in domain %(domain)s." +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" msgstr "" -#: cinder/exception.py:628 +#: cinder/exception.py:432 #, python-format -msgid "Floating ip not found for address %(address)s." +msgid "Unknown or unsupported command %(cmd)s" msgstr "" -#: cinder/exception.py:632 +#: cinder/exception.py:436 #, python-format -msgid "Floating ip not found for host %(host)s." -msgstr "" - -#: cinder/exception.py:636 -msgid "Zero floating ips available." +msgid "Malformed response to command %(cmd)s: %(reason)s" msgstr "" -#: cinder/exception.py:640 +#: cinder/exception.py:440 #, python-format -msgid "Floating ip %(address)s is associated." +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" msgstr "" -#: cinder/exception.py:644 +#: cinder/exception.py:444 #, python-format -msgid "Floating ip %(address)s is not associated." -msgstr "" - -#: cinder/exception.py:648 -msgid "Zero floating ips exist." +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" msgstr "" -#: cinder/exception.py:652 +#: cinder/exception.py:449 #, python-format -msgid "Interface %(interface)s not found." +msgid "Glance metadata for volume/snapshot %(id)s cannot be found." msgstr "" -#: cinder/exception.py:656 +#: cinder/exception.py:453 #, python-format -msgid "Keypair %(name)s not found for user %(user_id)s" +msgid "Failed to export for volume: %(reason)s" msgstr "" -#: cinder/exception.py:660 +#: cinder/exception.py:457 #, python-format -msgid "Certificate %(certificate_id)s not found." +msgid "Failed to create metadata for volume: %(reason)s" msgstr "" -#: cinder/exception.py:664 +#: cinder/exception.py:461 #, python-format -msgid "Service %(service_id)s could not be found." +msgid "Failed to update metadata for volume: %(reason)s" msgstr "" -#: cinder/exception.py:668 +#: cinder/exception.py:465 #, python-format -msgid "Host %(host)s could not be found." +msgid "Failed to copy metadata to volume: %(reason)s" msgstr "" -#: cinder/exception.py:672 -#, python-format -msgid "Compute host %(host)s could not be found." 
+#: cinder/exception.py:469 +#, fuzzy, python-format +msgid "Failed to copy image to volume: %(reason)s" +msgstr "Imposible encontrar volumen %s" + +#: cinder/exception.py:473 +msgid "Invalid Ceph args provided for backup rbd operation" msgstr "" -#: cinder/exception.py:676 -#, python-format -msgid "Could not find binary %(binary)s on host %(host)s." +#: cinder/exception.py:477 +msgid "An error has occurred during backup operation" msgstr "" -#: cinder/exception.py:680 -#, python-format -msgid "Auth token %(token)s could not be found." +#: cinder/exception.py:481 +msgid "Backup RBD operation failed" msgstr "" -#: cinder/exception.py:684 +#: cinder/exception.py:485 #, python-format -msgid "Access Key %(access_key)s could not be found." +msgid "Backup %(backup_id)s could not be found." msgstr "" -#: cinder/exception.py:688 -msgid "Quota could not be found" +#: cinder/exception.py:489 +msgid "Failed to identify volume backend." msgstr "" -#: cinder/exception.py:692 -#, python-format -msgid "Quota for project %(project_id)s could not be found." -msgstr "" +#: cinder/exception.py:493 +#, fuzzy, python-format +msgid "Invalid backup: %(reason)s" +msgstr "backend inválido: %s" -#: cinder/exception.py:696 +#: cinder/exception.py:497 #, python-format -msgid "Quota class %(class_name)s could not be found." +msgid "Connection to swift failed: %(reason)s" msgstr "" -#: cinder/exception.py:700 +#: cinder/exception.py:501 #, python-format -msgid "Security group %(security_group_id)s not found." +msgid "Transfer %(transfer_id)s could not be found." msgstr "" -#: cinder/exception.py:704 +#: cinder/exception.py:505 #, python-format -msgid "Security group %(security_group_id)s not found for project %(project_id)s." +msgid "Volume migration failed: %(reason)s" msgstr "" -#: cinder/exception.py:709 +#: cinder/exception.py:509 #, python-format -msgid "Security group with rule %(rule_id)s not found." +msgid "SSH command injection detected: %(command)s" msgstr "" -#: cinder/exception.py:713 +#: cinder/exception.py:513 #, python-format -msgid "" -"Security group %(security_group_id)s is already associated with the " -"instance %(instance_id)s" +msgid "QoS Specs %(specs_id)s already exists." msgstr "" -#: cinder/exception.py:718 +#: cinder/exception.py:517 #, python-format -msgid "" -"Security group %(security_group_id)s is not associated with the instance " -"%(instance_id)s" +msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." msgstr "" -#: cinder/exception.py:723 +#: cinder/exception.py:522 #, python-format -msgid "Migration %(migration_id)s could not be found." +msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." msgstr "" -#: cinder/exception.py:727 +#: cinder/exception.py:527 #, python-format -msgid "Migration not found for instance %(instance_id)s with status %(status)s." +msgid "No such QoS spec %(specs_id)s." msgstr "" -#: cinder/exception.py:732 +#: cinder/exception.py:531 #, python-format -msgid "Console pool %(pool_id)s could not be found." +msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." msgstr "" -#: cinder/exception.py:736 +#: cinder/exception.py:536 #, python-format -msgid "" -"Console pool of type %(console_type)s for compute host %(compute_host)s " -"on proxy host %(host)s not found." +msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." msgstr "" -#: cinder/exception.py:742 +#: cinder/exception.py:541 #, python-format -msgid "Console %(console_id)s could not be found." 
+msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." msgstr "" -#: cinder/exception.py:746 +#: cinder/exception.py:546 #, python-format -msgid "Console for instance %(instance_id)s could not be found." +msgid "Invalid qos specs: %(reason)s" msgstr "" -#: cinder/exception.py:750 +#: cinder/exception.py:550 #, python-format -msgid "" -"Console for instance %(instance_id)s in pool %(pool_id)s could not be " -"found." +msgid "QoS Specs %(specs_id)s is still associated with entities." msgstr "" -#: cinder/exception.py:755 +#: cinder/exception.py:554 #, python-format -msgid "Invalid console type %(console_type)s " +msgid "key manager error: %(reason)s" msgstr "" -#: cinder/exception.py:759 -msgid "Zero instance types found." +#: cinder/exception.py:560 +msgid "Coraid Cinder Driver exception." msgstr "" -#: cinder/exception.py:763 -#, python-format -msgid "Instance type %(instance_type_id)s could not be found." +#: cinder/exception.py:564 +msgid "Failed to encode json data." msgstr "" -#: cinder/exception.py:767 -#, python-format -msgid "Instance type with name %(instance_type_name)s could not be found." +#: cinder/exception.py:568 +msgid "Login on ESM failed." msgstr "" -#: cinder/exception.py:772 -#, python-format -msgid "Flavor %(flavor_id)s could not be found." +#: cinder/exception.py:572 +msgid "Relogin on ESM failed." msgstr "" -#: cinder/exception.py:776 +#: cinder/exception.py:576 #, python-format -msgid "Cell %(cell_id)s could not be found." +msgid "Group with name \"%(group_name)s\" not found." msgstr "" -#: cinder/exception.py:780 +#: cinder/exception.py:580 #, python-format -msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgid "ESM configure request failed: %(message)s." msgstr "" -#: cinder/exception.py:784 +#: cinder/exception.py:584 #, python-format -msgid "Scheduler cost function %(cost_fn_str)s could not be found." +msgid "Coraid ESM not available with reason: %(reason)s." msgstr "" -#: cinder/exception.py:789 -#, python-format -msgid "Scheduler weight flag not found: %(flag_name)s" +#: cinder/exception.py:589 +msgid "Zadara Cinder Driver exception." msgstr "" -#: cinder/exception.py:793 -#, python-format -msgid "Instance %(instance_id)s has no metadata with key %(metadata_key)s." +#: cinder/exception.py:593 +#, fuzzy, python-format +msgid "Unable to create server object for initiator %(name)s" msgstr "" +"No es posible crear el VDI en SR %(sr_ref)s para la instancia " +"%(instance_name)s" -#: cinder/exception.py:798 +#: cinder/exception.py:597 #, python-format -msgid "" -"Instance Type %(instance_type_id)s has no extra specs with key " -"%(extra_specs_key)s." +msgid "Unable to find server object for initiator %(name)s" msgstr "" -#: cinder/exception.py:803 -msgid "LDAP object could not be found" +#: cinder/exception.py:601 +msgid "Unable to find any active VPSA controller" msgstr "" -#: cinder/exception.py:807 +#: cinder/exception.py:605 #, python-format -msgid "LDAP user %(user_id)s could not be found." +msgid "Failed to retrieve attachments for volume %(name)s" msgstr "" -#: cinder/exception.py:811 +#: cinder/exception.py:609 #, python-format -msgid "LDAP group %(group_id)s could not be found." +msgid "Invalid attachment info for volume %(name)s: %(reason)s" msgstr "" -#: cinder/exception.py:815 +#: cinder/exception.py:613 #, python-format -msgid "LDAP user %(user_id)s is not a member of group %(group_id)s." 
+msgid "Bad HTTP response status %(status)s" msgstr "" -#: cinder/exception.py:819 -#, python-format -msgid "File %(file_path)s could not be found." +#: cinder/exception.py:618 +msgid "Bad response from SolidFire API" msgstr "" -#: cinder/exception.py:823 -msgid "Zero files could be found." +#: cinder/exception.py:622 +msgid "SolidFire Cinder Driver exception" msgstr "" -#: cinder/exception.py:827 +#: cinder/exception.py:626 #, python-format -msgid "Virtual switch associated with the network adapter %(adapter)s not found." +msgid "Error in SolidFire API response: data=%(data)s" msgstr "" -#: cinder/exception.py:832 +#: cinder/exception.py:630 #, python-format -msgid "Network adapter %(adapter)s could not be found." +msgid "Unable to locate account %(account_name)s on Solidfire device" msgstr "" -#: cinder/exception.py:836 +#: cinder/exception.py:636 #, python-format -msgid "Class %(class_name)s could not be found: %(exception)s" +msgid "Invalid 3PAR Domain: %(err)s" msgstr "" -#: cinder/exception.py:840 -msgid "Action not allowed." +#: cinder/exception.py:641 +#, fuzzy +msgid "Unknown NFS exception" +msgstr "Una excepcion desconocida ha ocurrido" + +#: cinder/exception.py:645 +msgid "No mounted NFS shares found" msgstr "" -#: cinder/exception.py:844 +#: cinder/exception.py:649 cinder/exception.py:662 #, python-format -msgid "Unable to use global role %(role_id)s" +msgid "There is no share which can host %(volume_size)sG" msgstr "" -#: cinder/exception.py:848 -msgid "Rotation is not allowed for snapshots" +#: cinder/exception.py:654 +#, fuzzy +msgid "Unknown Gluster exception" +msgstr "Una excepcion desconocida ha ocurrido" + +#: cinder/exception.py:658 +msgid "No mounted Gluster shares found" msgstr "" -#: cinder/exception.py:852 -msgid "Rotation param is required for backup image_type" +#: cinder/manager.py:133 +msgid "Notifying Schedulers of capabilities ..." msgstr "" -#: cinder/exception.py:861 -#, python-format -msgid "Key pair %(key_name)s already exists." +#: cinder/policy.py:30 +msgid "JSON file representing policy" msgstr "" -#: cinder/exception.py:865 -#, python-format -msgid "User %(user)s already exists." +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" msgstr "" -#: cinder/exception.py:869 +#: cinder/quota.py:105 #, python-format -msgid "LDAP user %(user)s already exists." +msgid "" +"Default quota for resource: %(res)s is set by the default quota flag: " +"quota_%(res)s, it is now deprecated. Please use the the default quota " +"class for default quota." msgstr "" -#: cinder/exception.py:873 +#: cinder/quota.py:748 #, python-format -msgid "LDAP group %(group)s already exists." +msgid "Created reservations %s" msgstr "" -#: cinder/exception.py:877 +#: cinder/quota.py:770 #, python-format -msgid "User %(uid)s is already a member of the group %(group_dn)s" -msgstr "El usuario %(uid)s es actualmente miembro del grupo %(group_dn)s" +msgid "Failed to commit reservations %s" +msgstr "" -#: cinder/exception.py:882 +#: cinder/quota.py:790 #, python-format -msgid "Project %(project)s already exists." +msgid "Failed to roll back reservations %s" msgstr "" -#: cinder/exception.py:886 -#, python-format -msgid "Instance %(name)s already exists." +#: cinder/quota.py:876 +msgid "Cannot register resource" msgstr "" -#: cinder/exception.py:890 -#, python-format -msgid "Instance Type %(name)s already exists." 
+#: cinder/quota.py:879 +msgid "Cannot register resources" msgstr "" -#: cinder/exception.py:894 +#: cinder/quota_utils.py:46 #, python-format -msgid "Volume Type %(name)s already exists." +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume - " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" msgstr "" -#: cinder/exception.py:898 +#: cinder/quota_utils.py:56 cinder/transfer/api.py:168 +#: cinder/volume/flows/api/create_volume.py:520 #, python-format -msgid "%(path)s is on shared storage: %(reason)s" -msgstr "" - -#: cinder/exception.py:902 -msgid "Migration error" +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" msgstr "" -#: cinder/exception.py:906 +#: cinder/service.py:95 #, python-format -msgid "Malformed message body: %(reason)s" +msgid "Starting %(topic)s node (version %(version_string)s)" msgstr "" -#: cinder/exception.py:910 +#: cinder/service.py:108 cinder/openstack/common/rpc/service.py:47 #, python-format -msgid "Could not find config at %(path)s" +msgid "Creating Consumer connection for Service %s" msgstr "" -#: cinder/exception.py:914 +#: cinder/service.py:148 #, python-format -msgid "Could not load paste app '%(name)s' from %(path)s" -msgstr "" - -#: cinder/exception.py:918 -msgid "When resizing, instances must change size!" +msgid "" +"Report interval must be less than service down time. Current config " +"service_down_time: %(service_down_time)s, report_interval for this: " +"service is: %(report_interval)s. Setting global service_down_time to: " +"%(new_down_time)s" msgstr "" -#: cinder/exception.py:922 -msgid "Image is larger than instance type allows" -msgstr "" +#: cinder/service.py:216 +msgid "Service killed that has no database entry" +msgstr "Se detuvo un servicio sin entrada en la base de datos" -#: cinder/exception.py:926 -msgid "1 or more Zones could not complete the request" -msgstr "" +#: cinder/service.py:255 +msgid "The service database object disappeared, Recreating it." +msgstr "El servicio objeto de base de datos ha desaparecido, recreándolo." -#: cinder/exception.py:930 -msgid "Instance type's memory is too small for requested image." -msgstr "" +#: cinder/service.py:270 +msgid "Recovered model server connection!" +msgstr "Recuperada la conexión al servidor de modelos." -#: cinder/exception.py:934 -msgid "Instance type's disk is too small for requested image." -msgstr "" +#: cinder/service.py:276 +msgid "model server went away" +msgstr "el servidor de modelos se ha ido" -#: cinder/exception.py:938 +#: cinder/service.py:298 #, python-format -msgid "Insufficient free memory on compute node to start %(uuid)s." +msgid "" +"Value of config option %(name)s_workers must be integer greater than 1. " +"Input value ignored." msgstr "" -#: cinder/exception.py:942 -msgid "Could not fetch bandwidth/cpu/disk metrics for this host." +#: cinder/service.py:373 +msgid "serve() can only be called once" msgstr "" -#: cinder/exception.py:946 +#: cinder/service.py:379 cinder/openstack/common/service.py:166 +#: cinder/openstack/common/service.py:384 +#, fuzzy +msgid "Full set of CONF:" +msgstr "Conjunto completo de opciones (FLAGS):" + +#: cinder/service.py:387 #, python-format -msgid "No valid host was found. %(reason)s" +msgid "%s : FLAG SET " msgstr "" -#: cinder/exception.py:950 +#: cinder/utils.py:96 #, python-format -msgid "Host %(host)s is not up or doesn't exist." +msgid "Can not translate %s to integer." 
msgstr "" -#: cinder/exception.py:954 -msgid "Quota exceeded" +#: cinder/utils.py:127 +#, python-format +msgid "May specify only one of %s" msgstr "" -#: cinder/exception.py:958 -#, python-format -msgid "" -"Aggregate %(aggregate_id)s: action '%(action)s' caused an error: " -"%(reason)s." +#: cinder/utils.py:212 +msgid "Specify a password or private_key" msgstr "" -#: cinder/exception.py:963 +#: cinder/utils.py:228 +#, fuzzy, python-format +msgid "Error connecting via ssh: %s" +msgstr "Conectando a libvirt: %s" + +#: cinder/utils.py:412 #, python-format -msgid "Aggregate %(aggregate_id)s could not be found." -msgstr "" +msgid "Invalid backend: %s" +msgstr "backend inválido: %s" -#: cinder/exception.py:967 +#: cinder/utils.py:423 #, python-format -msgid "Aggregate %(aggregate_name)s already exists." -msgstr "" +msgid "backend %s" +msgstr "backend %s" -#: cinder/exception.py:971 +#: cinder/utils.py:698 #, python-format -msgid "Aggregate %(aggregate_id)s has no host %(host)s." +msgid "Could not remove tmpdir: %s" msgstr "" -#: cinder/exception.py:975 +#: cinder/utils.py:759 #, python-format -msgid "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s." +msgid "Volume driver %s not initialized" msgstr "" -#: cinder/exception.py:980 -#, python-format -msgid "Host %(host)s already member of another aggregate." +#: cinder/wsgi.py:127 cinder/openstack/common/sslutils.py:50 +#, fuzzy, python-format +msgid "Unable to find cert_file : %s" +msgstr "Imposible encontrar SR en VBD %s" + +#: cinder/wsgi.py:130 cinder/openstack/common/sslutils.py:53 +#, fuzzy, python-format +msgid "Unable to find ca_file : %s" +msgstr "Imposible encontrar SR en VBD %s" + +#: cinder/wsgi.py:133 cinder/openstack/common/sslutils.py:56 +#, fuzzy, python-format +msgid "Unable to find key_file : %s" +msgstr "Imposible encontrar SR en VBD %s" + +#: cinder/wsgi.py:136 cinder/openstack/common/sslutils.py:59 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" msgstr "" -#: cinder/exception.py:984 +#: cinder/wsgi.py:169 #, python-format -msgid "Aggregate %(aggregate_id)s already has host %(host)s." +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" msgstr "" -#: cinder/exception.py:988 +#: cinder/wsgi.py:206 #, python-format -msgid "Detected more than one volume with name %(vol_name)s" +msgid "Started %(name)s on %(host)s:%(port)s" msgstr "" -#: cinder/exception.py:992 -#, python-format -msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." msgstr "" -#: cinder/exception.py:997 -msgid "Unable to create instance type" +#: cinder/wsgi.py:244 +msgid "WSGI server has stopped." msgstr "" -#: cinder/exception.py:1001 -msgid "Bad response from SolidFire API" +#: cinder/wsgi.py:313 +msgid "You must implement __call__" msgstr "" -#: cinder/exception.py:1005 -#, python-format -msgid "Error in SolidFire API response: status=%(status)s" +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." msgstr "" -#: cinder/exception.py:1009 -#, python-format -msgid "Error in SolidFire API response: data=%(data)s" +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." 
msgstr "" -#: cinder/exception.py:1013 -#, python-format -msgid "Detected existing vlan with id %(vlan)d" +#: cinder/api/common.py:92 cinder/api/common.py:126 cinder/volume/api.py:266 +msgid "limit param must be an integer" msgstr "" -#: cinder/exception.py:1017 -#, python-format -msgid "Instance %(instance_id)s could not be found." +#: cinder/api/common.py:95 cinder/api/common.py:130 cinder/volume/api.py:263 +msgid "limit param must be positive" msgstr "" -#: cinder/exception.py:1021 -#, python-format -msgid "Invalid id: %(val)s (expecting \"i-...\")." +#: cinder/api/common.py:120 +msgid "offset param must be an integer" msgstr "" -#: cinder/exception.py:1025 -#, fuzzy, python-format -msgid "Could not fetch image %(image)s" -msgstr "No se puede unir la imagen con el loopback: %s" +#: cinder/api/common.py:134 +msgid "offset param must be positive" +msgstr "" -#: cinder/log.py:315 +#: cinder/api/common.py:162 #, python-format -msgid "syslog facility must be one of: %s" +msgid "marker [%s] not found" msgstr "" -#: cinder/manager.py:146 +#: cinder/api/common.py:189 #, python-format -msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgid "href %s does not contain version" msgstr "" -#: cinder/manager.py:152 -#, python-format -msgid "Running periodic task %(full_task_name)s" +#: cinder/api/extensions.py:182 +msgid "Initializing extension manager." msgstr "" -#: cinder/manager.py:159 +#: cinder/api/extensions.py:197 #, python-format -msgid "Error during %(full_task_name)s: %(e)s" +msgid "Loaded extension: %s" msgstr "" -#: cinder/manager.py:203 -msgid "Notifying Schedulers of capabilities ..." +#: cinder/api/extensions.py:235 +#, python-format +msgid "Ext name: %s" msgstr "" -#: cinder/policy.py:30 -msgid "JSON file representing policy" +#: cinder/api/extensions.py:236 +#, python-format +msgid "Ext alias: %s" msgstr "" -#: cinder/policy.py:33 -msgid "Rule checked when requested rule is not found" +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext description: %s" msgstr "" -#: cinder/service.py:137 -msgid "SIGTERM received" +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext namespace: %s" msgstr "" -#: cinder/service.py:177 +#: cinder/api/extensions.py:240 #, python-format -msgid "Starting %(topic)s node (version %(vcs_string)s)" +msgid "Ext updated: %s" msgstr "" -#: cinder/service.py:195 +#: cinder/api/extensions.py:242 #, python-format -msgid "Creating Consumer connection for Service %s" +msgid "Exception loading extension: %s" msgstr "" -#: cinder/service.py:282 -msgid "Service killed that has no database entry" -msgstr "Se detuvo un servicio sin entrada en la base de datos" - -#: cinder/service.py:319 -msgid "The service database object disappeared, Recreating it." -msgstr "El servicio objeto de base de datos ha desaparecido, recreándolo." - -#: cinder/service.py:334 -msgid "Recovered model server connection!" -msgstr "Recuperada la conexión al servidor de modelos." 
- -#: cinder/service.py:340 -msgid "model server went away" -msgstr "el servidor de modelos se ha ido" - -#: cinder/service.py:433 -msgid "Full set of FLAGS:" -msgstr "Conjunto completo de opciones (FLAGS):" - -#: cinder/service.py:440 +#: cinder/api/extensions.py:256 #, python-format -msgid "%(flag)s : FLAG SET " +msgid "Loading extension %s" msgstr "" -#: cinder/utils.py:79 +#: cinder/api/extensions.py:262 #, python-format -msgid "Inner Exception: %s" -msgstr "Excepción interna: %s" +msgid "Calling extension factory %s" +msgstr "" -#: cinder/utils.py:165 +#: cinder/api/extensions.py:276 #, python-format -msgid "Fetching %s" -msgstr "Obteniendo %s" +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" -#: cinder/utils.py:210 +#: cinder/api/extensions.py:278 #, python-format -msgid "Got unknown keyword args to utils.execute: %r" +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" msgstr "" -#: cinder/utils.py:220 +#: cinder/api/extensions.py:287 #, python-format -msgid "Running cmd (subprocess): %s" -msgstr "Ejecutando cmd (subprocesos): %s" +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" -#: cinder/utils.py:236 cinder/utils.py:315 +#: cinder/api/extensions.py:356 #, python-format -msgid "Result was %s" -msgstr "El resultado fue %s" +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" -#: cinder/utils.py:249 +#: cinder/api/extensions.py:381 #, python-format -msgid "%r failed. Retrying." +msgid "Failed to load extension %(ext_name)s: %(exc)s" msgstr "" -#: cinder/utils.py:291 -#, python-format -msgid "Running cmd (SSH): %s" -msgstr "corriendo cmd (SSH): %s" +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" -#: cinder/utils.py:293 -msgid "Environment not supported over SSH" +#: cinder/api/xmlutil.py:266 +msgid "element is not a child" msgstr "" -#: cinder/utils.py:297 -msgid "process_input not supported over SSH" +#: cinder/api/xmlutil.py:463 +msgid "root element selecting a list" msgstr "" -#: cinder/utils.py:352 +#: cinder/api/xmlutil.py:786 #, python-format -msgid "debug in callback: %s" -msgstr "Depuración de la devolución de llamada: %s" +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" -#: cinder/utils.py:534 -#, python-format -msgid "Link Local address is not found.:%s" -msgstr "No se encuentra la dirección del enlace local.:%s" +#: cinder/api/xmlutil.py:907 +msgid "subclasses must implement construct()!" 
+msgstr "" -#: cinder/utils.py:537 +#: cinder/api/contrib/admin_actions.py:81 #, python-format -msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" -msgstr "No se pudo obtener enlace de la ip local de %(interface)s :%(ex)s" +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" -#: cinder/utils.py:648 +#: cinder/api/contrib/backups.py:128 #, python-format -msgid "Invalid backend: %s" -msgstr "backend inválido: %s" +msgid "show called for member %s" +msgstr "" -#: cinder/utils.py:659 -#, python-format -msgid "backend %s" -msgstr "backend %s" +#: cinder/api/contrib/backups.py:140 +#, fuzzy, python-format +msgid "delete called for member %s" +msgstr "Cambio de clave secreta para el usuario %s" -#: cinder/utils.py:709 -msgid "in looping call" +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" msgstr "" -#: cinder/utils.py:927 +#: cinder/api/contrib/backups.py:185 #, python-format -msgid "Attempting to grab semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgid "Creating new backup %s" msgstr "" -#: cinder/utils.py:931 -#, python-format -msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:227 +#: cinder/api/contrib/volume_transfer.py:157 +#: cinder/api/contrib/volume_transfer.py:193 +msgid "Incorrect request body format" msgstr "" -#: cinder/utils.py:935 +#: cinder/api/contrib/backups.py:201 #, python-format -msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgid "Creating backup of volume %(volume_id)s in container %(container)s" msgstr "" -#: cinder/utils.py:942 +#: cinder/api/contrib/backups.py:224 #, python-format -msgid "Got file lock \"%(lock)s\" for method \"%(method)s\"..." +msgid "Restoring backup %(backup_id)s (%(body)s)" msgstr "" -#: cinder/utils.py:1001 +#: cinder/api/contrib/backups.py:234 #, python-format -msgid "Found sentinel %(filename)s for pid %(pid)s" +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" msgstr "" -#: cinder/utils.py:1008 -#, python-format -msgid "Cleaned sentinel %(filename)s for pid %(pid)s" +#: cinder/api/contrib/extended_snapshot_attributes.py:60 +msgid "Snapshot not found." msgstr "" -#: cinder/utils.py:1023 -#, python-format -msgid "Found lockfile %(file)s with link count %(count)d" +#: cinder/api/contrib/hosts.py:86 cinder/api/openstack/wsgi.py:245 +msgid "cannot understand XML" msgstr "" -#: cinder/utils.py:1028 +#: cinder/api/contrib/hosts.py:136 #, python-format -msgid "Cleaned lockfile %(file)s with link count %(count)d" +msgid "Host '%s' could not be found." msgstr "" -#: cinder/utils.py:1138 +#: cinder/api/contrib/hosts.py:165 #, python-format -msgid "Expected object of type: %s" +msgid "Invalid status: '%s'" msgstr "" -#: cinder/utils.py:1169 +#: cinder/api/contrib/hosts.py:168 #, python-format -msgid "Invalid server_string: %s" +msgid "Invalid update setting: '%s'" msgstr "" -#: cinder/utils.py:1298 +#: cinder/api/contrib/hosts.py:180 #, python-format -msgid "timefunc: '%(name)s' took %(total_time).2f secs" +msgid "Setting host %(host)s to %(state)s." 
msgstr "" -#: cinder/utils.py:1330 -msgid "Original exception being dropped" +#: cinder/api/contrib/hosts.py:206 +msgid "Describe-resource is admin only functionality" msgstr "" -#: cinder/utils.py:1461 -#, python-format -msgid "Class %(fullname)s is deprecated: %(msg)s" +#: cinder/api/contrib/hosts.py:214 +msgid "Host not found" msgstr "" -#: cinder/utils.py:1463 -#, python-format -msgid "Class %(fullname)s is deprecated" +#: cinder/api/contrib/qos_specs_manage.py:111 +msgid "Please specify a name for QoS specs." msgstr "" -#: cinder/utils.py:1495 -#, python-format -msgid "Function %(name)s in %(location)s is deprecated: %(msg)s" +#: cinder/api/contrib/qos_specs_manage.py:220 +msgid "Failed to disassociate qos specs." msgstr "" -#: cinder/utils.py:1497 -#, python-format -msgid "Function %(name)s in %(location)s is deprecated" +#: cinder/api/contrib/qos_specs_manage.py:222 +msgid "Qos specs still in use." msgstr "" -#: cinder/utils.py:1681 -#, python-format -msgid "Could not remove tmpdir: %s" +#: cinder/api/contrib/qos_specs_manage.py:298 +#: cinder/api/contrib/qos_specs_manage.py:351 +msgid "Volume Type id must not be None." msgstr "" -#: cinder/wsgi.py:97 -#, python-format -msgid "Started %(name)s on %(host)s:%(port)s" +#: cinder/api/contrib/quota_classes.py:72 +msgid "Missing required element quota_class_set in request body." msgstr "" -#: cinder/wsgi.py:108 -msgid "Stopping WSGI server." +#: cinder/api/contrib/quota_classes.py:81 +msgid "Quota class limit must be specified as an integer value." msgstr "" -#: cinder/wsgi.py:111 -msgid "Stopping raw TCP server." +#: cinder/api/contrib/quota_classes.py:85 +msgid "Quota class limit must be -1 or greater." msgstr "" -#: cinder/wsgi.py:117 -#, python-format -msgid "Starting TCP server %(arg0)s on %(host)s:%(port)s" +#: cinder/api/contrib/quotas.py:60 +msgid "Quota limit must be specified as an integer value." msgstr "" -#: cinder/wsgi.py:133 -msgid "WSGI server has stopped." +#: cinder/api/contrib/quotas.py:65 +msgid "Quota limit must be -1 or greater." msgstr "" -#: cinder/wsgi.py:211 -msgid "You must implement __call__" +#: cinder/api/contrib/quotas.py:100 +msgid "Missing required element quota_set in request body." msgstr "" -#: cinder/api/direct.py:218 -msgid "not available" +#: cinder/api/contrib/quotas.py:111 +#, python-format +msgid "Bad key(s) in quota set: %s" msgstr "" -#: cinder/api/direct.py:299 -#, python-format -msgid "Returned non-serializeable type: %s" +#: cinder/api/contrib/scheduler_hints.py:36 +msgid "Malformed scheduler_hints attribute" msgstr "" -#: cinder/api/sizelimit.py:51 -msgid "Request is too large." +#: cinder/api/contrib/services.py:84 +msgid "" +"Query by service parameter is deprecated. Please use binary parameter " +"instead." msgstr "" -#: cinder/api/validator.py:142 -#, python-format -msgid "%(key)s with value %(value)s failed validator %(validator)s" +#: cinder/api/contrib/snapshot_actions.py:51 +msgid "'status' must be specified." msgstr "" -#: cinder/api/ec2/__init__.py:73 +#: cinder/api/contrib/snapshot_actions.py:61 #, python-format -msgid "%(code)s: %(message)s" +msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" msgstr "" -#: cinder/api/ec2/__init__.py:95 +#: cinder/api/contrib/snapshot_actions.py:67 #, python-format -msgid "FaultWrapper: %s" +msgid "" +"Provided snapshot status %(provided)s not allowed for snapshot with " +"status %(current)s." msgstr "" -#: cinder/api/ec2/__init__.py:170 -msgid "Too many failed authentications." 
-msgstr "Demasiados intentos de autenticacion fallidos." +#: cinder/api/contrib/snapshot_actions.py:79 +msgid "progress must be an integer percentage" +msgstr "" -#: cinder/api/ec2/__init__.py:180 -#, python-format -msgid "" -"Access key %(access_key)s has had %(failures)d failed authentications and" -" will be locked out for %(lock_mins)d minutes." +#: cinder/api/contrib/types_extra_specs.py:101 +msgid "Request body empty" msgstr "" -#: cinder/api/ec2/__init__.py:267 -msgid "Signature not provided" +#: cinder/api/contrib/types_extra_specs.py:105 +#: cinder/api/v1/snapshot_metadata.py:75 cinder/api/v1/volume_metadata.py:75 +#: cinder/api/v2/snapshot_metadata.py:75 cinder/api/v2/volume_metadata.py:74 +msgid "Request body and URI mismatch" msgstr "" -#: cinder/api/ec2/__init__.py:271 -msgid "Access key not provided" +#: cinder/api/contrib/types_extra_specs.py:108 +#: cinder/api/v1/snapshot_metadata.py:79 cinder/api/v1/volume_metadata.py:79 +#: cinder/api/v2/snapshot_metadata.py:79 cinder/api/v2/volume_metadata.py:78 +msgid "Request body contains too many items" msgstr "" -#: cinder/api/ec2/__init__.py:306 cinder/api/ec2/__init__.py:319 -msgid "Failure communicating with keystone" +#: cinder/api/contrib/types_extra_specs.py:150 +msgid "" +"Key names can only contain alphanumeric characters, underscores, periods," +" colons and hyphens." msgstr "" -#: cinder/api/ec2/__init__.py:388 +#: cinder/api/contrib/volume_actions.py:99 #, python-format -msgid "Authentication Failure: %s" -msgstr "Fallo de autenticación: %s" +msgid "" +"Invalid request to attach volume to an instance %(instance_uuid)s and a " +"host %(host_name)s simultaneously" +msgstr "" -#: cinder/api/ec2/__init__.py:404 -#, python-format -msgid "Authenticated Request For %(uname)s:%(pname)s)" +#: cinder/api/contrib/volume_actions.py:107 +msgid "Invalid request to attach volume to an invalid target" msgstr "" -#: cinder/api/ec2/__init__.py:435 -#, python-format -msgid "action: %s" -msgstr "acción: %s" +#: cinder/api/contrib/volume_actions.py:111 +msgid "" +"Invalid request to attach volume with an invalid mode. Attaching mode " +"should be 'rw' or 'ro'" +msgstr "" -#: cinder/api/ec2/__init__.py:437 -#, python-format -msgid "arg: %(key)s\t\tval: %(value)s" +#: cinder/api/contrib/volume_actions.py:196 +msgid "Unable to fetch connection information from backend." msgstr "" -#: cinder/api/ec2/__init__.py:512 -#, python-format -msgid "Unauthorized request for controller=%(controller)s and action=%(action)s" +#: cinder/api/contrib/volume_actions.py:216 +msgid "Unable to terminate volume connection from backend." msgstr "" -#: cinder/api/ec2/__init__.py:584 -#, python-format -msgid "InstanceNotFound raised: %s" +#: cinder/api/contrib/volume_actions.py:229 +msgid "No image_name was specified in request." msgstr "" -#: cinder/api/ec2/__init__.py:590 -#, python-format -msgid "VolumeNotFound raised: %s" +#: cinder/api/contrib/volume_actions.py:237 +msgid "Bad value for 'force' parameter." msgstr "" -#: cinder/api/ec2/__init__.py:596 -#, python-format -msgid "SnapshotNotFound raised: %s" +#: cinder/api/contrib/volume_actions.py:240 +msgid "'force' is not string or bool." msgstr "" -#: cinder/api/ec2/__init__.py:602 -#, python-format -msgid "NotFound raised: %s" -msgstr "No encontrado: %s" +#: cinder/api/contrib/volume_actions.py:280 +msgid "New volume size must be specified as an integer." 
+msgstr "" -#: cinder/api/ec2/__init__.py:605 -#, python-format -msgid "EC2APIError raised: %s" +#: cinder/api/contrib/volume_actions.py:299 +msgid "Must specify readonly in request." msgstr "" -#: cinder/api/ec2/__init__.py:613 -#, python-format -msgid "KeyPairExists raised: %s" +#: cinder/api/contrib/volume_actions.py:307 +msgid "Bad value for 'readonly'" msgstr "" -#: cinder/api/ec2/__init__.py:617 -#, python-format -msgid "InvalidParameterValue raised: %s" +#: cinder/api/contrib/volume_actions.py:311 +msgid "'readonly' not string or bool" msgstr "" -#: cinder/api/ec2/__init__.py:621 -#, python-format -msgid "InvalidPortRange raised: %s" +#: cinder/api/contrib/volume_actions.py:325 +msgid "New volume type must be specified." msgstr "" -#: cinder/api/ec2/__init__.py:625 -#, python-format -msgid "NotAuthorized raised: %s" +#: cinder/api/contrib/volume_transfer.py:131 +msgid "Listing volume transfers" msgstr "" -#: cinder/api/ec2/__init__.py:629 +#: cinder/api/contrib/volume_transfer.py:147 +#, fuzzy, python-format +msgid "Creating new volume transfer %s" +msgstr "Crear volumen de %s GB" + +#: cinder/api/contrib/volume_transfer.py:162 +#, fuzzy, python-format +msgid "Creating transfer of volume %s" +msgstr "Fallo al generar metadatos para la ip %s" + +#: cinder/api/contrib/volume_transfer.py:183 #, python-format -msgid "InvalidRequest raised: %s" +msgid "Accepting volume transfer %s" msgstr "" -#: cinder/api/ec2/__init__.py:633 +#: cinder/api/contrib/volume_transfer.py:196 #, fuzzy, python-format -msgid "QuotaError raised: %s" -msgstr "Sucedió un error inexperado: %s" +msgid "Accepting transfer %s" +msgstr "Crear volumen de %s GB" -#: cinder/api/ec2/__init__.py:637 +#: cinder/api/contrib/volume_transfer.py:217 #, python-format -msgid "Invalid id: bogus (expecting \"i-...\"): %s" +msgid "Delete transfer with id: %s" msgstr "" -#: cinder/api/ec2/__init__.py:646 -#, python-format -msgid "Unexpected error raised: %s" -msgstr "Sucedió un error inexperado: %s" +#: cinder/api/contrib/volume_type_encryption.py:64 +msgid "key_size must be non-negative" +msgstr "" -#: cinder/api/ec2/__init__.py:647 -#, python-format -msgid "Environment: %s" +#: cinder/api/contrib/volume_type_encryption.py:67 +msgid "key_size must be an integer" msgstr "" -#: cinder/api/ec2/__init__.py:649 cinder/api/metadata/handler.py:248 -msgid "An unknown error has occurred. Please try your request again." -msgstr "Ha sucedido un error desconocido. Por favor repite el intento de nuevo." +#: cinder/api/contrib/volume_type_encryption.py:73 +msgid "provider must be defined" +msgstr "" -#: cinder/api/ec2/apirequest.py:64 -#, python-format -msgid "Unsupported API request: controller = %(controller)s, action = %(action)s" +#: cinder/api/contrib/volume_type_encryption.py:75 +msgid "control_location must be defined" msgstr "" -#: cinder/api/ec2/cloud.py:336 +#: cinder/api/contrib/volume_type_encryption.py:83 #, python-format -msgid "Create snapshot of volume %s" +msgid "Valid control location are: %s" msgstr "" -#: cinder/api/ec2/cloud.py:372 -#, python-format -msgid "" -"Value (%s) for KeyName is invalid. Content limited to Alphanumeric " -"character, spaces, dashes, and underscore." +#: cinder/api/contrib/volume_type_encryption.py:111 +msgid "Cannot create encryption specs. Volume type in use." msgstr "" -#: cinder/api/ec2/cloud.py:378 -#, python-format -msgid "Value (%s) for Keyname is invalid. Length exceeds maximum of 255." +#: cinder/api/contrib/volume_type_encryption.py:115 +msgid "Create body is not valid." 
+msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:157 +msgid "Cannot delete encryption specs. Volume type in use." msgstr "" -#: cinder/api/ec2/cloud.py:382 +#: cinder/api/middleware/auth.py:108 +msgid "Invalid service catalog json." +msgstr "" + +#: cinder/api/middleware/fault.py:44 #, python-format -msgid "Create key pair %s" -msgstr "Creando par de claves %s" +msgid "Caught error: %s" +msgstr "Capturado error: %s" -#: cinder/api/ec2/cloud.py:391 +#: cinder/api/middleware/fault.py:53 cinder/api/openstack/wsgi.py:984 #, python-format -msgid "Import key %s" +msgid "%(url)s returned with HTTP %(status)d" msgstr "" -#: cinder/api/ec2/cloud.py:409 +#: cinder/api/middleware/fault.py:69 #, python-format -msgid "Delete key pair %s" -msgstr "Borrar para de claves %s" +msgid "%(exception)s: %(explanation)s" +msgstr "" -#: cinder/api/ec2/cloud.py:551 -#, fuzzy, python-format -msgid "Invalid CIDR" -msgstr "Cidr %(cidr)s invalido" +#: cinder/api/middleware/sizelimit.py:55 cinder/api/middleware/sizelimit.py:64 +#: cinder/api/middleware/sizelimit.py:78 +msgid "Request is too large." +msgstr "" -#: cinder/api/ec2/cloud.py:639 cinder/api/ec2/cloud.py:693 -#: cinder/api/ec2/cloud.py:800 -msgid "Not enough parameters, need group_name or group_id" +#: cinder/api/openstack/__init__.py:69 +msgid "Must specify an ExtensionManager class" msgstr "" -#: cinder/api/ec2/cloud.py:654 -#: cinder/api/openstack/compute/contrib/security_groups.py:517 +#: cinder/api/openstack/__init__.py:80 #, python-format -msgid "Revoke security group ingress %s" -msgstr "Revocar ingreso al grupo de seguridad %s" +msgid "Extended resource: %s" +msgstr "" -#: cinder/api/ec2/cloud.py:666 cinder/api/ec2/cloud.py:719 +#: cinder/api/openstack/__init__.py:104 #, python-format -msgid "%s Not enough parameters to build a valid rule" +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" msgstr "" -#: cinder/api/ec2/cloud.py:684 cinder/api/ec2/cloud.py:744 -msgid "No rule for the specified parameters." -msgstr "No hay regla para los parámetros especificados." - -#: cinder/api/ec2/cloud.py:708 -#: cinder/api/openstack/compute/contrib/security_groups.py:354 +#: cinder/api/openstack/__init__.py:110 #, python-format -msgid "Authorize security group ingress %s" -msgstr "Autorizar ingreso al grupo de seguridad %s" - -#: cinder/api/ec2/cloud.py:725 -#, fuzzy, python-format -msgid "%s - This rule already exists in group" -msgstr "Esta regla ya existe en el grupo %s" +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" -#: cinder/api/ec2/cloud.py:769 -#, python-format +#: cinder/api/openstack/__init__.py:126 msgid "" -"Value (%s) for parameter GroupName is invalid. Content limited to " -"Alphanumeric characters, spaces, dashes, and underscores." +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." msgstr "" -#: cinder/api/ec2/cloud.py:776 -#, python-format +#: cinder/api/openstack/urlmap.py:25 msgid "" -"Value (%s) for parameter GroupName is invalid. Length exceeds maximum of " -"255." +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." 
msgstr "" -#: cinder/api/ec2/cloud.py:780 -#: cinder/api/openstack/compute/contrib/security_groups.py:292 -#, python-format -msgid "Create Security Group %s" -msgstr "Crear Grupo de Seguridad %s" +#: cinder/api/openstack/wsgi.py:220 cinder/api/openstack/wsgi.py:634 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:639 +msgid "too many body keys" +msgstr "" -#: cinder/api/ec2/cloud.py:783 +#: cinder/api/openstack/wsgi.py:677 #, python-format -msgid "group %s already exists" -msgstr "el grupo %s ya existe" +msgid "Exception handling resource: %s" +msgstr "" -#: cinder/api/ec2/cloud.py:815 -#: cinder/api/openstack/compute/contrib/security_groups.py:245 +#: cinder/api/openstack/wsgi.py:682 #, python-format -msgid "Delete security group %s" -msgstr "Borrar grupo de seguridad %s" +msgid "Fault thrown: %s" +msgstr "" -#: cinder/api/ec2/cloud.py:823 cinder/compute/manager.py:1630 +#: cinder/api/openstack/wsgi.py:685 #, python-format -msgid "Get console output for instance %s" -msgstr "Obtener salida de la consola para la instancia %s" +msgid "HTTP exception thrown: %s" +msgstr "" -#: cinder/api/ec2/cloud.py:894 -#, python-format -msgid "Create volume from snapshot %s" +#: cinder/api/openstack/wsgi.py:793 +msgid "Empty body provided in request" msgstr "" -#: cinder/api/ec2/cloud.py:898 cinder/api/openstack/compute/contrib/volumes.py:186 -#: cinder/api/openstack/volume/volumes.py:222 -#, python-format -msgid "Create volume of %s GB" -msgstr "Crear volumen de %s GB" +#: cinder/api/openstack/wsgi.py:799 +msgid "Unrecognized Content-Type provided in request" +msgstr "" -#: cinder/api/ec2/cloud.py:921 -msgid "Delete Failed" +#: cinder/api/openstack/wsgi.py:803 +msgid "No Content-Type provided in request" msgstr "" -#: cinder/api/ec2/cloud.py:931 +#: cinder/api/openstack/wsgi.py:914 #, python-format -msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" +msgid "There is no such action: %s" msgstr "" -#: cinder/api/ec2/cloud.py:939 -msgid "Attach Failed." +#: cinder/api/openstack/wsgi.py:917 cinder/api/openstack/wsgi.py:930 +#: cinder/api/v1/snapshot_metadata.py:53 cinder/api/v1/snapshot_metadata.py:71 +#: cinder/api/v1/snapshot_metadata.py:96 cinder/api/v1/snapshot_metadata.py:121 +#: cinder/api/v1/volume_metadata.py:53 cinder/api/v1/volume_metadata.py:71 +#: cinder/api/v1/volume_metadata.py:96 cinder/api/v1/volume_metadata.py:121 +#: cinder/api/v2/snapshot_metadata.py:53 cinder/api/v2/snapshot_metadata.py:71 +#: cinder/api/v2/snapshot_metadata.py:96 cinder/api/v2/snapshot_metadata.py:121 +#: cinder/api/v2/volume_metadata.py:52 cinder/api/v2/volume_metadata.py:70 +#: cinder/api/v2/volume_metadata.py:95 cinder/api/v2/volume_metadata.py:120 +msgid "Malformed request body" msgstr "" -#: cinder/api/ec2/cloud.py:952 cinder/api/openstack/compute/contrib/volumes.py:366 -#, python-format -msgid "Detach volume %s" -msgstr "Desasociar volumen %s" +#: cinder/api/openstack/wsgi.py:927 +msgid "Unsupported Content-Type" +msgstr "" -#: cinder/api/ec2/cloud.py:959 -#, fuzzy, python-format -msgid "Detach Volume Failed." 
-msgstr "Desasociar volumen %s" +#: cinder/api/openstack/wsgi.py:939 +msgid "Malformed request url" +msgstr "" -#: cinder/api/ec2/cloud.py:984 cinder/api/ec2/cloud.py:1041 -#: cinder/api/ec2/cloud.py:1518 cinder/api/ec2/cloud.py:1533 +#: cinder/api/openstack/wsgi.py:987 #, python-format -msgid "attribute not supported: %s" -msgstr "atributo no soportado: %s" +msgid "%(url)s returned a fault: %(e)s" +msgstr "" -#: cinder/api/ec2/cloud.py:1107 -#, python-format -msgid "vol = %s\n" +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." msgstr "" -#: cinder/api/ec2/cloud.py:1255 -msgid "Allocate address" -msgstr "Asignar dirección" +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" -#: cinder/api/ec2/cloud.py:1267 +#: cinder/api/v1/limits.py:139 cinder/api/v2/limits.py:138 #, python-format -msgid "Release address %s" -msgstr "Liberar dirección %s" +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" -#: cinder/api/ec2/cloud.py:1272 -#, python-format -msgid "Associate address %(public_ip)s to instance %(instance_id)s" +#: cinder/api/v1/limits.py:264 cinder/api/v2/limits.py:261 +msgid "This request was rate-limited." msgstr "" -#: cinder/api/ec2/cloud.py:1282 -#, python-format -msgid "Disassociate address %s" -msgstr "Desasociar dirección %s" +#: cinder/api/v1/snapshot_metadata.py:37 cinder/api/v1/snapshot_metadata.py:117 +#: cinder/api/v1/snapshot_metadata.py:156 cinder/api/v2/snapshot_metadata.py:37 +#: cinder/api/v2/snapshot_metadata.py:117 +#: cinder/api/v2/snapshot_metadata.py:156 +#, fuzzy +msgid "snapshot does not exist" +msgstr "instancia %s: creando snapshot" -#: cinder/api/ec2/cloud.py:1308 -msgid "Image must be available" +#: cinder/api/v1/snapshot_metadata.py:139 +#: cinder/api/v1/snapshot_metadata.py:149 cinder/api/v1/volume_metadata.py:139 +#: cinder/api/v1/volume_metadata.py:149 cinder/api/v2/snapshot_metadata.py:139 +#: cinder/api/v2/snapshot_metadata.py:149 cinder/api/v2/volume_metadata.py:138 +#: cinder/api/v2/volume_metadata.py:148 +msgid "Metadata item was not found" msgstr "" -#: cinder/api/ec2/cloud.py:1329 -msgid "Going to start terminating instances" -msgstr "Se va a iniciar la finalización de las instancias" - -#: cinder/api/ec2/cloud.py:1343 +#: cinder/api/v1/snapshots.py:119 cinder/api/v2/snapshots.py:120 #, python-format -msgid "Reboot instance %r" -msgstr "Reiniciar instancia %r" - -#: cinder/api/ec2/cloud.py:1354 -msgid "Going to stop instances" +msgid "Delete snapshot with id: %s" msgstr "" -#: cinder/api/ec2/cloud.py:1365 -msgid "Going to start instances" +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:184 +msgid "'volume_id' must be specified" msgstr "" -#: cinder/api/ec2/cloud.py:1455 +#: cinder/api/v1/snapshots.py:182 cinder/api/v2/snapshots.py:193 #, python-format -msgid "De-registering image %s" -msgstr "Des-registrando la imagen %s" - -#: cinder/api/ec2/cloud.py:1471 -msgid "imageLocation is required" +msgid "Create snapshot from volume %s" msgstr "" -#: cinder/api/ec2/cloud.py:1490 +#: cinder/api/v1/snapshots.py:186 cinder/api/v2/snapshots.py:202 #, python-format -msgid "Registered image %(image_location)s with id %(image_id)s" +msgid "Invalid value '%s' for force. 
" msgstr "" -#: cinder/api/ec2/cloud.py:1536 -msgid "user or group not specified" -msgstr "usuario o grupo no especificado" - -#: cinder/api/ec2/cloud.py:1538 -msgid "only group \"all\" is supported" -msgstr "sólo el grupo \"all\" está soportado" +#: cinder/api/v1/volume_metadata.py:37 cinder/api/v1/volume_metadata.py:117 +#: cinder/api/v1/volume_metadata.py:156 cinder/api/v2/volume_metadata.py:36 +#: cinder/api/v2/volume_metadata.py:116 cinder/api/v2/volume_metadata.py:155 +#, fuzzy +msgid "volume does not exist" +msgstr "el grupo de volumenes %s no existe" -#: cinder/api/ec2/cloud.py:1540 -msgid "operation_type must be add or remove" -msgstr "operation_type debe ser añadir o eliminar" +#: cinder/api/v1/volumes.py:111 +#, python-format +msgid "vol=%s" +msgstr "" -#: cinder/api/ec2/cloud.py:1542 +#: cinder/api/v1/volumes.py:290 cinder/api/v2/volumes.py:228 #, python-format -msgid "Updating image %s publicity" -msgstr "Actualizando imagen %s públicamente" +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:344 cinder/api/v1/volumes.py:348 +#: cinder/api/v2/volumes.py:298 cinder/api/v2/volumes.py:302 +msgid "Invalid imageRef provided." +msgstr "" -#: cinder/api/ec2/cloud.py:1555 +#: cinder/api/v1/volumes.py:388 cinder/api/v2/volumes.py:354 #, python-format -msgid "Not allowed to modify attributes for image %s" +msgid "snapshot id:%s not found" msgstr "" -#: cinder/api/ec2/cloud.py:1603 +#: cinder/api/v1/volumes.py:401 #, python-format -msgid "Couldn't stop instance with in %d sec" +msgid "source vol id:%s not found" msgstr "" -#: cinder/api/metadata/handler.py:246 cinder/api/metadata/handler.py:253 +#: cinder/api/v1/volumes.py:412 cinder/api/v2/volumes.py:377 #, python-format -msgid "Failed to get metadata for ip: %s" -msgstr "Fallo al generar metadatos para la ip %s" +msgid "Create volume of %s GB" +msgstr "Crear volumen de %s GB" -#: cinder/api/openstack/__init__.py:43 +#: cinder/api/v1/volumes.py:496 #, python-format -msgid "Caught error: %s" -msgstr "Capturado error: %s" +msgid "Removing options '%(bad_options)s' from query" +msgstr "" -#: cinder/api/openstack/__init__.py:45 cinder/api/openstack/wsgi.py:886 +#: cinder/api/v2/snapshots.py:111 cinder/api/v2/snapshots.py:126 +#: cinder/api/v2/snapshots.py:267 +msgid "Snapshot could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:174 cinder/api/v2/snapshots.py:234 +#: cinder/api/v2/volumes.py:313 cinder/api/v2/volumes.py:419 #, python-format -msgid "%(url)s returned with HTTP %(status)d" +msgid "Missing required element '%s' in request body" msgstr "" -#: cinder/api/openstack/__init__.py:94 -msgid "Must specify an ExtensionManager class" +#: cinder/api/v2/snapshots.py:190 cinder/api/v2/volumes.py:217 +#: cinder/api/v2/volumes.py:234 cinder/api/v2/volumes.py:449 +msgid "Volume could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:230 cinder/api/v2/volumes.py:415 +msgid "Missing request body" +msgstr "" + +#: cinder/api/v2/types.py:70 +msgid "Volume type not found" msgstr "" -#: cinder/api/openstack/__init__.py:105 +#: cinder/api/v2/volumes.py:237 +msgid "Volume cannot be deleted while in attached state" +msgstr "" + +#: cinder/api/v2/volumes.py:343 +msgid "Volume type not found." 
+msgstr "" + +#: cinder/api/v2/volumes.py:366 #, python-format -msgid "Extended resource: %s" +msgid "source volume id:%s not found" msgstr "" -#: cinder/api/openstack/__init__.py:130 +#: cinder/api/v2/volumes.py:472 #, python-format -msgid "" -"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " -"resource" +msgid "Removing options '%s' from query" msgstr "" -#: cinder/api/openstack/__init__.py:135 +#: cinder/backup/api.py:66 +#, fuzzy +msgid "Backup status must be available or error" +msgstr "El estado del volumen debe estar disponible" + +#: cinder/backup/api.py:105 +#, fuzzy +msgid "Volume to be backed up must be available" +msgstr "El estado del volumen debe estar disponible" + +#: cinder/backup/api.py:140 +#, fuzzy +msgid "Backup status must be available" +msgstr "El estado del volumen debe estar disponible" + +#: cinder/backup/api.py:145 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:154 #, python-format -msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" msgstr "" -#: cinder/api/openstack/auth.py:90 +#: cinder/backup/api.py:170 +#, fuzzy +msgid "Volume to be restored to must be available" +msgstr "El estado del volumen debe estar disponible" + +#: cinder/backup/api.py:176 #, python-format -msgid "%(user_id)s could not be found with token '%(token)s'" +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." msgstr "" -#: cinder/api/openstack/auth.py:134 +#: cinder/backup/api.py:181 #, python-format -msgid "%(user_id)s must be an admin or a member of %(project_id)s" +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" msgstr "" -#: cinder/api/openstack/auth.py:152 -msgid "Authentication requests must be made against a version root (e.g. /v2)." +#: cinder/backup/manager.py:97 +msgid "NULL host not allowed for volume backend lookup." msgstr "" -#: cinder/api/openstack/auth.py:167 +#: cinder/backup/manager.py:100 #, python-format -msgid "Could not find %s in request." +msgid "Checking hostname '%s' for backend info." msgstr "" -#: cinder/api/openstack/auth.py:191 +#: cinder/backup/manager.py:107 #, python-format -msgid "Successfully authenticated '%s'" +msgid "Backend not found in hostname (%s) so using default." msgstr "" -#: cinder/api/openstack/auth.py:241 -msgid "User not found with provided API key." +#: cinder/backup/manager.py:117 +#, python-format +msgid "Manager requested for volume_backend '%s'." msgstr "" -#: cinder/api/openstack/auth.py:258 -#, python-format -msgid "Provided API key is valid, but not for user '%(username)s'" +#: cinder/backup/manager.py:120 cinder/backup/manager.py:132 +msgid "Fetching default backend." msgstr "" -#: cinder/api/openstack/common.py:133 cinder/api/openstack/common.py:167 -msgid "limit param must be an integer" +#: cinder/backup/manager.py:123 +#, python-format +msgid "Volume manager for backend '%s' does not exist." msgstr "" -#: cinder/api/openstack/common.py:136 cinder/api/openstack/common.py:171 -msgid "limit param must be positive" +#: cinder/backup/manager.py:129 +#, python-format +msgid "Driver requested for volume_backend '%s'." msgstr "" -#: cinder/api/openstack/common.py:161 -msgid "offset param must be an integer" +#: cinder/backup/manager.py:147 +#, python-format +msgid "" +"Registering backend %(backend)s (host=%(host)s " +"backend_name=%(backend_name)s)." 
msgstr "" -#: cinder/api/openstack/common.py:175 -msgid "offset param must be positive" +#: cinder/backup/manager.py:154 +#, python-format +msgid "Registering default backend %s." msgstr "" -#: cinder/api/openstack/common.py:203 +#: cinder/backup/manager.py:158 #, python-format -msgid "marker [%s] not found" +msgid "Starting volume driver %(driver_name)s (%(version)s)." msgstr "" -#: cinder/api/openstack/common.py:243 +#: cinder/backup/manager.py:165 #, python-format -msgid "href %s does not contain version" +msgid "Error encountered during initialization of driver: %(name)s." msgstr "" -#: cinder/api/openstack/common.py:278 -msgid "Image metadata limit exceeded" +#: cinder/backup/manager.py:184 +msgid "Cleaning up incomplete backup operations." msgstr "" -#: cinder/api/openstack/common.py:295 +#: cinder/backup/manager.py:189 #, python-format -msgid "Converting nw_info: %s" +msgid "Resetting volume %s to available (was backing-up)." msgstr "" -#: cinder/api/openstack/common.py:305 +#: cinder/backup/manager.py:194 #, python-format -msgid "Converted networks: %s" +msgid "Resetting volume %s to error_restoring (was restoring-backup)." msgstr "" -#: cinder/api/openstack/common.py:338 +#: cinder/backup/manager.py:206 #, python-format -msgid "Cannot '%(action)s' while instance is in %(attr)s %(state)s" +msgid "Resetting backup %s to error (was creating)." msgstr "" -#: cinder/api/openstack/common.py:341 +#: cinder/backup/manager.py:212 #, python-format -msgid "Instance is in an invalid state for '%(action)s'" +msgid "Resetting backup %s to available (was restoring)." msgstr "" -#: cinder/api/openstack/common.py:421 -msgid "Rejecting snapshot request, snapshots currently disabled" +#: cinder/backup/manager.py:217 +#, python-format +msgid "Resuming delete on backup: %s." msgstr "" -#: cinder/api/openstack/common.py:423 -msgid "Instance snapshots are not permitted at this time." +#: cinder/backup/manager.py:225 +#, python-format +msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." msgstr "" -#: cinder/api/openstack/extensions.py:188 +#: cinder/backup/manager.py:237 #, python-format -msgid "Loaded extension: %s" +msgid "" +"Create backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s." msgstr "" -#: cinder/api/openstack/extensions.py:225 +#: cinder/backup/manager.py:249 #, python-format -msgid "Ext name: %s" +msgid "" +"Create backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." msgstr "" -#: cinder/api/openstack/extensions.py:226 +#: cinder/backup/manager.py:282 #, python-format -msgid "Ext alias: %s" +msgid "Create backup finished. backup: %s." msgstr "" -#: cinder/api/openstack/extensions.py:227 +#: cinder/backup/manager.py:286 #, python-format -msgid "Ext description: %s" +msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s." msgstr "" -#: cinder/api/openstack/extensions.py:229 +#: cinder/backup/manager.py:299 #, python-format -msgid "Ext namespace: %s" +msgid "" +"Restore backup aborted: expected volume status %(expected_status)s but " +"got %(actual_status)s." msgstr "" -#: cinder/api/openstack/extensions.py:230 +#: cinder/backup/manager.py:310 #, python-format -msgid "Ext updated: %s" +msgid "" +"Restore backup aborted: expected backup status %(expected_status)s but " +"got %(actual_status)s." 
msgstr "" -#: cinder/api/openstack/extensions.py:232 +#: cinder/backup/manager.py:329 #, python-format -msgid "Exception loading extension: %s" +msgid "" +"Restore backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." msgstr "" -#: cinder/api/openstack/extensions.py:246 +#: cinder/backup/manager.py:360 #, python-format -msgid "Loading extension %s" +msgid "" +"Restore backup finished, backup %(backup_id)s restored to volume " +"%(volume_id)s." msgstr "" -#: cinder/api/openstack/extensions.py:252 +#: cinder/backup/manager.py:379 #, python-format -msgid "Calling extension factory %s" +msgid "Delete backup started, backup: %s." msgstr "" -#: cinder/api/openstack/extensions.py:264 +#: cinder/backup/manager.py:386 #, python-format -msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgid "" +"Delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." msgstr "" -#: cinder/api/openstack/extensions.py:344 +#: cinder/backup/manager.py:399 #, python-format -msgid "Failed to load extension %(classpath)s: %(exc)s" +msgid "" +"Delete backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." msgstr "" -#: cinder/api/openstack/extensions.py:368 +#: cinder/backup/manager.py:422 #, python-format -msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgid "Delete backup finished, backup %s deleted." msgstr "" -#: cinder/api/openstack/wsgi.py:135 cinder/api/openstack/wsgi.py:538 -msgid "cannot understand JSON" +#: cinder/backup/drivers/ceph.py:116 +msgid "" +"rbd striping not supported - ignoring configuration settings for rbd " +"striping" msgstr "" -#: cinder/api/openstack/wsgi.py:159 -#: cinder/api/openstack/compute/contrib/hosts.py:86 -msgid "cannot understand XML" +#: cinder/backup/drivers/ceph.py:147 +#, python-format +msgid "invalid user '%s'" msgstr "" -#: cinder/api/openstack/wsgi.py:543 -msgid "too many body keys" +#: cinder/backup/drivers/ceph.py:213 +msgid "backup_id required" msgstr "" -#: cinder/api/openstack/wsgi.py:582 +#: cinder/backup/drivers/ceph.py:224 #, python-format -msgid "Exception handling resource: %s" +msgid "discarding %(length)s bytes from offset %(offset)s" msgstr "" -#: cinder/api/openstack/wsgi.py:586 +#: cinder/backup/drivers/ceph.py:232 #, python-format -msgid "Fault thrown: %s" +msgid "writing zeroes chunk %d" msgstr "" -#: cinder/api/openstack/wsgi.py:589 +#: cinder/backup/drivers/ceph.py:246 #, python-format -msgid "HTTP exception thrown: %s" +msgid "transferring data between '%(src)s' and '%(dest)s'" msgstr "" -#: cinder/api/openstack/wsgi.py:697 -msgid "Unrecognized Content-Type provided in request" +#: cinder/backup/drivers/ceph.py:250 +#, python-format +msgid "%(chunks)s chunks of %(bytes)s bytes to be transferred" msgstr "" -#: cinder/api/openstack/wsgi.py:701 -msgid "No Content-Type provided in request" +#: cinder/backup/drivers/ceph.py:269 +#, python-format +msgid "transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s)" msgstr "" -#: cinder/api/openstack/wsgi.py:705 -msgid "Empty body provided in request" +#: cinder/backup/drivers/ceph.py:279 +#, python-format +msgid "transferring remaining %s bytes" msgstr "" -#: cinder/api/openstack/wsgi.py:816 +#: cinder/backup/drivers/ceph.py:295 #, python-format -msgid "There is no such action: %s" +msgid "creating base image '%s'" msgstr "" 
-#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 -#: cinder/api/openstack/compute/server_metadata.py:58 -#: cinder/api/openstack/compute/server_metadata.py:76 -#: cinder/api/openstack/compute/server_metadata.py:101 -#: cinder/api/openstack/compute/server_metadata.py:126 -#: cinder/api/openstack/compute/contrib/admin_actions.py:211 -#: cinder/api/openstack/compute/contrib/console_output.py:52 -msgid "Malformed request body" +#: cinder/backup/drivers/ceph.py:322 cinder/backup/drivers/ceph.py:603 +#, python-format +msgid "deleting backup snapshot='%s'" msgstr "" -#: cinder/api/openstack/wsgi.py:829 -msgid "Unsupported Content-Type" +#: cinder/backup/drivers/ceph.py:325 +msgid "no backup snapshot to delete" msgstr "" -#: cinder/api/openstack/wsgi.py:841 -msgid "Malformed request url" +#: cinder/backup/drivers/ceph.py:361 +#, python-format +msgid "trying diff format name format basename='%s'" msgstr "" -#: cinder/api/openstack/wsgi.py:889 +#: cinder/backup/drivers/ceph.py:369 #, python-format -msgid "%(url)s returned a fault: %(e)s" +msgid "image %s not found" msgstr "" -#: cinder/api/openstack/xmlutil.py:265 -msgid "element is not a child" +#: cinder/backup/drivers/ceph.py:377 +#, python-format +msgid "base image still has %s snapshots so skipping base image delete" msgstr "" -#: cinder/api/openstack/xmlutil.py:414 -msgid "root element selecting a list" +#: cinder/backup/drivers/ceph.py:382 +#, python-format +msgid "deleting base image='%s'" msgstr "" -#: cinder/api/openstack/xmlutil.py:739 +#: cinder/backup/drivers/ceph.py:389 #, python-format -msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgid "image busy, retrying %(retries)s more time(s) in %(delay)ss" msgstr "" -#: cinder/api/openstack/xmlutil.py:858 -msgid "subclasses must implement construct()!" +#: cinder/backup/drivers/ceph.py:394 +msgid "max retries reached - raising error" msgstr "" -#: cinder/api/openstack/compute/extensions.py:29 -#: cinder/api/openstack/volume/extensions.py:29 -msgid "Initializing extension manager." +#: cinder/backup/drivers/ceph.py:397 +#, python-format +msgid "base backup image='%s' deleted)" msgstr "" -#: cinder/api/openstack/compute/image_metadata.py:40 -#: cinder/api/openstack/compute/images.py:146 -#: cinder/api/openstack/compute/images.py:161 -msgid "Image not found." 
+#: cinder/backup/drivers/ceph.py:407 +#, python-format +msgid "deleting source snap '%s'" msgstr "" -#: cinder/api/openstack/compute/image_metadata.py:79 -msgid "Incorrect request body format" +#: cinder/backup/drivers/ceph.py:453 +#, python-format +msgid "performing differential transfer from '%(src)s' to '%(dest)s'" msgstr "" -#: cinder/api/openstack/compute/image_metadata.py:83 -#: cinder/api/openstack/compute/server_metadata.py:80 -#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:79 -#: cinder/api/openstack/compute/contrib/volumetypes.py:188 -msgid "Request body and URI mismatch" +#: cinder/backup/drivers/ceph.py:478 +#, python-format +msgid "rbd diff op failed - (ret=%(ret)s stderr=%(stderr)s)" msgstr "" -#: cinder/api/openstack/compute/image_metadata.py:86 -#: cinder/api/openstack/compute/server_metadata.py:84 -#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:82 -#: cinder/api/openstack/compute/contrib/volumetypes.py:191 -msgid "Request body contains too many items" +#: cinder/backup/drivers/ceph.py:488 +#, python-format +msgid "image '%s' not found - trying diff format name" msgstr "" -#: cinder/api/openstack/compute/image_metadata.py:111 -msgid "Invalid metadata key" +#: cinder/backup/drivers/ceph.py:493 +#, python-format +msgid "diff format image '%s' not found" msgstr "" -#: cinder/api/openstack/compute/ips.py:74 -msgid "Instance does not exist" +#: cinder/backup/drivers/ceph.py:528 +#, python-format +msgid "using --from-snap '%s'" msgstr "" -#: cinder/api/openstack/compute/ips.py:97 -msgid "Instance is not a member of specified network" +#: cinder/backup/drivers/ceph.py:543 +#, python-format +msgid "source snap '%s' is stale so deleting" msgstr "" -#: cinder/api/openstack/compute/limits.py:140 +#: cinder/backup/drivers/ceph.py:555 #, python-format msgid "" -"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " -"%(unit_string)s." +"snap='%(snap)s' does not exist in base image='%(base)s' - aborting " +"incremental backup" msgstr "" -#: cinder/api/openstack/compute/limits.py:266 -msgid "This request was rate-limited." 
+#: cinder/backup/drivers/ceph.py:566 +#, python-format +msgid "creating backup snapshot='%s'" msgstr "" -#: cinder/api/openstack/compute/server_metadata.py:38 -#: cinder/api/openstack/compute/server_metadata.py:122 -#: cinder/api/openstack/compute/server_metadata.py:159 -msgid "Server does not exist" +#: cinder/backup/drivers/ceph.py:586 +#, python-format +msgid "differential backup transfer completed in %.4fs" msgstr "" -#: cinder/api/openstack/compute/server_metadata.py:141 -#: cinder/api/openstack/compute/server_metadata.py:152 -msgid "Metadata item was not found" +#: cinder/backup/drivers/ceph.py:595 +msgid "differential backup transfer failed" msgstr "" -#: cinder/api/openstack/compute/servers.py:425 +#: cinder/backup/drivers/ceph.py:625 #, python-format -msgid "Invalid server status: %(status)s" +msgid "creating base image='%s'" msgstr "" -#: cinder/api/openstack/compute/servers.py:433 -msgid "Invalid changes-since value" +#: cinder/backup/drivers/ceph.py:634 +msgid "copying data" msgstr "" -#: cinder/api/openstack/compute/servers.py:483 -msgid "Personality file limit exceeded" +#: cinder/backup/drivers/ceph.py:694 +#, python-format +msgid "looking for snapshot of backup base '%s'" msgstr "" -#: cinder/api/openstack/compute/servers.py:485 -msgid "Personality file path too long" +#: cinder/backup/drivers/ceph.py:697 +#, python-format +msgid "backup base '%s' has no snapshots" msgstr "" -#: cinder/api/openstack/compute/servers.py:487 -msgid "Personality file content too long" +#: cinder/backup/drivers/ceph.py:704 +#, python-format +msgid "backup '%s' has no snapshot" msgstr "" -#: cinder/api/openstack/compute/servers.py:501 -msgid "Server name is not a string or unicode" +#: cinder/backup/drivers/ceph.py:708 +#, python-format +msgid "backup should only have one snapshot but instead has %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:505 -msgid "Server name is an empty string" +#: cinder/backup/drivers/ceph.py:713 +#, python-format +msgid "found snapshot '%s'" msgstr "" -#: cinder/api/openstack/compute/servers.py:509 -msgid "Server name must be less than 256 characters." +#: cinder/backup/drivers/ceph.py:734 +msgid "need non-zero volume size" msgstr "" -#: cinder/api/openstack/compute/servers.py:527 +#: cinder/backup/drivers/ceph.py:751 #, python-format -msgid "Bad personality format: missing %s" +msgid "Starting backup of volume='%s'" msgstr "" -#: cinder/api/openstack/compute/servers.py:530 -msgid "Bad personality format" +#: cinder/backup/drivers/ceph.py:764 +msgid "forcing full backup" msgstr "" -#: cinder/api/openstack/compute/servers.py:535 +#: cinder/backup/drivers/ceph.py:776 #, python-format -msgid "Personality content for %s cannot be decoded" +msgid "backup '%s' finished." 
msgstr "" -#: cinder/api/openstack/compute/servers.py:550 -#, python-format -msgid "Bad networks format: network uuid is not in proper format (%s)" +#: cinder/backup/drivers/ceph.py:834 +msgid "adjusting restore vol size" msgstr "" -#: cinder/api/openstack/compute/servers.py:559 +#: cinder/backup/drivers/ceph.py:846 #, python-format -msgid "Invalid fixed IP address (%s)" +msgid "trying incremental restore from base='%(base)s' snap='%(snap)s'" msgstr "" -#: cinder/api/openstack/compute/servers.py:566 -#, python-format -msgid "Duplicate networks (%s) are not allowed" +#: cinder/backup/drivers/ceph.py:858 +msgid "differential restore failed, trying full restore" msgstr "" -#: cinder/api/openstack/compute/servers.py:572 +#: cinder/backup/drivers/ceph.py:869 #, python-format -msgid "Bad network format: missing %s" -msgstr "" - -#: cinder/api/openstack/compute/servers.py:575 -msgid "Bad networks format" +msgid "restore transfer completed in %.4fs" msgstr "" -#: cinder/api/openstack/compute/servers.py:587 -msgid "Userdata content cannot be decoded" +#: cinder/backup/drivers/ceph.py:916 +#, python-format +msgid "rbd has %s extents" msgstr "" -#: cinder/api/openstack/compute/servers.py:594 -msgid "accessIPv4 is not proper IPv4 format" +#: cinder/backup/drivers/ceph.py:938 +msgid "dest volume is original volume - forcing full copy" msgstr "" -#: cinder/api/openstack/compute/servers.py:601 -msgid "accessIPv6 is not proper IPv6 format" +#: cinder/backup/drivers/ceph.py:959 +msgid "destination has extents - forcing full copy" msgstr "" -#: cinder/api/openstack/compute/servers.py:633 -msgid "Server name is not defined" +#: cinder/backup/drivers/ceph.py:964 +#, python-format +msgid "no restore point found for backup='%s', forcing full copy" msgstr "" -#: cinder/api/openstack/compute/servers.py:675 -#: cinder/api/openstack/compute/servers.py:740 -msgid "Invalid flavorRef provided." +#: cinder/backup/drivers/ceph.py:995 +msgid "forcing full restore" msgstr "" -#: cinder/api/openstack/compute/servers.py:737 -msgid "Can not find requested image" +#: cinder/backup/drivers/ceph.py:1005 +#, python-format +msgid "starting restore from Ceph backup=%(src)s to volume=%(dest)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:743 -msgid "Invalid key_name provided." +#: cinder/backup/drivers/ceph.py:1016 +msgid "volume_file does not support fileno() so skipping fsync()" msgstr "" -#: cinder/api/openstack/compute/servers.py:829 -#: cinder/api/openstack/compute/servers.py:849 -msgid "Instance has not been resized." +#: cinder/backup/drivers/ceph.py:1021 +msgid "restore finished successfully." 
msgstr "" -#: cinder/api/openstack/compute/servers.py:835 +#: cinder/backup/drivers/ceph.py:1023 #, python-format -msgid "Error in confirm-resize %s" +msgid "restore finished with error - %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:855 +#: cinder/backup/drivers/ceph.py:1029 #, python-format -msgid "Error in revert-resize %s" +msgid "delete started for backup=%s" msgstr "" -#: cinder/api/openstack/compute/servers.py:868 -msgid "Argument 'type' for reboot is not HARD or SOFT" +#: cinder/backup/drivers/ceph.py:1034 +msgid "rbd image not found but continuing anyway so that db entry can be removed" msgstr "" -#: cinder/api/openstack/compute/servers.py:872 -msgid "Missing argument 'type' for reboot" +#: cinder/backup/drivers/ceph.py:1037 +#, python-format +msgid "delete '%s' finished with warning" msgstr "" -#: cinder/api/openstack/compute/servers.py:885 +#: cinder/backup/drivers/ceph.py:1039 #, python-format -msgid "Error in reboot %s" +msgid "delete '%s' finished" msgstr "" -#: cinder/api/openstack/compute/servers.py:897 -msgid "Unable to locate requested flavor." +#: cinder/backup/drivers/swift.py:106 +#, python-format +msgid "unsupported compression algorithm: %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:900 -msgid "Resize requires a change in size." +#: cinder/backup/drivers/swift.py:123 +#, python-format +msgid "single_user auth mode enabled, but %(param)s not set" msgstr "" -#: cinder/api/openstack/compute/servers.py:924 -msgid "Malformed server entity" +#: cinder/backup/drivers/swift.py:141 +#, python-format +msgid "_check_container_exists: container: %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:931 -msgid "Missing imageRef attribute" +#: cinder/backup/drivers/swift.py:146 +#, fuzzy, python-format +msgid "container %s does not exist" +msgstr "el grupo de volumenes %s no existe" + +#: cinder/backup/drivers/swift.py:151 +#, python-format +msgid "container %s exists" msgstr "" -#: cinder/api/openstack/compute/servers.py:940 -msgid "Invalid imageRef provided." +#: cinder/backup/drivers/swift.py:157 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:949 -msgid "Missing flavorRef attribute" +#: cinder/backup/drivers/swift.py:173 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:962 -msgid "No adminPass was specified" +#: cinder/backup/drivers/swift.py:182 +#, python-format +msgid "generated object list: %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:966 -#: cinder/api/openstack/compute/servers.py:1144 -msgid "Invalid adminPass" +#: cinder/backup/drivers/swift.py:192 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:980 -msgid "Unable to parse metadata key/value pairs." +#: cinder/backup/drivers/swift.py:209 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" msgstr "" -#: cinder/api/openstack/compute/servers.py:993 -msgid "Resize request has invalid 'flavorRef' attribute." +#: cinder/backup/drivers/swift.py:214 +msgid "_write_metadata finished" msgstr "" -#: cinder/api/openstack/compute/servers.py:996 -msgid "Resize requests require 'flavorRef' attribute." 
+#: cinder/backup/drivers/swift.py:219 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:1014 -#: cinder/api/openstack/compute/contrib/aggregates.py:142 -#: cinder/api/openstack/compute/contrib/networks.py:65 -msgid "Invalid request body" +#: cinder/backup/drivers/swift.py:224 +#, python-format +msgid "_read_metadata finished (%s)" msgstr "" -#: cinder/api/openstack/compute/servers.py:1019 -msgid "Could not parse imageRef from request." +#: cinder/backup/drivers/swift.py:234 +#, fuzzy, python-format +msgid "volume size %d is invalid." +msgstr "La petición es inválida." + +#: cinder/backup/drivers/swift.py:248 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:1071 -msgid "Instance could not be found" +#: cinder/backup/drivers/swift.py:271 +msgid "reading chunk of data from volume" msgstr "" -#: cinder/api/openstack/compute/servers.py:1074 -msgid "Cannot find image for rebuild" +#: cinder/backup/drivers/swift.py:278 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:1103 -msgid "createImage entity requires name attribute" +#: cinder/backup/drivers/swift.py:287 +msgid "not compressing data" msgstr "" -#: cinder/api/openstack/compute/servers.py:1112 -#: cinder/api/openstack/compute/contrib/admin_actions.py:238 -msgid "Invalid metadata" +#: cinder/backup/drivers/swift.py:291 +msgid "About to put_object" msgstr "" -#: cinder/api/openstack/compute/servers.py:1167 +#: cinder/backup/drivers/swift.py:297 #, python-format -msgid "Removing options '%(unk_opt_str)s' from query" +msgid "swift MD5 for %(object_name)s: %(etag)s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:60 +#: cinder/backup/drivers/swift.py:301 #, python-format -msgid "Compute.api::pause %s" -msgstr "Compute.api::pause %s" +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:77 +#: cinder/backup/drivers/swift.py:304 #, python-format -msgid "Compute.api::unpause %s" -msgstr "Compute.api::unpause %s" +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:94 -#, python-format -msgid "compute.api::suspend %s" -msgstr "compute.api::suspend %s" +#: cinder/backup/drivers/swift.py:312 +msgid "Calling eventlet.sleep(0)" +msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:111 +#: cinder/backup/drivers/swift.py:328 cinder/backup/drivers/tsm.py:324 #, python-format -msgid "compute.api::resume %s" -msgstr "compute.api::resume %s" +msgid "backup %s finished." 
+msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:127 +#: cinder/backup/drivers/swift.py:345 #, python-format -msgid "Error in migrate %s" +msgid "v1 swift volume backup restore of %s started" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:141 +#: cinder/backup/drivers/swift.py:350 #, python-format -msgid "Compute.api::reset_network %s" -msgstr "Compute.api::reset_network %s" +msgid "metadata_object_names = %s" +msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:154 -#: cinder/api/openstack/compute/contrib/admin_actions.py:170 -#: cinder/api/openstack/compute/contrib/admin_actions.py:186 -#: cinder/api/openstack/compute/contrib/multinic.py:41 -#: cinder/api/openstack/compute/contrib/rescue.py:44 -msgid "Server not found" +#: cinder/backup/drivers/swift.py:356 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:157 +#: cinder/backup/drivers/swift.py:362 #, python-format -msgid "Compute.api::inject_network_info %s" +msgid "" +"restoring object from swift. backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:173 +#: cinder/backup/drivers/swift.py:378 #, python-format -msgid "Compute.api::lock %s" -msgstr "Compute.api::lock %s" +msgid "decompressing data using %s algorithm" +msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:189 +#: cinder/backup/drivers/swift.py:401 #, python-format -msgid "Compute.api::unlock %s" -msgstr "Compute.api::unlock %s" +msgid "v1 swift volume backup restore of %s finished" +msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:219 +#: cinder/backup/drivers/swift.py:409 #, python-format -msgid "createBackup entity requires %s attribute" +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:223 -msgid "Malformed createBackup entity" +#: cinder/backup/drivers/swift.py:423 +#, python-format +msgid "Restoring swift backup version %s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:229 -msgid "createBackup attribute 'rotation' must be an integer" +#: cinder/backup/drivers/swift.py:428 +#, python-format +msgid "No support to restore swift backup version %s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:244 -#: cinder/api/openstack/compute/contrib/console_output.py:47 -#: cinder/api/openstack/compute/contrib/console_output.py:59 -#: cinder/api/openstack/compute/contrib/consoles.py:49 -#: cinder/api/openstack/compute/contrib/consoles.py:60 -#: cinder/api/openstack/compute/contrib/server_action_list.py:49 -#: cinder/api/openstack/compute/contrib/server_diagnostics.py:47 -#: cinder/api/openstack/compute/contrib/server_start_stop.py:38 -msgid "Instance not found" +#: cinder/backup/drivers/swift.py:432 cinder/backup/drivers/tsm.py:378 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:272 -msgid "host and block_migration must be specified." 
+#: cinder/backup/drivers/swift.py:446 +msgid "swift error while listing objects, continuing with delete" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:284 +#: cinder/backup/drivers/swift.py:455 #, python-format -msgid "Live migration of instance %(id)s to host %(host)s failed" +msgid "swift error while deleting object %s, continuing with delete" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:76 +#: cinder/backup/drivers/swift.py:458 #, python-format -msgid "" -"Cannot create aggregate with name %(name)s and availability zone " -"%(avail_zone)s" +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:88 +#: cinder/backup/drivers/swift.py:468 cinder/backup/drivers/tsm.py:440 #, python-format -msgid "Cannot show aggregate: %(id)s" +msgid "delete %s finished" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:114 +#: cinder/backup/drivers/tsm.py:85 #, python-format -msgid "Cannot update aggregate: %(id)s" +msgid "" +"backup: %(vol_id)s Failed to create device hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:126 +#: cinder/backup/drivers/tsm.py:143 #, python-format -msgid "Cannot delete aggregate: %(id)s" +msgid "" +"backup: %(vol_id)s Failed to obtain backup success notification from " +"server.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:139 +#: cinder/backup/drivers/tsm.py:173 #, python-format -msgid "Aggregates does not have %s action" +msgid "" +"restore: %(vol_id)s Failed.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:152 -#: cinder/api/openstack/compute/contrib/aggregates.py:158 +#: cinder/backup/drivers/tsm.py:199 #, python-format -msgid "Cannot add host %(host)s in aggregate %(id)s" +msgid "backup: %(vol_id)s Failed. %(path)s is not a block device." msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:171 -#: cinder/api/openstack/compute/contrib/aggregates.py:175 +#: cinder/backup/drivers/tsm.py:206 #, python-format -msgid "Cannot remove host %(host)s in aggregate %(id)s" +msgid "backup: %(vol_id)s Failed. Cannot obtain real path to device %(path)s." msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:195 +#: cinder/backup/drivers/tsm.py:213 #, python-format -msgid "Cannot set metadata %(metadata)s in aggregate %(id)s" +msgid "backup: %(vol_id)s Failed. %(path)s is not a file." msgstr "" -#: cinder/api/openstack/compute/contrib/certificates.py:75 -msgid "Only root certificate can be retrieved." 
+#: cinder/backup/drivers/tsm.py:260
+#, python-format
+msgid ""
+"backup: %(vol_id)s Failed to remove backup hardlink from %(vpath)s to "
+"%(bpath)s.\n"
+"stdout: %(out)s\n"
+" stderr: %(err)s"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/cloudpipe.py:146
+#: cinder/backup/drivers/tsm.py:286
+#, python-format
 msgid ""
-"Unable to claim IP for VPN instances, ensure it isn't running, and try "
-"again in a few minutes"
+"starting backup of volume: %(volume_id)s to TSM, volume path: "
+"%(volume_path)s."
 msgstr ""

-#: cinder/api/openstack/compute/contrib/consoles.py:44
-msgid "Missing type specification"
+#: cinder/backup/drivers/tsm.py:298
+#, python-format
+msgid ""
+"backup: %(vol_id)s Failed to run dsmc on %(bpath)s.\n"
+"stdout: %(out)s\n"
+" stderr: %(err)s"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/consoles.py:56
-msgid "Invalid type specification"
+#: cinder/backup/drivers/tsm.py:308
+#, python-format
+msgid ""
+"backup: %(vol_id)s Failed to run dsmc due to invalid arguments on "
+"%(bpath)s.\n"
+"stdout: %(out)s\n"
+" stderr: %(err)s"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/disk_config.py:44
+#: cinder/backup/drivers/tsm.py:338
 #, python-format
-msgid "%s must be either 'MANUAL' or 'AUTO'."
+msgid ""
+"restore: starting restore of backup from TSM to volume %(volume_id)s, "
+"backup: %(backup_id)s"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/extended_server_attributes.py:77
-#: cinder/api/openstack/compute/contrib/extended_status.py:61
-msgid "Server not found."
+#: cinder/backup/drivers/tsm.py:352
+#, python-format
+msgid ""
+"restore: %(vol_id)s Failed to run dsmc on %(bpath)s.\n"
+"stdout: %(out)s\n"
+" stderr: %(err)s"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/flavorextradata.py:61
-#: cinder/api/openstack/compute/contrib/flavorextradata.py:91
-msgid "Flavor not found."
+#: cinder/backup/drivers/tsm.py:362
+#, python-format
+msgid ""
+"restore: %(vol_id)s Failed to run dsmc due to invalid arguments on "
+"%(bpath)s.\n"
+"stdout: %(out)s\n"
+" stderr: %(err)s"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:49
-#: cinder/api/openstack/compute/contrib/volumetypes.py:158
-msgid "No Request Body"
+#: cinder/backup/drivers/tsm.py:413
+#, python-format
+msgid ""
+"delete: %(vol_id)s Failed to run dsmc with stdout: %(out)s\n"
+" stderr: %(err)s"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/floating_ips.py:159
+#: cinder/backup/drivers/tsm.py:421
 #, python-format
-msgid "No more floating ips in pool %s."
+msgid ""
+"restore: %(vol_id)s Failed to run dsmc due to invalid arguments with "
+"stdout: %(out)s\n"
+" stderr: %(err)s"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/floating_ips.py:161
-msgid "No more floating ips available."
+#: cinder/backup/drivers/tsm.py:432
+#, python-format
+msgid ""
+"delete: %(vol_id)s Failed with stdout: %(out)s\n"
+" stderr: %(err)s"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/floating_ips.py:201
-#: cinder/api/openstack/compute/contrib/floating_ips.py:230
-#: cinder/api/openstack/compute/contrib/security_groups.py:571
-#: cinder/api/openstack/compute/contrib/security_groups.py:604
-msgid "Missing parameter dict"
+#: cinder/brick/exception.py:55
+#, python-format
+msgid "Exception in string format operation. msg='%s'"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/floating_ips.py:204
-#: cinder/api/openstack/compute/contrib/floating_ips.py:233
-msgid "Address not specified"
+#: cinder/brick/exception.py:85
+msgid "We are unable to locate any Fibre Channel devices."
msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:213 -msgid "No fixed ips associated to instance" +#: cinder/brick/exception.py:89 +msgid "Unable to find a Fibre Channel volume device." msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:216 -msgid "Associate floating ip failed" +#: cinder/brick/exception.py:93 +#, python-format +msgid "Volume device not found at %(device)s." msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:144 +#: cinder/brick/exception.py:97 #, python-format -msgid "Invalid status: '%s'" +msgid "Unable to find Volume Group: %(vg_name)s" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:148 -#, fuzzy, python-format -msgid "Invalid mode: '%s'" -msgstr "backend inválido: %s" - -#: cinder/api/openstack/compute/contrib/hosts.py:152 +#: cinder/brick/exception.py:101 #, python-format -msgid "Invalid update setting: '%s'" +msgid "Failed to create Volume Group: %(vg_name)s" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:170 -#, python-format -msgid "Putting host %(host)s in maintenance mode %(mode)s." -msgstr "" +#: cinder/brick/exception.py:105 +#, fuzzy, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "Fallo al generar metadatos para la ip %s" -#: cinder/api/openstack/compute/contrib/hosts.py:181 +#: cinder/brick/exception.py:109 #, python-format -msgid "Setting host %(host)s to %(state)s." -msgstr "" - -#: cinder/api/openstack/compute/contrib/hosts.py:230 -msgid "Describe-resource is admin only functionality" -msgstr "" - -#: cinder/api/openstack/compute/contrib/hosts.py:238 -msgid "Host not found" -msgstr "" - -#: cinder/api/openstack/compute/contrib/keypairs.py:70 -msgid "Keypair name contains unsafe characters" -msgstr "" - -#: cinder/api/openstack/compute/contrib/keypairs.py:95 -msgid "Keypair name must be between 1 and 255 characters long" +msgid "Failed to remove iscsi target for volume %(volume_id)s." msgstr "" -#: cinder/api/openstack/compute/contrib/keypairs.py:100 +#: cinder/brick/exception.py:113 #, python-format -msgid "Key pair '%s' already exists." +msgid "Failed to attach iSCSI target for volume %(volume_id)s." msgstr "" -#: cinder/api/openstack/compute/contrib/multinic.py:52 -msgid "Missing 'networkId' argument for addFixedIp" -msgstr "" - -#: cinder/api/openstack/compute/contrib/multinic.py:68 -msgid "Missing 'address' argument for removeFixedIp" -msgstr "" - -#: cinder/api/openstack/compute/contrib/multinic.py:77 +#: cinder/brick/exception.py:117 #, python-format -msgid "Unable to find address %r" +msgid "Connect to volume via protocol %(protocol)s not supported." msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:62 +#: cinder/brick/initiator/connector.py:127 #, python-format -msgid "Network does not have %s action" +msgid "Invalid InitiatorConnector protocol specified %(protocol)s" msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:70 +#: cinder/brick/initiator/connector.py:140 #, python-format -msgid "Disassociating network with id %s" +msgid "Failed to access the device on the path %(path)s: %(error)s %(info)s." msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:74 -#: cinder/api/openstack/compute/contrib/networks.py:91 -#: cinder/api/openstack/compute/contrib/networks.py:101 -msgid "Network not found" +#: cinder/brick/initiator/connector.py:229 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. 
Try" +" number: %(tries)s" msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:87 +#: cinder/brick/initiator/connector.py:242 #, python-format -msgid "Showing network with id %s" +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:97 +#: cinder/brick/initiator/connector.py:317 #, python-format -msgid "Deleting network with id %s" +msgid "Could not find the iSCSI Initiator File %s" msgstr "" -#: cinder/api/openstack/compute/contrib/scheduler_hints.py:41 -msgid "Malformed scheduler_hints attribute" +#: cinder/brick/initiator/connector.py:609 +msgid "We are unable to locate any Fibre Channel devices" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:222 -msgid "Security group id should be integer" +#: cinder/brick/initiator/connector.py:619 +#, python-format +msgid "Looking for Fibre Channel dev %(device)s" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:243 -#, fuzzy, python-format -msgid "Security group is still in use" -msgstr "Revocar ingreso al grupo de seguridad %s" - -#: cinder/api/openstack/compute/contrib/security_groups.py:295 -#, python-format -msgid "Security group %s already exists" +#: cinder/brick/initiator/connector.py:629 +msgid "Fibre Channel volume device not found." msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:315 +#: cinder/brick/initiator/connector.py:633 #, python-format -msgid "Security group %s is not a string or unicode" +msgid "Fibre volume not yet found. Will rescan & retry. Try number: %(tries)s" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:318 +#: cinder/brick/initiator/connector.py:649 #, python-format -msgid "Security group %s cannot be empty." +msgid "Found Fibre Channel volume %(name)s (after %(tries)s rescans)" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:321 +#: cinder/brick/initiator/connector.py:658 #, python-format -msgid "Security group %s should not be greater than 255 characters." +msgid "Multipath device discovered %(device)s" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:348 -msgid "Parent group id is not integer" +#: cinder/brick/initiator/connector.py:776 +#, python-format +msgid "AoE volume not yet found at: %(path)s. Try number: %(tries)s" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:351 +#: cinder/brick/initiator/connector.py:789 #, python-format -msgid "Security group (%s) not found" +msgid "Found AoE device %(path)s (after %(tries)s rediscover)" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:369 -msgid "Not enough parameters to build a valid rule." 
+#: cinder/brick/initiator/connector.py:815
+#, python-format
+msgid "aoe-discover: stdout=%(out)s stderr=%(err)s"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/security_groups.py:376
+#: cinder/brick/initiator/connector.py:825
 #, python-format
-msgid "This rule already exists in group %s"
-msgstr "Esta regla ya existe en el grupo %s"
-
-#: cinder/api/openstack/compute/contrib/security_groups.py:414
-msgid "Parent or group id is not integer"
+msgid "aoe-revalidate %(dev)s: stdout=%(out)s stderr=%(err)s"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/security_groups.py:507
-msgid "Rule id is not integer"
+#: cinder/brick/initiator/connector.py:834
+#, python-format
+msgid "aoe-flush %(dev)s: stdout=%(out)s stderr=%(err)s"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/security_groups.py:510
-#, python-format
-msgid "Rule (%s) not found"
+#: cinder/brick/initiator/connector.py:858
+msgid ""
+"Connection details not present. RemoteFsClient may not initialize "
+"properly."
 msgstr ""

-#: cinder/api/openstack/compute/contrib/security_groups.py:574
-#: cinder/api/openstack/compute/contrib/security_groups.py:607
-msgid "Security group not specified"
+#: cinder/brick/initiator/connector.py:915
+msgid "Invalid connection_properties specified: no device_path attribute"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/security_groups.py:578
-#: cinder/api/openstack/compute/contrib/security_groups.py:611
-msgid "Security group name cannot be empty"
+#: cinder/brick/initiator/linuxfc.py:50 cinder/brick/initiator/linuxfc.py:56
+msgid "systool is not installed"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/server_start_stop.py:45
+#: cinder/brick/initiator/linuxscsi.py:99
+#: cinder/brick/initiator/linuxscsi.py:107
+#: cinder/brick/initiator/linuxscsi.py:124
 #, python-format
-msgid "start instance %r"
+msgid "multipath call failed exit (%(code)s)"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/server_start_stop.py:54
+#: cinder/brick/initiator/linuxscsi.py:145
 #, python-format
-msgid "stop instance %r"
+msgid "Couldn't find multipath device %(line)s"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/volumes.py:73
-#: cinder/api/openstack/volume/volumes.py:106
+#: cinder/brick/initiator/linuxscsi.py:149
 #, python-format
-msgid "vol=%s"
+msgid "Found multipath device = %(mdev)s"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/volumes.py:146
-#: cinder/api/openstack/volume/volumes.py:184
-#, python-format
-msgid "Delete volume with id: %s"
+#: cinder/brick/iscsi/iscsi.py:140
+msgid "Attempting recreate of backing lun..."
 msgstr ""

-#: cinder/api/openstack/compute/contrib/volumes.py:329
+#: cinder/brick/iscsi/iscsi.py:158
 #, python-format
-msgid "Attach volume %(volume_id)s to instance %(server_id)s at %(device)s"
+msgid ""
+"Failed to recover attempt to create iscsi backing lun for volume "
+"id:%(vol_id)s: %(e)s"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/volumes.py:481
-#: cinder/api/openstack/volume/snapshots.py:110
+#: cinder/brick/iscsi/iscsi.py:177
 #, python-format
-msgid "Delete snapshot with id: %s"
+msgid "Creating iscsi_target for: %s"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/volumes.py:524
-#: cinder/api/openstack/volume/snapshots.py:150
+#: cinder/brick/iscsi/iscsi.py:184
 #, python-format
-msgid "Create snapshot from volume %s"
+msgid ""
+"Created volume path %(vp)s,\n"
+"content: %(vc)s"
 msgstr ""

-#: cinder/auth/fakeldap.py:33
-msgid "Attempted to instantiate singleton"
-msgstr "Intento de instanciar sigleton"
+#: cinder/brick/iscsi/iscsi.py:216 cinder/brick/iscsi/iscsi.py:365
+#, fuzzy, python-format
+msgid "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s"
+msgstr "Fallo al generar metadatos para la ip %s"

-#: cinder/auth/ldapdriver.py:650
+#: cinder/brick/iscsi/iscsi.py:227
 #, python-format
 msgid ""
-"Attempted to remove the last member of a group. Deleting the group at %s "
-"instead."
+"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure "
+"your tgtd config file contains 'include %(volumes_dir)s/*'"
 msgstr ""
-"Se ha intentado eliminar el último miembro de un grupo. Eliminando el "
-"grupo %s en su lugar."

-#: cinder/auth/manager.py:298
+#: cinder/brick/iscsi/iscsi.py:258
 #, python-format
-msgid "Looking up user: %r"
-msgstr "Buscando usuario: %r"
+msgid "Removing iscsi_target for: %s"
+msgstr ""

-#: cinder/auth/manager.py:302
+#: cinder/brick/iscsi/iscsi.py:262
 #, python-format
-msgid "Failed authorization for access key %s"
-msgstr "Fallo de autorización para la clave de acceso %s"
+msgid "Volume path %s does not exist, nothing to remove."
+msgstr ""

-#: cinder/auth/manager.py:308
-#, python-format
-msgid "Using project name = user name (%s)"
-msgstr "Utilizando nombre de proyecto = nombre de usuario (%s)"
+#: cinder/brick/iscsi/iscsi.py:280
+#, fuzzy, python-format
+msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s"
+msgstr "Fallo al generar metadatos para la ip %s"

-#: cinder/auth/manager.py:315
-#, python-format
-msgid "failed authorization: no project named %(pjid)s (user=%(uname)s)"
+#: cinder/brick/iscsi/iscsi.py:290 cinder/brick/iscsi/iscsi.py:550
+msgid "valid iqn needed for show_target"
 msgstr ""

-#: cinder/auth/manager.py:324
+#: cinder/brick/iscsi/iscsi.py:375
 #, python-format
-msgid ""
-"Failed authorization: user %(uname)s not admin and not member of project "
-"%(pjname)s"
+msgid "Removing iscsi_target for volume: %s"
 msgstr ""

-#: cinder/auth/manager.py:331 cinder/auth/manager.py:343
-#, python-format
-msgid "user.secret: %s"
+#: cinder/brick/iscsi/iscsi.py:469
+msgid "cinder-rtstool is not installed correctly"
 msgstr ""

-#: cinder/auth/manager.py:332 cinder/auth/manager.py:344
+#: cinder/brick/iscsi/iscsi.py:489
 #, python-format
-msgid "expected_signature: %s"
+msgid "Creating iscsi_target for volume: %s"
 msgstr ""

-#: cinder/auth/manager.py:333 cinder/auth/manager.py:345
+#: cinder/brick/iscsi/iscsi.py:513 cinder/brick/iscsi/iscsi.py:522
 #, python-format
-msgid "signature: %s"
+msgid "Failed to create iscsi target for volume id:%s."
 msgstr ""

-#: cinder/auth/manager.py:335 cinder/auth/manager.py:357
-#, python-format
-msgid "Invalid signature for user %s"
-msgstr "Firma invalida para el usuario %s"
+#: cinder/brick/iscsi/iscsi.py:532
+#, fuzzy, python-format
+msgid "Removing iscsi_target: %s"
+msgstr "Reiniciando instancia %s"

-#: cinder/auth/manager.py:353
+#: cinder/brick/iscsi/iscsi.py:542
 #, python-format
-msgid "host_only_signature: %s"
+msgid "Failed to remove iscsi target for volume id:%s."
 msgstr ""

-#: cinder/auth/manager.py:449
-msgid "Must specify project"
-msgstr "Debes especificar un proyecto"
-
-#: cinder/auth/manager.py:490
+#: cinder/brick/iscsi/iscsi.py:571
 #, python-format
-msgid "Adding role %(role)s to user %(uid)s in project %(pid)s"
+msgid "Failed to add initiator iqn %s to target"
 msgstr ""

-#: cinder/auth/manager.py:493
-#, python-format
-msgid "Adding sitewide role %(role)s to user %(uid)s"
+#: cinder/brick/local_dev/lvm.py:75
+msgid "Error creating Volume Group"
 msgstr ""

-#: cinder/auth/manager.py:519
+#: cinder/brick/local_dev/lvm.py:76 cinder/brick/local_dev/lvm.py:158
+#: cinder/brick/local_dev/lvm.py:474 cinder/brick/local_dev/lvm.py:503
+#: cinder/brick/local_dev/lvm.py:546 cinder/brick/local_dev/lvm.py:609
 #, python-format
-msgid "Removing role %(role)s from user %(uid)s on project %(pid)s"
+msgid "Cmd: %s"
 msgstr ""

-#: cinder/auth/manager.py:522
+#: cinder/brick/local_dev/lvm.py:77 cinder/brick/local_dev/lvm.py:159
+#: cinder/brick/local_dev/lvm.py:475 cinder/brick/local_dev/lvm.py:504
+#: cinder/brick/local_dev/lvm.py:547 cinder/brick/local_dev/lvm.py:610
 #, python-format
-msgid "Removing sitewide role %(role)s from user %(uid)s"
+msgid "StdOut: %s"
 msgstr ""

-#: cinder/auth/manager.py:595
-#, python-format
-msgid "Created project %(name)s with manager %(manager_user)s"
-msgstr "Creado el proyecto %(name)s con administrador %(manager_user)s"
-
-#: cinder/auth/manager.py:613
-#, python-format
-msgid "modifying project %s"
-msgstr "Modificando proyecto %s"
-
-#: cinder/auth/manager.py:625
-#, python-format
-msgid "Adding user %(uid)s to project %(pid)s"
-msgstr "Agregando usuario %(uid)s para el proyecto %(pid)s"
-
-#: cinder/auth/manager.py:646
-#, python-format
-msgid "Remove user %(uid)s from project %(pid)s"
-msgstr "Borrar usuario %(uid)s del proyecto %(pid)s"
-
-#: cinder/auth/manager.py:676
+#: cinder/brick/local_dev/lvm.py:78 cinder/brick/local_dev/lvm.py:160
+#: cinder/brick/local_dev/lvm.py:476 cinder/brick/local_dev/lvm.py:505
+#: cinder/brick/local_dev/lvm.py:548 cinder/brick/local_dev/lvm.py:611
 #, python-format
-msgid "Deleting project %s"
-msgstr "Borrando proyecto %s"
-
-#: cinder/auth/manager.py:734
-#, python-format
-msgid "Created user %(rvname)s (admin: %(rvadmin)r)"
+msgid "StdErr: %s"
 msgstr ""

-#: cinder/auth/manager.py:743
-#, python-format
-msgid "Deleting user %s"
-msgstr "Borrando usuario %s"
+#: cinder/brick/local_dev/lvm.py:82
+#, fuzzy, python-format
+msgid "Unable to locate Volume Group %s"
+msgstr "Imposible encontrar volumen %s"

-#: cinder/auth/manager.py:753
-#, python-format
-msgid "Access Key change for user %s"
-msgstr "Cambio de clave de acceso para el usuario %s"
+#: cinder/brick/local_dev/lvm.py:157
+msgid "Error querying thin pool about data_percent"
+msgstr ""

-#: cinder/auth/manager.py:755
-#, python-format
-msgid "Secret Key change for user %s"
-msgstr "Cambio de clave secreta para el usuario %s"
+#: cinder/brick/local_dev/lvm.py:370
+#, fuzzy, python-format
+msgid "Unable to find VG: %s"
+msgstr "Imposible encontrar SR en VBD %s"

-#: cinder/auth/manager.py:757
-#, python-format
-msgid "Admin status set to %(admin)r for user %(uid)s"
+#: cinder/brick/local_dev/lvm.py:420
+msgid ""
+"Requested to set up thin provisioning, but the current LVM version does "
+"not support it."
 msgstr ""

-#: cinder/auth/manager.py:802
-#, python-format
-msgid "No vpn data for project %s"
-msgstr "No hay datos vpn para el proyecto %s"
+#: cinder/brick/local_dev/lvm.py:473
+msgid "Error creating Volume"
+msgstr ""

-#: cinder/cloudpipe/pipelib.py:46
+#: cinder/brick/local_dev/lvm.py:489
 #, fuzzy, python-format
-msgid "Instance type for vpn instances"
-msgstr "Obtener salida de la consola para la instancia %s"
+msgid "Unable to find LV: %s"
+msgstr "Imposible encontrar SR en VBD %s"

-#: cinder/cloudpipe/pipelib.py:49
-msgid "Template for cloudpipe instance boot script"
+#: cinder/brick/local_dev/lvm.py:502
+msgid "Error creating snapshot"
 msgstr ""

-#: cinder/cloudpipe/pipelib.py:52
-msgid "Network to push into openvpn config"
-msgstr "Red a insertar en la configuración de openvpn"
-
-#: cinder/cloudpipe/pipelib.py:55
-msgid "Netmask to push into openvpn config"
-msgstr "Mascara de red a insertar en la configuración de openvpn"
+#: cinder/brick/local_dev/lvm.py:545
+msgid "Error activating LV"
+msgstr ""

-#: cinder/cloudpipe/pipelib.py:107
+#: cinder/brick/local_dev/lvm.py:563
 #, python-format
-msgid "Launching VPN for %s"
-msgstr "Lanzando VPN para %s"
+msgid "Error reported running lvremove: CMD: %(command)s, RESPONSE: %(response)s"
+msgstr ""

-#: cinder/compute/api.py:141
-msgid "No compute host specified"
+#: cinder/brick/local_dev/lvm.py:568
+msgid "Attempting udev settle and retry of lvremove..."
 msgstr ""

-#: cinder/compute/api.py:144
-#, python-format
-msgid "Unable to find host for Instance %s"
+#: cinder/brick/local_dev/lvm.py:608
+msgid "Error extending Volume"
 msgstr ""

-#: cinder/compute/api.py:192
-#, python-format
-msgid ""
-"Quota exceeded for %(pid)s, tried to set %(num_metadata)s metadata "
-"properties"
+#: cinder/brick/remotefs/remotefs.py:39
+msgid "nfs_mount_point_base required"
+msgstr ""
+
+#: cinder/brick/remotefs/remotefs.py:45
+msgid "glusterfs_mount_point_base required"
 msgstr ""

-#: cinder/compute/api.py:203
+#: cinder/brick/remotefs/remotefs.py:86
 #, python-format
-msgid "Quota exceeded for %(pid)s, metadata property key or value too long"
+msgid "Already mounted: %s"
 msgstr ""

-#: cinder/compute/api.py:257
-#, fuzzy, python-format
-msgid "Cannot run any more instances of this type."
+#: cinder/common/config.py:125
+msgid "Deploy v1 of the Cinder API."
 msgstr ""
-"Quota de instancias superada. Sólo puedes ejecutar %s instancias más de "
-"este tipo."

-#: cinder/compute/api.py:259
-#, fuzzy, python-format
-msgid "Can only run %s more instances of this type."
+#: cinder/common/config.py:128
+msgid "Deploy v2 of the Cinder API."
 msgstr ""
-"Quota de instancias superada. Sólo puedes ejecutar %s instancias más de "
-"este tipo."

-#: cinder/compute/api.py:261
-#, python-format
-msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances. "
+#: cinder/common/sqlalchemyutils.py:66
+#: cinder/openstack/common/db/sqlalchemy/utils.py:72
+msgid "Id not in sort_keys; is sort_keys unique?"
msgstr "" -#: cinder/compute/api.py:310 -msgid "Creating a raw instance" -msgstr "Creando una instancia raw" +#: cinder/common/sqlalchemyutils.py:114 +#: cinder/openstack/common/db/sqlalchemy/utils.py:120 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" -#: cinder/compute/api.py:312 +#: cinder/compute/nova.py:97 #, python-format -msgid "Using Kernel=%(kernel_id)s, Ramdisk=%(ramdisk_id)s" +msgid "Novaclient connection created using URL: %s" msgstr "" -#: cinder/compute/api.py:383 -#, python-format -msgid "Going to run %s instances..." -msgstr "Vamos a ejecutar %s insntacias..." +#: cinder/db/sqlalchemy/api.py:63 +msgid "Use of empty request context is deprecated" +msgstr "El uso de una petición de contexto vacía está en desuso" -#: cinder/compute/api.py:447 +#: cinder/db/sqlalchemy/api.py:190 #, python-format -msgid "bdm %s" +msgid "Unrecognized read_deleted value '%s'" msgstr "" -#: cinder/compute/api.py:474 +#: cinder/db/sqlalchemy/api.py:843 #, python-format -msgid "block_device_mapping %s" +msgid "Change will make usage less than 0 for the following resources: %s" msgstr "" -#: cinder/compute/api.py:591 +#: cinder/db/sqlalchemy/api.py:1842 #, python-format -msgid "Sending create to scheduler for %(pid)s/%(uid)s's" +msgid "VolumeType %s deletion failed, VolumeType in use." msgstr "" -#: cinder/compute/api.py:871 -#, fuzzy, python-format -msgid "Going to try to soft delete instance" -msgstr "Vamos a ejecutar %s insntacias..." - -#: cinder/compute/api.py:891 -msgid "No host for instance, deleting immediately" +#: cinder/db/sqlalchemy/api.py:2530 +#, python-format +msgid "No backup with id %s" msgstr "" -#: cinder/compute/api.py:939 +#: cinder/db/sqlalchemy/api.py:2615 #, fuzzy -msgid "Going to try to terminate instance" -msgstr "Se va a iniciar la finalización de las instancias" - -#: cinder/compute/api.py:977 -#, fuzzy, python-format -msgid "Going to try to stop instance" -msgstr "Vamos a ejecutar %s insntacias..." - -#: cinder/compute/api.py:996 -#, fuzzy, python-format -msgid "Going to try to start instance" -msgstr "Vamos a ejecutar %s insntacias..." +msgid "Volume must be available" +msgstr "El estado del volumen debe estar disponible" -#: cinder/compute/api.py:1000 +#: cinder/db/sqlalchemy/api.py:2639 #, python-format -msgid "Instance %(instance_uuid)s is not stopped. (%(vm_state)s" +msgid "Volume in unexpected state %s, expected awaiting-transfer" msgstr "" -#: cinder/compute/api.py:1071 cinder/volume/api.py:173 -#: cinder/volume/volume_types.py:64 +#: cinder/db/sqlalchemy/api.py:2662 #, python-format -msgid "Searching by: %s" +msgid "" +"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " +"%(status)s, expected awaiting-transfer" msgstr "" -#: cinder/compute/api.py:1201 -#, python-format -msgid "Image type not recognized %s" +#: cinder/db/sqlalchemy/migration.py:37 +msgid "version should be an integer" msgstr "" -#: cinder/compute/api.py:1369 -msgid "flavor_id is None. Assuming migration." +#: cinder/db/sqlalchemy/migration.py:64 +msgid "Upgrade DB using Essex release first." msgstr "" -#: cinder/compute/api.py:1377 -#, python-format -msgid "" -"Old instance type %(current_instance_type_name)s, new instance type " -"%(new_instance_type_name)s" +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:240 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:269 +msgid "Downgrade from initial Cinder install is unsupported." 
msgstr "" -#: cinder/compute/api.py:1644 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:49 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:74 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:105 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:45 +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:48 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:46 #, python-format -msgid "multiple fixedips exist, using the first: %s" +msgid "Table |%s| not created!" msgstr "" -#: cinder/compute/instance_types.py:57 cinder/compute/instance_types.py:65 -msgid "create arguments must be positive integers" +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:127 +msgid "Dropping foreign key reservations_ibfk_1 failed." msgstr "" -#: cinder/compute/instance_types.py:76 cinder/volume/volume_types.py:41 -#, python-format -msgid "DB error: %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:133 +msgid "quota_classes table not dropped" msgstr "" -#: cinder/compute/instance_types.py:86 -#, python-format -msgid "Instance type %s not found for deletion" +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:140 +msgid "quota_usages table not dropped" msgstr "" -#: cinder/compute/manager.py:138 -#, python-format -msgid "check_instance_lock: decorating: |%s|" -msgstr "check_instance_lock: decorating: |%s|" +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:147 +msgid "reservations table not dropped" +msgstr "" -#: cinder/compute/manager.py:140 -#, python-format -msgid "" -"check_instance_lock: arguments: |%(self)s| |%(context)s| " -"|%(instance_uuid)s|" +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:60 +msgid "Exception while creating table 'volume_glance_metadata'" msgstr "" -#: cinder/compute/manager.py:144 -#, python-format -msgid "check_instance_lock: locked: |%s|" -msgstr "check_instance_lock: locked: |%s|" +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:75 +msgid "volume_glance_metadata table not dropped" +msgstr "" -#: cinder/compute/manager.py:146 -#, python-format -msgid "check_instance_lock: admin: |%s|" -msgstr "check_instance_lock: admin: |%s|" +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:68 +msgid "backups table not dropped" +msgstr "" -#: cinder/compute/manager.py:151 -#, python-format -msgid "check_instance_lock: executing: |%s|" -msgstr "check_instance_lock: ejecutando: |%s|" +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:58 +msgid "snapshot_metadata table not dropped" +msgstr "" -#: cinder/compute/manager.py:155 -#, python-format -msgid "check_instance_lock: not executing |%s|" -msgstr "check_instance_lock: no ejecutando |%s|" +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:61 +msgid "transfers table not dropped" +msgstr "" -#: cinder/compute/manager.py:201 -#, python-format -msgid "Unable to load the virtualization driver: %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:31 +msgid "migrations table not dropped" msgstr "" -#: cinder/compute/manager.py:223 +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:61 #, python-format -msgid "" -"Instance %(instance_uuid)s has been 
destroyed from under us while trying "
-"to set it to ERROR"
+msgid "Table |%s| not created"
 msgstr ""

-#: cinder/compute/manager.py:240
+#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:37
 #, python-format
-msgid "Current state is %(drv_state)s, state in DB is %(db_state)s."
+msgid "Exception while dropping table %s."
 msgstr ""

-#: cinder/compute/manager.py:245
-msgid "Rebooting instance after cinder-compute restart."
+#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:100
+#, python-format
+msgid "Exception while creating table %s."
 msgstr ""

-#: cinder/compute/manager.py:255
-msgid "Hypervisor driver does not support firewall rules"
+#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:34
+#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:43
+#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:49
+#, python-format
+msgid "Column |%s| not created!"
 msgstr ""

-#: cinder/compute/manager.py:260
-msgid "Checking state"
+#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:92
+msgid "encryption_key_id column not dropped from volumes"
 msgstr ""

-#: cinder/compute/manager.py:329
-#, python-format
-msgid "Setting up bdm %s"
+#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:100
+msgid "encryption_key_id column not dropped from snapshots"
 msgstr ""

-#: cinder/compute/manager.py:400
-#, python-format
-msgid "Instance %s already deleted from database. Attempting forceful vm deletion"
+#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:105
+msgid "volume_type_id column not dropped from snapshots"
 msgstr ""

-#: cinder/compute/manager.py:406
-#, fuzzy, python-format
-msgid "Exception encountered while terminating the instance %s"
-msgstr "Después de terminar las instancias: %s"
-
-#: cinder/compute/manager.py:444
-#, python-format
-msgid "Instance %s not found."
+#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:113
+msgid "encryption table not dropped"
 msgstr ""

-#: cinder/compute/manager.py:480
-msgid "Instance has already been created"
-msgstr "La instancia ha sido creada previamente"
-
-#: cinder/compute/manager.py:523
-#, python-format
-msgid ""
-"image_id=%(image_id)s, image_size_bytes=%(size_bytes)d, "
-"allowed_size_bytes=%(allowed_size_bytes)d"
+#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:49
+msgid "Table quality_of_service_specs not created!"
 msgstr ""

-#: cinder/compute/manager.py:528
-#, python-format
-msgid ""
-"Image '%(image_id)s' size %(size_bytes)d exceeded instance_type allowed "
-"size %(allowed_size_bytes)d"
+#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:60
+msgid "Adding qos_specs_id column to the volume type table failed."
 msgstr ""

-#: cinder/compute/manager.py:538
-msgid "Starting instance..."
+#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:85
+msgid "Dropping foreign key volume_types_ibfk_1 failed."
 msgstr ""

-#: cinder/compute/manager.py:548
-msgid "Skipping network allocation for instance"
+#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:93
+msgid "Dropping qos_specs_id column failed."
 msgstr ""

-#: cinder/compute/manager.py:561
-msgid "Instance failed network setup"
+#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:100
+msgid "Dropping quality_of_service_specs table failed."
msgstr "" -#: cinder/compute/manager.py:565 -#, python-format -msgid "Instance network_info: |%s|" +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:59 +msgid "volume_admin_metadata table not dropped" msgstr "" -#: cinder/compute/manager.py:578 -msgid "Instance failed block device setup" +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:48 +msgid "" +"Found existing 'default' entries in the quota_classes table. Skipping " +"insertion of default values." msgstr "" -#: cinder/compute/manager.py:594 -msgid "Instance failed to spawn" +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:72 +msgid "Added default quota class data into the DB." msgstr "" -#: cinder/compute/manager.py:615 -msgid "Deallocating network for instance" +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:74 +msgid "Default quota class data not inserted into the DB." msgstr "" -#: cinder/compute/manager.py:672 +#: cinder/image/glance.py:161 cinder/image/glance.py:169 #, python-format -msgid "%(action_str)s instance" +msgid "Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s." msgstr "" -#: cinder/compute/manager.py:699 -#, python-format -msgid "Ignoring DiskNotFound: %s" +#: cinder/image/image_utils.py:94 cinder/image/image_utils.py:199 +msgid "'qemu-img info' parsing failed." msgstr "" -#: cinder/compute/manager.py:708 +#: cinder/image/image_utils.py:101 #, python-format -msgid "terminating bdm %s" +msgid "fmt=%(fmt)s backed by: %(backing_file)s" msgstr "" -#: cinder/compute/manager.py:742 cinder/compute/manager.py:1328 -#: cinder/compute/manager.py:1416 cinder/compute/manager.py:2501 +#: cinder/image/image_utils.py:109 cinder/image/image_utils.py:192 #, python-format -msgid "%s. Setting instance vm_state to ERROR" +msgid "" +"Size is %(image_size)dGB and doesn't fit in a volume of size " +"%(volume_size)dGB." msgstr "" -#: cinder/compute/manager.py:811 +#: cinder/image/image_utils.py:157 #, python-format msgid "" -"Cannot rebuild instance [%(instance_uuid)s], because the given image does" -" not exist." +"qemu-img is not installed and image is of type %s. Only RAW images can " +"be used if qemu-img is not installed." msgstr "" -#: cinder/compute/manager.py:816 -#, python-format -msgid "Cannot rebuild instance [%(instance_uuid)s]: %(exc)s" +#: cinder/image/image_utils.py:164 +msgid "" +"qemu-img is not installed and the disk format is not specified. Only RAW" +" images can be used if qemu-img is not installed." 
msgstr "" -#: cinder/compute/manager.py:823 +#: cinder/image/image_utils.py:178 #, python-format -msgid "Rebuilding instance %s" +msgid "Copying image from %(tmp)s to volume %(dest)s - size: %(size)s" msgstr "" -#: cinder/compute/manager.py:876 +#: cinder/image/image_utils.py:206 #, python-format -msgid "Rebooting instance %s" -msgstr "Reiniciando instancia %s" +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" -#: cinder/compute/manager.py:891 +#: cinder/image/image_utils.py:224 #, python-format -msgid "" -"trying to reboot a non-running instance: %(instance_uuid)s (state: " -"%(state)s expected: %(running)s)" +msgid "Converted to %(vol_format)s, but format is now %(file_format)s" msgstr "" -#: cinder/compute/manager.py:933 +#: cinder/image/image_utils.py:260 #, python-format -msgid "instance %s: snapshotting" -msgstr "instancia %s: creando snapshot" +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" -#: cinder/compute/manager.py:939 -#, python-format +#: cinder/keymgr/conf_key_mgr.py:78 msgid "" -"trying to snapshot a non-running instance: %(instance_uuid)s (state: " -"%(state)s expected: %(running)s)" +"config option keymgr.fixed_key has not been defined: some operations may " +"fail unexpectedly" msgstr "" -#: cinder/compute/manager.py:995 -#, python-format -msgid "Found %(num_images)d images (rotation: %(rotation)d)" +#: cinder/keymgr/conf_key_mgr.py:80 +msgid "keymgr.fixed_key not defined" msgstr "" -#: cinder/compute/manager.py:1001 +#: cinder/keymgr/conf_key_mgr.py:134 #, python-format -msgid "Rotating out %d backups" +msgid "Not deleting key %s" msgstr "" -#: cinder/compute/manager.py:1005 +#: cinder/openstack/common/eventlet_backdoor.py:140 #, python-format -msgid "Deleting image %s" +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" msgstr "" -#: cinder/compute/manager.py:1035 +#: cinder/openstack/common/excutils.py:48 #, python-format -msgid "Failed to set admin password. Instance %s is not running" +msgid "Original exception being dropped: %s" msgstr "" -#: cinder/compute/manager.py:1041 +#: cinder/openstack/common/fileutils.py:64 #, python-format -msgid "Instance %s: Root password set" +msgid "Reloading cached file %s" msgstr "" -#: cinder/compute/manager.py:1050 -msgid "set_admin_password is not implemented by this driver." +#: cinder/openstack/common/gettextutils.py:252 +msgid "Message objects do not support addition." msgstr "" -#: cinder/compute/manager.py:1064 -msgid "Error setting admin password" +#: cinder/openstack/common/gettextutils.py:261 +msgid "" +"Message objects do not support str() because they may contain non-ascii " +"characters. Please use unicode() or translate() instead." msgstr "" -#: cinder/compute/manager.py:1079 -#, python-format -msgid "" -"trying to inject a file into a non-running instance: %(instance_uuid)s " -"(state: %(current_power_state)s expected: %(expected_state)s)" +#: cinder/openstack/common/imageutils.py:96 +msgid "Snapshot list encountered but no header found!" msgstr "" -#: cinder/compute/manager.py:1084 +#: cinder/openstack/common/lockutils.py:102 #, python-format -msgid "instance %(instance_uuid)s: injecting file to %(path)s" +msgid "Could not release the acquired lock `%s`" msgstr "" -#: cinder/compute/manager.py:1098 +#: cinder/openstack/common/lockutils.py:189 #, python-format -msgid "" -"trying to update agent on a non-running instance: %(instance_uuid)s " -"(state: %(current_power_state)s expected: %(expected_state)s)" +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." 
msgstr "" -#: cinder/compute/manager.py:1103 +#: cinder/openstack/common/lockutils.py:200 #, python-format -msgid "instance %(instance_uuid)s: updating agent to %(url)s" +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." msgstr "" -#: cinder/compute/manager.py:1116 +#: cinder/openstack/common/lockutils.py:227 #, python-format -msgid "instance %s: rescuing" -msgstr "instancia %s: rescatando" +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" -#: cinder/compute/manager.py:1141 +#: cinder/openstack/common/lockutils.py:235 #, python-format -msgid "instance %s: unrescuing" +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." msgstr "" -#: cinder/compute/manager.py:1270 -msgid "destination same as source!" -msgstr "" +#: cinder/openstack/common/log.py:301 +#, fuzzy, python-format +msgid "Deprecated: %s" +msgstr "recibido %s" -#: cinder/compute/manager.py:1287 +#: cinder/openstack/common/log.py:402 #, python-format -msgid "instance %s: migrating" +msgid "Error loading logging config %(log_config)s: %(err_msg)s" msgstr "" -#: cinder/compute/manager.py:1471 +#: cinder/openstack/common/log.py:453 #, python-format -msgid "instance %s: pausing" -msgstr "instancia %s: pausando" +msgid "syslog facility must be one of: %s" +msgstr "" -#: cinder/compute/manager.py:1489 +#: cinder/openstack/common/log.py:623 #, python-format -msgid "instance %s: unpausing" -msgstr "instancia %s: continuando tras pausa" +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" -#: cinder/compute/manager.py:1525 +#: cinder/openstack/common/loopingcall.py:82 #, python-format -msgid "instance %s: retrieving diagnostics" -msgstr "instancia %s: obteniendo los diagnosticos" +msgid "task run outlasted interval by %s sec" +msgstr "" -#: cinder/compute/manager.py:1534 -#, python-format -msgid "instance %s: suspending" -msgstr "instancia %s: suspendiendo" +#: cinder/openstack/common/loopingcall.py:89 +#: cinder/tests/brick/test_brick_connector.py:466 +msgid "in fixed duration looping call" +msgstr "" -#: cinder/compute/manager.py:1556 +#: cinder/openstack/common/loopingcall.py:129 #, python-format -msgid "instance %s: resuming" -msgstr "instancia %s: continuando" +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" -#: cinder/compute/manager.py:1579 -#, python-format -msgid "instance %s: locking" -msgstr "instancia %s: bloqueando" +#: cinder/openstack/common/loopingcall.py:136 +msgid "in dynamic looping call" +msgstr "" -#: cinder/compute/manager.py:1588 +#: cinder/openstack/common/periodic_task.py:43 #, python-format -msgid "instance %s: unlocking" -msgstr "instancia %s: desbloqueando" +msgid "Unexpected argument for periodic task creation: %(arg)s." 
+msgstr "" -#: cinder/compute/manager.py:1596 +#: cinder/openstack/common/periodic_task.py:134 #, python-format -msgid "instance %s: getting locked state" -msgstr "instancia %s: pasando a estado bloqueado" +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" -#: cinder/compute/manager.py:1606 +#: cinder/openstack/common/periodic_task.py:139 #, python-format -msgid "instance %s: reset network" -msgstr "instancia %s: reiniciar redes" +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "" -#: cinder/compute/manager.py:1614 +#: cinder/openstack/common/periodic_task.py:177 #, python-format -msgid "instance %s: inject network info" +msgid "Running periodic task %(full_task_name)s" msgstr "" -#: cinder/compute/manager.py:1618 +#: cinder/openstack/common/periodic_task.py:186 #, python-format -msgid "network_info to inject: |%s|" +msgid "Error during %(full_task_name)s: %(e)s" msgstr "" -#: cinder/compute/manager.py:1655 +#: cinder/openstack/common/policy.py:149 #, python-format -msgid "instance %s: getting vnc console" +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." msgstr "" -#: cinder/compute/manager.py:1685 +#: cinder/openstack/common/policy.py:163 #, python-format -msgid "Booting with volume %(volume_id)s at %(mountpoint)s" +msgid "Failed to understand rule %(match)r" msgstr "" -#: cinder/compute/manager.py:1703 +#: cinder/openstack/common/policy.py:173 #, python-format -msgid "" -"instance %(instance_uuid)s: attaching volume %(volume_id)s to " -"%(mountpoint)s" +msgid "Inheritance-based rules are deprecated; update _check_%s" msgstr "" -#: cinder/compute/manager.py:1705 +#: cinder/openstack/common/policy.py:180 #, python-format -msgid "Attaching volume %(volume_id)s to %(mountpoint)s" +msgid "No handler for matches of kind %s" msgstr "" -#: cinder/compute/manager.py:1714 -#, fuzzy, python-format -msgid "instance %(instance_uuid)s: attach failed %(mountpoint)s, removing" -msgstr "La instancia %(instance_id)s no esta en modo de rescate" - -#: cinder/compute/manager.py:1724 +#: cinder/openstack/common/processutils.py:127 #, python-format -msgid "Attach failed %(mountpoint)s, removing" +msgid "Got unknown keyword args to utils.execute: %r" msgstr "" -#: cinder/compute/manager.py:1752 +#: cinder/openstack/common/processutils.py:142 #, python-format -msgid "Detach volume %(volume_id)s from mountpoint %(mp)s" -msgstr "" +msgid "Running cmd (subprocess): %s" +msgstr "Ejecutando cmd (subprocesos): %s" -#: cinder/compute/manager.py:1756 +#: cinder/openstack/common/processutils.py:167 +#: cinder/openstack/common/processutils.py:239 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:345 #, python-format -msgid "Detaching volume from unknown instance %s" -msgstr "Desvinculando volumen de instancia desconocida %s" +msgid "Result was %s" +msgstr "El resultado fue %s" -#: cinder/compute/manager.py:1822 +#: cinder/openstack/common/processutils.py:179 #, python-format -msgid "" -"Creating tmpfile %s to notify to other compute nodes that they should " -"mount the same storage." +msgid "%r failed. Retrying." msgstr "" -#: cinder/compute/manager.py:1884 -msgid "Instance has no volume." 
+#: cinder/openstack/common/processutils.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:318 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "corriendo cmd (SSH): %s" + +#: cinder/openstack/common/processutils.py:220 +msgid "Environment not supported over SSH" msgstr "" -#: cinder/compute/manager.py:1916 -#, python-format -msgid "plug_vifs() failed %(cnt)d.Retry up to %(max_retry)d for %(hostname)s." +#: cinder/openstack/common/processutils.py:224 +msgid "process_input not supported over SSH" msgstr "" -#: cinder/compute/manager.py:1973 +#: cinder/openstack/common/service.py:175 +#: cinder/openstack/common/service.py:269 #, python-format -msgid "Pre live migration failed at %(dest)s" +msgid "Caught %s, exiting" msgstr "" -#: cinder/compute/manager.py:2000 -msgid "post_live_migration() is started.." +#: cinder/openstack/common/service.py:187 +msgid "Exception during rpc cleanup." msgstr "" -#: cinder/compute/manager.py:2030 -msgid "No floating_ip found" +#: cinder/openstack/common/service.py:238 +msgid "Parent process has died unexpectedly, exiting" msgstr "" -#: cinder/compute/manager.py:2038 -msgid "No floating_ip found." -msgstr "" +#: cinder/openstack/common/service.py:275 +#, fuzzy +msgid "Unhandled exception" +msgstr "Excepción interna: %s" -#: cinder/compute/manager.py:2040 -#, python-format -msgid "" -"Live migration: Unexpected error: cannot inherit floating ip.\n" -"%(e)s" +#: cinder/openstack/common/service.py:308 +msgid "Forking too fast, sleeping" msgstr "" -#: cinder/compute/manager.py:2073 +#: cinder/openstack/common/service.py:327 #, python-format -msgid "Migrating instance to %(dest)s finished successfully." -msgstr "" - -#: cinder/compute/manager.py:2075 -msgid "" -"You may see the error \"libvirt: QEMU error: Domain not found: no domain " -"with matching name.\" This error can be safely ignored." +msgid "Started child %d" msgstr "" -#: cinder/compute/manager.py:2090 -msgid "Post operation of migration started" -msgstr "" +#: cinder/openstack/common/service.py:337 +#, fuzzy, python-format +msgid "Starting %d workers" +msgstr "configurando la red del host" -#: cinder/compute/manager.py:2226 +#: cinder/openstack/common/service.py:354 #, python-format -msgid "Updated the info_cache for instance %s" +msgid "Child %(pid)d killed by signal %(sig)d" msgstr "" -#: cinder/compute/manager.py:2255 -msgid "Updating bandwidth usage cache" +#: cinder/openstack/common/service.py:358 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" msgstr "" -#: cinder/compute/manager.py:2277 -msgid "Updating host status" +#: cinder/openstack/common/service.py:362 +#, python-format +msgid "pid %d not in child list" msgstr "" -#: cinder/compute/manager.py:2305 +#: cinder/openstack/common/service.py:392 #, python-format -msgid "" -"Found %(num_db_instances)s in the database and %(num_vm_instances)s on " -"the hypervisor." +msgid "Caught %s, stopping children" msgstr "" -#: cinder/compute/manager.py:2331 +#: cinder/openstack/common/service.py:410 #, python-format -msgid "" -"During the sync_power process the instance %(uuid)s has moved from host " -"%(src)s to host %(dst)s" +msgid "Waiting on %d children to exit" msgstr "" -#: cinder/compute/manager.py:2344 +#: cinder/openstack/common/sslutils.py:98 #, python-format -msgid "" -"Instance %s is in the process of migrating to this host. 
Wait next " -"sync_power cycle before setting power state to NOSTATE" +msgid "Invalid SSL version : %s" msgstr "" -#: cinder/compute/manager.py:2350 -msgid "" -"Instance found in database but not known by hypervisor. Setting power " -"state to NOSTATE" +#: cinder/openstack/common/strutils.py:86 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" msgstr "" -#: cinder/compute/manager.py:2380 -msgid "FLAGS.reclaim_instance_interval <= 0, skipping..." +#: cinder/openstack/common/strutils.py:182 +#, python-format +msgid "Invalid string format: %s" msgstr "" -#: cinder/compute/manager.py:2392 -msgid "Reclaiming deleted instance" +#: cinder/openstack/common/strutils.py:189 +#, python-format +msgid "Unknown byte multiplier: %s" msgstr "" -#: cinder/compute/manager.py:2458 +#: cinder/openstack/common/versionutils.py:69 #, python-format msgid "" -"Detected instance with name label '%(name)s' which is marked as DELETED " -"but still present on host." +"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and " +"may be removed in %(remove_in)s." msgstr "" -#: cinder/compute/manager.py:2465 +#: cinder/openstack/common/versionutils.py:73 #, python-format msgid "" -"Destroying instance with name label '%(name)s' which is marked as DELETED" -" but still present on host." +"%(what)s is deprecated as of %(as_of)s and may be removed in " +"%(remove_in)s. It will not be superseded." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:29 +msgid "An unknown error occurred in crypto utils." msgstr "" -#: cinder/compute/manager.py:2472 +#: cinder/openstack/common/crypto/utils.py:36 #, python-format -msgid "Unrecognized value '%(action)s' for FLAGS.running_deleted_instance_action" +msgid "Block size of %(given)d is too big, max = %(maximum)d" msgstr "" -#: cinder/compute/manager.py:2542 +#: cinder/openstack/common/crypto/utils.py:45 #, python-format -msgid "" -"Aggregate %(aggregate_id)s: unrecoverable state during operation on " -"%(host)s" +msgid "Length of %(given)d is too long, max = %(maximum)d" msgstr "" -#: cinder/compute/utils.py:142 -msgid "v4 subnets are required for legacy nw_info" +#: cinder/openstack/common/db/exception.py:44 +msgid "Invalid Parameter: Unicode is not supported by the current database." msgstr "" -#: cinder/console/manager.py:77 cinder/console/vmrc_manager.py:70 -msgid "Adding console" +#: cinder/openstack/common/db/sqlalchemy/session.py:487 +msgid "DB exception wrapped." msgstr "" -#: cinder/console/manager.py:97 +#: cinder/openstack/common/db/sqlalchemy/session.py:538 #, python-format -msgid "Tried to remove non-existent console %(console_id)s." +msgid "Got mysql server has gone away: %s" msgstr "" -#: cinder/console/vmrc_manager.py:122 +#: cinder/openstack/common/db/sqlalchemy/session.py:610 #, python-format -msgid "Tried to remove non-existent console %(console_id)s." +msgid "SQL connection failed. %s attempts left." msgstr "" -#: cinder/console/vmrc_manager.py:125 -#, python-format -msgid "Removing console %(console_id)s." +#: cinder/openstack/common/db/sqlalchemy/utils.py:33 +msgid "Sort key supplied was not valid." msgstr "" -#: cinder/console/xvp.py:98 -msgid "Rebuilding xvp conf" +#: cinder/openstack/common/notifier/api.py:129 +#, python-format +msgid "%s not in valid priorities" msgstr "" -#: cinder/console/xvp.py:116 +#: cinder/openstack/common/notifier/api.py:145 #, python-format -msgid "Re-wrote %s" +msgid "" +"Problem '%(e)s' attempting to send to notification system. 
" +"Payload=%(payload)s" msgstr "" -#: cinder/console/xvp.py:121 -msgid "Stopping xvp" +#: cinder/openstack/common/notifier/api.py:164 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." msgstr "" -#: cinder/console/xvp.py:134 -msgid "Starting xvp" +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead." msgstr "" -#: cinder/console/xvp.py:141 +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 #, python-format -msgid "Error starting xvp: %s" +msgid "Could not send notification to %(topic)s. Payload=%(message)s" msgstr "" -#: cinder/console/xvp.py:144 -msgid "Restarting xvp" +#: cinder/openstack/common/rpc/__init__.py:105 +#, python-format +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." msgstr "" -#: cinder/console/xvp.py:146 -msgid "xvp not running..." +#: cinder/openstack/common/rpc/amqp.py:83 +msgid "Pool creating new connection" msgstr "" -#: cinder/consoleauth/manager.py:63 +#: cinder/openstack/common/rpc/amqp.py:208 #, python-format -msgid "Deleting Expired Token: (%s)" +msgid "no calling threads waiting for msg_id : %s, message : %s" msgstr "" -#: cinder/consoleauth/manager.py:75 +#: cinder/openstack/common/rpc/amqp.py:216 #, python-format -msgid "Received Token: %(token)s, %(token_dict)s)" +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." msgstr "" -#: cinder/consoleauth/manager.py:79 +#: cinder/openstack/common/rpc/amqp.py:299 #, python-format -msgid "Checking Token: %(token)s, %(token_valid)s)" -msgstr "" - -#: cinder/db/sqlalchemy/api.py:57 -msgid "Use of empty request context is deprecated" -msgstr "El uso de una petición de contexto vacía está en desuso" +msgid "unpacked context: %s" +msgstr "contenido desempaquetado: %s" -#: cinder/db/sqlalchemy/api.py:198 +#: cinder/openstack/common/rpc/amqp.py:345 #, python-format -msgid "Unrecognized read_deleted value '%s'" +msgid "UNIQUE_ID is %s." msgstr "" -#: cinder/db/sqlalchemy/api.py:516 cinder/db/sqlalchemy/api.py:551 +#: cinder/openstack/common/rpc/amqp.py:414 #, python-format -msgid "No ComputeNode for %(host)s" -msgstr "" +msgid "received %s" +msgstr "recibido %s" -#: cinder/db/sqlalchemy/api.py:4019 cinder/db/sqlalchemy/api.py:4045 +#: cinder/openstack/common/rpc/amqp.py:422 #, python-format -msgid "No backend config with id %(sm_backend_id)s" -msgstr "" +msgid "no method for message: %s" +msgstr "no hay método para el mensaje: %s" -#: cinder/db/sqlalchemy/api.py:4103 +#: cinder/openstack/common/rpc/amqp.py:423 #, python-format -msgid "No sm_flavor called %(sm_flavor)s" -msgstr "" +msgid "No method for message: %s" +msgstr "No hay método para el mensaje: %s" -#: cinder/db/sqlalchemy/api.py:4147 +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:280 #, python-format -msgid "No sm_volume with id %(volume_id)s" +msgid "Expected exception during message handling (%s)" msgstr "" -#: cinder/db/sqlalchemy/migration.py:66 -msgid "python-migrate is not installed. Exiting." 
+#: cinder/openstack/common/rpc/amqp.py:459 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" msgstr "" -#: cinder/db/sqlalchemy/migration.py:78 -msgid "version should be an integer" -msgstr "" - -#: cinder/db/sqlalchemy/session.py:137 +#: cinder/openstack/common/rpc/amqp.py:594 #, python-format -msgid "SQL connection failed. %s attempts left." +msgid "Making synchronous call on %s ..." msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:48 -msgid "interface column not added to networks table" -msgstr "" +#: cinder/openstack/common/rpc/amqp.py:597 +#, python-format +msgid "MSG_ID is %s" +msgstr "MSG_ID es %s" -#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:80 -#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:54 -#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:61 -#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:48 +#: cinder/openstack/common/rpc/amqp.py:631 #, python-format -msgid "Table |%s| not created!" +msgid "Making asynchronous cast on %s..." msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:87 -msgid "VIF column not added to fixed_ips table" +#: cinder/openstack/common/rpc/amqp.py:640 +msgid "Making asynchronous fanout cast..." msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:97 +#: cinder/openstack/common/rpc/amqp.py:668 #, python-format -msgid "join list for moving mac_addresses |%s|" -msgstr "" - -#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:39 -#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:60 -#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:61 -#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:99 -msgid "foreign key constraint couldn't be added" +msgid "Sending %(event_type)s on %(topic)s" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:58 -msgid "foreign key constraint couldn't be dropped" -msgstr "" +#: cinder/openstack/common/rpc/common.py:74 +#, fuzzy +msgid "An unknown RPC related exception occurred." +msgstr "Una excepcion desconocida ha ocurrido" -#: cinder/db/sqlalchemy/migrate_repo/versions/045_add_network_priority.py:34 -msgid "priority column not added to networks table" +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." 
msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:41 -#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:42 -#: cinder/db/sqlalchemy/migrate_repo/versions/064_change_instance_id_to_uuid_in_instance_actions.py:56 -#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:68 -msgid "foreign key constraint couldn't be removed" +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/049_add_instances_progress.py:34 -msgid "progress column not added to instances table" +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/055_convert_flavor_id_to_str.py:97 +#: cinder/openstack/common/rpc/common.py:143 #, python-format -msgid "" -"Could not cast flavorid to integer: %s. Set flavorid to an integer-like " -"string to downgrade." +msgid "Found duplicate message(%(msg_id)s). Skipping it." msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:69 -msgid "instance_info_caches tables not dropped" -msgstr "" +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "Reuso invalido de una coneccion RPC" -#: cinder/db/sqlalchemy/migrate_repo/versions/069_block_migration.py:41 -msgid "progress column not added to compute_nodes table" +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:76 -msgid "dns_domains table not dropped" +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:60 -msgid "quota_classes table not dropped" +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" msgstr "" -#: cinder/image/glance.py:147 -msgid "Connection error contacting glance server, retrying" +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" msgstr "" -#: cinder/image/glance.py:153 cinder/network/quantum/melange_connection.py:104 -msgid "Maximum attempts reached" +#: cinder/openstack/common/rpc/impl_kombu.py:166 +#: cinder/openstack/common/rpc/impl_qpid.py:164 +msgid "Failed to process message... skipping it." msgstr "" -#: cinder/image/glance.py:278 +#: cinder/openstack/common/rpc/impl_kombu.py:477 #, python-format -msgid "Creating image in Glance. 
Metadata passed in %s" +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" msgstr "" -#: cinder/image/glance.py:281 +#: cinder/openstack/common/rpc/impl_kombu.py:499 #, python-format -msgid "Metadata after formatting for Glance %s" +msgid "Connected to AMQP server on %(hostname)s:%(port)d" msgstr "" -#: cinder/image/glance.py:289 +#: cinder/openstack/common/rpc/impl_kombu.py:536 #, python-format -msgid "Metadata returned from Glance formatted for Base %s" -msgstr "" - -#: cinder/image/glance.py:331 cinder/image/glance.py:335 -msgid "Not the image owner" +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" msgstr "" -#: cinder/image/glance.py:410 +#: cinder/openstack/common/rpc/impl_kombu.py:552 #, python-format -msgid "%(timestamp)s does not follow any of the signatures: %(iso_formats)s" +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." msgstr "" -#: cinder/image/s3.py:309 +#: cinder/openstack/common/rpc/impl_kombu.py:606 +#: cinder/openstack/common/rpc/impl_qpid.py:507 #, python-format -msgid "Failed to download %(image_location)s to %(image_path)s" +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" msgstr "" -#: cinder/image/s3.py:328 +#: cinder/openstack/common/rpc/impl_kombu.py:624 +#: cinder/openstack/common/rpc/impl_qpid.py:522 #, python-format -msgid "Failed to decrypt %(image_location)s to %(image_path)s" +msgid "Timed out waiting for RPC response: %s" msgstr "" -#: cinder/image/s3.py:340 +#: cinder/openstack/common/rpc/impl_kombu.py:628 +#: cinder/openstack/common/rpc/impl_qpid.py:526 #, python-format -msgid "Failed to untar %(image_location)s to %(image_path)s" +msgid "Failed to consume message from queue: %s" msgstr "" -#: cinder/image/s3.py:353 +#: cinder/openstack/common/rpc/impl_kombu.py:667 +#: cinder/openstack/common/rpc/impl_qpid.py:561 #, python-format -msgid "Failed to upload %(image_location)s to %(image_path)s" +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" msgstr "" -#: cinder/image/s3.py:379 +#: cinder/openstack/common/rpc/impl_qpid.py:84 #, python-format -msgid "Failed to decrypt private key: %s" +msgid "Invalid value for qpid_topology_version: %d" msgstr "" -#: cinder/image/s3.py:387 +#: cinder/openstack/common/rpc/impl_qpid.py:455 #, python-format -msgid "Failed to decrypt initialization vector: %s" +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" msgstr "" -#: cinder/image/s3.py:398 +#: cinder/openstack/common/rpc/impl_qpid.py:461 #, python-format -msgid "Failed to decrypt image file %(image_file)s: %(err)s" +msgid "Connected to AMQP server on %s" msgstr "" -#: cinder/image/s3.py:410 -msgid "Unsafe filenames in image" +#: cinder/openstack/common/rpc/impl_qpid.py:474 +msgid "Re-established AMQP queues" msgstr "" -#: cinder/ipv6/account_identifier.py:38 cinder/ipv6/rfc2462.py:34 -#, python-format -msgid "Bad mac for to_global_ipv6: %s" +#: cinder/openstack/common/rpc/impl_qpid.py:534 +msgid "Error processing message. Skipping it." msgstr "" -#: cinder/ipv6/account_identifier.py:40 cinder/ipv6/rfc2462.py:36 -#, python-format -msgid "Bad prefix for to_global_ipv6: %s" +#: cinder/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." 
msgstr "" -#: cinder/ipv6/account_identifier.py:42 +#: cinder/openstack/common/rpc/impl_zmq.py:101 #, python-format -msgid "Bad project_id for to_global_ipv6: %s" +msgid "Deserializing: %s" msgstr "" -#: cinder/network/ldapdns.py:321 -msgid "This driver only supports type 'a' entries." -msgstr "" - -#: cinder/network/linux_net.py:166 +#: cinder/openstack/common/rpc/impl_zmq.py:136 #, python-format -msgid "Attempted to remove chain %s which does not exist" +msgid "Connecting to %(addr)s with %(type)s" msgstr "" -#: cinder/network/linux_net.py:192 +#: cinder/openstack/common/rpc/impl_zmq.py:137 #, python-format -msgid "Unknown chain: %r" +msgid "-> Subscribed to %(subscribe)s" msgstr "" -#: cinder/network/linux_net.py:215 +#: cinder/openstack/common/rpc/impl_zmq.py:138 #, python-format -msgid "" -"Tried to remove rule that was not there: %(chain)r %(rule)r %(wrap)r " -"%(top)r" +msgid "-> bind: %(bind)s" msgstr "" -#: cinder/network/linux_net.py:335 -msgid "IPTablesManager.apply completed with success" +#: cinder/openstack/common/rpc/impl_zmq.py:146 +msgid "Could not open socket." msgstr "" -#: cinder/network/linux_net.py:694 -#, python-format -msgid "Hupping dnsmasq threw %s" -msgstr "Excepción al recargar la configuración de dnsmasq: %s" - -#: cinder/network/linux_net.py:696 +#: cinder/openstack/common/rpc/impl_zmq.py:158 #, python-format -msgid "Pid %d is stale, relaunching dnsmasq" -msgstr "El pid %d está pasado, relanzando dnsmasq" +msgid "Subscribing to %s" +msgstr "" -#: cinder/network/linux_net.py:756 -#, python-format -msgid "killing radvd threw %s" -msgstr "Matando radvd lanzado %s" +#: cinder/openstack/common/rpc/impl_zmq.py:200 +msgid "You cannot recv on this socket." +msgstr "" -#: cinder/network/linux_net.py:758 -#, python-format -msgid "Pid %d is stale, relaunching radvd" -msgstr "Pid %d corrupto, relanzando radvd" +#: cinder/openstack/common/rpc/impl_zmq.py:205 +msgid "You cannot send on this socket." +msgstr "" -#: cinder/network/linux_net.py:967 -#, python-format -msgid "Starting VLAN inteface %s" -msgstr "Iniciando interfaz VLAN %s" +#: cinder/openstack/common/rpc/impl_zmq.py:267 +#, fuzzy, python-format +msgid "Running func with context: %s" +msgstr "contenido desempaquetado: %s" -#: cinder/network/linux_net.py:999 -#, python-format -msgid "Starting Bridge interface for %s" -msgstr "Iniciando interfaz puente para %s" +#: cinder/openstack/common/rpc/impl_zmq.py:305 +msgid "Sending reply" +msgstr "" -#: cinder/network/linux_net.py:1142 -#, fuzzy, python-format -msgid "Starting bridge %s " -msgstr "Iniciando interfaz puente para %s" +#: cinder/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." 
+msgstr "" -#: cinder/network/linux_net.py:1149 -#, fuzzy, python-format -msgid "Done starting bridge %s" +#: cinder/openstack/common/rpc/impl_zmq.py:371 +#, fuzzy +msgid "Registering reactor" msgstr "Des-registrando la imagen %s" -#: cinder/network/linux_net.py:1167 -#, python-format -msgid "Failed unplugging gateway interface '%s'" +#: cinder/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" msgstr "" -#: cinder/network/linux_net.py:1170 -#, python-format -msgid "Unplugged gateway interface '%s'" +#: cinder/openstack/common/rpc/impl_zmq.py:387 +msgid "Consuming socket" msgstr "" -#: cinder/network/manager.py:291 +#: cinder/openstack/common/rpc/impl_zmq.py:437 #, python-format -msgid "Fixed ip %(fixed_ip_id)s not found" +msgid "Creating proxy for topic: %s" msgstr "" -#: cinder/network/manager.py:300 cinder/network/manager.py:496 -#, python-format -msgid "Interface %(interface)s not found" +#: cinder/openstack/common/rpc/impl_zmq.py:443 +msgid "Topic contained dangerous characters." msgstr "" -#: cinder/network/manager.py:315 -#, python-format -msgid "floating IP allocation for instance |%s|" +#: cinder/openstack/common/rpc/impl_zmq.py:475 +msgid "Topic socket file creation failed." msgstr "" -#: cinder/network/manager.py:353 +#: cinder/openstack/common/rpc/impl_zmq.py:481 #, python-format -msgid "floating IP deallocation for instance |%s|" +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." msgstr "" -#: cinder/network/manager.py:386 +#: cinder/openstack/common/rpc/impl_zmq.py:497 #, python-format -msgid "Address |%(address)s| is not allocated" +msgid "Required IPC directory does not exist at %s" msgstr "" -#: cinder/network/manager.py:390 +#: cinder/openstack/common/rpc/impl_zmq.py:506 #, python-format -msgid "Address |%(address)s| is not allocated to your project |%(project)s|" +msgid "Permission denied to IPC directory at %s" msgstr "" -#: cinder/network/manager.py:402 -#, python-format -msgid "Quota exceeded for %s, tried to allocate address" +#: cinder/openstack/common/rpc/impl_zmq.py:509 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." msgstr "" -#: cinder/network/manager.py:614 +#: cinder/openstack/common/rpc/impl_zmq.py:543 #, python-format -msgid "" -"Database inconsistency: DNS domain |%s| is registered in the Cinder db but " -"not visible to either the floating or instance DNS driver. It will be " -"ignored." +msgid "CONSUMER RECEIVED DATA: %s" msgstr "" -#: cinder/network/manager.py:660 -#, python-format -msgid "Domain |%(domain)s| already exists, changing zone to |%(av_zone)s|." +#: cinder/openstack/common/rpc/impl_zmq.py:562 +msgid "ZMQ Envelope version unsupported or unknown." msgstr "" -#: cinder/network/manager.py:670 -#, python-format -msgid "Domain |%(domain)s| already exists, changing project to |%(project)s|." +#: cinder/openstack/common/rpc/impl_zmq.py:590 +msgid "Skipping topic registration. Already registered." 
msgstr "" -#: cinder/network/manager.py:778 +#: cinder/openstack/common/rpc/impl_zmq.py:597 #, python-format -msgid "Disassociated %s stale fixed ip(s)" +msgid "Consumer is a zmq.%s" msgstr "" -#: cinder/network/manager.py:782 -msgid "setting network host" -msgstr "configurando la red del host" - -#: cinder/network/manager.py:896 -#, python-format -msgid "network allocations for instance |%s|" +#: cinder/openstack/common/rpc/impl_zmq.py:649 +msgid "Creating payload" msgstr "" -#: cinder/network/manager.py:901 -#, python-format -msgid "networks retrieved for instance |%(instance_id)s|: |%(networks)s|" +#: cinder/openstack/common/rpc/impl_zmq.py:662 +msgid "Creating queue socket for reply waiter" msgstr "" -#: cinder/network/manager.py:930 -#, python-format -msgid "network deallocation for instance |%s|" +#: cinder/openstack/common/rpc/impl_zmq.py:675 +msgid "Sending cast" msgstr "" -#: cinder/network/manager.py:1152 -#, python-format -msgid "" -"instance-dns-zone is |%(domain)s|, which is in availability zone " -"|%(zone)s|. Instance |%(instance)s| is in zone |%(zone2)s|. No DNS record" -" will be created." +#: cinder/openstack/common/rpc/impl_zmq.py:678 +msgid "Cast sent; Waiting reply" msgstr "" -#: cinder/network/manager.py:1227 -#, python-format -msgid "Unable to release %s because vif doesn't exist." -msgstr "" +#: cinder/openstack/common/rpc/impl_zmq.py:681 +#, fuzzy, python-format +msgid "Received message: %s" +msgstr "recibido %s" -#: cinder/network/manager.py:1244 -#, python-format -msgid "Leased IP |%(address)s|" +#: cinder/openstack/common/rpc/impl_zmq.py:682 +msgid "Unpacking response" msgstr "" -#: cinder/network/manager.py:1248 -#, python-format -msgid "IP %s leased that is not associated" +#: cinder/openstack/common/rpc/impl_zmq.py:691 +msgid "Unsupported or unknown ZMQ envelope returned." msgstr "" -#: cinder/network/manager.py:1256 -#, python-format -msgid "IP |%s| leased that isn't allocated" -msgstr "" +#: cinder/openstack/common/rpc/impl_zmq.py:698 +#, fuzzy +msgid "RPC Message Invalid." +msgstr "La petición es inválida." -#: cinder/network/manager.py:1261 +#: cinder/openstack/common/rpc/impl_zmq.py:721 #, python-format -msgid "Released IP |%(address)s|" +msgid "%(msg)s" msgstr "" -#: cinder/network/manager.py:1265 +#: cinder/openstack/common/rpc/impl_zmq.py:724 #, python-format -msgid "IP %s released that is not associated" +msgid "Sending message(s) to: %s" msgstr "" -#: cinder/network/manager.py:1268 -#, python-format -msgid "IP %s released that was not leased" +#: cinder/openstack/common/rpc/impl_zmq.py:728 +msgid "No matchmaker results. Not casting." msgstr "" -#: cinder/network/manager.py:1331 -msgid "cidr already in use" +#: cinder/openstack/common/rpc/impl_zmq.py:731 +msgid "No match from matchmaker." msgstr "" -#: cinder/network/manager.py:1334 +#: cinder/openstack/common/rpc/impl_zmq.py:771 #, python-format -msgid "requested cidr (%(cidr)s) conflicts with existing supernet (%(super)s)" +msgid "topic is %s." msgstr "" -#: cinder/network/manager.py:1345 +#: cinder/openstack/common/rpc/impl_zmq.py:815 #, python-format -msgid "" -"requested cidr (%(cidr)s) conflicts with existing smaller cidr " -"(%(smaller)s)" -msgstr "" - -#: cinder/network/manager.py:1404 -msgid "Network already exists!" 
+msgid "rpc_zmq_matchmaker = %(orig)s is deprecated; use %(new)s instead" msgstr "" -#: cinder/network/manager.py:1423 -#, python-format -msgid "Network must be disassociated from project %s before delete" +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." msgstr "" -#: cinder/network/manager.py:1832 -msgid "" -"The sum between the number of networks and the vlan start cannot be " -"greater than 4094" -msgstr "" +#: cinder/openstack/common/rpc/matchmaker.py:89 +#, fuzzy +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "xenapi.fake no tiene una implementación para %s" -#: cinder/network/manager.py:1839 +#: cinder/openstack/common/rpc/matchmaker.py:239 #, python-format -msgid "" -"The network range is not big enough to fit %(num_networks)s. Network size" -" is %(network_size)s" -msgstr "" - -#: cinder/network/minidns.py:65 -msgid "This driver only supports type 'a'" +msgid "Matchmaker unregistered: %s, %s" msgstr "" -#: cinder/network/quantum/client.py:154 -msgid "Tenant ID not set" +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." msgstr "" -#: cinder/network/quantum/client.py:180 +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#: cinder/openstack/common/rpc/matchmaker_ring.py:79 +#: cinder/openstack/common/rpc/matchmaker_ring.py:97 #, python-format -msgid "Quantum Client Request: %(method)s %(action)s" +msgid "No key defining hosts for topic '%s', see ringfile" msgstr "" -#: cinder/network/quantum/client.py:196 +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:54 #, python-format -msgid "Quantum entity not found: %s" +msgid "extra_spec requirement '%(req)s' does not match '%(cap)s'" msgstr "" -#: cinder/network/quantum/client.py:206 +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:67 #, python-format -msgid "Server %(status_code)s error: %(data)s" +msgid "%(host_state)s fails resource_type extra_specs requirements" msgstr "" -#: cinder/network/quantum/client.py:210 -#, python-format -msgid "Unable to connect to server. Got error: %s" +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:43 +msgid "Re-scheduling is disabled." msgstr "" -#: cinder/network/quantum/client.py:228 +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:52 #, python-format -msgid "unable to deserialize object of type = '%s'" +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" msgstr "" -#: cinder/network/quantum/manager.py:162 -msgid "QuantumManager does not use 'multi_host' parameter." +#: cinder/scheduler/driver.py:69 +msgid "Must implement host_passes_filters" msgstr "" -#: cinder/network/quantum/manager.py:166 -msgid "QuantumManager requires that only one network is created per call" +#: cinder/scheduler/driver.py:74 +msgid "Must implement find_retype_host" msgstr "" -#: cinder/network/quantum/manager.py:176 -msgid "QuantumManager does not use 'vlan_start' parameter." -msgstr "" +#: cinder/scheduler/driver.py:78 +msgid "Must implement a fallback schedule" +msgstr "Debe de implementar un horario de reserva" -#: cinder/network/quantum/manager.py:182 -msgid "QuantumManager does not use 'vpn_start' parameter." 
+#: cinder/scheduler/driver.py:82 +#, fuzzy +msgid "Must implement schedule_create_volume" +msgstr "Debe de implementar un horario de reserva" + +#: cinder/scheduler/filter_scheduler.py:98 +#, python-format +msgid "cannot place volume %(id)s on %(host)s" msgstr "" -#: cinder/network/quantum/manager.py:186 -msgid "QuantumManager does not use 'bridge' parameter." +#: cinder/scheduler/filter_scheduler.py:114 +#, python-format +msgid "No valid hosts for volume %(id)s with type %(type)s" msgstr "" -#: cinder/network/quantum/manager.py:190 -msgid "QuantumManager does not use 'bridge_interface' parameter." +#: cinder/scheduler/filter_scheduler.py:125 +#, python-format +msgid "" +"Current host not valid for volume %(id)s with type %(type)s, migration " +"not allowed" msgstr "" -#: cinder/network/quantum/manager.py:195 -msgid "QuantumManager requires a valid (.1) gateway address." +#: cinder/scheduler/filter_scheduler.py:156 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" msgstr "" -#: cinder/network/quantum/manager.py:204 +#: cinder/scheduler/filter_scheduler.py:174 #, python-format msgid "" -"Unable to find existing quantum network for tenant '%(q_tenant_id)s' with" -" net-id '%(quantum_net_id)s'" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" msgstr "" -#: cinder/network/quantum/manager.py:301 +#: cinder/scheduler/filter_scheduler.py:207 #, python-format -msgid "network allocations for instance %s" +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" msgstr "" -#: cinder/network/quantum/manager.py:588 +#: cinder/scheduler/filter_scheduler.py:259 #, python-format -msgid "" -"port deallocation failed for instance: |%(instance_id)s|, port_id: " -"|%(port_id)s|" +msgid "Filtered %s" msgstr "" -#: cinder/network/quantum/manager.py:606 +#: cinder/scheduler/filter_scheduler.py:276 #, python-format -msgid "" -"ipam deallocation failed for instance: |%(instance_id)s|, vif_uuid: " -"|%(vif_uuid)s|" +msgid "Choosing %s" msgstr "" -#: cinder/network/quantum/melange_connection.py:96 +#: cinder/scheduler/host_manager.py:264 #, python-format -msgid "Server returned error: %s" +msgid "Ignoring %(service_name)s service update from %(host)s" msgstr "" -#: cinder/network/quantum/melange_connection.py:98 -msgid "Connection error contacting melange service, retrying" +#: cinder/scheduler/host_manager.py:269 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." msgstr "" -#: cinder/network/quantum/melange_connection.py:108 +#: cinder/scheduler/host_manager.py:294 #, python-format +msgid "volume service is down or disabled. (host: %s)" +msgstr "" + +#: cinder/scheduler/manager.py:63 msgid "" -"allocate IP on network |%(network_id)s| belonging to " -"|%(network_tenant_id)s| to this vif |%(vif_id)s| with mac " -"|%(mac_address)s| belonging to |%(project_id)s| " +"ChanceScheduler and SimpleScheduler have been deprecated due to lack of " +"support for advanced features like: volume types, volume encryption, QoS " +"etc. These two schedulers can be fully replaced by FilterScheduler with " +"certain combination of filters and weighers." 
msgstr "" -#: cinder/network/quantum/melange_ipam_lib.py:133 -msgid "get_project_and_global_net_ids must be called with a non-null project_id" +#: cinder/scheduler/manager.py:98 cinder/scheduler/manager.py:100 +msgid "Failed to create scheduler manager volume flow" msgstr "" -#: cinder/network/quantum/cinder_ipam_lib.py:75 -msgid "Error creating network entry" +#: cinder/scheduler/manager.py:159 +msgid "New volume type not specified in request_spec." msgstr "" -#: cinder/network/quantum/cinder_ipam_lib.py:90 +#: cinder/scheduler/manager.py:174 #, python-format -msgid "No network with net_id = %s" +msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." msgstr "" -#: cinder/network/quantum/cinder_ipam_lib.py:221 +#: cinder/scheduler/manager.py:192 #, python-format -msgid "No fixed IPs to deallocate for vif %s" +msgid "Failed to schedule_%(method)s: %(ex)s" msgstr "" -#: cinder/network/quantum/quantum_connection.py:99 +#: cinder/scheduler/scheduler_options.py:68 #, python-format -msgid "Connecting interface %(interface_id)s to net %(net_id)s for %(tenant_id)s" +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" msgstr "" -#: cinder/network/quantum/quantum_connection.py:113 +#: cinder/scheduler/scheduler_options.py:78 #, python-format -msgid "Deleting port %(port_id)s on net %(net_id)s for %(tenant_id)s" +msgid "Could not decode scheduler options: '%s'" msgstr "" -#: cinder/notifier/api.py:115 -#, python-format -msgid "%s not in valid priorities" +#: cinder/scheduler/filters/capacity_filter.py:43 +msgid "Free capacity not set: volume node info collection broken." msgstr "" -#: cinder/notifier/api.py:130 +#: cinder/scheduler/filters/capacity_filter.py:57 #, python-format msgid "" -"Problem '%(e)s' attempting to send to notification system. " -"Payload=%(payload)s" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" msgstr "" -#: cinder/notifier/list_notifier.py:65 -#, python-format -msgid "Problem '%(e)s' attempting to send to notification driver %(driver)s." +#: cinder/scheduler/flows/create_volume.py:53 +msgid "No volume_id provided to populate a request_spec from" msgstr "" -#: cinder/notifier/rabbit_notifier.py:46 +#: cinder/scheduler/flows/create_volume.py:116 #, python-format -msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgid "Failed to schedule_create_volume: %(cause)s" msgstr "" -#: cinder/rpc/amqp.py:146 +#: cinder/scheduler/flows/create_volume.py:135 #, python-format -msgid "Returning exception %s to caller" +msgid "Failed notifying on %(topic)s payload %(payload)s" msgstr "" -#: cinder/rpc/amqp.py:188 -#, python-format -msgid "unpacked context: %s" -msgstr "contenido desempaquetado: %s" - -#: cinder/rpc/amqp.py:231 +#: cinder/tests/fake_driver.py:57 cinder/volume/driver.py:784 #, python-format -msgid "received %s" -msgstr "recibido %s" +msgid "FAKE ISCSI: %s" +msgstr "Falso ISCSI: %s" -#: cinder/rpc/amqp.py:236 +#: cinder/tests/fake_driver.py:76 cinder/volume/driver.py:884 #, python-format -msgid "no method for message: %s" -msgstr "no hay método para el mensaje: %s" +msgid "FAKE ISER: %s" +msgstr "" -#: cinder/rpc/amqp.py:237 -#, python-format -msgid "No method for message: %s" -msgstr "No hay método para el mensaje: %s" +#: cinder/tests/fake_driver.py:97 +msgid "local_path not implemented" +msgstr "" -#: cinder/rpc/amqp.py:321 +#: cinder/tests/fake_driver.py:124 cinder/tests/fake_driver.py:129 #, python-format -msgid "Making asynchronous call on %s ..." 
+msgid "LoggingVolumeDriver: %s" msgstr "" -#: cinder/rpc/amqp.py:324 +#: cinder/tests/fake_utils.py:70 #, python-format -msgid "MSG_ID is %s" -msgstr "MSG_ID es %s" +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" -#: cinder/rpc/amqp.py:346 +#: cinder/tests/fake_utils.py:78 #, python-format -msgid "Making asynchronous cast on %s..." +msgid "Faked command matched %s" msgstr "" -#: cinder/rpc/amqp.py:354 -msgid "Making asynchronous fanout cast..." +#: cinder/tests/fake_utils.py:94 +#, python-format +msgid "Faked command raised an exception %s" msgstr "" -#: cinder/rpc/amqp.py:379 +#: cinder/tests/fake_utils.py:97 #, python-format -msgid "Sending notification on %s..." +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" msgstr "" -#: cinder/rpc/common.py:54 +#: cinder/tests/test_misc.py:58 #, python-format msgid "" -"Remote error: %(exc_type)s %(value)s\n" -"%(traceback)s." +"The following migrations are missing a downgrade:\n" +"\t%s" msgstr "" -#: cinder/rpc/common.py:71 -msgid "Timeout while waiting on RPC response." +#: cinder/tests/test_netapp_nfs.py:360 +#, python-format +msgid "Share %(share)s and file name %(file_name)s" msgstr "" -#: cinder/rpc/impl_kombu.py:111 -msgid "Failed to process message... skipping it." +#: cinder/tests/test_rbd.py:768 cinder/volume/drivers/rbd.py:175 +msgid "flush() not supported in this version of librbd" msgstr "" -#: cinder/rpc/impl_kombu.py:407 +#: cinder/tests/test_storwize_svc.py:260 #, python-format -msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgid "unrecognized argument %s" msgstr "" -#: cinder/rpc/impl_kombu.py:430 +#: cinder/tests/test_storwize_svc.py:1507 #, python-format -msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgid "Run CLI command: %s" msgstr "" -#: cinder/rpc/impl_kombu.py:466 +#: cinder/tests/test_storwize_svc.py:1510 #, python-format msgid "" -"Unable to connect to AMQP server on %(hostname)s:%(port)d after " -"%(max_retries)d tries: %(err_str)s" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" msgstr "" -#: cinder/rpc/impl_kombu.py:482 +#: cinder/tests/test_storwize_svc.py:1515 #, python-format msgid "" -"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " -"again in %(sleep_time)d seconds." +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/rpc/impl_kombu.py:533 cinder/rpc/impl_qpid.py:385 +#: cinder/tests/test_volume_types.py:60 #, python-format -msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgid "Given data: %s" msgstr "" -#: cinder/rpc/impl_kombu.py:551 cinder/rpc/impl_qpid.py:400 +#: cinder/tests/test_volume_types.py:61 #, python-format -msgid "Timed out waiting for RPC response: %s" +msgid "Result data: %s" msgstr "" -#: cinder/rpc/impl_kombu.py:555 cinder/rpc/impl_qpid.py:404 +#: cinder/tests/test_xiv_ds8k.py:102 #, python-format -msgid "Failed to consume message from queue: %s" +msgid "Volume not found for instance %(instance_id)s." 
msgstr "" -#: cinder/rpc/impl_kombu.py:589 cinder/rpc/impl_qpid.py:434 -#, python-format -msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" -msgstr "" +#: cinder/tests/api/contrib/test_backups.py:741 +#, fuzzy +msgid "Invalid input" +msgstr "Captura no valida" -#: cinder/rpc/impl_qpid.py:341 -#, python-format -msgid "Unable to connect to AMQP server: %s " -msgstr "" +#: cinder/tests/integrated/test_login.py:29 +#, fuzzy, python-format +msgid "volume: %s" +msgstr "Desasociar volumen %s" -#: cinder/rpc/impl_qpid.py:346 +#: cinder/tests/integrated/api/client.py:32 #, python-format -msgid "Connected to AMQP server on %s" -msgstr "" - -#: cinder/rpc/impl_qpid.py:354 -msgid "Re-established AMQP queues" +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" msgstr "" -#: cinder/rpc/impl_qpid.py:412 -msgid "Error processing message. Skipping it." +#: cinder/tests/integrated/api/client.py:42 +msgid "Authentication error" msgstr "" -#: cinder/scheduler/chance.py:47 cinder/scheduler/simple.py:91 -#: cinder/scheduler/simple.py:143 -msgid "Is the appropriate service running?" +#: cinder/tests/integrated/api/client.py:50 +msgid "Authorization error" msgstr "" -#: cinder/scheduler/chance.py:52 -msgid "Could not find another compute" +#: cinder/tests/integrated/api/client.py:58 +msgid "Item not found" msgstr "" -#: cinder/scheduler/driver.py:63 +#: cinder/tests/integrated/api/client.py:100 #, python-format -msgid "Casted '%(method)s' to volume '%(host)s'" +msgid "Doing %(method)s on %(relative_url)s" msgstr "" -#: cinder/scheduler/driver.py:80 +#: cinder/tests/integrated/api/client.py:103 #, python-format -msgid "Casted '%(method)s' to compute '%(host)s'" +msgid "Body: %s" msgstr "" -#: cinder/scheduler/driver.py:89 +#: cinder/tests/integrated/api/client.py:121 #, python-format -msgid "Casted '%(method)s' to network '%(host)s'" +msgid "%(auth_uri)s => code %(http_status)s" msgstr "" -#: cinder/scheduler/driver.py:107 +#: cinder/tests/integrated/api/client.py:148 #, python-format -msgid "Casted '%(method)s' to %(topic)s '%(host)s'" +msgid "%(relative_uri)s => code %(http_status)s" msgstr "" -#: cinder/scheduler/driver.py:181 -msgid "Must implement a fallback schedule" -msgstr "Debe de implementar un horario de reserva" - -#: cinder/scheduler/driver.py:185 -msgid "Driver must implement schedule_prep_resize" +#: cinder/tests/integrated/api/client.py:159 +msgid "Unexpected status code" msgstr "" -#: cinder/scheduler/driver.py:190 -msgid "Driver must implement schedule_run_instance" +#: cinder/tests/integrated/api/client.py:166 +#, python-format +msgid "Decoding JSON: %s" msgstr "" -#: cinder/scheduler/driver.py:325 -msgid "Block migration can not be used with shared storage." +#: cinder/transfer/api.py:68 +msgid "Volume in unexpected state" msgstr "" -#: cinder/scheduler/driver.py:330 -msgid "Live migration can not be used without shared storage." +#: cinder/transfer/api.py:102 cinder/volume/api.py:367 +msgid "status must be available" msgstr "" -#: cinder/scheduler/driver.py:367 cinder/scheduler/driver.py:457 +#: cinder/transfer/api.py:119 +#, fuzzy, python-format +msgid "Failed to create transfer record for %s" +msgstr "Fallo al generar metadatos para la ip %s" + +#: cinder/transfer/api.py:136 #, python-format -msgid "host %(dest)s is not compatible with original host %(src)s." +msgid "Attempt to transfer %s with invalid auth key." 
msgstr "" -#: cinder/scheduler/driver.py:416 +#: cinder/transfer/api.py:156 cinder/volume/flows/api/create_volume.py:508 #, python-format msgid "" -"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " -"memory(host:%(avail)s <= instance:%(mem_inst)s)" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" msgstr "" -#: cinder/scheduler/driver.py:472 +#: cinder/transfer/api.py:182 #, python-format -msgid "" -"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " -"disk(host:%(available)s <= instance:%(necessary)s)" +msgid "Failed to update quota donating volumetransfer id %s" msgstr "" -#: cinder/scheduler/filter_scheduler.py:51 +#: cinder/transfer/api.py:199 #, python-format -msgid "No host selection for %s defined." +msgid "Volume %s has been transferred." msgstr "" -#: cinder/scheduler/filter_scheduler.py:64 +#: cinder/volume/api.py:143 #, python-format -msgid "Attempting to build %(num_instances)d instance(s)" +msgid "Unable to query if %s is in the availability zone set" msgstr "" -#: cinder/scheduler/filter_scheduler.py:157 -msgid "Scheduler only understands Compute nodes (for now)" +#: cinder/volume/api.py:171 cinder/volume/api.py:173 +msgid "Failed to create api volume flow" msgstr "" -#: cinder/scheduler/filter_scheduler.py:199 -#, python-format -msgid "Filtered %(hosts)s" +#: cinder/volume/api.py:202 +msgid "Failed to update quota for deleting volume" msgstr "" -#: cinder/scheduler/filter_scheduler.py:209 +#: cinder/volume/api.py:214 #, python-format -msgid "Weighted %(weighted_host)s" +msgid "Volume status must be available or error, but current status is: %s" msgstr "" -#: cinder/scheduler/host_manager.py:144 -#, python-format -msgid "Host filter fails for ignored host %(host)s" +#: cinder/volume/api.py:224 +msgid "Volume cannot be deleted while migrating" msgstr "" -#: cinder/scheduler/host_manager.py:151 +#: cinder/volume/api.py:229 #, python-format -msgid "Host filter fails for non-forced host %(host)s" +msgid "Volume still has %d dependent snapshots" msgstr "" -#: cinder/scheduler/host_manager.py:157 +#: cinder/volume/api.py:285 cinder/volume/api.py:350 +#: cinder/volume/qos_specs.py:240 cinder/volume/volume_types.py:67 #, python-format -msgid "Host filter function %(func)s failed for %(host)s" +msgid "Searching by: %s" msgstr "" -#: cinder/scheduler/host_manager.py:163 -#, python-format -msgid "Host filter passes for %(host)s" +#: cinder/volume/api.py:370 +msgid "already attached" msgstr "" -#: cinder/scheduler/host_manager.py:272 -#, python-format -msgid "Received %(service_name)s service update from %(host)s." +#: cinder/volume/api.py:377 +msgid "status must be in-use to detach" msgstr "" -#: cinder/scheduler/host_manager.py:313 -msgid "host_manager only implemented for 'compute'" +#: cinder/volume/api.py:388 +#, fuzzy +msgid "Volume status must be available to reserve" +msgstr "El estado del volumen debe estar disponible" + +#: cinder/volume/api.py:464 +msgid "Snapshot cannot be created while volume is migrating" msgstr "" -#: cinder/scheduler/host_manager.py:323 -#, python-format -msgid "No service for compute ID %s" +#: cinder/volume/api.py:468 +msgid "must be available" msgstr "" -#: cinder/scheduler/manager.py:85 +#: cinder/volume/api.py:490 #, python-format msgid "" -"Driver Method %(driver_method_name)s missing: %(e)s. 
Reverting to " -"schedule()" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" msgstr "" -#: cinder/scheduler/manager.py:150 +#: cinder/volume/api.py:502 #, python-format -msgid "Failed to schedule_%(method)s: %(ex)s" +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" msgstr "" -#: cinder/scheduler/manager.py:159 -#, fuzzy, python-format -msgid "Setting instance %(instance_uuid)s to %(state)s state." -msgstr "La instacia %(instance_id)s no esta suspendida" +#: cinder/volume/api.py:553 +#, fuzzy +msgid "Volume Snapshot status must be available or error" +msgstr "El estado del volumen debe estar disponible" -#: cinder/scheduler/scheduler_options.py:66 -#, python-format -msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +#: cinder/volume/api.py:581 cinder/volume/flows/api/create_volume.py:208 +msgid "Metadata property key blank" msgstr "" -#: cinder/scheduler/scheduler_options.py:75 -#, python-format -msgid "Could not decode scheduler options: '%(e)s'" +#: cinder/volume/api.py:585 +msgid "Metadata property key greater than 255 characters" msgstr "" -#: cinder/scheduler/simple.py:87 -msgid "Not enough allocatable CPU cores remaining" +#: cinder/volume/api.py:589 +msgid "Metadata property value greater than 255 characters" msgstr "" -#: cinder/scheduler/simple.py:137 -msgid "Not enough allocatable volume gigabytes remaining" +#: cinder/volume/api.py:720 cinder/volume/api.py:772 +#, fuzzy +msgid "Volume status must be available/in-use." +msgstr "El estado del volumen debe estar disponible" + +#: cinder/volume/api.py:723 +msgid "Volume status is in-use." msgstr "" -#: cinder/scheduler/filters/core_filter.py:45 -msgid "VCPUs not set; assuming CPU collection broken" +#: cinder/volume/api.py:752 +msgid "Volume status must be available to extend." msgstr "" -#: cinder/tests/fake_utils.py:72 +#: cinder/volume/api.py:757 #, python-format -msgid "Faking execution of cmd (subprocess): %s" +msgid "" +"New size for extend must be greater than current size. (current: " +"%(size)s, extended: %(new_size)s)" msgstr "" -#: cinder/tests/fake_utils.py:80 -#, python-format -msgid "Faked command matched %s" +#: cinder/volume/api.py:778 +msgid "Volume is already part of an active migration" msgstr "" -#: cinder/tests/fake_utils.py:96 -#, python-format -msgid "Faked command raised an exception %s" +#: cinder/volume/api.py:784 +msgid "volume must not have snapshots" msgstr "" -#: cinder/tests/fake_utils.py:101 +#: cinder/volume/api.py:797 #, python-format -msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgid "No available service named %s" msgstr "" -#: cinder/tests/fakelibvirt.py:784 -msgid "Please extend mock libvirt module to support flags" +#: cinder/volume/api.py:803 +msgid "Destination host must be different than current host" msgstr "" -#: cinder/tests/fakelibvirt.py:790 -msgid "Please extend fake libvirt module to support this auth method" +#: cinder/volume/api.py:833 +msgid "Source volume not mid-migration." msgstr "" -#: cinder/tests/test_compute.py:365 cinder/tests/test_compute.py:1419 -#, python-format -msgid "Running instances: %s" -msgstr "Ejecutando instancias: %s" +#: cinder/volume/api.py:837 +msgid "Destination volume not mid-migration." 
+msgstr "" -#: cinder/tests/test_compute.py:371 +#: cinder/volume/api.py:842 #, python-format -msgid "After terminating instances: %s" -msgstr "Después de terminar las instancias: %s" +msgid "Destination has migration_status %(stat)s, expected %(exp)s." +msgstr "" -#: cinder/tests/test_compute.py:589 -msgid "Internal error" +#: cinder/volume/api.py:853 +msgid "Volume status must be available to update readonly flag." msgstr "" -#: cinder/tests/test_compute.py:1430 +#: cinder/volume/api.py:862 #, python-format -msgid "After force-killing instances: %s" +msgid "Unable to update type due to incorrect status on volume: %s" msgstr "" -#: cinder/tests/test_misc.py:92 +#: cinder/volume/api.py:868 #, python-format -msgid "" -"The following migrations are missing a downgrade:\n" -"\t%s" +msgid "Volume %s is already part of an active migration." msgstr "" -#: cinder/tests/test_cinder_manage.py:169 -msgid "id" +#: cinder/volume/api.py:874 +#, python-format +msgid "migration_policy must be 'on-demand' or 'never', passed: %s" msgstr "" -#: cinder/tests/test_cinder_manage.py:170 -msgid "IPv4" -msgstr "" - -#: cinder/tests/test_cinder_manage.py:171 -msgid "IPv6" -msgstr "" - -#: cinder/tests/test_cinder_manage.py:172 -msgid "start address" -msgstr "" - -#: cinder/tests/test_cinder_manage.py:173 -msgid "DNS1" -msgstr "" - -#: cinder/tests/test_cinder_manage.py:174 -msgid "DNS2" +#: cinder/volume/api.py:887 +#, python-format +msgid "Invalid volume_type passed: %s" msgstr "" -#: cinder/tests/test_cinder_manage.py:175 -msgid "VlanID" +#: cinder/volume/api.py:900 +#, python-format +msgid "New volume_type same as original: %s" msgstr "" -#: cinder/tests/test_cinder_manage.py:176 -msgid "project" +#: cinder/volume/api.py:915 +msgid "Retype cannot change encryption requirements" msgstr "" -#: cinder/tests/test_cinder_manage.py:177 -msgid "uuid" +#: cinder/volume/api.py:927 +msgid "Retype cannot change front-end qos specs for in-use volumes" msgstr "" -#: cinder/tests/test_volume.py:216 +#: cinder/volume/driver.py:189 cinder/volume/drivers/netapp/nfs.py:174 #, python-format -msgid "Target %s allocated" -msgstr "Destino %s asignado" +msgid "Recovering from a failed execute. Try number %s" +msgstr "Recuperandose de una ejecución fallida. Intenta el número %s" -#: cinder/tests/test_volume.py:468 +#: cinder/volume/driver.py:282 #, python-format -msgid "Cannot confirm exported volume id:%s." +msgid "copy_data_between_volumes %(src)s -> %(dest)s." msgstr "" -#: cinder/tests/test_volume_types.py:58 +#: cinder/volume/driver.py:295 cinder/volume/driver.py:309 #, python-format -msgid "Given data: %s" +msgid "Failed to attach volume %(vol)s" msgstr "" -#: cinder/tests/test_volume_types.py:59 +#: cinder/volume/driver.py:327 #, python-format -msgid "Result data: %s" +msgid "Failed to copy volume %(src)s to %(dest)d" msgstr "" -#: cinder/tests/test_xenapi.py:626 +#: cinder/volume/driver.py:340 #, python-format -msgid "Creating files in %s to simulate guest agent" +msgid "copy_image_to_volume %s." msgstr "" -#: cinder/tests/test_xenapi.py:637 +#: cinder/volume/driver.py:358 #, python-format -msgid "Removing simulated guest agent files in %s" +msgid "copy_volume_to_image %s." msgstr "" -#: cinder/tests/api/openstack/compute/test_servers.py:2144 +#: cinder/volume/driver.py:394 #, python-format -msgid "Quota exceeded: code=%(code)s" +msgid "Unable to access the backend storage via the path %(path)s." 
msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:57 +#: cinder/volume/driver.py:433 #, python-format -msgid "_create: %s" +msgid "Creating a new backup for volume %s." msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:66 +#: cinder/volume/driver.py:451 #, python-format -msgid "_delete: %s" +msgid "Restoring backup %(backup)s to volume %(volume)s." msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:75 -#, python-format -msgid "_get: %s" +#: cinder/volume/driver.py:474 +msgid "Extend volume not implemented" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:85 -#, python-format -msgid "_get_all: %s" +#: cinder/volume/driver.py:533 cinder/volume/drivers/emc/emc_smis_iscsi.py:113 +msgid "ISCSI provider_location not stored, using discovery" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:125 +#: cinder/volume/driver.py:546 #, python-format -msgid "test_snapshot_create: param=%s" +msgid "ISCSI discovery attempt failed for:%s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:134 +#: cinder/volume/driver.py:548 #, python-format -msgid "test_snapshot_create: resp_dict=%s" +msgid "Error from iscsiadm -m discovery: %s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:156 -#, python-format -msgid "test_snapshot_create_force: param=%s" -msgstr "" +#: cinder/volume/driver.py:595 +#, fuzzy, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "Imposible encontrar SR en VBD %s" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:165 +#: cinder/volume/driver.py:599 cinder/volume/drivers/emc/emc_smis_iscsi.py:156 #, python-format -msgid "test_snapshot_create_force: resp_dict=%s" +msgid "ISCSI Discovery: Found %s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:205 -#, python-format -msgid "test_snapshot_show: resp=%s" +#: cinder/volume/driver.py:696 +msgid "The volume driver requires the iSCSI initiator name in the connector." msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:231 -#, python-format -msgid "test_snapshot_detail: resp_dict=%s" +#: cinder/volume/driver.py:726 cinder/volume/driver.py:845 +#: cinder/volume/drivers/eqlx.py:247 cinder/volume/drivers/lvm.py:359 +#: cinder/volume/drivers/zadara.py:650 +#: cinder/volume/drivers/emc/emc_smis_common.py:859 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:235 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:602 +#: cinder/volume/drivers/netapp/iscsi.py:1032 +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/nexenta/iscsi.py:538 +#: cinder/volume/drivers/windows/windows.py:205 +msgid "Updating volume stats" msgstr "" -#: cinder/tests/integrated/test_login.py:31 -#, python-format -msgid "flavor: %s" +#: cinder/volume/driver.py:924 +msgid "Driver must implement initialize_connection" msgstr "" -#: cinder/tests/integrated/api/client.py:38 +#: cinder/volume/manager.py:203 #, python-format -msgid "" -"%(message)s\n" -"Status Code: %(_status)s\n" -"Body: %(_body)s" -msgstr "" - -#: cinder/tests/integrated/api/client.py:47 -msgid "Authentication error" +msgid "Driver path %s is deprecated, update your configuration to the new path." msgstr "" -#: cinder/tests/integrated/api/client.py:55 -msgid "Authorization error" +#: cinder/volume/manager.py:209 +msgid "" +"ThinLVMVolumeDriver is deprecated, please configure LVMISCSIDriver and " +"lvm_type=thin. 
Continuing with those settings." msgstr "" -#: cinder/tests/integrated/api/client.py:63 -msgid "Item not found" +#: cinder/volume/manager.py:228 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)" msgstr "" -#: cinder/tests/integrated/api/client.py:105 +#: cinder/volume/manager.py:235 #, python-format -msgid "Doing %(method)s on %(relative_url)s" +msgid "Error encountered during initialization of driver: %(name)s" msgstr "" -#: cinder/tests/integrated/api/client.py:107 +#: cinder/volume/manager.py:244 #, python-format -msgid "Body: %s" -msgstr "" +msgid "Re-exporting %s volumes" +msgstr "Exportando de nuevo los volumenes %s" -#: cinder/tests/integrated/api/client.py:125 +#: cinder/volume/manager.py:257 #, python-format -msgid "%(auth_uri)s => code %(http_status)s" +msgid "Failed to re-export volume %s: setting to error state" msgstr "" -#: cinder/tests/integrated/api/client.py:151 +#: cinder/volume/manager.py:264 #, python-format -msgid "%(relative_uri)s => code %(http_status)s" +msgid "volume %s stuck in a downloading state" msgstr "" -#: cinder/tests/integrated/api/client.py:161 -msgid "Unexpected status code" -msgstr "" +#: cinder/volume/manager.py:271 +#, python-format +msgid "volume %s: skipping export" +msgstr "volume %s: saltando exportación" -#: cinder/tests/integrated/api/client.py:168 +#: cinder/volume/manager.py:273 #, python-format -msgid "Decoding JSON: %s" +msgid "" +"Error encountered during re-exporting phase of driver initialization: " +"%(name)s" msgstr "" -#: cinder/tests/rpc/common.py:133 -#, python-format -msgid "Nested received %(queue)s, %(value)s" +#: cinder/volume/manager.py:283 +msgid "Resuming any in progress delete operations" msgstr "" -#: cinder/tests/rpc/common.py:142 +#: cinder/volume/manager.py:286 #, python-format -msgid "Nested return %s" +msgid "Resuming delete on volume: %s" msgstr "" -#: cinder/tests/rpc/common.py:160 -msgid "RPC backend does not support timeouts" +#: cinder/volume/manager.py:328 cinder/volume/manager.py:330 +msgid "Failed to create manager volume flow" msgstr "" -#: cinder/tests/rpc/common.py:227 cinder/tests/rpc/common.py:233 +#: cinder/volume/manager.py:374 cinder/volume/manager.py:391 #, python-format -msgid "Received %s" -msgstr "Recibido %s" +msgid "volume %s: deleting" +msgstr "volumen %s: eliminando" -#: cinder/virt/connection.py:85 -msgid "Failed to open connection to the hypervisor" -msgstr "Fallo al abrir conexión con el hypervisor" +#: cinder/volume/manager.py:380 +#, fuzzy +msgid "volume is not local to this node" +msgstr "Volumen no local a este nodo" -#: cinder/virt/fake.py:270 cinder/virt/xenapi_conn.py:396 -#: cinder/virt/baremetal/proxy.py:716 cinder/virt/libvirt/connection.py:2045 +#: cinder/volume/manager.py:389 #, python-format -msgid "Compute_service record created for %s " -msgstr "" +msgid "volume %s: removing export" +msgstr "volumen %s: eliminando exportación" -#: cinder/virt/fake.py:273 cinder/virt/xenapi_conn.py:399 -#: cinder/virt/baremetal/proxy.py:720 cinder/virt/libvirt/connection.py:2048 +#: cinder/volume/manager.py:394 #, python-format -msgid "Compute_service record updated for %s " +msgid "Cannot delete volume %s: volume is busy" msgstr "" -#: cinder/virt/firewall.py:130 -#, python-format -msgid "Attempted to unfilter instance %s which is not filtered" +#: cinder/volume/manager.py:422 +msgid "Failed to update usages deleting volume" msgstr "" -#: cinder/virt/firewall.py:137 +#: cinder/volume/manager.py:427 #, python-format -msgid "Filters added to instance %s" -msgstr "" - 
-#: cinder/virt/firewall.py:139 -msgid "Provider Firewall Rules refreshed" +msgid "volume %s: glance metadata deleted" msgstr "" -#: cinder/virt/firewall.py:291 +#: cinder/volume/manager.py:430 #, python-format -msgid "Adding security group rule: %r" +msgid "no glance metadata found for volume %s" msgstr "" -#: cinder/virt/firewall.py:403 cinder/virt/xenapi/firewall.py:87 +#: cinder/volume/manager.py:434 #, python-format -msgid "Adding provider rule: %s" -msgstr "" - -#: cinder/virt/images.py:86 -msgid "'qemu-img info' parsing failed." -msgstr "" +msgid "volume %s: deleted successfully" +msgstr "volumen %s: eliminado satisfactoriamente" -#: cinder/virt/images.py:92 +#: cinder/volume/manager.py:451 #, python-format -msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgid "snapshot %s: creating" msgstr "" -#: cinder/virt/images.py:104 +#: cinder/volume/manager.py:462 #, python-format -msgid "Converted to raw, but format is now %s" +msgid "snapshot %(snap_id)s: creating" msgstr "" -#: cinder/virt/vmwareapi_conn.py:105 +#: cinder/volume/manager.py:490 +#, python-format msgid "" -"Must specify vmwareapi_host_ip,vmwareapi_host_username and " -"vmwareapi_host_password to useconnection_type=vmwareapi" +"Failed updating %(snapshot_id)s metadata using the provided volumes " +"%(volume_id)s metadata" msgstr "" -#: cinder/virt/vmwareapi_conn.py:276 +#: cinder/volume/manager.py:496 #, python-format -msgid "In vmwareapi:_create_session, got this exception: %s" +msgid "snapshot %s: created successfully" msgstr "" -#: cinder/virt/vmwareapi_conn.py:359 +#: cinder/volume/manager.py:508 cinder/volume/manager.py:518 #, python-format -msgid "In vmwareapi:_call_method, got this exception: %s" +msgid "snapshot %s: deleting" msgstr "" -#: cinder/virt/vmwareapi_conn.py:398 +#: cinder/volume/manager.py:526 #, python-format -msgid "Task [%(task_name)s] %(task_ref)s status: success" +msgid "Cannot delete snapshot %s: snapshot is busy" msgstr "" -#: cinder/virt/vmwareapi_conn.py:404 -#, python-format -msgid "Task [%(task_name)s] %(task_ref)s status: error %(error_info)s" +#: cinder/volume/manager.py:556 +msgid "Failed to update usages deleting snapshot" msgstr "" -#: cinder/virt/vmwareapi_conn.py:409 +#: cinder/volume/manager.py:559 #, python-format -msgid "In vmwareapi:_poll_task, Got this error %s" +msgid "snapshot %s: deleted successfully" msgstr "" -#: cinder/virt/xenapi_conn.py:140 -msgid "" -"Must specify xenapi_connection_url, xenapi_connection_username " -"(optionally), and xenapi_connection_password to use " -"connection_type=xenapi" +#: cinder/volume/manager.py:579 +msgid "being attached by another instance" msgstr "" -"Debes especificar xenapi_connection_url, xenapi_connection_username " -"(opcional), y xenapi_connection_password para usar connection_type=xenapi" -#: cinder/virt/xenapi_conn.py:329 cinder/virt/libvirt/connection.py:472 -msgid "Could not determine iscsi initiator name" +#: cinder/volume/manager.py:583 +msgid "being attached by another host" msgstr "" -#: cinder/virt/xenapi_conn.py:460 -msgid "Host startup on XenServer is not supported." 
+#: cinder/volume/manager.py:587 +msgid "being attached by different mode" msgstr "" -#: cinder/virt/xenapi_conn.py:489 -msgid "Unable to log in to XenAPI (is the Dom0 disk full?)" +#: cinder/volume/manager.py:590 +msgid "status must be available or attaching" msgstr "" -#: cinder/virt/xenapi_conn.py:527 -msgid "Host is member of a pool, but DB says otherwise" +#: cinder/volume/manager.py:698 +#, python-format +msgid "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" msgstr "" -#: cinder/virt/xenapi_conn.py:599 cinder/virt/xenapi_conn.py:612 +#: cinder/volume/manager.py:760 #, python-format -msgid "Got exception: %s" -msgstr "Obtenida excepción %s" - -#: cinder/virt/baremetal/dom.py:93 -msgid "No domains exist." +msgid "Unable to fetch connection information from backend: %(err)s" msgstr "" -#: cinder/virt/baremetal/dom.py:95 +#: cinder/volume/manager.py:807 #, python-format -msgid "============= initial domains =========== : %s" +msgid "Unable to terminate volume connection: %(err)s" msgstr "" -#: cinder/virt/baremetal/dom.py:99 -msgid "Building domain: to be removed" +#: cinder/volume/manager.py:854 +msgid "failed to create new_volume on destination host" msgstr "" -#: cinder/virt/baremetal/dom.py:103 -msgid "Not running domain: remove" +#: cinder/volume/manager.py:857 +msgid "timeout creating new_volume on destination host" msgstr "" -#: cinder/virt/baremetal/dom.py:111 -msgid "domain running on an unknown node: discarded" +#: cinder/volume/manager.py:880 +#, python-format +msgid "Failed to copy volume %(vol1)s to %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:909 +#, python-format +msgid "" +"migrate_volume_completion: completing migration for volume %(vol1)s " +"(temporary volume %(vol2)s" msgstr "" -#: cinder/virt/baremetal/dom.py:127 +#: cinder/volume/manager.py:921 #, python-format -msgid "No such domain (%s)" +msgid "" +"migrate_volume_completion is cleaning up an error for volume %(vol1)s " +"(temporary volume %(vol2)s" msgstr "" -#: cinder/virt/baremetal/dom.py:134 +#: cinder/volume/manager.py:940 #, python-format -msgid "Failed power down Bare-metal node %s" +msgid "Failed to delete migration source vol %(vol)s: %(err)s" msgstr "" -#: cinder/virt/baremetal/dom.py:143 -msgid "deactivate -> activate fails" +#: cinder/volume/manager.py:976 +#, python-format +msgid "volume %s: calling driver migrate_volume" msgstr "" -#: cinder/virt/baremetal/dom.py:153 -msgid "destroy_domain: no such domain" +#: cinder/volume/manager.py:1016 +msgid "Updating volume status" msgstr "" -#: cinder/virt/baremetal/dom.py:154 +#: cinder/volume/manager.py:1024 #, python-format -msgid "No such domain %s" +msgid "" +"Unable to update stats, %(driver_name)s -%(driver_version)s " +"%(config_group)s driver is uninitialized." 
msgstr "" -#: cinder/virt/baremetal/dom.py:161 +#: cinder/volume/manager.py:1044 #, python-format -msgid "Domains: %s" +msgid "Notification {%s} received" msgstr "" -#: cinder/virt/baremetal/dom.py:163 +#: cinder/volume/manager.py:1091 #, python-format -msgid "Nodes: %s" +msgid "" +"Quota exceeded for %(s_pid)s, tried to extend volume by %(s_size)sG, " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" msgstr "" -#: cinder/virt/baremetal/dom.py:166 +#: cinder/volume/manager.py:1103 #, python-format -msgid "After storing domains: %s" +msgid "volume %s: extending" msgstr "" -#: cinder/virt/baremetal/dom.py:169 -msgid "deactivation/removing domain failed" +#: cinder/volume/manager.py:1105 +#, python-format +msgid "volume %s: extended successfully" msgstr "" -#: cinder/virt/baremetal/dom.py:176 -msgid "===== Domain is being created =====" +#: cinder/volume/manager.py:1107 +#, python-format +msgid "volume %s: Error trying to extend volume" msgstr "" -#: cinder/virt/baremetal/dom.py:179 -msgid "Same domain name already exists" +#: cinder/volume/manager.py:1169 +msgid "Failed to update usages while retyping volume." msgstr "" -#: cinder/virt/baremetal/dom.py:181 -msgid "create_domain: before get_idle_node" +#: cinder/volume/manager.py:1170 +msgid "Failed to get old volume type quota reservations" msgstr "" -#: cinder/virt/baremetal/dom.py:198 +#: cinder/volume/manager.py:1190 #, python-format -msgid "Created new domain: %s" +msgid "Volume %s: retyped succesfully" msgstr "" -#: cinder/virt/baremetal/dom.py:213 +#: cinder/volume/manager.py:1193 #, python-format -msgid "Failed to boot Bare-metal node %s" +msgid "" +"Volume %s: driver error when trying to retype, falling back to generic " +"mechanism." +msgstr "" + +#: cinder/volume/manager.py:1204 +msgid "Retype requires migration but is not allowed." msgstr "" -#: cinder/virt/baremetal/dom.py:222 -msgid "No such domain exists" +#: cinder/volume/manager.py:1212 +msgid "Volume must not have snapshots." 
msgstr "" -#: cinder/virt/baremetal/dom.py:226 +#: cinder/volume/qos_specs.py:57 #, python-format -msgid "change_domain_state: to new state %s" +msgid "Valid consumer of QoS specs are: %s" msgstr "" -#: cinder/virt/baremetal/dom.py:233 +#: cinder/volume/qos_specs.py:84 cinder/volume/qos_specs.py:105 +#: cinder/volume/qos_specs.py:155 cinder/volume/qos_specs.py:197 +#: cinder/volume/qos_specs.py:211 cinder/volume/qos_specs.py:225 +#: cinder/volume/volume_types.py:43 #, python-format -msgid "Stored fake domains to the file: %s" +msgid "DB error: %s" msgstr "" -#: cinder/virt/baremetal/dom.py:244 -msgid "domain does not exist" +#: cinder/volume/qos_specs.py:123 cinder/volume/qos_specs.py:140 +#: cinder/volume/qos_specs.py:272 cinder/volume/volume_types.py:52 +#: cinder/volume/volume_types.py:99 +msgid "id cannot be None" msgstr "" -#: cinder/virt/baremetal/nodes.py:42 +#: cinder/volume/qos_specs.py:156 #, python-format -msgid "Unknown baremetal driver %(d)s" +msgid "Failed to get all associations of qos specs %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:148 +#: cinder/volume/qos_specs.py:189 #, python-format -msgid "Error encountered when destroying instance '%(name)s': %(ex)s" +msgid "" +"Type %(type_id)s is already associated with another qos specs: " +"%(qos_specs_id)s" msgstr "" -#: cinder/virt/baremetal/proxy.py:162 +#: cinder/volume/qos_specs.py:198 #, python-format -msgid "instance %(instance_name)s: deleting instance files %(target)s" +msgid "Failed to associate qos specs %(id)s with type: %(vol_type_id)s" msgstr "" -#: cinder/virt/baremetal/proxy.py:189 +#: cinder/volume/qos_specs.py:212 #, python-format -msgid "instance %s: rebooted" -msgstr "instancia %s: reiniciada" - -#: cinder/virt/baremetal/proxy.py:192 -msgid "_wait_for_reboot failed" +msgid "Failed to disassociate qos specs %(id)s with type: %(vol_type_id)s" msgstr "" -#: cinder/virt/baremetal/proxy.py:222 +#: cinder/volume/qos_specs.py:226 #, python-format -msgid "instance %s: rescued" -msgstr "instancia %s: rescatada" - -#: cinder/virt/baremetal/proxy.py:225 -msgid "_wait_for_rescue failed" +msgid "Failed to disassociate qos specs %s." msgstr "" -#: cinder/virt/baremetal/proxy.py:242 -msgid "<============= spawn of baremetal =============>" +#: cinder/volume/qos_specs.py:284 cinder/volume/volume_types.py:111 +msgid "name cannot be None" msgstr "" -#: cinder/virt/baremetal/proxy.py:255 +#: cinder/volume/utils.py:144 #, python-format -msgid "instance %s: is building" -msgstr "" - -#: cinder/virt/baremetal/proxy.py:260 -msgid "Key is injected but instance is not running yet" +msgid "" +"Incorrect value error: %(blocksize)s, it may indicate that " +"'volume_dd_blocksize' was configured incorrectly. Fall back to default." msgstr "" -#: cinder/virt/baremetal/proxy.py:265 +#: cinder/volume/volume_types.py:130 #, python-format -msgid "instance %s: booted" -msgstr "instancia %s: arrancada" +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" -#: cinder/virt/baremetal/proxy.py:268 +#: cinder/volume/drivers/block_device.py:131 +#: cinder/volume/drivers/block_device.py:143 cinder/volume/drivers/lvm.py:654 +#: cinder/volume/drivers/lvm.py:669 #, python-format -msgid "~~~~~~ current state = %s ~~~~~~" +msgid "Skipping remove_export. 
No iscsi_target provisioned for volume: %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:269 +#: cinder/volume/drivers/block_device.py:157 cinder/volume/drivers/lvm.py:687 #, python-format -msgid "instance %s spawned successfully" +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:272 +#: cinder/volume/drivers/block_device.py:183 cinder/volume/drivers/lvm.py:483 #, python-format -msgid "instance %s:not booted" +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:274 -msgid "Bremetal assignment is overcommitted." +#: cinder/volume/drivers/block_device.py:200 cinder/volume/drivers/lvm.py:504 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:354 +#: cinder/volume/drivers/block_device.py:272 cinder/volume/drivers/lvm.py:227 #, python-format -msgid "instance %s: Creating image" -msgstr "instancia %s: Creando imagen" +msgid "Performing secure delete on volume: %s" +msgstr "" -#: cinder/virt/baremetal/proxy.py:473 +#: cinder/volume/drivers/block_device.py:287 #, python-format -msgid "instance %(inst_name)s: injecting %(injection)s into image %(img_id)s" +msgid "Error unrecognized volume_clear option: %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:484 -#, python-format -msgid "" -"instance %(inst_name)s: ignoring error injecting data into image " -"%(img_id)s (%(e)s)" +#: cinder/volume/drivers/block_device.py:311 cinder/volume/drivers/lvm.py:300 +#: cinder/volume/drivers/zadara.py:509 cinder/volume/drivers/nexenta/nfs.py:189 +#, fuzzy, python-format +msgid "Creating clone of volume: %s" +msgstr "Crear volumen de %s GB" + +#: cinder/volume/drivers/block_device.py:380 +msgid "No free disk" msgstr "" -#: cinder/virt/baremetal/proxy.py:529 -#, python-format -msgid "instance %s: starting toXML method" -msgstr "instancia %s: comenzando método toXML" +#: cinder/volume/drivers/block_device.py:393 +msgid "No big enough free disk" +msgstr "" -#: cinder/virt/baremetal/proxy.py:531 +#: cinder/volume/drivers/coraid.py:84 #, python-format -msgid "instance %s: finished toXML method" -msgstr "instancia %s: finalizado método toXML" +msgid "Invalid ESM url scheme \"%s\". Supported https only." +msgstr "" -#: cinder/virt/baremetal/proxy.py:574 cinder/virt/libvirt/connection.py:1815 -msgid "" -"Cannot get the number of cpu, because this function is not implemented " -"for this platform. This error can be safely ignored for now." +#: cinder/volume/drivers/coraid.py:111 +msgid "Invalid REST handle name. Expected path." msgstr "" -#: cinder/virt/baremetal/proxy.py:714 +#: cinder/volume/drivers/coraid.py:134 #, python-format -msgid "#### RLK: cpu_arch = %s " +msgid "Call to json.loads() failed: %(ex)s. Response: %(resp)s" msgstr "" -#: cinder/virt/baremetal/proxy.py:746 -msgid "Updating!" +#: cinder/volume/drivers/coraid.py:224 +msgid "Session is expired. Relogin on ESM." msgstr "" -#: cinder/virt/baremetal/proxy.py:773 cinder/virt/libvirt/connection.py:2609 -#: cinder/virt/xenapi/host.py:129 -msgid "Updating host stats" +#: cinder/volume/drivers/coraid.py:244 +msgid "Reply is empty." msgstr "" -#: cinder/virt/baremetal/tilera.py:185 -msgid "free_node...." +#: cinder/volume/drivers/coraid.py:246 +msgid "Error message is empty." 
msgstr "" -#: cinder/virt/baremetal/tilera.py:216 +#: cinder/volume/drivers/coraid.py:284 #, python-format -msgid "deactivate_node is called for node_id = %(id)s node_ip = %(ip)s" +msgid "Coraid Appliance ping failed: %s" msgstr "" -#: cinder/virt/baremetal/tilera.py:221 -msgid "status of node is set to 0" +#: cinder/volume/drivers/coraid.py:297 +#, python-format +msgid "Volume \"%(name)s\" created with VSX LUN \"%(lun)s\"" msgstr "" -#: cinder/virt/baremetal/tilera.py:232 -msgid "rootfs is already removed" +#: cinder/volume/drivers/coraid.py:311 +#, python-format +msgid "Volume \"%s\" deleted." msgstr "" -#: cinder/virt/baremetal/tilera.py:264 -msgid "Before ping to the bare-metal node" +#: cinder/volume/drivers/coraid.py:315 +#, python-format +msgid "Resize volume \"%(name)s\" to %(size)s GB." msgstr "" -#: cinder/virt/baremetal/tilera.py:275 +#: cinder/volume/drivers/coraid.py:319 #, python-format -msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is ready" +msgid "Repository for volume \"%(name)s\" found: \"%(repo)s\"" msgstr "" -#: cinder/virt/baremetal/tilera.py:279 +#: cinder/volume/drivers/coraid.py:333 #, python-format -msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is not ready, out_msg=%(out_msg)s" +msgid "Volume \"%(name)s\" resized. New size is %(size)s GB." msgstr "" -#: cinder/virt/baremetal/tilera.py:292 -msgid "Noting to do for tilera nodes: vmlinux is in CF" +#: cinder/volume/drivers/coraid.py:385 +msgid "Cannot create clone volume in different repository." msgstr "" -#: cinder/virt/baremetal/tilera.py:316 -msgid "activate_node" +#: cinder/volume/drivers/coraid.py:505 +#, python-format +msgid "Initialize connection %(shelf)s/%(lun)s for %(name)s" msgstr "" -#: cinder/virt/baremetal/tilera.py:330 -msgid "Node is unknown error state." +#: cinder/volume/drivers/eqlx.py:139 +#, python-format +msgid "" +"CLI output\n" +"%s" msgstr "" -#: cinder/virt/disk/api.py:165 -msgid "no capable image handler configured" +#: cinder/volume/drivers/eqlx.py:154 +msgid "Reading CLI MOTD" msgstr "" -#: cinder/virt/disk/api.py:178 +#: cinder/volume/drivers/eqlx.py:158 #, python-format -msgid "unknown disk image handler: %s" +msgid "Setting CLI terminal width: '%s'" msgstr "" -#: cinder/virt/disk/api.py:189 -msgid "image already mounted" +#: cinder/volume/drivers/eqlx.py:162 +#, python-format +msgid "Sending CLI command: '%s'" msgstr "" -#: cinder/virt/disk/api.py:276 cinder/virt/disk/guestfs.py:64 -#: cinder/virt/disk/guestfs.py:78 cinder/virt/disk/mount.py:100 -#, python-format -msgid "Failed to mount filesystem: %s" -msgstr "Fallo al montar el sistema de ficheros: %s" +#: cinder/volume/drivers/eqlx.py:169 +msgid "Error executing EQL command" +msgstr "" -#: cinder/virt/disk/api.py:291 +#: cinder/volume/drivers/eqlx.py:199 #, python-format -msgid "Failed to remove container: %s" +msgid "EQL-driver: executing \"%s\"" msgstr "" -#: cinder/virt/disk/api.py:441 +#: cinder/volume/drivers/eqlx.py:208 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:383 #, python-format -msgid "User %(username)s not found in password file." +msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" msgstr "" -#: cinder/virt/disk/api.py:457 +#: cinder/volume/drivers/eqlx.py:215 cinder/volume/drivers/san/san.py:149 +#, fuzzy, python-format +msgid "Error running SSH command: %s" +msgstr "Error inesperado mientras el comando se ejecutaba" + +#: cinder/volume/drivers/eqlx.py:282 #, python-format -msgid "User %(username)s not found in shadow file." 
+msgid "Volume %s does not exist, it may have already been deleted" msgstr "" -#: cinder/virt/disk/guestfs.py:39 +#: cinder/volume/drivers/eqlx.py:300 #, python-format -msgid "unsupported partition: %s" +msgid "EQL-driver: Setup is complete, group IP is %s" msgstr "" -#: cinder/virt/disk/guestfs.py:77 -msgid "unknown guestmount error" +#: cinder/volume/drivers/eqlx.py:304 +msgid "Failed to setup the Dell EqualLogic driver" msgstr "" -#: cinder/virt/disk/loop.py:30 +#: cinder/volume/drivers/eqlx.py:320 #, python-format -msgid "Could not attach image to loopback: %s" -msgstr "No se puede unir la imagen con el loopback: %s" - -#: cinder/virt/disk/mount.py:76 -msgid "no partitions found" +msgid "Failed to create volume %s" msgstr "" -#: cinder/virt/disk/mount.py:77 +#: cinder/volume/drivers/eqlx.py:329 #, python-format -msgid "Failed to map partitions: %s" +msgid "Volume %s was not found while trying to delete it" msgstr "" -#: cinder/virt/disk/nbd.py:58 -msgid "nbd unavailable: module not loaded" +#: cinder/volume/drivers/eqlx.py:333 +#, python-format +msgid "Failed to delete volume %s" msgstr "" -#: cinder/virt/disk/nbd.py:63 -msgid "No free nbd devices" +#: cinder/volume/drivers/eqlx.py:348 +#, python-format +msgid "Failed to create snapshot of volume %s" msgstr "" -#: cinder/virt/disk/nbd.py:81 +#: cinder/volume/drivers/eqlx.py:361 #, python-format -msgid "qemu-nbd error: %s" +msgid "Failed to create volume from snapshot %s" msgstr "" -#: cinder/virt/disk/nbd.py:93 +#: cinder/volume/drivers/eqlx.py:374 #, python-format -msgid "nbd device %s did not show up" +msgid "Failed to create clone of volume %s" msgstr "" -#: cinder/virt/libvirt/connection.py:265 +#: cinder/volume/drivers/eqlx.py:384 #, python-format -msgid "Connecting to libvirt: %s" -msgstr "Conectando a libvirt: %s" - -#: cinder/virt/libvirt/connection.py:286 -msgid "Connection to libvirt broke" -msgstr "Conexión a libvirt rota" +msgid "Failed to delete snapshot %(snap)s of volume %(vol)s" +msgstr "" -#: cinder/virt/libvirt/connection.py:388 +#: cinder/volume/drivers/eqlx.py:405 #, python-format -msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s" +msgid "Failed to initialize connection to volume %s" msgstr "" -#: cinder/virt/libvirt/connection.py:400 +#: cinder/volume/drivers/eqlx.py:415 #, python-format -msgid "" -"Error from libvirt during saved instance removal. Code=%(errcode)s " -"Error=%(e)s" +msgid "Failed to terminate connection to volume %s" msgstr "" -#: cinder/virt/libvirt/connection.py:411 +#: cinder/volume/drivers/eqlx.py:436 #, python-format -msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s" +msgid "Volume %s is not found!, it may have been deleted" msgstr "" -#: cinder/virt/libvirt/connection.py:423 -msgid "Instance destroyed successfully." +#: cinder/volume/drivers/eqlx.py:440 +#, python-format +msgid "Failed to ensure export of volume %s" msgstr "" -#: cinder/virt/libvirt/connection.py:435 +#: cinder/volume/drivers/eqlx.py:459 #, python-format -msgid "Error from libvirt during unfilter. 
Code=%(errcode)s Error=%(e)s" +msgid "Failed to extend_volume %(name)s from %(current_size)sGB to %(new_size)sGB" msgstr "" -#: cinder/virt/libvirt/connection.py:461 +#: cinder/volume/drivers/glusterfs.py:86 #, python-format -msgid "Deleting instance files %(target)s" +msgid "There's no Gluster config file configured (%s)" msgstr "" -#: cinder/virt/libvirt/connection.py:554 -msgid "attaching LXC block device" +#: cinder/volume/drivers/glusterfs.py:91 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" msgstr "" -#: cinder/virt/libvirt/connection.py:567 -msgid "detaching LXC block device" +#: cinder/volume/drivers/glusterfs.py:103 +msgid "mount.glusterfs is not installed" msgstr "" -#: cinder/virt/libvirt/connection.py:692 -#, fuzzy, python-format -msgid "Instance soft rebooted successfully." -msgstr "instancia %s: reiniciada" - -#: cinder/virt/libvirt/connection.py:696 -#, fuzzy -msgid "Failed to soft reboot instance." -msgstr "Fallo a reinicia la instancia" +#: cinder/volume/drivers/glusterfs.py:161 +#, python-format +msgid "Cloning volume %(src)s to volume %(dst)s" +msgstr "" -#: cinder/virt/libvirt/connection.py:725 -msgid "Instance shutdown successfully." +#: cinder/volume/drivers/glusterfs.py:166 +msgid "Volume status must be 'available'." msgstr "" -#: cinder/virt/libvirt/connection.py:761 cinder/virt/libvirt/connection.py:905 -msgid "During reboot, instance disappeared." +#: cinder/volume/drivers/glusterfs.py:202 cinder/volume/drivers/nfs.py:122 +#: cinder/volume/drivers/netapp/nfs.py:753 +#, python-format +msgid "casted to %s" msgstr "" -#: cinder/virt/libvirt/connection.py:766 -msgid "Instance rebooted successfully." +#: cinder/volume/drivers/glusterfs.py:216 +msgid "Snapshot status must be \"available\" to clone." msgstr "" -#: cinder/virt/libvirt/connection.py:867 cinder/virt/xenapi/vmops.py:1358 +#: cinder/volume/drivers/glusterfs.py:238 #, python-format -msgid "" -"Found %(migration_count)d unconfirmed migrations older than " -"%(confirm_window)d seconds" +msgid "snapshot: %(snap)s, volume: %(vol)s, volume_size: %(size)s" msgstr "" -#: cinder/virt/libvirt/connection.py:871 +#: cinder/volume/drivers/glusterfs.py:257 #, python-format -msgid "Automatically confirming migration %d" +msgid "will copy from snapshot at %s" msgstr "" -#: cinder/virt/libvirt/connection.py:896 -msgid "Instance is running" +#: cinder/volume/drivers/glusterfs.py:275 cinder/volume/drivers/nfs.py:172 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" msgstr "" -#: cinder/virt/libvirt/connection.py:910 -msgid "Instance spawned successfully." +#: cinder/volume/drivers/glusterfs.py:373 +#, python-format +msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" msgstr "" -#: cinder/virt/libvirt/connection.py:926 +#: cinder/volume/drivers/glusterfs.py:403 #, python-format -msgid "data: %(data)r, fpath: %(fpath)r" +msgid "nova call result: %s" msgstr "" -#: cinder/virt/libvirt/connection.py:978 -#, fuzzy -msgid "Guest does not have a console available" -msgstr "El usuario no tiene privilegios de administrador" +#: cinder/volume/drivers/glusterfs.py:405 +msgid "Call to Nova to create snapshot failed" +msgstr "" -#: cinder/virt/libvirt/connection.py:1020 -#, python-format -msgid "Path '%(path)s' supports direct I/O" +#: cinder/volume/drivers/glusterfs.py:427 +msgid "Nova returned \"error\" status while creating snapshot." 
msgstr "" -#: cinder/virt/libvirt/connection.py:1024 +#: cinder/volume/drivers/glusterfs.py:431 #, python-format -msgid "Path '%(path)s' does not support direct I/O: '%(ex)s'" +msgid "Status of snapshot %(id)s is now %(status)s" msgstr "" -#: cinder/virt/libvirt/connection.py:1028 cinder/virt/libvirt/connection.py:1032 +#: cinder/volume/drivers/glusterfs.py:444 #, python-format -msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'" +msgid "Timed out while waiting for Nova update for creation of snapshot %s." msgstr "" -#: cinder/virt/libvirt/connection.py:1153 -msgid "Creating image" +#: cinder/volume/drivers/glusterfs.py:456 +#, python-format +msgid "create snapshot: %s" msgstr "" -#: cinder/virt/libvirt/connection.py:1339 +#: cinder/volume/drivers/glusterfs.py:457 #, python-format -msgid "Injecting %(injection)s into image %(img_id)s" +msgid "volume id: %s" msgstr "" -#: cinder/virt/libvirt/connection.py:1349 -#, python-format -msgid "Ignoring error injecting data into image %(img_id)s (%(e)s)" +#: cinder/volume/drivers/glusterfs.py:532 +msgid "'active' must be present when writing snap_info." msgstr "" -#: cinder/virt/libvirt/connection.py:1381 +#: cinder/volume/drivers/glusterfs.py:562 #, python-format -msgid "block_device_list %s" +msgid "deleting snapshot %s" msgstr "" -#: cinder/virt/libvirt/connection.py:1658 -msgid "Starting toXML method" +#: cinder/volume/drivers/glusterfs.py:566 +msgid "Volume status must be \"available\" or \"in-use\"." msgstr "" -#: cinder/virt/libvirt/connection.py:1662 -msgid "Finished toXML method" +#: cinder/volume/drivers/glusterfs.py:582 +#, python-format +msgid "" +"Snapshot record for %s is not present, allowing snapshot_delete to " +"proceed." msgstr "" -#: cinder/virt/libvirt/connection.py:1679 +#: cinder/volume/drivers/glusterfs.py:587 #, python-format -msgid "" -"Error from libvirt while looking up %(instance_name)s: [Error Code " -"%(error_code)s] %(ex)s" +msgid "snapshot_file for this snap is %s" msgstr "" -#: cinder/virt/libvirt/connection.py:1920 -msgid "libvirt version is too old (does not support getVersion)" +#: cinder/volume/drivers/glusterfs.py:608 +#, python-format +msgid "No base file found for %s." msgstr "" -#: cinder/virt/libvirt/connection.py:1942 +#: cinder/volume/drivers/glusterfs.py:625 #, python-format -msgid "'' must be 1, but %d\n" +msgid "No %(base_id)s found for %(file)s" msgstr "" -#: cinder/virt/libvirt/connection.py:1969 +#: cinder/volume/drivers/glusterfs.py:680 #, python-format -msgid "topology (%(topology)s) must have %(ks)s" +msgid "No file found with %s as backing file." msgstr "" -#: cinder/virt/libvirt/connection.py:2067 +#: cinder/volume/drivers/glusterfs.py:690 #, python-format -msgid "" -"Instance launched has CPU info:\n" -"%s" +msgid "No snap found with %s as backing file." msgstr "" -#: cinder/virt/libvirt/connection.py:2079 +#: cinder/volume/drivers/glusterfs.py:701 #, python-format -msgid "" -"CPU doesn't have compatibility.\n" -"\n" -"%(ret)s\n" -"\n" -"Refer to %(u)s" +msgid "No file depends on %s." msgstr "" -#: cinder/virt/libvirt/connection.py:2136 +#: cinder/volume/drivers/glusterfs.py:727 #, python-format -msgid "Timeout migrating for %s. nwfilter not found." +msgid "Check condition failed: %s expected to be None." 
+msgstr "" + +#: cinder/volume/drivers/glusterfs.py:778 +msgid "Call to Nova delete snapshot failed" msgstr "" -#: cinder/virt/libvirt/connection.py:2352 +#: cinder/volume/drivers/glusterfs.py:796 #, python-format -msgid "skipping %(path)s since it looks like volume" +msgid "status of snapshot %s is still \"deleting\"... waiting" msgstr "" -#: cinder/virt/libvirt/connection.py:2407 +#: cinder/volume/drivers/glusterfs.py:802 #, python-format -msgid "Getting disk size of %(i_name)s: %(e)s" +msgid "Unable to delete snapshot %(id)s, status: %(status)s." msgstr "" -#: cinder/virt/libvirt/connection.py:2458 +#: cinder/volume/drivers/glusterfs.py:815 #, python-format -msgid "Instance %s: Starting migrate_disk_and_power_off" +msgid "Timed out while waiting for Nova update for deletion of snapshot %(id)s." msgstr "" -#: cinder/virt/libvirt/connection.py:2513 -msgid "During wait running, instance disappeared." +#: cinder/volume/drivers/glusterfs.py:904 +#, python-format +msgid "%s must be a valid raw or qcow2 image." msgstr "" -#: cinder/virt/libvirt/connection.py:2518 -msgid "Instance running successfully." +#: cinder/volume/drivers/glusterfs.py:967 +msgid "Extend volume is only supported for this driver when no snapshots exist." msgstr "" -#: cinder/virt/libvirt/connection.py:2525 +#: cinder/volume/drivers/glusterfs.py:975 #, python-format -msgid "Instance %s: Starting finish_migration" +msgid "Unrecognized backing format: %s" msgstr "" -#: cinder/virt/libvirt/connection.py:2565 +#: cinder/volume/drivers/glusterfs.py:990 #, python-format -msgid "Instance %s: Starting finish_revert_migration" +msgid "creating new volume at %s" msgstr "" -#: cinder/virt/libvirt/firewall.py:42 -msgid "" -"Libvirt module could not be loaded. NWFilterFirewall will not work " -"correctly." +#: cinder/volume/drivers/glusterfs.py:993 +#, python-format +msgid "file already exists at %s" msgstr "" -#: cinder/virt/libvirt/firewall.py:93 -msgid "Called setup_basic_filtering in nwfilter" +#: cinder/volume/drivers/glusterfs.py:1019 cinder/volume/drivers/nfs.py:159 +#, python-format +msgid "Exception during mounting %s" msgstr "" -#: cinder/virt/libvirt/firewall.py:101 -msgid "Ensuring static filters" +#: cinder/volume/drivers/glusterfs.py:1021 +#, python-format +msgid "Available shares: %s" msgstr "" -#: cinder/virt/libvirt/firewall.py:171 +#: cinder/volume/drivers/glusterfs.py:1038 #, python-format -msgid "The nwfilter(%(instance_filter_name)s) is not found." +msgid "" +"GlusterFS share at %(dir)s is not writable by the Cinder volume service. " +"Snapshot operations will not be supported." msgstr "" -#: cinder/virt/libvirt/firewall.py:217 +#: cinder/volume/drivers/gpfs.py:96 #, python-format -msgid "The nwfilter(%(instance_filter_name)s) for%(name)s is not found." +msgid "GPFS is not active. Detailed output: %s" msgstr "" -#: cinder/virt/libvirt/firewall.py:233 -msgid "iptables firewall: Setup Basic Filtering" +#: cinder/volume/drivers/gpfs.py:97 +#, python-format +msgid "GPFS is not running - state: %s" msgstr "" -#: cinder/virt/libvirt/firewall.py:252 -msgid "Attempted to unfilter instance which is not filtered" +#: cinder/volume/drivers/gpfs.py:140 +msgid "Option gpfs_mount_point_base is not set correctly." msgstr "" -#: cinder/virt/libvirt/imagecache.py:170 -#, python-format -msgid "%s is a valid instance name" +#: cinder/volume/drivers/gpfs.py:147 +msgid "Option gpfs_images_share_mode is not set correctly." 
msgstr "" -#: cinder/virt/libvirt/imagecache.py:173 -#, python-format -msgid "%s has a disk file" +#: cinder/volume/drivers/gpfs.py:153 +msgid "Option gpfs_images_dir is not set correctly." msgstr "" -#: cinder/virt/libvirt/imagecache.py:175 +#: cinder/volume/drivers/gpfs.py:160 #, python-format -msgid "Instance %(instance)s is backed by %(backing)s" +msgid "" +"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " +"belong to different file systems" msgstr "" -#: cinder/virt/libvirt/imagecache.py:186 +#: cinder/volume/drivers/gpfs.py:169 #, python-format msgid "" -"Instance %(instance)s is using a backing file %(backing)s which does not " -"appear in the image service" +"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in " +"cluster daemon level %(cur)s - must be at least at level %(min)s." msgstr "" -#: cinder/virt/libvirt/imagecache.py:237 +#: cinder/volume/drivers/gpfs.py:183 #, python-format -msgid "%(id)s (%(base_file)s): image verification failed" +msgid "%s must be an absolute path." msgstr "" -#: cinder/virt/libvirt/imagecache.py:247 +#: cinder/volume/drivers/gpfs.py:188 #, python-format -msgid "%(id)s (%(base_file)s): image verification skipped, no hash stored" +msgid "%s is not a directory." msgstr "" -#: cinder/virt/libvirt/imagecache.py:266 +#: cinder/volume/drivers/gpfs.py:197 #, python-format -msgid "Cannot remove %(base_file)s, it does not exist" +msgid "" +"The GPFS filesystem %(fs)s is not at the required release level. Current" +" level is %(cur)s, must be at least %(min)s." msgstr "" -#: cinder/virt/libvirt/imagecache.py:278 +#: cinder/volume/drivers/gpfs.py:556 #, python-format -msgid "Base file too young to remove: %s" +msgid "Failed to resize volume %(volume_id)s, error: %(error)s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:281 +#: cinder/volume/drivers/gpfs.py:604 #, python-format -msgid "Removing base file: %s" +msgid "mkfs failed on volume %(vol)s, error message was: %(err)s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:288 +#: cinder/volume/drivers/gpfs.py:637 #, python-format -msgid "Failed to remove %(base_file)s, error was %(error)s" +msgid "" +"%s cannot be accessed. Verify that GPFS is active and file system is " +"mounted." msgstr "" -#: cinder/virt/libvirt/imagecache.py:299 +#: cinder/volume/drivers/lvm.py:189 #, python-format -msgid "%(id)s (%(base_file)s): checking" +msgid "Unabled to delete due to existing snapshot for volume: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:318 +#: cinder/volume/drivers/lvm.py:215 #, python-format -msgid "" -"%(id)s (%(base_file)s): in use: on this node %(local)d local, %(remote)d " -"on other nodes" +msgid "Volume device file path %s does not exist." msgstr "" -#: cinder/virt/libvirt/imagecache.py:330 +#: cinder/volume/drivers/lvm.py:221 #, python-format -msgid "" -"%(id)s (%(base_file)s): warning -- an absent base file is in use! " -"instances: %(instance_list)s" +msgid "Size for volume: %s not found, cannot secure delete." 
msgstr "" -#: cinder/virt/libvirt/imagecache.py:338 +#: cinder/volume/drivers/lvm.py:262 #, python-format -msgid "%(id)s (%(base_file)s): in use on (%(remote)d on other nodes)" +msgid "snapshot: %s not found, skipping delete operations" msgstr "" -#: cinder/virt/libvirt/imagecache.py:348 +#: cinder/volume/drivers/lvm.py:361 #, python-format -msgid "%(id)s (%(base_file)s): image is not in use" +msgid "Unable to update stats on non-initialized Volume Group: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:354 +#: cinder/volume/drivers/lvm.py:462 #, python-format -msgid "%(id)s (%(base_file)s): image is in use" +msgid "Error creating iSCSI target, retrying creation for target: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:377 +#: cinder/volume/drivers/lvm.py:482 #, python-format -msgid "Skipping verification, no base directory at %s" +msgid "volume_info:%s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:381 -msgid "Verify base images" +#: cinder/volume/drivers/lvm.py:518 +msgid "Detected inconsistency in provider_location id" msgstr "" -#: cinder/virt/libvirt/imagecache.py:388 +#: cinder/volume/drivers/lvm.py:519 cinder/volume/drivers/lvm.py:724 +#: cinder/volume/drivers/huawei/rest_common.py:1225 #, python-format -msgid "Image id %(id)s yields fingerprint %(fingerprint)s" +msgid "%s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:406 +#: cinder/volume/drivers/lvm.py:573 #, python-format -msgid "Unknown base file: %s" +msgid "Symbolic link %s not found" msgstr "" -#: cinder/virt/libvirt/imagecache.py:411 -#, python-format -msgid "Active base files: %s" +#: cinder/volume/drivers/nfs.py:109 +msgid "Driver specific implementation needs to return mount_point_base." msgstr "" -#: cinder/virt/libvirt/imagecache.py:414 +#: cinder/volume/drivers/nfs.py:263 #, python-format -msgid "Corrupt base files: %s" +msgid "Expected volume size was %d" msgstr "" -#: cinder/virt/libvirt/imagecache.py:418 +#: cinder/volume/drivers/nfs.py:264 #, python-format -msgid "Removable base files: %s" +msgid " but size is now %d" msgstr "" -#: cinder/virt/libvirt/imagecache.py:426 -msgid "Verification complete" -msgstr "" +#: cinder/volume/drivers/nfs.py:361 +#, fuzzy, python-format +msgid "%s is already mounted" +msgstr "el grupo %s ya existe" -#: cinder/virt/libvirt/utils.py:264 -msgid "Unable to find an open port" +#: cinder/volume/drivers/nfs.py:421 +#, python-format +msgid "There's no NFS config file configured (%s)" msgstr "" -#: cinder/virt/libvirt/vif.py:90 +#: cinder/volume/drivers/nfs.py:426 #, python-format -msgid "Ensuring vlan %(vlan)s and bridge %(bridge)s" +msgid "NFS config file at %(config)s doesn't exist" msgstr "" -#: cinder/virt/libvirt/vif.py:99 +#: cinder/volume/drivers/nfs.py:431 #, python-format -msgid "Ensuring bridge %s" +msgid "NFS config 'nfs_oversub_ratio' invalid. Must be > 0: %s" msgstr "" -#: cinder/virt/libvirt/vif.py:165 cinder/virt/libvirt/vif.py:220 +#: cinder/volume/drivers/nfs.py:439 #, python-format -msgid "Failed while unplugging vif of instance '%s'" +msgid "NFS config 'nfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" msgstr "" -#: cinder/virt/libvirt/volume.py:163 +#: cinder/volume/drivers/nfs.py:493 #, python-format -msgid "iSCSI device not found at %s" +msgid "Selected %s as target nfs share." msgstr "" -#: cinder/virt/libvirt/volume.py:166 +#: cinder/volume/drivers/nfs.py:526 #, python-format -msgid "" -"ISCSI volume not yet found at: %(mount_device)s. Will rescan & retry. 
" -"Try number: %(tries)s" +msgid "%s is above nfs_used_ratio" msgstr "" -#: cinder/virt/libvirt/volume.py:178 +#: cinder/volume/drivers/nfs.py:529 #, python-format -msgid "Found iSCSI node %(mount_device)s (after %(tries)s rescans)" +msgid "%s is above nfs_oversub_ratio" msgstr "" -#: cinder/virt/vmwareapi/error_util.py:93 +#: cinder/volume/drivers/nfs.py:532 #, python-format -msgid "Error(s) %s occurred in the call to RetrieveProperties" +msgid "%s reserved space is above nfs_oversub_ratio" msgstr "" -#: cinder/virt/vmwareapi/fake.py:44 cinder/virt/xenapi/fake.py:77 +#: cinder/volume/drivers/rbd.py:160 #, python-format -msgid "%(text)s: _db_content => %(content)s" -msgstr "%(text)s: _db_content => %(content)s" +msgid "Invalid argument - whence=%s not supported" +msgstr "" -#: cinder/virt/vmwareapi/fake.py:131 -#, python-format -msgid "Property %(attr)s not set for the managed object %(objName)s" +#: cinder/volume/drivers/rbd.py:164 +msgid "Invalid argument" msgstr "" -#: cinder/virt/vmwareapi/fake.py:437 -msgid "There is no VM registered" +#: cinder/volume/drivers/rbd.py:183 +msgid "fileno() not supported by RBD()" msgstr "" -#: cinder/virt/vmwareapi/fake.py:439 cinder/virt/vmwareapi/fake.py:609 -#, python-format -msgid "Virtual Machine with ref %s is not there" +#: cinder/volume/drivers/rbd.py:210 +#, fuzzy, python-format +msgid "error opening rbd image %s" +msgstr "Desasociar volumen %s" + +#: cinder/volume/drivers/rbd.py:259 +msgid "rados and rbd python libraries not found" msgstr "" -#: cinder/virt/vmwareapi/fake.py:502 -#, python-format -msgid "Logging out a session that is invalid or already logged out: %s" +#: cinder/volume/drivers/rbd.py:265 +#, fuzzy +msgid "error connecting to ceph cluster" +msgstr "Conectando a libvirt: %s" + +#: cinder/volume/drivers/rbd.py:346 cinder/volume/drivers/sheepdog.py:178 +msgid "error refreshing volume stats" msgstr "" -#: cinder/virt/vmwareapi/fake.py:517 -msgid "Session is faulty" +#: cinder/volume/drivers/rbd.py:377 +#, python-format +msgid "clone depth exceeds limit of %s" msgstr "" -#: cinder/virt/vmwareapi/fake.py:520 -msgid "Session Invalid" +#: cinder/volume/drivers/rbd.py:411 +#, python-format +msgid "maximum clone depth (%d) has been reached - flattening source volume" msgstr "" -#: cinder/virt/vmwareapi/fake.py:606 -msgid " No Virtual Machine has been registered yet" +#: cinder/volume/drivers/rbd.py:423 +#, python-format +msgid "flattening source volume %s" msgstr "" -#: cinder/virt/vmwareapi/io_util.py:99 +#: cinder/volume/drivers/rbd.py:435 #, python-format -msgid "Glance image %s is in killed state" +msgid "creating snapshot='%s'" msgstr "" -#: cinder/virt/vmwareapi/io_util.py:107 +#: cinder/volume/drivers/rbd.py:445 #, python-format -msgid "Glance image %(image_id)s is in unknown state - %(state)s" +msgid "cloning '%(src_vol)s@%(src_snap)s' to '%(dest)s'" msgstr "" -#: cinder/virt/vmwareapi/network_utils.py:128 -msgid "" -"ESX SOAP server returned an empty port group for the host system in its " -"response" +#: cinder/volume/drivers/rbd.py:459 +msgid "clone created successfully" msgstr "" -#: cinder/virt/vmwareapi/network_utils.py:155 +#: cinder/volume/drivers/rbd.py:468 #, python-format -msgid "Creating Port Group with name %s on the ESX host" +msgid "creating volume '%s'" msgstr "" -#: cinder/virt/vmwareapi/network_utils.py:169 +#: cinder/volume/drivers/rbd.py:484 #, python-format -msgid "Created Port Group with name %s on the ESX host" +msgid "flattening %(pool)s/%(img)s" msgstr "" -#: cinder/virt/vmwareapi/read_write_util.py:150 
+#: cinder/volume/drivers/rbd.py:490 #, python-format -msgid "Exception during HTTP connection close in VMWareHTTpWrite. Exception is %s" +msgid "cloning %(pool)s/%(img)s@%(snap)s to %(dst)s" msgstr "" -#: cinder/virt/vmwareapi/vim.py:84 -msgid "Unable to import suds." +#: cinder/volume/drivers/rbd.py:527 +msgid "volume has no backup snaps" msgstr "" -#: cinder/virt/vmwareapi/vim.py:90 -msgid "Must specify vmwareapi_wsdl_loc" +#: cinder/volume/drivers/rbd.py:550 +#, python-format +msgid "volume %s is not a clone" msgstr "" -#: cinder/virt/vmwareapi/vim.py:145 +#: cinder/volume/drivers/rbd.py:568 #, python-format -msgid "No such SOAP method '%s' provided by VI SDK" +msgid "deleting parent snapshot %s" msgstr "" -#: cinder/virt/vmwareapi/vim.py:150 +#: cinder/volume/drivers/rbd.py:579 #, python-format -msgid "httplib error in %s: " +msgid "deleting parent %s" msgstr "" -#: cinder/virt/vmwareapi/vim.py:157 +#: cinder/volume/drivers/rbd.py:593 #, python-format -msgid "Socket error in %s: " +msgid "volume %s no longer exists in backend" msgstr "" -#: cinder/virt/vmwareapi/vim.py:162 -#, python-format -msgid "Type error in %s: " +#: cinder/volume/drivers/rbd.py:609 +msgid "volume has clone snapshot(s)" msgstr "" -#: cinder/virt/vmwareapi/vim.py:166 +#: cinder/volume/drivers/rbd.py:625 #, python-format -msgid "Exception in %s " +msgid "deleting rbd volume %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:66 -msgid "Getting list of instances" +#: cinder/volume/drivers/rbd.py:629 +msgid "" +"ImageBusy error raised while deleting rbd volume. This may have been " +"caused by a connection from a client that has crashed and, if so, may be " +"resolved by retrying the delete after 30 seconds has elapsed." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:82 -#, python-format -msgid "Got total of %s instances" +#: cinder/volume/drivers/rbd.py:642 +msgid "volume is a clone so cleaning references" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:126 -msgid "Couldn't get a local Datastore reference" +#: cinder/volume/drivers/rbd.py:696 +#, fuzzy, python-format +msgid "connection data: %s" +msgstr "Conectando a libvirt: %s" + +#: cinder/volume/drivers/rbd.py:705 +msgid "Not stored in rbd" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:196 -#, python-format -msgid "Creating VM with the name %s on the ESX host" +#: cinder/volume/drivers/rbd.py:709 +msgid "Blank components" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:205 +#: cinder/volume/drivers/rbd.py:712 +#, fuzzy +msgid "Not an rbd snapshot" +msgstr "Captura no valida" + +#: cinder/volume/drivers/rbd.py:724 +#, fuzzy, python-format +msgid "not cloneable: %s" +msgstr "respuesta %s" + +#: cinder/volume/drivers/rbd.py:728 #, python-format -msgid "Created VM with the name %s on the ESX host" +msgid "%s is in a different ceph cluster" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:234 -#, python-format -msgid "" -"Creating Virtual Disk of size %(vmdk_file_size_in_kb)s KB and adapter " -"type %(adapter_type)s on the ESX host local store %(data_store_name)s" +#: cinder/volume/drivers/rbd.py:733 +msgid "rbd image clone requires image format to be 'raw' but image {0} is '{1}'" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:251 -#, python-format -msgid "" -"Created Virtual Disk of size %(vmdk_file_size_in_kb)s KB on the ESX host " -"local store %(data_store_name)s" +#: cinder/volume/drivers/rbd.py:747 +#, fuzzy, python-format +msgid "Unable to open image %(loc)s: %(err)s" +msgstr "Imposible encontrar volumen %s" + +#: cinder/volume/drivers/rbd.py:817 +msgid "volume backup 
complete." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:260 -#, python-format -msgid "" -"Deleting the file %(flat_uploaded_vmdk_path)s on the ESX host localstore " -"%(data_store_name)s" +#: cinder/volume/drivers/rbd.py:830 +msgid "volume restore complete." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:272 +#: cinder/volume/drivers/rbd.py:840 cinder/volume/drivers/sheepdog.py:195 #, python-format -msgid "" -"Deleted the file %(flat_uploaded_vmdk_path)s on the ESX host local store " -"%(data_store_name)s" +msgid "Failed to Extend Volume %(volname)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:283 +#: cinder/volume/drivers/rbd.py:845 cinder/volume/drivers/sheepdog.py:200 +#: cinder/volume/drivers/windows/windows.py:223 #, python-format -msgid "" -"Downloading image file data %(image_ref)s to the ESX data store " -"%(data_store_name)s" +msgid "Extend volume from %(old_size)s GB to %(new_size)s GB." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:298 -#, python-format -msgid "" -"Downloaded image file data %(image_ref)s to the ESX data store " -"%(data_store_name)s" +#: cinder/volume/drivers/scality.py:67 +msgid "Value required for 'scality_sofs_config'" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:315 +#: cinder/volume/drivers/scality.py:78 #, python-format -msgid "Reconfiguring VM instance %s to attach the image disk" +msgid "Cannot access 'scality_sofs_config': %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:322 -#, python-format -msgid "Reconfigured VM instance %s to attach the image disk" +#: cinder/volume/drivers/scality.py:84 +msgid "Cannot execute /sbin/mount.sofs" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:329 -#, python-format -msgid "Powering on the VM instance %s" +#: cinder/volume/drivers/scality.py:105 +msgid "Cannot mount Scality SOFS, check syslog for errors" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:335 +#: cinder/volume/drivers/scality.py:139 #, python-format -msgid "Powered on the VM instance %s" +msgid "Cannot find volume dir for Scality SOFS at '%s'" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:381 +#: cinder/volume/drivers/sheepdog.py:59 #, python-format -msgid "Creating Snapshot of the VM instance %s " +msgid "Sheepdog is not working: %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:391 -#, python-format -msgid "Created Snapshot of the VM instance %s " +#: cinder/volume/drivers/sheepdog.py:64 +msgid "Sheepdog is not working" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:434 +#: cinder/volume/drivers/solidfire.py:144 #, python-format -msgid "Copying disk data before snapshot of the VM instance %s" +msgid "Payload for SolidFire API call: %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:447 +#: cinder/volume/drivers/solidfire.py:151 #, python-format -msgid "Copied disk data before snapshot of the VM instance %s" +msgid "" +"Failed to make httplib connection SolidFire Cluster: %s (verify san_ip " +"settings)" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:456 +#: cinder/volume/drivers/solidfire.py:154 #, python-format -msgid "Uploading image %s" +msgid "Failed to make httplib connection: %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:469 +#: cinder/volume/drivers/solidfire.py:161 #, python-format -msgid "Uploaded image %s" +msgid "" +"Request to SolidFire cluster returned bad status: %(status)s / %(reason)s" +" (check san_login/san_password settings)" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:479 +#: cinder/volume/drivers/solidfire.py:166 #, python-format -msgid "Deleting temporary vmdk file %s" +msgid "HTTP request failed, with status: %(status)s and reason: 
%(reason)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:488 +#: cinder/volume/drivers/solidfire.py:177 #, python-format -msgid "Deleted temporary vmdk file %s" +msgid "Call to json.loads() raised an exception: %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:520 -msgid "instance is not powered on" +#: cinder/volume/drivers/solidfire.py:183 +#, python-format +msgid "Results of SolidFire API call: %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:527 +#: cinder/volume/drivers/solidfire.py:187 #, python-format -msgid "Rebooting guest OS of VM %s" +msgid "Clone operation encountered: %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:530 +#: cinder/volume/drivers/solidfire.py:189 #, python-format -msgid "Rebooted guest OS of VM %s" +msgid "Waiting for outstanding operation before retrying snapshot: %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:532 +#: cinder/volume/drivers/solidfire.py:195 #, python-format -msgid "Doing hard reboot of VM %s" +msgid "Detected xDBVersionMismatch, retry %s of 5" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:536 +#: cinder/volume/drivers/solidfire.py:202 +#: cinder/volume/drivers/solidfire.py:271 +#: cinder/volume/drivers/solidfire.py:366 +#, fuzzy, python-format +msgid "API response: %s" +msgstr "respuesta %s" + +#: cinder/volume/drivers/solidfire.py:222 #, python-format -msgid "Did hard reboot of VM %s" +msgid "Found solidfire account: %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:548 +#: cinder/volume/drivers/solidfire.py:253 #, python-format -msgid "instance - %s not present" +msgid "solidfire account: %s does not exist, create it..." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:567 +#: cinder/volume/drivers/solidfire.py:315 #, python-format -msgid "Powering off the VM %s" +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:572 +#: cinder/volume/drivers/solidfire.py:398 +#, fuzzy +msgid "Failed to get model update from clone" +msgstr "Fallo al generar metadatos para la ip %s" + +#: cinder/volume/drivers/solidfire.py:410 #, python-format -msgid "Powered off the VM %s" +msgid "Failed volume create: %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:576 +#: cinder/volume/drivers/solidfire.py:425 #, python-format -msgid "Unregistering the VM %s" +msgid "More than one valid preset was detected, using %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:579 +#: cinder/volume/drivers/solidfire.py:460 #, python-format -msgid "Unregistered the VM %s" +msgid "Failed to get SolidFire Volume: %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:581 +#: cinder/volume/drivers/solidfire.py:469 #, python-format -msgid "" -"In vmwareapi:vmops:destroy, got this exception while un-registering the " -"VM: %s" +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:592 +#: cinder/volume/drivers/solidfire.py:478 #, python-format -msgid "Deleting contents of the VM %(name)s from datastore %(datastore_name)s" +msgid "Volume %s, not found on SF Cluster." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:602 +#: cinder/volume/drivers/solidfire.py:481 #, python-format -msgid "Deleted contents of the VM %(name)s from datastore %(datastore_name)s" +msgid "Found %(count)s volumes mapped to id: %(uuid)s." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:607 -#, python-format -msgid "" -"In vmwareapi:vmops:destroy, got this exception while deleting the VM " -"contents from the disk: %s" +#: cinder/volume/drivers/solidfire.py:550 +msgid "Enter SolidFire delete_volume..." 
msgstr "" -#: cinder/virt/vmwareapi/vmops.py:615 -msgid "pause not supported for vmwareapi" +#: cinder/volume/drivers/solidfire.py:554 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:619 -msgid "unpause not supported for vmwareapi" +#: cinder/volume/drivers/solidfire.py:556 +msgid "This usually means the volume was never successfully created." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:633 +#: cinder/volume/drivers/solidfire.py:569 #, python-format -msgid "Suspending the VM %s " +msgid "Failed to delete SolidFire Volume: %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:637 +#: cinder/volume/drivers/solidfire.py:572 +#: cinder/volume/drivers/solidfire.py:646 +#: cinder/volume/drivers/solidfire.py:709 +#: cinder/volume/drivers/solidfire.py:734 #, python-format -msgid "Suspended the VM %s " +msgid "Volume ID %s was not found on the SolidFire Cluster!" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:640 -msgid "instance is powered off and can not be suspended." +#: cinder/volume/drivers/solidfire.py:575 +msgid "Leaving SolidFire delete_volume" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:643 -#, python-format -msgid "VM %s was already in suspended state. So returning without doing anything" +#: cinder/volume/drivers/solidfire.py:579 +msgid "Executing SolidFire ensure_export..." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:656 -#, python-format -msgid "Resuming the VM %s" +#: cinder/volume/drivers/solidfire.py:587 +msgid "Executing SolidFire create_export..." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:661 -#, python-format -msgid "Resumed the VM %s " +#: cinder/volume/drivers/solidfire.py:638 +msgid "Entering SolidFire extend_volume..." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:663 -msgid "instance is not in a suspended state" +#: cinder/volume/drivers/solidfire.py:660 +msgid "Leaving SolidFire extend_volume" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:699 -msgid "get_diagnostics not implemented for vmwareapi" +#: cinder/volume/drivers/solidfire.py:665 +msgid "Updating cluster status info" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:757 -#, python-format -msgid "" -"Reconfiguring VM instance %(name)s to set the machine id with ip - " -"%(ip_addr)s" +#: cinder/volume/drivers/solidfire.py:673 +#, fuzzy +msgid "Failed to get updated stats" +msgstr "Fallo al suspender la instancia" + +#: cinder/volume/drivers/solidfire.py:703 +#: cinder/volume/drivers/solidfire.py:728 +msgid "Entering SolidFire attach_volume..." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:765 -#, python-format -msgid "" -"Reconfigured VM instance %(name)s to set the machine id with ip - " -"%(ip_addr)s" +#: cinder/volume/drivers/solidfire.py:773 +msgid "Leaving SolidFire transfer volume" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:802 +#: cinder/volume/drivers/zadara.py:236 #, python-format -msgid "Creating directory with path %s" +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:806 +#: cinder/volume/drivers/zadara.py:260 #, python-format -msgid "Created directory with path %s" +msgid "Operation completed. 
%(data)s" msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:89 +#: cinder/volume/drivers/zadara.py:357 #, python-format -msgid "Downloading image %s from glance image server" +msgid "Pool %(name)s: %(total)sGB total, %(free)sGB free" msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:103 +#: cinder/volume/drivers/zadara.py:408 cinder/volume/drivers/zadara.py:531 #, python-format -msgid "Downloaded image %s from glance image server" +msgid "Volume %(name)s could not be found. It might be already deleted" msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:108 +#: cinder/volume/drivers/zadara.py:438 #, python-format -msgid "Uploading image %s to the Glance image server" +msgid "Create snapshot: %s" msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:129 +#: cinder/volume/drivers/zadara.py:445 cinder/volume/drivers/zadara.py:490 +#: cinder/volume/drivers/zadara.py:516 #, python-format -msgid "Uploaded image %s to the Glance image server" +msgid "Volume %(name)s not found" msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:139 +#: cinder/volume/drivers/zadara.py:456 #, python-format -msgid "Getting image size for the image %s" +msgid "Delete snapshot: %s" msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:143 +#: cinder/volume/drivers/zadara.py:464 #, python-format -msgid "Got image size of %(size)s for the image %(image)s" +msgid "snapshot: original volume %s not found, skipping delete operation" msgstr "" -#: cinder/virt/xenapi/fake.py:553 cinder/virt/xenapi/fake.py:652 -#: cinder/virt/xenapi/fake.py:670 cinder/virt/xenapi/fake.py:732 -msgid "Raising NotImplemented" -msgstr "Lanzando NotImplemented" - -#: cinder/virt/xenapi/fake.py:555 +#: cinder/volume/drivers/zadara.py:472 #, python-format -msgid "xenapi.fake does not have an implementation for %s" -msgstr "xenapi.fake no tiene una implementación para %s" +msgid "snapshot: snapshot %s not found, skipping delete operation" +msgstr "" -#: cinder/virt/xenapi/fake.py:589 +#: cinder/volume/drivers/zadara.py:483 #, python-format -msgid "Calling %(localname)s %(impl)s" -msgstr "Llamando %(localname)s %(impl)s" +msgid "Creating volume from snapshot: %s" +msgstr "" -#: cinder/virt/xenapi/fake.py:594 +#: cinder/volume/drivers/zadara.py:496 #, python-format -msgid "Calling getter %s" -msgstr "Llanado al adquiridor %s" +msgid "Snapshot %(name)s not found" +msgstr "" -#: cinder/virt/xenapi/fake.py:654 +#: cinder/volume/drivers/zadara.py:614 #, python-format -msgid "" -"xenapi.fake does not have an implementation for %s or it has been called " -"with the wrong number of arguments" +msgid "Attach properties: %(properties)s" msgstr "" -"xenapi.fake no tiene una implementación para %s o ha sido llamada con un " -"número incorrecto de argumentos" -#: cinder/virt/xenapi/host.py:67 -#, python-format +#: cinder/volume/drivers/emc/emc_smis_common.py:40 msgid "" -"Instance %(name)s running on %(host)s could not be found in the database:" -" assuming it is a worker VM and skipping migration to a new host" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." msgstr "" -#: cinder/virt/xenapi/host.py:137 -#, python-format -msgid "Unable to get SR for this host: %s" +#: cinder/volume/drivers/emc/emc_smis_common.py:79 +msgid "Entering create_volume." 
msgstr "" -#: cinder/virt/xenapi/host.py:169 -msgid "Unable to get updated status" -msgstr "" +#: cinder/volume/drivers/emc/emc_smis_common.py:83 +#, fuzzy, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "volume %(vol_name)s: creando lv del tamaño %(vol_size)sG" -#: cinder/virt/xenapi/host.py:172 +#: cinder/volume/drivers/emc/emc_smis_common.py:91 #, python-format -msgid "The call to %(method)s returned an error: %(e)s." +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" msgstr "" -#: cinder/virt/xenapi/network_utils.py:37 +#: cinder/volume/drivers/emc/emc_smis_common.py:98 #, python-format -msgid "Found non-unique network for name_label %s" +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" msgstr "" -#: cinder/virt/xenapi/network_utils.py:55 +#: cinder/volume/drivers/emc/emc_smis_common.py:107 #, python-format -msgid "Found non-unique network for bridge %s" -msgstr "Encontrada una red no única para el puente %s" +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." +msgstr "" -#: cinder/virt/xenapi/network_utils.py:58 +#: cinder/volume/drivers/emc/emc_smis_common.py:115 #, python-format -msgid "Found no network for bridge %s" -msgstr "No se ha encontrado red para el puente %s" +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" -#: cinder/virt/xenapi/pool.py:111 +#: cinder/volume/drivers/emc/emc_smis_common.py:130 #, python-format -msgid "Unable to eject %(host)s from the pool; pool not empty" +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" msgstr "" -#: cinder/virt/xenapi/pool.py:126 +#: cinder/volume/drivers/emc/emc_smis_common.py:137 #, python-format -msgid "Unable to eject %(host)s from the pool; No master found" +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" msgstr "" -#: cinder/virt/xenapi/pool.py:143 +#: cinder/volume/drivers/emc/emc_smis_common.py:144 #, python-format -msgid "Pool-Join failed: %(e)s" +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" msgstr "" -#: cinder/virt/xenapi/pool.py:146 -#, python-format -msgid "Unable to join %(host)s in the pool" +#: cinder/volume/drivers/emc/emc_smis_common.py:152 +msgid "Entering create_volume_from_snapshot." msgstr "" -#: cinder/virt/xenapi/pool.py:162 +#: cinder/volume/drivers/emc/emc_smis_common.py:157 #, python-format -msgid "Pool-eject failed: %(e)s" +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" msgstr "" -#: cinder/virt/xenapi/pool.py:174 -#, fuzzy, python-format -msgid "Unable to set up pool: %(e)s." -msgstr "Imposible desasociar volumen %s" - -#: cinder/virt/xenapi/pool.py:185 +#: cinder/volume/drivers/emc/emc_smis_common.py:167 #, python-format -msgid "Pool-set_name_label failed: %(e)s" +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." msgstr "" -#: cinder/virt/xenapi/vif.py:103 +#: cinder/volume/drivers/emc/emc_smis_common.py:177 #, python-format -msgid "Found no PIF for device %s" +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." 
msgstr "" -#: cinder/virt/xenapi/vif.py:122 +#: cinder/volume/drivers/emc/emc_smis_common.py:188 #, python-format msgid "" -"PIF %(pif_rec['uuid'])s for network %(bridge)s has VLAN id %(pif_vlan)d. " -"Expected %(vlan_num)d" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:218 -msgid "Created VM" +#: cinder/volume/drivers/emc/emc_smis_common.py:197 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:245 +#: cinder/volume/drivers/emc/emc_smis_common.py:218 #, python-format -msgid "VBD not found in instance %s" -msgstr "VBD no encontrado en la instancia %s" +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:262 -#, fuzzy, python-format -msgid "VBD %s already detached" -msgstr "el grupo %s ya existe" +#: cinder/volume/drivers/emc/emc_smis_common.py:230 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:265 +#: cinder/volume/drivers/emc/emc_smis_common.py:241 #, python-format -msgid "VBD %(vbd_ref)s detach rejected, attempt %(num_attempt)d/%(max_attempts)d" +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:270 +#: cinder/volume/drivers/emc/emc_smis_common.py:257 #, python-format -msgid "Unable to unplug VBD %s" -msgstr "Imposible desconectar VBD %s" +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:275 +#: cinder/volume/drivers/emc/emc_smis_common.py:266 #, python-format -msgid "Reached maximum number of retries trying to unplug VBD %s" +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:286 +#: cinder/volume/drivers/emc/emc_smis_common.py:278 #, python-format -msgid "Unable to destroy VBD %s" -msgstr "Imposible destruir VBD %s" +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:305 -#, fuzzy, python-format -msgid "Creating %(vbd_type)s-type VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " -msgstr "Creado el VBD %(vbd_ref)s para VM %(vm_ref)s, VDI %(vdi_ref)s" +#: cinder/volume/drivers/emc/emc_smis_common.py:287 +msgid "Entering create_cloned_volume." +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:308 +#: cinder/volume/drivers/emc/emc_smis_common.py:292 #, python-format -msgid "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s." 
-msgstr "Creado el VBD %(vbd_ref)s para VM %(vm_ref)s, VDI %(vdi_ref)s" +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:319 +#: cinder/volume/drivers/emc/emc_smis_common.py:302 #, python-format -msgid "Unable to destroy VDI %s" +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:336 +#: cinder/volume/drivers/emc/emc_smis_common.py:312 #, python-format msgid "" -"Created VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s)" -" on %(sr_ref)s." +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." msgstr "" -"VDI creado %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s) " -"sobre %(sr_ref)s." -#: cinder/virt/xenapi/vm_utils.py:345 +#: cinder/volume/drivers/emc/emc_smis_common.py:321 #, python-format -msgid "Copied VDI %(vdi_ref)s from VDI %(vdi_to_copy_ref)s on %(sr_ref)s." +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:353 +#: cinder/volume/drivers/emc/emc_smis_common.py:342 #, python-format -msgid "Cloned VDI %(vdi_ref)s from VDI %(vdi_to_clone_ref)s" +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. Return code: %(rc)lu.Error: %(error)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:372 +#: cinder/volume/drivers/emc/emc_smis_common.py:354 #, python-format -msgid "No primary VDI found for %(vm_ref)s" +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:379 -#, fuzzy, python-format -msgid "Snapshotting with label '%(label)s'" -msgstr "Creando snapshot de la VM %(vm_ref)s con etiqueta '%(label)s'..." - -#: cinder/virt/xenapi/vm_utils.py:392 -#, fuzzy, python-format -msgid "Created snapshot %(template_vm_ref)s" -msgstr "Instantánea creada %(template_vm_ref)s de la VM %(vm_ref)s." - -#: cinder/virt/xenapi/vm_utils.py:431 +#: cinder/volume/drivers/emc/emc_smis_common.py:365 #, python-format -msgid "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s" -msgstr "Pidiendo xapi a subir %(vdi_uuids)s como ID %(image_id)s" +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:583 +#: cinder/volume/drivers/emc/emc_smis_common.py:381 #, python-format -msgid "Creating blank HD of size %(req_size)d gigs" +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:634 +#: cinder/volume/drivers/emc/emc_smis_common.py:390 #, python-format msgid "" -"Fast cloning is only supported on default local SR of type ext. SR on " -"this system was found to be of type %(sr_type)s. Ignoring the cow flag." +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. 
Error: %(error)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:724 +#: cinder/volume/drivers/emc/emc_smis_common.py:402 #, python-format msgid "" -"download_vhd %(image)s attempt %(attempt_num)d/%(max_attempts)d from " -"%(glance_host)s:%(glance_port)s" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:734 +#: cinder/volume/drivers/emc/emc_smis_common.py:411 +msgid "Entering delete_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:413 #, python-format -msgid "download_vhd failed: %r" +msgid "Delete Volume: %(volume)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:750 +#: cinder/volume/drivers/emc/emc_smis_common.py:420 #, python-format -msgid "Asking xapi to fetch vhd image %(image)s" +msgid "Volume %(name)s not found on the array. No volume to delete." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:760 +#: cinder/volume/drivers/emc/emc_smis_common.py:430 #, python-format msgid "" -"xapi 'download_vhd' returned VDI of type '%(vdi_type)s' with UUID " -"'%(vdi_uuid)s'" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:789 +#: cinder/volume/drivers/emc/emc_smis_common.py:438 #, python-format -msgid "vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes=%(vdi_size_bytes)d" +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:805 +#: cinder/volume/drivers/emc/emc_smis_common.py:442 #, python-format -msgid "image_size_bytes=%(size_bytes)d, allowed_size_bytes=%(allowed_size_bytes)d" +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:809 +#: cinder/volume/drivers/emc/emc_smis_common.py:456 #, python-format msgid "" -"Image size %(size_bytes)d exceeded instance_type allowed size " -"%(allowed_size_bytes)d" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:831 +#: cinder/volume/drivers/emc/emc_smis_common.py:465 #, python-format -msgid "Fetching image %(image)s, type %(image_type_str)" +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:844 -#, fuzzy, python-format -msgid "Size for image %(image)s: %(virtual_size)d" -msgstr "Tamaño para imagen %(image)s:%(virtual_size)d" +#: cinder/volume/drivers/emc/emc_smis_common.py:472 +msgid "Entering create_snapshot." 
+msgstr "" -#: cinder/virt/xenapi/vm_utils.py:853 +#: cinder/volume/drivers/emc/emc_smis_common.py:476 #, python-format -msgid "" -"Kernel/Ramdisk image is too large: %(vdi_size)d bytes, max %(max_size)d " -"bytes" +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:870 +#: cinder/volume/drivers/emc/emc_smis_common.py:488 #, python-format -msgid "Copying VDI %s to /boot/guest on dom0" -msgstr "Copiando VDI %s a /boot/guest on dom0" +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:884 +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:497 +#: cinder/volume/drivers/emc/emc_smis_common.py:567 #, python-format -msgid "Kernel/Ramdisk VDI %s destroyed" -msgstr "Kernel/Ramdisk VDI %s destruído" - -#: cinder/virt/xenapi/vm_utils.py:895 -#, fuzzy -msgid "Failed to fetch glance image" -msgstr "Fallo a reinicia la instancia" +msgid "Cannot find Replication Service to create snapshot for volume %s." +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:934 +#: cinder/volume/drivers/emc/emc_smis_common.py:502 #, python-format -msgid "Detected %(image_type_str)s format for image %(image_ref)s" +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:955 +#: cinder/volume/drivers/emc/emc_smis_common.py:518 #, python-format -msgid "Looking up vdi %s for PV kernel" -msgstr "Buscando vid %s para el kernel PV" +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:973 +#: cinder/volume/drivers/emc/emc_smis_common.py:527 #, python-format -msgid "Unknown image format %(disk_image_type)s" +msgid "" +"Error Create Snapshot: %(snapshot)s Volume: %(volume)s Error: " +"%(errordesc)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1016 +#: cinder/volume/drivers/emc/emc_smis_common.py:535 #, python-format -msgid "VDI %s is still available" -msgstr "VDI %s está todavía disponible" +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1059 -#, python-format -msgid "(VM_UTILS) xenserver vm state -> |%s|" -msgstr "(VM_UTILS) xenserver vm state -> |%s|" +#: cinder/volume/drivers/emc/emc_smis_common.py:541 +msgid "Entering delete_snapshot." +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1061 +#: cinder/volume/drivers/emc/emc_smis_common.py:545 #, python-format -msgid "(VM_UTILS) xenapi power_state -> |%s|" -msgstr "(VM_UTILS) xenapi power_state -> |%s|" +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1088 +#: cinder/volume/drivers/emc/emc_smis_common.py:551 #, python-format -msgid "Unable to parse rrd of %(vm_uuid)s" +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1108 +#: cinder/volume/drivers/emc/emc_smis_common.py:559 #, python-format -msgid "Re-scanning SR %s" -msgstr "Re-escaneando SR %s" +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." 
+msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1136 +#: cinder/volume/drivers/emc/emc_smis_common.py:574 #, python-format -msgid "Flag sr_matching_filter '%s' does not respect formatting convention" +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1154 +#: cinder/volume/drivers/emc/emc_smis_common.py:590 +#, python-format msgid "" -"XenAPI is unable to find a Storage Repository to install guest instances " -"on. Please check your configuration and/or configure the flag " -"'sr_matching_filter'" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1167 -msgid "Cannot find SR of content-type ISO" +#: cinder/volume/drivers/emc/emc_smis_common.py:599 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1175 +#: cinder/volume/drivers/emc/emc_smis_common.py:611 #, python-format -msgid "ISO: looking at SR %(sr_rec)s" +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1177 -msgid "ISO: not iso content" -msgstr "" +#: cinder/volume/drivers/emc/emc_smis_common.py:621 +#, fuzzy, python-format +msgid "Create export: %(volume)s" +msgstr "Exportando de nuevo los volumenes %s" -#: cinder/virt/xenapi/vm_utils.py:1180 -msgid "ISO: iso content_type, no 'i18n-key' key" +#: cinder/volume/drivers/emc/emc_smis_common.py:626 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1183 -msgid "ISO: iso content_type, i18n-key value not 'local-storage-iso'" +#: cinder/volume/drivers/emc/emc_smis_common.py:648 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1187 -msgid "ISO: SR MATCHing our criteria" +#: cinder/volume/drivers/emc/emc_smis_common.py:663 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1189 -msgid "ISO: ISO, looking to see if it is host local" +#: cinder/volume/drivers/emc/emc_smis_common.py:674 +#, fuzzy, python-format +msgid "Error mapping volume %s." +msgstr "Desasociar volumen %s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:678 +#, fuzzy, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "volumen %s: eliminado satisfactoriamente" + +#: cinder/volume/drivers/emc/emc_smis_common.py:694 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1192 +#: cinder/volume/drivers/emc/emc_smis_common.py:707 #, python-format -msgid "ISO: PBD %(pbd_ref)s disappeared" +msgid "Error unmapping volume %s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1195 +#: cinder/volume/drivers/emc/emc_smis_common.py:711 +#, fuzzy, python-format +msgid "HidePaths for volume %s completed successfully." 
+msgstr "volumen %s: eliminado satisfactoriamente" + +#: cinder/volume/drivers/emc/emc_smis_common.py:724 #, python-format -msgid "ISO: PBD matching, want %(pbd_rec)s, have %(host)s" +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1198 -msgid "ISO: SR with local PBD" +#: cinder/volume/drivers/emc/emc_smis_common.py:739 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1220 +#: cinder/volume/drivers/emc/emc_smis_common.py:744 +#, fuzzy, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "volumen %s: eliminado satisfactoriamente" + +#: cinder/volume/drivers/emc/emc_smis_common.py:757 #, python-format msgid "" -"Unable to obtain RRD XML for VM %(vm_uuid)s with server details: " -"%(server)s." +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1236 +#: cinder/volume/drivers/emc/emc_smis_common.py:770 #, python-format -msgid "Unable to obtain RRD XML updates with server details: %(server)s." +msgid "Error unmapping volume %(vol)s. %(error)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1290 +#: cinder/volume/drivers/emc/emc_smis_common.py:775 +#, fuzzy, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "volumen %s: eliminado satisfactoriamente" + +#: cinder/volume/drivers/emc/emc_smis_common.py:781 #, python-format -msgid "Invalid statistics data from Xenserver: %s" +msgid "Map volume: %(volume)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1343 +#: cinder/volume/drivers/emc/emc_smis_common.py:790 +#: cinder/volume/drivers/emc/emc_smis_common.py:820 #, python-format -msgid "VHD %(vdi_uuid)s has parent %(parent_ref)s" -msgstr "VHD %(vdi_uuid)s tiene origen en %(parent_ref)s" +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1417 +#: cinder/volume/drivers/emc/emc_smis_common.py:804 #, python-format -msgid "" -"Parent %(parent_uuid)s doesn't match original parent " -"%(original_parent_uuid)s, waiting for coalesce..." +msgid "Unmap volume: %(volume)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1427 +#: cinder/volume/drivers/emc/emc_smis_common.py:810 #, python-format -msgid "VHD coalesce attempts exceeded (%(max_attempts)d), giving up..." +msgid "Volume %s is not mapped. No volume to unmap." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1462 +#: cinder/volume/drivers/emc/emc_smis_common.py:834 #, python-format -msgid "Timeout waiting for device %s to be created" +msgid "Initialize connection: %(volume)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1473 +#: cinder/volume/drivers/emc/emc_smis_common.py:840 #, python-format -msgid "Plugging VBD %s ... " +msgid "Volume %s is already mapped." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1476 +#: cinder/volume/drivers/emc/emc_smis_common.py:852 #, python-format -msgid "Plugging VBD %s done." +msgid "Terminate connection: %(volume)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1478 +#: cinder/volume/drivers/emc/emc_smis_common.py:884 #, python-format -msgid "VBD %(vbd_ref)s plugged as %(orig_dev)s" +msgid "Found Storage Type: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1481 +#: cinder/volume/drivers/emc/emc_smis_common.py:887 +msgid "Storage type not found." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:903 #, python-format -msgid "VBD %(vbd_ref)s plugged into wrong dev, remapping to %(dev)s" +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:906 +msgid "Masking View not found." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1490 +#: cinder/volume/drivers/emc/emc_smis_common.py:928 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:948 #, python-format -msgid "Destroying VBD for VDI %s ... " +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:952 +msgid "Ecom server not found." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1498 +#: cinder/volume/drivers/emc/emc_smis_common.py:959 +#, fuzzy +msgid "Cannot connect to ECOM server" +msgstr "Reconectado a la cola" + +#: cinder/volume/drivers/emc/emc_smis_common.py:971 #, python-format -msgid "Destroying VBD for VDI %s done." +msgid "Found Replication Service: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1511 +#: cinder/volume/drivers/emc/emc_smis_common.py:984 #, python-format -msgid "Running pygrub against %s" -msgstr "Ejecutando pygrub contra %s" +msgid "Found Storage Configuration Service: %s" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1518 +#: cinder/volume/drivers/emc/emc_smis_common.py:997 #, python-format -msgid "Found Xen kernel %s" -msgstr "Kernel Xen Encontrado %s" +msgid "Found Controller Configuration Service: %s" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1520 -msgid "No Xen kernel found. Booting HVM." -msgstr "Kernel Xen no encontrado. Reiniciando HVM" +#: cinder/volume/drivers/emc/emc_smis_common.py:1010 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1533 -msgid "Partitions:" +#: cinder/volume/drivers/emc/emc_smis_common.py:1054 +#, python-format +msgid "Pool %(storage_type)s is not found." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1539 +#: cinder/volume/drivers/emc/emc_smis_common.py:1060 #, python-format -msgid " %(num)s: %(ptype)s %(size)d sectors" +msgid "Storage system not found for pool %(storage_type)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1565 +#: cinder/volume/drivers/emc/emc_smis_common.py:1066 #, python-format -msgid "" -"Writing partition table %(primary_first)d %(primary_last)d to " -"%(dev_path)s..." +msgid "Pool: %(pool)s SystemName: %(systemname)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1578 +#: cinder/volume/drivers/emc/emc_smis_common.py:1082 #, python-format -msgid "Writing partition table %s done." +msgid "Pool name: %(poolname)s System name: %(systemname)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1632 +#: cinder/volume/drivers/emc/emc_smis_common.py:1114 #, python-format -msgid "" -"Starting sparse_copy src=%(src_path)s dst=%(dst_path)s " -"virtual_size=%(virtual_size)d block_size=%(block_size)d" +msgid "Volume %(volumename)s not found on the array." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1664 +#: cinder/volume/drivers/emc/emc_smis_common.py:1117 #, python-format -msgid "" -"Finished sparse_copy in %(duration).2f secs, %(compression_pct).2f%% " -"reduction in size" +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1714 -msgid "" -"XenServer tools installed in this image are capable of network injection." 
-" Networking files will not bemanipulated" +#: cinder/volume/drivers/emc/emc_smis_common.py:1130 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1722 +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format msgid "" -"XenServer tools are present in this image but are not capable of network " -"injection" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1726 -msgid "XenServer tools are not installed in this image" +#: cinder/volume/drivers/emc/emc_smis_common.py:1158 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1742 -msgid "Manipulating interface files directly" +#: cinder/volume/drivers/emc/emc_smis_common.py:1184 +#, python-format +msgid "Error finding %s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1751 +#: cinder/volume/drivers/emc/emc_smis_common.py:1188 #, python-format -msgid "Failed to mount filesystem (expected for non-linux instances): %s" +msgid "Found %(name)s: %(initiator)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:131 cinder/virt/xenapi/vmops.py:722 +#: cinder/volume/drivers/emc/emc_smis_common.py:1248 #, python-format -msgid "Updating progress to %(progress)d" +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:231 +#: cinder/volume/drivers/emc/emc_smis_common.py:1289 #, python-format -msgid "Attempted to power on non-existent instance bad instance id %s" +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:233 -#, fuzzy -msgid "Starting instance" -msgstr "Creando una instancia raw" - -#: cinder/virt/xenapi/vmops.py:303 -msgid "Removing kernel/ramdisk files from dom0" +#: cinder/volume/drivers/emc/emc_smis_common.py:1302 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." msgstr "" -#: cinder/virt/xenapi/vmops.py:358 -#, fuzzy -msgid "Failed to spawn, rolling back" -msgstr "Fallo al suspender la instancia" - -#: cinder/virt/xenapi/vmops.py:443 -msgid "Detected ISO image type, creating blank VM for install" +#: cinder/volume/drivers/emc/emc_smis_common.py:1314 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:462 -msgid "Auto configuring disk, attempting to resize partition..." +#: cinder/volume/drivers/emc/emc_smis_common.py:1326 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:515 +#: cinder/volume/drivers/emc/emc_smis_common.py:1361 #, python-format -msgid "Invalid value for injected_files: %r" +msgid "Available device number on %(storage)s: %(device)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:520 +#: cinder/volume/drivers/emc/emc_smis_common.py:1404 #, python-format -msgid "Injecting file path: '%s'" +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." 
msgstr "" -#: cinder/virt/xenapi/vmops.py:527 -msgid "Setting admin password" +#: cinder/volume/drivers/emc/emc_smis_common.py:1409 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:531 -msgid "Resetting network" +#: cinder/volume/drivers/emc/emc_smis_common.py:1419 +#, python-format +msgid "Device info: %(data)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:538 -msgid "Setting VCPU weight" +#: cinder/volume/drivers/emc/emc_smis_common.py:1441 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:544 -msgid "Starting VM" +#: cinder/volume/drivers/emc/emc_smis_common.py:1463 +#, python-format +msgid "Found Storage Processor System: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:551 +#: cinder/volume/drivers/emc/emc_smis_common.py:1491 #, python-format msgid "" -"Latest agent build for %(hypervisor)s/%(os)s/%(architecture)s is " -"%(version)s" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:554 -#, python-format -msgid "No agent build found for %(hypervisor)s/%(os)s/%(architecture)s" +#: cinder/volume/drivers/emc/emc_smis_common.py:1520 +msgid "Error finding Storage Hardware ID Service." msgstr "" -#: cinder/virt/xenapi/vmops.py:561 -msgid "Waiting for instance state to become running" +#: cinder/volume/drivers/emc/emc_smis_common.py:1526 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:573 -msgid "Querying agent version" +#: cinder/volume/drivers/emc/emc_smis_common.py:1538 +msgid "Error finding Target WWNs." msgstr "" -#: cinder/virt/xenapi/vmops.py:576 +#: cinder/volume/drivers/emc/emc_smis_common.py:1548 #, python-format -msgid "Instance agent version: %s" +msgid "Add target WWN: %s." msgstr "" -#: cinder/virt/xenapi/vmops.py:581 +#: cinder/volume/drivers/emc/emc_smis_common.py:1550 #, python-format -msgid "Updating Agent to %s" +msgid "Target WWNs: %s." msgstr "" -#: cinder/virt/xenapi/vmops.py:616 +#: cinder/volume/drivers/emc/emc_smis_common.py:1566 #, python-format -msgid "No opaque_ref could be determined for '%s'." +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." 
msgstr "" -#: cinder/virt/xenapi/vmops.py:670 -#, fuzzy, python-format -msgid "Finished snapshot and upload for VM" -msgstr "Finalizado el snapshot y la subida de la VM %s" +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:152 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" -#: cinder/virt/xenapi/vmops.py:677 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:161 #, fuzzy, python-format -msgid "Starting snapshot for VM" -msgstr "Comenzando snapshot para la VM %s" +msgid "Cannot find device number for volume %s" +msgstr "Imposible encontrar SR en VBD %s" -#: cinder/virt/xenapi/vmops.py:686 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:191 #, fuzzy, python-format -msgid "Unable to Snapshot instance: %(exc)s" -msgstr "Imposible adjuntar volumen a la instancia %s" - -#: cinder/virt/xenapi/vmops.py:702 -msgid "Failed to transfer vhd to new host" -msgstr "" +msgid "Found iSCSI endpoint: %s" +msgstr "No encontrado: %s" -#: cinder/virt/xenapi/vmops.py:770 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:198 #, python-format -msgid "Resizing down VDI %(cow_uuid)s from %(old_gb)dGB to %(new_gb)dGB" +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:893 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:215 #, python-format -msgid "Resizing up VDI %(vdi_uuid)s from %(old_gb)dGB to %(new_gb)dGB" +msgid "ISCSI properties: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:901 -msgid "Resize complete" +#: cinder/volume/drivers/hds/hds.py:70 +#, python-format +msgid "Range: start LU: %(start)s, end LU: %(end)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:928 +#: cinder/volume/drivers/hds/hds.py:84 #, python-format -msgid "Failed to query agent version: %(resp)r" +msgid "setting LU upper (end) limit to %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:949 +#: cinder/volume/drivers/hds/hds.py:92 #, python-format -msgid "domid changed from %(domid)s to %(newdomid)s" +msgid "%(element)s: %(val)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:962 +#: cinder/volume/drivers/hds/hds.py:103 cinder/volume/drivers/hds/hds.py:105 #, python-format -msgid "Failed to update agent: %(resp)r" +msgid "XML exception reading parameter: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:983 +#: cinder/volume/drivers/hds/hds.py:178 #, python-format -msgid "Failed to exchange keys: %(resp)r" +msgid "portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:998 +#: cinder/volume/drivers/hds/hds.py:197 #, python-format -msgid "Failed to update password: %(resp)r" +msgid "No configuration found for service: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1023 +#: cinder/volume/drivers/hds/hds.py:250 +#, fuzzy, python-format +msgid "HDP not found: %s" +msgstr "No se encuentra la dirección del enlace local.:%s" + +#: cinder/volume/drivers/hds/hds.py:289 #, python-format -msgid "Failed to inject file: %(resp)r" +msgid "iSCSI portal not found for service: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1032 -msgid "VM already halted, skipping shutdown..." +#: cinder/volume/drivers/hds/hds.py:327 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created." msgstr "" -#: cinder/virt/xenapi/vmops.py:1036 -msgid "Shutting down VM" +#: cinder/volume/drivers/hds/hds.py:355 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is cloned." msgstr "" -#: cinder/virt/xenapi/vmops.py:1054 -msgid "Unable to find VBD for VM" +#: cinder/volume/drivers/hds/hds.py:372 +#, python-format +msgid "LUN %(lun)s extended to %(size)s GB." 
msgstr "" -#: cinder/virt/xenapi/vmops.py:1097 -msgid "Using RAW or VHD, skipping kernel and ramdisk deletion" +#: cinder/volume/drivers/hds/hds.py:395 +#, python-format +msgid "delete lun %(lun)s on %(name)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1104 -msgid "instance has a kernel or ramdisk but not both" +#: cinder/volume/drivers/hds/hds.py:480 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created from snapshot." msgstr "" -#: cinder/virt/xenapi/vmops.py:1111 -msgid "kernel/ramdisk files removed" +#: cinder/volume/drivers/hds/hds.py:503 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is created as snapshot." msgstr "" -#: cinder/virt/xenapi/vmops.py:1121 -msgid "VM destroyed" +#: cinder/volume/drivers/hds/hds.py:522 +#, python-format +msgid "LUN %s is deleted." msgstr "" -#: cinder/virt/xenapi/vmops.py:1147 -msgid "Destroying VM" +#: cinder/volume/drivers/huawei/__init__.py:57 +msgid "_instantiate_driver: configuration not found." msgstr "" -#: cinder/virt/xenapi/vmops.py:1169 -msgid "VM is not present, skipping destroy..." +#: cinder/volume/drivers/huawei/__init__.py:64 +#, python-format +msgid "" +"_instantiate_driver: Loading %(protocol)s driver for Huawei OceanStor " +"%(product)s series storage arrays." msgstr "" -#: cinder/virt/xenapi/vmops.py:1222 +#: cinder/volume/drivers/huawei/__init__.py:84 #, python-format -msgid "Instance is already in Rescue Mode: %s" +msgid "" +"\"Product\" or \"Protocol\" is illegal. \"Product\" should be set to " +"either T, Dorado or HVS. \"Protocol\" should be set to either iSCSI or " +"FC. Product: %(product)s Protocol: %(protocol)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1296 +#: cinder/volume/drivers/huawei/huawei_dorado.py:74 #, python-format -msgid "Found %(instance_count)d hung reboots older than %(timeout)d seconds" +msgid "" +"initialize_connection: volume name: %(vol)s host: %(host)s initiator: " +"%(wwn)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1300 -msgid "Automatically hard rebooting" +#: cinder/volume/drivers/huawei/huawei_dorado.py:92 +#: cinder/volume/drivers/huawei/huawei_t.py:461 +#, python-format +msgid "initialize_connection: Target FC ports WWNS: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1363 +#: cinder/volume/drivers/huawei/huawei_t.py:101 #, python-format -msgid "Setting migration %(migration_id)s to error: %(reason)s" +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(ini)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1374 +#: cinder/volume/drivers/huawei/huawei_t.py:159 +#: cinder/volume/drivers/huawei/rest_common.py:1278 #, python-format msgid "" -"Automatically confirming migration %(migration_id)s for instance " -"%(instance_uuid)s" +"_get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " +"check config file." msgstr "" -#: cinder/virt/xenapi/vmops.py:1379 -#, fuzzy, python-format -msgid "Instance %(instance_uuid)s not found" -msgstr "La instacia %(instance_id)s no esta suspendida" - -#: cinder/virt/xenapi/vmops.py:1383 -msgid "In ERROR state" +#: cinder/volume/drivers/huawei/huawei_t.py:206 +#: cinder/volume/drivers/huawei/rest_common.py:1083 +#, python-format +msgid "_get_tgt_iqn: iSCSI IP is %s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1389 +#: cinder/volume/drivers/huawei/huawei_t.py:234 #, python-format -msgid "In %(task_state)s task_state, not RESIZE_VERIFY" +msgid "_get_tgt_iqn: iSCSI target iqn is %s." 
msgstr "" -#: cinder/virt/xenapi/vmops.py:1396 +#: cinder/volume/drivers/huawei/huawei_t.py:248 #, python-format -msgid "Error auto-confirming resize: %(e)s. Will retry later." +msgid "" +"_get_iscsi_tgt_port_info: Failed to get iSCSI port info. Please make sure" +" the iSCSI port IP %s is configured in array." msgstr "" -#: cinder/virt/xenapi/vmops.py:1418 -msgid "Could not get bandwidth info." +#: cinder/volume/drivers/huawei/huawei_t.py:323 +#: cinder/volume/drivers/huawei/huawei_t.py:552 +#, python-format +msgid "" +"terminate_connection: volume: %(vol)s, host: %(host)s, connector: " +"%(initiator)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1469 -#, fuzzy -msgid "Injecting network info to xenstore" -msgstr "configurando la red del host" +#: cinder/volume/drivers/huawei/huawei_t.py:351 +#, python-format +msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s." +msgstr "" -#: cinder/virt/xenapi/vmops.py:1483 -msgid "Creating vifs" +#: cinder/volume/drivers/huawei/huawei_t.py:436 +msgid "validate_connector: The FC driver requires thewwpns in the connector." msgstr "" -#: cinder/virt/xenapi/vmops.py:1492 -#, fuzzy, python-format -msgid "Creating VIF for network %(network_ref)s" -msgstr "Creando VIF para VM %(vm_ref)s, red %(network_ref)s." +#: cinder/volume/drivers/huawei/huawei_t.py:443 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(wwn)s" +msgstr "" -#: cinder/virt/xenapi/vmops.py:1495 -#, fuzzy, python-format -msgid "Created VIF %(vif_ref)s, network %(network_ref)s" -msgstr "Creando VIF para VM %(vm_ref)s, red %(network_ref)s." +#: cinder/volume/drivers/huawei/huawei_t.py:578 +#, python-format +msgid "_remove_fc_ports: FC port was not found on host %(hostid)s." +msgstr "" -#: cinder/virt/xenapi/vmops.py:1520 -msgid "Injecting hostname to xenstore" +#: cinder/volume/drivers/huawei/huawei_utils.py:40 +#, python-format +msgid "parse_xml_file: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1545 +#: cinder/volume/drivers/huawei/huawei_utils.py:129 #, python-format -msgid "" -"The agent call to %(method)s returned an invalid response: %(ret)r. " -"path=%(path)s; args=%(args)r" +msgid "_get_host_os_type: Host %(ip)s OS type is %(os)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1566 +#: cinder/volume/drivers/huawei/rest_common.py:59 #, python-format -msgid "TIMEOUT: The call to %(method)s timed out. args=%(args)r" +msgid "HVS Request URL: %(url)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1570 +#: cinder/volume/drivers/huawei/rest_common.py:60 #, python-format -msgid "" -"NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. " -"args=%(args)r" +msgid "HVS Request Data: %(data)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1575 +#: cinder/volume/drivers/huawei/rest_common.py:73 #, python-format -msgid "The call to %(method)s returned an error: %(e)s. 
args=%(args)r" +msgid "HVS Response Data: %(res)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1661 +#: cinder/volume/drivers/huawei/rest_common.py:75 #, python-format -msgid "OpenSSL error: %s" +msgid "Bad response from server: %s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:52 -msgid "creating sr within volume_utils" +#: cinder/volume/drivers/huawei/rest_common.py:82 +msgid "JSON transfer error" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:55 cinder/virt/xenapi/volume_utils.py:83 +#: cinder/volume/drivers/huawei/rest_common.py:102 #, python-format -msgid "type is = %s" +msgid "Login error, reason is %s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:58 cinder/virt/xenapi/volume_utils.py:86 +#: cinder/volume/drivers/huawei/rest_common.py:166 #, python-format -msgid "name = %s" +msgid "" +"%(err)s\n" +"result: %(res)s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:71 +#: cinder/volume/drivers/huawei/rest_common.py:173 #, python-format -msgid "Created %(label)s as %(sr_ref)s." +msgid "%s \"data\" was not in result." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:76 cinder/virt/xenapi/volume_utils.py:174 -msgid "Unable to create Storage Repository" -msgstr "Imposible crear el repositorio de almacenamiento" +#: cinder/volume/drivers/huawei/rest_common.py:208 +msgid "Can't find the Qos policy in array" +msgstr "" -#: cinder/virt/xenapi/volume_utils.py:80 -msgid "introducing sr within volume_utils" +#: cinder/volume/drivers/huawei/rest_common.py:246 +msgid "Can't find lun or lun group in array" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:103 cinder/virt/xenapi/volume_utils.py:170 -#: cinder/virt/xenapi/volumeops.py:156 +#: cinder/volume/drivers/huawei/rest_common.py:280 #, python-format -msgid "Introduced %(label)s as %(sr_ref)s." +msgid "Invalid resource pool: %s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:106 -msgid "Creating pbd for SR" +#: cinder/volume/drivers/huawei/rest_common.py:298 +#, python-format +msgid "Get pool info error, pool name is:%s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:108 -msgid "Plugging SR" +#: cinder/volume/drivers/huawei/rest_common.py:327 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:116 cinder/virt/xenapi/volumeops.py:160 -msgid "Unable to introduce Storage Repository" +#: cinder/volume/drivers/huawei/rest_common.py:354 +#, python-format +msgid "_stop_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:127 cinder/virt/xenapi/volumeops.py:50 -msgid "Unable to get SR using uuid" +#: cinder/volume/drivers/huawei/rest_common.py:474 +#, python-format +msgid "" +"_mapping_hostgroup_and_lungroup: lun_group: %(lun_group)sview_id: " +"%(view_id)s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:129 +#: cinder/volume/drivers/huawei/rest_common.py:511 +#: cinder/volume/drivers/huawei/rest_common.py:543 #, python-format -msgid "Forgetting SR %s..." +msgid "initiator name:%(initiator_name)s, volume name:%(volume)s." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:137 -msgid "Unable to forget Storage Repository" +#: cinder/volume/drivers/huawei/rest_common.py:527 +#, python-format +msgid "host lun id is %s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:157 +#: cinder/volume/drivers/huawei/rest_common.py:553 #, python-format -msgid "Introducing %s..." -msgstr "Introduciendo %s..." 
+msgid "the free wwns %s" +msgstr "" -#: cinder/virt/xenapi/volume_utils.py:186 +#: cinder/volume/drivers/huawei/rest_common.py:574 #, python-format -msgid "Unable to find SR from VBD %s" -msgstr "Imposible encontrar SR en VBD %s" +msgid "the fc server properties is:%s" +msgstr "" -#: cinder/virt/xenapi/volume_utils.py:204 +#: cinder/volume/drivers/huawei/rest_common.py:688 #, python-format -msgid "Ignoring exception %(exc)s when getting PBDs for %(sr_ref)s" +msgid "JSON transfer data error. %s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:210 +#: cinder/volume/drivers/huawei/rest_common.py:874 #, python-format -msgid "Ignoring exception %(exc)s when unplugging PBD %(pbd)s" +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:234 +#: cinder/volume/drivers/huawei/rest_common.py:937 #, python-format -msgid "Unable to introduce VDI on SR %s" -msgstr "Incapaz de insertar VDI en SR %s" +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(fetchtype)s" +msgstr "" -#: cinder/virt/xenapi/volume_utils.py:242 +#: cinder/volume/drivers/huawei/rest_common.py:964 #, python-format -msgid "Unable to get record of VDI %s on" -msgstr "Imposible obtener copia del VDI %s en" +msgid "" +"PrefetchType config is wrong. PrefetchType must in 1,2,3,4. fetchtype " +"is:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:970 +msgid "Use default prefetch fetchtype. Prefetch fetchtype:Intelligent." +msgstr "" -#: cinder/virt/xenapi/volume_utils.py:264 +#: cinder/volume/drivers/huawei/rest_common.py:982 #, python-format -msgid "Unable to introduce VDI for SR %s" -msgstr "Inposible insertar VDI para SR %s" +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal.LUNcopy name: " +"%(luncopyname)s" +msgstr "" -#: cinder/virt/xenapi/volume_utils.py:274 +#: cinder/volume/drivers/huawei/rest_common.py:1056 #, python-format -msgid "Error finding vdis in SR %s" +msgid "" +"_get_iscsi_port_info: Failed to get iscsi port info through config IP " +"%(ip)s, please check config file." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:281 +#: cinder/volume/drivers/huawei/rest_common.py:1101 #, python-format -msgid "Unable to find vbd for vdi %s" +msgid "_get_tgt_iqn: iSCSI target iqn is %s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:315 +#: cinder/volume/drivers/huawei/rest_common.py:1124 #, python-format -msgid "Unable to obtain target information %(data)s, %(mountpoint)s" +msgid "_parse_volume_type: type id: %(type_id)s config parameter is: %(params)s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:341 +#: cinder/volume/drivers/huawei/rest_common.py:1157 #, python-format -msgid "Mountpoint cannot be translated: %s" -msgstr "Punto de montaje no puede ser traducido: %s" +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the configuration file " +"%(conf)s." +msgstr "" -#: cinder/virt/xenapi/volumeops.py:64 -msgid "Could not find VDI ref" +#: cinder/volume/drivers/huawei/rest_common.py:1162 +#, python-format +msgid "The config parameters are: %s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:69 +#: cinder/volume/drivers/huawei/rest_common.py:1239 +#: cinder/volume/drivers/huawei/ssh_common.py:118 +#: cinder/volume/drivers/huawei/ssh_common.py:1265 #, python-format -msgid "Creating SR %s" +msgid "_check_conf_file: Config file invalid. %s must be set." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1246 +#: cinder/volume/drivers/huawei/ssh_common.py:125 +msgid "_check_conf_file: Config file invalid. StoragePool must be set." msgstr "" -#: cinder/virt/xenapi/volumeops.py:73 -msgid "Could not create SR" +#: cinder/volume/drivers/huawei/rest_common.py:1256 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType invalid.\n" +"The valid values are: %(os_list)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:76 -msgid "Could not retrieve SR record" +#: cinder/volume/drivers/huawei/rest_common.py:1300 +msgid "Can not find lun in array" msgstr "" -#: cinder/virt/xenapi/volumeops.py:81 +#: cinder/volume/drivers/huawei/ssh_common.py:54 #, python-format -msgid "Introducing SR %s" +msgid "ssh_read: Read SSH timeout. %s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:85 -msgid "SR found in xapi database. No need to introduce" +#: cinder/volume/drivers/huawei/ssh_common.py:70 +msgid "No response message. Please check system status." msgstr "" -#: cinder/virt/xenapi/volumeops.py:90 -msgid "Could not introduce SR" +#: cinder/volume/drivers/huawei/ssh_common.py:101 +#: cinder/volume/drivers/huawei/ssh_common.py:1249 +msgid "do_setup" msgstr "" -#: cinder/virt/xenapi/volumeops.py:94 +#: cinder/volume/drivers/huawei/ssh_common.py:135 +#: cinder/volume/drivers/huawei/ssh_common.py:1287 #, python-format -msgid "Checking for SR %s" +msgid "" +"_check_conf_file: Config file invalid. Host OSType is invalid.\n" +"The valid values are: %(os_list)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:106 +#: cinder/volume/drivers/huawei/ssh_common.py:169 #, python-format -msgid "SR %s not found in the xapi database" +msgid "_get_login_info: %s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:112 -msgid "Could not forget SR" +#: cinder/volume/drivers/huawei/ssh_common.py:224 +#, python-format +msgid "create_volume: volume name: %s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:121 +#: cinder/volume/drivers/huawei/ssh_common.py:242 #, python-format -msgid "Attach_volume: %(connection_info)s, %(instance_name)s, %(mountpoint)s" +msgid "" +"_name_translate: Name in cinder: %(old)s, new name in storage system: " +"%(new)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:178 +#: cinder/volume/drivers/huawei/ssh_common.py:279 #, python-format -msgid "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s" +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the element in configuration " +"file %(conf)s." msgstr "" -"No es posible crear el VDI en SR %(sr_ref)s para la instancia " -"%(instance_name)s" -#: cinder/virt/xenapi/volumeops.py:189 +#: cinder/volume/drivers/huawei/ssh_common.py:373 +#: cinder/volume/drivers/huawei/ssh_common.py:1451 #, python-format -msgid "Unable to use SR %(sr_ref)s for instance %(instance_name)s" -msgstr "No es posible usar SR %(sr_ref)s para la instancia %(instance_name)s" +msgid "LUNType must be \"Thin\" or \"Thick\". LUNType:%(type)s" +msgstr "" -#: cinder/virt/xenapi/volumeops.py:197 -#, python-format -msgid "Unable to attach volume to instance %s" -msgstr "Imposible adjuntar volumen a la instancia %s" +#: cinder/volume/drivers/huawei/ssh_common.py:395 +msgid "" +"_parse_conf_lun_params: Use default prefetch type. 
Prefetch type: " +"Intelligent" +msgstr "" -#: cinder/virt/xenapi/volumeops.py:200 +#: cinder/volume/drivers/huawei/ssh_common.py:421 #, python-format -msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s" +msgid "" +"_get_maximum_capacity_pool_id: Failed to get pool id. Please check config" +" file and make sure the StoragePool %s is created in storage array." msgstr "" -"El punto de montaje %(mountpoint)s esta unido a la instancia " -"%(instance_name)s" -#: cinder/virt/xenapi/volumeops.py:210 +#: cinder/volume/drivers/huawei/ssh_common.py:436 #, python-format -msgid "Detach_volume: %(instance_name)s, %(mountpoint)s" -msgstr "Volume_separado: %(instance_name)s, %(mountpoint)s" +msgid "CLI command: %s" +msgstr "" -#: cinder/virt/xenapi/volumeops.py:219 +#: cinder/volume/drivers/huawei/ssh_common.py:466 #, python-format -msgid "Unable to locate volume %s" -msgstr "Imposible encontrar volumen %s" +msgid "" +"_execute_cli: Can not connect to IP %(old)s, try to connect to the other " +"IP %(new)s." +msgstr "" -#: cinder/virt/xenapi/volumeops.py:227 +#: cinder/volume/drivers/huawei/ssh_common.py:501 #, python-format -msgid "Unable to detach volume %s" -msgstr "Imposible desasociar volumen %s" +msgid "_execute_cli: %s" +msgstr "" -#: cinder/virt/xenapi/volumeops.py:232 +#: cinder/volume/drivers/huawei/ssh_common.py:511 #, python-format -msgid "Unable to destroy vbd %s" +msgid "delete_volume: volume name: %s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:239 +#: cinder/volume/drivers/huawei/ssh_common.py:516 #, python-format -msgid "Error purging SR %s" +msgid "delete_volume: Volume %(name)s does not exist." msgstr "" -#: cinder/virt/xenapi/volumeops.py:241 +#: cinder/volume/drivers/huawei/ssh_common.py:570 #, python-format -msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s" +msgid "" +"create_volume_from_snapshot: snapshot name: %(snapshot)s, volume name: " +"%(volume)s" msgstr "" -"El punto de montaje %(mountpoint)s se desligó de la instancia " -"%(instance_name)s" -#: cinder/vnc/xvp_proxy.py:98 cinder/vnc/xvp_proxy.py:103 +#: cinder/volume/drivers/huawei/ssh_common.py:580 #, python-format -msgid "Error in handshake: %s" +msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." msgstr "" -#: cinder/vnc/xvp_proxy.py:119 +#: cinder/volume/drivers/huawei/ssh_common.py:650 #, python-format -msgid "Invalid request: %s" +msgid "_wait_for_luncopy: LUNcopy %(luncopyname)s status is %(status)s." msgstr "" -#: cinder/vnc/xvp_proxy.py:139 +#: cinder/volume/drivers/huawei/ssh_common.py:688 #, python-format -msgid "Request: %s" +msgid "create_cloned_volume: src volume: %(src)s, tgt volume: %(tgt)s" msgstr "" -#: cinder/vnc/xvp_proxy.py:142 +#: cinder/volume/drivers/huawei/ssh_common.py:697 #, python-format -msgid "Request made with missing token: %s" +msgid "Source volume %(name)s does not exist." msgstr "" -#: cinder/vnc/xvp_proxy.py:153 +#: cinder/volume/drivers/huawei/ssh_common.py:739 #, python-format -msgid "Request made with invalid token: %s" +msgid "" +"extend_volume: extended volume name: %(extended_name)s new added volume " +"name: %(added_name)s new added volume size: %(added_size)s" msgstr "" -#: cinder/vnc/xvp_proxy.py:160 +#: cinder/volume/drivers/huawei/ssh_common.py:747 #, python-format -msgid "Unexpected error: %s" +msgid "extend_volume: volume %s does not exist." 
msgstr "" -#: cinder/vnc/xvp_proxy.py:180 +#: cinder/volume/drivers/huawei/ssh_common.py:779 #, python-format -msgid "Starting cinder-xvpvncproxy node (version %s)" +msgid "create_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" msgstr "" -#: cinder/volume/api.py:74 cinder/volume/api.py:220 -msgid "status must be available" +#: cinder/volume/drivers/huawei/ssh_common.py:785 +msgid "create_snapshot: Resource pool needs 1GB valid size at least." msgstr "" -#: cinder/volume/api.py:85 +#: cinder/volume/drivers/huawei/ssh_common.py:792 #, python-format -msgid "Quota exceeded for %(pid)s, tried to create %(size)sG volume" +msgid "create_snapshot: Volume %(name)s does not exist." msgstr "" -#: cinder/volume/api.py:137 -#, fuzzy -msgid "Volume status must be available or error" -msgstr "El estado del volumen debe estar disponible" +#: cinder/volume/drivers/huawei/ssh_common.py:855 +#, python-format +msgid "delete_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" -#: cinder/volume/api.py:142 +#: cinder/volume/drivers/huawei/ssh_common.py:865 #, python-format -msgid "Volume still has %d dependent snapshots" +msgid "" +"delete_snapshot: Can not delete snapshot %s for it is a source LUN of " +"LUNCopy." msgstr "" -#: cinder/volume/api.py:223 -msgid "already attached" +#: cinder/volume/drivers/huawei/ssh_common.py:873 +#, python-format +msgid "delete_snapshot: Snapshot %(snap)s does not exist." msgstr "" -#: cinder/volume/api.py:230 -msgid "already detached" +#: cinder/volume/drivers/huawei/ssh_common.py:916 +#, python-format +msgid "" +"%(func)s: %(msg)s\n" +"CLI command: %(cmd)s\n" +"CLI out: %(out)s" msgstr "" -#: cinder/volume/api.py:292 -msgid "must be available" +#: cinder/volume/drivers/huawei/ssh_common.py:933 +#, python-format +msgid "map_volume: Volume %s was not found." msgstr "" -#: cinder/volume/api.py:325 -#, fuzzy -msgid "Volume Snapshot status must be available or error" -msgstr "El estado del volumen debe estar disponible" +#: cinder/volume/drivers/huawei/ssh_common.py:1079 +#, python-format +msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s." +msgstr "" -#: cinder/volume/driver.py:96 +#: cinder/volume/drivers/huawei/ssh_common.py:1102 #, python-format -msgid "Recovering from a failed execute. Try number %s" -msgstr "Recuperandose de una ejecución fallida. Intenta el número %s" +msgid "remove_map: Host %s does not exist." +msgstr "" -#: cinder/volume/driver.py:106 +#: cinder/volume/drivers/huawei/ssh_common.py:1106 #, python-format -msgid "volume group %s doesn't exist" -msgstr "el grupo de volumenes %s no existe" +msgid "remove_map: Volume %s does not exist." +msgstr "" -#: cinder/volume/driver.py:270 +#: cinder/volume/drivers/huawei/ssh_common.py:1119 #, python-format -msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %d" +msgid "remove_map: No map between host %(host)s and volume %(volume)s." msgstr "" -#: cinder/volume/driver.py:318 +#: cinder/volume/drivers/huawei/ssh_common.py:1138 #, python-format -msgid "Skipping remove_export. No iscsi_target provisioned for volume: %d" +msgid "" +"_delete_map: There are IOs accessing the system. Retry to delete host map" +" %(mapid)s 10s later." msgstr "" -#: cinder/volume/driver.py:327 +#: cinder/volume/drivers/huawei/ssh_common.py:1146 #, python-format msgid "" -"Skipping remove_export. 
No iscsi_target is presently exported for volume:" -" %d" +"_delete_map: Failed to delete host map %(mapid)s.\n" +"CLI out: %(out)s" msgstr "" -#: cinder/volume/driver.py:337 -msgid "ISCSI provider_location not stored, using discovery" +#: cinder/volume/drivers/huawei/ssh_common.py:1185 +msgid "_update_volume_stats: Updating volume stats." msgstr "" -#: cinder/volume/driver.py:384 +#: cinder/volume/drivers/huawei/ssh_common.py:1277 +msgid "_check_conf_file: Config file invalid. StoragePool must be specified." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1311 +msgid "" +"_get_device_type: The driver only supports Dorado5100 and Dorado 2100 G2 " +"now." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1389 #, python-format -msgid "Could not find iSCSI export for volume %s" +msgid "" +"create_volume_from_snapshot: %(device)s does not support create volume " +"from snapshot." msgstr "" -#: cinder/volume/driver.py:388 +#: cinder/volume/drivers/huawei/ssh_common.py:1396 #, python-format -msgid "ISCSI Discovery: Found %s" +msgid "create_cloned_volume: %(device)s does not support clone volume." msgstr "" -#: cinder/volume/driver.py:466 +#: cinder/volume/drivers/huawei/ssh_common.py:1404 #, python-format -msgid "Cannot confirm exported volume id:%(volume_id)s." +msgid "extend_volume: %(device)s does not support extend volume." msgstr "" -#: cinder/volume/driver.py:493 +#: cinder/volume/drivers/huawei/ssh_common.py:1413 #, python-format -msgid "FAKE ISCSI: %s" -msgstr "Falso ISCSI: %s" +msgid "create_snapshot: %(device)s does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:132 +msgid "enter: do_setup" +msgstr "" -#: cinder/volume/driver.py:505 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:142 #, python-format -msgid "rbd has no pool %s" +msgid "Failed getting details for pool %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:179 +msgid "do_setup: No configured nodes." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:182 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:186 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:190 +msgid "Unable to determine system name" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:193 +msgid "Unable to determine system id" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:196 +msgid "Unable to determine pool extent size" msgstr "" -#: cinder/volume/driver.py:579 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:203 +#: cinder/volume/drivers/netapp/iscsi.py:122 +#: cinder/volume/drivers/netapp/nfs.py:639 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:157 #, python-format -msgid "Sheepdog is not working: %s" +msgid "%s is not set" msgstr "" -#: cinder/volume/driver.py:581 -msgid "Sheepdog is not working" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:209 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" msgstr "" -#: cinder/volume/driver.py:680 cinder/volume/driver.py:685 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:217 #, python-format -msgid "LoggingVolumeDriver: %s" +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:225 +msgid "leave: check_for_setup_error" msgstr "" -#: cinder/volume/manager.py:96 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:235 #, python-format -msgid "Re-exporting %s volumes" -msgstr "Exportando de nuevo los volumenes %s" +msgid "ensure_export: Volume %s not found on storage" +msgstr "" -#: cinder/volume/manager.py:101 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:254 +msgid "The connector does not contain the required information." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:277 #, python-format -msgid "volume %s: skipping export" -msgstr "volume %s: saltando exportación" +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:297 +msgid "CHAP secret exists for host but CHAP is disabled" +msgstr "" -#: cinder/volume/manager.py:107 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:302 #, python-format -msgid "volume %s: creating" -msgstr "volumen %s: creando" +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" -#: cinder/volume/manager.py:119 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:314 #, python-format -msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" -msgstr "volume %(vol_name)s: creando lv del tamaño %(vol_size)sG" +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" -#: cinder/volume/manager.py:131 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:316 #, python-format -msgid "volume %s: creating export" -msgstr "volumen %s: exportando" +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" -#: cinder/volume/manager.py:144 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:333 #, python-format -msgid "volume %s: created successfully" -msgstr "volumen %s: creado satisfactoriamente" +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" -#: cinder/volume/manager.py:153 -msgid "Volume is still attached" -msgstr "El volumen todavía está asociado" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:342 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" -#: cinder/volume/manager.py:155 -msgid "Volume is not local to this node" -msgstr "Volumen no local a este nodo" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:365 +msgid "" +"Could not get FC connection information for the host-volume connection. " +"Is the host configured properly for FC connections?" +msgstr "" -#: cinder/volume/manager.py:159 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:380 #, python-format -msgid "volume %s: removing export" -msgstr "volumen %s: eliminando exportación" +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" -#: cinder/volume/manager.py:161 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:385 #, python-format -msgid "volume %s: deleting" -msgstr "volumen %s: eliminando" +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" -#: cinder/volume/manager.py:164 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:403 #, python-format -msgid "volume %s: volume is busy" +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:411 +msgid "terminate_connection: Failed to get host name from connector." msgstr "" -#: cinder/volume/manager.py:176 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:421 #, python-format -msgid "volume %s: deleted successfully" -msgstr "volumen %s: eliminado satisfactoriamente" +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:447 +msgid "create_volume_from_snapshot: Source and destination size differ." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:459 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" -#: cinder/volume/manager.py:183 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:470 #, python-format -msgid "snapshot %s: creating" +msgid "enter: extend_volume: volume %s" msgstr "" -#: cinder/volume/manager.py:187 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:474 +msgid "extend_volume: Extending a volume with snapshots is not supported." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:481 #, python-format -msgid "snapshot %(snap_name)s: creating" +msgid "leave: extend_volume: volume %s" msgstr "" -#: cinder/volume/manager.py:202 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:497 #, python-format -msgid "snapshot %s: created successfully" +msgid "enter: migrate_volume: id=%(id)s, host=%(host)s" msgstr "" -#: cinder/volume/manager.py:211 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:523 #, python-format -msgid "snapshot %s: deleting" +msgid "leave: migrate_volume: id=%(id)s, host=%(host)s" msgstr "" -#: cinder/volume/manager.py:214 -#, fuzzy, python-format -msgid "snapshot %s: snapshot is busy" -msgstr "instancia %s: creando snapshot" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:540 +#, python-format +msgid "" +"enter: retype: id=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" -#: cinder/volume/manager.py:226 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:581 #, python-format -msgid "snapshot %s: deleted successfully" +msgid "" +"exit: retype: ild=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:622 +msgid "Could not get pool data from the storage" msgstr "" -#: cinder/volume/manager.py:310 -msgid "Checking volume capabilities" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:623 +msgid "_update_volume_stats: Could not get storage pool data" msgstr "" -#: cinder/volume/manager.py:314 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:44 #, python-format -msgid "New capabilities found: %s" +msgid "Could not find key in output of command %(cmd)s: %(out)s" msgstr "" -#: cinder/volume/manager.py:325 -msgid "Clear capabilities" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:64 +#, python-format +msgid "Failed to get code level (%s)." 
msgstr "" -#: cinder/volume/manager.py:329 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:86 #, python-format -msgid "Notification {%s} received" +msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s" msgstr "" -#: cinder/volume/netapp.py:79 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:143 #, python-format -msgid "API %(name)sfailed: %(reason)s" +msgid "WWPN on node %(node)s: %(wwpn)s" msgstr "" -#: cinder/volume/netapp.py:109 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:165 #, python-format -msgid "%s is not set" +msgid "Failed to find host %s" msgstr "" -#: cinder/volume/netapp.py:128 -#, fuzzy -msgid "Connected to DFM server" -msgstr "Reconectado a la cola" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:178 +#, python-format +msgid "enter: get_host_from_connector: %s" +msgstr "" -#: cinder/volume/netapp.py:159 -#, fuzzy, python-format -msgid "Job failed: %s" -msgstr "No encontrado: %s" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:207 +#, python-format +msgid "leave: get_host_from_connector: host %s" +msgstr "" -#: cinder/volume/netapp.py:240 -msgid "Failed to provision dataset member" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:218 +#, python-format +msgid "enter: create_host: host %s" msgstr "" -#: cinder/volume/netapp.py:252 -msgid "No LUN was created by the provision job" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:224 +msgid "create_host: Host name is not unicode or string" msgstr "" -#: cinder/volume/netapp.py:261 cinder/volume/netapp.py:433 -#, fuzzy, python-format -msgid "Failed to find LUN ID for volume %s" -msgstr "Imposible encontrar SR en VBD %s" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:235 +msgid "create_host: No initiators or wwpns supplied." +msgstr "" -#: cinder/volume/netapp.py:280 -msgid "Failed to remove and delete dataset member" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:265 +#, python-format +msgid "leave: create_host: host %(host)s - %(host_name)s" msgstr "" -#: cinder/volume/netapp.py:603 cinder/volume/netapp.py:657 -#, fuzzy, python-format -msgid "No LUN ID for volume %s" -msgstr "Imposible encontrar volumen %s" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:275 +#, python-format +msgid "enter: map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" -#: cinder/volume/netapp.py:607 cinder/volume/netapp.py:661 -#, fuzzy, python-format -msgid "Failed to get LUN details for LUN ID %s" -msgstr "Fallo al generar metadatos para la ip %s" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:301 +#, python-format +msgid "" +"leave: map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host " +"%(host_name)s" +msgstr "" -#: cinder/volume/netapp.py:614 -#, fuzzy, python-format -msgid "Failed to get host details for host ID %s" -msgstr "Fallo al generar metadatos para la ip %s" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:311 +#, python-format +msgid "enter: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" -#: cinder/volume/netapp.py:620 -#, fuzzy, python-format -msgid "Failed to get target portal for filer: %s" -msgstr "Fallo al generar metadatos para la ip %s" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:318 +#, python-format +msgid "unmap_vol_from_host: No mapping of volume %(vol_name)s to any host found." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:324 +#, python-format +msgid "" +"unmap_vol_from_host: Multiple mappings of volume %(vol_name)s found, no " +"host specified." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:336 +#, python-format +msgid "" +"unmap_vol_from_host: No mapping of volume %(vol_name)s to host %(host) " +"found." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:348 +#, python-format +msgid "leave: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:377 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:383 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:390 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:397 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:402 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:408 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:417 +#, python-format +msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:452 +msgid "Protocol must be specified as ' iSCSI' or ' FC'." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:495 +#, python-format +msgid "enter: create_vdisk: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:498 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:525 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping%(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:535 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within theallotted %(to)d " +"seconds timeout. Terminating." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:544 +#, python-format +msgid "" +"enter: run_flashcopy: execute FlashCopy from source %(source)s to target " +"%(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:552 +#, python-format +msgid "leave: run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:572 +#, python-format +msgid "Loopcall: _check_vdisk_fc_mappings(), vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:595 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:631 +#, python-format +msgid "Calling _ensure_vdisk_no_fc_mappings: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:639 +#, python-format +msgid "enter: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:641 +#, python-format +msgid "Tried to delete non-existant vdisk %s." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:645 +#, python-format +msgid "leave: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:649 +#, python-format +msgid "enter: create_copy: snapshot %(src)s to %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:654 +#, python-format +msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:669 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt)s from vdisk %(src)s" +msgstr "" -#: cinder/volume/netapp.py:625 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:691 +msgid "migrate_volume started without a vdisk copy in the expected pool." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:743 +#, python-format +msgid "" +"Ignore change IO group as storage code level is %(code_level)s, below " +"then 6.4.0.0" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:35 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:211 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:244 #, fuzzy, python-format -msgid "Failed to get target IQN for filer: %s" -msgstr "Fallo al generar metadatos para la ip %s" +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" +"%(description)s\n" +"Comando: %(cmd)s\n" +"Código de salida: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" -#: cinder/volume/san.py:113 cinder/volume/san.py:151 -msgid "Specify san_password or san_private_key" +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:53 +#, python-format +msgid "Expected no output from CLI command %(cmd)s, got %(out)s" msgstr "" -#: cinder/volume/san.py:156 -msgid "san_ip must be set" +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:65 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:256 +#, python-format +msgid "" +"Failed to parse CLI output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:142 +msgid "Must pass wwpn or host to lsfabric." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:158 +#, python-format +msgid "Did not find success message nor error for %(fun)s: %(out)s" msgstr "" -#: cinder/volume/san.py:320 -#, python-format -msgid "LUID not found for %(zfs_poolname)s. 
Output=%(out)s"
-msgstr ""
+#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:163
+msgid ""
+"storwize_svc_multihostmap_enabled is set to False, not allowing multi "
+"host mapping."
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:347
+#, python-format
+msgid "Did not find expected key %(key)s in %(fun)s: %(raw)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:382
+#, python-format
+msgid ""
+"Unexpected CLI response: header/row mismatch. header: %(header)s, row: "
+"%(row)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/api.py:419
+#, python-format
+msgid "No element by given name %s."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/api.py:440
+msgid "Not a valid value for NaElement."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/api.py:444
+msgid "NaElement name cannot be null."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/api.py:468
+msgid "Type cannot be converted into NaElement."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/common.py:75
+msgid "Required configuration not found"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/common.py:103
+#, python-format
+msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/common.py:109
+#, python-format
+msgid "Storage family %s is not supported"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/common.py:116
+#, python-format
+msgid "No default storage protocol found for storage family %(storage_family)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/common.py:123
+#, python-format
+msgid ""
+"Protocol %(storage_protocol)s is not supported for storage family "
+"%(storage_family)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/common.py:130
+#, python-format
+msgid ""
+"NetApp driver of family %(storage_family)s and protocol "
+"%(storage_protocol)s loaded"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/common.py:139
+msgid "Only loading netapp drivers supported."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/common.py:158
+#, python-format
+msgid ""
+"The configured NetApp driver is deprecated. Please refer to the link to "
+"resolve the issue '%s'."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:69
+#, python-format
+msgid "No metadata property %(prop)s defined for the LUN %(name)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:105
+#, fuzzy, python-format
+msgid "Using NetApp filer: %s"
+msgstr "Ejecutando instancias: %s"
+
+#: cinder/volume/drivers/netapp/iscsi.py:150
+msgid "Success getting LUN list from server"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:166
+#, python-format
+msgid "Created LUN with name %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:175
+#, python-format
+msgid "No entry in LUN table for volume/snapshot %(name)s."
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:191 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:227 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:232 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:238 +#, fuzzy, python-format +msgid "Failed to get LUN target details for the LUN %s" +msgstr "Fallo al generar metadatos para la ip %s" + +#: cinder/volume/drivers/netapp/iscsi.py:249 +#, fuzzy, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "Fallo al generar metadatos para la ip %s" + +#: cinder/volume/drivers/netapp/iscsi.py:252 +#, fuzzy, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "Fallo al generar metadatos para la ip %s" + +#: cinder/volume/drivers/netapp/iscsi.py:290 +#, fuzzy, python-format +msgid "Snapshot %s deletion successful" +msgstr "volumen %s: eliminado satisfactoriamente" + +#: cinder/volume/drivers/netapp/iscsi.py:310 +#: cinder/volume/drivers/netapp/iscsi.py:565 +#: cinder/volume/drivers/netapp/nfs.py:99 +#: cinder/volume/drivers/netapp/nfs.py:206 +#, python-format +msgid "Resizing %s failed. Cleaning volume." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:325 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:412 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:431 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:511 +msgid "Object is not a NetApp LUN." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:543 +#, python-format +msgid "Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:545 +#, python-format +msgid "Error getting lun attribute. Exception: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:600 +#, python-format +msgid "No need to extend volume %s as it is already the requested new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:606 +#, python-format +msgid "Resizing lun %s directly to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:633 +#, python-format +msgid "Lun %(path)s geometry failed. Message - %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:662 +#, python-format +msgid "Moving lun %(name)s to %(new_name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:677 +#, python-format +msgid "Resizing lun %s using sub clone to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:684 +#, python-format +msgid "%s cannot be sub clone resized as it is hosted on compressed volume" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:690 +#, python-format +msgid "%s cannot be sub clone resized as it contains no blocks." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:707 +#, python-format +msgid "Post clone resize lun %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:718 +#, python-format +msgid "Failure staging lun %s to tmp." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:723 +#, python-format +msgid "Failure moving new cloned lun to %s." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:727 +#, python-format +msgid "Failure deleting staged tmp lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:730 +#, python-format +msgid "Unknown exception in post clone resize lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:732 +#, python-format +msgid "Exception details: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:736 +msgid "Getting lun block count." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:741 +#, python-format +msgid "Failure getting lun info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:785 +#, python-format +msgid "Failed to get vol with required size and extra specs for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:796 +#, python-format +msgid "Error provisioning vol %(name)s on %(volume)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:841 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:982 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:986 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1087 +msgid "Cluster ssc is not updated. No volume stats found." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1149 +#: cinder/volume/drivers/netapp/nfs.py:1080 +msgid "Unsupported ONTAP version. ONTAP version 7.3.1 and above is supported." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1153 +#: cinder/volume/drivers/netapp/nfs.py:1084 +#: cinder/volume/drivers/netapp/utils.py:320 +msgid "Api version could not be determined." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1164 +#, fuzzy, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "Fallo al generar metadatos para la ip %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1273 +#, python-format +msgid "Error finding luns for volume %s. Verify volume exists." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1390 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1393 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1456 +msgid "Volume refresh job already running. Returning..." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1462 +#, python-format +msgid "Error refreshing vol capacity. Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1470 +#, python-format +msgid "Refreshing capacity info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:104 +#: cinder/volume/drivers/netapp/nfs.py:211 +#, python-format +msgid "NFS file %s not discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:224 +#, python-format +msgid "Copied image to volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:230 +#, python-format +msgid "Registering image in cache %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:237 +#, python-format +msgid "" +"Exception while registering image %(image_id)s in cache. 
Exception: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:250 +#, python-format +msgid "Found cache file for image %(image_id)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:263 +#, python-format +msgid "Cloning img from cache for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:271 +msgid "Image cache cleaning in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:282 +msgid "Image cache cleaning in progress." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:293 +#, python-format +msgid "Cleaning cache for share %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:298 +#, python-format +msgid "Files to be queued for deletion %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:305 +#, python-format +msgid "Exception during cache cleaning %(share)s. Message - %(ex)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:311 +msgid "Image cache cleaning done." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:336 +#, python-format +msgid "Bytes to free %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:343 +#, python-format +msgid "Delete file path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:358 +#, python-format +msgid "Deleting file at path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:363 +#, python-format +msgid "Exception during deleting %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:395 +#, python-format +msgid "Unexpected exception in cloning image %(image_id)s. Message: %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:411 +#, python-format +msgid "Cloning image %s from cache" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:415 +#, python-format +msgid "Cache share: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:425 +#, python-format +msgid "Unexpected exception during image cloning in share %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:431 +#, python-format +msgid "Cloning image %s directly in share" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:436 +#, python-format +msgid "Share is cloneable %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:443 +#, python-format +msgid "Image is raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:450 +#, python-format +msgid "Image will locally be converted to raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:457 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:467 +#, python-format +msgid "Performing post clone for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:474 +msgid "NFS file could not be discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:478 +msgid "Checking file for resize" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:482 +#, python-format +msgid "Resizing file to %sG" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:488 +msgid "Resizing image file failed." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:510 +msgid "Discover file retries exhausted." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:529 +#, python-format +msgid "Image location not in the expected format %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:557 +#, python-format +msgid "Found possible share matches %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:561 +msgid "Unexpected exception while short listing used share." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:599 +#, python-format +msgid "Extending volume %s." 
+msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:710 +#, python-format +msgid "Shares on vserver %s will only be used for provisioning." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:714 +#: cinder/volume/drivers/netapp/nfs.py:892 +msgid "No vserver set in config. SSC will be disabled." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:757 +#, python-format +msgid "Exception creating vol %(name)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:765 +#, python-format +msgid "Volume %s could not be created on shares." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:815 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:856 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:862 +#, python-format +msgid "" +"Cloning with params volume %(volume)s, src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:924 +msgid "No cluster ssc stats found. Wait for next volume stats update." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:941 +msgid "No shares found hence skipping ssc refresh." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:978 +#: cinder/volume/drivers/netapp/nfs.py:1221 +#, python-format +msgid "Shortlisted del elg files %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:983 +#: cinder/volume/drivers/netapp/nfs.py:1226 +#, python-format +msgid "Getting file usage for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:988 +#: cinder/volume/drivers/netapp/nfs.py:1231 +#, python-format +msgid "file-usage for path %(path)s is %(bytes)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1005 +#: cinder/volume/drivers/netapp/nfs.py:1268 +#, python-format +msgid "Share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1007 +#: cinder/volume/drivers/netapp/nfs.py:1270 +#, python-format +msgid "No share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1038 +#, python-format +msgid "Found volume %(vol)s for share %(share)s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1129 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1139 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:241 +#, python-format +msgid "Unexpected error while creating ssc vol list. Message - %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:272 +#, python-format +msgid "Exception querying aggr options. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:313 +#, python-format +msgid "Exception querying sis information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:347 +#, python-format +msgid "Exception querying mirror information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:379 +#, python-format +msgid "Exception querying storage disk. 
%s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:421 +#, python-format +msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:455 +#, python-format +msgid "Successfully completed stale refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:482 +#, python-format +msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:488 +#, python-format +msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:500 +msgid "Backend not a VolumeDriver." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:502 +msgid "Backend server not NaServer." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:505 +msgid "ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:517 +msgid "refresh stale ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:616 +msgid "Fatal error: User not permitted to query NetApp volumes." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:623 +#, python-format +msgid "" +"The user does not have access or sufficient privileges to use all ssc " +"apis. The ssc features %s may not work as expected." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:122 +msgid "ems executed successfully." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:124 +#, python-format +msgid "Failed to invoke ems. Message : %s" +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:137 +msgid "" +"It is not the recommended way to use drivers by NetApp. Please use " +"NetAppDriver to achieve the functionality." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:160 +msgid "Requires an NaServer instance." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:317 +msgid "Unsupported Clustered Data ONTAP version." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:99 +#, fuzzy, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "el grupo de volumenes %s no existe" + +#: cinder/volume/drivers/nexenta/iscsi.py:150 +#, python-format +msgid "Extending volume: %(id)s New size: %(size)s GB" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:166 +#, python-format +msgid "Volume %s does not exist, it seems it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:179 +#, python-format +msgid "Cannot delete snapshot %(origin): %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:190 +#, python-format +msgid "Creating temp snapshot of the original volume: %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:200 +#: cinder/volume/drivers/nexenta/nfs.py:200 +#, python-format +msgid "Volume creation failed, deleting created snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:205 +#: cinder/volume/drivers/nexenta/nfs.py:205 +#, python-format +msgid "Failed to delete zfs snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:223 +#, python-format +msgid "Enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:250 +#, python-format +msgid "Remote NexentaStor appliance at %s should be SSH-bound." 
+msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:267 +#, python-format +msgid "" +"Cannot send source snapshot %(src)s to destination %(dst)s. Reason: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:275 +#, python-format +msgid "" +"Cannot delete temporary source snapshot %(src)s on NexentaStor Appliance:" +" %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:281 +#, python-format +msgid "Cannot delete source volume %(volume)s on NexentaStor Appliance: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:318 +#, python-format +msgid "Snapshot %s does not exist, it seems it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:439 +#: cinder/volume/drivers/windows/windows_utils.py:230 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:449 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:461 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:471 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:481 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:514 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:522 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:83 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:88 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:89 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:90 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:96 +#, fuzzy, python-format +msgid "Got response: %s" +msgstr "respuesta %s" + +#: cinder/volume/drivers/nexenta/nfs.py:85 +#, python-format +msgid "Volume %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:89 +#, python-format +msgid "Folder %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:114 +#, python-format +msgid "Creating folder on Nexenta Store %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:146 +#, python-format +msgid "Cannot destroy created folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:176 +#, python-format +msgid "Cannot destroy cloned folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:227 +#, python-format +msgid "Folder %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:237 +#: cinder/volume/drivers/nexenta/nfs.py:268 +#, python-format +msgid "Snapshot %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:302 +#, python-format +msgid "Creating regular file: %s.This may take some time." 
+msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:313 +#, python-format +msgid "Regular file: %s created." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:365 +#, python-format +msgid "Sharing folder %s on Nexenta Store" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:393 +#, python-format +msgid "Shares loaded: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/utils.py:46 +#, python-format +msgid "Invalid value: \"%s\"" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:93 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:99 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:107 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:137 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:190 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:246 +#, python-format +msgid "Snapshot info: %(name)s => %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:321 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:79 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:166 +#, python-format +msgid "Invalid hp3parclient version. Version %s or greater required." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:179 +#, python-format +msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:193 +#, python-format +msgid "HP3PARCommon %(common_ver)s, hp3parclient %(rest_ver)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:212 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:494 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:228 +#, python-format +msgid "Failed to get domain because CPG (%s) doesn't exist on array." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:247 +#, python-format +msgid "Error extending volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:347 +#, python-format +msgid "command %s failed" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:390 +#, fuzzy, python-format +msgid "Error running ssh command: %s" +msgstr "Error inesperado mientras el comando se ejecutaba" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:586 +#, python-format +msgid "VV Set %s does not exist." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:633 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:684 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:752 +#, fuzzy, python-format +msgid "Volume (%s) already exists on array" +msgstr "el grupo %s ya existe" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1004 +#, python-format +msgid "Failure in update_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1019 +#, python-format +msgid "Failure in clear_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1031 +#, python-format +msgid "Error attaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1039 +#, python-format +msgid "Error detaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:124 +#, python-format +msgid "Invalid IP address format '%s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:158 +#, python-format +msgid "" +"Found invalid iSCSI IP address(s) in configuration option(s) " +"hp3par_iscsi_ips or iscsi_ip_address '%s.'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:164 +msgid "At least one valid iSCSI IP address must be set." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:266 +msgid "Least busy iSCSI port not found, using first iSCSI port in list." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:75 +#, python-format +msgid "Failure while invoking function: %(func)s. Error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:162 +#, python-format +msgid "Error while terminating session: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:165 +msgid "Successfully established connection to the server." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:172 +#, python-format +msgid "Error while logging out the user: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:218 +#, python-format +msgid "" +"Not authenticated error occurred. Will create session and try API call " +"again: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:258 +#, python-format +msgid "Task: %(task)s progress: %(prog)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:262 +#, python-format +msgid "Task %s status: success." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:266 +#: cinder/volume/drivers/vmware/api.py:271 +#, python-format +msgid "Task: %(task)s failed with error: %(err)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:290 +msgid "Lease is ready." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:294 +msgid "Lease initializing..." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:304 +#, python-format +msgid "Error: unknown lease state %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:51 +#, python-format +msgid "Read %(bytes)s out of %(max)s from ThreadSafePipe." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:56 +#, python-format +msgid "Completed transfer of size %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:102 +#, python-format +msgid "Initiating image service update on image: %(image)s with meta: %(meta)s" +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:117 +#, python-format +msgid "Glance image: %s is now active." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:123 +#, python-format +msgid "Glance image: %s is in killed state." 
+msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:132 +#, python-format +msgid "Glance image %(id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:171 +#, python-format +msgid "" +"Exception during HTTP connection close in VMwareHTTPWrite. Exception is " +"%s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:203 +#: cinder/volume/drivers/vmware/read_write_util.py:292 +msgid "Could not retrieve URL from lease." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:206 +#, python-format +msgid "Opening vmdk url: %s for write." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:231 +#, python-format +msgid "Written %s bytes to vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:242 +#: cinder/volume/drivers/vmware/read_write_util.py:318 +#, python-format +msgid "Updating progress to %s percent." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:258 +#: cinder/volume/drivers/vmware/read_write_util.py:334 +msgid "Lease released." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:260 +#: cinder/volume/drivers/vmware/read_write_util.py:336 +#, python-format +msgid "Lease is already in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:295 +#, python-format +msgid "Opening vmdk url: %s for read." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:307 +#, python-format +msgid "Read %s bytes from vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:150 +#, python-format +msgid "Error(s): %s occurred in the call to RetrievePropertiesEx." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:189 +#, python-format +msgid "No such SOAP method %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:198 +#, python-format +msgid "httplib error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:209 +#, python-format +msgid "Socket error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:218 +#, python-format +msgid "Type error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:225 +#, python-format +msgid "Error in %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:112 +#, python-format +msgid "Returning spec value %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:115 +#, python-format +msgid "Invalid spec value: %s specified." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:118 +#, python-format +msgid "Returning default spec value: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:169 +#, python-format +msgid "%s not set." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:174 +#, python-format +msgid "Successfully setup driver: %(driver)s for server: %(ip)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:219 +msgid "Backing not available, no operation to be performed." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:287 +#, python-format +msgid "" +"Unable to pick datastore to accommodate %(size)s bytes from the " +"datastores: %(dss)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:293 +#, python-format +msgid "" +"Selected datastore: %(datastore)s with %(host_count)d connected host(s) " +"for the volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:375 +#, python-format +msgid "" +"Unable to find suitable datastore for volume of size: %(vol)s GB under " +"host: %(host)s. 
More details: %(excep)s"
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:385
+#, python-format
+msgid "Unable to find host to accommodate a disk of size: %s in the inventory."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:412
+#, python-format
+msgid ""
+"Unable to find suitable datastore for volume: %(vol)s under host: "
+"%(host)s. More details: %(excep)s"
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:422
+#, python-format
+msgid "Unable to create volume: %s in the inventory."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:441
+#, python-format
+msgid "The instance: %s for which initialize connection is called, exists."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:448
+#, python-format
+msgid "There is no backing for the volume: %s. Need to create one."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:456
+msgid "The instance for which initialize connection is called, does not exist."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:461
+#, python-format
+msgid "Trying to boot from an empty volume: %s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:470
+#, python-format
+msgid ""
+"Returning connection_info: %(info)s for volume: %(volume)s with "
+"connector: %(connector)s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:518
+#, python-format
+msgid "Snapshot of volume not supported in state: %s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:523
+#, python-format
+msgid "There is no backing, so will not create snapshot: %s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:528
+#, python-format
+msgid "Successfully created snapshot: %s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:549
+#, python-format
+msgid "Delete snapshot of volume not supported in state: %s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:554
+#, python-format
+msgid "There is no backing, and so there is no snapshot: %s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:558
+#, python-format
+msgid "Successfully deleted snapshot: %s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:586
+#, python-format
+msgid "Successfully cloned new backing: %(back)s from source VMDK file: %(vmdk)s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:603
+#, python-format
+msgid ""
+"There is no backing for the source volume: %(svol)s. Not creating any "
+"backing for the volume: %(vol)s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:633
+#, python-format
+msgid ""
+"There is no backing for the source snapshot: %(snap)s. Not creating any "
+"backing for the volume: %(vol)s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:642
+#: cinder/volume/drivers/vmware/vmdk.py:982
+#, python-format
+msgid ""
+"There is no snapshot point for the snapshotted volume: %(snap)s. Not "
+"creating any backing for the volume: %(vol)s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:678
+#, python-format
+msgid "Cannot create image of disk format: %s. Only vmdk disk format is accepted."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:713
+#: cinder/volume/drivers/vmware/vmdk.py:771
+#, python-format
+msgid "Fetching glance image: %(id)s to server: %(host)s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:722
+#: cinder/volume/drivers/vmware/vmdk.py:792
+#, python-format
+msgid "Done copying image: %(id)s to volume: %(vol)s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:725
+#, python-format
+msgid ""
+"Exception in copy_image_to_volume: %(excep)s. Deleting the backing: "
+"%(back)s."
+msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:746 +#, python-format +msgid "Exception in _select_ds_for_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:749 +#, python-format +msgid "Selected datastore %(ds)s for new volume of size %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:784 +#, python-format +msgid "Exception in copy_image_to_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:787 +#, python-format +msgid "Deleting the backing: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:808 +#, python-format +msgid "Copy glance image: %s to create new volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:842 +msgid "Upload to glance of attached volume is not supported." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:847 +#, python-format +msgid "Copy Volume: %s to new image." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:853 +#, python-format +msgid "Backing not found, creating for volume: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:872 +#, python-format +msgid "Done copying volume %(vol)s to a new image %(img)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:922 +#, python-format +msgid "Relocating volume: %(backing)s to %(ds)s and %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:961 +#: cinder/volume/drivers/vmware/volumeops.py:630 +#, python-format +msgid "Successfully created clone: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:974 +#, python-format +msgid "" +"There is no backing for the snapshoted volume: %(snap)s. Not creating any" +" backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1010 +#, python-format +msgid "" +"There is no backing for the source volume: %(src)s. Not creating any " +"backing for volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1018 +#, python-format +msgid "Linked clone of source volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:94 +#, python-format +msgid "Downloading image: %s from glance image server as a flat vmdk file." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:107 +#: cinder/volume/drivers/vmware/vmware_images.py:126 +#, python-format +msgid "Downloaded image: %s from glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:113 +#, python-format +msgid "Downloading image: %s from glance image server using HttpNfc import." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:132 +#, python-format +msgid "Uploading image: %s to the Glance image server using HttpNfc export." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:158 +#, python-format +msgid "Uploaded image: %s to the Glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:87 +#, python-format +msgid "Did not find any backing with name: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:94 +#, python-format +msgid "Deleting the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:97 +#, python-format +msgid "Initiated deletion of VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:99 +#, python-format +msgid "Deleted the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:237 +#, python-format +msgid "There are no valid datastores attached to %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:289 +#, python-format +msgid "" +"Creating folder: %(child_folder_name)s under parent folder: " +"%(parent_folder)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:306 +#, python-format +msgid "Child folder already present: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:314 +#, python-format +msgid "Created child folder: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:365 +#, python-format +msgid "Spec for creating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:383 +#, python-format +msgid "" +"Creating volume backing name: %(name)s disk_type: %(disk_type)s size_kb: " +"%(size_kb)s at folder: %(folder)s resourse pool: %(resource_pool)s " +"datastore name: %(ds_name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:395 +#, python-format +msgid "Initiated creation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:398 +#, python-format +msgid "Successfully created volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:438 +#, python-format +msgid "Spec for relocating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:452 +#, python-format +msgid "" +"Relocating backing: %(backing)s to datastore: %(ds)s and resource pool: " +"%(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:462 +#, python-format +msgid "Initiated relocation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:464 +#, python-format +msgid "" +"Successfully relocated volume backing: %(backing)s to datastore: %(ds)s " +"and resource pool: %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:474 +#, python-format +msgid "Moving backing: %(backing)s to folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:479 +#, python-format +msgid "Initiated move of volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:482 +#, python-format +msgid "Successfully moved volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:494 +#, python-format +msgid "Snapshoting backing: %(backing)s with name: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:501 +#, python-format +msgid "Initiated snapshot of volume backing: %(backing)s named: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:505 +#, python-format +msgid "Successfully created snapshot: %(snap)s for volume backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:553 +#, python-format +msgid "Deleting the snapshot: %(name)s from backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:558 +#, python-format +msgid "" +"Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " +"delete anything." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:565 +#, python-format +msgid "Initiated snapshot: %(name)s deletion for backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:569 +#, python-format +msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:597 +#, python-format +msgid "Spec for cloning the backing: %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:613 +#, python-format +msgid "" +"Creating a clone of backing: %(back)s, named: %(name)s, clone type: " +"%(type)s from snapshot: %(snap)s on datastore: %(ds)s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:627 +#, python-format +msgid "Initiated clone of backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:638 +#, python-format +msgid "Deleting file: %(file)s under datacenter: %(dc)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:646 +#, python-format +msgid "Initiated deletion via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:648 +#, python-format +msgid "Successfully deleted file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:701 +msgid "Copying disk data before snapshot of the VM" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:711 +#, python-format +msgid "Initiated copying disk data via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:713 +#, python-format +msgid "Successfully copied disk at: %(src)s to: %(dest)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:722 +#, python-format +msgid "Deleting vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:729 +#, python-format +msgid "Initiated deleting vmdk file via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:731 +#, python-format +msgid "Deleted vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/windows/windows.py:102 +#, fuzzy, python-format +msgid "Creating folder %s " +msgstr "Borrando usuario %s" + +#: cinder/volume/drivers/windows/windows_utils.py:47 +#, python-format +msgid "" +"check_for_setup_error: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:53 +msgid "check_for_setup_error: there is no ISCSI traffic listening." +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:63 +#, python-format +msgid "" +"get_host_information: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:73 +#, python-format +msgid "" +"get_host_information: the ISCSI target information could not be " +"retrieved. WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:105 +#, python-format +msgid "" +"associate_initiator_with_iscsi_target: an association between initiator: " +"%(init)s and target name: %(target)s could not be established. WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:123 +#, python-format +msgid "" +"delete_iscsi_target: error when deleting the iscsi target associated with" +" target name: %(target)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:139 +#, python-format +msgid "" +"create_volume: error when creating the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:157 +#, python-format +msgid "" +"delete_volume: error when deleting the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:177 +#, python-format +msgid "" +"create_snapshot: error when creating the snapshot name: %(vol_name)s . 
" +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:193 +#, python-format +msgid "" +"create_volume_from_snapshot: error when creating the volume name: " +"%(vol_name)s from snapshot name: %(snap_name)s. WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:208 +#, python-format +msgid "" +"delete_snapshot: error when deleting the snapshot name: %(snap_name)s . " +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:223 +#, python-format +msgid "" +"create_iscsi_target: error when creating iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:240 +#, python-format +msgid "" +"remove_iscsi_target: error when deleting iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:255 +#, python-format +msgid "" +"add_disk_to_target: error adding disk associated to volume : %(vol_name)s" +" to the target name: %(tar_name)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:273 +#, python-format +msgid "" +"copy_vhd_disk: error when copying disk from source path : %(src_path)s to" +" destination path: %(dest_path)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:290 +#, python-format +msgid "" +"extend: error when extending the volume: %(vol_name)s .WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/flows/common.py:52 +#, python-format +msgid "Restoring source %(source_volid)s status to %(status)s" +msgstr "" + +#: cinder/volume/flows/common.py:58 +#, python-format +msgid "" +"Failed setting source volume %(source_volid)s back to its initial " +"%(source_status)s status" +msgstr "" + +#: cinder/volume/flows/common.py:83 +#, python-format +msgid "Updating volume: %(volume_id)s with %(update)s due to: %(reason)s" +msgstr "" + +#: cinder/volume/flows/common.py:90 +#: cinder/volume/flows/manager/create_volume.py:676 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(update)s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:81 +#, python-format +msgid "Originating snapshot status must be one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:103 +#, python-format +msgid "" +"Unable to create a volume from an originating source volume when its " +"status is not one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:126 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than the snapshot size " +"%(snap_size)sGB. They must be >= original snapshot size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:135 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than original volume size " +"%(source_size)sGB. They must be >= original volume size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:144 +#, python-format +msgid "Volume size %(size)s must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:186 +#, python-format +msgid "" +"Size of specified image %(image_size)sGB is larger than volume size " +"%(volume_size)sGB." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:194 +#, python-format +msgid "" +"Volume size %(volume_size)sGB cannot be smaller than the image minDisk " +"size %(min_disk)sGB." 
+msgstr "" + +#: cinder/volume/flows/api/create_volume.py:212 +#, python-format +msgid "Metadata property key %s greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:217 +#, python-format +msgid "Metadata property key %s value greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:254 +#, python-format +msgid "Availability zone '%s' is invalid" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:267 +msgid "Volume must be in the same availability zone as the snapshot" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:276 +msgid "Volume must be in the same availability zone as the source volume" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:315 +msgid "Volume type will be changed to be the same as the source volume." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:463 +#, python-format +msgid "Failed destroying volume entry %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:546 +#, python-format +msgid "Failed rolling back quota for %s reservations" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:590 +#, python-format +msgid "Failed to update quota for deleting volume: %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:678 +#: cinder/volume/flows/manager/create_volume.py:192 +#, python-format +msgid "Volume %s: create failed" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:682 +msgid "Unexpected build error:" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:105 +#, python-format +msgid "" +"Volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d due to " +"%(reason)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:124 +#, python-format +msgid "Volume %s: re-scheduled" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:141 +#, python-format +msgid "Updating volume %(volume_id)s with %(update)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:146 +#, python-format +msgid "Volume %s: resetting 'creating' status failed." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:165 +#, python-format +msgid "Volume %s: rescheduling failed" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:308 +#, python-format +msgid "" +"Failed notifying about the volume action %(event)s for volume " +"%(volume_id)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:345 +#, python-format +msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:347 +#, python-format +msgid "" +"Failed updating volume %(vol_id)s metadata using the provided " +"%(src_type)s %(src_id)s metadata" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:405 +#, python-format +msgid "" +"Failed fetching snapshot %(snapshot_id)s bootable flag using the provided" +" glance snapshot %(snapshot_ref_id)s volume reference" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:418 +#, python-format +msgid "Marking volume %s as bootable." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:421 +#, python-format +msgid "Failed updating volume %(volume_id)s bootable flag to true" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:448 +#, python-format +msgid "" +"Attempting download of %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s." 
+msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:455 +#: cinder/volume/flows/manager/create_volume.py:466 +#, python-format +msgid "" +"Failed to copy image %(image_id)s to volume: %(volume_id)s, error: " +"%(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:461 +#, python-format +msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:475 +#, python-format +msgid "" +"Downloaded image %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s successfully." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:512 +#, python-format +msgid "" +"Creating volume glance metadata for volume %(volume_id)s backed by image " +"%(image_id)s with: %(vol_metadata)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:526 +#, python-format +msgid "" +"Cloning %(volume_id)s from image %(image_id)s at location " +"%(image_location)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:552 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(updates)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:574 +#, python-format +msgid "Unable to create volume. Volume driver %s not initialized" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:588 +#, python-format +msgid "" +"Volume %(volume_id)s: being created using %(functor)s with specification:" +" %(volume_spec)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:611 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with creation provided " +"model %(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:619 +#, python-format +msgid "Volume %s: creating export" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:633 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with driver provided model " +"%(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:680 +#, python-format +msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" +msgstr "" + +#~ msgid "Error retrieving volume status: %s" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get system name" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get storage pool data" +#~ msgstr "" + +#~ msgid "Cannot find any Fibre Channel HBAs" +#~ msgstr "" + +#~ msgid "Volume status must be available or error" +#~ msgstr "El estado del volumen debe estar disponible" + +#~ msgid "No backend config with id %s" +#~ msgstr "" + +#~ msgid "No sm_flavor called %s" +#~ msgstr "" + +#~ msgid "No sm_volume with id %s" +#~ msgstr "" + +#~ msgid "Error: %s" +#~ msgstr "Capturado error: %s" + +#~ msgid "Unexpected state while cloning %s" +#~ msgstr "Error inesperado mientras el comando se ejecutaba" + +#~ msgid "iSCSI device not found at %s" +#~ msgstr "" + +#~ msgid "Fibre Channel device not found." 
+#~ msgstr "" + +#~ msgid "Uncaught exception" +#~ msgstr "Obtenida excepción %s" + +#~ msgid "Out reactor registered" +#~ msgstr "" + +#~ msgid "CONSUMER GOT %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT QUEUED %(data)s" +#~ msgstr "" + +#~ msgid "Could not create IPC directory %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT %(data)s" +#~ msgstr "" + +#~ msgid "May specify only one of snapshot, imageRef or source volume" +#~ msgstr "" + +#~ msgid "Volume size cannot be lesser than the Snapshot size" +#~ msgstr "" + +#~ msgid "Unable to clone volumes that are in an error state" +#~ msgstr "" + +#~ msgid "Clones currently must be >= original volume size." +#~ msgstr "" + +#~ msgid "Volume size '%s' must be an integer and greater than 0" +#~ msgstr "" + +#~ msgid "Size of specified image is larger than volume size." +#~ msgstr "" + +#~ msgid "Image minDisk size is larger than the volume size." +#~ msgstr "" + +#~ msgid "" +#~ msgstr "" + +#~ msgid "Availability zone is invalid" +#~ msgstr "" + +#~ msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +#~ msgstr "volume %(vol_name)s: creando lv del tamaño %(vol_size)sG" + +#~ msgid "volume %s: creating from snapshot" +#~ msgstr "volumen %s: exportando" + +#~ msgid "volume %s: creating from existing volume" +#~ msgstr "volumen %s: exportando" + +#~ msgid "volume %s: creating from image" +#~ msgstr "volumen %s: creando" + +#~ msgid "volume %s: creating" +#~ msgstr "volumen %s: creando" + +#~ msgid "Setting volume: %s status to error after failed image copy." +#~ msgstr "" + +#~ msgid "Unexpected Error: " +#~ msgstr "" + +#~ msgid "volume %s: creating export" +#~ msgstr "volumen %s: exportando" + +#~ msgid "volume %s: create failed" +#~ msgstr "volumen %s: creando" + +#~ msgid "volume %s: created successfully" +#~ msgstr "volumen %s: creado satisfactoriamente" + +#~ msgid "volume %s: Error trying to reschedule create" +#~ msgstr "" + +#~ msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +#~ msgstr "" + +#~ msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +#~ msgstr "Fallo al generar metadatos para la ip %s" + +#~ msgid "Downloaded image %(image_id)s to %(volume_id)s successfully." +#~ msgstr "" + +#~ msgid "Array Mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "LUN %(lun)s of size %(size)s MB is created." +#~ msgstr "" + +#~ msgid "Array mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "Failed to attach iser target for volume %(volume_id)s." 
+#~ msgstr "" + +#~ msgid "Fetching %s" +#~ msgstr "Obteniendo %s" + +#~ msgid "Link Local address is not found.:%s" +#~ msgstr "No se encuentra la dirección del enlace local.:%s" + +#~ msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +#~ msgstr "No se pudo obtener enlace de la ip local de %(interface)s :%(ex)s" + +#~ msgid "Started %(name)s on %(_host)s:%(_port)s" +#~ msgstr "" + +#~ msgid "Unable to find a Fibre Channel volume device" +#~ msgstr "" + +#~ msgid "Volume device not found at %s" +#~ msgstr "" + +#~ msgid "Unable to find Volume Group: %s" +#~ msgstr "Imposible desasociar volumen %s" + +#~ msgid "Failed to create Volume Group: %s" +#~ msgstr "Imposible encontrar volumen %s" + +#~ msgid "snapshot %(snap_name)s: creating" +#~ msgstr "" + +#~ msgid "Running with CoraidDriver for ESM EtherCLoud" +#~ msgstr "" + +#~ msgid "Update session cookie %(session)s" +#~ msgstr "" + +#~ msgid "Message : %(message)s" +#~ msgstr "" + +#~ msgid "Error while trying to set group: %(message)s" +#~ msgstr "" + +#~ msgid "Unable to find group: %(group)s" +#~ msgstr "Imposible encontrar SR en VBD %s" + +#~ msgid "ESM urlOpen error" +#~ msgstr "" + +#~ msgid "JSON Error" +#~ msgstr "" + +#~ msgid "Request without URL" +#~ msgstr "" + +#~ msgid "Configure data : %s" +#~ msgstr "" + +#~ msgid "Configure response : %s" +#~ msgstr "respuesta %s" + +#~ msgid "Unable to retrive volume infos for volume %(volname)s" +#~ msgstr "" + +#~ msgid "Cannot login on Coraid ESM" +#~ msgstr "" + +#~ msgid "Fail to create volume %(volname)s" +#~ msgstr "Imposible encontrar volumen %s" + +#~ msgid "Failed to delete volume %(volname)s" +#~ msgstr "Imposible encontrar volumen %s" + +#~ msgid "Failed to Create Snapshot %(snapname)s" +#~ msgstr "Fallo a reinicia la instancia" + +#~ msgid "Failed to Delete Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "Failed to Create Volume from Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "fmt = %(fmt)s backed by: %(backing_file)s" +#~ msgstr "" + +#~ msgid "Expected image to be in raw format, but is %s" +#~ msgstr "" + +#~ msgid "volume group %s doesn't exist" +#~ msgstr "el grupo de volumenes %s no existe" + +#~ msgid "Error retrieving volume stats: %s" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Could not get system name" +#~ msgstr "" + +#~ msgid "CPG (%s) must be in a domain" +#~ msgstr "" + +#~ msgid "Error populating default encryption types!" +#~ msgstr "" + +#~ msgid "Unexpected error while running command." 
+#~ msgstr "Error inesperado mientras el comando se ejecutaba" + +#~ msgid "Nexenta SA returned the error" +#~ msgstr "" + +#~ msgid "Ignored target group creation error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group member addition error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LU creation error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Copying metadata from snapshot %(snap_volume_id)s to %(volume_id)s" +#~ msgstr "" + +#~ msgid "Connection to glance failed" +#~ msgstr "Coneccion con glance fallida" + +#~ msgid "Invalid snapshot" +#~ msgstr "Captura no valida" + +#~ msgid "Invalid input received" +#~ msgstr "Entrada invalida recibida" + +#~ msgid "Invalid volume type" +#~ msgstr "" + +#~ msgid "Invalid volume" +#~ msgstr "" + +#~ msgid "Invalid host" +#~ msgstr "" -#: cinder/volume/san.py:452 -#, python-format -msgid "CLIQ command returned %s" -msgstr "" +#~ msgid "Invalid auth key" +#~ msgstr "Captura no valida" -#: cinder/volume/san.py:458 -#, python-format -msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" -msgstr "" +#~ msgid "Invalid metadata" +#~ msgstr "" -#: cinder/volume/san.py:466 -#, python-format -msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" -msgstr "" +#~ msgid "Invalid metadata size" +#~ msgstr "" -#: cinder/volume/san.py:496 -#, python-format -msgid "" -"Unexpected number of virtual ips for cluster %(cluster_name)s. " -"Result=%(_xml)s" -msgstr "" +#~ msgid "Migration error" +#~ msgstr "" -#: cinder/volume/san.py:549 -#, python-format -msgid "Volume info: %(volume_name)s => %(volume_attributes)s" -msgstr "" +#~ msgid "Quota exceeded" +#~ msgstr "" -#: cinder/volume/san.py:594 -msgid "local_path not supported" -msgstr "" +#~ msgid "Connection to swift failed" +#~ msgstr "Coneccion con glance fallida" -#: cinder/volume/san.py:626 -#, python-format -msgid "Could not determine project for volume %s, can't export" -msgstr "" +#~ msgid "Volume migration failed" +#~ msgstr "" -#: cinder/volume/san.py:696 -#, python-format -msgid "Payload for SolidFire API call: %s" -msgstr "" +#~ msgid "SSH command injection detected" +#~ msgstr "" -#: cinder/volume/san.py:713 -#, python-format -msgid "Call to json.loads() raised an exception: %s" -msgstr "" +#~ msgid "Invalid qos specs" +#~ msgstr "" -#: cinder/volume/san.py:718 -#, python-format -msgid "Results of SolidFire API call: %s" -msgstr "" +#~ msgid "debug in callback: %s" +#~ msgstr "Depuración de la devolución de llamada: %s" -#: cinder/volume/san.py:732 -#, python-format -msgid "Found solidfire account: %s" -msgstr "" +#~ msgid "Expected object of type: %s" +#~ msgstr "" -#: cinder/volume/san.py:746 -#, python-format -msgid "solidfire account: %s does not exist, create it..." -msgstr "" +#~ msgid "timefunc: '%(name)s' took %(total_time).2f secs" +#~ msgstr "" -#: cinder/volume/san.py:804 -msgid "Enter SolidFire create_volume..." -msgstr "" +#~ msgid "base image still has %s snapshots so not deleting base image" +#~ msgstr "" -#: cinder/volume/san.py:846 -msgid "Leaving SolidFire create_volume" -msgstr "" +#~ msgid "Failed to rename migration destination volume %(vol)s to %(name)s" +#~ msgstr "" -#: cinder/volume/san.py:861 -msgid "Enter SolidFire delete_volume..." 
-msgstr "" +#~ msgid "Resize volume \"%(name)s\" to %(size)s" +#~ msgstr "" -#: cinder/volume/san.py:880 -#, python-format -msgid "Deleting volumeID: %s " -msgstr "" +#~ msgid "Volume \"%(name)s\" resized. New size is %(size)s" +#~ msgstr "" -#: cinder/volume/san.py:888 -msgid "Leaving SolidFire delete_volume" -msgstr "" +#~ msgid "Invalid snapshot backing file format: %s" +#~ msgstr "" -#: cinder/volume/san.py:891 -msgid "Executing SolidFire ensure_export..." -msgstr "" +#~ msgid "Extend volume from %(old_size) to %(new_size)" +#~ msgstr "" -#: cinder/volume/san.py:895 -msgid "Executing SolidFire create_export..." -msgstr "" +#~ msgid "pool %s doesn't exist" +#~ msgstr "el grupo de volumenes %s no existe" -#: cinder/volume/volume_types.py:49 cinder/volume/volume_types.py:108 -msgid "name cannot be None" -msgstr "" +#~ msgid "_update_volume_stats: Could not get system name." +#~ msgstr "" -#: cinder/volume/volume_types.py:96 -msgid "id cannot be None" -msgstr "" +#~ msgid "Disk not found: %s" +#~ msgstr "No se encuentra la dirección del enlace local.:%s" -#: cinder/volume/xensm.py:55 -#, python-format -msgid "SR name = %s" -msgstr "" +#~ msgid "read timed out" +#~ msgstr "" -#: cinder/volume/xensm.py:56 -#, python-format -msgid "Params: %s" -msgstr "" +#~ msgid "check_for_setup_error." +#~ msgstr "" -#: cinder/volume/xensm.py:60 -#, python-format -msgid "Failed to create sr %s...continuing" -msgstr "" +#~ msgid "check_for_setup_error: Can not get device type." +#~ msgstr "" -#: cinder/volume/xensm.py:62 -msgid "Create failed" -msgstr "" +#~ msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +#~ msgstr "" -#: cinder/volume/xensm.py:64 -#, python-format -msgid "SR UUID of new SR is: %s" -msgstr "" +#~ msgid "_get_device_type: Storage Pool must be configured." +#~ msgstr "" -#: cinder/volume/xensm.py:71 -msgid "Failed to update db" -msgstr "" +#~ msgid "create_volume:volume name: %s." +#~ msgstr "" -#: cinder/volume/xensm.py:80 -#, python-format -msgid "Failed to introduce sr %s...continuing" -msgstr "" +#~ msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +#~ msgstr "" -#: cinder/volume/xensm.py:91 -#, python-format -msgid "Failed to reach backend %d" -msgstr "" +#~ msgid "create_export: volume name:%s" +#~ msgstr "" -#: cinder/volume/xensm.py:100 -msgid "XenSMDriver requires xenapi connection" -msgstr "" +#~ msgid "create_export:Volume %(name)s does not exist." +#~ msgstr "" -#: cinder/volume/xensm.py:110 -msgid "Failed to initiate session" -msgstr "" +#~ msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +#~ msgstr "" -#: cinder/volume/xensm.py:142 -#, python-format -msgid "Volume will be created in backend - %d" -msgstr "" +#~ msgid "terminate_connection:Host does not exist. Host name:%(host)s." +#~ msgstr "" -#: cinder/volume/xensm.py:154 -msgid "Failed to update volume in db" -msgstr "" +#~ msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +#~ msgstr "" -#: cinder/volume/xensm.py:157 -msgid "Unable to create volume" -msgstr "" +#~ msgid "create_snapshot:Device does not support snapshot." +#~ msgstr "" -#: cinder/volume/xensm.py:171 -msgid "Failed to delete vdi" -msgstr "" +#~ msgid "create_snapshot:Resource pool needs 1GB valid size at least." +#~ msgstr "" -#: cinder/volume/xensm.py:177 -msgid "Failed to delete volume in db" -msgstr "" +#~ msgid "create_snapshot:Volume does not exist. 
Volume name:%(name)s" +#~ msgstr "" -#: cinder/volume/xensm.py:210 -msgid "Failed to find volume in db" -msgstr "" +#~ msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s" +#~ msgstr "" -#: cinder/volume/xensm.py:221 -msgid "Failed to find backend in db" -msgstr "" +#~ msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +#~ msgstr "" -#: cinder/volume/nexenta/__init__.py:27 -msgid "Nexenta SA returned the error" -msgstr "" +#~ msgid "delete_snapshot:Device does not support snapshot." +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:64 -#, python-format -msgid "Sending JSON data: %s" -msgstr "" +#~ msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:69 -#, python-format -msgid "Auto switching to HTTPS connection to %s" -msgstr "" +#~ msgid "_check_conf_file: %s" +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:75 -msgid "No headers in server response" -msgstr "" +#~ msgid "Write login information to xml error. %s" +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:76 -msgid "Bad response from server" -msgstr "" +#~ msgid "_get_login_info error. %s" +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:79 -#, fuzzy, python-format -msgid "Got response: %s" -msgstr "respuesta %s" +#~ msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:96 -#, fuzzy, python-format -msgid "Volume %s does not exist in Nexenta SA" -msgstr "el grupo de volumenes %s no existe" +#~ msgid "_get_lun_set_info:%s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:180 -msgid "" -"Call to local_path should not happen. Verify that use_local_volumes flag " -"is turned off." -msgstr "" +#~ msgid "_get_iscsi_info:%s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:202 -#, python-format -msgid "Ignored target creation error \"%s\" while ensuring export" -msgstr "" +#~ msgid "CLI command:%s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:210 -#, python-format -msgid "Ignored target group creation error \"%s\" while ensuring export" -msgstr "" +#~ msgid "_execute_cli:%s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:219 -#, python-format -msgid "Ignored target group member addition error \"%s\" while ensuring export" -msgstr "" +#~ msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:227 -#, python-format -msgid "Ignored LU creation error \"%s\" while ensuring export" -msgstr "" +#~ msgid "_get_tgt_iqn:iSCSI IP is %s." +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:237 -#, python-format -msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" -msgstr "" +#~ msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:273 -#, python-format -msgid "" -"Got error trying to destroy target group %(target_group)s, assuming it is" -" already gone: %(exc)s" -msgstr "" +#~ msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:280 -#, python-format -msgid "" -"Got error trying to delete target %(target)s, assuming it is already " -"gone: %(exc)s" -msgstr "" +#~ msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +#~ msgstr "" -#~ msgid "Unable to locate account %(account_name) on Solidfire device" +#~ msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." 
#~ msgstr "" -#~ msgid "Zone %(zone_id)s could not be found." +#~ msgid "Ignored target creation error while ensuring export" #~ msgstr "" -#~ msgid "Cinder access parameters were not specified." +#~ msgid "Ignored target group creation error while ensuring export" #~ msgstr "" -#~ msgid "Virtual Storage Array %(id)d could not be found." +#~ msgid "Ignored target group member addition error while ensuring export" #~ msgstr "" -#~ msgid "Virtual Storage Array %(name)s could not be found." +#~ msgid "Ignored LU creation error while ensuring export" #~ msgstr "" -#~ msgid "Detected more than one volume with name %(vol_name)" +#~ msgid "Ignored LUN mapping entry addition error while ensuring export" #~ msgstr "" -#~ msgid "Detected existing vlan with id %(vlan)" +#~ msgid "Invalid source volume %(reason)s." #~ msgstr "" -#~ msgid "" -#~ "Attempting to grab semaphore \"%(lock)s\" " -#~ "for method \"%(method)s\"...lock" +#~ msgid "The request is invalid." +#~ msgstr "La petición es inválida." + +#~ msgid "Volume %(volume_id)s persistence file could not be found." #~ msgstr "" -#~ msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgid "No disk at %(location)s" #~ msgstr "" -#~ msgid "" -#~ "Attempting to grab file lock " -#~ "\"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgid "Class %(class_name)s could not be found: %(exception)s" #~ msgstr "" -#~ msgid "Got file lock \"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgid "Action not allowed." #~ msgstr "" -#~ msgid "Parent group id and group id cannot be same" +#~ msgid "Key pair %(key_name)s already exists." #~ msgstr "" -#~ msgid "No body provided" +#~ msgid "Migration error: %(reason)s" #~ msgstr "" -#~ msgid "Create VSA %(display_name)s of type %(vc_type)s" +#~ msgid "Maximum volume/snapshot size exceeded" #~ msgstr "" -#~ msgid "Delete VSA with id: %s" +#~ msgid "3PAR Host already exists: %(err)s. %(info)s" #~ msgstr "" -#~ msgid "Associate address %(ip)s to VSA %(id)s" +#~ msgid "Backup volume %(volume_id)s type not recognised." #~ msgstr "" -#~ msgid "Disassociate address from VSA %(id)s" +#~ msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s" #~ msgstr "" -#~ msgid "%(obj)s with ID %(id)s not found" +#~ msgid "ssh_read: Read SSH timeout" #~ msgstr "" -#~ msgid "" -#~ "%(obj)s with ID %(id)s belongs to " -#~ "VSA %(own_vsa_id)s and not to VSA " -#~ "%(vsa_id)s." +#~ msgid "do_setup." #~ msgstr "" -#~ msgid "Index. vsa_id=%(vsa_id)s" +#~ msgid "create_volume: volume name: %s." #~ msgstr "" -#~ msgid "Detail. vsa_id=%(vsa_id)s" +#~ msgid "delete_volume: volume name: %s." #~ msgstr "" -#~ msgid "Create. vsa_id=%(vsa_id)s, body=%(body)s" +#~ msgid "create_cloned_volume: src volume: %(src)s tgt volume: %(tgt)s" #~ msgstr "" -#~ msgid "Create volume of %(size)s GB from VSA ID %(vsa_id)s" +#~ msgid "create_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" #~ msgstr "" -#~ msgid "Update %(obj)s with id: %(id)s, changes: %(changes)s" +#~ msgid "delete_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" #~ msgstr "" -#~ msgid "Delete. vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s" #~ msgstr "" -#~ msgid "Show. vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgid "_update_volume_stats: Updating volume stats" #~ msgstr "" -#~ msgid "Index instances for VSA %s" +#~ msgid "restore finished." 
#~ msgstr "" -#~ msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances" +#~ msgid "Error encountered during initialization of driver: %s" #~ msgstr "" -#~ msgid "" -#~ "Instance quota exceeded. You cannot run" -#~ " any more instances of this type." +#~ msgid "Unabled to update stats, driver is uninitialized" #~ msgstr "" -#~ msgid "Going to try to soft delete %s" +#~ msgid "Snapshot file at %s does not exist." #~ msgstr "" -#~ msgid "No host for instance %s, deleting immediately" +#~ msgid "_create_copy: Source vdisk %s does not exist" #~ msgstr "" -#~ msgid "Going to try to terminate %s" +#~ msgid "Login to 3PAR array invalid" #~ msgstr "" -#~ msgid "Going to try to stop %s" +#~ msgid "There are no datastores present under %s." #~ msgstr "" -#~ msgid "Going to try to start %s" +#~ msgid "Size for volume: %s not found, skipping secure delete." #~ msgstr "" -#~ msgid "" -#~ "Going to force the deletion of the" -#~ " vm %(instance_uuid)s, even if it is" -#~ " deleted" +#~ msgid "Could not find attribute for LUN named %s" #~ msgstr "" -#~ msgid "" -#~ "Instance %(instance_uuid)s did not exist " -#~ "in the DB, but I will shut " -#~ "it down anyway using a special " -#~ "context" +#~ msgid "Cleaning up incomplete backup operations" #~ msgstr "" -#~ msgid "exception terminating the instance %(instance_uuid)s" +#~ msgid "Resetting volume %s to available (was backing-up)" #~ msgstr "" -#~ msgid "trying to destroy already destroyed instance: %s" -#~ msgstr "intentando finalizar una instancia que ya había sido finalizada: %s" +#~ msgid "Resetting volume %s to error_restoring (was restoring-backup)" +#~ msgstr "" -#~ msgid "" -#~ "Instance %(name)s found in database but" -#~ " not known by hypervisor. Setting " -#~ "power state to NOSTATE" +#~ msgid "Resetting backup %s to error (was creating)" #~ msgstr "" -#~ msgid "" -#~ "Detected instance with name label " -#~ "'%(name_label)s' which is marked as " -#~ "DELETED but still present on host." +#~ msgid "Resetting backup %s to available (was restoring)" #~ msgstr "" -#~ msgid "" -#~ "Destroying instance with name label " -#~ "'%(name_label)s' which is marked as " -#~ "DELETED but still present on host." +#~ msgid "Resuming delete on backup: %s" #~ msgstr "" -#~ msgid "SQL connection failed (%(connstring)s). %(attempts)d attempts left." +#~ msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" #~ msgstr "" -#~ msgid "Can't downgrade without losing data" +#~ msgid "create_backup finished. backup: %s" #~ msgstr "" -#~ msgid "Instance %(instance_id)s not found" +#~ msgid "delete_backup started, backup: %s" #~ msgstr "" -#~ msgid "Network %s has active ports, cannot delete" +#~ msgid "delete_backup finished, backup %s deleted" #~ msgstr "" -#~ msgid "No fixed IPs to deallocate for vif %sid" +#~ msgid "JSON transfer Error" #~ msgstr "" -#~ msgid "" -#~ "AMQP server on %(fl_host)s:%(fl_port)d is " -#~ "unreachable: %(e)s. Trying again in " -#~ "%(fl_intv)d seconds." +#~ msgid "create volume error: %(err)s" #~ msgstr "" -#~ msgid "Unable to connect to AMQP server after %(tries)d tries. Shutting down." +#~ msgid "Create snapshot error." #~ msgstr "" -#~ msgid "Failed to fetch message from queue: %s" +#~ msgid "Create luncopy error." #~ msgstr "" -#~ msgid "Initing the Adapter Consumer for %s" +#~ msgid "_find_host_lun_id transfer data error! " #~ msgstr "" -#~ msgid "Created \"%(exchange)s\" fanout exchange with \"%(key)s\" routing key" +#~ msgid "ssh_read: Read SSH timeout." 
#~ msgstr "" -#~ msgid "Exception while processing consumer" +#~ msgid "There are no hosts in the inventory." #~ msgstr "" -#~ msgid "Creating \"%(exchange)s\" fanout exchange" +#~ msgid "Unable to create volume: %(vol)s on the hosts: %(hosts)s." #~ msgstr "" -#~ msgid "topic is %s" +#~ msgid "Successfully cloned new backing: %s." #~ msgstr "" -#~ msgid "message %s" -#~ msgstr "mensaje %s" +#~ msgid "Successfully reverted clone: %(clone)s to snapshot: %(snapshot)s." +#~ msgstr "" -#~ msgid "" -#~ "Cannot confirm tmpfile at %(ipath)s is" -#~ " on same shared storage between " -#~ "%(src)s and %(dest)s." +#~ msgid "Copying backing files from %(src)s to %(dest)s." #~ msgstr "" -#~ msgid "" -#~ "Unable to migrate %(instance_id)s to " -#~ "%(dest)s: Lack of memory(host:%(avail)s <= " -#~ "instance:%(mem_inst)s)" +#~ msgid "Initiated copying of backing via task: %s." #~ msgstr "" -#~ msgid "" -#~ "Unable to migrate %(instance_id)s to " -#~ "%(dest)s: Lack of disk(host:%(available)s <=" -#~ " instance:%(necessary)s)" +#~ msgid "Successfully copied backing to %s." #~ msgstr "" -#~ msgid "Driver Method %(driver_method)s missing: %(e)s.Reverting to schedule()" +#~ msgid "Registering backing at path: %s to inventory." #~ msgstr "" -#~ msgid "Setting instance %(instance_uuid)s to ERROR state." +#~ msgid "Initiated registring backing, task: %s." #~ msgstr "" -#~ msgid "_filter_hosts: %(request_spec)s" +#~ msgid "Successfully registered backing: %s." #~ msgstr "" -#~ msgid "Filter hosts for drive type %s" +#~ msgid "Reverting backing to snapshot: %s." #~ msgstr "" -#~ msgid "Host %s has no free capacity. Skip" +#~ msgid "Initiated reverting snapshot via task: %s." #~ msgstr "" -#~ msgid "Filter hosts: %s" +#~ msgid "Successfully reverted to snapshot: %s." #~ msgstr "" -#~ msgid "Must implement host selection mechanism" +#~ msgid "Successfully copied disk data to: %s." #~ msgstr "" -#~ msgid "Maximum number of hosts selected (%d)" +#~ msgid "Error(s): %s occurred in the call to RetrieveProperties." #~ msgstr "" -#~ msgid "Selected excessive host %(host)s" +#~ msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" #~ msgstr "" -#~ msgid "Provision volume %(name)s of size %(size)s GB on host %(host)s" +#~ msgid "Deploy v1 of the Cinder API. " #~ msgstr "" -#~ msgid "volume_params %(volume_params)s" +#~ msgid "Deploy v2 of the Cinder API. " #~ msgstr "" -#~ msgid "%(i)d: Volume %(name)s" +#~ msgid "_read_xml:%s" #~ msgstr "" -#~ msgid "Attempting to spawn %(num_volumes)d volume(s)" +#~ msgid "request ip info is %s." #~ msgstr "" -#~ msgid "Error creating volumes" +#~ msgid "new str info is %s." #~ msgstr "" -#~ msgid "Non-VSA volume %d" +#~ msgid "Failed to create iser target for volume %(volume_id)s." #~ msgstr "" -#~ msgid "Spawning volume %(volume_id)s with drive type %(drive_type)s" +#~ msgid "Failed to remove iser target for volume %(volume_id)s." #~ msgstr "" -#~ msgid "Error creating volume" +#~ msgid "rtstool is not installed correctly" #~ msgstr "" -#~ msgid "No capability selected for volume of size %(size)s" +#~ msgid "Creating iser_target for: %s" #~ msgstr "" -#~ msgid "Host %s:" +#~ msgid "Failed to create iser target for volume id:%(vol_id)s: %(e)s" #~ msgstr "" -#~ msgid "" -#~ "\tDrive %(qosgrp)-25s: total %(total)2s, " -#~ "used %(used)2s, free %(free)2s. Available " -#~ "capacity %(avail)-5s" +#~ msgid "Removing iser_target for: %s" #~ msgstr "" -#~ msgid "" -#~ "\t LeastUsedHost: Best host: %(best_host)s." 
-#~ " (used capacity %(min_used)s)" +#~ msgid "Failed to remove iser target for volume id:%(vol_id)s: %(e)s" #~ msgstr "" -#~ msgid "" -#~ "\t MostAvailCap: Best host: %(best_host)s. " -#~ "(available %(max_avail)s %(type_str)s)" +#~ msgid "Volume %s does not exist, it seems it was already deleted" +#~ msgstr "" + +#~ msgid "Executing zfs send/recv on the appliance" #~ msgstr "" -#~ msgid "(%(nm)s) publish (key: %(routing_key)s) %(message)s" -#~ msgstr "(%(nm)s) publica (key: %(routing_key)s) %(message)s" +#~ msgid "zfs send/recv done, new volume %s created" +#~ msgstr "" -#~ msgid "Publishing to route %s" -#~ msgstr "Publicando la ruta %s" +#~ msgid "Failed to delete temp snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" -#~ msgid "Declaring queue %s" -#~ msgstr "Declarando cola %s" +#~ msgid "Failed to delete zfs recv snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" -#~ msgid "Declaring exchange %s" -#~ msgstr "Declarando intercambio %s" +#~ msgid "rbd export-diff failed - %s" +#~ msgstr "" -#~ msgid "Binding %(queue)s to %(exchange)s with key %(routing_key)s" -#~ msgstr "Enlazando %(queue)s a %(exchange)s con la llave %(routing_key)s" +#~ msgid "rbd import-diff failed - %s" +#~ msgstr "" -#~ msgid "Getting from %(queue)s: %(message)s" -#~ msgstr "Obtendiendo desde %(queue)s: %(message)s" +#~ msgid "%s is not on GPFS. Perhaps GPFS not mounted." +#~ msgstr "" -#~ msgid "Test: Emulate wrong VSA name. Raise" +#~ msgid "Folder %s does not exist, it seems it was already deleted." #~ msgstr "" -#~ msgid "Test: Emulate DB error. Raise" +#~ msgid "No 'os-update_readonly_flag' was specified in request." #~ msgstr "" -#~ msgid "Test: user_data = %s" +#~ msgid "Volume 'readonly' flag must be specified in request as a boolean." #~ msgstr "" -#~ msgid "_create: param=%s" +#~ msgid "ISER provider_location not stored, using discovery" #~ msgstr "" -#~ msgid "Host %s" +#~ msgid "Could not find iSER export for volume %s" #~ msgstr "" -#~ msgid "Test: provision vol %(name)s on host %(host)s" +#~ msgid "ISER Discovery: Found %s" #~ msgstr "" -#~ msgid "\t vol=%(vol)s" +#~ msgid "Failed to access the device on the path %(path)s: %(error)s." #~ msgstr "" -#~ msgid "Test: VSA update request: vsa_id=%(vsa_id)s values=%(values)s" +#~ msgid "iSER device not found at %s" #~ msgstr "" -#~ msgid "Test: Volume create: %s" +#~ msgid "Found iSER node %(host_device)s (after %(tries)s rescans)." #~ msgstr "" -#~ msgid "Test: Volume get request: id=%(volume_id)s" +#~ msgid "Skipping ensure_export. No iser_target provisioned for volume: %s" #~ msgstr "" -#~ msgid "Test: Volume update request: id=%(volume_id)s values=%(values)s" +#~ msgid "Skipping remove_export. No iser_target provisioned for volume: %s" #~ msgstr "" -#~ msgid "Test: Volume get: id=%(volume_id)s" +#~ msgid "Downloading image: %s from glance image server." #~ msgstr "" -#~ msgid "Task [%(name)s] %(task)s status: success %(result)s" +#~ msgid "Uploading image: %s to the Glance image server." 
#~ msgstr "" -#~ msgid "Task [%(name)s] %(task)s status: %(status)s %(error_info)s" +#~ msgid "Invalid request body" #~ msgstr "" -#~ msgid "Unable to get updated status: %s" +#~ msgid "enter: _get_host_from_connector: prefix %s" #~ msgstr "" -#~ msgid "" -#~ "deactivate_node is called for " -#~ "node_id = %(id)s node_ip = %(ip)s" +#~ msgid "Schedule volume flow not retrieved" #~ msgstr "" -#~ msgid "virsh said: %r" -#~ msgstr "virsh dijo: %r" +#~ msgid "Failed to successfully complete schedule volume using flow: %s" +#~ msgstr "" -#~ msgid "cool, it's a device" -#~ msgstr "genial, es un dispositivo" +#~ msgid "Create volume flow not retrieved" +#~ msgstr "" -#~ msgid "Unable to read LXC console" +#~ msgid "Failed to successfully complete create volume workflow" #~ msgstr "" -#~ msgid "" -#~ "to xml...\n" -#~ ":%s " +#~ msgid "Expected volume result not found" #~ msgstr "" -#~ msgid "During wait running, %s disappeared." +#~ msgid "Manager volume flow not retrieved" #~ msgstr "" -#~ msgid "Instance %s running successfully." +#~ msgid "Failed to successfully complete manager volume workflow" #~ msgstr "" -#~ msgid "The nwfilter(%(instance_secgroup_filter_name)s) is not found." +#~ msgid "Unable to update stats, driver is uninitialized" #~ msgstr "" -#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image verification failed" +#~ msgid "Bad reponse from server: %s" #~ msgstr "" -#~ msgid "" -#~ "%(container_format)s-%(id)s (%(base_file)s): image " -#~ "verification skipped, no hash stored" +#~ msgid "%(flow)s has moved into state %(state)s from state %(old_state)s" #~ msgstr "" -#~ msgid "%(container_format)s-%(id)s (%(base_file)s): checking" +#~ msgid "No request spec, will not reschedule" #~ msgstr "" -#~ msgid "" -#~ "%(container_format)s-%(id)s (%(base_file)s): in use:" -#~ " on this node %(local)d local, " -#~ "%(remote)d on other nodes" +#~ msgid "No retry filter property or associated retry info, will not reschedule" #~ msgstr "" -#~ msgid "" -#~ "%(container_format)s-%(id)s (%(base_file)s): warning " -#~ "-- an absent base file is in " -#~ "use! instances: %(instance_list)s" +#~ msgid "Retry info not present, will not reschedule" #~ msgstr "" -#~ msgid "" -#~ "%(container_format)s-%(id)s (%(base_file)s): in: on" -#~ " other nodes (%(remote)d on other " -#~ "nodes)" +#~ msgid "Clear capabilities" #~ msgstr "" -#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image is not in use" +#~ msgid "This usually means the volume was never succesfully created." #~ msgstr "" -#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image is in use" +#~ msgid "setting LU uppper (end) limit to %s" #~ msgstr "" -#~ msgid "Created VM %s..." -#~ msgstr "Creada VM %s..." +#~ msgid "Can't find lun or lun goup in array" +#~ msgstr "" -#~ msgid "Created VM %(instance_name)s as %(vm_ref)s." -#~ msgstr "VM creada %(instance_name)s como %(vm_ref)s." +#~ msgid "Volume to be restored to is smaller than the backup to be restored" +#~ msgstr "" -#~ msgid "Creating a CDROM-specific VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +#~ msgid "Volume driver '%(driver)s' not initialized." #~ msgstr "" -#~ msgid "" -#~ "Created a CDROM-specific VBD %(vbd_ref)s" -#~ " for VM %(vm_ref)s, VDI %(vdi_ref)s." +#~ msgid "in looping call" #~ msgstr "" -#~ msgid "No primary VDI found for%(vm_ref)s" +#~ msgid "Is the appropriate service running?" 
#~ msgstr "" -#~ msgid "Fetching image %(image)s" +#~ msgid "Could not find another host" #~ msgstr "" -#~ msgid "Image Type: %s" +#~ msgid "Not enough allocatable volume gigabytes remaining" #~ msgstr "" -#~ msgid "ISO: Found sr possibly containing the ISO image" +#~ msgid "Unable to update stats on non-intialized Volume Group: %s" #~ msgstr "" -#~ msgid "instance %s: Failed to fetch glance image" +#~ msgid "do_setup: Pool %s does not exist" #~ msgstr "" -#~ msgid "Creating VBD for VDI %s ... " -#~ msgstr "Creando VBD para VDI %s ... " +#~ msgid "migrate_volume started with more than one vdisk copy" +#~ msgstr "" -#~ msgid "Creating VBD for VDI %s done." -#~ msgstr "Creando VBF para VDI %s terminado" +#~ msgid "migrate_volume: Could not get vdisk copy data" +#~ msgstr "" -#~ msgid "VBD.unplug successful first time." -#~ msgstr "VBD.Primera desconexión satisfactoria." +#~ msgid "Selected datastore: %s for the volume." +#~ msgstr "" -#~ msgid "VBD.unplug rejected: retrying..." -#~ msgstr "VBD. Desconexión rechazada: reintentándolo..." +#~ msgid "There are no valid datastores present under %s." +#~ msgstr "" -#~ msgid "Not sleeping anymore!" +#~ msgid "Unable to create volume, driver not initialized" #~ msgstr "" -#~ msgid "VBD.unplug successful eventually." -#~ msgstr "VBD.Finalmente logro desconectar." +#~ msgid "Migration %(migration_id)s could not be found." +#~ msgstr "" -#~ msgid "Ignoring XenAPI.Failure in VBD.unplug: %s" +#~ msgid "Bad driver response status: %(status)s" #~ msgstr "" -#~ msgid "Ignoring XenAPI.Failure %s" +#~ msgid "Instance %(instance_id)s could not be found." #~ msgstr "" -#~ msgid "Starting instance %s" +#~ msgid "Volume retype failed: %(reason)s" #~ msgstr "" -#~ msgid "instance %s: Failed to spawn" -#~ msgstr "Instancia %s: no se pudo iniciar" +#~ msgid "SIGTERM received" +#~ msgstr "" -#~ msgid "Instance %s failed to spawn - performing clean-up" +#~ msgid "Child %(pid)d exited with status %(code)d" #~ msgstr "" -#~ msgid "instance %s: Failed to spawn - Unable to create VM" +#~ msgid "_wait_child %d" #~ msgstr "" +#~ msgid "wait wrap.failed %s" +#~ msgstr "No encontrado: %s" + #~ msgid "" -#~ "Auto configuring disk for instance " -#~ "%(instance_uuid)s, attempting to resize " -#~ "partition..." +#~ "Report interval must be less than " +#~ "service down time. Current config: " +#~ ". Setting " +#~ "service_down_time to: %(new_service_down_time)s" #~ msgstr "" -#~ msgid "Invalid value for injected_files: '%s'" +#~ msgid "Failed to update iscsi target for volume %(name)s." #~ msgstr "" -#~ msgid "Starting VM %s..." -#~ msgstr "Iniciando VM %s..." - -#~ msgid "Spawning VM %(instance_uuid)s created %(vm_ref)s." +#~ msgid "Updating iscsi target: %s" #~ msgstr "" -#~ msgid "Instance %s: waiting for running" +#~ msgid "Failed to update iscsi target %(name)s: %(e)s" #~ msgstr "" -#~ msgid "Instance %s: running" +#~ msgid "Caught '%(exception)s' exception." #~ msgstr "" -#~ msgid "Resources to remove:%s" +#~ msgid "Get code level failed" #~ msgstr "" -#~ msgid "Removing VDI %(vdi_ref)s(uuid:%(vdi_to_remove)s)" +#~ msgid "do_setup: Could not get system name" #~ msgstr "" -#~ msgid "Skipping VDI destroy for %s" +#~ msgid "Failed to get license information." 
#~ msgstr "" -#~ msgid "Unable to Snapshot instance %(instance_uuid)s: %(exc)s" +#~ msgid "do_setup: No configured nodes" #~ msgstr "" -#~ msgid "Updating instance '%(instance_uuid)s' progress to %(progress)d" +#~ msgid "enter: _get_chap_secret_for_host: host name %s" #~ msgstr "" -#~ msgid "Resize instance %s complete" +#~ msgid "" +#~ "leave: _get_chap_secret_for_host: host name " +#~ "%(host_name)s with secret %(chap_secret)s" #~ msgstr "" -#~ msgid "domid changed from %(olddomid)s to %(newdomid)s" +#~ msgid "" +#~ "_create_host: Cannot clean host name. " +#~ "Host name is not unicode or string" #~ msgstr "" -#~ msgid "VM %(instance_uuid)s already halted,skipping shutdown..." +#~ msgid "enter: _get_host_from_connector: %s" #~ msgstr "" -#~ msgid "Shutting down VM for Instance %(instance_uuid)s" +#~ msgid "leave: _get_host_from_connector: host %s" #~ msgstr "" -#~ msgid "Destroying VDIs for Instance %(instance_uuid)s" +#~ msgid "enter: _create_host: host %s" #~ msgstr "" -#~ msgid "" -#~ "Instance %(instance_uuid)s using RAW or " -#~ "VHD, skipping kernel and ramdisk " -#~ "deletion" +#~ msgid "_create_host: No connector ports" #~ msgstr "" -#~ msgid "Instance %(instance_uuid)s VM destroyed" +#~ msgid "leave: _create_host: host %(host)s - %(host_name)s" #~ msgstr "" -#~ msgid "Destroying VM for Instance %(instance_uuid)s" +#~ msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" #~ msgstr "" -#~ msgid "Automatically hard rebooting %d" +#~ msgid "" +#~ "storwize_svc_multihostmap_enabled is set to " +#~ "False, Not allow multi host mapping" #~ msgstr "" -#~ msgid "Instance for migration %d not found, skipping" +#~ msgid "volume %s mapping to multi host" +#~ msgstr "volume %s: saltando exportación" + +#~ msgid "" +#~ "leave: _map_vol_to_host: LUN %(result_lun)s, " +#~ "volume %(volume_name)s, host %(host_name)s" #~ msgstr "" -#~ msgid "injecting network info to xs for vm: |%s|" +#~ msgid "enter: _delete_host: host %s " #~ msgstr "" -#~ msgid "creating vif(s) for vm: |%s|" +#~ msgid "leave: _delete_host: host %s " #~ msgstr "" -#~ msgid "Created VIF %(vif_ref)s for VM %(vm_ref)s, network %(network_ref)s." -#~ msgstr "Creado el VIF %(vif_ref)s para VM %(vm_ref)s, red %(network_ref)s." +#~ msgid "_create_host failed to return the host name." +#~ msgstr "" -#~ msgid "injecting hostname to xs for vm: |%s|" +#~ msgid "_get_host_from_connector failed to return the host name for connector" #~ msgstr "" #~ msgid "" -#~ "The agent call to %(method)s returned" -#~ " an invalid response: %(ret)r. VM " -#~ "id=%(instance_uuid)s; path=%(path)s; args=%(addl_args)r" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to any host found." #~ msgstr "" #~ msgid "" -#~ "TIMEOUT: The call to %(method)s timed" -#~ " out. VM id=%(instance_uuid)s; args=%(args)r" +#~ "terminate_connection: Multiple mappings of " +#~ "volume %(vol_name)s found, no host " +#~ "specified." #~ msgstr "" #~ msgid "" -#~ "NOT IMPLEMENTED: The call to %(method)s" -#~ " is not supported by the agent. " -#~ "VM id=%(instance_uuid)s; args=%(args)r" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to host %(host_name)s found" +#~ msgstr "" + +#~ msgid "protocol must be specified as ' iSCSI' or ' FC'" +#~ msgstr "" + +#~ msgid "enter: _create_vdisk: vdisk %s " #~ msgstr "" #~ msgid "" -#~ "The call to %(method)s returned an " -#~ "error: %(e)s. 
VM id=%(instance_uuid)s; " -#~ "args=%(args)r" +#~ "_create_vdisk %(name)s - did not find success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" -#~ msgid "Creating VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " -#~ msgstr "Creando VBD para VM %(vm_ref)s, VDI %(vdi_ref)s ... " +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find success " +#~ "message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" -#~ msgid "Error destroying VDI" +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find mapping " +#~ "id in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" #~ msgstr "" -#~ msgid "\tVolume %s is NOT VSA volume" +#~ msgid "" +#~ "_prepare_fc_map: Failed to prepare FlashCopy" +#~ " from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s creation - do nothing" +#~ msgid "" +#~ "Unexecpted mapping status %(status)s for " +#~ "mapping %(id)s. Attributes: %(attr)s" #~ msgstr "" -#~ msgid "VSA BE create_volume for %s failed" +#~ msgid "" +#~ "Mapping %(id)s prepare failed to " +#~ "complete within the allotted %(to)d " +#~ "seconds timeout. Terminating." #~ msgstr "" -#~ msgid "VSA BE create_volume for %s succeeded" +#~ msgid "" +#~ "_prepare_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s with " +#~ "exception %(ex)s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s deletion - do nothing" +#~ msgid "_prepare_fc_map: %s" #~ msgstr "" -#~ msgid "VSA BE delete_volume for %s failed" +#~ msgid "" +#~ "_start_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" -#~ msgid "VSA BE delete_volume for %s suceeded" +#~ msgid "" +#~ "enter: _run_flashcopy: execute FlashCopy from" +#~ " source %(source)s to target %(target)s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s local path call - call discover" +#~ msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s ensure export - do nothing" +#~ msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s create export - do nothing" +#~ msgid "_create_copy: Source vdisk %(src_vdisk)s (%(src_id)s) does not exist" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s remove export - do nothing" +#~ msgid "" +#~ "_create_copy: cannot get source vdisk " +#~ "%(src)s capacity from vdisk attributes " +#~ "%(attr)s" #~ msgstr "" -#~ msgid "VSA BE remove_export for %s failed" +#~ msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" #~ msgstr "" -#~ msgid "Failed to retrieve QoS info" +#~ msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" #~ msgstr "" -#~ msgid "invalid drive data" +#~ msgid "" +#~ "leave: _get_flashcopy_mapping_attributes: mapping " +#~ "%(fc_map_id)s, attributes %(attributes)s" #~ msgstr "" -#~ msgid "drive_name not defined" +#~ msgid "enter: _is_vdisk_defined: vdisk %s " #~ msgstr "" -#~ msgid "invalid drive type name %s" +#~ msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " #~ msgstr "" -#~ msgid "*** Experimental VSA code ***" +#~ msgid "enter: _delete_vdisk: vdisk %s" #~ msgstr "" -#~ msgid "Requested number of VCs (%d) is too high. Setting to default" +#~ msgid "warning: Tried to delete vdisk %s but it does not exist." 
#~ msgstr "" -#~ msgid "Creating VSA: %s" +#~ msgid "leave: _delete_vdisk: vdisk %s" #~ msgstr "" #~ msgid "" -#~ "VSA ID %(vsa_id)d %(vsa_name)s: Create " -#~ "volume %(vol_name)s, %(vol_size)d GB, type " -#~ "%(vol_type_id)s" +#~ "_add_vdisk_copy %(name)s - did not find" +#~ " success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)d: Update VSA status to %(status)s" +#~ msgid "_get_vdisk_copy_attrs: Could not get vdisk copy data" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)d: Update VSA call" +#~ msgid "_get_pool_attrs: Pool %s does not exist" #~ msgstr "" -#~ msgid "Adding %(add_cnt)s VCs to VSA %(vsa_name)s." +#~ msgid "enter: _execute_command_and_parse_attributes: command %s" #~ msgstr "" -#~ msgid "Deleting %(del_cnt)s VCs from VSA %(vsa_name)s." +#~ msgid "" +#~ "leave: _execute_command_and_parse_attributes:\n" +#~ "command: %(cmd)s\n" +#~ "attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "_get_hdr_dic: attribute headers and values do not match.\n" +#~ " Headers: %(header)s\n" +#~ " Values: %(row)s" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Deleting %(direction)s volume %(vol_name)s" +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ "stdout: %(out)s\n" +#~ "stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" -#~ msgid "Unable to delete volume %s" +#~ msgid "Did not find expected column in %(fun)s: %(hdr)s" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Forced delete. %(direction)s volume %(vol_name)s" +#~ msgid "" +#~ "Volume size %(size)s cannot be lesser" +#~ " than the snapshot size %(snap_size)s. " +#~ "They must be >= original snapshot " +#~ "size." #~ msgstr "" -#~ msgid "Going to try to terminate VSA ID %s" +#~ msgid "" +#~ "Clones currently disallowed when %(size)s " +#~ "< %(source_size)s. They must be >= " +#~ "original volume size." #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Delete instance %(name)s" +#~ msgid "" +#~ "Size of specified image %(image_size)s " +#~ "is larger than volume size " +#~ "%(volume_size)s." #~ msgstr "" -#~ msgid "Create call received for VSA %s" +#~ msgid "" +#~ "Image minDisk size %(min_disk)s is " +#~ "larger than the volume size " +#~ "%(volume_size)s." #~ msgstr "" -#~ msgid "Failed to find VSA %(vsa_id)d" +#~ msgid "Updating volume %(volume_id)s with %(update)s" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Drive %(vol_id)s created. Status %(status)s" +#~ msgid "Volume %s: resetting 'creating' status failed" #~ msgstr "" -#~ msgid "Drive %(vol_name)s (%(vol_disp_name)s) still in creating phase - wait" +#~ msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s" #~ msgstr "" -#~ msgid "" -#~ "VSA ID %(vsa_id)d: Not all volumes " -#~ "are created (%(cvol_real)d of %(cvol_exp)d)" +#~ msgid "Marking volume %s as bootable" #~ msgstr "" #~ msgid "" -#~ "VSA ID %(vsa_id)d: Drive %(vol_name)s " -#~ "(%(vol_disp_name)s) is in %(status)s state" +#~ "Attempting download of %(image_id)s " +#~ "(%(image_location)s) to volume %(volume_id)s" #~ msgstr "" -#~ msgid "Failed to update attach status for volume %(vol_name)s. 
%(ex)s" +#~ msgid "" +#~ "Downloaded image %(image_id)s (%(image_location)s)" +#~ " to volume %(volume_id)s successfully" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)d: Delete all BE volumes" +#~ msgid "" +#~ "Creating volume glance metadata for " +#~ "volume %(volume_id)s backed by image " +#~ "%(image_id)s with: %(vol_metadata)s" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)d: Start %(vc_count)d instances" +#~ msgid "" +#~ "Cloning %(volume_id)s from image %(image_id)s" +#~ " at location %(image_location)s" #~ msgstr "" diff --git a/cinder/locale/es_MX/LC_MESSAGES/cinder.po b/cinder/locale/es_MX/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..bc890dbe1c --- /dev/null +++ b/cinder/locale/es_MX/LC_MESSAGES/cinder.po @@ -0,0 +1,10448 @@ +# Spanish (Mexico) translations for cinder. +# Copyright (C) 2013 ORGANIZATION +# This file is distributed under the same license as the cinder project. +# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Cinder\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-02-03 06:16+0000\n" +"PO-Revision-Date: 2013-08-30 09:12+0000\n" +"Last-Translator: openstackjenkins \n" +"Language-Team: Spanish (Mexico) " +"(http://www.transifex.com/projects/p/openstack/language/es_MX/)\n" +"Plural-Forms: nplurals=2; plural=(n != 1)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:102 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:66 cinder/brick/exception.py:33 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:88 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:107 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:112 +#, python-format +msgid "Volume driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:116 +#, python-format +msgid "Backup driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:120 +#, python-format +msgid "Connection to glance failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:124 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:129 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:133 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:137 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:141 +msgid "Volume driver not ready." +msgstr "" + +#: cinder/exception.py:145 cinder/brick/exception.py:74 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:150 +#, python-format +msgid "Invalid snapshot: %(reason)s" +msgstr "" + +#: cinder/exception.py:154 +#, python-format +msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:159 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:163 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:167 +msgid "The results are invalid." 
+msgstr "" + +#: cinder/exception.py:171 +#, python-format +msgid "Invalid input received: %(reason)s" +msgstr "" + +#: cinder/exception.py:175 +#, python-format +msgid "Invalid volume type: %(reason)s" +msgstr "" + +#: cinder/exception.py:179 +#, python-format +msgid "Invalid volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:183 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:187 +#, python-format +msgid "Invalid host: %(reason)s" +msgstr "" + +#: cinder/exception.py:193 cinder/brick/exception.py:81 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:197 +#, python-format +msgid "Invalid auth key: %(reason)s" +msgstr "" + +#: cinder/exception.py:201 +#, python-format +msgid "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" +msgstr "" + +#: cinder/exception.py:206 +msgid "Service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:210 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:214 +#, python-format +msgid "The device in the path %(path)s is unavailable: %(reason)s" +msgstr "" + +#: cinder/exception.py:218 +#, python-format +msgid "Expected a uuid but received %(uuid)s." +msgstr "" + +#: cinder/exception.py:222 cinder/brick/exception.py:68 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:228 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:232 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "" +"Volume %(volume_id)s has no administration metadata with key " +"%(metadata_key)s." +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Invalid metadata: %(reason)s" +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Invalid metadata size: %(reason)s" +msgstr "" + +#: cinder/exception.py:250 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:255 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:264 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:269 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s deletion is not allowed with volumes " +"present with the type." +msgstr "" + +#: cinder/exception.py:274 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:278 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:282 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:287 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:291 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:295 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "Service %(service_id)s could not be found." 
+msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:328 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:332 +#, python-format +msgid "Unknown quota resources %(unknown)s." +msgstr "" + +#: cinder/exception.py:336 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:340 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:344 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:348 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:352 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:356 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:365 +#, python-format +msgid "Volume Type %(id)s already exists." +msgstr "" + +#: cinder/exception.py:369 +#, python-format +msgid "Volume type encryption for type %(type_id)s already exists." +msgstr "" + +#: cinder/exception.py:373 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:377 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:381 +#, python-format +msgid "Could not find parameter %(param)s" +msgstr "" + +#: cinder/exception.py:385 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:389 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:398 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:402 +#, python-format +msgid "Quota exceeded: code=%(code)s" +msgstr "" + +#: cinder/exception.py:409 +#, python-format +msgid "" +"Requested volume or snapshot exceeds allowed Gigabytes quota. Requested " +"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." 
+msgstr "" + +#: cinder/exception.py:415 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:419 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:423 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:427 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:432 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:436 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:440 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:444 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:449 +#, python-format +msgid "Glance metadata for volume/snapshot %(id)s cannot be found." +msgstr "" + +#: cinder/exception.py:453 +#, python-format +msgid "Failed to export for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:457 +#, python-format +msgid "Failed to create metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:461 +#, python-format +msgid "Failed to update metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:465 +#, python-format +msgid "Failed to copy metadata to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:469 +#, python-format +msgid "Failed to copy image to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:473 +msgid "Invalid Ceph args provided for backup rbd operation" +msgstr "" + +#: cinder/exception.py:477 +msgid "An error has occurred during backup operation" +msgstr "" + +#: cinder/exception.py:481 +msgid "Backup RBD operation failed" +msgstr "" + +#: cinder/exception.py:485 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:489 +msgid "Failed to identify volume backend." +msgstr "" + +#: cinder/exception.py:493 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "" + +#: cinder/exception.py:497 +#, python-format +msgid "Connection to swift failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:501 +#, python-format +msgid "Transfer %(transfer_id)s could not be found." +msgstr "" + +#: cinder/exception.py:505 +#, python-format +msgid "Volume migration failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:509 +#, python-format +msgid "SSH command injection detected: %(command)s" +msgstr "" + +#: cinder/exception.py:513 +#, python-format +msgid "QoS Specs %(specs_id)s already exists." +msgstr "" + +#: cinder/exception.py:517 +#, python-format +msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:522 +#, python-format +msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "No such QoS spec %(specs_id)s." +msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:536 +#, python-format +msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." 
+msgstr "" + +#: cinder/exception.py:541 +#, python-format +msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." +msgstr "" + +#: cinder/exception.py:546 +#, python-format +msgid "Invalid qos specs: %(reason)s" +msgstr "" + +#: cinder/exception.py:550 +#, python-format +msgid "QoS Specs %(specs_id)s is still associated with entities." +msgstr "" + +#: cinder/exception.py:554 +#, python-format +msgid "key manager error: %(reason)s" +msgstr "" + +#: cinder/exception.py:560 +msgid "Coraid Cinder Driver exception." +msgstr "" + +#: cinder/exception.py:564 +msgid "Failed to encode json data." +msgstr "" + +#: cinder/exception.py:568 +msgid "Login on ESM failed." +msgstr "" + +#: cinder/exception.py:572 +msgid "Relogin on ESM failed." +msgstr "" + +#: cinder/exception.py:576 +#, python-format +msgid "Group with name \"%(group_name)s\" not found." +msgstr "" + +#: cinder/exception.py:580 +#, python-format +msgid "ESM configure request failed: %(message)s." +msgstr "" + +#: cinder/exception.py:584 +#, python-format +msgid "Coraid ESM not available with reason: %(reason)s." +msgstr "" + +#: cinder/exception.py:589 +msgid "Zadara Cinder Driver exception." +msgstr "" + +#: cinder/exception.py:593 +#, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:597 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:601 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:605 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:609 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:613 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:618 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:622 +msgid "SolidFire Cinder Driver exception" +msgstr "" + +#: cinder/exception.py:626 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:630 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:636 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:641 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:645 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:649 cinder/exception.py:662 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:654 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:658 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/manager.py:133 +msgid "Notifying Schedulers of capabilities ..." +msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:105 +#, python-format +msgid "" +"Default quota for resource: %(res)s is set by the default quota flag: " +"quota_%(res)s, it is now deprecated. Please use the the default quota " +"class for default quota." 
+msgstr "" + +#: cinder/quota.py:748 +#, python-format +msgid "Created reservations %s" +msgstr "" + +#: cinder/quota.py:770 +#, python-format +msgid "Failed to commit reservations %s" +msgstr "" + +#: cinder/quota.py:790 +#, python-format +msgid "Failed to roll back reservations %s" +msgstr "" + +#: cinder/quota.py:876 +msgid "Cannot register resource" +msgstr "" + +#: cinder/quota.py:879 +msgid "Cannot register resources" +msgstr "" + +#: cinder/quota_utils.py:46 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume - " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/quota_utils.py:56 cinder/transfer/api.py:168 +#: cinder/volume/flows/api/create_volume.py:520 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/service.py:95 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:108 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:148 +#, python-format +msgid "" +"Report interval must be less than service down time. Current config " +"service_down_time: %(service_down_time)s, report_interval for this: " +"service is: %(report_interval)s. Setting global service_down_time to: " +"%(new_down_time)s" +msgstr "" + +#: cinder/service.py:216 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:255 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:270 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:276 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:298 +#, python-format +msgid "" +"Value of config option %(name)s_workers must be integer greater than 1. " +"Input value ignored." +msgstr "" + +#: cinder/service.py:373 +msgid "serve() can only be called once" +msgstr "" + +#: cinder/service.py:379 cinder/openstack/common/service.py:166 +#: cinder/openstack/common/service.py:384 +msgid "Full set of CONF:" +msgstr "" + +#: cinder/service.py:387 +#, python-format +msgid "%s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Can not translate %s to integer." 
+msgstr "" + +#: cinder/utils.py:127 +#, python-format +msgid "May specify only one of %s" +msgstr "" + +#: cinder/utils.py:212 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:228 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:412 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:423 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:698 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:759 +#, python-format +msgid "Volume driver %s not initialized" +msgstr "" + +#: cinder/wsgi.py:127 cinder/openstack/common/sslutils.py:50 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: cinder/wsgi.py:130 cinder/openstack/common/sslutils.py:53 +#, python-format +msgid "Unable to find ca_file : %s" +msgstr "" + +#: cinder/wsgi.py:133 cinder/openstack/common/sslutils.py:56 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: cinder/wsgi.py:136 cinder/openstack/common/sslutils.py:59 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:169 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:206 +#, python-format +msgid "Started %(name)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:244 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:313 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." +msgstr "" + +#: cinder/api/common.py:92 cinder/api/common.py:126 cinder/volume/api.py:266 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:95 cinder/api/common.py:130 cinder/volume/api.py:263 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:120 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:134 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:162 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:189 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:182 +msgid "Initializing extension manager." 
+msgstr "" + +#: cinder/api/extensions.py:197 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:235 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:236 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:240 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:256 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:262 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:276 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:287 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:356 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:266 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:463 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:786 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:907 +msgid "subclasses must implement construct()!" +msgstr "" + +#: cinder/api/contrib/admin_actions.py:81 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:227 +#: cinder/api/contrib/volume_transfer.py:157 +#: cinder/api/contrib/volume_transfer.py:193 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:224 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:60 +msgid "Snapshot not found." 
+msgstr "" + +#: cinder/api/contrib/hosts.py:86 cinder/api/openstack/wsgi.py:245 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:136 +#, python-format +msgid "Host '%s' could not be found." +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:168 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:180 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:206 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:214 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:111 +msgid "Please specify a name for QoS specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:220 +msgid "Failed to disassociate qos specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:222 +msgid "Qos specs still in use." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:298 +#: cinder/api/contrib/qos_specs_manage.py:351 +msgid "Volume Type id must not be None." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:72 +msgid "Missing required element quota_class_set in request body." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:81 +msgid "Quota class limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:85 +msgid "Quota class limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:60 +msgid "Quota limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quotas.py:65 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:100 +msgid "Missing required element quota_set in request body." +msgstr "" + +#: cinder/api/contrib/quotas.py:111 +#, python-format +msgid "Bad key(s) in quota set: %s" +msgstr "" + +#: cinder/api/contrib/scheduler_hints.py:36 +msgid "Malformed scheduler_hints attribute" +msgstr "" + +#: cinder/api/contrib/services.py:84 +msgid "" +"Query by service parameter is deprecated. Please use binary parameter " +"instead." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:51 +msgid "'status' must be specified." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:61 +#, python-format +msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:67 +#, python-format +msgid "" +"Provided snapshot status %(provided)s not allowed for snapshot with " +"status %(current)s." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:79 +msgid "progress must be an integer percentage" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:101 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:105 +#: cinder/api/v1/snapshot_metadata.py:75 cinder/api/v1/volume_metadata.py:75 +#: cinder/api/v2/snapshot_metadata.py:75 cinder/api/v2/volume_metadata.py:74 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:108 +#: cinder/api/v1/snapshot_metadata.py:79 cinder/api/v1/volume_metadata.py:79 +#: cinder/api/v2/snapshot_metadata.py:79 cinder/api/v2/volume_metadata.py:78 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:150 +msgid "" +"Key names can only contain alphanumeric characters, underscores, periods," +" colons and hyphens." 
+msgstr "" + +#: cinder/api/contrib/volume_actions.py:99 +#, python-format +msgid "" +"Invalid request to attach volume to an instance %(instance_uuid)s and a " +"host %(host_name)s simultaneously" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:107 +msgid "Invalid request to attach volume to an invalid target" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:111 +msgid "" +"Invalid request to attach volume with an invalid mode. Attaching mode " +"should be 'rw' or 'ro'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:196 +msgid "Unable to fetch connection information from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:216 +msgid "Unable to terminate volume connection from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:229 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:237 +msgid "Bad value for 'force' parameter." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:240 +msgid "'force' is not string or bool." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:280 +msgid "New volume size must be specified as an integer." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:299 +msgid "Must specify readonly in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:307 +msgid "Bad value for 'readonly'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:311 +msgid "'readonly' not string or bool" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:325 +msgid "New volume type must be specified." +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:131 +msgid "Listing volume transfers" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:147 +#, python-format +msgid "Creating new volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:162 +#, python-format +msgid "Creating transfer of volume %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:183 +#, python-format +msgid "Accepting volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:196 +#, python-format +msgid "Accepting transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:217 +#, python-format +msgid "Delete transfer with id: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:64 +msgid "key_size must be non-negative" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:67 +msgid "key_size must be an integer" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:73 +msgid "provider must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:75 +msgid "control_location must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:83 +#, python-format +msgid "Valid control location are: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:111 +msgid "Cannot create encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:115 +msgid "Create body is not valid." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:157 +msgid "Cannot delete encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/middleware/auth.py:108 +msgid "Invalid service catalog json." 
+msgstr "" + +#: cinder/api/middleware/fault.py:44 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:53 cinder/api/openstack/wsgi.py:984 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/fault.py:69 +#, python-format +msgid "%(exception)s: %(explanation)s" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:55 cinder/api/middleware/sizelimit.py:64 +#: cinder/api/middleware/sizelimit.py:78 +msgid "Request is too large." +msgstr "" + +#: cinder/api/openstack/__init__.py:69 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:80 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:104 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:126 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." +msgstr "" + +#: cinder/api/openstack/wsgi.py:220 cinder/api/openstack/wsgi.py:634 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:639 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:677 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:682 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:685 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:793 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:799 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:803 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:914 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:917 cinder/api/openstack/wsgi.py:930 +#: cinder/api/v1/snapshot_metadata.py:53 cinder/api/v1/snapshot_metadata.py:71 +#: cinder/api/v1/snapshot_metadata.py:96 cinder/api/v1/snapshot_metadata.py:121 +#: cinder/api/v1/volume_metadata.py:53 cinder/api/v1/volume_metadata.py:71 +#: cinder/api/v1/volume_metadata.py:96 cinder/api/v1/volume_metadata.py:121 +#: cinder/api/v2/snapshot_metadata.py:53 cinder/api/v2/snapshot_metadata.py:71 +#: cinder/api/v2/snapshot_metadata.py:96 cinder/api/v2/snapshot_metadata.py:121 +#: cinder/api/v2/volume_metadata.py:52 cinder/api/v2/volume_metadata.py:70 +#: cinder/api/v2/volume_metadata.py:95 cinder/api/v2/volume_metadata.py:120 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:927 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:939 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:987 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." 
+msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:139 cinder/api/v2/limits.py:138 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:264 cinder/api/v2/limits.py:261 +msgid "This request was rate-limited." +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:37 cinder/api/v1/snapshot_metadata.py:117 +#: cinder/api/v1/snapshot_metadata.py:156 cinder/api/v2/snapshot_metadata.py:37 +#: cinder/api/v2/snapshot_metadata.py:117 +#: cinder/api/v2/snapshot_metadata.py:156 +msgid "snapshot does not exist" +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:139 +#: cinder/api/v1/snapshot_metadata.py:149 cinder/api/v1/volume_metadata.py:139 +#: cinder/api/v1/volume_metadata.py:149 cinder/api/v2/snapshot_metadata.py:139 +#: cinder/api/v2/snapshot_metadata.py:149 cinder/api/v2/volume_metadata.py:138 +#: cinder/api/v2/volume_metadata.py:148 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:119 cinder/api/v2/snapshots.py:120 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:184 +msgid "'volume_id' must be specified" +msgstr "" + +#: cinder/api/v1/snapshots.py:182 cinder/api/v2/snapshots.py:193 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:186 cinder/api/v2/snapshots.py:202 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:37 cinder/api/v1/volume_metadata.py:117 +#: cinder/api/v1/volume_metadata.py:156 cinder/api/v2/volume_metadata.py:36 +#: cinder/api/v2/volume_metadata.py:116 cinder/api/v2/volume_metadata.py:155 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:111 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:290 cinder/api/v2/volumes.py:228 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:344 cinder/api/v1/volumes.py:348 +#: cinder/api/v2/volumes.py:298 cinder/api/v2/volumes.py:302 +msgid "Invalid imageRef provided." 
+msgstr "" + +#: cinder/api/v1/volumes.py:388 cinder/api/v2/volumes.py:354 +#, python-format +msgid "snapshot id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:401 +#, python-format +msgid "source vol id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:412 cinder/api/v2/volumes.py:377 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/v1/volumes.py:496 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/snapshots.py:111 cinder/api/v2/snapshots.py:126 +#: cinder/api/v2/snapshots.py:267 +msgid "Snapshot could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:174 cinder/api/v2/snapshots.py:234 +#: cinder/api/v2/volumes.py:313 cinder/api/v2/volumes.py:419 +#, python-format +msgid "Missing required element '%s' in request body" +msgstr "" + +#: cinder/api/v2/snapshots.py:190 cinder/api/v2/volumes.py:217 +#: cinder/api/v2/volumes.py:234 cinder/api/v2/volumes.py:449 +msgid "Volume could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:230 cinder/api/v2/volumes.py:415 +msgid "Missing request body" +msgstr "" + +#: cinder/api/v2/types.py:70 +msgid "Volume type not found" +msgstr "" + +#: cinder/api/v2/volumes.py:237 +msgid "Volume cannot be deleted while in attached state" +msgstr "" + +#: cinder/api/v2/volumes.py:343 +msgid "Volume type not found." +msgstr "" + +#: cinder/api/v2/volumes.py:366 +#, python-format +msgid "source volume id:%s not found" +msgstr "" + +#: cinder/api/v2/volumes.py:472 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:66 +msgid "Backup status must be available or error" +msgstr "" + +#: cinder/backup/api.py:105 +msgid "Volume to be backed up must be available" +msgstr "" + +#: cinder/backup/api.py:140 +msgid "Backup status must be available" +msgstr "" + +#: cinder/backup/api.py:145 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:154 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:170 +msgid "Volume to be restored to must be available" +msgstr "" + +#: cinder/backup/api.py:176 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." +msgstr "" + +#: cinder/backup/api.py:181 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:97 +msgid "NULL host not allowed for volume backend lookup." +msgstr "" + +#: cinder/backup/manager.py:100 +#, python-format +msgid "Checking hostname '%s' for backend info." +msgstr "" + +#: cinder/backup/manager.py:107 +#, python-format +msgid "Backend not found in hostname (%s) so using default." +msgstr "" + +#: cinder/backup/manager.py:117 +#, python-format +msgid "Manager requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:120 cinder/backup/manager.py:132 +msgid "Fetching default backend." +msgstr "" + +#: cinder/backup/manager.py:123 +#, python-format +msgid "Volume manager for backend '%s' does not exist." +msgstr "" + +#: cinder/backup/manager.py:129 +#, python-format +msgid "Driver requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:147 +#, python-format +msgid "" +"Registering backend %(backend)s (host=%(host)s " +"backend_name=%(backend_name)s)." +msgstr "" + +#: cinder/backup/manager.py:154 +#, python-format +msgid "Registering default backend %s." 
+msgstr "" + +#: cinder/backup/manager.py:158 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)." +msgstr "" + +#: cinder/backup/manager.py:165 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s." +msgstr "" + +#: cinder/backup/manager.py:184 +msgid "Cleaning up incomplete backup operations." +msgstr "" + +#: cinder/backup/manager.py:189 +#, python-format +msgid "Resetting volume %s to available (was backing-up)." +msgstr "" + +#: cinder/backup/manager.py:194 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)." +msgstr "" + +#: cinder/backup/manager.py:206 +#, python-format +msgid "Resetting backup %s to error (was creating)." +msgstr "" + +#: cinder/backup/manager.py:212 +#, python-format +msgid "Resetting backup %s to available (was restoring)." +msgstr "" + +#: cinder/backup/manager.py:217 +#, python-format +msgid "Resuming delete on backup: %s." +msgstr "" + +#: cinder/backup/manager.py:225 +#, python-format +msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:237 +#, python-format +msgid "" +"Create backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:249 +#, python-format +msgid "" +"Create backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:282 +#, python-format +msgid "Create backup finished. backup: %s." +msgstr "" + +#: cinder/backup/manager.py:286 +#, python-format +msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:299 +#, python-format +msgid "" +"Restore backup aborted: expected volume status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:310 +#, python-format +msgid "" +"Restore backup aborted: expected backup status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:329 +#, python-format +msgid "" +"Restore backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:360 +#, python-format +msgid "" +"Restore backup finished, backup %(backup_id)s restored to volume " +"%(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:379 +#, python-format +msgid "Delete backup started, backup: %s." +msgstr "" + +#: cinder/backup/manager.py:386 +#, python-format +msgid "" +"Delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:399 +#, python-format +msgid "" +"Delete backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:422 +#, python-format +msgid "Delete backup finished, backup %s deleted." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:116 +msgid "" +"rbd striping not supported - ignoring configuration settings for rbd " +"striping" +msgstr "" + +#: cinder/backup/drivers/ceph.py:147 +#, python-format +msgid "invalid user '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:213 +msgid "backup_id required" +msgstr "" + +#: cinder/backup/drivers/ceph.py:224 +#, python-format +msgid "discarding %(length)s bytes from offset %(offset)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:232 +#, python-format +msgid "writing zeroes chunk %d" +msgstr "" + +#: cinder/backup/drivers/ceph.py:246 +#, python-format +msgid "transferring data between '%(src)s' and '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:250 +#, python-format +msgid "%(chunks)s chunks of %(bytes)s bytes to be transferred" +msgstr "" + +#: cinder/backup/drivers/ceph.py:269 +#, python-format +msgid "transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:279 +#, python-format +msgid "transferring remaining %s bytes" +msgstr "" + +#: cinder/backup/drivers/ceph.py:295 +#, python-format +msgid "creating base image '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:322 cinder/backup/drivers/ceph.py:603 +#, python-format +msgid "deleting backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:325 +msgid "no backup snapshot to delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:361 +#, python-format +msgid "trying diff format name format basename='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:369 +#, python-format +msgid "image %s not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:377 +#, python-format +msgid "base image still has %s snapshots so skipping base image delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:382 +#, python-format +msgid "deleting base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:389 +#, python-format +msgid "image busy, retrying %(retries)s more time(s) in %(delay)ss" +msgstr "" + +#: cinder/backup/drivers/ceph.py:394 +msgid "max retries reached - raising error" +msgstr "" + +#: cinder/backup/drivers/ceph.py:397 +#, python-format +msgid "base backup image='%s' deleted)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:407 +#, python-format +msgid "deleting source snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:453 +#, python-format +msgid "performing differential transfer from '%(src)s' to '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:478 +#, python-format +msgid "rbd diff op failed - (ret=%(ret)s stderr=%(stderr)s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:488 +#, python-format +msgid "image '%s' not found - trying diff format name" +msgstr "" + +#: cinder/backup/drivers/ceph.py:493 +#, python-format +msgid "diff format image '%s' not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:528 +#, python-format +msgid "using --from-snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:543 +#, python-format +msgid "source snap '%s' is stale so deleting" +msgstr "" + +#: cinder/backup/drivers/ceph.py:555 +#, python-format +msgid "" +"snap='%(snap)s' does not exist in base image='%(base)s' - aborting " +"incremental backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:566 +#, python-format +msgid "creating backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:586 +#, python-format +msgid "differential backup transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:595 +msgid "differential backup transfer failed" +msgstr "" + +#: 
cinder/backup/drivers/ceph.py:625 +#, python-format +msgid "creating base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:634 +msgid "copying data" +msgstr "" + +#: cinder/backup/drivers/ceph.py:694 +#, python-format +msgid "looking for snapshot of backup base '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:697 +#, python-format +msgid "backup base '%s' has no snapshots" +msgstr "" + +#: cinder/backup/drivers/ceph.py:704 +#, python-format +msgid "backup '%s' has no snapshot" +msgstr "" + +#: cinder/backup/drivers/ceph.py:708 +#, python-format +msgid "backup should only have one snapshot but instead has %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:713 +#, python-format +msgid "found snapshot '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:734 +msgid "need non-zero volume size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:751 +#, python-format +msgid "Starting backup of volume='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:764 +msgid "forcing full backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:776 +#, python-format +msgid "backup '%s' finished." +msgstr "" + +#: cinder/backup/drivers/ceph.py:834 +msgid "adjusting restore vol size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:846 +#, python-format +msgid "trying incremental restore from base='%(base)s' snap='%(snap)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:858 +msgid "differential restore failed, trying full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:869 +#, python-format +msgid "restore transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:916 +#, python-format +msgid "rbd has %s extents" +msgstr "" + +#: cinder/backup/drivers/ceph.py:938 +msgid "dest volume is original volume - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:959 +msgid "destination has extents - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:964 +#, python-format +msgid "no restore point found for backup='%s', forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:995 +msgid "forcing full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1005 +#, python-format +msgid "starting restore from Ceph backup=%(src)s to volume=%(dest)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1016 +msgid "volume_file does not support fileno() so skipping fsync()" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1021 +msgid "restore finished successfully." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:1023 +#, python-format +msgid "restore finished with error - %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1029 +#, python-format +msgid "delete started for backup=%s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1034 +msgid "rbd image not found but continuing anyway so that db entry can be removed" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1037 +#, python-format +msgid "delete '%s' finished with warning" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1039 +#, python-format +msgid "delete '%s' finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:106 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:123 +#, python-format +msgid "single_user auth mode enabled, but %(param)s not set" +msgstr "" + +#: cinder/backup/drivers/swift.py:141 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:146 +#, python-format +msgid "container %s does not exist" +msgstr "" + +#: cinder/backup/drivers/swift.py:151 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/drivers/swift.py:157 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:173 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:182 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:192 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:209 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/drivers/swift.py:214 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:219 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:224 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/drivers/swift.py:234 +#, python-format +msgid "volume size %d is invalid." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:248 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:271 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/drivers/swift.py:278 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:287 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/drivers/swift.py:291 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/drivers/swift.py:297 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:301 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:304 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:312 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/drivers/swift.py:328 cinder/backup/drivers/tsm.py:324 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/drivers/swift.py:345 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/drivers/swift.py:350 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:356 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/drivers/swift.py:362 +#, python-format +msgid "" +"restoring object from swift. backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:378 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/drivers/swift.py:401 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:409 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:423 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:428 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:432 cinder/backup/drivers/tsm.py:378 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:446 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:455 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:458 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:468 cinder/backup/drivers/tsm.py:440 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/backup/drivers/tsm.py:85 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to create device hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:143 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to obtain backup success notification from " +"server.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:173 +#, python-format +msgid "" +"restore: %(vol_id)s Failed.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:199 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a block device." +msgstr "" + +#: cinder/backup/drivers/tsm.py:206 +#, python-format +msgid "backup: %(vol_id)s Failed. Cannot obtain real path to device %(path)s." +msgstr "" + +#: cinder/backup/drivers/tsm.py:213 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a file." +msgstr "" + +#: cinder/backup/drivers/tsm.py:260 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to remove backup hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:286 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to TSM, volume path: " +"%(volume_path)s," +msgstr "" + +#: cinder/backup/drivers/tsm.py:298 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:308 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:338 +#, python-format +msgid "" +"restore: starting restore of backup from TSM to volume %(volume_id)s, " +"backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:352 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:362 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:413 +#, python-format +msgid "" +"delete: %(vol_id)s Failed to run dsmc with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:421 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments with " +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:432 +#, python-format +msgid "" +"delete: %(vol_id)s Failed with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/brick/exception.py:55 +#, python-format +msgid "Exception in string format operation. 
msg='%s'" +msgstr "" + +#: cinder/brick/exception.py:85 +msgid "We are unable to locate any Fibre Channel devices." +msgstr "" + +#: cinder/brick/exception.py:89 +msgid "Unable to find a Fibre Channel volume device." +msgstr "" + +#: cinder/brick/exception.py:93 +#, python-format +msgid "Volume device not found at %(device)s." +msgstr "" + +#: cinder/brick/exception.py:97 +#, python-format +msgid "Unable to find Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:101 +#, python-format +msgid "Failed to create Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:105 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:109 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:113 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:117 +#, python-format +msgid "Connect to volume via protocol %(protocol)s not supported." +msgstr "" + +#: cinder/brick/initiator/connector.py:127 +#, python-format +msgid "Invalid InitiatorConnector protocol specified %(protocol)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:140 +#, python-format +msgid "Failed to access the device on the path %(path)s: %(error)s %(info)s." +msgstr "" + +#: cinder/brick/initiator/connector.py:229 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. Try" +" number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:242 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:317 +#, python-format +msgid "Could not find the iSCSI Initiator File %s" +msgstr "" + +#: cinder/brick/initiator/connector.py:609 +msgid "We are unable to locate any Fibre Channel devices" +msgstr "" + +#: cinder/brick/initiator/connector.py:619 +#, python-format +msgid "Looking for Fibre Channel dev %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:629 +msgid "Fibre Channel volume device not found." +msgstr "" + +#: cinder/brick/initiator/connector.py:633 +#, python-format +msgid "Fibre volume not yet found. Will rescan & retry. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:649 +#, python-format +msgid "Found Fibre Channel volume %(name)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:658 +#, python-format +msgid "Multipath device discovered %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:776 +#, python-format +msgid "AoE volume not yet found at: %(path)s. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:789 +#, python-format +msgid "Found AoE device %(path)s (after %(tries)s rediscover)" +msgstr "" + +#: cinder/brick/initiator/connector.py:815 +#, python-format +msgid "aoe-discover: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:825 +#, python-format +msgid "aoe-revalidate %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:834 +#, python-format +msgid "aoe-flush %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:858 +msgid "" +"Connection details not present. RemoteFsClient may not initialize " +"properly." 
+msgstr "" + +#: cinder/brick/initiator/connector.py:915 +msgid "Invalid connection_properties specified no device_path attribute" +msgstr "" + +#: cinder/brick/initiator/linuxfc.py:50 cinder/brick/initiator/linuxfc.py:56 +msgid "systool is not installed" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:99 +#: cinder/brick/initiator/linuxscsi.py:107 +#: cinder/brick/initiator/linuxscsi.py:124 +#, python-format +msgid "multipath call failed exit (%(code)s)" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:145 +#, python-format +msgid "Couldn't find multipath device %(line)s" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:149 +#, python-format +msgid "Found multipath device = %(mdev)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:140 +msgid "Attempting recreate of backing lun..." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:158 +#, python-format +msgid "" +"Failed to recover attempt to create iscsi backing lun for volume " +"id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:177 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:184 +#, python-format +msgid "" +"Created volume path %(vp)s,\n" +"content: %(vc)%" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:216 cinder/brick/iscsi/iscsi.py:365 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:227 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:258 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:262 +#, python-format +msgid "Volume path %s does not exist, nothing to remove." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:280 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:290 cinder/brick/iscsi/iscsi.py:550 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:375 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:469 +msgid "cinder-rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:489 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:513 cinder/brick/iscsi/iscsi.py:522 +#, python-format +msgid "Failed to create iscsi target for volume id:%s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:532 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:542 +#, python-format +msgid "Failed to remove iscsi target for volume id:%s." 
+msgstr ""
+
+#: cinder/brick/iscsi/iscsi.py:571
+#, python-format
+msgid "Failed to add initiator iqn %s to target"
+msgstr ""
+
+#: cinder/brick/local_dev/lvm.py:75
+msgid "Error creating Volume Group"
+msgstr ""
+
+#: cinder/brick/local_dev/lvm.py:76 cinder/brick/local_dev/lvm.py:158
+#: cinder/brick/local_dev/lvm.py:474 cinder/brick/local_dev/lvm.py:503
+#: cinder/brick/local_dev/lvm.py:546 cinder/brick/local_dev/lvm.py:609
+#, python-format
+msgid "Cmd :%s"
+msgstr ""
+
+#: cinder/brick/local_dev/lvm.py:77 cinder/brick/local_dev/lvm.py:159
+#: cinder/brick/local_dev/lvm.py:475 cinder/brick/local_dev/lvm.py:504
+#: cinder/brick/local_dev/lvm.py:547 cinder/brick/local_dev/lvm.py:610
+#, python-format
+msgid "StdOut :%s"
+msgstr ""
+
+#: cinder/brick/local_dev/lvm.py:78 cinder/brick/local_dev/lvm.py:160
+#: cinder/brick/local_dev/lvm.py:476 cinder/brick/local_dev/lvm.py:505
+#: cinder/brick/local_dev/lvm.py:548 cinder/brick/local_dev/lvm.py:611
+#, python-format
+msgid "StdErr :%s"
+msgstr ""
+
+#: cinder/brick/local_dev/lvm.py:82
+#, python-format
+msgid "Unable to locate Volume Group %s"
+msgstr ""
+
+#: cinder/brick/local_dev/lvm.py:157
+msgid "Error querying thin pool about data_percent"
+msgstr ""
+
+#: cinder/brick/local_dev/lvm.py:370
+#, python-format
+msgid "Unable to find VG: %s"
+msgstr ""
+
+#: cinder/brick/local_dev/lvm.py:420
+msgid ""
+"Requested to set up thin provisioning; however, the current LVM version "
+"does not support it."
+msgstr ""
+
+#: cinder/brick/local_dev/lvm.py:473
+msgid "Error creating Volume"
+msgstr ""
+
+#: cinder/brick/local_dev/lvm.py:489
+#, python-format
+msgid "Unable to find LV: %s"
+msgstr ""
+
+#: cinder/brick/local_dev/lvm.py:502
+msgid "Error creating snapshot"
+msgstr ""
+
+#: cinder/brick/local_dev/lvm.py:545
+msgid "Error activating LV"
+msgstr ""
+
+#: cinder/brick/local_dev/lvm.py:563
+#, python-format
+msgid "Error reported running lvremove: CMD: %(command)s, RESPONSE: %(response)s"
+msgstr ""
+
+#: cinder/brick/local_dev/lvm.py:568
+msgid "Attempting udev settle and retry of lvremove..."
+msgstr ""
+
+#: cinder/brick/local_dev/lvm.py:608
+msgid "Error extending Volume"
+msgstr ""
+
+#: cinder/brick/remotefs/remotefs.py:39
+msgid "nfs_mount_point_base required"
+msgstr ""
+
+#: cinder/brick/remotefs/remotefs.py:45
+msgid "glusterfs_mount_point_base required"
+msgstr ""
+
+#: cinder/brick/remotefs/remotefs.py:86
+#, python-format
+msgid "Already mounted: %s"
+msgstr ""
+
+#: cinder/common/config.py:125
+msgid "Deploy v1 of the Cinder API."
+msgstr ""
+
+#: cinder/common/config.py:128
+msgid "Deploy v2 of the Cinder API."
+msgstr ""
+
+#: cinder/common/sqlalchemyutils.py:66
+#: cinder/openstack/common/db/sqlalchemy/utils.py:72
+msgid "Id not in sort_keys; is sort_keys unique?"
+msgstr ""
+
+#: cinder/common/sqlalchemyutils.py:114
+#: cinder/openstack/common/db/sqlalchemy/utils.py:120
+msgid "Unknown sort direction, must be 'desc' or 'asc'"
+msgstr ""
+
+#: cinder/compute/nova.py:97
+#, python-format
+msgid "Novaclient connection created using URL: %s"
+msgstr ""
+
+#: cinder/db/sqlalchemy/api.py:63
+msgid "Use of empty request context is deprecated"
+msgstr ""
+
+#: cinder/db/sqlalchemy/api.py:190
+#, python-format
+msgid "Unrecognized read_deleted value '%s'"
+msgstr ""
+
+#: cinder/db/sqlalchemy/api.py:843
+#, python-format
+msgid "Change will make usage less than 0 for the following resources: %s"
+msgstr ""
+
+#: cinder/db/sqlalchemy/api.py:1842
+#, python-format
+msgid "VolumeType %s deletion failed, VolumeType in use."
+msgstr "" + +#: cinder/db/sqlalchemy/api.py:2530 +#, python-format +msgid "No backup with id %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2615 +msgid "Volume must be available" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2639 +#, python-format +msgid "Volume in unexpected state %s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2662 +#, python-format +msgid "" +"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " +"%(status)s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:37 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:64 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:240 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:269 +msgid "Downgrade from initial Cinder install is unsupported." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:49 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:74 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:105 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:45 +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:48 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:46 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:127 +msgid "Dropping foreign key reservations_ibfk_1 failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:133 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:140 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:147 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:60 +msgid "Exception while creating table 'volume_glance_metadata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:75 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:68 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:58 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:61 +msgid "transfers table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:31 +msgid "migrations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:61 +#, python-format +msgid "Table |%s| not created" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:37 +#, python-format +msgid "Exception while dropping table %s." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:100 +#, python-format +msgid "Exception while creating table %s." 
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:34
+#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:43
+#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:49
+#, python-format
+msgid "Column |%s| not created!"
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:92
+msgid "encryption_key_id column not dropped from volumes"
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:100
+msgid "encryption_key_id column not dropped from snapshots"
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:105
+msgid "volume_type_id column not dropped from snapshots"
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:113
+msgid "encryption table not dropped"
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:49
+msgid "Table quality_of_service_specs not created!"
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:60
+msgid "Adding qos_specs_id column to volume type table failed."
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:85
+msgid "Dropping foreign key volume_types_ibfk_1 failed"
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:93
+msgid "Dropping qos_specs_id column failed."
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:100
+msgid "Dropping quality_of_service_specs table failed."
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:59
+msgid "volume_admin_metadata table not dropped"
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:48
+msgid ""
+"Found existing 'default' entries in the quota_classes table. Skipping "
+"insertion of default values."
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:72
+msgid "Added default quota class data into the DB."
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:74
+msgid "Default quota class data not inserted into the DB."
+msgstr ""
+
+#: cinder/image/glance.py:161 cinder/image/glance.py:169
+#, python-format
+msgid "Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s."
+msgstr ""
+
+#: cinder/image/image_utils.py:94 cinder/image/image_utils.py:199
+msgid "'qemu-img info' parsing failed."
+msgstr ""
+
+#: cinder/image/image_utils.py:101
+#, python-format
+msgid "fmt=%(fmt)s backed by: %(backing_file)s"
+msgstr ""
+
+#: cinder/image/image_utils.py:109 cinder/image/image_utils.py:192
+#, python-format
+msgid ""
+"Size is %(image_size)dGB and doesn't fit in a volume of size "
+"%(volume_size)dGB."
+msgstr ""
+
+#: cinder/image/image_utils.py:157
+#, python-format
+msgid ""
+"qemu-img is not installed and image is of type %s. Only RAW images can "
+"be used if qemu-img is not installed."
+msgstr ""
+
+#: cinder/image/image_utils.py:164
+msgid ""
+"qemu-img is not installed and the disk format is not specified. Only RAW"
+" images can be used if qemu-img is not installed."
+msgstr "" + +#: cinder/image/image_utils.py:178 +#, python-format +msgid "Copying image from %(tmp)s to volume %(dest)s - size: %(size)s" +msgstr "" + +#: cinder/image/image_utils.py:206 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:224 +#, python-format +msgid "Converted to %(vol_format)s, but format is now %(file_format)s" +msgstr "" + +#: cinder/image/image_utils.py:260 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:78 +msgid "" +"config option keymgr.fixed_key has not been defined: some operations may " +"fail unexpectedly" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:80 +msgid "keymgr.fixed_key not defined" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:134 +#, python-format +msgid "Not deleting key %s" +msgstr "" + +#: cinder/openstack/common/eventlet_backdoor.py:140 +#, python-format +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/fileutils.py:64 +#, python-format +msgid "Reloading cached file %s" +msgstr "" + +#: cinder/openstack/common/gettextutils.py:252 +msgid "Message objects do not support addition." +msgstr "" + +#: cinder/openstack/common/gettextutils.py:261 +msgid "" +"Message objects do not support str() because they may contain non-ascii " +"characters. Please use unicode() or translate() instead." +msgstr "" + +#: cinder/openstack/common/imageutils.py:96 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:189 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:200 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:227 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:235 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/log.py:301 +#, python-format +msgid "Deprecated: %s" +msgstr "" + +#: cinder/openstack/common/log.py:402 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:453 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:623 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:82 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:89 +#: cinder/tests/brick/test_brick_connector.py:466 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:129 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:136 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:43 +#, python-format +msgid "Unexpected argument for periodic task creation: %(arg)s." 
+msgstr "" + +#: cinder/openstack/common/periodic_task.py:134 +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:139 +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:177 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:186 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." +msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:127 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/openstack/common/processutils.py:142 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:167 +#: cinder/openstack/common/processutils.py:239 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:345 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:179 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/openstack/common/processutils.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:318 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:220 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/openstack/common/processutils.py:224 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/openstack/common/service.py:175 +#: cinder/openstack/common/service.py:269 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:187 +msgid "Exception during rpc cleanup." 
+msgstr "" + +#: cinder/openstack/common/service.py:238 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:275 +msgid "Unhandled exception" +msgstr "" + +#: cinder/openstack/common/service.py:308 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/openstack/common/service.py:327 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/openstack/common/service.py:337 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/openstack/common/service.py:354 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/openstack/common/service.py:358 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/service.py:362 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/openstack/common/service.py:392 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/openstack/common/service.py:410 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/openstack/common/sslutils.py:98 +#, python-format +msgid "Invalid SSL version : %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:86 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/strutils.py:182 +#, python-format +msgid "Invalid string format: %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:189 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/openstack/common/versionutils.py:69 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and " +"may be removed in %(remove_in)s." +msgstr "" + +#: cinder/openstack/common/versionutils.py:73 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s and may be removed in " +"%(remove_in)s. It will not be superseded." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:29 +msgid "An unknown error occurred in crypto utils." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:36 +#, python-format +msgid "Block size of %(given)d is too big, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:45 +#, python-format +msgid "Length of %(given)d is too long, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/db/exception.py:44 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:487 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:538 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:610 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/utils.py:33 +msgid "Sort key supplied was not valid." +msgstr "" + +#: cinder/openstack/common/notifier/api.py:129 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:145 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:164 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. 
Please use rpc_notifier instead."
+msgstr ""
+
+#: cinder/openstack/common/notifier/rpc_notifier.py:45
+#: cinder/openstack/common/notifier/rpc_notifier2.py:51
+#, python-format
+msgid "Could not send notification to %(topic)s. Payload=%(message)s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/__init__.py:105
+#, python-format
+msgid ""
+"An RPC is being made while holding a lock. The locks currently held are "
+"%(locks)s. This is probably a bug. Please report it. Include the "
+"following: [%(stack)s]."
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:83
+msgid "Pool creating new connection"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:208
+#, python-format
+msgid "no calling threads waiting for msg_id : %s, message : %s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:216
+#, python-format
+msgid ""
+"Number of call waiters is greater than warning threshold: %d. There "
+"could be a MulticallProxyWaiter leak."
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:299
+#, python-format
+msgid "unpacked context: %s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:345
+#, python-format
+msgid "UNIQUE_ID is %s."
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:414
+#, python-format
+msgid "received %s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:422
+#, python-format
+msgid "no method for message: %s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:423
+#, python-format
+msgid "No method for message: %s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:451
+#: cinder/openstack/common/rpc/impl_zmq.py:280
+#, python-format
+msgid "Expected exception during message handling (%s)"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:459
+#: cinder/openstack/common/rpc/impl_zmq.py:286
+msgid "Exception during message handling"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:594
+#, python-format
+msgid "Making synchronous call on %s ..."
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:597
+#, python-format
+msgid "MSG_ID is %s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:631
+#, python-format
+msgid "Making asynchronous cast on %s..."
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:640
+msgid "Making asynchronous fanout cast..."
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:668
+#, python-format
+msgid "Sending %(event_type)s on %(topic)s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:74
+msgid "An unknown RPC related exception occurred."
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:104
+#, python-format
+msgid ""
+"Remote error: %(exc_type)s %(value)s\n"
+"%(traceback)s."
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:121
+#, python-format
+msgid ""
+"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:"
+" \"%(method)s\" info: \"%(info)s\""
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:137
+#: cinder/openstack/common/rpc/common.py:138
+#: cinder/openstack/common/rpc/common.py:139
+msgid ""
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:143
+#, python-format
+msgid "Found duplicate message(%(msg_id)s). Skipping it."
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:147
+msgid "Invalid reuse of an RPC connection."
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:151
+#, python-format
+msgid "Specified RPC version, %(version)s, not supported by this endpoint."
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:156
+#, python-format
+msgid ""
+"Specified RPC envelope version, %(version)s, not supported by this "
+"endpoint."
+msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:166 +#: cinder/openstack/common/rpc/impl_qpid.py:164 +msgid "Failed to process message... skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:477 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:499 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:536 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:552 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:606 +#: cinder/openstack/common/rpc/impl_qpid.py:507 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:624 +#: cinder/openstack/common/rpc/impl_qpid.py:522 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:628 +#: cinder/openstack/common/rpc/impl_qpid.py:526 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:667 +#: cinder/openstack/common/rpc/impl_qpid.py:561 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:84 +#, python-format +msgid "Invalid value for qpid_topology_version: %d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:455 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:461 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:474 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:534 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:101 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:136 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:137 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:138 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:146 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:158 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:200 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:205 +msgid "You cannot send on this socket." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:267 +#, python-format +msgid "Running func with context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:305 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:387 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:437 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:443 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:475 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:481 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:497 +#, python-format +msgid "Required IPC directory does not exist at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:506 +#, python-format +msgid "Permission denied to IPC directory at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:509 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:543 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:562 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:590 +msgid "Skipping topic registration. Already registered." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:597 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:649 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:662 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:675 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:678 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:681 +#, python-format +msgid "Received message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:682 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:691 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:698 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:721 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:724 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:728 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:731 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:771 +#, python-format +msgid "topic is %s." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:815 +#, python-format +msgid "rpc_zmq_matchmaker = %(orig)s is deprecated; use %(new)s instead" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#: cinder/openstack/common/rpc/matchmaker_ring.py:79 +#: cinder/openstack/common/rpc/matchmaker_ring.py:97 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:54 +#, python-format +msgid "extra_spec requirement '%(req)s' does not match '%(cap)s'" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:67 +#, python-format +msgid "%(host_state)s fails resource_type extra_specs requirements" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:43 +msgid "Re-scheduling is disabled." +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:52 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/scheduler/driver.py:69 +msgid "Must implement host_passes_filters" +msgstr "" + +#: cinder/scheduler/driver.py:74 +msgid "Must implement find_retype_host" +msgstr "" + +#: cinder/scheduler/driver.py:78 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:82 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:98 +#, python-format +msgid "cannot place volume %(id)s on %(host)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:114 +#, python-format +msgid "No valid hosts for volume %(id)s with type %(type)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:125 +#, python-format +msgid "" +"Current host not valid for volume %(id)s with type %(type)s, migration " +"not allowed" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:156 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:174 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:207 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:259 +#, python-format +msgid "Filtered %s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:276 +#, python-format +msgid "Choosing %s" +msgstr "" + +#: cinder/scheduler/host_manager.py:264 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:269 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:294 +#, python-format +msgid "volume service is down or disabled. (host: %s)" +msgstr "" + +#: cinder/scheduler/manager.py:63 +msgid "" +"ChanceScheduler and SimpleScheduler have been deprecated due to lack of " +"support for advanced features like: volume types, volume encryption, QoS " +"etc. These two schedulers can be fully replaced by FilterScheduler with " +"certain combination of filters and weighers." 
+msgstr "" + +#: cinder/scheduler/manager.py:98 cinder/scheduler/manager.py:100 +msgid "Failed to create scheduler manager volume flow" +msgstr "" + +#: cinder/scheduler/manager.py:159 +msgid "New volume type not specified in request_spec." +msgstr "" + +#: cinder/scheduler/manager.py:174 +#, python-format +msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." +msgstr "" + +#: cinder/scheduler/manager.py:192 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:68 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%s'" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:43 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:57 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:53 +msgid "No volume_id provided to populate a request_spec from" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:116 +#, python-format +msgid "Failed to schedule_create_volume: %(cause)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:135 +#, python-format +msgid "Failed notifying on %(topic)s payload %(payload)s" +msgstr "" + +#: cinder/tests/fake_driver.py:57 cinder/volume/driver.py:784 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:76 cinder/volume/driver.py:884 +#, python-format +msgid "FAKE ISER: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:97 +msgid "local_path not implemented" +msgstr "" + +#: cinder/tests/fake_driver.py:124 cinder/tests/fake_driver.py:129 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:70 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:78 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:94 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:97 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:58 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_netapp_nfs.py:360 +#, python-format +msgid "Share %(share)s and file name %(file_name)s" +msgstr "" + +#: cinder/tests/test_rbd.py:768 cinder/volume/drivers/rbd.py:175 +msgid "flush() not supported in this version of librbd" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:260 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1507 +#, python-format +msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1510 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1515 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:60 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:61 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: 
cinder/tests/test_xiv_ds8k.py:102
+#, python-format
+msgid "Volume not found for instance %(instance_id)s."
+msgstr ""
+
+#: cinder/tests/api/contrib/test_backups.py:741
+msgid "Invalid input"
+msgstr ""
+
+#: cinder/tests/integrated/test_login.py:29
+#, python-format
+msgid "volume: %s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:32
+#, python-format
+msgid ""
+"%(message)s\n"
+"Status Code: %(_status)s\n"
+"Body: %(_body)s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:42
+msgid "Authentication error"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:50
+msgid "Authorization error"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:58
+msgid "Item not found"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:100
+#, python-format
+msgid "Doing %(method)s on %(relative_url)s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:103
+#, python-format
+msgid "Body: %s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:121
+#, python-format
+msgid "%(auth_uri)s => code %(http_status)s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:148
+#, python-format
+msgid "%(relative_uri)s => code %(http_status)s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:159
+msgid "Unexpected status code"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:166
+#, python-format
+msgid "Decoding JSON: %s"
+msgstr ""
+
+#: cinder/transfer/api.py:68
+msgid "Volume in unexpected state"
+msgstr ""
+
+#: cinder/transfer/api.py:102 cinder/volume/api.py:367
+msgid "status must be available"
+msgstr ""
+
+#: cinder/transfer/api.py:119
+#, python-format
+msgid "Failed to create transfer record for %s"
+msgstr ""
+
+#: cinder/transfer/api.py:136
+#, python-format
+msgid "Attempt to transfer %s with invalid auth key."
+msgstr ""
+
+#: cinder/transfer/api.py:156 cinder/volume/flows/api/create_volume.py:508
+#, python-format
+msgid ""
+"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume "
+"(%(d_consumed)dG of %(d_quota)dG already consumed)"
+msgstr ""
+
+#: cinder/transfer/api.py:182
+#, python-format
+msgid "Failed to update quota donating volume transfer id %s"
+msgstr ""
+
+#: cinder/transfer/api.py:199
+#, python-format
+msgid "Volume %s has been transferred."
+msgstr "" + +#: cinder/volume/api.py:143 +#, python-format +msgid "Unable to query if %s is in the availability zone set" +msgstr "" + +#: cinder/volume/api.py:171 cinder/volume/api.py:173 +msgid "Failed to create api volume flow" +msgstr "" + +#: cinder/volume/api.py:202 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:214 +#, python-format +msgid "Volume status must be available or error, but current status is: %s" +msgstr "" + +#: cinder/volume/api.py:224 +msgid "Volume cannot be deleted while migrating" +msgstr "" + +#: cinder/volume/api.py:229 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:285 cinder/volume/api.py:350 +#: cinder/volume/qos_specs.py:240 cinder/volume/volume_types.py:67 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:370 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:377 +msgid "status must be in-use to detach" +msgstr "" + +#: cinder/volume/api.py:388 +msgid "Volume status must be available to reserve" +msgstr "" + +#: cinder/volume/api.py:464 +msgid "Snapshot cannot be created while volume is migrating" +msgstr "" + +#: cinder/volume/api.py:468 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:490 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:502 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:553 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/api.py:581 cinder/volume/flows/api/create_volume.py:208 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:585 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:589 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:720 cinder/volume/api.py:772 +msgid "Volume status must be available/in-use." +msgstr "" + +#: cinder/volume/api.py:723 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/api.py:752 +msgid "Volume status must be available to extend." +msgstr "" + +#: cinder/volume/api.py:757 +#, python-format +msgid "" +"New size for extend must be greater than current size. (current: " +"%(size)s, extended: %(new_size)s)" +msgstr "" + +#: cinder/volume/api.py:778 +msgid "Volume is already part of an active migration" +msgstr "" + +#: cinder/volume/api.py:784 +msgid "volume must not have snapshots" +msgstr "" + +#: cinder/volume/api.py:797 +#, python-format +msgid "No available service named %s" +msgstr "" + +#: cinder/volume/api.py:803 +msgid "Destination host must be different than current host" +msgstr "" + +#: cinder/volume/api.py:833 +msgid "Source volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:837 +msgid "Destination volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:842 +#, python-format +msgid "Destination has migration_status %(stat)s, expected %(exp)s." +msgstr "" + +#: cinder/volume/api.py:853 +msgid "Volume status must be available to update readonly flag." 
+msgstr "" + +#: cinder/volume/api.py:862 +#, python-format +msgid "Unable to update type due to incorrect status on volume: %s" +msgstr "" + +#: cinder/volume/api.py:868 +#, python-format +msgid "Volume %s is already part of an active migration." +msgstr "" + +#: cinder/volume/api.py:874 +#, python-format +msgid "migration_policy must be 'on-demand' or 'never', passed: %s" +msgstr "" + +#: cinder/volume/api.py:887 +#, python-format +msgid "Invalid volume_type passed: %s" +msgstr "" + +#: cinder/volume/api.py:900 +#, python-format +msgid "New volume_type same as original: %s" +msgstr "" + +#: cinder/volume/api.py:915 +msgid "Retype cannot change encryption requirements" +msgstr "" + +#: cinder/volume/api.py:927 +msgid "Retype cannot change front-end qos specs for in-use volumes" +msgstr "" + +#: cinder/volume/driver.py:189 cinder/volume/drivers/netapp/nfs.py:174 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:282 +#, python-format +msgid "copy_data_between_volumes %(src)s -> %(dest)s." +msgstr "" + +#: cinder/volume/driver.py:295 cinder/volume/driver.py:309 +#, python-format +msgid "Failed to attach volume %(vol)s" +msgstr "" + +#: cinder/volume/driver.py:327 +#, python-format +msgid "Failed to copy volume %(src)s to %(dest)d" +msgstr "" + +#: cinder/volume/driver.py:340 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:358 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:394 +#, python-format +msgid "Unable to access the backend storage via the path %(path)s." +msgstr "" + +#: cinder/volume/driver.py:433 +#, python-format +msgid "Creating a new backup for volume %s." +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Restoring backup %(backup)s to volume %(volume)s." +msgstr "" + +#: cinder/volume/driver.py:474 +msgid "Extend volume not implemented" +msgstr "" + +#: cinder/volume/driver.py:533 cinder/volume/drivers/emc/emc_smis_iscsi.py:113 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:546 +#, python-format +msgid "ISCSI discovery attempt failed for:%s" +msgstr "" + +#: cinder/volume/driver.py:548 +#, python-format +msgid "Error from iscsiadm -m discovery: %s" +msgstr "" + +#: cinder/volume/driver.py:595 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:599 cinder/volume/drivers/emc/emc_smis_iscsi.py:156 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:696 +msgid "The volume driver requires the iSCSI initiator name in the connector." +msgstr "" + +#: cinder/volume/driver.py:726 cinder/volume/driver.py:845 +#: cinder/volume/drivers/eqlx.py:247 cinder/volume/drivers/lvm.py:359 +#: cinder/volume/drivers/zadara.py:650 +#: cinder/volume/drivers/emc/emc_smis_common.py:859 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:235 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:602 +#: cinder/volume/drivers/netapp/iscsi.py:1032 +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/nexenta/iscsi.py:538 +#: cinder/volume/drivers/windows/windows.py:205 +msgid "Updating volume stats" +msgstr "" + +#: cinder/volume/driver.py:924 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:203 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." 
+msgstr "" + +#: cinder/volume/manager.py:209 +msgid "" +"ThinLVMVolumeDriver is deprecated, please configure LVMISCSIDriver and " +"lvm_type=thin. Continuing with those settings." +msgstr "" + +#: cinder/volume/manager.py:228 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)" +msgstr "" + +#: cinder/volume/manager.py:235 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s" +msgstr "" + +#: cinder/volume/manager.py:244 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:257 +#, python-format +msgid "Failed to re-export volume %s: setting to error state" +msgstr "" + +#: cinder/volume/manager.py:264 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:271 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:273 +#, python-format +msgid "" +"Error encountered during re-exporting phase of driver initialization: " +"%(name)s" +msgstr "" + +#: cinder/volume/manager.py:283 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:328 cinder/volume/manager.py:330 +msgid "Failed to create manager volume flow" +msgstr "" + +#: cinder/volume/manager.py:374 cinder/volume/manager.py:391 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:380 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:389 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:394 +#, python-format +msgid "Cannot delete volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:422 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:427 +#, python-format +msgid "volume %s: glance metadata deleted" +msgstr "" + +#: cinder/volume/manager.py:430 +#, python-format +msgid "no glance metadata found for volume %s" +msgstr "" + +#: cinder/volume/manager.py:434 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:451 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:462 +#, python-format +msgid "snapshot %(snap_id)s: creating" +msgstr "" + +#: cinder/volume/manager.py:490 +#, python-format +msgid "" +"Failed updating %(snapshot_id)s metadata using the provided volumes " +"%(volume_id)s metadata" +msgstr "" + +#: cinder/volume/manager.py:496 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:508 cinder/volume/manager.py:518 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:526 +#, python-format +msgid "Cannot delete snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:556 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:559 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:579 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:583 +msgid "being attached by another host" +msgstr "" + +#: cinder/volume/manager.py:587 +msgid "being attached by different mode" +msgstr "" + +#: cinder/volume/manager.py:590 +msgid "status must be available or attaching" +msgstr "" + +#: cinder/volume/manager.py:698 +#, python-format +msgid 
"Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "" + +#: cinder/volume/manager.py:760 +#, python-format +msgid "Unable to fetch connection information from backend: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:807 +#, python-format +msgid "Unable to terminate volume connection: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:854 +msgid "failed to create new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:857 +msgid "timeout creating new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:880 +#, python-format +msgid "Failed to copy volume %(vol1)s to %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:909 +#, python-format +msgid "" +"migrate_volume_completion: completing migration for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:921 +#, python-format +msgid "" +"migrate_volume_completion is cleaning up an error for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:940 +#, python-format +msgid "Failed to delete migration source vol %(vol)s: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:976 +#, python-format +msgid "volume %s: calling driver migrate_volume" +msgstr "" + +#: cinder/volume/manager.py:1016 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/manager.py:1024 +#, python-format +msgid "" +"Unable to update stats, %(driver_name)s -%(driver_version)s " +"%(config_group)s driver is uninitialized." +msgstr "" + +#: cinder/volume/manager.py:1044 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/manager.py:1091 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to extend volume by %(s_size)sG, " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/manager.py:1103 +#, python-format +msgid "volume %s: extending" +msgstr "" + +#: cinder/volume/manager.py:1105 +#, python-format +msgid "volume %s: extended successfully" +msgstr "" + +#: cinder/volume/manager.py:1107 +#, python-format +msgid "volume %s: Error trying to extend volume" +msgstr "" + +#: cinder/volume/manager.py:1169 +msgid "Failed to update usages while retyping volume." +msgstr "" + +#: cinder/volume/manager.py:1170 +msgid "Failed to get old volume type quota reservations" +msgstr "" + +#: cinder/volume/manager.py:1190 +#, python-format +msgid "Volume %s: retyped succesfully" +msgstr "" + +#: cinder/volume/manager.py:1193 +#, python-format +msgid "" +"Volume %s: driver error when trying to retype, falling back to generic " +"mechanism." +msgstr "" + +#: cinder/volume/manager.py:1204 +msgid "Retype requires migration but is not allowed." +msgstr "" + +#: cinder/volume/manager.py:1212 +msgid "Volume must not have snapshots." 
+msgstr ""
+
+#: cinder/volume/qos_specs.py:57
+#, python-format
+msgid "Valid consumers of QoS specs are: %s"
+msgstr ""
+
+#: cinder/volume/qos_specs.py:84 cinder/volume/qos_specs.py:105
+#: cinder/volume/qos_specs.py:155 cinder/volume/qos_specs.py:197
+#: cinder/volume/qos_specs.py:211 cinder/volume/qos_specs.py:225
+#: cinder/volume/volume_types.py:43
+#, python-format
+msgid "DB error: %s"
+msgstr ""
+
+#: cinder/volume/qos_specs.py:123 cinder/volume/qos_specs.py:140
+#: cinder/volume/qos_specs.py:272 cinder/volume/volume_types.py:52
+#: cinder/volume/volume_types.py:99
+msgid "id cannot be None"
+msgstr ""
+
+#: cinder/volume/qos_specs.py:156
+#, python-format
+msgid "Failed to get all associations of qos specs %s"
+msgstr ""
+
+#: cinder/volume/qos_specs.py:189
+#, python-format
+msgid ""
+"Type %(type_id)s is already associated with another qos specs: "
+"%(qos_specs_id)s"
+msgstr ""
+
+#: cinder/volume/qos_specs.py:198
+#, python-format
+msgid "Failed to associate qos specs %(id)s with type: %(vol_type_id)s"
+msgstr ""
+
+#: cinder/volume/qos_specs.py:212
+#, python-format
+msgid "Failed to disassociate qos specs %(id)s from type: %(vol_type_id)s"
+msgstr ""
+
+#: cinder/volume/qos_specs.py:226
+#, python-format
+msgid "Failed to disassociate qos specs %s."
+msgstr ""
+
+#: cinder/volume/qos_specs.py:284 cinder/volume/volume_types.py:111
+msgid "name cannot be None"
+msgstr ""
+
+#: cinder/volume/utils.py:144
+#, python-format
+msgid ""
+"Incorrect value error: %(blocksize)s, it may indicate that "
+"'volume_dd_blocksize' was configured incorrectly. Falling back to default."
+msgstr ""
+
+#: cinder/volume/volume_types.py:130
+#, python-format
+msgid ""
+"Default volume type is not found, please check default_volume_type "
+"config: %s"
+msgstr ""
+
+#: cinder/volume/drivers/block_device.py:131
+#: cinder/volume/drivers/block_device.py:143 cinder/volume/drivers/lvm.py:654
+#: cinder/volume/drivers/lvm.py:669
+#, python-format
+msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s"
+msgstr ""
+
+#: cinder/volume/drivers/block_device.py:157 cinder/volume/drivers/lvm.py:687
+#, python-format
+msgid ""
+"Skipping remove_export. No iscsi_target is presently exported for volume:"
+" %s"
+msgstr ""
+
+#: cinder/volume/drivers/block_device.py:183 cinder/volume/drivers/lvm.py:483
+#, python-format
+msgid "Skipping ensure_export. No iscsi_target provision for volume: %s"
+msgstr ""
+
+#: cinder/volume/drivers/block_device.py:200 cinder/volume/drivers/lvm.py:504
+#, python-format
+msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s"
+msgstr ""
+
+#: cinder/volume/drivers/block_device.py:272 cinder/volume/drivers/lvm.py:227
+#, python-format
+msgid "Performing secure delete on volume: %s"
+msgstr ""
+
+#: cinder/volume/drivers/block_device.py:287
+#, python-format
+msgid "Error unrecognized volume_clear option: %s"
+msgstr ""
+
+#: cinder/volume/drivers/block_device.py:311 cinder/volume/drivers/lvm.py:300
+#: cinder/volume/drivers/zadara.py:509 cinder/volume/drivers/nexenta/nfs.py:189
+#, python-format
+msgid "Creating clone of volume: %s"
+msgstr ""
+
+#: cinder/volume/drivers/block_device.py:380
+msgid "No free disk"
+msgstr ""
+
+#: cinder/volume/drivers/block_device.py:393
+msgid "No big enough free disk"
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:84
+#, python-format
+msgid "Invalid ESM url scheme \"%s\". Only https is supported."
+msgstr ""
+
+#: cinder/volume/drivers/coraid.py:111
+msgid "Invalid REST handle name. Expected path."
+msgstr "" + +#: cinder/volume/drivers/coraid.py:134 +#, python-format +msgid "Call to json.loads() failed: %(ex)s. Response: %(resp)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:224 +msgid "Session is expired. Relogin on ESM." +msgstr "" + +#: cinder/volume/drivers/coraid.py:244 +msgid "Reply is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:246 +msgid "Error message is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:284 +#, python-format +msgid "Coraid Appliance ping failed: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:297 +#, python-format +msgid "Volume \"%(name)s\" created with VSX LUN \"%(lun)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:311 +#, python-format +msgid "Volume \"%s\" deleted." +msgstr "" + +#: cinder/volume/drivers/coraid.py:315 +#, python-format +msgid "Resize volume \"%(name)s\" to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:319 +#, python-format +msgid "Repository for volume \"%(name)s\" found: \"%(repo)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:333 +#, python-format +msgid "Volume \"%(name)s\" resized. New size is %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:385 +msgid "Cannot create clone volume in different repository." +msgstr "" + +#: cinder/volume/drivers/coraid.py:505 +#, python-format +msgid "Initialize connection %(shelf)s/%(lun)s for %(name)s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:139 +#, python-format +msgid "" +"CLI output\n" +"%s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:154 +msgid "Reading CLI MOTD" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:158 +#, python-format +msgid "Setting CLI terminal width: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:162 +#, python-format +msgid "Sending CLI command: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:169 +msgid "Error executing EQL command" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:199 +#, python-format +msgid "EQL-driver: executing \"%s\"" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:208 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:383 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:215 cinder/volume/drivers/san/san.py:149 +#, python-format +msgid "Error running SSH command: %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:282 +#, python-format +msgid "Volume %s does not exist, it may have already been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:300 +#, python-format +msgid "EQL-driver: Setup is complete, group IP is %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:304 +msgid "Failed to setup the Dell EqualLogic driver" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:320 +#, python-format +msgid "Failed to create volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:329 +#, python-format +msgid "Volume %s was not found while trying to delete it" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:333 +#, python-format +msgid "Failed to delete volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:348 +#, python-format +msgid "Failed to create snapshot of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:361 +#, python-format +msgid "Failed to create volume from snapshot %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:374 +#, python-format +msgid "Failed to create clone of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:384 +#, python-format +msgid "Failed to delete snapshot %(snap)s of volume %(vol)s" +msgstr "" + +#: 
cinder/volume/drivers/eqlx.py:405 +#, python-format +msgid "Failed to initialize connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:415 +#, python-format +msgid "Failed to terminate connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:436 +#, python-format +msgid "Volume %s was not found; it may have been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:440 +#, python-format +msgid "Failed to ensure export of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:459 +#, python-format +msgid "Failed to extend_volume %(name)s from %(current_size)sGB to %(new_size)sGB" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:86 +#, python-format +msgid "No Gluster config file is configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:91 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:103 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:161 +#, python-format +msgid "Cloning volume %(src)s to volume %(dst)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:166 +msgid "Volume status must be 'available'." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:202 cinder/volume/drivers/nfs.py:122 +#: cinder/volume/drivers/netapp/nfs.py:753 +#, python-format +msgid "cast to %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:216 +msgid "Snapshot status must be \"available\" to clone." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:238 +#, python-format +msgid "snapshot: %(snap)s, volume: %(vol)s, volume_size: %(size)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:257 +#, python-format +msgid "will copy from snapshot at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:275 cinder/volume/drivers/nfs.py:172 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:373 +#, python-format +msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (current status: %s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:403 +#, python-format +msgid "nova call result: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:405 +msgid "Call to Nova to create snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:427 +msgid "Nova returned \"error\" status while creating snapshot." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:431 +#, python-format +msgid "Status of snapshot %(id)s is now %(status)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:444 +#, python-format +msgid "Timed out while waiting for Nova update for creation of snapshot %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:456 +#, python-format +msgid "create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:457 +#, python-format +msgid "volume id: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:532 +msgid "'active' must be present when writing snap_info." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:562 +#, python-format +msgid "deleting snapshot %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:566 +msgid "Volume status must be \"available\" or \"in-use\"." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:582 +#, python-format +msgid "" +"Snapshot record for %s is not present, allowing snapshot_delete to " +"proceed."
+msgstr "" + +#: cinder/volume/drivers/glusterfs.py:587 +#, python-format +msgid "snapshot_file for this snap is %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:608 +#, python-format +msgid "No base file found for %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:625 +#, python-format +msgid "No %(base_id)s found for %(file)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:680 +#, python-format +msgid "No file found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:690 +#, python-format +msgid "No snap found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:701 +#, python-format +msgid "No file depends on %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:727 +#, python-format +msgid "Check condition failed: %s expected to be None." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:778 +msgid "Call to Nova delete snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:796 +#, python-format +msgid "status of snapshot %s is still \"deleting\"... waiting" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:802 +#, python-format +msgid "Unable to delete snapshot %(id)s, status: %(status)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:815 +#, python-format +msgid "Timed out while waiting for Nova update for deletion of snapshot %(id)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:904 +#, python-format +msgid "%s must be a valid raw or qcow2 image." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:967 +msgid "Extend volume is only supported for this driver when no snapshots exist." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:975 +#, python-format +msgid "Unrecognized backing format: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:990 +#, python-format +msgid "creating new volume at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:993 +#, python-format +msgid "file already exists at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1019 cinder/volume/drivers/nfs.py:159 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1021 +#, python-format +msgid "Available shares: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1038 +#, python-format +msgid "" +"GlusterFS share at %(dir)s is not writable by the Cinder volume service. " +"Snapshot operations will not be supported." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:96 +#, python-format +msgid "GPFS is not active. Detailed output: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:97 +#, python-format +msgid "GPFS is not running - state: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:140 +msgid "Option gpfs_mount_point_base is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:147 +msgid "Option gpfs_images_share_mode is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:153 +msgid "Option gpfs_images_dir is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:160 +#, python-format +msgid "" +"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " +"belong to different file systems" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:169 +#, python-format +msgid "" +"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in " +"cluster daemon level %(cur)s - must be at least at level %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:183 +#, python-format +msgid "%s must be an absolute path." 
+msgstr "" + +#: cinder/volume/drivers/gpfs.py:188 +#, python-format +msgid "%s is not a directory." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:197 +#, python-format +msgid "" +"The GPFS filesystem %(fs)s is not at the required release level. Current" +" level is %(cur)s, must be at least %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:556 +#, python-format +msgid "Failed to resize volume %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:604 +#, python-format +msgid "mkfs failed on volume %(vol)s, error message was: %(err)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:637 +#, python-format +msgid "" +"%s cannot be accessed. Verify that GPFS is active and file system is " +"mounted." +msgstr "" + +#: cinder/volume/drivers/lvm.py:189 +#, python-format +msgid "Unabled to delete due to existing snapshot for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:215 +#, python-format +msgid "Volume device file path %s does not exist." +msgstr "" + +#: cinder/volume/drivers/lvm.py:221 +#, python-format +msgid "Size for volume: %s not found, cannot secure delete." +msgstr "" + +#: cinder/volume/drivers/lvm.py:262 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:361 +#, python-format +msgid "Unable to update stats on non-initialized Volume Group: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:462 +#, python-format +msgid "Error creating iSCSI target, retrying creation for target: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:482 +#, python-format +msgid "volume_info:%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:518 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:519 cinder/volume/drivers/lvm.py:724 +#: cinder/volume/drivers/huawei/rest_common.py:1225 +#, python-format +msgid "%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:573 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/nfs.py:109 +msgid "Driver specific implementation needs to return mount_point_base." +msgstr "" + +#: cinder/volume/drivers/nfs.py:263 +#, python-format +msgid "Expected volume size was %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:264 +#, python-format +msgid " but size is now %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:361 +#, python-format +msgid "%s is already mounted" +msgstr "" + +#: cinder/volume/drivers/nfs.py:421 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:426 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/nfs.py:431 +#, python-format +msgid "NFS config 'nfs_oversub_ratio' invalid. Must be > 0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:439 +#, python-format +msgid "NFS config 'nfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:493 +#, python-format +msgid "Selected %s as target nfs share." 
+msgstr "" + +#: cinder/volume/drivers/nfs.py:526 +#, python-format +msgid "%s is above nfs_used_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:529 +#, python-format +msgid "%s is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:532 +#, python-format +msgid "%s reserved space is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/rbd.py:160 +#, python-format +msgid "Invalid argument - whence=%s not supported" +msgstr "" + +#: cinder/volume/drivers/rbd.py:164 +msgid "Invalid argument" +msgstr "" + +#: cinder/volume/drivers/rbd.py:183 +msgid "fileno() not supported by RBD()" +msgstr "" + +#: cinder/volume/drivers/rbd.py:210 +#, python-format +msgid "error opening rbd image %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:259 +msgid "rados and rbd python libraries not found" +msgstr "" + +#: cinder/volume/drivers/rbd.py:265 +msgid "error connecting to ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:346 cinder/volume/drivers/sheepdog.py:178 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:377 +#, python-format +msgid "clone depth exceeds limit of %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:411 +#, python-format +msgid "maximum clone depth (%d) has been reached - flattening source volume" +msgstr "" + +#: cinder/volume/drivers/rbd.py:423 +#, python-format +msgid "flattening source volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:435 +#, python-format +msgid "creating snapshot='%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:445 +#, python-format +msgid "cloning '%(src_vol)s@%(src_snap)s' to '%(dest)s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:459 +msgid "clone created successfully" +msgstr "" + +#: cinder/volume/drivers/rbd.py:468 +#, python-format +msgid "creating volume '%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:484 +#, python-format +msgid "flattening %(pool)s/%(img)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:490 +#, python-format +msgid "cloning %(pool)s/%(img)s@%(snap)s to %(dst)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:527 +msgid "volume has no backup snaps" +msgstr "" + +#: cinder/volume/drivers/rbd.py:550 +#, python-format +msgid "volume %s is not a clone" +msgstr "" + +#: cinder/volume/drivers/rbd.py:568 +#, python-format +msgid "deleting parent snapshot %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:579 +#, python-format +msgid "deleting parent %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:593 +#, python-format +msgid "volume %s no longer exists in backend" +msgstr "" + +#: cinder/volume/drivers/rbd.py:609 +msgid "volume has clone snapshot(s)" +msgstr "" + +#: cinder/volume/drivers/rbd.py:625 +#, python-format +msgid "deleting rbd volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:629 +msgid "" +"ImageBusy error raised while deleting rbd volume. This may have been " +"caused by a connection from a client that has crashed and, if so, may be " +"resolved by retrying the delete after 30 seconds has elapsed." 
+msgstr "" + +#: cinder/volume/drivers/rbd.py:642 +msgid "volume is a clone so cleaning references" +msgstr "" + +#: cinder/volume/drivers/rbd.py:696 +#, python-format +msgid "connection data: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:705 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:709 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:712 +msgid "Not an rbd snapshot" +msgstr "" + +#: cinder/volume/drivers/rbd.py:724 +#, python-format +msgid "not cloneable: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:728 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:733 +msgid "rbd image clone requires image format to be 'raw' but image {0} is '{1}'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:747 +#, python-format +msgid "Unable to open image %(loc)s: %(err)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:817 +msgid "volume backup complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:830 +msgid "volume restore complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:840 cinder/volume/drivers/sheepdog.py:195 +#, python-format +msgid "Failed to Extend Volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:845 cinder/volume/drivers/sheepdog.py:200 +#: cinder/volume/drivers/windows/windows.py:223 +#, python-format +msgid "Extend volume from %(old_size)s GB to %(new_size)s GB." +msgstr "" + +#: cinder/volume/drivers/scality.py:67 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:78 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:84 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:105 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:139 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:59 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:64 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:144 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:151 +#, python-format +msgid "" +"Failed to make httplib connection SolidFire Cluster: %s (verify san_ip " +"settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:154 +#, python-format +msgid "Failed to make httplib connection: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:161 +#, python-format +msgid "" +"Request to SolidFire cluster returned bad status: %(status)s / %(reason)s" +" (check san_login/san_password settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:166 +#, python-format +msgid "HTTP request failed, with status: %(status)s and reason: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:177 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:183 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:187 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:189 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:195 +#, python-format +msgid "Detected 
xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:202 +#: cinder/volume/drivers/solidfire.py:271 +#: cinder/volume/drivers/solidfire.py:366 +#, python-format +msgid "API response: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:222 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:253 +#, python-format +msgid "solidfire account: %s does not exist, creating it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:315 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:398 +msgid "Failed to get model update from clone" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:410 +#, python-format +msgid "Failed volume create: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:425 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:460 +#, python-format +msgid "Failed to get SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:469 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:478 +#, python-format +msgid "Volume %s not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:481 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:550 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:554 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:556 +msgid "This usually means the volume was never successfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:569 +#, python-format +msgid "Failed to delete SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:572 +#: cinder/volume/drivers/solidfire.py:646 +#: cinder/volume/drivers/solidfire.py:709 +#: cinder/volume/drivers/solidfire.py:734 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:575 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:579 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:587 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:638 +msgid "Entering SolidFire extend_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:660 +msgid "Leaving SolidFire extend_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:665 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:673 +msgid "Failed to get updated stats" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:703 +#: cinder/volume/drivers/solidfire.py:728 +msgid "Entering SolidFire attach_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:773 +msgid "Leaving SolidFire transfer volume" +msgstr "" + +#: cinder/volume/drivers/zadara.py:236 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:260 +#, python-format +msgid "Operation completed. 
%(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:357 +#, python-format +msgid "Pool %(name)s: %(total)sGB total, %(free)sGB free" +msgstr "" + +#: cinder/volume/drivers/zadara.py:408 cinder/volume/drivers/zadara.py:531 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:438 +#, python-format +msgid "Create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:445 cinder/volume/drivers/zadara.py:490 +#: cinder/volume/drivers/zadara.py:516 +#, python-format +msgid "Volume %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:456 +#, python-format +msgid "Delete snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:464 +#, python-format +msgid "snapshot: original volume %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:472 +#, python-format +msgid "snapshot: snapshot %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:483 +#, python-format +msgid "Creating volume from snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:496 +#, python-format +msgid "Snapshot %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:614 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:40 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:79 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:83 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:91 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:98 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:107 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:115 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:130 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:137 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:144 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:152 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:157 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:167 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:177 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:188 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:197 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:218 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:230 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:241 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:257 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:266 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:278 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:287 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:292 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:302 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:312 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:321 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:342 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. 
Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:354 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:365 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:381 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:390 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:402 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:411 +msgid "Entering delete_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:413 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:420 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:430 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:438 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:442 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigService: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:456 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:465 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:472 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:476 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:488 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:497 +#: cinder/volume/drivers/emc/emc_smis_common.py:567 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:502 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s."
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:518 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:527 +#, python-format +msgid "" +"Error Create Snapshot: %(snapshot)s Volume: %(volume)s Error: " +"%(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:535 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:541 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:545 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:551 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:559 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:574 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:590 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:599 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:611 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:621 +#, python-format +msgid "Create export: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:626 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:648 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:663 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:674 +#, python-format +msgid "Error mapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:678 +#, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:694 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:707 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:711 +#, python-format +msgid "HidePaths for volume %s completed successfully." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:724 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:739 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:744 +#, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:757 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:770 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:775 +#, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:781 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:790 +#: cinder/volume/drivers/emc/emc_smis_common.py:820 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:804 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:810 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:834 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:840 +#, python-format +msgid "Volume %s is already mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:852 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:884 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:887 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:903 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:906 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:928 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:948 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:952 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:959 +msgid "Cannot connect to ECOM server" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:971 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:984 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:997 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1010 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1054 +#, python-format +msgid "Pool %(storage_type)s is not found." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1060 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1066 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1082 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1114 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1117 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1130 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1158 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1184 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1188 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1248 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1289 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1302 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1314 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1326 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1361 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1404 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1409 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1419 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1441 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1463 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1491 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1520 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1526 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1538 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1548 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1550 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1566 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:152 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:161 +#, python-format +msgid "Cannot find device number for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:191 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:198 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:215 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:70 +#, python-format +msgid "Range: start LU: %(start)s, end LU: %(end)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:84 +#, python-format +msgid "setting LU upper (end) limit to %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:92 +#, python-format +msgid "%(element)s: %(val)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:103 cinder/volume/drivers/hds/hds.py:105 +#, python-format +msgid "XML exception reading parameter: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:178 +#, python-format +msgid "portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:197 +#, python-format +msgid "No configuration found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:250 +#, python-format +msgid "HDP not found: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:289 +#, python-format +msgid "iSCSI portal not found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:327 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:355 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is cloned." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:372 +#, python-format +msgid "LUN %(lun)s extended to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:395 +#, python-format +msgid "delete lun %(lun)s on %(name)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:480 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created from snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:503 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is created as snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:522 +#, python-format +msgid "LUN %s is deleted." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:57 +msgid "_instantiate_driver: configuration not found." 
+msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:64 +#, python-format +msgid "" +"_instantiate_driver: Loading %(protocol)s driver for Huawei OceanStor " +"%(product)s series storage arrays." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:84 +#, python-format +msgid "" +"\"Product\" or \"Protocol\" is illegal. \"Product\" should be set to " +"either T, Dorado or HVS. \"Protocol\" should be set to either iSCSI or " +"FC. Product: %(product)s Protocol: %(protocol)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:74 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s host: %(host)s initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:92 +#: cinder/volume/drivers/huawei/huawei_t.py:461 +#, python-format +msgid "initialize_connection: Target FC ports WWNS: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:101 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(ini)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:159 +#: cinder/volume/drivers/huawei/rest_common.py:1278 +#, python-format +msgid "" +"_get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " +"check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:206 +#: cinder/volume/drivers/huawei/rest_common.py:1083 +#, python-format +msgid "_get_tgt_iqn: iSCSI IP is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:234 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:248 +#, python-format +msgid "" +"_get_iscsi_tgt_port_info: Failed to get iSCSI port info. Please make sure" +" the iSCSI port IP %s is configured in array." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:323 +#: cinder/volume/drivers/huawei/huawei_t.py:552 +#, python-format +msgid "" +"terminate_connection: volume: %(vol)s, host: %(host)s, connector: " +"%(initiator)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:351 +#, python-format +msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:436 +msgid "validate_connector: The FC driver requires thewwpns in the connector." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:443 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:578 +#, python-format +msgid "_remove_fc_ports: FC port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:40 +#, python-format +msgid "parse_xml_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:129 +#, python-format +msgid "_get_host_os_type: Host %(ip)s OS type is %(os)s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:59 +#, python-format +msgid "HVS Request URL: %(url)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:60 +#, python-format +msgid "HVS Request Data: %(data)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:73 +#, python-format +msgid "HVS Response Data: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:75 +#, python-format +msgid "Bad response from server: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:82 +msgid "JSON transfer error" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:102 +#, python-format +msgid "Login error, reason is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:166 +#, python-format +msgid "" +"%(err)s\n" +"result: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:173 +#, python-format +msgid "%s \"data\" was not in result." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:208 +msgid "Can't find the Qos policy in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:246 +msgid "Can't find lun or lun group in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:280 +#, python-format +msgid "Invalid resource pool: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:298 +#, python-format +msgid "Get pool info error, pool name is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:327 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:354 +#, python-format +msgid "_stop_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:474 +#, python-format +msgid "" +"_mapping_hostgroup_and_lungroup: lun_group: %(lun_group)sview_id: " +"%(view_id)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:511 +#: cinder/volume/drivers/huawei/rest_common.py:543 +#, python-format +msgid "initiator name:%(initiator_name)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:527 +#, python-format +msgid "host lun id is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:553 +#, python-format +msgid "the free wwns %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:574 +#, python-format +msgid "the fc server properties is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:688 +#, python-format +msgid "JSON transfer data error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:874 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:937 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:964 +#, python-format +msgid "" +"PrefetchType config is wrong. PrefetchType must in 1,2,3,4. fetchtype " +"is:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:970 +msgid "Use default prefetch fetchtype. Prefetch fetchtype:Intelligent." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:982 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal.LUNcopy name: " +"%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1056 +#, python-format +msgid "" +"_get_iscsi_port_info: Failed to get iscsi port info through config IP " +"%(ip)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1101 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1124 +#, python-format +msgid "_parse_volume_type: type id: %(type_id)s config parameter is: %(params)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1157 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the configuration file " +"%(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1162 +#, python-format +msgid "The config parameters are: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1239 +#: cinder/volume/drivers/huawei/ssh_common.py:118 +#: cinder/volume/drivers/huawei/ssh_common.py:1265 +#, python-format +msgid "_check_conf_file: Config file invalid. %s must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1246 +#: cinder/volume/drivers/huawei/ssh_common.py:125 +msgid "_check_conf_file: Config file invalid. StoragePool must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1256 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1300 +msgid "Can not find lun in array" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:54 +#, python-format +msgid "ssh_read: Read SSH timeout. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:70 +msgid "No response message. Please check system status." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:101 +#: cinder/volume/drivers/huawei/ssh_common.py:1249 +msgid "do_setup" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:135 +#: cinder/volume/drivers/huawei/ssh_common.py:1287 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType is invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:169 +#, python-format +msgid "_get_login_info: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:224 +#, python-format +msgid "create_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:242 +#, python-format +msgid "" +"_name_translate: Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:279 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the element in configuration " +"file %(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:373 +#: cinder/volume/drivers/huawei/ssh_common.py:1451 +#, python-format +msgid "LUNType must be \"Thin\" or \"Thick\". LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:395 +msgid "" +"_parse_conf_lun_params: Use default prefetch type. 
Prefetch type: " +"Intelligent" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:421 +#, python-format +msgid "" +"_get_maximum_capacity_pool_id: Failed to get pool id. Please check config" +" file and make sure the StoragePool %s is created in storage array." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:436 +#, python-format +msgid "CLI command: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:466 +#, python-format +msgid "" +"_execute_cli: Can not connect to IP %(old)s, try to connect to the other " +"IP %(new)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:501 +#, python-format +msgid "_execute_cli: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:511 +#, python-format +msgid "delete_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:516 +#, python-format +msgid "delete_volume: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:570 +#, python-format +msgid "" +"create_volume_from_snapshot: snapshot name: %(snapshot)s, volume name: " +"%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:580 +#, python-format +msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:650 +#, python-format +msgid "_wait_for_luncopy: LUNcopy %(luncopyname)s status is %(status)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:688 +#, python-format +msgid "create_cloned_volume: src volume: %(src)s, tgt volume: %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:697 +#, python-format +msgid "Source volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:739 +#, python-format +msgid "" +"extend_volume: extended volume name: %(extended_name)s new added volume " +"name: %(added_name)s new added volume size: %(added_size)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:747 +#, python-format +msgid "extend_volume: volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:779 +#, python-format +msgid "create_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:785 +msgid "create_snapshot: Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:792 +#, python-format +msgid "create_snapshot: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:855 +#, python-format +msgid "delete_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:865 +#, python-format +msgid "" +"delete_snapshot: Can not delete snapshot %s for it is a source LUN of " +"LUNCopy." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:873 +#, python-format +msgid "delete_snapshot: Snapshot %(snap)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:916 +#, python-format +msgid "" +"%(func)s: %(msg)s\n" +"CLI command: %(cmd)s\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:933 +#, python-format +msgid "map_volume: Volume %s was not found." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1079 +#, python-format +msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1102 +#, python-format +msgid "remove_map: Host %s does not exist." 
+msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1106 +#, python-format +msgid "remove_map: Volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1119 +#, python-format +msgid "remove_map: No map between host %(host)s and volume %(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1138 +#, python-format +msgid "" +"_delete_map: There are IOs accessing the system. Retry to delete host map" +" %(mapid)s 10s later." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1146 +#, python-format +msgid "" +"_delete_map: Failed to delete host map %(mapid)s.\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1185 +msgid "_update_volume_stats: Updating volume stats." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1277 +msgid "_check_conf_file: Config file invalid. StoragePool must be specified." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1311 +msgid "" +"_get_device_type: The driver only supports Dorado5100 and Dorado 2100 G2 " +"now." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1389 +#, python-format +msgid "" +"create_volume_from_snapshot: %(device)s does not support create volume " +"from snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1396 +#, python-format +msgid "create_cloned_volume: %(device)s does not support clone volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1404 +#, python-format +msgid "extend_volume: %(device)s does not support extend volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1413 +#, python-format +msgid "create_snapshot: %(device)s does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:132 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:142 +#, python-format +msgid "Failed getting details for pool %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:179 +msgid "do_setup: No configured nodes." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:182 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:186 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:190 +msgid "Unable to determine system name" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:193 +msgid "Unable to determine system id" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:196 +msgid "Unable to determine pool extent size" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:203 +#: cinder/volume/drivers/netapp/iscsi.py:122 +#: cinder/volume/drivers/netapp/nfs.py:639 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:157 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:209 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:217 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:225 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:235 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:254 +msgid "The connector does not contain the required information." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:277 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:297 +msgid "CHAP secret exists for host but CHAP is disabled" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:302 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:314 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:316 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:333 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:342 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:365 +msgid "" +"Could not get FC connection information for the host-volume connection. " +"Is the host configured properly for FC connections?" 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:380 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:385 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:403 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:411 +msgid "terminate_connection: Failed to get host name from connector." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:421 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:447 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:459 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:470 +#, python-format +msgid "enter: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:474 +msgid "extend_volume: Extending a volume with snapshots is not supported." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:481 +#, python-format +msgid "leave: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:497 +#, python-format +msgid "enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:523 +#, python-format +msgid "leave: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:540 +#, python-format +msgid "" +"enter: retype: id=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:581 +#, python-format +msgid "" +"exit: retype: ild=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:622 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:623 +msgid "_update_volume_stats: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:44 +#, python-format +msgid "Could not find key in output of command %(cmd)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:64 +#, python-format +msgid "Failed to get code level (%s)." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:86 +#, python-format +msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:143 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:165 +#, python-format +msgid "Failed to find host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:178 +#, python-format +msgid "enter: get_host_from_connector: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:207 +#, python-format +msgid "leave: get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:218 +#, python-format +msgid "enter: create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:224 +msgid "create_host: Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:235 +msgid "create_host: No initiators or wwpns supplied." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:265 +#, python-format +msgid "leave: create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:275 +#, python-format +msgid "enter: map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:301 +#, python-format +msgid "" +"leave: map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host " +"%(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:311 +#, python-format +msgid "enter: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:318 +#, python-format +msgid "unmap_vol_from_host: No mapping of volume %(vol_name)s to any host found." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:324 +#, python-format +msgid "" +"unmap_vol_from_host: Multiple mappings of volume %(vol_name)s found, no " +"host specified." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:336 +#, python-format +msgid "" +"unmap_vol_from_host: No mapping of volume %(vol_name)s to host %(host) " +"found." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:348 +#, python-format +msgid "leave: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:377 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:383 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:390 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:397 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:402 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:408 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:417 +#, python-format +msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:452 +msgid "Protocol must be specified as ' iSCSI' or ' FC'." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:495 +#, python-format +msgid "enter: create_vdisk: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:498 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:525 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping%(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:535 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within theallotted %(to)d " +"seconds timeout. Terminating." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:544 +#, python-format +msgid "" +"enter: run_flashcopy: execute FlashCopy from source %(source)s to target " +"%(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:552 +#, python-format +msgid "leave: run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:572 +#, python-format +msgid "Loopcall: _check_vdisk_fc_mappings(), vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:595 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:631 +#, python-format +msgid "Calling _ensure_vdisk_no_fc_mappings: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:639 +#, python-format +msgid "enter: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:641 +#, python-format +msgid "Tried to delete non-existant vdisk %s." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:645 +#, python-format +msgid "leave: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:649 +#, python-format +msgid "enter: create_copy: snapshot %(src)s to %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:654 +#, python-format +msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:669 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt)s from vdisk %(src)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:691 +msgid "migrate_volume started without a vdisk copy in the expected pool." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:743 +#, python-format +msgid "" +"Ignore change IO group as storage code level is %(code_level)s, below " +"then 6.4.0.0" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:35 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:211 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:244 +#, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:53 +#, python-format +msgid "Expected no output from CLI command %(cmd)s, got %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:65 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:256 +#, python-format +msgid "" +"Failed to parse CLI output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:142 +msgid "Must pass wwpn or host to lsfabric." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:158 +#, python-format +msgid "Did not find success message nor error for %(fun)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:163 +msgid "" +"storwize_svc_multihostmap_enabled is set to False, not allowing multi " +"host mapping." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:347 +#, python-format +msgid "Did not find expected key %(key)s in %(fun)s: %(raw)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:382 +#, python-format +msgid "" +"Unexpected CLI response: header/row mismatch. header: %(header)s, row: " +"%(row)s" +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:419 +#, python-format +msgid "No element by given name %s." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:440 +msgid "Not a valid value for NaElement." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:444 +msgid "NaElement name cannot be null." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:468 +msgid "Type cannot be converted into NaElement." 
+msgstr "" + +#: cinder/volume/drivers/netapp/common.py:75 +msgid "Required configuration not found" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:103 +#, python-format +msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:109 +#, python-format +msgid "Storage family %s is not supported" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:116 +#, python-format +msgid "No default storage protocol found for storage family %(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:123 +#, python-format +msgid "" +"Protocol %(storage_protocol)s is not supported for storage family " +"%(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:130 +#, python-format +msgid "" +"NetApp driver of family %(storage_family)s and protocol " +"%(storage_protocol)s loaded" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:139 +msgid "Only loading netapp drivers supported." +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:158 +#, python-format +msgid "" +"The configured NetApp driver is deprecated. Please refer the link to " +"resolve the issue '%s'." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:69 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:105 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:150 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:166 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:175 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:191 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:227 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:232 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:238 +#, python-format +msgid "Failed to get LUN target details for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:249 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:252 +#, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:290 +#, python-format +msgid "Snapshot %s deletion successful" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:310 +#: cinder/volume/drivers/netapp/iscsi.py:565 +#: cinder/volume/drivers/netapp/nfs.py:99 +#: cinder/volume/drivers/netapp/nfs.py:206 +#, python-format +msgid "Resizing %s failed. Cleaning volume." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:325 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:412 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:431 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:511 +msgid "Object is not a NetApp LUN." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:543 +#, python-format +msgid "Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:545 +#, python-format +msgid "Error getting lun attribute. Exception: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:600 +#, python-format +msgid "No need to extend volume %s as it is already the requested new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:606 +#, python-format +msgid "Resizing lun %s directly to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:633 +#, python-format +msgid "Lun %(path)s geometry failed. Message - %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:662 +#, python-format +msgid "Moving lun %(name)s to %(new_name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:677 +#, python-format +msgid "Resizing lun %s using sub clone to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:684 +#, python-format +msgid "%s cannot be sub clone resized as it is hosted on compressed volume" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:690 +#, python-format +msgid "%s cannot be sub clone resized as it contains no blocks." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:707 +#, python-format +msgid "Post clone resize lun %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:718 +#, python-format +msgid "Failure staging lun %s to tmp." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:723 +#, python-format +msgid "Failure moving new cloned lun to %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:727 +#, python-format +msgid "Failure deleting staged tmp lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:730 +#, python-format +msgid "Unknown exception in post clone resize lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:732 +#, python-format +msgid "Exception details: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:736 +msgid "Getting lun block count." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:741 +#, python-format +msgid "Failure getting lun info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:785 +#, python-format +msgid "Failed to get vol with required size and extra specs for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:796 +#, python-format +msgid "Error provisioning vol %(name)s on %(volume)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:841 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:982 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:986 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1087 +msgid "Cluster ssc is not updated. No volume stats found." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1149 +#: cinder/volume/drivers/netapp/nfs.py:1080 +msgid "Unsupported ONTAP version. ONTAP version 7.3.1 and above is supported." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1153 +#: cinder/volume/drivers/netapp/nfs.py:1084 +#: cinder/volume/drivers/netapp/utils.py:320 +msgid "Api version could not be determined." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1164 +#, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1273 +#, python-format +msgid "Error finding luns for volume %s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1390 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1393 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1456 +msgid "Volume refresh job already running. Returning..." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1462 +#, python-format +msgid "Error refreshing vol capacity. Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1470 +#, python-format +msgid "Refreshing capacity info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:104 +#: cinder/volume/drivers/netapp/nfs.py:211 +#, python-format +msgid "NFS file %s not discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:224 +#, python-format +msgid "Copied image to volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:230 +#, python-format +msgid "Registering image in cache %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:237 +#, python-format +msgid "" +"Exception while registering image %(image_id)s in cache. Exception: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:250 +#, python-format +msgid "Found cache file for image %(image_id)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:263 +#, python-format +msgid "Cloning img from cache for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:271 +msgid "Image cache cleaning in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:282 +msgid "Image cache cleaning in progress." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:293 +#, python-format +msgid "Cleaning cache for share %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:298 +#, python-format +msgid "Files to be queued for deletion %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:305 +#, python-format +msgid "Exception during cache cleaning %(share)s. Message - %(ex)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:311 +msgid "Image cache cleaning done." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:336 +#, python-format +msgid "Bytes to free %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:343 +#, python-format +msgid "Delete file path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:358 +#, python-format +msgid "Deleting file at path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:363 +#, python-format +msgid "Exception during deleting %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:395 +#, python-format +msgid "Unexpected exception in cloning image %(image_id)s. 
Message: %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:411 +#, python-format +msgid "Cloning image %s from cache" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:415 +#, python-format +msgid "Cache share: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:425 +#, python-format +msgid "Unexpected exception during image cloning in share %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:431 +#, python-format +msgid "Cloning image %s directly in share" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:436 +#, python-format +msgid "Share is cloneable %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:443 +#, python-format +msgid "Image is raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:450 +#, python-format +msgid "Image will locally be converted to raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:457 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:467 +#, python-format +msgid "Performing post clone for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:474 +msgid "NFS file could not be discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:478 +msgid "Checking file for resize" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:482 +#, python-format +msgid "Resizing file to %sG" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:488 +msgid "Resizing image file failed." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:510 +msgid "Discover file retries exhausted." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:529 +#, python-format +msgid "Image location not in the expected format %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:557 +#, python-format +msgid "Found possible share matches %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:561 +msgid "Unexpected exception while short listing used share." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:599 +#, python-format +msgid "Extending volume %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:710 +#, python-format +msgid "Shares on vserver %s will only be used for provisioning." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:714 +#: cinder/volume/drivers/netapp/nfs.py:892 +msgid "No vserver set in config. SSC will be disabled." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:757 +#, python-format +msgid "Exception creating vol %(name)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:765 +#, python-format +msgid "Volume %s could not be created on shares." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:815 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:856 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:862 +#, python-format +msgid "" +"Cloning with params volume %(volume)s, src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:924 +msgid "No cluster ssc stats found. Wait for next volume stats update." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:941 +msgid "No shares found hence skipping ssc refresh." 
+msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:978 +#: cinder/volume/drivers/netapp/nfs.py:1221 +#, python-format +msgid "Shortlisted del elg files %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:983 +#: cinder/volume/drivers/netapp/nfs.py:1226 +#, python-format +msgid "Getting file usage for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:988 +#: cinder/volume/drivers/netapp/nfs.py:1231 +#, python-format +msgid "file-usage for path %(path)s is %(bytes)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1005 +#: cinder/volume/drivers/netapp/nfs.py:1268 +#, python-format +msgid "Share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1007 +#: cinder/volume/drivers/netapp/nfs.py:1270 +#, python-format +msgid "No share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1038 +#, python-format +msgid "Found volume %(vol)s for share %(share)s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1129 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1139 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:241 +#, python-format +msgid "Unexpected error while creating ssc vol list. Message - %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:272 +#, python-format +msgid "Exception querying aggr options. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:313 +#, python-format +msgid "Exception querying sis information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:347 +#, python-format +msgid "Exception querying mirror information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:379 +#, python-format +msgid "Exception querying storage disk. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:421 +#, python-format +msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:455 +#, python-format +msgid "Successfully completed stale refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:482 +#, python-format +msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:488 +#, python-format +msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:500 +msgid "Backend not a VolumeDriver." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:502 +msgid "Backend server not NaServer." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:505 +msgid "ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:517 +msgid "refresh stale ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:616 +msgid "Fatal error: User not permitted to query NetApp volumes." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:623 +#, python-format +msgid "" +"The user does not have access or sufficient privileges to use all ssc " +"apis. The ssc features %s may not work as expected." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:122 +msgid "ems executed successfully." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:124 +#, python-format +msgid "Failed to invoke ems. 
Message : %s" +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:137 +msgid "" +"It is not the recommended way to use drivers by NetApp. Please use " +"NetAppDriver to achieve the functionality." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:160 +msgid "Requires an NaServer instance." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:317 +msgid "Unsupported Clustered Data ONTAP version." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:99 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:150 +#, python-format +msgid "Extending volume: %(id)s New size: %(size)s GB" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:166 +#, python-format +msgid "Volume %s does not exist, it seems it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:179 +#, python-format +msgid "Cannot delete snapshot %(origin): %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:190 +#, python-format +msgid "Creating temp snapshot of the original volume: %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:200 +#: cinder/volume/drivers/nexenta/nfs.py:200 +#, python-format +msgid "Volume creation failed, deleting created snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:205 +#: cinder/volume/drivers/nexenta/nfs.py:205 +#, python-format +msgid "Failed to delete zfs snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:223 +#, python-format +msgid "Enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:250 +#, python-format +msgid "Remote NexentaStor appliance at %s should be SSH-bound." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:267 +#, python-format +msgid "" +"Cannot send source snapshot %(src)s to destination %(dst)s. Reason: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:275 +#, python-format +msgid "" +"Cannot delete temporary source snapshot %(src)s on NexentaStor Appliance:" +" %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:281 +#, python-format +msgid "Cannot delete source volume %(volume)s on NexentaStor Appliance: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:318 +#, python-format +msgid "Snapshot %s does not exist, it seems it was already deleted." 
+msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:439 +#: cinder/volume/drivers/windows/windows_utils.py:230 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:449 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:461 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:471 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:481 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:514 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:522 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:83 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:88 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:89 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:90 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:96 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:85 +#, python-format +msgid "Volume %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:89 +#, python-format +msgid "Folder %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:114 +#, python-format +msgid "Creating folder on Nexenta Store %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:146 +#, python-format +msgid "Cannot destroy created folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:176 +#, python-format +msgid "Cannot destroy cloned folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:227 +#, python-format +msgid "Folder %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:237 +#: cinder/volume/drivers/nexenta/nfs.py:268 +#, python-format +msgid "Snapshot %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:302 +#, python-format +msgid "Creating regular file: %s.This may take some time." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:313 +#, python-format +msgid "Regular file: %s created." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:365 +#, python-format +msgid "Sharing folder %s on Nexenta Store" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:393 +#, python-format +msgid "Shares loaded: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/utils.py:46 +#, python-format +msgid "Invalid value: \"%s\"" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:93 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:99 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:107 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:137 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:190 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:246 +#, python-format +msgid "Snapshot info: %(name)s => %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:321 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:79 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:166 +#, python-format +msgid "Invalid hp3parclient version. Version %s or greater required." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:179 +#, python-format +msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:193 +#, python-format +msgid "HP3PARCommon %(common_ver)s, hp3parclient %(rest_ver)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:212 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:494 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:228 +#, python-format +msgid "Failed to get domain because CPG (%s) doesn't exist on array." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:247 +#, python-format +msgid "Error extending volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:347 +#, python-format +msgid "command %s failed" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:390 +#, python-format +msgid "Error running ssh command: %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:586 +#, python-format +msgid "VV Set %s does not exist." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:633 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:684 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:752 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1004 +#, python-format +msgid "Failure in update_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1019 +#, python-format +msgid "Failure in clear_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1031 +#, python-format +msgid "Error attaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1039 +#, python-format +msgid "Error detaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:124 +#, python-format +msgid "Invalid IP address format '%s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:158 +#, python-format +msgid "" +"Found invalid iSCSI IP address(s) in configuration option(s) " +"hp3par_iscsi_ips or iscsi_ip_address '%s.'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:164 +msgid "At least one valid iSCSI IP address must be set." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:266 +msgid "Least busy iSCSI port not found, using first iSCSI port in list." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:75 +#, python-format +msgid "Failure while invoking function: %(func)s. Error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:162 +#, python-format +msgid "Error while terminating session: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:165 +msgid "Successfully established connection to the server." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:172 +#, python-format +msgid "Error while logging out the user: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:218 +#, python-format +msgid "" +"Not authenticated error occurred. Will create session and try API call " +"again: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:258 +#, python-format +msgid "Task: %(task)s progress: %(prog)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:262 +#, python-format +msgid "Task %s status: success." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:266 +#: cinder/volume/drivers/vmware/api.py:271 +#, python-format +msgid "Task: %(task)s failed with error: %(err)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:290 +msgid "Lease is ready." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:294 +msgid "Lease initializing..." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:304 +#, python-format +msgid "Error: unknown lease state %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:51 +#, python-format +msgid "Read %(bytes)s out of %(max)s from ThreadSafePipe." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:56 +#, python-format +msgid "Completed transfer of size %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:102 +#, python-format +msgid "Initiating image service update on image: %(image)s with meta: %(meta)s" +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:117 +#, python-format +msgid "Glance image: %s is now active." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:123 +#, python-format +msgid "Glance image: %s is in killed state." 
+msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:132 +#, python-format +msgid "Glance image %(id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:171 +#, python-format +msgid "" +"Exception during HTTP connection close in VMwareHTTPWrite. Exception is " +"%s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:203 +#: cinder/volume/drivers/vmware/read_write_util.py:292 +msgid "Could not retrieve URL from lease." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:206 +#, python-format +msgid "Opening vmdk url: %s for write." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:231 +#, python-format +msgid "Written %s bytes to vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:242 +#: cinder/volume/drivers/vmware/read_write_util.py:318 +#, python-format +msgid "Updating progress to %s percent." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:258 +#: cinder/volume/drivers/vmware/read_write_util.py:334 +msgid "Lease released." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:260 +#: cinder/volume/drivers/vmware/read_write_util.py:336 +#, python-format +msgid "Lease is already in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:295 +#, python-format +msgid "Opening vmdk url: %s for read." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:307 +#, python-format +msgid "Read %s bytes from vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:150 +#, python-format +msgid "Error(s): %s occurred in the call to RetrievePropertiesEx." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:189 +#, python-format +msgid "No such SOAP method %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:198 +#, python-format +msgid "httplib error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:209 +#, python-format +msgid "Socket error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:218 +#, python-format +msgid "Type error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:225 +#, python-format +msgid "Error in %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:112 +#, python-format +msgid "Returning spec value %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:115 +#, python-format +msgid "Invalid spec value: %s specified." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:118 +#, python-format +msgid "Returning default spec value: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:169 +#, python-format +msgid "%s not set." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:174 +#, python-format +msgid "Successfully setup driver: %(driver)s for server: %(ip)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:219 +msgid "Backing not available, no operation to be performed." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:287 +#, python-format +msgid "" +"Unable to pick datastore to accommodate %(size)s bytes from the " +"datastores: %(dss)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:293 +#, python-format +msgid "" +"Selected datastore: %(datastore)s with %(host_count)d connected host(s) " +"for the volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:375 +#, python-format +msgid "" +"Unable to find suitable datastore for volume of size: %(vol)s GB under " +"host: %(host)s. 
More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:385 +#, python-format +msgid "Unable to find host to accommodate a disk of size: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:412 +#, python-format +msgid "" +"Unable to find suitable datastore for volume: %(vol)s under host: " +"%(host)s. More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:422 +#, python-format +msgid "Unable to create volume: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:441 +#, python-format +msgid "The instance: %s for which initialize connection is called, exists." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:448 +#, python-format +msgid "There is no backing for the volume: %s. Need to create one." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:456 +msgid "The instance for which initialize connection is called, does not exist." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:461 +#, python-format +msgid "Trying to boot from an empty volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:470 +#, python-format +msgid "" +"Returning connection_info: %(info)s for volume: %(volume)s with " +"connector: %(connector)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:518 +#, python-format +msgid "Snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:523 +#, python-format +msgid "There is no backing, so will not create snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:528 +#, python-format +msgid "Successfully created snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:549 +#, python-format +msgid "Delete snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:554 +#, python-format +msgid "There is no backing, and so there is no snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:558 +#, python-format +msgid "Successfully deleted snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:586 +#, python-format +msgid "Successfully cloned new backing: %(back)s from source VMDK file: %(vmdk)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:603 +#, python-format +msgid "" +"There is no backing for the source volume: %(svol)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:633 +#, python-format +msgid "" +"There is no backing for the source snapshot: %(snap)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:642 +#: cinder/volume/drivers/vmware/vmdk.py:982 +#, python-format +msgid "" +"There is no snapshot point for the snapshoted volume: %(snap)s. Not " +"creating any backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:678 +#, python-format +msgid "Cannot create image of disk format: %s. Only vmdk disk format is accepted." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:713 +#: cinder/volume/drivers/vmware/vmdk.py:771 +#, python-format +msgid "Fetching glance image: %(id)s to server: %(host)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:722 +#: cinder/volume/drivers/vmware/vmdk.py:792 +#, python-format +msgid "Done copying image: %(id)s to volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:725 +#, python-format +msgid "" +"Exception in copy_image_to_volume: %(excep)s. Deleting the backing: " +"%(back)s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:746 +#, python-format +msgid "Exception in _select_ds_for_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:749 +#, python-format +msgid "Selected datastore %(ds)s for new volume of size %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:784 +#, python-format +msgid "Exception in copy_image_to_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:787 +#, python-format +msgid "Deleting the backing: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:808 +#, python-format +msgid "Copy glance image: %s to create new volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:842 +msgid "Upload to glance of attached volume is not supported." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:847 +#, python-format +msgid "Copy Volume: %s to new image." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:853 +#, python-format +msgid "Backing not found, creating for volume: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:872 +#, python-format +msgid "Done copying volume %(vol)s to a new image %(img)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:922 +#, python-format +msgid "Relocating volume: %(backing)s to %(ds)s and %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:961 +#: cinder/volume/drivers/vmware/volumeops.py:630 +#, python-format +msgid "Successfully created clone: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:974 +#, python-format +msgid "" +"There is no backing for the snapshoted volume: %(snap)s. Not creating any" +" backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1010 +#, python-format +msgid "" +"There is no backing for the source volume: %(src)s. Not creating any " +"backing for volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1018 +#, python-format +msgid "Linked clone of source volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:94 +#, python-format +msgid "Downloading image: %s from glance image server as a flat vmdk file." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:107 +#: cinder/volume/drivers/vmware/vmware_images.py:126 +#, python-format +msgid "Downloaded image: %s from glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:113 +#, python-format +msgid "Downloading image: %s from glance image server using HttpNfc import." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:132 +#, python-format +msgid "Uploading image: %s to the Glance image server using HttpNfc export." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:158 +#, python-format +msgid "Uploaded image: %s to the Glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:87 +#, python-format +msgid "Did not find any backing with name: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:94 +#, python-format +msgid "Deleting the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:97 +#, python-format +msgid "Initiated deletion of VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:99 +#, python-format +msgid "Deleted the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:237 +#, python-format +msgid "There are no valid datastores attached to %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:289 +#, python-format +msgid "" +"Creating folder: %(child_folder_name)s under parent folder: " +"%(parent_folder)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:306 +#, python-format +msgid "Child folder already present: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:314 +#, python-format +msgid "Created child folder: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:365 +#, python-format +msgid "Spec for creating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:383 +#, python-format +msgid "" +"Creating volume backing name: %(name)s disk_type: %(disk_type)s size_kb: " +"%(size_kb)s at folder: %(folder)s resourse pool: %(resource_pool)s " +"datastore name: %(ds_name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:395 +#, python-format +msgid "Initiated creation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:398 +#, python-format +msgid "Successfully created volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:438 +#, python-format +msgid "Spec for relocating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:452 +#, python-format +msgid "" +"Relocating backing: %(backing)s to datastore: %(ds)s and resource pool: " +"%(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:462 +#, python-format +msgid "Initiated relocation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:464 +#, python-format +msgid "" +"Successfully relocated volume backing: %(backing)s to datastore: %(ds)s " +"and resource pool: %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:474 +#, python-format +msgid "Moving backing: %(backing)s to folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:479 +#, python-format +msgid "Initiated move of volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:482 +#, python-format +msgid "Successfully moved volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:494 +#, python-format +msgid "Snapshoting backing: %(backing)s with name: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:501 +#, python-format +msgid "Initiated snapshot of volume backing: %(backing)s named: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:505 +#, python-format +msgid "Successfully created snapshot: %(snap)s for volume backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:553 +#, python-format +msgid "Deleting the snapshot: %(name)s from backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:558 +#, python-format +msgid "" +"Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " +"delete anything." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:565 +#, python-format +msgid "Initiated snapshot: %(name)s deletion for backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:569 +#, python-format +msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:597 +#, python-format +msgid "Spec for cloning the backing: %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:613 +#, python-format +msgid "" +"Creating a clone of backing: %(back)s, named: %(name)s, clone type: " +"%(type)s from snapshot: %(snap)s on datastore: %(ds)s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:627 +#, python-format +msgid "Initiated clone of backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:638 +#, python-format +msgid "Deleting file: %(file)s under datacenter: %(dc)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:646 +#, python-format +msgid "Initiated deletion via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:648 +#, python-format +msgid "Successfully deleted file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:701 +msgid "Copying disk data before snapshot of the VM" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:711 +#, python-format +msgid "Initiated copying disk data via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:713 +#, python-format +msgid "Successfully copied disk at: %(src)s to: %(dest)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:722 +#, python-format +msgid "Deleting vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:729 +#, python-format +msgid "Initiated deleting vmdk file via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:731 +#, python-format +msgid "Deleted vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/windows/windows.py:102 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:47 +#, python-format +msgid "" +"check_for_setup_error: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:53 +msgid "check_for_setup_error: there is no ISCSI traffic listening." +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:63 +#, python-format +msgid "" +"get_host_information: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:73 +#, python-format +msgid "" +"get_host_information: the ISCSI target information could not be " +"retrieved. WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:105 +#, python-format +msgid "" +"associate_initiator_with_iscsi_target: an association between initiator: " +"%(init)s and target name: %(target)s could not be established. WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:123 +#, python-format +msgid "" +"delete_iscsi_target: error when deleting the iscsi target associated with" +" target name: %(target)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:139 +#, python-format +msgid "" +"create_volume: error when creating the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:157 +#, python-format +msgid "" +"delete_volume: error when deleting the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:177 +#, python-format +msgid "" +"create_snapshot: error when creating the snapshot name: %(vol_name)s . 
" +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:193 +#, python-format +msgid "" +"create_volume_from_snapshot: error when creating the volume name: " +"%(vol_name)s from snapshot name: %(snap_name)s. WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:208 +#, python-format +msgid "" +"delete_snapshot: error when deleting the snapshot name: %(snap_name)s . " +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:223 +#, python-format +msgid "" +"create_iscsi_target: error when creating iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:240 +#, python-format +msgid "" +"remove_iscsi_target: error when deleting iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:255 +#, python-format +msgid "" +"add_disk_to_target: error adding disk associated to volume : %(vol_name)s" +" to the target name: %(tar_name)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:273 +#, python-format +msgid "" +"copy_vhd_disk: error when copying disk from source path : %(src_path)s to" +" destination path: %(dest_path)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:290 +#, python-format +msgid "" +"extend: error when extending the volume: %(vol_name)s .WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/flows/common.py:52 +#, python-format +msgid "Restoring source %(source_volid)s status to %(status)s" +msgstr "" + +#: cinder/volume/flows/common.py:58 +#, python-format +msgid "" +"Failed setting source volume %(source_volid)s back to its initial " +"%(source_status)s status" +msgstr "" + +#: cinder/volume/flows/common.py:83 +#, python-format +msgid "Updating volume: %(volume_id)s with %(update)s due to: %(reason)s" +msgstr "" + +#: cinder/volume/flows/common.py:90 +#: cinder/volume/flows/manager/create_volume.py:676 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(update)s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:81 +#, python-format +msgid "Originating snapshot status must be one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:103 +#, python-format +msgid "" +"Unable to create a volume from an originating source volume when its " +"status is not one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:126 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than the snapshot size " +"%(snap_size)sGB. They must be >= original snapshot size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:135 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than original volume size " +"%(source_size)sGB. They must be >= original volume size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:144 +#, python-format +msgid "Volume size %(size)s must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:186 +#, python-format +msgid "" +"Size of specified image %(image_size)sGB is larger than volume size " +"%(volume_size)sGB." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:194 +#, python-format +msgid "" +"Volume size %(volume_size)sGB cannot be smaller than the image minDisk " +"size %(min_disk)sGB." 
+msgstr "" + +#: cinder/volume/flows/api/create_volume.py:212 +#, python-format +msgid "Metadata property key %s greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:217 +#, python-format +msgid "Metadata property key %s value greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:254 +#, python-format +msgid "Availability zone '%s' is invalid" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:267 +msgid "Volume must be in the same availability zone as the snapshot" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:276 +msgid "Volume must be in the same availability zone as the source volume" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:315 +msgid "Volume type will be changed to be the same as the source volume." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:463 +#, python-format +msgid "Failed destroying volume entry %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:546 +#, python-format +msgid "Failed rolling back quota for %s reservations" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:590 +#, python-format +msgid "Failed to update quota for deleting volume: %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:678 +#: cinder/volume/flows/manager/create_volume.py:192 +#, python-format +msgid "Volume %s: create failed" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:682 +msgid "Unexpected build error:" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:105 +#, python-format +msgid "" +"Volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d due to " +"%(reason)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:124 +#, python-format +msgid "Volume %s: re-scheduled" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:141 +#, python-format +msgid "Updating volume %(volume_id)s with %(update)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:146 +#, python-format +msgid "Volume %s: resetting 'creating' status failed." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:165 +#, python-format +msgid "Volume %s: rescheduling failed" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:308 +#, python-format +msgid "" +"Failed notifying about the volume action %(event)s for volume " +"%(volume_id)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:345 +#, python-format +msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:347 +#, python-format +msgid "" +"Failed updating volume %(vol_id)s metadata using the provided " +"%(src_type)s %(src_id)s metadata" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:405 +#, python-format +msgid "" +"Failed fetching snapshot %(snapshot_id)s bootable flag using the provided" +" glance snapshot %(snapshot_ref_id)s volume reference" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:418 +#, python-format +msgid "Marking volume %s as bootable." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:421 +#, python-format +msgid "Failed updating volume %(volume_id)s bootable flag to true" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:448 +#, python-format +msgid "" +"Attempting download of %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s." 
+msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:455 +#: cinder/volume/flows/manager/create_volume.py:466 +#, python-format +msgid "" +"Failed to copy image %(image_id)s to volume: %(volume_id)s, error: " +"%(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:461 +#, python-format +msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:475 +#, python-format +msgid "" +"Downloaded image %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s successfully." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:512 +#, python-format +msgid "" +"Creating volume glance metadata for volume %(volume_id)s backed by image " +"%(image_id)s with: %(vol_metadata)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:526 +#, python-format +msgid "" +"Cloning %(volume_id)s from image %(image_id)s at location " +"%(image_location)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:552 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(updates)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:574 +#, python-format +msgid "Unable to create volume. Volume driver %s not initialized" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:588 +#, python-format +msgid "" +"Volume %(volume_id)s: being created using %(functor)s with specification:" +" %(volume_spec)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:611 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with creation provided " +"model %(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:619 +#, python-format +msgid "Volume %s: creating export" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:633 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with driver provided model " +"%(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:680 +#, python-format +msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" +msgstr "" + +#~ msgid "Connection to glance failed" +#~ msgstr "" + +#~ msgid "Invalid snapshot" +#~ msgstr "" + +#~ msgid "Invalid input received" +#~ msgstr "" + +#~ msgid "Invalid volume type" +#~ msgstr "" + +#~ msgid "Invalid volume" +#~ msgstr "" + +#~ msgid "Invalid host" +#~ msgstr "" + +#~ msgid "Invalid auth key" +#~ msgstr "" + +#~ msgid "Invalid metadata" +#~ msgstr "" + +#~ msgid "Invalid metadata size" +#~ msgstr "" + +#~ msgid "Migration error" +#~ msgstr "" + +#~ msgid "Quota exceeded" +#~ msgstr "" + +#~ msgid "Connection to swift failed" +#~ msgstr "" + +#~ msgid "Volume migration failed" +#~ msgstr "" + +#~ msgid "SSH command injection detected" +#~ msgstr "" + +#~ msgid "Invalid qos specs" +#~ msgstr "" + +#~ msgid "base image still has %s snapshots so not deleting base image" +#~ msgstr "" + +#~ msgid "Resize volume \"%(name)s\" to %(size)s" +#~ msgstr "" + +#~ msgid "Volume \"%(name)s\" resized. New size is %(size)s" +#~ msgstr "" + +#~ msgid "Invalid snapshot backing file format: %s" +#~ msgstr "" + +#~ msgid "Extend volume from %(old_size) to %(new_size)" +#~ msgstr "" + +#~ msgid "Disk not found: %s" +#~ msgstr "" + +#~ msgid "read timed out" +#~ msgstr "" + +#~ msgid "check_for_setup_error." +#~ msgstr "" + +#~ msgid "check_for_setup_error: Can not get device type." +#~ msgstr "" + +#~ msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." 
+#~ msgstr "" + +#~ msgid "" +#~ msgstr "" + +#~ msgid "_get_device_type: Storage Pool must be configured." +#~ msgstr "" + +#~ msgid "create_volume:volume name: %s." +#~ msgstr "" + +#~ msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +#~ msgstr "" + +#~ msgid "create_export: volume name:%s" +#~ msgstr "" + +#~ msgid "create_export:Volume %(name)s does not exist." +#~ msgstr "" + +#~ msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +#~ msgstr "" + +#~ msgid "terminate_connection:Host does not exist. Host name:%(host)s." +#~ msgstr "" + +#~ msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +#~ msgstr "" + +#~ msgid "create_snapshot:Device does not support snapshot." +#~ msgstr "" + +#~ msgid "create_snapshot:Resource pool needs 1GB valid size at least." +#~ msgstr "" + +#~ msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +#~ msgstr "" + +#~ msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s" +#~ msgstr "" + +#~ msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +#~ msgstr "" + +#~ msgid "delete_snapshot:Device does not support snapshot." +#~ msgstr "" + +#~ msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +#~ msgstr "" + +#~ msgid "_check_conf_file: %s" +#~ msgstr "" + +#~ msgid "Write login information to xml error. %s" +#~ msgstr "" + +#~ msgid "_get_login_info error. %s" +#~ msgstr "" + +#~ msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +#~ msgstr "" + +#~ msgid "_get_lun_set_info:%s" +#~ msgstr "" + +#~ msgid "_get_iscsi_info:%s" +#~ msgstr "" + +#~ msgid "CLI command:%s" +#~ msgstr "" + +#~ msgid "_execute_cli:%s" +#~ msgstr "" + +#~ msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +#~ msgstr "" + +#~ msgid "_get_tgt_iqn:iSCSI IP is %s." +#~ msgstr "" + +#~ msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +#~ msgstr "" + +#~ msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +#~ msgstr "" + +#~ msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +#~ msgstr "" + +#~ msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." +#~ msgstr "" + +#~ msgid "Invalid source volume %(reason)s." +#~ msgstr "" + +#~ msgid "The request is invalid." +#~ msgstr "" + +#~ msgid "Volume %(volume_id)s persistence file could not be found." +#~ msgstr "" + +#~ msgid "No disk at %(location)s" +#~ msgstr "" + +#~ msgid "Class %(class_name)s could not be found: %(exception)s" +#~ msgstr "" + +#~ msgid "Action not allowed." +#~ msgstr "" + +#~ msgid "Key pair %(key_name)s already exists." +#~ msgstr "" + +#~ msgid "Migration error: %(reason)s" +#~ msgstr "" + +#~ msgid "Maximum volume/snapshot size exceeded" +#~ msgstr "" + +#~ msgid "3PAR Host already exists: %(err)s. %(info)s" +#~ msgstr "" + +#~ msgid "Backup volume %(volume_id)s type not recognised." +#~ msgstr "" + +#~ msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s" +#~ msgstr "" + +#~ msgid "ssh_read: Read SSH timeout" +#~ msgstr "" + +#~ msgid "do_setup." +#~ msgstr "" + +#~ msgid "create_volume: volume name: %s." +#~ msgstr "" + +#~ msgid "delete_volume: volume name: %s." 
+#~ msgstr "" + +#~ msgid "create_cloned_volume: src volume: %(src)s tgt volume: %(tgt)s" +#~ msgstr "" + +#~ msgid "create_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" +#~ msgstr "" + +#~ msgid "delete_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" +#~ msgstr "" + +#~ msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Updating volume stats" +#~ msgstr "" + +#~ msgid "restore finished." +#~ msgstr "" + +#~ msgid "Error encountered during initialization of driver: %s" +#~ msgstr "" + +#~ msgid "Unabled to update stats, driver is uninitialized" +#~ msgstr "" + +#~ msgid "Snapshot file at %s does not exist." +#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %s does not exist" +#~ msgstr "" + +#~ msgid "Login to 3PAR array invalid" +#~ msgstr "" + +#~ msgid "There are no datastores present under %s." +#~ msgstr "" + +#~ msgid "Size for volume: %s not found, skipping secure delete." +#~ msgstr "" + +#~ msgid "Could not find attribute for LUN named %s" +#~ msgstr "" + +#~ msgid "Cleaning up incomplete backup operations" +#~ msgstr "" + +#~ msgid "Resetting volume %s to available (was backing-up)" +#~ msgstr "" + +#~ msgid "Resetting volume %s to error_restoring (was restoring-backup)" +#~ msgstr "" + +#~ msgid "Resetting backup %s to error (was creating)" +#~ msgstr "" + +#~ msgid "Resetting backup %s to available (was restoring)" +#~ msgstr "" + +#~ msgid "Resuming delete on backup: %s" +#~ msgstr "" + +#~ msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +#~ msgstr "" + +#~ msgid "create_backup finished. backup: %s" +#~ msgstr "" + +#~ msgid "delete_backup started, backup: %s" +#~ msgstr "" + +#~ msgid "delete_backup finished, backup %s deleted" +#~ msgstr "" + +#~ msgid "JSON transfer Error" +#~ msgstr "" + +#~ msgid "create volume error: %(err)s" +#~ msgstr "" + +#~ msgid "Create snapshot error." +#~ msgstr "" + +#~ msgid "Create luncopy error." +#~ msgstr "" + +#~ msgid "_find_host_lun_id transfer data error! " +#~ msgstr "" + +#~ msgid "ssh_read: Read SSH timeout." +#~ msgstr "" + +#~ msgid "There are no hosts in the inventory." +#~ msgstr "" + +#~ msgid "Unable to create volume: %(vol)s on the hosts: %(hosts)s." +#~ msgstr "" + +#~ msgid "Successfully cloned new backing: %s." +#~ msgstr "" + +#~ msgid "Successfully reverted clone: %(clone)s to snapshot: %(snapshot)s." +#~ msgstr "" + +#~ msgid "Copying backing files from %(src)s to %(dest)s." +#~ msgstr "" + +#~ msgid "Initiated copying of backing via task: %s." +#~ msgstr "" + +#~ msgid "Successfully copied backing to %s." +#~ msgstr "" + +#~ msgid "Registering backing at path: %s to inventory." +#~ msgstr "" + +#~ msgid "Initiated registring backing, task: %s." +#~ msgstr "" + +#~ msgid "Successfully registered backing: %s." +#~ msgstr "" + +#~ msgid "Reverting backing to snapshot: %s." +#~ msgstr "" + +#~ msgid "Initiated reverting snapshot via task: %s." +#~ msgstr "" + +#~ msgid "Successfully reverted to snapshot: %s." +#~ msgstr "" + +#~ msgid "Successfully copied disk data to: %s." +#~ msgstr "" + +#~ msgid "Error(s): %s occurred in the call to RetrieveProperties." +#~ msgstr "" + +#~ msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +#~ msgstr "" + +#~ msgid "Deploy v1 of the Cinder API. " +#~ msgstr "" + +#~ msgid "Deploy v2 of the Cinder API. " +#~ msgstr "" + +#~ msgid "_read_xml:%s" +#~ msgstr "" + +#~ msgid "request ip info is %s." +#~ msgstr "" + +#~ msgid "new str info is %s." 
+#~ msgstr "" + +#~ msgid "Failed to create iser target for volume %(volume_id)s." +#~ msgstr "" + +#~ msgid "Failed to remove iser target for volume %(volume_id)s." +#~ msgstr "" + +#~ msgid "rtstool is not installed correctly" +#~ msgstr "" + +#~ msgid "Creating iser_target for: %s" +#~ msgstr "" + +#~ msgid "Failed to create iser target for volume id:%(vol_id)s: %(e)s" +#~ msgstr "" + +#~ msgid "Removing iser_target for: %s" +#~ msgstr "" + +#~ msgid "Failed to remove iser target for volume id:%(vol_id)s: %(e)s" +#~ msgstr "" + +#~ msgid "Volume %s does not exist, it seems it was already deleted" +#~ msgstr "" + +#~ msgid "Executing zfs send/recv on the appliance" +#~ msgstr "" + +#~ msgid "zfs send/recv done, new volume %s created" +#~ msgstr "" + +#~ msgid "Failed to delete temp snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" + +#~ msgid "Failed to delete zfs recv snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" + +#~ msgid "rbd export-diff failed - %s" +#~ msgstr "" + +#~ msgid "rbd import-diff failed - %s" +#~ msgstr "" + +#~ msgid "%s is not on GPFS. Perhaps GPFS not mounted." +#~ msgstr "" + +#~ msgid "Folder %s does not exist, it seems it was already deleted." +#~ msgstr "" + +#~ msgid "No 'os-update_readonly_flag' was specified in request." +#~ msgstr "" + +#~ msgid "Volume 'readonly' flag must be specified in request as a boolean." +#~ msgstr "" + +#~ msgid "ISER provider_location not stored, using discovery" +#~ msgstr "" + +#~ msgid "Could not find iSER export for volume %s" +#~ msgstr "" + +#~ msgid "ISER Discovery: Found %s" +#~ msgstr "" + +#~ msgid "Failed to access the device on the path %(path)s: %(error)s." +#~ msgstr "" + +#~ msgid "iSER device not found at %s" +#~ msgstr "" + +#~ msgid "Found iSER node %(host_device)s (after %(tries)s rescans)." +#~ msgstr "" + +#~ msgid "Skipping ensure_export. No iser_target provisioned for volume: %s" +#~ msgstr "" + +#~ msgid "Skipping remove_export. No iser_target provisioned for volume: %s" +#~ msgstr "" + +#~ msgid "Downloading image: %s from glance image server." +#~ msgstr "" + +#~ msgid "Uploading image: %s to the Glance image server." +#~ msgstr "" + +#~ msgid "Invalid request body" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: prefix %s" +#~ msgstr "" + +#~ msgid "Schedule volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete schedule volume using flow: %s" +#~ msgstr "" + +#~ msgid "Create volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete create volume workflow" +#~ msgstr "" + +#~ msgid "Expected volume result not found" +#~ msgstr "" + +#~ msgid "Manager volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete manager volume workflow" +#~ msgstr "" + +#~ msgid "Unable to update stats, driver is uninitialized" +#~ msgstr "" + +#~ msgid "Bad reponse from server: %s" +#~ msgstr "" + +#~ msgid "%(flow)s has moved into state %(state)s from state %(old_state)s" +#~ msgstr "" + +#~ msgid "No request spec, will not reschedule" +#~ msgstr "" + +#~ msgid "No retry filter property or associated retry info, will not reschedule" +#~ msgstr "" + +#~ msgid "Retry info not present, will not reschedule" +#~ msgstr "" + +#~ msgid "Clear capabilities" +#~ msgstr "" + +#~ msgid "This usually means the volume was never succesfully created." 
+#~ msgstr "" + +#~ msgid "setting LU uppper (end) limit to %s" +#~ msgstr "" + +#~ msgid "Can't find lun or lun goup in array" +#~ msgstr "" + +#~ msgid "Volume to be restored to is smaller than the backup to be restored" +#~ msgstr "" + +#~ msgid "Volume driver '%(driver)s' not initialized." +#~ msgstr "" + +#~ msgid "in looping call" +#~ msgstr "" + +#~ msgid "Is the appropriate service running?" +#~ msgstr "" + +#~ msgid "Could not find another host" +#~ msgstr "" + +#~ msgid "Not enough allocatable volume gigabytes remaining" +#~ msgstr "" + +#~ msgid "Unable to update stats on non-intialized Volume Group: %s" +#~ msgstr "" + +#~ msgid "do_setup: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "migrate_volume started with more than one vdisk copy" +#~ msgstr "" + +#~ msgid "migrate_volume: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "Selected datastore: %s for the volume." +#~ msgstr "" + +#~ msgid "There are no valid datastores present under %s." +#~ msgstr "" + +#~ msgid "Unable to create volume, driver not initialized" +#~ msgstr "" + +#~ msgid "Migration %(migration_id)s could not be found." +#~ msgstr "" + +#~ msgid "Bad driver response status: %(status)s" +#~ msgstr "" + +#~ msgid "Instance %(instance_id)s could not be found." +#~ msgstr "" + +#~ msgid "Volume retype failed: %(reason)s" +#~ msgstr "" + +#~ msgid "SIGTERM received" +#~ msgstr "" + +#~ msgid "Child %(pid)d exited with status %(code)d" +#~ msgstr "" + +#~ msgid "_wait_child %d" +#~ msgstr "" + +#~ msgid "wait wrap.failed %s" +#~ msgstr "" + +#~ msgid "" +#~ "Report interval must be less than " +#~ "service down time. Current config: " +#~ ". Setting " +#~ "service_down_time to: %(new_service_down_time)s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target for volume %(name)s." +#~ msgstr "" + +#~ msgid "Updating iscsi target: %s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target %(name)s: %(e)s" +#~ msgstr "" + +#~ msgid "Caught '%(exception)s' exception." +#~ msgstr "" + +#~ msgid "Get code level failed" +#~ msgstr "" + +#~ msgid "do_setup: Could not get system name" +#~ msgstr "" + +#~ msgid "Failed to get license information." +#~ msgstr "" + +#~ msgid "do_setup: No configured nodes" +#~ msgstr "" + +#~ msgid "enter: _get_chap_secret_for_host: host name %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_chap_secret_for_host: host name " +#~ "%(host_name)s with secret %(chap_secret)s" +#~ msgstr "" + +#~ msgid "" +#~ "_create_host: Cannot clean host name. " +#~ "Host name is not unicode or string" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: %s" +#~ msgstr "" + +#~ msgid "leave: _get_host_from_connector: host %s" +#~ msgstr "" + +#~ msgid "enter: _create_host: host %s" +#~ msgstr "" + +#~ msgid "_create_host: No connector ports" +#~ msgstr "" + +#~ msgid "leave: _create_host: host %(host)s - %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +#~ msgstr "" + +#~ msgid "" +#~ "storwize_svc_multihostmap_enabled is set to " +#~ "False, Not allow multi host mapping" +#~ msgstr "" + +#~ msgid "volume %s mapping to multi host" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _map_vol_to_host: LUN %(result_lun)s, " +#~ "volume %(volume_name)s, host %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "leave: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "_create_host failed to return the host name." 
+#~ msgstr "" + +#~ msgid "_get_host_from_connector failed to return the host name for connector" +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to any host found." +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: Multiple mappings of " +#~ "volume %(vol_name)s found, no host " +#~ "specified." +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to host %(host_name)s found" +#~ msgstr "" + +#~ msgid "protocol must be specified as ' iSCSI' or ' FC'" +#~ msgstr "" + +#~ msgid "enter: _create_vdisk: vdisk %s " +#~ msgstr "" + +#~ msgid "" +#~ "_create_vdisk %(name)s - did not find success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find success " +#~ "message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find mapping " +#~ "id in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to prepare FlashCopy" +#~ " from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "Unexecpted mapping status %(status)s for " +#~ "mapping %(id)s. Attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Mapping %(id)s prepare failed to " +#~ "complete within the allotted %(to)d " +#~ "seconds timeout. Terminating." +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s with " +#~ "exception %(ex)s" +#~ msgstr "" + +#~ msgid "_prepare_fc_map: %s" +#~ msgstr "" + +#~ msgid "" +#~ "_start_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "enter: _run_flashcopy: execute FlashCopy from" +#~ " source %(source)s to target %(target)s" +#~ msgstr "" + +#~ msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +#~ msgstr "" + +#~ msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %(src_vdisk)s (%(src_id)s) does not exist" +#~ msgstr "" + +#~ msgid "" +#~ "_create_copy: cannot get source vdisk " +#~ "%(src)s capacity from vdisk attributes " +#~ "%(attr)s" +#~ msgstr "" + +#~ msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_flashcopy_mapping_attributes: mapping " +#~ "%(fc_map_id)s, attributes %(attributes)s" +#~ msgstr "" + +#~ msgid "enter: _is_vdisk_defined: vdisk %s " +#~ msgstr "" + +#~ msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +#~ msgstr "" + +#~ msgid "enter: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "warning: Tried to delete vdisk %s but it does not exist." 
+#~ msgstr "" + +#~ msgid "leave: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "" +#~ "_add_vdisk_copy %(name)s - did not find" +#~ " success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "_get_vdisk_copy_attrs: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "_get_pool_attrs: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "enter: _execute_command_and_parse_attributes: command %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _execute_command_and_parse_attributes:\n" +#~ "command: %(cmd)s\n" +#~ "attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "_get_hdr_dic: attribute headers and values do not match.\n" +#~ " Headers: %(header)s\n" +#~ " Values: %(row)s" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ "stdout: %(out)s\n" +#~ "stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "Did not find expected column in %(fun)s: %(hdr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Volume size %(size)s cannot be lesser" +#~ " than the snapshot size %(snap_size)s. " +#~ "They must be >= original snapshot " +#~ "size." +#~ msgstr "" + +#~ msgid "" +#~ "Clones currently disallowed when %(size)s " +#~ "< %(source_size)s. They must be >= " +#~ "original volume size." +#~ msgstr "" + +#~ msgid "" +#~ "Size of specified image %(image_size)s " +#~ "is larger than volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "" +#~ "Image minDisk size %(min_disk)s is " +#~ "larger than the volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "Updating volume %(volume_id)s with %(update)s" +#~ msgstr "" + +#~ msgid "Volume %s: resetting 'creating' status failed" +#~ msgstr "" + +#~ msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s" +#~ msgstr "" + +#~ msgid "Marking volume %s as bootable" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting download of %(image_id)s " +#~ "(%(image_location)s) to volume %(volume_id)s" +#~ msgstr "" + +#~ msgid "" +#~ "Downloaded image %(image_id)s (%(image_location)s)" +#~ " to volume %(volume_id)s successfully" +#~ msgstr "" + +#~ msgid "" +#~ "Creating volume glance metadata for " +#~ "volume %(volume_id)s backed by image " +#~ "%(image_id)s with: %(vol_metadata)s" +#~ msgstr "" + +#~ msgid "" +#~ "Cloning %(volume_id)s from image %(image_id)s" +#~ " at location %(image_location)s" +#~ msgstr "" + diff --git a/cinder/locale/eu/LC_MESSAGES/cinder.po b/cinder/locale/eu/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..2a1d92af14 --- /dev/null +++ b/cinder/locale/eu/LC_MESSAGES/cinder.po @@ -0,0 +1,10040 @@ +# Basque translations for cinder. +# Copyright (C) 2013 ORGANIZATION +# This file is distributed under the same license as the cinder project. 
+# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Cinder\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-02-03 06:16+0000\n" +"PO-Revision-Date: 2013-11-06 03:20+0000\n" +"Last-Translator: Tom Fifield \n" +"Language-Team: Basque " +"(http://www.transifex.com/projects/p/openstack/language/eu/)\n" +"Plural-Forms: nplurals=2; plural=(n != 1)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:102 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:66 cinder/brick/exception.py:33 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:88 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:107 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:112 +#, python-format +msgid "Volume driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:116 +#, python-format +msgid "Backup driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:120 +#, python-format +msgid "Connection to glance failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:124 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:129 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:133 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:137 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:141 +msgid "Volume driver not ready." +msgstr "" + +#: cinder/exception.py:145 cinder/brick/exception.py:74 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:150 +#, python-format +msgid "Invalid snapshot: %(reason)s" +msgstr "" + +#: cinder/exception.py:154 +#, python-format +msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:159 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:163 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:167 +msgid "The results are invalid." +msgstr "" + +#: cinder/exception.py:171 +#, python-format +msgid "Invalid input received: %(reason)s" +msgstr "" + +#: cinder/exception.py:175 +#, python-format +msgid "Invalid volume type: %(reason)s" +msgstr "" + +#: cinder/exception.py:179 +#, python-format +msgid "Invalid volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:183 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:187 +#, python-format +msgid "Invalid host: %(reason)s" +msgstr "" + +#: cinder/exception.py:193 cinder/brick/exception.py:81 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:197 +#, python-format +msgid "Invalid auth key: %(reason)s" +msgstr "" + +#: cinder/exception.py:201 +#, python-format +msgid "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" +msgstr "" + +#: cinder/exception.py:206 +msgid "Service is unavailable at this time." 
+msgstr "" + +#: cinder/exception.py:210 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:214 +#, python-format +msgid "The device in the path %(path)s is unavailable: %(reason)s" +msgstr "" + +#: cinder/exception.py:218 +#, python-format +msgid "Expected a uuid but received %(uuid)s." +msgstr "" + +#: cinder/exception.py:222 cinder/brick/exception.py:68 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:228 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:232 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "" +"Volume %(volume_id)s has no administration metadata with key " +"%(metadata_key)s." +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Invalid metadata: %(reason)s" +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Invalid metadata size: %(reason)s" +msgstr "" + +#: cinder/exception.py:250 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:255 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:264 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:269 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s deletion is not allowed with volumes " +"present with the type." +msgstr "" + +#: cinder/exception.py:274 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:278 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:282 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:287 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:291 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:295 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:328 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:332 +#, python-format +msgid "Unknown quota resources %(unknown)s." 
+msgstr "" + +#: cinder/exception.py:336 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:340 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:344 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:348 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:352 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:356 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:365 +#, python-format +msgid "Volume Type %(id)s already exists." +msgstr "" + +#: cinder/exception.py:369 +#, python-format +msgid "Volume type encryption for type %(type_id)s already exists." +msgstr "" + +#: cinder/exception.py:373 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:377 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:381 +#, python-format +msgid "Could not find parameter %(param)s" +msgstr "" + +#: cinder/exception.py:385 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:389 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:398 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:402 +#, python-format +msgid "Quota exceeded: code=%(code)s" +msgstr "" + +#: cinder/exception.py:409 +#, python-format +msgid "" +"Requested volume or snapshot exceeds allowed Gigabytes quota. Requested " +"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." +msgstr "" + +#: cinder/exception.py:415 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:419 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:423 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:427 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:432 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:436 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:440 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:444 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:449 +#, python-format +msgid "Glance metadata for volume/snapshot %(id)s cannot be found." 
+msgstr "" + +#: cinder/exception.py:453 +#, python-format +msgid "Failed to export for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:457 +#, python-format +msgid "Failed to create metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:461 +#, python-format +msgid "Failed to update metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:465 +#, python-format +msgid "Failed to copy metadata to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:469 +#, python-format +msgid "Failed to copy image to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:473 +msgid "Invalid Ceph args provided for backup rbd operation" +msgstr "" + +#: cinder/exception.py:477 +msgid "An error has occurred during backup operation" +msgstr "" + +#: cinder/exception.py:481 +msgid "Backup RBD operation failed" +msgstr "" + +#: cinder/exception.py:485 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:489 +msgid "Failed to identify volume backend." +msgstr "" + +#: cinder/exception.py:493 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "" + +#: cinder/exception.py:497 +#, python-format +msgid "Connection to swift failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:501 +#, python-format +msgid "Transfer %(transfer_id)s could not be found." +msgstr "" + +#: cinder/exception.py:505 +#, python-format +msgid "Volume migration failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:509 +#, python-format +msgid "SSH command injection detected: %(command)s" +msgstr "" + +#: cinder/exception.py:513 +#, python-format +msgid "QoS Specs %(specs_id)s already exists." +msgstr "" + +#: cinder/exception.py:517 +#, python-format +msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:522 +#, python-format +msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "No such QoS spec %(specs_id)s." +msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:536 +#, python-format +msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:541 +#, python-format +msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." +msgstr "" + +#: cinder/exception.py:546 +#, python-format +msgid "Invalid qos specs: %(reason)s" +msgstr "" + +#: cinder/exception.py:550 +#, python-format +msgid "QoS Specs %(specs_id)s is still associated with entities." +msgstr "" + +#: cinder/exception.py:554 +#, python-format +msgid "key manager error: %(reason)s" +msgstr "" + +#: cinder/exception.py:560 +msgid "Coraid Cinder Driver exception." +msgstr "" + +#: cinder/exception.py:564 +msgid "Failed to encode json data." +msgstr "" + +#: cinder/exception.py:568 +msgid "Login on ESM failed." +msgstr "" + +#: cinder/exception.py:572 +msgid "Relogin on ESM failed." +msgstr "" + +#: cinder/exception.py:576 +#, python-format +msgid "Group with name \"%(group_name)s\" not found." +msgstr "" + +#: cinder/exception.py:580 +#, python-format +msgid "ESM configure request failed: %(message)s." +msgstr "" + +#: cinder/exception.py:584 +#, python-format +msgid "Coraid ESM not available with reason: %(reason)s." +msgstr "" + +#: cinder/exception.py:589 +msgid "Zadara Cinder Driver exception." 
+msgstr "" + +#: cinder/exception.py:593 +#, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:597 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:601 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:605 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:609 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:613 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:618 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:622 +msgid "SolidFire Cinder Driver exception" +msgstr "" + +#: cinder/exception.py:626 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:630 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:636 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:641 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:645 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:649 cinder/exception.py:662 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:654 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:658 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/manager.py:133 +msgid "Notifying Schedulers of capabilities ..." +msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:105 +#, python-format +msgid "" +"Default quota for resource: %(res)s is set by the default quota flag: " +"quota_%(res)s, it is now deprecated. Please use the the default quota " +"class for default quota." +msgstr "" + +#: cinder/quota.py:748 +#, python-format +msgid "Created reservations %s" +msgstr "" + +#: cinder/quota.py:770 +#, python-format +msgid "Failed to commit reservations %s" +msgstr "" + +#: cinder/quota.py:790 +#, python-format +msgid "Failed to roll back reservations %s" +msgstr "" + +#: cinder/quota.py:876 +msgid "Cannot register resource" +msgstr "" + +#: cinder/quota.py:879 +msgid "Cannot register resources" +msgstr "" + +#: cinder/quota_utils.py:46 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume - " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/quota_utils.py:56 cinder/transfer/api.py:168 +#: cinder/volume/flows/api/create_volume.py:520 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/service.py:95 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:108 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:148 +#, python-format +msgid "" +"Report interval must be less than service down time. Current config " +"service_down_time: %(service_down_time)s, report_interval for this: " +"service is: %(report_interval)s. 
Setting global service_down_time to: " +"%(new_down_time)s" +msgstr "" + +#: cinder/service.py:216 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:255 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:270 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:276 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:298 +#, python-format +msgid "" +"Value of config option %(name)s_workers must be integer greater than 1. " +"Input value ignored." +msgstr "" + +#: cinder/service.py:373 +msgid "serve() can only be called once" +msgstr "" + +#: cinder/service.py:379 cinder/openstack/common/service.py:166 +#: cinder/openstack/common/service.py:384 +msgid "Full set of CONF:" +msgstr "" + +#: cinder/service.py:387 +#, python-format +msgid "%s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Can not translate %s to integer." +msgstr "" + +#: cinder/utils.py:127 +#, python-format +msgid "May specify only one of %s" +msgstr "" + +#: cinder/utils.py:212 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:228 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:412 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:423 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:698 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:759 +#, python-format +msgid "Volume driver %s not initialized" +msgstr "" + +#: cinder/wsgi.py:127 cinder/openstack/common/sslutils.py:50 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: cinder/wsgi.py:130 cinder/openstack/common/sslutils.py:53 +#, python-format +msgid "Unable to find ca_file : %s" +msgstr "" + +#: cinder/wsgi.py:133 cinder/openstack/common/sslutils.py:56 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: cinder/wsgi.py:136 cinder/openstack/common/sslutils.py:59 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:169 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:206 +#, python-format +msgid "Started %(name)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:244 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:313 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." 
+msgstr "" + +#: cinder/api/common.py:92 cinder/api/common.py:126 cinder/volume/api.py:266 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:95 cinder/api/common.py:130 cinder/volume/api.py:263 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:120 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:134 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:162 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:189 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:182 +msgid "Initializing extension manager." +msgstr "" + +#: cinder/api/extensions.py:197 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:235 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:236 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:240 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:256 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:262 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:276 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:287 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:356 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:266 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:463 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:786 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:907 +msgid "subclasses must implement construct()!" 
+msgstr "" + +#: cinder/api/contrib/admin_actions.py:81 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:227 +#: cinder/api/contrib/volume_transfer.py:157 +#: cinder/api/contrib/volume_transfer.py:193 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:224 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:60 +msgid "Snapshot not found." +msgstr "" + +#: cinder/api/contrib/hosts.py:86 cinder/api/openstack/wsgi.py:245 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:136 +#, python-format +msgid "Host '%s' could not be found." +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:168 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:180 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:206 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:214 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:111 +msgid "Please specify a name for QoS specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:220 +msgid "Failed to disassociate qos specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:222 +msgid "Qos specs still in use." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:298 +#: cinder/api/contrib/qos_specs_manage.py:351 +msgid "Volume Type id must not be None." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:72 +msgid "Missing required element quota_class_set in request body." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:81 +msgid "Quota class limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:85 +msgid "Quota class limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:60 +msgid "Quota limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quotas.py:65 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:100 +msgid "Missing required element quota_set in request body." +msgstr "" + +#: cinder/api/contrib/quotas.py:111 +#, python-format +msgid "Bad key(s) in quota set: %s" +msgstr "" + +#: cinder/api/contrib/scheduler_hints.py:36 +msgid "Malformed scheduler_hints attribute" +msgstr "" + +#: cinder/api/contrib/services.py:84 +msgid "" +"Query by service parameter is deprecated. Please use binary parameter " +"instead." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:51 +msgid "'status' must be specified." 
+msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:61 +#, python-format +msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:67 +#, python-format +msgid "" +"Provided snapshot status %(provided)s not allowed for snapshot with " +"status %(current)s." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:79 +msgid "progress must be an integer percentage" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:101 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:105 +#: cinder/api/v1/snapshot_metadata.py:75 cinder/api/v1/volume_metadata.py:75 +#: cinder/api/v2/snapshot_metadata.py:75 cinder/api/v2/volume_metadata.py:74 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:108 +#: cinder/api/v1/snapshot_metadata.py:79 cinder/api/v1/volume_metadata.py:79 +#: cinder/api/v2/snapshot_metadata.py:79 cinder/api/v2/volume_metadata.py:78 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:150 +msgid "" +"Key names can only contain alphanumeric characters, underscores, periods," +" colons and hyphens." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:99 +#, python-format +msgid "" +"Invalid request to attach volume to an instance %(instance_uuid)s and a " +"host %(host_name)s simultaneously" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:107 +msgid "Invalid request to attach volume to an invalid target" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:111 +msgid "" +"Invalid request to attach volume with an invalid mode. Attaching mode " +"should be 'rw' or 'ro'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:196 +msgid "Unable to fetch connection information from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:216 +msgid "Unable to terminate volume connection from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:229 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:237 +msgid "Bad value for 'force' parameter." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:240 +msgid "'force' is not string or bool." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:280 +msgid "New volume size must be specified as an integer." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:299 +msgid "Must specify readonly in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:307 +msgid "Bad value for 'readonly'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:311 +msgid "'readonly' not string or bool" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:325 +msgid "New volume type must be specified." 
+msgstr "" + +#: cinder/api/contrib/volume_transfer.py:131 +msgid "Listing volume transfers" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:147 +#, python-format +msgid "Creating new volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:162 +#, python-format +msgid "Creating transfer of volume %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:183 +#, python-format +msgid "Accepting volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:196 +#, python-format +msgid "Accepting transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:217 +#, python-format +msgid "Delete transfer with id: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:64 +msgid "key_size must be non-negative" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:67 +msgid "key_size must be an integer" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:73 +msgid "provider must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:75 +msgid "control_location must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:83 +#, python-format +msgid "Valid control location are: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:111 +msgid "Cannot create encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:115 +msgid "Create body is not valid." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:157 +msgid "Cannot delete encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/middleware/auth.py:108 +msgid "Invalid service catalog json." +msgstr "" + +#: cinder/api/middleware/fault.py:44 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:53 cinder/api/openstack/wsgi.py:984 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/fault.py:69 +#, python-format +msgid "%(exception)s: %(explanation)s" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:55 cinder/api/middleware/sizelimit.py:64 +#: cinder/api/middleware/sizelimit.py:78 +msgid "Request is too large." +msgstr "" + +#: cinder/api/openstack/__init__.py:69 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:80 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:104 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:126 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." 
+msgstr "" + +#: cinder/api/openstack/wsgi.py:220 cinder/api/openstack/wsgi.py:634 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:639 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:677 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:682 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:685 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:793 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:799 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:803 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:914 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:917 cinder/api/openstack/wsgi.py:930 +#: cinder/api/v1/snapshot_metadata.py:53 cinder/api/v1/snapshot_metadata.py:71 +#: cinder/api/v1/snapshot_metadata.py:96 cinder/api/v1/snapshot_metadata.py:121 +#: cinder/api/v1/volume_metadata.py:53 cinder/api/v1/volume_metadata.py:71 +#: cinder/api/v1/volume_metadata.py:96 cinder/api/v1/volume_metadata.py:121 +#: cinder/api/v2/snapshot_metadata.py:53 cinder/api/v2/snapshot_metadata.py:71 +#: cinder/api/v2/snapshot_metadata.py:96 cinder/api/v2/snapshot_metadata.py:121 +#: cinder/api/v2/volume_metadata.py:52 cinder/api/v2/volume_metadata.py:70 +#: cinder/api/v2/volume_metadata.py:95 cinder/api/v2/volume_metadata.py:120 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:927 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:939 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:987 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:139 cinder/api/v2/limits.py:138 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:264 cinder/api/v2/limits.py:261 +msgid "This request was rate-limited." 
+msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:37 cinder/api/v1/snapshot_metadata.py:117 +#: cinder/api/v1/snapshot_metadata.py:156 cinder/api/v2/snapshot_metadata.py:37 +#: cinder/api/v2/snapshot_metadata.py:117 +#: cinder/api/v2/snapshot_metadata.py:156 +msgid "snapshot does not exist" +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:139 +#: cinder/api/v1/snapshot_metadata.py:149 cinder/api/v1/volume_metadata.py:139 +#: cinder/api/v1/volume_metadata.py:149 cinder/api/v2/snapshot_metadata.py:139 +#: cinder/api/v2/snapshot_metadata.py:149 cinder/api/v2/volume_metadata.py:138 +#: cinder/api/v2/volume_metadata.py:148 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:119 cinder/api/v2/snapshots.py:120 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:184 +msgid "'volume_id' must be specified" +msgstr "" + +#: cinder/api/v1/snapshots.py:182 cinder/api/v2/snapshots.py:193 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:186 cinder/api/v2/snapshots.py:202 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:37 cinder/api/v1/volume_metadata.py:117 +#: cinder/api/v1/volume_metadata.py:156 cinder/api/v2/volume_metadata.py:36 +#: cinder/api/v2/volume_metadata.py:116 cinder/api/v2/volume_metadata.py:155 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:111 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:290 cinder/api/v2/volumes.py:228 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:344 cinder/api/v1/volumes.py:348 +#: cinder/api/v2/volumes.py:298 cinder/api/v2/volumes.py:302 +msgid "Invalid imageRef provided." +msgstr "" + +#: cinder/api/v1/volumes.py:388 cinder/api/v2/volumes.py:354 +#, python-format +msgid "snapshot id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:401 +#, python-format +msgid "source vol id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:412 cinder/api/v2/volumes.py:377 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/v1/volumes.py:496 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/snapshots.py:111 cinder/api/v2/snapshots.py:126 +#: cinder/api/v2/snapshots.py:267 +msgid "Snapshot could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:174 cinder/api/v2/snapshots.py:234 +#: cinder/api/v2/volumes.py:313 cinder/api/v2/volumes.py:419 +#, python-format +msgid "Missing required element '%s' in request body" +msgstr "" + +#: cinder/api/v2/snapshots.py:190 cinder/api/v2/volumes.py:217 +#: cinder/api/v2/volumes.py:234 cinder/api/v2/volumes.py:449 +msgid "Volume could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:230 cinder/api/v2/volumes.py:415 +msgid "Missing request body" +msgstr "" + +#: cinder/api/v2/types.py:70 +msgid "Volume type not found" +msgstr "" + +#: cinder/api/v2/volumes.py:237 +msgid "Volume cannot be deleted while in attached state" +msgstr "" + +#: cinder/api/v2/volumes.py:343 +msgid "Volume type not found." 
+msgstr "" + +#: cinder/api/v2/volumes.py:366 +#, python-format +msgid "source volume id:%s not found" +msgstr "" + +#: cinder/api/v2/volumes.py:472 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:66 +msgid "Backup status must be available or error" +msgstr "" + +#: cinder/backup/api.py:105 +msgid "Volume to be backed up must be available" +msgstr "" + +#: cinder/backup/api.py:140 +msgid "Backup status must be available" +msgstr "" + +#: cinder/backup/api.py:145 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:154 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:170 +msgid "Volume to be restored to must be available" +msgstr "" + +#: cinder/backup/api.py:176 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." +msgstr "" + +#: cinder/backup/api.py:181 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:97 +msgid "NULL host not allowed for volume backend lookup." +msgstr "" + +#: cinder/backup/manager.py:100 +#, python-format +msgid "Checking hostname '%s' for backend info." +msgstr "" + +#: cinder/backup/manager.py:107 +#, python-format +msgid "Backend not found in hostname (%s) so using default." +msgstr "" + +#: cinder/backup/manager.py:117 +#, python-format +msgid "Manager requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:120 cinder/backup/manager.py:132 +msgid "Fetching default backend." +msgstr "" + +#: cinder/backup/manager.py:123 +#, python-format +msgid "Volume manager for backend '%s' does not exist." +msgstr "" + +#: cinder/backup/manager.py:129 +#, python-format +msgid "Driver requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:147 +#, python-format +msgid "" +"Registering backend %(backend)s (host=%(host)s " +"backend_name=%(backend_name)s)." +msgstr "" + +#: cinder/backup/manager.py:154 +#, python-format +msgid "Registering default backend %s." +msgstr "" + +#: cinder/backup/manager.py:158 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)." +msgstr "" + +#: cinder/backup/manager.py:165 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s." +msgstr "" + +#: cinder/backup/manager.py:184 +msgid "Cleaning up incomplete backup operations." +msgstr "" + +#: cinder/backup/manager.py:189 +#, python-format +msgid "Resetting volume %s to available (was backing-up)." +msgstr "" + +#: cinder/backup/manager.py:194 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)." +msgstr "" + +#: cinder/backup/manager.py:206 +#, python-format +msgid "Resetting backup %s to error (was creating)." +msgstr "" + +#: cinder/backup/manager.py:212 +#, python-format +msgid "Resetting backup %s to available (was restoring)." +msgstr "" + +#: cinder/backup/manager.py:217 +#, python-format +msgid "Resuming delete on backup: %s." +msgstr "" + +#: cinder/backup/manager.py:225 +#, python-format +msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:237 +#, python-format +msgid "" +"Create backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s." 
+msgstr "" + +#: cinder/backup/manager.py:249 +#, python-format +msgid "" +"Create backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:282 +#, python-format +msgid "Create backup finished. backup: %s." +msgstr "" + +#: cinder/backup/manager.py:286 +#, python-format +msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:299 +#, python-format +msgid "" +"Restore backup aborted: expected volume status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:310 +#, python-format +msgid "" +"Restore backup aborted: expected backup status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:329 +#, python-format +msgid "" +"Restore backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:360 +#, python-format +msgid "" +"Restore backup finished, backup %(backup_id)s restored to volume " +"%(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:379 +#, python-format +msgid "Delete backup started, backup: %s." +msgstr "" + +#: cinder/backup/manager.py:386 +#, python-format +msgid "" +"Delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:399 +#, python-format +msgid "" +"Delete backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:422 +#, python-format +msgid "Delete backup finished, backup %s deleted." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:116 +msgid "" +"rbd striping not supported - ignoring configuration settings for rbd " +"striping" +msgstr "" + +#: cinder/backup/drivers/ceph.py:147 +#, python-format +msgid "invalid user '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:213 +msgid "backup_id required" +msgstr "" + +#: cinder/backup/drivers/ceph.py:224 +#, python-format +msgid "discarding %(length)s bytes from offset %(offset)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:232 +#, python-format +msgid "writing zeroes chunk %d" +msgstr "" + +#: cinder/backup/drivers/ceph.py:246 +#, python-format +msgid "transferring data between '%(src)s' and '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:250 +#, python-format +msgid "%(chunks)s chunks of %(bytes)s bytes to be transferred" +msgstr "" + +#: cinder/backup/drivers/ceph.py:269 +#, python-format +msgid "transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:279 +#, python-format +msgid "transferring remaining %s bytes" +msgstr "" + +#: cinder/backup/drivers/ceph.py:295 +#, python-format +msgid "creating base image '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:322 cinder/backup/drivers/ceph.py:603 +#, python-format +msgid "deleting backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:325 +msgid "no backup snapshot to delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:361 +#, python-format +msgid "trying diff format name format basename='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:369 +#, python-format +msgid "image %s not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:377 +#, python-format +msgid "base image still has %s snapshots so skipping base image delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:382 +#, python-format +msgid "deleting base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:389 +#, python-format +msgid "image busy, retrying %(retries)s more time(s) in %(delay)ss" +msgstr "" + +#: cinder/backup/drivers/ceph.py:394 +msgid "max retries reached - raising error" +msgstr "" + +#: cinder/backup/drivers/ceph.py:397 +#, python-format +msgid "base backup image='%s' deleted)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:407 +#, python-format +msgid "deleting source snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:453 +#, python-format +msgid "performing differential transfer from '%(src)s' to '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:478 +#, python-format +msgid "rbd diff op failed - (ret=%(ret)s stderr=%(stderr)s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:488 +#, python-format +msgid "image '%s' not found - trying diff format name" +msgstr "" + +#: cinder/backup/drivers/ceph.py:493 +#, python-format +msgid "diff format image '%s' not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:528 +#, python-format +msgid "using --from-snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:543 +#, python-format +msgid "source snap '%s' is stale so deleting" +msgstr "" + +#: cinder/backup/drivers/ceph.py:555 +#, python-format +msgid "" +"snap='%(snap)s' does not exist in base image='%(base)s' - aborting " +"incremental backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:566 +#, python-format +msgid "creating backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:586 +#, python-format +msgid "differential backup transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:595 +msgid "differential backup transfer failed" +msgstr "" + +#: 
cinder/backup/drivers/ceph.py:625 +#, python-format +msgid "creating base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:634 +msgid "copying data" +msgstr "" + +#: cinder/backup/drivers/ceph.py:694 +#, python-format +msgid "looking for snapshot of backup base '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:697 +#, python-format +msgid "backup base '%s' has no snapshots" +msgstr "" + +#: cinder/backup/drivers/ceph.py:704 +#, python-format +msgid "backup '%s' has no snapshot" +msgstr "" + +#: cinder/backup/drivers/ceph.py:708 +#, python-format +msgid "backup should only have one snapshot but instead has %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:713 +#, python-format +msgid "found snapshot '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:734 +msgid "need non-zero volume size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:751 +#, python-format +msgid "Starting backup of volume='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:764 +msgid "forcing full backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:776 +#, python-format +msgid "backup '%s' finished." +msgstr "" + +#: cinder/backup/drivers/ceph.py:834 +msgid "adjusting restore vol size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:846 +#, python-format +msgid "trying incremental restore from base='%(base)s' snap='%(snap)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:858 +msgid "differential restore failed, trying full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:869 +#, python-format +msgid "restore transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:916 +#, python-format +msgid "rbd has %s extents" +msgstr "" + +#: cinder/backup/drivers/ceph.py:938 +msgid "dest volume is original volume - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:959 +msgid "destination has extents - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:964 +#, python-format +msgid "no restore point found for backup='%s', forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:995 +msgid "forcing full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1005 +#, python-format +msgid "starting restore from Ceph backup=%(src)s to volume=%(dest)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1016 +msgid "volume_file does not support fileno() so skipping fsync()" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1021 +msgid "restore finished successfully." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:1023 +#, python-format +msgid "restore finished with error - %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1029 +#, python-format +msgid "delete started for backup=%s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1034 +msgid "rbd image not found but continuing anyway so that db entry can be removed" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1037 +#, python-format +msgid "delete '%s' finished with warning" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1039 +#, python-format +msgid "delete '%s' finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:106 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:123 +#, python-format +msgid "single_user auth mode enabled, but %(param)s not set" +msgstr "" + +#: cinder/backup/drivers/swift.py:141 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:146 +#, python-format +msgid "container %s does not exist" +msgstr "" + +#: cinder/backup/drivers/swift.py:151 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/drivers/swift.py:157 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:173 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:182 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:192 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:209 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/drivers/swift.py:214 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:219 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:224 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/drivers/swift.py:234 +#, python-format +msgid "volume size %d is invalid." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:248 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:271 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/drivers/swift.py:278 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:287 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/drivers/swift.py:291 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/drivers/swift.py:297 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:301 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:304 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:312 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/drivers/swift.py:328 cinder/backup/drivers/tsm.py:324 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/drivers/swift.py:345 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/drivers/swift.py:350 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:356 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/drivers/swift.py:362 +#, python-format +msgid "" +"restoring object from swift. backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:378 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/drivers/swift.py:401 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:409 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:423 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:428 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:432 cinder/backup/drivers/tsm.py:378 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:446 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:455 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:458 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:468 cinder/backup/drivers/tsm.py:440 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/backup/drivers/tsm.py:85 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to create device hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:143 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to obtain backup success notification from " +"server.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:173 +#, python-format +msgid "" +"restore: %(vol_id)s Failed.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:199 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a block device." +msgstr "" + +#: cinder/backup/drivers/tsm.py:206 +#, python-format +msgid "backup: %(vol_id)s Failed. Cannot obtain real path to device %(path)s." +msgstr "" + +#: cinder/backup/drivers/tsm.py:213 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a file." +msgstr "" + +#: cinder/backup/drivers/tsm.py:260 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to remove backup hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:286 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to TSM, volume path: " +"%(volume_path)s," +msgstr "" + +#: cinder/backup/drivers/tsm.py:298 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:308 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:338 +#, python-format +msgid "" +"restore: starting restore of backup from TSM to volume %(volume_id)s, " +"backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:352 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:362 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:413 +#, python-format +msgid "" +"delete: %(vol_id)s Failed to run dsmc with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:421 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments with " +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:432 +#, python-format +msgid "" +"delete: %(vol_id)s Failed with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/brick/exception.py:55 +#, python-format +msgid "Exception in string format operation. 
msg='%s'" +msgstr "" + +#: cinder/brick/exception.py:85 +msgid "We are unable to locate any Fibre Channel devices." +msgstr "" + +#: cinder/brick/exception.py:89 +msgid "Unable to find a Fibre Channel volume device." +msgstr "" + +#: cinder/brick/exception.py:93 +#, python-format +msgid "Volume device not found at %(device)s." +msgstr "" + +#: cinder/brick/exception.py:97 +#, python-format +msgid "Unable to find Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:101 +#, python-format +msgid "Failed to create Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:105 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:109 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:113 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:117 +#, python-format +msgid "Connect to volume via protocol %(protocol)s not supported." +msgstr "" + +#: cinder/brick/initiator/connector.py:127 +#, python-format +msgid "Invalid InitiatorConnector protocol specified %(protocol)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:140 +#, python-format +msgid "Failed to access the device on the path %(path)s: %(error)s %(info)s." +msgstr "" + +#: cinder/brick/initiator/connector.py:229 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. Try" +" number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:242 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:317 +#, python-format +msgid "Could not find the iSCSI Initiator File %s" +msgstr "" + +#: cinder/brick/initiator/connector.py:609 +msgid "We are unable to locate any Fibre Channel devices" +msgstr "" + +#: cinder/brick/initiator/connector.py:619 +#, python-format +msgid "Looking for Fibre Channel dev %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:629 +msgid "Fibre Channel volume device not found." +msgstr "" + +#: cinder/brick/initiator/connector.py:633 +#, python-format +msgid "Fibre volume not yet found. Will rescan & retry. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:649 +#, python-format +msgid "Found Fibre Channel volume %(name)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:658 +#, python-format +msgid "Multipath device discovered %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:776 +#, python-format +msgid "AoE volume not yet found at: %(path)s. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:789 +#, python-format +msgid "Found AoE device %(path)s (after %(tries)s rediscover)" +msgstr "" + +#: cinder/brick/initiator/connector.py:815 +#, python-format +msgid "aoe-discover: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:825 +#, python-format +msgid "aoe-revalidate %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:834 +#, python-format +msgid "aoe-flush %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:858 +msgid "" +"Connection details not present. RemoteFsClient may not initialize " +"properly." 
+msgstr "" + +#: cinder/brick/initiator/connector.py:915 +msgid "Invalid connection_properties specified no device_path attribute" +msgstr "" + +#: cinder/brick/initiator/linuxfc.py:50 cinder/brick/initiator/linuxfc.py:56 +msgid "systool is not installed" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:99 +#: cinder/brick/initiator/linuxscsi.py:107 +#: cinder/brick/initiator/linuxscsi.py:124 +#, python-format +msgid "multipath call failed exit (%(code)s)" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:145 +#, python-format +msgid "Couldn't find multipath device %(line)s" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:149 +#, python-format +msgid "Found multipath device = %(mdev)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:140 +msgid "Attempting recreate of backing lun..." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:158 +#, python-format +msgid "" +"Failed to recover attempt to create iscsi backing lun for volume " +"id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:177 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:184 +#, python-format +msgid "" +"Created volume path %(vp)s,\n" +"content: %(vc)%" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:216 cinder/brick/iscsi/iscsi.py:365 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:227 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:258 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:262 +#, python-format +msgid "Volume path %s does not exist, nothing to remove." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:280 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:290 cinder/brick/iscsi/iscsi.py:550 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:375 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:469 +msgid "cinder-rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:489 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:513 cinder/brick/iscsi/iscsi.py:522 +#, python-format +msgid "Failed to create iscsi target for volume id:%s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:532 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:542 +#, python-format +msgid "Failed to remove iscsi target for volume id:%s." 
+msgstr "" + +#: cinder/brick/iscsi/iscsi.py:571 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 cinder/brick/local_dev/lvm.py:158 +#: cinder/brick/local_dev/lvm.py:474 cinder/brick/local_dev/lvm.py:503 +#: cinder/brick/local_dev/lvm.py:546 cinder/brick/local_dev/lvm.py:609 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 cinder/brick/local_dev/lvm.py:159 +#: cinder/brick/local_dev/lvm.py:475 cinder/brick/local_dev/lvm.py:504 +#: cinder/brick/local_dev/lvm.py:547 cinder/brick/local_dev/lvm.py:610 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 cinder/brick/local_dev/lvm.py:160 +#: cinder/brick/local_dev/lvm.py:476 cinder/brick/local_dev/lvm.py:505 +#: cinder/brick/local_dev/lvm.py:548 cinder/brick/local_dev/lvm.py:611 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, python-format +msgid "Unable to locate Volume Group %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:157 +msgid "Error querying thin pool about data_percent" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:370 +#, python-format +msgid "Unable to find VG: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:420 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:473 +msgid "Error creating Volume" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:489 +#, python-format +msgid "Unable to find LV: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:502 +msgid "Error creating snapshot" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:545 +msgid "Error activating LV" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:563 +#, python-format +msgid "Error reported running lvremove: CMD: %(command)s, RESPONSE: %(response)s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:568 +msgid "Attempting udev settle and retry of lvremove..." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:608 +msgid "Error extending Volume" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:39 +msgid "nfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:45 +msgid "glusterfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:86 +#, python-format +msgid "Already mounted: %s" +msgstr "" + +#: cinder/common/config.py:125 +msgid "Deploy v1 of the Cinder API." +msgstr "" + +#: cinder/common/config.py:128 +msgid "Deploy v2 of the Cinder API." +msgstr "" + +#: cinder/common/sqlalchemyutils.py:66 +#: cinder/openstack/common/db/sqlalchemy/utils.py:72 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:114 +#: cinder/openstack/common/db/sqlalchemy/utils.py:120 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/compute/nova.py:97 +#, python-format +msgid "Novaclient connection created using URL: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:63 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:190 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:843 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1842 +#, python-format +msgid "VolumeType %s deletion failed, VolumeType in use." 
+msgstr "" + +#: cinder/db/sqlalchemy/api.py:2530 +#, python-format +msgid "No backup with id %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2615 +msgid "Volume must be available" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2639 +#, python-format +msgid "Volume in unexpected state %s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2662 +#, python-format +msgid "" +"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " +"%(status)s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:37 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:64 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:240 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:269 +msgid "Downgrade from initial Cinder install is unsupported." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:49 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:74 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:105 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:45 +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:48 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:46 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:127 +msgid "Dropping foreign key reservations_ibfk_1 failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:133 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:140 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:147 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:60 +msgid "Exception while creating table 'volume_glance_metadata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:75 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:68 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:58 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:61 +msgid "transfers table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:31 +msgid "migrations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:61 +#, python-format +msgid "Table |%s| not created" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:37 +#, python-format +msgid "Exception while dropping table %s." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:100 +#, python-format +msgid "Exception while creating table %s." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:34 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:43 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:49 +#, python-format +msgid "Column |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:92 +msgid "encryption_key_id column not dropped from volumes" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:100 +msgid "encryption_key_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:105 +msgid "volume_type_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:113 +msgid "encryption table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:49 +msgid "Table quality_of_service_specs not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:60 +msgid "Added qos_specs_id column to volume type table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:85 +msgid "Dropping foreign key volume_types_ibfk_1 failed" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:93 +msgid "Dropping qos_specs_id column failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:100 +msgid "Dropping quality_of_service_specs table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:59 +msgid "volume_admin_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:48 +msgid "" +"Found existing 'default' entries in the quota_classes table. Skipping " +"insertion of default values." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:72 +msgid "Added default quota class data into the DB." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:74 +msgid "Default quota class data not inserted into the DB." +msgstr "" + +#: cinder/image/glance.py:161 cinder/image/glance.py:169 +#, python-format +msgid "Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:94 cinder/image/image_utils.py:199 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/image/image_utils.py:101 +#, python-format +msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:109 cinder/image/image_utils.py:192 +#, python-format +msgid "" +"Size is %(image_size)dGB and doesn't fit in a volume of size " +"%(volume_size)dGB." +msgstr "" + +#: cinder/image/image_utils.py:157 +#, python-format +msgid "" +"qemu-img is not installed and image is of type %s. Only RAW images can " +"be used if qemu-img is not installed." +msgstr "" + +#: cinder/image/image_utils.py:164 +msgid "" +"qemu-img is not installed and the disk format is not specified. Only RAW" +" images can be used if qemu-img is not installed." 
+msgstr "" + +#: cinder/image/image_utils.py:178 +#, python-format +msgid "Copying image from %(tmp)s to volume %(dest)s - size: %(size)s" +msgstr "" + +#: cinder/image/image_utils.py:206 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:224 +#, python-format +msgid "Converted to %(vol_format)s, but format is now %(file_format)s" +msgstr "" + +#: cinder/image/image_utils.py:260 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:78 +msgid "" +"config option keymgr.fixed_key has not been defined: some operations may " +"fail unexpectedly" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:80 +msgid "keymgr.fixed_key not defined" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:134 +#, python-format +msgid "Not deleting key %s" +msgstr "" + +#: cinder/openstack/common/eventlet_backdoor.py:140 +#, python-format +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/fileutils.py:64 +#, python-format +msgid "Reloading cached file %s" +msgstr "" + +#: cinder/openstack/common/gettextutils.py:252 +msgid "Message objects do not support addition." +msgstr "" + +#: cinder/openstack/common/gettextutils.py:261 +msgid "" +"Message objects do not support str() because they may contain non-ascii " +"characters. Please use unicode() or translate() instead." +msgstr "" + +#: cinder/openstack/common/imageutils.py:96 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:189 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:200 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:227 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:235 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/log.py:301 +#, python-format +msgid "Deprecated: %s" +msgstr "" + +#: cinder/openstack/common/log.py:402 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:453 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:623 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:82 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:89 +#: cinder/tests/brick/test_brick_connector.py:466 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:129 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:136 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:43 +#, python-format +msgid "Unexpected argument for periodic task creation: %(arg)s." 
+msgstr "" + +#: cinder/openstack/common/periodic_task.py:134 +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:139 +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:177 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:186 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." +msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:127 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/openstack/common/processutils.py:142 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:167 +#: cinder/openstack/common/processutils.py:239 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:345 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:179 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/openstack/common/processutils.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:318 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:220 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/openstack/common/processutils.py:224 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/openstack/common/service.py:175 +#: cinder/openstack/common/service.py:269 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:187 +msgid "Exception during rpc cleanup." 
+msgstr "" + +#: cinder/openstack/common/service.py:238 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:275 +msgid "Unhandled exception" +msgstr "" + +#: cinder/openstack/common/service.py:308 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/openstack/common/service.py:327 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/openstack/common/service.py:337 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/openstack/common/service.py:354 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/openstack/common/service.py:358 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/service.py:362 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/openstack/common/service.py:392 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/openstack/common/service.py:410 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/openstack/common/sslutils.py:98 +#, python-format +msgid "Invalid SSL version : %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:86 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/strutils.py:182 +#, python-format +msgid "Invalid string format: %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:189 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/openstack/common/versionutils.py:69 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and " +"may be removed in %(remove_in)s." +msgstr "" + +#: cinder/openstack/common/versionutils.py:73 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s and may be removed in " +"%(remove_in)s. It will not be superseded." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:29 +msgid "An unknown error occurred in crypto utils." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:36 +#, python-format +msgid "Block size of %(given)d is too big, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:45 +#, python-format +msgid "Length of %(given)d is too long, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/db/exception.py:44 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:487 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:538 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:610 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/utils.py:33 +msgid "Sort key supplied was not valid." +msgstr "" + +#: cinder/openstack/common/notifier/api.py:129 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:145 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:164 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. 
Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:105 +#, python-format +msgid "" +"An RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:83 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "no calling threads waiting for msg_id: %s, message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:216 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshold: %d. There " +"could be a MulticallProxyWaiter leak." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:299 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:345 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "received %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:422 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:423 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:280 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:459 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:594 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:597 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:631 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:640 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:668 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." 
+msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:166 +#: cinder/openstack/common/rpc/impl_qpid.py:164 +msgid "Failed to process message... skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:477 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:499 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:536 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:552 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:606 +#: cinder/openstack/common/rpc/impl_qpid.py:507 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:624 +#: cinder/openstack/common/rpc/impl_qpid.py:522 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:628 +#: cinder/openstack/common/rpc/impl_qpid.py:526 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:667 +#: cinder/openstack/common/rpc/impl_qpid.py:561 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:84 +#, python-format +msgid "Invalid value for qpid_topology_version: %d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:455 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:461 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:474 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:534 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:101 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:136 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:137 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:138 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:146 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:158 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:200 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:205 +msgid "You cannot send on this socket." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:267 +#, python-format +msgid "Running func with context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:305 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:387 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:437 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:443 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:475 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:481 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:497 +#, python-format +msgid "Required IPC directory does not exist at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:506 +#, python-format +msgid "Permission denied to IPC directory at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:509 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:543 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:562 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:590 +msgid "Skipping topic registration. Already registered." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:597 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:649 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:662 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:675 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:678 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:681 +#, python-format +msgid "Received message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:682 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:691 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:698 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:721 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:724 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:728 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:731 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:771 +#, python-format +msgid "topic is %s." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:815 +#, python-format +msgid "rpc_zmq_matchmaker = %(orig)s is deprecated; use %(new)s instead" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#: cinder/openstack/common/rpc/matchmaker_ring.py:79 +#: cinder/openstack/common/rpc/matchmaker_ring.py:97 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:54 +#, python-format +msgid "extra_spec requirement '%(req)s' does not match '%(cap)s'" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:67 +#, python-format +msgid "%(host_state)s fails resource_type extra_specs requirements" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:43 +msgid "Re-scheduling is disabled." +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:52 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/scheduler/driver.py:69 +msgid "Must implement host_passes_filters" +msgstr "" + +#: cinder/scheduler/driver.py:74 +msgid "Must implement find_retype_host" +msgstr "" + +#: cinder/scheduler/driver.py:78 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:82 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:98 +#, python-format +msgid "cannot place volume %(id)s on %(host)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:114 +#, python-format +msgid "No valid hosts for volume %(id)s with type %(type)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:125 +#, python-format +msgid "" +"Current host not valid for volume %(id)s with type %(type)s, migration " +"not allowed" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:156 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:174 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:207 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:259 +#, python-format +msgid "Filtered %s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:276 +#, python-format +msgid "Choosing %s" +msgstr "" + +#: cinder/scheduler/host_manager.py:264 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:269 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:294 +#, python-format +msgid "volume service is down or disabled. (host: %s)" +msgstr "" + +#: cinder/scheduler/manager.py:63 +msgid "" +"ChanceScheduler and SimpleScheduler have been deprecated due to lack of " +"support for advanced features like: volume types, volume encryption, QoS " +"etc. These two schedulers can be fully replaced by FilterScheduler with " +"certain combination of filters and weighers." 
+msgstr "" + +#: cinder/scheduler/manager.py:98 cinder/scheduler/manager.py:100 +msgid "Failed to create scheduler manager volume flow" +msgstr "" + +#: cinder/scheduler/manager.py:159 +msgid "New volume type not specified in request_spec." +msgstr "" + +#: cinder/scheduler/manager.py:174 +#, python-format +msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." +msgstr "" + +#: cinder/scheduler/manager.py:192 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:68 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%s'" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:43 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:57 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:53 +msgid "No volume_id provided to populate a request_spec from" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:116 +#, python-format +msgid "Failed to schedule_create_volume: %(cause)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:135 +#, python-format +msgid "Failed notifying on %(topic)s payload %(payload)s" +msgstr "" + +#: cinder/tests/fake_driver.py:57 cinder/volume/driver.py:784 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:76 cinder/volume/driver.py:884 +#, python-format +msgid "FAKE ISER: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:97 +msgid "local_path not implemented" +msgstr "" + +#: cinder/tests/fake_driver.py:124 cinder/tests/fake_driver.py:129 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:70 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:78 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:94 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:97 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:58 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_netapp_nfs.py:360 +#, python-format +msgid "Share %(share)s and file name %(file_name)s" +msgstr "" + +#: cinder/tests/test_rbd.py:768 cinder/volume/drivers/rbd.py:175 +msgid "flush() not supported in this version of librbd" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:260 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1507 +#, python-format +msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1510 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1515 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:60 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:61 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: 
cinder/tests/test_xiv_ds8k.py:102 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/tests/api/contrib/test_backups.py:741 +msgid "Invalid input" +msgstr "" + +#: cinder/tests/integrated/test_login.py:29 +#, python-format +msgid "volume: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:32 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:42 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:50 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:58 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:100 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:103 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:121 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:148 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:159 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:166 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/transfer/api.py:68 +msgid "Volume in unexpected state" +msgstr "" + +#: cinder/transfer/api.py:102 cinder/volume/api.py:367 +msgid "status must be available" +msgstr "" + +#: cinder/transfer/api.py:119 +#, python-format +msgid "Failed to create transfer record for %s" +msgstr "" + +#: cinder/transfer/api.py:136 +#, python-format +msgid "Attempt to transfer %s with invalid auth key." +msgstr "" + +#: cinder/transfer/api.py:156 cinder/volume/flows/api/create_volume.py:508 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/transfer/api.py:182 +#, python-format +msgid "Failed to update quota donating volume transfer id %s" +msgstr "" + +#: cinder/transfer/api.py:199 +#, python-format +msgid "Volume %s has been transferred." 
+msgstr "" + +#: cinder/volume/api.py:143 +#, python-format +msgid "Unable to query if %s is in the availability zone set" +msgstr "" + +#: cinder/volume/api.py:171 cinder/volume/api.py:173 +msgid "Failed to create api volume flow" +msgstr "" + +#: cinder/volume/api.py:202 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:214 +#, python-format +msgid "Volume status must be available or error, but current status is: %s" +msgstr "" + +#: cinder/volume/api.py:224 +msgid "Volume cannot be deleted while migrating" +msgstr "" + +#: cinder/volume/api.py:229 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:285 cinder/volume/api.py:350 +#: cinder/volume/qos_specs.py:240 cinder/volume/volume_types.py:67 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:370 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:377 +msgid "status must be in-use to detach" +msgstr "" + +#: cinder/volume/api.py:388 +msgid "Volume status must be available to reserve" +msgstr "" + +#: cinder/volume/api.py:464 +msgid "Snapshot cannot be created while volume is migrating" +msgstr "" + +#: cinder/volume/api.py:468 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:490 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:502 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:553 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/api.py:581 cinder/volume/flows/api/create_volume.py:208 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:585 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:589 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:720 cinder/volume/api.py:772 +msgid "Volume status must be available/in-use." +msgstr "" + +#: cinder/volume/api.py:723 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/api.py:752 +msgid "Volume status must be available to extend." +msgstr "" + +#: cinder/volume/api.py:757 +#, python-format +msgid "" +"New size for extend must be greater than current size. (current: " +"%(size)s, extended: %(new_size)s)" +msgstr "" + +#: cinder/volume/api.py:778 +msgid "Volume is already part of an active migration" +msgstr "" + +#: cinder/volume/api.py:784 +msgid "volume must not have snapshots" +msgstr "" + +#: cinder/volume/api.py:797 +#, python-format +msgid "No available service named %s" +msgstr "" + +#: cinder/volume/api.py:803 +msgid "Destination host must be different than current host" +msgstr "" + +#: cinder/volume/api.py:833 +msgid "Source volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:837 +msgid "Destination volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:842 +#, python-format +msgid "Destination has migration_status %(stat)s, expected %(exp)s." +msgstr "" + +#: cinder/volume/api.py:853 +msgid "Volume status must be available to update readonly flag." 
+msgstr "" + +#: cinder/volume/api.py:862 +#, python-format +msgid "Unable to update type due to incorrect status on volume: %s" +msgstr "" + +#: cinder/volume/api.py:868 +#, python-format +msgid "Volume %s is already part of an active migration." +msgstr "" + +#: cinder/volume/api.py:874 +#, python-format +msgid "migration_policy must be 'on-demand' or 'never', passed: %s" +msgstr "" + +#: cinder/volume/api.py:887 +#, python-format +msgid "Invalid volume_type passed: %s" +msgstr "" + +#: cinder/volume/api.py:900 +#, python-format +msgid "New volume_type same as original: %s" +msgstr "" + +#: cinder/volume/api.py:915 +msgid "Retype cannot change encryption requirements" +msgstr "" + +#: cinder/volume/api.py:927 +msgid "Retype cannot change front-end qos specs for in-use volumes" +msgstr "" + +#: cinder/volume/driver.py:189 cinder/volume/drivers/netapp/nfs.py:174 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:282 +#, python-format +msgid "copy_data_between_volumes %(src)s -> %(dest)s." +msgstr "" + +#: cinder/volume/driver.py:295 cinder/volume/driver.py:309 +#, python-format +msgid "Failed to attach volume %(vol)s" +msgstr "" + +#: cinder/volume/driver.py:327 +#, python-format +msgid "Failed to copy volume %(src)s to %(dest)d" +msgstr "" + +#: cinder/volume/driver.py:340 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:358 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:394 +#, python-format +msgid "Unable to access the backend storage via the path %(path)s." +msgstr "" + +#: cinder/volume/driver.py:433 +#, python-format +msgid "Creating a new backup for volume %s." +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Restoring backup %(backup)s to volume %(volume)s." +msgstr "" + +#: cinder/volume/driver.py:474 +msgid "Extend volume not implemented" +msgstr "" + +#: cinder/volume/driver.py:533 cinder/volume/drivers/emc/emc_smis_iscsi.py:113 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:546 +#, python-format +msgid "ISCSI discovery attempt failed for:%s" +msgstr "" + +#: cinder/volume/driver.py:548 +#, python-format +msgid "Error from iscsiadm -m discovery: %s" +msgstr "" + +#: cinder/volume/driver.py:595 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:599 cinder/volume/drivers/emc/emc_smis_iscsi.py:156 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:696 +msgid "The volume driver requires the iSCSI initiator name in the connector." +msgstr "" + +#: cinder/volume/driver.py:726 cinder/volume/driver.py:845 +#: cinder/volume/drivers/eqlx.py:247 cinder/volume/drivers/lvm.py:359 +#: cinder/volume/drivers/zadara.py:650 +#: cinder/volume/drivers/emc/emc_smis_common.py:859 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:235 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:602 +#: cinder/volume/drivers/netapp/iscsi.py:1032 +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/nexenta/iscsi.py:538 +#: cinder/volume/drivers/windows/windows.py:205 +msgid "Updating volume stats" +msgstr "" + +#: cinder/volume/driver.py:924 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:203 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." 
+msgstr "" + +#: cinder/volume/manager.py:209 +msgid "" +"ThinLVMVolumeDriver is deprecated, please configure LVMISCSIDriver and " +"lvm_type=thin. Continuing with those settings." +msgstr "" + +#: cinder/volume/manager.py:228 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)" +msgstr "" + +#: cinder/volume/manager.py:235 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s" +msgstr "" + +#: cinder/volume/manager.py:244 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:257 +#, python-format +msgid "Failed to re-export volume %s: setting to error state" +msgstr "" + +#: cinder/volume/manager.py:264 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:271 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:273 +#, python-format +msgid "" +"Error encountered during re-exporting phase of driver initialization: " +"%(name)s" +msgstr "" + +#: cinder/volume/manager.py:283 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:328 cinder/volume/manager.py:330 +msgid "Failed to create manager volume flow" +msgstr "" + +#: cinder/volume/manager.py:374 cinder/volume/manager.py:391 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:380 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:389 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:394 +#, python-format +msgid "Cannot delete volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:422 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:427 +#, python-format +msgid "volume %s: glance metadata deleted" +msgstr "" + +#: cinder/volume/manager.py:430 +#, python-format +msgid "no glance metadata found for volume %s" +msgstr "" + +#: cinder/volume/manager.py:434 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:451 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:462 +#, python-format +msgid "snapshot %(snap_id)s: creating" +msgstr "" + +#: cinder/volume/manager.py:490 +#, python-format +msgid "" +"Failed updating %(snapshot_id)s metadata using the provided volumes " +"%(volume_id)s metadata" +msgstr "" + +#: cinder/volume/manager.py:496 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:508 cinder/volume/manager.py:518 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:526 +#, python-format +msgid "Cannot delete snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:556 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:559 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:579 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:583 +msgid "being attached by another host" +msgstr "" + +#: cinder/volume/manager.py:587 +msgid "being attached by different mode" +msgstr "" + +#: cinder/volume/manager.py:590 +msgid "status must be available or attaching" +msgstr "" + +#: cinder/volume/manager.py:698 +#, python-format +msgid 
"Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "" + +#: cinder/volume/manager.py:760 +#, python-format +msgid "Unable to fetch connection information from backend: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:807 +#, python-format +msgid "Unable to terminate volume connection: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:854 +msgid "failed to create new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:857 +msgid "timeout creating new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:880 +#, python-format +msgid "Failed to copy volume %(vol1)s to %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:909 +#, python-format +msgid "" +"migrate_volume_completion: completing migration for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:921 +#, python-format +msgid "" +"migrate_volume_completion is cleaning up an error for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:940 +#, python-format +msgid "Failed to delete migration source vol %(vol)s: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:976 +#, python-format +msgid "volume %s: calling driver migrate_volume" +msgstr "" + +#: cinder/volume/manager.py:1016 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/manager.py:1024 +#, python-format +msgid "" +"Unable to update stats, %(driver_name)s -%(driver_version)s " +"%(config_group)s driver is uninitialized." +msgstr "" + +#: cinder/volume/manager.py:1044 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/manager.py:1091 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to extend volume by %(s_size)sG, " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/manager.py:1103 +#, python-format +msgid "volume %s: extending" +msgstr "" + +#: cinder/volume/manager.py:1105 +#, python-format +msgid "volume %s: extended successfully" +msgstr "" + +#: cinder/volume/manager.py:1107 +#, python-format +msgid "volume %s: Error trying to extend volume" +msgstr "" + +#: cinder/volume/manager.py:1169 +msgid "Failed to update usages while retyping volume." +msgstr "" + +#: cinder/volume/manager.py:1170 +msgid "Failed to get old volume type quota reservations" +msgstr "" + +#: cinder/volume/manager.py:1190 +#, python-format +msgid "Volume %s: retyped succesfully" +msgstr "" + +#: cinder/volume/manager.py:1193 +#, python-format +msgid "" +"Volume %s: driver error when trying to retype, falling back to generic " +"mechanism." +msgstr "" + +#: cinder/volume/manager.py:1204 +msgid "Retype requires migration but is not allowed." +msgstr "" + +#: cinder/volume/manager.py:1212 +msgid "Volume must not have snapshots." 
+msgstr "" + +#: cinder/volume/qos_specs.py:57 +#, python-format +msgid "Valid consumer of QoS specs are: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:84 cinder/volume/qos_specs.py:105 +#: cinder/volume/qos_specs.py:155 cinder/volume/qos_specs.py:197 +#: cinder/volume/qos_specs.py:211 cinder/volume/qos_specs.py:225 +#: cinder/volume/volume_types.py:43 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:123 cinder/volume/qos_specs.py:140 +#: cinder/volume/qos_specs.py:272 cinder/volume/volume_types.py:52 +#: cinder/volume/volume_types.py:99 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/qos_specs.py:156 +#, python-format +msgid "Failed to get all associations of qos specs %s" +msgstr "" + +#: cinder/volume/qos_specs.py:189 +#, python-format +msgid "" +"Type %(type_id)s is already associated with another qos specs: " +"%(qos_specs_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:198 +#, python-format +msgid "Failed to associate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:212 +#, python-format +msgid "Failed to disassociate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:226 +#, python-format +msgid "Failed to disassociate qos specs %s." +msgstr "" + +#: cinder/volume/qos_specs.py:284 cinder/volume/volume_types.py:111 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/utils.py:144 +#, python-format +msgid "" +"Incorrect value error: %(blocksize)s, it may indicate that " +"'volume_dd_blocksize' was configured incorrectly. Fall back to default." +msgstr "" + +#: cinder/volume/volume_types.py:130 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:131 +#: cinder/volume/drivers/block_device.py:143 cinder/volume/drivers/lvm.py:654 +#: cinder/volume/drivers/lvm.py:669 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:157 cinder/volume/drivers/lvm.py:687 +#, python-format +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:183 cinder/volume/drivers/lvm.py:483 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:200 cinder/volume/drivers/lvm.py:504 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:272 cinder/volume/drivers/lvm.py:227 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:287 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:311 cinder/volume/drivers/lvm.py:300 +#: cinder/volume/drivers/zadara.py:509 cinder/volume/drivers/nexenta/nfs.py:189 +#, python-format +msgid "Creating clone of volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:380 +msgid "No free disk" +msgstr "" + +#: cinder/volume/drivers/block_device.py:393 +msgid "No big enough free disk" +msgstr "" + +#: cinder/volume/drivers/coraid.py:84 +#, python-format +msgid "Invalid ESM url scheme \"%s\". Supported https only." +msgstr "" + +#: cinder/volume/drivers/coraid.py:111 +msgid "Invalid REST handle name. Expected path." 
+msgstr "" + +#: cinder/volume/drivers/coraid.py:134 +#, python-format +msgid "Call to json.loads() failed: %(ex)s. Response: %(resp)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:224 +msgid "Session is expired. Relogin on ESM." +msgstr "" + +#: cinder/volume/drivers/coraid.py:244 +msgid "Reply is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:246 +msgid "Error message is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:284 +#, python-format +msgid "Coraid Appliance ping failed: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:297 +#, python-format +msgid "Volume \"%(name)s\" created with VSX LUN \"%(lun)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:311 +#, python-format +msgid "Volume \"%s\" deleted." +msgstr "" + +#: cinder/volume/drivers/coraid.py:315 +#, python-format +msgid "Resize volume \"%(name)s\" to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:319 +#, python-format +msgid "Repository for volume \"%(name)s\" found: \"%(repo)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:333 +#, python-format +msgid "Volume \"%(name)s\" resized. New size is %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:385 +msgid "Cannot create clone volume in different repository." +msgstr "" + +#: cinder/volume/drivers/coraid.py:505 +#, python-format +msgid "Initialize connection %(shelf)s/%(lun)s for %(name)s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:139 +#, python-format +msgid "" +"CLI output\n" +"%s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:154 +msgid "Reading CLI MOTD" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:158 +#, python-format +msgid "Setting CLI terminal width: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:162 +#, python-format +msgid "Sending CLI command: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:169 +msgid "Error executing EQL command" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:199 +#, python-format +msgid "EQL-driver: executing \"%s\"" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:208 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:383 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:215 cinder/volume/drivers/san/san.py:149 +#, python-format +msgid "Error running SSH command: %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:282 +#, python-format +msgid "Volume %s does not exist, it may have already been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:300 +#, python-format +msgid "EQL-driver: Setup is complete, group IP is %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:304 +msgid "Failed to setup the Dell EqualLogic driver" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:320 +#, python-format +msgid "Failed to create volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:329 +#, python-format +msgid "Volume %s was not found while trying to delete it" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:333 +#, python-format +msgid "Failed to delete volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:348 +#, python-format +msgid "Failed to create snapshot of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:361 +#, python-format +msgid "Failed to create volume from snapshot %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:374 +#, python-format +msgid "Failed to create clone of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:384 +#, python-format +msgid "Failed to delete snapshot %(snap)s of volume %(vol)s" +msgstr "" + +#: 
cinder/volume/drivers/eqlx.py:405 +#, python-format +msgid "Failed to initialize connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:415 +#, python-format +msgid "Failed to terminate connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:436 +#, python-format +msgid "Volume %s was not found; it may have been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:440 +#, python-format +msgid "Failed to ensure export of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:459 +#, python-format +msgid "Failed to extend_volume %(name)s from %(current_size)sGB to %(new_size)sGB" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:86 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:91 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:103 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:161 +#, python-format +msgid "Cloning volume %(src)s to volume %(dst)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:166 +msgid "Volume status must be 'available'." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:202 cinder/volume/drivers/nfs.py:122 +#: cinder/volume/drivers/netapp/nfs.py:753 +#, python-format +msgid "casted to %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:216 +msgid "Snapshot status must be \"available\" to clone." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:238 +#, python-format +msgid "snapshot: %(snap)s, volume: %(vol)s, volume_size: %(size)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:257 +#, python-format +msgid "will copy from snapshot at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:275 cinder/volume/drivers/nfs.py:172 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:373 +#, python-format +msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:403 +#, python-format +msgid "nova call result: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:405 +msgid "Call to Nova to create snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:427 +msgid "Nova returned \"error\" status while creating snapshot." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:431 +#, python-format +msgid "Status of snapshot %(id)s is now %(status)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:444 +#, python-format +msgid "Timed out while waiting for Nova update for creation of snapshot %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:456 +#, python-format +msgid "create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:457 +#, python-format +msgid "volume id: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:532 +msgid "'active' must be present when writing snap_info." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:562 +#, python-format +msgid "deleting snapshot %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:566 +msgid "Volume status must be \"available\" or \"in-use\"." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:582 +#, python-format +msgid "" +"Snapshot record for %s is not present, allowing snapshot_delete to " +"proceed." 
+msgstr "" + +#: cinder/volume/drivers/glusterfs.py:587 +#, python-format +msgid "snapshot_file for this snap is %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:608 +#, python-format +msgid "No base file found for %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:625 +#, python-format +msgid "No %(base_id)s found for %(file)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:680 +#, python-format +msgid "No file found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:690 +#, python-format +msgid "No snap found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:701 +#, python-format +msgid "No file depends on %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:727 +#, python-format +msgid "Check condition failed: %s expected to be None." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:778 +msgid "Call to Nova delete snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:796 +#, python-format +msgid "status of snapshot %s is still \"deleting\"... waiting" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:802 +#, python-format +msgid "Unable to delete snapshot %(id)s, status: %(status)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:815 +#, python-format +msgid "Timed out while waiting for Nova update for deletion of snapshot %(id)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:904 +#, python-format +msgid "%s must be a valid raw or qcow2 image." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:967 +msgid "Extend volume is only supported for this driver when no snapshots exist." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:975 +#, python-format +msgid "Unrecognized backing format: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:990 +#, python-format +msgid "creating new volume at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:993 +#, python-format +msgid "file already exists at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1019 cinder/volume/drivers/nfs.py:159 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1021 +#, python-format +msgid "Available shares: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1038 +#, python-format +msgid "" +"GlusterFS share at %(dir)s is not writable by the Cinder volume service. " +"Snapshot operations will not be supported." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:96 +#, python-format +msgid "GPFS is not active. Detailed output: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:97 +#, python-format +msgid "GPFS is not running - state: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:140 +msgid "Option gpfs_mount_point_base is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:147 +msgid "Option gpfs_images_share_mode is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:153 +msgid "Option gpfs_images_dir is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:160 +#, python-format +msgid "" +"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " +"belong to different file systems" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:169 +#, python-format +msgid "" +"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in " +"cluster daemon level %(cur)s - must be at least at level %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:183 +#, python-format +msgid "%s must be an absolute path." 
+msgstr "" + +#: cinder/volume/drivers/gpfs.py:188 +#, python-format +msgid "%s is not a directory." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:197 +#, python-format +msgid "" +"The GPFS filesystem %(fs)s is not at the required release level. Current" +" level is %(cur)s, must be at least %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:556 +#, python-format +msgid "Failed to resize volume %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:604 +#, python-format +msgid "mkfs failed on volume %(vol)s, error message was: %(err)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:637 +#, python-format +msgid "" +"%s cannot be accessed. Verify that GPFS is active and file system is " +"mounted." +msgstr "" + +#: cinder/volume/drivers/lvm.py:189 +#, python-format +msgid "Unabled to delete due to existing snapshot for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:215 +#, python-format +msgid "Volume device file path %s does not exist." +msgstr "" + +#: cinder/volume/drivers/lvm.py:221 +#, python-format +msgid "Size for volume: %s not found, cannot secure delete." +msgstr "" + +#: cinder/volume/drivers/lvm.py:262 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:361 +#, python-format +msgid "Unable to update stats on non-initialized Volume Group: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:462 +#, python-format +msgid "Error creating iSCSI target, retrying creation for target: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:482 +#, python-format +msgid "volume_info:%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:518 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:519 cinder/volume/drivers/lvm.py:724 +#: cinder/volume/drivers/huawei/rest_common.py:1225 +#, python-format +msgid "%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:573 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/nfs.py:109 +msgid "Driver specific implementation needs to return mount_point_base." +msgstr "" + +#: cinder/volume/drivers/nfs.py:263 +#, python-format +msgid "Expected volume size was %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:264 +#, python-format +msgid " but size is now %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:361 +#, python-format +msgid "%s is already mounted" +msgstr "" + +#: cinder/volume/drivers/nfs.py:421 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:426 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/nfs.py:431 +#, python-format +msgid "NFS config 'nfs_oversub_ratio' invalid. Must be > 0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:439 +#, python-format +msgid "NFS config 'nfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:493 +#, python-format +msgid "Selected %s as target nfs share." 
+msgstr "" + +#: cinder/volume/drivers/nfs.py:526 +#, python-format +msgid "%s is above nfs_used_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:529 +#, python-format +msgid "%s is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:532 +#, python-format +msgid "%s reserved space is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/rbd.py:160 +#, python-format +msgid "Invalid argument - whence=%s not supported" +msgstr "" + +#: cinder/volume/drivers/rbd.py:164 +msgid "Invalid argument" +msgstr "" + +#: cinder/volume/drivers/rbd.py:183 +msgid "fileno() not supported by RBD()" +msgstr "" + +#: cinder/volume/drivers/rbd.py:210 +#, python-format +msgid "error opening rbd image %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:259 +msgid "rados and rbd python libraries not found" +msgstr "" + +#: cinder/volume/drivers/rbd.py:265 +msgid "error connecting to ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:346 cinder/volume/drivers/sheepdog.py:178 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:377 +#, python-format +msgid "clone depth exceeds limit of %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:411 +#, python-format +msgid "maximum clone depth (%d) has been reached - flattening source volume" +msgstr "" + +#: cinder/volume/drivers/rbd.py:423 +#, python-format +msgid "flattening source volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:435 +#, python-format +msgid "creating snapshot='%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:445 +#, python-format +msgid "cloning '%(src_vol)s@%(src_snap)s' to '%(dest)s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:459 +msgid "clone created successfully" +msgstr "" + +#: cinder/volume/drivers/rbd.py:468 +#, python-format +msgid "creating volume '%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:484 +#, python-format +msgid "flattening %(pool)s/%(img)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:490 +#, python-format +msgid "cloning %(pool)s/%(img)s@%(snap)s to %(dst)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:527 +msgid "volume has no backup snaps" +msgstr "" + +#: cinder/volume/drivers/rbd.py:550 +#, python-format +msgid "volume %s is not a clone" +msgstr "" + +#: cinder/volume/drivers/rbd.py:568 +#, python-format +msgid "deleting parent snapshot %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:579 +#, python-format +msgid "deleting parent %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:593 +#, python-format +msgid "volume %s no longer exists in backend" +msgstr "" + +#: cinder/volume/drivers/rbd.py:609 +msgid "volume has clone snapshot(s)" +msgstr "" + +#: cinder/volume/drivers/rbd.py:625 +#, python-format +msgid "deleting rbd volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:629 +msgid "" +"ImageBusy error raised while deleting rbd volume. This may have been " +"caused by a connection from a client that has crashed and, if so, may be " +"resolved by retrying the delete after 30 seconds has elapsed." 
+msgstr "" + +#: cinder/volume/drivers/rbd.py:642 +msgid "volume is a clone so cleaning references" +msgstr "" + +#: cinder/volume/drivers/rbd.py:696 +#, python-format +msgid "connection data: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:705 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:709 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:712 +msgid "Not an rbd snapshot" +msgstr "" + +#: cinder/volume/drivers/rbd.py:724 +#, python-format +msgid "not cloneable: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:728 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:733 +msgid "rbd image clone requires image format to be 'raw' but image {0} is '{1}'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:747 +#, python-format +msgid "Unable to open image %(loc)s: %(err)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:817 +msgid "volume backup complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:830 +msgid "volume restore complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:840 cinder/volume/drivers/sheepdog.py:195 +#, python-format +msgid "Failed to Extend Volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:845 cinder/volume/drivers/sheepdog.py:200 +#: cinder/volume/drivers/windows/windows.py:223 +#, python-format +msgid "Extend volume from %(old_size)s GB to %(new_size)s GB." +msgstr "" + +#: cinder/volume/drivers/scality.py:67 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:78 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:84 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:105 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:139 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:59 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:64 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:144 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:151 +#, python-format +msgid "" +"Failed to make httplib connection SolidFire Cluster: %s (verify san_ip " +"settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:154 +#, python-format +msgid "Failed to make httplib connection: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:161 +#, python-format +msgid "" +"Request to SolidFire cluster returned bad status: %(status)s / %(reason)s" +" (check san_login/san_password settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:166 +#, python-format +msgid "HTTP request failed, with status: %(status)s and reason: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:177 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:183 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:187 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:189 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:195 +#, python-format +msgid "Detected 
xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:202 +#: cinder/volume/drivers/solidfire.py:271 +#: cinder/volume/drivers/solidfire.py:366 +#, python-format +msgid "API response: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:222 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:253 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:315 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:398 +msgid "Failed to get model update from clone" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:410 +#, python-format +msgid "Failed volume create: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:425 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:460 +#, python-format +msgid "Failed to get SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:469 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:478 +#, python-format +msgid "Volume %s, not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:481 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:550 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:554 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:556 +msgid "This usually means the volume was never successfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:569 +#, python-format +msgid "Failed to delete SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:572 +#: cinder/volume/drivers/solidfire.py:646 +#: cinder/volume/drivers/solidfire.py:709 +#: cinder/volume/drivers/solidfire.py:734 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:575 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:579 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:587 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:638 +msgid "Entering SolidFire extend_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:660 +msgid "Leaving SolidFire extend_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:665 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:673 +msgid "Failed to get updated stats" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:703 +#: cinder/volume/drivers/solidfire.py:728 +msgid "Entering SolidFire attach_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:773 +msgid "Leaving SolidFire transfer volume" +msgstr "" + +#: cinder/volume/drivers/zadara.py:236 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:260 +#, python-format +msgid "Operation completed. 
%(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:357 +#, python-format +msgid "Pool %(name)s: %(total)sGB total, %(free)sGB free" +msgstr "" + +#: cinder/volume/drivers/zadara.py:408 cinder/volume/drivers/zadara.py:531 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:438 +#, python-format +msgid "Create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:445 cinder/volume/drivers/zadara.py:490 +#: cinder/volume/drivers/zadara.py:516 +#, python-format +msgid "Volume %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:456 +#, python-format +msgid "Delete snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:464 +#, python-format +msgid "snapshot: original volume %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:472 +#, python-format +msgid "snapshot: snapshot %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:483 +#, python-format +msgid "Creating volume from snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:496 +#, python-format +msgid "Snapshot %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:614 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:40 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:79 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:83 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:91 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:98 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:107 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:115 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:130 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:137 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:144 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:152 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:157 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:167 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:177 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:188 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:197 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:218 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:230 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:241 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:257 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:266 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:278 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:287 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:292 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:302 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:312 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:321 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:342 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. 
Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:354 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:365 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:381 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:390 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:402 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:411 +msgid "Entering delete_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:413 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:420 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:430 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:438 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:442 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:456 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:465 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:472 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:476 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:488 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:497 +#: cinder/volume/drivers/emc/emc_smis_common.py:567 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:502 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:518 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:527 +#, python-format +msgid "" +"Error Create Snapshot: %(snapshot)s Volume: %(volume)s Error: " +"%(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:535 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:541 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:545 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:551 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:559 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:574 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:590 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:599 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:611 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:621 +#, python-format +msgid "Create export: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:626 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:648 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:663 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:674 +#, python-format +msgid "Error mapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:678 +#, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:694 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:707 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:711 +#, python-format +msgid "HidePaths for volume %s completed successfully." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:724 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:739 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:744 +#, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:757 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:770 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:775 +#, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:781 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:790 +#: cinder/volume/drivers/emc/emc_smis_common.py:820 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:804 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:810 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:834 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:840 +#, python-format +msgid "Volume %s is already mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:852 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:884 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:887 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:903 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:906 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:928 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:948 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:952 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:959 +msgid "Cannot connect to ECOM server" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:971 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:984 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:997 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1010 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1054 +#, python-format +msgid "Pool %(storage_type)s is not found." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1060 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1066 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1082 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1114 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1117 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1130 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1158 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1184 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1188 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1248 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1289 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1302 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1314 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1326 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1361 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1404 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1409 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1419 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1441 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1463 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1491 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1520 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1526 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1538 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1548 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1550 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1566 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:152 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:161 +#, python-format +msgid "Cannot find device number for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:191 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:198 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:215 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:70 +#, python-format +msgid "Range: start LU: %(start)s, end LU: %(end)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:84 +#, python-format +msgid "setting LU upper (end) limit to %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:92 +#, python-format +msgid "%(element)s: %(val)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:103 cinder/volume/drivers/hds/hds.py:105 +#, python-format +msgid "XML exception reading parameter: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:178 +#, python-format +msgid "portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:197 +#, python-format +msgid "No configuration found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:250 +#, python-format +msgid "HDP not found: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:289 +#, python-format +msgid "iSCSI portal not found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:327 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:355 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is cloned." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:372 +#, python-format +msgid "LUN %(lun)s extended to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:395 +#, python-format +msgid "delete lun %(lun)s on %(name)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:480 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created from snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:503 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is created as snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:522 +#, python-format +msgid "LUN %s is deleted." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:57 +msgid "_instantiate_driver: configuration not found." 
+msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:64 +#, python-format +msgid "" +"_instantiate_driver: Loading %(protocol)s driver for Huawei OceanStor " +"%(product)s series storage arrays." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:84 +#, python-format +msgid "" +"\"Product\" or \"Protocol\" is illegal. \"Product\" should be set to " +"either T, Dorado or HVS. \"Protocol\" should be set to either iSCSI or " +"FC. Product: %(product)s Protocol: %(protocol)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:74 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s host: %(host)s initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:92 +#: cinder/volume/drivers/huawei/huawei_t.py:461 +#, python-format +msgid "initialize_connection: Target FC ports WWNS: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:101 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(ini)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:159 +#: cinder/volume/drivers/huawei/rest_common.py:1278 +#, python-format +msgid "" +"_get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " +"check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:206 +#: cinder/volume/drivers/huawei/rest_common.py:1083 +#, python-format +msgid "_get_tgt_iqn: iSCSI IP is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:234 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:248 +#, python-format +msgid "" +"_get_iscsi_tgt_port_info: Failed to get iSCSI port info. Please make sure" +" the iSCSI port IP %s is configured in array." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:323 +#: cinder/volume/drivers/huawei/huawei_t.py:552 +#, python-format +msgid "" +"terminate_connection: volume: %(vol)s, host: %(host)s, connector: " +"%(initiator)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:351 +#, python-format +msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:436 +msgid "validate_connector: The FC driver requires thewwpns in the connector." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:443 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:578 +#, python-format +msgid "_remove_fc_ports: FC port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:40 +#, python-format +msgid "parse_xml_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:129 +#, python-format +msgid "_get_host_os_type: Host %(ip)s OS type is %(os)s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:59 +#, python-format +msgid "HVS Request URL: %(url)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:60 +#, python-format +msgid "HVS Request Data: %(data)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:73 +#, python-format +msgid "HVS Response Data: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:75 +#, python-format +msgid "Bad response from server: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:82 +msgid "JSON transfer error" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:102 +#, python-format +msgid "Login error, reason is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:166 +#, python-format +msgid "" +"%(err)s\n" +"result: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:173 +#, python-format +msgid "%s \"data\" was not in result." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:208 +msgid "Can't find the Qos policy in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:246 +msgid "Can't find lun or lun group in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:280 +#, python-format +msgid "Invalid resource pool: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:298 +#, python-format +msgid "Get pool info error, pool name is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:327 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:354 +#, python-format +msgid "_stop_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:474 +#, python-format +msgid "" +"_mapping_hostgroup_and_lungroup: lun_group: %(lun_group)sview_id: " +"%(view_id)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:511 +#: cinder/volume/drivers/huawei/rest_common.py:543 +#, python-format +msgid "initiator name:%(initiator_name)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:527 +#, python-format +msgid "host lun id is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:553 +#, python-format +msgid "the free wwns %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:574 +#, python-format +msgid "the fc server properties is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:688 +#, python-format +msgid "JSON transfer data error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:874 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:937 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:964 +#, python-format +msgid "" +"PrefetchType config is wrong. PrefetchType must in 1,2,3,4. fetchtype " +"is:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:970 +msgid "Use default prefetch fetchtype. Prefetch fetchtype:Intelligent." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:982 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal.LUNcopy name: " +"%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1056 +#, python-format +msgid "" +"_get_iscsi_port_info: Failed to get iscsi port info through config IP " +"%(ip)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1101 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1124 +#, python-format +msgid "_parse_volume_type: type id: %(type_id)s config parameter is: %(params)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1157 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the configuration file " +"%(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1162 +#, python-format +msgid "The config parameters are: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1239 +#: cinder/volume/drivers/huawei/ssh_common.py:118 +#: cinder/volume/drivers/huawei/ssh_common.py:1265 +#, python-format +msgid "_check_conf_file: Config file invalid. %s must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1246 +#: cinder/volume/drivers/huawei/ssh_common.py:125 +msgid "_check_conf_file: Config file invalid. StoragePool must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1256 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1300 +msgid "Can not find lun in array" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:54 +#, python-format +msgid "ssh_read: Read SSH timeout. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:70 +msgid "No response message. Please check system status." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:101 +#: cinder/volume/drivers/huawei/ssh_common.py:1249 +msgid "do_setup" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:135 +#: cinder/volume/drivers/huawei/ssh_common.py:1287 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType is invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:169 +#, python-format +msgid "_get_login_info: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:224 +#, python-format +msgid "create_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:242 +#, python-format +msgid "" +"_name_translate: Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:279 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the element in configuration " +"file %(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:373 +#: cinder/volume/drivers/huawei/ssh_common.py:1451 +#, python-format +msgid "LUNType must be \"Thin\" or \"Thick\". LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:395 +msgid "" +"_parse_conf_lun_params: Use default prefetch type. 
Prefetch type: " +"Intelligent" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:421 +#, python-format +msgid "" +"_get_maximum_capacity_pool_id: Failed to get pool id. Please check config" +" file and make sure the StoragePool %s is created in storage array." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:436 +#, python-format +msgid "CLI command: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:466 +#, python-format +msgid "" +"_execute_cli: Can not connect to IP %(old)s, try to connect to the other " +"IP %(new)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:501 +#, python-format +msgid "_execute_cli: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:511 +#, python-format +msgid "delete_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:516 +#, python-format +msgid "delete_volume: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:570 +#, python-format +msgid "" +"create_volume_from_snapshot: snapshot name: %(snapshot)s, volume name: " +"%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:580 +#, python-format +msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:650 +#, python-format +msgid "_wait_for_luncopy: LUNcopy %(luncopyname)s status is %(status)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:688 +#, python-format +msgid "create_cloned_volume: src volume: %(src)s, tgt volume: %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:697 +#, python-format +msgid "Source volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:739 +#, python-format +msgid "" +"extend_volume: extended volume name: %(extended_name)s new added volume " +"name: %(added_name)s new added volume size: %(added_size)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:747 +#, python-format +msgid "extend_volume: volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:779 +#, python-format +msgid "create_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:785 +msgid "create_snapshot: Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:792 +#, python-format +msgid "create_snapshot: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:855 +#, python-format +msgid "delete_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:865 +#, python-format +msgid "" +"delete_snapshot: Can not delete snapshot %s for it is a source LUN of " +"LUNCopy." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:873 +#, python-format +msgid "delete_snapshot: Snapshot %(snap)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:916 +#, python-format +msgid "" +"%(func)s: %(msg)s\n" +"CLI command: %(cmd)s\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:933 +#, python-format +msgid "map_volume: Volume %s was not found." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1079 +#, python-format +msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1102 +#, python-format +msgid "remove_map: Host %s does not exist." 
+msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1106 +#, python-format +msgid "remove_map: Volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1119 +#, python-format +msgid "remove_map: No map between host %(host)s and volume %(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1138 +#, python-format +msgid "" +"_delete_map: There are IOs accessing the system. Retry to delete host map" +" %(mapid)s 10s later." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1146 +#, python-format +msgid "" +"_delete_map: Failed to delete host map %(mapid)s.\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1185 +msgid "_update_volume_stats: Updating volume stats." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1277 +msgid "_check_conf_file: Config file invalid. StoragePool must be specified." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1311 +msgid "" +"_get_device_type: The driver only supports Dorado5100 and Dorado 2100 G2 " +"now." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1389 +#, python-format +msgid "" +"create_volume_from_snapshot: %(device)s does not support create volume " +"from snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1396 +#, python-format +msgid "create_cloned_volume: %(device)s does not support clone volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1404 +#, python-format +msgid "extend_volume: %(device)s does not support extend volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1413 +#, python-format +msgid "create_snapshot: %(device)s does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:132 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:142 +#, python-format +msgid "Failed getting details for pool %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:179 +msgid "do_setup: No configured nodes." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:182 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:186 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:190 +msgid "Unable to determine system name" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:193 +msgid "Unable to determine system id" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:196 +msgid "Unable to determine pool extent size" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:203 +#: cinder/volume/drivers/netapp/iscsi.py:122 +#: cinder/volume/drivers/netapp/nfs.py:639 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:157 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:209 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:217 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:225 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:235 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:254 +msgid "The connector does not contain the required information." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:277 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:297 +msgid "CHAP secret exists for host but CHAP is disabled" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:302 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:314 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:316 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:333 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:342 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:365 +msgid "" +"Could not get FC connection information for the host-volume connection. " +"Is the host configured properly for FC connections?" 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:380 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:385 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:403 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:411 +msgid "terminate_connection: Failed to get host name from connector." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:421 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:447 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:459 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:470 +#, python-format +msgid "enter: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:474 +msgid "extend_volume: Extending a volume with snapshots is not supported." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:481 +#, python-format +msgid "leave: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:497 +#, python-format +msgid "enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:523 +#, python-format +msgid "leave: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:540 +#, python-format +msgid "" +"enter: retype: id=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:581 +#, python-format +msgid "" +"exit: retype: ild=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:622 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:623 +msgid "_update_volume_stats: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:44 +#, python-format +msgid "Could not find key in output of command %(cmd)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:64 +#, python-format +msgid "Failed to get code level (%s)." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:86 +#, python-format +msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:143 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:165 +#, python-format +msgid "Failed to find host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:178 +#, python-format +msgid "enter: get_host_from_connector: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:207 +#, python-format +msgid "leave: get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:218 +#, python-format +msgid "enter: create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:224 +msgid "create_host: Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:235 +msgid "create_host: No initiators or wwpns supplied." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:265 +#, python-format +msgid "leave: create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:275 +#, python-format +msgid "enter: map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:301 +#, python-format +msgid "" +"leave: map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host " +"%(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:311 +#, python-format +msgid "enter: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:318 +#, python-format +msgid "unmap_vol_from_host: No mapping of volume %(vol_name)s to any host found." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:324 +#, python-format +msgid "" +"unmap_vol_from_host: Multiple mappings of volume %(vol_name)s found, no " +"host specified." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:336 +#, python-format +msgid "" +"unmap_vol_from_host: No mapping of volume %(vol_name)s to host %(host) " +"found." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:348 +#, python-format +msgid "leave: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:377 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:383 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:390 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:397 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:402 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:408 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:417 +#, python-format +msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:452 +msgid "Protocol must be specified as ' iSCSI' or ' FC'." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:495 +#, python-format +msgid "enter: create_vdisk: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:498 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:525 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping%(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:535 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within theallotted %(to)d " +"seconds timeout. Terminating." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:544 +#, python-format +msgid "" +"enter: run_flashcopy: execute FlashCopy from source %(source)s to target " +"%(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:552 +#, python-format +msgid "leave: run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:572 +#, python-format +msgid "Loopcall: _check_vdisk_fc_mappings(), vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:595 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:631 +#, python-format +msgid "Calling _ensure_vdisk_no_fc_mappings: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:639 +#, python-format +msgid "enter: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:641 +#, python-format +msgid "Tried to delete non-existant vdisk %s." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:645 +#, python-format +msgid "leave: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:649 +#, python-format +msgid "enter: create_copy: snapshot %(src)s to %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:654 +#, python-format +msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:669 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt)s from vdisk %(src)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:691 +msgid "migrate_volume started without a vdisk copy in the expected pool." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:743 +#, python-format +msgid "" +"Ignore change IO group as storage code level is %(code_level)s, below " +"then 6.4.0.0" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:35 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:211 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:244 +#, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:53 +#, python-format +msgid "Expected no output from CLI command %(cmd)s, got %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:65 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:256 +#, python-format +msgid "" +"Failed to parse CLI output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:142 +msgid "Must pass wwpn or host to lsfabric." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:158 +#, python-format +msgid "Did not find success message nor error for %(fun)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:163 +msgid "" +"storwize_svc_multihostmap_enabled is set to False, not allowing multi " +"host mapping." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:347 +#, python-format +msgid "Did not find expected key %(key)s in %(fun)s: %(raw)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:382 +#, python-format +msgid "" +"Unexpected CLI response: header/row mismatch. header: %(header)s, row: " +"%(row)s" +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:419 +#, python-format +msgid "No element by given name %s." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:440 +msgid "Not a valid value for NaElement." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:444 +msgid "NaElement name cannot be null." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:468 +msgid "Type cannot be converted into NaElement." 
+msgstr "" + +#: cinder/volume/drivers/netapp/common.py:75 +msgid "Required configuration not found" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:103 +#, python-format +msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:109 +#, python-format +msgid "Storage family %s is not supported" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:116 +#, python-format +msgid "No default storage protocol found for storage family %(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:123 +#, python-format +msgid "" +"Protocol %(storage_protocol)s is not supported for storage family " +"%(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:130 +#, python-format +msgid "" +"NetApp driver of family %(storage_family)s and protocol " +"%(storage_protocol)s loaded" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:139 +msgid "Only loading netapp drivers supported." +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:158 +#, python-format +msgid "" +"The configured NetApp driver is deprecated. Please refer the link to " +"resolve the issue '%s'." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:69 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:105 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:150 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:166 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:175 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:191 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:227 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:232 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:238 +#, python-format +msgid "Failed to get LUN target details for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:249 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:252 +#, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:290 +#, python-format +msgid "Snapshot %s deletion successful" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:310 +#: cinder/volume/drivers/netapp/iscsi.py:565 +#: cinder/volume/drivers/netapp/nfs.py:99 +#: cinder/volume/drivers/netapp/nfs.py:206 +#, python-format +msgid "Resizing %s failed. Cleaning volume." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:325 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:412 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:431 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:511 +msgid "Object is not a NetApp LUN." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:543 +#, python-format +msgid "Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:545 +#, python-format +msgid "Error getting lun attribute. Exception: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:600 +#, python-format +msgid "No need to extend volume %s as it is already the requested new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:606 +#, python-format +msgid "Resizing lun %s directly to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:633 +#, python-format +msgid "Lun %(path)s geometry failed. Message - %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:662 +#, python-format +msgid "Moving lun %(name)s to %(new_name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:677 +#, python-format +msgid "Resizing lun %s using sub clone to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:684 +#, python-format +msgid "%s cannot be sub clone resized as it is hosted on compressed volume" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:690 +#, python-format +msgid "%s cannot be sub clone resized as it contains no blocks." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:707 +#, python-format +msgid "Post clone resize lun %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:718 +#, python-format +msgid "Failure staging lun %s to tmp." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:723 +#, python-format +msgid "Failure moving new cloned lun to %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:727 +#, python-format +msgid "Failure deleting staged tmp lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:730 +#, python-format +msgid "Unknown exception in post clone resize lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:732 +#, python-format +msgid "Exception details: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:736 +msgid "Getting lun block count." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:741 +#, python-format +msgid "Failure getting lun info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:785 +#, python-format +msgid "Failed to get vol with required size and extra specs for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:796 +#, python-format +msgid "Error provisioning vol %(name)s on %(volume)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:841 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:982 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:986 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1087 +msgid "Cluster ssc is not updated. No volume stats found." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1149 +#: cinder/volume/drivers/netapp/nfs.py:1080 +msgid "Unsupported ONTAP version. ONTAP version 7.3.1 and above is supported." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1153 +#: cinder/volume/drivers/netapp/nfs.py:1084 +#: cinder/volume/drivers/netapp/utils.py:320 +msgid "Api version could not be determined." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1164 +#, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1273 +#, python-format +msgid "Error finding luns for volume %s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1390 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1393 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1456 +msgid "Volume refresh job already running. Returning..." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1462 +#, python-format +msgid "Error refreshing vol capacity. Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1470 +#, python-format +msgid "Refreshing capacity info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:104 +#: cinder/volume/drivers/netapp/nfs.py:211 +#, python-format +msgid "NFS file %s not discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:224 +#, python-format +msgid "Copied image to volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:230 +#, python-format +msgid "Registering image in cache %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:237 +#, python-format +msgid "" +"Exception while registering image %(image_id)s in cache. Exception: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:250 +#, python-format +msgid "Found cache file for image %(image_id)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:263 +#, python-format +msgid "Cloning img from cache for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:271 +msgid "Image cache cleaning in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:282 +msgid "Image cache cleaning in progress." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:293 +#, python-format +msgid "Cleaning cache for share %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:298 +#, python-format +msgid "Files to be queued for deletion %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:305 +#, python-format +msgid "Exception during cache cleaning %(share)s. Message - %(ex)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:311 +msgid "Image cache cleaning done." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:336 +#, python-format +msgid "Bytes to free %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:343 +#, python-format +msgid "Delete file path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:358 +#, python-format +msgid "Deleting file at path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:363 +#, python-format +msgid "Exception during deleting %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:395 +#, python-format +msgid "Unexpected exception in cloning image %(image_id)s. 
Message: %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:411 +#, python-format +msgid "Cloning image %s from cache" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:415 +#, python-format +msgid "Cache share: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:425 +#, python-format +msgid "Unexpected exception during image cloning in share %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:431 +#, python-format +msgid "Cloning image %s directly in share" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:436 +#, python-format +msgid "Share is cloneable %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:443 +#, python-format +msgid "Image is raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:450 +#, python-format +msgid "Image will locally be converted to raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:457 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:467 +#, python-format +msgid "Performing post clone for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:474 +msgid "NFS file could not be discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:478 +msgid "Checking file for resize" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:482 +#, python-format +msgid "Resizing file to %sG" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:488 +msgid "Resizing image file failed." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:510 +msgid "Discover file retries exhausted." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:529 +#, python-format +msgid "Image location not in the expected format %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:557 +#, python-format +msgid "Found possible share matches %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:561 +msgid "Unexpected exception while short listing used share." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:599 +#, python-format +msgid "Extending volume %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:710 +#, python-format +msgid "Shares on vserver %s will only be used for provisioning." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:714 +#: cinder/volume/drivers/netapp/nfs.py:892 +msgid "No vserver set in config. SSC will be disabled." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:757 +#, python-format +msgid "Exception creating vol %(name)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:765 +#, python-format +msgid "Volume %s could not be created on shares." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:815 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:856 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:862 +#, python-format +msgid "" +"Cloning with params volume %(volume)s, src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:924 +msgid "No cluster ssc stats found. Wait for next volume stats update." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:941 +msgid "No shares found hence skipping ssc refresh." 
+msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:978 +#: cinder/volume/drivers/netapp/nfs.py:1221 +#, python-format +msgid "Shortlisted del elg files %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:983 +#: cinder/volume/drivers/netapp/nfs.py:1226 +#, python-format +msgid "Getting file usage for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:988 +#: cinder/volume/drivers/netapp/nfs.py:1231 +#, python-format +msgid "file-usage for path %(path)s is %(bytes)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1005 +#: cinder/volume/drivers/netapp/nfs.py:1268 +#, python-format +msgid "Share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1007 +#: cinder/volume/drivers/netapp/nfs.py:1270 +#, python-format +msgid "No share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1038 +#, python-format +msgid "Found volume %(vol)s for share %(share)s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1129 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1139 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:241 +#, python-format +msgid "Unexpected error while creating ssc vol list. Message - %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:272 +#, python-format +msgid "Exception querying aggr options. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:313 +#, python-format +msgid "Exception querying sis information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:347 +#, python-format +msgid "Exception querying mirror information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:379 +#, python-format +msgid "Exception querying storage disk. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:421 +#, python-format +msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:455 +#, python-format +msgid "Successfully completed stale refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:482 +#, python-format +msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:488 +#, python-format +msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:500 +msgid "Backend not a VolumeDriver." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:502 +msgid "Backend server not NaServer." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:505 +msgid "ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:517 +msgid "refresh stale ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:616 +msgid "Fatal error: User not permitted to query NetApp volumes." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:623 +#, python-format +msgid "" +"The user does not have access or sufficient privileges to use all ssc " +"apis. The ssc features %s may not work as expected." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:122 +msgid "ems executed successfully." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:124 +#, python-format +msgid "Failed to invoke ems. 
Message : %s" +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:137 +msgid "" +"It is not the recommended way to use drivers by NetApp. Please use " +"NetAppDriver to achieve the functionality." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:160 +msgid "Requires an NaServer instance." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:317 +msgid "Unsupported Clustered Data ONTAP version." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:99 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:150 +#, python-format +msgid "Extending volume: %(id)s New size: %(size)s GB" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:166 +#, python-format +msgid "Volume %s does not exist, it seems it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:179 +#, python-format +msgid "Cannot delete snapshot %(origin): %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:190 +#, python-format +msgid "Creating temp snapshot of the original volume: %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:200 +#: cinder/volume/drivers/nexenta/nfs.py:200 +#, python-format +msgid "Volume creation failed, deleting created snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:205 +#: cinder/volume/drivers/nexenta/nfs.py:205 +#, python-format +msgid "Failed to delete zfs snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:223 +#, python-format +msgid "Enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:250 +#, python-format +msgid "Remote NexentaStor appliance at %s should be SSH-bound." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:267 +#, python-format +msgid "" +"Cannot send source snapshot %(src)s to destination %(dst)s. Reason: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:275 +#, python-format +msgid "" +"Cannot delete temporary source snapshot %(src)s on NexentaStor Appliance:" +" %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:281 +#, python-format +msgid "Cannot delete source volume %(volume)s on NexentaStor Appliance: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:318 +#, python-format +msgid "Snapshot %s does not exist, it seems it was already deleted." 
+msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:439 +#: cinder/volume/drivers/windows/windows_utils.py:230 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:449 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:461 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:471 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:481 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:514 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:522 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:83 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:88 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:89 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:90 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:96 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:85 +#, python-format +msgid "Volume %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:89 +#, python-format +msgid "Folder %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:114 +#, python-format +msgid "Creating folder on Nexenta Store %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:146 +#, python-format +msgid "Cannot destroy created folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:176 +#, python-format +msgid "Cannot destroy cloned folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:227 +#, python-format +msgid "Folder %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:237 +#: cinder/volume/drivers/nexenta/nfs.py:268 +#, python-format +msgid "Snapshot %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:302 +#, python-format +msgid "Creating regular file: %s.This may take some time." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:313 +#, python-format +msgid "Regular file: %s created." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:365 +#, python-format +msgid "Sharing folder %s on Nexenta Store" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:393 +#, python-format +msgid "Shares loaded: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/utils.py:46 +#, python-format +msgid "Invalid value: \"%s\"" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:93 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:99 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:107 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:137 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:190 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:246 +#, python-format +msgid "Snapshot info: %(name)s => %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:321 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:79 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:166 +#, python-format +msgid "Invalid hp3parclient version. Version %s or greater required." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:179 +#, python-format +msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:193 +#, python-format +msgid "HP3PARCommon %(common_ver)s, hp3parclient %(rest_ver)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:212 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:494 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:228 +#, python-format +msgid "Failed to get domain because CPG (%s) doesn't exist on array." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:247 +#, python-format +msgid "Error extending volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:347 +#, python-format +msgid "command %s failed" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:390 +#, python-format +msgid "Error running ssh command: %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:586 +#, python-format +msgid "VV Set %s does not exist." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:633 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:684 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:752 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1004 +#, python-format +msgid "Failure in update_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1019 +#, python-format +msgid "Failure in clear_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1031 +#, python-format +msgid "Error attaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1039 +#, python-format +msgid "Error detaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:124 +#, python-format +msgid "Invalid IP address format '%s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:158 +#, python-format +msgid "" +"Found invalid iSCSI IP address(s) in configuration option(s) " +"hp3par_iscsi_ips or iscsi_ip_address '%s.'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:164 +msgid "At least one valid iSCSI IP address must be set." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:266 +msgid "Least busy iSCSI port not found, using first iSCSI port in list." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:75 +#, python-format +msgid "Failure while invoking function: %(func)s. Error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:162 +#, python-format +msgid "Error while terminating session: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:165 +msgid "Successfully established connection to the server." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:172 +#, python-format +msgid "Error while logging out the user: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:218 +#, python-format +msgid "" +"Not authenticated error occurred. Will create session and try API call " +"again: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:258 +#, python-format +msgid "Task: %(task)s progress: %(prog)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:262 +#, python-format +msgid "Task %s status: success." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:266 +#: cinder/volume/drivers/vmware/api.py:271 +#, python-format +msgid "Task: %(task)s failed with error: %(err)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:290 +msgid "Lease is ready." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:294 +msgid "Lease initializing..." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:304 +#, python-format +msgid "Error: unknown lease state %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:51 +#, python-format +msgid "Read %(bytes)s out of %(max)s from ThreadSafePipe." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:56 +#, python-format +msgid "Completed transfer of size %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:102 +#, python-format +msgid "Initiating image service update on image: %(image)s with meta: %(meta)s" +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:117 +#, python-format +msgid "Glance image: %s is now active." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:123 +#, python-format +msgid "Glance image: %s is in killed state." 
+msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:132 +#, python-format +msgid "Glance image %(id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:171 +#, python-format +msgid "" +"Exception during HTTP connection close in VMwareHTTPWrite. Exception is " +"%s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:203 +#: cinder/volume/drivers/vmware/read_write_util.py:292 +msgid "Could not retrieve URL from lease." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:206 +#, python-format +msgid "Opening vmdk url: %s for write." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:231 +#, python-format +msgid "Written %s bytes to vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:242 +#: cinder/volume/drivers/vmware/read_write_util.py:318 +#, python-format +msgid "Updating progress to %s percent." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:258 +#: cinder/volume/drivers/vmware/read_write_util.py:334 +msgid "Lease released." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:260 +#: cinder/volume/drivers/vmware/read_write_util.py:336 +#, python-format +msgid "Lease is already in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:295 +#, python-format +msgid "Opening vmdk url: %s for read." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:307 +#, python-format +msgid "Read %s bytes from vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:150 +#, python-format +msgid "Error(s): %s occurred in the call to RetrievePropertiesEx." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:189 +#, python-format +msgid "No such SOAP method %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:198 +#, python-format +msgid "httplib error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:209 +#, python-format +msgid "Socket error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:218 +#, python-format +msgid "Type error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:225 +#, python-format +msgid "Error in %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:112 +#, python-format +msgid "Returning spec value %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:115 +#, python-format +msgid "Invalid spec value: %s specified." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:118 +#, python-format +msgid "Returning default spec value: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:169 +#, python-format +msgid "%s not set." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:174 +#, python-format +msgid "Successfully setup driver: %(driver)s for server: %(ip)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:219 +msgid "Backing not available, no operation to be performed." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:287 +#, python-format +msgid "" +"Unable to pick datastore to accommodate %(size)s bytes from the " +"datastores: %(dss)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:293 +#, python-format +msgid "" +"Selected datastore: %(datastore)s with %(host_count)d connected host(s) " +"for the volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:375 +#, python-format +msgid "" +"Unable to find suitable datastore for volume of size: %(vol)s GB under " +"host: %(host)s. 
More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:385 +#, python-format +msgid "Unable to find host to accommodate a disk of size: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:412 +#, python-format +msgid "" +"Unable to find suitable datastore for volume: %(vol)s under host: " +"%(host)s. More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:422 +#, python-format +msgid "Unable to create volume: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:441 +#, python-format +msgid "The instance: %s for which initialize connection is called, exists." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:448 +#, python-format +msgid "There is no backing for the volume: %s. Need to create one." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:456 +msgid "The instance for which initialize connection is called, does not exist." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:461 +#, python-format +msgid "Trying to boot from an empty volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:470 +#, python-format +msgid "" +"Returning connection_info: %(info)s for volume: %(volume)s with " +"connector: %(connector)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:518 +#, python-format +msgid "Snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:523 +#, python-format +msgid "There is no backing, so will not create snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:528 +#, python-format +msgid "Successfully created snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:549 +#, python-format +msgid "Delete snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:554 +#, python-format +msgid "There is no backing, and so there is no snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:558 +#, python-format +msgid "Successfully deleted snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:586 +#, python-format +msgid "Successfully cloned new backing: %(back)s from source VMDK file: %(vmdk)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:603 +#, python-format +msgid "" +"There is no backing for the source volume: %(svol)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:633 +#, python-format +msgid "" +"There is no backing for the source snapshot: %(snap)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:642 +#: cinder/volume/drivers/vmware/vmdk.py:982 +#, python-format +msgid "" +"There is no snapshot point for the snapshoted volume: %(snap)s. Not " +"creating any backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:678 +#, python-format +msgid "Cannot create image of disk format: %s. Only vmdk disk format is accepted." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:713 +#: cinder/volume/drivers/vmware/vmdk.py:771 +#, python-format +msgid "Fetching glance image: %(id)s to server: %(host)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:722 +#: cinder/volume/drivers/vmware/vmdk.py:792 +#, python-format +msgid "Done copying image: %(id)s to volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:725 +#, python-format +msgid "" +"Exception in copy_image_to_volume: %(excep)s. Deleting the backing: " +"%(back)s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:746 +#, python-format +msgid "Exception in _select_ds_for_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:749 +#, python-format +msgid "Selected datastore %(ds)s for new volume of size %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:784 +#, python-format +msgid "Exception in copy_image_to_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:787 +#, python-format +msgid "Deleting the backing: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:808 +#, python-format +msgid "Copy glance image: %s to create new volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:842 +msgid "Upload to glance of attached volume is not supported." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:847 +#, python-format +msgid "Copy Volume: %s to new image." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:853 +#, python-format +msgid "Backing not found, creating for volume: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:872 +#, python-format +msgid "Done copying volume %(vol)s to a new image %(img)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:922 +#, python-format +msgid "Relocating volume: %(backing)s to %(ds)s and %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:961 +#: cinder/volume/drivers/vmware/volumeops.py:630 +#, python-format +msgid "Successfully created clone: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:974 +#, python-format +msgid "" +"There is no backing for the snapshoted volume: %(snap)s. Not creating any" +" backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1010 +#, python-format +msgid "" +"There is no backing for the source volume: %(src)s. Not creating any " +"backing for volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1018 +#, python-format +msgid "Linked clone of source volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:94 +#, python-format +msgid "Downloading image: %s from glance image server as a flat vmdk file." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:107 +#: cinder/volume/drivers/vmware/vmware_images.py:126 +#, python-format +msgid "Downloaded image: %s from glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:113 +#, python-format +msgid "Downloading image: %s from glance image server using HttpNfc import." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:132 +#, python-format +msgid "Uploading image: %s to the Glance image server using HttpNfc export." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:158 +#, python-format +msgid "Uploaded image: %s to the Glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:87 +#, python-format +msgid "Did not find any backing with name: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:94 +#, python-format +msgid "Deleting the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:97 +#, python-format +msgid "Initiated deletion of VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:99 +#, python-format +msgid "Deleted the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:237 +#, python-format +msgid "There are no valid datastores attached to %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:289 +#, python-format +msgid "" +"Creating folder: %(child_folder_name)s under parent folder: " +"%(parent_folder)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:306 +#, python-format +msgid "Child folder already present: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:314 +#, python-format +msgid "Created child folder: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:365 +#, python-format +msgid "Spec for creating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:383 +#, python-format +msgid "" +"Creating volume backing name: %(name)s disk_type: %(disk_type)s size_kb: " +"%(size_kb)s at folder: %(folder)s resourse pool: %(resource_pool)s " +"datastore name: %(ds_name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:395 +#, python-format +msgid "Initiated creation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:398 +#, python-format +msgid "Successfully created volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:438 +#, python-format +msgid "Spec for relocating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:452 +#, python-format +msgid "" +"Relocating backing: %(backing)s to datastore: %(ds)s and resource pool: " +"%(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:462 +#, python-format +msgid "Initiated relocation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:464 +#, python-format +msgid "" +"Successfully relocated volume backing: %(backing)s to datastore: %(ds)s " +"and resource pool: %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:474 +#, python-format +msgid "Moving backing: %(backing)s to folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:479 +#, python-format +msgid "Initiated move of volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:482 +#, python-format +msgid "Successfully moved volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:494 +#, python-format +msgid "Snapshoting backing: %(backing)s with name: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:501 +#, python-format +msgid "Initiated snapshot of volume backing: %(backing)s named: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:505 +#, python-format +msgid "Successfully created snapshot: %(snap)s for volume backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:553 +#, python-format +msgid "Deleting the snapshot: %(name)s from backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:558 +#, python-format +msgid "" +"Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " +"delete anything." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:565 +#, python-format +msgid "Initiated snapshot: %(name)s deletion for backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:569 +#, python-format +msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:597 +#, python-format +msgid "Spec for cloning the backing: %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:613 +#, python-format +msgid "" +"Creating a clone of backing: %(back)s, named: %(name)s, clone type: " +"%(type)s from snapshot: %(snap)s on datastore: %(ds)s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:627 +#, python-format +msgid "Initiated clone of backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:638 +#, python-format +msgid "Deleting file: %(file)s under datacenter: %(dc)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:646 +#, python-format +msgid "Initiated deletion via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:648 +#, python-format +msgid "Successfully deleted file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:701 +msgid "Copying disk data before snapshot of the VM" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:711 +#, python-format +msgid "Initiated copying disk data via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:713 +#, python-format +msgid "Successfully copied disk at: %(src)s to: %(dest)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:722 +#, python-format +msgid "Deleting vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:729 +#, python-format +msgid "Initiated deleting vmdk file via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:731 +#, python-format +msgid "Deleted vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/windows/windows.py:102 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:47 +#, python-format +msgid "" +"check_for_setup_error: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:53 +msgid "check_for_setup_error: there is no ISCSI traffic listening." +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:63 +#, python-format +msgid "" +"get_host_information: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:73 +#, python-format +msgid "" +"get_host_information: the ISCSI target information could not be " +"retrieved. WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:105 +#, python-format +msgid "" +"associate_initiator_with_iscsi_target: an association between initiator: " +"%(init)s and target name: %(target)s could not be established. WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:123 +#, python-format +msgid "" +"delete_iscsi_target: error when deleting the iscsi target associated with" +" target name: %(target)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:139 +#, python-format +msgid "" +"create_volume: error when creating the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:157 +#, python-format +msgid "" +"delete_volume: error when deleting the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:177 +#, python-format +msgid "" +"create_snapshot: error when creating the snapshot name: %(vol_name)s . 
" +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:193 +#, python-format +msgid "" +"create_volume_from_snapshot: error when creating the volume name: " +"%(vol_name)s from snapshot name: %(snap_name)s. WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:208 +#, python-format +msgid "" +"delete_snapshot: error when deleting the snapshot name: %(snap_name)s . " +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:223 +#, python-format +msgid "" +"create_iscsi_target: error when creating iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:240 +#, python-format +msgid "" +"remove_iscsi_target: error when deleting iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:255 +#, python-format +msgid "" +"add_disk_to_target: error adding disk associated to volume : %(vol_name)s" +" to the target name: %(tar_name)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:273 +#, python-format +msgid "" +"copy_vhd_disk: error when copying disk from source path : %(src_path)s to" +" destination path: %(dest_path)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:290 +#, python-format +msgid "" +"extend: error when extending the volume: %(vol_name)s .WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/flows/common.py:52 +#, python-format +msgid "Restoring source %(source_volid)s status to %(status)s" +msgstr "" + +#: cinder/volume/flows/common.py:58 +#, python-format +msgid "" +"Failed setting source volume %(source_volid)s back to its initial " +"%(source_status)s status" +msgstr "" + +#: cinder/volume/flows/common.py:83 +#, python-format +msgid "Updating volume: %(volume_id)s with %(update)s due to: %(reason)s" +msgstr "" + +#: cinder/volume/flows/common.py:90 +#: cinder/volume/flows/manager/create_volume.py:676 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(update)s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:81 +#, python-format +msgid "Originating snapshot status must be one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:103 +#, python-format +msgid "" +"Unable to create a volume from an originating source volume when its " +"status is not one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:126 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than the snapshot size " +"%(snap_size)sGB. They must be >= original snapshot size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:135 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than original volume size " +"%(source_size)sGB. They must be >= original volume size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:144 +#, python-format +msgid "Volume size %(size)s must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:186 +#, python-format +msgid "" +"Size of specified image %(image_size)sGB is larger than volume size " +"%(volume_size)sGB." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:194 +#, python-format +msgid "" +"Volume size %(volume_size)sGB cannot be smaller than the image minDisk " +"size %(min_disk)sGB." 
+msgstr "" + +#: cinder/volume/flows/api/create_volume.py:212 +#, python-format +msgid "Metadata property key %s greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:217 +#, python-format +msgid "Metadata property key %s value greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:254 +#, python-format +msgid "Availability zone '%s' is invalid" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:267 +msgid "Volume must be in the same availability zone as the snapshot" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:276 +msgid "Volume must be in the same availability zone as the source volume" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:315 +msgid "Volume type will be changed to be the same as the source volume." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:463 +#, python-format +msgid "Failed destroying volume entry %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:546 +#, python-format +msgid "Failed rolling back quota for %s reservations" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:590 +#, python-format +msgid "Failed to update quota for deleting volume: %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:678 +#: cinder/volume/flows/manager/create_volume.py:192 +#, python-format +msgid "Volume %s: create failed" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:682 +msgid "Unexpected build error:" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:105 +#, python-format +msgid "" +"Volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d due to " +"%(reason)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:124 +#, python-format +msgid "Volume %s: re-scheduled" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:141 +#, python-format +msgid "Updating volume %(volume_id)s with %(update)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:146 +#, python-format +msgid "Volume %s: resetting 'creating' status failed." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:165 +#, python-format +msgid "Volume %s: rescheduling failed" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:308 +#, python-format +msgid "" +"Failed notifying about the volume action %(event)s for volume " +"%(volume_id)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:345 +#, python-format +msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:347 +#, python-format +msgid "" +"Failed updating volume %(vol_id)s metadata using the provided " +"%(src_type)s %(src_id)s metadata" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:405 +#, python-format +msgid "" +"Failed fetching snapshot %(snapshot_id)s bootable flag using the provided" +" glance snapshot %(snapshot_ref_id)s volume reference" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:418 +#, python-format +msgid "Marking volume %s as bootable." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:421 +#, python-format +msgid "Failed updating volume %(volume_id)s bootable flag to true" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:448 +#, python-format +msgid "" +"Attempting download of %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s." 
+msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:455 +#: cinder/volume/flows/manager/create_volume.py:466 +#, python-format +msgid "" +"Failed to copy image %(image_id)s to volume: %(volume_id)s, error: " +"%(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:461 +#, python-format +msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:475 +#, python-format +msgid "" +"Downloaded image %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s successfully." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:512 +#, python-format +msgid "" +"Creating volume glance metadata for volume %(volume_id)s backed by image " +"%(image_id)s with: %(vol_metadata)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:526 +#, python-format +msgid "" +"Cloning %(volume_id)s from image %(image_id)s at location " +"%(image_location)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:552 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(updates)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:574 +#, python-format +msgid "Unable to create volume. Volume driver %s not initialized" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:588 +#, python-format +msgid "" +"Volume %(volume_id)s: being created using %(functor)s with specification:" +" %(volume_spec)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:611 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with creation provided " +"model %(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:619 +#, python-format +msgid "Volume %s: creating export" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:633 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with driver provided model " +"%(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:680 +#, python-format +msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" +msgstr "" + +#~ msgid "Folder %s does not exist, it seems it was already deleted." +#~ msgstr "" + +#~ msgid "No 'os-update_readonly_flag' was specified in request." +#~ msgstr "" + +#~ msgid "Volume 'readonly' flag must be specified in request as a boolean." +#~ msgstr "" + +#~ msgid "ISER provider_location not stored, using discovery" +#~ msgstr "" + +#~ msgid "Could not find iSER export for volume %s" +#~ msgstr "" + +#~ msgid "ISER Discovery: Found %s" +#~ msgstr "" + +#~ msgid "Failed to access the device on the path %(path)s: %(error)s." +#~ msgstr "" + +#~ msgid "iSER device not found at %s" +#~ msgstr "" + +#~ msgid "" +#~ msgstr "" + +#~ msgid "Found iSER node %(host_device)s (after %(tries)s rescans)." +#~ msgstr "" + +#~ msgid "Skipping ensure_export. No iser_target provisioned for volume: %s" +#~ msgstr "" + +#~ msgid "Skipping remove_export. No iser_target provisioned for volume: %s" +#~ msgstr "" + +#~ msgid "Downloading image: %s from glance image server." +#~ msgstr "" + +#~ msgid "Uploading image: %s to the Glance image server." 
+#~ msgstr "" + +#~ msgid "Invalid request body" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: prefix %s" +#~ msgstr "" + +#~ msgid "Schedule volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete schedule volume using flow: %s" +#~ msgstr "" + +#~ msgid "Create volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete create volume workflow" +#~ msgstr "" + +#~ msgid "Expected volume result not found" +#~ msgstr "" + +#~ msgid "Manager volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete manager volume workflow" +#~ msgstr "" + +#~ msgid "Unable to update stats, driver is uninitialized" +#~ msgstr "" + +#~ msgid "Bad reponse from server: %s" +#~ msgstr "" + +#~ msgid "%(flow)s has moved into state %(state)s from state %(old_state)s" +#~ msgstr "" + +#~ msgid "No request spec, will not reschedule" +#~ msgstr "" + +#~ msgid "No retry filter property or associated retry info, will not reschedule" +#~ msgstr "" + +#~ msgid "Retry info not present, will not reschedule" +#~ msgstr "" + +#~ msgid "Clear capabilities" +#~ msgstr "" + +#~ msgid "This usually means the volume was never succesfully created." +#~ msgstr "" + +#~ msgid "setting LU uppper (end) limit to %s" +#~ msgstr "" + +#~ msgid "Can't find lun or lun goup in array" +#~ msgstr "" + +#~ msgid "Volume to be restored to is smaller than the backup to be restored" +#~ msgstr "" + +#~ msgid "Volume driver '%(driver)s' not initialized." +#~ msgstr "" + +#~ msgid "in looping call" +#~ msgstr "" + +#~ msgid "Is the appropriate service running?" +#~ msgstr "" + +#~ msgid "Could not find another host" +#~ msgstr "" + +#~ msgid "Not enough allocatable volume gigabytes remaining" +#~ msgstr "" + +#~ msgid "Unable to update stats on non-intialized Volume Group: %s" +#~ msgstr "" + +#~ msgid "do_setup: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "migrate_volume started with more than one vdisk copy" +#~ msgstr "" + +#~ msgid "migrate_volume: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "Selected datastore: %s for the volume." +#~ msgstr "" + +#~ msgid "There are no valid datastores present under %s." +#~ msgstr "" + +#~ msgid "Unable to create volume, driver not initialized" +#~ msgstr "" + +#~ msgid "Migration %(migration_id)s could not be found." +#~ msgstr "" + +#~ msgid "Bad driver response status: %(status)s" +#~ msgstr "" + +#~ msgid "Instance %(instance_id)s could not be found." +#~ msgstr "" + +#~ msgid "Volume retype failed: %(reason)s" +#~ msgstr "" + +#~ msgid "SIGTERM received" +#~ msgstr "" + +#~ msgid "Child %(pid)d exited with status %(code)d" +#~ msgstr "" + +#~ msgid "_wait_child %d" +#~ msgstr "" + +#~ msgid "wait wrap.failed %s" +#~ msgstr "" + +#~ msgid "" +#~ "Report interval must be less than " +#~ "service down time. Current config: " +#~ ". Setting " +#~ "service_down_time to: %(new_service_down_time)s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target for volume %(name)s." +#~ msgstr "" + +#~ msgid "Updating iscsi target: %s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target %(name)s: %(e)s" +#~ msgstr "" + +#~ msgid "Caught '%(exception)s' exception." +#~ msgstr "" + +#~ msgid "Get code level failed" +#~ msgstr "" + +#~ msgid "do_setup: Could not get system name" +#~ msgstr "" + +#~ msgid "Failed to get license information." 
+#~ msgstr "" + +#~ msgid "do_setup: No configured nodes" +#~ msgstr "" + +#~ msgid "enter: _get_chap_secret_for_host: host name %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_chap_secret_for_host: host name " +#~ "%(host_name)s with secret %(chap_secret)s" +#~ msgstr "" + +#~ msgid "" +#~ "_create_host: Cannot clean host name. " +#~ "Host name is not unicode or string" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: %s" +#~ msgstr "" + +#~ msgid "leave: _get_host_from_connector: host %s" +#~ msgstr "" + +#~ msgid "enter: _create_host: host %s" +#~ msgstr "" + +#~ msgid "_create_host: No connector ports" +#~ msgstr "" + +#~ msgid "leave: _create_host: host %(host)s - %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +#~ msgstr "" + +#~ msgid "" +#~ "storwize_svc_multihostmap_enabled is set to " +#~ "False, Not allow multi host mapping" +#~ msgstr "" + +#~ msgid "volume %s mapping to multi host" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _map_vol_to_host: LUN %(result_lun)s, " +#~ "volume %(volume_name)s, host %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "leave: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "_create_host failed to return the host name." +#~ msgstr "" + +#~ msgid "_get_host_from_connector failed to return the host name for connector" +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to any host found." +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: Multiple mappings of " +#~ "volume %(vol_name)s found, no host " +#~ "specified." +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to host %(host_name)s found" +#~ msgstr "" + +#~ msgid "protocol must be specified as ' iSCSI' or ' FC'" +#~ msgstr "" + +#~ msgid "enter: _create_vdisk: vdisk %s " +#~ msgstr "" + +#~ msgid "" +#~ "_create_vdisk %(name)s - did not find success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find success " +#~ "message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find mapping " +#~ "id in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to prepare FlashCopy" +#~ " from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "Unexecpted mapping status %(status)s for " +#~ "mapping %(id)s. Attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Mapping %(id)s prepare failed to " +#~ "complete within the allotted %(to)d " +#~ "seconds timeout. Terminating." 
+#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s with " +#~ "exception %(ex)s" +#~ msgstr "" + +#~ msgid "_prepare_fc_map: %s" +#~ msgstr "" + +#~ msgid "" +#~ "_start_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "enter: _run_flashcopy: execute FlashCopy from" +#~ " source %(source)s to target %(target)s" +#~ msgstr "" + +#~ msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +#~ msgstr "" + +#~ msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %(src_vdisk)s (%(src_id)s) does not exist" +#~ msgstr "" + +#~ msgid "" +#~ "_create_copy: cannot get source vdisk " +#~ "%(src)s capacity from vdisk attributes " +#~ "%(attr)s" +#~ msgstr "" + +#~ msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_flashcopy_mapping_attributes: mapping " +#~ "%(fc_map_id)s, attributes %(attributes)s" +#~ msgstr "" + +#~ msgid "enter: _is_vdisk_defined: vdisk %s " +#~ msgstr "" + +#~ msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +#~ msgstr "" + +#~ msgid "enter: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "warning: Tried to delete vdisk %s but it does not exist." +#~ msgstr "" + +#~ msgid "leave: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "" +#~ "_add_vdisk_copy %(name)s - did not find" +#~ " success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "_get_vdisk_copy_attrs: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "_get_pool_attrs: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "enter: _execute_command_and_parse_attributes: command %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _execute_command_and_parse_attributes:\n" +#~ "command: %(cmd)s\n" +#~ "attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "_get_hdr_dic: attribute headers and values do not match.\n" +#~ " Headers: %(header)s\n" +#~ " Values: %(row)s" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ "stdout: %(out)s\n" +#~ "stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "Did not find expected column in %(fun)s: %(hdr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Volume size %(size)s cannot be lesser" +#~ " than the snapshot size %(snap_size)s. " +#~ "They must be >= original snapshot " +#~ "size." +#~ msgstr "" + +#~ msgid "" +#~ "Clones currently disallowed when %(size)s " +#~ "< %(source_size)s. They must be >= " +#~ "original volume size." +#~ msgstr "" + +#~ msgid "" +#~ "Size of specified image %(image_size)s " +#~ "is larger than volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "" +#~ "Image minDisk size %(min_disk)s is " +#~ "larger than the volume size " +#~ "%(volume_size)s." 
+#~ msgstr "" + +#~ msgid "Updating volume %(volume_id)s with %(update)s" +#~ msgstr "" + +#~ msgid "Volume %s: resetting 'creating' status failed" +#~ msgstr "" + +#~ msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s" +#~ msgstr "" + +#~ msgid "Marking volume %s as bootable" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting download of %(image_id)s " +#~ "(%(image_location)s) to volume %(volume_id)s" +#~ msgstr "" + +#~ msgid "" +#~ "Downloaded image %(image_id)s (%(image_location)s)" +#~ " to volume %(volume_id)s successfully" +#~ msgstr "" + +#~ msgid "" +#~ "Creating volume glance metadata for " +#~ "volume %(volume_id)s backed by image " +#~ "%(image_id)s with: %(vol_metadata)s" +#~ msgstr "" + +#~ msgid "" +#~ "Cloning %(volume_id)s from image %(image_id)s" +#~ " at location %(image_location)s" +#~ msgstr "" + diff --git a/cinder/locale/eu_ES/LC_MESSAGES/cinder.po b/cinder/locale/eu_ES/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..dc94394d71 --- /dev/null +++ b/cinder/locale/eu_ES/LC_MESSAGES/cinder.po @@ -0,0 +1,10001 @@ +# Basque (Spain) translations for cinder. +# Copyright (C) 2013 ORGANIZATION +# This file is distributed under the same license as the cinder project. +# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Cinder\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-02-03 06:16+0000\n" +"PO-Revision-Date: 2013-11-26 20:45+0000\n" +"Last-Translator: openstackjenkins \n" +"Language-Team: Basque (Spain) " +"(http://www.transifex.com/projects/p/openstack/language/eu_ES/)\n" +"Plural-Forms: nplurals=2; plural=(n != 1)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:102 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:66 cinder/brick/exception.py:33 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:88 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:107 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:112 +#, python-format +msgid "Volume driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:116 +#, python-format +msgid "Backup driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:120 +#, python-format +msgid "Connection to glance failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:124 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:129 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:133 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:137 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:141 +msgid "Volume driver not ready." +msgstr "" + +#: cinder/exception.py:145 cinder/brick/exception.py:74 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:150 +#, python-format +msgid "Invalid snapshot: %(reason)s" +msgstr "" + +#: cinder/exception.py:154 +#, python-format +msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." 
+msgstr "" + +#: cinder/exception.py:159 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:163 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:167 +msgid "The results are invalid." +msgstr "" + +#: cinder/exception.py:171 +#, python-format +msgid "Invalid input received: %(reason)s" +msgstr "" + +#: cinder/exception.py:175 +#, python-format +msgid "Invalid volume type: %(reason)s" +msgstr "" + +#: cinder/exception.py:179 +#, python-format +msgid "Invalid volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:183 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:187 +#, python-format +msgid "Invalid host: %(reason)s" +msgstr "" + +#: cinder/exception.py:193 cinder/brick/exception.py:81 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:197 +#, python-format +msgid "Invalid auth key: %(reason)s" +msgstr "" + +#: cinder/exception.py:201 +#, python-format +msgid "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" +msgstr "" + +#: cinder/exception.py:206 +msgid "Service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:210 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:214 +#, python-format +msgid "The device in the path %(path)s is unavailable: %(reason)s" +msgstr "" + +#: cinder/exception.py:218 +#, python-format +msgid "Expected a uuid but received %(uuid)s." +msgstr "" + +#: cinder/exception.py:222 cinder/brick/exception.py:68 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:228 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:232 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "" +"Volume %(volume_id)s has no administration metadata with key " +"%(metadata_key)s." +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Invalid metadata: %(reason)s" +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Invalid metadata size: %(reason)s" +msgstr "" + +#: cinder/exception.py:250 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:255 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:264 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:269 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s deletion is not allowed with volumes " +"present with the type." +msgstr "" + +#: cinder/exception.py:274 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:278 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:282 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:287 +#, python-format +msgid "No target id found for volume %(volume_id)s." 
+msgstr "" + +#: cinder/exception.py:291 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:295 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:328 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:332 +#, python-format +msgid "Unknown quota resources %(unknown)s." +msgstr "" + +#: cinder/exception.py:336 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:340 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:344 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:348 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:352 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:356 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:365 +#, python-format +msgid "Volume Type %(id)s already exists." +msgstr "" + +#: cinder/exception.py:369 +#, python-format +msgid "Volume type encryption for type %(type_id)s already exists." +msgstr "" + +#: cinder/exception.py:373 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:377 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:381 +#, python-format +msgid "Could not find parameter %(param)s" +msgstr "" + +#: cinder/exception.py:385 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:389 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:398 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:402 +#, python-format +msgid "Quota exceeded: code=%(code)s" +msgstr "" + +#: cinder/exception.py:409 +#, python-format +msgid "" +"Requested volume or snapshot exceeds allowed Gigabytes quota. Requested " +"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." 
+msgstr "" + +#: cinder/exception.py:415 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:419 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:423 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:427 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:432 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:436 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:440 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:444 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:449 +#, python-format +msgid "Glance metadata for volume/snapshot %(id)s cannot be found." +msgstr "" + +#: cinder/exception.py:453 +#, python-format +msgid "Failed to export for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:457 +#, python-format +msgid "Failed to create metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:461 +#, python-format +msgid "Failed to update metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:465 +#, python-format +msgid "Failed to copy metadata to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:469 +#, python-format +msgid "Failed to copy image to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:473 +msgid "Invalid Ceph args provided for backup rbd operation" +msgstr "" + +#: cinder/exception.py:477 +msgid "An error has occurred during backup operation" +msgstr "" + +#: cinder/exception.py:481 +msgid "Backup RBD operation failed" +msgstr "" + +#: cinder/exception.py:485 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:489 +msgid "Failed to identify volume backend." +msgstr "" + +#: cinder/exception.py:493 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "" + +#: cinder/exception.py:497 +#, python-format +msgid "Connection to swift failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:501 +#, python-format +msgid "Transfer %(transfer_id)s could not be found." +msgstr "" + +#: cinder/exception.py:505 +#, python-format +msgid "Volume migration failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:509 +#, python-format +msgid "SSH command injection detected: %(command)s" +msgstr "" + +#: cinder/exception.py:513 +#, python-format +msgid "QoS Specs %(specs_id)s already exists." +msgstr "" + +#: cinder/exception.py:517 +#, python-format +msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:522 +#, python-format +msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "No such QoS spec %(specs_id)s." +msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:536 +#, python-format +msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." 
+msgstr "" + +#: cinder/exception.py:541 +#, python-format +msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." +msgstr "" + +#: cinder/exception.py:546 +#, python-format +msgid "Invalid qos specs: %(reason)s" +msgstr "" + +#: cinder/exception.py:550 +#, python-format +msgid "QoS Specs %(specs_id)s is still associated with entities." +msgstr "" + +#: cinder/exception.py:554 +#, python-format +msgid "key manager error: %(reason)s" +msgstr "" + +#: cinder/exception.py:560 +msgid "Coraid Cinder Driver exception." +msgstr "" + +#: cinder/exception.py:564 +msgid "Failed to encode json data." +msgstr "" + +#: cinder/exception.py:568 +msgid "Login on ESM failed." +msgstr "" + +#: cinder/exception.py:572 +msgid "Relogin on ESM failed." +msgstr "" + +#: cinder/exception.py:576 +#, python-format +msgid "Group with name \"%(group_name)s\" not found." +msgstr "" + +#: cinder/exception.py:580 +#, python-format +msgid "ESM configure request failed: %(message)s." +msgstr "" + +#: cinder/exception.py:584 +#, python-format +msgid "Coraid ESM not available with reason: %(reason)s." +msgstr "" + +#: cinder/exception.py:589 +msgid "Zadara Cinder Driver exception." +msgstr "" + +#: cinder/exception.py:593 +#, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:597 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:601 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:605 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:609 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:613 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:618 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:622 +msgid "SolidFire Cinder Driver exception" +msgstr "" + +#: cinder/exception.py:626 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:630 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:636 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:641 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:645 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:649 cinder/exception.py:662 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:654 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:658 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/manager.py:133 +msgid "Notifying Schedulers of capabilities ..." +msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:105 +#, python-format +msgid "" +"Default quota for resource: %(res)s is set by the default quota flag: " +"quota_%(res)s, it is now deprecated. Please use the the default quota " +"class for default quota." 
+msgstr "" + +#: cinder/quota.py:748 +#, python-format +msgid "Created reservations %s" +msgstr "" + +#: cinder/quota.py:770 +#, python-format +msgid "Failed to commit reservations %s" +msgstr "" + +#: cinder/quota.py:790 +#, python-format +msgid "Failed to roll back reservations %s" +msgstr "" + +#: cinder/quota.py:876 +msgid "Cannot register resource" +msgstr "" + +#: cinder/quota.py:879 +msgid "Cannot register resources" +msgstr "" + +#: cinder/quota_utils.py:46 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume - " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/quota_utils.py:56 cinder/transfer/api.py:168 +#: cinder/volume/flows/api/create_volume.py:520 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/service.py:95 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:108 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:148 +#, python-format +msgid "" +"Report interval must be less than service down time. Current config " +"service_down_time: %(service_down_time)s, report_interval for this: " +"service is: %(report_interval)s. Setting global service_down_time to: " +"%(new_down_time)s" +msgstr "" + +#: cinder/service.py:216 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:255 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:270 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:276 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:298 +#, python-format +msgid "" +"Value of config option %(name)s_workers must be integer greater than 1. " +"Input value ignored." +msgstr "" + +#: cinder/service.py:373 +msgid "serve() can only be called once" +msgstr "" + +#: cinder/service.py:379 cinder/openstack/common/service.py:166 +#: cinder/openstack/common/service.py:384 +msgid "Full set of CONF:" +msgstr "" + +#: cinder/service.py:387 +#, python-format +msgid "%s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Can not translate %s to integer." 
+msgstr "" + +#: cinder/utils.py:127 +#, python-format +msgid "May specify only one of %s" +msgstr "" + +#: cinder/utils.py:212 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:228 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:412 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:423 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:698 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:759 +#, python-format +msgid "Volume driver %s not initialized" +msgstr "" + +#: cinder/wsgi.py:127 cinder/openstack/common/sslutils.py:50 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: cinder/wsgi.py:130 cinder/openstack/common/sslutils.py:53 +#, python-format +msgid "Unable to find ca_file : %s" +msgstr "" + +#: cinder/wsgi.py:133 cinder/openstack/common/sslutils.py:56 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: cinder/wsgi.py:136 cinder/openstack/common/sslutils.py:59 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:169 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:206 +#, python-format +msgid "Started %(name)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:244 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:313 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." +msgstr "" + +#: cinder/api/common.py:92 cinder/api/common.py:126 cinder/volume/api.py:266 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:95 cinder/api/common.py:130 cinder/volume/api.py:263 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:120 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:134 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:162 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:189 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:182 +msgid "Initializing extension manager." 
+msgstr "" + +#: cinder/api/extensions.py:197 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:235 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:236 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:240 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:256 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:262 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:276 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:287 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:356 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:266 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:463 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:786 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:907 +msgid "subclasses must implement construct()!" +msgstr "" + +#: cinder/api/contrib/admin_actions.py:81 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:227 +#: cinder/api/contrib/volume_transfer.py:157 +#: cinder/api/contrib/volume_transfer.py:193 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:224 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:60 +msgid "Snapshot not found." 
+msgstr "" + +#: cinder/api/contrib/hosts.py:86 cinder/api/openstack/wsgi.py:245 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:136 +#, python-format +msgid "Host '%s' could not be found." +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:168 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:180 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:206 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:214 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:111 +msgid "Please specify a name for QoS specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:220 +msgid "Failed to disassociate qos specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:222 +msgid "Qos specs still in use." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:298 +#: cinder/api/contrib/qos_specs_manage.py:351 +msgid "Volume Type id must not be None." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:72 +msgid "Missing required element quota_class_set in request body." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:81 +msgid "Quota class limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:85 +msgid "Quota class limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:60 +msgid "Quota limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quotas.py:65 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:100 +msgid "Missing required element quota_set in request body." +msgstr "" + +#: cinder/api/contrib/quotas.py:111 +#, python-format +msgid "Bad key(s) in quota set: %s" +msgstr "" + +#: cinder/api/contrib/scheduler_hints.py:36 +msgid "Malformed scheduler_hints attribute" +msgstr "" + +#: cinder/api/contrib/services.py:84 +msgid "" +"Query by service parameter is deprecated. Please use binary parameter " +"instead." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:51 +msgid "'status' must be specified." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:61 +#, python-format +msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:67 +#, python-format +msgid "" +"Provided snapshot status %(provided)s not allowed for snapshot with " +"status %(current)s." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:79 +msgid "progress must be an integer percentage" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:101 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:105 +#: cinder/api/v1/snapshot_metadata.py:75 cinder/api/v1/volume_metadata.py:75 +#: cinder/api/v2/snapshot_metadata.py:75 cinder/api/v2/volume_metadata.py:74 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:108 +#: cinder/api/v1/snapshot_metadata.py:79 cinder/api/v1/volume_metadata.py:79 +#: cinder/api/v2/snapshot_metadata.py:79 cinder/api/v2/volume_metadata.py:78 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:150 +msgid "" +"Key names can only contain alphanumeric characters, underscores, periods," +" colons and hyphens." 
+msgstr "" + +#: cinder/api/contrib/volume_actions.py:99 +#, python-format +msgid "" +"Invalid request to attach volume to an instance %(instance_uuid)s and a " +"host %(host_name)s simultaneously" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:107 +msgid "Invalid request to attach volume to an invalid target" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:111 +msgid "" +"Invalid request to attach volume with an invalid mode. Attaching mode " +"should be 'rw' or 'ro'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:196 +msgid "Unable to fetch connection information from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:216 +msgid "Unable to terminate volume connection from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:229 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:237 +msgid "Bad value for 'force' parameter." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:240 +msgid "'force' is not string or bool." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:280 +msgid "New volume size must be specified as an integer." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:299 +msgid "Must specify readonly in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:307 +msgid "Bad value for 'readonly'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:311 +msgid "'readonly' not string or bool" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:325 +msgid "New volume type must be specified." +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:131 +msgid "Listing volume transfers" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:147 +#, python-format +msgid "Creating new volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:162 +#, python-format +msgid "Creating transfer of volume %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:183 +#, python-format +msgid "Accepting volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:196 +#, python-format +msgid "Accepting transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:217 +#, python-format +msgid "Delete transfer with id: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:64 +msgid "key_size must be non-negative" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:67 +msgid "key_size must be an integer" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:73 +msgid "provider must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:75 +msgid "control_location must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:83 +#, python-format +msgid "Valid control location are: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:111 +msgid "Cannot create encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:115 +msgid "Create body is not valid." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:157 +msgid "Cannot delete encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/middleware/auth.py:108 +msgid "Invalid service catalog json." 
+msgstr "" + +#: cinder/api/middleware/fault.py:44 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:53 cinder/api/openstack/wsgi.py:984 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/fault.py:69 +#, python-format +msgid "%(exception)s: %(explanation)s" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:55 cinder/api/middleware/sizelimit.py:64 +#: cinder/api/middleware/sizelimit.py:78 +msgid "Request is too large." +msgstr "" + +#: cinder/api/openstack/__init__.py:69 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:80 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:104 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:126 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." +msgstr "" + +#: cinder/api/openstack/wsgi.py:220 cinder/api/openstack/wsgi.py:634 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:639 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:677 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:682 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:685 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:793 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:799 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:803 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:914 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:917 cinder/api/openstack/wsgi.py:930 +#: cinder/api/v1/snapshot_metadata.py:53 cinder/api/v1/snapshot_metadata.py:71 +#: cinder/api/v1/snapshot_metadata.py:96 cinder/api/v1/snapshot_metadata.py:121 +#: cinder/api/v1/volume_metadata.py:53 cinder/api/v1/volume_metadata.py:71 +#: cinder/api/v1/volume_metadata.py:96 cinder/api/v1/volume_metadata.py:121 +#: cinder/api/v2/snapshot_metadata.py:53 cinder/api/v2/snapshot_metadata.py:71 +#: cinder/api/v2/snapshot_metadata.py:96 cinder/api/v2/snapshot_metadata.py:121 +#: cinder/api/v2/volume_metadata.py:52 cinder/api/v2/volume_metadata.py:70 +#: cinder/api/v2/volume_metadata.py:95 cinder/api/v2/volume_metadata.py:120 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:927 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:939 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:987 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." 
+msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:139 cinder/api/v2/limits.py:138 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:264 cinder/api/v2/limits.py:261 +msgid "This request was rate-limited." +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:37 cinder/api/v1/snapshot_metadata.py:117 +#: cinder/api/v1/snapshot_metadata.py:156 cinder/api/v2/snapshot_metadata.py:37 +#: cinder/api/v2/snapshot_metadata.py:117 +#: cinder/api/v2/snapshot_metadata.py:156 +msgid "snapshot does not exist" +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:139 +#: cinder/api/v1/snapshot_metadata.py:149 cinder/api/v1/volume_metadata.py:139 +#: cinder/api/v1/volume_metadata.py:149 cinder/api/v2/snapshot_metadata.py:139 +#: cinder/api/v2/snapshot_metadata.py:149 cinder/api/v2/volume_metadata.py:138 +#: cinder/api/v2/volume_metadata.py:148 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:119 cinder/api/v2/snapshots.py:120 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:184 +msgid "'volume_id' must be specified" +msgstr "" + +#: cinder/api/v1/snapshots.py:182 cinder/api/v2/snapshots.py:193 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:186 cinder/api/v2/snapshots.py:202 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:37 cinder/api/v1/volume_metadata.py:117 +#: cinder/api/v1/volume_metadata.py:156 cinder/api/v2/volume_metadata.py:36 +#: cinder/api/v2/volume_metadata.py:116 cinder/api/v2/volume_metadata.py:155 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:111 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:290 cinder/api/v2/volumes.py:228 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:344 cinder/api/v1/volumes.py:348 +#: cinder/api/v2/volumes.py:298 cinder/api/v2/volumes.py:302 +msgid "Invalid imageRef provided." 
+msgstr "" + +#: cinder/api/v1/volumes.py:388 cinder/api/v2/volumes.py:354 +#, python-format +msgid "snapshot id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:401 +#, python-format +msgid "source vol id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:412 cinder/api/v2/volumes.py:377 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/v1/volumes.py:496 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/snapshots.py:111 cinder/api/v2/snapshots.py:126 +#: cinder/api/v2/snapshots.py:267 +msgid "Snapshot could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:174 cinder/api/v2/snapshots.py:234 +#: cinder/api/v2/volumes.py:313 cinder/api/v2/volumes.py:419 +#, python-format +msgid "Missing required element '%s' in request body" +msgstr "" + +#: cinder/api/v2/snapshots.py:190 cinder/api/v2/volumes.py:217 +#: cinder/api/v2/volumes.py:234 cinder/api/v2/volumes.py:449 +msgid "Volume could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:230 cinder/api/v2/volumes.py:415 +msgid "Missing request body" +msgstr "" + +#: cinder/api/v2/types.py:70 +msgid "Volume type not found" +msgstr "" + +#: cinder/api/v2/volumes.py:237 +msgid "Volume cannot be deleted while in attached state" +msgstr "" + +#: cinder/api/v2/volumes.py:343 +msgid "Volume type not found." +msgstr "" + +#: cinder/api/v2/volumes.py:366 +#, python-format +msgid "source volume id:%s not found" +msgstr "" + +#: cinder/api/v2/volumes.py:472 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:66 +msgid "Backup status must be available or error" +msgstr "" + +#: cinder/backup/api.py:105 +msgid "Volume to be backed up must be available" +msgstr "" + +#: cinder/backup/api.py:140 +msgid "Backup status must be available" +msgstr "" + +#: cinder/backup/api.py:145 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:154 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:170 +msgid "Volume to be restored to must be available" +msgstr "" + +#: cinder/backup/api.py:176 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." +msgstr "" + +#: cinder/backup/api.py:181 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:97 +msgid "NULL host not allowed for volume backend lookup." +msgstr "" + +#: cinder/backup/manager.py:100 +#, python-format +msgid "Checking hostname '%s' for backend info." +msgstr "" + +#: cinder/backup/manager.py:107 +#, python-format +msgid "Backend not found in hostname (%s) so using default." +msgstr "" + +#: cinder/backup/manager.py:117 +#, python-format +msgid "Manager requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:120 cinder/backup/manager.py:132 +msgid "Fetching default backend." +msgstr "" + +#: cinder/backup/manager.py:123 +#, python-format +msgid "Volume manager for backend '%s' does not exist." +msgstr "" + +#: cinder/backup/manager.py:129 +#, python-format +msgid "Driver requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:147 +#, python-format +msgid "" +"Registering backend %(backend)s (host=%(host)s " +"backend_name=%(backend_name)s)." +msgstr "" + +#: cinder/backup/manager.py:154 +#, python-format +msgid "Registering default backend %s." 
+msgstr "" + +#: cinder/backup/manager.py:158 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)." +msgstr "" + +#: cinder/backup/manager.py:165 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s." +msgstr "" + +#: cinder/backup/manager.py:184 +msgid "Cleaning up incomplete backup operations." +msgstr "" + +#: cinder/backup/manager.py:189 +#, python-format +msgid "Resetting volume %s to available (was backing-up)." +msgstr "" + +#: cinder/backup/manager.py:194 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)." +msgstr "" + +#: cinder/backup/manager.py:206 +#, python-format +msgid "Resetting backup %s to error (was creating)." +msgstr "" + +#: cinder/backup/manager.py:212 +#, python-format +msgid "Resetting backup %s to available (was restoring)." +msgstr "" + +#: cinder/backup/manager.py:217 +#, python-format +msgid "Resuming delete on backup: %s." +msgstr "" + +#: cinder/backup/manager.py:225 +#, python-format +msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:237 +#, python-format +msgid "" +"Create backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:249 +#, python-format +msgid "" +"Create backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:282 +#, python-format +msgid "Create backup finished. backup: %s." +msgstr "" + +#: cinder/backup/manager.py:286 +#, python-format +msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:299 +#, python-format +msgid "" +"Restore backup aborted: expected volume status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:310 +#, python-format +msgid "" +"Restore backup aborted: expected backup status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:329 +#, python-format +msgid "" +"Restore backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:360 +#, python-format +msgid "" +"Restore backup finished, backup %(backup_id)s restored to volume " +"%(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:379 +#, python-format +msgid "Delete backup started, backup: %s." +msgstr "" + +#: cinder/backup/manager.py:386 +#, python-format +msgid "" +"Delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:399 +#, python-format +msgid "" +"Delete backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:422 +#, python-format +msgid "Delete backup finished, backup %s deleted." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:116 +msgid "" +"rbd striping not supported - ignoring configuration settings for rbd " +"striping" +msgstr "" + +#: cinder/backup/drivers/ceph.py:147 +#, python-format +msgid "invalid user '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:213 +msgid "backup_id required" +msgstr "" + +#: cinder/backup/drivers/ceph.py:224 +#, python-format +msgid "discarding %(length)s bytes from offset %(offset)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:232 +#, python-format +msgid "writing zeroes chunk %d" +msgstr "" + +#: cinder/backup/drivers/ceph.py:246 +#, python-format +msgid "transferring data between '%(src)s' and '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:250 +#, python-format +msgid "%(chunks)s chunks of %(bytes)s bytes to be transferred" +msgstr "" + +#: cinder/backup/drivers/ceph.py:269 +#, python-format +msgid "transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:279 +#, python-format +msgid "transferring remaining %s bytes" +msgstr "" + +#: cinder/backup/drivers/ceph.py:295 +#, python-format +msgid "creating base image '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:322 cinder/backup/drivers/ceph.py:603 +#, python-format +msgid "deleting backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:325 +msgid "no backup snapshot to delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:361 +#, python-format +msgid "trying diff format name format basename='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:369 +#, python-format +msgid "image %s not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:377 +#, python-format +msgid "base image still has %s snapshots so skipping base image delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:382 +#, python-format +msgid "deleting base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:389 +#, python-format +msgid "image busy, retrying %(retries)s more time(s) in %(delay)ss" +msgstr "" + +#: cinder/backup/drivers/ceph.py:394 +msgid "max retries reached - raising error" +msgstr "" + +#: cinder/backup/drivers/ceph.py:397 +#, python-format +msgid "base backup image='%s' deleted)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:407 +#, python-format +msgid "deleting source snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:453 +#, python-format +msgid "performing differential transfer from '%(src)s' to '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:478 +#, python-format +msgid "rbd diff op failed - (ret=%(ret)s stderr=%(stderr)s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:488 +#, python-format +msgid "image '%s' not found - trying diff format name" +msgstr "" + +#: cinder/backup/drivers/ceph.py:493 +#, python-format +msgid "diff format image '%s' not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:528 +#, python-format +msgid "using --from-snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:543 +#, python-format +msgid "source snap '%s' is stale so deleting" +msgstr "" + +#: cinder/backup/drivers/ceph.py:555 +#, python-format +msgid "" +"snap='%(snap)s' does not exist in base image='%(base)s' - aborting " +"incremental backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:566 +#, python-format +msgid "creating backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:586 +#, python-format +msgid "differential backup transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:595 +msgid "differential backup transfer failed" +msgstr "" + +#: 
cinder/backup/drivers/ceph.py:625 +#, python-format +msgid "creating base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:634 +msgid "copying data" +msgstr "" + +#: cinder/backup/drivers/ceph.py:694 +#, python-format +msgid "looking for snapshot of backup base '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:697 +#, python-format +msgid "backup base '%s' has no snapshots" +msgstr "" + +#: cinder/backup/drivers/ceph.py:704 +#, python-format +msgid "backup '%s' has no snapshot" +msgstr "" + +#: cinder/backup/drivers/ceph.py:708 +#, python-format +msgid "backup should only have one snapshot but instead has %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:713 +#, python-format +msgid "found snapshot '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:734 +msgid "need non-zero volume size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:751 +#, python-format +msgid "Starting backup of volume='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:764 +msgid "forcing full backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:776 +#, python-format +msgid "backup '%s' finished." +msgstr "" + +#: cinder/backup/drivers/ceph.py:834 +msgid "adjusting restore vol size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:846 +#, python-format +msgid "trying incremental restore from base='%(base)s' snap='%(snap)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:858 +msgid "differential restore failed, trying full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:869 +#, python-format +msgid "restore transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:916 +#, python-format +msgid "rbd has %s extents" +msgstr "" + +#: cinder/backup/drivers/ceph.py:938 +msgid "dest volume is original volume - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:959 +msgid "destination has extents - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:964 +#, python-format +msgid "no restore point found for backup='%s', forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:995 +msgid "forcing full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1005 +#, python-format +msgid "starting restore from Ceph backup=%(src)s to volume=%(dest)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1016 +msgid "volume_file does not support fileno() so skipping fsync()" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1021 +msgid "restore finished successfully." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:1023 +#, python-format +msgid "restore finished with error - %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1029 +#, python-format +msgid "delete started for backup=%s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1034 +msgid "rbd image not found but continuing anyway so that db entry can be removed" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1037 +#, python-format +msgid "delete '%s' finished with warning" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1039 +#, python-format +msgid "delete '%s' finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:106 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:123 +#, python-format +msgid "single_user auth mode enabled, but %(param)s not set" +msgstr "" + +#: cinder/backup/drivers/swift.py:141 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:146 +#, python-format +msgid "container %s does not exist" +msgstr "" + +#: cinder/backup/drivers/swift.py:151 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/drivers/swift.py:157 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:173 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:182 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:192 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:209 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/drivers/swift.py:214 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:219 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:224 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/drivers/swift.py:234 +#, python-format +msgid "volume size %d is invalid." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:248 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:271 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/drivers/swift.py:278 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:287 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/drivers/swift.py:291 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/drivers/swift.py:297 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:301 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:304 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:312 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/drivers/swift.py:328 cinder/backup/drivers/tsm.py:324 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/drivers/swift.py:345 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/drivers/swift.py:350 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:356 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/drivers/swift.py:362 +#, python-format +msgid "" +"restoring object from swift. backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:378 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/drivers/swift.py:401 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:409 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:423 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:428 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:432 cinder/backup/drivers/tsm.py:378 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:446 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:455 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:458 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:468 cinder/backup/drivers/tsm.py:440 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/backup/drivers/tsm.py:85 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to create device hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:143 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to obtain backup success notification from " +"server.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:173 +#, python-format +msgid "" +"restore: %(vol_id)s Failed.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:199 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a block device." +msgstr "" + +#: cinder/backup/drivers/tsm.py:206 +#, python-format +msgid "backup: %(vol_id)s Failed. Cannot obtain real path to device %(path)s." +msgstr "" + +#: cinder/backup/drivers/tsm.py:213 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a file." +msgstr "" + +#: cinder/backup/drivers/tsm.py:260 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to remove backup hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:286 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to TSM, volume path: " +"%(volume_path)s," +msgstr "" + +#: cinder/backup/drivers/tsm.py:298 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:308 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:338 +#, python-format +msgid "" +"restore: starting restore of backup from TSM to volume %(volume_id)s, " +"backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:352 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:362 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:413 +#, python-format +msgid "" +"delete: %(vol_id)s Failed to run dsmc with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:421 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments with " +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:432 +#, python-format +msgid "" +"delete: %(vol_id)s Failed with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/brick/exception.py:55 +#, python-format +msgid "Exception in string format operation. 
msg='%s'" +msgstr "" + +#: cinder/brick/exception.py:85 +msgid "We are unable to locate any Fibre Channel devices." +msgstr "" + +#: cinder/brick/exception.py:89 +msgid "Unable to find a Fibre Channel volume device." +msgstr "" + +#: cinder/brick/exception.py:93 +#, python-format +msgid "Volume device not found at %(device)s." +msgstr "" + +#: cinder/brick/exception.py:97 +#, python-format +msgid "Unable to find Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:101 +#, python-format +msgid "Failed to create Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:105 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:109 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:113 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:117 +#, python-format +msgid "Connect to volume via protocol %(protocol)s not supported." +msgstr "" + +#: cinder/brick/initiator/connector.py:127 +#, python-format +msgid "Invalid InitiatorConnector protocol specified %(protocol)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:140 +#, python-format +msgid "Failed to access the device on the path %(path)s: %(error)s %(info)s." +msgstr "" + +#: cinder/brick/initiator/connector.py:229 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. Try" +" number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:242 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:317 +#, python-format +msgid "Could not find the iSCSI Initiator File %s" +msgstr "" + +#: cinder/brick/initiator/connector.py:609 +msgid "We are unable to locate any Fibre Channel devices" +msgstr "" + +#: cinder/brick/initiator/connector.py:619 +#, python-format +msgid "Looking for Fibre Channel dev %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:629 +msgid "Fibre Channel volume device not found." +msgstr "" + +#: cinder/brick/initiator/connector.py:633 +#, python-format +msgid "Fibre volume not yet found. Will rescan & retry. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:649 +#, python-format +msgid "Found Fibre Channel volume %(name)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:658 +#, python-format +msgid "Multipath device discovered %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:776 +#, python-format +msgid "AoE volume not yet found at: %(path)s. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:789 +#, python-format +msgid "Found AoE device %(path)s (after %(tries)s rediscover)" +msgstr "" + +#: cinder/brick/initiator/connector.py:815 +#, python-format +msgid "aoe-discover: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:825 +#, python-format +msgid "aoe-revalidate %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:834 +#, python-format +msgid "aoe-flush %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:858 +msgid "" +"Connection details not present. RemoteFsClient may not initialize " +"properly." 
+msgstr "" + +#: cinder/brick/initiator/connector.py:915 +msgid "Invalid connection_properties specified no device_path attribute" +msgstr "" + +#: cinder/brick/initiator/linuxfc.py:50 cinder/brick/initiator/linuxfc.py:56 +msgid "systool is not installed" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:99 +#: cinder/brick/initiator/linuxscsi.py:107 +#: cinder/brick/initiator/linuxscsi.py:124 +#, python-format +msgid "multipath call failed exit (%(code)s)" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:145 +#, python-format +msgid "Couldn't find multipath device %(line)s" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:149 +#, python-format +msgid "Found multipath device = %(mdev)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:140 +msgid "Attempting recreate of backing lun..." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:158 +#, python-format +msgid "" +"Failed to recover attempt to create iscsi backing lun for volume " +"id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:177 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:184 +#, python-format +msgid "" +"Created volume path %(vp)s,\n" +"content: %(vc)%" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:216 cinder/brick/iscsi/iscsi.py:365 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:227 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:258 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:262 +#, python-format +msgid "Volume path %s does not exist, nothing to remove." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:280 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:290 cinder/brick/iscsi/iscsi.py:550 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:375 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:469 +msgid "cinder-rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:489 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:513 cinder/brick/iscsi/iscsi.py:522 +#, python-format +msgid "Failed to create iscsi target for volume id:%s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:532 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:542 +#, python-format +msgid "Failed to remove iscsi target for volume id:%s." 
+msgstr "" + +#: cinder/brick/iscsi/iscsi.py:571 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 cinder/brick/local_dev/lvm.py:158 +#: cinder/brick/local_dev/lvm.py:474 cinder/brick/local_dev/lvm.py:503 +#: cinder/brick/local_dev/lvm.py:546 cinder/brick/local_dev/lvm.py:609 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 cinder/brick/local_dev/lvm.py:159 +#: cinder/brick/local_dev/lvm.py:475 cinder/brick/local_dev/lvm.py:504 +#: cinder/brick/local_dev/lvm.py:547 cinder/brick/local_dev/lvm.py:610 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 cinder/brick/local_dev/lvm.py:160 +#: cinder/brick/local_dev/lvm.py:476 cinder/brick/local_dev/lvm.py:505 +#: cinder/brick/local_dev/lvm.py:548 cinder/brick/local_dev/lvm.py:611 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, python-format +msgid "Unable to locate Volume Group %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:157 +msgid "Error querying thin pool about data_percent" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:370 +#, python-format +msgid "Unable to find VG: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:420 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:473 +msgid "Error creating Volume" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:489 +#, python-format +msgid "Unable to find LV: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:502 +msgid "Error creating snapshot" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:545 +msgid "Error activating LV" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:563 +#, python-format +msgid "Error reported running lvremove: CMD: %(command)s, RESPONSE: %(response)s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:568 +msgid "Attempting udev settle and retry of lvremove..." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:608 +msgid "Error extending Volume" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:39 +msgid "nfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:45 +msgid "glusterfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:86 +#, python-format +msgid "Already mounted: %s" +msgstr "" + +#: cinder/common/config.py:125 +msgid "Deploy v1 of the Cinder API." +msgstr "" + +#: cinder/common/config.py:128 +msgid "Deploy v2 of the Cinder API." +msgstr "" + +#: cinder/common/sqlalchemyutils.py:66 +#: cinder/openstack/common/db/sqlalchemy/utils.py:72 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:114 +#: cinder/openstack/common/db/sqlalchemy/utils.py:120 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/compute/nova.py:97 +#, python-format +msgid "Novaclient connection created using URL: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:63 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:190 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:843 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1842 +#, python-format +msgid "VolumeType %s deletion failed, VolumeType in use." 
+msgstr "" + +#: cinder/db/sqlalchemy/api.py:2530 +#, python-format +msgid "No backup with id %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2615 +msgid "Volume must be available" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2639 +#, python-format +msgid "Volume in unexpected state %s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2662 +#, python-format +msgid "" +"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " +"%(status)s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:37 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:64 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:240 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:269 +msgid "Downgrade from initial Cinder install is unsupported." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:49 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:74 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:105 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:45 +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:48 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:46 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:127 +msgid "Dropping foreign key reservations_ibfk_1 failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:133 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:140 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:147 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:60 +msgid "Exception while creating table 'volume_glance_metadata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:75 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:68 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:58 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:61 +msgid "transfers table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:31 +msgid "migrations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:61 +#, python-format +msgid "Table |%s| not created" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:37 +#, python-format +msgid "Exception while dropping table %s." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:100 +#, python-format +msgid "Exception while creating table %s." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:34 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:43 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:49 +#, python-format +msgid "Column |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:92 +msgid "encryption_key_id column not dropped from volumes" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:100 +msgid "encryption_key_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:105 +msgid "volume_type_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:113 +msgid "encryption table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:49 +msgid "Table quality_of_service_specs not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:60 +msgid "Added qos_specs_id column to volume type table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:85 +msgid "Dropping foreign key volume_types_ibfk_1 failed" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:93 +msgid "Dropping qos_specs_id column failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:100 +msgid "Dropping quality_of_service_specs table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:59 +msgid "volume_admin_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:48 +msgid "" +"Found existing 'default' entries in the quota_classes table. Skipping " +"insertion of default values." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:72 +msgid "Added default quota class data into the DB." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:74 +msgid "Default quota class data not inserted into the DB." +msgstr "" + +#: cinder/image/glance.py:161 cinder/image/glance.py:169 +#, python-format +msgid "Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:94 cinder/image/image_utils.py:199 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/image/image_utils.py:101 +#, python-format +msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:109 cinder/image/image_utils.py:192 +#, python-format +msgid "" +"Size is %(image_size)dGB and doesn't fit in a volume of size " +"%(volume_size)dGB." +msgstr "" + +#: cinder/image/image_utils.py:157 +#, python-format +msgid "" +"qemu-img is not installed and image is of type %s. Only RAW images can " +"be used if qemu-img is not installed." +msgstr "" + +#: cinder/image/image_utils.py:164 +msgid "" +"qemu-img is not installed and the disk format is not specified. Only RAW" +" images can be used if qemu-img is not installed." 
+msgstr "" + +#: cinder/image/image_utils.py:178 +#, python-format +msgid "Copying image from %(tmp)s to volume %(dest)s - size: %(size)s" +msgstr "" + +#: cinder/image/image_utils.py:206 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:224 +#, python-format +msgid "Converted to %(vol_format)s, but format is now %(file_format)s" +msgstr "" + +#: cinder/image/image_utils.py:260 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:78 +msgid "" +"config option keymgr.fixed_key has not been defined: some operations may " +"fail unexpectedly" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:80 +msgid "keymgr.fixed_key not defined" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:134 +#, python-format +msgid "Not deleting key %s" +msgstr "" + +#: cinder/openstack/common/eventlet_backdoor.py:140 +#, python-format +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/fileutils.py:64 +#, python-format +msgid "Reloading cached file %s" +msgstr "" + +#: cinder/openstack/common/gettextutils.py:252 +msgid "Message objects do not support addition." +msgstr "" + +#: cinder/openstack/common/gettextutils.py:261 +msgid "" +"Message objects do not support str() because they may contain non-ascii " +"characters. Please use unicode() or translate() instead." +msgstr "" + +#: cinder/openstack/common/imageutils.py:96 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:189 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:200 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:227 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:235 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/log.py:301 +#, python-format +msgid "Deprecated: %s" +msgstr "" + +#: cinder/openstack/common/log.py:402 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:453 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:623 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:82 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:89 +#: cinder/tests/brick/test_brick_connector.py:466 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:129 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:136 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:43 +#, python-format +msgid "Unexpected argument for periodic task creation: %(arg)s." 
+msgstr "" + +#: cinder/openstack/common/periodic_task.py:134 +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:139 +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:177 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:186 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." +msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:127 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/openstack/common/processutils.py:142 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:167 +#: cinder/openstack/common/processutils.py:239 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:345 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:179 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/openstack/common/processutils.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:318 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:220 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/openstack/common/processutils.py:224 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/openstack/common/service.py:175 +#: cinder/openstack/common/service.py:269 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:187 +msgid "Exception during rpc cleanup." 
+msgstr "" + +#: cinder/openstack/common/service.py:238 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:275 +msgid "Unhandled exception" +msgstr "" + +#: cinder/openstack/common/service.py:308 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/openstack/common/service.py:327 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/openstack/common/service.py:337 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/openstack/common/service.py:354 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/openstack/common/service.py:358 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/service.py:362 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/openstack/common/service.py:392 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/openstack/common/service.py:410 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/openstack/common/sslutils.py:98 +#, python-format +msgid "Invalid SSL version : %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:86 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/strutils.py:182 +#, python-format +msgid "Invalid string format: %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:189 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/openstack/common/versionutils.py:69 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and " +"may be removed in %(remove_in)s." +msgstr "" + +#: cinder/openstack/common/versionutils.py:73 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s and may be removed in " +"%(remove_in)s. It will not be superseded." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:29 +msgid "An unknown error occurred in crypto utils." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:36 +#, python-format +msgid "Block size of %(given)d is too big, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:45 +#, python-format +msgid "Length of %(given)d is too long, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/db/exception.py:44 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:487 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:538 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:610 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/utils.py:33 +msgid "Sort key supplied was not valid." +msgstr "" + +#: cinder/openstack/common/notifier/api.py:129 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:145 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:164 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. 
Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:105 +#, python-format +msgid "" +"An RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:83 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "no calling threads waiting for msg_id: %s, message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:216 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshold: %d. There " +"could be a MulticallProxyWaiter leak." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:299 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:345 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "received %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:422 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:423 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:280 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:459 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:594 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:597 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:631 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:640 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:668 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint."
+msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:166 +#: cinder/openstack/common/rpc/impl_qpid.py:164 +msgid "Failed to process message... skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:477 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:499 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:536 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:552 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:606 +#: cinder/openstack/common/rpc/impl_qpid.py:507 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:624 +#: cinder/openstack/common/rpc/impl_qpid.py:522 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:628 +#: cinder/openstack/common/rpc/impl_qpid.py:526 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:667 +#: cinder/openstack/common/rpc/impl_qpid.py:561 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:84 +#, python-format +msgid "Invalid value for qpid_topology_version: %d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:455 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:461 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:474 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:534 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:101 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:136 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:137 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:138 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:146 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:158 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:200 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:205 +msgid "You cannot send on this socket." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:267 +#, python-format +msgid "Running func with context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:305 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:387 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:437 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:443 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:475 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:481 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:497 +#, python-format +msgid "Required IPC directory does not exist at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:506 +#, python-format +msgid "Permission denied to IPC directory at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:509 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:543 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:562 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:590 +msgid "Skipping topic registration. Already registered." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:597 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:649 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:662 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:675 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:678 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:681 +#, python-format +msgid "Received message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:682 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:691 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:698 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:721 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:724 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:728 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:731 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:771 +#, python-format +msgid "topic is %s." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:815 +#, python-format +msgid "rpc_zmq_matchmaker = %(orig)s is deprecated; use %(new)s instead" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#: cinder/openstack/common/rpc/matchmaker_ring.py:79 +#: cinder/openstack/common/rpc/matchmaker_ring.py:97 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:54 +#, python-format +msgid "extra_spec requirement '%(req)s' does not match '%(cap)s'" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:67 +#, python-format +msgid "%(host_state)s fails resource_type extra_specs requirements" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:43 +msgid "Re-scheduling is disabled." +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:52 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/scheduler/driver.py:69 +msgid "Must implement host_passes_filters" +msgstr "" + +#: cinder/scheduler/driver.py:74 +msgid "Must implement find_retype_host" +msgstr "" + +#: cinder/scheduler/driver.py:78 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:82 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:98 +#, python-format +msgid "cannot place volume %(id)s on %(host)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:114 +#, python-format +msgid "No valid hosts for volume %(id)s with type %(type)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:125 +#, python-format +msgid "" +"Current host not valid for volume %(id)s with type %(type)s, migration " +"not allowed" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:156 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:174 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:207 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:259 +#, python-format +msgid "Filtered %s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:276 +#, python-format +msgid "Choosing %s" +msgstr "" + +#: cinder/scheduler/host_manager.py:264 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:269 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:294 +#, python-format +msgid "volume service is down or disabled. (host: %s)" +msgstr "" + +#: cinder/scheduler/manager.py:63 +msgid "" +"ChanceScheduler and SimpleScheduler have been deprecated due to lack of " +"support for advanced features like: volume types, volume encryption, QoS " +"etc. These two schedulers can be fully replaced by FilterScheduler with " +"certain combination of filters and weighers." 
+msgstr "" + +#: cinder/scheduler/manager.py:98 cinder/scheduler/manager.py:100 +msgid "Failed to create scheduler manager volume flow" +msgstr "" + +#: cinder/scheduler/manager.py:159 +msgid "New volume type not specified in request_spec." +msgstr "" + +#: cinder/scheduler/manager.py:174 +#, python-format +msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." +msgstr "" + +#: cinder/scheduler/manager.py:192 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:68 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%s'" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:43 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:57 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:53 +msgid "No volume_id provided to populate a request_spec from" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:116 +#, python-format +msgid "Failed to schedule_create_volume: %(cause)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:135 +#, python-format +msgid "Failed notifying on %(topic)s payload %(payload)s" +msgstr "" + +#: cinder/tests/fake_driver.py:57 cinder/volume/driver.py:784 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:76 cinder/volume/driver.py:884 +#, python-format +msgid "FAKE ISER: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:97 +msgid "local_path not implemented" +msgstr "" + +#: cinder/tests/fake_driver.py:124 cinder/tests/fake_driver.py:129 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:70 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:78 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:94 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:97 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:58 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_netapp_nfs.py:360 +#, python-format +msgid "Share %(share)s and file name %(file_name)s" +msgstr "" + +#: cinder/tests/test_rbd.py:768 cinder/volume/drivers/rbd.py:175 +msgid "flush() not supported in this version of librbd" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:260 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1507 +#, python-format +msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1510 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1515 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:60 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:61 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: 
cinder/tests/test_xiv_ds8k.py:102 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/tests/api/contrib/test_backups.py:741 +msgid "Invalid input" +msgstr "" + +#: cinder/tests/integrated/test_login.py:29 +#, python-format +msgid "volume: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:32 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:42 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:50 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:58 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:100 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:103 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:121 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:148 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:159 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:166 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/transfer/api.py:68 +msgid "Volume in unexpected state" +msgstr "" + +#: cinder/transfer/api.py:102 cinder/volume/api.py:367 +msgid "status must be available" +msgstr "" + +#: cinder/transfer/api.py:119 +#, python-format +msgid "Failed to create transfer record for %s" +msgstr "" + +#: cinder/transfer/api.py:136 +#, python-format +msgid "Attempt to transfer %s with invalid auth key." +msgstr "" + +#: cinder/transfer/api.py:156 cinder/volume/flows/api/create_volume.py:508 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/transfer/api.py:182 +#, python-format +msgid "Failed to update quota donating volume transfer id %s" +msgstr "" + +#: cinder/transfer/api.py:199 +#, python-format +msgid "Volume %s has been transferred."
+msgstr "" + +#: cinder/volume/api.py:143 +#, python-format +msgid "Unable to query if %s is in the availability zone set" +msgstr "" + +#: cinder/volume/api.py:171 cinder/volume/api.py:173 +msgid "Failed to create api volume flow" +msgstr "" + +#: cinder/volume/api.py:202 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:214 +#, python-format +msgid "Volume status must be available or error, but current status is: %s" +msgstr "" + +#: cinder/volume/api.py:224 +msgid "Volume cannot be deleted while migrating" +msgstr "" + +#: cinder/volume/api.py:229 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:285 cinder/volume/api.py:350 +#: cinder/volume/qos_specs.py:240 cinder/volume/volume_types.py:67 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:370 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:377 +msgid "status must be in-use to detach" +msgstr "" + +#: cinder/volume/api.py:388 +msgid "Volume status must be available to reserve" +msgstr "" + +#: cinder/volume/api.py:464 +msgid "Snapshot cannot be created while volume is migrating" +msgstr "" + +#: cinder/volume/api.py:468 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:490 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:502 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:553 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/api.py:581 cinder/volume/flows/api/create_volume.py:208 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:585 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:589 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:720 cinder/volume/api.py:772 +msgid "Volume status must be available/in-use." +msgstr "" + +#: cinder/volume/api.py:723 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/api.py:752 +msgid "Volume status must be available to extend." +msgstr "" + +#: cinder/volume/api.py:757 +#, python-format +msgid "" +"New size for extend must be greater than current size. (current: " +"%(size)s, extended: %(new_size)s)" +msgstr "" + +#: cinder/volume/api.py:778 +msgid "Volume is already part of an active migration" +msgstr "" + +#: cinder/volume/api.py:784 +msgid "volume must not have snapshots" +msgstr "" + +#: cinder/volume/api.py:797 +#, python-format +msgid "No available service named %s" +msgstr "" + +#: cinder/volume/api.py:803 +msgid "Destination host must be different than current host" +msgstr "" + +#: cinder/volume/api.py:833 +msgid "Source volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:837 +msgid "Destination volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:842 +#, python-format +msgid "Destination has migration_status %(stat)s, expected %(exp)s." +msgstr "" + +#: cinder/volume/api.py:853 +msgid "Volume status must be available to update readonly flag." 
+msgstr "" + +#: cinder/volume/api.py:862 +#, python-format +msgid "Unable to update type due to incorrect status on volume: %s" +msgstr "" + +#: cinder/volume/api.py:868 +#, python-format +msgid "Volume %s is already part of an active migration." +msgstr "" + +#: cinder/volume/api.py:874 +#, python-format +msgid "migration_policy must be 'on-demand' or 'never', passed: %s" +msgstr "" + +#: cinder/volume/api.py:887 +#, python-format +msgid "Invalid volume_type passed: %s" +msgstr "" + +#: cinder/volume/api.py:900 +#, python-format +msgid "New volume_type same as original: %s" +msgstr "" + +#: cinder/volume/api.py:915 +msgid "Retype cannot change encryption requirements" +msgstr "" + +#: cinder/volume/api.py:927 +msgid "Retype cannot change front-end qos specs for in-use volumes" +msgstr "" + +#: cinder/volume/driver.py:189 cinder/volume/drivers/netapp/nfs.py:174 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:282 +#, python-format +msgid "copy_data_between_volumes %(src)s -> %(dest)s." +msgstr "" + +#: cinder/volume/driver.py:295 cinder/volume/driver.py:309 +#, python-format +msgid "Failed to attach volume %(vol)s" +msgstr "" + +#: cinder/volume/driver.py:327 +#, python-format +msgid "Failed to copy volume %(src)s to %(dest)d" +msgstr "" + +#: cinder/volume/driver.py:340 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:358 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:394 +#, python-format +msgid "Unable to access the backend storage via the path %(path)s." +msgstr "" + +#: cinder/volume/driver.py:433 +#, python-format +msgid "Creating a new backup for volume %s." +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Restoring backup %(backup)s to volume %(volume)s." +msgstr "" + +#: cinder/volume/driver.py:474 +msgid "Extend volume not implemented" +msgstr "" + +#: cinder/volume/driver.py:533 cinder/volume/drivers/emc/emc_smis_iscsi.py:113 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:546 +#, python-format +msgid "ISCSI discovery attempt failed for:%s" +msgstr "" + +#: cinder/volume/driver.py:548 +#, python-format +msgid "Error from iscsiadm -m discovery: %s" +msgstr "" + +#: cinder/volume/driver.py:595 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:599 cinder/volume/drivers/emc/emc_smis_iscsi.py:156 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:696 +msgid "The volume driver requires the iSCSI initiator name in the connector." +msgstr "" + +#: cinder/volume/driver.py:726 cinder/volume/driver.py:845 +#: cinder/volume/drivers/eqlx.py:247 cinder/volume/drivers/lvm.py:359 +#: cinder/volume/drivers/zadara.py:650 +#: cinder/volume/drivers/emc/emc_smis_common.py:859 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:235 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:602 +#: cinder/volume/drivers/netapp/iscsi.py:1032 +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/nexenta/iscsi.py:538 +#: cinder/volume/drivers/windows/windows.py:205 +msgid "Updating volume stats" +msgstr "" + +#: cinder/volume/driver.py:924 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:203 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." 
+msgstr "" + +#: cinder/volume/manager.py:209 +msgid "" +"ThinLVMVolumeDriver is deprecated, please configure LVMISCSIDriver and " +"lvm_type=thin. Continuing with those settings." +msgstr "" + +#: cinder/volume/manager.py:228 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)" +msgstr "" + +#: cinder/volume/manager.py:235 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s" +msgstr "" + +#: cinder/volume/manager.py:244 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:257 +#, python-format +msgid "Failed to re-export volume %s: setting to error state" +msgstr "" + +#: cinder/volume/manager.py:264 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:271 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:273 +#, python-format +msgid "" +"Error encountered during re-exporting phase of driver initialization: " +"%(name)s" +msgstr "" + +#: cinder/volume/manager.py:283 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:328 cinder/volume/manager.py:330 +msgid "Failed to create manager volume flow" +msgstr "" + +#: cinder/volume/manager.py:374 cinder/volume/manager.py:391 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:380 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:389 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:394 +#, python-format +msgid "Cannot delete volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:422 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:427 +#, python-format +msgid "volume %s: glance metadata deleted" +msgstr "" + +#: cinder/volume/manager.py:430 +#, python-format +msgid "no glance metadata found for volume %s" +msgstr "" + +#: cinder/volume/manager.py:434 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:451 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:462 +#, python-format +msgid "snapshot %(snap_id)s: creating" +msgstr "" + +#: cinder/volume/manager.py:490 +#, python-format +msgid "" +"Failed updating %(snapshot_id)s metadata using the provided volumes " +"%(volume_id)s metadata" +msgstr "" + +#: cinder/volume/manager.py:496 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:508 cinder/volume/manager.py:518 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:526 +#, python-format +msgid "Cannot delete snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:556 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:559 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:579 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:583 +msgid "being attached by another host" +msgstr "" + +#: cinder/volume/manager.py:587 +msgid "being attached by different mode" +msgstr "" + +#: cinder/volume/manager.py:590 +msgid "status must be available or attaching" +msgstr "" + +#: cinder/volume/manager.py:698 +#, python-format +msgid 
"Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "" + +#: cinder/volume/manager.py:760 +#, python-format +msgid "Unable to fetch connection information from backend: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:807 +#, python-format +msgid "Unable to terminate volume connection: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:854 +msgid "failed to create new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:857 +msgid "timeout creating new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:880 +#, python-format +msgid "Failed to copy volume %(vol1)s to %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:909 +#, python-format +msgid "" +"migrate_volume_completion: completing migration for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:921 +#, python-format +msgid "" +"migrate_volume_completion is cleaning up an error for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:940 +#, python-format +msgid "Failed to delete migration source vol %(vol)s: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:976 +#, python-format +msgid "volume %s: calling driver migrate_volume" +msgstr "" + +#: cinder/volume/manager.py:1016 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/manager.py:1024 +#, python-format +msgid "" +"Unable to update stats, %(driver_name)s -%(driver_version)s " +"%(config_group)s driver is uninitialized." +msgstr "" + +#: cinder/volume/manager.py:1044 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/manager.py:1091 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to extend volume by %(s_size)sG, " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/manager.py:1103 +#, python-format +msgid "volume %s: extending" +msgstr "" + +#: cinder/volume/manager.py:1105 +#, python-format +msgid "volume %s: extended successfully" +msgstr "" + +#: cinder/volume/manager.py:1107 +#, python-format +msgid "volume %s: Error trying to extend volume" +msgstr "" + +#: cinder/volume/manager.py:1169 +msgid "Failed to update usages while retyping volume." +msgstr "" + +#: cinder/volume/manager.py:1170 +msgid "Failed to get old volume type quota reservations" +msgstr "" + +#: cinder/volume/manager.py:1190 +#, python-format +msgid "Volume %s: retyped succesfully" +msgstr "" + +#: cinder/volume/manager.py:1193 +#, python-format +msgid "" +"Volume %s: driver error when trying to retype, falling back to generic " +"mechanism." +msgstr "" + +#: cinder/volume/manager.py:1204 +msgid "Retype requires migration but is not allowed." +msgstr "" + +#: cinder/volume/manager.py:1212 +msgid "Volume must not have snapshots." 
+msgstr "" + +#: cinder/volume/qos_specs.py:57 +#, python-format +msgid "Valid consumer of QoS specs are: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:84 cinder/volume/qos_specs.py:105 +#: cinder/volume/qos_specs.py:155 cinder/volume/qos_specs.py:197 +#: cinder/volume/qos_specs.py:211 cinder/volume/qos_specs.py:225 +#: cinder/volume/volume_types.py:43 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:123 cinder/volume/qos_specs.py:140 +#: cinder/volume/qos_specs.py:272 cinder/volume/volume_types.py:52 +#: cinder/volume/volume_types.py:99 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/qos_specs.py:156 +#, python-format +msgid "Failed to get all associations of qos specs %s" +msgstr "" + +#: cinder/volume/qos_specs.py:189 +#, python-format +msgid "" +"Type %(type_id)s is already associated with another qos specs: " +"%(qos_specs_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:198 +#, python-format +msgid "Failed to associate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:212 +#, python-format +msgid "Failed to disassociate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:226 +#, python-format +msgid "Failed to disassociate qos specs %s." +msgstr "" + +#: cinder/volume/qos_specs.py:284 cinder/volume/volume_types.py:111 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/utils.py:144 +#, python-format +msgid "" +"Incorrect value error: %(blocksize)s, it may indicate that " +"'volume_dd_blocksize' was configured incorrectly. Fall back to default." +msgstr "" + +#: cinder/volume/volume_types.py:130 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:131 +#: cinder/volume/drivers/block_device.py:143 cinder/volume/drivers/lvm.py:654 +#: cinder/volume/drivers/lvm.py:669 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:157 cinder/volume/drivers/lvm.py:687 +#, python-format +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:183 cinder/volume/drivers/lvm.py:483 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:200 cinder/volume/drivers/lvm.py:504 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:272 cinder/volume/drivers/lvm.py:227 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:287 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:311 cinder/volume/drivers/lvm.py:300 +#: cinder/volume/drivers/zadara.py:509 cinder/volume/drivers/nexenta/nfs.py:189 +#, python-format +msgid "Creating clone of volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:380 +msgid "No free disk" +msgstr "" + +#: cinder/volume/drivers/block_device.py:393 +msgid "No big enough free disk" +msgstr "" + +#: cinder/volume/drivers/coraid.py:84 +#, python-format +msgid "Invalid ESM url scheme \"%s\". Supported https only." +msgstr "" + +#: cinder/volume/drivers/coraid.py:111 +msgid "Invalid REST handle name. Expected path." 
+msgstr "" + +#: cinder/volume/drivers/coraid.py:134 +#, python-format +msgid "Call to json.loads() failed: %(ex)s. Response: %(resp)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:224 +msgid "Session is expired. Relogin on ESM." +msgstr "" + +#: cinder/volume/drivers/coraid.py:244 +msgid "Reply is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:246 +msgid "Error message is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:284 +#, python-format +msgid "Coraid Appliance ping failed: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:297 +#, python-format +msgid "Volume \"%(name)s\" created with VSX LUN \"%(lun)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:311 +#, python-format +msgid "Volume \"%s\" deleted." +msgstr "" + +#: cinder/volume/drivers/coraid.py:315 +#, python-format +msgid "Resize volume \"%(name)s\" to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:319 +#, python-format +msgid "Repository for volume \"%(name)s\" found: \"%(repo)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:333 +#, python-format +msgid "Volume \"%(name)s\" resized. New size is %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:385 +msgid "Cannot create clone volume in different repository." +msgstr "" + +#: cinder/volume/drivers/coraid.py:505 +#, python-format +msgid "Initialize connection %(shelf)s/%(lun)s for %(name)s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:139 +#, python-format +msgid "" +"CLI output\n" +"%s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:154 +msgid "Reading CLI MOTD" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:158 +#, python-format +msgid "Setting CLI terminal width: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:162 +#, python-format +msgid "Sending CLI command: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:169 +msgid "Error executing EQL command" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:199 +#, python-format +msgid "EQL-driver: executing \"%s\"" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:208 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:383 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:215 cinder/volume/drivers/san/san.py:149 +#, python-format +msgid "Error running SSH command: %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:282 +#, python-format +msgid "Volume %s does not exist, it may have already been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:300 +#, python-format +msgid "EQL-driver: Setup is complete, group IP is %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:304 +msgid "Failed to setup the Dell EqualLogic driver" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:320 +#, python-format +msgid "Failed to create volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:329 +#, python-format +msgid "Volume %s was not found while trying to delete it" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:333 +#, python-format +msgid "Failed to delete volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:348 +#, python-format +msgid "Failed to create snapshot of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:361 +#, python-format +msgid "Failed to create volume from snapshot %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:374 +#, python-format +msgid "Failed to create clone of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:384 +#, python-format +msgid "Failed to delete snapshot %(snap)s of volume %(vol)s" +msgstr "" + +#: 
cinder/volume/drivers/eqlx.py:405 +#, python-format +msgid "Failed to initialize connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:415 +#, python-format +msgid "Failed to terminate connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:436 +#, python-format +msgid "Volume %s was not found; it may have been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:440 +#, python-format +msgid "Failed to ensure export of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:459 +#, python-format +msgid "Failed to extend_volume %(name)s from %(current_size)sGB to %(new_size)sGB" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:86 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:91 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:103 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:161 +#, python-format +msgid "Cloning volume %(src)s to volume %(dst)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:166 +msgid "Volume status must be 'available'." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:202 cinder/volume/drivers/nfs.py:122 +#: cinder/volume/drivers/netapp/nfs.py:753 +#, python-format +msgid "casted to %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:216 +msgid "Snapshot status must be \"available\" to clone." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:238 +#, python-format +msgid "snapshot: %(snap)s, volume: %(vol)s, volume_size: %(size)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:257 +#, python-format +msgid "will copy from snapshot at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:275 cinder/volume/drivers/nfs.py:172 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:373 +#, python-format +msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:403 +#, python-format +msgid "nova call result: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:405 +msgid "Call to Nova to create snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:427 +msgid "Nova returned \"error\" status while creating snapshot." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:431 +#, python-format +msgid "Status of snapshot %(id)s is now %(status)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:444 +#, python-format +msgid "Timed out while waiting for Nova update for creation of snapshot %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:456 +#, python-format +msgid "create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:457 +#, python-format +msgid "volume id: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:532 +msgid "'active' must be present when writing snap_info." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:562 +#, python-format +msgid "deleting snapshot %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:566 +msgid "Volume status must be \"available\" or \"in-use\"." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:582 +#, python-format +msgid "" +"Snapshot record for %s is not present, allowing snapshot_delete to " +"proceed."
+msgstr "" + +#: cinder/volume/drivers/glusterfs.py:587 +#, python-format +msgid "snapshot_file for this snap is %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:608 +#, python-format +msgid "No base file found for %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:625 +#, python-format +msgid "No %(base_id)s found for %(file)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:680 +#, python-format +msgid "No file found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:690 +#, python-format +msgid "No snap found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:701 +#, python-format +msgid "No file depends on %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:727 +#, python-format +msgid "Check condition failed: %s expected to be None." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:778 +msgid "Call to Nova delete snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:796 +#, python-format +msgid "status of snapshot %s is still \"deleting\"... waiting" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:802 +#, python-format +msgid "Unable to delete snapshot %(id)s, status: %(status)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:815 +#, python-format +msgid "Timed out while waiting for Nova update for deletion of snapshot %(id)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:904 +#, python-format +msgid "%s must be a valid raw or qcow2 image." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:967 +msgid "Extend volume is only supported for this driver when no snapshots exist." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:975 +#, python-format +msgid "Unrecognized backing format: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:990 +#, python-format +msgid "creating new volume at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:993 +#, python-format +msgid "file already exists at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1019 cinder/volume/drivers/nfs.py:159 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1021 +#, python-format +msgid "Available shares: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1038 +#, python-format +msgid "" +"GlusterFS share at %(dir)s is not writable by the Cinder volume service. " +"Snapshot operations will not be supported." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:96 +#, python-format +msgid "GPFS is not active. Detailed output: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:97 +#, python-format +msgid "GPFS is not running - state: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:140 +msgid "Option gpfs_mount_point_base is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:147 +msgid "Option gpfs_images_share_mode is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:153 +msgid "Option gpfs_images_dir is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:160 +#, python-format +msgid "" +"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " +"belong to different file systems" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:169 +#, python-format +msgid "" +"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in " +"cluster daemon level %(cur)s - must be at least at level %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:183 +#, python-format +msgid "%s must be an absolute path." 
+msgstr "" + +#: cinder/volume/drivers/gpfs.py:188 +#, python-format +msgid "%s is not a directory." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:197 +#, python-format +msgid "" +"The GPFS filesystem %(fs)s is not at the required release level. Current" +" level is %(cur)s, must be at least %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:556 +#, python-format +msgid "Failed to resize volume %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:604 +#, python-format +msgid "mkfs failed on volume %(vol)s, error message was: %(err)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:637 +#, python-format +msgid "" +"%s cannot be accessed. Verify that GPFS is active and file system is " +"mounted." +msgstr "" + +#: cinder/volume/drivers/lvm.py:189 +#, python-format +msgid "Unabled to delete due to existing snapshot for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:215 +#, python-format +msgid "Volume device file path %s does not exist." +msgstr "" + +#: cinder/volume/drivers/lvm.py:221 +#, python-format +msgid "Size for volume: %s not found, cannot secure delete." +msgstr "" + +#: cinder/volume/drivers/lvm.py:262 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:361 +#, python-format +msgid "Unable to update stats on non-initialized Volume Group: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:462 +#, python-format +msgid "Error creating iSCSI target, retrying creation for target: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:482 +#, python-format +msgid "volume_info:%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:518 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:519 cinder/volume/drivers/lvm.py:724 +#: cinder/volume/drivers/huawei/rest_common.py:1225 +#, python-format +msgid "%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:573 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/nfs.py:109 +msgid "Driver specific implementation needs to return mount_point_base." +msgstr "" + +#: cinder/volume/drivers/nfs.py:263 +#, python-format +msgid "Expected volume size was %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:264 +#, python-format +msgid " but size is now %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:361 +#, python-format +msgid "%s is already mounted" +msgstr "" + +#: cinder/volume/drivers/nfs.py:421 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:426 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/nfs.py:431 +#, python-format +msgid "NFS config 'nfs_oversub_ratio' invalid. Must be > 0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:439 +#, python-format +msgid "NFS config 'nfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:493 +#, python-format +msgid "Selected %s as target nfs share." 
+msgstr "" + +#: cinder/volume/drivers/nfs.py:526 +#, python-format +msgid "%s is above nfs_used_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:529 +#, python-format +msgid "%s is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:532 +#, python-format +msgid "%s reserved space is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/rbd.py:160 +#, python-format +msgid "Invalid argument - whence=%s not supported" +msgstr "" + +#: cinder/volume/drivers/rbd.py:164 +msgid "Invalid argument" +msgstr "" + +#: cinder/volume/drivers/rbd.py:183 +msgid "fileno() not supported by RBD()" +msgstr "" + +#: cinder/volume/drivers/rbd.py:210 +#, python-format +msgid "error opening rbd image %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:259 +msgid "rados and rbd python libraries not found" +msgstr "" + +#: cinder/volume/drivers/rbd.py:265 +msgid "error connecting to ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:346 cinder/volume/drivers/sheepdog.py:178 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:377 +#, python-format +msgid "clone depth exceeds limit of %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:411 +#, python-format +msgid "maximum clone depth (%d) has been reached - flattening source volume" +msgstr "" + +#: cinder/volume/drivers/rbd.py:423 +#, python-format +msgid "flattening source volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:435 +#, python-format +msgid "creating snapshot='%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:445 +#, python-format +msgid "cloning '%(src_vol)s@%(src_snap)s' to '%(dest)s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:459 +msgid "clone created successfully" +msgstr "" + +#: cinder/volume/drivers/rbd.py:468 +#, python-format +msgid "creating volume '%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:484 +#, python-format +msgid "flattening %(pool)s/%(img)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:490 +#, python-format +msgid "cloning %(pool)s/%(img)s@%(snap)s to %(dst)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:527 +msgid "volume has no backup snaps" +msgstr "" + +#: cinder/volume/drivers/rbd.py:550 +#, python-format +msgid "volume %s is not a clone" +msgstr "" + +#: cinder/volume/drivers/rbd.py:568 +#, python-format +msgid "deleting parent snapshot %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:579 +#, python-format +msgid "deleting parent %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:593 +#, python-format +msgid "volume %s no longer exists in backend" +msgstr "" + +#: cinder/volume/drivers/rbd.py:609 +msgid "volume has clone snapshot(s)" +msgstr "" + +#: cinder/volume/drivers/rbd.py:625 +#, python-format +msgid "deleting rbd volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:629 +msgid "" +"ImageBusy error raised while deleting rbd volume. This may have been " +"caused by a connection from a client that has crashed and, if so, may be " +"resolved by retrying the delete after 30 seconds has elapsed." 
+msgstr "" + +#: cinder/volume/drivers/rbd.py:642 +msgid "volume is a clone so cleaning references" +msgstr "" + +#: cinder/volume/drivers/rbd.py:696 +#, python-format +msgid "connection data: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:705 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:709 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:712 +msgid "Not an rbd snapshot" +msgstr "" + +#: cinder/volume/drivers/rbd.py:724 +#, python-format +msgid "not cloneable: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:728 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:733 +msgid "rbd image clone requires image format to be 'raw' but image {0} is '{1}'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:747 +#, python-format +msgid "Unable to open image %(loc)s: %(err)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:817 +msgid "volume backup complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:830 +msgid "volume restore complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:840 cinder/volume/drivers/sheepdog.py:195 +#, python-format +msgid "Failed to Extend Volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:845 cinder/volume/drivers/sheepdog.py:200 +#: cinder/volume/drivers/windows/windows.py:223 +#, python-format +msgid "Extend volume from %(old_size)s GB to %(new_size)s GB." +msgstr "" + +#: cinder/volume/drivers/scality.py:67 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:78 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:84 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:105 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:139 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:59 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:64 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:144 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:151 +#, python-format +msgid "" +"Failed to make httplib connection SolidFire Cluster: %s (verify san_ip " +"settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:154 +#, python-format +msgid "Failed to make httplib connection: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:161 +#, python-format +msgid "" +"Request to SolidFire cluster returned bad status: %(status)s / %(reason)s" +" (check san_login/san_password settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:166 +#, python-format +msgid "HTTP request failed, with status: %(status)s and reason: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:177 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:183 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:187 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:189 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:195 +#, python-format +msgid "Detected 
xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:202 +#: cinder/volume/drivers/solidfire.py:271 +#: cinder/volume/drivers/solidfire.py:366 +#, python-format +msgid "API response: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:222 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:253 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:315 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:398 +msgid "Failed to get model update from clone" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:410 +#, python-format +msgid "Failed volume create: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:425 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:460 +#, python-format +msgid "Failed to get SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:469 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:478 +#, python-format +msgid "Volume %s, not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:481 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:550 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:554 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:556 +msgid "This usually means the volume was never successfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:569 +#, python-format +msgid "Failed to delete SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:572 +#: cinder/volume/drivers/solidfire.py:646 +#: cinder/volume/drivers/solidfire.py:709 +#: cinder/volume/drivers/solidfire.py:734 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:575 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:579 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:587 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:638 +msgid "Entering SolidFire extend_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:660 +msgid "Leaving SolidFire extend_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:665 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:673 +msgid "Failed to get updated stats" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:703 +#: cinder/volume/drivers/solidfire.py:728 +msgid "Entering SolidFire attach_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:773 +msgid "Leaving SolidFire transfer volume" +msgstr "" + +#: cinder/volume/drivers/zadara.py:236 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:260 +#, python-format +msgid "Operation completed. 
%(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:357 +#, python-format +msgid "Pool %(name)s: %(total)sGB total, %(free)sGB free" +msgstr "" + +#: cinder/volume/drivers/zadara.py:408 cinder/volume/drivers/zadara.py:531 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:438 +#, python-format +msgid "Create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:445 cinder/volume/drivers/zadara.py:490 +#: cinder/volume/drivers/zadara.py:516 +#, python-format +msgid "Volume %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:456 +#, python-format +msgid "Delete snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:464 +#, python-format +msgid "snapshot: original volume %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:472 +#, python-format +msgid "snapshot: snapshot %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:483 +#, python-format +msgid "Creating volume from snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:496 +#, python-format +msgid "Snapshot %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:614 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:40 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:79 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:83 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:91 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:98 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:107 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:115 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:130 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:137 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:144 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:152 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:157 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:167 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:177 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:188 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:197 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:218 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:230 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:241 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:257 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:266 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:278 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:287 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:292 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:302 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:312 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:321 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:342 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. 
Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:354 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:365 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:381 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:390 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:402 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:411 +msgid "Entering delete_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:413 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:420 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:430 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:438 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:442 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigService: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:456 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:465 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:472 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:476 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:488 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:497 +#: cinder/volume/drivers/emc/emc_smis_common.py:567 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:502 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s."
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:518 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:527 +#, python-format +msgid "" +"Error Create Snapshot: %(snapshot)s Volume: %(volume)s Error: " +"%(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:535 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:541 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:545 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:551 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:559 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:574 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:590 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:599 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:611 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:621 +#, python-format +msgid "Create export: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:626 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:648 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:663 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:674 +#, python-format +msgid "Error mapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:678 +#, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:694 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:707 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:711 +#, python-format +msgid "HidePaths for volume %s completed successfully." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:724 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:739 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:744 +#, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:757 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:770 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:775 +#, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:781 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:790 +#: cinder/volume/drivers/emc/emc_smis_common.py:820 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:804 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:810 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:834 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:840 +#, python-format +msgid "Volume %s is already mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:852 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:884 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:887 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:903 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:906 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:928 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:948 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:952 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:959 +msgid "Cannot connect to ECOM server" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:971 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:984 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:997 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1010 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1054 +#, python-format +msgid "Pool %(storage_type)s is not found." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1060 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1066 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1082 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1114 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1117 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1130 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1158 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1184 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1188 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1248 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1289 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1302 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1314 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1326 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1361 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1404 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1409 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1419 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1441 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1463 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1491 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1520 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1526 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1538 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1548 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1550 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1566 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:152 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:161 +#, python-format +msgid "Cannot find device number for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:191 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:198 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:215 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:70 +#, python-format +msgid "Range: start LU: %(start)s, end LU: %(end)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:84 +#, python-format +msgid "setting LU upper (end) limit to %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:92 +#, python-format +msgid "%(element)s: %(val)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:103 cinder/volume/drivers/hds/hds.py:105 +#, python-format +msgid "XML exception reading parameter: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:178 +#, python-format +msgid "portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:197 +#, python-format +msgid "No configuration found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:250 +#, python-format +msgid "HDP not found: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:289 +#, python-format +msgid "iSCSI portal not found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:327 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:355 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is cloned." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:372 +#, python-format +msgid "LUN %(lun)s extended to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:395 +#, python-format +msgid "delete lun %(lun)s on %(name)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:480 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created from snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:503 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is created as snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:522 +#, python-format +msgid "LUN %s is deleted." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:57 +msgid "_instantiate_driver: configuration not found." 
+msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:64 +#, python-format +msgid "" +"_instantiate_driver: Loading %(protocol)s driver for Huawei OceanStor " +"%(product)s series storage arrays." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:84 +#, python-format +msgid "" +"\"Product\" or \"Protocol\" is illegal. \"Product\" should be set to " +"either T, Dorado or HVS. \"Protocol\" should be set to either iSCSI or " +"FC. Product: %(product)s Protocol: %(protocol)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:74 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s host: %(host)s initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:92 +#: cinder/volume/drivers/huawei/huawei_t.py:461 +#, python-format +msgid "initialize_connection: Target FC ports WWNS: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:101 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(ini)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:159 +#: cinder/volume/drivers/huawei/rest_common.py:1278 +#, python-format +msgid "" +"_get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " +"check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:206 +#: cinder/volume/drivers/huawei/rest_common.py:1083 +#, python-format +msgid "_get_tgt_iqn: iSCSI IP is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:234 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:248 +#, python-format +msgid "" +"_get_iscsi_tgt_port_info: Failed to get iSCSI port info. Please make sure" +" the iSCSI port IP %s is configured in array." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:323 +#: cinder/volume/drivers/huawei/huawei_t.py:552 +#, python-format +msgid "" +"terminate_connection: volume: %(vol)s, host: %(host)s, connector: " +"%(initiator)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:351 +#, python-format +msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:436 +msgid "validate_connector: The FC driver requires thewwpns in the connector." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:443 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:578 +#, python-format +msgid "_remove_fc_ports: FC port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:40 +#, python-format +msgid "parse_xml_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:129 +#, python-format +msgid "_get_host_os_type: Host %(ip)s OS type is %(os)s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:59 +#, python-format +msgid "HVS Request URL: %(url)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:60 +#, python-format +msgid "HVS Request Data: %(data)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:73 +#, python-format +msgid "HVS Response Data: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:75 +#, python-format +msgid "Bad response from server: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:82 +msgid "JSON transfer error" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:102 +#, python-format +msgid "Login error, reason is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:166 +#, python-format +msgid "" +"%(err)s\n" +"result: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:173 +#, python-format +msgid "%s \"data\" was not in result." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:208 +msgid "Can't find the Qos policy in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:246 +msgid "Can't find lun or lun group in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:280 +#, python-format +msgid "Invalid resource pool: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:298 +#, python-format +msgid "Get pool info error, pool name is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:327 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:354 +#, python-format +msgid "_stop_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:474 +#, python-format +msgid "" +"_mapping_hostgroup_and_lungroup: lun_group: %(lun_group)sview_id: " +"%(view_id)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:511 +#: cinder/volume/drivers/huawei/rest_common.py:543 +#, python-format +msgid "initiator name:%(initiator_name)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:527 +#, python-format +msgid "host lun id is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:553 +#, python-format +msgid "the free wwns %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:574 +#, python-format +msgid "the fc server properties is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:688 +#, python-format +msgid "JSON transfer data error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:874 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:937 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:964 +#, python-format +msgid "" +"PrefetchType config is wrong. PrefetchType must in 1,2,3,4. fetchtype " +"is:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:970 +msgid "Use default prefetch fetchtype. Prefetch fetchtype:Intelligent." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:982 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal.LUNcopy name: " +"%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1056 +#, python-format +msgid "" +"_get_iscsi_port_info: Failed to get iscsi port info through config IP " +"%(ip)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1101 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1124 +#, python-format +msgid "_parse_volume_type: type id: %(type_id)s config parameter is: %(params)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1157 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the configuration file " +"%(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1162 +#, python-format +msgid "The config parameters are: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1239 +#: cinder/volume/drivers/huawei/ssh_common.py:118 +#: cinder/volume/drivers/huawei/ssh_common.py:1265 +#, python-format +msgid "_check_conf_file: Config file invalid. %s must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1246 +#: cinder/volume/drivers/huawei/ssh_common.py:125 +msgid "_check_conf_file: Config file invalid. StoragePool must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1256 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1300 +msgid "Can not find lun in array" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:54 +#, python-format +msgid "ssh_read: Read SSH timeout. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:70 +msgid "No response message. Please check system status." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:101 +#: cinder/volume/drivers/huawei/ssh_common.py:1249 +msgid "do_setup" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:135 +#: cinder/volume/drivers/huawei/ssh_common.py:1287 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType is invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:169 +#, python-format +msgid "_get_login_info: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:224 +#, python-format +msgid "create_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:242 +#, python-format +msgid "" +"_name_translate: Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:279 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the element in configuration " +"file %(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:373 +#: cinder/volume/drivers/huawei/ssh_common.py:1451 +#, python-format +msgid "LUNType must be \"Thin\" or \"Thick\". LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:395 +msgid "" +"_parse_conf_lun_params: Use default prefetch type. 
Prefetch type: " +"Intelligent" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:421 +#, python-format +msgid "" +"_get_maximum_capacity_pool_id: Failed to get pool id. Please check config" +" file and make sure the StoragePool %s is created in storage array." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:436 +#, python-format +msgid "CLI command: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:466 +#, python-format +msgid "" +"_execute_cli: Can not connect to IP %(old)s, try to connect to the other " +"IP %(new)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:501 +#, python-format +msgid "_execute_cli: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:511 +#, python-format +msgid "delete_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:516 +#, python-format +msgid "delete_volume: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:570 +#, python-format +msgid "" +"create_volume_from_snapshot: snapshot name: %(snapshot)s, volume name: " +"%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:580 +#, python-format +msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:650 +#, python-format +msgid "_wait_for_luncopy: LUNcopy %(luncopyname)s status is %(status)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:688 +#, python-format +msgid "create_cloned_volume: src volume: %(src)s, tgt volume: %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:697 +#, python-format +msgid "Source volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:739 +#, python-format +msgid "" +"extend_volume: extended volume name: %(extended_name)s new added volume " +"name: %(added_name)s new added volume size: %(added_size)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:747 +#, python-format +msgid "extend_volume: volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:779 +#, python-format +msgid "create_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:785 +msgid "create_snapshot: Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:792 +#, python-format +msgid "create_snapshot: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:855 +#, python-format +msgid "delete_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:865 +#, python-format +msgid "" +"delete_snapshot: Can not delete snapshot %s for it is a source LUN of " +"LUNCopy." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:873 +#, python-format +msgid "delete_snapshot: Snapshot %(snap)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:916 +#, python-format +msgid "" +"%(func)s: %(msg)s\n" +"CLI command: %(cmd)s\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:933 +#, python-format +msgid "map_volume: Volume %s was not found." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1079 +#, python-format +msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1102 +#, python-format +msgid "remove_map: Host %s does not exist." 
+msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1106 +#, python-format +msgid "remove_map: Volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1119 +#, python-format +msgid "remove_map: No map between host %(host)s and volume %(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1138 +#, python-format +msgid "" +"_delete_map: There are IOs accessing the system. Retry to delete host map" +" %(mapid)s 10s later." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1146 +#, python-format +msgid "" +"_delete_map: Failed to delete host map %(mapid)s.\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1185 +msgid "_update_volume_stats: Updating volume stats." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1277 +msgid "_check_conf_file: Config file invalid. StoragePool must be specified." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1311 +msgid "" +"_get_device_type: The driver only supports Dorado5100 and Dorado 2100 G2 " +"now." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1389 +#, python-format +msgid "" +"create_volume_from_snapshot: %(device)s does not support create volume " +"from snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1396 +#, python-format +msgid "create_cloned_volume: %(device)s does not support clone volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1404 +#, python-format +msgid "extend_volume: %(device)s does not support extend volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1413 +#, python-format +msgid "create_snapshot: %(device)s does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:132 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:142 +#, python-format +msgid "Failed getting details for pool %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:179 +msgid "do_setup: No configured nodes." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:182 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:186 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:190 +msgid "Unable to determine system name" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:193 +msgid "Unable to determine system id" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:196 +msgid "Unable to determine pool extent size" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:203 +#: cinder/volume/drivers/netapp/iscsi.py:122 +#: cinder/volume/drivers/netapp/nfs.py:639 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:157 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:209 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:217 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:225 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:235 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:254 +msgid "The connector does not contain the required information." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:277 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:297 +msgid "CHAP secret exists for host but CHAP is disabled" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:302 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:314 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:316 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:333 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:342 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:365 +msgid "" +"Could not get FC connection information for the host-volume connection. " +"Is the host configured properly for FC connections?" 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:380 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:385 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:403 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:411 +msgid "terminate_connection: Failed to get host name from connector." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:421 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:447 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:459 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:470 +#, python-format +msgid "enter: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:474 +msgid "extend_volume: Extending a volume with snapshots is not supported." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:481 +#, python-format +msgid "leave: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:497 +#, python-format +msgid "enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:523 +#, python-format +msgid "leave: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:540 +#, python-format +msgid "" +"enter: retype: id=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:581 +#, python-format +msgid "" +"exit: retype: ild=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:622 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:623 +msgid "_update_volume_stats: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:44 +#, python-format +msgid "Could not find key in output of command %(cmd)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:64 +#, python-format +msgid "Failed to get code level (%s)." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:86 +#, python-format +msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:143 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:165 +#, python-format +msgid "Failed to find host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:178 +#, python-format +msgid "enter: get_host_from_connector: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:207 +#, python-format +msgid "leave: get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:218 +#, python-format +msgid "enter: create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:224 +msgid "create_host: Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:235 +msgid "create_host: No initiators or wwpns supplied." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:265 +#, python-format +msgid "leave: create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:275 +#, python-format +msgid "enter: map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:301 +#, python-format +msgid "" +"leave: map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host " +"%(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:311 +#, python-format +msgid "enter: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:318 +#, python-format +msgid "unmap_vol_from_host: No mapping of volume %(vol_name)s to any host found." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:324 +#, python-format +msgid "" +"unmap_vol_from_host: Multiple mappings of volume %(vol_name)s found, no " +"host specified." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:336 +#, python-format +msgid "" +"unmap_vol_from_host: No mapping of volume %(vol_name)s to host %(host) " +"found." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:348 +#, python-format +msgid "leave: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:377 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:383 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:390 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:397 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:402 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:408 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:417 +#, python-format +msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:452 +msgid "Protocol must be specified as ' iSCSI' or ' FC'." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:495 +#, python-format +msgid "enter: create_vdisk: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:498 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:525 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping%(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:535 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within theallotted %(to)d " +"seconds timeout. Terminating." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:544 +#, python-format +msgid "" +"enter: run_flashcopy: execute FlashCopy from source %(source)s to target " +"%(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:552 +#, python-format +msgid "leave: run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:572 +#, python-format +msgid "Loopcall: _check_vdisk_fc_mappings(), vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:595 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:631 +#, python-format +msgid "Calling _ensure_vdisk_no_fc_mappings: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:639 +#, python-format +msgid "enter: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:641 +#, python-format +msgid "Tried to delete non-existant vdisk %s." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:645 +#, python-format +msgid "leave: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:649 +#, python-format +msgid "enter: create_copy: snapshot %(src)s to %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:654 +#, python-format +msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:669 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt)s from vdisk %(src)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:691 +msgid "migrate_volume started without a vdisk copy in the expected pool." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:743 +#, python-format +msgid "" +"Ignore change IO group as storage code level is %(code_level)s, below " +"then 6.4.0.0" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:35 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:211 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:244 +#, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:53 +#, python-format +msgid "Expected no output from CLI command %(cmd)s, got %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:65 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:256 +#, python-format +msgid "" +"Failed to parse CLI output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:142 +msgid "Must pass wwpn or host to lsfabric." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:158 +#, python-format +msgid "Did not find success message nor error for %(fun)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:163 +msgid "" +"storwize_svc_multihostmap_enabled is set to False, not allowing multi " +"host mapping." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:347 +#, python-format +msgid "Did not find expected key %(key)s in %(fun)s: %(raw)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:382 +#, python-format +msgid "" +"Unexpected CLI response: header/row mismatch. header: %(header)s, row: " +"%(row)s" +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:419 +#, python-format +msgid "No element by given name %s." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:440 +msgid "Not a valid value for NaElement." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:444 +msgid "NaElement name cannot be null." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:468 +msgid "Type cannot be converted into NaElement." 
+msgstr "" + +#: cinder/volume/drivers/netapp/common.py:75 +msgid "Required configuration not found" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:103 +#, python-format +msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:109 +#, python-format +msgid "Storage family %s is not supported" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:116 +#, python-format +msgid "No default storage protocol found for storage family %(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:123 +#, python-format +msgid "" +"Protocol %(storage_protocol)s is not supported for storage family " +"%(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:130 +#, python-format +msgid "" +"NetApp driver of family %(storage_family)s and protocol " +"%(storage_protocol)s loaded" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:139 +msgid "Only loading netapp drivers supported." +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:158 +#, python-format +msgid "" +"The configured NetApp driver is deprecated. Please refer the link to " +"resolve the issue '%s'." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:69 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:105 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:150 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:166 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:175 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:191 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:227 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:232 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:238 +#, python-format +msgid "Failed to get LUN target details for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:249 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:252 +#, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:290 +#, python-format +msgid "Snapshot %s deletion successful" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:310 +#: cinder/volume/drivers/netapp/iscsi.py:565 +#: cinder/volume/drivers/netapp/nfs.py:99 +#: cinder/volume/drivers/netapp/nfs.py:206 +#, python-format +msgid "Resizing %s failed. Cleaning volume." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:325 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:412 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:431 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:511 +msgid "Object is not a NetApp LUN." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:543 +#, python-format +msgid "Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:545 +#, python-format +msgid "Error getting lun attribute. Exception: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:600 +#, python-format +msgid "No need to extend volume %s as it is already the requested new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:606 +#, python-format +msgid "Resizing lun %s directly to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:633 +#, python-format +msgid "Lun %(path)s geometry failed. Message - %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:662 +#, python-format +msgid "Moving lun %(name)s to %(new_name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:677 +#, python-format +msgid "Resizing lun %s using sub clone to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:684 +#, python-format +msgid "%s cannot be sub clone resized as it is hosted on compressed volume" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:690 +#, python-format +msgid "%s cannot be sub clone resized as it contains no blocks." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:707 +#, python-format +msgid "Post clone resize lun %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:718 +#, python-format +msgid "Failure staging lun %s to tmp." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:723 +#, python-format +msgid "Failure moving new cloned lun to %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:727 +#, python-format +msgid "Failure deleting staged tmp lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:730 +#, python-format +msgid "Unknown exception in post clone resize lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:732 +#, python-format +msgid "Exception details: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:736 +msgid "Getting lun block count." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:741 +#, python-format +msgid "Failure getting lun info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:785 +#, python-format +msgid "Failed to get vol with required size and extra specs for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:796 +#, python-format +msgid "Error provisioning vol %(name)s on %(volume)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:841 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:982 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:986 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1087 +msgid "Cluster ssc is not updated. No volume stats found." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1149 +#: cinder/volume/drivers/netapp/nfs.py:1080 +msgid "Unsupported ONTAP version. ONTAP version 7.3.1 and above is supported." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1153 +#: cinder/volume/drivers/netapp/nfs.py:1084 +#: cinder/volume/drivers/netapp/utils.py:320 +msgid "Api version could not be determined." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1164 +#, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1273 +#, python-format +msgid "Error finding luns for volume %s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1390 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1393 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1456 +msgid "Volume refresh job already running. Returning..." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1462 +#, python-format +msgid "Error refreshing vol capacity. Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1470 +#, python-format +msgid "Refreshing capacity info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:104 +#: cinder/volume/drivers/netapp/nfs.py:211 +#, python-format +msgid "NFS file %s not discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:224 +#, python-format +msgid "Copied image to volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:230 +#, python-format +msgid "Registering image in cache %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:237 +#, python-format +msgid "" +"Exception while registering image %(image_id)s in cache. Exception: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:250 +#, python-format +msgid "Found cache file for image %(image_id)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:263 +#, python-format +msgid "Cloning img from cache for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:271 +msgid "Image cache cleaning in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:282 +msgid "Image cache cleaning in progress." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:293 +#, python-format +msgid "Cleaning cache for share %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:298 +#, python-format +msgid "Files to be queued for deletion %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:305 +#, python-format +msgid "Exception during cache cleaning %(share)s. Message - %(ex)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:311 +msgid "Image cache cleaning done." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:336 +#, python-format +msgid "Bytes to free %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:343 +#, python-format +msgid "Delete file path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:358 +#, python-format +msgid "Deleting file at path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:363 +#, python-format +msgid "Exception during deleting %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:395 +#, python-format +msgid "Unexpected exception in cloning image %(image_id)s. 
Message: %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:411 +#, python-format +msgid "Cloning image %s from cache" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:415 +#, python-format +msgid "Cache share: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:425 +#, python-format +msgid "Unexpected exception during image cloning in share %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:431 +#, python-format +msgid "Cloning image %s directly in share" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:436 +#, python-format +msgid "Share is cloneable %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:443 +#, python-format +msgid "Image is raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:450 +#, python-format +msgid "Image will locally be converted to raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:457 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:467 +#, python-format +msgid "Performing post clone for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:474 +msgid "NFS file could not be discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:478 +msgid "Checking file for resize" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:482 +#, python-format +msgid "Resizing file to %sG" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:488 +msgid "Resizing image file failed." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:510 +msgid "Discover file retries exhausted." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:529 +#, python-format +msgid "Image location not in the expected format %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:557 +#, python-format +msgid "Found possible share matches %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:561 +msgid "Unexpected exception while short listing used share." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:599 +#, python-format +msgid "Extending volume %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:710 +#, python-format +msgid "Shares on vserver %s will only be used for provisioning." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:714 +#: cinder/volume/drivers/netapp/nfs.py:892 +msgid "No vserver set in config. SSC will be disabled." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:757 +#, python-format +msgid "Exception creating vol %(name)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:765 +#, python-format +msgid "Volume %s could not be created on shares." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:815 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:856 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:862 +#, python-format +msgid "" +"Cloning with params volume %(volume)s, src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:924 +msgid "No cluster ssc stats found. Wait for next volume stats update." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:941 +msgid "No shares found hence skipping ssc refresh." 
+msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:978 +#: cinder/volume/drivers/netapp/nfs.py:1221 +#, python-format +msgid "Shortlisted del elg files %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:983 +#: cinder/volume/drivers/netapp/nfs.py:1226 +#, python-format +msgid "Getting file usage for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:988 +#: cinder/volume/drivers/netapp/nfs.py:1231 +#, python-format +msgid "file-usage for path %(path)s is %(bytes)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1005 +#: cinder/volume/drivers/netapp/nfs.py:1268 +#, python-format +msgid "Share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1007 +#: cinder/volume/drivers/netapp/nfs.py:1270 +#, python-format +msgid "No share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1038 +#, python-format +msgid "Found volume %(vol)s for share %(share)s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1129 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1139 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:241 +#, python-format +msgid "Unexpected error while creating ssc vol list. Message - %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:272 +#, python-format +msgid "Exception querying aggr options. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:313 +#, python-format +msgid "Exception querying sis information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:347 +#, python-format +msgid "Exception querying mirror information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:379 +#, python-format +msgid "Exception querying storage disk. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:421 +#, python-format +msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:455 +#, python-format +msgid "Successfully completed stale refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:482 +#, python-format +msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:488 +#, python-format +msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:500 +msgid "Backend not a VolumeDriver." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:502 +msgid "Backend server not NaServer." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:505 +msgid "ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:517 +msgid "refresh stale ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:616 +msgid "Fatal error: User not permitted to query NetApp volumes." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:623 +#, python-format +msgid "" +"The user does not have access or sufficient privileges to use all ssc " +"apis. The ssc features %s may not work as expected." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:122 +msgid "ems executed successfully." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:124 +#, python-format +msgid "Failed to invoke ems. 
Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:137 +msgid "" +"This is not the recommended way to use the NetApp drivers. Please use " +"NetAppDriver to achieve the functionality." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:160 +msgid "Requires an NaServer instance." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:317 +msgid "Unsupported Clustered Data ONTAP version." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:99 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:150 +#, python-format +msgid "Extending volume: %(id)s New size: %(size)s GB" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:166 +#, python-format +msgid "Volume %s does not exist, it seems it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:179 +#, python-format +msgid "Cannot delete snapshot %(origin)s: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:190 +#, python-format +msgid "Creating temp snapshot of the original volume: %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:200 +#: cinder/volume/drivers/nexenta/nfs.py:200 +#, python-format +msgid "Volume creation failed, deleting created snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:205 +#: cinder/volume/drivers/nexenta/nfs.py:205 +#, python-format +msgid "Failed to delete zfs snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:223 +#, python-format +msgid "Enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:250 +#, python-format +msgid "Remote NexentaStor appliance at %s should be SSH-bound." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:267 +#, python-format +msgid "" +"Cannot send source snapshot %(src)s to destination %(dst)s. Reason: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:275 +#, python-format +msgid "" +"Cannot delete temporary source snapshot %(src)s on NexentaStor Appliance:" +" %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:281 +#, python-format +msgid "Cannot delete source volume %(volume)s on NexentaStor Appliance: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:318 +#, python-format +msgid "Snapshot %s does not exist, it seems it was already deleted."
+msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:439 +#: cinder/volume/drivers/windows/windows_utils.py:230 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:449 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:461 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:471 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:481 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:514 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:522 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:83 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:88 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:89 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:90 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:96 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:85 +#, python-format +msgid "Volume %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:89 +#, python-format +msgid "Folder %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:114 +#, python-format +msgid "Creating folder on Nexenta Store %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:146 +#, python-format +msgid "Cannot destroy created folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:176 +#, python-format +msgid "Cannot destroy cloned folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:227 +#, python-format +msgid "Folder %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:237 +#: cinder/volume/drivers/nexenta/nfs.py:268 +#, python-format +msgid "Snapshot %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:302 +#, python-format +msgid "Creating regular file: %s. This may take some time." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:313 +#, python-format +msgid "Regular file: %s created." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:365 +#, python-format +msgid "Sharing folder %s on Nexenta Store" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:393 +#, python-format +msgid "Shares loaded: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/utils.py:46 +#, python-format +msgid "Invalid value: \"%s\"" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:93 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:99 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:107 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:137 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:190 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:246 +#, python-format +msgid "Snapshot info: %(name)s => %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:321 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:79 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:166 +#, python-format +msgid "Invalid hp3parclient version. Version %s or greater required." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:179 +#, python-format +msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:193 +#, python-format +msgid "HP3PARCommon %(common_ver)s, hp3parclient %(rest_ver)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:212 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:494 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:228 +#, python-format +msgid "Failed to get domain because CPG (%s) doesn't exist on array." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:247 +#, python-format +msgid "Error extending volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:347 +#, python-format +msgid "command %s failed" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:390 +#, python-format +msgid "Error running ssh command: %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:586 +#, python-format +msgid "VV Set %s does not exist." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:633 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:684 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:752 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1004 +#, python-format +msgid "Failure in update_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1019 +#, python-format +msgid "Failure in clear_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1031 +#, python-format +msgid "Error attaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1039 +#, python-format +msgid "Error detaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:124 +#, python-format +msgid "Invalid IP address format '%s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:158 +#, python-format +msgid "" +"Found invalid iSCSI IP address(s) in configuration option(s) " +"hp3par_iscsi_ips or iscsi_ip_address '%s.'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:164 +msgid "At least one valid iSCSI IP address must be set." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:266 +msgid "Least busy iSCSI port not found, using first iSCSI port in list." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:75 +#, python-format +msgid "Failure while invoking function: %(func)s. Error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:162 +#, python-format +msgid "Error while terminating session: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:165 +msgid "Successfully established connection to the server." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:172 +#, python-format +msgid "Error while logging out the user: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:218 +#, python-format +msgid "" +"Not authenticated error occurred. Will create session and try API call " +"again: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:258 +#, python-format +msgid "Task: %(task)s progress: %(prog)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:262 +#, python-format +msgid "Task %s status: success." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:266 +#: cinder/volume/drivers/vmware/api.py:271 +#, python-format +msgid "Task: %(task)s failed with error: %(err)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:290 +msgid "Lease is ready." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:294 +msgid "Lease initializing..." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:304 +#, python-format +msgid "Error: unknown lease state %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:51 +#, python-format +msgid "Read %(bytes)s out of %(max)s from ThreadSafePipe." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:56 +#, python-format +msgid "Completed transfer of size %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:102 +#, python-format +msgid "Initiating image service update on image: %(image)s with meta: %(meta)s" +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:117 +#, python-format +msgid "Glance image: %s is now active." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:123 +#, python-format +msgid "Glance image: %s is in killed state." 
+msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:132 +#, python-format +msgid "Glance image %(id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:171 +#, python-format +msgid "" +"Exception during HTTP connection close in VMwareHTTPWrite. Exception is " +"%s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:203 +#: cinder/volume/drivers/vmware/read_write_util.py:292 +msgid "Could not retrieve URL from lease." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:206 +#, python-format +msgid "Opening vmdk url: %s for write." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:231 +#, python-format +msgid "Written %s bytes to vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:242 +#: cinder/volume/drivers/vmware/read_write_util.py:318 +#, python-format +msgid "Updating progress to %s percent." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:258 +#: cinder/volume/drivers/vmware/read_write_util.py:334 +msgid "Lease released." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:260 +#: cinder/volume/drivers/vmware/read_write_util.py:336 +#, python-format +msgid "Lease is already in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:295 +#, python-format +msgid "Opening vmdk url: %s for read." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:307 +#, python-format +msgid "Read %s bytes from vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:150 +#, python-format +msgid "Error(s): %s occurred in the call to RetrievePropertiesEx." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:189 +#, python-format +msgid "No such SOAP method %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:198 +#, python-format +msgid "httplib error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:209 +#, python-format +msgid "Socket error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:218 +#, python-format +msgid "Type error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:225 +#, python-format +msgid "Error in %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:112 +#, python-format +msgid "Returning spec value %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:115 +#, python-format +msgid "Invalid spec value: %s specified." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:118 +#, python-format +msgid "Returning default spec value: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:169 +#, python-format +msgid "%s not set." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:174 +#, python-format +msgid "Successfully setup driver: %(driver)s for server: %(ip)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:219 +msgid "Backing not available, no operation to be performed." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:287 +#, python-format +msgid "" +"Unable to pick datastore to accommodate %(size)s bytes from the " +"datastores: %(dss)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:293 +#, python-format +msgid "" +"Selected datastore: %(datastore)s with %(host_count)d connected host(s) " +"for the volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:375 +#, python-format +msgid "" +"Unable to find suitable datastore for volume of size: %(vol)s GB under " +"host: %(host)s. 
More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:385 +#, python-format +msgid "Unable to find host to accommodate a disk of size: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:412 +#, python-format +msgid "" +"Unable to find suitable datastore for volume: %(vol)s under host: " +"%(host)s. More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:422 +#, python-format +msgid "Unable to create volume: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:441 +#, python-format +msgid "The instance: %s for which initialize connection is called, exists." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:448 +#, python-format +msgid "There is no backing for the volume: %s. Need to create one." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:456 +msgid "The instance for which initialize connection is called, does not exist." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:461 +#, python-format +msgid "Trying to boot from an empty volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:470 +#, python-format +msgid "" +"Returning connection_info: %(info)s for volume: %(volume)s with " +"connector: %(connector)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:518 +#, python-format +msgid "Snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:523 +#, python-format +msgid "There is no backing, so will not create snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:528 +#, python-format +msgid "Successfully created snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:549 +#, python-format +msgid "Delete snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:554 +#, python-format +msgid "There is no backing, and so there is no snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:558 +#, python-format +msgid "Successfully deleted snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:586 +#, python-format +msgid "Successfully cloned new backing: %(back)s from source VMDK file: %(vmdk)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:603 +#, python-format +msgid "" +"There is no backing for the source volume: %(svol)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:633 +#, python-format +msgid "" +"There is no backing for the source snapshot: %(snap)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:642 +#: cinder/volume/drivers/vmware/vmdk.py:982 +#, python-format +msgid "" +"There is no snapshot point for the snapshoted volume: %(snap)s. Not " +"creating any backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:678 +#, python-format +msgid "Cannot create image of disk format: %s. Only vmdk disk format is accepted." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:713 +#: cinder/volume/drivers/vmware/vmdk.py:771 +#, python-format +msgid "Fetching glance image: %(id)s to server: %(host)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:722 +#: cinder/volume/drivers/vmware/vmdk.py:792 +#, python-format +msgid "Done copying image: %(id)s to volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:725 +#, python-format +msgid "" +"Exception in copy_image_to_volume: %(excep)s. Deleting the backing: " +"%(back)s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:746 +#, python-format +msgid "Exception in _select_ds_for_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:749 +#, python-format +msgid "Selected datastore %(ds)s for new volume of size %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:784 +#, python-format +msgid "Exception in copy_image_to_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:787 +#, python-format +msgid "Deleting the backing: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:808 +#, python-format +msgid "Copy glance image: %s to create new volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:842 +msgid "Upload to glance of attached volume is not supported." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:847 +#, python-format +msgid "Copy Volume: %s to new image." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:853 +#, python-format +msgid "Backing not found, creating for volume: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:872 +#, python-format +msgid "Done copying volume %(vol)s to a new image %(img)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:922 +#, python-format +msgid "Relocating volume: %(backing)s to %(ds)s and %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:961 +#: cinder/volume/drivers/vmware/volumeops.py:630 +#, python-format +msgid "Successfully created clone: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:974 +#, python-format +msgid "" +"There is no backing for the snapshoted volume: %(snap)s. Not creating any" +" backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1010 +#, python-format +msgid "" +"There is no backing for the source volume: %(src)s. Not creating any " +"backing for volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1018 +#, python-format +msgid "Linked clone of source volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:94 +#, python-format +msgid "Downloading image: %s from glance image server as a flat vmdk file." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:107 +#: cinder/volume/drivers/vmware/vmware_images.py:126 +#, python-format +msgid "Downloaded image: %s from glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:113 +#, python-format +msgid "Downloading image: %s from glance image server using HttpNfc import." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:132 +#, python-format +msgid "Uploading image: %s to the Glance image server using HttpNfc export." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:158 +#, python-format +msgid "Uploaded image: %s to the Glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:87 +#, python-format +msgid "Did not find any backing with name: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:94 +#, python-format +msgid "Deleting the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:97 +#, python-format +msgid "Initiated deletion of VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:99 +#, python-format +msgid "Deleted the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:237 +#, python-format +msgid "There are no valid datastores attached to %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:289 +#, python-format +msgid "" +"Creating folder: %(child_folder_name)s under parent folder: " +"%(parent_folder)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:306 +#, python-format +msgid "Child folder already present: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:314 +#, python-format +msgid "Created child folder: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:365 +#, python-format +msgid "Spec for creating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:383 +#, python-format +msgid "" +"Creating volume backing name: %(name)s disk_type: %(disk_type)s size_kb: " +"%(size_kb)s at folder: %(folder)s resourse pool: %(resource_pool)s " +"datastore name: %(ds_name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:395 +#, python-format +msgid "Initiated creation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:398 +#, python-format +msgid "Successfully created volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:438 +#, python-format +msgid "Spec for relocating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:452 +#, python-format +msgid "" +"Relocating backing: %(backing)s to datastore: %(ds)s and resource pool: " +"%(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:462 +#, python-format +msgid "Initiated relocation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:464 +#, python-format +msgid "" +"Successfully relocated volume backing: %(backing)s to datastore: %(ds)s " +"and resource pool: %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:474 +#, python-format +msgid "Moving backing: %(backing)s to folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:479 +#, python-format +msgid "Initiated move of volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:482 +#, python-format +msgid "Successfully moved volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:494 +#, python-format +msgid "Snapshoting backing: %(backing)s with name: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:501 +#, python-format +msgid "Initiated snapshot of volume backing: %(backing)s named: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:505 +#, python-format +msgid "Successfully created snapshot: %(snap)s for volume backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:553 +#, python-format +msgid "Deleting the snapshot: %(name)s from backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:558 +#, python-format +msgid "" +"Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " +"delete anything." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:565 +#, python-format +msgid "Initiated snapshot: %(name)s deletion for backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:569 +#, python-format +msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:597 +#, python-format +msgid "Spec for cloning the backing: %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:613 +#, python-format +msgid "" +"Creating a clone of backing: %(back)s, named: %(name)s, clone type: " +"%(type)s from snapshot: %(snap)s on datastore: %(ds)s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:627 +#, python-format +msgid "Initiated clone of backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:638 +#, python-format +msgid "Deleting file: %(file)s under datacenter: %(dc)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:646 +#, python-format +msgid "Initiated deletion via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:648 +#, python-format +msgid "Successfully deleted file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:701 +msgid "Copying disk data before snapshot of the VM" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:711 +#, python-format +msgid "Initiated copying disk data via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:713 +#, python-format +msgid "Successfully copied disk at: %(src)s to: %(dest)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:722 +#, python-format +msgid "Deleting vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:729 +#, python-format +msgid "Initiated deleting vmdk file via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:731 +#, python-format +msgid "Deleted vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/windows/windows.py:102 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:47 +#, python-format +msgid "" +"check_for_setup_error: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:53 +msgid "check_for_setup_error: there is no ISCSI traffic listening." +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:63 +#, python-format +msgid "" +"get_host_information: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:73 +#, python-format +msgid "" +"get_host_information: the ISCSI target information could not be " +"retrieved. WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:105 +#, python-format +msgid "" +"associate_initiator_with_iscsi_target: an association between initiator: " +"%(init)s and target name: %(target)s could not be established. WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:123 +#, python-format +msgid "" +"delete_iscsi_target: error when deleting the iscsi target associated with" +" target name: %(target)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:139 +#, python-format +msgid "" +"create_volume: error when creating the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:157 +#, python-format +msgid "" +"delete_volume: error when deleting the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:177 +#, python-format +msgid "" +"create_snapshot: error when creating the snapshot name: %(vol_name)s . 
" +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:193 +#, python-format +msgid "" +"create_volume_from_snapshot: error when creating the volume name: " +"%(vol_name)s from snapshot name: %(snap_name)s. WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:208 +#, python-format +msgid "" +"delete_snapshot: error when deleting the snapshot name: %(snap_name)s . " +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:223 +#, python-format +msgid "" +"create_iscsi_target: error when creating iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:240 +#, python-format +msgid "" +"remove_iscsi_target: error when deleting iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:255 +#, python-format +msgid "" +"add_disk_to_target: error adding disk associated to volume : %(vol_name)s" +" to the target name: %(tar_name)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:273 +#, python-format +msgid "" +"copy_vhd_disk: error when copying disk from source path : %(src_path)s to" +" destination path: %(dest_path)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:290 +#, python-format +msgid "" +"extend: error when extending the volume: %(vol_name)s .WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/flows/common.py:52 +#, python-format +msgid "Restoring source %(source_volid)s status to %(status)s" +msgstr "" + +#: cinder/volume/flows/common.py:58 +#, python-format +msgid "" +"Failed setting source volume %(source_volid)s back to its initial " +"%(source_status)s status" +msgstr "" + +#: cinder/volume/flows/common.py:83 +#, python-format +msgid "Updating volume: %(volume_id)s with %(update)s due to: %(reason)s" +msgstr "" + +#: cinder/volume/flows/common.py:90 +#: cinder/volume/flows/manager/create_volume.py:676 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(update)s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:81 +#, python-format +msgid "Originating snapshot status must be one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:103 +#, python-format +msgid "" +"Unable to create a volume from an originating source volume when its " +"status is not one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:126 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than the snapshot size " +"%(snap_size)sGB. They must be >= original snapshot size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:135 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than original volume size " +"%(source_size)sGB. They must be >= original volume size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:144 +#, python-format +msgid "Volume size %(size)s must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:186 +#, python-format +msgid "" +"Size of specified image %(image_size)sGB is larger than volume size " +"%(volume_size)sGB." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:194 +#, python-format +msgid "" +"Volume size %(volume_size)sGB cannot be smaller than the image minDisk " +"size %(min_disk)sGB." 
+msgstr "" + +#: cinder/volume/flows/api/create_volume.py:212 +#, python-format +msgid "Metadata property key %s greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:217 +#, python-format +msgid "Metadata property key %s value greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:254 +#, python-format +msgid "Availability zone '%s' is invalid" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:267 +msgid "Volume must be in the same availability zone as the snapshot" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:276 +msgid "Volume must be in the same availability zone as the source volume" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:315 +msgid "Volume type will be changed to be the same as the source volume." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:463 +#, python-format +msgid "Failed destroying volume entry %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:546 +#, python-format +msgid "Failed rolling back quota for %s reservations" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:590 +#, python-format +msgid "Failed to update quota for deleting volume: %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:678 +#: cinder/volume/flows/manager/create_volume.py:192 +#, python-format +msgid "Volume %s: create failed" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:682 +msgid "Unexpected build error:" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:105 +#, python-format +msgid "" +"Volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d due to " +"%(reason)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:124 +#, python-format +msgid "Volume %s: re-scheduled" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:141 +#, python-format +msgid "Updating volume %(volume_id)s with %(update)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:146 +#, python-format +msgid "Volume %s: resetting 'creating' status failed." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:165 +#, python-format +msgid "Volume %s: rescheduling failed" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:308 +#, python-format +msgid "" +"Failed notifying about the volume action %(event)s for volume " +"%(volume_id)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:345 +#, python-format +msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:347 +#, python-format +msgid "" +"Failed updating volume %(vol_id)s metadata using the provided " +"%(src_type)s %(src_id)s metadata" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:405 +#, python-format +msgid "" +"Failed fetching snapshot %(snapshot_id)s bootable flag using the provided" +" glance snapshot %(snapshot_ref_id)s volume reference" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:418 +#, python-format +msgid "Marking volume %s as bootable." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:421 +#, python-format +msgid "Failed updating volume %(volume_id)s bootable flag to true" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:448 +#, python-format +msgid "" +"Attempting download of %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s." 
+msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:455 +#: cinder/volume/flows/manager/create_volume.py:466 +#, python-format +msgid "" +"Failed to copy image %(image_id)s to volume: %(volume_id)s, error: " +"%(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:461 +#, python-format +msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:475 +#, python-format +msgid "" +"Downloaded image %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s successfully." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:512 +#, python-format +msgid "" +"Creating volume glance metadata for volume %(volume_id)s backed by image " +"%(image_id)s with: %(vol_metadata)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:526 +#, python-format +msgid "" +"Cloning %(volume_id)s from image %(image_id)s at location " +"%(image_location)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:552 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(updates)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:574 +#, python-format +msgid "Unable to create volume. Volume driver %s not initialized" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:588 +#, python-format +msgid "" +"Volume %(volume_id)s: being created using %(functor)s with specification:" +" %(volume_spec)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:611 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with creation provided " +"model %(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:619 +#, python-format +msgid "Volume %s: creating export" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:633 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with driver provided model " +"%(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:680 +#, python-format +msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" +msgstr "" + +#~ msgid "Invalid request body" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: prefix %s" +#~ msgstr "" + +#~ msgid "Schedule volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete schedule volume using flow: %s" +#~ msgstr "" + +#~ msgid "Create volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete create volume workflow" +#~ msgstr "" + +#~ msgid "Expected volume result not found" +#~ msgstr "" + +#~ msgid "Manager volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete manager volume workflow" +#~ msgstr "" + +#~ msgid "Unable to update stats, driver is uninitialized" +#~ msgstr "" + +#~ msgid "Bad reponse from server: %s" +#~ msgstr "" + +#~ msgid "" +#~ msgstr "" + +#~ msgid "%(flow)s has moved into state %(state)s from state %(old_state)s" +#~ msgstr "" + +#~ msgid "No request spec, will not reschedule" +#~ msgstr "" + +#~ msgid "No retry filter property or associated retry info, will not reschedule" +#~ msgstr "" + +#~ msgid "Retry info not present, will not reschedule" +#~ msgstr "" + +#~ msgid "Clear capabilities" +#~ msgstr "" + +#~ msgid "This usually means the volume was never succesfully created." 
+#~ msgstr "" + +#~ msgid "setting LU uppper (end) limit to %s" +#~ msgstr "" + +#~ msgid "Can't find lun or lun goup in array" +#~ msgstr "" + +#~ msgid "Volume to be restored to is smaller than the backup to be restored" +#~ msgstr "" + +#~ msgid "Volume driver '%(driver)s' not initialized." +#~ msgstr "" + +#~ msgid "in looping call" +#~ msgstr "" + +#~ msgid "Is the appropriate service running?" +#~ msgstr "" + +#~ msgid "Could not find another host" +#~ msgstr "" + +#~ msgid "Not enough allocatable volume gigabytes remaining" +#~ msgstr "" + +#~ msgid "Unable to update stats on non-intialized Volume Group: %s" +#~ msgstr "" + +#~ msgid "do_setup: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "migrate_volume started with more than one vdisk copy" +#~ msgstr "" + +#~ msgid "migrate_volume: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "Selected datastore: %s for the volume." +#~ msgstr "" + +#~ msgid "There are no valid datastores present under %s." +#~ msgstr "" + +#~ msgid "Unable to create volume, driver not initialized" +#~ msgstr "" + +#~ msgid "Migration %(migration_id)s could not be found." +#~ msgstr "" + +#~ msgid "Bad driver response status: %(status)s" +#~ msgstr "" + +#~ msgid "Instance %(instance_id)s could not be found." +#~ msgstr "" + +#~ msgid "Volume retype failed: %(reason)s" +#~ msgstr "" + +#~ msgid "SIGTERM received" +#~ msgstr "" + +#~ msgid "Child %(pid)d exited with status %(code)d" +#~ msgstr "" + +#~ msgid "_wait_child %d" +#~ msgstr "" + +#~ msgid "wait wrap.failed %s" +#~ msgstr "" + +#~ msgid "" +#~ "Report interval must be less than " +#~ "service down time. Current config: " +#~ ". Setting " +#~ "service_down_time to: %(new_service_down_time)s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target for volume %(name)s." +#~ msgstr "" + +#~ msgid "Updating iscsi target: %s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target %(name)s: %(e)s" +#~ msgstr "" + +#~ msgid "Caught '%(exception)s' exception." +#~ msgstr "" + +#~ msgid "Get code level failed" +#~ msgstr "" + +#~ msgid "do_setup: Could not get system name" +#~ msgstr "" + +#~ msgid "Failed to get license information." +#~ msgstr "" + +#~ msgid "do_setup: No configured nodes" +#~ msgstr "" + +#~ msgid "enter: _get_chap_secret_for_host: host name %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_chap_secret_for_host: host name " +#~ "%(host_name)s with secret %(chap_secret)s" +#~ msgstr "" + +#~ msgid "" +#~ "_create_host: Cannot clean host name. " +#~ "Host name is not unicode or string" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: %s" +#~ msgstr "" + +#~ msgid "leave: _get_host_from_connector: host %s" +#~ msgstr "" + +#~ msgid "enter: _create_host: host %s" +#~ msgstr "" + +#~ msgid "_create_host: No connector ports" +#~ msgstr "" + +#~ msgid "leave: _create_host: host %(host)s - %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +#~ msgstr "" + +#~ msgid "" +#~ "storwize_svc_multihostmap_enabled is set to " +#~ "False, Not allow multi host mapping" +#~ msgstr "" + +#~ msgid "volume %s mapping to multi host" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _map_vol_to_host: LUN %(result_lun)s, " +#~ "volume %(volume_name)s, host %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "leave: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "_create_host failed to return the host name." 
+#~ msgstr "" + +#~ msgid "_get_host_from_connector failed to return the host name for connector" +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to any host found." +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: Multiple mappings of " +#~ "volume %(vol_name)s found, no host " +#~ "specified." +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to host %(host_name)s found" +#~ msgstr "" + +#~ msgid "protocol must be specified as ' iSCSI' or ' FC'" +#~ msgstr "" + +#~ msgid "enter: _create_vdisk: vdisk %s " +#~ msgstr "" + +#~ msgid "" +#~ "_create_vdisk %(name)s - did not find success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find success " +#~ "message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find mapping " +#~ "id in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to prepare FlashCopy" +#~ " from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "Unexecpted mapping status %(status)s for " +#~ "mapping %(id)s. Attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Mapping %(id)s prepare failed to " +#~ "complete within the allotted %(to)d " +#~ "seconds timeout. Terminating." +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s with " +#~ "exception %(ex)s" +#~ msgstr "" + +#~ msgid "_prepare_fc_map: %s" +#~ msgstr "" + +#~ msgid "" +#~ "_start_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "enter: _run_flashcopy: execute FlashCopy from" +#~ " source %(source)s to target %(target)s" +#~ msgstr "" + +#~ msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +#~ msgstr "" + +#~ msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %(src_vdisk)s (%(src_id)s) does not exist" +#~ msgstr "" + +#~ msgid "" +#~ "_create_copy: cannot get source vdisk " +#~ "%(src)s capacity from vdisk attributes " +#~ "%(attr)s" +#~ msgstr "" + +#~ msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_flashcopy_mapping_attributes: mapping " +#~ "%(fc_map_id)s, attributes %(attributes)s" +#~ msgstr "" + +#~ msgid "enter: _is_vdisk_defined: vdisk %s " +#~ msgstr "" + +#~ msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +#~ msgstr "" + +#~ msgid "enter: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "warning: Tried to delete vdisk %s but it does not exist." 
+#~ msgstr "" + +#~ msgid "leave: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "" +#~ "_add_vdisk_copy %(name)s - did not find" +#~ " success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "_get_vdisk_copy_attrs: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "_get_pool_attrs: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "enter: _execute_command_and_parse_attributes: command %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _execute_command_and_parse_attributes:\n" +#~ "command: %(cmd)s\n" +#~ "attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "_get_hdr_dic: attribute headers and values do not match.\n" +#~ " Headers: %(header)s\n" +#~ " Values: %(row)s" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ "stdout: %(out)s\n" +#~ "stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "Did not find expected column in %(fun)s: %(hdr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Volume size %(size)s cannot be lesser" +#~ " than the snapshot size %(snap_size)s. " +#~ "They must be >= original snapshot " +#~ "size." +#~ msgstr "" + +#~ msgid "" +#~ "Clones currently disallowed when %(size)s " +#~ "< %(source_size)s. They must be >= " +#~ "original volume size." +#~ msgstr "" + +#~ msgid "" +#~ "Size of specified image %(image_size)s " +#~ "is larger than volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "" +#~ "Image minDisk size %(min_disk)s is " +#~ "larger than the volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "Updating volume %(volume_id)s with %(update)s" +#~ msgstr "" + +#~ msgid "Volume %s: resetting 'creating' status failed" +#~ msgstr "" + +#~ msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s" +#~ msgstr "" + +#~ msgid "Marking volume %s as bootable" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting download of %(image_id)s " +#~ "(%(image_location)s) to volume %(volume_id)s" +#~ msgstr "" + +#~ msgid "" +#~ "Downloaded image %(image_id)s (%(image_location)s)" +#~ " to volume %(volume_id)s successfully" +#~ msgstr "" + +#~ msgid "" +#~ "Creating volume glance metadata for " +#~ "volume %(volume_id)s backed by image " +#~ "%(image_id)s with: %(vol_metadata)s" +#~ msgstr "" + +#~ msgid "" +#~ "Cloning %(volume_id)s from image %(image_id)s" +#~ " at location %(image_location)s" +#~ msgstr "" + diff --git a/cinder/locale/fa/LC_MESSAGES/cinder.po b/cinder/locale/fa/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..ae76fafd94 --- /dev/null +++ b/cinder/locale/fa/LC_MESSAGES/cinder.po @@ -0,0 +1,10001 @@ +# Persian translations for cinder. +# Copyright (C) 2013 ORGANIZATION +# This file is distributed under the same license as the cinder project. 
+# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Cinder\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-02-03 06:16+0000\n" +"PO-Revision-Date: 2013-11-26 20:45+0000\n" +"Last-Translator: openstackjenkins \n" +"Language-Team: Persian " +"(http://www.transifex.com/projects/p/openstack/language/fa/)\n" +"Plural-Forms: nplurals=1; plural=0\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:102 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:66 cinder/brick/exception.py:33 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:88 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:107 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:112 +#, python-format +msgid "Volume driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:116 +#, python-format +msgid "Backup driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:120 +#, python-format +msgid "Connection to glance failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:124 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:129 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:133 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:137 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:141 +msgid "Volume driver not ready." +msgstr "" + +#: cinder/exception.py:145 cinder/brick/exception.py:74 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:150 +#, python-format +msgid "Invalid snapshot: %(reason)s" +msgstr "" + +#: cinder/exception.py:154 +#, python-format +msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:159 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:163 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:167 +msgid "The results are invalid." +msgstr "" + +#: cinder/exception.py:171 +#, python-format +msgid "Invalid input received: %(reason)s" +msgstr "" + +#: cinder/exception.py:175 +#, python-format +msgid "Invalid volume type: %(reason)s" +msgstr "" + +#: cinder/exception.py:179 +#, python-format +msgid "Invalid volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:183 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:187 +#, python-format +msgid "Invalid host: %(reason)s" +msgstr "" + +#: cinder/exception.py:193 cinder/brick/exception.py:81 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:197 +#, python-format +msgid "Invalid auth key: %(reason)s" +msgstr "" + +#: cinder/exception.py:201 +#, python-format +msgid "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" +msgstr "" + +#: cinder/exception.py:206 +msgid "Service is unavailable at this time." 
+msgstr "" + +#: cinder/exception.py:210 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:214 +#, python-format +msgid "The device in the path %(path)s is unavailable: %(reason)s" +msgstr "" + +#: cinder/exception.py:218 +#, python-format +msgid "Expected a uuid but received %(uuid)s." +msgstr "" + +#: cinder/exception.py:222 cinder/brick/exception.py:68 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:228 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:232 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "" +"Volume %(volume_id)s has no administration metadata with key " +"%(metadata_key)s." +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Invalid metadata: %(reason)s" +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Invalid metadata size: %(reason)s" +msgstr "" + +#: cinder/exception.py:250 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:255 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:264 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:269 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s deletion is not allowed with volumes " +"present with the type." +msgstr "" + +#: cinder/exception.py:274 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:278 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:282 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:287 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:291 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:295 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:328 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:332 +#, python-format +msgid "Unknown quota resources %(unknown)s." 
+msgstr "" + +#: cinder/exception.py:336 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:340 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:344 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:348 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:352 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:356 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:365 +#, python-format +msgid "Volume Type %(id)s already exists." +msgstr "" + +#: cinder/exception.py:369 +#, python-format +msgid "Volume type encryption for type %(type_id)s already exists." +msgstr "" + +#: cinder/exception.py:373 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:377 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:381 +#, python-format +msgid "Could not find parameter %(param)s" +msgstr "" + +#: cinder/exception.py:385 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:389 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:398 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:402 +#, python-format +msgid "Quota exceeded: code=%(code)s" +msgstr "" + +#: cinder/exception.py:409 +#, python-format +msgid "" +"Requested volume or snapshot exceeds allowed Gigabytes quota. Requested " +"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." +msgstr "" + +#: cinder/exception.py:415 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:419 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:423 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:427 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:432 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:436 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:440 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:444 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:449 +#, python-format +msgid "Glance metadata for volume/snapshot %(id)s cannot be found." 
+msgstr "" + +#: cinder/exception.py:453 +#, python-format +msgid "Failed to export for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:457 +#, python-format +msgid "Failed to create metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:461 +#, python-format +msgid "Failed to update metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:465 +#, python-format +msgid "Failed to copy metadata to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:469 +#, python-format +msgid "Failed to copy image to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:473 +msgid "Invalid Ceph args provided for backup rbd operation" +msgstr "" + +#: cinder/exception.py:477 +msgid "An error has occurred during backup operation" +msgstr "" + +#: cinder/exception.py:481 +msgid "Backup RBD operation failed" +msgstr "" + +#: cinder/exception.py:485 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:489 +msgid "Failed to identify volume backend." +msgstr "" + +#: cinder/exception.py:493 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "" + +#: cinder/exception.py:497 +#, python-format +msgid "Connection to swift failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:501 +#, python-format +msgid "Transfer %(transfer_id)s could not be found." +msgstr "" + +#: cinder/exception.py:505 +#, python-format +msgid "Volume migration failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:509 +#, python-format +msgid "SSH command injection detected: %(command)s" +msgstr "" + +#: cinder/exception.py:513 +#, python-format +msgid "QoS Specs %(specs_id)s already exists." +msgstr "" + +#: cinder/exception.py:517 +#, python-format +msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:522 +#, python-format +msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "No such QoS spec %(specs_id)s." +msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:536 +#, python-format +msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:541 +#, python-format +msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." +msgstr "" + +#: cinder/exception.py:546 +#, python-format +msgid "Invalid qos specs: %(reason)s" +msgstr "" + +#: cinder/exception.py:550 +#, python-format +msgid "QoS Specs %(specs_id)s is still associated with entities." +msgstr "" + +#: cinder/exception.py:554 +#, python-format +msgid "key manager error: %(reason)s" +msgstr "" + +#: cinder/exception.py:560 +msgid "Coraid Cinder Driver exception." +msgstr "" + +#: cinder/exception.py:564 +msgid "Failed to encode json data." +msgstr "" + +#: cinder/exception.py:568 +msgid "Login on ESM failed." +msgstr "" + +#: cinder/exception.py:572 +msgid "Relogin on ESM failed." +msgstr "" + +#: cinder/exception.py:576 +#, python-format +msgid "Group with name \"%(group_name)s\" not found." +msgstr "" + +#: cinder/exception.py:580 +#, python-format +msgid "ESM configure request failed: %(message)s." +msgstr "" + +#: cinder/exception.py:584 +#, python-format +msgid "Coraid ESM not available with reason: %(reason)s." +msgstr "" + +#: cinder/exception.py:589 +msgid "Zadara Cinder Driver exception." 
+msgstr "" + +#: cinder/exception.py:593 +#, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:597 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:601 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:605 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:609 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:613 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:618 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:622 +msgid "SolidFire Cinder Driver exception" +msgstr "" + +#: cinder/exception.py:626 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:630 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:636 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:641 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:645 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:649 cinder/exception.py:662 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:654 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:658 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/manager.py:133 +msgid "Notifying Schedulers of capabilities ..." +msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:105 +#, python-format +msgid "" +"Default quota for resource: %(res)s is set by the default quota flag: " +"quota_%(res)s, it is now deprecated. Please use the the default quota " +"class for default quota." +msgstr "" + +#: cinder/quota.py:748 +#, python-format +msgid "Created reservations %s" +msgstr "" + +#: cinder/quota.py:770 +#, python-format +msgid "Failed to commit reservations %s" +msgstr "" + +#: cinder/quota.py:790 +#, python-format +msgid "Failed to roll back reservations %s" +msgstr "" + +#: cinder/quota.py:876 +msgid "Cannot register resource" +msgstr "" + +#: cinder/quota.py:879 +msgid "Cannot register resources" +msgstr "" + +#: cinder/quota_utils.py:46 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume - " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/quota_utils.py:56 cinder/transfer/api.py:168 +#: cinder/volume/flows/api/create_volume.py:520 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/service.py:95 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:108 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:148 +#, python-format +msgid "" +"Report interval must be less than service down time. Current config " +"service_down_time: %(service_down_time)s, report_interval for this: " +"service is: %(report_interval)s. 
Setting global service_down_time to: " +"%(new_down_time)s" +msgstr "" + +#: cinder/service.py:216 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:255 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:270 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:276 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:298 +#, python-format +msgid "" +"Value of config option %(name)s_workers must be integer greater than 1. " +"Input value ignored." +msgstr "" + +#: cinder/service.py:373 +msgid "serve() can only be called once" +msgstr "" + +#: cinder/service.py:379 cinder/openstack/common/service.py:166 +#: cinder/openstack/common/service.py:384 +msgid "Full set of CONF:" +msgstr "" + +#: cinder/service.py:387 +#, python-format +msgid "%s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Can not translate %s to integer." +msgstr "" + +#: cinder/utils.py:127 +#, python-format +msgid "May specify only one of %s" +msgstr "" + +#: cinder/utils.py:212 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:228 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:412 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:423 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:698 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:759 +#, python-format +msgid "Volume driver %s not initialized" +msgstr "" + +#: cinder/wsgi.py:127 cinder/openstack/common/sslutils.py:50 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: cinder/wsgi.py:130 cinder/openstack/common/sslutils.py:53 +#, python-format +msgid "Unable to find ca_file : %s" +msgstr "" + +#: cinder/wsgi.py:133 cinder/openstack/common/sslutils.py:56 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: cinder/wsgi.py:136 cinder/openstack/common/sslutils.py:59 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:169 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:206 +#, python-format +msgid "Started %(name)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:244 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:313 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." 
+msgstr "" + +#: cinder/api/common.py:92 cinder/api/common.py:126 cinder/volume/api.py:266 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:95 cinder/api/common.py:130 cinder/volume/api.py:263 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:120 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:134 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:162 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:189 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:182 +msgid "Initializing extension manager." +msgstr "" + +#: cinder/api/extensions.py:197 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:235 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:236 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:240 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:256 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:262 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:276 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:287 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:356 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:266 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:463 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:786 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:907 +msgid "subclasses must implement construct()!" 
+msgstr "" + +#: cinder/api/contrib/admin_actions.py:81 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:227 +#: cinder/api/contrib/volume_transfer.py:157 +#: cinder/api/contrib/volume_transfer.py:193 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:224 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:60 +msgid "Snapshot not found." +msgstr "" + +#: cinder/api/contrib/hosts.py:86 cinder/api/openstack/wsgi.py:245 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:136 +#, python-format +msgid "Host '%s' could not be found." +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:168 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:180 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:206 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:214 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:111 +msgid "Please specify a name for QoS specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:220 +msgid "Failed to disassociate qos specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:222 +msgid "Qos specs still in use." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:298 +#: cinder/api/contrib/qos_specs_manage.py:351 +msgid "Volume Type id must not be None." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:72 +msgid "Missing required element quota_class_set in request body." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:81 +msgid "Quota class limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:85 +msgid "Quota class limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:60 +msgid "Quota limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quotas.py:65 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:100 +msgid "Missing required element quota_set in request body." +msgstr "" + +#: cinder/api/contrib/quotas.py:111 +#, python-format +msgid "Bad key(s) in quota set: %s" +msgstr "" + +#: cinder/api/contrib/scheduler_hints.py:36 +msgid "Malformed scheduler_hints attribute" +msgstr "" + +#: cinder/api/contrib/services.py:84 +msgid "" +"Query by service parameter is deprecated. Please use binary parameter " +"instead." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:51 +msgid "'status' must be specified." 
+msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:61 +#, python-format +msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:67 +#, python-format +msgid "" +"Provided snapshot status %(provided)s not allowed for snapshot with " +"status %(current)s." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:79 +msgid "progress must be an integer percentage" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:101 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:105 +#: cinder/api/v1/snapshot_metadata.py:75 cinder/api/v1/volume_metadata.py:75 +#: cinder/api/v2/snapshot_metadata.py:75 cinder/api/v2/volume_metadata.py:74 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:108 +#: cinder/api/v1/snapshot_metadata.py:79 cinder/api/v1/volume_metadata.py:79 +#: cinder/api/v2/snapshot_metadata.py:79 cinder/api/v2/volume_metadata.py:78 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:150 +msgid "" +"Key names can only contain alphanumeric characters, underscores, periods," +" colons and hyphens." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:99 +#, python-format +msgid "" +"Invalid request to attach volume to an instance %(instance_uuid)s and a " +"host %(host_name)s simultaneously" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:107 +msgid "Invalid request to attach volume to an invalid target" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:111 +msgid "" +"Invalid request to attach volume with an invalid mode. Attaching mode " +"should be 'rw' or 'ro'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:196 +msgid "Unable to fetch connection information from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:216 +msgid "Unable to terminate volume connection from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:229 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:237 +msgid "Bad value for 'force' parameter." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:240 +msgid "'force' is not string or bool." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:280 +msgid "New volume size must be specified as an integer." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:299 +msgid "Must specify readonly in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:307 +msgid "Bad value for 'readonly'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:311 +msgid "'readonly' not string or bool" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:325 +msgid "New volume type must be specified." 
+msgstr "" + +#: cinder/api/contrib/volume_transfer.py:131 +msgid "Listing volume transfers" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:147 +#, python-format +msgid "Creating new volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:162 +#, python-format +msgid "Creating transfer of volume %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:183 +#, python-format +msgid "Accepting volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:196 +#, python-format +msgid "Accepting transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:217 +#, python-format +msgid "Delete transfer with id: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:64 +msgid "key_size must be non-negative" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:67 +msgid "key_size must be an integer" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:73 +msgid "provider must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:75 +msgid "control_location must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:83 +#, python-format +msgid "Valid control location are: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:111 +msgid "Cannot create encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:115 +msgid "Create body is not valid." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:157 +msgid "Cannot delete encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/middleware/auth.py:108 +msgid "Invalid service catalog json." +msgstr "" + +#: cinder/api/middleware/fault.py:44 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:53 cinder/api/openstack/wsgi.py:984 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/fault.py:69 +#, python-format +msgid "%(exception)s: %(explanation)s" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:55 cinder/api/middleware/sizelimit.py:64 +#: cinder/api/middleware/sizelimit.py:78 +msgid "Request is too large." +msgstr "" + +#: cinder/api/openstack/__init__.py:69 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:80 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:104 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:126 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." 
+msgstr "" + +#: cinder/api/openstack/wsgi.py:220 cinder/api/openstack/wsgi.py:634 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:639 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:677 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:682 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:685 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:793 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:799 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:803 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:914 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:917 cinder/api/openstack/wsgi.py:930 +#: cinder/api/v1/snapshot_metadata.py:53 cinder/api/v1/snapshot_metadata.py:71 +#: cinder/api/v1/snapshot_metadata.py:96 cinder/api/v1/snapshot_metadata.py:121 +#: cinder/api/v1/volume_metadata.py:53 cinder/api/v1/volume_metadata.py:71 +#: cinder/api/v1/volume_metadata.py:96 cinder/api/v1/volume_metadata.py:121 +#: cinder/api/v2/snapshot_metadata.py:53 cinder/api/v2/snapshot_metadata.py:71 +#: cinder/api/v2/snapshot_metadata.py:96 cinder/api/v2/snapshot_metadata.py:121 +#: cinder/api/v2/volume_metadata.py:52 cinder/api/v2/volume_metadata.py:70 +#: cinder/api/v2/volume_metadata.py:95 cinder/api/v2/volume_metadata.py:120 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:927 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:939 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:987 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:139 cinder/api/v2/limits.py:138 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:264 cinder/api/v2/limits.py:261 +msgid "This request was rate-limited." 
+msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:37 cinder/api/v1/snapshot_metadata.py:117 +#: cinder/api/v1/snapshot_metadata.py:156 cinder/api/v2/snapshot_metadata.py:37 +#: cinder/api/v2/snapshot_metadata.py:117 +#: cinder/api/v2/snapshot_metadata.py:156 +msgid "snapshot does not exist" +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:139 +#: cinder/api/v1/snapshot_metadata.py:149 cinder/api/v1/volume_metadata.py:139 +#: cinder/api/v1/volume_metadata.py:149 cinder/api/v2/snapshot_metadata.py:139 +#: cinder/api/v2/snapshot_metadata.py:149 cinder/api/v2/volume_metadata.py:138 +#: cinder/api/v2/volume_metadata.py:148 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:119 cinder/api/v2/snapshots.py:120 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:184 +msgid "'volume_id' must be specified" +msgstr "" + +#: cinder/api/v1/snapshots.py:182 cinder/api/v2/snapshots.py:193 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:186 cinder/api/v2/snapshots.py:202 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:37 cinder/api/v1/volume_metadata.py:117 +#: cinder/api/v1/volume_metadata.py:156 cinder/api/v2/volume_metadata.py:36 +#: cinder/api/v2/volume_metadata.py:116 cinder/api/v2/volume_metadata.py:155 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:111 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:290 cinder/api/v2/volumes.py:228 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:344 cinder/api/v1/volumes.py:348 +#: cinder/api/v2/volumes.py:298 cinder/api/v2/volumes.py:302 +msgid "Invalid imageRef provided." +msgstr "" + +#: cinder/api/v1/volumes.py:388 cinder/api/v2/volumes.py:354 +#, python-format +msgid "snapshot id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:401 +#, python-format +msgid "source vol id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:412 cinder/api/v2/volumes.py:377 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/v1/volumes.py:496 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/snapshots.py:111 cinder/api/v2/snapshots.py:126 +#: cinder/api/v2/snapshots.py:267 +msgid "Snapshot could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:174 cinder/api/v2/snapshots.py:234 +#: cinder/api/v2/volumes.py:313 cinder/api/v2/volumes.py:419 +#, python-format +msgid "Missing required element '%s' in request body" +msgstr "" + +#: cinder/api/v2/snapshots.py:190 cinder/api/v2/volumes.py:217 +#: cinder/api/v2/volumes.py:234 cinder/api/v2/volumes.py:449 +msgid "Volume could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:230 cinder/api/v2/volumes.py:415 +msgid "Missing request body" +msgstr "" + +#: cinder/api/v2/types.py:70 +msgid "Volume type not found" +msgstr "" + +#: cinder/api/v2/volumes.py:237 +msgid "Volume cannot be deleted while in attached state" +msgstr "" + +#: cinder/api/v2/volumes.py:343 +msgid "Volume type not found." 
+msgstr "" + +#: cinder/api/v2/volumes.py:366 +#, python-format +msgid "source volume id:%s not found" +msgstr "" + +#: cinder/api/v2/volumes.py:472 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:66 +msgid "Backup status must be available or error" +msgstr "" + +#: cinder/backup/api.py:105 +msgid "Volume to be backed up must be available" +msgstr "" + +#: cinder/backup/api.py:140 +msgid "Backup status must be available" +msgstr "" + +#: cinder/backup/api.py:145 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:154 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:170 +msgid "Volume to be restored to must be available" +msgstr "" + +#: cinder/backup/api.py:176 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." +msgstr "" + +#: cinder/backup/api.py:181 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:97 +msgid "NULL host not allowed for volume backend lookup." +msgstr "" + +#: cinder/backup/manager.py:100 +#, python-format +msgid "Checking hostname '%s' for backend info." +msgstr "" + +#: cinder/backup/manager.py:107 +#, python-format +msgid "Backend not found in hostname (%s) so using default." +msgstr "" + +#: cinder/backup/manager.py:117 +#, python-format +msgid "Manager requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:120 cinder/backup/manager.py:132 +msgid "Fetching default backend." +msgstr "" + +#: cinder/backup/manager.py:123 +#, python-format +msgid "Volume manager for backend '%s' does not exist." +msgstr "" + +#: cinder/backup/manager.py:129 +#, python-format +msgid "Driver requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:147 +#, python-format +msgid "" +"Registering backend %(backend)s (host=%(host)s " +"backend_name=%(backend_name)s)." +msgstr "" + +#: cinder/backup/manager.py:154 +#, python-format +msgid "Registering default backend %s." +msgstr "" + +#: cinder/backup/manager.py:158 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)." +msgstr "" + +#: cinder/backup/manager.py:165 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s." +msgstr "" + +#: cinder/backup/manager.py:184 +msgid "Cleaning up incomplete backup operations." +msgstr "" + +#: cinder/backup/manager.py:189 +#, python-format +msgid "Resetting volume %s to available (was backing-up)." +msgstr "" + +#: cinder/backup/manager.py:194 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)." +msgstr "" + +#: cinder/backup/manager.py:206 +#, python-format +msgid "Resetting backup %s to error (was creating)." +msgstr "" + +#: cinder/backup/manager.py:212 +#, python-format +msgid "Resetting backup %s to available (was restoring)." +msgstr "" + +#: cinder/backup/manager.py:217 +#, python-format +msgid "Resuming delete on backup: %s." +msgstr "" + +#: cinder/backup/manager.py:225 +#, python-format +msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:237 +#, python-format +msgid "" +"Create backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s." 
+msgstr "" + +#: cinder/backup/manager.py:249 +#, python-format +msgid "" +"Create backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:282 +#, python-format +msgid "Create backup finished. backup: %s." +msgstr "" + +#: cinder/backup/manager.py:286 +#, python-format +msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:299 +#, python-format +msgid "" +"Restore backup aborted: expected volume status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:310 +#, python-format +msgid "" +"Restore backup aborted: expected backup status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:329 +#, python-format +msgid "" +"Restore backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:360 +#, python-format +msgid "" +"Restore backup finished, backup %(backup_id)s restored to volume " +"%(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:379 +#, python-format +msgid "Delete backup started, backup: %s." +msgstr "" + +#: cinder/backup/manager.py:386 +#, python-format +msgid "" +"Delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:399 +#, python-format +msgid "" +"Delete backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:422 +#, python-format +msgid "Delete backup finished, backup %s deleted." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:116 +msgid "" +"rbd striping not supported - ignoring configuration settings for rbd " +"striping" +msgstr "" + +#: cinder/backup/drivers/ceph.py:147 +#, python-format +msgid "invalid user '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:213 +msgid "backup_id required" +msgstr "" + +#: cinder/backup/drivers/ceph.py:224 +#, python-format +msgid "discarding %(length)s bytes from offset %(offset)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:232 +#, python-format +msgid "writing zeroes chunk %d" +msgstr "" + +#: cinder/backup/drivers/ceph.py:246 +#, python-format +msgid "transferring data between '%(src)s' and '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:250 +#, python-format +msgid "%(chunks)s chunks of %(bytes)s bytes to be transferred" +msgstr "" + +#: cinder/backup/drivers/ceph.py:269 +#, python-format +msgid "transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:279 +#, python-format +msgid "transferring remaining %s bytes" +msgstr "" + +#: cinder/backup/drivers/ceph.py:295 +#, python-format +msgid "creating base image '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:322 cinder/backup/drivers/ceph.py:603 +#, python-format +msgid "deleting backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:325 +msgid "no backup snapshot to delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:361 +#, python-format +msgid "trying diff format name format basename='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:369 +#, python-format +msgid "image %s not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:377 +#, python-format +msgid "base image still has %s snapshots so skipping base image delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:382 +#, python-format +msgid "deleting base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:389 +#, python-format +msgid "image busy, retrying %(retries)s more time(s) in %(delay)ss" +msgstr "" + +#: cinder/backup/drivers/ceph.py:394 +msgid "max retries reached - raising error" +msgstr "" + +#: cinder/backup/drivers/ceph.py:397 +#, python-format +msgid "base backup image='%s' deleted)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:407 +#, python-format +msgid "deleting source snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:453 +#, python-format +msgid "performing differential transfer from '%(src)s' to '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:478 +#, python-format +msgid "rbd diff op failed - (ret=%(ret)s stderr=%(stderr)s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:488 +#, python-format +msgid "image '%s' not found - trying diff format name" +msgstr "" + +#: cinder/backup/drivers/ceph.py:493 +#, python-format +msgid "diff format image '%s' not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:528 +#, python-format +msgid "using --from-snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:543 +#, python-format +msgid "source snap '%s' is stale so deleting" +msgstr "" + +#: cinder/backup/drivers/ceph.py:555 +#, python-format +msgid "" +"snap='%(snap)s' does not exist in base image='%(base)s' - aborting " +"incremental backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:566 +#, python-format +msgid "creating backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:586 +#, python-format +msgid "differential backup transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:595 +msgid "differential backup transfer failed" +msgstr "" + +#: 
cinder/backup/drivers/ceph.py:625 +#, python-format +msgid "creating base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:634 +msgid "copying data" +msgstr "" + +#: cinder/backup/drivers/ceph.py:694 +#, python-format +msgid "looking for snapshot of backup base '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:697 +#, python-format +msgid "backup base '%s' has no snapshots" +msgstr "" + +#: cinder/backup/drivers/ceph.py:704 +#, python-format +msgid "backup '%s' has no snapshot" +msgstr "" + +#: cinder/backup/drivers/ceph.py:708 +#, python-format +msgid "backup should only have one snapshot but instead has %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:713 +#, python-format +msgid "found snapshot '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:734 +msgid "need non-zero volume size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:751 +#, python-format +msgid "Starting backup of volume='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:764 +msgid "forcing full backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:776 +#, python-format +msgid "backup '%s' finished." +msgstr "" + +#: cinder/backup/drivers/ceph.py:834 +msgid "adjusting restore vol size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:846 +#, python-format +msgid "trying incremental restore from base='%(base)s' snap='%(snap)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:858 +msgid "differential restore failed, trying full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:869 +#, python-format +msgid "restore transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:916 +#, python-format +msgid "rbd has %s extents" +msgstr "" + +#: cinder/backup/drivers/ceph.py:938 +msgid "dest volume is original volume - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:959 +msgid "destination has extents - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:964 +#, python-format +msgid "no restore point found for backup='%s', forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:995 +msgid "forcing full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1005 +#, python-format +msgid "starting restore from Ceph backup=%(src)s to volume=%(dest)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1016 +msgid "volume_file does not support fileno() so skipping fsync()" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1021 +msgid "restore finished successfully." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:1023 +#, python-format +msgid "restore finished with error - %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1029 +#, python-format +msgid "delete started for backup=%s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1034 +msgid "rbd image not found but continuing anyway so that db entry can be removed" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1037 +#, python-format +msgid "delete '%s' finished with warning" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1039 +#, python-format +msgid "delete '%s' finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:106 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:123 +#, python-format +msgid "single_user auth mode enabled, but %(param)s not set" +msgstr "" + +#: cinder/backup/drivers/swift.py:141 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:146 +#, python-format +msgid "container %s does not exist" +msgstr "" + +#: cinder/backup/drivers/swift.py:151 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/drivers/swift.py:157 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:173 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:182 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:192 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:209 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/drivers/swift.py:214 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:219 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:224 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/drivers/swift.py:234 +#, python-format +msgid "volume size %d is invalid." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:248 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:271 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/drivers/swift.py:278 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:287 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/drivers/swift.py:291 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/drivers/swift.py:297 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:301 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:304 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:312 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/drivers/swift.py:328 cinder/backup/drivers/tsm.py:324 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/drivers/swift.py:345 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/drivers/swift.py:350 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:356 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/drivers/swift.py:362 +#, python-format +msgid "" +"restoring object from swift. backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:378 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/drivers/swift.py:401 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:409 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:423 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:428 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:432 cinder/backup/drivers/tsm.py:378 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:446 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:455 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:458 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:468 cinder/backup/drivers/tsm.py:440 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/backup/drivers/tsm.py:85 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to create device hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:143 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to obtain backup success notification from " +"server.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:173 +#, python-format +msgid "" +"restore: %(vol_id)s Failed.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:199 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a block device." +msgstr "" + +#: cinder/backup/drivers/tsm.py:206 +#, python-format +msgid "backup: %(vol_id)s Failed. Cannot obtain real path to device %(path)s." +msgstr "" + +#: cinder/backup/drivers/tsm.py:213 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a file." +msgstr "" + +#: cinder/backup/drivers/tsm.py:260 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to remove backup hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:286 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to TSM, volume path: " +"%(volume_path)s," +msgstr "" + +#: cinder/backup/drivers/tsm.py:298 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:308 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:338 +#, python-format +msgid "" +"restore: starting restore of backup from TSM to volume %(volume_id)s, " +"backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:352 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:362 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:413 +#, python-format +msgid "" +"delete: %(vol_id)s Failed to run dsmc with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:421 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments with " +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:432 +#, python-format +msgid "" +"delete: %(vol_id)s Failed with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/brick/exception.py:55 +#, python-format +msgid "Exception in string format operation. 
msg='%s'" +msgstr "" + +#: cinder/brick/exception.py:85 +msgid "We are unable to locate any Fibre Channel devices." +msgstr "" + +#: cinder/brick/exception.py:89 +msgid "Unable to find a Fibre Channel volume device." +msgstr "" + +#: cinder/brick/exception.py:93 +#, python-format +msgid "Volume device not found at %(device)s." +msgstr "" + +#: cinder/brick/exception.py:97 +#, python-format +msgid "Unable to find Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:101 +#, python-format +msgid "Failed to create Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:105 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:109 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:113 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:117 +#, python-format +msgid "Connect to volume via protocol %(protocol)s not supported." +msgstr "" + +#: cinder/brick/initiator/connector.py:127 +#, python-format +msgid "Invalid InitiatorConnector protocol specified %(protocol)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:140 +#, python-format +msgid "Failed to access the device on the path %(path)s: %(error)s %(info)s." +msgstr "" + +#: cinder/brick/initiator/connector.py:229 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. Try" +" number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:242 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:317 +#, python-format +msgid "Could not find the iSCSI Initiator File %s" +msgstr "" + +#: cinder/brick/initiator/connector.py:609 +msgid "We are unable to locate any Fibre Channel devices" +msgstr "" + +#: cinder/brick/initiator/connector.py:619 +#, python-format +msgid "Looking for Fibre Channel dev %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:629 +msgid "Fibre Channel volume device not found." +msgstr "" + +#: cinder/brick/initiator/connector.py:633 +#, python-format +msgid "Fibre volume not yet found. Will rescan & retry. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:649 +#, python-format +msgid "Found Fibre Channel volume %(name)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:658 +#, python-format +msgid "Multipath device discovered %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:776 +#, python-format +msgid "AoE volume not yet found at: %(path)s. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:789 +#, python-format +msgid "Found AoE device %(path)s (after %(tries)s rediscover)" +msgstr "" + +#: cinder/brick/initiator/connector.py:815 +#, python-format +msgid "aoe-discover: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:825 +#, python-format +msgid "aoe-revalidate %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:834 +#, python-format +msgid "aoe-flush %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:858 +msgid "" +"Connection details not present. RemoteFsClient may not initialize " +"properly." 
+msgstr "" + +#: cinder/brick/initiator/connector.py:915 +msgid "Invalid connection_properties specified no device_path attribute" +msgstr "" + +#: cinder/brick/initiator/linuxfc.py:50 cinder/brick/initiator/linuxfc.py:56 +msgid "systool is not installed" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:99 +#: cinder/brick/initiator/linuxscsi.py:107 +#: cinder/brick/initiator/linuxscsi.py:124 +#, python-format +msgid "multipath call failed exit (%(code)s)" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:145 +#, python-format +msgid "Couldn't find multipath device %(line)s" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:149 +#, python-format +msgid "Found multipath device = %(mdev)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:140 +msgid "Attempting recreate of backing lun..." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:158 +#, python-format +msgid "" +"Failed to recover attempt to create iscsi backing lun for volume " +"id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:177 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:184 +#, python-format +msgid "" +"Created volume path %(vp)s,\n" +"content: %(vc)%" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:216 cinder/brick/iscsi/iscsi.py:365 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:227 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:258 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:262 +#, python-format +msgid "Volume path %s does not exist, nothing to remove." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:280 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:290 cinder/brick/iscsi/iscsi.py:550 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:375 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:469 +msgid "cinder-rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:489 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:513 cinder/brick/iscsi/iscsi.py:522 +#, python-format +msgid "Failed to create iscsi target for volume id:%s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:532 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:542 +#, python-format +msgid "Failed to remove iscsi target for volume id:%s." 
+msgstr "" + +#: cinder/brick/iscsi/iscsi.py:571 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 cinder/brick/local_dev/lvm.py:158 +#: cinder/brick/local_dev/lvm.py:474 cinder/brick/local_dev/lvm.py:503 +#: cinder/brick/local_dev/lvm.py:546 cinder/brick/local_dev/lvm.py:609 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 cinder/brick/local_dev/lvm.py:159 +#: cinder/brick/local_dev/lvm.py:475 cinder/brick/local_dev/lvm.py:504 +#: cinder/brick/local_dev/lvm.py:547 cinder/brick/local_dev/lvm.py:610 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 cinder/brick/local_dev/lvm.py:160 +#: cinder/brick/local_dev/lvm.py:476 cinder/brick/local_dev/lvm.py:505 +#: cinder/brick/local_dev/lvm.py:548 cinder/brick/local_dev/lvm.py:611 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, python-format +msgid "Unable to locate Volume Group %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:157 +msgid "Error querying thin pool about data_percent" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:370 +#, python-format +msgid "Unable to find VG: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:420 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:473 +msgid "Error creating Volume" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:489 +#, python-format +msgid "Unable to find LV: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:502 +msgid "Error creating snapshot" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:545 +msgid "Error activating LV" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:563 +#, python-format +msgid "Error reported running lvremove: CMD: %(command)s, RESPONSE: %(response)s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:568 +msgid "Attempting udev settle and retry of lvremove..." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:608 +msgid "Error extending Volume" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:39 +msgid "nfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:45 +msgid "glusterfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:86 +#, python-format +msgid "Already mounted: %s" +msgstr "" + +#: cinder/common/config.py:125 +msgid "Deploy v1 of the Cinder API." +msgstr "" + +#: cinder/common/config.py:128 +msgid "Deploy v2 of the Cinder API." +msgstr "" + +#: cinder/common/sqlalchemyutils.py:66 +#: cinder/openstack/common/db/sqlalchemy/utils.py:72 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:114 +#: cinder/openstack/common/db/sqlalchemy/utils.py:120 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/compute/nova.py:97 +#, python-format +msgid "Novaclient connection created using URL: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:63 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:190 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:843 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1842 +#, python-format +msgid "VolumeType %s deletion failed, VolumeType in use." 
+msgstr "" + +#: cinder/db/sqlalchemy/api.py:2530 +#, python-format +msgid "No backup with id %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2615 +msgid "Volume must be available" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2639 +#, python-format +msgid "Volume in unexpected state %s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2662 +#, python-format +msgid "" +"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " +"%(status)s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:37 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:64 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:240 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:269 +msgid "Downgrade from initial Cinder install is unsupported." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:49 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:74 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:105 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:45 +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:48 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:46 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:127 +msgid "Dropping foreign key reservations_ibfk_1 failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:133 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:140 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:147 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:60 +msgid "Exception while creating table 'volume_glance_metadata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:75 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:68 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:58 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:61 +msgid "transfers table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:31 +msgid "migrations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:61 +#, python-format +msgid "Table |%s| not created" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:37 +#, python-format +msgid "Exception while dropping table %s." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:100 +#, python-format +msgid "Exception while creating table %s." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:34 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:43 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:49 +#, python-format +msgid "Column |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:92 +msgid "encryption_key_id column not dropped from volumes" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:100 +msgid "encryption_key_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:105 +msgid "volume_type_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:113 +msgid "encryption table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:49 +msgid "Table quality_of_service_specs not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:60 +msgid "Added qos_specs_id column to volume type table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:85 +msgid "Dropping foreign key volume_types_ibfk_1 failed" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:93 +msgid "Dropping qos_specs_id column failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:100 +msgid "Dropping quality_of_service_specs table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:59 +msgid "volume_admin_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:48 +msgid "" +"Found existing 'default' entries in the quota_classes table. Skipping " +"insertion of default values." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:72 +msgid "Added default quota class data into the DB." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:74 +msgid "Default quota class data not inserted into the DB." +msgstr "" + +#: cinder/image/glance.py:161 cinder/image/glance.py:169 +#, python-format +msgid "Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:94 cinder/image/image_utils.py:199 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/image/image_utils.py:101 +#, python-format +msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:109 cinder/image/image_utils.py:192 +#, python-format +msgid "" +"Size is %(image_size)dGB and doesn't fit in a volume of size " +"%(volume_size)dGB." +msgstr "" + +#: cinder/image/image_utils.py:157 +#, python-format +msgid "" +"qemu-img is not installed and image is of type %s. Only RAW images can " +"be used if qemu-img is not installed." +msgstr "" + +#: cinder/image/image_utils.py:164 +msgid "" +"qemu-img is not installed and the disk format is not specified. Only RAW" +" images can be used if qemu-img is not installed." 
+msgstr "" + +#: cinder/image/image_utils.py:178 +#, python-format +msgid "Copying image from %(tmp)s to volume %(dest)s - size: %(size)s" +msgstr "" + +#: cinder/image/image_utils.py:206 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:224 +#, python-format +msgid "Converted to %(vol_format)s, but format is now %(file_format)s" +msgstr "" + +#: cinder/image/image_utils.py:260 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:78 +msgid "" +"config option keymgr.fixed_key has not been defined: some operations may " +"fail unexpectedly" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:80 +msgid "keymgr.fixed_key not defined" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:134 +#, python-format +msgid "Not deleting key %s" +msgstr "" + +#: cinder/openstack/common/eventlet_backdoor.py:140 +#, python-format +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/fileutils.py:64 +#, python-format +msgid "Reloading cached file %s" +msgstr "" + +#: cinder/openstack/common/gettextutils.py:252 +msgid "Message objects do not support addition." +msgstr "" + +#: cinder/openstack/common/gettextutils.py:261 +msgid "" +"Message objects do not support str() because they may contain non-ascii " +"characters. Please use unicode() or translate() instead." +msgstr "" + +#: cinder/openstack/common/imageutils.py:96 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:189 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:200 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:227 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:235 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/log.py:301 +#, python-format +msgid "Deprecated: %s" +msgstr "" + +#: cinder/openstack/common/log.py:402 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:453 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:623 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:82 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:89 +#: cinder/tests/brick/test_brick_connector.py:466 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:129 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:136 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:43 +#, python-format +msgid "Unexpected argument for periodic task creation: %(arg)s." 
+msgstr "" + +#: cinder/openstack/common/periodic_task.py:134 +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:139 +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:177 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:186 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." +msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:127 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/openstack/common/processutils.py:142 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:167 +#: cinder/openstack/common/processutils.py:239 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:345 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:179 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/openstack/common/processutils.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:318 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:220 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/openstack/common/processutils.py:224 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/openstack/common/service.py:175 +#: cinder/openstack/common/service.py:269 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:187 +msgid "Exception during rpc cleanup." 
+msgstr "" + +#: cinder/openstack/common/service.py:238 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:275 +msgid "Unhandled exception" +msgstr "" + +#: cinder/openstack/common/service.py:308 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/openstack/common/service.py:327 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/openstack/common/service.py:337 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/openstack/common/service.py:354 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/openstack/common/service.py:358 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/service.py:362 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/openstack/common/service.py:392 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/openstack/common/service.py:410 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/openstack/common/sslutils.py:98 +#, python-format +msgid "Invalid SSL version : %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:86 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/strutils.py:182 +#, python-format +msgid "Invalid string format: %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:189 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/openstack/common/versionutils.py:69 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and " +"may be removed in %(remove_in)s." +msgstr "" + +#: cinder/openstack/common/versionutils.py:73 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s and may be removed in " +"%(remove_in)s. It will not be superseded." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:29 +msgid "An unknown error occurred in crypto utils." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:36 +#, python-format +msgid "Block size of %(given)d is too big, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:45 +#, python-format +msgid "Length of %(given)d is too long, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/db/exception.py:44 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:487 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:538 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:610 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/utils.py:33 +msgid "Sort key supplied was not valid." +msgstr "" + +#: cinder/openstack/common/notifier/api.py:129 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:145 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:164 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. 
Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:105 +#, python-format +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:83 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:216 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:299 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:345 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "received %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:422 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:423 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:280 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:459 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:594 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:597 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:631 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:640 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:668 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." 
+msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:166 +#: cinder/openstack/common/rpc/impl_qpid.py:164 +msgid "Failed to process message... skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:477 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:499 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:536 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:552 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:606 +#: cinder/openstack/common/rpc/impl_qpid.py:507 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:624 +#: cinder/openstack/common/rpc/impl_qpid.py:522 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:628 +#: cinder/openstack/common/rpc/impl_qpid.py:526 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:667 +#: cinder/openstack/common/rpc/impl_qpid.py:561 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:84 +#, python-format +msgid "Invalid value for qpid_topology_version: %d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:455 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:461 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:474 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:534 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:101 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:136 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:137 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:138 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:146 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:158 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:200 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:205 +msgid "You cannot send on this socket." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:267 +#, python-format +msgid "Running func with context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:305 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:387 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:437 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:443 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:475 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:481 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:497 +#, python-format +msgid "Required IPC directory does not exist at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:506 +#, python-format +msgid "Permission denied to IPC directory at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:509 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:543 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:562 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:590 +msgid "Skipping topic registration. Already registered." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:597 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:649 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:662 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:675 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:678 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:681 +#, python-format +msgid "Received message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:682 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:691 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:698 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:721 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:724 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:728 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:731 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:771 +#, python-format +msgid "topic is %s." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:815 +#, python-format +msgid "rpc_zmq_matchmaker = %(orig)s is deprecated; use %(new)s instead" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#: cinder/openstack/common/rpc/matchmaker_ring.py:79 +#: cinder/openstack/common/rpc/matchmaker_ring.py:97 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:54 +#, python-format +msgid "extra_spec requirement '%(req)s' does not match '%(cap)s'" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:67 +#, python-format +msgid "%(host_state)s fails resource_type extra_specs requirements" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:43 +msgid "Re-scheduling is disabled." +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:52 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/scheduler/driver.py:69 +msgid "Must implement host_passes_filters" +msgstr "" + +#: cinder/scheduler/driver.py:74 +msgid "Must implement find_retype_host" +msgstr "" + +#: cinder/scheduler/driver.py:78 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:82 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:98 +#, python-format +msgid "cannot place volume %(id)s on %(host)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:114 +#, python-format +msgid "No valid hosts for volume %(id)s with type %(type)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:125 +#, python-format +msgid "" +"Current host not valid for volume %(id)s with type %(type)s, migration " +"not allowed" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:156 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:174 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:207 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:259 +#, python-format +msgid "Filtered %s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:276 +#, python-format +msgid "Choosing %s" +msgstr "" + +#: cinder/scheduler/host_manager.py:264 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:269 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:294 +#, python-format +msgid "volume service is down or disabled. (host: %s)" +msgstr "" + +#: cinder/scheduler/manager.py:63 +msgid "" +"ChanceScheduler and SimpleScheduler have been deprecated due to lack of " +"support for advanced features like: volume types, volume encryption, QoS " +"etc. These two schedulers can be fully replaced by FilterScheduler with " +"certain combination of filters and weighers." 
+msgstr "" + +#: cinder/scheduler/manager.py:98 cinder/scheduler/manager.py:100 +msgid "Failed to create scheduler manager volume flow" +msgstr "" + +#: cinder/scheduler/manager.py:159 +msgid "New volume type not specified in request_spec." +msgstr "" + +#: cinder/scheduler/manager.py:174 +#, python-format +msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." +msgstr "" + +#: cinder/scheduler/manager.py:192 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:68 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%s'" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:43 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:57 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:53 +msgid "No volume_id provided to populate a request_spec from" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:116 +#, python-format +msgid "Failed to schedule_create_volume: %(cause)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:135 +#, python-format +msgid "Failed notifying on %(topic)s payload %(payload)s" +msgstr "" + +#: cinder/tests/fake_driver.py:57 cinder/volume/driver.py:784 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:76 cinder/volume/driver.py:884 +#, python-format +msgid "FAKE ISER: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:97 +msgid "local_path not implemented" +msgstr "" + +#: cinder/tests/fake_driver.py:124 cinder/tests/fake_driver.py:129 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:70 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:78 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:94 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:97 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:58 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_netapp_nfs.py:360 +#, python-format +msgid "Share %(share)s and file name %(file_name)s" +msgstr "" + +#: cinder/tests/test_rbd.py:768 cinder/volume/drivers/rbd.py:175 +msgid "flush() not supported in this version of librbd" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:260 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1507 +#, python-format +msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1510 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1515 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:60 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:61 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: 
cinder/tests/test_xiv_ds8k.py:102 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/tests/api/contrib/test_backups.py:741 +msgid "Invalid input" +msgstr "" + +#: cinder/tests/integrated/test_login.py:29 +#, python-format +msgid "volume: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:32 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:42 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:50 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:58 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:100 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:103 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:121 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:148 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:159 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:166 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/transfer/api.py:68 +msgid "Volume in unexpected state" +msgstr "" + +#: cinder/transfer/api.py:102 cinder/volume/api.py:367 +msgid "status must be available" +msgstr "" + +#: cinder/transfer/api.py:119 +#, python-format +msgid "Failed to create transfer record for %s" +msgstr "" + +#: cinder/transfer/api.py:136 +#, python-format +msgid "Attempt to transfer %s with invalid auth key." +msgstr "" + +#: cinder/transfer/api.py:156 cinder/volume/flows/api/create_volume.py:508 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/transfer/api.py:182 +#, python-format +msgid "Failed to update quota donating volumetransfer id %s" +msgstr "" + +#: cinder/transfer/api.py:199 +#, python-format +msgid "Volume %s has been transferred." 
+msgstr "" + +#: cinder/volume/api.py:143 +#, python-format +msgid "Unable to query if %s is in the availability zone set" +msgstr "" + +#: cinder/volume/api.py:171 cinder/volume/api.py:173 +msgid "Failed to create api volume flow" +msgstr "" + +#: cinder/volume/api.py:202 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:214 +#, python-format +msgid "Volume status must be available or error, but current status is: %s" +msgstr "" + +#: cinder/volume/api.py:224 +msgid "Volume cannot be deleted while migrating" +msgstr "" + +#: cinder/volume/api.py:229 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:285 cinder/volume/api.py:350 +#: cinder/volume/qos_specs.py:240 cinder/volume/volume_types.py:67 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:370 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:377 +msgid "status must be in-use to detach" +msgstr "" + +#: cinder/volume/api.py:388 +msgid "Volume status must be available to reserve" +msgstr "" + +#: cinder/volume/api.py:464 +msgid "Snapshot cannot be created while volume is migrating" +msgstr "" + +#: cinder/volume/api.py:468 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:490 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:502 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:553 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/api.py:581 cinder/volume/flows/api/create_volume.py:208 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:585 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:589 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:720 cinder/volume/api.py:772 +msgid "Volume status must be available/in-use." +msgstr "" + +#: cinder/volume/api.py:723 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/api.py:752 +msgid "Volume status must be available to extend." +msgstr "" + +#: cinder/volume/api.py:757 +#, python-format +msgid "" +"New size for extend must be greater than current size. (current: " +"%(size)s, extended: %(new_size)s)" +msgstr "" + +#: cinder/volume/api.py:778 +msgid "Volume is already part of an active migration" +msgstr "" + +#: cinder/volume/api.py:784 +msgid "volume must not have snapshots" +msgstr "" + +#: cinder/volume/api.py:797 +#, python-format +msgid "No available service named %s" +msgstr "" + +#: cinder/volume/api.py:803 +msgid "Destination host must be different than current host" +msgstr "" + +#: cinder/volume/api.py:833 +msgid "Source volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:837 +msgid "Destination volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:842 +#, python-format +msgid "Destination has migration_status %(stat)s, expected %(exp)s." +msgstr "" + +#: cinder/volume/api.py:853 +msgid "Volume status must be available to update readonly flag." 
+msgstr "" + +#: cinder/volume/api.py:862 +#, python-format +msgid "Unable to update type due to incorrect status on volume: %s" +msgstr "" + +#: cinder/volume/api.py:868 +#, python-format +msgid "Volume %s is already part of an active migration." +msgstr "" + +#: cinder/volume/api.py:874 +#, python-format +msgid "migration_policy must be 'on-demand' or 'never', passed: %s" +msgstr "" + +#: cinder/volume/api.py:887 +#, python-format +msgid "Invalid volume_type passed: %s" +msgstr "" + +#: cinder/volume/api.py:900 +#, python-format +msgid "New volume_type same as original: %s" +msgstr "" + +#: cinder/volume/api.py:915 +msgid "Retype cannot change encryption requirements" +msgstr "" + +#: cinder/volume/api.py:927 +msgid "Retype cannot change front-end qos specs for in-use volumes" +msgstr "" + +#: cinder/volume/driver.py:189 cinder/volume/drivers/netapp/nfs.py:174 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:282 +#, python-format +msgid "copy_data_between_volumes %(src)s -> %(dest)s." +msgstr "" + +#: cinder/volume/driver.py:295 cinder/volume/driver.py:309 +#, python-format +msgid "Failed to attach volume %(vol)s" +msgstr "" + +#: cinder/volume/driver.py:327 +#, python-format +msgid "Failed to copy volume %(src)s to %(dest)d" +msgstr "" + +#: cinder/volume/driver.py:340 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:358 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:394 +#, python-format +msgid "Unable to access the backend storage via the path %(path)s." +msgstr "" + +#: cinder/volume/driver.py:433 +#, python-format +msgid "Creating a new backup for volume %s." +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Restoring backup %(backup)s to volume %(volume)s." +msgstr "" + +#: cinder/volume/driver.py:474 +msgid "Extend volume not implemented" +msgstr "" + +#: cinder/volume/driver.py:533 cinder/volume/drivers/emc/emc_smis_iscsi.py:113 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:546 +#, python-format +msgid "ISCSI discovery attempt failed for:%s" +msgstr "" + +#: cinder/volume/driver.py:548 +#, python-format +msgid "Error from iscsiadm -m discovery: %s" +msgstr "" + +#: cinder/volume/driver.py:595 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:599 cinder/volume/drivers/emc/emc_smis_iscsi.py:156 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:696 +msgid "The volume driver requires the iSCSI initiator name in the connector." +msgstr "" + +#: cinder/volume/driver.py:726 cinder/volume/driver.py:845 +#: cinder/volume/drivers/eqlx.py:247 cinder/volume/drivers/lvm.py:359 +#: cinder/volume/drivers/zadara.py:650 +#: cinder/volume/drivers/emc/emc_smis_common.py:859 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:235 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:602 +#: cinder/volume/drivers/netapp/iscsi.py:1032 +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/nexenta/iscsi.py:538 +#: cinder/volume/drivers/windows/windows.py:205 +msgid "Updating volume stats" +msgstr "" + +#: cinder/volume/driver.py:924 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:203 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." 
+msgstr "" + +#: cinder/volume/manager.py:209 +msgid "" +"ThinLVMVolumeDriver is deprecated, please configure LVMISCSIDriver and " +"lvm_type=thin. Continuing with those settings." +msgstr "" + +#: cinder/volume/manager.py:228 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)" +msgstr "" + +#: cinder/volume/manager.py:235 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s" +msgstr "" + +#: cinder/volume/manager.py:244 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:257 +#, python-format +msgid "Failed to re-export volume %s: setting to error state" +msgstr "" + +#: cinder/volume/manager.py:264 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:271 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:273 +#, python-format +msgid "" +"Error encountered during re-exporting phase of driver initialization: " +"%(name)s" +msgstr "" + +#: cinder/volume/manager.py:283 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:328 cinder/volume/manager.py:330 +msgid "Failed to create manager volume flow" +msgstr "" + +#: cinder/volume/manager.py:374 cinder/volume/manager.py:391 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:380 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:389 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:394 +#, python-format +msgid "Cannot delete volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:422 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:427 +#, python-format +msgid "volume %s: glance metadata deleted" +msgstr "" + +#: cinder/volume/manager.py:430 +#, python-format +msgid "no glance metadata found for volume %s" +msgstr "" + +#: cinder/volume/manager.py:434 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:451 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:462 +#, python-format +msgid "snapshot %(snap_id)s: creating" +msgstr "" + +#: cinder/volume/manager.py:490 +#, python-format +msgid "" +"Failed updating %(snapshot_id)s metadata using the provided volumes " +"%(volume_id)s metadata" +msgstr "" + +#: cinder/volume/manager.py:496 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:508 cinder/volume/manager.py:518 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:526 +#, python-format +msgid "Cannot delete snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:556 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:559 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:579 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:583 +msgid "being attached by another host" +msgstr "" + +#: cinder/volume/manager.py:587 +msgid "being attached by different mode" +msgstr "" + +#: cinder/volume/manager.py:590 +msgid "status must be available or attaching" +msgstr "" + +#: cinder/volume/manager.py:698 +#, python-format +msgid 
"Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "" + +#: cinder/volume/manager.py:760 +#, python-format +msgid "Unable to fetch connection information from backend: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:807 +#, python-format +msgid "Unable to terminate volume connection: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:854 +msgid "failed to create new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:857 +msgid "timeout creating new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:880 +#, python-format +msgid "Failed to copy volume %(vol1)s to %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:909 +#, python-format +msgid "" +"migrate_volume_completion: completing migration for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:921 +#, python-format +msgid "" +"migrate_volume_completion is cleaning up an error for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:940 +#, python-format +msgid "Failed to delete migration source vol %(vol)s: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:976 +#, python-format +msgid "volume %s: calling driver migrate_volume" +msgstr "" + +#: cinder/volume/manager.py:1016 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/manager.py:1024 +#, python-format +msgid "" +"Unable to update stats, %(driver_name)s -%(driver_version)s " +"%(config_group)s driver is uninitialized." +msgstr "" + +#: cinder/volume/manager.py:1044 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/manager.py:1091 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to extend volume by %(s_size)sG, " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/manager.py:1103 +#, python-format +msgid "volume %s: extending" +msgstr "" + +#: cinder/volume/manager.py:1105 +#, python-format +msgid "volume %s: extended successfully" +msgstr "" + +#: cinder/volume/manager.py:1107 +#, python-format +msgid "volume %s: Error trying to extend volume" +msgstr "" + +#: cinder/volume/manager.py:1169 +msgid "Failed to update usages while retyping volume." +msgstr "" + +#: cinder/volume/manager.py:1170 +msgid "Failed to get old volume type quota reservations" +msgstr "" + +#: cinder/volume/manager.py:1190 +#, python-format +msgid "Volume %s: retyped succesfully" +msgstr "" + +#: cinder/volume/manager.py:1193 +#, python-format +msgid "" +"Volume %s: driver error when trying to retype, falling back to generic " +"mechanism." +msgstr "" + +#: cinder/volume/manager.py:1204 +msgid "Retype requires migration but is not allowed." +msgstr "" + +#: cinder/volume/manager.py:1212 +msgid "Volume must not have snapshots." 
+msgstr "" + +#: cinder/volume/qos_specs.py:57 +#, python-format +msgid "Valid consumer of QoS specs are: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:84 cinder/volume/qos_specs.py:105 +#: cinder/volume/qos_specs.py:155 cinder/volume/qos_specs.py:197 +#: cinder/volume/qos_specs.py:211 cinder/volume/qos_specs.py:225 +#: cinder/volume/volume_types.py:43 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:123 cinder/volume/qos_specs.py:140 +#: cinder/volume/qos_specs.py:272 cinder/volume/volume_types.py:52 +#: cinder/volume/volume_types.py:99 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/qos_specs.py:156 +#, python-format +msgid "Failed to get all associations of qos specs %s" +msgstr "" + +#: cinder/volume/qos_specs.py:189 +#, python-format +msgid "" +"Type %(type_id)s is already associated with another qos specs: " +"%(qos_specs_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:198 +#, python-format +msgid "Failed to associate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:212 +#, python-format +msgid "Failed to disassociate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:226 +#, python-format +msgid "Failed to disassociate qos specs %s." +msgstr "" + +#: cinder/volume/qos_specs.py:284 cinder/volume/volume_types.py:111 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/utils.py:144 +#, python-format +msgid "" +"Incorrect value error: %(blocksize)s, it may indicate that " +"'volume_dd_blocksize' was configured incorrectly. Fall back to default." +msgstr "" + +#: cinder/volume/volume_types.py:130 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:131 +#: cinder/volume/drivers/block_device.py:143 cinder/volume/drivers/lvm.py:654 +#: cinder/volume/drivers/lvm.py:669 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:157 cinder/volume/drivers/lvm.py:687 +#, python-format +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:183 cinder/volume/drivers/lvm.py:483 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:200 cinder/volume/drivers/lvm.py:504 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:272 cinder/volume/drivers/lvm.py:227 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:287 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:311 cinder/volume/drivers/lvm.py:300 +#: cinder/volume/drivers/zadara.py:509 cinder/volume/drivers/nexenta/nfs.py:189 +#, python-format +msgid "Creating clone of volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:380 +msgid "No free disk" +msgstr "" + +#: cinder/volume/drivers/block_device.py:393 +msgid "No big enough free disk" +msgstr "" + +#: cinder/volume/drivers/coraid.py:84 +#, python-format +msgid "Invalid ESM url scheme \"%s\". Supported https only." +msgstr "" + +#: cinder/volume/drivers/coraid.py:111 +msgid "Invalid REST handle name. Expected path." 
+msgstr "" + +#: cinder/volume/drivers/coraid.py:134 +#, python-format +msgid "Call to json.loads() failed: %(ex)s. Response: %(resp)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:224 +msgid "Session is expired. Relogin on ESM." +msgstr "" + +#: cinder/volume/drivers/coraid.py:244 +msgid "Reply is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:246 +msgid "Error message is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:284 +#, python-format +msgid "Coraid Appliance ping failed: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:297 +#, python-format +msgid "Volume \"%(name)s\" created with VSX LUN \"%(lun)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:311 +#, python-format +msgid "Volume \"%s\" deleted." +msgstr "" + +#: cinder/volume/drivers/coraid.py:315 +#, python-format +msgid "Resize volume \"%(name)s\" to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:319 +#, python-format +msgid "Repository for volume \"%(name)s\" found: \"%(repo)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:333 +#, python-format +msgid "Volume \"%(name)s\" resized. New size is %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:385 +msgid "Cannot create clone volume in different repository." +msgstr "" + +#: cinder/volume/drivers/coraid.py:505 +#, python-format +msgid "Initialize connection %(shelf)s/%(lun)s for %(name)s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:139 +#, python-format +msgid "" +"CLI output\n" +"%s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:154 +msgid "Reading CLI MOTD" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:158 +#, python-format +msgid "Setting CLI terminal width: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:162 +#, python-format +msgid "Sending CLI command: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:169 +msgid "Error executing EQL command" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:199 +#, python-format +msgid "EQL-driver: executing \"%s\"" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:208 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:383 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:215 cinder/volume/drivers/san/san.py:149 +#, python-format +msgid "Error running SSH command: %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:282 +#, python-format +msgid "Volume %s does not exist, it may have already been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:300 +#, python-format +msgid "EQL-driver: Setup is complete, group IP is %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:304 +msgid "Failed to setup the Dell EqualLogic driver" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:320 +#, python-format +msgid "Failed to create volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:329 +#, python-format +msgid "Volume %s was not found while trying to delete it" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:333 +#, python-format +msgid "Failed to delete volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:348 +#, python-format +msgid "Failed to create snapshot of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:361 +#, python-format +msgid "Failed to create volume from snapshot %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:374 +#, python-format +msgid "Failed to create clone of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:384 +#, python-format +msgid "Failed to delete snapshot %(snap)s of volume %(vol)s" +msgstr "" + +#: 
cinder/volume/drivers/eqlx.py:405 +#, python-format +msgid "Failed to initialize connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:415 +#, python-format +msgid "Failed to terminate connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:436 +#, python-format +msgid "Volume %s was not found; it may have been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:440 +#, python-format +msgid "Failed to ensure export of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:459 +#, python-format +msgid "Failed to extend_volume %(name)s from %(current_size)sGB to %(new_size)sGB" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:86 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:91 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:103 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:161 +#, python-format +msgid "Cloning volume %(src)s to volume %(dst)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:166 +msgid "Volume status must be 'available'." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:202 cinder/volume/drivers/nfs.py:122 +#: cinder/volume/drivers/netapp/nfs.py:753 +#, python-format +msgid "casted to %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:216 +msgid "Snapshot status must be \"available\" to clone." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:238 +#, python-format +msgid "snapshot: %(snap)s, volume: %(vol)s, volume_size: %(size)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:257 +#, python-format +msgid "will copy from snapshot at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:275 cinder/volume/drivers/nfs.py:172 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:373 +#, python-format +msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:403 +#, python-format +msgid "nova call result: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:405 +msgid "Call to Nova to create snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:427 +msgid "Nova returned \"error\" status while creating snapshot." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:431 +#, python-format +msgid "Status of snapshot %(id)s is now %(status)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:444 +#, python-format +msgid "Timed out while waiting for Nova update for creation of snapshot %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:456 +#, python-format +msgid "create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:457 +#, python-format +msgid "volume id: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:532 +msgid "'active' must be present when writing snap_info." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:562 +#, python-format +msgid "deleting snapshot %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:566 +msgid "Volume status must be \"available\" or \"in-use\"." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:582 +#, python-format +msgid "" +"Snapshot record for %s is not present, allowing snapshot_delete to " +"proceed."
+msgstr "" + +#: cinder/volume/drivers/glusterfs.py:587 +#, python-format +msgid "snapshot_file for this snap is %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:608 +#, python-format +msgid "No base file found for %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:625 +#, python-format +msgid "No %(base_id)s found for %(file)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:680 +#, python-format +msgid "No file found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:690 +#, python-format +msgid "No snap found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:701 +#, python-format +msgid "No file depends on %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:727 +#, python-format +msgid "Check condition failed: %s expected to be None." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:778 +msgid "Call to Nova delete snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:796 +#, python-format +msgid "status of snapshot %s is still \"deleting\"... waiting" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:802 +#, python-format +msgid "Unable to delete snapshot %(id)s, status: %(status)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:815 +#, python-format +msgid "Timed out while waiting for Nova update for deletion of snapshot %(id)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:904 +#, python-format +msgid "%s must be a valid raw or qcow2 image." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:967 +msgid "Extend volume is only supported for this driver when no snapshots exist." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:975 +#, python-format +msgid "Unrecognized backing format: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:990 +#, python-format +msgid "creating new volume at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:993 +#, python-format +msgid "file already exists at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1019 cinder/volume/drivers/nfs.py:159 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1021 +#, python-format +msgid "Available shares: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1038 +#, python-format +msgid "" +"GlusterFS share at %(dir)s is not writable by the Cinder volume service. " +"Snapshot operations will not be supported." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:96 +#, python-format +msgid "GPFS is not active. Detailed output: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:97 +#, python-format +msgid "GPFS is not running - state: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:140 +msgid "Option gpfs_mount_point_base is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:147 +msgid "Option gpfs_images_share_mode is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:153 +msgid "Option gpfs_images_dir is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:160 +#, python-format +msgid "" +"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " +"belong to different file systems" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:169 +#, python-format +msgid "" +"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in " +"cluster daemon level %(cur)s - must be at least at level %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:183 +#, python-format +msgid "%s must be an absolute path." 
+msgstr "" + +#: cinder/volume/drivers/gpfs.py:188 +#, python-format +msgid "%s is not a directory." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:197 +#, python-format +msgid "" +"The GPFS filesystem %(fs)s is not at the required release level. Current" +" level is %(cur)s, must be at least %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:556 +#, python-format +msgid "Failed to resize volume %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:604 +#, python-format +msgid "mkfs failed on volume %(vol)s, error message was: %(err)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:637 +#, python-format +msgid "" +"%s cannot be accessed. Verify that GPFS is active and file system is " +"mounted." +msgstr "" + +#: cinder/volume/drivers/lvm.py:189 +#, python-format +msgid "Unabled to delete due to existing snapshot for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:215 +#, python-format +msgid "Volume device file path %s does not exist." +msgstr "" + +#: cinder/volume/drivers/lvm.py:221 +#, python-format +msgid "Size for volume: %s not found, cannot secure delete." +msgstr "" + +#: cinder/volume/drivers/lvm.py:262 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:361 +#, python-format +msgid "Unable to update stats on non-initialized Volume Group: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:462 +#, python-format +msgid "Error creating iSCSI target, retrying creation for target: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:482 +#, python-format +msgid "volume_info:%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:518 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:519 cinder/volume/drivers/lvm.py:724 +#: cinder/volume/drivers/huawei/rest_common.py:1225 +#, python-format +msgid "%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:573 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/nfs.py:109 +msgid "Driver specific implementation needs to return mount_point_base." +msgstr "" + +#: cinder/volume/drivers/nfs.py:263 +#, python-format +msgid "Expected volume size was %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:264 +#, python-format +msgid " but size is now %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:361 +#, python-format +msgid "%s is already mounted" +msgstr "" + +#: cinder/volume/drivers/nfs.py:421 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:426 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/nfs.py:431 +#, python-format +msgid "NFS config 'nfs_oversub_ratio' invalid. Must be > 0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:439 +#, python-format +msgid "NFS config 'nfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:493 +#, python-format +msgid "Selected %s as target nfs share." 
+msgstr "" + +#: cinder/volume/drivers/nfs.py:526 +#, python-format +msgid "%s is above nfs_used_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:529 +#, python-format +msgid "%s is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:532 +#, python-format +msgid "%s reserved space is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/rbd.py:160 +#, python-format +msgid "Invalid argument - whence=%s not supported" +msgstr "" + +#: cinder/volume/drivers/rbd.py:164 +msgid "Invalid argument" +msgstr "" + +#: cinder/volume/drivers/rbd.py:183 +msgid "fileno() not supported by RBD()" +msgstr "" + +#: cinder/volume/drivers/rbd.py:210 +#, python-format +msgid "error opening rbd image %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:259 +msgid "rados and rbd python libraries not found" +msgstr "" + +#: cinder/volume/drivers/rbd.py:265 +msgid "error connecting to ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:346 cinder/volume/drivers/sheepdog.py:178 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:377 +#, python-format +msgid "clone depth exceeds limit of %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:411 +#, python-format +msgid "maximum clone depth (%d) has been reached - flattening source volume" +msgstr "" + +#: cinder/volume/drivers/rbd.py:423 +#, python-format +msgid "flattening source volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:435 +#, python-format +msgid "creating snapshot='%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:445 +#, python-format +msgid "cloning '%(src_vol)s@%(src_snap)s' to '%(dest)s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:459 +msgid "clone created successfully" +msgstr "" + +#: cinder/volume/drivers/rbd.py:468 +#, python-format +msgid "creating volume '%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:484 +#, python-format +msgid "flattening %(pool)s/%(img)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:490 +#, python-format +msgid "cloning %(pool)s/%(img)s@%(snap)s to %(dst)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:527 +msgid "volume has no backup snaps" +msgstr "" + +#: cinder/volume/drivers/rbd.py:550 +#, python-format +msgid "volume %s is not a clone" +msgstr "" + +#: cinder/volume/drivers/rbd.py:568 +#, python-format +msgid "deleting parent snapshot %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:579 +#, python-format +msgid "deleting parent %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:593 +#, python-format +msgid "volume %s no longer exists in backend" +msgstr "" + +#: cinder/volume/drivers/rbd.py:609 +msgid "volume has clone snapshot(s)" +msgstr "" + +#: cinder/volume/drivers/rbd.py:625 +#, python-format +msgid "deleting rbd volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:629 +msgid "" +"ImageBusy error raised while deleting rbd volume. This may have been " +"caused by a connection from a client that has crashed and, if so, may be " +"resolved by retrying the delete after 30 seconds has elapsed." 
+msgstr "" + +#: cinder/volume/drivers/rbd.py:642 +msgid "volume is a clone so cleaning references" +msgstr "" + +#: cinder/volume/drivers/rbd.py:696 +#, python-format +msgid "connection data: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:705 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:709 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:712 +msgid "Not an rbd snapshot" +msgstr "" + +#: cinder/volume/drivers/rbd.py:724 +#, python-format +msgid "not cloneable: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:728 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:733 +msgid "rbd image clone requires image format to be 'raw' but image {0} is '{1}'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:747 +#, python-format +msgid "Unable to open image %(loc)s: %(err)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:817 +msgid "volume backup complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:830 +msgid "volume restore complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:840 cinder/volume/drivers/sheepdog.py:195 +#, python-format +msgid "Failed to Extend Volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:845 cinder/volume/drivers/sheepdog.py:200 +#: cinder/volume/drivers/windows/windows.py:223 +#, python-format +msgid "Extend volume from %(old_size)s GB to %(new_size)s GB." +msgstr "" + +#: cinder/volume/drivers/scality.py:67 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:78 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:84 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:105 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:139 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:59 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:64 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:144 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:151 +#, python-format +msgid "" +"Failed to make httplib connection SolidFire Cluster: %s (verify san_ip " +"settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:154 +#, python-format +msgid "Failed to make httplib connection: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:161 +#, python-format +msgid "" +"Request to SolidFire cluster returned bad status: %(status)s / %(reason)s" +" (check san_login/san_password settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:166 +#, python-format +msgid "HTTP request failed, with status: %(status)s and reason: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:177 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:183 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:187 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:189 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:195 +#, python-format +msgid "Detected 
xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:202 +#: cinder/volume/drivers/solidfire.py:271 +#: cinder/volume/drivers/solidfire.py:366 +#, python-format +msgid "API response: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:222 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:253 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:315 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:398 +msgid "Failed to get model update from clone" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:410 +#, python-format +msgid "Failed volume create: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:425 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:460 +#, python-format +msgid "Failed to get SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:469 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:478 +#, python-format +msgid "Volume %s, not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:481 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:550 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:554 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:556 +msgid "This usually means the volume was never successfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:569 +#, python-format +msgid "Failed to delete SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:572 +#: cinder/volume/drivers/solidfire.py:646 +#: cinder/volume/drivers/solidfire.py:709 +#: cinder/volume/drivers/solidfire.py:734 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:575 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:579 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:587 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:638 +msgid "Entering SolidFire extend_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:660 +msgid "Leaving SolidFire extend_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:665 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:673 +msgid "Failed to get updated stats" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:703 +#: cinder/volume/drivers/solidfire.py:728 +msgid "Entering SolidFire attach_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:773 +msgid "Leaving SolidFire transfer volume" +msgstr "" + +#: cinder/volume/drivers/zadara.py:236 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:260 +#, python-format +msgid "Operation completed. 
%(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:357 +#, python-format +msgid "Pool %(name)s: %(total)sGB total, %(free)sGB free" +msgstr "" + +#: cinder/volume/drivers/zadara.py:408 cinder/volume/drivers/zadara.py:531 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:438 +#, python-format +msgid "Create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:445 cinder/volume/drivers/zadara.py:490 +#: cinder/volume/drivers/zadara.py:516 +#, python-format +msgid "Volume %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:456 +#, python-format +msgid "Delete snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:464 +#, python-format +msgid "snapshot: original volume %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:472 +#, python-format +msgid "snapshot: snapshot %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:483 +#, python-format +msgid "Creating volume from snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:496 +#, python-format +msgid "Snapshot %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:614 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:40 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:79 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:83 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:91 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:98 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:107 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:115 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:130 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:137 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:144 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:152 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:157 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:167 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:177 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:188 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:197 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:218 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:230 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:241 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:257 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:266 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:278 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:287 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:292 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:302 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:312 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:321 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:342 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. 
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:354
+#, python-format
+msgid ""
+"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s."
+" Successfully cloned volume from source volume. Finding the clone "
+"relationship."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:365
+#, python-format
+msgid ""
+"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s."
+" Remove the clone relationship. Method: ModifyReplicaSynchronization "
+"ReplicationService: %(service)s Operation: 8 Synchronization: "
+"%(sync_name)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:381
+#, python-format
+msgid ""
+"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s"
+" Return code: %(rc)lu"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:390
+#, python-format
+msgid ""
+"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: "
+"%(srcname)s. Return code: %(rc)lu. Error: %(error)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:402
+#, python-format
+msgid ""
+"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: "
+"%(srcname)s Return code: %(rc)lu."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:411
+msgid "Entering delete_volume."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:413
+#, python-format
+msgid "Delete Volume: %(volume)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:420
+#, python-format
+msgid "Volume %(name)s not found on the array. No volume to delete."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:430
+#, python-format
+msgid ""
+"Error Delete Volume: %(volumename)s. Storage Configuration Service not "
+"found."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:438
+#, python-format
+msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:442
+#, python-format
+msgid ""
+"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigService: "
+"%(service)s TheElement: %(vol_instance)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:456
+#, python-format
+msgid ""
+"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: "
+"%(error)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:465
+#, python-format
+msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:472
+msgid "Entering create_snapshot."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:476
+#, python-format
+msgid "Create snapshot: %(snapshot)s: volume: %(volume)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:488
+#, python-format
+msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:495
+#: cinder/volume/drivers/emc/emc_smis_common.py:497
+#: cinder/volume/drivers/emc/emc_smis_common.py:567
+#, python-format
+msgid "Cannot find Replication Service to create snapshot for volume %s."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:502
+#, python-format
+msgid ""
+"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s "
+"Source: %(volume)s Replication Service: %(service)s ElementName: "
+"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s."
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:518 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:527 +#, python-format +msgid "" +"Error Create Snapshot: %(snapshot)s Volume: %(volume)s Error: " +"%(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:535 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:541 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:545 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:551 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:559 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:574 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:590 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:599 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:611 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:621 +#, python-format +msgid "Create export: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:626 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:648 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:663 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:674 +#, python-format +msgid "Error mapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:678 +#, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:694 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:707 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:711 +#, python-format +msgid "HidePaths for volume %s completed successfully." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:724 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:739 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:744 +#, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:757 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:770 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:775 +#, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:781 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:790 +#: cinder/volume/drivers/emc/emc_smis_common.py:820 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:804 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:810 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:834 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:840 +#, python-format +msgid "Volume %s is already mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:852 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:884 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:887 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:903 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:906 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:928 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:948 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:952 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:959 +msgid "Cannot connect to ECOM server" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:971 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:984 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:997 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1010 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1054 +#, python-format +msgid "Pool %(storage_type)s is not found." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1060 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1066 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1082 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1114 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1117 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1130 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1158 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1184 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1188 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1248 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1289 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1302 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1314 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1326 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1361 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1404 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1409 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1419 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1441 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1463 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1491 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1520 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1526 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1538 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1548 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1550 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1566 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:152 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:161 +#, python-format +msgid "Cannot find device number for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:191 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:198 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:215 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:70 +#, python-format +msgid "Range: start LU: %(start)s, end LU: %(end)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:84 +#, python-format +msgid "setting LU upper (end) limit to %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:92 +#, python-format +msgid "%(element)s: %(val)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:103 cinder/volume/drivers/hds/hds.py:105 +#, python-format +msgid "XML exception reading parameter: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:178 +#, python-format +msgid "portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:197 +#, python-format +msgid "No configuration found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:250 +#, python-format +msgid "HDP not found: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:289 +#, python-format +msgid "iSCSI portal not found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:327 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:355 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is cloned." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:372 +#, python-format +msgid "LUN %(lun)s extended to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:395 +#, python-format +msgid "delete lun %(lun)s on %(name)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:480 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created from snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:503 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is created as snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:522 +#, python-format +msgid "LUN %s is deleted." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:57 +msgid "_instantiate_driver: configuration not found." 
+msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:64 +#, python-format +msgid "" +"_instantiate_driver: Loading %(protocol)s driver for Huawei OceanStor " +"%(product)s series storage arrays." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:84 +#, python-format +msgid "" +"\"Product\" or \"Protocol\" is illegal. \"Product\" should be set to " +"either T, Dorado or HVS. \"Protocol\" should be set to either iSCSI or " +"FC. Product: %(product)s Protocol: %(protocol)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:74 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s host: %(host)s initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:92 +#: cinder/volume/drivers/huawei/huawei_t.py:461 +#, python-format +msgid "initialize_connection: Target FC ports WWNS: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:101 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(ini)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:159 +#: cinder/volume/drivers/huawei/rest_common.py:1278 +#, python-format +msgid "" +"_get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " +"check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:206 +#: cinder/volume/drivers/huawei/rest_common.py:1083 +#, python-format +msgid "_get_tgt_iqn: iSCSI IP is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:234 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:248 +#, python-format +msgid "" +"_get_iscsi_tgt_port_info: Failed to get iSCSI port info. Please make sure" +" the iSCSI port IP %s is configured in array." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:323 +#: cinder/volume/drivers/huawei/huawei_t.py:552 +#, python-format +msgid "" +"terminate_connection: volume: %(vol)s, host: %(host)s, connector: " +"%(initiator)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:351 +#, python-format +msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:436 +msgid "validate_connector: The FC driver requires thewwpns in the connector." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:443 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:578 +#, python-format +msgid "_remove_fc_ports: FC port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:40 +#, python-format +msgid "parse_xml_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:129 +#, python-format +msgid "_get_host_os_type: Host %(ip)s OS type is %(os)s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:59 +#, python-format +msgid "HVS Request URL: %(url)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:60 +#, python-format +msgid "HVS Request Data: %(data)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:73 +#, python-format +msgid "HVS Response Data: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:75 +#, python-format +msgid "Bad response from server: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:82 +msgid "JSON transfer error" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:102 +#, python-format +msgid "Login error, reason is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:166 +#, python-format +msgid "" +"%(err)s\n" +"result: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:173 +#, python-format +msgid "%s \"data\" was not in result." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:208 +msgid "Can't find the Qos policy in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:246 +msgid "Can't find lun or lun group in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:280 +#, python-format +msgid "Invalid resource pool: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:298 +#, python-format +msgid "Get pool info error, pool name is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:327 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:354 +#, python-format +msgid "_stop_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:474 +#, python-format +msgid "" +"_mapping_hostgroup_and_lungroup: lun_group: %(lun_group)sview_id: " +"%(view_id)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:511 +#: cinder/volume/drivers/huawei/rest_common.py:543 +#, python-format +msgid "initiator name:%(initiator_name)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:527 +#, python-format +msgid "host lun id is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:553 +#, python-format +msgid "the free wwns %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:574 +#, python-format +msgid "the fc server properties is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:688 +#, python-format +msgid "JSON transfer data error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:874 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:937 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:964 +#, python-format +msgid "" +"PrefetchType config is wrong. PrefetchType must in 1,2,3,4. fetchtype " +"is:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:970 +msgid "Use default prefetch fetchtype. Prefetch fetchtype:Intelligent." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:982 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal.LUNcopy name: " +"%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1056 +#, python-format +msgid "" +"_get_iscsi_port_info: Failed to get iscsi port info through config IP " +"%(ip)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1101 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1124 +#, python-format +msgid "_parse_volume_type: type id: %(type_id)s config parameter is: %(params)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1157 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the configuration file " +"%(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1162 +#, python-format +msgid "The config parameters are: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1239 +#: cinder/volume/drivers/huawei/ssh_common.py:118 +#: cinder/volume/drivers/huawei/ssh_common.py:1265 +#, python-format +msgid "_check_conf_file: Config file invalid. %s must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1246 +#: cinder/volume/drivers/huawei/ssh_common.py:125 +msgid "_check_conf_file: Config file invalid. StoragePool must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1256 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1300 +msgid "Can not find lun in array" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:54 +#, python-format +msgid "ssh_read: Read SSH timeout. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:70 +msgid "No response message. Please check system status." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:101 +#: cinder/volume/drivers/huawei/ssh_common.py:1249 +msgid "do_setup" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:135 +#: cinder/volume/drivers/huawei/ssh_common.py:1287 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType is invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:169 +#, python-format +msgid "_get_login_info: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:224 +#, python-format +msgid "create_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:242 +#, python-format +msgid "" +"_name_translate: Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:279 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the element in configuration " +"file %(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:373 +#: cinder/volume/drivers/huawei/ssh_common.py:1451 +#, python-format +msgid "LUNType must be \"Thin\" or \"Thick\". LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:395 +msgid "" +"_parse_conf_lun_params: Use default prefetch type. 
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:421
+#, python-format
+msgid ""
+"_get_maximum_capacity_pool_id: Failed to get pool id. Please check config"
+" file and make sure the StoragePool %s is created in storage array."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:436
+#, python-format
+msgid "CLI command: %s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:466
+#, python-format
+msgid ""
+"_execute_cli: Cannot connect to IP %(old)s, trying to connect to the other "
+"IP %(new)s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:501
+#, python-format
+msgid "_execute_cli: %s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:511
+#, python-format
+msgid "delete_volume: volume name: %s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:516
+#, python-format
+msgid "delete_volume: Volume %(name)s does not exist."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:570
+#, python-format
+msgid ""
+"create_volume_from_snapshot: snapshot name: %(snapshot)s, volume name: "
+"%(volume)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:580
+#, python-format
+msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:650
+#, python-format
+msgid "_wait_for_luncopy: LUNcopy %(luncopyname)s status is %(status)s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:688
+#, python-format
+msgid "create_cloned_volume: src volume: %(src)s, tgt volume: %(tgt)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:697
+#, python-format
+msgid "Source volume %(name)s does not exist."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:739
+#, python-format
+msgid ""
+"extend_volume: extended volume name: %(extended_name)s new added volume "
+"name: %(added_name)s new added volume size: %(added_size)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:747
+#, python-format
+msgid "extend_volume: volume %s does not exist."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:779
+#, python-format
+msgid "create_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:785
+msgid "create_snapshot: Resource pool needs at least 1GB valid size."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:792
+#, python-format
+msgid "create_snapshot: Volume %(name)s does not exist."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:855
+#, python-format
+msgid "delete_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:865
+#, python-format
+msgid ""
+"delete_snapshot: Cannot delete snapshot %s because it is a source LUN of "
+"LUNCopy."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:873
+#, python-format
+msgid "delete_snapshot: Snapshot %(snap)s does not exist."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:916
+#, python-format
+msgid ""
+"%(func)s: %(msg)s\n"
+"CLI command: %(cmd)s\n"
+"CLI out: %(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:933
+#, python-format
+msgid "map_volume: Volume %s was not found."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:1079
+#, python-format
+msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:1102
+#, python-format
+msgid "remove_map: Host %s does not exist."
+msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1106 +#, python-format +msgid "remove_map: Volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1119 +#, python-format +msgid "remove_map: No map between host %(host)s and volume %(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1138 +#, python-format +msgid "" +"_delete_map: There are IOs accessing the system. Retry to delete host map" +" %(mapid)s 10s later." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1146 +#, python-format +msgid "" +"_delete_map: Failed to delete host map %(mapid)s.\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1185 +msgid "_update_volume_stats: Updating volume stats." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1277 +msgid "_check_conf_file: Config file invalid. StoragePool must be specified." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1311 +msgid "" +"_get_device_type: The driver only supports Dorado5100 and Dorado 2100 G2 " +"now." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1389 +#, python-format +msgid "" +"create_volume_from_snapshot: %(device)s does not support create volume " +"from snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1396 +#, python-format +msgid "create_cloned_volume: %(device)s does not support clone volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1404 +#, python-format +msgid "extend_volume: %(device)s does not support extend volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1413 +#, python-format +msgid "create_snapshot: %(device)s does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:132 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:142 +#, python-format +msgid "Failed getting details for pool %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:179 +msgid "do_setup: No configured nodes." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:182 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:186 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:190 +msgid "Unable to determine system name" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:193 +msgid "Unable to determine system id" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:196 +msgid "Unable to determine pool extent size" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:203 +#: cinder/volume/drivers/netapp/iscsi.py:122 +#: cinder/volume/drivers/netapp/nfs.py:639 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:157 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:209 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:217 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:225 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:235 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:254 +msgid "The connector does not contain the required information." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:277 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:297 +msgid "CHAP secret exists for host but CHAP is disabled" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:302 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:314 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:316 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:333 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:342 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:365 +msgid "" +"Could not get FC connection information for the host-volume connection. " +"Is the host configured properly for FC connections?" 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:380 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:385 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:403 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:411 +msgid "terminate_connection: Failed to get host name from connector." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:421 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:447 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:459 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:470 +#, python-format +msgid "enter: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:474 +msgid "extend_volume: Extending a volume with snapshots is not supported." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:481 +#, python-format +msgid "leave: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:497 +#, python-format +msgid "enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:523 +#, python-format +msgid "leave: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:540 +#, python-format +msgid "" +"enter: retype: id=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:581 +#, python-format +msgid "" +"exit: retype: ild=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:622 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:623 +msgid "_update_volume_stats: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:44 +#, python-format +msgid "Could not find key in output of command %(cmd)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:64 +#, python-format +msgid "Failed to get code level (%s)." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:86 +#, python-format +msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:143 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:165 +#, python-format +msgid "Failed to find host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:178 +#, python-format +msgid "enter: get_host_from_connector: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:207 +#, python-format +msgid "leave: get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:218 +#, python-format +msgid "enter: create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:224 +msgid "create_host: Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:235 +msgid "create_host: No initiators or wwpns supplied." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:265 +#, python-format +msgid "leave: create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:275 +#, python-format +msgid "enter: map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:301 +#, python-format +msgid "" +"leave: map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host " +"%(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:311 +#, python-format +msgid "enter: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:318 +#, python-format +msgid "unmap_vol_from_host: No mapping of volume %(vol_name)s to any host found." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:324 +#, python-format +msgid "" +"unmap_vol_from_host: Multiple mappings of volume %(vol_name)s found, no " +"host specified." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:336 +#, python-format +msgid "" +"unmap_vol_from_host: No mapping of volume %(vol_name)s to host %(host) " +"found." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:348 +#, python-format +msgid "leave: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:377 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:383 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:390 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:397 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:402 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:408 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:417 +#, python-format +msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:452 +msgid "Protocol must be specified as ' iSCSI' or ' FC'." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:495 +#, python-format +msgid "enter: create_vdisk: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:498 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:525 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping%(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:535 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within theallotted %(to)d " +"seconds timeout. Terminating." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:544 +#, python-format +msgid "" +"enter: run_flashcopy: execute FlashCopy from source %(source)s to target " +"%(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:552 +#, python-format +msgid "leave: run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:572 +#, python-format +msgid "Loopcall: _check_vdisk_fc_mappings(), vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:595 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:631 +#, python-format +msgid "Calling _ensure_vdisk_no_fc_mappings: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:639 +#, python-format +msgid "enter: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:641 +#, python-format +msgid "Tried to delete non-existant vdisk %s." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:645 +#, python-format +msgid "leave: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:649 +#, python-format +msgid "enter: create_copy: snapshot %(src)s to %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:654 +#, python-format +msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:669 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt)s from vdisk %(src)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:691 +msgid "migrate_volume started without a vdisk copy in the expected pool." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:743 +#, python-format +msgid "" +"Ignore change IO group as storage code level is %(code_level)s, below " +"then 6.4.0.0" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:35 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:211 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:244 +#, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:53 +#, python-format +msgid "Expected no output from CLI command %(cmd)s, got %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:65 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:256 +#, python-format +msgid "" +"Failed to parse CLI output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:142 +msgid "Must pass wwpn or host to lsfabric." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:158 +#, python-format +msgid "Did not find success message nor error for %(fun)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:163 +msgid "" +"storwize_svc_multihostmap_enabled is set to False, not allowing multi " +"host mapping." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:347 +#, python-format +msgid "Did not find expected key %(key)s in %(fun)s: %(raw)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:382 +#, python-format +msgid "" +"Unexpected CLI response: header/row mismatch. header: %(header)s, row: " +"%(row)s" +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:419 +#, python-format +msgid "No element by given name %s." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:440 +msgid "Not a valid value for NaElement." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:444 +msgid "NaElement name cannot be null." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:468 +msgid "Type cannot be converted into NaElement." 
+msgstr "" + +#: cinder/volume/drivers/netapp/common.py:75 +msgid "Required configuration not found" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:103 +#, python-format +msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:109 +#, python-format +msgid "Storage family %s is not supported" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:116 +#, python-format +msgid "No default storage protocol found for storage family %(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:123 +#, python-format +msgid "" +"Protocol %(storage_protocol)s is not supported for storage family " +"%(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:130 +#, python-format +msgid "" +"NetApp driver of family %(storage_family)s and protocol " +"%(storage_protocol)s loaded" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:139 +msgid "Only loading netapp drivers supported." +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:158 +#, python-format +msgid "" +"The configured NetApp driver is deprecated. Please refer the link to " +"resolve the issue '%s'." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:69 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:105 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:150 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:166 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:175 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:191 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:227 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:232 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:238 +#, python-format +msgid "Failed to get LUN target details for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:249 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:252 +#, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:290 +#, python-format +msgid "Snapshot %s deletion successful" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:310 +#: cinder/volume/drivers/netapp/iscsi.py:565 +#: cinder/volume/drivers/netapp/nfs.py:99 +#: cinder/volume/drivers/netapp/nfs.py:206 +#, python-format +msgid "Resizing %s failed. Cleaning volume." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:325 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:412 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:431 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:511 +msgid "Object is not a NetApp LUN." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:543 +#, python-format +msgid "Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:545 +#, python-format +msgid "Error getting lun attribute. Exception: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:600 +#, python-format +msgid "No need to extend volume %s as it is already the requested new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:606 +#, python-format +msgid "Resizing lun %s directly to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:633 +#, python-format +msgid "Lun %(path)s geometry failed. Message - %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:662 +#, python-format +msgid "Moving lun %(name)s to %(new_name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:677 +#, python-format +msgid "Resizing lun %s using sub clone to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:684 +#, python-format +msgid "%s cannot be sub clone resized as it is hosted on compressed volume" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:690 +#, python-format +msgid "%s cannot be sub clone resized as it contains no blocks." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:707 +#, python-format +msgid "Post clone resize lun %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:718 +#, python-format +msgid "Failure staging lun %s to tmp." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:723 +#, python-format +msgid "Failure moving new cloned lun to %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:727 +#, python-format +msgid "Failure deleting staged tmp lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:730 +#, python-format +msgid "Unknown exception in post clone resize lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:732 +#, python-format +msgid "Exception details: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:736 +msgid "Getting lun block count." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:741 +#, python-format +msgid "Failure getting lun info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:785 +#, python-format +msgid "Failed to get vol with required size and extra specs for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:796 +#, python-format +msgid "Error provisioning vol %(name)s on %(volume)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:841 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:982 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:986 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1087 +msgid "Cluster ssc is not updated. No volume stats found." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1149 +#: cinder/volume/drivers/netapp/nfs.py:1080 +msgid "Unsupported ONTAP version. ONTAP version 7.3.1 and above is supported." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1153 +#: cinder/volume/drivers/netapp/nfs.py:1084 +#: cinder/volume/drivers/netapp/utils.py:320 +msgid "Api version could not be determined." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1164 +#, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1273 +#, python-format +msgid "Error finding luns for volume %s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1390 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1393 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1456 +msgid "Volume refresh job already running. Returning..." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1462 +#, python-format +msgid "Error refreshing vol capacity. Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1470 +#, python-format +msgid "Refreshing capacity info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:104 +#: cinder/volume/drivers/netapp/nfs.py:211 +#, python-format +msgid "NFS file %s not discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:224 +#, python-format +msgid "Copied image to volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:230 +#, python-format +msgid "Registering image in cache %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:237 +#, python-format +msgid "" +"Exception while registering image %(image_id)s in cache. Exception: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:250 +#, python-format +msgid "Found cache file for image %(image_id)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:263 +#, python-format +msgid "Cloning img from cache for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:271 +msgid "Image cache cleaning in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:282 +msgid "Image cache cleaning in progress." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:293 +#, python-format +msgid "Cleaning cache for share %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:298 +#, python-format +msgid "Files to be queued for deletion %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:305 +#, python-format +msgid "Exception during cache cleaning %(share)s. Message - %(ex)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:311 +msgid "Image cache cleaning done." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:336 +#, python-format +msgid "Bytes to free %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:343 +#, python-format +msgid "Delete file path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:358 +#, python-format +msgid "Deleting file at path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:363 +#, python-format +msgid "Exception during deleting %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:395 +#, python-format +msgid "Unexpected exception in cloning image %(image_id)s. 
Message: %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:411 +#, python-format +msgid "Cloning image %s from cache" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:415 +#, python-format +msgid "Cache share: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:425 +#, python-format +msgid "Unexpected exception during image cloning in share %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:431 +#, python-format +msgid "Cloning image %s directly in share" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:436 +#, python-format +msgid "Share is cloneable %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:443 +#, python-format +msgid "Image is raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:450 +#, python-format +msgid "Image will locally be converted to raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:457 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:467 +#, python-format +msgid "Performing post clone for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:474 +msgid "NFS file could not be discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:478 +msgid "Checking file for resize" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:482 +#, python-format +msgid "Resizing file to %sG" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:488 +msgid "Resizing image file failed." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:510 +msgid "Discover file retries exhausted." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:529 +#, python-format +msgid "Image location not in the expected format %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:557 +#, python-format +msgid "Found possible share matches %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:561 +msgid "Unexpected exception while short listing used share." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:599 +#, python-format +msgid "Extending volume %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:710 +#, python-format +msgid "Shares on vserver %s will only be used for provisioning." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:714 +#: cinder/volume/drivers/netapp/nfs.py:892 +msgid "No vserver set in config. SSC will be disabled." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:757 +#, python-format +msgid "Exception creating vol %(name)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:765 +#, python-format +msgid "Volume %s could not be created on shares." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:815 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:856 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:862 +#, python-format +msgid "" +"Cloning with params volume %(volume)s, src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:924 +msgid "No cluster ssc stats found. Wait for next volume stats update." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:941 +msgid "No shares found hence skipping ssc refresh." 
+msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:978 +#: cinder/volume/drivers/netapp/nfs.py:1221 +#, python-format +msgid "Shortlisted del elg files %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:983 +#: cinder/volume/drivers/netapp/nfs.py:1226 +#, python-format +msgid "Getting file usage for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:988 +#: cinder/volume/drivers/netapp/nfs.py:1231 +#, python-format +msgid "file-usage for path %(path)s is %(bytes)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1005 +#: cinder/volume/drivers/netapp/nfs.py:1268 +#, python-format +msgid "Share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1007 +#: cinder/volume/drivers/netapp/nfs.py:1270 +#, python-format +msgid "No share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1038 +#, python-format +msgid "Found volume %(vol)s for share %(share)s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1129 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1139 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:241 +#, python-format +msgid "Unexpected error while creating ssc vol list. Message - %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:272 +#, python-format +msgid "Exception querying aggr options. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:313 +#, python-format +msgid "Exception querying sis information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:347 +#, python-format +msgid "Exception querying mirror information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:379 +#, python-format +msgid "Exception querying storage disk. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:421 +#, python-format +msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:455 +#, python-format +msgid "Successfully completed stale refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:482 +#, python-format +msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:488 +#, python-format +msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:500 +msgid "Backend not a VolumeDriver." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:502 +msgid "Backend server not NaServer." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:505 +msgid "ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:517 +msgid "refresh stale ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:616 +msgid "Fatal error: User not permitted to query NetApp volumes." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:623 +#, python-format +msgid "" +"The user does not have access or sufficient privileges to use all ssc " +"apis. The ssc features %s may not work as expected." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:122 +msgid "ems executed successfully." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:124 +#, python-format +msgid "Failed to invoke ems. 
Message : %s" +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:137 +msgid "" +"It is not the recommended way to use drivers by NetApp. Please use " +"NetAppDriver to achieve the functionality." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:160 +msgid "Requires an NaServer instance." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:317 +msgid "Unsupported Clustered Data ONTAP version." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:99 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:150 +#, python-format +msgid "Extending volume: %(id)s New size: %(size)s GB" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:166 +#, python-format +msgid "Volume %s does not exist, it seems it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:179 +#, python-format +msgid "Cannot delete snapshot %(origin): %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:190 +#, python-format +msgid "Creating temp snapshot of the original volume: %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:200 +#: cinder/volume/drivers/nexenta/nfs.py:200 +#, python-format +msgid "Volume creation failed, deleting created snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:205 +#: cinder/volume/drivers/nexenta/nfs.py:205 +#, python-format +msgid "Failed to delete zfs snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:223 +#, python-format +msgid "Enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:250 +#, python-format +msgid "Remote NexentaStor appliance at %s should be SSH-bound." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:267 +#, python-format +msgid "" +"Cannot send source snapshot %(src)s to destination %(dst)s. Reason: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:275 +#, python-format +msgid "" +"Cannot delete temporary source snapshot %(src)s on NexentaStor Appliance:" +" %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:281 +#, python-format +msgid "Cannot delete source volume %(volume)s on NexentaStor Appliance: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:318 +#, python-format +msgid "Snapshot %s does not exist, it seems it was already deleted." 
+msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:439 +#: cinder/volume/drivers/windows/windows_utils.py:230 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:449 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:461 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:471 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:481 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:514 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:522 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:83 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:88 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:89 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:90 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:96 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:85 +#, python-format +msgid "Volume %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:89 +#, python-format +msgid "Folder %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:114 +#, python-format +msgid "Creating folder on Nexenta Store %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:146 +#, python-format +msgid "Cannot destroy created folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:176 +#, python-format +msgid "Cannot destroy cloned folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:227 +#, python-format +msgid "Folder %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:237 +#: cinder/volume/drivers/nexenta/nfs.py:268 +#, python-format +msgid "Snapshot %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:302 +#, python-format +msgid "Creating regular file: %s.This may take some time." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:313 +#, python-format +msgid "Regular file: %s created." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:365 +#, python-format +msgid "Sharing folder %s on Nexenta Store" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:393 +#, python-format +msgid "Shares loaded: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/utils.py:46 +#, python-format +msgid "Invalid value: \"%s\"" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:93 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:99 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:107 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:137 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:190 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:246 +#, python-format +msgid "Snapshot info: %(name)s => %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:321 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:79 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:166 +#, python-format +msgid "Invalid hp3parclient version. Version %s or greater required." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:179 +#, python-format +msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:193 +#, python-format +msgid "HP3PARCommon %(common_ver)s, hp3parclient %(rest_ver)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:212 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:494 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:228 +#, python-format +msgid "Failed to get domain because CPG (%s) doesn't exist on array." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:247 +#, python-format +msgid "Error extending volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:347 +#, python-format +msgid "command %s failed" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:390 +#, python-format +msgid "Error running ssh command: %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:586 +#, python-format +msgid "VV Set %s does not exist." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:633 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:684 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:752 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1004 +#, python-format +msgid "Failure in update_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1019 +#, python-format +msgid "Failure in clear_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1031 +#, python-format +msgid "Error attaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1039 +#, python-format +msgid "Error detaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:124 +#, python-format +msgid "Invalid IP address format '%s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:158 +#, python-format +msgid "" +"Found invalid iSCSI IP address(s) in configuration option(s) " +"hp3par_iscsi_ips or iscsi_ip_address '%s.'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:164 +msgid "At least one valid iSCSI IP address must be set." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:266 +msgid "Least busy iSCSI port not found, using first iSCSI port in list." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:75 +#, python-format +msgid "Failure while invoking function: %(func)s. Error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:162 +#, python-format +msgid "Error while terminating session: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:165 +msgid "Successfully established connection to the server." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:172 +#, python-format +msgid "Error while logging out the user: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:218 +#, python-format +msgid "" +"Not authenticated error occurred. Will create session and try API call " +"again: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:258 +#, python-format +msgid "Task: %(task)s progress: %(prog)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:262 +#, python-format +msgid "Task %s status: success." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:266 +#: cinder/volume/drivers/vmware/api.py:271 +#, python-format +msgid "Task: %(task)s failed with error: %(err)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:290 +msgid "Lease is ready." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:294 +msgid "Lease initializing..." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:304 +#, python-format +msgid "Error: unknown lease state %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:51 +#, python-format +msgid "Read %(bytes)s out of %(max)s from ThreadSafePipe." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:56 +#, python-format +msgid "Completed transfer of size %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:102 +#, python-format +msgid "Initiating image service update on image: %(image)s with meta: %(meta)s" +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:117 +#, python-format +msgid "Glance image: %s is now active." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:123 +#, python-format +msgid "Glance image: %s is in killed state." 
+msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:132 +#, python-format +msgid "Glance image %(id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:171 +#, python-format +msgid "" +"Exception during HTTP connection close in VMwareHTTPWrite. Exception is " +"%s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:203 +#: cinder/volume/drivers/vmware/read_write_util.py:292 +msgid "Could not retrieve URL from lease." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:206 +#, python-format +msgid "Opening vmdk url: %s for write." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:231 +#, python-format +msgid "Written %s bytes to vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:242 +#: cinder/volume/drivers/vmware/read_write_util.py:318 +#, python-format +msgid "Updating progress to %s percent." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:258 +#: cinder/volume/drivers/vmware/read_write_util.py:334 +msgid "Lease released." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:260 +#: cinder/volume/drivers/vmware/read_write_util.py:336 +#, python-format +msgid "Lease is already in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:295 +#, python-format +msgid "Opening vmdk url: %s for read." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:307 +#, python-format +msgid "Read %s bytes from vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:150 +#, python-format +msgid "Error(s): %s occurred in the call to RetrievePropertiesEx." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:189 +#, python-format +msgid "No such SOAP method %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:198 +#, python-format +msgid "httplib error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:209 +#, python-format +msgid "Socket error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:218 +#, python-format +msgid "Type error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:225 +#, python-format +msgid "Error in %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:112 +#, python-format +msgid "Returning spec value %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:115 +#, python-format +msgid "Invalid spec value: %s specified." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:118 +#, python-format +msgid "Returning default spec value: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:169 +#, python-format +msgid "%s not set." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:174 +#, python-format +msgid "Successfully setup driver: %(driver)s for server: %(ip)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:219 +msgid "Backing not available, no operation to be performed." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:287 +#, python-format +msgid "" +"Unable to pick datastore to accommodate %(size)s bytes from the " +"datastores: %(dss)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:293 +#, python-format +msgid "" +"Selected datastore: %(datastore)s with %(host_count)d connected host(s) " +"for the volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:375 +#, python-format +msgid "" +"Unable to find suitable datastore for volume of size: %(vol)s GB under " +"host: %(host)s. 
More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:385 +#, python-format +msgid "Unable to find host to accommodate a disk of size: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:412 +#, python-format +msgid "" +"Unable to find suitable datastore for volume: %(vol)s under host: " +"%(host)s. More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:422 +#, python-format +msgid "Unable to create volume: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:441 +#, python-format +msgid "The instance: %s for which initialize connection is called, exists." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:448 +#, python-format +msgid "There is no backing for the volume: %s. Need to create one." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:456 +msgid "The instance for which initialize connection is called, does not exist." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:461 +#, python-format +msgid "Trying to boot from an empty volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:470 +#, python-format +msgid "" +"Returning connection_info: %(info)s for volume: %(volume)s with " +"connector: %(connector)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:518 +#, python-format +msgid "Snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:523 +#, python-format +msgid "There is no backing, so will not create snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:528 +#, python-format +msgid "Successfully created snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:549 +#, python-format +msgid "Delete snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:554 +#, python-format +msgid "There is no backing, and so there is no snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:558 +#, python-format +msgid "Successfully deleted snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:586 +#, python-format +msgid "Successfully cloned new backing: %(back)s from source VMDK file: %(vmdk)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:603 +#, python-format +msgid "" +"There is no backing for the source volume: %(svol)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:633 +#, python-format +msgid "" +"There is no backing for the source snapshot: %(snap)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:642 +#: cinder/volume/drivers/vmware/vmdk.py:982 +#, python-format +msgid "" +"There is no snapshot point for the snapshoted volume: %(snap)s. Not " +"creating any backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:678 +#, python-format +msgid "Cannot create image of disk format: %s. Only vmdk disk format is accepted." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:713 +#: cinder/volume/drivers/vmware/vmdk.py:771 +#, python-format +msgid "Fetching glance image: %(id)s to server: %(host)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:722 +#: cinder/volume/drivers/vmware/vmdk.py:792 +#, python-format +msgid "Done copying image: %(id)s to volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:725 +#, python-format +msgid "" +"Exception in copy_image_to_volume: %(excep)s. Deleting the backing: " +"%(back)s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:746 +#, python-format +msgid "Exception in _select_ds_for_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:749 +#, python-format +msgid "Selected datastore %(ds)s for new volume of size %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:784 +#, python-format +msgid "Exception in copy_image_to_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:787 +#, python-format +msgid "Deleting the backing: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:808 +#, python-format +msgid "Copy glance image: %s to create new volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:842 +msgid "Upload to glance of attached volume is not supported." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:847 +#, python-format +msgid "Copy Volume: %s to new image." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:853 +#, python-format +msgid "Backing not found, creating for volume: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:872 +#, python-format +msgid "Done copying volume %(vol)s to a new image %(img)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:922 +#, python-format +msgid "Relocating volume: %(backing)s to %(ds)s and %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:961 +#: cinder/volume/drivers/vmware/volumeops.py:630 +#, python-format +msgid "Successfully created clone: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:974 +#, python-format +msgid "" +"There is no backing for the snapshoted volume: %(snap)s. Not creating any" +" backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1010 +#, python-format +msgid "" +"There is no backing for the source volume: %(src)s. Not creating any " +"backing for volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1018 +#, python-format +msgid "Linked clone of source volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:94 +#, python-format +msgid "Downloading image: %s from glance image server as a flat vmdk file." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:107 +#: cinder/volume/drivers/vmware/vmware_images.py:126 +#, python-format +msgid "Downloaded image: %s from glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:113 +#, python-format +msgid "Downloading image: %s from glance image server using HttpNfc import." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:132 +#, python-format +msgid "Uploading image: %s to the Glance image server using HttpNfc export." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:158 +#, python-format +msgid "Uploaded image: %s to the Glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:87 +#, python-format +msgid "Did not find any backing with name: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:94 +#, python-format +msgid "Deleting the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:97 +#, python-format +msgid "Initiated deletion of VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:99 +#, python-format +msgid "Deleted the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:237 +#, python-format +msgid "There are no valid datastores attached to %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:289 +#, python-format +msgid "" +"Creating folder: %(child_folder_name)s under parent folder: " +"%(parent_folder)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:306 +#, python-format +msgid "Child folder already present: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:314 +#, python-format +msgid "Created child folder: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:365 +#, python-format +msgid "Spec for creating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:383 +#, python-format +msgid "" +"Creating volume backing name: %(name)s disk_type: %(disk_type)s size_kb: " +"%(size_kb)s at folder: %(folder)s resourse pool: %(resource_pool)s " +"datastore name: %(ds_name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:395 +#, python-format +msgid "Initiated creation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:398 +#, python-format +msgid "Successfully created volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:438 +#, python-format +msgid "Spec for relocating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:452 +#, python-format +msgid "" +"Relocating backing: %(backing)s to datastore: %(ds)s and resource pool: " +"%(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:462 +#, python-format +msgid "Initiated relocation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:464 +#, python-format +msgid "" +"Successfully relocated volume backing: %(backing)s to datastore: %(ds)s " +"and resource pool: %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:474 +#, python-format +msgid "Moving backing: %(backing)s to folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:479 +#, python-format +msgid "Initiated move of volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:482 +#, python-format +msgid "Successfully moved volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:494 +#, python-format +msgid "Snapshoting backing: %(backing)s with name: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:501 +#, python-format +msgid "Initiated snapshot of volume backing: %(backing)s named: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:505 +#, python-format +msgid "Successfully created snapshot: %(snap)s for volume backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:553 +#, python-format +msgid "Deleting the snapshot: %(name)s from backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:558 +#, python-format +msgid "" +"Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " +"delete anything." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:565 +#, python-format +msgid "Initiated snapshot: %(name)s deletion for backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:569 +#, python-format +msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:597 +#, python-format +msgid "Spec for cloning the backing: %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:613 +#, python-format +msgid "" +"Creating a clone of backing: %(back)s, named: %(name)s, clone type: " +"%(type)s from snapshot: %(snap)s on datastore: %(ds)s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:627 +#, python-format +msgid "Initiated clone of backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:638 +#, python-format +msgid "Deleting file: %(file)s under datacenter: %(dc)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:646 +#, python-format +msgid "Initiated deletion via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:648 +#, python-format +msgid "Successfully deleted file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:701 +msgid "Copying disk data before snapshot of the VM" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:711 +#, python-format +msgid "Initiated copying disk data via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:713 +#, python-format +msgid "Successfully copied disk at: %(src)s to: %(dest)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:722 +#, python-format +msgid "Deleting vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:729 +#, python-format +msgid "Initiated deleting vmdk file via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:731 +#, python-format +msgid "Deleted vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/windows/windows.py:102 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:47 +#, python-format +msgid "" +"check_for_setup_error: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:53 +msgid "check_for_setup_error: there is no ISCSI traffic listening." +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:63 +#, python-format +msgid "" +"get_host_information: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:73 +#, python-format +msgid "" +"get_host_information: the ISCSI target information could not be " +"retrieved. WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:105 +#, python-format +msgid "" +"associate_initiator_with_iscsi_target: an association between initiator: " +"%(init)s and target name: %(target)s could not be established. WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:123 +#, python-format +msgid "" +"delete_iscsi_target: error when deleting the iscsi target associated with" +" target name: %(target)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:139 +#, python-format +msgid "" +"create_volume: error when creating the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:157 +#, python-format +msgid "" +"delete_volume: error when deleting the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:177 +#, python-format +msgid "" +"create_snapshot: error when creating the snapshot name: %(vol_name)s . 
" +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:193 +#, python-format +msgid "" +"create_volume_from_snapshot: error when creating the volume name: " +"%(vol_name)s from snapshot name: %(snap_name)s. WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:208 +#, python-format +msgid "" +"delete_snapshot: error when deleting the snapshot name: %(snap_name)s . " +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:223 +#, python-format +msgid "" +"create_iscsi_target: error when creating iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:240 +#, python-format +msgid "" +"remove_iscsi_target: error when deleting iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:255 +#, python-format +msgid "" +"add_disk_to_target: error adding disk associated to volume : %(vol_name)s" +" to the target name: %(tar_name)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:273 +#, python-format +msgid "" +"copy_vhd_disk: error when copying disk from source path : %(src_path)s to" +" destination path: %(dest_path)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:290 +#, python-format +msgid "" +"extend: error when extending the volume: %(vol_name)s .WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/flows/common.py:52 +#, python-format +msgid "Restoring source %(source_volid)s status to %(status)s" +msgstr "" + +#: cinder/volume/flows/common.py:58 +#, python-format +msgid "" +"Failed setting source volume %(source_volid)s back to its initial " +"%(source_status)s status" +msgstr "" + +#: cinder/volume/flows/common.py:83 +#, python-format +msgid "Updating volume: %(volume_id)s with %(update)s due to: %(reason)s" +msgstr "" + +#: cinder/volume/flows/common.py:90 +#: cinder/volume/flows/manager/create_volume.py:676 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(update)s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:81 +#, python-format +msgid "Originating snapshot status must be one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:103 +#, python-format +msgid "" +"Unable to create a volume from an originating source volume when its " +"status is not one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:126 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than the snapshot size " +"%(snap_size)sGB. They must be >= original snapshot size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:135 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than original volume size " +"%(source_size)sGB. They must be >= original volume size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:144 +#, python-format +msgid "Volume size %(size)s must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:186 +#, python-format +msgid "" +"Size of specified image %(image_size)sGB is larger than volume size " +"%(volume_size)sGB." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:194 +#, python-format +msgid "" +"Volume size %(volume_size)sGB cannot be smaller than the image minDisk " +"size %(min_disk)sGB." 
+msgstr "" + +#: cinder/volume/flows/api/create_volume.py:212 +#, python-format +msgid "Metadata property key %s greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:217 +#, python-format +msgid "Metadata property key %s value greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:254 +#, python-format +msgid "Availability zone '%s' is invalid" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:267 +msgid "Volume must be in the same availability zone as the snapshot" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:276 +msgid "Volume must be in the same availability zone as the source volume" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:315 +msgid "Volume type will be changed to be the same as the source volume." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:463 +#, python-format +msgid "Failed destroying volume entry %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:546 +#, python-format +msgid "Failed rolling back quota for %s reservations" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:590 +#, python-format +msgid "Failed to update quota for deleting volume: %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:678 +#: cinder/volume/flows/manager/create_volume.py:192 +#, python-format +msgid "Volume %s: create failed" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:682 +msgid "Unexpected build error:" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:105 +#, python-format +msgid "" +"Volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d due to " +"%(reason)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:124 +#, python-format +msgid "Volume %s: re-scheduled" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:141 +#, python-format +msgid "Updating volume %(volume_id)s with %(update)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:146 +#, python-format +msgid "Volume %s: resetting 'creating' status failed." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:165 +#, python-format +msgid "Volume %s: rescheduling failed" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:308 +#, python-format +msgid "" +"Failed notifying about the volume action %(event)s for volume " +"%(volume_id)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:345 +#, python-format +msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:347 +#, python-format +msgid "" +"Failed updating volume %(vol_id)s metadata using the provided " +"%(src_type)s %(src_id)s metadata" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:405 +#, python-format +msgid "" +"Failed fetching snapshot %(snapshot_id)s bootable flag using the provided" +" glance snapshot %(snapshot_ref_id)s volume reference" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:418 +#, python-format +msgid "Marking volume %s as bootable." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:421 +#, python-format +msgid "Failed updating volume %(volume_id)s bootable flag to true" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:448 +#, python-format +msgid "" +"Attempting download of %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s." 
+msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:455 +#: cinder/volume/flows/manager/create_volume.py:466 +#, python-format +msgid "" +"Failed to copy image %(image_id)s to volume: %(volume_id)s, error: " +"%(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:461 +#, python-format +msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:475 +#, python-format +msgid "" +"Downloaded image %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s successfully." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:512 +#, python-format +msgid "" +"Creating volume glance metadata for volume %(volume_id)s backed by image " +"%(image_id)s with: %(vol_metadata)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:526 +#, python-format +msgid "" +"Cloning %(volume_id)s from image %(image_id)s at location " +"%(image_location)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:552 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(updates)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:574 +#, python-format +msgid "Unable to create volume. Volume driver %s not initialized" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:588 +#, python-format +msgid "" +"Volume %(volume_id)s: being created using %(functor)s with specification:" +" %(volume_spec)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:611 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with creation provided " +"model %(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:619 +#, python-format +msgid "Volume %s: creating export" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:633 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with driver provided model " +"%(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:680 +#, python-format +msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" +msgstr "" + +#~ msgid "Invalid request body" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: prefix %s" +#~ msgstr "" + +#~ msgid "Schedule volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete schedule volume using flow: %s" +#~ msgstr "" + +#~ msgid "Create volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete create volume workflow" +#~ msgstr "" + +#~ msgid "Expected volume result not found" +#~ msgstr "" + +#~ msgid "Manager volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete manager volume workflow" +#~ msgstr "" + +#~ msgid "Unable to update stats, driver is uninitialized" +#~ msgstr "" + +#~ msgid "Bad reponse from server: %s" +#~ msgstr "" + +#~ msgid "" +#~ msgstr "" + +#~ msgid "%(flow)s has moved into state %(state)s from state %(old_state)s" +#~ msgstr "" + +#~ msgid "No request spec, will not reschedule" +#~ msgstr "" + +#~ msgid "No retry filter property or associated retry info, will not reschedule" +#~ msgstr "" + +#~ msgid "Retry info not present, will not reschedule" +#~ msgstr "" + +#~ msgid "Clear capabilities" +#~ msgstr "" + +#~ msgid "This usually means the volume was never succesfully created." 
+#~ msgstr "" + +#~ msgid "setting LU uppper (end) limit to %s" +#~ msgstr "" + +#~ msgid "Can't find lun or lun goup in array" +#~ msgstr "" + +#~ msgid "Volume to be restored to is smaller than the backup to be restored" +#~ msgstr "" + +#~ msgid "Volume driver '%(driver)s' not initialized." +#~ msgstr "" + +#~ msgid "in looping call" +#~ msgstr "" + +#~ msgid "Is the appropriate service running?" +#~ msgstr "" + +#~ msgid "Could not find another host" +#~ msgstr "" + +#~ msgid "Not enough allocatable volume gigabytes remaining" +#~ msgstr "" + +#~ msgid "Unable to update stats on non-intialized Volume Group: %s" +#~ msgstr "" + +#~ msgid "do_setup: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "migrate_volume started with more than one vdisk copy" +#~ msgstr "" + +#~ msgid "migrate_volume: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "Selected datastore: %s for the volume." +#~ msgstr "" + +#~ msgid "There are no valid datastores present under %s." +#~ msgstr "" + +#~ msgid "Unable to create volume, driver not initialized" +#~ msgstr "" + +#~ msgid "Migration %(migration_id)s could not be found." +#~ msgstr "" + +#~ msgid "Bad driver response status: %(status)s" +#~ msgstr "" + +#~ msgid "Instance %(instance_id)s could not be found." +#~ msgstr "" + +#~ msgid "Volume retype failed: %(reason)s" +#~ msgstr "" + +#~ msgid "SIGTERM received" +#~ msgstr "" + +#~ msgid "Child %(pid)d exited with status %(code)d" +#~ msgstr "" + +#~ msgid "_wait_child %d" +#~ msgstr "" + +#~ msgid "wait wrap.failed %s" +#~ msgstr "" + +#~ msgid "" +#~ "Report interval must be less than " +#~ "service down time. Current config: " +#~ ". Setting " +#~ "service_down_time to: %(new_service_down_time)s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target for volume %(name)s." +#~ msgstr "" + +#~ msgid "Updating iscsi target: %s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target %(name)s: %(e)s" +#~ msgstr "" + +#~ msgid "Caught '%(exception)s' exception." +#~ msgstr "" + +#~ msgid "Get code level failed" +#~ msgstr "" + +#~ msgid "do_setup: Could not get system name" +#~ msgstr "" + +#~ msgid "Failed to get license information." +#~ msgstr "" + +#~ msgid "do_setup: No configured nodes" +#~ msgstr "" + +#~ msgid "enter: _get_chap_secret_for_host: host name %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_chap_secret_for_host: host name " +#~ "%(host_name)s with secret %(chap_secret)s" +#~ msgstr "" + +#~ msgid "" +#~ "_create_host: Cannot clean host name. " +#~ "Host name is not unicode or string" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: %s" +#~ msgstr "" + +#~ msgid "leave: _get_host_from_connector: host %s" +#~ msgstr "" + +#~ msgid "enter: _create_host: host %s" +#~ msgstr "" + +#~ msgid "_create_host: No connector ports" +#~ msgstr "" + +#~ msgid "leave: _create_host: host %(host)s - %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +#~ msgstr "" + +#~ msgid "" +#~ "storwize_svc_multihostmap_enabled is set to " +#~ "False, Not allow multi host mapping" +#~ msgstr "" + +#~ msgid "volume %s mapping to multi host" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _map_vol_to_host: LUN %(result_lun)s, " +#~ "volume %(volume_name)s, host %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "leave: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "_create_host failed to return the host name." 
+#~ msgstr "" + +#~ msgid "_get_host_from_connector failed to return the host name for connector" +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to any host found." +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: Multiple mappings of " +#~ "volume %(vol_name)s found, no host " +#~ "specified." +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to host %(host_name)s found" +#~ msgstr "" + +#~ msgid "protocol must be specified as ' iSCSI' or ' FC'" +#~ msgstr "" + +#~ msgid "enter: _create_vdisk: vdisk %s " +#~ msgstr "" + +#~ msgid "" +#~ "_create_vdisk %(name)s - did not find success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find success " +#~ "message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find mapping " +#~ "id in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to prepare FlashCopy" +#~ " from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "Unexecpted mapping status %(status)s for " +#~ "mapping %(id)s. Attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Mapping %(id)s prepare failed to " +#~ "complete within the allotted %(to)d " +#~ "seconds timeout. Terminating." +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s with " +#~ "exception %(ex)s" +#~ msgstr "" + +#~ msgid "_prepare_fc_map: %s" +#~ msgstr "" + +#~ msgid "" +#~ "_start_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "enter: _run_flashcopy: execute FlashCopy from" +#~ " source %(source)s to target %(target)s" +#~ msgstr "" + +#~ msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +#~ msgstr "" + +#~ msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %(src_vdisk)s (%(src_id)s) does not exist" +#~ msgstr "" + +#~ msgid "" +#~ "_create_copy: cannot get source vdisk " +#~ "%(src)s capacity from vdisk attributes " +#~ "%(attr)s" +#~ msgstr "" + +#~ msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_flashcopy_mapping_attributes: mapping " +#~ "%(fc_map_id)s, attributes %(attributes)s" +#~ msgstr "" + +#~ msgid "enter: _is_vdisk_defined: vdisk %s " +#~ msgstr "" + +#~ msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +#~ msgstr "" + +#~ msgid "enter: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "warning: Tried to delete vdisk %s but it does not exist." 
+#~ msgstr "" + +#~ msgid "leave: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "" +#~ "_add_vdisk_copy %(name)s - did not find" +#~ " success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "_get_vdisk_copy_attrs: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "_get_pool_attrs: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "enter: _execute_command_and_parse_attributes: command %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _execute_command_and_parse_attributes:\n" +#~ "command: %(cmd)s\n" +#~ "attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "_get_hdr_dic: attribute headers and values do not match.\n" +#~ " Headers: %(header)s\n" +#~ " Values: %(row)s" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ "stdout: %(out)s\n" +#~ "stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "Did not find expected column in %(fun)s: %(hdr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Volume size %(size)s cannot be lesser" +#~ " than the snapshot size %(snap_size)s. " +#~ "They must be >= original snapshot " +#~ "size." +#~ msgstr "" + +#~ msgid "" +#~ "Clones currently disallowed when %(size)s " +#~ "< %(source_size)s. They must be >= " +#~ "original volume size." +#~ msgstr "" + +#~ msgid "" +#~ "Size of specified image %(image_size)s " +#~ "is larger than volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "" +#~ "Image minDisk size %(min_disk)s is " +#~ "larger than the volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "Updating volume %(volume_id)s with %(update)s" +#~ msgstr "" + +#~ msgid "Volume %s: resetting 'creating' status failed" +#~ msgstr "" + +#~ msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s" +#~ msgstr "" + +#~ msgid "Marking volume %s as bootable" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting download of %(image_id)s " +#~ "(%(image_location)s) to volume %(volume_id)s" +#~ msgstr "" + +#~ msgid "" +#~ "Downloaded image %(image_id)s (%(image_location)s)" +#~ " to volume %(volume_id)s successfully" +#~ msgstr "" + +#~ msgid "" +#~ "Creating volume glance metadata for " +#~ "volume %(volume_id)s backed by image " +#~ "%(image_id)s with: %(vol_metadata)s" +#~ msgstr "" + +#~ msgid "" +#~ "Cloning %(volume_id)s from image %(image_id)s" +#~ " at location %(image_location)s" +#~ msgstr "" + diff --git a/cinder/locale/fi_FI/LC_MESSAGES/cinder.po b/cinder/locale/fi_FI/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..d0ae6fe0de --- /dev/null +++ b/cinder/locale/fi_FI/LC_MESSAGES/cinder.po @@ -0,0 +1,10736 @@ +# Finnish (Finland) translations for cinder. +# Copyright (C) 2013 ORGANIZATION +# This file is distributed under the same license as the cinder project. 
+# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Cinder\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-02-03 06:16+0000\n" +"PO-Revision-Date: 2013-05-08 11:44+0000\n" +"Last-Translator: FULL NAME \n" +"Language-Team: Finnish (Finland) " +"(http://www.transifex.com/projects/p/openstack/language/fi_FI/)\n" +"Plural-Forms: nplurals=2; plural=(n != 1)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:102 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:66 cinder/brick/exception.py:33 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:88 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:107 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:112 +#, python-format +msgid "Volume driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:116 +#, python-format +msgid "Backup driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:120 +#, python-format +msgid "Connection to glance failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:124 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:129 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:133 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:137 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:141 +msgid "Volume driver not ready." +msgstr "" + +#: cinder/exception.py:145 cinder/brick/exception.py:74 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:150 +#, python-format +msgid "Invalid snapshot: %(reason)s" +msgstr "" + +#: cinder/exception.py:154 +#, python-format +msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:159 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:163 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:167 +msgid "The results are invalid." +msgstr "" + +#: cinder/exception.py:171 +#, python-format +msgid "Invalid input received: %(reason)s" +msgstr "" + +#: cinder/exception.py:175 +#, python-format +msgid "Invalid volume type: %(reason)s" +msgstr "" + +#: cinder/exception.py:179 +#, python-format +msgid "Invalid volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:183 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:187 +#, python-format +msgid "Invalid host: %(reason)s" +msgstr "" + +#: cinder/exception.py:193 cinder/brick/exception.py:81 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:197 +#, python-format +msgid "Invalid auth key: %(reason)s" +msgstr "" + +#: cinder/exception.py:201 +#, python-format +msgid "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" +msgstr "" + +#: cinder/exception.py:206 +msgid "Service is unavailable at this time." 
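The block opening the new fi_FI file above is the standard Babel-generated catalog header; "Project-Id-Version", the charset, and "Plural-Forms" describe the whole file rather than any one message. As a hedged sketch (none of this code is part of the patch), this is roughly how such a catalog is consumed at runtime once it has been compiled to a .mo file, e.g. with GNU gettext's "msgfmt cinder.po -o cinder.mo"::

    # Load the "cinder" domain for fi_FI; falls back to the untranslated
    # msgids when no compiled .mo file is installed.
    import gettext

    trans = gettext.translation(
        "cinder",                    # domain -> cinder.mo
        localedir="cinder/locale",   # contains fi_FI/LC_MESSAGES/
        languages=["fi_FI"],
        fallback=True,
    )
    _ = trans.gettext
    print(_("An unknown exception occurred."))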
+msgstr "" + +#: cinder/exception.py:210 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:214 +#, python-format +msgid "The device in the path %(path)s is unavailable: %(reason)s" +msgstr "" + +#: cinder/exception.py:218 +#, python-format +msgid "Expected a uuid but received %(uuid)s." +msgstr "" + +#: cinder/exception.py:222 cinder/brick/exception.py:68 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:228 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:232 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "" +"Volume %(volume_id)s has no administration metadata with key " +"%(metadata_key)s." +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Invalid metadata: %(reason)s" +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Invalid metadata size: %(reason)s" +msgstr "" + +#: cinder/exception.py:250 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:255 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:264 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:269 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s deletion is not allowed with volumes " +"present with the type." +msgstr "" + +#: cinder/exception.py:274 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:278 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:282 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:287 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:291 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:295 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:328 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:332 +#, python-format +msgid "Unknown quota resources %(unknown)s." 
+msgstr "" + +#: cinder/exception.py:336 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:340 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:344 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:348 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:352 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:356 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:365 +#, python-format +msgid "Volume Type %(id)s already exists." +msgstr "" + +#: cinder/exception.py:369 +#, python-format +msgid "Volume type encryption for type %(type_id)s already exists." +msgstr "" + +#: cinder/exception.py:373 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:377 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:381 +#, python-format +msgid "Could not find parameter %(param)s" +msgstr "" + +#: cinder/exception.py:385 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:389 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:398 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:402 +#, python-format +msgid "Quota exceeded: code=%(code)s" +msgstr "" + +#: cinder/exception.py:409 +#, python-format +msgid "" +"Requested volume or snapshot exceeds allowed Gigabytes quota. Requested " +"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." +msgstr "" + +#: cinder/exception.py:415 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:419 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:423 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:427 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:432 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:436 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:440 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:444 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:449 +#, python-format +msgid "Glance metadata for volume/snapshot %(id)s cannot be found." 
+msgstr "" + +#: cinder/exception.py:453 +#, python-format +msgid "Failed to export for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:457 +#, python-format +msgid "Failed to create metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:461 +#, python-format +msgid "Failed to update metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:465 +#, python-format +msgid "Failed to copy metadata to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:469 +#, python-format +msgid "Failed to copy image to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:473 +msgid "Invalid Ceph args provided for backup rbd operation" +msgstr "" + +#: cinder/exception.py:477 +msgid "An error has occurred during backup operation" +msgstr "" + +#: cinder/exception.py:481 +msgid "Backup RBD operation failed" +msgstr "" + +#: cinder/exception.py:485 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:489 +msgid "Failed to identify volume backend." +msgstr "" + +#: cinder/exception.py:493 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "" + +#: cinder/exception.py:497 +#, python-format +msgid "Connection to swift failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:501 +#, python-format +msgid "Transfer %(transfer_id)s could not be found." +msgstr "" + +#: cinder/exception.py:505 +#, python-format +msgid "Volume migration failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:509 +#, python-format +msgid "SSH command injection detected: %(command)s" +msgstr "" + +#: cinder/exception.py:513 +#, python-format +msgid "QoS Specs %(specs_id)s already exists." +msgstr "" + +#: cinder/exception.py:517 +#, python-format +msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:522 +#, python-format +msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "No such QoS spec %(specs_id)s." +msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:536 +#, python-format +msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:541 +#, python-format +msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." +msgstr "" + +#: cinder/exception.py:546 +#, python-format +msgid "Invalid qos specs: %(reason)s" +msgstr "" + +#: cinder/exception.py:550 +#, python-format +msgid "QoS Specs %(specs_id)s is still associated with entities." +msgstr "" + +#: cinder/exception.py:554 +#, python-format +msgid "key manager error: %(reason)s" +msgstr "" + +#: cinder/exception.py:560 +msgid "Coraid Cinder Driver exception." +msgstr "" + +#: cinder/exception.py:564 +msgid "Failed to encode json data." +msgstr "" + +#: cinder/exception.py:568 +msgid "Login on ESM failed." +msgstr "" + +#: cinder/exception.py:572 +msgid "Relogin on ESM failed." +msgstr "" + +#: cinder/exception.py:576 +#, python-format +msgid "Group with name \"%(group_name)s\" not found." +msgstr "" + +#: cinder/exception.py:580 +#, python-format +msgid "ESM configure request failed: %(message)s." +msgstr "" + +#: cinder/exception.py:584 +#, python-format +msgid "Coraid ESM not available with reason: %(reason)s." +msgstr "" + +#: cinder/exception.py:589 +msgid "Zadara Cinder Driver exception." 
+msgstr "" + +#: cinder/exception.py:593 +#, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:597 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:601 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:605 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:609 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:613 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:618 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:622 +msgid "SolidFire Cinder Driver exception" +msgstr "" + +#: cinder/exception.py:626 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:630 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:636 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:641 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:645 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:649 cinder/exception.py:662 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:654 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:658 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/manager.py:133 +msgid "Notifying Schedulers of capabilities ..." +msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:105 +#, python-format +msgid "" +"Default quota for resource: %(res)s is set by the default quota flag: " +"quota_%(res)s, it is now deprecated. Please use the the default quota " +"class for default quota." +msgstr "" + +#: cinder/quota.py:748 +#, python-format +msgid "Created reservations %s" +msgstr "" + +#: cinder/quota.py:770 +#, python-format +msgid "Failed to commit reservations %s" +msgstr "" + +#: cinder/quota.py:790 +#, python-format +msgid "Failed to roll back reservations %s" +msgstr "" + +#: cinder/quota.py:876 +msgid "Cannot register resource" +msgstr "" + +#: cinder/quota.py:879 +msgid "Cannot register resources" +msgstr "" + +#: cinder/quota_utils.py:46 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume - " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/quota_utils.py:56 cinder/transfer/api.py:168 +#: cinder/volume/flows/api/create_volume.py:520 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/service.py:95 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:108 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:148 +#, python-format +msgid "" +"Report interval must be less than service down time. Current config " +"service_down_time: %(service_down_time)s, report_interval for this: " +"service is: %(report_interval)s. 
Setting global service_down_time to: " +"%(new_down_time)s" +msgstr "" + +#: cinder/service.py:216 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:255 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:270 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:276 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:298 +#, python-format +msgid "" +"Value of config option %(name)s_workers must be integer greater than 1. " +"Input value ignored." +msgstr "" + +#: cinder/service.py:373 +msgid "serve() can only be called once" +msgstr "" + +#: cinder/service.py:379 cinder/openstack/common/service.py:166 +#: cinder/openstack/common/service.py:384 +msgid "Full set of CONF:" +msgstr "" + +#: cinder/service.py:387 +#, python-format +msgid "%s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Can not translate %s to integer." +msgstr "" + +#: cinder/utils.py:127 +#, python-format +msgid "May specify only one of %s" +msgstr "" + +#: cinder/utils.py:212 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:228 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:412 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:423 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:698 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:759 +#, python-format +msgid "Volume driver %s not initialized" +msgstr "" + +#: cinder/wsgi.py:127 cinder/openstack/common/sslutils.py:50 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: cinder/wsgi.py:130 cinder/openstack/common/sslutils.py:53 +#, python-format +msgid "Unable to find ca_file : %s" +msgstr "" + +#: cinder/wsgi.py:133 cinder/openstack/common/sslutils.py:56 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: cinder/wsgi.py:136 cinder/openstack/common/sslutils.py:59 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:169 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:206 +#, python-format +msgid "Started %(name)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:244 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:313 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." 
+msgstr "" + +#: cinder/api/common.py:92 cinder/api/common.py:126 cinder/volume/api.py:266 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:95 cinder/api/common.py:130 cinder/volume/api.py:263 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:120 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:134 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:162 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:189 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:182 +msgid "Initializing extension manager." +msgstr "" + +#: cinder/api/extensions.py:197 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:235 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:236 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:240 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:256 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:262 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:276 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:287 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:356 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:266 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:463 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:786 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:907 +msgid "subclasses must implement construct()!" 
+msgstr "" + +#: cinder/api/contrib/admin_actions.py:81 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:227 +#: cinder/api/contrib/volume_transfer.py:157 +#: cinder/api/contrib/volume_transfer.py:193 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:224 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:60 +msgid "Snapshot not found." +msgstr "" + +#: cinder/api/contrib/hosts.py:86 cinder/api/openstack/wsgi.py:245 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:136 +#, python-format +msgid "Host '%s' could not be found." +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:168 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:180 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:206 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:214 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:111 +msgid "Please specify a name for QoS specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:220 +msgid "Failed to disassociate qos specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:222 +msgid "Qos specs still in use." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:298 +#: cinder/api/contrib/qos_specs_manage.py:351 +msgid "Volume Type id must not be None." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:72 +msgid "Missing required element quota_class_set in request body." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:81 +msgid "Quota class limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:85 +msgid "Quota class limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:60 +msgid "Quota limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quotas.py:65 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:100 +msgid "Missing required element quota_set in request body." +msgstr "" + +#: cinder/api/contrib/quotas.py:111 +#, python-format +msgid "Bad key(s) in quota set: %s" +msgstr "" + +#: cinder/api/contrib/scheduler_hints.py:36 +msgid "Malformed scheduler_hints attribute" +msgstr "" + +#: cinder/api/contrib/services.py:84 +msgid "" +"Query by service parameter is deprecated. Please use binary parameter " +"instead." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:51 +msgid "'status' must be specified." 
+msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:61 +#, python-format +msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:67 +#, python-format +msgid "" +"Provided snapshot status %(provided)s not allowed for snapshot with " +"status %(current)s." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:79 +msgid "progress must be an integer percentage" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:101 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:105 +#: cinder/api/v1/snapshot_metadata.py:75 cinder/api/v1/volume_metadata.py:75 +#: cinder/api/v2/snapshot_metadata.py:75 cinder/api/v2/volume_metadata.py:74 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:108 +#: cinder/api/v1/snapshot_metadata.py:79 cinder/api/v1/volume_metadata.py:79 +#: cinder/api/v2/snapshot_metadata.py:79 cinder/api/v2/volume_metadata.py:78 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:150 +msgid "" +"Key names can only contain alphanumeric characters, underscores, periods," +" colons and hyphens." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:99 +#, python-format +msgid "" +"Invalid request to attach volume to an instance %(instance_uuid)s and a " +"host %(host_name)s simultaneously" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:107 +msgid "Invalid request to attach volume to an invalid target" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:111 +msgid "" +"Invalid request to attach volume with an invalid mode. Attaching mode " +"should be 'rw' or 'ro'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:196 +msgid "Unable to fetch connection information from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:216 +msgid "Unable to terminate volume connection from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:229 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:237 +msgid "Bad value for 'force' parameter." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:240 +msgid "'force' is not string or bool." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:280 +msgid "New volume size must be specified as an integer." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:299 +msgid "Must specify readonly in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:307 +msgid "Bad value for 'readonly'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:311 +msgid "'readonly' not string or bool" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:325 +msgid "New volume type must be specified." 
+msgstr "" + +#: cinder/api/contrib/volume_transfer.py:131 +msgid "Listing volume transfers" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:147 +#, python-format +msgid "Creating new volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:162 +#, python-format +msgid "Creating transfer of volume %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:183 +#, python-format +msgid "Accepting volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:196 +#, python-format +msgid "Accepting transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:217 +#, python-format +msgid "Delete transfer with id: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:64 +msgid "key_size must be non-negative" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:67 +msgid "key_size must be an integer" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:73 +msgid "provider must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:75 +msgid "control_location must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:83 +#, python-format +msgid "Valid control location are: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:111 +msgid "Cannot create encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:115 +msgid "Create body is not valid." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:157 +msgid "Cannot delete encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/middleware/auth.py:108 +msgid "Invalid service catalog json." +msgstr "" + +#: cinder/api/middleware/fault.py:44 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:53 cinder/api/openstack/wsgi.py:984 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/fault.py:69 +#, python-format +msgid "%(exception)s: %(explanation)s" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:55 cinder/api/middleware/sizelimit.py:64 +#: cinder/api/middleware/sizelimit.py:78 +msgid "Request is too large." +msgstr "" + +#: cinder/api/openstack/__init__.py:69 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:80 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:104 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:126 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." 
+msgstr "" + +#: cinder/api/openstack/wsgi.py:220 cinder/api/openstack/wsgi.py:634 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:639 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:677 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:682 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:685 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:793 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:799 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:803 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:914 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:917 cinder/api/openstack/wsgi.py:930 +#: cinder/api/v1/snapshot_metadata.py:53 cinder/api/v1/snapshot_metadata.py:71 +#: cinder/api/v1/snapshot_metadata.py:96 cinder/api/v1/snapshot_metadata.py:121 +#: cinder/api/v1/volume_metadata.py:53 cinder/api/v1/volume_metadata.py:71 +#: cinder/api/v1/volume_metadata.py:96 cinder/api/v1/volume_metadata.py:121 +#: cinder/api/v2/snapshot_metadata.py:53 cinder/api/v2/snapshot_metadata.py:71 +#: cinder/api/v2/snapshot_metadata.py:96 cinder/api/v2/snapshot_metadata.py:121 +#: cinder/api/v2/volume_metadata.py:52 cinder/api/v2/volume_metadata.py:70 +#: cinder/api/v2/volume_metadata.py:95 cinder/api/v2/volume_metadata.py:120 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:927 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:939 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:987 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:139 cinder/api/v2/limits.py:138 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:264 cinder/api/v2/limits.py:261 +msgid "This request was rate-limited." 
+msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:37 cinder/api/v1/snapshot_metadata.py:117 +#: cinder/api/v1/snapshot_metadata.py:156 cinder/api/v2/snapshot_metadata.py:37 +#: cinder/api/v2/snapshot_metadata.py:117 +#: cinder/api/v2/snapshot_metadata.py:156 +msgid "snapshot does not exist" +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:139 +#: cinder/api/v1/snapshot_metadata.py:149 cinder/api/v1/volume_metadata.py:139 +#: cinder/api/v1/volume_metadata.py:149 cinder/api/v2/snapshot_metadata.py:139 +#: cinder/api/v2/snapshot_metadata.py:149 cinder/api/v2/volume_metadata.py:138 +#: cinder/api/v2/volume_metadata.py:148 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:119 cinder/api/v2/snapshots.py:120 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:184 +msgid "'volume_id' must be specified" +msgstr "" + +#: cinder/api/v1/snapshots.py:182 cinder/api/v2/snapshots.py:193 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:186 cinder/api/v2/snapshots.py:202 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:37 cinder/api/v1/volume_metadata.py:117 +#: cinder/api/v1/volume_metadata.py:156 cinder/api/v2/volume_metadata.py:36 +#: cinder/api/v2/volume_metadata.py:116 cinder/api/v2/volume_metadata.py:155 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:111 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:290 cinder/api/v2/volumes.py:228 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:344 cinder/api/v1/volumes.py:348 +#: cinder/api/v2/volumes.py:298 cinder/api/v2/volumes.py:302 +msgid "Invalid imageRef provided." +msgstr "" + +#: cinder/api/v1/volumes.py:388 cinder/api/v2/volumes.py:354 +#, python-format +msgid "snapshot id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:401 +#, python-format +msgid "source vol id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:412 cinder/api/v2/volumes.py:377 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/v1/volumes.py:496 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/snapshots.py:111 cinder/api/v2/snapshots.py:126 +#: cinder/api/v2/snapshots.py:267 +msgid "Snapshot could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:174 cinder/api/v2/snapshots.py:234 +#: cinder/api/v2/volumes.py:313 cinder/api/v2/volumes.py:419 +#, python-format +msgid "Missing required element '%s' in request body" +msgstr "" + +#: cinder/api/v2/snapshots.py:190 cinder/api/v2/volumes.py:217 +#: cinder/api/v2/volumes.py:234 cinder/api/v2/volumes.py:449 +msgid "Volume could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:230 cinder/api/v2/volumes.py:415 +msgid "Missing request body" +msgstr "" + +#: cinder/api/v2/types.py:70 +msgid "Volume type not found" +msgstr "" + +#: cinder/api/v2/volumes.py:237 +msgid "Volume cannot be deleted while in attached state" +msgstr "" + +#: cinder/api/v2/volumes.py:343 +msgid "Volume type not found." 
+msgstr "" + +#: cinder/api/v2/volumes.py:366 +#, python-format +msgid "source volume id:%s not found" +msgstr "" + +#: cinder/api/v2/volumes.py:472 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:66 +msgid "Backup status must be available or error" +msgstr "" + +#: cinder/backup/api.py:105 +msgid "Volume to be backed up must be available" +msgstr "" + +#: cinder/backup/api.py:140 +msgid "Backup status must be available" +msgstr "" + +#: cinder/backup/api.py:145 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:154 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:170 +msgid "Volume to be restored to must be available" +msgstr "" + +#: cinder/backup/api.py:176 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." +msgstr "" + +#: cinder/backup/api.py:181 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:97 +msgid "NULL host not allowed for volume backend lookup." +msgstr "" + +#: cinder/backup/manager.py:100 +#, python-format +msgid "Checking hostname '%s' for backend info." +msgstr "" + +#: cinder/backup/manager.py:107 +#, python-format +msgid "Backend not found in hostname (%s) so using default." +msgstr "" + +#: cinder/backup/manager.py:117 +#, python-format +msgid "Manager requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:120 cinder/backup/manager.py:132 +msgid "Fetching default backend." +msgstr "" + +#: cinder/backup/manager.py:123 +#, python-format +msgid "Volume manager for backend '%s' does not exist." +msgstr "" + +#: cinder/backup/manager.py:129 +#, python-format +msgid "Driver requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:147 +#, python-format +msgid "" +"Registering backend %(backend)s (host=%(host)s " +"backend_name=%(backend_name)s)." +msgstr "" + +#: cinder/backup/manager.py:154 +#, python-format +msgid "Registering default backend %s." +msgstr "" + +#: cinder/backup/manager.py:158 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)." +msgstr "" + +#: cinder/backup/manager.py:165 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s." +msgstr "" + +#: cinder/backup/manager.py:184 +msgid "Cleaning up incomplete backup operations." +msgstr "" + +#: cinder/backup/manager.py:189 +#, python-format +msgid "Resetting volume %s to available (was backing-up)." +msgstr "" + +#: cinder/backup/manager.py:194 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)." +msgstr "" + +#: cinder/backup/manager.py:206 +#, python-format +msgid "Resetting backup %s to error (was creating)." +msgstr "" + +#: cinder/backup/manager.py:212 +#, python-format +msgid "Resetting backup %s to available (was restoring)." +msgstr "" + +#: cinder/backup/manager.py:217 +#, python-format +msgid "Resuming delete on backup: %s." +msgstr "" + +#: cinder/backup/manager.py:225 +#, python-format +msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:237 +#, python-format +msgid "" +"Create backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s." 
+msgstr "" + +#: cinder/backup/manager.py:249 +#, python-format +msgid "" +"Create backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:282 +#, python-format +msgid "Create backup finished. backup: %s." +msgstr "" + +#: cinder/backup/manager.py:286 +#, python-format +msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:299 +#, python-format +msgid "" +"Restore backup aborted: expected volume status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:310 +#, python-format +msgid "" +"Restore backup aborted: expected backup status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:329 +#, python-format +msgid "" +"Restore backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:360 +#, python-format +msgid "" +"Restore backup finished, backup %(backup_id)s restored to volume " +"%(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:379 +#, python-format +msgid "Delete backup started, backup: %s." +msgstr "" + +#: cinder/backup/manager.py:386 +#, python-format +msgid "" +"Delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:399 +#, python-format +msgid "" +"Delete backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:422 +#, python-format +msgid "Delete backup finished, backup %s deleted." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:116 +msgid "" +"rbd striping not supported - ignoring configuration settings for rbd " +"striping" +msgstr "" + +#: cinder/backup/drivers/ceph.py:147 +#, python-format +msgid "invalid user '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:213 +msgid "backup_id required" +msgstr "" + +#: cinder/backup/drivers/ceph.py:224 +#, python-format +msgid "discarding %(length)s bytes from offset %(offset)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:232 +#, python-format +msgid "writing zeroes chunk %d" +msgstr "" + +#: cinder/backup/drivers/ceph.py:246 +#, python-format +msgid "transferring data between '%(src)s' and '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:250 +#, python-format +msgid "%(chunks)s chunks of %(bytes)s bytes to be transferred" +msgstr "" + +#: cinder/backup/drivers/ceph.py:269 +#, python-format +msgid "transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:279 +#, python-format +msgid "transferring remaining %s bytes" +msgstr "" + +#: cinder/backup/drivers/ceph.py:295 +#, python-format +msgid "creating base image '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:322 cinder/backup/drivers/ceph.py:603 +#, python-format +msgid "deleting backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:325 +msgid "no backup snapshot to delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:361 +#, python-format +msgid "trying diff format name format basename='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:369 +#, python-format +msgid "image %s not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:377 +#, python-format +msgid "base image still has %s snapshots so skipping base image delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:382 +#, python-format +msgid "deleting base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:389 +#, python-format +msgid "image busy, retrying %(retries)s more time(s) in %(delay)ss" +msgstr "" + +#: cinder/backup/drivers/ceph.py:394 +msgid "max retries reached - raising error" +msgstr "" + +#: cinder/backup/drivers/ceph.py:397 +#, python-format +msgid "base backup image='%s' deleted)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:407 +#, python-format +msgid "deleting source snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:453 +#, python-format +msgid "performing differential transfer from '%(src)s' to '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:478 +#, python-format +msgid "rbd diff op failed - (ret=%(ret)s stderr=%(stderr)s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:488 +#, python-format +msgid "image '%s' not found - trying diff format name" +msgstr "" + +#: cinder/backup/drivers/ceph.py:493 +#, python-format +msgid "diff format image '%s' not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:528 +#, python-format +msgid "using --from-snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:543 +#, python-format +msgid "source snap '%s' is stale so deleting" +msgstr "" + +#: cinder/backup/drivers/ceph.py:555 +#, python-format +msgid "" +"snap='%(snap)s' does not exist in base image='%(base)s' - aborting " +"incremental backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:566 +#, python-format +msgid "creating backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:586 +#, python-format +msgid "differential backup transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:595 +msgid "differential backup transfer failed" +msgstr "" + +#: 
cinder/backup/drivers/ceph.py:625 +#, python-format +msgid "creating base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:634 +msgid "copying data" +msgstr "" + +#: cinder/backup/drivers/ceph.py:694 +#, python-format +msgid "looking for snapshot of backup base '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:697 +#, python-format +msgid "backup base '%s' has no snapshots" +msgstr "" + +#: cinder/backup/drivers/ceph.py:704 +#, python-format +msgid "backup '%s' has no snapshot" +msgstr "" + +#: cinder/backup/drivers/ceph.py:708 +#, python-format +msgid "backup should only have one snapshot but instead has %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:713 +#, python-format +msgid "found snapshot '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:734 +msgid "need non-zero volume size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:751 +#, python-format +msgid "Starting backup of volume='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:764 +msgid "forcing full backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:776 +#, python-format +msgid "backup '%s' finished." +msgstr "" + +#: cinder/backup/drivers/ceph.py:834 +msgid "adjusting restore vol size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:846 +#, python-format +msgid "trying incremental restore from base='%(base)s' snap='%(snap)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:858 +msgid "differential restore failed, trying full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:869 +#, python-format +msgid "restore transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:916 +#, python-format +msgid "rbd has %s extents" +msgstr "" + +#: cinder/backup/drivers/ceph.py:938 +msgid "dest volume is original volume - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:959 +msgid "destination has extents - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:964 +#, python-format +msgid "no restore point found for backup='%s', forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:995 +msgid "forcing full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1005 +#, python-format +msgid "starting restore from Ceph backup=%(src)s to volume=%(dest)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1016 +msgid "volume_file does not support fileno() so skipping fsync()" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1021 +msgid "restore finished successfully." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:1023 +#, python-format +msgid "restore finished with error - %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1029 +#, python-format +msgid "delete started for backup=%s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1034 +msgid "rbd image not found but continuing anyway so that db entry can be removed" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1037 +#, python-format +msgid "delete '%s' finished with warning" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1039 +#, python-format +msgid "delete '%s' finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:106 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:123 +#, python-format +msgid "single_user auth mode enabled, but %(param)s not set" +msgstr "" + +#: cinder/backup/drivers/swift.py:141 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:146 +#, python-format +msgid "container %s does not exist" +msgstr "" + +#: cinder/backup/drivers/swift.py:151 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/drivers/swift.py:157 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:173 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:182 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:192 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:209 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/drivers/swift.py:214 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:219 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:224 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/drivers/swift.py:234 +#, python-format +msgid "volume size %d is invalid." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:248 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:271 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/drivers/swift.py:278 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:287 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/drivers/swift.py:291 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/drivers/swift.py:297 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:301 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:304 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:312 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/drivers/swift.py:328 cinder/backup/drivers/tsm.py:324 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/drivers/swift.py:345 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/drivers/swift.py:350 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:356 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/drivers/swift.py:362 +#, python-format +msgid "" +"restoring object from swift. backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:378 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/drivers/swift.py:401 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:409 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:423 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:428 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:432 cinder/backup/drivers/tsm.py:378 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:446 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:455 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:458 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:468 cinder/backup/drivers/tsm.py:440 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/backup/drivers/tsm.py:85 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to create device hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:143 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to obtain backup success notification from " +"server.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:173 +#, python-format +msgid "" +"restore: %(vol_id)s Failed.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:199 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a block device." +msgstr "" + +#: cinder/backup/drivers/tsm.py:206 +#, python-format +msgid "backup: %(vol_id)s Failed. Cannot obtain real path to device %(path)s." +msgstr "" + +#: cinder/backup/drivers/tsm.py:213 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a file." +msgstr "" + +#: cinder/backup/drivers/tsm.py:260 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to remove backup hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:286 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to TSM, volume path: " +"%(volume_path)s," +msgstr "" + +#: cinder/backup/drivers/tsm.py:298 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:308 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:338 +#, python-format +msgid "" +"restore: starting restore of backup from TSM to volume %(volume_id)s, " +"backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:352 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:362 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:413 +#, python-format +msgid "" +"delete: %(vol_id)s Failed to run dsmc with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:421 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments with " +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:432 +#, python-format +msgid "" +"delete: %(vol_id)s Failed with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/brick/exception.py:55 +#, python-format +msgid "Exception in string format operation. 
msg='%s'" +msgstr "" + +#: cinder/brick/exception.py:85 +msgid "We are unable to locate any Fibre Channel devices." +msgstr "" + +#: cinder/brick/exception.py:89 +msgid "Unable to find a Fibre Channel volume device." +msgstr "" + +#: cinder/brick/exception.py:93 +#, python-format +msgid "Volume device not found at %(device)s." +msgstr "" + +#: cinder/brick/exception.py:97 +#, python-format +msgid "Unable to find Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:101 +#, python-format +msgid "Failed to create Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:105 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:109 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:113 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:117 +#, python-format +msgid "Connect to volume via protocol %(protocol)s not supported." +msgstr "" + +#: cinder/brick/initiator/connector.py:127 +#, python-format +msgid "Invalid InitiatorConnector protocol specified %(protocol)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:140 +#, python-format +msgid "Failed to access the device on the path %(path)s: %(error)s %(info)s." +msgstr "" + +#: cinder/brick/initiator/connector.py:229 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. Try" +" number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:242 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:317 +#, python-format +msgid "Could not find the iSCSI Initiator File %s" +msgstr "" + +#: cinder/brick/initiator/connector.py:609 +msgid "We are unable to locate any Fibre Channel devices" +msgstr "" + +#: cinder/brick/initiator/connector.py:619 +#, python-format +msgid "Looking for Fibre Channel dev %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:629 +msgid "Fibre Channel volume device not found." +msgstr "" + +#: cinder/brick/initiator/connector.py:633 +#, python-format +msgid "Fibre volume not yet found. Will rescan & retry. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:649 +#, python-format +msgid "Found Fibre Channel volume %(name)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:658 +#, python-format +msgid "Multipath device discovered %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:776 +#, python-format +msgid "AoE volume not yet found at: %(path)s. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:789 +#, python-format +msgid "Found AoE device %(path)s (after %(tries)s rediscover)" +msgstr "" + +#: cinder/brick/initiator/connector.py:815 +#, python-format +msgid "aoe-discover: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:825 +#, python-format +msgid "aoe-revalidate %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:834 +#, python-format +msgid "aoe-flush %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:858 +msgid "" +"Connection details not present. RemoteFsClient may not initialize " +"properly." 
+msgstr "" + +#: cinder/brick/initiator/connector.py:915 +msgid "Invalid connection_properties specified no device_path attribute" +msgstr "" + +#: cinder/brick/initiator/linuxfc.py:50 cinder/brick/initiator/linuxfc.py:56 +msgid "systool is not installed" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:99 +#: cinder/brick/initiator/linuxscsi.py:107 +#: cinder/brick/initiator/linuxscsi.py:124 +#, python-format +msgid "multipath call failed exit (%(code)s)" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:145 +#, python-format +msgid "Couldn't find multipath device %(line)s" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:149 +#, python-format +msgid "Found multipath device = %(mdev)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:140 +msgid "Attempting recreate of backing lun..." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:158 +#, python-format +msgid "" +"Failed to recover attempt to create iscsi backing lun for volume " +"id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:177 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:184 +#, python-format +msgid "" +"Created volume path %(vp)s,\n" +"content: %(vc)%" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:216 cinder/brick/iscsi/iscsi.py:365 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:227 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:258 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:262 +#, python-format +msgid "Volume path %s does not exist, nothing to remove." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:280 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:290 cinder/brick/iscsi/iscsi.py:550 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:375 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:469 +msgid "cinder-rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:489 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:513 cinder/brick/iscsi/iscsi.py:522 +#, python-format +msgid "Failed to create iscsi target for volume id:%s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:532 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:542 +#, python-format +msgid "Failed to remove iscsi target for volume id:%s." 
+msgstr "" + +#: cinder/brick/iscsi/iscsi.py:571 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 cinder/brick/local_dev/lvm.py:158 +#: cinder/brick/local_dev/lvm.py:474 cinder/brick/local_dev/lvm.py:503 +#: cinder/brick/local_dev/lvm.py:546 cinder/brick/local_dev/lvm.py:609 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 cinder/brick/local_dev/lvm.py:159 +#: cinder/brick/local_dev/lvm.py:475 cinder/brick/local_dev/lvm.py:504 +#: cinder/brick/local_dev/lvm.py:547 cinder/brick/local_dev/lvm.py:610 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 cinder/brick/local_dev/lvm.py:160 +#: cinder/brick/local_dev/lvm.py:476 cinder/brick/local_dev/lvm.py:505 +#: cinder/brick/local_dev/lvm.py:548 cinder/brick/local_dev/lvm.py:611 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, python-format +msgid "Unable to locate Volume Group %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:157 +msgid "Error querying thin pool about data_percent" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:370 +#, python-format +msgid "Unable to find VG: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:420 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:473 +msgid "Error creating Volume" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:489 +#, python-format +msgid "Unable to find LV: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:502 +msgid "Error creating snapshot" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:545 +msgid "Error activating LV" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:563 +#, python-format +msgid "Error reported running lvremove: CMD: %(command)s, RESPONSE: %(response)s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:568 +msgid "Attempting udev settle and retry of lvremove..." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:608 +msgid "Error extending Volume" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:39 +msgid "nfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:45 +msgid "glusterfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:86 +#, python-format +msgid "Already mounted: %s" +msgstr "" + +#: cinder/common/config.py:125 +msgid "Deploy v1 of the Cinder API." +msgstr "" + +#: cinder/common/config.py:128 +msgid "Deploy v2 of the Cinder API." +msgstr "" + +#: cinder/common/sqlalchemyutils.py:66 +#: cinder/openstack/common/db/sqlalchemy/utils.py:72 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:114 +#: cinder/openstack/common/db/sqlalchemy/utils.py:120 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/compute/nova.py:97 +#, python-format +msgid "Novaclient connection created using URL: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:63 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:190 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:843 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1842 +#, python-format +msgid "VolumeType %s deletion failed, VolumeType in use." 
+msgstr "" + +#: cinder/db/sqlalchemy/api.py:2530 +#, python-format +msgid "No backup with id %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2615 +msgid "Volume must be available" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2639 +#, python-format +msgid "Volume in unexpected state %s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2662 +#, python-format +msgid "" +"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " +"%(status)s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:37 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:64 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:240 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:269 +msgid "Downgrade from initial Cinder install is unsupported." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:49 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:74 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:105 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:45 +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:48 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:46 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:127 +msgid "Dropping foreign key reservations_ibfk_1 failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:133 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:140 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:147 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:60 +msgid "Exception while creating table 'volume_glance_metadata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:75 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:68 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:58 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:61 +msgid "transfers table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:31 +msgid "migrations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:61 +#, python-format +msgid "Table |%s| not created" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:37 +#, python-format +msgid "Exception while dropping table %s." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:100 +#, python-format +msgid "Exception while creating table %s." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:34 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:43 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:49 +#, python-format +msgid "Column |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:92 +msgid "encryption_key_id column not dropped from volumes" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:100 +msgid "encryption_key_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:105 +msgid "volume_type_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:113 +msgid "encryption table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:49 +msgid "Table quality_of_service_specs not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:60 +msgid "Added qos_specs_id column to volume type table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:85 +msgid "Dropping foreign key volume_types_ibfk_1 failed" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:93 +msgid "Dropping qos_specs_id column failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:100 +msgid "Dropping quality_of_service_specs table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:59 +msgid "volume_admin_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:48 +msgid "" +"Found existing 'default' entries in the quota_classes table. Skipping " +"insertion of default values." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:72 +msgid "Added default quota class data into the DB." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:74 +msgid "Default quota class data not inserted into the DB." +msgstr "" + +#: cinder/image/glance.py:161 cinder/image/glance.py:169 +#, python-format +msgid "Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:94 cinder/image/image_utils.py:199 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/image/image_utils.py:101 +#, python-format +msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:109 cinder/image/image_utils.py:192 +#, python-format +msgid "" +"Size is %(image_size)dGB and doesn't fit in a volume of size " +"%(volume_size)dGB." +msgstr "" + +#: cinder/image/image_utils.py:157 +#, python-format +msgid "" +"qemu-img is not installed and image is of type %s. Only RAW images can " +"be used if qemu-img is not installed." +msgstr "" + +#: cinder/image/image_utils.py:164 +msgid "" +"qemu-img is not installed and the disk format is not specified. Only RAW" +" images can be used if qemu-img is not installed." 
+msgstr "" + +#: cinder/image/image_utils.py:178 +#, python-format +msgid "Copying image from %(tmp)s to volume %(dest)s - size: %(size)s" +msgstr "" + +#: cinder/image/image_utils.py:206 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:224 +#, python-format +msgid "Converted to %(vol_format)s, but format is now %(file_format)s" +msgstr "" + +#: cinder/image/image_utils.py:260 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:78 +msgid "" +"config option keymgr.fixed_key has not been defined: some operations may " +"fail unexpectedly" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:80 +msgid "keymgr.fixed_key not defined" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:134 +#, python-format +msgid "Not deleting key %s" +msgstr "" + +#: cinder/openstack/common/eventlet_backdoor.py:140 +#, python-format +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/fileutils.py:64 +#, python-format +msgid "Reloading cached file %s" +msgstr "" + +#: cinder/openstack/common/gettextutils.py:252 +msgid "Message objects do not support addition." +msgstr "" + +#: cinder/openstack/common/gettextutils.py:261 +msgid "" +"Message objects do not support str() because they may contain non-ascii " +"characters. Please use unicode() or translate() instead." +msgstr "" + +#: cinder/openstack/common/imageutils.py:96 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:189 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:200 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:227 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:235 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/log.py:301 +#, python-format +msgid "Deprecated: %s" +msgstr "" + +#: cinder/openstack/common/log.py:402 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:453 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:623 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:82 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:89 +#: cinder/tests/brick/test_brick_connector.py:466 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:129 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:136 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:43 +#, python-format +msgid "Unexpected argument for periodic task creation: %(arg)s." 
+msgstr "" + +#: cinder/openstack/common/periodic_task.py:134 +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:139 +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:177 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:186 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." +msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:127 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/openstack/common/processutils.py:142 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:167 +#: cinder/openstack/common/processutils.py:239 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:345 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:179 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/openstack/common/processutils.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:318 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:220 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/openstack/common/processutils.py:224 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/openstack/common/service.py:175 +#: cinder/openstack/common/service.py:269 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:187 +msgid "Exception during rpc cleanup." 
+msgstr "" + +#: cinder/openstack/common/service.py:238 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:275 +msgid "Unhandled exception" +msgstr "" + +#: cinder/openstack/common/service.py:308 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/openstack/common/service.py:327 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/openstack/common/service.py:337 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/openstack/common/service.py:354 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/openstack/common/service.py:358 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/service.py:362 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/openstack/common/service.py:392 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/openstack/common/service.py:410 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/openstack/common/sslutils.py:98 +#, python-format +msgid "Invalid SSL version : %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:86 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/strutils.py:182 +#, python-format +msgid "Invalid string format: %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:189 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/openstack/common/versionutils.py:69 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and " +"may be removed in %(remove_in)s." +msgstr "" + +#: cinder/openstack/common/versionutils.py:73 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s and may be removed in " +"%(remove_in)s. It will not be superseded." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:29 +msgid "An unknown error occurred in crypto utils." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:36 +#, python-format +msgid "Block size of %(given)d is too big, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:45 +#, python-format +msgid "Length of %(given)d is too long, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/db/exception.py:44 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:487 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:538 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:610 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/utils.py:33 +msgid "Sort key supplied was not valid." +msgstr "" + +#: cinder/openstack/common/notifier/api.py:129 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:145 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:164 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. 
Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:105 +#, python-format +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:83 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:216 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:299 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:345 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "received %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:422 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:423 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:280 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:459 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:594 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:597 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:631 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:640 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:668 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." 
+msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:166 +#: cinder/openstack/common/rpc/impl_qpid.py:164 +msgid "Failed to process message... skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:477 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:499 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:536 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:552 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:606 +#: cinder/openstack/common/rpc/impl_qpid.py:507 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:624 +#: cinder/openstack/common/rpc/impl_qpid.py:522 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:628 +#: cinder/openstack/common/rpc/impl_qpid.py:526 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:667 +#: cinder/openstack/common/rpc/impl_qpid.py:561 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:84 +#, python-format +msgid "Invalid value for qpid_topology_version: %d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:455 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:461 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:474 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:534 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:101 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:136 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:137 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:138 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:146 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:158 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:200 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:205 +msgid "You cannot send on this socket." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:267 +#, python-format +msgid "Running func with context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:305 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:387 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:437 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:443 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:475 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:481 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:497 +#, python-format +msgid "Required IPC directory does not exist at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:506 +#, python-format +msgid "Permission denied to IPC directory at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:509 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:543 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:562 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:590 +msgid "Skipping topic registration. Already registered." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:597 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:649 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:662 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:675 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:678 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:681 +#, python-format +msgid "Received message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:682 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:691 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:698 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:721 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:724 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:728 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:731 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:771 +#, python-format +msgid "topic is %s." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:815 +#, python-format +msgid "rpc_zmq_matchmaker = %(orig)s is deprecated; use %(new)s instead" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#: cinder/openstack/common/rpc/matchmaker_ring.py:79 +#: cinder/openstack/common/rpc/matchmaker_ring.py:97 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:54 +#, python-format +msgid "extra_spec requirement '%(req)s' does not match '%(cap)s'" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:67 +#, python-format +msgid "%(host_state)s fails resource_type extra_specs requirements" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:43 +msgid "Re-scheduling is disabled." +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:52 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/scheduler/driver.py:69 +msgid "Must implement host_passes_filters" +msgstr "" + +#: cinder/scheduler/driver.py:74 +msgid "Must implement find_retype_host" +msgstr "" + +#: cinder/scheduler/driver.py:78 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:82 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:98 +#, python-format +msgid "cannot place volume %(id)s on %(host)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:114 +#, python-format +msgid "No valid hosts for volume %(id)s with type %(type)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:125 +#, python-format +msgid "" +"Current host not valid for volume %(id)s with type %(type)s, migration " +"not allowed" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:156 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:174 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:207 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:259 +#, python-format +msgid "Filtered %s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:276 +#, python-format +msgid "Choosing %s" +msgstr "" + +#: cinder/scheduler/host_manager.py:264 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:269 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:294 +#, python-format +msgid "volume service is down or disabled. (host: %s)" +msgstr "" + +#: cinder/scheduler/manager.py:63 +msgid "" +"ChanceScheduler and SimpleScheduler have been deprecated due to lack of " +"support for advanced features like: volume types, volume encryption, QoS " +"etc. These two schedulers can be fully replaced by FilterScheduler with " +"certain combination of filters and weighers." 
+msgstr "" + +#: cinder/scheduler/manager.py:98 cinder/scheduler/manager.py:100 +msgid "Failed to create scheduler manager volume flow" +msgstr "" + +#: cinder/scheduler/manager.py:159 +msgid "New volume type not specified in request_spec." +msgstr "" + +#: cinder/scheduler/manager.py:174 +#, python-format +msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." +msgstr "" + +#: cinder/scheduler/manager.py:192 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:68 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%s'" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:43 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:57 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:53 +msgid "No volume_id provided to populate a request_spec from" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:116 +#, python-format +msgid "Failed to schedule_create_volume: %(cause)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:135 +#, python-format +msgid "Failed notifying on %(topic)s payload %(payload)s" +msgstr "" + +#: cinder/tests/fake_driver.py:57 cinder/volume/driver.py:784 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:76 cinder/volume/driver.py:884 +#, python-format +msgid "FAKE ISER: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:97 +msgid "local_path not implemented" +msgstr "" + +#: cinder/tests/fake_driver.py:124 cinder/tests/fake_driver.py:129 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:70 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:78 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:94 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:97 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:58 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_netapp_nfs.py:360 +#, python-format +msgid "Share %(share)s and file name %(file_name)s" +msgstr "" + +#: cinder/tests/test_rbd.py:768 cinder/volume/drivers/rbd.py:175 +msgid "flush() not supported in this version of librbd" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:260 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1507 +#, python-format +msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1510 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1515 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:60 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:61 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: 
cinder/tests/test_xiv_ds8k.py:102 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/tests/api/contrib/test_backups.py:741 +msgid "Invalid input" +msgstr "" + +#: cinder/tests/integrated/test_login.py:29 +#, python-format +msgid "volume: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:32 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:42 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:50 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:58 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:100 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:103 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:121 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:148 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:159 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:166 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/transfer/api.py:68 +msgid "Volume in unexpected state" +msgstr "" + +#: cinder/transfer/api.py:102 cinder/volume/api.py:367 +msgid "status must be available" +msgstr "" + +#: cinder/transfer/api.py:119 +#, python-format +msgid "Failed to create transfer record for %s" +msgstr "" + +#: cinder/transfer/api.py:136 +#, python-format +msgid "Attempt to transfer %s with invalid auth key." +msgstr "" + +#: cinder/transfer/api.py:156 cinder/volume/flows/api/create_volume.py:508 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/transfer/api.py:182 +#, python-format +msgid "Failed to update quota donating volumetransfer id %s" +msgstr "" + +#: cinder/transfer/api.py:199 +#, python-format +msgid "Volume %s has been transferred." 
+msgstr "" + +#: cinder/volume/api.py:143 +#, python-format +msgid "Unable to query if %s is in the availability zone set" +msgstr "" + +#: cinder/volume/api.py:171 cinder/volume/api.py:173 +msgid "Failed to create api volume flow" +msgstr "" + +#: cinder/volume/api.py:202 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:214 +#, python-format +msgid "Volume status must be available or error, but current status is: %s" +msgstr "" + +#: cinder/volume/api.py:224 +msgid "Volume cannot be deleted while migrating" +msgstr "" + +#: cinder/volume/api.py:229 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:285 cinder/volume/api.py:350 +#: cinder/volume/qos_specs.py:240 cinder/volume/volume_types.py:67 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:370 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:377 +msgid "status must be in-use to detach" +msgstr "" + +#: cinder/volume/api.py:388 +msgid "Volume status must be available to reserve" +msgstr "" + +#: cinder/volume/api.py:464 +msgid "Snapshot cannot be created while volume is migrating" +msgstr "" + +#: cinder/volume/api.py:468 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:490 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:502 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:553 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/api.py:581 cinder/volume/flows/api/create_volume.py:208 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:585 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:589 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:720 cinder/volume/api.py:772 +msgid "Volume status must be available/in-use." +msgstr "" + +#: cinder/volume/api.py:723 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/api.py:752 +msgid "Volume status must be available to extend." +msgstr "" + +#: cinder/volume/api.py:757 +#, python-format +msgid "" +"New size for extend must be greater than current size. (current: " +"%(size)s, extended: %(new_size)s)" +msgstr "" + +#: cinder/volume/api.py:778 +msgid "Volume is already part of an active migration" +msgstr "" + +#: cinder/volume/api.py:784 +msgid "volume must not have snapshots" +msgstr "" + +#: cinder/volume/api.py:797 +#, python-format +msgid "No available service named %s" +msgstr "" + +#: cinder/volume/api.py:803 +msgid "Destination host must be different than current host" +msgstr "" + +#: cinder/volume/api.py:833 +msgid "Source volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:837 +msgid "Destination volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:842 +#, python-format +msgid "Destination has migration_status %(stat)s, expected %(exp)s." +msgstr "" + +#: cinder/volume/api.py:853 +msgid "Volume status must be available to update readonly flag." 
+msgstr "" + +#: cinder/volume/api.py:862 +#, python-format +msgid "Unable to update type due to incorrect status on volume: %s" +msgstr "" + +#: cinder/volume/api.py:868 +#, python-format +msgid "Volume %s is already part of an active migration." +msgstr "" + +#: cinder/volume/api.py:874 +#, python-format +msgid "migration_policy must be 'on-demand' or 'never', passed: %s" +msgstr "" + +#: cinder/volume/api.py:887 +#, python-format +msgid "Invalid volume_type passed: %s" +msgstr "" + +#: cinder/volume/api.py:900 +#, python-format +msgid "New volume_type same as original: %s" +msgstr "" + +#: cinder/volume/api.py:915 +msgid "Retype cannot change encryption requirements" +msgstr "" + +#: cinder/volume/api.py:927 +msgid "Retype cannot change front-end qos specs for in-use volumes" +msgstr "" + +#: cinder/volume/driver.py:189 cinder/volume/drivers/netapp/nfs.py:174 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:282 +#, python-format +msgid "copy_data_between_volumes %(src)s -> %(dest)s." +msgstr "" + +#: cinder/volume/driver.py:295 cinder/volume/driver.py:309 +#, python-format +msgid "Failed to attach volume %(vol)s" +msgstr "" + +#: cinder/volume/driver.py:327 +#, python-format +msgid "Failed to copy volume %(src)s to %(dest)d" +msgstr "" + +#: cinder/volume/driver.py:340 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:358 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:394 +#, python-format +msgid "Unable to access the backend storage via the path %(path)s." +msgstr "" + +#: cinder/volume/driver.py:433 +#, python-format +msgid "Creating a new backup for volume %s." +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Restoring backup %(backup)s to volume %(volume)s." +msgstr "" + +#: cinder/volume/driver.py:474 +msgid "Extend volume not implemented" +msgstr "" + +#: cinder/volume/driver.py:533 cinder/volume/drivers/emc/emc_smis_iscsi.py:113 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:546 +#, python-format +msgid "ISCSI discovery attempt failed for:%s" +msgstr "" + +#: cinder/volume/driver.py:548 +#, python-format +msgid "Error from iscsiadm -m discovery: %s" +msgstr "" + +#: cinder/volume/driver.py:595 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:599 cinder/volume/drivers/emc/emc_smis_iscsi.py:156 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:696 +msgid "The volume driver requires the iSCSI initiator name in the connector." +msgstr "" + +#: cinder/volume/driver.py:726 cinder/volume/driver.py:845 +#: cinder/volume/drivers/eqlx.py:247 cinder/volume/drivers/lvm.py:359 +#: cinder/volume/drivers/zadara.py:650 +#: cinder/volume/drivers/emc/emc_smis_common.py:859 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:235 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:602 +#: cinder/volume/drivers/netapp/iscsi.py:1032 +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/nexenta/iscsi.py:538 +#: cinder/volume/drivers/windows/windows.py:205 +msgid "Updating volume stats" +msgstr "" + +#: cinder/volume/driver.py:924 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:203 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." 
+msgstr "" + +#: cinder/volume/manager.py:209 +msgid "" +"ThinLVMVolumeDriver is deprecated, please configure LVMISCSIDriver and " +"lvm_type=thin. Continuing with those settings." +msgstr "" + +#: cinder/volume/manager.py:228 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)" +msgstr "" + +#: cinder/volume/manager.py:235 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s" +msgstr "" + +#: cinder/volume/manager.py:244 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:257 +#, python-format +msgid "Failed to re-export volume %s: setting to error state" +msgstr "" + +#: cinder/volume/manager.py:264 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:271 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:273 +#, python-format +msgid "" +"Error encountered during re-exporting phase of driver initialization: " +"%(name)s" +msgstr "" + +#: cinder/volume/manager.py:283 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:328 cinder/volume/manager.py:330 +msgid "Failed to create manager volume flow" +msgstr "" + +#: cinder/volume/manager.py:374 cinder/volume/manager.py:391 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:380 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:389 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:394 +#, python-format +msgid "Cannot delete volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:422 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:427 +#, python-format +msgid "volume %s: glance metadata deleted" +msgstr "" + +#: cinder/volume/manager.py:430 +#, python-format +msgid "no glance metadata found for volume %s" +msgstr "" + +#: cinder/volume/manager.py:434 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:451 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:462 +#, python-format +msgid "snapshot %(snap_id)s: creating" +msgstr "" + +#: cinder/volume/manager.py:490 +#, python-format +msgid "" +"Failed updating %(snapshot_id)s metadata using the provided volumes " +"%(volume_id)s metadata" +msgstr "" + +#: cinder/volume/manager.py:496 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:508 cinder/volume/manager.py:518 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:526 +#, python-format +msgid "Cannot delete snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:556 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:559 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:579 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:583 +msgid "being attached by another host" +msgstr "" + +#: cinder/volume/manager.py:587 +msgid "being attached by different mode" +msgstr "" + +#: cinder/volume/manager.py:590 +msgid "status must be available or attaching" +msgstr "" + +#: cinder/volume/manager.py:698 +#, python-format +msgid 
"Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "" + +#: cinder/volume/manager.py:760 +#, python-format +msgid "Unable to fetch connection information from backend: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:807 +#, python-format +msgid "Unable to terminate volume connection: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:854 +msgid "failed to create new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:857 +msgid "timeout creating new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:880 +#, python-format +msgid "Failed to copy volume %(vol1)s to %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:909 +#, python-format +msgid "" +"migrate_volume_completion: completing migration for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:921 +#, python-format +msgid "" +"migrate_volume_completion is cleaning up an error for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:940 +#, python-format +msgid "Failed to delete migration source vol %(vol)s: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:976 +#, python-format +msgid "volume %s: calling driver migrate_volume" +msgstr "" + +#: cinder/volume/manager.py:1016 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/manager.py:1024 +#, python-format +msgid "" +"Unable to update stats, %(driver_name)s -%(driver_version)s " +"%(config_group)s driver is uninitialized." +msgstr "" + +#: cinder/volume/manager.py:1044 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/manager.py:1091 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to extend volume by %(s_size)sG, " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/manager.py:1103 +#, python-format +msgid "volume %s: extending" +msgstr "" + +#: cinder/volume/manager.py:1105 +#, python-format +msgid "volume %s: extended successfully" +msgstr "" + +#: cinder/volume/manager.py:1107 +#, python-format +msgid "volume %s: Error trying to extend volume" +msgstr "" + +#: cinder/volume/manager.py:1169 +msgid "Failed to update usages while retyping volume." +msgstr "" + +#: cinder/volume/manager.py:1170 +msgid "Failed to get old volume type quota reservations" +msgstr "" + +#: cinder/volume/manager.py:1190 +#, python-format +msgid "Volume %s: retyped succesfully" +msgstr "" + +#: cinder/volume/manager.py:1193 +#, python-format +msgid "" +"Volume %s: driver error when trying to retype, falling back to generic " +"mechanism." +msgstr "" + +#: cinder/volume/manager.py:1204 +msgid "Retype requires migration but is not allowed." +msgstr "" + +#: cinder/volume/manager.py:1212 +msgid "Volume must not have snapshots." 
+msgstr "" + +#: cinder/volume/qos_specs.py:57 +#, python-format +msgid "Valid consumer of QoS specs are: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:84 cinder/volume/qos_specs.py:105 +#: cinder/volume/qos_specs.py:155 cinder/volume/qos_specs.py:197 +#: cinder/volume/qos_specs.py:211 cinder/volume/qos_specs.py:225 +#: cinder/volume/volume_types.py:43 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:123 cinder/volume/qos_specs.py:140 +#: cinder/volume/qos_specs.py:272 cinder/volume/volume_types.py:52 +#: cinder/volume/volume_types.py:99 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/qos_specs.py:156 +#, python-format +msgid "Failed to get all associations of qos specs %s" +msgstr "" + +#: cinder/volume/qos_specs.py:189 +#, python-format +msgid "" +"Type %(type_id)s is already associated with another qos specs: " +"%(qos_specs_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:198 +#, python-format +msgid "Failed to associate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:212 +#, python-format +msgid "Failed to disassociate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:226 +#, python-format +msgid "Failed to disassociate qos specs %s." +msgstr "" + +#: cinder/volume/qos_specs.py:284 cinder/volume/volume_types.py:111 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/utils.py:144 +#, python-format +msgid "" +"Incorrect value error: %(blocksize)s, it may indicate that " +"'volume_dd_blocksize' was configured incorrectly. Fall back to default." +msgstr "" + +#: cinder/volume/volume_types.py:130 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:131 +#: cinder/volume/drivers/block_device.py:143 cinder/volume/drivers/lvm.py:654 +#: cinder/volume/drivers/lvm.py:669 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:157 cinder/volume/drivers/lvm.py:687 +#, python-format +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:183 cinder/volume/drivers/lvm.py:483 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:200 cinder/volume/drivers/lvm.py:504 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:272 cinder/volume/drivers/lvm.py:227 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:287 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:311 cinder/volume/drivers/lvm.py:300 +#: cinder/volume/drivers/zadara.py:509 cinder/volume/drivers/nexenta/nfs.py:189 +#, python-format +msgid "Creating clone of volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:380 +msgid "No free disk" +msgstr "" + +#: cinder/volume/drivers/block_device.py:393 +msgid "No big enough free disk" +msgstr "" + +#: cinder/volume/drivers/coraid.py:84 +#, python-format +msgid "Invalid ESM url scheme \"%s\". Supported https only." +msgstr "" + +#: cinder/volume/drivers/coraid.py:111 +msgid "Invalid REST handle name. Expected path." 
+msgstr "" + +#: cinder/volume/drivers/coraid.py:134 +#, python-format +msgid "Call to json.loads() failed: %(ex)s. Response: %(resp)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:224 +msgid "Session is expired. Relogin on ESM." +msgstr "" + +#: cinder/volume/drivers/coraid.py:244 +msgid "Reply is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:246 +msgid "Error message is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:284 +#, python-format +msgid "Coraid Appliance ping failed: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:297 +#, python-format +msgid "Volume \"%(name)s\" created with VSX LUN \"%(lun)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:311 +#, python-format +msgid "Volume \"%s\" deleted." +msgstr "" + +#: cinder/volume/drivers/coraid.py:315 +#, python-format +msgid "Resize volume \"%(name)s\" to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:319 +#, python-format +msgid "Repository for volume \"%(name)s\" found: \"%(repo)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:333 +#, python-format +msgid "Volume \"%(name)s\" resized. New size is %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:385 +msgid "Cannot create clone volume in different repository." +msgstr "" + +#: cinder/volume/drivers/coraid.py:505 +#, python-format +msgid "Initialize connection %(shelf)s/%(lun)s for %(name)s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:139 +#, python-format +msgid "" +"CLI output\n" +"%s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:154 +msgid "Reading CLI MOTD" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:158 +#, python-format +msgid "Setting CLI terminal width: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:162 +#, python-format +msgid "Sending CLI command: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:169 +msgid "Error executing EQL command" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:199 +#, python-format +msgid "EQL-driver: executing \"%s\"" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:208 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:383 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:215 cinder/volume/drivers/san/san.py:149 +#, python-format +msgid "Error running SSH command: %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:282 +#, python-format +msgid "Volume %s does not exist, it may have already been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:300 +#, python-format +msgid "EQL-driver: Setup is complete, group IP is %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:304 +msgid "Failed to setup the Dell EqualLogic driver" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:320 +#, python-format +msgid "Failed to create volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:329 +#, python-format +msgid "Volume %s was not found while trying to delete it" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:333 +#, python-format +msgid "Failed to delete volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:348 +#, python-format +msgid "Failed to create snapshot of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:361 +#, python-format +msgid "Failed to create volume from snapshot %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:374 +#, python-format +msgid "Failed to create clone of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:384 +#, python-format +msgid "Failed to delete snapshot %(snap)s of volume %(vol)s" +msgstr "" + +#: 
cinder/volume/drivers/eqlx.py:405
+#, python-format
+msgid "Failed to initialize connection to volume %s"
+msgstr ""
+
+#: cinder/volume/drivers/eqlx.py:415
+#, python-format
+msgid "Failed to terminate connection to volume %s"
+msgstr ""
+
+#: cinder/volume/drivers/eqlx.py:436
+#, python-format
+msgid "Volume %s was not found, it may have been deleted"
+msgstr ""
+
+#: cinder/volume/drivers/eqlx.py:440
+#, python-format
+msgid "Failed to ensure export of volume %s"
+msgstr ""
+
+#: cinder/volume/drivers/eqlx.py:459
+#, python-format
+msgid "Failed to extend_volume %(name)s from %(current_size)sGB to %(new_size)sGB"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:86
+#, python-format
+msgid "There's no Gluster config file configured (%s)"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:91
+#, python-format
+msgid "Gluster config file at %(config)s doesn't exist"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:103
+msgid "mount.glusterfs is not installed"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:161
+#, python-format
+msgid "Cloning volume %(src)s to volume %(dst)s"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:166
+msgid "Volume status must be 'available'."
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:202 cinder/volume/drivers/nfs.py:122
+#: cinder/volume/drivers/netapp/nfs.py:753
+#, python-format
+msgid "casted to %s"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:216
+msgid "Snapshot status must be \"available\" to clone."
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:238
+#, python-format
+msgid "snapshot: %(snap)s, volume: %(vol)s, volume_size: %(size)s"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:257
+#, python-format
+msgid "will copy from snapshot at %s"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:275 cinder/volume/drivers/nfs.py:172
+#, python-format
+msgid "Volume %s does not have provider_location specified, skipping"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:373
+#, python-format
+msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:403
+#, python-format
+msgid "nova call result: %s"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:405
+msgid "Call to Nova to create snapshot failed"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:427
+msgid "Nova returned \"error\" status while creating snapshot."
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:431
+#, python-format
+msgid "Status of snapshot %(id)s is now %(status)s"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:444
+#, python-format
+msgid "Timed out while waiting for Nova update for creation of snapshot %s."
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:456
+#, python-format
+msgid "create snapshot: %s"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:457
+#, python-format
+msgid "volume id: %s"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:532
+msgid "'active' must be present when writing snap_info."
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:562
+#, python-format
+msgid "deleting snapshot %s"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:566
+msgid "Volume status must be \"available\" or \"in-use\"."
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:582
+#, python-format
+msgid ""
+"Snapshot record for %s is not present, allowing snapshot_delete to "
+"proceed."
+msgstr "" + +#: cinder/volume/drivers/glusterfs.py:587 +#, python-format +msgid "snapshot_file for this snap is %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:608 +#, python-format +msgid "No base file found for %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:625 +#, python-format +msgid "No %(base_id)s found for %(file)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:680 +#, python-format +msgid "No file found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:690 +#, python-format +msgid "No snap found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:701 +#, python-format +msgid "No file depends on %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:727 +#, python-format +msgid "Check condition failed: %s expected to be None." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:778 +msgid "Call to Nova delete snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:796 +#, python-format +msgid "status of snapshot %s is still \"deleting\"... waiting" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:802 +#, python-format +msgid "Unable to delete snapshot %(id)s, status: %(status)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:815 +#, python-format +msgid "Timed out while waiting for Nova update for deletion of snapshot %(id)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:904 +#, python-format +msgid "%s must be a valid raw or qcow2 image." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:967 +msgid "Extend volume is only supported for this driver when no snapshots exist." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:975 +#, python-format +msgid "Unrecognized backing format: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:990 +#, python-format +msgid "creating new volume at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:993 +#, python-format +msgid "file already exists at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1019 cinder/volume/drivers/nfs.py:159 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1021 +#, python-format +msgid "Available shares: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1038 +#, python-format +msgid "" +"GlusterFS share at %(dir)s is not writable by the Cinder volume service. " +"Snapshot operations will not be supported." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:96 +#, python-format +msgid "GPFS is not active. Detailed output: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:97 +#, python-format +msgid "GPFS is not running - state: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:140 +msgid "Option gpfs_mount_point_base is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:147 +msgid "Option gpfs_images_share_mode is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:153 +msgid "Option gpfs_images_dir is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:160 +#, python-format +msgid "" +"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " +"belong to different file systems" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:169 +#, python-format +msgid "" +"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in " +"cluster daemon level %(cur)s - must be at least at level %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:183 +#, python-format +msgid "%s must be an absolute path." 
+msgstr "" + +#: cinder/volume/drivers/gpfs.py:188 +#, python-format +msgid "%s is not a directory." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:197 +#, python-format +msgid "" +"The GPFS filesystem %(fs)s is not at the required release level. Current" +" level is %(cur)s, must be at least %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:556 +#, python-format +msgid "Failed to resize volume %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:604 +#, python-format +msgid "mkfs failed on volume %(vol)s, error message was: %(err)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:637 +#, python-format +msgid "" +"%s cannot be accessed. Verify that GPFS is active and file system is " +"mounted." +msgstr "" + +#: cinder/volume/drivers/lvm.py:189 +#, python-format +msgid "Unabled to delete due to existing snapshot for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:215 +#, python-format +msgid "Volume device file path %s does not exist." +msgstr "" + +#: cinder/volume/drivers/lvm.py:221 +#, python-format +msgid "Size for volume: %s not found, cannot secure delete." +msgstr "" + +#: cinder/volume/drivers/lvm.py:262 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:361 +#, python-format +msgid "Unable to update stats on non-initialized Volume Group: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:462 +#, python-format +msgid "Error creating iSCSI target, retrying creation for target: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:482 +#, python-format +msgid "volume_info:%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:518 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:519 cinder/volume/drivers/lvm.py:724 +#: cinder/volume/drivers/huawei/rest_common.py:1225 +#, python-format +msgid "%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:573 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/nfs.py:109 +msgid "Driver specific implementation needs to return mount_point_base." +msgstr "" + +#: cinder/volume/drivers/nfs.py:263 +#, python-format +msgid "Expected volume size was %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:264 +#, python-format +msgid " but size is now %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:361 +#, python-format +msgid "%s is already mounted" +msgstr "" + +#: cinder/volume/drivers/nfs.py:421 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:426 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/nfs.py:431 +#, python-format +msgid "NFS config 'nfs_oversub_ratio' invalid. Must be > 0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:439 +#, python-format +msgid "NFS config 'nfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:493 +#, python-format +msgid "Selected %s as target nfs share." 
+msgstr "" + +#: cinder/volume/drivers/nfs.py:526 +#, python-format +msgid "%s is above nfs_used_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:529 +#, python-format +msgid "%s is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:532 +#, python-format +msgid "%s reserved space is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/rbd.py:160 +#, python-format +msgid "Invalid argument - whence=%s not supported" +msgstr "" + +#: cinder/volume/drivers/rbd.py:164 +msgid "Invalid argument" +msgstr "" + +#: cinder/volume/drivers/rbd.py:183 +msgid "fileno() not supported by RBD()" +msgstr "" + +#: cinder/volume/drivers/rbd.py:210 +#, python-format +msgid "error opening rbd image %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:259 +msgid "rados and rbd python libraries not found" +msgstr "" + +#: cinder/volume/drivers/rbd.py:265 +msgid "error connecting to ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:346 cinder/volume/drivers/sheepdog.py:178 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:377 +#, python-format +msgid "clone depth exceeds limit of %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:411 +#, python-format +msgid "maximum clone depth (%d) has been reached - flattening source volume" +msgstr "" + +#: cinder/volume/drivers/rbd.py:423 +#, python-format +msgid "flattening source volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:435 +#, python-format +msgid "creating snapshot='%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:445 +#, python-format +msgid "cloning '%(src_vol)s@%(src_snap)s' to '%(dest)s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:459 +msgid "clone created successfully" +msgstr "" + +#: cinder/volume/drivers/rbd.py:468 +#, python-format +msgid "creating volume '%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:484 +#, python-format +msgid "flattening %(pool)s/%(img)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:490 +#, python-format +msgid "cloning %(pool)s/%(img)s@%(snap)s to %(dst)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:527 +msgid "volume has no backup snaps" +msgstr "" + +#: cinder/volume/drivers/rbd.py:550 +#, python-format +msgid "volume %s is not a clone" +msgstr "" + +#: cinder/volume/drivers/rbd.py:568 +#, python-format +msgid "deleting parent snapshot %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:579 +#, python-format +msgid "deleting parent %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:593 +#, python-format +msgid "volume %s no longer exists in backend" +msgstr "" + +#: cinder/volume/drivers/rbd.py:609 +msgid "volume has clone snapshot(s)" +msgstr "" + +#: cinder/volume/drivers/rbd.py:625 +#, python-format +msgid "deleting rbd volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:629 +msgid "" +"ImageBusy error raised while deleting rbd volume. This may have been " +"caused by a connection from a client that has crashed and, if so, may be " +"resolved by retrying the delete after 30 seconds has elapsed." 
+msgstr "" + +#: cinder/volume/drivers/rbd.py:642 +msgid "volume is a clone so cleaning references" +msgstr "" + +#: cinder/volume/drivers/rbd.py:696 +#, python-format +msgid "connection data: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:705 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:709 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:712 +msgid "Not an rbd snapshot" +msgstr "" + +#: cinder/volume/drivers/rbd.py:724 +#, python-format +msgid "not cloneable: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:728 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:733 +msgid "rbd image clone requires image format to be 'raw' but image {0} is '{1}'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:747 +#, python-format +msgid "Unable to open image %(loc)s: %(err)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:817 +msgid "volume backup complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:830 +msgid "volume restore complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:840 cinder/volume/drivers/sheepdog.py:195 +#, python-format +msgid "Failed to Extend Volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:845 cinder/volume/drivers/sheepdog.py:200 +#: cinder/volume/drivers/windows/windows.py:223 +#, python-format +msgid "Extend volume from %(old_size)s GB to %(new_size)s GB." +msgstr "" + +#: cinder/volume/drivers/scality.py:67 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:78 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:84 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:105 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:139 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:59 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:64 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:144 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:151 +#, python-format +msgid "" +"Failed to make httplib connection SolidFire Cluster: %s (verify san_ip " +"settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:154 +#, python-format +msgid "Failed to make httplib connection: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:161 +#, python-format +msgid "" +"Request to SolidFire cluster returned bad status: %(status)s / %(reason)s" +" (check san_login/san_password settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:166 +#, python-format +msgid "HTTP request failed, with status: %(status)s and reason: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:177 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:183 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:187 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:189 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:195 +#, python-format +msgid "Detected 
xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:202 +#: cinder/volume/drivers/solidfire.py:271 +#: cinder/volume/drivers/solidfire.py:366 +#, python-format +msgid "API response: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:222 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:253 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:315 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:398 +msgid "Failed to get model update from clone" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:410 +#, python-format +msgid "Failed volume create: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:425 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:460 +#, python-format +msgid "Failed to get SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:469 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:478 +#, python-format +msgid "Volume %s, not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:481 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:550 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:554 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:556 +msgid "This usually means the volume was never successfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:569 +#, python-format +msgid "Failed to delete SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:572 +#: cinder/volume/drivers/solidfire.py:646 +#: cinder/volume/drivers/solidfire.py:709 +#: cinder/volume/drivers/solidfire.py:734 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:575 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:579 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:587 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:638 +msgid "Entering SolidFire extend_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:660 +msgid "Leaving SolidFire extend_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:665 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:673 +msgid "Failed to get updated stats" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:703 +#: cinder/volume/drivers/solidfire.py:728 +msgid "Entering SolidFire attach_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:773 +msgid "Leaving SolidFire transfer volume" +msgstr "" + +#: cinder/volume/drivers/zadara.py:236 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:260 +#, python-format +msgid "Operation completed. 
%(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:357 +#, python-format +msgid "Pool %(name)s: %(total)sGB total, %(free)sGB free" +msgstr "" + +#: cinder/volume/drivers/zadara.py:408 cinder/volume/drivers/zadara.py:531 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:438 +#, python-format +msgid "Create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:445 cinder/volume/drivers/zadara.py:490 +#: cinder/volume/drivers/zadara.py:516 +#, python-format +msgid "Volume %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:456 +#, python-format +msgid "Delete snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:464 +#, python-format +msgid "snapshot: original volume %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:472 +#, python-format +msgid "snapshot: snapshot %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:483 +#, python-format +msgid "Creating volume from snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:496 +#, python-format +msgid "Snapshot %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:614 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:40 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:79 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:83 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:91 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:98 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:107 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:115 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:130 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:137 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:144 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:152 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:157 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:167 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:177 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:188 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:197 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:218 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:230 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:241 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:257 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:266 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:278 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:287 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:292 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:302 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:312 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:321 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:342 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. 
Return code: %(rc)lu.Error: %(error)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:354
+#, python-format
+msgid ""
+"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s."
+" Successfully cloned volume from source volume. Finding the clone "
+"relationship."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:365
+#, python-format
+msgid ""
+"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s."
+" Remove the clone relationship. Method: ModifyReplicaSynchronization "
+"ReplicationService: %(service)s Operation: 8 Synchronization: "
+"%(sync_name)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:381
+#, python-format
+msgid ""
+"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s"
+" Return code: %(rc)lu"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:390
+#, python-format
+msgid ""
+"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: "
+"%(srcname)s. Return code: %(rc)lu. Error: %(error)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:402
+#, python-format
+msgid ""
+"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: "
+"%(srcname)s Return code: %(rc)lu."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:411
+msgid "Entering delete_volume."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:413
+#, python-format
+msgid "Delete Volume: %(volume)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:420
+#, python-format
+msgid "Volume %(name)s not found on the array. No volume to delete."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:430
+#, python-format
+msgid ""
+"Error Delete Volume: %(volumename)s. Storage Configuration Service not "
+"found."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:438
+#, python-format
+msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:442
+#, python-format
+msgid ""
+"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigService: "
+"%(service)s TheElement: %(vol_instance)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:456
+#, python-format
+msgid ""
+"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: "
+"%(error)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:465
+#, python-format
+msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:472
+msgid "Entering create_snapshot."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:476
+#, python-format
+msgid "Create snapshot: %(snapshot)s: volume: %(volume)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:488
+#, python-format
+msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:495
+#: cinder/volume/drivers/emc/emc_smis_common.py:497
+#: cinder/volume/drivers/emc/emc_smis_common.py:567
+#, python-format
+msgid "Cannot find Replication Service to create snapshot for volume %s."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:502
+#, python-format
+msgid ""
+"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s "
+"Source: %(volume)s Replication Service: %(service)s ElementName: "
+"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s."
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:518 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:527 +#, python-format +msgid "" +"Error Create Snapshot: %(snapshot)s Volume: %(volume)s Error: " +"%(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:535 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:541 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:545 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:551 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:559 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:574 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:590 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:599 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:611 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:621 +#, python-format +msgid "Create export: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:626 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:648 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:663 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:674 +#, python-format +msgid "Error mapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:678 +#, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:694 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:707 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:711 +#, python-format +msgid "HidePaths for volume %s completed successfully." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:724 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:739 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:744 +#, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:757 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:770 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:775 +#, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:781 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:790 +#: cinder/volume/drivers/emc/emc_smis_common.py:820 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:804 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:810 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:834 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:840 +#, python-format +msgid "Volume %s is already mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:852 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:884 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:887 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:903 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:906 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:928 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:948 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:952 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:959 +msgid "Cannot connect to ECOM server" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:971 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:984 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:997 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1010 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1054 +#, python-format +msgid "Pool %(storage_type)s is not found." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1060 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1066 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1082 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1114 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1117 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1130 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1158 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1184 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1188 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1248 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1289 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1302 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1314 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1326 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1361 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1404 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1409 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1419 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1441 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1463 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1491 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1520 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1526 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1538 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1548 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1550 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1566 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:152 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:161 +#, python-format +msgid "Cannot find device number for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:191 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:198 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:215 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:70 +#, python-format +msgid "Range: start LU: %(start)s, end LU: %(end)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:84 +#, python-format +msgid "setting LU upper (end) limit to %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:92 +#, python-format +msgid "%(element)s: %(val)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:103 cinder/volume/drivers/hds/hds.py:105 +#, python-format +msgid "XML exception reading parameter: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:178 +#, python-format +msgid "portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:197 +#, python-format +msgid "No configuration found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:250 +#, python-format +msgid "HDP not found: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:289 +#, python-format +msgid "iSCSI portal not found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:327 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:355 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is cloned." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:372 +#, python-format +msgid "LUN %(lun)s extended to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:395 +#, python-format +msgid "delete lun %(lun)s on %(name)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:480 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created from snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:503 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is created as snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:522 +#, python-format +msgid "LUN %s is deleted." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:57 +msgid "_instantiate_driver: configuration not found." 
+msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:64 +#, python-format +msgid "" +"_instantiate_driver: Loading %(protocol)s driver for Huawei OceanStor " +"%(product)s series storage arrays." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:84 +#, python-format +msgid "" +"\"Product\" or \"Protocol\" is illegal. \"Product\" should be set to " +"either T, Dorado or HVS. \"Protocol\" should be set to either iSCSI or " +"FC. Product: %(product)s Protocol: %(protocol)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:74 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s host: %(host)s initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:92 +#: cinder/volume/drivers/huawei/huawei_t.py:461 +#, python-format +msgid "initialize_connection: Target FC ports WWNS: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:101 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(ini)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:159 +#: cinder/volume/drivers/huawei/rest_common.py:1278 +#, python-format +msgid "" +"_get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " +"check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:206 +#: cinder/volume/drivers/huawei/rest_common.py:1083 +#, python-format +msgid "_get_tgt_iqn: iSCSI IP is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:234 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:248 +#, python-format +msgid "" +"_get_iscsi_tgt_port_info: Failed to get iSCSI port info. Please make sure" +" the iSCSI port IP %s is configured in array." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:323 +#: cinder/volume/drivers/huawei/huawei_t.py:552 +#, python-format +msgid "" +"terminate_connection: volume: %(vol)s, host: %(host)s, connector: " +"%(initiator)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:351 +#, python-format +msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:436 +msgid "validate_connector: The FC driver requires thewwpns in the connector." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:443 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:578 +#, python-format +msgid "_remove_fc_ports: FC port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:40 +#, python-format +msgid "parse_xml_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:129 +#, python-format +msgid "_get_host_os_type: Host %(ip)s OS type is %(os)s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:59 +#, python-format +msgid "HVS Request URL: %(url)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:60 +#, python-format +msgid "HVS Request Data: %(data)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:73 +#, python-format +msgid "HVS Response Data: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:75 +#, python-format +msgid "Bad response from server: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:82 +msgid "JSON transfer error" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:102 +#, python-format +msgid "Login error, reason is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:166 +#, python-format +msgid "" +"%(err)s\n" +"result: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:173 +#, python-format +msgid "%s \"data\" was not in result." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:208 +msgid "Can't find the Qos policy in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:246 +msgid "Can't find lun or lun group in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:280 +#, python-format +msgid "Invalid resource pool: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:298 +#, python-format +msgid "Get pool info error, pool name is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:327 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:354 +#, python-format +msgid "_stop_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:474 +#, python-format +msgid "" +"_mapping_hostgroup_and_lungroup: lun_group: %(lun_group)sview_id: " +"%(view_id)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:511 +#: cinder/volume/drivers/huawei/rest_common.py:543 +#, python-format +msgid "initiator name:%(initiator_name)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:527 +#, python-format +msgid "host lun id is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:553 +#, python-format +msgid "the free wwns %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:574 +#, python-format +msgid "the fc server properties is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:688 +#, python-format +msgid "JSON transfer data error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:874 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:937 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:964 +#, python-format +msgid "" +"PrefetchType config is wrong. PrefetchType must in 1,2,3,4. fetchtype " +"is:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:970 +msgid "Use default prefetch fetchtype. Prefetch fetchtype:Intelligent." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:982 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal.LUNcopy name: " +"%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1056 +#, python-format +msgid "" +"_get_iscsi_port_info: Failed to get iscsi port info through config IP " +"%(ip)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1101 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1124 +#, python-format +msgid "_parse_volume_type: type id: %(type_id)s config parameter is: %(params)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1157 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the configuration file " +"%(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1162 +#, python-format +msgid "The config parameters are: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1239 +#: cinder/volume/drivers/huawei/ssh_common.py:118 +#: cinder/volume/drivers/huawei/ssh_common.py:1265 +#, python-format +msgid "_check_conf_file: Config file invalid. %s must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1246 +#: cinder/volume/drivers/huawei/ssh_common.py:125 +msgid "_check_conf_file: Config file invalid. StoragePool must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1256 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1300 +msgid "Can not find lun in array" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:54 +#, python-format +msgid "ssh_read: Read SSH timeout. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:70 +msgid "No response message. Please check system status." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:101 +#: cinder/volume/drivers/huawei/ssh_common.py:1249 +msgid "do_setup" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:135 +#: cinder/volume/drivers/huawei/ssh_common.py:1287 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType is invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:169 +#, python-format +msgid "_get_login_info: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:224 +#, python-format +msgid "create_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:242 +#, python-format +msgid "" +"_name_translate: Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:279 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the element in configuration " +"file %(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:373 +#: cinder/volume/drivers/huawei/ssh_common.py:1451 +#, python-format +msgid "LUNType must be \"Thin\" or \"Thick\". LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:395 +msgid "" +"_parse_conf_lun_params: Use default prefetch type. 
Prefetch type: " +"Intelligent" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:421 +#, python-format +msgid "" +"_get_maximum_capacity_pool_id: Failed to get pool id. Please check config" +" file and make sure the StoragePool %s is created in storage array." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:436 +#, python-format +msgid "CLI command: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:466 +#, python-format +msgid "" +"_execute_cli: Can not connect to IP %(old)s, try to connect to the other " +"IP %(new)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:501 +#, python-format +msgid "_execute_cli: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:511 +#, python-format +msgid "delete_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:516 +#, python-format +msgid "delete_volume: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:570 +#, python-format +msgid "" +"create_volume_from_snapshot: snapshot name: %(snapshot)s, volume name: " +"%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:580 +#, python-format +msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:650 +#, python-format +msgid "_wait_for_luncopy: LUNcopy %(luncopyname)s status is %(status)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:688 +#, python-format +msgid "create_cloned_volume: src volume: %(src)s, tgt volume: %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:697 +#, python-format +msgid "Source volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:739 +#, python-format +msgid "" +"extend_volume: extended volume name: %(extended_name)s new added volume " +"name: %(added_name)s new added volume size: %(added_size)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:747 +#, python-format +msgid "extend_volume: volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:779 +#, python-format +msgid "create_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:785 +msgid "create_snapshot: Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:792 +#, python-format +msgid "create_snapshot: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:855 +#, python-format +msgid "delete_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:865 +#, python-format +msgid "" +"delete_snapshot: Can not delete snapshot %s for it is a source LUN of " +"LUNCopy." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:873 +#, python-format +msgid "delete_snapshot: Snapshot %(snap)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:916 +#, python-format +msgid "" +"%(func)s: %(msg)s\n" +"CLI command: %(cmd)s\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:933 +#, python-format +msgid "map_volume: Volume %s was not found." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1079 +#, python-format +msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1102 +#, python-format +msgid "remove_map: Host %s does not exist." 
+msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1106 +#, python-format +msgid "remove_map: Volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1119 +#, python-format +msgid "remove_map: No map between host %(host)s and volume %(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1138 +#, python-format +msgid "" +"_delete_map: There are IOs accessing the system. Retry to delete host map" +" %(mapid)s 10s later." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1146 +#, python-format +msgid "" +"_delete_map: Failed to delete host map %(mapid)s.\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1185 +msgid "_update_volume_stats: Updating volume stats." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1277 +msgid "_check_conf_file: Config file invalid. StoragePool must be specified." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1311 +msgid "" +"_get_device_type: The driver only supports Dorado5100 and Dorado 2100 G2 " +"now." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1389 +#, python-format +msgid "" +"create_volume_from_snapshot: %(device)s does not support create volume " +"from snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1396 +#, python-format +msgid "create_cloned_volume: %(device)s does not support clone volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1404 +#, python-format +msgid "extend_volume: %(device)s does not support extend volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1413 +#, python-format +msgid "create_snapshot: %(device)s does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:132 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:142 +#, python-format +msgid "Failed getting details for pool %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:179 +msgid "do_setup: No configured nodes." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:182 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:186 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:190 +msgid "Unable to determine system name" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:193 +msgid "Unable to determine system id" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:196 +msgid "Unable to determine pool extent size" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:203 +#: cinder/volume/drivers/netapp/iscsi.py:122 +#: cinder/volume/drivers/netapp/nfs.py:639 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:157 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:209 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:217 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:225 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:235 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:254 +msgid "The connector does not contain the required information." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:277 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:297 +msgid "CHAP secret exists for host but CHAP is disabled" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:302 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:314 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:316 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:333 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:342 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:365 +msgid "" +"Could not get FC connection information for the host-volume connection. " +"Is the host configured properly for FC connections?" 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:380 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:385 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:403 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:411 +msgid "terminate_connection: Failed to get host name from connector." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:421 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:447 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:459 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:470 +#, python-format +msgid "enter: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:474 +msgid "extend_volume: Extending a volume with snapshots is not supported." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:481 +#, python-format +msgid "leave: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:497 +#, python-format +msgid "enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:523 +#, python-format +msgid "leave: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:540 +#, python-format +msgid "" +"enter: retype: id=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:581 +#, python-format +msgid "" +"exit: retype: ild=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:622 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:623 +msgid "_update_volume_stats: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:44 +#, python-format +msgid "Could not find key in output of command %(cmd)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:64 +#, python-format +msgid "Failed to get code level (%s)." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:86 +#, python-format +msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:143 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:165 +#, python-format +msgid "Failed to find host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:178 +#, python-format +msgid "enter: get_host_from_connector: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:207 +#, python-format +msgid "leave: get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:218 +#, python-format +msgid "enter: create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:224 +msgid "create_host: Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:235 +msgid "create_host: No initiators or wwpns supplied." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:265 +#, python-format +msgid "leave: create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:275 +#, python-format +msgid "enter: map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:301 +#, python-format +msgid "" +"leave: map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host " +"%(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:311 +#, python-format +msgid "enter: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:318 +#, python-format +msgid "unmap_vol_from_host: No mapping of volume %(vol_name)s to any host found." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:324 +#, python-format +msgid "" +"unmap_vol_from_host: Multiple mappings of volume %(vol_name)s found, no " +"host specified." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:336 +#, python-format +msgid "" +"unmap_vol_from_host: No mapping of volume %(vol_name)s to host %(host) " +"found." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:348 +#, python-format +msgid "leave: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:377 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:383 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:390 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:397 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:402 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:408 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:417 +#, python-format +msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:452 +msgid "Protocol must be specified as ' iSCSI' or ' FC'." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:495 +#, python-format +msgid "enter: create_vdisk: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:498 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:525 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping%(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:535 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within theallotted %(to)d " +"seconds timeout. Terminating." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:544 +#, python-format +msgid "" +"enter: run_flashcopy: execute FlashCopy from source %(source)s to target " +"%(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:552 +#, python-format +msgid "leave: run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:572 +#, python-format +msgid "Loopcall: _check_vdisk_fc_mappings(), vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:595 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:631 +#, python-format +msgid "Calling _ensure_vdisk_no_fc_mappings: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:639 +#, python-format +msgid "enter: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:641 +#, python-format +msgid "Tried to delete non-existant vdisk %s." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:645 +#, python-format +msgid "leave: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:649 +#, python-format +msgid "enter: create_copy: snapshot %(src)s to %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:654 +#, python-format +msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:669 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt)s from vdisk %(src)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:691 +msgid "migrate_volume started without a vdisk copy in the expected pool." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:743 +#, python-format +msgid "" +"Ignore change IO group as storage code level is %(code_level)s, below " +"then 6.4.0.0" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:35 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:211 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:244 +#, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:53 +#, python-format +msgid "Expected no output from CLI command %(cmd)s, got %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:65 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:256 +#, python-format +msgid "" +"Failed to parse CLI output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:142 +msgid "Must pass wwpn or host to lsfabric." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:158 +#, python-format +msgid "Did not find success message nor error for %(fun)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:163 +msgid "" +"storwize_svc_multihostmap_enabled is set to False, not allowing multi " +"host mapping." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:347 +#, python-format +msgid "Did not find expected key %(key)s in %(fun)s: %(raw)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:382 +#, python-format +msgid "" +"Unexpected CLI response: header/row mismatch. header: %(header)s, row: " +"%(row)s" +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:419 +#, python-format +msgid "No element by given name %s." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:440 +msgid "Not a valid value for NaElement." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:444 +msgid "NaElement name cannot be null." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:468 +msgid "Type cannot be converted into NaElement." 
+msgstr "" + +#: cinder/volume/drivers/netapp/common.py:75 +msgid "Required configuration not found" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:103 +#, python-format +msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:109 +#, python-format +msgid "Storage family %s is not supported" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:116 +#, python-format +msgid "No default storage protocol found for storage family %(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:123 +#, python-format +msgid "" +"Protocol %(storage_protocol)s is not supported for storage family " +"%(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:130 +#, python-format +msgid "" +"NetApp driver of family %(storage_family)s and protocol " +"%(storage_protocol)s loaded" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:139 +msgid "Only loading netapp drivers supported." +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:158 +#, python-format +msgid "" +"The configured NetApp driver is deprecated. Please refer the link to " +"resolve the issue '%s'." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:69 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:105 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:150 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:166 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:175 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:191 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:227 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:232 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:238 +#, python-format +msgid "Failed to get LUN target details for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:249 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:252 +#, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:290 +#, python-format +msgid "Snapshot %s deletion successful" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:310 +#: cinder/volume/drivers/netapp/iscsi.py:565 +#: cinder/volume/drivers/netapp/nfs.py:99 +#: cinder/volume/drivers/netapp/nfs.py:206 +#, python-format +msgid "Resizing %s failed. Cleaning volume." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:325 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:412 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:431 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:511 +msgid "Object is not a NetApp LUN." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:543 +#, python-format +msgid "Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:545 +#, python-format +msgid "Error getting lun attribute. Exception: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:600 +#, python-format +msgid "No need to extend volume %s as it is already the requested new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:606 +#, python-format +msgid "Resizing lun %s directly to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:633 +#, python-format +msgid "Lun %(path)s geometry failed. Message - %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:662 +#, python-format +msgid "Moving lun %(name)s to %(new_name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:677 +#, python-format +msgid "Resizing lun %s using sub clone to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:684 +#, python-format +msgid "%s cannot be sub clone resized as it is hosted on compressed volume" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:690 +#, python-format +msgid "%s cannot be sub clone resized as it contains no blocks." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:707 +#, python-format +msgid "Post clone resize lun %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:718 +#, python-format +msgid "Failure staging lun %s to tmp." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:723 +#, python-format +msgid "Failure moving new cloned lun to %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:727 +#, python-format +msgid "Failure deleting staged tmp lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:730 +#, python-format +msgid "Unknown exception in post clone resize lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:732 +#, python-format +msgid "Exception details: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:736 +msgid "Getting lun block count." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:741 +#, python-format +msgid "Failure getting lun info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:785 +#, python-format +msgid "Failed to get vol with required size and extra specs for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:796 +#, python-format +msgid "Error provisioning vol %(name)s on %(volume)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:841 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:982 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:986 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1087 +msgid "Cluster ssc is not updated. No volume stats found." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1149 +#: cinder/volume/drivers/netapp/nfs.py:1080 +msgid "Unsupported ONTAP version. ONTAP version 7.3.1 and above is supported." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1153 +#: cinder/volume/drivers/netapp/nfs.py:1084 +#: cinder/volume/drivers/netapp/utils.py:320 +msgid "Api version could not be determined." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1164 +#, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1273 +#, python-format +msgid "Error finding luns for volume %s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1390 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1393 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1456 +msgid "Volume refresh job already running. Returning..." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1462 +#, python-format +msgid "Error refreshing vol capacity. Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1470 +#, python-format +msgid "Refreshing capacity info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:104 +#: cinder/volume/drivers/netapp/nfs.py:211 +#, python-format +msgid "NFS file %s not discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:224 +#, python-format +msgid "Copied image to volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:230 +#, python-format +msgid "Registering image in cache %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:237 +#, python-format +msgid "" +"Exception while registering image %(image_id)s in cache. Exception: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:250 +#, python-format +msgid "Found cache file for image %(image_id)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:263 +#, python-format +msgid "Cloning img from cache for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:271 +msgid "Image cache cleaning in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:282 +msgid "Image cache cleaning in progress." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:293 +#, python-format +msgid "Cleaning cache for share %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:298 +#, python-format +msgid "Files to be queued for deletion %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:305 +#, python-format +msgid "Exception during cache cleaning %(share)s. Message - %(ex)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:311 +msgid "Image cache cleaning done." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:336 +#, python-format +msgid "Bytes to free %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:343 +#, python-format +msgid "Delete file path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:358 +#, python-format +msgid "Deleting file at path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:363 +#, python-format +msgid "Exception during deleting %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:395 +#, python-format +msgid "Unexpected exception in cloning image %(image_id)s. 
Message: %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:411 +#, python-format +msgid "Cloning image %s from cache" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:415 +#, python-format +msgid "Cache share: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:425 +#, python-format +msgid "Unexpected exception during image cloning in share %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:431 +#, python-format +msgid "Cloning image %s directly in share" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:436 +#, python-format +msgid "Share is cloneable %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:443 +#, python-format +msgid "Image is raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:450 +#, python-format +msgid "Image will locally be converted to raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:457 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:467 +#, python-format +msgid "Performing post clone for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:474 +msgid "NFS file could not be discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:478 +msgid "Checking file for resize" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:482 +#, python-format +msgid "Resizing file to %sG" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:488 +msgid "Resizing image file failed." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:510 +msgid "Discover file retries exhausted." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:529 +#, python-format +msgid "Image location not in the expected format %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:557 +#, python-format +msgid "Found possible share matches %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:561 +msgid "Unexpected exception while short listing used share." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:599 +#, python-format +msgid "Extending volume %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:710 +#, python-format +msgid "Shares on vserver %s will only be used for provisioning." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:714 +#: cinder/volume/drivers/netapp/nfs.py:892 +msgid "No vserver set in config. SSC will be disabled." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:757 +#, python-format +msgid "Exception creating vol %(name)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:765 +#, python-format +msgid "Volume %s could not be created on shares." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:815 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:856 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:862 +#, python-format +msgid "" +"Cloning with params volume %(volume)s, src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:924 +msgid "No cluster ssc stats found. Wait for next volume stats update." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:941 +msgid "No shares found hence skipping ssc refresh." 
+msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:978 +#: cinder/volume/drivers/netapp/nfs.py:1221 +#, python-format +msgid "Shortlisted del elg files %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:983 +#: cinder/volume/drivers/netapp/nfs.py:1226 +#, python-format +msgid "Getting file usage for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:988 +#: cinder/volume/drivers/netapp/nfs.py:1231 +#, python-format +msgid "file-usage for path %(path)s is %(bytes)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1005 +#: cinder/volume/drivers/netapp/nfs.py:1268 +#, python-format +msgid "Share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1007 +#: cinder/volume/drivers/netapp/nfs.py:1270 +#, python-format +msgid "No share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1038 +#, python-format +msgid "Found volume %(vol)s for share %(share)s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1129 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1139 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:241 +#, python-format +msgid "Unexpected error while creating ssc vol list. Message - %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:272 +#, python-format +msgid "Exception querying aggr options. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:313 +#, python-format +msgid "Exception querying sis information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:347 +#, python-format +msgid "Exception querying mirror information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:379 +#, python-format +msgid "Exception querying storage disk. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:421 +#, python-format +msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:455 +#, python-format +msgid "Successfully completed stale refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:482 +#, python-format +msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:488 +#, python-format +msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:500 +msgid "Backend not a VolumeDriver." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:502 +msgid "Backend server not NaServer." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:505 +msgid "ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:517 +msgid "refresh stale ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:616 +msgid "Fatal error: User not permitted to query NetApp volumes." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:623 +#, python-format +msgid "" +"The user does not have access or sufficient privileges to use all ssc " +"apis. The ssc features %s may not work as expected." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:122 +msgid "ems executed successfully." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:124 +#, python-format +msgid "Failed to invoke ems. 
Message : %s" +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:137 +msgid "" +"It is not the recommended way to use drivers by NetApp. Please use " +"NetAppDriver to achieve the functionality." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:160 +msgid "Requires an NaServer instance." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:317 +msgid "Unsupported Clustered Data ONTAP version." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:99 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:150 +#, python-format +msgid "Extending volume: %(id)s New size: %(size)s GB" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:166 +#, python-format +msgid "Volume %s does not exist, it seems it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:179 +#, python-format +msgid "Cannot delete snapshot %(origin): %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:190 +#, python-format +msgid "Creating temp snapshot of the original volume: %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:200 +#: cinder/volume/drivers/nexenta/nfs.py:200 +#, python-format +msgid "Volume creation failed, deleting created snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:205 +#: cinder/volume/drivers/nexenta/nfs.py:205 +#, python-format +msgid "Failed to delete zfs snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:223 +#, python-format +msgid "Enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:250 +#, python-format +msgid "Remote NexentaStor appliance at %s should be SSH-bound." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:267 +#, python-format +msgid "" +"Cannot send source snapshot %(src)s to destination %(dst)s. Reason: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:275 +#, python-format +msgid "" +"Cannot delete temporary source snapshot %(src)s on NexentaStor Appliance:" +" %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:281 +#, python-format +msgid "Cannot delete source volume %(volume)s on NexentaStor Appliance: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:318 +#, python-format +msgid "Snapshot %s does not exist, it seems it was already deleted." 
+msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:439 +#: cinder/volume/drivers/windows/windows_utils.py:230 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:449 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:461 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:471 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:481 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:514 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:522 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:83 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:88 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:89 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:90 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:96 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:85 +#, python-format +msgid "Volume %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:89 +#, python-format +msgid "Folder %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:114 +#, python-format +msgid "Creating folder on Nexenta Store %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:146 +#, python-format +msgid "Cannot destroy created folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:176 +#, python-format +msgid "Cannot destroy cloned folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:227 +#, python-format +msgid "Folder %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:237 +#: cinder/volume/drivers/nexenta/nfs.py:268 +#, python-format +msgid "Snapshot %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:302 +#, python-format +msgid "Creating regular file: %s.This may take some time." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:313 +#, python-format +msgid "Regular file: %s created." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:365 +#, python-format +msgid "Sharing folder %s on Nexenta Store" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:393 +#, python-format +msgid "Shares loaded: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/utils.py:46 +#, python-format +msgid "Invalid value: \"%s\"" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:93 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:99 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:107 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:137 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:190 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:246 +#, python-format +msgid "Snapshot info: %(name)s => %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:321 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:79 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:166 +#, python-format +msgid "Invalid hp3parclient version. Version %s or greater required." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:179 +#, python-format +msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:193 +#, python-format +msgid "HP3PARCommon %(common_ver)s, hp3parclient %(rest_ver)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:212 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:494 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:228 +#, python-format +msgid "Failed to get domain because CPG (%s) doesn't exist on array." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:247 +#, python-format +msgid "Error extending volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:347 +#, python-format +msgid "command %s failed" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:390 +#, python-format +msgid "Error running ssh command: %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:586 +#, python-format +msgid "VV Set %s does not exist." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:633 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:684 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:752 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1004 +#, python-format +msgid "Failure in update_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1019 +#, python-format +msgid "Failure in clear_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1031 +#, python-format +msgid "Error attaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1039 +#, python-format +msgid "Error detaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:124 +#, python-format +msgid "Invalid IP address format '%s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:158 +#, python-format +msgid "" +"Found invalid iSCSI IP address(s) in configuration option(s) " +"hp3par_iscsi_ips or iscsi_ip_address '%s.'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:164 +msgid "At least one valid iSCSI IP address must be set." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:266 +msgid "Least busy iSCSI port not found, using first iSCSI port in list." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:75 +#, python-format +msgid "Failure while invoking function: %(func)s. Error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:162 +#, python-format +msgid "Error while terminating session: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:165 +msgid "Successfully established connection to the server." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:172 +#, python-format +msgid "Error while logging out the user: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:218 +#, python-format +msgid "" +"Not authenticated error occurred. Will create session and try API call " +"again: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:258 +#, python-format +msgid "Task: %(task)s progress: %(prog)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:262 +#, python-format +msgid "Task %s status: success." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:266 +#: cinder/volume/drivers/vmware/api.py:271 +#, python-format +msgid "Task: %(task)s failed with error: %(err)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:290 +msgid "Lease is ready." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:294 +msgid "Lease initializing..." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:304 +#, python-format +msgid "Error: unknown lease state %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:51 +#, python-format +msgid "Read %(bytes)s out of %(max)s from ThreadSafePipe." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:56 +#, python-format +msgid "Completed transfer of size %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:102 +#, python-format +msgid "Initiating image service update on image: %(image)s with meta: %(meta)s" +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:117 +#, python-format +msgid "Glance image: %s is now active." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:123 +#, python-format +msgid "Glance image: %s is in killed state." 
+msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:132 +#, python-format +msgid "Glance image %(id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:171 +#, python-format +msgid "" +"Exception during HTTP connection close in VMwareHTTPWrite. Exception is " +"%s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:203 +#: cinder/volume/drivers/vmware/read_write_util.py:292 +msgid "Could not retrieve URL from lease." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:206 +#, python-format +msgid "Opening vmdk url: %s for write." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:231 +#, python-format +msgid "Written %s bytes to vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:242 +#: cinder/volume/drivers/vmware/read_write_util.py:318 +#, python-format +msgid "Updating progress to %s percent." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:258 +#: cinder/volume/drivers/vmware/read_write_util.py:334 +msgid "Lease released." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:260 +#: cinder/volume/drivers/vmware/read_write_util.py:336 +#, python-format +msgid "Lease is already in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:295 +#, python-format +msgid "Opening vmdk url: %s for read." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:307 +#, python-format +msgid "Read %s bytes from vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:150 +#, python-format +msgid "Error(s): %s occurred in the call to RetrievePropertiesEx." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:189 +#, python-format +msgid "No such SOAP method %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:198 +#, python-format +msgid "httplib error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:209 +#, python-format +msgid "Socket error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:218 +#, python-format +msgid "Type error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:225 +#, python-format +msgid "Error in %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:112 +#, python-format +msgid "Returning spec value %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:115 +#, python-format +msgid "Invalid spec value: %s specified." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:118 +#, python-format +msgid "Returning default spec value: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:169 +#, python-format +msgid "%s not set." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:174 +#, python-format +msgid "Successfully setup driver: %(driver)s for server: %(ip)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:219 +msgid "Backing not available, no operation to be performed." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:287 +#, python-format +msgid "" +"Unable to pick datastore to accommodate %(size)s bytes from the " +"datastores: %(dss)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:293 +#, python-format +msgid "" +"Selected datastore: %(datastore)s with %(host_count)d connected host(s) " +"for the volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:375 +#, python-format +msgid "" +"Unable to find suitable datastore for volume of size: %(vol)s GB under " +"host: %(host)s. 
More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:385 +#, python-format +msgid "Unable to find host to accommodate a disk of size: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:412 +#, python-format +msgid "" +"Unable to find suitable datastore for volume: %(vol)s under host: " +"%(host)s. More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:422 +#, python-format +msgid "Unable to create volume: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:441 +#, python-format +msgid "The instance: %s for which initialize connection is called, exists." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:448 +#, python-format +msgid "There is no backing for the volume: %s. Need to create one." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:456 +msgid "The instance for which initialize connection is called, does not exist." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:461 +#, python-format +msgid "Trying to boot from an empty volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:470 +#, python-format +msgid "" +"Returning connection_info: %(info)s for volume: %(volume)s with " +"connector: %(connector)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:518 +#, python-format +msgid "Snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:523 +#, python-format +msgid "There is no backing, so will not create snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:528 +#, python-format +msgid "Successfully created snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:549 +#, python-format +msgid "Delete snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:554 +#, python-format +msgid "There is no backing, and so there is no snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:558 +#, python-format +msgid "Successfully deleted snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:586 +#, python-format +msgid "Successfully cloned new backing: %(back)s from source VMDK file: %(vmdk)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:603 +#, python-format +msgid "" +"There is no backing for the source volume: %(svol)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:633 +#, python-format +msgid "" +"There is no backing for the source snapshot: %(snap)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:642 +#: cinder/volume/drivers/vmware/vmdk.py:982 +#, python-format +msgid "" +"There is no snapshot point for the snapshoted volume: %(snap)s. Not " +"creating any backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:678 +#, python-format +msgid "Cannot create image of disk format: %s. Only vmdk disk format is accepted." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:713 +#: cinder/volume/drivers/vmware/vmdk.py:771 +#, python-format +msgid "Fetching glance image: %(id)s to server: %(host)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:722 +#: cinder/volume/drivers/vmware/vmdk.py:792 +#, python-format +msgid "Done copying image: %(id)s to volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:725 +#, python-format +msgid "" +"Exception in copy_image_to_volume: %(excep)s. Deleting the backing: " +"%(back)s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:746 +#, python-format +msgid "Exception in _select_ds_for_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:749 +#, python-format +msgid "Selected datastore %(ds)s for new volume of size %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:784 +#, python-format +msgid "Exception in copy_image_to_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:787 +#, python-format +msgid "Deleting the backing: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:808 +#, python-format +msgid "Copy glance image: %s to create new volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:842 +msgid "Upload to glance of attached volume is not supported." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:847 +#, python-format +msgid "Copy Volume: %s to new image." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:853 +#, python-format +msgid "Backing not found, creating for volume: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:872 +#, python-format +msgid "Done copying volume %(vol)s to a new image %(img)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:922 +#, python-format +msgid "Relocating volume: %(backing)s to %(ds)s and %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:961 +#: cinder/volume/drivers/vmware/volumeops.py:630 +#, python-format +msgid "Successfully created clone: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:974 +#, python-format +msgid "" +"There is no backing for the snapshoted volume: %(snap)s. Not creating any" +" backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1010 +#, python-format +msgid "" +"There is no backing for the source volume: %(src)s. Not creating any " +"backing for volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1018 +#, python-format +msgid "Linked clone of source volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:94 +#, python-format +msgid "Downloading image: %s from glance image server as a flat vmdk file." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:107 +#: cinder/volume/drivers/vmware/vmware_images.py:126 +#, python-format +msgid "Downloaded image: %s from glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:113 +#, python-format +msgid "Downloading image: %s from glance image server using HttpNfc import." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:132 +#, python-format +msgid "Uploading image: %s to the Glance image server using HttpNfc export." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:158 +#, python-format +msgid "Uploaded image: %s to the Glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:87 +#, python-format +msgid "Did not find any backing with name: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:94 +#, python-format +msgid "Deleting the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:97 +#, python-format +msgid "Initiated deletion of VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:99 +#, python-format +msgid "Deleted the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:237 +#, python-format +msgid "There are no valid datastores attached to %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:289 +#, python-format +msgid "" +"Creating folder: %(child_folder_name)s under parent folder: " +"%(parent_folder)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:306 +#, python-format +msgid "Child folder already present: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:314 +#, python-format +msgid "Created child folder: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:365 +#, python-format +msgid "Spec for creating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:383 +#, python-format +msgid "" +"Creating volume backing name: %(name)s disk_type: %(disk_type)s size_kb: " +"%(size_kb)s at folder: %(folder)s resourse pool: %(resource_pool)s " +"datastore name: %(ds_name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:395 +#, python-format +msgid "Initiated creation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:398 +#, python-format +msgid "Successfully created volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:438 +#, python-format +msgid "Spec for relocating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:452 +#, python-format +msgid "" +"Relocating backing: %(backing)s to datastore: %(ds)s and resource pool: " +"%(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:462 +#, python-format +msgid "Initiated relocation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:464 +#, python-format +msgid "" +"Successfully relocated volume backing: %(backing)s to datastore: %(ds)s " +"and resource pool: %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:474 +#, python-format +msgid "Moving backing: %(backing)s to folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:479 +#, python-format +msgid "Initiated move of volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:482 +#, python-format +msgid "Successfully moved volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:494 +#, python-format +msgid "Snapshoting backing: %(backing)s with name: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:501 +#, python-format +msgid "Initiated snapshot of volume backing: %(backing)s named: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:505 +#, python-format +msgid "Successfully created snapshot: %(snap)s for volume backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:553 +#, python-format +msgid "Deleting the snapshot: %(name)s from backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:558 +#, python-format +msgid "" +"Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " +"delete anything." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:565 +#, python-format +msgid "Initiated snapshot: %(name)s deletion for backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:569 +#, python-format +msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:597 +#, python-format +msgid "Spec for cloning the backing: %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:613 +#, python-format +msgid "" +"Creating a clone of backing: %(back)s, named: %(name)s, clone type: " +"%(type)s from snapshot: %(snap)s on datastore: %(ds)s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:627 +#, python-format +msgid "Initiated clone of backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:638 +#, python-format +msgid "Deleting file: %(file)s under datacenter: %(dc)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:646 +#, python-format +msgid "Initiated deletion via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:648 +#, python-format +msgid "Successfully deleted file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:701 +msgid "Copying disk data before snapshot of the VM" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:711 +#, python-format +msgid "Initiated copying disk data via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:713 +#, python-format +msgid "Successfully copied disk at: %(src)s to: %(dest)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:722 +#, python-format +msgid "Deleting vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:729 +#, python-format +msgid "Initiated deleting vmdk file via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:731 +#, python-format +msgid "Deleted vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/windows/windows.py:102 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:47 +#, python-format +msgid "" +"check_for_setup_error: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:53 +msgid "check_for_setup_error: there is no ISCSI traffic listening." +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:63 +#, python-format +msgid "" +"get_host_information: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:73 +#, python-format +msgid "" +"get_host_information: the ISCSI target information could not be " +"retrieved. WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:105 +#, python-format +msgid "" +"associate_initiator_with_iscsi_target: an association between initiator: " +"%(init)s and target name: %(target)s could not be established. WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:123 +#, python-format +msgid "" +"delete_iscsi_target: error when deleting the iscsi target associated with" +" target name: %(target)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:139 +#, python-format +msgid "" +"create_volume: error when creating the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:157 +#, python-format +msgid "" +"delete_volume: error when deleting the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:177 +#, python-format +msgid "" +"create_snapshot: error when creating the snapshot name: %(vol_name)s . 
" +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:193 +#, python-format +msgid "" +"create_volume_from_snapshot: error when creating the volume name: " +"%(vol_name)s from snapshot name: %(snap_name)s. WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:208 +#, python-format +msgid "" +"delete_snapshot: error when deleting the snapshot name: %(snap_name)s . " +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:223 +#, python-format +msgid "" +"create_iscsi_target: error when creating iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:240 +#, python-format +msgid "" +"remove_iscsi_target: error when deleting iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:255 +#, python-format +msgid "" +"add_disk_to_target: error adding disk associated to volume : %(vol_name)s" +" to the target name: %(tar_name)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:273 +#, python-format +msgid "" +"copy_vhd_disk: error when copying disk from source path : %(src_path)s to" +" destination path: %(dest_path)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:290 +#, python-format +msgid "" +"extend: error when extending the volume: %(vol_name)s .WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/flows/common.py:52 +#, python-format +msgid "Restoring source %(source_volid)s status to %(status)s" +msgstr "" + +#: cinder/volume/flows/common.py:58 +#, python-format +msgid "" +"Failed setting source volume %(source_volid)s back to its initial " +"%(source_status)s status" +msgstr "" + +#: cinder/volume/flows/common.py:83 +#, python-format +msgid "Updating volume: %(volume_id)s with %(update)s due to: %(reason)s" +msgstr "" + +#: cinder/volume/flows/common.py:90 +#: cinder/volume/flows/manager/create_volume.py:676 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(update)s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:81 +#, python-format +msgid "Originating snapshot status must be one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:103 +#, python-format +msgid "" +"Unable to create a volume from an originating source volume when its " +"status is not one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:126 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than the snapshot size " +"%(snap_size)sGB. They must be >= original snapshot size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:135 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than original volume size " +"%(source_size)sGB. They must be >= original volume size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:144 +#, python-format +msgid "Volume size %(size)s must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:186 +#, python-format +msgid "" +"Size of specified image %(image_size)sGB is larger than volume size " +"%(volume_size)sGB." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:194 +#, python-format +msgid "" +"Volume size %(volume_size)sGB cannot be smaller than the image minDisk " +"size %(min_disk)sGB." 
+msgstr "" + +#: cinder/volume/flows/api/create_volume.py:212 +#, python-format +msgid "Metadata property key %s greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:217 +#, python-format +msgid "Metadata property key %s value greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:254 +#, python-format +msgid "Availability zone '%s' is invalid" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:267 +msgid "Volume must be in the same availability zone as the snapshot" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:276 +msgid "Volume must be in the same availability zone as the source volume" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:315 +msgid "Volume type will be changed to be the same as the source volume." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:463 +#, python-format +msgid "Failed destroying volume entry %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:546 +#, python-format +msgid "Failed rolling back quota for %s reservations" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:590 +#, python-format +msgid "Failed to update quota for deleting volume: %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:678 +#: cinder/volume/flows/manager/create_volume.py:192 +#, python-format +msgid "Volume %s: create failed" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:682 +msgid "Unexpected build error:" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:105 +#, python-format +msgid "" +"Volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d due to " +"%(reason)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:124 +#, python-format +msgid "Volume %s: re-scheduled" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:141 +#, python-format +msgid "Updating volume %(volume_id)s with %(update)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:146 +#, python-format +msgid "Volume %s: resetting 'creating' status failed." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:165 +#, python-format +msgid "Volume %s: rescheduling failed" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:308 +#, python-format +msgid "" +"Failed notifying about the volume action %(event)s for volume " +"%(volume_id)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:345 +#, python-format +msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:347 +#, python-format +msgid "" +"Failed updating volume %(vol_id)s metadata using the provided " +"%(src_type)s %(src_id)s metadata" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:405 +#, python-format +msgid "" +"Failed fetching snapshot %(snapshot_id)s bootable flag using the provided" +" glance snapshot %(snapshot_ref_id)s volume reference" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:418 +#, python-format +msgid "Marking volume %s as bootable." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:421 +#, python-format +msgid "Failed updating volume %(volume_id)s bootable flag to true" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:448 +#, python-format +msgid "" +"Attempting download of %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s." 
+msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:455 +#: cinder/volume/flows/manager/create_volume.py:466 +#, python-format +msgid "" +"Failed to copy image %(image_id)s to volume: %(volume_id)s, error: " +"%(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:461 +#, python-format +msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:475 +#, python-format +msgid "" +"Downloaded image %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s successfully." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:512 +#, python-format +msgid "" +"Creating volume glance metadata for volume %(volume_id)s backed by image " +"%(image_id)s with: %(vol_metadata)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:526 +#, python-format +msgid "" +"Cloning %(volume_id)s from image %(image_id)s at location " +"%(image_location)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:552 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(updates)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:574 +#, python-format +msgid "Unable to create volume. Volume driver %s not initialized" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:588 +#, python-format +msgid "" +"Volume %(volume_id)s: being created using %(functor)s with specification:" +" %(volume_spec)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:611 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with creation provided " +"model %(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:619 +#, python-format +msgid "Volume %s: creating export" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:633 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with driver provided model " +"%(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:680 +#, python-format +msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" +msgstr "" + +#~ msgid "Error retrieving volume status: %s" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get system name" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get storage pool data" +#~ msgstr "" + +#~ msgid "Cannot find any Fibre Channel HBAs" +#~ msgstr "" + +#~ msgid "Volume status must be available or error" +#~ msgstr "" + +#~ msgid "No backend config with id %s" +#~ msgstr "" + +#~ msgid "No sm_flavor called %s" +#~ msgstr "" + +#~ msgid "No sm_volume with id %s" +#~ msgstr "" + +#~ msgid "Error: %s" +#~ msgstr "" + +#~ msgid "Unexpected state while cloning %s" +#~ msgstr "" + +#~ msgid "iSCSI device not found at %s" +#~ msgstr "" + +#~ msgid "Fibre Channel device not found." +#~ msgstr "" + +#~ msgid "Uncaught exception" +#~ msgstr "" + +#~ msgid "Out reactor registered" +#~ msgstr "" + +#~ msgid "CONSUMER GOT %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT QUEUED %(data)s" +#~ msgstr "" + +#~ msgid "Could not create IPC directory %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT %(data)s" +#~ msgstr "" + +#~ msgid "May specify only one of snapshot, imageRef or source volume" +#~ msgstr "" + +#~ msgid "Volume size cannot be lesser than the Snapshot size" +#~ msgstr "" + +#~ msgid "Unable to clone volumes that are in an error state" +#~ msgstr "" + +#~ msgid "Clones currently must be >= original volume size." 
+#~ msgstr "" + +#~ msgid "Volume size '%s' must be an integer and greater than 0" +#~ msgstr "" + +#~ msgid "Size of specified image is larger than volume size." +#~ msgstr "" + +#~ msgid "Image minDisk size is larger than the volume size." +#~ msgstr "" + +#~ msgid "" +#~ msgstr "" + +#~ msgid "Availability zone is invalid" +#~ msgstr "" + +#~ msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +#~ msgstr "" + +#~ msgid "volume %s: creating from snapshot" +#~ msgstr "" + +#~ msgid "volume %s: creating from existing volume" +#~ msgstr "" + +#~ msgid "volume %s: creating from image" +#~ msgstr "" + +#~ msgid "volume %s: creating" +#~ msgstr "" + +#~ msgid "Setting volume: %s status to error after failed image copy." +#~ msgstr "" + +#~ msgid "Unexpected Error: " +#~ msgstr "" + +#~ msgid "volume %s: creating export" +#~ msgstr "" + +#~ msgid "volume %s: create failed" +#~ msgstr "" + +#~ msgid "volume %s: created successfully" +#~ msgstr "" + +#~ msgid "volume %s: Error trying to reschedule create" +#~ msgstr "" + +#~ msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +#~ msgstr "" + +#~ msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +#~ msgstr "" + +#~ msgid "Downloaded image %(image_id)s to %(volume_id)s successfully." +#~ msgstr "" + +#~ msgid "Array Mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "LUN %(lun)s of size %(size)s MB is created." +#~ msgstr "" + +#~ msgid "Array mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "Failed to attach iser target for volume %(volume_id)s." +#~ msgstr "" + +#~ msgid "Fetching %s" +#~ msgstr "" + +#~ msgid "Link Local address is not found.:%s" +#~ msgstr "" + +#~ msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +#~ msgstr "" + +#~ msgid "Started %(name)s on %(_host)s:%(_port)s" +#~ msgstr "" + +#~ msgid "Unable to find a Fibre Channel volume device" +#~ msgstr "" + +#~ msgid "Volume device not found at %s" +#~ msgstr "" + +#~ msgid "Unable to find Volume Group: %s" +#~ msgstr "" + +#~ msgid "Failed to create Volume Group: %s" +#~ msgstr "" + +#~ msgid "snapshot %(snap_name)s: creating" +#~ msgstr "" + +#~ msgid "Running with CoraidDriver for ESM EtherCLoud" +#~ msgstr "" + +#~ msgid "Update session cookie %(session)s" +#~ msgstr "" + +#~ msgid "Message : %(message)s" +#~ msgstr "" + +#~ msgid "Error while trying to set group: %(message)s" +#~ msgstr "" + +#~ msgid "Unable to find group: %(group)s" +#~ msgstr "" + +#~ msgid "ESM urlOpen error" +#~ msgstr "" + +#~ msgid "JSON Error" +#~ msgstr "" + +#~ msgid "Request without URL" +#~ msgstr "" + +#~ msgid "Configure data : %s" +#~ msgstr "" + +#~ msgid "Configure response : %s" +#~ msgstr "" + +#~ msgid "Unable to retrive volume infos for volume %(volname)s" +#~ msgstr "" + +#~ msgid "Cannot login on Coraid ESM" +#~ msgstr "" + +#~ msgid "Fail to create volume %(volname)s" +#~ msgstr "" + +#~ msgid "Failed to delete volume %(volname)s" +#~ msgstr "" + +#~ msgid "Failed to Create Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "Failed to Delete Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "Failed to Create Volume from Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "fmt = %(fmt)s backed by: %(backing_file)s" +#~ msgstr "" + +#~ msgid "Expected image to be in raw format, but is %s" +#~ msgstr "" + +#~ msgid "volume group %s doesn't exist" +#~ msgstr "" + +#~ msgid "Error retrieving volume stats: %s" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Could not get system name" +#~ msgstr "" + +#~ msgid "CPG (%s) must 
be in a domain" +#~ msgstr "" + +#~ msgid "Error populating default encryption types!" +#~ msgstr "" + +#~ msgid "Unexpected error while running command." +#~ msgstr "" + +#~ msgid "Nexenta SA returned the error" +#~ msgstr "" + +#~ msgid "Ignored target group creation error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group member addition error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LU creation error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Copying metadata from snapshot %(snap_volume_id)s to %(volume_id)s" +#~ msgstr "" + +#~ msgid "Connection to glance failed" +#~ msgstr "" + +#~ msgid "Invalid snapshot" +#~ msgstr "" + +#~ msgid "Invalid input received" +#~ msgstr "" + +#~ msgid "Invalid volume type" +#~ msgstr "" + +#~ msgid "Invalid volume" +#~ msgstr "" + +#~ msgid "Invalid host" +#~ msgstr "" + +#~ msgid "Invalid auth key" +#~ msgstr "" + +#~ msgid "Invalid metadata" +#~ msgstr "" + +#~ msgid "Invalid metadata size" +#~ msgstr "" + +#~ msgid "Migration error" +#~ msgstr "" + +#~ msgid "Quota exceeded" +#~ msgstr "" + +#~ msgid "Connection to swift failed" +#~ msgstr "" + +#~ msgid "Volume migration failed" +#~ msgstr "" + +#~ msgid "SSH command injection detected" +#~ msgstr "" + +#~ msgid "Invalid qos specs" +#~ msgstr "" + +#~ msgid "debug in callback: %s" +#~ msgstr "" + +#~ msgid "Expected object of type: %s" +#~ msgstr "" + +#~ msgid "timefunc: '%(name)s' took %(total_time).2f secs" +#~ msgstr "" + +#~ msgid "base image still has %s snapshots so not deleting base image" +#~ msgstr "" + +#~ msgid "Failed to rename migration destination volume %(vol)s to %(name)s" +#~ msgstr "" + +#~ msgid "Resize volume \"%(name)s\" to %(size)s" +#~ msgstr "" + +#~ msgid "Volume \"%(name)s\" resized. New size is %(size)s" +#~ msgstr "" + +#~ msgid "Invalid snapshot backing file format: %s" +#~ msgstr "" + +#~ msgid "Extend volume from %(old_size) to %(new_size)" +#~ msgstr "" + +#~ msgid "pool %s doesn't exist" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Could not get system name." +#~ msgstr "" + +#~ msgid "Disk not found: %s" +#~ msgstr "" + +#~ msgid "read timed out" +#~ msgstr "" + +#~ msgid "check_for_setup_error." +#~ msgstr "" + +#~ msgid "check_for_setup_error: Can not get device type." +#~ msgstr "" + +#~ msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +#~ msgstr "" + +#~ msgid "_get_device_type: Storage Pool must be configured." +#~ msgstr "" + +#~ msgid "create_volume:volume name: %s." +#~ msgstr "" + +#~ msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +#~ msgstr "" + +#~ msgid "create_export: volume name:%s" +#~ msgstr "" + +#~ msgid "create_export:Volume %(name)s does not exist." +#~ msgstr "" + +#~ msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +#~ msgstr "" + +#~ msgid "terminate_connection:Host does not exist. Host name:%(host)s." +#~ msgstr "" + +#~ msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +#~ msgstr "" + +#~ msgid "create_snapshot:Device does not support snapshot." +#~ msgstr "" + +#~ msgid "create_snapshot:Resource pool needs 1GB valid size at least." +#~ msgstr "" + +#~ msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +#~ msgstr "" + +#~ msgid "create_snapshot:Snapshot does not exist. 
Snapshot name:%(name)s" +#~ msgstr "" + +#~ msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +#~ msgstr "" + +#~ msgid "delete_snapshot:Device does not support snapshot." +#~ msgstr "" + +#~ msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +#~ msgstr "" + +#~ msgid "_check_conf_file: %s" +#~ msgstr "" + +#~ msgid "Write login information to xml error. %s" +#~ msgstr "" + +#~ msgid "_get_login_info error. %s" +#~ msgstr "" + +#~ msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +#~ msgstr "" + +#~ msgid "_get_lun_set_info:%s" +#~ msgstr "" + +#~ msgid "_get_iscsi_info:%s" +#~ msgstr "" + +#~ msgid "CLI command:%s" +#~ msgstr "" + +#~ msgid "_execute_cli:%s" +#~ msgstr "" + +#~ msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +#~ msgstr "" + +#~ msgid "_get_tgt_iqn:iSCSI IP is %s." +#~ msgstr "" + +#~ msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +#~ msgstr "" + +#~ msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +#~ msgstr "" + +#~ msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +#~ msgstr "" + +#~ msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." +#~ msgstr "" + +#~ msgid "Ignored target creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group member addition error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LU creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LUN mapping entry addition error while ensuring export" +#~ msgstr "" + +#~ msgid "Invalid source volume %(reason)s." +#~ msgstr "" + +#~ msgid "The request is invalid." +#~ msgstr "" + +#~ msgid "Volume %(volume_id)s persistence file could not be found." +#~ msgstr "" + +#~ msgid "No disk at %(location)s" +#~ msgstr "" + +#~ msgid "Class %(class_name)s could not be found: %(exception)s" +#~ msgstr "" + +#~ msgid "Action not allowed." +#~ msgstr "" + +#~ msgid "Key pair %(key_name)s already exists." +#~ msgstr "" + +#~ msgid "Migration error: %(reason)s" +#~ msgstr "" + +#~ msgid "Maximum volume/snapshot size exceeded" +#~ msgstr "" + +#~ msgid "3PAR Host already exists: %(err)s. %(info)s" +#~ msgstr "" + +#~ msgid "Backup volume %(volume_id)s type not recognised." +#~ msgstr "" + +#~ msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s" +#~ msgstr "" + +#~ msgid "ssh_read: Read SSH timeout" +#~ msgstr "" + +#~ msgid "do_setup." +#~ msgstr "" + +#~ msgid "create_volume: volume name: %s." +#~ msgstr "" + +#~ msgid "delete_volume: volume name: %s." +#~ msgstr "" + +#~ msgid "create_cloned_volume: src volume: %(src)s tgt volume: %(tgt)s" +#~ msgstr "" + +#~ msgid "create_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" +#~ msgstr "" + +#~ msgid "delete_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" +#~ msgstr "" + +#~ msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Updating volume stats" +#~ msgstr "" + +#~ msgid "restore finished." +#~ msgstr "" + +#~ msgid "Error encountered during initialization of driver: %s" +#~ msgstr "" + +#~ msgid "Unabled to update stats, driver is uninitialized" +#~ msgstr "" + +#~ msgid "Snapshot file at %s does not exist." 
+#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %s does not exist" +#~ msgstr "" + +#~ msgid "Login to 3PAR array invalid" +#~ msgstr "" + +#~ msgid "There are no datastores present under %s." +#~ msgstr "" + +#~ msgid "Size for volume: %s not found, skipping secure delete." +#~ msgstr "" + +#~ msgid "Could not find attribute for LUN named %s" +#~ msgstr "" + +#~ msgid "Cleaning up incomplete backup operations" +#~ msgstr "" + +#~ msgid "Resetting volume %s to available (was backing-up)" +#~ msgstr "" + +#~ msgid "Resetting volume %s to error_restoring (was restoring-backup)" +#~ msgstr "" + +#~ msgid "Resetting backup %s to error (was creating)" +#~ msgstr "" + +#~ msgid "Resetting backup %s to available (was restoring)" +#~ msgstr "" + +#~ msgid "Resuming delete on backup: %s" +#~ msgstr "" + +#~ msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +#~ msgstr "" + +#~ msgid "create_backup finished. backup: %s" +#~ msgstr "" + +#~ msgid "delete_backup started, backup: %s" +#~ msgstr "" + +#~ msgid "delete_backup finished, backup %s deleted" +#~ msgstr "" + +#~ msgid "JSON transfer Error" +#~ msgstr "" + +#~ msgid "create volume error: %(err)s" +#~ msgstr "" + +#~ msgid "Create snapshot error." +#~ msgstr "" + +#~ msgid "Create luncopy error." +#~ msgstr "" + +#~ msgid "_find_host_lun_id transfer data error! " +#~ msgstr "" + +#~ msgid "ssh_read: Read SSH timeout." +#~ msgstr "" + +#~ msgid "There are no hosts in the inventory." +#~ msgstr "" + +#~ msgid "Unable to create volume: %(vol)s on the hosts: %(hosts)s." +#~ msgstr "" + +#~ msgid "Successfully cloned new backing: %s." +#~ msgstr "" + +#~ msgid "Successfully reverted clone: %(clone)s to snapshot: %(snapshot)s." +#~ msgstr "" + +#~ msgid "Copying backing files from %(src)s to %(dest)s." +#~ msgstr "" + +#~ msgid "Initiated copying of backing via task: %s." +#~ msgstr "" + +#~ msgid "Successfully copied backing to %s." +#~ msgstr "" + +#~ msgid "Registering backing at path: %s to inventory." +#~ msgstr "" + +#~ msgid "Initiated registring backing, task: %s." +#~ msgstr "" + +#~ msgid "Successfully registered backing: %s." +#~ msgstr "" + +#~ msgid "Reverting backing to snapshot: %s." +#~ msgstr "" + +#~ msgid "Initiated reverting snapshot via task: %s." +#~ msgstr "" + +#~ msgid "Successfully reverted to snapshot: %s." +#~ msgstr "" + +#~ msgid "Successfully copied disk data to: %s." +#~ msgstr "" + +#~ msgid "Error(s): %s occurred in the call to RetrieveProperties." +#~ msgstr "" + +#~ msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +#~ msgstr "" + +#~ msgid "Deploy v1 of the Cinder API. " +#~ msgstr "" + +#~ msgid "Deploy v2 of the Cinder API. " +#~ msgstr "" + +#~ msgid "_read_xml:%s" +#~ msgstr "" + +#~ msgid "request ip info is %s." +#~ msgstr "" + +#~ msgid "new str info is %s." +#~ msgstr "" + +#~ msgid "Failed to create iser target for volume %(volume_id)s." +#~ msgstr "" + +#~ msgid "Failed to remove iser target for volume %(volume_id)s." 
+#~ msgstr "" + +#~ msgid "rtstool is not installed correctly" +#~ msgstr "" + +#~ msgid "Creating iser_target for: %s" +#~ msgstr "" + +#~ msgid "Failed to create iser target for volume id:%(vol_id)s: %(e)s" +#~ msgstr "" + +#~ msgid "Removing iser_target for: %s" +#~ msgstr "" + +#~ msgid "Failed to remove iser target for volume id:%(vol_id)s: %(e)s" +#~ msgstr "" + +#~ msgid "Volume %s does not exist, it seems it was already deleted" +#~ msgstr "" + +#~ msgid "Executing zfs send/recv on the appliance" +#~ msgstr "" + +#~ msgid "zfs send/recv done, new volume %s created" +#~ msgstr "" + +#~ msgid "Failed to delete temp snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" + +#~ msgid "Failed to delete zfs recv snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" + +#~ msgid "rbd export-diff failed - %s" +#~ msgstr "" + +#~ msgid "rbd import-diff failed - %s" +#~ msgstr "" + +#~ msgid "%s is not on GPFS. Perhaps GPFS not mounted." +#~ msgstr "" + +#~ msgid "Folder %s does not exist, it seems it was already deleted." +#~ msgstr "" + +#~ msgid "No 'os-update_readonly_flag' was specified in request." +#~ msgstr "" + +#~ msgid "Volume 'readonly' flag must be specified in request as a boolean." +#~ msgstr "" + +#~ msgid "ISER provider_location not stored, using discovery" +#~ msgstr "" + +#~ msgid "Could not find iSER export for volume %s" +#~ msgstr "" + +#~ msgid "ISER Discovery: Found %s" +#~ msgstr "" + +#~ msgid "Failed to access the device on the path %(path)s: %(error)s." +#~ msgstr "" + +#~ msgid "iSER device not found at %s" +#~ msgstr "" + +#~ msgid "Found iSER node %(host_device)s (after %(tries)s rescans)." +#~ msgstr "" + +#~ msgid "Skipping ensure_export. No iser_target provisioned for volume: %s" +#~ msgstr "" + +#~ msgid "Skipping remove_export. No iser_target provisioned for volume: %s" +#~ msgstr "" + +#~ msgid "Downloading image: %s from glance image server." +#~ msgstr "" + +#~ msgid "Uploading image: %s to the Glance image server." +#~ msgstr "" + +#~ msgid "Invalid request body" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: prefix %s" +#~ msgstr "" + +#~ msgid "Schedule volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete schedule volume using flow: %s" +#~ msgstr "" + +#~ msgid "Create volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete create volume workflow" +#~ msgstr "" + +#~ msgid "Expected volume result not found" +#~ msgstr "" + +#~ msgid "Manager volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete manager volume workflow" +#~ msgstr "" + +#~ msgid "Unable to update stats, driver is uninitialized" +#~ msgstr "" + +#~ msgid "Bad reponse from server: %s" +#~ msgstr "" + +#~ msgid "%(flow)s has moved into state %(state)s from state %(old_state)s" +#~ msgstr "" + +#~ msgid "No request spec, will not reschedule" +#~ msgstr "" + +#~ msgid "No retry filter property or associated retry info, will not reschedule" +#~ msgstr "" + +#~ msgid "Retry info not present, will not reschedule" +#~ msgstr "" + +#~ msgid "Clear capabilities" +#~ msgstr "" + +#~ msgid "This usually means the volume was never succesfully created." +#~ msgstr "" + +#~ msgid "setting LU uppper (end) limit to %s" +#~ msgstr "" + +#~ msgid "Can't find lun or lun goup in array" +#~ msgstr "" + +#~ msgid "Volume to be restored to is smaller than the backup to be restored" +#~ msgstr "" + +#~ msgid "Volume driver '%(driver)s' not initialized." 
+#~ msgstr "" + +#~ msgid "in looping call" +#~ msgstr "" + +#~ msgid "Is the appropriate service running?" +#~ msgstr "" + +#~ msgid "Could not find another host" +#~ msgstr "" + +#~ msgid "Not enough allocatable volume gigabytes remaining" +#~ msgstr "" + +#~ msgid "Unable to update stats on non-intialized Volume Group: %s" +#~ msgstr "" + +#~ msgid "do_setup: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "migrate_volume started with more than one vdisk copy" +#~ msgstr "" + +#~ msgid "migrate_volume: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "Selected datastore: %s for the volume." +#~ msgstr "" + +#~ msgid "There are no valid datastores present under %s." +#~ msgstr "" + +#~ msgid "Unable to create volume, driver not initialized" +#~ msgstr "" + +#~ msgid "Migration %(migration_id)s could not be found." +#~ msgstr "" + +#~ msgid "Bad driver response status: %(status)s" +#~ msgstr "" + +#~ msgid "Instance %(instance_id)s could not be found." +#~ msgstr "" + +#~ msgid "Volume retype failed: %(reason)s" +#~ msgstr "" + +#~ msgid "SIGTERM received" +#~ msgstr "" + +#~ msgid "Child %(pid)d exited with status %(code)d" +#~ msgstr "" + +#~ msgid "_wait_child %d" +#~ msgstr "" + +#~ msgid "wait wrap.failed %s" +#~ msgstr "" + +#~ msgid "" +#~ "Report interval must be less than " +#~ "service down time. Current config: " +#~ ". Setting " +#~ "service_down_time to: %(new_service_down_time)s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target for volume %(name)s." +#~ msgstr "" + +#~ msgid "Updating iscsi target: %s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target %(name)s: %(e)s" +#~ msgstr "" + +#~ msgid "Caught '%(exception)s' exception." +#~ msgstr "" + +#~ msgid "Get code level failed" +#~ msgstr "" + +#~ msgid "do_setup: Could not get system name" +#~ msgstr "" + +#~ msgid "Failed to get license information." +#~ msgstr "" + +#~ msgid "do_setup: No configured nodes" +#~ msgstr "" + +#~ msgid "enter: _get_chap_secret_for_host: host name %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_chap_secret_for_host: host name " +#~ "%(host_name)s with secret %(chap_secret)s" +#~ msgstr "" + +#~ msgid "" +#~ "_create_host: Cannot clean host name. " +#~ "Host name is not unicode or string" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: %s" +#~ msgstr "" + +#~ msgid "leave: _get_host_from_connector: host %s" +#~ msgstr "" + +#~ msgid "enter: _create_host: host %s" +#~ msgstr "" + +#~ msgid "_create_host: No connector ports" +#~ msgstr "" + +#~ msgid "leave: _create_host: host %(host)s - %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +#~ msgstr "" + +#~ msgid "" +#~ "storwize_svc_multihostmap_enabled is set to " +#~ "False, Not allow multi host mapping" +#~ msgstr "" + +#~ msgid "volume %s mapping to multi host" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _map_vol_to_host: LUN %(result_lun)s, " +#~ "volume %(volume_name)s, host %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "leave: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "_create_host failed to return the host name." +#~ msgstr "" + +#~ msgid "_get_host_from_connector failed to return the host name for connector" +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to any host found." +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: Multiple mappings of " +#~ "volume %(vol_name)s found, no host " +#~ "specified." 
+#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to host %(host_name)s found" +#~ msgstr "" + +#~ msgid "protocol must be specified as ' iSCSI' or ' FC'" +#~ msgstr "" + +#~ msgid "enter: _create_vdisk: vdisk %s " +#~ msgstr "" + +#~ msgid "" +#~ "_create_vdisk %(name)s - did not find success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find success " +#~ "message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find mapping " +#~ "id in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to prepare FlashCopy" +#~ " from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "Unexecpted mapping status %(status)s for " +#~ "mapping %(id)s. Attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Mapping %(id)s prepare failed to " +#~ "complete within the allotted %(to)d " +#~ "seconds timeout. Terminating." +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s with " +#~ "exception %(ex)s" +#~ msgstr "" + +#~ msgid "_prepare_fc_map: %s" +#~ msgstr "" + +#~ msgid "" +#~ "_start_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "enter: _run_flashcopy: execute FlashCopy from" +#~ " source %(source)s to target %(target)s" +#~ msgstr "" + +#~ msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +#~ msgstr "" + +#~ msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %(src_vdisk)s (%(src_id)s) does not exist" +#~ msgstr "" + +#~ msgid "" +#~ "_create_copy: cannot get source vdisk " +#~ "%(src)s capacity from vdisk attributes " +#~ "%(attr)s" +#~ msgstr "" + +#~ msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_flashcopy_mapping_attributes: mapping " +#~ "%(fc_map_id)s, attributes %(attributes)s" +#~ msgstr "" + +#~ msgid "enter: _is_vdisk_defined: vdisk %s " +#~ msgstr "" + +#~ msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +#~ msgstr "" + +#~ msgid "enter: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "warning: Tried to delete vdisk %s but it does not exist." 
+#~ msgstr "" + +#~ msgid "leave: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "" +#~ "_add_vdisk_copy %(name)s - did not find" +#~ " success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "_get_vdisk_copy_attrs: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "_get_pool_attrs: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "enter: _execute_command_and_parse_attributes: command %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _execute_command_and_parse_attributes:\n" +#~ "command: %(cmd)s\n" +#~ "attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "_get_hdr_dic: attribute headers and values do not match.\n" +#~ " Headers: %(header)s\n" +#~ " Values: %(row)s" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ "stdout: %(out)s\n" +#~ "stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "Did not find expected column in %(fun)s: %(hdr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Volume size %(size)s cannot be lesser" +#~ " than the snapshot size %(snap_size)s. " +#~ "They must be >= original snapshot " +#~ "size." +#~ msgstr "" + +#~ msgid "" +#~ "Clones currently disallowed when %(size)s " +#~ "< %(source_size)s. They must be >= " +#~ "original volume size." +#~ msgstr "" + +#~ msgid "" +#~ "Size of specified image %(image_size)s " +#~ "is larger than volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "" +#~ "Image minDisk size %(min_disk)s is " +#~ "larger than the volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "Updating volume %(volume_id)s with %(update)s" +#~ msgstr "" + +#~ msgid "Volume %s: resetting 'creating' status failed" +#~ msgstr "" + +#~ msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s" +#~ msgstr "" + +#~ msgid "Marking volume %s as bootable" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting download of %(image_id)s " +#~ "(%(image_location)s) to volume %(volume_id)s" +#~ msgstr "" + +#~ msgid "" +#~ "Downloaded image %(image_id)s (%(image_location)s)" +#~ " to volume %(volume_id)s successfully" +#~ msgstr "" + +#~ msgid "" +#~ "Creating volume glance metadata for " +#~ "volume %(volume_id)s backed by image " +#~ "%(image_id)s with: %(vol_metadata)s" +#~ msgstr "" + +#~ msgid "" +#~ "Cloning %(volume_id)s from image %(image_id)s" +#~ " at location %(image_location)s" +#~ msgstr "" + diff --git a/cinder/locale/fil/LC_MESSAGES/cinder.po b/cinder/locale/fil/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..189e72dd26 --- /dev/null +++ b/cinder/locale/fil/LC_MESSAGES/cinder.po @@ -0,0 +1,10412 @@ +# Filipino translations for cinder. +# Copyright (C) 2013 ORGANIZATION +# This file is distributed under the same license as the cinder project. 
+# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Cinder\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-02-03 06:16+0000\n" +"PO-Revision-Date: 2013-09-26 22:10+0000\n" +"Last-Translator: Tom Fifield \n" +"Language-Team: Filipino " +"(http://www.transifex.com/projects/p/openstack/language/fil/)\n" +"Plural-Forms: nplurals=2; plural=(n > 1)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:102 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:66 cinder/brick/exception.py:33 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:88 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:107 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:112 +#, python-format +msgid "Volume driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:116 +#, python-format +msgid "Backup driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:120 +#, python-format +msgid "Connection to glance failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:124 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:129 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:133 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:137 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:141 +msgid "Volume driver not ready." +msgstr "" + +#: cinder/exception.py:145 cinder/brick/exception.py:74 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:150 +#, python-format +msgid "Invalid snapshot: %(reason)s" +msgstr "" + +#: cinder/exception.py:154 +#, python-format +msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:159 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:163 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:167 +msgid "The results are invalid." +msgstr "" + +#: cinder/exception.py:171 +#, python-format +msgid "Invalid input received: %(reason)s" +msgstr "" + +#: cinder/exception.py:175 +#, python-format +msgid "Invalid volume type: %(reason)s" +msgstr "" + +#: cinder/exception.py:179 +#, python-format +msgid "Invalid volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:183 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:187 +#, python-format +msgid "Invalid host: %(reason)s" +msgstr "" + +#: cinder/exception.py:193 cinder/brick/exception.py:81 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:197 +#, python-format +msgid "Invalid auth key: %(reason)s" +msgstr "" + +#: cinder/exception.py:201 +#, python-format +msgid "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" +msgstr "" + +#: cinder/exception.py:206 +msgid "Service is unavailable at this time." 
+msgstr "" + +#: cinder/exception.py:210 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:214 +#, python-format +msgid "The device in the path %(path)s is unavailable: %(reason)s" +msgstr "" + +#: cinder/exception.py:218 +#, python-format +msgid "Expected a uuid but received %(uuid)s." +msgstr "" + +#: cinder/exception.py:222 cinder/brick/exception.py:68 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:228 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:232 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "" +"Volume %(volume_id)s has no administration metadata with key " +"%(metadata_key)s." +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Invalid metadata: %(reason)s" +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Invalid metadata size: %(reason)s" +msgstr "" + +#: cinder/exception.py:250 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:255 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:264 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:269 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s deletion is not allowed with volumes " +"present with the type." +msgstr "" + +#: cinder/exception.py:274 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:278 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:282 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:287 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:291 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:295 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:328 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:332 +#, python-format +msgid "Unknown quota resources %(unknown)s." 
+msgstr "" + +#: cinder/exception.py:336 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:340 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:344 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:348 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:352 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:356 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:365 +#, python-format +msgid "Volume Type %(id)s already exists." +msgstr "" + +#: cinder/exception.py:369 +#, python-format +msgid "Volume type encryption for type %(type_id)s already exists." +msgstr "" + +#: cinder/exception.py:373 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:377 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:381 +#, python-format +msgid "Could not find parameter %(param)s" +msgstr "" + +#: cinder/exception.py:385 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:389 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:398 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:402 +#, python-format +msgid "Quota exceeded: code=%(code)s" +msgstr "" + +#: cinder/exception.py:409 +#, python-format +msgid "" +"Requested volume or snapshot exceeds allowed Gigabytes quota. Requested " +"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." +msgstr "" + +#: cinder/exception.py:415 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:419 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:423 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:427 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:432 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:436 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:440 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:444 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:449 +#, python-format +msgid "Glance metadata for volume/snapshot %(id)s cannot be found." 
+msgstr "" + +#: cinder/exception.py:453 +#, python-format +msgid "Failed to export for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:457 +#, python-format +msgid "Failed to create metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:461 +#, python-format +msgid "Failed to update metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:465 +#, python-format +msgid "Failed to copy metadata to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:469 +#, python-format +msgid "Failed to copy image to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:473 +msgid "Invalid Ceph args provided for backup rbd operation" +msgstr "" + +#: cinder/exception.py:477 +msgid "An error has occurred during backup operation" +msgstr "" + +#: cinder/exception.py:481 +msgid "Backup RBD operation failed" +msgstr "" + +#: cinder/exception.py:485 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:489 +msgid "Failed to identify volume backend." +msgstr "" + +#: cinder/exception.py:493 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "" + +#: cinder/exception.py:497 +#, python-format +msgid "Connection to swift failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:501 +#, python-format +msgid "Transfer %(transfer_id)s could not be found." +msgstr "" + +#: cinder/exception.py:505 +#, python-format +msgid "Volume migration failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:509 +#, python-format +msgid "SSH command injection detected: %(command)s" +msgstr "" + +#: cinder/exception.py:513 +#, python-format +msgid "QoS Specs %(specs_id)s already exists." +msgstr "" + +#: cinder/exception.py:517 +#, python-format +msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:522 +#, python-format +msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "No such QoS spec %(specs_id)s." +msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:536 +#, python-format +msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:541 +#, python-format +msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." +msgstr "" + +#: cinder/exception.py:546 +#, python-format +msgid "Invalid qos specs: %(reason)s" +msgstr "" + +#: cinder/exception.py:550 +#, python-format +msgid "QoS Specs %(specs_id)s is still associated with entities." +msgstr "" + +#: cinder/exception.py:554 +#, python-format +msgid "key manager error: %(reason)s" +msgstr "" + +#: cinder/exception.py:560 +msgid "Coraid Cinder Driver exception." +msgstr "" + +#: cinder/exception.py:564 +msgid "Failed to encode json data." +msgstr "" + +#: cinder/exception.py:568 +msgid "Login on ESM failed." +msgstr "" + +#: cinder/exception.py:572 +msgid "Relogin on ESM failed." +msgstr "" + +#: cinder/exception.py:576 +#, python-format +msgid "Group with name \"%(group_name)s\" not found." +msgstr "" + +#: cinder/exception.py:580 +#, python-format +msgid "ESM configure request failed: %(message)s." +msgstr "" + +#: cinder/exception.py:584 +#, python-format +msgid "Coraid ESM not available with reason: %(reason)s." +msgstr "" + +#: cinder/exception.py:589 +msgid "Zadara Cinder Driver exception." 
+msgstr "" + +#: cinder/exception.py:593 +#, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:597 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:601 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:605 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:609 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:613 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:618 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:622 +msgid "SolidFire Cinder Driver exception" +msgstr "" + +#: cinder/exception.py:626 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:630 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:636 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:641 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:645 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:649 cinder/exception.py:662 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:654 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:658 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/manager.py:133 +msgid "Notifying Schedulers of capabilities ..." +msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:105 +#, python-format +msgid "" +"Default quota for resource: %(res)s is set by the default quota flag: " +"quota_%(res)s, it is now deprecated. Please use the the default quota " +"class for default quota." +msgstr "" + +#: cinder/quota.py:748 +#, python-format +msgid "Created reservations %s" +msgstr "" + +#: cinder/quota.py:770 +#, python-format +msgid "Failed to commit reservations %s" +msgstr "" + +#: cinder/quota.py:790 +#, python-format +msgid "Failed to roll back reservations %s" +msgstr "" + +#: cinder/quota.py:876 +msgid "Cannot register resource" +msgstr "" + +#: cinder/quota.py:879 +msgid "Cannot register resources" +msgstr "" + +#: cinder/quota_utils.py:46 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume - " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/quota_utils.py:56 cinder/transfer/api.py:168 +#: cinder/volume/flows/api/create_volume.py:520 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/service.py:95 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:108 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:148 +#, python-format +msgid "" +"Report interval must be less than service down time. Current config " +"service_down_time: %(service_down_time)s, report_interval for this: " +"service is: %(report_interval)s. 
Setting global service_down_time to: " +"%(new_down_time)s" +msgstr "" + +#: cinder/service.py:216 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:255 +msgid "The service database object disappeared; recreating it." +msgstr "" + +#: cinder/service.py:270 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:276 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:298 +#, python-format +msgid "" +"Value of config option %(name)s_workers must be an integer greater than 1. " +"Input value ignored." +msgstr "" + +#: cinder/service.py:373 +msgid "serve() can only be called once" +msgstr "" + +#: cinder/service.py:379 cinder/openstack/common/service.py:166 +#: cinder/openstack/common/service.py:384 +msgid "Full set of CONF:" +msgstr "" + +#: cinder/service.py:387 +#, python-format +msgid "%s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Can not translate %s to integer." +msgstr "" + +#: cinder/utils.py:127 +#, python-format +msgid "May specify only one of %s" +msgstr "" + +#: cinder/utils.py:212 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:228 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:412 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:423 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:698 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:759 +#, python-format +msgid "Volume driver %s not initialized" +msgstr "" + +#: cinder/wsgi.py:127 cinder/openstack/common/sslutils.py:50 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: cinder/wsgi.py:130 cinder/openstack/common/sslutils.py:53 +#, python-format +msgid "Unable to find ca_file : %s" +msgstr "" + +#: cinder/wsgi.py:133 cinder/openstack/common/sslutils.py:56 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: cinder/wsgi.py:136 cinder/openstack/common/sslutils.py:59 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:169 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:206 +#, python-format +msgid "Started %(name)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:244 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:313 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." 
+msgstr "" + +#: cinder/api/common.py:92 cinder/api/common.py:126 cinder/volume/api.py:266 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:95 cinder/api/common.py:130 cinder/volume/api.py:263 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:120 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:134 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:162 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:189 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:182 +msgid "Initializing extension manager." +msgstr "" + +#: cinder/api/extensions.py:197 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:235 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:236 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:240 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:256 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:262 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:276 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:287 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:356 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:266 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:463 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:786 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:907 +msgid "subclasses must implement construct()!" 
+msgstr "" + +#: cinder/api/contrib/admin_actions.py:81 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:227 +#: cinder/api/contrib/volume_transfer.py:157 +#: cinder/api/contrib/volume_transfer.py:193 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:224 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:60 +msgid "Snapshot not found." +msgstr "" + +#: cinder/api/contrib/hosts.py:86 cinder/api/openstack/wsgi.py:245 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:136 +#, python-format +msgid "Host '%s' could not be found." +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:168 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:180 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:206 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:214 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:111 +msgid "Please specify a name for QoS specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:220 +msgid "Failed to disassociate qos specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:222 +msgid "Qos specs still in use." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:298 +#: cinder/api/contrib/qos_specs_manage.py:351 +msgid "Volume Type id must not be None." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:72 +msgid "Missing required element quota_class_set in request body." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:81 +msgid "Quota class limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:85 +msgid "Quota class limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:60 +msgid "Quota limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quotas.py:65 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:100 +msgid "Missing required element quota_set in request body." +msgstr "" + +#: cinder/api/contrib/quotas.py:111 +#, python-format +msgid "Bad key(s) in quota set: %s" +msgstr "" + +#: cinder/api/contrib/scheduler_hints.py:36 +msgid "Malformed scheduler_hints attribute" +msgstr "" + +#: cinder/api/contrib/services.py:84 +msgid "" +"Query by service parameter is deprecated. Please use binary parameter " +"instead." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:51 +msgid "'status' must be specified." 
+msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:61 +#, python-format +msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:67 +#, python-format +msgid "" +"Provided snapshot status %(provided)s not allowed for snapshot with " +"status %(current)s." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:79 +msgid "progress must be an integer percentage" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:101 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:105 +#: cinder/api/v1/snapshot_metadata.py:75 cinder/api/v1/volume_metadata.py:75 +#: cinder/api/v2/snapshot_metadata.py:75 cinder/api/v2/volume_metadata.py:74 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:108 +#: cinder/api/v1/snapshot_metadata.py:79 cinder/api/v1/volume_metadata.py:79 +#: cinder/api/v2/snapshot_metadata.py:79 cinder/api/v2/volume_metadata.py:78 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:150 +msgid "" +"Key names can only contain alphanumeric characters, underscores, periods," +" colons and hyphens." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:99 +#, python-format +msgid "" +"Invalid request to attach volume to an instance %(instance_uuid)s and a " +"host %(host_name)s simultaneously" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:107 +msgid "Invalid request to attach volume to an invalid target" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:111 +msgid "" +"Invalid request to attach volume with an invalid mode. Attaching mode " +"should be 'rw' or 'ro'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:196 +msgid "Unable to fetch connection information from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:216 +msgid "Unable to terminate volume connection from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:229 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:237 +msgid "Bad value for 'force' parameter." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:240 +msgid "'force' is not string or bool." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:280 +msgid "New volume size must be specified as an integer." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:299 +msgid "Must specify readonly in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:307 +msgid "Bad value for 'readonly'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:311 +msgid "'readonly' not string or bool" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:325 +msgid "New volume type must be specified." 
+msgstr "" + +#: cinder/api/contrib/volume_transfer.py:131 +msgid "Listing volume transfers" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:147 +#, python-format +msgid "Creating new volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:162 +#, python-format +msgid "Creating transfer of volume %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:183 +#, python-format +msgid "Accepting volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:196 +#, python-format +msgid "Accepting transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:217 +#, python-format +msgid "Delete transfer with id: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:64 +msgid "key_size must be non-negative" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:67 +msgid "key_size must be an integer" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:73 +msgid "provider must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:75 +msgid "control_location must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:83 +#, python-format +msgid "Valid control location are: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:111 +msgid "Cannot create encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:115 +msgid "Create body is not valid." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:157 +msgid "Cannot delete encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/middleware/auth.py:108 +msgid "Invalid service catalog json." +msgstr "" + +#: cinder/api/middleware/fault.py:44 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:53 cinder/api/openstack/wsgi.py:984 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/fault.py:69 +#, python-format +msgid "%(exception)s: %(explanation)s" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:55 cinder/api/middleware/sizelimit.py:64 +#: cinder/api/middleware/sizelimit.py:78 +msgid "Request is too large." +msgstr "" + +#: cinder/api/openstack/__init__.py:69 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:80 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:104 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:126 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." 
+msgstr "" + +#: cinder/api/openstack/wsgi.py:220 cinder/api/openstack/wsgi.py:634 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:639 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:677 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:682 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:685 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:793 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:799 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:803 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:914 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:917 cinder/api/openstack/wsgi.py:930 +#: cinder/api/v1/snapshot_metadata.py:53 cinder/api/v1/snapshot_metadata.py:71 +#: cinder/api/v1/snapshot_metadata.py:96 cinder/api/v1/snapshot_metadata.py:121 +#: cinder/api/v1/volume_metadata.py:53 cinder/api/v1/volume_metadata.py:71 +#: cinder/api/v1/volume_metadata.py:96 cinder/api/v1/volume_metadata.py:121 +#: cinder/api/v2/snapshot_metadata.py:53 cinder/api/v2/snapshot_metadata.py:71 +#: cinder/api/v2/snapshot_metadata.py:96 cinder/api/v2/snapshot_metadata.py:121 +#: cinder/api/v2/volume_metadata.py:52 cinder/api/v2/volume_metadata.py:70 +#: cinder/api/v2/volume_metadata.py:95 cinder/api/v2/volume_metadata.py:120 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:927 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:939 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:987 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:139 cinder/api/v2/limits.py:138 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:264 cinder/api/v2/limits.py:261 +msgid "This request was rate-limited." 
+msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:37 cinder/api/v1/snapshot_metadata.py:117 +#: cinder/api/v1/snapshot_metadata.py:156 cinder/api/v2/snapshot_metadata.py:37 +#: cinder/api/v2/snapshot_metadata.py:117 +#: cinder/api/v2/snapshot_metadata.py:156 +msgid "snapshot does not exist" +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:139 +#: cinder/api/v1/snapshot_metadata.py:149 cinder/api/v1/volume_metadata.py:139 +#: cinder/api/v1/volume_metadata.py:149 cinder/api/v2/snapshot_metadata.py:139 +#: cinder/api/v2/snapshot_metadata.py:149 cinder/api/v2/volume_metadata.py:138 +#: cinder/api/v2/volume_metadata.py:148 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:119 cinder/api/v2/snapshots.py:120 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:184 +msgid "'volume_id' must be specified" +msgstr "" + +#: cinder/api/v1/snapshots.py:182 cinder/api/v2/snapshots.py:193 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:186 cinder/api/v2/snapshots.py:202 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:37 cinder/api/v1/volume_metadata.py:117 +#: cinder/api/v1/volume_metadata.py:156 cinder/api/v2/volume_metadata.py:36 +#: cinder/api/v2/volume_metadata.py:116 cinder/api/v2/volume_metadata.py:155 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:111 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:290 cinder/api/v2/volumes.py:228 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:344 cinder/api/v1/volumes.py:348 +#: cinder/api/v2/volumes.py:298 cinder/api/v2/volumes.py:302 +msgid "Invalid imageRef provided." +msgstr "" + +#: cinder/api/v1/volumes.py:388 cinder/api/v2/volumes.py:354 +#, python-format +msgid "snapshot id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:401 +#, python-format +msgid "source vol id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:412 cinder/api/v2/volumes.py:377 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/v1/volumes.py:496 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/snapshots.py:111 cinder/api/v2/snapshots.py:126 +#: cinder/api/v2/snapshots.py:267 +msgid "Snapshot could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:174 cinder/api/v2/snapshots.py:234 +#: cinder/api/v2/volumes.py:313 cinder/api/v2/volumes.py:419 +#, python-format +msgid "Missing required element '%s' in request body" +msgstr "" + +#: cinder/api/v2/snapshots.py:190 cinder/api/v2/volumes.py:217 +#: cinder/api/v2/volumes.py:234 cinder/api/v2/volumes.py:449 +msgid "Volume could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:230 cinder/api/v2/volumes.py:415 +msgid "Missing request body" +msgstr "" + +#: cinder/api/v2/types.py:70 +msgid "Volume type not found" +msgstr "" + +#: cinder/api/v2/volumes.py:237 +msgid "Volume cannot be deleted while in attached state" +msgstr "" + +#: cinder/api/v2/volumes.py:343 +msgid "Volume type not found." 
+msgstr "" + +#: cinder/api/v2/volumes.py:366 +#, python-format +msgid "source volume id:%s not found" +msgstr "" + +#: cinder/api/v2/volumes.py:472 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:66 +msgid "Backup status must be available or error" +msgstr "" + +#: cinder/backup/api.py:105 +msgid "Volume to be backed up must be available" +msgstr "" + +#: cinder/backup/api.py:140 +msgid "Backup status must be available" +msgstr "" + +#: cinder/backup/api.py:145 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:154 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:170 +msgid "Volume to be restored to must be available" +msgstr "" + +#: cinder/backup/api.py:176 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." +msgstr "" + +#: cinder/backup/api.py:181 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:97 +msgid "NULL host not allowed for volume backend lookup." +msgstr "" + +#: cinder/backup/manager.py:100 +#, python-format +msgid "Checking hostname '%s' for backend info." +msgstr "" + +#: cinder/backup/manager.py:107 +#, python-format +msgid "Backend not found in hostname (%s) so using default." +msgstr "" + +#: cinder/backup/manager.py:117 +#, python-format +msgid "Manager requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:120 cinder/backup/manager.py:132 +msgid "Fetching default backend." +msgstr "" + +#: cinder/backup/manager.py:123 +#, python-format +msgid "Volume manager for backend '%s' does not exist." +msgstr "" + +#: cinder/backup/manager.py:129 +#, python-format +msgid "Driver requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:147 +#, python-format +msgid "" +"Registering backend %(backend)s (host=%(host)s " +"backend_name=%(backend_name)s)." +msgstr "" + +#: cinder/backup/manager.py:154 +#, python-format +msgid "Registering default backend %s." +msgstr "" + +#: cinder/backup/manager.py:158 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)." +msgstr "" + +#: cinder/backup/manager.py:165 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s." +msgstr "" + +#: cinder/backup/manager.py:184 +msgid "Cleaning up incomplete backup operations." +msgstr "" + +#: cinder/backup/manager.py:189 +#, python-format +msgid "Resetting volume %s to available (was backing-up)." +msgstr "" + +#: cinder/backup/manager.py:194 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)." +msgstr "" + +#: cinder/backup/manager.py:206 +#, python-format +msgid "Resetting backup %s to error (was creating)." +msgstr "" + +#: cinder/backup/manager.py:212 +#, python-format +msgid "Resetting backup %s to available (was restoring)." +msgstr "" + +#: cinder/backup/manager.py:217 +#, python-format +msgid "Resuming delete on backup: %s." +msgstr "" + +#: cinder/backup/manager.py:225 +#, python-format +msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:237 +#, python-format +msgid "" +"Create backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s." 
+msgstr "" + +#: cinder/backup/manager.py:249 +#, python-format +msgid "" +"Create backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:282 +#, python-format +msgid "Create backup finished. backup: %s." +msgstr "" + +#: cinder/backup/manager.py:286 +#, python-format +msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:299 +#, python-format +msgid "" +"Restore backup aborted: expected volume status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:310 +#, python-format +msgid "" +"Restore backup aborted: expected backup status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:329 +#, python-format +msgid "" +"Restore backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:360 +#, python-format +msgid "" +"Restore backup finished, backup %(backup_id)s restored to volume " +"%(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:379 +#, python-format +msgid "Delete backup started, backup: %s." +msgstr "" + +#: cinder/backup/manager.py:386 +#, python-format +msgid "" +"Delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:399 +#, python-format +msgid "" +"Delete backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:422 +#, python-format +msgid "Delete backup finished, backup %s deleted." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:116 +msgid "" +"rbd striping not supported - ignoring configuration settings for rbd " +"striping" +msgstr "" + +#: cinder/backup/drivers/ceph.py:147 +#, python-format +msgid "invalid user '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:213 +msgid "backup_id required" +msgstr "" + +#: cinder/backup/drivers/ceph.py:224 +#, python-format +msgid "discarding %(length)s bytes from offset %(offset)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:232 +#, python-format +msgid "writing zeroes chunk %d" +msgstr "" + +#: cinder/backup/drivers/ceph.py:246 +#, python-format +msgid "transferring data between '%(src)s' and '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:250 +#, python-format +msgid "%(chunks)s chunks of %(bytes)s bytes to be transferred" +msgstr "" + +#: cinder/backup/drivers/ceph.py:269 +#, python-format +msgid "transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:279 +#, python-format +msgid "transferring remaining %s bytes" +msgstr "" + +#: cinder/backup/drivers/ceph.py:295 +#, python-format +msgid "creating base image '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:322 cinder/backup/drivers/ceph.py:603 +#, python-format +msgid "deleting backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:325 +msgid "no backup snapshot to delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:361 +#, python-format +msgid "trying diff format name format basename='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:369 +#, python-format +msgid "image %s not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:377 +#, python-format +msgid "base image still has %s snapshots so skipping base image delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:382 +#, python-format +msgid "deleting base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:389 +#, python-format +msgid "image busy, retrying %(retries)s more time(s) in %(delay)ss" +msgstr "" + +#: cinder/backup/drivers/ceph.py:394 +msgid "max retries reached - raising error" +msgstr "" + +#: cinder/backup/drivers/ceph.py:397 +#, python-format +msgid "base backup image='%s' deleted)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:407 +#, python-format +msgid "deleting source snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:453 +#, python-format +msgid "performing differential transfer from '%(src)s' to '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:478 +#, python-format +msgid "rbd diff op failed - (ret=%(ret)s stderr=%(stderr)s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:488 +#, python-format +msgid "image '%s' not found - trying diff format name" +msgstr "" + +#: cinder/backup/drivers/ceph.py:493 +#, python-format +msgid "diff format image '%s' not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:528 +#, python-format +msgid "using --from-snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:543 +#, python-format +msgid "source snap '%s' is stale so deleting" +msgstr "" + +#: cinder/backup/drivers/ceph.py:555 +#, python-format +msgid "" +"snap='%(snap)s' does not exist in base image='%(base)s' - aborting " +"incremental backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:566 +#, python-format +msgid "creating backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:586 +#, python-format +msgid "differential backup transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:595 +msgid "differential backup transfer failed" +msgstr "" + +#: 
cinder/backup/drivers/ceph.py:625 +#, python-format +msgid "creating base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:634 +msgid "copying data" +msgstr "" + +#: cinder/backup/drivers/ceph.py:694 +#, python-format +msgid "looking for snapshot of backup base '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:697 +#, python-format +msgid "backup base '%s' has no snapshots" +msgstr "" + +#: cinder/backup/drivers/ceph.py:704 +#, python-format +msgid "backup '%s' has no snapshot" +msgstr "" + +#: cinder/backup/drivers/ceph.py:708 +#, python-format +msgid "backup should only have one snapshot but instead has %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:713 +#, python-format +msgid "found snapshot '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:734 +msgid "need non-zero volume size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:751 +#, python-format +msgid "Starting backup of volume='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:764 +msgid "forcing full backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:776 +#, python-format +msgid "backup '%s' finished." +msgstr "" + +#: cinder/backup/drivers/ceph.py:834 +msgid "adjusting restore vol size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:846 +#, python-format +msgid "trying incremental restore from base='%(base)s' snap='%(snap)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:858 +msgid "differential restore failed, trying full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:869 +#, python-format +msgid "restore transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:916 +#, python-format +msgid "rbd has %s extents" +msgstr "" + +#: cinder/backup/drivers/ceph.py:938 +msgid "dest volume is original volume - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:959 +msgid "destination has extents - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:964 +#, python-format +msgid "no restore point found for backup='%s', forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:995 +msgid "forcing full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1005 +#, python-format +msgid "starting restore from Ceph backup=%(src)s to volume=%(dest)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1016 +msgid "volume_file does not support fileno() so skipping fsync()" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1021 +msgid "restore finished successfully." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:1023 +#, python-format +msgid "restore finished with error - %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1029 +#, python-format +msgid "delete started for backup=%s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1034 +msgid "rbd image not found but continuing anyway so that db entry can be removed" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1037 +#, python-format +msgid "delete '%s' finished with warning" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1039 +#, python-format +msgid "delete '%s' finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:106 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:123 +#, python-format +msgid "single_user auth mode enabled, but %(param)s not set" +msgstr "" + +#: cinder/backup/drivers/swift.py:141 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:146 +#, python-format +msgid "container %s does not exist" +msgstr "" + +#: cinder/backup/drivers/swift.py:151 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/drivers/swift.py:157 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:173 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:182 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:192 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:209 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/drivers/swift.py:214 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:219 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:224 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/drivers/swift.py:234 +#, python-format +msgid "volume size %d is invalid." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:248 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:271 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/drivers/swift.py:278 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:287 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/drivers/swift.py:291 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/drivers/swift.py:297 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:301 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:304 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:312 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/drivers/swift.py:328 cinder/backup/drivers/tsm.py:324 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/drivers/swift.py:345 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/drivers/swift.py:350 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:356 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/drivers/swift.py:362 +#, python-format +msgid "" +"restoring object from swift. backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:378 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/drivers/swift.py:401 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:409 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:423 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:428 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:432 cinder/backup/drivers/tsm.py:378 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:446 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:455 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:458 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:468 cinder/backup/drivers/tsm.py:440 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/backup/drivers/tsm.py:85 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to create device hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:143 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to obtain backup success notification from " +"server.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:173 +#, python-format +msgid "" +"restore: %(vol_id)s Failed.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:199 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a block device." +msgstr "" + +#: cinder/backup/drivers/tsm.py:206 +#, python-format +msgid "backup: %(vol_id)s Failed. Cannot obtain real path to device %(path)s." +msgstr "" + +#: cinder/backup/drivers/tsm.py:213 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a file." +msgstr "" + +#: cinder/backup/drivers/tsm.py:260 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to remove backup hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:286 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to TSM, volume path: " +"%(volume_path)s," +msgstr "" + +#: cinder/backup/drivers/tsm.py:298 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:308 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:338 +#, python-format +msgid "" +"restore: starting restore of backup from TSM to volume %(volume_id)s, " +"backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:352 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:362 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:413 +#, python-format +msgid "" +"delete: %(vol_id)s Failed to run dsmc with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:421 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments with " +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:432 +#, python-format +msgid "" +"delete: %(vol_id)s Failed with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/brick/exception.py:55 +#, python-format +msgid "Exception in string format operation. 
msg='%s'" +msgstr "" + +#: cinder/brick/exception.py:85 +msgid "We are unable to locate any Fibre Channel devices." +msgstr "" + +#: cinder/brick/exception.py:89 +msgid "Unable to find a Fibre Channel volume device." +msgstr "" + +#: cinder/brick/exception.py:93 +#, python-format +msgid "Volume device not found at %(device)s." +msgstr "" + +#: cinder/brick/exception.py:97 +#, python-format +msgid "Unable to find Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:101 +#, python-format +msgid "Failed to create Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:105 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:109 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:113 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:117 +#, python-format +msgid "Connect to volume via protocol %(protocol)s not supported." +msgstr "" + +#: cinder/brick/initiator/connector.py:127 +#, python-format +msgid "Invalid InitiatorConnector protocol specified %(protocol)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:140 +#, python-format +msgid "Failed to access the device on the path %(path)s: %(error)s %(info)s." +msgstr "" + +#: cinder/brick/initiator/connector.py:229 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. Try" +" number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:242 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:317 +#, python-format +msgid "Could not find the iSCSI Initiator File %s" +msgstr "" + +#: cinder/brick/initiator/connector.py:609 +msgid "We are unable to locate any Fibre Channel devices" +msgstr "" + +#: cinder/brick/initiator/connector.py:619 +#, python-format +msgid "Looking for Fibre Channel dev %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:629 +msgid "Fibre Channel volume device not found." +msgstr "" + +#: cinder/brick/initiator/connector.py:633 +#, python-format +msgid "Fibre volume not yet found. Will rescan & retry. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:649 +#, python-format +msgid "Found Fibre Channel volume %(name)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:658 +#, python-format +msgid "Multipath device discovered %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:776 +#, python-format +msgid "AoE volume not yet found at: %(path)s. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:789 +#, python-format +msgid "Found AoE device %(path)s (after %(tries)s rediscover)" +msgstr "" + +#: cinder/brick/initiator/connector.py:815 +#, python-format +msgid "aoe-discover: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:825 +#, python-format +msgid "aoe-revalidate %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:834 +#, python-format +msgid "aoe-flush %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:858 +msgid "" +"Connection details not present. RemoteFsClient may not initialize " +"properly." 
+msgstr "" + +#: cinder/brick/initiator/connector.py:915 +msgid "Invalid connection_properties specified no device_path attribute" +msgstr "" + +#: cinder/brick/initiator/linuxfc.py:50 cinder/brick/initiator/linuxfc.py:56 +msgid "systool is not installed" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:99 +#: cinder/brick/initiator/linuxscsi.py:107 +#: cinder/brick/initiator/linuxscsi.py:124 +#, python-format +msgid "multipath call failed exit (%(code)s)" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:145 +#, python-format +msgid "Couldn't find multipath device %(line)s" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:149 +#, python-format +msgid "Found multipath device = %(mdev)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:140 +msgid "Attempting recreate of backing lun..." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:158 +#, python-format +msgid "" +"Failed to recover attempt to create iscsi backing lun for volume " +"id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:177 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:184 +#, python-format +msgid "" +"Created volume path %(vp)s,\n" +"content: %(vc)%" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:216 cinder/brick/iscsi/iscsi.py:365 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:227 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:258 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:262 +#, python-format +msgid "Volume path %s does not exist, nothing to remove." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:280 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:290 cinder/brick/iscsi/iscsi.py:550 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:375 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:469 +msgid "cinder-rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:489 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:513 cinder/brick/iscsi/iscsi.py:522 +#, python-format +msgid "Failed to create iscsi target for volume id:%s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:532 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:542 +#, python-format +msgid "Failed to remove iscsi target for volume id:%s." 
+msgstr "" + +#: cinder/brick/iscsi/iscsi.py:571 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 cinder/brick/local_dev/lvm.py:158 +#: cinder/brick/local_dev/lvm.py:474 cinder/brick/local_dev/lvm.py:503 +#: cinder/brick/local_dev/lvm.py:546 cinder/brick/local_dev/lvm.py:609 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 cinder/brick/local_dev/lvm.py:159 +#: cinder/brick/local_dev/lvm.py:475 cinder/brick/local_dev/lvm.py:504 +#: cinder/brick/local_dev/lvm.py:547 cinder/brick/local_dev/lvm.py:610 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 cinder/brick/local_dev/lvm.py:160 +#: cinder/brick/local_dev/lvm.py:476 cinder/brick/local_dev/lvm.py:505 +#: cinder/brick/local_dev/lvm.py:548 cinder/brick/local_dev/lvm.py:611 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, python-format +msgid "Unable to locate Volume Group %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:157 +msgid "Error querying thin pool about data_percent" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:370 +#, python-format +msgid "Unable to find VG: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:420 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:473 +msgid "Error creating Volume" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:489 +#, python-format +msgid "Unable to find LV: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:502 +msgid "Error creating snapshot" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:545 +msgid "Error activating LV" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:563 +#, python-format +msgid "Error reported running lvremove: CMD: %(command)s, RESPONSE: %(response)s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:568 +msgid "Attempting udev settle and retry of lvremove..." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:608 +msgid "Error extending Volume" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:39 +msgid "nfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:45 +msgid "glusterfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:86 +#, python-format +msgid "Already mounted: %s" +msgstr "" + +#: cinder/common/config.py:125 +msgid "Deploy v1 of the Cinder API." +msgstr "" + +#: cinder/common/config.py:128 +msgid "Deploy v2 of the Cinder API." +msgstr "" + +#: cinder/common/sqlalchemyutils.py:66 +#: cinder/openstack/common/db/sqlalchemy/utils.py:72 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:114 +#: cinder/openstack/common/db/sqlalchemy/utils.py:120 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/compute/nova.py:97 +#, python-format +msgid "Novaclient connection created using URL: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:63 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:190 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:843 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1842 +#, python-format +msgid "VolumeType %s deletion failed, VolumeType in use." 
+msgstr "" + +#: cinder/db/sqlalchemy/api.py:2530 +#, python-format +msgid "No backup with id %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2615 +msgid "Volume must be available" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2639 +#, python-format +msgid "Volume in unexpected state %s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2662 +#, python-format +msgid "" +"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " +"%(status)s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:37 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:64 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:240 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:269 +msgid "Downgrade from initial Cinder install is unsupported." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:49 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:74 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:105 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:45 +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:48 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:46 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:127 +msgid "Dropping foreign key reservations_ibfk_1 failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:133 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:140 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:147 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:60 +msgid "Exception while creating table 'volume_glance_metadata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:75 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:68 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:58 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:61 +msgid "transfers table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:31 +msgid "migrations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:61 +#, python-format +msgid "Table |%s| not created" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:37 +#, python-format +msgid "Exception while dropping table %s." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:100 +#, python-format +msgid "Exception while creating table %s." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:34 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:43 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:49 +#, python-format +msgid "Column |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:92 +msgid "encryption_key_id column not dropped from volumes" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:100 +msgid "encryption_key_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:105 +msgid "volume_type_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:113 +msgid "encryption table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:49 +msgid "Table quality_of_service_specs not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:60 +msgid "Added qos_specs_id column to volume type table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:85 +msgid "Dropping foreign key volume_types_ibfk_1 failed" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:93 +msgid "Dropping qos_specs_id column failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:100 +msgid "Dropping quality_of_service_specs table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:59 +msgid "volume_admin_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:48 +msgid "" +"Found existing 'default' entries in the quota_classes table. Skipping " +"insertion of default values." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:72 +msgid "Added default quota class data into the DB." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:74 +msgid "Default quota class data not inserted into the DB." +msgstr "" + +#: cinder/image/glance.py:161 cinder/image/glance.py:169 +#, python-format +msgid "Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:94 cinder/image/image_utils.py:199 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/image/image_utils.py:101 +#, python-format +msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:109 cinder/image/image_utils.py:192 +#, python-format +msgid "" +"Size is %(image_size)dGB and doesn't fit in a volume of size " +"%(volume_size)dGB." +msgstr "" + +#: cinder/image/image_utils.py:157 +#, python-format +msgid "" +"qemu-img is not installed and image is of type %s. Only RAW images can " +"be used if qemu-img is not installed." +msgstr "" + +#: cinder/image/image_utils.py:164 +msgid "" +"qemu-img is not installed and the disk format is not specified. Only RAW" +" images can be used if qemu-img is not installed." 
+msgstr "" + +#: cinder/image/image_utils.py:178 +#, python-format +msgid "Copying image from %(tmp)s to volume %(dest)s - size: %(size)s" +msgstr "" + +#: cinder/image/image_utils.py:206 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:224 +#, python-format +msgid "Converted to %(vol_format)s, but format is now %(file_format)s" +msgstr "" + +#: cinder/image/image_utils.py:260 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:78 +msgid "" +"config option keymgr.fixed_key has not been defined: some operations may " +"fail unexpectedly" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:80 +msgid "keymgr.fixed_key not defined" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:134 +#, python-format +msgid "Not deleting key %s" +msgstr "" + +#: cinder/openstack/common/eventlet_backdoor.py:140 +#, python-format +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/fileutils.py:64 +#, python-format +msgid "Reloading cached file %s" +msgstr "" + +#: cinder/openstack/common/gettextutils.py:252 +msgid "Message objects do not support addition." +msgstr "" + +#: cinder/openstack/common/gettextutils.py:261 +msgid "" +"Message objects do not support str() because they may contain non-ascii " +"characters. Please use unicode() or translate() instead." +msgstr "" + +#: cinder/openstack/common/imageutils.py:96 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:189 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:200 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:227 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:235 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/log.py:301 +#, python-format +msgid "Deprecated: %s" +msgstr "" + +#: cinder/openstack/common/log.py:402 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:453 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:623 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:82 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:89 +#: cinder/tests/brick/test_brick_connector.py:466 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:129 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:136 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:43 +#, python-format +msgid "Unexpected argument for periodic task creation: %(arg)s." 
+msgstr "" + +#: cinder/openstack/common/periodic_task.py:134 +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:139 +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:177 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:186 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." +msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:127 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/openstack/common/processutils.py:142 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:167 +#: cinder/openstack/common/processutils.py:239 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:345 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:179 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/openstack/common/processutils.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:318 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:220 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/openstack/common/processutils.py:224 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/openstack/common/service.py:175 +#: cinder/openstack/common/service.py:269 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:187 +msgid "Exception during rpc cleanup." 
+msgstr "" + +#: cinder/openstack/common/service.py:238 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:275 +msgid "Unhandled exception" +msgstr "" + +#: cinder/openstack/common/service.py:308 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/openstack/common/service.py:327 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/openstack/common/service.py:337 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/openstack/common/service.py:354 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/openstack/common/service.py:358 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/service.py:362 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/openstack/common/service.py:392 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/openstack/common/service.py:410 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/openstack/common/sslutils.py:98 +#, python-format +msgid "Invalid SSL version : %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:86 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/strutils.py:182 +#, python-format +msgid "Invalid string format: %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:189 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/openstack/common/versionutils.py:69 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and " +"may be removed in %(remove_in)s." +msgstr "" + +#: cinder/openstack/common/versionutils.py:73 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s and may be removed in " +"%(remove_in)s. It will not be superseded." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:29 +msgid "An unknown error occurred in crypto utils." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:36 +#, python-format +msgid "Block size of %(given)d is too big, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:45 +#, python-format +msgid "Length of %(given)d is too long, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/db/exception.py:44 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:487 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:538 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:610 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/utils.py:33 +msgid "Sort key supplied was not valid." +msgstr "" + +#: cinder/openstack/common/notifier/api.py:129 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:145 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:164 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. 
Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:105 +#, python-format +msgid "" +"An RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:83 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "no calling threads waiting for msg_id: %s, message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:216 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshold: %d. There " +"could be a MulticallProxyWaiter leak." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:299 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:345 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "received %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:422 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:423 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:280 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:459 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:594 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:597 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:631 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:640 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:668 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint."
+msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:166 +#: cinder/openstack/common/rpc/impl_qpid.py:164 +msgid "Failed to process message... skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:477 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:499 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:536 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:552 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:606 +#: cinder/openstack/common/rpc/impl_qpid.py:507 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:624 +#: cinder/openstack/common/rpc/impl_qpid.py:522 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:628 +#: cinder/openstack/common/rpc/impl_qpid.py:526 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:667 +#: cinder/openstack/common/rpc/impl_qpid.py:561 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:84 +#, python-format +msgid "Invalid value for qpid_topology_version: %d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:455 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:461 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:474 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:534 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:101 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:136 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:137 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:138 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:146 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:158 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:200 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:205 +msgid "You cannot send on this socket." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:267 +#, python-format +msgid "Running func with context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:305 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:387 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:437 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:443 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:475 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:481 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:497 +#, python-format +msgid "Required IPC directory does not exist at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:506 +#, python-format +msgid "Permission denied to IPC directory at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:509 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:543 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:562 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:590 +msgid "Skipping topic registration. Already registered." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:597 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:649 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:662 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:675 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:678 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:681 +#, python-format +msgid "Received message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:682 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:691 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:698 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:721 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:724 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:728 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:731 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:771 +#, python-format +msgid "topic is %s." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:815 +#, python-format +msgid "rpc_zmq_matchmaker = %(orig)s is deprecated; use %(new)s instead" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#: cinder/openstack/common/rpc/matchmaker_ring.py:79 +#: cinder/openstack/common/rpc/matchmaker_ring.py:97 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:54 +#, python-format +msgid "extra_spec requirement '%(req)s' does not match '%(cap)s'" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:67 +#, python-format +msgid "%(host_state)s fails resource_type extra_specs requirements" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:43 +msgid "Re-scheduling is disabled." +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:52 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/scheduler/driver.py:69 +msgid "Must implement host_passes_filters" +msgstr "" + +#: cinder/scheduler/driver.py:74 +msgid "Must implement find_retype_host" +msgstr "" + +#: cinder/scheduler/driver.py:78 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:82 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:98 +#, python-format +msgid "cannot place volume %(id)s on %(host)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:114 +#, python-format +msgid "No valid hosts for volume %(id)s with type %(type)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:125 +#, python-format +msgid "" +"Current host not valid for volume %(id)s with type %(type)s, migration " +"not allowed" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:156 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:174 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:207 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:259 +#, python-format +msgid "Filtered %s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:276 +#, python-format +msgid "Choosing %s" +msgstr "" + +#: cinder/scheduler/host_manager.py:264 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:269 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:294 +#, python-format +msgid "volume service is down or disabled. (host: %s)" +msgstr "" + +#: cinder/scheduler/manager.py:63 +msgid "" +"ChanceScheduler and SimpleScheduler have been deprecated due to lack of " +"support for advanced features like: volume types, volume encryption, QoS " +"etc. These two schedulers can be fully replaced by FilterScheduler with " +"certain combination of filters and weighers." 
+msgstr "" + +#: cinder/scheduler/manager.py:98 cinder/scheduler/manager.py:100 +msgid "Failed to create scheduler manager volume flow" +msgstr "" + +#: cinder/scheduler/manager.py:159 +msgid "New volume type not specified in request_spec." +msgstr "" + +#: cinder/scheduler/manager.py:174 +#, python-format +msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." +msgstr "" + +#: cinder/scheduler/manager.py:192 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:68 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%s'" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:43 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:57 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:53 +msgid "No volume_id provided to populate a request_spec from" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:116 +#, python-format +msgid "Failed to schedule_create_volume: %(cause)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:135 +#, python-format +msgid "Failed notifying on %(topic)s payload %(payload)s" +msgstr "" + +#: cinder/tests/fake_driver.py:57 cinder/volume/driver.py:784 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:76 cinder/volume/driver.py:884 +#, python-format +msgid "FAKE ISER: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:97 +msgid "local_path not implemented" +msgstr "" + +#: cinder/tests/fake_driver.py:124 cinder/tests/fake_driver.py:129 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:70 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:78 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:94 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:97 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:58 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_netapp_nfs.py:360 +#, python-format +msgid "Share %(share)s and file name %(file_name)s" +msgstr "" + +#: cinder/tests/test_rbd.py:768 cinder/volume/drivers/rbd.py:175 +msgid "flush() not supported in this version of librbd" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:260 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1507 +#, python-format +msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1510 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1515 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:60 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:61 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: 
cinder/tests/test_xiv_ds8k.py:102 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/tests/api/contrib/test_backups.py:741 +msgid "Invalid input" +msgstr "" + +#: cinder/tests/integrated/test_login.py:29 +#, python-format +msgid "volume: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:32 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:42 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:50 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:58 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:100 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:103 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:121 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:148 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:159 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:166 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/transfer/api.py:68 +msgid "Volume in unexpected state" +msgstr "" + +#: cinder/transfer/api.py:102 cinder/volume/api.py:367 +msgid "status must be available" +msgstr "" + +#: cinder/transfer/api.py:119 +#, python-format +msgid "Failed to create transfer record for %s" +msgstr "" + +#: cinder/transfer/api.py:136 +#, python-format +msgid "Attempt to transfer %s with invalid auth key." +msgstr "" + +#: cinder/transfer/api.py:156 cinder/volume/flows/api/create_volume.py:508 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/transfer/api.py:182 +#, python-format +msgid "Failed to update quota donating volume transfer id %s" +msgstr "" + +#: cinder/transfer/api.py:199 +#, python-format +msgid "Volume %s has been transferred."
+msgstr "" + +#: cinder/volume/api.py:143 +#, python-format +msgid "Unable to query if %s is in the availability zone set" +msgstr "" + +#: cinder/volume/api.py:171 cinder/volume/api.py:173 +msgid "Failed to create api volume flow" +msgstr "" + +#: cinder/volume/api.py:202 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:214 +#, python-format +msgid "Volume status must be available or error, but current status is: %s" +msgstr "" + +#: cinder/volume/api.py:224 +msgid "Volume cannot be deleted while migrating" +msgstr "" + +#: cinder/volume/api.py:229 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:285 cinder/volume/api.py:350 +#: cinder/volume/qos_specs.py:240 cinder/volume/volume_types.py:67 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:370 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:377 +msgid "status must be in-use to detach" +msgstr "" + +#: cinder/volume/api.py:388 +msgid "Volume status must be available to reserve" +msgstr "" + +#: cinder/volume/api.py:464 +msgid "Snapshot cannot be created while volume is migrating" +msgstr "" + +#: cinder/volume/api.py:468 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:490 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:502 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:553 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/api.py:581 cinder/volume/flows/api/create_volume.py:208 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:585 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:589 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:720 cinder/volume/api.py:772 +msgid "Volume status must be available/in-use." +msgstr "" + +#: cinder/volume/api.py:723 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/api.py:752 +msgid "Volume status must be available to extend." +msgstr "" + +#: cinder/volume/api.py:757 +#, python-format +msgid "" +"New size for extend must be greater than current size. (current: " +"%(size)s, extended: %(new_size)s)" +msgstr "" + +#: cinder/volume/api.py:778 +msgid "Volume is already part of an active migration" +msgstr "" + +#: cinder/volume/api.py:784 +msgid "volume must not have snapshots" +msgstr "" + +#: cinder/volume/api.py:797 +#, python-format +msgid "No available service named %s" +msgstr "" + +#: cinder/volume/api.py:803 +msgid "Destination host must be different than current host" +msgstr "" + +#: cinder/volume/api.py:833 +msgid "Source volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:837 +msgid "Destination volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:842 +#, python-format +msgid "Destination has migration_status %(stat)s, expected %(exp)s." +msgstr "" + +#: cinder/volume/api.py:853 +msgid "Volume status must be available to update readonly flag." 
+msgstr "" + +#: cinder/volume/api.py:862 +#, python-format +msgid "Unable to update type due to incorrect status on volume: %s" +msgstr "" + +#: cinder/volume/api.py:868 +#, python-format +msgid "Volume %s is already part of an active migration." +msgstr "" + +#: cinder/volume/api.py:874 +#, python-format +msgid "migration_policy must be 'on-demand' or 'never', passed: %s" +msgstr "" + +#: cinder/volume/api.py:887 +#, python-format +msgid "Invalid volume_type passed: %s" +msgstr "" + +#: cinder/volume/api.py:900 +#, python-format +msgid "New volume_type same as original: %s" +msgstr "" + +#: cinder/volume/api.py:915 +msgid "Retype cannot change encryption requirements" +msgstr "" + +#: cinder/volume/api.py:927 +msgid "Retype cannot change front-end qos specs for in-use volumes" +msgstr "" + +#: cinder/volume/driver.py:189 cinder/volume/drivers/netapp/nfs.py:174 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:282 +#, python-format +msgid "copy_data_between_volumes %(src)s -> %(dest)s." +msgstr "" + +#: cinder/volume/driver.py:295 cinder/volume/driver.py:309 +#, python-format +msgid "Failed to attach volume %(vol)s" +msgstr "" + +#: cinder/volume/driver.py:327 +#, python-format +msgid "Failed to copy volume %(src)s to %(dest)d" +msgstr "" + +#: cinder/volume/driver.py:340 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:358 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:394 +#, python-format +msgid "Unable to access the backend storage via the path %(path)s." +msgstr "" + +#: cinder/volume/driver.py:433 +#, python-format +msgid "Creating a new backup for volume %s." +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Restoring backup %(backup)s to volume %(volume)s." +msgstr "" + +#: cinder/volume/driver.py:474 +msgid "Extend volume not implemented" +msgstr "" + +#: cinder/volume/driver.py:533 cinder/volume/drivers/emc/emc_smis_iscsi.py:113 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:546 +#, python-format +msgid "ISCSI discovery attempt failed for:%s" +msgstr "" + +#: cinder/volume/driver.py:548 +#, python-format +msgid "Error from iscsiadm -m discovery: %s" +msgstr "" + +#: cinder/volume/driver.py:595 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:599 cinder/volume/drivers/emc/emc_smis_iscsi.py:156 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:696 +msgid "The volume driver requires the iSCSI initiator name in the connector." +msgstr "" + +#: cinder/volume/driver.py:726 cinder/volume/driver.py:845 +#: cinder/volume/drivers/eqlx.py:247 cinder/volume/drivers/lvm.py:359 +#: cinder/volume/drivers/zadara.py:650 +#: cinder/volume/drivers/emc/emc_smis_common.py:859 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:235 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:602 +#: cinder/volume/drivers/netapp/iscsi.py:1032 +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/nexenta/iscsi.py:538 +#: cinder/volume/drivers/windows/windows.py:205 +msgid "Updating volume stats" +msgstr "" + +#: cinder/volume/driver.py:924 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:203 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." 
+msgstr "" + +#: cinder/volume/manager.py:209 +msgid "" +"ThinLVMVolumeDriver is deprecated, please configure LVMISCSIDriver and " +"lvm_type=thin. Continuing with those settings." +msgstr "" + +#: cinder/volume/manager.py:228 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)" +msgstr "" + +#: cinder/volume/manager.py:235 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s" +msgstr "" + +#: cinder/volume/manager.py:244 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:257 +#, python-format +msgid "Failed to re-export volume %s: setting to error state" +msgstr "" + +#: cinder/volume/manager.py:264 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:271 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:273 +#, python-format +msgid "" +"Error encountered during re-exporting phase of driver initialization: " +"%(name)s" +msgstr "" + +#: cinder/volume/manager.py:283 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:328 cinder/volume/manager.py:330 +msgid "Failed to create manager volume flow" +msgstr "" + +#: cinder/volume/manager.py:374 cinder/volume/manager.py:391 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:380 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:389 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:394 +#, python-format +msgid "Cannot delete volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:422 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:427 +#, python-format +msgid "volume %s: glance metadata deleted" +msgstr "" + +#: cinder/volume/manager.py:430 +#, python-format +msgid "no glance metadata found for volume %s" +msgstr "" + +#: cinder/volume/manager.py:434 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:451 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:462 +#, python-format +msgid "snapshot %(snap_id)s: creating" +msgstr "" + +#: cinder/volume/manager.py:490 +#, python-format +msgid "" +"Failed updating %(snapshot_id)s metadata using the provided volumes " +"%(volume_id)s metadata" +msgstr "" + +#: cinder/volume/manager.py:496 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:508 cinder/volume/manager.py:518 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:526 +#, python-format +msgid "Cannot delete snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:556 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:559 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:579 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:583 +msgid "being attached by another host" +msgstr "" + +#: cinder/volume/manager.py:587 +msgid "being attached by different mode" +msgstr "" + +#: cinder/volume/manager.py:590 +msgid "status must be available or attaching" +msgstr "" + +#: cinder/volume/manager.py:698 +#, python-format +msgid 
"Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "" + +#: cinder/volume/manager.py:760 +#, python-format +msgid "Unable to fetch connection information from backend: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:807 +#, python-format +msgid "Unable to terminate volume connection: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:854 +msgid "failed to create new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:857 +msgid "timeout creating new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:880 +#, python-format +msgid "Failed to copy volume %(vol1)s to %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:909 +#, python-format +msgid "" +"migrate_volume_completion: completing migration for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:921 +#, python-format +msgid "" +"migrate_volume_completion is cleaning up an error for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:940 +#, python-format +msgid "Failed to delete migration source vol %(vol)s: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:976 +#, python-format +msgid "volume %s: calling driver migrate_volume" +msgstr "" + +#: cinder/volume/manager.py:1016 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/manager.py:1024 +#, python-format +msgid "" +"Unable to update stats, %(driver_name)s -%(driver_version)s " +"%(config_group)s driver is uninitialized." +msgstr "" + +#: cinder/volume/manager.py:1044 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/manager.py:1091 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to extend volume by %(s_size)sG, " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/manager.py:1103 +#, python-format +msgid "volume %s: extending" +msgstr "" + +#: cinder/volume/manager.py:1105 +#, python-format +msgid "volume %s: extended successfully" +msgstr "" + +#: cinder/volume/manager.py:1107 +#, python-format +msgid "volume %s: Error trying to extend volume" +msgstr "" + +#: cinder/volume/manager.py:1169 +msgid "Failed to update usages while retyping volume." +msgstr "" + +#: cinder/volume/manager.py:1170 +msgid "Failed to get old volume type quota reservations" +msgstr "" + +#: cinder/volume/manager.py:1190 +#, python-format +msgid "Volume %s: retyped succesfully" +msgstr "" + +#: cinder/volume/manager.py:1193 +#, python-format +msgid "" +"Volume %s: driver error when trying to retype, falling back to generic " +"mechanism." +msgstr "" + +#: cinder/volume/manager.py:1204 +msgid "Retype requires migration but is not allowed." +msgstr "" + +#: cinder/volume/manager.py:1212 +msgid "Volume must not have snapshots." 
+msgstr "" + +#: cinder/volume/qos_specs.py:57 +#, python-format +msgid "Valid consumer of QoS specs are: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:84 cinder/volume/qos_specs.py:105 +#: cinder/volume/qos_specs.py:155 cinder/volume/qos_specs.py:197 +#: cinder/volume/qos_specs.py:211 cinder/volume/qos_specs.py:225 +#: cinder/volume/volume_types.py:43 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:123 cinder/volume/qos_specs.py:140 +#: cinder/volume/qos_specs.py:272 cinder/volume/volume_types.py:52 +#: cinder/volume/volume_types.py:99 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/qos_specs.py:156 +#, python-format +msgid "Failed to get all associations of qos specs %s" +msgstr "" + +#: cinder/volume/qos_specs.py:189 +#, python-format +msgid "" +"Type %(type_id)s is already associated with another qos specs: " +"%(qos_specs_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:198 +#, python-format +msgid "Failed to associate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:212 +#, python-format +msgid "Failed to disassociate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:226 +#, python-format +msgid "Failed to disassociate qos specs %s." +msgstr "" + +#: cinder/volume/qos_specs.py:284 cinder/volume/volume_types.py:111 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/utils.py:144 +#, python-format +msgid "" +"Incorrect value error: %(blocksize)s, it may indicate that " +"'volume_dd_blocksize' was configured incorrectly. Fall back to default." +msgstr "" + +#: cinder/volume/volume_types.py:130 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:131 +#: cinder/volume/drivers/block_device.py:143 cinder/volume/drivers/lvm.py:654 +#: cinder/volume/drivers/lvm.py:669 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:157 cinder/volume/drivers/lvm.py:687 +#, python-format +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:183 cinder/volume/drivers/lvm.py:483 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:200 cinder/volume/drivers/lvm.py:504 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:272 cinder/volume/drivers/lvm.py:227 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:287 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:311 cinder/volume/drivers/lvm.py:300 +#: cinder/volume/drivers/zadara.py:509 cinder/volume/drivers/nexenta/nfs.py:189 +#, python-format +msgid "Creating clone of volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:380 +msgid "No free disk" +msgstr "" + +#: cinder/volume/drivers/block_device.py:393 +msgid "No big enough free disk" +msgstr "" + +#: cinder/volume/drivers/coraid.py:84 +#, python-format +msgid "Invalid ESM url scheme \"%s\". Supported https only." +msgstr "" + +#: cinder/volume/drivers/coraid.py:111 +msgid "Invalid REST handle name. Expected path." 
+msgstr "" + +#: cinder/volume/drivers/coraid.py:134 +#, python-format +msgid "Call to json.loads() failed: %(ex)s. Response: %(resp)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:224 +msgid "Session is expired. Relogin on ESM." +msgstr "" + +#: cinder/volume/drivers/coraid.py:244 +msgid "Reply is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:246 +msgid "Error message is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:284 +#, python-format +msgid "Coraid Appliance ping failed: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:297 +#, python-format +msgid "Volume \"%(name)s\" created with VSX LUN \"%(lun)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:311 +#, python-format +msgid "Volume \"%s\" deleted." +msgstr "" + +#: cinder/volume/drivers/coraid.py:315 +#, python-format +msgid "Resize volume \"%(name)s\" to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:319 +#, python-format +msgid "Repository for volume \"%(name)s\" found: \"%(repo)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:333 +#, python-format +msgid "Volume \"%(name)s\" resized. New size is %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:385 +msgid "Cannot create clone volume in different repository." +msgstr "" + +#: cinder/volume/drivers/coraid.py:505 +#, python-format +msgid "Initialize connection %(shelf)s/%(lun)s for %(name)s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:139 +#, python-format +msgid "" +"CLI output\n" +"%s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:154 +msgid "Reading CLI MOTD" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:158 +#, python-format +msgid "Setting CLI terminal width: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:162 +#, python-format +msgid "Sending CLI command: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:169 +msgid "Error executing EQL command" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:199 +#, python-format +msgid "EQL-driver: executing \"%s\"" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:208 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:383 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:215 cinder/volume/drivers/san/san.py:149 +#, python-format +msgid "Error running SSH command: %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:282 +#, python-format +msgid "Volume %s does not exist, it may have already been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:300 +#, python-format +msgid "EQL-driver: Setup is complete, group IP is %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:304 +msgid "Failed to setup the Dell EqualLogic driver" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:320 +#, python-format +msgid "Failed to create volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:329 +#, python-format +msgid "Volume %s was not found while trying to delete it" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:333 +#, python-format +msgid "Failed to delete volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:348 +#, python-format +msgid "Failed to create snapshot of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:361 +#, python-format +msgid "Failed to create volume from snapshot %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:374 +#, python-format +msgid "Failed to create clone of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:384 +#, python-format +msgid "Failed to delete snapshot %(snap)s of volume %(vol)s" +msgstr "" + +#: 
cinder/volume/drivers/eqlx.py:405 +#, python-format +msgid "Failed to initialize connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:415 +#, python-format +msgid "Failed to terminate connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:436 +#, python-format +msgid "Volume %s was not found; it may have been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:440 +#, python-format +msgid "Failed to ensure export of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:459 +#, python-format +msgid "Failed to extend_volume %(name)s from %(current_size)s GB to %(new_size)s GB" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:86 +#, python-format +msgid "No Gluster config file is configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:91 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:103 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:161 +#, python-format +msgid "Cloning volume %(src)s to volume %(dst)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:166 +msgid "Volume status must be 'available'." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:202 cinder/volume/drivers/nfs.py:122 +#: cinder/volume/drivers/netapp/nfs.py:753 +#, python-format +msgid "cast to %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:216 +msgid "Snapshot status must be \"available\" to clone." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:238 +#, python-format +msgid "snapshot: %(snap)s, volume: %(vol)s, volume_size: %(size)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:257 +#, python-format +msgid "will copy from snapshot at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:275 cinder/volume/drivers/nfs.py:172 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:373 +#, python-format +msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:403 +#, python-format +msgid "nova call result: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:405 +msgid "Call to Nova to create snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:427 +msgid "Nova returned \"error\" status while creating snapshot." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:431 +#, python-format +msgid "Status of snapshot %(id)s is now %(status)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:444 +#, python-format +msgid "Timed out while waiting for Nova update for creation of snapshot %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:456 +#, python-format +msgid "create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:457 +#, python-format +msgid "volume id: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:532 +msgid "'active' must be present when writing snap_info." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:562 +#, python-format +msgid "deleting snapshot %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:566 +msgid "Volume status must be \"available\" or \"in-use\"." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:582 +#, python-format +msgid "" +"Snapshot record for %s is not present, allowing snapshot_delete to " +"proceed."
+msgstr "" + +#: cinder/volume/drivers/glusterfs.py:587 +#, python-format +msgid "snapshot_file for this snap is %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:608 +#, python-format +msgid "No base file found for %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:625 +#, python-format +msgid "No %(base_id)s found for %(file)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:680 +#, python-format +msgid "No file found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:690 +#, python-format +msgid "No snap found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:701 +#, python-format +msgid "No file depends on %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:727 +#, python-format +msgid "Check condition failed: %s expected to be None." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:778 +msgid "Call to Nova delete snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:796 +#, python-format +msgid "status of snapshot %s is still \"deleting\"... waiting" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:802 +#, python-format +msgid "Unable to delete snapshot %(id)s, status: %(status)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:815 +#, python-format +msgid "Timed out while waiting for Nova update for deletion of snapshot %(id)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:904 +#, python-format +msgid "%s must be a valid raw or qcow2 image." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:967 +msgid "Extend volume is only supported for this driver when no snapshots exist." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:975 +#, python-format +msgid "Unrecognized backing format: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:990 +#, python-format +msgid "creating new volume at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:993 +#, python-format +msgid "file already exists at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1019 cinder/volume/drivers/nfs.py:159 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1021 +#, python-format +msgid "Available shares: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1038 +#, python-format +msgid "" +"GlusterFS share at %(dir)s is not writable by the Cinder volume service. " +"Snapshot operations will not be supported." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:96 +#, python-format +msgid "GPFS is not active. Detailed output: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:97 +#, python-format +msgid "GPFS is not running - state: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:140 +msgid "Option gpfs_mount_point_base is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:147 +msgid "Option gpfs_images_share_mode is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:153 +msgid "Option gpfs_images_dir is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:160 +#, python-format +msgid "" +"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " +"belong to different file systems" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:169 +#, python-format +msgid "" +"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in " +"cluster daemon level %(cur)s - must be at least at level %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:183 +#, python-format +msgid "%s must be an absolute path." 
+msgstr "" + +#: cinder/volume/drivers/gpfs.py:188 +#, python-format +msgid "%s is not a directory." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:197 +#, python-format +msgid "" +"The GPFS filesystem %(fs)s is not at the required release level. Current" +" level is %(cur)s, must be at least %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:556 +#, python-format +msgid "Failed to resize volume %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:604 +#, python-format +msgid "mkfs failed on volume %(vol)s, error message was: %(err)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:637 +#, python-format +msgid "" +"%s cannot be accessed. Verify that GPFS is active and file system is " +"mounted." +msgstr "" + +#: cinder/volume/drivers/lvm.py:189 +#, python-format +msgid "Unabled to delete due to existing snapshot for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:215 +#, python-format +msgid "Volume device file path %s does not exist." +msgstr "" + +#: cinder/volume/drivers/lvm.py:221 +#, python-format +msgid "Size for volume: %s not found, cannot secure delete." +msgstr "" + +#: cinder/volume/drivers/lvm.py:262 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:361 +#, python-format +msgid "Unable to update stats on non-initialized Volume Group: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:462 +#, python-format +msgid "Error creating iSCSI target, retrying creation for target: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:482 +#, python-format +msgid "volume_info:%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:518 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:519 cinder/volume/drivers/lvm.py:724 +#: cinder/volume/drivers/huawei/rest_common.py:1225 +#, python-format +msgid "%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:573 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/nfs.py:109 +msgid "Driver specific implementation needs to return mount_point_base." +msgstr "" + +#: cinder/volume/drivers/nfs.py:263 +#, python-format +msgid "Expected volume size was %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:264 +#, python-format +msgid " but size is now %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:361 +#, python-format +msgid "%s is already mounted" +msgstr "" + +#: cinder/volume/drivers/nfs.py:421 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:426 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/nfs.py:431 +#, python-format +msgid "NFS config 'nfs_oversub_ratio' invalid. Must be > 0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:439 +#, python-format +msgid "NFS config 'nfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:493 +#, python-format +msgid "Selected %s as target nfs share." 
+msgstr "" + +#: cinder/volume/drivers/nfs.py:526 +#, python-format +msgid "%s is above nfs_used_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:529 +#, python-format +msgid "%s is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:532 +#, python-format +msgid "%s reserved space is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/rbd.py:160 +#, python-format +msgid "Invalid argument - whence=%s not supported" +msgstr "" + +#: cinder/volume/drivers/rbd.py:164 +msgid "Invalid argument" +msgstr "" + +#: cinder/volume/drivers/rbd.py:183 +msgid "fileno() not supported by RBD()" +msgstr "" + +#: cinder/volume/drivers/rbd.py:210 +#, python-format +msgid "error opening rbd image %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:259 +msgid "rados and rbd python libraries not found" +msgstr "" + +#: cinder/volume/drivers/rbd.py:265 +msgid "error connecting to ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:346 cinder/volume/drivers/sheepdog.py:178 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:377 +#, python-format +msgid "clone depth exceeds limit of %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:411 +#, python-format +msgid "maximum clone depth (%d) has been reached - flattening source volume" +msgstr "" + +#: cinder/volume/drivers/rbd.py:423 +#, python-format +msgid "flattening source volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:435 +#, python-format +msgid "creating snapshot='%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:445 +#, python-format +msgid "cloning '%(src_vol)s@%(src_snap)s' to '%(dest)s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:459 +msgid "clone created successfully" +msgstr "" + +#: cinder/volume/drivers/rbd.py:468 +#, python-format +msgid "creating volume '%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:484 +#, python-format +msgid "flattening %(pool)s/%(img)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:490 +#, python-format +msgid "cloning %(pool)s/%(img)s@%(snap)s to %(dst)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:527 +msgid "volume has no backup snaps" +msgstr "" + +#: cinder/volume/drivers/rbd.py:550 +#, python-format +msgid "volume %s is not a clone" +msgstr "" + +#: cinder/volume/drivers/rbd.py:568 +#, python-format +msgid "deleting parent snapshot %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:579 +#, python-format +msgid "deleting parent %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:593 +#, python-format +msgid "volume %s no longer exists in backend" +msgstr "" + +#: cinder/volume/drivers/rbd.py:609 +msgid "volume has clone snapshot(s)" +msgstr "" + +#: cinder/volume/drivers/rbd.py:625 +#, python-format +msgid "deleting rbd volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:629 +msgid "" +"ImageBusy error raised while deleting rbd volume. This may have been " +"caused by a connection from a client that has crashed and, if so, may be " +"resolved by retrying the delete after 30 seconds has elapsed." 
+msgstr "" + +#: cinder/volume/drivers/rbd.py:642 +msgid "volume is a clone so cleaning references" +msgstr "" + +#: cinder/volume/drivers/rbd.py:696 +#, python-format +msgid "connection data: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:705 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:709 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:712 +msgid "Not an rbd snapshot" +msgstr "" + +#: cinder/volume/drivers/rbd.py:724 +#, python-format +msgid "not cloneable: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:728 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:733 +msgid "rbd image clone requires image format to be 'raw' but image {0} is '{1}'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:747 +#, python-format +msgid "Unable to open image %(loc)s: %(err)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:817 +msgid "volume backup complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:830 +msgid "volume restore complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:840 cinder/volume/drivers/sheepdog.py:195 +#, python-format +msgid "Failed to Extend Volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:845 cinder/volume/drivers/sheepdog.py:200 +#: cinder/volume/drivers/windows/windows.py:223 +#, python-format +msgid "Extend volume from %(old_size)s GB to %(new_size)s GB." +msgstr "" + +#: cinder/volume/drivers/scality.py:67 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:78 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:84 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:105 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:139 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:59 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:64 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:144 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:151 +#, python-format +msgid "" +"Failed to make httplib connection SolidFire Cluster: %s (verify san_ip " +"settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:154 +#, python-format +msgid "Failed to make httplib connection: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:161 +#, python-format +msgid "" +"Request to SolidFire cluster returned bad status: %(status)s / %(reason)s" +" (check san_login/san_password settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:166 +#, python-format +msgid "HTTP request failed, with status: %(status)s and reason: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:177 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:183 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:187 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:189 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:195 +#, python-format +msgid "Detected 
xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:202 +#: cinder/volume/drivers/solidfire.py:271 +#: cinder/volume/drivers/solidfire.py:366 +#, python-format +msgid "API response: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:222 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:253 +#, python-format +msgid "solidfire account: %s does not exist, creating it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:315 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:398 +msgid "Failed to get model update from clone" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:410 +#, python-format +msgid "Failed volume create: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:425 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:460 +#, python-format +msgid "Failed to get SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:469 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:478 +#, python-format +msgid "Volume %s not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:481 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:550 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:554 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:556 +msgid "This usually means the volume was never successfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:569 +#, python-format +msgid "Failed to delete SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:572 +#: cinder/volume/drivers/solidfire.py:646 +#: cinder/volume/drivers/solidfire.py:709 +#: cinder/volume/drivers/solidfire.py:734 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:575 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:579 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:587 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:638 +msgid "Entering SolidFire extend_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:660 +msgid "Leaving SolidFire extend_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:665 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:673 +msgid "Failed to get updated stats" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:703 +#: cinder/volume/drivers/solidfire.py:728 +msgid "Entering SolidFire attach_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:773 +msgid "Leaving SolidFire transfer volume" +msgstr "" + +#: cinder/volume/drivers/zadara.py:236 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:260 +#, python-format +msgid "Operation completed. 
%(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:357 +#, python-format +msgid "Pool %(name)s: %(total)sGB total, %(free)sGB free" +msgstr "" + +#: cinder/volume/drivers/zadara.py:408 cinder/volume/drivers/zadara.py:531 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:438 +#, python-format +msgid "Create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:445 cinder/volume/drivers/zadara.py:490 +#: cinder/volume/drivers/zadara.py:516 +#, python-format +msgid "Volume %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:456 +#, python-format +msgid "Delete snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:464 +#, python-format +msgid "snapshot: original volume %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:472 +#, python-format +msgid "snapshot: snapshot %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:483 +#, python-format +msgid "Creating volume from snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:496 +#, python-format +msgid "Snapshot %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:614 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:40 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:79 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:83 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:91 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:98 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:107 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:115 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:130 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:137 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:144 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:152 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:157 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:167 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:177 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:188 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:197 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:218 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:230 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:241 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:257 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:266 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:278 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:287 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:292 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:302 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:312 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:321 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:342 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. 
Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:354 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:365 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:381 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:390 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:402 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:411 +msgid "Entering delete_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:413 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:420 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:430 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:438 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:442 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigService: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:456 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:465 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:472 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:476 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:488 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:497 +#: cinder/volume/drivers/emc/emc_smis_common.py:567 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:502 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:518 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:527 +#, python-format +msgid "" +"Error Create Snapshot: %(snapshot)s Volume: %(volume)s Error: " +"%(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:535 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:541 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:545 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:551 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:559 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:574 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:590 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:599 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:611 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:621 +#, python-format +msgid "Create export: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:626 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:648 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:663 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:674 +#, python-format +msgid "Error mapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:678 +#, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:694 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:707 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:711 +#, python-format +msgid "HidePaths for volume %s completed successfully." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:724 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:739 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:744 +#, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:757 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:770 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:775 +#, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:781 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:790 +#: cinder/volume/drivers/emc/emc_smis_common.py:820 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:804 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:810 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:834 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:840 +#, python-format +msgid "Volume %s is already mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:852 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:884 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:887 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:903 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:906 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:928 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:948 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:952 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:959 +msgid "Cannot connect to ECOM server" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:971 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:984 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:997 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1010 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1054 +#, python-format +msgid "Pool %(storage_type)s is not found." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1060 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1066 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1082 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1114 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1117 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1130 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1158 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1184 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1188 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1248 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1289 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1302 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1314 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1326 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1361 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1404 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1409 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1419 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1441 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1463 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1491 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1520 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1526 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1538 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1548 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1550 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1566 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:152 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:161 +#, python-format +msgid "Cannot find device number for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:191 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:198 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:215 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:70 +#, python-format +msgid "Range: start LU: %(start)s, end LU: %(end)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:84 +#, python-format +msgid "setting LU upper (end) limit to %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:92 +#, python-format +msgid "%(element)s: %(val)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:103 cinder/volume/drivers/hds/hds.py:105 +#, python-format +msgid "XML exception reading parameter: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:178 +#, python-format +msgid "portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:197 +#, python-format +msgid "No configuration found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:250 +#, python-format +msgid "HDP not found: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:289 +#, python-format +msgid "iSCSI portal not found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:327 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:355 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is cloned." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:372 +#, python-format +msgid "LUN %(lun)s extended to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:395 +#, python-format +msgid "delete lun %(lun)s on %(name)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:480 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created from snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:503 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is created as snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:522 +#, python-format +msgid "LUN %s is deleted." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:57 +msgid "_instantiate_driver: configuration not found." 
+msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:64 +#, python-format +msgid "" +"_instantiate_driver: Loading %(protocol)s driver for Huawei OceanStor " +"%(product)s series storage arrays." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:84 +#, python-format +msgid "" +"\"Product\" or \"Protocol\" is illegal. \"Product\" should be set to " +"either T, Dorado or HVS. \"Protocol\" should be set to either iSCSI or " +"FC. Product: %(product)s Protocol: %(protocol)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:74 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s host: %(host)s initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:92 +#: cinder/volume/drivers/huawei/huawei_t.py:461 +#, python-format +msgid "initialize_connection: Target FC ports WWNS: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:101 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(ini)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:159 +#: cinder/volume/drivers/huawei/rest_common.py:1278 +#, python-format +msgid "" +"_get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " +"check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:206 +#: cinder/volume/drivers/huawei/rest_common.py:1083 +#, python-format +msgid "_get_tgt_iqn: iSCSI IP is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:234 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:248 +#, python-format +msgid "" +"_get_iscsi_tgt_port_info: Failed to get iSCSI port info. Please make sure" +" the iSCSI port IP %s is configured in array." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:323 +#: cinder/volume/drivers/huawei/huawei_t.py:552 +#, python-format +msgid "" +"terminate_connection: volume: %(vol)s, host: %(host)s, connector: " +"%(initiator)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:351 +#, python-format +msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:436 +msgid "validate_connector: The FC driver requires thewwpns in the connector." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:443 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:578 +#, python-format +msgid "_remove_fc_ports: FC port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:40 +#, python-format +msgid "parse_xml_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:129 +#, python-format +msgid "_get_host_os_type: Host %(ip)s OS type is %(os)s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:59 +#, python-format +msgid "HVS Request URL: %(url)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:60 +#, python-format +msgid "HVS Request Data: %(data)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:73 +#, python-format +msgid "HVS Response Data: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:75 +#, python-format +msgid "Bad response from server: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:82 +msgid "JSON transfer error" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:102 +#, python-format +msgid "Login error, reason is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:166 +#, python-format +msgid "" +"%(err)s\n" +"result: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:173 +#, python-format +msgid "%s \"data\" was not in result." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:208 +msgid "Can't find the Qos policy in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:246 +msgid "Can't find lun or lun group in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:280 +#, python-format +msgid "Invalid resource pool: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:298 +#, python-format +msgid "Get pool info error, pool name is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:327 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:354 +#, python-format +msgid "_stop_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:474 +#, python-format +msgid "" +"_mapping_hostgroup_and_lungroup: lun_group: %(lun_group)sview_id: " +"%(view_id)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:511 +#: cinder/volume/drivers/huawei/rest_common.py:543 +#, python-format +msgid "initiator name:%(initiator_name)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:527 +#, python-format +msgid "host lun id is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:553 +#, python-format +msgid "the free wwns %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:574 +#, python-format +msgid "the fc server properties is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:688 +#, python-format +msgid "JSON transfer data error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:874 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:937 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:964 +#, python-format +msgid "" +"PrefetchType config is wrong. PrefetchType must in 1,2,3,4. fetchtype " +"is:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:970 +msgid "Use default prefetch fetchtype. Prefetch fetchtype:Intelligent." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:982 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal.LUNcopy name: " +"%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1056 +#, python-format +msgid "" +"_get_iscsi_port_info: Failed to get iscsi port info through config IP " +"%(ip)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1101 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1124 +#, python-format +msgid "_parse_volume_type: type id: %(type_id)s config parameter is: %(params)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1157 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the configuration file " +"%(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1162 +#, python-format +msgid "The config parameters are: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1239 +#: cinder/volume/drivers/huawei/ssh_common.py:118 +#: cinder/volume/drivers/huawei/ssh_common.py:1265 +#, python-format +msgid "_check_conf_file: Config file invalid. %s must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1246 +#: cinder/volume/drivers/huawei/ssh_common.py:125 +msgid "_check_conf_file: Config file invalid. StoragePool must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1256 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1300 +msgid "Can not find lun in array" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:54 +#, python-format +msgid "ssh_read: Read SSH timeout. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:70 +msgid "No response message. Please check system status." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:101 +#: cinder/volume/drivers/huawei/ssh_common.py:1249 +msgid "do_setup" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:135 +#: cinder/volume/drivers/huawei/ssh_common.py:1287 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType is invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:169 +#, python-format +msgid "_get_login_info: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:224 +#, python-format +msgid "create_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:242 +#, python-format +msgid "" +"_name_translate: Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:279 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the element in configuration " +"file %(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:373 +#: cinder/volume/drivers/huawei/ssh_common.py:1451 +#, python-format +msgid "LUNType must be \"Thin\" or \"Thick\". LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:395 +msgid "" +"_parse_conf_lun_params: Use default prefetch type. 
Prefetch type: " +"Intelligent" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:421 +#, python-format +msgid "" +"_get_maximum_capacity_pool_id: Failed to get pool id. Please check config" +" file and make sure the StoragePool %s is created in storage array." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:436 +#, python-format +msgid "CLI command: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:466 +#, python-format +msgid "" +"_execute_cli: Can not connect to IP %(old)s, try to connect to the other " +"IP %(new)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:501 +#, python-format +msgid "_execute_cli: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:511 +#, python-format +msgid "delete_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:516 +#, python-format +msgid "delete_volume: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:570 +#, python-format +msgid "" +"create_volume_from_snapshot: snapshot name: %(snapshot)s, volume name: " +"%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:580 +#, python-format +msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:650 +#, python-format +msgid "_wait_for_luncopy: LUNcopy %(luncopyname)s status is %(status)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:688 +#, python-format +msgid "create_cloned_volume: src volume: %(src)s, tgt volume: %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:697 +#, python-format +msgid "Source volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:739 +#, python-format +msgid "" +"extend_volume: extended volume name: %(extended_name)s new added volume " +"name: %(added_name)s new added volume size: %(added_size)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:747 +#, python-format +msgid "extend_volume: volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:779 +#, python-format +msgid "create_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:785 +msgid "create_snapshot: Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:792 +#, python-format +msgid "create_snapshot: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:855 +#, python-format +msgid "delete_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:865 +#, python-format +msgid "" +"delete_snapshot: Can not delete snapshot %s for it is a source LUN of " +"LUNCopy." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:873 +#, python-format +msgid "delete_snapshot: Snapshot %(snap)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:916 +#, python-format +msgid "" +"%(func)s: %(msg)s\n" +"CLI command: %(cmd)s\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:933 +#, python-format +msgid "map_volume: Volume %s was not found." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1079 +#, python-format +msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1102 +#, python-format +msgid "remove_map: Host %s does not exist." 
+msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1106 +#, python-format +msgid "remove_map: Volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1119 +#, python-format +msgid "remove_map: No map between host %(host)s and volume %(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1138 +#, python-format +msgid "" +"_delete_map: There are IOs accessing the system. Retry to delete host map" +" %(mapid)s 10s later." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1146 +#, python-format +msgid "" +"_delete_map: Failed to delete host map %(mapid)s.\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1185 +msgid "_update_volume_stats: Updating volume stats." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1277 +msgid "_check_conf_file: Config file invalid. StoragePool must be specified." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1311 +msgid "" +"_get_device_type: The driver only supports Dorado5100 and Dorado 2100 G2 " +"now." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1389 +#, python-format +msgid "" +"create_volume_from_snapshot: %(device)s does not support create volume " +"from snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1396 +#, python-format +msgid "create_cloned_volume: %(device)s does not support clone volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1404 +#, python-format +msgid "extend_volume: %(device)s does not support extend volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1413 +#, python-format +msgid "create_snapshot: %(device)s does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:132 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:142 +#, python-format +msgid "Failed getting details for pool %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:179 +msgid "do_setup: No configured nodes." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:182 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:186 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:190 +msgid "Unable to determine system name" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:193 +msgid "Unable to determine system id" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:196 +msgid "Unable to determine pool extent size" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:203 +#: cinder/volume/drivers/netapp/iscsi.py:122 +#: cinder/volume/drivers/netapp/nfs.py:639 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:157 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:209 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:217 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:225 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:235 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:254 +msgid "The connector does not contain the required information." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:277 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:297 +msgid "CHAP secret exists for host but CHAP is disabled" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:302 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:314 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:316 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:333 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:342 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:365 +msgid "" +"Could not get FC connection information for the host-volume connection. " +"Is the host configured properly for FC connections?" 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:380 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:385 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:403 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:411 +msgid "terminate_connection: Failed to get host name from connector." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:421 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:447 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:459 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:470 +#, python-format +msgid "enter: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:474 +msgid "extend_volume: Extending a volume with snapshots is not supported." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:481 +#, python-format +msgid "leave: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:497 +#, python-format +msgid "enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:523 +#, python-format +msgid "leave: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:540 +#, python-format +msgid "" +"enter: retype: id=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:581 +#, python-format +msgid "" +"exit: retype: ild=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:622 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:623 +msgid "_update_volume_stats: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:44 +#, python-format +msgid "Could not find key in output of command %(cmd)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:64 +#, python-format +msgid "Failed to get code level (%s)." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:86 +#, python-format +msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:143 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:165 +#, python-format +msgid "Failed to find host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:178 +#, python-format +msgid "enter: get_host_from_connector: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:207 +#, python-format +msgid "leave: get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:218 +#, python-format +msgid "enter: create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:224 +msgid "create_host: Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:235 +msgid "create_host: No initiators or wwpns supplied." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:265 +#, python-format +msgid "leave: create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:275 +#, python-format +msgid "enter: map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:301 +#, python-format +msgid "" +"leave: map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host " +"%(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:311 +#, python-format +msgid "enter: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:318 +#, python-format +msgid "unmap_vol_from_host: No mapping of volume %(vol_name)s to any host found." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:324 +#, python-format +msgid "" +"unmap_vol_from_host: Multiple mappings of volume %(vol_name)s found, no " +"host specified." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:336 +#, python-format +msgid "" +"unmap_vol_from_host: No mapping of volume %(vol_name)s to host %(host) " +"found." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:348 +#, python-format +msgid "leave: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:377 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:383 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:390 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:397 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:402 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:408 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:417 +#, python-format +msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:452 +msgid "Protocol must be specified as ' iSCSI' or ' FC'." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:495 +#, python-format +msgid "enter: create_vdisk: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:498 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:525 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping%(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:535 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within theallotted %(to)d " +"seconds timeout. Terminating." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:544 +#, python-format +msgid "" +"enter: run_flashcopy: execute FlashCopy from source %(source)s to target " +"%(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:552 +#, python-format +msgid "leave: run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:572 +#, python-format +msgid "Loopcall: _check_vdisk_fc_mappings(), vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:595 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:631 +#, python-format +msgid "Calling _ensure_vdisk_no_fc_mappings: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:639 +#, python-format +msgid "enter: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:641 +#, python-format +msgid "Tried to delete non-existant vdisk %s." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:645 +#, python-format +msgid "leave: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:649 +#, python-format +msgid "enter: create_copy: snapshot %(src)s to %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:654 +#, python-format +msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:669 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt)s from vdisk %(src)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:691 +msgid "migrate_volume started without a vdisk copy in the expected pool." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:743 +#, python-format +msgid "" +"Ignore change IO group as storage code level is %(code_level)s, below " +"then 6.4.0.0" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:35 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:211 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:244 +#, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:53 +#, python-format +msgid "Expected no output from CLI command %(cmd)s, got %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:65 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:256 +#, python-format +msgid "" +"Failed to parse CLI output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:142 +msgid "Must pass wwpn or host to lsfabric." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:158 +#, python-format +msgid "Did not find success message nor error for %(fun)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:163 +msgid "" +"storwize_svc_multihostmap_enabled is set to False, not allowing multi " +"host mapping." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:347 +#, python-format +msgid "Did not find expected key %(key)s in %(fun)s: %(raw)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:382 +#, python-format +msgid "" +"Unexpected CLI response: header/row mismatch. header: %(header)s, row: " +"%(row)s" +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:419 +#, python-format +msgid "No element by given name %s." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:440 +msgid "Not a valid value for NaElement." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:444 +msgid "NaElement name cannot be null." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:468 +msgid "Type cannot be converted into NaElement." 
+msgstr "" + +#: cinder/volume/drivers/netapp/common.py:75 +msgid "Required configuration not found" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:103 +#, python-format +msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:109 +#, python-format +msgid "Storage family %s is not supported" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:116 +#, python-format +msgid "No default storage protocol found for storage family %(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:123 +#, python-format +msgid "" +"Protocol %(storage_protocol)s is not supported for storage family " +"%(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:130 +#, python-format +msgid "" +"NetApp driver of family %(storage_family)s and protocol " +"%(storage_protocol)s loaded" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:139 +msgid "Only loading netapp drivers supported." +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:158 +#, python-format +msgid "" +"The configured NetApp driver is deprecated. Please refer the link to " +"resolve the issue '%s'." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:69 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:105 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:150 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:166 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:175 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:191 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:227 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:232 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:238 +#, python-format +msgid "Failed to get LUN target details for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:249 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:252 +#, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:290 +#, python-format +msgid "Snapshot %s deletion successful" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:310 +#: cinder/volume/drivers/netapp/iscsi.py:565 +#: cinder/volume/drivers/netapp/nfs.py:99 +#: cinder/volume/drivers/netapp/nfs.py:206 +#, python-format +msgid "Resizing %s failed. Cleaning volume." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:325 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:412 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:431 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:511 +msgid "Object is not a NetApp LUN." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:543 +#, python-format +msgid "Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:545 +#, python-format +msgid "Error getting lun attribute. Exception: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:600 +#, python-format +msgid "No need to extend volume %s as it is already the requested new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:606 +#, python-format +msgid "Resizing lun %s directly to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:633 +#, python-format +msgid "Lun %(path)s geometry failed. Message - %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:662 +#, python-format +msgid "Moving lun %(name)s to %(new_name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:677 +#, python-format +msgid "Resizing lun %s using sub clone to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:684 +#, python-format +msgid "%s cannot be sub clone resized as it is hosted on compressed volume" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:690 +#, python-format +msgid "%s cannot be sub clone resized as it contains no blocks." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:707 +#, python-format +msgid "Post clone resize lun %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:718 +#, python-format +msgid "Failure staging lun %s to tmp." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:723 +#, python-format +msgid "Failure moving new cloned lun to %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:727 +#, python-format +msgid "Failure deleting staged tmp lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:730 +#, python-format +msgid "Unknown exception in post clone resize lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:732 +#, python-format +msgid "Exception details: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:736 +msgid "Getting lun block count." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:741 +#, python-format +msgid "Failure getting lun info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:785 +#, python-format +msgid "Failed to get vol with required size and extra specs for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:796 +#, python-format +msgid "Error provisioning vol %(name)s on %(volume)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:841 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:982 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:986 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1087 +msgid "Cluster ssc is not updated. No volume stats found." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1149 +#: cinder/volume/drivers/netapp/nfs.py:1080 +msgid "Unsupported ONTAP version. ONTAP version 7.3.1 and above is supported." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1153 +#: cinder/volume/drivers/netapp/nfs.py:1084 +#: cinder/volume/drivers/netapp/utils.py:320 +msgid "Api version could not be determined." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1164 +#, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1273 +#, python-format +msgid "Error finding luns for volume %s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1390 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1393 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1456 +msgid "Volume refresh job already running. Returning..." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1462 +#, python-format +msgid "Error refreshing vol capacity. Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1470 +#, python-format +msgid "Refreshing capacity info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:104 +#: cinder/volume/drivers/netapp/nfs.py:211 +#, python-format +msgid "NFS file %s not discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:224 +#, python-format +msgid "Copied image to volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:230 +#, python-format +msgid "Registering image in cache %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:237 +#, python-format +msgid "" +"Exception while registering image %(image_id)s in cache. Exception: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:250 +#, python-format +msgid "Found cache file for image %(image_id)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:263 +#, python-format +msgid "Cloning img from cache for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:271 +msgid "Image cache cleaning in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:282 +msgid "Image cache cleaning in progress." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:293 +#, python-format +msgid "Cleaning cache for share %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:298 +#, python-format +msgid "Files to be queued for deletion %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:305 +#, python-format +msgid "Exception during cache cleaning %(share)s. Message - %(ex)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:311 +msgid "Image cache cleaning done." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:336 +#, python-format +msgid "Bytes to free %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:343 +#, python-format +msgid "Delete file path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:358 +#, python-format +msgid "Deleting file at path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:363 +#, python-format +msgid "Exception during deleting %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:395 +#, python-format +msgid "Unexpected exception in cloning image %(image_id)s. 
Message: %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:411 +#, python-format +msgid "Cloning image %s from cache" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:415 +#, python-format +msgid "Cache share: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:425 +#, python-format +msgid "Unexpected exception during image cloning in share %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:431 +#, python-format +msgid "Cloning image %s directly in share" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:436 +#, python-format +msgid "Share is cloneable %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:443 +#, python-format +msgid "Image is raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:450 +#, python-format +msgid "Image will locally be converted to raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:457 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:467 +#, python-format +msgid "Performing post clone for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:474 +msgid "NFS file could not be discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:478 +msgid "Checking file for resize" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:482 +#, python-format +msgid "Resizing file to %sG" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:488 +msgid "Resizing image file failed." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:510 +msgid "Discover file retries exhausted." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:529 +#, python-format +msgid "Image location not in the expected format %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:557 +#, python-format +msgid "Found possible share matches %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:561 +msgid "Unexpected exception while short listing used share." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:599 +#, python-format +msgid "Extending volume %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:710 +#, python-format +msgid "Shares on vserver %s will only be used for provisioning." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:714 +#: cinder/volume/drivers/netapp/nfs.py:892 +msgid "No vserver set in config. SSC will be disabled." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:757 +#, python-format +msgid "Exception creating vol %(name)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:765 +#, python-format +msgid "Volume %s could not be created on shares." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:815 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:856 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:862 +#, python-format +msgid "" +"Cloning with params volume %(volume)s, src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:924 +msgid "No cluster ssc stats found. Wait for next volume stats update." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:941 +msgid "No shares found hence skipping ssc refresh." 
+msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:978 +#: cinder/volume/drivers/netapp/nfs.py:1221 +#, python-format +msgid "Shortlisted del elg files %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:983 +#: cinder/volume/drivers/netapp/nfs.py:1226 +#, python-format +msgid "Getting file usage for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:988 +#: cinder/volume/drivers/netapp/nfs.py:1231 +#, python-format +msgid "file-usage for path %(path)s is %(bytes)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1005 +#: cinder/volume/drivers/netapp/nfs.py:1268 +#, python-format +msgid "Share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1007 +#: cinder/volume/drivers/netapp/nfs.py:1270 +#, python-format +msgid "No share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1038 +#, python-format +msgid "Found volume %(vol)s for share %(share)s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1129 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1139 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:241 +#, python-format +msgid "Unexpected error while creating ssc vol list. Message - %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:272 +#, python-format +msgid "Exception querying aggr options. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:313 +#, python-format +msgid "Exception querying sis information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:347 +#, python-format +msgid "Exception querying mirror information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:379 +#, python-format +msgid "Exception querying storage disk. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:421 +#, python-format +msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:455 +#, python-format +msgid "Successfully completed stale refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:482 +#, python-format +msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:488 +#, python-format +msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:500 +msgid "Backend not a VolumeDriver." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:502 +msgid "Backend server not NaServer." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:505 +msgid "ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:517 +msgid "refresh stale ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:616 +msgid "Fatal error: User not permitted to query NetApp volumes." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:623 +#, python-format +msgid "" +"The user does not have access or sufficient privileges to use all ssc " +"apis. The ssc features %s may not work as expected." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:122 +msgid "ems executed successfully." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:124 +#, python-format +msgid "Failed to invoke ems. 
Message : %s" +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:137 +msgid "" +"It is not the recommended way to use drivers by NetApp. Please use " +"NetAppDriver to achieve the functionality." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:160 +msgid "Requires an NaServer instance." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:317 +msgid "Unsupported Clustered Data ONTAP version." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:99 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:150 +#, python-format +msgid "Extending volume: %(id)s New size: %(size)s GB" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:166 +#, python-format +msgid "Volume %s does not exist, it seems it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:179 +#, python-format +msgid "Cannot delete snapshot %(origin): %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:190 +#, python-format +msgid "Creating temp snapshot of the original volume: %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:200 +#: cinder/volume/drivers/nexenta/nfs.py:200 +#, python-format +msgid "Volume creation failed, deleting created snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:205 +#: cinder/volume/drivers/nexenta/nfs.py:205 +#, python-format +msgid "Failed to delete zfs snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:223 +#, python-format +msgid "Enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:250 +#, python-format +msgid "Remote NexentaStor appliance at %s should be SSH-bound." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:267 +#, python-format +msgid "" +"Cannot send source snapshot %(src)s to destination %(dst)s. Reason: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:275 +#, python-format +msgid "" +"Cannot delete temporary source snapshot %(src)s on NexentaStor Appliance:" +" %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:281 +#, python-format +msgid "Cannot delete source volume %(volume)s on NexentaStor Appliance: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:318 +#, python-format +msgid "Snapshot %s does not exist, it seems it was already deleted." 
+msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:439 +#: cinder/volume/drivers/windows/windows_utils.py:230 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:449 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:461 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:471 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:481 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:514 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:522 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:83 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:88 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:89 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:90 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:96 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:85 +#, python-format +msgid "Volume %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:89 +#, python-format +msgid "Folder %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:114 +#, python-format +msgid "Creating folder on Nexenta Store %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:146 +#, python-format +msgid "Cannot destroy created folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:176 +#, python-format +msgid "Cannot destroy cloned folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:227 +#, python-format +msgid "Folder %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:237 +#: cinder/volume/drivers/nexenta/nfs.py:268 +#, python-format +msgid "Snapshot %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:302 +#, python-format +msgid "Creating regular file: %s.This may take some time." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:313 +#, python-format +msgid "Regular file: %s created." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:365 +#, python-format +msgid "Sharing folder %s on Nexenta Store" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:393 +#, python-format +msgid "Shares loaded: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/utils.py:46 +#, python-format +msgid "Invalid value: \"%s\"" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:93 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:99 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:107 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:137 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:190 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:246 +#, python-format +msgid "Snapshot info: %(name)s => %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:321 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:79 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:166 +#, python-format +msgid "Invalid hp3parclient version. Version %s or greater required." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:179 +#, python-format +msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:193 +#, python-format +msgid "HP3PARCommon %(common_ver)s, hp3parclient %(rest_ver)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:212 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:494 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:228 +#, python-format +msgid "Failed to get domain because CPG (%s) doesn't exist on array." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:247 +#, python-format +msgid "Error extending volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:347 +#, python-format +msgid "command %s failed" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:390 +#, python-format +msgid "Error running ssh command: %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:586 +#, python-format +msgid "VV Set %s does not exist." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:633 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:684 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:752 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1004 +#, python-format +msgid "Failure in update_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1019 +#, python-format +msgid "Failure in clear_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1031 +#, python-format +msgid "Error attaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1039 +#, python-format +msgid "Error detaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:124 +#, python-format +msgid "Invalid IP address format '%s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:158 +#, python-format +msgid "" +"Found invalid iSCSI IP address(s) in configuration option(s) " +"hp3par_iscsi_ips or iscsi_ip_address '%s.'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:164 +msgid "At least one valid iSCSI IP address must be set." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:266 +msgid "Least busy iSCSI port not found, using first iSCSI port in list." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:75 +#, python-format +msgid "Failure while invoking function: %(func)s. Error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:162 +#, python-format +msgid "Error while terminating session: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:165 +msgid "Successfully established connection to the server." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:172 +#, python-format +msgid "Error while logging out the user: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:218 +#, python-format +msgid "" +"Not authenticated error occurred. Will create session and try API call " +"again: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:258 +#, python-format +msgid "Task: %(task)s progress: %(prog)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:262 +#, python-format +msgid "Task %s status: success." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:266 +#: cinder/volume/drivers/vmware/api.py:271 +#, python-format +msgid "Task: %(task)s failed with error: %(err)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:290 +msgid "Lease is ready." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:294 +msgid "Lease initializing..." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:304 +#, python-format +msgid "Error: unknown lease state %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:51 +#, python-format +msgid "Read %(bytes)s out of %(max)s from ThreadSafePipe." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:56 +#, python-format +msgid "Completed transfer of size %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:102 +#, python-format +msgid "Initiating image service update on image: %(image)s with meta: %(meta)s" +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:117 +#, python-format +msgid "Glance image: %s is now active." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:123 +#, python-format +msgid "Glance image: %s is in killed state." 
+msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:132 +#, python-format +msgid "Glance image %(id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:171 +#, python-format +msgid "" +"Exception during HTTP connection close in VMwareHTTPWrite. Exception is " +"%s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:203 +#: cinder/volume/drivers/vmware/read_write_util.py:292 +msgid "Could not retrieve URL from lease." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:206 +#, python-format +msgid "Opening vmdk url: %s for write." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:231 +#, python-format +msgid "Written %s bytes to vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:242 +#: cinder/volume/drivers/vmware/read_write_util.py:318 +#, python-format +msgid "Updating progress to %s percent." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:258 +#: cinder/volume/drivers/vmware/read_write_util.py:334 +msgid "Lease released." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:260 +#: cinder/volume/drivers/vmware/read_write_util.py:336 +#, python-format +msgid "Lease is already in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:295 +#, python-format +msgid "Opening vmdk url: %s for read." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:307 +#, python-format +msgid "Read %s bytes from vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:150 +#, python-format +msgid "Error(s): %s occurred in the call to RetrievePropertiesEx." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:189 +#, python-format +msgid "No such SOAP method %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:198 +#, python-format +msgid "httplib error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:209 +#, python-format +msgid "Socket error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:218 +#, python-format +msgid "Type error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:225 +#, python-format +msgid "Error in %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:112 +#, python-format +msgid "Returning spec value %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:115 +#, python-format +msgid "Invalid spec value: %s specified." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:118 +#, python-format +msgid "Returning default spec value: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:169 +#, python-format +msgid "%s not set." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:174 +#, python-format +msgid "Successfully setup driver: %(driver)s for server: %(ip)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:219 +msgid "Backing not available, no operation to be performed." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:287 +#, python-format +msgid "" +"Unable to pick datastore to accommodate %(size)s bytes from the " +"datastores: %(dss)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:293 +#, python-format +msgid "" +"Selected datastore: %(datastore)s with %(host_count)d connected host(s) " +"for the volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:375 +#, python-format +msgid "" +"Unable to find suitable datastore for volume of size: %(vol)s GB under " +"host: %(host)s. 
More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:385 +#, python-format +msgid "Unable to find host to accommodate a disk of size: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:412 +#, python-format +msgid "" +"Unable to find suitable datastore for volume: %(vol)s under host: " +"%(host)s. More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:422 +#, python-format +msgid "Unable to create volume: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:441 +#, python-format +msgid "The instance: %s for which initialize connection is called, exists." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:448 +#, python-format +msgid "There is no backing for the volume: %s. Need to create one." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:456 +msgid "The instance for which initialize connection is called, does not exist." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:461 +#, python-format +msgid "Trying to boot from an empty volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:470 +#, python-format +msgid "" +"Returning connection_info: %(info)s for volume: %(volume)s with " +"connector: %(connector)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:518 +#, python-format +msgid "Snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:523 +#, python-format +msgid "There is no backing, so will not create snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:528 +#, python-format +msgid "Successfully created snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:549 +#, python-format +msgid "Delete snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:554 +#, python-format +msgid "There is no backing, and so there is no snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:558 +#, python-format +msgid "Successfully deleted snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:586 +#, python-format +msgid "Successfully cloned new backing: %(back)s from source VMDK file: %(vmdk)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:603 +#, python-format +msgid "" +"There is no backing for the source volume: %(svol)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:633 +#, python-format +msgid "" +"There is no backing for the source snapshot: %(snap)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:642 +#: cinder/volume/drivers/vmware/vmdk.py:982 +#, python-format +msgid "" +"There is no snapshot point for the snapshoted volume: %(snap)s. Not " +"creating any backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:678 +#, python-format +msgid "Cannot create image of disk format: %s. Only vmdk disk format is accepted." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:713 +#: cinder/volume/drivers/vmware/vmdk.py:771 +#, python-format +msgid "Fetching glance image: %(id)s to server: %(host)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:722 +#: cinder/volume/drivers/vmware/vmdk.py:792 +#, python-format +msgid "Done copying image: %(id)s to volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:725 +#, python-format +msgid "" +"Exception in copy_image_to_volume: %(excep)s. Deleting the backing: " +"%(back)s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:746 +#, python-format +msgid "Exception in _select_ds_for_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:749 +#, python-format +msgid "Selected datastore %(ds)s for new volume of size %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:784 +#, python-format +msgid "Exception in copy_image_to_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:787 +#, python-format +msgid "Deleting the backing: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:808 +#, python-format +msgid "Copy glance image: %s to create new volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:842 +msgid "Upload to glance of attached volume is not supported." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:847 +#, python-format +msgid "Copy Volume: %s to new image." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:853 +#, python-format +msgid "Backing not found, creating for volume: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:872 +#, python-format +msgid "Done copying volume %(vol)s to a new image %(img)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:922 +#, python-format +msgid "Relocating volume: %(backing)s to %(ds)s and %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:961 +#: cinder/volume/drivers/vmware/volumeops.py:630 +#, python-format +msgid "Successfully created clone: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:974 +#, python-format +msgid "" +"There is no backing for the snapshoted volume: %(snap)s. Not creating any" +" backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1010 +#, python-format +msgid "" +"There is no backing for the source volume: %(src)s. Not creating any " +"backing for volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1018 +#, python-format +msgid "Linked clone of source volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:94 +#, python-format +msgid "Downloading image: %s from glance image server as a flat vmdk file." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:107 +#: cinder/volume/drivers/vmware/vmware_images.py:126 +#, python-format +msgid "Downloaded image: %s from glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:113 +#, python-format +msgid "Downloading image: %s from glance image server using HttpNfc import." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:132 +#, python-format +msgid "Uploading image: %s to the Glance image server using HttpNfc export." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:158 +#, python-format +msgid "Uploaded image: %s to the Glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:87 +#, python-format +msgid "Did not find any backing with name: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:94 +#, python-format +msgid "Deleting the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:97 +#, python-format +msgid "Initiated deletion of VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:99 +#, python-format +msgid "Deleted the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:237 +#, python-format +msgid "There are no valid datastores attached to %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:289 +#, python-format +msgid "" +"Creating folder: %(child_folder_name)s under parent folder: " +"%(parent_folder)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:306 +#, python-format +msgid "Child folder already present: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:314 +#, python-format +msgid "Created child folder: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:365 +#, python-format +msgid "Spec for creating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:383 +#, python-format +msgid "" +"Creating volume backing name: %(name)s disk_type: %(disk_type)s size_kb: " +"%(size_kb)s at folder: %(folder)s resourse pool: %(resource_pool)s " +"datastore name: %(ds_name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:395 +#, python-format +msgid "Initiated creation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:398 +#, python-format +msgid "Successfully created volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:438 +#, python-format +msgid "Spec for relocating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:452 +#, python-format +msgid "" +"Relocating backing: %(backing)s to datastore: %(ds)s and resource pool: " +"%(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:462 +#, python-format +msgid "Initiated relocation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:464 +#, python-format +msgid "" +"Successfully relocated volume backing: %(backing)s to datastore: %(ds)s " +"and resource pool: %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:474 +#, python-format +msgid "Moving backing: %(backing)s to folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:479 +#, python-format +msgid "Initiated move of volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:482 +#, python-format +msgid "Successfully moved volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:494 +#, python-format +msgid "Snapshoting backing: %(backing)s with name: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:501 +#, python-format +msgid "Initiated snapshot of volume backing: %(backing)s named: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:505 +#, python-format +msgid "Successfully created snapshot: %(snap)s for volume backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:553 +#, python-format +msgid "Deleting the snapshot: %(name)s from backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:558 +#, python-format +msgid "" +"Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " +"delete anything." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:565 +#, python-format +msgid "Initiated snapshot: %(name)s deletion for backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:569 +#, python-format +msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:597 +#, python-format +msgid "Spec for cloning the backing: %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:613 +#, python-format +msgid "" +"Creating a clone of backing: %(back)s, named: %(name)s, clone type: " +"%(type)s from snapshot: %(snap)s on datastore: %(ds)s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:627 +#, python-format +msgid "Initiated clone of backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:638 +#, python-format +msgid "Deleting file: %(file)s under datacenter: %(dc)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:646 +#, python-format +msgid "Initiated deletion via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:648 +#, python-format +msgid "Successfully deleted file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:701 +msgid "Copying disk data before snapshot of the VM" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:711 +#, python-format +msgid "Initiated copying disk data via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:713 +#, python-format +msgid "Successfully copied disk at: %(src)s to: %(dest)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:722 +#, python-format +msgid "Deleting vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:729 +#, python-format +msgid "Initiated deleting vmdk file via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:731 +#, python-format +msgid "Deleted vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/windows/windows.py:102 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:47 +#, python-format +msgid "" +"check_for_setup_error: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:53 +msgid "check_for_setup_error: there is no ISCSI traffic listening." +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:63 +#, python-format +msgid "" +"get_host_information: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:73 +#, python-format +msgid "" +"get_host_information: the ISCSI target information could not be " +"retrieved. WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:105 +#, python-format +msgid "" +"associate_initiator_with_iscsi_target: an association between initiator: " +"%(init)s and target name: %(target)s could not be established. WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:123 +#, python-format +msgid "" +"delete_iscsi_target: error when deleting the iscsi target associated with" +" target name: %(target)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:139 +#, python-format +msgid "" +"create_volume: error when creating the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:157 +#, python-format +msgid "" +"delete_volume: error when deleting the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:177 +#, python-format +msgid "" +"create_snapshot: error when creating the snapshot name: %(vol_name)s . 
" +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:193 +#, python-format +msgid "" +"create_volume_from_snapshot: error when creating the volume name: " +"%(vol_name)s from snapshot name: %(snap_name)s. WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:208 +#, python-format +msgid "" +"delete_snapshot: error when deleting the snapshot name: %(snap_name)s . " +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:223 +#, python-format +msgid "" +"create_iscsi_target: error when creating iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:240 +#, python-format +msgid "" +"remove_iscsi_target: error when deleting iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:255 +#, python-format +msgid "" +"add_disk_to_target: error adding disk associated to volume : %(vol_name)s" +" to the target name: %(tar_name)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:273 +#, python-format +msgid "" +"copy_vhd_disk: error when copying disk from source path : %(src_path)s to" +" destination path: %(dest_path)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:290 +#, python-format +msgid "" +"extend: error when extending the volume: %(vol_name)s .WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/flows/common.py:52 +#, python-format +msgid "Restoring source %(source_volid)s status to %(status)s" +msgstr "" + +#: cinder/volume/flows/common.py:58 +#, python-format +msgid "" +"Failed setting source volume %(source_volid)s back to its initial " +"%(source_status)s status" +msgstr "" + +#: cinder/volume/flows/common.py:83 +#, python-format +msgid "Updating volume: %(volume_id)s with %(update)s due to: %(reason)s" +msgstr "" + +#: cinder/volume/flows/common.py:90 +#: cinder/volume/flows/manager/create_volume.py:676 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(update)s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:81 +#, python-format +msgid "Originating snapshot status must be one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:103 +#, python-format +msgid "" +"Unable to create a volume from an originating source volume when its " +"status is not one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:126 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than the snapshot size " +"%(snap_size)sGB. They must be >= original snapshot size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:135 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than original volume size " +"%(source_size)sGB. They must be >= original volume size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:144 +#, python-format +msgid "Volume size %(size)s must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:186 +#, python-format +msgid "" +"Size of specified image %(image_size)sGB is larger than volume size " +"%(volume_size)sGB." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:194 +#, python-format +msgid "" +"Volume size %(volume_size)sGB cannot be smaller than the image minDisk " +"size %(min_disk)sGB." 
+msgstr "" + +#: cinder/volume/flows/api/create_volume.py:212 +#, python-format +msgid "Metadata property key %s greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:217 +#, python-format +msgid "Metadata property key %s value greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:254 +#, python-format +msgid "Availability zone '%s' is invalid" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:267 +msgid "Volume must be in the same availability zone as the snapshot" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:276 +msgid "Volume must be in the same availability zone as the source volume" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:315 +msgid "Volume type will be changed to be the same as the source volume." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:463 +#, python-format +msgid "Failed destroying volume entry %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:546 +#, python-format +msgid "Failed rolling back quota for %s reservations" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:590 +#, python-format +msgid "Failed to update quota for deleting volume: %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:678 +#: cinder/volume/flows/manager/create_volume.py:192 +#, python-format +msgid "Volume %s: create failed" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:682 +msgid "Unexpected build error:" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:105 +#, python-format +msgid "" +"Volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d due to " +"%(reason)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:124 +#, python-format +msgid "Volume %s: re-scheduled" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:141 +#, python-format +msgid "Updating volume %(volume_id)s with %(update)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:146 +#, python-format +msgid "Volume %s: resetting 'creating' status failed." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:165 +#, python-format +msgid "Volume %s: rescheduling failed" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:308 +#, python-format +msgid "" +"Failed notifying about the volume action %(event)s for volume " +"%(volume_id)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:345 +#, python-format +msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:347 +#, python-format +msgid "" +"Failed updating volume %(vol_id)s metadata using the provided " +"%(src_type)s %(src_id)s metadata" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:405 +#, python-format +msgid "" +"Failed fetching snapshot %(snapshot_id)s bootable flag using the provided" +" glance snapshot %(snapshot_ref_id)s volume reference" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:418 +#, python-format +msgid "Marking volume %s as bootable." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:421 +#, python-format +msgid "Failed updating volume %(volume_id)s bootable flag to true" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:448 +#, python-format +msgid "" +"Attempting download of %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s." 
+msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:455 +#: cinder/volume/flows/manager/create_volume.py:466 +#, python-format +msgid "" +"Failed to copy image %(image_id)s to volume: %(volume_id)s, error: " +"%(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:461 +#, python-format +msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:475 +#, python-format +msgid "" +"Downloaded image %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s successfully." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:512 +#, python-format +msgid "" +"Creating volume glance metadata for volume %(volume_id)s backed by image " +"%(image_id)s with: %(vol_metadata)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:526 +#, python-format +msgid "" +"Cloning %(volume_id)s from image %(image_id)s at location " +"%(image_location)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:552 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(updates)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:574 +#, python-format +msgid "Unable to create volume. Volume driver %s not initialized" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:588 +#, python-format +msgid "" +"Volume %(volume_id)s: being created using %(functor)s with specification:" +" %(volume_spec)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:611 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with creation provided " +"model %(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:619 +#, python-format +msgid "Volume %s: creating export" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:633 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with driver provided model " +"%(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:680 +#, python-format +msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" +msgstr "" + +#~ msgid "Connection to glance failed" +#~ msgstr "" + +#~ msgid "Invalid snapshot" +#~ msgstr "" + +#~ msgid "Invalid source volume %(reason)s." +#~ msgstr "" + +#~ msgid "The request is invalid." +#~ msgstr "" + +#~ msgid "Invalid input received" +#~ msgstr "" + +#~ msgid "Invalid volume type" +#~ msgstr "" + +#~ msgid "Invalid volume" +#~ msgstr "" + +#~ msgid "Invalid host" +#~ msgstr "" + +#~ msgid "Invalid auth key" +#~ msgstr "" + +#~ msgid "Volume %(volume_id)s persistence file could not be found." +#~ msgstr "" + +#~ msgid "Invalid metadata" +#~ msgstr "" + +#~ msgid "Invalid metadata size" +#~ msgstr "" + +#~ msgid "No disk at %(location)s" +#~ msgstr "" + +#~ msgid "" +#~ msgstr "" + +#~ msgid "Class %(class_name)s could not be found: %(exception)s" +#~ msgstr "" + +#~ msgid "Action not allowed." +#~ msgstr "" + +#~ msgid "Key pair %(key_name)s already exists." +#~ msgstr "" + +#~ msgid "Migration error" +#~ msgstr "" + +#~ msgid "Quota exceeded" +#~ msgstr "" + +#~ msgid "Maximum volume/snapshot size exceeded" +#~ msgstr "" + +#~ msgid "3PAR Host already exists: %(err)s. %(info)s" +#~ msgstr "" + +#~ msgid "Backup volume %(volume_id)s type not recognised." 
+#~ msgstr "" + +#~ msgid "Connection to swift failed" +#~ msgstr "" + +#~ msgid "Volume migration failed" +#~ msgstr "" + +#~ msgid "SSH command injection detected" +#~ msgstr "" + +#~ msgid "Invalid qos specs" +#~ msgstr "" + +#~ msgid "base image still has %s snapshots so not deleting base image" +#~ msgstr "" + +#~ msgid "restore finished." +#~ msgstr "" + +#~ msgid "Resize volume \"%(name)s\" to %(size)s" +#~ msgstr "" + +#~ msgid "Volume \"%(name)s\" resized. New size is %(size)s" +#~ msgstr "" + +#~ msgid "Snapshot file at %s does not exist." +#~ msgstr "" + +#~ msgid "Invalid snapshot backing file format: %s" +#~ msgstr "" + +#~ msgid "Size for volume: %s not found, skipping secure delete." +#~ msgstr "" + +#~ msgid "Extend volume from %(old_size) to %(new_size)" +#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %s does not exist" +#~ msgstr "" + +#~ msgid "Disk not found: %s" +#~ msgstr "" + +#~ msgid "read timed out" +#~ msgstr "" + +#~ msgid "do_setup." +#~ msgstr "" + +#~ msgid "check_for_setup_error." +#~ msgstr "" + +#~ msgid "check_for_setup_error: Can not get device type." +#~ msgstr "" + +#~ msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +#~ msgstr "" + +#~ msgid "_get_device_type: Storage Pool must be configured." +#~ msgstr "" + +#~ msgid "create_volume:volume name: %s." +#~ msgstr "" + +#~ msgid "delete_volume: volume name: %s." +#~ msgstr "" + +#~ msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +#~ msgstr "" + +#~ msgid "create_export: volume name:%s" +#~ msgstr "" + +#~ msgid "create_export:Volume %(name)s does not exist." +#~ msgstr "" + +#~ msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +#~ msgstr "" + +#~ msgid "terminate_connection:Host does not exist. Host name:%(host)s." +#~ msgstr "" + +#~ msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +#~ msgstr "" + +#~ msgid "create_snapshot:Device does not support snapshot." +#~ msgstr "" + +#~ msgid "create_snapshot:Resource pool needs 1GB valid size at least." +#~ msgstr "" + +#~ msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +#~ msgstr "" + +#~ msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s" +#~ msgstr "" + +#~ msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +#~ msgstr "" + +#~ msgid "delete_snapshot:Device does not support snapshot." +#~ msgstr "" + +#~ msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +#~ msgstr "" + +#~ msgid "_check_conf_file: %s" +#~ msgstr "" + +#~ msgid "Write login information to xml error. %s" +#~ msgstr "" + +#~ msgid "_get_login_info error. %s" +#~ msgstr "" + +#~ msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +#~ msgstr "" + +#~ msgid "_get_lun_set_info:%s" +#~ msgstr "" + +#~ msgid "_get_iscsi_info:%s" +#~ msgstr "" + +#~ msgid "CLI command:%s" +#~ msgstr "" + +#~ msgid "_execute_cli:%s" +#~ msgstr "" + +#~ msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +#~ msgstr "" + +#~ msgid "_get_tgt_iqn:iSCSI IP is %s." +#~ msgstr "" + +#~ msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +#~ msgstr "" + +#~ msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +#~ msgstr "" + +#~ msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +#~ msgstr "" + +#~ msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." 
+#~ msgstr "" + +#~ msgid "Could not find attribute for LUN named %s" +#~ msgstr "" + +#~ msgid "Login to 3PAR array invalid" +#~ msgstr "" + +#~ msgid "Cleaning up incomplete backup operations" +#~ msgstr "" + +#~ msgid "Resetting volume %s to available (was backing-up)" +#~ msgstr "" + +#~ msgid "Resetting volume %s to error_restoring (was restoring-backup)" +#~ msgstr "" + +#~ msgid "Resetting backup %s to error (was creating)" +#~ msgstr "" + +#~ msgid "Resetting backup %s to available (was restoring)" +#~ msgstr "" + +#~ msgid "Resuming delete on backup: %s" +#~ msgstr "" + +#~ msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +#~ msgstr "" + +#~ msgid "create_backup finished. backup: %s" +#~ msgstr "" + +#~ msgid "delete_backup started, backup: %s" +#~ msgstr "" + +#~ msgid "delete_backup finished, backup %s deleted" +#~ msgstr "" + +#~ msgid "JSON transfer Error" +#~ msgstr "" + +#~ msgid "create volume error: %(err)s" +#~ msgstr "" + +#~ msgid "Create snapshot error." +#~ msgstr "" + +#~ msgid "Create luncopy error." +#~ msgstr "" + +#~ msgid "_find_host_lun_id transfer data error! " +#~ msgstr "" + +#~ msgid "ssh_read: Read SSH timeout." +#~ msgstr "" + +#~ msgid "There are no hosts in the inventory." +#~ msgstr "" + +#~ msgid "Unable to create volume: %(vol)s on the hosts: %(hosts)s." +#~ msgstr "" + +#~ msgid "Successfully cloned new backing: %s." +#~ msgstr "" + +#~ msgid "Successfully reverted clone: %(clone)s to snapshot: %(snapshot)s." +#~ msgstr "" + +#~ msgid "Copying backing files from %(src)s to %(dest)s." +#~ msgstr "" + +#~ msgid "Initiated copying of backing via task: %s." +#~ msgstr "" + +#~ msgid "Successfully copied backing to %s." +#~ msgstr "" + +#~ msgid "Registering backing at path: %s to inventory." +#~ msgstr "" + +#~ msgid "Initiated registring backing, task: %s." +#~ msgstr "" + +#~ msgid "Successfully registered backing: %s." +#~ msgstr "" + +#~ msgid "Reverting backing to snapshot: %s." +#~ msgstr "" + +#~ msgid "Initiated reverting snapshot via task: %s." +#~ msgstr "" + +#~ msgid "Successfully reverted to snapshot: %s." +#~ msgstr "" + +#~ msgid "Successfully copied disk data to: %s." +#~ msgstr "" + +#~ msgid "Error(s): %s occurred in the call to RetrieveProperties." +#~ msgstr "" + +#~ msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +#~ msgstr "" + +#~ msgid "Deploy v1 of the Cinder API. " +#~ msgstr "" + +#~ msgid "Deploy v2 of the Cinder API. " +#~ msgstr "" + +#~ msgid "_read_xml:%s" +#~ msgstr "" + +#~ msgid "request ip info is %s." +#~ msgstr "" + +#~ msgid "new str info is %s." +#~ msgstr "" + +#~ msgid "Failed to create iser target for volume %(volume_id)s." +#~ msgstr "" + +#~ msgid "Failed to remove iser target for volume %(volume_id)s." 
+#~ msgstr "" + +#~ msgid "rtstool is not installed correctly" +#~ msgstr "" + +#~ msgid "Creating iser_target for: %s" +#~ msgstr "" + +#~ msgid "Failed to create iser target for volume id:%(vol_id)s: %(e)s" +#~ msgstr "" + +#~ msgid "Removing iser_target for: %s" +#~ msgstr "" + +#~ msgid "Failed to remove iser target for volume id:%(vol_id)s: %(e)s" +#~ msgstr "" + +#~ msgid "Volume %s does not exist, it seems it was already deleted" +#~ msgstr "" + +#~ msgid "Executing zfs send/recv on the appliance" +#~ msgstr "" + +#~ msgid "zfs send/recv done, new volume %s created" +#~ msgstr "" + +#~ msgid "Failed to delete temp snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" + +#~ msgid "Failed to delete zfs recv snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" + +#~ msgid "rbd export-diff failed - %s" +#~ msgstr "" + +#~ msgid "rbd import-diff failed - %s" +#~ msgstr "" + +#~ msgid "%s is not on GPFS. Perhaps GPFS not mounted." +#~ msgstr "" + +#~ msgid "Folder %s does not exist, it seems it was already deleted." +#~ msgstr "" + +#~ msgid "No 'os-update_readonly_flag' was specified in request." +#~ msgstr "" + +#~ msgid "Volume 'readonly' flag must be specified in request as a boolean." +#~ msgstr "" + +#~ msgid "ISER provider_location not stored, using discovery" +#~ msgstr "" + +#~ msgid "Could not find iSER export for volume %s" +#~ msgstr "" + +#~ msgid "ISER Discovery: Found %s" +#~ msgstr "" + +#~ msgid "Failed to access the device on the path %(path)s: %(error)s." +#~ msgstr "" + +#~ msgid "iSER device not found at %s" +#~ msgstr "" + +#~ msgid "Found iSER node %(host_device)s (after %(tries)s rescans)." +#~ msgstr "" + +#~ msgid "Skipping ensure_export. No iser_target provisioned for volume: %s" +#~ msgstr "" + +#~ msgid "Skipping remove_export. No iser_target provisioned for volume: %s" +#~ msgstr "" + +#~ msgid "Downloading image: %s from glance image server." +#~ msgstr "" + +#~ msgid "Uploading image: %s to the Glance image server." +#~ msgstr "" + +#~ msgid "Invalid request body" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: prefix %s" +#~ msgstr "" + +#~ msgid "Schedule volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete schedule volume using flow: %s" +#~ msgstr "" + +#~ msgid "Create volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete create volume workflow" +#~ msgstr "" + +#~ msgid "Expected volume result not found" +#~ msgstr "" + +#~ msgid "Manager volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete manager volume workflow" +#~ msgstr "" + +#~ msgid "Unable to update stats, driver is uninitialized" +#~ msgstr "" + +#~ msgid "Bad reponse from server: %s" +#~ msgstr "" + +#~ msgid "%(flow)s has moved into state %(state)s from state %(old_state)s" +#~ msgstr "" + +#~ msgid "No request spec, will not reschedule" +#~ msgstr "" + +#~ msgid "No retry filter property or associated retry info, will not reschedule" +#~ msgstr "" + +#~ msgid "Retry info not present, will not reschedule" +#~ msgstr "" + +#~ msgid "Clear capabilities" +#~ msgstr "" + +#~ msgid "This usually means the volume was never succesfully created." +#~ msgstr "" + +#~ msgid "setting LU uppper (end) limit to %s" +#~ msgstr "" + +#~ msgid "Can't find lun or lun goup in array" +#~ msgstr "" + +#~ msgid "Volume to be restored to is smaller than the backup to be restored" +#~ msgstr "" + +#~ msgid "Volume driver '%(driver)s' not initialized." 
+#~ msgstr "" + +#~ msgid "in looping call" +#~ msgstr "" + +#~ msgid "Is the appropriate service running?" +#~ msgstr "" + +#~ msgid "Could not find another host" +#~ msgstr "" + +#~ msgid "Not enough allocatable volume gigabytes remaining" +#~ msgstr "" + +#~ msgid "Unable to update stats on non-intialized Volume Group: %s" +#~ msgstr "" + +#~ msgid "do_setup: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "migrate_volume started with more than one vdisk copy" +#~ msgstr "" + +#~ msgid "migrate_volume: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "Selected datastore: %s for the volume." +#~ msgstr "" + +#~ msgid "There are no valid datastores present under %s." +#~ msgstr "" + +#~ msgid "Unable to create volume, driver not initialized" +#~ msgstr "" + +#~ msgid "Migration %(migration_id)s could not be found." +#~ msgstr "" + +#~ msgid "Bad driver response status: %(status)s" +#~ msgstr "" + +#~ msgid "Instance %(instance_id)s could not be found." +#~ msgstr "" + +#~ msgid "Volume retype failed: %(reason)s" +#~ msgstr "" + +#~ msgid "SIGTERM received" +#~ msgstr "" + +#~ msgid "Child %(pid)d exited with status %(code)d" +#~ msgstr "" + +#~ msgid "_wait_child %d" +#~ msgstr "" + +#~ msgid "wait wrap.failed %s" +#~ msgstr "" + +#~ msgid "" +#~ "Report interval must be less than " +#~ "service down time. Current config: " +#~ ". Setting " +#~ "service_down_time to: %(new_service_down_time)s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target for volume %(name)s." +#~ msgstr "" + +#~ msgid "Updating iscsi target: %s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target %(name)s: %(e)s" +#~ msgstr "" + +#~ msgid "Caught '%(exception)s' exception." +#~ msgstr "" + +#~ msgid "Get code level failed" +#~ msgstr "" + +#~ msgid "do_setup: Could not get system name" +#~ msgstr "" + +#~ msgid "Failed to get license information." +#~ msgstr "" + +#~ msgid "do_setup: No configured nodes" +#~ msgstr "" + +#~ msgid "enter: _get_chap_secret_for_host: host name %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_chap_secret_for_host: host name " +#~ "%(host_name)s with secret %(chap_secret)s" +#~ msgstr "" + +#~ msgid "" +#~ "_create_host: Cannot clean host name. " +#~ "Host name is not unicode or string" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: %s" +#~ msgstr "" + +#~ msgid "leave: _get_host_from_connector: host %s" +#~ msgstr "" + +#~ msgid "enter: _create_host: host %s" +#~ msgstr "" + +#~ msgid "_create_host: No connector ports" +#~ msgstr "" + +#~ msgid "leave: _create_host: host %(host)s - %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +#~ msgstr "" + +#~ msgid "" +#~ "storwize_svc_multihostmap_enabled is set to " +#~ "False, Not allow multi host mapping" +#~ msgstr "" + +#~ msgid "volume %s mapping to multi host" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _map_vol_to_host: LUN %(result_lun)s, " +#~ "volume %(volume_name)s, host %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "leave: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "_create_host failed to return the host name." +#~ msgstr "" + +#~ msgid "_get_host_from_connector failed to return the host name for connector" +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to any host found." +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: Multiple mappings of " +#~ "volume %(vol_name)s found, no host " +#~ "specified." 
+#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to host %(host_name)s found" +#~ msgstr "" + +#~ msgid "protocol must be specified as ' iSCSI' or ' FC'" +#~ msgstr "" + +#~ msgid "enter: _create_vdisk: vdisk %s " +#~ msgstr "" + +#~ msgid "" +#~ "_create_vdisk %(name)s - did not find success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find success " +#~ "message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find mapping " +#~ "id in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to prepare FlashCopy" +#~ " from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "Unexecpted mapping status %(status)s for " +#~ "mapping %(id)s. Attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Mapping %(id)s prepare failed to " +#~ "complete within the allotted %(to)d " +#~ "seconds timeout. Terminating." +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s with " +#~ "exception %(ex)s" +#~ msgstr "" + +#~ msgid "_prepare_fc_map: %s" +#~ msgstr "" + +#~ msgid "" +#~ "_start_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "enter: _run_flashcopy: execute FlashCopy from" +#~ " source %(source)s to target %(target)s" +#~ msgstr "" + +#~ msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +#~ msgstr "" + +#~ msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %(src_vdisk)s (%(src_id)s) does not exist" +#~ msgstr "" + +#~ msgid "" +#~ "_create_copy: cannot get source vdisk " +#~ "%(src)s capacity from vdisk attributes " +#~ "%(attr)s" +#~ msgstr "" + +#~ msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_flashcopy_mapping_attributes: mapping " +#~ "%(fc_map_id)s, attributes %(attributes)s" +#~ msgstr "" + +#~ msgid "enter: _is_vdisk_defined: vdisk %s " +#~ msgstr "" + +#~ msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +#~ msgstr "" + +#~ msgid "enter: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "warning: Tried to delete vdisk %s but it does not exist." 
+#~ msgstr "" + +#~ msgid "leave: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "" +#~ "_add_vdisk_copy %(name)s - did not find" +#~ " success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "_get_vdisk_copy_attrs: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "_get_pool_attrs: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "enter: _execute_command_and_parse_attributes: command %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _execute_command_and_parse_attributes:\n" +#~ "command: %(cmd)s\n" +#~ "attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "_get_hdr_dic: attribute headers and values do not match.\n" +#~ " Headers: %(header)s\n" +#~ " Values: %(row)s" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ "stdout: %(out)s\n" +#~ "stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "Did not find expected column in %(fun)s: %(hdr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Volume size %(size)s cannot be lesser" +#~ " than the snapshot size %(snap_size)s. " +#~ "They must be >= original snapshot " +#~ "size." +#~ msgstr "" + +#~ msgid "" +#~ "Clones currently disallowed when %(size)s " +#~ "< %(source_size)s. They must be >= " +#~ "original volume size." +#~ msgstr "" + +#~ msgid "" +#~ "Size of specified image %(image_size)s " +#~ "is larger than volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "" +#~ "Image minDisk size %(min_disk)s is " +#~ "larger than the volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "Updating volume %(volume_id)s with %(update)s" +#~ msgstr "" + +#~ msgid "Volume %s: resetting 'creating' status failed" +#~ msgstr "" + +#~ msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s" +#~ msgstr "" + +#~ msgid "Marking volume %s as bootable" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting download of %(image_id)s " +#~ "(%(image_location)s) to volume %(volume_id)s" +#~ msgstr "" + +#~ msgid "" +#~ "Downloaded image %(image_id)s (%(image_location)s)" +#~ " to volume %(volume_id)s successfully" +#~ msgstr "" + +#~ msgid "" +#~ "Creating volume glance metadata for " +#~ "volume %(volume_id)s backed by image " +#~ "%(image_id)s with: %(vol_metadata)s" +#~ msgstr "" + +#~ msgid "" +#~ "Cloning %(volume_id)s from image %(image_id)s" +#~ " at location %(image_location)s" +#~ msgstr "" + diff --git a/cinder/locale/fr/LC_MESSAGES/cinder.po b/cinder/locale/fr/LC_MESSAGES/cinder.po index f24fca003e..650b3a8f82 100644 --- a/cinder/locale/fr/LC_MESSAGES/cinder.po +++ b/cinder/locale/fr/LC_MESSAGES/cinder.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: cinder\n" "Report-Msgid-Bugs-To: FULL NAME \n" -"POT-Creation-Date: 2012-04-08 23:04+0000\n" +"POT-Creation-Date: 2014-02-03 06:16+0000\n" "PO-Revision-Date: 2012-04-06 14:54+0000\n" "Last-Translator: EmmanuelLeNormand \n" "Language-Team: French \n" @@ -15,8237 +15,10761 @@ msgstr "" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 0.9.6\n" +"Generated-By: Babel 1.3\n" -#: cinder/context.py:59 +#: cinder/context.py:61 #, python-format msgid "Arguments dropped when creating context: %s" msgstr "" -#: cinder/context.py:90 +#: cinder/context.py:102 #, python-format msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" msgstr "" -#: 
cinder/crypto.py:48 -msgid "Filename of root CA" -msgstr "Nom du fichier contenant la racine de l'autorité de certification" - -#: cinder/crypto.py:51 -msgid "Filename of private key" -msgstr "Nom de fichier de la clé privée" - -#: cinder/crypto.py:54 -msgid "Filename of root Certificate Revocation List" -msgstr "Nom du fichier de la liste de révocation du Certificat Racine" - -#: cinder/crypto.py:57 -msgid "Where we keep our keys" -msgstr "Emplacement de sauvegarde des clefs" - -#: cinder/crypto.py:60 -msgid "Where we keep our root CA" -msgstr "Emplacement de sauvegarde des racines d'autorité de certification" - -#: cinder/crypto.py:63 -msgid "Should we use a CA for each project?" -msgstr "Doit-on utiliser une autorité de certification pour chaque projet ?" +#: cinder/exception.py:66 cinder/brick/exception.py:33 +msgid "An unknown exception occurred." +msgstr "Une exception inconnue s'est produite." -#: cinder/crypto.py:67 -#, python-format -msgid "Subject for certificate for users, %s for project, user, timestamp" +#: cinder/exception.py:88 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" msgstr "" -"Sujet pour les certificats utilisateurs, %s pour le projet, utilisateur, " -"timestamp" -#: cinder/crypto.py:72 +#: cinder/exception.py:107 #, python-format -msgid "Subject for certificate for projects, %s for project, timestamp" -msgstr "Sujet de certificat pour projets, %s pour le projet, timestamp" - -#: cinder/crypto.py:292 -#, python-format -msgid "Flags path: %s" -msgstr "Chemin des propriétés: %s" - -#: cinder/exception.py:56 -msgid "Unexpected error while running command." -msgstr "Erreur imprévue lors de l'éxecution de la commande" +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" -#: cinder/exception.py:59 +#: cinder/exception.py:112 #, python-format -msgid "" -"%(description)s\n" -"Command: %(cmd)s\n" -"Exit code: %(exit_code)s\n" -"Stdout: %(stdout)r\n" -"Stderr: %(stderr)r" +msgid "Volume driver reported an error: %(message)s" msgstr "" -"%(description)s\n" -"Commande : %(cmd)s\n" -"Valeur retournée : %(exit_code)s\n" -"Sortie standard : %(stdout)r\n" -"Sortie d'erreur : %(stderr)r" -#: cinder/exception.py:94 -msgid "DB exception wrapped." +#: cinder/exception.py:116 +#, python-format +msgid "Backup driver reported an error: %(message)s" msgstr "" -#: cinder/exception.py:155 -msgid "An unknown exception occurred." -msgstr "Une exception inconnue s'est produite." - -#: cinder/exception.py:178 -msgid "Failed to decrypt text" -msgstr "Échec du décryptage du text" - -#: cinder/exception.py:182 -msgid "Failed to paginate through images from image service" -msgstr "Échec de la pagination à travers les images depuis le service d'image" - -#: cinder/exception.py:186 -msgid "Virtual Interface creation failed" -msgstr "La création de l'Interface Virtuelle a échoué" - -#: cinder/exception.py:190 -msgid "5 attempts to create virtual interfacewith unique mac address failed" +#: cinder/exception.py:120 +#, python-format +msgid "Connection to glance failed: %(reason)s" msgstr "" -"Les 5 tentatives de création de l'interface virtuelle avec une adresse " -"MAC unique ont échoué" - -#: cinder/exception.py:195 -msgid "Connection to glance failed" -msgstr "La connexion à Glance a échoué" -#: cinder/exception.py:199 -msgid "Connection to melange failed" -msgstr "La connexion à Melange a échoué" - -#: cinder/exception.py:203 +#: cinder/exception.py:124 msgid "Not authorized." msgstr "Non autorisé." 
-#: cinder/exception.py:208 +#: cinder/exception.py:129 msgid "User does not have admin privileges" msgstr "L’utilisateur n'a pas les privilèges administrateur" -#: cinder/exception.py:212 +#: cinder/exception.py:133 #, python-format msgid "Policy doesn't allow %(action)s to be performed." msgstr "Le réglage des droits n'autorise pas %(action)s à être effectué(e)(s)" -#: cinder/exception.py:216 +#: cinder/exception.py:137 #, fuzzy, python-format msgid "Not authorized for image %(image_id)s." msgstr "Pas de méthode pour le message : %s" -#: cinder/exception.py:220 +#: cinder/exception.py:141 +msgid "Volume driver not ready." +msgstr "" + +#: cinder/exception.py:145 cinder/brick/exception.py:74 msgid "Unacceptable parameters." msgstr "Paramètres inacceptables." -#: cinder/exception.py:225 -msgid "Invalid snapshot" -msgstr "Snapshot invalide" +#: cinder/exception.py:150 +#, python-format +msgid "Invalid snapshot: %(reason)s" +msgstr "" -#: cinder/exception.py:229 +#: cinder/exception.py:154 #, python-format -msgid "Volume %(volume_id)s is not attached to anything" -msgstr "Le volume %(volume_id)s n'est lié à rien" +msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." +msgstr "" -#: cinder/exception.py:233 cinder/api/openstack/compute/contrib/keypairs.py:113 -msgid "Keypair data is invalid" -msgstr "La paire de clés de donnée n'est pas valide" +#: cinder/exception.py:159 +#, fuzzy, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "Le volume %(volume_id)s n'est lié à rien" -#: cinder/exception.py:237 +#: cinder/exception.py:163 msgid "Failed to load data into json format" msgstr "Échec du chargement des données au format JSON" -#: cinder/exception.py:241 -msgid "The request is invalid." +#: cinder/exception.py:167 +#, fuzzy +msgid "The results are invalid." msgstr "La requête est invalide." -#: cinder/exception.py:245 -#, python-format -msgid "Invalid signature %(signature)s for user %(user)s." -msgstr "La signature %(signature)s est invalide pour l'utilisateur %(user)s." - -#: cinder/exception.py:249 -msgid "Invalid input received" -msgstr "Entrée invalide reçue" - -#: cinder/exception.py:253 +#: cinder/exception.py:171 #, python-format -msgid "Invalid instance type %(instance_type)s." -msgstr "L'instance de type %(instance_type)s est invalide." - -#: cinder/exception.py:257 -msgid "Invalid volume type" -msgstr "Type de volume invalide" - -#: cinder/exception.py:261 -msgid "Invalid volume" -msgstr "Volume invalide" +msgid "Invalid input received: %(reason)s" +msgstr "" -#: cinder/exception.py:265 +#: cinder/exception.py:175 #, python-format -msgid "Invalid port range %(from_port)s:%(to_port)s. %(msg)s" -msgstr "La plage de port %(from_port)s:%(to_port)s. %(msg)s est invalide" +msgid "Invalid volume type: %(reason)s" +msgstr "" -#: cinder/exception.py:269 +#: cinder/exception.py:179 #, python-format -msgid "Invalid IP protocol %(protocol)s." -msgstr "Le protocole IP %(protocol)s est invalide" +msgid "Invalid volume: %(reason)s" +msgstr "" -#: cinder/exception.py:273 +#: cinder/exception.py:183 #, python-format msgid "Invalid content type %(content_type)s." msgstr "Le type de contenu %(content_type)s est invalide" -#: cinder/exception.py:277 +#: cinder/exception.py:187 #, python-format -msgid "Invalid cidr %(cidr)s." -msgstr "Le cidr %(cidr)s est invalide" - -#: cinder/exception.py:281 -msgid "Invalid reuse of an RPC connection." 
-msgstr "Réutilisation invalide d'une connexion RPC" - -#: cinder/exception.py:285 -msgid "Invalid Parameter: Unicode is not supported by the current database." +msgid "Invalid host: %(reason)s" msgstr "" -#: cinder/exception.py:292 +#: cinder/exception.py:193 cinder/brick/exception.py:81 #, python-format msgid "%(err)s" msgstr "%(err)s" -#: cinder/exception.py:296 +#: cinder/exception.py:197 #, python-format -msgid "" -"Cannot perform action '%(action)s' on aggregate %(aggregate_id)s. Reason:" -" %(reason)s." -msgstr "" -"Impossible d'effectuer l'action '%(action)s' sur l'ensemble " -"%(aggregate_id)s. Raison: %(reason)s." - -#: cinder/exception.py:301 -#, python-format -msgid "Group not valid. Reason: %(reason)s" +msgid "Invalid auth key: %(reason)s" msgstr "" -#: cinder/exception.py:305 +#: cinder/exception.py:201 #, python-format -msgid "" -"Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot %(method)s while" -" the instance is in this state." +msgid "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" msgstr "" -"L'instance %(instance_uuid)s dans %(attr)s %(state)s. Impossible de " -"%(method)s pendant que l'instance est dans cet état." - -#: cinder/exception.py:310 -#, python-format -msgid "Instance %(instance_id)s is not running." -msgstr "L'instance %(instance_id)s ne fonctionne pas." - -#: cinder/exception.py:314 -#, python-format -msgid "Instance %(instance_id)s is not suspended." -msgstr "L'instance %(instance_id)s n'est pas suspendue" - -#: cinder/exception.py:318 -#, python-format -msgid "Instance %(instance_id)s is not in rescue mode" -msgstr "L'instance %(instance_id)s n'est pas en mode secours" - -#: cinder/exception.py:322 -msgid "Failed to suspend instance" -msgstr "Échec de la suspension de l'instance" - -#: cinder/exception.py:326 -msgid "Failed to resume server" -msgstr "Échec de la reprise du serveur" - -#: cinder/exception.py:330 -msgid "Failed to reboot instance" -msgstr "Échec du redémarrage de l'instance" - -#: cinder/exception.py:334 -#, fuzzy -msgid "Failed to terminate instance" -msgstr "Échec du redémarrage de l'instance" -#: cinder/exception.py:338 +#: cinder/exception.py:206 msgid "Service is unavailable at this time." msgstr "Le service est indisponible actuellement." -#: cinder/exception.py:342 -msgid "Volume service is unavailable at this time." -msgstr "Le service de volume est indisponible actuellement." - -#: cinder/exception.py:346 -msgid "Compute service is unavailable at this time." -msgstr "Le service de calcul est indisponible actuellement." - -#: cinder/exception.py:350 -#, python-format -msgid "Unable to migrate instance (%(instance_id)s) to current host (%(host)s)." -msgstr "" -"Impossible de migrer l'instance (%(instance_id)s) vers l'hôte actuel " -"(%(host)s)." - -#: cinder/exception.py:355 -msgid "Destination compute host is unavailable at this time." -msgstr "L'hôte destinataire de calcul est indisponible actuellement." - -#: cinder/exception.py:359 -msgid "Original compute host is unavailable at this time." -msgstr "L'hôte original de calcul est indisponible actuellement." - -#: cinder/exception.py:363 -msgid "The supplied hypervisor type of is invalid." -msgstr "Le type de l'hyperviseur fourni n'est pas valide." - -#: cinder/exception.py:367 -msgid "The instance requires a newer hypervisor version than has been provided." -msgstr "" -"L'instance nécessite une version plus récente de l'hyperviseur que celle " -"fournie." 
- -#: cinder/exception.py:372 +#: cinder/exception.py:210 #, python-format -msgid "" -"The supplied disk path (%(path)s) already exists, it is expected not to " -"exist." +msgid "Image %(image_id)s is unacceptable: %(reason)s" msgstr "" -#: cinder/exception.py:377 +#: cinder/exception.py:214 #, python-format -msgid "The supplied device path (%(path)s) is invalid." +msgid "The device in the path %(path)s is unavailable: %(reason)s" msgstr "" -#: cinder/exception.py:381 +#: cinder/exception.py:218 #, python-format -msgid "The supplied device (%(device)s) is busy." +msgid "Expected a uuid but received %(uuid)s." msgstr "" -#: cinder/exception.py:385 -msgid "Unacceptable CPU info" +#: cinder/exception.py:222 cinder/brick/exception.py:68 +msgid "Resource could not be found." msgstr "" -#: cinder/exception.py:389 +#: cinder/exception.py:228 #, python-format -msgid "%(address)s is not a valid IP v4/6 address." +msgid "Volume %(volume_id)s could not be found." msgstr "" -#: cinder/exception.py:393 +#: cinder/exception.py:232 #, python-format -msgid "" -"VLAN tag is not appropriate for the port group %(bridge)s. Expected VLAN " -"tag is %(tag)s, but the one associated with the port group is %(pgroup)s." +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." msgstr "" -#: cinder/exception.py:399 +#: cinder/exception.py:237 #, python-format msgid "" -"vSwitch which contains the port group %(bridge)s is not associated with " -"the desired physical adapter. Expected vSwitch is %(expected)s, but the " -"one associated is %(actual)s." +"Volume %(volume_id)s has no administration metadata with key " +"%(metadata_key)s." msgstr "" -#: cinder/exception.py:406 +#: cinder/exception.py:242 #, python-format -msgid "Disk format %(disk_format)s is not acceptable" +msgid "Invalid metadata: %(reason)s" msgstr "" -#: cinder/exception.py:410 +#: cinder/exception.py:246 #, python-format -msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgid "Invalid metadata size: %(reason)s" msgstr "" -#: cinder/exception.py:414 +#: cinder/exception.py:250 #, python-format -msgid "Instance %(instance_id)s is unacceptable: %(reason)s" +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." msgstr "" -#: cinder/exception.py:418 +#: cinder/exception.py:255 #, python-format -msgid "Ec2 id %(ec2_id)s is unacceptable." -msgstr "" - -#: cinder/exception.py:422 -msgid "Resource could not be found." +msgid "Volume type %(volume_type_id)s could not be found." msgstr "" -#: cinder/exception.py:427 +#: cinder/exception.py:259 #, python-format -msgid "Required flag %(flag)s not set." +msgid "Volume type with name %(volume_type_name)s could not be found." msgstr "" -#: cinder/exception.py:431 +#: cinder/exception.py:264 #, python-format -msgid "Volume %(volume_id)s could not be found." +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." msgstr "" -#: cinder/exception.py:435 +#: cinder/exception.py:269 #, python-format -msgid "Unable to locate account %(account_name)s on Solidfire device" +msgid "" +"Volume Type %(volume_type_id)s deletion is not allowed with volumes " +"present with the type." msgstr "" -#: cinder/exception.py:440 +#: cinder/exception.py:274 #, python-format -msgid "Volume not found for instance %(instance_id)s." +msgid "Snapshot %(snapshot_id)s could not be found." msgstr "" -#: cinder/exception.py:444 +#: cinder/exception.py:278 #, python-format -msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." 
-msgstr "" - -#: cinder/exception.py:449 -msgid "Zero volume types found." +msgid "deleting volume %(volume_name)s that has snapshot" msgstr "" -#: cinder/exception.py:453 +#: cinder/exception.py:282 #, python-format -msgid "Volume type %(volume_type_id)s could not be found." +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" msgstr "" -#: cinder/exception.py:457 +#: cinder/exception.py:287 #, python-format -msgid "Volume type with name %(volume_type_name)s could not be found." +msgid "No target id found for volume %(volume_id)s." msgstr "" -#: cinder/exception.py:462 +#: cinder/exception.py:291 #, python-format -msgid "" -"Volume Type %(volume_type_id)s has no extra specs with key " -"%(extra_specs_key)s." +msgid "Invalid image href %(image_href)s." msgstr "" -#: cinder/exception.py:467 +#: cinder/exception.py:295 #, python-format -msgid "Snapshot %(snapshot_id)s could not be found." +msgid "Image %(image_id)s could not be found." msgstr "" -#: cinder/exception.py:471 +#: cinder/exception.py:299 #, python-format -msgid "deleting volume %(volume_name)s that has snapshot" +msgid "Service %(service_id)s could not be found." msgstr "" -#: cinder/exception.py:475 +#: cinder/exception.py:303 #, python-format -msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgid "Host %(host)s could not be found." msgstr "" -#: cinder/exception.py:480 +#: cinder/exception.py:307 #, python-format -msgid "No target id found for volume %(volume_id)s." +msgid "Scheduler Host Filter %(filter_name)s could not be found." msgstr "" -#: cinder/exception.py:484 +#: cinder/exception.py:311 #, python-format -msgid "No disk at %(location)s" +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." msgstr "" -#: cinder/exception.py:488 +#: cinder/exception.py:315 #, python-format -msgid "Could not find a handler for %(driver_type)s volume." +msgid "Could not find binary %(binary)s on host %(host)s." msgstr "" -#: cinder/exception.py:492 +#: cinder/exception.py:319 #, python-format -msgid "Invalid image href %(image_href)s." +msgid "Invalid reservation expiration %(expire)s." msgstr "" -#: cinder/exception.py:496 +#: cinder/exception.py:323 +#, python-format msgid "" -"Some images have been stored via hrefs. This version of the api does not " -"support displaying image hrefs." +"Change would make usage less than 0 for the following resources: " +"%(unders)s" msgstr "" -#: cinder/exception.py:501 -#, python-format -msgid "Image %(image_id)s could not be found." +#: cinder/exception.py:328 +msgid "Quota could not be found" msgstr "" -#: cinder/exception.py:505 +#: cinder/exception.py:332 #, python-format -msgid "Kernel not found for image %(image_id)s." +msgid "Unknown quota resources %(unknown)s." msgstr "" -#: cinder/exception.py:509 +#: cinder/exception.py:336 #, python-format -msgid "User %(user_id)s could not be found." +msgid "Quota for project %(project_id)s could not be found." msgstr "" -#: cinder/exception.py:513 +#: cinder/exception.py:340 #, python-format -msgid "Project %(project_id)s could not be found." +msgid "Quota class %(class_name)s could not be found." msgstr "" -#: cinder/exception.py:517 +#: cinder/exception.py:344 #, python-format -msgid "User %(user_id)s is not a member of project %(project_id)s." +msgid "Quota usage for project %(project_id)s could not be found." msgstr "" -#: cinder/exception.py:521 +#: cinder/exception.py:348 #, python-format -msgid "Role %(role_id)s could not be found." +msgid "Quota reservation %(uuid)s could not be found." 
msgstr "" -#: cinder/exception.py:525 -msgid "Cannot find SR to read/write VDI." -msgstr "" - -#: cinder/exception.py:529 +#: cinder/exception.py:352 #, python-format -msgid "%(req)s is required to create a network." +msgid "Quota exceeded for resources: %(overs)s" msgstr "" -#: cinder/exception.py:533 +#: cinder/exception.py:356 #, python-format -msgid "Network %(network_id)s could not be found." +msgid "File %(file_path)s could not be found." msgstr "" -#: cinder/exception.py:537 -#, python-format -msgid "Network could not be found for bridge %(bridge)s" -msgstr "" +#: cinder/exception.py:365 +#, fuzzy, python-format +msgid "Volume Type %(id)s already exists." +msgstr "le groupe %s existe déjà" -#: cinder/exception.py:541 +#: cinder/exception.py:369 #, python-format -msgid "Network could not be found for uuid %(uuid)s" +msgid "Volume type encryption for type %(type_id)s already exists." msgstr "" -#: cinder/exception.py:545 +#: cinder/exception.py:373 #, python-format -msgid "Network could not be found with cidr %(cidr)s." +msgid "Malformed message body: %(reason)s" msgstr "" -#: cinder/exception.py:549 +#: cinder/exception.py:377 #, python-format -msgid "Network could not be found for instance %(instance_id)s." +msgid "Could not find config at %(path)s" msgstr "" -#: cinder/exception.py:553 -msgid "No networks defined." -msgstr "" +#: cinder/exception.py:381 +#, fuzzy, python-format +msgid "Could not find parameter %(param)s" +msgstr "Imposible de trouver une exportation iSCSI pour le volume %s" -#: cinder/exception.py:557 +#: cinder/exception.py:385 #, python-format -msgid "" -"Either Network uuid %(network_uuid)s is not present or is not assigned to" -" the project %(project_id)s." +msgid "Could not load paste app '%(name)s' from %(path)s" msgstr "" -#: cinder/exception.py:562 +#: cinder/exception.py:389 #, python-format -msgid "Host is not set to the network (%(network_id)s)." +msgid "No valid host was found. %(reason)s" msgstr "" -#: cinder/exception.py:566 +#: cinder/exception.py:398 #, python-format -msgid "Network %(network)s has active ports, cannot delete." +msgid "Host %(host)s is not up or doesn't exist." msgstr "" -#: cinder/exception.py:570 -msgid "Could not find the datastore reference(s) which the VM uses." +#: cinder/exception.py:402 +#, python-format +msgid "Quota exceeded: code=%(code)s" msgstr "" -#: cinder/exception.py:574 +#: cinder/exception.py:409 #, python-format -msgid "No fixed IP associated with id %(id)s." +msgid "" +"Requested volume or snapshot exceeds allowed Gigabytes quota. Requested " +"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." msgstr "" -#: cinder/exception.py:578 +#: cinder/exception.py:415 #, python-format -msgid "Fixed ip not found for address %(address)s." +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" msgstr "" -#: cinder/exception.py:582 +#: cinder/exception.py:419 #, python-format -msgid "Instance %(instance_id)s has zero fixed ips." +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" msgstr "" -#: cinder/exception.py:586 +#: cinder/exception.py:423 #, python-format -msgid "Network host %(host)s has zero fixed ips in network %(network_id)s." +msgid "Detected more than one volume with name %(vol_name)s" msgstr "" -#: cinder/exception.py:591 +#: cinder/exception.py:427 #, python-format -msgid "Instance %(instance_id)s doesn't have fixed ip '%(ip)s'." 
+msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" msgstr "" -#: cinder/exception.py:595 +#: cinder/exception.py:432 #, python-format -msgid "Host %(host)s has zero fixed ips." +msgid "Unknown or unsupported command %(cmd)s" msgstr "" -#: cinder/exception.py:599 +#: cinder/exception.py:436 #, python-format -msgid "" -"Fixed IP address (%(address)s) does not exist in network " -"(%(network_uuid)s)." +msgid "Malformed response to command %(cmd)s: %(reason)s" msgstr "" -#: cinder/exception.py:604 +#: cinder/exception.py:440 #, python-format -msgid "Fixed IP address %(address)s is already in use." +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" msgstr "" -#: cinder/exception.py:608 +#: cinder/exception.py:444 #, python-format -msgid "Fixed IP address %(address)s is invalid." +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" msgstr "" -#: cinder/exception.py:612 -msgid "Zero fixed ips available." +#: cinder/exception.py:449 +#, python-format +msgid "Glance metadata for volume/snapshot %(id)s cannot be found." msgstr "" -#: cinder/exception.py:616 -msgid "Zero fixed ips could be found." +#: cinder/exception.py:453 +#, python-format +msgid "Failed to export for volume: %(reason)s" msgstr "" -#: cinder/exception.py:620 +#: cinder/exception.py:457 #, python-format -msgid "Floating ip not found for id %(id)s." +msgid "Failed to create metadata for volume: %(reason)s" msgstr "" -#: cinder/exception.py:624 +#: cinder/exception.py:461 #, python-format -msgid "The DNS entry %(name)s already exists in domain %(domain)s." +msgid "Failed to update metadata for volume: %(reason)s" msgstr "" -#: cinder/exception.py:628 +#: cinder/exception.py:465 #, python-format -msgid "Floating ip not found for address %(address)s." +msgid "Failed to copy metadata to volume: %(reason)s" msgstr "" -#: cinder/exception.py:632 -#, python-format -msgid "Floating ip not found for host %(host)s." +#: cinder/exception.py:469 +#, fuzzy, python-format +msgid "Failed to copy image to volume: %(reason)s" +msgstr "Impossible de trouver le volume %s" + +#: cinder/exception.py:473 +msgid "Invalid Ceph args provided for backup rbd operation" msgstr "" -#: cinder/exception.py:636 -msgid "Zero floating ips available." +#: cinder/exception.py:477 +msgid "An error has occurred during backup operation" msgstr "" -#: cinder/exception.py:640 -#, python-format -msgid "Floating ip %(address)s is associated." +#: cinder/exception.py:481 +msgid "Backup RBD operation failed" msgstr "" -#: cinder/exception.py:644 +#: cinder/exception.py:485 #, python-format -msgid "Floating ip %(address)s is not associated." +msgid "Backup %(backup_id)s could not be found." msgstr "" -#: cinder/exception.py:648 -msgid "Zero floating ips exist." +#: cinder/exception.py:489 +msgid "Failed to identify volume backend." msgstr "" -#: cinder/exception.py:652 -#, python-format -msgid "Interface %(interface)s not found." -msgstr "" +#: cinder/exception.py:493 +#, fuzzy, python-format +msgid "Invalid backup: %(reason)s" +msgstr "Backend invalide : %s" -#: cinder/exception.py:656 +#: cinder/exception.py:497 #, python-format -msgid "Keypair %(name)s not found for user %(user_id)s" +msgid "Connection to swift failed: %(reason)s" msgstr "" -#: cinder/exception.py:660 +#: cinder/exception.py:501 #, python-format -msgid "Certificate %(certificate_id)s not found." +msgid "Transfer %(transfer_id)s could not be found." 
msgstr "" -#: cinder/exception.py:664 +#: cinder/exception.py:505 #, python-format -msgid "Service %(service_id)s could not be found." +msgid "Volume migration failed: %(reason)s" msgstr "" -#: cinder/exception.py:668 +#: cinder/exception.py:509 #, python-format -msgid "Host %(host)s could not be found." +msgid "SSH command injection detected: %(command)s" msgstr "" -#: cinder/exception.py:672 +#: cinder/exception.py:513 #, python-format -msgid "Compute host %(host)s could not be found." +msgid "QoS Specs %(specs_id)s already exists." msgstr "" -#: cinder/exception.py:676 +#: cinder/exception.py:517 #, python-format -msgid "Could not find binary %(binary)s on host %(host)s." +msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." msgstr "" -#: cinder/exception.py:680 +#: cinder/exception.py:522 #, python-format -msgid "Auth token %(token)s could not be found." +msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." msgstr "" -#: cinder/exception.py:684 +#: cinder/exception.py:527 #, python-format -msgid "Access Key %(access_key)s could not be found." +msgid "No such QoS spec %(specs_id)s." msgstr "" -#: cinder/exception.py:688 -msgid "Quota could not be found" +#: cinder/exception.py:531 +#, python-format +msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." msgstr "" -#: cinder/exception.py:692 +#: cinder/exception.py:536 #, python-format -msgid "Quota for project %(project_id)s could not be found." +msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." msgstr "" -#: cinder/exception.py:696 +#: cinder/exception.py:541 #, python-format -msgid "Quota class %(class_name)s could not be found." +msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." msgstr "" -#: cinder/exception.py:700 +#: cinder/exception.py:546 #, python-format -msgid "Security group %(security_group_id)s not found." +msgid "Invalid qos specs: %(reason)s" msgstr "" -#: cinder/exception.py:704 +#: cinder/exception.py:550 #, python-format -msgid "Security group %(security_group_id)s not found for project %(project_id)s." +msgid "QoS Specs %(specs_id)s is still associated with entities." msgstr "" -#: cinder/exception.py:709 +#: cinder/exception.py:554 #, python-format -msgid "Security group with rule %(rule_id)s not found." +msgid "key manager error: %(reason)s" msgstr "" -#: cinder/exception.py:713 -#, python-format -msgid "" -"Security group %(security_group_id)s is already associated with the " -"instance %(instance_id)s" +#: cinder/exception.py:560 +msgid "Coraid Cinder Driver exception." msgstr "" -#: cinder/exception.py:718 -#, python-format -msgid "" -"Security group %(security_group_id)s is not associated with the instance " -"%(instance_id)s" +#: cinder/exception.py:564 +msgid "Failed to encode json data." msgstr "" -#: cinder/exception.py:723 -#, python-format -msgid "Migration %(migration_id)s could not be found." +#: cinder/exception.py:568 +msgid "Login on ESM failed." msgstr "" -#: cinder/exception.py:727 -#, python-format -msgid "Migration not found for instance %(instance_id)s with status %(status)s." +#: cinder/exception.py:572 +msgid "Relogin on ESM failed." msgstr "" -#: cinder/exception.py:732 +#: cinder/exception.py:576 #, python-format -msgid "Console pool %(pool_id)s could not be found." +msgid "Group with name \"%(group_name)s\" not found." 
msgstr "" -#: cinder/exception.py:736 +#: cinder/exception.py:580 #, python-format -msgid "" -"Console pool of type %(console_type)s for compute host %(compute_host)s " -"on proxy host %(host)s not found." +msgid "ESM configure request failed: %(message)s." msgstr "" -#: cinder/exception.py:742 +#: cinder/exception.py:584 #, python-format -msgid "Console %(console_id)s could not be found." +msgid "Coraid ESM not available with reason: %(reason)s." msgstr "" -#: cinder/exception.py:746 -#, python-format -msgid "Console for instance %(instance_id)s could not be found." +#: cinder/exception.py:589 +msgid "Zadara Cinder Driver exception." msgstr "" -#: cinder/exception.py:750 -#, python-format -msgid "" -"Console for instance %(instance_id)s in pool %(pool_id)s could not be " -"found." +#: cinder/exception.py:593 +#, fuzzy, python-format +msgid "Unable to create server object for initiator %(name)s" msgstr "" +"Impossible de créer VDI sur SR %(sr_ref)s pour l'instance " +"%(instance_name)s" -#: cinder/exception.py:755 +#: cinder/exception.py:597 #, python-format -msgid "Invalid console type %(console_type)s " +msgid "Unable to find server object for initiator %(name)s" msgstr "" -#: cinder/exception.py:759 -msgid "Zero instance types found." +#: cinder/exception.py:601 +msgid "Unable to find any active VPSA controller" msgstr "" -#: cinder/exception.py:763 +#: cinder/exception.py:605 #, python-format -msgid "Instance type %(instance_type_id)s could not be found." +msgid "Failed to retrieve attachments for volume %(name)s" msgstr "" -#: cinder/exception.py:767 +#: cinder/exception.py:609 #, python-format -msgid "Instance type with name %(instance_type_name)s could not be found." +msgid "Invalid attachment info for volume %(name)s: %(reason)s" msgstr "" -#: cinder/exception.py:772 +#: cinder/exception.py:613 #, python-format -msgid "Flavor %(flavor_id)s could not be found." +msgid "Bad HTTP response status %(status)s" msgstr "" -#: cinder/exception.py:776 -#, python-format -msgid "Cell %(cell_id)s could not be found." +#: cinder/exception.py:618 +msgid "Bad response from SolidFire API" msgstr "" -#: cinder/exception.py:780 -#, python-format -msgid "Scheduler Host Filter %(filter_name)s could not be found." +#: cinder/exception.py:622 +msgid "SolidFire Cinder Driver exception" msgstr "" -#: cinder/exception.py:784 +#: cinder/exception.py:626 #, python-format -msgid "Scheduler cost function %(cost_fn_str)s could not be found." +msgid "Error in SolidFire API response: data=%(data)s" msgstr "" -#: cinder/exception.py:789 +#: cinder/exception.py:630 #, python-format -msgid "Scheduler weight flag not found: %(flag_name)s" +msgid "Unable to locate account %(account_name)s on Solidfire device" msgstr "" -#: cinder/exception.py:793 +#: cinder/exception.py:636 #, python-format -msgid "Instance %(instance_id)s has no metadata with key %(metadata_key)s." +msgid "Invalid 3PAR Domain: %(err)s" msgstr "" -#: cinder/exception.py:798 -#, python-format -msgid "" -"Instance Type %(instance_type_id)s has no extra specs with key " -"%(extra_specs_key)s." -msgstr "" +#: cinder/exception.py:641 +#, fuzzy +msgid "Unknown NFS exception" +msgstr "Une exception inconnue s'est produite." -#: cinder/exception.py:803 -msgid "LDAP object could not be found" +#: cinder/exception.py:645 +msgid "No mounted NFS shares found" msgstr "" -#: cinder/exception.py:807 +#: cinder/exception.py:649 cinder/exception.py:662 #, python-format -msgid "LDAP user %(user_id)s could not be found." 
+msgid "There is no share which can host %(volume_size)sG" msgstr "" -#: cinder/exception.py:811 -#, python-format -msgid "LDAP group %(group_id)s could not be found." -msgstr "" +#: cinder/exception.py:654 +#, fuzzy +msgid "Unknown Gluster exception" +msgstr "Une exception inconnue s'est produite." -#: cinder/exception.py:815 -#, python-format -msgid "LDAP user %(user_id)s is not a member of group %(group_id)s." +#: cinder/exception.py:658 +msgid "No mounted Gluster shares found" msgstr "" -#: cinder/exception.py:819 -#, python-format -msgid "File %(file_path)s could not be found." +#: cinder/manager.py:133 +msgid "Notifying Schedulers of capabilities ..." msgstr "" -#: cinder/exception.py:823 -msgid "Zero files could be found." +#: cinder/policy.py:30 +msgid "JSON file representing policy" msgstr "" -#: cinder/exception.py:827 -#, python-format -msgid "Virtual switch associated with the network adapter %(adapter)s not found." +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" msgstr "" -#: cinder/exception.py:832 +#: cinder/quota.py:105 #, python-format -msgid "Network adapter %(adapter)s could not be found." +msgid "" +"Default quota for resource: %(res)s is set by the default quota flag: " +"quota_%(res)s, it is now deprecated. Please use the the default quota " +"class for default quota." msgstr "" -#: cinder/exception.py:836 +#: cinder/quota.py:748 #, python-format -msgid "Class %(class_name)s could not be found: %(exception)s" +msgid "Created reservations %s" msgstr "" -#: cinder/exception.py:840 -msgid "Action not allowed." +#: cinder/quota.py:770 +#, python-format +msgid "Failed to commit reservations %s" msgstr "" -#: cinder/exception.py:844 +#: cinder/quota.py:790 #, python-format -msgid "Unable to use global role %(role_id)s" +msgid "Failed to roll back reservations %s" msgstr "" -#: cinder/exception.py:848 -msgid "Rotation is not allowed for snapshots" +#: cinder/quota.py:876 +msgid "Cannot register resource" msgstr "" -#: cinder/exception.py:852 -msgid "Rotation param is required for backup image_type" +#: cinder/quota.py:879 +msgid "Cannot register resources" msgstr "" -#: cinder/exception.py:861 +#: cinder/quota_utils.py:46 #, python-format -msgid "Key pair %(key_name)s already exists." +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume - " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" msgstr "" -#: cinder/exception.py:865 +#: cinder/quota_utils.py:56 cinder/transfer/api.py:168 +#: cinder/volume/flows/api/create_volume.py:520 #, python-format -msgid "User %(user)s already exists." +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" msgstr "" -#: cinder/exception.py:869 +#: cinder/service.py:95 +#, fuzzy, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "Démarrage du noeud %(topic)s (version %(vcs_string)s)" + +#: cinder/service.py:108 cinder/openstack/common/rpc/service.py:47 #, python-format -msgid "LDAP user %(user)s already exists." +msgid "Creating Consumer connection for Service %s" msgstr "" -#: cinder/exception.py:873 +#: cinder/service.py:148 #, python-format -msgid "LDAP group %(group)s already exists." +msgid "" +"Report interval must be less than service down time. Current config " +"service_down_time: %(service_down_time)s, report_interval for this: " +"service is: %(report_interval)s. 
Setting global service_down_time to: " +"%(new_down_time)s" msgstr "" -#: cinder/exception.py:877 -#, python-format -msgid "User %(uid)s is already a member of the group %(group_dn)s" -msgstr "L'utilisateur %(uid)s est déjà membre du groupe %(group_dn)s" +#: cinder/service.py:216 +msgid "Service killed that has no database entry" +msgstr "Service détruit sans entrée dans la base de données" + +#: cinder/service.py:255 +msgid "The service database object disappeared, Recreating it." +msgstr "L'objet du service de base de données à disparru, re-création en cours." + +#: cinder/service.py:270 +msgid "Recovered model server connection!" +msgstr "Récupération du modelle de connexion serveur terminée!" + +#: cinder/service.py:276 +msgid "model server went away" +msgstr "Le modèle de serveur à disparu" -#: cinder/exception.py:882 +#: cinder/service.py:298 #, python-format -msgid "Project %(project)s already exists." +msgid "" +"Value of config option %(name)s_workers must be integer greater than 1. " +"Input value ignored." msgstr "" -#: cinder/exception.py:886 -#, python-format -msgid "Instance %(name)s already exists." +#: cinder/service.py:373 +msgid "serve() can only be called once" msgstr "" -#: cinder/exception.py:890 +#: cinder/service.py:379 cinder/openstack/common/service.py:166 +#: cinder/openstack/common/service.py:384 +#, fuzzy +msgid "Full set of CONF:" +msgstr "Ensemble de propriétés complet :" + +#: cinder/service.py:387 #, python-format -msgid "Instance Type %(name)s already exists." +msgid "%s : FLAG SET " msgstr "" -#: cinder/exception.py:894 +#: cinder/utils.py:96 #, python-format -msgid "Volume Type %(name)s already exists." +msgid "Can not translate %s to integer." msgstr "" -#: cinder/exception.py:898 +#: cinder/utils.py:127 #, python-format -msgid "%(path)s is on shared storage: %(reason)s" +msgid "May specify only one of %s" msgstr "" -#: cinder/exception.py:902 -msgid "Migration error" +#: cinder/utils.py:212 +msgid "Specify a password or private_key" msgstr "" -#: cinder/exception.py:906 +#: cinder/utils.py:228 +#, fuzzy, python-format +msgid "Error connecting via ssh: %s" +msgstr "Connexion à libvirt: %s" + +#: cinder/utils.py:412 #, python-format -msgid "Malformed message body: %(reason)s" -msgstr "" +msgid "Invalid backend: %s" +msgstr "Backend invalide : %s" -#: cinder/exception.py:910 +#: cinder/utils.py:423 #, python-format -msgid "Could not find config at %(path)s" -msgstr "" +msgid "backend %s" +msgstr "backend %s" -#: cinder/exception.py:914 +#: cinder/utils.py:698 #, python-format -msgid "Could not load paste app '%(name)s' from %(path)s" +msgid "Could not remove tmpdir: %s" msgstr "" -#: cinder/exception.py:918 -msgid "When resizing, instances must change size!" +#: cinder/utils.py:759 +#, python-format +msgid "Volume driver %s not initialized" msgstr "" -#: cinder/exception.py:922 -msgid "Image is larger than instance type allows" -msgstr "" +#: cinder/wsgi.py:127 cinder/openstack/common/sslutils.py:50 +#, fuzzy, python-format +msgid "Unable to find cert_file : %s" +msgstr "Impossible de trouver SR du VDB %s" -#: cinder/exception.py:926 -msgid "1 or more Zones could not complete the request" -msgstr "" +#: cinder/wsgi.py:130 cinder/openstack/common/sslutils.py:53 +#, fuzzy, python-format +msgid "Unable to find ca_file : %s" +msgstr "Impossible de trouver SR du VDB %s" -#: cinder/exception.py:930 -msgid "Instance type's memory is too small for requested image." 
-msgstr "" +#: cinder/wsgi.py:133 cinder/openstack/common/sslutils.py:56 +#, fuzzy, python-format +msgid "Unable to find key_file : %s" +msgstr "Impossible de trouver SR du VDB %s" -#: cinder/exception.py:934 -msgid "Instance type's disk is too small for requested image." +#: cinder/wsgi.py:136 cinder/openstack/common/sslutils.py:59 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" msgstr "" -#: cinder/exception.py:938 +#: cinder/wsgi.py:169 #, python-format -msgid "Insufficient free memory on compute node to start %(uuid)s." +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" msgstr "" -#: cinder/exception.py:942 -msgid "Could not fetch bandwidth/cpu/disk metrics for this host." +#: cinder/wsgi.py:206 +#, python-format +msgid "Started %(name)s on %(host)s:%(port)s" msgstr "" -#: cinder/exception.py:946 -#, python-format -msgid "No valid host was found. %(reason)s" +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." msgstr "" -#: cinder/exception.py:950 -#, python-format -msgid "Host %(host)s is not up or doesn't exist." +#: cinder/wsgi.py:244 +msgid "WSGI server has stopped." msgstr "" -#: cinder/exception.py:954 -msgid "Quota exceeded" +#: cinder/wsgi.py:313 +msgid "You must implement __call__" +msgstr "Vous devez implémenter __call__" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." msgstr "" -#: cinder/exception.py:958 -#, python-format +#: cinder/api/auth.py:34 msgid "" -"Aggregate %(aggregate_id)s: action '%(action)s' caused an error: " -"%(reason)s." +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." msgstr "" -#: cinder/exception.py:963 -#, python-format -msgid "Aggregate %(aggregate_id)s could not be found." +#: cinder/api/common.py:92 cinder/api/common.py:126 cinder/volume/api.py:266 +msgid "limit param must be an integer" msgstr "" -#: cinder/exception.py:967 -#, python-format -msgid "Aggregate %(aggregate_name)s already exists." +#: cinder/api/common.py:95 cinder/api/common.py:130 cinder/volume/api.py:263 +msgid "limit param must be positive" msgstr "" -#: cinder/exception.py:971 -#, python-format -msgid "Aggregate %(aggregate_id)s has no host %(host)s." +#: cinder/api/common.py:120 +msgid "offset param must be an integer" msgstr "" -#: cinder/exception.py:975 -#, python-format -msgid "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s." +#: cinder/api/common.py:134 +msgid "offset param must be positive" msgstr "" -#: cinder/exception.py:980 +#: cinder/api/common.py:162 #, python-format -msgid "Host %(host)s already member of another aggregate." +msgid "marker [%s] not found" msgstr "" -#: cinder/exception.py:984 +#: cinder/api/common.py:189 #, python-format -msgid "Aggregate %(aggregate_id)s already has host %(host)s." +msgid "href %s does not contain version" msgstr "" -#: cinder/exception.py:988 -#, python-format -msgid "Detected more than one volume with name %(vol_name)s" +#: cinder/api/extensions.py:182 +msgid "Initializing extension manager." 
msgstr "" -#: cinder/exception.py:992 +#: cinder/api/extensions.py:197 #, python-format -msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgid "Loaded extension: %s" msgstr "" -#: cinder/exception.py:997 -msgid "Unable to create instance type" +#: cinder/api/extensions.py:235 +#, python-format +msgid "Ext name: %s" msgstr "" -#: cinder/exception.py:1001 -msgid "Bad response from SolidFire API" +#: cinder/api/extensions.py:236 +#, python-format +msgid "Ext alias: %s" msgstr "" -#: cinder/exception.py:1005 +#: cinder/api/extensions.py:237 #, python-format -msgid "Error in SolidFire API response: status=%(status)s" +msgid "Ext description: %s" msgstr "" -#: cinder/exception.py:1009 +#: cinder/api/extensions.py:239 #, python-format -msgid "Error in SolidFire API response: data=%(data)s" +msgid "Ext namespace: %s" msgstr "" -#: cinder/exception.py:1013 +#: cinder/api/extensions.py:240 #, python-format -msgid "Detected existing vlan with id %(vlan)d" +msgid "Ext updated: %s" msgstr "" -#: cinder/exception.py:1017 +#: cinder/api/extensions.py:242 #, python-format -msgid "Instance %(instance_id)s could not be found." +msgid "Exception loading extension: %s" msgstr "" -#: cinder/exception.py:1021 +#: cinder/api/extensions.py:256 #, python-format -msgid "Invalid id: %(val)s (expecting \"i-...\")." +msgid "Loading extension %s" msgstr "" -#: cinder/exception.py:1025 -#, fuzzy, python-format -msgid "Could not fetch image %(image)s" -msgstr "Impossible de lier l'image au loopback : %s" - -#: cinder/log.py:315 +#: cinder/api/extensions.py:262 #, python-format -msgid "syslog facility must be one of: %s" +msgid "Calling extension factory %s" msgstr "" -#: cinder/manager.py:146 +#: cinder/api/extensions.py:276 #, python-format -msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgid "osapi_volume_extension is set to deprecated path: %s" msgstr "" -#: cinder/manager.py:152 +#: cinder/api/extensions.py:278 #, python-format -msgid "Running periodic task %(full_task_name)s" +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" msgstr "" -#: cinder/manager.py:159 +#: cinder/api/extensions.py:287 #, python-format -msgid "Error during %(full_task_name)s: %(e)s" +msgid "Failed to load extension %(ext_factory)s: %(exc)s" msgstr "" -#: cinder/manager.py:203 -msgid "Notifying Schedulers of capabilities ..." +#: cinder/api/extensions.py:356 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" msgstr "" -#: cinder/policy.py:30 -msgid "JSON file representing policy" +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" msgstr "" -#: cinder/policy.py:33 -msgid "Rule checked when requested rule is not found" +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. 
Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" msgstr "" -#: cinder/service.py:137 -msgid "SIGTERM received" +#: cinder/api/xmlutil.py:266 +msgid "element is not a child" msgstr "" -#: cinder/service.py:177 -#, python-format -msgid "Starting %(topic)s node (version %(vcs_string)s)" -msgstr "Démarrage du noeud %(topic)s (version %(vcs_string)s)" +#: cinder/api/xmlutil.py:463 +msgid "root element selecting a list" +msgstr "" -#: cinder/service.py:195 +#: cinder/api/xmlutil.py:786 #, python-format -msgid "Creating Consumer connection for Service %s" +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" msgstr "" -#: cinder/service.py:282 -msgid "Service killed that has no database entry" -msgstr "Service détruit sans entrée dans la base de données" - -#: cinder/service.py:319 -msgid "The service database object disappeared, Recreating it." -msgstr "L'objet du service de base de données à disparru, re-création en cours." - -#: cinder/service.py:334 -msgid "Recovered model server connection!" -msgstr "Récupération du modelle de connexion serveur terminée!" - -#: cinder/service.py:340 -msgid "model server went away" -msgstr "Le modèle de serveur à disparu" - -#: cinder/service.py:433 -msgid "Full set of FLAGS:" -msgstr "Ensemble de propriétés complet :" +#: cinder/api/xmlutil.py:907 +msgid "subclasses must implement construct()!" +msgstr "" -#: cinder/service.py:440 +#: cinder/api/contrib/admin_actions.py:81 #, python-format -msgid "%(flag)s : FLAG SET " +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" msgstr "" -#: cinder/utils.py:79 +#: cinder/api/contrib/backups.py:128 #, python-format -msgid "Inner Exception: %s" -msgstr "Exception interne : %s" +msgid "show called for member %s" +msgstr "" -#: cinder/utils.py:165 -#, python-format -msgid "Fetching %s" -msgstr "Récupèration de %s" +#: cinder/api/contrib/backups.py:140 +#, fuzzy, python-format +msgid "delete called for member %s" +msgstr "Clef secrète changée pour l'utilisateur %s" -#: cinder/utils.py:210 +#: cinder/api/contrib/backups.py:143 #, python-format -msgid "Got unknown keyword args to utils.execute: %r" +msgid "Delete backup with id: %s" msgstr "" -#: cinder/utils.py:220 +#: cinder/api/contrib/backups.py:185 #, python-format -msgid "Running cmd (subprocess): %s" -msgstr "Execution de la commande (sous-processus) : %s" +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:227 +#: cinder/api/contrib/volume_transfer.py:157 +#: cinder/api/contrib/volume_transfer.py:193 +msgid "Incorrect request body format" +msgstr "" -#: cinder/utils.py:236 cinder/utils.py:315 +#: cinder/api/contrib/backups.py:201 #, python-format -msgid "Result was %s" -msgstr "Le résultat était %s" +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" -#: cinder/utils.py:249 +#: cinder/api/contrib/backups.py:224 #, python-format -msgid "%r failed. Retrying." +msgid "Restoring backup %(backup_id)s (%(body)s)" msgstr "" -#: cinder/utils.py:291 +#: cinder/api/contrib/backups.py:234 #, python-format -msgid "Running cmd (SSH): %s" -msgstr "Execution de la cmd (SSH): %s" +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" -#: cinder/utils.py:293 -msgid "Environment not supported over SSH" +#: cinder/api/contrib/extended_snapshot_attributes.py:60 +msgid "Snapshot not found." 
msgstr "" -#: cinder/utils.py:297 -msgid "process_input not supported over SSH" +#: cinder/api/contrib/hosts.py:86 cinder/api/openstack/wsgi.py:245 +msgid "cannot understand XML" msgstr "" -#: cinder/utils.py:352 +#: cinder/api/contrib/hosts.py:136 #, python-format -msgid "debug in callback: %s" -msgstr "Debug dans le rappel : %s" +msgid "Host '%s' could not be found." +msgstr "" -#: cinder/utils.py:534 +#: cinder/api/contrib/hosts.py:165 #, python-format -msgid "Link Local address is not found.:%s" -msgstr "L'adresse du lien local n'a pas été trouvé :%s" +msgid "Invalid status: '%s'" +msgstr "" -#: cinder/utils.py:537 +#: cinder/api/contrib/hosts.py:168 #, python-format -msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" -msgstr "Impossible de trouver l'IP du lien local de %(interface)s :%(ex)s" +msgid "Invalid update setting: '%s'" +msgstr "" -#: cinder/utils.py:648 +#: cinder/api/contrib/hosts.py:180 #, python-format -msgid "Invalid backend: %s" -msgstr "Backend invalide : %s" +msgid "Setting host %(host)s to %(state)s." +msgstr "" -#: cinder/utils.py:659 -#, python-format -msgid "backend %s" -msgstr "backend %s" +#: cinder/api/contrib/hosts.py:206 +msgid "Describe-resource is admin only functionality" +msgstr "" -#: cinder/utils.py:709 -msgid "in looping call" +#: cinder/api/contrib/hosts.py:214 +msgid "Host not found" msgstr "" -#: cinder/utils.py:927 -#, python-format -msgid "Attempting to grab semaphore \"%(lock)s\" for method \"%(method)s\"..." +#: cinder/api/contrib/qos_specs_manage.py:111 +msgid "Please specify a name for QoS specs." msgstr "" -#: cinder/utils.py:931 -#, python-format -msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +#: cinder/api/contrib/qos_specs_manage.py:220 +msgid "Failed to disassociate qos specs." msgstr "" -#: cinder/utils.py:935 -#, python-format -msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +#: cinder/api/contrib/qos_specs_manage.py:222 +msgid "Qos specs still in use." msgstr "" -#: cinder/utils.py:942 -#, python-format -msgid "Got file lock \"%(lock)s\" for method \"%(method)s\"..." +#: cinder/api/contrib/qos_specs_manage.py:298 +#: cinder/api/contrib/qos_specs_manage.py:351 +msgid "Volume Type id must not be None." msgstr "" -#: cinder/utils.py:1001 -#, python-format -msgid "Found sentinel %(filename)s for pid %(pid)s" +#: cinder/api/contrib/quota_classes.py:72 +msgid "Missing required element quota_class_set in request body." msgstr "" -#: cinder/utils.py:1008 -#, python-format -msgid "Cleaned sentinel %(filename)s for pid %(pid)s" +#: cinder/api/contrib/quota_classes.py:81 +msgid "Quota class limit must be specified as an integer value." msgstr "" -#: cinder/utils.py:1023 -#, python-format -msgid "Found lockfile %(file)s with link count %(count)d" +#: cinder/api/contrib/quota_classes.py:85 +msgid "Quota class limit must be -1 or greater." msgstr "" -#: cinder/utils.py:1028 -#, python-format -msgid "Cleaned lockfile %(file)s with link count %(count)d" +#: cinder/api/contrib/quotas.py:60 +msgid "Quota limit must be specified as an integer value." msgstr "" -#: cinder/utils.py:1138 -#, python-format -msgid "Expected object of type: %s" +#: cinder/api/contrib/quotas.py:65 +msgid "Quota limit must be -1 or greater." msgstr "" -#: cinder/utils.py:1169 -#, python-format -msgid "Invalid server_string: %s" +#: cinder/api/contrib/quotas.py:100 +msgid "Missing required element quota_set in request body." 
msgstr "" -#: cinder/utils.py:1298 +#: cinder/api/contrib/quotas.py:111 #, python-format -msgid "timefunc: '%(name)s' took %(total_time).2f secs" +msgid "Bad key(s) in quota set: %s" msgstr "" -#: cinder/utils.py:1330 -msgid "Original exception being dropped" +#: cinder/api/contrib/scheduler_hints.py:36 +msgid "Malformed scheduler_hints attribute" msgstr "" -#: cinder/utils.py:1461 -#, python-format -msgid "Class %(fullname)s is deprecated: %(msg)s" +#: cinder/api/contrib/services.py:84 +msgid "" +"Query by service parameter is deprecated. Please use binary parameter " +"instead." msgstr "" -#: cinder/utils.py:1463 -#, python-format -msgid "Class %(fullname)s is deprecated" +#: cinder/api/contrib/snapshot_actions.py:51 +msgid "'status' must be specified." msgstr "" -#: cinder/utils.py:1495 +#: cinder/api/contrib/snapshot_actions.py:61 #, python-format -msgid "Function %(name)s in %(location)s is deprecated: %(msg)s" +msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" msgstr "" -#: cinder/utils.py:1497 +#: cinder/api/contrib/snapshot_actions.py:67 #, python-format -msgid "Function %(name)s in %(location)s is deprecated" +msgid "" +"Provided snapshot status %(provided)s not allowed for snapshot with " +"status %(current)s." msgstr "" -#: cinder/utils.py:1681 -#, python-format -msgid "Could not remove tmpdir: %s" +#: cinder/api/contrib/snapshot_actions.py:79 +msgid "progress must be an integer percentage" msgstr "" -#: cinder/wsgi.py:97 -#, python-format -msgid "Started %(name)s on %(host)s:%(port)s" +#: cinder/api/contrib/types_extra_specs.py:101 +msgid "Request body empty" msgstr "" -#: cinder/wsgi.py:108 -msgid "Stopping WSGI server." +#: cinder/api/contrib/types_extra_specs.py:105 +#: cinder/api/v1/snapshot_metadata.py:75 cinder/api/v1/volume_metadata.py:75 +#: cinder/api/v2/snapshot_metadata.py:75 cinder/api/v2/volume_metadata.py:74 +msgid "Request body and URI mismatch" msgstr "" -#: cinder/wsgi.py:111 -msgid "Stopping raw TCP server." +#: cinder/api/contrib/types_extra_specs.py:108 +#: cinder/api/v1/snapshot_metadata.py:79 cinder/api/v1/volume_metadata.py:79 +#: cinder/api/v2/snapshot_metadata.py:79 cinder/api/v2/volume_metadata.py:78 +msgid "Request body contains too many items" msgstr "" -#: cinder/wsgi.py:117 -#, python-format -msgid "Starting TCP server %(arg0)s on %(host)s:%(port)s" +#: cinder/api/contrib/types_extra_specs.py:150 +msgid "" +"Key names can only contain alphanumeric characters, underscores, periods," +" colons and hyphens." msgstr "" -#: cinder/wsgi.py:133 -msgid "WSGI server has stopped." +#: cinder/api/contrib/volume_actions.py:99 +#, python-format +msgid "" +"Invalid request to attach volume to an instance %(instance_uuid)s and a " +"host %(host_name)s simultaneously" msgstr "" -#: cinder/wsgi.py:211 -msgid "You must implement __call__" -msgstr "Vous devez implémenter __call__" - -#: cinder/api/direct.py:218 -msgid "not available" -msgstr "non disponible" +#: cinder/api/contrib/volume_actions.py:107 +msgid "Invalid request to attach volume to an invalid target" +msgstr "" -#: cinder/api/direct.py:299 -#, python-format -msgid "Returned non-serializeable type: %s" +#: cinder/api/contrib/volume_actions.py:111 +msgid "" +"Invalid request to attach volume with an invalid mode. Attaching mode " +"should be 'rw' or 'ro'" msgstr "" -#: cinder/api/sizelimit.py:51 -msgid "Request is too large." +#: cinder/api/contrib/volume_actions.py:196 +msgid "Unable to fetch connection information from backend." 
msgstr "" -#: cinder/api/validator.py:142 -#, python-format -msgid "%(key)s with value %(value)s failed validator %(validator)s" +#: cinder/api/contrib/volume_actions.py:216 +msgid "Unable to terminate volume connection from backend." msgstr "" -#: cinder/api/ec2/__init__.py:73 -#, python-format -msgid "%(code)s: %(message)s" +#: cinder/api/contrib/volume_actions.py:229 +msgid "No image_name was specified in request." msgstr "" -#: cinder/api/ec2/__init__.py:95 -#, python-format -msgid "FaultWrapper: %s" +#: cinder/api/contrib/volume_actions.py:237 +msgid "Bad value for 'force' parameter." msgstr "" -#: cinder/api/ec2/__init__.py:170 -msgid "Too many failed authentications." -msgstr "Trop d'erreur d'authentification" +#: cinder/api/contrib/volume_actions.py:240 +msgid "'force' is not string or bool." +msgstr "" -#: cinder/api/ec2/__init__.py:180 -#, python-format -msgid "" -"Access key %(access_key)s has had %(failures)d failed authentications and" -" will be locked out for %(lock_mins)d minutes." +#: cinder/api/contrib/volume_actions.py:280 +msgid "New volume size must be specified as an integer." msgstr "" -"La clef d'accès %(access_key)s a rencontrée %(failures)d echecs " -"d'authentification et sera par conséquent vérouillée pour %(lock_mins)d " -"minutes." -#: cinder/api/ec2/__init__.py:267 -msgid "Signature not provided" +#: cinder/api/contrib/volume_actions.py:299 +msgid "Must specify readonly in request." msgstr "" -#: cinder/api/ec2/__init__.py:271 -msgid "Access key not provided" +#: cinder/api/contrib/volume_actions.py:307 +msgid "Bad value for 'readonly'" msgstr "" -#: cinder/api/ec2/__init__.py:306 cinder/api/ec2/__init__.py:319 -msgid "Failure communicating with keystone" +#: cinder/api/contrib/volume_actions.py:311 +msgid "'readonly' not string or bool" msgstr "" -#: cinder/api/ec2/__init__.py:388 -#, python-format -msgid "Authentication Failure: %s" -msgstr "Echec d'authentification : %s" +#: cinder/api/contrib/volume_actions.py:325 +msgid "New volume type must be specified." +msgstr "" -#: cinder/api/ec2/__init__.py:404 -#, python-format -msgid "Authenticated Request For %(uname)s:%(pname)s)" -msgstr "Requête authentifiée pour : %(uname)s:%(pname)s)" +#: cinder/api/contrib/volume_transfer.py:131 +msgid "Listing volume transfers" +msgstr "" -#: cinder/api/ec2/__init__.py:435 -#, python-format -msgid "action: %s" -msgstr "action: %s" +#: cinder/api/contrib/volume_transfer.py:147 +#, fuzzy, python-format +msgid "Creating new volume transfer %s" +msgstr "Création d'un volume de %s Go" -#: cinder/api/ec2/__init__.py:437 -#, python-format -msgid "arg: %(key)s\t\tval: %(value)s" -msgstr "arg: %(key)s\t\tval: %(value)s" +#: cinder/api/contrib/volume_transfer.py:162 +#, fuzzy, python-format +msgid "Creating transfer of volume %s" +msgstr "Tentative de suppression d'une console non existente %(console_id)s." 
-#: cinder/api/ec2/__init__.py:512 +#: cinder/api/contrib/volume_transfer.py:183 #, python-format -msgid "Unauthorized request for controller=%(controller)s and action=%(action)s" +msgid "Accepting volume transfer %s" msgstr "" -"Requête non authorisé pour le controlleur=%(controller)s et " -"l'action=%(action)s" -#: cinder/api/ec2/__init__.py:584 -#, python-format -msgid "InstanceNotFound raised: %s" -msgstr "\"Instance non trouvée\" remontée : %s" +#: cinder/api/contrib/volume_transfer.py:196 +#, fuzzy, python-format +msgid "Accepting transfer %s" +msgstr "Création d'un volume de %s Go" -#: cinder/api/ec2/__init__.py:590 +#: cinder/api/contrib/volume_transfer.py:217 #, python-format -msgid "VolumeNotFound raised: %s" -msgstr "\"Volume non trouvé\" remonté : %s" +msgid "Delete transfer with id: %s" +msgstr "" -#: cinder/api/ec2/__init__.py:596 -#, python-format -msgid "SnapshotNotFound raised: %s" +#: cinder/api/contrib/volume_type_encryption.py:64 +msgid "key_size must be non-negative" msgstr "" -#: cinder/api/ec2/__init__.py:602 -#, python-format -msgid "NotFound raised: %s" -msgstr "\"Non trouvé\" remonté : %s" +#: cinder/api/contrib/volume_type_encryption.py:67 +msgid "key_size must be an integer" +msgstr "" -#: cinder/api/ec2/__init__.py:605 -#, python-format -msgid "EC2APIError raised: %s" +#: cinder/api/contrib/volume_type_encryption.py:73 +msgid "provider must be defined" msgstr "" -#: cinder/api/ec2/__init__.py:613 -#, python-format -msgid "KeyPairExists raised: %s" +#: cinder/api/contrib/volume_type_encryption.py:75 +msgid "control_location must be defined" msgstr "" -#: cinder/api/ec2/__init__.py:617 +#: cinder/api/contrib/volume_type_encryption.py:83 #, python-format -msgid "InvalidParameterValue raised: %s" +msgid "Valid control location are: %s" msgstr "" -#: cinder/api/ec2/__init__.py:621 -#, python-format -msgid "InvalidPortRange raised: %s" +#: cinder/api/contrib/volume_type_encryption.py:111 +msgid "Cannot create encryption specs. Volume type in use." msgstr "" -#: cinder/api/ec2/__init__.py:625 -#, python-format -msgid "NotAuthorized raised: %s" +#: cinder/api/contrib/volume_type_encryption.py:115 +msgid "Create body is not valid." msgstr "" -#: cinder/api/ec2/__init__.py:629 -#, python-format -msgid "InvalidRequest raised: %s" +#: cinder/api/contrib/volume_type_encryption.py:157 +msgid "Cannot delete encryption specs. Volume type in use." msgstr "" -#: cinder/api/ec2/__init__.py:633 -#, fuzzy, python-format -msgid "QuotaError raised: %s" -msgstr "\"Erreur inopinée\" remontée : %s" +#: cinder/api/middleware/auth.py:108 +msgid "Invalid service catalog json." +msgstr "" -#: cinder/api/ec2/__init__.py:637 +#: cinder/api/middleware/fault.py:44 #, python-format -msgid "Invalid id: bogus (expecting \"i-...\"): %s" -msgstr "" +msgid "Caught error: %s" +msgstr "Erreur interceptée : %s" -#: cinder/api/ec2/__init__.py:646 +#: cinder/api/middleware/fault.py:53 cinder/api/openstack/wsgi.py:984 #, python-format -msgid "Unexpected error raised: %s" -msgstr "\"Erreur inopinée\" remontée : %s" +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" -#: cinder/api/ec2/__init__.py:647 +#: cinder/api/middleware/fault.py:69 #, python-format -msgid "Environment: %s" +msgid "%(exception)s: %(explanation)s" msgstr "" -#: cinder/api/ec2/__init__.py:649 cinder/api/metadata/handler.py:248 -msgid "An unknown error has occurred. Please try your request again." -msgstr "Une erreur inopinée à eu lieu. Merci d'essayer votre requête à nouveau." 
+#: cinder/api/middleware/sizelimit.py:55 cinder/api/middleware/sizelimit.py:64 +#: cinder/api/middleware/sizelimit.py:78 +msgid "Request is too large." +msgstr "" -#: cinder/api/ec2/apirequest.py:64 -#, python-format -msgid "Unsupported API request: controller = %(controller)s, action = %(action)s" +#: cinder/api/openstack/__init__.py:69 +msgid "Must specify an ExtensionManager class" msgstr "" -"Requête API non supportée : controleur = %(controller)s, action = " -"%(action)s" -#: cinder/api/ec2/cloud.py:336 +#: cinder/api/openstack/__init__.py:80 #, python-format -msgid "Create snapshot of volume %s" +msgid "Extended resource: %s" msgstr "" -#: cinder/api/ec2/cloud.py:372 +#: cinder/api/openstack/__init__.py:104 #, python-format msgid "" -"Value (%s) for KeyName is invalid. Content limited to Alphanumeric " -"character, spaces, dashes, and underscore." +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" msgstr "" -#: cinder/api/ec2/cloud.py:378 +#: cinder/api/openstack/__init__.py:110 #, python-format -msgid "Value (%s) for Keyname is invalid. Length exceeds maximum of 255." +msgid "Extension %(ext_name)s extending resource: %(collection)s" msgstr "" -#: cinder/api/ec2/cloud.py:382 -#, python-format -msgid "Create key pair %s" -msgstr "Création du bi-clef %s" - -#: cinder/api/ec2/cloud.py:391 -#, python-format -msgid "Import key %s" +#: cinder/api/openstack/__init__.py:126 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." msgstr "" -#: cinder/api/ec2/cloud.py:409 -#, python-format -msgid "Delete key pair %s" -msgstr "Suppression du bi-clef %s" +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." +msgstr "" -#: cinder/api/ec2/cloud.py:551 -#, fuzzy, python-format -msgid "Invalid CIDR" -msgstr "Le cidr %(cidr)s est invalide" +#: cinder/api/openstack/wsgi.py:220 cinder/api/openstack/wsgi.py:634 +msgid "cannot understand JSON" +msgstr "" -#: cinder/api/ec2/cloud.py:639 cinder/api/ec2/cloud.py:693 -#: cinder/api/ec2/cloud.py:800 -msgid "Not enough parameters, need group_name or group_id" +#: cinder/api/openstack/wsgi.py:639 +msgid "too many body keys" msgstr "" -#: cinder/api/ec2/cloud.py:654 -#: cinder/api/openstack/compute/contrib/security_groups.py:517 +#: cinder/api/openstack/wsgi.py:677 #, python-format -msgid "Revoke security group ingress %s" -msgstr "Révocation de groupe de sécurité %s" - -#: cinder/api/ec2/cloud.py:666 cinder/api/ec2/cloud.py:719 -#, fuzzy, python-format -msgid "%s Not enough parameters to build a valid rule" -msgstr "Pas assez de parametres pour contruire un règle valide." - -#: cinder/api/ec2/cloud.py:684 cinder/api/ec2/cloud.py:744 -msgid "No rule for the specified parameters." -msgstr "Pas de règle pour les paramètres spécifiés." 
+msgid "Exception handling resource: %s" +msgstr "" -#: cinder/api/ec2/cloud.py:708 -#: cinder/api/openstack/compute/contrib/security_groups.py:354 +#: cinder/api/openstack/wsgi.py:682 #, python-format -msgid "Authorize security group ingress %s" -msgstr "Authorisation du groupe de sécurité %s" +msgid "Fault thrown: %s" +msgstr "" -#: cinder/api/ec2/cloud.py:725 -#, fuzzy, python-format -msgid "%s - This rule already exists in group" -msgstr "Cette règle existe déjà dans le groupe %s" - -#: cinder/api/ec2/cloud.py:769 +#: cinder/api/openstack/wsgi.py:685 #, python-format -msgid "" -"Value (%s) for parameter GroupName is invalid. Content limited to " -"Alphanumeric characters, spaces, dashes, and underscores." +msgid "HTTP exception thrown: %s" msgstr "" -#: cinder/api/ec2/cloud.py:776 -#, python-format -msgid "" -"Value (%s) for parameter GroupName is invalid. Length exceeds maximum of " -"255." +#: cinder/api/openstack/wsgi.py:793 +msgid "Empty body provided in request" msgstr "" -#: cinder/api/ec2/cloud.py:780 -#: cinder/api/openstack/compute/contrib/security_groups.py:292 -#, python-format -msgid "Create Security Group %s" -msgstr "Création du groupe de sécurité %s" +#: cinder/api/openstack/wsgi.py:799 +msgid "Unrecognized Content-Type provided in request" +msgstr "" -#: cinder/api/ec2/cloud.py:783 -#, python-format -msgid "group %s already exists" -msgstr "le groupe %s existe déjà" +#: cinder/api/openstack/wsgi.py:803 +msgid "No Content-Type provided in request" +msgstr "" -#: cinder/api/ec2/cloud.py:815 -#: cinder/api/openstack/compute/contrib/security_groups.py:245 +#: cinder/api/openstack/wsgi.py:914 #, python-format -msgid "Delete security group %s" -msgstr "Suppression du groupe de sécurité %s" +msgid "There is no such action: %s" +msgstr "" -#: cinder/api/ec2/cloud.py:823 cinder/compute/manager.py:1630 -#, python-format -msgid "Get console output for instance %s" -msgstr "Récupération de la sortie de la console de l'instance %s" +#: cinder/api/openstack/wsgi.py:917 cinder/api/openstack/wsgi.py:930 +#: cinder/api/v1/snapshot_metadata.py:53 cinder/api/v1/snapshot_metadata.py:71 +#: cinder/api/v1/snapshot_metadata.py:96 cinder/api/v1/snapshot_metadata.py:121 +#: cinder/api/v1/volume_metadata.py:53 cinder/api/v1/volume_metadata.py:71 +#: cinder/api/v1/volume_metadata.py:96 cinder/api/v1/volume_metadata.py:121 +#: cinder/api/v2/snapshot_metadata.py:53 cinder/api/v2/snapshot_metadata.py:71 +#: cinder/api/v2/snapshot_metadata.py:96 cinder/api/v2/snapshot_metadata.py:121 +#: cinder/api/v2/volume_metadata.py:52 cinder/api/v2/volume_metadata.py:70 +#: cinder/api/v2/volume_metadata.py:95 cinder/api/v2/volume_metadata.py:120 +msgid "Malformed request body" +msgstr "" -#: cinder/api/ec2/cloud.py:894 -#, python-format -msgid "Create volume from snapshot %s" +#: cinder/api/openstack/wsgi.py:927 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:939 +msgid "Malformed request url" msgstr "" -#: cinder/api/ec2/cloud.py:898 cinder/api/openstack/compute/contrib/volumes.py:186 -#: cinder/api/openstack/volume/volumes.py:222 +#: cinder/api/openstack/wsgi.py:987 #, python-format -msgid "Create volume of %s GB" -msgstr "Création d'un volume de %s Go" +msgid "%(url)s returned a fault: %(e)s" +msgstr "" -#: cinder/api/ec2/cloud.py:921 -msgid "Delete Failed" +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." 
+msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." msgstr "" -#: cinder/api/ec2/cloud.py:931 +#: cinder/api/v1/limits.py:139 cinder/api/v2/limits.py:138 #, python-format -msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:264 cinder/api/v2/limits.py:261 +msgid "This request was rate-limited." msgstr "" -"Montage du volume %(volume_id)s sur l'instance %(instance_id)s en tant " -"que %(device)s" -#: cinder/api/ec2/cloud.py:939 -msgid "Attach Failed." +#: cinder/api/v1/snapshot_metadata.py:37 cinder/api/v1/snapshot_metadata.py:117 +#: cinder/api/v1/snapshot_metadata.py:156 cinder/api/v2/snapshot_metadata.py:37 +#: cinder/api/v2/snapshot_metadata.py:117 +#: cinder/api/v2/snapshot_metadata.py:156 +#, fuzzy +msgid "snapshot does not exist" +msgstr "instance %s: création d'un instantané (snapshot)" + +#: cinder/api/v1/snapshot_metadata.py:139 +#: cinder/api/v1/snapshot_metadata.py:149 cinder/api/v1/volume_metadata.py:139 +#: cinder/api/v1/volume_metadata.py:149 cinder/api/v2/snapshot_metadata.py:139 +#: cinder/api/v2/snapshot_metadata.py:149 cinder/api/v2/volume_metadata.py:138 +#: cinder/api/v2/volume_metadata.py:148 +msgid "Metadata item was not found" msgstr "" -#: cinder/api/ec2/cloud.py:952 cinder/api/openstack/compute/contrib/volumes.py:366 +#: cinder/api/v1/snapshots.py:119 cinder/api/v2/snapshots.py:120 #, python-format -msgid "Detach volume %s" -msgstr "Dé-montage du volume %s" +msgid "Delete snapshot with id: %s" +msgstr "" -#: cinder/api/ec2/cloud.py:959 -#, fuzzy, python-format -msgid "Detach Volume Failed." -msgstr "Dé-montage du volume %s" +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:184 +msgid "'volume_id' must be specified" +msgstr "" -#: cinder/api/ec2/cloud.py:984 cinder/api/ec2/cloud.py:1041 -#: cinder/api/ec2/cloud.py:1518 cinder/api/ec2/cloud.py:1533 +#: cinder/api/v1/snapshots.py:182 cinder/api/v2/snapshots.py:193 #, python-format -msgid "attribute not supported: %s" -msgstr "attribut non reconnu : %s" +msgid "Create snapshot from volume %s" +msgstr "" -#: cinder/api/ec2/cloud.py:1107 +#: cinder/api/v1/snapshots.py:186 cinder/api/v2/snapshots.py:202 #, python-format -msgid "vol = %s\n" +msgid "Invalid value '%s' for force. 
" msgstr "" -#: cinder/api/ec2/cloud.py:1255 -msgid "Allocate address" -msgstr "Allocation d'adresse" +#: cinder/api/v1/volume_metadata.py:37 cinder/api/v1/volume_metadata.py:117 +#: cinder/api/v1/volume_metadata.py:156 cinder/api/v2/volume_metadata.py:36 +#: cinder/api/v2/volume_metadata.py:116 cinder/api/v2/volume_metadata.py:155 +#, fuzzy +msgid "volume does not exist" +msgstr "Le groupe de volume %s n'existe pas" -#: cinder/api/ec2/cloud.py:1267 +#: cinder/api/v1/volumes.py:111 #, python-format -msgid "Release address %s" -msgstr "Désallocation de l'adresse %s" +msgid "vol=%s" +msgstr "" -#: cinder/api/ec2/cloud.py:1272 +#: cinder/api/v1/volumes.py:290 cinder/api/v2/volumes.py:228 #, python-format -msgid "Associate address %(public_ip)s to instance %(instance_id)s" -msgstr "Association de l'adresse %(public_ip)s avec l'instance %(instance_id)s" +msgid "Delete volume with id: %s" +msgstr "" -#: cinder/api/ec2/cloud.py:1282 -#, python-format -msgid "Disassociate address %s" -msgstr "Désassociation de l'adresse %s" +#: cinder/api/v1/volumes.py:344 cinder/api/v1/volumes.py:348 +#: cinder/api/v2/volumes.py:298 cinder/api/v2/volumes.py:302 +msgid "Invalid imageRef provided." +msgstr "" -#: cinder/api/ec2/cloud.py:1308 -msgid "Image must be available" +#: cinder/api/v1/volumes.py:388 cinder/api/v2/volumes.py:354 +#, python-format +msgid "snapshot id:%s not found" msgstr "" -#: cinder/api/ec2/cloud.py:1329 -msgid "Going to start terminating instances" -msgstr "Début de la destruction d'instance" +#: cinder/api/v1/volumes.py:401 +#, python-format +msgid "source vol id:%s not found" +msgstr "" -#: cinder/api/ec2/cloud.py:1343 +#: cinder/api/v1/volumes.py:412 cinder/api/v2/volumes.py:377 #, python-format -msgid "Reboot instance %r" -msgstr "Re-démarrage de l'instance %r" +msgid "Create volume of %s GB" +msgstr "Création d'un volume de %s Go" -#: cinder/api/ec2/cloud.py:1354 -msgid "Going to stop instances" +#: cinder/api/v1/volumes.py:496 +#, python-format +msgid "Removing options '%(bad_options)s' from query" msgstr "" -#: cinder/api/ec2/cloud.py:1365 -msgid "Going to start instances" +#: cinder/api/v2/snapshots.py:111 cinder/api/v2/snapshots.py:126 +#: cinder/api/v2/snapshots.py:267 +msgid "Snapshot could not be found" msgstr "" -#: cinder/api/ec2/cloud.py:1455 +#: cinder/api/v2/snapshots.py:174 cinder/api/v2/snapshots.py:234 +#: cinder/api/v2/volumes.py:313 cinder/api/v2/volumes.py:419 #, python-format -msgid "De-registering image %s" -msgstr "Dé-enregitrement de l'image %s" - -#: cinder/api/ec2/cloud.py:1471 -msgid "imageLocation is required" +msgid "Missing required element '%s' in request body" msgstr "" -#: cinder/api/ec2/cloud.py:1490 -#, python-format -msgid "Registered image %(image_location)s with id %(image_id)s" -msgstr "Image %(image_location)s enregistré avec l'id %(image_id)s" +#: cinder/api/v2/snapshots.py:190 cinder/api/v2/volumes.py:217 +#: cinder/api/v2/volumes.py:234 cinder/api/v2/volumes.py:449 +msgid "Volume could not be found" +msgstr "" -#: cinder/api/ec2/cloud.py:1536 -msgid "user or group not specified" -msgstr "Utilisateur ou groupe non spécifié" +#: cinder/api/v2/snapshots.py:230 cinder/api/v2/volumes.py:415 +msgid "Missing request body" +msgstr "" -#: cinder/api/ec2/cloud.py:1538 -msgid "only group \"all\" is supported" -msgstr "Seul le group \"tous\" est supporté" +#: cinder/api/v2/types.py:70 +msgid "Volume type not found" +msgstr "" -#: cinder/api/ec2/cloud.py:1540 -msgid "operation_type must be add or remove" +#: cinder/api/v2/volumes.py:237 +msgid "Volume cannot 
be deleted while in attached state" msgstr "" -"le type d'opération (operation_type) doit être ajout (add) ou suppression" -" (remove)" -#: cinder/api/ec2/cloud.py:1542 -#, python-format -msgid "Updating image %s publicity" -msgstr "Mis à jour de la publication de l'image %s" +#: cinder/api/v2/volumes.py:343 +msgid "Volume type not found." +msgstr "" -#: cinder/api/ec2/cloud.py:1555 +#: cinder/api/v2/volumes.py:366 #, python-format -msgid "Not allowed to modify attributes for image %s" +msgid "source volume id:%s not found" msgstr "" -#: cinder/api/ec2/cloud.py:1603 +#: cinder/api/v2/volumes.py:472 #, python-format -msgid "Couldn't stop instance with in %d sec" +msgid "Removing options '%s' from query" msgstr "" -#: cinder/api/metadata/handler.py:246 cinder/api/metadata/handler.py:253 -#, python-format -msgid "Failed to get metadata for ip: %s" -msgstr "Impossible de récupérer les méta-donnérs pour l'IP : %s" +#: cinder/backup/api.py:66 +#, fuzzy +msgid "Backup status must be available or error" +msgstr "Le status du volume doit être disponible" -#: cinder/api/openstack/__init__.py:43 -#, python-format -msgid "Caught error: %s" -msgstr "Erreur interceptée : %s" +#: cinder/backup/api.py:105 +#, fuzzy +msgid "Volume to be backed up must be available" +msgstr "Le status du volume doit être disponible" -#: cinder/api/openstack/__init__.py:45 cinder/api/openstack/wsgi.py:886 -#, python-format -msgid "%(url)s returned with HTTP %(status)d" -msgstr "" +#: cinder/backup/api.py:140 +#, fuzzy +msgid "Backup status must be available" +msgstr "Le status du volume doit être disponible" -#: cinder/api/openstack/__init__.py:94 -msgid "Must specify an ExtensionManager class" +#: cinder/backup/api.py:145 +msgid "Backup to be restored has invalid size" msgstr "" -#: cinder/api/openstack/__init__.py:105 +#: cinder/backup/api.py:154 #, python-format -msgid "Extended resource: %s" +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" msgstr "" -#: cinder/api/openstack/__init__.py:130 +#: cinder/backup/api.py:170 +#, fuzzy +msgid "Volume to be restored to must be available" +msgstr "Le status du volume doit être disponible" + +#: cinder/backup/api.py:176 #, python-format msgid "" -"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " -"resource" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." msgstr "" -#: cinder/api/openstack/__init__.py:135 +#: cinder/backup/api.py:181 #, python-format -msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" msgstr "" -#: cinder/api/openstack/auth.py:90 -#, python-format -msgid "%(user_id)s could not be found with token '%(token)s'" +#: cinder/backup/manager.py:97 +msgid "NULL host not allowed for volume backend lookup." msgstr "" -#: cinder/api/openstack/auth.py:134 +#: cinder/backup/manager.py:100 #, python-format -msgid "%(user_id)s must be an admin or a member of %(project_id)s" -msgstr "" - -#: cinder/api/openstack/auth.py:152 -msgid "Authentication requests must be made against a version root (e.g. /v2)." +msgid "Checking hostname '%s' for backend info." msgstr "" -#: cinder/api/openstack/auth.py:167 +#: cinder/backup/manager.py:107 #, python-format -msgid "Could not find %s in request." +msgid "Backend not found in hostname (%s) so using default." 
msgstr "" -#: cinder/api/openstack/auth.py:191 +#: cinder/backup/manager.py:117 #, python-format -msgid "Successfully authenticated '%s'" +msgid "Manager requested for volume_backend '%s'." msgstr "" -#: cinder/api/openstack/auth.py:241 -msgid "User not found with provided API key." +#: cinder/backup/manager.py:120 cinder/backup/manager.py:132 +msgid "Fetching default backend." msgstr "" -#: cinder/api/openstack/auth.py:258 +#: cinder/backup/manager.py:123 #, python-format -msgid "Provided API key is valid, but not for user '%(username)s'" +msgid "Volume manager for backend '%s' does not exist." msgstr "" -#: cinder/api/openstack/common.py:133 cinder/api/openstack/common.py:167 -msgid "limit param must be an integer" -msgstr "" - -#: cinder/api/openstack/common.py:136 cinder/api/openstack/common.py:171 -msgid "limit param must be positive" +#: cinder/backup/manager.py:129 +#, python-format +msgid "Driver requested for volume_backend '%s'." msgstr "" -#: cinder/api/openstack/common.py:161 -msgid "offset param must be an integer" +#: cinder/backup/manager.py:147 +#, python-format +msgid "" +"Registering backend %(backend)s (host=%(host)s " +"backend_name=%(backend_name)s)." msgstr "" -#: cinder/api/openstack/common.py:175 -msgid "offset param must be positive" +#: cinder/backup/manager.py:154 +#, python-format +msgid "Registering default backend %s." msgstr "" -#: cinder/api/openstack/common.py:203 +#: cinder/backup/manager.py:158 #, python-format -msgid "marker [%s] not found" +msgid "Starting volume driver %(driver_name)s (%(version)s)." msgstr "" -#: cinder/api/openstack/common.py:243 +#: cinder/backup/manager.py:165 #, python-format -msgid "href %s does not contain version" +msgid "Error encountered during initialization of driver: %(name)s." msgstr "" -#: cinder/api/openstack/common.py:278 -msgid "Image metadata limit exceeded" +#: cinder/backup/manager.py:184 +msgid "Cleaning up incomplete backup operations." msgstr "" -#: cinder/api/openstack/common.py:295 +#: cinder/backup/manager.py:189 #, python-format -msgid "Converting nw_info: %s" +msgid "Resetting volume %s to available (was backing-up)." msgstr "" -#: cinder/api/openstack/common.py:305 +#: cinder/backup/manager.py:194 #, python-format -msgid "Converted networks: %s" +msgid "Resetting volume %s to error_restoring (was restoring-backup)." msgstr "" -#: cinder/api/openstack/common.py:338 +#: cinder/backup/manager.py:206 #, python-format -msgid "Cannot '%(action)s' while instance is in %(attr)s %(state)s" +msgid "Resetting backup %s to error (was creating)." msgstr "" -#: cinder/api/openstack/common.py:341 +#: cinder/backup/manager.py:212 #, python-format -msgid "Instance is in an invalid state for '%(action)s'" +msgid "Resetting backup %s to available (was restoring)." msgstr "" -#: cinder/api/openstack/common.py:421 -msgid "Rejecting snapshot request, snapshots currently disabled" +#: cinder/backup/manager.py:217 +#, python-format +msgid "Resuming delete on backup: %s." msgstr "" -#: cinder/api/openstack/common.py:423 -msgid "Instance snapshots are not permitted at this time." +#: cinder/backup/manager.py:225 +#, python-format +msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." msgstr "" -#: cinder/api/openstack/extensions.py:188 +#: cinder/backup/manager.py:237 #, python-format -msgid "Loaded extension: %s" +msgid "" +"Create backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s." 
msgstr "" -#: cinder/api/openstack/extensions.py:225 +#: cinder/backup/manager.py:249 #, python-format -msgid "Ext name: %s" +msgid "" +"Create backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." msgstr "" -#: cinder/api/openstack/extensions.py:226 +#: cinder/backup/manager.py:282 #, python-format -msgid "Ext alias: %s" +msgid "Create backup finished. backup: %s." msgstr "" -#: cinder/api/openstack/extensions.py:227 +#: cinder/backup/manager.py:286 #, python-format -msgid "Ext description: %s" +msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s." msgstr "" -#: cinder/api/openstack/extensions.py:229 +#: cinder/backup/manager.py:299 #, python-format -msgid "Ext namespace: %s" +msgid "" +"Restore backup aborted: expected volume status %(expected_status)s but " +"got %(actual_status)s." msgstr "" -#: cinder/api/openstack/extensions.py:230 +#: cinder/backup/manager.py:310 #, python-format -msgid "Ext updated: %s" +msgid "" +"Restore backup aborted: expected backup status %(expected_status)s but " +"got %(actual_status)s." msgstr "" -#: cinder/api/openstack/extensions.py:232 +#: cinder/backup/manager.py:329 #, python-format -msgid "Exception loading extension: %s" +msgid "" +"Restore backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." msgstr "" -#: cinder/api/openstack/extensions.py:246 +#: cinder/backup/manager.py:360 #, python-format -msgid "Loading extension %s" +msgid "" +"Restore backup finished, backup %(backup_id)s restored to volume " +"%(volume_id)s." msgstr "" -#: cinder/api/openstack/extensions.py:252 +#: cinder/backup/manager.py:379 #, python-format -msgid "Calling extension factory %s" +msgid "Delete backup started, backup: %s." msgstr "" -#: cinder/api/openstack/extensions.py:264 +#: cinder/backup/manager.py:386 #, python-format -msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgid "" +"Delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." msgstr "" -#: cinder/api/openstack/extensions.py:344 +#: cinder/backup/manager.py:399 #, python-format -msgid "Failed to load extension %(classpath)s: %(exc)s" +msgid "" +"Delete backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." msgstr "" -#: cinder/api/openstack/extensions.py:368 +#: cinder/backup/manager.py:422 #, python-format -msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgid "Delete backup finished, backup %s deleted." 
msgstr "" -#: cinder/api/openstack/wsgi.py:135 cinder/api/openstack/wsgi.py:538 -msgid "cannot understand JSON" +#: cinder/backup/drivers/ceph.py:116 +msgid "" +"rbd striping not supported - ignoring configuration settings for rbd " +"striping" msgstr "" -#: cinder/api/openstack/wsgi.py:159 -#: cinder/api/openstack/compute/contrib/hosts.py:86 -msgid "cannot understand XML" +#: cinder/backup/drivers/ceph.py:147 +#, python-format +msgid "invalid user '%s'" msgstr "" -#: cinder/api/openstack/wsgi.py:543 -msgid "too many body keys" +#: cinder/backup/drivers/ceph.py:213 +msgid "backup_id required" msgstr "" -#: cinder/api/openstack/wsgi.py:582 +#: cinder/backup/drivers/ceph.py:224 #, python-format -msgid "Exception handling resource: %s" +msgid "discarding %(length)s bytes from offset %(offset)s" msgstr "" -#: cinder/api/openstack/wsgi.py:586 +#: cinder/backup/drivers/ceph.py:232 #, python-format -msgid "Fault thrown: %s" +msgid "writing zeroes chunk %d" msgstr "" -#: cinder/api/openstack/wsgi.py:589 +#: cinder/backup/drivers/ceph.py:246 #, python-format -msgid "HTTP exception thrown: %s" +msgid "transferring data between '%(src)s' and '%(dest)s'" msgstr "" -#: cinder/api/openstack/wsgi.py:697 -msgid "Unrecognized Content-Type provided in request" +#: cinder/backup/drivers/ceph.py:250 +#, python-format +msgid "%(chunks)s chunks of %(bytes)s bytes to be transferred" msgstr "" -#: cinder/api/openstack/wsgi.py:701 -msgid "No Content-Type provided in request" +#: cinder/backup/drivers/ceph.py:269 +#, python-format +msgid "transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s)" msgstr "" -#: cinder/api/openstack/wsgi.py:705 -msgid "Empty body provided in request" +#: cinder/backup/drivers/ceph.py:279 +#, python-format +msgid "transferring remaining %s bytes" msgstr "" -#: cinder/api/openstack/wsgi.py:816 +#: cinder/backup/drivers/ceph.py:295 #, python-format -msgid "There is no such action: %s" +msgid "creating base image '%s'" msgstr "" -#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 -#: cinder/api/openstack/compute/server_metadata.py:58 -#: cinder/api/openstack/compute/server_metadata.py:76 -#: cinder/api/openstack/compute/server_metadata.py:101 -#: cinder/api/openstack/compute/server_metadata.py:126 -#: cinder/api/openstack/compute/contrib/admin_actions.py:211 -#: cinder/api/openstack/compute/contrib/console_output.py:52 -msgid "Malformed request body" +#: cinder/backup/drivers/ceph.py:322 cinder/backup/drivers/ceph.py:603 +#, python-format +msgid "deleting backup snapshot='%s'" msgstr "" -#: cinder/api/openstack/wsgi.py:829 -msgid "Unsupported Content-Type" +#: cinder/backup/drivers/ceph.py:325 +msgid "no backup snapshot to delete" msgstr "" -#: cinder/api/openstack/wsgi.py:841 -msgid "Malformed request url" +#: cinder/backup/drivers/ceph.py:361 +#, python-format +msgid "trying diff format name format basename='%s'" msgstr "" -#: cinder/api/openstack/wsgi.py:889 +#: cinder/backup/drivers/ceph.py:369 #, python-format -msgid "%(url)s returned a fault: %(e)s" +msgid "image %s not found" msgstr "" -#: cinder/api/openstack/xmlutil.py:265 -msgid "element is not a child" +#: cinder/backup/drivers/ceph.py:377 +#, python-format +msgid "base image still has %s snapshots so skipping base image delete" msgstr "" -#: cinder/api/openstack/xmlutil.py:414 -msgid "root element selecting a list" +#: cinder/backup/drivers/ceph.py:382 +#, python-format +msgid "deleting base image='%s'" msgstr "" -#: cinder/api/openstack/xmlutil.py:739 +#: cinder/backup/drivers/ceph.py:389 #, python-format 
-msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgid "image busy, retrying %(retries)s more time(s) in %(delay)ss" msgstr "" -#: cinder/api/openstack/xmlutil.py:858 -msgid "subclasses must implement construct()!" +#: cinder/backup/drivers/ceph.py:394 +msgid "max retries reached - raising error" msgstr "" -#: cinder/api/openstack/compute/extensions.py:29 -#: cinder/api/openstack/volume/extensions.py:29 -msgid "Initializing extension manager." +#: cinder/backup/drivers/ceph.py:397 +#, python-format +msgid "base backup image='%s' deleted)" msgstr "" -#: cinder/api/openstack/compute/image_metadata.py:40 -#: cinder/api/openstack/compute/images.py:146 -#: cinder/api/openstack/compute/images.py:161 -msgid "Image not found." +#: cinder/backup/drivers/ceph.py:407 +#, python-format +msgid "deleting source snap '%s'" msgstr "" -#: cinder/api/openstack/compute/image_metadata.py:79 -msgid "Incorrect request body format" +#: cinder/backup/drivers/ceph.py:453 +#, python-format +msgid "performing differential transfer from '%(src)s' to '%(dest)s'" msgstr "" -#: cinder/api/openstack/compute/image_metadata.py:83 -#: cinder/api/openstack/compute/server_metadata.py:80 -#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:79 -#: cinder/api/openstack/compute/contrib/volumetypes.py:188 -msgid "Request body and URI mismatch" +#: cinder/backup/drivers/ceph.py:478 +#, python-format +msgid "rbd diff op failed - (ret=%(ret)s stderr=%(stderr)s)" msgstr "" -#: cinder/api/openstack/compute/image_metadata.py:86 -#: cinder/api/openstack/compute/server_metadata.py:84 -#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:82 -#: cinder/api/openstack/compute/contrib/volumetypes.py:191 -msgid "Request body contains too many items" +#: cinder/backup/drivers/ceph.py:488 +#, python-format +msgid "image '%s' not found - trying diff format name" msgstr "" -#: cinder/api/openstack/compute/image_metadata.py:111 -msgid "Invalid metadata key" +#: cinder/backup/drivers/ceph.py:493 +#, python-format +msgid "diff format image '%s' not found" msgstr "" -#: cinder/api/openstack/compute/ips.py:74 -msgid "Instance does not exist" +#: cinder/backup/drivers/ceph.py:528 +#, python-format +msgid "using --from-snap '%s'" msgstr "" -#: cinder/api/openstack/compute/ips.py:97 -msgid "Instance is not a member of specified network" +#: cinder/backup/drivers/ceph.py:543 +#, python-format +msgid "source snap '%s' is stale so deleting" msgstr "" -#: cinder/api/openstack/compute/limits.py:140 +#: cinder/backup/drivers/ceph.py:555 #, python-format msgid "" -"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " -"%(unit_string)s." +"snap='%(snap)s' does not exist in base image='%(base)s' - aborting " +"incremental backup" msgstr "" -#: cinder/api/openstack/compute/limits.py:266 -msgid "This request was rate-limited." 
+#: cinder/backup/drivers/ceph.py:566 +#, python-format +msgid "creating backup snapshot='%s'" msgstr "" -#: cinder/api/openstack/compute/server_metadata.py:38 -#: cinder/api/openstack/compute/server_metadata.py:122 -#: cinder/api/openstack/compute/server_metadata.py:159 -msgid "Server does not exist" +#: cinder/backup/drivers/ceph.py:586 +#, python-format +msgid "differential backup transfer completed in %.4fs" msgstr "" -#: cinder/api/openstack/compute/server_metadata.py:141 -#: cinder/api/openstack/compute/server_metadata.py:152 -msgid "Metadata item was not found" +#: cinder/backup/drivers/ceph.py:595 +msgid "differential backup transfer failed" msgstr "" -#: cinder/api/openstack/compute/servers.py:425 +#: cinder/backup/drivers/ceph.py:625 #, python-format -msgid "Invalid server status: %(status)s" +msgid "creating base image='%s'" msgstr "" -#: cinder/api/openstack/compute/servers.py:433 -msgid "Invalid changes-since value" +#: cinder/backup/drivers/ceph.py:634 +msgid "copying data" msgstr "" -#: cinder/api/openstack/compute/servers.py:483 -msgid "Personality file limit exceeded" +#: cinder/backup/drivers/ceph.py:694 +#, python-format +msgid "looking for snapshot of backup base '%s'" msgstr "" -#: cinder/api/openstack/compute/servers.py:485 -msgid "Personality file path too long" +#: cinder/backup/drivers/ceph.py:697 +#, python-format +msgid "backup base '%s' has no snapshots" msgstr "" -#: cinder/api/openstack/compute/servers.py:487 -msgid "Personality file content too long" +#: cinder/backup/drivers/ceph.py:704 +#, python-format +msgid "backup '%s' has no snapshot" msgstr "" -#: cinder/api/openstack/compute/servers.py:501 -msgid "Server name is not a string or unicode" +#: cinder/backup/drivers/ceph.py:708 +#, python-format +msgid "backup should only have one snapshot but instead has %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:505 -msgid "Server name is an empty string" +#: cinder/backup/drivers/ceph.py:713 +#, python-format +msgid "found snapshot '%s'" msgstr "" -#: cinder/api/openstack/compute/servers.py:509 -msgid "Server name must be less than 256 characters." +#: cinder/backup/drivers/ceph.py:734 +msgid "need non-zero volume size" msgstr "" -#: cinder/api/openstack/compute/servers.py:527 +#: cinder/backup/drivers/ceph.py:751 #, python-format -msgid "Bad personality format: missing %s" +msgid "Starting backup of volume='%s'" msgstr "" -#: cinder/api/openstack/compute/servers.py:530 -msgid "Bad personality format" +#: cinder/backup/drivers/ceph.py:764 +msgid "forcing full backup" msgstr "" -#: cinder/api/openstack/compute/servers.py:535 +#: cinder/backup/drivers/ceph.py:776 #, python-format -msgid "Personality content for %s cannot be decoded" +msgid "backup '%s' finished." 
msgstr "" -#: cinder/api/openstack/compute/servers.py:550 -#, python-format -msgid "Bad networks format: network uuid is not in proper format (%s)" +#: cinder/backup/drivers/ceph.py:834 +msgid "adjusting restore vol size" msgstr "" -#: cinder/api/openstack/compute/servers.py:559 +#: cinder/backup/drivers/ceph.py:846 #, python-format -msgid "Invalid fixed IP address (%s)" +msgid "trying incremental restore from base='%(base)s' snap='%(snap)s'" msgstr "" -#: cinder/api/openstack/compute/servers.py:566 -#, python-format -msgid "Duplicate networks (%s) are not allowed" +#: cinder/backup/drivers/ceph.py:858 +msgid "differential restore failed, trying full restore" msgstr "" -#: cinder/api/openstack/compute/servers.py:572 +#: cinder/backup/drivers/ceph.py:869 #, python-format -msgid "Bad network format: missing %s" -msgstr "" - -#: cinder/api/openstack/compute/servers.py:575 -msgid "Bad networks format" +msgid "restore transfer completed in %.4fs" msgstr "" -#: cinder/api/openstack/compute/servers.py:587 -msgid "Userdata content cannot be decoded" +#: cinder/backup/drivers/ceph.py:916 +#, python-format +msgid "rbd has %s extents" msgstr "" -#: cinder/api/openstack/compute/servers.py:594 -msgid "accessIPv4 is not proper IPv4 format" +#: cinder/backup/drivers/ceph.py:938 +msgid "dest volume is original volume - forcing full copy" msgstr "" -#: cinder/api/openstack/compute/servers.py:601 -msgid "accessIPv6 is not proper IPv6 format" +#: cinder/backup/drivers/ceph.py:959 +msgid "destination has extents - forcing full copy" msgstr "" -#: cinder/api/openstack/compute/servers.py:633 -msgid "Server name is not defined" +#: cinder/backup/drivers/ceph.py:964 +#, python-format +msgid "no restore point found for backup='%s', forcing full copy" msgstr "" -#: cinder/api/openstack/compute/servers.py:675 -#: cinder/api/openstack/compute/servers.py:740 -msgid "Invalid flavorRef provided." +#: cinder/backup/drivers/ceph.py:995 +msgid "forcing full restore" msgstr "" -#: cinder/api/openstack/compute/servers.py:737 -msgid "Can not find requested image" +#: cinder/backup/drivers/ceph.py:1005 +#, python-format +msgid "starting restore from Ceph backup=%(src)s to volume=%(dest)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:743 -msgid "Invalid key_name provided." +#: cinder/backup/drivers/ceph.py:1016 +msgid "volume_file does not support fileno() so skipping fsync()" msgstr "" -#: cinder/api/openstack/compute/servers.py:829 -#: cinder/api/openstack/compute/servers.py:849 -msgid "Instance has not been resized." +#: cinder/backup/drivers/ceph.py:1021 +msgid "restore finished successfully." 
msgstr "" -#: cinder/api/openstack/compute/servers.py:835 +#: cinder/backup/drivers/ceph.py:1023 #, python-format -msgid "Error in confirm-resize %s" +msgid "restore finished with error - %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:855 +#: cinder/backup/drivers/ceph.py:1029 #, python-format -msgid "Error in revert-resize %s" +msgid "delete started for backup=%s" msgstr "" -#: cinder/api/openstack/compute/servers.py:868 -msgid "Argument 'type' for reboot is not HARD or SOFT" +#: cinder/backup/drivers/ceph.py:1034 +msgid "rbd image not found but continuing anyway so that db entry can be removed" msgstr "" -#: cinder/api/openstack/compute/servers.py:872 -msgid "Missing argument 'type' for reboot" +#: cinder/backup/drivers/ceph.py:1037 +#, python-format +msgid "delete '%s' finished with warning" msgstr "" -#: cinder/api/openstack/compute/servers.py:885 +#: cinder/backup/drivers/ceph.py:1039 #, python-format -msgid "Error in reboot %s" +msgid "delete '%s' finished" msgstr "" -#: cinder/api/openstack/compute/servers.py:897 -msgid "Unable to locate requested flavor." +#: cinder/backup/drivers/swift.py:106 +#, python-format +msgid "unsupported compression algorithm: %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:900 -msgid "Resize requires a change in size." +#: cinder/backup/drivers/swift.py:123 +#, python-format +msgid "single_user auth mode enabled, but %(param)s not set" msgstr "" -#: cinder/api/openstack/compute/servers.py:924 -msgid "Malformed server entity" +#: cinder/backup/drivers/swift.py:141 +#, python-format +msgid "_check_container_exists: container: %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:931 -msgid "Missing imageRef attribute" +#: cinder/backup/drivers/swift.py:146 +#, fuzzy, python-format +msgid "container %s does not exist" +msgstr "Le groupe de volume %s n'existe pas" + +#: cinder/backup/drivers/swift.py:151 +#, python-format +msgid "container %s exists" msgstr "" -#: cinder/api/openstack/compute/servers.py:940 -msgid "Invalid imageRef provided." +#: cinder/backup/drivers/swift.py:157 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:949 -msgid "Missing flavorRef attribute" +#: cinder/backup/drivers/swift.py:173 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:962 -msgid "No adminPass was specified" +#: cinder/backup/drivers/swift.py:182 +#, python-format +msgid "generated object list: %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:966 -#: cinder/api/openstack/compute/servers.py:1144 -msgid "Invalid adminPass" +#: cinder/backup/drivers/swift.py:192 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:980 -msgid "Unable to parse metadata key/value pairs." +#: cinder/backup/drivers/swift.py:209 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" msgstr "" -#: cinder/api/openstack/compute/servers.py:993 -msgid "Resize request has invalid 'flavorRef' attribute." +#: cinder/backup/drivers/swift.py:214 +msgid "_write_metadata finished" msgstr "" -#: cinder/api/openstack/compute/servers.py:996 -msgid "Resize requests require 'flavorRef' attribute." 
+#: cinder/backup/drivers/swift.py:219 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:1014 -#: cinder/api/openstack/compute/contrib/aggregates.py:142 -#: cinder/api/openstack/compute/contrib/networks.py:65 -msgid "Invalid request body" +#: cinder/backup/drivers/swift.py:224 +#, python-format +msgid "_read_metadata finished (%s)" msgstr "" -#: cinder/api/openstack/compute/servers.py:1019 -msgid "Could not parse imageRef from request." +#: cinder/backup/drivers/swift.py:234 +#, fuzzy, python-format +msgid "volume size %d is invalid." +msgstr "La requête est invalide." + +#: cinder/backup/drivers/swift.py:248 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:1071 -msgid "Instance could not be found" +#: cinder/backup/drivers/swift.py:271 +msgid "reading chunk of data from volume" msgstr "" -#: cinder/api/openstack/compute/servers.py:1074 -msgid "Cannot find image for rebuild" +#: cinder/backup/drivers/swift.py:278 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:1103 -msgid "createImage entity requires name attribute" +#: cinder/backup/drivers/swift.py:287 +msgid "not compressing data" msgstr "" -#: cinder/api/openstack/compute/servers.py:1112 -#: cinder/api/openstack/compute/contrib/admin_actions.py:238 -msgid "Invalid metadata" +#: cinder/backup/drivers/swift.py:291 +msgid "About to put_object" msgstr "" -#: cinder/api/openstack/compute/servers.py:1167 +#: cinder/backup/drivers/swift.py:297 #, python-format -msgid "Removing options '%(unk_opt_str)s' from query" +msgid "swift MD5 for %(object_name)s: %(etag)s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:60 +#: cinder/backup/drivers/swift.py:301 #, python-format -msgid "Compute.api::pause %s" -msgstr "Compute.api::pause %s" +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:77 +#: cinder/backup/drivers/swift.py:304 #, python-format -msgid "Compute.api::unpause %s" -msgstr "Compute.api::unpause %s" +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:94 -#, python-format -msgid "compute.api::suspend %s" -msgstr "compute.api::suspend %s" +#: cinder/backup/drivers/swift.py:312 +msgid "Calling eventlet.sleep(0)" +msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:111 +#: cinder/backup/drivers/swift.py:328 cinder/backup/drivers/tsm.py:324 #, python-format -msgid "compute.api::resume %s" -msgstr "compute.api::resume %s" +msgid "backup %s finished." 
+msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:127 +#: cinder/backup/drivers/swift.py:345 #, python-format -msgid "Error in migrate %s" +msgid "v1 swift volume backup restore of %s started" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:141 +#: cinder/backup/drivers/swift.py:350 #, python-format -msgid "Compute.api::reset_network %s" -msgstr "Compute.api::reset_network %s" +msgid "metadata_object_names = %s" +msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:154 -#: cinder/api/openstack/compute/contrib/admin_actions.py:170 -#: cinder/api/openstack/compute/contrib/admin_actions.py:186 -#: cinder/api/openstack/compute/contrib/multinic.py:41 -#: cinder/api/openstack/compute/contrib/rescue.py:44 -msgid "Server not found" +#: cinder/backup/drivers/swift.py:356 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:157 +#: cinder/backup/drivers/swift.py:362 #, python-format -msgid "Compute.api::inject_network_info %s" +msgid "" +"restoring object from swift. backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:173 +#: cinder/backup/drivers/swift.py:378 #, python-format -msgid "Compute.api::lock %s" -msgstr "Compute.api::lock %s" +msgid "decompressing data using %s algorithm" +msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:189 +#: cinder/backup/drivers/swift.py:401 #, python-format -msgid "Compute.api::unlock %s" -msgstr "Compute.api::unlock %s" +msgid "v1 swift volume backup restore of %s finished" +msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:219 +#: cinder/backup/drivers/swift.py:409 #, python-format -msgid "createBackup entity requires %s attribute" +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:223 -msgid "Malformed createBackup entity" +#: cinder/backup/drivers/swift.py:423 +#, python-format +msgid "Restoring swift backup version %s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:229 -msgid "createBackup attribute 'rotation' must be an integer" +#: cinder/backup/drivers/swift.py:428 +#, python-format +msgid "No support to restore swift backup version %s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:244 -#: cinder/api/openstack/compute/contrib/console_output.py:47 -#: cinder/api/openstack/compute/contrib/console_output.py:59 -#: cinder/api/openstack/compute/contrib/consoles.py:49 -#: cinder/api/openstack/compute/contrib/consoles.py:60 -#: cinder/api/openstack/compute/contrib/server_action_list.py:49 -#: cinder/api/openstack/compute/contrib/server_diagnostics.py:47 -#: cinder/api/openstack/compute/contrib/server_start_stop.py:38 -msgid "Instance not found" +#: cinder/backup/drivers/swift.py:432 cinder/backup/drivers/tsm.py:378 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:272 -msgid "host and block_migration must be specified." 
+#: cinder/backup/drivers/swift.py:446 +msgid "swift error while listing objects, continuing with delete" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:284 +#: cinder/backup/drivers/swift.py:455 #, python-format -msgid "Live migration of instance %(id)s to host %(host)s failed" +msgid "swift error while deleting object %s, continuing with delete" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:76 +#: cinder/backup/drivers/swift.py:458 #, python-format -msgid "" -"Cannot create aggregate with name %(name)s and availability zone " -"%(avail_zone)s" +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:88 +#: cinder/backup/drivers/swift.py:468 cinder/backup/drivers/tsm.py:440 #, python-format -msgid "Cannot show aggregate: %(id)s" +msgid "delete %s finished" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:114 +#: cinder/backup/drivers/tsm.py:85 #, python-format -msgid "Cannot update aggregate: %(id)s" +msgid "" +"backup: %(vol_id)s Failed to create device hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:126 +#: cinder/backup/drivers/tsm.py:143 #, python-format -msgid "Cannot delete aggregate: %(id)s" +msgid "" +"backup: %(vol_id)s Failed to obtain backup success notification from " +"server.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:139 +#: cinder/backup/drivers/tsm.py:173 #, python-format -msgid "Aggregates does not have %s action" +msgid "" +"restore: %(vol_id)s Failed.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:152 -#: cinder/api/openstack/compute/contrib/aggregates.py:158 +#: cinder/backup/drivers/tsm.py:199 #, python-format -msgid "Cannot add host %(host)s in aggregate %(id)s" +msgid "backup: %(vol_id)s Failed. %(path)s is not a block device." msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:171 -#: cinder/api/openstack/compute/contrib/aggregates.py:175 +#: cinder/backup/drivers/tsm.py:206 #, python-format -msgid "Cannot remove host %(host)s in aggregate %(id)s" +msgid "backup: %(vol_id)s Failed. Cannot obtain real path to device %(path)s." msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:195 +#: cinder/backup/drivers/tsm.py:213 #, python-format -msgid "Cannot set metadata %(metadata)s in aggregate %(id)s" +msgid "backup: %(vol_id)s Failed. %(path)s is not a file." msgstr "" -#: cinder/api/openstack/compute/contrib/certificates.py:75 -msgid "Only root certificate can be retrieved." 
+#: cinder/backup/drivers/tsm.py:260 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to remove backup hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/cloudpipe.py:146 +#: cinder/backup/drivers/tsm.py:286 +#, python-format msgid "" -"Unable to claim IP for VPN instances, ensure it isn't running, and try " -"again in a few minutes" +"starting backup of volume: %(volume_id)s to TSM, volume path: " +"%(volume_path)s," msgstr "" -#: cinder/api/openstack/compute/contrib/consoles.py:44 -msgid "Missing type specification" +#: cinder/backup/drivers/tsm.py:298 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/consoles.py:56 -msgid "Invalid type specification" +#: cinder/backup/drivers/tsm.py:308 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/disk_config.py:44 +#: cinder/backup/drivers/tsm.py:338 #, python-format -msgid "%s must be either 'MANUAL' or 'AUTO'." +msgid "" +"restore: starting restore of backup from TSM to volume %(volume_id)s, " +"backup: %(backup_id)s" msgstr "" -#: cinder/api/openstack/compute/contrib/extended_server_attributes.py:77 -#: cinder/api/openstack/compute/contrib/extended_status.py:61 -msgid "Server not found." +#: cinder/backup/drivers/tsm.py:352 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/flavorextradata.py:61 -#: cinder/api/openstack/compute/contrib/flavorextradata.py:91 -msgid "Flavor not found." +#: cinder/backup/drivers/tsm.py:362 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:49 -#: cinder/api/openstack/compute/contrib/volumetypes.py:158 -msgid "No Request Body" +#: cinder/backup/drivers/tsm.py:413 +#, python-format +msgid "" +"delete: %(vol_id)s Failed to run dsmc with stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:159 +#: cinder/backup/drivers/tsm.py:421 #, python-format -msgid "No more floating ips in pool %s." +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments with " +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:161 -msgid "No more floating ips available." +#: cinder/backup/drivers/tsm.py:432 +#, python-format +msgid "" +"delete: %(vol_id)s Failed with stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:201 -#: cinder/api/openstack/compute/contrib/floating_ips.py:230 -#: cinder/api/openstack/compute/contrib/security_groups.py:571 -#: cinder/api/openstack/compute/contrib/security_groups.py:604 -msgid "Missing parameter dict" +#: cinder/brick/exception.py:55 +#, python-format +msgid "Exception in string format operation. msg='%s'" msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:204 -#: cinder/api/openstack/compute/contrib/floating_ips.py:233 -msgid "Address not specified" +#: cinder/brick/exception.py:85 +msgid "We are unable to locate any Fibre Channel devices." 
msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:213 -msgid "No fixed ips associated to instance" +#: cinder/brick/exception.py:89 +msgid "Unable to find a Fibre Channel volume device." msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:216 -msgid "Associate floating ip failed" +#: cinder/brick/exception.py:93 +#, python-format +msgid "Volume device not found at %(device)s." msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:144 +#: cinder/brick/exception.py:97 #, python-format -msgid "Invalid status: '%s'" +msgid "Unable to find Volume Group: %(vg_name)s" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:148 +#: cinder/brick/exception.py:101 +#, python-format +msgid "Failed to create Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:105 #, fuzzy, python-format -msgid "Invalid mode: '%s'" -msgstr "Backend invalide : %s" +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "Tentative de suppression d'une console non existente %(console_id)s." + +#: cinder/brick/exception.py:109 +#, fuzzy, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "Tentative de suppression d'une console non existente %(console_id)s." -#: cinder/api/openstack/compute/contrib/hosts.py:152 +#: cinder/brick/exception.py:113 #, python-format -msgid "Invalid update setting: '%s'" +msgid "Failed to attach iSCSI target for volume %(volume_id)s." msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:170 +#: cinder/brick/exception.py:117 #, python-format -msgid "Putting host %(host)s in maintenance mode %(mode)s." +msgid "Connect to volume via protocol %(protocol)s not supported." msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:181 +#: cinder/brick/initiator/connector.py:127 #, python-format -msgid "Setting host %(host)s to %(state)s." +msgid "Invalid InitiatorConnector protocol specified %(protocol)s" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:230 -msgid "Describe-resource is admin only functionality" +#: cinder/brick/initiator/connector.py:140 +#, python-format +msgid "Failed to access the device on the path %(path)s: %(error)s %(info)s." msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:238 -msgid "Host not found" +#: cinder/brick/initiator/connector.py:229 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. Try" +" number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:242 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" msgstr "" -#: cinder/api/openstack/compute/contrib/keypairs.py:70 -msgid "Keypair name contains unsafe characters" +#: cinder/brick/initiator/connector.py:317 +#, python-format +msgid "Could not find the iSCSI Initiator File %s" msgstr "" -#: cinder/api/openstack/compute/contrib/keypairs.py:95 -msgid "Keypair name must be between 1 and 255 characters long" +#: cinder/brick/initiator/connector.py:609 +msgid "We are unable to locate any Fibre Channel devices" msgstr "" -#: cinder/api/openstack/compute/contrib/keypairs.py:100 +#: cinder/brick/initiator/connector.py:619 #, python-format -msgid "Key pair '%s' already exists." +msgid "Looking for Fibre Channel dev %(device)s" msgstr "" -#: cinder/api/openstack/compute/contrib/multinic.py:52 -msgid "Missing 'networkId' argument for addFixedIp" +#: cinder/brick/initiator/connector.py:629 +msgid "Fibre Channel volume device not found." 
msgstr "" -#: cinder/api/openstack/compute/contrib/multinic.py:68 -msgid "Missing 'address' argument for removeFixedIp" +#: cinder/brick/initiator/connector.py:633 +#, python-format +msgid "Fibre volume not yet found. Will rescan & retry. Try number: %(tries)s" msgstr "" -#: cinder/api/openstack/compute/contrib/multinic.py:77 +#: cinder/brick/initiator/connector.py:649 #, python-format -msgid "Unable to find address %r" +msgid "Found Fibre Channel volume %(name)s (after %(tries)s rescans)" msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:62 +#: cinder/brick/initiator/connector.py:658 #, python-format -msgid "Network does not have %s action" +msgid "Multipath device discovered %(device)s" msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:70 +#: cinder/brick/initiator/connector.py:776 #, python-format -msgid "Disassociating network with id %s" +msgid "AoE volume not yet found at: %(path)s. Try number: %(tries)s" msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:74 -#: cinder/api/openstack/compute/contrib/networks.py:91 -#: cinder/api/openstack/compute/contrib/networks.py:101 -msgid "Network not found" +#: cinder/brick/initiator/connector.py:789 +#, python-format +msgid "Found AoE device %(path)s (after %(tries)s rediscover)" msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:87 +#: cinder/brick/initiator/connector.py:815 #, python-format -msgid "Showing network with id %s" +msgid "aoe-discover: stdout=%(out)s stderr%(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:97 +#: cinder/brick/initiator/connector.py:825 #, python-format -msgid "Deleting network with id %s" +msgid "aoe-revalidate %(dev)s: stdout=%(out)s stderr%(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/scheduler_hints.py:41 -msgid "Malformed scheduler_hints attribute" +#: cinder/brick/initiator/connector.py:834 +#, python-format +msgid "aoe-flush %(dev)s: stdout=%(out)s stderr%(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:222 -msgid "Security group id should be integer" +#: cinder/brick/initiator/connector.py:858 +msgid "" +"Connection details not present. RemoteFsClient may not initialize " +"properly." msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:243 -#, fuzzy, python-format -msgid "Security group is still in use" -msgstr "Révocation de groupe de sécurité %s" +#: cinder/brick/initiator/connector.py:915 +msgid "Invalid connection_properties specified no device_path attribute" +msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:295 -#, python-format -msgid "Security group %s already exists" +#: cinder/brick/initiator/linuxfc.py:50 cinder/brick/initiator/linuxfc.py:56 +msgid "systool is not installed" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:315 +#: cinder/brick/initiator/linuxscsi.py:99 +#: cinder/brick/initiator/linuxscsi.py:107 +#: cinder/brick/initiator/linuxscsi.py:124 #, python-format -msgid "Security group %s is not a string or unicode" +msgid "multipath call failed exit (%(code)s)" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:318 +#: cinder/brick/initiator/linuxscsi.py:145 #, python-format -msgid "Security group %s cannot be empty." +msgid "Couldn't find multipath device %(line)s" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:321 +#: cinder/brick/initiator/linuxscsi.py:149 #, python-format -msgid "Security group %s should not be greater than 255 characters." 
+msgid "Found multipath device = %(mdev)s" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:348 -msgid "Parent group id is not integer" +#: cinder/brick/iscsi/iscsi.py:140 +msgid "Attempting recreate of backing lun..." msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:351 +#: cinder/brick/iscsi/iscsi.py:158 #, python-format -msgid "Security group (%s) not found" +msgid "" +"Failed to recover attempt to create iscsi backing lun for volume " +"id:%(vol_id)s: %(e)s" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:369 -msgid "Not enough parameters to build a valid rule." -msgstr "Pas assez de parametres pour contruire un règle valide." - -#: cinder/api/openstack/compute/contrib/security_groups.py:376 +#: cinder/brick/iscsi/iscsi.py:177 #, python-format -msgid "This rule already exists in group %s" -msgstr "Cette règle existe déjà dans le groupe %s" - -#: cinder/api/openstack/compute/contrib/security_groups.py:414 -msgid "Parent or group id is not integer" -msgstr "" - -#: cinder/api/openstack/compute/contrib/security_groups.py:507 -msgid "Rule id is not integer" +msgid "Creating iscsi_target for: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:510 +#: cinder/brick/iscsi/iscsi.py:184 #, python-format -msgid "Rule (%s) not found" +msgid "" +"Created volume path %(vp)s,\n" +"content: %(vc)%" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:574 -#: cinder/api/openstack/compute/contrib/security_groups.py:607 -msgid "Security group not specified" -msgstr "" +#: cinder/brick/iscsi/iscsi.py:216 cinder/brick/iscsi/iscsi.py:365 +#, fuzzy, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "Tentative de suppression d'une console non existente %(console_id)s." -#: cinder/api/openstack/compute/contrib/security_groups.py:578 -#: cinder/api/openstack/compute/contrib/security_groups.py:611 -msgid "Security group name cannot be empty" +#: cinder/brick/iscsi/iscsi.py:227 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" msgstr "" -#: cinder/api/openstack/compute/contrib/server_start_stop.py:45 +#: cinder/brick/iscsi/iscsi.py:258 #, python-format -msgid "start instance %r" +msgid "Removing iscsi_target for: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/server_start_stop.py:54 +#: cinder/brick/iscsi/iscsi.py:262 #, python-format -msgid "stop instance %r" +msgid "Volume path %s does not exist, nothing to remove." msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:73 -#: cinder/api/openstack/volume/volumes.py:106 -#, python-format -msgid "vol=%s" +#: cinder/brick/iscsi/iscsi.py:280 +#, fuzzy, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "Tentative de suppression d'une console non existente %(console_id)s." 
+ +#: cinder/brick/iscsi/iscsi.py:290 cinder/brick/iscsi/iscsi.py:550 +msgid "valid iqn needed for show_target" msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:146 -#: cinder/api/openstack/volume/volumes.py:184 +#: cinder/brick/iscsi/iscsi.py:375 #, python-format -msgid "Delete volume with id: %s" +msgid "Removing iscsi_target for volume: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:329 -#, python-format -msgid "Attach volume %(volume_id)s to instance %(server_id)s at %(device)s" +#: cinder/brick/iscsi/iscsi.py:469 +msgid "cinder-rtstool is not installed correctly" msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:481 -#: cinder/api/openstack/volume/snapshots.py:110 +#: cinder/brick/iscsi/iscsi.py:489 #, python-format -msgid "Delete snapshot with id: %s" +msgid "Creating iscsi_target for volume: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:524 -#: cinder/api/openstack/volume/snapshots.py:150 +#: cinder/brick/iscsi/iscsi.py:513 cinder/brick/iscsi/iscsi.py:522 #, python-format -msgid "Create snapshot from volume %s" +msgid "Failed to create iscsi target for volume id:%s." msgstr "" -#: cinder/auth/fakeldap.py:33 -msgid "Attempted to instantiate singleton" -msgstr "Tentative d'instanciation d'un singleton" +#: cinder/brick/iscsi/iscsi.py:532 +#, fuzzy, python-format +msgid "Removing iscsi_target: %s" +msgstr "Redémarrage de l'instance %s" -#: cinder/auth/ldapdriver.py:650 +#: cinder/brick/iscsi/iscsi.py:542 #, python-format -msgid "" -"Attempted to remove the last member of a group. Deleting the group at %s " -"instead." +msgid "Failed to remove iscsi target for volume id:%s." msgstr "" -"Tentative de suppression du dernier membre d'un groupe. Essayez plutôt " -"de supprimer le group sur %s." 
-#: cinder/auth/manager.py:298 +#: cinder/brick/iscsi/iscsi.py:571 #, python-format -msgid "Looking up user: %r" -msgstr "Recherche de l'utilisateur : %r" +msgid "Failed to add initiator iqn %s to target" +msgstr "" -#: cinder/auth/manager.py:302 -#, python-format -msgid "Failed authorization for access key %s" -msgstr "Autorisation refusée pour la clef d'accès %s" +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" -#: cinder/auth/manager.py:308 +#: cinder/brick/local_dev/lvm.py:76 cinder/brick/local_dev/lvm.py:158 +#: cinder/brick/local_dev/lvm.py:474 cinder/brick/local_dev/lvm.py:503 +#: cinder/brick/local_dev/lvm.py:546 cinder/brick/local_dev/lvm.py:609 #, python-format -msgid "Using project name = user name (%s)" -msgstr "Nom de projet utilisé = nom d'utilisateur (%s)" +msgid "Cmd :%s" +msgstr "" -#: cinder/auth/manager.py:315 +#: cinder/brick/local_dev/lvm.py:77 cinder/brick/local_dev/lvm.py:159 +#: cinder/brick/local_dev/lvm.py:475 cinder/brick/local_dev/lvm.py:504 +#: cinder/brick/local_dev/lvm.py:547 cinder/brick/local_dev/lvm.py:610 #, python-format -msgid "failed authorization: no project named %(pjid)s (user=%(uname)s)" +msgid "StdOut :%s" msgstr "" -"Autorisation refusée : pas de projet nommé %(pjid)s " -"(utilisateur=%(uname)s)" -#: cinder/auth/manager.py:324 +#: cinder/brick/local_dev/lvm.py:78 cinder/brick/local_dev/lvm.py:160 +#: cinder/brick/local_dev/lvm.py:476 cinder/brick/local_dev/lvm.py:505 +#: cinder/brick/local_dev/lvm.py:548 cinder/brick/local_dev/lvm.py:611 #, python-format -msgid "" -"Failed authorization: user %(uname)s not admin and not member of project " -"%(pjname)s" +msgid "StdErr :%s" msgstr "" -"Autorisation refusée : utilisateur %(uname)s n'est ni admin ni membre du " -"projet %(pjname)s" -#: cinder/auth/manager.py:331 cinder/auth/manager.py:343 -#, python-format -msgid "user.secret: %s" +#: cinder/brick/local_dev/lvm.py:82 +#, fuzzy, python-format +msgid "Unable to locate Volume Group %s" +msgstr "Impossible de trouver le volume %s" + +#: cinder/brick/local_dev/lvm.py:157 +msgid "Error querying thin pool about data_percent" msgstr "" -#: cinder/auth/manager.py:332 cinder/auth/manager.py:344 -#, python-format -msgid "expected_signature: %s" +#: cinder/brick/local_dev/lvm.py:370 +#, fuzzy, python-format +msgid "Unable to find VG: %s" +msgstr "Impossible de trouver SR du VDB %s" + +#: cinder/brick/local_dev/lvm.py:420 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." 
msgstr "" -#: cinder/auth/manager.py:333 cinder/auth/manager.py:345 -#, python-format -msgid "signature: %s" +#: cinder/brick/local_dev/lvm.py:473 +msgid "Error creating Volume" msgstr "" -#: cinder/auth/manager.py:335 cinder/auth/manager.py:357 -#, python-format -msgid "Invalid signature for user %s" -msgstr "Signature non valide pour l'utilisateur %s" +#: cinder/brick/local_dev/lvm.py:489 +#, fuzzy, python-format +msgid "Unable to find LV: %s" +msgstr "Impossible de trouver SR du VDB %s" -#: cinder/auth/manager.py:353 -#, python-format -msgid "host_only_signature: %s" +#: cinder/brick/local_dev/lvm.py:502 +msgid "Error creating snapshot" msgstr "" -#: cinder/auth/manager.py:449 -msgid "Must specify project" -msgstr "Le projet doit être spécifié" +#: cinder/brick/local_dev/lvm.py:545 +msgid "Error activating LV" +msgstr "" -#: cinder/auth/manager.py:490 +#: cinder/brick/local_dev/lvm.py:563 #, python-format -msgid "Adding role %(role)s to user %(uid)s in project %(pid)s" -msgstr "Ajout du rôle %(role)s à l'utilisateur %(uid)s pour le projet %(pid)s" +msgid "Error reported running lvremove: CMD: %(command)s, RESPONSE: %(response)s" +msgstr "" -#: cinder/auth/manager.py:493 -#, python-format -msgid "Adding sitewide role %(role)s to user %(uid)s" -msgstr "Ajout du rôle global %(role)s pour l'utilisateur %(uid)s" +#: cinder/brick/local_dev/lvm.py:568 +msgid "Attempting udev settle and retry of lvremove..." +msgstr "" -#: cinder/auth/manager.py:519 -#, python-format -msgid "Removing role %(role)s from user %(uid)s on project %(pid)s" +#: cinder/brick/local_dev/lvm.py:608 +msgid "Error extending Volume" msgstr "" -"Suppression du rôle %(role)s pour l'utilisateur %(uid)s dans le projet " -"%(pid)s" -#: cinder/auth/manager.py:522 -#, python-format -msgid "Removing sitewide role %(role)s from user %(uid)s" -msgstr "Suppression du role global %(role)s pour l'utilisateur %(uid)s" +#: cinder/brick/remotefs/remotefs.py:39 +msgid "nfs_mount_point_base required" +msgstr "" -#: cinder/auth/manager.py:595 -#, python-format -msgid "Created project %(name)s with manager %(manager_user)s" -msgstr "Création du projet %(name)s ayant pour manager %(manager_user)s" +#: cinder/brick/remotefs/remotefs.py:45 +msgid "glusterfs_mount_point_base required" +msgstr "" -#: cinder/auth/manager.py:613 +#: cinder/brick/remotefs/remotefs.py:86 #, python-format -msgid "modifying project %s" -msgstr "modification du projet %s" +msgid "Already mounted: %s" +msgstr "" -#: cinder/auth/manager.py:625 -#, python-format -msgid "Adding user %(uid)s to project %(pid)s" -msgstr "Ajout de l'utilisateur %(uid)s au projet %(pid)s" +#: cinder/common/config.py:125 +msgid "Deploy v1 of the Cinder API." +msgstr "" -#: cinder/auth/manager.py:646 -#, python-format -msgid "Remove user %(uid)s from project %(pid)s" -msgstr "Suppression de l'utilisateur %(uid)s du projet %(pid)s" +#: cinder/common/config.py:128 +msgid "Deploy v2 of the Cinder API." +msgstr "" -#: cinder/auth/manager.py:676 -#, python-format -msgid "Deleting project %s" -msgstr "Suppression du projet %s" +#: cinder/common/sqlalchemyutils.py:66 +#: cinder/openstack/common/db/sqlalchemy/utils.py:72 +msgid "Id not in sort_keys; is sort_keys unique?" 
+msgstr "" -#: cinder/auth/manager.py:734 -#, python-format -msgid "Created user %(rvname)s (admin: %(rvadmin)r)" -msgstr "Utilisateur créé %(rvname)s (admin: %(rvadmin)r)" +#: cinder/common/sqlalchemyutils.py:114 +#: cinder/openstack/common/db/sqlalchemy/utils.py:120 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" -#: cinder/auth/manager.py:743 +#: cinder/compute/nova.py:97 #, python-format -msgid "Deleting user %s" -msgstr "Suppression de l'utilisateur %s" +msgid "Novaclient connection created using URL: %s" +msgstr "" -#: cinder/auth/manager.py:753 -#, python-format -msgid "Access Key change for user %s" -msgstr "Clef d'accès changée pour l'utilisateur %s" +#: cinder/db/sqlalchemy/api.py:63 +msgid "Use of empty request context is deprecated" +msgstr "L'utilisation d'une requête de contexte vide est dévalué" -#: cinder/auth/manager.py:755 +#: cinder/db/sqlalchemy/api.py:190 #, python-format -msgid "Secret Key change for user %s" -msgstr "Clef secrète changée pour l'utilisateur %s" +msgid "Unrecognized read_deleted value '%s'" +msgstr "" -#: cinder/auth/manager.py:757 +#: cinder/db/sqlalchemy/api.py:843 #, python-format -msgid "Admin status set to %(admin)r for user %(uid)s" -msgstr "Statut admin changé en %(admin)r pour l'utilisateur %(uid)s" +msgid "Change will make usage less than 0 for the following resources: %s" +msgstr "" -#: cinder/auth/manager.py:802 +#: cinder/db/sqlalchemy/api.py:1842 #, python-format -msgid "No vpn data for project %s" -msgstr "Pas de données VPN pour le projet %s" - -#: cinder/cloudpipe/pipelib.py:46 -#, fuzzy, python-format -msgid "Instance type for vpn instances" -msgstr "L'instance de type %(instance_type)s est invalide." - -#: cinder/cloudpipe/pipelib.py:49 -msgid "Template for cloudpipe instance boot script" +msgid "VolumeType %s deletion failed, VolumeType in use." msgstr "" -#: cinder/cloudpipe/pipelib.py:52 -msgid "Network to push into openvpn config" -msgstr "Réseau à passer à la configuration d'openvpn" - -#: cinder/cloudpipe/pipelib.py:55 -msgid "Netmask to push into openvpn config" -msgstr "Masque réseau à passer à la configuration d'openvpn" - -#: cinder/cloudpipe/pipelib.py:107 +#: cinder/db/sqlalchemy/api.py:2530 #, python-format -msgid "Launching VPN for %s" -msgstr "Démarrage du VPN pour %s" - -#: cinder/compute/api.py:141 -msgid "No compute host specified" +msgid "No backup with id %s" msgstr "" -#: cinder/compute/api.py:144 +#: cinder/db/sqlalchemy/api.py:2615 +#, fuzzy +msgid "Volume must be available" +msgstr "Le status du volume doit être disponible" + +#: cinder/db/sqlalchemy/api.py:2639 #, python-format -msgid "Unable to find host for Instance %s" +msgid "Volume in unexpected state %s, expected awaiting-transfer" msgstr "" -#: cinder/compute/api.py:192 +#: cinder/db/sqlalchemy/api.py:2662 #, python-format msgid "" -"Quota exceeded for %(pid)s, tried to set %(num_metadata)s metadata " -"properties" +"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " +"%(status)s, expected awaiting-transfer" msgstr "" -#: cinder/compute/api.py:203 -#, python-format -msgid "Quota exceeded for %(pid)s, metadata property key or value too long" +#: cinder/db/sqlalchemy/migration.py:37 +msgid "version should be an integer" msgstr "" -#: cinder/compute/api.py:257 -#, fuzzy, python-format -msgid "Cannot run any more instances of this type." +#: cinder/db/sqlalchemy/migration.py:64 +msgid "Upgrade DB using Essex release first." msgstr "" -"Quota d'instances dépassé. 
Vous ne pouvez éxécuter que %s instances de ce" -" type de plus." -#: cinder/compute/api.py:259 -#, fuzzy, python-format -msgid "Can only run %s more instances of this type." +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:240 +msgid "Exception while creating table." msgstr "" -"Quota d'instances dépassé. Vous ne pouvez éxécuter que %s instances de ce" -" type de plus." -#: cinder/compute/api.py:261 -#, python-format -msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances. " +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:269 +msgid "Downgrade from initial Cinder install is unsupported." msgstr "" -#: cinder/compute/api.py:310 -msgid "Creating a raw instance" -msgstr "Création d'une instance raw" - -#: cinder/compute/api.py:312 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:49 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:74 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:105 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:45 +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:48 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:46 #, python-format -msgid "Using Kernel=%(kernel_id)s, Ramdisk=%(ramdisk_id)s" +msgid "Table |%s| not created!" msgstr "" -#: cinder/compute/api.py:383 -#, python-format -msgid "Going to run %s instances..." -msgstr "Démarrage de %s instances..." - -#: cinder/compute/api.py:447 -#, python-format -msgid "bdm %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:127 +msgid "Dropping foreign key reservations_ibfk_1 failed." msgstr "" -#: cinder/compute/api.py:474 -#, python-format -msgid "block_device_mapping %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:133 +msgid "quota_classes table not dropped" msgstr "" -#: cinder/compute/api.py:591 -#, python-format -msgid "Sending create to scheduler for %(pid)s/%(uid)s's" +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:140 +msgid "quota_usages table not dropped" msgstr "" -#: cinder/compute/api.py:871 -#, fuzzy, python-format -msgid "Going to try to soft delete instance" -msgstr "Va essayer d'arrêter %s" - -#: cinder/compute/api.py:891 -msgid "No host for instance, deleting immediately" +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:147 +msgid "reservations table not dropped" msgstr "" -#: cinder/compute/api.py:939 -#, fuzzy, python-format -msgid "Going to try to terminate instance" -msgstr "Va essayer d'arrêter %s" - -#: cinder/compute/api.py:977 -#, fuzzy, python-format -msgid "Going to try to stop instance" -msgstr "Va essayer d'arrêter %s" - -#: cinder/compute/api.py:996 -#, fuzzy, python-format -msgid "Going to try to start instance" -msgstr "Va essayer d'arrêter %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:60 +msgid "Exception while creating table 'volume_glance_metadata'" +msgstr "" -#: cinder/compute/api.py:1000 -#, python-format -msgid "Instance %(instance_uuid)s is not stopped. 
(%(vm_state)s" +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:75 +msgid "volume_glance_metadata table not dropped" msgstr "" -#: cinder/compute/api.py:1071 cinder/volume/api.py:173 -#: cinder/volume/volume_types.py:64 -#, python-format -msgid "Searching by: %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:68 +msgid "backups table not dropped" msgstr "" -#: cinder/compute/api.py:1201 -#, python-format -msgid "Image type not recognized %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:58 +msgid "snapshot_metadata table not dropped" msgstr "" -#: cinder/compute/api.py:1369 -msgid "flavor_id is None. Assuming migration." +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:61 +msgid "transfers table not dropped" msgstr "" -#: cinder/compute/api.py:1377 -#, python-format -msgid "" -"Old instance type %(current_instance_type_name)s, new instance type " -"%(new_instance_type_name)s" +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:31 +msgid "migrations table not dropped" msgstr "" -#: cinder/compute/api.py:1644 +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:61 #, python-format -msgid "multiple fixedips exist, using the first: %s" +msgid "Table |%s| not created" msgstr "" -#: cinder/compute/instance_types.py:57 cinder/compute/instance_types.py:65 -msgid "create arguments must be positive integers" +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:37 +#, python-format +msgid "Exception while dropping table %s." msgstr "" -#: cinder/compute/instance_types.py:76 cinder/volume/volume_types.py:41 +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:100 #, python-format -msgid "DB error: %s" +msgid "Exception while creating table %s." msgstr "" -#: cinder/compute/instance_types.py:86 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:34 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:43 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:49 #, python-format -msgid "Instance type %s not found for deletion" +msgid "Column |%s| not created!" 
msgstr "" -#: cinder/compute/manager.py:138 -#, python-format -msgid "check_instance_lock: decorating: |%s|" -msgstr "check_instance_lock: décoration : |%s|" +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:92 +msgid "encryption_key_id column not dropped from volumes" +msgstr "" -#: cinder/compute/manager.py:140 -#, python-format -msgid "" -"check_instance_lock: arguments: |%(self)s| |%(context)s| " -"|%(instance_uuid)s|" +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:100 +msgid "encryption_key_id column not dropped from snapshots" msgstr "" -#: cinder/compute/manager.py:144 -#, python-format -msgid "check_instance_lock: locked: |%s|" -msgstr "check_instance_lock: vérouillé : |%s|" +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:105 +msgid "volume_type_id column not dropped from snapshots" +msgstr "" -#: cinder/compute/manager.py:146 -#, python-format -msgid "check_instance_lock: admin: |%s|" -msgstr "check_instance_lock: admin : |%s|" +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:113 +msgid "encryption table not dropped" +msgstr "" -#: cinder/compute/manager.py:151 -#, python-format -msgid "check_instance_lock: executing: |%s|" -msgstr "check_instance_lock: exécution : |%s|" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:49 +msgid "Table quality_of_service_specs not created!" +msgstr "" -#: cinder/compute/manager.py:155 -#, python-format -msgid "check_instance_lock: not executing |%s|" -msgstr "check_instance_lock: ne s'éxécute pas |%s|" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:60 +msgid "Added qos_specs_id column to volume type table failed." +msgstr "" -#: cinder/compute/manager.py:201 -#, python-format -msgid "Unable to load the virtualization driver: %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:85 +msgid "Dropping foreign key volume_types_ibfk_1 failed" msgstr "" -#: cinder/compute/manager.py:223 -#, python-format -msgid "" -"Instance %(instance_uuid)s has been destroyed from under us while trying " -"to set it to ERROR" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:93 +msgid "Dropping qos_specs_id column failed." msgstr "" -#: cinder/compute/manager.py:240 -#, python-format -msgid "Current state is %(drv_state)s, state in DB is %(db_state)s." +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:100 +msgid "Dropping quality_of_service_specs table failed." msgstr "" -#: cinder/compute/manager.py:245 -msgid "Rebooting instance after cinder-compute restart." +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:59 +msgid "volume_admin_metadata table not dropped" msgstr "" -#: cinder/compute/manager.py:255 -msgid "Hypervisor driver does not support firewall rules" +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:48 +msgid "" +"Found existing 'default' entries in the quota_classes table. Skipping " +"insertion of default values." msgstr "" -#: cinder/compute/manager.py:260 -msgid "Checking state" +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:72 +msgid "Added default quota class data into the DB." msgstr "" -#: cinder/compute/manager.py:329 -#, python-format -msgid "Setting up bdm %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:74 +msgid "Default quota class data not inserted into the DB." 
msgstr "" -#: cinder/compute/manager.py:400 +#: cinder/image/glance.py:161 cinder/image/glance.py:169 #, python-format -msgid "Instance %s already deleted from database. Attempting forceful vm deletion" +msgid "Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s." msgstr "" -#: cinder/compute/manager.py:406 -#, fuzzy, python-format -msgid "Exception encountered while terminating the instance %s" -msgstr "Après l'arrêt d'instances : %s" +#: cinder/image/image_utils.py:94 cinder/image/image_utils.py:199 +msgid "'qemu-img info' parsing failed." +msgstr "" -#: cinder/compute/manager.py:444 +#: cinder/image/image_utils.py:101 #, python-format -msgid "Instance %s not found." +msgid "fmt=%(fmt)s backed by: %(backing_file)s" msgstr "" -#: cinder/compute/manager.py:480 -msgid "Instance has already been created" -msgstr "L'instance a déjà été crée" - -#: cinder/compute/manager.py:523 +#: cinder/image/image_utils.py:109 cinder/image/image_utils.py:192 #, python-format msgid "" -"image_id=%(image_id)s, image_size_bytes=%(size_bytes)d, " -"allowed_size_bytes=%(allowed_size_bytes)d" +"Size is %(image_size)dGB and doesn't fit in a volume of size " +"%(volume_size)dGB." msgstr "" -#: cinder/compute/manager.py:528 +#: cinder/image/image_utils.py:157 #, python-format msgid "" -"Image '%(image_id)s' size %(size_bytes)d exceeded instance_type allowed " -"size %(allowed_size_bytes)d" +"qemu-img is not installed and image is of type %s. Only RAW images can " +"be used if qemu-img is not installed." msgstr "" -#: cinder/compute/manager.py:538 -msgid "Starting instance..." +#: cinder/image/image_utils.py:164 +msgid "" +"qemu-img is not installed and the disk format is not specified. Only RAW" +" images can be used if qemu-img is not installed." msgstr "" -#: cinder/compute/manager.py:548 -msgid "Skipping network allocation for instance" +#: cinder/image/image_utils.py:178 +#, python-format +msgid "Copying image from %(tmp)s to volume %(dest)s - size: %(size)s" msgstr "" -#: cinder/compute/manager.py:561 -msgid "Instance failed network setup" +#: cinder/image/image_utils.py:206 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" msgstr "" -#: cinder/compute/manager.py:565 +#: cinder/image/image_utils.py:224 #, python-format -msgid "Instance network_info: |%s|" +msgid "Converted to %(vol_format)s, but format is now %(file_format)s" msgstr "" -#: cinder/compute/manager.py:578 -msgid "Instance failed block device setup" +#: cinder/image/image_utils.py:260 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" msgstr "" -#: cinder/compute/manager.py:594 -msgid "Instance failed to spawn" +#: cinder/keymgr/conf_key_mgr.py:78 +msgid "" +"config option keymgr.fixed_key has not been defined: some operations may " +"fail unexpectedly" msgstr "" -#: cinder/compute/manager.py:615 -msgid "Deallocating network for instance" +#: cinder/keymgr/conf_key_mgr.py:80 +msgid "keymgr.fixed_key not defined" msgstr "" -#: cinder/compute/manager.py:672 +#: cinder/keymgr/conf_key_mgr.py:134 #, python-format -msgid "%(action_str)s instance" +msgid "Not deleting key %s" msgstr "" -#: cinder/compute/manager.py:699 +#: cinder/openstack/common/eventlet_backdoor.py:140 #, python-format -msgid "Ignoring DiskNotFound: %s" +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" msgstr "" -#: cinder/compute/manager.py:708 +#: cinder/openstack/common/excutils.py:48 #, python-format -msgid "terminating bdm %s" +msgid "Original exception being dropped: %s" msgstr "" -#: 
cinder/compute/manager.py:742 cinder/compute/manager.py:1328 -#: cinder/compute/manager.py:1416 cinder/compute/manager.py:2501 +#: cinder/openstack/common/fileutils.py:64 #, python-format -msgid "%s. Setting instance vm_state to ERROR" +msgid "Reloading cached file %s" msgstr "" -#: cinder/compute/manager.py:811 -#, python-format -msgid "" -"Cannot rebuild instance [%(instance_uuid)s], because the given image does" -" not exist." +#: cinder/openstack/common/gettextutils.py:252 +msgid "Message objects do not support addition." msgstr "" -#: cinder/compute/manager.py:816 -#, python-format -msgid "Cannot rebuild instance [%(instance_uuid)s]: %(exc)s" +#: cinder/openstack/common/gettextutils.py:261 +msgid "" +"Message objects do not support str() because they may contain non-ascii " +"characters. Please use unicode() or translate() instead." msgstr "" -#: cinder/compute/manager.py:823 -#, python-format -msgid "Rebuilding instance %s" +#: cinder/openstack/common/imageutils.py:96 +msgid "Snapshot list encountered but no header found!" msgstr "" -#: cinder/compute/manager.py:876 +#: cinder/openstack/common/lockutils.py:102 #, python-format -msgid "Rebooting instance %s" -msgstr "Redémarrage de l'instance %s" +msgid "Could not release the acquired lock `%s`" +msgstr "" -#: cinder/compute/manager.py:891 +#: cinder/openstack/common/lockutils.py:189 #, python-format -msgid "" -"trying to reboot a non-running instance: %(instance_uuid)s (state: " -"%(state)s expected: %(running)s)" +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." msgstr "" -#: cinder/compute/manager.py:933 +#: cinder/openstack/common/lockutils.py:200 #, python-format -msgid "instance %s: snapshotting" -msgstr "instance %s: création d'un instantané (snapshot)" +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" -#: cinder/compute/manager.py:939 +#: cinder/openstack/common/lockutils.py:227 #, python-format -msgid "" -"trying to snapshot a non-running instance: %(instance_uuid)s (state: " -"%(state)s expected: %(running)s)" +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." msgstr "" -#: cinder/compute/manager.py:995 +#: cinder/openstack/common/lockutils.py:235 #, python-format -msgid "Found %(num_images)d images (rotation: %(rotation)d)" +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." msgstr "" -#: cinder/compute/manager.py:1001 +#: cinder/openstack/common/log.py:301 +#, fuzzy, python-format +msgid "Deprecated: %s" +msgstr "%s reçu" + +#: cinder/openstack/common/log.py:402 #, python-format -msgid "Rotating out %d backups" +msgid "Error loading logging config %(log_config)s: %(err_msg)s" msgstr "" -#: cinder/compute/manager.py:1005 +#: cinder/openstack/common/log.py:453 #, python-format -msgid "Deleting image %s" +msgid "syslog facility must be one of: %s" msgstr "" -#: cinder/compute/manager.py:1035 +#: cinder/openstack/common/log.py:623 #, python-format -msgid "Failed to set admin password. Instance %s is not running" +msgid "Fatal call to deprecated config: %(msg)s" msgstr "" -#: cinder/compute/manager.py:1041 +#: cinder/openstack/common/loopingcall.py:82 #, python-format -msgid "Instance %s: Root password set" +msgid "task run outlasted interval by %s sec" msgstr "" -#: cinder/compute/manager.py:1050 -msgid "set_admin_password is not implemented by this driver." 
+#: cinder/openstack/common/loopingcall.py:89 +#: cinder/tests/brick/test_brick_connector.py:466 +msgid "in fixed duration looping call" msgstr "" -#: cinder/compute/manager.py:1064 -msgid "Error setting admin password" +#: cinder/openstack/common/loopingcall.py:129 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" msgstr "" -#: cinder/compute/manager.py:1079 -#, python-format -msgid "" -"trying to inject a file into a non-running instance: %(instance_uuid)s " -"(state: %(current_power_state)s expected: %(expected_state)s)" +#: cinder/openstack/common/loopingcall.py:136 +msgid "in dynamic looping call" msgstr "" -#: cinder/compute/manager.py:1084 +#: cinder/openstack/common/periodic_task.py:43 #, python-format -msgid "instance %(instance_uuid)s: injecting file to %(path)s" +msgid "Unexpected argument for periodic task creation: %(arg)s." msgstr "" -#: cinder/compute/manager.py:1098 +#: cinder/openstack/common/periodic_task.py:134 #, python-format -msgid "" -"trying to update agent on a non-running instance: %(instance_uuid)s " -"(state: %(current_power_state)s expected: %(expected_state)s)" +msgid "Skipping periodic task %(task)s because its interval is negative" msgstr "" -#: cinder/compute/manager.py:1103 +#: cinder/openstack/common/periodic_task.py:139 #, python-format -msgid "instance %(instance_uuid)s: updating agent to %(url)s" +msgid "Skipping periodic task %(task)s because it is disabled" msgstr "" -#: cinder/compute/manager.py:1116 +#: cinder/openstack/common/periodic_task.py:177 #, python-format -msgid "instance %s: rescuing" -msgstr "instance %s: récupération" +msgid "Running periodic task %(full_task_name)s" +msgstr "" -#: cinder/compute/manager.py:1141 +#: cinder/openstack/common/periodic_task.py:186 #, python-format -msgid "instance %s: unrescuing" -msgstr "instance %s: dé-récupération" - -#: cinder/compute/manager.py:1270 -msgid "destination same as source!" +msgid "Error during %(full_task_name)s: %(e)s" msgstr "" -#: cinder/compute/manager.py:1287 +#: cinder/openstack/common/policy.py:149 #, python-format -msgid "instance %s: migrating" +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." 
msgstr "" -#: cinder/compute/manager.py:1471 +#: cinder/openstack/common/policy.py:163 #, python-format -msgid "instance %s: pausing" -msgstr "instance %s: mise en pause" +msgid "Failed to understand rule %(match)r" +msgstr "" -#: cinder/compute/manager.py:1489 +#: cinder/openstack/common/policy.py:173 #, python-format -msgid "instance %s: unpausing" -msgstr "instance %s: reprise après pause" +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" -#: cinder/compute/manager.py:1525 +#: cinder/openstack/common/policy.py:180 #, python-format -msgid "instance %s: retrieving diagnostics" -msgstr "instance %s: récupération des diagnostiques" +msgid "No handler for matches of kind %s" +msgstr "" -#: cinder/compute/manager.py:1534 +#: cinder/openstack/common/processutils.py:127 #, python-format -msgid "instance %s: suspending" -msgstr "instance %s: suspension" +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" -#: cinder/compute/manager.py:1556 +#: cinder/openstack/common/processutils.py:142 #, python-format -msgid "instance %s: resuming" -msgstr "instance %s: reprise après suspension" +msgid "Running cmd (subprocess): %s" +msgstr "Execution de la commande (sous-processus) : %s" -#: cinder/compute/manager.py:1579 +#: cinder/openstack/common/processutils.py:167 +#: cinder/openstack/common/processutils.py:239 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:345 #, python-format -msgid "instance %s: locking" -msgstr "instance %s: vérrouillage" +msgid "Result was %s" +msgstr "Le résultat était %s" -#: cinder/compute/manager.py:1588 +#: cinder/openstack/common/processutils.py:179 #, python-format -msgid "instance %s: unlocking" -msgstr "instance %s: déverrouillage" +msgid "%r failed. Retrying." +msgstr "" -#: cinder/compute/manager.py:1596 +#: cinder/openstack/common/processutils.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:318 #, python-format -msgid "instance %s: getting locked state" -msgstr "instance %s: récupération de l'état de vérouillage" +msgid "Running cmd (SSH): %s" +msgstr "Execution de la cmd (SSH): %s" -#: cinder/compute/manager.py:1606 -#, python-format -msgid "instance %s: reset network" -msgstr "instance %s: redémarrage du réseau" +#: cinder/openstack/common/processutils.py:220 +msgid "Environment not supported over SSH" +msgstr "" -#: cinder/compute/manager.py:1614 -#, python-format -msgid "instance %s: inject network info" +#: cinder/openstack/common/processutils.py:224 +msgid "process_input not supported over SSH" msgstr "" -#: cinder/compute/manager.py:1618 +#: cinder/openstack/common/service.py:175 +#: cinder/openstack/common/service.py:269 #, python-format -msgid "network_info to inject: |%s|" +msgid "Caught %s, exiting" msgstr "" -#: cinder/compute/manager.py:1655 -#, python-format -msgid "instance %s: getting vnc console" +#: cinder/openstack/common/service.py:187 +msgid "Exception during rpc cleanup." 
msgstr "" -#: cinder/compute/manager.py:1685 -#, python-format -msgid "Booting with volume %(volume_id)s at %(mountpoint)s" +#: cinder/openstack/common/service.py:238 +msgid "Parent process has died unexpectedly, exiting" msgstr "" -#: cinder/compute/manager.py:1703 -#, python-format -msgid "" -"instance %(instance_uuid)s: attaching volume %(volume_id)s to " -"%(mountpoint)s" +#: cinder/openstack/common/service.py:275 +#, fuzzy +msgid "Unhandled exception" +msgstr "Exception interne : %s" + +#: cinder/openstack/common/service.py:308 +msgid "Forking too fast, sleeping" msgstr "" -#: cinder/compute/manager.py:1705 +#: cinder/openstack/common/service.py:327 #, python-format -msgid "Attaching volume %(volume_id)s to %(mountpoint)s" +msgid "Started child %d" msgstr "" -#: cinder/compute/manager.py:1714 +#: cinder/openstack/common/service.py:337 #, fuzzy, python-format -msgid "instance %(instance_uuid)s: attach failed %(mountpoint)s, removing" -msgstr "L'instance %(instance_id)s n'est pas en mode secours" +msgid "Starting %d workers" +msgstr "adresse de départ" -#: cinder/compute/manager.py:1724 +#: cinder/openstack/common/service.py:354 #, python-format -msgid "Attach failed %(mountpoint)s, removing" +msgid "Child %(pid)d killed by signal %(sig)d" msgstr "" -#: cinder/compute/manager.py:1752 +#: cinder/openstack/common/service.py:358 #, python-format -msgid "Detach volume %(volume_id)s from mountpoint %(mp)s" +msgid "Child %(pid)s exited with status %(code)d" msgstr "" -#: cinder/compute/manager.py:1756 -#, python-format -msgid "Detaching volume from unknown instance %s" -msgstr "Démontage de volume d'une instance inconnue %s" - -#: cinder/compute/manager.py:1822 +#: cinder/openstack/common/service.py:362 #, python-format -msgid "" -"Creating tmpfile %s to notify to other compute nodes that they should " -"mount the same storage." +msgid "pid %d not in child list" msgstr "" -#: cinder/compute/manager.py:1884 -msgid "Instance has no volume." +#: cinder/openstack/common/service.py:392 +#, python-format +msgid "Caught %s, stopping children" msgstr "" -#: cinder/compute/manager.py:1916 +#: cinder/openstack/common/service.py:410 #, python-format -msgid "plug_vifs() failed %(cnt)d.Retry up to %(max_retry)d for %(hostname)s." +msgid "Waiting on %d children to exit" msgstr "" -#: cinder/compute/manager.py:1973 +#: cinder/openstack/common/sslutils.py:98 #, python-format -msgid "Pre live migration failed at %(dest)s" +msgid "Invalid SSL version : %s" msgstr "" -#: cinder/compute/manager.py:2000 -msgid "post_live_migration() is started.." +#: cinder/openstack/common/strutils.py:86 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" msgstr "" -#: cinder/compute/manager.py:2030 -msgid "No floating_ip found" +#: cinder/openstack/common/strutils.py:182 +#, python-format +msgid "Invalid string format: %s" msgstr "" -#: cinder/compute/manager.py:2038 -msgid "No floating_ip found." +#: cinder/openstack/common/strutils.py:189 +#, python-format +msgid "Unknown byte multiplier: %s" msgstr "" -#: cinder/compute/manager.py:2040 +#: cinder/openstack/common/versionutils.py:69 #, python-format msgid "" -"Live migration: Unexpected error: cannot inherit floating ip.\n" -"%(e)s" +"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and " +"may be removed in %(remove_in)s." msgstr "" -#: cinder/compute/manager.py:2073 +#: cinder/openstack/common/versionutils.py:73 #, python-format -msgid "Migrating instance to %(dest)s finished successfully." 
+msgid "" +"%(what)s is deprecated as of %(as_of)s and may be removed in " +"%(remove_in)s. It will not be superseded." msgstr "" -#: cinder/compute/manager.py:2075 -msgid "" -"You may see the error \"libvirt: QEMU error: Domain not found: no domain " -"with matching name.\" This error can be safely ignored." +#: cinder/openstack/common/crypto/utils.py:29 +msgid "An unknown error occurred in crypto utils." msgstr "" -#: cinder/compute/manager.py:2090 -msgid "Post operation of migration started" +#: cinder/openstack/common/crypto/utils.py:36 +#, python-format +msgid "Block size of %(given)d is too big, max = %(maximum)d" msgstr "" -#: cinder/compute/manager.py:2226 +#: cinder/openstack/common/crypto/utils.py:45 #, python-format -msgid "Updated the info_cache for instance %s" +msgid "Length of %(given)d is too long, max = %(maximum)d" msgstr "" -#: cinder/compute/manager.py:2255 -msgid "Updating bandwidth usage cache" +#: cinder/openstack/common/db/exception.py:44 +msgid "Invalid Parameter: Unicode is not supported by the current database." msgstr "" -#: cinder/compute/manager.py:2277 -msgid "Updating host status" +#: cinder/openstack/common/db/sqlalchemy/session.py:487 +msgid "DB exception wrapped." msgstr "" -#: cinder/compute/manager.py:2305 +#: cinder/openstack/common/db/sqlalchemy/session.py:538 #, python-format -msgid "" -"Found %(num_db_instances)s in the database and %(num_vm_instances)s on " -"the hypervisor." +msgid "Got mysql server has gone away: %s" msgstr "" -#: cinder/compute/manager.py:2331 +#: cinder/openstack/common/db/sqlalchemy/session.py:610 #, python-format -msgid "" -"During the sync_power process the instance %(uuid)s has moved from host " -"%(src)s to host %(dst)s" +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/utils.py:33 +msgid "Sort key supplied was not valid." msgstr "" -#: cinder/compute/manager.py:2344 +#: cinder/openstack/common/notifier/api.py:129 #, python-format -msgid "" -"Instance %s is in the process of migrating to this host. Wait next " -"sync_power cycle before setting power state to NOSTATE" +msgid "%s not in valid priorities" msgstr "" -#: cinder/compute/manager.py:2350 +#: cinder/openstack/common/notifier/api.py:145 +#, python-format msgid "" -"Instance found in database but not known by hypervisor. Setting power " -"state to NOSTATE" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" msgstr "" -#: cinder/compute/manager.py:2380 -msgid "FLAGS.reclaim_instance_interval <= 0, skipping..." +#: cinder/openstack/common/notifier/api.py:164 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." msgstr "" -#: cinder/compute/manager.py:2392 -msgid "Reclaiming deleted instance" +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead." msgstr "" -#: cinder/compute/manager.py:2458 +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 #, python-format -msgid "" -"Detected instance with name label '%(name)s' which is marked as DELETED " -"but still present on host." +msgid "Could not send notification to %(topic)s. Payload=%(message)s" msgstr "" -#: cinder/compute/manager.py:2465 +#: cinder/openstack/common/rpc/__init__.py:105 #, python-format msgid "" -"Destroying instance with name label '%(name)s' which is marked as DELETED" -" but still present on host." 
+"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." msgstr "" -#: cinder/compute/manager.py:2472 -#, python-format -msgid "Unrecognized value '%(action)s' for FLAGS.running_deleted_instance_action" +#: cinder/openstack/common/rpc/amqp.py:83 +msgid "Pool creating new connection" msgstr "" -#: cinder/compute/manager.py:2542 +#: cinder/openstack/common/rpc/amqp.py:208 #, python-format -msgid "" -"Aggregate %(aggregate_id)s: unrecoverable state during operation on " -"%(host)s" +msgid "no calling threads waiting for msg_id : %s, message : %s" msgstr "" -#: cinder/compute/utils.py:142 -msgid "v4 subnets are required for legacy nw_info" +#: cinder/openstack/common/rpc/amqp.py:216 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." msgstr "" -#: cinder/console/manager.py:77 cinder/console/vmrc_manager.py:70 -msgid "Adding console" -msgstr "Ajout de console" - -#: cinder/console/manager.py:97 +#: cinder/openstack/common/rpc/amqp.py:299 #, python-format -msgid "Tried to remove non-existent console %(console_id)s." -msgstr "Tentative de suppression d'une console non existente %(console_id)s." +msgid "unpacked context: %s" +msgstr "Contexte décompacté : %s" -#: cinder/console/vmrc_manager.py:122 +#: cinder/openstack/common/rpc/amqp.py:345 #, python-format -msgid "Tried to remove non-existent console %(console_id)s." +msgid "UNIQUE_ID is %s." msgstr "" -#: cinder/console/vmrc_manager.py:125 +#: cinder/openstack/common/rpc/amqp.py:414 #, python-format -msgid "Removing console %(console_id)s." -msgstr "" - -#: cinder/console/xvp.py:98 -msgid "Rebuilding xvp conf" -msgstr "Reconstruction de la configuration xvp" +msgid "received %s" +msgstr "%s reçu" -#: cinder/console/xvp.py:116 +#: cinder/openstack/common/rpc/amqp.py:422 #, python-format -msgid "Re-wrote %s" -msgstr "Ré-écriture de %s" - -#: cinder/console/xvp.py:121 -msgid "Stopping xvp" -msgstr "Arrêt xvp" - -#: cinder/console/xvp.py:134 -msgid "Starting xvp" -msgstr "Démarrage xvp" +msgid "no method for message: %s" +msgstr "Pas de méthode pour le message : %s" -#: cinder/console/xvp.py:141 +#: cinder/openstack/common/rpc/amqp.py:423 #, python-format -msgid "Error starting xvp: %s" -msgstr "Erreur au démarrage xvp : %s" - -#: cinder/console/xvp.py:144 -msgid "Restarting xvp" -msgstr "Re-démarrage xvp" - -#: cinder/console/xvp.py:146 -msgid "xvp not running..." -msgstr "xvp non actif..." 
- -#: cinder/consoleauth/manager.py:63 -#, python-format -msgid "Deleting Expired Token: (%s)" -msgstr "" +msgid "No method for message: %s" +msgstr "Pas de méthode pour le message : %s" -#: cinder/consoleauth/manager.py:75 +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:280 #, python-format -msgid "Received Token: %(token)s, %(token_dict)s)" +msgid "Expected exception during message handling (%s)" msgstr "" -#: cinder/consoleauth/manager.py:79 -#, python-format -msgid "Checking Token: %(token)s, %(token_valid)s)" +#: cinder/openstack/common/rpc/amqp.py:459 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" msgstr "" -#: cinder/db/sqlalchemy/api.py:57 -msgid "Use of empty request context is deprecated" -msgstr "L'utilisation d'une requête de contexte vide est dévalué" - -#: cinder/db/sqlalchemy/api.py:198 +#: cinder/openstack/common/rpc/amqp.py:594 #, python-format -msgid "Unrecognized read_deleted value '%s'" +msgid "Making synchronous call on %s ..." msgstr "" -#: cinder/db/sqlalchemy/api.py:516 cinder/db/sqlalchemy/api.py:551 +#: cinder/openstack/common/rpc/amqp.py:597 #, python-format -msgid "No ComputeNode for %(host)s" -msgstr "" +msgid "MSG_ID is %s" +msgstr "MSG_ID est %s" -#: cinder/db/sqlalchemy/api.py:4019 cinder/db/sqlalchemy/api.py:4045 +#: cinder/openstack/common/rpc/amqp.py:631 #, python-format -msgid "No backend config with id %(sm_backend_id)s" +msgid "Making asynchronous cast on %s..." msgstr "" -#: cinder/db/sqlalchemy/api.py:4103 -#, python-format -msgid "No sm_flavor called %(sm_flavor)s" +#: cinder/openstack/common/rpc/amqp.py:640 +msgid "Making asynchronous fanout cast..." msgstr "" -#: cinder/db/sqlalchemy/api.py:4147 +#: cinder/openstack/common/rpc/amqp.py:668 #, python-format -msgid "No sm_volume with id %(volume_id)s" +msgid "Sending %(event_type)s on %(topic)s" msgstr "" -#: cinder/db/sqlalchemy/migration.py:66 -msgid "python-migrate is not installed. Exiting." -msgstr "python-migrate n'est pas installé. Fin d'éxécution." - -#: cinder/db/sqlalchemy/migration.py:78 -msgid "version should be an integer" -msgstr "" +#: cinder/openstack/common/rpc/common.py:74 +#, fuzzy +msgid "An unknown RPC related exception occurred." +msgstr "Une exception inconnue s'est produite." -#: cinder/db/sqlalchemy/session.py:137 +#: cinder/openstack/common/rpc/common.py:104 #, python-format -msgid "SQL connection failed. %s attempts left." -msgstr "" - -#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:48 -msgid "interface column not added to networks table" +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:80 -#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:54 -#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:61 -#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:48 +#: cinder/openstack/common/rpc/common.py:121 #, python-format -msgid "Table |%s| not created!" 
+msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:87 -msgid "VIF column not added to fixed_ips table" +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:97 +#: cinder/openstack/common/rpc/common.py:143 #, python-format -msgid "join list for moving mac_addresses |%s|" +msgid "Found duplicate message(%(msg_id)s). Skipping it." msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:39 -#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:60 -#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:61 -#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:99 -msgid "foreign key constraint couldn't be added" -msgstr "" - -#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:58 -msgid "foreign key constraint couldn't be dropped" -msgstr "" - -#: cinder/db/sqlalchemy/migrate_repo/versions/045_add_network_priority.py:34 -msgid "priority column not added to networks table" -msgstr "" - -#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:41 -#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:42 -#: cinder/db/sqlalchemy/migrate_repo/versions/064_change_instance_id_to_uuid_in_instance_actions.py:56 -#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:68 -msgid "foreign key constraint couldn't be removed" -msgstr "" +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "Réutilisation invalide d'une connexion RPC" -#: cinder/db/sqlalchemy/migrate_repo/versions/049_add_instances_progress.py:34 -msgid "progress column not added to instances table" +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/055_convert_flavor_id_to_str.py:97 +#: cinder/openstack/common/rpc/common.py:156 #, python-format msgid "" -"Could not cast flavorid to integer: %s. Set flavorid to an integer-like " -"string to downgrade." -msgstr "" - -#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:69 -msgid "instance_info_caches tables not dropped" -msgstr "" - -#: cinder/db/sqlalchemy/migrate_repo/versions/069_block_migration.py:41 -msgid "progress column not added to compute_nodes table" -msgstr "" - -#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:76 -msgid "dns_domains table not dropped" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:60 -msgid "quota_classes table not dropped" +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. 
Key error %(err)s" msgstr "" -#: cinder/image/glance.py:147 -msgid "Connection error contacting glance server, retrying" -msgstr "" +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "Renvoi de l'exception %s à l'appelant" -#: cinder/image/glance.py:153 cinder/network/quantum/melange_connection.py:104 -msgid "Maximum attempts reached" +#: cinder/openstack/common/rpc/impl_kombu.py:166 +#: cinder/openstack/common/rpc/impl_qpid.py:164 +msgid "Failed to process message... skipping it." msgstr "" -#: cinder/image/glance.py:278 +#: cinder/openstack/common/rpc/impl_kombu.py:477 #, python-format -msgid "Creating image in Glance. Metadata passed in %s" +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" msgstr "" -#: cinder/image/glance.py:281 +#: cinder/openstack/common/rpc/impl_kombu.py:499 #, python-format -msgid "Metadata after formatting for Glance %s" +msgid "Connected to AMQP server on %(hostname)s:%(port)d" msgstr "" -#: cinder/image/glance.py:289 +#: cinder/openstack/common/rpc/impl_kombu.py:536 #, python-format -msgid "Metadata returned from Glance formatted for Base %s" -msgstr "" - -#: cinder/image/glance.py:331 cinder/image/glance.py:335 -msgid "Not the image owner" +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" msgstr "" -#: cinder/image/glance.py:410 +#: cinder/openstack/common/rpc/impl_kombu.py:552 #, python-format -msgid "%(timestamp)s does not follow any of the signatures: %(iso_formats)s" +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." msgstr "" -#: cinder/image/s3.py:309 +#: cinder/openstack/common/rpc/impl_kombu.py:606 +#: cinder/openstack/common/rpc/impl_qpid.py:507 #, python-format -msgid "Failed to download %(image_location)s to %(image_path)s" +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" msgstr "" -#: cinder/image/s3.py:328 +#: cinder/openstack/common/rpc/impl_kombu.py:624 +#: cinder/openstack/common/rpc/impl_qpid.py:522 #, python-format -msgid "Failed to decrypt %(image_location)s to %(image_path)s" +msgid "Timed out waiting for RPC response: %s" msgstr "" -#: cinder/image/s3.py:340 +#: cinder/openstack/common/rpc/impl_kombu.py:628 +#: cinder/openstack/common/rpc/impl_qpid.py:526 #, python-format -msgid "Failed to untar %(image_location)s to %(image_path)s" +msgid "Failed to consume message from queue: %s" msgstr "" -#: cinder/image/s3.py:353 +#: cinder/openstack/common/rpc/impl_kombu.py:667 +#: cinder/openstack/common/rpc/impl_qpid.py:561 #, python-format -msgid "Failed to upload %(image_location)s to %(image_path)s" +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" msgstr "" -#: cinder/image/s3.py:379 -#, python-format -msgid "Failed to decrypt private key: %s" -msgstr "Impossible de déchiffrer la clef privée : %s" - -#: cinder/image/s3.py:387 -#, python-format -msgid "Failed to decrypt initialization vector: %s" -msgstr "Impossible de déchiffrer le vecteur d'initialisation : %s" - -#: cinder/image/s3.py:398 +#: cinder/openstack/common/rpc/impl_qpid.py:84 #, python-format -msgid "Failed to decrypt image file %(image_file)s: %(err)s" -msgstr "Impossible de déchiffrer le fichier image %(image_file)s: %(err)s" - -#: cinder/image/s3.py:410 -msgid "Unsafe filenames in image" +msgid "Invalid value for qpid_topology_version: %d" msgstr "" -#: cinder/ipv6/account_identifier.py:38 cinder/ipv6/rfc2462.py:34 +#: 
cinder/openstack/common/rpc/impl_qpid.py:455 #, python-format -msgid "Bad mac for to_global_ipv6: %s" +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" msgstr "" -#: cinder/ipv6/account_identifier.py:40 cinder/ipv6/rfc2462.py:36 +#: cinder/openstack/common/rpc/impl_qpid.py:461 #, python-format -msgid "Bad prefix for to_global_ipv6: %s" +msgid "Connected to AMQP server on %s" msgstr "" -#: cinder/ipv6/account_identifier.py:42 -#, python-format -msgid "Bad project_id for to_global_ipv6: %s" +#: cinder/openstack/common/rpc/impl_qpid.py:474 +msgid "Re-established AMQP queues" msgstr "" -#: cinder/network/ldapdns.py:321 -msgid "This driver only supports type 'a' entries." +#: cinder/openstack/common/rpc/impl_qpid.py:534 +msgid "Error processing message. Skipping it." msgstr "" -#: cinder/network/linux_net.py:166 -#, python-format -msgid "Attempted to remove chain %s which does not exist" +#: cinder/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." msgstr "" -#: cinder/network/linux_net.py:192 +#: cinder/openstack/common/rpc/impl_zmq.py:101 #, python-format -msgid "Unknown chain: %r" +msgid "Deserializing: %s" msgstr "" -#: cinder/network/linux_net.py:215 +#: cinder/openstack/common/rpc/impl_zmq.py:136 #, python-format -msgid "" -"Tried to remove rule that was not there: %(chain)r %(rule)r %(wrap)r " -"%(top)r" +msgid "Connecting to %(addr)s with %(type)s" msgstr "" -#: cinder/network/linux_net.py:335 -msgid "IPTablesManager.apply completed with success" -msgstr "" - -#: cinder/network/linux_net.py:694 -#, python-format -msgid "Hupping dnsmasq threw %s" -msgstr "Hupping dnsmasq à renvoyé %s" - -#: cinder/network/linux_net.py:696 +#: cinder/openstack/common/rpc/impl_zmq.py:137 #, python-format -msgid "Pid %d is stale, relaunching dnsmasq" -msgstr "Pid %d est dépassé, re-démarrage de dnsmasq" +msgid "-> Subscribed to %(subscribe)s" +msgstr "" -#: cinder/network/linux_net.py:756 +#: cinder/openstack/common/rpc/impl_zmq.py:138 #, python-format -msgid "killing radvd threw %s" -msgstr "La destruction de radvd à renvoyé %s" +msgid "-> bind: %(bind)s" +msgstr "" -#: cinder/network/linux_net.py:758 -#, python-format -msgid "Pid %d is stale, relaunching radvd" -msgstr "Pid %d est dépassé, re-démarrage radvd" +#: cinder/openstack/common/rpc/impl_zmq.py:146 +msgid "Could not open socket." +msgstr "" -#: cinder/network/linux_net.py:967 +#: cinder/openstack/common/rpc/impl_zmq.py:158 #, python-format -msgid "Starting VLAN inteface %s" -msgstr "Démarrage de l'interface VLAN %s" +msgid "Subscribing to %s" +msgstr "" -#: cinder/network/linux_net.py:999 -#, python-format -msgid "Starting Bridge interface for %s" -msgstr "Démarrage de l'interface de Bridge %s" +#: cinder/openstack/common/rpc/impl_zmq.py:200 +msgid "You cannot recv on this socket." +msgstr "" -#: cinder/network/linux_net.py:1142 -#, fuzzy, python-format -msgid "Starting bridge %s " -msgstr "Démarrage de l'interface de Bridge %s" +#: cinder/openstack/common/rpc/impl_zmq.py:205 +msgid "You cannot send on this socket." 
+msgstr "" -#: cinder/network/linux_net.py:1149 +#: cinder/openstack/common/rpc/impl_zmq.py:267 #, fuzzy, python-format -msgid "Done starting bridge %s" -msgstr "Erreur au démarrage xvp : %s" +msgid "Running func with context: %s" +msgstr "Contexte décompacté : %s" -#: cinder/network/linux_net.py:1167 -#, python-format -msgid "Failed unplugging gateway interface '%s'" +#: cinder/openstack/common/rpc/impl_zmq.py:305 +msgid "Sending reply" msgstr "" -#: cinder/network/linux_net.py:1170 -#, python-format -msgid "Unplugged gateway interface '%s'" +#: cinder/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." msgstr "" -#: cinder/network/manager.py:291 -#, python-format -msgid "Fixed ip %(fixed_ip_id)s not found" -msgstr "" +#: cinder/openstack/common/rpc/impl_zmq.py:371 +#, fuzzy +msgid "Registering reactor" +msgstr "Dé-enregitrement de l'image %s" -#: cinder/network/manager.py:300 cinder/network/manager.py:496 -#, python-format -msgid "Interface %(interface)s not found" +#: cinder/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" msgstr "" -#: cinder/network/manager.py:315 -#, python-format -msgid "floating IP allocation for instance |%s|" +#: cinder/openstack/common/rpc/impl_zmq.py:387 +msgid "Consuming socket" msgstr "" -#: cinder/network/manager.py:353 +#: cinder/openstack/common/rpc/impl_zmq.py:437 #, python-format -msgid "floating IP deallocation for instance |%s|" +msgid "Creating proxy for topic: %s" msgstr "" -#: cinder/network/manager.py:386 -#, python-format -msgid "Address |%(address)s| is not allocated" +#: cinder/openstack/common/rpc/impl_zmq.py:443 +msgid "Topic contained dangerous characters." msgstr "" -#: cinder/network/manager.py:390 -#, python-format -msgid "Address |%(address)s| is not allocated to your project |%(project)s|" +#: cinder/openstack/common/rpc/impl_zmq.py:475 +msgid "Topic socket file creation failed." msgstr "" -#: cinder/network/manager.py:402 +#: cinder/openstack/common/rpc/impl_zmq.py:481 #, python-format -msgid "Quota exceeded for %s, tried to allocate address" +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." msgstr "" -#: cinder/network/manager.py:614 +#: cinder/openstack/common/rpc/impl_zmq.py:497 #, python-format -msgid "" -"Database inconsistency: DNS domain |%s| is registered in the Cinder db but " -"not visible to either the floating or instance DNS driver. It will be " -"ignored." +msgid "Required IPC directory does not exist at %s" msgstr "" -#: cinder/network/manager.py:660 +#: cinder/openstack/common/rpc/impl_zmq.py:506 #, python-format -msgid "Domain |%(domain)s| already exists, changing zone to |%(av_zone)s|." +msgid "Permission denied to IPC directory at %s" msgstr "" -#: cinder/network/manager.py:670 -#, python-format -msgid "Domain |%(domain)s| already exists, changing project to |%(project)s|." +#: cinder/openstack/common/rpc/impl_zmq.py:509 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." msgstr "" -#: cinder/network/manager.py:778 +#: cinder/openstack/common/rpc/impl_zmq.py:543 #, python-format -msgid "Disassociated %s stale fixed ip(s)" +msgid "CONSUMER RECEIVED DATA: %s" msgstr "" -#: cinder/network/manager.py:782 -msgid "setting network host" -msgstr "réglage de l'hôte réseau" - -#: cinder/network/manager.py:896 -#, python-format -msgid "network allocations for instance |%s|" +#: cinder/openstack/common/rpc/impl_zmq.py:562 +msgid "ZMQ Envelope version unsupported or unknown." 
msgstr "" -#: cinder/network/manager.py:901 -#, python-format -msgid "networks retrieved for instance |%(instance_id)s|: |%(networks)s|" +#: cinder/openstack/common/rpc/impl_zmq.py:590 +msgid "Skipping topic registration. Already registered." msgstr "" -#: cinder/network/manager.py:930 +#: cinder/openstack/common/rpc/impl_zmq.py:597 #, python-format -msgid "network deallocation for instance |%s|" +msgid "Consumer is a zmq.%s" msgstr "" -#: cinder/network/manager.py:1152 -#, python-format -msgid "" -"instance-dns-zone is |%(domain)s|, which is in availability zone " -"|%(zone)s|. Instance |%(instance)s| is in zone |%(zone2)s|. No DNS record" -" will be created." +#: cinder/openstack/common/rpc/impl_zmq.py:649 +msgid "Creating payload" msgstr "" -#: cinder/network/manager.py:1227 -#, python-format -msgid "Unable to release %s because vif doesn't exist." +#: cinder/openstack/common/rpc/impl_zmq.py:662 +msgid "Creating queue socket for reply waiter" msgstr "" -#: cinder/network/manager.py:1244 -#, python-format -msgid "Leased IP |%(address)s|" +#: cinder/openstack/common/rpc/impl_zmq.py:675 +msgid "Sending cast" msgstr "" -#: cinder/network/manager.py:1248 -#, python-format -msgid "IP %s leased that is not associated" +#: cinder/openstack/common/rpc/impl_zmq.py:678 +msgid "Cast sent; Waiting reply" msgstr "" -#: cinder/network/manager.py:1256 -#, python-format -msgid "IP |%s| leased that isn't allocated" +#: cinder/openstack/common/rpc/impl_zmq.py:681 +#, fuzzy, python-format +msgid "Received message: %s" +msgstr "%s reçu" + +#: cinder/openstack/common/rpc/impl_zmq.py:682 +msgid "Unpacking response" msgstr "" -#: cinder/network/manager.py:1261 -#, python-format -msgid "Released IP |%(address)s|" +#: cinder/openstack/common/rpc/impl_zmq.py:691 +msgid "Unsupported or unknown ZMQ envelope returned." msgstr "" -#: cinder/network/manager.py:1265 +#: cinder/openstack/common/rpc/impl_zmq.py:698 +#, fuzzy +msgid "RPC Message Invalid." +msgstr "La requête est invalide." + +#: cinder/openstack/common/rpc/impl_zmq.py:721 #, python-format -msgid "IP %s released that is not associated" +msgid "%(msg)s" msgstr "" -#: cinder/network/manager.py:1268 +#: cinder/openstack/common/rpc/impl_zmq.py:724 #, python-format -msgid "IP %s released that was not leased" -msgstr "IP %s libérée qui n'était pas allouée" +msgid "Sending message(s) to: %s" +msgstr "" -#: cinder/network/manager.py:1331 -msgid "cidr already in use" +#: cinder/openstack/common/rpc/impl_zmq.py:728 +msgid "No matchmaker results. Not casting." msgstr "" -#: cinder/network/manager.py:1334 -#, python-format -msgid "requested cidr (%(cidr)s) conflicts with existing supernet (%(super)s)" +#: cinder/openstack/common/rpc/impl_zmq.py:731 +msgid "No match from matchmaker." msgstr "" -#: cinder/network/manager.py:1345 +#: cinder/openstack/common/rpc/impl_zmq.py:771 #, python-format -msgid "" -"requested cidr (%(cidr)s) conflicts with existing smaller cidr " -"(%(smaller)s)" +msgid "topic is %s." msgstr "" -#: cinder/network/manager.py:1404 -msgid "Network already exists!" +#: cinder/openstack/common/rpc/impl_zmq.py:815 +#, python-format +msgid "rpc_zmq_matchmaker = %(orig)s is deprecated; use %(new)s instead" msgstr "" -#: cinder/network/manager.py:1423 -#, python-format -msgid "Network must be disassociated from project %s before delete" +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." 
msgstr "" -#: cinder/network/manager.py:1832 -msgid "" -"The sum between the number of networks and the vlan start cannot be " -"greater than 4094" -msgstr "La somme du nombre de réseau et le début de vlan ne peut excéder 4094" +#: cinder/openstack/common/rpc/matchmaker.py:89 +#, fuzzy +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "xenapi.fake n'a pas d'implémentation pour %s" -#: cinder/network/manager.py:1839 +#: cinder/openstack/common/rpc/matchmaker.py:239 #, python-format -msgid "" -"The network range is not big enough to fit %(num_networks)s. Network size" -" is %(network_size)s" +msgid "Matchmaker unregistered: %s, %s" msgstr "" -#: cinder/network/minidns.py:65 -msgid "This driver only supports type 'a'" +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." msgstr "" -#: cinder/network/quantum/client.py:154 -msgid "Tenant ID not set" -msgstr "" - -#: cinder/network/quantum/client.py:180 +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#: cinder/openstack/common/rpc/matchmaker_ring.py:79 +#: cinder/openstack/common/rpc/matchmaker_ring.py:97 #, python-format -msgid "Quantum Client Request: %(method)s %(action)s" +msgid "No key defining hosts for topic '%s', see ringfile" msgstr "" -#: cinder/network/quantum/client.py:196 +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:54 #, python-format -msgid "Quantum entity not found: %s" +msgid "extra_spec requirement '%(req)s' does not match '%(cap)s'" msgstr "" -#: cinder/network/quantum/client.py:206 +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:67 #, python-format -msgid "Server %(status_code)s error: %(data)s" +msgid "%(host_state)s fails resource_type extra_specs requirements" msgstr "" -#: cinder/network/quantum/client.py:210 -#, python-format -msgid "Unable to connect to server. Got error: %s" +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:43 +msgid "Re-scheduling is disabled." msgstr "" -#: cinder/network/quantum/client.py:228 +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:52 #, python-format -msgid "unable to deserialize object of type = '%s'" +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" msgstr "" -#: cinder/network/quantum/manager.py:162 -msgid "QuantumManager does not use 'multi_host' parameter." +#: cinder/scheduler/driver.py:69 +msgid "Must implement host_passes_filters" msgstr "" -#: cinder/network/quantum/manager.py:166 -msgid "QuantumManager requires that only one network is created per call" +#: cinder/scheduler/driver.py:74 +msgid "Must implement find_retype_host" msgstr "" -#: cinder/network/quantum/manager.py:176 -msgid "QuantumManager does not use 'vlan_start' parameter." -msgstr "" +#: cinder/scheduler/driver.py:78 +msgid "Must implement a fallback schedule" +msgstr "Doit mettre en oeuvre un calendrier de retrait" + +#: cinder/scheduler/driver.py:82 +#, fuzzy +msgid "Must implement schedule_create_volume" +msgstr "Doit mettre en oeuvre un calendrier de retrait" -#: cinder/network/quantum/manager.py:182 -msgid "QuantumManager does not use 'vpn_start' parameter." +#: cinder/scheduler/filter_scheduler.py:98 +#, python-format +msgid "cannot place volume %(id)s on %(host)s" msgstr "" -#: cinder/network/quantum/manager.py:186 -msgid "QuantumManager does not use 'bridge' parameter." 
+#: cinder/scheduler/filter_scheduler.py:114 +#, python-format +msgid "No valid hosts for volume %(id)s with type %(type)s" msgstr "" -#: cinder/network/quantum/manager.py:190 -msgid "QuantumManager does not use 'bridge_interface' parameter." +#: cinder/scheduler/filter_scheduler.py:125 +#, python-format +msgid "" +"Current host not valid for volume %(id)s with type %(type)s, migration " +"not allowed" msgstr "" -#: cinder/network/quantum/manager.py:195 -msgid "QuantumManager requires a valid (.1) gateway address." +#: cinder/scheduler/filter_scheduler.py:156 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" msgstr "" -#: cinder/network/quantum/manager.py:204 +#: cinder/scheduler/filter_scheduler.py:174 #, python-format msgid "" -"Unable to find existing quantum network for tenant '%(q_tenant_id)s' with" -" net-id '%(quantum_net_id)s'" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" msgstr "" -#: cinder/network/quantum/manager.py:301 +#: cinder/scheduler/filter_scheduler.py:207 #, python-format -msgid "network allocations for instance %s" +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" msgstr "" -#: cinder/network/quantum/manager.py:588 +#: cinder/scheduler/filter_scheduler.py:259 #, python-format -msgid "" -"port deallocation failed for instance: |%(instance_id)s|, port_id: " -"|%(port_id)s|" +msgid "Filtered %s" msgstr "" -#: cinder/network/quantum/manager.py:606 +#: cinder/scheduler/filter_scheduler.py:276 #, python-format -msgid "" -"ipam deallocation failed for instance: |%(instance_id)s|, vif_uuid: " -"|%(vif_uuid)s|" +msgid "Choosing %s" msgstr "" -#: cinder/network/quantum/melange_connection.py:96 +#: cinder/scheduler/host_manager.py:264 #, python-format -msgid "Server returned error: %s" +msgid "Ignoring %(service_name)s service update from %(host)s" msgstr "" -#: cinder/network/quantum/melange_connection.py:98 -msgid "Connection error contacting melange service, retrying" +#: cinder/scheduler/host_manager.py:269 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." msgstr "" -#: cinder/network/quantum/melange_connection.py:108 +#: cinder/scheduler/host_manager.py:294 #, python-format +msgid "volume service is down or disabled. (host: %s)" +msgstr "" + +#: cinder/scheduler/manager.py:63 msgid "" -"allocate IP on network |%(network_id)s| belonging to " -"|%(network_tenant_id)s| to this vif |%(vif_id)s| with mac " -"|%(mac_address)s| belonging to |%(project_id)s| " +"ChanceScheduler and SimpleScheduler have been deprecated due to lack of " +"support for advanced features like: volume types, volume encryption, QoS " +"etc. These two schedulers can be fully replaced by FilterScheduler with " +"certain combination of filters and weighers." msgstr "" -#: cinder/network/quantum/melange_ipam_lib.py:133 -msgid "get_project_and_global_net_ids must be called with a non-null project_id" +#: cinder/scheduler/manager.py:98 cinder/scheduler/manager.py:100 +msgid "Failed to create scheduler manager volume flow" msgstr "" -#: cinder/network/quantum/cinder_ipam_lib.py:75 -msgid "Error creating network entry" +#: cinder/scheduler/manager.py:159 +msgid "New volume type not specified in request_spec." msgstr "" -#: cinder/network/quantum/cinder_ipam_lib.py:90 +#: cinder/scheduler/manager.py:174 #, python-format -msgid "No network with net_id = %s" +msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." 
msgstr "" -#: cinder/network/quantum/cinder_ipam_lib.py:221 +#: cinder/scheduler/manager.py:192 #, python-format -msgid "No fixed IPs to deallocate for vif %s" +msgid "Failed to schedule_%(method)s: %(ex)s" msgstr "" -#: cinder/network/quantum/quantum_connection.py:99 +#: cinder/scheduler/scheduler_options.py:68 #, python-format -msgid "Connecting interface %(interface_id)s to net %(net_id)s for %(tenant_id)s" +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" msgstr "" -#: cinder/network/quantum/quantum_connection.py:113 +#: cinder/scheduler/scheduler_options.py:78 #, python-format -msgid "Deleting port %(port_id)s on net %(net_id)s for %(tenant_id)s" +msgid "Could not decode scheduler options: '%s'" msgstr "" -#: cinder/notifier/api.py:115 -#, python-format -msgid "%s not in valid priorities" +#: cinder/scheduler/filters/capacity_filter.py:43 +msgid "Free capacity not set: volume node info collection broken." msgstr "" -#: cinder/notifier/api.py:130 +#: cinder/scheduler/filters/capacity_filter.py:57 #, python-format msgid "" -"Problem '%(e)s' attempting to send to notification system. " -"Payload=%(payload)s" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" msgstr "" -#: cinder/notifier/list_notifier.py:65 -#, python-format -msgid "Problem '%(e)s' attempting to send to notification driver %(driver)s." +#: cinder/scheduler/flows/create_volume.py:53 +msgid "No volume_id provided to populate a request_spec from" msgstr "" -#: cinder/notifier/rabbit_notifier.py:46 +#: cinder/scheduler/flows/create_volume.py:116 #, python-format -msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgid "Failed to schedule_create_volume: %(cause)s" msgstr "" -#: cinder/rpc/amqp.py:146 +#: cinder/scheduler/flows/create_volume.py:135 #, python-format -msgid "Returning exception %s to caller" -msgstr "Renvoi de l'exception %s à l'appelant" - -#: cinder/rpc/amqp.py:188 -#, python-format -msgid "unpacked context: %s" -msgstr "Contexte décompacté : %s" +msgid "Failed notifying on %(topic)s payload %(payload)s" +msgstr "" -#: cinder/rpc/amqp.py:231 +#: cinder/tests/fake_driver.py:57 cinder/volume/driver.py:784 #, python-format -msgid "received %s" -msgstr "%s reçu" +msgid "FAKE ISCSI: %s" +msgstr "FAUX ISCSI: %s" -#: cinder/rpc/amqp.py:236 +#: cinder/tests/fake_driver.py:76 cinder/volume/driver.py:884 #, python-format -msgid "no method for message: %s" -msgstr "Pas de méthode pour le message : %s" +msgid "FAKE ISER: %s" +msgstr "" -#: cinder/rpc/amqp.py:237 -#, python-format -msgid "No method for message: %s" -msgstr "Pas de méthode pour le message : %s" +#: cinder/tests/fake_driver.py:97 +msgid "local_path not implemented" +msgstr "" -#: cinder/rpc/amqp.py:321 +#: cinder/tests/fake_driver.py:124 cinder/tests/fake_driver.py:129 #, python-format -msgid "Making asynchronous call on %s ..." +msgid "LoggingVolumeDriver: %s" msgstr "" -#: cinder/rpc/amqp.py:324 +#: cinder/tests/fake_utils.py:70 #, python-format -msgid "MSG_ID is %s" -msgstr "MSG_ID est %s" +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" -#: cinder/rpc/amqp.py:346 +#: cinder/tests/fake_utils.py:78 #, python-format -msgid "Making asynchronous cast on %s..." +msgid "Faked command matched %s" msgstr "" -#: cinder/rpc/amqp.py:354 -msgid "Making asynchronous fanout cast..." 
+#: cinder/tests/fake_utils.py:94 +#, python-format +msgid "Faked command raised an exception %s" msgstr "" -#: cinder/rpc/amqp.py:379 +#: cinder/tests/fake_utils.py:97 #, python-format -msgid "Sending notification on %s..." +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" msgstr "" -#: cinder/rpc/common.py:54 +#: cinder/tests/test_misc.py:58 #, python-format msgid "" -"Remote error: %(exc_type)s %(value)s\n" -"%(traceback)s." +"The following migrations are missing a downgrade:\n" +"\t%s" msgstr "" -#: cinder/rpc/common.py:71 -msgid "Timeout while waiting on RPC response." +#: cinder/tests/test_netapp_nfs.py:360 +#, python-format +msgid "Share %(share)s and file name %(file_name)s" msgstr "" -#: cinder/rpc/impl_kombu.py:111 -msgid "Failed to process message... skipping it." +#: cinder/tests/test_rbd.py:768 cinder/volume/drivers/rbd.py:175 +msgid "flush() not supported in this version of librbd" msgstr "" -#: cinder/rpc/impl_kombu.py:407 +#: cinder/tests/test_storwize_svc.py:260 #, python-format -msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgid "unrecognized argument %s" msgstr "" -#: cinder/rpc/impl_kombu.py:430 +#: cinder/tests/test_storwize_svc.py:1507 #, python-format -msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgid "Run CLI command: %s" msgstr "" -#: cinder/rpc/impl_kombu.py:466 +#: cinder/tests/test_storwize_svc.py:1510 #, python-format msgid "" -"Unable to connect to AMQP server on %(hostname)s:%(port)d after " -"%(max_retries)d tries: %(err_str)s" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" msgstr "" -#: cinder/rpc/impl_kombu.py:482 +#: cinder/tests/test_storwize_svc.py:1515 #, python-format msgid "" -"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " -"again in %(sleep_time)d seconds." +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/rpc/impl_kombu.py:533 cinder/rpc/impl_qpid.py:385 +#: cinder/tests/test_volume_types.py:60 #, python-format -msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgid "Given data: %s" msgstr "" -#: cinder/rpc/impl_kombu.py:551 cinder/rpc/impl_qpid.py:400 +#: cinder/tests/test_volume_types.py:61 #, python-format -msgid "Timed out waiting for RPC response: %s" +msgid "Result data: %s" msgstr "" -#: cinder/rpc/impl_kombu.py:555 cinder/rpc/impl_qpid.py:404 +#: cinder/tests/test_xiv_ds8k.py:102 #, python-format -msgid "Failed to consume message from queue: %s" +msgid "Volume not found for instance %(instance_id)s." msgstr "" -#: cinder/rpc/impl_kombu.py:589 cinder/rpc/impl_qpid.py:434 -#, python-format -msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" -msgstr "" +#: cinder/tests/api/contrib/test_backups.py:741 +#, fuzzy +msgid "Invalid input" +msgstr "Snapshot invalide" -#: cinder/rpc/impl_qpid.py:341 -#, python-format -msgid "Unable to connect to AMQP server: %s " -msgstr "" +#: cinder/tests/integrated/test_login.py:29 +#, fuzzy, python-format +msgid "volume: %s" +msgstr "Dé-montage du volume %s" -#: cinder/rpc/impl_qpid.py:346 +#: cinder/tests/integrated/api/client.py:32 #, python-format -msgid "Connected to AMQP server on %s" -msgstr "" - -#: cinder/rpc/impl_qpid.py:354 -msgid "Re-established AMQP queues" +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" msgstr "" -#: cinder/rpc/impl_qpid.py:412 -msgid "Error processing message. Skipping it." 
+#: cinder/tests/integrated/api/client.py:42 +msgid "Authentication error" msgstr "" -#: cinder/scheduler/chance.py:47 cinder/scheduler/simple.py:91 -#: cinder/scheduler/simple.py:143 -msgid "Is the appropriate service running?" +#: cinder/tests/integrated/api/client.py:50 +msgid "Authorization error" msgstr "" -#: cinder/scheduler/chance.py:52 -msgid "Could not find another compute" +#: cinder/tests/integrated/api/client.py:58 +msgid "Item not found" msgstr "" -#: cinder/scheduler/driver.py:63 +#: cinder/tests/integrated/api/client.py:100 #, python-format -msgid "Casted '%(method)s' to volume '%(host)s'" +msgid "Doing %(method)s on %(relative_url)s" msgstr "" -#: cinder/scheduler/driver.py:80 +#: cinder/tests/integrated/api/client.py:103 #, python-format -msgid "Casted '%(method)s' to compute '%(host)s'" +msgid "Body: %s" msgstr "" -#: cinder/scheduler/driver.py:89 +#: cinder/tests/integrated/api/client.py:121 #, python-format -msgid "Casted '%(method)s' to network '%(host)s'" +msgid "%(auth_uri)s => code %(http_status)s" msgstr "" -#: cinder/scheduler/driver.py:107 +#: cinder/tests/integrated/api/client.py:148 #, python-format -msgid "Casted '%(method)s' to %(topic)s '%(host)s'" +msgid "%(relative_uri)s => code %(http_status)s" msgstr "" -#: cinder/scheduler/driver.py:181 -msgid "Must implement a fallback schedule" -msgstr "Doit mettre en oeuvre un calendrier de retrait" - -#: cinder/scheduler/driver.py:185 -msgid "Driver must implement schedule_prep_resize" +#: cinder/tests/integrated/api/client.py:159 +msgid "Unexpected status code" msgstr "" -#: cinder/scheduler/driver.py:190 -msgid "Driver must implement schedule_run_instance" +#: cinder/tests/integrated/api/client.py:166 +#, python-format +msgid "Decoding JSON: %s" msgstr "" -#: cinder/scheduler/driver.py:325 -msgid "Block migration can not be used with shared storage." +#: cinder/transfer/api.py:68 +msgid "Volume in unexpected state" msgstr "" -#: cinder/scheduler/driver.py:330 -msgid "Live migration can not be used without shared storage." +#: cinder/transfer/api.py:102 cinder/volume/api.py:367 +msgid "status must be available" msgstr "" -#: cinder/scheduler/driver.py:367 cinder/scheduler/driver.py:457 +#: cinder/transfer/api.py:119 +#, fuzzy, python-format +msgid "Failed to create transfer record for %s" +msgstr "Impossible de récupérer les méta-donnérs pour l'IP : %s" + +#: cinder/transfer/api.py:136 #, python-format -msgid "host %(dest)s is not compatible with original host %(src)s." +msgid "Attempt to transfer %s with invalid auth key." msgstr "" -#: cinder/scheduler/driver.py:416 +#: cinder/transfer/api.py:156 cinder/volume/flows/api/create_volume.py:508 #, python-format msgid "" -"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " -"memory(host:%(avail)s <= instance:%(mem_inst)s)" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" msgstr "" -#: cinder/scheduler/driver.py:472 +#: cinder/transfer/api.py:182 #, python-format -msgid "" -"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " -"disk(host:%(available)s <= instance:%(necessary)s)" +msgid "Failed to update quota donating volumetransfer id %s" msgstr "" -#: cinder/scheduler/filter_scheduler.py:51 +#: cinder/transfer/api.py:199 #, python-format -msgid "No host selection for %s defined." +msgid "Volume %s has been transferred." 
msgstr "" -#: cinder/scheduler/filter_scheduler.py:64 +#: cinder/volume/api.py:143 #, python-format -msgid "Attempting to build %(num_instances)d instance(s)" +msgid "Unable to query if %s is in the availability zone set" msgstr "" -#: cinder/scheduler/filter_scheduler.py:157 -msgid "Scheduler only understands Compute nodes (for now)" +#: cinder/volume/api.py:171 cinder/volume/api.py:173 +msgid "Failed to create api volume flow" msgstr "" -#: cinder/scheduler/filter_scheduler.py:199 -#, python-format -msgid "Filtered %(hosts)s" +#: cinder/volume/api.py:202 +msgid "Failed to update quota for deleting volume" msgstr "" -#: cinder/scheduler/filter_scheduler.py:209 +#: cinder/volume/api.py:214 #, python-format -msgid "Weighted %(weighted_host)s" +msgid "Volume status must be available or error, but current status is: %s" msgstr "" -#: cinder/scheduler/host_manager.py:144 -#, python-format -msgid "Host filter fails for ignored host %(host)s" +#: cinder/volume/api.py:224 +msgid "Volume cannot be deleted while migrating" msgstr "" -#: cinder/scheduler/host_manager.py:151 +#: cinder/volume/api.py:229 #, python-format -msgid "Host filter fails for non-forced host %(host)s" +msgid "Volume still has %d dependent snapshots" msgstr "" -#: cinder/scheduler/host_manager.py:157 +#: cinder/volume/api.py:285 cinder/volume/api.py:350 +#: cinder/volume/qos_specs.py:240 cinder/volume/volume_types.py:67 #, python-format -msgid "Host filter function %(func)s failed for %(host)s" +msgid "Searching by: %s" msgstr "" -#: cinder/scheduler/host_manager.py:163 -#, python-format -msgid "Host filter passes for %(host)s" +#: cinder/volume/api.py:370 +msgid "already attached" msgstr "" -#: cinder/scheduler/host_manager.py:272 -#, python-format -msgid "Received %(service_name)s service update from %(host)s." +#: cinder/volume/api.py:377 +msgid "status must be in-use to detach" msgstr "" -#: cinder/scheduler/host_manager.py:313 -msgid "host_manager only implemented for 'compute'" +#: cinder/volume/api.py:388 +#, fuzzy +msgid "Volume status must be available to reserve" +msgstr "Le status du volume doit être disponible" + +#: cinder/volume/api.py:464 +msgid "Snapshot cannot be created while volume is migrating" msgstr "" -#: cinder/scheduler/host_manager.py:323 -#, python-format -msgid "No service for compute ID %s" +#: cinder/volume/api.py:468 +msgid "must be available" msgstr "" -#: cinder/scheduler/manager.py:85 +#: cinder/volume/api.py:490 #, python-format msgid "" -"Driver Method %(driver_method_name)s missing: %(e)s. Reverting to " -"schedule()" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" msgstr "" -#: cinder/scheduler/manager.py:150 +#: cinder/volume/api.py:502 #, python-format -msgid "Failed to schedule_%(method)s: %(ex)s" +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" msgstr "" -#: cinder/scheduler/manager.py:159 -#, fuzzy, python-format -msgid "Setting instance %(instance_uuid)s to %(state)s state." 
-msgstr "L'instance %(instance_id)s n'est pas suspendue" +#: cinder/volume/api.py:553 +#, fuzzy +msgid "Volume Snapshot status must be available or error" +msgstr "Le status du volume doit être disponible" -#: cinder/scheduler/scheduler_options.py:66 -#, python-format -msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +#: cinder/volume/api.py:581 cinder/volume/flows/api/create_volume.py:208 +msgid "Metadata property key blank" msgstr "" -#: cinder/scheduler/scheduler_options.py:75 -#, python-format -msgid "Could not decode scheduler options: '%(e)s'" +#: cinder/volume/api.py:585 +msgid "Metadata property key greater than 255 characters" msgstr "" -#: cinder/scheduler/simple.py:87 -msgid "Not enough allocatable CPU cores remaining" +#: cinder/volume/api.py:589 +msgid "Metadata property value greater than 255 characters" msgstr "" -#: cinder/scheduler/simple.py:137 -msgid "Not enough allocatable volume gigabytes remaining" -msgstr "" +#: cinder/volume/api.py:720 cinder/volume/api.py:772 +#, fuzzy +msgid "Volume status must be available/in-use." +msgstr "Le status du volume doit être disponible" -#: cinder/scheduler/filters/core_filter.py:45 -msgid "VCPUs not set; assuming CPU collection broken" +#: cinder/volume/api.py:723 +msgid "Volume status is in-use." msgstr "" -#: cinder/tests/fake_utils.py:72 -#, python-format -msgid "Faking execution of cmd (subprocess): %s" +#: cinder/volume/api.py:752 +msgid "Volume status must be available to extend." msgstr "" -#: cinder/tests/fake_utils.py:80 +#: cinder/volume/api.py:757 #, python-format -msgid "Faked command matched %s" +msgid "" +"New size for extend must be greater than current size. (current: " +"%(size)s, extended: %(new_size)s)" msgstr "" -#: cinder/tests/fake_utils.py:96 -#, python-format -msgid "Faked command raised an exception %s" +#: cinder/volume/api.py:778 +msgid "Volume is already part of an active migration" msgstr "" -#: cinder/tests/fake_utils.py:101 -#, python-format -msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +#: cinder/volume/api.py:784 +msgid "volume must not have snapshots" msgstr "" -#: cinder/tests/fakelibvirt.py:784 -msgid "Please extend mock libvirt module to support flags" +#: cinder/volume/api.py:797 +#, python-format +msgid "No available service named %s" msgstr "" -#: cinder/tests/fakelibvirt.py:790 -msgid "Please extend fake libvirt module to support this auth method" +#: cinder/volume/api.py:803 +msgid "Destination host must be different than current host" msgstr "" -#: cinder/tests/test_compute.py:365 cinder/tests/test_compute.py:1419 -#, python-format -msgid "Running instances: %s" -msgstr "Instance actives : %s" - -#: cinder/tests/test_compute.py:371 -#, python-format -msgid "After terminating instances: %s" -msgstr "Après l'arrêt d'instances : %s" - -#: cinder/tests/test_compute.py:589 -msgid "Internal error" +#: cinder/volume/api.py:833 +msgid "Source volume not mid-migration." msgstr "" -#: cinder/tests/test_compute.py:1430 -#, python-format -msgid "After force-killing instances: %s" +#: cinder/volume/api.py:837 +msgid "Destination volume not mid-migration." msgstr "" -#: cinder/tests/test_misc.py:92 +#: cinder/volume/api.py:842 #, python-format -msgid "" -"The following migrations are missing a downgrade:\n" -"\t%s" +msgid "Destination has migration_status %(stat)s, expected %(exp)s." msgstr "" -#: cinder/tests/test_cinder_manage.py:169 -msgid "id" +#: cinder/volume/api.py:853 +msgid "Volume status must be available to update readonly flag." 
msgstr "" -#: cinder/tests/test_cinder_manage.py:170 -msgid "IPv4" +#: cinder/volume/api.py:862 +#, python-format +msgid "Unable to update type due to incorrect status on volume: %s" msgstr "" -#: cinder/tests/test_cinder_manage.py:171 -msgid "IPv6" +#: cinder/volume/api.py:868 +#, python-format +msgid "Volume %s is already part of an active migration." msgstr "" -#: cinder/tests/test_cinder_manage.py:172 -msgid "start address" -msgstr "adresse de départ" - -#: cinder/tests/test_cinder_manage.py:173 -msgid "DNS1" +#: cinder/volume/api.py:874 +#, python-format +msgid "migration_policy must be 'on-demand' or 'never', passed: %s" msgstr "" -#: cinder/tests/test_cinder_manage.py:174 -msgid "DNS2" +#: cinder/volume/api.py:887 +#, python-format +msgid "Invalid volume_type passed: %s" msgstr "" -#: cinder/tests/test_cinder_manage.py:175 -msgid "VlanID" +#: cinder/volume/api.py:900 +#, python-format +msgid "New volume_type same as original: %s" msgstr "" -#: cinder/tests/test_cinder_manage.py:176 -msgid "project" +#: cinder/volume/api.py:915 +msgid "Retype cannot change encryption requirements" msgstr "" -#: cinder/tests/test_cinder_manage.py:177 -msgid "uuid" +#: cinder/volume/api.py:927 +msgid "Retype cannot change front-end qos specs for in-use volumes" msgstr "" -#: cinder/tests/test_volume.py:216 +#: cinder/volume/driver.py:189 cinder/volume/drivers/netapp/nfs.py:174 #, python-format -msgid "Target %s allocated" -msgstr "Destination %s allouée" +msgid "Recovering from a failed execute. Try number %s" +msgstr "Récupération après une exécution erronée. Tentative numéro %s" -#: cinder/tests/test_volume.py:468 +#: cinder/volume/driver.py:282 #, python-format -msgid "Cannot confirm exported volume id:%s." +msgid "copy_data_between_volumes %(src)s -> %(dest)s." msgstr "" -#: cinder/tests/test_volume_types.py:58 +#: cinder/volume/driver.py:295 cinder/volume/driver.py:309 #, python-format -msgid "Given data: %s" +msgid "Failed to attach volume %(vol)s" msgstr "" -#: cinder/tests/test_volume_types.py:59 +#: cinder/volume/driver.py:327 #, python-format -msgid "Result data: %s" +msgid "Failed to copy volume %(src)s to %(dest)d" msgstr "" -#: cinder/tests/test_xenapi.py:626 +#: cinder/volume/driver.py:340 #, python-format -msgid "Creating files in %s to simulate guest agent" +msgid "copy_image_to_volume %s." msgstr "" -#: cinder/tests/test_xenapi.py:637 +#: cinder/volume/driver.py:358 #, python-format -msgid "Removing simulated guest agent files in %s" +msgid "copy_volume_to_image %s." msgstr "" -#: cinder/tests/api/openstack/compute/test_servers.py:2144 +#: cinder/volume/driver.py:394 #, python-format -msgid "Quota exceeded: code=%(code)s" +msgid "Unable to access the backend storage via the path %(path)s." msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:57 +#: cinder/volume/driver.py:433 #, python-format -msgid "_create: %s" +msgid "Creating a new backup for volume %s." msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:66 +#: cinder/volume/driver.py:451 #, python-format -msgid "_delete: %s" +msgid "Restoring backup %(backup)s to volume %(volume)s." 
msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:75 -#, python-format -msgid "_get: %s" +#: cinder/volume/driver.py:474 +msgid "Extend volume not implemented" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:85 -#, python-format -msgid "_get_all: %s" +#: cinder/volume/driver.py:533 cinder/volume/drivers/emc/emc_smis_iscsi.py:113 +msgid "ISCSI provider_location not stored, using discovery" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:125 +#: cinder/volume/driver.py:546 #, python-format -msgid "test_snapshot_create: param=%s" +msgid "ISCSI discovery attempt failed for:%s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:134 +#: cinder/volume/driver.py:548 #, python-format -msgid "test_snapshot_create: resp_dict=%s" +msgid "Error from iscsiadm -m discovery: %s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:156 -#, python-format -msgid "test_snapshot_create_force: param=%s" -msgstr "" +#: cinder/volume/driver.py:595 +#, fuzzy, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "Imposible de trouver une exportation iSCSI pour le volume %s" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:165 +#: cinder/volume/driver.py:599 cinder/volume/drivers/emc/emc_smis_iscsi.py:156 #, python-format -msgid "test_snapshot_create_force: resp_dict=%s" +msgid "ISCSI Discovery: Found %s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:205 -#, python-format -msgid "test_snapshot_show: resp=%s" +#: cinder/volume/driver.py:696 +msgid "The volume driver requires the iSCSI initiator name in the connector." msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:231 -#, python-format -msgid "test_snapshot_detail: resp_dict=%s" +#: cinder/volume/driver.py:726 cinder/volume/driver.py:845 +#: cinder/volume/drivers/eqlx.py:247 cinder/volume/drivers/lvm.py:359 +#: cinder/volume/drivers/zadara.py:650 +#: cinder/volume/drivers/emc/emc_smis_common.py:859 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:235 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:602 +#: cinder/volume/drivers/netapp/iscsi.py:1032 +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/nexenta/iscsi.py:538 +#: cinder/volume/drivers/windows/windows.py:205 +msgid "Updating volume stats" msgstr "" -#: cinder/tests/integrated/test_login.py:31 -#, python-format -msgid "flavor: %s" +#: cinder/volume/driver.py:924 +msgid "Driver must implement initialize_connection" msgstr "" -#: cinder/tests/integrated/api/client.py:38 +#: cinder/volume/manager.py:203 #, python-format -msgid "" -"%(message)s\n" -"Status Code: %(_status)s\n" -"Body: %(_body)s" -msgstr "" - -#: cinder/tests/integrated/api/client.py:47 -msgid "Authentication error" -msgstr "" - -#: cinder/tests/integrated/api/client.py:55 -msgid "Authorization error" +msgid "Driver path %s is deprecated, update your configuration to the new path." msgstr "" -#: cinder/tests/integrated/api/client.py:63 -msgid "Item not found" +#: cinder/volume/manager.py:209 +msgid "" +"ThinLVMVolumeDriver is deprecated, please configure LVMISCSIDriver and " +"lvm_type=thin. Continuing with those settings." 
msgstr "" -#: cinder/tests/integrated/api/client.py:105 +#: cinder/volume/manager.py:228 #, python-format -msgid "Doing %(method)s on %(relative_url)s" +msgid "Starting volume driver %(driver_name)s (%(version)s)" msgstr "" -#: cinder/tests/integrated/api/client.py:107 +#: cinder/volume/manager.py:235 #, python-format -msgid "Body: %s" +msgid "Error encountered during initialization of driver: %(name)s" msgstr "" -#: cinder/tests/integrated/api/client.py:125 +#: cinder/volume/manager.py:244 #, python-format -msgid "%(auth_uri)s => code %(http_status)s" -msgstr "" +msgid "Re-exporting %s volumes" +msgstr "Ré-exportation de %s volumes" -#: cinder/tests/integrated/api/client.py:151 +#: cinder/volume/manager.py:257 #, python-format -msgid "%(relative_uri)s => code %(http_status)s" -msgstr "" - -#: cinder/tests/integrated/api/client.py:161 -msgid "Unexpected status code" +msgid "Failed to re-export volume %s: setting to error state" msgstr "" -#: cinder/tests/integrated/api/client.py:168 +#: cinder/volume/manager.py:264 #, python-format -msgid "Decoding JSON: %s" +msgid "volume %s stuck in a downloading state" msgstr "" -#: cinder/tests/rpc/common.py:133 +#: cinder/volume/manager.py:271 #, python-format -msgid "Nested received %(queue)s, %(value)s" -msgstr "Reception par Nested %(queue)s, %(value)s" +msgid "volume %s: skipping export" +msgstr "volume %s : exportation évitée" -#: cinder/tests/rpc/common.py:142 +#: cinder/volume/manager.py:273 #, python-format -msgid "Nested return %s" -msgstr "Nested renvoi %s" +msgid "" +"Error encountered during re-exporting phase of driver initialization: " +"%(name)s" +msgstr "" -#: cinder/tests/rpc/common.py:160 -msgid "RPC backend does not support timeouts" +#: cinder/volume/manager.py:283 +msgid "Resuming any in progress delete operations" msgstr "" -#: cinder/tests/rpc/common.py:227 cinder/tests/rpc/common.py:233 +#: cinder/volume/manager.py:286 #, python-format -msgid "Received %s" -msgstr "%s Reçu" - -#: cinder/virt/connection.py:85 -msgid "Failed to open connection to the hypervisor" -msgstr "Échec lors de l'ouverture d'une connexion à l'hyperviseur" +msgid "Resuming delete on volume: %s" +msgstr "" -#: cinder/virt/fake.py:270 cinder/virt/xenapi_conn.py:396 -#: cinder/virt/baremetal/proxy.py:716 cinder/virt/libvirt/connection.py:2045 -#, python-format -msgid "Compute_service record created for %s " +#: cinder/volume/manager.py:328 cinder/volume/manager.py:330 +msgid "Failed to create manager volume flow" msgstr "" -#: cinder/virt/fake.py:273 cinder/virt/xenapi_conn.py:399 -#: cinder/virt/baremetal/proxy.py:720 cinder/virt/libvirt/connection.py:2048 +#: cinder/volume/manager.py:374 cinder/volume/manager.py:391 #, python-format -msgid "Compute_service record updated for %s " -msgstr "" +msgid "volume %s: deleting" +msgstr "volume %s: suppression" + +#: cinder/volume/manager.py:380 +#, fuzzy +msgid "volume is not local to this node" +msgstr "Le volume n'est pas local à ce noeud" -#: cinder/virt/firewall.py:130 +#: cinder/volume/manager.py:389 #, python-format -msgid "Attempted to unfilter instance %s which is not filtered" -msgstr "Tentative de suppression de filtre pour l'intance %s qui n'est pas filtrée" +msgid "volume %s: removing export" +msgstr "volume %s: suppression de l'exportation" -#: cinder/virt/firewall.py:137 +#: cinder/volume/manager.py:394 #, python-format -msgid "Filters added to instance %s" +msgid "Cannot delete volume %s: volume is busy" msgstr "" -#: cinder/virt/firewall.py:139 -msgid "Provider Firewall Rules refreshed" +#: 
cinder/volume/manager.py:422 +msgid "Failed to update usages deleting volume" msgstr "" -#: cinder/virt/firewall.py:291 +#: cinder/volume/manager.py:427 #, python-format -msgid "Adding security group rule: %r" +msgid "volume %s: glance metadata deleted" msgstr "" -#: cinder/virt/firewall.py:403 cinder/virt/xenapi/firewall.py:87 +#: cinder/volume/manager.py:430 #, python-format -msgid "Adding provider rule: %s" +msgid "no glance metadata found for volume %s" msgstr "" -#: cinder/virt/images.py:86 -msgid "'qemu-img info' parsing failed." -msgstr "" +#: cinder/volume/manager.py:434 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "volume %s: supprimé avec succès" -#: cinder/virt/images.py:92 +#: cinder/volume/manager.py:451 #, python-format -msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgid "snapshot %s: creating" msgstr "" -#: cinder/virt/images.py:104 +#: cinder/volume/manager.py:462 #, python-format -msgid "Converted to raw, but format is now %s" +msgid "snapshot %(snap_id)s: creating" msgstr "" -#: cinder/virt/vmwareapi_conn.py:105 +#: cinder/volume/manager.py:490 +#, python-format msgid "" -"Must specify vmwareapi_host_ip,vmwareapi_host_username and " -"vmwareapi_host_password to useconnection_type=vmwareapi" +"Failed updating %(snapshot_id)s metadata using the provided volumes " +"%(volume_id)s metadata" msgstr "" -#: cinder/virt/vmwareapi_conn.py:276 +#: cinder/volume/manager.py:496 #, python-format -msgid "In vmwareapi:_create_session, got this exception: %s" +msgid "snapshot %s: created successfully" msgstr "" -#: cinder/virt/vmwareapi_conn.py:359 +#: cinder/volume/manager.py:508 cinder/volume/manager.py:518 #, python-format -msgid "In vmwareapi:_call_method, got this exception: %s" +msgid "snapshot %s: deleting" msgstr "" -#: cinder/virt/vmwareapi_conn.py:398 +#: cinder/volume/manager.py:526 #, python-format -msgid "Task [%(task_name)s] %(task_ref)s status: success" +msgid "Cannot delete snapshot %s: snapshot is busy" msgstr "" -#: cinder/virt/vmwareapi_conn.py:404 -#, python-format -msgid "Task [%(task_name)s] %(task_ref)s status: error %(error_info)s" +#: cinder/volume/manager.py:556 +msgid "Failed to update usages deleting snapshot" msgstr "" -#: cinder/virt/vmwareapi_conn.py:409 +#: cinder/volume/manager.py:559 #, python-format -msgid "In vmwareapi:_poll_task, Got this error %s" +msgid "snapshot %s: deleted successfully" msgstr "" -#: cinder/virt/xenapi_conn.py:140 -msgid "" -"Must specify xenapi_connection_url, xenapi_connection_username " -"(optionally), and xenapi_connection_password to use " -"connection_type=xenapi" +#: cinder/volume/manager.py:579 +msgid "being attached by another instance" msgstr "" -"Doit spécifier xenapi_connection_url, xenapi_connection_username " -"(optionel), et xenapi_connection_password pour utiliser " -"connection_type=xenapi" -#: cinder/virt/xenapi_conn.py:329 cinder/virt/libvirt/connection.py:472 -msgid "Could not determine iscsi initiator name" +#: cinder/volume/manager.py:583 +msgid "being attached by another host" msgstr "" -#: cinder/virt/xenapi_conn.py:460 -msgid "Host startup on XenServer is not supported." 
+#: cinder/volume/manager.py:587 +msgid "being attached by different mode" msgstr "" -#: cinder/virt/xenapi_conn.py:489 -msgid "Unable to log in to XenAPI (is the Dom0 disk full?)" +#: cinder/volume/manager.py:590 +msgid "status must be available or attaching" msgstr "" -#: cinder/virt/xenapi_conn.py:527 -msgid "Host is member of a pool, but DB says otherwise" +#: cinder/volume/manager.py:698 +#, fuzzy, python-format +msgid "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" msgstr "" +"Montage du volume %(volume_id)s sur l'instance %(instance_id)s en tant " +"que %(device)s" -#: cinder/virt/xenapi_conn.py:599 cinder/virt/xenapi_conn.py:612 +#: cinder/volume/manager.py:760 #, python-format -msgid "Got exception: %s" -msgstr "Reçu exception : %s" - -#: cinder/virt/baremetal/dom.py:93 -msgid "No domains exist." +msgid "Unable to fetch connection information from backend: %(err)s" msgstr "" -#: cinder/virt/baremetal/dom.py:95 +#: cinder/volume/manager.py:807 #, python-format -msgid "============= initial domains =========== : %s" +msgid "Unable to terminate volume connection: %(err)s" msgstr "" -#: cinder/virt/baremetal/dom.py:99 -msgid "Building domain: to be removed" +#: cinder/volume/manager.py:854 +msgid "failed to create new_volume on destination host" msgstr "" -#: cinder/virt/baremetal/dom.py:103 -msgid "Not running domain: remove" +#: cinder/volume/manager.py:857 +msgid "timeout creating new_volume on destination host" msgstr "" -#: cinder/virt/baremetal/dom.py:111 -msgid "domain running on an unknown node: discarded" -msgstr "" - -#: cinder/virt/baremetal/dom.py:127 +#: cinder/volume/manager.py:880 #, python-format -msgid "No such domain (%s)" +msgid "Failed to copy volume %(vol1)s to %(vol2)s" msgstr "" -#: cinder/virt/baremetal/dom.py:134 +#: cinder/volume/manager.py:909 #, python-format -msgid "Failed power down Bare-metal node %s" -msgstr "" - -#: cinder/virt/baremetal/dom.py:143 -msgid "deactivate -> activate fails" -msgstr "" - -#: cinder/virt/baremetal/dom.py:153 -msgid "destroy_domain: no such domain" +msgid "" +"migrate_volume_completion: completing migration for volume %(vol1)s " +"(temporary volume %(vol2)s" msgstr "" -#: cinder/virt/baremetal/dom.py:154 +#: cinder/volume/manager.py:921 #, python-format -msgid "No such domain %s" +msgid "" +"migrate_volume_completion is cleaning up an error for volume %(vol1)s " +"(temporary volume %(vol2)s" msgstr "" -#: cinder/virt/baremetal/dom.py:161 +#: cinder/volume/manager.py:940 #, python-format -msgid "Domains: %s" +msgid "Failed to delete migration source vol %(vol)s: %(err)s" msgstr "" -#: cinder/virt/baremetal/dom.py:163 +#: cinder/volume/manager.py:976 #, python-format -msgid "Nodes: %s" +msgid "volume %s: calling driver migrate_volume" msgstr "" -#: cinder/virt/baremetal/dom.py:166 -#, python-format -msgid "After storing domains: %s" +#: cinder/volume/manager.py:1016 +msgid "Updating volume status" msgstr "" -#: cinder/virt/baremetal/dom.py:169 -msgid "deactivation/removing domain failed" +#: cinder/volume/manager.py:1024 +#, python-format +msgid "" +"Unable to update stats, %(driver_name)s -%(driver_version)s " +"%(config_group)s driver is uninitialized." 
msgstr "" -#: cinder/virt/baremetal/dom.py:176 -msgid "===== Domain is being created =====" +#: cinder/volume/manager.py:1044 +#, python-format +msgid "Notification {%s} received" msgstr "" -#: cinder/virt/baremetal/dom.py:179 -msgid "Same domain name already exists" +#: cinder/volume/manager.py:1091 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to extend volume by %(s_size)sG, " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" msgstr "" -#: cinder/virt/baremetal/dom.py:181 -msgid "create_domain: before get_idle_node" +#: cinder/volume/manager.py:1103 +#, python-format +msgid "volume %s: extending" msgstr "" -#: cinder/virt/baremetal/dom.py:198 +#: cinder/volume/manager.py:1105 #, python-format -msgid "Created new domain: %s" +msgid "volume %s: extended successfully" msgstr "" -#: cinder/virt/baremetal/dom.py:213 +#: cinder/volume/manager.py:1107 #, python-format -msgid "Failed to boot Bare-metal node %s" +msgid "volume %s: Error trying to extend volume" msgstr "" -#: cinder/virt/baremetal/dom.py:222 -msgid "No such domain exists" +#: cinder/volume/manager.py:1169 +msgid "Failed to update usages while retyping volume." msgstr "" -#: cinder/virt/baremetal/dom.py:226 -#, python-format -msgid "change_domain_state: to new state %s" +#: cinder/volume/manager.py:1170 +msgid "Failed to get old volume type quota reservations" msgstr "" -#: cinder/virt/baremetal/dom.py:233 +#: cinder/volume/manager.py:1190 #, python-format -msgid "Stored fake domains to the file: %s" -msgstr "" - -#: cinder/virt/baremetal/dom.py:244 -msgid "domain does not exist" +msgid "Volume %s: retyped succesfully" msgstr "" -#: cinder/virt/baremetal/nodes.py:42 +#: cinder/volume/manager.py:1193 #, python-format -msgid "Unknown baremetal driver %(d)s" +msgid "" +"Volume %s: driver error when trying to retype, falling back to generic " +"mechanism." msgstr "" -#: cinder/virt/baremetal/proxy.py:148 -#, python-format -msgid "Error encountered when destroying instance '%(name)s': %(ex)s" +#: cinder/volume/manager.py:1204 +msgid "Retype requires migration but is not allowed." msgstr "" -#: cinder/virt/baremetal/proxy.py:162 -#, python-format -msgid "instance %(instance_name)s: deleting instance files %(target)s" -msgstr "instance %(instance_name)s: suppression des fichiers d'instance %(target)s" +#: cinder/volume/manager.py:1212 +msgid "Volume must not have snapshots." 
+msgstr "" -#: cinder/virt/baremetal/proxy.py:189 +#: cinder/volume/qos_specs.py:57 #, python-format -msgid "instance %s: rebooted" -msgstr "instance %s: re-démarrée" - -#: cinder/virt/baremetal/proxy.py:192 -msgid "_wait_for_reboot failed" +msgid "Valid consumer of QoS specs are: %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:222 +#: cinder/volume/qos_specs.py:84 cinder/volume/qos_specs.py:105 +#: cinder/volume/qos_specs.py:155 cinder/volume/qos_specs.py:197 +#: cinder/volume/qos_specs.py:211 cinder/volume/qos_specs.py:225 +#: cinder/volume/volume_types.py:43 #, python-format -msgid "instance %s: rescued" -msgstr "instance %s: récupérée" - -#: cinder/virt/baremetal/proxy.py:225 -msgid "_wait_for_rescue failed" +msgid "DB error: %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:242 -msgid "<============= spawn of baremetal =============>" +#: cinder/volume/qos_specs.py:123 cinder/volume/qos_specs.py:140 +#: cinder/volume/qos_specs.py:272 cinder/volume/volume_types.py:52 +#: cinder/volume/volume_types.py:99 +msgid "id cannot be None" msgstr "" -#: cinder/virt/baremetal/proxy.py:255 +#: cinder/volume/qos_specs.py:156 #, python-format -msgid "instance %s: is building" -msgstr "" - -#: cinder/virt/baremetal/proxy.py:260 -msgid "Key is injected but instance is not running yet" +msgid "Failed to get all associations of qos specs %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:265 +#: cinder/volume/qos_specs.py:189 #, python-format -msgid "instance %s: booted" -msgstr "instance %s: a démarrée" +msgid "" +"Type %(type_id)s is already associated with another qos specs: " +"%(qos_specs_id)s" +msgstr "" -#: cinder/virt/baremetal/proxy.py:268 +#: cinder/volume/qos_specs.py:198 #, python-format -msgid "~~~~~~ current state = %s ~~~~~~" +msgid "Failed to associate qos specs %(id)s with type: %(vol_type_id)s" msgstr "" -#: cinder/virt/baremetal/proxy.py:269 +#: cinder/volume/qos_specs.py:212 #, python-format -msgid "instance %s spawned successfully" +msgid "Failed to disassociate qos specs %(id)s with type: %(vol_type_id)s" msgstr "" -#: cinder/virt/baremetal/proxy.py:272 +#: cinder/volume/qos_specs.py:226 #, python-format -msgid "instance %s:not booted" +msgid "Failed to disassociate qos specs %s." msgstr "" -#: cinder/virt/baremetal/proxy.py:274 -msgid "Bremetal assignment is overcommitted." +#: cinder/volume/qos_specs.py:284 cinder/volume/volume_types.py:111 +msgid "name cannot be None" msgstr "" -#: cinder/virt/baremetal/proxy.py:354 +#: cinder/volume/utils.py:144 #, python-format -msgid "instance %s: Creating image" -msgstr "instance %s : Création de l'image" - -#: cinder/virt/baremetal/proxy.py:473 -#, python-format -msgid "instance %(inst_name)s: injecting %(injection)s into image %(img_id)s" +msgid "" +"Incorrect value error: %(blocksize)s, it may indicate that " +"'volume_dd_blocksize' was configured incorrectly. Fall back to default." 
msgstr "" -#: cinder/virt/baremetal/proxy.py:484 +#: cinder/volume/volume_types.py:130 #, python-format msgid "" -"instance %(inst_name)s: ignoring error injecting data into image " -"%(img_id)s (%(e)s)" +"Default volume type is not found, please check default_volume_type " +"config: %s" msgstr "" -"instance %(inst_name)s : l'erreur d'injection de donné dans l'image " -"%(img_id)s (%(e)s) a été ignorée" -#: cinder/virt/baremetal/proxy.py:529 +#: cinder/volume/drivers/block_device.py:131 +#: cinder/volume/drivers/block_device.py:143 cinder/volume/drivers/lvm.py:654 +#: cinder/volume/drivers/lvm.py:669 #, python-format -msgid "instance %s: starting toXML method" -msgstr "instance %s: démarrage de la méthode toXML" +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" -#: cinder/virt/baremetal/proxy.py:531 +#: cinder/volume/drivers/block_device.py:157 cinder/volume/drivers/lvm.py:687 #, python-format -msgid "instance %s: finished toXML method" -msgstr "instance %s: fin d'éxécution de la méthode toXML" - -#: cinder/virt/baremetal/proxy.py:574 cinder/virt/libvirt/connection.py:1815 msgid "" -"Cannot get the number of cpu, because this function is not implemented " -"for this platform. This error can be safely ignored for now." +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:714 +#: cinder/volume/drivers/block_device.py:183 cinder/volume/drivers/lvm.py:483 #, python-format -msgid "#### RLK: cpu_arch = %s " -msgstr "" - -#: cinder/virt/baremetal/proxy.py:746 -msgid "Updating!" +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:773 cinder/virt/libvirt/connection.py:2609 -#: cinder/virt/xenapi/host.py:129 -msgid "Updating host stats" +#: cinder/volume/drivers/block_device.py:200 cinder/volume/drivers/lvm.py:504 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" msgstr "" -#: cinder/virt/baremetal/tilera.py:185 -msgid "free_node...." +#: cinder/volume/drivers/block_device.py:272 cinder/volume/drivers/lvm.py:227 +#, python-format +msgid "Performing secure delete on volume: %s" msgstr "" -#: cinder/virt/baremetal/tilera.py:216 +#: cinder/volume/drivers/block_device.py:287 #, python-format -msgid "deactivate_node is called for node_id = %(id)s node_ip = %(ip)s" +msgid "Error unrecognized volume_clear option: %s" msgstr "" -#: cinder/virt/baremetal/tilera.py:221 -msgid "status of node is set to 0" -msgstr "" +#: cinder/volume/drivers/block_device.py:311 cinder/volume/drivers/lvm.py:300 +#: cinder/volume/drivers/zadara.py:509 cinder/volume/drivers/nexenta/nfs.py:189 +#, fuzzy, python-format +msgid "Creating clone of volume: %s" +msgstr "Création d'un volume de %s Go" -#: cinder/virt/baremetal/tilera.py:232 -msgid "rootfs is already removed" +#: cinder/volume/drivers/block_device.py:380 +msgid "No free disk" msgstr "" -#: cinder/virt/baremetal/tilera.py:264 -msgid "Before ping to the bare-metal node" +#: cinder/volume/drivers/block_device.py:393 +msgid "No big enough free disk" msgstr "" -#: cinder/virt/baremetal/tilera.py:275 +#: cinder/volume/drivers/coraid.py:84 #, python-format -msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is ready" +msgid "Invalid ESM url scheme \"%s\". Supported https only." 
msgstr "" -#: cinder/virt/baremetal/tilera.py:279 -#, python-format -msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is not ready, out_msg=%(out_msg)s" +#: cinder/volume/drivers/coraid.py:111 +msgid "Invalid REST handle name. Expected path." msgstr "" -#: cinder/virt/baremetal/tilera.py:292 -msgid "Noting to do for tilera nodes: vmlinux is in CF" +#: cinder/volume/drivers/coraid.py:134 +#, python-format +msgid "Call to json.loads() failed: %(ex)s. Response: %(resp)s" msgstr "" -#: cinder/virt/baremetal/tilera.py:316 -msgid "activate_node" +#: cinder/volume/drivers/coraid.py:224 +msgid "Session is expired. Relogin on ESM." msgstr "" -#: cinder/virt/baremetal/tilera.py:330 -msgid "Node is unknown error state." +#: cinder/volume/drivers/coraid.py:244 +msgid "Reply is empty." msgstr "" -#: cinder/virt/disk/api.py:165 -msgid "no capable image handler configured" +#: cinder/volume/drivers/coraid.py:246 +msgid "Error message is empty." msgstr "" -#: cinder/virt/disk/api.py:178 +#: cinder/volume/drivers/coraid.py:284 #, python-format -msgid "unknown disk image handler: %s" -msgstr "" - -#: cinder/virt/disk/api.py:189 -msgid "image already mounted" +msgid "Coraid Appliance ping failed: %s" msgstr "" -#: cinder/virt/disk/api.py:276 cinder/virt/disk/guestfs.py:64 -#: cinder/virt/disk/guestfs.py:78 cinder/virt/disk/mount.py:100 +#: cinder/volume/drivers/coraid.py:297 #, python-format -msgid "Failed to mount filesystem: %s" -msgstr "Impossible de monter le système de fichier : %s" +msgid "Volume \"%(name)s\" created with VSX LUN \"%(lun)s\"" +msgstr "" -#: cinder/virt/disk/api.py:291 +#: cinder/volume/drivers/coraid.py:311 #, python-format -msgid "Failed to remove container: %s" +msgid "Volume \"%s\" deleted." msgstr "" -#: cinder/virt/disk/api.py:441 +#: cinder/volume/drivers/coraid.py:315 #, python-format -msgid "User %(username)s not found in password file." +msgid "Resize volume \"%(name)s\" to %(size)s GB." msgstr "" -#: cinder/virt/disk/api.py:457 +#: cinder/volume/drivers/coraid.py:319 #, python-format -msgid "User %(username)s not found in shadow file." +msgid "Repository for volume \"%(name)s\" found: \"%(repo)s\"" msgstr "" -#: cinder/virt/disk/guestfs.py:39 +#: cinder/volume/drivers/coraid.py:333 #, python-format -msgid "unsupported partition: %s" +msgid "Volume \"%(name)s\" resized. New size is %(size)s GB." msgstr "" -#: cinder/virt/disk/guestfs.py:77 -msgid "unknown guestmount error" +#: cinder/volume/drivers/coraid.py:385 +msgid "Cannot create clone volume in different repository." 
msgstr "" -#: cinder/virt/disk/loop.py:30 +#: cinder/volume/drivers/coraid.py:505 #, python-format -msgid "Could not attach image to loopback: %s" -msgstr "Impossible de lier l'image au loopback : %s" - -#: cinder/virt/disk/mount.py:76 -msgid "no partitions found" +msgid "Initialize connection %(shelf)s/%(lun)s for %(name)s" msgstr "" -#: cinder/virt/disk/mount.py:77 +#: cinder/volume/drivers/eqlx.py:139 #, python-format -msgid "Failed to map partitions: %s" +msgid "" +"CLI output\n" +"%s" msgstr "" -#: cinder/virt/disk/nbd.py:58 -msgid "nbd unavailable: module not loaded" +#: cinder/volume/drivers/eqlx.py:154 +msgid "Reading CLI MOTD" msgstr "" -#: cinder/virt/disk/nbd.py:63 -msgid "No free nbd devices" -msgstr "Pas de device nbd libre" - -#: cinder/virt/disk/nbd.py:81 +#: cinder/volume/drivers/eqlx.py:158 #, python-format -msgid "qemu-nbd error: %s" +msgid "Setting CLI terminal width: '%s'" msgstr "" -#: cinder/virt/disk/nbd.py:93 -#, python-format -msgid "nbd device %s did not show up" -msgstr "Device nbd %s n'est pas apparu" - -#: cinder/virt/libvirt/connection.py:265 +#: cinder/volume/drivers/eqlx.py:162 #, python-format -msgid "Connecting to libvirt: %s" -msgstr "Connexion à libvirt: %s" - -#: cinder/virt/libvirt/connection.py:286 -msgid "Connection to libvirt broke" -msgstr "Connexion à libvirt interrompue" +msgid "Sending CLI command: '%s'" +msgstr "" -#: cinder/virt/libvirt/connection.py:388 -#, python-format -msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s" +#: cinder/volume/drivers/eqlx.py:169 +msgid "Error executing EQL command" msgstr "" -#: cinder/virt/libvirt/connection.py:400 +#: cinder/volume/drivers/eqlx.py:199 #, python-format -msgid "" -"Error from libvirt during saved instance removal. Code=%(errcode)s " -"Error=%(e)s" +msgid "EQL-driver: executing \"%s\"" msgstr "" -#: cinder/virt/libvirt/connection.py:411 +#: cinder/volume/drivers/eqlx.py:208 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:383 #, python-format -msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s" +msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" msgstr "" -#: cinder/virt/libvirt/connection.py:423 -msgid "Instance destroyed successfully." -msgstr "" +#: cinder/volume/drivers/eqlx.py:215 cinder/volume/drivers/san/san.py:149 +#, fuzzy, python-format +msgid "Error running SSH command: %s" +msgstr "Erreur imprévue lors de l'éxecution de la commande" -#: cinder/virt/libvirt/connection.py:435 +#: cinder/volume/drivers/eqlx.py:282 #, python-format -msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s" +msgid "Volume %s does not exist, it may have already been deleted" msgstr "" -#: cinder/virt/libvirt/connection.py:461 +#: cinder/volume/drivers/eqlx.py:300 #, python-format -msgid "Deleting instance files %(target)s" +msgid "EQL-driver: Setup is complete, group IP is %s" msgstr "" -#: cinder/virt/libvirt/connection.py:554 -msgid "attaching LXC block device" +#: cinder/volume/drivers/eqlx.py:304 +msgid "Failed to setup the Dell EqualLogic driver" msgstr "" -#: cinder/virt/libvirt/connection.py:567 -msgid "detaching LXC block device" +#: cinder/volume/drivers/eqlx.py:320 +#, python-format +msgid "Failed to create volume %s" msgstr "" -#: cinder/virt/libvirt/connection.py:692 -#, fuzzy, python-format -msgid "Instance soft rebooted successfully." -msgstr "instance %s: re-démarrée" - -#: cinder/virt/libvirt/connection.py:696 -#, fuzzy -msgid "Failed to soft reboot instance." 
-msgstr "Échec du redémarrage de l'instance" - -#: cinder/virt/libvirt/connection.py:725 -msgid "Instance shutdown successfully." +#: cinder/volume/drivers/eqlx.py:329 +#, python-format +msgid "Volume %s was not found while trying to delete it" msgstr "" -#: cinder/virt/libvirt/connection.py:761 cinder/virt/libvirt/connection.py:905 -msgid "During reboot, instance disappeared." +#: cinder/volume/drivers/eqlx.py:333 +#, python-format +msgid "Failed to delete volume %s" msgstr "" -#: cinder/virt/libvirt/connection.py:766 -msgid "Instance rebooted successfully." +#: cinder/volume/drivers/eqlx.py:348 +#, python-format +msgid "Failed to create snapshot of volume %s" msgstr "" -#: cinder/virt/libvirt/connection.py:867 cinder/virt/xenapi/vmops.py:1358 +#: cinder/volume/drivers/eqlx.py:361 #, python-format -msgid "" -"Found %(migration_count)d unconfirmed migrations older than " -"%(confirm_window)d seconds" +msgid "Failed to create volume from snapshot %s" msgstr "" -#: cinder/virt/libvirt/connection.py:871 +#: cinder/volume/drivers/eqlx.py:374 #, python-format -msgid "Automatically confirming migration %d" +msgid "Failed to create clone of volume %s" msgstr "" -#: cinder/virt/libvirt/connection.py:896 -msgid "Instance is running" +#: cinder/volume/drivers/eqlx.py:384 +#, python-format +msgid "Failed to delete snapshot %(snap)s of volume %(vol)s" msgstr "" -#: cinder/virt/libvirt/connection.py:910 -msgid "Instance spawned successfully." +#: cinder/volume/drivers/eqlx.py:405 +#, python-format +msgid "Failed to initialize connection to volume %s" msgstr "" -#: cinder/virt/libvirt/connection.py:926 +#: cinder/volume/drivers/eqlx.py:415 #, python-format -msgid "data: %(data)r, fpath: %(fpath)r" -msgstr "data: %(data)r, fpath: %(fpath)r" - -#: cinder/virt/libvirt/connection.py:978 -#, fuzzy -msgid "Guest does not have a console available" -msgstr "L’utilisateur n'a pas les privilèges administrateur" +msgid "Failed to terminate connection to volume %s" +msgstr "" -#: cinder/virt/libvirt/connection.py:1020 +#: cinder/volume/drivers/eqlx.py:436 #, python-format -msgid "Path '%(path)s' supports direct I/O" +msgid "Volume %s is not found!, it may have been deleted" msgstr "" -#: cinder/virt/libvirt/connection.py:1024 +#: cinder/volume/drivers/eqlx.py:440 #, python-format -msgid "Path '%(path)s' does not support direct I/O: '%(ex)s'" +msgid "Failed to ensure export of volume %s" msgstr "" -#: cinder/virt/libvirt/connection.py:1028 cinder/virt/libvirt/connection.py:1032 +#: cinder/volume/drivers/eqlx.py:459 #, python-format -msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'" +msgid "Failed to extend_volume %(name)s from %(current_size)sGB to %(new_size)sGB" msgstr "" -#: cinder/virt/libvirt/connection.py:1153 -msgid "Creating image" +#: cinder/volume/drivers/glusterfs.py:86 +#, python-format +msgid "There's no Gluster config file configured (%s)" msgstr "" -#: cinder/virt/libvirt/connection.py:1339 +#: cinder/volume/drivers/glusterfs.py:91 #, python-format -msgid "Injecting %(injection)s into image %(img_id)s" +msgid "Gluster config file at %(config)s doesn't exist" msgstr "" -#: cinder/virt/libvirt/connection.py:1349 -#, python-format -msgid "Ignoring error injecting data into image %(img_id)s (%(e)s)" +#: cinder/volume/drivers/glusterfs.py:103 +msgid "mount.glusterfs is not installed" msgstr "" -#: cinder/virt/libvirt/connection.py:1381 +#: cinder/volume/drivers/glusterfs.py:161 #, python-format -msgid "block_device_list %s" +msgid "Cloning volume %(src)s to volume %(dst)s" msgstr "" -#: 
cinder/virt/libvirt/connection.py:1658 -msgid "Starting toXML method" +#: cinder/volume/drivers/glusterfs.py:166 +msgid "Volume status must be 'available'." msgstr "" -#: cinder/virt/libvirt/connection.py:1662 -msgid "Finished toXML method" +#: cinder/volume/drivers/glusterfs.py:202 cinder/volume/drivers/nfs.py:122 +#: cinder/volume/drivers/netapp/nfs.py:753 +#, fuzzy, python-format +msgid "casted to %s" +msgstr "Nested renvoi %s" + +#: cinder/volume/drivers/glusterfs.py:216 +msgid "Snapshot status must be \"available\" to clone." msgstr "" -#: cinder/virt/libvirt/connection.py:1679 +#: cinder/volume/drivers/glusterfs.py:238 #, python-format -msgid "" -"Error from libvirt while looking up %(instance_name)s: [Error Code " -"%(error_code)s] %(ex)s" +msgid "snapshot: %(snap)s, volume: %(vol)s, volume_size: %(size)s" msgstr "" -#: cinder/virt/libvirt/connection.py:1920 -msgid "libvirt version is too old (does not support getVersion)" +#: cinder/volume/drivers/glusterfs.py:257 +#, python-format +msgid "will copy from snapshot at %s" msgstr "" -#: cinder/virt/libvirt/connection.py:1942 +#: cinder/volume/drivers/glusterfs.py:275 cinder/volume/drivers/nfs.py:172 #, python-format -msgid "'' must be 1, but %d\n" +msgid "Volume %s does not have provider_location specified, skipping" msgstr "" -#: cinder/virt/libvirt/connection.py:1969 +#: cinder/volume/drivers/glusterfs.py:373 #, python-format -msgid "topology (%(topology)s) must have %(ks)s" +msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" msgstr "" -#: cinder/virt/libvirt/connection.py:2067 +#: cinder/volume/drivers/glusterfs.py:403 #, python-format -msgid "" -"Instance launched has CPU info:\n" -"%s" +msgid "nova call result: %s" msgstr "" -#: cinder/virt/libvirt/connection.py:2079 -#, python-format -msgid "" -"CPU doesn't have compatibility.\n" -"\n" -"%(ret)s\n" -"\n" -"Refer to %(u)s" +#: cinder/volume/drivers/glusterfs.py:405 +msgid "Call to Nova to create snapshot failed" msgstr "" -#: cinder/virt/libvirt/connection.py:2136 -#, python-format -msgid "Timeout migrating for %s. nwfilter not found." +#: cinder/volume/drivers/glusterfs.py:427 +msgid "Nova returned \"error\" status while creating snapshot." msgstr "" -#: cinder/virt/libvirt/connection.py:2352 +#: cinder/volume/drivers/glusterfs.py:431 #, python-format -msgid "skipping %(path)s since it looks like volume" +msgid "Status of snapshot %(id)s is now %(status)s" msgstr "" -#: cinder/virt/libvirt/connection.py:2407 +#: cinder/volume/drivers/glusterfs.py:444 #, python-format -msgid "Getting disk size of %(i_name)s: %(e)s" +msgid "Timed out while waiting for Nova update for creation of snapshot %s." msgstr "" -#: cinder/virt/libvirt/connection.py:2458 +#: cinder/volume/drivers/glusterfs.py:456 #, python-format -msgid "Instance %s: Starting migrate_disk_and_power_off" +msgid "create snapshot: %s" msgstr "" -#: cinder/virt/libvirt/connection.py:2513 -msgid "During wait running, instance disappeared." +#: cinder/volume/drivers/glusterfs.py:457 +#, python-format +msgid "volume id: %s" msgstr "" -#: cinder/virt/libvirt/connection.py:2518 -msgid "Instance running successfully." +#: cinder/volume/drivers/glusterfs.py:532 +msgid "'active' must be present when writing snap_info." 
msgstr "" -#: cinder/virt/libvirt/connection.py:2525 +#: cinder/volume/drivers/glusterfs.py:562 #, python-format -msgid "Instance %s: Starting finish_migration" +msgid "deleting snapshot %s" msgstr "" -#: cinder/virt/libvirt/connection.py:2565 -#, python-format -msgid "Instance %s: Starting finish_revert_migration" +#: cinder/volume/drivers/glusterfs.py:566 +msgid "Volume status must be \"available\" or \"in-use\"." msgstr "" -#: cinder/virt/libvirt/firewall.py:42 +#: cinder/volume/drivers/glusterfs.py:582 +#, python-format msgid "" -"Libvirt module could not be loaded. NWFilterFirewall will not work " -"correctly." +"Snapshot record for %s is not present, allowing snapshot_delete to " +"proceed." msgstr "" -#: cinder/virt/libvirt/firewall.py:93 -msgid "Called setup_basic_filtering in nwfilter" +#: cinder/volume/drivers/glusterfs.py:587 +#, python-format +msgid "snapshot_file for this snap is %s" msgstr "" -#: cinder/virt/libvirt/firewall.py:101 -msgid "Ensuring static filters" +#: cinder/volume/drivers/glusterfs.py:608 +#, python-format +msgid "No base file found for %s." msgstr "" -#: cinder/virt/libvirt/firewall.py:171 +#: cinder/volume/drivers/glusterfs.py:625 #, python-format -msgid "The nwfilter(%(instance_filter_name)s) is not found." +msgid "No %(base_id)s found for %(file)s" msgstr "" -#: cinder/virt/libvirt/firewall.py:217 +#: cinder/volume/drivers/glusterfs.py:680 #, python-format -msgid "The nwfilter(%(instance_filter_name)s) for%(name)s is not found." +msgid "No file found with %s as backing file." msgstr "" -#: cinder/virt/libvirt/firewall.py:233 -msgid "iptables firewall: Setup Basic Filtering" +#: cinder/volume/drivers/glusterfs.py:690 +#, python-format +msgid "No snap found with %s as backing file." msgstr "" -#: cinder/virt/libvirt/firewall.py:252 -msgid "Attempted to unfilter instance which is not filtered" +#: cinder/volume/drivers/glusterfs.py:701 +#, python-format +msgid "No file depends on %s." msgstr "" -#: cinder/virt/libvirt/imagecache.py:170 +#: cinder/volume/drivers/glusterfs.py:727 #, python-format -msgid "%s is a valid instance name" +msgid "Check condition failed: %s expected to be None." msgstr "" -#: cinder/virt/libvirt/imagecache.py:173 -#, python-format -msgid "%s has a disk file" +#: cinder/volume/drivers/glusterfs.py:778 +msgid "Call to Nova delete snapshot failed" msgstr "" -#: cinder/virt/libvirt/imagecache.py:175 +#: cinder/volume/drivers/glusterfs.py:796 #, python-format -msgid "Instance %(instance)s is backed by %(backing)s" +msgid "status of snapshot %s is still \"deleting\"... waiting" msgstr "" -#: cinder/virt/libvirt/imagecache.py:186 +#: cinder/volume/drivers/glusterfs.py:802 #, python-format -msgid "" -"Instance %(instance)s is using a backing file %(backing)s which does not " -"appear in the image service" +msgid "Unable to delete snapshot %(id)s, status: %(status)s." msgstr "" -#: cinder/virt/libvirt/imagecache.py:237 +#: cinder/volume/drivers/glusterfs.py:815 #, python-format -msgid "%(id)s (%(base_file)s): image verification failed" +msgid "Timed out while waiting for Nova update for deletion of snapshot %(id)s." msgstr "" -#: cinder/virt/libvirt/imagecache.py:247 +#: cinder/volume/drivers/glusterfs.py:904 #, python-format -msgid "%(id)s (%(base_file)s): image verification skipped, no hash stored" +msgid "%s must be a valid raw or qcow2 image." 
msgstr "" -#: cinder/virt/libvirt/imagecache.py:266 -#, python-format -msgid "Cannot remove %(base_file)s, it does not exist" +#: cinder/volume/drivers/glusterfs.py:967 +msgid "Extend volume is only supported for this driver when no snapshots exist." msgstr "" -#: cinder/virt/libvirt/imagecache.py:278 +#: cinder/volume/drivers/glusterfs.py:975 #, python-format -msgid "Base file too young to remove: %s" +msgid "Unrecognized backing format: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:281 +#: cinder/volume/drivers/glusterfs.py:990 #, python-format -msgid "Removing base file: %s" +msgid "creating new volume at %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:288 +#: cinder/volume/drivers/glusterfs.py:993 #, python-format -msgid "Failed to remove %(base_file)s, error was %(error)s" +msgid "file already exists at %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:299 +#: cinder/volume/drivers/glusterfs.py:1019 cinder/volume/drivers/nfs.py:159 #, python-format -msgid "%(id)s (%(base_file)s): checking" +msgid "Exception during mounting %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:318 +#: cinder/volume/drivers/glusterfs.py:1021 #, python-format -msgid "" -"%(id)s (%(base_file)s): in use: on this node %(local)d local, %(remote)d " -"on other nodes" +msgid "Available shares: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:330 +#: cinder/volume/drivers/glusterfs.py:1038 #, python-format msgid "" -"%(id)s (%(base_file)s): warning -- an absent base file is in use! " -"instances: %(instance_list)s" +"GlusterFS share at %(dir)s is not writable by the Cinder volume service. " +"Snapshot operations will not be supported." msgstr "" -#: cinder/virt/libvirt/imagecache.py:338 +#: cinder/volume/drivers/gpfs.py:96 #, python-format -msgid "%(id)s (%(base_file)s): in use on (%(remote)d on other nodes)" +msgid "GPFS is not active. Detailed output: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:348 +#: cinder/volume/drivers/gpfs.py:97 #, python-format -msgid "%(id)s (%(base_file)s): image is not in use" +msgid "GPFS is not running - state: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:354 -#, python-format -msgid "%(id)s (%(base_file)s): image is in use" +#: cinder/volume/drivers/gpfs.py:140 +msgid "Option gpfs_mount_point_base is not set correctly." msgstr "" -#: cinder/virt/libvirt/imagecache.py:377 -#, python-format -msgid "Skipping verification, no base directory at %s" +#: cinder/volume/drivers/gpfs.py:147 +msgid "Option gpfs_images_share_mode is not set correctly." msgstr "" -#: cinder/virt/libvirt/imagecache.py:381 -msgid "Verify base images" +#: cinder/volume/drivers/gpfs.py:153 +msgid "Option gpfs_images_dir is not set correctly." msgstr "" -#: cinder/virt/libvirt/imagecache.py:388 +#: cinder/volume/drivers/gpfs.py:160 #, python-format -msgid "Image id %(id)s yields fingerprint %(fingerprint)s" +msgid "" +"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " +"belong to different file systems" msgstr "" -#: cinder/virt/libvirt/imagecache.py:406 +#: cinder/volume/drivers/gpfs.py:169 #, python-format -msgid "Unknown base file: %s" +msgid "" +"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in " +"cluster daemon level %(cur)s - must be at least at level %(min)s." msgstr "" -#: cinder/virt/libvirt/imagecache.py:411 +#: cinder/volume/drivers/gpfs.py:183 #, python-format -msgid "Active base files: %s" +msgid "%s must be an absolute path." 
msgstr "" -#: cinder/virt/libvirt/imagecache.py:414 +#: cinder/volume/drivers/gpfs.py:188 #, python-format -msgid "Corrupt base files: %s" +msgid "%s is not a directory." msgstr "" -#: cinder/virt/libvirt/imagecache.py:418 +#: cinder/volume/drivers/gpfs.py:197 #, python-format -msgid "Removable base files: %s" +msgid "" +"The GPFS filesystem %(fs)s is not at the required release level. Current" +" level is %(cur)s, must be at least %(min)s." msgstr "" -#: cinder/virt/libvirt/imagecache.py:426 -msgid "Verification complete" +#: cinder/volume/drivers/gpfs.py:556 +#, python-format +msgid "Failed to resize volume %(volume_id)s, error: %(error)s" msgstr "" -#: cinder/virt/libvirt/utils.py:264 -msgid "Unable to find an open port" -msgstr "Impossible de trouver un port ouvert" - -#: cinder/virt/libvirt/vif.py:90 +#: cinder/volume/drivers/gpfs.py:604 #, python-format -msgid "Ensuring vlan %(vlan)s and bridge %(bridge)s" +msgid "mkfs failed on volume %(vol)s, error message was: %(err)s" msgstr "" -#: cinder/virt/libvirt/vif.py:99 +#: cinder/volume/drivers/gpfs.py:637 #, python-format -msgid "Ensuring bridge %s" +msgid "" +"%s cannot be accessed. Verify that GPFS is active and file system is " +"mounted." msgstr "" -#: cinder/virt/libvirt/vif.py:165 cinder/virt/libvirt/vif.py:220 +#: cinder/volume/drivers/lvm.py:189 #, python-format -msgid "Failed while unplugging vif of instance '%s'" +msgid "Unabled to delete due to existing snapshot for volume: %s" msgstr "" -#: cinder/virt/libvirt/volume.py:163 +#: cinder/volume/drivers/lvm.py:215 #, python-format -msgid "iSCSI device not found at %s" +msgid "Volume device file path %s does not exist." msgstr "" -#: cinder/virt/libvirt/volume.py:166 +#: cinder/volume/drivers/lvm.py:221 #, python-format -msgid "" -"ISCSI volume not yet found at: %(mount_device)s. Will rescan & retry. " -"Try number: %(tries)s" +msgid "Size for volume: %s not found, cannot secure delete." 
msgstr "" -#: cinder/virt/libvirt/volume.py:178 +#: cinder/volume/drivers/lvm.py:262 #, python-format -msgid "Found iSCSI node %(mount_device)s (after %(tries)s rescans)" +msgid "snapshot: %s not found, skipping delete operations" msgstr "" -#: cinder/virt/vmwareapi/error_util.py:93 +#: cinder/volume/drivers/lvm.py:361 #, python-format -msgid "Error(s) %s occurred in the call to RetrieveProperties" +msgid "Unable to update stats on non-initialized Volume Group: %s" msgstr "" -#: cinder/virt/vmwareapi/fake.py:44 cinder/virt/xenapi/fake.py:77 +#: cinder/volume/drivers/lvm.py:462 #, python-format -msgid "%(text)s: _db_content => %(content)s" -msgstr "%(text)s: _db_content => %(content)s" +msgid "Error creating iSCSI target, retrying creation for target: %s" +msgstr "" -#: cinder/virt/vmwareapi/fake.py:131 +#: cinder/volume/drivers/lvm.py:482 #, python-format -msgid "Property %(attr)s not set for the managed object %(objName)s" +msgid "volume_info:%s" msgstr "" -#: cinder/virt/vmwareapi/fake.py:437 -msgid "There is no VM registered" +#: cinder/volume/drivers/lvm.py:518 +msgid "Detected inconsistency in provider_location id" msgstr "" -#: cinder/virt/vmwareapi/fake.py:439 cinder/virt/vmwareapi/fake.py:609 +#: cinder/volume/drivers/lvm.py:519 cinder/volume/drivers/lvm.py:724 +#: cinder/volume/drivers/huawei/rest_common.py:1225 #, python-format -msgid "Virtual Machine with ref %s is not there" +msgid "%s" msgstr "" -#: cinder/virt/vmwareapi/fake.py:502 +#: cinder/volume/drivers/lvm.py:573 #, python-format -msgid "Logging out a session that is invalid or already logged out: %s" +msgid "Symbolic link %s not found" msgstr "" -#: cinder/virt/vmwareapi/fake.py:517 -msgid "Session is faulty" +#: cinder/volume/drivers/nfs.py:109 +msgid "Driver specific implementation needs to return mount_point_base." msgstr "" -#: cinder/virt/vmwareapi/fake.py:520 -msgid "Session Invalid" -msgstr "" - -#: cinder/virt/vmwareapi/fake.py:606 -msgid " No Virtual Machine has been registered yet" -msgstr "" - -#: cinder/virt/vmwareapi/io_util.py:99 +#: cinder/volume/drivers/nfs.py:263 #, python-format -msgid "Glance image %s is in killed state" +msgid "Expected volume size was %d" msgstr "" -#: cinder/virt/vmwareapi/io_util.py:107 +#: cinder/volume/drivers/nfs.py:264 #, python-format -msgid "Glance image %(image_id)s is in unknown state - %(state)s" +msgid " but size is now %d" msgstr "" -#: cinder/virt/vmwareapi/network_utils.py:128 -msgid "" -"ESX SOAP server returned an empty port group for the host system in its " -"response" -msgstr "" +#: cinder/volume/drivers/nfs.py:361 +#, fuzzy, python-format +msgid "%s is already mounted" +msgstr "le groupe %s existe déjà" -#: cinder/virt/vmwareapi/network_utils.py:155 +#: cinder/volume/drivers/nfs.py:421 #, python-format -msgid "Creating Port Group with name %s on the ESX host" +msgid "There's no NFS config file configured (%s)" msgstr "" -#: cinder/virt/vmwareapi/network_utils.py:169 +#: cinder/volume/drivers/nfs.py:426 #, python-format -msgid "Created Port Group with name %s on the ESX host" +msgid "NFS config file at %(config)s doesn't exist" msgstr "" -#: cinder/virt/vmwareapi/read_write_util.py:150 +#: cinder/volume/drivers/nfs.py:431 #, python-format -msgid "Exception during HTTP connection close in VMWareHTTpWrite. Exception is %s" +msgid "NFS config 'nfs_oversub_ratio' invalid. Must be > 0: %s" msgstr "" -#: cinder/virt/vmwareapi/vim.py:84 -msgid "Unable to import suds." 
-msgstr "" - -#: cinder/virt/vmwareapi/vim.py:90 -msgid "Must specify vmwareapi_wsdl_loc" +#: cinder/volume/drivers/nfs.py:439 +#, python-format +msgid "NFS config 'nfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" msgstr "" -#: cinder/virt/vmwareapi/vim.py:145 +#: cinder/volume/drivers/nfs.py:493 #, python-format -msgid "No such SOAP method '%s' provided by VI SDK" +msgid "Selected %s as target nfs share." msgstr "" -#: cinder/virt/vmwareapi/vim.py:150 +#: cinder/volume/drivers/nfs.py:526 #, python-format -msgid "httplib error in %s: " +msgid "%s is above nfs_used_ratio" msgstr "" -#: cinder/virt/vmwareapi/vim.py:157 +#: cinder/volume/drivers/nfs.py:529 #, python-format -msgid "Socket error in %s: " +msgid "%s is above nfs_oversub_ratio" msgstr "" -#: cinder/virt/vmwareapi/vim.py:162 +#: cinder/volume/drivers/nfs.py:532 #, python-format -msgid "Type error in %s: " +msgid "%s reserved space is above nfs_oversub_ratio" msgstr "" -#: cinder/virt/vmwareapi/vim.py:166 +#: cinder/volume/drivers/rbd.py:160 #, python-format -msgid "Exception in %s " +msgid "Invalid argument - whence=%s not supported" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:66 -msgid "Getting list of instances" +#: cinder/volume/drivers/rbd.py:164 +msgid "Invalid argument" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:82 -#, python-format -msgid "Got total of %s instances" +#: cinder/volume/drivers/rbd.py:183 +msgid "fileno() not supported by RBD()" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:126 -msgid "Couldn't get a local Datastore reference" -msgstr "" +#: cinder/volume/drivers/rbd.py:210 +#, fuzzy, python-format +msgid "error opening rbd image %s" +msgstr "Erreur au démarrage xvp : %s" -#: cinder/virt/vmwareapi/vmops.py:196 -#, python-format -msgid "Creating VM with the name %s on the ESX host" +#: cinder/volume/drivers/rbd.py:259 +msgid "rados and rbd python libraries not found" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:205 -#, python-format -msgid "Created VM with the name %s on the ESX host" -msgstr "" +#: cinder/volume/drivers/rbd.py:265 +#, fuzzy +msgid "error connecting to ceph cluster" +msgstr "Connexion à libvirt: %s" -#: cinder/virt/vmwareapi/vmops.py:234 -#, python-format -msgid "" -"Creating Virtual Disk of size %(vmdk_file_size_in_kb)s KB and adapter " -"type %(adapter_type)s on the ESX host local store %(data_store_name)s" +#: cinder/volume/drivers/rbd.py:346 cinder/volume/drivers/sheepdog.py:178 +msgid "error refreshing volume stats" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:251 +#: cinder/volume/drivers/rbd.py:377 #, python-format -msgid "" -"Created Virtual Disk of size %(vmdk_file_size_in_kb)s KB on the ESX host " -"local store %(data_store_name)s" +msgid "clone depth exceeds limit of %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:260 +#: cinder/volume/drivers/rbd.py:411 #, python-format -msgid "" -"Deleting the file %(flat_uploaded_vmdk_path)s on the ESX host localstore " -"%(data_store_name)s" +msgid "maximum clone depth (%d) has been reached - flattening source volume" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:272 +#: cinder/volume/drivers/rbd.py:423 #, python-format -msgid "" -"Deleted the file %(flat_uploaded_vmdk_path)s on the ESX host local store " -"%(data_store_name)s" +msgid "flattening source volume %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:283 +#: cinder/volume/drivers/rbd.py:435 #, python-format -msgid "" -"Downloading image file data %(image_ref)s to the ESX data store " -"%(data_store_name)s" +msgid "creating snapshot='%s'" msgstr "" -#: 
cinder/virt/vmwareapi/vmops.py:298 +#: cinder/volume/drivers/rbd.py:445 #, python-format -msgid "" -"Downloaded image file data %(image_ref)s to the ESX data store " -"%(data_store_name)s" +msgid "cloning '%(src_vol)s@%(src_snap)s' to '%(dest)s'" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:315 -#, python-format -msgid "Reconfiguring VM instance %s to attach the image disk" +#: cinder/volume/drivers/rbd.py:459 +msgid "clone created successfully" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:322 +#: cinder/volume/drivers/rbd.py:468 #, python-format -msgid "Reconfigured VM instance %s to attach the image disk" +msgid "creating volume '%s'" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:329 +#: cinder/volume/drivers/rbd.py:484 #, python-format -msgid "Powering on the VM instance %s" +msgid "flattening %(pool)s/%(img)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:335 +#: cinder/volume/drivers/rbd.py:490 #, python-format -msgid "Powered on the VM instance %s" +msgid "cloning %(pool)s/%(img)s@%(snap)s to %(dst)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:381 -#, python-format -msgid "Creating Snapshot of the VM instance %s " +#: cinder/volume/drivers/rbd.py:527 +msgid "volume has no backup snaps" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:391 +#: cinder/volume/drivers/rbd.py:550 #, python-format -msgid "Created Snapshot of the VM instance %s " +msgid "volume %s is not a clone" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:434 +#: cinder/volume/drivers/rbd.py:568 #, python-format -msgid "Copying disk data before snapshot of the VM instance %s" +msgid "deleting parent snapshot %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:447 +#: cinder/volume/drivers/rbd.py:579 #, python-format -msgid "Copied disk data before snapshot of the VM instance %s" +msgid "deleting parent %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:456 +#: cinder/volume/drivers/rbd.py:593 #, python-format -msgid "Uploading image %s" +msgid "volume %s no longer exists in backend" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:469 -#, python-format -msgid "Uploaded image %s" +#: cinder/volume/drivers/rbd.py:609 +msgid "volume has clone snapshot(s)" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:479 +#: cinder/volume/drivers/rbd.py:625 #, python-format -msgid "Deleting temporary vmdk file %s" +msgid "deleting rbd volume %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:488 -#, python-format -msgid "Deleted temporary vmdk file %s" +#: cinder/volume/drivers/rbd.py:629 +msgid "" +"ImageBusy error raised while deleting rbd volume. This may have been " +"caused by a connection from a client that has crashed and, if so, may be " +"resolved by retrying the delete after 30 seconds has elapsed." 
msgstr "" -#: cinder/virt/vmwareapi/vmops.py:520 -msgid "instance is not powered on" +#: cinder/volume/drivers/rbd.py:642 +msgid "volume is a clone so cleaning references" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:527 -#, python-format -msgid "Rebooting guest OS of VM %s" -msgstr "" +#: cinder/volume/drivers/rbd.py:696 +#, fuzzy, python-format +msgid "connection data: %s" +msgstr "Connexion à libvirt: %s" -#: cinder/virt/vmwareapi/vmops.py:530 -#, python-format -msgid "Rebooted guest OS of VM %s" +#: cinder/volume/drivers/rbd.py:705 +msgid "Not stored in rbd" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:532 -#, python-format -msgid "Doing hard reboot of VM %s" +#: cinder/volume/drivers/rbd.py:709 +msgid "Blank components" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:536 -#, python-format -msgid "Did hard reboot of VM %s" -msgstr "" +#: cinder/volume/drivers/rbd.py:712 +#, fuzzy +msgid "Not an rbd snapshot" +msgstr "Snapshot invalide" -#: cinder/virt/vmwareapi/vmops.py:548 -#, python-format -msgid "instance - %s not present" -msgstr "" +#: cinder/volume/drivers/rbd.py:724 +#, fuzzy, python-format +msgid "not cloneable: %s" +msgstr "réponse %s" -#: cinder/virt/vmwareapi/vmops.py:567 +#: cinder/volume/drivers/rbd.py:728 #, python-format -msgid "Powering off the VM %s" +msgid "%s is in a different ceph cluster" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:572 -#, python-format -msgid "Powered off the VM %s" +#: cinder/volume/drivers/rbd.py:733 +msgid "rbd image clone requires image format to be 'raw' but image {0} is '{1}'" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:576 -#, python-format -msgid "Unregistering the VM %s" +#: cinder/volume/drivers/rbd.py:747 +#, fuzzy, python-format +msgid "Unable to open image %(loc)s: %(err)s" +msgstr "Impossible de trouver le volume %s" + +#: cinder/volume/drivers/rbd.py:817 +msgid "volume backup complete." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:579 -#, python-format -msgid "Unregistered the VM %s" +#: cinder/volume/drivers/rbd.py:830 +msgid "volume restore complete." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:581 +#: cinder/volume/drivers/rbd.py:840 cinder/volume/drivers/sheepdog.py:195 #, python-format -msgid "" -"In vmwareapi:vmops:destroy, got this exception while un-registering the " -"VM: %s" +msgid "Failed to Extend Volume %(volname)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:592 +#: cinder/volume/drivers/rbd.py:845 cinder/volume/drivers/sheepdog.py:200 +#: cinder/volume/drivers/windows/windows.py:223 #, python-format -msgid "Deleting contents of the VM %(name)s from datastore %(datastore_name)s" +msgid "Extend volume from %(old_size)s GB to %(new_size)s GB." 
msgstr "" -#: cinder/virt/vmwareapi/vmops.py:602 -#, python-format -msgid "Deleted contents of the VM %(name)s from datastore %(datastore_name)s" +#: cinder/volume/drivers/scality.py:67 +msgid "Value required for 'scality_sofs_config'" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:607 +#: cinder/volume/drivers/scality.py:78 #, python-format -msgid "" -"In vmwareapi:vmops:destroy, got this exception while deleting the VM " -"contents from the disk: %s" +msgid "Cannot access 'scality_sofs_config': %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:615 -msgid "pause not supported for vmwareapi" +#: cinder/volume/drivers/scality.py:84 +msgid "Cannot execute /sbin/mount.sofs" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:619 -msgid "unpause not supported for vmwareapi" +#: cinder/volume/drivers/scality.py:105 +msgid "Cannot mount Scality SOFS, check syslog for errors" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:633 +#: cinder/volume/drivers/scality.py:139 #, python-format -msgid "Suspending the VM %s " +msgid "Cannot find volume dir for Scality SOFS at '%s'" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:637 +#: cinder/volume/drivers/sheepdog.py:59 #, python-format -msgid "Suspended the VM %s " -msgstr "" +msgid "Sheepdog is not working: %s" +msgstr "Sheepdog n'est pas actif : %s" -#: cinder/virt/vmwareapi/vmops.py:640 -msgid "instance is powered off and can not be suspended." -msgstr "" +#: cinder/volume/drivers/sheepdog.py:64 +msgid "Sheepdog is not working" +msgstr "Sheepdog n'est pas actif" -#: cinder/virt/vmwareapi/vmops.py:643 +#: cinder/volume/drivers/solidfire.py:144 #, python-format -msgid "VM %s was already in suspended state. So returning without doing anything" +msgid "Payload for SolidFire API call: %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:656 +#: cinder/volume/drivers/solidfire.py:151 #, python-format -msgid "Resuming the VM %s" +msgid "" +"Failed to make httplib connection SolidFire Cluster: %s (verify san_ip " +"settings)" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:661 +#: cinder/volume/drivers/solidfire.py:154 #, python-format -msgid "Resumed the VM %s " -msgstr "" - -#: cinder/virt/vmwareapi/vmops.py:663 -msgid "instance is not in a suspended state" -msgstr "" - -#: cinder/virt/vmwareapi/vmops.py:699 -msgid "get_diagnostics not implemented for vmwareapi" +msgid "Failed to make httplib connection: %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:757 +#: cinder/volume/drivers/solidfire.py:161 #, python-format msgid "" -"Reconfiguring VM instance %(name)s to set the machine id with ip - " -"%(ip_addr)s" +"Request to SolidFire cluster returned bad status: %(status)s / %(reason)s" +" (check san_login/san_password settings)" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:765 +#: cinder/volume/drivers/solidfire.py:166 #, python-format -msgid "" -"Reconfigured VM instance %(name)s to set the machine id with ip - " -"%(ip_addr)s" +msgid "HTTP request failed, with status: %(status)s and reason: %(reason)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:802 +#: cinder/volume/drivers/solidfire.py:177 #, python-format -msgid "Creating directory with path %s" +msgid "Call to json.loads() raised an exception: %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:806 +#: cinder/volume/drivers/solidfire.py:183 #, python-format -msgid "Created directory with path %s" +msgid "Results of SolidFire API call: %s" msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:89 +#: cinder/volume/drivers/solidfire.py:187 #, python-format -msgid "Downloading image %s from glance image server" +msgid "Clone 
operation encountered: %s" msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:103 +#: cinder/volume/drivers/solidfire.py:189 #, python-format -msgid "Downloaded image %s from glance image server" +msgid "Waiting for outstanding operation before retrying snapshot: %s" msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:108 +#: cinder/volume/drivers/solidfire.py:195 #, python-format -msgid "Uploading image %s to the Glance image server" +msgid "Detected xDBVersionMismatch, retry %s of 5" msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:129 +#: cinder/volume/drivers/solidfire.py:202 +#: cinder/volume/drivers/solidfire.py:271 +#: cinder/volume/drivers/solidfire.py:366 +#, fuzzy, python-format +msgid "API response: %s" +msgstr "réponse %s" + +#: cinder/volume/drivers/solidfire.py:222 #, python-format -msgid "Uploaded image %s to the Glance image server" +msgid "Found solidfire account: %s" msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:139 +#: cinder/volume/drivers/solidfire.py:253 #, python-format -msgid "Getting image size for the image %s" +msgid "solidfire account: %s does not exist, create it..." msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:143 +#: cinder/volume/drivers/solidfire.py:315 #, python-format -msgid "Got image size of %(size)s for the image %(image)s" +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" msgstr "" -#: cinder/virt/xenapi/fake.py:553 cinder/virt/xenapi/fake.py:652 -#: cinder/virt/xenapi/fake.py:670 cinder/virt/xenapi/fake.py:732 -msgid "Raising NotImplemented" -msgstr "Fonction non implémentée" +#: cinder/volume/drivers/solidfire.py:398 +#, fuzzy +msgid "Failed to get model update from clone" +msgstr "Impossible de récupérer les méta-donnérs pour l'IP : %s" -#: cinder/virt/xenapi/fake.py:555 +#: cinder/volume/drivers/solidfire.py:410 #, python-format -msgid "xenapi.fake does not have an implementation for %s" -msgstr "xenapi.fake n'a pas d'implémentation pour %s" +msgid "Failed volume create: %s" +msgstr "" -#: cinder/virt/xenapi/fake.py:589 +#: cinder/volume/drivers/solidfire.py:425 #, python-format -msgid "Calling %(localname)s %(impl)s" -msgstr "Appel %(localname)s %(impl)s" +msgid "More than one valid preset was detected, using %s" +msgstr "" -#: cinder/virt/xenapi/fake.py:594 +#: cinder/volume/drivers/solidfire.py:460 #, python-format -msgid "Calling getter %s" -msgstr "Appel du getter %s" +msgid "Failed to get SolidFire Volume: %s" +msgstr "" -#: cinder/virt/xenapi/fake.py:654 +#: cinder/volume/drivers/solidfire.py:469 #, python-format -msgid "" -"xenapi.fake does not have an implementation for %s or it has been called " -"with the wrong number of arguments" +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." msgstr "" -"xenapi.fake n'a pas d'implementation pour %s ou il a été appelé avec le " -"mauvais nombre d'arguments" -#: cinder/virt/xenapi/host.py:67 +#: cinder/volume/drivers/solidfire.py:478 #, python-format -msgid "" -"Instance %(name)s running on %(host)s could not be found in the database:" -" assuming it is a worker VM and skipping migration to a new host" +msgid "Volume %s, not found on SF Cluster." msgstr "" -#: cinder/virt/xenapi/host.py:137 +#: cinder/volume/drivers/solidfire.py:481 #, python-format -msgid "Unable to get SR for this host: %s" +msgid "Found %(count)s volumes mapped to id: %(uuid)s." msgstr "" -#: cinder/virt/xenapi/host.py:169 -msgid "Unable to get updated status" +#: cinder/volume/drivers/solidfire.py:550 +msgid "Enter SolidFire delete_volume..." 
msgstr "" -#: cinder/virt/xenapi/host.py:172 +#: cinder/volume/drivers/solidfire.py:554 #, python-format -msgid "The call to %(method)s returned an error: %(e)s." +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" msgstr "" -#: cinder/virt/xenapi/network_utils.py:37 -#, python-format -msgid "Found non-unique network for name_label %s" +#: cinder/volume/drivers/solidfire.py:556 +msgid "This usually means the volume was never successfully created." msgstr "" -#: cinder/virt/xenapi/network_utils.py:55 +#: cinder/volume/drivers/solidfire.py:569 #, python-format -msgid "Found non-unique network for bridge %s" -msgstr "Réseau non unique trouvé pour le bridge %s" +msgid "Failed to delete SolidFire Volume: %s" +msgstr "" -#: cinder/virt/xenapi/network_utils.py:58 +#: cinder/volume/drivers/solidfire.py:572 +#: cinder/volume/drivers/solidfire.py:646 +#: cinder/volume/drivers/solidfire.py:709 +#: cinder/volume/drivers/solidfire.py:734 #, python-format -msgid "Found no network for bridge %s" -msgstr "Aucun réseau trouvé pour le bridge %s" +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" -#: cinder/virt/xenapi/pool.py:111 -#, python-format -msgid "Unable to eject %(host)s from the pool; pool not empty" +#: cinder/volume/drivers/solidfire.py:575 +msgid "Leaving SolidFire delete_volume" msgstr "" -#: cinder/virt/xenapi/pool.py:126 -#, python-format -msgid "Unable to eject %(host)s from the pool; No master found" +#: cinder/volume/drivers/solidfire.py:579 +msgid "Executing SolidFire ensure_export..." msgstr "" -#: cinder/virt/xenapi/pool.py:143 -#, python-format -msgid "Pool-Join failed: %(e)s" +#: cinder/volume/drivers/solidfire.py:587 +msgid "Executing SolidFire create_export..." msgstr "" -#: cinder/virt/xenapi/pool.py:146 -#, python-format -msgid "Unable to join %(host)s in the pool" +#: cinder/volume/drivers/solidfire.py:638 +msgid "Entering SolidFire extend_volume..." msgstr "" -#: cinder/virt/xenapi/pool.py:162 -#, python-format -msgid "Pool-eject failed: %(e)s" +#: cinder/volume/drivers/solidfire.py:660 +msgid "Leaving SolidFire extend_volume" msgstr "" -#: cinder/virt/xenapi/pool.py:174 -#, fuzzy, python-format -msgid "Unable to set up pool: %(e)s." -msgstr "Impossible de détacher le volume %s" +#: cinder/volume/drivers/solidfire.py:665 +msgid "Updating cluster status info" +msgstr "" -#: cinder/virt/xenapi/pool.py:185 -#, python-format -msgid "Pool-set_name_label failed: %(e)s" +#: cinder/volume/drivers/solidfire.py:673 +#, fuzzy +msgid "Failed to get updated stats" +msgstr "Échec de la suspension de l'instance" + +#: cinder/volume/drivers/solidfire.py:703 +#: cinder/volume/drivers/solidfire.py:728 +msgid "Entering SolidFire attach_volume..." msgstr "" -#: cinder/virt/xenapi/vif.py:103 -#, python-format -msgid "Found no PIF for device %s" +#: cinder/volume/drivers/solidfire.py:773 +msgid "Leaving SolidFire transfer volume" msgstr "" -#: cinder/virt/xenapi/vif.py:122 +#: cinder/volume/drivers/zadara.py:236 #, python-format -msgid "" -"PIF %(pif_rec['uuid'])s for network %(bridge)s has VLAN id %(pif_vlan)d. " -"Expected %(vlan_num)d" +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:218 -msgid "Created VM" +#: cinder/volume/drivers/zadara.py:260 +#, python-format +msgid "Operation completed. 
%(data)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:245 +#: cinder/volume/drivers/zadara.py:357 #, python-format -msgid "VBD not found in instance %s" -msgstr "VBD non trouvé dans l'instance %s" - -#: cinder/virt/xenapi/vm_utils.py:262 -#, fuzzy, python-format -msgid "VBD %s already detached" -msgstr "le groupe %s existe déjà" +msgid "Pool %(name)s: %(total)sGB total, %(free)sGB free" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:265 +#: cinder/volume/drivers/zadara.py:408 cinder/volume/drivers/zadara.py:531 #, python-format -msgid "VBD %(vbd_ref)s detach rejected, attempt %(num_attempt)d/%(max_attempts)d" +msgid "Volume %(name)s could not be found. It might be already deleted" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:270 +#: cinder/volume/drivers/zadara.py:438 #, python-format -msgid "Unable to unplug VBD %s" -msgstr "Impossible de deconnecter le VBD %s" +msgid "Create snapshot: %s" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:275 +#: cinder/volume/drivers/zadara.py:445 cinder/volume/drivers/zadara.py:490 +#: cinder/volume/drivers/zadara.py:516 #, python-format -msgid "Reached maximum number of retries trying to unplug VBD %s" +msgid "Volume %(name)s not found" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:286 +#: cinder/volume/drivers/zadara.py:456 #, python-format -msgid "Unable to destroy VBD %s" -msgstr "Impossible de supprimer le VBD %s" - -#: cinder/virt/xenapi/vm_utils.py:305 -#, fuzzy, python-format -msgid "Creating %(vbd_type)s-type VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " -msgstr "VBD créé %(vbd_ref)s pour VM %(vm_ref)s, VDI %(vdi_ref)s." +msgid "Delete snapshot: %s" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:308 +#: cinder/volume/drivers/zadara.py:464 #, python-format -msgid "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s." -msgstr "VBD créé %(vbd_ref)s pour VM %(vm_ref)s, VDI %(vdi_ref)s." +msgid "snapshot: original volume %s not found, skipping delete operation" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:319 +#: cinder/volume/drivers/zadara.py:472 #, python-format -msgid "Unable to destroy VDI %s" +msgid "snapshot: snapshot %s not found, skipping delete operation" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:336 +#: cinder/volume/drivers/zadara.py:483 #, python-format -msgid "" -"Created VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s)" -" on %(sr_ref)s." +msgid "Creating volume from snapshot: %s" msgstr "" -"VDI créé %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s) on" -" %(sr_ref)s." -#: cinder/virt/xenapi/vm_utils.py:345 +#: cinder/volume/drivers/zadara.py:496 #, python-format -msgid "Copied VDI %(vdi_ref)s from VDI %(vdi_to_copy_ref)s on %(sr_ref)s." +msgid "Snapshot %(name)s not found" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:353 +#: cinder/volume/drivers/zadara.py:614 #, python-format -msgid "Cloned VDI %(vdi_ref)s from VDI %(vdi_to_clone_ref)s" +msgid "Attach properties: %(properties)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:372 -#, python-format -msgid "No primary VDI found for %(vm_ref)s" +#: cinder/volume/drivers/emc/emc_smis_common.py:40 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:379 -#, fuzzy, python-format -msgid "Snapshotting with label '%(label)s'" +#: cinder/volume/drivers/emc/emc_smis_common.py:79 +msgid "Entering create_volume." msgstr "" -"Création de l'instantané (snapshot) pour la VM %(vm_ref)s avec le label " -"'%(label)s'..." 
-#: cinder/virt/xenapi/vm_utils.py:392
+#: cinder/volume/drivers/emc/emc_smis_common.py:83
 #, fuzzy, python-format
-msgid "Created snapshot %(template_vm_ref)s"
-msgstr "Instantané (snapshot) créé %(template_vm_ref)s pour la VM %(vm_ref)s."
-
-#: cinder/virt/xenapi/vm_utils.py:431
-#, python-format
-msgid "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s"
-msgstr "Demande de chargement à xapi de %(vdi_uuids)s en tant qu'ID %(image_id)s"
+msgid "Create Volume: %(volume)s Size: %(size)lu"
+msgstr "volume %(vol_name)s: création d'un volume logique de %(vol_size)sG"

-#: cinder/virt/xenapi/vm_utils.py:583
+#: cinder/volume/drivers/emc/emc_smis_common.py:91
 #, python-format
-msgid "Creating blank HD of size %(req_size)d gigs"
+msgid "Create Volume: %(volume)s Storage type: %(storage_type)s"
 msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:634
+#: cinder/volume/drivers/emc/emc_smis_common.py:98
 #, python-format
 msgid ""
-"Fast cloning is only supported on default local SR of type ext. SR on "
-"this system was found to be of type %(sr_type)s. Ignoring the cow flag."
+"Create Volume: %(volume)s Pool: %(pool)s Storage System: "
+"%(storage_system)s"
 msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:724
+#: cinder/volume/drivers/emc/emc_smis_common.py:107
 #, python-format
 msgid ""
-"download_vhd %(image)s attempt %(attempt_num)d/%(max_attempts)d from "
-"%(glance_host)s:%(glance_port)s"
+"Error Create Volume: %(volumename)s. Storage Configuration Service not "
+"found for pool %(storage_type)s."
 msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:734
+#: cinder/volume/drivers/emc/emc_smis_common.py:115
 #, python-format
-msgid "download_vhd failed: %r"
+msgid ""
+"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool "
+"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s "
+"ElementType: 5 Size: %(size)lu"
 msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:750
+#: cinder/volume/drivers/emc/emc_smis_common.py:130
 #, python-format
-msgid "Asking xapi to fetch vhd image %(image)s"
+msgid "Create Volume: %(volumename)s Return code: %(rc)lu"
 msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:760
+#: cinder/volume/drivers/emc/emc_smis_common.py:137
 #, python-format
 msgid ""
-"xapi 'download_vhd' returned VDI of type '%(vdi_type)s' with UUID "
-"'%(vdi_uuid)s'"
+"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: "
+"%(error)s"
 msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:789
+#: cinder/volume/drivers/emc/emc_smis_common.py:144
 #, python-format
-msgid "vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes=%(vdi_size_bytes)d"
+msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu"
 msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:805
-#, python-format
-msgid "image_size_bytes=%(size_bytes)d, allowed_size_bytes=%(allowed_size_bytes)d"
+#: cinder/volume/drivers/emc/emc_smis_common.py:152
+msgid "Entering create_volume_from_snapshot."
 msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:809
+#: cinder/volume/drivers/emc/emc_smis_common.py:157
 #, python-format
 msgid ""
-"Image size %(size_bytes)d exceeded instance_type allowed size "
-"%(allowed_size_bytes)d"
+"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: "
+"%(snapshotname)s"
 msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:831
+#: cinder/volume/drivers/emc/emc_smis_common.py:167
 #, python-format
-msgid "Fetching image %(image)s, type %(image_type_str)"
+msgid ""
+"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: "
+"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage "
+"System: %(storage_system)s."
msgstr "" -#: cinder/virt/xenapi/vm_utils.py:844 -#, fuzzy, python-format -msgid "Size for image %(image)s: %(virtual_size)d" -msgstr "Taille de l'image %(image)s:%(virtual_size)d" - -#: cinder/virt/xenapi/vm_utils.py:853 +#: cinder/volume/drivers/emc/emc_smis_common.py:177 #, python-format msgid "" -"Kernel/Ramdisk image is too large: %(vdi_size)d bytes, max %(max_size)d " -"bytes" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:870 -#, python-format -msgid "Copying VDI %s to /boot/guest on dom0" -msgstr "Copie de VDI %s vers /boot/guest sur dom0" - -#: cinder/virt/xenapi/vm_utils.py:884 -#, python-format -msgid "Kernel/Ramdisk VDI %s destroyed" -msgstr "Noyau/Ramdisk VDI %s détruit" - -#: cinder/virt/xenapi/vm_utils.py:895 -#, fuzzy -msgid "Failed to fetch glance image" -msgstr "Échec du redémarrage de l'instance" - -#: cinder/virt/xenapi/vm_utils.py:934 +#: cinder/volume/drivers/emc/emc_smis_common.py:188 #, python-format -msgid "Detected %(image_type_str)s format for image %(image_ref)s" +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:955 +#: cinder/volume/drivers/emc/emc_smis_common.py:197 #, python-format -msgid "Looking up vdi %s for PV kernel" -msgstr "Recherche du VDI %s pour le PV kernel" - -#: cinder/virt/xenapi/vm_utils.py:973 -#, python-format -msgid "Unknown image format %(disk_image_type)s" +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1016 -#, python-format -msgid "VDI %s is still available" -msgstr "Le VDI %s est toujours disponible" - -#: cinder/virt/xenapi/vm_utils.py:1059 +#: cinder/volume/drivers/emc/emc_smis_common.py:218 #, python-format -msgid "(VM_UTILS) xenserver vm state -> |%s|" -msgstr "(VM_UTILS) état xenserver vm -> |%s|" +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1061 +#: cinder/volume/drivers/emc/emc_smis_common.py:230 #, python-format -msgid "(VM_UTILS) xenapi power_state -> |%s|" -msgstr "(VM_UTILS) xenapi power_state -> |%s|" +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1088 +#: cinder/volume/drivers/emc/emc_smis_common.py:241 #, python-format -msgid "Unable to parse rrd of %(vm_uuid)s" +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. 
Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1108 +#: cinder/volume/drivers/emc/emc_smis_common.py:257 #, python-format -msgid "Re-scanning SR %s" -msgstr "Re-parcours de SR %s" +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1136 +#: cinder/volume/drivers/emc/emc_smis_common.py:266 #, python-format -msgid "Flag sr_matching_filter '%s' does not respect formatting convention" +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1154 +#: cinder/volume/drivers/emc/emc_smis_common.py:278 +#, python-format msgid "" -"XenAPI is unable to find a Storage Repository to install guest instances " -"on. Please check your configuration and/or configure the flag " -"'sr_matching_filter'" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1167 -msgid "Cannot find SR of content-type ISO" +#: cinder/volume/drivers/emc/emc_smis_common.py:287 +msgid "Entering create_cloned_volume." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1175 +#: cinder/volume/drivers/emc/emc_smis_common.py:292 #, python-format -msgid "ISO: looking at SR %(sr_rec)s" +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1177 -msgid "ISO: not iso content" +#: cinder/volume/drivers/emc/emc_smis_common.py:302 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1180 -msgid "ISO: iso content_type, no 'i18n-key' key" +#: cinder/volume/drivers/emc/emc_smis_common.py:312 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1183 -msgid "ISO: iso content_type, i18n-key value not 'local-storage-iso'" +#: cinder/volume/drivers/emc/emc_smis_common.py:321 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1187 -msgid "ISO: SR MATCHing our criteria" +#: cinder/volume/drivers/emc/emc_smis_common.py:342 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. Return code: %(rc)lu.Error: %(error)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1189 -msgid "ISO: ISO, looking to see if it is host local" +#: cinder/volume/drivers/emc/emc_smis_common.py:354 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." 
msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1192 +#: cinder/volume/drivers/emc/emc_smis_common.py:365 #, python-format -msgid "ISO: PBD %(pbd_ref)s disappeared" +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1195 +#: cinder/volume/drivers/emc/emc_smis_common.py:381 #, python-format -msgid "ISO: PBD matching, want %(pbd_rec)s, have %(host)s" +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1198 -msgid "ISO: SR with local PBD" +#: cinder/volume/drivers/emc/emc_smis_common.py:390 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1220 +#: cinder/volume/drivers/emc/emc_smis_common.py:402 #, python-format msgid "" -"Unable to obtain RRD XML for VM %(vm_uuid)s with server details: " -"%(server)s." +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1236 -#, python-format -msgid "Unable to obtain RRD XML updates with server details: %(server)s." +#: cinder/volume/drivers/emc/emc_smis_common.py:411 +msgid "Entering delete_volume." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1290 +#: cinder/volume/drivers/emc/emc_smis_common.py:413 #, python-format -msgid "Invalid statistics data from Xenserver: %s" +msgid "Delete Volume: %(volume)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1343 +#: cinder/volume/drivers/emc/emc_smis_common.py:420 #, python-format -msgid "VHD %(vdi_uuid)s has parent %(parent_ref)s" -msgstr "VHD %(vdi_uuid)s à pour parent %(parent_ref)s" +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1417 +#: cinder/volume/drivers/emc/emc_smis_common.py:430 #, python-format msgid "" -"Parent %(parent_uuid)s doesn't match original parent " -"%(original_parent_uuid)s, waiting for coalesce..." +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." msgstr "" -"L'UUID parent %(parent_uuid)s ne correspond pas au parent originel " -"%(original_parent_uuid)s, attente de coalesence..." -#: cinder/virt/xenapi/vm_utils.py:1427 +#: cinder/volume/drivers/emc/emc_smis_common.py:438 #, python-format -msgid "VHD coalesce attempts exceeded (%(max_attempts)d), giving up..." +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1462 +#: cinder/volume/drivers/emc/emc_smis_common.py:442 #, python-format -msgid "Timeout waiting for device %s to be created" +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1473 -#, python-format -msgid "Plugging VBD %s ... " -msgstr "Connexion de VBD %s ... " - -#: cinder/virt/xenapi/vm_utils.py:1476 -#, python-format -msgid "Plugging VBD %s done." -msgstr "Connexion de VBD %s terminée." - -#: cinder/virt/xenapi/vm_utils.py:1478 +#: cinder/volume/drivers/emc/emc_smis_common.py:456 #, python-format -msgid "VBD %(vbd_ref)s plugged as %(orig_dev)s" +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. 
Error: " +"%(error)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1481 +#: cinder/volume/drivers/emc/emc_smis_common.py:465 #, python-format -msgid "VBD %(vbd_ref)s plugged into wrong dev, remapping to %(dev)s" +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1490 -#, python-format -msgid "Destroying VBD for VDI %s ... " -msgstr "Destruction de VBD pour la VDI %s ... " - -#: cinder/virt/xenapi/vm_utils.py:1498 -#, python-format -msgid "Destroying VBD for VDI %s done." -msgstr "Destruction de VBD pour la VDI %s terminée." +#: cinder/volume/drivers/emc/emc_smis_common.py:472 +msgid "Entering create_snapshot." +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1511 +#: cinder/volume/drivers/emc/emc_smis_common.py:476 #, python-format -msgid "Running pygrub against %s" -msgstr "Exécution de pygrub sur %s" +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1518 +#: cinder/volume/drivers/emc/emc_smis_common.py:488 #, python-format -msgid "Found Xen kernel %s" -msgstr "Kernel Xen %s trouvé" - -#: cinder/virt/xenapi/vm_utils.py:1520 -msgid "No Xen kernel found. Booting HVM." -msgstr "Pas de kernel Xen trouvé. Démarrage en HVM." - -#: cinder/virt/xenapi/vm_utils.py:1533 -msgid "Partitions:" +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1539 +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:497 +#: cinder/volume/drivers/emc/emc_smis_common.py:567 #, python-format -msgid " %(num)s: %(ptype)s %(size)d sectors" +msgid "Cannot find Replication Service to create snapshot for volume %s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1565 +#: cinder/volume/drivers/emc/emc_smis_common.py:502 #, python-format msgid "" -"Writing partition table %(primary_first)d %(primary_last)d to " -"%(dev_path)s..." +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1578 -#, python-format -msgid "Writing partition table %s done." -msgstr "Ecriture de la table de partitionnement %s terminée." - -#: cinder/virt/xenapi/vm_utils.py:1632 +#: cinder/volume/drivers/emc/emc_smis_common.py:518 #, python-format msgid "" -"Starting sparse_copy src=%(src_path)s dst=%(dst_path)s " -"virtual_size=%(virtual_size)d block_size=%(block_size)d" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1664 +#: cinder/volume/drivers/emc/emc_smis_common.py:527 #, python-format msgid "" -"Finished sparse_copy in %(duration).2f secs, %(compression_pct).2f%% " -"reduction in size" +"Error Create Snapshot: %(snapshot)s Volume: %(volume)s Error: " +"%(errordesc)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1714 +#: cinder/volume/drivers/emc/emc_smis_common.py:535 +#, python-format msgid "" -"XenServer tools installed in this image are capable of network injection." -" Networking files will not bemanipulated" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1722 -msgid "" -"XenServer tools are present in this image but are not capable of network " -"injection" +#: cinder/volume/drivers/emc/emc_smis_common.py:541 +msgid "Entering delete_snapshot." 
msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1726 -msgid "XenServer tools are not installed in this image" +#: cinder/volume/drivers/emc/emc_smis_common.py:545 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1742 -msgid "Manipulating interface files directly" +#: cinder/volume/drivers/emc/emc_smis_common.py:551 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1751 +#: cinder/volume/drivers/emc/emc_smis_common.py:559 #, python-format -msgid "Failed to mount filesystem (expected for non-linux instances): %s" +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." msgstr "" -#: cinder/virt/xenapi/vmops.py:131 cinder/virt/xenapi/vmops.py:722 +#: cinder/volume/drivers/emc/emc_smis_common.py:574 #, python-format -msgid "Updating progress to %(progress)d" +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:231 +#: cinder/volume/drivers/emc/emc_smis_common.py:590 #, python-format -msgid "Attempted to power on non-existent instance bad instance id %s" +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" msgstr "" -#: cinder/virt/xenapi/vmops.py:233 -#, fuzzy -msgid "Starting instance" -msgstr "Création d'une instance raw" - -#: cinder/virt/xenapi/vmops.py:303 -msgid "Removing kernel/ramdisk files from dom0" +#: cinder/volume/drivers/emc/emc_smis_common.py:599 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:358 -#, fuzzy -msgid "Failed to spawn, rolling back" -msgstr "Échec de la suspension de l'instance" - -#: cinder/virt/xenapi/vmops.py:443 -msgid "Detected ISO image type, creating blank VM for install" +#: cinder/volume/drivers/emc/emc_smis_common.py:611 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." msgstr "" -#: cinder/virt/xenapi/vmops.py:462 -msgid "Auto configuring disk, attempting to resize partition..." 
-msgstr "" +#: cinder/volume/drivers/emc/emc_smis_common.py:621 +#, fuzzy, python-format +msgid "Create export: %(volume)s" +msgstr "Ré-exportation de %s volumes" -#: cinder/virt/xenapi/vmops.py:515 -#, python-format -msgid "Invalid value for injected_files: %r" +#: cinder/volume/drivers/emc/emc_smis_common.py:626 +#, fuzzy, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" msgstr "" +"Montage du volume %(volume_id)s sur l'instance %(instance_id)s en tant " +"que %(device)s" -#: cinder/virt/xenapi/vmops.py:520 +#: cinder/volume/drivers/emc/emc_smis_common.py:648 #, python-format -msgid "Injecting file path: '%s'" -msgstr "Injection du chemin d'accès : '%s'" - -#: cinder/virt/xenapi/vmops.py:527 -msgid "Setting admin password" +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" msgstr "" -#: cinder/virt/xenapi/vmops.py:531 -msgid "Resetting network" +#: cinder/volume/drivers/emc/emc_smis_common.py:663 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:538 -msgid "Setting VCPU weight" -msgstr "" +#: cinder/volume/drivers/emc/emc_smis_common.py:674 +#, fuzzy, python-format +msgid "Error mapping volume %s." +msgstr "Erreur au démarrage xvp : %s" -#: cinder/virt/xenapi/vmops.py:544 -#, fuzzy -msgid "Starting VM" -msgstr "Re-démarrage xvp" +#: cinder/volume/drivers/emc/emc_smis_common.py:678 +#, fuzzy, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "volume %s: supprimé avec succès" -#: cinder/virt/xenapi/vmops.py:551 +#: cinder/volume/drivers/emc/emc_smis_common.py:694 #, python-format msgid "" -"Latest agent build for %(hypervisor)s/%(os)s/%(architecture)s is " -"%(version)s" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:554 +#: cinder/volume/drivers/emc/emc_smis_common.py:707 #, python-format -msgid "No agent build found for %(hypervisor)s/%(os)s/%(architecture)s" +msgid "Error unmapping volume %s." msgstr "" -#: cinder/virt/xenapi/vmops.py:561 -msgid "Waiting for instance state to become running" -msgstr "" +#: cinder/volume/drivers/emc/emc_smis_common.py:711 +#, fuzzy, python-format +msgid "HidePaths for volume %s completed successfully." +msgstr "volume %s: supprimé avec succès" -#: cinder/virt/xenapi/vmops.py:573 -msgid "Querying agent version" +#: cinder/volume/drivers/emc/emc_smis_common.py:724 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:576 +#: cinder/volume/drivers/emc/emc_smis_common.py:739 #, python-format -msgid "Instance agent version: %s" +msgid "Error mapping volume %(vol)s. %(error)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:581 +#: cinder/volume/drivers/emc/emc_smis_common.py:744 +#, fuzzy, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "volume %s: supprimé avec succès" + +#: cinder/volume/drivers/emc/emc_smis_common.py:757 #, python-format -msgid "Updating Agent to %s" +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:616 +#: cinder/volume/drivers/emc/emc_smis_common.py:770 #, python-format -msgid "No opaque_ref could be determined for '%s'." 
+msgid "Error unmapping volume %(vol)s. %(error)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:670 -#, fuzzy, python-format -msgid "Finished snapshot and upload for VM" -msgstr "Fin de l'instantané et du chargement de VM %s" - -#: cinder/virt/xenapi/vmops.py:677 -#, fuzzy, python-format -msgid "Starting snapshot for VM" -msgstr "Début de création d'instantané (snapshot) pour la VM %s" - -#: cinder/virt/xenapi/vmops.py:686 +#: cinder/volume/drivers/emc/emc_smis_common.py:775 #, fuzzy, python-format -msgid "Unable to Snapshot instance: %(exc)s" -msgstr "Impossible d'attacher le volume à l'instance %s" - -#: cinder/virt/xenapi/vmops.py:702 -msgid "Failed to transfer vhd to new host" -msgstr "" +msgid "RemoveMembers for volume %s completed successfully." +msgstr "volume %s: supprimé avec succès" -#: cinder/virt/xenapi/vmops.py:770 +#: cinder/volume/drivers/emc/emc_smis_common.py:781 #, python-format -msgid "Resizing down VDI %(cow_uuid)s from %(old_gb)dGB to %(new_gb)dGB" +msgid "Map volume: %(volume)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:893 +#: cinder/volume/drivers/emc/emc_smis_common.py:790 +#: cinder/volume/drivers/emc/emc_smis_common.py:820 #, python-format -msgid "Resizing up VDI %(vdi_uuid)s from %(old_gb)dGB to %(new_gb)dGB" +msgid "Cannot find Controller Configuration Service for storage system %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:901 -msgid "Resize complete" -msgstr "" - -#: cinder/virt/xenapi/vmops.py:928 +#: cinder/volume/drivers/emc/emc_smis_common.py:804 #, python-format -msgid "Failed to query agent version: %(resp)r" +msgid "Unmap volume: %(volume)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:949 +#: cinder/volume/drivers/emc/emc_smis_common.py:810 #, python-format -msgid "domid changed from %(domid)s to %(newdomid)s" +msgid "Volume %s is not mapped. No volume to unmap." msgstr "" -#: cinder/virt/xenapi/vmops.py:962 +#: cinder/volume/drivers/emc/emc_smis_common.py:834 #, python-format -msgid "Failed to update agent: %(resp)r" +msgid "Initialize connection: %(volume)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:983 +#: cinder/volume/drivers/emc/emc_smis_common.py:840 #, python-format -msgid "Failed to exchange keys: %(resp)r" +msgid "Volume %s is already mapped." msgstr "" -#: cinder/virt/xenapi/vmops.py:998 +#: cinder/volume/drivers/emc/emc_smis_common.py:852 #, python-format -msgid "Failed to update password: %(resp)r" +msgid "Terminate connection: %(volume)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1023 +#: cinder/volume/drivers/emc/emc_smis_common.py:884 #, python-format -msgid "Failed to inject file: %(resp)r" +msgid "Found Storage Type: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1032 -msgid "VM already halted, skipping shutdown..." +#: cinder/volume/drivers/emc/emc_smis_common.py:887 +msgid "Storage type not found." msgstr "" -#: cinder/virt/xenapi/vmops.py:1036 -msgid "Shutting down VM" +#: cinder/volume/drivers/emc/emc_smis_common.py:903 +#, python-format +msgid "Found Masking View: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1054 -msgid "Unable to find VBD for VM" +#: cinder/volume/drivers/emc/emc_smis_common.py:906 +msgid "Masking View not found." msgstr "" -#: cinder/virt/xenapi/vmops.py:1097 -msgid "Using RAW or VHD, skipping kernel and ramdisk deletion" +#: cinder/volume/drivers/emc/emc_smis_common.py:928 +msgid "Ecom user not found." 
msgstr "" -#: cinder/virt/xenapi/vmops.py:1104 -msgid "instance has a kernel or ramdisk but not both" +#: cinder/volume/drivers/emc/emc_smis_common.py:948 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1111 -msgid "kernel/ramdisk files removed" -msgstr "Fichiers noyau/ramdisk supprimés" - -#: cinder/virt/xenapi/vmops.py:1121 -msgid "VM destroyed" +#: cinder/volume/drivers/emc/emc_smis_common.py:952 +msgid "Ecom server not found." msgstr "" -#: cinder/virt/xenapi/vmops.py:1147 +#: cinder/volume/drivers/emc/emc_smis_common.py:959 #, fuzzy -msgid "Destroying VM" -msgstr "Re-démarrage xvp" +msgid "Cannot connect to ECOM server" +msgstr "Reconnection à la queue" -#: cinder/virt/xenapi/vmops.py:1169 -msgid "VM is not present, skipping destroy..." +#: cinder/volume/drivers/emc/emc_smis_common.py:971 +#, python-format +msgid "Found Replication Service: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1222 +#: cinder/volume/drivers/emc/emc_smis_common.py:984 #, python-format -msgid "Instance is already in Rescue Mode: %s" +msgid "Found Storage Configuration Service: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1296 +#: cinder/volume/drivers/emc/emc_smis_common.py:997 #, python-format -msgid "Found %(instance_count)d hung reboots older than %(timeout)d seconds" +msgid "Found Controller Configuration Service: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1300 -msgid "Automatically hard rebooting" +#: cinder/volume/drivers/emc/emc_smis_common.py:1010 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1363 +#: cinder/volume/drivers/emc/emc_smis_common.py:1054 #, python-format -msgid "Setting migration %(migration_id)s to error: %(reason)s" +msgid "Pool %(storage_type)s is not found." msgstr "" -#: cinder/virt/xenapi/vmops.py:1374 +#: cinder/volume/drivers/emc/emc_smis_common.py:1060 #, python-format -msgid "" -"Automatically confirming migration %(migration_id)s for instance " -"%(instance_uuid)s" +msgid "Storage system not found for pool %(storage_type)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1379 -#, fuzzy, python-format -msgid "Instance %(instance_uuid)s not found" -msgstr "L'instance %(instance_id)s n'est pas suspendue" - -#: cinder/virt/xenapi/vmops.py:1383 -msgid "In ERROR state" +#: cinder/volume/drivers/emc/emc_smis_common.py:1066 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1389 +#: cinder/volume/drivers/emc/emc_smis_common.py:1082 #, python-format -msgid "In %(task_state)s task_state, not RESIZE_VERIFY" +msgid "Pool name: %(poolname)s System name: %(systemname)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1396 +#: cinder/volume/drivers/emc/emc_smis_common.py:1114 #, python-format -msgid "Error auto-confirming resize: %(e)s. Will retry later." +msgid "Volume %(volumename)s not found on the array." msgstr "" -#: cinder/virt/xenapi/vmops.py:1418 -msgid "Could not get bandwidth info." +#: cinder/volume/drivers/emc/emc_smis_common.py:1117 +#, fuzzy, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." msgstr "" +"Montage du volume %(volume_id)s sur l'instance %(instance_id)s en tant " +"que %(device)s" -#: cinder/virt/xenapi/vmops.py:1469 -#, fuzzy -msgid "Injecting network info to xenstore" -msgstr "réglage de l'hôte réseau" +#: cinder/volume/drivers/emc/emc_smis_common.py:1130 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." 
+msgstr "" -#: cinder/virt/xenapi/vmops.py:1483 -msgid "Creating vifs" +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " msgstr "" -#: cinder/virt/xenapi/vmops.py:1492 -#, fuzzy, python-format -msgid "Creating VIF for network %(network_ref)s" -msgstr "Création du VIF pour la VM %(vm_ref)s, réseau %(network_ref)s." +#: cinder/volume/drivers/emc/emc_smis_common.py:1158 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" -#: cinder/virt/xenapi/vmops.py:1495 -#, fuzzy, python-format -msgid "Created VIF %(vif_ref)s, network %(network_ref)s" -msgstr "Création du VIF pour la VM %(vm_ref)s, réseau %(network_ref)s." +#: cinder/volume/drivers/emc/emc_smis_common.py:1184 +#, python-format +msgid "Error finding %s." +msgstr "" -#: cinder/virt/xenapi/vmops.py:1520 -msgid "Injecting hostname to xenstore" +#: cinder/volume/drivers/emc/emc_smis_common.py:1188 +#, python-format +msgid "Found %(name)s: %(initiator)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1545 +#: cinder/volume/drivers/emc/emc_smis_common.py:1248 #, python-format msgid "" -"The agent call to %(method)s returned an invalid response: %(ret)r. " -"path=%(path)s; args=%(args)r" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1566 +#: cinder/volume/drivers/emc/emc_smis_common.py:1289 #, python-format -msgid "TIMEOUT: The call to %(method)s timed out. args=%(args)r" +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1570 +#: cinder/volume/drivers/emc/emc_smis_common.py:1302 #, python-format msgid "" -"NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. " -"args=%(args)r" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." msgstr "" -#: cinder/virt/xenapi/vmops.py:1575 +#: cinder/volume/drivers/emc/emc_smis_common.py:1314 #, python-format -msgid "The call to %(method)s returned an error: %(e)s. args=%(args)r" +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1661 +#: cinder/volume/drivers/emc/emc_smis_common.py:1326 #, python-format -msgid "OpenSSL error: %s" -msgstr "Erreur OpenSSL : %s" - -#: cinder/virt/xenapi/volume_utils.py:52 -msgid "creating sr within volume_utils" +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:55 cinder/virt/xenapi/volume_utils.py:83 +#: cinder/volume/drivers/emc/emc_smis_common.py:1361 #, python-format -msgid "type is = %s" +msgid "Available device number on %(storage)s: %(device)s." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:58 cinder/virt/xenapi/volume_utils.py:86 +#: cinder/volume/drivers/emc/emc_smis_common.py:1404 #, python-format -msgid "name = %s" +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:71 +#: cinder/volume/drivers/emc/emc_smis_common.py:1409 #, python-format -msgid "Created %(label)s as %(sr_ref)s." +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." 
msgstr "" -#: cinder/virt/xenapi/volume_utils.py:76 cinder/virt/xenapi/volume_utils.py:174 -msgid "Unable to create Storage Repository" -msgstr "Impossible de créer le dépot de stockage" - -#: cinder/virt/xenapi/volume_utils.py:80 -msgid "introducing sr within volume_utils" +#: cinder/volume/drivers/emc/emc_smis_common.py:1419 +#, python-format +msgid "Device info: %(data)s." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:103 cinder/virt/xenapi/volume_utils.py:170 -#: cinder/virt/xenapi/volumeops.py:156 +#: cinder/volume/drivers/emc/emc_smis_common.py:1441 #, python-format -msgid "Introduced %(label)s as %(sr_ref)s." -msgstr "%(label)s introduit comme %(sr_ref)s." - -#: cinder/virt/xenapi/volume_utils.py:106 -msgid "Creating pbd for SR" +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:108 -msgid "Plugging SR" +#: cinder/volume/drivers/emc/emc_smis_common.py:1463 +#, python-format +msgid "Found Storage Processor System: %s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:116 cinder/virt/xenapi/volumeops.py:160 -msgid "Unable to introduce Storage Repository" +#: cinder/volume/drivers/emc/emc_smis_common.py:1491 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:127 cinder/virt/xenapi/volumeops.py:50 -msgid "Unable to get SR using uuid" +#: cinder/volume/drivers/emc/emc_smis_common.py:1520 +msgid "Error finding Storage Hardware ID Service." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:129 +#: cinder/volume/drivers/emc/emc_smis_common.py:1526 #, python-format -msgid "Forgetting SR %s..." +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:137 -msgid "Unable to forget Storage Repository" +#: cinder/volume/drivers/emc/emc_smis_common.py:1538 +msgid "Error finding Target WWNs." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:157 +#: cinder/volume/drivers/emc/emc_smis_common.py:1548 #, python-format -msgid "Introducing %s..." -msgstr "Introduction de %s" +msgid "Add target WWN: %s." +msgstr "" -#: cinder/virt/xenapi/volume_utils.py:186 +#: cinder/volume/drivers/emc/emc_smis_common.py:1550 #, python-format -msgid "Unable to find SR from VBD %s" -msgstr "Impossible de trouver SR du VDB %s" +msgid "Target WWNs: %s." +msgstr "" -#: cinder/virt/xenapi/volume_utils.py:204 +#: cinder/volume/drivers/emc/emc_smis_common.py:1566 #, python-format -msgid "Ignoring exception %(exc)s when getting PBDs for %(sr_ref)s" -msgstr "Exception %(exc)s ignorée pendant l'obtention de PBDs pour %(sr_ref)s" +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." 
msgstr ""

-#: cinder/virt/xenapi/volume_utils.py:210
+#: cinder/volume/drivers/emc/emc_smis_iscsi.py:152
 #, python-format
-msgid "Ignoring exception %(exc)s when unplugging PBD %(pbd)s"
-msgstr "Exception %(exc)s ignorée pendant la deconnexion du PBD %(pbd)s"
+msgid "Could not find iSCSI export for volume %s"
+msgstr "Impossible de trouver une exportation iSCSI pour le volume %s"

-#: cinder/virt/xenapi/volume_utils.py:234
-#, python-format
-msgid "Unable to introduce VDI on SR %s"
-msgstr "Impossible d'introduire VDI sur SR %s"
+#: cinder/volume/drivers/emc/emc_smis_iscsi.py:161
+#, fuzzy, python-format
+msgid "Cannot find device number for volume %s"
+msgstr "Impossible de trouver une exportation iSCSI pour le volume %s"

-#: cinder/virt/xenapi/volume_utils.py:242
-#, python-format
-msgid "Unable to get record of VDI %s on"
-msgstr "Impossible de récuppérer l'enregistrement du VDI %s sur"
+#: cinder/volume/drivers/emc/emc_smis_iscsi.py:191
+#, fuzzy, python-format
+msgid "Found iSCSI endpoint: %s"
+msgstr "\"Non trouvé\" remonté : %s"

-#: cinder/virt/xenapi/volume_utils.py:264
+#: cinder/volume/drivers/emc/emc_smis_iscsi.py:198
 #, python-format
-msgid "Unable to introduce VDI for SR %s"
-msgstr "Impossible d'introduire le VDI pour SR %s"
+msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s."
+msgstr ""

-#: cinder/virt/xenapi/volume_utils.py:274
+#: cinder/volume/drivers/emc/emc_smis_iscsi.py:215
 #, python-format
-msgid "Error finding vdis in SR %s"
+msgid "ISCSI properties: %s"
 msgstr ""

-#: cinder/virt/xenapi/volume_utils.py:281
+#: cinder/volume/drivers/hds/hds.py:70
 #, python-format
-msgid "Unable to find vbd for vdi %s"
+msgid "Range: start LU: %(start)s, end LU: %(end)s"
 msgstr ""

-#: cinder/virt/xenapi/volume_utils.py:315
+#: cinder/volume/drivers/hds/hds.py:84
 #, python-format
-msgid "Unable to obtain target information %(data)s, %(mountpoint)s"
+msgid "setting LU upper (end) limit to %s"
 msgstr ""

-#: cinder/virt/xenapi/volume_utils.py:341
+#: cinder/volume/drivers/hds/hds.py:92
 #, python-format
-msgid "Mountpoint cannot be translated: %s"
-msgstr "Le point de montage ne peut pas être traduit : %s"
+msgid "%(element)s: %(val)s"
+msgstr ""

-#: cinder/virt/xenapi/volumeops.py:64
-msgid "Could not find VDI ref"
+#: cinder/volume/drivers/hds/hds.py:103 cinder/volume/drivers/hds/hds.py:105
+#, python-format
+msgid "XML exception reading parameter: %s"
 msgstr ""

-#: cinder/virt/xenapi/volumeops.py:69
+#: cinder/volume/drivers/hds/hds.py:178
 #, python-format
-msgid "Creating SR %s"
+msgid "portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s"
 msgstr ""

-#: cinder/virt/xenapi/volumeops.py:73
-msgid "Could not create SR"
+#: cinder/volume/drivers/hds/hds.py:197
+#, python-format
+msgid "No configuration found for service: %s"
 msgstr ""

-#: cinder/virt/xenapi/volumeops.py:76
-msgid "Could not retrieve SR record"
+#: cinder/volume/drivers/hds/hds.py:250
+#, fuzzy, python-format
+msgid "HDP not found: %s"
+msgstr "Sheepdog n'est pas actif : %s"

-#: cinder/virt/xenapi/volumeops.py:81
+#: cinder/volume/drivers/hds/hds.py:289
 #, python-format
-msgid "Introducing SR %s"
+msgid "iSCSI portal not found for service: %s"
 msgstr ""

-#: cinder/virt/xenapi/volumeops.py:85
-msgid "SR found in xapi database. No need to introduce"
+#: cinder/volume/drivers/hds/hds.py:327
+#, python-format
+msgid "LUN %(lun)s of size %(sz)s MB is created."
msgstr "" -#: cinder/virt/xenapi/volumeops.py:90 -msgid "Could not introduce SR" +#: cinder/volume/drivers/hds/hds.py:355 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is cloned." msgstr "" -#: cinder/virt/xenapi/volumeops.py:94 +#: cinder/volume/drivers/hds/hds.py:372 #, python-format -msgid "Checking for SR %s" +msgid "LUN %(lun)s extended to %(size)s GB." msgstr "" -#: cinder/virt/xenapi/volumeops.py:106 +#: cinder/volume/drivers/hds/hds.py:395 #, python-format -msgid "SR %s not found in the xapi database" +msgid "delete lun %(lun)s on %(name)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:112 -msgid "Could not forget SR" +#: cinder/volume/drivers/hds/hds.py:480 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created from snapshot." msgstr "" -#: cinder/virt/xenapi/volumeops.py:121 +#: cinder/volume/drivers/hds/hds.py:503 #, python-format -msgid "Attach_volume: %(connection_info)s, %(instance_name)s, %(mountpoint)s" +msgid "LUN %(lun)s of size %(size)s MB is created as snapshot." msgstr "" -#: cinder/virt/xenapi/volumeops.py:178 +#: cinder/volume/drivers/hds/hds.py:522 #, python-format -msgid "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s" +msgid "LUN %s is deleted." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:57 +msgid "_instantiate_driver: configuration not found." msgstr "" -"Impossible de créer VDI sur SR %(sr_ref)s pour l'instance " -"%(instance_name)s" -#: cinder/virt/xenapi/volumeops.py:189 +#: cinder/volume/drivers/huawei/__init__.py:64 #, python-format -msgid "Unable to use SR %(sr_ref)s for instance %(instance_name)s" -msgstr "Impossible d'utiliser SR %(sr_ref)s pour l'instance %(instance_name)s" +msgid "" +"_instantiate_driver: Loading %(protocol)s driver for Huawei OceanStor " +"%(product)s series storage arrays." +msgstr "" -#: cinder/virt/xenapi/volumeops.py:197 +#: cinder/volume/drivers/huawei/__init__.py:84 #, python-format -msgid "Unable to attach volume to instance %s" -msgstr "Impossible d'attacher le volume à l'instance %s" +msgid "" +"\"Product\" or \"Protocol\" is illegal. \"Product\" should be set to " +"either T, Dorado or HVS. \"Protocol\" should be set to either iSCSI or " +"FC. Product: %(product)s Protocol: %(protocol)s" +msgstr "" -#: cinder/virt/xenapi/volumeops.py:200 +#: cinder/volume/drivers/huawei/huawei_dorado.py:74 #, python-format -msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s" +msgid "" +"initialize_connection: volume name: %(vol)s host: %(host)s initiator: " +"%(wwn)s" msgstr "" -"Le point de montage %(mountpoint)s a été attaché à l'instance " -"%(instance_name)s" -#: cinder/virt/xenapi/volumeops.py:210 +#: cinder/volume/drivers/huawei/huawei_dorado.py:92 +#: cinder/volume/drivers/huawei/huawei_t.py:461 #, python-format -msgid "Detach_volume: %(instance_name)s, %(mountpoint)s" -msgstr "Detach_volume: %(instance_name)s, %(mountpoint)s" +msgid "initialize_connection: Target FC ports WWNS: %s" +msgstr "" -#: cinder/virt/xenapi/volumeops.py:219 +#: cinder/volume/drivers/huawei/huawei_t.py:101 #, python-format -msgid "Unable to locate volume %s" -msgstr "Impossible de trouver le volume %s" +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(ini)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:159 +#: cinder/volume/drivers/huawei/rest_common.py:1278 +#, python-format +msgid "" +"_get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " +"check config file." 
+msgstr "" -#: cinder/virt/xenapi/volumeops.py:227 +#: cinder/volume/drivers/huawei/huawei_t.py:206 +#: cinder/volume/drivers/huawei/rest_common.py:1083 #, python-format -msgid "Unable to detach volume %s" -msgstr "Impossible de détacher le volume %s" +msgid "_get_tgt_iqn: iSCSI IP is %s." +msgstr "" -#: cinder/virt/xenapi/volumeops.py:232 +#: cinder/volume/drivers/huawei/huawei_t.py:234 #, python-format -msgid "Unable to destroy vbd %s" +msgid "_get_tgt_iqn: iSCSI target iqn is %s." msgstr "" -#: cinder/virt/xenapi/volumeops.py:239 +#: cinder/volume/drivers/huawei/huawei_t.py:248 #, python-format -msgid "Error purging SR %s" +msgid "" +"_get_iscsi_tgt_port_info: Failed to get iSCSI port info. Please make sure" +" the iSCSI port IP %s is configured in array." msgstr "" -#: cinder/virt/xenapi/volumeops.py:241 +#: cinder/volume/drivers/huawei/huawei_t.py:323 +#: cinder/volume/drivers/huawei/huawei_t.py:552 #, python-format -msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s" +msgid "" +"terminate_connection: volume: %(vol)s, host: %(host)s, connector: " +"%(initiator)s" msgstr "" -"Le point de montage %(mountpoint)s à été détaché de l'instance " -"%(instance_name)s" -#: cinder/vnc/xvp_proxy.py:98 cinder/vnc/xvp_proxy.py:103 +#: cinder/volume/drivers/huawei/huawei_t.py:351 #, python-format -msgid "Error in handshake: %s" +msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:436 +msgid "validate_connector: The FC driver requires thewwpns in the connector." msgstr "" -#: cinder/vnc/xvp_proxy.py:119 +#: cinder/volume/drivers/huawei/huawei_t.py:443 #, python-format -msgid "Invalid request: %s" +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(wwn)s" msgstr "" -#: cinder/vnc/xvp_proxy.py:139 +#: cinder/volume/drivers/huawei/huawei_t.py:578 #, python-format -msgid "Request: %s" +msgid "_remove_fc_ports: FC port was not found on host %(hostid)s." msgstr "" -#: cinder/vnc/xvp_proxy.py:142 +#: cinder/volume/drivers/huawei/huawei_utils.py:40 #, python-format -msgid "Request made with missing token: %s" +msgid "parse_xml_file: %s" msgstr "" -#: cinder/vnc/xvp_proxy.py:153 +#: cinder/volume/drivers/huawei/huawei_utils.py:129 #, python-format -msgid "Request made with invalid token: %s" +msgid "_get_host_os_type: Host %(ip)s OS type is %(os)s." 
msgstr "" -#: cinder/vnc/xvp_proxy.py:160 +#: cinder/volume/drivers/huawei/rest_common.py:59 #, python-format -msgid "Unexpected error: %s" +msgid "HVS Request URL: %(url)s" msgstr "" -#: cinder/vnc/xvp_proxy.py:180 +#: cinder/volume/drivers/huawei/rest_common.py:60 #, python-format -msgid "Starting cinder-xvpvncproxy node (version %s)" +msgid "HVS Request Data: %(data)s" msgstr "" -#: cinder/volume/api.py:74 cinder/volume/api.py:220 -msgid "status must be available" +#: cinder/volume/drivers/huawei/rest_common.py:73 +#, python-format +msgid "HVS Response Data: %(res)s" msgstr "" -#: cinder/volume/api.py:85 +#: cinder/volume/drivers/huawei/rest_common.py:75 #, python-format -msgid "Quota exceeded for %(pid)s, tried to create %(size)sG volume" +msgid "Bad response from server: %s" msgstr "" -#: cinder/volume/api.py:137 -#, fuzzy -msgid "Volume status must be available or error" -msgstr "Le status du volume doit être disponible" +#: cinder/volume/drivers/huawei/rest_common.py:82 +msgid "JSON transfer error" +msgstr "" -#: cinder/volume/api.py:142 +#: cinder/volume/drivers/huawei/rest_common.py:102 #, python-format -msgid "Volume still has %d dependent snapshots" +msgid "Login error, reason is %s" msgstr "" -#: cinder/volume/api.py:223 -msgid "already attached" +#: cinder/volume/drivers/huawei/rest_common.py:166 +#, python-format +msgid "" +"%(err)s\n" +"result: %(res)s" msgstr "" -#: cinder/volume/api.py:230 -msgid "already detached" +#: cinder/volume/drivers/huawei/rest_common.py:173 +#, python-format +msgid "%s \"data\" was not in result." msgstr "" -#: cinder/volume/api.py:292 -msgid "must be available" +#: cinder/volume/drivers/huawei/rest_common.py:208 +msgid "Can't find the Qos policy in array" msgstr "" -#: cinder/volume/api.py:325 -#, fuzzy -msgid "Volume Snapshot status must be available or error" -msgstr "Le status du volume doit être disponible" +#: cinder/volume/drivers/huawei/rest_common.py:246 +msgid "Can't find lun or lun group in array" +msgstr "" -#: cinder/volume/driver.py:96 +#: cinder/volume/drivers/huawei/rest_common.py:280 #, python-format -msgid "Recovering from a failed execute. Try number %s" -msgstr "Récupération après une exécution erronée. Tentative numéro %s" +msgid "Invalid resource pool: %s" +msgstr "" -#: cinder/volume/driver.py:106 +#: cinder/volume/drivers/huawei/rest_common.py:298 #, python-format -msgid "volume group %s doesn't exist" -msgstr "Le groupe de volume %s n'existe pas" +msgid "Get pool info error, pool name is:%s" +msgstr "" -#: cinder/volume/driver.py:270 +#: cinder/volume/drivers/huawei/rest_common.py:327 #, python-format -msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %d" +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." msgstr "" -#: cinder/volume/driver.py:318 +#: cinder/volume/drivers/huawei/rest_common.py:354 #, python-format -msgid "Skipping remove_export. No iscsi_target provisioned for volume: %d" +msgid "_stop_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." msgstr "" -#: cinder/volume/driver.py:327 +#: cinder/volume/drivers/huawei/rest_common.py:474 #, python-format msgid "" -"Skipping remove_export. 
No iscsi_target is presently exported for volume:" -" %d" +"_mapping_hostgroup_and_lungroup: lun_group: %(lun_group)sview_id: " +"%(view_id)s" msgstr "" -#: cinder/volume/driver.py:337 -msgid "ISCSI provider_location not stored, using discovery" +#: cinder/volume/drivers/huawei/rest_common.py:511 +#: cinder/volume/drivers/huawei/rest_common.py:543 +#, python-format +msgid "initiator name:%(initiator_name)s, volume name:%(volume)s." msgstr "" -#: cinder/volume/driver.py:384 +#: cinder/volume/drivers/huawei/rest_common.py:527 #, python-format -msgid "Could not find iSCSI export for volume %s" -msgstr "Imposible de trouver une exportation iSCSI pour le volume %s" +msgid "host lun id is %s" +msgstr "" -#: cinder/volume/driver.py:388 +#: cinder/volume/drivers/huawei/rest_common.py:553 #, python-format -msgid "ISCSI Discovery: Found %s" +msgid "the free wwns %s" msgstr "" -#: cinder/volume/driver.py:466 +#: cinder/volume/drivers/huawei/rest_common.py:574 #, python-format -msgid "Cannot confirm exported volume id:%(volume_id)s." +msgid "the fc server properties is:%s" msgstr "" -#: cinder/volume/driver.py:493 +#: cinder/volume/drivers/huawei/rest_common.py:688 #, python-format -msgid "FAKE ISCSI: %s" -msgstr "FAUX ISCSI: %s" +msgid "JSON transfer data error. %s" +msgstr "" -#: cinder/volume/driver.py:505 +#: cinder/volume/drivers/huawei/rest_common.py:874 #, python-format -msgid "rbd has no pool %s" -msgstr "rbd n'as pas de file %s" +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" -#: cinder/volume/driver.py:579 +#: cinder/volume/drivers/huawei/rest_common.py:937 #, python-format -msgid "Sheepdog is not working: %s" -msgstr "Sheepdog n'est pas actif : %s" +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(fetchtype)s" +msgstr "" -#: cinder/volume/driver.py:581 -msgid "Sheepdog is not working" -msgstr "Sheepdog n'est pas actif" +#: cinder/volume/drivers/huawei/rest_common.py:964 +#, python-format +msgid "" +"PrefetchType config is wrong. PrefetchType must in 1,2,3,4. fetchtype " +"is:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:970 +msgid "Use default prefetch fetchtype. Prefetch fetchtype:Intelligent." +msgstr "" -#: cinder/volume/driver.py:680 cinder/volume/driver.py:685 +#: cinder/volume/drivers/huawei/rest_common.py:982 #, python-format -msgid "LoggingVolumeDriver: %s" +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal.LUNcopy name: " +"%(luncopyname)s" msgstr "" -#: cinder/volume/manager.py:96 +#: cinder/volume/drivers/huawei/rest_common.py:1056 #, python-format -msgid "Re-exporting %s volumes" -msgstr "Ré-exportation de %s volumes" +msgid "" +"_get_iscsi_port_info: Failed to get iscsi port info through config IP " +"%(ip)s, please check config file." 
+msgstr "" -#: cinder/volume/manager.py:101 +#: cinder/volume/drivers/huawei/rest_common.py:1101 #, python-format -msgid "volume %s: skipping export" -msgstr "volume %s : exportation évitée" +msgid "_get_tgt_iqn: iSCSI target iqn is %s" +msgstr "" -#: cinder/volume/manager.py:107 +#: cinder/volume/drivers/huawei/rest_common.py:1124 #, python-format -msgid "volume %s: creating" -msgstr "volume %s: création" +msgid "_parse_volume_type: type id: %(type_id)s config parameter is: %(params)s" +msgstr "" -#: cinder/volume/manager.py:119 +#: cinder/volume/drivers/huawei/rest_common.py:1157 #, python-format -msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" -msgstr "volume %(vol_name)s: cŕeation d'un volume logique de %(vol_size)sG" +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the configuration file " +"%(conf)s." +msgstr "" -#: cinder/volume/manager.py:131 +#: cinder/volume/drivers/huawei/rest_common.py:1162 #, python-format -msgid "volume %s: creating export" -msgstr "volume %s: exportation en cours" +msgid "The config parameters are: %s" +msgstr "" -#: cinder/volume/manager.py:144 +#: cinder/volume/drivers/huawei/rest_common.py:1239 +#: cinder/volume/drivers/huawei/ssh_common.py:118 +#: cinder/volume/drivers/huawei/ssh_common.py:1265 #, python-format -msgid "volume %s: created successfully" -msgstr "volume %s: crée avec succès" +msgid "_check_conf_file: Config file invalid. %s must be set." +msgstr "" -#: cinder/volume/manager.py:153 -msgid "Volume is still attached" -msgstr "Le volume est encore attaché" +#: cinder/volume/drivers/huawei/rest_common.py:1246 +#: cinder/volume/drivers/huawei/ssh_common.py:125 +msgid "_check_conf_file: Config file invalid. StoragePool must be set." +msgstr "" -#: cinder/volume/manager.py:155 -msgid "Volume is not local to this node" -msgstr "Le volume n'est pas local à ce noeud" +#: cinder/volume/drivers/huawei/rest_common.py:1256 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1300 +msgid "Can not find lun in array" +msgstr "" -#: cinder/volume/manager.py:159 +#: cinder/volume/drivers/huawei/ssh_common.py:54 #, python-format -msgid "volume %s: removing export" -msgstr "volume %s: suppression de l'exportation" +msgid "ssh_read: Read SSH timeout. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:70 +msgid "No response message. Please check system status." +msgstr "" -#: cinder/volume/manager.py:161 +#: cinder/volume/drivers/huawei/ssh_common.py:101 +#: cinder/volume/drivers/huawei/ssh_common.py:1249 +msgid "do_setup" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:135 +#: cinder/volume/drivers/huawei/ssh_common.py:1287 #, python-format -msgid "volume %s: deleting" -msgstr "volume %s: suppression" +msgid "" +"_check_conf_file: Config file invalid. 
Host OSType is invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" -#: cinder/volume/manager.py:164 +#: cinder/volume/drivers/huawei/ssh_common.py:169 #, python-format -msgid "volume %s: volume is busy" +msgid "_get_login_info: %s" msgstr "" -#: cinder/volume/manager.py:176 +#: cinder/volume/drivers/huawei/ssh_common.py:224 #, python-format -msgid "volume %s: deleted successfully" -msgstr "volume %s: supprimé avec succès" +msgid "create_volume: volume name: %s" +msgstr "" -#: cinder/volume/manager.py:183 +#: cinder/volume/drivers/huawei/ssh_common.py:242 #, python-format -msgid "snapshot %s: creating" +msgid "" +"_name_translate: Name in cinder: %(old)s, new name in storage system: " +"%(new)s" msgstr "" -#: cinder/volume/manager.py:187 +#: cinder/volume/drivers/huawei/ssh_common.py:279 #, python-format -msgid "snapshot %(snap_name)s: creating" +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the element in configuration " +"file %(conf)s." msgstr "" -#: cinder/volume/manager.py:202 +#: cinder/volume/drivers/huawei/ssh_common.py:373 +#: cinder/volume/drivers/huawei/ssh_common.py:1451 #, python-format -msgid "snapshot %s: created successfully" +msgid "LUNType must be \"Thin\" or \"Thick\". LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:395 +msgid "" +"_parse_conf_lun_params: Use default prefetch type. Prefetch type: " +"Intelligent" msgstr "" -#: cinder/volume/manager.py:211 +#: cinder/volume/drivers/huawei/ssh_common.py:421 #, python-format -msgid "snapshot %s: deleting" +msgid "" +"_get_maximum_capacity_pool_id: Failed to get pool id. Please check config" +" file and make sure the StoragePool %s is created in storage array." msgstr "" -#: cinder/volume/manager.py:214 -#, fuzzy, python-format -msgid "snapshot %s: snapshot is busy" -msgstr "instance %s: création d'un instantané (snapshot)" +#: cinder/volume/drivers/huawei/ssh_common.py:436 +#, python-format +msgid "CLI command: %s" +msgstr "" -#: cinder/volume/manager.py:226 +#: cinder/volume/drivers/huawei/ssh_common.py:466 #, python-format -msgid "snapshot %s: deleted successfully" +msgid "" +"_execute_cli: Can not connect to IP %(old)s, try to connect to the other " +"IP %(new)s." msgstr "" -#: cinder/volume/manager.py:310 -msgid "Checking volume capabilities" +#: cinder/volume/drivers/huawei/ssh_common.py:501 +#, python-format +msgid "_execute_cli: %s" msgstr "" -#: cinder/volume/manager.py:314 +#: cinder/volume/drivers/huawei/ssh_common.py:511 #, python-format -msgid "New capabilities found: %s" +msgid "delete_volume: volume name: %s" msgstr "" -#: cinder/volume/manager.py:325 -msgid "Clear capabilities" +#: cinder/volume/drivers/huawei/ssh_common.py:516 +#, python-format +msgid "delete_volume: Volume %(name)s does not exist." msgstr "" -#: cinder/volume/manager.py:329 +#: cinder/volume/drivers/huawei/ssh_common.py:570 #, python-format -msgid "Notification {%s} received" +msgid "" +"create_volume_from_snapshot: snapshot name: %(snapshot)s, volume name: " +"%(volume)s" msgstr "" -#: cinder/volume/netapp.py:79 +#: cinder/volume/drivers/huawei/ssh_common.py:580 #, python-format -msgid "API %(name)sfailed: %(reason)s" +msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." msgstr "" -#: cinder/volume/netapp.py:109 +#: cinder/volume/drivers/huawei/ssh_common.py:650 #, python-format -msgid "%s is not set" +msgid "_wait_for_luncopy: LUNcopy %(luncopyname)s status is %(status)s." 
msgstr "" -#: cinder/volume/netapp.py:128 -#, fuzzy -msgid "Connected to DFM server" -msgstr "Reconnection à la queue" +#: cinder/volume/drivers/huawei/ssh_common.py:688 +#, python-format +msgid "create_cloned_volume: src volume: %(src)s, tgt volume: %(tgt)s" +msgstr "" -#: cinder/volume/netapp.py:159 -#, fuzzy, python-format -msgid "Job failed: %s" -msgstr "\"Non trouvé\" remonté : %s" +#: cinder/volume/drivers/huawei/ssh_common.py:697 +#, python-format +msgid "Source volume %(name)s does not exist." +msgstr "" -#: cinder/volume/netapp.py:240 -msgid "Failed to provision dataset member" +#: cinder/volume/drivers/huawei/ssh_common.py:739 +#, python-format +msgid "" +"extend_volume: extended volume name: %(extended_name)s new added volume " +"name: %(added_name)s new added volume size: %(added_size)s" msgstr "" -#: cinder/volume/netapp.py:252 -msgid "No LUN was created by the provision job" +#: cinder/volume/drivers/huawei/ssh_common.py:747 +#, python-format +msgid "extend_volume: volume %s does not exist." msgstr "" -#: cinder/volume/netapp.py:261 cinder/volume/netapp.py:433 -#, fuzzy, python-format -msgid "Failed to find LUN ID for volume %s" -msgstr "Imposible de trouver une exportation iSCSI pour le volume %s" +#: cinder/volume/drivers/huawei/ssh_common.py:779 +#, python-format +msgid "create_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" -#: cinder/volume/netapp.py:280 -msgid "Failed to remove and delete dataset member" +#: cinder/volume/drivers/huawei/ssh_common.py:785 +msgid "create_snapshot: Resource pool needs 1GB valid size at least." msgstr "" -#: cinder/volume/netapp.py:603 cinder/volume/netapp.py:657 -#, fuzzy, python-format -msgid "No LUN ID for volume %s" -msgstr "Imposible de trouver une exportation iSCSI pour le volume %s" +#: cinder/volume/drivers/huawei/ssh_common.py:792 +#, python-format +msgid "create_snapshot: Volume %(name)s does not exist." +msgstr "" -#: cinder/volume/netapp.py:607 cinder/volume/netapp.py:661 -#, fuzzy, python-format -msgid "Failed to get LUN details for LUN ID %s" -msgstr "Impossible de récupérer les méta-donnérs pour l'IP : %s" +#: cinder/volume/drivers/huawei/ssh_common.py:855 +#, python-format +msgid "delete_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" -#: cinder/volume/netapp.py:614 -#, fuzzy, python-format -msgid "Failed to get host details for host ID %s" -msgstr "Impossible de récupérer les méta-donnérs pour l'IP : %s" +#: cinder/volume/drivers/huawei/ssh_common.py:865 +#, python-format +msgid "" +"delete_snapshot: Can not delete snapshot %s for it is a source LUN of " +"LUNCopy." +msgstr "" -#: cinder/volume/netapp.py:620 -#, fuzzy, python-format -msgid "Failed to get target portal for filer: %s" -msgstr "Impossible de récupérer les méta-donnérs pour l'IP : %s" +#: cinder/volume/drivers/huawei/ssh_common.py:873 +#, python-format +msgid "delete_snapshot: Snapshot %(snap)s does not exist." +msgstr "" -#: cinder/volume/netapp.py:625 -#, fuzzy, python-format -msgid "Failed to get target IQN for filer: %s" -msgstr "Impossible de récupérer les méta-donnérs pour l'IP : %s" +#: cinder/volume/drivers/huawei/ssh_common.py:916 +#, python-format +msgid "" +"%(func)s: %(msg)s\n" +"CLI command: %(cmd)s\n" +"CLI out: %(out)s" +msgstr "" -#: cinder/volume/san.py:113 cinder/volume/san.py:151 -msgid "Specify san_password or san_private_key" +#: cinder/volume/drivers/huawei/ssh_common.py:933 +#, python-format +msgid "map_volume: Volume %s was not found." 
msgstr "" -#: cinder/volume/san.py:156 -msgid "san_ip must be set" +#: cinder/volume/drivers/huawei/ssh_common.py:1079 +#, python-format +msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s." msgstr "" -#: cinder/volume/san.py:320 +#: cinder/volume/drivers/huawei/ssh_common.py:1102 #, python-format -msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgid "remove_map: Host %s does not exist." msgstr "" -#: cinder/volume/san.py:452 +#: cinder/volume/drivers/huawei/ssh_common.py:1106 #, python-format -msgid "CLIQ command returned %s" +msgid "remove_map: Volume %s does not exist." msgstr "" -#: cinder/volume/san.py:458 +#: cinder/volume/drivers/huawei/ssh_common.py:1119 #, python-format -msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgid "remove_map: No map between host %(host)s and volume %(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1138 +#, python-format +msgid "" +"_delete_map: There are IOs accessing the system. Retry to delete host map" +" %(mapid)s 10s later." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1146 +#, python-format +msgid "" +"_delete_map: Failed to delete host map %(mapid)s.\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1185 +msgid "_update_volume_stats: Updating volume stats." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1277 +msgid "_check_conf_file: Config file invalid. StoragePool must be specified." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1311 +msgid "" +"_get_device_type: The driver only supports Dorado5100 and Dorado 2100 G2 " +"now." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1389 +#, python-format +msgid "" +"create_volume_from_snapshot: %(device)s does not support create volume " +"from snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1396 +#, python-format +msgid "create_cloned_volume: %(device)s does not support clone volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1404 +#, python-format +msgid "extend_volume: %(device)s does not support extend volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1413 +#, python-format +msgid "create_snapshot: %(device)s does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:132 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:142 +#, python-format +msgid "Failed getting details for pool %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:179 +msgid "do_setup: No configured nodes." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:182 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:186 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:190 +msgid "Unable to determine system name" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:193 +msgid "Unable to determine system id" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:196 +msgid "Unable to determine pool extent size" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:203 +#: cinder/volume/drivers/netapp/iscsi.py:122 +#: cinder/volume/drivers/netapp/nfs.py:639 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:157 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:209 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:217 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:225 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:235 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:254 +msgid "The connector does not contain the required information." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:277 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:297 +msgid "CHAP secret exists for host but CHAP is disabled" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:302 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:314 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:316 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:333 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:342 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:365 +msgid "" +"Could not get FC connection information for the host-volume connection. " +"Is the host configured properly for FC connections?" msgstr "" -#: cinder/volume/san.py:466 -#, python-format -msgid "Error running CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s" -msgstr "" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:380 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:385 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:403 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:411 +msgid "terminate_connection: Failed to get host name from connector." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:421 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:447 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:459 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:470 +#, python-format +msgid "enter: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:474 +msgid "extend_volume: Extending a volume with snapshots is not supported." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:481 +#, python-format +msgid "leave: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:497 +#, python-format +msgid "enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:523 +#, python-format +msgid "leave: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:540 +#, python-format +msgid "" +"enter: retype: id=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:581 +#, python-format +msgid "" +"exit: retype: ild=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:622 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:623 +msgid "_update_volume_stats: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:44 +#, python-format +msgid "Could not find key in output of command %(cmd)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:64 +#, python-format +msgid "Failed to get code level (%s)." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:86 +#, python-format +msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:143 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:165 +#, python-format +msgid "Failed to find host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:178 +#, python-format +msgid "enter: get_host_from_connector: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:207 +#, python-format +msgid "leave: get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:218 +#, python-format +msgid "enter: create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:224 +msgid "create_host: Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:235 +msgid "create_host: No initiators or wwpns supplied." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:265 +#, python-format +msgid "leave: create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:275 +#, python-format +msgid "enter: map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:301 +#, python-format +msgid "" +"leave: map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host " +"%(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:311 +#, python-format +msgid "enter: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:318 +#, python-format +msgid "unmap_vol_from_host: No mapping of volume %(vol_name)s to any host found." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:324 +#, python-format +msgid "" +"unmap_vol_from_host: Multiple mappings of volume %(vol_name)s found, no " +"host specified." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:336 +#, python-format +msgid "" +"unmap_vol_from_host: No mapping of volume %(vol_name)s to host %(host) " +"found." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:348 +#, python-format +msgid "leave: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:377 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:383 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:390 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:397 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:402 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:408 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:417 +#, python-format +msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:452 +msgid "Protocol must be specified as ' iSCSI' or ' FC'." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:495 +#, python-format +msgid "enter: create_vdisk: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:498 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:525 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping%(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:535 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within theallotted %(to)d " +"seconds timeout. Terminating." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:544 +#, python-format +msgid "" +"enter: run_flashcopy: execute FlashCopy from source %(source)s to target " +"%(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:552 +#, python-format +msgid "leave: run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:572 +#, python-format +msgid "Loopcall: _check_vdisk_fc_mappings(), vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:595 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:631 +#, python-format +msgid "Calling _ensure_vdisk_no_fc_mappings: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:639 +#, python-format +msgid "enter: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:641 +#, python-format +msgid "Tried to delete non-existant vdisk %s." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:645 +#, python-format +msgid "leave: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:649 +#, python-format +msgid "enter: create_copy: snapshot %(src)s to %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:654 +#, python-format +msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:669 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt)s from vdisk %(src)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:691 +msgid "migrate_volume started without a vdisk copy in the expected pool." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:743 +#, python-format +msgid "" +"Ignore change IO group as storage code level is %(code_level)s, below " +"then 6.4.0.0" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:35 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:211 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:244 +#, fuzzy, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" +"%(description)s\n" +"Commande : %(cmd)s\n" +"Valeur retournée : %(exit_code)s\n" +"Sortie standard : %(stdout)r\n" +"Sortie d'erreur : %(stderr)r" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:53 +#, python-format +msgid "Expected no output from CLI command %(cmd)s, got %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:65 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:256 +#, python-format +msgid "" +"Failed to parse CLI output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:142 +msgid "Must pass wwpn or host to lsfabric." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:158 +#, python-format +msgid "Did not find success message nor error for %(fun)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:163 +msgid "" +"storwize_svc_multihostmap_enabled is set to False, not allowing multi " +"host mapping." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:347 +#, python-format +msgid "Did not find expected key %(key)s in %(fun)s: %(raw)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:382 +#, python-format +msgid "" +"Unexpected CLI response: header/row mismatch. header: %(header)s, row: " +"%(row)s" +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:419 +#, python-format +msgid "No element by given name %s." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:440 +msgid "Not a valid value for NaElement." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:444 +msgid "NaElement name cannot be null." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:468 +msgid "Type cannot be converted into NaElement." 
+msgstr "" + +#: cinder/volume/drivers/netapp/common.py:75 +msgid "Required configuration not found" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:103 +#, python-format +msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:109 +#, python-format +msgid "Storage family %s is not supported" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:116 +#, python-format +msgid "No default storage protocol found for storage family %(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:123 +#, python-format +msgid "" +"Protocol %(storage_protocol)s is not supported for storage family " +"%(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:130 +#, python-format +msgid "" +"NetApp driver of family %(storage_family)s and protocol " +"%(storage_protocol)s loaded" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:139 +msgid "Only loading netapp drivers supported." +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:158 +#, python-format +msgid "" +"The configured NetApp driver is deprecated. Please refer the link to " +"resolve the issue '%s'." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:69 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:105 +#, fuzzy, python-format +msgid "Using NetApp filer: %s" +msgstr "Instance actives : %s" + +#: cinder/volume/drivers/netapp/iscsi.py:150 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:166 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:175 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:191 +#, fuzzy, python-format +msgid "Destroyed LUN %s" +msgstr "Nested renvoi %s" + +#: cinder/volume/drivers/netapp/iscsi.py:227 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:232 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:238 +#, fuzzy, python-format +msgid "Failed to get LUN target details for the LUN %s" +msgstr "Impossible de récupérer les méta-donnérs pour l'IP : %s" + +#: cinder/volume/drivers/netapp/iscsi.py:249 +#, fuzzy, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "Impossible de récupérer les méta-donnérs pour l'IP : %s" + +#: cinder/volume/drivers/netapp/iscsi.py:252 +#, fuzzy, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "Impossible de récupérer les méta-donnérs pour l'IP : %s" + +#: cinder/volume/drivers/netapp/iscsi.py:290 +#, fuzzy, python-format +msgid "Snapshot %s deletion successful" +msgstr "volume %s: supprimé avec succès" + +#: cinder/volume/drivers/netapp/iscsi.py:310 +#: cinder/volume/drivers/netapp/iscsi.py:565 +#: cinder/volume/drivers/netapp/nfs.py:99 +#: cinder/volume/drivers/netapp/nfs.py:206 +#, python-format +msgid "Resizing %s failed. Cleaning volume." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:325 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:412 +#, python-format +msgid "Error mapping lun. 
Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:431 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:511 +msgid "Object is not a NetApp LUN." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:543 +#, python-format +msgid "Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:545 +#, python-format +msgid "Error getting lun attribute. Exception: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:600 +#, python-format +msgid "No need to extend volume %s as it is already the requested new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:606 +#, python-format +msgid "Resizing lun %s directly to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:633 +#, python-format +msgid "Lun %(path)s geometry failed. Message - %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:662 +#, python-format +msgid "Moving lun %(name)s to %(new_name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:677 +#, python-format +msgid "Resizing lun %s using sub clone to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:684 +#, python-format +msgid "%s cannot be sub clone resized as it is hosted on compressed volume" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:690 +#, python-format +msgid "%s cannot be sub clone resized as it contains no blocks." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:707 +#, python-format +msgid "Post clone resize lun %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:718 +#, python-format +msgid "Failure staging lun %s to tmp." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:723 +#, python-format +msgid "Failure moving new cloned lun to %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:727 +#, python-format +msgid "Failure deleting staged tmp lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:730 +#, python-format +msgid "Unknown exception in post clone resize lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:732 +#, python-format +msgid "Exception details: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:736 +msgid "Getting lun block count." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:741 +#, python-format +msgid "Failure getting lun info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:785 +#, python-format +msgid "Failed to get vol with required size and extra specs for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:796 +#, python-format +msgid "Error provisioning vol %(name)s on %(volume)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:841 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:982 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:986 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1087 +msgid "Cluster ssc is not updated. No volume stats found." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1149 +#: cinder/volume/drivers/netapp/nfs.py:1080 +msgid "Unsupported ONTAP version. ONTAP version 7.3.1 and above is supported." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1153 +#: cinder/volume/drivers/netapp/nfs.py:1084 +#: cinder/volume/drivers/netapp/utils.py:320 +msgid "Api version could not be determined." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1164 +#, fuzzy, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "Impossible de récupérer les méta-donnérs pour l'IP : %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1273 +#, python-format +msgid "Error finding luns for volume %s. Verify volume exists." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1390 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1393 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1456 +msgid "Volume refresh job already running. Returning..." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1462 +#, python-format +msgid "Error refreshing vol capacity. Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1470 +#, python-format +msgid "Refreshing capacity info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:104 +#: cinder/volume/drivers/netapp/nfs.py:211 +#, python-format +msgid "NFS file %s not discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:224 +#, python-format +msgid "Copied image to volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:230 +#, python-format +msgid "Registering image in cache %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:237 +#, python-format +msgid "" +"Exception while registering image %(image_id)s in cache. Exception: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:250 +#, python-format +msgid "Found cache file for image %(image_id)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:263 +#, python-format +msgid "Cloning img from cache for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:271 +msgid "Image cache cleaning in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:282 +msgid "Image cache cleaning in progress." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:293 +#, python-format +msgid "Cleaning cache for share %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:298 +#, python-format +msgid "Files to be queued for deletion %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:305 +#, python-format +msgid "Exception during cache cleaning %(share)s. Message - %(ex)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:311 +msgid "Image cache cleaning done." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:336 +#, python-format +msgid "Bytes to free %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:343 +#, python-format +msgid "Delete file path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:358 +#, python-format +msgid "Deleting file at path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:363 +#, python-format +msgid "Exception during deleting %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:395 +#, python-format +msgid "Unexpected exception in cloning image %(image_id)s. 
Message: %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:411 +#, python-format +msgid "Cloning image %s from cache" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:415 +#, python-format +msgid "Cache share: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:425 +#, python-format +msgid "Unexpected exception during image cloning in share %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:431 +#, python-format +msgid "Cloning image %s directly in share" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:436 +#, python-format +msgid "Share is cloneable %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:443 +#, python-format +msgid "Image is raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:450 +#, python-format +msgid "Image will locally be converted to raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:457 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:467 +#, python-format +msgid "Performing post clone for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:474 +msgid "NFS file could not be discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:478 +msgid "Checking file for resize" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:482 +#, python-format +msgid "Resizing file to %sG" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:488 +msgid "Resizing image file failed." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:510 +msgid "Discover file retries exhausted." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:529 +#, python-format +msgid "Image location not in the expected format %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:557 +#, python-format +msgid "Found possible share matches %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:561 +msgid "Unexpected exception while short listing used share." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:599 +#, python-format +msgid "Extending volume %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:710 +#, python-format +msgid "Shares on vserver %s will only be used for provisioning." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:714 +#: cinder/volume/drivers/netapp/nfs.py:892 +msgid "No vserver set in config. SSC will be disabled." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:757 +#, python-format +msgid "Exception creating vol %(name)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:765 +#, python-format +msgid "Volume %s could not be created on shares." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:815 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:856 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:862 +#, python-format +msgid "" +"Cloning with params volume %(volume)s, src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:924 +msgid "No cluster ssc stats found. Wait for next volume stats update." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:941 +msgid "No shares found hence skipping ssc refresh." 
+msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:978 +#: cinder/volume/drivers/netapp/nfs.py:1221 +#, python-format +msgid "Shortlisted del elg files %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:983 +#: cinder/volume/drivers/netapp/nfs.py:1226 +#, python-format +msgid "Getting file usage for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:988 +#: cinder/volume/drivers/netapp/nfs.py:1231 +#, python-format +msgid "file-usage for path %(path)s is %(bytes)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1005 +#: cinder/volume/drivers/netapp/nfs.py:1268 +#, python-format +msgid "Share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1007 +#: cinder/volume/drivers/netapp/nfs.py:1270 +#, python-format +msgid "No share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1038 +#, python-format +msgid "Found volume %(vol)s for share %(share)s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1129 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1139 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:241 +#, python-format +msgid "Unexpected error while creating ssc vol list. Message - %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:272 +#, python-format +msgid "Exception querying aggr options. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:313 +#, python-format +msgid "Exception querying sis information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:347 +#, python-format +msgid "Exception querying mirror information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:379 +#, python-format +msgid "Exception querying storage disk. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:421 +#, python-format +msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:455 +#, python-format +msgid "Successfully completed stale refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:482 +#, python-format +msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:488 +#, python-format +msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:500 +msgid "Backend not a VolumeDriver." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:502 +msgid "Backend server not NaServer." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:505 +msgid "ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:517 +msgid "refresh stale ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:616 +msgid "Fatal error: User not permitted to query NetApp volumes." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:623 +#, python-format +msgid "" +"The user does not have access or sufficient privileges to use all ssc " +"apis. The ssc features %s may not work as expected." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:122 +msgid "ems executed successfully." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:124 +#, python-format +msgid "Failed to invoke ems. 
Message : %s" +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:137 +msgid "" +"It is not the recommended way to use drivers by NetApp. Please use " +"NetAppDriver to achieve the functionality." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:160 +msgid "Requires an NaServer instance." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:317 +msgid "Unsupported Clustered Data ONTAP version." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:99 +#, fuzzy, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "Le groupe de volume %s n'existe pas" + +#: cinder/volume/drivers/nexenta/iscsi.py:150 +#, python-format +msgid "Extending volume: %(id)s New size: %(size)s GB" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:166 +#, python-format +msgid "Volume %s does not exist, it seems it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:179 +#, python-format +msgid "Cannot delete snapshot %(origin): %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:190 +#, python-format +msgid "Creating temp snapshot of the original volume: %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:200 +#: cinder/volume/drivers/nexenta/nfs.py:200 +#, python-format +msgid "Volume creation failed, deleting created snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:205 +#: cinder/volume/drivers/nexenta/nfs.py:205 +#, python-format +msgid "Failed to delete zfs snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:223 +#, python-format +msgid "Enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:250 +#, python-format +msgid "Remote NexentaStor appliance at %s should be SSH-bound." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:267 +#, python-format +msgid "" +"Cannot send source snapshot %(src)s to destination %(dst)s. Reason: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:275 +#, python-format +msgid "" +"Cannot delete temporary source snapshot %(src)s on NexentaStor Appliance:" +" %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:281 +#, python-format +msgid "Cannot delete source volume %(volume)s on NexentaStor Appliance: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:318 +#, python-format +msgid "Snapshot %s does not exist, it seems it was already deleted." 
+msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:439 +#: cinder/volume/drivers/windows/windows_utils.py:230 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:449 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:461 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:471 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:481 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:514 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:522 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:83 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:88 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:89 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:90 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:96 +#, fuzzy, python-format +msgid "Got response: %s" +msgstr "réponse %s" + +#: cinder/volume/drivers/nexenta/nfs.py:85 +#, python-format +msgid "Volume %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:89 +#, python-format +msgid "Folder %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:114 +#, python-format +msgid "Creating folder on Nexenta Store %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:146 +#, python-format +msgid "Cannot destroy created folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:176 +#, python-format +msgid "Cannot destroy cloned folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:227 +#, python-format +msgid "Folder %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:237 +#: cinder/volume/drivers/nexenta/nfs.py:268 +#, python-format +msgid "Snapshot %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:302 +#, python-format +msgid "Creating regular file: %s.This may take some time." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:313 +#, python-format +msgid "Regular file: %s created." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:365 +#, python-format +msgid "Sharing folder %s on Nexenta Store" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:393 +#, python-format +msgid "Shares loaded: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/utils.py:46 +#, python-format +msgid "Invalid value: \"%s\"" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:93 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:99 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:107 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:137 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:190 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:246 +#, python-format +msgid "Snapshot info: %(name)s => %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:321 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:79 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:166 +#, python-format +msgid "Invalid hp3parclient version. Version %s or greater required." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:179 +#, python-format +msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:193 +#, python-format +msgid "HP3PARCommon %(common_ver)s, hp3parclient %(rest_ver)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:212 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:494 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:228 +#, python-format +msgid "Failed to get domain because CPG (%s) doesn't exist on array." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:247 +#, python-format +msgid "Error extending volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:347 +#, python-format +msgid "command %s failed" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:390 +#, fuzzy, python-format +msgid "Error running ssh command: %s" +msgstr "Erreur imprévue lors de l'éxecution de la commande" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:586 +#, python-format +msgid "VV Set %s does not exist." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:633 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:684 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:752 +#, fuzzy, python-format +msgid "Volume (%s) already exists on array" +msgstr "le groupe %s existe déjà" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1004 +#, python-format +msgid "Failure in update_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1019 +#, python-format +msgid "Failure in clear_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1031 +#, python-format +msgid "Error attaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1039 +#, python-format +msgid "Error detaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:124 +#, python-format +msgid "Invalid IP address format '%s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:158 +#, python-format +msgid "" +"Found invalid iSCSI IP address(s) in configuration option(s) " +"hp3par_iscsi_ips or iscsi_ip_address '%s.'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:164 +msgid "At least one valid iSCSI IP address must be set." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:266 +msgid "Least busy iSCSI port not found, using first iSCSI port in list." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:75 +#, python-format +msgid "Failure while invoking function: %(func)s. Error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:162 +#, python-format +msgid "Error while terminating session: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:165 +msgid "Successfully established connection to the server." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:172 +#, python-format +msgid "Error while logging out the user: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:218 +#, python-format +msgid "" +"Not authenticated error occurred. Will create session and try API call " +"again: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:258 +#, python-format +msgid "Task: %(task)s progress: %(prog)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:262 +#, python-format +msgid "Task %s status: success." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:266 +#: cinder/volume/drivers/vmware/api.py:271 +#, python-format +msgid "Task: %(task)s failed with error: %(err)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:290 +msgid "Lease is ready." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:294 +msgid "Lease initializing..." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:304 +#, python-format +msgid "Error: unknown lease state %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:51 +#, python-format +msgid "Read %(bytes)s out of %(max)s from ThreadSafePipe." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:56 +#, python-format +msgid "Completed transfer of size %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:102 +#, python-format +msgid "Initiating image service update on image: %(image)s with meta: %(meta)s" +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:117 +#, python-format +msgid "Glance image: %s is now active." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:123 +#, python-format +msgid "Glance image: %s is in killed state." 
+msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:132 +#, python-format +msgid "Glance image %(id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:171 +#, python-format +msgid "" +"Exception during HTTP connection close in VMwareHTTPWrite. Exception is " +"%s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:203 +#: cinder/volume/drivers/vmware/read_write_util.py:292 +msgid "Could not retrieve URL from lease." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:206 +#, python-format +msgid "Opening vmdk url: %s for write." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:231 +#, python-format +msgid "Written %s bytes to vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:242 +#: cinder/volume/drivers/vmware/read_write_util.py:318 +#, python-format +msgid "Updating progress to %s percent." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:258 +#: cinder/volume/drivers/vmware/read_write_util.py:334 +msgid "Lease released." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:260 +#: cinder/volume/drivers/vmware/read_write_util.py:336 +#, python-format +msgid "Lease is already in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:295 +#, python-format +msgid "Opening vmdk url: %s for read." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:307 +#, python-format +msgid "Read %s bytes from vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:150 +#, python-format +msgid "Error(s): %s occurred in the call to RetrievePropertiesEx." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:189 +#, python-format +msgid "No such SOAP method %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:198 +#, python-format +msgid "httplib error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:209 +#, python-format +msgid "Socket error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:218 +#, python-format +msgid "Type error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:225 +#, python-format +msgid "Error in %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:112 +#, python-format +msgid "Returning spec value %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:115 +#, python-format +msgid "Invalid spec value: %s specified." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:118 +#, python-format +msgid "Returning default spec value: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:169 +#, python-format +msgid "%s not set." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:174 +#, python-format +msgid "Successfully setup driver: %(driver)s for server: %(ip)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:219 +msgid "Backing not available, no operation to be performed." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:287 +#, python-format +msgid "" +"Unable to pick datastore to accommodate %(size)s bytes from the " +"datastores: %(dss)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:293 +#, python-format +msgid "" +"Selected datastore: %(datastore)s with %(host_count)d connected host(s) " +"for the volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:375 +#, python-format +msgid "" +"Unable to find suitable datastore for volume of size: %(vol)s GB under " +"host: %(host)s. 
More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:385 +#, python-format +msgid "Unable to find host to accommodate a disk of size: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:412 +#, python-format +msgid "" +"Unable to find suitable datastore for volume: %(vol)s under host: " +"%(host)s. More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:422 +#, python-format +msgid "Unable to create volume: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:441 +#, python-format +msgid "The instance: %s for which initialize connection is called, exists." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:448 +#, python-format +msgid "There is no backing for the volume: %s. Need to create one." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:456 +msgid "The instance for which initialize connection is called, does not exist." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:461 +#, python-format +msgid "Trying to boot from an empty volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:470 +#, python-format +msgid "" +"Returning connection_info: %(info)s for volume: %(volume)s with " +"connector: %(connector)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:518 +#, python-format +msgid "Snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:523 +#, python-format +msgid "There is no backing, so will not create snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:528 +#, python-format +msgid "Successfully created snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:549 +#, python-format +msgid "Delete snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:554 +#, python-format +msgid "There is no backing, and so there is no snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:558 +#, python-format +msgid "Successfully deleted snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:586 +#, python-format +msgid "Successfully cloned new backing: %(back)s from source VMDK file: %(vmdk)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:603 +#, python-format +msgid "" +"There is no backing for the source volume: %(svol)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:633 +#, python-format +msgid "" +"There is no backing for the source snapshot: %(snap)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:642 +#: cinder/volume/drivers/vmware/vmdk.py:982 +#, python-format +msgid "" +"There is no snapshot point for the snapshoted volume: %(snap)s. Not " +"creating any backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:678 +#, python-format +msgid "Cannot create image of disk format: %s. Only vmdk disk format is accepted." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:713 +#: cinder/volume/drivers/vmware/vmdk.py:771 +#, python-format +msgid "Fetching glance image: %(id)s to server: %(host)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:722 +#: cinder/volume/drivers/vmware/vmdk.py:792 +#, python-format +msgid "Done copying image: %(id)s to volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:725 +#, python-format +msgid "" +"Exception in copy_image_to_volume: %(excep)s. Deleting the backing: " +"%(back)s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:746 +#, python-format +msgid "Exception in _select_ds_for_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:749 +#, python-format +msgid "Selected datastore %(ds)s for new volume of size %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:784 +#, python-format +msgid "Exception in copy_image_to_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:787 +#, python-format +msgid "Deleting the backing: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:808 +#, python-format +msgid "Copy glance image: %s to create new volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:842 +msgid "Upload to glance of attached volume is not supported." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:847 +#, python-format +msgid "Copy Volume: %s to new image." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:853 +#, python-format +msgid "Backing not found, creating for volume: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:872 +#, python-format +msgid "Done copying volume %(vol)s to a new image %(img)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:922 +#, python-format +msgid "Relocating volume: %(backing)s to %(ds)s and %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:961 +#: cinder/volume/drivers/vmware/volumeops.py:630 +#, python-format +msgid "Successfully created clone: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:974 +#, python-format +msgid "" +"There is no backing for the snapshoted volume: %(snap)s. Not creating any" +" backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1010 +#, python-format +msgid "" +"There is no backing for the source volume: %(src)s. Not creating any " +"backing for volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1018 +#, python-format +msgid "Linked clone of source volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:94 +#, python-format +msgid "Downloading image: %s from glance image server as a flat vmdk file." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:107 +#: cinder/volume/drivers/vmware/vmware_images.py:126 +#, python-format +msgid "Downloaded image: %s from glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:113 +#, python-format +msgid "Downloading image: %s from glance image server using HttpNfc import." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:132 +#, python-format +msgid "Uploading image: %s to the Glance image server using HttpNfc export." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:158 +#, python-format +msgid "Uploaded image: %s to the Glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:87 +#, python-format +msgid "Did not find any backing with name: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:94 +#, python-format +msgid "Deleting the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:97 +#, python-format +msgid "Initiated deletion of VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:99 +#, python-format +msgid "Deleted the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:237 +#, python-format +msgid "There are no valid datastores attached to %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:289 +#, python-format +msgid "" +"Creating folder: %(child_folder_name)s under parent folder: " +"%(parent_folder)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:306 +#, python-format +msgid "Child folder already present: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:314 +#, python-format +msgid "Created child folder: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:365 +#, python-format +msgid "Spec for creating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:383 +#, python-format +msgid "" +"Creating volume backing name: %(name)s disk_type: %(disk_type)s size_kb: " +"%(size_kb)s at folder: %(folder)s resourse pool: %(resource_pool)s " +"datastore name: %(ds_name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:395 +#, python-format +msgid "Initiated creation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:398 +#, python-format +msgid "Successfully created volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:438 +#, python-format +msgid "Spec for relocating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:452 +#, python-format +msgid "" +"Relocating backing: %(backing)s to datastore: %(ds)s and resource pool: " +"%(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:462 +#, python-format +msgid "Initiated relocation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:464 +#, python-format +msgid "" +"Successfully relocated volume backing: %(backing)s to datastore: %(ds)s " +"and resource pool: %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:474 +#, python-format +msgid "Moving backing: %(backing)s to folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:479 +#, python-format +msgid "Initiated move of volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:482 +#, python-format +msgid "Successfully moved volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:494 +#, python-format +msgid "Snapshoting backing: %(backing)s with name: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:501 +#, python-format +msgid "Initiated snapshot of volume backing: %(backing)s named: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:505 +#, python-format +msgid "Successfully created snapshot: %(snap)s for volume backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:553 +#, python-format +msgid "Deleting the snapshot: %(name)s from backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:558 +#, python-format +msgid "" +"Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " +"delete anything." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:565 +#, python-format +msgid "Initiated snapshot: %(name)s deletion for backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:569 +#, python-format +msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:597 +#, python-format +msgid "Spec for cloning the backing: %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:613 +#, python-format +msgid "" +"Creating a clone of backing: %(back)s, named: %(name)s, clone type: " +"%(type)s from snapshot: %(snap)s on datastore: %(ds)s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:627 +#, python-format +msgid "Initiated clone of backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:638 +#, python-format +msgid "Deleting file: %(file)s under datacenter: %(dc)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:646 +#, python-format +msgid "Initiated deletion via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:648 +#, python-format +msgid "Successfully deleted file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:701 +msgid "Copying disk data before snapshot of the VM" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:711 +#, python-format +msgid "Initiated copying disk data via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:713 +#, python-format +msgid "Successfully copied disk at: %(src)s to: %(dest)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:722 +#, python-format +msgid "Deleting vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:729 +#, python-format +msgid "Initiated deleting vmdk file via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:731 +#, python-format +msgid "Deleted vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/windows/windows.py:102 +#, fuzzy, python-format +msgid "Creating folder %s " +msgstr "Suppression de l'utilisateur %s" + +#: cinder/volume/drivers/windows/windows_utils.py:47 +#, python-format +msgid "" +"check_for_setup_error: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:53 +msgid "check_for_setup_error: there is no ISCSI traffic listening." +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:63 +#, python-format +msgid "" +"get_host_information: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:73 +#, python-format +msgid "" +"get_host_information: the ISCSI target information could not be " +"retrieved. WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:105 +#, python-format +msgid "" +"associate_initiator_with_iscsi_target: an association between initiator: " +"%(init)s and target name: %(target)s could not be established. WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:123 +#, python-format +msgid "" +"delete_iscsi_target: error when deleting the iscsi target associated with" +" target name: %(target)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:139 +#, python-format +msgid "" +"create_volume: error when creating the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:157 +#, python-format +msgid "" +"delete_volume: error when deleting the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:177 +#, python-format +msgid "" +"create_snapshot: error when creating the snapshot name: %(vol_name)s . 
" +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:193 +#, python-format +msgid "" +"create_volume_from_snapshot: error when creating the volume name: " +"%(vol_name)s from snapshot name: %(snap_name)s. WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:208 +#, python-format +msgid "" +"delete_snapshot: error when deleting the snapshot name: %(snap_name)s . " +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:223 +#, python-format +msgid "" +"create_iscsi_target: error when creating iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:240 +#, python-format +msgid "" +"remove_iscsi_target: error when deleting iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:255 +#, python-format +msgid "" +"add_disk_to_target: error adding disk associated to volume : %(vol_name)s" +" to the target name: %(tar_name)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:273 +#, python-format +msgid "" +"copy_vhd_disk: error when copying disk from source path : %(src_path)s to" +" destination path: %(dest_path)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:290 +#, python-format +msgid "" +"extend: error when extending the volume: %(vol_name)s .WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/flows/common.py:52 +#, python-format +msgid "Restoring source %(source_volid)s status to %(status)s" +msgstr "" + +#: cinder/volume/flows/common.py:58 +#, python-format +msgid "" +"Failed setting source volume %(source_volid)s back to its initial " +"%(source_status)s status" +msgstr "" + +#: cinder/volume/flows/common.py:83 +#, python-format +msgid "Updating volume: %(volume_id)s with %(update)s due to: %(reason)s" +msgstr "" + +#: cinder/volume/flows/common.py:90 +#: cinder/volume/flows/manager/create_volume.py:676 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(update)s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:81 +#, python-format +msgid "Originating snapshot status must be one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:103 +#, python-format +msgid "" +"Unable to create a volume from an originating source volume when its " +"status is not one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:126 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than the snapshot size " +"%(snap_size)sGB. They must be >= original snapshot size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:135 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than original volume size " +"%(source_size)sGB. They must be >= original volume size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:144 +#, python-format +msgid "Volume size %(size)s must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:186 +#, python-format +msgid "" +"Size of specified image %(image_size)sGB is larger than volume size " +"%(volume_size)sGB." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:194 +#, python-format +msgid "" +"Volume size %(volume_size)sGB cannot be smaller than the image minDisk " +"size %(min_disk)sGB." 
+msgstr "" + +#: cinder/volume/flows/api/create_volume.py:212 +#, python-format +msgid "Metadata property key %s greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:217 +#, python-format +msgid "Metadata property key %s value greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:254 +#, python-format +msgid "Availability zone '%s' is invalid" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:267 +msgid "Volume must be in the same availability zone as the snapshot" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:276 +msgid "Volume must be in the same availability zone as the source volume" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:315 +msgid "Volume type will be changed to be the same as the source volume." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:463 +#, python-format +msgid "Failed destroying volume entry %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:546 +#, python-format +msgid "Failed rolling back quota for %s reservations" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:590 +#, python-format +msgid "Failed to update quota for deleting volume: %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:678 +#: cinder/volume/flows/manager/create_volume.py:192 +#, python-format +msgid "Volume %s: create failed" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:682 +msgid "Unexpected build error:" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:105 +#, python-format +msgid "" +"Volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d due to " +"%(reason)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:124 +#, python-format +msgid "Volume %s: re-scheduled" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:141 +#, python-format +msgid "Updating volume %(volume_id)s with %(update)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:146 +#, python-format +msgid "Volume %s: resetting 'creating' status failed." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:165 +#, python-format +msgid "Volume %s: rescheduling failed" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:308 +#, python-format +msgid "" +"Failed notifying about the volume action %(event)s for volume " +"%(volume_id)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:345 +#, python-format +msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:347 +#, python-format +msgid "" +"Failed updating volume %(vol_id)s metadata using the provided " +"%(src_type)s %(src_id)s metadata" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:405 +#, python-format +msgid "" +"Failed fetching snapshot %(snapshot_id)s bootable flag using the provided" +" glance snapshot %(snapshot_ref_id)s volume reference" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:418 +#, python-format +msgid "Marking volume %s as bootable." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:421 +#, python-format +msgid "Failed updating volume %(volume_id)s bootable flag to true" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:448 +#, python-format +msgid "" +"Attempting download of %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s." 
+msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:455 +#: cinder/volume/flows/manager/create_volume.py:466 +#, python-format +msgid "" +"Failed to copy image %(image_id)s to volume: %(volume_id)s, error: " +"%(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:461 +#, python-format +msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:475 +#, python-format +msgid "" +"Downloaded image %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s successfully." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:512 +#, python-format +msgid "" +"Creating volume glance metadata for volume %(volume_id)s backed by image " +"%(image_id)s with: %(vol_metadata)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:526 +#, python-format +msgid "" +"Cloning %(volume_id)s from image %(image_id)s at location " +"%(image_location)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:552 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(updates)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:574 +#, python-format +msgid "Unable to create volume. Volume driver %s not initialized" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:588 +#, python-format +msgid "" +"Volume %(volume_id)s: being created using %(functor)s with specification:" +" %(volume_spec)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:611 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with creation provided " +"model %(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:619 +#, python-format +msgid "Volume %s: creating export" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:633 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with driver provided model " +"%(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:680 +#, python-format +msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" +msgstr "" + +#~ msgid "Error retrieving volume status: %s" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get system name" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get storage pool data" +#~ msgstr "" + +#~ msgid "Cannot find any Fibre Channel HBAs" +#~ msgstr "" + +#~ msgid "Volume status must be available or error" +#~ msgstr "Le status du volume doit être disponible" + +#~ msgid "No backend config with id %s" +#~ msgstr "" + +#~ msgid "No sm_flavor called %s" +#~ msgstr "" + +#~ msgid "No sm_volume with id %s" +#~ msgstr "" + +#~ msgid "Error: %s" +#~ msgstr "Erreur interceptée : %s" + +#~ msgid "Unexpected state while cloning %s" +#~ msgstr "Erreur imprévue lors de l'éxecution de la commande" + +#~ msgid "iSCSI device not found at %s" +#~ msgstr "" + +#~ msgid "Fibre Channel device not found." 
+#~ msgstr "" + +#~ msgid "Uncaught exception" +#~ msgstr "Reçu exception : %s" + +#~ msgid "Out reactor registered" +#~ msgstr "" + +#~ msgid "CONSUMER GOT %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT QUEUED %(data)s" +#~ msgstr "" + +#~ msgid "Could not create IPC directory %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT %(data)s" +#~ msgstr "" + +#~ msgid "May specify only one of snapshot, imageRef or source volume" +#~ msgstr "" + +#~ msgid "Volume size cannot be lesser than the Snapshot size" +#~ msgstr "" + +#~ msgid "Unable to clone volumes that are in an error state" +#~ msgstr "" + +#~ msgid "Clones currently must be >= original volume size." +#~ msgstr "" + +#~ msgid "Volume size '%s' must be an integer and greater than 0" +#~ msgstr "" + +#~ msgid "Size of specified image is larger than volume size." +#~ msgstr "" + +#~ msgid "Image minDisk size is larger than the volume size." +#~ msgstr "" + +#~ msgid "" +#~ msgstr "" + +#~ msgid "Availability zone is invalid" +#~ msgstr "" + +#~ msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +#~ msgstr "volume %(vol_name)s: cŕeation d'un volume logique de %(vol_size)sG" + +#~ msgid "volume %s: creating from snapshot" +#~ msgstr "volume %s: exportation en cours" + +#~ msgid "volume %s: creating from existing volume" +#~ msgstr "volume %s: exportation en cours" + +#~ msgid "volume %s: creating from image" +#~ msgstr "volume %s: création" + +#~ msgid "volume %s: creating" +#~ msgstr "volume %s: création" + +#~ msgid "Setting volume: %s status to error after failed image copy." +#~ msgstr "" + +#~ msgid "Unexpected Error: " +#~ msgstr "" + +#~ msgid "volume %s: creating export" +#~ msgstr "volume %s: exportation en cours" + +#~ msgid "volume %s: create failed" +#~ msgstr "volume %s: création" + +#~ msgid "volume %s: created successfully" +#~ msgstr "volume %s: crée avec succès" + +#~ msgid "volume %s: Error trying to reschedule create" +#~ msgstr "" + +#~ msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +#~ msgstr "" + +#~ msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +#~ msgstr "Tentative de suppression d'une console non existente %(console_id)s." + +#~ msgid "Downloaded image %(image_id)s to %(volume_id)s successfully." +#~ msgstr "" + +#~ msgid "Array Mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "LUN %(lun)s of size %(size)s MB is created." +#~ msgstr "" + +#~ msgid "Array mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "Failed to attach iser target for volume %(volume_id)s." 
+#~ msgstr "" + +#~ msgid "Fetching %s" +#~ msgstr "Récupèration de %s" + +#~ msgid "Link Local address is not found.:%s" +#~ msgstr "L'adresse du lien local n'a pas été trouvé :%s" + +#~ msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +#~ msgstr "Impossible de trouver l'IP du lien local de %(interface)s :%(ex)s" + +#~ msgid "Started %(name)s on %(_host)s:%(_port)s" +#~ msgstr "" + +#~ msgid "Unable to find a Fibre Channel volume device" +#~ msgstr "" + +#~ msgid "Volume device not found at %s" +#~ msgstr "" + +#~ msgid "Unable to find Volume Group: %s" +#~ msgstr "Impossible de détacher le volume %s" + +#~ msgid "Failed to create Volume Group: %s" +#~ msgstr "Impossible de trouver le volume %s" + +#~ msgid "snapshot %(snap_name)s: creating" +#~ msgstr "" + +#~ msgid "Running with CoraidDriver for ESM EtherCLoud" +#~ msgstr "" + +#~ msgid "Update session cookie %(session)s" +#~ msgstr "" + +#~ msgid "Message : %(message)s" +#~ msgstr "" + +#~ msgid "Error while trying to set group: %(message)s" +#~ msgstr "" + +#~ msgid "Unable to find group: %(group)s" +#~ msgstr "Impossible de trouver SR du VDB %s" + +#~ msgid "ESM urlOpen error" +#~ msgstr "" + +#~ msgid "JSON Error" +#~ msgstr "" + +#~ msgid "Request without URL" +#~ msgstr "" + +#~ msgid "Configure data : %s" +#~ msgstr "" + +#~ msgid "Configure response : %s" +#~ msgstr "réponse %s" + +#~ msgid "Unable to retrive volume infos for volume %(volname)s" +#~ msgstr "" + +#~ msgid "Cannot login on Coraid ESM" +#~ msgstr "" + +#~ msgid "Fail to create volume %(volname)s" +#~ msgstr "Impossible de trouver le volume %s" + +#~ msgid "Failed to delete volume %(volname)s" +#~ msgstr "Impossible de trouver le volume %s" + +#~ msgid "Failed to Create Snapshot %(snapname)s" +#~ msgstr "Échec du redémarrage de l'instance" + +#~ msgid "Failed to Delete Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "Failed to Create Volume from Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "fmt = %(fmt)s backed by: %(backing_file)s" +#~ msgstr "" + +#~ msgid "Expected image to be in raw format, but is %s" +#~ msgstr "" + +#~ msgid "volume group %s doesn't exist" +#~ msgstr "Le groupe de volume %s n'existe pas" + +#~ msgid "Error retrieving volume stats: %s" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Could not get system name" +#~ msgstr "" + +#~ msgid "CPG (%s) must be in a domain" +#~ msgstr "" + +#~ msgid "Error populating default encryption types!" +#~ msgstr "" + +#~ msgid "Unexpected error while running command." +#~ msgstr "Erreur imprévue lors de l'éxecution de la commande" + +#~ msgid "Nexenta SA returned the error" +#~ msgstr "" + +#~ msgid "Ignored target group creation error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group member addition error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LU creation error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Copying metadata from snapshot %(snap_volume_id)s to %(volume_id)s" +#~ msgstr "" + +#~ msgid "Connection to glance failed" +#~ msgstr "La connexion à Glance a échoué" + +#~ msgid "Invalid snapshot" +#~ msgstr "Snapshot invalide" + +#~ msgid "Invalid input received" +#~ msgstr "Entrée invalide reçue" + +#~ msgid "Invalid volume type" +#~ msgstr "Type de volume invalide" -#: cinder/volume/san.py:496 -#, python-format -msgid "" -"Unexpected number of virtual ips for cluster %(cluster_name)s. 
" -"Result=%(_xml)s" -msgstr "" +#~ msgid "Invalid volume" +#~ msgstr "Volume invalide" -#: cinder/volume/san.py:549 -#, python-format -msgid "Volume info: %(volume_name)s => %(volume_attributes)s" -msgstr "" +#~ msgid "Invalid host" +#~ msgstr "" -#: cinder/volume/san.py:594 -msgid "local_path not supported" -msgstr "" +#~ msgid "Invalid auth key" +#~ msgstr "Snapshot invalide" -#: cinder/volume/san.py:626 -#, python-format -msgid "Could not determine project for volume %s, can't export" -msgstr "" +#~ msgid "Invalid metadata" +#~ msgstr "" -#: cinder/volume/san.py:696 -#, python-format -msgid "Payload for SolidFire API call: %s" -msgstr "" +#~ msgid "Invalid metadata size" +#~ msgstr "" -#: cinder/volume/san.py:713 -#, python-format -msgid "Call to json.loads() raised an exception: %s" -msgstr "" +#~ msgid "Migration error" +#~ msgstr "" -#: cinder/volume/san.py:718 -#, python-format -msgid "Results of SolidFire API call: %s" -msgstr "" +#~ msgid "Quota exceeded" +#~ msgstr "" -#: cinder/volume/san.py:732 -#, python-format -msgid "Found solidfire account: %s" -msgstr "" +#~ msgid "Connection to swift failed" +#~ msgstr "La connexion à Glance a échoué" -#: cinder/volume/san.py:746 -#, python-format -msgid "solidfire account: %s does not exist, create it..." -msgstr "" +#~ msgid "Volume migration failed" +#~ msgstr "" -#: cinder/volume/san.py:804 -msgid "Enter SolidFire create_volume..." -msgstr "" +#~ msgid "SSH command injection detected" +#~ msgstr "" -#: cinder/volume/san.py:846 -msgid "Leaving SolidFire create_volume" -msgstr "" +#~ msgid "Invalid qos specs" +#~ msgstr "" -#: cinder/volume/san.py:861 -msgid "Enter SolidFire delete_volume..." -msgstr "" +#~ msgid "debug in callback: %s" +#~ msgstr "Debug dans le rappel : %s" -#: cinder/volume/san.py:880 -#, python-format -msgid "Deleting volumeID: %s " -msgstr "" +#~ msgid "Expected object of type: %s" +#~ msgstr "" -#: cinder/volume/san.py:888 -msgid "Leaving SolidFire delete_volume" -msgstr "" +#~ msgid "timefunc: '%(name)s' took %(total_time).2f secs" +#~ msgstr "" -#: cinder/volume/san.py:891 -msgid "Executing SolidFire ensure_export..." -msgstr "" +#~ msgid "base image still has %s snapshots so not deleting base image" +#~ msgstr "" -#: cinder/volume/san.py:895 -msgid "Executing SolidFire create_export..." -msgstr "" +#~ msgid "Failed to rename migration destination volume %(vol)s to %(name)s" +#~ msgstr "" -#: cinder/volume/volume_types.py:49 cinder/volume/volume_types.py:108 -msgid "name cannot be None" -msgstr "" +#~ msgid "Resize volume \"%(name)s\" to %(size)s" +#~ msgstr "" -#: cinder/volume/volume_types.py:96 -msgid "id cannot be None" -msgstr "" +#~ msgid "Volume \"%(name)s\" resized. New size is %(size)s" +#~ msgstr "" -#: cinder/volume/xensm.py:55 -#, python-format -msgid "SR name = %s" -msgstr "" +#~ msgid "Invalid snapshot backing file format: %s" +#~ msgstr "" -#: cinder/volume/xensm.py:56 -#, python-format -msgid "Params: %s" -msgstr "" +#~ msgid "Extend volume from %(old_size) to %(new_size)" +#~ msgstr "" -#: cinder/volume/xensm.py:60 -#, python-format -msgid "Failed to create sr %s...continuing" -msgstr "" +#~ msgid "pool %s doesn't exist" +#~ msgstr "Le groupe de volume %s n'existe pas" -#: cinder/volume/xensm.py:62 -msgid "Create failed" -msgstr "" +#~ msgid "_update_volume_stats: Could not get system name." 
+#~ msgstr "" -#: cinder/volume/xensm.py:64 -#, python-format -msgid "SR UUID of new SR is: %s" -msgstr "" +#~ msgid "Disk not found: %s" +#~ msgstr "Sheepdog n'est pas actif : %s" -#: cinder/volume/xensm.py:71 -msgid "Failed to update db" -msgstr "" +#~ msgid "read timed out" +#~ msgstr "" -#: cinder/volume/xensm.py:80 -#, python-format -msgid "Failed to introduce sr %s...continuing" -msgstr "" +#~ msgid "check_for_setup_error." +#~ msgstr "" -#: cinder/volume/xensm.py:91 -#, python-format -msgid "Failed to reach backend %d" -msgstr "" +#~ msgid "check_for_setup_error: Can not get device type." +#~ msgstr "" -#: cinder/volume/xensm.py:100 -msgid "XenSMDriver requires xenapi connection" -msgstr "" +#~ msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +#~ msgstr "" -#: cinder/volume/xensm.py:110 -msgid "Failed to initiate session" -msgstr "" +#~ msgid "_get_device_type: Storage Pool must be configured." +#~ msgstr "" -#: cinder/volume/xensm.py:142 -#, python-format -msgid "Volume will be created in backend - %d" -msgstr "" +#~ msgid "create_volume:volume name: %s." +#~ msgstr "" -#: cinder/volume/xensm.py:154 -msgid "Failed to update volume in db" -msgstr "" +#~ msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +#~ msgstr "" -#: cinder/volume/xensm.py:157 -msgid "Unable to create volume" -msgstr "" +#~ msgid "create_export: volume name:%s" +#~ msgstr "" -#: cinder/volume/xensm.py:171 -msgid "Failed to delete vdi" -msgstr "" +#~ msgid "create_export:Volume %(name)s does not exist." +#~ msgstr "" -#: cinder/volume/xensm.py:177 -msgid "Failed to delete volume in db" -msgstr "" +#~ msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +#~ msgstr "" -#: cinder/volume/xensm.py:210 -msgid "Failed to find volume in db" -msgstr "" +#~ msgid "terminate_connection:Host does not exist. Host name:%(host)s." +#~ msgstr "" -#: cinder/volume/xensm.py:221 -msgid "Failed to find backend in db" -msgstr "" +#~ msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +#~ msgstr "" -#: cinder/volume/nexenta/__init__.py:27 -msgid "Nexenta SA returned the error" -msgstr "" +#~ msgid "create_snapshot:Device does not support snapshot." +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:64 -#, python-format -msgid "Sending JSON data: %s" -msgstr "" +#~ msgid "create_snapshot:Resource pool needs 1GB valid size at least." +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:69 -#, python-format -msgid "Auto switching to HTTPS connection to %s" -msgstr "" +#~ msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:75 -msgid "No headers in server response" -msgstr "" +#~ msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s" +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:76 -msgid "Bad response from server" -msgstr "" +#~ msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:79 -#, fuzzy, python-format -msgid "Got response: %s" -msgstr "réponse %s" +#~ msgid "delete_snapshot:Device does not support snapshot." +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:96 -#, fuzzy, python-format -msgid "Volume %s does not exist in Nexenta SA" -msgstr "Le groupe de volume %s n'existe pas" +#~ msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:180 -msgid "" -"Call to local_path should not happen. 
Verify that use_local_volumes flag " -"is turned off." -msgstr "" +#~ msgid "_check_conf_file: %s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:202 -#, python-format -msgid "Ignored target creation error \"%s\" while ensuring export" -msgstr "" +#~ msgid "Write login information to xml error. %s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:210 -#, python-format -msgid "Ignored target group creation error \"%s\" while ensuring export" -msgstr "" +#~ msgid "_get_login_info error. %s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:219 -#, python-format -msgid "Ignored target group member addition error \"%s\" while ensuring export" -msgstr "" +#~ msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:227 -#, python-format -msgid "Ignored LU creation error \"%s\" while ensuring export" -msgstr "" +#~ msgid "_get_lun_set_info:%s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:237 -#, python-format -msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" -msgstr "" +#~ msgid "_get_iscsi_info:%s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:273 -#, python-format -msgid "" -"Got error trying to destroy target group %(target_group)s, assuming it is" -" already gone: %(exc)s" -msgstr "" +#~ msgid "CLI command:%s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:280 -#, python-format -msgid "" -"Got error trying to delete target %(target)s, assuming it is already " -"gone: %(exc)s" -msgstr "" +#~ msgid "_execute_cli:%s" +#~ msgstr "" -#~ msgid "Unable to locate account %(account_name) on Solidfire device" +#~ msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" #~ msgstr "" -#~ msgid "Zone %(zone_id)s could not be found." +#~ msgid "_get_tgt_iqn:iSCSI IP is %s." #~ msgstr "" -#~ msgid "Cinder access parameters were not specified." +#~ msgid "_get_tgt_iqn:iSCSI target iqn is:%s" #~ msgstr "" -#~ msgid "Virtual Storage Array %(id)d could not be found." +#~ msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" #~ msgstr "" -#~ msgid "Virtual Storage Array %(name)s could not be found." +#~ msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" #~ msgstr "" -#~ msgid "Detected more than one volume with name %(vol_name)" +#~ msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." #~ msgstr "" -#~ msgid "Detected existing vlan with id %(vlan)" +#~ msgid "Ignored target creation error while ensuring export" #~ msgstr "" -#~ msgid "" -#~ "Attempting to grab semaphore \"%(lock)s\" " -#~ "for method \"%(method)s\"...lock" +#~ msgid "Ignored target group creation error while ensuring export" #~ msgstr "" -#~ msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgid "Ignored target group member addition error while ensuring export" #~ msgstr "" -#~ msgid "" -#~ "Attempting to grab file lock " -#~ "\"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgid "Ignored LU creation error while ensuring export" #~ msgstr "" -#~ msgid "Got file lock \"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgid "Ignored LUN mapping entry addition error while ensuring export" #~ msgstr "" -#~ msgid "Parent group id and group id cannot be same" +#~ msgid "Invalid source volume %(reason)s." #~ msgstr "" -#~ msgid "No body provided" +#~ msgid "The request is invalid." +#~ msgstr "La requête est invalide." + +#~ msgid "Volume %(volume_id)s persistence file could not be found." 
#~ msgstr "" -#~ msgid "Create VSA %(display_name)s of type %(vc_type)s" +#~ msgid "No disk at %(location)s" #~ msgstr "" -#~ msgid "Delete VSA with id: %s" +#~ msgid "Class %(class_name)s could not be found: %(exception)s" #~ msgstr "" -#~ msgid "Associate address %(ip)s to VSA %(id)s" +#~ msgid "Action not allowed." #~ msgstr "" -#~ msgid "Disassociate address from VSA %(id)s" +#~ msgid "Key pair %(key_name)s already exists." #~ msgstr "" -#~ msgid "%(obj)s with ID %(id)s not found" +#~ msgid "Migration error: %(reason)s" #~ msgstr "" -#~ msgid "" -#~ "%(obj)s with ID %(id)s belongs to " -#~ "VSA %(own_vsa_id)s and not to VSA " -#~ "%(vsa_id)s." +#~ msgid "Maximum volume/snapshot size exceeded" #~ msgstr "" -#~ msgid "Index. vsa_id=%(vsa_id)s" +#~ msgid "3PAR Host already exists: %(err)s. %(info)s" #~ msgstr "" -#~ msgid "Detail. vsa_id=%(vsa_id)s" +#~ msgid "Backup volume %(volume_id)s type not recognised." #~ msgstr "" -#~ msgid "Create. vsa_id=%(vsa_id)s, body=%(body)s" +#~ msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s" #~ msgstr "" -#~ msgid "Create volume of %(size)s GB from VSA ID %(vsa_id)s" +#~ msgid "ssh_read: Read SSH timeout" #~ msgstr "" -#~ msgid "Update %(obj)s with id: %(id)s, changes: %(changes)s" +#~ msgid "do_setup." #~ msgstr "" -#~ msgid "Delete. vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgid "create_volume: volume name: %s." #~ msgstr "" -#~ msgid "Show. vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgid "delete_volume: volume name: %s." #~ msgstr "" -#~ msgid "Index instances for VSA %s" +#~ msgid "create_cloned_volume: src volume: %(src)s tgt volume: %(tgt)s" #~ msgstr "" -#~ msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances" +#~ msgid "create_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" #~ msgstr "" -#~ msgid "" -#~ "Instance quota exceeded. You cannot run" -#~ " any more instances of this type." +#~ msgid "delete_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" #~ msgstr "" -#~ msgid "Going to try to soft delete %s" +#~ msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s" #~ msgstr "" -#~ msgid "No host for instance %s, deleting immediately" +#~ msgid "_update_volume_stats: Updating volume stats" #~ msgstr "" -#~ msgid "Going to try to stop %s" +#~ msgid "restore finished." #~ msgstr "" -#~ msgid "Going to try to start %s" +#~ msgid "Error encountered during initialization of driver: %s" #~ msgstr "" -#~ msgid "" -#~ "Going to force the deletion of the" -#~ " vm %(instance_uuid)s, even if it is" -#~ " deleted" +#~ msgid "Unabled to update stats, driver is uninitialized" #~ msgstr "" -#~ msgid "" -#~ "Instance %(instance_uuid)s did not exist " -#~ "in the DB, but I will shut " -#~ "it down anyway using a special " -#~ "context" +#~ msgid "Snapshot file at %s does not exist." #~ msgstr "" -#~ msgid "exception terminating the instance %(instance_uuid)s" +#~ msgid "_create_copy: Source vdisk %s does not exist" #~ msgstr "" -#~ msgid "trying to destroy already destroyed instance: %s" -#~ msgstr "Tentative de destruction d'une instance déjà détruite: %s" +#~ msgid "Login to 3PAR array invalid" +#~ msgstr "" -#~ msgid "" -#~ "Instance %(name)s found in database but" -#~ " not known by hypervisor. Setting " -#~ "power state to NOSTATE" +#~ msgid "There are no datastores present under %s." #~ msgstr "" -#~ msgid "" -#~ "Detected instance with name label " -#~ "'%(name_label)s' which is marked as " -#~ "DELETED but still present on host." +#~ msgid "Size for volume: %s not found, skipping secure delete." 
#~ msgstr "" -#~ msgid "" -#~ "Destroying instance with name label " -#~ "'%(name_label)s' which is marked as " -#~ "DELETED but still present on host." +#~ msgid "Could not find attribute for LUN named %s" +#~ msgstr "Imposible de trouver une exportation iSCSI pour le volume %s" + +#~ msgid "Cleaning up incomplete backup operations" #~ msgstr "" -#~ msgid "SQL connection failed (%(connstring)s). %(attempts)d attempts left." +#~ msgid "Resetting volume %s to available (was backing-up)" #~ msgstr "" -#~ msgid "Can't downgrade without losing data" +#~ msgid "Resetting volume %s to error_restoring (was restoring-backup)" #~ msgstr "" -#~ msgid "Instance %(instance_id)s not found" +#~ msgid "Resetting backup %s to error (was creating)" #~ msgstr "" -#~ msgid "Network %s has active ports, cannot delete" +#~ msgid "Resetting backup %s to available (was restoring)" #~ msgstr "" -#~ msgid "No fixed IPs to deallocate for vif %sid" +#~ msgid "Resuming delete on backup: %s" #~ msgstr "" -#~ msgid "" -#~ "AMQP server on %(fl_host)s:%(fl_port)d is " -#~ "unreachable: %(e)s. Trying again in " -#~ "%(fl_intv)d seconds." +#~ msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" #~ msgstr "" -#~ msgid "Unable to connect to AMQP server after %(tries)d tries. Shutting down." +#~ msgid "create_backup finished. backup: %s" #~ msgstr "" -#~ msgid "Failed to fetch message from queue: %s" +#~ msgid "delete_backup started, backup: %s" #~ msgstr "" -#~ msgid "Initing the Adapter Consumer for %s" -#~ msgstr "Initialisation du Consomateur d'Adapteur pour %s" +#~ msgid "delete_backup finished, backup %s deleted" +#~ msgstr "" -#~ msgid "Created \"%(exchange)s\" fanout exchange with \"%(key)s\" routing key" +#~ msgid "JSON transfer Error" #~ msgstr "" -#~ msgid "Exception while processing consumer" +#~ msgid "create volume error: %(err)s" #~ msgstr "" -#~ msgid "Creating \"%(exchange)s\" fanout exchange" +#~ msgid "Create snapshot error." #~ msgstr "" -#~ msgid "topic is %s" -#~ msgstr "le sujet est %s" +#~ msgid "Create luncopy error." +#~ msgstr "" -#~ msgid "message %s" -#~ msgstr "message %s" +#~ msgid "_find_host_lun_id transfer data error! " +#~ msgstr "" -#~ msgid "" -#~ "Cannot confirm tmpfile at %(ipath)s is" -#~ " on same shared storage between " -#~ "%(src)s and %(dest)s." +#~ msgid "ssh_read: Read SSH timeout." #~ msgstr "" -#~ msgid "" -#~ "Unable to migrate %(instance_id)s to " -#~ "%(dest)s: Lack of memory(host:%(avail)s <= " -#~ "instance:%(mem_inst)s)" +#~ msgid "There are no hosts in the inventory." #~ msgstr "" -#~ msgid "" -#~ "Unable to migrate %(instance_id)s to " -#~ "%(dest)s: Lack of disk(host:%(available)s <=" -#~ " instance:%(necessary)s)" +#~ msgid "Unable to create volume: %(vol)s on the hosts: %(hosts)s." #~ msgstr "" -#~ msgid "Driver Method %(driver_method)s missing: %(e)s.Reverting to schedule()" +#~ msgid "Successfully cloned new backing: %s." #~ msgstr "" -#~ msgid "Setting instance %(instance_uuid)s to ERROR state." +#~ msgid "Successfully reverted clone: %(clone)s to snapshot: %(snapshot)s." #~ msgstr "" -#~ msgid "_filter_hosts: %(request_spec)s" +#~ msgid "Copying backing files from %(src)s to %(dest)s." #~ msgstr "" -#~ msgid "Filter hosts for drive type %s" +#~ msgid "Initiated copying of backing via task: %s." #~ msgstr "" -#~ msgid "Host %s has no free capacity. Skip" +#~ msgid "Successfully copied backing to %s." #~ msgstr "" -#~ msgid "Filter hosts: %s" +#~ msgid "Registering backing at path: %s to inventory." 
#~ msgstr "" -#~ msgid "Must implement host selection mechanism" +#~ msgid "Initiated registring backing, task: %s." #~ msgstr "" -#~ msgid "Maximum number of hosts selected (%d)" +#~ msgid "Successfully registered backing: %s." #~ msgstr "" -#~ msgid "Selected excessive host %(host)s" +#~ msgid "Reverting backing to snapshot: %s." #~ msgstr "" -#~ msgid "Provision volume %(name)s of size %(size)s GB on host %(host)s" +#~ msgid "Initiated reverting snapshot via task: %s." #~ msgstr "" -#~ msgid "volume_params %(volume_params)s" +#~ msgid "Successfully reverted to snapshot: %s." #~ msgstr "" -#~ msgid "%(i)d: Volume %(name)s" +#~ msgid "Successfully copied disk data to: %s." #~ msgstr "" -#~ msgid "Attempting to spawn %(num_volumes)d volume(s)" +#~ msgid "Error(s): %s occurred in the call to RetrieveProperties." #~ msgstr "" -#~ msgid "Error creating volumes" +#~ msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" #~ msgstr "" -#~ msgid "Non-VSA volume %d" +#~ msgid "Deploy v1 of the Cinder API. " #~ msgstr "" -#~ msgid "Spawning volume %(volume_id)s with drive type %(drive_type)s" +#~ msgid "Deploy v2 of the Cinder API. " #~ msgstr "" -#~ msgid "Error creating volume" +#~ msgid "_read_xml:%s" #~ msgstr "" -#~ msgid "No capability selected for volume of size %(size)s" +#~ msgid "request ip info is %s." #~ msgstr "" -#~ msgid "Host %s:" +#~ msgid "new str info is %s." #~ msgstr "" -#~ msgid "" -#~ "\tDrive %(qosgrp)-25s: total %(total)2s, " -#~ "used %(used)2s, free %(free)2s. Available " -#~ "capacity %(avail)-5s" +#~ msgid "Failed to create iser target for volume %(volume_id)s." #~ msgstr "" -#~ msgid "" -#~ "\t LeastUsedHost: Best host: %(best_host)s." -#~ " (used capacity %(min_used)s)" +#~ msgid "Failed to remove iser target for volume %(volume_id)s." #~ msgstr "" -#~ msgid "" -#~ "\t MostAvailCap: Best host: %(best_host)s. " -#~ "(available %(max_avail)s %(type_str)s)" +#~ msgid "rtstool is not installed correctly" #~ msgstr "" -#~ msgid "(%(nm)s) publish (key: %(routing_key)s) %(message)s" -#~ msgstr "(%(nm)s) publication (key: %(routing_key)s) %(message)s" +#~ msgid "Creating iser_target for: %s" +#~ msgstr "" -#~ msgid "Publishing to route %s" -#~ msgstr "Publication vers la route %s" +#~ msgid "Failed to create iser target for volume id:%(vol_id)s: %(e)s" +#~ msgstr "" -#~ msgid "Declaring queue %s" -#~ msgstr "Déclaration de la queue %s" +#~ msgid "Removing iser_target for: %s" +#~ msgstr "" -#~ msgid "Declaring exchange %s" -#~ msgstr "Déclaration de l'échange %s" +#~ msgid "Failed to remove iser target for volume id:%(vol_id)s: %(e)s" +#~ msgstr "" -#~ msgid "Binding %(queue)s to %(exchange)s with key %(routing_key)s" +#~ msgid "Volume %s does not exist, it seems it was already deleted" #~ msgstr "" -#~ "Rattachement de %(queue)s vers %(exchange)s" -#~ " avec la clef %(routing_key)s" -#~ msgid "Getting from %(queue)s: %(message)s" -#~ msgstr "Récupération depuis %(queue)s: %(message)s" +#~ msgid "Executing zfs send/recv on the appliance" +#~ msgstr "" -#~ msgid "Test: Emulate wrong VSA name. Raise" +#~ msgid "zfs send/recv done, new volume %s created" #~ msgstr "" -#~ msgid "Test: Emulate DB error. 
Raise" +#~ msgid "Failed to delete temp snapshot %(volume)s@%(snapshot)s" #~ msgstr "" -#~ msgid "Test: user_data = %s" +#~ msgid "Failed to delete zfs recv snapshot %(volume)s@%(snapshot)s" #~ msgstr "" -#~ msgid "_create: param=%s" +#~ msgid "rbd export-diff failed - %s" #~ msgstr "" -#~ msgid "Host %s" +#~ msgid "rbd import-diff failed - %s" #~ msgstr "" -#~ msgid "Test: provision vol %(name)s on host %(host)s" +#~ msgid "%s is not on GPFS. Perhaps GPFS not mounted." #~ msgstr "" -#~ msgid "\t vol=%(vol)s" +#~ msgid "Folder %s does not exist, it seems it was already deleted." #~ msgstr "" -#~ msgid "Test: VSA update request: vsa_id=%(vsa_id)s values=%(values)s" +#~ msgid "No 'os-update_readonly_flag' was specified in request." #~ msgstr "" -#~ msgid "Test: Volume create: %s" +#~ msgid "Volume 'readonly' flag must be specified in request as a boolean." #~ msgstr "" -#~ msgid "Test: Volume get request: id=%(volume_id)s" +#~ msgid "ISER provider_location not stored, using discovery" #~ msgstr "" -#~ msgid "Test: Volume update request: id=%(volume_id)s values=%(values)s" +#~ msgid "Could not find iSER export for volume %s" #~ msgstr "" -#~ msgid "Test: Volume get: id=%(volume_id)s" +#~ msgid "ISER Discovery: Found %s" #~ msgstr "" -#~ msgid "Task [%(name)s] %(task)s status: success %(result)s" -#~ msgstr "Tâche [%(name)s] %(task)s état : succès %(result)s" +#~ msgid "Failed to access the device on the path %(path)s: %(error)s." +#~ msgstr "" -#~ msgid "Task [%(name)s] %(task)s status: %(status)s %(error_info)s" -#~ msgstr "Tâche [%(name)s] %(task)s état : %(status)s %(error_info)s" +#~ msgid "iSER device not found at %s" +#~ msgstr "" -#~ msgid "Unable to get updated status: %s" +#~ msgid "Found iSER node %(host_device)s (after %(tries)s rescans)." #~ msgstr "" -#~ msgid "" -#~ "deactivate_node is called for " -#~ "node_id = %(id)s node_ip = %(ip)s" +#~ msgid "Skipping ensure_export. No iser_target provisioned for volume: %s" #~ msgstr "" -#~ msgid "virsh said: %r" -#~ msgstr "virsh a retourné : %r" +#~ msgid "Skipping remove_export. No iser_target provisioned for volume: %s" +#~ msgstr "" -#~ msgid "cool, it's a device" -#~ msgstr "super, c'est un device" +#~ msgid "Downloading image: %s from glance image server." +#~ msgstr "" -#~ msgid "Unable to read LXC console" +#~ msgid "Uploading image: %s to the Glance image server." #~ msgstr "" -#~ msgid "" -#~ "to xml...\n" -#~ ":%s " +#~ msgid "Invalid request body" #~ msgstr "" -#~ msgid "During wait running, %s disappeared." +#~ msgid "enter: _get_host_from_connector: prefix %s" #~ msgstr "" -#~ msgid "Instance %s running successfully." +#~ msgid "Schedule volume flow not retrieved" #~ msgstr "" -#~ msgid "The nwfilter(%(instance_secgroup_filter_name)s) is not found." 
+#~ msgid "Failed to successfully complete schedule volume using flow: %s" #~ msgstr "" -#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image verification failed" +#~ msgid "Create volume flow not retrieved" #~ msgstr "" -#~ msgid "" -#~ "%(container_format)s-%(id)s (%(base_file)s): image " -#~ "verification skipped, no hash stored" +#~ msgid "Failed to successfully complete create volume workflow" #~ msgstr "" -#~ msgid "%(container_format)s-%(id)s (%(base_file)s): checking" +#~ msgid "Expected volume result not found" #~ msgstr "" -#~ msgid "" -#~ "%(container_format)s-%(id)s (%(base_file)s): in use:" -#~ " on this node %(local)d local, " -#~ "%(remote)d on other nodes" +#~ msgid "Manager volume flow not retrieved" #~ msgstr "" -#~ msgid "" -#~ "%(container_format)s-%(id)s (%(base_file)s): warning " -#~ "-- an absent base file is in " -#~ "use! instances: %(instance_list)s" +#~ msgid "Failed to successfully complete manager volume workflow" #~ msgstr "" -#~ msgid "" -#~ "%(container_format)s-%(id)s (%(base_file)s): in: on" -#~ " other nodes (%(remote)d on other " -#~ "nodes)" +#~ msgid "Unable to update stats, driver is uninitialized" #~ msgstr "" -#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image is not in use" +#~ msgid "Bad reponse from server: %s" #~ msgstr "" -#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image is in use" +#~ msgid "%(flow)s has moved into state %(state)s from state %(old_state)s" #~ msgstr "" -#~ msgid "Created VM %s..." -#~ msgstr "VM %s crée..." +#~ msgid "No request spec, will not reschedule" +#~ msgstr "" -#~ msgid "Created VM %(instance_name)s as %(vm_ref)s." -#~ msgstr "VM %(instance_name)s crée en tant que %(vm_ref)s." +#~ msgid "No retry filter property or associated retry info, will not reschedule" +#~ msgstr "" -#~ msgid "Creating a CDROM-specific VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +#~ msgid "Retry info not present, will not reschedule" #~ msgstr "" -#~ msgid "" -#~ "Created a CDROM-specific VBD %(vbd_ref)s" -#~ " for VM %(vm_ref)s, VDI %(vdi_ref)s." +#~ msgid "Clear capabilities" #~ msgstr "" -#~ msgid "No primary VDI found for%(vm_ref)s" +#~ msgid "This usually means the volume was never succesfully created." #~ msgstr "" -#~ msgid "Fetching image %(image)s" +#~ msgid "setting LU uppper (end) limit to %s" #~ msgstr "" -#~ msgid "Image Type: %s" +#~ msgid "Can't find lun or lun goup in array" #~ msgstr "" -#~ msgid "ISO: Found sr possibly containing the ISO image" +#~ msgid "Volume to be restored to is smaller than the backup to be restored" #~ msgstr "" -#~ msgid "instance %s: Failed to fetch glance image" +#~ msgid "Volume driver '%(driver)s' not initialized." #~ msgstr "" -#~ msgid "Creating VBD for VDI %s ... " -#~ msgstr "Création de VBD pour la VDI %s ... " +#~ msgid "in looping call" +#~ msgstr "" -#~ msgid "Creating VBD for VDI %s done." -#~ msgstr "La création de VBD pour la VDI %s est terminée." +#~ msgid "Is the appropriate service running?" +#~ msgstr "" -#~ msgid "VBD.unplug successful first time." -#~ msgstr "VBD.unplug terminé dés la première tentative." +#~ msgid "Could not find another host" +#~ msgstr "" -#~ msgid "VBD.unplug rejected: retrying..." -#~ msgstr "VBD.unplug refusé : nouvel essai..." +#~ msgid "Not enough allocatable volume gigabytes remaining" +#~ msgstr "" -#~ msgid "Not sleeping anymore!" +#~ msgid "Unable to update stats on non-intialized Volume Group: %s" #~ msgstr "" -#~ msgid "VBD.unplug successful eventually." -#~ msgstr "VBD.unplug à enfin été achevée." 
+#~ msgid "do_setup: Pool %s does not exist" +#~ msgstr "" -#~ msgid "Ignoring XenAPI.Failure in VBD.unplug: %s" -#~ msgstr "XenAPI.Failure ignorée dans VBD.unplug: %s" +#~ msgid "migrate_volume started with more than one vdisk copy" +#~ msgstr "" -#~ msgid "Ignoring XenAPI.Failure %s" -#~ msgstr "XenAPI.Failure %s ignorée" +#~ msgid "migrate_volume: Could not get vdisk copy data" +#~ msgstr "" -#~ msgid "Starting instance %s" +#~ msgid "Selected datastore: %s for the volume." #~ msgstr "" -#~ msgid "instance %s: Failed to spawn" -#~ msgstr "instance %s: n'a pas pu être crée" +#~ msgid "There are no valid datastores present under %s." +#~ msgstr "" -#~ msgid "Instance %s failed to spawn - performing clean-up" +#~ msgid "Unable to create volume, driver not initialized" #~ msgstr "" -#~ msgid "instance %s: Failed to spawn - Unable to create VM" +#~ msgid "Migration %(migration_id)s could not be found." #~ msgstr "" -#~ msgid "" -#~ "Auto configuring disk for instance " -#~ "%(instance_uuid)s, attempting to resize " -#~ "partition..." +#~ msgid "Bad driver response status: %(status)s" +#~ msgstr "" + +#~ msgid "Instance %(instance_id)s could not be found." #~ msgstr "" -#~ msgid "Invalid value for injected_files: '%s'" +#~ msgid "Volume retype failed: %(reason)s" #~ msgstr "" -#~ msgid "Starting VM %s..." -#~ msgstr "Démarrage de la VM %s..." +#~ msgid "SIGTERM received" +#~ msgstr "" -#~ msgid "Spawning VM %(instance_uuid)s created %(vm_ref)s." +#~ msgid "Child %(pid)d exited with status %(code)d" #~ msgstr "" -#~ msgid "Instance %s: waiting for running" +#~ msgid "_wait_child %d" #~ msgstr "" -#~ msgid "Instance %s: running" +#~ msgid "wait wrap.failed %s" +#~ msgstr "\"Non trouvé\" remonté : %s" + +#~ msgid "" +#~ "Report interval must be less than " +#~ "service down time. Current config: " +#~ ". Setting " +#~ "service_down_time to: %(new_service_down_time)s" #~ msgstr "" -#~ msgid "Resources to remove:%s" +#~ msgid "Failed to update iscsi target for volume %(name)s." #~ msgstr "" -#~ msgid "Removing VDI %(vdi_ref)s(uuid:%(vdi_to_remove)s)" +#~ msgid "Updating iscsi target: %s" #~ msgstr "" -#~ msgid "Skipping VDI destroy for %s" +#~ msgid "Failed to update iscsi target %(name)s: %(e)s" #~ msgstr "" -#~ msgid "Unable to Snapshot instance %(instance_uuid)s: %(exc)s" +#~ msgid "Caught '%(exception)s' exception." #~ msgstr "" -#~ msgid "Updating instance '%(instance_uuid)s' progress to %(progress)d" +#~ msgid "Get code level failed" #~ msgstr "" -#~ msgid "Resize instance %s complete" +#~ msgid "do_setup: Could not get system name" #~ msgstr "" -#~ msgid "domid changed from %(olddomid)s to %(newdomid)s" +#~ msgid "Failed to get license information." #~ msgstr "" -#~ msgid "VM %(instance_uuid)s already halted,skipping shutdown..." +#~ msgid "do_setup: No configured nodes" #~ msgstr "" -#~ msgid "Shutting down VM for Instance %(instance_uuid)s" +#~ msgid "enter: _get_chap_secret_for_host: host name %s" #~ msgstr "" -#~ msgid "Destroying VDIs for Instance %(instance_uuid)s" +#~ msgid "" +#~ "leave: _get_chap_secret_for_host: host name " +#~ "%(host_name)s with secret %(chap_secret)s" #~ msgstr "" #~ msgid "" -#~ "Instance %(instance_uuid)s using RAW or " -#~ "VHD, skipping kernel and ramdisk " -#~ "deletion" +#~ "_create_host: Cannot clean host name. 
" +#~ "Host name is not unicode or string" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: %s" +#~ msgstr "" + +#~ msgid "leave: _get_host_from_connector: host %s" +#~ msgstr "" + +#~ msgid "enter: _create_host: host %s" +#~ msgstr "" + +#~ msgid "_create_host: No connector ports" #~ msgstr "" -#~ msgid "Instance %(instance_uuid)s VM destroyed" +#~ msgid "leave: _create_host: host %(host)s - %(host_name)s" #~ msgstr "" -#~ msgid "Destroying VM for Instance %(instance_uuid)s" +#~ msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" #~ msgstr "" -#~ msgid "Automatically hard rebooting %d" +#~ msgid "" +#~ "storwize_svc_multihostmap_enabled is set to " +#~ "False, Not allow multi host mapping" #~ msgstr "" -#~ msgid "Instance for migration %d not found, skipping" +#~ msgid "volume %s mapping to multi host" +#~ msgstr "volume %s : exportation évitée" + +#~ msgid "" +#~ "leave: _map_vol_to_host: LUN %(result_lun)s, " +#~ "volume %(volume_name)s, host %(host_name)s" #~ msgstr "" -#~ msgid "injecting network info to xs for vm: |%s|" +#~ msgid "enter: _delete_host: host %s " #~ msgstr "" -#~ msgid "creating vif(s) for vm: |%s|" +#~ msgid "leave: _delete_host: host %s " #~ msgstr "" -#~ msgid "Created VIF %(vif_ref)s for VM %(vm_ref)s, network %(network_ref)s." -#~ msgstr "VIF créé %(vif_ref)s pour la VM %(vm_ref)s, network %(network_ref)s." +#~ msgid "_create_host failed to return the host name." +#~ msgstr "" -#~ msgid "injecting hostname to xs for vm: |%s|" +#~ msgid "_get_host_from_connector failed to return the host name for connector" #~ msgstr "" #~ msgid "" -#~ "The agent call to %(method)s returned" -#~ " an invalid response: %(ret)r. VM " -#~ "id=%(instance_uuid)s; path=%(path)s; args=%(addl_args)r" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to any host found." #~ msgstr "" #~ msgid "" -#~ "TIMEOUT: The call to %(method)s timed" -#~ " out. VM id=%(instance_uuid)s; args=%(args)r" +#~ "terminate_connection: Multiple mappings of " +#~ "volume %(vol_name)s found, no host " +#~ "specified." #~ msgstr "" #~ msgid "" -#~ "NOT IMPLEMENTED: The call to %(method)s" -#~ " is not supported by the agent. " -#~ "VM id=%(instance_uuid)s; args=%(args)r" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to host %(host_name)s found" +#~ msgstr "" + +#~ msgid "protocol must be specified as ' iSCSI' or ' FC'" +#~ msgstr "" + +#~ msgid "enter: _create_vdisk: vdisk %s " #~ msgstr "" #~ msgid "" -#~ "The call to %(method)s returned an " -#~ "error: %(e)s. VM id=%(instance_uuid)s; " -#~ "args=%(args)r" +#~ "_create_vdisk %(name)s - did not find success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" -#~ msgid "Creating VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " -#~ msgstr "Création de VBD pour VM %(vm_ref)s, VDI %(vdi_ref)s ... 
" +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find success " +#~ "message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" -#~ msgid "Error destroying VDI" +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find mapping " +#~ "id in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" #~ msgstr "" -#~ msgid "\tVolume %s is NOT VSA volume" +#~ msgid "" +#~ "_prepare_fc_map: Failed to prepare FlashCopy" +#~ " from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s creation - do nothing" +#~ msgid "" +#~ "Unexecpted mapping status %(status)s for " +#~ "mapping %(id)s. Attributes: %(attr)s" #~ msgstr "" -#~ msgid "VSA BE create_volume for %s failed" +#~ msgid "" +#~ "Mapping %(id)s prepare failed to " +#~ "complete within the allotted %(to)d " +#~ "seconds timeout. Terminating." #~ msgstr "" -#~ msgid "VSA BE create_volume for %s succeeded" +#~ msgid "" +#~ "_prepare_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s with " +#~ "exception %(ex)s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s deletion - do nothing" +#~ msgid "_prepare_fc_map: %s" #~ msgstr "" -#~ msgid "VSA BE delete_volume for %s failed" +#~ msgid "" +#~ "_start_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" -#~ msgid "VSA BE delete_volume for %s suceeded" +#~ msgid "" +#~ "enter: _run_flashcopy: execute FlashCopy from" +#~ " source %(source)s to target %(target)s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s local path call - call discover" +#~ msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s ensure export - do nothing" +#~ msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s create export - do nothing" +#~ msgid "_create_copy: Source vdisk %(src_vdisk)s (%(src_id)s) does not exist" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s remove export - do nothing" +#~ msgid "" +#~ "_create_copy: cannot get source vdisk " +#~ "%(src)s capacity from vdisk attributes " +#~ "%(attr)s" #~ msgstr "" -#~ msgid "VSA BE remove_export for %s failed" +#~ msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" #~ msgstr "" -#~ msgid "Failed to retrieve QoS info" +#~ msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" #~ msgstr "" -#~ msgid "invalid drive data" +#~ msgid "" +#~ "leave: _get_flashcopy_mapping_attributes: mapping " +#~ "%(fc_map_id)s, attributes %(attributes)s" #~ msgstr "" -#~ msgid "drive_name not defined" +#~ msgid "enter: _is_vdisk_defined: vdisk %s " #~ msgstr "" -#~ msgid "invalid drive type name %s" +#~ msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " #~ msgstr "" -#~ msgid "*** Experimental VSA code ***" +#~ msgid "enter: _delete_vdisk: vdisk %s" #~ msgstr "" -#~ msgid "Requested number of VCs (%d) is too high. Setting to default" +#~ msgid "warning: Tried to delete vdisk %s but it does not exist." 
#~ msgstr "" -#~ msgid "Creating VSA: %s" +#~ msgid "leave: _delete_vdisk: vdisk %s" #~ msgstr "" #~ msgid "" -#~ "VSA ID %(vsa_id)d %(vsa_name)s: Create " -#~ "volume %(vol_name)s, %(vol_size)d GB, type " -#~ "%(vol_type_id)s" +#~ "_add_vdisk_copy %(name)s - did not find" +#~ " success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)d: Update VSA status to %(status)s" +#~ msgid "_get_vdisk_copy_attrs: Could not get vdisk copy data" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)d: Update VSA call" +#~ msgid "_get_pool_attrs: Pool %s does not exist" #~ msgstr "" -#~ msgid "Adding %(add_cnt)s VCs to VSA %(vsa_name)s." +#~ msgid "enter: _execute_command_and_parse_attributes: command %s" #~ msgstr "" -#~ msgid "Deleting %(del_cnt)s VCs from VSA %(vsa_name)s." +#~ msgid "" +#~ "leave: _execute_command_and_parse_attributes:\n" +#~ "command: %(cmd)s\n" +#~ "attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "_get_hdr_dic: attribute headers and values do not match.\n" +#~ " Headers: %(header)s\n" +#~ " Values: %(row)s" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Deleting %(direction)s volume %(vol_name)s" +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ "stdout: %(out)s\n" +#~ "stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" -#~ msgid "Unable to delete volume %s" +#~ msgid "Did not find expected column in %(fun)s: %(hdr)s" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Forced delete. %(direction)s volume %(vol_name)s" +#~ msgid "" +#~ "Volume size %(size)s cannot be lesser" +#~ " than the snapshot size %(snap_size)s. " +#~ "They must be >= original snapshot " +#~ "size." #~ msgstr "" -#~ msgid "Going to try to terminate VSA ID %s" +#~ msgid "" +#~ "Clones currently disallowed when %(size)s " +#~ "< %(source_size)s. They must be >= " +#~ "original volume size." #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Delete instance %(name)s" +#~ msgid "" +#~ "Size of specified image %(image_size)s " +#~ "is larger than volume size " +#~ "%(volume_size)s." #~ msgstr "" -#~ msgid "Create call received for VSA %s" +#~ msgid "" +#~ "Image minDisk size %(min_disk)s is " +#~ "larger than the volume size " +#~ "%(volume_size)s." #~ msgstr "" -#~ msgid "Failed to find VSA %(vsa_id)d" +#~ msgid "Updating volume %(volume_id)s with %(update)s" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Drive %(vol_id)s created. Status %(status)s" +#~ msgid "Volume %s: resetting 'creating' status failed" #~ msgstr "" -#~ msgid "Drive %(vol_name)s (%(vol_disp_name)s) still in creating phase - wait" +#~ msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s" #~ msgstr "" -#~ msgid "" -#~ "VSA ID %(vsa_id)d: Not all volumes " -#~ "are created (%(cvol_real)d of %(cvol_exp)d)" +#~ msgid "Marking volume %s as bootable" #~ msgstr "" #~ msgid "" -#~ "VSA ID %(vsa_id)d: Drive %(vol_name)s " -#~ "(%(vol_disp_name)s) is in %(status)s state" +#~ "Attempting download of %(image_id)s " +#~ "(%(image_location)s) to volume %(volume_id)s" #~ msgstr "" -#~ msgid "Failed to update attach status for volume %(vol_name)s. 
%(ex)s" +#~ msgid "" +#~ "Downloaded image %(image_id)s (%(image_location)s)" +#~ " to volume %(volume_id)s successfully" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)d: Delete all BE volumes" +#~ msgid "" +#~ "Creating volume glance metadata for " +#~ "volume %(volume_id)s backed by image " +#~ "%(image_id)s with: %(vol_metadata)s" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)d: Start %(vc_count)d instances" +#~ msgid "" +#~ "Cloning %(volume_id)s from image %(image_id)s" +#~ " at location %(image_location)s" #~ msgstr "" diff --git a/cinder/locale/gl/LC_MESSAGES/cinder.po b/cinder/locale/gl/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..b33b6504aa --- /dev/null +++ b/cinder/locale/gl/LC_MESSAGES/cinder.po @@ -0,0 +1,10049 @@ +# Galician translations for cinder. +# Copyright (C) 2013 ORGANIZATION +# This file is distributed under the same license as the cinder project. +# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Cinder\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-02-03 06:16+0000\n" +"PO-Revision-Date: 2013-10-28 23:23+0000\n" +"Last-Translator: Tom Fifield \n" +"Language-Team: Galician " +"(http://www.transifex.com/projects/p/openstack/language/gl/)\n" +"Plural-Forms: nplurals=2; plural=(n != 1)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:102 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:66 cinder/brick/exception.py:33 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:88 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:107 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:112 +#, python-format +msgid "Volume driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:116 +#, python-format +msgid "Backup driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:120 +#, python-format +msgid "Connection to glance failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:124 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:129 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:133 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:137 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:141 +msgid "Volume driver not ready." +msgstr "" + +#: cinder/exception.py:145 cinder/brick/exception.py:74 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:150 +#, python-format +msgid "Invalid snapshot: %(reason)s" +msgstr "" + +#: cinder/exception.py:154 +#, python-format +msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:159 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:163 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:167 +msgid "The results are invalid." 
+msgstr "" + +#: cinder/exception.py:171 +#, python-format +msgid "Invalid input received: %(reason)s" +msgstr "" + +#: cinder/exception.py:175 +#, python-format +msgid "Invalid volume type: %(reason)s" +msgstr "" + +#: cinder/exception.py:179 +#, python-format +msgid "Invalid volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:183 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:187 +#, python-format +msgid "Invalid host: %(reason)s" +msgstr "" + +#: cinder/exception.py:193 cinder/brick/exception.py:81 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:197 +#, python-format +msgid "Invalid auth key: %(reason)s" +msgstr "" + +#: cinder/exception.py:201 +#, python-format +msgid "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" +msgstr "" + +#: cinder/exception.py:206 +msgid "Service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:210 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:214 +#, python-format +msgid "The device in the path %(path)s is unavailable: %(reason)s" +msgstr "" + +#: cinder/exception.py:218 +#, python-format +msgid "Expected a uuid but received %(uuid)s." +msgstr "" + +#: cinder/exception.py:222 cinder/brick/exception.py:68 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:228 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:232 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "" +"Volume %(volume_id)s has no administration metadata with key " +"%(metadata_key)s." +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Invalid metadata: %(reason)s" +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Invalid metadata size: %(reason)s" +msgstr "" + +#: cinder/exception.py:250 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:255 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:264 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:269 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s deletion is not allowed with volumes " +"present with the type." +msgstr "" + +#: cinder/exception.py:274 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:278 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:282 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:287 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:291 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:295 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "Service %(service_id)s could not be found." 
+msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:328 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:332 +#, python-format +msgid "Unknown quota resources %(unknown)s." +msgstr "" + +#: cinder/exception.py:336 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:340 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:344 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:348 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:352 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:356 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:365 +#, python-format +msgid "Volume Type %(id)s already exists." +msgstr "" + +#: cinder/exception.py:369 +#, python-format +msgid "Volume type encryption for type %(type_id)s already exists." +msgstr "" + +#: cinder/exception.py:373 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:377 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:381 +#, python-format +msgid "Could not find parameter %(param)s" +msgstr "" + +#: cinder/exception.py:385 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:389 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:398 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:402 +#, python-format +msgid "Quota exceeded: code=%(code)s" +msgstr "" + +#: cinder/exception.py:409 +#, python-format +msgid "" +"Requested volume or snapshot exceeds allowed Gigabytes quota. Requested " +"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." 
+msgstr "" + +#: cinder/exception.py:415 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:419 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:423 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:427 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:432 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:436 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:440 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:444 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:449 +#, python-format +msgid "Glance metadata for volume/snapshot %(id)s cannot be found." +msgstr "" + +#: cinder/exception.py:453 +#, python-format +msgid "Failed to export for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:457 +#, python-format +msgid "Failed to create metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:461 +#, python-format +msgid "Failed to update metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:465 +#, python-format +msgid "Failed to copy metadata to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:469 +#, python-format +msgid "Failed to copy image to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:473 +msgid "Invalid Ceph args provided for backup rbd operation" +msgstr "" + +#: cinder/exception.py:477 +msgid "An error has occurred during backup operation" +msgstr "" + +#: cinder/exception.py:481 +msgid "Backup RBD operation failed" +msgstr "" + +#: cinder/exception.py:485 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:489 +msgid "Failed to identify volume backend." +msgstr "" + +#: cinder/exception.py:493 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "" + +#: cinder/exception.py:497 +#, python-format +msgid "Connection to swift failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:501 +#, python-format +msgid "Transfer %(transfer_id)s could not be found." +msgstr "" + +#: cinder/exception.py:505 +#, python-format +msgid "Volume migration failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:509 +#, python-format +msgid "SSH command injection detected: %(command)s" +msgstr "" + +#: cinder/exception.py:513 +#, python-format +msgid "QoS Specs %(specs_id)s already exists." +msgstr "" + +#: cinder/exception.py:517 +#, python-format +msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:522 +#, python-format +msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "No such QoS spec %(specs_id)s." +msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:536 +#, python-format +msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." 
+msgstr "" + +#: cinder/exception.py:541 +#, python-format +msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." +msgstr "" + +#: cinder/exception.py:546 +#, python-format +msgid "Invalid qos specs: %(reason)s" +msgstr "" + +#: cinder/exception.py:550 +#, python-format +msgid "QoS Specs %(specs_id)s is still associated with entities." +msgstr "" + +#: cinder/exception.py:554 +#, python-format +msgid "key manager error: %(reason)s" +msgstr "" + +#: cinder/exception.py:560 +msgid "Coraid Cinder Driver exception." +msgstr "" + +#: cinder/exception.py:564 +msgid "Failed to encode json data." +msgstr "" + +#: cinder/exception.py:568 +msgid "Login on ESM failed." +msgstr "" + +#: cinder/exception.py:572 +msgid "Relogin on ESM failed." +msgstr "" + +#: cinder/exception.py:576 +#, python-format +msgid "Group with name \"%(group_name)s\" not found." +msgstr "" + +#: cinder/exception.py:580 +#, python-format +msgid "ESM configure request failed: %(message)s." +msgstr "" + +#: cinder/exception.py:584 +#, python-format +msgid "Coraid ESM not available with reason: %(reason)s." +msgstr "" + +#: cinder/exception.py:589 +msgid "Zadara Cinder Driver exception." +msgstr "" + +#: cinder/exception.py:593 +#, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:597 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:601 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:605 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:609 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:613 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:618 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:622 +msgid "SolidFire Cinder Driver exception" +msgstr "" + +#: cinder/exception.py:626 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:630 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:636 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:641 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:645 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:649 cinder/exception.py:662 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:654 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:658 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/manager.py:133 +msgid "Notifying Schedulers of capabilities ..." +msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:105 +#, python-format +msgid "" +"Default quota for resource: %(res)s is set by the default quota flag: " +"quota_%(res)s, it is now deprecated. Please use the the default quota " +"class for default quota." 
+msgstr "" + +#: cinder/quota.py:748 +#, python-format +msgid "Created reservations %s" +msgstr "" + +#: cinder/quota.py:770 +#, python-format +msgid "Failed to commit reservations %s" +msgstr "" + +#: cinder/quota.py:790 +#, python-format +msgid "Failed to roll back reservations %s" +msgstr "" + +#: cinder/quota.py:876 +msgid "Cannot register resource" +msgstr "" + +#: cinder/quota.py:879 +msgid "Cannot register resources" +msgstr "" + +#: cinder/quota_utils.py:46 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume - " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/quota_utils.py:56 cinder/transfer/api.py:168 +#: cinder/volume/flows/api/create_volume.py:520 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/service.py:95 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:108 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:148 +#, python-format +msgid "" +"Report interval must be less than service down time. Current config " +"service_down_time: %(service_down_time)s, report_interval for this: " +"service is: %(report_interval)s. Setting global service_down_time to: " +"%(new_down_time)s" +msgstr "" + +#: cinder/service.py:216 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:255 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:270 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:276 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:298 +#, python-format +msgid "" +"Value of config option %(name)s_workers must be integer greater than 1. " +"Input value ignored." +msgstr "" + +#: cinder/service.py:373 +msgid "serve() can only be called once" +msgstr "" + +#: cinder/service.py:379 cinder/openstack/common/service.py:166 +#: cinder/openstack/common/service.py:384 +msgid "Full set of CONF:" +msgstr "" + +#: cinder/service.py:387 +#, python-format +msgid "%s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Can not translate %s to integer." 
+msgstr "" + +#: cinder/utils.py:127 +#, python-format +msgid "May specify only one of %s" +msgstr "" + +#: cinder/utils.py:212 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:228 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:412 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:423 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:698 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:759 +#, python-format +msgid "Volume driver %s not initialized" +msgstr "" + +#: cinder/wsgi.py:127 cinder/openstack/common/sslutils.py:50 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: cinder/wsgi.py:130 cinder/openstack/common/sslutils.py:53 +#, python-format +msgid "Unable to find ca_file : %s" +msgstr "" + +#: cinder/wsgi.py:133 cinder/openstack/common/sslutils.py:56 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: cinder/wsgi.py:136 cinder/openstack/common/sslutils.py:59 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:169 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:206 +#, python-format +msgid "Started %(name)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:244 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:313 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." +msgstr "" + +#: cinder/api/common.py:92 cinder/api/common.py:126 cinder/volume/api.py:266 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:95 cinder/api/common.py:130 cinder/volume/api.py:263 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:120 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:134 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:162 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:189 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:182 +msgid "Initializing extension manager." 
+msgstr "" + +#: cinder/api/extensions.py:197 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:235 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:236 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:240 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:256 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:262 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:276 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:287 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:356 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:266 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:463 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:786 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:907 +msgid "subclasses must implement construct()!" +msgstr "" + +#: cinder/api/contrib/admin_actions.py:81 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:227 +#: cinder/api/contrib/volume_transfer.py:157 +#: cinder/api/contrib/volume_transfer.py:193 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:224 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:60 +msgid "Snapshot not found." 
+msgstr "" + +#: cinder/api/contrib/hosts.py:86 cinder/api/openstack/wsgi.py:245 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:136 +#, python-format +msgid "Host '%s' could not be found." +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:168 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:180 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:206 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:214 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:111 +msgid "Please specify a name for QoS specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:220 +msgid "Failed to disassociate qos specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:222 +msgid "Qos specs still in use." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:298 +#: cinder/api/contrib/qos_specs_manage.py:351 +msgid "Volume Type id must not be None." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:72 +msgid "Missing required element quota_class_set in request body." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:81 +msgid "Quota class limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:85 +msgid "Quota class limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:60 +msgid "Quota limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quotas.py:65 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:100 +msgid "Missing required element quota_set in request body." +msgstr "" + +#: cinder/api/contrib/quotas.py:111 +#, python-format +msgid "Bad key(s) in quota set: %s" +msgstr "" + +#: cinder/api/contrib/scheduler_hints.py:36 +msgid "Malformed scheduler_hints attribute" +msgstr "" + +#: cinder/api/contrib/services.py:84 +msgid "" +"Query by service parameter is deprecated. Please use binary parameter " +"instead." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:51 +msgid "'status' must be specified." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:61 +#, python-format +msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:67 +#, python-format +msgid "" +"Provided snapshot status %(provided)s not allowed for snapshot with " +"status %(current)s." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:79 +msgid "progress must be an integer percentage" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:101 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:105 +#: cinder/api/v1/snapshot_metadata.py:75 cinder/api/v1/volume_metadata.py:75 +#: cinder/api/v2/snapshot_metadata.py:75 cinder/api/v2/volume_metadata.py:74 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:108 +#: cinder/api/v1/snapshot_metadata.py:79 cinder/api/v1/volume_metadata.py:79 +#: cinder/api/v2/snapshot_metadata.py:79 cinder/api/v2/volume_metadata.py:78 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:150 +msgid "" +"Key names can only contain alphanumeric characters, underscores, periods," +" colons and hyphens." 
+msgstr "" + +#: cinder/api/contrib/volume_actions.py:99 +#, python-format +msgid "" +"Invalid request to attach volume to an instance %(instance_uuid)s and a " +"host %(host_name)s simultaneously" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:107 +msgid "Invalid request to attach volume to an invalid target" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:111 +msgid "" +"Invalid request to attach volume with an invalid mode. Attaching mode " +"should be 'rw' or 'ro'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:196 +msgid "Unable to fetch connection information from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:216 +msgid "Unable to terminate volume connection from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:229 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:237 +msgid "Bad value for 'force' parameter." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:240 +msgid "'force' is not string or bool." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:280 +msgid "New volume size must be specified as an integer." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:299 +msgid "Must specify readonly in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:307 +msgid "Bad value for 'readonly'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:311 +msgid "'readonly' not string or bool" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:325 +msgid "New volume type must be specified." +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:131 +msgid "Listing volume transfers" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:147 +#, python-format +msgid "Creating new volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:162 +#, python-format +msgid "Creating transfer of volume %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:183 +#, python-format +msgid "Accepting volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:196 +#, python-format +msgid "Accepting transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:217 +#, python-format +msgid "Delete transfer with id: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:64 +msgid "key_size must be non-negative" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:67 +msgid "key_size must be an integer" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:73 +msgid "provider must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:75 +msgid "control_location must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:83 +#, python-format +msgid "Valid control location are: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:111 +msgid "Cannot create encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:115 +msgid "Create body is not valid." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:157 +msgid "Cannot delete encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/middleware/auth.py:108 +msgid "Invalid service catalog json." 
+msgstr "" + +#: cinder/api/middleware/fault.py:44 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:53 cinder/api/openstack/wsgi.py:984 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/fault.py:69 +#, python-format +msgid "%(exception)s: %(explanation)s" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:55 cinder/api/middleware/sizelimit.py:64 +#: cinder/api/middleware/sizelimit.py:78 +msgid "Request is too large." +msgstr "" + +#: cinder/api/openstack/__init__.py:69 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:80 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:104 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:126 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." +msgstr "" + +#: cinder/api/openstack/wsgi.py:220 cinder/api/openstack/wsgi.py:634 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:639 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:677 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:682 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:685 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:793 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:799 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:803 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:914 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:917 cinder/api/openstack/wsgi.py:930 +#: cinder/api/v1/snapshot_metadata.py:53 cinder/api/v1/snapshot_metadata.py:71 +#: cinder/api/v1/snapshot_metadata.py:96 cinder/api/v1/snapshot_metadata.py:121 +#: cinder/api/v1/volume_metadata.py:53 cinder/api/v1/volume_metadata.py:71 +#: cinder/api/v1/volume_metadata.py:96 cinder/api/v1/volume_metadata.py:121 +#: cinder/api/v2/snapshot_metadata.py:53 cinder/api/v2/snapshot_metadata.py:71 +#: cinder/api/v2/snapshot_metadata.py:96 cinder/api/v2/snapshot_metadata.py:121 +#: cinder/api/v2/volume_metadata.py:52 cinder/api/v2/volume_metadata.py:70 +#: cinder/api/v2/volume_metadata.py:95 cinder/api/v2/volume_metadata.py:120 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:927 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:939 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:987 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." 
+msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:139 cinder/api/v2/limits.py:138 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:264 cinder/api/v2/limits.py:261 +msgid "This request was rate-limited." +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:37 cinder/api/v1/snapshot_metadata.py:117 +#: cinder/api/v1/snapshot_metadata.py:156 cinder/api/v2/snapshot_metadata.py:37 +#: cinder/api/v2/snapshot_metadata.py:117 +#: cinder/api/v2/snapshot_metadata.py:156 +msgid "snapshot does not exist" +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:139 +#: cinder/api/v1/snapshot_metadata.py:149 cinder/api/v1/volume_metadata.py:139 +#: cinder/api/v1/volume_metadata.py:149 cinder/api/v2/snapshot_metadata.py:139 +#: cinder/api/v2/snapshot_metadata.py:149 cinder/api/v2/volume_metadata.py:138 +#: cinder/api/v2/volume_metadata.py:148 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:119 cinder/api/v2/snapshots.py:120 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:184 +msgid "'volume_id' must be specified" +msgstr "" + +#: cinder/api/v1/snapshots.py:182 cinder/api/v2/snapshots.py:193 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:186 cinder/api/v2/snapshots.py:202 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:37 cinder/api/v1/volume_metadata.py:117 +#: cinder/api/v1/volume_metadata.py:156 cinder/api/v2/volume_metadata.py:36 +#: cinder/api/v2/volume_metadata.py:116 cinder/api/v2/volume_metadata.py:155 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:111 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:290 cinder/api/v2/volumes.py:228 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:344 cinder/api/v1/volumes.py:348 +#: cinder/api/v2/volumes.py:298 cinder/api/v2/volumes.py:302 +msgid "Invalid imageRef provided." 
+msgstr "" + +#: cinder/api/v1/volumes.py:388 cinder/api/v2/volumes.py:354 +#, python-format +msgid "snapshot id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:401 +#, python-format +msgid "source vol id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:412 cinder/api/v2/volumes.py:377 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/v1/volumes.py:496 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/snapshots.py:111 cinder/api/v2/snapshots.py:126 +#: cinder/api/v2/snapshots.py:267 +msgid "Snapshot could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:174 cinder/api/v2/snapshots.py:234 +#: cinder/api/v2/volumes.py:313 cinder/api/v2/volumes.py:419 +#, python-format +msgid "Missing required element '%s' in request body" +msgstr "" + +#: cinder/api/v2/snapshots.py:190 cinder/api/v2/volumes.py:217 +#: cinder/api/v2/volumes.py:234 cinder/api/v2/volumes.py:449 +msgid "Volume could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:230 cinder/api/v2/volumes.py:415 +msgid "Missing request body" +msgstr "" + +#: cinder/api/v2/types.py:70 +msgid "Volume type not found" +msgstr "" + +#: cinder/api/v2/volumes.py:237 +msgid "Volume cannot be deleted while in attached state" +msgstr "" + +#: cinder/api/v2/volumes.py:343 +msgid "Volume type not found." +msgstr "" + +#: cinder/api/v2/volumes.py:366 +#, python-format +msgid "source volume id:%s not found" +msgstr "" + +#: cinder/api/v2/volumes.py:472 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:66 +msgid "Backup status must be available or error" +msgstr "" + +#: cinder/backup/api.py:105 +msgid "Volume to be backed up must be available" +msgstr "" + +#: cinder/backup/api.py:140 +msgid "Backup status must be available" +msgstr "" + +#: cinder/backup/api.py:145 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:154 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:170 +msgid "Volume to be restored to must be available" +msgstr "" + +#: cinder/backup/api.py:176 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." +msgstr "" + +#: cinder/backup/api.py:181 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:97 +msgid "NULL host not allowed for volume backend lookup." +msgstr "" + +#: cinder/backup/manager.py:100 +#, python-format +msgid "Checking hostname '%s' for backend info." +msgstr "" + +#: cinder/backup/manager.py:107 +#, python-format +msgid "Backend not found in hostname (%s) so using default." +msgstr "" + +#: cinder/backup/manager.py:117 +#, python-format +msgid "Manager requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:120 cinder/backup/manager.py:132 +msgid "Fetching default backend." +msgstr "" + +#: cinder/backup/manager.py:123 +#, python-format +msgid "Volume manager for backend '%s' does not exist." +msgstr "" + +#: cinder/backup/manager.py:129 +#, python-format +msgid "Driver requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:147 +#, python-format +msgid "" +"Registering backend %(backend)s (host=%(host)s " +"backend_name=%(backend_name)s)." +msgstr "" + +#: cinder/backup/manager.py:154 +#, python-format +msgid "Registering default backend %s." 
+msgstr "" + +#: cinder/backup/manager.py:158 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)." +msgstr "" + +#: cinder/backup/manager.py:165 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s." +msgstr "" + +#: cinder/backup/manager.py:184 +msgid "Cleaning up incomplete backup operations." +msgstr "" + +#: cinder/backup/manager.py:189 +#, python-format +msgid "Resetting volume %s to available (was backing-up)." +msgstr "" + +#: cinder/backup/manager.py:194 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)." +msgstr "" + +#: cinder/backup/manager.py:206 +#, python-format +msgid "Resetting backup %s to error (was creating)." +msgstr "" + +#: cinder/backup/manager.py:212 +#, python-format +msgid "Resetting backup %s to available (was restoring)." +msgstr "" + +#: cinder/backup/manager.py:217 +#, python-format +msgid "Resuming delete on backup: %s." +msgstr "" + +#: cinder/backup/manager.py:225 +#, python-format +msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:237 +#, python-format +msgid "" +"Create backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:249 +#, python-format +msgid "" +"Create backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:282 +#, python-format +msgid "Create backup finished. backup: %s." +msgstr "" + +#: cinder/backup/manager.py:286 +#, python-format +msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:299 +#, python-format +msgid "" +"Restore backup aborted: expected volume status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:310 +#, python-format +msgid "" +"Restore backup aborted: expected backup status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:329 +#, python-format +msgid "" +"Restore backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:360 +#, python-format +msgid "" +"Restore backup finished, backup %(backup_id)s restored to volume " +"%(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:379 +#, python-format +msgid "Delete backup started, backup: %s." +msgstr "" + +#: cinder/backup/manager.py:386 +#, python-format +msgid "" +"Delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:399 +#, python-format +msgid "" +"Delete backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:422 +#, python-format +msgid "Delete backup finished, backup %s deleted." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:116 +msgid "" +"rbd striping not supported - ignoring configuration settings for rbd " +"striping" +msgstr "" + +#: cinder/backup/drivers/ceph.py:147 +#, python-format +msgid "invalid user '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:213 +msgid "backup_id required" +msgstr "" + +#: cinder/backup/drivers/ceph.py:224 +#, python-format +msgid "discarding %(length)s bytes from offset %(offset)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:232 +#, python-format +msgid "writing zeroes chunk %d" +msgstr "" + +#: cinder/backup/drivers/ceph.py:246 +#, python-format +msgid "transferring data between '%(src)s' and '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:250 +#, python-format +msgid "%(chunks)s chunks of %(bytes)s bytes to be transferred" +msgstr "" + +#: cinder/backup/drivers/ceph.py:269 +#, python-format +msgid "transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:279 +#, python-format +msgid "transferring remaining %s bytes" +msgstr "" + +#: cinder/backup/drivers/ceph.py:295 +#, python-format +msgid "creating base image '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:322 cinder/backup/drivers/ceph.py:603 +#, python-format +msgid "deleting backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:325 +msgid "no backup snapshot to delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:361 +#, python-format +msgid "trying diff format name format basename='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:369 +#, python-format +msgid "image %s not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:377 +#, python-format +msgid "base image still has %s snapshots so skipping base image delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:382 +#, python-format +msgid "deleting base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:389 +#, python-format +msgid "image busy, retrying %(retries)s more time(s) in %(delay)ss" +msgstr "" + +#: cinder/backup/drivers/ceph.py:394 +msgid "max retries reached - raising error" +msgstr "" + +#: cinder/backup/drivers/ceph.py:397 +#, python-format +msgid "base backup image='%s' deleted)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:407 +#, python-format +msgid "deleting source snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:453 +#, python-format +msgid "performing differential transfer from '%(src)s' to '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:478 +#, python-format +msgid "rbd diff op failed - (ret=%(ret)s stderr=%(stderr)s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:488 +#, python-format +msgid "image '%s' not found - trying diff format name" +msgstr "" + +#: cinder/backup/drivers/ceph.py:493 +#, python-format +msgid "diff format image '%s' not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:528 +#, python-format +msgid "using --from-snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:543 +#, python-format +msgid "source snap '%s' is stale so deleting" +msgstr "" + +#: cinder/backup/drivers/ceph.py:555 +#, python-format +msgid "" +"snap='%(snap)s' does not exist in base image='%(base)s' - aborting " +"incremental backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:566 +#, python-format +msgid "creating backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:586 +#, python-format +msgid "differential backup transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:595 +msgid "differential backup transfer failed" +msgstr "" + +#: 
cinder/backup/drivers/ceph.py:625 +#, python-format +msgid "creating base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:634 +msgid "copying data" +msgstr "" + +#: cinder/backup/drivers/ceph.py:694 +#, python-format +msgid "looking for snapshot of backup base '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:697 +#, python-format +msgid "backup base '%s' has no snapshots" +msgstr "" + +#: cinder/backup/drivers/ceph.py:704 +#, python-format +msgid "backup '%s' has no snapshot" +msgstr "" + +#: cinder/backup/drivers/ceph.py:708 +#, python-format +msgid "backup should only have one snapshot but instead has %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:713 +#, python-format +msgid "found snapshot '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:734 +msgid "need non-zero volume size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:751 +#, python-format +msgid "Starting backup of volume='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:764 +msgid "forcing full backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:776 +#, python-format +msgid "backup '%s' finished." +msgstr "" + +#: cinder/backup/drivers/ceph.py:834 +msgid "adjusting restore vol size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:846 +#, python-format +msgid "trying incremental restore from base='%(base)s' snap='%(snap)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:858 +msgid "differential restore failed, trying full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:869 +#, python-format +msgid "restore transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:916 +#, python-format +msgid "rbd has %s extents" +msgstr "" + +#: cinder/backup/drivers/ceph.py:938 +msgid "dest volume is original volume - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:959 +msgid "destination has extents - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:964 +#, python-format +msgid "no restore point found for backup='%s', forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:995 +msgid "forcing full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1005 +#, python-format +msgid "starting restore from Ceph backup=%(src)s to volume=%(dest)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1016 +msgid "volume_file does not support fileno() so skipping fsync()" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1021 +msgid "restore finished successfully." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:1023 +#, python-format +msgid "restore finished with error - %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1029 +#, python-format +msgid "delete started for backup=%s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1034 +msgid "rbd image not found but continuing anyway so that db entry can be removed" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1037 +#, python-format +msgid "delete '%s' finished with warning" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1039 +#, python-format +msgid "delete '%s' finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:106 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:123 +#, python-format +msgid "single_user auth mode enabled, but %(param)s not set" +msgstr "" + +#: cinder/backup/drivers/swift.py:141 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:146 +#, python-format +msgid "container %s does not exist" +msgstr "" + +#: cinder/backup/drivers/swift.py:151 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/drivers/swift.py:157 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:173 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:182 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:192 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:209 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/drivers/swift.py:214 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:219 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:224 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/drivers/swift.py:234 +#, python-format +msgid "volume size %d is invalid." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:248 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:271 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/drivers/swift.py:278 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:287 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/drivers/swift.py:291 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/drivers/swift.py:297 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:301 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:304 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:312 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/drivers/swift.py:328 cinder/backup/drivers/tsm.py:324 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/drivers/swift.py:345 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/drivers/swift.py:350 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:356 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/drivers/swift.py:362 +#, python-format +msgid "" +"restoring object from swift. backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:378 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/drivers/swift.py:401 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:409 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:423 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:428 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:432 cinder/backup/drivers/tsm.py:378 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:446 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:455 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:458 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:468 cinder/backup/drivers/tsm.py:440 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/backup/drivers/tsm.py:85 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to create device hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:143 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to obtain backup success notification from " +"server.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:173 +#, python-format +msgid "" +"restore: %(vol_id)s Failed.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:199 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a block device." +msgstr "" + +#: cinder/backup/drivers/tsm.py:206 +#, python-format +msgid "backup: %(vol_id)s Failed. Cannot obtain real path to device %(path)s." +msgstr "" + +#: cinder/backup/drivers/tsm.py:213 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a file." +msgstr "" + +#: cinder/backup/drivers/tsm.py:260 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to remove backup hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:286 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to TSM, volume path: " +"%(volume_path)s," +msgstr "" + +#: cinder/backup/drivers/tsm.py:298 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:308 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:338 +#, python-format +msgid "" +"restore: starting restore of backup from TSM to volume %(volume_id)s, " +"backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:352 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:362 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:413 +#, python-format +msgid "" +"delete: %(vol_id)s Failed to run dsmc with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:421 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments with " +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:432 +#, python-format +msgid "" +"delete: %(vol_id)s Failed with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/brick/exception.py:55 +#, python-format +msgid "Exception in string format operation. 
msg='%s'" +msgstr "" + +#: cinder/brick/exception.py:85 +msgid "We are unable to locate any Fibre Channel devices." +msgstr "" + +#: cinder/brick/exception.py:89 +msgid "Unable to find a Fibre Channel volume device." +msgstr "" + +#: cinder/brick/exception.py:93 +#, python-format +msgid "Volume device not found at %(device)s." +msgstr "" + +#: cinder/brick/exception.py:97 +#, python-format +msgid "Unable to find Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:101 +#, python-format +msgid "Failed to create Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:105 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:109 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:113 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:117 +#, python-format +msgid "Connect to volume via protocol %(protocol)s not supported." +msgstr "" + +#: cinder/brick/initiator/connector.py:127 +#, python-format +msgid "Invalid InitiatorConnector protocol specified %(protocol)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:140 +#, python-format +msgid "Failed to access the device on the path %(path)s: %(error)s %(info)s." +msgstr "" + +#: cinder/brick/initiator/connector.py:229 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. Try" +" number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:242 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:317 +#, python-format +msgid "Could not find the iSCSI Initiator File %s" +msgstr "" + +#: cinder/brick/initiator/connector.py:609 +msgid "We are unable to locate any Fibre Channel devices" +msgstr "" + +#: cinder/brick/initiator/connector.py:619 +#, python-format +msgid "Looking for Fibre Channel dev %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:629 +msgid "Fibre Channel volume device not found." +msgstr "" + +#: cinder/brick/initiator/connector.py:633 +#, python-format +msgid "Fibre volume not yet found. Will rescan & retry. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:649 +#, python-format +msgid "Found Fibre Channel volume %(name)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:658 +#, python-format +msgid "Multipath device discovered %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:776 +#, python-format +msgid "AoE volume not yet found at: %(path)s. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:789 +#, python-format +msgid "Found AoE device %(path)s (after %(tries)s rediscover)" +msgstr "" + +#: cinder/brick/initiator/connector.py:815 +#, python-format +msgid "aoe-discover: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:825 +#, python-format +msgid "aoe-revalidate %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:834 +#, python-format +msgid "aoe-flush %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:858 +msgid "" +"Connection details not present. RemoteFsClient may not initialize " +"properly." 
+msgstr "" + +#: cinder/brick/initiator/connector.py:915 +msgid "Invalid connection_properties specified no device_path attribute" +msgstr "" + +#: cinder/brick/initiator/linuxfc.py:50 cinder/brick/initiator/linuxfc.py:56 +msgid "systool is not installed" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:99 +#: cinder/brick/initiator/linuxscsi.py:107 +#: cinder/brick/initiator/linuxscsi.py:124 +#, python-format +msgid "multipath call failed exit (%(code)s)" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:145 +#, python-format +msgid "Couldn't find multipath device %(line)s" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:149 +#, python-format +msgid "Found multipath device = %(mdev)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:140 +msgid "Attempting recreate of backing lun..." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:158 +#, python-format +msgid "" +"Failed to recover attempt to create iscsi backing lun for volume " +"id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:177 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:184 +#, python-format +msgid "" +"Created volume path %(vp)s,\n" +"content: %(vc)%" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:216 cinder/brick/iscsi/iscsi.py:365 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:227 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:258 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:262 +#, python-format +msgid "Volume path %s does not exist, nothing to remove." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:280 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:290 cinder/brick/iscsi/iscsi.py:550 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:375 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:469 +msgid "cinder-rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:489 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:513 cinder/brick/iscsi/iscsi.py:522 +#, python-format +msgid "Failed to create iscsi target for volume id:%s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:532 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:542 +#, python-format +msgid "Failed to remove iscsi target for volume id:%s." 
+msgstr "" + +#: cinder/brick/iscsi/iscsi.py:571 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 cinder/brick/local_dev/lvm.py:158 +#: cinder/brick/local_dev/lvm.py:474 cinder/brick/local_dev/lvm.py:503 +#: cinder/brick/local_dev/lvm.py:546 cinder/brick/local_dev/lvm.py:609 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 cinder/brick/local_dev/lvm.py:159 +#: cinder/brick/local_dev/lvm.py:475 cinder/brick/local_dev/lvm.py:504 +#: cinder/brick/local_dev/lvm.py:547 cinder/brick/local_dev/lvm.py:610 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 cinder/brick/local_dev/lvm.py:160 +#: cinder/brick/local_dev/lvm.py:476 cinder/brick/local_dev/lvm.py:505 +#: cinder/brick/local_dev/lvm.py:548 cinder/brick/local_dev/lvm.py:611 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, python-format +msgid "Unable to locate Volume Group %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:157 +msgid "Error querying thin pool about data_percent" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:370 +#, python-format +msgid "Unable to find VG: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:420 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:473 +msgid "Error creating Volume" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:489 +#, python-format +msgid "Unable to find LV: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:502 +msgid "Error creating snapshot" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:545 +msgid "Error activating LV" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:563 +#, python-format +msgid "Error reported running lvremove: CMD: %(command)s, RESPONSE: %(response)s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:568 +msgid "Attempting udev settle and retry of lvremove..." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:608 +msgid "Error extending Volume" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:39 +msgid "nfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:45 +msgid "glusterfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:86 +#, python-format +msgid "Already mounted: %s" +msgstr "" + +#: cinder/common/config.py:125 +msgid "Deploy v1 of the Cinder API." +msgstr "" + +#: cinder/common/config.py:128 +msgid "Deploy v2 of the Cinder API." +msgstr "" + +#: cinder/common/sqlalchemyutils.py:66 +#: cinder/openstack/common/db/sqlalchemy/utils.py:72 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:114 +#: cinder/openstack/common/db/sqlalchemy/utils.py:120 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/compute/nova.py:97 +#, python-format +msgid "Novaclient connection created using URL: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:63 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:190 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:843 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1842 +#, python-format +msgid "VolumeType %s deletion failed, VolumeType in use." 
+msgstr "" + +#: cinder/db/sqlalchemy/api.py:2530 +#, python-format +msgid "No backup with id %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2615 +msgid "Volume must be available" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2639 +#, python-format +msgid "Volume in unexpected state %s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2662 +#, python-format +msgid "" +"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " +"%(status)s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:37 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:64 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:240 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:269 +msgid "Downgrade from initial Cinder install is unsupported." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:49 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:74 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:105 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:45 +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:48 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:46 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:127 +msgid "Dropping foreign key reservations_ibfk_1 failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:133 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:140 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:147 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:60 +msgid "Exception while creating table 'volume_glance_metadata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:75 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:68 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:58 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:61 +msgid "transfers table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:31 +msgid "migrations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:61 +#, python-format +msgid "Table |%s| not created" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:37 +#, python-format +msgid "Exception while dropping table %s." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:100 +#, python-format +msgid "Exception while creating table %s." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:34 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:43 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:49 +#, python-format +msgid "Column |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:92 +msgid "encryption_key_id column not dropped from volumes" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:100 +msgid "encryption_key_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:105 +msgid "volume_type_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:113 +msgid "encryption table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:49 +msgid "Table quality_of_service_specs not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:60 +msgid "Added qos_specs_id column to volume type table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:85 +msgid "Dropping foreign key volume_types_ibfk_1 failed" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:93 +msgid "Dropping qos_specs_id column failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:100 +msgid "Dropping quality_of_service_specs table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:59 +msgid "volume_admin_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:48 +msgid "" +"Found existing 'default' entries in the quota_classes table. Skipping " +"insertion of default values." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:72 +msgid "Added default quota class data into the DB." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:74 +msgid "Default quota class data not inserted into the DB." +msgstr "" + +#: cinder/image/glance.py:161 cinder/image/glance.py:169 +#, python-format +msgid "Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:94 cinder/image/image_utils.py:199 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/image/image_utils.py:101 +#, python-format +msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:109 cinder/image/image_utils.py:192 +#, python-format +msgid "" +"Size is %(image_size)dGB and doesn't fit in a volume of size " +"%(volume_size)dGB." +msgstr "" + +#: cinder/image/image_utils.py:157 +#, python-format +msgid "" +"qemu-img is not installed and image is of type %s. Only RAW images can " +"be used if qemu-img is not installed." +msgstr "" + +#: cinder/image/image_utils.py:164 +msgid "" +"qemu-img is not installed and the disk format is not specified. Only RAW" +" images can be used if qemu-img is not installed." 
+msgstr "" + +#: cinder/image/image_utils.py:178 +#, python-format +msgid "Copying image from %(tmp)s to volume %(dest)s - size: %(size)s" +msgstr "" + +#: cinder/image/image_utils.py:206 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:224 +#, python-format +msgid "Converted to %(vol_format)s, but format is now %(file_format)s" +msgstr "" + +#: cinder/image/image_utils.py:260 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:78 +msgid "" +"config option keymgr.fixed_key has not been defined: some operations may " +"fail unexpectedly" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:80 +msgid "keymgr.fixed_key not defined" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:134 +#, python-format +msgid "Not deleting key %s" +msgstr "" + +#: cinder/openstack/common/eventlet_backdoor.py:140 +#, python-format +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/fileutils.py:64 +#, python-format +msgid "Reloading cached file %s" +msgstr "" + +#: cinder/openstack/common/gettextutils.py:252 +msgid "Message objects do not support addition." +msgstr "" + +#: cinder/openstack/common/gettextutils.py:261 +msgid "" +"Message objects do not support str() because they may contain non-ascii " +"characters. Please use unicode() or translate() instead." +msgstr "" + +#: cinder/openstack/common/imageutils.py:96 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:189 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:200 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:227 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:235 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/log.py:301 +#, python-format +msgid "Deprecated: %s" +msgstr "" + +#: cinder/openstack/common/log.py:402 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:453 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:623 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:82 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:89 +#: cinder/tests/brick/test_brick_connector.py:466 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:129 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:136 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:43 +#, python-format +msgid "Unexpected argument for periodic task creation: %(arg)s." 
+msgstr "" + +#: cinder/openstack/common/periodic_task.py:134 +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:139 +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:177 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:186 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." +msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:127 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/openstack/common/processutils.py:142 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:167 +#: cinder/openstack/common/processutils.py:239 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:345 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:179 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/openstack/common/processutils.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:318 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:220 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/openstack/common/processutils.py:224 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/openstack/common/service.py:175 +#: cinder/openstack/common/service.py:269 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:187 +msgid "Exception during rpc cleanup." 
+msgstr "" + +#: cinder/openstack/common/service.py:238 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:275 +msgid "Unhandled exception" +msgstr "" + +#: cinder/openstack/common/service.py:308 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/openstack/common/service.py:327 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/openstack/common/service.py:337 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/openstack/common/service.py:354 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/openstack/common/service.py:358 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/service.py:362 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/openstack/common/service.py:392 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/openstack/common/service.py:410 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/openstack/common/sslutils.py:98 +#, python-format +msgid "Invalid SSL version : %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:86 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/strutils.py:182 +#, python-format +msgid "Invalid string format: %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:189 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/openstack/common/versionutils.py:69 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and " +"may be removed in %(remove_in)s." +msgstr "" + +#: cinder/openstack/common/versionutils.py:73 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s and may be removed in " +"%(remove_in)s. It will not be superseded." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:29 +msgid "An unknown error occurred in crypto utils." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:36 +#, python-format +msgid "Block size of %(given)d is too big, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:45 +#, python-format +msgid "Length of %(given)d is too long, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/db/exception.py:44 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:487 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:538 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:610 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/utils.py:33 +msgid "Sort key supplied was not valid." +msgstr "" + +#: cinder/openstack/common/notifier/api.py:129 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:145 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:164 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. 
Please use rpc_notifier instead."
+msgstr ""
+
+#: cinder/openstack/common/notifier/rpc_notifier.py:45
+#: cinder/openstack/common/notifier/rpc_notifier2.py:51
+#, python-format
+msgid "Could not send notification to %(topic)s. Payload=%(message)s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/__init__.py:105
+#, python-format
+msgid ""
+"An RPC is being made while holding a lock. The locks currently held are "
+"%(locks)s. This is probably a bug. Please report it. Include the "
+"following: [%(stack)s]."
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:83
+msgid "Pool creating new connection"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:208
+#, python-format
+msgid "no calling threads waiting for msg_id: %s, message: %s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:216
+#, python-format
+msgid ""
+"Number of call waiters is greater than warning threshold: %d. There "
+"could be a MulticallProxyWaiter leak."
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:299
+#, python-format
+msgid "unpacked context: %s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:345
+#, python-format
+msgid "UNIQUE_ID is %s."
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:414
+#, python-format
+msgid "received %s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:422
+#, python-format
+msgid "no method for message: %s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:423
+#, python-format
+msgid "No method for message: %s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:451
+#: cinder/openstack/common/rpc/impl_zmq.py:280
+#, python-format
+msgid "Expected exception during message handling (%s)"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:459
+#: cinder/openstack/common/rpc/impl_zmq.py:286
+msgid "Exception during message handling"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:594
+#, python-format
+msgid "Making synchronous call on %s ..."
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:597
+#, python-format
+msgid "MSG_ID is %s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:631
+#, python-format
+msgid "Making asynchronous cast on %s..."
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:640
+msgid "Making asynchronous fanout cast..."
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:668
+#, python-format
+msgid "Sending %(event_type)s on %(topic)s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:74
+msgid "An unknown RPC related exception occurred."
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:104
+#, python-format
+msgid ""
+"Remote error: %(exc_type)s %(value)s\n"
+"%(traceback)s."
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:121
+#, python-format
+msgid ""
+"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:"
+" \"%(method)s\" info: \"%(info)s\""
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:137
+#: cinder/openstack/common/rpc/common.py:138
+#: cinder/openstack/common/rpc/common.py:139
+msgid "<unknown>"
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:143
+#, python-format
+msgid "Found duplicate message (%(msg_id)s). Skipping it."
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:147
+msgid "Invalid reuse of an RPC connection."
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:151
+#, python-format
+msgid "Specified RPC version, %(version)s, not supported by this endpoint."
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:156
+#, python-format
+msgid ""
+"Specified RPC envelope version, %(version)s, not supported by this "
+"endpoint."
+msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:166 +#: cinder/openstack/common/rpc/impl_qpid.py:164 +msgid "Failed to process message... skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:477 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:499 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:536 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:552 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:606 +#: cinder/openstack/common/rpc/impl_qpid.py:507 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:624 +#: cinder/openstack/common/rpc/impl_qpid.py:522 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:628 +#: cinder/openstack/common/rpc/impl_qpid.py:526 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:667 +#: cinder/openstack/common/rpc/impl_qpid.py:561 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:84 +#, python-format +msgid "Invalid value for qpid_topology_version: %d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:455 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:461 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:474 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:534 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:101 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:136 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:137 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:138 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:146 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:158 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:200 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:205 +msgid "You cannot send on this socket." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:267 +#, python-format +msgid "Running func with context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:305 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:387 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:437 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:443 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:475 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:481 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:497 +#, python-format +msgid "Required IPC directory does not exist at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:506 +#, python-format +msgid "Permission denied to IPC directory at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:509 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:543 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:562 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:590 +msgid "Skipping topic registration. Already registered." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:597 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:649 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:662 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:675 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:678 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:681 +#, python-format +msgid "Received message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:682 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:691 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:698 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:721 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:724 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:728 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:731 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:771 +#, python-format +msgid "topic is %s." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:815 +#, python-format +msgid "rpc_zmq_matchmaker = %(orig)s is deprecated; use %(new)s instead" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#: cinder/openstack/common/rpc/matchmaker_ring.py:79 +#: cinder/openstack/common/rpc/matchmaker_ring.py:97 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:54 +#, python-format +msgid "extra_spec requirement '%(req)s' does not match '%(cap)s'" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:67 +#, python-format +msgid "%(host_state)s fails resource_type extra_specs requirements" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:43 +msgid "Re-scheduling is disabled." +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:52 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/scheduler/driver.py:69 +msgid "Must implement host_passes_filters" +msgstr "" + +#: cinder/scheduler/driver.py:74 +msgid "Must implement find_retype_host" +msgstr "" + +#: cinder/scheduler/driver.py:78 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:82 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:98 +#, python-format +msgid "cannot place volume %(id)s on %(host)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:114 +#, python-format +msgid "No valid hosts for volume %(id)s with type %(type)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:125 +#, python-format +msgid "" +"Current host not valid for volume %(id)s with type %(type)s, migration " +"not allowed" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:156 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:174 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:207 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:259 +#, python-format +msgid "Filtered %s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:276 +#, python-format +msgid "Choosing %s" +msgstr "" + +#: cinder/scheduler/host_manager.py:264 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:269 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:294 +#, python-format +msgid "volume service is down or disabled. (host: %s)" +msgstr "" + +#: cinder/scheduler/manager.py:63 +msgid "" +"ChanceScheduler and SimpleScheduler have been deprecated due to lack of " +"support for advanced features like: volume types, volume encryption, QoS " +"etc. These two schedulers can be fully replaced by FilterScheduler with " +"certain combination of filters and weighers." 
+msgstr "" + +#: cinder/scheduler/manager.py:98 cinder/scheduler/manager.py:100 +msgid "Failed to create scheduler manager volume flow" +msgstr "" + +#: cinder/scheduler/manager.py:159 +msgid "New volume type not specified in request_spec." +msgstr "" + +#: cinder/scheduler/manager.py:174 +#, python-format +msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." +msgstr "" + +#: cinder/scheduler/manager.py:192 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:68 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%s'" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:43 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:57 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:53 +msgid "No volume_id provided to populate a request_spec from" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:116 +#, python-format +msgid "Failed to schedule_create_volume: %(cause)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:135 +#, python-format +msgid "Failed notifying on %(topic)s payload %(payload)s" +msgstr "" + +#: cinder/tests/fake_driver.py:57 cinder/volume/driver.py:784 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:76 cinder/volume/driver.py:884 +#, python-format +msgid "FAKE ISER: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:97 +msgid "local_path not implemented" +msgstr "" + +#: cinder/tests/fake_driver.py:124 cinder/tests/fake_driver.py:129 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:70 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:78 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:94 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:97 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:58 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_netapp_nfs.py:360 +#, python-format +msgid "Share %(share)s and file name %(file_name)s" +msgstr "" + +#: cinder/tests/test_rbd.py:768 cinder/volume/drivers/rbd.py:175 +msgid "flush() not supported in this version of librbd" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:260 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1507 +#, python-format +msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1510 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1515 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:60 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:61 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: 
cinder/tests/test_xiv_ds8k.py:102 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/tests/api/contrib/test_backups.py:741 +msgid "Invalid input" +msgstr "" + +#: cinder/tests/integrated/test_login.py:29 +#, python-format +msgid "volume: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:32 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:42 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:50 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:58 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:100 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:103 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:121 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:148 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:159 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:166 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/transfer/api.py:68 +msgid "Volume in unexpected state" +msgstr "" + +#: cinder/transfer/api.py:102 cinder/volume/api.py:367 +msgid "status must be available" +msgstr "" + +#: cinder/transfer/api.py:119 +#, python-format +msgid "Failed to create transfer record for %s" +msgstr "" + +#: cinder/transfer/api.py:136 +#, python-format +msgid "Attempt to transfer %s with invalid auth key." +msgstr "" + +#: cinder/transfer/api.py:156 cinder/volume/flows/api/create_volume.py:508 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/transfer/api.py:182 +#, python-format +msgid "Failed to update quota while donating volume transfer id %s" +msgstr "" + +#: cinder/transfer/api.py:199 +#, python-format +msgid "Volume %s has been transferred."
+msgstr "" + +#: cinder/volume/api.py:143 +#, python-format +msgid "Unable to query if %s is in the availability zone set" +msgstr "" + +#: cinder/volume/api.py:171 cinder/volume/api.py:173 +msgid "Failed to create api volume flow" +msgstr "" + +#: cinder/volume/api.py:202 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:214 +#, python-format +msgid "Volume status must be available or error, but current status is: %s" +msgstr "" + +#: cinder/volume/api.py:224 +msgid "Volume cannot be deleted while migrating" +msgstr "" + +#: cinder/volume/api.py:229 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:285 cinder/volume/api.py:350 +#: cinder/volume/qos_specs.py:240 cinder/volume/volume_types.py:67 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:370 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:377 +msgid "status must be in-use to detach" +msgstr "" + +#: cinder/volume/api.py:388 +msgid "Volume status must be available to reserve" +msgstr "" + +#: cinder/volume/api.py:464 +msgid "Snapshot cannot be created while volume is migrating" +msgstr "" + +#: cinder/volume/api.py:468 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:490 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:502 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:553 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/api.py:581 cinder/volume/flows/api/create_volume.py:208 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:585 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:589 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:720 cinder/volume/api.py:772 +msgid "Volume status must be available/in-use." +msgstr "" + +#: cinder/volume/api.py:723 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/api.py:752 +msgid "Volume status must be available to extend." +msgstr "" + +#: cinder/volume/api.py:757 +#, python-format +msgid "" +"New size for extend must be greater than current size. (current: " +"%(size)s, extended: %(new_size)s)" +msgstr "" + +#: cinder/volume/api.py:778 +msgid "Volume is already part of an active migration" +msgstr "" + +#: cinder/volume/api.py:784 +msgid "volume must not have snapshots" +msgstr "" + +#: cinder/volume/api.py:797 +#, python-format +msgid "No available service named %s" +msgstr "" + +#: cinder/volume/api.py:803 +msgid "Destination host must be different than current host" +msgstr "" + +#: cinder/volume/api.py:833 +msgid "Source volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:837 +msgid "Destination volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:842 +#, python-format +msgid "Destination has migration_status %(stat)s, expected %(exp)s." +msgstr "" + +#: cinder/volume/api.py:853 +msgid "Volume status must be available to update readonly flag." 
+msgstr "" + +#: cinder/volume/api.py:862 +#, python-format +msgid "Unable to update type due to incorrect status on volume: %s" +msgstr "" + +#: cinder/volume/api.py:868 +#, python-format +msgid "Volume %s is already part of an active migration." +msgstr "" + +#: cinder/volume/api.py:874 +#, python-format +msgid "migration_policy must be 'on-demand' or 'never', passed: %s" +msgstr "" + +#: cinder/volume/api.py:887 +#, python-format +msgid "Invalid volume_type passed: %s" +msgstr "" + +#: cinder/volume/api.py:900 +#, python-format +msgid "New volume_type same as original: %s" +msgstr "" + +#: cinder/volume/api.py:915 +msgid "Retype cannot change encryption requirements" +msgstr "" + +#: cinder/volume/api.py:927 +msgid "Retype cannot change front-end qos specs for in-use volumes" +msgstr "" + +#: cinder/volume/driver.py:189 cinder/volume/drivers/netapp/nfs.py:174 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:282 +#, python-format +msgid "copy_data_between_volumes %(src)s -> %(dest)s." +msgstr "" + +#: cinder/volume/driver.py:295 cinder/volume/driver.py:309 +#, python-format +msgid "Failed to attach volume %(vol)s" +msgstr "" + +#: cinder/volume/driver.py:327 +#, python-format +msgid "Failed to copy volume %(src)s to %(dest)d" +msgstr "" + +#: cinder/volume/driver.py:340 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:358 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:394 +#, python-format +msgid "Unable to access the backend storage via the path %(path)s." +msgstr "" + +#: cinder/volume/driver.py:433 +#, python-format +msgid "Creating a new backup for volume %s." +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Restoring backup %(backup)s to volume %(volume)s." +msgstr "" + +#: cinder/volume/driver.py:474 +msgid "Extend volume not implemented" +msgstr "" + +#: cinder/volume/driver.py:533 cinder/volume/drivers/emc/emc_smis_iscsi.py:113 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:546 +#, python-format +msgid "ISCSI discovery attempt failed for:%s" +msgstr "" + +#: cinder/volume/driver.py:548 +#, python-format +msgid "Error from iscsiadm -m discovery: %s" +msgstr "" + +#: cinder/volume/driver.py:595 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:599 cinder/volume/drivers/emc/emc_smis_iscsi.py:156 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:696 +msgid "The volume driver requires the iSCSI initiator name in the connector." +msgstr "" + +#: cinder/volume/driver.py:726 cinder/volume/driver.py:845 +#: cinder/volume/drivers/eqlx.py:247 cinder/volume/drivers/lvm.py:359 +#: cinder/volume/drivers/zadara.py:650 +#: cinder/volume/drivers/emc/emc_smis_common.py:859 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:235 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:602 +#: cinder/volume/drivers/netapp/iscsi.py:1032 +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/nexenta/iscsi.py:538 +#: cinder/volume/drivers/windows/windows.py:205 +msgid "Updating volume stats" +msgstr "" + +#: cinder/volume/driver.py:924 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:203 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." 
+msgstr "" + +#: cinder/volume/manager.py:209 +msgid "" +"ThinLVMVolumeDriver is deprecated, please configure LVMISCSIDriver and " +"lvm_type=thin. Continuing with those settings." +msgstr "" + +#: cinder/volume/manager.py:228 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)" +msgstr "" + +#: cinder/volume/manager.py:235 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s" +msgstr "" + +#: cinder/volume/manager.py:244 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:257 +#, python-format +msgid "Failed to re-export volume %s: setting to error state" +msgstr "" + +#: cinder/volume/manager.py:264 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:271 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:273 +#, python-format +msgid "" +"Error encountered during re-exporting phase of driver initialization: " +"%(name)s" +msgstr "" + +#: cinder/volume/manager.py:283 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:328 cinder/volume/manager.py:330 +msgid "Failed to create manager volume flow" +msgstr "" + +#: cinder/volume/manager.py:374 cinder/volume/manager.py:391 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:380 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:389 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:394 +#, python-format +msgid "Cannot delete volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:422 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:427 +#, python-format +msgid "volume %s: glance metadata deleted" +msgstr "" + +#: cinder/volume/manager.py:430 +#, python-format +msgid "no glance metadata found for volume %s" +msgstr "" + +#: cinder/volume/manager.py:434 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:451 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:462 +#, python-format +msgid "snapshot %(snap_id)s: creating" +msgstr "" + +#: cinder/volume/manager.py:490 +#, python-format +msgid "" +"Failed updating %(snapshot_id)s metadata using the provided volumes " +"%(volume_id)s metadata" +msgstr "" + +#: cinder/volume/manager.py:496 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:508 cinder/volume/manager.py:518 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:526 +#, python-format +msgid "Cannot delete snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:556 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:559 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:579 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:583 +msgid "being attached by another host" +msgstr "" + +#: cinder/volume/manager.py:587 +msgid "being attached by different mode" +msgstr "" + +#: cinder/volume/manager.py:590 +msgid "status must be available or attaching" +msgstr "" + +#: cinder/volume/manager.py:698 +#, python-format +msgid 
"Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "" + +#: cinder/volume/manager.py:760 +#, python-format +msgid "Unable to fetch connection information from backend: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:807 +#, python-format +msgid "Unable to terminate volume connection: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:854 +msgid "failed to create new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:857 +msgid "timeout creating new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:880 +#, python-format +msgid "Failed to copy volume %(vol1)s to %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:909 +#, python-format +msgid "" +"migrate_volume_completion: completing migration for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:921 +#, python-format +msgid "" +"migrate_volume_completion is cleaning up an error for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:940 +#, python-format +msgid "Failed to delete migration source vol %(vol)s: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:976 +#, python-format +msgid "volume %s: calling driver migrate_volume" +msgstr "" + +#: cinder/volume/manager.py:1016 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/manager.py:1024 +#, python-format +msgid "" +"Unable to update stats, %(driver_name)s -%(driver_version)s " +"%(config_group)s driver is uninitialized." +msgstr "" + +#: cinder/volume/manager.py:1044 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/manager.py:1091 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to extend volume by %(s_size)sG, " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/manager.py:1103 +#, python-format +msgid "volume %s: extending" +msgstr "" + +#: cinder/volume/manager.py:1105 +#, python-format +msgid "volume %s: extended successfully" +msgstr "" + +#: cinder/volume/manager.py:1107 +#, python-format +msgid "volume %s: Error trying to extend volume" +msgstr "" + +#: cinder/volume/manager.py:1169 +msgid "Failed to update usages while retyping volume." +msgstr "" + +#: cinder/volume/manager.py:1170 +msgid "Failed to get old volume type quota reservations" +msgstr "" + +#: cinder/volume/manager.py:1190 +#, python-format +msgid "Volume %s: retyped succesfully" +msgstr "" + +#: cinder/volume/manager.py:1193 +#, python-format +msgid "" +"Volume %s: driver error when trying to retype, falling back to generic " +"mechanism." +msgstr "" + +#: cinder/volume/manager.py:1204 +msgid "Retype requires migration but is not allowed." +msgstr "" + +#: cinder/volume/manager.py:1212 +msgid "Volume must not have snapshots." 
+msgstr "" + +#: cinder/volume/qos_specs.py:57 +#, python-format +msgid "Valid consumer of QoS specs are: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:84 cinder/volume/qos_specs.py:105 +#: cinder/volume/qos_specs.py:155 cinder/volume/qos_specs.py:197 +#: cinder/volume/qos_specs.py:211 cinder/volume/qos_specs.py:225 +#: cinder/volume/volume_types.py:43 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:123 cinder/volume/qos_specs.py:140 +#: cinder/volume/qos_specs.py:272 cinder/volume/volume_types.py:52 +#: cinder/volume/volume_types.py:99 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/qos_specs.py:156 +#, python-format +msgid "Failed to get all associations of qos specs %s" +msgstr "" + +#: cinder/volume/qos_specs.py:189 +#, python-format +msgid "" +"Type %(type_id)s is already associated with another qos specs: " +"%(qos_specs_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:198 +#, python-format +msgid "Failed to associate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:212 +#, python-format +msgid "Failed to disassociate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:226 +#, python-format +msgid "Failed to disassociate qos specs %s." +msgstr "" + +#: cinder/volume/qos_specs.py:284 cinder/volume/volume_types.py:111 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/utils.py:144 +#, python-format +msgid "" +"Incorrect value error: %(blocksize)s, it may indicate that " +"'volume_dd_blocksize' was configured incorrectly. Fall back to default." +msgstr "" + +#: cinder/volume/volume_types.py:130 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:131 +#: cinder/volume/drivers/block_device.py:143 cinder/volume/drivers/lvm.py:654 +#: cinder/volume/drivers/lvm.py:669 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:157 cinder/volume/drivers/lvm.py:687 +#, python-format +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:183 cinder/volume/drivers/lvm.py:483 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:200 cinder/volume/drivers/lvm.py:504 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:272 cinder/volume/drivers/lvm.py:227 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:287 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:311 cinder/volume/drivers/lvm.py:300 +#: cinder/volume/drivers/zadara.py:509 cinder/volume/drivers/nexenta/nfs.py:189 +#, python-format +msgid "Creating clone of volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:380 +msgid "No free disk" +msgstr "" + +#: cinder/volume/drivers/block_device.py:393 +msgid "No big enough free disk" +msgstr "" + +#: cinder/volume/drivers/coraid.py:84 +#, python-format +msgid "Invalid ESM url scheme \"%s\". Supported https only." +msgstr "" + +#: cinder/volume/drivers/coraid.py:111 +msgid "Invalid REST handle name. Expected path." 
+msgstr "" + +#: cinder/volume/drivers/coraid.py:134 +#, python-format +msgid "Call to json.loads() failed: %(ex)s. Response: %(resp)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:224 +msgid "Session is expired. Relogin on ESM." +msgstr "" + +#: cinder/volume/drivers/coraid.py:244 +msgid "Reply is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:246 +msgid "Error message is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:284 +#, python-format +msgid "Coraid Appliance ping failed: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:297 +#, python-format +msgid "Volume \"%(name)s\" created with VSX LUN \"%(lun)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:311 +#, python-format +msgid "Volume \"%s\" deleted." +msgstr "" + +#: cinder/volume/drivers/coraid.py:315 +#, python-format +msgid "Resize volume \"%(name)s\" to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:319 +#, python-format +msgid "Repository for volume \"%(name)s\" found: \"%(repo)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:333 +#, python-format +msgid "Volume \"%(name)s\" resized. New size is %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:385 +msgid "Cannot create clone volume in different repository." +msgstr "" + +#: cinder/volume/drivers/coraid.py:505 +#, python-format +msgid "Initialize connection %(shelf)s/%(lun)s for %(name)s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:139 +#, python-format +msgid "" +"CLI output\n" +"%s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:154 +msgid "Reading CLI MOTD" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:158 +#, python-format +msgid "Setting CLI terminal width: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:162 +#, python-format +msgid "Sending CLI command: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:169 +msgid "Error executing EQL command" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:199 +#, python-format +msgid "EQL-driver: executing \"%s\"" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:208 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:383 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:215 cinder/volume/drivers/san/san.py:149 +#, python-format +msgid "Error running SSH command: %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:282 +#, python-format +msgid "Volume %s does not exist, it may have already been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:300 +#, python-format +msgid "EQL-driver: Setup is complete, group IP is %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:304 +msgid "Failed to setup the Dell EqualLogic driver" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:320 +#, python-format +msgid "Failed to create volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:329 +#, python-format +msgid "Volume %s was not found while trying to delete it" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:333 +#, python-format +msgid "Failed to delete volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:348 +#, python-format +msgid "Failed to create snapshot of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:361 +#, python-format +msgid "Failed to create volume from snapshot %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:374 +#, python-format +msgid "Failed to create clone of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:384 +#, python-format +msgid "Failed to delete snapshot %(snap)s of volume %(vol)s" +msgstr "" + +#: 
cinder/volume/drivers/eqlx.py:405 +#, python-format +msgid "Failed to initialize connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:415 +#, python-format +msgid "Failed to terminate connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:436 +#, python-format +msgid "Volume %s was not found; it may have been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:440 +#, python-format +msgid "Failed to ensure export of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:459 +#, python-format +msgid "Failed to extend_volume %(name)s from %(current_size)sGB to %(new_size)sGB" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:86 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:91 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:103 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:161 +#, python-format +msgid "Cloning volume %(src)s to volume %(dst)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:166 +msgid "Volume status must be 'available'." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:202 cinder/volume/drivers/nfs.py:122 +#: cinder/volume/drivers/netapp/nfs.py:753 +#, python-format +msgid "cast to %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:216 +msgid "Snapshot status must be \"available\" to clone." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:238 +#, python-format +msgid "snapshot: %(snap)s, volume: %(vol)s, volume_size: %(size)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:257 +#, python-format +msgid "will copy from snapshot at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:275 cinder/volume/drivers/nfs.py:172 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:373 +#, python-format +msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:403 +#, python-format +msgid "nova call result: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:405 +msgid "Call to Nova to create snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:427 +msgid "Nova returned \"error\" status while creating snapshot." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:431 +#, python-format +msgid "Status of snapshot %(id)s is now %(status)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:444 +#, python-format +msgid "Timed out while waiting for Nova update for creation of snapshot %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:456 +#, python-format +msgid "create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:457 +#, python-format +msgid "volume id: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:532 +msgid "'active' must be present when writing snap_info." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:562 +#, python-format +msgid "deleting snapshot %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:566 +msgid "Volume status must be \"available\" or \"in-use\"." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:582 +#, python-format +msgid "" +"Snapshot record for %s is not present, allowing snapshot_delete to " +"proceed."
+msgstr "" + +#: cinder/volume/drivers/glusterfs.py:587 +#, python-format +msgid "snapshot_file for this snap is %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:608 +#, python-format +msgid "No base file found for %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:625 +#, python-format +msgid "No %(base_id)s found for %(file)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:680 +#, python-format +msgid "No file found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:690 +#, python-format +msgid "No snap found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:701 +#, python-format +msgid "No file depends on %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:727 +#, python-format +msgid "Check condition failed: %s expected to be None." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:778 +msgid "Call to Nova delete snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:796 +#, python-format +msgid "status of snapshot %s is still \"deleting\"... waiting" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:802 +#, python-format +msgid "Unable to delete snapshot %(id)s, status: %(status)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:815 +#, python-format +msgid "Timed out while waiting for Nova update for deletion of snapshot %(id)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:904 +#, python-format +msgid "%s must be a valid raw or qcow2 image." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:967 +msgid "Extend volume is only supported for this driver when no snapshots exist." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:975 +#, python-format +msgid "Unrecognized backing format: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:990 +#, python-format +msgid "creating new volume at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:993 +#, python-format +msgid "file already exists at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1019 cinder/volume/drivers/nfs.py:159 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1021 +#, python-format +msgid "Available shares: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1038 +#, python-format +msgid "" +"GlusterFS share at %(dir)s is not writable by the Cinder volume service. " +"Snapshot operations will not be supported." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:96 +#, python-format +msgid "GPFS is not active. Detailed output: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:97 +#, python-format +msgid "GPFS is not running - state: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:140 +msgid "Option gpfs_mount_point_base is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:147 +msgid "Option gpfs_images_share_mode is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:153 +msgid "Option gpfs_images_dir is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:160 +#, python-format +msgid "" +"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " +"belong to different file systems" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:169 +#, python-format +msgid "" +"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in " +"cluster daemon level %(cur)s - must be at least at level %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:183 +#, python-format +msgid "%s must be an absolute path." 
+msgstr "" + +#: cinder/volume/drivers/gpfs.py:188 +#, python-format +msgid "%s is not a directory." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:197 +#, python-format +msgid "" +"The GPFS filesystem %(fs)s is not at the required release level. Current" +" level is %(cur)s, must be at least %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:556 +#, python-format +msgid "Failed to resize volume %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:604 +#, python-format +msgid "mkfs failed on volume %(vol)s, error message was: %(err)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:637 +#, python-format +msgid "" +"%s cannot be accessed. Verify that GPFS is active and file system is " +"mounted." +msgstr "" + +#: cinder/volume/drivers/lvm.py:189 +#, python-format +msgid "Unabled to delete due to existing snapshot for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:215 +#, python-format +msgid "Volume device file path %s does not exist." +msgstr "" + +#: cinder/volume/drivers/lvm.py:221 +#, python-format +msgid "Size for volume: %s not found, cannot secure delete." +msgstr "" + +#: cinder/volume/drivers/lvm.py:262 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:361 +#, python-format +msgid "Unable to update stats on non-initialized Volume Group: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:462 +#, python-format +msgid "Error creating iSCSI target, retrying creation for target: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:482 +#, python-format +msgid "volume_info:%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:518 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:519 cinder/volume/drivers/lvm.py:724 +#: cinder/volume/drivers/huawei/rest_common.py:1225 +#, python-format +msgid "%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:573 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/nfs.py:109 +msgid "Driver specific implementation needs to return mount_point_base." +msgstr "" + +#: cinder/volume/drivers/nfs.py:263 +#, python-format +msgid "Expected volume size was %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:264 +#, python-format +msgid " but size is now %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:361 +#, python-format +msgid "%s is already mounted" +msgstr "" + +#: cinder/volume/drivers/nfs.py:421 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:426 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/nfs.py:431 +#, python-format +msgid "NFS config 'nfs_oversub_ratio' invalid. Must be > 0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:439 +#, python-format +msgid "NFS config 'nfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:493 +#, python-format +msgid "Selected %s as target nfs share." 
+msgstr "" + +#: cinder/volume/drivers/nfs.py:526 +#, python-format +msgid "%s is above nfs_used_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:529 +#, python-format +msgid "%s is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:532 +#, python-format +msgid "%s reserved space is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/rbd.py:160 +#, python-format +msgid "Invalid argument - whence=%s not supported" +msgstr "" + +#: cinder/volume/drivers/rbd.py:164 +msgid "Invalid argument" +msgstr "" + +#: cinder/volume/drivers/rbd.py:183 +msgid "fileno() not supported by RBD()" +msgstr "" + +#: cinder/volume/drivers/rbd.py:210 +#, python-format +msgid "error opening rbd image %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:259 +msgid "rados and rbd python libraries not found" +msgstr "" + +#: cinder/volume/drivers/rbd.py:265 +msgid "error connecting to ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:346 cinder/volume/drivers/sheepdog.py:178 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:377 +#, python-format +msgid "clone depth exceeds limit of %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:411 +#, python-format +msgid "maximum clone depth (%d) has been reached - flattening source volume" +msgstr "" + +#: cinder/volume/drivers/rbd.py:423 +#, python-format +msgid "flattening source volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:435 +#, python-format +msgid "creating snapshot='%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:445 +#, python-format +msgid "cloning '%(src_vol)s@%(src_snap)s' to '%(dest)s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:459 +msgid "clone created successfully" +msgstr "" + +#: cinder/volume/drivers/rbd.py:468 +#, python-format +msgid "creating volume '%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:484 +#, python-format +msgid "flattening %(pool)s/%(img)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:490 +#, python-format +msgid "cloning %(pool)s/%(img)s@%(snap)s to %(dst)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:527 +msgid "volume has no backup snaps" +msgstr "" + +#: cinder/volume/drivers/rbd.py:550 +#, python-format +msgid "volume %s is not a clone" +msgstr "" + +#: cinder/volume/drivers/rbd.py:568 +#, python-format +msgid "deleting parent snapshot %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:579 +#, python-format +msgid "deleting parent %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:593 +#, python-format +msgid "volume %s no longer exists in backend" +msgstr "" + +#: cinder/volume/drivers/rbd.py:609 +msgid "volume has clone snapshot(s)" +msgstr "" + +#: cinder/volume/drivers/rbd.py:625 +#, python-format +msgid "deleting rbd volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:629 +msgid "" +"ImageBusy error raised while deleting rbd volume. This may have been " +"caused by a connection from a client that has crashed and, if so, may be " +"resolved by retrying the delete after 30 seconds has elapsed." 
+msgstr "" + +#: cinder/volume/drivers/rbd.py:642 +msgid "volume is a clone so cleaning references" +msgstr "" + +#: cinder/volume/drivers/rbd.py:696 +#, python-format +msgid "connection data: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:705 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:709 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:712 +msgid "Not an rbd snapshot" +msgstr "" + +#: cinder/volume/drivers/rbd.py:724 +#, python-format +msgid "not cloneable: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:728 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:733 +msgid "rbd image clone requires image format to be 'raw' but image {0} is '{1}'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:747 +#, python-format +msgid "Unable to open image %(loc)s: %(err)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:817 +msgid "volume backup complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:830 +msgid "volume restore complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:840 cinder/volume/drivers/sheepdog.py:195 +#, python-format +msgid "Failed to Extend Volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:845 cinder/volume/drivers/sheepdog.py:200 +#: cinder/volume/drivers/windows/windows.py:223 +#, python-format +msgid "Extend volume from %(old_size)s GB to %(new_size)s GB." +msgstr "" + +#: cinder/volume/drivers/scality.py:67 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:78 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:84 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:105 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:139 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:59 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:64 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:144 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:151 +#, python-format +msgid "" +"Failed to make httplib connection SolidFire Cluster: %s (verify san_ip " +"settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:154 +#, python-format +msgid "Failed to make httplib connection: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:161 +#, python-format +msgid "" +"Request to SolidFire cluster returned bad status: %(status)s / %(reason)s" +" (check san_login/san_password settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:166 +#, python-format +msgid "HTTP request failed, with status: %(status)s and reason: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:177 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:183 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:187 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:189 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:195 +#, python-format +msgid "Detected 
xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:202 +#: cinder/volume/drivers/solidfire.py:271 +#: cinder/volume/drivers/solidfire.py:366 +#, python-format +msgid "API response: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:222 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:253 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:315 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:398 +msgid "Failed to get model update from clone" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:410 +#, python-format +msgid "Failed volume create: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:425 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:460 +#, python-format +msgid "Failed to get SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:469 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:478 +#, python-format +msgid "Volume %s, not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:481 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:550 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:554 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:556 +msgid "This usually means the volume was never successfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:569 +#, python-format +msgid "Failed to delete SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:572 +#: cinder/volume/drivers/solidfire.py:646 +#: cinder/volume/drivers/solidfire.py:709 +#: cinder/volume/drivers/solidfire.py:734 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:575 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:579 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:587 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:638 +msgid "Entering SolidFire extend_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:660 +msgid "Leaving SolidFire extend_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:665 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:673 +msgid "Failed to get updated stats" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:703 +#: cinder/volume/drivers/solidfire.py:728 +msgid "Entering SolidFire attach_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:773 +msgid "Leaving SolidFire transfer volume" +msgstr "" + +#: cinder/volume/drivers/zadara.py:236 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:260 +#, python-format +msgid "Operation completed. 
%(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:357 +#, python-format +msgid "Pool %(name)s: %(total)sGB total, %(free)sGB free" +msgstr "" + +#: cinder/volume/drivers/zadara.py:408 cinder/volume/drivers/zadara.py:531 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:438 +#, python-format +msgid "Create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:445 cinder/volume/drivers/zadara.py:490 +#: cinder/volume/drivers/zadara.py:516 +#, python-format +msgid "Volume %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:456 +#, python-format +msgid "Delete snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:464 +#, python-format +msgid "snapshot: original volume %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:472 +#, python-format +msgid "snapshot: snapshot %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:483 +#, python-format +msgid "Creating volume from snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:496 +#, python-format +msgid "Snapshot %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:614 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:40 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:79 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:83 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:91 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:98 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:107 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:115 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:130 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:137 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:144 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:152 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:157 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:167 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:177 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:188 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:197 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:218 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:230 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:241 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:257 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:266 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:278 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:287 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:292 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:302 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:312 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:321 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:342 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. 
Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:354 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:365 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:381 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:390 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:402 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:411 +msgid "Entering delete_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:413 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:420 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:430 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:438 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:442 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigService: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:456 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:465 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:472 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:476 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:488 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:497 +#: cinder/volume/drivers/emc/emc_smis_common.py:567 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:502 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:518 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:527 +#, python-format +msgid "" +"Error Create Snapshot: %(snapshot)s Volume: %(volume)s Error: " +"%(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:535 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:541 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:545 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:551 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:559 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:574 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:590 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:599 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:611 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:621 +#, python-format +msgid "Create export: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:626 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:648 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:663 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:674 +#, python-format +msgid "Error mapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:678 +#, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:694 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:707 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:711 +#, python-format +msgid "HidePaths for volume %s completed successfully." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:724 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:739 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:744 +#, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:757 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:770 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:775 +#, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:781 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:790 +#: cinder/volume/drivers/emc/emc_smis_common.py:820 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:804 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:810 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:834 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:840 +#, python-format +msgid "Volume %s is already mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:852 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:884 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:887 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:903 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:906 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:928 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:948 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:952 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:959 +msgid "Cannot connect to ECOM server" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:971 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:984 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:997 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1010 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1054 +#, python-format +msgid "Pool %(storage_type)s is not found." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1060 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1066 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1082 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1114 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1117 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1130 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1158 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1184 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1188 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1248 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1289 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1302 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1314 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1326 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1361 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1404 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1409 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1419 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1441 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1463 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1491 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1520 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1526 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1538 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1548 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1550 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1566 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:152 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:161 +#, python-format +msgid "Cannot find device number for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:191 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:198 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:215 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:70 +#, python-format +msgid "Range: start LU: %(start)s, end LU: %(end)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:84 +#, python-format +msgid "setting LU upper (end) limit to %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:92 +#, python-format +msgid "%(element)s: %(val)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:103 cinder/volume/drivers/hds/hds.py:105 +#, python-format +msgid "XML exception reading parameter: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:178 +#, python-format +msgid "portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:197 +#, python-format +msgid "No configuration found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:250 +#, python-format +msgid "HDP not found: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:289 +#, python-format +msgid "iSCSI portal not found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:327 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:355 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is cloned." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:372 +#, python-format +msgid "LUN %(lun)s extended to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:395 +#, python-format +msgid "delete lun %(lun)s on %(name)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:480 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created from snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:503 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is created as snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:522 +#, python-format +msgid "LUN %s is deleted." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:57 +msgid "_instantiate_driver: configuration not found." 
+msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:64 +#, python-format +msgid "" +"_instantiate_driver: Loading %(protocol)s driver for Huawei OceanStor " +"%(product)s series storage arrays." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:84 +#, python-format +msgid "" +"\"Product\" or \"Protocol\" is illegal. \"Product\" should be set to " +"either T, Dorado or HVS. \"Protocol\" should be set to either iSCSI or " +"FC. Product: %(product)s Protocol: %(protocol)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:74 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s host: %(host)s initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:92 +#: cinder/volume/drivers/huawei/huawei_t.py:461 +#, python-format +msgid "initialize_connection: Target FC ports WWNS: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:101 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(ini)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:159 +#: cinder/volume/drivers/huawei/rest_common.py:1278 +#, python-format +msgid "" +"_get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " +"check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:206 +#: cinder/volume/drivers/huawei/rest_common.py:1083 +#, python-format +msgid "_get_tgt_iqn: iSCSI IP is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:234 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:248 +#, python-format +msgid "" +"_get_iscsi_tgt_port_info: Failed to get iSCSI port info. Please make sure" +" the iSCSI port IP %s is configured in array." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:323 +#: cinder/volume/drivers/huawei/huawei_t.py:552 +#, python-format +msgid "" +"terminate_connection: volume: %(vol)s, host: %(host)s, connector: " +"%(initiator)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:351 +#, python-format +msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:436 +msgid "validate_connector: The FC driver requires thewwpns in the connector." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:443 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:578 +#, python-format +msgid "_remove_fc_ports: FC port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:40 +#, python-format +msgid "parse_xml_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:129 +#, python-format +msgid "_get_host_os_type: Host %(ip)s OS type is %(os)s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:59 +#, python-format +msgid "HVS Request URL: %(url)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:60 +#, python-format +msgid "HVS Request Data: %(data)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:73 +#, python-format +msgid "HVS Response Data: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:75 +#, python-format +msgid "Bad response from server: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:82 +msgid "JSON transfer error" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:102 +#, python-format +msgid "Login error, reason is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:166 +#, python-format +msgid "" +"%(err)s\n" +"result: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:173 +#, python-format +msgid "%s \"data\" was not in result." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:208 +msgid "Can't find the Qos policy in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:246 +msgid "Can't find lun or lun group in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:280 +#, python-format +msgid "Invalid resource pool: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:298 +#, python-format +msgid "Get pool info error, pool name is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:327 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:354 +#, python-format +msgid "_stop_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:474 +#, python-format +msgid "" +"_mapping_hostgroup_and_lungroup: lun_group: %(lun_group)sview_id: " +"%(view_id)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:511 +#: cinder/volume/drivers/huawei/rest_common.py:543 +#, python-format +msgid "initiator name:%(initiator_name)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:527 +#, python-format +msgid "host lun id is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:553 +#, python-format +msgid "the free wwns %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:574 +#, python-format +msgid "the fc server properties is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:688 +#, python-format +msgid "JSON transfer data error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:874 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:937 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:964 +#, python-format +msgid "" +"PrefetchType config is wrong. PrefetchType must in 1,2,3,4. fetchtype " +"is:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:970 +msgid "Use default prefetch fetchtype. Prefetch fetchtype:Intelligent." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:982 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal.LUNcopy name: " +"%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1056 +#, python-format +msgid "" +"_get_iscsi_port_info: Failed to get iscsi port info through config IP " +"%(ip)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1101 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1124 +#, python-format +msgid "_parse_volume_type: type id: %(type_id)s config parameter is: %(params)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1157 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the configuration file " +"%(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1162 +#, python-format +msgid "The config parameters are: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1239 +#: cinder/volume/drivers/huawei/ssh_common.py:118 +#: cinder/volume/drivers/huawei/ssh_common.py:1265 +#, python-format +msgid "_check_conf_file: Config file invalid. %s must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1246 +#: cinder/volume/drivers/huawei/ssh_common.py:125 +msgid "_check_conf_file: Config file invalid. StoragePool must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1256 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1300 +msgid "Can not find lun in array" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:54 +#, python-format +msgid "ssh_read: Read SSH timeout. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:70 +msgid "No response message. Please check system status." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:101 +#: cinder/volume/drivers/huawei/ssh_common.py:1249 +msgid "do_setup" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:135 +#: cinder/volume/drivers/huawei/ssh_common.py:1287 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType is invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:169 +#, python-format +msgid "_get_login_info: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:224 +#, python-format +msgid "create_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:242 +#, python-format +msgid "" +"_name_translate: Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:279 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the element in configuration " +"file %(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:373 +#: cinder/volume/drivers/huawei/ssh_common.py:1451 +#, python-format +msgid "LUNType must be \"Thin\" or \"Thick\". LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:395 +msgid "" +"_parse_conf_lun_params: Use default prefetch type. 
Prefetch type: " +"Intelligent" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:421 +#, python-format +msgid "" +"_get_maximum_capacity_pool_id: Failed to get pool id. Please check config" +" file and make sure the StoragePool %s is created in storage array." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:436 +#, python-format +msgid "CLI command: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:466 +#, python-format +msgid "" +"_execute_cli: Can not connect to IP %(old)s, try to connect to the other " +"IP %(new)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:501 +#, python-format +msgid "_execute_cli: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:511 +#, python-format +msgid "delete_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:516 +#, python-format +msgid "delete_volume: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:570 +#, python-format +msgid "" +"create_volume_from_snapshot: snapshot name: %(snapshot)s, volume name: " +"%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:580 +#, python-format +msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:650 +#, python-format +msgid "_wait_for_luncopy: LUNcopy %(luncopyname)s status is %(status)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:688 +#, python-format +msgid "create_cloned_volume: src volume: %(src)s, tgt volume: %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:697 +#, python-format +msgid "Source volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:739 +#, python-format +msgid "" +"extend_volume: extended volume name: %(extended_name)s new added volume " +"name: %(added_name)s new added volume size: %(added_size)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:747 +#, python-format +msgid "extend_volume: volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:779 +#, python-format +msgid "create_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:785 +msgid "create_snapshot: Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:792 +#, python-format +msgid "create_snapshot: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:855 +#, python-format +msgid "delete_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:865 +#, python-format +msgid "" +"delete_snapshot: Can not delete snapshot %s for it is a source LUN of " +"LUNCopy." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:873 +#, python-format +msgid "delete_snapshot: Snapshot %(snap)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:916 +#, python-format +msgid "" +"%(func)s: %(msg)s\n" +"CLI command: %(cmd)s\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:933 +#, python-format +msgid "map_volume: Volume %s was not found." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1079 +#, python-format +msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1102 +#, python-format +msgid "remove_map: Host %s does not exist." 
+msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1106 +#, python-format +msgid "remove_map: Volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1119 +#, python-format +msgid "remove_map: No map between host %(host)s and volume %(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1138 +#, python-format +msgid "" +"_delete_map: There are IOs accessing the system. Retry to delete host map" +" %(mapid)s 10s later." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1146 +#, python-format +msgid "" +"_delete_map: Failed to delete host map %(mapid)s.\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1185 +msgid "_update_volume_stats: Updating volume stats." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1277 +msgid "_check_conf_file: Config file invalid. StoragePool must be specified." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1311 +msgid "" +"_get_device_type: The driver only supports Dorado5100 and Dorado 2100 G2 " +"now." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1389 +#, python-format +msgid "" +"create_volume_from_snapshot: %(device)s does not support create volume " +"from snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1396 +#, python-format +msgid "create_cloned_volume: %(device)s does not support clone volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1404 +#, python-format +msgid "extend_volume: %(device)s does not support extend volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1413 +#, python-format +msgid "create_snapshot: %(device)s does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:132 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:142 +#, python-format +msgid "Failed getting details for pool %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:179 +msgid "do_setup: No configured nodes." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:182 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:186 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:190 +msgid "Unable to determine system name" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:193 +msgid "Unable to determine system id" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:196 +msgid "Unable to determine pool extent size" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:203 +#: cinder/volume/drivers/netapp/iscsi.py:122 +#: cinder/volume/drivers/netapp/nfs.py:639 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:157 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:209 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:217 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:225 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:235 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:254 +msgid "The connector does not contain the required information." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:277 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:297 +msgid "CHAP secret exists for host but CHAP is disabled" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:302 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:314 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:316 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:333 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:342 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:365 +msgid "" +"Could not get FC connection information for the host-volume connection. " +"Is the host configured properly for FC connections?" 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:380 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:385 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:403 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:411 +msgid "terminate_connection: Failed to get host name from connector." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:421 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:447 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:459 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:470 +#, python-format +msgid "enter: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:474 +msgid "extend_volume: Extending a volume with snapshots is not supported." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:481 +#, python-format +msgid "leave: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:497 +#, python-format +msgid "enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:523 +#, python-format +msgid "leave: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:540 +#, python-format +msgid "" +"enter: retype: id=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:581 +#, python-format +msgid "" +"exit: retype: ild=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:622 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:623 +msgid "_update_volume_stats: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:44 +#, python-format +msgid "Could not find key in output of command %(cmd)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:64 +#, python-format +msgid "Failed to get code level (%s)." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:86 +#, python-format +msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:143 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:165 +#, python-format +msgid "Failed to find host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:178 +#, python-format +msgid "enter: get_host_from_connector: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:207 +#, python-format +msgid "leave: get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:218 +#, python-format +msgid "enter: create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:224 +msgid "create_host: Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:235 +msgid "create_host: No initiators or wwpns supplied." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:265 +#, python-format +msgid "leave: create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:275 +#, python-format +msgid "enter: map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:301 +#, python-format +msgid "" +"leave: map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host " +"%(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:311 +#, python-format +msgid "enter: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:318 +#, python-format +msgid "unmap_vol_from_host: No mapping of volume %(vol_name)s to any host found." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:324 +#, python-format +msgid "" +"unmap_vol_from_host: Multiple mappings of volume %(vol_name)s found, no " +"host specified." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:336 +#, python-format +msgid "" +"unmap_vol_from_host: No mapping of volume %(vol_name)s to host %(host) " +"found." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:348 +#, python-format +msgid "leave: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:377 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:383 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:390 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:397 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:402 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:408 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:417 +#, python-format +msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:452 +msgid "Protocol must be specified as ' iSCSI' or ' FC'." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:495 +#, python-format +msgid "enter: create_vdisk: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:498 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:525 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping%(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:535 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within theallotted %(to)d " +"seconds timeout. Terminating." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:544 +#, python-format +msgid "" +"enter: run_flashcopy: execute FlashCopy from source %(source)s to target " +"%(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:552 +#, python-format +msgid "leave: run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:572 +#, python-format +msgid "Loopcall: _check_vdisk_fc_mappings(), vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:595 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:631 +#, python-format +msgid "Calling _ensure_vdisk_no_fc_mappings: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:639 +#, python-format +msgid "enter: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:641 +#, python-format +msgid "Tried to delete non-existant vdisk %s." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:645 +#, python-format +msgid "leave: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:649 +#, python-format +msgid "enter: create_copy: snapshot %(src)s to %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:654 +#, python-format +msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:669 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt)s from vdisk %(src)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:691 +msgid "migrate_volume started without a vdisk copy in the expected pool." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:743 +#, python-format +msgid "" +"Ignore change IO group as storage code level is %(code_level)s, below " +"then 6.4.0.0" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:35 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:211 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:244 +#, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:53 +#, python-format +msgid "Expected no output from CLI command %(cmd)s, got %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:65 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:256 +#, python-format +msgid "" +"Failed to parse CLI output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:142 +msgid "Must pass wwpn or host to lsfabric." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:158 +#, python-format +msgid "Did not find success message nor error for %(fun)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:163 +msgid "" +"storwize_svc_multihostmap_enabled is set to False, not allowing multi " +"host mapping." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:347 +#, python-format +msgid "Did not find expected key %(key)s in %(fun)s: %(raw)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:382 +#, python-format +msgid "" +"Unexpected CLI response: header/row mismatch. header: %(header)s, row: " +"%(row)s" +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:419 +#, python-format +msgid "No element by given name %s." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:440 +msgid "Not a valid value for NaElement." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:444 +msgid "NaElement name cannot be null." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:468 +msgid "Type cannot be converted into NaElement." 
+msgstr "" + +#: cinder/volume/drivers/netapp/common.py:75 +msgid "Required configuration not found" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:103 +#, python-format +msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:109 +#, python-format +msgid "Storage family %s is not supported" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:116 +#, python-format +msgid "No default storage protocol found for storage family %(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:123 +#, python-format +msgid "" +"Protocol %(storage_protocol)s is not supported for storage family " +"%(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:130 +#, python-format +msgid "" +"NetApp driver of family %(storage_family)s and protocol " +"%(storage_protocol)s loaded" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:139 +msgid "Only loading netapp drivers supported." +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:158 +#, python-format +msgid "" +"The configured NetApp driver is deprecated. Please refer the link to " +"resolve the issue '%s'." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:69 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:105 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:150 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:166 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:175 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:191 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:227 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:232 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:238 +#, python-format +msgid "Failed to get LUN target details for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:249 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:252 +#, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:290 +#, python-format +msgid "Snapshot %s deletion successful" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:310 +#: cinder/volume/drivers/netapp/iscsi.py:565 +#: cinder/volume/drivers/netapp/nfs.py:99 +#: cinder/volume/drivers/netapp/nfs.py:206 +#, python-format +msgid "Resizing %s failed. Cleaning volume." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:325 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:412 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:431 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:511 +msgid "Object is not a NetApp LUN." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:543 +#, python-format +msgid "Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:545 +#, python-format +msgid "Error getting lun attribute. Exception: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:600 +#, python-format +msgid "No need to extend volume %s as it is already the requested new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:606 +#, python-format +msgid "Resizing lun %s directly to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:633 +#, python-format +msgid "Lun %(path)s geometry failed. Message - %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:662 +#, python-format +msgid "Moving lun %(name)s to %(new_name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:677 +#, python-format +msgid "Resizing lun %s using sub clone to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:684 +#, python-format +msgid "%s cannot be sub clone resized as it is hosted on compressed volume" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:690 +#, python-format +msgid "%s cannot be sub clone resized as it contains no blocks." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:707 +#, python-format +msgid "Post clone resize lun %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:718 +#, python-format +msgid "Failure staging lun %s to tmp." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:723 +#, python-format +msgid "Failure moving new cloned lun to %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:727 +#, python-format +msgid "Failure deleting staged tmp lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:730 +#, python-format +msgid "Unknown exception in post clone resize lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:732 +#, python-format +msgid "Exception details: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:736 +msgid "Getting lun block count." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:741 +#, python-format +msgid "Failure getting lun info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:785 +#, python-format +msgid "Failed to get vol with required size and extra specs for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:796 +#, python-format +msgid "Error provisioning vol %(name)s on %(volume)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:841 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:982 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:986 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1087 +msgid "Cluster ssc is not updated. No volume stats found." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1149 +#: cinder/volume/drivers/netapp/nfs.py:1080 +msgid "Unsupported ONTAP version. ONTAP version 7.3.1 and above is supported." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1153 +#: cinder/volume/drivers/netapp/nfs.py:1084 +#: cinder/volume/drivers/netapp/utils.py:320 +msgid "Api version could not be determined." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1164 +#, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1273 +#, python-format +msgid "Error finding luns for volume %s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1390 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1393 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1456 +msgid "Volume refresh job already running. Returning..." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1462 +#, python-format +msgid "Error refreshing vol capacity. Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1470 +#, python-format +msgid "Refreshing capacity info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:104 +#: cinder/volume/drivers/netapp/nfs.py:211 +#, python-format +msgid "NFS file %s not discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:224 +#, python-format +msgid "Copied image to volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:230 +#, python-format +msgid "Registering image in cache %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:237 +#, python-format +msgid "" +"Exception while registering image %(image_id)s in cache. Exception: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:250 +#, python-format +msgid "Found cache file for image %(image_id)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:263 +#, python-format +msgid "Cloning img from cache for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:271 +msgid "Image cache cleaning in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:282 +msgid "Image cache cleaning in progress." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:293 +#, python-format +msgid "Cleaning cache for share %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:298 +#, python-format +msgid "Files to be queued for deletion %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:305 +#, python-format +msgid "Exception during cache cleaning %(share)s. Message - %(ex)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:311 +msgid "Image cache cleaning done." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:336 +#, python-format +msgid "Bytes to free %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:343 +#, python-format +msgid "Delete file path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:358 +#, python-format +msgid "Deleting file at path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:363 +#, python-format +msgid "Exception during deleting %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:395 +#, python-format +msgid "Unexpected exception in cloning image %(image_id)s. 
Message: %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:411 +#, python-format +msgid "Cloning image %s from cache" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:415 +#, python-format +msgid "Cache share: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:425 +#, python-format +msgid "Unexpected exception during image cloning in share %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:431 +#, python-format +msgid "Cloning image %s directly in share" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:436 +#, python-format +msgid "Share is cloneable %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:443 +#, python-format +msgid "Image is raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:450 +#, python-format +msgid "Image will locally be converted to raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:457 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:467 +#, python-format +msgid "Performing post clone for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:474 +msgid "NFS file could not be discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:478 +msgid "Checking file for resize" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:482 +#, python-format +msgid "Resizing file to %sG" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:488 +msgid "Resizing image file failed." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:510 +msgid "Discover file retries exhausted." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:529 +#, python-format +msgid "Image location not in the expected format %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:557 +#, python-format +msgid "Found possible share matches %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:561 +msgid "Unexpected exception while short listing used share." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:599 +#, python-format +msgid "Extending volume %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:710 +#, python-format +msgid "Shares on vserver %s will only be used for provisioning." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:714 +#: cinder/volume/drivers/netapp/nfs.py:892 +msgid "No vserver set in config. SSC will be disabled." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:757 +#, python-format +msgid "Exception creating vol %(name)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:765 +#, python-format +msgid "Volume %s could not be created on shares." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:815 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:856 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:862 +#, python-format +msgid "" +"Cloning with params volume %(volume)s, src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:924 +msgid "No cluster ssc stats found. Wait for next volume stats update." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:941 +msgid "No shares found hence skipping ssc refresh." 
+msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:978 +#: cinder/volume/drivers/netapp/nfs.py:1221 +#, python-format +msgid "Shortlisted del elg files %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:983 +#: cinder/volume/drivers/netapp/nfs.py:1226 +#, python-format +msgid "Getting file usage for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:988 +#: cinder/volume/drivers/netapp/nfs.py:1231 +#, python-format +msgid "file-usage for path %(path)s is %(bytes)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1005 +#: cinder/volume/drivers/netapp/nfs.py:1268 +#, python-format +msgid "Share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1007 +#: cinder/volume/drivers/netapp/nfs.py:1270 +#, python-format +msgid "No share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1038 +#, python-format +msgid "Found volume %(vol)s for share %(share)s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1129 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1139 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:241 +#, python-format +msgid "Unexpected error while creating ssc vol list. Message - %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:272 +#, python-format +msgid "Exception querying aggr options. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:313 +#, python-format +msgid "Exception querying sis information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:347 +#, python-format +msgid "Exception querying mirror information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:379 +#, python-format +msgid "Exception querying storage disk. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:421 +#, python-format +msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:455 +#, python-format +msgid "Successfully completed stale refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:482 +#, python-format +msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:488 +#, python-format +msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:500 +msgid "Backend not a VolumeDriver." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:502 +msgid "Backend server not NaServer." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:505 +msgid "ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:517 +msgid "refresh stale ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:616 +msgid "Fatal error: User not permitted to query NetApp volumes." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:623 +#, python-format +msgid "" +"The user does not have access or sufficient privileges to use all ssc " +"apis. The ssc features %s may not work as expected." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:122 +msgid "ems executed successfully." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:124 +#, python-format +msgid "Failed to invoke ems. 
Message : %s" +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:137 +msgid "" +"It is not the recommended way to use drivers by NetApp. Please use " +"NetAppDriver to achieve the functionality." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:160 +msgid "Requires an NaServer instance." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:317 +msgid "Unsupported Clustered Data ONTAP version." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:99 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:150 +#, python-format +msgid "Extending volume: %(id)s New size: %(size)s GB" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:166 +#, python-format +msgid "Volume %s does not exist, it seems it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:179 +#, python-format +msgid "Cannot delete snapshot %(origin): %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:190 +#, python-format +msgid "Creating temp snapshot of the original volume: %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:200 +#: cinder/volume/drivers/nexenta/nfs.py:200 +#, python-format +msgid "Volume creation failed, deleting created snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:205 +#: cinder/volume/drivers/nexenta/nfs.py:205 +#, python-format +msgid "Failed to delete zfs snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:223 +#, python-format +msgid "Enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:250 +#, python-format +msgid "Remote NexentaStor appliance at %s should be SSH-bound." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:267 +#, python-format +msgid "" +"Cannot send source snapshot %(src)s to destination %(dst)s. Reason: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:275 +#, python-format +msgid "" +"Cannot delete temporary source snapshot %(src)s on NexentaStor Appliance:" +" %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:281 +#, python-format +msgid "Cannot delete source volume %(volume)s on NexentaStor Appliance: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:318 +#, python-format +msgid "Snapshot %s does not exist, it seems it was already deleted." 
+msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:439 +#: cinder/volume/drivers/windows/windows_utils.py:230 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:449 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:461 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:471 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:481 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:514 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:522 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:83 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:88 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:89 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:90 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:96 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:85 +#, python-format +msgid "Volume %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:89 +#, python-format +msgid "Folder %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:114 +#, python-format +msgid "Creating folder on Nexenta Store %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:146 +#, python-format +msgid "Cannot destroy created folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:176 +#, python-format +msgid "Cannot destroy cloned folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:227 +#, python-format +msgid "Folder %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:237 +#: cinder/volume/drivers/nexenta/nfs.py:268 +#, python-format +msgid "Snapshot %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:302 +#, python-format +msgid "Creating regular file: %s.This may take some time." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:313 +#, python-format +msgid "Regular file: %s created." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:365 +#, python-format +msgid "Sharing folder %s on Nexenta Store" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:393 +#, python-format +msgid "Shares loaded: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/utils.py:46 +#, python-format +msgid "Invalid value: \"%s\"" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:93 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:99 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:107 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:137 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:190 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:246 +#, python-format +msgid "Snapshot info: %(name)s => %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:321 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:79 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:166 +#, python-format +msgid "Invalid hp3parclient version. Version %s or greater required." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:179 +#, python-format +msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:193 +#, python-format +msgid "HP3PARCommon %(common_ver)s, hp3parclient %(rest_ver)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:212 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:494 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:228 +#, python-format +msgid "Failed to get domain because CPG (%s) doesn't exist on array." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:247 +#, python-format +msgid "Error extending volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:347 +#, python-format +msgid "command %s failed" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:390 +#, python-format +msgid "Error running ssh command: %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:586 +#, python-format +msgid "VV Set %s does not exist." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:633 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:684 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:752 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1004 +#, python-format +msgid "Failure in update_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1019 +#, python-format +msgid "Failure in clear_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1031 +#, python-format +msgid "Error attaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1039 +#, python-format +msgid "Error detaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:124 +#, python-format +msgid "Invalid IP address format '%s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:158 +#, python-format +msgid "" +"Found invalid iSCSI IP address(s) in configuration option(s) " +"hp3par_iscsi_ips or iscsi_ip_address '%s.'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:164 +msgid "At least one valid iSCSI IP address must be set." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:266 +msgid "Least busy iSCSI port not found, using first iSCSI port in list." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:75 +#, python-format +msgid "Failure while invoking function: %(func)s. Error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:162 +#, python-format +msgid "Error while terminating session: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:165 +msgid "Successfully established connection to the server." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:172 +#, python-format +msgid "Error while logging out the user: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:218 +#, python-format +msgid "" +"Not authenticated error occurred. Will create session and try API call " +"again: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:258 +#, python-format +msgid "Task: %(task)s progress: %(prog)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:262 +#, python-format +msgid "Task %s status: success." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:266 +#: cinder/volume/drivers/vmware/api.py:271 +#, python-format +msgid "Task: %(task)s failed with error: %(err)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:290 +msgid "Lease is ready." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:294 +msgid "Lease initializing..." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:304 +#, python-format +msgid "Error: unknown lease state %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:51 +#, python-format +msgid "Read %(bytes)s out of %(max)s from ThreadSafePipe." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:56 +#, python-format +msgid "Completed transfer of size %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:102 +#, python-format +msgid "Initiating image service update on image: %(image)s with meta: %(meta)s" +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:117 +#, python-format +msgid "Glance image: %s is now active." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:123 +#, python-format +msgid "Glance image: %s is in killed state." 
+msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:132 +#, python-format +msgid "Glance image %(id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:171 +#, python-format +msgid "" +"Exception during HTTP connection close in VMwareHTTPWrite. Exception is " +"%s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:203 +#: cinder/volume/drivers/vmware/read_write_util.py:292 +msgid "Could not retrieve URL from lease." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:206 +#, python-format +msgid "Opening vmdk url: %s for write." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:231 +#, python-format +msgid "Written %s bytes to vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:242 +#: cinder/volume/drivers/vmware/read_write_util.py:318 +#, python-format +msgid "Updating progress to %s percent." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:258 +#: cinder/volume/drivers/vmware/read_write_util.py:334 +msgid "Lease released." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:260 +#: cinder/volume/drivers/vmware/read_write_util.py:336 +#, python-format +msgid "Lease is already in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:295 +#, python-format +msgid "Opening vmdk url: %s for read." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:307 +#, python-format +msgid "Read %s bytes from vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:150 +#, python-format +msgid "Error(s): %s occurred in the call to RetrievePropertiesEx." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:189 +#, python-format +msgid "No such SOAP method %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:198 +#, python-format +msgid "httplib error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:209 +#, python-format +msgid "Socket error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:218 +#, python-format +msgid "Type error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:225 +#, python-format +msgid "Error in %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:112 +#, python-format +msgid "Returning spec value %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:115 +#, python-format +msgid "Invalid spec value: %s specified." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:118 +#, python-format +msgid "Returning default spec value: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:169 +#, python-format +msgid "%s not set." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:174 +#, python-format +msgid "Successfully setup driver: %(driver)s for server: %(ip)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:219 +msgid "Backing not available, no operation to be performed." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:287 +#, python-format +msgid "" +"Unable to pick datastore to accommodate %(size)s bytes from the " +"datastores: %(dss)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:293 +#, python-format +msgid "" +"Selected datastore: %(datastore)s with %(host_count)d connected host(s) " +"for the volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:375 +#, python-format +msgid "" +"Unable to find suitable datastore for volume of size: %(vol)s GB under " +"host: %(host)s. 
More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:385 +#, python-format +msgid "Unable to find host to accommodate a disk of size: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:412 +#, python-format +msgid "" +"Unable to find suitable datastore for volume: %(vol)s under host: " +"%(host)s. More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:422 +#, python-format +msgid "Unable to create volume: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:441 +#, python-format +msgid "The instance: %s for which initialize connection is called, exists." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:448 +#, python-format +msgid "There is no backing for the volume: %s. Need to create one." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:456 +msgid "The instance for which initialize connection is called, does not exist." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:461 +#, python-format +msgid "Trying to boot from an empty volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:470 +#, python-format +msgid "" +"Returning connection_info: %(info)s for volume: %(volume)s with " +"connector: %(connector)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:518 +#, python-format +msgid "Snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:523 +#, python-format +msgid "There is no backing, so will not create snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:528 +#, python-format +msgid "Successfully created snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:549 +#, python-format +msgid "Delete snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:554 +#, python-format +msgid "There is no backing, and so there is no snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:558 +#, python-format +msgid "Successfully deleted snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:586 +#, python-format +msgid "Successfully cloned new backing: %(back)s from source VMDK file: %(vmdk)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:603 +#, python-format +msgid "" +"There is no backing for the source volume: %(svol)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:633 +#, python-format +msgid "" +"There is no backing for the source snapshot: %(snap)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:642 +#: cinder/volume/drivers/vmware/vmdk.py:982 +#, python-format +msgid "" +"There is no snapshot point for the snapshoted volume: %(snap)s. Not " +"creating any backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:678 +#, python-format +msgid "Cannot create image of disk format: %s. Only vmdk disk format is accepted." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:713 +#: cinder/volume/drivers/vmware/vmdk.py:771 +#, python-format +msgid "Fetching glance image: %(id)s to server: %(host)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:722 +#: cinder/volume/drivers/vmware/vmdk.py:792 +#, python-format +msgid "Done copying image: %(id)s to volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:725 +#, python-format +msgid "" +"Exception in copy_image_to_volume: %(excep)s. Deleting the backing: " +"%(back)s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:746 +#, python-format +msgid "Exception in _select_ds_for_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:749 +#, python-format +msgid "Selected datastore %(ds)s for new volume of size %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:784 +#, python-format +msgid "Exception in copy_image_to_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:787 +#, python-format +msgid "Deleting the backing: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:808 +#, python-format +msgid "Copy glance image: %s to create new volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:842 +msgid "Upload to glance of attached volume is not supported." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:847 +#, python-format +msgid "Copy Volume: %s to new image." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:853 +#, python-format +msgid "Backing not found, creating for volume: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:872 +#, python-format +msgid "Done copying volume %(vol)s to a new image %(img)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:922 +#, python-format +msgid "Relocating volume: %(backing)s to %(ds)s and %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:961 +#: cinder/volume/drivers/vmware/volumeops.py:630 +#, python-format +msgid "Successfully created clone: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:974 +#, python-format +msgid "" +"There is no backing for the snapshoted volume: %(snap)s. Not creating any" +" backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1010 +#, python-format +msgid "" +"There is no backing for the source volume: %(src)s. Not creating any " +"backing for volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1018 +#, python-format +msgid "Linked clone of source volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:94 +#, python-format +msgid "Downloading image: %s from glance image server as a flat vmdk file." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:107 +#: cinder/volume/drivers/vmware/vmware_images.py:126 +#, python-format +msgid "Downloaded image: %s from glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:113 +#, python-format +msgid "Downloading image: %s from glance image server using HttpNfc import." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:132 +#, python-format +msgid "Uploading image: %s to the Glance image server using HttpNfc export." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:158 +#, python-format +msgid "Uploaded image: %s to the Glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:87 +#, python-format +msgid "Did not find any backing with name: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:94 +#, python-format +msgid "Deleting the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:97 +#, python-format +msgid "Initiated deletion of VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:99 +#, python-format +msgid "Deleted the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:237 +#, python-format +msgid "There are no valid datastores attached to %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:289 +#, python-format +msgid "" +"Creating folder: %(child_folder_name)s under parent folder: " +"%(parent_folder)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:306 +#, python-format +msgid "Child folder already present: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:314 +#, python-format +msgid "Created child folder: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:365 +#, python-format +msgid "Spec for creating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:383 +#, python-format +msgid "" +"Creating volume backing name: %(name)s disk_type: %(disk_type)s size_kb: " +"%(size_kb)s at folder: %(folder)s resourse pool: %(resource_pool)s " +"datastore name: %(ds_name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:395 +#, python-format +msgid "Initiated creation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:398 +#, python-format +msgid "Successfully created volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:438 +#, python-format +msgid "Spec for relocating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:452 +#, python-format +msgid "" +"Relocating backing: %(backing)s to datastore: %(ds)s and resource pool: " +"%(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:462 +#, python-format +msgid "Initiated relocation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:464 +#, python-format +msgid "" +"Successfully relocated volume backing: %(backing)s to datastore: %(ds)s " +"and resource pool: %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:474 +#, python-format +msgid "Moving backing: %(backing)s to folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:479 +#, python-format +msgid "Initiated move of volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:482 +#, python-format +msgid "Successfully moved volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:494 +#, python-format +msgid "Snapshoting backing: %(backing)s with name: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:501 +#, python-format +msgid "Initiated snapshot of volume backing: %(backing)s named: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:505 +#, python-format +msgid "Successfully created snapshot: %(snap)s for volume backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:553 +#, python-format +msgid "Deleting the snapshot: %(name)s from backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:558 +#, python-format +msgid "" +"Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " +"delete anything." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:565 +#, python-format +msgid "Initiated snapshot: %(name)s deletion for backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:569 +#, python-format +msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:597 +#, python-format +msgid "Spec for cloning the backing: %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:613 +#, python-format +msgid "" +"Creating a clone of backing: %(back)s, named: %(name)s, clone type: " +"%(type)s from snapshot: %(snap)s on datastore: %(ds)s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:627 +#, python-format +msgid "Initiated clone of backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:638 +#, python-format +msgid "Deleting file: %(file)s under datacenter: %(dc)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:646 +#, python-format +msgid "Initiated deletion via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:648 +#, python-format +msgid "Successfully deleted file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:701 +msgid "Copying disk data before snapshot of the VM" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:711 +#, python-format +msgid "Initiated copying disk data via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:713 +#, python-format +msgid "Successfully copied disk at: %(src)s to: %(dest)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:722 +#, python-format +msgid "Deleting vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:729 +#, python-format +msgid "Initiated deleting vmdk file via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:731 +#, python-format +msgid "Deleted vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/windows/windows.py:102 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:47 +#, python-format +msgid "" +"check_for_setup_error: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:53 +msgid "check_for_setup_error: there is no ISCSI traffic listening." +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:63 +#, python-format +msgid "" +"get_host_information: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:73 +#, python-format +msgid "" +"get_host_information: the ISCSI target information could not be " +"retrieved. WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:105 +#, python-format +msgid "" +"associate_initiator_with_iscsi_target: an association between initiator: " +"%(init)s and target name: %(target)s could not be established. WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:123 +#, python-format +msgid "" +"delete_iscsi_target: error when deleting the iscsi target associated with" +" target name: %(target)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:139 +#, python-format +msgid "" +"create_volume: error when creating the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:157 +#, python-format +msgid "" +"delete_volume: error when deleting the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:177 +#, python-format +msgid "" +"create_snapshot: error when creating the snapshot name: %(vol_name)s . 
" +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:193 +#, python-format +msgid "" +"create_volume_from_snapshot: error when creating the volume name: " +"%(vol_name)s from snapshot name: %(snap_name)s. WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:208 +#, python-format +msgid "" +"delete_snapshot: error when deleting the snapshot name: %(snap_name)s . " +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:223 +#, python-format +msgid "" +"create_iscsi_target: error when creating iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:240 +#, python-format +msgid "" +"remove_iscsi_target: error when deleting iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:255 +#, python-format +msgid "" +"add_disk_to_target: error adding disk associated to volume : %(vol_name)s" +" to the target name: %(tar_name)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:273 +#, python-format +msgid "" +"copy_vhd_disk: error when copying disk from source path : %(src_path)s to" +" destination path: %(dest_path)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:290 +#, python-format +msgid "" +"extend: error when extending the volume: %(vol_name)s .WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/flows/common.py:52 +#, python-format +msgid "Restoring source %(source_volid)s status to %(status)s" +msgstr "" + +#: cinder/volume/flows/common.py:58 +#, python-format +msgid "" +"Failed setting source volume %(source_volid)s back to its initial " +"%(source_status)s status" +msgstr "" + +#: cinder/volume/flows/common.py:83 +#, python-format +msgid "Updating volume: %(volume_id)s with %(update)s due to: %(reason)s" +msgstr "" + +#: cinder/volume/flows/common.py:90 +#: cinder/volume/flows/manager/create_volume.py:676 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(update)s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:81 +#, python-format +msgid "Originating snapshot status must be one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:103 +#, python-format +msgid "" +"Unable to create a volume from an originating source volume when its " +"status is not one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:126 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than the snapshot size " +"%(snap_size)sGB. They must be >= original snapshot size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:135 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than original volume size " +"%(source_size)sGB. They must be >= original volume size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:144 +#, python-format +msgid "Volume size %(size)s must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:186 +#, python-format +msgid "" +"Size of specified image %(image_size)sGB is larger than volume size " +"%(volume_size)sGB." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:194 +#, python-format +msgid "" +"Volume size %(volume_size)sGB cannot be smaller than the image minDisk " +"size %(min_disk)sGB." 
+msgstr "" + +#: cinder/volume/flows/api/create_volume.py:212 +#, python-format +msgid "Metadata property key %s greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:217 +#, python-format +msgid "Metadata property key %s value greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:254 +#, python-format +msgid "Availability zone '%s' is invalid" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:267 +msgid "Volume must be in the same availability zone as the snapshot" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:276 +msgid "Volume must be in the same availability zone as the source volume" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:315 +msgid "Volume type will be changed to be the same as the source volume." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:463 +#, python-format +msgid "Failed destroying volume entry %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:546 +#, python-format +msgid "Failed rolling back quota for %s reservations" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:590 +#, python-format +msgid "Failed to update quota for deleting volume: %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:678 +#: cinder/volume/flows/manager/create_volume.py:192 +#, python-format +msgid "Volume %s: create failed" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:682 +msgid "Unexpected build error:" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:105 +#, python-format +msgid "" +"Volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d due to " +"%(reason)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:124 +#, python-format +msgid "Volume %s: re-scheduled" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:141 +#, python-format +msgid "Updating volume %(volume_id)s with %(update)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:146 +#, python-format +msgid "Volume %s: resetting 'creating' status failed." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:165 +#, python-format +msgid "Volume %s: rescheduling failed" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:308 +#, python-format +msgid "" +"Failed notifying about the volume action %(event)s for volume " +"%(volume_id)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:345 +#, python-format +msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:347 +#, python-format +msgid "" +"Failed updating volume %(vol_id)s metadata using the provided " +"%(src_type)s %(src_id)s metadata" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:405 +#, python-format +msgid "" +"Failed fetching snapshot %(snapshot_id)s bootable flag using the provided" +" glance snapshot %(snapshot_ref_id)s volume reference" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:418 +#, python-format +msgid "Marking volume %s as bootable." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:421 +#, python-format +msgid "Failed updating volume %(volume_id)s bootable flag to true" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:448 +#, python-format +msgid "" +"Attempting download of %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s." 
+msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:455 +#: cinder/volume/flows/manager/create_volume.py:466 +#, python-format +msgid "" +"Failed to copy image %(image_id)s to volume: %(volume_id)s, error: " +"%(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:461 +#, python-format +msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:475 +#, python-format +msgid "" +"Downloaded image %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s successfully." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:512 +#, python-format +msgid "" +"Creating volume glance metadata for volume %(volume_id)s backed by image " +"%(image_id)s with: %(vol_metadata)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:526 +#, python-format +msgid "" +"Cloning %(volume_id)s from image %(image_id)s at location " +"%(image_location)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:552 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(updates)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:574 +#, python-format +msgid "Unable to create volume. Volume driver %s not initialized" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:588 +#, python-format +msgid "" +"Volume %(volume_id)s: being created using %(functor)s with specification:" +" %(volume_spec)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:611 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with creation provided " +"model %(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:619 +#, python-format +msgid "Volume %s: creating export" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:633 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with driver provided model " +"%(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:680 +#, python-format +msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" +msgstr "" + +#~ msgid "rbd export-diff failed - %s" +#~ msgstr "" + +#~ msgid "rbd import-diff failed - %s" +#~ msgstr "" + +#~ msgid "%s is not on GPFS. Perhaps GPFS not mounted." +#~ msgstr "" + +#~ msgid "Folder %s does not exist, it seems it was already deleted." +#~ msgstr "" + +#~ msgid "No 'os-update_readonly_flag' was specified in request." +#~ msgstr "" + +#~ msgid "Volume 'readonly' flag must be specified in request as a boolean." +#~ msgstr "" + +#~ msgid "ISER provider_location not stored, using discovery" +#~ msgstr "" + +#~ msgid "Could not find iSER export for volume %s" +#~ msgstr "" + +#~ msgid "ISER Discovery: Found %s" +#~ msgstr "" + +#~ msgid "Failed to access the device on the path %(path)s: %(error)s." +#~ msgstr "" + +#~ msgid "iSER device not found at %s" +#~ msgstr "" + +#~ msgid "" +#~ msgstr "" + +#~ msgid "Found iSER node %(host_device)s (after %(tries)s rescans)." +#~ msgstr "" + +#~ msgid "Skipping ensure_export. No iser_target provisioned for volume: %s" +#~ msgstr "" + +#~ msgid "Skipping remove_export. No iser_target provisioned for volume: %s" +#~ msgstr "" + +#~ msgid "Downloading image: %s from glance image server." +#~ msgstr "" + +#~ msgid "Uploading image: %s to the Glance image server." 
+#~ msgstr "" + +#~ msgid "Invalid request body" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: prefix %s" +#~ msgstr "" + +#~ msgid "Schedule volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete schedule volume using flow: %s" +#~ msgstr "" + +#~ msgid "Create volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete create volume workflow" +#~ msgstr "" + +#~ msgid "Expected volume result not found" +#~ msgstr "" + +#~ msgid "Manager volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete manager volume workflow" +#~ msgstr "" + +#~ msgid "Unable to update stats, driver is uninitialized" +#~ msgstr "" + +#~ msgid "Bad reponse from server: %s" +#~ msgstr "" + +#~ msgid "%(flow)s has moved into state %(state)s from state %(old_state)s" +#~ msgstr "" + +#~ msgid "No request spec, will not reschedule" +#~ msgstr "" + +#~ msgid "No retry filter property or associated retry info, will not reschedule" +#~ msgstr "" + +#~ msgid "Retry info not present, will not reschedule" +#~ msgstr "" + +#~ msgid "Clear capabilities" +#~ msgstr "" + +#~ msgid "This usually means the volume was never succesfully created." +#~ msgstr "" + +#~ msgid "setting LU uppper (end) limit to %s" +#~ msgstr "" + +#~ msgid "Can't find lun or lun goup in array" +#~ msgstr "" + +#~ msgid "Volume to be restored to is smaller than the backup to be restored" +#~ msgstr "" + +#~ msgid "Volume driver '%(driver)s' not initialized." +#~ msgstr "" + +#~ msgid "in looping call" +#~ msgstr "" + +#~ msgid "Is the appropriate service running?" +#~ msgstr "" + +#~ msgid "Could not find another host" +#~ msgstr "" + +#~ msgid "Not enough allocatable volume gigabytes remaining" +#~ msgstr "" + +#~ msgid "Unable to update stats on non-intialized Volume Group: %s" +#~ msgstr "" + +#~ msgid "do_setup: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "migrate_volume started with more than one vdisk copy" +#~ msgstr "" + +#~ msgid "migrate_volume: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "Selected datastore: %s for the volume." +#~ msgstr "" + +#~ msgid "There are no valid datastores present under %s." +#~ msgstr "" + +#~ msgid "Unable to create volume, driver not initialized" +#~ msgstr "" + +#~ msgid "Migration %(migration_id)s could not be found." +#~ msgstr "" + +#~ msgid "Bad driver response status: %(status)s" +#~ msgstr "" + +#~ msgid "Instance %(instance_id)s could not be found." +#~ msgstr "" + +#~ msgid "Volume retype failed: %(reason)s" +#~ msgstr "" + +#~ msgid "SIGTERM received" +#~ msgstr "" + +#~ msgid "Child %(pid)d exited with status %(code)d" +#~ msgstr "" + +#~ msgid "_wait_child %d" +#~ msgstr "" + +#~ msgid "wait wrap.failed %s" +#~ msgstr "" + +#~ msgid "" +#~ "Report interval must be less than " +#~ "service down time. Current config: " +#~ ". Setting " +#~ "service_down_time to: %(new_service_down_time)s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target for volume %(name)s." +#~ msgstr "" + +#~ msgid "Updating iscsi target: %s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target %(name)s: %(e)s" +#~ msgstr "" + +#~ msgid "Caught '%(exception)s' exception." +#~ msgstr "" + +#~ msgid "Get code level failed" +#~ msgstr "" + +#~ msgid "do_setup: Could not get system name" +#~ msgstr "" + +#~ msgid "Failed to get license information." 
+#~ msgstr "" + +#~ msgid "do_setup: No configured nodes" +#~ msgstr "" + +#~ msgid "enter: _get_chap_secret_for_host: host name %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_chap_secret_for_host: host name " +#~ "%(host_name)s with secret %(chap_secret)s" +#~ msgstr "" + +#~ msgid "" +#~ "_create_host: Cannot clean host name. " +#~ "Host name is not unicode or string" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: %s" +#~ msgstr "" + +#~ msgid "leave: _get_host_from_connector: host %s" +#~ msgstr "" + +#~ msgid "enter: _create_host: host %s" +#~ msgstr "" + +#~ msgid "_create_host: No connector ports" +#~ msgstr "" + +#~ msgid "leave: _create_host: host %(host)s - %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +#~ msgstr "" + +#~ msgid "" +#~ "storwize_svc_multihostmap_enabled is set to " +#~ "False, Not allow multi host mapping" +#~ msgstr "" + +#~ msgid "volume %s mapping to multi host" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _map_vol_to_host: LUN %(result_lun)s, " +#~ "volume %(volume_name)s, host %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "leave: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "_create_host failed to return the host name." +#~ msgstr "" + +#~ msgid "_get_host_from_connector failed to return the host name for connector" +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to any host found." +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: Multiple mappings of " +#~ "volume %(vol_name)s found, no host " +#~ "specified." +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to host %(host_name)s found" +#~ msgstr "" + +#~ msgid "protocol must be specified as ' iSCSI' or ' FC'" +#~ msgstr "" + +#~ msgid "enter: _create_vdisk: vdisk %s " +#~ msgstr "" + +#~ msgid "" +#~ "_create_vdisk %(name)s - did not find success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find success " +#~ "message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find mapping " +#~ "id in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to prepare FlashCopy" +#~ " from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "Unexecpted mapping status %(status)s for " +#~ "mapping %(id)s. Attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Mapping %(id)s prepare failed to " +#~ "complete within the allotted %(to)d " +#~ "seconds timeout. Terminating." 
+#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s with " +#~ "exception %(ex)s" +#~ msgstr "" + +#~ msgid "_prepare_fc_map: %s" +#~ msgstr "" + +#~ msgid "" +#~ "_start_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "enter: _run_flashcopy: execute FlashCopy from" +#~ " source %(source)s to target %(target)s" +#~ msgstr "" + +#~ msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +#~ msgstr "" + +#~ msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %(src_vdisk)s (%(src_id)s) does not exist" +#~ msgstr "" + +#~ msgid "" +#~ "_create_copy: cannot get source vdisk " +#~ "%(src)s capacity from vdisk attributes " +#~ "%(attr)s" +#~ msgstr "" + +#~ msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_flashcopy_mapping_attributes: mapping " +#~ "%(fc_map_id)s, attributes %(attributes)s" +#~ msgstr "" + +#~ msgid "enter: _is_vdisk_defined: vdisk %s " +#~ msgstr "" + +#~ msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +#~ msgstr "" + +#~ msgid "enter: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "warning: Tried to delete vdisk %s but it does not exist." +#~ msgstr "" + +#~ msgid "leave: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "" +#~ "_add_vdisk_copy %(name)s - did not find" +#~ " success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "_get_vdisk_copy_attrs: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "_get_pool_attrs: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "enter: _execute_command_and_parse_attributes: command %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _execute_command_and_parse_attributes:\n" +#~ "command: %(cmd)s\n" +#~ "attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "_get_hdr_dic: attribute headers and values do not match.\n" +#~ " Headers: %(header)s\n" +#~ " Values: %(row)s" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ "stdout: %(out)s\n" +#~ "stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "Did not find expected column in %(fun)s: %(hdr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Volume size %(size)s cannot be lesser" +#~ " than the snapshot size %(snap_size)s. " +#~ "They must be >= original snapshot " +#~ "size." +#~ msgstr "" + +#~ msgid "" +#~ "Clones currently disallowed when %(size)s " +#~ "< %(source_size)s. They must be >= " +#~ "original volume size." +#~ msgstr "" + +#~ msgid "" +#~ "Size of specified image %(image_size)s " +#~ "is larger than volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "" +#~ "Image minDisk size %(min_disk)s is " +#~ "larger than the volume size " +#~ "%(volume_size)s." 
+#~ msgstr "" + +#~ msgid "Updating volume %(volume_id)s with %(update)s" +#~ msgstr "" + +#~ msgid "Volume %s: resetting 'creating' status failed" +#~ msgstr "" + +#~ msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s" +#~ msgstr "" + +#~ msgid "Marking volume %s as bootable" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting download of %(image_id)s " +#~ "(%(image_location)s) to volume %(volume_id)s" +#~ msgstr "" + +#~ msgid "" +#~ "Downloaded image %(image_id)s (%(image_location)s)" +#~ " to volume %(volume_id)s successfully" +#~ msgstr "" + +#~ msgid "" +#~ "Creating volume glance metadata for " +#~ "volume %(volume_id)s backed by image " +#~ "%(image_id)s with: %(vol_metadata)s" +#~ msgstr "" + +#~ msgid "" +#~ "Cloning %(volume_id)s from image %(image_id)s" +#~ " at location %(image_location)s" +#~ msgstr "" + diff --git a/cinder/locale/he/LC_MESSAGES/cinder.po b/cinder/locale/he/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..4d1756c3f4 --- /dev/null +++ b/cinder/locale/he/LC_MESSAGES/cinder.po @@ -0,0 +1,9902 @@ +# Hebrew translations for cinder. +# Copyright (C) 2014 ORGANIZATION +# This file is distributed under the same license as the cinder project. +# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Cinder\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-02-03 06:16+0000\n" +"PO-Revision-Date: 2014-01-24 11:45+0000\n" +"Last-Translator: openstackjenkins \n" +"Language-Team: Hebrew " +"(http://www.transifex.com/projects/p/openstack/language/he/)\n" +"Plural-Forms: nplurals=2; plural=(n != 1)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:102 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:66 cinder/brick/exception.py:33 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:88 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:107 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:112 +#, python-format +msgid "Volume driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:116 +#, python-format +msgid "Backup driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:120 +#, python-format +msgid "Connection to glance failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:124 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:129 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:133 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:137 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:141 +msgid "Volume driver not ready." +msgstr "" + +#: cinder/exception.py:145 cinder/brick/exception.py:74 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:150 +#, python-format +msgid "Invalid snapshot: %(reason)s" +msgstr "" + +#: cinder/exception.py:154 +#, python-format +msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." 
+msgstr "" + +#: cinder/exception.py:159 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:163 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:167 +msgid "The results are invalid." +msgstr "" + +#: cinder/exception.py:171 +#, python-format +msgid "Invalid input received: %(reason)s" +msgstr "" + +#: cinder/exception.py:175 +#, python-format +msgid "Invalid volume type: %(reason)s" +msgstr "" + +#: cinder/exception.py:179 +#, python-format +msgid "Invalid volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:183 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:187 +#, python-format +msgid "Invalid host: %(reason)s" +msgstr "" + +#: cinder/exception.py:193 cinder/brick/exception.py:81 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:197 +#, python-format +msgid "Invalid auth key: %(reason)s" +msgstr "" + +#: cinder/exception.py:201 +#, python-format +msgid "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" +msgstr "" + +#: cinder/exception.py:206 +msgid "Service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:210 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:214 +#, python-format +msgid "The device in the path %(path)s is unavailable: %(reason)s" +msgstr "" + +#: cinder/exception.py:218 +#, python-format +msgid "Expected a uuid but received %(uuid)s." +msgstr "" + +#: cinder/exception.py:222 cinder/brick/exception.py:68 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:228 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:232 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "" +"Volume %(volume_id)s has no administration metadata with key " +"%(metadata_key)s." +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Invalid metadata: %(reason)s" +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Invalid metadata size: %(reason)s" +msgstr "" + +#: cinder/exception.py:250 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:255 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:264 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:269 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s deletion is not allowed with volumes " +"present with the type." +msgstr "" + +#: cinder/exception.py:274 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:278 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:282 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:287 +#, python-format +msgid "No target id found for volume %(volume_id)s." 
+msgstr "" + +#: cinder/exception.py:291 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:295 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:328 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:332 +#, python-format +msgid "Unknown quota resources %(unknown)s." +msgstr "" + +#: cinder/exception.py:336 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:340 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:344 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:348 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:352 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:356 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:365 +#, python-format +msgid "Volume Type %(id)s already exists." +msgstr "" + +#: cinder/exception.py:369 +#, python-format +msgid "Volume type encryption for type %(type_id)s already exists." +msgstr "" + +#: cinder/exception.py:373 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:377 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:381 +#, python-format +msgid "Could not find parameter %(param)s" +msgstr "" + +#: cinder/exception.py:385 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:389 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:398 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:402 +#, python-format +msgid "Quota exceeded: code=%(code)s" +msgstr "" + +#: cinder/exception.py:409 +#, python-format +msgid "" +"Requested volume or snapshot exceeds allowed Gigabytes quota. Requested " +"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." 
+msgstr "" + +#: cinder/exception.py:415 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:419 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:423 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:427 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:432 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:436 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:440 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:444 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:449 +#, python-format +msgid "Glance metadata for volume/snapshot %(id)s cannot be found." +msgstr "" + +#: cinder/exception.py:453 +#, python-format +msgid "Failed to export for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:457 +#, python-format +msgid "Failed to create metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:461 +#, python-format +msgid "Failed to update metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:465 +#, python-format +msgid "Failed to copy metadata to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:469 +#, python-format +msgid "Failed to copy image to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:473 +msgid "Invalid Ceph args provided for backup rbd operation" +msgstr "" + +#: cinder/exception.py:477 +msgid "An error has occurred during backup operation" +msgstr "" + +#: cinder/exception.py:481 +msgid "Backup RBD operation failed" +msgstr "" + +#: cinder/exception.py:485 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:489 +msgid "Failed to identify volume backend." +msgstr "" + +#: cinder/exception.py:493 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "" + +#: cinder/exception.py:497 +#, python-format +msgid "Connection to swift failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:501 +#, python-format +msgid "Transfer %(transfer_id)s could not be found." +msgstr "" + +#: cinder/exception.py:505 +#, python-format +msgid "Volume migration failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:509 +#, python-format +msgid "SSH command injection detected: %(command)s" +msgstr "" + +#: cinder/exception.py:513 +#, python-format +msgid "QoS Specs %(specs_id)s already exists." +msgstr "" + +#: cinder/exception.py:517 +#, python-format +msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:522 +#, python-format +msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "No such QoS spec %(specs_id)s." +msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:536 +#, python-format +msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." 
+msgstr "" + +#: cinder/exception.py:541 +#, python-format +msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." +msgstr "" + +#: cinder/exception.py:546 +#, python-format +msgid "Invalid qos specs: %(reason)s" +msgstr "" + +#: cinder/exception.py:550 +#, python-format +msgid "QoS Specs %(specs_id)s is still associated with entities." +msgstr "" + +#: cinder/exception.py:554 +#, python-format +msgid "key manager error: %(reason)s" +msgstr "" + +#: cinder/exception.py:560 +msgid "Coraid Cinder Driver exception." +msgstr "" + +#: cinder/exception.py:564 +msgid "Failed to encode json data." +msgstr "" + +#: cinder/exception.py:568 +msgid "Login on ESM failed." +msgstr "" + +#: cinder/exception.py:572 +msgid "Relogin on ESM failed." +msgstr "" + +#: cinder/exception.py:576 +#, python-format +msgid "Group with name \"%(group_name)s\" not found." +msgstr "" + +#: cinder/exception.py:580 +#, python-format +msgid "ESM configure request failed: %(message)s." +msgstr "" + +#: cinder/exception.py:584 +#, python-format +msgid "Coraid ESM not available with reason: %(reason)s." +msgstr "" + +#: cinder/exception.py:589 +msgid "Zadara Cinder Driver exception." +msgstr "" + +#: cinder/exception.py:593 +#, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:597 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:601 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:605 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:609 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:613 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:618 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:622 +msgid "SolidFire Cinder Driver exception" +msgstr "" + +#: cinder/exception.py:626 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:630 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:636 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:641 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:645 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:649 cinder/exception.py:662 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:654 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:658 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/manager.py:133 +msgid "Notifying Schedulers of capabilities ..." +msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:105 +#, python-format +msgid "" +"Default quota for resource: %(res)s is set by the default quota flag: " +"quota_%(res)s, it is now deprecated. Please use the the default quota " +"class for default quota." 
+msgstr "" + +#: cinder/quota.py:748 +#, python-format +msgid "Created reservations %s" +msgstr "" + +#: cinder/quota.py:770 +#, python-format +msgid "Failed to commit reservations %s" +msgstr "" + +#: cinder/quota.py:790 +#, python-format +msgid "Failed to roll back reservations %s" +msgstr "" + +#: cinder/quota.py:876 +msgid "Cannot register resource" +msgstr "" + +#: cinder/quota.py:879 +msgid "Cannot register resources" +msgstr "" + +#: cinder/quota_utils.py:46 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume - " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/quota_utils.py:56 cinder/transfer/api.py:168 +#: cinder/volume/flows/api/create_volume.py:520 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/service.py:95 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:108 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:148 +#, python-format +msgid "" +"Report interval must be less than service down time. Current config " +"service_down_time: %(service_down_time)s, report_interval for this: " +"service is: %(report_interval)s. Setting global service_down_time to: " +"%(new_down_time)s" +msgstr "" + +#: cinder/service.py:216 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:255 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:270 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:276 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:298 +#, python-format +msgid "" +"Value of config option %(name)s_workers must be integer greater than 1. " +"Input value ignored." +msgstr "" + +#: cinder/service.py:373 +msgid "serve() can only be called once" +msgstr "" + +#: cinder/service.py:379 cinder/openstack/common/service.py:166 +#: cinder/openstack/common/service.py:384 +msgid "Full set of CONF:" +msgstr "" + +#: cinder/service.py:387 +#, python-format +msgid "%s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Can not translate %s to integer." 
+msgstr "" + +#: cinder/utils.py:127 +#, python-format +msgid "May specify only one of %s" +msgstr "" + +#: cinder/utils.py:212 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:228 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:412 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:423 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:698 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:759 +#, python-format +msgid "Volume driver %s not initialized" +msgstr "" + +#: cinder/wsgi.py:127 cinder/openstack/common/sslutils.py:50 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: cinder/wsgi.py:130 cinder/openstack/common/sslutils.py:53 +#, python-format +msgid "Unable to find ca_file : %s" +msgstr "" + +#: cinder/wsgi.py:133 cinder/openstack/common/sslutils.py:56 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: cinder/wsgi.py:136 cinder/openstack/common/sslutils.py:59 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:169 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:206 +#, python-format +msgid "Started %(name)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:244 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:313 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." +msgstr "" + +#: cinder/api/common.py:92 cinder/api/common.py:126 cinder/volume/api.py:266 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:95 cinder/api/common.py:130 cinder/volume/api.py:263 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:120 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:134 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:162 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:189 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:182 +msgid "Initializing extension manager." 
+msgstr "" + +#: cinder/api/extensions.py:197 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:235 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:236 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:240 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:256 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:262 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:276 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:287 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:356 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:266 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:463 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:786 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:907 +msgid "subclasses must implement construct()!" +msgstr "" + +#: cinder/api/contrib/admin_actions.py:81 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:227 +#: cinder/api/contrib/volume_transfer.py:157 +#: cinder/api/contrib/volume_transfer.py:193 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:224 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:60 +msgid "Snapshot not found." 
+msgstr "" + +#: cinder/api/contrib/hosts.py:86 cinder/api/openstack/wsgi.py:245 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:136 +#, python-format +msgid "Host '%s' could not be found." +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:168 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:180 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:206 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:214 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:111 +msgid "Please specify a name for QoS specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:220 +msgid "Failed to disassociate qos specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:222 +msgid "Qos specs still in use." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:298 +#: cinder/api/contrib/qos_specs_manage.py:351 +msgid "Volume Type id must not be None." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:72 +msgid "Missing required element quota_class_set in request body." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:81 +msgid "Quota class limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:85 +msgid "Quota class limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:60 +msgid "Quota limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quotas.py:65 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:100 +msgid "Missing required element quota_set in request body." +msgstr "" + +#: cinder/api/contrib/quotas.py:111 +#, python-format +msgid "Bad key(s) in quota set: %s" +msgstr "" + +#: cinder/api/contrib/scheduler_hints.py:36 +msgid "Malformed scheduler_hints attribute" +msgstr "" + +#: cinder/api/contrib/services.py:84 +msgid "" +"Query by service parameter is deprecated. Please use binary parameter " +"instead." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:51 +msgid "'status' must be specified." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:61 +#, python-format +msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:67 +#, python-format +msgid "" +"Provided snapshot status %(provided)s not allowed for snapshot with " +"status %(current)s." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:79 +msgid "progress must be an integer percentage" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:101 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:105 +#: cinder/api/v1/snapshot_metadata.py:75 cinder/api/v1/volume_metadata.py:75 +#: cinder/api/v2/snapshot_metadata.py:75 cinder/api/v2/volume_metadata.py:74 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:108 +#: cinder/api/v1/snapshot_metadata.py:79 cinder/api/v1/volume_metadata.py:79 +#: cinder/api/v2/snapshot_metadata.py:79 cinder/api/v2/volume_metadata.py:78 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:150 +msgid "" +"Key names can only contain alphanumeric characters, underscores, periods," +" colons and hyphens." 
+msgstr "" + +#: cinder/api/contrib/volume_actions.py:99 +#, python-format +msgid "" +"Invalid request to attach volume to an instance %(instance_uuid)s and a " +"host %(host_name)s simultaneously" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:107 +msgid "Invalid request to attach volume to an invalid target" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:111 +msgid "" +"Invalid request to attach volume with an invalid mode. Attaching mode " +"should be 'rw' or 'ro'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:196 +msgid "Unable to fetch connection information from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:216 +msgid "Unable to terminate volume connection from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:229 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:237 +msgid "Bad value for 'force' parameter." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:240 +msgid "'force' is not string or bool." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:280 +msgid "New volume size must be specified as an integer." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:299 +msgid "Must specify readonly in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:307 +msgid "Bad value for 'readonly'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:311 +msgid "'readonly' not string or bool" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:325 +msgid "New volume type must be specified." +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:131 +msgid "Listing volume transfers" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:147 +#, python-format +msgid "Creating new volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:162 +#, python-format +msgid "Creating transfer of volume %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:183 +#, python-format +msgid "Accepting volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:196 +#, python-format +msgid "Accepting transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:217 +#, python-format +msgid "Delete transfer with id: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:64 +msgid "key_size must be non-negative" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:67 +msgid "key_size must be an integer" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:73 +msgid "provider must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:75 +msgid "control_location must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:83 +#, python-format +msgid "Valid control location are: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:111 +msgid "Cannot create encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:115 +msgid "Create body is not valid." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:157 +msgid "Cannot delete encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/middleware/auth.py:108 +msgid "Invalid service catalog json." 
+msgstr "" + +#: cinder/api/middleware/fault.py:44 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:53 cinder/api/openstack/wsgi.py:984 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/fault.py:69 +#, python-format +msgid "%(exception)s: %(explanation)s" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:55 cinder/api/middleware/sizelimit.py:64 +#: cinder/api/middleware/sizelimit.py:78 +msgid "Request is too large." +msgstr "" + +#: cinder/api/openstack/__init__.py:69 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:80 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:104 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:126 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." +msgstr "" + +#: cinder/api/openstack/wsgi.py:220 cinder/api/openstack/wsgi.py:634 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:639 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:677 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:682 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:685 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:793 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:799 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:803 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:914 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:917 cinder/api/openstack/wsgi.py:930 +#: cinder/api/v1/snapshot_metadata.py:53 cinder/api/v1/snapshot_metadata.py:71 +#: cinder/api/v1/snapshot_metadata.py:96 cinder/api/v1/snapshot_metadata.py:121 +#: cinder/api/v1/volume_metadata.py:53 cinder/api/v1/volume_metadata.py:71 +#: cinder/api/v1/volume_metadata.py:96 cinder/api/v1/volume_metadata.py:121 +#: cinder/api/v2/snapshot_metadata.py:53 cinder/api/v2/snapshot_metadata.py:71 +#: cinder/api/v2/snapshot_metadata.py:96 cinder/api/v2/snapshot_metadata.py:121 +#: cinder/api/v2/volume_metadata.py:52 cinder/api/v2/volume_metadata.py:70 +#: cinder/api/v2/volume_metadata.py:95 cinder/api/v2/volume_metadata.py:120 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:927 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:939 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:987 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." 
+msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:139 cinder/api/v2/limits.py:138 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:264 cinder/api/v2/limits.py:261 +msgid "This request was rate-limited." +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:37 cinder/api/v1/snapshot_metadata.py:117 +#: cinder/api/v1/snapshot_metadata.py:156 cinder/api/v2/snapshot_metadata.py:37 +#: cinder/api/v2/snapshot_metadata.py:117 +#: cinder/api/v2/snapshot_metadata.py:156 +msgid "snapshot does not exist" +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:139 +#: cinder/api/v1/snapshot_metadata.py:149 cinder/api/v1/volume_metadata.py:139 +#: cinder/api/v1/volume_metadata.py:149 cinder/api/v2/snapshot_metadata.py:139 +#: cinder/api/v2/snapshot_metadata.py:149 cinder/api/v2/volume_metadata.py:138 +#: cinder/api/v2/volume_metadata.py:148 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:119 cinder/api/v2/snapshots.py:120 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:184 +msgid "'volume_id' must be specified" +msgstr "" + +#: cinder/api/v1/snapshots.py:182 cinder/api/v2/snapshots.py:193 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:186 cinder/api/v2/snapshots.py:202 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:37 cinder/api/v1/volume_metadata.py:117 +#: cinder/api/v1/volume_metadata.py:156 cinder/api/v2/volume_metadata.py:36 +#: cinder/api/v2/volume_metadata.py:116 cinder/api/v2/volume_metadata.py:155 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:111 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:290 cinder/api/v2/volumes.py:228 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:344 cinder/api/v1/volumes.py:348 +#: cinder/api/v2/volumes.py:298 cinder/api/v2/volumes.py:302 +msgid "Invalid imageRef provided." 
+msgstr "" + +#: cinder/api/v1/volumes.py:388 cinder/api/v2/volumes.py:354 +#, python-format +msgid "snapshot id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:401 +#, python-format +msgid "source vol id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:412 cinder/api/v2/volumes.py:377 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/v1/volumes.py:496 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/snapshots.py:111 cinder/api/v2/snapshots.py:126 +#: cinder/api/v2/snapshots.py:267 +msgid "Snapshot could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:174 cinder/api/v2/snapshots.py:234 +#: cinder/api/v2/volumes.py:313 cinder/api/v2/volumes.py:419 +#, python-format +msgid "Missing required element '%s' in request body" +msgstr "" + +#: cinder/api/v2/snapshots.py:190 cinder/api/v2/volumes.py:217 +#: cinder/api/v2/volumes.py:234 cinder/api/v2/volumes.py:449 +msgid "Volume could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:230 cinder/api/v2/volumes.py:415 +msgid "Missing request body" +msgstr "" + +#: cinder/api/v2/types.py:70 +msgid "Volume type not found" +msgstr "" + +#: cinder/api/v2/volumes.py:237 +msgid "Volume cannot be deleted while in attached state" +msgstr "" + +#: cinder/api/v2/volumes.py:343 +msgid "Volume type not found." +msgstr "" + +#: cinder/api/v2/volumes.py:366 +#, python-format +msgid "source volume id:%s not found" +msgstr "" + +#: cinder/api/v2/volumes.py:472 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:66 +msgid "Backup status must be available or error" +msgstr "" + +#: cinder/backup/api.py:105 +msgid "Volume to be backed up must be available" +msgstr "" + +#: cinder/backup/api.py:140 +msgid "Backup status must be available" +msgstr "" + +#: cinder/backup/api.py:145 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:154 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:170 +msgid "Volume to be restored to must be available" +msgstr "" + +#: cinder/backup/api.py:176 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." +msgstr "" + +#: cinder/backup/api.py:181 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:97 +msgid "NULL host not allowed for volume backend lookup." +msgstr "" + +#: cinder/backup/manager.py:100 +#, python-format +msgid "Checking hostname '%s' for backend info." +msgstr "" + +#: cinder/backup/manager.py:107 +#, python-format +msgid "Backend not found in hostname (%s) so using default." +msgstr "" + +#: cinder/backup/manager.py:117 +#, python-format +msgid "Manager requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:120 cinder/backup/manager.py:132 +msgid "Fetching default backend." +msgstr "" + +#: cinder/backup/manager.py:123 +#, python-format +msgid "Volume manager for backend '%s' does not exist." +msgstr "" + +#: cinder/backup/manager.py:129 +#, python-format +msgid "Driver requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:147 +#, python-format +msgid "" +"Registering backend %(backend)s (host=%(host)s " +"backend_name=%(backend_name)s)." +msgstr "" + +#: cinder/backup/manager.py:154 +#, python-format +msgid "Registering default backend %s." 
+msgstr "" + +#: cinder/backup/manager.py:158 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)." +msgstr "" + +#: cinder/backup/manager.py:165 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s." +msgstr "" + +#: cinder/backup/manager.py:184 +msgid "Cleaning up incomplete backup operations." +msgstr "" + +#: cinder/backup/manager.py:189 +#, python-format +msgid "Resetting volume %s to available (was backing-up)." +msgstr "" + +#: cinder/backup/manager.py:194 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)." +msgstr "" + +#: cinder/backup/manager.py:206 +#, python-format +msgid "Resetting backup %s to error (was creating)." +msgstr "" + +#: cinder/backup/manager.py:212 +#, python-format +msgid "Resetting backup %s to available (was restoring)." +msgstr "" + +#: cinder/backup/manager.py:217 +#, python-format +msgid "Resuming delete on backup: %s." +msgstr "" + +#: cinder/backup/manager.py:225 +#, python-format +msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:237 +#, python-format +msgid "" +"Create backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:249 +#, python-format +msgid "" +"Create backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:282 +#, python-format +msgid "Create backup finished. backup: %s." +msgstr "" + +#: cinder/backup/manager.py:286 +#, python-format +msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:299 +#, python-format +msgid "" +"Restore backup aborted: expected volume status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:310 +#, python-format +msgid "" +"Restore backup aborted: expected backup status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:329 +#, python-format +msgid "" +"Restore backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:360 +#, python-format +msgid "" +"Restore backup finished, backup %(backup_id)s restored to volume " +"%(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:379 +#, python-format +msgid "Delete backup started, backup: %s." +msgstr "" + +#: cinder/backup/manager.py:386 +#, python-format +msgid "" +"Delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:399 +#, python-format +msgid "" +"Delete backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:422 +#, python-format +msgid "Delete backup finished, backup %s deleted." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:116 +msgid "" +"rbd striping not supported - ignoring configuration settings for rbd " +"striping" +msgstr "" + +#: cinder/backup/drivers/ceph.py:147 +#, python-format +msgid "invalid user '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:213 +msgid "backup_id required" +msgstr "" + +#: cinder/backup/drivers/ceph.py:224 +#, python-format +msgid "discarding %(length)s bytes from offset %(offset)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:232 +#, python-format +msgid "writing zeroes chunk %d" +msgstr "" + +#: cinder/backup/drivers/ceph.py:246 +#, python-format +msgid "transferring data between '%(src)s' and '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:250 +#, python-format +msgid "%(chunks)s chunks of %(bytes)s bytes to be transferred" +msgstr "" + +#: cinder/backup/drivers/ceph.py:269 +#, python-format +msgid "transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:279 +#, python-format +msgid "transferring remaining %s bytes" +msgstr "" + +#: cinder/backup/drivers/ceph.py:295 +#, python-format +msgid "creating base image '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:322 cinder/backup/drivers/ceph.py:603 +#, python-format +msgid "deleting backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:325 +msgid "no backup snapshot to delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:361 +#, python-format +msgid "trying diff format name format basename='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:369 +#, python-format +msgid "image %s not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:377 +#, python-format +msgid "base image still has %s snapshots so skipping base image delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:382 +#, python-format +msgid "deleting base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:389 +#, python-format +msgid "image busy, retrying %(retries)s more time(s) in %(delay)ss" +msgstr "" + +#: cinder/backup/drivers/ceph.py:394 +msgid "max retries reached - raising error" +msgstr "" + +#: cinder/backup/drivers/ceph.py:397 +#, python-format +msgid "base backup image='%s' deleted)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:407 +#, python-format +msgid "deleting source snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:453 +#, python-format +msgid "performing differential transfer from '%(src)s' to '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:478 +#, python-format +msgid "rbd diff op failed - (ret=%(ret)s stderr=%(stderr)s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:488 +#, python-format +msgid "image '%s' not found - trying diff format name" +msgstr "" + +#: cinder/backup/drivers/ceph.py:493 +#, python-format +msgid "diff format image '%s' not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:528 +#, python-format +msgid "using --from-snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:543 +#, python-format +msgid "source snap '%s' is stale so deleting" +msgstr "" + +#: cinder/backup/drivers/ceph.py:555 +#, python-format +msgid "" +"snap='%(snap)s' does not exist in base image='%(base)s' - aborting " +"incremental backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:566 +#, python-format +msgid "creating backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:586 +#, python-format +msgid "differential backup transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:595 +msgid "differential backup transfer failed" +msgstr "" + +#: 
cinder/backup/drivers/ceph.py:625 +#, python-format +msgid "creating base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:634 +msgid "copying data" +msgstr "" + +#: cinder/backup/drivers/ceph.py:694 +#, python-format +msgid "looking for snapshot of backup base '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:697 +#, python-format +msgid "backup base '%s' has no snapshots" +msgstr "" + +#: cinder/backup/drivers/ceph.py:704 +#, python-format +msgid "backup '%s' has no snapshot" +msgstr "" + +#: cinder/backup/drivers/ceph.py:708 +#, python-format +msgid "backup should only have one snapshot but instead has %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:713 +#, python-format +msgid "found snapshot '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:734 +msgid "need non-zero volume size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:751 +#, python-format +msgid "Starting backup of volume='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:764 +msgid "forcing full backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:776 +#, python-format +msgid "backup '%s' finished." +msgstr "" + +#: cinder/backup/drivers/ceph.py:834 +msgid "adjusting restore vol size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:846 +#, python-format +msgid "trying incremental restore from base='%(base)s' snap='%(snap)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:858 +msgid "differential restore failed, trying full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:869 +#, python-format +msgid "restore transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:916 +#, python-format +msgid "rbd has %s extents" +msgstr "" + +#: cinder/backup/drivers/ceph.py:938 +msgid "dest volume is original volume - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:959 +msgid "destination has extents - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:964 +#, python-format +msgid "no restore point found for backup='%s', forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:995 +msgid "forcing full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1005 +#, python-format +msgid "starting restore from Ceph backup=%(src)s to volume=%(dest)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1016 +msgid "volume_file does not support fileno() so skipping fsync()" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1021 +msgid "restore finished successfully." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:1023 +#, python-format +msgid "restore finished with error - %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1029 +#, python-format +msgid "delete started for backup=%s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1034 +msgid "rbd image not found but continuing anyway so that db entry can be removed" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1037 +#, python-format +msgid "delete '%s' finished with warning" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1039 +#, python-format +msgid "delete '%s' finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:106 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:123 +#, python-format +msgid "single_user auth mode enabled, but %(param)s not set" +msgstr "" + +#: cinder/backup/drivers/swift.py:141 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:146 +#, python-format +msgid "container %s does not exist" +msgstr "" + +#: cinder/backup/drivers/swift.py:151 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/drivers/swift.py:157 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:173 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:182 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:192 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:209 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/drivers/swift.py:214 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:219 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:224 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/drivers/swift.py:234 +#, python-format +msgid "volume size %d is invalid." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:248 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:271 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/drivers/swift.py:278 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:287 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/drivers/swift.py:291 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/drivers/swift.py:297 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:301 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:304 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:312 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/drivers/swift.py:328 cinder/backup/drivers/tsm.py:324 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/drivers/swift.py:345 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/drivers/swift.py:350 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:356 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/drivers/swift.py:362 +#, python-format +msgid "" +"restoring object from swift. backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:378 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/drivers/swift.py:401 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:409 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:423 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:428 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:432 cinder/backup/drivers/tsm.py:378 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:446 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:455 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:458 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:468 cinder/backup/drivers/tsm.py:440 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/backup/drivers/tsm.py:85 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to create device hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:143 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to obtain backup success notification from " +"server.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:173 +#, python-format +msgid "" +"restore: %(vol_id)s Failed.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:199 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a block device." +msgstr "" + +#: cinder/backup/drivers/tsm.py:206 +#, python-format +msgid "backup: %(vol_id)s Failed. Cannot obtain real path to device %(path)s." +msgstr "" + +#: cinder/backup/drivers/tsm.py:213 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a file." +msgstr "" + +#: cinder/backup/drivers/tsm.py:260 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to remove backup hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:286 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to TSM, volume path: " +"%(volume_path)s," +msgstr "" + +#: cinder/backup/drivers/tsm.py:298 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:308 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:338 +#, python-format +msgid "" +"restore: starting restore of backup from TSM to volume %(volume_id)s, " +"backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:352 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:362 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:413 +#, python-format +msgid "" +"delete: %(vol_id)s Failed to run dsmc with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:421 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments with " +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:432 +#, python-format +msgid "" +"delete: %(vol_id)s Failed with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/brick/exception.py:55 +#, python-format +msgid "Exception in string format operation. 
msg='%s'" +msgstr "" + +#: cinder/brick/exception.py:85 +msgid "We are unable to locate any Fibre Channel devices." +msgstr "" + +#: cinder/brick/exception.py:89 +msgid "Unable to find a Fibre Channel volume device." +msgstr "" + +#: cinder/brick/exception.py:93 +#, python-format +msgid "Volume device not found at %(device)s." +msgstr "" + +#: cinder/brick/exception.py:97 +#, python-format +msgid "Unable to find Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:101 +#, python-format +msgid "Failed to create Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:105 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:109 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:113 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:117 +#, python-format +msgid "Connect to volume via protocol %(protocol)s not supported." +msgstr "" + +#: cinder/brick/initiator/connector.py:127 +#, python-format +msgid "Invalid InitiatorConnector protocol specified %(protocol)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:140 +#, python-format +msgid "Failed to access the device on the path %(path)s: %(error)s %(info)s." +msgstr "" + +#: cinder/brick/initiator/connector.py:229 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. Try" +" number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:242 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:317 +#, python-format +msgid "Could not find the iSCSI Initiator File %s" +msgstr "" + +#: cinder/brick/initiator/connector.py:609 +msgid "We are unable to locate any Fibre Channel devices" +msgstr "" + +#: cinder/brick/initiator/connector.py:619 +#, python-format +msgid "Looking for Fibre Channel dev %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:629 +msgid "Fibre Channel volume device not found." +msgstr "" + +#: cinder/brick/initiator/connector.py:633 +#, python-format +msgid "Fibre volume not yet found. Will rescan & retry. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:649 +#, python-format +msgid "Found Fibre Channel volume %(name)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:658 +#, python-format +msgid "Multipath device discovered %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:776 +#, python-format +msgid "AoE volume not yet found at: %(path)s. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:789 +#, python-format +msgid "Found AoE device %(path)s (after %(tries)s rediscover)" +msgstr "" + +#: cinder/brick/initiator/connector.py:815 +#, python-format +msgid "aoe-discover: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:825 +#, python-format +msgid "aoe-revalidate %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:834 +#, python-format +msgid "aoe-flush %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:858 +msgid "" +"Connection details not present. RemoteFsClient may not initialize " +"properly." 
+msgstr "" + +#: cinder/brick/initiator/connector.py:915 +msgid "Invalid connection_properties specified no device_path attribute" +msgstr "" + +#: cinder/brick/initiator/linuxfc.py:50 cinder/brick/initiator/linuxfc.py:56 +msgid "systool is not installed" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:99 +#: cinder/brick/initiator/linuxscsi.py:107 +#: cinder/brick/initiator/linuxscsi.py:124 +#, python-format +msgid "multipath call failed exit (%(code)s)" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:145 +#, python-format +msgid "Couldn't find multipath device %(line)s" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:149 +#, python-format +msgid "Found multipath device = %(mdev)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:140 +msgid "Attempting recreate of backing lun..." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:158 +#, python-format +msgid "" +"Failed to recover attempt to create iscsi backing lun for volume " +"id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:177 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:184 +#, python-format +msgid "" +"Created volume path %(vp)s,\n" +"content: %(vc)%" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:216 cinder/brick/iscsi/iscsi.py:365 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:227 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:258 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:262 +#, python-format +msgid "Volume path %s does not exist, nothing to remove." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:280 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:290 cinder/brick/iscsi/iscsi.py:550 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:375 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:469 +msgid "cinder-rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:489 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:513 cinder/brick/iscsi/iscsi.py:522 +#, python-format +msgid "Failed to create iscsi target for volume id:%s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:532 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:542 +#, python-format +msgid "Failed to remove iscsi target for volume id:%s." 
+msgstr "" + +#: cinder/brick/iscsi/iscsi.py:571 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 cinder/brick/local_dev/lvm.py:158 +#: cinder/brick/local_dev/lvm.py:474 cinder/brick/local_dev/lvm.py:503 +#: cinder/brick/local_dev/lvm.py:546 cinder/brick/local_dev/lvm.py:609 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 cinder/brick/local_dev/lvm.py:159 +#: cinder/brick/local_dev/lvm.py:475 cinder/brick/local_dev/lvm.py:504 +#: cinder/brick/local_dev/lvm.py:547 cinder/brick/local_dev/lvm.py:610 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 cinder/brick/local_dev/lvm.py:160 +#: cinder/brick/local_dev/lvm.py:476 cinder/brick/local_dev/lvm.py:505 +#: cinder/brick/local_dev/lvm.py:548 cinder/brick/local_dev/lvm.py:611 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, python-format +msgid "Unable to locate Volume Group %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:157 +msgid "Error querying thin pool about data_percent" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:370 +#, python-format +msgid "Unable to find VG: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:420 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:473 +msgid "Error creating Volume" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:489 +#, python-format +msgid "Unable to find LV: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:502 +msgid "Error creating snapshot" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:545 +msgid "Error activating LV" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:563 +#, python-format +msgid "Error reported running lvremove: CMD: %(command)s, RESPONSE: %(response)s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:568 +msgid "Attempting udev settle and retry of lvremove..." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:608 +msgid "Error extending Volume" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:39 +msgid "nfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:45 +msgid "glusterfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:86 +#, python-format +msgid "Already mounted: %s" +msgstr "" + +#: cinder/common/config.py:125 +msgid "Deploy v1 of the Cinder API." +msgstr "" + +#: cinder/common/config.py:128 +msgid "Deploy v2 of the Cinder API." +msgstr "" + +#: cinder/common/sqlalchemyutils.py:66 +#: cinder/openstack/common/db/sqlalchemy/utils.py:72 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:114 +#: cinder/openstack/common/db/sqlalchemy/utils.py:120 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/compute/nova.py:97 +#, python-format +msgid "Novaclient connection created using URL: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:63 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:190 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:843 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1842 +#, python-format +msgid "VolumeType %s deletion failed, VolumeType in use." 
+msgstr "" + +#: cinder/db/sqlalchemy/api.py:2530 +#, python-format +msgid "No backup with id %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2615 +msgid "Volume must be available" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2639 +#, python-format +msgid "Volume in unexpected state %s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2662 +#, python-format +msgid "" +"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " +"%(status)s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:37 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:64 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:240 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:269 +msgid "Downgrade from initial Cinder install is unsupported." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:49 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:74 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:105 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:45 +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:48 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:46 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:127 +msgid "Dropping foreign key reservations_ibfk_1 failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:133 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:140 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:147 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:60 +msgid "Exception while creating table 'volume_glance_metadata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:75 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:68 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:58 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:61 +msgid "transfers table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:31 +msgid "migrations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:61 +#, python-format +msgid "Table |%s| not created" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:37 +#, python-format +msgid "Exception while dropping table %s." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:100 +#, python-format +msgid "Exception while creating table %s." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:34 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:43 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:49 +#, python-format +msgid "Column |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:92 +msgid "encryption_key_id column not dropped from volumes" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:100 +msgid "encryption_key_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:105 +msgid "volume_type_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:113 +msgid "encryption table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:49 +msgid "Table quality_of_service_specs not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:60 +msgid "Added qos_specs_id column to volume type table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:85 +msgid "Dropping foreign key volume_types_ibfk_1 failed" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:93 +msgid "Dropping qos_specs_id column failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:100 +msgid "Dropping quality_of_service_specs table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:59 +msgid "volume_admin_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:48 +msgid "" +"Found existing 'default' entries in the quota_classes table. Skipping " +"insertion of default values." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:72 +msgid "Added default quota class data into the DB." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:74 +msgid "Default quota class data not inserted into the DB." +msgstr "" + +#: cinder/image/glance.py:161 cinder/image/glance.py:169 +#, python-format +msgid "Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:94 cinder/image/image_utils.py:199 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/image/image_utils.py:101 +#, python-format +msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:109 cinder/image/image_utils.py:192 +#, python-format +msgid "" +"Size is %(image_size)dGB and doesn't fit in a volume of size " +"%(volume_size)dGB." +msgstr "" + +#: cinder/image/image_utils.py:157 +#, python-format +msgid "" +"qemu-img is not installed and image is of type %s. Only RAW images can " +"be used if qemu-img is not installed." +msgstr "" + +#: cinder/image/image_utils.py:164 +msgid "" +"qemu-img is not installed and the disk format is not specified. Only RAW" +" images can be used if qemu-img is not installed." 
+msgstr "" + +#: cinder/image/image_utils.py:178 +#, python-format +msgid "Copying image from %(tmp)s to volume %(dest)s - size: %(size)s" +msgstr "" + +#: cinder/image/image_utils.py:206 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:224 +#, python-format +msgid "Converted to %(vol_format)s, but format is now %(file_format)s" +msgstr "" + +#: cinder/image/image_utils.py:260 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:78 +msgid "" +"config option keymgr.fixed_key has not been defined: some operations may " +"fail unexpectedly" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:80 +msgid "keymgr.fixed_key not defined" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:134 +#, python-format +msgid "Not deleting key %s" +msgstr "" + +#: cinder/openstack/common/eventlet_backdoor.py:140 +#, python-format +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/fileutils.py:64 +#, python-format +msgid "Reloading cached file %s" +msgstr "" + +#: cinder/openstack/common/gettextutils.py:252 +msgid "Message objects do not support addition." +msgstr "" + +#: cinder/openstack/common/gettextutils.py:261 +msgid "" +"Message objects do not support str() because they may contain non-ascii " +"characters. Please use unicode() or translate() instead." +msgstr "" + +#: cinder/openstack/common/imageutils.py:96 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:189 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:200 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:227 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:235 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/log.py:301 +#, python-format +msgid "Deprecated: %s" +msgstr "" + +#: cinder/openstack/common/log.py:402 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:453 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:623 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:82 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:89 +#: cinder/tests/brick/test_brick_connector.py:466 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:129 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:136 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:43 +#, python-format +msgid "Unexpected argument for periodic task creation: %(arg)s." 
+msgstr "" + +#: cinder/openstack/common/periodic_task.py:134 +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:139 +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:177 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:186 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." +msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:127 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/openstack/common/processutils.py:142 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:167 +#: cinder/openstack/common/processutils.py:239 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:345 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:179 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/openstack/common/processutils.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:318 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:220 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/openstack/common/processutils.py:224 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/openstack/common/service.py:175 +#: cinder/openstack/common/service.py:269 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:187 +msgid "Exception during rpc cleanup." 
+msgstr "" + +#: cinder/openstack/common/service.py:238 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:275 +msgid "Unhandled exception" +msgstr "" + +#: cinder/openstack/common/service.py:308 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/openstack/common/service.py:327 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/openstack/common/service.py:337 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/openstack/common/service.py:354 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/openstack/common/service.py:358 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/service.py:362 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/openstack/common/service.py:392 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/openstack/common/service.py:410 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/openstack/common/sslutils.py:98 +#, python-format +msgid "Invalid SSL version : %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:86 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/strutils.py:182 +#, python-format +msgid "Invalid string format: %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:189 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/openstack/common/versionutils.py:69 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and " +"may be removed in %(remove_in)s." +msgstr "" + +#: cinder/openstack/common/versionutils.py:73 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s and may be removed in " +"%(remove_in)s. It will not be superseded." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:29 +msgid "An unknown error occurred in crypto utils." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:36 +#, python-format +msgid "Block size of %(given)d is too big, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:45 +#, python-format +msgid "Length of %(given)d is too long, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/db/exception.py:44 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:487 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:538 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:610 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/utils.py:33 +msgid "Sort key supplied was not valid." +msgstr "" + +#: cinder/openstack/common/notifier/api.py:129 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:145 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:164 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. 
Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:105 +#, python-format +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:83 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:216 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:299 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:345 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "received %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:422 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:423 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:280 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:459 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:594 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:597 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:631 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:640 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:668 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." 
+msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:166 +#: cinder/openstack/common/rpc/impl_qpid.py:164 +msgid "Failed to process message... skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:477 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:499 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:536 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:552 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:606 +#: cinder/openstack/common/rpc/impl_qpid.py:507 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:624 +#: cinder/openstack/common/rpc/impl_qpid.py:522 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:628 +#: cinder/openstack/common/rpc/impl_qpid.py:526 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:667 +#: cinder/openstack/common/rpc/impl_qpid.py:561 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:84 +#, python-format +msgid "Invalid value for qpid_topology_version: %d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:455 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:461 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:474 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:534 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:101 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:136 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:137 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:138 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:146 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:158 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:200 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:205 +msgid "You cannot send on this socket." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:267 +#, python-format +msgid "Running func with context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:305 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:387 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:437 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:443 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:475 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:481 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:497 +#, python-format +msgid "Required IPC directory does not exist at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:506 +#, python-format +msgid "Permission denied to IPC directory at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:509 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:543 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:562 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:590 +msgid "Skipping topic registration. Already registered." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:597 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:649 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:662 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:675 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:678 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:681 +#, python-format +msgid "Received message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:682 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:691 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:698 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:721 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:724 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:728 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:731 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:771 +#, python-format +msgid "topic is %s." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:815 +#, python-format +msgid "rpc_zmq_matchmaker = %(orig)s is deprecated; use %(new)s instead" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#: cinder/openstack/common/rpc/matchmaker_ring.py:79 +#: cinder/openstack/common/rpc/matchmaker_ring.py:97 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:54 +#, python-format +msgid "extra_spec requirement '%(req)s' does not match '%(cap)s'" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:67 +#, python-format +msgid "%(host_state)s fails resource_type extra_specs requirements" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:43 +msgid "Re-scheduling is disabled." +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:52 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/scheduler/driver.py:69 +msgid "Must implement host_passes_filters" +msgstr "" + +#: cinder/scheduler/driver.py:74 +msgid "Must implement find_retype_host" +msgstr "" + +#: cinder/scheduler/driver.py:78 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:82 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:98 +#, python-format +msgid "cannot place volume %(id)s on %(host)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:114 +#, python-format +msgid "No valid hosts for volume %(id)s with type %(type)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:125 +#, python-format +msgid "" +"Current host not valid for volume %(id)s with type %(type)s, migration " +"not allowed" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:156 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:174 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:207 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:259 +#, python-format +msgid "Filtered %s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:276 +#, python-format +msgid "Choosing %s" +msgstr "" + +#: cinder/scheduler/host_manager.py:264 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:269 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:294 +#, python-format +msgid "volume service is down or disabled. (host: %s)" +msgstr "" + +#: cinder/scheduler/manager.py:63 +msgid "" +"ChanceScheduler and SimpleScheduler have been deprecated due to lack of " +"support for advanced features like: volume types, volume encryption, QoS " +"etc. These two schedulers can be fully replaced by FilterScheduler with " +"certain combination of filters and weighers." 
+msgstr "" + +#: cinder/scheduler/manager.py:98 cinder/scheduler/manager.py:100 +msgid "Failed to create scheduler manager volume flow" +msgstr "" + +#: cinder/scheduler/manager.py:159 +msgid "New volume type not specified in request_spec." +msgstr "" + +#: cinder/scheduler/manager.py:174 +#, python-format +msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." +msgstr "" + +#: cinder/scheduler/manager.py:192 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:68 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%s'" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:43 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:57 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:53 +msgid "No volume_id provided to populate a request_spec from" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:116 +#, python-format +msgid "Failed to schedule_create_volume: %(cause)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:135 +#, python-format +msgid "Failed notifying on %(topic)s payload %(payload)s" +msgstr "" + +#: cinder/tests/fake_driver.py:57 cinder/volume/driver.py:784 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:76 cinder/volume/driver.py:884 +#, python-format +msgid "FAKE ISER: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:97 +msgid "local_path not implemented" +msgstr "" + +#: cinder/tests/fake_driver.py:124 cinder/tests/fake_driver.py:129 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:70 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:78 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:94 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:97 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:58 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_netapp_nfs.py:360 +#, python-format +msgid "Share %(share)s and file name %(file_name)s" +msgstr "" + +#: cinder/tests/test_rbd.py:768 cinder/volume/drivers/rbd.py:175 +msgid "flush() not supported in this version of librbd" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:260 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1507 +#, python-format +msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1510 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1515 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:60 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:61 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: 
cinder/tests/test_xiv_ds8k.py:102 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/tests/api/contrib/test_backups.py:741 +msgid "Invalid input" +msgstr "" + +#: cinder/tests/integrated/test_login.py:29 +#, python-format +msgid "volume: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:32 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:42 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:50 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:58 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:100 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:103 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:121 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:148 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:159 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:166 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/transfer/api.py:68 +msgid "Volume in unexpected state" +msgstr "" + +#: cinder/transfer/api.py:102 cinder/volume/api.py:367 +msgid "status must be available" +msgstr "" + +#: cinder/transfer/api.py:119 +#, python-format +msgid "Failed to create transfer record for %s" +msgstr "" + +#: cinder/transfer/api.py:136 +#, python-format +msgid "Attempt to transfer %s with invalid auth key." +msgstr "" + +#: cinder/transfer/api.py:156 cinder/volume/flows/api/create_volume.py:508 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/transfer/api.py:182 +#, python-format +msgid "Failed to update quota donating volume transfer id %s" +msgstr "" + +#: cinder/transfer/api.py:199 +#, python-format +msgid "Volume %s has been transferred."
+msgstr "" + +#: cinder/volume/api.py:143 +#, python-format +msgid "Unable to query if %s is in the availability zone set" +msgstr "" + +#: cinder/volume/api.py:171 cinder/volume/api.py:173 +msgid "Failed to create api volume flow" +msgstr "" + +#: cinder/volume/api.py:202 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:214 +#, python-format +msgid "Volume status must be available or error, but current status is: %s" +msgstr "" + +#: cinder/volume/api.py:224 +msgid "Volume cannot be deleted while migrating" +msgstr "" + +#: cinder/volume/api.py:229 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:285 cinder/volume/api.py:350 +#: cinder/volume/qos_specs.py:240 cinder/volume/volume_types.py:67 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:370 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:377 +msgid "status must be in-use to detach" +msgstr "" + +#: cinder/volume/api.py:388 +msgid "Volume status must be available to reserve" +msgstr "" + +#: cinder/volume/api.py:464 +msgid "Snapshot cannot be created while volume is migrating" +msgstr "" + +#: cinder/volume/api.py:468 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:490 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:502 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:553 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/api.py:581 cinder/volume/flows/api/create_volume.py:208 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:585 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:589 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:720 cinder/volume/api.py:772 +msgid "Volume status must be available/in-use." +msgstr "" + +#: cinder/volume/api.py:723 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/api.py:752 +msgid "Volume status must be available to extend." +msgstr "" + +#: cinder/volume/api.py:757 +#, python-format +msgid "" +"New size for extend must be greater than current size. (current: " +"%(size)s, extended: %(new_size)s)" +msgstr "" + +#: cinder/volume/api.py:778 +msgid "Volume is already part of an active migration" +msgstr "" + +#: cinder/volume/api.py:784 +msgid "volume must not have snapshots" +msgstr "" + +#: cinder/volume/api.py:797 +#, python-format +msgid "No available service named %s" +msgstr "" + +#: cinder/volume/api.py:803 +msgid "Destination host must be different than current host" +msgstr "" + +#: cinder/volume/api.py:833 +msgid "Source volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:837 +msgid "Destination volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:842 +#, python-format +msgid "Destination has migration_status %(stat)s, expected %(exp)s." +msgstr "" + +#: cinder/volume/api.py:853 +msgid "Volume status must be available to update readonly flag." 
+msgstr "" + +#: cinder/volume/api.py:862 +#, python-format +msgid "Unable to update type due to incorrect status on volume: %s" +msgstr "" + +#: cinder/volume/api.py:868 +#, python-format +msgid "Volume %s is already part of an active migration." +msgstr "" + +#: cinder/volume/api.py:874 +#, python-format +msgid "migration_policy must be 'on-demand' or 'never', passed: %s" +msgstr "" + +#: cinder/volume/api.py:887 +#, python-format +msgid "Invalid volume_type passed: %s" +msgstr "" + +#: cinder/volume/api.py:900 +#, python-format +msgid "New volume_type same as original: %s" +msgstr "" + +#: cinder/volume/api.py:915 +msgid "Retype cannot change encryption requirements" +msgstr "" + +#: cinder/volume/api.py:927 +msgid "Retype cannot change front-end qos specs for in-use volumes" +msgstr "" + +#: cinder/volume/driver.py:189 cinder/volume/drivers/netapp/nfs.py:174 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:282 +#, python-format +msgid "copy_data_between_volumes %(src)s -> %(dest)s." +msgstr "" + +#: cinder/volume/driver.py:295 cinder/volume/driver.py:309 +#, python-format +msgid "Failed to attach volume %(vol)s" +msgstr "" + +#: cinder/volume/driver.py:327 +#, python-format +msgid "Failed to copy volume %(src)s to %(dest)d" +msgstr "" + +#: cinder/volume/driver.py:340 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:358 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:394 +#, python-format +msgid "Unable to access the backend storage via the path %(path)s." +msgstr "" + +#: cinder/volume/driver.py:433 +#, python-format +msgid "Creating a new backup for volume %s." +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Restoring backup %(backup)s to volume %(volume)s." +msgstr "" + +#: cinder/volume/driver.py:474 +msgid "Extend volume not implemented" +msgstr "" + +#: cinder/volume/driver.py:533 cinder/volume/drivers/emc/emc_smis_iscsi.py:113 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:546 +#, python-format +msgid "ISCSI discovery attempt failed for:%s" +msgstr "" + +#: cinder/volume/driver.py:548 +#, python-format +msgid "Error from iscsiadm -m discovery: %s" +msgstr "" + +#: cinder/volume/driver.py:595 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:599 cinder/volume/drivers/emc/emc_smis_iscsi.py:156 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:696 +msgid "The volume driver requires the iSCSI initiator name in the connector." +msgstr "" + +#: cinder/volume/driver.py:726 cinder/volume/driver.py:845 +#: cinder/volume/drivers/eqlx.py:247 cinder/volume/drivers/lvm.py:359 +#: cinder/volume/drivers/zadara.py:650 +#: cinder/volume/drivers/emc/emc_smis_common.py:859 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:235 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:602 +#: cinder/volume/drivers/netapp/iscsi.py:1032 +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/nexenta/iscsi.py:538 +#: cinder/volume/drivers/windows/windows.py:205 +msgid "Updating volume stats" +msgstr "" + +#: cinder/volume/driver.py:924 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:203 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." 
+msgstr "" + +#: cinder/volume/manager.py:209 +msgid "" +"ThinLVMVolumeDriver is deprecated, please configure LVMISCSIDriver and " +"lvm_type=thin. Continuing with those settings." +msgstr "" + +#: cinder/volume/manager.py:228 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)" +msgstr "" + +#: cinder/volume/manager.py:235 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s" +msgstr "" + +#: cinder/volume/manager.py:244 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:257 +#, python-format +msgid "Failed to re-export volume %s: setting to error state" +msgstr "" + +#: cinder/volume/manager.py:264 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:271 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:273 +#, python-format +msgid "" +"Error encountered during re-exporting phase of driver initialization: " +"%(name)s" +msgstr "" + +#: cinder/volume/manager.py:283 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:328 cinder/volume/manager.py:330 +msgid "Failed to create manager volume flow" +msgstr "" + +#: cinder/volume/manager.py:374 cinder/volume/manager.py:391 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:380 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:389 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:394 +#, python-format +msgid "Cannot delete volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:422 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:427 +#, python-format +msgid "volume %s: glance metadata deleted" +msgstr "" + +#: cinder/volume/manager.py:430 +#, python-format +msgid "no glance metadata found for volume %s" +msgstr "" + +#: cinder/volume/manager.py:434 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:451 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:462 +#, python-format +msgid "snapshot %(snap_id)s: creating" +msgstr "" + +#: cinder/volume/manager.py:490 +#, python-format +msgid "" +"Failed updating %(snapshot_id)s metadata using the provided volumes " +"%(volume_id)s metadata" +msgstr "" + +#: cinder/volume/manager.py:496 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:508 cinder/volume/manager.py:518 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:526 +#, python-format +msgid "Cannot delete snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:556 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:559 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:579 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:583 +msgid "being attached by another host" +msgstr "" + +#: cinder/volume/manager.py:587 +msgid "being attached by different mode" +msgstr "" + +#: cinder/volume/manager.py:590 +msgid "status must be available or attaching" +msgstr "" + +#: cinder/volume/manager.py:698 +#, python-format +msgid 
"Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "" + +#: cinder/volume/manager.py:760 +#, python-format +msgid "Unable to fetch connection information from backend: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:807 +#, python-format +msgid "Unable to terminate volume connection: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:854 +msgid "failed to create new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:857 +msgid "timeout creating new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:880 +#, python-format +msgid "Failed to copy volume %(vol1)s to %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:909 +#, python-format +msgid "" +"migrate_volume_completion: completing migration for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:921 +#, python-format +msgid "" +"migrate_volume_completion is cleaning up an error for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:940 +#, python-format +msgid "Failed to delete migration source vol %(vol)s: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:976 +#, python-format +msgid "volume %s: calling driver migrate_volume" +msgstr "" + +#: cinder/volume/manager.py:1016 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/manager.py:1024 +#, python-format +msgid "" +"Unable to update stats, %(driver_name)s -%(driver_version)s " +"%(config_group)s driver is uninitialized." +msgstr "" + +#: cinder/volume/manager.py:1044 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/manager.py:1091 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to extend volume by %(s_size)sG, " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/manager.py:1103 +#, python-format +msgid "volume %s: extending" +msgstr "" + +#: cinder/volume/manager.py:1105 +#, python-format +msgid "volume %s: extended successfully" +msgstr "" + +#: cinder/volume/manager.py:1107 +#, python-format +msgid "volume %s: Error trying to extend volume" +msgstr "" + +#: cinder/volume/manager.py:1169 +msgid "Failed to update usages while retyping volume." +msgstr "" + +#: cinder/volume/manager.py:1170 +msgid "Failed to get old volume type quota reservations" +msgstr "" + +#: cinder/volume/manager.py:1190 +#, python-format +msgid "Volume %s: retyped succesfully" +msgstr "" + +#: cinder/volume/manager.py:1193 +#, python-format +msgid "" +"Volume %s: driver error when trying to retype, falling back to generic " +"mechanism." +msgstr "" + +#: cinder/volume/manager.py:1204 +msgid "Retype requires migration but is not allowed." +msgstr "" + +#: cinder/volume/manager.py:1212 +msgid "Volume must not have snapshots." 
+msgstr "" + +#: cinder/volume/qos_specs.py:57 +#, python-format +msgid "Valid consumer of QoS specs are: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:84 cinder/volume/qos_specs.py:105 +#: cinder/volume/qos_specs.py:155 cinder/volume/qos_specs.py:197 +#: cinder/volume/qos_specs.py:211 cinder/volume/qos_specs.py:225 +#: cinder/volume/volume_types.py:43 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:123 cinder/volume/qos_specs.py:140 +#: cinder/volume/qos_specs.py:272 cinder/volume/volume_types.py:52 +#: cinder/volume/volume_types.py:99 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/qos_specs.py:156 +#, python-format +msgid "Failed to get all associations of qos specs %s" +msgstr "" + +#: cinder/volume/qos_specs.py:189 +#, python-format +msgid "" +"Type %(type_id)s is already associated with another qos specs: " +"%(qos_specs_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:198 +#, python-format +msgid "Failed to associate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:212 +#, python-format +msgid "Failed to disassociate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:226 +#, python-format +msgid "Failed to disassociate qos specs %s." +msgstr "" + +#: cinder/volume/qos_specs.py:284 cinder/volume/volume_types.py:111 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/utils.py:144 +#, python-format +msgid "" +"Incorrect value error: %(blocksize)s, it may indicate that " +"'volume_dd_blocksize' was configured incorrectly. Fall back to default." +msgstr "" + +#: cinder/volume/volume_types.py:130 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:131 +#: cinder/volume/drivers/block_device.py:143 cinder/volume/drivers/lvm.py:654 +#: cinder/volume/drivers/lvm.py:669 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:157 cinder/volume/drivers/lvm.py:687 +#, python-format +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:183 cinder/volume/drivers/lvm.py:483 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:200 cinder/volume/drivers/lvm.py:504 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:272 cinder/volume/drivers/lvm.py:227 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:287 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:311 cinder/volume/drivers/lvm.py:300 +#: cinder/volume/drivers/zadara.py:509 cinder/volume/drivers/nexenta/nfs.py:189 +#, python-format +msgid "Creating clone of volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:380 +msgid "No free disk" +msgstr "" + +#: cinder/volume/drivers/block_device.py:393 +msgid "No big enough free disk" +msgstr "" + +#: cinder/volume/drivers/coraid.py:84 +#, python-format +msgid "Invalid ESM url scheme \"%s\". Supported https only." +msgstr "" + +#: cinder/volume/drivers/coraid.py:111 +msgid "Invalid REST handle name. Expected path." 
+msgstr "" + +#: cinder/volume/drivers/coraid.py:134 +#, python-format +msgid "Call to json.loads() failed: %(ex)s. Response: %(resp)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:224 +msgid "Session is expired. Relogin on ESM." +msgstr "" + +#: cinder/volume/drivers/coraid.py:244 +msgid "Reply is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:246 +msgid "Error message is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:284 +#, python-format +msgid "Coraid Appliance ping failed: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:297 +#, python-format +msgid "Volume \"%(name)s\" created with VSX LUN \"%(lun)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:311 +#, python-format +msgid "Volume \"%s\" deleted." +msgstr "" + +#: cinder/volume/drivers/coraid.py:315 +#, python-format +msgid "Resize volume \"%(name)s\" to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:319 +#, python-format +msgid "Repository for volume \"%(name)s\" found: \"%(repo)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:333 +#, python-format +msgid "Volume \"%(name)s\" resized. New size is %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:385 +msgid "Cannot create clone volume in different repository." +msgstr "" + +#: cinder/volume/drivers/coraid.py:505 +#, python-format +msgid "Initialize connection %(shelf)s/%(lun)s for %(name)s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:139 +#, python-format +msgid "" +"CLI output\n" +"%s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:154 +msgid "Reading CLI MOTD" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:158 +#, python-format +msgid "Setting CLI terminal width: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:162 +#, python-format +msgid "Sending CLI command: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:169 +msgid "Error executing EQL command" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:199 +#, python-format +msgid "EQL-driver: executing \"%s\"" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:208 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:383 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:215 cinder/volume/drivers/san/san.py:149 +#, python-format +msgid "Error running SSH command: %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:282 +#, python-format +msgid "Volume %s does not exist, it may have already been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:300 +#, python-format +msgid "EQL-driver: Setup is complete, group IP is %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:304 +msgid "Failed to setup the Dell EqualLogic driver" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:320 +#, python-format +msgid "Failed to create volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:329 +#, python-format +msgid "Volume %s was not found while trying to delete it" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:333 +#, python-format +msgid "Failed to delete volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:348 +#, python-format +msgid "Failed to create snapshot of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:361 +#, python-format +msgid "Failed to create volume from snapshot %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:374 +#, python-format +msgid "Failed to create clone of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:384 +#, python-format +msgid "Failed to delete snapshot %(snap)s of volume %(vol)s" +msgstr "" + +#: 
cinder/volume/drivers/eqlx.py:405 +#, python-format +msgid "Failed to initialize connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:415 +#, python-format +msgid "Failed to terminate connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:436 +#, python-format +msgid "Volume %s was not found! It may have been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:440 +#, python-format +msgid "Failed to ensure export of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:459 +#, python-format +msgid "Failed to extend_volume %(name)s from %(current_size)sGB to %(new_size)sGB" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:86 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:91 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:103 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:161 +#, python-format +msgid "Cloning volume %(src)s to volume %(dst)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:166 +msgid "Volume status must be 'available'." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:202 cinder/volume/drivers/nfs.py:122 +#: cinder/volume/drivers/netapp/nfs.py:753 +#, python-format +msgid "cast to %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:216 +msgid "Snapshot status must be \"available\" to clone." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:238 +#, python-format +msgid "snapshot: %(snap)s, volume: %(vol)s, volume_size: %(size)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:257 +#, python-format +msgid "will copy from snapshot at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:275 cinder/volume/drivers/nfs.py:172 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:373 +#, python-format +msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:403 +#, python-format +msgid "nova call result: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:405 +msgid "Call to Nova to create snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:427 +msgid "Nova returned \"error\" status while creating snapshot." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:431 +#, python-format +msgid "Status of snapshot %(id)s is now %(status)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:444 +#, python-format +msgid "Timed out while waiting for Nova update for creation of snapshot %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:456 +#, python-format +msgid "create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:457 +#, python-format +msgid "volume id: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:532 +msgid "'active' must be present when writing snap_info." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:562 +#, python-format +msgid "deleting snapshot %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:566 +msgid "Volume status must be \"available\" or \"in-use\"." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:582 +#, python-format +msgid "" +"Snapshot record for %s is not present, allowing snapshot_delete to " +"proceed."
+msgstr "" + +#: cinder/volume/drivers/glusterfs.py:587 +#, python-format +msgid "snapshot_file for this snap is %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:608 +#, python-format +msgid "No base file found for %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:625 +#, python-format +msgid "No %(base_id)s found for %(file)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:680 +#, python-format +msgid "No file found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:690 +#, python-format +msgid "No snap found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:701 +#, python-format +msgid "No file depends on %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:727 +#, python-format +msgid "Check condition failed: %s expected to be None." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:778 +msgid "Call to Nova delete snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:796 +#, python-format +msgid "status of snapshot %s is still \"deleting\"... waiting" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:802 +#, python-format +msgid "Unable to delete snapshot %(id)s, status: %(status)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:815 +#, python-format +msgid "Timed out while waiting for Nova update for deletion of snapshot %(id)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:904 +#, python-format +msgid "%s must be a valid raw or qcow2 image." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:967 +msgid "Extend volume is only supported for this driver when no snapshots exist." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:975 +#, python-format +msgid "Unrecognized backing format: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:990 +#, python-format +msgid "creating new volume at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:993 +#, python-format +msgid "file already exists at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1019 cinder/volume/drivers/nfs.py:159 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1021 +#, python-format +msgid "Available shares: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1038 +#, python-format +msgid "" +"GlusterFS share at %(dir)s is not writable by the Cinder volume service. " +"Snapshot operations will not be supported." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:96 +#, python-format +msgid "GPFS is not active. Detailed output: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:97 +#, python-format +msgid "GPFS is not running - state: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:140 +msgid "Option gpfs_mount_point_base is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:147 +msgid "Option gpfs_images_share_mode is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:153 +msgid "Option gpfs_images_dir is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:160 +#, python-format +msgid "" +"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " +"belong to different file systems" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:169 +#, python-format +msgid "" +"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in " +"cluster daemon level %(cur)s - must be at least at level %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:183 +#, python-format +msgid "%s must be an absolute path." 
+msgstr "" + +#: cinder/volume/drivers/gpfs.py:188 +#, python-format +msgid "%s is not a directory." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:197 +#, python-format +msgid "" +"The GPFS filesystem %(fs)s is not at the required release level. Current" +" level is %(cur)s, must be at least %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:556 +#, python-format +msgid "Failed to resize volume %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:604 +#, python-format +msgid "mkfs failed on volume %(vol)s, error message was: %(err)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:637 +#, python-format +msgid "" +"%s cannot be accessed. Verify that GPFS is active and file system is " +"mounted." +msgstr "" + +#: cinder/volume/drivers/lvm.py:189 +#, python-format +msgid "Unabled to delete due to existing snapshot for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:215 +#, python-format +msgid "Volume device file path %s does not exist." +msgstr "" + +#: cinder/volume/drivers/lvm.py:221 +#, python-format +msgid "Size for volume: %s not found, cannot secure delete." +msgstr "" + +#: cinder/volume/drivers/lvm.py:262 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:361 +#, python-format +msgid "Unable to update stats on non-initialized Volume Group: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:462 +#, python-format +msgid "Error creating iSCSI target, retrying creation for target: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:482 +#, python-format +msgid "volume_info:%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:518 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:519 cinder/volume/drivers/lvm.py:724 +#: cinder/volume/drivers/huawei/rest_common.py:1225 +#, python-format +msgid "%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:573 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/nfs.py:109 +msgid "Driver specific implementation needs to return mount_point_base." +msgstr "" + +#: cinder/volume/drivers/nfs.py:263 +#, python-format +msgid "Expected volume size was %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:264 +#, python-format +msgid " but size is now %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:361 +#, python-format +msgid "%s is already mounted" +msgstr "" + +#: cinder/volume/drivers/nfs.py:421 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:426 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/nfs.py:431 +#, python-format +msgid "NFS config 'nfs_oversub_ratio' invalid. Must be > 0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:439 +#, python-format +msgid "NFS config 'nfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:493 +#, python-format +msgid "Selected %s as target nfs share." 
+msgstr "" + +#: cinder/volume/drivers/nfs.py:526 +#, python-format +msgid "%s is above nfs_used_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:529 +#, python-format +msgid "%s is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:532 +#, python-format +msgid "%s reserved space is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/rbd.py:160 +#, python-format +msgid "Invalid argument - whence=%s not supported" +msgstr "" + +#: cinder/volume/drivers/rbd.py:164 +msgid "Invalid argument" +msgstr "" + +#: cinder/volume/drivers/rbd.py:183 +msgid "fileno() not supported by RBD()" +msgstr "" + +#: cinder/volume/drivers/rbd.py:210 +#, python-format +msgid "error opening rbd image %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:259 +msgid "rados and rbd python libraries not found" +msgstr "" + +#: cinder/volume/drivers/rbd.py:265 +msgid "error connecting to ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:346 cinder/volume/drivers/sheepdog.py:178 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:377 +#, python-format +msgid "clone depth exceeds limit of %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:411 +#, python-format +msgid "maximum clone depth (%d) has been reached - flattening source volume" +msgstr "" + +#: cinder/volume/drivers/rbd.py:423 +#, python-format +msgid "flattening source volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:435 +#, python-format +msgid "creating snapshot='%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:445 +#, python-format +msgid "cloning '%(src_vol)s@%(src_snap)s' to '%(dest)s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:459 +msgid "clone created successfully" +msgstr "" + +#: cinder/volume/drivers/rbd.py:468 +#, python-format +msgid "creating volume '%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:484 +#, python-format +msgid "flattening %(pool)s/%(img)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:490 +#, python-format +msgid "cloning %(pool)s/%(img)s@%(snap)s to %(dst)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:527 +msgid "volume has no backup snaps" +msgstr "" + +#: cinder/volume/drivers/rbd.py:550 +#, python-format +msgid "volume %s is not a clone" +msgstr "" + +#: cinder/volume/drivers/rbd.py:568 +#, python-format +msgid "deleting parent snapshot %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:579 +#, python-format +msgid "deleting parent %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:593 +#, python-format +msgid "volume %s no longer exists in backend" +msgstr "" + +#: cinder/volume/drivers/rbd.py:609 +msgid "volume has clone snapshot(s)" +msgstr "" + +#: cinder/volume/drivers/rbd.py:625 +#, python-format +msgid "deleting rbd volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:629 +msgid "" +"ImageBusy error raised while deleting rbd volume. This may have been " +"caused by a connection from a client that has crashed and, if so, may be " +"resolved by retrying the delete after 30 seconds has elapsed." 
+msgstr "" + +#: cinder/volume/drivers/rbd.py:642 +msgid "volume is a clone so cleaning references" +msgstr "" + +#: cinder/volume/drivers/rbd.py:696 +#, python-format +msgid "connection data: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:705 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:709 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:712 +msgid "Not an rbd snapshot" +msgstr "" + +#: cinder/volume/drivers/rbd.py:724 +#, python-format +msgid "not cloneable: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:728 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:733 +msgid "rbd image clone requires image format to be 'raw' but image {0} is '{1}'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:747 +#, python-format +msgid "Unable to open image %(loc)s: %(err)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:817 +msgid "volume backup complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:830 +msgid "volume restore complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:840 cinder/volume/drivers/sheepdog.py:195 +#, python-format +msgid "Failed to Extend Volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:845 cinder/volume/drivers/sheepdog.py:200 +#: cinder/volume/drivers/windows/windows.py:223 +#, python-format +msgid "Extend volume from %(old_size)s GB to %(new_size)s GB." +msgstr "" + +#: cinder/volume/drivers/scality.py:67 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:78 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:84 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:105 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:139 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:59 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:64 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:144 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:151 +#, python-format +msgid "" +"Failed to make httplib connection SolidFire Cluster: %s (verify san_ip " +"settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:154 +#, python-format +msgid "Failed to make httplib connection: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:161 +#, python-format +msgid "" +"Request to SolidFire cluster returned bad status: %(status)s / %(reason)s" +" (check san_login/san_password settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:166 +#, python-format +msgid "HTTP request failed, with status: %(status)s and reason: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:177 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:183 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:187 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:189 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:195 +#, python-format +msgid "Detected 
xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:202 +#: cinder/volume/drivers/solidfire.py:271 +#: cinder/volume/drivers/solidfire.py:366 +#, python-format +msgid "API response: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:222 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:253 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:315 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:398 +msgid "Failed to get model update from clone" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:410 +#, python-format +msgid "Failed volume create: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:425 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:460 +#, python-format +msgid "Failed to get SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:469 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:478 +#, python-format +msgid "Volume %s, not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:481 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:550 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:554 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:556 +msgid "This usually means the volume was never successfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:569 +#, python-format +msgid "Failed to delete SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:572 +#: cinder/volume/drivers/solidfire.py:646 +#: cinder/volume/drivers/solidfire.py:709 +#: cinder/volume/drivers/solidfire.py:734 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:575 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:579 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:587 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:638 +msgid "Entering SolidFire extend_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:660 +msgid "Leaving SolidFire extend_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:665 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:673 +msgid "Failed to get updated stats" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:703 +#: cinder/volume/drivers/solidfire.py:728 +msgid "Entering SolidFire attach_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:773 +msgid "Leaving SolidFire transfer volume" +msgstr "" + +#: cinder/volume/drivers/zadara.py:236 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:260 +#, python-format +msgid "Operation completed. 
%(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:357 +#, python-format +msgid "Pool %(name)s: %(total)sGB total, %(free)sGB free" +msgstr "" + +#: cinder/volume/drivers/zadara.py:408 cinder/volume/drivers/zadara.py:531 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:438 +#, python-format +msgid "Create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:445 cinder/volume/drivers/zadara.py:490 +#: cinder/volume/drivers/zadara.py:516 +#, python-format +msgid "Volume %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:456 +#, python-format +msgid "Delete snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:464 +#, python-format +msgid "snapshot: original volume %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:472 +#, python-format +msgid "snapshot: snapshot %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:483 +#, python-format +msgid "Creating volume from snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:496 +#, python-format +msgid "Snapshot %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:614 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:40 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:79 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:83 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:91 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:98 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:107 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:115 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:130 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:137 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:144 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:152 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:157 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:167 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:177 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:188 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:197 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:218 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:230 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:241 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:257 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:266 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:278 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:287 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:292 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:302 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:312 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:321 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:342 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. 
Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:354 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:365 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:381 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:390 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:402 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:411 +msgid "Entering delete_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:413 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:420 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:430 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:438 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:442 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:456 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:465 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:472 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:476 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:488 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:497 +#: cinder/volume/drivers/emc/emc_smis_common.py:567 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:502 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:518 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:527 +#, python-format +msgid "" +"Error Create Snapshot: %(snapshot)s Volume: %(volume)s Error: " +"%(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:535 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:541 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:545 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:551 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:559 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:574 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:590 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:599 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:611 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:621 +#, python-format +msgid "Create export: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:626 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:648 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:663 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:674 +#, python-format +msgid "Error mapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:678 +#, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:694 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:707 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:711 +#, python-format +msgid "HidePaths for volume %s completed successfully." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:724 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:739 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:744 +#, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:757 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:770 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:775 +#, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:781 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:790 +#: cinder/volume/drivers/emc/emc_smis_common.py:820 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:804 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:810 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:834 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:840 +#, python-format +msgid "Volume %s is already mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:852 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:884 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:887 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:903 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:906 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:928 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:948 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:952 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:959 +msgid "Cannot connect to ECOM server" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:971 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:984 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:997 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1010 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1054 +#, python-format +msgid "Pool %(storage_type)s is not found." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1060 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1066 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1082 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1114 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1117 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1130 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1158 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1184 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1188 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1248 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1289 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1302 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1314 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1326 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1361 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1404 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1409 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1419 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1441 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1463 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1491 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1520 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1526 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1538 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1548 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1550 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1566 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:152 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:161 +#, python-format +msgid "Cannot find device number for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:191 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:198 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:215 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:70 +#, python-format +msgid "Range: start LU: %(start)s, end LU: %(end)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:84 +#, python-format +msgid "setting LU upper (end) limit to %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:92 +#, python-format +msgid "%(element)s: %(val)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:103 cinder/volume/drivers/hds/hds.py:105 +#, python-format +msgid "XML exception reading parameter: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:178 +#, python-format +msgid "portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:197 +#, python-format +msgid "No configuration found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:250 +#, python-format +msgid "HDP not found: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:289 +#, python-format +msgid "iSCSI portal not found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:327 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:355 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is cloned." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:372 +#, python-format +msgid "LUN %(lun)s extended to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:395 +#, python-format +msgid "delete lun %(lun)s on %(name)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:480 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created from snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:503 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is created as snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:522 +#, python-format +msgid "LUN %s is deleted." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:57 +msgid "_instantiate_driver: configuration not found." 
+msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:64 +#, python-format +msgid "" +"_instantiate_driver: Loading %(protocol)s driver for Huawei OceanStor " +"%(product)s series storage arrays." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:84 +#, python-format +msgid "" +"\"Product\" or \"Protocol\" is illegal. \"Product\" should be set to " +"either T, Dorado or HVS. \"Protocol\" should be set to either iSCSI or " +"FC. Product: %(product)s Protocol: %(protocol)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:74 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s host: %(host)s initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:92 +#: cinder/volume/drivers/huawei/huawei_t.py:461 +#, python-format +msgid "initialize_connection: Target FC ports WWNS: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:101 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(ini)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:159 +#: cinder/volume/drivers/huawei/rest_common.py:1278 +#, python-format +msgid "" +"_get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " +"check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:206 +#: cinder/volume/drivers/huawei/rest_common.py:1083 +#, python-format +msgid "_get_tgt_iqn: iSCSI IP is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:234 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:248 +#, python-format +msgid "" +"_get_iscsi_tgt_port_info: Failed to get iSCSI port info. Please make sure" +" the iSCSI port IP %s is configured in array." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:323 +#: cinder/volume/drivers/huawei/huawei_t.py:552 +#, python-format +msgid "" +"terminate_connection: volume: %(vol)s, host: %(host)s, connector: " +"%(initiator)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:351 +#, python-format +msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:436 +msgid "validate_connector: The FC driver requires thewwpns in the connector." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:443 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:578 +#, python-format +msgid "_remove_fc_ports: FC port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:40 +#, python-format +msgid "parse_xml_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:129 +#, python-format +msgid "_get_host_os_type: Host %(ip)s OS type is %(os)s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:59 +#, python-format +msgid "HVS Request URL: %(url)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:60 +#, python-format +msgid "HVS Request Data: %(data)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:73 +#, python-format +msgid "HVS Response Data: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:75 +#, python-format +msgid "Bad response from server: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:82 +msgid "JSON transfer error" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:102 +#, python-format +msgid "Login error, reason is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:166 +#, python-format +msgid "" +"%(err)s\n" +"result: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:173 +#, python-format +msgid "%s \"data\" was not in result." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:208 +msgid "Can't find the Qos policy in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:246 +msgid "Can't find lun or lun group in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:280 +#, python-format +msgid "Invalid resource pool: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:298 +#, python-format +msgid "Get pool info error, pool name is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:327 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:354 +#, python-format +msgid "_stop_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:474 +#, python-format +msgid "" +"_mapping_hostgroup_and_lungroup: lun_group: %(lun_group)sview_id: " +"%(view_id)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:511 +#: cinder/volume/drivers/huawei/rest_common.py:543 +#, python-format +msgid "initiator name:%(initiator_name)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:527 +#, python-format +msgid "host lun id is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:553 +#, python-format +msgid "the free wwns %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:574 +#, python-format +msgid "the fc server properties is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:688 +#, python-format +msgid "JSON transfer data error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:874 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:937 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:964 +#, python-format +msgid "" +"PrefetchType config is wrong. PrefetchType must in 1,2,3,4. fetchtype " +"is:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:970 +msgid "Use default prefetch fetchtype. Prefetch fetchtype:Intelligent." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:982 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal.LUNcopy name: " +"%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1056 +#, python-format +msgid "" +"_get_iscsi_port_info: Failed to get iscsi port info through config IP " +"%(ip)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1101 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1124 +#, python-format +msgid "_parse_volume_type: type id: %(type_id)s config parameter is: %(params)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1157 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the configuration file " +"%(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1162 +#, python-format +msgid "The config parameters are: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1239 +#: cinder/volume/drivers/huawei/ssh_common.py:118 +#: cinder/volume/drivers/huawei/ssh_common.py:1265 +#, python-format +msgid "_check_conf_file: Config file invalid. %s must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1246 +#: cinder/volume/drivers/huawei/ssh_common.py:125 +msgid "_check_conf_file: Config file invalid. StoragePool must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1256 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1300 +msgid "Can not find lun in array" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:54 +#, python-format +msgid "ssh_read: Read SSH timeout. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:70 +msgid "No response message. Please check system status." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:101 +#: cinder/volume/drivers/huawei/ssh_common.py:1249 +msgid "do_setup" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:135 +#: cinder/volume/drivers/huawei/ssh_common.py:1287 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType is invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:169 +#, python-format +msgid "_get_login_info: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:224 +#, python-format +msgid "create_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:242 +#, python-format +msgid "" +"_name_translate: Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:279 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the element in configuration " +"file %(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:373 +#: cinder/volume/drivers/huawei/ssh_common.py:1451 +#, python-format +msgid "LUNType must be \"Thin\" or \"Thick\". LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:395 +msgid "" +"_parse_conf_lun_params: Use default prefetch type. 
Prefetch type: " +"Intelligent" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:421 +#, python-format +msgid "" +"_get_maximum_capacity_pool_id: Failed to get pool id. Please check config" +" file and make sure the StoragePool %s is created in storage array." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:436 +#, python-format +msgid "CLI command: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:466 +#, python-format +msgid "" +"_execute_cli: Can not connect to IP %(old)s, try to connect to the other " +"IP %(new)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:501 +#, python-format +msgid "_execute_cli: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:511 +#, python-format +msgid "delete_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:516 +#, python-format +msgid "delete_volume: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:570 +#, python-format +msgid "" +"create_volume_from_snapshot: snapshot name: %(snapshot)s, volume name: " +"%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:580 +#, python-format +msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:650 +#, python-format +msgid "_wait_for_luncopy: LUNcopy %(luncopyname)s status is %(status)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:688 +#, python-format +msgid "create_cloned_volume: src volume: %(src)s, tgt volume: %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:697 +#, python-format +msgid "Source volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:739 +#, python-format +msgid "" +"extend_volume: extended volume name: %(extended_name)s new added volume " +"name: %(added_name)s new added volume size: %(added_size)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:747 +#, python-format +msgid "extend_volume: volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:779 +#, python-format +msgid "create_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:785 +msgid "create_snapshot: Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:792 +#, python-format +msgid "create_snapshot: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:855 +#, python-format +msgid "delete_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:865 +#, python-format +msgid "" +"delete_snapshot: Can not delete snapshot %s for it is a source LUN of " +"LUNCopy." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:873 +#, python-format +msgid "delete_snapshot: Snapshot %(snap)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:916 +#, python-format +msgid "" +"%(func)s: %(msg)s\n" +"CLI command: %(cmd)s\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:933 +#, python-format +msgid "map_volume: Volume %s was not found." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1079 +#, python-format +msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1102 +#, python-format +msgid "remove_map: Host %s does not exist." 
+msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1106 +#, python-format +msgid "remove_map: Volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1119 +#, python-format +msgid "remove_map: No map between host %(host)s and volume %(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1138 +#, python-format +msgid "" +"_delete_map: There are IOs accessing the system. Retry to delete host map" +" %(mapid)s 10s later." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1146 +#, python-format +msgid "" +"_delete_map: Failed to delete host map %(mapid)s.\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1185 +msgid "_update_volume_stats: Updating volume stats." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1277 +msgid "_check_conf_file: Config file invalid. StoragePool must be specified." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1311 +msgid "" +"_get_device_type: The driver only supports Dorado5100 and Dorado 2100 G2 " +"now." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1389 +#, python-format +msgid "" +"create_volume_from_snapshot: %(device)s does not support create volume " +"from snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1396 +#, python-format +msgid "create_cloned_volume: %(device)s does not support clone volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1404 +#, python-format +msgid "extend_volume: %(device)s does not support extend volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1413 +#, python-format +msgid "create_snapshot: %(device)s does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:132 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:142 +#, python-format +msgid "Failed getting details for pool %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:179 +msgid "do_setup: No configured nodes." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:182 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:186 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:190 +msgid "Unable to determine system name" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:193 +msgid "Unable to determine system id" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:196 +msgid "Unable to determine pool extent size" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:203 +#: cinder/volume/drivers/netapp/iscsi.py:122 +#: cinder/volume/drivers/netapp/nfs.py:639 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:157 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:209 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:217 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:225 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:235 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:254 +msgid "The connector does not contain the required information." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:277 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:297 +msgid "CHAP secret exists for host but CHAP is disabled" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:302 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:314 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:316 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:333 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:342 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:365 +msgid "" +"Could not get FC connection information for the host-volume connection. " +"Is the host configured properly for FC connections?" 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:380 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:385 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:403 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:411 +msgid "terminate_connection: Failed to get host name from connector." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:421 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:447 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:459 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:470 +#, python-format +msgid "enter: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:474 +msgid "extend_volume: Extending a volume with snapshots is not supported." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:481 +#, python-format +msgid "leave: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:497 +#, python-format +msgid "enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:523 +#, python-format +msgid "leave: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:540 +#, python-format +msgid "" +"enter: retype: id=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:581 +#, python-format +msgid "" +"exit: retype: ild=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:622 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:623 +msgid "_update_volume_stats: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:44 +#, python-format +msgid "Could not find key in output of command %(cmd)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:64 +#, python-format +msgid "Failed to get code level (%s)." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:86 +#, python-format +msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:143 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:165 +#, python-format +msgid "Failed to find host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:178 +#, python-format +msgid "enter: get_host_from_connector: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:207 +#, python-format +msgid "leave: get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:218 +#, python-format +msgid "enter: create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:224 +msgid "create_host: Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:235 +msgid "create_host: No initiators or wwpns supplied." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:265 +#, python-format +msgid "leave: create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:275 +#, python-format +msgid "enter: map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:301 +#, python-format +msgid "" +"leave: map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host " +"%(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:311 +#, python-format +msgid "enter: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:318 +#, python-format +msgid "unmap_vol_from_host: No mapping of volume %(vol_name)s to any host found." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:324 +#, python-format +msgid "" +"unmap_vol_from_host: Multiple mappings of volume %(vol_name)s found, no " +"host specified." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:336 +#, python-format +msgid "" +"unmap_vol_from_host: No mapping of volume %(vol_name)s to host %(host) " +"found." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:348 +#, python-format +msgid "leave: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:377 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:383 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:390 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:397 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:402 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:408 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:417 +#, python-format +msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:452 +msgid "Protocol must be specified as ' iSCSI' or ' FC'." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:495 +#, python-format +msgid "enter: create_vdisk: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:498 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:525 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping%(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:535 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within theallotted %(to)d " +"seconds timeout. Terminating." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:544 +#, python-format +msgid "" +"enter: run_flashcopy: execute FlashCopy from source %(source)s to target " +"%(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:552 +#, python-format +msgid "leave: run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:572 +#, python-format +msgid "Loopcall: _check_vdisk_fc_mappings(), vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:595 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:631 +#, python-format +msgid "Calling _ensure_vdisk_no_fc_mappings: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:639 +#, python-format +msgid "enter: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:641 +#, python-format +msgid "Tried to delete non-existant vdisk %s." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:645 +#, python-format +msgid "leave: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:649 +#, python-format +msgid "enter: create_copy: snapshot %(src)s to %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:654 +#, python-format +msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:669 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt)s from vdisk %(src)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:691 +msgid "migrate_volume started without a vdisk copy in the expected pool." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:743 +#, python-format +msgid "" +"Ignore change IO group as storage code level is %(code_level)s, below " +"then 6.4.0.0" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:35 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:211 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:244 +#, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:53 +#, python-format +msgid "Expected no output from CLI command %(cmd)s, got %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:65 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:256 +#, python-format +msgid "" +"Failed to parse CLI output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:142 +msgid "Must pass wwpn or host to lsfabric." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:158 +#, python-format +msgid "Did not find success message nor error for %(fun)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:163 +msgid "" +"storwize_svc_multihostmap_enabled is set to False, not allowing multi " +"host mapping." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:347 +#, python-format +msgid "Did not find expected key %(key)s in %(fun)s: %(raw)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:382 +#, python-format +msgid "" +"Unexpected CLI response: header/row mismatch. header: %(header)s, row: " +"%(row)s" +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:419 +#, python-format +msgid "No element by given name %s." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:440 +msgid "Not a valid value for NaElement." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:444 +msgid "NaElement name cannot be null." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:468 +msgid "Type cannot be converted into NaElement." 
+msgstr "" + +#: cinder/volume/drivers/netapp/common.py:75 +msgid "Required configuration not found" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:103 +#, python-format +msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:109 +#, python-format +msgid "Storage family %s is not supported" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:116 +#, python-format +msgid "No default storage protocol found for storage family %(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:123 +#, python-format +msgid "" +"Protocol %(storage_protocol)s is not supported for storage family " +"%(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:130 +#, python-format +msgid "" +"NetApp driver of family %(storage_family)s and protocol " +"%(storage_protocol)s loaded" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:139 +msgid "Only loading netapp drivers supported." +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:158 +#, python-format +msgid "" +"The configured NetApp driver is deprecated. Please refer the link to " +"resolve the issue '%s'." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:69 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:105 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:150 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:166 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:175 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:191 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:227 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:232 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:238 +#, python-format +msgid "Failed to get LUN target details for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:249 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:252 +#, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:290 +#, python-format +msgid "Snapshot %s deletion successful" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:310 +#: cinder/volume/drivers/netapp/iscsi.py:565 +#: cinder/volume/drivers/netapp/nfs.py:99 +#: cinder/volume/drivers/netapp/nfs.py:206 +#, python-format +msgid "Resizing %s failed. Cleaning volume." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:325 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:412 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:431 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:511 +msgid "Object is not a NetApp LUN." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:543 +#, python-format +msgid "Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:545 +#, python-format +msgid "Error getting lun attribute. Exception: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:600 +#, python-format +msgid "No need to extend volume %s as it is already the requested new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:606 +#, python-format +msgid "Resizing lun %s directly to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:633 +#, python-format +msgid "Lun %(path)s geometry failed. Message - %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:662 +#, python-format +msgid "Moving lun %(name)s to %(new_name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:677 +#, python-format +msgid "Resizing lun %s using sub clone to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:684 +#, python-format +msgid "%s cannot be sub clone resized as it is hosted on compressed volume" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:690 +#, python-format +msgid "%s cannot be sub clone resized as it contains no blocks." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:707 +#, python-format +msgid "Post clone resize lun %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:718 +#, python-format +msgid "Failure staging lun %s to tmp." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:723 +#, python-format +msgid "Failure moving new cloned lun to %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:727 +#, python-format +msgid "Failure deleting staged tmp lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:730 +#, python-format +msgid "Unknown exception in post clone resize lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:732 +#, python-format +msgid "Exception details: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:736 +msgid "Getting lun block count." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:741 +#, python-format +msgid "Failure getting lun info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:785 +#, python-format +msgid "Failed to get vol with required size and extra specs for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:796 +#, python-format +msgid "Error provisioning vol %(name)s on %(volume)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:841 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:982 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:986 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1087 +msgid "Cluster ssc is not updated. No volume stats found." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1149 +#: cinder/volume/drivers/netapp/nfs.py:1080 +msgid "Unsupported ONTAP version. ONTAP version 7.3.1 and above is supported." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1153 +#: cinder/volume/drivers/netapp/nfs.py:1084 +#: cinder/volume/drivers/netapp/utils.py:320 +msgid "Api version could not be determined." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1164 +#, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1273 +#, python-format +msgid "Error finding luns for volume %s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1390 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1393 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1456 +msgid "Volume refresh job already running. Returning..." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1462 +#, python-format +msgid "Error refreshing vol capacity. Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1470 +#, python-format +msgid "Refreshing capacity info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:104 +#: cinder/volume/drivers/netapp/nfs.py:211 +#, python-format +msgid "NFS file %s not discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:224 +#, python-format +msgid "Copied image to volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:230 +#, python-format +msgid "Registering image in cache %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:237 +#, python-format +msgid "" +"Exception while registering image %(image_id)s in cache. Exception: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:250 +#, python-format +msgid "Found cache file for image %(image_id)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:263 +#, python-format +msgid "Cloning img from cache for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:271 +msgid "Image cache cleaning in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:282 +msgid "Image cache cleaning in progress." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:293 +#, python-format +msgid "Cleaning cache for share %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:298 +#, python-format +msgid "Files to be queued for deletion %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:305 +#, python-format +msgid "Exception during cache cleaning %(share)s. Message - %(ex)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:311 +msgid "Image cache cleaning done." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:336 +#, python-format +msgid "Bytes to free %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:343 +#, python-format +msgid "Delete file path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:358 +#, python-format +msgid "Deleting file at path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:363 +#, python-format +msgid "Exception during deleting %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:395 +#, python-format +msgid "Unexpected exception in cloning image %(image_id)s. 
Message: %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:411 +#, python-format +msgid "Cloning image %s from cache" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:415 +#, python-format +msgid "Cache share: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:425 +#, python-format +msgid "Unexpected exception during image cloning in share %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:431 +#, python-format +msgid "Cloning image %s directly in share" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:436 +#, python-format +msgid "Share is cloneable %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:443 +#, python-format +msgid "Image is raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:450 +#, python-format +msgid "Image will locally be converted to raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:457 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:467 +#, python-format +msgid "Performing post clone for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:474 +msgid "NFS file could not be discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:478 +msgid "Checking file for resize" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:482 +#, python-format +msgid "Resizing file to %sG" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:488 +msgid "Resizing image file failed." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:510 +msgid "Discover file retries exhausted." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:529 +#, python-format +msgid "Image location not in the expected format %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:557 +#, python-format +msgid "Found possible share matches %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:561 +msgid "Unexpected exception while short listing used share." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:599 +#, python-format +msgid "Extending volume %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:710 +#, python-format +msgid "Shares on vserver %s will only be used for provisioning." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:714 +#: cinder/volume/drivers/netapp/nfs.py:892 +msgid "No vserver set in config. SSC will be disabled." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:757 +#, python-format +msgid "Exception creating vol %(name)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:765 +#, python-format +msgid "Volume %s could not be created on shares." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:815 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:856 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:862 +#, python-format +msgid "" +"Cloning with params volume %(volume)s, src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:924 +msgid "No cluster ssc stats found. Wait for next volume stats update." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:941 +msgid "No shares found hence skipping ssc refresh." 
+msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:978 +#: cinder/volume/drivers/netapp/nfs.py:1221 +#, python-format +msgid "Shortlisted del elg files %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:983 +#: cinder/volume/drivers/netapp/nfs.py:1226 +#, python-format +msgid "Getting file usage for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:988 +#: cinder/volume/drivers/netapp/nfs.py:1231 +#, python-format +msgid "file-usage for path %(path)s is %(bytes)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1005 +#: cinder/volume/drivers/netapp/nfs.py:1268 +#, python-format +msgid "Share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1007 +#: cinder/volume/drivers/netapp/nfs.py:1270 +#, python-format +msgid "No share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1038 +#, python-format +msgid "Found volume %(vol)s for share %(share)s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1129 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1139 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:241 +#, python-format +msgid "Unexpected error while creating ssc vol list. Message - %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:272 +#, python-format +msgid "Exception querying aggr options. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:313 +#, python-format +msgid "Exception querying sis information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:347 +#, python-format +msgid "Exception querying mirror information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:379 +#, python-format +msgid "Exception querying storage disk. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:421 +#, python-format +msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:455 +#, python-format +msgid "Successfully completed stale refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:482 +#, python-format +msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:488 +#, python-format +msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:500 +msgid "Backend not a VolumeDriver." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:502 +msgid "Backend server not NaServer." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:505 +msgid "ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:517 +msgid "refresh stale ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:616 +msgid "Fatal error: User not permitted to query NetApp volumes." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:623 +#, python-format +msgid "" +"The user does not have access or sufficient privileges to use all ssc " +"apis. The ssc features %s may not work as expected." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:122 +msgid "ems executed successfully." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:124 +#, python-format +msgid "Failed to invoke ems. 
Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:137 +msgid "" +"Using the drivers directly is not recommended by NetApp. Please use " +"NetAppDriver to achieve the functionality." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:160 +msgid "Requires an NaServer instance." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:317 +msgid "Unsupported Clustered Data ONTAP version." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:99 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:150 +#, python-format +msgid "Extending volume: %(id)s. New size: %(size)s GB" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:166 +#, python-format +msgid "Volume %s does not exist, it seems it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:179 +#, python-format +msgid "Cannot delete snapshot %(origin)s: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:190 +#, python-format +msgid "Creating temp snapshot of the original volume: %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:200 +#: cinder/volume/drivers/nexenta/nfs.py:200 +#, python-format +msgid "Volume creation failed, deleting created snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:205 +#: cinder/volume/drivers/nexenta/nfs.py:205 +#, python-format +msgid "Failed to delete zfs snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:223 +#, python-format +msgid "Enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:250 +#, python-format +msgid "Remote NexentaStor appliance at %s should be SSH-bound." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:267 +#, python-format +msgid "" +"Cannot send source snapshot %(src)s to destination %(dst)s. Reason: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:275 +#, python-format +msgid "" +"Cannot delete temporary source snapshot %(src)s on NexentaStor Appliance:" +" %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:281 +#, python-format +msgid "Cannot delete source volume %(volume)s on NexentaStor Appliance: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:318 +#, python-format +msgid "Snapshot %s does not exist, it seems it was already deleted." 
+msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:439 +#: cinder/volume/drivers/windows/windows_utils.py:230 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:449 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:461 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:471 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:481 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:514 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:522 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:83 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:88 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:89 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:90 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:96 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:85 +#, python-format +msgid "Volume %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:89 +#, python-format +msgid "Folder %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:114 +#, python-format +msgid "Creating folder on Nexenta Store %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:146 +#, python-format +msgid "Cannot destroy created folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:176 +#, python-format +msgid "Cannot destroy cloned folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:227 +#, python-format +msgid "Folder %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:237 +#: cinder/volume/drivers/nexenta/nfs.py:268 +#, python-format +msgid "Snapshot %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:302 +#, python-format +msgid "Creating regular file: %s.This may take some time." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:313 +#, python-format +msgid "Regular file: %s created." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:365 +#, python-format +msgid "Sharing folder %s on Nexenta Store" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:393 +#, python-format +msgid "Shares loaded: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/utils.py:46 +#, python-format +msgid "Invalid value: \"%s\"" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:93 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:99 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:107 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:137 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:190 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:246 +#, python-format +msgid "Snapshot info: %(name)s => %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:321 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:79 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:166 +#, python-format +msgid "Invalid hp3parclient version. Version %s or greater required." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:179 +#, python-format +msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:193 +#, python-format +msgid "HP3PARCommon %(common_ver)s, hp3parclient %(rest_ver)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:212 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:494 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:228 +#, python-format +msgid "Failed to get domain because CPG (%s) doesn't exist on array." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:247 +#, python-format +msgid "Error extending volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:347 +#, python-format +msgid "command %s failed" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:390 +#, python-format +msgid "Error running ssh command: %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:586 +#, python-format +msgid "VV Set %s does not exist." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:633 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:684 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:752 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1004 +#, python-format +msgid "Failure in update_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1019 +#, python-format +msgid "Failure in clear_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1031 +#, python-format +msgid "Error attaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1039 +#, python-format +msgid "Error detaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:124 +#, python-format +msgid "Invalid IP address format '%s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:158 +#, python-format +msgid "" +"Found invalid iSCSI IP address(s) in configuration option(s) " +"hp3par_iscsi_ips or iscsi_ip_address '%s.'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:164 +msgid "At least one valid iSCSI IP address must be set." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:266 +msgid "Least busy iSCSI port not found, using first iSCSI port in list." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:75 +#, python-format +msgid "Failure while invoking function: %(func)s. Error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:162 +#, python-format +msgid "Error while terminating session: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:165 +msgid "Successfully established connection to the server." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:172 +#, python-format +msgid "Error while logging out the user: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:218 +#, python-format +msgid "" +"Not authenticated error occurred. Will create session and try API call " +"again: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:258 +#, python-format +msgid "Task: %(task)s progress: %(prog)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:262 +#, python-format +msgid "Task %s status: success." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:266 +#: cinder/volume/drivers/vmware/api.py:271 +#, python-format +msgid "Task: %(task)s failed with error: %(err)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:290 +msgid "Lease is ready." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:294 +msgid "Lease initializing..." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:304 +#, python-format +msgid "Error: unknown lease state %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:51 +#, python-format +msgid "Read %(bytes)s out of %(max)s from ThreadSafePipe." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:56 +#, python-format +msgid "Completed transfer of size %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:102 +#, python-format +msgid "Initiating image service update on image: %(image)s with meta: %(meta)s" +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:117 +#, python-format +msgid "Glance image: %s is now active." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:123 +#, python-format +msgid "Glance image: %s is in killed state." 
+msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:132 +#, python-format +msgid "Glance image %(id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:171 +#, python-format +msgid "" +"Exception during HTTP connection close in VMwareHTTPWrite. Exception is " +"%s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:203 +#: cinder/volume/drivers/vmware/read_write_util.py:292 +msgid "Could not retrieve URL from lease." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:206 +#, python-format +msgid "Opening vmdk url: %s for write." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:231 +#, python-format +msgid "Written %s bytes to vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:242 +#: cinder/volume/drivers/vmware/read_write_util.py:318 +#, python-format +msgid "Updating progress to %s percent." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:258 +#: cinder/volume/drivers/vmware/read_write_util.py:334 +msgid "Lease released." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:260 +#: cinder/volume/drivers/vmware/read_write_util.py:336 +#, python-format +msgid "Lease is already in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:295 +#, python-format +msgid "Opening vmdk url: %s for read." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:307 +#, python-format +msgid "Read %s bytes from vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:150 +#, python-format +msgid "Error(s): %s occurred in the call to RetrievePropertiesEx." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:189 +#, python-format +msgid "No such SOAP method %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:198 +#, python-format +msgid "httplib error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:209 +#, python-format +msgid "Socket error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:218 +#, python-format +msgid "Type error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:225 +#, python-format +msgid "Error in %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:112 +#, python-format +msgid "Returning spec value %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:115 +#, python-format +msgid "Invalid spec value: %s specified." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:118 +#, python-format +msgid "Returning default spec value: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:169 +#, python-format +msgid "%s not set." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:174 +#, python-format +msgid "Successfully setup driver: %(driver)s for server: %(ip)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:219 +msgid "Backing not available, no operation to be performed." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:287 +#, python-format +msgid "" +"Unable to pick datastore to accommodate %(size)s bytes from the " +"datastores: %(dss)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:293 +#, python-format +msgid "" +"Selected datastore: %(datastore)s with %(host_count)d connected host(s) " +"for the volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:375 +#, python-format +msgid "" +"Unable to find suitable datastore for volume of size: %(vol)s GB under " +"host: %(host)s. 
More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:385 +#, python-format +msgid "Unable to find host to accommodate a disk of size: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:412 +#, python-format +msgid "" +"Unable to find suitable datastore for volume: %(vol)s under host: " +"%(host)s. More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:422 +#, python-format +msgid "Unable to create volume: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:441 +#, python-format +msgid "The instance %s, for which initialize connection is called, exists." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:448 +#, python-format +msgid "There is no backing for the volume: %s. Need to create one." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:456 +msgid "The instance for which initialize connection is called does not exist." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:461 +#, python-format +msgid "Trying to boot from an empty volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:470 +#, python-format +msgid "" +"Returning connection_info: %(info)s for volume: %(volume)s with " +"connector: %(connector)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:518 +#, python-format +msgid "Snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:523 +#, python-format +msgid "There is no backing, so will not create snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:528 +#, python-format +msgid "Successfully created snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:549 +#, python-format +msgid "Delete snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:554 +#, python-format +msgid "There is no backing, and so there is no snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:558 +#, python-format +msgid "Successfully deleted snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:586 +#, python-format +msgid "Successfully cloned new backing: %(back)s from source VMDK file: %(vmdk)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:603 +#, python-format +msgid "" +"There is no backing for the source volume: %(svol)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:633 +#, python-format +msgid "" +"There is no backing for the source snapshot: %(snap)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:642 +#: cinder/volume/drivers/vmware/vmdk.py:982 +#, python-format +msgid "" +"There is no snapshot point for the snapshotted volume: %(snap)s. Not " +"creating any backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:678 +#, python-format +msgid "Cannot create image of disk format: %s. Only vmdk disk format is accepted." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:713 +#: cinder/volume/drivers/vmware/vmdk.py:771 +#, python-format +msgid "Fetching glance image: %(id)s to server: %(host)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:722 +#: cinder/volume/drivers/vmware/vmdk.py:792 +#, python-format +msgid "Done copying image: %(id)s to volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:725 +#, python-format +msgid "" +"Exception in copy_image_to_volume: %(excep)s. Deleting the backing: " +"%(back)s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:746 +#, python-format +msgid "Exception in _select_ds_for_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:749 +#, python-format +msgid "Selected datastore %(ds)s for new volume of size %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:784 +#, python-format +msgid "Exception in copy_image_to_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:787 +#, python-format +msgid "Deleting the backing: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:808 +#, python-format +msgid "Copy glance image: %s to create new volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:842 +msgid "Upload to glance of attached volume is not supported." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:847 +#, python-format +msgid "Copy Volume: %s to new image." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:853 +#, python-format +msgid "Backing not found, creating for volume: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:872 +#, python-format +msgid "Done copying volume %(vol)s to a new image %(img)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:922 +#, python-format +msgid "Relocating volume: %(backing)s to %(ds)s and %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:961 +#: cinder/volume/drivers/vmware/volumeops.py:630 +#, python-format +msgid "Successfully created clone: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:974 +#, python-format +msgid "" +"There is no backing for the snapshoted volume: %(snap)s. Not creating any" +" backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1010 +#, python-format +msgid "" +"There is no backing for the source volume: %(src)s. Not creating any " +"backing for volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1018 +#, python-format +msgid "Linked clone of source volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:94 +#, python-format +msgid "Downloading image: %s from glance image server as a flat vmdk file." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:107 +#: cinder/volume/drivers/vmware/vmware_images.py:126 +#, python-format +msgid "Downloaded image: %s from glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:113 +#, python-format +msgid "Downloading image: %s from glance image server using HttpNfc import." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:132 +#, python-format +msgid "Uploading image: %s to the Glance image server using HttpNfc export." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:158 +#, python-format +msgid "Uploaded image: %s to the Glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:87 +#, python-format +msgid "Did not find any backing with name: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:94 +#, python-format +msgid "Deleting the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:97 +#, python-format +msgid "Initiated deletion of VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:99 +#, python-format +msgid "Deleted the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:237 +#, python-format +msgid "There are no valid datastores attached to %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:289 +#, python-format +msgid "" +"Creating folder: %(child_folder_name)s under parent folder: " +"%(parent_folder)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:306 +#, python-format +msgid "Child folder already present: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:314 +#, python-format +msgid "Created child folder: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:365 +#, python-format +msgid "Spec for creating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:383 +#, python-format +msgid "" +"Creating volume backing name: %(name)s disk_type: %(disk_type)s size_kb: " +"%(size_kb)s at folder: %(folder)s resourse pool: %(resource_pool)s " +"datastore name: %(ds_name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:395 +#, python-format +msgid "Initiated creation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:398 +#, python-format +msgid "Successfully created volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:438 +#, python-format +msgid "Spec for relocating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:452 +#, python-format +msgid "" +"Relocating backing: %(backing)s to datastore: %(ds)s and resource pool: " +"%(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:462 +#, python-format +msgid "Initiated relocation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:464 +#, python-format +msgid "" +"Successfully relocated volume backing: %(backing)s to datastore: %(ds)s " +"and resource pool: %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:474 +#, python-format +msgid "Moving backing: %(backing)s to folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:479 +#, python-format +msgid "Initiated move of volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:482 +#, python-format +msgid "Successfully moved volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:494 +#, python-format +msgid "Snapshoting backing: %(backing)s with name: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:501 +#, python-format +msgid "Initiated snapshot of volume backing: %(backing)s named: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:505 +#, python-format +msgid "Successfully created snapshot: %(snap)s for volume backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:553 +#, python-format +msgid "Deleting the snapshot: %(name)s from backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:558 +#, python-format +msgid "" +"Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " +"delete anything." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:565 +#, python-format +msgid "Initiated snapshot: %(name)s deletion for backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:569 +#, python-format +msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:597 +#, python-format +msgid "Spec for cloning the backing: %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:613 +#, python-format +msgid "" +"Creating a clone of backing: %(back)s, named: %(name)s, clone type: " +"%(type)s from snapshot: %(snap)s on datastore: %(ds)s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:627 +#, python-format +msgid "Initiated clone of backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:638 +#, python-format +msgid "Deleting file: %(file)s under datacenter: %(dc)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:646 +#, python-format +msgid "Initiated deletion via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:648 +#, python-format +msgid "Successfully deleted file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:701 +msgid "Copying disk data before snapshot of the VM" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:711 +#, python-format +msgid "Initiated copying disk data via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:713 +#, python-format +msgid "Successfully copied disk at: %(src)s to: %(dest)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:722 +#, python-format +msgid "Deleting vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:729 +#, python-format +msgid "Initiated deleting vmdk file via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:731 +#, python-format +msgid "Deleted vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/windows/windows.py:102 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:47 +#, python-format +msgid "" +"check_for_setup_error: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:53 +msgid "check_for_setup_error: there is no ISCSI traffic listening." +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:63 +#, python-format +msgid "" +"get_host_information: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:73 +#, python-format +msgid "" +"get_host_information: the ISCSI target information could not be " +"retrieved. WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:105 +#, python-format +msgid "" +"associate_initiator_with_iscsi_target: an association between initiator: " +"%(init)s and target name: %(target)s could not be established. WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:123 +#, python-format +msgid "" +"delete_iscsi_target: error when deleting the iscsi target associated with" +" target name: %(target)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:139 +#, python-format +msgid "" +"create_volume: error when creating the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:157 +#, python-format +msgid "" +"delete_volume: error when deleting the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:177 +#, python-format +msgid "" +"create_snapshot: error when creating the snapshot name: %(vol_name)s . 
" +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:193 +#, python-format +msgid "" +"create_volume_from_snapshot: error when creating the volume name: " +"%(vol_name)s from snapshot name: %(snap_name)s. WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:208 +#, python-format +msgid "" +"delete_snapshot: error when deleting the snapshot name: %(snap_name)s . " +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:223 +#, python-format +msgid "" +"create_iscsi_target: error when creating iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:240 +#, python-format +msgid "" +"remove_iscsi_target: error when deleting iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:255 +#, python-format +msgid "" +"add_disk_to_target: error adding disk associated to volume : %(vol_name)s" +" to the target name: %(tar_name)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:273 +#, python-format +msgid "" +"copy_vhd_disk: error when copying disk from source path : %(src_path)s to" +" destination path: %(dest_path)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:290 +#, python-format +msgid "" +"extend: error when extending the volume: %(vol_name)s .WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/flows/common.py:52 +#, python-format +msgid "Restoring source %(source_volid)s status to %(status)s" +msgstr "" + +#: cinder/volume/flows/common.py:58 +#, python-format +msgid "" +"Failed setting source volume %(source_volid)s back to its initial " +"%(source_status)s status" +msgstr "" + +#: cinder/volume/flows/common.py:83 +#, python-format +msgid "Updating volume: %(volume_id)s with %(update)s due to: %(reason)s" +msgstr "" + +#: cinder/volume/flows/common.py:90 +#: cinder/volume/flows/manager/create_volume.py:676 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(update)s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:81 +#, python-format +msgid "Originating snapshot status must be one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:103 +#, python-format +msgid "" +"Unable to create a volume from an originating source volume when its " +"status is not one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:126 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than the snapshot size " +"%(snap_size)sGB. They must be >= original snapshot size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:135 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than original volume size " +"%(source_size)sGB. They must be >= original volume size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:144 +#, python-format +msgid "Volume size %(size)s must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:186 +#, python-format +msgid "" +"Size of specified image %(image_size)sGB is larger than volume size " +"%(volume_size)sGB." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:194 +#, python-format +msgid "" +"Volume size %(volume_size)sGB cannot be smaller than the image minDisk " +"size %(min_disk)sGB." 
+msgstr "" + +#: cinder/volume/flows/api/create_volume.py:212 +#, python-format +msgid "Metadata property key %s greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:217 +#, python-format +msgid "Metadata property key %s value greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:254 +#, python-format +msgid "Availability zone '%s' is invalid" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:267 +msgid "Volume must be in the same availability zone as the snapshot" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:276 +msgid "Volume must be in the same availability zone as the source volume" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:315 +msgid "Volume type will be changed to be the same as the source volume." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:463 +#, python-format +msgid "Failed destroying volume entry %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:546 +#, python-format +msgid "Failed rolling back quota for %s reservations" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:590 +#, python-format +msgid "Failed to update quota for deleting volume: %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:678 +#: cinder/volume/flows/manager/create_volume.py:192 +#, python-format +msgid "Volume %s: create failed" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:682 +msgid "Unexpected build error:" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:105 +#, python-format +msgid "" +"Volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d due to " +"%(reason)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:124 +#, python-format +msgid "Volume %s: re-scheduled" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:141 +#, python-format +msgid "Updating volume %(volume_id)s with %(update)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:146 +#, python-format +msgid "Volume %s: resetting 'creating' status failed." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:165 +#, python-format +msgid "Volume %s: rescheduling failed" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:308 +#, python-format +msgid "" +"Failed notifying about the volume action %(event)s for volume " +"%(volume_id)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:345 +#, python-format +msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:347 +#, python-format +msgid "" +"Failed updating volume %(vol_id)s metadata using the provided " +"%(src_type)s %(src_id)s metadata" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:405 +#, python-format +msgid "" +"Failed fetching snapshot %(snapshot_id)s bootable flag using the provided" +" glance snapshot %(snapshot_ref_id)s volume reference" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:418 +#, python-format +msgid "Marking volume %s as bootable." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:421 +#, python-format +msgid "Failed updating volume %(volume_id)s bootable flag to true" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:448 +#, python-format +msgid "" +"Attempting download of %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s." 
+msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:455 +#: cinder/volume/flows/manager/create_volume.py:466 +#, python-format +msgid "" +"Failed to copy image %(image_id)s to volume: %(volume_id)s, error: " +"%(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:461 +#, python-format +msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:475 +#, python-format +msgid "" +"Downloaded image %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s successfully." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:512 +#, python-format +msgid "" +"Creating volume glance metadata for volume %(volume_id)s backed by image " +"%(image_id)s with: %(vol_metadata)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:526 +#, python-format +msgid "" +"Cloning %(volume_id)s from image %(image_id)s at location " +"%(image_location)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:552 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(updates)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:574 +#, python-format +msgid "Unable to create volume. Volume driver %s not initialized" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:588 +#, python-format +msgid "" +"Volume %(volume_id)s: being created using %(functor)s with specification:" +" %(volume_spec)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:611 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with creation provided " +"model %(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:619 +#, python-format +msgid "Volume %s: creating export" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:633 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with driver provided model " +"%(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:680 +#, python-format +msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" +msgstr "" + +#~ msgid "Migration %(migration_id)s could not be found." +#~ msgstr "" + +#~ msgid "Bad driver response status: %(status)s" +#~ msgstr "" + +#~ msgid "Instance %(instance_id)s could not be found." +#~ msgstr "" + +#~ msgid "Volume retype failed: %(reason)s" +#~ msgstr "" + +#~ msgid "SIGTERM received" +#~ msgstr "" + +#~ msgid "Child %(pid)d exited with status %(code)d" +#~ msgstr "" + +#~ msgid "_wait_child %d" +#~ msgstr "" + +#~ msgid "wait wrap.failed %s" +#~ msgstr "" + +#~ msgid "" +#~ "Report interval must be less than " +#~ "service down time. Current config: " +#~ ". Setting " +#~ "service_down_time to: %(new_service_down_time)s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target for volume %(name)s." +#~ msgstr "" + +#~ msgid "Updating iscsi target: %s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target %(name)s: %(e)s" +#~ msgstr "" + +#~ msgid "Caught '%(exception)s' exception." +#~ msgstr "" + +#~ msgid "Get code level failed" +#~ msgstr "" + +#~ msgid "do_setup: Could not get system name" +#~ msgstr "" + +#~ msgid "Failed to get license information." +#~ msgstr "" + +#~ msgid "do_setup: No configured nodes" +#~ msgstr "" + +#~ msgid "enter: _get_chap_secret_for_host: host name %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_chap_secret_for_host: host name " +#~ "%(host_name)s with secret %(chap_secret)s" +#~ msgstr "" + +#~ msgid "" +#~ "_create_host: Cannot clean host name. 
" +#~ "Host name is not unicode or string" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: %s" +#~ msgstr "" + +#~ msgid "leave: _get_host_from_connector: host %s" +#~ msgstr "" + +#~ msgid "enter: _create_host: host %s" +#~ msgstr "" + +#~ msgid "_create_host: No connector ports" +#~ msgstr "" + +#~ msgid "leave: _create_host: host %(host)s - %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +#~ msgstr "" + +#~ msgid "" +#~ "storwize_svc_multihostmap_enabled is set to " +#~ "False, Not allow multi host mapping" +#~ msgstr "" + +#~ msgid "volume %s mapping to multi host" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _map_vol_to_host: LUN %(result_lun)s, " +#~ "volume %(volume_name)s, host %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "leave: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "_create_host failed to return the host name." +#~ msgstr "" + +#~ msgid "_get_host_from_connector failed to return the host name for connector" +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to any host found." +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: Multiple mappings of " +#~ "volume %(vol_name)s found, no host " +#~ "specified." +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to host %(host_name)s found" +#~ msgstr "" + +#~ msgid "protocol must be specified as ' iSCSI' or ' FC'" +#~ msgstr "" + +#~ msgid "enter: _create_vdisk: vdisk %s " +#~ msgstr "" + +#~ msgid "" +#~ "_create_vdisk %(name)s - did not find success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find success " +#~ "message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find mapping " +#~ "id in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to prepare FlashCopy" +#~ " from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "Unexecpted mapping status %(status)s for " +#~ "mapping %(id)s. Attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Mapping %(id)s prepare failed to " +#~ "complete within the allotted %(to)d " +#~ "seconds timeout. Terminating." 
+#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s with " +#~ "exception %(ex)s" +#~ msgstr "" + +#~ msgid "_prepare_fc_map: %s" +#~ msgstr "" + +#~ msgid "" +#~ "_start_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "enter: _run_flashcopy: execute FlashCopy from" +#~ " source %(source)s to target %(target)s" +#~ msgstr "" + +#~ msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +#~ msgstr "" + +#~ msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %(src_vdisk)s (%(src_id)s) does not exist" +#~ msgstr "" + +#~ msgid "" +#~ "_create_copy: cannot get source vdisk " +#~ "%(src)s capacity from vdisk attributes " +#~ "%(attr)s" +#~ msgstr "" + +#~ msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_flashcopy_mapping_attributes: mapping " +#~ "%(fc_map_id)s, attributes %(attributes)s" +#~ msgstr "" + +#~ msgid "enter: _is_vdisk_defined: vdisk %s " +#~ msgstr "" + +#~ msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +#~ msgstr "" + +#~ msgid "enter: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "warning: Tried to delete vdisk %s but it does not exist." +#~ msgstr "" + +#~ msgid "leave: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "" +#~ "_add_vdisk_copy %(name)s - did not find" +#~ " success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "_get_vdisk_copy_attrs: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "_get_pool_attrs: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "enter: _execute_command_and_parse_attributes: command %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _execute_command_and_parse_attributes:\n" +#~ "command: %(cmd)s\n" +#~ "attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "_get_hdr_dic: attribute headers and values do not match.\n" +#~ " Headers: %(header)s\n" +#~ " Values: %(row)s" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ "stdout: %(out)s\n" +#~ "stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "Did not find expected column in %(fun)s: %(hdr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Volume size %(size)s cannot be lesser" +#~ " than the snapshot size %(snap_size)s. " +#~ "They must be >= original snapshot " +#~ "size." +#~ msgstr "" + +#~ msgid "" +#~ "Clones currently disallowed when %(size)s " +#~ "< %(source_size)s. They must be >= " +#~ "original volume size." +#~ msgstr "" + +#~ msgid "" +#~ "Size of specified image %(image_size)s " +#~ "is larger than volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "" +#~ "Image minDisk size %(min_disk)s is " +#~ "larger than the volume size " +#~ "%(volume_size)s." 
+#~ msgstr "" + +#~ msgid "Updating volume %(volume_id)s with %(update)s" +#~ msgstr "" + +#~ msgid "Volume %s: resetting 'creating' status failed" +#~ msgstr "" + +#~ msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s" +#~ msgstr "" + +#~ msgid "Marking volume %s as bootable" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting download of %(image_id)s " +#~ "(%(image_location)s) to volume %(volume_id)s" +#~ msgstr "" + +#~ msgid "" +#~ "Downloaded image %(image_id)s (%(image_location)s)" +#~ " to volume %(volume_id)s successfully" +#~ msgstr "" + +#~ msgid "" +#~ "Creating volume glance metadata for " +#~ "volume %(volume_id)s backed by image " +#~ "%(image_id)s with: %(vol_metadata)s" +#~ msgstr "" + +#~ msgid "" +#~ "Cloning %(volume_id)s from image %(image_id)s" +#~ " at location %(image_location)s" +#~ msgstr "" + diff --git a/cinder/locale/he_IL/LC_MESSAGES/cinder.po b/cinder/locale/he_IL/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..18f44b6faa --- /dev/null +++ b/cinder/locale/he_IL/LC_MESSAGES/cinder.po @@ -0,0 +1,9902 @@ +# Hebrew (Israel) translations for cinder. +# Copyright (C) 2014 ORGANIZATION +# This file is distributed under the same license as the cinder project. +# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Cinder\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-02-03 06:16+0000\n" +"PO-Revision-Date: 2014-01-24 11:45+0000\n" +"Last-Translator: openstackjenkins \n" +"Language-Team: Hebrew (Israel) " +"(http://www.transifex.com/projects/p/openstack/language/he_IL/)\n" +"Plural-Forms: nplurals=2; plural=(n != 1)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:102 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:66 cinder/brick/exception.py:33 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:88 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:107 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:112 +#, python-format +msgid "Volume driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:116 +#, python-format +msgid "Backup driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:120 +#, python-format +msgid "Connection to glance failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:124 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:129 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:133 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:137 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:141 +msgid "Volume driver not ready." +msgstr "" + +#: cinder/exception.py:145 cinder/brick/exception.py:74 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:150 +#, python-format +msgid "Invalid snapshot: %(reason)s" +msgstr "" + +#: cinder/exception.py:154 +#, python-format +msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." 
+msgstr "" + +#: cinder/exception.py:159 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:163 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:167 +msgid "The results are invalid." +msgstr "" + +#: cinder/exception.py:171 +#, python-format +msgid "Invalid input received: %(reason)s" +msgstr "" + +#: cinder/exception.py:175 +#, python-format +msgid "Invalid volume type: %(reason)s" +msgstr "" + +#: cinder/exception.py:179 +#, python-format +msgid "Invalid volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:183 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:187 +#, python-format +msgid "Invalid host: %(reason)s" +msgstr "" + +#: cinder/exception.py:193 cinder/brick/exception.py:81 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:197 +#, python-format +msgid "Invalid auth key: %(reason)s" +msgstr "" + +#: cinder/exception.py:201 +#, python-format +msgid "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" +msgstr "" + +#: cinder/exception.py:206 +msgid "Service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:210 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:214 +#, python-format +msgid "The device in the path %(path)s is unavailable: %(reason)s" +msgstr "" + +#: cinder/exception.py:218 +#, python-format +msgid "Expected a uuid but received %(uuid)s." +msgstr "" + +#: cinder/exception.py:222 cinder/brick/exception.py:68 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:228 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:232 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "" +"Volume %(volume_id)s has no administration metadata with key " +"%(metadata_key)s." +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Invalid metadata: %(reason)s" +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Invalid metadata size: %(reason)s" +msgstr "" + +#: cinder/exception.py:250 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:255 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:264 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:269 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s deletion is not allowed with volumes " +"present with the type." +msgstr "" + +#: cinder/exception.py:274 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:278 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:282 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:287 +#, python-format +msgid "No target id found for volume %(volume_id)s." 
+msgstr "" + +#: cinder/exception.py:291 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:295 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:328 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:332 +#, python-format +msgid "Unknown quota resources %(unknown)s." +msgstr "" + +#: cinder/exception.py:336 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:340 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:344 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:348 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:352 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:356 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:365 +#, python-format +msgid "Volume Type %(id)s already exists." +msgstr "" + +#: cinder/exception.py:369 +#, python-format +msgid "Volume type encryption for type %(type_id)s already exists." +msgstr "" + +#: cinder/exception.py:373 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:377 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:381 +#, python-format +msgid "Could not find parameter %(param)s" +msgstr "" + +#: cinder/exception.py:385 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:389 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:398 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:402 +#, python-format +msgid "Quota exceeded: code=%(code)s" +msgstr "" + +#: cinder/exception.py:409 +#, python-format +msgid "" +"Requested volume or snapshot exceeds allowed Gigabytes quota. Requested " +"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." 
+msgstr "" + +#: cinder/exception.py:415 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:419 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:423 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:427 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:432 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:436 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:440 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:444 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:449 +#, python-format +msgid "Glance metadata for volume/snapshot %(id)s cannot be found." +msgstr "" + +#: cinder/exception.py:453 +#, python-format +msgid "Failed to export for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:457 +#, python-format +msgid "Failed to create metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:461 +#, python-format +msgid "Failed to update metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:465 +#, python-format +msgid "Failed to copy metadata to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:469 +#, python-format +msgid "Failed to copy image to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:473 +msgid "Invalid Ceph args provided for backup rbd operation" +msgstr "" + +#: cinder/exception.py:477 +msgid "An error has occurred during backup operation" +msgstr "" + +#: cinder/exception.py:481 +msgid "Backup RBD operation failed" +msgstr "" + +#: cinder/exception.py:485 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:489 +msgid "Failed to identify volume backend." +msgstr "" + +#: cinder/exception.py:493 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "" + +#: cinder/exception.py:497 +#, python-format +msgid "Connection to swift failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:501 +#, python-format +msgid "Transfer %(transfer_id)s could not be found." +msgstr "" + +#: cinder/exception.py:505 +#, python-format +msgid "Volume migration failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:509 +#, python-format +msgid "SSH command injection detected: %(command)s" +msgstr "" + +#: cinder/exception.py:513 +#, python-format +msgid "QoS Specs %(specs_id)s already exists." +msgstr "" + +#: cinder/exception.py:517 +#, python-format +msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:522 +#, python-format +msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "No such QoS spec %(specs_id)s." +msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:536 +#, python-format +msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." 
+msgstr "" + +#: cinder/exception.py:541 +#, python-format +msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." +msgstr "" + +#: cinder/exception.py:546 +#, python-format +msgid "Invalid qos specs: %(reason)s" +msgstr "" + +#: cinder/exception.py:550 +#, python-format +msgid "QoS Specs %(specs_id)s is still associated with entities." +msgstr "" + +#: cinder/exception.py:554 +#, python-format +msgid "key manager error: %(reason)s" +msgstr "" + +#: cinder/exception.py:560 +msgid "Coraid Cinder Driver exception." +msgstr "" + +#: cinder/exception.py:564 +msgid "Failed to encode json data." +msgstr "" + +#: cinder/exception.py:568 +msgid "Login on ESM failed." +msgstr "" + +#: cinder/exception.py:572 +msgid "Relogin on ESM failed." +msgstr "" + +#: cinder/exception.py:576 +#, python-format +msgid "Group with name \"%(group_name)s\" not found." +msgstr "" + +#: cinder/exception.py:580 +#, python-format +msgid "ESM configure request failed: %(message)s." +msgstr "" + +#: cinder/exception.py:584 +#, python-format +msgid "Coraid ESM not available with reason: %(reason)s." +msgstr "" + +#: cinder/exception.py:589 +msgid "Zadara Cinder Driver exception." +msgstr "" + +#: cinder/exception.py:593 +#, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:597 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:601 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:605 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:609 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:613 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:618 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:622 +msgid "SolidFire Cinder Driver exception" +msgstr "" + +#: cinder/exception.py:626 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:630 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:636 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:641 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:645 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:649 cinder/exception.py:662 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:654 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:658 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/manager.py:133 +msgid "Notifying Schedulers of capabilities ..." +msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:105 +#, python-format +msgid "" +"Default quota for resource: %(res)s is set by the default quota flag: " +"quota_%(res)s, it is now deprecated. Please use the the default quota " +"class for default quota." 
+msgstr "" + +#: cinder/quota.py:748 +#, python-format +msgid "Created reservations %s" +msgstr "" + +#: cinder/quota.py:770 +#, python-format +msgid "Failed to commit reservations %s" +msgstr "" + +#: cinder/quota.py:790 +#, python-format +msgid "Failed to roll back reservations %s" +msgstr "" + +#: cinder/quota.py:876 +msgid "Cannot register resource" +msgstr "" + +#: cinder/quota.py:879 +msgid "Cannot register resources" +msgstr "" + +#: cinder/quota_utils.py:46 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume - " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/quota_utils.py:56 cinder/transfer/api.py:168 +#: cinder/volume/flows/api/create_volume.py:520 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/service.py:95 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:108 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:148 +#, python-format +msgid "" +"Report interval must be less than service down time. Current config " +"service_down_time: %(service_down_time)s, report_interval for this: " +"service is: %(report_interval)s. Setting global service_down_time to: " +"%(new_down_time)s" +msgstr "" + +#: cinder/service.py:216 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:255 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:270 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:276 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:298 +#, python-format +msgid "" +"Value of config option %(name)s_workers must be integer greater than 1. " +"Input value ignored." +msgstr "" + +#: cinder/service.py:373 +msgid "serve() can only be called once" +msgstr "" + +#: cinder/service.py:379 cinder/openstack/common/service.py:166 +#: cinder/openstack/common/service.py:384 +msgid "Full set of CONF:" +msgstr "" + +#: cinder/service.py:387 +#, python-format +msgid "%s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Can not translate %s to integer." 
+msgstr "" + +#: cinder/utils.py:127 +#, python-format +msgid "May specify only one of %s" +msgstr "" + +#: cinder/utils.py:212 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:228 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:412 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:423 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:698 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:759 +#, python-format +msgid "Volume driver %s not initialized" +msgstr "" + +#: cinder/wsgi.py:127 cinder/openstack/common/sslutils.py:50 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: cinder/wsgi.py:130 cinder/openstack/common/sslutils.py:53 +#, python-format +msgid "Unable to find ca_file : %s" +msgstr "" + +#: cinder/wsgi.py:133 cinder/openstack/common/sslutils.py:56 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: cinder/wsgi.py:136 cinder/openstack/common/sslutils.py:59 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:169 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:206 +#, python-format +msgid "Started %(name)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:244 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:313 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." +msgstr "" + +#: cinder/api/common.py:92 cinder/api/common.py:126 cinder/volume/api.py:266 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:95 cinder/api/common.py:130 cinder/volume/api.py:263 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:120 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:134 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:162 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:189 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:182 +msgid "Initializing extension manager." 
+msgstr "" + +#: cinder/api/extensions.py:197 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:235 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:236 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:240 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:256 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:262 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:276 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:287 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:356 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:266 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:463 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:786 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:907 +msgid "subclasses must implement construct()!" +msgstr "" + +#: cinder/api/contrib/admin_actions.py:81 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:227 +#: cinder/api/contrib/volume_transfer.py:157 +#: cinder/api/contrib/volume_transfer.py:193 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:224 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:60 +msgid "Snapshot not found." 
+msgstr "" + +#: cinder/api/contrib/hosts.py:86 cinder/api/openstack/wsgi.py:245 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:136 +#, python-format +msgid "Host '%s' could not be found." +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:168 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:180 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:206 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:214 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:111 +msgid "Please specify a name for QoS specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:220 +msgid "Failed to disassociate qos specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:222 +msgid "Qos specs still in use." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:298 +#: cinder/api/contrib/qos_specs_manage.py:351 +msgid "Volume Type id must not be None." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:72 +msgid "Missing required element quota_class_set in request body." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:81 +msgid "Quota class limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:85 +msgid "Quota class limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:60 +msgid "Quota limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quotas.py:65 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:100 +msgid "Missing required element quota_set in request body." +msgstr "" + +#: cinder/api/contrib/quotas.py:111 +#, python-format +msgid "Bad key(s) in quota set: %s" +msgstr "" + +#: cinder/api/contrib/scheduler_hints.py:36 +msgid "Malformed scheduler_hints attribute" +msgstr "" + +#: cinder/api/contrib/services.py:84 +msgid "" +"Query by service parameter is deprecated. Please use binary parameter " +"instead." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:51 +msgid "'status' must be specified." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:61 +#, python-format +msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:67 +#, python-format +msgid "" +"Provided snapshot status %(provided)s not allowed for snapshot with " +"status %(current)s." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:79 +msgid "progress must be an integer percentage" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:101 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:105 +#: cinder/api/v1/snapshot_metadata.py:75 cinder/api/v1/volume_metadata.py:75 +#: cinder/api/v2/snapshot_metadata.py:75 cinder/api/v2/volume_metadata.py:74 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:108 +#: cinder/api/v1/snapshot_metadata.py:79 cinder/api/v1/volume_metadata.py:79 +#: cinder/api/v2/snapshot_metadata.py:79 cinder/api/v2/volume_metadata.py:78 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:150 +msgid "" +"Key names can only contain alphanumeric characters, underscores, periods," +" colons and hyphens." 
+msgstr "" + +#: cinder/api/contrib/volume_actions.py:99 +#, python-format +msgid "" +"Invalid request to attach volume to an instance %(instance_uuid)s and a " +"host %(host_name)s simultaneously" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:107 +msgid "Invalid request to attach volume to an invalid target" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:111 +msgid "" +"Invalid request to attach volume with an invalid mode. Attaching mode " +"should be 'rw' or 'ro'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:196 +msgid "Unable to fetch connection information from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:216 +msgid "Unable to terminate volume connection from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:229 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:237 +msgid "Bad value for 'force' parameter." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:240 +msgid "'force' is not string or bool." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:280 +msgid "New volume size must be specified as an integer." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:299 +msgid "Must specify readonly in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:307 +msgid "Bad value for 'readonly'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:311 +msgid "'readonly' not string or bool" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:325 +msgid "New volume type must be specified." +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:131 +msgid "Listing volume transfers" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:147 +#, python-format +msgid "Creating new volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:162 +#, python-format +msgid "Creating transfer of volume %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:183 +#, python-format +msgid "Accepting volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:196 +#, python-format +msgid "Accepting transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:217 +#, python-format +msgid "Delete transfer with id: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:64 +msgid "key_size must be non-negative" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:67 +msgid "key_size must be an integer" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:73 +msgid "provider must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:75 +msgid "control_location must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:83 +#, python-format +msgid "Valid control location are: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:111 +msgid "Cannot create encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:115 +msgid "Create body is not valid." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:157 +msgid "Cannot delete encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/middleware/auth.py:108 +msgid "Invalid service catalog json." 
+msgstr "" + +#: cinder/api/middleware/fault.py:44 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:53 cinder/api/openstack/wsgi.py:984 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/fault.py:69 +#, python-format +msgid "%(exception)s: %(explanation)s" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:55 cinder/api/middleware/sizelimit.py:64 +#: cinder/api/middleware/sizelimit.py:78 +msgid "Request is too large." +msgstr "" + +#: cinder/api/openstack/__init__.py:69 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:80 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:104 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:126 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." +msgstr "" + +#: cinder/api/openstack/wsgi.py:220 cinder/api/openstack/wsgi.py:634 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:639 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:677 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:682 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:685 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:793 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:799 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:803 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:914 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:917 cinder/api/openstack/wsgi.py:930 +#: cinder/api/v1/snapshot_metadata.py:53 cinder/api/v1/snapshot_metadata.py:71 +#: cinder/api/v1/snapshot_metadata.py:96 cinder/api/v1/snapshot_metadata.py:121 +#: cinder/api/v1/volume_metadata.py:53 cinder/api/v1/volume_metadata.py:71 +#: cinder/api/v1/volume_metadata.py:96 cinder/api/v1/volume_metadata.py:121 +#: cinder/api/v2/snapshot_metadata.py:53 cinder/api/v2/snapshot_metadata.py:71 +#: cinder/api/v2/snapshot_metadata.py:96 cinder/api/v2/snapshot_metadata.py:121 +#: cinder/api/v2/volume_metadata.py:52 cinder/api/v2/volume_metadata.py:70 +#: cinder/api/v2/volume_metadata.py:95 cinder/api/v2/volume_metadata.py:120 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:927 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:939 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:987 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." 
+msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:139 cinder/api/v2/limits.py:138 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:264 cinder/api/v2/limits.py:261 +msgid "This request was rate-limited." +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:37 cinder/api/v1/snapshot_metadata.py:117 +#: cinder/api/v1/snapshot_metadata.py:156 cinder/api/v2/snapshot_metadata.py:37 +#: cinder/api/v2/snapshot_metadata.py:117 +#: cinder/api/v2/snapshot_metadata.py:156 +msgid "snapshot does not exist" +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:139 +#: cinder/api/v1/snapshot_metadata.py:149 cinder/api/v1/volume_metadata.py:139 +#: cinder/api/v1/volume_metadata.py:149 cinder/api/v2/snapshot_metadata.py:139 +#: cinder/api/v2/snapshot_metadata.py:149 cinder/api/v2/volume_metadata.py:138 +#: cinder/api/v2/volume_metadata.py:148 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:119 cinder/api/v2/snapshots.py:120 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:184 +msgid "'volume_id' must be specified" +msgstr "" + +#: cinder/api/v1/snapshots.py:182 cinder/api/v2/snapshots.py:193 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:186 cinder/api/v2/snapshots.py:202 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:37 cinder/api/v1/volume_metadata.py:117 +#: cinder/api/v1/volume_metadata.py:156 cinder/api/v2/volume_metadata.py:36 +#: cinder/api/v2/volume_metadata.py:116 cinder/api/v2/volume_metadata.py:155 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:111 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:290 cinder/api/v2/volumes.py:228 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:344 cinder/api/v1/volumes.py:348 +#: cinder/api/v2/volumes.py:298 cinder/api/v2/volumes.py:302 +msgid "Invalid imageRef provided." 
+msgstr "" + +#: cinder/api/v1/volumes.py:388 cinder/api/v2/volumes.py:354 +#, python-format +msgid "snapshot id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:401 +#, python-format +msgid "source vol id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:412 cinder/api/v2/volumes.py:377 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/v1/volumes.py:496 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/snapshots.py:111 cinder/api/v2/snapshots.py:126 +#: cinder/api/v2/snapshots.py:267 +msgid "Snapshot could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:174 cinder/api/v2/snapshots.py:234 +#: cinder/api/v2/volumes.py:313 cinder/api/v2/volumes.py:419 +#, python-format +msgid "Missing required element '%s' in request body" +msgstr "" + +#: cinder/api/v2/snapshots.py:190 cinder/api/v2/volumes.py:217 +#: cinder/api/v2/volumes.py:234 cinder/api/v2/volumes.py:449 +msgid "Volume could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:230 cinder/api/v2/volumes.py:415 +msgid "Missing request body" +msgstr "" + +#: cinder/api/v2/types.py:70 +msgid "Volume type not found" +msgstr "" + +#: cinder/api/v2/volumes.py:237 +msgid "Volume cannot be deleted while in attached state" +msgstr "" + +#: cinder/api/v2/volumes.py:343 +msgid "Volume type not found." +msgstr "" + +#: cinder/api/v2/volumes.py:366 +#, python-format +msgid "source volume id:%s not found" +msgstr "" + +#: cinder/api/v2/volumes.py:472 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:66 +msgid "Backup status must be available or error" +msgstr "" + +#: cinder/backup/api.py:105 +msgid "Volume to be backed up must be available" +msgstr "" + +#: cinder/backup/api.py:140 +msgid "Backup status must be available" +msgstr "" + +#: cinder/backup/api.py:145 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:154 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:170 +msgid "Volume to be restored to must be available" +msgstr "" + +#: cinder/backup/api.py:176 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." +msgstr "" + +#: cinder/backup/api.py:181 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:97 +msgid "NULL host not allowed for volume backend lookup." +msgstr "" + +#: cinder/backup/manager.py:100 +#, python-format +msgid "Checking hostname '%s' for backend info." +msgstr "" + +#: cinder/backup/manager.py:107 +#, python-format +msgid "Backend not found in hostname (%s) so using default." +msgstr "" + +#: cinder/backup/manager.py:117 +#, python-format +msgid "Manager requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:120 cinder/backup/manager.py:132 +msgid "Fetching default backend." +msgstr "" + +#: cinder/backup/manager.py:123 +#, python-format +msgid "Volume manager for backend '%s' does not exist." +msgstr "" + +#: cinder/backup/manager.py:129 +#, python-format +msgid "Driver requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:147 +#, python-format +msgid "" +"Registering backend %(backend)s (host=%(host)s " +"backend_name=%(backend_name)s)." +msgstr "" + +#: cinder/backup/manager.py:154 +#, python-format +msgid "Registering default backend %s." 
+msgstr "" + +#: cinder/backup/manager.py:158 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)." +msgstr "" + +#: cinder/backup/manager.py:165 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s." +msgstr "" + +#: cinder/backup/manager.py:184 +msgid "Cleaning up incomplete backup operations." +msgstr "" + +#: cinder/backup/manager.py:189 +#, python-format +msgid "Resetting volume %s to available (was backing-up)." +msgstr "" + +#: cinder/backup/manager.py:194 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)." +msgstr "" + +#: cinder/backup/manager.py:206 +#, python-format +msgid "Resetting backup %s to error (was creating)." +msgstr "" + +#: cinder/backup/manager.py:212 +#, python-format +msgid "Resetting backup %s to available (was restoring)." +msgstr "" + +#: cinder/backup/manager.py:217 +#, python-format +msgid "Resuming delete on backup: %s." +msgstr "" + +#: cinder/backup/manager.py:225 +#, python-format +msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:237 +#, python-format +msgid "" +"Create backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:249 +#, python-format +msgid "" +"Create backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:282 +#, python-format +msgid "Create backup finished. backup: %s." +msgstr "" + +#: cinder/backup/manager.py:286 +#, python-format +msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:299 +#, python-format +msgid "" +"Restore backup aborted: expected volume status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:310 +#, python-format +msgid "" +"Restore backup aborted: expected backup status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:329 +#, python-format +msgid "" +"Restore backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:360 +#, python-format +msgid "" +"Restore backup finished, backup %(backup_id)s restored to volume " +"%(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:379 +#, python-format +msgid "Delete backup started, backup: %s." +msgstr "" + +#: cinder/backup/manager.py:386 +#, python-format +msgid "" +"Delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:399 +#, python-format +msgid "" +"Delete backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:422 +#, python-format +msgid "Delete backup finished, backup %s deleted." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:116 +msgid "" +"rbd striping not supported - ignoring configuration settings for rbd " +"striping" +msgstr "" + +#: cinder/backup/drivers/ceph.py:147 +#, python-format +msgid "invalid user '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:213 +msgid "backup_id required" +msgstr "" + +#: cinder/backup/drivers/ceph.py:224 +#, python-format +msgid "discarding %(length)s bytes from offset %(offset)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:232 +#, python-format +msgid "writing zeroes chunk %d" +msgstr "" + +#: cinder/backup/drivers/ceph.py:246 +#, python-format +msgid "transferring data between '%(src)s' and '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:250 +#, python-format +msgid "%(chunks)s chunks of %(bytes)s bytes to be transferred" +msgstr "" + +#: cinder/backup/drivers/ceph.py:269 +#, python-format +msgid "transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:279 +#, python-format +msgid "transferring remaining %s bytes" +msgstr "" + +#: cinder/backup/drivers/ceph.py:295 +#, python-format +msgid "creating base image '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:322 cinder/backup/drivers/ceph.py:603 +#, python-format +msgid "deleting backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:325 +msgid "no backup snapshot to delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:361 +#, python-format +msgid "trying diff format name format basename='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:369 +#, python-format +msgid "image %s not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:377 +#, python-format +msgid "base image still has %s snapshots so skipping base image delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:382 +#, python-format +msgid "deleting base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:389 +#, python-format +msgid "image busy, retrying %(retries)s more time(s) in %(delay)ss" +msgstr "" + +#: cinder/backup/drivers/ceph.py:394 +msgid "max retries reached - raising error" +msgstr "" + +#: cinder/backup/drivers/ceph.py:397 +#, python-format +msgid "base backup image='%s' deleted)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:407 +#, python-format +msgid "deleting source snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:453 +#, python-format +msgid "performing differential transfer from '%(src)s' to '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:478 +#, python-format +msgid "rbd diff op failed - (ret=%(ret)s stderr=%(stderr)s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:488 +#, python-format +msgid "image '%s' not found - trying diff format name" +msgstr "" + +#: cinder/backup/drivers/ceph.py:493 +#, python-format +msgid "diff format image '%s' not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:528 +#, python-format +msgid "using --from-snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:543 +#, python-format +msgid "source snap '%s' is stale so deleting" +msgstr "" + +#: cinder/backup/drivers/ceph.py:555 +#, python-format +msgid "" +"snap='%(snap)s' does not exist in base image='%(base)s' - aborting " +"incremental backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:566 +#, python-format +msgid "creating backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:586 +#, python-format +msgid "differential backup transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:595 +msgid "differential backup transfer failed" +msgstr "" + +#: 
cinder/backup/drivers/ceph.py:625 +#, python-format +msgid "creating base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:634 +msgid "copying data" +msgstr "" + +#: cinder/backup/drivers/ceph.py:694 +#, python-format +msgid "looking for snapshot of backup base '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:697 +#, python-format +msgid "backup base '%s' has no snapshots" +msgstr "" + +#: cinder/backup/drivers/ceph.py:704 +#, python-format +msgid "backup '%s' has no snapshot" +msgstr "" + +#: cinder/backup/drivers/ceph.py:708 +#, python-format +msgid "backup should only have one snapshot but instead has %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:713 +#, python-format +msgid "found snapshot '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:734 +msgid "need non-zero volume size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:751 +#, python-format +msgid "Starting backup of volume='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:764 +msgid "forcing full backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:776 +#, python-format +msgid "backup '%s' finished." +msgstr "" + +#: cinder/backup/drivers/ceph.py:834 +msgid "adjusting restore vol size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:846 +#, python-format +msgid "trying incremental restore from base='%(base)s' snap='%(snap)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:858 +msgid "differential restore failed, trying full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:869 +#, python-format +msgid "restore transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:916 +#, python-format +msgid "rbd has %s extents" +msgstr "" + +#: cinder/backup/drivers/ceph.py:938 +msgid "dest volume is original volume - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:959 +msgid "destination has extents - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:964 +#, python-format +msgid "no restore point found for backup='%s', forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:995 +msgid "forcing full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1005 +#, python-format +msgid "starting restore from Ceph backup=%(src)s to volume=%(dest)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1016 +msgid "volume_file does not support fileno() so skipping fsync()" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1021 +msgid "restore finished successfully." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:1023 +#, python-format +msgid "restore finished with error - %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1029 +#, python-format +msgid "delete started for backup=%s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1034 +msgid "rbd image not found but continuing anyway so that db entry can be removed" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1037 +#, python-format +msgid "delete '%s' finished with warning" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1039 +#, python-format +msgid "delete '%s' finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:106 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:123 +#, python-format +msgid "single_user auth mode enabled, but %(param)s not set" +msgstr "" + +#: cinder/backup/drivers/swift.py:141 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:146 +#, python-format +msgid "container %s does not exist" +msgstr "" + +#: cinder/backup/drivers/swift.py:151 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/drivers/swift.py:157 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:173 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:182 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:192 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:209 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/drivers/swift.py:214 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:219 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:224 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/drivers/swift.py:234 +#, python-format +msgid "volume size %d is invalid." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:248 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:271 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/drivers/swift.py:278 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:287 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/drivers/swift.py:291 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/drivers/swift.py:297 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:301 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:304 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:312 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/drivers/swift.py:328 cinder/backup/drivers/tsm.py:324 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/drivers/swift.py:345 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/drivers/swift.py:350 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:356 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/drivers/swift.py:362 +#, python-format +msgid "" +"restoring object from swift. backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:378 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/drivers/swift.py:401 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:409 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:423 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:428 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:432 cinder/backup/drivers/tsm.py:378 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:446 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:455 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:458 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:468 cinder/backup/drivers/tsm.py:440 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/backup/drivers/tsm.py:85 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to create device hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:143 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to obtain backup success notification from " +"server.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:173 +#, python-format +msgid "" +"restore: %(vol_id)s Failed.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:199 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a block device." +msgstr "" + +#: cinder/backup/drivers/tsm.py:206 +#, python-format +msgid "backup: %(vol_id)s Failed. Cannot obtain real path to device %(path)s." +msgstr "" + +#: cinder/backup/drivers/tsm.py:213 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a file." +msgstr "" + +#: cinder/backup/drivers/tsm.py:260 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to remove backup hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:286 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to TSM, volume path: " +"%(volume_path)s," +msgstr "" + +#: cinder/backup/drivers/tsm.py:298 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:308 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:338 +#, python-format +msgid "" +"restore: starting restore of backup from TSM to volume %(volume_id)s, " +"backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:352 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:362 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:413 +#, python-format +msgid "" +"delete: %(vol_id)s Failed to run dsmc with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:421 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments with " +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:432 +#, python-format +msgid "" +"delete: %(vol_id)s Failed with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/brick/exception.py:55 +#, python-format +msgid "Exception in string format operation. 
msg='%s'" +msgstr "" + +#: cinder/brick/exception.py:85 +msgid "We are unable to locate any Fibre Channel devices." +msgstr "" + +#: cinder/brick/exception.py:89 +msgid "Unable to find a Fibre Channel volume device." +msgstr "" + +#: cinder/brick/exception.py:93 +#, python-format +msgid "Volume device not found at %(device)s." +msgstr "" + +#: cinder/brick/exception.py:97 +#, python-format +msgid "Unable to find Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:101 +#, python-format +msgid "Failed to create Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:105 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:109 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:113 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:117 +#, python-format +msgid "Connect to volume via protocol %(protocol)s not supported." +msgstr "" + +#: cinder/brick/initiator/connector.py:127 +#, python-format +msgid "Invalid InitiatorConnector protocol specified %(protocol)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:140 +#, python-format +msgid "Failed to access the device on the path %(path)s: %(error)s %(info)s." +msgstr "" + +#: cinder/brick/initiator/connector.py:229 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. Try" +" number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:242 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:317 +#, python-format +msgid "Could not find the iSCSI Initiator File %s" +msgstr "" + +#: cinder/brick/initiator/connector.py:609 +msgid "We are unable to locate any Fibre Channel devices" +msgstr "" + +#: cinder/brick/initiator/connector.py:619 +#, python-format +msgid "Looking for Fibre Channel dev %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:629 +msgid "Fibre Channel volume device not found." +msgstr "" + +#: cinder/brick/initiator/connector.py:633 +#, python-format +msgid "Fibre volume not yet found. Will rescan & retry. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:649 +#, python-format +msgid "Found Fibre Channel volume %(name)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:658 +#, python-format +msgid "Multipath device discovered %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:776 +#, python-format +msgid "AoE volume not yet found at: %(path)s. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:789 +#, python-format +msgid "Found AoE device %(path)s (after %(tries)s rediscover)" +msgstr "" + +#: cinder/brick/initiator/connector.py:815 +#, python-format +msgid "aoe-discover: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:825 +#, python-format +msgid "aoe-revalidate %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:834 +#, python-format +msgid "aoe-flush %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:858 +msgid "" +"Connection details not present. RemoteFsClient may not initialize " +"properly." 
+msgstr "" + +#: cinder/brick/initiator/connector.py:915 +msgid "Invalid connection_properties specified no device_path attribute" +msgstr "" + +#: cinder/brick/initiator/linuxfc.py:50 cinder/brick/initiator/linuxfc.py:56 +msgid "systool is not installed" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:99 +#: cinder/brick/initiator/linuxscsi.py:107 +#: cinder/brick/initiator/linuxscsi.py:124 +#, python-format +msgid "multipath call failed exit (%(code)s)" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:145 +#, python-format +msgid "Couldn't find multipath device %(line)s" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:149 +#, python-format +msgid "Found multipath device = %(mdev)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:140 +msgid "Attempting recreate of backing lun..." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:158 +#, python-format +msgid "" +"Failed to recover attempt to create iscsi backing lun for volume " +"id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:177 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:184 +#, python-format +msgid "" +"Created volume path %(vp)s,\n" +"content: %(vc)%" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:216 cinder/brick/iscsi/iscsi.py:365 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:227 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:258 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:262 +#, python-format +msgid "Volume path %s does not exist, nothing to remove." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:280 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:290 cinder/brick/iscsi/iscsi.py:550 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:375 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:469 +msgid "cinder-rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:489 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:513 cinder/brick/iscsi/iscsi.py:522 +#, python-format +msgid "Failed to create iscsi target for volume id:%s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:532 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:542 +#, python-format +msgid "Failed to remove iscsi target for volume id:%s." 
+msgstr "" + +#: cinder/brick/iscsi/iscsi.py:571 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 cinder/brick/local_dev/lvm.py:158 +#: cinder/brick/local_dev/lvm.py:474 cinder/brick/local_dev/lvm.py:503 +#: cinder/brick/local_dev/lvm.py:546 cinder/brick/local_dev/lvm.py:609 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 cinder/brick/local_dev/lvm.py:159 +#: cinder/brick/local_dev/lvm.py:475 cinder/brick/local_dev/lvm.py:504 +#: cinder/brick/local_dev/lvm.py:547 cinder/brick/local_dev/lvm.py:610 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 cinder/brick/local_dev/lvm.py:160 +#: cinder/brick/local_dev/lvm.py:476 cinder/brick/local_dev/lvm.py:505 +#: cinder/brick/local_dev/lvm.py:548 cinder/brick/local_dev/lvm.py:611 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, python-format +msgid "Unable to locate Volume Group %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:157 +msgid "Error querying thin pool about data_percent" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:370 +#, python-format +msgid "Unable to find VG: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:420 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:473 +msgid "Error creating Volume" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:489 +#, python-format +msgid "Unable to find LV: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:502 +msgid "Error creating snapshot" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:545 +msgid "Error activating LV" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:563 +#, python-format +msgid "Error reported running lvremove: CMD: %(command)s, RESPONSE: %(response)s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:568 +msgid "Attempting udev settle and retry of lvremove..." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:608 +msgid "Error extending Volume" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:39 +msgid "nfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:45 +msgid "glusterfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:86 +#, python-format +msgid "Already mounted: %s" +msgstr "" + +#: cinder/common/config.py:125 +msgid "Deploy v1 of the Cinder API." +msgstr "" + +#: cinder/common/config.py:128 +msgid "Deploy v2 of the Cinder API." +msgstr "" + +#: cinder/common/sqlalchemyutils.py:66 +#: cinder/openstack/common/db/sqlalchemy/utils.py:72 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:114 +#: cinder/openstack/common/db/sqlalchemy/utils.py:120 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/compute/nova.py:97 +#, python-format +msgid "Novaclient connection created using URL: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:63 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:190 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:843 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1842 +#, python-format +msgid "VolumeType %s deletion failed, VolumeType in use." 
+msgstr "" + +#: cinder/db/sqlalchemy/api.py:2530 +#, python-format +msgid "No backup with id %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2615 +msgid "Volume must be available" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2639 +#, python-format +msgid "Volume in unexpected state %s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2662 +#, python-format +msgid "" +"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " +"%(status)s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:37 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:64 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:240 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:269 +msgid "Downgrade from initial Cinder install is unsupported." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:49 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:74 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:105 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:45 +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:48 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:46 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:127 +msgid "Dropping foreign key reservations_ibfk_1 failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:133 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:140 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:147 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:60 +msgid "Exception while creating table 'volume_glance_metadata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:75 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:68 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:58 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:61 +msgid "transfers table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:31 +msgid "migrations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:61 +#, python-format +msgid "Table |%s| not created" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:37 +#, python-format +msgid "Exception while dropping table %s." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:100 +#, python-format +msgid "Exception while creating table %s." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:34 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:43 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:49 +#, python-format +msgid "Column |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:92 +msgid "encryption_key_id column not dropped from volumes" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:100 +msgid "encryption_key_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:105 +msgid "volume_type_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:113 +msgid "encryption table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:49 +msgid "Table quality_of_service_specs not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:60 +msgid "Added qos_specs_id column to volume type table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:85 +msgid "Dropping foreign key volume_types_ibfk_1 failed" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:93 +msgid "Dropping qos_specs_id column failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:100 +msgid "Dropping quality_of_service_specs table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:59 +msgid "volume_admin_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:48 +msgid "" +"Found existing 'default' entries in the quota_classes table. Skipping " +"insertion of default values." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:72 +msgid "Added default quota class data into the DB." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:74 +msgid "Default quota class data not inserted into the DB." +msgstr "" + +#: cinder/image/glance.py:161 cinder/image/glance.py:169 +#, python-format +msgid "Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:94 cinder/image/image_utils.py:199 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/image/image_utils.py:101 +#, python-format +msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:109 cinder/image/image_utils.py:192 +#, python-format +msgid "" +"Size is %(image_size)dGB and doesn't fit in a volume of size " +"%(volume_size)dGB." +msgstr "" + +#: cinder/image/image_utils.py:157 +#, python-format +msgid "" +"qemu-img is not installed and image is of type %s. Only RAW images can " +"be used if qemu-img is not installed." +msgstr "" + +#: cinder/image/image_utils.py:164 +msgid "" +"qemu-img is not installed and the disk format is not specified. Only RAW" +" images can be used if qemu-img is not installed." 
+msgstr "" + +#: cinder/image/image_utils.py:178 +#, python-format +msgid "Copying image from %(tmp)s to volume %(dest)s - size: %(size)s" +msgstr "" + +#: cinder/image/image_utils.py:206 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:224 +#, python-format +msgid "Converted to %(vol_format)s, but format is now %(file_format)s" +msgstr "" + +#: cinder/image/image_utils.py:260 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:78 +msgid "" +"config option keymgr.fixed_key has not been defined: some operations may " +"fail unexpectedly" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:80 +msgid "keymgr.fixed_key not defined" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:134 +#, python-format +msgid "Not deleting key %s" +msgstr "" + +#: cinder/openstack/common/eventlet_backdoor.py:140 +#, python-format +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/fileutils.py:64 +#, python-format +msgid "Reloading cached file %s" +msgstr "" + +#: cinder/openstack/common/gettextutils.py:252 +msgid "Message objects do not support addition." +msgstr "" + +#: cinder/openstack/common/gettextutils.py:261 +msgid "" +"Message objects do not support str() because they may contain non-ascii " +"characters. Please use unicode() or translate() instead." +msgstr "" + +#: cinder/openstack/common/imageutils.py:96 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:189 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:200 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:227 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:235 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/log.py:301 +#, python-format +msgid "Deprecated: %s" +msgstr "" + +#: cinder/openstack/common/log.py:402 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:453 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:623 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:82 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:89 +#: cinder/tests/brick/test_brick_connector.py:466 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:129 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:136 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:43 +#, python-format +msgid "Unexpected argument for periodic task creation: %(arg)s." 
+msgstr "" + +#: cinder/openstack/common/periodic_task.py:134 +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:139 +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:177 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:186 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." +msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:127 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/openstack/common/processutils.py:142 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:167 +#: cinder/openstack/common/processutils.py:239 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:345 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:179 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/openstack/common/processutils.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:318 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:220 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/openstack/common/processutils.py:224 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/openstack/common/service.py:175 +#: cinder/openstack/common/service.py:269 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:187 +msgid "Exception during rpc cleanup." 
+msgstr "" + +#: cinder/openstack/common/service.py:238 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:275 +msgid "Unhandled exception" +msgstr "" + +#: cinder/openstack/common/service.py:308 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/openstack/common/service.py:327 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/openstack/common/service.py:337 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/openstack/common/service.py:354 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/openstack/common/service.py:358 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/service.py:362 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/openstack/common/service.py:392 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/openstack/common/service.py:410 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/openstack/common/sslutils.py:98 +#, python-format +msgid "Invalid SSL version : %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:86 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/strutils.py:182 +#, python-format +msgid "Invalid string format: %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:189 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/openstack/common/versionutils.py:69 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and " +"may be removed in %(remove_in)s." +msgstr "" + +#: cinder/openstack/common/versionutils.py:73 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s and may be removed in " +"%(remove_in)s. It will not be superseded." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:29 +msgid "An unknown error occurred in crypto utils." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:36 +#, python-format +msgid "Block size of %(given)d is too big, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:45 +#, python-format +msgid "Length of %(given)d is too long, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/db/exception.py:44 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:487 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:538 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:610 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/utils.py:33 +msgid "Sort key supplied was not valid." +msgstr "" + +#: cinder/openstack/common/notifier/api.py:129 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:145 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:164 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. 
Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:105 +#, python-format +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:83 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:216 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:299 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:345 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "received %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:422 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:423 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:280 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:459 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:594 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:597 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:631 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:640 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:668 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." 
+msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:166 +#: cinder/openstack/common/rpc/impl_qpid.py:164 +msgid "Failed to process message... skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:477 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:499 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:536 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:552 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:606 +#: cinder/openstack/common/rpc/impl_qpid.py:507 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:624 +#: cinder/openstack/common/rpc/impl_qpid.py:522 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:628 +#: cinder/openstack/common/rpc/impl_qpid.py:526 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:667 +#: cinder/openstack/common/rpc/impl_qpid.py:561 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:84 +#, python-format +msgid "Invalid value for qpid_topology_version: %d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:455 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:461 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:474 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:534 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:101 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:136 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:137 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:138 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:146 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:158 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:200 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:205 +msgid "You cannot send on this socket." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:267 +#, python-format +msgid "Running func with context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:305 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:387 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:437 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:443 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:475 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:481 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:497 +#, python-format +msgid "Required IPC directory does not exist at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:506 +#, python-format +msgid "Permission denied to IPC directory at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:509 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:543 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:562 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:590 +msgid "Skipping topic registration. Already registered." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:597 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:649 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:662 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:675 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:678 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:681 +#, python-format +msgid "Received message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:682 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:691 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:698 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:721 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:724 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:728 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:731 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:771 +#, python-format +msgid "topic is %s." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:815 +#, python-format +msgid "rpc_zmq_matchmaker = %(orig)s is deprecated; use %(new)s instead" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#: cinder/openstack/common/rpc/matchmaker_ring.py:79 +#: cinder/openstack/common/rpc/matchmaker_ring.py:97 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:54 +#, python-format +msgid "extra_spec requirement '%(req)s' does not match '%(cap)s'" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:67 +#, python-format +msgid "%(host_state)s fails resource_type extra_specs requirements" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:43 +msgid "Re-scheduling is disabled." +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:52 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/scheduler/driver.py:69 +msgid "Must implement host_passes_filters" +msgstr "" + +#: cinder/scheduler/driver.py:74 +msgid "Must implement find_retype_host" +msgstr "" + +#: cinder/scheduler/driver.py:78 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:82 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:98 +#, python-format +msgid "cannot place volume %(id)s on %(host)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:114 +#, python-format +msgid "No valid hosts for volume %(id)s with type %(type)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:125 +#, python-format +msgid "" +"Current host not valid for volume %(id)s with type %(type)s, migration " +"not allowed" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:156 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:174 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:207 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:259 +#, python-format +msgid "Filtered %s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:276 +#, python-format +msgid "Choosing %s" +msgstr "" + +#: cinder/scheduler/host_manager.py:264 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:269 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:294 +#, python-format +msgid "volume service is down or disabled. (host: %s)" +msgstr "" + +#: cinder/scheduler/manager.py:63 +msgid "" +"ChanceScheduler and SimpleScheduler have been deprecated due to lack of " +"support for advanced features like: volume types, volume encryption, QoS " +"etc. These two schedulers can be fully replaced by FilterScheduler with " +"certain combination of filters and weighers." 
+msgstr "" + +#: cinder/scheduler/manager.py:98 cinder/scheduler/manager.py:100 +msgid "Failed to create scheduler manager volume flow" +msgstr "" + +#: cinder/scheduler/manager.py:159 +msgid "New volume type not specified in request_spec." +msgstr "" + +#: cinder/scheduler/manager.py:174 +#, python-format +msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." +msgstr "" + +#: cinder/scheduler/manager.py:192 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:68 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%s'" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:43 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:57 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:53 +msgid "No volume_id provided to populate a request_spec from" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:116 +#, python-format +msgid "Failed to schedule_create_volume: %(cause)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:135 +#, python-format +msgid "Failed notifying on %(topic)s payload %(payload)s" +msgstr "" + +#: cinder/tests/fake_driver.py:57 cinder/volume/driver.py:784 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:76 cinder/volume/driver.py:884 +#, python-format +msgid "FAKE ISER: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:97 +msgid "local_path not implemented" +msgstr "" + +#: cinder/tests/fake_driver.py:124 cinder/tests/fake_driver.py:129 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:70 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:78 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:94 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:97 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:58 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_netapp_nfs.py:360 +#, python-format +msgid "Share %(share)s and file name %(file_name)s" +msgstr "" + +#: cinder/tests/test_rbd.py:768 cinder/volume/drivers/rbd.py:175 +msgid "flush() not supported in this version of librbd" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:260 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1507 +#, python-format +msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1510 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1515 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:60 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:61 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: 
cinder/tests/test_xiv_ds8k.py:102 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/tests/api/contrib/test_backups.py:741 +msgid "Invalid input" +msgstr "" + +#: cinder/tests/integrated/test_login.py:29 +#, python-format +msgid "volume: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:32 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:42 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:50 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:58 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:100 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:103 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:121 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:148 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:159 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:166 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/transfer/api.py:68 +msgid "Volume in unexpected state" +msgstr "" + +#: cinder/transfer/api.py:102 cinder/volume/api.py:367 +msgid "status must be available" +msgstr "" + +#: cinder/transfer/api.py:119 +#, python-format +msgid "Failed to create transfer record for %s" +msgstr "" + +#: cinder/transfer/api.py:136 +#, python-format +msgid "Attempt to transfer %s with invalid auth key." +msgstr "" + +#: cinder/transfer/api.py:156 cinder/volume/flows/api/create_volume.py:508 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/transfer/api.py:182 +#, python-format +msgid "Failed to update quota donating volumetransfer id %s" +msgstr "" + +#: cinder/transfer/api.py:199 +#, python-format +msgid "Volume %s has been transferred." 
+msgstr "" + +#: cinder/volume/api.py:143 +#, python-format +msgid "Unable to query if %s is in the availability zone set" +msgstr "" + +#: cinder/volume/api.py:171 cinder/volume/api.py:173 +msgid "Failed to create api volume flow" +msgstr "" + +#: cinder/volume/api.py:202 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:214 +#, python-format +msgid "Volume status must be available or error, but current status is: %s" +msgstr "" + +#: cinder/volume/api.py:224 +msgid "Volume cannot be deleted while migrating" +msgstr "" + +#: cinder/volume/api.py:229 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:285 cinder/volume/api.py:350 +#: cinder/volume/qos_specs.py:240 cinder/volume/volume_types.py:67 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:370 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:377 +msgid "status must be in-use to detach" +msgstr "" + +#: cinder/volume/api.py:388 +msgid "Volume status must be available to reserve" +msgstr "" + +#: cinder/volume/api.py:464 +msgid "Snapshot cannot be created while volume is migrating" +msgstr "" + +#: cinder/volume/api.py:468 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:490 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:502 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:553 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/api.py:581 cinder/volume/flows/api/create_volume.py:208 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:585 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:589 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:720 cinder/volume/api.py:772 +msgid "Volume status must be available/in-use." +msgstr "" + +#: cinder/volume/api.py:723 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/api.py:752 +msgid "Volume status must be available to extend." +msgstr "" + +#: cinder/volume/api.py:757 +#, python-format +msgid "" +"New size for extend must be greater than current size. (current: " +"%(size)s, extended: %(new_size)s)" +msgstr "" + +#: cinder/volume/api.py:778 +msgid "Volume is already part of an active migration" +msgstr "" + +#: cinder/volume/api.py:784 +msgid "volume must not have snapshots" +msgstr "" + +#: cinder/volume/api.py:797 +#, python-format +msgid "No available service named %s" +msgstr "" + +#: cinder/volume/api.py:803 +msgid "Destination host must be different than current host" +msgstr "" + +#: cinder/volume/api.py:833 +msgid "Source volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:837 +msgid "Destination volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:842 +#, python-format +msgid "Destination has migration_status %(stat)s, expected %(exp)s." +msgstr "" + +#: cinder/volume/api.py:853 +msgid "Volume status must be available to update readonly flag." 
+msgstr "" + +#: cinder/volume/api.py:862 +#, python-format +msgid "Unable to update type due to incorrect status on volume: %s" +msgstr "" + +#: cinder/volume/api.py:868 +#, python-format +msgid "Volume %s is already part of an active migration." +msgstr "" + +#: cinder/volume/api.py:874 +#, python-format +msgid "migration_policy must be 'on-demand' or 'never', passed: %s" +msgstr "" + +#: cinder/volume/api.py:887 +#, python-format +msgid "Invalid volume_type passed: %s" +msgstr "" + +#: cinder/volume/api.py:900 +#, python-format +msgid "New volume_type same as original: %s" +msgstr "" + +#: cinder/volume/api.py:915 +msgid "Retype cannot change encryption requirements" +msgstr "" + +#: cinder/volume/api.py:927 +msgid "Retype cannot change front-end qos specs for in-use volumes" +msgstr "" + +#: cinder/volume/driver.py:189 cinder/volume/drivers/netapp/nfs.py:174 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:282 +#, python-format +msgid "copy_data_between_volumes %(src)s -> %(dest)s." +msgstr "" + +#: cinder/volume/driver.py:295 cinder/volume/driver.py:309 +#, python-format +msgid "Failed to attach volume %(vol)s" +msgstr "" + +#: cinder/volume/driver.py:327 +#, python-format +msgid "Failed to copy volume %(src)s to %(dest)d" +msgstr "" + +#: cinder/volume/driver.py:340 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:358 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:394 +#, python-format +msgid "Unable to access the backend storage via the path %(path)s." +msgstr "" + +#: cinder/volume/driver.py:433 +#, python-format +msgid "Creating a new backup for volume %s." +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Restoring backup %(backup)s to volume %(volume)s." +msgstr "" + +#: cinder/volume/driver.py:474 +msgid "Extend volume not implemented" +msgstr "" + +#: cinder/volume/driver.py:533 cinder/volume/drivers/emc/emc_smis_iscsi.py:113 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:546 +#, python-format +msgid "ISCSI discovery attempt failed for:%s" +msgstr "" + +#: cinder/volume/driver.py:548 +#, python-format +msgid "Error from iscsiadm -m discovery: %s" +msgstr "" + +#: cinder/volume/driver.py:595 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:599 cinder/volume/drivers/emc/emc_smis_iscsi.py:156 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:696 +msgid "The volume driver requires the iSCSI initiator name in the connector." +msgstr "" + +#: cinder/volume/driver.py:726 cinder/volume/driver.py:845 +#: cinder/volume/drivers/eqlx.py:247 cinder/volume/drivers/lvm.py:359 +#: cinder/volume/drivers/zadara.py:650 +#: cinder/volume/drivers/emc/emc_smis_common.py:859 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:235 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:602 +#: cinder/volume/drivers/netapp/iscsi.py:1032 +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/nexenta/iscsi.py:538 +#: cinder/volume/drivers/windows/windows.py:205 +msgid "Updating volume stats" +msgstr "" + +#: cinder/volume/driver.py:924 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:203 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." 
+msgstr "" + +#: cinder/volume/manager.py:209 +msgid "" +"ThinLVMVolumeDriver is deprecated, please configure LVMISCSIDriver and " +"lvm_type=thin. Continuing with those settings." +msgstr "" + +#: cinder/volume/manager.py:228 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)" +msgstr "" + +#: cinder/volume/manager.py:235 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s" +msgstr "" + +#: cinder/volume/manager.py:244 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:257 +#, python-format +msgid "Failed to re-export volume %s: setting to error state" +msgstr "" + +#: cinder/volume/manager.py:264 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:271 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:273 +#, python-format +msgid "" +"Error encountered during re-exporting phase of driver initialization: " +"%(name)s" +msgstr "" + +#: cinder/volume/manager.py:283 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:328 cinder/volume/manager.py:330 +msgid "Failed to create manager volume flow" +msgstr "" + +#: cinder/volume/manager.py:374 cinder/volume/manager.py:391 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:380 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:389 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:394 +#, python-format +msgid "Cannot delete volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:422 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:427 +#, python-format +msgid "volume %s: glance metadata deleted" +msgstr "" + +#: cinder/volume/manager.py:430 +#, python-format +msgid "no glance metadata found for volume %s" +msgstr "" + +#: cinder/volume/manager.py:434 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:451 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:462 +#, python-format +msgid "snapshot %(snap_id)s: creating" +msgstr "" + +#: cinder/volume/manager.py:490 +#, python-format +msgid "" +"Failed updating %(snapshot_id)s metadata using the provided volumes " +"%(volume_id)s metadata" +msgstr "" + +#: cinder/volume/manager.py:496 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:508 cinder/volume/manager.py:518 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:526 +#, python-format +msgid "Cannot delete snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:556 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:559 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:579 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:583 +msgid "being attached by another host" +msgstr "" + +#: cinder/volume/manager.py:587 +msgid "being attached by different mode" +msgstr "" + +#: cinder/volume/manager.py:590 +msgid "status must be available or attaching" +msgstr "" + +#: cinder/volume/manager.py:698 +#, python-format +msgid 
"Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "" + +#: cinder/volume/manager.py:760 +#, python-format +msgid "Unable to fetch connection information from backend: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:807 +#, python-format +msgid "Unable to terminate volume connection: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:854 +msgid "failed to create new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:857 +msgid "timeout creating new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:880 +#, python-format +msgid "Failed to copy volume %(vol1)s to %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:909 +#, python-format +msgid "" +"migrate_volume_completion: completing migration for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:921 +#, python-format +msgid "" +"migrate_volume_completion is cleaning up an error for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:940 +#, python-format +msgid "Failed to delete migration source vol %(vol)s: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:976 +#, python-format +msgid "volume %s: calling driver migrate_volume" +msgstr "" + +#: cinder/volume/manager.py:1016 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/manager.py:1024 +#, python-format +msgid "" +"Unable to update stats, %(driver_name)s -%(driver_version)s " +"%(config_group)s driver is uninitialized." +msgstr "" + +#: cinder/volume/manager.py:1044 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/manager.py:1091 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to extend volume by %(s_size)sG, " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/manager.py:1103 +#, python-format +msgid "volume %s: extending" +msgstr "" + +#: cinder/volume/manager.py:1105 +#, python-format +msgid "volume %s: extended successfully" +msgstr "" + +#: cinder/volume/manager.py:1107 +#, python-format +msgid "volume %s: Error trying to extend volume" +msgstr "" + +#: cinder/volume/manager.py:1169 +msgid "Failed to update usages while retyping volume." +msgstr "" + +#: cinder/volume/manager.py:1170 +msgid "Failed to get old volume type quota reservations" +msgstr "" + +#: cinder/volume/manager.py:1190 +#, python-format +msgid "Volume %s: retyped succesfully" +msgstr "" + +#: cinder/volume/manager.py:1193 +#, python-format +msgid "" +"Volume %s: driver error when trying to retype, falling back to generic " +"mechanism." +msgstr "" + +#: cinder/volume/manager.py:1204 +msgid "Retype requires migration but is not allowed." +msgstr "" + +#: cinder/volume/manager.py:1212 +msgid "Volume must not have snapshots." 
+msgstr "" + +#: cinder/volume/qos_specs.py:57 +#, python-format +msgid "Valid consumer of QoS specs are: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:84 cinder/volume/qos_specs.py:105 +#: cinder/volume/qos_specs.py:155 cinder/volume/qos_specs.py:197 +#: cinder/volume/qos_specs.py:211 cinder/volume/qos_specs.py:225 +#: cinder/volume/volume_types.py:43 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:123 cinder/volume/qos_specs.py:140 +#: cinder/volume/qos_specs.py:272 cinder/volume/volume_types.py:52 +#: cinder/volume/volume_types.py:99 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/qos_specs.py:156 +#, python-format +msgid "Failed to get all associations of qos specs %s" +msgstr "" + +#: cinder/volume/qos_specs.py:189 +#, python-format +msgid "" +"Type %(type_id)s is already associated with another qos specs: " +"%(qos_specs_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:198 +#, python-format +msgid "Failed to associate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:212 +#, python-format +msgid "Failed to disassociate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:226 +#, python-format +msgid "Failed to disassociate qos specs %s." +msgstr "" + +#: cinder/volume/qos_specs.py:284 cinder/volume/volume_types.py:111 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/utils.py:144 +#, python-format +msgid "" +"Incorrect value error: %(blocksize)s, it may indicate that " +"'volume_dd_blocksize' was configured incorrectly. Fall back to default." +msgstr "" + +#: cinder/volume/volume_types.py:130 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:131 +#: cinder/volume/drivers/block_device.py:143 cinder/volume/drivers/lvm.py:654 +#: cinder/volume/drivers/lvm.py:669 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:157 cinder/volume/drivers/lvm.py:687 +#, python-format +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:183 cinder/volume/drivers/lvm.py:483 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:200 cinder/volume/drivers/lvm.py:504 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:272 cinder/volume/drivers/lvm.py:227 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:287 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:311 cinder/volume/drivers/lvm.py:300 +#: cinder/volume/drivers/zadara.py:509 cinder/volume/drivers/nexenta/nfs.py:189 +#, python-format +msgid "Creating clone of volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:380 +msgid "No free disk" +msgstr "" + +#: cinder/volume/drivers/block_device.py:393 +msgid "No big enough free disk" +msgstr "" + +#: cinder/volume/drivers/coraid.py:84 +#, python-format +msgid "Invalid ESM url scheme \"%s\". Supported https only." +msgstr "" + +#: cinder/volume/drivers/coraid.py:111 +msgid "Invalid REST handle name. Expected path." 
+msgstr "" + +#: cinder/volume/drivers/coraid.py:134 +#, python-format +msgid "Call to json.loads() failed: %(ex)s. Response: %(resp)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:224 +msgid "Session is expired. Relogin on ESM." +msgstr "" + +#: cinder/volume/drivers/coraid.py:244 +msgid "Reply is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:246 +msgid "Error message is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:284 +#, python-format +msgid "Coraid Appliance ping failed: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:297 +#, python-format +msgid "Volume \"%(name)s\" created with VSX LUN \"%(lun)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:311 +#, python-format +msgid "Volume \"%s\" deleted." +msgstr "" + +#: cinder/volume/drivers/coraid.py:315 +#, python-format +msgid "Resize volume \"%(name)s\" to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:319 +#, python-format +msgid "Repository for volume \"%(name)s\" found: \"%(repo)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:333 +#, python-format +msgid "Volume \"%(name)s\" resized. New size is %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:385 +msgid "Cannot create clone volume in different repository." +msgstr "" + +#: cinder/volume/drivers/coraid.py:505 +#, python-format +msgid "Initialize connection %(shelf)s/%(lun)s for %(name)s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:139 +#, python-format +msgid "" +"CLI output\n" +"%s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:154 +msgid "Reading CLI MOTD" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:158 +#, python-format +msgid "Setting CLI terminal width: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:162 +#, python-format +msgid "Sending CLI command: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:169 +msgid "Error executing EQL command" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:199 +#, python-format +msgid "EQL-driver: executing \"%s\"" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:208 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:383 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:215 cinder/volume/drivers/san/san.py:149 +#, python-format +msgid "Error running SSH command: %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:282 +#, python-format +msgid "Volume %s does not exist, it may have already been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:300 +#, python-format +msgid "EQL-driver: Setup is complete, group IP is %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:304 +msgid "Failed to setup the Dell EqualLogic driver" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:320 +#, python-format +msgid "Failed to create volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:329 +#, python-format +msgid "Volume %s was not found while trying to delete it" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:333 +#, python-format +msgid "Failed to delete volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:348 +#, python-format +msgid "Failed to create snapshot of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:361 +#, python-format +msgid "Failed to create volume from snapshot %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:374 +#, python-format +msgid "Failed to create clone of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:384 +#, python-format +msgid "Failed to delete snapshot %(snap)s of volume %(vol)s" +msgstr "" + +#: 
cinder/volume/drivers/eqlx.py:405 +#, python-format +msgid "Failed to initialize connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:415 +#, python-format +msgid "Failed to terminate connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:436 +#, python-format +msgid "Volume %s is not found!, it may have been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:440 +#, python-format +msgid "Failed to ensure export of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:459 +#, python-format +msgid "Failed to extend_volume %(name)s from %(current_size)sGB to %(new_size)sGB" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:86 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:91 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:103 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:161 +#, python-format +msgid "Cloning volume %(src)s to volume %(dst)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:166 +msgid "Volume status must be 'available'." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:202 cinder/volume/drivers/nfs.py:122 +#: cinder/volume/drivers/netapp/nfs.py:753 +#, python-format +msgid "casted to %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:216 +msgid "Snapshot status must be \"available\" to clone." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:238 +#, python-format +msgid "snapshot: %(snap)s, volume: %(vol)s, volume_size: %(size)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:257 +#, python-format +msgid "will copy from snapshot at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:275 cinder/volume/drivers/nfs.py:172 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:373 +#, python-format +msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:403 +#, python-format +msgid "nova call result: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:405 +msgid "Call to Nova to create snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:427 +msgid "Nova returned \"error\" status while creating snapshot." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:431 +#, python-format +msgid "Status of snapshot %(id)s is now %(status)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:444 +#, python-format +msgid "Timed out while waiting for Nova update for creation of snapshot %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:456 +#, python-format +msgid "create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:457 +#, python-format +msgid "volume id: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:532 +msgid "'active' must be present when writing snap_info." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:562 +#, python-format +msgid "deleting snapshot %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:566 +msgid "Volume status must be \"available\" or \"in-use\"." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:582 +#, python-format +msgid "" +"Snapshot record for %s is not present, allowing snapshot_delete to " +"proceed." 
+msgstr "" + +#: cinder/volume/drivers/glusterfs.py:587 +#, python-format +msgid "snapshot_file for this snap is %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:608 +#, python-format +msgid "No base file found for %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:625 +#, python-format +msgid "No %(base_id)s found for %(file)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:680 +#, python-format +msgid "No file found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:690 +#, python-format +msgid "No snap found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:701 +#, python-format +msgid "No file depends on %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:727 +#, python-format +msgid "Check condition failed: %s expected to be None." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:778 +msgid "Call to Nova delete snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:796 +#, python-format +msgid "status of snapshot %s is still \"deleting\"... waiting" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:802 +#, python-format +msgid "Unable to delete snapshot %(id)s, status: %(status)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:815 +#, python-format +msgid "Timed out while waiting for Nova update for deletion of snapshot %(id)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:904 +#, python-format +msgid "%s must be a valid raw or qcow2 image." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:967 +msgid "Extend volume is only supported for this driver when no snapshots exist." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:975 +#, python-format +msgid "Unrecognized backing format: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:990 +#, python-format +msgid "creating new volume at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:993 +#, python-format +msgid "file already exists at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1019 cinder/volume/drivers/nfs.py:159 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1021 +#, python-format +msgid "Available shares: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1038 +#, python-format +msgid "" +"GlusterFS share at %(dir)s is not writable by the Cinder volume service. " +"Snapshot operations will not be supported." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:96 +#, python-format +msgid "GPFS is not active. Detailed output: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:97 +#, python-format +msgid "GPFS is not running - state: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:140 +msgid "Option gpfs_mount_point_base is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:147 +msgid "Option gpfs_images_share_mode is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:153 +msgid "Option gpfs_images_dir is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:160 +#, python-format +msgid "" +"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " +"belong to different file systems" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:169 +#, python-format +msgid "" +"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in " +"cluster daemon level %(cur)s - must be at least at level %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:183 +#, python-format +msgid "%s must be an absolute path." 
+msgstr "" + +#: cinder/volume/drivers/gpfs.py:188 +#, python-format +msgid "%s is not a directory." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:197 +#, python-format +msgid "" +"The GPFS filesystem %(fs)s is not at the required release level. Current" +" level is %(cur)s, must be at least %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:556 +#, python-format +msgid "Failed to resize volume %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:604 +#, python-format +msgid "mkfs failed on volume %(vol)s, error message was: %(err)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:637 +#, python-format +msgid "" +"%s cannot be accessed. Verify that GPFS is active and file system is " +"mounted." +msgstr "" + +#: cinder/volume/drivers/lvm.py:189 +#, python-format +msgid "Unabled to delete due to existing snapshot for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:215 +#, python-format +msgid "Volume device file path %s does not exist." +msgstr "" + +#: cinder/volume/drivers/lvm.py:221 +#, python-format +msgid "Size for volume: %s not found, cannot secure delete." +msgstr "" + +#: cinder/volume/drivers/lvm.py:262 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:361 +#, python-format +msgid "Unable to update stats on non-initialized Volume Group: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:462 +#, python-format +msgid "Error creating iSCSI target, retrying creation for target: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:482 +#, python-format +msgid "volume_info:%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:518 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:519 cinder/volume/drivers/lvm.py:724 +#: cinder/volume/drivers/huawei/rest_common.py:1225 +#, python-format +msgid "%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:573 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/nfs.py:109 +msgid "Driver specific implementation needs to return mount_point_base." +msgstr "" + +#: cinder/volume/drivers/nfs.py:263 +#, python-format +msgid "Expected volume size was %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:264 +#, python-format +msgid " but size is now %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:361 +#, python-format +msgid "%s is already mounted" +msgstr "" + +#: cinder/volume/drivers/nfs.py:421 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:426 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/nfs.py:431 +#, python-format +msgid "NFS config 'nfs_oversub_ratio' invalid. Must be > 0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:439 +#, python-format +msgid "NFS config 'nfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:493 +#, python-format +msgid "Selected %s as target nfs share." 
+msgstr "" + +#: cinder/volume/drivers/nfs.py:526 +#, python-format +msgid "%s is above nfs_used_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:529 +#, python-format +msgid "%s is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:532 +#, python-format +msgid "%s reserved space is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/rbd.py:160 +#, python-format +msgid "Invalid argument - whence=%s not supported" +msgstr "" + +#: cinder/volume/drivers/rbd.py:164 +msgid "Invalid argument" +msgstr "" + +#: cinder/volume/drivers/rbd.py:183 +msgid "fileno() not supported by RBD()" +msgstr "" + +#: cinder/volume/drivers/rbd.py:210 +#, python-format +msgid "error opening rbd image %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:259 +msgid "rados and rbd python libraries not found" +msgstr "" + +#: cinder/volume/drivers/rbd.py:265 +msgid "error connecting to ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:346 cinder/volume/drivers/sheepdog.py:178 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:377 +#, python-format +msgid "clone depth exceeds limit of %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:411 +#, python-format +msgid "maximum clone depth (%d) has been reached - flattening source volume" +msgstr "" + +#: cinder/volume/drivers/rbd.py:423 +#, python-format +msgid "flattening source volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:435 +#, python-format +msgid "creating snapshot='%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:445 +#, python-format +msgid "cloning '%(src_vol)s@%(src_snap)s' to '%(dest)s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:459 +msgid "clone created successfully" +msgstr "" + +#: cinder/volume/drivers/rbd.py:468 +#, python-format +msgid "creating volume '%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:484 +#, python-format +msgid "flattening %(pool)s/%(img)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:490 +#, python-format +msgid "cloning %(pool)s/%(img)s@%(snap)s to %(dst)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:527 +msgid "volume has no backup snaps" +msgstr "" + +#: cinder/volume/drivers/rbd.py:550 +#, python-format +msgid "volume %s is not a clone" +msgstr "" + +#: cinder/volume/drivers/rbd.py:568 +#, python-format +msgid "deleting parent snapshot %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:579 +#, python-format +msgid "deleting parent %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:593 +#, python-format +msgid "volume %s no longer exists in backend" +msgstr "" + +#: cinder/volume/drivers/rbd.py:609 +msgid "volume has clone snapshot(s)" +msgstr "" + +#: cinder/volume/drivers/rbd.py:625 +#, python-format +msgid "deleting rbd volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:629 +msgid "" +"ImageBusy error raised while deleting rbd volume. This may have been " +"caused by a connection from a client that has crashed and, if so, may be " +"resolved by retrying the delete after 30 seconds has elapsed." 
+msgstr "" + +#: cinder/volume/drivers/rbd.py:642 +msgid "volume is a clone so cleaning references" +msgstr "" + +#: cinder/volume/drivers/rbd.py:696 +#, python-format +msgid "connection data: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:705 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:709 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:712 +msgid "Not an rbd snapshot" +msgstr "" + +#: cinder/volume/drivers/rbd.py:724 +#, python-format +msgid "not cloneable: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:728 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:733 +msgid "rbd image clone requires image format to be 'raw' but image {0} is '{1}'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:747 +#, python-format +msgid "Unable to open image %(loc)s: %(err)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:817 +msgid "volume backup complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:830 +msgid "volume restore complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:840 cinder/volume/drivers/sheepdog.py:195 +#, python-format +msgid "Failed to Extend Volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:845 cinder/volume/drivers/sheepdog.py:200 +#: cinder/volume/drivers/windows/windows.py:223 +#, python-format +msgid "Extend volume from %(old_size)s GB to %(new_size)s GB." +msgstr "" + +#: cinder/volume/drivers/scality.py:67 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:78 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:84 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:105 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:139 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:59 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:64 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:144 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:151 +#, python-format +msgid "" +"Failed to make httplib connection SolidFire Cluster: %s (verify san_ip " +"settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:154 +#, python-format +msgid "Failed to make httplib connection: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:161 +#, python-format +msgid "" +"Request to SolidFire cluster returned bad status: %(status)s / %(reason)s" +" (check san_login/san_password settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:166 +#, python-format +msgid "HTTP request failed, with status: %(status)s and reason: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:177 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:183 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:187 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:189 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:195 +#, python-format +msgid "Detected 
xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:202 +#: cinder/volume/drivers/solidfire.py:271 +#: cinder/volume/drivers/solidfire.py:366 +#, python-format +msgid "API response: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:222 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:253 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:315 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:398 +msgid "Failed to get model update from clone" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:410 +#, python-format +msgid "Failed volume create: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:425 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:460 +#, python-format +msgid "Failed to get SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:469 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:478 +#, python-format +msgid "Volume %s, not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:481 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:550 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:554 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:556 +msgid "This usually means the volume was never successfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:569 +#, python-format +msgid "Failed to delete SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:572 +#: cinder/volume/drivers/solidfire.py:646 +#: cinder/volume/drivers/solidfire.py:709 +#: cinder/volume/drivers/solidfire.py:734 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:575 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:579 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:587 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:638 +msgid "Entering SolidFire extend_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:660 +msgid "Leaving SolidFire extend_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:665 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:673 +msgid "Failed to get updated stats" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:703 +#: cinder/volume/drivers/solidfire.py:728 +msgid "Entering SolidFire attach_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:773 +msgid "Leaving SolidFire transfer volume" +msgstr "" + +#: cinder/volume/drivers/zadara.py:236 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:260 +#, python-format +msgid "Operation completed. 
%(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:357 +#, python-format +msgid "Pool %(name)s: %(total)sGB total, %(free)sGB free" +msgstr "" + +#: cinder/volume/drivers/zadara.py:408 cinder/volume/drivers/zadara.py:531 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:438 +#, python-format +msgid "Create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:445 cinder/volume/drivers/zadara.py:490 +#: cinder/volume/drivers/zadara.py:516 +#, python-format +msgid "Volume %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:456 +#, python-format +msgid "Delete snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:464 +#, python-format +msgid "snapshot: original volume %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:472 +#, python-format +msgid "snapshot: snapshot %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:483 +#, python-format +msgid "Creating volume from snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:496 +#, python-format +msgid "Snapshot %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:614 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:40 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:79 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:83 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:91 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:98 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:107 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:115 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:130 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:137 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:144 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:152 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:157 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:167 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:177 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:188 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:197 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:218 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:230 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:241 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:257 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:266 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:278 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:287 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:292 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:302 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:312 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:321 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:342 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. 
Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:354 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:365 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:381 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:390 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:402 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:411 +msgid "Entering delete_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:413 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:420 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:430 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:438 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:442 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:456 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:465 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:472 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:476 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:488 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:497 +#: cinder/volume/drivers/emc/emc_smis_common.py:567 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:502 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:518 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:527 +#, python-format +msgid "" +"Error Create Snapshot: %(snapshot)s Volume: %(volume)s Error: " +"%(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:535 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:541 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:545 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:551 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:559 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:574 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:590 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:599 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:611 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:621 +#, python-format +msgid "Create export: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:626 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:648 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:663 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:674 +#, python-format +msgid "Error mapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:678 +#, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:694 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:707 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:711 +#, python-format +msgid "HidePaths for volume %s completed successfully." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:724 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:739 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:744 +#, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:757 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:770 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:775 +#, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:781 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:790 +#: cinder/volume/drivers/emc/emc_smis_common.py:820 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:804 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:810 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:834 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:840 +#, python-format +msgid "Volume %s is already mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:852 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:884 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:887 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:903 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:906 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:928 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:948 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:952 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:959 +msgid "Cannot connect to ECOM server" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:971 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:984 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:997 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1010 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1054 +#, python-format +msgid "Pool %(storage_type)s is not found." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1060 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1066 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1082 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1114 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1117 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1130 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1158 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1184 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1188 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1248 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1289 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1302 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1314 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1326 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1361 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1404 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1409 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1419 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1441 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1463 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1491 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1520 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1526 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1538 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1548 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1550 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1566 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:152 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:161 +#, python-format +msgid "Cannot find device number for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:191 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:198 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:215 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:70 +#, python-format +msgid "Range: start LU: %(start)s, end LU: %(end)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:84 +#, python-format +msgid "setting LU upper (end) limit to %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:92 +#, python-format +msgid "%(element)s: %(val)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:103 cinder/volume/drivers/hds/hds.py:105 +#, python-format +msgid "XML exception reading parameter: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:178 +#, python-format +msgid "portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:197 +#, python-format +msgid "No configuration found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:250 +#, python-format +msgid "HDP not found: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:289 +#, python-format +msgid "iSCSI portal not found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:327 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:355 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is cloned." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:372 +#, python-format +msgid "LUN %(lun)s extended to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:395 +#, python-format +msgid "delete lun %(lun)s on %(name)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:480 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created from snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:503 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is created as snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:522 +#, python-format +msgid "LUN %s is deleted." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:57 +msgid "_instantiate_driver: configuration not found." 
+msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:64 +#, python-format +msgid "" +"_instantiate_driver: Loading %(protocol)s driver for Huawei OceanStor " +"%(product)s series storage arrays." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:84 +#, python-format +msgid "" +"\"Product\" or \"Protocol\" is illegal. \"Product\" should be set to " +"either T, Dorado or HVS. \"Protocol\" should be set to either iSCSI or " +"FC. Product: %(product)s Protocol: %(protocol)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:74 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s host: %(host)s initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:92 +#: cinder/volume/drivers/huawei/huawei_t.py:461 +#, python-format +msgid "initialize_connection: Target FC ports WWNS: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:101 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(ini)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:159 +#: cinder/volume/drivers/huawei/rest_common.py:1278 +#, python-format +msgid "" +"_get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " +"check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:206 +#: cinder/volume/drivers/huawei/rest_common.py:1083 +#, python-format +msgid "_get_tgt_iqn: iSCSI IP is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:234 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:248 +#, python-format +msgid "" +"_get_iscsi_tgt_port_info: Failed to get iSCSI port info. Please make sure" +" the iSCSI port IP %s is configured in array." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:323 +#: cinder/volume/drivers/huawei/huawei_t.py:552 +#, python-format +msgid "" +"terminate_connection: volume: %(vol)s, host: %(host)s, connector: " +"%(initiator)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:351 +#, python-format +msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:436 +msgid "validate_connector: The FC driver requires thewwpns in the connector." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:443 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:578 +#, python-format +msgid "_remove_fc_ports: FC port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:40 +#, python-format +msgid "parse_xml_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:129 +#, python-format +msgid "_get_host_os_type: Host %(ip)s OS type is %(os)s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:59 +#, python-format +msgid "HVS Request URL: %(url)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:60 +#, python-format +msgid "HVS Request Data: %(data)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:73 +#, python-format +msgid "HVS Response Data: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:75 +#, python-format +msgid "Bad response from server: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:82 +msgid "JSON transfer error" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:102 +#, python-format +msgid "Login error, reason is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:166 +#, python-format +msgid "" +"%(err)s\n" +"result: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:173 +#, python-format +msgid "%s \"data\" was not in result." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:208 +msgid "Can't find the Qos policy in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:246 +msgid "Can't find lun or lun group in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:280 +#, python-format +msgid "Invalid resource pool: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:298 +#, python-format +msgid "Get pool info error, pool name is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:327 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:354 +#, python-format +msgid "_stop_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:474 +#, python-format +msgid "" +"_mapping_hostgroup_and_lungroup: lun_group: %(lun_group)sview_id: " +"%(view_id)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:511 +#: cinder/volume/drivers/huawei/rest_common.py:543 +#, python-format +msgid "initiator name:%(initiator_name)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:527 +#, python-format +msgid "host lun id is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:553 +#, python-format +msgid "the free wwns %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:574 +#, python-format +msgid "the fc server properties is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:688 +#, python-format +msgid "JSON transfer data error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:874 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:937 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:964 +#, python-format +msgid "" +"PrefetchType config is wrong. PrefetchType must in 1,2,3,4. fetchtype " +"is:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:970 +msgid "Use default prefetch fetchtype. Prefetch fetchtype:Intelligent." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:982 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal.LUNcopy name: " +"%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1056 +#, python-format +msgid "" +"_get_iscsi_port_info: Failed to get iscsi port info through config IP " +"%(ip)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1101 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1124 +#, python-format +msgid "_parse_volume_type: type id: %(type_id)s config parameter is: %(params)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1157 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the configuration file " +"%(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1162 +#, python-format +msgid "The config parameters are: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1239 +#: cinder/volume/drivers/huawei/ssh_common.py:118 +#: cinder/volume/drivers/huawei/ssh_common.py:1265 +#, python-format +msgid "_check_conf_file: Config file invalid. %s must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1246 +#: cinder/volume/drivers/huawei/ssh_common.py:125 +msgid "_check_conf_file: Config file invalid. StoragePool must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1256 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1300 +msgid "Can not find lun in array" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:54 +#, python-format +msgid "ssh_read: Read SSH timeout. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:70 +msgid "No response message. Please check system status." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:101 +#: cinder/volume/drivers/huawei/ssh_common.py:1249 +msgid "do_setup" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:135 +#: cinder/volume/drivers/huawei/ssh_common.py:1287 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType is invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:169 +#, python-format +msgid "_get_login_info: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:224 +#, python-format +msgid "create_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:242 +#, python-format +msgid "" +"_name_translate: Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:279 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the element in configuration " +"file %(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:373 +#: cinder/volume/drivers/huawei/ssh_common.py:1451 +#, python-format +msgid "LUNType must be \"Thin\" or \"Thick\". LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:395 +msgid "" +"_parse_conf_lun_params: Use default prefetch type. 
Prefetch type: " +"Intelligent" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:421 +#, python-format +msgid "" +"_get_maximum_capacity_pool_id: Failed to get pool id. Please check config" +" file and make sure the StoragePool %s is created in storage array." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:436 +#, python-format +msgid "CLI command: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:466 +#, python-format +msgid "" +"_execute_cli: Can not connect to IP %(old)s, try to connect to the other " +"IP %(new)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:501 +#, python-format +msgid "_execute_cli: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:511 +#, python-format +msgid "delete_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:516 +#, python-format +msgid "delete_volume: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:570 +#, python-format +msgid "" +"create_volume_from_snapshot: snapshot name: %(snapshot)s, volume name: " +"%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:580 +#, python-format +msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:650 +#, python-format +msgid "_wait_for_luncopy: LUNcopy %(luncopyname)s status is %(status)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:688 +#, python-format +msgid "create_cloned_volume: src volume: %(src)s, tgt volume: %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:697 +#, python-format +msgid "Source volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:739 +#, python-format +msgid "" +"extend_volume: extended volume name: %(extended_name)s new added volume " +"name: %(added_name)s new added volume size: %(added_size)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:747 +#, python-format +msgid "extend_volume: volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:779 +#, python-format +msgid "create_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:785 +msgid "create_snapshot: Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:792 +#, python-format +msgid "create_snapshot: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:855 +#, python-format +msgid "delete_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:865 +#, python-format +msgid "" +"delete_snapshot: Can not delete snapshot %s for it is a source LUN of " +"LUNCopy." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:873 +#, python-format +msgid "delete_snapshot: Snapshot %(snap)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:916 +#, python-format +msgid "" +"%(func)s: %(msg)s\n" +"CLI command: %(cmd)s\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:933 +#, python-format +msgid "map_volume: Volume %s was not found." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1079 +#, python-format +msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1102 +#, python-format +msgid "remove_map: Host %s does not exist." 
+msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1106 +#, python-format +msgid "remove_map: Volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1119 +#, python-format +msgid "remove_map: No map between host %(host)s and volume %(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1138 +#, python-format +msgid "" +"_delete_map: There are IOs accessing the system. Retry to delete host map" +" %(mapid)s 10s later." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1146 +#, python-format +msgid "" +"_delete_map: Failed to delete host map %(mapid)s.\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1185 +msgid "_update_volume_stats: Updating volume stats." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1277 +msgid "_check_conf_file: Config file invalid. StoragePool must be specified." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1311 +msgid "" +"_get_device_type: The driver only supports Dorado5100 and Dorado 2100 G2 " +"now." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1389 +#, python-format +msgid "" +"create_volume_from_snapshot: %(device)s does not support create volume " +"from snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1396 +#, python-format +msgid "create_cloned_volume: %(device)s does not support clone volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1404 +#, python-format +msgid "extend_volume: %(device)s does not support extend volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1413 +#, python-format +msgid "create_snapshot: %(device)s does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:132 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:142 +#, python-format +msgid "Failed getting details for pool %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:179 +msgid "do_setup: No configured nodes." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:182 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:186 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:190 +msgid "Unable to determine system name" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:193 +msgid "Unable to determine system id" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:196 +msgid "Unable to determine pool extent size" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:203 +#: cinder/volume/drivers/netapp/iscsi.py:122 +#: cinder/volume/drivers/netapp/nfs.py:639 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:157 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:209 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:217 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:225 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:235 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:254 +msgid "The connector does not contain the required information." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:277 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:297 +msgid "CHAP secret exists for host but CHAP is disabled" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:302 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:314 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:316 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:333 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:342 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:365 +msgid "" +"Could not get FC connection information for the host-volume connection. " +"Is the host configured properly for FC connections?" 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:380 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:385 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:403 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:411 +msgid "terminate_connection: Failed to get host name from connector." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:421 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:447 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:459 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:470 +#, python-format +msgid "enter: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:474 +msgid "extend_volume: Extending a volume with snapshots is not supported." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:481 +#, python-format +msgid "leave: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:497 +#, python-format +msgid "enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:523 +#, python-format +msgid "leave: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:540 +#, python-format +msgid "" +"enter: retype: id=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:581 +#, python-format +msgid "" +"exit: retype: ild=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:622 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:623 +msgid "_update_volume_stats: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:44 +#, python-format +msgid "Could not find key in output of command %(cmd)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:64 +#, python-format +msgid "Failed to get code level (%s)." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:86 +#, python-format +msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:143 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:165 +#, python-format +msgid "Failed to find host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:178 +#, python-format +msgid "enter: get_host_from_connector: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:207 +#, python-format +msgid "leave: get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:218 +#, python-format +msgid "enter: create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:224 +msgid "create_host: Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:235 +msgid "create_host: No initiators or wwpns supplied." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:265 +#, python-format +msgid "leave: create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:275 +#, python-format +msgid "enter: map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:301 +#, python-format +msgid "" +"leave: map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host " +"%(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:311 +#, python-format +msgid "enter: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:318 +#, python-format +msgid "unmap_vol_from_host: No mapping of volume %(vol_name)s to any host found." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:324 +#, python-format +msgid "" +"unmap_vol_from_host: Multiple mappings of volume %(vol_name)s found, no " +"host specified." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:336 +#, python-format +msgid "" +"unmap_vol_from_host: No mapping of volume %(vol_name)s to host %(host) " +"found." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:348 +#, python-format +msgid "leave: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:377 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:383 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:390 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:397 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:402 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:408 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:417 +#, python-format +msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:452 +msgid "Protocol must be specified as ' iSCSI' or ' FC'." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:495 +#, python-format +msgid "enter: create_vdisk: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:498 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:525 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping%(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:535 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within theallotted %(to)d " +"seconds timeout. Terminating." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:544 +#, python-format +msgid "" +"enter: run_flashcopy: execute FlashCopy from source %(source)s to target " +"%(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:552 +#, python-format +msgid "leave: run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:572 +#, python-format +msgid "Loopcall: _check_vdisk_fc_mappings(), vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:595 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:631 +#, python-format +msgid "Calling _ensure_vdisk_no_fc_mappings: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:639 +#, python-format +msgid "enter: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:641 +#, python-format +msgid "Tried to delete non-existant vdisk %s." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:645 +#, python-format +msgid "leave: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:649 +#, python-format +msgid "enter: create_copy: snapshot %(src)s to %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:654 +#, python-format +msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:669 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt)s from vdisk %(src)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:691 +msgid "migrate_volume started without a vdisk copy in the expected pool." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:743 +#, python-format +msgid "" +"Ignore change IO group as storage code level is %(code_level)s, below " +"then 6.4.0.0" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:35 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:211 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:244 +#, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:53 +#, python-format +msgid "Expected no output from CLI command %(cmd)s, got %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:65 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:256 +#, python-format +msgid "" +"Failed to parse CLI output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:142 +msgid "Must pass wwpn or host to lsfabric." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:158 +#, python-format +msgid "Did not find success message nor error for %(fun)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:163 +msgid "" +"storwize_svc_multihostmap_enabled is set to False, not allowing multi " +"host mapping." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:347 +#, python-format +msgid "Did not find expected key %(key)s in %(fun)s: %(raw)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:382 +#, python-format +msgid "" +"Unexpected CLI response: header/row mismatch. header: %(header)s, row: " +"%(row)s" +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:419 +#, python-format +msgid "No element by given name %s." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:440 +msgid "Not a valid value for NaElement." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:444 +msgid "NaElement name cannot be null." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:468 +msgid "Type cannot be converted into NaElement." 
+msgstr "" + +#: cinder/volume/drivers/netapp/common.py:75 +msgid "Required configuration not found" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:103 +#, python-format +msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:109 +#, python-format +msgid "Storage family %s is not supported" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:116 +#, python-format +msgid "No default storage protocol found for storage family %(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:123 +#, python-format +msgid "" +"Protocol %(storage_protocol)s is not supported for storage family " +"%(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:130 +#, python-format +msgid "" +"NetApp driver of family %(storage_family)s and protocol " +"%(storage_protocol)s loaded" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:139 +msgid "Only loading netapp drivers supported." +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:158 +#, python-format +msgid "" +"The configured NetApp driver is deprecated. Please refer the link to " +"resolve the issue '%s'." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:69 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:105 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:150 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:166 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:175 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:191 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:227 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:232 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:238 +#, python-format +msgid "Failed to get LUN target details for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:249 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:252 +#, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:290 +#, python-format +msgid "Snapshot %s deletion successful" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:310 +#: cinder/volume/drivers/netapp/iscsi.py:565 +#: cinder/volume/drivers/netapp/nfs.py:99 +#: cinder/volume/drivers/netapp/nfs.py:206 +#, python-format +msgid "Resizing %s failed. Cleaning volume." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:325 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:412 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:431 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:511 +msgid "Object is not a NetApp LUN." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:543 +#, python-format +msgid "Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:545 +#, python-format +msgid "Error getting lun attribute. Exception: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:600 +#, python-format +msgid "No need to extend volume %s as it is already the requested new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:606 +#, python-format +msgid "Resizing lun %s directly to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:633 +#, python-format +msgid "Lun %(path)s geometry failed. Message - %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:662 +#, python-format +msgid "Moving lun %(name)s to %(new_name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:677 +#, python-format +msgid "Resizing lun %s using sub clone to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:684 +#, python-format +msgid "%s cannot be sub clone resized as it is hosted on compressed volume" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:690 +#, python-format +msgid "%s cannot be sub clone resized as it contains no blocks." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:707 +#, python-format +msgid "Post clone resize lun %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:718 +#, python-format +msgid "Failure staging lun %s to tmp." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:723 +#, python-format +msgid "Failure moving new cloned lun to %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:727 +#, python-format +msgid "Failure deleting staged tmp lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:730 +#, python-format +msgid "Unknown exception in post clone resize lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:732 +#, python-format +msgid "Exception details: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:736 +msgid "Getting lun block count." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:741 +#, python-format +msgid "Failure getting lun info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:785 +#, python-format +msgid "Failed to get vol with required size and extra specs for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:796 +#, python-format +msgid "Error provisioning vol %(name)s on %(volume)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:841 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:982 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:986 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1087 +msgid "Cluster ssc is not updated. No volume stats found." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1149 +#: cinder/volume/drivers/netapp/nfs.py:1080 +msgid "Unsupported ONTAP version. ONTAP version 7.3.1 and above is supported." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1153 +#: cinder/volume/drivers/netapp/nfs.py:1084 +#: cinder/volume/drivers/netapp/utils.py:320 +msgid "Api version could not be determined." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1164 +#, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1273 +#, python-format +msgid "Error finding luns for volume %s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1390 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1393 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1456 +msgid "Volume refresh job already running. Returning..." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1462 +#, python-format +msgid "Error refreshing vol capacity. Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1470 +#, python-format +msgid "Refreshing capacity info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:104 +#: cinder/volume/drivers/netapp/nfs.py:211 +#, python-format +msgid "NFS file %s not discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:224 +#, python-format +msgid "Copied image to volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:230 +#, python-format +msgid "Registering image in cache %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:237 +#, python-format +msgid "" +"Exception while registering image %(image_id)s in cache. Exception: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:250 +#, python-format +msgid "Found cache file for image %(image_id)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:263 +#, python-format +msgid "Cloning img from cache for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:271 +msgid "Image cache cleaning in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:282 +msgid "Image cache cleaning in progress." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:293 +#, python-format +msgid "Cleaning cache for share %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:298 +#, python-format +msgid "Files to be queued for deletion %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:305 +#, python-format +msgid "Exception during cache cleaning %(share)s. Message - %(ex)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:311 +msgid "Image cache cleaning done." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:336 +#, python-format +msgid "Bytes to free %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:343 +#, python-format +msgid "Delete file path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:358 +#, python-format +msgid "Deleting file at path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:363 +#, python-format +msgid "Exception during deleting %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:395 +#, python-format +msgid "Unexpected exception in cloning image %(image_id)s. 
Message: %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:411 +#, python-format +msgid "Cloning image %s from cache" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:415 +#, python-format +msgid "Cache share: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:425 +#, python-format +msgid "Unexpected exception during image cloning in share %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:431 +#, python-format +msgid "Cloning image %s directly in share" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:436 +#, python-format +msgid "Share is cloneable %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:443 +#, python-format +msgid "Image is raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:450 +#, python-format +msgid "Image will locally be converted to raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:457 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:467 +#, python-format +msgid "Performing post clone for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:474 +msgid "NFS file could not be discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:478 +msgid "Checking file for resize" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:482 +#, python-format +msgid "Resizing file to %sG" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:488 +msgid "Resizing image file failed." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:510 +msgid "Discover file retries exhausted." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:529 +#, python-format +msgid "Image location not in the expected format %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:557 +#, python-format +msgid "Found possible share matches %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:561 +msgid "Unexpected exception while short listing used share." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:599 +#, python-format +msgid "Extending volume %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:710 +#, python-format +msgid "Shares on vserver %s will only be used for provisioning." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:714 +#: cinder/volume/drivers/netapp/nfs.py:892 +msgid "No vserver set in config. SSC will be disabled." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:757 +#, python-format +msgid "Exception creating vol %(name)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:765 +#, python-format +msgid "Volume %s could not be created on shares." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:815 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:856 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:862 +#, python-format +msgid "" +"Cloning with params volume %(volume)s, src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:924 +msgid "No cluster ssc stats found. Wait for next volume stats update." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:941 +msgid "No shares found hence skipping ssc refresh." 
+msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:978 +#: cinder/volume/drivers/netapp/nfs.py:1221 +#, python-format +msgid "Shortlisted del elg files %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:983 +#: cinder/volume/drivers/netapp/nfs.py:1226 +#, python-format +msgid "Getting file usage for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:988 +#: cinder/volume/drivers/netapp/nfs.py:1231 +#, python-format +msgid "file-usage for path %(path)s is %(bytes)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1005 +#: cinder/volume/drivers/netapp/nfs.py:1268 +#, python-format +msgid "Share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1007 +#: cinder/volume/drivers/netapp/nfs.py:1270 +#, python-format +msgid "No share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1038 +#, python-format +msgid "Found volume %(vol)s for share %(share)s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1129 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1139 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:241 +#, python-format +msgid "Unexpected error while creating ssc vol list. Message - %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:272 +#, python-format +msgid "Exception querying aggr options. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:313 +#, python-format +msgid "Exception querying sis information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:347 +#, python-format +msgid "Exception querying mirror information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:379 +#, python-format +msgid "Exception querying storage disk. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:421 +#, python-format +msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:455 +#, python-format +msgid "Successfully completed stale refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:482 +#, python-format +msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:488 +#, python-format +msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:500 +msgid "Backend not a VolumeDriver." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:502 +msgid "Backend server not NaServer." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:505 +msgid "ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:517 +msgid "refresh stale ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:616 +msgid "Fatal error: User not permitted to query NetApp volumes." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:623 +#, python-format +msgid "" +"The user does not have access or sufficient privileges to use all ssc " +"apis. The ssc features %s may not work as expected." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:122 +msgid "ems executed successfully." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:124 +#, python-format +msgid "Failed to invoke ems. 
Message : %s" +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:137 +msgid "" +"It is not the recommended way to use drivers by NetApp. Please use " +"NetAppDriver to achieve the functionality." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:160 +msgid "Requires an NaServer instance." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:317 +msgid "Unsupported Clustered Data ONTAP version." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:99 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:150 +#, python-format +msgid "Extending volume: %(id)s New size: %(size)s GB" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:166 +#, python-format +msgid "Volume %s does not exist, it seems it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:179 +#, python-format +msgid "Cannot delete snapshot %(origin): %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:190 +#, python-format +msgid "Creating temp snapshot of the original volume: %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:200 +#: cinder/volume/drivers/nexenta/nfs.py:200 +#, python-format +msgid "Volume creation failed, deleting created snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:205 +#: cinder/volume/drivers/nexenta/nfs.py:205 +#, python-format +msgid "Failed to delete zfs snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:223 +#, python-format +msgid "Enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:250 +#, python-format +msgid "Remote NexentaStor appliance at %s should be SSH-bound." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:267 +#, python-format +msgid "" +"Cannot send source snapshot %(src)s to destination %(dst)s. Reason: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:275 +#, python-format +msgid "" +"Cannot delete temporary source snapshot %(src)s on NexentaStor Appliance:" +" %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:281 +#, python-format +msgid "Cannot delete source volume %(volume)s on NexentaStor Appliance: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:318 +#, python-format +msgid "Snapshot %s does not exist, it seems it was already deleted." 
+msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:439 +#: cinder/volume/drivers/windows/windows_utils.py:230 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:449 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:461 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:471 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:481 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:514 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:522 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:83 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:88 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:89 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:90 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:96 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:85 +#, python-format +msgid "Volume %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:89 +#, python-format +msgid "Folder %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:114 +#, python-format +msgid "Creating folder on Nexenta Store %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:146 +#, python-format +msgid "Cannot destroy created folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:176 +#, python-format +msgid "Cannot destroy cloned folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:227 +#, python-format +msgid "Folder %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:237 +#: cinder/volume/drivers/nexenta/nfs.py:268 +#, python-format +msgid "Snapshot %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:302 +#, python-format +msgid "Creating regular file: %s.This may take some time." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:313 +#, python-format +msgid "Regular file: %s created." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:365 +#, python-format +msgid "Sharing folder %s on Nexenta Store" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:393 +#, python-format +msgid "Shares loaded: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/utils.py:46 +#, python-format +msgid "Invalid value: \"%s\"" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:93 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:99 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:107 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:137 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:190 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:246 +#, python-format +msgid "Snapshot info: %(name)s => %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:321 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:79 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:166 +#, python-format +msgid "Invalid hp3parclient version. Version %s or greater required." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:179 +#, python-format +msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:193 +#, python-format +msgid "HP3PARCommon %(common_ver)s, hp3parclient %(rest_ver)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:212 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:494 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:228 +#, python-format +msgid "Failed to get domain because CPG (%s) doesn't exist on array." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:247 +#, python-format +msgid "Error extending volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:347 +#, python-format +msgid "command %s failed" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:390 +#, python-format +msgid "Error running ssh command: %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:586 +#, python-format +msgid "VV Set %s does not exist." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:633 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:684 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:752 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1004 +#, python-format +msgid "Failure in update_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1019 +#, python-format +msgid "Failure in clear_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1031 +#, python-format +msgid "Error attaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1039 +#, python-format +msgid "Error detaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:124 +#, python-format +msgid "Invalid IP address format '%s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:158 +#, python-format +msgid "" +"Found invalid iSCSI IP address(s) in configuration option(s) " +"hp3par_iscsi_ips or iscsi_ip_address '%s.'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:164 +msgid "At least one valid iSCSI IP address must be set." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:266 +msgid "Least busy iSCSI port not found, using first iSCSI port in list." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:75 +#, python-format +msgid "Failure while invoking function: %(func)s. Error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:162 +#, python-format +msgid "Error while terminating session: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:165 +msgid "Successfully established connection to the server." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:172 +#, python-format +msgid "Error while logging out the user: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:218 +#, python-format +msgid "" +"Not authenticated error occurred. Will create session and try API call " +"again: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:258 +#, python-format +msgid "Task: %(task)s progress: %(prog)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:262 +#, python-format +msgid "Task %s status: success." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:266 +#: cinder/volume/drivers/vmware/api.py:271 +#, python-format +msgid "Task: %(task)s failed with error: %(err)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:290 +msgid "Lease is ready." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:294 +msgid "Lease initializing..." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:304 +#, python-format +msgid "Error: unknown lease state %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:51 +#, python-format +msgid "Read %(bytes)s out of %(max)s from ThreadSafePipe." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:56 +#, python-format +msgid "Completed transfer of size %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:102 +#, python-format +msgid "Initiating image service update on image: %(image)s with meta: %(meta)s" +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:117 +#, python-format +msgid "Glance image: %s is now active." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:123 +#, python-format +msgid "Glance image: %s is in killed state." 
+msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:132 +#, python-format +msgid "Glance image %(id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:171 +#, python-format +msgid "" +"Exception during HTTP connection close in VMwareHTTPWrite. Exception is " +"%s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:203 +#: cinder/volume/drivers/vmware/read_write_util.py:292 +msgid "Could not retrieve URL from lease." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:206 +#, python-format +msgid "Opening vmdk url: %s for write." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:231 +#, python-format +msgid "Written %s bytes to vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:242 +#: cinder/volume/drivers/vmware/read_write_util.py:318 +#, python-format +msgid "Updating progress to %s percent." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:258 +#: cinder/volume/drivers/vmware/read_write_util.py:334 +msgid "Lease released." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:260 +#: cinder/volume/drivers/vmware/read_write_util.py:336 +#, python-format +msgid "Lease is already in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:295 +#, python-format +msgid "Opening vmdk url: %s for read." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:307 +#, python-format +msgid "Read %s bytes from vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:150 +#, python-format +msgid "Error(s): %s occurred in the call to RetrievePropertiesEx." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:189 +#, python-format +msgid "No such SOAP method %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:198 +#, python-format +msgid "httplib error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:209 +#, python-format +msgid "Socket error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:218 +#, python-format +msgid "Type error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:225 +#, python-format +msgid "Error in %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:112 +#, python-format +msgid "Returning spec value %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:115 +#, python-format +msgid "Invalid spec value: %s specified." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:118 +#, python-format +msgid "Returning default spec value: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:169 +#, python-format +msgid "%s not set." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:174 +#, python-format +msgid "Successfully setup driver: %(driver)s for server: %(ip)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:219 +msgid "Backing not available, no operation to be performed." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:287 +#, python-format +msgid "" +"Unable to pick datastore to accommodate %(size)s bytes from the " +"datastores: %(dss)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:293 +#, python-format +msgid "" +"Selected datastore: %(datastore)s with %(host_count)d connected host(s) " +"for the volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:375 +#, python-format +msgid "" +"Unable to find suitable datastore for volume of size: %(vol)s GB under " +"host: %(host)s. 
More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:385 +#, python-format +msgid "Unable to find host to accommodate a disk of size: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:412 +#, python-format +msgid "" +"Unable to find suitable datastore for volume: %(vol)s under host: " +"%(host)s. More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:422 +#, python-format +msgid "Unable to create volume: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:441 +#, python-format +msgid "The instance: %s for which initialize connection is called, exists." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:448 +#, python-format +msgid "There is no backing for the volume: %s. Need to create one." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:456 +msgid "The instance for which initialize connection is called, does not exist." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:461 +#, python-format +msgid "Trying to boot from an empty volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:470 +#, python-format +msgid "" +"Returning connection_info: %(info)s for volume: %(volume)s with " +"connector: %(connector)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:518 +#, python-format +msgid "Snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:523 +#, python-format +msgid "There is no backing, so will not create snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:528 +#, python-format +msgid "Successfully created snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:549 +#, python-format +msgid "Delete snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:554 +#, python-format +msgid "There is no backing, and so there is no snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:558 +#, python-format +msgid "Successfully deleted snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:586 +#, python-format +msgid "Successfully cloned new backing: %(back)s from source VMDK file: %(vmdk)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:603 +#, python-format +msgid "" +"There is no backing for the source volume: %(svol)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:633 +#, python-format +msgid "" +"There is no backing for the source snapshot: %(snap)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:642 +#: cinder/volume/drivers/vmware/vmdk.py:982 +#, python-format +msgid "" +"There is no snapshot point for the snapshoted volume: %(snap)s. Not " +"creating any backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:678 +#, python-format +msgid "Cannot create image of disk format: %s. Only vmdk disk format is accepted." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:713 +#: cinder/volume/drivers/vmware/vmdk.py:771 +#, python-format +msgid "Fetching glance image: %(id)s to server: %(host)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:722 +#: cinder/volume/drivers/vmware/vmdk.py:792 +#, python-format +msgid "Done copying image: %(id)s to volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:725 +#, python-format +msgid "" +"Exception in copy_image_to_volume: %(excep)s. Deleting the backing: " +"%(back)s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:746 +#, python-format +msgid "Exception in _select_ds_for_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:749 +#, python-format +msgid "Selected datastore %(ds)s for new volume of size %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:784 +#, python-format +msgid "Exception in copy_image_to_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:787 +#, python-format +msgid "Deleting the backing: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:808 +#, python-format +msgid "Copy glance image: %s to create new volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:842 +msgid "Upload to glance of attached volume is not supported." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:847 +#, python-format +msgid "Copy Volume: %s to new image." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:853 +#, python-format +msgid "Backing not found, creating for volume: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:872 +#, python-format +msgid "Done copying volume %(vol)s to a new image %(img)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:922 +#, python-format +msgid "Relocating volume: %(backing)s to %(ds)s and %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:961 +#: cinder/volume/drivers/vmware/volumeops.py:630 +#, python-format +msgid "Successfully created clone: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:974 +#, python-format +msgid "" +"There is no backing for the snapshoted volume: %(snap)s. Not creating any" +" backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1010 +#, python-format +msgid "" +"There is no backing for the source volume: %(src)s. Not creating any " +"backing for volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1018 +#, python-format +msgid "Linked clone of source volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:94 +#, python-format +msgid "Downloading image: %s from glance image server as a flat vmdk file." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:107 +#: cinder/volume/drivers/vmware/vmware_images.py:126 +#, python-format +msgid "Downloaded image: %s from glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:113 +#, python-format +msgid "Downloading image: %s from glance image server using HttpNfc import." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:132 +#, python-format +msgid "Uploading image: %s to the Glance image server using HttpNfc export." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:158 +#, python-format +msgid "Uploaded image: %s to the Glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:87 +#, python-format +msgid "Did not find any backing with name: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:94 +#, python-format +msgid "Deleting the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:97 +#, python-format +msgid "Initiated deletion of VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:99 +#, python-format +msgid "Deleted the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:237 +#, python-format +msgid "There are no valid datastores attached to %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:289 +#, python-format +msgid "" +"Creating folder: %(child_folder_name)s under parent folder: " +"%(parent_folder)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:306 +#, python-format +msgid "Child folder already present: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:314 +#, python-format +msgid "Created child folder: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:365 +#, python-format +msgid "Spec for creating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:383 +#, python-format +msgid "" +"Creating volume backing name: %(name)s disk_type: %(disk_type)s size_kb: " +"%(size_kb)s at folder: %(folder)s resourse pool: %(resource_pool)s " +"datastore name: %(ds_name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:395 +#, python-format +msgid "Initiated creation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:398 +#, python-format +msgid "Successfully created volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:438 +#, python-format +msgid "Spec for relocating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:452 +#, python-format +msgid "" +"Relocating backing: %(backing)s to datastore: %(ds)s and resource pool: " +"%(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:462 +#, python-format +msgid "Initiated relocation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:464 +#, python-format +msgid "" +"Successfully relocated volume backing: %(backing)s to datastore: %(ds)s " +"and resource pool: %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:474 +#, python-format +msgid "Moving backing: %(backing)s to folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:479 +#, python-format +msgid "Initiated move of volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:482 +#, python-format +msgid "Successfully moved volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:494 +#, python-format +msgid "Snapshoting backing: %(backing)s with name: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:501 +#, python-format +msgid "Initiated snapshot of volume backing: %(backing)s named: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:505 +#, python-format +msgid "Successfully created snapshot: %(snap)s for volume backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:553 +#, python-format +msgid "Deleting the snapshot: %(name)s from backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:558 +#, python-format +msgid "" +"Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " +"delete anything." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:565 +#, python-format +msgid "Initiated snapshot: %(name)s deletion for backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:569 +#, python-format +msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:597 +#, python-format +msgid "Spec for cloning the backing: %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:613 +#, python-format +msgid "" +"Creating a clone of backing: %(back)s, named: %(name)s, clone type: " +"%(type)s from snapshot: %(snap)s on datastore: %(ds)s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:627 +#, python-format +msgid "Initiated clone of backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:638 +#, python-format +msgid "Deleting file: %(file)s under datacenter: %(dc)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:646 +#, python-format +msgid "Initiated deletion via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:648 +#, python-format +msgid "Successfully deleted file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:701 +msgid "Copying disk data before snapshot of the VM" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:711 +#, python-format +msgid "Initiated copying disk data via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:713 +#, python-format +msgid "Successfully copied disk at: %(src)s to: %(dest)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:722 +#, python-format +msgid "Deleting vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:729 +#, python-format +msgid "Initiated deleting vmdk file via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:731 +#, python-format +msgid "Deleted vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/windows/windows.py:102 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:47 +#, python-format +msgid "" +"check_for_setup_error: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:53 +msgid "check_for_setup_error: there is no ISCSI traffic listening." +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:63 +#, python-format +msgid "" +"get_host_information: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:73 +#, python-format +msgid "" +"get_host_information: the ISCSI target information could not be " +"retrieved. WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:105 +#, python-format +msgid "" +"associate_initiator_with_iscsi_target: an association between initiator: " +"%(init)s and target name: %(target)s could not be established. WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:123 +#, python-format +msgid "" +"delete_iscsi_target: error when deleting the iscsi target associated with" +" target name: %(target)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:139 +#, python-format +msgid "" +"create_volume: error when creating the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:157 +#, python-format +msgid "" +"delete_volume: error when deleting the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:177 +#, python-format +msgid "" +"create_snapshot: error when creating the snapshot name: %(vol_name)s . 
" +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:193 +#, python-format +msgid "" +"create_volume_from_snapshot: error when creating the volume name: " +"%(vol_name)s from snapshot name: %(snap_name)s. WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:208 +#, python-format +msgid "" +"delete_snapshot: error when deleting the snapshot name: %(snap_name)s . " +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:223 +#, python-format +msgid "" +"create_iscsi_target: error when creating iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:240 +#, python-format +msgid "" +"remove_iscsi_target: error when deleting iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:255 +#, python-format +msgid "" +"add_disk_to_target: error adding disk associated to volume : %(vol_name)s" +" to the target name: %(tar_name)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:273 +#, python-format +msgid "" +"copy_vhd_disk: error when copying disk from source path : %(src_path)s to" +" destination path: %(dest_path)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:290 +#, python-format +msgid "" +"extend: error when extending the volume: %(vol_name)s .WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/flows/common.py:52 +#, python-format +msgid "Restoring source %(source_volid)s status to %(status)s" +msgstr "" + +#: cinder/volume/flows/common.py:58 +#, python-format +msgid "" +"Failed setting source volume %(source_volid)s back to its initial " +"%(source_status)s status" +msgstr "" + +#: cinder/volume/flows/common.py:83 +#, python-format +msgid "Updating volume: %(volume_id)s with %(update)s due to: %(reason)s" +msgstr "" + +#: cinder/volume/flows/common.py:90 +#: cinder/volume/flows/manager/create_volume.py:676 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(update)s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:81 +#, python-format +msgid "Originating snapshot status must be one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:103 +#, python-format +msgid "" +"Unable to create a volume from an originating source volume when its " +"status is not one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:126 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than the snapshot size " +"%(snap_size)sGB. They must be >= original snapshot size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:135 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than original volume size " +"%(source_size)sGB. They must be >= original volume size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:144 +#, python-format +msgid "Volume size %(size)s must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:186 +#, python-format +msgid "" +"Size of specified image %(image_size)sGB is larger than volume size " +"%(volume_size)sGB." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:194 +#, python-format +msgid "" +"Volume size %(volume_size)sGB cannot be smaller than the image minDisk " +"size %(min_disk)sGB." 
+msgstr "" + +#: cinder/volume/flows/api/create_volume.py:212 +#, python-format +msgid "Metadata property key %s greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:217 +#, python-format +msgid "Metadata property key %s value greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:254 +#, python-format +msgid "Availability zone '%s' is invalid" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:267 +msgid "Volume must be in the same availability zone as the snapshot" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:276 +msgid "Volume must be in the same availability zone as the source volume" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:315 +msgid "Volume type will be changed to be the same as the source volume." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:463 +#, python-format +msgid "Failed destroying volume entry %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:546 +#, python-format +msgid "Failed rolling back quota for %s reservations" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:590 +#, python-format +msgid "Failed to update quota for deleting volume: %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:678 +#: cinder/volume/flows/manager/create_volume.py:192 +#, python-format +msgid "Volume %s: create failed" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:682 +msgid "Unexpected build error:" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:105 +#, python-format +msgid "" +"Volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d due to " +"%(reason)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:124 +#, python-format +msgid "Volume %s: re-scheduled" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:141 +#, python-format +msgid "Updating volume %(volume_id)s with %(update)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:146 +#, python-format +msgid "Volume %s: resetting 'creating' status failed." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:165 +#, python-format +msgid "Volume %s: rescheduling failed" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:308 +#, python-format +msgid "" +"Failed notifying about the volume action %(event)s for volume " +"%(volume_id)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:345 +#, python-format +msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:347 +#, python-format +msgid "" +"Failed updating volume %(vol_id)s metadata using the provided " +"%(src_type)s %(src_id)s metadata" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:405 +#, python-format +msgid "" +"Failed fetching snapshot %(snapshot_id)s bootable flag using the provided" +" glance snapshot %(snapshot_ref_id)s volume reference" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:418 +#, python-format +msgid "Marking volume %s as bootable." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:421 +#, python-format +msgid "Failed updating volume %(volume_id)s bootable flag to true" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:448 +#, python-format +msgid "" +"Attempting download of %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s." 
+msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:455 +#: cinder/volume/flows/manager/create_volume.py:466 +#, python-format +msgid "" +"Failed to copy image %(image_id)s to volume: %(volume_id)s, error: " +"%(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:461 +#, python-format +msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:475 +#, python-format +msgid "" +"Downloaded image %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s successfully." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:512 +#, python-format +msgid "" +"Creating volume glance metadata for volume %(volume_id)s backed by image " +"%(image_id)s with: %(vol_metadata)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:526 +#, python-format +msgid "" +"Cloning %(volume_id)s from image %(image_id)s at location " +"%(image_location)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:552 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(updates)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:574 +#, python-format +msgid "Unable to create volume. Volume driver %s not initialized" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:588 +#, python-format +msgid "" +"Volume %(volume_id)s: being created using %(functor)s with specification:" +" %(volume_spec)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:611 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with creation provided " +"model %(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:619 +#, python-format +msgid "Volume %s: creating export" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:633 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with driver provided model " +"%(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:680 +#, python-format +msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" +msgstr "" + +#~ msgid "Migration %(migration_id)s could not be found." +#~ msgstr "" + +#~ msgid "Bad driver response status: %(status)s" +#~ msgstr "" + +#~ msgid "Instance %(instance_id)s could not be found." +#~ msgstr "" + +#~ msgid "Volume retype failed: %(reason)s" +#~ msgstr "" + +#~ msgid "SIGTERM received" +#~ msgstr "" + +#~ msgid "Child %(pid)d exited with status %(code)d" +#~ msgstr "" + +#~ msgid "_wait_child %d" +#~ msgstr "" + +#~ msgid "wait wrap.failed %s" +#~ msgstr "" + +#~ msgid "" +#~ "Report interval must be less than " +#~ "service down time. Current config: " +#~ ". Setting " +#~ "service_down_time to: %(new_service_down_time)s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target for volume %(name)s." +#~ msgstr "" + +#~ msgid "Updating iscsi target: %s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target %(name)s: %(e)s" +#~ msgstr "" + +#~ msgid "Caught '%(exception)s' exception." +#~ msgstr "" + +#~ msgid "Get code level failed" +#~ msgstr "" + +#~ msgid "do_setup: Could not get system name" +#~ msgstr "" + +#~ msgid "Failed to get license information." +#~ msgstr "" + +#~ msgid "do_setup: No configured nodes" +#~ msgstr "" + +#~ msgid "enter: _get_chap_secret_for_host: host name %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_chap_secret_for_host: host name " +#~ "%(host_name)s with secret %(chap_secret)s" +#~ msgstr "" + +#~ msgid "" +#~ "_create_host: Cannot clean host name. 
" +#~ "Host name is not unicode or string" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: %s" +#~ msgstr "" + +#~ msgid "leave: _get_host_from_connector: host %s" +#~ msgstr "" + +#~ msgid "enter: _create_host: host %s" +#~ msgstr "" + +#~ msgid "_create_host: No connector ports" +#~ msgstr "" + +#~ msgid "leave: _create_host: host %(host)s - %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +#~ msgstr "" + +#~ msgid "" +#~ "storwize_svc_multihostmap_enabled is set to " +#~ "False, Not allow multi host mapping" +#~ msgstr "" + +#~ msgid "volume %s mapping to multi host" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _map_vol_to_host: LUN %(result_lun)s, " +#~ "volume %(volume_name)s, host %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "leave: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "_create_host failed to return the host name." +#~ msgstr "" + +#~ msgid "_get_host_from_connector failed to return the host name for connector" +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to any host found." +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: Multiple mappings of " +#~ "volume %(vol_name)s found, no host " +#~ "specified." +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to host %(host_name)s found" +#~ msgstr "" + +#~ msgid "protocol must be specified as ' iSCSI' or ' FC'" +#~ msgstr "" + +#~ msgid "enter: _create_vdisk: vdisk %s " +#~ msgstr "" + +#~ msgid "" +#~ "_create_vdisk %(name)s - did not find success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find success " +#~ "message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find mapping " +#~ "id in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to prepare FlashCopy" +#~ " from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "Unexecpted mapping status %(status)s for " +#~ "mapping %(id)s. Attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Mapping %(id)s prepare failed to " +#~ "complete within the allotted %(to)d " +#~ "seconds timeout. Terminating." 
+#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s with " +#~ "exception %(ex)s" +#~ msgstr "" + +#~ msgid "_prepare_fc_map: %s" +#~ msgstr "" + +#~ msgid "" +#~ "_start_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "enter: _run_flashcopy: execute FlashCopy from" +#~ " source %(source)s to target %(target)s" +#~ msgstr "" + +#~ msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +#~ msgstr "" + +#~ msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %(src_vdisk)s (%(src_id)s) does not exist" +#~ msgstr "" + +#~ msgid "" +#~ "_create_copy: cannot get source vdisk " +#~ "%(src)s capacity from vdisk attributes " +#~ "%(attr)s" +#~ msgstr "" + +#~ msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_flashcopy_mapping_attributes: mapping " +#~ "%(fc_map_id)s, attributes %(attributes)s" +#~ msgstr "" + +#~ msgid "enter: _is_vdisk_defined: vdisk %s " +#~ msgstr "" + +#~ msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +#~ msgstr "" + +#~ msgid "enter: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "warning: Tried to delete vdisk %s but it does not exist." +#~ msgstr "" + +#~ msgid "leave: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "" +#~ "_add_vdisk_copy %(name)s - did not find" +#~ " success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "_get_vdisk_copy_attrs: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "_get_pool_attrs: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "enter: _execute_command_and_parse_attributes: command %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _execute_command_and_parse_attributes:\n" +#~ "command: %(cmd)s\n" +#~ "attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "_get_hdr_dic: attribute headers and values do not match.\n" +#~ " Headers: %(header)s\n" +#~ " Values: %(row)s" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ "stdout: %(out)s\n" +#~ "stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "Did not find expected column in %(fun)s: %(hdr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Volume size %(size)s cannot be lesser" +#~ " than the snapshot size %(snap_size)s. " +#~ "They must be >= original snapshot " +#~ "size." +#~ msgstr "" + +#~ msgid "" +#~ "Clones currently disallowed when %(size)s " +#~ "< %(source_size)s. They must be >= " +#~ "original volume size." +#~ msgstr "" + +#~ msgid "" +#~ "Size of specified image %(image_size)s " +#~ "is larger than volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "" +#~ "Image minDisk size %(min_disk)s is " +#~ "larger than the volume size " +#~ "%(volume_size)s." 
+#~ msgstr "" + +#~ msgid "Updating volume %(volume_id)s with %(update)s" +#~ msgstr "" + +#~ msgid "Volume %s: resetting 'creating' status failed" +#~ msgstr "" + +#~ msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s" +#~ msgstr "" + +#~ msgid "Marking volume %s as bootable" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting download of %(image_id)s " +#~ "(%(image_location)s) to volume %(volume_id)s" +#~ msgstr "" + +#~ msgid "" +#~ "Downloaded image %(image_id)s (%(image_location)s)" +#~ " to volume %(volume_id)s successfully" +#~ msgstr "" + +#~ msgid "" +#~ "Creating volume glance metadata for " +#~ "volume %(volume_id)s backed by image " +#~ "%(image_id)s with: %(vol_metadata)s" +#~ msgstr "" + +#~ msgid "" +#~ "Cloning %(volume_id)s from image %(image_id)s" +#~ " at location %(image_location)s" +#~ msgstr "" + diff --git a/cinder/locale/hi/LC_MESSAGES/cinder.po b/cinder/locale/hi/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..166357fae4 --- /dev/null +++ b/cinder/locale/hi/LC_MESSAGES/cinder.po @@ -0,0 +1,10481 @@ +# Hindi translations for cinder. +# Copyright (C) 2013 ORGANIZATION +# This file is distributed under the same license as the cinder project. +# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Cinder\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-02-03 06:16+0000\n" +"PO-Revision-Date: 2013-08-27 04:58+0000\n" +"Last-Translator: daisy.ycguo \n" +"Language-Team: Hindi " +"(http://www.transifex.com/projects/p/openstack/language/hi/)\n" +"Plural-Forms: nplurals=2; plural=(n != 1)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:102 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:66 cinder/brick/exception.py:33 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:88 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:107 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:112 +#, python-format +msgid "Volume driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:116 +#, python-format +msgid "Backup driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:120 +#, python-format +msgid "Connection to glance failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:124 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:129 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:133 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:137 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:141 +msgid "Volume driver not ready." +msgstr "" + +#: cinder/exception.py:145 cinder/brick/exception.py:74 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:150 +#, python-format +msgid "Invalid snapshot: %(reason)s" +msgstr "" + +#: cinder/exception.py:154 +#, python-format +msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." 
+msgstr "" + +#: cinder/exception.py:159 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:163 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:167 +msgid "The results are invalid." +msgstr "" + +#: cinder/exception.py:171 +#, python-format +msgid "Invalid input received: %(reason)s" +msgstr "" + +#: cinder/exception.py:175 +#, python-format +msgid "Invalid volume type: %(reason)s" +msgstr "" + +#: cinder/exception.py:179 +#, python-format +msgid "Invalid volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:183 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:187 +#, python-format +msgid "Invalid host: %(reason)s" +msgstr "" + +#: cinder/exception.py:193 cinder/brick/exception.py:81 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:197 +#, python-format +msgid "Invalid auth key: %(reason)s" +msgstr "" + +#: cinder/exception.py:201 +#, python-format +msgid "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" +msgstr "" + +#: cinder/exception.py:206 +msgid "Service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:210 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:214 +#, python-format +msgid "The device in the path %(path)s is unavailable: %(reason)s" +msgstr "" + +#: cinder/exception.py:218 +#, python-format +msgid "Expected a uuid but received %(uuid)s." +msgstr "" + +#: cinder/exception.py:222 cinder/brick/exception.py:68 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:228 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:232 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "" +"Volume %(volume_id)s has no administration metadata with key " +"%(metadata_key)s." +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Invalid metadata: %(reason)s" +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Invalid metadata size: %(reason)s" +msgstr "" + +#: cinder/exception.py:250 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:255 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:264 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:269 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s deletion is not allowed with volumes " +"present with the type." +msgstr "" + +#: cinder/exception.py:274 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:278 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:282 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:287 +#, python-format +msgid "No target id found for volume %(volume_id)s." 
+msgstr "" + +#: cinder/exception.py:291 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:295 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:328 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:332 +#, python-format +msgid "Unknown quota resources %(unknown)s." +msgstr "" + +#: cinder/exception.py:336 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:340 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:344 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:348 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:352 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:356 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:365 +#, python-format +msgid "Volume Type %(id)s already exists." +msgstr "" + +#: cinder/exception.py:369 +#, python-format +msgid "Volume type encryption for type %(type_id)s already exists." +msgstr "" + +#: cinder/exception.py:373 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:377 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:381 +#, python-format +msgid "Could not find parameter %(param)s" +msgstr "" + +#: cinder/exception.py:385 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:389 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:398 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:402 +#, python-format +msgid "Quota exceeded: code=%(code)s" +msgstr "" + +#: cinder/exception.py:409 +#, python-format +msgid "" +"Requested volume or snapshot exceeds allowed Gigabytes quota. Requested " +"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." 
+msgstr "" + +#: cinder/exception.py:415 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:419 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:423 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:427 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:432 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:436 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:440 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:444 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:449 +#, python-format +msgid "Glance metadata for volume/snapshot %(id)s cannot be found." +msgstr "" + +#: cinder/exception.py:453 +#, python-format +msgid "Failed to export for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:457 +#, python-format +msgid "Failed to create metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:461 +#, python-format +msgid "Failed to update metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:465 +#, python-format +msgid "Failed to copy metadata to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:469 +#, python-format +msgid "Failed to copy image to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:473 +msgid "Invalid Ceph args provided for backup rbd operation" +msgstr "" + +#: cinder/exception.py:477 +msgid "An error has occurred during backup operation" +msgstr "" + +#: cinder/exception.py:481 +msgid "Backup RBD operation failed" +msgstr "" + +#: cinder/exception.py:485 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:489 +msgid "Failed to identify volume backend." +msgstr "" + +#: cinder/exception.py:493 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "" + +#: cinder/exception.py:497 +#, python-format +msgid "Connection to swift failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:501 +#, python-format +msgid "Transfer %(transfer_id)s could not be found." +msgstr "" + +#: cinder/exception.py:505 +#, python-format +msgid "Volume migration failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:509 +#, python-format +msgid "SSH command injection detected: %(command)s" +msgstr "" + +#: cinder/exception.py:513 +#, python-format +msgid "QoS Specs %(specs_id)s already exists." +msgstr "" + +#: cinder/exception.py:517 +#, python-format +msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:522 +#, python-format +msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "No such QoS spec %(specs_id)s." +msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:536 +#, python-format +msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." 
+msgstr "" + +#: cinder/exception.py:541 +#, python-format +msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." +msgstr "" + +#: cinder/exception.py:546 +#, python-format +msgid "Invalid qos specs: %(reason)s" +msgstr "" + +#: cinder/exception.py:550 +#, python-format +msgid "QoS Specs %(specs_id)s is still associated with entities." +msgstr "" + +#: cinder/exception.py:554 +#, python-format +msgid "key manager error: %(reason)s" +msgstr "" + +#: cinder/exception.py:560 +msgid "Coraid Cinder Driver exception." +msgstr "" + +#: cinder/exception.py:564 +msgid "Failed to encode json data." +msgstr "" + +#: cinder/exception.py:568 +msgid "Login on ESM failed." +msgstr "" + +#: cinder/exception.py:572 +msgid "Relogin on ESM failed." +msgstr "" + +#: cinder/exception.py:576 +#, python-format +msgid "Group with name \"%(group_name)s\" not found." +msgstr "" + +#: cinder/exception.py:580 +#, python-format +msgid "ESM configure request failed: %(message)s." +msgstr "" + +#: cinder/exception.py:584 +#, python-format +msgid "Coraid ESM not available with reason: %(reason)s." +msgstr "" + +#: cinder/exception.py:589 +msgid "Zadara Cinder Driver exception." +msgstr "" + +#: cinder/exception.py:593 +#, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:597 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:601 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:605 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:609 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:613 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:618 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:622 +msgid "SolidFire Cinder Driver exception" +msgstr "" + +#: cinder/exception.py:626 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:630 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:636 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:641 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:645 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:649 cinder/exception.py:662 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:654 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:658 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/manager.py:133 +msgid "Notifying Schedulers of capabilities ..." +msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:105 +#, python-format +msgid "" +"Default quota for resource: %(res)s is set by the default quota flag: " +"quota_%(res)s, it is now deprecated. Please use the the default quota " +"class for default quota." 
+msgstr "" + +#: cinder/quota.py:748 +#, python-format +msgid "Created reservations %s" +msgstr "" + +#: cinder/quota.py:770 +#, python-format +msgid "Failed to commit reservations %s" +msgstr "" + +#: cinder/quota.py:790 +#, python-format +msgid "Failed to roll back reservations %s" +msgstr "" + +#: cinder/quota.py:876 +msgid "Cannot register resource" +msgstr "" + +#: cinder/quota.py:879 +msgid "Cannot register resources" +msgstr "" + +#: cinder/quota_utils.py:46 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume - " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/quota_utils.py:56 cinder/transfer/api.py:168 +#: cinder/volume/flows/api/create_volume.py:520 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/service.py:95 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:108 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:148 +#, python-format +msgid "" +"Report interval must be less than service down time. Current config " +"service_down_time: %(service_down_time)s, report_interval for this: " +"service is: %(report_interval)s. Setting global service_down_time to: " +"%(new_down_time)s" +msgstr "" + +#: cinder/service.py:216 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:255 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:270 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:276 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:298 +#, python-format +msgid "" +"Value of config option %(name)s_workers must be integer greater than 1. " +"Input value ignored." +msgstr "" + +#: cinder/service.py:373 +msgid "serve() can only be called once" +msgstr "" + +#: cinder/service.py:379 cinder/openstack/common/service.py:166 +#: cinder/openstack/common/service.py:384 +msgid "Full set of CONF:" +msgstr "" + +#: cinder/service.py:387 +#, python-format +msgid "%s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Can not translate %s to integer." 
+msgstr "" + +#: cinder/utils.py:127 +#, python-format +msgid "May specify only one of %s" +msgstr "" + +#: cinder/utils.py:212 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:228 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:412 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:423 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:698 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:759 +#, python-format +msgid "Volume driver %s not initialized" +msgstr "" + +#: cinder/wsgi.py:127 cinder/openstack/common/sslutils.py:50 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: cinder/wsgi.py:130 cinder/openstack/common/sslutils.py:53 +#, python-format +msgid "Unable to find ca_file : %s" +msgstr "" + +#: cinder/wsgi.py:133 cinder/openstack/common/sslutils.py:56 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: cinder/wsgi.py:136 cinder/openstack/common/sslutils.py:59 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:169 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:206 +#, python-format +msgid "Started %(name)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:244 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:313 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." +msgstr "" + +#: cinder/api/common.py:92 cinder/api/common.py:126 cinder/volume/api.py:266 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:95 cinder/api/common.py:130 cinder/volume/api.py:263 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:120 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:134 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:162 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:189 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:182 +msgid "Initializing extension manager." 
+msgstr "" + +#: cinder/api/extensions.py:197 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:235 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:236 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:240 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:256 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:262 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:276 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:287 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:356 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:266 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:463 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:786 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:907 +msgid "subclasses must implement construct()!" +msgstr "" + +#: cinder/api/contrib/admin_actions.py:81 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:227 +#: cinder/api/contrib/volume_transfer.py:157 +#: cinder/api/contrib/volume_transfer.py:193 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:224 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:60 +msgid "Snapshot not found." 
+msgstr "" + +#: cinder/api/contrib/hosts.py:86 cinder/api/openstack/wsgi.py:245 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:136 +#, python-format +msgid "Host '%s' could not be found." +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:168 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:180 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:206 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:214 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:111 +msgid "Please specify a name for QoS specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:220 +msgid "Failed to disassociate qos specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:222 +msgid "Qos specs still in use." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:298 +#: cinder/api/contrib/qos_specs_manage.py:351 +msgid "Volume Type id must not be None." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:72 +msgid "Missing required element quota_class_set in request body." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:81 +msgid "Quota class limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:85 +msgid "Quota class limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:60 +msgid "Quota limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quotas.py:65 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:100 +msgid "Missing required element quota_set in request body." +msgstr "" + +#: cinder/api/contrib/quotas.py:111 +#, python-format +msgid "Bad key(s) in quota set: %s" +msgstr "" + +#: cinder/api/contrib/scheduler_hints.py:36 +msgid "Malformed scheduler_hints attribute" +msgstr "" + +#: cinder/api/contrib/services.py:84 +msgid "" +"Query by service parameter is deprecated. Please use binary parameter " +"instead." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:51 +msgid "'status' must be specified." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:61 +#, python-format +msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:67 +#, python-format +msgid "" +"Provided snapshot status %(provided)s not allowed for snapshot with " +"status %(current)s." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:79 +msgid "progress must be an integer percentage" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:101 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:105 +#: cinder/api/v1/snapshot_metadata.py:75 cinder/api/v1/volume_metadata.py:75 +#: cinder/api/v2/snapshot_metadata.py:75 cinder/api/v2/volume_metadata.py:74 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:108 +#: cinder/api/v1/snapshot_metadata.py:79 cinder/api/v1/volume_metadata.py:79 +#: cinder/api/v2/snapshot_metadata.py:79 cinder/api/v2/volume_metadata.py:78 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:150 +msgid "" +"Key names can only contain alphanumeric characters, underscores, periods," +" colons and hyphens." 
+msgstr "" + +#: cinder/api/contrib/volume_actions.py:99 +#, python-format +msgid "" +"Invalid request to attach volume to an instance %(instance_uuid)s and a " +"host %(host_name)s simultaneously" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:107 +msgid "Invalid request to attach volume to an invalid target" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:111 +msgid "" +"Invalid request to attach volume with an invalid mode. Attaching mode " +"should be 'rw' or 'ro'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:196 +msgid "Unable to fetch connection information from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:216 +msgid "Unable to terminate volume connection from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:229 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:237 +msgid "Bad value for 'force' parameter." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:240 +msgid "'force' is not string or bool." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:280 +msgid "New volume size must be specified as an integer." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:299 +msgid "Must specify readonly in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:307 +msgid "Bad value for 'readonly'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:311 +msgid "'readonly' not string or bool" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:325 +msgid "New volume type must be specified." +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:131 +msgid "Listing volume transfers" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:147 +#, python-format +msgid "Creating new volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:162 +#, python-format +msgid "Creating transfer of volume %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:183 +#, python-format +msgid "Accepting volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:196 +#, python-format +msgid "Accepting transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:217 +#, python-format +msgid "Delete transfer with id: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:64 +msgid "key_size must be non-negative" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:67 +msgid "key_size must be an integer" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:73 +msgid "provider must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:75 +msgid "control_location must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:83 +#, python-format +msgid "Valid control location are: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:111 +msgid "Cannot create encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:115 +msgid "Create body is not valid." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:157 +msgid "Cannot delete encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/middleware/auth.py:108 +msgid "Invalid service catalog json." 
+msgstr "" + +#: cinder/api/middleware/fault.py:44 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:53 cinder/api/openstack/wsgi.py:984 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/fault.py:69 +#, python-format +msgid "%(exception)s: %(explanation)s" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:55 cinder/api/middleware/sizelimit.py:64 +#: cinder/api/middleware/sizelimit.py:78 +msgid "Request is too large." +msgstr "" + +#: cinder/api/openstack/__init__.py:69 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:80 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:104 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:126 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." +msgstr "" + +#: cinder/api/openstack/wsgi.py:220 cinder/api/openstack/wsgi.py:634 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:639 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:677 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:682 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:685 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:793 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:799 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:803 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:914 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:917 cinder/api/openstack/wsgi.py:930 +#: cinder/api/v1/snapshot_metadata.py:53 cinder/api/v1/snapshot_metadata.py:71 +#: cinder/api/v1/snapshot_metadata.py:96 cinder/api/v1/snapshot_metadata.py:121 +#: cinder/api/v1/volume_metadata.py:53 cinder/api/v1/volume_metadata.py:71 +#: cinder/api/v1/volume_metadata.py:96 cinder/api/v1/volume_metadata.py:121 +#: cinder/api/v2/snapshot_metadata.py:53 cinder/api/v2/snapshot_metadata.py:71 +#: cinder/api/v2/snapshot_metadata.py:96 cinder/api/v2/snapshot_metadata.py:121 +#: cinder/api/v2/volume_metadata.py:52 cinder/api/v2/volume_metadata.py:70 +#: cinder/api/v2/volume_metadata.py:95 cinder/api/v2/volume_metadata.py:120 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:927 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:939 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:987 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." 
+msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:139 cinder/api/v2/limits.py:138 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:264 cinder/api/v2/limits.py:261 +msgid "This request was rate-limited." +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:37 cinder/api/v1/snapshot_metadata.py:117 +#: cinder/api/v1/snapshot_metadata.py:156 cinder/api/v2/snapshot_metadata.py:37 +#: cinder/api/v2/snapshot_metadata.py:117 +#: cinder/api/v2/snapshot_metadata.py:156 +msgid "snapshot does not exist" +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:139 +#: cinder/api/v1/snapshot_metadata.py:149 cinder/api/v1/volume_metadata.py:139 +#: cinder/api/v1/volume_metadata.py:149 cinder/api/v2/snapshot_metadata.py:139 +#: cinder/api/v2/snapshot_metadata.py:149 cinder/api/v2/volume_metadata.py:138 +#: cinder/api/v2/volume_metadata.py:148 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:119 cinder/api/v2/snapshots.py:120 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:184 +msgid "'volume_id' must be specified" +msgstr "" + +#: cinder/api/v1/snapshots.py:182 cinder/api/v2/snapshots.py:193 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:186 cinder/api/v2/snapshots.py:202 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:37 cinder/api/v1/volume_metadata.py:117 +#: cinder/api/v1/volume_metadata.py:156 cinder/api/v2/volume_metadata.py:36 +#: cinder/api/v2/volume_metadata.py:116 cinder/api/v2/volume_metadata.py:155 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:111 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:290 cinder/api/v2/volumes.py:228 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:344 cinder/api/v1/volumes.py:348 +#: cinder/api/v2/volumes.py:298 cinder/api/v2/volumes.py:302 +msgid "Invalid imageRef provided." 
+msgstr "" + +#: cinder/api/v1/volumes.py:388 cinder/api/v2/volumes.py:354 +#, python-format +msgid "snapshot id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:401 +#, python-format +msgid "source vol id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:412 cinder/api/v2/volumes.py:377 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/v1/volumes.py:496 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/snapshots.py:111 cinder/api/v2/snapshots.py:126 +#: cinder/api/v2/snapshots.py:267 +msgid "Snapshot could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:174 cinder/api/v2/snapshots.py:234 +#: cinder/api/v2/volumes.py:313 cinder/api/v2/volumes.py:419 +#, python-format +msgid "Missing required element '%s' in request body" +msgstr "" + +#: cinder/api/v2/snapshots.py:190 cinder/api/v2/volumes.py:217 +#: cinder/api/v2/volumes.py:234 cinder/api/v2/volumes.py:449 +msgid "Volume could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:230 cinder/api/v2/volumes.py:415 +msgid "Missing request body" +msgstr "" + +#: cinder/api/v2/types.py:70 +msgid "Volume type not found" +msgstr "" + +#: cinder/api/v2/volumes.py:237 +msgid "Volume cannot be deleted while in attached state" +msgstr "" + +#: cinder/api/v2/volumes.py:343 +msgid "Volume type not found." +msgstr "" + +#: cinder/api/v2/volumes.py:366 +#, python-format +msgid "source volume id:%s not found" +msgstr "" + +#: cinder/api/v2/volumes.py:472 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:66 +msgid "Backup status must be available or error" +msgstr "" + +#: cinder/backup/api.py:105 +msgid "Volume to be backed up must be available" +msgstr "" + +#: cinder/backup/api.py:140 +msgid "Backup status must be available" +msgstr "" + +#: cinder/backup/api.py:145 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:154 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:170 +msgid "Volume to be restored to must be available" +msgstr "" + +#: cinder/backup/api.py:176 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." +msgstr "" + +#: cinder/backup/api.py:181 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:97 +msgid "NULL host not allowed for volume backend lookup." +msgstr "" + +#: cinder/backup/manager.py:100 +#, python-format +msgid "Checking hostname '%s' for backend info." +msgstr "" + +#: cinder/backup/manager.py:107 +#, python-format +msgid "Backend not found in hostname (%s) so using default." +msgstr "" + +#: cinder/backup/manager.py:117 +#, python-format +msgid "Manager requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:120 cinder/backup/manager.py:132 +msgid "Fetching default backend." +msgstr "" + +#: cinder/backup/manager.py:123 +#, python-format +msgid "Volume manager for backend '%s' does not exist." +msgstr "" + +#: cinder/backup/manager.py:129 +#, python-format +msgid "Driver requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:147 +#, python-format +msgid "" +"Registering backend %(backend)s (host=%(host)s " +"backend_name=%(backend_name)s)." +msgstr "" + +#: cinder/backup/manager.py:154 +#, python-format +msgid "Registering default backend %s." 
+msgstr "" + +#: cinder/backup/manager.py:158 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)." +msgstr "" + +#: cinder/backup/manager.py:165 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s." +msgstr "" + +#: cinder/backup/manager.py:184 +msgid "Cleaning up incomplete backup operations." +msgstr "" + +#: cinder/backup/manager.py:189 +#, python-format +msgid "Resetting volume %s to available (was backing-up)." +msgstr "" + +#: cinder/backup/manager.py:194 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)." +msgstr "" + +#: cinder/backup/manager.py:206 +#, python-format +msgid "Resetting backup %s to error (was creating)." +msgstr "" + +#: cinder/backup/manager.py:212 +#, python-format +msgid "Resetting backup %s to available (was restoring)." +msgstr "" + +#: cinder/backup/manager.py:217 +#, python-format +msgid "Resuming delete on backup: %s." +msgstr "" + +#: cinder/backup/manager.py:225 +#, python-format +msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:237 +#, python-format +msgid "" +"Create backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:249 +#, python-format +msgid "" +"Create backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:282 +#, python-format +msgid "Create backup finished. backup: %s." +msgstr "" + +#: cinder/backup/manager.py:286 +#, python-format +msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:299 +#, python-format +msgid "" +"Restore backup aborted: expected volume status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:310 +#, python-format +msgid "" +"Restore backup aborted: expected backup status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:329 +#, python-format +msgid "" +"Restore backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:360 +#, python-format +msgid "" +"Restore backup finished, backup %(backup_id)s restored to volume " +"%(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:379 +#, python-format +msgid "Delete backup started, backup: %s." +msgstr "" + +#: cinder/backup/manager.py:386 +#, python-format +msgid "" +"Delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:399 +#, python-format +msgid "" +"Delete backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:422 +#, python-format +msgid "Delete backup finished, backup %s deleted." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:116 +msgid "" +"rbd striping not supported - ignoring configuration settings for rbd " +"striping" +msgstr "" + +#: cinder/backup/drivers/ceph.py:147 +#, python-format +msgid "invalid user '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:213 +msgid "backup_id required" +msgstr "" + +#: cinder/backup/drivers/ceph.py:224 +#, python-format +msgid "discarding %(length)s bytes from offset %(offset)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:232 +#, python-format +msgid "writing zeroes chunk %d" +msgstr "" + +#: cinder/backup/drivers/ceph.py:246 +#, python-format +msgid "transferring data between '%(src)s' and '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:250 +#, python-format +msgid "%(chunks)s chunks of %(bytes)s bytes to be transferred" +msgstr "" + +#: cinder/backup/drivers/ceph.py:269 +#, python-format +msgid "transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:279 +#, python-format +msgid "transferring remaining %s bytes" +msgstr "" + +#: cinder/backup/drivers/ceph.py:295 +#, python-format +msgid "creating base image '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:322 cinder/backup/drivers/ceph.py:603 +#, python-format +msgid "deleting backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:325 +msgid "no backup snapshot to delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:361 +#, python-format +msgid "trying diff format name format basename='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:369 +#, python-format +msgid "image %s not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:377 +#, python-format +msgid "base image still has %s snapshots so skipping base image delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:382 +#, python-format +msgid "deleting base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:389 +#, python-format +msgid "image busy, retrying %(retries)s more time(s) in %(delay)ss" +msgstr "" + +#: cinder/backup/drivers/ceph.py:394 +msgid "max retries reached - raising error" +msgstr "" + +#: cinder/backup/drivers/ceph.py:397 +#, python-format +msgid "base backup image='%s' deleted)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:407 +#, python-format +msgid "deleting source snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:453 +#, python-format +msgid "performing differential transfer from '%(src)s' to '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:478 +#, python-format +msgid "rbd diff op failed - (ret=%(ret)s stderr=%(stderr)s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:488 +#, python-format +msgid "image '%s' not found - trying diff format name" +msgstr "" + +#: cinder/backup/drivers/ceph.py:493 +#, python-format +msgid "diff format image '%s' not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:528 +#, python-format +msgid "using --from-snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:543 +#, python-format +msgid "source snap '%s' is stale so deleting" +msgstr "" + +#: cinder/backup/drivers/ceph.py:555 +#, python-format +msgid "" +"snap='%(snap)s' does not exist in base image='%(base)s' - aborting " +"incremental backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:566 +#, python-format +msgid "creating backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:586 +#, python-format +msgid "differential backup transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:595 +msgid "differential backup transfer failed" +msgstr "" + +#: 
cinder/backup/drivers/ceph.py:625 +#, python-format +msgid "creating base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:634 +msgid "copying data" +msgstr "" + +#: cinder/backup/drivers/ceph.py:694 +#, python-format +msgid "looking for snapshot of backup base '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:697 +#, python-format +msgid "backup base '%s' has no snapshots" +msgstr "" + +#: cinder/backup/drivers/ceph.py:704 +#, python-format +msgid "backup '%s' has no snapshot" +msgstr "" + +#: cinder/backup/drivers/ceph.py:708 +#, python-format +msgid "backup should only have one snapshot but instead has %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:713 +#, python-format +msgid "found snapshot '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:734 +msgid "need non-zero volume size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:751 +#, python-format +msgid "Starting backup of volume='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:764 +msgid "forcing full backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:776 +#, python-format +msgid "backup '%s' finished." +msgstr "" + +#: cinder/backup/drivers/ceph.py:834 +msgid "adjusting restore vol size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:846 +#, python-format +msgid "trying incremental restore from base='%(base)s' snap='%(snap)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:858 +msgid "differential restore failed, trying full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:869 +#, python-format +msgid "restore transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:916 +#, python-format +msgid "rbd has %s extents" +msgstr "" + +#: cinder/backup/drivers/ceph.py:938 +msgid "dest volume is original volume - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:959 +msgid "destination has extents - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:964 +#, python-format +msgid "no restore point found for backup='%s', forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:995 +msgid "forcing full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1005 +#, python-format +msgid "starting restore from Ceph backup=%(src)s to volume=%(dest)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1016 +msgid "volume_file does not support fileno() so skipping fsync()" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1021 +msgid "restore finished successfully." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:1023 +#, python-format +msgid "restore finished with error - %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1029 +#, python-format +msgid "delete started for backup=%s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1034 +msgid "rbd image not found but continuing anyway so that db entry can be removed" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1037 +#, python-format +msgid "delete '%s' finished with warning" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1039 +#, python-format +msgid "delete '%s' finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:106 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:123 +#, python-format +msgid "single_user auth mode enabled, but %(param)s not set" +msgstr "" + +#: cinder/backup/drivers/swift.py:141 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:146 +#, python-format +msgid "container %s does not exist" +msgstr "" + +#: cinder/backup/drivers/swift.py:151 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/drivers/swift.py:157 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:173 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:182 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:192 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:209 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/drivers/swift.py:214 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:219 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:224 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/drivers/swift.py:234 +#, python-format +msgid "volume size %d is invalid." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:248 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:271 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/drivers/swift.py:278 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:287 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/drivers/swift.py:291 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/drivers/swift.py:297 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:301 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:304 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:312 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/drivers/swift.py:328 cinder/backup/drivers/tsm.py:324 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/drivers/swift.py:345 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/drivers/swift.py:350 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:356 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/drivers/swift.py:362 +#, python-format +msgid "" +"restoring object from swift. backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:378 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/drivers/swift.py:401 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:409 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:423 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:428 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:432 cinder/backup/drivers/tsm.py:378 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:446 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:455 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:458 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:468 cinder/backup/drivers/tsm.py:440 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/backup/drivers/tsm.py:85 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to create device hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:143 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to obtain backup success notification from " +"server.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:173 +#, python-format +msgid "" +"restore: %(vol_id)s Failed.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:199 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a block device." +msgstr "" + +#: cinder/backup/drivers/tsm.py:206 +#, python-format +msgid "backup: %(vol_id)s Failed. Cannot obtain real path to device %(path)s." +msgstr "" + +#: cinder/backup/drivers/tsm.py:213 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a file." +msgstr "" + +#: cinder/backup/drivers/tsm.py:260 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to remove backup hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:286 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to TSM, volume path: " +"%(volume_path)s," +msgstr "" + +#: cinder/backup/drivers/tsm.py:298 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:308 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:338 +#, python-format +msgid "" +"restore: starting restore of backup from TSM to volume %(volume_id)s, " +"backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:352 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:362 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:413 +#, python-format +msgid "" +"delete: %(vol_id)s Failed to run dsmc with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:421 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments with " +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:432 +#, python-format +msgid "" +"delete: %(vol_id)s Failed with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/brick/exception.py:55 +#, python-format +msgid "Exception in string format operation. 
msg='%s'" +msgstr "" + +#: cinder/brick/exception.py:85 +msgid "We are unable to locate any Fibre Channel devices." +msgstr "" + +#: cinder/brick/exception.py:89 +msgid "Unable to find a Fibre Channel volume device." +msgstr "" + +#: cinder/brick/exception.py:93 +#, python-format +msgid "Volume device not found at %(device)s." +msgstr "" + +#: cinder/brick/exception.py:97 +#, python-format +msgid "Unable to find Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:101 +#, python-format +msgid "Failed to create Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:105 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:109 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:113 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:117 +#, python-format +msgid "Connect to volume via protocol %(protocol)s not supported." +msgstr "" + +#: cinder/brick/initiator/connector.py:127 +#, python-format +msgid "Invalid InitiatorConnector protocol specified %(protocol)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:140 +#, python-format +msgid "Failed to access the device on the path %(path)s: %(error)s %(info)s." +msgstr "" + +#: cinder/brick/initiator/connector.py:229 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. Try" +" number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:242 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:317 +#, python-format +msgid "Could not find the iSCSI Initiator File %s" +msgstr "" + +#: cinder/brick/initiator/connector.py:609 +msgid "We are unable to locate any Fibre Channel devices" +msgstr "" + +#: cinder/brick/initiator/connector.py:619 +#, python-format +msgid "Looking for Fibre Channel dev %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:629 +msgid "Fibre Channel volume device not found." +msgstr "" + +#: cinder/brick/initiator/connector.py:633 +#, python-format +msgid "Fibre volume not yet found. Will rescan & retry. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:649 +#, python-format +msgid "Found Fibre Channel volume %(name)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:658 +#, python-format +msgid "Multipath device discovered %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:776 +#, python-format +msgid "AoE volume not yet found at: %(path)s. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:789 +#, python-format +msgid "Found AoE device %(path)s (after %(tries)s rediscover)" +msgstr "" + +#: cinder/brick/initiator/connector.py:815 +#, python-format +msgid "aoe-discover: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:825 +#, python-format +msgid "aoe-revalidate %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:834 +#, python-format +msgid "aoe-flush %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:858 +msgid "" +"Connection details not present. RemoteFsClient may not initialize " +"properly." 
+msgstr "" + +#: cinder/brick/initiator/connector.py:915 +msgid "Invalid connection_properties specified no device_path attribute" +msgstr "" + +#: cinder/brick/initiator/linuxfc.py:50 cinder/brick/initiator/linuxfc.py:56 +msgid "systool is not installed" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:99 +#: cinder/brick/initiator/linuxscsi.py:107 +#: cinder/brick/initiator/linuxscsi.py:124 +#, python-format +msgid "multipath call failed exit (%(code)s)" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:145 +#, python-format +msgid "Couldn't find multipath device %(line)s" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:149 +#, python-format +msgid "Found multipath device = %(mdev)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:140 +msgid "Attempting recreate of backing lun..." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:158 +#, python-format +msgid "" +"Failed to recover attempt to create iscsi backing lun for volume " +"id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:177 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:184 +#, python-format +msgid "" +"Created volume path %(vp)s,\n" +"content: %(vc)%" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:216 cinder/brick/iscsi/iscsi.py:365 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:227 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:258 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:262 +#, python-format +msgid "Volume path %s does not exist, nothing to remove." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:280 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:290 cinder/brick/iscsi/iscsi.py:550 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:375 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:469 +msgid "cinder-rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:489 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:513 cinder/brick/iscsi/iscsi.py:522 +#, python-format +msgid "Failed to create iscsi target for volume id:%s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:532 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:542 +#, python-format +msgid "Failed to remove iscsi target for volume id:%s." 
+msgstr "" + +#: cinder/brick/iscsi/iscsi.py:571 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 cinder/brick/local_dev/lvm.py:158 +#: cinder/brick/local_dev/lvm.py:474 cinder/brick/local_dev/lvm.py:503 +#: cinder/brick/local_dev/lvm.py:546 cinder/brick/local_dev/lvm.py:609 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 cinder/brick/local_dev/lvm.py:159 +#: cinder/brick/local_dev/lvm.py:475 cinder/brick/local_dev/lvm.py:504 +#: cinder/brick/local_dev/lvm.py:547 cinder/brick/local_dev/lvm.py:610 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 cinder/brick/local_dev/lvm.py:160 +#: cinder/brick/local_dev/lvm.py:476 cinder/brick/local_dev/lvm.py:505 +#: cinder/brick/local_dev/lvm.py:548 cinder/brick/local_dev/lvm.py:611 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, python-format +msgid "Unable to locate Volume Group %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:157 +msgid "Error querying thin pool about data_percent" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:370 +#, python-format +msgid "Unable to find VG: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:420 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:473 +msgid "Error creating Volume" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:489 +#, python-format +msgid "Unable to find LV: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:502 +msgid "Error creating snapshot" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:545 +msgid "Error activating LV" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:563 +#, python-format +msgid "Error reported running lvremove: CMD: %(command)s, RESPONSE: %(response)s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:568 +msgid "Attempting udev settle and retry of lvremove..." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:608 +msgid "Error extending Volume" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:39 +msgid "nfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:45 +msgid "glusterfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:86 +#, python-format +msgid "Already mounted: %s" +msgstr "" + +#: cinder/common/config.py:125 +msgid "Deploy v1 of the Cinder API." +msgstr "" + +#: cinder/common/config.py:128 +msgid "Deploy v2 of the Cinder API." +msgstr "" + +#: cinder/common/sqlalchemyutils.py:66 +#: cinder/openstack/common/db/sqlalchemy/utils.py:72 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:114 +#: cinder/openstack/common/db/sqlalchemy/utils.py:120 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/compute/nova.py:97 +#, python-format +msgid "Novaclient connection created using URL: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:63 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:190 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:843 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1842 +#, python-format +msgid "VolumeType %s deletion failed, VolumeType in use." 
+msgstr "" + +#: cinder/db/sqlalchemy/api.py:2530 +#, python-format +msgid "No backup with id %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2615 +msgid "Volume must be available" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2639 +#, python-format +msgid "Volume in unexpected state %s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2662 +#, python-format +msgid "" +"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " +"%(status)s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:37 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:64 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:240 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:269 +msgid "Downgrade from initial Cinder install is unsupported." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:49 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:74 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:105 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:45 +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:48 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:46 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:127 +msgid "Dropping foreign key reservations_ibfk_1 failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:133 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:140 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:147 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:60 +msgid "Exception while creating table 'volume_glance_metadata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:75 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:68 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:58 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:61 +msgid "transfers table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:31 +msgid "migrations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:61 +#, python-format +msgid "Table |%s| not created" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:37 +#, python-format +msgid "Exception while dropping table %s." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:100 +#, python-format +msgid "Exception while creating table %s." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:34 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:43 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:49 +#, python-format +msgid "Column |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:92 +msgid "encryption_key_id column not dropped from volumes" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:100 +msgid "encryption_key_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:105 +msgid "volume_type_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:113 +msgid "encryption table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:49 +msgid "Table quality_of_service_specs not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:60 +msgid "Added qos_specs_id column to volume type table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:85 +msgid "Dropping foreign key volume_types_ibfk_1 failed" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:93 +msgid "Dropping qos_specs_id column failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:100 +msgid "Dropping quality_of_service_specs table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:59 +msgid "volume_admin_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:48 +msgid "" +"Found existing 'default' entries in the quota_classes table. Skipping " +"insertion of default values." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:72 +msgid "Added default quota class data into the DB." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:74 +msgid "Default quota class data not inserted into the DB." +msgstr "" + +#: cinder/image/glance.py:161 cinder/image/glance.py:169 +#, python-format +msgid "Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:94 cinder/image/image_utils.py:199 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/image/image_utils.py:101 +#, python-format +msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:109 cinder/image/image_utils.py:192 +#, python-format +msgid "" +"Size is %(image_size)dGB and doesn't fit in a volume of size " +"%(volume_size)dGB." +msgstr "" + +#: cinder/image/image_utils.py:157 +#, python-format +msgid "" +"qemu-img is not installed and image is of type %s. Only RAW images can " +"be used if qemu-img is not installed." +msgstr "" + +#: cinder/image/image_utils.py:164 +msgid "" +"qemu-img is not installed and the disk format is not specified. Only RAW" +" images can be used if qemu-img is not installed." 
+msgstr "" + +#: cinder/image/image_utils.py:178 +#, python-format +msgid "Copying image from %(tmp)s to volume %(dest)s - size: %(size)s" +msgstr "" + +#: cinder/image/image_utils.py:206 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:224 +#, python-format +msgid "Converted to %(vol_format)s, but format is now %(file_format)s" +msgstr "" + +#: cinder/image/image_utils.py:260 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:78 +msgid "" +"config option keymgr.fixed_key has not been defined: some operations may " +"fail unexpectedly" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:80 +msgid "keymgr.fixed_key not defined" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:134 +#, python-format +msgid "Not deleting key %s" +msgstr "" + +#: cinder/openstack/common/eventlet_backdoor.py:140 +#, python-format +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/fileutils.py:64 +#, python-format +msgid "Reloading cached file %s" +msgstr "" + +#: cinder/openstack/common/gettextutils.py:252 +msgid "Message objects do not support addition." +msgstr "" + +#: cinder/openstack/common/gettextutils.py:261 +msgid "" +"Message objects do not support str() because they may contain non-ascii " +"characters. Please use unicode() or translate() instead." +msgstr "" + +#: cinder/openstack/common/imageutils.py:96 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:189 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:200 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:227 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:235 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/log.py:301 +#, python-format +msgid "Deprecated: %s" +msgstr "" + +#: cinder/openstack/common/log.py:402 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:453 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:623 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:82 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:89 +#: cinder/tests/brick/test_brick_connector.py:466 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:129 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:136 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:43 +#, python-format +msgid "Unexpected argument for periodic task creation: %(arg)s." 
+msgstr "" + +#: cinder/openstack/common/periodic_task.py:134 +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:139 +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:177 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:186 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." +msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:127 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/openstack/common/processutils.py:142 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:167 +#: cinder/openstack/common/processutils.py:239 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:345 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:179 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/openstack/common/processutils.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:318 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:220 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/openstack/common/processutils.py:224 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/openstack/common/service.py:175 +#: cinder/openstack/common/service.py:269 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:187 +msgid "Exception during rpc cleanup." 
+msgstr "" + +#: cinder/openstack/common/service.py:238 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:275 +msgid "Unhandled exception" +msgstr "" + +#: cinder/openstack/common/service.py:308 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/openstack/common/service.py:327 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/openstack/common/service.py:337 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/openstack/common/service.py:354 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/openstack/common/service.py:358 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/service.py:362 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/openstack/common/service.py:392 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/openstack/common/service.py:410 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/openstack/common/sslutils.py:98 +#, python-format +msgid "Invalid SSL version : %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:86 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/strutils.py:182 +#, python-format +msgid "Invalid string format: %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:189 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/openstack/common/versionutils.py:69 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and " +"may be removed in %(remove_in)s." +msgstr "" + +#: cinder/openstack/common/versionutils.py:73 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s and may be removed in " +"%(remove_in)s. It will not be superseded." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:29 +msgid "An unknown error occurred in crypto utils." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:36 +#, python-format +msgid "Block size of %(given)d is too big, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:45 +#, python-format +msgid "Length of %(given)d is too long, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/db/exception.py:44 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:487 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:538 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:610 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/utils.py:33 +msgid "Sort key supplied was not valid." +msgstr "" + +#: cinder/openstack/common/notifier/api.py:129 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:145 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:164 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. 
Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:105 +#, python-format +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:83 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:216 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:299 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:345 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "received %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:422 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:423 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:280 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:459 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:594 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:597 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:631 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:640 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:668 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." 
+msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:166 +#: cinder/openstack/common/rpc/impl_qpid.py:164 +msgid "Failed to process message... skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:477 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:499 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:536 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:552 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:606 +#: cinder/openstack/common/rpc/impl_qpid.py:507 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:624 +#: cinder/openstack/common/rpc/impl_qpid.py:522 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:628 +#: cinder/openstack/common/rpc/impl_qpid.py:526 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:667 +#: cinder/openstack/common/rpc/impl_qpid.py:561 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:84 +#, python-format +msgid "Invalid value for qpid_topology_version: %d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:455 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:461 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:474 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:534 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:101 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:136 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:137 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:138 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:146 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:158 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:200 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:205 +msgid "You cannot send on this socket." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:267 +#, python-format +msgid "Running func with context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:305 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:387 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:437 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:443 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:475 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:481 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:497 +#, python-format +msgid "Required IPC directory does not exist at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:506 +#, python-format +msgid "Permission denied to IPC directory at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:509 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:543 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:562 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:590 +msgid "Skipping topic registration. Already registered." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:597 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:649 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:662 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:675 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:678 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:681 +#, python-format +msgid "Received message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:682 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:691 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:698 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:721 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:724 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:728 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:731 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:771 +#, python-format +msgid "topic is %s." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:815 +#, python-format +msgid "rpc_zmq_matchmaker = %(orig)s is deprecated; use %(new)s instead" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#: cinder/openstack/common/rpc/matchmaker_ring.py:79 +#: cinder/openstack/common/rpc/matchmaker_ring.py:97 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:54 +#, python-format +msgid "extra_spec requirement '%(req)s' does not match '%(cap)s'" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:67 +#, python-format +msgid "%(host_state)s fails resource_type extra_specs requirements" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:43 +msgid "Re-scheduling is disabled." +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:52 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/scheduler/driver.py:69 +msgid "Must implement host_passes_filters" +msgstr "" + +#: cinder/scheduler/driver.py:74 +msgid "Must implement find_retype_host" +msgstr "" + +#: cinder/scheduler/driver.py:78 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:82 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:98 +#, python-format +msgid "cannot place volume %(id)s on %(host)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:114 +#, python-format +msgid "No valid hosts for volume %(id)s with type %(type)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:125 +#, python-format +msgid "" +"Current host not valid for volume %(id)s with type %(type)s, migration " +"not allowed" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:156 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:174 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:207 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:259 +#, python-format +msgid "Filtered %s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:276 +#, python-format +msgid "Choosing %s" +msgstr "" + +#: cinder/scheduler/host_manager.py:264 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:269 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:294 +#, python-format +msgid "volume service is down or disabled. (host: %s)" +msgstr "" + +#: cinder/scheduler/manager.py:63 +msgid "" +"ChanceScheduler and SimpleScheduler have been deprecated due to lack of " +"support for advanced features like: volume types, volume encryption, QoS " +"etc. These two schedulers can be fully replaced by FilterScheduler with " +"certain combination of filters and weighers." 
+msgstr "" + +#: cinder/scheduler/manager.py:98 cinder/scheduler/manager.py:100 +msgid "Failed to create scheduler manager volume flow" +msgstr "" + +#: cinder/scheduler/manager.py:159 +msgid "New volume type not specified in request_spec." +msgstr "" + +#: cinder/scheduler/manager.py:174 +#, python-format +msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." +msgstr "" + +#: cinder/scheduler/manager.py:192 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:68 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%s'" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:43 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:57 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:53 +msgid "No volume_id provided to populate a request_spec from" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:116 +#, python-format +msgid "Failed to schedule_create_volume: %(cause)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:135 +#, python-format +msgid "Failed notifying on %(topic)s payload %(payload)s" +msgstr "" + +#: cinder/tests/fake_driver.py:57 cinder/volume/driver.py:784 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:76 cinder/volume/driver.py:884 +#, python-format +msgid "FAKE ISER: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:97 +msgid "local_path not implemented" +msgstr "" + +#: cinder/tests/fake_driver.py:124 cinder/tests/fake_driver.py:129 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:70 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:78 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:94 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:97 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:58 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_netapp_nfs.py:360 +#, python-format +msgid "Share %(share)s and file name %(file_name)s" +msgstr "" + +#: cinder/tests/test_rbd.py:768 cinder/volume/drivers/rbd.py:175 +msgid "flush() not supported in this version of librbd" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:260 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1507 +#, python-format +msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1510 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1515 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:60 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:61 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: 
cinder/tests/test_xiv_ds8k.py:102 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/tests/api/contrib/test_backups.py:741 +msgid "Invalid input" +msgstr "" + +#: cinder/tests/integrated/test_login.py:29 +#, python-format +msgid "volume: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:32 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:42 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:50 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:58 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:100 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:103 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:121 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:148 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:159 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:166 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/transfer/api.py:68 +msgid "Volume in unexpected state" +msgstr "" + +#: cinder/transfer/api.py:102 cinder/volume/api.py:367 +msgid "status must be available" +msgstr "" + +#: cinder/transfer/api.py:119 +#, python-format +msgid "Failed to create transfer record for %s" +msgstr "" + +#: cinder/transfer/api.py:136 +#, python-format +msgid "Attempt to transfer %s with invalid auth key." +msgstr "" + +#: cinder/transfer/api.py:156 cinder/volume/flows/api/create_volume.py:508 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/transfer/api.py:182 +#, python-format +msgid "Failed to update quota donating volume transfer id %s" +msgstr "" + +#: cinder/transfer/api.py:199 +#, python-format +msgid "Volume %s has been transferred."
+msgstr "" + +#: cinder/volume/api.py:143 +#, python-format +msgid "Unable to query if %s is in the availability zone set" +msgstr "" + +#: cinder/volume/api.py:171 cinder/volume/api.py:173 +msgid "Failed to create api volume flow" +msgstr "" + +#: cinder/volume/api.py:202 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:214 +#, python-format +msgid "Volume status must be available or error, but current status is: %s" +msgstr "" + +#: cinder/volume/api.py:224 +msgid "Volume cannot be deleted while migrating" +msgstr "" + +#: cinder/volume/api.py:229 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:285 cinder/volume/api.py:350 +#: cinder/volume/qos_specs.py:240 cinder/volume/volume_types.py:67 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:370 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:377 +msgid "status must be in-use to detach" +msgstr "" + +#: cinder/volume/api.py:388 +msgid "Volume status must be available to reserve" +msgstr "" + +#: cinder/volume/api.py:464 +msgid "Snapshot cannot be created while volume is migrating" +msgstr "" + +#: cinder/volume/api.py:468 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:490 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:502 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:553 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/api.py:581 cinder/volume/flows/api/create_volume.py:208 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:585 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:589 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:720 cinder/volume/api.py:772 +msgid "Volume status must be available/in-use." +msgstr "" + +#: cinder/volume/api.py:723 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/api.py:752 +msgid "Volume status must be available to extend." +msgstr "" + +#: cinder/volume/api.py:757 +#, python-format +msgid "" +"New size for extend must be greater than current size. (current: " +"%(size)s, extended: %(new_size)s)" +msgstr "" + +#: cinder/volume/api.py:778 +msgid "Volume is already part of an active migration" +msgstr "" + +#: cinder/volume/api.py:784 +msgid "volume must not have snapshots" +msgstr "" + +#: cinder/volume/api.py:797 +#, python-format +msgid "No available service named %s" +msgstr "" + +#: cinder/volume/api.py:803 +msgid "Destination host must be different than current host" +msgstr "" + +#: cinder/volume/api.py:833 +msgid "Source volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:837 +msgid "Destination volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:842 +#, python-format +msgid "Destination has migration_status %(stat)s, expected %(exp)s." +msgstr "" + +#: cinder/volume/api.py:853 +msgid "Volume status must be available to update readonly flag." 
+msgstr "" + +#: cinder/volume/api.py:862 +#, python-format +msgid "Unable to update type due to incorrect status on volume: %s" +msgstr "" + +#: cinder/volume/api.py:868 +#, python-format +msgid "Volume %s is already part of an active migration." +msgstr "" + +#: cinder/volume/api.py:874 +#, python-format +msgid "migration_policy must be 'on-demand' or 'never', passed: %s" +msgstr "" + +#: cinder/volume/api.py:887 +#, python-format +msgid "Invalid volume_type passed: %s" +msgstr "" + +#: cinder/volume/api.py:900 +#, python-format +msgid "New volume_type same as original: %s" +msgstr "" + +#: cinder/volume/api.py:915 +msgid "Retype cannot change encryption requirements" +msgstr "" + +#: cinder/volume/api.py:927 +msgid "Retype cannot change front-end qos specs for in-use volumes" +msgstr "" + +#: cinder/volume/driver.py:189 cinder/volume/drivers/netapp/nfs.py:174 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:282 +#, python-format +msgid "copy_data_between_volumes %(src)s -> %(dest)s." +msgstr "" + +#: cinder/volume/driver.py:295 cinder/volume/driver.py:309 +#, python-format +msgid "Failed to attach volume %(vol)s" +msgstr "" + +#: cinder/volume/driver.py:327 +#, python-format +msgid "Failed to copy volume %(src)s to %(dest)d" +msgstr "" + +#: cinder/volume/driver.py:340 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:358 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:394 +#, python-format +msgid "Unable to access the backend storage via the path %(path)s." +msgstr "" + +#: cinder/volume/driver.py:433 +#, python-format +msgid "Creating a new backup for volume %s." +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Restoring backup %(backup)s to volume %(volume)s." +msgstr "" + +#: cinder/volume/driver.py:474 +msgid "Extend volume not implemented" +msgstr "" + +#: cinder/volume/driver.py:533 cinder/volume/drivers/emc/emc_smis_iscsi.py:113 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:546 +#, python-format +msgid "ISCSI discovery attempt failed for:%s" +msgstr "" + +#: cinder/volume/driver.py:548 +#, python-format +msgid "Error from iscsiadm -m discovery: %s" +msgstr "" + +#: cinder/volume/driver.py:595 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:599 cinder/volume/drivers/emc/emc_smis_iscsi.py:156 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:696 +msgid "The volume driver requires the iSCSI initiator name in the connector." +msgstr "" + +#: cinder/volume/driver.py:726 cinder/volume/driver.py:845 +#: cinder/volume/drivers/eqlx.py:247 cinder/volume/drivers/lvm.py:359 +#: cinder/volume/drivers/zadara.py:650 +#: cinder/volume/drivers/emc/emc_smis_common.py:859 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:235 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:602 +#: cinder/volume/drivers/netapp/iscsi.py:1032 +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/nexenta/iscsi.py:538 +#: cinder/volume/drivers/windows/windows.py:205 +msgid "Updating volume stats" +msgstr "" + +#: cinder/volume/driver.py:924 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:203 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." 
+msgstr "" + +#: cinder/volume/manager.py:209 +msgid "" +"ThinLVMVolumeDriver is deprecated, please configure LVMISCSIDriver and " +"lvm_type=thin. Continuing with those settings." +msgstr "" + +#: cinder/volume/manager.py:228 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)" +msgstr "" + +#: cinder/volume/manager.py:235 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s" +msgstr "" + +#: cinder/volume/manager.py:244 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:257 +#, python-format +msgid "Failed to re-export volume %s: setting to error state" +msgstr "" + +#: cinder/volume/manager.py:264 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:271 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:273 +#, python-format +msgid "" +"Error encountered during re-exporting phase of driver initialization: " +"%(name)s" +msgstr "" + +#: cinder/volume/manager.py:283 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:328 cinder/volume/manager.py:330 +msgid "Failed to create manager volume flow" +msgstr "" + +#: cinder/volume/manager.py:374 cinder/volume/manager.py:391 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:380 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:389 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:394 +#, python-format +msgid "Cannot delete volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:422 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:427 +#, python-format +msgid "volume %s: glance metadata deleted" +msgstr "" + +#: cinder/volume/manager.py:430 +#, python-format +msgid "no glance metadata found for volume %s" +msgstr "" + +#: cinder/volume/manager.py:434 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:451 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:462 +#, python-format +msgid "snapshot %(snap_id)s: creating" +msgstr "" + +#: cinder/volume/manager.py:490 +#, python-format +msgid "" +"Failed updating %(snapshot_id)s metadata using the provided volumes " +"%(volume_id)s metadata" +msgstr "" + +#: cinder/volume/manager.py:496 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:508 cinder/volume/manager.py:518 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:526 +#, python-format +msgid "Cannot delete snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:556 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:559 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:579 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:583 +msgid "being attached by another host" +msgstr "" + +#: cinder/volume/manager.py:587 +msgid "being attached by different mode" +msgstr "" + +#: cinder/volume/manager.py:590 +msgid "status must be available or attaching" +msgstr "" + +#: cinder/volume/manager.py:698 +#, python-format +msgid 
"Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "" + +#: cinder/volume/manager.py:760 +#, python-format +msgid "Unable to fetch connection information from backend: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:807 +#, python-format +msgid "Unable to terminate volume connection: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:854 +msgid "failed to create new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:857 +msgid "timeout creating new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:880 +#, python-format +msgid "Failed to copy volume %(vol1)s to %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:909 +#, python-format +msgid "" +"migrate_volume_completion: completing migration for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:921 +#, python-format +msgid "" +"migrate_volume_completion is cleaning up an error for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:940 +#, python-format +msgid "Failed to delete migration source vol %(vol)s: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:976 +#, python-format +msgid "volume %s: calling driver migrate_volume" +msgstr "" + +#: cinder/volume/manager.py:1016 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/manager.py:1024 +#, python-format +msgid "" +"Unable to update stats, %(driver_name)s -%(driver_version)s " +"%(config_group)s driver is uninitialized." +msgstr "" + +#: cinder/volume/manager.py:1044 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/manager.py:1091 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to extend volume by %(s_size)sG, " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/manager.py:1103 +#, python-format +msgid "volume %s: extending" +msgstr "" + +#: cinder/volume/manager.py:1105 +#, python-format +msgid "volume %s: extended successfully" +msgstr "" + +#: cinder/volume/manager.py:1107 +#, python-format +msgid "volume %s: Error trying to extend volume" +msgstr "" + +#: cinder/volume/manager.py:1169 +msgid "Failed to update usages while retyping volume." +msgstr "" + +#: cinder/volume/manager.py:1170 +msgid "Failed to get old volume type quota reservations" +msgstr "" + +#: cinder/volume/manager.py:1190 +#, python-format +msgid "Volume %s: retyped succesfully" +msgstr "" + +#: cinder/volume/manager.py:1193 +#, python-format +msgid "" +"Volume %s: driver error when trying to retype, falling back to generic " +"mechanism." +msgstr "" + +#: cinder/volume/manager.py:1204 +msgid "Retype requires migration but is not allowed." +msgstr "" + +#: cinder/volume/manager.py:1212 +msgid "Volume must not have snapshots." 
+msgstr "" + +#: cinder/volume/qos_specs.py:57 +#, python-format +msgid "Valid consumer of QoS specs are: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:84 cinder/volume/qos_specs.py:105 +#: cinder/volume/qos_specs.py:155 cinder/volume/qos_specs.py:197 +#: cinder/volume/qos_specs.py:211 cinder/volume/qos_specs.py:225 +#: cinder/volume/volume_types.py:43 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:123 cinder/volume/qos_specs.py:140 +#: cinder/volume/qos_specs.py:272 cinder/volume/volume_types.py:52 +#: cinder/volume/volume_types.py:99 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/qos_specs.py:156 +#, python-format +msgid "Failed to get all associations of qos specs %s" +msgstr "" + +#: cinder/volume/qos_specs.py:189 +#, python-format +msgid "" +"Type %(type_id)s is already associated with another qos specs: " +"%(qos_specs_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:198 +#, python-format +msgid "Failed to associate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:212 +#, python-format +msgid "Failed to disassociate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:226 +#, python-format +msgid "Failed to disassociate qos specs %s." +msgstr "" + +#: cinder/volume/qos_specs.py:284 cinder/volume/volume_types.py:111 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/utils.py:144 +#, python-format +msgid "" +"Incorrect value error: %(blocksize)s, it may indicate that " +"'volume_dd_blocksize' was configured incorrectly. Fall back to default." +msgstr "" + +#: cinder/volume/volume_types.py:130 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:131 +#: cinder/volume/drivers/block_device.py:143 cinder/volume/drivers/lvm.py:654 +#: cinder/volume/drivers/lvm.py:669 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:157 cinder/volume/drivers/lvm.py:687 +#, python-format +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:183 cinder/volume/drivers/lvm.py:483 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:200 cinder/volume/drivers/lvm.py:504 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:272 cinder/volume/drivers/lvm.py:227 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:287 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:311 cinder/volume/drivers/lvm.py:300 +#: cinder/volume/drivers/zadara.py:509 cinder/volume/drivers/nexenta/nfs.py:189 +#, python-format +msgid "Creating clone of volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:380 +msgid "No free disk" +msgstr "" + +#: cinder/volume/drivers/block_device.py:393 +msgid "No big enough free disk" +msgstr "" + +#: cinder/volume/drivers/coraid.py:84 +#, python-format +msgid "Invalid ESM url scheme \"%s\". Supported https only." +msgstr "" + +#: cinder/volume/drivers/coraid.py:111 +msgid "Invalid REST handle name. Expected path." 
+msgstr "" + +#: cinder/volume/drivers/coraid.py:134 +#, python-format +msgid "Call to json.loads() failed: %(ex)s. Response: %(resp)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:224 +msgid "Session is expired. Relogin on ESM." +msgstr "" + +#: cinder/volume/drivers/coraid.py:244 +msgid "Reply is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:246 +msgid "Error message is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:284 +#, python-format +msgid "Coraid Appliance ping failed: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:297 +#, python-format +msgid "Volume \"%(name)s\" created with VSX LUN \"%(lun)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:311 +#, python-format +msgid "Volume \"%s\" deleted." +msgstr "" + +#: cinder/volume/drivers/coraid.py:315 +#, python-format +msgid "Resize volume \"%(name)s\" to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:319 +#, python-format +msgid "Repository for volume \"%(name)s\" found: \"%(repo)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:333 +#, python-format +msgid "Volume \"%(name)s\" resized. New size is %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:385 +msgid "Cannot create clone volume in different repository." +msgstr "" + +#: cinder/volume/drivers/coraid.py:505 +#, python-format +msgid "Initialize connection %(shelf)s/%(lun)s for %(name)s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:139 +#, python-format +msgid "" +"CLI output\n" +"%s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:154 +msgid "Reading CLI MOTD" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:158 +#, python-format +msgid "Setting CLI terminal width: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:162 +#, python-format +msgid "Sending CLI command: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:169 +msgid "Error executing EQL command" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:199 +#, python-format +msgid "EQL-driver: executing \"%s\"" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:208 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:383 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:215 cinder/volume/drivers/san/san.py:149 +#, python-format +msgid "Error running SSH command: %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:282 +#, python-format +msgid "Volume %s does not exist, it may have already been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:300 +#, python-format +msgid "EQL-driver: Setup is complete, group IP is %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:304 +msgid "Failed to setup the Dell EqualLogic driver" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:320 +#, python-format +msgid "Failed to create volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:329 +#, python-format +msgid "Volume %s was not found while trying to delete it" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:333 +#, python-format +msgid "Failed to delete volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:348 +#, python-format +msgid "Failed to create snapshot of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:361 +#, python-format +msgid "Failed to create volume from snapshot %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:374 +#, python-format +msgid "Failed to create clone of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:384 +#, python-format +msgid "Failed to delete snapshot %(snap)s of volume %(vol)s" +msgstr "" + +#: 
cinder/volume/drivers/eqlx.py:405 +#, python-format +msgid "Failed to initialize connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:415 +#, python-format +msgid "Failed to terminate connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:436 +#, python-format +msgid "Volume %s was not found; it may have been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:440 +#, python-format +msgid "Failed to ensure export of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:459 +#, python-format +msgid "Failed to extend_volume %(name)s from %(current_size)sGB to %(new_size)sGB" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:86 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:91 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:103 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:161 +#, python-format +msgid "Cloning volume %(src)s to volume %(dst)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:166 +msgid "Volume status must be 'available'." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:202 cinder/volume/drivers/nfs.py:122 +#: cinder/volume/drivers/netapp/nfs.py:753 +#, python-format +msgid "casted to %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:216 +msgid "Snapshot status must be \"available\" to clone." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:238 +#, python-format +msgid "snapshot: %(snap)s, volume: %(vol)s, volume_size: %(size)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:257 +#, python-format +msgid "will copy from snapshot at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:275 cinder/volume/drivers/nfs.py:172 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:373 +#, python-format +msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:403 +#, python-format +msgid "nova call result: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:405 +msgid "Call to Nova to create snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:427 +msgid "Nova returned \"error\" status while creating snapshot." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:431 +#, python-format +msgid "Status of snapshot %(id)s is now %(status)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:444 +#, python-format +msgid "Timed out while waiting for Nova update for creation of snapshot %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:456 +#, python-format +msgid "create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:457 +#, python-format +msgid "volume id: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:532 +msgid "'active' must be present when writing snap_info." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:562 +#, python-format +msgid "deleting snapshot %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:566 +msgid "Volume status must be \"available\" or \"in-use\"." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:582 +#, python-format +msgid "" +"Snapshot record for %s is not present, allowing snapshot_delete to " +"proceed."
+msgstr "" + +#: cinder/volume/drivers/glusterfs.py:587 +#, python-format +msgid "snapshot_file for this snap is %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:608 +#, python-format +msgid "No base file found for %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:625 +#, python-format +msgid "No %(base_id)s found for %(file)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:680 +#, python-format +msgid "No file found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:690 +#, python-format +msgid "No snap found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:701 +#, python-format +msgid "No file depends on %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:727 +#, python-format +msgid "Check condition failed: %s expected to be None." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:778 +msgid "Call to Nova delete snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:796 +#, python-format +msgid "status of snapshot %s is still \"deleting\"... waiting" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:802 +#, python-format +msgid "Unable to delete snapshot %(id)s, status: %(status)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:815 +#, python-format +msgid "Timed out while waiting for Nova update for deletion of snapshot %(id)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:904 +#, python-format +msgid "%s must be a valid raw or qcow2 image." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:967 +msgid "Extend volume is only supported for this driver when no snapshots exist." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:975 +#, python-format +msgid "Unrecognized backing format: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:990 +#, python-format +msgid "creating new volume at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:993 +#, python-format +msgid "file already exists at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1019 cinder/volume/drivers/nfs.py:159 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1021 +#, python-format +msgid "Available shares: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1038 +#, python-format +msgid "" +"GlusterFS share at %(dir)s is not writable by the Cinder volume service. " +"Snapshot operations will not be supported." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:96 +#, python-format +msgid "GPFS is not active. Detailed output: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:97 +#, python-format +msgid "GPFS is not running - state: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:140 +msgid "Option gpfs_mount_point_base is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:147 +msgid "Option gpfs_images_share_mode is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:153 +msgid "Option gpfs_images_dir is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:160 +#, python-format +msgid "" +"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " +"belong to different file systems" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:169 +#, python-format +msgid "" +"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in " +"cluster daemon level %(cur)s - must be at least at level %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:183 +#, python-format +msgid "%s must be an absolute path." 
+msgstr ""
+
+#: cinder/volume/drivers/gpfs.py:188
+#, python-format
+msgid "%s is not a directory."
+msgstr ""
+
+#: cinder/volume/drivers/gpfs.py:197
+#, python-format
+msgid ""
+"The GPFS filesystem %(fs)s is not at the required release level. Current"
+" level is %(cur)s, must be at least %(min)s."
+msgstr ""
+
+#: cinder/volume/drivers/gpfs.py:556
+#, python-format
+msgid "Failed to resize volume %(volume_id)s, error: %(error)s"
+msgstr ""
+
+#: cinder/volume/drivers/gpfs.py:604
+#, python-format
+msgid "mkfs failed on volume %(vol)s, error message was: %(err)s"
+msgstr ""
+
+#: cinder/volume/drivers/gpfs.py:637
+#, python-format
+msgid ""
+"%s cannot be accessed. Verify that GPFS is active and file system is "
+"mounted."
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:189
+#, python-format
+msgid "Unable to delete due to existing snapshot for volume: %s"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:215
+#, python-format
+msgid "Volume device file path %s does not exist."
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:221
+#, python-format
+msgid "Size for volume: %s not found, cannot secure delete."
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:262
+#, python-format
+msgid "snapshot: %s not found, skipping delete operations"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:361
+#, python-format
+msgid "Unable to update stats on non-initialized Volume Group: %s"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:462
+#, python-format
+msgid "Error creating iSCSI target, retrying creation for target: %s"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:482
+#, python-format
+msgid "volume_info:%s"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:518
+msgid "Detected inconsistency in provider_location id"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:519 cinder/volume/drivers/lvm.py:724
+#: cinder/volume/drivers/huawei/rest_common.py:1225
+#, python-format
+msgid "%s"
+msgstr ""
+
+#: cinder/volume/drivers/lvm.py:573
+#, python-format
+msgid "Symbolic link %s not found"
+msgstr ""
+
+#: cinder/volume/drivers/nfs.py:109
+msgid "Driver specific implementation needs to return mount_point_base."
+msgstr ""
+
+#: cinder/volume/drivers/nfs.py:263
+#, python-format
+msgid "Expected volume size was %d"
+msgstr ""
+
+#: cinder/volume/drivers/nfs.py:264
+#, python-format
+msgid " but size is now %d"
+msgstr ""
+
+#: cinder/volume/drivers/nfs.py:361
+#, python-format
+msgid "%s is already mounted"
+msgstr ""
+
+#: cinder/volume/drivers/nfs.py:421
+#, python-format
+msgid "There's no NFS config file configured (%s)"
+msgstr ""
+
+#: cinder/volume/drivers/nfs.py:426
+#, python-format
+msgid "NFS config file at %(config)s doesn't exist"
+msgstr ""
+
+#: cinder/volume/drivers/nfs.py:431
+#, python-format
+msgid "NFS config 'nfs_oversub_ratio' invalid. Must be > 0: %s"
+msgstr ""
+
+#: cinder/volume/drivers/nfs.py:439
+#, python-format
+msgid "NFS config 'nfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s"
+msgstr ""
+
+#: cinder/volume/drivers/nfs.py:493
+#, python-format
+msgid "Selected %s as target nfs share."
+msgstr "" + +#: cinder/volume/drivers/nfs.py:526 +#, python-format +msgid "%s is above nfs_used_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:529 +#, python-format +msgid "%s is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:532 +#, python-format +msgid "%s reserved space is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/rbd.py:160 +#, python-format +msgid "Invalid argument - whence=%s not supported" +msgstr "" + +#: cinder/volume/drivers/rbd.py:164 +msgid "Invalid argument" +msgstr "" + +#: cinder/volume/drivers/rbd.py:183 +msgid "fileno() not supported by RBD()" +msgstr "" + +#: cinder/volume/drivers/rbd.py:210 +#, python-format +msgid "error opening rbd image %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:259 +msgid "rados and rbd python libraries not found" +msgstr "" + +#: cinder/volume/drivers/rbd.py:265 +msgid "error connecting to ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:346 cinder/volume/drivers/sheepdog.py:178 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:377 +#, python-format +msgid "clone depth exceeds limit of %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:411 +#, python-format +msgid "maximum clone depth (%d) has been reached - flattening source volume" +msgstr "" + +#: cinder/volume/drivers/rbd.py:423 +#, python-format +msgid "flattening source volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:435 +#, python-format +msgid "creating snapshot='%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:445 +#, python-format +msgid "cloning '%(src_vol)s@%(src_snap)s' to '%(dest)s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:459 +msgid "clone created successfully" +msgstr "" + +#: cinder/volume/drivers/rbd.py:468 +#, python-format +msgid "creating volume '%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:484 +#, python-format +msgid "flattening %(pool)s/%(img)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:490 +#, python-format +msgid "cloning %(pool)s/%(img)s@%(snap)s to %(dst)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:527 +msgid "volume has no backup snaps" +msgstr "" + +#: cinder/volume/drivers/rbd.py:550 +#, python-format +msgid "volume %s is not a clone" +msgstr "" + +#: cinder/volume/drivers/rbd.py:568 +#, python-format +msgid "deleting parent snapshot %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:579 +#, python-format +msgid "deleting parent %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:593 +#, python-format +msgid "volume %s no longer exists in backend" +msgstr "" + +#: cinder/volume/drivers/rbd.py:609 +msgid "volume has clone snapshot(s)" +msgstr "" + +#: cinder/volume/drivers/rbd.py:625 +#, python-format +msgid "deleting rbd volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:629 +msgid "" +"ImageBusy error raised while deleting rbd volume. This may have been " +"caused by a connection from a client that has crashed and, if so, may be " +"resolved by retrying the delete after 30 seconds has elapsed." 
+msgstr "" + +#: cinder/volume/drivers/rbd.py:642 +msgid "volume is a clone so cleaning references" +msgstr "" + +#: cinder/volume/drivers/rbd.py:696 +#, python-format +msgid "connection data: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:705 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:709 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:712 +msgid "Not an rbd snapshot" +msgstr "" + +#: cinder/volume/drivers/rbd.py:724 +#, python-format +msgid "not cloneable: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:728 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:733 +msgid "rbd image clone requires image format to be 'raw' but image {0} is '{1}'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:747 +#, python-format +msgid "Unable to open image %(loc)s: %(err)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:817 +msgid "volume backup complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:830 +msgid "volume restore complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:840 cinder/volume/drivers/sheepdog.py:195 +#, python-format +msgid "Failed to Extend Volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:845 cinder/volume/drivers/sheepdog.py:200 +#: cinder/volume/drivers/windows/windows.py:223 +#, python-format +msgid "Extend volume from %(old_size)s GB to %(new_size)s GB." +msgstr "" + +#: cinder/volume/drivers/scality.py:67 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:78 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:84 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:105 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:139 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:59 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:64 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:144 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:151 +#, python-format +msgid "" +"Failed to make httplib connection SolidFire Cluster: %s (verify san_ip " +"settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:154 +#, python-format +msgid "Failed to make httplib connection: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:161 +#, python-format +msgid "" +"Request to SolidFire cluster returned bad status: %(status)s / %(reason)s" +" (check san_login/san_password settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:166 +#, python-format +msgid "HTTP request failed, with status: %(status)s and reason: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:177 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:183 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:187 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:189 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:195 +#, python-format +msgid "Detected 
xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:202 +#: cinder/volume/drivers/solidfire.py:271 +#: cinder/volume/drivers/solidfire.py:366 +#, python-format +msgid "API response: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:222 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:253 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:315 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:398 +msgid "Failed to get model update from clone" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:410 +#, python-format +msgid "Failed volume create: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:425 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:460 +#, python-format +msgid "Failed to get SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:469 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:478 +#, python-format +msgid "Volume %s, not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:481 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:550 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:554 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:556 +msgid "This usually means the volume was never successfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:569 +#, python-format +msgid "Failed to delete SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:572 +#: cinder/volume/drivers/solidfire.py:646 +#: cinder/volume/drivers/solidfire.py:709 +#: cinder/volume/drivers/solidfire.py:734 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:575 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:579 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:587 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:638 +msgid "Entering SolidFire extend_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:660 +msgid "Leaving SolidFire extend_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:665 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:673 +msgid "Failed to get updated stats" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:703 +#: cinder/volume/drivers/solidfire.py:728 +msgid "Entering SolidFire attach_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:773 +msgid "Leaving SolidFire transfer volume" +msgstr "" + +#: cinder/volume/drivers/zadara.py:236 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:260 +#, python-format +msgid "Operation completed. 
%(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:357 +#, python-format +msgid "Pool %(name)s: %(total)sGB total, %(free)sGB free" +msgstr "" + +#: cinder/volume/drivers/zadara.py:408 cinder/volume/drivers/zadara.py:531 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:438 +#, python-format +msgid "Create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:445 cinder/volume/drivers/zadara.py:490 +#: cinder/volume/drivers/zadara.py:516 +#, python-format +msgid "Volume %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:456 +#, python-format +msgid "Delete snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:464 +#, python-format +msgid "snapshot: original volume %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:472 +#, python-format +msgid "snapshot: snapshot %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:483 +#, python-format +msgid "Creating volume from snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:496 +#, python-format +msgid "Snapshot %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:614 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:40 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:79 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:83 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:91 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:98 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:107 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:115 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:130 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:137 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:144 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:152 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:157 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:167 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:177 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:188 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:197 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:218 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:230 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:241 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:257 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:266 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:278 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:287 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:292 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:302 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:312 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:321 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:342 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. 
Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:354 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:365 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:381 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:390 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:402 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:411 +msgid "Entering delete_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:413 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:420 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:430 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:438 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:442 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:456 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:465 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:472 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:476 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:488 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:497 +#: cinder/volume/drivers/emc/emc_smis_common.py:567 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:502 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:518 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:527 +#, python-format +msgid "" +"Error Create Snapshot: %(snapshot)s Volume: %(volume)s Error: " +"%(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:535 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:541 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:545 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:551 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:559 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:574 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:590 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:599 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:611 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:621 +#, python-format +msgid "Create export: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:626 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:648 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:663 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:674 +#, python-format +msgid "Error mapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:678 +#, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:694 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:707 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:711 +#, python-format +msgid "HidePaths for volume %s completed successfully." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:724 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:739 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:744 +#, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:757 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:770 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:775 +#, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:781 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:790 +#: cinder/volume/drivers/emc/emc_smis_common.py:820 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:804 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:810 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:834 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:840 +#, python-format +msgid "Volume %s is already mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:852 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:884 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:887 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:903 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:906 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:928 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:948 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:952 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:959 +msgid "Cannot connect to ECOM server" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:971 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:984 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:997 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1010 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1054 +#, python-format +msgid "Pool %(storage_type)s is not found." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1060 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1066 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1082 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1114 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1117 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1130 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1158 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1184 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1188 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1248 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1289 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1302 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1314 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1326 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1361 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1404 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1409 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1419 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1441 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1463 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1491 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1520 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1526 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1538 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1548 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1550 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1566 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:152 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:161 +#, python-format +msgid "Cannot find device number for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:191 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:198 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:215 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:70 +#, python-format +msgid "Range: start LU: %(start)s, end LU: %(end)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:84 +#, python-format +msgid "setting LU upper (end) limit to %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:92 +#, python-format +msgid "%(element)s: %(val)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:103 cinder/volume/drivers/hds/hds.py:105 +#, python-format +msgid "XML exception reading parameter: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:178 +#, python-format +msgid "portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:197 +#, python-format +msgid "No configuration found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:250 +#, python-format +msgid "HDP not found: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:289 +#, python-format +msgid "iSCSI portal not found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:327 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:355 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is cloned." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:372 +#, python-format +msgid "LUN %(lun)s extended to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:395 +#, python-format +msgid "delete lun %(lun)s on %(name)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:480 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created from snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:503 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is created as snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:522 +#, python-format +msgid "LUN %s is deleted." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:57 +msgid "_instantiate_driver: configuration not found." 
+msgstr ""
+
+#: cinder/volume/drivers/huawei/__init__.py:64
+#, python-format
+msgid ""
+"_instantiate_driver: Loading %(protocol)s driver for Huawei OceanStor "
+"%(product)s series storage arrays."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/__init__.py:84
+#, python-format
+msgid ""
+"\"Product\" or \"Protocol\" is illegal. \"Product\" should be set to "
+"either T, Dorado or HVS. \"Protocol\" should be set to either iSCSI or "
+"FC. Product: %(product)s Protocol: %(protocol)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_dorado.py:74
+#, python-format
+msgid ""
+"initialize_connection: volume name: %(vol)s host: %(host)s initiator: "
+"%(wwn)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_dorado.py:92
+#: cinder/volume/drivers/huawei/huawei_t.py:461
+#, python-format
+msgid "initialize_connection: Target FC ports WWNS: %s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_t.py:101
+#, python-format
+msgid ""
+"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: "
+"%(ini)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_t.py:159
+#: cinder/volume/drivers/huawei/rest_common.py:1278
+#, python-format
+msgid ""
+"_get_iscsi_params: Failed to get target IP for initiator %(ini)s, please "
+"check config file."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_t.py:206
+#: cinder/volume/drivers/huawei/rest_common.py:1083
+#, python-format
+msgid "_get_tgt_iqn: iSCSI IP is %s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_t.py:234
+#, python-format
+msgid "_get_tgt_iqn: iSCSI target iqn is %s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_t.py:248
+#, python-format
+msgid ""
+"_get_iscsi_tgt_port_info: Failed to get iSCSI port info. Please make sure"
+" the iSCSI port IP %s is configured in array."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_t.py:323
+#: cinder/volume/drivers/huawei/huawei_t.py:552
+#, python-format
+msgid ""
+"terminate_connection: volume: %(vol)s, host: %(host)s, connector: "
+"%(initiator)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_t.py:351
+#, python-format
+msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_t.py:436
+msgid "validate_connector: The FC driver requires the wwpns in the connector."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_t.py:443
+#, python-format
+msgid ""
+"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: "
+"%(wwn)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_t.py:578
+#, python-format
+msgid "_remove_fc_ports: FC port was not found on host %(hostid)s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_utils.py:40
+#, python-format
+msgid "parse_xml_file: %s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_utils.py:129
+#, python-format
+msgid "_get_host_os_type: Host %(ip)s OS type is %(os)s."
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:59 +#, python-format +msgid "HVS Request URL: %(url)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:60 +#, python-format +msgid "HVS Request Data: %(data)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:73 +#, python-format +msgid "HVS Response Data: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:75 +#, python-format +msgid "Bad response from server: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:82 +msgid "JSON transfer error" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:102 +#, python-format +msgid "Login error, reason is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:166 +#, python-format +msgid "" +"%(err)s\n" +"result: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:173 +#, python-format +msgid "%s \"data\" was not in result." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:208 +msgid "Can't find the Qos policy in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:246 +msgid "Can't find lun or lun group in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:280 +#, python-format +msgid "Invalid resource pool: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:298 +#, python-format +msgid "Get pool info error, pool name is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:327 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:354 +#, python-format +msgid "_stop_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:474 +#, python-format +msgid "" +"_mapping_hostgroup_and_lungroup: lun_group: %(lun_group)sview_id: " +"%(view_id)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:511 +#: cinder/volume/drivers/huawei/rest_common.py:543 +#, python-format +msgid "initiator name:%(initiator_name)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:527 +#, python-format +msgid "host lun id is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:553 +#, python-format +msgid "the free wwns %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:574 +#, python-format +msgid "the fc server properties is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:688 +#, python-format +msgid "JSON transfer data error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:874 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:937 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:964 +#, python-format +msgid "" +"PrefetchType config is wrong. PrefetchType must in 1,2,3,4. fetchtype " +"is:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:970 +msgid "Use default prefetch fetchtype. Prefetch fetchtype:Intelligent." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:982 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal.LUNcopy name: " +"%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1056 +#, python-format +msgid "" +"_get_iscsi_port_info: Failed to get iscsi port info through config IP " +"%(ip)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1101 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1124 +#, python-format +msgid "_parse_volume_type: type id: %(type_id)s config parameter is: %(params)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1157 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the configuration file " +"%(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1162 +#, python-format +msgid "The config parameters are: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1239 +#: cinder/volume/drivers/huawei/ssh_common.py:118 +#: cinder/volume/drivers/huawei/ssh_common.py:1265 +#, python-format +msgid "_check_conf_file: Config file invalid. %s must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1246 +#: cinder/volume/drivers/huawei/ssh_common.py:125 +msgid "_check_conf_file: Config file invalid. StoragePool must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1256 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1300 +msgid "Can not find lun in array" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:54 +#, python-format +msgid "ssh_read: Read SSH timeout. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:70 +msgid "No response message. Please check system status." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:101 +#: cinder/volume/drivers/huawei/ssh_common.py:1249 +msgid "do_setup" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:135 +#: cinder/volume/drivers/huawei/ssh_common.py:1287 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType is invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:169 +#, python-format +msgid "_get_login_info: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:224 +#, python-format +msgid "create_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:242 +#, python-format +msgid "" +"_name_translate: Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:279 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the element in configuration " +"file %(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:373 +#: cinder/volume/drivers/huawei/ssh_common.py:1451 +#, python-format +msgid "LUNType must be \"Thin\" or \"Thick\". LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:395 +msgid "" +"_parse_conf_lun_params: Use default prefetch type. 
Prefetch type: " +"Intelligent" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:421 +#, python-format +msgid "" +"_get_maximum_capacity_pool_id: Failed to get pool id. Please check config" +" file and make sure the StoragePool %s is created in storage array." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:436 +#, python-format +msgid "CLI command: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:466 +#, python-format +msgid "" +"_execute_cli: Can not connect to IP %(old)s, try to connect to the other " +"IP %(new)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:501 +#, python-format +msgid "_execute_cli: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:511 +#, python-format +msgid "delete_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:516 +#, python-format +msgid "delete_volume: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:570 +#, python-format +msgid "" +"create_volume_from_snapshot: snapshot name: %(snapshot)s, volume name: " +"%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:580 +#, python-format +msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:650 +#, python-format +msgid "_wait_for_luncopy: LUNcopy %(luncopyname)s status is %(status)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:688 +#, python-format +msgid "create_cloned_volume: src volume: %(src)s, tgt volume: %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:697 +#, python-format +msgid "Source volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:739 +#, python-format +msgid "" +"extend_volume: extended volume name: %(extended_name)s new added volume " +"name: %(added_name)s new added volume size: %(added_size)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:747 +#, python-format +msgid "extend_volume: volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:779 +#, python-format +msgid "create_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:785 +msgid "create_snapshot: Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:792 +#, python-format +msgid "create_snapshot: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:855 +#, python-format +msgid "delete_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:865 +#, python-format +msgid "" +"delete_snapshot: Can not delete snapshot %s for it is a source LUN of " +"LUNCopy." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:873 +#, python-format +msgid "delete_snapshot: Snapshot %(snap)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:916 +#, python-format +msgid "" +"%(func)s: %(msg)s\n" +"CLI command: %(cmd)s\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:933 +#, python-format +msgid "map_volume: Volume %s was not found." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1079 +#, python-format +msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1102 +#, python-format +msgid "remove_map: Host %s does not exist." 
+msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1106 +#, python-format +msgid "remove_map: Volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1119 +#, python-format +msgid "remove_map: No map between host %(host)s and volume %(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1138 +#, python-format +msgid "" +"_delete_map: There are IOs accessing the system. Retry to delete host map" +" %(mapid)s 10s later." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1146 +#, python-format +msgid "" +"_delete_map: Failed to delete host map %(mapid)s.\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1185 +msgid "_update_volume_stats: Updating volume stats." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1277 +msgid "_check_conf_file: Config file invalid. StoragePool must be specified." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1311 +msgid "" +"_get_device_type: The driver only supports Dorado5100 and Dorado 2100 G2 " +"now." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1389 +#, python-format +msgid "" +"create_volume_from_snapshot: %(device)s does not support create volume " +"from snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1396 +#, python-format +msgid "create_cloned_volume: %(device)s does not support clone volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1404 +#, python-format +msgid "extend_volume: %(device)s does not support extend volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1413 +#, python-format +msgid "create_snapshot: %(device)s does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:132 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:142 +#, python-format +msgid "Failed getting details for pool %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:179 +msgid "do_setup: No configured nodes." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:182 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:186 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:190 +msgid "Unable to determine system name" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:193 +msgid "Unable to determine system id" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:196 +msgid "Unable to determine pool extent size" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:203 +#: cinder/volume/drivers/netapp/iscsi.py:122 +#: cinder/volume/drivers/netapp/nfs.py:639 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:157 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:209 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:217 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:225 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:235 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:254 +msgid "The connector does not contain the required information." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:277 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:297 +msgid "CHAP secret exists for host but CHAP is disabled" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:302 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:314 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:316 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:333 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:342 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:365 +msgid "" +"Could not get FC connection information for the host-volume connection. " +"Is the host configured properly for FC connections?" 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:380 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:385 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:403 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:411 +msgid "terminate_connection: Failed to get host name from connector." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:421 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:447 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:459 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:470 +#, python-format +msgid "enter: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:474 +msgid "extend_volume: Extending a volume with snapshots is not supported." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:481 +#, python-format +msgid "leave: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:497 +#, python-format +msgid "enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:523 +#, python-format +msgid "leave: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:540 +#, python-format +msgid "" +"enter: retype: id=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:581 +#, python-format +msgid "" +"exit: retype: ild=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:622 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:623 +msgid "_update_volume_stats: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:44 +#, python-format +msgid "Could not find key in output of command %(cmd)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:64 +#, python-format +msgid "Failed to get code level (%s)." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:86 +#, python-format +msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:143 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:165 +#, python-format +msgid "Failed to find host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:178 +#, python-format +msgid "enter: get_host_from_connector: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:207 +#, python-format +msgid "leave: get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:218 +#, python-format +msgid "enter: create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:224 +msgid "create_host: Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:235 +msgid "create_host: No initiators or wwpns supplied." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:265 +#, python-format +msgid "leave: create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:275 +#, python-format +msgid "enter: map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:301 +#, python-format +msgid "" +"leave: map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host " +"%(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:311 +#, python-format +msgid "enter: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:318 +#, python-format +msgid "unmap_vol_from_host: No mapping of volume %(vol_name)s to any host found." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:324 +#, python-format +msgid "" +"unmap_vol_from_host: Multiple mappings of volume %(vol_name)s found, no " +"host specified." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:336 +#, python-format +msgid "" +"unmap_vol_from_host: No mapping of volume %(vol_name)s to host %(host) " +"found." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:348 +#, python-format +msgid "leave: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:377 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:383 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:390 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:397 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:402 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:408 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:417 +#, python-format +msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:452 +msgid "Protocol must be specified as ' iSCSI' or ' FC'." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:495 +#, python-format +msgid "enter: create_vdisk: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:498 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:525 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping%(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:535 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within theallotted %(to)d " +"seconds timeout. Terminating." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:544 +#, python-format +msgid "" +"enter: run_flashcopy: execute FlashCopy from source %(source)s to target " +"%(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:552 +#, python-format +msgid "leave: run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:572 +#, python-format +msgid "Loopcall: _check_vdisk_fc_mappings(), vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:595 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:631 +#, python-format +msgid "Calling _ensure_vdisk_no_fc_mappings: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:639 +#, python-format +msgid "enter: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:641 +#, python-format +msgid "Tried to delete non-existant vdisk %s." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:645 +#, python-format +msgid "leave: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:649 +#, python-format +msgid "enter: create_copy: snapshot %(src)s to %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:654 +#, python-format +msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:669 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt)s from vdisk %(src)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:691 +msgid "migrate_volume started without a vdisk copy in the expected pool." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:743 +#, python-format +msgid "" +"Ignore change IO group as storage code level is %(code_level)s, below " +"then 6.4.0.0" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:35 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:211 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:244 +#, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:53 +#, python-format +msgid "Expected no output from CLI command %(cmd)s, got %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:65 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:256 +#, python-format +msgid "" +"Failed to parse CLI output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:142 +msgid "Must pass wwpn or host to lsfabric." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:158 +#, python-format +msgid "Did not find success message nor error for %(fun)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:163 +msgid "" +"storwize_svc_multihostmap_enabled is set to False, not allowing multi " +"host mapping." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:347 +#, python-format +msgid "Did not find expected key %(key)s in %(fun)s: %(raw)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:382 +#, python-format +msgid "" +"Unexpected CLI response: header/row mismatch. header: %(header)s, row: " +"%(row)s" +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:419 +#, python-format +msgid "No element by given name %s." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:440 +msgid "Not a valid value for NaElement." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:444 +msgid "NaElement name cannot be null." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:468 +msgid "Type cannot be converted into NaElement." 
+msgstr "" + +#: cinder/volume/drivers/netapp/common.py:75 +msgid "Required configuration not found" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:103 +#, python-format +msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:109 +#, python-format +msgid "Storage family %s is not supported" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:116 +#, python-format +msgid "No default storage protocol found for storage family %(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:123 +#, python-format +msgid "" +"Protocol %(storage_protocol)s is not supported for storage family " +"%(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:130 +#, python-format +msgid "" +"NetApp driver of family %(storage_family)s and protocol " +"%(storage_protocol)s loaded" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:139 +msgid "Only loading netapp drivers supported." +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:158 +#, python-format +msgid "" +"The configured NetApp driver is deprecated. Please refer the link to " +"resolve the issue '%s'." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:69 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:105 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:150 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:166 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:175 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:191 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:227 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:232 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:238 +#, python-format +msgid "Failed to get LUN target details for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:249 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:252 +#, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:290 +#, python-format +msgid "Snapshot %s deletion successful" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:310 +#: cinder/volume/drivers/netapp/iscsi.py:565 +#: cinder/volume/drivers/netapp/nfs.py:99 +#: cinder/volume/drivers/netapp/nfs.py:206 +#, python-format +msgid "Resizing %s failed. Cleaning volume." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:325 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:412 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:431 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:511 +msgid "Object is not a NetApp LUN." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:543 +#, python-format +msgid "Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:545 +#, python-format +msgid "Error getting lun attribute. Exception: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:600 +#, python-format +msgid "No need to extend volume %s as it is already the requested new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:606 +#, python-format +msgid "Resizing lun %s directly to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:633 +#, python-format +msgid "Lun %(path)s geometry failed. Message - %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:662 +#, python-format +msgid "Moving lun %(name)s to %(new_name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:677 +#, python-format +msgid "Resizing lun %s using sub clone to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:684 +#, python-format +msgid "%s cannot be sub clone resized as it is hosted on compressed volume" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:690 +#, python-format +msgid "%s cannot be sub clone resized as it contains no blocks." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:707 +#, python-format +msgid "Post clone resize lun %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:718 +#, python-format +msgid "Failure staging lun %s to tmp." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:723 +#, python-format +msgid "Failure moving new cloned lun to %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:727 +#, python-format +msgid "Failure deleting staged tmp lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:730 +#, python-format +msgid "Unknown exception in post clone resize lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:732 +#, python-format +msgid "Exception details: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:736 +msgid "Getting lun block count." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:741 +#, python-format +msgid "Failure getting lun info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:785 +#, python-format +msgid "Failed to get vol with required size and extra specs for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:796 +#, python-format +msgid "Error provisioning vol %(name)s on %(volume)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:841 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:982 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:986 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1087 +msgid "Cluster ssc is not updated. No volume stats found." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1149 +#: cinder/volume/drivers/netapp/nfs.py:1080 +msgid "Unsupported ONTAP version. ONTAP version 7.3.1 and above is supported." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1153 +#: cinder/volume/drivers/netapp/nfs.py:1084 +#: cinder/volume/drivers/netapp/utils.py:320 +msgid "Api version could not be determined." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1164 +#, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1273 +#, python-format +msgid "Error finding luns for volume %s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1390 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1393 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1456 +msgid "Volume refresh job already running. Returning..." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1462 +#, python-format +msgid "Error refreshing vol capacity. Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1470 +#, python-format +msgid "Refreshing capacity info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:104 +#: cinder/volume/drivers/netapp/nfs.py:211 +#, python-format +msgid "NFS file %s not discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:224 +#, python-format +msgid "Copied image to volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:230 +#, python-format +msgid "Registering image in cache %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:237 +#, python-format +msgid "" +"Exception while registering image %(image_id)s in cache. Exception: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:250 +#, python-format +msgid "Found cache file for image %(image_id)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:263 +#, python-format +msgid "Cloning img from cache for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:271 +msgid "Image cache cleaning in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:282 +msgid "Image cache cleaning in progress." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:293 +#, python-format +msgid "Cleaning cache for share %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:298 +#, python-format +msgid "Files to be queued for deletion %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:305 +#, python-format +msgid "Exception during cache cleaning %(share)s. Message - %(ex)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:311 +msgid "Image cache cleaning done." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:336 +#, python-format +msgid "Bytes to free %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:343 +#, python-format +msgid "Delete file path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:358 +#, python-format +msgid "Deleting file at path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:363 +#, python-format +msgid "Exception during deleting %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:395 +#, python-format +msgid "Unexpected exception in cloning image %(image_id)s. 
Message: %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:411 +#, python-format +msgid "Cloning image %s from cache" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:415 +#, python-format +msgid "Cache share: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:425 +#, python-format +msgid "Unexpected exception during image cloning in share %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:431 +#, python-format +msgid "Cloning image %s directly in share" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:436 +#, python-format +msgid "Share is cloneable %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:443 +#, python-format +msgid "Image is raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:450 +#, python-format +msgid "Image will locally be converted to raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:457 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:467 +#, python-format +msgid "Performing post clone for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:474 +msgid "NFS file could not be discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:478 +msgid "Checking file for resize" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:482 +#, python-format +msgid "Resizing file to %sG" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:488 +msgid "Resizing image file failed." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:510 +msgid "Discover file retries exhausted." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:529 +#, python-format +msgid "Image location not in the expected format %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:557 +#, python-format +msgid "Found possible share matches %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:561 +msgid "Unexpected exception while short listing used share." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:599 +#, python-format +msgid "Extending volume %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:710 +#, python-format +msgid "Shares on vserver %s will only be used for provisioning." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:714 +#: cinder/volume/drivers/netapp/nfs.py:892 +msgid "No vserver set in config. SSC will be disabled." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:757 +#, python-format +msgid "Exception creating vol %(name)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:765 +#, python-format +msgid "Volume %s could not be created on shares." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:815 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:856 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:862 +#, python-format +msgid "" +"Cloning with params volume %(volume)s, src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:924 +msgid "No cluster ssc stats found. Wait for next volume stats update." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:941 +msgid "No shares found hence skipping ssc refresh." 
+msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:978 +#: cinder/volume/drivers/netapp/nfs.py:1221 +#, python-format +msgid "Shortlisted del elg files %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:983 +#: cinder/volume/drivers/netapp/nfs.py:1226 +#, python-format +msgid "Getting file usage for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:988 +#: cinder/volume/drivers/netapp/nfs.py:1231 +#, python-format +msgid "file-usage for path %(path)s is %(bytes)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1005 +#: cinder/volume/drivers/netapp/nfs.py:1268 +#, python-format +msgid "Share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1007 +#: cinder/volume/drivers/netapp/nfs.py:1270 +#, python-format +msgid "No share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1038 +#, python-format +msgid "Found volume %(vol)s for share %(share)s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1129 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1139 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:241 +#, python-format +msgid "Unexpected error while creating ssc vol list. Message - %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:272 +#, python-format +msgid "Exception querying aggr options. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:313 +#, python-format +msgid "Exception querying sis information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:347 +#, python-format +msgid "Exception querying mirror information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:379 +#, python-format +msgid "Exception querying storage disk. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:421 +#, python-format +msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:455 +#, python-format +msgid "Successfully completed stale refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:482 +#, python-format +msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:488 +#, python-format +msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:500 +msgid "Backend not a VolumeDriver." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:502 +msgid "Backend server not NaServer." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:505 +msgid "ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:517 +msgid "refresh stale ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:616 +msgid "Fatal error: User not permitted to query NetApp volumes." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:623 +#, python-format +msgid "" +"The user does not have access or sufficient privileges to use all ssc " +"apis. The ssc features %s may not work as expected." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:122 +msgid "ems executed successfully." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:124 +#, python-format +msgid "Failed to invoke ems. 
Message: %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/utils.py:137
+msgid ""
+"Using the NetApp drivers directly is not recommended. Please use "
+"NetAppDriver to achieve the functionality."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/utils.py:160
+msgid "Requires an NaServer instance."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/utils.py:317
+msgid "Unsupported Clustered Data ONTAP version."
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:99
+#, python-format
+msgid "Volume %s does not exist in Nexenta SA"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:150
+#, python-format
+msgid "Extending volume: %(id)s. New size: %(size)s GB"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:166
+#, python-format
+msgid "Volume %s does not exist, it seems it was already deleted."
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:179
+#, python-format
+msgid "Cannot delete snapshot %(origin)s: %(exc)s"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:190
+#, python-format
+msgid "Creating temp snapshot of the original volume: %(volume_name)s@%(name)s"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:200
+#: cinder/volume/drivers/nexenta/nfs.py:200
+#, python-format
+msgid "Volume creation failed, deleting created snapshot %(volume_name)s@%(name)s"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:205
+#: cinder/volume/drivers/nexenta/nfs.py:205
+#, python-format
+msgid "Failed to delete zfs snapshot %(volume_name)s@%(name)s"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:223
+#, python-format
+msgid "Enter: migrate_volume: id=%(id)s, host=%(host)s"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:250
+#, python-format
+msgid "Remote NexentaStor appliance at %s should be SSH-bound."
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:267
+#, python-format
+msgid ""
+"Cannot send source snapshot %(src)s to destination %(dst)s. Reason: "
+"%(exc)s"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:275
+#, python-format
+msgid ""
+"Cannot delete temporary source snapshot %(src)s on NexentaStor Appliance:"
+" %(exc)s"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:281
+#, python-format
+msgid "Cannot delete source volume %(volume)s on NexentaStor Appliance: %(exc)s"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:318
+#, python-format
+msgid "Snapshot %s does not exist, it seems it was already deleted."
+msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:439 +#: cinder/volume/drivers/windows/windows_utils.py:230 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:449 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:461 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:471 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:481 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:514 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:522 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:83 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:88 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:89 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:90 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:96 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:85 +#, python-format +msgid "Volume %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:89 +#, python-format +msgid "Folder %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:114 +#, python-format +msgid "Creating folder on Nexenta Store %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:146 +#, python-format +msgid "Cannot destroy created folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:176 +#, python-format +msgid "Cannot destroy cloned folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:227 +#, python-format +msgid "Folder %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:237 +#: cinder/volume/drivers/nexenta/nfs.py:268 +#, python-format +msgid "Snapshot %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:302 +#, python-format +msgid "Creating regular file: %s.This may take some time." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:313 +#, python-format +msgid "Regular file: %s created." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:365 +#, python-format +msgid "Sharing folder %s on Nexenta Store" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:393 +#, python-format +msgid "Shares loaded: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/utils.py:46 +#, python-format +msgid "Invalid value: \"%s\"" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:93 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:99 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/san/hp_lefthand.py:107
+#, python-format
+msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/san/hp_lefthand.py:137
+#, python-format
+msgid ""
+"Unexpected number of virtual IPs for cluster %(cluster_name)s. "
+"Result=%(_xml)s"
+msgstr ""
+
+#: cinder/volume/drivers/san/hp_lefthand.py:190
+#, python-format
+msgid "Volume info: %(volume_name)s => %(volume_attributes)s"
+msgstr ""
+
+#: cinder/volume/drivers/san/hp_lefthand.py:246
+#, python-format
+msgid "Snapshot info: %(name)s => %(attributes)s"
+msgstr ""
+
+#: cinder/volume/drivers/san/hp_lefthand.py:321
+msgid "local_path not supported"
+msgstr ""
+
+#: cinder/volume/drivers/san/san.py:169
+msgid "Specify san_password or san_private_key"
+msgstr ""
+
+#: cinder/volume/drivers/san/san.py:173
+msgid "san_ip must be set"
+msgstr ""
+
+#: cinder/volume/drivers/san/solaris.py:79
+#, python-format
+msgid "Cannot parse list-view output: %s"
+msgstr ""
+
+#: cinder/volume/drivers/san/solaris.py:174
+#, python-format
+msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:166
+#, python-format
+msgid "Invalid hp3parclient version. Version %s or greater required."
+msgstr ""
+
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:179
+#, python-format
+msgid "Failed to log in to 3PAR (%(url)s) because %(err)s"
+msgstr ""
+
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:193
+#, python-format
+msgid "HP3PARCommon %(common_ver)s, hp3parclient %(rest_ver)s"
+msgstr ""
+
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:212
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:494
+#, python-format
+msgid "CPG (%s) doesn't exist on array"
+msgstr ""
+
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:228
+#, python-format
+msgid "Failed to get domain because CPG (%s) doesn't exist on array."
+msgstr ""
+
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:247
+#, python-format
+msgid "Error extending volume %s"
+msgstr ""
+
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:347
+#, python-format
+msgid "command %s failed"
+msgstr ""
+
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:390
+#, python-format
+msgid "Error running ssh command: %s"
+msgstr ""
+
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:586
+#, python-format
+msgid "VV Set %s does not exist."
+msgstr ""
+
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:633
+#, python-format
+msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid."
+msgstr ""
+
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:684
+#, python-format
+msgid ""
+"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is "
+"invalid."
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:752 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1004 +#, python-format +msgid "Failure in update_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1019 +#, python-format +msgid "Failure in clear_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1031 +#, python-format +msgid "Error attaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1039 +#, python-format +msgid "Error detaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:124 +#, python-format +msgid "Invalid IP address format '%s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:158 +#, python-format +msgid "" +"Found invalid iSCSI IP address(s) in configuration option(s) " +"hp3par_iscsi_ips or iscsi_ip_address '%s.'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:164 +msgid "At least one valid iSCSI IP address must be set." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:266 +msgid "Least busy iSCSI port not found, using first iSCSI port in list." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:75 +#, python-format +msgid "Failure while invoking function: %(func)s. Error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:162 +#, python-format +msgid "Error while terminating session: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:165 +msgid "Successfully established connection to the server." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:172 +#, python-format +msgid "Error while logging out the user: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:218 +#, python-format +msgid "" +"Not authenticated error occurred. Will create session and try API call " +"again: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:258 +#, python-format +msgid "Task: %(task)s progress: %(prog)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:262 +#, python-format +msgid "Task %s status: success." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:266 +#: cinder/volume/drivers/vmware/api.py:271 +#, python-format +msgid "Task: %(task)s failed with error: %(err)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:290 +msgid "Lease is ready." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:294 +msgid "Lease initializing..." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:304 +#, python-format +msgid "Error: unknown lease state %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:51 +#, python-format +msgid "Read %(bytes)s out of %(max)s from ThreadSafePipe." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:56 +#, python-format +msgid "Completed transfer of size %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:102 +#, python-format +msgid "Initiating image service update on image: %(image)s with meta: %(meta)s" +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:117 +#, python-format +msgid "Glance image: %s is now active." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:123 +#, python-format +msgid "Glance image: %s is in killed state." 
+msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:132 +#, python-format +msgid "Glance image %(id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:171 +#, python-format +msgid "" +"Exception during HTTP connection close in VMwareHTTPWrite. Exception is " +"%s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:203 +#: cinder/volume/drivers/vmware/read_write_util.py:292 +msgid "Could not retrieve URL from lease." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:206 +#, python-format +msgid "Opening vmdk url: %s for write." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:231 +#, python-format +msgid "Written %s bytes to vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:242 +#: cinder/volume/drivers/vmware/read_write_util.py:318 +#, python-format +msgid "Updating progress to %s percent." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:258 +#: cinder/volume/drivers/vmware/read_write_util.py:334 +msgid "Lease released." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:260 +#: cinder/volume/drivers/vmware/read_write_util.py:336 +#, python-format +msgid "Lease is already in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:295 +#, python-format +msgid "Opening vmdk url: %s for read." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:307 +#, python-format +msgid "Read %s bytes from vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:150 +#, python-format +msgid "Error(s): %s occurred in the call to RetrievePropertiesEx." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:189 +#, python-format +msgid "No such SOAP method %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:198 +#, python-format +msgid "httplib error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:209 +#, python-format +msgid "Socket error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:218 +#, python-format +msgid "Type error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:225 +#, python-format +msgid "Error in %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:112 +#, python-format +msgid "Returning spec value %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:115 +#, python-format +msgid "Invalid spec value: %s specified." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:118 +#, python-format +msgid "Returning default spec value: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:169 +#, python-format +msgid "%s not set." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:174 +#, python-format +msgid "Successfully setup driver: %(driver)s for server: %(ip)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:219 +msgid "Backing not available, no operation to be performed." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:287 +#, python-format +msgid "" +"Unable to pick datastore to accommodate %(size)s bytes from the " +"datastores: %(dss)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:293 +#, python-format +msgid "" +"Selected datastore: %(datastore)s with %(host_count)d connected host(s) " +"for the volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:375 +#, python-format +msgid "" +"Unable to find suitable datastore for volume of size: %(vol)s GB under " +"host: %(host)s. 
More details: %(excep)s"
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:385
+#, python-format
+msgid "Unable to find host to accommodate a disk of size: %s in the inventory."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:412
+#, python-format
+msgid ""
+"Unable to find suitable datastore for volume: %(vol)s under host: "
+"%(host)s. More details: %(excep)s"
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:422
+#, python-format
+msgid "Unable to create volume: %s in the inventory."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:441
+#, python-format
+msgid "The instance: %s, for which initialize connection is called, exists."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:448
+#, python-format
+msgid "There is no backing for the volume: %s. Need to create one."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:456
+msgid "The instance for which initialize connection is called does not exist."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:461
+#, python-format
+msgid "Trying to boot from an empty volume: %s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:470
+#, python-format
+msgid ""
+"Returning connection_info: %(info)s for volume: %(volume)s with "
+"connector: %(connector)s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:518
+#, python-format
+msgid "Snapshot of volume not supported in state: %s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:523
+#, python-format
+msgid "There is no backing, so will not create snapshot: %s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:528
+#, python-format
+msgid "Successfully created snapshot: %s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:549
+#, python-format
+msgid "Delete snapshot of volume not supported in state: %s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:554
+#, python-format
+msgid "There is no backing, and so there is no snapshot: %s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:558
+#, python-format
+msgid "Successfully deleted snapshot: %s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:586
+#, python-format
+msgid "Successfully cloned new backing: %(back)s from source VMDK file: %(vmdk)s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:603
+#, python-format
+msgid ""
+"There is no backing for the source volume: %(svol)s. Not creating any "
+"backing for the volume: %(vol)s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:633
+#, python-format
+msgid ""
+"There is no backing for the source snapshot: %(snap)s. Not creating any "
+"backing for the volume: %(vol)s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:642
+#: cinder/volume/drivers/vmware/vmdk.py:982
+#, python-format
+msgid ""
+"There is no snapshot point for the snapshotted volume: %(snap)s. Not "
+"creating any backing for the volume: %(vol)s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:678
+#, python-format
+msgid "Cannot create image of disk format: %s. Only vmdk disk format is accepted."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:713
+#: cinder/volume/drivers/vmware/vmdk.py:771
+#, python-format
+msgid "Fetching glance image: %(id)s to server: %(host)s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:722
+#: cinder/volume/drivers/vmware/vmdk.py:792
+#, python-format
+msgid "Done copying image: %(id)s to volume: %(vol)s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:725
+#, python-format
+msgid ""
+"Exception in copy_image_to_volume: %(excep)s. Deleting the backing: "
+"%(back)s."
+msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:746 +#, python-format +msgid "Exception in _select_ds_for_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:749 +#, python-format +msgid "Selected datastore %(ds)s for new volume of size %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:784 +#, python-format +msgid "Exception in copy_image_to_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:787 +#, python-format +msgid "Deleting the backing: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:808 +#, python-format +msgid "Copy glance image: %s to create new volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:842 +msgid "Upload to glance of attached volume is not supported." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:847 +#, python-format +msgid "Copy Volume: %s to new image." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:853 +#, python-format +msgid "Backing not found, creating for volume: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:872 +#, python-format +msgid "Done copying volume %(vol)s to a new image %(img)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:922 +#, python-format +msgid "Relocating volume: %(backing)s to %(ds)s and %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:961 +#: cinder/volume/drivers/vmware/volumeops.py:630 +#, python-format +msgid "Successfully created clone: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:974 +#, python-format +msgid "" +"There is no backing for the snapshoted volume: %(snap)s. Not creating any" +" backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1010 +#, python-format +msgid "" +"There is no backing for the source volume: %(src)s. Not creating any " +"backing for volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1018 +#, python-format +msgid "Linked clone of source volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:94 +#, python-format +msgid "Downloading image: %s from glance image server as a flat vmdk file." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:107 +#: cinder/volume/drivers/vmware/vmware_images.py:126 +#, python-format +msgid "Downloaded image: %s from glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:113 +#, python-format +msgid "Downloading image: %s from glance image server using HttpNfc import." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:132 +#, python-format +msgid "Uploading image: %s to the Glance image server using HttpNfc export." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:158 +#, python-format +msgid "Uploaded image: %s to the Glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:87 +#, python-format +msgid "Did not find any backing with name: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:94 +#, python-format +msgid "Deleting the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:97 +#, python-format +msgid "Initiated deletion of VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:99 +#, python-format +msgid "Deleted the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:237 +#, python-format +msgid "There are no valid datastores attached to %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:289 +#, python-format +msgid "" +"Creating folder: %(child_folder_name)s under parent folder: " +"%(parent_folder)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:306 +#, python-format +msgid "Child folder already present: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:314 +#, python-format +msgid "Created child folder: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:365 +#, python-format +msgid "Spec for creating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:383 +#, python-format +msgid "" +"Creating volume backing name: %(name)s disk_type: %(disk_type)s size_kb: " +"%(size_kb)s at folder: %(folder)s resourse pool: %(resource_pool)s " +"datastore name: %(ds_name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:395 +#, python-format +msgid "Initiated creation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:398 +#, python-format +msgid "Successfully created volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:438 +#, python-format +msgid "Spec for relocating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:452 +#, python-format +msgid "" +"Relocating backing: %(backing)s to datastore: %(ds)s and resource pool: " +"%(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:462 +#, python-format +msgid "Initiated relocation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:464 +#, python-format +msgid "" +"Successfully relocated volume backing: %(backing)s to datastore: %(ds)s " +"and resource pool: %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:474 +#, python-format +msgid "Moving backing: %(backing)s to folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:479 +#, python-format +msgid "Initiated move of volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:482 +#, python-format +msgid "Successfully moved volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:494 +#, python-format +msgid "Snapshoting backing: %(backing)s with name: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:501 +#, python-format +msgid "Initiated snapshot of volume backing: %(backing)s named: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:505 +#, python-format +msgid "Successfully created snapshot: %(snap)s for volume backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:553 +#, python-format +msgid "Deleting the snapshot: %(name)s from backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:558 +#, python-format +msgid "" +"Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " +"delete anything." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:565 +#, python-format +msgid "Initiated snapshot: %(name)s deletion for backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:569 +#, python-format +msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:597 +#, python-format +msgid "Spec for cloning the backing: %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:613 +#, python-format +msgid "" +"Creating a clone of backing: %(back)s, named: %(name)s, clone type: " +"%(type)s from snapshot: %(snap)s on datastore: %(ds)s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:627 +#, python-format +msgid "Initiated clone of backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:638 +#, python-format +msgid "Deleting file: %(file)s under datacenter: %(dc)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:646 +#, python-format +msgid "Initiated deletion via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:648 +#, python-format +msgid "Successfully deleted file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:701 +msgid "Copying disk data before snapshot of the VM" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:711 +#, python-format +msgid "Initiated copying disk data via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:713 +#, python-format +msgid "Successfully copied disk at: %(src)s to: %(dest)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:722 +#, python-format +msgid "Deleting vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:729 +#, python-format +msgid "Initiated deleting vmdk file via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:731 +#, python-format +msgid "Deleted vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/windows/windows.py:102 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:47 +#, python-format +msgid "" +"check_for_setup_error: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:53 +msgid "check_for_setup_error: there is no ISCSI traffic listening." +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:63 +#, python-format +msgid "" +"get_host_information: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:73 +#, python-format +msgid "" +"get_host_information: the ISCSI target information could not be " +"retrieved. WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:105 +#, python-format +msgid "" +"associate_initiator_with_iscsi_target: an association between initiator: " +"%(init)s and target name: %(target)s could not be established. WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:123 +#, python-format +msgid "" +"delete_iscsi_target: error when deleting the iscsi target associated with" +" target name: %(target)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:139 +#, python-format +msgid "" +"create_volume: error when creating the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:157 +#, python-format +msgid "" +"delete_volume: error when deleting the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:177 +#, python-format +msgid "" +"create_snapshot: error when creating the snapshot name: %(vol_name)s . 
" +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:193 +#, python-format +msgid "" +"create_volume_from_snapshot: error when creating the volume name: " +"%(vol_name)s from snapshot name: %(snap_name)s. WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:208 +#, python-format +msgid "" +"delete_snapshot: error when deleting the snapshot name: %(snap_name)s . " +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:223 +#, python-format +msgid "" +"create_iscsi_target: error when creating iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:240 +#, python-format +msgid "" +"remove_iscsi_target: error when deleting iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:255 +#, python-format +msgid "" +"add_disk_to_target: error adding disk associated to volume : %(vol_name)s" +" to the target name: %(tar_name)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:273 +#, python-format +msgid "" +"copy_vhd_disk: error when copying disk from source path : %(src_path)s to" +" destination path: %(dest_path)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:290 +#, python-format +msgid "" +"extend: error when extending the volume: %(vol_name)s .WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/flows/common.py:52 +#, python-format +msgid "Restoring source %(source_volid)s status to %(status)s" +msgstr "" + +#: cinder/volume/flows/common.py:58 +#, python-format +msgid "" +"Failed setting source volume %(source_volid)s back to its initial " +"%(source_status)s status" +msgstr "" + +#: cinder/volume/flows/common.py:83 +#, python-format +msgid "Updating volume: %(volume_id)s with %(update)s due to: %(reason)s" +msgstr "" + +#: cinder/volume/flows/common.py:90 +#: cinder/volume/flows/manager/create_volume.py:676 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(update)s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:81 +#, python-format +msgid "Originating snapshot status must be one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:103 +#, python-format +msgid "" +"Unable to create a volume from an originating source volume when its " +"status is not one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:126 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than the snapshot size " +"%(snap_size)sGB. They must be >= original snapshot size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:135 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than original volume size " +"%(source_size)sGB. They must be >= original volume size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:144 +#, python-format +msgid "Volume size %(size)s must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:186 +#, python-format +msgid "" +"Size of specified image %(image_size)sGB is larger than volume size " +"%(volume_size)sGB." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:194 +#, python-format +msgid "" +"Volume size %(volume_size)sGB cannot be smaller than the image minDisk " +"size %(min_disk)sGB." 
+msgstr "" + +#: cinder/volume/flows/api/create_volume.py:212 +#, python-format +msgid "Metadata property key %s greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:217 +#, python-format +msgid "Metadata property key %s value greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:254 +#, python-format +msgid "Availability zone '%s' is invalid" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:267 +msgid "Volume must be in the same availability zone as the snapshot" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:276 +msgid "Volume must be in the same availability zone as the source volume" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:315 +msgid "Volume type will be changed to be the same as the source volume." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:463 +#, python-format +msgid "Failed destroying volume entry %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:546 +#, python-format +msgid "Failed rolling back quota for %s reservations" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:590 +#, python-format +msgid "Failed to update quota for deleting volume: %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:678 +#: cinder/volume/flows/manager/create_volume.py:192 +#, python-format +msgid "Volume %s: create failed" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:682 +msgid "Unexpected build error:" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:105 +#, python-format +msgid "" +"Volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d due to " +"%(reason)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:124 +#, python-format +msgid "Volume %s: re-scheduled" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:141 +#, python-format +msgid "Updating volume %(volume_id)s with %(update)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:146 +#, python-format +msgid "Volume %s: resetting 'creating' status failed." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:165 +#, python-format +msgid "Volume %s: rescheduling failed" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:308 +#, python-format +msgid "" +"Failed notifying about the volume action %(event)s for volume " +"%(volume_id)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:345 +#, python-format +msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:347 +#, python-format +msgid "" +"Failed updating volume %(vol_id)s metadata using the provided " +"%(src_type)s %(src_id)s metadata" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:405 +#, python-format +msgid "" +"Failed fetching snapshot %(snapshot_id)s bootable flag using the provided" +" glance snapshot %(snapshot_ref_id)s volume reference" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:418 +#, python-format +msgid "Marking volume %s as bootable." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:421 +#, python-format +msgid "Failed updating volume %(volume_id)s bootable flag to true" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:448 +#, python-format +msgid "" +"Attempting download of %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s." 
+msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:455 +#: cinder/volume/flows/manager/create_volume.py:466 +#, python-format +msgid "" +"Failed to copy image %(image_id)s to volume: %(volume_id)s, error: " +"%(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:461 +#, python-format +msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:475 +#, python-format +msgid "" +"Downloaded image %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s successfully." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:512 +#, python-format +msgid "" +"Creating volume glance metadata for volume %(volume_id)s backed by image " +"%(image_id)s with: %(vol_metadata)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:526 +#, python-format +msgid "" +"Cloning %(volume_id)s from image %(image_id)s at location " +"%(image_location)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:552 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(updates)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:574 +#, python-format +msgid "Unable to create volume. Volume driver %s not initialized" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:588 +#, python-format +msgid "" +"Volume %(volume_id)s: being created using %(functor)s with specification:" +" %(volume_spec)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:611 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with creation provided " +"model %(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:619 +#, python-format +msgid "Volume %s: creating export" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:633 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with driver provided model " +"%(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:680 +#, python-format +msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" +msgstr "" + +#~ msgid "Connection to glance failed" +#~ msgstr "" + +#~ msgid "Invalid snapshot" +#~ msgstr "" + +#~ msgid "Invalid input received" +#~ msgstr "" + +#~ msgid "Invalid volume type" +#~ msgstr "" + +#~ msgid "Invalid volume" +#~ msgstr "" + +#~ msgid "Invalid host" +#~ msgstr "" + +#~ msgid "Invalid auth key" +#~ msgstr "" + +#~ msgid "Invalid metadata" +#~ msgstr "" + +#~ msgid "Invalid metadata size" +#~ msgstr "" + +#~ msgid "Migration error" +#~ msgstr "" + +#~ msgid "Quota exceeded" +#~ msgstr "" + +#~ msgid "Connection to swift failed" +#~ msgstr "" + +#~ msgid "Volume migration failed" +#~ msgstr "" + +#~ msgid "SSH command injection detected" +#~ msgstr "" + +#~ msgid "Invalid qos specs" +#~ msgstr "" + +#~ msgid "debug in callback: %s" +#~ msgstr "" + +#~ msgid "Expected object of type: %s" +#~ msgstr "" + +#~ msgid "timefunc: '%(name)s' took %(total_time).2f secs" +#~ msgstr "" + +#~ msgid "base image still has %s snapshots so not deleting base image" +#~ msgstr "" + +#~ msgid "Failed to rename migration destination volume %(vol)s to %(name)s" +#~ msgstr "" + +#~ msgid "Resize volume \"%(name)s\" to %(size)s" +#~ msgstr "" + +#~ msgid "Volume \"%(name)s\" resized. 
New size is %(size)s" +#~ msgstr "" + +#~ msgid "Invalid snapshot backing file format: %s" +#~ msgstr "" + +#~ msgid "Extend volume from %(old_size) to %(new_size)" +#~ msgstr "" + +#~ msgid "pool %s doesn't exist" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Could not get system name." +#~ msgstr "" + +#~ msgid "Disk not found: %s" +#~ msgstr "" + +#~ msgid "read timed out" +#~ msgstr "" + +#~ msgid "check_for_setup_error." +#~ msgstr "" + +#~ msgid "check_for_setup_error: Can not get device type." +#~ msgstr "" + +#~ msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +#~ msgstr "" + +#~ msgid "" +#~ msgstr "" + +#~ msgid "_get_device_type: Storage Pool must be configured." +#~ msgstr "" + +#~ msgid "create_volume:volume name: %s." +#~ msgstr "" + +#~ msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +#~ msgstr "" + +#~ msgid "create_export: volume name:%s" +#~ msgstr "" + +#~ msgid "create_export:Volume %(name)s does not exist." +#~ msgstr "" + +#~ msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +#~ msgstr "" + +#~ msgid "terminate_connection:Host does not exist. Host name:%(host)s." +#~ msgstr "" + +#~ msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +#~ msgstr "" + +#~ msgid "create_snapshot:Device does not support snapshot." +#~ msgstr "" + +#~ msgid "create_snapshot:Resource pool needs 1GB valid size at least." +#~ msgstr "" + +#~ msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +#~ msgstr "" + +#~ msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s" +#~ msgstr "" + +#~ msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +#~ msgstr "" + +#~ msgid "delete_snapshot:Device does not support snapshot." +#~ msgstr "" + +#~ msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +#~ msgstr "" + +#~ msgid "_check_conf_file: %s" +#~ msgstr "" + +#~ msgid "Write login information to xml error. %s" +#~ msgstr "" + +#~ msgid "_get_login_info error. %s" +#~ msgstr "" + +#~ msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +#~ msgstr "" + +#~ msgid "_get_lun_set_info:%s" +#~ msgstr "" + +#~ msgid "_get_iscsi_info:%s" +#~ msgstr "" + +#~ msgid "CLI command:%s" +#~ msgstr "" + +#~ msgid "_execute_cli:%s" +#~ msgstr "" + +#~ msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +#~ msgstr "" + +#~ msgid "_get_tgt_iqn:iSCSI IP is %s." +#~ msgstr "" + +#~ msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +#~ msgstr "" + +#~ msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +#~ msgstr "" + +#~ msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +#~ msgstr "" + +#~ msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." +#~ msgstr "" + +#~ msgid "Ignored target creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group member addition error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LU creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LUN mapping entry addition error while ensuring export" +#~ msgstr "" + +#~ msgid "Invalid source volume %(reason)s." +#~ msgstr "" + +#~ msgid "The request is invalid." +#~ msgstr "" + +#~ msgid "Volume %(volume_id)s persistence file could not be found." 
+#~ msgstr "" + +#~ msgid "No disk at %(location)s" +#~ msgstr "" + +#~ msgid "Class %(class_name)s could not be found: %(exception)s" +#~ msgstr "" + +#~ msgid "Action not allowed." +#~ msgstr "" + +#~ msgid "Key pair %(key_name)s already exists." +#~ msgstr "" + +#~ msgid "Migration error: %(reason)s" +#~ msgstr "" + +#~ msgid "Maximum volume/snapshot size exceeded" +#~ msgstr "" + +#~ msgid "3PAR Host already exists: %(err)s. %(info)s" +#~ msgstr "" + +#~ msgid "Backup volume %(volume_id)s type not recognised." +#~ msgstr "" + +#~ msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s" +#~ msgstr "" + +#~ msgid "ssh_read: Read SSH timeout" +#~ msgstr "" + +#~ msgid "do_setup." +#~ msgstr "" + +#~ msgid "create_volume: volume name: %s." +#~ msgstr "" + +#~ msgid "delete_volume: volume name: %s." +#~ msgstr "" + +#~ msgid "create_cloned_volume: src volume: %(src)s tgt volume: %(tgt)s" +#~ msgstr "" + +#~ msgid "create_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" +#~ msgstr "" + +#~ msgid "delete_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" +#~ msgstr "" + +#~ msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Updating volume stats" +#~ msgstr "" + +#~ msgid "restore finished." +#~ msgstr "" + +#~ msgid "Error encountered during initialization of driver: %s" +#~ msgstr "" + +#~ msgid "Unabled to update stats, driver is uninitialized" +#~ msgstr "" + +#~ msgid "Snapshot file at %s does not exist." +#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %s does not exist" +#~ msgstr "" + +#~ msgid "Login to 3PAR array invalid" +#~ msgstr "" + +#~ msgid "There are no datastores present under %s." +#~ msgstr "" + +#~ msgid "Size for volume: %s not found, skipping secure delete." +#~ msgstr "" + +#~ msgid "Could not find attribute for LUN named %s" +#~ msgstr "" + +#~ msgid "Cleaning up incomplete backup operations" +#~ msgstr "" + +#~ msgid "Resetting volume %s to available (was backing-up)" +#~ msgstr "" + +#~ msgid "Resetting volume %s to error_restoring (was restoring-backup)" +#~ msgstr "" + +#~ msgid "Resetting backup %s to error (was creating)" +#~ msgstr "" + +#~ msgid "Resetting backup %s to available (was restoring)" +#~ msgstr "" + +#~ msgid "Resuming delete on backup: %s" +#~ msgstr "" + +#~ msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +#~ msgstr "" + +#~ msgid "create_backup finished. backup: %s" +#~ msgstr "" + +#~ msgid "delete_backup started, backup: %s" +#~ msgstr "" + +#~ msgid "delete_backup finished, backup %s deleted" +#~ msgstr "" + +#~ msgid "JSON transfer Error" +#~ msgstr "" + +#~ msgid "create volume error: %(err)s" +#~ msgstr "" + +#~ msgid "Create snapshot error." +#~ msgstr "" + +#~ msgid "Create luncopy error." +#~ msgstr "" + +#~ msgid "_find_host_lun_id transfer data error! " +#~ msgstr "" + +#~ msgid "ssh_read: Read SSH timeout." +#~ msgstr "" + +#~ msgid "There are no hosts in the inventory." +#~ msgstr "" + +#~ msgid "Unable to create volume: %(vol)s on the hosts: %(hosts)s." +#~ msgstr "" + +#~ msgid "Successfully cloned new backing: %s." +#~ msgstr "" + +#~ msgid "Successfully reverted clone: %(clone)s to snapshot: %(snapshot)s." +#~ msgstr "" + +#~ msgid "Copying backing files from %(src)s to %(dest)s." +#~ msgstr "" + +#~ msgid "Initiated copying of backing via task: %s." +#~ msgstr "" + +#~ msgid "Successfully copied backing to %s." +#~ msgstr "" + +#~ msgid "Registering backing at path: %s to inventory." 
+#~ msgstr "" + +#~ msgid "Initiated registring backing, task: %s." +#~ msgstr "" + +#~ msgid "Successfully registered backing: %s." +#~ msgstr "" + +#~ msgid "Reverting backing to snapshot: %s." +#~ msgstr "" + +#~ msgid "Initiated reverting snapshot via task: %s." +#~ msgstr "" + +#~ msgid "Successfully reverted to snapshot: %s." +#~ msgstr "" + +#~ msgid "Successfully copied disk data to: %s." +#~ msgstr "" + +#~ msgid "Error(s): %s occurred in the call to RetrieveProperties." +#~ msgstr "" + +#~ msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +#~ msgstr "" + +#~ msgid "Deploy v1 of the Cinder API. " +#~ msgstr "" + +#~ msgid "Deploy v2 of the Cinder API. " +#~ msgstr "" + +#~ msgid "_read_xml:%s" +#~ msgstr "" + +#~ msgid "request ip info is %s." +#~ msgstr "" + +#~ msgid "new str info is %s." +#~ msgstr "" + +#~ msgid "Failed to create iser target for volume %(volume_id)s." +#~ msgstr "" + +#~ msgid "Failed to remove iser target for volume %(volume_id)s." +#~ msgstr "" + +#~ msgid "rtstool is not installed correctly" +#~ msgstr "" + +#~ msgid "Creating iser_target for: %s" +#~ msgstr "" + +#~ msgid "Failed to create iser target for volume id:%(vol_id)s: %(e)s" +#~ msgstr "" + +#~ msgid "Removing iser_target for: %s" +#~ msgstr "" + +#~ msgid "Failed to remove iser target for volume id:%(vol_id)s: %(e)s" +#~ msgstr "" + +#~ msgid "Volume %s does not exist, it seems it was already deleted" +#~ msgstr "" + +#~ msgid "Executing zfs send/recv on the appliance" +#~ msgstr "" + +#~ msgid "zfs send/recv done, new volume %s created" +#~ msgstr "" + +#~ msgid "Failed to delete temp snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" + +#~ msgid "Failed to delete zfs recv snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" + +#~ msgid "rbd export-diff failed - %s" +#~ msgstr "" + +#~ msgid "rbd import-diff failed - %s" +#~ msgstr "" + +#~ msgid "%s is not on GPFS. Perhaps GPFS not mounted." +#~ msgstr "" + +#~ msgid "Folder %s does not exist, it seems it was already deleted." +#~ msgstr "" + +#~ msgid "No 'os-update_readonly_flag' was specified in request." +#~ msgstr "" + +#~ msgid "Volume 'readonly' flag must be specified in request as a boolean." +#~ msgstr "" + +#~ msgid "ISER provider_location not stored, using discovery" +#~ msgstr "" + +#~ msgid "Could not find iSER export for volume %s" +#~ msgstr "" + +#~ msgid "ISER Discovery: Found %s" +#~ msgstr "" + +#~ msgid "Failed to access the device on the path %(path)s: %(error)s." +#~ msgstr "" + +#~ msgid "iSER device not found at %s" +#~ msgstr "" + +#~ msgid "Found iSER node %(host_device)s (after %(tries)s rescans)." +#~ msgstr "" + +#~ msgid "Skipping ensure_export. No iser_target provisioned for volume: %s" +#~ msgstr "" + +#~ msgid "Skipping remove_export. No iser_target provisioned for volume: %s" +#~ msgstr "" + +#~ msgid "Downloading image: %s from glance image server." +#~ msgstr "" + +#~ msgid "Uploading image: %s to the Glance image server." 
+#~ msgstr "" + +#~ msgid "Invalid request body" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: prefix %s" +#~ msgstr "" + +#~ msgid "Schedule volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete schedule volume using flow: %s" +#~ msgstr "" + +#~ msgid "Create volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete create volume workflow" +#~ msgstr "" + +#~ msgid "Expected volume result not found" +#~ msgstr "" + +#~ msgid "Manager volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete manager volume workflow" +#~ msgstr "" + +#~ msgid "Unable to update stats, driver is uninitialized" +#~ msgstr "" + +#~ msgid "Bad reponse from server: %s" +#~ msgstr "" + +#~ msgid "%(flow)s has moved into state %(state)s from state %(old_state)s" +#~ msgstr "" + +#~ msgid "No request spec, will not reschedule" +#~ msgstr "" + +#~ msgid "No retry filter property or associated retry info, will not reschedule" +#~ msgstr "" + +#~ msgid "Retry info not present, will not reschedule" +#~ msgstr "" + +#~ msgid "Clear capabilities" +#~ msgstr "" + +#~ msgid "This usually means the volume was never succesfully created." +#~ msgstr "" + +#~ msgid "setting LU uppper (end) limit to %s" +#~ msgstr "" + +#~ msgid "Can't find lun or lun goup in array" +#~ msgstr "" + +#~ msgid "Volume to be restored to is smaller than the backup to be restored" +#~ msgstr "" + +#~ msgid "Volume driver '%(driver)s' not initialized." +#~ msgstr "" + +#~ msgid "in looping call" +#~ msgstr "" + +#~ msgid "Is the appropriate service running?" +#~ msgstr "" + +#~ msgid "Could not find another host" +#~ msgstr "" + +#~ msgid "Not enough allocatable volume gigabytes remaining" +#~ msgstr "" + +#~ msgid "Unable to update stats on non-intialized Volume Group: %s" +#~ msgstr "" + +#~ msgid "do_setup: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "migrate_volume started with more than one vdisk copy" +#~ msgstr "" + +#~ msgid "migrate_volume: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "Selected datastore: %s for the volume." +#~ msgstr "" + +#~ msgid "There are no valid datastores present under %s." +#~ msgstr "" + +#~ msgid "Unable to create volume, driver not initialized" +#~ msgstr "" + +#~ msgid "Migration %(migration_id)s could not be found." +#~ msgstr "" + +#~ msgid "Bad driver response status: %(status)s" +#~ msgstr "" + +#~ msgid "Instance %(instance_id)s could not be found." +#~ msgstr "" + +#~ msgid "Volume retype failed: %(reason)s" +#~ msgstr "" + +#~ msgid "SIGTERM received" +#~ msgstr "" + +#~ msgid "Child %(pid)d exited with status %(code)d" +#~ msgstr "" + +#~ msgid "_wait_child %d" +#~ msgstr "" + +#~ msgid "wait wrap.failed %s" +#~ msgstr "" + +#~ msgid "" +#~ "Report interval must be less than " +#~ "service down time. Current config: " +#~ ". Setting " +#~ "service_down_time to: %(new_service_down_time)s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target for volume %(name)s." +#~ msgstr "" + +#~ msgid "Updating iscsi target: %s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target %(name)s: %(e)s" +#~ msgstr "" + +#~ msgid "Caught '%(exception)s' exception." +#~ msgstr "" + +#~ msgid "Get code level failed" +#~ msgstr "" + +#~ msgid "do_setup: Could not get system name" +#~ msgstr "" + +#~ msgid "Failed to get license information." 
+#~ msgstr "" + +#~ msgid "do_setup: No configured nodes" +#~ msgstr "" + +#~ msgid "enter: _get_chap_secret_for_host: host name %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_chap_secret_for_host: host name " +#~ "%(host_name)s with secret %(chap_secret)s" +#~ msgstr "" + +#~ msgid "" +#~ "_create_host: Cannot clean host name. " +#~ "Host name is not unicode or string" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: %s" +#~ msgstr "" + +#~ msgid "leave: _get_host_from_connector: host %s" +#~ msgstr "" + +#~ msgid "enter: _create_host: host %s" +#~ msgstr "" + +#~ msgid "_create_host: No connector ports" +#~ msgstr "" + +#~ msgid "leave: _create_host: host %(host)s - %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +#~ msgstr "" + +#~ msgid "" +#~ "storwize_svc_multihostmap_enabled is set to " +#~ "False, Not allow multi host mapping" +#~ msgstr "" + +#~ msgid "volume %s mapping to multi host" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _map_vol_to_host: LUN %(result_lun)s, " +#~ "volume %(volume_name)s, host %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "leave: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "_create_host failed to return the host name." +#~ msgstr "" + +#~ msgid "_get_host_from_connector failed to return the host name for connector" +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to any host found." +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: Multiple mappings of " +#~ "volume %(vol_name)s found, no host " +#~ "specified." +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to host %(host_name)s found" +#~ msgstr "" + +#~ msgid "protocol must be specified as ' iSCSI' or ' FC'" +#~ msgstr "" + +#~ msgid "enter: _create_vdisk: vdisk %s " +#~ msgstr "" + +#~ msgid "" +#~ "_create_vdisk %(name)s - did not find success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find success " +#~ "message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find mapping " +#~ "id in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to prepare FlashCopy" +#~ " from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "Unexecpted mapping status %(status)s for " +#~ "mapping %(id)s. Attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Mapping %(id)s prepare failed to " +#~ "complete within the allotted %(to)d " +#~ "seconds timeout. Terminating." 
+#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s with " +#~ "exception %(ex)s" +#~ msgstr "" + +#~ msgid "_prepare_fc_map: %s" +#~ msgstr "" + +#~ msgid "" +#~ "_start_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "enter: _run_flashcopy: execute FlashCopy from" +#~ " source %(source)s to target %(target)s" +#~ msgstr "" + +#~ msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +#~ msgstr "" + +#~ msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %(src_vdisk)s (%(src_id)s) does not exist" +#~ msgstr "" + +#~ msgid "" +#~ "_create_copy: cannot get source vdisk " +#~ "%(src)s capacity from vdisk attributes " +#~ "%(attr)s" +#~ msgstr "" + +#~ msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_flashcopy_mapping_attributes: mapping " +#~ "%(fc_map_id)s, attributes %(attributes)s" +#~ msgstr "" + +#~ msgid "enter: _is_vdisk_defined: vdisk %s " +#~ msgstr "" + +#~ msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +#~ msgstr "" + +#~ msgid "enter: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "warning: Tried to delete vdisk %s but it does not exist." +#~ msgstr "" + +#~ msgid "leave: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "" +#~ "_add_vdisk_copy %(name)s - did not find" +#~ " success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "_get_vdisk_copy_attrs: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "_get_pool_attrs: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "enter: _execute_command_and_parse_attributes: command %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _execute_command_and_parse_attributes:\n" +#~ "command: %(cmd)s\n" +#~ "attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "_get_hdr_dic: attribute headers and values do not match.\n" +#~ " Headers: %(header)s\n" +#~ " Values: %(row)s" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ "stdout: %(out)s\n" +#~ "stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "Did not find expected column in %(fun)s: %(hdr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Volume size %(size)s cannot be lesser" +#~ " than the snapshot size %(snap_size)s. " +#~ "They must be >= original snapshot " +#~ "size." +#~ msgstr "" + +#~ msgid "" +#~ "Clones currently disallowed when %(size)s " +#~ "< %(source_size)s. They must be >= " +#~ "original volume size." +#~ msgstr "" + +#~ msgid "" +#~ "Size of specified image %(image_size)s " +#~ "is larger than volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "" +#~ "Image minDisk size %(min_disk)s is " +#~ "larger than the volume size " +#~ "%(volume_size)s." 
+#~ msgstr "" + +#~ msgid "Updating volume %(volume_id)s with %(update)s" +#~ msgstr "" + +#~ msgid "Volume %s: resetting 'creating' status failed" +#~ msgstr "" + +#~ msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s" +#~ msgstr "" + +#~ msgid "Marking volume %s as bootable" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting download of %(image_id)s " +#~ "(%(image_location)s) to volume %(volume_id)s" +#~ msgstr "" + +#~ msgid "" +#~ "Downloaded image %(image_id)s (%(image_location)s)" +#~ " to volume %(volume_id)s successfully" +#~ msgstr "" + +#~ msgid "" +#~ "Creating volume glance metadata for " +#~ "volume %(volume_id)s backed by image " +#~ "%(image_id)s with: %(vol_metadata)s" +#~ msgstr "" + +#~ msgid "" +#~ "Cloning %(volume_id)s from image %(image_id)s" +#~ " at location %(image_location)s" +#~ msgstr "" + diff --git a/cinder/locale/hr/LC_MESSAGES/cinder.po b/cinder/locale/hr/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..e026d69996 --- /dev/null +++ b/cinder/locale/hr/LC_MESSAGES/cinder.po @@ -0,0 +1,10737 @@ +# Croatian translations for cinder. +# Copyright (C) 2013 ORGANIZATION +# This file is distributed under the same license as the cinder project. +# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Cinder\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-02-03 06:16+0000\n" +"PO-Revision-Date: 2013-05-29 08:13+0000\n" +"Last-Translator: openstackjenkins \n" +"Language-Team: Croatian " +"(http://www.transifex.com/projects/p/openstack/language/hr/)\n" +"Plural-Forms: nplurals=3; plural=n%10==1 && n%100!=11 ? 0 : n%10>=2 && " +"n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:102 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:66 cinder/brick/exception.py:33 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:88 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:107 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:112 +#, python-format +msgid "Volume driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:116 +#, python-format +msgid "Backup driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:120 +#, python-format +msgid "Connection to glance failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:124 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:129 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:133 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:137 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:141 +msgid "Volume driver not ready." +msgstr "" + +#: cinder/exception.py:145 cinder/brick/exception.py:74 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:150 +#, python-format +msgid "Invalid snapshot: %(reason)s" +msgstr "" + +#: cinder/exception.py:154 +#, python-format +msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." 
+msgstr "" + +#: cinder/exception.py:159 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:163 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:167 +msgid "The results are invalid." +msgstr "" + +#: cinder/exception.py:171 +#, python-format +msgid "Invalid input received: %(reason)s" +msgstr "" + +#: cinder/exception.py:175 +#, python-format +msgid "Invalid volume type: %(reason)s" +msgstr "" + +#: cinder/exception.py:179 +#, python-format +msgid "Invalid volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:183 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:187 +#, python-format +msgid "Invalid host: %(reason)s" +msgstr "" + +#: cinder/exception.py:193 cinder/brick/exception.py:81 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:197 +#, python-format +msgid "Invalid auth key: %(reason)s" +msgstr "" + +#: cinder/exception.py:201 +#, python-format +msgid "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" +msgstr "" + +#: cinder/exception.py:206 +msgid "Service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:210 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:214 +#, python-format +msgid "The device in the path %(path)s is unavailable: %(reason)s" +msgstr "" + +#: cinder/exception.py:218 +#, python-format +msgid "Expected a uuid but received %(uuid)s." +msgstr "" + +#: cinder/exception.py:222 cinder/brick/exception.py:68 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:228 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:232 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "" +"Volume %(volume_id)s has no administration metadata with key " +"%(metadata_key)s." +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Invalid metadata: %(reason)s" +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Invalid metadata size: %(reason)s" +msgstr "" + +#: cinder/exception.py:250 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:255 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:264 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:269 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s deletion is not allowed with volumes " +"present with the type." +msgstr "" + +#: cinder/exception.py:274 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:278 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:282 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:287 +#, python-format +msgid "No target id found for volume %(volume_id)s." 
+msgstr "" + +#: cinder/exception.py:291 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:295 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:328 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:332 +#, python-format +msgid "Unknown quota resources %(unknown)s." +msgstr "" + +#: cinder/exception.py:336 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:340 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:344 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:348 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:352 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:356 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:365 +#, python-format +msgid "Volume Type %(id)s already exists." +msgstr "" + +#: cinder/exception.py:369 +#, python-format +msgid "Volume type encryption for type %(type_id)s already exists." +msgstr "" + +#: cinder/exception.py:373 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:377 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:381 +#, python-format +msgid "Could not find parameter %(param)s" +msgstr "" + +#: cinder/exception.py:385 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:389 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:398 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:402 +#, python-format +msgid "Quota exceeded: code=%(code)s" +msgstr "" + +#: cinder/exception.py:409 +#, python-format +msgid "" +"Requested volume or snapshot exceeds allowed Gigabytes quota. Requested " +"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." 
+msgstr "" + +#: cinder/exception.py:415 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:419 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:423 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:427 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:432 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:436 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:440 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:444 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:449 +#, python-format +msgid "Glance metadata for volume/snapshot %(id)s cannot be found." +msgstr "" + +#: cinder/exception.py:453 +#, python-format +msgid "Failed to export for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:457 +#, python-format +msgid "Failed to create metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:461 +#, python-format +msgid "Failed to update metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:465 +#, python-format +msgid "Failed to copy metadata to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:469 +#, python-format +msgid "Failed to copy image to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:473 +msgid "Invalid Ceph args provided for backup rbd operation" +msgstr "" + +#: cinder/exception.py:477 +msgid "An error has occurred during backup operation" +msgstr "" + +#: cinder/exception.py:481 +msgid "Backup RBD operation failed" +msgstr "" + +#: cinder/exception.py:485 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:489 +msgid "Failed to identify volume backend." +msgstr "" + +#: cinder/exception.py:493 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "" + +#: cinder/exception.py:497 +#, python-format +msgid "Connection to swift failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:501 +#, python-format +msgid "Transfer %(transfer_id)s could not be found." +msgstr "" + +#: cinder/exception.py:505 +#, python-format +msgid "Volume migration failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:509 +#, python-format +msgid "SSH command injection detected: %(command)s" +msgstr "" + +#: cinder/exception.py:513 +#, python-format +msgid "QoS Specs %(specs_id)s already exists." +msgstr "" + +#: cinder/exception.py:517 +#, python-format +msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:522 +#, python-format +msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "No such QoS spec %(specs_id)s." +msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:536 +#, python-format +msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." 
+msgstr "" + +#: cinder/exception.py:541 +#, python-format +msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." +msgstr "" + +#: cinder/exception.py:546 +#, python-format +msgid "Invalid qos specs: %(reason)s" +msgstr "" + +#: cinder/exception.py:550 +#, python-format +msgid "QoS Specs %(specs_id)s is still associated with entities." +msgstr "" + +#: cinder/exception.py:554 +#, python-format +msgid "key manager error: %(reason)s" +msgstr "" + +#: cinder/exception.py:560 +msgid "Coraid Cinder Driver exception." +msgstr "" + +#: cinder/exception.py:564 +msgid "Failed to encode json data." +msgstr "" + +#: cinder/exception.py:568 +msgid "Login on ESM failed." +msgstr "" + +#: cinder/exception.py:572 +msgid "Relogin on ESM failed." +msgstr "" + +#: cinder/exception.py:576 +#, python-format +msgid "Group with name \"%(group_name)s\" not found." +msgstr "" + +#: cinder/exception.py:580 +#, python-format +msgid "ESM configure request failed: %(message)s." +msgstr "" + +#: cinder/exception.py:584 +#, python-format +msgid "Coraid ESM not available with reason: %(reason)s." +msgstr "" + +#: cinder/exception.py:589 +msgid "Zadara Cinder Driver exception." +msgstr "" + +#: cinder/exception.py:593 +#, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:597 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:601 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:605 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:609 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:613 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:618 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:622 +msgid "SolidFire Cinder Driver exception" +msgstr "" + +#: cinder/exception.py:626 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:630 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:636 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:641 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:645 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:649 cinder/exception.py:662 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:654 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:658 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/manager.py:133 +msgid "Notifying Schedulers of capabilities ..." +msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:105 +#, python-format +msgid "" +"Default quota for resource: %(res)s is set by the default quota flag: " +"quota_%(res)s, it is now deprecated. Please use the the default quota " +"class for default quota." 
+msgstr "" + +#: cinder/quota.py:748 +#, python-format +msgid "Created reservations %s" +msgstr "" + +#: cinder/quota.py:770 +#, python-format +msgid "Failed to commit reservations %s" +msgstr "" + +#: cinder/quota.py:790 +#, python-format +msgid "Failed to roll back reservations %s" +msgstr "" + +#: cinder/quota.py:876 +msgid "Cannot register resource" +msgstr "" + +#: cinder/quota.py:879 +msgid "Cannot register resources" +msgstr "" + +#: cinder/quota_utils.py:46 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume - " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/quota_utils.py:56 cinder/transfer/api.py:168 +#: cinder/volume/flows/api/create_volume.py:520 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/service.py:95 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:108 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:148 +#, python-format +msgid "" +"Report interval must be less than service down time. Current config " +"service_down_time: %(service_down_time)s, report_interval for this: " +"service is: %(report_interval)s. Setting global service_down_time to: " +"%(new_down_time)s" +msgstr "" + +#: cinder/service.py:216 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:255 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:270 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:276 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:298 +#, python-format +msgid "" +"Value of config option %(name)s_workers must be integer greater than 1. " +"Input value ignored." +msgstr "" + +#: cinder/service.py:373 +msgid "serve() can only be called once" +msgstr "" + +#: cinder/service.py:379 cinder/openstack/common/service.py:166 +#: cinder/openstack/common/service.py:384 +msgid "Full set of CONF:" +msgstr "" + +#: cinder/service.py:387 +#, python-format +msgid "%s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Can not translate %s to integer." 
+msgstr "" + +#: cinder/utils.py:127 +#, python-format +msgid "May specify only one of %s" +msgstr "" + +#: cinder/utils.py:212 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:228 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:412 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:423 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:698 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:759 +#, python-format +msgid "Volume driver %s not initialized" +msgstr "" + +#: cinder/wsgi.py:127 cinder/openstack/common/sslutils.py:50 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: cinder/wsgi.py:130 cinder/openstack/common/sslutils.py:53 +#, python-format +msgid "Unable to find ca_file : %s" +msgstr "" + +#: cinder/wsgi.py:133 cinder/openstack/common/sslutils.py:56 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: cinder/wsgi.py:136 cinder/openstack/common/sslutils.py:59 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:169 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:206 +#, python-format +msgid "Started %(name)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:244 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:313 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." +msgstr "" + +#: cinder/api/common.py:92 cinder/api/common.py:126 cinder/volume/api.py:266 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:95 cinder/api/common.py:130 cinder/volume/api.py:263 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:120 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:134 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:162 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:189 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:182 +msgid "Initializing extension manager." 
+msgstr "" + +#: cinder/api/extensions.py:197 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:235 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:236 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:240 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:256 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:262 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:276 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:287 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:356 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:266 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:463 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:786 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:907 +msgid "subclasses must implement construct()!" +msgstr "" + +#: cinder/api/contrib/admin_actions.py:81 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:227 +#: cinder/api/contrib/volume_transfer.py:157 +#: cinder/api/contrib/volume_transfer.py:193 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:224 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:60 +msgid "Snapshot not found." 
+msgstr "" + +#: cinder/api/contrib/hosts.py:86 cinder/api/openstack/wsgi.py:245 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:136 +#, python-format +msgid "Host '%s' could not be found." +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:168 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:180 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:206 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:214 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:111 +msgid "Please specify a name for QoS specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:220 +msgid "Failed to disassociate qos specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:222 +msgid "Qos specs still in use." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:298 +#: cinder/api/contrib/qos_specs_manage.py:351 +msgid "Volume Type id must not be None." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:72 +msgid "Missing required element quota_class_set in request body." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:81 +msgid "Quota class limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:85 +msgid "Quota class limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:60 +msgid "Quota limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quotas.py:65 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:100 +msgid "Missing required element quota_set in request body." +msgstr "" + +#: cinder/api/contrib/quotas.py:111 +#, python-format +msgid "Bad key(s) in quota set: %s" +msgstr "" + +#: cinder/api/contrib/scheduler_hints.py:36 +msgid "Malformed scheduler_hints attribute" +msgstr "" + +#: cinder/api/contrib/services.py:84 +msgid "" +"Query by service parameter is deprecated. Please use binary parameter " +"instead." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:51 +msgid "'status' must be specified." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:61 +#, python-format +msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:67 +#, python-format +msgid "" +"Provided snapshot status %(provided)s not allowed for snapshot with " +"status %(current)s." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:79 +msgid "progress must be an integer percentage" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:101 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:105 +#: cinder/api/v1/snapshot_metadata.py:75 cinder/api/v1/volume_metadata.py:75 +#: cinder/api/v2/snapshot_metadata.py:75 cinder/api/v2/volume_metadata.py:74 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:108 +#: cinder/api/v1/snapshot_metadata.py:79 cinder/api/v1/volume_metadata.py:79 +#: cinder/api/v2/snapshot_metadata.py:79 cinder/api/v2/volume_metadata.py:78 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:150 +msgid "" +"Key names can only contain alphanumeric characters, underscores, periods," +" colons and hyphens." 
+msgstr "" + +#: cinder/api/contrib/volume_actions.py:99 +#, python-format +msgid "" +"Invalid request to attach volume to an instance %(instance_uuid)s and a " +"host %(host_name)s simultaneously" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:107 +msgid "Invalid request to attach volume to an invalid target" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:111 +msgid "" +"Invalid request to attach volume with an invalid mode. Attaching mode " +"should be 'rw' or 'ro'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:196 +msgid "Unable to fetch connection information from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:216 +msgid "Unable to terminate volume connection from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:229 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:237 +msgid "Bad value for 'force' parameter." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:240 +msgid "'force' is not string or bool." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:280 +msgid "New volume size must be specified as an integer." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:299 +msgid "Must specify readonly in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:307 +msgid "Bad value for 'readonly'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:311 +msgid "'readonly' not string or bool" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:325 +msgid "New volume type must be specified." +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:131 +msgid "Listing volume transfers" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:147 +#, python-format +msgid "Creating new volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:162 +#, python-format +msgid "Creating transfer of volume %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:183 +#, python-format +msgid "Accepting volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:196 +#, python-format +msgid "Accepting transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:217 +#, python-format +msgid "Delete transfer with id: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:64 +msgid "key_size must be non-negative" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:67 +msgid "key_size must be an integer" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:73 +msgid "provider must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:75 +msgid "control_location must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:83 +#, python-format +msgid "Valid control location are: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:111 +msgid "Cannot create encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:115 +msgid "Create body is not valid." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:157 +msgid "Cannot delete encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/middleware/auth.py:108 +msgid "Invalid service catalog json." 
+msgstr "" + +#: cinder/api/middleware/fault.py:44 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:53 cinder/api/openstack/wsgi.py:984 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/fault.py:69 +#, python-format +msgid "%(exception)s: %(explanation)s" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:55 cinder/api/middleware/sizelimit.py:64 +#: cinder/api/middleware/sizelimit.py:78 +msgid "Request is too large." +msgstr "" + +#: cinder/api/openstack/__init__.py:69 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:80 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:104 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:126 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." +msgstr "" + +#: cinder/api/openstack/wsgi.py:220 cinder/api/openstack/wsgi.py:634 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:639 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:677 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:682 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:685 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:793 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:799 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:803 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:914 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:917 cinder/api/openstack/wsgi.py:930 +#: cinder/api/v1/snapshot_metadata.py:53 cinder/api/v1/snapshot_metadata.py:71 +#: cinder/api/v1/snapshot_metadata.py:96 cinder/api/v1/snapshot_metadata.py:121 +#: cinder/api/v1/volume_metadata.py:53 cinder/api/v1/volume_metadata.py:71 +#: cinder/api/v1/volume_metadata.py:96 cinder/api/v1/volume_metadata.py:121 +#: cinder/api/v2/snapshot_metadata.py:53 cinder/api/v2/snapshot_metadata.py:71 +#: cinder/api/v2/snapshot_metadata.py:96 cinder/api/v2/snapshot_metadata.py:121 +#: cinder/api/v2/volume_metadata.py:52 cinder/api/v2/volume_metadata.py:70 +#: cinder/api/v2/volume_metadata.py:95 cinder/api/v2/volume_metadata.py:120 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:927 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:939 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:987 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." 
+msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:139 cinder/api/v2/limits.py:138 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:264 cinder/api/v2/limits.py:261 +msgid "This request was rate-limited." +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:37 cinder/api/v1/snapshot_metadata.py:117 +#: cinder/api/v1/snapshot_metadata.py:156 cinder/api/v2/snapshot_metadata.py:37 +#: cinder/api/v2/snapshot_metadata.py:117 +#: cinder/api/v2/snapshot_metadata.py:156 +msgid "snapshot does not exist" +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:139 +#: cinder/api/v1/snapshot_metadata.py:149 cinder/api/v1/volume_metadata.py:139 +#: cinder/api/v1/volume_metadata.py:149 cinder/api/v2/snapshot_metadata.py:139 +#: cinder/api/v2/snapshot_metadata.py:149 cinder/api/v2/volume_metadata.py:138 +#: cinder/api/v2/volume_metadata.py:148 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:119 cinder/api/v2/snapshots.py:120 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:184 +msgid "'volume_id' must be specified" +msgstr "" + +#: cinder/api/v1/snapshots.py:182 cinder/api/v2/snapshots.py:193 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:186 cinder/api/v2/snapshots.py:202 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:37 cinder/api/v1/volume_metadata.py:117 +#: cinder/api/v1/volume_metadata.py:156 cinder/api/v2/volume_metadata.py:36 +#: cinder/api/v2/volume_metadata.py:116 cinder/api/v2/volume_metadata.py:155 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:111 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:290 cinder/api/v2/volumes.py:228 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:344 cinder/api/v1/volumes.py:348 +#: cinder/api/v2/volumes.py:298 cinder/api/v2/volumes.py:302 +msgid "Invalid imageRef provided." 
+msgstr "" + +#: cinder/api/v1/volumes.py:388 cinder/api/v2/volumes.py:354 +#, python-format +msgid "snapshot id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:401 +#, python-format +msgid "source vol id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:412 cinder/api/v2/volumes.py:377 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/v1/volumes.py:496 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/snapshots.py:111 cinder/api/v2/snapshots.py:126 +#: cinder/api/v2/snapshots.py:267 +msgid "Snapshot could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:174 cinder/api/v2/snapshots.py:234 +#: cinder/api/v2/volumes.py:313 cinder/api/v2/volumes.py:419 +#, python-format +msgid "Missing required element '%s' in request body" +msgstr "" + +#: cinder/api/v2/snapshots.py:190 cinder/api/v2/volumes.py:217 +#: cinder/api/v2/volumes.py:234 cinder/api/v2/volumes.py:449 +msgid "Volume could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:230 cinder/api/v2/volumes.py:415 +msgid "Missing request body" +msgstr "" + +#: cinder/api/v2/types.py:70 +msgid "Volume type not found" +msgstr "" + +#: cinder/api/v2/volumes.py:237 +msgid "Volume cannot be deleted while in attached state" +msgstr "" + +#: cinder/api/v2/volumes.py:343 +msgid "Volume type not found." +msgstr "" + +#: cinder/api/v2/volumes.py:366 +#, python-format +msgid "source volume id:%s not found" +msgstr "" + +#: cinder/api/v2/volumes.py:472 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:66 +msgid "Backup status must be available or error" +msgstr "" + +#: cinder/backup/api.py:105 +msgid "Volume to be backed up must be available" +msgstr "" + +#: cinder/backup/api.py:140 +msgid "Backup status must be available" +msgstr "" + +#: cinder/backup/api.py:145 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:154 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:170 +msgid "Volume to be restored to must be available" +msgstr "" + +#: cinder/backup/api.py:176 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." +msgstr "" + +#: cinder/backup/api.py:181 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:97 +msgid "NULL host not allowed for volume backend lookup." +msgstr "" + +#: cinder/backup/manager.py:100 +#, python-format +msgid "Checking hostname '%s' for backend info." +msgstr "" + +#: cinder/backup/manager.py:107 +#, python-format +msgid "Backend not found in hostname (%s) so using default." +msgstr "" + +#: cinder/backup/manager.py:117 +#, python-format +msgid "Manager requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:120 cinder/backup/manager.py:132 +msgid "Fetching default backend." +msgstr "" + +#: cinder/backup/manager.py:123 +#, python-format +msgid "Volume manager for backend '%s' does not exist." +msgstr "" + +#: cinder/backup/manager.py:129 +#, python-format +msgid "Driver requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:147 +#, python-format +msgid "" +"Registering backend %(backend)s (host=%(host)s " +"backend_name=%(backend_name)s)." +msgstr "" + +#: cinder/backup/manager.py:154 +#, python-format +msgid "Registering default backend %s." 
+msgstr "" + +#: cinder/backup/manager.py:158 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)." +msgstr "" + +#: cinder/backup/manager.py:165 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s." +msgstr "" + +#: cinder/backup/manager.py:184 +msgid "Cleaning up incomplete backup operations." +msgstr "" + +#: cinder/backup/manager.py:189 +#, python-format +msgid "Resetting volume %s to available (was backing-up)." +msgstr "" + +#: cinder/backup/manager.py:194 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)." +msgstr "" + +#: cinder/backup/manager.py:206 +#, python-format +msgid "Resetting backup %s to error (was creating)." +msgstr "" + +#: cinder/backup/manager.py:212 +#, python-format +msgid "Resetting backup %s to available (was restoring)." +msgstr "" + +#: cinder/backup/manager.py:217 +#, python-format +msgid "Resuming delete on backup: %s." +msgstr "" + +#: cinder/backup/manager.py:225 +#, python-format +msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:237 +#, python-format +msgid "" +"Create backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:249 +#, python-format +msgid "" +"Create backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:282 +#, python-format +msgid "Create backup finished. backup: %s." +msgstr "" + +#: cinder/backup/manager.py:286 +#, python-format +msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:299 +#, python-format +msgid "" +"Restore backup aborted: expected volume status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:310 +#, python-format +msgid "" +"Restore backup aborted: expected backup status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:329 +#, python-format +msgid "" +"Restore backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:360 +#, python-format +msgid "" +"Restore backup finished, backup %(backup_id)s restored to volume " +"%(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:379 +#, python-format +msgid "Delete backup started, backup: %s." +msgstr "" + +#: cinder/backup/manager.py:386 +#, python-format +msgid "" +"Delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:399 +#, python-format +msgid "" +"Delete backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:422 +#, python-format +msgid "Delete backup finished, backup %s deleted." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:116 +msgid "" +"rbd striping not supported - ignoring configuration settings for rbd " +"striping" +msgstr "" + +#: cinder/backup/drivers/ceph.py:147 +#, python-format +msgid "invalid user '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:213 +msgid "backup_id required" +msgstr "" + +#: cinder/backup/drivers/ceph.py:224 +#, python-format +msgid "discarding %(length)s bytes from offset %(offset)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:232 +#, python-format +msgid "writing zeroes chunk %d" +msgstr "" + +#: cinder/backup/drivers/ceph.py:246 +#, python-format +msgid "transferring data between '%(src)s' and '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:250 +#, python-format +msgid "%(chunks)s chunks of %(bytes)s bytes to be transferred" +msgstr "" + +#: cinder/backup/drivers/ceph.py:269 +#, python-format +msgid "transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:279 +#, python-format +msgid "transferring remaining %s bytes" +msgstr "" + +#: cinder/backup/drivers/ceph.py:295 +#, python-format +msgid "creating base image '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:322 cinder/backup/drivers/ceph.py:603 +#, python-format +msgid "deleting backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:325 +msgid "no backup snapshot to delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:361 +#, python-format +msgid "trying diff format name format basename='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:369 +#, python-format +msgid "image %s not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:377 +#, python-format +msgid "base image still has %s snapshots so skipping base image delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:382 +#, python-format +msgid "deleting base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:389 +#, python-format +msgid "image busy, retrying %(retries)s more time(s) in %(delay)ss" +msgstr "" + +#: cinder/backup/drivers/ceph.py:394 +msgid "max retries reached - raising error" +msgstr "" + +#: cinder/backup/drivers/ceph.py:397 +#, python-format +msgid "base backup image='%s' deleted)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:407 +#, python-format +msgid "deleting source snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:453 +#, python-format +msgid "performing differential transfer from '%(src)s' to '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:478 +#, python-format +msgid "rbd diff op failed - (ret=%(ret)s stderr=%(stderr)s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:488 +#, python-format +msgid "image '%s' not found - trying diff format name" +msgstr "" + +#: cinder/backup/drivers/ceph.py:493 +#, python-format +msgid "diff format image '%s' not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:528 +#, python-format +msgid "using --from-snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:543 +#, python-format +msgid "source snap '%s' is stale so deleting" +msgstr "" + +#: cinder/backup/drivers/ceph.py:555 +#, python-format +msgid "" +"snap='%(snap)s' does not exist in base image='%(base)s' - aborting " +"incremental backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:566 +#, python-format +msgid "creating backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:586 +#, python-format +msgid "differential backup transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:595 +msgid "differential backup transfer failed" +msgstr "" + +#: 
cinder/backup/drivers/ceph.py:625 +#, python-format +msgid "creating base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:634 +msgid "copying data" +msgstr "" + +#: cinder/backup/drivers/ceph.py:694 +#, python-format +msgid "looking for snapshot of backup base '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:697 +#, python-format +msgid "backup base '%s' has no snapshots" +msgstr "" + +#: cinder/backup/drivers/ceph.py:704 +#, python-format +msgid "backup '%s' has no snapshot" +msgstr "" + +#: cinder/backup/drivers/ceph.py:708 +#, python-format +msgid "backup should only have one snapshot but instead has %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:713 +#, python-format +msgid "found snapshot '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:734 +msgid "need non-zero volume size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:751 +#, python-format +msgid "Starting backup of volume='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:764 +msgid "forcing full backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:776 +#, python-format +msgid "backup '%s' finished." +msgstr "" + +#: cinder/backup/drivers/ceph.py:834 +msgid "adjusting restore vol size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:846 +#, python-format +msgid "trying incremental restore from base='%(base)s' snap='%(snap)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:858 +msgid "differential restore failed, trying full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:869 +#, python-format +msgid "restore transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:916 +#, python-format +msgid "rbd has %s extents" +msgstr "" + +#: cinder/backup/drivers/ceph.py:938 +msgid "dest volume is original volume - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:959 +msgid "destination has extents - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:964 +#, python-format +msgid "no restore point found for backup='%s', forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:995 +msgid "forcing full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1005 +#, python-format +msgid "starting restore from Ceph backup=%(src)s to volume=%(dest)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1016 +msgid "volume_file does not support fileno() so skipping fsync()" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1021 +msgid "restore finished successfully." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:1023 +#, python-format +msgid "restore finished with error - %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1029 +#, python-format +msgid "delete started for backup=%s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1034 +msgid "rbd image not found but continuing anyway so that db entry can be removed" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1037 +#, python-format +msgid "delete '%s' finished with warning" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1039 +#, python-format +msgid "delete '%s' finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:106 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:123 +#, python-format +msgid "single_user auth mode enabled, but %(param)s not set" +msgstr "" + +#: cinder/backup/drivers/swift.py:141 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:146 +#, python-format +msgid "container %s does not exist" +msgstr "" + +#: cinder/backup/drivers/swift.py:151 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/drivers/swift.py:157 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:173 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:182 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:192 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:209 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/drivers/swift.py:214 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:219 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:224 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/drivers/swift.py:234 +#, python-format +msgid "volume size %d is invalid." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:248 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:271 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/drivers/swift.py:278 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:287 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/drivers/swift.py:291 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/drivers/swift.py:297 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:301 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:304 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:312 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/drivers/swift.py:328 cinder/backup/drivers/tsm.py:324 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/drivers/swift.py:345 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/drivers/swift.py:350 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:356 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/drivers/swift.py:362 +#, python-format +msgid "" +"restoring object from swift. backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:378 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/drivers/swift.py:401 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:409 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:423 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:428 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:432 cinder/backup/drivers/tsm.py:378 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:446 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:455 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:458 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:468 cinder/backup/drivers/tsm.py:440 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/backup/drivers/tsm.py:85 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to create device hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:143 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to obtain backup success notification from " +"server.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:173 +#, python-format +msgid "" +"restore: %(vol_id)s Failed.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:199 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a block device." +msgstr "" + +#: cinder/backup/drivers/tsm.py:206 +#, python-format +msgid "backup: %(vol_id)s Failed. Cannot obtain real path to device %(path)s." +msgstr "" + +#: cinder/backup/drivers/tsm.py:213 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a file." +msgstr "" + +#: cinder/backup/drivers/tsm.py:260 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to remove backup hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:286 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to TSM, volume path: " +"%(volume_path)s," +msgstr "" + +#: cinder/backup/drivers/tsm.py:298 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:308 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:338 +#, python-format +msgid "" +"restore: starting restore of backup from TSM to volume %(volume_id)s, " +"backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:352 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:362 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:413 +#, python-format +msgid "" +"delete: %(vol_id)s Failed to run dsmc with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:421 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments with " +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:432 +#, python-format +msgid "" +"delete: %(vol_id)s Failed with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/brick/exception.py:55 +#, python-format +msgid "Exception in string format operation. 
msg='%s'" +msgstr "" + +#: cinder/brick/exception.py:85 +msgid "We are unable to locate any Fibre Channel devices." +msgstr "" + +#: cinder/brick/exception.py:89 +msgid "Unable to find a Fibre Channel volume device." +msgstr "" + +#: cinder/brick/exception.py:93 +#, python-format +msgid "Volume device not found at %(device)s." +msgstr "" + +#: cinder/brick/exception.py:97 +#, python-format +msgid "Unable to find Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:101 +#, python-format +msgid "Failed to create Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:105 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:109 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:113 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:117 +#, python-format +msgid "Connect to volume via protocol %(protocol)s not supported." +msgstr "" + +#: cinder/brick/initiator/connector.py:127 +#, python-format +msgid "Invalid InitiatorConnector protocol specified %(protocol)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:140 +#, python-format +msgid "Failed to access the device on the path %(path)s: %(error)s %(info)s." +msgstr "" + +#: cinder/brick/initiator/connector.py:229 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. Try" +" number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:242 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:317 +#, python-format +msgid "Could not find the iSCSI Initiator File %s" +msgstr "" + +#: cinder/brick/initiator/connector.py:609 +msgid "We are unable to locate any Fibre Channel devices" +msgstr "" + +#: cinder/brick/initiator/connector.py:619 +#, python-format +msgid "Looking for Fibre Channel dev %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:629 +msgid "Fibre Channel volume device not found." +msgstr "" + +#: cinder/brick/initiator/connector.py:633 +#, python-format +msgid "Fibre volume not yet found. Will rescan & retry. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:649 +#, python-format +msgid "Found Fibre Channel volume %(name)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:658 +#, python-format +msgid "Multipath device discovered %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:776 +#, python-format +msgid "AoE volume not yet found at: %(path)s. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:789 +#, python-format +msgid "Found AoE device %(path)s (after %(tries)s rediscover)" +msgstr "" + +#: cinder/brick/initiator/connector.py:815 +#, python-format +msgid "aoe-discover: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:825 +#, python-format +msgid "aoe-revalidate %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:834 +#, python-format +msgid "aoe-flush %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:858 +msgid "" +"Connection details not present. RemoteFsClient may not initialize " +"properly." 
+msgstr "" + +#: cinder/brick/initiator/connector.py:915 +msgid "Invalid connection_properties specified no device_path attribute" +msgstr "" + +#: cinder/brick/initiator/linuxfc.py:50 cinder/brick/initiator/linuxfc.py:56 +msgid "systool is not installed" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:99 +#: cinder/brick/initiator/linuxscsi.py:107 +#: cinder/brick/initiator/linuxscsi.py:124 +#, python-format +msgid "multipath call failed exit (%(code)s)" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:145 +#, python-format +msgid "Couldn't find multipath device %(line)s" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:149 +#, python-format +msgid "Found multipath device = %(mdev)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:140 +msgid "Attempting recreate of backing lun..." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:158 +#, python-format +msgid "" +"Failed to recover attempt to create iscsi backing lun for volume " +"id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:177 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:184 +#, python-format +msgid "" +"Created volume path %(vp)s,\n" +"content: %(vc)%" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:216 cinder/brick/iscsi/iscsi.py:365 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:227 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:258 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:262 +#, python-format +msgid "Volume path %s does not exist, nothing to remove." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:280 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:290 cinder/brick/iscsi/iscsi.py:550 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:375 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:469 +msgid "cinder-rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:489 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:513 cinder/brick/iscsi/iscsi.py:522 +#, python-format +msgid "Failed to create iscsi target for volume id:%s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:532 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:542 +#, python-format +msgid "Failed to remove iscsi target for volume id:%s." 
+msgstr "" + +#: cinder/brick/iscsi/iscsi.py:571 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 cinder/brick/local_dev/lvm.py:158 +#: cinder/brick/local_dev/lvm.py:474 cinder/brick/local_dev/lvm.py:503 +#: cinder/brick/local_dev/lvm.py:546 cinder/brick/local_dev/lvm.py:609 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 cinder/brick/local_dev/lvm.py:159 +#: cinder/brick/local_dev/lvm.py:475 cinder/brick/local_dev/lvm.py:504 +#: cinder/brick/local_dev/lvm.py:547 cinder/brick/local_dev/lvm.py:610 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 cinder/brick/local_dev/lvm.py:160 +#: cinder/brick/local_dev/lvm.py:476 cinder/brick/local_dev/lvm.py:505 +#: cinder/brick/local_dev/lvm.py:548 cinder/brick/local_dev/lvm.py:611 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, python-format +msgid "Unable to locate Volume Group %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:157 +msgid "Error querying thin pool about data_percent" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:370 +#, python-format +msgid "Unable to find VG: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:420 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:473 +msgid "Error creating Volume" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:489 +#, python-format +msgid "Unable to find LV: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:502 +msgid "Error creating snapshot" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:545 +msgid "Error activating LV" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:563 +#, python-format +msgid "Error reported running lvremove: CMD: %(command)s, RESPONSE: %(response)s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:568 +msgid "Attempting udev settle and retry of lvremove..." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:608 +msgid "Error extending Volume" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:39 +msgid "nfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:45 +msgid "glusterfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:86 +#, python-format +msgid "Already mounted: %s" +msgstr "" + +#: cinder/common/config.py:125 +msgid "Deploy v1 of the Cinder API." +msgstr "" + +#: cinder/common/config.py:128 +msgid "Deploy v2 of the Cinder API." +msgstr "" + +#: cinder/common/sqlalchemyutils.py:66 +#: cinder/openstack/common/db/sqlalchemy/utils.py:72 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:114 +#: cinder/openstack/common/db/sqlalchemy/utils.py:120 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/compute/nova.py:97 +#, python-format +msgid "Novaclient connection created using URL: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:63 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:190 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:843 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1842 +#, python-format +msgid "VolumeType %s deletion failed, VolumeType in use." 
+msgstr "" + +#: cinder/db/sqlalchemy/api.py:2530 +#, python-format +msgid "No backup with id %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2615 +msgid "Volume must be available" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2639 +#, python-format +msgid "Volume in unexpected state %s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2662 +#, python-format +msgid "" +"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " +"%(status)s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:37 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:64 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:240 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:269 +msgid "Downgrade from initial Cinder install is unsupported." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:49 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:74 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:105 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:45 +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:48 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:46 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:127 +msgid "Dropping foreign key reservations_ibfk_1 failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:133 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:140 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:147 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:60 +msgid "Exception while creating table 'volume_glance_metadata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:75 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:68 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:58 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:61 +msgid "transfers table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:31 +msgid "migrations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:61 +#, python-format +msgid "Table |%s| not created" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:37 +#, python-format +msgid "Exception while dropping table %s." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:100 +#, python-format +msgid "Exception while creating table %s." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:34 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:43 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:49 +#, python-format +msgid "Column |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:92 +msgid "encryption_key_id column not dropped from volumes" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:100 +msgid "encryption_key_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:105 +msgid "volume_type_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:113 +msgid "encryption table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:49 +msgid "Table quality_of_service_specs not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:60 +msgid "Added qos_specs_id column to volume type table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:85 +msgid "Dropping foreign key volume_types_ibfk_1 failed" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:93 +msgid "Dropping qos_specs_id column failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:100 +msgid "Dropping quality_of_service_specs table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:59 +msgid "volume_admin_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:48 +msgid "" +"Found existing 'default' entries in the quota_classes table. Skipping " +"insertion of default values." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:72 +msgid "Added default quota class data into the DB." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:74 +msgid "Default quota class data not inserted into the DB." +msgstr "" + +#: cinder/image/glance.py:161 cinder/image/glance.py:169 +#, python-format +msgid "Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:94 cinder/image/image_utils.py:199 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/image/image_utils.py:101 +#, python-format +msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:109 cinder/image/image_utils.py:192 +#, python-format +msgid "" +"Size is %(image_size)dGB and doesn't fit in a volume of size " +"%(volume_size)dGB." +msgstr "" + +#: cinder/image/image_utils.py:157 +#, python-format +msgid "" +"qemu-img is not installed and image is of type %s. Only RAW images can " +"be used if qemu-img is not installed." +msgstr "" + +#: cinder/image/image_utils.py:164 +msgid "" +"qemu-img is not installed and the disk format is not specified. Only RAW" +" images can be used if qemu-img is not installed." 
+msgstr "" + +#: cinder/image/image_utils.py:178 +#, python-format +msgid "Copying image from %(tmp)s to volume %(dest)s - size: %(size)s" +msgstr "" + +#: cinder/image/image_utils.py:206 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:224 +#, python-format +msgid "Converted to %(vol_format)s, but format is now %(file_format)s" +msgstr "" + +#: cinder/image/image_utils.py:260 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:78 +msgid "" +"config option keymgr.fixed_key has not been defined: some operations may " +"fail unexpectedly" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:80 +msgid "keymgr.fixed_key not defined" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:134 +#, python-format +msgid "Not deleting key %s" +msgstr "" + +#: cinder/openstack/common/eventlet_backdoor.py:140 +#, python-format +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/fileutils.py:64 +#, python-format +msgid "Reloading cached file %s" +msgstr "" + +#: cinder/openstack/common/gettextutils.py:252 +msgid "Message objects do not support addition." +msgstr "" + +#: cinder/openstack/common/gettextutils.py:261 +msgid "" +"Message objects do not support str() because they may contain non-ascii " +"characters. Please use unicode() or translate() instead." +msgstr "" + +#: cinder/openstack/common/imageutils.py:96 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:189 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:200 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:227 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:235 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/log.py:301 +#, python-format +msgid "Deprecated: %s" +msgstr "" + +#: cinder/openstack/common/log.py:402 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:453 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:623 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:82 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:89 +#: cinder/tests/brick/test_brick_connector.py:466 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:129 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:136 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:43 +#, python-format +msgid "Unexpected argument for periodic task creation: %(arg)s." 
+msgstr "" + +#: cinder/openstack/common/periodic_task.py:134 +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:139 +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:177 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:186 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." +msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:127 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/openstack/common/processutils.py:142 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:167 +#: cinder/openstack/common/processutils.py:239 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:345 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:179 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/openstack/common/processutils.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:318 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:220 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/openstack/common/processutils.py:224 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/openstack/common/service.py:175 +#: cinder/openstack/common/service.py:269 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:187 +msgid "Exception during rpc cleanup." 
+msgstr "" + +#: cinder/openstack/common/service.py:238 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:275 +msgid "Unhandled exception" +msgstr "" + +#: cinder/openstack/common/service.py:308 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/openstack/common/service.py:327 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/openstack/common/service.py:337 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/openstack/common/service.py:354 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/openstack/common/service.py:358 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/service.py:362 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/openstack/common/service.py:392 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/openstack/common/service.py:410 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/openstack/common/sslutils.py:98 +#, python-format +msgid "Invalid SSL version : %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:86 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/strutils.py:182 +#, python-format +msgid "Invalid string format: %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:189 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/openstack/common/versionutils.py:69 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and " +"may be removed in %(remove_in)s." +msgstr "" + +#: cinder/openstack/common/versionutils.py:73 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s and may be removed in " +"%(remove_in)s. It will not be superseded." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:29 +msgid "An unknown error occurred in crypto utils." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:36 +#, python-format +msgid "Block size of %(given)d is too big, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:45 +#, python-format +msgid "Length of %(given)d is too long, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/db/exception.py:44 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:487 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:538 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:610 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/utils.py:33 +msgid "Sort key supplied was not valid." +msgstr "" + +#: cinder/openstack/common/notifier/api.py:129 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:145 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:164 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. 
Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:105 +#, python-format +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:83 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:216 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:299 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:345 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "received %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:422 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:423 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:280 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:459 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:594 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:597 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:631 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:640 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:668 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." 
+msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:166 +#: cinder/openstack/common/rpc/impl_qpid.py:164 +msgid "Failed to process message... skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:477 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:499 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:536 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:552 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:606 +#: cinder/openstack/common/rpc/impl_qpid.py:507 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:624 +#: cinder/openstack/common/rpc/impl_qpid.py:522 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:628 +#: cinder/openstack/common/rpc/impl_qpid.py:526 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:667 +#: cinder/openstack/common/rpc/impl_qpid.py:561 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:84 +#, python-format +msgid "Invalid value for qpid_topology_version: %d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:455 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:461 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:474 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:534 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:101 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:136 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:137 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:138 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:146 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:158 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:200 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:205 +msgid "You cannot send on this socket." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:267 +#, python-format +msgid "Running func with context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:305 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:387 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:437 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:443 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:475 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:481 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:497 +#, python-format +msgid "Required IPC directory does not exist at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:506 +#, python-format +msgid "Permission denied to IPC directory at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:509 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:543 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:562 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:590 +msgid "Skipping topic registration. Already registered." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:597 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:649 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:662 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:675 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:678 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:681 +#, python-format +msgid "Received message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:682 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:691 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:698 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:721 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:724 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:728 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:731 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:771 +#, python-format +msgid "topic is %s." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:815 +#, python-format +msgid "rpc_zmq_matchmaker = %(orig)s is deprecated; use %(new)s instead" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#: cinder/openstack/common/rpc/matchmaker_ring.py:79 +#: cinder/openstack/common/rpc/matchmaker_ring.py:97 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:54 +#, python-format +msgid "extra_spec requirement '%(req)s' does not match '%(cap)s'" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:67 +#, python-format +msgid "%(host_state)s fails resource_type extra_specs requirements" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:43 +msgid "Re-scheduling is disabled." +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:52 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/scheduler/driver.py:69 +msgid "Must implement host_passes_filters" +msgstr "" + +#: cinder/scheduler/driver.py:74 +msgid "Must implement find_retype_host" +msgstr "" + +#: cinder/scheduler/driver.py:78 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:82 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:98 +#, python-format +msgid "cannot place volume %(id)s on %(host)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:114 +#, python-format +msgid "No valid hosts for volume %(id)s with type %(type)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:125 +#, python-format +msgid "" +"Current host not valid for volume %(id)s with type %(type)s, migration " +"not allowed" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:156 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:174 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:207 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:259 +#, python-format +msgid "Filtered %s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:276 +#, python-format +msgid "Choosing %s" +msgstr "" + +#: cinder/scheduler/host_manager.py:264 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:269 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:294 +#, python-format +msgid "volume service is down or disabled. (host: %s)" +msgstr "" + +#: cinder/scheduler/manager.py:63 +msgid "" +"ChanceScheduler and SimpleScheduler have been deprecated due to lack of " +"support for advanced features like: volume types, volume encryption, QoS " +"etc. These two schedulers can be fully replaced by FilterScheduler with " +"certain combination of filters and weighers." 
+msgstr "" + +#: cinder/scheduler/manager.py:98 cinder/scheduler/manager.py:100 +msgid "Failed to create scheduler manager volume flow" +msgstr "" + +#: cinder/scheduler/manager.py:159 +msgid "New volume type not specified in request_spec." +msgstr "" + +#: cinder/scheduler/manager.py:174 +#, python-format +msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." +msgstr "" + +#: cinder/scheduler/manager.py:192 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:68 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%s'" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:43 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:57 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:53 +msgid "No volume_id provided to populate a request_spec from" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:116 +#, python-format +msgid "Failed to schedule_create_volume: %(cause)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:135 +#, python-format +msgid "Failed notifying on %(topic)s payload %(payload)s" +msgstr "" + +#: cinder/tests/fake_driver.py:57 cinder/volume/driver.py:784 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:76 cinder/volume/driver.py:884 +#, python-format +msgid "FAKE ISER: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:97 +msgid "local_path not implemented" +msgstr "" + +#: cinder/tests/fake_driver.py:124 cinder/tests/fake_driver.py:129 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:70 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:78 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:94 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:97 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:58 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_netapp_nfs.py:360 +#, python-format +msgid "Share %(share)s and file name %(file_name)s" +msgstr "" + +#: cinder/tests/test_rbd.py:768 cinder/volume/drivers/rbd.py:175 +msgid "flush() not supported in this version of librbd" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:260 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1507 +#, python-format +msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1510 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1515 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:60 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:61 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: 
cinder/tests/test_xiv_ds8k.py:102 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/tests/api/contrib/test_backups.py:741 +msgid "Invalid input" +msgstr "" + +#: cinder/tests/integrated/test_login.py:29 +#, python-format +msgid "volume: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:32 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:42 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:50 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:58 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:100 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:103 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:121 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:148 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:159 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:166 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/transfer/api.py:68 +msgid "Volume in unexpected state" +msgstr "" + +#: cinder/transfer/api.py:102 cinder/volume/api.py:367 +msgid "status must be available" +msgstr "" + +#: cinder/transfer/api.py:119 +#, python-format +msgid "Failed to create transfer record for %s" +msgstr "" + +#: cinder/transfer/api.py:136 +#, python-format +msgid "Attempt to transfer %s with invalid auth key." +msgstr "" + +#: cinder/transfer/api.py:156 cinder/volume/flows/api/create_volume.py:508 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/transfer/api.py:182 +#, python-format +msgid "Failed to update quota donating volume transfer id %s" +msgstr "" + +#: cinder/transfer/api.py:199 +#, python-format +msgid "Volume %s has been transferred."
+msgstr "" + +#: cinder/volume/api.py:143 +#, python-format +msgid "Unable to query if %s is in the availability zone set" +msgstr "" + +#: cinder/volume/api.py:171 cinder/volume/api.py:173 +msgid "Failed to create api volume flow" +msgstr "" + +#: cinder/volume/api.py:202 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:214 +#, python-format +msgid "Volume status must be available or error, but current status is: %s" +msgstr "" + +#: cinder/volume/api.py:224 +msgid "Volume cannot be deleted while migrating" +msgstr "" + +#: cinder/volume/api.py:229 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:285 cinder/volume/api.py:350 +#: cinder/volume/qos_specs.py:240 cinder/volume/volume_types.py:67 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:370 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:377 +msgid "status must be in-use to detach" +msgstr "" + +#: cinder/volume/api.py:388 +msgid "Volume status must be available to reserve" +msgstr "" + +#: cinder/volume/api.py:464 +msgid "Snapshot cannot be created while volume is migrating" +msgstr "" + +#: cinder/volume/api.py:468 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:490 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:502 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:553 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/api.py:581 cinder/volume/flows/api/create_volume.py:208 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:585 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:589 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:720 cinder/volume/api.py:772 +msgid "Volume status must be available/in-use." +msgstr "" + +#: cinder/volume/api.py:723 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/api.py:752 +msgid "Volume status must be available to extend." +msgstr "" + +#: cinder/volume/api.py:757 +#, python-format +msgid "" +"New size for extend must be greater than current size. (current: " +"%(size)s, extended: %(new_size)s)" +msgstr "" + +#: cinder/volume/api.py:778 +msgid "Volume is already part of an active migration" +msgstr "" + +#: cinder/volume/api.py:784 +msgid "volume must not have snapshots" +msgstr "" + +#: cinder/volume/api.py:797 +#, python-format +msgid "No available service named %s" +msgstr "" + +#: cinder/volume/api.py:803 +msgid "Destination host must be different than current host" +msgstr "" + +#: cinder/volume/api.py:833 +msgid "Source volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:837 +msgid "Destination volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:842 +#, python-format +msgid "Destination has migration_status %(stat)s, expected %(exp)s." +msgstr "" + +#: cinder/volume/api.py:853 +msgid "Volume status must be available to update readonly flag." 
+msgstr "" + +#: cinder/volume/api.py:862 +#, python-format +msgid "Unable to update type due to incorrect status on volume: %s" +msgstr "" + +#: cinder/volume/api.py:868 +#, python-format +msgid "Volume %s is already part of an active migration." +msgstr "" + +#: cinder/volume/api.py:874 +#, python-format +msgid "migration_policy must be 'on-demand' or 'never', passed: %s" +msgstr "" + +#: cinder/volume/api.py:887 +#, python-format +msgid "Invalid volume_type passed: %s" +msgstr "" + +#: cinder/volume/api.py:900 +#, python-format +msgid "New volume_type same as original: %s" +msgstr "" + +#: cinder/volume/api.py:915 +msgid "Retype cannot change encryption requirements" +msgstr "" + +#: cinder/volume/api.py:927 +msgid "Retype cannot change front-end qos specs for in-use volumes" +msgstr "" + +#: cinder/volume/driver.py:189 cinder/volume/drivers/netapp/nfs.py:174 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:282 +#, python-format +msgid "copy_data_between_volumes %(src)s -> %(dest)s." +msgstr "" + +#: cinder/volume/driver.py:295 cinder/volume/driver.py:309 +#, python-format +msgid "Failed to attach volume %(vol)s" +msgstr "" + +#: cinder/volume/driver.py:327 +#, python-format +msgid "Failed to copy volume %(src)s to %(dest)d" +msgstr "" + +#: cinder/volume/driver.py:340 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:358 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:394 +#, python-format +msgid "Unable to access the backend storage via the path %(path)s." +msgstr "" + +#: cinder/volume/driver.py:433 +#, python-format +msgid "Creating a new backup for volume %s." +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Restoring backup %(backup)s to volume %(volume)s." +msgstr "" + +#: cinder/volume/driver.py:474 +msgid "Extend volume not implemented" +msgstr "" + +#: cinder/volume/driver.py:533 cinder/volume/drivers/emc/emc_smis_iscsi.py:113 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:546 +#, python-format +msgid "ISCSI discovery attempt failed for:%s" +msgstr "" + +#: cinder/volume/driver.py:548 +#, python-format +msgid "Error from iscsiadm -m discovery: %s" +msgstr "" + +#: cinder/volume/driver.py:595 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:599 cinder/volume/drivers/emc/emc_smis_iscsi.py:156 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:696 +msgid "The volume driver requires the iSCSI initiator name in the connector." +msgstr "" + +#: cinder/volume/driver.py:726 cinder/volume/driver.py:845 +#: cinder/volume/drivers/eqlx.py:247 cinder/volume/drivers/lvm.py:359 +#: cinder/volume/drivers/zadara.py:650 +#: cinder/volume/drivers/emc/emc_smis_common.py:859 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:235 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:602 +#: cinder/volume/drivers/netapp/iscsi.py:1032 +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/nexenta/iscsi.py:538 +#: cinder/volume/drivers/windows/windows.py:205 +msgid "Updating volume stats" +msgstr "" + +#: cinder/volume/driver.py:924 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:203 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." 
+msgstr "" + +#: cinder/volume/manager.py:209 +msgid "" +"ThinLVMVolumeDriver is deprecated, please configure LVMISCSIDriver and " +"lvm_type=thin. Continuing with those settings." +msgstr "" + +#: cinder/volume/manager.py:228 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)" +msgstr "" + +#: cinder/volume/manager.py:235 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s" +msgstr "" + +#: cinder/volume/manager.py:244 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:257 +#, python-format +msgid "Failed to re-export volume %s: setting to error state" +msgstr "" + +#: cinder/volume/manager.py:264 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:271 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:273 +#, python-format +msgid "" +"Error encountered during re-exporting phase of driver initialization: " +"%(name)s" +msgstr "" + +#: cinder/volume/manager.py:283 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:328 cinder/volume/manager.py:330 +msgid "Failed to create manager volume flow" +msgstr "" + +#: cinder/volume/manager.py:374 cinder/volume/manager.py:391 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:380 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:389 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:394 +#, python-format +msgid "Cannot delete volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:422 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:427 +#, python-format +msgid "volume %s: glance metadata deleted" +msgstr "" + +#: cinder/volume/manager.py:430 +#, python-format +msgid "no glance metadata found for volume %s" +msgstr "" + +#: cinder/volume/manager.py:434 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:451 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:462 +#, python-format +msgid "snapshot %(snap_id)s: creating" +msgstr "" + +#: cinder/volume/manager.py:490 +#, python-format +msgid "" +"Failed updating %(snapshot_id)s metadata using the provided volumes " +"%(volume_id)s metadata" +msgstr "" + +#: cinder/volume/manager.py:496 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:508 cinder/volume/manager.py:518 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:526 +#, python-format +msgid "Cannot delete snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:556 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:559 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:579 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:583 +msgid "being attached by another host" +msgstr "" + +#: cinder/volume/manager.py:587 +msgid "being attached by different mode" +msgstr "" + +#: cinder/volume/manager.py:590 +msgid "status must be available or attaching" +msgstr "" + +#: cinder/volume/manager.py:698 +#, python-format +msgid 
"Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "" + +#: cinder/volume/manager.py:760 +#, python-format +msgid "Unable to fetch connection information from backend: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:807 +#, python-format +msgid "Unable to terminate volume connection: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:854 +msgid "failed to create new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:857 +msgid "timeout creating new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:880 +#, python-format +msgid "Failed to copy volume %(vol1)s to %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:909 +#, python-format +msgid "" +"migrate_volume_completion: completing migration for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:921 +#, python-format +msgid "" +"migrate_volume_completion is cleaning up an error for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:940 +#, python-format +msgid "Failed to delete migration source vol %(vol)s: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:976 +#, python-format +msgid "volume %s: calling driver migrate_volume" +msgstr "" + +#: cinder/volume/manager.py:1016 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/manager.py:1024 +#, python-format +msgid "" +"Unable to update stats, %(driver_name)s -%(driver_version)s " +"%(config_group)s driver is uninitialized." +msgstr "" + +#: cinder/volume/manager.py:1044 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/manager.py:1091 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to extend volume by %(s_size)sG, " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/manager.py:1103 +#, python-format +msgid "volume %s: extending" +msgstr "" + +#: cinder/volume/manager.py:1105 +#, python-format +msgid "volume %s: extended successfully" +msgstr "" + +#: cinder/volume/manager.py:1107 +#, python-format +msgid "volume %s: Error trying to extend volume" +msgstr "" + +#: cinder/volume/manager.py:1169 +msgid "Failed to update usages while retyping volume." +msgstr "" + +#: cinder/volume/manager.py:1170 +msgid "Failed to get old volume type quota reservations" +msgstr "" + +#: cinder/volume/manager.py:1190 +#, python-format +msgid "Volume %s: retyped succesfully" +msgstr "" + +#: cinder/volume/manager.py:1193 +#, python-format +msgid "" +"Volume %s: driver error when trying to retype, falling back to generic " +"mechanism." +msgstr "" + +#: cinder/volume/manager.py:1204 +msgid "Retype requires migration but is not allowed." +msgstr "" + +#: cinder/volume/manager.py:1212 +msgid "Volume must not have snapshots." 
+msgstr "" + +#: cinder/volume/qos_specs.py:57 +#, python-format +msgid "Valid consumer of QoS specs are: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:84 cinder/volume/qos_specs.py:105 +#: cinder/volume/qos_specs.py:155 cinder/volume/qos_specs.py:197 +#: cinder/volume/qos_specs.py:211 cinder/volume/qos_specs.py:225 +#: cinder/volume/volume_types.py:43 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:123 cinder/volume/qos_specs.py:140 +#: cinder/volume/qos_specs.py:272 cinder/volume/volume_types.py:52 +#: cinder/volume/volume_types.py:99 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/qos_specs.py:156 +#, python-format +msgid "Failed to get all associations of qos specs %s" +msgstr "" + +#: cinder/volume/qos_specs.py:189 +#, python-format +msgid "" +"Type %(type_id)s is already associated with another qos specs: " +"%(qos_specs_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:198 +#, python-format +msgid "Failed to associate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:212 +#, python-format +msgid "Failed to disassociate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:226 +#, python-format +msgid "Failed to disassociate qos specs %s." +msgstr "" + +#: cinder/volume/qos_specs.py:284 cinder/volume/volume_types.py:111 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/utils.py:144 +#, python-format +msgid "" +"Incorrect value error: %(blocksize)s, it may indicate that " +"'volume_dd_blocksize' was configured incorrectly. Fall back to default." +msgstr "" + +#: cinder/volume/volume_types.py:130 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:131 +#: cinder/volume/drivers/block_device.py:143 cinder/volume/drivers/lvm.py:654 +#: cinder/volume/drivers/lvm.py:669 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:157 cinder/volume/drivers/lvm.py:687 +#, python-format +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:183 cinder/volume/drivers/lvm.py:483 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:200 cinder/volume/drivers/lvm.py:504 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:272 cinder/volume/drivers/lvm.py:227 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:287 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:311 cinder/volume/drivers/lvm.py:300 +#: cinder/volume/drivers/zadara.py:509 cinder/volume/drivers/nexenta/nfs.py:189 +#, python-format +msgid "Creating clone of volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:380 +msgid "No free disk" +msgstr "" + +#: cinder/volume/drivers/block_device.py:393 +msgid "No big enough free disk" +msgstr "" + +#: cinder/volume/drivers/coraid.py:84 +#, python-format +msgid "Invalid ESM url scheme \"%s\". Supported https only." +msgstr "" + +#: cinder/volume/drivers/coraid.py:111 +msgid "Invalid REST handle name. Expected path." 
+msgstr "" + +#: cinder/volume/drivers/coraid.py:134 +#, python-format +msgid "Call to json.loads() failed: %(ex)s. Response: %(resp)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:224 +msgid "Session is expired. Relogin on ESM." +msgstr "" + +#: cinder/volume/drivers/coraid.py:244 +msgid "Reply is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:246 +msgid "Error message is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:284 +#, python-format +msgid "Coraid Appliance ping failed: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:297 +#, python-format +msgid "Volume \"%(name)s\" created with VSX LUN \"%(lun)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:311 +#, python-format +msgid "Volume \"%s\" deleted." +msgstr "" + +#: cinder/volume/drivers/coraid.py:315 +#, python-format +msgid "Resize volume \"%(name)s\" to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:319 +#, python-format +msgid "Repository for volume \"%(name)s\" found: \"%(repo)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:333 +#, python-format +msgid "Volume \"%(name)s\" resized. New size is %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:385 +msgid "Cannot create clone volume in different repository." +msgstr "" + +#: cinder/volume/drivers/coraid.py:505 +#, python-format +msgid "Initialize connection %(shelf)s/%(lun)s for %(name)s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:139 +#, python-format +msgid "" +"CLI output\n" +"%s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:154 +msgid "Reading CLI MOTD" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:158 +#, python-format +msgid "Setting CLI terminal width: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:162 +#, python-format +msgid "Sending CLI command: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:169 +msgid "Error executing EQL command" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:199 +#, python-format +msgid "EQL-driver: executing \"%s\"" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:208 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:383 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:215 cinder/volume/drivers/san/san.py:149 +#, python-format +msgid "Error running SSH command: %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:282 +#, python-format +msgid "Volume %s does not exist, it may have already been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:300 +#, python-format +msgid "EQL-driver: Setup is complete, group IP is %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:304 +msgid "Failed to setup the Dell EqualLogic driver" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:320 +#, python-format +msgid "Failed to create volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:329 +#, python-format +msgid "Volume %s was not found while trying to delete it" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:333 +#, python-format +msgid "Failed to delete volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:348 +#, python-format +msgid "Failed to create snapshot of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:361 +#, python-format +msgid "Failed to create volume from snapshot %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:374 +#, python-format +msgid "Failed to create clone of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:384 +#, python-format +msgid "Failed to delete snapshot %(snap)s of volume %(vol)s" +msgstr "" + +#: 
cinder/volume/drivers/eqlx.py:405 +#, python-format +msgid "Failed to initialize connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:415 +#, python-format +msgid "Failed to terminate connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:436 +#, python-format +msgid "Volume %s was not found; it may have been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:440 +#, python-format +msgid "Failed to ensure export of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:459 +#, python-format +msgid "Failed to extend_volume %(name)s from %(current_size)sGB to %(new_size)sGB" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:86 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:91 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:103 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:161 +#, python-format +msgid "Cloning volume %(src)s to volume %(dst)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:166 +msgid "Volume status must be 'available'." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:202 cinder/volume/drivers/nfs.py:122 +#: cinder/volume/drivers/netapp/nfs.py:753 +#, python-format +msgid "casted to %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:216 +msgid "Snapshot status must be \"available\" to clone." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:238 +#, python-format +msgid "snapshot: %(snap)s, volume: %(vol)s, volume_size: %(size)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:257 +#, python-format +msgid "will copy from snapshot at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:275 cinder/volume/drivers/nfs.py:172 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:373 +#, python-format +msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:403 +#, python-format +msgid "nova call result: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:405 +msgid "Call to Nova to create snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:427 +msgid "Nova returned \"error\" status while creating snapshot." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:431 +#, python-format +msgid "Status of snapshot %(id)s is now %(status)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:444 +#, python-format +msgid "Timed out while waiting for Nova update for creation of snapshot %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:456 +#, python-format +msgid "create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:457 +#, python-format +msgid "volume id: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:532 +msgid "'active' must be present when writing snap_info." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:562 +#, python-format +msgid "deleting snapshot %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:566 +msgid "Volume status must be \"available\" or \"in-use\"." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:582 +#, python-format +msgid "" +"Snapshot record for %s is not present, allowing snapshot_delete to " +"proceed."
+msgstr "" + +#: cinder/volume/drivers/glusterfs.py:587 +#, python-format +msgid "snapshot_file for this snap is %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:608 +#, python-format +msgid "No base file found for %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:625 +#, python-format +msgid "No %(base_id)s found for %(file)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:680 +#, python-format +msgid "No file found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:690 +#, python-format +msgid "No snap found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:701 +#, python-format +msgid "No file depends on %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:727 +#, python-format +msgid "Check condition failed: %s expected to be None." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:778 +msgid "Call to Nova delete snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:796 +#, python-format +msgid "status of snapshot %s is still \"deleting\"... waiting" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:802 +#, python-format +msgid "Unable to delete snapshot %(id)s, status: %(status)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:815 +#, python-format +msgid "Timed out while waiting for Nova update for deletion of snapshot %(id)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:904 +#, python-format +msgid "%s must be a valid raw or qcow2 image." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:967 +msgid "Extend volume is only supported for this driver when no snapshots exist." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:975 +#, python-format +msgid "Unrecognized backing format: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:990 +#, python-format +msgid "creating new volume at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:993 +#, python-format +msgid "file already exists at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1019 cinder/volume/drivers/nfs.py:159 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1021 +#, python-format +msgid "Available shares: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1038 +#, python-format +msgid "" +"GlusterFS share at %(dir)s is not writable by the Cinder volume service. " +"Snapshot operations will not be supported." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:96 +#, python-format +msgid "GPFS is not active. Detailed output: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:97 +#, python-format +msgid "GPFS is not running - state: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:140 +msgid "Option gpfs_mount_point_base is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:147 +msgid "Option gpfs_images_share_mode is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:153 +msgid "Option gpfs_images_dir is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:160 +#, python-format +msgid "" +"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " +"belong to different file systems" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:169 +#, python-format +msgid "" +"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in " +"cluster daemon level %(cur)s - must be at least at level %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:183 +#, python-format +msgid "%s must be an absolute path." 
+msgstr "" + +#: cinder/volume/drivers/gpfs.py:188 +#, python-format +msgid "%s is not a directory." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:197 +#, python-format +msgid "" +"The GPFS filesystem %(fs)s is not at the required release level. Current" +" level is %(cur)s, must be at least %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:556 +#, python-format +msgid "Failed to resize volume %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:604 +#, python-format +msgid "mkfs failed on volume %(vol)s, error message was: %(err)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:637 +#, python-format +msgid "" +"%s cannot be accessed. Verify that GPFS is active and file system is " +"mounted." +msgstr "" + +#: cinder/volume/drivers/lvm.py:189 +#, python-format +msgid "Unabled to delete due to existing snapshot for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:215 +#, python-format +msgid "Volume device file path %s does not exist." +msgstr "" + +#: cinder/volume/drivers/lvm.py:221 +#, python-format +msgid "Size for volume: %s not found, cannot secure delete." +msgstr "" + +#: cinder/volume/drivers/lvm.py:262 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:361 +#, python-format +msgid "Unable to update stats on non-initialized Volume Group: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:462 +#, python-format +msgid "Error creating iSCSI target, retrying creation for target: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:482 +#, python-format +msgid "volume_info:%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:518 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:519 cinder/volume/drivers/lvm.py:724 +#: cinder/volume/drivers/huawei/rest_common.py:1225 +#, python-format +msgid "%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:573 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/nfs.py:109 +msgid "Driver specific implementation needs to return mount_point_base." +msgstr "" + +#: cinder/volume/drivers/nfs.py:263 +#, python-format +msgid "Expected volume size was %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:264 +#, python-format +msgid " but size is now %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:361 +#, python-format +msgid "%s is already mounted" +msgstr "" + +#: cinder/volume/drivers/nfs.py:421 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:426 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/nfs.py:431 +#, python-format +msgid "NFS config 'nfs_oversub_ratio' invalid. Must be > 0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:439 +#, python-format +msgid "NFS config 'nfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:493 +#, python-format +msgid "Selected %s as target nfs share." 
+msgstr "" + +#: cinder/volume/drivers/nfs.py:526 +#, python-format +msgid "%s is above nfs_used_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:529 +#, python-format +msgid "%s is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:532 +#, python-format +msgid "%s reserved space is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/rbd.py:160 +#, python-format +msgid "Invalid argument - whence=%s not supported" +msgstr "" + +#: cinder/volume/drivers/rbd.py:164 +msgid "Invalid argument" +msgstr "" + +#: cinder/volume/drivers/rbd.py:183 +msgid "fileno() not supported by RBD()" +msgstr "" + +#: cinder/volume/drivers/rbd.py:210 +#, python-format +msgid "error opening rbd image %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:259 +msgid "rados and rbd python libraries not found" +msgstr "" + +#: cinder/volume/drivers/rbd.py:265 +msgid "error connecting to ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:346 cinder/volume/drivers/sheepdog.py:178 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:377 +#, python-format +msgid "clone depth exceeds limit of %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:411 +#, python-format +msgid "maximum clone depth (%d) has been reached - flattening source volume" +msgstr "" + +#: cinder/volume/drivers/rbd.py:423 +#, python-format +msgid "flattening source volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:435 +#, python-format +msgid "creating snapshot='%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:445 +#, python-format +msgid "cloning '%(src_vol)s@%(src_snap)s' to '%(dest)s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:459 +msgid "clone created successfully" +msgstr "" + +#: cinder/volume/drivers/rbd.py:468 +#, python-format +msgid "creating volume '%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:484 +#, python-format +msgid "flattening %(pool)s/%(img)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:490 +#, python-format +msgid "cloning %(pool)s/%(img)s@%(snap)s to %(dst)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:527 +msgid "volume has no backup snaps" +msgstr "" + +#: cinder/volume/drivers/rbd.py:550 +#, python-format +msgid "volume %s is not a clone" +msgstr "" + +#: cinder/volume/drivers/rbd.py:568 +#, python-format +msgid "deleting parent snapshot %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:579 +#, python-format +msgid "deleting parent %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:593 +#, python-format +msgid "volume %s no longer exists in backend" +msgstr "" + +#: cinder/volume/drivers/rbd.py:609 +msgid "volume has clone snapshot(s)" +msgstr "" + +#: cinder/volume/drivers/rbd.py:625 +#, python-format +msgid "deleting rbd volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:629 +msgid "" +"ImageBusy error raised while deleting rbd volume. This may have been " +"caused by a connection from a client that has crashed and, if so, may be " +"resolved by retrying the delete after 30 seconds has elapsed." 
+msgstr "" + +#: cinder/volume/drivers/rbd.py:642 +msgid "volume is a clone so cleaning references" +msgstr "" + +#: cinder/volume/drivers/rbd.py:696 +#, python-format +msgid "connection data: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:705 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:709 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:712 +msgid "Not an rbd snapshot" +msgstr "" + +#: cinder/volume/drivers/rbd.py:724 +#, python-format +msgid "not cloneable: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:728 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:733 +msgid "rbd image clone requires image format to be 'raw' but image {0} is '{1}'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:747 +#, python-format +msgid "Unable to open image %(loc)s: %(err)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:817 +msgid "volume backup complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:830 +msgid "volume restore complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:840 cinder/volume/drivers/sheepdog.py:195 +#, python-format +msgid "Failed to Extend Volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:845 cinder/volume/drivers/sheepdog.py:200 +#: cinder/volume/drivers/windows/windows.py:223 +#, python-format +msgid "Extend volume from %(old_size)s GB to %(new_size)s GB." +msgstr "" + +#: cinder/volume/drivers/scality.py:67 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:78 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:84 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:105 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:139 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:59 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:64 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:144 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:151 +#, python-format +msgid "" +"Failed to make httplib connection SolidFire Cluster: %s (verify san_ip " +"settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:154 +#, python-format +msgid "Failed to make httplib connection: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:161 +#, python-format +msgid "" +"Request to SolidFire cluster returned bad status: %(status)s / %(reason)s" +" (check san_login/san_password settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:166 +#, python-format +msgid "HTTP request failed, with status: %(status)s and reason: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:177 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:183 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:187 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:189 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:195 +#, python-format +msgid "Detected 
xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:202 +#: cinder/volume/drivers/solidfire.py:271 +#: cinder/volume/drivers/solidfire.py:366 +#, python-format +msgid "API response: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:222 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:253 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:315 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:398 +msgid "Failed to get model update from clone" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:410 +#, python-format +msgid "Failed volume create: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:425 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:460 +#, python-format +msgid "Failed to get SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:469 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:478 +#, python-format +msgid "Volume %s, not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:481 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:550 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:554 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:556 +msgid "This usually means the volume was never successfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:569 +#, python-format +msgid "Failed to delete SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:572 +#: cinder/volume/drivers/solidfire.py:646 +#: cinder/volume/drivers/solidfire.py:709 +#: cinder/volume/drivers/solidfire.py:734 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:575 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:579 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:587 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:638 +msgid "Entering SolidFire extend_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:660 +msgid "Leaving SolidFire extend_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:665 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:673 +msgid "Failed to get updated stats" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:703 +#: cinder/volume/drivers/solidfire.py:728 +msgid "Entering SolidFire attach_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:773 +msgid "Leaving SolidFire transfer volume" +msgstr "" + +#: cinder/volume/drivers/zadara.py:236 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:260 +#, python-format +msgid "Operation completed. 
%(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:357 +#, python-format +msgid "Pool %(name)s: %(total)sGB total, %(free)sGB free" +msgstr "" + +#: cinder/volume/drivers/zadara.py:408 cinder/volume/drivers/zadara.py:531 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:438 +#, python-format +msgid "Create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:445 cinder/volume/drivers/zadara.py:490 +#: cinder/volume/drivers/zadara.py:516 +#, python-format +msgid "Volume %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:456 +#, python-format +msgid "Delete snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:464 +#, python-format +msgid "snapshot: original volume %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:472 +#, python-format +msgid "snapshot: snapshot %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:483 +#, python-format +msgid "Creating volume from snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:496 +#, python-format +msgid "Snapshot %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:614 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:40 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:79 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:83 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:91 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:98 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:107 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:115 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:130 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:137 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:144 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:152 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:157 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:167 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:177 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:188 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:197 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:218 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:230 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:241 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:257 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:266 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:278 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:287 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:292 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:302 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:312 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:321 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:342 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. 
Return code: %(rc)lu. Error: %(error)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:354
+#, python-format
+msgid ""
+"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s."
+" Successfully cloned volume from source volume. Finding the clone "
+"relationship."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:365
+#, python-format
+msgid ""
+"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s."
+" Remove the clone relationship. Method: ModifyReplicaSynchronization "
+"ReplicationService: %(service)s Operation: 8 Synchronization: "
+"%(sync_name)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:381
+#, python-format
+msgid ""
+"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s"
+" Return code: %(rc)lu"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:390
+#, python-format
+msgid ""
+"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: "
+"%(srcname)s. Return code: %(rc)lu. Error: %(error)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:402
+#, python-format
+msgid ""
+"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: "
+"%(srcname)s Return code: %(rc)lu."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:411
+msgid "Entering delete_volume."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:413
+#, python-format
+msgid "Delete Volume: %(volume)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:420
+#, python-format
+msgid "Volume %(name)s not found on the array. No volume to delete."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:430
+#, python-format
+msgid ""
+"Error Delete Volume: %(volumename)s. Storage Configuration Service not "
+"found."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:438
+#, python-format
+msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:442
+#, python-format
+msgid ""
+"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigService: "
+"%(service)s TheElement: %(vol_instance)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:456
+#, python-format
+msgid ""
+"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: "
+"%(error)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:465
+#, python-format
+msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:472
+msgid "Entering create_snapshot."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:476
+#, python-format
+msgid "Create snapshot: %(snapshot)s: volume: %(volume)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:488
+#, python-format
+msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:495
+#: cinder/volume/drivers/emc/emc_smis_common.py:497
+#: cinder/volume/drivers/emc/emc_smis_common.py:567
+#, python-format
+msgid "Cannot find Replication Service to create snapshot for volume %s."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:502
+#, python-format
+msgid ""
+"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s "
+"Source: %(volume)s Replication Service: %(service)s ElementName: "
+"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s."
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:518 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:527 +#, python-format +msgid "" +"Error Create Snapshot: %(snapshot)s Volume: %(volume)s Error: " +"%(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:535 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:541 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:545 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:551 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:559 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:574 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:590 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:599 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:611 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:621 +#, python-format +msgid "Create export: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:626 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:648 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:663 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:674 +#, python-format +msgid "Error mapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:678 +#, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:694 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:707 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:711 +#, python-format +msgid "HidePaths for volume %s completed successfully." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:724 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:739 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:744 +#, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:757 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:770 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:775 +#, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:781 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:790 +#: cinder/volume/drivers/emc/emc_smis_common.py:820 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:804 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:810 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:834 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:840 +#, python-format +msgid "Volume %s is already mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:852 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:884 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:887 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:903 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:906 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:928 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:948 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:952 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:959 +msgid "Cannot connect to ECOM server" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:971 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:984 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:997 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1010 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1054 +#, python-format +msgid "Pool %(storage_type)s is not found." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1060 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1066 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1082 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1114 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1117 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1130 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1158 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1184 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1188 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1248 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1289 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1302 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1314 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1326 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1361 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1404 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1409 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1419 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1441 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1463 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1491 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1520 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1526 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1538 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1548 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1550 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1566 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:152 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:161 +#, python-format +msgid "Cannot find device number for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:191 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:198 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:215 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:70 +#, python-format +msgid "Range: start LU: %(start)s, end LU: %(end)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:84 +#, python-format +msgid "setting LU upper (end) limit to %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:92 +#, python-format +msgid "%(element)s: %(val)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:103 cinder/volume/drivers/hds/hds.py:105 +#, python-format +msgid "XML exception reading parameter: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:178 +#, python-format +msgid "portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:197 +#, python-format +msgid "No configuration found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:250 +#, python-format +msgid "HDP not found: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:289 +#, python-format +msgid "iSCSI portal not found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:327 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:355 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is cloned." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:372 +#, python-format +msgid "LUN %(lun)s extended to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:395 +#, python-format +msgid "delete lun %(lun)s on %(name)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:480 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created from snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:503 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is created as snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:522 +#, python-format +msgid "LUN %s is deleted." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:57 +msgid "_instantiate_driver: configuration not found." 
+msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:64 +#, python-format +msgid "" +"_instantiate_driver: Loading %(protocol)s driver for Huawei OceanStor " +"%(product)s series storage arrays." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:84 +#, python-format +msgid "" +"\"Product\" or \"Protocol\" is illegal. \"Product\" should be set to " +"either T, Dorado or HVS. \"Protocol\" should be set to either iSCSI or " +"FC. Product: %(product)s Protocol: %(protocol)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:74 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s host: %(host)s initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:92 +#: cinder/volume/drivers/huawei/huawei_t.py:461 +#, python-format +msgid "initialize_connection: Target FC ports WWNS: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:101 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(ini)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:159 +#: cinder/volume/drivers/huawei/rest_common.py:1278 +#, python-format +msgid "" +"_get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " +"check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:206 +#: cinder/volume/drivers/huawei/rest_common.py:1083 +#, python-format +msgid "_get_tgt_iqn: iSCSI IP is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:234 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:248 +#, python-format +msgid "" +"_get_iscsi_tgt_port_info: Failed to get iSCSI port info. Please make sure" +" the iSCSI port IP %s is configured in array." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:323 +#: cinder/volume/drivers/huawei/huawei_t.py:552 +#, python-format +msgid "" +"terminate_connection: volume: %(vol)s, host: %(host)s, connector: " +"%(initiator)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:351 +#, python-format +msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:436 +msgid "validate_connector: The FC driver requires thewwpns in the connector." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:443 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:578 +#, python-format +msgid "_remove_fc_ports: FC port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:40 +#, python-format +msgid "parse_xml_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:129 +#, python-format +msgid "_get_host_os_type: Host %(ip)s OS type is %(os)s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:59 +#, python-format +msgid "HVS Request URL: %(url)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:60 +#, python-format +msgid "HVS Request Data: %(data)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:73 +#, python-format +msgid "HVS Response Data: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:75 +#, python-format +msgid "Bad response from server: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:82 +msgid "JSON transfer error" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:102 +#, python-format +msgid "Login error, reason is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:166 +#, python-format +msgid "" +"%(err)s\n" +"result: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:173 +#, python-format +msgid "%s \"data\" was not in result." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:208 +msgid "Can't find the Qos policy in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:246 +msgid "Can't find lun or lun group in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:280 +#, python-format +msgid "Invalid resource pool: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:298 +#, python-format +msgid "Get pool info error, pool name is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:327 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:354 +#, python-format +msgid "_stop_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:474 +#, python-format +msgid "" +"_mapping_hostgroup_and_lungroup: lun_group: %(lun_group)sview_id: " +"%(view_id)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:511 +#: cinder/volume/drivers/huawei/rest_common.py:543 +#, python-format +msgid "initiator name:%(initiator_name)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:527 +#, python-format +msgid "host lun id is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:553 +#, python-format +msgid "the free wwns %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:574 +#, python-format +msgid "the fc server properties is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:688 +#, python-format +msgid "JSON transfer data error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:874 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:937 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:964 +#, python-format +msgid "" +"PrefetchType config is wrong. PrefetchType must in 1,2,3,4. fetchtype " +"is:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:970 +msgid "Use default prefetch fetchtype. Prefetch fetchtype:Intelligent." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:982 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal.LUNcopy name: " +"%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1056 +#, python-format +msgid "" +"_get_iscsi_port_info: Failed to get iscsi port info through config IP " +"%(ip)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1101 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1124 +#, python-format +msgid "_parse_volume_type: type id: %(type_id)s config parameter is: %(params)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1157 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the configuration file " +"%(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1162 +#, python-format +msgid "The config parameters are: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1239 +#: cinder/volume/drivers/huawei/ssh_common.py:118 +#: cinder/volume/drivers/huawei/ssh_common.py:1265 +#, python-format +msgid "_check_conf_file: Config file invalid. %s must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1246 +#: cinder/volume/drivers/huawei/ssh_common.py:125 +msgid "_check_conf_file: Config file invalid. StoragePool must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1256 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1300 +msgid "Can not find lun in array" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:54 +#, python-format +msgid "ssh_read: Read SSH timeout. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:70 +msgid "No response message. Please check system status." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:101 +#: cinder/volume/drivers/huawei/ssh_common.py:1249 +msgid "do_setup" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:135 +#: cinder/volume/drivers/huawei/ssh_common.py:1287 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType is invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:169 +#, python-format +msgid "_get_login_info: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:224 +#, python-format +msgid "create_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:242 +#, python-format +msgid "" +"_name_translate: Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:279 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the element in configuration " +"file %(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:373 +#: cinder/volume/drivers/huawei/ssh_common.py:1451 +#, python-format +msgid "LUNType must be \"Thin\" or \"Thick\". LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:395 +msgid "" +"_parse_conf_lun_params: Use default prefetch type. 
Prefetch type: " +"Intelligent" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:421 +#, python-format +msgid "" +"_get_maximum_capacity_pool_id: Failed to get pool id. Please check config" +" file and make sure the StoragePool %s is created in storage array." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:436 +#, python-format +msgid "CLI command: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:466 +#, python-format +msgid "" +"_execute_cli: Can not connect to IP %(old)s, try to connect to the other " +"IP %(new)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:501 +#, python-format +msgid "_execute_cli: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:511 +#, python-format +msgid "delete_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:516 +#, python-format +msgid "delete_volume: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:570 +#, python-format +msgid "" +"create_volume_from_snapshot: snapshot name: %(snapshot)s, volume name: " +"%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:580 +#, python-format +msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:650 +#, python-format +msgid "_wait_for_luncopy: LUNcopy %(luncopyname)s status is %(status)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:688 +#, python-format +msgid "create_cloned_volume: src volume: %(src)s, tgt volume: %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:697 +#, python-format +msgid "Source volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:739 +#, python-format +msgid "" +"extend_volume: extended volume name: %(extended_name)s new added volume " +"name: %(added_name)s new added volume size: %(added_size)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:747 +#, python-format +msgid "extend_volume: volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:779 +#, python-format +msgid "create_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:785 +msgid "create_snapshot: Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:792 +#, python-format +msgid "create_snapshot: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:855 +#, python-format +msgid "delete_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:865 +#, python-format +msgid "" +"delete_snapshot: Can not delete snapshot %s for it is a source LUN of " +"LUNCopy." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:873 +#, python-format +msgid "delete_snapshot: Snapshot %(snap)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:916 +#, python-format +msgid "" +"%(func)s: %(msg)s\n" +"CLI command: %(cmd)s\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:933 +#, python-format +msgid "map_volume: Volume %s was not found." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1079 +#, python-format +msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1102 +#, python-format +msgid "remove_map: Host %s does not exist." 
+msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1106 +#, python-format +msgid "remove_map: Volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1119 +#, python-format +msgid "remove_map: No map between host %(host)s and volume %(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1138 +#, python-format +msgid "" +"_delete_map: There are IOs accessing the system. Retry to delete host map" +" %(mapid)s 10s later." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1146 +#, python-format +msgid "" +"_delete_map: Failed to delete host map %(mapid)s.\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1185 +msgid "_update_volume_stats: Updating volume stats." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1277 +msgid "_check_conf_file: Config file invalid. StoragePool must be specified." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1311 +msgid "" +"_get_device_type: The driver only supports Dorado5100 and Dorado 2100 G2 " +"now." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1389 +#, python-format +msgid "" +"create_volume_from_snapshot: %(device)s does not support create volume " +"from snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1396 +#, python-format +msgid "create_cloned_volume: %(device)s does not support clone volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1404 +#, python-format +msgid "extend_volume: %(device)s does not support extend volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1413 +#, python-format +msgid "create_snapshot: %(device)s does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:132 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:142 +#, python-format +msgid "Failed getting details for pool %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:179 +msgid "do_setup: No configured nodes." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:182 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:186 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:190 +msgid "Unable to determine system name" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:193 +msgid "Unable to determine system id" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:196 +msgid "Unable to determine pool extent size" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:203 +#: cinder/volume/drivers/netapp/iscsi.py:122 +#: cinder/volume/drivers/netapp/nfs.py:639 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:157 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:209 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:217 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:225 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:235 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:254 +msgid "The connector does not contain the required information." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:277 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:297 +msgid "CHAP secret exists for host but CHAP is disabled" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:302 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:314 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:316 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:333 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:342 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:365 +msgid "" +"Could not get FC connection information for the host-volume connection. " +"Is the host configured properly for FC connections?" 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:380 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:385 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:403 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:411 +msgid "terminate_connection: Failed to get host name from connector." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:421 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:447 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:459 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:470 +#, python-format +msgid "enter: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:474 +msgid "extend_volume: Extending a volume with snapshots is not supported." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:481 +#, python-format +msgid "leave: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:497 +#, python-format +msgid "enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:523 +#, python-format +msgid "leave: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:540 +#, python-format +msgid "" +"enter: retype: id=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:581 +#, python-format +msgid "" +"exit: retype: ild=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:622 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:623 +msgid "_update_volume_stats: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:44 +#, python-format +msgid "Could not find key in output of command %(cmd)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:64 +#, python-format +msgid "Failed to get code level (%s)." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:86 +#, python-format +msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:143 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:165 +#, python-format +msgid "Failed to find host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:178 +#, python-format +msgid "enter: get_host_from_connector: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:207 +#, python-format +msgid "leave: get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:218 +#, python-format +msgid "enter: create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:224 +msgid "create_host: Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:235 +msgid "create_host: No initiators or wwpns supplied." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:265 +#, python-format +msgid "leave: create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:275 +#, python-format +msgid "enter: map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:301 +#, python-format +msgid "" +"leave: map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host " +"%(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:311 +#, python-format +msgid "enter: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:318 +#, python-format +msgid "unmap_vol_from_host: No mapping of volume %(vol_name)s to any host found." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:324 +#, python-format +msgid "" +"unmap_vol_from_host: Multiple mappings of volume %(vol_name)s found, no " +"host specified." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:336 +#, python-format +msgid "" +"unmap_vol_from_host: No mapping of volume %(vol_name)s to host %(host) " +"found." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:348 +#, python-format +msgid "leave: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:377 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:383 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:390 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:397 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:402 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:408 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:417 +#, python-format +msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:452 +msgid "Protocol must be specified as ' iSCSI' or ' FC'." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:495 +#, python-format +msgid "enter: create_vdisk: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:498 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:525 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping%(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:535 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within theallotted %(to)d " +"seconds timeout. Terminating." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:544 +#, python-format +msgid "" +"enter: run_flashcopy: execute FlashCopy from source %(source)s to target " +"%(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:552 +#, python-format +msgid "leave: run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:572 +#, python-format +msgid "Loopcall: _check_vdisk_fc_mappings(), vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:595 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:631 +#, python-format +msgid "Calling _ensure_vdisk_no_fc_mappings: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:639 +#, python-format +msgid "enter: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:641 +#, python-format +msgid "Tried to delete non-existant vdisk %s." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:645 +#, python-format +msgid "leave: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:649 +#, python-format +msgid "enter: create_copy: snapshot %(src)s to %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:654 +#, python-format +msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:669 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt)s from vdisk %(src)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:691 +msgid "migrate_volume started without a vdisk copy in the expected pool." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:743 +#, python-format +msgid "" +"Ignore change IO group as storage code level is %(code_level)s, below " +"then 6.4.0.0" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:35 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:211 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:244 +#, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:53 +#, python-format +msgid "Expected no output from CLI command %(cmd)s, got %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:65 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:256 +#, python-format +msgid "" +"Failed to parse CLI output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:142 +msgid "Must pass wwpn or host to lsfabric." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:158 +#, python-format +msgid "Did not find success message nor error for %(fun)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:163 +msgid "" +"storwize_svc_multihostmap_enabled is set to False, not allowing multi " +"host mapping." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:347 +#, python-format +msgid "Did not find expected key %(key)s in %(fun)s: %(raw)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:382 +#, python-format +msgid "" +"Unexpected CLI response: header/row mismatch. header: %(header)s, row: " +"%(row)s" +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:419 +#, python-format +msgid "No element by given name %s." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:440 +msgid "Not a valid value for NaElement." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:444 +msgid "NaElement name cannot be null." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:468 +msgid "Type cannot be converted into NaElement." 
+msgstr "" + +#: cinder/volume/drivers/netapp/common.py:75 +msgid "Required configuration not found" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:103 +#, python-format +msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:109 +#, python-format +msgid "Storage family %s is not supported" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:116 +#, python-format +msgid "No default storage protocol found for storage family %(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:123 +#, python-format +msgid "" +"Protocol %(storage_protocol)s is not supported for storage family " +"%(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:130 +#, python-format +msgid "" +"NetApp driver of family %(storage_family)s and protocol " +"%(storage_protocol)s loaded" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:139 +msgid "Only loading netapp drivers supported." +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:158 +#, python-format +msgid "" +"The configured NetApp driver is deprecated. Please refer the link to " +"resolve the issue '%s'." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:69 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:105 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:150 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:166 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:175 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:191 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:227 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:232 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:238 +#, python-format +msgid "Failed to get LUN target details for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:249 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:252 +#, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:290 +#, python-format +msgid "Snapshot %s deletion successful" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:310 +#: cinder/volume/drivers/netapp/iscsi.py:565 +#: cinder/volume/drivers/netapp/nfs.py:99 +#: cinder/volume/drivers/netapp/nfs.py:206 +#, python-format +msgid "Resizing %s failed. Cleaning volume." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:325 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:412 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:431 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:511 +msgid "Object is not a NetApp LUN." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:543 +#, python-format +msgid "Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:545 +#, python-format +msgid "Error getting lun attribute. Exception: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:600 +#, python-format +msgid "No need to extend volume %s as it is already the requested new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:606 +#, python-format +msgid "Resizing lun %s directly to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:633 +#, python-format +msgid "Lun %(path)s geometry failed. Message - %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:662 +#, python-format +msgid "Moving lun %(name)s to %(new_name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:677 +#, python-format +msgid "Resizing lun %s using sub clone to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:684 +#, python-format +msgid "%s cannot be sub clone resized as it is hosted on compressed volume" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:690 +#, python-format +msgid "%s cannot be sub clone resized as it contains no blocks." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:707 +#, python-format +msgid "Post clone resize lun %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:718 +#, python-format +msgid "Failure staging lun %s to tmp." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:723 +#, python-format +msgid "Failure moving new cloned lun to %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:727 +#, python-format +msgid "Failure deleting staged tmp lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:730 +#, python-format +msgid "Unknown exception in post clone resize lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:732 +#, python-format +msgid "Exception details: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:736 +msgid "Getting lun block count." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:741 +#, python-format +msgid "Failure getting lun info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:785 +#, python-format +msgid "Failed to get vol with required size and extra specs for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:796 +#, python-format +msgid "Error provisioning vol %(name)s on %(volume)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:841 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:982 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:986 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1087 +msgid "Cluster ssc is not updated. No volume stats found." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1149 +#: cinder/volume/drivers/netapp/nfs.py:1080 +msgid "Unsupported ONTAP version. ONTAP version 7.3.1 and above is supported." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1153 +#: cinder/volume/drivers/netapp/nfs.py:1084 +#: cinder/volume/drivers/netapp/utils.py:320 +msgid "Api version could not be determined." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1164 +#, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1273 +#, python-format +msgid "Error finding luns for volume %s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1390 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1393 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1456 +msgid "Volume refresh job already running. Returning..." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1462 +#, python-format +msgid "Error refreshing vol capacity. Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1470 +#, python-format +msgid "Refreshing capacity info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:104 +#: cinder/volume/drivers/netapp/nfs.py:211 +#, python-format +msgid "NFS file %s not discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:224 +#, python-format +msgid "Copied image to volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:230 +#, python-format +msgid "Registering image in cache %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:237 +#, python-format +msgid "" +"Exception while registering image %(image_id)s in cache. Exception: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:250 +#, python-format +msgid "Found cache file for image %(image_id)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:263 +#, python-format +msgid "Cloning img from cache for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:271 +msgid "Image cache cleaning in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:282 +msgid "Image cache cleaning in progress." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:293 +#, python-format +msgid "Cleaning cache for share %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:298 +#, python-format +msgid "Files to be queued for deletion %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:305 +#, python-format +msgid "Exception during cache cleaning %(share)s. Message - %(ex)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:311 +msgid "Image cache cleaning done." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:336 +#, python-format +msgid "Bytes to free %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:343 +#, python-format +msgid "Delete file path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:358 +#, python-format +msgid "Deleting file at path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:363 +#, python-format +msgid "Exception during deleting %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:395 +#, python-format +msgid "Unexpected exception in cloning image %(image_id)s. 
Message: %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:411 +#, python-format +msgid "Cloning image %s from cache" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:415 +#, python-format +msgid "Cache share: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:425 +#, python-format +msgid "Unexpected exception during image cloning in share %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:431 +#, python-format +msgid "Cloning image %s directly in share" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:436 +#, python-format +msgid "Share is cloneable %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:443 +#, python-format +msgid "Image is raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:450 +#, python-format +msgid "Image will locally be converted to raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:457 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:467 +#, python-format +msgid "Performing post clone for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:474 +msgid "NFS file could not be discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:478 +msgid "Checking file for resize" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:482 +#, python-format +msgid "Resizing file to %sG" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:488 +msgid "Resizing image file failed." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:510 +msgid "Discover file retries exhausted." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:529 +#, python-format +msgid "Image location not in the expected format %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:557 +#, python-format +msgid "Found possible share matches %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:561 +msgid "Unexpected exception while short listing used share." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:599 +#, python-format +msgid "Extending volume %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:710 +#, python-format +msgid "Shares on vserver %s will only be used for provisioning." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:714 +#: cinder/volume/drivers/netapp/nfs.py:892 +msgid "No vserver set in config. SSC will be disabled." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:757 +#, python-format +msgid "Exception creating vol %(name)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:765 +#, python-format +msgid "Volume %s could not be created on shares." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:815 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:856 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:862 +#, python-format +msgid "" +"Cloning with params volume %(volume)s, src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:924 +msgid "No cluster ssc stats found. Wait for next volume stats update." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:941 +msgid "No shares found hence skipping ssc refresh." 
+msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:978 +#: cinder/volume/drivers/netapp/nfs.py:1221 +#, python-format +msgid "Shortlisted del elg files %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:983 +#: cinder/volume/drivers/netapp/nfs.py:1226 +#, python-format +msgid "Getting file usage for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:988 +#: cinder/volume/drivers/netapp/nfs.py:1231 +#, python-format +msgid "file-usage for path %(path)s is %(bytes)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1005 +#: cinder/volume/drivers/netapp/nfs.py:1268 +#, python-format +msgid "Share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1007 +#: cinder/volume/drivers/netapp/nfs.py:1270 +#, python-format +msgid "No share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1038 +#, python-format +msgid "Found volume %(vol)s for share %(share)s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1129 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1139 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:241 +#, python-format +msgid "Unexpected error while creating ssc vol list. Message - %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:272 +#, python-format +msgid "Exception querying aggr options. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:313 +#, python-format +msgid "Exception querying sis information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:347 +#, python-format +msgid "Exception querying mirror information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:379 +#, python-format +msgid "Exception querying storage disk. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:421 +#, python-format +msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:455 +#, python-format +msgid "Successfully completed stale refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:482 +#, python-format +msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:488 +#, python-format +msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:500 +msgid "Backend not a VolumeDriver." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:502 +msgid "Backend server not NaServer." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:505 +msgid "ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:517 +msgid "refresh stale ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:616 +msgid "Fatal error: User not permitted to query NetApp volumes." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:623 +#, python-format +msgid "" +"The user does not have access or sufficient privileges to use all ssc " +"apis. The ssc features %s may not work as expected." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:122 +msgid "ems executed successfully." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:124 +#, python-format +msgid "Failed to invoke ems. 
Message : %s" +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:137 +msgid "" +"It is not the recommended way to use drivers by NetApp. Please use " +"NetAppDriver to achieve the functionality." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:160 +msgid "Requires an NaServer instance." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:317 +msgid "Unsupported Clustered Data ONTAP version." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:99 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:150 +#, python-format +msgid "Extending volume: %(id)s New size: %(size)s GB" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:166 +#, python-format +msgid "Volume %s does not exist, it seems it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:179 +#, python-format +msgid "Cannot delete snapshot %(origin): %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:190 +#, python-format +msgid "Creating temp snapshot of the original volume: %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:200 +#: cinder/volume/drivers/nexenta/nfs.py:200 +#, python-format +msgid "Volume creation failed, deleting created snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:205 +#: cinder/volume/drivers/nexenta/nfs.py:205 +#, python-format +msgid "Failed to delete zfs snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:223 +#, python-format +msgid "Enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:250 +#, python-format +msgid "Remote NexentaStor appliance at %s should be SSH-bound." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:267 +#, python-format +msgid "" +"Cannot send source snapshot %(src)s to destination %(dst)s. Reason: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:275 +#, python-format +msgid "" +"Cannot delete temporary source snapshot %(src)s on NexentaStor Appliance:" +" %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:281 +#, python-format +msgid "Cannot delete source volume %(volume)s on NexentaStor Appliance: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:318 +#, python-format +msgid "Snapshot %s does not exist, it seems it was already deleted." 
+msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:439 +#: cinder/volume/drivers/windows/windows_utils.py:230 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:449 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:461 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:471 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:481 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:514 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:522 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:83 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:88 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:89 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:90 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:96 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:85 +#, python-format +msgid "Volume %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:89 +#, python-format +msgid "Folder %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:114 +#, python-format +msgid "Creating folder on Nexenta Store %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:146 +#, python-format +msgid "Cannot destroy created folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:176 +#, python-format +msgid "Cannot destroy cloned folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:227 +#, python-format +msgid "Folder %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:237 +#: cinder/volume/drivers/nexenta/nfs.py:268 +#, python-format +msgid "Snapshot %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:302 +#, python-format +msgid "Creating regular file: %s.This may take some time." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:313 +#, python-format +msgid "Regular file: %s created." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:365 +#, python-format +msgid "Sharing folder %s on Nexenta Store" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:393 +#, python-format +msgid "Shares loaded: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/utils.py:46 +#, python-format +msgid "Invalid value: \"%s\"" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:93 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:99 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:107 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:137 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:190 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:246 +#, python-format +msgid "Snapshot info: %(name)s => %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:321 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:79 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:166 +#, python-format +msgid "Invalid hp3parclient version. Version %s or greater required." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:179 +#, python-format +msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:193 +#, python-format +msgid "HP3PARCommon %(common_ver)s, hp3parclient %(rest_ver)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:212 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:494 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:228 +#, python-format +msgid "Failed to get domain because CPG (%s) doesn't exist on array." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:247 +#, python-format +msgid "Error extending volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:347 +#, python-format +msgid "command %s failed" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:390 +#, python-format +msgid "Error running ssh command: %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:586 +#, python-format +msgid "VV Set %s does not exist." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:633 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:684 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:752 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1004 +#, python-format +msgid "Failure in update_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1019 +#, python-format +msgid "Failure in clear_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1031 +#, python-format +msgid "Error attaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1039 +#, python-format +msgid "Error detaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:124 +#, python-format +msgid "Invalid IP address format '%s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:158 +#, python-format +msgid "" +"Found invalid iSCSI IP address(s) in configuration option(s) " +"hp3par_iscsi_ips or iscsi_ip_address '%s.'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:164 +msgid "At least one valid iSCSI IP address must be set." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:266 +msgid "Least busy iSCSI port not found, using first iSCSI port in list." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:75 +#, python-format +msgid "Failure while invoking function: %(func)s. Error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:162 +#, python-format +msgid "Error while terminating session: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:165 +msgid "Successfully established connection to the server." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:172 +#, python-format +msgid "Error while logging out the user: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:218 +#, python-format +msgid "" +"Not authenticated error occurred. Will create session and try API call " +"again: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:258 +#, python-format +msgid "Task: %(task)s progress: %(prog)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:262 +#, python-format +msgid "Task %s status: success." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:266 +#: cinder/volume/drivers/vmware/api.py:271 +#, python-format +msgid "Task: %(task)s failed with error: %(err)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:290 +msgid "Lease is ready." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:294 +msgid "Lease initializing..." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:304 +#, python-format +msgid "Error: unknown lease state %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:51 +#, python-format +msgid "Read %(bytes)s out of %(max)s from ThreadSafePipe." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:56 +#, python-format +msgid "Completed transfer of size %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:102 +#, python-format +msgid "Initiating image service update on image: %(image)s with meta: %(meta)s" +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:117 +#, python-format +msgid "Glance image: %s is now active." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:123 +#, python-format +msgid "Glance image: %s is in killed state." 
+msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:132 +#, python-format +msgid "Glance image %(id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:171 +#, python-format +msgid "" +"Exception during HTTP connection close in VMwareHTTPWrite. Exception is " +"%s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:203 +#: cinder/volume/drivers/vmware/read_write_util.py:292 +msgid "Could not retrieve URL from lease." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:206 +#, python-format +msgid "Opening vmdk url: %s for write." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:231 +#, python-format +msgid "Written %s bytes to vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:242 +#: cinder/volume/drivers/vmware/read_write_util.py:318 +#, python-format +msgid "Updating progress to %s percent." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:258 +#: cinder/volume/drivers/vmware/read_write_util.py:334 +msgid "Lease released." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:260 +#: cinder/volume/drivers/vmware/read_write_util.py:336 +#, python-format +msgid "Lease is already in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:295 +#, python-format +msgid "Opening vmdk url: %s for read." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:307 +#, python-format +msgid "Read %s bytes from vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:150 +#, python-format +msgid "Error(s): %s occurred in the call to RetrievePropertiesEx." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:189 +#, python-format +msgid "No such SOAP method %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:198 +#, python-format +msgid "httplib error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:209 +#, python-format +msgid "Socket error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:218 +#, python-format +msgid "Type error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:225 +#, python-format +msgid "Error in %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:112 +#, python-format +msgid "Returning spec value %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:115 +#, python-format +msgid "Invalid spec value: %s specified." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:118 +#, python-format +msgid "Returning default spec value: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:169 +#, python-format +msgid "%s not set." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:174 +#, python-format +msgid "Successfully setup driver: %(driver)s for server: %(ip)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:219 +msgid "Backing not available, no operation to be performed." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:287 +#, python-format +msgid "" +"Unable to pick datastore to accommodate %(size)s bytes from the " +"datastores: %(dss)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:293 +#, python-format +msgid "" +"Selected datastore: %(datastore)s with %(host_count)d connected host(s) " +"for the volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:375 +#, python-format +msgid "" +"Unable to find suitable datastore for volume of size: %(vol)s GB under " +"host: %(host)s. 
More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:385 +#, python-format +msgid "Unable to find host to accommodate a disk of size: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:412 +#, python-format +msgid "" +"Unable to find suitable datastore for volume: %(vol)s under host: " +"%(host)s. More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:422 +#, python-format +msgid "Unable to create volume: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:441 +#, python-format +msgid "The instance: %s for which initialize connection is called, exists." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:448 +#, python-format +msgid "There is no backing for the volume: %s. Need to create one." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:456 +msgid "The instance for which initialize connection is called, does not exist." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:461 +#, python-format +msgid "Trying to boot from an empty volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:470 +#, python-format +msgid "" +"Returning connection_info: %(info)s for volume: %(volume)s with " +"connector: %(connector)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:518 +#, python-format +msgid "Snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:523 +#, python-format +msgid "There is no backing, so will not create snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:528 +#, python-format +msgid "Successfully created snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:549 +#, python-format +msgid "Delete snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:554 +#, python-format +msgid "There is no backing, and so there is no snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:558 +#, python-format +msgid "Successfully deleted snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:586 +#, python-format +msgid "Successfully cloned new backing: %(back)s from source VMDK file: %(vmdk)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:603 +#, python-format +msgid "" +"There is no backing for the source volume: %(svol)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:633 +#, python-format +msgid "" +"There is no backing for the source snapshot: %(snap)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:642 +#: cinder/volume/drivers/vmware/vmdk.py:982 +#, python-format +msgid "" +"There is no snapshot point for the snapshoted volume: %(snap)s. Not " +"creating any backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:678 +#, python-format +msgid "Cannot create image of disk format: %s. Only vmdk disk format is accepted." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:713 +#: cinder/volume/drivers/vmware/vmdk.py:771 +#, python-format +msgid "Fetching glance image: %(id)s to server: %(host)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:722 +#: cinder/volume/drivers/vmware/vmdk.py:792 +#, python-format +msgid "Done copying image: %(id)s to volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:725 +#, python-format +msgid "" +"Exception in copy_image_to_volume: %(excep)s. Deleting the backing: " +"%(back)s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:746 +#, python-format +msgid "Exception in _select_ds_for_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:749 +#, python-format +msgid "Selected datastore %(ds)s for new volume of size %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:784 +#, python-format +msgid "Exception in copy_image_to_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:787 +#, python-format +msgid "Deleting the backing: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:808 +#, python-format +msgid "Copy glance image: %s to create new volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:842 +msgid "Upload to glance of attached volume is not supported." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:847 +#, python-format +msgid "Copy Volume: %s to new image." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:853 +#, python-format +msgid "Backing not found, creating for volume: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:872 +#, python-format +msgid "Done copying volume %(vol)s to a new image %(img)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:922 +#, python-format +msgid "Relocating volume: %(backing)s to %(ds)s and %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:961 +#: cinder/volume/drivers/vmware/volumeops.py:630 +#, python-format +msgid "Successfully created clone: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:974 +#, python-format +msgid "" +"There is no backing for the snapshoted volume: %(snap)s. Not creating any" +" backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1010 +#, python-format +msgid "" +"There is no backing for the source volume: %(src)s. Not creating any " +"backing for volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1018 +#, python-format +msgid "Linked clone of source volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:94 +#, python-format +msgid "Downloading image: %s from glance image server as a flat vmdk file." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:107 +#: cinder/volume/drivers/vmware/vmware_images.py:126 +#, python-format +msgid "Downloaded image: %s from glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:113 +#, python-format +msgid "Downloading image: %s from glance image server using HttpNfc import." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:132 +#, python-format +msgid "Uploading image: %s to the Glance image server using HttpNfc export." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:158 +#, python-format +msgid "Uploaded image: %s to the Glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:87 +#, python-format +msgid "Did not find any backing with name: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:94 +#, python-format +msgid "Deleting the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:97 +#, python-format +msgid "Initiated deletion of VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:99 +#, python-format +msgid "Deleted the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:237 +#, python-format +msgid "There are no valid datastores attached to %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:289 +#, python-format +msgid "" +"Creating folder: %(child_folder_name)s under parent folder: " +"%(parent_folder)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:306 +#, python-format +msgid "Child folder already present: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:314 +#, python-format +msgid "Created child folder: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:365 +#, python-format +msgid "Spec for creating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:383 +#, python-format +msgid "" +"Creating volume backing name: %(name)s disk_type: %(disk_type)s size_kb: " +"%(size_kb)s at folder: %(folder)s resourse pool: %(resource_pool)s " +"datastore name: %(ds_name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:395 +#, python-format +msgid "Initiated creation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:398 +#, python-format +msgid "Successfully created volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:438 +#, python-format +msgid "Spec for relocating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:452 +#, python-format +msgid "" +"Relocating backing: %(backing)s to datastore: %(ds)s and resource pool: " +"%(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:462 +#, python-format +msgid "Initiated relocation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:464 +#, python-format +msgid "" +"Successfully relocated volume backing: %(backing)s to datastore: %(ds)s " +"and resource pool: %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:474 +#, python-format +msgid "Moving backing: %(backing)s to folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:479 +#, python-format +msgid "Initiated move of volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:482 +#, python-format +msgid "Successfully moved volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:494 +#, python-format +msgid "Snapshoting backing: %(backing)s with name: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:501 +#, python-format +msgid "Initiated snapshot of volume backing: %(backing)s named: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:505 +#, python-format +msgid "Successfully created snapshot: %(snap)s for volume backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:553 +#, python-format +msgid "Deleting the snapshot: %(name)s from backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:558 +#, python-format +msgid "" +"Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " +"delete anything." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:565 +#, python-format +msgid "Initiated snapshot: %(name)s deletion for backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:569 +#, python-format +msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:597 +#, python-format +msgid "Spec for cloning the backing: %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:613 +#, python-format +msgid "" +"Creating a clone of backing: %(back)s, named: %(name)s, clone type: " +"%(type)s from snapshot: %(snap)s on datastore: %(ds)s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:627 +#, python-format +msgid "Initiated clone of backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:638 +#, python-format +msgid "Deleting file: %(file)s under datacenter: %(dc)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:646 +#, python-format +msgid "Initiated deletion via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:648 +#, python-format +msgid "Successfully deleted file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:701 +msgid "Copying disk data before snapshot of the VM" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:711 +#, python-format +msgid "Initiated copying disk data via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:713 +#, python-format +msgid "Successfully copied disk at: %(src)s to: %(dest)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:722 +#, python-format +msgid "Deleting vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:729 +#, python-format +msgid "Initiated deleting vmdk file via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:731 +#, python-format +msgid "Deleted vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/windows/windows.py:102 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:47 +#, python-format +msgid "" +"check_for_setup_error: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:53 +msgid "check_for_setup_error: there is no ISCSI traffic listening." +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:63 +#, python-format +msgid "" +"get_host_information: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:73 +#, python-format +msgid "" +"get_host_information: the ISCSI target information could not be " +"retrieved. WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:105 +#, python-format +msgid "" +"associate_initiator_with_iscsi_target: an association between initiator: " +"%(init)s and target name: %(target)s could not be established. WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:123 +#, python-format +msgid "" +"delete_iscsi_target: error when deleting the iscsi target associated with" +" target name: %(target)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:139 +#, python-format +msgid "" +"create_volume: error when creating the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:157 +#, python-format +msgid "" +"delete_volume: error when deleting the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:177 +#, python-format +msgid "" +"create_snapshot: error when creating the snapshot name: %(vol_name)s . 
" +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:193 +#, python-format +msgid "" +"create_volume_from_snapshot: error when creating the volume name: " +"%(vol_name)s from snapshot name: %(snap_name)s. WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:208 +#, python-format +msgid "" +"delete_snapshot: error when deleting the snapshot name: %(snap_name)s . " +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:223 +#, python-format +msgid "" +"create_iscsi_target: error when creating iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:240 +#, python-format +msgid "" +"remove_iscsi_target: error when deleting iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:255 +#, python-format +msgid "" +"add_disk_to_target: error adding disk associated to volume : %(vol_name)s" +" to the target name: %(tar_name)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:273 +#, python-format +msgid "" +"copy_vhd_disk: error when copying disk from source path : %(src_path)s to" +" destination path: %(dest_path)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:290 +#, python-format +msgid "" +"extend: error when extending the volume: %(vol_name)s .WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/flows/common.py:52 +#, python-format +msgid "Restoring source %(source_volid)s status to %(status)s" +msgstr "" + +#: cinder/volume/flows/common.py:58 +#, python-format +msgid "" +"Failed setting source volume %(source_volid)s back to its initial " +"%(source_status)s status" +msgstr "" + +#: cinder/volume/flows/common.py:83 +#, python-format +msgid "Updating volume: %(volume_id)s with %(update)s due to: %(reason)s" +msgstr "" + +#: cinder/volume/flows/common.py:90 +#: cinder/volume/flows/manager/create_volume.py:676 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(update)s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:81 +#, python-format +msgid "Originating snapshot status must be one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:103 +#, python-format +msgid "" +"Unable to create a volume from an originating source volume when its " +"status is not one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:126 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than the snapshot size " +"%(snap_size)sGB. They must be >= original snapshot size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:135 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than original volume size " +"%(source_size)sGB. They must be >= original volume size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:144 +#, python-format +msgid "Volume size %(size)s must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:186 +#, python-format +msgid "" +"Size of specified image %(image_size)sGB is larger than volume size " +"%(volume_size)sGB." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:194 +#, python-format +msgid "" +"Volume size %(volume_size)sGB cannot be smaller than the image minDisk " +"size %(min_disk)sGB." 
+msgstr "" + +#: cinder/volume/flows/api/create_volume.py:212 +#, python-format +msgid "Metadata property key %s greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:217 +#, python-format +msgid "Metadata property key %s value greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:254 +#, python-format +msgid "Availability zone '%s' is invalid" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:267 +msgid "Volume must be in the same availability zone as the snapshot" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:276 +msgid "Volume must be in the same availability zone as the source volume" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:315 +msgid "Volume type will be changed to be the same as the source volume." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:463 +#, python-format +msgid "Failed destroying volume entry %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:546 +#, python-format +msgid "Failed rolling back quota for %s reservations" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:590 +#, python-format +msgid "Failed to update quota for deleting volume: %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:678 +#: cinder/volume/flows/manager/create_volume.py:192 +#, python-format +msgid "Volume %s: create failed" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:682 +msgid "Unexpected build error:" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:105 +#, python-format +msgid "" +"Volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d due to " +"%(reason)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:124 +#, python-format +msgid "Volume %s: re-scheduled" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:141 +#, python-format +msgid "Updating volume %(volume_id)s with %(update)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:146 +#, python-format +msgid "Volume %s: resetting 'creating' status failed." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:165 +#, python-format +msgid "Volume %s: rescheduling failed" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:308 +#, python-format +msgid "" +"Failed notifying about the volume action %(event)s for volume " +"%(volume_id)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:345 +#, python-format +msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:347 +#, python-format +msgid "" +"Failed updating volume %(vol_id)s metadata using the provided " +"%(src_type)s %(src_id)s metadata" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:405 +#, python-format +msgid "" +"Failed fetching snapshot %(snapshot_id)s bootable flag using the provided" +" glance snapshot %(snapshot_ref_id)s volume reference" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:418 +#, python-format +msgid "Marking volume %s as bootable." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:421 +#, python-format +msgid "Failed updating volume %(volume_id)s bootable flag to true" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:448 +#, python-format +msgid "" +"Attempting download of %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s." 
+msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:455 +#: cinder/volume/flows/manager/create_volume.py:466 +#, python-format +msgid "" +"Failed to copy image %(image_id)s to volume: %(volume_id)s, error: " +"%(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:461 +#, python-format +msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:475 +#, python-format +msgid "" +"Downloaded image %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s successfully." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:512 +#, python-format +msgid "" +"Creating volume glance metadata for volume %(volume_id)s backed by image " +"%(image_id)s with: %(vol_metadata)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:526 +#, python-format +msgid "" +"Cloning %(volume_id)s from image %(image_id)s at location " +"%(image_location)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:552 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(updates)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:574 +#, python-format +msgid "Unable to create volume. Volume driver %s not initialized" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:588 +#, python-format +msgid "" +"Volume %(volume_id)s: being created using %(functor)s with specification:" +" %(volume_spec)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:611 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with creation provided " +"model %(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:619 +#, python-format +msgid "Volume %s: creating export" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:633 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with driver provided model " +"%(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:680 +#, python-format +msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" +msgstr "" + +#~ msgid "Error retrieving volume status: %s" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get system name" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get storage pool data" +#~ msgstr "" + +#~ msgid "Cannot find any Fibre Channel HBAs" +#~ msgstr "" + +#~ msgid "Volume status must be available or error" +#~ msgstr "" + +#~ msgid "No backend config with id %s" +#~ msgstr "" + +#~ msgid "No sm_flavor called %s" +#~ msgstr "" + +#~ msgid "No sm_volume with id %s" +#~ msgstr "" + +#~ msgid "Error: %s" +#~ msgstr "" + +#~ msgid "Unexpected state while cloning %s" +#~ msgstr "" + +#~ msgid "iSCSI device not found at %s" +#~ msgstr "" + +#~ msgid "Fibre Channel device not found." +#~ msgstr "" + +#~ msgid "Uncaught exception" +#~ msgstr "" + +#~ msgid "Out reactor registered" +#~ msgstr "" + +#~ msgid "CONSUMER GOT %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT QUEUED %(data)s" +#~ msgstr "" + +#~ msgid "Could not create IPC directory %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT %(data)s" +#~ msgstr "" + +#~ msgid "May specify only one of snapshot, imageRef or source volume" +#~ msgstr "" + +#~ msgid "Volume size cannot be lesser than the Snapshot size" +#~ msgstr "" + +#~ msgid "Unable to clone volumes that are in an error state" +#~ msgstr "" + +#~ msgid "Clones currently must be >= original volume size." 
+#~ msgstr "" + +#~ msgid "Volume size '%s' must be an integer and greater than 0" +#~ msgstr "" + +#~ msgid "Size of specified image is larger than volume size." +#~ msgstr "" + +#~ msgid "Image minDisk size is larger than the volume size." +#~ msgstr "" + +#~ msgid "" +#~ msgstr "" + +#~ msgid "Availability zone is invalid" +#~ msgstr "" + +#~ msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +#~ msgstr "" + +#~ msgid "volume %s: creating from snapshot" +#~ msgstr "" + +#~ msgid "volume %s: creating from existing volume" +#~ msgstr "" + +#~ msgid "volume %s: creating from image" +#~ msgstr "" + +#~ msgid "volume %s: creating" +#~ msgstr "" + +#~ msgid "Setting volume: %s status to error after failed image copy." +#~ msgstr "" + +#~ msgid "Unexpected Error: " +#~ msgstr "" + +#~ msgid "volume %s: creating export" +#~ msgstr "" + +#~ msgid "volume %s: create failed" +#~ msgstr "" + +#~ msgid "volume %s: created successfully" +#~ msgstr "" + +#~ msgid "volume %s: Error trying to reschedule create" +#~ msgstr "" + +#~ msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +#~ msgstr "" + +#~ msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +#~ msgstr "" + +#~ msgid "Downloaded image %(image_id)s to %(volume_id)s successfully." +#~ msgstr "" + +#~ msgid "Array Mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "LUN %(lun)s of size %(size)s MB is created." +#~ msgstr "" + +#~ msgid "Array mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "Failed to attach iser target for volume %(volume_id)s." +#~ msgstr "" + +#~ msgid "Fetching %s" +#~ msgstr "" + +#~ msgid "Link Local address is not found.:%s" +#~ msgstr "" + +#~ msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +#~ msgstr "" + +#~ msgid "Started %(name)s on %(_host)s:%(_port)s" +#~ msgstr "" + +#~ msgid "Unable to find a Fibre Channel volume device" +#~ msgstr "" + +#~ msgid "Volume device not found at %s" +#~ msgstr "" + +#~ msgid "Unable to find Volume Group: %s" +#~ msgstr "" + +#~ msgid "Failed to create Volume Group: %s" +#~ msgstr "" + +#~ msgid "snapshot %(snap_name)s: creating" +#~ msgstr "" + +#~ msgid "Running with CoraidDriver for ESM EtherCLoud" +#~ msgstr "" + +#~ msgid "Update session cookie %(session)s" +#~ msgstr "" + +#~ msgid "Message : %(message)s" +#~ msgstr "" + +#~ msgid "Error while trying to set group: %(message)s" +#~ msgstr "" + +#~ msgid "Unable to find group: %(group)s" +#~ msgstr "" + +#~ msgid "ESM urlOpen error" +#~ msgstr "" + +#~ msgid "JSON Error" +#~ msgstr "" + +#~ msgid "Request without URL" +#~ msgstr "" + +#~ msgid "Configure data : %s" +#~ msgstr "" + +#~ msgid "Configure response : %s" +#~ msgstr "" + +#~ msgid "Unable to retrive volume infos for volume %(volname)s" +#~ msgstr "" + +#~ msgid "Cannot login on Coraid ESM" +#~ msgstr "" + +#~ msgid "Fail to create volume %(volname)s" +#~ msgstr "" + +#~ msgid "Failed to delete volume %(volname)s" +#~ msgstr "" + +#~ msgid "Failed to Create Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "Failed to Delete Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "Failed to Create Volume from Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "fmt = %(fmt)s backed by: %(backing_file)s" +#~ msgstr "" + +#~ msgid "Expected image to be in raw format, but is %s" +#~ msgstr "" + +#~ msgid "volume group %s doesn't exist" +#~ msgstr "" + +#~ msgid "Error retrieving volume stats: %s" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Could not get system name" +#~ msgstr "" + +#~ msgid "CPG (%s) must 
be in a domain" +#~ msgstr "" + +#~ msgid "Error populating default encryption types!" +#~ msgstr "" + +#~ msgid "Unexpected error while running command." +#~ msgstr "" + +#~ msgid "Nexenta SA returned the error" +#~ msgstr "" + +#~ msgid "Ignored target group creation error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group member addition error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LU creation error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Copying metadata from snapshot %(snap_volume_id)s to %(volume_id)s" +#~ msgstr "" + +#~ msgid "Connection to glance failed" +#~ msgstr "" + +#~ msgid "Invalid snapshot" +#~ msgstr "" + +#~ msgid "Invalid input received" +#~ msgstr "" + +#~ msgid "Invalid volume type" +#~ msgstr "" + +#~ msgid "Invalid volume" +#~ msgstr "" + +#~ msgid "Invalid host" +#~ msgstr "" + +#~ msgid "Invalid auth key" +#~ msgstr "" + +#~ msgid "Invalid metadata" +#~ msgstr "" + +#~ msgid "Invalid metadata size" +#~ msgstr "" + +#~ msgid "Migration error" +#~ msgstr "" + +#~ msgid "Quota exceeded" +#~ msgstr "" + +#~ msgid "Connection to swift failed" +#~ msgstr "" + +#~ msgid "Volume migration failed" +#~ msgstr "" + +#~ msgid "SSH command injection detected" +#~ msgstr "" + +#~ msgid "Invalid qos specs" +#~ msgstr "" + +#~ msgid "debug in callback: %s" +#~ msgstr "" + +#~ msgid "Expected object of type: %s" +#~ msgstr "" + +#~ msgid "timefunc: '%(name)s' took %(total_time).2f secs" +#~ msgstr "" + +#~ msgid "base image still has %s snapshots so not deleting base image" +#~ msgstr "" + +#~ msgid "Failed to rename migration destination volume %(vol)s to %(name)s" +#~ msgstr "" + +#~ msgid "Resize volume \"%(name)s\" to %(size)s" +#~ msgstr "" + +#~ msgid "Volume \"%(name)s\" resized. New size is %(size)s" +#~ msgstr "" + +#~ msgid "Invalid snapshot backing file format: %s" +#~ msgstr "" + +#~ msgid "Extend volume from %(old_size) to %(new_size)" +#~ msgstr "" + +#~ msgid "pool %s doesn't exist" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Could not get system name." +#~ msgstr "" + +#~ msgid "Disk not found: %s" +#~ msgstr "" + +#~ msgid "read timed out" +#~ msgstr "" + +#~ msgid "check_for_setup_error." +#~ msgstr "" + +#~ msgid "check_for_setup_error: Can not get device type." +#~ msgstr "" + +#~ msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +#~ msgstr "" + +#~ msgid "_get_device_type: Storage Pool must be configured." +#~ msgstr "" + +#~ msgid "create_volume:volume name: %s." +#~ msgstr "" + +#~ msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +#~ msgstr "" + +#~ msgid "create_export: volume name:%s" +#~ msgstr "" + +#~ msgid "create_export:Volume %(name)s does not exist." +#~ msgstr "" + +#~ msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +#~ msgstr "" + +#~ msgid "terminate_connection:Host does not exist. Host name:%(host)s." +#~ msgstr "" + +#~ msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +#~ msgstr "" + +#~ msgid "create_snapshot:Device does not support snapshot." +#~ msgstr "" + +#~ msgid "create_snapshot:Resource pool needs 1GB valid size at least." +#~ msgstr "" + +#~ msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +#~ msgstr "" + +#~ msgid "create_snapshot:Snapshot does not exist. 
Snapshot name:%(name)s" +#~ msgstr "" + +#~ msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +#~ msgstr "" + +#~ msgid "delete_snapshot:Device does not support snapshot." +#~ msgstr "" + +#~ msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +#~ msgstr "" + +#~ msgid "_check_conf_file: %s" +#~ msgstr "" + +#~ msgid "Write login information to xml error. %s" +#~ msgstr "" + +#~ msgid "_get_login_info error. %s" +#~ msgstr "" + +#~ msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +#~ msgstr "" + +#~ msgid "_get_lun_set_info:%s" +#~ msgstr "" + +#~ msgid "_get_iscsi_info:%s" +#~ msgstr "" + +#~ msgid "CLI command:%s" +#~ msgstr "" + +#~ msgid "_execute_cli:%s" +#~ msgstr "" + +#~ msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +#~ msgstr "" + +#~ msgid "_get_tgt_iqn:iSCSI IP is %s." +#~ msgstr "" + +#~ msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +#~ msgstr "" + +#~ msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +#~ msgstr "" + +#~ msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +#~ msgstr "" + +#~ msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." +#~ msgstr "" + +#~ msgid "Ignored target creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group member addition error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LU creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LUN mapping entry addition error while ensuring export" +#~ msgstr "" + +#~ msgid "Invalid source volume %(reason)s." +#~ msgstr "" + +#~ msgid "The request is invalid." +#~ msgstr "" + +#~ msgid "Volume %(volume_id)s persistence file could not be found." +#~ msgstr "" + +#~ msgid "No disk at %(location)s" +#~ msgstr "" + +#~ msgid "Class %(class_name)s could not be found: %(exception)s" +#~ msgstr "" + +#~ msgid "Action not allowed." +#~ msgstr "" + +#~ msgid "Key pair %(key_name)s already exists." +#~ msgstr "" + +#~ msgid "Migration error: %(reason)s" +#~ msgstr "" + +#~ msgid "Maximum volume/snapshot size exceeded" +#~ msgstr "" + +#~ msgid "3PAR Host already exists: %(err)s. %(info)s" +#~ msgstr "" + +#~ msgid "Backup volume %(volume_id)s type not recognised." +#~ msgstr "" + +#~ msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s" +#~ msgstr "" + +#~ msgid "ssh_read: Read SSH timeout" +#~ msgstr "" + +#~ msgid "do_setup." +#~ msgstr "" + +#~ msgid "create_volume: volume name: %s." +#~ msgstr "" + +#~ msgid "delete_volume: volume name: %s." +#~ msgstr "" + +#~ msgid "create_cloned_volume: src volume: %(src)s tgt volume: %(tgt)s" +#~ msgstr "" + +#~ msgid "create_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" +#~ msgstr "" + +#~ msgid "delete_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" +#~ msgstr "" + +#~ msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Updating volume stats" +#~ msgstr "" + +#~ msgid "restore finished." +#~ msgstr "" + +#~ msgid "Error encountered during initialization of driver: %s" +#~ msgstr "" + +#~ msgid "Unabled to update stats, driver is uninitialized" +#~ msgstr "" + +#~ msgid "Snapshot file at %s does not exist." 
+#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %s does not exist" +#~ msgstr "" + +#~ msgid "Login to 3PAR array invalid" +#~ msgstr "" + +#~ msgid "There are no datastores present under %s." +#~ msgstr "" + +#~ msgid "Size for volume: %s not found, skipping secure delete." +#~ msgstr "" + +#~ msgid "Could not find attribute for LUN named %s" +#~ msgstr "" + +#~ msgid "Cleaning up incomplete backup operations" +#~ msgstr "" + +#~ msgid "Resetting volume %s to available (was backing-up)" +#~ msgstr "" + +#~ msgid "Resetting volume %s to error_restoring (was restoring-backup)" +#~ msgstr "" + +#~ msgid "Resetting backup %s to error (was creating)" +#~ msgstr "" + +#~ msgid "Resetting backup %s to available (was restoring)" +#~ msgstr "" + +#~ msgid "Resuming delete on backup: %s" +#~ msgstr "" + +#~ msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +#~ msgstr "" + +#~ msgid "create_backup finished. backup: %s" +#~ msgstr "" + +#~ msgid "delete_backup started, backup: %s" +#~ msgstr "" + +#~ msgid "delete_backup finished, backup %s deleted" +#~ msgstr "" + +#~ msgid "JSON transfer Error" +#~ msgstr "" + +#~ msgid "create volume error: %(err)s" +#~ msgstr "" + +#~ msgid "Create snapshot error." +#~ msgstr "" + +#~ msgid "Create luncopy error." +#~ msgstr "" + +#~ msgid "_find_host_lun_id transfer data error! " +#~ msgstr "" + +#~ msgid "ssh_read: Read SSH timeout." +#~ msgstr "" + +#~ msgid "There are no hosts in the inventory." +#~ msgstr "" + +#~ msgid "Unable to create volume: %(vol)s on the hosts: %(hosts)s." +#~ msgstr "" + +#~ msgid "Successfully cloned new backing: %s." +#~ msgstr "" + +#~ msgid "Successfully reverted clone: %(clone)s to snapshot: %(snapshot)s." +#~ msgstr "" + +#~ msgid "Copying backing files from %(src)s to %(dest)s." +#~ msgstr "" + +#~ msgid "Initiated copying of backing via task: %s." +#~ msgstr "" + +#~ msgid "Successfully copied backing to %s." +#~ msgstr "" + +#~ msgid "Registering backing at path: %s to inventory." +#~ msgstr "" + +#~ msgid "Initiated registring backing, task: %s." +#~ msgstr "" + +#~ msgid "Successfully registered backing: %s." +#~ msgstr "" + +#~ msgid "Reverting backing to snapshot: %s." +#~ msgstr "" + +#~ msgid "Initiated reverting snapshot via task: %s." +#~ msgstr "" + +#~ msgid "Successfully reverted to snapshot: %s." +#~ msgstr "" + +#~ msgid "Successfully copied disk data to: %s." +#~ msgstr "" + +#~ msgid "Error(s): %s occurred in the call to RetrieveProperties." +#~ msgstr "" + +#~ msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +#~ msgstr "" + +#~ msgid "Deploy v1 of the Cinder API. " +#~ msgstr "" + +#~ msgid "Deploy v2 of the Cinder API. " +#~ msgstr "" + +#~ msgid "_read_xml:%s" +#~ msgstr "" + +#~ msgid "request ip info is %s." +#~ msgstr "" + +#~ msgid "new str info is %s." +#~ msgstr "" + +#~ msgid "Failed to create iser target for volume %(volume_id)s." +#~ msgstr "" + +#~ msgid "Failed to remove iser target for volume %(volume_id)s." 
+#~ msgstr "" + +#~ msgid "rtstool is not installed correctly" +#~ msgstr "" + +#~ msgid "Creating iser_target for: %s" +#~ msgstr "" + +#~ msgid "Failed to create iser target for volume id:%(vol_id)s: %(e)s" +#~ msgstr "" + +#~ msgid "Removing iser_target for: %s" +#~ msgstr "" + +#~ msgid "Failed to remove iser target for volume id:%(vol_id)s: %(e)s" +#~ msgstr "" + +#~ msgid "Volume %s does not exist, it seems it was already deleted" +#~ msgstr "" + +#~ msgid "Executing zfs send/recv on the appliance" +#~ msgstr "" + +#~ msgid "zfs send/recv done, new volume %s created" +#~ msgstr "" + +#~ msgid "Failed to delete temp snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" + +#~ msgid "Failed to delete zfs recv snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" + +#~ msgid "rbd export-diff failed - %s" +#~ msgstr "" + +#~ msgid "rbd import-diff failed - %s" +#~ msgstr "" + +#~ msgid "%s is not on GPFS. Perhaps GPFS not mounted." +#~ msgstr "" + +#~ msgid "Folder %s does not exist, it seems it was already deleted." +#~ msgstr "" + +#~ msgid "No 'os-update_readonly_flag' was specified in request." +#~ msgstr "" + +#~ msgid "Volume 'readonly' flag must be specified in request as a boolean." +#~ msgstr "" + +#~ msgid "ISER provider_location not stored, using discovery" +#~ msgstr "" + +#~ msgid "Could not find iSER export for volume %s" +#~ msgstr "" + +#~ msgid "ISER Discovery: Found %s" +#~ msgstr "" + +#~ msgid "Failed to access the device on the path %(path)s: %(error)s." +#~ msgstr "" + +#~ msgid "iSER device not found at %s" +#~ msgstr "" + +#~ msgid "Found iSER node %(host_device)s (after %(tries)s rescans)." +#~ msgstr "" + +#~ msgid "Skipping ensure_export. No iser_target provisioned for volume: %s" +#~ msgstr "" + +#~ msgid "Skipping remove_export. No iser_target provisioned for volume: %s" +#~ msgstr "" + +#~ msgid "Downloading image: %s from glance image server." +#~ msgstr "" + +#~ msgid "Uploading image: %s to the Glance image server." +#~ msgstr "" + +#~ msgid "Invalid request body" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: prefix %s" +#~ msgstr "" + +#~ msgid "Schedule volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete schedule volume using flow: %s" +#~ msgstr "" + +#~ msgid "Create volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete create volume workflow" +#~ msgstr "" + +#~ msgid "Expected volume result not found" +#~ msgstr "" + +#~ msgid "Manager volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete manager volume workflow" +#~ msgstr "" + +#~ msgid "Unable to update stats, driver is uninitialized" +#~ msgstr "" + +#~ msgid "Bad reponse from server: %s" +#~ msgstr "" + +#~ msgid "%(flow)s has moved into state %(state)s from state %(old_state)s" +#~ msgstr "" + +#~ msgid "No request spec, will not reschedule" +#~ msgstr "" + +#~ msgid "No retry filter property or associated retry info, will not reschedule" +#~ msgstr "" + +#~ msgid "Retry info not present, will not reschedule" +#~ msgstr "" + +#~ msgid "Clear capabilities" +#~ msgstr "" + +#~ msgid "This usually means the volume was never succesfully created." +#~ msgstr "" + +#~ msgid "setting LU uppper (end) limit to %s" +#~ msgstr "" + +#~ msgid "Can't find lun or lun goup in array" +#~ msgstr "" + +#~ msgid "Volume to be restored to is smaller than the backup to be restored" +#~ msgstr "" + +#~ msgid "Volume driver '%(driver)s' not initialized." 
+#~ msgstr "" + +#~ msgid "in looping call" +#~ msgstr "" + +#~ msgid "Is the appropriate service running?" +#~ msgstr "" + +#~ msgid "Could not find another host" +#~ msgstr "" + +#~ msgid "Not enough allocatable volume gigabytes remaining" +#~ msgstr "" + +#~ msgid "Unable to update stats on non-intialized Volume Group: %s" +#~ msgstr "" + +#~ msgid "do_setup: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "migrate_volume started with more than one vdisk copy" +#~ msgstr "" + +#~ msgid "migrate_volume: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "Selected datastore: %s for the volume." +#~ msgstr "" + +#~ msgid "There are no valid datastores present under %s." +#~ msgstr "" + +#~ msgid "Unable to create volume, driver not initialized" +#~ msgstr "" + +#~ msgid "Migration %(migration_id)s could not be found." +#~ msgstr "" + +#~ msgid "Bad driver response status: %(status)s" +#~ msgstr "" + +#~ msgid "Instance %(instance_id)s could not be found." +#~ msgstr "" + +#~ msgid "Volume retype failed: %(reason)s" +#~ msgstr "" + +#~ msgid "SIGTERM received" +#~ msgstr "" + +#~ msgid "Child %(pid)d exited with status %(code)d" +#~ msgstr "" + +#~ msgid "_wait_child %d" +#~ msgstr "" + +#~ msgid "wait wrap.failed %s" +#~ msgstr "" + +#~ msgid "" +#~ "Report interval must be less than " +#~ "service down time. Current config: " +#~ ". Setting " +#~ "service_down_time to: %(new_service_down_time)s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target for volume %(name)s." +#~ msgstr "" + +#~ msgid "Updating iscsi target: %s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target %(name)s: %(e)s" +#~ msgstr "" + +#~ msgid "Caught '%(exception)s' exception." +#~ msgstr "" + +#~ msgid "Get code level failed" +#~ msgstr "" + +#~ msgid "do_setup: Could not get system name" +#~ msgstr "" + +#~ msgid "Failed to get license information." +#~ msgstr "" + +#~ msgid "do_setup: No configured nodes" +#~ msgstr "" + +#~ msgid "enter: _get_chap_secret_for_host: host name %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_chap_secret_for_host: host name " +#~ "%(host_name)s with secret %(chap_secret)s" +#~ msgstr "" + +#~ msgid "" +#~ "_create_host: Cannot clean host name. " +#~ "Host name is not unicode or string" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: %s" +#~ msgstr "" + +#~ msgid "leave: _get_host_from_connector: host %s" +#~ msgstr "" + +#~ msgid "enter: _create_host: host %s" +#~ msgstr "" + +#~ msgid "_create_host: No connector ports" +#~ msgstr "" + +#~ msgid "leave: _create_host: host %(host)s - %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +#~ msgstr "" + +#~ msgid "" +#~ "storwize_svc_multihostmap_enabled is set to " +#~ "False, Not allow multi host mapping" +#~ msgstr "" + +#~ msgid "volume %s mapping to multi host" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _map_vol_to_host: LUN %(result_lun)s, " +#~ "volume %(volume_name)s, host %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "leave: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "_create_host failed to return the host name." +#~ msgstr "" + +#~ msgid "_get_host_from_connector failed to return the host name for connector" +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to any host found." +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: Multiple mappings of " +#~ "volume %(vol_name)s found, no host " +#~ "specified." 
+#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to host %(host_name)s found" +#~ msgstr "" + +#~ msgid "protocol must be specified as ' iSCSI' or ' FC'" +#~ msgstr "" + +#~ msgid "enter: _create_vdisk: vdisk %s " +#~ msgstr "" + +#~ msgid "" +#~ "_create_vdisk %(name)s - did not find success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find success " +#~ "message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find mapping " +#~ "id in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to prepare FlashCopy" +#~ " from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "Unexecpted mapping status %(status)s for " +#~ "mapping %(id)s. Attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Mapping %(id)s prepare failed to " +#~ "complete within the allotted %(to)d " +#~ "seconds timeout. Terminating." +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s with " +#~ "exception %(ex)s" +#~ msgstr "" + +#~ msgid "_prepare_fc_map: %s" +#~ msgstr "" + +#~ msgid "" +#~ "_start_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "enter: _run_flashcopy: execute FlashCopy from" +#~ " source %(source)s to target %(target)s" +#~ msgstr "" + +#~ msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +#~ msgstr "" + +#~ msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %(src_vdisk)s (%(src_id)s) does not exist" +#~ msgstr "" + +#~ msgid "" +#~ "_create_copy: cannot get source vdisk " +#~ "%(src)s capacity from vdisk attributes " +#~ "%(attr)s" +#~ msgstr "" + +#~ msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_flashcopy_mapping_attributes: mapping " +#~ "%(fc_map_id)s, attributes %(attributes)s" +#~ msgstr "" + +#~ msgid "enter: _is_vdisk_defined: vdisk %s " +#~ msgstr "" + +#~ msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +#~ msgstr "" + +#~ msgid "enter: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "warning: Tried to delete vdisk %s but it does not exist." 
+#~ msgstr "" + +#~ msgid "leave: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "" +#~ "_add_vdisk_copy %(name)s - did not find" +#~ " success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "_get_vdisk_copy_attrs: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "_get_pool_attrs: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "enter: _execute_command_and_parse_attributes: command %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _execute_command_and_parse_attributes:\n" +#~ "command: %(cmd)s\n" +#~ "attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "_get_hdr_dic: attribute headers and values do not match.\n" +#~ " Headers: %(header)s\n" +#~ " Values: %(row)s" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ "stdout: %(out)s\n" +#~ "stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "Did not find expected column in %(fun)s: %(hdr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Volume size %(size)s cannot be lesser" +#~ " than the snapshot size %(snap_size)s. " +#~ "They must be >= original snapshot " +#~ "size." +#~ msgstr "" + +#~ msgid "" +#~ "Clones currently disallowed when %(size)s " +#~ "< %(source_size)s. They must be >= " +#~ "original volume size." +#~ msgstr "" + +#~ msgid "" +#~ "Size of specified image %(image_size)s " +#~ "is larger than volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "" +#~ "Image minDisk size %(min_disk)s is " +#~ "larger than the volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "Updating volume %(volume_id)s with %(update)s" +#~ msgstr "" + +#~ msgid "Volume %s: resetting 'creating' status failed" +#~ msgstr "" + +#~ msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s" +#~ msgstr "" + +#~ msgid "Marking volume %s as bootable" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting download of %(image_id)s " +#~ "(%(image_location)s) to volume %(volume_id)s" +#~ msgstr "" + +#~ msgid "" +#~ "Downloaded image %(image_id)s (%(image_location)s)" +#~ " to volume %(volume_id)s successfully" +#~ msgstr "" + +#~ msgid "" +#~ "Creating volume glance metadata for " +#~ "volume %(volume_id)s backed by image " +#~ "%(image_id)s with: %(vol_metadata)s" +#~ msgstr "" + +#~ msgid "" +#~ "Cloning %(volume_id)s from image %(image_id)s" +#~ " at location %(image_location)s" +#~ msgstr "" + diff --git a/cinder/locale/hu/LC_MESSAGES/cinder.po b/cinder/locale/hu/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..8879ba59c3 --- /dev/null +++ b/cinder/locale/hu/LC_MESSAGES/cinder.po @@ -0,0 +1,10736 @@ +# Hungarian translations for cinder. +# Copyright (C) 2013 ORGANIZATION +# This file is distributed under the same license as the cinder project. 
+# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Cinder\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-02-03 06:16+0000\n" +"PO-Revision-Date: 2013-05-29 08:13+0000\n" +"Last-Translator: openstackjenkins \n" +"Language-Team: Hungarian " +"(http://www.transifex.com/projects/p/openstack/language/hu/)\n" +"Plural-Forms: nplurals=2; plural=(n != 1)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:102 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:66 cinder/brick/exception.py:33 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:88 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:107 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:112 +#, python-format +msgid "Volume driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:116 +#, python-format +msgid "Backup driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:120 +#, python-format +msgid "Connection to glance failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:124 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:129 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:133 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:137 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:141 +msgid "Volume driver not ready." +msgstr "" + +#: cinder/exception.py:145 cinder/brick/exception.py:74 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:150 +#, python-format +msgid "Invalid snapshot: %(reason)s" +msgstr "" + +#: cinder/exception.py:154 +#, python-format +msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:159 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:163 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:167 +msgid "The results are invalid." +msgstr "" + +#: cinder/exception.py:171 +#, python-format +msgid "Invalid input received: %(reason)s" +msgstr "" + +#: cinder/exception.py:175 +#, python-format +msgid "Invalid volume type: %(reason)s" +msgstr "" + +#: cinder/exception.py:179 +#, python-format +msgid "Invalid volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:183 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:187 +#, python-format +msgid "Invalid host: %(reason)s" +msgstr "" + +#: cinder/exception.py:193 cinder/brick/exception.py:81 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:197 +#, python-format +msgid "Invalid auth key: %(reason)s" +msgstr "" + +#: cinder/exception.py:201 +#, python-format +msgid "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" +msgstr "" + +#: cinder/exception.py:206 +msgid "Service is unavailable at this time." 
+msgstr "" + +#: cinder/exception.py:210 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:214 +#, python-format +msgid "The device in the path %(path)s is unavailable: %(reason)s" +msgstr "" + +#: cinder/exception.py:218 +#, python-format +msgid "Expected a uuid but received %(uuid)s." +msgstr "" + +#: cinder/exception.py:222 cinder/brick/exception.py:68 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:228 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:232 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "" +"Volume %(volume_id)s has no administration metadata with key " +"%(metadata_key)s." +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Invalid metadata: %(reason)s" +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Invalid metadata size: %(reason)s" +msgstr "" + +#: cinder/exception.py:250 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:255 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:264 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:269 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s deletion is not allowed with volumes " +"present with the type." +msgstr "" + +#: cinder/exception.py:274 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:278 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:282 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:287 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:291 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:295 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:328 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:332 +#, python-format +msgid "Unknown quota resources %(unknown)s." 
+msgstr "" + +#: cinder/exception.py:336 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:340 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:344 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:348 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:352 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:356 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:365 +#, python-format +msgid "Volume Type %(id)s already exists." +msgstr "" + +#: cinder/exception.py:369 +#, python-format +msgid "Volume type encryption for type %(type_id)s already exists." +msgstr "" + +#: cinder/exception.py:373 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:377 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:381 +#, python-format +msgid "Could not find parameter %(param)s" +msgstr "" + +#: cinder/exception.py:385 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:389 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:398 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:402 +#, python-format +msgid "Quota exceeded: code=%(code)s" +msgstr "" + +#: cinder/exception.py:409 +#, python-format +msgid "" +"Requested volume or snapshot exceeds allowed Gigabytes quota. Requested " +"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." +msgstr "" + +#: cinder/exception.py:415 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:419 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:423 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:427 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:432 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:436 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:440 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:444 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:449 +#, python-format +msgid "Glance metadata for volume/snapshot %(id)s cannot be found." 
+msgstr "" + +#: cinder/exception.py:453 +#, python-format +msgid "Failed to export for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:457 +#, python-format +msgid "Failed to create metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:461 +#, python-format +msgid "Failed to update metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:465 +#, python-format +msgid "Failed to copy metadata to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:469 +#, python-format +msgid "Failed to copy image to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:473 +msgid "Invalid Ceph args provided for backup rbd operation" +msgstr "" + +#: cinder/exception.py:477 +msgid "An error has occurred during backup operation" +msgstr "" + +#: cinder/exception.py:481 +msgid "Backup RBD operation failed" +msgstr "" + +#: cinder/exception.py:485 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:489 +msgid "Failed to identify volume backend." +msgstr "" + +#: cinder/exception.py:493 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "" + +#: cinder/exception.py:497 +#, python-format +msgid "Connection to swift failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:501 +#, python-format +msgid "Transfer %(transfer_id)s could not be found." +msgstr "" + +#: cinder/exception.py:505 +#, python-format +msgid "Volume migration failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:509 +#, python-format +msgid "SSH command injection detected: %(command)s" +msgstr "" + +#: cinder/exception.py:513 +#, python-format +msgid "QoS Specs %(specs_id)s already exists." +msgstr "" + +#: cinder/exception.py:517 +#, python-format +msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:522 +#, python-format +msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "No such QoS spec %(specs_id)s." +msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:536 +#, python-format +msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:541 +#, python-format +msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." +msgstr "" + +#: cinder/exception.py:546 +#, python-format +msgid "Invalid qos specs: %(reason)s" +msgstr "" + +#: cinder/exception.py:550 +#, python-format +msgid "QoS Specs %(specs_id)s is still associated with entities." +msgstr "" + +#: cinder/exception.py:554 +#, python-format +msgid "key manager error: %(reason)s" +msgstr "" + +#: cinder/exception.py:560 +msgid "Coraid Cinder Driver exception." +msgstr "" + +#: cinder/exception.py:564 +msgid "Failed to encode json data." +msgstr "" + +#: cinder/exception.py:568 +msgid "Login on ESM failed." +msgstr "" + +#: cinder/exception.py:572 +msgid "Relogin on ESM failed." +msgstr "" + +#: cinder/exception.py:576 +#, python-format +msgid "Group with name \"%(group_name)s\" not found." +msgstr "" + +#: cinder/exception.py:580 +#, python-format +msgid "ESM configure request failed: %(message)s." +msgstr "" + +#: cinder/exception.py:584 +#, python-format +msgid "Coraid ESM not available with reason: %(reason)s." +msgstr "" + +#: cinder/exception.py:589 +msgid "Zadara Cinder Driver exception." 
+msgstr "" + +#: cinder/exception.py:593 +#, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:597 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:601 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:605 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:609 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:613 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:618 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:622 +msgid "SolidFire Cinder Driver exception" +msgstr "" + +#: cinder/exception.py:626 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:630 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:636 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:641 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:645 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:649 cinder/exception.py:662 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:654 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:658 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/manager.py:133 +msgid "Notifying Schedulers of capabilities ..." +msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:105 +#, python-format +msgid "" +"Default quota for resource: %(res)s is set by the default quota flag: " +"quota_%(res)s, it is now deprecated. Please use the the default quota " +"class for default quota." +msgstr "" + +#: cinder/quota.py:748 +#, python-format +msgid "Created reservations %s" +msgstr "" + +#: cinder/quota.py:770 +#, python-format +msgid "Failed to commit reservations %s" +msgstr "" + +#: cinder/quota.py:790 +#, python-format +msgid "Failed to roll back reservations %s" +msgstr "" + +#: cinder/quota.py:876 +msgid "Cannot register resource" +msgstr "" + +#: cinder/quota.py:879 +msgid "Cannot register resources" +msgstr "" + +#: cinder/quota_utils.py:46 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume - " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/quota_utils.py:56 cinder/transfer/api.py:168 +#: cinder/volume/flows/api/create_volume.py:520 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/service.py:95 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:108 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:148 +#, python-format +msgid "" +"Report interval must be less than service down time. Current config " +"service_down_time: %(service_down_time)s, report_interval for this: " +"service is: %(report_interval)s. 
Setting global service_down_time to: " +"%(new_down_time)s" +msgstr "" + +#: cinder/service.py:216 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:255 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:270 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:276 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:298 +#, python-format +msgid "" +"Value of config option %(name)s_workers must be integer greater than 1. " +"Input value ignored." +msgstr "" + +#: cinder/service.py:373 +msgid "serve() can only be called once" +msgstr "" + +#: cinder/service.py:379 cinder/openstack/common/service.py:166 +#: cinder/openstack/common/service.py:384 +msgid "Full set of CONF:" +msgstr "" + +#: cinder/service.py:387 +#, python-format +msgid "%s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Can not translate %s to integer." +msgstr "" + +#: cinder/utils.py:127 +#, python-format +msgid "May specify only one of %s" +msgstr "" + +#: cinder/utils.py:212 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:228 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:412 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:423 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:698 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:759 +#, python-format +msgid "Volume driver %s not initialized" +msgstr "" + +#: cinder/wsgi.py:127 cinder/openstack/common/sslutils.py:50 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: cinder/wsgi.py:130 cinder/openstack/common/sslutils.py:53 +#, python-format +msgid "Unable to find ca_file : %s" +msgstr "" + +#: cinder/wsgi.py:133 cinder/openstack/common/sslutils.py:56 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: cinder/wsgi.py:136 cinder/openstack/common/sslutils.py:59 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:169 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:206 +#, python-format +msgid "Started %(name)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:244 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:313 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." 
+msgstr "" + +#: cinder/api/common.py:92 cinder/api/common.py:126 cinder/volume/api.py:266 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:95 cinder/api/common.py:130 cinder/volume/api.py:263 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:120 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:134 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:162 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:189 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:182 +msgid "Initializing extension manager." +msgstr "" + +#: cinder/api/extensions.py:197 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:235 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:236 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:240 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:256 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:262 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:276 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:287 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:356 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:266 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:463 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:786 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:907 +msgid "subclasses must implement construct()!" 
+msgstr "" + +#: cinder/api/contrib/admin_actions.py:81 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:227 +#: cinder/api/contrib/volume_transfer.py:157 +#: cinder/api/contrib/volume_transfer.py:193 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:224 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:60 +msgid "Snapshot not found." +msgstr "" + +#: cinder/api/contrib/hosts.py:86 cinder/api/openstack/wsgi.py:245 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:136 +#, python-format +msgid "Host '%s' could not be found." +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:168 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:180 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:206 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:214 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:111 +msgid "Please specify a name for QoS specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:220 +msgid "Failed to disassociate qos specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:222 +msgid "Qos specs still in use." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:298 +#: cinder/api/contrib/qos_specs_manage.py:351 +msgid "Volume Type id must not be None." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:72 +msgid "Missing required element quota_class_set in request body." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:81 +msgid "Quota class limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:85 +msgid "Quota class limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:60 +msgid "Quota limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quotas.py:65 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:100 +msgid "Missing required element quota_set in request body." +msgstr "" + +#: cinder/api/contrib/quotas.py:111 +#, python-format +msgid "Bad key(s) in quota set: %s" +msgstr "" + +#: cinder/api/contrib/scheduler_hints.py:36 +msgid "Malformed scheduler_hints attribute" +msgstr "" + +#: cinder/api/contrib/services.py:84 +msgid "" +"Query by service parameter is deprecated. Please use binary parameter " +"instead." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:51 +msgid "'status' must be specified." 
+msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:61 +#, python-format +msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:67 +#, python-format +msgid "" +"Provided snapshot status %(provided)s not allowed for snapshot with " +"status %(current)s." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:79 +msgid "progress must be an integer percentage" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:101 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:105 +#: cinder/api/v1/snapshot_metadata.py:75 cinder/api/v1/volume_metadata.py:75 +#: cinder/api/v2/snapshot_metadata.py:75 cinder/api/v2/volume_metadata.py:74 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:108 +#: cinder/api/v1/snapshot_metadata.py:79 cinder/api/v1/volume_metadata.py:79 +#: cinder/api/v2/snapshot_metadata.py:79 cinder/api/v2/volume_metadata.py:78 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:150 +msgid "" +"Key names can only contain alphanumeric characters, underscores, periods," +" colons and hyphens." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:99 +#, python-format +msgid "" +"Invalid request to attach volume to an instance %(instance_uuid)s and a " +"host %(host_name)s simultaneously" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:107 +msgid "Invalid request to attach volume to an invalid target" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:111 +msgid "" +"Invalid request to attach volume with an invalid mode. Attaching mode " +"should be 'rw' or 'ro'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:196 +msgid "Unable to fetch connection information from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:216 +msgid "Unable to terminate volume connection from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:229 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:237 +msgid "Bad value for 'force' parameter." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:240 +msgid "'force' is not string or bool." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:280 +msgid "New volume size must be specified as an integer." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:299 +msgid "Must specify readonly in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:307 +msgid "Bad value for 'readonly'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:311 +msgid "'readonly' not string or bool" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:325 +msgid "New volume type must be specified." 
+msgstr "" + +#: cinder/api/contrib/volume_transfer.py:131 +msgid "Listing volume transfers" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:147 +#, python-format +msgid "Creating new volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:162 +#, python-format +msgid "Creating transfer of volume %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:183 +#, python-format +msgid "Accepting volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:196 +#, python-format +msgid "Accepting transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:217 +#, python-format +msgid "Delete transfer with id: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:64 +msgid "key_size must be non-negative" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:67 +msgid "key_size must be an integer" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:73 +msgid "provider must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:75 +msgid "control_location must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:83 +#, python-format +msgid "Valid control location are: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:111 +msgid "Cannot create encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:115 +msgid "Create body is not valid." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:157 +msgid "Cannot delete encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/middleware/auth.py:108 +msgid "Invalid service catalog json." +msgstr "" + +#: cinder/api/middleware/fault.py:44 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:53 cinder/api/openstack/wsgi.py:984 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/fault.py:69 +#, python-format +msgid "%(exception)s: %(explanation)s" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:55 cinder/api/middleware/sizelimit.py:64 +#: cinder/api/middleware/sizelimit.py:78 +msgid "Request is too large." +msgstr "" + +#: cinder/api/openstack/__init__.py:69 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:80 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:104 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:126 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." 
+msgstr "" + +#: cinder/api/openstack/wsgi.py:220 cinder/api/openstack/wsgi.py:634 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:639 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:677 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:682 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:685 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:793 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:799 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:803 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:914 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:917 cinder/api/openstack/wsgi.py:930 +#: cinder/api/v1/snapshot_metadata.py:53 cinder/api/v1/snapshot_metadata.py:71 +#: cinder/api/v1/snapshot_metadata.py:96 cinder/api/v1/snapshot_metadata.py:121 +#: cinder/api/v1/volume_metadata.py:53 cinder/api/v1/volume_metadata.py:71 +#: cinder/api/v1/volume_metadata.py:96 cinder/api/v1/volume_metadata.py:121 +#: cinder/api/v2/snapshot_metadata.py:53 cinder/api/v2/snapshot_metadata.py:71 +#: cinder/api/v2/snapshot_metadata.py:96 cinder/api/v2/snapshot_metadata.py:121 +#: cinder/api/v2/volume_metadata.py:52 cinder/api/v2/volume_metadata.py:70 +#: cinder/api/v2/volume_metadata.py:95 cinder/api/v2/volume_metadata.py:120 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:927 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:939 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:987 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:139 cinder/api/v2/limits.py:138 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:264 cinder/api/v2/limits.py:261 +msgid "This request was rate-limited." 
+msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:37 cinder/api/v1/snapshot_metadata.py:117 +#: cinder/api/v1/snapshot_metadata.py:156 cinder/api/v2/snapshot_metadata.py:37 +#: cinder/api/v2/snapshot_metadata.py:117 +#: cinder/api/v2/snapshot_metadata.py:156 +msgid "snapshot does not exist" +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:139 +#: cinder/api/v1/snapshot_metadata.py:149 cinder/api/v1/volume_metadata.py:139 +#: cinder/api/v1/volume_metadata.py:149 cinder/api/v2/snapshot_metadata.py:139 +#: cinder/api/v2/snapshot_metadata.py:149 cinder/api/v2/volume_metadata.py:138 +#: cinder/api/v2/volume_metadata.py:148 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:119 cinder/api/v2/snapshots.py:120 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:184 +msgid "'volume_id' must be specified" +msgstr "" + +#: cinder/api/v1/snapshots.py:182 cinder/api/v2/snapshots.py:193 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:186 cinder/api/v2/snapshots.py:202 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:37 cinder/api/v1/volume_metadata.py:117 +#: cinder/api/v1/volume_metadata.py:156 cinder/api/v2/volume_metadata.py:36 +#: cinder/api/v2/volume_metadata.py:116 cinder/api/v2/volume_metadata.py:155 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:111 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:290 cinder/api/v2/volumes.py:228 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:344 cinder/api/v1/volumes.py:348 +#: cinder/api/v2/volumes.py:298 cinder/api/v2/volumes.py:302 +msgid "Invalid imageRef provided." +msgstr "" + +#: cinder/api/v1/volumes.py:388 cinder/api/v2/volumes.py:354 +#, python-format +msgid "snapshot id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:401 +#, python-format +msgid "source vol id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:412 cinder/api/v2/volumes.py:377 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/v1/volumes.py:496 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/snapshots.py:111 cinder/api/v2/snapshots.py:126 +#: cinder/api/v2/snapshots.py:267 +msgid "Snapshot could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:174 cinder/api/v2/snapshots.py:234 +#: cinder/api/v2/volumes.py:313 cinder/api/v2/volumes.py:419 +#, python-format +msgid "Missing required element '%s' in request body" +msgstr "" + +#: cinder/api/v2/snapshots.py:190 cinder/api/v2/volumes.py:217 +#: cinder/api/v2/volumes.py:234 cinder/api/v2/volumes.py:449 +msgid "Volume could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:230 cinder/api/v2/volumes.py:415 +msgid "Missing request body" +msgstr "" + +#: cinder/api/v2/types.py:70 +msgid "Volume type not found" +msgstr "" + +#: cinder/api/v2/volumes.py:237 +msgid "Volume cannot be deleted while in attached state" +msgstr "" + +#: cinder/api/v2/volumes.py:343 +msgid "Volume type not found." 
+msgstr "" + +#: cinder/api/v2/volumes.py:366 +#, python-format +msgid "source volume id:%s not found" +msgstr "" + +#: cinder/api/v2/volumes.py:472 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:66 +msgid "Backup status must be available or error" +msgstr "" + +#: cinder/backup/api.py:105 +msgid "Volume to be backed up must be available" +msgstr "" + +#: cinder/backup/api.py:140 +msgid "Backup status must be available" +msgstr "" + +#: cinder/backup/api.py:145 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:154 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:170 +msgid "Volume to be restored to must be available" +msgstr "" + +#: cinder/backup/api.py:176 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." +msgstr "" + +#: cinder/backup/api.py:181 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:97 +msgid "NULL host not allowed for volume backend lookup." +msgstr "" + +#: cinder/backup/manager.py:100 +#, python-format +msgid "Checking hostname '%s' for backend info." +msgstr "" + +#: cinder/backup/manager.py:107 +#, python-format +msgid "Backend not found in hostname (%s) so using default." +msgstr "" + +#: cinder/backup/manager.py:117 +#, python-format +msgid "Manager requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:120 cinder/backup/manager.py:132 +msgid "Fetching default backend." +msgstr "" + +#: cinder/backup/manager.py:123 +#, python-format +msgid "Volume manager for backend '%s' does not exist." +msgstr "" + +#: cinder/backup/manager.py:129 +#, python-format +msgid "Driver requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:147 +#, python-format +msgid "" +"Registering backend %(backend)s (host=%(host)s " +"backend_name=%(backend_name)s)." +msgstr "" + +#: cinder/backup/manager.py:154 +#, python-format +msgid "Registering default backend %s." +msgstr "" + +#: cinder/backup/manager.py:158 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)." +msgstr "" + +#: cinder/backup/manager.py:165 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s." +msgstr "" + +#: cinder/backup/manager.py:184 +msgid "Cleaning up incomplete backup operations." +msgstr "" + +#: cinder/backup/manager.py:189 +#, python-format +msgid "Resetting volume %s to available (was backing-up)." +msgstr "" + +#: cinder/backup/manager.py:194 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)." +msgstr "" + +#: cinder/backup/manager.py:206 +#, python-format +msgid "Resetting backup %s to error (was creating)." +msgstr "" + +#: cinder/backup/manager.py:212 +#, python-format +msgid "Resetting backup %s to available (was restoring)." +msgstr "" + +#: cinder/backup/manager.py:217 +#, python-format +msgid "Resuming delete on backup: %s." +msgstr "" + +#: cinder/backup/manager.py:225 +#, python-format +msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:237 +#, python-format +msgid "" +"Create backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s." 
+msgstr "" + +#: cinder/backup/manager.py:249 +#, python-format +msgid "" +"Create backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:282 +#, python-format +msgid "Create backup finished. backup: %s." +msgstr "" + +#: cinder/backup/manager.py:286 +#, python-format +msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:299 +#, python-format +msgid "" +"Restore backup aborted: expected volume status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:310 +#, python-format +msgid "" +"Restore backup aborted: expected backup status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:329 +#, python-format +msgid "" +"Restore backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:360 +#, python-format +msgid "" +"Restore backup finished, backup %(backup_id)s restored to volume " +"%(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:379 +#, python-format +msgid "Delete backup started, backup: %s." +msgstr "" + +#: cinder/backup/manager.py:386 +#, python-format +msgid "" +"Delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:399 +#, python-format +msgid "" +"Delete backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:422 +#, python-format +msgid "Delete backup finished, backup %s deleted." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:116 +msgid "" +"rbd striping not supported - ignoring configuration settings for rbd " +"striping" +msgstr "" + +#: cinder/backup/drivers/ceph.py:147 +#, python-format +msgid "invalid user '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:213 +msgid "backup_id required" +msgstr "" + +#: cinder/backup/drivers/ceph.py:224 +#, python-format +msgid "discarding %(length)s bytes from offset %(offset)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:232 +#, python-format +msgid "writing zeroes chunk %d" +msgstr "" + +#: cinder/backup/drivers/ceph.py:246 +#, python-format +msgid "transferring data between '%(src)s' and '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:250 +#, python-format +msgid "%(chunks)s chunks of %(bytes)s bytes to be transferred" +msgstr "" + +#: cinder/backup/drivers/ceph.py:269 +#, python-format +msgid "transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:279 +#, python-format +msgid "transferring remaining %s bytes" +msgstr "" + +#: cinder/backup/drivers/ceph.py:295 +#, python-format +msgid "creating base image '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:322 cinder/backup/drivers/ceph.py:603 +#, python-format +msgid "deleting backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:325 +msgid "no backup snapshot to delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:361 +#, python-format +msgid "trying diff format name format basename='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:369 +#, python-format +msgid "image %s not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:377 +#, python-format +msgid "base image still has %s snapshots so skipping base image delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:382 +#, python-format +msgid "deleting base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:389 +#, python-format +msgid "image busy, retrying %(retries)s more time(s) in %(delay)ss" +msgstr "" + +#: cinder/backup/drivers/ceph.py:394 +msgid "max retries reached - raising error" +msgstr "" + +#: cinder/backup/drivers/ceph.py:397 +#, python-format +msgid "base backup image='%s' deleted)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:407 +#, python-format +msgid "deleting source snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:453 +#, python-format +msgid "performing differential transfer from '%(src)s' to '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:478 +#, python-format +msgid "rbd diff op failed - (ret=%(ret)s stderr=%(stderr)s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:488 +#, python-format +msgid "image '%s' not found - trying diff format name" +msgstr "" + +#: cinder/backup/drivers/ceph.py:493 +#, python-format +msgid "diff format image '%s' not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:528 +#, python-format +msgid "using --from-snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:543 +#, python-format +msgid "source snap '%s' is stale so deleting" +msgstr "" + +#: cinder/backup/drivers/ceph.py:555 +#, python-format +msgid "" +"snap='%(snap)s' does not exist in base image='%(base)s' - aborting " +"incremental backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:566 +#, python-format +msgid "creating backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:586 +#, python-format +msgid "differential backup transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:595 +msgid "differential backup transfer failed" +msgstr "" + +#: 
cinder/backup/drivers/ceph.py:625 +#, python-format +msgid "creating base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:634 +msgid "copying data" +msgstr "" + +#: cinder/backup/drivers/ceph.py:694 +#, python-format +msgid "looking for snapshot of backup base '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:697 +#, python-format +msgid "backup base '%s' has no snapshots" +msgstr "" + +#: cinder/backup/drivers/ceph.py:704 +#, python-format +msgid "backup '%s' has no snapshot" +msgstr "" + +#: cinder/backup/drivers/ceph.py:708 +#, python-format +msgid "backup should only have one snapshot but instead has %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:713 +#, python-format +msgid "found snapshot '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:734 +msgid "need non-zero volume size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:751 +#, python-format +msgid "Starting backup of volume='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:764 +msgid "forcing full backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:776 +#, python-format +msgid "backup '%s' finished." +msgstr "" + +#: cinder/backup/drivers/ceph.py:834 +msgid "adjusting restore vol size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:846 +#, python-format +msgid "trying incremental restore from base='%(base)s' snap='%(snap)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:858 +msgid "differential restore failed, trying full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:869 +#, python-format +msgid "restore transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:916 +#, python-format +msgid "rbd has %s extents" +msgstr "" + +#: cinder/backup/drivers/ceph.py:938 +msgid "dest volume is original volume - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:959 +msgid "destination has extents - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:964 +#, python-format +msgid "no restore point found for backup='%s', forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:995 +msgid "forcing full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1005 +#, python-format +msgid "starting restore from Ceph backup=%(src)s to volume=%(dest)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1016 +msgid "volume_file does not support fileno() so skipping fsync()" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1021 +msgid "restore finished successfully." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:1023 +#, python-format +msgid "restore finished with error - %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1029 +#, python-format +msgid "delete started for backup=%s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1034 +msgid "rbd image not found but continuing anyway so that db entry can be removed" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1037 +#, python-format +msgid "delete '%s' finished with warning" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1039 +#, python-format +msgid "delete '%s' finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:106 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:123 +#, python-format +msgid "single_user auth mode enabled, but %(param)s not set" +msgstr "" + +#: cinder/backup/drivers/swift.py:141 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:146 +#, python-format +msgid "container %s does not exist" +msgstr "" + +#: cinder/backup/drivers/swift.py:151 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/drivers/swift.py:157 +#, python-format +msgid "_create_container started, container: %(container)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:173 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:182 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:192 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:209 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/drivers/swift.py:214 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:219 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:224 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/drivers/swift.py:234 +#, python-format +msgid "volume size %d is invalid." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:248 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:271 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/drivers/swift.py:278 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:287 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/drivers/swift.py:291 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/drivers/swift.py:297 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:301 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:304 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:312 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/drivers/swift.py:328 cinder/backup/drivers/tsm.py:324 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/drivers/swift.py:345 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/drivers/swift.py:350 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:356 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/drivers/swift.py:362 +#, python-format +msgid "" +"restoring object from swift. backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:378 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/drivers/swift.py:401 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:409 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:423 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:428 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:432 cinder/backup/drivers/tsm.py:378 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:446 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:455 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:458 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:468 cinder/backup/drivers/tsm.py:440 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/backup/drivers/tsm.py:85 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to create device hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:143 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to obtain backup success notification from " +"server.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:173 +#, python-format +msgid "" +"restore: %(vol_id)s Failed.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:199 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a block device." +msgstr "" + +#: cinder/backup/drivers/tsm.py:206 +#, python-format +msgid "backup: %(vol_id)s Failed. Cannot obtain real path to device %(path)s." +msgstr "" + +#: cinder/backup/drivers/tsm.py:213 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a file." +msgstr "" + +#: cinder/backup/drivers/tsm.py:260 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to remove backup hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:286 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to TSM, volume path: " +"%(volume_path)s," +msgstr "" + +#: cinder/backup/drivers/tsm.py:298 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:308 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:338 +#, python-format +msgid "" +"restore: starting restore of backup from TSM to volume %(volume_id)s, " +"backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:352 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:362 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:413 +#, python-format +msgid "" +"delete: %(vol_id)s Failed to run dsmc with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:421 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments with " +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:432 +#, python-format +msgid "" +"delete: %(vol_id)s Failed with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/brick/exception.py:55 +#, python-format +msgid "Exception in string format operation. 
msg='%s'" +msgstr "" + +#: cinder/brick/exception.py:85 +msgid "We are unable to locate any Fibre Channel devices." +msgstr "" + +#: cinder/brick/exception.py:89 +msgid "Unable to find a Fibre Channel volume device." +msgstr "" + +#: cinder/brick/exception.py:93 +#, python-format +msgid "Volume device not found at %(device)s." +msgstr "" + +#: cinder/brick/exception.py:97 +#, python-format +msgid "Unable to find Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:101 +#, python-format +msgid "Failed to create Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:105 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:109 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:113 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:117 +#, python-format +msgid "Connect to volume via protocol %(protocol)s not supported." +msgstr "" + +#: cinder/brick/initiator/connector.py:127 +#, python-format +msgid "Invalid InitiatorConnector protocol specified %(protocol)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:140 +#, python-format +msgid "Failed to access the device on the path %(path)s: %(error)s %(info)s." +msgstr "" + +#: cinder/brick/initiator/connector.py:229 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. Try" +" number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:242 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:317 +#, python-format +msgid "Could not find the iSCSI Initiator File %s" +msgstr "" + +#: cinder/brick/initiator/connector.py:609 +msgid "We are unable to locate any Fibre Channel devices" +msgstr "" + +#: cinder/brick/initiator/connector.py:619 +#, python-format +msgid "Looking for Fibre Channel dev %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:629 +msgid "Fibre Channel volume device not found." +msgstr "" + +#: cinder/brick/initiator/connector.py:633 +#, python-format +msgid "Fibre Channel volume not yet found. Will rescan & retry. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:649 +#, python-format +msgid "Found Fibre Channel volume %(name)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:658 +#, python-format +msgid "Multipath device discovered %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:776 +#, python-format +msgid "AoE volume not yet found at: %(path)s. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:789 +#, python-format +msgid "Found AoE device %(path)s (after %(tries)s rediscover)" +msgstr "" + +#: cinder/brick/initiator/connector.py:815 +#, python-format +msgid "aoe-discover: stdout=%(out)s stderr=%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:825 +#, python-format +msgid "aoe-revalidate %(dev)s: stdout=%(out)s stderr=%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:834 +#, python-format +msgid "aoe-flush %(dev)s: stdout=%(out)s stderr=%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:858 +msgid "" +"Connection details not present. RemoteFsClient may not initialize " +"properly." 
+msgstr "" + +#: cinder/brick/initiator/connector.py:915 +msgid "Invalid connection_properties specified: no device_path attribute" +msgstr "" + +#: cinder/brick/initiator/linuxfc.py:50 cinder/brick/initiator/linuxfc.py:56 +msgid "systool is not installed" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:99 +#: cinder/brick/initiator/linuxscsi.py:107 +#: cinder/brick/initiator/linuxscsi.py:124 +#, python-format +msgid "multipath call failed exit (%(code)s)" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:145 +#, python-format +msgid "Couldn't find multipath device %(line)s" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:149 +#, python-format +msgid "Found multipath device = %(mdev)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:140 +msgid "Attempting recreate of backing lun..." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:158 +#, python-format +msgid "" +"Failed to recover attempt to create iscsi backing lun for volume " +"id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:177 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:184 +#, python-format +msgid "" +"Created volume path %(vp)s,\n" +"content: %(vc)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:216 cinder/brick/iscsi/iscsi.py:365 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:227 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:258 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:262 +#, python-format +msgid "Volume path %s does not exist, nothing to remove." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:280 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:290 cinder/brick/iscsi/iscsi.py:550 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:375 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:469 +msgid "cinder-rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:489 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:513 cinder/brick/iscsi/iscsi.py:522 +#, python-format +msgid "Failed to create iscsi target for volume id:%s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:532 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:542 +#, python-format +msgid "Failed to remove iscsi target for volume id:%s." 
+msgstr "" + +#: cinder/brick/iscsi/iscsi.py:571 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 cinder/brick/local_dev/lvm.py:158 +#: cinder/brick/local_dev/lvm.py:474 cinder/brick/local_dev/lvm.py:503 +#: cinder/brick/local_dev/lvm.py:546 cinder/brick/local_dev/lvm.py:609 +#, python-format +msgid "Cmd: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 cinder/brick/local_dev/lvm.py:159 +#: cinder/brick/local_dev/lvm.py:475 cinder/brick/local_dev/lvm.py:504 +#: cinder/brick/local_dev/lvm.py:547 cinder/brick/local_dev/lvm.py:610 +#, python-format +msgid "StdOut: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 cinder/brick/local_dev/lvm.py:160 +#: cinder/brick/local_dev/lvm.py:476 cinder/brick/local_dev/lvm.py:505 +#: cinder/brick/local_dev/lvm.py:548 cinder/brick/local_dev/lvm.py:611 +#, python-format +msgid "StdErr: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, python-format +msgid "Unable to locate Volume Group %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:157 +msgid "Error querying thin pool about data_percent" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:370 +#, python-format +msgid "Unable to find VG: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:420 +msgid "" +"Requested to set up thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:473 +msgid "Error creating Volume" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:489 +#, python-format +msgid "Unable to find LV: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:502 +msgid "Error creating snapshot" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:545 +msgid "Error activating LV" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:563 +#, python-format +msgid "Error reported running lvremove: CMD: %(command)s, RESPONSE: %(response)s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:568 +msgid "Attempting udev settle and retry of lvremove..." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:608 +msgid "Error extending Volume" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:39 +msgid "nfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:45 +msgid "glusterfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:86 +#, python-format +msgid "Already mounted: %s" +msgstr "" + +#: cinder/common/config.py:125 +msgid "Deploy v1 of the Cinder API." +msgstr "" + +#: cinder/common/config.py:128 +msgid "Deploy v2 of the Cinder API." +msgstr "" + +#: cinder/common/sqlalchemyutils.py:66 +#: cinder/openstack/common/db/sqlalchemy/utils.py:72 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:114 +#: cinder/openstack/common/db/sqlalchemy/utils.py:120 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/compute/nova.py:97 +#, python-format +msgid "Novaclient connection created using URL: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:63 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:190 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:843 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1842 +#, python-format +msgid "VolumeType %s deletion failed, VolumeType in use." 
+msgstr "" + +#: cinder/db/sqlalchemy/api.py:2530 +#, python-format +msgid "No backup with id %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2615 +msgid "Volume must be available" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2639 +#, python-format +msgid "Volume in unexpected state %s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2662 +#, python-format +msgid "" +"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " +"%(status)s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:37 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:64 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:240 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:269 +msgid "Downgrade from initial Cinder install is unsupported." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:49 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:74 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:105 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:45 +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:48 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:46 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:127 +msgid "Dropping foreign key reservations_ibfk_1 failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:133 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:140 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:147 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:60 +msgid "Exception while creating table 'volume_glance_metadata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:75 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:68 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:58 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:61 +msgid "transfers table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:31 +msgid "migrations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:61 +#, python-format +msgid "Table |%s| not created" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:37 +#, python-format +msgid "Exception while dropping table %s." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:100 +#, python-format +msgid "Exception while creating table %s." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:34 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:43 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:49 +#, python-format +msgid "Column |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:92 +msgid "encryption_key_id column not dropped from volumes" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:100 +msgid "encryption_key_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:105 +msgid "volume_type_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:113 +msgid "encryption table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:49 +msgid "Table quality_of_service_specs not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:60 +msgid "Adding qos_specs_id column to volume type table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:85 +msgid "Dropping foreign key volume_types_ibfk_1 failed" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:93 +msgid "Dropping qos_specs_id column failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:100 +msgid "Dropping quality_of_service_specs table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:59 +msgid "volume_admin_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:48 +msgid "" +"Found existing 'default' entries in the quota_classes table. Skipping " +"insertion of default values." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:72 +msgid "Added default quota class data into the DB." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:74 +msgid "Default quota class data not inserted into the DB." +msgstr "" + +#: cinder/image/glance.py:161 cinder/image/glance.py:169 +#, python-format +msgid "Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:94 cinder/image/image_utils.py:199 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/image/image_utils.py:101 +#, python-format +msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:109 cinder/image/image_utils.py:192 +#, python-format +msgid "" +"Size is %(image_size)dGB and doesn't fit in a volume of size " +"%(volume_size)dGB." +msgstr "" + +#: cinder/image/image_utils.py:157 +#, python-format +msgid "" +"qemu-img is not installed and image is of type %s. Only RAW images can " +"be used if qemu-img is not installed." +msgstr "" + +#: cinder/image/image_utils.py:164 +msgid "" +"qemu-img is not installed and the disk format is not specified. Only RAW" +" images can be used if qemu-img is not installed." 
+msgstr "" + +#: cinder/image/image_utils.py:178 +#, python-format +msgid "Copying image from %(tmp)s to volume %(dest)s - size: %(size)s" +msgstr "" + +#: cinder/image/image_utils.py:206 +#, python-format +msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:224 +#, python-format +msgid "Converted to %(vol_format)s, but format is now %(file_format)s" +msgstr "" + +#: cinder/image/image_utils.py:260 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:78 +msgid "" +"config option keymgr.fixed_key has not been defined: some operations may " +"fail unexpectedly" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:80 +msgid "keymgr.fixed_key not defined" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:134 +#, python-format +msgid "Not deleting key %s" +msgstr "" + +#: cinder/openstack/common/eventlet_backdoor.py:140 +#, python-format +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/fileutils.py:64 +#, python-format +msgid "Reloading cached file %s" +msgstr "" + +#: cinder/openstack/common/gettextutils.py:252 +msgid "Message objects do not support addition." +msgstr "" + +#: cinder/openstack/common/gettextutils.py:261 +msgid "" +"Message objects do not support str() because they may contain non-ascii " +"characters. Please use unicode() or translate() instead." +msgstr "" + +#: cinder/openstack/common/imageutils.py:96 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:189 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:200 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:227 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:235 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/log.py:301 +#, python-format +msgid "Deprecated: %s" +msgstr "" + +#: cinder/openstack/common/log.py:402 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:453 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:623 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:82 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:89 +#: cinder/tests/brick/test_brick_connector.py:466 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:129 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:136 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:43 +#, python-format +msgid "Unexpected argument for periodic task creation: %(arg)s." 
+msgstr "" + +#: cinder/openstack/common/periodic_task.py:134 +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:139 +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:177 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:186 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." +msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:127 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/openstack/common/processutils.py:142 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:167 +#: cinder/openstack/common/processutils.py:239 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:345 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:179 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/openstack/common/processutils.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:318 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:220 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/openstack/common/processutils.py:224 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/openstack/common/service.py:175 +#: cinder/openstack/common/service.py:269 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:187 +msgid "Exception during rpc cleanup." 
+msgstr "" + +#: cinder/openstack/common/service.py:238 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:275 +msgid "Unhandled exception" +msgstr "" + +#: cinder/openstack/common/service.py:308 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/openstack/common/service.py:327 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/openstack/common/service.py:337 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/openstack/common/service.py:354 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/openstack/common/service.py:358 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/service.py:362 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/openstack/common/service.py:392 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/openstack/common/service.py:410 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/openstack/common/sslutils.py:98 +#, python-format +msgid "Invalid SSL version: %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:86 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/strutils.py:182 +#, python-format +msgid "Invalid string format: %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:189 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/openstack/common/versionutils.py:69 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and " +"may be removed in %(remove_in)s." +msgstr "" + +#: cinder/openstack/common/versionutils.py:73 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s and may be removed in " +"%(remove_in)s. It will not be superseded." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:29 +msgid "An unknown error occurred in crypto utils." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:36 +#, python-format +msgid "Block size of %(given)d is too big, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:45 +#, python-format +msgid "Length of %(given)d is too long, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/db/exception.py:44 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:487 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:538 +#, python-format +msgid "Got 'mysql server has gone away': %s" +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:610 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/utils.py:33 +msgid "Sort key supplied was not valid." +msgstr "" + +#: cinder/openstack/common/notifier/api.py:129 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:145 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:164 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. 
Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:105 +#, python-format +msgid "" +"An RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:83 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "no calling threads waiting for msg_id: %s, message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:216 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshold: %d. There " +"could be a MulticallProxyWaiter leak." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:299 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:345 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "received %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:422 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:423 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:280 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:459 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:594 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:597 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:631 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:640 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:668 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message (%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." 
+msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:166 +#: cinder/openstack/common/rpc/impl_qpid.py:164 +msgid "Failed to process message... skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:477 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:499 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:536 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:552 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:606 +#: cinder/openstack/common/rpc/impl_qpid.py:507 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:624 +#: cinder/openstack/common/rpc/impl_qpid.py:522 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:628 +#: cinder/openstack/common/rpc/impl_qpid.py:526 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:667 +#: cinder/openstack/common/rpc/impl_qpid.py:561 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:84 +#, python-format +msgid "Invalid value for qpid_topology_version: %d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:455 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:461 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:474 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:534 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:101 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:136 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:137 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:138 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:146 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:158 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:200 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:205 +msgid "You cannot send on this socket." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:267 +#, python-format +msgid "Running func with context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:305 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:387 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:437 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:443 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:475 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:481 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:497 +#, python-format +msgid "Required IPC directory does not exist at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:506 +#, python-format +msgid "Permission denied to IPC directory at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:509 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:543 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:562 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:590 +msgid "Skipping topic registration. Already registered." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:597 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:649 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:662 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:675 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:678 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:681 +#, python-format +msgid "Received message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:682 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:691 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:698 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:721 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:724 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:728 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:731 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:771 +#, python-format +msgid "topic is %s." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:815 +#, python-format +msgid "rpc_zmq_matchmaker = %(orig)s is deprecated; use %(new)s instead" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#: cinder/openstack/common/rpc/matchmaker_ring.py:79 +#: cinder/openstack/common/rpc/matchmaker_ring.py:97 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:54 +#, python-format +msgid "extra_spec requirement '%(req)s' does not match '%(cap)s'" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:67 +#, python-format +msgid "%(host_state)s fails resource_type extra_specs requirements" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:43 +msgid "Re-scheduling is disabled." +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:52 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/scheduler/driver.py:69 +msgid "Must implement host_passes_filters" +msgstr "" + +#: cinder/scheduler/driver.py:74 +msgid "Must implement find_retype_host" +msgstr "" + +#: cinder/scheduler/driver.py:78 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:82 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:98 +#, python-format +msgid "cannot place volume %(id)s on %(host)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:114 +#, python-format +msgid "No valid hosts for volume %(id)s with type %(type)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:125 +#, python-format +msgid "" +"Current host not valid for volume %(id)s with type %(type)s, migration " +"not allowed" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:156 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:174 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:207 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:259 +#, python-format +msgid "Filtered %s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:276 +#, python-format +msgid "Choosing %s" +msgstr "" + +#: cinder/scheduler/host_manager.py:264 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:269 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:294 +#, python-format +msgid "volume service is down or disabled. (host: %s)" +msgstr "" + +#: cinder/scheduler/manager.py:63 +msgid "" +"ChanceScheduler and SimpleScheduler have been deprecated due to lack of " +"support for advanced features like: volume types, volume encryption, QoS " +"etc. These two schedulers can be fully replaced by FilterScheduler with " +"certain combination of filters and weighers." 
+msgstr "" + +#: cinder/scheduler/manager.py:98 cinder/scheduler/manager.py:100 +msgid "Failed to create scheduler manager volume flow" +msgstr "" + +#: cinder/scheduler/manager.py:159 +msgid "New volume type not specified in request_spec." +msgstr "" + +#: cinder/scheduler/manager.py:174 +#, python-format +msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." +msgstr "" + +#: cinder/scheduler/manager.py:192 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:68 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%s'" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:43 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:57 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:53 +msgid "No volume_id provided to populate a request_spec from" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:116 +#, python-format +msgid "Failed to schedule_create_volume: %(cause)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:135 +#, python-format +msgid "Failed notifying on %(topic)s payload %(payload)s" +msgstr "" + +#: cinder/tests/fake_driver.py:57 cinder/volume/driver.py:784 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:76 cinder/volume/driver.py:884 +#, python-format +msgid "FAKE ISER: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:97 +msgid "local_path not implemented" +msgstr "" + +#: cinder/tests/fake_driver.py:124 cinder/tests/fake_driver.py:129 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:70 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:78 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:94 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:97 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:58 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_netapp_nfs.py:360 +#, python-format +msgid "Share %(share)s and file name %(file_name)s" +msgstr "" + +#: cinder/tests/test_rbd.py:768 cinder/volume/drivers/rbd.py:175 +msgid "flush() not supported in this version of librbd" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:260 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1507 +#, python-format +msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1510 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1515 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:60 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:61 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: 
cinder/tests/test_xiv_ds8k.py:102 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/tests/api/contrib/test_backups.py:741 +msgid "Invalid input" +msgstr "" + +#: cinder/tests/integrated/test_login.py:29 +#, python-format +msgid "volume: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:32 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:42 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:50 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:58 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:100 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:103 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:121 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:148 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:159 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:166 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/transfer/api.py:68 +msgid "Volume in unexpected state" +msgstr "" + +#: cinder/transfer/api.py:102 cinder/volume/api.py:367 +msgid "status must be available" +msgstr "" + +#: cinder/transfer/api.py:119 +#, python-format +msgid "Failed to create transfer record for %s" +msgstr "" + +#: cinder/transfer/api.py:136 +#, python-format +msgid "Attempt to transfer %s with invalid auth key." +msgstr "" + +#: cinder/transfer/api.py:156 cinder/volume/flows/api/create_volume.py:508 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/transfer/api.py:182 +#, python-format +msgid "Failed to update quota donating volume transfer id %s" +msgstr "" + +#: cinder/transfer/api.py:199 +#, python-format +msgid "Volume %s has been transferred." 
+msgstr "" + +#: cinder/volume/api.py:143 +#, python-format +msgid "Unable to query if %s is in the availability zone set" +msgstr "" + +#: cinder/volume/api.py:171 cinder/volume/api.py:173 +msgid "Failed to create api volume flow" +msgstr "" + +#: cinder/volume/api.py:202 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:214 +#, python-format +msgid "Volume status must be available or error, but current status is: %s" +msgstr "" + +#: cinder/volume/api.py:224 +msgid "Volume cannot be deleted while migrating" +msgstr "" + +#: cinder/volume/api.py:229 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:285 cinder/volume/api.py:350 +#: cinder/volume/qos_specs.py:240 cinder/volume/volume_types.py:67 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:370 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:377 +msgid "status must be in-use to detach" +msgstr "" + +#: cinder/volume/api.py:388 +msgid "Volume status must be available to reserve" +msgstr "" + +#: cinder/volume/api.py:464 +msgid "Snapshot cannot be created while volume is migrating" +msgstr "" + +#: cinder/volume/api.py:468 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:490 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:502 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:553 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/api.py:581 cinder/volume/flows/api/create_volume.py:208 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:585 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:589 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:720 cinder/volume/api.py:772 +msgid "Volume status must be available/in-use." +msgstr "" + +#: cinder/volume/api.py:723 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/api.py:752 +msgid "Volume status must be available to extend." +msgstr "" + +#: cinder/volume/api.py:757 +#, python-format +msgid "" +"New size for extend must be greater than current size. (current: " +"%(size)s, extended: %(new_size)s)" +msgstr "" + +#: cinder/volume/api.py:778 +msgid "Volume is already part of an active migration" +msgstr "" + +#: cinder/volume/api.py:784 +msgid "volume must not have snapshots" +msgstr "" + +#: cinder/volume/api.py:797 +#, python-format +msgid "No available service named %s" +msgstr "" + +#: cinder/volume/api.py:803 +msgid "Destination host must be different than current host" +msgstr "" + +#: cinder/volume/api.py:833 +msgid "Source volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:837 +msgid "Destination volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:842 +#, python-format +msgid "Destination has migration_status %(stat)s, expected %(exp)s." +msgstr "" + +#: cinder/volume/api.py:853 +msgid "Volume status must be available to update readonly flag." 
+msgstr "" + +#: cinder/volume/api.py:862 +#, python-format +msgid "Unable to update type due to incorrect status on volume: %s" +msgstr "" + +#: cinder/volume/api.py:868 +#, python-format +msgid "Volume %s is already part of an active migration." +msgstr "" + +#: cinder/volume/api.py:874 +#, python-format +msgid "migration_policy must be 'on-demand' or 'never', passed: %s" +msgstr "" + +#: cinder/volume/api.py:887 +#, python-format +msgid "Invalid volume_type passed: %s" +msgstr "" + +#: cinder/volume/api.py:900 +#, python-format +msgid "New volume_type same as original: %s" +msgstr "" + +#: cinder/volume/api.py:915 +msgid "Retype cannot change encryption requirements" +msgstr "" + +#: cinder/volume/api.py:927 +msgid "Retype cannot change front-end qos specs for in-use volumes" +msgstr "" + +#: cinder/volume/driver.py:189 cinder/volume/drivers/netapp/nfs.py:174 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:282 +#, python-format +msgid "copy_data_between_volumes %(src)s -> %(dest)s." +msgstr "" + +#: cinder/volume/driver.py:295 cinder/volume/driver.py:309 +#, python-format +msgid "Failed to attach volume %(vol)s" +msgstr "" + +#: cinder/volume/driver.py:327 +#, python-format +msgid "Failed to copy volume %(src)s to %(dest)d" +msgstr "" + +#: cinder/volume/driver.py:340 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:358 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:394 +#, python-format +msgid "Unable to access the backend storage via the path %(path)s." +msgstr "" + +#: cinder/volume/driver.py:433 +#, python-format +msgid "Creating a new backup for volume %s." +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Restoring backup %(backup)s to volume %(volume)s." +msgstr "" + +#: cinder/volume/driver.py:474 +msgid "Extend volume not implemented" +msgstr "" + +#: cinder/volume/driver.py:533 cinder/volume/drivers/emc/emc_smis_iscsi.py:113 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:546 +#, python-format +msgid "ISCSI discovery attempt failed for:%s" +msgstr "" + +#: cinder/volume/driver.py:548 +#, python-format +msgid "Error from iscsiadm -m discovery: %s" +msgstr "" + +#: cinder/volume/driver.py:595 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:599 cinder/volume/drivers/emc/emc_smis_iscsi.py:156 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:696 +msgid "The volume driver requires the iSCSI initiator name in the connector." +msgstr "" + +#: cinder/volume/driver.py:726 cinder/volume/driver.py:845 +#: cinder/volume/drivers/eqlx.py:247 cinder/volume/drivers/lvm.py:359 +#: cinder/volume/drivers/zadara.py:650 +#: cinder/volume/drivers/emc/emc_smis_common.py:859 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:235 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:602 +#: cinder/volume/drivers/netapp/iscsi.py:1032 +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/nexenta/iscsi.py:538 +#: cinder/volume/drivers/windows/windows.py:205 +msgid "Updating volume stats" +msgstr "" + +#: cinder/volume/driver.py:924 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:203 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." 
+msgstr "" + +#: cinder/volume/manager.py:209 +msgid "" +"ThinLVMVolumeDriver is deprecated, please configure LVMISCSIDriver and " +"lvm_type=thin. Continuing with those settings." +msgstr "" + +#: cinder/volume/manager.py:228 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)" +msgstr "" + +#: cinder/volume/manager.py:235 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s" +msgstr "" + +#: cinder/volume/manager.py:244 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:257 +#, python-format +msgid "Failed to re-export volume %s: setting to error state" +msgstr "" + +#: cinder/volume/manager.py:264 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:271 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:273 +#, python-format +msgid "" +"Error encountered during re-exporting phase of driver initialization: " +"%(name)s" +msgstr "" + +#: cinder/volume/manager.py:283 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:328 cinder/volume/manager.py:330 +msgid "Failed to create manager volume flow" +msgstr "" + +#: cinder/volume/manager.py:374 cinder/volume/manager.py:391 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:380 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:389 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:394 +#, python-format +msgid "Cannot delete volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:422 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:427 +#, python-format +msgid "volume %s: glance metadata deleted" +msgstr "" + +#: cinder/volume/manager.py:430 +#, python-format +msgid "no glance metadata found for volume %s" +msgstr "" + +#: cinder/volume/manager.py:434 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:451 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:462 +#, python-format +msgid "snapshot %(snap_id)s: creating" +msgstr "" + +#: cinder/volume/manager.py:490 +#, python-format +msgid "" +"Failed updating %(snapshot_id)s metadata using the provided volumes " +"%(volume_id)s metadata" +msgstr "" + +#: cinder/volume/manager.py:496 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:508 cinder/volume/manager.py:518 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:526 +#, python-format +msgid "Cannot delete snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:556 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:559 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:579 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:583 +msgid "being attached by another host" +msgstr "" + +#: cinder/volume/manager.py:587 +msgid "being attached by different mode" +msgstr "" + +#: cinder/volume/manager.py:590 +msgid "status must be available or attaching" +msgstr "" + +#: cinder/volume/manager.py:698 +#, python-format +msgid 
"Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "" + +#: cinder/volume/manager.py:760 +#, python-format +msgid "Unable to fetch connection information from backend: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:807 +#, python-format +msgid "Unable to terminate volume connection: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:854 +msgid "failed to create new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:857 +msgid "timeout creating new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:880 +#, python-format +msgid "Failed to copy volume %(vol1)s to %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:909 +#, python-format +msgid "" +"migrate_volume_completion: completing migration for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:921 +#, python-format +msgid "" +"migrate_volume_completion is cleaning up an error for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:940 +#, python-format +msgid "Failed to delete migration source vol %(vol)s: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:976 +#, python-format +msgid "volume %s: calling driver migrate_volume" +msgstr "" + +#: cinder/volume/manager.py:1016 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/manager.py:1024 +#, python-format +msgid "" +"Unable to update stats, %(driver_name)s -%(driver_version)s " +"%(config_group)s driver is uninitialized." +msgstr "" + +#: cinder/volume/manager.py:1044 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/manager.py:1091 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to extend volume by %(s_size)sG, " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/manager.py:1103 +#, python-format +msgid "volume %s: extending" +msgstr "" + +#: cinder/volume/manager.py:1105 +#, python-format +msgid "volume %s: extended successfully" +msgstr "" + +#: cinder/volume/manager.py:1107 +#, python-format +msgid "volume %s: Error trying to extend volume" +msgstr "" + +#: cinder/volume/manager.py:1169 +msgid "Failed to update usages while retyping volume." +msgstr "" + +#: cinder/volume/manager.py:1170 +msgid "Failed to get old volume type quota reservations" +msgstr "" + +#: cinder/volume/manager.py:1190 +#, python-format +msgid "Volume %s: retyped succesfully" +msgstr "" + +#: cinder/volume/manager.py:1193 +#, python-format +msgid "" +"Volume %s: driver error when trying to retype, falling back to generic " +"mechanism." +msgstr "" + +#: cinder/volume/manager.py:1204 +msgid "Retype requires migration but is not allowed." +msgstr "" + +#: cinder/volume/manager.py:1212 +msgid "Volume must not have snapshots." 
+msgstr "" + +#: cinder/volume/qos_specs.py:57 +#, python-format +msgid "Valid consumer of QoS specs are: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:84 cinder/volume/qos_specs.py:105 +#: cinder/volume/qos_specs.py:155 cinder/volume/qos_specs.py:197 +#: cinder/volume/qos_specs.py:211 cinder/volume/qos_specs.py:225 +#: cinder/volume/volume_types.py:43 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:123 cinder/volume/qos_specs.py:140 +#: cinder/volume/qos_specs.py:272 cinder/volume/volume_types.py:52 +#: cinder/volume/volume_types.py:99 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/qos_specs.py:156 +#, python-format +msgid "Failed to get all associations of qos specs %s" +msgstr "" + +#: cinder/volume/qos_specs.py:189 +#, python-format +msgid "" +"Type %(type_id)s is already associated with another qos specs: " +"%(qos_specs_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:198 +#, python-format +msgid "Failed to associate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:212 +#, python-format +msgid "Failed to disassociate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:226 +#, python-format +msgid "Failed to disassociate qos specs %s." +msgstr "" + +#: cinder/volume/qos_specs.py:284 cinder/volume/volume_types.py:111 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/utils.py:144 +#, python-format +msgid "" +"Incorrect value error: %(blocksize)s, it may indicate that " +"'volume_dd_blocksize' was configured incorrectly. Fall back to default." +msgstr "" + +#: cinder/volume/volume_types.py:130 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:131 +#: cinder/volume/drivers/block_device.py:143 cinder/volume/drivers/lvm.py:654 +#: cinder/volume/drivers/lvm.py:669 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:157 cinder/volume/drivers/lvm.py:687 +#, python-format +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:183 cinder/volume/drivers/lvm.py:483 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:200 cinder/volume/drivers/lvm.py:504 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:272 cinder/volume/drivers/lvm.py:227 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:287 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:311 cinder/volume/drivers/lvm.py:300 +#: cinder/volume/drivers/zadara.py:509 cinder/volume/drivers/nexenta/nfs.py:189 +#, python-format +msgid "Creating clone of volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:380 +msgid "No free disk" +msgstr "" + +#: cinder/volume/drivers/block_device.py:393 +msgid "No big enough free disk" +msgstr "" + +#: cinder/volume/drivers/coraid.py:84 +#, python-format +msgid "Invalid ESM url scheme \"%s\". Supported https only." +msgstr "" + +#: cinder/volume/drivers/coraid.py:111 +msgid "Invalid REST handle name. Expected path." 
+msgstr "" + +#: cinder/volume/drivers/coraid.py:134 +#, python-format +msgid "Call to json.loads() failed: %(ex)s. Response: %(resp)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:224 +msgid "Session is expired. Relogin on ESM." +msgstr "" + +#: cinder/volume/drivers/coraid.py:244 +msgid "Reply is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:246 +msgid "Error message is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:284 +#, python-format +msgid "Coraid Appliance ping failed: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:297 +#, python-format +msgid "Volume \"%(name)s\" created with VSX LUN \"%(lun)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:311 +#, python-format +msgid "Volume \"%s\" deleted." +msgstr "" + +#: cinder/volume/drivers/coraid.py:315 +#, python-format +msgid "Resize volume \"%(name)s\" to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:319 +#, python-format +msgid "Repository for volume \"%(name)s\" found: \"%(repo)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:333 +#, python-format +msgid "Volume \"%(name)s\" resized. New size is %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:385 +msgid "Cannot create clone volume in different repository." +msgstr "" + +#: cinder/volume/drivers/coraid.py:505 +#, python-format +msgid "Initialize connection %(shelf)s/%(lun)s for %(name)s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:139 +#, python-format +msgid "" +"CLI output\n" +"%s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:154 +msgid "Reading CLI MOTD" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:158 +#, python-format +msgid "Setting CLI terminal width: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:162 +#, python-format +msgid "Sending CLI command: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:169 +msgid "Error executing EQL command" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:199 +#, python-format +msgid "EQL-driver: executing \"%s\"" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:208 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:383 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:215 cinder/volume/drivers/san/san.py:149 +#, python-format +msgid "Error running SSH command: %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:282 +#, python-format +msgid "Volume %s does not exist, it may have already been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:300 +#, python-format +msgid "EQL-driver: Setup is complete, group IP is %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:304 +msgid "Failed to setup the Dell EqualLogic driver" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:320 +#, python-format +msgid "Failed to create volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:329 +#, python-format +msgid "Volume %s was not found while trying to delete it" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:333 +#, python-format +msgid "Failed to delete volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:348 +#, python-format +msgid "Failed to create snapshot of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:361 +#, python-format +msgid "Failed to create volume from snapshot %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:374 +#, python-format +msgid "Failed to create clone of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:384 +#, python-format +msgid "Failed to delete snapshot %(snap)s of volume %(vol)s" +msgstr "" + +#: 
cinder/volume/drivers/eqlx.py:405 +#, python-format +msgid "Failed to initialize connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:415 +#, python-format +msgid "Failed to terminate connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:436 +#, python-format +msgid "Volume %s was not found; it may have been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:440 +#, python-format +msgid "Failed to ensure export of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:459 +#, python-format +msgid "Failed to extend_volume %(name)s from %(current_size)sGB to %(new_size)sGB" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:86 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:91 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:103 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:161 +#, python-format +msgid "Cloning volume %(src)s to volume %(dst)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:166 +msgid "Volume status must be 'available'." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:202 cinder/volume/drivers/nfs.py:122 +#: cinder/volume/drivers/netapp/nfs.py:753 +#, python-format +msgid "cast to %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:216 +msgid "Snapshot status must be \"available\" to clone." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:238 +#, python-format +msgid "snapshot: %(snap)s, volume: %(vol)s, volume_size: %(size)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:257 +#, python-format +msgid "will copy from snapshot at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:275 cinder/volume/drivers/nfs.py:172 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:373 +#, python-format +msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:403 +#, python-format +msgid "nova call result: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:405 +msgid "Call to Nova to create snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:427 +msgid "Nova returned \"error\" status while creating snapshot." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:431 +#, python-format +msgid "Status of snapshot %(id)s is now %(status)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:444 +#, python-format +msgid "Timed out while waiting for Nova update for creation of snapshot %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:456 +#, python-format +msgid "create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:457 +#, python-format +msgid "volume id: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:532 +msgid "'active' must be present when writing snap_info." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:562 +#, python-format +msgid "deleting snapshot %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:566 +msgid "Volume status must be \"available\" or \"in-use\"." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:582 +#, python-format +msgid "" +"Snapshot record for %s is not present, allowing snapshot_delete to " +"proceed."
+msgstr "" + +#: cinder/volume/drivers/glusterfs.py:587 +#, python-format +msgid "snapshot_file for this snap is %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:608 +#, python-format +msgid "No base file found for %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:625 +#, python-format +msgid "No %(base_id)s found for %(file)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:680 +#, python-format +msgid "No file found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:690 +#, python-format +msgid "No snap found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:701 +#, python-format +msgid "No file depends on %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:727 +#, python-format +msgid "Check condition failed: %s expected to be None." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:778 +msgid "Call to Nova delete snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:796 +#, python-format +msgid "status of snapshot %s is still \"deleting\"... waiting" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:802 +#, python-format +msgid "Unable to delete snapshot %(id)s, status: %(status)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:815 +#, python-format +msgid "Timed out while waiting for Nova update for deletion of snapshot %(id)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:904 +#, python-format +msgid "%s must be a valid raw or qcow2 image." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:967 +msgid "Extend volume is only supported for this driver when no snapshots exist." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:975 +#, python-format +msgid "Unrecognized backing format: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:990 +#, python-format +msgid "creating new volume at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:993 +#, python-format +msgid "file already exists at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1019 cinder/volume/drivers/nfs.py:159 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1021 +#, python-format +msgid "Available shares: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1038 +#, python-format +msgid "" +"GlusterFS share at %(dir)s is not writable by the Cinder volume service. " +"Snapshot operations will not be supported." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:96 +#, python-format +msgid "GPFS is not active. Detailed output: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:97 +#, python-format +msgid "GPFS is not running - state: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:140 +msgid "Option gpfs_mount_point_base is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:147 +msgid "Option gpfs_images_share_mode is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:153 +msgid "Option gpfs_images_dir is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:160 +#, python-format +msgid "" +"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " +"belong to different file systems" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:169 +#, python-format +msgid "" +"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in " +"cluster daemon level %(cur)s - must be at least at level %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:183 +#, python-format +msgid "%s must be an absolute path." 
+msgstr "" + +#: cinder/volume/drivers/gpfs.py:188 +#, python-format +msgid "%s is not a directory." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:197 +#, python-format +msgid "" +"The GPFS filesystem %(fs)s is not at the required release level. Current" +" level is %(cur)s, must be at least %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:556 +#, python-format +msgid "Failed to resize volume %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:604 +#, python-format +msgid "mkfs failed on volume %(vol)s, error message was: %(err)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:637 +#, python-format +msgid "" +"%s cannot be accessed. Verify that GPFS is active and file system is " +"mounted." +msgstr "" + +#: cinder/volume/drivers/lvm.py:189 +#, python-format +msgid "Unabled to delete due to existing snapshot for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:215 +#, python-format +msgid "Volume device file path %s does not exist." +msgstr "" + +#: cinder/volume/drivers/lvm.py:221 +#, python-format +msgid "Size for volume: %s not found, cannot secure delete." +msgstr "" + +#: cinder/volume/drivers/lvm.py:262 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:361 +#, python-format +msgid "Unable to update stats on non-initialized Volume Group: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:462 +#, python-format +msgid "Error creating iSCSI target, retrying creation for target: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:482 +#, python-format +msgid "volume_info:%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:518 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:519 cinder/volume/drivers/lvm.py:724 +#: cinder/volume/drivers/huawei/rest_common.py:1225 +#, python-format +msgid "%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:573 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/nfs.py:109 +msgid "Driver specific implementation needs to return mount_point_base." +msgstr "" + +#: cinder/volume/drivers/nfs.py:263 +#, python-format +msgid "Expected volume size was %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:264 +#, python-format +msgid " but size is now %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:361 +#, python-format +msgid "%s is already mounted" +msgstr "" + +#: cinder/volume/drivers/nfs.py:421 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:426 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/nfs.py:431 +#, python-format +msgid "NFS config 'nfs_oversub_ratio' invalid. Must be > 0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:439 +#, python-format +msgid "NFS config 'nfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:493 +#, python-format +msgid "Selected %s as target nfs share." 
+msgstr "" + +#: cinder/volume/drivers/nfs.py:526 +#, python-format +msgid "%s is above nfs_used_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:529 +#, python-format +msgid "%s is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:532 +#, python-format +msgid "%s reserved space is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/rbd.py:160 +#, python-format +msgid "Invalid argument - whence=%s not supported" +msgstr "" + +#: cinder/volume/drivers/rbd.py:164 +msgid "Invalid argument" +msgstr "" + +#: cinder/volume/drivers/rbd.py:183 +msgid "fileno() not supported by RBD()" +msgstr "" + +#: cinder/volume/drivers/rbd.py:210 +#, python-format +msgid "error opening rbd image %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:259 +msgid "rados and rbd python libraries not found" +msgstr "" + +#: cinder/volume/drivers/rbd.py:265 +msgid "error connecting to ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:346 cinder/volume/drivers/sheepdog.py:178 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:377 +#, python-format +msgid "clone depth exceeds limit of %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:411 +#, python-format +msgid "maximum clone depth (%d) has been reached - flattening source volume" +msgstr "" + +#: cinder/volume/drivers/rbd.py:423 +#, python-format +msgid "flattening source volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:435 +#, python-format +msgid "creating snapshot='%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:445 +#, python-format +msgid "cloning '%(src_vol)s@%(src_snap)s' to '%(dest)s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:459 +msgid "clone created successfully" +msgstr "" + +#: cinder/volume/drivers/rbd.py:468 +#, python-format +msgid "creating volume '%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:484 +#, python-format +msgid "flattening %(pool)s/%(img)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:490 +#, python-format +msgid "cloning %(pool)s/%(img)s@%(snap)s to %(dst)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:527 +msgid "volume has no backup snaps" +msgstr "" + +#: cinder/volume/drivers/rbd.py:550 +#, python-format +msgid "volume %s is not a clone" +msgstr "" + +#: cinder/volume/drivers/rbd.py:568 +#, python-format +msgid "deleting parent snapshot %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:579 +#, python-format +msgid "deleting parent %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:593 +#, python-format +msgid "volume %s no longer exists in backend" +msgstr "" + +#: cinder/volume/drivers/rbd.py:609 +msgid "volume has clone snapshot(s)" +msgstr "" + +#: cinder/volume/drivers/rbd.py:625 +#, python-format +msgid "deleting rbd volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:629 +msgid "" +"ImageBusy error raised while deleting rbd volume. This may have been " +"caused by a connection from a client that has crashed and, if so, may be " +"resolved by retrying the delete after 30 seconds has elapsed." 
+msgstr "" + +#: cinder/volume/drivers/rbd.py:642 +msgid "volume is a clone so cleaning references" +msgstr "" + +#: cinder/volume/drivers/rbd.py:696 +#, python-format +msgid "connection data: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:705 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:709 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:712 +msgid "Not an rbd snapshot" +msgstr "" + +#: cinder/volume/drivers/rbd.py:724 +#, python-format +msgid "not cloneable: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:728 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:733 +msgid "rbd image clone requires image format to be 'raw' but image {0} is '{1}'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:747 +#, python-format +msgid "Unable to open image %(loc)s: %(err)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:817 +msgid "volume backup complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:830 +msgid "volume restore complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:840 cinder/volume/drivers/sheepdog.py:195 +#, python-format +msgid "Failed to Extend Volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:845 cinder/volume/drivers/sheepdog.py:200 +#: cinder/volume/drivers/windows/windows.py:223 +#, python-format +msgid "Extend volume from %(old_size)s GB to %(new_size)s GB." +msgstr "" + +#: cinder/volume/drivers/scality.py:67 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:78 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:84 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:105 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:139 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:59 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:64 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:144 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:151 +#, python-format +msgid "" +"Failed to make httplib connection SolidFire Cluster: %s (verify san_ip " +"settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:154 +#, python-format +msgid "Failed to make httplib connection: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:161 +#, python-format +msgid "" +"Request to SolidFire cluster returned bad status: %(status)s / %(reason)s" +" (check san_login/san_password settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:166 +#, python-format +msgid "HTTP request failed, with status: %(status)s and reason: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:177 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:183 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:187 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:189 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:195 +#, python-format +msgid "Detected 
xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:202 +#: cinder/volume/drivers/solidfire.py:271 +#: cinder/volume/drivers/solidfire.py:366 +#, python-format +msgid "API response: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:222 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:253 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:315 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:398 +msgid "Failed to get model update from clone" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:410 +#, python-format +msgid "Failed volume create: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:425 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:460 +#, python-format +msgid "Failed to get SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:469 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:478 +#, python-format +msgid "Volume %s, not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:481 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:550 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:554 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:556 +msgid "This usually means the volume was never successfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:569 +#, python-format +msgid "Failed to delete SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:572 +#: cinder/volume/drivers/solidfire.py:646 +#: cinder/volume/drivers/solidfire.py:709 +#: cinder/volume/drivers/solidfire.py:734 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:575 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:579 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:587 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:638 +msgid "Entering SolidFire extend_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:660 +msgid "Leaving SolidFire extend_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:665 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:673 +msgid "Failed to get updated stats" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:703 +#: cinder/volume/drivers/solidfire.py:728 +msgid "Entering SolidFire attach_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:773 +msgid "Leaving SolidFire transfer volume" +msgstr "" + +#: cinder/volume/drivers/zadara.py:236 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:260 +#, python-format +msgid "Operation completed. 
%(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:357 +#, python-format +msgid "Pool %(name)s: %(total)sGB total, %(free)sGB free" +msgstr "" + +#: cinder/volume/drivers/zadara.py:408 cinder/volume/drivers/zadara.py:531 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:438 +#, python-format +msgid "Create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:445 cinder/volume/drivers/zadara.py:490 +#: cinder/volume/drivers/zadara.py:516 +#, python-format +msgid "Volume %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:456 +#, python-format +msgid "Delete snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:464 +#, python-format +msgid "snapshot: original volume %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:472 +#, python-format +msgid "snapshot: snapshot %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:483 +#, python-format +msgid "Creating volume from snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:496 +#, python-format +msgid "Snapshot %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:614 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:40 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:79 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:83 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:91 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:98 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:107 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:115 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:130 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:137 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:144 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:152 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:157 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:167 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:177 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:188 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:197 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:218 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:230 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:241 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:257 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:266 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:278 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:287 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:292 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:302 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:312 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:321 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:342 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. 
Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:354 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:365 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:381 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:390 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:402 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:411 +msgid "Entering delete_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:413 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:420 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:430 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:438 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:442 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigService: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:456 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:465 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:472 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:476 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:488 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:497 +#: cinder/volume/drivers/emc/emc_smis_common.py:567 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:502 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s."
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:518 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:527 +#, python-format +msgid "" +"Error Create Snapshot: %(snapshot)s Volume: %(volume)s Error: " +"%(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:535 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:541 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:545 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:551 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:559 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:574 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:590 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:599 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:611 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:621 +#, python-format +msgid "Create export: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:626 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:648 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:663 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:674 +#, python-format +msgid "Error mapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:678 +#, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:694 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:707 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:711 +#, python-format +msgid "HidePaths for volume %s completed successfully." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:724 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:739 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:744 +#, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:757 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:770 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:775 +#, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:781 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:790 +#: cinder/volume/drivers/emc/emc_smis_common.py:820 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:804 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:810 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:834 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:840 +#, python-format +msgid "Volume %s is already mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:852 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:884 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:887 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:903 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:906 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:928 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:948 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:952 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:959 +msgid "Cannot connect to ECOM server" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:971 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:984 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:997 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1010 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1054 +#, python-format +msgid "Pool %(storage_type)s is not found." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1060 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1066 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1082 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1114 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1117 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1130 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1158 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1184 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1188 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1248 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1289 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1302 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1314 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1326 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1361 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1404 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1409 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1419 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1441 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1463 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1491 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1520 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1526 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1538 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1548 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1550 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1566 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:152 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:161 +#, python-format +msgid "Cannot find device number for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:191 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:198 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:215 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:70 +#, python-format +msgid "Range: start LU: %(start)s, end LU: %(end)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:84 +#, python-format +msgid "setting LU upper (end) limit to %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:92 +#, python-format +msgid "%(element)s: %(val)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:103 cinder/volume/drivers/hds/hds.py:105 +#, python-format +msgid "XML exception reading parameter: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:178 +#, python-format +msgid "portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:197 +#, python-format +msgid "No configuration found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:250 +#, python-format +msgid "HDP not found: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:289 +#, python-format +msgid "iSCSI portal not found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:327 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:355 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is cloned." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:372 +#, python-format +msgid "LUN %(lun)s extended to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:395 +#, python-format +msgid "delete lun %(lun)s on %(name)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:480 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created from snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:503 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is created as snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:522 +#, python-format +msgid "LUN %s is deleted." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:57 +msgid "_instantiate_driver: configuration not found." 
+msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:64 +#, python-format +msgid "" +"_instantiate_driver: Loading %(protocol)s driver for Huawei OceanStor " +"%(product)s series storage arrays." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:84 +#, python-format +msgid "" +"\"Product\" or \"Protocol\" is illegal. \"Product\" should be set to " +"either T, Dorado or HVS. \"Protocol\" should be set to either iSCSI or " +"FC. Product: %(product)s Protocol: %(protocol)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:74 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s host: %(host)s initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:92 +#: cinder/volume/drivers/huawei/huawei_t.py:461 +#, python-format +msgid "initialize_connection: Target FC ports WWNS: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:101 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(ini)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:159 +#: cinder/volume/drivers/huawei/rest_common.py:1278 +#, python-format +msgid "" +"_get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " +"check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:206 +#: cinder/volume/drivers/huawei/rest_common.py:1083 +#, python-format +msgid "_get_tgt_iqn: iSCSI IP is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:234 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:248 +#, python-format +msgid "" +"_get_iscsi_tgt_port_info: Failed to get iSCSI port info. Please make sure" +" the iSCSI port IP %s is configured in array." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:323 +#: cinder/volume/drivers/huawei/huawei_t.py:552 +#, python-format +msgid "" +"terminate_connection: volume: %(vol)s, host: %(host)s, connector: " +"%(initiator)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:351 +#, python-format +msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:436 +msgid "validate_connector: The FC driver requires thewwpns in the connector." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:443 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:578 +#, python-format +msgid "_remove_fc_ports: FC port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:40 +#, python-format +msgid "parse_xml_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:129 +#, python-format +msgid "_get_host_os_type: Host %(ip)s OS type is %(os)s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:59 +#, python-format +msgid "HVS Request URL: %(url)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:60 +#, python-format +msgid "HVS Request Data: %(data)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:73 +#, python-format +msgid "HVS Response Data: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:75 +#, python-format +msgid "Bad response from server: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:82 +msgid "JSON transfer error" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:102 +#, python-format +msgid "Login error, reason is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:166 +#, python-format +msgid "" +"%(err)s\n" +"result: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:173 +#, python-format +msgid "%s \"data\" was not in result." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:208 +msgid "Can't find the Qos policy in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:246 +msgid "Can't find lun or lun group in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:280 +#, python-format +msgid "Invalid resource pool: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:298 +#, python-format +msgid "Get pool info error, pool name is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:327 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:354 +#, python-format +msgid "_stop_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:474 +#, python-format +msgid "" +"_mapping_hostgroup_and_lungroup: lun_group: %(lun_group)sview_id: " +"%(view_id)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:511 +#: cinder/volume/drivers/huawei/rest_common.py:543 +#, python-format +msgid "initiator name:%(initiator_name)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:527 +#, python-format +msgid "host lun id is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:553 +#, python-format +msgid "the free wwns %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:574 +#, python-format +msgid "the fc server properties is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:688 +#, python-format +msgid "JSON transfer data error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:874 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:937 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:964 +#, python-format +msgid "" +"PrefetchType config is wrong. PrefetchType must in 1,2,3,4. fetchtype " +"is:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:970 +msgid "Use default prefetch fetchtype. Prefetch fetchtype:Intelligent." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:982 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal.LUNcopy name: " +"%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1056 +#, python-format +msgid "" +"_get_iscsi_port_info: Failed to get iscsi port info through config IP " +"%(ip)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1101 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1124 +#, python-format +msgid "_parse_volume_type: type id: %(type_id)s config parameter is: %(params)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1157 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the configuration file " +"%(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1162 +#, python-format +msgid "The config parameters are: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1239 +#: cinder/volume/drivers/huawei/ssh_common.py:118 +#: cinder/volume/drivers/huawei/ssh_common.py:1265 +#, python-format +msgid "_check_conf_file: Config file invalid. %s must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1246 +#: cinder/volume/drivers/huawei/ssh_common.py:125 +msgid "_check_conf_file: Config file invalid. StoragePool must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1256 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1300 +msgid "Can not find lun in array" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:54 +#, python-format +msgid "ssh_read: Read SSH timeout. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:70 +msgid "No response message. Please check system status." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:101 +#: cinder/volume/drivers/huawei/ssh_common.py:1249 +msgid "do_setup" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:135 +#: cinder/volume/drivers/huawei/ssh_common.py:1287 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType is invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:169 +#, python-format +msgid "_get_login_info: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:224 +#, python-format +msgid "create_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:242 +#, python-format +msgid "" +"_name_translate: Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:279 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the element in configuration " +"file %(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:373 +#: cinder/volume/drivers/huawei/ssh_common.py:1451 +#, python-format +msgid "LUNType must be \"Thin\" or \"Thick\". LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:395 +msgid "" +"_parse_conf_lun_params: Use default prefetch type. 
Prefetch type: " +"Intelligent" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:421 +#, python-format +msgid "" +"_get_maximum_capacity_pool_id: Failed to get pool id. Please check config" +" file and make sure the StoragePool %s is created in storage array." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:436 +#, python-format +msgid "CLI command: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:466 +#, python-format +msgid "" +"_execute_cli: Can not connect to IP %(old)s, try to connect to the other " +"IP %(new)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:501 +#, python-format +msgid "_execute_cli: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:511 +#, python-format +msgid "delete_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:516 +#, python-format +msgid "delete_volume: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:570 +#, python-format +msgid "" +"create_volume_from_snapshot: snapshot name: %(snapshot)s, volume name: " +"%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:580 +#, python-format +msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:650 +#, python-format +msgid "_wait_for_luncopy: LUNcopy %(luncopyname)s status is %(status)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:688 +#, python-format +msgid "create_cloned_volume: src volume: %(src)s, tgt volume: %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:697 +#, python-format +msgid "Source volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:739 +#, python-format +msgid "" +"extend_volume: extended volume name: %(extended_name)s new added volume " +"name: %(added_name)s new added volume size: %(added_size)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:747 +#, python-format +msgid "extend_volume: volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:779 +#, python-format +msgid "create_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:785 +msgid "create_snapshot: Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:792 +#, python-format +msgid "create_snapshot: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:855 +#, python-format +msgid "delete_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:865 +#, python-format +msgid "" +"delete_snapshot: Can not delete snapshot %s for it is a source LUN of " +"LUNCopy." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:873 +#, python-format +msgid "delete_snapshot: Snapshot %(snap)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:916 +#, python-format +msgid "" +"%(func)s: %(msg)s\n" +"CLI command: %(cmd)s\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:933 +#, python-format +msgid "map_volume: Volume %s was not found." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1079 +#, python-format +msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1102 +#, python-format +msgid "remove_map: Host %s does not exist." 
+msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1106 +#, python-format +msgid "remove_map: Volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1119 +#, python-format +msgid "remove_map: No map between host %(host)s and volume %(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1138 +#, python-format +msgid "" +"_delete_map: There are IOs accessing the system. Retry to delete host map" +" %(mapid)s 10s later." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1146 +#, python-format +msgid "" +"_delete_map: Failed to delete host map %(mapid)s.\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1185 +msgid "_update_volume_stats: Updating volume stats." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1277 +msgid "_check_conf_file: Config file invalid. StoragePool must be specified." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1311 +msgid "" +"_get_device_type: The driver only supports Dorado5100 and Dorado 2100 G2 " +"now." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1389 +#, python-format +msgid "" +"create_volume_from_snapshot: %(device)s does not support create volume " +"from snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1396 +#, python-format +msgid "create_cloned_volume: %(device)s does not support clone volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1404 +#, python-format +msgid "extend_volume: %(device)s does not support extend volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1413 +#, python-format +msgid "create_snapshot: %(device)s does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:132 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:142 +#, python-format +msgid "Failed getting details for pool %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:179 +msgid "do_setup: No configured nodes." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:182 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:186 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:190 +msgid "Unable to determine system name" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:193 +msgid "Unable to determine system id" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:196 +msgid "Unable to determine pool extent size" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:203 +#: cinder/volume/drivers/netapp/iscsi.py:122 +#: cinder/volume/drivers/netapp/nfs.py:639 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:157 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:209 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:217 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:225 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:235 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:254 +msgid "The connector does not contain the required information." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:277 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:297 +msgid "CHAP secret exists for host but CHAP is disabled" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:302 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:314 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:316 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:333 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:342 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:365 +msgid "" +"Could not get FC connection information for the host-volume connection. " +"Is the host configured properly for FC connections?" 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:380 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:385 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:403 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:411 +msgid "terminate_connection: Failed to get host name from connector." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:421 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:447 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:459 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:470 +#, python-format +msgid "enter: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:474 +msgid "extend_volume: Extending a volume with snapshots is not supported." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:481 +#, python-format +msgid "leave: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:497 +#, python-format +msgid "enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:523 +#, python-format +msgid "leave: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:540 +#, python-format +msgid "" +"enter: retype: id=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:581 +#, python-format +msgid "" +"exit: retype: ild=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:622 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:623 +msgid "_update_volume_stats: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:44 +#, python-format +msgid "Could not find key in output of command %(cmd)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:64 +#, python-format +msgid "Failed to get code level (%s)." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:86 +#, python-format +msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:143 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:165 +#, python-format +msgid "Failed to find host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:178 +#, python-format +msgid "enter: get_host_from_connector: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:207 +#, python-format +msgid "leave: get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:218 +#, python-format +msgid "enter: create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:224 +msgid "create_host: Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:235 +msgid "create_host: No initiators or wwpns supplied." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:265 +#, python-format +msgid "leave: create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:275 +#, python-format +msgid "enter: map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:301 +#, python-format +msgid "" +"leave: map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host " +"%(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:311 +#, python-format +msgid "enter: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:318 +#, python-format +msgid "unmap_vol_from_host: No mapping of volume %(vol_name)s to any host found." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:324 +#, python-format +msgid "" +"unmap_vol_from_host: Multiple mappings of volume %(vol_name)s found, no " +"host specified." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:336 +#, python-format +msgid "" +"unmap_vol_from_host: No mapping of volume %(vol_name)s to host %(host) " +"found." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:348 +#, python-format +msgid "leave: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:377 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:383 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:390 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:397 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:402 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:408 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:417 +#, python-format +msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:452 +msgid "Protocol must be specified as ' iSCSI' or ' FC'." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:495 +#, python-format +msgid "enter: create_vdisk: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:498 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:525 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping%(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:535 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within theallotted %(to)d " +"seconds timeout. Terminating." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:544 +#, python-format +msgid "" +"enter: run_flashcopy: execute FlashCopy from source %(source)s to target " +"%(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:552 +#, python-format +msgid "leave: run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:572 +#, python-format +msgid "Loopcall: _check_vdisk_fc_mappings(), vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:595 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:631 +#, python-format +msgid "Calling _ensure_vdisk_no_fc_mappings: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:639 +#, python-format +msgid "enter: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:641 +#, python-format +msgid "Tried to delete non-existant vdisk %s." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:645 +#, python-format +msgid "leave: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:649 +#, python-format +msgid "enter: create_copy: snapshot %(src)s to %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:654 +#, python-format +msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:669 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt)s from vdisk %(src)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:691 +msgid "migrate_volume started without a vdisk copy in the expected pool." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:743 +#, python-format +msgid "" +"Ignore change IO group as storage code level is %(code_level)s, below " +"then 6.4.0.0" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:35 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:211 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:244 +#, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:53 +#, python-format +msgid "Expected no output from CLI command %(cmd)s, got %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:65 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:256 +#, python-format +msgid "" +"Failed to parse CLI output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:142 +msgid "Must pass wwpn or host to lsfabric." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:158 +#, python-format +msgid "Did not find success message nor error for %(fun)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:163 +msgid "" +"storwize_svc_multihostmap_enabled is set to False, not allowing multi " +"host mapping." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:347 +#, python-format +msgid "Did not find expected key %(key)s in %(fun)s: %(raw)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:382 +#, python-format +msgid "" +"Unexpected CLI response: header/row mismatch. header: %(header)s, row: " +"%(row)s" +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:419 +#, python-format +msgid "No element by given name %s." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:440 +msgid "Not a valid value for NaElement." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:444 +msgid "NaElement name cannot be null." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:468 +msgid "Type cannot be converted into NaElement." 
+msgstr "" + +#: cinder/volume/drivers/netapp/common.py:75 +msgid "Required configuration not found" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:103 +#, python-format +msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:109 +#, python-format +msgid "Storage family %s is not supported" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:116 +#, python-format +msgid "No default storage protocol found for storage family %(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:123 +#, python-format +msgid "" +"Protocol %(storage_protocol)s is not supported for storage family " +"%(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:130 +#, python-format +msgid "" +"NetApp driver of family %(storage_family)s and protocol " +"%(storage_protocol)s loaded" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:139 +msgid "Only loading netapp drivers supported." +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:158 +#, python-format +msgid "" +"The configured NetApp driver is deprecated. Please refer the link to " +"resolve the issue '%s'." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:69 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:105 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:150 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:166 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:175 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:191 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:227 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:232 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:238 +#, python-format +msgid "Failed to get LUN target details for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:249 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:252 +#, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:290 +#, python-format +msgid "Snapshot %s deletion successful" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:310 +#: cinder/volume/drivers/netapp/iscsi.py:565 +#: cinder/volume/drivers/netapp/nfs.py:99 +#: cinder/volume/drivers/netapp/nfs.py:206 +#, python-format +msgid "Resizing %s failed. Cleaning volume." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:325 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:412 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:431 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:511 +msgid "Object is not a NetApp LUN." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:543 +#, python-format +msgid "Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:545 +#, python-format +msgid "Error getting lun attribute. Exception: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:600 +#, python-format +msgid "No need to extend volume %s as it is already the requested new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:606 +#, python-format +msgid "Resizing lun %s directly to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:633 +#, python-format +msgid "Lun %(path)s geometry failed. Message - %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:662 +#, python-format +msgid "Moving lun %(name)s to %(new_name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:677 +#, python-format +msgid "Resizing lun %s using sub clone to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:684 +#, python-format +msgid "%s cannot be sub clone resized as it is hosted on compressed volume" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:690 +#, python-format +msgid "%s cannot be sub clone resized as it contains no blocks." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:707 +#, python-format +msgid "Post clone resize lun %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:718 +#, python-format +msgid "Failure staging lun %s to tmp." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:723 +#, python-format +msgid "Failure moving new cloned lun to %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:727 +#, python-format +msgid "Failure deleting staged tmp lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:730 +#, python-format +msgid "Unknown exception in post clone resize lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:732 +#, python-format +msgid "Exception details: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:736 +msgid "Getting lun block count." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:741 +#, python-format +msgid "Failure getting lun info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:785 +#, python-format +msgid "Failed to get vol with required size and extra specs for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:796 +#, python-format +msgid "Error provisioning vol %(name)s on %(volume)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:841 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:982 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:986 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1087 +msgid "Cluster ssc is not updated. No volume stats found." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1149 +#: cinder/volume/drivers/netapp/nfs.py:1080 +msgid "Unsupported ONTAP version. ONTAP version 7.3.1 and above is supported." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1153 +#: cinder/volume/drivers/netapp/nfs.py:1084 +#: cinder/volume/drivers/netapp/utils.py:320 +msgid "Api version could not be determined." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1164 +#, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1273 +#, python-format +msgid "Error finding luns for volume %s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1390 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1393 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1456 +msgid "Volume refresh job already running. Returning..." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1462 +#, python-format +msgid "Error refreshing vol capacity. Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1470 +#, python-format +msgid "Refreshing capacity info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:104 +#: cinder/volume/drivers/netapp/nfs.py:211 +#, python-format +msgid "NFS file %s not discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:224 +#, python-format +msgid "Copied image to volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:230 +#, python-format +msgid "Registering image in cache %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:237 +#, python-format +msgid "" +"Exception while registering image %(image_id)s in cache. Exception: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:250 +#, python-format +msgid "Found cache file for image %(image_id)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:263 +#, python-format +msgid "Cloning img from cache for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:271 +msgid "Image cache cleaning in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:282 +msgid "Image cache cleaning in progress." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:293 +#, python-format +msgid "Cleaning cache for share %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:298 +#, python-format +msgid "Files to be queued for deletion %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:305 +#, python-format +msgid "Exception during cache cleaning %(share)s. Message - %(ex)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:311 +msgid "Image cache cleaning done." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:336 +#, python-format +msgid "Bytes to free %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:343 +#, python-format +msgid "Delete file path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:358 +#, python-format +msgid "Deleting file at path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:363 +#, python-format +msgid "Exception during deleting %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:395 +#, python-format +msgid "Unexpected exception in cloning image %(image_id)s. 
Message: %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:411 +#, python-format +msgid "Cloning image %s from cache" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:415 +#, python-format +msgid "Cache share: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:425 +#, python-format +msgid "Unexpected exception during image cloning in share %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:431 +#, python-format +msgid "Cloning image %s directly in share" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:436 +#, python-format +msgid "Share is cloneable %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:443 +#, python-format +msgid "Image is raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:450 +#, python-format +msgid "Image will locally be converted to raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:457 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:467 +#, python-format +msgid "Performing post clone for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:474 +msgid "NFS file could not be discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:478 +msgid "Checking file for resize" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:482 +#, python-format +msgid "Resizing file to %sG" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:488 +msgid "Resizing image file failed." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:510 +msgid "Discover file retries exhausted." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:529 +#, python-format +msgid "Image location not in the expected format %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:557 +#, python-format +msgid "Found possible share matches %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:561 +msgid "Unexpected exception while short listing used share." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:599 +#, python-format +msgid "Extending volume %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:710 +#, python-format +msgid "Shares on vserver %s will only be used for provisioning." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:714 +#: cinder/volume/drivers/netapp/nfs.py:892 +msgid "No vserver set in config. SSC will be disabled." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:757 +#, python-format +msgid "Exception creating vol %(name)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:765 +#, python-format +msgid "Volume %s could not be created on shares." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:815 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:856 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:862 +#, python-format +msgid "" +"Cloning with params volume %(volume)s, src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:924 +msgid "No cluster ssc stats found. Wait for next volume stats update." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:941 +msgid "No shares found hence skipping ssc refresh." 
+msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:978 +#: cinder/volume/drivers/netapp/nfs.py:1221 +#, python-format +msgid "Shortlisted del elg files %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:983 +#: cinder/volume/drivers/netapp/nfs.py:1226 +#, python-format +msgid "Getting file usage for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:988 +#: cinder/volume/drivers/netapp/nfs.py:1231 +#, python-format +msgid "file-usage for path %(path)s is %(bytes)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1005 +#: cinder/volume/drivers/netapp/nfs.py:1268 +#, python-format +msgid "Share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1007 +#: cinder/volume/drivers/netapp/nfs.py:1270 +#, python-format +msgid "No share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1038 +#, python-format +msgid "Found volume %(vol)s for share %(share)s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1129 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1139 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:241 +#, python-format +msgid "Unexpected error while creating ssc vol list. Message - %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:272 +#, python-format +msgid "Exception querying aggr options. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:313 +#, python-format +msgid "Exception querying sis information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:347 +#, python-format +msgid "Exception querying mirror information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:379 +#, python-format +msgid "Exception querying storage disk. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:421 +#, python-format +msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:455 +#, python-format +msgid "Successfully completed stale refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:482 +#, python-format +msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:488 +#, python-format +msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:500 +msgid "Backend not a VolumeDriver." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:502 +msgid "Backend server not NaServer." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:505 +msgid "ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:517 +msgid "refresh stale ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:616 +msgid "Fatal error: User not permitted to query NetApp volumes." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:623 +#, python-format +msgid "" +"The user does not have access or sufficient privileges to use all ssc " +"apis. The ssc features %s may not work as expected." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:122 +msgid "ems executed successfully." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:124 +#, python-format +msgid "Failed to invoke ems. 
Message : %s" +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:137 +msgid "" +"It is not the recommended way to use drivers by NetApp. Please use " +"NetAppDriver to achieve the functionality." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:160 +msgid "Requires an NaServer instance." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:317 +msgid "Unsupported Clustered Data ONTAP version." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:99 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:150 +#, python-format +msgid "Extending volume: %(id)s New size: %(size)s GB" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:166 +#, python-format +msgid "Volume %s does not exist, it seems it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:179 +#, python-format +msgid "Cannot delete snapshot %(origin): %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:190 +#, python-format +msgid "Creating temp snapshot of the original volume: %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:200 +#: cinder/volume/drivers/nexenta/nfs.py:200 +#, python-format +msgid "Volume creation failed, deleting created snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:205 +#: cinder/volume/drivers/nexenta/nfs.py:205 +#, python-format +msgid "Failed to delete zfs snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:223 +#, python-format +msgid "Enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:250 +#, python-format +msgid "Remote NexentaStor appliance at %s should be SSH-bound." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:267 +#, python-format +msgid "" +"Cannot send source snapshot %(src)s to destination %(dst)s. Reason: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:275 +#, python-format +msgid "" +"Cannot delete temporary source snapshot %(src)s on NexentaStor Appliance:" +" %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:281 +#, python-format +msgid "Cannot delete source volume %(volume)s on NexentaStor Appliance: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:318 +#, python-format +msgid "Snapshot %s does not exist, it seems it was already deleted." 
+msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:439 +#: cinder/volume/drivers/windows/windows_utils.py:230 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:449 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:461 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:471 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:481 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:514 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:522 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:83 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:88 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:89 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:90 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:96 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:85 +#, python-format +msgid "Volume %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:89 +#, python-format +msgid "Folder %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:114 +#, python-format +msgid "Creating folder on Nexenta Store %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:146 +#, python-format +msgid "Cannot destroy created folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:176 +#, python-format +msgid "Cannot destroy cloned folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:227 +#, python-format +msgid "Folder %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:237 +#: cinder/volume/drivers/nexenta/nfs.py:268 +#, python-format +msgid "Snapshot %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:302 +#, python-format +msgid "Creating regular file: %s.This may take some time." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:313 +#, python-format +msgid "Regular file: %s created." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:365 +#, python-format +msgid "Sharing folder %s on Nexenta Store" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:393 +#, python-format +msgid "Shares loaded: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/utils.py:46 +#, python-format +msgid "Invalid value: \"%s\"" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:93 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:99 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:107 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:137 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:190 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:246 +#, python-format +msgid "Snapshot info: %(name)s => %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:321 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:79 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:166 +#, python-format +msgid "Invalid hp3parclient version. Version %s or greater required." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:179 +#, python-format +msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:193 +#, python-format +msgid "HP3PARCommon %(common_ver)s, hp3parclient %(rest_ver)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:212 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:494 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:228 +#, python-format +msgid "Failed to get domain because CPG (%s) doesn't exist on array." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:247 +#, python-format +msgid "Error extending volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:347 +#, python-format +msgid "command %s failed" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:390 +#, python-format +msgid "Error running ssh command: %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:586 +#, python-format +msgid "VV Set %s does not exist." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:633 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:684 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:752 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1004 +#, python-format +msgid "Failure in update_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1019 +#, python-format +msgid "Failure in clear_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1031 +#, python-format +msgid "Error attaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1039 +#, python-format +msgid "Error detaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:124 +#, python-format +msgid "Invalid IP address format '%s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:158 +#, python-format +msgid "" +"Found invalid iSCSI IP address(s) in configuration option(s) " +"hp3par_iscsi_ips or iscsi_ip_address '%s.'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:164 +msgid "At least one valid iSCSI IP address must be set." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:266 +msgid "Least busy iSCSI port not found, using first iSCSI port in list." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:75 +#, python-format +msgid "Failure while invoking function: %(func)s. Error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:162 +#, python-format +msgid "Error while terminating session: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:165 +msgid "Successfully established connection to the server." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:172 +#, python-format +msgid "Error while logging out the user: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:218 +#, python-format +msgid "" +"Not authenticated error occurred. Will create session and try API call " +"again: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:258 +#, python-format +msgid "Task: %(task)s progress: %(prog)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:262 +#, python-format +msgid "Task %s status: success." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:266 +#: cinder/volume/drivers/vmware/api.py:271 +#, python-format +msgid "Task: %(task)s failed with error: %(err)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:290 +msgid "Lease is ready." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:294 +msgid "Lease initializing..." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:304 +#, python-format +msgid "Error: unknown lease state %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:51 +#, python-format +msgid "Read %(bytes)s out of %(max)s from ThreadSafePipe." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:56 +#, python-format +msgid "Completed transfer of size %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:102 +#, python-format +msgid "Initiating image service update on image: %(image)s with meta: %(meta)s" +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:117 +#, python-format +msgid "Glance image: %s is now active." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:123 +#, python-format +msgid "Glance image: %s is in killed state." 
+msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:132 +#, python-format +msgid "Glance image %(id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:171 +#, python-format +msgid "" +"Exception during HTTP connection close in VMwareHTTPWrite. Exception is " +"%s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:203 +#: cinder/volume/drivers/vmware/read_write_util.py:292 +msgid "Could not retrieve URL from lease." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:206 +#, python-format +msgid "Opening vmdk url: %s for write." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:231 +#, python-format +msgid "Written %s bytes to vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:242 +#: cinder/volume/drivers/vmware/read_write_util.py:318 +#, python-format +msgid "Updating progress to %s percent." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:258 +#: cinder/volume/drivers/vmware/read_write_util.py:334 +msgid "Lease released." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:260 +#: cinder/volume/drivers/vmware/read_write_util.py:336 +#, python-format +msgid "Lease is already in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:295 +#, python-format +msgid "Opening vmdk url: %s for read." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:307 +#, python-format +msgid "Read %s bytes from vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:150 +#, python-format +msgid "Error(s): %s occurred in the call to RetrievePropertiesEx." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:189 +#, python-format +msgid "No such SOAP method %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:198 +#, python-format +msgid "httplib error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:209 +#, python-format +msgid "Socket error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:218 +#, python-format +msgid "Type error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:225 +#, python-format +msgid "Error in %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:112 +#, python-format +msgid "Returning spec value %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:115 +#, python-format +msgid "Invalid spec value: %s specified." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:118 +#, python-format +msgid "Returning default spec value: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:169 +#, python-format +msgid "%s not set." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:174 +#, python-format +msgid "Successfully setup driver: %(driver)s for server: %(ip)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:219 +msgid "Backing not available, no operation to be performed." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:287 +#, python-format +msgid "" +"Unable to pick datastore to accommodate %(size)s bytes from the " +"datastores: %(dss)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:293 +#, python-format +msgid "" +"Selected datastore: %(datastore)s with %(host_count)d connected host(s) " +"for the volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:375 +#, python-format +msgid "" +"Unable to find suitable datastore for volume of size: %(vol)s GB under " +"host: %(host)s. 
More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:385 +#, python-format +msgid "Unable to find host to accommodate a disk of size: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:412 +#, python-format +msgid "" +"Unable to find suitable datastore for volume: %(vol)s under host: " +"%(host)s. More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:422 +#, python-format +msgid "Unable to create volume: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:441 +#, python-format +msgid "The instance: %s for which initialize connection is called, exists." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:448 +#, python-format +msgid "There is no backing for the volume: %s. Need to create one." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:456 +msgid "The instance for which initialize connection is called, does not exist." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:461 +#, python-format +msgid "Trying to boot from an empty volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:470 +#, python-format +msgid "" +"Returning connection_info: %(info)s for volume: %(volume)s with " +"connector: %(connector)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:518 +#, python-format +msgid "Snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:523 +#, python-format +msgid "There is no backing, so will not create snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:528 +#, python-format +msgid "Successfully created snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:549 +#, python-format +msgid "Delete snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:554 +#, python-format +msgid "There is no backing, and so there is no snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:558 +#, python-format +msgid "Successfully deleted snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:586 +#, python-format +msgid "Successfully cloned new backing: %(back)s from source VMDK file: %(vmdk)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:603 +#, python-format +msgid "" +"There is no backing for the source volume: %(svol)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:633 +#, python-format +msgid "" +"There is no backing for the source snapshot: %(snap)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:642 +#: cinder/volume/drivers/vmware/vmdk.py:982 +#, python-format +msgid "" +"There is no snapshot point for the snapshoted volume: %(snap)s. Not " +"creating any backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:678 +#, python-format +msgid "Cannot create image of disk format: %s. Only vmdk disk format is accepted." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:713 +#: cinder/volume/drivers/vmware/vmdk.py:771 +#, python-format +msgid "Fetching glance image: %(id)s to server: %(host)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:722 +#: cinder/volume/drivers/vmware/vmdk.py:792 +#, python-format +msgid "Done copying image: %(id)s to volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:725 +#, python-format +msgid "" +"Exception in copy_image_to_volume: %(excep)s. Deleting the backing: " +"%(back)s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:746 +#, python-format +msgid "Exception in _select_ds_for_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:749 +#, python-format +msgid "Selected datastore %(ds)s for new volume of size %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:784 +#, python-format +msgid "Exception in copy_image_to_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:787 +#, python-format +msgid "Deleting the backing: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:808 +#, python-format +msgid "Copy glance image: %s to create new volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:842 +msgid "Upload to glance of attached volume is not supported." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:847 +#, python-format +msgid "Copy Volume: %s to new image." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:853 +#, python-format +msgid "Backing not found, creating for volume: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:872 +#, python-format +msgid "Done copying volume %(vol)s to a new image %(img)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:922 +#, python-format +msgid "Relocating volume: %(backing)s to %(ds)s and %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:961 +#: cinder/volume/drivers/vmware/volumeops.py:630 +#, python-format +msgid "Successfully created clone: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:974 +#, python-format +msgid "" +"There is no backing for the snapshoted volume: %(snap)s. Not creating any" +" backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1010 +#, python-format +msgid "" +"There is no backing for the source volume: %(src)s. Not creating any " +"backing for volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1018 +#, python-format +msgid "Linked clone of source volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:94 +#, python-format +msgid "Downloading image: %s from glance image server as a flat vmdk file." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:107 +#: cinder/volume/drivers/vmware/vmware_images.py:126 +#, python-format +msgid "Downloaded image: %s from glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:113 +#, python-format +msgid "Downloading image: %s from glance image server using HttpNfc import." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:132 +#, python-format +msgid "Uploading image: %s to the Glance image server using HttpNfc export." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:158 +#, python-format +msgid "Uploaded image: %s to the Glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:87 +#, python-format +msgid "Did not find any backing with name: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:94 +#, python-format +msgid "Deleting the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:97 +#, python-format +msgid "Initiated deletion of VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:99 +#, python-format +msgid "Deleted the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:237 +#, python-format +msgid "There are no valid datastores attached to %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:289 +#, python-format +msgid "" +"Creating folder: %(child_folder_name)s under parent folder: " +"%(parent_folder)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:306 +#, python-format +msgid "Child folder already present: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:314 +#, python-format +msgid "Created child folder: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:365 +#, python-format +msgid "Spec for creating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:383 +#, python-format +msgid "" +"Creating volume backing name: %(name)s disk_type: %(disk_type)s size_kb: " +"%(size_kb)s at folder: %(folder)s resourse pool: %(resource_pool)s " +"datastore name: %(ds_name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:395 +#, python-format +msgid "Initiated creation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:398 +#, python-format +msgid "Successfully created volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:438 +#, python-format +msgid "Spec for relocating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:452 +#, python-format +msgid "" +"Relocating backing: %(backing)s to datastore: %(ds)s and resource pool: " +"%(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:462 +#, python-format +msgid "Initiated relocation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:464 +#, python-format +msgid "" +"Successfully relocated volume backing: %(backing)s to datastore: %(ds)s " +"and resource pool: %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:474 +#, python-format +msgid "Moving backing: %(backing)s to folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:479 +#, python-format +msgid "Initiated move of volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:482 +#, python-format +msgid "Successfully moved volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:494 +#, python-format +msgid "Snapshoting backing: %(backing)s with name: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:501 +#, python-format +msgid "Initiated snapshot of volume backing: %(backing)s named: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:505 +#, python-format +msgid "Successfully created snapshot: %(snap)s for volume backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:553 +#, python-format +msgid "Deleting the snapshot: %(name)s from backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:558 +#, python-format +msgid "" +"Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " +"delete anything." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:565 +#, python-format +msgid "Initiated snapshot: %(name)s deletion for backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:569 +#, python-format +msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:597 +#, python-format +msgid "Spec for cloning the backing: %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:613 +#, python-format +msgid "" +"Creating a clone of backing: %(back)s, named: %(name)s, clone type: " +"%(type)s from snapshot: %(snap)s on datastore: %(ds)s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:627 +#, python-format +msgid "Initiated clone of backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:638 +#, python-format +msgid "Deleting file: %(file)s under datacenter: %(dc)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:646 +#, python-format +msgid "Initiated deletion via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:648 +#, python-format +msgid "Successfully deleted file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:701 +msgid "Copying disk data before snapshot of the VM" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:711 +#, python-format +msgid "Initiated copying disk data via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:713 +#, python-format +msgid "Successfully copied disk at: %(src)s to: %(dest)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:722 +#, python-format +msgid "Deleting vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:729 +#, python-format +msgid "Initiated deleting vmdk file via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:731 +#, python-format +msgid "Deleted vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/windows/windows.py:102 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:47 +#, python-format +msgid "" +"check_for_setup_error: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:53 +msgid "check_for_setup_error: there is no ISCSI traffic listening." +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:63 +#, python-format +msgid "" +"get_host_information: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:73 +#, python-format +msgid "" +"get_host_information: the ISCSI target information could not be " +"retrieved. WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:105 +#, python-format +msgid "" +"associate_initiator_with_iscsi_target: an association between initiator: " +"%(init)s and target name: %(target)s could not be established. WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:123 +#, python-format +msgid "" +"delete_iscsi_target: error when deleting the iscsi target associated with" +" target name: %(target)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:139 +#, python-format +msgid "" +"create_volume: error when creating the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:157 +#, python-format +msgid "" +"delete_volume: error when deleting the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:177 +#, python-format +msgid "" +"create_snapshot: error when creating the snapshot name: %(vol_name)s . 
" +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:193 +#, python-format +msgid "" +"create_volume_from_snapshot: error when creating the volume name: " +"%(vol_name)s from snapshot name: %(snap_name)s. WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:208 +#, python-format +msgid "" +"delete_snapshot: error when deleting the snapshot name: %(snap_name)s . " +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:223 +#, python-format +msgid "" +"create_iscsi_target: error when creating iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:240 +#, python-format +msgid "" +"remove_iscsi_target: error when deleting iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:255 +#, python-format +msgid "" +"add_disk_to_target: error adding disk associated to volume : %(vol_name)s" +" to the target name: %(tar_name)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:273 +#, python-format +msgid "" +"copy_vhd_disk: error when copying disk from source path : %(src_path)s to" +" destination path: %(dest_path)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:290 +#, python-format +msgid "" +"extend: error when extending the volume: %(vol_name)s .WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/flows/common.py:52 +#, python-format +msgid "Restoring source %(source_volid)s status to %(status)s" +msgstr "" + +#: cinder/volume/flows/common.py:58 +#, python-format +msgid "" +"Failed setting source volume %(source_volid)s back to its initial " +"%(source_status)s status" +msgstr "" + +#: cinder/volume/flows/common.py:83 +#, python-format +msgid "Updating volume: %(volume_id)s with %(update)s due to: %(reason)s" +msgstr "" + +#: cinder/volume/flows/common.py:90 +#: cinder/volume/flows/manager/create_volume.py:676 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(update)s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:81 +#, python-format +msgid "Originating snapshot status must be one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:103 +#, python-format +msgid "" +"Unable to create a volume from an originating source volume when its " +"status is not one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:126 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than the snapshot size " +"%(snap_size)sGB. They must be >= original snapshot size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:135 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than original volume size " +"%(source_size)sGB. They must be >= original volume size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:144 +#, python-format +msgid "Volume size %(size)s must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:186 +#, python-format +msgid "" +"Size of specified image %(image_size)sGB is larger than volume size " +"%(volume_size)sGB." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:194 +#, python-format +msgid "" +"Volume size %(volume_size)sGB cannot be smaller than the image minDisk " +"size %(min_disk)sGB." 
+msgstr "" + +#: cinder/volume/flows/api/create_volume.py:212 +#, python-format +msgid "Metadata property key %s greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:217 +#, python-format +msgid "Metadata property key %s value greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:254 +#, python-format +msgid "Availability zone '%s' is invalid" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:267 +msgid "Volume must be in the same availability zone as the snapshot" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:276 +msgid "Volume must be in the same availability zone as the source volume" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:315 +msgid "Volume type will be changed to be the same as the source volume." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:463 +#, python-format +msgid "Failed destroying volume entry %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:546 +#, python-format +msgid "Failed rolling back quota for %s reservations" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:590 +#, python-format +msgid "Failed to update quota for deleting volume: %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:678 +#: cinder/volume/flows/manager/create_volume.py:192 +#, python-format +msgid "Volume %s: create failed" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:682 +msgid "Unexpected build error:" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:105 +#, python-format +msgid "" +"Volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d due to " +"%(reason)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:124 +#, python-format +msgid "Volume %s: re-scheduled" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:141 +#, python-format +msgid "Updating volume %(volume_id)s with %(update)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:146 +#, python-format +msgid "Volume %s: resetting 'creating' status failed." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:165 +#, python-format +msgid "Volume %s: rescheduling failed" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:308 +#, python-format +msgid "" +"Failed notifying about the volume action %(event)s for volume " +"%(volume_id)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:345 +#, python-format +msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:347 +#, python-format +msgid "" +"Failed updating volume %(vol_id)s metadata using the provided " +"%(src_type)s %(src_id)s metadata" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:405 +#, python-format +msgid "" +"Failed fetching snapshot %(snapshot_id)s bootable flag using the provided" +" glance snapshot %(snapshot_ref_id)s volume reference" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:418 +#, python-format +msgid "Marking volume %s as bootable." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:421 +#, python-format +msgid "Failed updating volume %(volume_id)s bootable flag to true" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:448 +#, python-format +msgid "" +"Attempting download of %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s." 
+msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:455 +#: cinder/volume/flows/manager/create_volume.py:466 +#, python-format +msgid "" +"Failed to copy image %(image_id)s to volume: %(volume_id)s, error: " +"%(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:461 +#, python-format +msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:475 +#, python-format +msgid "" +"Downloaded image %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s successfully." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:512 +#, python-format +msgid "" +"Creating volume glance metadata for volume %(volume_id)s backed by image " +"%(image_id)s with: %(vol_metadata)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:526 +#, python-format +msgid "" +"Cloning %(volume_id)s from image %(image_id)s at location " +"%(image_location)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:552 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(updates)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:574 +#, python-format +msgid "Unable to create volume. Volume driver %s not initialized" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:588 +#, python-format +msgid "" +"Volume %(volume_id)s: being created using %(functor)s with specification:" +" %(volume_spec)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:611 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with creation provided " +"model %(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:619 +#, python-format +msgid "Volume %s: creating export" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:633 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with driver provided model " +"%(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:680 +#, python-format +msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" +msgstr "" + +#~ msgid "Error retrieving volume status: %s" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get system name" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get storage pool data" +#~ msgstr "" + +#~ msgid "Cannot find any Fibre Channel HBAs" +#~ msgstr "" + +#~ msgid "Volume status must be available or error" +#~ msgstr "" + +#~ msgid "No backend config with id %s" +#~ msgstr "" + +#~ msgid "No sm_flavor called %s" +#~ msgstr "" + +#~ msgid "No sm_volume with id %s" +#~ msgstr "" + +#~ msgid "Error: %s" +#~ msgstr "" + +#~ msgid "Unexpected state while cloning %s" +#~ msgstr "" + +#~ msgid "iSCSI device not found at %s" +#~ msgstr "" + +#~ msgid "Fibre Channel device not found." +#~ msgstr "" + +#~ msgid "Uncaught exception" +#~ msgstr "" + +#~ msgid "Out reactor registered" +#~ msgstr "" + +#~ msgid "CONSUMER GOT %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT QUEUED %(data)s" +#~ msgstr "" + +#~ msgid "Could not create IPC directory %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT %(data)s" +#~ msgstr "" + +#~ msgid "May specify only one of snapshot, imageRef or source volume" +#~ msgstr "" + +#~ msgid "Volume size cannot be lesser than the Snapshot size" +#~ msgstr "" + +#~ msgid "Unable to clone volumes that are in an error state" +#~ msgstr "" + +#~ msgid "Clones currently must be >= original volume size." 
+#~ msgstr "" + +#~ msgid "Volume size '%s' must be an integer and greater than 0" +#~ msgstr "" + +#~ msgid "Size of specified image is larger than volume size." +#~ msgstr "" + +#~ msgid "Image minDisk size is larger than the volume size." +#~ msgstr "" + +#~ msgid "" +#~ msgstr "" + +#~ msgid "Availability zone is invalid" +#~ msgstr "" + +#~ msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +#~ msgstr "" + +#~ msgid "volume %s: creating from snapshot" +#~ msgstr "" + +#~ msgid "volume %s: creating from existing volume" +#~ msgstr "" + +#~ msgid "volume %s: creating from image" +#~ msgstr "" + +#~ msgid "volume %s: creating" +#~ msgstr "" + +#~ msgid "Setting volume: %s status to error after failed image copy." +#~ msgstr "" + +#~ msgid "Unexpected Error: " +#~ msgstr "" + +#~ msgid "volume %s: creating export" +#~ msgstr "" + +#~ msgid "volume %s: create failed" +#~ msgstr "" + +#~ msgid "volume %s: created successfully" +#~ msgstr "" + +#~ msgid "volume %s: Error trying to reschedule create" +#~ msgstr "" + +#~ msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +#~ msgstr "" + +#~ msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +#~ msgstr "" + +#~ msgid "Downloaded image %(image_id)s to %(volume_id)s successfully." +#~ msgstr "" + +#~ msgid "Array Mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "LUN %(lun)s of size %(size)s MB is created." +#~ msgstr "" + +#~ msgid "Array mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "Failed to attach iser target for volume %(volume_id)s." +#~ msgstr "" + +#~ msgid "Fetching %s" +#~ msgstr "" + +#~ msgid "Link Local address is not found.:%s" +#~ msgstr "" + +#~ msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +#~ msgstr "" + +#~ msgid "Started %(name)s on %(_host)s:%(_port)s" +#~ msgstr "" + +#~ msgid "Unable to find a Fibre Channel volume device" +#~ msgstr "" + +#~ msgid "Volume device not found at %s" +#~ msgstr "" + +#~ msgid "Unable to find Volume Group: %s" +#~ msgstr "" + +#~ msgid "Failed to create Volume Group: %s" +#~ msgstr "" + +#~ msgid "snapshot %(snap_name)s: creating" +#~ msgstr "" + +#~ msgid "Running with CoraidDriver for ESM EtherCLoud" +#~ msgstr "" + +#~ msgid "Update session cookie %(session)s" +#~ msgstr "" + +#~ msgid "Message : %(message)s" +#~ msgstr "" + +#~ msgid "Error while trying to set group: %(message)s" +#~ msgstr "" + +#~ msgid "Unable to find group: %(group)s" +#~ msgstr "" + +#~ msgid "ESM urlOpen error" +#~ msgstr "" + +#~ msgid "JSON Error" +#~ msgstr "" + +#~ msgid "Request without URL" +#~ msgstr "" + +#~ msgid "Configure data : %s" +#~ msgstr "" + +#~ msgid "Configure response : %s" +#~ msgstr "" + +#~ msgid "Unable to retrive volume infos for volume %(volname)s" +#~ msgstr "" + +#~ msgid "Cannot login on Coraid ESM" +#~ msgstr "" + +#~ msgid "Fail to create volume %(volname)s" +#~ msgstr "" + +#~ msgid "Failed to delete volume %(volname)s" +#~ msgstr "" + +#~ msgid "Failed to Create Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "Failed to Delete Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "Failed to Create Volume from Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "fmt = %(fmt)s backed by: %(backing_file)s" +#~ msgstr "" + +#~ msgid "Expected image to be in raw format, but is %s" +#~ msgstr "" + +#~ msgid "volume group %s doesn't exist" +#~ msgstr "" + +#~ msgid "Error retrieving volume stats: %s" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Could not get system name" +#~ msgstr "" + +#~ msgid "CPG (%s) must 
be in a domain" +#~ msgstr "" + +#~ msgid "Error populating default encryption types!" +#~ msgstr "" + +#~ msgid "Unexpected error while running command." +#~ msgstr "" + +#~ msgid "Nexenta SA returned the error" +#~ msgstr "" + +#~ msgid "Ignored target group creation error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group member addition error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LU creation error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Copying metadata from snapshot %(snap_volume_id)s to %(volume_id)s" +#~ msgstr "" + +#~ msgid "Connection to glance failed" +#~ msgstr "" + +#~ msgid "Invalid snapshot" +#~ msgstr "" + +#~ msgid "Invalid input received" +#~ msgstr "" + +#~ msgid "Invalid volume type" +#~ msgstr "" + +#~ msgid "Invalid volume" +#~ msgstr "" + +#~ msgid "Invalid host" +#~ msgstr "" + +#~ msgid "Invalid auth key" +#~ msgstr "" + +#~ msgid "Invalid metadata" +#~ msgstr "" + +#~ msgid "Invalid metadata size" +#~ msgstr "" + +#~ msgid "Migration error" +#~ msgstr "" + +#~ msgid "Quota exceeded" +#~ msgstr "" + +#~ msgid "Connection to swift failed" +#~ msgstr "" + +#~ msgid "Volume migration failed" +#~ msgstr "" + +#~ msgid "SSH command injection detected" +#~ msgstr "" + +#~ msgid "Invalid qos specs" +#~ msgstr "" + +#~ msgid "debug in callback: %s" +#~ msgstr "" + +#~ msgid "Expected object of type: %s" +#~ msgstr "" + +#~ msgid "timefunc: '%(name)s' took %(total_time).2f secs" +#~ msgstr "" + +#~ msgid "base image still has %s snapshots so not deleting base image" +#~ msgstr "" + +#~ msgid "Failed to rename migration destination volume %(vol)s to %(name)s" +#~ msgstr "" + +#~ msgid "Resize volume \"%(name)s\" to %(size)s" +#~ msgstr "" + +#~ msgid "Volume \"%(name)s\" resized. New size is %(size)s" +#~ msgstr "" + +#~ msgid "Invalid snapshot backing file format: %s" +#~ msgstr "" + +#~ msgid "Extend volume from %(old_size) to %(new_size)" +#~ msgstr "" + +#~ msgid "pool %s doesn't exist" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Could not get system name." +#~ msgstr "" + +#~ msgid "Disk not found: %s" +#~ msgstr "" + +#~ msgid "read timed out" +#~ msgstr "" + +#~ msgid "check_for_setup_error." +#~ msgstr "" + +#~ msgid "check_for_setup_error: Can not get device type." +#~ msgstr "" + +#~ msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +#~ msgstr "" + +#~ msgid "_get_device_type: Storage Pool must be configured." +#~ msgstr "" + +#~ msgid "create_volume:volume name: %s." +#~ msgstr "" + +#~ msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +#~ msgstr "" + +#~ msgid "create_export: volume name:%s" +#~ msgstr "" + +#~ msgid "create_export:Volume %(name)s does not exist." +#~ msgstr "" + +#~ msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +#~ msgstr "" + +#~ msgid "terminate_connection:Host does not exist. Host name:%(host)s." +#~ msgstr "" + +#~ msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +#~ msgstr "" + +#~ msgid "create_snapshot:Device does not support snapshot." +#~ msgstr "" + +#~ msgid "create_snapshot:Resource pool needs 1GB valid size at least." +#~ msgstr "" + +#~ msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +#~ msgstr "" + +#~ msgid "create_snapshot:Snapshot does not exist. 
Snapshot name:%(name)s" +#~ msgstr "" + +#~ msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +#~ msgstr "" + +#~ msgid "delete_snapshot:Device does not support snapshot." +#~ msgstr "" + +#~ msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +#~ msgstr "" + +#~ msgid "_check_conf_file: %s" +#~ msgstr "" + +#~ msgid "Write login information to xml error. %s" +#~ msgstr "" + +#~ msgid "_get_login_info error. %s" +#~ msgstr "" + +#~ msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +#~ msgstr "" + +#~ msgid "_get_lun_set_info:%s" +#~ msgstr "" + +#~ msgid "_get_iscsi_info:%s" +#~ msgstr "" + +#~ msgid "CLI command:%s" +#~ msgstr "" + +#~ msgid "_execute_cli:%s" +#~ msgstr "" + +#~ msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +#~ msgstr "" + +#~ msgid "_get_tgt_iqn:iSCSI IP is %s." +#~ msgstr "" + +#~ msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +#~ msgstr "" + +#~ msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +#~ msgstr "" + +#~ msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +#~ msgstr "" + +#~ msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." +#~ msgstr "" + +#~ msgid "Ignored target creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group member addition error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LU creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LUN mapping entry addition error while ensuring export" +#~ msgstr "" + +#~ msgid "Invalid source volume %(reason)s." +#~ msgstr "" + +#~ msgid "The request is invalid." +#~ msgstr "" + +#~ msgid "Volume %(volume_id)s persistence file could not be found." +#~ msgstr "" + +#~ msgid "No disk at %(location)s" +#~ msgstr "" + +#~ msgid "Class %(class_name)s could not be found: %(exception)s" +#~ msgstr "" + +#~ msgid "Action not allowed." +#~ msgstr "" + +#~ msgid "Key pair %(key_name)s already exists." +#~ msgstr "" + +#~ msgid "Migration error: %(reason)s" +#~ msgstr "" + +#~ msgid "Maximum volume/snapshot size exceeded" +#~ msgstr "" + +#~ msgid "3PAR Host already exists: %(err)s. %(info)s" +#~ msgstr "" + +#~ msgid "Backup volume %(volume_id)s type not recognised." +#~ msgstr "" + +#~ msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s" +#~ msgstr "" + +#~ msgid "ssh_read: Read SSH timeout" +#~ msgstr "" + +#~ msgid "do_setup." +#~ msgstr "" + +#~ msgid "create_volume: volume name: %s." +#~ msgstr "" + +#~ msgid "delete_volume: volume name: %s." +#~ msgstr "" + +#~ msgid "create_cloned_volume: src volume: %(src)s tgt volume: %(tgt)s" +#~ msgstr "" + +#~ msgid "create_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" +#~ msgstr "" + +#~ msgid "delete_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" +#~ msgstr "" + +#~ msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Updating volume stats" +#~ msgstr "" + +#~ msgid "restore finished." +#~ msgstr "" + +#~ msgid "Error encountered during initialization of driver: %s" +#~ msgstr "" + +#~ msgid "Unabled to update stats, driver is uninitialized" +#~ msgstr "" + +#~ msgid "Snapshot file at %s does not exist." 
+#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %s does not exist" +#~ msgstr "" + +#~ msgid "Login to 3PAR array invalid" +#~ msgstr "" + +#~ msgid "There are no datastores present under %s." +#~ msgstr "" + +#~ msgid "Size for volume: %s not found, skipping secure delete." +#~ msgstr "" + +#~ msgid "Could not find attribute for LUN named %s" +#~ msgstr "" + +#~ msgid "Cleaning up incomplete backup operations" +#~ msgstr "" + +#~ msgid "Resetting volume %s to available (was backing-up)" +#~ msgstr "" + +#~ msgid "Resetting volume %s to error_restoring (was restoring-backup)" +#~ msgstr "" + +#~ msgid "Resetting backup %s to error (was creating)" +#~ msgstr "" + +#~ msgid "Resetting backup %s to available (was restoring)" +#~ msgstr "" + +#~ msgid "Resuming delete on backup: %s" +#~ msgstr "" + +#~ msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +#~ msgstr "" + +#~ msgid "create_backup finished. backup: %s" +#~ msgstr "" + +#~ msgid "delete_backup started, backup: %s" +#~ msgstr "" + +#~ msgid "delete_backup finished, backup %s deleted" +#~ msgstr "" + +#~ msgid "JSON transfer Error" +#~ msgstr "" + +#~ msgid "create volume error: %(err)s" +#~ msgstr "" + +#~ msgid "Create snapshot error." +#~ msgstr "" + +#~ msgid "Create luncopy error." +#~ msgstr "" + +#~ msgid "_find_host_lun_id transfer data error! " +#~ msgstr "" + +#~ msgid "ssh_read: Read SSH timeout." +#~ msgstr "" + +#~ msgid "There are no hosts in the inventory." +#~ msgstr "" + +#~ msgid "Unable to create volume: %(vol)s on the hosts: %(hosts)s." +#~ msgstr "" + +#~ msgid "Successfully cloned new backing: %s." +#~ msgstr "" + +#~ msgid "Successfully reverted clone: %(clone)s to snapshot: %(snapshot)s." +#~ msgstr "" + +#~ msgid "Copying backing files from %(src)s to %(dest)s." +#~ msgstr "" + +#~ msgid "Initiated copying of backing via task: %s." +#~ msgstr "" + +#~ msgid "Successfully copied backing to %s." +#~ msgstr "" + +#~ msgid "Registering backing at path: %s to inventory." +#~ msgstr "" + +#~ msgid "Initiated registring backing, task: %s." +#~ msgstr "" + +#~ msgid "Successfully registered backing: %s." +#~ msgstr "" + +#~ msgid "Reverting backing to snapshot: %s." +#~ msgstr "" + +#~ msgid "Initiated reverting snapshot via task: %s." +#~ msgstr "" + +#~ msgid "Successfully reverted to snapshot: %s." +#~ msgstr "" + +#~ msgid "Successfully copied disk data to: %s." +#~ msgstr "" + +#~ msgid "Error(s): %s occurred in the call to RetrieveProperties." +#~ msgstr "" + +#~ msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +#~ msgstr "" + +#~ msgid "Deploy v1 of the Cinder API. " +#~ msgstr "" + +#~ msgid "Deploy v2 of the Cinder API. " +#~ msgstr "" + +#~ msgid "_read_xml:%s" +#~ msgstr "" + +#~ msgid "request ip info is %s." +#~ msgstr "" + +#~ msgid "new str info is %s." +#~ msgstr "" + +#~ msgid "Failed to create iser target for volume %(volume_id)s." +#~ msgstr "" + +#~ msgid "Failed to remove iser target for volume %(volume_id)s." 
+#~ msgstr "" + +#~ msgid "rtstool is not installed correctly" +#~ msgstr "" + +#~ msgid "Creating iser_target for: %s" +#~ msgstr "" + +#~ msgid "Failed to create iser target for volume id:%(vol_id)s: %(e)s" +#~ msgstr "" + +#~ msgid "Removing iser_target for: %s" +#~ msgstr "" + +#~ msgid "Failed to remove iser target for volume id:%(vol_id)s: %(e)s" +#~ msgstr "" + +#~ msgid "Volume %s does not exist, it seems it was already deleted" +#~ msgstr "" + +#~ msgid "Executing zfs send/recv on the appliance" +#~ msgstr "" + +#~ msgid "zfs send/recv done, new volume %s created" +#~ msgstr "" + +#~ msgid "Failed to delete temp snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" + +#~ msgid "Failed to delete zfs recv snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" + +#~ msgid "rbd export-diff failed - %s" +#~ msgstr "" + +#~ msgid "rbd import-diff failed - %s" +#~ msgstr "" + +#~ msgid "%s is not on GPFS. Perhaps GPFS not mounted." +#~ msgstr "" + +#~ msgid "Folder %s does not exist, it seems it was already deleted." +#~ msgstr "" + +#~ msgid "No 'os-update_readonly_flag' was specified in request." +#~ msgstr "" + +#~ msgid "Volume 'readonly' flag must be specified in request as a boolean." +#~ msgstr "" + +#~ msgid "ISER provider_location not stored, using discovery" +#~ msgstr "" + +#~ msgid "Could not find iSER export for volume %s" +#~ msgstr "" + +#~ msgid "ISER Discovery: Found %s" +#~ msgstr "" + +#~ msgid "Failed to access the device on the path %(path)s: %(error)s." +#~ msgstr "" + +#~ msgid "iSER device not found at %s" +#~ msgstr "" + +#~ msgid "Found iSER node %(host_device)s (after %(tries)s rescans)." +#~ msgstr "" + +#~ msgid "Skipping ensure_export. No iser_target provisioned for volume: %s" +#~ msgstr "" + +#~ msgid "Skipping remove_export. No iser_target provisioned for volume: %s" +#~ msgstr "" + +#~ msgid "Downloading image: %s from glance image server." +#~ msgstr "" + +#~ msgid "Uploading image: %s to the Glance image server." +#~ msgstr "" + +#~ msgid "Invalid request body" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: prefix %s" +#~ msgstr "" + +#~ msgid "Schedule volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete schedule volume using flow: %s" +#~ msgstr "" + +#~ msgid "Create volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete create volume workflow" +#~ msgstr "" + +#~ msgid "Expected volume result not found" +#~ msgstr "" + +#~ msgid "Manager volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete manager volume workflow" +#~ msgstr "" + +#~ msgid "Unable to update stats, driver is uninitialized" +#~ msgstr "" + +#~ msgid "Bad reponse from server: %s" +#~ msgstr "" + +#~ msgid "%(flow)s has moved into state %(state)s from state %(old_state)s" +#~ msgstr "" + +#~ msgid "No request spec, will not reschedule" +#~ msgstr "" + +#~ msgid "No retry filter property or associated retry info, will not reschedule" +#~ msgstr "" + +#~ msgid "Retry info not present, will not reschedule" +#~ msgstr "" + +#~ msgid "Clear capabilities" +#~ msgstr "" + +#~ msgid "This usually means the volume was never succesfully created." +#~ msgstr "" + +#~ msgid "setting LU uppper (end) limit to %s" +#~ msgstr "" + +#~ msgid "Can't find lun or lun goup in array" +#~ msgstr "" + +#~ msgid "Volume to be restored to is smaller than the backup to be restored" +#~ msgstr "" + +#~ msgid "Volume driver '%(driver)s' not initialized." 
+#~ msgstr "" + +#~ msgid "in looping call" +#~ msgstr "" + +#~ msgid "Is the appropriate service running?" +#~ msgstr "" + +#~ msgid "Could not find another host" +#~ msgstr "" + +#~ msgid "Not enough allocatable volume gigabytes remaining" +#~ msgstr "" + +#~ msgid "Unable to update stats on non-intialized Volume Group: %s" +#~ msgstr "" + +#~ msgid "do_setup: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "migrate_volume started with more than one vdisk copy" +#~ msgstr "" + +#~ msgid "migrate_volume: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "Selected datastore: %s for the volume." +#~ msgstr "" + +#~ msgid "There are no valid datastores present under %s." +#~ msgstr "" + +#~ msgid "Unable to create volume, driver not initialized" +#~ msgstr "" + +#~ msgid "Migration %(migration_id)s could not be found." +#~ msgstr "" + +#~ msgid "Bad driver response status: %(status)s" +#~ msgstr "" + +#~ msgid "Instance %(instance_id)s could not be found." +#~ msgstr "" + +#~ msgid "Volume retype failed: %(reason)s" +#~ msgstr "" + +#~ msgid "SIGTERM received" +#~ msgstr "" + +#~ msgid "Child %(pid)d exited with status %(code)d" +#~ msgstr "" + +#~ msgid "_wait_child %d" +#~ msgstr "" + +#~ msgid "wait wrap.failed %s" +#~ msgstr "" + +#~ msgid "" +#~ "Report interval must be less than " +#~ "service down time. Current config: " +#~ ". Setting " +#~ "service_down_time to: %(new_service_down_time)s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target for volume %(name)s." +#~ msgstr "" + +#~ msgid "Updating iscsi target: %s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target %(name)s: %(e)s" +#~ msgstr "" + +#~ msgid "Caught '%(exception)s' exception." +#~ msgstr "" + +#~ msgid "Get code level failed" +#~ msgstr "" + +#~ msgid "do_setup: Could not get system name" +#~ msgstr "" + +#~ msgid "Failed to get license information." +#~ msgstr "" + +#~ msgid "do_setup: No configured nodes" +#~ msgstr "" + +#~ msgid "enter: _get_chap_secret_for_host: host name %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_chap_secret_for_host: host name " +#~ "%(host_name)s with secret %(chap_secret)s" +#~ msgstr "" + +#~ msgid "" +#~ "_create_host: Cannot clean host name. " +#~ "Host name is not unicode or string" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: %s" +#~ msgstr "" + +#~ msgid "leave: _get_host_from_connector: host %s" +#~ msgstr "" + +#~ msgid "enter: _create_host: host %s" +#~ msgstr "" + +#~ msgid "_create_host: No connector ports" +#~ msgstr "" + +#~ msgid "leave: _create_host: host %(host)s - %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +#~ msgstr "" + +#~ msgid "" +#~ "storwize_svc_multihostmap_enabled is set to " +#~ "False, Not allow multi host mapping" +#~ msgstr "" + +#~ msgid "volume %s mapping to multi host" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _map_vol_to_host: LUN %(result_lun)s, " +#~ "volume %(volume_name)s, host %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "leave: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "_create_host failed to return the host name." +#~ msgstr "" + +#~ msgid "_get_host_from_connector failed to return the host name for connector" +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to any host found." +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: Multiple mappings of " +#~ "volume %(vol_name)s found, no host " +#~ "specified." 
+#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to host %(host_name)s found" +#~ msgstr "" + +#~ msgid "protocol must be specified as ' iSCSI' or ' FC'" +#~ msgstr "" + +#~ msgid "enter: _create_vdisk: vdisk %s " +#~ msgstr "" + +#~ msgid "" +#~ "_create_vdisk %(name)s - did not find success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find success " +#~ "message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find mapping " +#~ "id in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to prepare FlashCopy" +#~ " from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "Unexecpted mapping status %(status)s for " +#~ "mapping %(id)s. Attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Mapping %(id)s prepare failed to " +#~ "complete within the allotted %(to)d " +#~ "seconds timeout. Terminating." +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s with " +#~ "exception %(ex)s" +#~ msgstr "" + +#~ msgid "_prepare_fc_map: %s" +#~ msgstr "" + +#~ msgid "" +#~ "_start_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "enter: _run_flashcopy: execute FlashCopy from" +#~ " source %(source)s to target %(target)s" +#~ msgstr "" + +#~ msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +#~ msgstr "" + +#~ msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %(src_vdisk)s (%(src_id)s) does not exist" +#~ msgstr "" + +#~ msgid "" +#~ "_create_copy: cannot get source vdisk " +#~ "%(src)s capacity from vdisk attributes " +#~ "%(attr)s" +#~ msgstr "" + +#~ msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_flashcopy_mapping_attributes: mapping " +#~ "%(fc_map_id)s, attributes %(attributes)s" +#~ msgstr "" + +#~ msgid "enter: _is_vdisk_defined: vdisk %s " +#~ msgstr "" + +#~ msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +#~ msgstr "" + +#~ msgid "enter: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "warning: Tried to delete vdisk %s but it does not exist." 
+#~ msgstr "" + +#~ msgid "leave: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "" +#~ "_add_vdisk_copy %(name)s - did not find" +#~ " success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "_get_vdisk_copy_attrs: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "_get_pool_attrs: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "enter: _execute_command_and_parse_attributes: command %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _execute_command_and_parse_attributes:\n" +#~ "command: %(cmd)s\n" +#~ "attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "_get_hdr_dic: attribute headers and values do not match.\n" +#~ " Headers: %(header)s\n" +#~ " Values: %(row)s" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ "stdout: %(out)s\n" +#~ "stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "Did not find expected column in %(fun)s: %(hdr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Volume size %(size)s cannot be lesser" +#~ " than the snapshot size %(snap_size)s. " +#~ "They must be >= original snapshot " +#~ "size." +#~ msgstr "" + +#~ msgid "" +#~ "Clones currently disallowed when %(size)s " +#~ "< %(source_size)s. They must be >= " +#~ "original volume size." +#~ msgstr "" + +#~ msgid "" +#~ "Size of specified image %(image_size)s " +#~ "is larger than volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "" +#~ "Image minDisk size %(min_disk)s is " +#~ "larger than the volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "Updating volume %(volume_id)s with %(update)s" +#~ msgstr "" + +#~ msgid "Volume %s: resetting 'creating' status failed" +#~ msgstr "" + +#~ msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s" +#~ msgstr "" + +#~ msgid "Marking volume %s as bootable" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting download of %(image_id)s " +#~ "(%(image_location)s) to volume %(volume_id)s" +#~ msgstr "" + +#~ msgid "" +#~ "Downloaded image %(image_id)s (%(image_location)s)" +#~ " to volume %(volume_id)s successfully" +#~ msgstr "" + +#~ msgid "" +#~ "Creating volume glance metadata for " +#~ "volume %(volume_id)s backed by image " +#~ "%(image_id)s with: %(vol_metadata)s" +#~ msgstr "" + +#~ msgid "" +#~ "Cloning %(volume_id)s from image %(image_id)s" +#~ " at location %(image_location)s" +#~ msgstr "" + diff --git a/cinder/locale/id/LC_MESSAGES/cinder.po b/cinder/locale/id/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..fea84645d6 --- /dev/null +++ b/cinder/locale/id/LC_MESSAGES/cinder.po @@ -0,0 +1,10736 @@ +# Indonesian translations for cinder. +# Copyright (C) 2013 ORGANIZATION +# This file is distributed under the same license as the cinder project. 
+# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Cinder\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-02-03 06:16+0000\n" +"PO-Revision-Date: 2013-05-29 08:13+0000\n" +"Last-Translator: openstackjenkins \n" +"Language-Team: Indonesian " +"(http://www.transifex.com/projects/p/openstack/language/id/)\n" +"Plural-Forms: nplurals=1; plural=0\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:102 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:66 cinder/brick/exception.py:33 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:88 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:107 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:112 +#, python-format +msgid "Volume driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:116 +#, python-format +msgid "Backup driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:120 +#, python-format +msgid "Connection to glance failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:124 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:129 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:133 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:137 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:141 +msgid "Volume driver not ready." +msgstr "" + +#: cinder/exception.py:145 cinder/brick/exception.py:74 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:150 +#, python-format +msgid "Invalid snapshot: %(reason)s" +msgstr "" + +#: cinder/exception.py:154 +#, python-format +msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:159 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:163 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:167 +msgid "The results are invalid." +msgstr "" + +#: cinder/exception.py:171 +#, python-format +msgid "Invalid input received: %(reason)s" +msgstr "" + +#: cinder/exception.py:175 +#, python-format +msgid "Invalid volume type: %(reason)s" +msgstr "" + +#: cinder/exception.py:179 +#, python-format +msgid "Invalid volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:183 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:187 +#, python-format +msgid "Invalid host: %(reason)s" +msgstr "" + +#: cinder/exception.py:193 cinder/brick/exception.py:81 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:197 +#, python-format +msgid "Invalid auth key: %(reason)s" +msgstr "" + +#: cinder/exception.py:201 +#, python-format +msgid "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" +msgstr "" + +#: cinder/exception.py:206 +msgid "Service is unavailable at this time." 
+msgstr "" + +#: cinder/exception.py:210 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:214 +#, python-format +msgid "The device in the path %(path)s is unavailable: %(reason)s" +msgstr "" + +#: cinder/exception.py:218 +#, python-format +msgid "Expected a uuid but received %(uuid)s." +msgstr "" + +#: cinder/exception.py:222 cinder/brick/exception.py:68 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:228 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:232 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "" +"Volume %(volume_id)s has no administration metadata with key " +"%(metadata_key)s." +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Invalid metadata: %(reason)s" +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Invalid metadata size: %(reason)s" +msgstr "" + +#: cinder/exception.py:250 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:255 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:264 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:269 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s deletion is not allowed with volumes " +"present with the type." +msgstr "" + +#: cinder/exception.py:274 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:278 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:282 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:287 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:291 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:295 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:328 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:332 +#, python-format +msgid "Unknown quota resources %(unknown)s." 
+msgstr "" + +#: cinder/exception.py:336 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:340 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:344 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:348 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:352 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:356 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:365 +#, python-format +msgid "Volume Type %(id)s already exists." +msgstr "" + +#: cinder/exception.py:369 +#, python-format +msgid "Volume type encryption for type %(type_id)s already exists." +msgstr "" + +#: cinder/exception.py:373 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:377 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:381 +#, python-format +msgid "Could not find parameter %(param)s" +msgstr "" + +#: cinder/exception.py:385 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:389 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:398 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:402 +#, python-format +msgid "Quota exceeded: code=%(code)s" +msgstr "" + +#: cinder/exception.py:409 +#, python-format +msgid "" +"Requested volume or snapshot exceeds allowed Gigabytes quota. Requested " +"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." +msgstr "" + +#: cinder/exception.py:415 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:419 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:423 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:427 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:432 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:436 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:440 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:444 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:449 +#, python-format +msgid "Glance metadata for volume/snapshot %(id)s cannot be found." 
+msgstr "" + +#: cinder/exception.py:453 +#, python-format +msgid "Failed to export for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:457 +#, python-format +msgid "Failed to create metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:461 +#, python-format +msgid "Failed to update metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:465 +#, python-format +msgid "Failed to copy metadata to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:469 +#, python-format +msgid "Failed to copy image to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:473 +msgid "Invalid Ceph args provided for backup rbd operation" +msgstr "" + +#: cinder/exception.py:477 +msgid "An error has occurred during backup operation" +msgstr "" + +#: cinder/exception.py:481 +msgid "Backup RBD operation failed" +msgstr "" + +#: cinder/exception.py:485 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:489 +msgid "Failed to identify volume backend." +msgstr "" + +#: cinder/exception.py:493 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "" + +#: cinder/exception.py:497 +#, python-format +msgid "Connection to swift failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:501 +#, python-format +msgid "Transfer %(transfer_id)s could not be found." +msgstr "" + +#: cinder/exception.py:505 +#, python-format +msgid "Volume migration failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:509 +#, python-format +msgid "SSH command injection detected: %(command)s" +msgstr "" + +#: cinder/exception.py:513 +#, python-format +msgid "QoS Specs %(specs_id)s already exists." +msgstr "" + +#: cinder/exception.py:517 +#, python-format +msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:522 +#, python-format +msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "No such QoS spec %(specs_id)s." +msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:536 +#, python-format +msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:541 +#, python-format +msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." +msgstr "" + +#: cinder/exception.py:546 +#, python-format +msgid "Invalid qos specs: %(reason)s" +msgstr "" + +#: cinder/exception.py:550 +#, python-format +msgid "QoS Specs %(specs_id)s is still associated with entities." +msgstr "" + +#: cinder/exception.py:554 +#, python-format +msgid "key manager error: %(reason)s" +msgstr "" + +#: cinder/exception.py:560 +msgid "Coraid Cinder Driver exception." +msgstr "" + +#: cinder/exception.py:564 +msgid "Failed to encode json data." +msgstr "" + +#: cinder/exception.py:568 +msgid "Login on ESM failed." +msgstr "" + +#: cinder/exception.py:572 +msgid "Relogin on ESM failed." +msgstr "" + +#: cinder/exception.py:576 +#, python-format +msgid "Group with name \"%(group_name)s\" not found." +msgstr "" + +#: cinder/exception.py:580 +#, python-format +msgid "ESM configure request failed: %(message)s." +msgstr "" + +#: cinder/exception.py:584 +#, python-format +msgid "Coraid ESM not available with reason: %(reason)s." +msgstr "" + +#: cinder/exception.py:589 +msgid "Zadara Cinder Driver exception." 
+msgstr "" + +#: cinder/exception.py:593 +#, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:597 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:601 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:605 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:609 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:613 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:618 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:622 +msgid "SolidFire Cinder Driver exception" +msgstr "" + +#: cinder/exception.py:626 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:630 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:636 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:641 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:645 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:649 cinder/exception.py:662 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:654 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:658 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/manager.py:133 +msgid "Notifying Schedulers of capabilities ..." +msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:105 +#, python-format +msgid "" +"Default quota for resource: %(res)s is set by the default quota flag: " +"quota_%(res)s, it is now deprecated. Please use the the default quota " +"class for default quota." +msgstr "" + +#: cinder/quota.py:748 +#, python-format +msgid "Created reservations %s" +msgstr "" + +#: cinder/quota.py:770 +#, python-format +msgid "Failed to commit reservations %s" +msgstr "" + +#: cinder/quota.py:790 +#, python-format +msgid "Failed to roll back reservations %s" +msgstr "" + +#: cinder/quota.py:876 +msgid "Cannot register resource" +msgstr "" + +#: cinder/quota.py:879 +msgid "Cannot register resources" +msgstr "" + +#: cinder/quota_utils.py:46 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume - " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/quota_utils.py:56 cinder/transfer/api.py:168 +#: cinder/volume/flows/api/create_volume.py:520 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/service.py:95 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:108 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:148 +#, python-format +msgid "" +"Report interval must be less than service down time. Current config " +"service_down_time: %(service_down_time)s, report_interval for this: " +"service is: %(report_interval)s. 
Setting global service_down_time to: " +"%(new_down_time)s" +msgstr "" + +#: cinder/service.py:216 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:255 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:270 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:276 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:298 +#, python-format +msgid "" +"Value of config option %(name)s_workers must be integer greater than 1. " +"Input value ignored." +msgstr "" + +#: cinder/service.py:373 +msgid "serve() can only be called once" +msgstr "" + +#: cinder/service.py:379 cinder/openstack/common/service.py:166 +#: cinder/openstack/common/service.py:384 +msgid "Full set of CONF:" +msgstr "" + +#: cinder/service.py:387 +#, python-format +msgid "%s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Can not translate %s to integer." +msgstr "" + +#: cinder/utils.py:127 +#, python-format +msgid "May specify only one of %s" +msgstr "" + +#: cinder/utils.py:212 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:228 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:412 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:423 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:698 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:759 +#, python-format +msgid "Volume driver %s not initialized" +msgstr "" + +#: cinder/wsgi.py:127 cinder/openstack/common/sslutils.py:50 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: cinder/wsgi.py:130 cinder/openstack/common/sslutils.py:53 +#, python-format +msgid "Unable to find ca_file : %s" +msgstr "" + +#: cinder/wsgi.py:133 cinder/openstack/common/sslutils.py:56 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: cinder/wsgi.py:136 cinder/openstack/common/sslutils.py:59 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:169 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:206 +#, python-format +msgid "Started %(name)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:244 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:313 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." 
+msgstr "" + +#: cinder/api/common.py:92 cinder/api/common.py:126 cinder/volume/api.py:266 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:95 cinder/api/common.py:130 cinder/volume/api.py:263 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:120 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:134 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:162 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:189 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:182 +msgid "Initializing extension manager." +msgstr "" + +#: cinder/api/extensions.py:197 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:235 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:236 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:240 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:256 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:262 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:276 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:287 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:356 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:266 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:463 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:786 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:907 +msgid "subclasses must implement construct()!" 
+msgstr "" + +#: cinder/api/contrib/admin_actions.py:81 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:227 +#: cinder/api/contrib/volume_transfer.py:157 +#: cinder/api/contrib/volume_transfer.py:193 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:224 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:60 +msgid "Snapshot not found." +msgstr "" + +#: cinder/api/contrib/hosts.py:86 cinder/api/openstack/wsgi.py:245 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:136 +#, python-format +msgid "Host '%s' could not be found." +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:168 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:180 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:206 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:214 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:111 +msgid "Please specify a name for QoS specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:220 +msgid "Failed to disassociate qos specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:222 +msgid "Qos specs still in use." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:298 +#: cinder/api/contrib/qos_specs_manage.py:351 +msgid "Volume Type id must not be None." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:72 +msgid "Missing required element quota_class_set in request body." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:81 +msgid "Quota class limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:85 +msgid "Quota class limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:60 +msgid "Quota limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quotas.py:65 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:100 +msgid "Missing required element quota_set in request body." +msgstr "" + +#: cinder/api/contrib/quotas.py:111 +#, python-format +msgid "Bad key(s) in quota set: %s" +msgstr "" + +#: cinder/api/contrib/scheduler_hints.py:36 +msgid "Malformed scheduler_hints attribute" +msgstr "" + +#: cinder/api/contrib/services.py:84 +msgid "" +"Query by service parameter is deprecated. Please use binary parameter " +"instead." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:51 +msgid "'status' must be specified." 
+msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:61 +#, python-format +msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:67 +#, python-format +msgid "" +"Provided snapshot status %(provided)s not allowed for snapshot with " +"status %(current)s." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:79 +msgid "progress must be an integer percentage" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:101 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:105 +#: cinder/api/v1/snapshot_metadata.py:75 cinder/api/v1/volume_metadata.py:75 +#: cinder/api/v2/snapshot_metadata.py:75 cinder/api/v2/volume_metadata.py:74 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:108 +#: cinder/api/v1/snapshot_metadata.py:79 cinder/api/v1/volume_metadata.py:79 +#: cinder/api/v2/snapshot_metadata.py:79 cinder/api/v2/volume_metadata.py:78 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:150 +msgid "" +"Key names can only contain alphanumeric characters, underscores, periods," +" colons and hyphens." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:99 +#, python-format +msgid "" +"Invalid request to attach volume to an instance %(instance_uuid)s and a " +"host %(host_name)s simultaneously" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:107 +msgid "Invalid request to attach volume to an invalid target" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:111 +msgid "" +"Invalid request to attach volume with an invalid mode. Attaching mode " +"should be 'rw' or 'ro'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:196 +msgid "Unable to fetch connection information from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:216 +msgid "Unable to terminate volume connection from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:229 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:237 +msgid "Bad value for 'force' parameter." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:240 +msgid "'force' is not string or bool." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:280 +msgid "New volume size must be specified as an integer." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:299 +msgid "Must specify readonly in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:307 +msgid "Bad value for 'readonly'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:311 +msgid "'readonly' not string or bool" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:325 +msgid "New volume type must be specified." 
+msgstr "" + +#: cinder/api/contrib/volume_transfer.py:131 +msgid "Listing volume transfers" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:147 +#, python-format +msgid "Creating new volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:162 +#, python-format +msgid "Creating transfer of volume %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:183 +#, python-format +msgid "Accepting volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:196 +#, python-format +msgid "Accepting transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:217 +#, python-format +msgid "Delete transfer with id: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:64 +msgid "key_size must be non-negative" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:67 +msgid "key_size must be an integer" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:73 +msgid "provider must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:75 +msgid "control_location must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:83 +#, python-format +msgid "Valid control location are: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:111 +msgid "Cannot create encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:115 +msgid "Create body is not valid." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:157 +msgid "Cannot delete encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/middleware/auth.py:108 +msgid "Invalid service catalog json." +msgstr "" + +#: cinder/api/middleware/fault.py:44 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:53 cinder/api/openstack/wsgi.py:984 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/fault.py:69 +#, python-format +msgid "%(exception)s: %(explanation)s" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:55 cinder/api/middleware/sizelimit.py:64 +#: cinder/api/middleware/sizelimit.py:78 +msgid "Request is too large." +msgstr "" + +#: cinder/api/openstack/__init__.py:69 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:80 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:104 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:126 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." 
+msgstr "" + +#: cinder/api/openstack/wsgi.py:220 cinder/api/openstack/wsgi.py:634 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:639 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:677 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:682 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:685 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:793 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:799 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:803 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:914 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:917 cinder/api/openstack/wsgi.py:930 +#: cinder/api/v1/snapshot_metadata.py:53 cinder/api/v1/snapshot_metadata.py:71 +#: cinder/api/v1/snapshot_metadata.py:96 cinder/api/v1/snapshot_metadata.py:121 +#: cinder/api/v1/volume_metadata.py:53 cinder/api/v1/volume_metadata.py:71 +#: cinder/api/v1/volume_metadata.py:96 cinder/api/v1/volume_metadata.py:121 +#: cinder/api/v2/snapshot_metadata.py:53 cinder/api/v2/snapshot_metadata.py:71 +#: cinder/api/v2/snapshot_metadata.py:96 cinder/api/v2/snapshot_metadata.py:121 +#: cinder/api/v2/volume_metadata.py:52 cinder/api/v2/volume_metadata.py:70 +#: cinder/api/v2/volume_metadata.py:95 cinder/api/v2/volume_metadata.py:120 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:927 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:939 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:987 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:139 cinder/api/v2/limits.py:138 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:264 cinder/api/v2/limits.py:261 +msgid "This request was rate-limited." 
+msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:37 cinder/api/v1/snapshot_metadata.py:117 +#: cinder/api/v1/snapshot_metadata.py:156 cinder/api/v2/snapshot_metadata.py:37 +#: cinder/api/v2/snapshot_metadata.py:117 +#: cinder/api/v2/snapshot_metadata.py:156 +msgid "snapshot does not exist" +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:139 +#: cinder/api/v1/snapshot_metadata.py:149 cinder/api/v1/volume_metadata.py:139 +#: cinder/api/v1/volume_metadata.py:149 cinder/api/v2/snapshot_metadata.py:139 +#: cinder/api/v2/snapshot_metadata.py:149 cinder/api/v2/volume_metadata.py:138 +#: cinder/api/v2/volume_metadata.py:148 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:119 cinder/api/v2/snapshots.py:120 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:184 +msgid "'volume_id' must be specified" +msgstr "" + +#: cinder/api/v1/snapshots.py:182 cinder/api/v2/snapshots.py:193 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:186 cinder/api/v2/snapshots.py:202 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:37 cinder/api/v1/volume_metadata.py:117 +#: cinder/api/v1/volume_metadata.py:156 cinder/api/v2/volume_metadata.py:36 +#: cinder/api/v2/volume_metadata.py:116 cinder/api/v2/volume_metadata.py:155 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:111 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:290 cinder/api/v2/volumes.py:228 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:344 cinder/api/v1/volumes.py:348 +#: cinder/api/v2/volumes.py:298 cinder/api/v2/volumes.py:302 +msgid "Invalid imageRef provided." +msgstr "" + +#: cinder/api/v1/volumes.py:388 cinder/api/v2/volumes.py:354 +#, python-format +msgid "snapshot id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:401 +#, python-format +msgid "source vol id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:412 cinder/api/v2/volumes.py:377 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/v1/volumes.py:496 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/snapshots.py:111 cinder/api/v2/snapshots.py:126 +#: cinder/api/v2/snapshots.py:267 +msgid "Snapshot could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:174 cinder/api/v2/snapshots.py:234 +#: cinder/api/v2/volumes.py:313 cinder/api/v2/volumes.py:419 +#, python-format +msgid "Missing required element '%s' in request body" +msgstr "" + +#: cinder/api/v2/snapshots.py:190 cinder/api/v2/volumes.py:217 +#: cinder/api/v2/volumes.py:234 cinder/api/v2/volumes.py:449 +msgid "Volume could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:230 cinder/api/v2/volumes.py:415 +msgid "Missing request body" +msgstr "" + +#: cinder/api/v2/types.py:70 +msgid "Volume type not found" +msgstr "" + +#: cinder/api/v2/volumes.py:237 +msgid "Volume cannot be deleted while in attached state" +msgstr "" + +#: cinder/api/v2/volumes.py:343 +msgid "Volume type not found." 
+msgstr "" + +#: cinder/api/v2/volumes.py:366 +#, python-format +msgid "source volume id:%s not found" +msgstr "" + +#: cinder/api/v2/volumes.py:472 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:66 +msgid "Backup status must be available or error" +msgstr "" + +#: cinder/backup/api.py:105 +msgid "Volume to be backed up must be available" +msgstr "" + +#: cinder/backup/api.py:140 +msgid "Backup status must be available" +msgstr "" + +#: cinder/backup/api.py:145 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:154 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:170 +msgid "Volume to be restored to must be available" +msgstr "" + +#: cinder/backup/api.py:176 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." +msgstr "" + +#: cinder/backup/api.py:181 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:97 +msgid "NULL host not allowed for volume backend lookup." +msgstr "" + +#: cinder/backup/manager.py:100 +#, python-format +msgid "Checking hostname '%s' for backend info." +msgstr "" + +#: cinder/backup/manager.py:107 +#, python-format +msgid "Backend not found in hostname (%s) so using default." +msgstr "" + +#: cinder/backup/manager.py:117 +#, python-format +msgid "Manager requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:120 cinder/backup/manager.py:132 +msgid "Fetching default backend." +msgstr "" + +#: cinder/backup/manager.py:123 +#, python-format +msgid "Volume manager for backend '%s' does not exist." +msgstr "" + +#: cinder/backup/manager.py:129 +#, python-format +msgid "Driver requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:147 +#, python-format +msgid "" +"Registering backend %(backend)s (host=%(host)s " +"backend_name=%(backend_name)s)." +msgstr "" + +#: cinder/backup/manager.py:154 +#, python-format +msgid "Registering default backend %s." +msgstr "" + +#: cinder/backup/manager.py:158 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)." +msgstr "" + +#: cinder/backup/manager.py:165 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s." +msgstr "" + +#: cinder/backup/manager.py:184 +msgid "Cleaning up incomplete backup operations." +msgstr "" + +#: cinder/backup/manager.py:189 +#, python-format +msgid "Resetting volume %s to available (was backing-up)." +msgstr "" + +#: cinder/backup/manager.py:194 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)." +msgstr "" + +#: cinder/backup/manager.py:206 +#, python-format +msgid "Resetting backup %s to error (was creating)." +msgstr "" + +#: cinder/backup/manager.py:212 +#, python-format +msgid "Resetting backup %s to available (was restoring)." +msgstr "" + +#: cinder/backup/manager.py:217 +#, python-format +msgid "Resuming delete on backup: %s." +msgstr "" + +#: cinder/backup/manager.py:225 +#, python-format +msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:237 +#, python-format +msgid "" +"Create backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s." 
+msgstr "" + +#: cinder/backup/manager.py:249 +#, python-format +msgid "" +"Create backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:282 +#, python-format +msgid "Create backup finished. backup: %s." +msgstr "" + +#: cinder/backup/manager.py:286 +#, python-format +msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:299 +#, python-format +msgid "" +"Restore backup aborted: expected volume status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:310 +#, python-format +msgid "" +"Restore backup aborted: expected backup status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:329 +#, python-format +msgid "" +"Restore backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:360 +#, python-format +msgid "" +"Restore backup finished, backup %(backup_id)s restored to volume " +"%(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:379 +#, python-format +msgid "Delete backup started, backup: %s." +msgstr "" + +#: cinder/backup/manager.py:386 +#, python-format +msgid "" +"Delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:399 +#, python-format +msgid "" +"Delete backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:422 +#, python-format +msgid "Delete backup finished, backup %s deleted." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:116 +msgid "" +"rbd striping not supported - ignoring configuration settings for rbd " +"striping" +msgstr "" + +#: cinder/backup/drivers/ceph.py:147 +#, python-format +msgid "invalid user '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:213 +msgid "backup_id required" +msgstr "" + +#: cinder/backup/drivers/ceph.py:224 +#, python-format +msgid "discarding %(length)s bytes from offset %(offset)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:232 +#, python-format +msgid "writing zeroes chunk %d" +msgstr "" + +#: cinder/backup/drivers/ceph.py:246 +#, python-format +msgid "transferring data between '%(src)s' and '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:250 +#, python-format +msgid "%(chunks)s chunks of %(bytes)s bytes to be transferred" +msgstr "" + +#: cinder/backup/drivers/ceph.py:269 +#, python-format +msgid "transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:279 +#, python-format +msgid "transferring remaining %s bytes" +msgstr "" + +#: cinder/backup/drivers/ceph.py:295 +#, python-format +msgid "creating base image '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:322 cinder/backup/drivers/ceph.py:603 +#, python-format +msgid "deleting backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:325 +msgid "no backup snapshot to delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:361 +#, python-format +msgid "trying diff format name format basename='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:369 +#, python-format +msgid "image %s not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:377 +#, python-format +msgid "base image still has %s snapshots so skipping base image delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:382 +#, python-format +msgid "deleting base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:389 +#, python-format +msgid "image busy, retrying %(retries)s more time(s) in %(delay)ss" +msgstr "" + +#: cinder/backup/drivers/ceph.py:394 +msgid "max retries reached - raising error" +msgstr "" + +#: cinder/backup/drivers/ceph.py:397 +#, python-format +msgid "base backup image='%s' deleted)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:407 +#, python-format +msgid "deleting source snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:453 +#, python-format +msgid "performing differential transfer from '%(src)s' to '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:478 +#, python-format +msgid "rbd diff op failed - (ret=%(ret)s stderr=%(stderr)s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:488 +#, python-format +msgid "image '%s' not found - trying diff format name" +msgstr "" + +#: cinder/backup/drivers/ceph.py:493 +#, python-format +msgid "diff format image '%s' not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:528 +#, python-format +msgid "using --from-snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:543 +#, python-format +msgid "source snap '%s' is stale so deleting" +msgstr "" + +#: cinder/backup/drivers/ceph.py:555 +#, python-format +msgid "" +"snap='%(snap)s' does not exist in base image='%(base)s' - aborting " +"incremental backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:566 +#, python-format +msgid "creating backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:586 +#, python-format +msgid "differential backup transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:595 +msgid "differential backup transfer failed" +msgstr "" + +#: 
cinder/backup/drivers/ceph.py:625 +#, python-format +msgid "creating base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:634 +msgid "copying data" +msgstr "" + +#: cinder/backup/drivers/ceph.py:694 +#, python-format +msgid "looking for snapshot of backup base '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:697 +#, python-format +msgid "backup base '%s' has no snapshots" +msgstr "" + +#: cinder/backup/drivers/ceph.py:704 +#, python-format +msgid "backup '%s' has no snapshot" +msgstr "" + +#: cinder/backup/drivers/ceph.py:708 +#, python-format +msgid "backup should only have one snapshot but instead has %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:713 +#, python-format +msgid "found snapshot '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:734 +msgid "need non-zero volume size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:751 +#, python-format +msgid "Starting backup of volume='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:764 +msgid "forcing full backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:776 +#, python-format +msgid "backup '%s' finished." +msgstr "" + +#: cinder/backup/drivers/ceph.py:834 +msgid "adjusting restore vol size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:846 +#, python-format +msgid "trying incremental restore from base='%(base)s' snap='%(snap)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:858 +msgid "differential restore failed, trying full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:869 +#, python-format +msgid "restore transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:916 +#, python-format +msgid "rbd has %s extents" +msgstr "" + +#: cinder/backup/drivers/ceph.py:938 +msgid "dest volume is original volume - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:959 +msgid "destination has extents - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:964 +#, python-format +msgid "no restore point found for backup='%s', forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:995 +msgid "forcing full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1005 +#, python-format +msgid "starting restore from Ceph backup=%(src)s to volume=%(dest)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1016 +msgid "volume_file does not support fileno() so skipping fsync()" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1021 +msgid "restore finished successfully." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:1023 +#, python-format +msgid "restore finished with error - %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1029 +#, python-format +msgid "delete started for backup=%s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1034 +msgid "rbd image not found but continuing anyway so that db entry can be removed" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1037 +#, python-format +msgid "delete '%s' finished with warning" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1039 +#, python-format +msgid "delete '%s' finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:106 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:123 +#, python-format +msgid "single_user auth mode enabled, but %(param)s not set" +msgstr "" + +#: cinder/backup/drivers/swift.py:141 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:146 +#, python-format +msgid "container %s does not exist" +msgstr "" + +#: cinder/backup/drivers/swift.py:151 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/drivers/swift.py:157 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:173 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:182 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:192 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:209 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/drivers/swift.py:214 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:219 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:224 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/drivers/swift.py:234 +#, python-format +msgid "volume size %d is invalid." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:248 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:271 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/drivers/swift.py:278 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:287 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/drivers/swift.py:291 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/drivers/swift.py:297 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:301 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:304 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:312 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/drivers/swift.py:328 cinder/backup/drivers/tsm.py:324 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/drivers/swift.py:345 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/drivers/swift.py:350 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:356 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/drivers/swift.py:362 +#, python-format +msgid "" +"restoring object from swift. backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:378 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/drivers/swift.py:401 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:409 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:423 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:428 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:432 cinder/backup/drivers/tsm.py:378 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:446 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:455 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:458 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:468 cinder/backup/drivers/tsm.py:440 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/backup/drivers/tsm.py:85 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to create device hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:143 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to obtain backup success notification from " +"server.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:173 +#, python-format +msgid "" +"restore: %(vol_id)s Failed.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:199 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a block device." +msgstr "" + +#: cinder/backup/drivers/tsm.py:206 +#, python-format +msgid "backup: %(vol_id)s Failed. Cannot obtain real path to device %(path)s." +msgstr "" + +#: cinder/backup/drivers/tsm.py:213 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a file." +msgstr "" + +#: cinder/backup/drivers/tsm.py:260 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to remove backup hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:286 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to TSM, volume path: " +"%(volume_path)s," +msgstr "" + +#: cinder/backup/drivers/tsm.py:298 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:308 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:338 +#, python-format +msgid "" +"restore: starting restore of backup from TSM to volume %(volume_id)s, " +"backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:352 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:362 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:413 +#, python-format +msgid "" +"delete: %(vol_id)s Failed to run dsmc with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:421 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments with " +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:432 +#, python-format +msgid "" +"delete: %(vol_id)s Failed with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/brick/exception.py:55 +#, python-format +msgid "Exception in string format operation. 
msg='%s'" +msgstr "" + +#: cinder/brick/exception.py:85 +msgid "We are unable to locate any Fibre Channel devices." +msgstr "" + +#: cinder/brick/exception.py:89 +msgid "Unable to find a Fibre Channel volume device." +msgstr "" + +#: cinder/brick/exception.py:93 +#, python-format +msgid "Volume device not found at %(device)s." +msgstr "" + +#: cinder/brick/exception.py:97 +#, python-format +msgid "Unable to find Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:101 +#, python-format +msgid "Failed to create Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:105 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:109 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:113 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:117 +#, python-format +msgid "Connect to volume via protocol %(protocol)s not supported." +msgstr "" + +#: cinder/brick/initiator/connector.py:127 +#, python-format +msgid "Invalid InitiatorConnector protocol specified %(protocol)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:140 +#, python-format +msgid "Failed to access the device on the path %(path)s: %(error)s %(info)s." +msgstr "" + +#: cinder/brick/initiator/connector.py:229 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. Try" +" number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:242 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:317 +#, python-format +msgid "Could not find the iSCSI Initiator File %s" +msgstr "" + +#: cinder/brick/initiator/connector.py:609 +msgid "We are unable to locate any Fibre Channel devices" +msgstr "" + +#: cinder/brick/initiator/connector.py:619 +#, python-format +msgid "Looking for Fibre Channel dev %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:629 +msgid "Fibre Channel volume device not found." +msgstr "" + +#: cinder/brick/initiator/connector.py:633 +#, python-format +msgid "Fibre volume not yet found. Will rescan & retry. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:649 +#, python-format +msgid "Found Fibre Channel volume %(name)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:658 +#, python-format +msgid "Multipath device discovered %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:776 +#, python-format +msgid "AoE volume not yet found at: %(path)s. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:789 +#, python-format +msgid "Found AoE device %(path)s (after %(tries)s rediscover)" +msgstr "" + +#: cinder/brick/initiator/connector.py:815 +#, python-format +msgid "aoe-discover: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:825 +#, python-format +msgid "aoe-revalidate %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:834 +#, python-format +msgid "aoe-flush %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:858 +msgid "" +"Connection details not present. RemoteFsClient may not initialize " +"properly." 
+msgstr "" + +#: cinder/brick/initiator/connector.py:915 +msgid "Invalid connection_properties specified no device_path attribute" +msgstr "" + +#: cinder/brick/initiator/linuxfc.py:50 cinder/brick/initiator/linuxfc.py:56 +msgid "systool is not installed" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:99 +#: cinder/brick/initiator/linuxscsi.py:107 +#: cinder/brick/initiator/linuxscsi.py:124 +#, python-format +msgid "multipath call failed exit (%(code)s)" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:145 +#, python-format +msgid "Couldn't find multipath device %(line)s" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:149 +#, python-format +msgid "Found multipath device = %(mdev)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:140 +msgid "Attempting recreate of backing lun..." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:158 +#, python-format +msgid "" +"Failed to recover attempt to create iscsi backing lun for volume " +"id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:177 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:184 +#, python-format +msgid "" +"Created volume path %(vp)s,\n" +"content: %(vc)%" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:216 cinder/brick/iscsi/iscsi.py:365 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:227 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:258 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:262 +#, python-format +msgid "Volume path %s does not exist, nothing to remove." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:280 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:290 cinder/brick/iscsi/iscsi.py:550 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:375 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:469 +msgid "cinder-rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:489 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:513 cinder/brick/iscsi/iscsi.py:522 +#, python-format +msgid "Failed to create iscsi target for volume id:%s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:532 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:542 +#, python-format +msgid "Failed to remove iscsi target for volume id:%s." 
+msgstr "" + +#: cinder/brick/iscsi/iscsi.py:571 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 cinder/brick/local_dev/lvm.py:158 +#: cinder/brick/local_dev/lvm.py:474 cinder/brick/local_dev/lvm.py:503 +#: cinder/brick/local_dev/lvm.py:546 cinder/brick/local_dev/lvm.py:609 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 cinder/brick/local_dev/lvm.py:159 +#: cinder/brick/local_dev/lvm.py:475 cinder/brick/local_dev/lvm.py:504 +#: cinder/brick/local_dev/lvm.py:547 cinder/brick/local_dev/lvm.py:610 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 cinder/brick/local_dev/lvm.py:160 +#: cinder/brick/local_dev/lvm.py:476 cinder/brick/local_dev/lvm.py:505 +#: cinder/brick/local_dev/lvm.py:548 cinder/brick/local_dev/lvm.py:611 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, python-format +msgid "Unable to locate Volume Group %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:157 +msgid "Error querying thin pool about data_percent" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:370 +#, python-format +msgid "Unable to find VG: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:420 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:473 +msgid "Error creating Volume" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:489 +#, python-format +msgid "Unable to find LV: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:502 +msgid "Error creating snapshot" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:545 +msgid "Error activating LV" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:563 +#, python-format +msgid "Error reported running lvremove: CMD: %(command)s, RESPONSE: %(response)s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:568 +msgid "Attempting udev settle and retry of lvremove..." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:608 +msgid "Error extending Volume" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:39 +msgid "nfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:45 +msgid "glusterfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:86 +#, python-format +msgid "Already mounted: %s" +msgstr "" + +#: cinder/common/config.py:125 +msgid "Deploy v1 of the Cinder API." +msgstr "" + +#: cinder/common/config.py:128 +msgid "Deploy v2 of the Cinder API." +msgstr "" + +#: cinder/common/sqlalchemyutils.py:66 +#: cinder/openstack/common/db/sqlalchemy/utils.py:72 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:114 +#: cinder/openstack/common/db/sqlalchemy/utils.py:120 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/compute/nova.py:97 +#, python-format +msgid "Novaclient connection created using URL: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:63 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:190 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:843 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1842 +#, python-format +msgid "VolumeType %s deletion failed, VolumeType in use." 
+msgstr "" + +#: cinder/db/sqlalchemy/api.py:2530 +#, python-format +msgid "No backup with id %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2615 +msgid "Volume must be available" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2639 +#, python-format +msgid "Volume in unexpected state %s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2662 +#, python-format +msgid "" +"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " +"%(status)s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:37 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:64 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:240 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:269 +msgid "Downgrade from initial Cinder install is unsupported." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:49 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:74 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:105 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:45 +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:48 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:46 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:127 +msgid "Dropping foreign key reservations_ibfk_1 failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:133 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:140 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:147 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:60 +msgid "Exception while creating table 'volume_glance_metadata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:75 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:68 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:58 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:61 +msgid "transfers table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:31 +msgid "migrations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:61 +#, python-format +msgid "Table |%s| not created" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:37 +#, python-format +msgid "Exception while dropping table %s." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:100 +#, python-format +msgid "Exception while creating table %s." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:34 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:43 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:49 +#, python-format +msgid "Column |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:92 +msgid "encryption_key_id column not dropped from volumes" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:100 +msgid "encryption_key_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:105 +msgid "volume_type_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:113 +msgid "encryption table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:49 +msgid "Table quality_of_service_specs not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:60 +msgid "Added qos_specs_id column to volume type table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:85 +msgid "Dropping foreign key volume_types_ibfk_1 failed" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:93 +msgid "Dropping qos_specs_id column failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:100 +msgid "Dropping quality_of_service_specs table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:59 +msgid "volume_admin_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:48 +msgid "" +"Found existing 'default' entries in the quota_classes table. Skipping " +"insertion of default values." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:72 +msgid "Added default quota class data into the DB." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:74 +msgid "Default quota class data not inserted into the DB." +msgstr "" + +#: cinder/image/glance.py:161 cinder/image/glance.py:169 +#, python-format +msgid "Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:94 cinder/image/image_utils.py:199 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/image/image_utils.py:101 +#, python-format +msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:109 cinder/image/image_utils.py:192 +#, python-format +msgid "" +"Size is %(image_size)dGB and doesn't fit in a volume of size " +"%(volume_size)dGB." +msgstr "" + +#: cinder/image/image_utils.py:157 +#, python-format +msgid "" +"qemu-img is not installed and image is of type %s. Only RAW images can " +"be used if qemu-img is not installed." +msgstr "" + +#: cinder/image/image_utils.py:164 +msgid "" +"qemu-img is not installed and the disk format is not specified. Only RAW" +" images can be used if qemu-img is not installed." 
+msgstr "" + +#: cinder/image/image_utils.py:178 +#, python-format +msgid "Copying image from %(tmp)s to volume %(dest)s - size: %(size)s" +msgstr "" + +#: cinder/image/image_utils.py:206 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:224 +#, python-format +msgid "Converted to %(vol_format)s, but format is now %(file_format)s" +msgstr "" + +#: cinder/image/image_utils.py:260 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:78 +msgid "" +"config option keymgr.fixed_key has not been defined: some operations may " +"fail unexpectedly" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:80 +msgid "keymgr.fixed_key not defined" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:134 +#, python-format +msgid "Not deleting key %s" +msgstr "" + +#: cinder/openstack/common/eventlet_backdoor.py:140 +#, python-format +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/fileutils.py:64 +#, python-format +msgid "Reloading cached file %s" +msgstr "" + +#: cinder/openstack/common/gettextutils.py:252 +msgid "Message objects do not support addition." +msgstr "" + +#: cinder/openstack/common/gettextutils.py:261 +msgid "" +"Message objects do not support str() because they may contain non-ascii " +"characters. Please use unicode() or translate() instead." +msgstr "" + +#: cinder/openstack/common/imageutils.py:96 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:189 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:200 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:227 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:235 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/log.py:301 +#, python-format +msgid "Deprecated: %s" +msgstr "" + +#: cinder/openstack/common/log.py:402 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:453 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:623 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:82 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:89 +#: cinder/tests/brick/test_brick_connector.py:466 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:129 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:136 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:43 +#, python-format +msgid "Unexpected argument for periodic task creation: %(arg)s." 
+msgstr "" + +#: cinder/openstack/common/periodic_task.py:134 +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:139 +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:177 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:186 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." +msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:127 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/openstack/common/processutils.py:142 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:167 +#: cinder/openstack/common/processutils.py:239 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:345 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:179 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/openstack/common/processutils.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:318 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:220 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/openstack/common/processutils.py:224 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/openstack/common/service.py:175 +#: cinder/openstack/common/service.py:269 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:187 +msgid "Exception during rpc cleanup." 
+msgstr "" + +#: cinder/openstack/common/service.py:238 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:275 +msgid "Unhandled exception" +msgstr "" + +#: cinder/openstack/common/service.py:308 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/openstack/common/service.py:327 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/openstack/common/service.py:337 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/openstack/common/service.py:354 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/openstack/common/service.py:358 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/service.py:362 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/openstack/common/service.py:392 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/openstack/common/service.py:410 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/openstack/common/sslutils.py:98 +#, python-format +msgid "Invalid SSL version : %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:86 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/strutils.py:182 +#, python-format +msgid "Invalid string format: %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:189 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/openstack/common/versionutils.py:69 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and " +"may be removed in %(remove_in)s." +msgstr "" + +#: cinder/openstack/common/versionutils.py:73 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s and may be removed in " +"%(remove_in)s. It will not be superseded." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:29 +msgid "An unknown error occurred in crypto utils." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:36 +#, python-format +msgid "Block size of %(given)d is too big, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:45 +#, python-format +msgid "Length of %(given)d is too long, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/db/exception.py:44 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:487 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:538 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:610 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/utils.py:33 +msgid "Sort key supplied was not valid." +msgstr "" + +#: cinder/openstack/common/notifier/api.py:129 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:145 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:164 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. 
Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:105 +#, python-format +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:83 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:216 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:299 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:345 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "received %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:422 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:423 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:280 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:459 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:594 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:597 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:631 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:640 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:668 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." 
+msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:166 +#: cinder/openstack/common/rpc/impl_qpid.py:164 +msgid "Failed to process message... skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:477 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:499 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:536 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:552 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:606 +#: cinder/openstack/common/rpc/impl_qpid.py:507 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:624 +#: cinder/openstack/common/rpc/impl_qpid.py:522 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:628 +#: cinder/openstack/common/rpc/impl_qpid.py:526 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:667 +#: cinder/openstack/common/rpc/impl_qpid.py:561 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:84 +#, python-format +msgid "Invalid value for qpid_topology_version: %d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:455 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:461 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:474 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:534 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:101 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:136 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:137 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:138 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:146 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:158 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:200 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:205 +msgid "You cannot send on this socket." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:267 +#, python-format +msgid "Running func with context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:305 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:387 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:437 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:443 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:475 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:481 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:497 +#, python-format +msgid "Required IPC directory does not exist at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:506 +#, python-format +msgid "Permission denied to IPC directory at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:509 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:543 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:562 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:590 +msgid "Skipping topic registration. Already registered." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:597 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:649 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:662 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:675 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:678 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:681 +#, python-format +msgid "Received message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:682 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:691 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:698 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:721 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:724 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:728 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:731 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:771 +#, python-format +msgid "topic is %s." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:815 +#, python-format +msgid "rpc_zmq_matchmaker = %(orig)s is deprecated; use %(new)s instead" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#: cinder/openstack/common/rpc/matchmaker_ring.py:79 +#: cinder/openstack/common/rpc/matchmaker_ring.py:97 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:54 +#, python-format +msgid "extra_spec requirement '%(req)s' does not match '%(cap)s'" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:67 +#, python-format +msgid "%(host_state)s fails resource_type extra_specs requirements" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:43 +msgid "Re-scheduling is disabled." +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:52 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/scheduler/driver.py:69 +msgid "Must implement host_passes_filters" +msgstr "" + +#: cinder/scheduler/driver.py:74 +msgid "Must implement find_retype_host" +msgstr "" + +#: cinder/scheduler/driver.py:78 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:82 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:98 +#, python-format +msgid "cannot place volume %(id)s on %(host)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:114 +#, python-format +msgid "No valid hosts for volume %(id)s with type %(type)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:125 +#, python-format +msgid "" +"Current host not valid for volume %(id)s with type %(type)s, migration " +"not allowed" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:156 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:174 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:207 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:259 +#, python-format +msgid "Filtered %s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:276 +#, python-format +msgid "Choosing %s" +msgstr "" + +#: cinder/scheduler/host_manager.py:264 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:269 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:294 +#, python-format +msgid "volume service is down or disabled. (host: %s)" +msgstr "" + +#: cinder/scheduler/manager.py:63 +msgid "" +"ChanceScheduler and SimpleScheduler have been deprecated due to lack of " +"support for advanced features like: volume types, volume encryption, QoS " +"etc. These two schedulers can be fully replaced by FilterScheduler with " +"certain combination of filters and weighers." 
+msgstr "" + +#: cinder/scheduler/manager.py:98 cinder/scheduler/manager.py:100 +msgid "Failed to create scheduler manager volume flow" +msgstr "" + +#: cinder/scheduler/manager.py:159 +msgid "New volume type not specified in request_spec." +msgstr "" + +#: cinder/scheduler/manager.py:174 +#, python-format +msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." +msgstr "" + +#: cinder/scheduler/manager.py:192 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:68 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%s'" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:43 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:57 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:53 +msgid "No volume_id provided to populate a request_spec from" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:116 +#, python-format +msgid "Failed to schedule_create_volume: %(cause)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:135 +#, python-format +msgid "Failed notifying on %(topic)s payload %(payload)s" +msgstr "" + +#: cinder/tests/fake_driver.py:57 cinder/volume/driver.py:784 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:76 cinder/volume/driver.py:884 +#, python-format +msgid "FAKE ISER: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:97 +msgid "local_path not implemented" +msgstr "" + +#: cinder/tests/fake_driver.py:124 cinder/tests/fake_driver.py:129 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:70 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:78 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:94 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:97 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:58 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_netapp_nfs.py:360 +#, python-format +msgid "Share %(share)s and file name %(file_name)s" +msgstr "" + +#: cinder/tests/test_rbd.py:768 cinder/volume/drivers/rbd.py:175 +msgid "flush() not supported in this version of librbd" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:260 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1507 +#, python-format +msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1510 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1515 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:60 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:61 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: 
cinder/tests/test_xiv_ds8k.py:102 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/tests/api/contrib/test_backups.py:741 +msgid "Invalid input" +msgstr "" + +#: cinder/tests/integrated/test_login.py:29 +#, python-format +msgid "volume: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:32 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:42 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:50 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:58 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:100 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:103 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:121 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:148 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:159 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:166 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/transfer/api.py:68 +msgid "Volume in unexpected state" +msgstr "" + +#: cinder/transfer/api.py:102 cinder/volume/api.py:367 +msgid "status must be available" +msgstr "" + +#: cinder/transfer/api.py:119 +#, python-format +msgid "Failed to create transfer record for %s" +msgstr "" + +#: cinder/transfer/api.py:136 +#, python-format +msgid "Attempt to transfer %s with invalid auth key." +msgstr "" + +#: cinder/transfer/api.py:156 cinder/volume/flows/api/create_volume.py:508 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/transfer/api.py:182 +#, python-format +msgid "Failed to update quota donating volumetransfer id %s" +msgstr "" + +#: cinder/transfer/api.py:199 +#, python-format +msgid "Volume %s has been transferred." 
+msgstr "" + +#: cinder/volume/api.py:143 +#, python-format +msgid "Unable to query if %s is in the availability zone set" +msgstr "" + +#: cinder/volume/api.py:171 cinder/volume/api.py:173 +msgid "Failed to create api volume flow" +msgstr "" + +#: cinder/volume/api.py:202 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:214 +#, python-format +msgid "Volume status must be available or error, but current status is: %s" +msgstr "" + +#: cinder/volume/api.py:224 +msgid "Volume cannot be deleted while migrating" +msgstr "" + +#: cinder/volume/api.py:229 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:285 cinder/volume/api.py:350 +#: cinder/volume/qos_specs.py:240 cinder/volume/volume_types.py:67 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:370 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:377 +msgid "status must be in-use to detach" +msgstr "" + +#: cinder/volume/api.py:388 +msgid "Volume status must be available to reserve" +msgstr "" + +#: cinder/volume/api.py:464 +msgid "Snapshot cannot be created while volume is migrating" +msgstr "" + +#: cinder/volume/api.py:468 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:490 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:502 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:553 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/api.py:581 cinder/volume/flows/api/create_volume.py:208 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:585 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:589 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:720 cinder/volume/api.py:772 +msgid "Volume status must be available/in-use." +msgstr "" + +#: cinder/volume/api.py:723 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/api.py:752 +msgid "Volume status must be available to extend." +msgstr "" + +#: cinder/volume/api.py:757 +#, python-format +msgid "" +"New size for extend must be greater than current size. (current: " +"%(size)s, extended: %(new_size)s)" +msgstr "" + +#: cinder/volume/api.py:778 +msgid "Volume is already part of an active migration" +msgstr "" + +#: cinder/volume/api.py:784 +msgid "volume must not have snapshots" +msgstr "" + +#: cinder/volume/api.py:797 +#, python-format +msgid "No available service named %s" +msgstr "" + +#: cinder/volume/api.py:803 +msgid "Destination host must be different than current host" +msgstr "" + +#: cinder/volume/api.py:833 +msgid "Source volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:837 +msgid "Destination volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:842 +#, python-format +msgid "Destination has migration_status %(stat)s, expected %(exp)s." +msgstr "" + +#: cinder/volume/api.py:853 +msgid "Volume status must be available to update readonly flag." 
+msgstr "" + +#: cinder/volume/api.py:862 +#, python-format +msgid "Unable to update type due to incorrect status on volume: %s" +msgstr "" + +#: cinder/volume/api.py:868 +#, python-format +msgid "Volume %s is already part of an active migration." +msgstr "" + +#: cinder/volume/api.py:874 +#, python-format +msgid "migration_policy must be 'on-demand' or 'never', passed: %s" +msgstr "" + +#: cinder/volume/api.py:887 +#, python-format +msgid "Invalid volume_type passed: %s" +msgstr "" + +#: cinder/volume/api.py:900 +#, python-format +msgid "New volume_type same as original: %s" +msgstr "" + +#: cinder/volume/api.py:915 +msgid "Retype cannot change encryption requirements" +msgstr "" + +#: cinder/volume/api.py:927 +msgid "Retype cannot change front-end qos specs for in-use volumes" +msgstr "" + +#: cinder/volume/driver.py:189 cinder/volume/drivers/netapp/nfs.py:174 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:282 +#, python-format +msgid "copy_data_between_volumes %(src)s -> %(dest)s." +msgstr "" + +#: cinder/volume/driver.py:295 cinder/volume/driver.py:309 +#, python-format +msgid "Failed to attach volume %(vol)s" +msgstr "" + +#: cinder/volume/driver.py:327 +#, python-format +msgid "Failed to copy volume %(src)s to %(dest)d" +msgstr "" + +#: cinder/volume/driver.py:340 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:358 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:394 +#, python-format +msgid "Unable to access the backend storage via the path %(path)s." +msgstr "" + +#: cinder/volume/driver.py:433 +#, python-format +msgid "Creating a new backup for volume %s." +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Restoring backup %(backup)s to volume %(volume)s." +msgstr "" + +#: cinder/volume/driver.py:474 +msgid "Extend volume not implemented" +msgstr "" + +#: cinder/volume/driver.py:533 cinder/volume/drivers/emc/emc_smis_iscsi.py:113 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:546 +#, python-format +msgid "ISCSI discovery attempt failed for:%s" +msgstr "" + +#: cinder/volume/driver.py:548 +#, python-format +msgid "Error from iscsiadm -m discovery: %s" +msgstr "" + +#: cinder/volume/driver.py:595 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:599 cinder/volume/drivers/emc/emc_smis_iscsi.py:156 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:696 +msgid "The volume driver requires the iSCSI initiator name in the connector." +msgstr "" + +#: cinder/volume/driver.py:726 cinder/volume/driver.py:845 +#: cinder/volume/drivers/eqlx.py:247 cinder/volume/drivers/lvm.py:359 +#: cinder/volume/drivers/zadara.py:650 +#: cinder/volume/drivers/emc/emc_smis_common.py:859 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:235 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:602 +#: cinder/volume/drivers/netapp/iscsi.py:1032 +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/nexenta/iscsi.py:538 +#: cinder/volume/drivers/windows/windows.py:205 +msgid "Updating volume stats" +msgstr "" + +#: cinder/volume/driver.py:924 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:203 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." 
+msgstr "" + +#: cinder/volume/manager.py:209 +msgid "" +"ThinLVMVolumeDriver is deprecated, please configure LVMISCSIDriver and " +"lvm_type=thin. Continuing with those settings." +msgstr "" + +#: cinder/volume/manager.py:228 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)" +msgstr "" + +#: cinder/volume/manager.py:235 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s" +msgstr "" + +#: cinder/volume/manager.py:244 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:257 +#, python-format +msgid "Failed to re-export volume %s: setting to error state" +msgstr "" + +#: cinder/volume/manager.py:264 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:271 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:273 +#, python-format +msgid "" +"Error encountered during re-exporting phase of driver initialization: " +"%(name)s" +msgstr "" + +#: cinder/volume/manager.py:283 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:328 cinder/volume/manager.py:330 +msgid "Failed to create manager volume flow" +msgstr "" + +#: cinder/volume/manager.py:374 cinder/volume/manager.py:391 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:380 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:389 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:394 +#, python-format +msgid "Cannot delete volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:422 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:427 +#, python-format +msgid "volume %s: glance metadata deleted" +msgstr "" + +#: cinder/volume/manager.py:430 +#, python-format +msgid "no glance metadata found for volume %s" +msgstr "" + +#: cinder/volume/manager.py:434 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:451 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:462 +#, python-format +msgid "snapshot %(snap_id)s: creating" +msgstr "" + +#: cinder/volume/manager.py:490 +#, python-format +msgid "" +"Failed updating %(snapshot_id)s metadata using the provided volumes " +"%(volume_id)s metadata" +msgstr "" + +#: cinder/volume/manager.py:496 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:508 cinder/volume/manager.py:518 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:526 +#, python-format +msgid "Cannot delete snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:556 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:559 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:579 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:583 +msgid "being attached by another host" +msgstr "" + +#: cinder/volume/manager.py:587 +msgid "being attached by different mode" +msgstr "" + +#: cinder/volume/manager.py:590 +msgid "status must be available or attaching" +msgstr "" + +#: cinder/volume/manager.py:698 +#, python-format +msgid 
"Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "" + +#: cinder/volume/manager.py:760 +#, python-format +msgid "Unable to fetch connection information from backend: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:807 +#, python-format +msgid "Unable to terminate volume connection: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:854 +msgid "failed to create new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:857 +msgid "timeout creating new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:880 +#, python-format +msgid "Failed to copy volume %(vol1)s to %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:909 +#, python-format +msgid "" +"migrate_volume_completion: completing migration for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:921 +#, python-format +msgid "" +"migrate_volume_completion is cleaning up an error for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:940 +#, python-format +msgid "Failed to delete migration source vol %(vol)s: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:976 +#, python-format +msgid "volume %s: calling driver migrate_volume" +msgstr "" + +#: cinder/volume/manager.py:1016 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/manager.py:1024 +#, python-format +msgid "" +"Unable to update stats, %(driver_name)s -%(driver_version)s " +"%(config_group)s driver is uninitialized." +msgstr "" + +#: cinder/volume/manager.py:1044 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/manager.py:1091 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to extend volume by %(s_size)sG, " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/manager.py:1103 +#, python-format +msgid "volume %s: extending" +msgstr "" + +#: cinder/volume/manager.py:1105 +#, python-format +msgid "volume %s: extended successfully" +msgstr "" + +#: cinder/volume/manager.py:1107 +#, python-format +msgid "volume %s: Error trying to extend volume" +msgstr "" + +#: cinder/volume/manager.py:1169 +msgid "Failed to update usages while retyping volume." +msgstr "" + +#: cinder/volume/manager.py:1170 +msgid "Failed to get old volume type quota reservations" +msgstr "" + +#: cinder/volume/manager.py:1190 +#, python-format +msgid "Volume %s: retyped succesfully" +msgstr "" + +#: cinder/volume/manager.py:1193 +#, python-format +msgid "" +"Volume %s: driver error when trying to retype, falling back to generic " +"mechanism." +msgstr "" + +#: cinder/volume/manager.py:1204 +msgid "Retype requires migration but is not allowed." +msgstr "" + +#: cinder/volume/manager.py:1212 +msgid "Volume must not have snapshots." 
+msgstr "" + +#: cinder/volume/qos_specs.py:57 +#, python-format +msgid "Valid consumer of QoS specs are: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:84 cinder/volume/qos_specs.py:105 +#: cinder/volume/qos_specs.py:155 cinder/volume/qos_specs.py:197 +#: cinder/volume/qos_specs.py:211 cinder/volume/qos_specs.py:225 +#: cinder/volume/volume_types.py:43 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:123 cinder/volume/qos_specs.py:140 +#: cinder/volume/qos_specs.py:272 cinder/volume/volume_types.py:52 +#: cinder/volume/volume_types.py:99 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/qos_specs.py:156 +#, python-format +msgid "Failed to get all associations of qos specs %s" +msgstr "" + +#: cinder/volume/qos_specs.py:189 +#, python-format +msgid "" +"Type %(type_id)s is already associated with another qos specs: " +"%(qos_specs_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:198 +#, python-format +msgid "Failed to associate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:212 +#, python-format +msgid "Failed to disassociate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:226 +#, python-format +msgid "Failed to disassociate qos specs %s." +msgstr "" + +#: cinder/volume/qos_specs.py:284 cinder/volume/volume_types.py:111 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/utils.py:144 +#, python-format +msgid "" +"Incorrect value error: %(blocksize)s, it may indicate that " +"'volume_dd_blocksize' was configured incorrectly. Fall back to default." +msgstr "" + +#: cinder/volume/volume_types.py:130 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:131 +#: cinder/volume/drivers/block_device.py:143 cinder/volume/drivers/lvm.py:654 +#: cinder/volume/drivers/lvm.py:669 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:157 cinder/volume/drivers/lvm.py:687 +#, python-format +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:183 cinder/volume/drivers/lvm.py:483 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:200 cinder/volume/drivers/lvm.py:504 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:272 cinder/volume/drivers/lvm.py:227 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:287 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:311 cinder/volume/drivers/lvm.py:300 +#: cinder/volume/drivers/zadara.py:509 cinder/volume/drivers/nexenta/nfs.py:189 +#, python-format +msgid "Creating clone of volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:380 +msgid "No free disk" +msgstr "" + +#: cinder/volume/drivers/block_device.py:393 +msgid "No big enough free disk" +msgstr "" + +#: cinder/volume/drivers/coraid.py:84 +#, python-format +msgid "Invalid ESM url scheme \"%s\". Supported https only." +msgstr "" + +#: cinder/volume/drivers/coraid.py:111 +msgid "Invalid REST handle name. Expected path." 
+msgstr "" + +#: cinder/volume/drivers/coraid.py:134 +#, python-format +msgid "Call to json.loads() failed: %(ex)s. Response: %(resp)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:224 +msgid "Session is expired. Relogin on ESM." +msgstr "" + +#: cinder/volume/drivers/coraid.py:244 +msgid "Reply is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:246 +msgid "Error message is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:284 +#, python-format +msgid "Coraid Appliance ping failed: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:297 +#, python-format +msgid "Volume \"%(name)s\" created with VSX LUN \"%(lun)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:311 +#, python-format +msgid "Volume \"%s\" deleted." +msgstr "" + +#: cinder/volume/drivers/coraid.py:315 +#, python-format +msgid "Resize volume \"%(name)s\" to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:319 +#, python-format +msgid "Repository for volume \"%(name)s\" found: \"%(repo)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:333 +#, python-format +msgid "Volume \"%(name)s\" resized. New size is %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:385 +msgid "Cannot create clone volume in different repository." +msgstr "" + +#: cinder/volume/drivers/coraid.py:505 +#, python-format +msgid "Initialize connection %(shelf)s/%(lun)s for %(name)s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:139 +#, python-format +msgid "" +"CLI output\n" +"%s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:154 +msgid "Reading CLI MOTD" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:158 +#, python-format +msgid "Setting CLI terminal width: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:162 +#, python-format +msgid "Sending CLI command: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:169 +msgid "Error executing EQL command" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:199 +#, python-format +msgid "EQL-driver: executing \"%s\"" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:208 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:383 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:215 cinder/volume/drivers/san/san.py:149 +#, python-format +msgid "Error running SSH command: %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:282 +#, python-format +msgid "Volume %s does not exist, it may have already been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:300 +#, python-format +msgid "EQL-driver: Setup is complete, group IP is %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:304 +msgid "Failed to setup the Dell EqualLogic driver" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:320 +#, python-format +msgid "Failed to create volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:329 +#, python-format +msgid "Volume %s was not found while trying to delete it" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:333 +#, python-format +msgid "Failed to delete volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:348 +#, python-format +msgid "Failed to create snapshot of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:361 +#, python-format +msgid "Failed to create volume from snapshot %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:374 +#, python-format +msgid "Failed to create clone of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:384 +#, python-format +msgid "Failed to delete snapshot %(snap)s of volume %(vol)s" +msgstr "" + +#: 
cinder/volume/drivers/eqlx.py:405 +#, python-format +msgid "Failed to initialize connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:415 +#, python-format +msgid "Failed to terminate connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:436 +#, python-format +msgid "Volume %s is not found!, it may have been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:440 +#, python-format +msgid "Failed to ensure export of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:459 +#, python-format +msgid "Failed to extend_volume %(name)s from %(current_size)sGB to %(new_size)sGB" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:86 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:91 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:103 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:161 +#, python-format +msgid "Cloning volume %(src)s to volume %(dst)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:166 +msgid "Volume status must be 'available'." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:202 cinder/volume/drivers/nfs.py:122 +#: cinder/volume/drivers/netapp/nfs.py:753 +#, python-format +msgid "casted to %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:216 +msgid "Snapshot status must be \"available\" to clone." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:238 +#, python-format +msgid "snapshot: %(snap)s, volume: %(vol)s, volume_size: %(size)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:257 +#, python-format +msgid "will copy from snapshot at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:275 cinder/volume/drivers/nfs.py:172 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:373 +#, python-format +msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:403 +#, python-format +msgid "nova call result: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:405 +msgid "Call to Nova to create snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:427 +msgid "Nova returned \"error\" status while creating snapshot." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:431 +#, python-format +msgid "Status of snapshot %(id)s is now %(status)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:444 +#, python-format +msgid "Timed out while waiting for Nova update for creation of snapshot %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:456 +#, python-format +msgid "create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:457 +#, python-format +msgid "volume id: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:532 +msgid "'active' must be present when writing snap_info." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:562 +#, python-format +msgid "deleting snapshot %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:566 +msgid "Volume status must be \"available\" or \"in-use\"." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:582 +#, python-format +msgid "" +"Snapshot record for %s is not present, allowing snapshot_delete to " +"proceed." 
+msgstr "" + +#: cinder/volume/drivers/glusterfs.py:587 +#, python-format +msgid "snapshot_file for this snap is %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:608 +#, python-format +msgid "No base file found for %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:625 +#, python-format +msgid "No %(base_id)s found for %(file)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:680 +#, python-format +msgid "No file found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:690 +#, python-format +msgid "No snap found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:701 +#, python-format +msgid "No file depends on %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:727 +#, python-format +msgid "Check condition failed: %s expected to be None." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:778 +msgid "Call to Nova delete snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:796 +#, python-format +msgid "status of snapshot %s is still \"deleting\"... waiting" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:802 +#, python-format +msgid "Unable to delete snapshot %(id)s, status: %(status)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:815 +#, python-format +msgid "Timed out while waiting for Nova update for deletion of snapshot %(id)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:904 +#, python-format +msgid "%s must be a valid raw or qcow2 image." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:967 +msgid "Extend volume is only supported for this driver when no snapshots exist." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:975 +#, python-format +msgid "Unrecognized backing format: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:990 +#, python-format +msgid "creating new volume at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:993 +#, python-format +msgid "file already exists at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1019 cinder/volume/drivers/nfs.py:159 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1021 +#, python-format +msgid "Available shares: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1038 +#, python-format +msgid "" +"GlusterFS share at %(dir)s is not writable by the Cinder volume service. " +"Snapshot operations will not be supported." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:96 +#, python-format +msgid "GPFS is not active. Detailed output: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:97 +#, python-format +msgid "GPFS is not running - state: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:140 +msgid "Option gpfs_mount_point_base is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:147 +msgid "Option gpfs_images_share_mode is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:153 +msgid "Option gpfs_images_dir is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:160 +#, python-format +msgid "" +"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " +"belong to different file systems" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:169 +#, python-format +msgid "" +"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in " +"cluster daemon level %(cur)s - must be at least at level %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:183 +#, python-format +msgid "%s must be an absolute path." 
+msgstr "" + +#: cinder/volume/drivers/gpfs.py:188 +#, python-format +msgid "%s is not a directory." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:197 +#, python-format +msgid "" +"The GPFS filesystem %(fs)s is not at the required release level. Current" +" level is %(cur)s, must be at least %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:556 +#, python-format +msgid "Failed to resize volume %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:604 +#, python-format +msgid "mkfs failed on volume %(vol)s, error message was: %(err)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:637 +#, python-format +msgid "" +"%s cannot be accessed. Verify that GPFS is active and file system is " +"mounted." +msgstr "" + +#: cinder/volume/drivers/lvm.py:189 +#, python-format +msgid "Unabled to delete due to existing snapshot for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:215 +#, python-format +msgid "Volume device file path %s does not exist." +msgstr "" + +#: cinder/volume/drivers/lvm.py:221 +#, python-format +msgid "Size for volume: %s not found, cannot secure delete." +msgstr "" + +#: cinder/volume/drivers/lvm.py:262 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:361 +#, python-format +msgid "Unable to update stats on non-initialized Volume Group: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:462 +#, python-format +msgid "Error creating iSCSI target, retrying creation for target: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:482 +#, python-format +msgid "volume_info:%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:518 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:519 cinder/volume/drivers/lvm.py:724 +#: cinder/volume/drivers/huawei/rest_common.py:1225 +#, python-format +msgid "%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:573 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/nfs.py:109 +msgid "Driver specific implementation needs to return mount_point_base." +msgstr "" + +#: cinder/volume/drivers/nfs.py:263 +#, python-format +msgid "Expected volume size was %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:264 +#, python-format +msgid " but size is now %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:361 +#, python-format +msgid "%s is already mounted" +msgstr "" + +#: cinder/volume/drivers/nfs.py:421 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:426 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/nfs.py:431 +#, python-format +msgid "NFS config 'nfs_oversub_ratio' invalid. Must be > 0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:439 +#, python-format +msgid "NFS config 'nfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:493 +#, python-format +msgid "Selected %s as target nfs share." 
+msgstr "" + +#: cinder/volume/drivers/nfs.py:526 +#, python-format +msgid "%s is above nfs_used_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:529 +#, python-format +msgid "%s is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:532 +#, python-format +msgid "%s reserved space is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/rbd.py:160 +#, python-format +msgid "Invalid argument - whence=%s not supported" +msgstr "" + +#: cinder/volume/drivers/rbd.py:164 +msgid "Invalid argument" +msgstr "" + +#: cinder/volume/drivers/rbd.py:183 +msgid "fileno() not supported by RBD()" +msgstr "" + +#: cinder/volume/drivers/rbd.py:210 +#, python-format +msgid "error opening rbd image %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:259 +msgid "rados and rbd python libraries not found" +msgstr "" + +#: cinder/volume/drivers/rbd.py:265 +msgid "error connecting to ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:346 cinder/volume/drivers/sheepdog.py:178 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:377 +#, python-format +msgid "clone depth exceeds limit of %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:411 +#, python-format +msgid "maximum clone depth (%d) has been reached - flattening source volume" +msgstr "" + +#: cinder/volume/drivers/rbd.py:423 +#, python-format +msgid "flattening source volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:435 +#, python-format +msgid "creating snapshot='%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:445 +#, python-format +msgid "cloning '%(src_vol)s@%(src_snap)s' to '%(dest)s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:459 +msgid "clone created successfully" +msgstr "" + +#: cinder/volume/drivers/rbd.py:468 +#, python-format +msgid "creating volume '%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:484 +#, python-format +msgid "flattening %(pool)s/%(img)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:490 +#, python-format +msgid "cloning %(pool)s/%(img)s@%(snap)s to %(dst)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:527 +msgid "volume has no backup snaps" +msgstr "" + +#: cinder/volume/drivers/rbd.py:550 +#, python-format +msgid "volume %s is not a clone" +msgstr "" + +#: cinder/volume/drivers/rbd.py:568 +#, python-format +msgid "deleting parent snapshot %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:579 +#, python-format +msgid "deleting parent %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:593 +#, python-format +msgid "volume %s no longer exists in backend" +msgstr "" + +#: cinder/volume/drivers/rbd.py:609 +msgid "volume has clone snapshot(s)" +msgstr "" + +#: cinder/volume/drivers/rbd.py:625 +#, python-format +msgid "deleting rbd volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:629 +msgid "" +"ImageBusy error raised while deleting rbd volume. This may have been " +"caused by a connection from a client that has crashed and, if so, may be " +"resolved by retrying the delete after 30 seconds has elapsed." 
+msgstr "" + +#: cinder/volume/drivers/rbd.py:642 +msgid "volume is a clone so cleaning references" +msgstr "" + +#: cinder/volume/drivers/rbd.py:696 +#, python-format +msgid "connection data: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:705 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:709 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:712 +msgid "Not an rbd snapshot" +msgstr "" + +#: cinder/volume/drivers/rbd.py:724 +#, python-format +msgid "not cloneable: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:728 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:733 +msgid "rbd image clone requires image format to be 'raw' but image {0} is '{1}'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:747 +#, python-format +msgid "Unable to open image %(loc)s: %(err)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:817 +msgid "volume backup complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:830 +msgid "volume restore complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:840 cinder/volume/drivers/sheepdog.py:195 +#, python-format +msgid "Failed to Extend Volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:845 cinder/volume/drivers/sheepdog.py:200 +#: cinder/volume/drivers/windows/windows.py:223 +#, python-format +msgid "Extend volume from %(old_size)s GB to %(new_size)s GB." +msgstr "" + +#: cinder/volume/drivers/scality.py:67 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:78 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:84 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:105 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:139 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:59 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:64 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:144 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:151 +#, python-format +msgid "" +"Failed to make httplib connection SolidFire Cluster: %s (verify san_ip " +"settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:154 +#, python-format +msgid "Failed to make httplib connection: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:161 +#, python-format +msgid "" +"Request to SolidFire cluster returned bad status: %(status)s / %(reason)s" +" (check san_login/san_password settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:166 +#, python-format +msgid "HTTP request failed, with status: %(status)s and reason: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:177 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:183 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:187 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:189 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:195 +#, python-format +msgid "Detected 
xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:202 +#: cinder/volume/drivers/solidfire.py:271 +#: cinder/volume/drivers/solidfire.py:366 +#, python-format +msgid "API response: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:222 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:253 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:315 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:398 +msgid "Failed to get model update from clone" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:410 +#, python-format +msgid "Failed volume create: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:425 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:460 +#, python-format +msgid "Failed to get SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:469 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:478 +#, python-format +msgid "Volume %s, not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:481 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:550 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:554 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:556 +msgid "This usually means the volume was never successfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:569 +#, python-format +msgid "Failed to delete SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:572 +#: cinder/volume/drivers/solidfire.py:646 +#: cinder/volume/drivers/solidfire.py:709 +#: cinder/volume/drivers/solidfire.py:734 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:575 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:579 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:587 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:638 +msgid "Entering SolidFire extend_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:660 +msgid "Leaving SolidFire extend_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:665 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:673 +msgid "Failed to get updated stats" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:703 +#: cinder/volume/drivers/solidfire.py:728 +msgid "Entering SolidFire attach_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:773 +msgid "Leaving SolidFire transfer volume" +msgstr "" + +#: cinder/volume/drivers/zadara.py:236 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:260 +#, python-format +msgid "Operation completed. 
%(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:357 +#, python-format +msgid "Pool %(name)s: %(total)sGB total, %(free)sGB free" +msgstr "" + +#: cinder/volume/drivers/zadara.py:408 cinder/volume/drivers/zadara.py:531 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:438 +#, python-format +msgid "Create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:445 cinder/volume/drivers/zadara.py:490 +#: cinder/volume/drivers/zadara.py:516 +#, python-format +msgid "Volume %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:456 +#, python-format +msgid "Delete snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:464 +#, python-format +msgid "snapshot: original volume %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:472 +#, python-format +msgid "snapshot: snapshot %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:483 +#, python-format +msgid "Creating volume from snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:496 +#, python-format +msgid "Snapshot %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:614 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:40 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:79 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:83 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:91 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:98 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:107 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:115 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:130 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:137 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:144 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:152 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:157 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:167 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:177 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:188 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:197 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:218 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:230 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:241 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:257 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:266 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:278 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:287 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:292 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:302 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:312 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:321 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:342 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. 
Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:354 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:365 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:381 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:390 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:402 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:411 +msgid "Entering delete_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:413 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:420 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:430 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:438 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:442 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:456 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:465 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:472 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:476 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:488 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:497 +#: cinder/volume/drivers/emc/emc_smis_common.py:567 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:502 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:518 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:527 +#, python-format +msgid "" +"Error Create Snapshot: %(snapshot)s Volume: %(volume)s Error: " +"%(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:535 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:541 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:545 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:551 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:559 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:574 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:590 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:599 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:611 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:621 +#, python-format +msgid "Create export: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:626 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:648 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:663 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:674 +#, python-format +msgid "Error mapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:678 +#, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:694 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:707 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:711 +#, python-format +msgid "HidePaths for volume %s completed successfully." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:724 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:739 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:744 +#, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:757 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:770 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:775 +#, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:781 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:790 +#: cinder/volume/drivers/emc/emc_smis_common.py:820 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:804 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:810 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:834 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:840 +#, python-format +msgid "Volume %s is already mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:852 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:884 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:887 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:903 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:906 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:928 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:948 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:952 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:959 +msgid "Cannot connect to ECOM server" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:971 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:984 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:997 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1010 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1054 +#, python-format +msgid "Pool %(storage_type)s is not found." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1060 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1066 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1082 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1114 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1117 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1130 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1158 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1184 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1188 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1248 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1289 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1302 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1314 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1326 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1361 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1404 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1409 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1419 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1441 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1463 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1491 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1520 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1526 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1538 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1548 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1550 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1566 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:152 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:161 +#, python-format +msgid "Cannot find device number for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:191 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:198 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:215 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:70 +#, python-format +msgid "Range: start LU: %(start)s, end LU: %(end)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:84 +#, python-format +msgid "setting LU upper (end) limit to %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:92 +#, python-format +msgid "%(element)s: %(val)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:103 cinder/volume/drivers/hds/hds.py:105 +#, python-format +msgid "XML exception reading parameter: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:178 +#, python-format +msgid "portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:197 +#, python-format +msgid "No configuration found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:250 +#, python-format +msgid "HDP not found: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:289 +#, python-format +msgid "iSCSI portal not found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:327 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:355 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is cloned." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:372 +#, python-format +msgid "LUN %(lun)s extended to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:395 +#, python-format +msgid "delete lun %(lun)s on %(name)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:480 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created from snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:503 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is created as snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:522 +#, python-format +msgid "LUN %s is deleted." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:57 +msgid "_instantiate_driver: configuration not found." 
+msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:64 +#, python-format +msgid "" +"_instantiate_driver: Loading %(protocol)s driver for Huawei OceanStor " +"%(product)s series storage arrays." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:84 +#, python-format +msgid "" +"\"Product\" or \"Protocol\" is illegal. \"Product\" should be set to " +"either T, Dorado or HVS. \"Protocol\" should be set to either iSCSI or " +"FC. Product: %(product)s Protocol: %(protocol)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:74 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s host: %(host)s initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:92 +#: cinder/volume/drivers/huawei/huawei_t.py:461 +#, python-format +msgid "initialize_connection: Target FC ports WWNS: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:101 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(ini)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:159 +#: cinder/volume/drivers/huawei/rest_common.py:1278 +#, python-format +msgid "" +"_get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " +"check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:206 +#: cinder/volume/drivers/huawei/rest_common.py:1083 +#, python-format +msgid "_get_tgt_iqn: iSCSI IP is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:234 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:248 +#, python-format +msgid "" +"_get_iscsi_tgt_port_info: Failed to get iSCSI port info. Please make sure" +" the iSCSI port IP %s is configured in array." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:323 +#: cinder/volume/drivers/huawei/huawei_t.py:552 +#, python-format +msgid "" +"terminate_connection: volume: %(vol)s, host: %(host)s, connector: " +"%(initiator)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:351 +#, python-format +msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:436 +msgid "validate_connector: The FC driver requires thewwpns in the connector." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:443 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:578 +#, python-format +msgid "_remove_fc_ports: FC port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:40 +#, python-format +msgid "parse_xml_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:129 +#, python-format +msgid "_get_host_os_type: Host %(ip)s OS type is %(os)s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:59 +#, python-format +msgid "HVS Request URL: %(url)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:60 +#, python-format +msgid "HVS Request Data: %(data)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:73 +#, python-format +msgid "HVS Response Data: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:75 +#, python-format +msgid "Bad response from server: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:82 +msgid "JSON transfer error" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:102 +#, python-format +msgid "Login error, reason is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:166 +#, python-format +msgid "" +"%(err)s\n" +"result: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:173 +#, python-format +msgid "%s \"data\" was not in result." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:208 +msgid "Can't find the Qos policy in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:246 +msgid "Can't find lun or lun group in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:280 +#, python-format +msgid "Invalid resource pool: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:298 +#, python-format +msgid "Get pool info error, pool name is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:327 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:354 +#, python-format +msgid "_stop_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:474 +#, python-format +msgid "" +"_mapping_hostgroup_and_lungroup: lun_group: %(lun_group)sview_id: " +"%(view_id)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:511 +#: cinder/volume/drivers/huawei/rest_common.py:543 +#, python-format +msgid "initiator name:%(initiator_name)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:527 +#, python-format +msgid "host lun id is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:553 +#, python-format +msgid "the free wwns %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:574 +#, python-format +msgid "the fc server properties is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:688 +#, python-format +msgid "JSON transfer data error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:874 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:937 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:964 +#, python-format +msgid "" +"PrefetchType config is wrong. PrefetchType must in 1,2,3,4. fetchtype " +"is:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:970 +msgid "Use default prefetch fetchtype. Prefetch fetchtype:Intelligent." 
+msgstr ""
+
+#: cinder/volume/drivers/huawei/rest_common.py:982
+#, python-format
+msgid ""
+"_wait_for_luncopy: LUNcopy status is not normal. LUNcopy name: "
+"%(luncopyname)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/rest_common.py:1056
+#, python-format
+msgid ""
+"_get_iscsi_port_info: Failed to get iscsi port info through config IP "
+"%(ip)s, please check the config file."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/rest_common.py:1101
+#, python-format
+msgid "_get_tgt_iqn: iSCSI target iqn is %s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/rest_common.py:1124
+#, python-format
+msgid "_parse_volume_type: type id: %(type_id)s config parameter is: %(params)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/rest_common.py:1157
+#, python-format
+msgid ""
+"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key"
+" in extra_specs and make it consistent with the configuration file "
+"%(conf)s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/rest_common.py:1162
+#, python-format
+msgid "The config parameters are: %s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/rest_common.py:1239
+#: cinder/volume/drivers/huawei/ssh_common.py:118
+#: cinder/volume/drivers/huawei/ssh_common.py:1265
+#, python-format
+msgid "_check_conf_file: Config file invalid. %s must be set."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/rest_common.py:1246
+#: cinder/volume/drivers/huawei/ssh_common.py:125
+msgid "_check_conf_file: Config file invalid. StoragePool must be set."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/rest_common.py:1256
+#, python-format
+msgid ""
+"_check_conf_file: Config file invalid. Host OSType invalid.\n"
+"The valid values are: %(os_list)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/rest_common.py:1300
+msgid "Cannot find lun in array"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:54
+#, python-format
+msgid "ssh_read: Read SSH timeout. %s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:70
+msgid "No response message. Please check system status."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:101
+#: cinder/volume/drivers/huawei/ssh_common.py:1249
+msgid "do_setup"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:135
+#: cinder/volume/drivers/huawei/ssh_common.py:1287
+#, python-format
+msgid ""
+"_check_conf_file: Config file invalid. Host OSType is invalid.\n"
+"The valid values are: %(os_list)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:169
+#, python-format
+msgid "_get_login_info: %s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:224
+#, python-format
+msgid "create_volume: volume name: %s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:242
+#, python-format
+msgid ""
+"_name_translate: Name in cinder: %(old)s, new name in storage system: "
+"%(new)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:279
+#, python-format
+msgid ""
+"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key"
+" in extra_specs and make it consistent with the element in configuration "
+"file %(conf)s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:373
+#: cinder/volume/drivers/huawei/ssh_common.py:1451
+#, python-format
+msgid "LUNType must be \"Thin\" or \"Thick\". LUNType: %(type)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:395
+msgid ""
+"_parse_conf_lun_params: Use default prefetch type. Prefetch type: "
+"Intelligent"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:421
+#, python-format
+msgid ""
+"_get_maximum_capacity_pool_id: Failed to get pool id. Please check the "
+"config file and make sure the StoragePool %s is created in the storage "
+"array."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:436
+#, python-format
+msgid "CLI command: %s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:466
+#, python-format
+msgid ""
+"_execute_cli: Cannot connect to IP %(old)s, trying to connect to the "
+"other IP %(new)s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:501
+#, python-format
+msgid "_execute_cli: %s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:511
+#, python-format
+msgid "delete_volume: volume name: %s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:516
+#, python-format
+msgid "delete_volume: Volume %(name)s does not exist."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:570
+#, python-format
+msgid ""
+"create_volume_from_snapshot: snapshot name: %(snapshot)s, volume name: "
+"%(volume)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:580
+#, python-format
+msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:650
+#, python-format
+msgid "_wait_for_luncopy: LUNcopy %(luncopyname)s status is %(status)s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:688
+#, python-format
+msgid "create_cloned_volume: src volume: %(src)s, tgt volume: %(tgt)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:697
+#, python-format
+msgid "Source volume %(name)s does not exist."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:739
+#, python-format
+msgid ""
+"extend_volume: extended volume name: %(extended_name)s, new added volume "
+"name: %(added_name)s, new added volume size: %(added_size)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:747
+#, python-format
+msgid "extend_volume: volume %s does not exist."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:779
+#, python-format
+msgid "create_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:785
+msgid "create_snapshot: Resource pool needs at least 1GB valid size."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:792
+#, python-format
+msgid "create_snapshot: Volume %(name)s does not exist."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:855
+#, python-format
+msgid "delete_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:865
+#, python-format
+msgid ""
+"delete_snapshot: Cannot delete snapshot %s because it is a source LUN of "
+"LUNCopy."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:873
+#, python-format
+msgid "delete_snapshot: Snapshot %(snap)s does not exist."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:916
+#, python-format
+msgid ""
+"%(func)s: %(msg)s\n"
+"CLI command: %(cmd)s\n"
+"CLI out: %(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:933
+#, python-format
+msgid "map_volume: Volume %s was not found."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:1079
+#, python-format
+msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:1102
+#, python-format
+msgid "remove_map: Host %s does not exist."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:1106
+#, python-format
+msgid "remove_map: Volume %s does not exist."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:1119
+#, python-format
+msgid "remove_map: No map between host %(host)s and volume %(volume)s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:1138
+#, python-format
+msgid ""
+"_delete_map: There are IOs accessing the system. Retrying to delete host "
+"map %(mapid)s 10s later."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:1146
+#, python-format
+msgid ""
+"_delete_map: Failed to delete host map %(mapid)s.\n"
+"CLI out: %(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:1185
+msgid "_update_volume_stats: Updating volume stats."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:1277
+msgid "_check_conf_file: Config file invalid. StoragePool must be specified."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:1311
+msgid ""
+"_get_device_type: The driver only supports Dorado5100 and Dorado 2100 G2 "
+"now."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:1389
+#, python-format
+msgid ""
+"create_volume_from_snapshot: %(device)s does not support creating volumes"
+" from snapshots."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:1396
+#, python-format
+msgid "create_cloned_volume: %(device)s does not support cloning volumes."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:1404
+#, python-format
+msgid "extend_volume: %(device)s does not support extending volumes."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:1413
+#, python-format
+msgid "create_snapshot: %(device)s does not support snapshots."
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:132
+msgid "enter: do_setup"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:142
+#, python-format
+msgid "Failed getting details for pool %s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:179
+msgid "do_setup: No configured nodes."
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:182 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:186 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:190 +msgid "Unable to determine system name" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:193 +msgid "Unable to determine system id" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:196 +msgid "Unable to determine pool extent size" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:203 +#: cinder/volume/drivers/netapp/iscsi.py:122 +#: cinder/volume/drivers/netapp/nfs.py:639 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:157 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:209 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:217 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:225 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:235 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:254 +msgid "The connector does not contain the required information." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:277 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:297 +msgid "CHAP secret exists for host but CHAP is disabled" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:302 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:314 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:316 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:333 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:342 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:365 +msgid "" +"Could not get FC connection information for the host-volume connection. " +"Is the host configured properly for FC connections?" 
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:380
+#, python-format
+msgid ""
+"initialize_connection: Failed to collect return properties for volume "
+"%(vol)s and connector %(conn)s.\n"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:385
+#, python-format
+msgid ""
+"leave: initialize_connection:\n"
+" volume: %(vol)s\n"
+" connector: %(conn)s\n"
+" properties: %(prop)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:403
+#, python-format
+msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:411
+msgid "terminate_connection: Failed to get host name from connector."
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:421
+#, python-format
+msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:447
+msgid "create_volume_from_snapshot: Source and destination sizes differ."
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:459
+msgid "create_cloned_volume: Source and destination sizes differ."
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:470
+#, python-format
+msgid "enter: extend_volume: volume %s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:474
+msgid "extend_volume: Extending a volume with snapshots is not supported."
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:481
+#, python-format
+msgid "leave: extend_volume: volume %s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:497
+#, python-format
+msgid "enter: migrate_volume: id=%(id)s, host=%(host)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:523
+#, python-format
+msgid "leave: migrate_volume: id=%(id)s, host=%(host)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:540
+#, python-format
+msgid ""
+"enter: retype: id=%(id)s, new_type=%(new_type)s, diff=%(diff)s, "
+"host=%(host)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:581
+#, python-format
+msgid ""
+"exit: retype: id=%(id)s, new_type=%(new_type)s, diff=%(diff)s, "
+"host=%(host)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:622
+msgid "Could not get pool data from the storage"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:623
+msgid "_update_volume_stats: Could not get storage pool data"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:44
+#, python-format
+msgid "Could not find key in output of command %(cmd)s: %(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:64
+#, python-format
+msgid "Failed to get code level (%s)."
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:86
+#, python-format
+msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:143
+#, python-format
+msgid "WWPN on node %(node)s: %(wwpn)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:165
+#, python-format
+msgid "Failed to find host %s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:178
+#, python-format
+msgid "enter: get_host_from_connector: %s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:207
+#, python-format
+msgid "leave: get_host_from_connector: host %s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:218
+#, python-format
+msgid "enter: create_host: host %s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:224
+msgid "create_host: Host name is not a unicode or string value."
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:235
+msgid "create_host: No initiators or wwpns supplied."
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:265
+#, python-format
+msgid "leave: create_host: host %(host)s - %(host_name)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:275
+#, python-format
+msgid "enter: map_vol_to_host: volume %(volume_name)s to host %(host_name)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:301
+#, python-format
+msgid ""
+"leave: map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host "
+"%(host_name)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:311
+#, python-format
+msgid "enter: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:318
+#, python-format
+msgid "unmap_vol_from_host: No mapping of volume %(vol_name)s to any host found."
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:324
+#, python-format
+msgid ""
+"unmap_vol_from_host: Multiple mappings of volume %(vol_name)s found, no "
+"host specified."
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:336
+#, python-format
+msgid ""
+"unmap_vol_from_host: No mapping of volume %(vol_name)s to host %(host)s "
+"found."
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:348
+#, python-format
+msgid "leave: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:377
+msgid ""
+"Illegal value specified for storwize_svc_vol_rsize: set to either a "
+"percentage (0-100) or -1"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:383
+msgid ""
+"Illegal value specified for storwize_svc_vol_warning: set to a percentage"
+" (0-100)"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:390
+msgid ""
+"Illegal value specified for storwize_svc_vol_grainsize: set to either 32,"
+" 64, 128, or 256"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:397
+msgid "System does not support compression"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:402
+msgid "If compression is set to True, rsize must also be set (not equal to -1)"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:408
+#, python-format
+msgid ""
+"Illegal value %(prot)s specified for storwize_svc_connection_protocol: "
+"valid values are %(enabled)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:417
+#, python-format
+msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:452
+msgid "Protocol must be specified as 'iSCSI' or 'FC'."
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:495
+#, python-format
+msgid "enter: create_vdisk: vdisk %s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:498
+#, python-format
+msgid "leave: _create_vdisk: volume %s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:525
+#, python-format
+msgid ""
+"Unexpected mapping status %(status)s for mapping %(id)s. Attributes: "
+"%(attr)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:535
+#, python-format
+msgid ""
+"Mapping %(id)s prepare failed to complete within the allotted %(to)d "
+"seconds timeout. Terminating."
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:544
+#, python-format
+msgid ""
+"enter: run_flashcopy: execute FlashCopy from source %(source)s to target "
+"%(target)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:552
+#, python-format
+msgid "leave: run_flashcopy: FlashCopy started from %(source)s to %(target)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:572
+#, python-format
+msgid "Loopcall: _check_vdisk_fc_mappings(), vdisk %s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:595
+#, python-format
+msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:631
+#, python-format
+msgid "Calling _ensure_vdisk_no_fc_mappings: vdisk %s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:639
+#, python-format
+msgid "enter: delete_vdisk: vdisk %s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:641
+#, python-format
+msgid "Tried to delete non-existent vdisk %s."
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:645
+#, python-format
+msgid "leave: delete_vdisk: vdisk %s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:649
+#, python-format
+msgid "enter: create_copy: snapshot %(src)s to %(tgt)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:654
+#, python-format
+msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:669
+#, python-format
+msgid "leave: _create_copy: snapshot %(tgt)s from vdisk %(src)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:691
+msgid "migrate_volume started without a vdisk copy in the expected pool."
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:743
+#, python-format
+msgid ""
+"Ignoring IO group change as storage code level is %(code_level)s, below "
+"6.4.0.0"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:35
+#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:211
+#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:244
+#, python-format
+msgid ""
+"CLI Exception output:\n"
+" command: %(cmd)s\n"
+" stdout: %(out)s\n"
+" stderr: %(err)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:53
+#, python-format
+msgid "Expected no output from CLI command %(cmd)s, got %(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:65
+#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:256
+#, python-format
+msgid ""
+"Failed to parse CLI output:\n"
+" command: %(cmd)s\n"
+" stdout: %(out)s\n"
+" stderr: %(err)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:142
+msgid "Must pass wwpn or host to lsfabric."
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:158
+#, python-format
+msgid "Did not find a success message or an error for %(fun)s: %(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:163
+msgid ""
+"storwize_svc_multihostmap_enabled is set to False, not allowing multi "
+"host mapping."
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:347
+#, python-format
+msgid "Did not find expected key %(key)s in %(fun)s: %(raw)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:382
+#, python-format
+msgid ""
+"Unexpected CLI response: header/row mismatch. header: %(header)s, row: "
+"%(row)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/api.py:419
+#, python-format
+msgid "No element with given name %s."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/api.py:440
+msgid "Not a valid value for NaElement."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/api.py:444
+msgid "NaElement name cannot be null."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/api.py:468
+msgid "Type cannot be converted into NaElement."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/common.py:75
+msgid "Required configuration not found"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/common.py:103
+#, python-format
+msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/common.py:109
+#, python-format
+msgid "Storage family %s is not supported"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/common.py:116
+#, python-format
+msgid "No default storage protocol found for storage family %(storage_family)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/common.py:123
+#, python-format
+msgid ""
+"Protocol %(storage_protocol)s is not supported for storage family "
+"%(storage_family)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/common.py:130
+#, python-format
+msgid ""
+"NetApp driver of family %(storage_family)s and protocol "
+"%(storage_protocol)s loaded"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/common.py:139
+msgid "Only loading NetApp drivers is supported."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/common.py:158
+#, python-format
+msgid ""
+"The configured NetApp driver is deprecated. Please refer to the link to "
+"resolve the issue '%s'."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:69
+#, python-format
+msgid "No metadata property %(prop)s defined for the LUN %(name)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:105
+#, python-format
+msgid "Using NetApp filer: %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:150
+msgid "Success getting LUN list from server"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:166
+#, python-format
+msgid "Created LUN with name %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:175
+#, python-format
+msgid "No entry in LUN table for volume/snapshot %(name)s."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:191
+#, python-format
+msgid "Destroyed LUN %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:227
+#, python-format
+msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:232
+#, python-format
+msgid ""
+"Successfully fetched target details for LUN %(name)s and initiator "
+"%(initiator_name)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:238
+#, python-format
+msgid "Failed to get LUN target details for the LUN %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:249
+#, python-format
+msgid "Failed to get target portal for the LUN %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:252
+#, python-format
+msgid "Failed to get target IQN for the LUN %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:290
+#, python-format
+msgid "Snapshot %s deletion successful"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:310
+#: cinder/volume/drivers/netapp/iscsi.py:565
+#: cinder/volume/drivers/netapp/nfs.py:99
+#: cinder/volume/drivers/netapp/nfs.py:206
+#, python-format
+msgid "Resizing %s failed. Cleaning volume."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:325
+#, python-format
+msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:412
+#, python-format
+msgid "Error mapping lun. Code: %(code)s, Message: %(message)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:431
+#, python-format
+msgid "Error unmapping lun. Code: %(code)s, Message: %(message)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:511
+msgid "Object is not a NetApp LUN."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:543
+#, python-format
+msgid "Message: %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:545
+#, python-format
+msgid "Error getting lun attribute. Exception: %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:600
+#, python-format
+msgid "No need to extend volume %s as it is already the requested new size."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:606
+#, python-format
+msgid "Resizing lun %s directly to new size."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:633
+#, python-format
+msgid "Lun %(path)s geometry failed. Message - %(msg)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:662
+#, python-format
+msgid "Moving lun %(name)s to %(new_name)s."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:677
+#, python-format
+msgid "Resizing lun %s using sub clone to new size."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:684
+#, python-format
+msgid "%s cannot be sub clone resized as it is hosted on a compressed volume"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:690
+#, python-format
+msgid "%s cannot be sub clone resized as it contains no blocks."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:707
+#, python-format
+msgid "Post clone resize lun %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:718
+#, python-format
+msgid "Failure staging lun %s to tmp."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:723
+#, python-format
+msgid "Failure moving new cloned lun to %s."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:727
+#, python-format
+msgid "Failure deleting staged tmp lun %s."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:730
+#, python-format
+msgid "Unknown exception in post clone resize lun %s."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:732
+#, python-format
+msgid "Exception details: %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:736
+msgid "Getting lun block count."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:741
+#, python-format
+msgid "Failure getting lun info for %s."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:785
+#, python-format
+msgid "Failed to get vol with required size and extra specs for volume: %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:796
+#, python-format
+msgid "Error provisioning vol %(name)s on %(volume)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:841
+#, python-format
+msgid "No iscsi service found for vserver %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:982
+#, python-format
+msgid "Cloned LUN with new name %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:986
+#, python-format
+msgid "No cloned lun named %s found on the filer"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1087
+msgid "Cluster ssc is not updated. No volume stats found."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1149
+#: cinder/volume/drivers/netapp/nfs.py:1080
+msgid "Unsupported ONTAP version. ONTAP version 7.3.1 and above is supported."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1153
+#: cinder/volume/drivers/netapp/nfs.py:1084
+#: cinder/volume/drivers/netapp/utils.py:320
+msgid "API version could not be determined."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1164
+#, python-format
+msgid "Failed to get vol with required size for volume: %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1273
+#, python-format
+msgid "Error finding luns for volume %s. Verify volume exists."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1390
+#, python-format
+msgid "Clone operation with src %(name)s and dest %(new_name)s completed"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1393
+#, python-format
+msgid "Clone operation with src %(name)s and dest %(new_name)s failed"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1456
+msgid "Volume refresh job already running. Returning..."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1462
+#, python-format
+msgid "Error refreshing vol capacity. Message: %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1470
+#, python-format
+msgid "Refreshing capacity info for %s."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/nfs.py:104
+#: cinder/volume/drivers/netapp/nfs.py:211
+#, python-format
+msgid "NFS file %s not discovered."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/nfs.py:224
+#, python-format
+msgid "Copied image to volume %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/nfs.py:230
+#, python-format
+msgid "Registering image in cache %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/nfs.py:237
+#, python-format
+msgid ""
+"Exception while registering image %(image_id)s in cache. Exception: "
+"%(exc)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/nfs.py:250
+#, python-format
+msgid "Found cache file for image %(image_id)s on share %(share)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/nfs.py:263
+#, python-format
+msgid "Cloning image from cache for %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/nfs.py:271
+msgid "Image cache cleaning in progress. Returning..."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/nfs.py:282
+msgid "Image cache cleaning in progress."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/nfs.py:293
+#, python-format
+msgid "Cleaning cache for share %s."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/nfs.py:298
+#, python-format
+msgid "Files to be queued for deletion: %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/nfs.py:305
+#, python-format
+msgid "Exception during cache cleaning %(share)s. Message - %(ex)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/nfs.py:311
+msgid "Image cache cleaning done."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/nfs.py:336
+#, python-format
+msgid "Bytes to free: %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/nfs.py:343
+#, python-format
+msgid "Delete file path %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/nfs.py:358
+#, python-format
+msgid "Deleting file at path %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/nfs.py:363
+#, python-format
+msgid "Exception during deleting %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/nfs.py:395
+#, python-format
+msgid "Unexpected exception in cloning image %(image_id)s. Message: %(msg)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/nfs.py:411
+#, python-format
+msgid "Cloning image %s from cache"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/nfs.py:415
+#, python-format
+msgid "Cache share: %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/nfs.py:425
+#, python-format
+msgid "Unexpected exception during image cloning in share %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/nfs.py:431
+#, python-format
+msgid "Cloning image %s directly in share"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/nfs.py:436
+#, python-format
+msgid "Share is cloneable %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/nfs.py:443
+#, python-format
+msgid "Image is raw %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/nfs.py:450
+#, python-format
+msgid "Image will locally be converted to raw %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/nfs.py:457
+#, python-format
+msgid "Converted to raw, but format is now %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/nfs.py:467
+#, python-format
+msgid "Performing post clone for %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/nfs.py:474
+msgid "NFS file could not be discovered."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/nfs.py:478
+msgid "Checking file for resize"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/nfs.py:482
+#, python-format
+msgid "Resizing file to %sG"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/nfs.py:488
+msgid "Resizing image file failed."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/nfs.py:510
+msgid "Discover file retries exhausted."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/nfs.py:529
+#, python-format
+msgid "Image location not in the expected format %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/nfs.py:557
+#, python-format
+msgid "Found possible share matches %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/nfs.py:561
+msgid "Unexpected exception while shortlisting used share."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/nfs.py:599
+#, python-format
+msgid "Extending volume %s."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/nfs.py:710
+#, python-format
+msgid "Shares on vserver %s will only be used for provisioning."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/nfs.py:714
+#: cinder/volume/drivers/netapp/nfs.py:892
+msgid "No vserver set in config. SSC will be disabled."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/nfs.py:757
+#, python-format
+msgid "Exception creating vol %(name)s on share %(share)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/nfs.py:765
+#, python-format
+msgid "Volume %s could not be created on shares."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/nfs.py:815
+#, python-format
+msgid "No interface found on cluster for ip %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/nfs.py:856
+#, python-format
+msgid ""
+"No volume on cluster with vserver\n"
+" %(vserver)s and junction path "
+"%(junction)s\n"
+" "
+msgstr ""
+
+#: cinder/volume/drivers/netapp/nfs.py:862
+#, python-format
+msgid ""
+"Cloning with params volume %(volume)s, src %(src_path)s,\n"
+" dest %(dest_path)s, vserver %(vserver)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/nfs.py:924
+msgid "No cluster ssc stats found. Wait for next volume stats update."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/nfs.py:941
+msgid "No shares found, hence skipping ssc refresh."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/nfs.py:978
+#: cinder/volume/drivers/netapp/nfs.py:1221
+#, python-format
+msgid "Shortlisted deletion eligible files %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/nfs.py:983
+#: cinder/volume/drivers/netapp/nfs.py:1226
+#, python-format
+msgid "Getting file usage for %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/nfs.py:988
+#: cinder/volume/drivers/netapp/nfs.py:1231
+#, python-format
+msgid "file-usage for path %(path)s is %(bytes)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/nfs.py:1005
+#: cinder/volume/drivers/netapp/nfs.py:1268
+#, python-format
+msgid "Share match found for ip %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/nfs.py:1007
+#: cinder/volume/drivers/netapp/nfs.py:1270
+#, python-format
+msgid "No share match found for ip %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/nfs.py:1038
+#, python-format
+msgid "Found volume %(vol)s for share %(share)s."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/nfs.py:1129
+#, python-format
+msgid "No storage path found for export path %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/nfs.py:1139
+#, python-format
+msgid "Cloning with src %(src_path)s, dest %(dest_path)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/ssc_utils.py:241
+#, python-format
+msgid "Unexpected error while creating ssc vol list. Message - %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/ssc_utils.py:272
+#, python-format
+msgid "Exception querying aggr options. %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/ssc_utils.py:313
+#, python-format
+msgid "Exception querying sis information. %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/ssc_utils.py:347
+#, python-format
+msgid "Exception querying mirror information. %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/ssc_utils.py:379
+#, python-format
+msgid "Exception querying storage disk. %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/ssc_utils.py:421
+#, python-format
+msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/ssc_utils.py:455
+#, python-format
+msgid "Successfully completed stale refresh job for %(server)s and vserver %(vs)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/ssc_utils.py:482
+#, python-format
+msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/ssc_utils.py:488
+#, python-format
+msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/ssc_utils.py:500
+msgid "Backend not a VolumeDriver."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/ssc_utils.py:502
+msgid "Backend server not NaServer."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/ssc_utils.py:505
+msgid "ssc job in progress. Returning..."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/ssc_utils.py:517
+msgid "Refresh stale ssc job in progress. Returning..."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/ssc_utils.py:616
+msgid "Fatal error: User not permitted to query NetApp volumes."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/ssc_utils.py:623
+#, python-format
+msgid ""
+"The user does not have access or sufficient privileges to use all ssc "
+"APIs. The ssc features %s may not work as expected."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/utils.py:122
+msgid "ems executed successfully."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/utils.py:124
+#, python-format
+msgid "Failed to invoke ems. Message: %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/utils.py:137
+msgid ""
+"Using the NetApp drivers directly is not recommended. Please use "
+"NetAppDriver to achieve the functionality."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/utils.py:160
+msgid "Requires an NaServer instance."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/utils.py:317
+msgid "Unsupported Clustered Data ONTAP version."
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:99
+#, python-format
+msgid "Volume %s does not exist in Nexenta SA"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:150
+#, python-format
+msgid "Extending volume: %(id)s New size: %(size)s GB"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:166
+#, python-format
+msgid "Volume %s does not exist, it seems it was already deleted."
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:179
+#, python-format
+msgid "Cannot delete snapshot %(origin)s: %(exc)s"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:190
+#, python-format
+msgid "Creating temp snapshot of the original volume: %(volume_name)s@%(name)s"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:200
+#: cinder/volume/drivers/nexenta/nfs.py:200
+#, python-format
+msgid "Volume creation failed, deleting created snapshot %(volume_name)s@%(name)s"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:205
+#: cinder/volume/drivers/nexenta/nfs.py:205
+#, python-format
+msgid "Failed to delete zfs snapshot %(volume_name)s@%(name)s"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:223
+#, python-format
+msgid "Enter: migrate_volume: id=%(id)s, host=%(host)s"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:250
+#, python-format
+msgid "Remote NexentaStor appliance at %s should be SSH-bound."
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:267
+#, python-format
+msgid ""
+"Cannot send source snapshot %(src)s to destination %(dst)s. Reason: "
+"%(exc)s"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:275
+#, python-format
+msgid ""
+"Cannot delete temporary source snapshot %(src)s on NexentaStor Appliance:"
+" %(exc)s"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:281
+#, python-format
+msgid "Cannot delete source volume %(volume)s on NexentaStor Appliance: %(exc)s"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:318
+#, python-format
+msgid "Snapshot %s does not exist, it seems it was already deleted."
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:439
+#: cinder/volume/drivers/windows/windows_utils.py:230
+#, python-format
+msgid "Ignored target creation error \"%s\" while ensuring export"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:449
+#, python-format
+msgid "Ignored target group creation error \"%s\" while ensuring export"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:461
+#, python-format
+msgid "Ignored target group member addition error \"%s\" while ensuring export"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:471
+#, python-format
+msgid "Ignored LU creation error \"%s\" while ensuring export"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:481
+#, python-format
+msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:514
+#, python-format
+msgid ""
+"Got error trying to destroy target group %(target_group)s, assuming it is"
+" already gone: %(exc)s"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:522
+#, python-format
+msgid ""
+"Got error trying to delete target %(target)s, assuming it is already "
+"gone: %(exc)s"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/jsonrpc.py:83
+#, python-format
+msgid "Sending JSON data: %s"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/jsonrpc.py:88
+msgid "No headers in server response"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/jsonrpc.py:89
+msgid "Bad response from server"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/jsonrpc.py:90
+#, python-format
+msgid "Auto switching to HTTPS connection to %s"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/jsonrpc.py:96
+#, python-format
+msgid "Got response: %s"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/nfs.py:85
+#, python-format
+msgid "Volume %s does not exist in Nexenta Store appliance"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/nfs.py:89
+#, python-format
+msgid "Folder %s does not exist in Nexenta Store appliance"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/nfs.py:114
+#, python-format
+msgid "Creating folder on Nexenta Store %s"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/nfs.py:146
+#, python-format
+msgid "Cannot destroy created folder: %(vol)s/%(folder)s"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/nfs.py:176
+#, python-format
+msgid "Cannot destroy cloned folder: %(vol)s/%(folder)s"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/nfs.py:227
+#, python-format
+msgid "Folder %s does not exist, it was already deleted."
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/nfs.py:237
+#: cinder/volume/drivers/nexenta/nfs.py:268
+#, python-format
+msgid "Snapshot %s does not exist, it was already deleted."
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/nfs.py:302
+#, python-format
+msgid "Creating regular file: %s. This may take some time."
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/nfs.py:313
+#, python-format
+msgid "Regular file: %s created."
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/nfs.py:365
+#, python-format
+msgid "Sharing folder %s on Nexenta Store"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/nfs.py:393
+#, python-format
+msgid "Shares loaded: %s"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/utils.py:46
+#, python-format
+msgid "Invalid value: \"%s\""
+msgstr ""
+
+#: cinder/volume/drivers/san/hp_lefthand.py:93
+#, python-format
+msgid "CLIQ command returned %s"
+msgstr ""
+
+#: cinder/volume/drivers/san/hp_lefthand.py:99
+#, python-format
+msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/san/hp_lefthand.py:107
+#, python-format
+msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/san/hp_lefthand.py:137
+#, python-format
+msgid ""
+"Unexpected number of virtual IPs for cluster %(cluster_name)s. "
+"Result=%(_xml)s"
+msgstr ""
+
+#: cinder/volume/drivers/san/hp_lefthand.py:190
+#, python-format
+msgid "Volume info: %(volume_name)s => %(volume_attributes)s"
+msgstr ""
+
+#: cinder/volume/drivers/san/hp_lefthand.py:246
+#, python-format
+msgid "Snapshot info: %(name)s => %(attributes)s"
+msgstr ""
+
+#: cinder/volume/drivers/san/hp_lefthand.py:321
+msgid "local_path not supported"
+msgstr ""
+
+#: cinder/volume/drivers/san/san.py:169
+msgid "Specify san_password or san_private_key"
+msgstr ""
+
+#: cinder/volume/drivers/san/san.py:173
+msgid "san_ip must be set"
+msgstr ""
+
+#: cinder/volume/drivers/san/solaris.py:79
+#, python-format
+msgid "Cannot parse list-view output: %s"
+msgstr ""
+
+#: cinder/volume/drivers/san/solaris.py:174
+#, python-format
+msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:166
+#, python-format
+msgid "Invalid hp3parclient version. Version %s or greater required."
+msgstr ""
+
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:179
+#, python-format
+msgid "Failed to log in to 3PAR (%(url)s) because %(err)s"
+msgstr ""
+
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:193
+#, python-format
+msgid "HP3PARCommon %(common_ver)s, hp3parclient %(rest_ver)s"
+msgstr ""
+
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:212
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:494
+#, python-format
+msgid "CPG (%s) doesn't exist on array"
+msgstr ""
+
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:228
+#, python-format
+msgid "Failed to get domain because CPG (%s) doesn't exist on array."
+msgstr ""
+
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:247
+#, python-format
+msgid "Error extending volume %s"
+msgstr ""
+
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:347
+#, python-format
+msgid "command %s failed"
+msgstr ""
+
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:390
+#, python-format
+msgid "Error running ssh command: %s"
+msgstr ""
+
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:586
+#, python-format
+msgid "VV Set %s does not exist."
+msgstr ""
+
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:633
+#, python-format
+msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid."
+msgstr ""
+
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:684
+#, python-format
+msgid ""
+"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is "
+"invalid."
+msgstr ""
+
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:752
+#, python-format
+msgid "Volume (%s) already exists on array"
+msgstr ""
+
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:1004
+#, python-format
+msgid "Failure in update_volume_key_value_pair: %s"
+msgstr ""
+
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:1019
+#, python-format
+msgid "Failure in clear_volume_key_value_pair: %s"
+msgstr ""
+
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:1031
+#, python-format
+msgid "Error attaching volume %s"
+msgstr ""
+
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:1039
+#, python-format
+msgid "Error detaching volume %s"
+msgstr ""
+
+#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:124
+#, python-format
+msgid "Invalid IP address format '%s'"
+msgstr ""
+
+#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:158
+#, python-format
+msgid ""
+"Found invalid iSCSI IP address(es) in configuration option(s) "
+"hp3par_iscsi_ips or iscsi_ip_address '%s'."
+msgstr ""
+
+#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:164
+msgid "At least one valid iSCSI IP address must be set."
+msgstr ""
+
+#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:266
+msgid "Least busy iSCSI port not found, using first iSCSI port in list."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/api.py:75
+#, python-format
+msgid "Failure while invoking function: %(func)s. Error: %(excep)s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/api.py:162
+#, python-format
+msgid "Error while terminating session: %s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/api.py:165
+msgid "Successfully established connection to the server."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/api.py:172
+#, python-format
+msgid "Error while logging out the user: %s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/api.py:218
+#, python-format
+msgid ""
+"Not authenticated error occurred. Will create session and try API call "
+"again: %s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/api.py:258
+#, python-format
+msgid "Task: %(task)s progress: %(prog)s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/api.py:262
+#, python-format
+msgid "Task %s status: success."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/api.py:266
+#: cinder/volume/drivers/vmware/api.py:271
+#, python-format
+msgid "Task: %(task)s failed with error: %(err)s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/api.py:290
+msgid "Lease is ready."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/api.py:294
+msgid "Lease initializing..."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/api.py:304
+#, python-format
+msgid "Error: unknown lease state %s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/io_util.py:51
+#, python-format
+msgid "Read %(bytes)s out of %(max)s from ThreadSafePipe."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/io_util.py:56
+#, python-format
+msgid "Completed transfer of size %s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/io_util.py:102
+#, python-format
+msgid "Initiating image service update on image: %(image)s with meta: %(meta)s"
+msgstr ""
+
+#: cinder/volume/drivers/vmware/io_util.py:117
+#, python-format
+msgid "Glance image: %s is now active."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/io_util.py:123
+#, python-format
+msgid "Glance image: %s is in killed state."
+msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:132 +#, python-format +msgid "Glance image %(id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:171 +#, python-format +msgid "" +"Exception during HTTP connection close in VMwareHTTPWrite. Exception is " +"%s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:203 +#: cinder/volume/drivers/vmware/read_write_util.py:292 +msgid "Could not retrieve URL from lease." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:206 +#, python-format +msgid "Opening vmdk url: %s for write." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:231 +#, python-format +msgid "Written %s bytes to vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:242 +#: cinder/volume/drivers/vmware/read_write_util.py:318 +#, python-format +msgid "Updating progress to %s percent." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:258 +#: cinder/volume/drivers/vmware/read_write_util.py:334 +msgid "Lease released." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:260 +#: cinder/volume/drivers/vmware/read_write_util.py:336 +#, python-format +msgid "Lease is already in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:295 +#, python-format +msgid "Opening vmdk url: %s for read." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:307 +#, python-format +msgid "Read %s bytes from vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:150 +#, python-format +msgid "Error(s): %s occurred in the call to RetrievePropertiesEx." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:189 +#, python-format +msgid "No such SOAP method %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:198 +#, python-format +msgid "httplib error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:209 +#, python-format +msgid "Socket error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:218 +#, python-format +msgid "Type error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:225 +#, python-format +msgid "Error in %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:112 +#, python-format +msgid "Returning spec value %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:115 +#, python-format +msgid "Invalid spec value: %s specified." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:118 +#, python-format +msgid "Returning default spec value: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:169 +#, python-format +msgid "%s not set." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:174 +#, python-format +msgid "Successfully setup driver: %(driver)s for server: %(ip)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:219 +msgid "Backing not available, no operation to be performed." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:287 +#, python-format +msgid "" +"Unable to pick datastore to accommodate %(size)s bytes from the " +"datastores: %(dss)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:293 +#, python-format +msgid "" +"Selected datastore: %(datastore)s with %(host_count)d connected host(s) " +"for the volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:375 +#, python-format +msgid "" +"Unable to find suitable datastore for volume of size: %(vol)s GB under " +"host: %(host)s. 
More details: %(excep)s"
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:385
+#, python-format
+msgid "Unable to find host to accommodate a disk of size: %s in the inventory."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:412
+#, python-format
+msgid ""
+"Unable to find suitable datastore for volume: %(vol)s under host: "
+"%(host)s. More details: %(excep)s"
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:422
+#, python-format
+msgid "Unable to create volume: %s in the inventory."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:441
+#, python-format
+msgid "The instance %s for which initialize_connection is called exists."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:448
+#, python-format
+msgid "There is no backing for the volume: %s. Need to create one."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:456
+msgid "The instance for which initialize_connection is called does not exist."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:461
+#, python-format
+msgid "Trying to boot from an empty volume: %s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:470
+#, python-format
+msgid ""
+"Returning connection_info: %(info)s for volume: %(volume)s with "
+"connector: %(connector)s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:518
+#, python-format
+msgid "Snapshot of volume not supported in state: %s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:523
+#, python-format
+msgid "There is no backing, so will not create snapshot: %s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:528
+#, python-format
+msgid "Successfully created snapshot: %s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:549
+#, python-format
+msgid "Delete snapshot of volume not supported in state: %s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:554
+#, python-format
+msgid "There is no backing, and so there is no snapshot: %s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:558
+#, python-format
+msgid "Successfully deleted snapshot: %s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:586
+#, python-format
+msgid "Successfully cloned new backing: %(back)s from source VMDK file: %(vmdk)s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:603
+#, python-format
+msgid ""
+"There is no backing for the source volume: %(svol)s. Not creating any "
+"backing for the volume: %(vol)s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:633
+#, python-format
+msgid ""
+"There is no backing for the source snapshot: %(snap)s. Not creating any "
+"backing for the volume: %(vol)s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:642
+#: cinder/volume/drivers/vmware/vmdk.py:982
+#, python-format
+msgid ""
+"There is no snapshot point for the snapshotted volume: %(snap)s. Not "
+"creating any backing for the volume: %(vol)s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:678
+#, python-format
+msgid "Cannot create image of disk format: %s. Only vmdk disk format is accepted."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:713
+#: cinder/volume/drivers/vmware/vmdk.py:771
+#, python-format
+msgid "Fetching glance image: %(id)s to server: %(host)s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:722
+#: cinder/volume/drivers/vmware/vmdk.py:792
+#, python-format
+msgid "Done copying image: %(id)s to volume: %(vol)s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:725
+#, python-format
+msgid ""
+"Exception in copy_image_to_volume: %(excep)s. Deleting the backing: "
+"%(back)s."
+msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:746 +#, python-format +msgid "Exception in _select_ds_for_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:749 +#, python-format +msgid "Selected datastore %(ds)s for new volume of size %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:784 +#, python-format +msgid "Exception in copy_image_to_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:787 +#, python-format +msgid "Deleting the backing: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:808 +#, python-format +msgid "Copy glance image: %s to create new volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:842 +msgid "Upload to glance of attached volume is not supported." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:847 +#, python-format +msgid "Copy Volume: %s to new image." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:853 +#, python-format +msgid "Backing not found, creating for volume: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:872 +#, python-format +msgid "Done copying volume %(vol)s to a new image %(img)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:922 +#, python-format +msgid "Relocating volume: %(backing)s to %(ds)s and %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:961 +#: cinder/volume/drivers/vmware/volumeops.py:630 +#, python-format +msgid "Successfully created clone: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:974 +#, python-format +msgid "" +"There is no backing for the snapshoted volume: %(snap)s. Not creating any" +" backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1010 +#, python-format +msgid "" +"There is no backing for the source volume: %(src)s. Not creating any " +"backing for volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1018 +#, python-format +msgid "Linked clone of source volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:94 +#, python-format +msgid "Downloading image: %s from glance image server as a flat vmdk file." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:107 +#: cinder/volume/drivers/vmware/vmware_images.py:126 +#, python-format +msgid "Downloaded image: %s from glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:113 +#, python-format +msgid "Downloading image: %s from glance image server using HttpNfc import." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:132 +#, python-format +msgid "Uploading image: %s to the Glance image server using HttpNfc export." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:158 +#, python-format +msgid "Uploaded image: %s to the Glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:87 +#, python-format +msgid "Did not find any backing with name: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:94 +#, python-format +msgid "Deleting the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:97 +#, python-format +msgid "Initiated deletion of VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:99 +#, python-format +msgid "Deleted the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:237 +#, python-format +msgid "There are no valid datastores attached to %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:289 +#, python-format +msgid "" +"Creating folder: %(child_folder_name)s under parent folder: " +"%(parent_folder)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:306 +#, python-format +msgid "Child folder already present: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:314 +#, python-format +msgid "Created child folder: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:365 +#, python-format +msgid "Spec for creating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:383 +#, python-format +msgid "" +"Creating volume backing name: %(name)s disk_type: %(disk_type)s size_kb: " +"%(size_kb)s at folder: %(folder)s resourse pool: %(resource_pool)s " +"datastore name: %(ds_name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:395 +#, python-format +msgid "Initiated creation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:398 +#, python-format +msgid "Successfully created volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:438 +#, python-format +msgid "Spec for relocating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:452 +#, python-format +msgid "" +"Relocating backing: %(backing)s to datastore: %(ds)s and resource pool: " +"%(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:462 +#, python-format +msgid "Initiated relocation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:464 +#, python-format +msgid "" +"Successfully relocated volume backing: %(backing)s to datastore: %(ds)s " +"and resource pool: %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:474 +#, python-format +msgid "Moving backing: %(backing)s to folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:479 +#, python-format +msgid "Initiated move of volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:482 +#, python-format +msgid "Successfully moved volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:494 +#, python-format +msgid "Snapshoting backing: %(backing)s with name: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:501 +#, python-format +msgid "Initiated snapshot of volume backing: %(backing)s named: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:505 +#, python-format +msgid "Successfully created snapshot: %(snap)s for volume backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:553 +#, python-format +msgid "Deleting the snapshot: %(name)s from backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:558 +#, python-format +msgid "" +"Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " +"delete anything." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:565 +#, python-format +msgid "Initiated snapshot: %(name)s deletion for backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:569 +#, python-format +msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:597 +#, python-format +msgid "Spec for cloning the backing: %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:613 +#, python-format +msgid "" +"Creating a clone of backing: %(back)s, named: %(name)s, clone type: " +"%(type)s from snapshot: %(snap)s on datastore: %(ds)s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:627 +#, python-format +msgid "Initiated clone of backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:638 +#, python-format +msgid "Deleting file: %(file)s under datacenter: %(dc)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:646 +#, python-format +msgid "Initiated deletion via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:648 +#, python-format +msgid "Successfully deleted file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:701 +msgid "Copying disk data before snapshot of the VM" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:711 +#, python-format +msgid "Initiated copying disk data via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:713 +#, python-format +msgid "Successfully copied disk at: %(src)s to: %(dest)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:722 +#, python-format +msgid "Deleting vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:729 +#, python-format +msgid "Initiated deleting vmdk file via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:731 +#, python-format +msgid "Deleted vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/windows/windows.py:102 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:47 +#, python-format +msgid "" +"check_for_setup_error: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:53 +msgid "check_for_setup_error: there is no ISCSI traffic listening." +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:63 +#, python-format +msgid "" +"get_host_information: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:73 +#, python-format +msgid "" +"get_host_information: the ISCSI target information could not be " +"retrieved. WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:105 +#, python-format +msgid "" +"associate_initiator_with_iscsi_target: an association between initiator: " +"%(init)s and target name: %(target)s could not be established. WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:123 +#, python-format +msgid "" +"delete_iscsi_target: error when deleting the iscsi target associated with" +" target name: %(target)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:139 +#, python-format +msgid "" +"create_volume: error when creating the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:157 +#, python-format +msgid "" +"delete_volume: error when deleting the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:177 +#, python-format +msgid "" +"create_snapshot: error when creating the snapshot name: %(vol_name)s . 
" +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:193 +#, python-format +msgid "" +"create_volume_from_snapshot: error when creating the volume name: " +"%(vol_name)s from snapshot name: %(snap_name)s. WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:208 +#, python-format +msgid "" +"delete_snapshot: error when deleting the snapshot name: %(snap_name)s . " +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:223 +#, python-format +msgid "" +"create_iscsi_target: error when creating iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:240 +#, python-format +msgid "" +"remove_iscsi_target: error when deleting iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:255 +#, python-format +msgid "" +"add_disk_to_target: error adding disk associated to volume : %(vol_name)s" +" to the target name: %(tar_name)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:273 +#, python-format +msgid "" +"copy_vhd_disk: error when copying disk from source path : %(src_path)s to" +" destination path: %(dest_path)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:290 +#, python-format +msgid "" +"extend: error when extending the volume: %(vol_name)s .WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/flows/common.py:52 +#, python-format +msgid "Restoring source %(source_volid)s status to %(status)s" +msgstr "" + +#: cinder/volume/flows/common.py:58 +#, python-format +msgid "" +"Failed setting source volume %(source_volid)s back to its initial " +"%(source_status)s status" +msgstr "" + +#: cinder/volume/flows/common.py:83 +#, python-format +msgid "Updating volume: %(volume_id)s with %(update)s due to: %(reason)s" +msgstr "" + +#: cinder/volume/flows/common.py:90 +#: cinder/volume/flows/manager/create_volume.py:676 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(update)s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:81 +#, python-format +msgid "Originating snapshot status must be one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:103 +#, python-format +msgid "" +"Unable to create a volume from an originating source volume when its " +"status is not one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:126 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than the snapshot size " +"%(snap_size)sGB. They must be >= original snapshot size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:135 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than original volume size " +"%(source_size)sGB. They must be >= original volume size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:144 +#, python-format +msgid "Volume size %(size)s must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:186 +#, python-format +msgid "" +"Size of specified image %(image_size)sGB is larger than volume size " +"%(volume_size)sGB." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:194 +#, python-format +msgid "" +"Volume size %(volume_size)sGB cannot be smaller than the image minDisk " +"size %(min_disk)sGB." 
+msgstr "" + +#: cinder/volume/flows/api/create_volume.py:212 +#, python-format +msgid "Metadata property key %s greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:217 +#, python-format +msgid "Metadata property key %s value greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:254 +#, python-format +msgid "Availability zone '%s' is invalid" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:267 +msgid "Volume must be in the same availability zone as the snapshot" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:276 +msgid "Volume must be in the same availability zone as the source volume" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:315 +msgid "Volume type will be changed to be the same as the source volume." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:463 +#, python-format +msgid "Failed destroying volume entry %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:546 +#, python-format +msgid "Failed rolling back quota for %s reservations" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:590 +#, python-format +msgid "Failed to update quota for deleting volume: %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:678 +#: cinder/volume/flows/manager/create_volume.py:192 +#, python-format +msgid "Volume %s: create failed" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:682 +msgid "Unexpected build error:" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:105 +#, python-format +msgid "" +"Volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d due to " +"%(reason)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:124 +#, python-format +msgid "Volume %s: re-scheduled" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:141 +#, python-format +msgid "Updating volume %(volume_id)s with %(update)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:146 +#, python-format +msgid "Volume %s: resetting 'creating' status failed." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:165 +#, python-format +msgid "Volume %s: rescheduling failed" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:308 +#, python-format +msgid "" +"Failed notifying about the volume action %(event)s for volume " +"%(volume_id)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:345 +#, python-format +msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:347 +#, python-format +msgid "" +"Failed updating volume %(vol_id)s metadata using the provided " +"%(src_type)s %(src_id)s metadata" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:405 +#, python-format +msgid "" +"Failed fetching snapshot %(snapshot_id)s bootable flag using the provided" +" glance snapshot %(snapshot_ref_id)s volume reference" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:418 +#, python-format +msgid "Marking volume %s as bootable." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:421 +#, python-format +msgid "Failed updating volume %(volume_id)s bootable flag to true" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:448 +#, python-format +msgid "" +"Attempting download of %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s." 
+msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:455 +#: cinder/volume/flows/manager/create_volume.py:466 +#, python-format +msgid "" +"Failed to copy image %(image_id)s to volume: %(volume_id)s, error: " +"%(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:461 +#, python-format +msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:475 +#, python-format +msgid "" +"Downloaded image %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s successfully." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:512 +#, python-format +msgid "" +"Creating volume glance metadata for volume %(volume_id)s backed by image " +"%(image_id)s with: %(vol_metadata)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:526 +#, python-format +msgid "" +"Cloning %(volume_id)s from image %(image_id)s at location " +"%(image_location)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:552 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(updates)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:574 +#, python-format +msgid "Unable to create volume. Volume driver %s not initialized" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:588 +#, python-format +msgid "" +"Volume %(volume_id)s: being created using %(functor)s with specification:" +" %(volume_spec)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:611 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with creation provided " +"model %(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:619 +#, python-format +msgid "Volume %s: creating export" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:633 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with driver provided model " +"%(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:680 +#, python-format +msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" +msgstr "" + +#~ msgid "Error retrieving volume status: %s" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get system name" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get storage pool data" +#~ msgstr "" + +#~ msgid "Cannot find any Fibre Channel HBAs" +#~ msgstr "" + +#~ msgid "Volume status must be available or error" +#~ msgstr "" + +#~ msgid "No backend config with id %s" +#~ msgstr "" + +#~ msgid "No sm_flavor called %s" +#~ msgstr "" + +#~ msgid "No sm_volume with id %s" +#~ msgstr "" + +#~ msgid "Error: %s" +#~ msgstr "" + +#~ msgid "Unexpected state while cloning %s" +#~ msgstr "" + +#~ msgid "iSCSI device not found at %s" +#~ msgstr "" + +#~ msgid "Fibre Channel device not found." +#~ msgstr "" + +#~ msgid "Uncaught exception" +#~ msgstr "" + +#~ msgid "Out reactor registered" +#~ msgstr "" + +#~ msgid "CONSUMER GOT %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT QUEUED %(data)s" +#~ msgstr "" + +#~ msgid "Could not create IPC directory %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT %(data)s" +#~ msgstr "" + +#~ msgid "May specify only one of snapshot, imageRef or source volume" +#~ msgstr "" + +#~ msgid "Volume size cannot be lesser than the Snapshot size" +#~ msgstr "" + +#~ msgid "Unable to clone volumes that are in an error state" +#~ msgstr "" + +#~ msgid "Clones currently must be >= original volume size." 
+#~ msgstr "" + +#~ msgid "Volume size '%s' must be an integer and greater than 0" +#~ msgstr "" + +#~ msgid "Size of specified image is larger than volume size." +#~ msgstr "" + +#~ msgid "Image minDisk size is larger than the volume size." +#~ msgstr "" + +#~ msgid "" +#~ msgstr "" + +#~ msgid "Availability zone is invalid" +#~ msgstr "" + +#~ msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +#~ msgstr "" + +#~ msgid "volume %s: creating from snapshot" +#~ msgstr "" + +#~ msgid "volume %s: creating from existing volume" +#~ msgstr "" + +#~ msgid "volume %s: creating from image" +#~ msgstr "" + +#~ msgid "volume %s: creating" +#~ msgstr "" + +#~ msgid "Setting volume: %s status to error after failed image copy." +#~ msgstr "" + +#~ msgid "Unexpected Error: " +#~ msgstr "" + +#~ msgid "volume %s: creating export" +#~ msgstr "" + +#~ msgid "volume %s: create failed" +#~ msgstr "" + +#~ msgid "volume %s: created successfully" +#~ msgstr "" + +#~ msgid "volume %s: Error trying to reschedule create" +#~ msgstr "" + +#~ msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +#~ msgstr "" + +#~ msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +#~ msgstr "" + +#~ msgid "Downloaded image %(image_id)s to %(volume_id)s successfully." +#~ msgstr "" + +#~ msgid "Array Mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "LUN %(lun)s of size %(size)s MB is created." +#~ msgstr "" + +#~ msgid "Array mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "Failed to attach iser target for volume %(volume_id)s." +#~ msgstr "" + +#~ msgid "Fetching %s" +#~ msgstr "" + +#~ msgid "Link Local address is not found.:%s" +#~ msgstr "" + +#~ msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +#~ msgstr "" + +#~ msgid "Started %(name)s on %(_host)s:%(_port)s" +#~ msgstr "" + +#~ msgid "Unable to find a Fibre Channel volume device" +#~ msgstr "" + +#~ msgid "Volume device not found at %s" +#~ msgstr "" + +#~ msgid "Unable to find Volume Group: %s" +#~ msgstr "" + +#~ msgid "Failed to create Volume Group: %s" +#~ msgstr "" + +#~ msgid "snapshot %(snap_name)s: creating" +#~ msgstr "" + +#~ msgid "Running with CoraidDriver for ESM EtherCLoud" +#~ msgstr "" + +#~ msgid "Update session cookie %(session)s" +#~ msgstr "" + +#~ msgid "Message : %(message)s" +#~ msgstr "" + +#~ msgid "Error while trying to set group: %(message)s" +#~ msgstr "" + +#~ msgid "Unable to find group: %(group)s" +#~ msgstr "" + +#~ msgid "ESM urlOpen error" +#~ msgstr "" + +#~ msgid "JSON Error" +#~ msgstr "" + +#~ msgid "Request without URL" +#~ msgstr "" + +#~ msgid "Configure data : %s" +#~ msgstr "" + +#~ msgid "Configure response : %s" +#~ msgstr "" + +#~ msgid "Unable to retrive volume infos for volume %(volname)s" +#~ msgstr "" + +#~ msgid "Cannot login on Coraid ESM" +#~ msgstr "" + +#~ msgid "Fail to create volume %(volname)s" +#~ msgstr "" + +#~ msgid "Failed to delete volume %(volname)s" +#~ msgstr "" + +#~ msgid "Failed to Create Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "Failed to Delete Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "Failed to Create Volume from Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "fmt = %(fmt)s backed by: %(backing_file)s" +#~ msgstr "" + +#~ msgid "Expected image to be in raw format, but is %s" +#~ msgstr "" + +#~ msgid "volume group %s doesn't exist" +#~ msgstr "" + +#~ msgid "Error retrieving volume stats: %s" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Could not get system name" +#~ msgstr "" + +#~ msgid "CPG (%s) must 
be in a domain" +#~ msgstr "" + +#~ msgid "Error populating default encryption types!" +#~ msgstr "" + +#~ msgid "Unexpected error while running command." +#~ msgstr "" + +#~ msgid "Nexenta SA returned the error" +#~ msgstr "" + +#~ msgid "Ignored target group creation error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group member addition error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LU creation error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Copying metadata from snapshot %(snap_volume_id)s to %(volume_id)s" +#~ msgstr "" + +#~ msgid "Connection to glance failed" +#~ msgstr "" + +#~ msgid "Invalid snapshot" +#~ msgstr "" + +#~ msgid "Invalid input received" +#~ msgstr "" + +#~ msgid "Invalid volume type" +#~ msgstr "" + +#~ msgid "Invalid volume" +#~ msgstr "" + +#~ msgid "Invalid host" +#~ msgstr "" + +#~ msgid "Invalid auth key" +#~ msgstr "" + +#~ msgid "Invalid metadata" +#~ msgstr "" + +#~ msgid "Invalid metadata size" +#~ msgstr "" + +#~ msgid "Migration error" +#~ msgstr "" + +#~ msgid "Quota exceeded" +#~ msgstr "" + +#~ msgid "Connection to swift failed" +#~ msgstr "" + +#~ msgid "Volume migration failed" +#~ msgstr "" + +#~ msgid "SSH command injection detected" +#~ msgstr "" + +#~ msgid "Invalid qos specs" +#~ msgstr "" + +#~ msgid "debug in callback: %s" +#~ msgstr "" + +#~ msgid "Expected object of type: %s" +#~ msgstr "" + +#~ msgid "timefunc: '%(name)s' took %(total_time).2f secs" +#~ msgstr "" + +#~ msgid "base image still has %s snapshots so not deleting base image" +#~ msgstr "" + +#~ msgid "Failed to rename migration destination volume %(vol)s to %(name)s" +#~ msgstr "" + +#~ msgid "Resize volume \"%(name)s\" to %(size)s" +#~ msgstr "" + +#~ msgid "Volume \"%(name)s\" resized. New size is %(size)s" +#~ msgstr "" + +#~ msgid "Invalid snapshot backing file format: %s" +#~ msgstr "" + +#~ msgid "Extend volume from %(old_size) to %(new_size)" +#~ msgstr "" + +#~ msgid "pool %s doesn't exist" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Could not get system name." +#~ msgstr "" + +#~ msgid "Disk not found: %s" +#~ msgstr "" + +#~ msgid "read timed out" +#~ msgstr "" + +#~ msgid "check_for_setup_error." +#~ msgstr "" + +#~ msgid "check_for_setup_error: Can not get device type." +#~ msgstr "" + +#~ msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +#~ msgstr "" + +#~ msgid "_get_device_type: Storage Pool must be configured." +#~ msgstr "" + +#~ msgid "create_volume:volume name: %s." +#~ msgstr "" + +#~ msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +#~ msgstr "" + +#~ msgid "create_export: volume name:%s" +#~ msgstr "" + +#~ msgid "create_export:Volume %(name)s does not exist." +#~ msgstr "" + +#~ msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +#~ msgstr "" + +#~ msgid "terminate_connection:Host does not exist. Host name:%(host)s." +#~ msgstr "" + +#~ msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +#~ msgstr "" + +#~ msgid "create_snapshot:Device does not support snapshot." +#~ msgstr "" + +#~ msgid "create_snapshot:Resource pool needs 1GB valid size at least." +#~ msgstr "" + +#~ msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +#~ msgstr "" + +#~ msgid "create_snapshot:Snapshot does not exist. 
Snapshot name:%(name)s" +#~ msgstr "" + +#~ msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +#~ msgstr "" + +#~ msgid "delete_snapshot:Device does not support snapshot." +#~ msgstr "" + +#~ msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +#~ msgstr "" + +#~ msgid "_check_conf_file: %s" +#~ msgstr "" + +#~ msgid "Write login information to xml error. %s" +#~ msgstr "" + +#~ msgid "_get_login_info error. %s" +#~ msgstr "" + +#~ msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +#~ msgstr "" + +#~ msgid "_get_lun_set_info:%s" +#~ msgstr "" + +#~ msgid "_get_iscsi_info:%s" +#~ msgstr "" + +#~ msgid "CLI command:%s" +#~ msgstr "" + +#~ msgid "_execute_cli:%s" +#~ msgstr "" + +#~ msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +#~ msgstr "" + +#~ msgid "_get_tgt_iqn:iSCSI IP is %s." +#~ msgstr "" + +#~ msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +#~ msgstr "" + +#~ msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +#~ msgstr "" + +#~ msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +#~ msgstr "" + +#~ msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." +#~ msgstr "" + +#~ msgid "Ignored target creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group member addition error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LU creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LUN mapping entry addition error while ensuring export" +#~ msgstr "" + +#~ msgid "Invalid source volume %(reason)s." +#~ msgstr "" + +#~ msgid "The request is invalid." +#~ msgstr "" + +#~ msgid "Volume %(volume_id)s persistence file could not be found." +#~ msgstr "" + +#~ msgid "No disk at %(location)s" +#~ msgstr "" + +#~ msgid "Class %(class_name)s could not be found: %(exception)s" +#~ msgstr "" + +#~ msgid "Action not allowed." +#~ msgstr "" + +#~ msgid "Key pair %(key_name)s already exists." +#~ msgstr "" + +#~ msgid "Migration error: %(reason)s" +#~ msgstr "" + +#~ msgid "Maximum volume/snapshot size exceeded" +#~ msgstr "" + +#~ msgid "3PAR Host already exists: %(err)s. %(info)s" +#~ msgstr "" + +#~ msgid "Backup volume %(volume_id)s type not recognised." +#~ msgstr "" + +#~ msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s" +#~ msgstr "" + +#~ msgid "ssh_read: Read SSH timeout" +#~ msgstr "" + +#~ msgid "do_setup." +#~ msgstr "" + +#~ msgid "create_volume: volume name: %s." +#~ msgstr "" + +#~ msgid "delete_volume: volume name: %s." +#~ msgstr "" + +#~ msgid "create_cloned_volume: src volume: %(src)s tgt volume: %(tgt)s" +#~ msgstr "" + +#~ msgid "create_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" +#~ msgstr "" + +#~ msgid "delete_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" +#~ msgstr "" + +#~ msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Updating volume stats" +#~ msgstr "" + +#~ msgid "restore finished." +#~ msgstr "" + +#~ msgid "Error encountered during initialization of driver: %s" +#~ msgstr "" + +#~ msgid "Unabled to update stats, driver is uninitialized" +#~ msgstr "" + +#~ msgid "Snapshot file at %s does not exist." 
+#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %s does not exist" +#~ msgstr "" + +#~ msgid "Login to 3PAR array invalid" +#~ msgstr "" + +#~ msgid "There are no datastores present under %s." +#~ msgstr "" + +#~ msgid "Size for volume: %s not found, skipping secure delete." +#~ msgstr "" + +#~ msgid "Could not find attribute for LUN named %s" +#~ msgstr "" + +#~ msgid "Cleaning up incomplete backup operations" +#~ msgstr "" + +#~ msgid "Resetting volume %s to available (was backing-up)" +#~ msgstr "" + +#~ msgid "Resetting volume %s to error_restoring (was restoring-backup)" +#~ msgstr "" + +#~ msgid "Resetting backup %s to error (was creating)" +#~ msgstr "" + +#~ msgid "Resetting backup %s to available (was restoring)" +#~ msgstr "" + +#~ msgid "Resuming delete on backup: %s" +#~ msgstr "" + +#~ msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +#~ msgstr "" + +#~ msgid "create_backup finished. backup: %s" +#~ msgstr "" + +#~ msgid "delete_backup started, backup: %s" +#~ msgstr "" + +#~ msgid "delete_backup finished, backup %s deleted" +#~ msgstr "" + +#~ msgid "JSON transfer Error" +#~ msgstr "" + +#~ msgid "create volume error: %(err)s" +#~ msgstr "" + +#~ msgid "Create snapshot error." +#~ msgstr "" + +#~ msgid "Create luncopy error." +#~ msgstr "" + +#~ msgid "_find_host_lun_id transfer data error! " +#~ msgstr "" + +#~ msgid "ssh_read: Read SSH timeout." +#~ msgstr "" + +#~ msgid "There are no hosts in the inventory." +#~ msgstr "" + +#~ msgid "Unable to create volume: %(vol)s on the hosts: %(hosts)s." +#~ msgstr "" + +#~ msgid "Successfully cloned new backing: %s." +#~ msgstr "" + +#~ msgid "Successfully reverted clone: %(clone)s to snapshot: %(snapshot)s." +#~ msgstr "" + +#~ msgid "Copying backing files from %(src)s to %(dest)s." +#~ msgstr "" + +#~ msgid "Initiated copying of backing via task: %s." +#~ msgstr "" + +#~ msgid "Successfully copied backing to %s." +#~ msgstr "" + +#~ msgid "Registering backing at path: %s to inventory." +#~ msgstr "" + +#~ msgid "Initiated registring backing, task: %s." +#~ msgstr "" + +#~ msgid "Successfully registered backing: %s." +#~ msgstr "" + +#~ msgid "Reverting backing to snapshot: %s." +#~ msgstr "" + +#~ msgid "Initiated reverting snapshot via task: %s." +#~ msgstr "" + +#~ msgid "Successfully reverted to snapshot: %s." +#~ msgstr "" + +#~ msgid "Successfully copied disk data to: %s." +#~ msgstr "" + +#~ msgid "Error(s): %s occurred in the call to RetrieveProperties." +#~ msgstr "" + +#~ msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +#~ msgstr "" + +#~ msgid "Deploy v1 of the Cinder API. " +#~ msgstr "" + +#~ msgid "Deploy v2 of the Cinder API. " +#~ msgstr "" + +#~ msgid "_read_xml:%s" +#~ msgstr "" + +#~ msgid "request ip info is %s." +#~ msgstr "" + +#~ msgid "new str info is %s." +#~ msgstr "" + +#~ msgid "Failed to create iser target for volume %(volume_id)s." +#~ msgstr "" + +#~ msgid "Failed to remove iser target for volume %(volume_id)s." 
+#~ msgstr "" + +#~ msgid "rtstool is not installed correctly" +#~ msgstr "" + +#~ msgid "Creating iser_target for: %s" +#~ msgstr "" + +#~ msgid "Failed to create iser target for volume id:%(vol_id)s: %(e)s" +#~ msgstr "" + +#~ msgid "Removing iser_target for: %s" +#~ msgstr "" + +#~ msgid "Failed to remove iser target for volume id:%(vol_id)s: %(e)s" +#~ msgstr "" + +#~ msgid "Volume %s does not exist, it seems it was already deleted" +#~ msgstr "" + +#~ msgid "Executing zfs send/recv on the appliance" +#~ msgstr "" + +#~ msgid "zfs send/recv done, new volume %s created" +#~ msgstr "" + +#~ msgid "Failed to delete temp snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" + +#~ msgid "Failed to delete zfs recv snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" + +#~ msgid "rbd export-diff failed - %s" +#~ msgstr "" + +#~ msgid "rbd import-diff failed - %s" +#~ msgstr "" + +#~ msgid "%s is not on GPFS. Perhaps GPFS not mounted." +#~ msgstr "" + +#~ msgid "Folder %s does not exist, it seems it was already deleted." +#~ msgstr "" + +#~ msgid "No 'os-update_readonly_flag' was specified in request." +#~ msgstr "" + +#~ msgid "Volume 'readonly' flag must be specified in request as a boolean." +#~ msgstr "" + +#~ msgid "ISER provider_location not stored, using discovery" +#~ msgstr "" + +#~ msgid "Could not find iSER export for volume %s" +#~ msgstr "" + +#~ msgid "ISER Discovery: Found %s" +#~ msgstr "" + +#~ msgid "Failed to access the device on the path %(path)s: %(error)s." +#~ msgstr "" + +#~ msgid "iSER device not found at %s" +#~ msgstr "" + +#~ msgid "Found iSER node %(host_device)s (after %(tries)s rescans)." +#~ msgstr "" + +#~ msgid "Skipping ensure_export. No iser_target provisioned for volume: %s" +#~ msgstr "" + +#~ msgid "Skipping remove_export. No iser_target provisioned for volume: %s" +#~ msgstr "" + +#~ msgid "Downloading image: %s from glance image server." +#~ msgstr "" + +#~ msgid "Uploading image: %s to the Glance image server." +#~ msgstr "" + +#~ msgid "Invalid request body" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: prefix %s" +#~ msgstr "" + +#~ msgid "Schedule volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete schedule volume using flow: %s" +#~ msgstr "" + +#~ msgid "Create volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete create volume workflow" +#~ msgstr "" + +#~ msgid "Expected volume result not found" +#~ msgstr "" + +#~ msgid "Manager volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete manager volume workflow" +#~ msgstr "" + +#~ msgid "Unable to update stats, driver is uninitialized" +#~ msgstr "" + +#~ msgid "Bad reponse from server: %s" +#~ msgstr "" + +#~ msgid "%(flow)s has moved into state %(state)s from state %(old_state)s" +#~ msgstr "" + +#~ msgid "No request spec, will not reschedule" +#~ msgstr "" + +#~ msgid "No retry filter property or associated retry info, will not reschedule" +#~ msgstr "" + +#~ msgid "Retry info not present, will not reschedule" +#~ msgstr "" + +#~ msgid "Clear capabilities" +#~ msgstr "" + +#~ msgid "This usually means the volume was never succesfully created." +#~ msgstr "" + +#~ msgid "setting LU uppper (end) limit to %s" +#~ msgstr "" + +#~ msgid "Can't find lun or lun goup in array" +#~ msgstr "" + +#~ msgid "Volume to be restored to is smaller than the backup to be restored" +#~ msgstr "" + +#~ msgid "Volume driver '%(driver)s' not initialized." 
+#~ msgstr "" + +#~ msgid "in looping call" +#~ msgstr "" + +#~ msgid "Is the appropriate service running?" +#~ msgstr "" + +#~ msgid "Could not find another host" +#~ msgstr "" + +#~ msgid "Not enough allocatable volume gigabytes remaining" +#~ msgstr "" + +#~ msgid "Unable to update stats on non-intialized Volume Group: %s" +#~ msgstr "" + +#~ msgid "do_setup: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "migrate_volume started with more than one vdisk copy" +#~ msgstr "" + +#~ msgid "migrate_volume: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "Selected datastore: %s for the volume." +#~ msgstr "" + +#~ msgid "There are no valid datastores present under %s." +#~ msgstr "" + +#~ msgid "Unable to create volume, driver not initialized" +#~ msgstr "" + +#~ msgid "Migration %(migration_id)s could not be found." +#~ msgstr "" + +#~ msgid "Bad driver response status: %(status)s" +#~ msgstr "" + +#~ msgid "Instance %(instance_id)s could not be found." +#~ msgstr "" + +#~ msgid "Volume retype failed: %(reason)s" +#~ msgstr "" + +#~ msgid "SIGTERM received" +#~ msgstr "" + +#~ msgid "Child %(pid)d exited with status %(code)d" +#~ msgstr "" + +#~ msgid "_wait_child %d" +#~ msgstr "" + +#~ msgid "wait wrap.failed %s" +#~ msgstr "" + +#~ msgid "" +#~ "Report interval must be less than " +#~ "service down time. Current config: " +#~ ". Setting " +#~ "service_down_time to: %(new_service_down_time)s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target for volume %(name)s." +#~ msgstr "" + +#~ msgid "Updating iscsi target: %s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target %(name)s: %(e)s" +#~ msgstr "" + +#~ msgid "Caught '%(exception)s' exception." +#~ msgstr "" + +#~ msgid "Get code level failed" +#~ msgstr "" + +#~ msgid "do_setup: Could not get system name" +#~ msgstr "" + +#~ msgid "Failed to get license information." +#~ msgstr "" + +#~ msgid "do_setup: No configured nodes" +#~ msgstr "" + +#~ msgid "enter: _get_chap_secret_for_host: host name %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_chap_secret_for_host: host name " +#~ "%(host_name)s with secret %(chap_secret)s" +#~ msgstr "" + +#~ msgid "" +#~ "_create_host: Cannot clean host name. " +#~ "Host name is not unicode or string" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: %s" +#~ msgstr "" + +#~ msgid "leave: _get_host_from_connector: host %s" +#~ msgstr "" + +#~ msgid "enter: _create_host: host %s" +#~ msgstr "" + +#~ msgid "_create_host: No connector ports" +#~ msgstr "" + +#~ msgid "leave: _create_host: host %(host)s - %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +#~ msgstr "" + +#~ msgid "" +#~ "storwize_svc_multihostmap_enabled is set to " +#~ "False, Not allow multi host mapping" +#~ msgstr "" + +#~ msgid "volume %s mapping to multi host" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _map_vol_to_host: LUN %(result_lun)s, " +#~ "volume %(volume_name)s, host %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "leave: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "_create_host failed to return the host name." +#~ msgstr "" + +#~ msgid "_get_host_from_connector failed to return the host name for connector" +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to any host found." +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: Multiple mappings of " +#~ "volume %(vol_name)s found, no host " +#~ "specified." 
+#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to host %(host_name)s found" +#~ msgstr "" + +#~ msgid "protocol must be specified as ' iSCSI' or ' FC'" +#~ msgstr "" + +#~ msgid "enter: _create_vdisk: vdisk %s " +#~ msgstr "" + +#~ msgid "" +#~ "_create_vdisk %(name)s - did not find success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find success " +#~ "message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find mapping " +#~ "id in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to prepare FlashCopy" +#~ " from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "Unexecpted mapping status %(status)s for " +#~ "mapping %(id)s. Attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Mapping %(id)s prepare failed to " +#~ "complete within the allotted %(to)d " +#~ "seconds timeout. Terminating." +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s with " +#~ "exception %(ex)s" +#~ msgstr "" + +#~ msgid "_prepare_fc_map: %s" +#~ msgstr "" + +#~ msgid "" +#~ "_start_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "enter: _run_flashcopy: execute FlashCopy from" +#~ " source %(source)s to target %(target)s" +#~ msgstr "" + +#~ msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +#~ msgstr "" + +#~ msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %(src_vdisk)s (%(src_id)s) does not exist" +#~ msgstr "" + +#~ msgid "" +#~ "_create_copy: cannot get source vdisk " +#~ "%(src)s capacity from vdisk attributes " +#~ "%(attr)s" +#~ msgstr "" + +#~ msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_flashcopy_mapping_attributes: mapping " +#~ "%(fc_map_id)s, attributes %(attributes)s" +#~ msgstr "" + +#~ msgid "enter: _is_vdisk_defined: vdisk %s " +#~ msgstr "" + +#~ msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +#~ msgstr "" + +#~ msgid "enter: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "warning: Tried to delete vdisk %s but it does not exist." 
+#~ msgstr "" + +#~ msgid "leave: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "" +#~ "_add_vdisk_copy %(name)s - did not find" +#~ " success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "_get_vdisk_copy_attrs: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "_get_pool_attrs: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "enter: _execute_command_and_parse_attributes: command %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _execute_command_and_parse_attributes:\n" +#~ "command: %(cmd)s\n" +#~ "attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "_get_hdr_dic: attribute headers and values do not match.\n" +#~ " Headers: %(header)s\n" +#~ " Values: %(row)s" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ "stdout: %(out)s\n" +#~ "stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "Did not find expected column in %(fun)s: %(hdr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Volume size %(size)s cannot be lesser" +#~ " than the snapshot size %(snap_size)s. " +#~ "They must be >= original snapshot " +#~ "size." +#~ msgstr "" + +#~ msgid "" +#~ "Clones currently disallowed when %(size)s " +#~ "< %(source_size)s. They must be >= " +#~ "original volume size." +#~ msgstr "" + +#~ msgid "" +#~ "Size of specified image %(image_size)s " +#~ "is larger than volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "" +#~ "Image minDisk size %(min_disk)s is " +#~ "larger than the volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "Updating volume %(volume_id)s with %(update)s" +#~ msgstr "" + +#~ msgid "Volume %s: resetting 'creating' status failed" +#~ msgstr "" + +#~ msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s" +#~ msgstr "" + +#~ msgid "Marking volume %s as bootable" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting download of %(image_id)s " +#~ "(%(image_location)s) to volume %(volume_id)s" +#~ msgstr "" + +#~ msgid "" +#~ "Downloaded image %(image_id)s (%(image_location)s)" +#~ " to volume %(volume_id)s successfully" +#~ msgstr "" + +#~ msgid "" +#~ "Creating volume glance metadata for " +#~ "volume %(volume_id)s backed by image " +#~ "%(image_id)s with: %(vol_metadata)s" +#~ msgstr "" + +#~ msgid "" +#~ "Cloning %(volume_id)s from image %(image_id)s" +#~ " at location %(image_location)s" +#~ msgstr "" + diff --git a/cinder/locale/is_IS/LC_MESSAGES/cinder.po b/cinder/locale/is_IS/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..aef57c8e9b --- /dev/null +++ b/cinder/locale/is_IS/LC_MESSAGES/cinder.po @@ -0,0 +1,9956 @@ +# Icelandic (Iceland) translations for cinder. +# Copyright (C) 2013 ORGANIZATION +# This file is distributed under the same license as the cinder project. 
+# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Cinder\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-02-03 06:16+0000\n" +"PO-Revision-Date: 2013-12-16 02:36+0000\n" +"Last-Translator: daisy.ycguo \n" +"Language-Team: Icelandic (Iceland) " +"(http://www.transifex.com/projects/p/openstack/language/is_IS/)\n" +"Plural-Forms: nplurals=2; plural=(n != 1)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:102 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:66 cinder/brick/exception.py:33 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:88 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:107 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:112 +#, python-format +msgid "Volume driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:116 +#, python-format +msgid "Backup driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:120 +#, python-format +msgid "Connection to glance failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:124 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:129 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:133 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:137 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:141 +msgid "Volume driver not ready." +msgstr "" + +#: cinder/exception.py:145 cinder/brick/exception.py:74 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:150 +#, python-format +msgid "Invalid snapshot: %(reason)s" +msgstr "" + +#: cinder/exception.py:154 +#, python-format +msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:159 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:163 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:167 +msgid "The results are invalid." +msgstr "" + +#: cinder/exception.py:171 +#, python-format +msgid "Invalid input received: %(reason)s" +msgstr "" + +#: cinder/exception.py:175 +#, python-format +msgid "Invalid volume type: %(reason)s" +msgstr "" + +#: cinder/exception.py:179 +#, python-format +msgid "Invalid volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:183 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:187 +#, python-format +msgid "Invalid host: %(reason)s" +msgstr "" + +#: cinder/exception.py:193 cinder/brick/exception.py:81 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:197 +#, python-format +msgid "Invalid auth key: %(reason)s" +msgstr "" + +#: cinder/exception.py:201 +#, python-format +msgid "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" +msgstr "" + +#: cinder/exception.py:206 +msgid "Service is unavailable at this time." 
+msgstr "" + +#: cinder/exception.py:210 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:214 +#, python-format +msgid "The device in the path %(path)s is unavailable: %(reason)s" +msgstr "" + +#: cinder/exception.py:218 +#, python-format +msgid "Expected a uuid but received %(uuid)s." +msgstr "" + +#: cinder/exception.py:222 cinder/brick/exception.py:68 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:228 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:232 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "" +"Volume %(volume_id)s has no administration metadata with key " +"%(metadata_key)s." +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Invalid metadata: %(reason)s" +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Invalid metadata size: %(reason)s" +msgstr "" + +#: cinder/exception.py:250 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:255 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:264 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:269 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s deletion is not allowed with volumes " +"present with the type." +msgstr "" + +#: cinder/exception.py:274 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:278 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:282 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:287 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:291 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:295 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:328 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:332 +#, python-format +msgid "Unknown quota resources %(unknown)s." 
+msgstr "" + +#: cinder/exception.py:336 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:340 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:344 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:348 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:352 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:356 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:365 +#, python-format +msgid "Volume Type %(id)s already exists." +msgstr "" + +#: cinder/exception.py:369 +#, python-format +msgid "Volume type encryption for type %(type_id)s already exists." +msgstr "" + +#: cinder/exception.py:373 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:377 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:381 +#, python-format +msgid "Could not find parameter %(param)s" +msgstr "" + +#: cinder/exception.py:385 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:389 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:398 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:402 +#, python-format +msgid "Quota exceeded: code=%(code)s" +msgstr "" + +#: cinder/exception.py:409 +#, python-format +msgid "" +"Requested volume or snapshot exceeds allowed Gigabytes quota. Requested " +"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." +msgstr "" + +#: cinder/exception.py:415 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:419 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:423 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:427 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:432 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:436 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:440 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:444 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:449 +#, python-format +msgid "Glance metadata for volume/snapshot %(id)s cannot be found." 
+msgstr "" + +#: cinder/exception.py:453 +#, python-format +msgid "Failed to export for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:457 +#, python-format +msgid "Failed to create metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:461 +#, python-format +msgid "Failed to update metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:465 +#, python-format +msgid "Failed to copy metadata to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:469 +#, python-format +msgid "Failed to copy image to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:473 +msgid "Invalid Ceph args provided for backup rbd operation" +msgstr "" + +#: cinder/exception.py:477 +msgid "An error has occurred during backup operation" +msgstr "" + +#: cinder/exception.py:481 +msgid "Backup RBD operation failed" +msgstr "" + +#: cinder/exception.py:485 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:489 +msgid "Failed to identify volume backend." +msgstr "" + +#: cinder/exception.py:493 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "" + +#: cinder/exception.py:497 +#, python-format +msgid "Connection to swift failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:501 +#, python-format +msgid "Transfer %(transfer_id)s could not be found." +msgstr "" + +#: cinder/exception.py:505 +#, python-format +msgid "Volume migration failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:509 +#, python-format +msgid "SSH command injection detected: %(command)s" +msgstr "" + +#: cinder/exception.py:513 +#, python-format +msgid "QoS Specs %(specs_id)s already exists." +msgstr "" + +#: cinder/exception.py:517 +#, python-format +msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:522 +#, python-format +msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "No such QoS spec %(specs_id)s." +msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:536 +#, python-format +msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:541 +#, python-format +msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." +msgstr "" + +#: cinder/exception.py:546 +#, python-format +msgid "Invalid qos specs: %(reason)s" +msgstr "" + +#: cinder/exception.py:550 +#, python-format +msgid "QoS Specs %(specs_id)s is still associated with entities." +msgstr "" + +#: cinder/exception.py:554 +#, python-format +msgid "key manager error: %(reason)s" +msgstr "" + +#: cinder/exception.py:560 +msgid "Coraid Cinder Driver exception." +msgstr "" + +#: cinder/exception.py:564 +msgid "Failed to encode json data." +msgstr "" + +#: cinder/exception.py:568 +msgid "Login on ESM failed." +msgstr "" + +#: cinder/exception.py:572 +msgid "Relogin on ESM failed." +msgstr "" + +#: cinder/exception.py:576 +#, python-format +msgid "Group with name \"%(group_name)s\" not found." +msgstr "" + +#: cinder/exception.py:580 +#, python-format +msgid "ESM configure request failed: %(message)s." +msgstr "" + +#: cinder/exception.py:584 +#, python-format +msgid "Coraid ESM not available with reason: %(reason)s." +msgstr "" + +#: cinder/exception.py:589 +msgid "Zadara Cinder Driver exception." 
+msgstr "" + +#: cinder/exception.py:593 +#, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:597 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:601 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:605 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:609 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:613 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:618 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:622 +msgid "SolidFire Cinder Driver exception" +msgstr "" + +#: cinder/exception.py:626 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:630 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:636 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:641 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:645 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:649 cinder/exception.py:662 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:654 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:658 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/manager.py:133 +msgid "Notifying Schedulers of capabilities ..." +msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:105 +#, python-format +msgid "" +"Default quota for resource: %(res)s is set by the default quota flag: " +"quota_%(res)s, it is now deprecated. Please use the the default quota " +"class for default quota." +msgstr "" + +#: cinder/quota.py:748 +#, python-format +msgid "Created reservations %s" +msgstr "" + +#: cinder/quota.py:770 +#, python-format +msgid "Failed to commit reservations %s" +msgstr "" + +#: cinder/quota.py:790 +#, python-format +msgid "Failed to roll back reservations %s" +msgstr "" + +#: cinder/quota.py:876 +msgid "Cannot register resource" +msgstr "" + +#: cinder/quota.py:879 +msgid "Cannot register resources" +msgstr "" + +#: cinder/quota_utils.py:46 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume - " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/quota_utils.py:56 cinder/transfer/api.py:168 +#: cinder/volume/flows/api/create_volume.py:520 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/service.py:95 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:108 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:148 +#, python-format +msgid "" +"Report interval must be less than service down time. Current config " +"service_down_time: %(service_down_time)s, report_interval for this: " +"service is: %(report_interval)s. 
Setting global service_down_time to: " +"%(new_down_time)s" +msgstr "" + +#: cinder/service.py:216 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:255 +msgid "The service database object disappeared; recreating it." +msgstr "" + +#: cinder/service.py:270 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:276 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:298 +#, python-format +msgid "" +"Value of config option %(name)s_workers must be an integer greater than 1. " +"Input value ignored." +msgstr "" + +#: cinder/service.py:373 +msgid "serve() can only be called once" +msgstr "" + +#: cinder/service.py:379 cinder/openstack/common/service.py:166 +#: cinder/openstack/common/service.py:384 +msgid "Full set of CONF:" +msgstr "" + +#: cinder/service.py:387 +#, python-format +msgid "%s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Cannot translate %s to an integer." +msgstr "" + +#: cinder/utils.py:127 +#, python-format +msgid "May specify only one of %s" +msgstr "" + +#: cinder/utils.py:212 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:228 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:412 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:423 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:698 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:759 +#, python-format +msgid "Volume driver %s not initialized" +msgstr "" + +#: cinder/wsgi.py:127 cinder/openstack/common/sslutils.py:50 +#, python-format +msgid "Unable to find cert_file: %s" +msgstr "" + +#: cinder/wsgi.py:130 cinder/openstack/common/sslutils.py:53 +#, python-format +msgid "Unable to find ca_file: %s" +msgstr "" + +#: cinder/wsgi.py:133 cinder/openstack/common/sslutils.py:56 +#, python-format +msgid "Unable to find key_file: %s" +msgstr "" + +#: cinder/wsgi.py:136 cinder/openstack/common/sslutils.py:59 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:169 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:206 +#, python-format +msgid "Started %(name)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:244 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:313 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." 
+msgstr "" + +#: cinder/api/common.py:92 cinder/api/common.py:126 cinder/volume/api.py:266 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:95 cinder/api/common.py:130 cinder/volume/api.py:263 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:120 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:134 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:162 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:189 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:182 +msgid "Initializing extension manager." +msgstr "" + +#: cinder/api/extensions.py:197 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:235 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:236 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:240 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:256 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:262 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:276 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:287 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:356 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:266 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:463 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:786 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:907 +msgid "subclasses must implement construct()!" 
+msgstr "" + +#: cinder/api/contrib/admin_actions.py:81 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:227 +#: cinder/api/contrib/volume_transfer.py:157 +#: cinder/api/contrib/volume_transfer.py:193 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:224 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:60 +msgid "Snapshot not found." +msgstr "" + +#: cinder/api/contrib/hosts.py:86 cinder/api/openstack/wsgi.py:245 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:136 +#, python-format +msgid "Host '%s' could not be found." +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:168 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:180 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:206 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:214 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:111 +msgid "Please specify a name for QoS specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:220 +msgid "Failed to disassociate qos specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:222 +msgid "Qos specs still in use." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:298 +#: cinder/api/contrib/qos_specs_manage.py:351 +msgid "Volume Type id must not be None." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:72 +msgid "Missing required element quota_class_set in request body." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:81 +msgid "Quota class limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:85 +msgid "Quota class limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:60 +msgid "Quota limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quotas.py:65 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:100 +msgid "Missing required element quota_set in request body." +msgstr "" + +#: cinder/api/contrib/quotas.py:111 +#, python-format +msgid "Bad key(s) in quota set: %s" +msgstr "" + +#: cinder/api/contrib/scheduler_hints.py:36 +msgid "Malformed scheduler_hints attribute" +msgstr "" + +#: cinder/api/contrib/services.py:84 +msgid "" +"Query by service parameter is deprecated. Please use binary parameter " +"instead." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:51 +msgid "'status' must be specified." 
+msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:61 +#, python-format +msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:67 +#, python-format +msgid "" +"Provided snapshot status %(provided)s not allowed for snapshot with " +"status %(current)s." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:79 +msgid "progress must be an integer percentage" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:101 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:105 +#: cinder/api/v1/snapshot_metadata.py:75 cinder/api/v1/volume_metadata.py:75 +#: cinder/api/v2/snapshot_metadata.py:75 cinder/api/v2/volume_metadata.py:74 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:108 +#: cinder/api/v1/snapshot_metadata.py:79 cinder/api/v1/volume_metadata.py:79 +#: cinder/api/v2/snapshot_metadata.py:79 cinder/api/v2/volume_metadata.py:78 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:150 +msgid "" +"Key names can only contain alphanumeric characters, underscores, periods," +" colons and hyphens." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:99 +#, python-format +msgid "" +"Invalid request to attach volume to an instance %(instance_uuid)s and a " +"host %(host_name)s simultaneously" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:107 +msgid "Invalid request to attach volume to an invalid target" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:111 +msgid "" +"Invalid request to attach volume with an invalid mode. Attaching mode " +"should be 'rw' or 'ro'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:196 +msgid "Unable to fetch connection information from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:216 +msgid "Unable to terminate volume connection from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:229 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:237 +msgid "Bad value for 'force' parameter." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:240 +msgid "'force' is not string or bool." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:280 +msgid "New volume size must be specified as an integer." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:299 +msgid "Must specify readonly in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:307 +msgid "Bad value for 'readonly'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:311 +msgid "'readonly' not string or bool" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:325 +msgid "New volume type must be specified." 
+msgstr "" + +#: cinder/api/contrib/volume_transfer.py:131 +msgid "Listing volume transfers" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:147 +#, python-format +msgid "Creating new volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:162 +#, python-format +msgid "Creating transfer of volume %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:183 +#, python-format +msgid "Accepting volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:196 +#, python-format +msgid "Accepting transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:217 +#, python-format +msgid "Delete transfer with id: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:64 +msgid "key_size must be non-negative" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:67 +msgid "key_size must be an integer" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:73 +msgid "provider must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:75 +msgid "control_location must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:83 +#, python-format +msgid "Valid control location are: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:111 +msgid "Cannot create encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:115 +msgid "Create body is not valid." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:157 +msgid "Cannot delete encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/middleware/auth.py:108 +msgid "Invalid service catalog json." +msgstr "" + +#: cinder/api/middleware/fault.py:44 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:53 cinder/api/openstack/wsgi.py:984 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/fault.py:69 +#, python-format +msgid "%(exception)s: %(explanation)s" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:55 cinder/api/middleware/sizelimit.py:64 +#: cinder/api/middleware/sizelimit.py:78 +msgid "Request is too large." +msgstr "" + +#: cinder/api/openstack/__init__.py:69 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:80 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:104 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:126 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." 
+msgstr "" + +#: cinder/api/openstack/wsgi.py:220 cinder/api/openstack/wsgi.py:634 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:639 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:677 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:682 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:685 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:793 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:799 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:803 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:914 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:917 cinder/api/openstack/wsgi.py:930 +#: cinder/api/v1/snapshot_metadata.py:53 cinder/api/v1/snapshot_metadata.py:71 +#: cinder/api/v1/snapshot_metadata.py:96 cinder/api/v1/snapshot_metadata.py:121 +#: cinder/api/v1/volume_metadata.py:53 cinder/api/v1/volume_metadata.py:71 +#: cinder/api/v1/volume_metadata.py:96 cinder/api/v1/volume_metadata.py:121 +#: cinder/api/v2/snapshot_metadata.py:53 cinder/api/v2/snapshot_metadata.py:71 +#: cinder/api/v2/snapshot_metadata.py:96 cinder/api/v2/snapshot_metadata.py:121 +#: cinder/api/v2/volume_metadata.py:52 cinder/api/v2/volume_metadata.py:70 +#: cinder/api/v2/volume_metadata.py:95 cinder/api/v2/volume_metadata.py:120 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:927 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:939 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:987 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:139 cinder/api/v2/limits.py:138 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:264 cinder/api/v2/limits.py:261 +msgid "This request was rate-limited." 
+msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:37 cinder/api/v1/snapshot_metadata.py:117 +#: cinder/api/v1/snapshot_metadata.py:156 cinder/api/v2/snapshot_metadata.py:37 +#: cinder/api/v2/snapshot_metadata.py:117 +#: cinder/api/v2/snapshot_metadata.py:156 +msgid "snapshot does not exist" +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:139 +#: cinder/api/v1/snapshot_metadata.py:149 cinder/api/v1/volume_metadata.py:139 +#: cinder/api/v1/volume_metadata.py:149 cinder/api/v2/snapshot_metadata.py:139 +#: cinder/api/v2/snapshot_metadata.py:149 cinder/api/v2/volume_metadata.py:138 +#: cinder/api/v2/volume_metadata.py:148 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:119 cinder/api/v2/snapshots.py:120 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:184 +msgid "'volume_id' must be specified" +msgstr "" + +#: cinder/api/v1/snapshots.py:182 cinder/api/v2/snapshots.py:193 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:186 cinder/api/v2/snapshots.py:202 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:37 cinder/api/v1/volume_metadata.py:117 +#: cinder/api/v1/volume_metadata.py:156 cinder/api/v2/volume_metadata.py:36 +#: cinder/api/v2/volume_metadata.py:116 cinder/api/v2/volume_metadata.py:155 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:111 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:290 cinder/api/v2/volumes.py:228 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:344 cinder/api/v1/volumes.py:348 +#: cinder/api/v2/volumes.py:298 cinder/api/v2/volumes.py:302 +msgid "Invalid imageRef provided." +msgstr "" + +#: cinder/api/v1/volumes.py:388 cinder/api/v2/volumes.py:354 +#, python-format +msgid "snapshot id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:401 +#, python-format +msgid "source vol id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:412 cinder/api/v2/volumes.py:377 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/v1/volumes.py:496 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/snapshots.py:111 cinder/api/v2/snapshots.py:126 +#: cinder/api/v2/snapshots.py:267 +msgid "Snapshot could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:174 cinder/api/v2/snapshots.py:234 +#: cinder/api/v2/volumes.py:313 cinder/api/v2/volumes.py:419 +#, python-format +msgid "Missing required element '%s' in request body" +msgstr "" + +#: cinder/api/v2/snapshots.py:190 cinder/api/v2/volumes.py:217 +#: cinder/api/v2/volumes.py:234 cinder/api/v2/volumes.py:449 +msgid "Volume could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:230 cinder/api/v2/volumes.py:415 +msgid "Missing request body" +msgstr "" + +#: cinder/api/v2/types.py:70 +msgid "Volume type not found" +msgstr "" + +#: cinder/api/v2/volumes.py:237 +msgid "Volume cannot be deleted while in attached state" +msgstr "" + +#: cinder/api/v2/volumes.py:343 +msgid "Volume type not found." 
+msgstr "" + +#: cinder/api/v2/volumes.py:366 +#, python-format +msgid "source volume id:%s not found" +msgstr "" + +#: cinder/api/v2/volumes.py:472 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:66 +msgid "Backup status must be available or error" +msgstr "" + +#: cinder/backup/api.py:105 +msgid "Volume to be backed up must be available" +msgstr "" + +#: cinder/backup/api.py:140 +msgid "Backup status must be available" +msgstr "" + +#: cinder/backup/api.py:145 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:154 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:170 +msgid "Volume to be restored to must be available" +msgstr "" + +#: cinder/backup/api.py:176 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." +msgstr "" + +#: cinder/backup/api.py:181 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:97 +msgid "NULL host not allowed for volume backend lookup." +msgstr "" + +#: cinder/backup/manager.py:100 +#, python-format +msgid "Checking hostname '%s' for backend info." +msgstr "" + +#: cinder/backup/manager.py:107 +#, python-format +msgid "Backend not found in hostname (%s) so using default." +msgstr "" + +#: cinder/backup/manager.py:117 +#, python-format +msgid "Manager requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:120 cinder/backup/manager.py:132 +msgid "Fetching default backend." +msgstr "" + +#: cinder/backup/manager.py:123 +#, python-format +msgid "Volume manager for backend '%s' does not exist." +msgstr "" + +#: cinder/backup/manager.py:129 +#, python-format +msgid "Driver requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:147 +#, python-format +msgid "" +"Registering backend %(backend)s (host=%(host)s " +"backend_name=%(backend_name)s)." +msgstr "" + +#: cinder/backup/manager.py:154 +#, python-format +msgid "Registering default backend %s." +msgstr "" + +#: cinder/backup/manager.py:158 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)." +msgstr "" + +#: cinder/backup/manager.py:165 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s." +msgstr "" + +#: cinder/backup/manager.py:184 +msgid "Cleaning up incomplete backup operations." +msgstr "" + +#: cinder/backup/manager.py:189 +#, python-format +msgid "Resetting volume %s to available (was backing-up)." +msgstr "" + +#: cinder/backup/manager.py:194 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)." +msgstr "" + +#: cinder/backup/manager.py:206 +#, python-format +msgid "Resetting backup %s to error (was creating)." +msgstr "" + +#: cinder/backup/manager.py:212 +#, python-format +msgid "Resetting backup %s to available (was restoring)." +msgstr "" + +#: cinder/backup/manager.py:217 +#, python-format +msgid "Resuming delete on backup: %s." +msgstr "" + +#: cinder/backup/manager.py:225 +#, python-format +msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:237 +#, python-format +msgid "" +"Create backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s." 
+msgstr "" + +#: cinder/backup/manager.py:249 +#, python-format +msgid "" +"Create backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:282 +#, python-format +msgid "Create backup finished. backup: %s." +msgstr "" + +#: cinder/backup/manager.py:286 +#, python-format +msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:299 +#, python-format +msgid "" +"Restore backup aborted: expected volume status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:310 +#, python-format +msgid "" +"Restore backup aborted: expected backup status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:329 +#, python-format +msgid "" +"Restore backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:360 +#, python-format +msgid "" +"Restore backup finished, backup %(backup_id)s restored to volume " +"%(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:379 +#, python-format +msgid "Delete backup started, backup: %s." +msgstr "" + +#: cinder/backup/manager.py:386 +#, python-format +msgid "" +"Delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:399 +#, python-format +msgid "" +"Delete backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:422 +#, python-format +msgid "Delete backup finished, backup %s deleted." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:116 +msgid "" +"rbd striping not supported - ignoring configuration settings for rbd " +"striping" +msgstr "" + +#: cinder/backup/drivers/ceph.py:147 +#, python-format +msgid "invalid user '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:213 +msgid "backup_id required" +msgstr "" + +#: cinder/backup/drivers/ceph.py:224 +#, python-format +msgid "discarding %(length)s bytes from offset %(offset)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:232 +#, python-format +msgid "writing zeroes chunk %d" +msgstr "" + +#: cinder/backup/drivers/ceph.py:246 +#, python-format +msgid "transferring data between '%(src)s' and '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:250 +#, python-format +msgid "%(chunks)s chunks of %(bytes)s bytes to be transferred" +msgstr "" + +#: cinder/backup/drivers/ceph.py:269 +#, python-format +msgid "transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:279 +#, python-format +msgid "transferring remaining %s bytes" +msgstr "" + +#: cinder/backup/drivers/ceph.py:295 +#, python-format +msgid "creating base image '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:322 cinder/backup/drivers/ceph.py:603 +#, python-format +msgid "deleting backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:325 +msgid "no backup snapshot to delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:361 +#, python-format +msgid "trying diff format name format basename='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:369 +#, python-format +msgid "image %s not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:377 +#, python-format +msgid "base image still has %s snapshots so skipping base image delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:382 +#, python-format +msgid "deleting base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:389 +#, python-format +msgid "image busy, retrying %(retries)s more time(s) in %(delay)ss" +msgstr "" + +#: cinder/backup/drivers/ceph.py:394 +msgid "max retries reached - raising error" +msgstr "" + +#: cinder/backup/drivers/ceph.py:397 +#, python-format +msgid "base backup image='%s' deleted)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:407 +#, python-format +msgid "deleting source snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:453 +#, python-format +msgid "performing differential transfer from '%(src)s' to '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:478 +#, python-format +msgid "rbd diff op failed - (ret=%(ret)s stderr=%(stderr)s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:488 +#, python-format +msgid "image '%s' not found - trying diff format name" +msgstr "" + +#: cinder/backup/drivers/ceph.py:493 +#, python-format +msgid "diff format image '%s' not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:528 +#, python-format +msgid "using --from-snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:543 +#, python-format +msgid "source snap '%s' is stale so deleting" +msgstr "" + +#: cinder/backup/drivers/ceph.py:555 +#, python-format +msgid "" +"snap='%(snap)s' does not exist in base image='%(base)s' - aborting " +"incremental backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:566 +#, python-format +msgid "creating backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:586 +#, python-format +msgid "differential backup transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:595 +msgid "differential backup transfer failed" +msgstr "" + +#: 
cinder/backup/drivers/ceph.py:625 +#, python-format +msgid "creating base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:634 +msgid "copying data" +msgstr "" + +#: cinder/backup/drivers/ceph.py:694 +#, python-format +msgid "looking for snapshot of backup base '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:697 +#, python-format +msgid "backup base '%s' has no snapshots" +msgstr "" + +#: cinder/backup/drivers/ceph.py:704 +#, python-format +msgid "backup '%s' has no snapshot" +msgstr "" + +#: cinder/backup/drivers/ceph.py:708 +#, python-format +msgid "backup should only have one snapshot but instead has %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:713 +#, python-format +msgid "found snapshot '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:734 +msgid "need non-zero volume size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:751 +#, python-format +msgid "Starting backup of volume='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:764 +msgid "forcing full backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:776 +#, python-format +msgid "backup '%s' finished." +msgstr "" + +#: cinder/backup/drivers/ceph.py:834 +msgid "adjusting restore vol size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:846 +#, python-format +msgid "trying incremental restore from base='%(base)s' snap='%(snap)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:858 +msgid "differential restore failed, trying full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:869 +#, python-format +msgid "restore transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:916 +#, python-format +msgid "rbd has %s extents" +msgstr "" + +#: cinder/backup/drivers/ceph.py:938 +msgid "dest volume is original volume - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:959 +msgid "destination has extents - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:964 +#, python-format +msgid "no restore point found for backup='%s', forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:995 +msgid "forcing full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1005 +#, python-format +msgid "starting restore from Ceph backup=%(src)s to volume=%(dest)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1016 +msgid "volume_file does not support fileno() so skipping fsync()" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1021 +msgid "restore finished successfully." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:1023 +#, python-format +msgid "restore finished with error - %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1029 +#, python-format +msgid "delete started for backup=%s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1034 +msgid "rbd image not found but continuing anyway so that db entry can be removed" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1037 +#, python-format +msgid "delete '%s' finished with warning" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1039 +#, python-format +msgid "delete '%s' finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:106 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:123 +#, python-format +msgid "single_user auth mode enabled, but %(param)s not set" +msgstr "" + +#: cinder/backup/drivers/swift.py:141 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:146 +#, python-format +msgid "container %s does not exist" +msgstr "" + +#: cinder/backup/drivers/swift.py:151 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/drivers/swift.py:157 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:173 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:182 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:192 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:209 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/drivers/swift.py:214 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:219 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:224 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/drivers/swift.py:234 +#, python-format +msgid "volume size %d is invalid." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:248 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:271 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/drivers/swift.py:278 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:287 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/drivers/swift.py:291 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/drivers/swift.py:297 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:301 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:304 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:312 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/drivers/swift.py:328 cinder/backup/drivers/tsm.py:324 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/drivers/swift.py:345 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/drivers/swift.py:350 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:356 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/drivers/swift.py:362 +#, python-format +msgid "" +"restoring object from swift. backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:378 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/drivers/swift.py:401 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:409 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:423 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:428 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:432 cinder/backup/drivers/tsm.py:378 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:446 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:455 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:458 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:468 cinder/backup/drivers/tsm.py:440 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/backup/drivers/tsm.py:85 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to create device hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:143 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to obtain backup success notification from " +"server.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:173 +#, python-format +msgid "" +"restore: %(vol_id)s Failed.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:199 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a block device." +msgstr "" + +#: cinder/backup/drivers/tsm.py:206 +#, python-format +msgid "backup: %(vol_id)s Failed. Cannot obtain real path to device %(path)s." +msgstr "" + +#: cinder/backup/drivers/tsm.py:213 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a file." +msgstr "" + +#: cinder/backup/drivers/tsm.py:260 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to remove backup hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:286 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to TSM, volume path: " +"%(volume_path)s," +msgstr "" + +#: cinder/backup/drivers/tsm.py:298 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:308 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:338 +#, python-format +msgid "" +"restore: starting restore of backup from TSM to volume %(volume_id)s, " +"backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:352 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:362 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:413 +#, python-format +msgid "" +"delete: %(vol_id)s Failed to run dsmc with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:421 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments with " +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:432 +#, python-format +msgid "" +"delete: %(vol_id)s Failed with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/brick/exception.py:55 +#, python-format +msgid "Exception in string format operation. 
msg='%s'" +msgstr "" + +#: cinder/brick/exception.py:85 +msgid "We are unable to locate any Fibre Channel devices." +msgstr "" + +#: cinder/brick/exception.py:89 +msgid "Unable to find a Fibre Channel volume device." +msgstr "" + +#: cinder/brick/exception.py:93 +#, python-format +msgid "Volume device not found at %(device)s." +msgstr "" + +#: cinder/brick/exception.py:97 +#, python-format +msgid "Unable to find Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:101 +#, python-format +msgid "Failed to create Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:105 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:109 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:113 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:117 +#, python-format +msgid "Connect to volume via protocol %(protocol)s not supported." +msgstr "" + +#: cinder/brick/initiator/connector.py:127 +#, python-format +msgid "Invalid InitiatorConnector protocol specified %(protocol)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:140 +#, python-format +msgid "Failed to access the device on the path %(path)s: %(error)s %(info)s." +msgstr "" + +#: cinder/brick/initiator/connector.py:229 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. Try" +" number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:242 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:317 +#, python-format +msgid "Could not find the iSCSI Initiator File %s" +msgstr "" + +#: cinder/brick/initiator/connector.py:609 +msgid "We are unable to locate any Fibre Channel devices" +msgstr "" + +#: cinder/brick/initiator/connector.py:619 +#, python-format +msgid "Looking for Fibre Channel dev %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:629 +msgid "Fibre Channel volume device not found." +msgstr "" + +#: cinder/brick/initiator/connector.py:633 +#, python-format +msgid "Fibre volume not yet found. Will rescan & retry. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:649 +#, python-format +msgid "Found Fibre Channel volume %(name)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:658 +#, python-format +msgid "Multipath device discovered %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:776 +#, python-format +msgid "AoE volume not yet found at: %(path)s. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:789 +#, python-format +msgid "Found AoE device %(path)s (after %(tries)s rediscover)" +msgstr "" + +#: cinder/brick/initiator/connector.py:815 +#, python-format +msgid "aoe-discover: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:825 +#, python-format +msgid "aoe-revalidate %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:834 +#, python-format +msgid "aoe-flush %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:858 +msgid "" +"Connection details not present. RemoteFsClient may not initialize " +"properly." 
+msgstr "" + +#: cinder/brick/initiator/connector.py:915 +msgid "Invalid connection_properties specified no device_path attribute" +msgstr "" + +#: cinder/brick/initiator/linuxfc.py:50 cinder/brick/initiator/linuxfc.py:56 +msgid "systool is not installed" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:99 +#: cinder/brick/initiator/linuxscsi.py:107 +#: cinder/brick/initiator/linuxscsi.py:124 +#, python-format +msgid "multipath call failed exit (%(code)s)" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:145 +#, python-format +msgid "Couldn't find multipath device %(line)s" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:149 +#, python-format +msgid "Found multipath device = %(mdev)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:140 +msgid "Attempting recreate of backing lun..." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:158 +#, python-format +msgid "" +"Failed to recover attempt to create iscsi backing lun for volume " +"id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:177 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:184 +#, python-format +msgid "" +"Created volume path %(vp)s,\n" +"content: %(vc)%" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:216 cinder/brick/iscsi/iscsi.py:365 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:227 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:258 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:262 +#, python-format +msgid "Volume path %s does not exist, nothing to remove." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:280 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:290 cinder/brick/iscsi/iscsi.py:550 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:375 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:469 +msgid "cinder-rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:489 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:513 cinder/brick/iscsi/iscsi.py:522 +#, python-format +msgid "Failed to create iscsi target for volume id:%s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:532 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:542 +#, python-format +msgid "Failed to remove iscsi target for volume id:%s." 
+msgstr "" + +#: cinder/brick/iscsi/iscsi.py:571 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 cinder/brick/local_dev/lvm.py:158 +#: cinder/brick/local_dev/lvm.py:474 cinder/brick/local_dev/lvm.py:503 +#: cinder/brick/local_dev/lvm.py:546 cinder/brick/local_dev/lvm.py:609 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 cinder/brick/local_dev/lvm.py:159 +#: cinder/brick/local_dev/lvm.py:475 cinder/brick/local_dev/lvm.py:504 +#: cinder/brick/local_dev/lvm.py:547 cinder/brick/local_dev/lvm.py:610 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 cinder/brick/local_dev/lvm.py:160 +#: cinder/brick/local_dev/lvm.py:476 cinder/brick/local_dev/lvm.py:505 +#: cinder/brick/local_dev/lvm.py:548 cinder/brick/local_dev/lvm.py:611 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, python-format +msgid "Unable to locate Volume Group %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:157 +msgid "Error querying thin pool about data_percent" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:370 +#, python-format +msgid "Unable to find VG: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:420 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:473 +msgid "Error creating Volume" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:489 +#, python-format +msgid "Unable to find LV: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:502 +msgid "Error creating snapshot" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:545 +msgid "Error activating LV" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:563 +#, python-format +msgid "Error reported running lvremove: CMD: %(command)s, RESPONSE: %(response)s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:568 +msgid "Attempting udev settle and retry of lvremove..." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:608 +msgid "Error extending Volume" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:39 +msgid "nfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:45 +msgid "glusterfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:86 +#, python-format +msgid "Already mounted: %s" +msgstr "" + +#: cinder/common/config.py:125 +msgid "Deploy v1 of the Cinder API." +msgstr "" + +#: cinder/common/config.py:128 +msgid "Deploy v2 of the Cinder API." +msgstr "" + +#: cinder/common/sqlalchemyutils.py:66 +#: cinder/openstack/common/db/sqlalchemy/utils.py:72 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:114 +#: cinder/openstack/common/db/sqlalchemy/utils.py:120 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/compute/nova.py:97 +#, python-format +msgid "Novaclient connection created using URL: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:63 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:190 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:843 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1842 +#, python-format +msgid "VolumeType %s deletion failed, VolumeType in use." 
+msgstr "" + +#: cinder/db/sqlalchemy/api.py:2530 +#, python-format +msgid "No backup with id %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2615 +msgid "Volume must be available" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2639 +#, python-format +msgid "Volume in unexpected state %s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2662 +#, python-format +msgid "" +"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " +"%(status)s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:37 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:64 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:240 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:269 +msgid "Downgrade from initial Cinder install is unsupported." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:49 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:74 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:105 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:45 +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:48 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:46 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:127 +msgid "Dropping foreign key reservations_ibfk_1 failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:133 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:140 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:147 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:60 +msgid "Exception while creating table 'volume_glance_metadata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:75 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:68 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:58 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:61 +msgid "transfers table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:31 +msgid "migrations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:61 +#, python-format +msgid "Table |%s| not created" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:37 +#, python-format +msgid "Exception while dropping table %s." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:100 +#, python-format +msgid "Exception while creating table %s." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:34 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:43 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:49 +#, python-format +msgid "Column |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:92 +msgid "encryption_key_id column not dropped from volumes" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:100 +msgid "encryption_key_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:105 +msgid "volume_type_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:113 +msgid "encryption table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:49 +msgid "Table quality_of_service_specs not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:60 +msgid "Added qos_specs_id column to volume type table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:85 +msgid "Dropping foreign key volume_types_ibfk_1 failed" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:93 +msgid "Dropping qos_specs_id column failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:100 +msgid "Dropping quality_of_service_specs table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:59 +msgid "volume_admin_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:48 +msgid "" +"Found existing 'default' entries in the quota_classes table. Skipping " +"insertion of default values." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:72 +msgid "Added default quota class data into the DB." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:74 +msgid "Default quota class data not inserted into the DB." +msgstr "" + +#: cinder/image/glance.py:161 cinder/image/glance.py:169 +#, python-format +msgid "Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:94 cinder/image/image_utils.py:199 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/image/image_utils.py:101 +#, python-format +msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:109 cinder/image/image_utils.py:192 +#, python-format +msgid "" +"Size is %(image_size)dGB and doesn't fit in a volume of size " +"%(volume_size)dGB." +msgstr "" + +#: cinder/image/image_utils.py:157 +#, python-format +msgid "" +"qemu-img is not installed and image is of type %s. Only RAW images can " +"be used if qemu-img is not installed." +msgstr "" + +#: cinder/image/image_utils.py:164 +msgid "" +"qemu-img is not installed and the disk format is not specified. Only RAW" +" images can be used if qemu-img is not installed." 
+msgstr "" + +#: cinder/image/image_utils.py:178 +#, python-format +msgid "Copying image from %(tmp)s to volume %(dest)s - size: %(size)s" +msgstr "" + +#: cinder/image/image_utils.py:206 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:224 +#, python-format +msgid "Converted to %(vol_format)s, but format is now %(file_format)s" +msgstr "" + +#: cinder/image/image_utils.py:260 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:78 +msgid "" +"config option keymgr.fixed_key has not been defined: some operations may " +"fail unexpectedly" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:80 +msgid "keymgr.fixed_key not defined" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:134 +#, python-format +msgid "Not deleting key %s" +msgstr "" + +#: cinder/openstack/common/eventlet_backdoor.py:140 +#, python-format +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/fileutils.py:64 +#, python-format +msgid "Reloading cached file %s" +msgstr "" + +#: cinder/openstack/common/gettextutils.py:252 +msgid "Message objects do not support addition." +msgstr "" + +#: cinder/openstack/common/gettextutils.py:261 +msgid "" +"Message objects do not support str() because they may contain non-ascii " +"characters. Please use unicode() or translate() instead." +msgstr "" + +#: cinder/openstack/common/imageutils.py:96 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:189 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:200 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:227 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:235 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/log.py:301 +#, python-format +msgid "Deprecated: %s" +msgstr "" + +#: cinder/openstack/common/log.py:402 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:453 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:623 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:82 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:89 +#: cinder/tests/brick/test_brick_connector.py:466 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:129 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:136 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:43 +#, python-format +msgid "Unexpected argument for periodic task creation: %(arg)s." 
+msgstr "" + +#: cinder/openstack/common/periodic_task.py:134 +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:139 +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:177 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:186 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." +msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:127 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/openstack/common/processutils.py:142 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:167 +#: cinder/openstack/common/processutils.py:239 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:345 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:179 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/openstack/common/processutils.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:318 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:220 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/openstack/common/processutils.py:224 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/openstack/common/service.py:175 +#: cinder/openstack/common/service.py:269 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:187 +msgid "Exception during rpc cleanup." 
+msgstr "" + +#: cinder/openstack/common/service.py:238 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:275 +msgid "Unhandled exception" +msgstr "" + +#: cinder/openstack/common/service.py:308 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/openstack/common/service.py:327 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/openstack/common/service.py:337 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/openstack/common/service.py:354 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/openstack/common/service.py:358 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/service.py:362 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/openstack/common/service.py:392 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/openstack/common/service.py:410 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/openstack/common/sslutils.py:98 +#, python-format +msgid "Invalid SSL version : %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:86 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/strutils.py:182 +#, python-format +msgid "Invalid string format: %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:189 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/openstack/common/versionutils.py:69 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and " +"may be removed in %(remove_in)s." +msgstr "" + +#: cinder/openstack/common/versionutils.py:73 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s and may be removed in " +"%(remove_in)s. It will not be superseded." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:29 +msgid "An unknown error occurred in crypto utils." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:36 +#, python-format +msgid "Block size of %(given)d is too big, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:45 +#, python-format +msgid "Length of %(given)d is too long, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/db/exception.py:44 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:487 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:538 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:610 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/utils.py:33 +msgid "Sort key supplied was not valid." +msgstr "" + +#: cinder/openstack/common/notifier/api.py:129 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:145 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:164 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:105 +#, python-format +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:83 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:216 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:299 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:345 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "received %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:422 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:423 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:280 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:459 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:594 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:597 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:631 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:640 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:668 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint."
+msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:166 +#: cinder/openstack/common/rpc/impl_qpid.py:164 +msgid "Failed to process message... skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:477 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:499 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:536 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:552 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:606 +#: cinder/openstack/common/rpc/impl_qpid.py:507 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:624 +#: cinder/openstack/common/rpc/impl_qpid.py:522 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:628 +#: cinder/openstack/common/rpc/impl_qpid.py:526 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:667 +#: cinder/openstack/common/rpc/impl_qpid.py:561 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:84 +#, python-format +msgid "Invalid value for qpid_topology_version: %d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:455 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:461 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:474 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:534 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:101 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:136 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:137 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:138 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:146 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:158 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:200 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:205 +msgid "You cannot send on this socket." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:267 +#, python-format +msgid "Running func with context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:305 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:387 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:437 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:443 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:475 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:481 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:497 +#, python-format +msgid "Required IPC directory does not exist at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:506 +#, python-format +msgid "Permission denied to IPC directory at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:509 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:543 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:562 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:590 +msgid "Skipping topic registration. Already registered." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:597 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:649 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:662 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:675 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:678 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:681 +#, python-format +msgid "Received message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:682 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:691 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:698 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:721 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:724 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:728 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:731 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:771 +#, python-format +msgid "topic is %s." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:815 +#, python-format +msgid "rpc_zmq_matchmaker = %(orig)s is deprecated; use %(new)s instead" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#: cinder/openstack/common/rpc/matchmaker_ring.py:79 +#: cinder/openstack/common/rpc/matchmaker_ring.py:97 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:54 +#, python-format +msgid "extra_spec requirement '%(req)s' does not match '%(cap)s'" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:67 +#, python-format +msgid "%(host_state)s fails resource_type extra_specs requirements" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:43 +msgid "Re-scheduling is disabled." +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:52 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/scheduler/driver.py:69 +msgid "Must implement host_passes_filters" +msgstr "" + +#: cinder/scheduler/driver.py:74 +msgid "Must implement find_retype_host" +msgstr "" + +#: cinder/scheduler/driver.py:78 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:82 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:98 +#, python-format +msgid "cannot place volume %(id)s on %(host)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:114 +#, python-format +msgid "No valid hosts for volume %(id)s with type %(type)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:125 +#, python-format +msgid "" +"Current host not valid for volume %(id)s with type %(type)s, migration " +"not allowed" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:156 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:174 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:207 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:259 +#, python-format +msgid "Filtered %s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:276 +#, python-format +msgid "Choosing %s" +msgstr "" + +#: cinder/scheduler/host_manager.py:264 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:269 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:294 +#, python-format +msgid "volume service is down or disabled. (host: %s)" +msgstr "" + +#: cinder/scheduler/manager.py:63 +msgid "" +"ChanceScheduler and SimpleScheduler have been deprecated due to lack of " +"support for advanced features like: volume types, volume encryption, QoS " +"etc. These two schedulers can be fully replaced by FilterScheduler with " +"certain combination of filters and weighers." 
+msgstr "" + +#: cinder/scheduler/manager.py:98 cinder/scheduler/manager.py:100 +msgid "Failed to create scheduler manager volume flow" +msgstr "" + +#: cinder/scheduler/manager.py:159 +msgid "New volume type not specified in request_spec." +msgstr "" + +#: cinder/scheduler/manager.py:174 +#, python-format +msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." +msgstr "" + +#: cinder/scheduler/manager.py:192 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:68 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%s'" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:43 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:57 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:53 +msgid "No volume_id provided to populate a request_spec from" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:116 +#, python-format +msgid "Failed to schedule_create_volume: %(cause)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:135 +#, python-format +msgid "Failed notifying on %(topic)s payload %(payload)s" +msgstr "" + +#: cinder/tests/fake_driver.py:57 cinder/volume/driver.py:784 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:76 cinder/volume/driver.py:884 +#, python-format +msgid "FAKE ISER: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:97 +msgid "local_path not implemented" +msgstr "" + +#: cinder/tests/fake_driver.py:124 cinder/tests/fake_driver.py:129 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:70 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:78 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:94 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:97 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:58 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_netapp_nfs.py:360 +#, python-format +msgid "Share %(share)s and file name %(file_name)s" +msgstr "" + +#: cinder/tests/test_rbd.py:768 cinder/volume/drivers/rbd.py:175 +msgid "flush() not supported in this version of librbd" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:260 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1507 +#, python-format +msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1510 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1515 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:60 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:61 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: cinder/tests/test_xiv_ds8k.py:102 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/tests/api/contrib/test_backups.py:741 +msgid "Invalid input" +msgstr "" + +#: cinder/tests/integrated/test_login.py:29 +#, python-format +msgid "volume: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:32 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:42 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:50 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:58 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:100 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:103 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:121 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:148 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:159 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:166 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/transfer/api.py:68 +msgid "Volume in unexpected state" +msgstr "" + +#: cinder/transfer/api.py:102 cinder/volume/api.py:367 +msgid "status must be available" +msgstr "" + +#: cinder/transfer/api.py:119 +#, python-format +msgid "Failed to create transfer record for %s" +msgstr "" + +#: cinder/transfer/api.py:136 +#, python-format +msgid "Attempt to transfer %s with invalid auth key." +msgstr "" + +#: cinder/transfer/api.py:156 cinder/volume/flows/api/create_volume.py:508 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/transfer/api.py:182 +#, python-format +msgid "Failed to update quota donating volumetransfer id %s" +msgstr "" + +#: cinder/transfer/api.py:199 +#, python-format +msgid "Volume %s has been transferred."
+msgstr "" + +#: cinder/volume/api.py:143 +#, python-format +msgid "Unable to query if %s is in the availability zone set" +msgstr "" + +#: cinder/volume/api.py:171 cinder/volume/api.py:173 +msgid "Failed to create api volume flow" +msgstr "" + +#: cinder/volume/api.py:202 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:214 +#, python-format +msgid "Volume status must be available or error, but current status is: %s" +msgstr "" + +#: cinder/volume/api.py:224 +msgid "Volume cannot be deleted while migrating" +msgstr "" + +#: cinder/volume/api.py:229 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:285 cinder/volume/api.py:350 +#: cinder/volume/qos_specs.py:240 cinder/volume/volume_types.py:67 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:370 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:377 +msgid "status must be in-use to detach" +msgstr "" + +#: cinder/volume/api.py:388 +msgid "Volume status must be available to reserve" +msgstr "" + +#: cinder/volume/api.py:464 +msgid "Snapshot cannot be created while volume is migrating" +msgstr "" + +#: cinder/volume/api.py:468 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:490 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:502 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:553 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/api.py:581 cinder/volume/flows/api/create_volume.py:208 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:585 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:589 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:720 cinder/volume/api.py:772 +msgid "Volume status must be available/in-use." +msgstr "" + +#: cinder/volume/api.py:723 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/api.py:752 +msgid "Volume status must be available to extend." +msgstr "" + +#: cinder/volume/api.py:757 +#, python-format +msgid "" +"New size for extend must be greater than current size. (current: " +"%(size)s, extended: %(new_size)s)" +msgstr "" + +#: cinder/volume/api.py:778 +msgid "Volume is already part of an active migration" +msgstr "" + +#: cinder/volume/api.py:784 +msgid "volume must not have snapshots" +msgstr "" + +#: cinder/volume/api.py:797 +#, python-format +msgid "No available service named %s" +msgstr "" + +#: cinder/volume/api.py:803 +msgid "Destination host must be different than current host" +msgstr "" + +#: cinder/volume/api.py:833 +msgid "Source volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:837 +msgid "Destination volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:842 +#, python-format +msgid "Destination has migration_status %(stat)s, expected %(exp)s." +msgstr "" + +#: cinder/volume/api.py:853 +msgid "Volume status must be available to update readonly flag." 
+msgstr "" + +#: cinder/volume/api.py:862 +#, python-format +msgid "Unable to update type due to incorrect status on volume: %s" +msgstr "" + +#: cinder/volume/api.py:868 +#, python-format +msgid "Volume %s is already part of an active migration." +msgstr "" + +#: cinder/volume/api.py:874 +#, python-format +msgid "migration_policy must be 'on-demand' or 'never', passed: %s" +msgstr "" + +#: cinder/volume/api.py:887 +#, python-format +msgid "Invalid volume_type passed: %s" +msgstr "" + +#: cinder/volume/api.py:900 +#, python-format +msgid "New volume_type same as original: %s" +msgstr "" + +#: cinder/volume/api.py:915 +msgid "Retype cannot change encryption requirements" +msgstr "" + +#: cinder/volume/api.py:927 +msgid "Retype cannot change front-end qos specs for in-use volumes" +msgstr "" + +#: cinder/volume/driver.py:189 cinder/volume/drivers/netapp/nfs.py:174 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:282 +#, python-format +msgid "copy_data_between_volumes %(src)s -> %(dest)s." +msgstr "" + +#: cinder/volume/driver.py:295 cinder/volume/driver.py:309 +#, python-format +msgid "Failed to attach volume %(vol)s" +msgstr "" + +#: cinder/volume/driver.py:327 +#, python-format +msgid "Failed to copy volume %(src)s to %(dest)d" +msgstr "" + +#: cinder/volume/driver.py:340 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:358 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:394 +#, python-format +msgid "Unable to access the backend storage via the path %(path)s." +msgstr "" + +#: cinder/volume/driver.py:433 +#, python-format +msgid "Creating a new backup for volume %s." +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Restoring backup %(backup)s to volume %(volume)s." +msgstr "" + +#: cinder/volume/driver.py:474 +msgid "Extend volume not implemented" +msgstr "" + +#: cinder/volume/driver.py:533 cinder/volume/drivers/emc/emc_smis_iscsi.py:113 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:546 +#, python-format +msgid "ISCSI discovery attempt failed for:%s" +msgstr "" + +#: cinder/volume/driver.py:548 +#, python-format +msgid "Error from iscsiadm -m discovery: %s" +msgstr "" + +#: cinder/volume/driver.py:595 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:599 cinder/volume/drivers/emc/emc_smis_iscsi.py:156 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:696 +msgid "The volume driver requires the iSCSI initiator name in the connector." +msgstr "" + +#: cinder/volume/driver.py:726 cinder/volume/driver.py:845 +#: cinder/volume/drivers/eqlx.py:247 cinder/volume/drivers/lvm.py:359 +#: cinder/volume/drivers/zadara.py:650 +#: cinder/volume/drivers/emc/emc_smis_common.py:859 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:235 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:602 +#: cinder/volume/drivers/netapp/iscsi.py:1032 +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/nexenta/iscsi.py:538 +#: cinder/volume/drivers/windows/windows.py:205 +msgid "Updating volume stats" +msgstr "" + +#: cinder/volume/driver.py:924 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:203 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." 
+msgstr "" + +#: cinder/volume/manager.py:209 +msgid "" +"ThinLVMVolumeDriver is deprecated, please configure LVMISCSIDriver and " +"lvm_type=thin. Continuing with those settings." +msgstr "" + +#: cinder/volume/manager.py:228 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)" +msgstr "" + +#: cinder/volume/manager.py:235 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s" +msgstr "" + +#: cinder/volume/manager.py:244 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:257 +#, python-format +msgid "Failed to re-export volume %s: setting to error state" +msgstr "" + +#: cinder/volume/manager.py:264 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:271 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:273 +#, python-format +msgid "" +"Error encountered during re-exporting phase of driver initialization: " +"%(name)s" +msgstr "" + +#: cinder/volume/manager.py:283 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:328 cinder/volume/manager.py:330 +msgid "Failed to create manager volume flow" +msgstr "" + +#: cinder/volume/manager.py:374 cinder/volume/manager.py:391 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:380 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:389 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:394 +#, python-format +msgid "Cannot delete volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:422 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:427 +#, python-format +msgid "volume %s: glance metadata deleted" +msgstr "" + +#: cinder/volume/manager.py:430 +#, python-format +msgid "no glance metadata found for volume %s" +msgstr "" + +#: cinder/volume/manager.py:434 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:451 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:462 +#, python-format +msgid "snapshot %(snap_id)s: creating" +msgstr "" + +#: cinder/volume/manager.py:490 +#, python-format +msgid "" +"Failed updating %(snapshot_id)s metadata using the provided volumes " +"%(volume_id)s metadata" +msgstr "" + +#: cinder/volume/manager.py:496 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:508 cinder/volume/manager.py:518 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:526 +#, python-format +msgid "Cannot delete snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:556 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:559 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:579 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:583 +msgid "being attached by another host" +msgstr "" + +#: cinder/volume/manager.py:587 +msgid "being attached by different mode" +msgstr "" + +#: cinder/volume/manager.py:590 +msgid "status must be available or attaching" +msgstr "" + +#: cinder/volume/manager.py:698 +#, python-format +msgid "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "" + +#: cinder/volume/manager.py:760 +#, python-format +msgid "Unable to fetch connection information from backend: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:807 +#, python-format +msgid "Unable to terminate volume connection: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:854 +msgid "failed to create new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:857 +msgid "timeout creating new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:880 +#, python-format +msgid "Failed to copy volume %(vol1)s to %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:909 +#, python-format +msgid "" +"migrate_volume_completion: completing migration for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:921 +#, python-format +msgid "" +"migrate_volume_completion is cleaning up an error for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:940 +#, python-format +msgid "Failed to delete migration source vol %(vol)s: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:976 +#, python-format +msgid "volume %s: calling driver migrate_volume" +msgstr "" + +#: cinder/volume/manager.py:1016 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/manager.py:1024 +#, python-format +msgid "" +"Unable to update stats, %(driver_name)s -%(driver_version)s " +"%(config_group)s driver is uninitialized." +msgstr "" + +#: cinder/volume/manager.py:1044 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/manager.py:1091 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to extend volume by %(s_size)sG, " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/manager.py:1103 +#, python-format +msgid "volume %s: extending" +msgstr "" + +#: cinder/volume/manager.py:1105 +#, python-format +msgid "volume %s: extended successfully" +msgstr "" + +#: cinder/volume/manager.py:1107 +#, python-format +msgid "volume %s: Error trying to extend volume" +msgstr "" + +#: cinder/volume/manager.py:1169 +msgid "Failed to update usages while retyping volume." +msgstr "" + +#: cinder/volume/manager.py:1170 +msgid "Failed to get old volume type quota reservations" +msgstr "" + +#: cinder/volume/manager.py:1190 +#, python-format +msgid "Volume %s: retyped succesfully" +msgstr "" + +#: cinder/volume/manager.py:1193 +#, python-format +msgid "" +"Volume %s: driver error when trying to retype, falling back to generic " +"mechanism." +msgstr "" + +#: cinder/volume/manager.py:1204 +msgid "Retype requires migration but is not allowed." +msgstr "" + +#: cinder/volume/manager.py:1212 +msgid "Volume must not have snapshots."
+msgstr "" + +#: cinder/volume/qos_specs.py:57 +#, python-format +msgid "Valid consumer of QoS specs are: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:84 cinder/volume/qos_specs.py:105 +#: cinder/volume/qos_specs.py:155 cinder/volume/qos_specs.py:197 +#: cinder/volume/qos_specs.py:211 cinder/volume/qos_specs.py:225 +#: cinder/volume/volume_types.py:43 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:123 cinder/volume/qos_specs.py:140 +#: cinder/volume/qos_specs.py:272 cinder/volume/volume_types.py:52 +#: cinder/volume/volume_types.py:99 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/qos_specs.py:156 +#, python-format +msgid "Failed to get all associations of qos specs %s" +msgstr "" + +#: cinder/volume/qos_specs.py:189 +#, python-format +msgid "" +"Type %(type_id)s is already associated with another qos specs: " +"%(qos_specs_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:198 +#, python-format +msgid "Failed to associate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:212 +#, python-format +msgid "Failed to disassociate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:226 +#, python-format +msgid "Failed to disassociate qos specs %s." +msgstr "" + +#: cinder/volume/qos_specs.py:284 cinder/volume/volume_types.py:111 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/utils.py:144 +#, python-format +msgid "" +"Incorrect value error: %(blocksize)s, it may indicate that " +"'volume_dd_blocksize' was configured incorrectly. Fall back to default." +msgstr "" + +#: cinder/volume/volume_types.py:130 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:131 +#: cinder/volume/drivers/block_device.py:143 cinder/volume/drivers/lvm.py:654 +#: cinder/volume/drivers/lvm.py:669 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:157 cinder/volume/drivers/lvm.py:687 +#, python-format +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:183 cinder/volume/drivers/lvm.py:483 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:200 cinder/volume/drivers/lvm.py:504 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:272 cinder/volume/drivers/lvm.py:227 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:287 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:311 cinder/volume/drivers/lvm.py:300 +#: cinder/volume/drivers/zadara.py:509 cinder/volume/drivers/nexenta/nfs.py:189 +#, python-format +msgid "Creating clone of volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:380 +msgid "No free disk" +msgstr "" + +#: cinder/volume/drivers/block_device.py:393 +msgid "No big enough free disk" +msgstr "" + +#: cinder/volume/drivers/coraid.py:84 +#, python-format +msgid "Invalid ESM url scheme \"%s\". Supported https only." +msgstr "" + +#: cinder/volume/drivers/coraid.py:111 +msgid "Invalid REST handle name. Expected path." 
+msgstr "" + +#: cinder/volume/drivers/coraid.py:134 +#, python-format +msgid "Call to json.loads() failed: %(ex)s. Response: %(resp)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:224 +msgid "Session is expired. Relogin on ESM." +msgstr "" + +#: cinder/volume/drivers/coraid.py:244 +msgid "Reply is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:246 +msgid "Error message is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:284 +#, python-format +msgid "Coraid Appliance ping failed: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:297 +#, python-format +msgid "Volume \"%(name)s\" created with VSX LUN \"%(lun)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:311 +#, python-format +msgid "Volume \"%s\" deleted." +msgstr "" + +#: cinder/volume/drivers/coraid.py:315 +#, python-format +msgid "Resize volume \"%(name)s\" to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:319 +#, python-format +msgid "Repository for volume \"%(name)s\" found: \"%(repo)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:333 +#, python-format +msgid "Volume \"%(name)s\" resized. New size is %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:385 +msgid "Cannot create clone volume in different repository." +msgstr "" + +#: cinder/volume/drivers/coraid.py:505 +#, python-format +msgid "Initialize connection %(shelf)s/%(lun)s for %(name)s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:139 +#, python-format +msgid "" +"CLI output\n" +"%s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:154 +msgid "Reading CLI MOTD" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:158 +#, python-format +msgid "Setting CLI terminal width: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:162 +#, python-format +msgid "Sending CLI command: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:169 +msgid "Error executing EQL command" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:199 +#, python-format +msgid "EQL-driver: executing \"%s\"" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:208 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:383 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:215 cinder/volume/drivers/san/san.py:149 +#, python-format +msgid "Error running SSH command: %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:282 +#, python-format +msgid "Volume %s does not exist, it may have already been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:300 +#, python-format +msgid "EQL-driver: Setup is complete, group IP is %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:304 +msgid "Failed to setup the Dell EqualLogic driver" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:320 +#, python-format +msgid "Failed to create volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:329 +#, python-format +msgid "Volume %s was not found while trying to delete it" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:333 +#, python-format +msgid "Failed to delete volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:348 +#, python-format +msgid "Failed to create snapshot of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:361 +#, python-format +msgid "Failed to create volume from snapshot %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:374 +#, python-format +msgid "Failed to create clone of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:384 +#, python-format +msgid "Failed to delete snapshot %(snap)s of volume %(vol)s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:405 +#, python-format +msgid "Failed to initialize connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:415 +#, python-format +msgid "Failed to terminate connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:436 +#, python-format +msgid "Volume %s is not found!, it may have been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:440 +#, python-format +msgid "Failed to ensure export of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:459 +#, python-format +msgid "Failed to extend_volume %(name)s from %(current_size)sGB to %(new_size)sGB" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:86 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:91 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:103 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:161 +#, python-format +msgid "Cloning volume %(src)s to volume %(dst)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:166 +msgid "Volume status must be 'available'." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:202 cinder/volume/drivers/nfs.py:122 +#: cinder/volume/drivers/netapp/nfs.py:753 +#, python-format +msgid "casted to %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:216 +msgid "Snapshot status must be \"available\" to clone." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:238 +#, python-format +msgid "snapshot: %(snap)s, volume: %(vol)s, volume_size: %(size)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:257 +#, python-format +msgid "will copy from snapshot at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:275 cinder/volume/drivers/nfs.py:172 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:373 +#, python-format +msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:403 +#, python-format +msgid "nova call result: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:405 +msgid "Call to Nova to create snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:427 +msgid "Nova returned \"error\" status while creating snapshot." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:431 +#, python-format +msgid "Status of snapshot %(id)s is now %(status)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:444 +#, python-format +msgid "Timed out while waiting for Nova update for creation of snapshot %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:456 +#, python-format +msgid "create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:457 +#, python-format +msgid "volume id: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:532 +msgid "'active' must be present when writing snap_info." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:562 +#, python-format +msgid "deleting snapshot %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:566 +msgid "Volume status must be \"available\" or \"in-use\"." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:582 +#, python-format +msgid "" +"Snapshot record for %s is not present, allowing snapshot_delete to " +"proceed."
+msgstr "" + +#: cinder/volume/drivers/glusterfs.py:587 +#, python-format +msgid "snapshot_file for this snap is %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:608 +#, python-format +msgid "No base file found for %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:625 +#, python-format +msgid "No %(base_id)s found for %(file)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:680 +#, python-format +msgid "No file found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:690 +#, python-format +msgid "No snap found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:701 +#, python-format +msgid "No file depends on %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:727 +#, python-format +msgid "Check condition failed: %s expected to be None." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:778 +msgid "Call to Nova delete snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:796 +#, python-format +msgid "status of snapshot %s is still \"deleting\"... waiting" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:802 +#, python-format +msgid "Unable to delete snapshot %(id)s, status: %(status)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:815 +#, python-format +msgid "Timed out while waiting for Nova update for deletion of snapshot %(id)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:904 +#, python-format +msgid "%s must be a valid raw or qcow2 image." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:967 +msgid "Extend volume is only supported for this driver when no snapshots exist." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:975 +#, python-format +msgid "Unrecognized backing format: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:990 +#, python-format +msgid "creating new volume at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:993 +#, python-format +msgid "file already exists at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1019 cinder/volume/drivers/nfs.py:159 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1021 +#, python-format +msgid "Available shares: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1038 +#, python-format +msgid "" +"GlusterFS share at %(dir)s is not writable by the Cinder volume service. " +"Snapshot operations will not be supported." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:96 +#, python-format +msgid "GPFS is not active. Detailed output: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:97 +#, python-format +msgid "GPFS is not running - state: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:140 +msgid "Option gpfs_mount_point_base is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:147 +msgid "Option gpfs_images_share_mode is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:153 +msgid "Option gpfs_images_dir is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:160 +#, python-format +msgid "" +"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " +"belong to different file systems" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:169 +#, python-format +msgid "" +"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in " +"cluster daemon level %(cur)s - must be at least at level %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:183 +#, python-format +msgid "%s must be an absolute path." 
+msgstr "" + +#: cinder/volume/drivers/gpfs.py:188 +#, python-format +msgid "%s is not a directory." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:197 +#, python-format +msgid "" +"The GPFS filesystem %(fs)s is not at the required release level. Current" +" level is %(cur)s, must be at least %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:556 +#, python-format +msgid "Failed to resize volume %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:604 +#, python-format +msgid "mkfs failed on volume %(vol)s, error message was: %(err)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:637 +#, python-format +msgid "" +"%s cannot be accessed. Verify that GPFS is active and file system is " +"mounted." +msgstr "" + +#: cinder/volume/drivers/lvm.py:189 +#, python-format +msgid "Unable to delete due to existing snapshot for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:215 +#, python-format +msgid "Volume device file path %s does not exist." +msgstr "" + +#: cinder/volume/drivers/lvm.py:221 +#, python-format +msgid "Size for volume: %s not found, cannot secure delete." +msgstr "" + +#: cinder/volume/drivers/lvm.py:262 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:361 +#, python-format +msgid "Unable to update stats on non-initialized Volume Group: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:462 +#, python-format +msgid "Error creating iSCSI target, retrying creation for target: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:482 +#, python-format +msgid "volume_info:%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:518 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:519 cinder/volume/drivers/lvm.py:724 +#: cinder/volume/drivers/huawei/rest_common.py:1225 +#, python-format +msgid "%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:573 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/nfs.py:109 +msgid "Driver specific implementation needs to return mount_point_base." +msgstr "" + +#: cinder/volume/drivers/nfs.py:263 +#, python-format +msgid "Expected volume size was %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:264 +#, python-format +msgid " but size is now %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:361 +#, python-format +msgid "%s is already mounted" +msgstr "" + +#: cinder/volume/drivers/nfs.py:421 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:426 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/nfs.py:431 +#, python-format +msgid "NFS config 'nfs_oversub_ratio' invalid. Must be > 0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:439 +#, python-format +msgid "NFS config 'nfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:493 +#, python-format +msgid "Selected %s as target nfs share."
+msgstr "" + +#: cinder/volume/drivers/nfs.py:526 +#, python-format +msgid "%s is above nfs_used_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:529 +#, python-format +msgid "%s is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:532 +#, python-format +msgid "%s reserved space is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/rbd.py:160 +#, python-format +msgid "Invalid argument - whence=%s not supported" +msgstr "" + +#: cinder/volume/drivers/rbd.py:164 +msgid "Invalid argument" +msgstr "" + +#: cinder/volume/drivers/rbd.py:183 +msgid "fileno() not supported by RBD()" +msgstr "" + +#: cinder/volume/drivers/rbd.py:210 +#, python-format +msgid "error opening rbd image %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:259 +msgid "rados and rbd python libraries not found" +msgstr "" + +#: cinder/volume/drivers/rbd.py:265 +msgid "error connecting to ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:346 cinder/volume/drivers/sheepdog.py:178 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:377 +#, python-format +msgid "clone depth exceeds limit of %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:411 +#, python-format +msgid "maximum clone depth (%d) has been reached - flattening source volume" +msgstr "" + +#: cinder/volume/drivers/rbd.py:423 +#, python-format +msgid "flattening source volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:435 +#, python-format +msgid "creating snapshot='%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:445 +#, python-format +msgid "cloning '%(src_vol)s@%(src_snap)s' to '%(dest)s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:459 +msgid "clone created successfully" +msgstr "" + +#: cinder/volume/drivers/rbd.py:468 +#, python-format +msgid "creating volume '%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:484 +#, python-format +msgid "flattening %(pool)s/%(img)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:490 +#, python-format +msgid "cloning %(pool)s/%(img)s@%(snap)s to %(dst)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:527 +msgid "volume has no backup snaps" +msgstr "" + +#: cinder/volume/drivers/rbd.py:550 +#, python-format +msgid "volume %s is not a clone" +msgstr "" + +#: cinder/volume/drivers/rbd.py:568 +#, python-format +msgid "deleting parent snapshot %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:579 +#, python-format +msgid "deleting parent %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:593 +#, python-format +msgid "volume %s no longer exists in backend" +msgstr "" + +#: cinder/volume/drivers/rbd.py:609 +msgid "volume has clone snapshot(s)" +msgstr "" + +#: cinder/volume/drivers/rbd.py:625 +#, python-format +msgid "deleting rbd volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:629 +msgid "" +"ImageBusy error raised while deleting rbd volume. This may have been " +"caused by a connection from a client that has crashed and, if so, may be " +"resolved by retrying the delete after 30 seconds has elapsed." 
+msgstr "" + +#: cinder/volume/drivers/rbd.py:642 +msgid "volume is a clone so cleaning references" +msgstr "" + +#: cinder/volume/drivers/rbd.py:696 +#, python-format +msgid "connection data: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:705 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:709 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:712 +msgid "Not an rbd snapshot" +msgstr "" + +#: cinder/volume/drivers/rbd.py:724 +#, python-format +msgid "not cloneable: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:728 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:733 +msgid "rbd image clone requires image format to be 'raw' but image {0} is '{1}'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:747 +#, python-format +msgid "Unable to open image %(loc)s: %(err)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:817 +msgid "volume backup complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:830 +msgid "volume restore complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:840 cinder/volume/drivers/sheepdog.py:195 +#, python-format +msgid "Failed to Extend Volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:845 cinder/volume/drivers/sheepdog.py:200 +#: cinder/volume/drivers/windows/windows.py:223 +#, python-format +msgid "Extend volume from %(old_size)s GB to %(new_size)s GB." +msgstr "" + +#: cinder/volume/drivers/scality.py:67 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:78 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:84 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:105 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:139 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:59 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:64 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:144 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:151 +#, python-format +msgid "" +"Failed to make httplib connection to SolidFire Cluster: %s (verify san_ip " +"settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:154 +#, python-format +msgid "Failed to make httplib connection: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:161 +#, python-format +msgid "" +"Request to SolidFire cluster returned bad status: %(status)s / %(reason)s" +" (check san_login/san_password settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:166 +#, python-format +msgid "HTTP request failed, with status: %(status)s and reason: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:177 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:183 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:187 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:189 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:195 +#, python-format +msgid "Detected
xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:202 +#: cinder/volume/drivers/solidfire.py:271 +#: cinder/volume/drivers/solidfire.py:366 +#, python-format +msgid "API response: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:222 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:253 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:315 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:398 +msgid "Failed to get model update from clone" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:410 +#, python-format +msgid "Failed volume create: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:425 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:460 +#, python-format +msgid "Failed to get SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:469 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:478 +#, python-format +msgid "Volume %s, not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:481 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:550 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:554 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:556 +msgid "This usually means the volume was never successfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:569 +#, python-format +msgid "Failed to delete SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:572 +#: cinder/volume/drivers/solidfire.py:646 +#: cinder/volume/drivers/solidfire.py:709 +#: cinder/volume/drivers/solidfire.py:734 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:575 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:579 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:587 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:638 +msgid "Entering SolidFire extend_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:660 +msgid "Leaving SolidFire extend_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:665 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:673 +msgid "Failed to get updated stats" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:703 +#: cinder/volume/drivers/solidfire.py:728 +msgid "Entering SolidFire attach_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:773 +msgid "Leaving SolidFire transfer volume" +msgstr "" + +#: cinder/volume/drivers/zadara.py:236 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:260 +#, python-format +msgid "Operation completed. 
%(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:357 +#, python-format +msgid "Pool %(name)s: %(total)sGB total, %(free)sGB free" +msgstr "" + +#: cinder/volume/drivers/zadara.py:408 cinder/volume/drivers/zadara.py:531 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:438 +#, python-format +msgid "Create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:445 cinder/volume/drivers/zadara.py:490 +#: cinder/volume/drivers/zadara.py:516 +#, python-format +msgid "Volume %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:456 +#, python-format +msgid "Delete snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:464 +#, python-format +msgid "snapshot: original volume %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:472 +#, python-format +msgid "snapshot: snapshot %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:483 +#, python-format +msgid "Creating volume from snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:496 +#, python-format +msgid "Snapshot %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:614 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:40 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:79 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:83 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:91 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:98 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:107 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:115 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:130 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:137 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:144 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:152 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:157 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:167 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:177 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:188 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:197 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:218 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:230 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:241 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:257 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:266 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:278 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:287 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:292 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:302 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:312 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:321 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:342 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. 
Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:354 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:365 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:381 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:390 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:402 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:411 +msgid "Entering delete_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:413 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:420 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:430 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:438 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:442 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:456 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:465 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:472 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:476 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:488 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:497 +#: cinder/volume/drivers/emc/emc_smis_common.py:567 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:502 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:518 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:527 +#, python-format +msgid "" +"Error Create Snapshot: %(snapshot)s Volume: %(volume)s Error: " +"%(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:535 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:541 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:545 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:551 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:559 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:574 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:590 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:599 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:611 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:621 +#, python-format +msgid "Create export: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:626 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:648 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:663 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:674 +#, python-format +msgid "Error mapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:678 +#, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:694 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:707 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:711 +#, python-format +msgid "HidePaths for volume %s completed successfully." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:724 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:739 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:744 +#, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:757 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:770 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:775 +#, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:781 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:790 +#: cinder/volume/drivers/emc/emc_smis_common.py:820 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:804 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:810 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:834 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:840 +#, python-format +msgid "Volume %s is already mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:852 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:884 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:887 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:903 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:906 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:928 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:948 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:952 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:959 +msgid "Cannot connect to ECOM server" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:971 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:984 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:997 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1010 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1054 +#, python-format +msgid "Pool %(storage_type)s is not found." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1060 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1066 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1082 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1114 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1117 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1130 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1158 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1184 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1188 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1248 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1289 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1302 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1314 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1326 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1361 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1404 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1409 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1419 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1441 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1463 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1491 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1520 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1526 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1538 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1548 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1550 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1566 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:152 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:161 +#, python-format +msgid "Cannot find device number for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:191 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:198 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:215 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:70 +#, python-format +msgid "Range: start LU: %(start)s, end LU: %(end)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:84 +#, python-format +msgid "setting LU upper (end) limit to %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:92 +#, python-format +msgid "%(element)s: %(val)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:103 cinder/volume/drivers/hds/hds.py:105 +#, python-format +msgid "XML exception reading parameter: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:178 +#, python-format +msgid "portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:197 +#, python-format +msgid "No configuration found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:250 +#, python-format +msgid "HDP not found: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:289 +#, python-format +msgid "iSCSI portal not found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:327 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:355 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is cloned." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:372 +#, python-format +msgid "LUN %(lun)s extended to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:395 +#, python-format +msgid "delete lun %(lun)s on %(name)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:480 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created from snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:503 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is created as snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:522 +#, python-format +msgid "LUN %s is deleted." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:57 +msgid "_instantiate_driver: configuration not found." 
+msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:64 +#, python-format +msgid "" +"_instantiate_driver: Loading %(protocol)s driver for Huawei OceanStor " +"%(product)s series storage arrays." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:84 +#, python-format +msgid "" +"\"Product\" or \"Protocol\" is illegal. \"Product\" should be set to " +"either T, Dorado or HVS. \"Protocol\" should be set to either iSCSI or " +"FC. Product: %(product)s Protocol: %(protocol)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:74 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s host: %(host)s initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:92 +#: cinder/volume/drivers/huawei/huawei_t.py:461 +#, python-format +msgid "initialize_connection: Target FC ports WWNS: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:101 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(ini)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:159 +#: cinder/volume/drivers/huawei/rest_common.py:1278 +#, python-format +msgid "" +"_get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " +"check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:206 +#: cinder/volume/drivers/huawei/rest_common.py:1083 +#, python-format +msgid "_get_tgt_iqn: iSCSI IP is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:234 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:248 +#, python-format +msgid "" +"_get_iscsi_tgt_port_info: Failed to get iSCSI port info. Please make sure" +" the iSCSI port IP %s is configured in array." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:323 +#: cinder/volume/drivers/huawei/huawei_t.py:552 +#, python-format +msgid "" +"terminate_connection: volume: %(vol)s, host: %(host)s, connector: " +"%(initiator)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:351 +#, python-format +msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:436 +msgid "validate_connector: The FC driver requires the wwpns in the connector." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:443 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:578 +#, python-format +msgid "_remove_fc_ports: FC port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:40 +#, python-format +msgid "parse_xml_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:129 +#, python-format +msgid "_get_host_os_type: Host %(ip)s OS type is %(os)s."
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:59 +#, python-format +msgid "HVS Request URL: %(url)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:60 +#, python-format +msgid "HVS Request Data: %(data)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:73 +#, python-format +msgid "HVS Response Data: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:75 +#, python-format +msgid "Bad response from server: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:82 +msgid "JSON transfer error" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:102 +#, python-format +msgid "Login error, reason is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:166 +#, python-format +msgid "" +"%(err)s\n" +"result: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:173 +#, python-format +msgid "%s \"data\" was not in result." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:208 +msgid "Can't find the Qos policy in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:246 +msgid "Can't find lun or lun group in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:280 +#, python-format +msgid "Invalid resource pool: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:298 +#, python-format +msgid "Get pool info error, pool name is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:327 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:354 +#, python-format +msgid "_stop_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:474 +#, python-format +msgid "" +"_mapping_hostgroup_and_lungroup: lun_group: %(lun_group)sview_id: " +"%(view_id)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:511 +#: cinder/volume/drivers/huawei/rest_common.py:543 +#, python-format +msgid "initiator name:%(initiator_name)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:527 +#, python-format +msgid "host lun id is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:553 +#, python-format +msgid "the free wwns %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:574 +#, python-format +msgid "the fc server properties is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:688 +#, python-format +msgid "JSON transfer data error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:874 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:937 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:964 +#, python-format +msgid "" +"PrefetchType config is wrong. PrefetchType must in 1,2,3,4. fetchtype " +"is:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:970 +msgid "Use default prefetch fetchtype. Prefetch fetchtype:Intelligent." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:982 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal.LUNcopy name: " +"%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1056 +#, python-format +msgid "" +"_get_iscsi_port_info: Failed to get iscsi port info through config IP " +"%(ip)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1101 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1124 +#, python-format +msgid "_parse_volume_type: type id: %(type_id)s config parameter is: %(params)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1157 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the configuration file " +"%(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1162 +#, python-format +msgid "The config parameters are: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1239 +#: cinder/volume/drivers/huawei/ssh_common.py:118 +#: cinder/volume/drivers/huawei/ssh_common.py:1265 +#, python-format +msgid "_check_conf_file: Config file invalid. %s must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1246 +#: cinder/volume/drivers/huawei/ssh_common.py:125 +msgid "_check_conf_file: Config file invalid. StoragePool must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1256 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1300 +msgid "Can not find lun in array" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:54 +#, python-format +msgid "ssh_read: Read SSH timeout. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:70 +msgid "No response message. Please check system status." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:101 +#: cinder/volume/drivers/huawei/ssh_common.py:1249 +msgid "do_setup" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:135 +#: cinder/volume/drivers/huawei/ssh_common.py:1287 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType is invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:169 +#, python-format +msgid "_get_login_info: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:224 +#, python-format +msgid "create_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:242 +#, python-format +msgid "" +"_name_translate: Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:279 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the element in configuration " +"file %(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:373 +#: cinder/volume/drivers/huawei/ssh_common.py:1451 +#, python-format +msgid "LUNType must be \"Thin\" or \"Thick\". LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:395 +msgid "" +"_parse_conf_lun_params: Use default prefetch type. 
Prefetch type: " +"Intelligent" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:421 +#, python-format +msgid "" +"_get_maximum_capacity_pool_id: Failed to get pool id. Please check config" +" file and make sure the StoragePool %s is created in storage array." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:436 +#, python-format +msgid "CLI command: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:466 +#, python-format +msgid "" +"_execute_cli: Can not connect to IP %(old)s, try to connect to the other " +"IP %(new)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:501 +#, python-format +msgid "_execute_cli: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:511 +#, python-format +msgid "delete_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:516 +#, python-format +msgid "delete_volume: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:570 +#, python-format +msgid "" +"create_volume_from_snapshot: snapshot name: %(snapshot)s, volume name: " +"%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:580 +#, python-format +msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:650 +#, python-format +msgid "_wait_for_luncopy: LUNcopy %(luncopyname)s status is %(status)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:688 +#, python-format +msgid "create_cloned_volume: src volume: %(src)s, tgt volume: %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:697 +#, python-format +msgid "Source volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:739 +#, python-format +msgid "" +"extend_volume: extended volume name: %(extended_name)s new added volume " +"name: %(added_name)s new added volume size: %(added_size)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:747 +#, python-format +msgid "extend_volume: volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:779 +#, python-format +msgid "create_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:785 +msgid "create_snapshot: Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:792 +#, python-format +msgid "create_snapshot: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:855 +#, python-format +msgid "delete_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:865 +#, python-format +msgid "" +"delete_snapshot: Can not delete snapshot %s for it is a source LUN of " +"LUNCopy." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:873 +#, python-format +msgid "delete_snapshot: Snapshot %(snap)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:916 +#, python-format +msgid "" +"%(func)s: %(msg)s\n" +"CLI command: %(cmd)s\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:933 +#, python-format +msgid "map_volume: Volume %s was not found." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1079 +#, python-format +msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1102 +#, python-format +msgid "remove_map: Host %s does not exist." 
+msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1106 +#, python-format +msgid "remove_map: Volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1119 +#, python-format +msgid "remove_map: No map between host %(host)s and volume %(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1138 +#, python-format +msgid "" +"_delete_map: There are IOs accessing the system. Retry to delete host map" +" %(mapid)s 10s later." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1146 +#, python-format +msgid "" +"_delete_map: Failed to delete host map %(mapid)s.\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1185 +msgid "_update_volume_stats: Updating volume stats." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1277 +msgid "_check_conf_file: Config file invalid. StoragePool must be specified." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1311 +msgid "" +"_get_device_type: The driver only supports Dorado5100 and Dorado 2100 G2 " +"now." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1389 +#, python-format +msgid "" +"create_volume_from_snapshot: %(device)s does not support create volume " +"from snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1396 +#, python-format +msgid "create_cloned_volume: %(device)s does not support clone volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1404 +#, python-format +msgid "extend_volume: %(device)s does not support extend volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1413 +#, python-format +msgid "create_snapshot: %(device)s does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:132 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:142 +#, python-format +msgid "Failed getting details for pool %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:179 +msgid "do_setup: No configured nodes." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:182 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:186 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:190 +msgid "Unable to determine system name" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:193 +msgid "Unable to determine system id" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:196 +msgid "Unable to determine pool extent size" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:203 +#: cinder/volume/drivers/netapp/iscsi.py:122 +#: cinder/volume/drivers/netapp/nfs.py:639 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:157 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:209 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:217 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:225 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:235 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:254 +msgid "The connector does not contain the required information." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:277 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:297 +msgid "CHAP secret exists for host but CHAP is disabled" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:302 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:314 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:316 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:333 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:342 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:365 +msgid "" +"Could not get FC connection information for the host-volume connection. " +"Is the host configured properly for FC connections?" 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:380 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:385 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:403 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:411 +msgid "terminate_connection: Failed to get host name from connector." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:421 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:447 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:459 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:470 +#, python-format +msgid "enter: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:474 +msgid "extend_volume: Extending a volume with snapshots is not supported." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:481 +#, python-format +msgid "leave: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:497 +#, python-format +msgid "enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:523 +#, python-format +msgid "leave: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:540 +#, python-format +msgid "" +"enter: retype: id=%(id)s, new_type=%(new_type)s, diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:581 +#, python-format +msgid "" +"exit: retype: id=%(id)s, new_type=%(new_type)s, diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:622 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:623 +msgid "_update_volume_stats: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:44 +#, python-format +msgid "Could not find key in output of command %(cmd)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:64 +#, python-format +msgid "Failed to get code level (%s)."
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:86 +#, python-format +msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:143 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:165 +#, python-format +msgid "Failed to find host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:178 +#, python-format +msgid "enter: get_host_from_connector: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:207 +#, python-format +msgid "leave: get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:218 +#, python-format +msgid "enter: create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:224 +msgid "create_host: Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:235 +msgid "create_host: No initiators or wwpns supplied." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:265 +#, python-format +msgid "leave: create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:275 +#, python-format +msgid "enter: map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:301 +#, python-format +msgid "" +"leave: map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host " +"%(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:311 +#, python-format +msgid "enter: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:318 +#, python-format +msgid "unmap_vol_from_host: No mapping of volume %(vol_name)s to any host found." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:324 +#, python-format +msgid "" +"unmap_vol_from_host: Multiple mappings of volume %(vol_name)s found, no " +"host specified." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:336 +#, python-format +msgid "" +"unmap_vol_from_host: No mapping of volume %(vol_name)s to host %(host) " +"found." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:348 +#, python-format +msgid "leave: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:377 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:383 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:390 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:397 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:402 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:408 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:417 +#, python-format +msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:452 +msgid "Protocol must be specified as ' iSCSI' or ' FC'." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:495 +#, python-format +msgid "enter: create_vdisk: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:498 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:525 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping%(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:535 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within theallotted %(to)d " +"seconds timeout. Terminating." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:544 +#, python-format +msgid "" +"enter: run_flashcopy: execute FlashCopy from source %(source)s to target " +"%(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:552 +#, python-format +msgid "leave: run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:572 +#, python-format +msgid "Loopcall: _check_vdisk_fc_mappings(), vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:595 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:631 +#, python-format +msgid "Calling _ensure_vdisk_no_fc_mappings: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:639 +#, python-format +msgid "enter: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:641 +#, python-format +msgid "Tried to delete non-existant vdisk %s." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:645 +#, python-format +msgid "leave: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:649 +#, python-format +msgid "enter: create_copy: snapshot %(src)s to %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:654 +#, python-format +msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:669 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt)s from vdisk %(src)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:691 +msgid "migrate_volume started without a vdisk copy in the expected pool." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:743 +#, python-format +msgid "" +"Ignore change IO group as storage code level is %(code_level)s, below " +"then 6.4.0.0" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:35 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:211 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:244 +#, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:53 +#, python-format +msgid "Expected no output from CLI command %(cmd)s, got %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:65 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:256 +#, python-format +msgid "" +"Failed to parse CLI output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:142 +msgid "Must pass wwpn or host to lsfabric." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:158 +#, python-format +msgid "Did not find success message nor error for %(fun)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:163 +msgid "" +"storwize_svc_multihostmap_enabled is set to False, not allowing multi " +"host mapping." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:347 +#, python-format +msgid "Did not find expected key %(key)s in %(fun)s: %(raw)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:382 +#, python-format +msgid "" +"Unexpected CLI response: header/row mismatch. header: %(header)s, row: " +"%(row)s" +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:419 +#, python-format +msgid "No element by given name %s." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:440 +msgid "Not a valid value for NaElement." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:444 +msgid "NaElement name cannot be null." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:468 +msgid "Type cannot be converted into NaElement." 
+msgstr "" + +#: cinder/volume/drivers/netapp/common.py:75 +msgid "Required configuration not found" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:103 +#, python-format +msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:109 +#, python-format +msgid "Storage family %s is not supported" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:116 +#, python-format +msgid "No default storage protocol found for storage family %(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:123 +#, python-format +msgid "" +"Protocol %(storage_protocol)s is not supported for storage family " +"%(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:130 +#, python-format +msgid "" +"NetApp driver of family %(storage_family)s and protocol " +"%(storage_protocol)s loaded" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:139 +msgid "Only loading netapp drivers supported." +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:158 +#, python-format +msgid "" +"The configured NetApp driver is deprecated. Please refer the link to " +"resolve the issue '%s'." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:69 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:105 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:150 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:166 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:175 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:191 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:227 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:232 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:238 +#, python-format +msgid "Failed to get LUN target details for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:249 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:252 +#, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:290 +#, python-format +msgid "Snapshot %s deletion successful" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:310 +#: cinder/volume/drivers/netapp/iscsi.py:565 +#: cinder/volume/drivers/netapp/nfs.py:99 +#: cinder/volume/drivers/netapp/nfs.py:206 +#, python-format +msgid "Resizing %s failed. Cleaning volume." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:325 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:412 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:431 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:511 +msgid "Object is not a NetApp LUN." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:543 +#, python-format +msgid "Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:545 +#, python-format +msgid "Error getting lun attribute. Exception: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:600 +#, python-format +msgid "No need to extend volume %s as it is already the requested new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:606 +#, python-format +msgid "Resizing lun %s directly to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:633 +#, python-format +msgid "Lun %(path)s geometry failed. Message - %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:662 +#, python-format +msgid "Moving lun %(name)s to %(new_name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:677 +#, python-format +msgid "Resizing lun %s using sub clone to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:684 +#, python-format +msgid "%s cannot be sub clone resized as it is hosted on compressed volume" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:690 +#, python-format +msgid "%s cannot be sub clone resized as it contains no blocks." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:707 +#, python-format +msgid "Post clone resize lun %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:718 +#, python-format +msgid "Failure staging lun %s to tmp." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:723 +#, python-format +msgid "Failure moving new cloned lun to %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:727 +#, python-format +msgid "Failure deleting staged tmp lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:730 +#, python-format +msgid "Unknown exception in post clone resize lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:732 +#, python-format +msgid "Exception details: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:736 +msgid "Getting lun block count." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:741 +#, python-format +msgid "Failure getting lun info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:785 +#, python-format +msgid "Failed to get vol with required size and extra specs for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:796 +#, python-format +msgid "Error provisioning vol %(name)s on %(volume)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:841 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:982 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:986 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1087 +msgid "Cluster ssc is not updated. No volume stats found." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1149 +#: cinder/volume/drivers/netapp/nfs.py:1080 +msgid "Unsupported ONTAP version. ONTAP version 7.3.1 and above is supported." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1153 +#: cinder/volume/drivers/netapp/nfs.py:1084 +#: cinder/volume/drivers/netapp/utils.py:320 +msgid "Api version could not be determined." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1164 +#, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1273 +#, python-format +msgid "Error finding luns for volume %s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1390 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1393 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1456 +msgid "Volume refresh job already running. Returning..." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1462 +#, python-format +msgid "Error refreshing vol capacity. Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1470 +#, python-format +msgid "Refreshing capacity info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:104 +#: cinder/volume/drivers/netapp/nfs.py:211 +#, python-format +msgid "NFS file %s not discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:224 +#, python-format +msgid "Copied image to volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:230 +#, python-format +msgid "Registering image in cache %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:237 +#, python-format +msgid "" +"Exception while registering image %(image_id)s in cache. Exception: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:250 +#, python-format +msgid "Found cache file for image %(image_id)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:263 +#, python-format +msgid "Cloning img from cache for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:271 +msgid "Image cache cleaning in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:282 +msgid "Image cache cleaning in progress." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:293 +#, python-format +msgid "Cleaning cache for share %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:298 +#, python-format +msgid "Files to be queued for deletion %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:305 +#, python-format +msgid "Exception during cache cleaning %(share)s. Message - %(ex)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:311 +msgid "Image cache cleaning done." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:336 +#, python-format +msgid "Bytes to free %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:343 +#, python-format +msgid "Delete file path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:358 +#, python-format +msgid "Deleting file at path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:363 +#, python-format +msgid "Exception during deleting %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:395 +#, python-format +msgid "Unexpected exception in cloning image %(image_id)s. 
Message: %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:411 +#, python-format +msgid "Cloning image %s from cache" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:415 +#, python-format +msgid "Cache share: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:425 +#, python-format +msgid "Unexpected exception during image cloning in share %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:431 +#, python-format +msgid "Cloning image %s directly in share" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:436 +#, python-format +msgid "Share is cloneable %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:443 +#, python-format +msgid "Image is raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:450 +#, python-format +msgid "Image will locally be converted to raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:457 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:467 +#, python-format +msgid "Performing post clone for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:474 +msgid "NFS file could not be discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:478 +msgid "Checking file for resize" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:482 +#, python-format +msgid "Resizing file to %sG" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:488 +msgid "Resizing image file failed." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:510 +msgid "Discover file retries exhausted." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:529 +#, python-format +msgid "Image location not in the expected format %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:557 +#, python-format +msgid "Found possible share matches %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:561 +msgid "Unexpected exception while short listing used share." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:599 +#, python-format +msgid "Extending volume %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:710 +#, python-format +msgid "Shares on vserver %s will only be used for provisioning." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:714 +#: cinder/volume/drivers/netapp/nfs.py:892 +msgid "No vserver set in config. SSC will be disabled." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:757 +#, python-format +msgid "Exception creating vol %(name)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:765 +#, python-format +msgid "Volume %s could not be created on shares." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:815 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:856 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:862 +#, python-format +msgid "" +"Cloning with params volume %(volume)s, src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:924 +msgid "No cluster ssc stats found. Wait for next volume stats update." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:941 +msgid "No shares found hence skipping ssc refresh." 
+msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:978 +#: cinder/volume/drivers/netapp/nfs.py:1221 +#, python-format +msgid "Shortlisted del elg files %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:983 +#: cinder/volume/drivers/netapp/nfs.py:1226 +#, python-format +msgid "Getting file usage for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:988 +#: cinder/volume/drivers/netapp/nfs.py:1231 +#, python-format +msgid "file-usage for path %(path)s is %(bytes)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1005 +#: cinder/volume/drivers/netapp/nfs.py:1268 +#, python-format +msgid "Share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1007 +#: cinder/volume/drivers/netapp/nfs.py:1270 +#, python-format +msgid "No share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1038 +#, python-format +msgid "Found volume %(vol)s for share %(share)s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1129 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1139 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:241 +#, python-format +msgid "Unexpected error while creating ssc vol list. Message - %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:272 +#, python-format +msgid "Exception querying aggr options. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:313 +#, python-format +msgid "Exception querying sis information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:347 +#, python-format +msgid "Exception querying mirror information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:379 +#, python-format +msgid "Exception querying storage disk. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:421 +#, python-format +msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:455 +#, python-format +msgid "Successfully completed stale refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:482 +#, python-format +msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:488 +#, python-format +msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:500 +msgid "Backend not a VolumeDriver." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:502 +msgid "Backend server not NaServer." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:505 +msgid "ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:517 +msgid "refresh stale ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:616 +msgid "Fatal error: User not permitted to query NetApp volumes." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:623 +#, python-format +msgid "" +"The user does not have access or sufficient privileges to use all ssc " +"apis. The ssc features %s may not work as expected." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:122 +msgid "ems executed successfully." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:124 +#, python-format +msgid "Failed to invoke ems. 
Message : %s" +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:137 +msgid "" +"It is not the recommended way to use drivers by NetApp. Please use " +"NetAppDriver to achieve the functionality." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:160 +msgid "Requires an NaServer instance." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:317 +msgid "Unsupported Clustered Data ONTAP version." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:99 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:150 +#, python-format +msgid "Extending volume: %(id)s New size: %(size)s GB" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:166 +#, python-format +msgid "Volume %s does not exist, it seems it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:179 +#, python-format +msgid "Cannot delete snapshot %(origin): %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:190 +#, python-format +msgid "Creating temp snapshot of the original volume: %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:200 +#: cinder/volume/drivers/nexenta/nfs.py:200 +#, python-format +msgid "Volume creation failed, deleting created snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:205 +#: cinder/volume/drivers/nexenta/nfs.py:205 +#, python-format +msgid "Failed to delete zfs snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:223 +#, python-format +msgid "Enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:250 +#, python-format +msgid "Remote NexentaStor appliance at %s should be SSH-bound." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:267 +#, python-format +msgid "" +"Cannot send source snapshot %(src)s to destination %(dst)s. Reason: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:275 +#, python-format +msgid "" +"Cannot delete temporary source snapshot %(src)s on NexentaStor Appliance:" +" %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:281 +#, python-format +msgid "Cannot delete source volume %(volume)s on NexentaStor Appliance: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:318 +#, python-format +msgid "Snapshot %s does not exist, it seems it was already deleted." 
+msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:439 +#: cinder/volume/drivers/windows/windows_utils.py:230 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:449 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:461 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:471 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:481 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:514 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:522 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:83 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:88 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:89 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:90 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:96 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:85 +#, python-format +msgid "Volume %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:89 +#, python-format +msgid "Folder %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:114 +#, python-format +msgid "Creating folder on Nexenta Store %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:146 +#, python-format +msgid "Cannot destroy created folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:176 +#, python-format +msgid "Cannot destroy cloned folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:227 +#, python-format +msgid "Folder %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:237 +#: cinder/volume/drivers/nexenta/nfs.py:268 +#, python-format +msgid "Snapshot %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:302 +#, python-format +msgid "Creating regular file: %s.This may take some time." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:313 +#, python-format +msgid "Regular file: %s created." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:365 +#, python-format +msgid "Sharing folder %s on Nexenta Store" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:393 +#, python-format +msgid "Shares loaded: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/utils.py:46 +#, python-format +msgid "Invalid value: \"%s\"" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:93 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:99 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:107 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:137 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:190 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:246 +#, python-format +msgid "Snapshot info: %(name)s => %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:321 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:79 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:166 +#, python-format +msgid "Invalid hp3parclient version. Version %s or greater required." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:179 +#, python-format +msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:193 +#, python-format +msgid "HP3PARCommon %(common_ver)s, hp3parclient %(rest_ver)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:212 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:494 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:228 +#, python-format +msgid "Failed to get domain because CPG (%s) doesn't exist on array." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:247 +#, python-format +msgid "Error extending volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:347 +#, python-format +msgid "command %s failed" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:390 +#, python-format +msgid "Error running ssh command: %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:586 +#, python-format +msgid "VV Set %s does not exist." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:633 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:684 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:752 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1004 +#, python-format +msgid "Failure in update_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1019 +#, python-format +msgid "Failure in clear_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1031 +#, python-format +msgid "Error attaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1039 +#, python-format +msgid "Error detaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:124 +#, python-format +msgid "Invalid IP address format '%s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:158 +#, python-format +msgid "" +"Found invalid iSCSI IP address(s) in configuration option(s) " +"hp3par_iscsi_ips or iscsi_ip_address '%s.'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:164 +msgid "At least one valid iSCSI IP address must be set." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:266 +msgid "Least busy iSCSI port not found, using first iSCSI port in list." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:75 +#, python-format +msgid "Failure while invoking function: %(func)s. Error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:162 +#, python-format +msgid "Error while terminating session: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:165 +msgid "Successfully established connection to the server." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:172 +#, python-format +msgid "Error while logging out the user: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:218 +#, python-format +msgid "" +"Not authenticated error occurred. Will create session and try API call " +"again: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:258 +#, python-format +msgid "Task: %(task)s progress: %(prog)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:262 +#, python-format +msgid "Task %s status: success." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:266 +#: cinder/volume/drivers/vmware/api.py:271 +#, python-format +msgid "Task: %(task)s failed with error: %(err)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:290 +msgid "Lease is ready." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:294 +msgid "Lease initializing..." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:304 +#, python-format +msgid "Error: unknown lease state %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:51 +#, python-format +msgid "Read %(bytes)s out of %(max)s from ThreadSafePipe." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:56 +#, python-format +msgid "Completed transfer of size %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:102 +#, python-format +msgid "Initiating image service update on image: %(image)s with meta: %(meta)s" +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:117 +#, python-format +msgid "Glance image: %s is now active." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:123 +#, python-format +msgid "Glance image: %s is in killed state." 
+msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:132 +#, python-format +msgid "Glance image %(id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:171 +#, python-format +msgid "" +"Exception during HTTP connection close in VMwareHTTPWrite. Exception is " +"%s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:203 +#: cinder/volume/drivers/vmware/read_write_util.py:292 +msgid "Could not retrieve URL from lease." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:206 +#, python-format +msgid "Opening vmdk url: %s for write." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:231 +#, python-format +msgid "Written %s bytes to vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:242 +#: cinder/volume/drivers/vmware/read_write_util.py:318 +#, python-format +msgid "Updating progress to %s percent." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:258 +#: cinder/volume/drivers/vmware/read_write_util.py:334 +msgid "Lease released." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:260 +#: cinder/volume/drivers/vmware/read_write_util.py:336 +#, python-format +msgid "Lease is already in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:295 +#, python-format +msgid "Opening vmdk url: %s for read." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:307 +#, python-format +msgid "Read %s bytes from vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:150 +#, python-format +msgid "Error(s): %s occurred in the call to RetrievePropertiesEx." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:189 +#, python-format +msgid "No such SOAP method %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:198 +#, python-format +msgid "httplib error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:209 +#, python-format +msgid "Socket error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:218 +#, python-format +msgid "Type error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:225 +#, python-format +msgid "Error in %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:112 +#, python-format +msgid "Returning spec value %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:115 +#, python-format +msgid "Invalid spec value: %s specified." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:118 +#, python-format +msgid "Returning default spec value: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:169 +#, python-format +msgid "%s not set." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:174 +#, python-format +msgid "Successfully setup driver: %(driver)s for server: %(ip)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:219 +msgid "Backing not available, no operation to be performed." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:287 +#, python-format +msgid "" +"Unable to pick datastore to accommodate %(size)s bytes from the " +"datastores: %(dss)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:293 +#, python-format +msgid "" +"Selected datastore: %(datastore)s with %(host_count)d connected host(s) " +"for the volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:375 +#, python-format +msgid "" +"Unable to find suitable datastore for volume of size: %(vol)s GB under " +"host: %(host)s. 
More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:385 +#, python-format +msgid "Unable to find host to accommodate a disk of size: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:412 +#, python-format +msgid "" +"Unable to find suitable datastore for volume: %(vol)s under host: " +"%(host)s. More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:422 +#, python-format +msgid "Unable to create volume: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:441 +#, python-format +msgid "The instance: %s for which initialize connection is called, exists." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:448 +#, python-format +msgid "There is no backing for the volume: %s. Need to create one." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:456 +msgid "The instance for which initialize connection is called, does not exist." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:461 +#, python-format +msgid "Trying to boot from an empty volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:470 +#, python-format +msgid "" +"Returning connection_info: %(info)s for volume: %(volume)s with " +"connector: %(connector)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:518 +#, python-format +msgid "Snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:523 +#, python-format +msgid "There is no backing, so will not create snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:528 +#, python-format +msgid "Successfully created snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:549 +#, python-format +msgid "Delete snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:554 +#, python-format +msgid "There is no backing, and so there is no snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:558 +#, python-format +msgid "Successfully deleted snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:586 +#, python-format +msgid "Successfully cloned new backing: %(back)s from source VMDK file: %(vmdk)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:603 +#, python-format +msgid "" +"There is no backing for the source volume: %(svol)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:633 +#, python-format +msgid "" +"There is no backing for the source snapshot: %(snap)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:642 +#: cinder/volume/drivers/vmware/vmdk.py:982 +#, python-format +msgid "" +"There is no snapshot point for the snapshoted volume: %(snap)s. Not " +"creating any backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:678 +#, python-format +msgid "Cannot create image of disk format: %s. Only vmdk disk format is accepted." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:713 +#: cinder/volume/drivers/vmware/vmdk.py:771 +#, python-format +msgid "Fetching glance image: %(id)s to server: %(host)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:722 +#: cinder/volume/drivers/vmware/vmdk.py:792 +#, python-format +msgid "Done copying image: %(id)s to volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:725 +#, python-format +msgid "" +"Exception in copy_image_to_volume: %(excep)s. Deleting the backing: " +"%(back)s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:746 +#, python-format +msgid "Exception in _select_ds_for_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:749 +#, python-format +msgid "Selected datastore %(ds)s for new volume of size %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:784 +#, python-format +msgid "Exception in copy_image_to_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:787 +#, python-format +msgid "Deleting the backing: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:808 +#, python-format +msgid "Copy glance image: %s to create new volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:842 +msgid "Upload to glance of attached volume is not supported." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:847 +#, python-format +msgid "Copy Volume: %s to new image." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:853 +#, python-format +msgid "Backing not found, creating for volume: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:872 +#, python-format +msgid "Done copying volume %(vol)s to a new image %(img)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:922 +#, python-format +msgid "Relocating volume: %(backing)s to %(ds)s and %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:961 +#: cinder/volume/drivers/vmware/volumeops.py:630 +#, python-format +msgid "Successfully created clone: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:974 +#, python-format +msgid "" +"There is no backing for the snapshoted volume: %(snap)s. Not creating any" +" backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1010 +#, python-format +msgid "" +"There is no backing for the source volume: %(src)s. Not creating any " +"backing for volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1018 +#, python-format +msgid "Linked clone of source volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:94 +#, python-format +msgid "Downloading image: %s from glance image server as a flat vmdk file." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:107 +#: cinder/volume/drivers/vmware/vmware_images.py:126 +#, python-format +msgid "Downloaded image: %s from glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:113 +#, python-format +msgid "Downloading image: %s from glance image server using HttpNfc import." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:132 +#, python-format +msgid "Uploading image: %s to the Glance image server using HttpNfc export." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:158 +#, python-format +msgid "Uploaded image: %s to the Glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:87 +#, python-format +msgid "Did not find any backing with name: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:94 +#, python-format +msgid "Deleting the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:97 +#, python-format +msgid "Initiated deletion of VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:99 +#, python-format +msgid "Deleted the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:237 +#, python-format +msgid "There are no valid datastores attached to %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:289 +#, python-format +msgid "" +"Creating folder: %(child_folder_name)s under parent folder: " +"%(parent_folder)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:306 +#, python-format +msgid "Child folder already present: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:314 +#, python-format +msgid "Created child folder: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:365 +#, python-format +msgid "Spec for creating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:383 +#, python-format +msgid "" +"Creating volume backing name: %(name)s disk_type: %(disk_type)s size_kb: " +"%(size_kb)s at folder: %(folder)s resourse pool: %(resource_pool)s " +"datastore name: %(ds_name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:395 +#, python-format +msgid "Initiated creation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:398 +#, python-format +msgid "Successfully created volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:438 +#, python-format +msgid "Spec for relocating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:452 +#, python-format +msgid "" +"Relocating backing: %(backing)s to datastore: %(ds)s and resource pool: " +"%(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:462 +#, python-format +msgid "Initiated relocation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:464 +#, python-format +msgid "" +"Successfully relocated volume backing: %(backing)s to datastore: %(ds)s " +"and resource pool: %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:474 +#, python-format +msgid "Moving backing: %(backing)s to folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:479 +#, python-format +msgid "Initiated move of volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:482 +#, python-format +msgid "Successfully moved volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:494 +#, python-format +msgid "Snapshoting backing: %(backing)s with name: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:501 +#, python-format +msgid "Initiated snapshot of volume backing: %(backing)s named: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:505 +#, python-format +msgid "Successfully created snapshot: %(snap)s for volume backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:553 +#, python-format +msgid "Deleting the snapshot: %(name)s from backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:558 +#, python-format +msgid "" +"Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " +"delete anything." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:565 +#, python-format +msgid "Initiated snapshot: %(name)s deletion for backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:569 +#, python-format +msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:597 +#, python-format +msgid "Spec for cloning the backing: %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:613 +#, python-format +msgid "" +"Creating a clone of backing: %(back)s, named: %(name)s, clone type: " +"%(type)s from snapshot: %(snap)s on datastore: %(ds)s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:627 +#, python-format +msgid "Initiated clone of backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:638 +#, python-format +msgid "Deleting file: %(file)s under datacenter: %(dc)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:646 +#, python-format +msgid "Initiated deletion via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:648 +#, python-format +msgid "Successfully deleted file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:701 +msgid "Copying disk data before snapshot of the VM" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:711 +#, python-format +msgid "Initiated copying disk data via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:713 +#, python-format +msgid "Successfully copied disk at: %(src)s to: %(dest)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:722 +#, python-format +msgid "Deleting vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:729 +#, python-format +msgid "Initiated deleting vmdk file via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:731 +#, python-format +msgid "Deleted vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/windows/windows.py:102 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:47 +#, python-format +msgid "" +"check_for_setup_error: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:53 +msgid "check_for_setup_error: there is no ISCSI traffic listening." +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:63 +#, python-format +msgid "" +"get_host_information: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:73 +#, python-format +msgid "" +"get_host_information: the ISCSI target information could not be " +"retrieved. WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:105 +#, python-format +msgid "" +"associate_initiator_with_iscsi_target: an association between initiator: " +"%(init)s and target name: %(target)s could not be established. WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:123 +#, python-format +msgid "" +"delete_iscsi_target: error when deleting the iscsi target associated with" +" target name: %(target)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:139 +#, python-format +msgid "" +"create_volume: error when creating the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:157 +#, python-format +msgid "" +"delete_volume: error when deleting the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:177 +#, python-format +msgid "" +"create_snapshot: error when creating the snapshot name: %(vol_name)s . 
" +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:193 +#, python-format +msgid "" +"create_volume_from_snapshot: error when creating the volume name: " +"%(vol_name)s from snapshot name: %(snap_name)s. WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:208 +#, python-format +msgid "" +"delete_snapshot: error when deleting the snapshot name: %(snap_name)s . " +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:223 +#, python-format +msgid "" +"create_iscsi_target: error when creating iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:240 +#, python-format +msgid "" +"remove_iscsi_target: error when deleting iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:255 +#, python-format +msgid "" +"add_disk_to_target: error adding disk associated to volume : %(vol_name)s" +" to the target name: %(tar_name)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:273 +#, python-format +msgid "" +"copy_vhd_disk: error when copying disk from source path : %(src_path)s to" +" destination path: %(dest_path)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:290 +#, python-format +msgid "" +"extend: error when extending the volume: %(vol_name)s .WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/flows/common.py:52 +#, python-format +msgid "Restoring source %(source_volid)s status to %(status)s" +msgstr "" + +#: cinder/volume/flows/common.py:58 +#, python-format +msgid "" +"Failed setting source volume %(source_volid)s back to its initial " +"%(source_status)s status" +msgstr "" + +#: cinder/volume/flows/common.py:83 +#, python-format +msgid "Updating volume: %(volume_id)s with %(update)s due to: %(reason)s" +msgstr "" + +#: cinder/volume/flows/common.py:90 +#: cinder/volume/flows/manager/create_volume.py:676 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(update)s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:81 +#, python-format +msgid "Originating snapshot status must be one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:103 +#, python-format +msgid "" +"Unable to create a volume from an originating source volume when its " +"status is not one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:126 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than the snapshot size " +"%(snap_size)sGB. They must be >= original snapshot size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:135 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than original volume size " +"%(source_size)sGB. They must be >= original volume size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:144 +#, python-format +msgid "Volume size %(size)s must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:186 +#, python-format +msgid "" +"Size of specified image %(image_size)sGB is larger than volume size " +"%(volume_size)sGB." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:194 +#, python-format +msgid "" +"Volume size %(volume_size)sGB cannot be smaller than the image minDisk " +"size %(min_disk)sGB." 
+msgstr "" + +#: cinder/volume/flows/api/create_volume.py:212 +#, python-format +msgid "Metadata property key %s greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:217 +#, python-format +msgid "Metadata property key %s value greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:254 +#, python-format +msgid "Availability zone '%s' is invalid" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:267 +msgid "Volume must be in the same availability zone as the snapshot" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:276 +msgid "Volume must be in the same availability zone as the source volume" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:315 +msgid "Volume type will be changed to be the same as the source volume." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:463 +#, python-format +msgid "Failed destroying volume entry %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:546 +#, python-format +msgid "Failed rolling back quota for %s reservations" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:590 +#, python-format +msgid "Failed to update quota for deleting volume: %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:678 +#: cinder/volume/flows/manager/create_volume.py:192 +#, python-format +msgid "Volume %s: create failed" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:682 +msgid "Unexpected build error:" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:105 +#, python-format +msgid "" +"Volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d due to " +"%(reason)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:124 +#, python-format +msgid "Volume %s: re-scheduled" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:141 +#, python-format +msgid "Updating volume %(volume_id)s with %(update)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:146 +#, python-format +msgid "Volume %s: resetting 'creating' status failed." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:165 +#, python-format +msgid "Volume %s: rescheduling failed" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:308 +#, python-format +msgid "" +"Failed notifying about the volume action %(event)s for volume " +"%(volume_id)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:345 +#, python-format +msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:347 +#, python-format +msgid "" +"Failed updating volume %(vol_id)s metadata using the provided " +"%(src_type)s %(src_id)s metadata" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:405 +#, python-format +msgid "" +"Failed fetching snapshot %(snapshot_id)s bootable flag using the provided" +" glance snapshot %(snapshot_ref_id)s volume reference" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:418 +#, python-format +msgid "Marking volume %s as bootable." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:421 +#, python-format +msgid "Failed updating volume %(volume_id)s bootable flag to true" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:448 +#, python-format +msgid "" +"Attempting download of %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s." 
+msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:455 +#: cinder/volume/flows/manager/create_volume.py:466 +#, python-format +msgid "" +"Failed to copy image %(image_id)s to volume: %(volume_id)s, error: " +"%(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:461 +#, python-format +msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:475 +#, python-format +msgid "" +"Downloaded image %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s successfully." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:512 +#, python-format +msgid "" +"Creating volume glance metadata for volume %(volume_id)s backed by image " +"%(image_id)s with: %(vol_metadata)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:526 +#, python-format +msgid "" +"Cloning %(volume_id)s from image %(image_id)s at location " +"%(image_location)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:552 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(updates)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:574 +#, python-format +msgid "Unable to create volume. Volume driver %s not initialized" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:588 +#, python-format +msgid "" +"Volume %(volume_id)s: being created using %(functor)s with specification:" +" %(volume_spec)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:611 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with creation provided " +"model %(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:619 +#, python-format +msgid "Volume %s: creating export" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:633 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with driver provided model " +"%(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:680 +#, python-format +msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" +msgstr "" + +#~ msgid "Clear capabilities" +#~ msgstr "" + +#~ msgid "This usually means the volume was never succesfully created." +#~ msgstr "" + +#~ msgid "setting LU uppper (end) limit to %s" +#~ msgstr "" + +#~ msgid "Can't find lun or lun goup in array" +#~ msgstr "" + +#~ msgid "" +#~ msgstr "" + +#~ msgid "Volume to be restored to is smaller than the backup to be restored" +#~ msgstr "" + +#~ msgid "Volume driver '%(driver)s' not initialized." +#~ msgstr "" + +#~ msgid "in looping call" +#~ msgstr "" + +#~ msgid "Is the appropriate service running?" +#~ msgstr "" + +#~ msgid "Could not find another host" +#~ msgstr "" + +#~ msgid "Not enough allocatable volume gigabytes remaining" +#~ msgstr "" + +#~ msgid "Unable to update stats on non-intialized Volume Group: %s" +#~ msgstr "" + +#~ msgid "do_setup: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "migrate_volume started with more than one vdisk copy" +#~ msgstr "" + +#~ msgid "migrate_volume: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "Selected datastore: %s for the volume." +#~ msgstr "" + +#~ msgid "There are no valid datastores present under %s." +#~ msgstr "" + +#~ msgid "Unable to create volume, driver not initialized" +#~ msgstr "" + +#~ msgid "Migration %(migration_id)s could not be found." +#~ msgstr "" + +#~ msgid "Bad driver response status: %(status)s" +#~ msgstr "" + +#~ msgid "Instance %(instance_id)s could not be found." 
+#~ msgstr "" + +#~ msgid "Volume retype failed: %(reason)s" +#~ msgstr "" + +#~ msgid "SIGTERM received" +#~ msgstr "" + +#~ msgid "Child %(pid)d exited with status %(code)d" +#~ msgstr "" + +#~ msgid "_wait_child %d" +#~ msgstr "" + +#~ msgid "wait wrap.failed %s" +#~ msgstr "" + +#~ msgid "" +#~ "Report interval must be less than " +#~ "service down time. Current config: " +#~ ". Setting " +#~ "service_down_time to: %(new_service_down_time)s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target for volume %(name)s." +#~ msgstr "" + +#~ msgid "Updating iscsi target: %s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target %(name)s: %(e)s" +#~ msgstr "" + +#~ msgid "Caught '%(exception)s' exception." +#~ msgstr "" + +#~ msgid "Get code level failed" +#~ msgstr "" + +#~ msgid "do_setup: Could not get system name" +#~ msgstr "" + +#~ msgid "Failed to get license information." +#~ msgstr "" + +#~ msgid "do_setup: No configured nodes" +#~ msgstr "" + +#~ msgid "enter: _get_chap_secret_for_host: host name %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_chap_secret_for_host: host name " +#~ "%(host_name)s with secret %(chap_secret)s" +#~ msgstr "" + +#~ msgid "" +#~ "_create_host: Cannot clean host name. " +#~ "Host name is not unicode or string" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: %s" +#~ msgstr "" + +#~ msgid "leave: _get_host_from_connector: host %s" +#~ msgstr "" + +#~ msgid "enter: _create_host: host %s" +#~ msgstr "" + +#~ msgid "_create_host: No connector ports" +#~ msgstr "" + +#~ msgid "leave: _create_host: host %(host)s - %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +#~ msgstr "" + +#~ msgid "" +#~ "storwize_svc_multihostmap_enabled is set to " +#~ "False, Not allow multi host mapping" +#~ msgstr "" + +#~ msgid "volume %s mapping to multi host" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _map_vol_to_host: LUN %(result_lun)s, " +#~ "volume %(volume_name)s, host %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "leave: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "_create_host failed to return the host name." +#~ msgstr "" + +#~ msgid "_get_host_from_connector failed to return the host name for connector" +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to any host found." +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: Multiple mappings of " +#~ "volume %(vol_name)s found, no host " +#~ "specified." 
+#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to host %(host_name)s found" +#~ msgstr "" + +#~ msgid "protocol must be specified as ' iSCSI' or ' FC'" +#~ msgstr "" + +#~ msgid "enter: _create_vdisk: vdisk %s " +#~ msgstr "" + +#~ msgid "" +#~ "_create_vdisk %(name)s - did not find success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find success " +#~ "message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find mapping " +#~ "id in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to prepare FlashCopy" +#~ " from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "Unexecpted mapping status %(status)s for " +#~ "mapping %(id)s. Attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Mapping %(id)s prepare failed to " +#~ "complete within the allotted %(to)d " +#~ "seconds timeout. Terminating." +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s with " +#~ "exception %(ex)s" +#~ msgstr "" + +#~ msgid "_prepare_fc_map: %s" +#~ msgstr "" + +#~ msgid "" +#~ "_start_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "enter: _run_flashcopy: execute FlashCopy from" +#~ " source %(source)s to target %(target)s" +#~ msgstr "" + +#~ msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +#~ msgstr "" + +#~ msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %(src_vdisk)s (%(src_id)s) does not exist" +#~ msgstr "" + +#~ msgid "" +#~ "_create_copy: cannot get source vdisk " +#~ "%(src)s capacity from vdisk attributes " +#~ "%(attr)s" +#~ msgstr "" + +#~ msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_flashcopy_mapping_attributes: mapping " +#~ "%(fc_map_id)s, attributes %(attributes)s" +#~ msgstr "" + +#~ msgid "enter: _is_vdisk_defined: vdisk %s " +#~ msgstr "" + +#~ msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +#~ msgstr "" + +#~ msgid "enter: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "warning: Tried to delete vdisk %s but it does not exist." 
+#~ msgstr "" + +#~ msgid "leave: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "" +#~ "_add_vdisk_copy %(name)s - did not find" +#~ " success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "_get_vdisk_copy_attrs: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "_get_pool_attrs: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "enter: _execute_command_and_parse_attributes: command %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _execute_command_and_parse_attributes:\n" +#~ "command: %(cmd)s\n" +#~ "attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "_get_hdr_dic: attribute headers and values do not match.\n" +#~ " Headers: %(header)s\n" +#~ " Values: %(row)s" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ "stdout: %(out)s\n" +#~ "stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "Did not find expected column in %(fun)s: %(hdr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Volume size %(size)s cannot be lesser" +#~ " than the snapshot size %(snap_size)s. " +#~ "They must be >= original snapshot " +#~ "size." +#~ msgstr "" + +#~ msgid "" +#~ "Clones currently disallowed when %(size)s " +#~ "< %(source_size)s. They must be >= " +#~ "original volume size." +#~ msgstr "" + +#~ msgid "" +#~ "Size of specified image %(image_size)s " +#~ "is larger than volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "" +#~ "Image minDisk size %(min_disk)s is " +#~ "larger than the volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "Updating volume %(volume_id)s with %(update)s" +#~ msgstr "" + +#~ msgid "Volume %s: resetting 'creating' status failed" +#~ msgstr "" + +#~ msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s" +#~ msgstr "" + +#~ msgid "Marking volume %s as bootable" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting download of %(image_id)s " +#~ "(%(image_location)s) to volume %(volume_id)s" +#~ msgstr "" + +#~ msgid "" +#~ "Downloaded image %(image_id)s (%(image_location)s)" +#~ " to volume %(volume_id)s successfully" +#~ msgstr "" + +#~ msgid "" +#~ "Creating volume glance metadata for " +#~ "volume %(volume_id)s backed by image " +#~ "%(image_id)s with: %(vol_metadata)s" +#~ msgstr "" + +#~ msgid "" +#~ "Cloning %(volume_id)s from image %(image_id)s" +#~ " at location %(image_location)s" +#~ msgstr "" + diff --git a/cinder/locale/it/LC_MESSAGES/cinder.po b/cinder/locale/it/LC_MESSAGES/cinder.po index 0ac1786174..14056dc05c 100644 --- a/cinder/locale/it/LC_MESSAGES/cinder.po +++ b/cinder/locale/it/LC_MESSAGES/cinder.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: cinder\n" "Report-Msgid-Bugs-To: FULL NAME \n" -"POT-Creation-Date: 2012-04-08 23:04+0000\n" +"POT-Creation-Date: 2014-02-03 06:16+0000\n" "PO-Revision-Date: 2012-04-01 18:59+0000\n" "Last-Translator: simone.sandri \n" "Language-Team: Italian \n" @@ -15,8196 +15,10747 @@ msgstr "" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 0.9.6\n" +"Generated-By: Babel 1.3\n" -#: cinder/context.py:59 +#: cinder/context.py:61 #, python-format msgid "Arguments dropped when creating context: %s" msgstr "" -#: cinder/context.py:90 +#: cinder/context.py:102 #, python-format msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" msgstr "" -#: 
cinder/crypto.py:48 -msgid "Filename of root CA" -msgstr "Filename di root CA" - -#: cinder/crypto.py:51 -msgid "Filename of private key" -msgstr "Nome file della chiave privata" - -#: cinder/crypto.py:54 -msgid "Filename of root Certificate Revocation List" -msgstr "" - -#: cinder/crypto.py:57 -msgid "Where we keep our keys" -msgstr "Dove si conservano le chiavi" - -#: cinder/crypto.py:60 -msgid "Where we keep our root CA" -msgstr "Dove si conserva root CA" - -#: cinder/crypto.py:63 -msgid "Should we use a CA for each project?" -msgstr "Si dovrebbe usare un CA per ogni progetto?" - -#: cinder/crypto.py:67 -#, python-format -msgid "Subject for certificate for users, %s for project, user, timestamp" -msgstr "Soggetto per il certificato degli utenti, %s per progetto, utente, orario" - -#: cinder/crypto.py:72 -#, python-format -msgid "Subject for certificate for projects, %s for project, timestamp" -msgstr "Soggetto per il certificato dei progetti, %s per progetto, orario" - -#: cinder/crypto.py:292 -#, python-format -msgid "Flags path: %s" -msgstr "Percorso dei flags: %s" - -#: cinder/exception.py:56 -msgid "Unexpected error while running command." -msgstr "Si e' verificato un errore inatteso durante l'esecuzione del comando." - -#: cinder/exception.py:59 -#, python-format -msgid "" -"%(description)s\n" -"Command: %(cmd)s\n" -"Exit code: %(exit_code)s\n" -"Stdout: %(stdout)r\n" -"Stderr: %(stderr)r" -msgstr "" -"%(description)s\n" -"Comando: %(cmd)s\n" -"Exit code: %(exit_code)s\n" -"Stdout: %(stdout)r\n" -"Stderr: %(stderr)r" - -#: cinder/exception.py:94 -msgid "DB exception wrapped." -msgstr "" - -#: cinder/exception.py:155 +#: cinder/exception.py:66 cinder/brick/exception.py:33 msgid "An unknown exception occurred." msgstr "E' stato riscontrato un errore sconosciuto" -#: cinder/exception.py:178 -msgid "Failed to decrypt text" -msgstr "" - -#: cinder/exception.py:182 -msgid "Failed to paginate through images from image service" +#: cinder/exception.py:88 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" msgstr "" -#: cinder/exception.py:186 -msgid "Virtual Interface creation failed" +#: cinder/exception.py:107 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" msgstr "" -#: cinder/exception.py:190 -msgid "5 attempts to create virtual interfacewith unique mac address failed" +#: cinder/exception.py:112 +#, python-format +msgid "Volume driver reported an error: %(message)s" msgstr "" -#: cinder/exception.py:195 -msgid "Connection to glance failed" +#: cinder/exception.py:116 +#, python-format +msgid "Backup driver reported an error: %(message)s" msgstr "" -#: cinder/exception.py:199 -msgid "Connection to melange failed" +#: cinder/exception.py:120 +#, python-format +msgid "Connection to glance failed: %(reason)s" msgstr "" -#: cinder/exception.py:203 +#: cinder/exception.py:124 msgid "Not authorized." msgstr "" -#: cinder/exception.py:208 +#: cinder/exception.py:129 msgid "User does not have admin privileges" msgstr "L'utente non ha i privilegi dell'amministratore" -#: cinder/exception.py:212 +#: cinder/exception.py:133 #, python-format msgid "Policy doesn't allow %(action)s to be performed." msgstr "" -#: cinder/exception.py:216 +#: cinder/exception.py:137 #, fuzzy, python-format msgid "Not authorized for image %(image_id)s." msgstr "nessun metodo per il messaggio: %s" -#: cinder/exception.py:220 +#: cinder/exception.py:141 +msgid "Volume driver not ready." 
+msgstr "" + +#: cinder/exception.py:145 cinder/brick/exception.py:74 msgid "Unacceptable parameters." msgstr "Parametri inaccettabili." -#: cinder/exception.py:225 -msgid "Invalid snapshot" +#: cinder/exception.py:150 +#, python-format +msgid "Invalid snapshot: %(reason)s" msgstr "" -#: cinder/exception.py:229 +#: cinder/exception.py:154 #, python-format -msgid "Volume %(volume_id)s is not attached to anything" +msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." msgstr "" -#: cinder/exception.py:233 cinder/api/openstack/compute/contrib/keypairs.py:113 -msgid "Keypair data is invalid" +#: cinder/exception.py:159 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." msgstr "" -#: cinder/exception.py:237 +#: cinder/exception.py:163 msgid "Failed to load data into json format" msgstr "" -#: cinder/exception.py:241 -msgid "The request is invalid." +#: cinder/exception.py:167 +#, fuzzy +msgid "The results are invalid." msgstr "La richiesta non è valida." -#: cinder/exception.py:245 +#: cinder/exception.py:171 #, python-format -msgid "Invalid signature %(signature)s for user %(user)s." +msgid "Invalid input received: %(reason)s" msgstr "" -#: cinder/exception.py:249 -msgid "Invalid input received" -msgstr "E' stato ricevuto un input non valido" - -#: cinder/exception.py:253 +#: cinder/exception.py:175 #, python-format -msgid "Invalid instance type %(instance_type)s." +msgid "Invalid volume type: %(reason)s" msgstr "" -#: cinder/exception.py:257 -msgid "Invalid volume type" -msgstr "Tipo del volume non valido" - -#: cinder/exception.py:261 -msgid "Invalid volume" -msgstr "Volume non valido" +#: cinder/exception.py:179 +#, python-format +msgid "Invalid volume: %(reason)s" +msgstr "" -#: cinder/exception.py:265 +#: cinder/exception.py:183 #, python-format -msgid "Invalid port range %(from_port)s:%(to_port)s. %(msg)s" +msgid "Invalid content type %(content_type)s." msgstr "" -#: cinder/exception.py:269 +#: cinder/exception.py:187 #, python-format -msgid "Invalid IP protocol %(protocol)s." +msgid "Invalid host: %(reason)s" msgstr "" -#: cinder/exception.py:273 +#: cinder/exception.py:193 cinder/brick/exception.py:81 #, python-format -msgid "Invalid content type %(content_type)s." +msgid "%(err)s" msgstr "" -#: cinder/exception.py:277 +#: cinder/exception.py:197 #, python-format -msgid "Invalid cidr %(cidr)s." +msgid "Invalid auth key: %(reason)s" msgstr "" -#: cinder/exception.py:281 -msgid "Invalid reuse of an RPC connection." +#: cinder/exception.py:201 +#, python-format +msgid "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" msgstr "" -#: cinder/exception.py:285 -msgid "Invalid Parameter: Unicode is not supported by the current database." +#: cinder/exception.py:206 +msgid "Service is unavailable at this time." msgstr "" -#: cinder/exception.py:292 +#: cinder/exception.py:210 #, python-format -msgid "%(err)s" +msgid "Image %(image_id)s is unacceptable: %(reason)s" msgstr "" -#: cinder/exception.py:296 +#: cinder/exception.py:214 #, python-format -msgid "" -"Cannot perform action '%(action)s' on aggregate %(aggregate_id)s. Reason:" -" %(reason)s." +msgid "The device in the path %(path)s is unavailable: %(reason)s" msgstr "" -#: cinder/exception.py:301 +#: cinder/exception.py:218 #, python-format -msgid "Group not valid. Reason: %(reason)s" +msgid "Expected a uuid but received %(uuid)s." msgstr "" -#: cinder/exception.py:305 -#, python-format -msgid "" -"Instance %(instance_uuid)s in %(attr)s %(state)s. 
Cannot %(method)s while" -" the instance is in this state." +#: cinder/exception.py:222 cinder/brick/exception.py:68 +msgid "Resource could not be found." msgstr "" -#: cinder/exception.py:310 +#: cinder/exception.py:228 #, python-format -msgid "Instance %(instance_id)s is not running." +msgid "Volume %(volume_id)s could not be found." msgstr "" -#: cinder/exception.py:314 +#: cinder/exception.py:232 #, python-format -msgid "Instance %(instance_id)s is not suspended." +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." msgstr "" -#: cinder/exception.py:318 +#: cinder/exception.py:237 #, python-format -msgid "Instance %(instance_id)s is not in rescue mode" +msgid "" +"Volume %(volume_id)s has no administration metadata with key " +"%(metadata_key)s." msgstr "" -#: cinder/exception.py:322 -msgid "Failed to suspend instance" -msgstr "Impossibile sospendere l'istanza" - -#: cinder/exception.py:326 -msgid "Failed to resume server" -msgstr "Impossibile ripristinare il server" - -#: cinder/exception.py:330 -msgid "Failed to reboot instance" -msgstr "Impossibile riavviare l'istanza" - -#: cinder/exception.py:334 -#, fuzzy -msgid "Failed to terminate instance" -msgstr "Impossibile riavviare l'istanza" - -#: cinder/exception.py:338 -msgid "Service is unavailable at this time." +#: cinder/exception.py:242 +#, python-format +msgid "Invalid metadata: %(reason)s" msgstr "" -#: cinder/exception.py:342 -msgid "Volume service is unavailable at this time." +#: cinder/exception.py:246 +#, python-format +msgid "Invalid metadata size: %(reason)s" msgstr "" -#: cinder/exception.py:346 -msgid "Compute service is unavailable at this time." +#: cinder/exception.py:250 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." msgstr "" -#: cinder/exception.py:350 +#: cinder/exception.py:255 #, python-format -msgid "Unable to migrate instance (%(instance_id)s) to current host (%(host)s)." +msgid "Volume type %(volume_type_id)s could not be found." msgstr "" -#: cinder/exception.py:355 -msgid "Destination compute host is unavailable at this time." +#: cinder/exception.py:259 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." msgstr "" -#: cinder/exception.py:359 -msgid "Original compute host is unavailable at this time." +#: cinder/exception.py:264 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." msgstr "" -#: cinder/exception.py:363 -msgid "The supplied hypervisor type of is invalid." +#: cinder/exception.py:269 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s deletion is not allowed with volumes " +"present with the type." msgstr "" -#: cinder/exception.py:367 -msgid "The instance requires a newer hypervisor version than has been provided." +#: cinder/exception.py:274 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." msgstr "" -#: cinder/exception.py:372 +#: cinder/exception.py:278 #, python-format -msgid "" -"The supplied disk path (%(path)s) already exists, it is expected not to " -"exist." +msgid "deleting volume %(volume_name)s that has snapshot" msgstr "" -#: cinder/exception.py:377 +#: cinder/exception.py:282 #, python-format -msgid "The supplied device path (%(path)s) is invalid." +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" msgstr "" -#: cinder/exception.py:381 +#: cinder/exception.py:287 #, python-format -msgid "The supplied device (%(device)s) is busy." 
+msgid "No target id found for volume %(volume_id)s." msgstr "" -#: cinder/exception.py:385 -msgid "Unacceptable CPU info" +#: cinder/exception.py:291 +#, python-format +msgid "Invalid image href %(image_href)s." msgstr "" -#: cinder/exception.py:389 +#: cinder/exception.py:295 #, python-format -msgid "%(address)s is not a valid IP v4/6 address." +msgid "Image %(image_id)s could not be found." msgstr "" -#: cinder/exception.py:393 +#: cinder/exception.py:299 #, python-format -msgid "" -"VLAN tag is not appropriate for the port group %(bridge)s. Expected VLAN " -"tag is %(tag)s, but the one associated with the port group is %(pgroup)s." +msgid "Service %(service_id)s could not be found." msgstr "" -#: cinder/exception.py:399 +#: cinder/exception.py:303 #, python-format -msgid "" -"vSwitch which contains the port group %(bridge)s is not associated with " -"the desired physical adapter. Expected vSwitch is %(expected)s, but the " -"one associated is %(actual)s." +msgid "Host %(host)s could not be found." msgstr "" -#: cinder/exception.py:406 +#: cinder/exception.py:307 #, python-format -msgid "Disk format %(disk_format)s is not acceptable" +msgid "Scheduler Host Filter %(filter_name)s could not be found." msgstr "" -#: cinder/exception.py:410 +#: cinder/exception.py:311 #, python-format -msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." msgstr "" -#: cinder/exception.py:414 +#: cinder/exception.py:315 #, python-format -msgid "Instance %(instance_id)s is unacceptable: %(reason)s" +msgid "Could not find binary %(binary)s on host %(host)s." msgstr "" -#: cinder/exception.py:418 +#: cinder/exception.py:319 #, python-format -msgid "Ec2 id %(ec2_id)s is unacceptable." +msgid "Invalid reservation expiration %(expire)s." msgstr "" -#: cinder/exception.py:422 -msgid "Resource could not be found." +#: cinder/exception.py:323 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" msgstr "" -#: cinder/exception.py:427 -#, python-format -msgid "Required flag %(flag)s not set." +#: cinder/exception.py:328 +msgid "Quota could not be found" msgstr "" -#: cinder/exception.py:431 +#: cinder/exception.py:332 #, python-format -msgid "Volume %(volume_id)s could not be found." +msgid "Unknown quota resources %(unknown)s." msgstr "" -#: cinder/exception.py:435 +#: cinder/exception.py:336 #, python-format -msgid "Unable to locate account %(account_name)s on Solidfire device" +msgid "Quota for project %(project_id)s could not be found." msgstr "" -#: cinder/exception.py:440 +#: cinder/exception.py:340 #, python-format -msgid "Volume not found for instance %(instance_id)s." +msgid "Quota class %(class_name)s could not be found." msgstr "" -#: cinder/exception.py:444 +#: cinder/exception.py:344 #, python-format -msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgid "Quota usage for project %(project_id)s could not be found." msgstr "" -#: cinder/exception.py:449 -msgid "Zero volume types found." +#: cinder/exception.py:348 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." msgstr "" -#: cinder/exception.py:453 +#: cinder/exception.py:352 #, python-format -msgid "Volume type %(volume_type_id)s could not be found." +msgid "Quota exceeded for resources: %(overs)s" msgstr "" -#: cinder/exception.py:457 +#: cinder/exception.py:356 #, python-format -msgid "Volume type with name %(volume_type_name)s could not be found." 
+msgid "File %(file_path)s could not be found." msgstr "" -#: cinder/exception.py:462 +#: cinder/exception.py:365 #, python-format -msgid "" -"Volume Type %(volume_type_id)s has no extra specs with key " -"%(extra_specs_key)s." +msgid "Volume Type %(id)s already exists." msgstr "" -#: cinder/exception.py:467 +#: cinder/exception.py:369 #, python-format -msgid "Snapshot %(snapshot_id)s could not be found." +msgid "Volume type encryption for type %(type_id)s already exists." msgstr "" -#: cinder/exception.py:471 +#: cinder/exception.py:373 #, python-format -msgid "deleting volume %(volume_name)s that has snapshot" +msgid "Malformed message body: %(reason)s" msgstr "" -#: cinder/exception.py:475 +#: cinder/exception.py:377 #, python-format -msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgid "Could not find config at %(path)s" msgstr "" -#: cinder/exception.py:480 +#: cinder/exception.py:381 +#, fuzzy, python-format +msgid "Could not find parameter %(param)s" +msgstr "Impossibile localizzare il volume %s" + +#: cinder/exception.py:385 #, python-format -msgid "No target id found for volume %(volume_id)s." +msgid "Could not load paste app '%(name)s' from %(path)s" msgstr "" -#: cinder/exception.py:484 +#: cinder/exception.py:389 #, python-format -msgid "No disk at %(location)s" +msgid "No valid host was found. %(reason)s" msgstr "" -#: cinder/exception.py:488 +#: cinder/exception.py:398 #, python-format -msgid "Could not find a handler for %(driver_type)s volume." +msgid "Host %(host)s is not up or doesn't exist." msgstr "" -#: cinder/exception.py:492 +#: cinder/exception.py:402 #, python-format -msgid "Invalid image href %(image_href)s." +msgid "Quota exceeded: code=%(code)s" msgstr "" -#: cinder/exception.py:496 +#: cinder/exception.py:409 +#, python-format msgid "" -"Some images have been stored via hrefs. This version of the api does not " -"support displaying image hrefs." +"Requested volume or snapshot exceeds allowed Gigabytes quota. Requested " +"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." msgstr "" -#: cinder/exception.py:501 +#: cinder/exception.py:415 #, python-format -msgid "Image %(image_id)s could not be found." +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" msgstr "" -#: cinder/exception.py:505 +#: cinder/exception.py:419 #, python-format -msgid "Kernel not found for image %(image_id)s." +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" msgstr "" -#: cinder/exception.py:509 +#: cinder/exception.py:423 #, python-format -msgid "User %(user_id)s could not be found." +msgid "Detected more than one volume with name %(vol_name)s" msgstr "" -#: cinder/exception.py:513 +#: cinder/exception.py:427 #, python-format -msgid "Project %(project_id)s could not be found." +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" msgstr "" -#: cinder/exception.py:517 +#: cinder/exception.py:432 #, python-format -msgid "User %(user_id)s is not a member of project %(project_id)s." +msgid "Unknown or unsupported command %(cmd)s" msgstr "" -#: cinder/exception.py:521 +#: cinder/exception.py:436 #, python-format -msgid "Role %(role_id)s could not be found." +msgid "Malformed response to command %(cmd)s: %(reason)s" msgstr "" -#: cinder/exception.py:525 -msgid "Cannot find SR to read/write VDI." +#: cinder/exception.py:440 +#, python-format +msgid "Operation failed with status=%(status)s. 
Full dump: %(data)s" msgstr "" -#: cinder/exception.py:529 +#: cinder/exception.py:444 #, python-format -msgid "%(req)s is required to create a network." +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" msgstr "" -#: cinder/exception.py:533 +#: cinder/exception.py:449 #, python-format -msgid "Network %(network_id)s could not be found." +msgid "Glance metadata for volume/snapshot %(id)s cannot be found." msgstr "" -#: cinder/exception.py:537 +#: cinder/exception.py:453 #, python-format -msgid "Network could not be found for bridge %(bridge)s" +msgid "Failed to export for volume: %(reason)s" msgstr "" -#: cinder/exception.py:541 +#: cinder/exception.py:457 #, python-format -msgid "Network could not be found for uuid %(uuid)s" +msgid "Failed to create metadata for volume: %(reason)s" msgstr "" -#: cinder/exception.py:545 +#: cinder/exception.py:461 #, python-format -msgid "Network could not be found with cidr %(cidr)s." +msgid "Failed to update metadata for volume: %(reason)s" msgstr "" -#: cinder/exception.py:549 +#: cinder/exception.py:465 #, python-format -msgid "Network could not be found for instance %(instance_id)s." +msgid "Failed to copy metadata to volume: %(reason)s" msgstr "" -#: cinder/exception.py:553 -msgid "No networks defined." +#: cinder/exception.py:469 +#, fuzzy, python-format +msgid "Failed to copy image to volume: %(reason)s" +msgstr "Impossibile localizzare il volume %s" + +#: cinder/exception.py:473 +msgid "Invalid Ceph args provided for backup rbd operation" msgstr "" -#: cinder/exception.py:557 -#, python-format -msgid "" -"Either Network uuid %(network_uuid)s is not present or is not assigned to" -" the project %(project_id)s." +#: cinder/exception.py:477 +msgid "An error has occurred during backup operation" msgstr "" -#: cinder/exception.py:562 -#, python-format -msgid "Host is not set to the network (%(network_id)s)." +#: cinder/exception.py:481 +msgid "Backup RBD operation failed" msgstr "" -#: cinder/exception.py:566 +#: cinder/exception.py:485 #, python-format -msgid "Network %(network)s has active ports, cannot delete." +msgid "Backup %(backup_id)s could not be found." msgstr "" -#: cinder/exception.py:570 -msgid "Could not find the datastore reference(s) which the VM uses." +#: cinder/exception.py:489 +msgid "Failed to identify volume backend." msgstr "" -#: cinder/exception.py:574 +#: cinder/exception.py:493 #, python-format -msgid "No fixed IP associated with id %(id)s." +msgid "Invalid backup: %(reason)s" msgstr "" -#: cinder/exception.py:578 +#: cinder/exception.py:497 #, python-format -msgid "Fixed ip not found for address %(address)s." +msgid "Connection to swift failed: %(reason)s" msgstr "" -#: cinder/exception.py:582 +#: cinder/exception.py:501 #, python-format -msgid "Instance %(instance_id)s has zero fixed ips." +msgid "Transfer %(transfer_id)s could not be found." msgstr "" -#: cinder/exception.py:586 +#: cinder/exception.py:505 #, python-format -msgid "Network host %(host)s has zero fixed ips in network %(network_id)s." +msgid "Volume migration failed: %(reason)s" msgstr "" -#: cinder/exception.py:591 +#: cinder/exception.py:509 #, python-format -msgid "Instance %(instance_id)s doesn't have fixed ip '%(ip)s'." +msgid "SSH command injection detected: %(command)s" msgstr "" -#: cinder/exception.py:595 +#: cinder/exception.py:513 #, python-format -msgid "Host %(host)s has zero fixed ips." +msgid "QoS Specs %(specs_id)s already exists." 
msgstr "" -#: cinder/exception.py:599 +#: cinder/exception.py:517 #, python-format -msgid "" -"Fixed IP address (%(address)s) does not exist in network " -"(%(network_uuid)s)." +msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." msgstr "" -#: cinder/exception.py:604 +#: cinder/exception.py:522 #, python-format -msgid "Fixed IP address %(address)s is already in use." +msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." msgstr "" -#: cinder/exception.py:608 +#: cinder/exception.py:527 #, python-format -msgid "Fixed IP address %(address)s is invalid." +msgid "No such QoS spec %(specs_id)s." msgstr "" -#: cinder/exception.py:612 -msgid "Zero fixed ips available." +#: cinder/exception.py:531 +#, python-format +msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." msgstr "" -#: cinder/exception.py:616 -msgid "Zero fixed ips could be found." +#: cinder/exception.py:536 +#, python-format +msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." msgstr "" -#: cinder/exception.py:620 +#: cinder/exception.py:541 #, python-format -msgid "Floating ip not found for id %(id)s." +msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." msgstr "" -#: cinder/exception.py:624 +#: cinder/exception.py:546 #, python-format -msgid "The DNS entry %(name)s already exists in domain %(domain)s." +msgid "Invalid qos specs: %(reason)s" msgstr "" -#: cinder/exception.py:628 +#: cinder/exception.py:550 #, python-format -msgid "Floating ip not found for address %(address)s." +msgid "QoS Specs %(specs_id)s is still associated with entities." msgstr "" -#: cinder/exception.py:632 +#: cinder/exception.py:554 #, python-format -msgid "Floating ip not found for host %(host)s." +msgid "key manager error: %(reason)s" msgstr "" -#: cinder/exception.py:636 -msgid "Zero floating ips available." +#: cinder/exception.py:560 +msgid "Coraid Cinder Driver exception." msgstr "" -#: cinder/exception.py:640 -#, python-format -msgid "Floating ip %(address)s is associated." +#: cinder/exception.py:564 +msgid "Failed to encode json data." msgstr "" -#: cinder/exception.py:644 -#, python-format -msgid "Floating ip %(address)s is not associated." +#: cinder/exception.py:568 +msgid "Login on ESM failed." msgstr "" -#: cinder/exception.py:648 -msgid "Zero floating ips exist." +#: cinder/exception.py:572 +msgid "Relogin on ESM failed." msgstr "" -#: cinder/exception.py:652 +#: cinder/exception.py:576 #, python-format -msgid "Interface %(interface)s not found." +msgid "Group with name \"%(group_name)s\" not found." msgstr "" -#: cinder/exception.py:656 +#: cinder/exception.py:580 #, python-format -msgid "Keypair %(name)s not found for user %(user_id)s" +msgid "ESM configure request failed: %(message)s." msgstr "" -#: cinder/exception.py:660 +#: cinder/exception.py:584 #, python-format -msgid "Certificate %(certificate_id)s not found." +msgid "Coraid ESM not available with reason: %(reason)s." msgstr "" -#: cinder/exception.py:664 -#, python-format -msgid "Service %(service_id)s could not be found." +#: cinder/exception.py:589 +msgid "Zadara Cinder Driver exception." msgstr "" -#: cinder/exception.py:668 +#: cinder/exception.py:593 +#, fuzzy, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "Impossible creare il VDI su SR %(sr_ref)s per l'istanza %(instance_name)s" + +#: cinder/exception.py:597 #, python-format -msgid "Host %(host)s could not be found." 
+msgid "Unable to find server object for initiator %(name)s" msgstr "" -#: cinder/exception.py:672 -#, python-format -msgid "Compute host %(host)s could not be found." +#: cinder/exception.py:601 +msgid "Unable to find any active VPSA controller" msgstr "" -#: cinder/exception.py:676 +#: cinder/exception.py:605 #, python-format -msgid "Could not find binary %(binary)s on host %(host)s." +msgid "Failed to retrieve attachments for volume %(name)s" msgstr "" -#: cinder/exception.py:680 +#: cinder/exception.py:609 #, python-format -msgid "Auth token %(token)s could not be found." +msgid "Invalid attachment info for volume %(name)s: %(reason)s" msgstr "" -#: cinder/exception.py:684 +#: cinder/exception.py:613 #, python-format -msgid "Access Key %(access_key)s could not be found." +msgid "Bad HTTP response status %(status)s" msgstr "" -#: cinder/exception.py:688 -msgid "Quota could not be found" +#: cinder/exception.py:618 +msgid "Bad response from SolidFire API" msgstr "" -#: cinder/exception.py:692 -#, python-format -msgid "Quota for project %(project_id)s could not be found." +#: cinder/exception.py:622 +msgid "SolidFire Cinder Driver exception" msgstr "" -#: cinder/exception.py:696 +#: cinder/exception.py:626 #, python-format -msgid "Quota class %(class_name)s could not be found." +msgid "Error in SolidFire API response: data=%(data)s" msgstr "" -#: cinder/exception.py:700 +#: cinder/exception.py:630 #, python-format -msgid "Security group %(security_group_id)s not found." +msgid "Unable to locate account %(account_name)s on Solidfire device" msgstr "" -#: cinder/exception.py:704 +#: cinder/exception.py:636 #, python-format -msgid "Security group %(security_group_id)s not found for project %(project_id)s." +msgid "Invalid 3PAR Domain: %(err)s" msgstr "" -#: cinder/exception.py:709 -#, python-format -msgid "Security group with rule %(rule_id)s not found." +#: cinder/exception.py:641 +#, fuzzy +msgid "Unknown NFS exception" +msgstr "E' stato riscontrato un errore sconosciuto" + +#: cinder/exception.py:645 +msgid "No mounted NFS shares found" msgstr "" -#: cinder/exception.py:713 +#: cinder/exception.py:649 cinder/exception.py:662 #, python-format -msgid "" -"Security group %(security_group_id)s is already associated with the " -"instance %(instance_id)s" +msgid "There is no share which can host %(volume_size)sG" msgstr "" -#: cinder/exception.py:718 -#, python-format -msgid "" -"Security group %(security_group_id)s is not associated with the instance " -"%(instance_id)s" +#: cinder/exception.py:654 +#, fuzzy +msgid "Unknown Gluster exception" +msgstr "E' stato riscontrato un errore sconosciuto" + +#: cinder/exception.py:658 +msgid "No mounted Gluster shares found" msgstr "" -#: cinder/exception.py:723 -#, python-format -msgid "Migration %(migration_id)s could not be found." +#: cinder/manager.py:133 +msgid "Notifying Schedulers of capabilities ..." msgstr "" -#: cinder/exception.py:727 -#, python-format -msgid "Migration not found for instance %(instance_id)s with status %(status)s." +#: cinder/policy.py:30 +msgid "JSON file representing policy" msgstr "" -#: cinder/exception.py:732 -#, python-format -msgid "Console pool %(pool_id)s could not be found." +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" msgstr "" -#: cinder/exception.py:736 +#: cinder/quota.py:105 #, python-format msgid "" -"Console pool of type %(console_type)s for compute host %(compute_host)s " -"on proxy host %(host)s not found." 
+"Default quota for resource: %(res)s is set by the default quota flag: " +"quota_%(res)s, it is now deprecated. Please use the the default quota " +"class for default quota." msgstr "" -#: cinder/exception.py:742 +#: cinder/quota.py:748 #, python-format -msgid "Console %(console_id)s could not be found." +msgid "Created reservations %s" msgstr "" -#: cinder/exception.py:746 +#: cinder/quota.py:770 #, python-format -msgid "Console for instance %(instance_id)s could not be found." +msgid "Failed to commit reservations %s" msgstr "" -#: cinder/exception.py:750 +#: cinder/quota.py:790 #, python-format -msgid "" -"Console for instance %(instance_id)s in pool %(pool_id)s could not be " -"found." +msgid "Failed to roll back reservations %s" msgstr "" -#: cinder/exception.py:755 -#, python-format -msgid "Invalid console type %(console_type)s " +#: cinder/quota.py:876 +msgid "Cannot register resource" msgstr "" -#: cinder/exception.py:759 -msgid "Zero instance types found." +#: cinder/quota.py:879 +msgid "Cannot register resources" msgstr "" -#: cinder/exception.py:763 +#: cinder/quota_utils.py:46 #, python-format -msgid "Instance type %(instance_type_id)s could not be found." +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume - " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" msgstr "" -#: cinder/exception.py:767 +#: cinder/quota_utils.py:56 cinder/transfer/api.py:168 +#: cinder/volume/flows/api/create_volume.py:520 #, python-format -msgid "Instance type with name %(instance_type_name)s could not be found." +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" msgstr "" -#: cinder/exception.py:772 +#: cinder/service.py:95 #, python-format -msgid "Flavor %(flavor_id)s could not be found." +msgid "Starting %(topic)s node (version %(version_string)s)" msgstr "" -#: cinder/exception.py:776 +#: cinder/service.py:108 cinder/openstack/common/rpc/service.py:47 #, python-format -msgid "Cell %(cell_id)s could not be found." +msgid "Creating Consumer connection for Service %s" msgstr "" -#: cinder/exception.py:780 +#: cinder/service.py:148 #, python-format -msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgid "" +"Report interval must be less than service down time. Current config " +"service_down_time: %(service_down_time)s, report_interval for this: " +"service is: %(report_interval)s. Setting global service_down_time to: " +"%(new_down_time)s" msgstr "" -#: cinder/exception.py:784 -#, python-format -msgid "Scheduler cost function %(cost_fn_str)s could not be found." -msgstr "" +#: cinder/service.py:216 +msgid "Service killed that has no database entry" +msgstr "Servizio terminato che non ha entry nel database" -#: cinder/exception.py:789 -#, python-format -msgid "Scheduler weight flag not found: %(flag_name)s" -msgstr "" +#: cinder/service.py:255 +msgid "The service database object disappeared, Recreating it." +msgstr "Il servizio é scomparso dal database, ricreo." -#: cinder/exception.py:793 -#, python-format -msgid "Instance %(instance_id)s has no metadata with key %(metadata_key)s." -msgstr "" +#: cinder/service.py:270 +msgid "Recovered model server connection!" +msgstr "Connessione al model server ripristinata!" + +#: cinder/service.py:276 +msgid "model server went away" +msgstr "model server é scomparso" -#: cinder/exception.py:798 +#: cinder/service.py:298 #, python-format msgid "" -"Instance Type %(instance_type_id)s has no extra specs with key " -"%(extra_specs_key)s." 
+"Value of config option %(name)s_workers must be integer greater than 1. " +"Input value ignored." msgstr "" -#: cinder/exception.py:803 -msgid "LDAP object could not be found" +#: cinder/service.py:373 +msgid "serve() can only be called once" msgstr "" -#: cinder/exception.py:807 -#, python-format -msgid "LDAP user %(user_id)s could not be found." -msgstr "" +#: cinder/service.py:379 cinder/openstack/common/service.py:166 +#: cinder/openstack/common/service.py:384 +#, fuzzy +msgid "Full set of CONF:" +msgstr "Insieme di FLAGS:" -#: cinder/exception.py:811 +#: cinder/service.py:387 #, python-format -msgid "LDAP group %(group_id)s could not be found." +msgid "%s : FLAG SET " msgstr "" -#: cinder/exception.py:815 +#: cinder/utils.py:96 #, python-format -msgid "LDAP user %(user_id)s is not a member of group %(group_id)s." +msgid "Can not translate %s to integer." msgstr "" -#: cinder/exception.py:819 +#: cinder/utils.py:127 #, python-format -msgid "File %(file_path)s could not be found." +msgid "May specify only one of %s" msgstr "" -#: cinder/exception.py:823 -msgid "Zero files could be found." +#: cinder/utils.py:212 +msgid "Specify a password or private_key" msgstr "" -#: cinder/exception.py:827 +#: cinder/utils.py:228 #, python-format -msgid "Virtual switch associated with the network adapter %(adapter)s not found." +msgid "Error connecting via ssh: %s" msgstr "" -#: cinder/exception.py:832 +#: cinder/utils.py:412 #, python-format -msgid "Network adapter %(adapter)s could not be found." +msgid "Invalid backend: %s" msgstr "" -#: cinder/exception.py:836 +#: cinder/utils.py:423 #, python-format -msgid "Class %(class_name)s could not be found: %(exception)s" +msgid "backend %s" msgstr "" -#: cinder/exception.py:840 -msgid "Action not allowed." +#: cinder/utils.py:698 +#, python-format +msgid "Could not remove tmpdir: %s" msgstr "" -#: cinder/exception.py:844 +#: cinder/utils.py:759 #, python-format -msgid "Unable to use global role %(role_id)s" +msgid "Volume driver %s not initialized" msgstr "" -#: cinder/exception.py:848 -msgid "Rotation is not allowed for snapshots" +#: cinder/wsgi.py:127 cinder/openstack/common/sslutils.py:50 +#, python-format +msgid "Unable to find cert_file : %s" msgstr "" -#: cinder/exception.py:852 -msgid "Rotation param is required for backup image_type" -msgstr "" +#: cinder/wsgi.py:130 cinder/openstack/common/sslutils.py:53 +#, fuzzy, python-format +msgid "Unable to find ca_file : %s" +msgstr "Impossibile localizzare il volume %s" -#: cinder/exception.py:861 +#: cinder/wsgi.py:133 cinder/openstack/common/sslutils.py:56 #, python-format -msgid "Key pair %(key_name)s already exists." +msgid "Unable to find key_file : %s" msgstr "" -#: cinder/exception.py:865 -#, python-format -msgid "User %(user)s already exists." +#: cinder/wsgi.py:136 cinder/openstack/common/sslutils.py:59 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" msgstr "" -#: cinder/exception.py:869 +#: cinder/wsgi.py:169 #, python-format -msgid "LDAP user %(user)s already exists." +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" msgstr "" -#: cinder/exception.py:873 +#: cinder/wsgi.py:206 #, python-format -msgid "LDAP group %(group)s already exists." +msgid "Started %(name)s on %(host)s:%(port)s" msgstr "" -#: cinder/exception.py:877 -#, python-format -msgid "User %(uid)s is already a member of the group %(group_dn)s" +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." 
msgstr "" -#: cinder/exception.py:882 -#, python-format -msgid "Project %(project)s already exists." +#: cinder/wsgi.py:244 +msgid "WSGI server has stopped." msgstr "" -#: cinder/exception.py:886 -#, python-format -msgid "Instance %(name)s already exists." +#: cinder/wsgi.py:313 +msgid "You must implement __call__" msgstr "" -#: cinder/exception.py:890 -#, python-format -msgid "Instance Type %(name)s already exists." +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." msgstr "" -#: cinder/exception.py:894 -#, python-format -msgid "Volume Type %(name)s already exists." +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." msgstr "" -#: cinder/exception.py:898 -#, python-format -msgid "%(path)s is on shared storage: %(reason)s" +#: cinder/api/common.py:92 cinder/api/common.py:126 cinder/volume/api.py:266 +msgid "limit param must be an integer" msgstr "" -#: cinder/exception.py:902 -msgid "Migration error" +#: cinder/api/common.py:95 cinder/api/common.py:130 cinder/volume/api.py:263 +msgid "limit param must be positive" msgstr "" -#: cinder/exception.py:906 -#, python-format -msgid "Malformed message body: %(reason)s" +#: cinder/api/common.py:120 +msgid "offset param must be an integer" msgstr "" -#: cinder/exception.py:910 -#, python-format -msgid "Could not find config at %(path)s" +#: cinder/api/common.py:134 +msgid "offset param must be positive" msgstr "" -#: cinder/exception.py:914 +#: cinder/api/common.py:162 #, python-format -msgid "Could not load paste app '%(name)s' from %(path)s" -msgstr "" - -#: cinder/exception.py:918 -msgid "When resizing, instances must change size!" -msgstr "" - -#: cinder/exception.py:922 -msgid "Image is larger than instance type allows" -msgstr "" - -#: cinder/exception.py:926 -msgid "1 or more Zones could not complete the request" +msgid "marker [%s] not found" msgstr "" -#: cinder/exception.py:930 -msgid "Instance type's memory is too small for requested image." +#: cinder/api/common.py:189 +#, python-format +msgid "href %s does not contain version" msgstr "" -#: cinder/exception.py:934 -msgid "Instance type's disk is too small for requested image." +#: cinder/api/extensions.py:182 +msgid "Initializing extension manager." msgstr "" -#: cinder/exception.py:938 +#: cinder/api/extensions.py:197 #, python-format -msgid "Insufficient free memory on compute node to start %(uuid)s." +msgid "Loaded extension: %s" msgstr "" -#: cinder/exception.py:942 -msgid "Could not fetch bandwidth/cpu/disk metrics for this host." +#: cinder/api/extensions.py:235 +#, python-format +msgid "Ext name: %s" msgstr "" -#: cinder/exception.py:946 +#: cinder/api/extensions.py:236 #, python-format -msgid "No valid host was found. %(reason)s" +msgid "Ext alias: %s" msgstr "" -#: cinder/exception.py:950 +#: cinder/api/extensions.py:237 #, python-format -msgid "Host %(host)s is not up or doesn't exist." +msgid "Ext description: %s" msgstr "" -#: cinder/exception.py:954 -msgid "Quota exceeded" +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext namespace: %s" msgstr "" -#: cinder/exception.py:958 +#: cinder/api/extensions.py:240 #, python-format -msgid "" -"Aggregate %(aggregate_id)s: action '%(action)s' caused an error: " -"%(reason)s." 
+msgid "Ext updated: %s" msgstr "" -#: cinder/exception.py:963 +#: cinder/api/extensions.py:242 #, python-format -msgid "Aggregate %(aggregate_id)s could not be found." +msgid "Exception loading extension: %s" msgstr "" -#: cinder/exception.py:967 +#: cinder/api/extensions.py:256 #, python-format -msgid "Aggregate %(aggregate_name)s already exists." +msgid "Loading extension %s" msgstr "" -#: cinder/exception.py:971 +#: cinder/api/extensions.py:262 #, python-format -msgid "Aggregate %(aggregate_id)s has no host %(host)s." +msgid "Calling extension factory %s" msgstr "" -#: cinder/exception.py:975 +#: cinder/api/extensions.py:276 #, python-format -msgid "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s." +msgid "osapi_volume_extension is set to deprecated path: %s" msgstr "" -#: cinder/exception.py:980 +#: cinder/api/extensions.py:278 #, python-format -msgid "Host %(host)s already member of another aggregate." +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" msgstr "" -#: cinder/exception.py:984 +#: cinder/api/extensions.py:287 #, python-format -msgid "Aggregate %(aggregate_id)s already has host %(host)s." +msgid "Failed to load extension %(ext_factory)s: %(exc)s" msgstr "" -#: cinder/exception.py:988 +#: cinder/api/extensions.py:356 #, python-format -msgid "Detected more than one volume with name %(vol_name)s" +msgid "Failed to load extension %(classpath)s: %(exc)s" msgstr "" -#: cinder/exception.py:992 +#: cinder/api/extensions.py:381 #, python-format -msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgid "Failed to load extension %(ext_name)s: %(exc)s" msgstr "" -#: cinder/exception.py:997 -msgid "Unable to create instance type" +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" msgstr "" -#: cinder/exception.py:1001 -msgid "Bad response from SolidFire API" +#: cinder/api/xmlutil.py:266 +msgid "element is not a child" msgstr "" -#: cinder/exception.py:1005 -#, python-format -msgid "Error in SolidFire API response: status=%(status)s" +#: cinder/api/xmlutil.py:463 +msgid "root element selecting a list" msgstr "" -#: cinder/exception.py:1009 +#: cinder/api/xmlutil.py:786 #, python-format -msgid "Error in SolidFire API response: data=%(data)s" +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" msgstr "" -#: cinder/exception.py:1013 -#, python-format -msgid "Detected existing vlan with id %(vlan)d" +#: cinder/api/xmlutil.py:907 +msgid "subclasses must implement construct()!" msgstr "" -#: cinder/exception.py:1017 +#: cinder/api/contrib/admin_actions.py:81 #, python-format -msgid "Instance %(instance_id)s could not be found." +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" msgstr "" -#: cinder/exception.py:1021 +#: cinder/api/contrib/backups.py:128 #, python-format -msgid "Invalid id: %(val)s (expecting \"i-...\")." 
+msgid "show called for member %s" msgstr "" -#: cinder/exception.py:1025 +#: cinder/api/contrib/backups.py:140 #, python-format -msgid "Could not fetch image %(image)s" +msgid "delete called for member %s" msgstr "" -#: cinder/log.py:315 +#: cinder/api/contrib/backups.py:143 #, python-format -msgid "syslog facility must be one of: %s" +msgid "Delete backup with id: %s" msgstr "" -#: cinder/manager.py:146 +#: cinder/api/contrib/backups.py:185 #, python-format -msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgid "Creating new backup %s" msgstr "" -#: cinder/manager.py:152 -#, python-format -msgid "Running periodic task %(full_task_name)s" +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:227 +#: cinder/api/contrib/volume_transfer.py:157 +#: cinder/api/contrib/volume_transfer.py:193 +msgid "Incorrect request body format" msgstr "" -#: cinder/manager.py:159 +#: cinder/api/contrib/backups.py:201 #, python-format -msgid "Error during %(full_task_name)s: %(e)s" +msgid "Creating backup of volume %(volume_id)s in container %(container)s" msgstr "" -#: cinder/manager.py:203 -msgid "Notifying Schedulers of capabilities ..." +#: cinder/api/contrib/backups.py:224 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" msgstr "" -#: cinder/policy.py:30 -msgid "JSON file representing policy" +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" msgstr "" -#: cinder/policy.py:33 -msgid "Rule checked when requested rule is not found" +#: cinder/api/contrib/extended_snapshot_attributes.py:60 +msgid "Snapshot not found." msgstr "" -#: cinder/service.py:137 -msgid "SIGTERM received" +#: cinder/api/contrib/hosts.py:86 cinder/api/openstack/wsgi.py:245 +msgid "cannot understand XML" msgstr "" -#: cinder/service.py:177 +#: cinder/api/contrib/hosts.py:136 #, python-format -msgid "Starting %(topic)s node (version %(vcs_string)s)" +msgid "Host '%s' could not be found." msgstr "" -#: cinder/service.py:195 +#: cinder/api/contrib/hosts.py:165 #, python-format -msgid "Creating Consumer connection for Service %s" +msgid "Invalid status: '%s'" msgstr "" -#: cinder/service.py:282 -msgid "Service killed that has no database entry" -msgstr "Servizio terminato che non ha entry nel database" - -#: cinder/service.py:319 -msgid "The service database object disappeared, Recreating it." -msgstr "Il servizio é scomparso dal database, ricreo." - -#: cinder/service.py:334 -msgid "Recovered model server connection!" -msgstr "Connessione al model server ripristinata!" - -#: cinder/service.py:340 -msgid "model server went away" -msgstr "model server é scomparso" - -#: cinder/service.py:433 -msgid "Full set of FLAGS:" -msgstr "Insieme di FLAGS:" - -#: cinder/service.py:440 +#: cinder/api/contrib/hosts.py:168 #, python-format -msgid "%(flag)s : FLAG SET " +msgid "Invalid update setting: '%s'" msgstr "" -#: cinder/utils.py:79 -#, python-format -msgid "Inner Exception: %s" -msgstr "Eccezione interna: %s" - -#: cinder/utils.py:165 -#, python-format -msgid "Fetching %s" -msgstr "Prelievo %s" - -#: cinder/utils.py:210 +#: cinder/api/contrib/hosts.py:180 #, python-format -msgid "Got unknown keyword args to utils.execute: %r" +msgid "Setting host %(host)s to %(state)s." 
msgstr "" -#: cinder/utils.py:220 -#, python-format -msgid "Running cmd (subprocess): %s" -msgstr "Esecuzione del comando (sottoprocesso): %s" - -#: cinder/utils.py:236 cinder/utils.py:315 -#, python-format -msgid "Result was %s" -msgstr "Il risultato é %s" +#: cinder/api/contrib/hosts.py:206 +msgid "Describe-resource is admin only functionality" +msgstr "" -#: cinder/utils.py:249 -#, python-format -msgid "%r failed. Retrying." +#: cinder/api/contrib/hosts.py:214 +msgid "Host not found" msgstr "" -#: cinder/utils.py:291 -#, python-format -msgid "Running cmd (SSH): %s" -msgstr "Eseguendo cmd (SSH): %s" +#: cinder/api/contrib/qos_specs_manage.py:111 +msgid "Please specify a name for QoS specs." +msgstr "" -#: cinder/utils.py:293 -msgid "Environment not supported over SSH" +#: cinder/api/contrib/qos_specs_manage.py:220 +msgid "Failed to disassociate qos specs." msgstr "" -#: cinder/utils.py:297 -msgid "process_input not supported over SSH" +#: cinder/api/contrib/qos_specs_manage.py:222 +msgid "Qos specs still in use." msgstr "" -#: cinder/utils.py:352 -#, python-format -msgid "debug in callback: %s" -msgstr "debug in callback: %s" +#: cinder/api/contrib/qos_specs_manage.py:298 +#: cinder/api/contrib/qos_specs_manage.py:351 +msgid "Volume Type id must not be None." +msgstr "" -#: cinder/utils.py:534 -#, python-format -msgid "Link Local address is not found.:%s" +#: cinder/api/contrib/quota_classes.py:72 +msgid "Missing required element quota_class_set in request body." msgstr "" -#: cinder/utils.py:537 -#, python-format -msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +#: cinder/api/contrib/quota_classes.py:81 +msgid "Quota class limit must be specified as an integer value." msgstr "" -#: cinder/utils.py:648 -#, python-format -msgid "Invalid backend: %s" +#: cinder/api/contrib/quota_classes.py:85 +msgid "Quota class limit must be -1 or greater." msgstr "" -#: cinder/utils.py:659 -#, python-format -msgid "backend %s" +#: cinder/api/contrib/quotas.py:60 +msgid "Quota limit must be specified as an integer value." msgstr "" -#: cinder/utils.py:709 -msgid "in looping call" +#: cinder/api/contrib/quotas.py:65 +msgid "Quota limit must be -1 or greater." msgstr "" -#: cinder/utils.py:927 -#, python-format -msgid "Attempting to grab semaphore \"%(lock)s\" for method \"%(method)s\"..." +#: cinder/api/contrib/quotas.py:100 +msgid "Missing required element quota_set in request body." msgstr "" -#: cinder/utils.py:931 +#: cinder/api/contrib/quotas.py:111 #, python-format -msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgid "Bad key(s) in quota set: %s" msgstr "" -#: cinder/utils.py:935 -#, python-format -msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +#: cinder/api/contrib/scheduler_hints.py:36 +msgid "Malformed scheduler_hints attribute" msgstr "" -#: cinder/utils.py:942 -#, python-format -msgid "Got file lock \"%(lock)s\" for method \"%(method)s\"..." +#: cinder/api/contrib/services.py:84 +msgid "" +"Query by service parameter is deprecated. Please use binary parameter " +"instead." msgstr "" -#: cinder/utils.py:1001 -#, python-format -msgid "Found sentinel %(filename)s for pid %(pid)s" +#: cinder/api/contrib/snapshot_actions.py:51 +msgid "'status' must be specified." 
msgstr "" -#: cinder/utils.py:1008 +#: cinder/api/contrib/snapshot_actions.py:61 #, python-format -msgid "Cleaned sentinel %(filename)s for pid %(pid)s" +msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" msgstr "" -#: cinder/utils.py:1023 +#: cinder/api/contrib/snapshot_actions.py:67 #, python-format -msgid "Found lockfile %(file)s with link count %(count)d" +msgid "" +"Provided snapshot status %(provided)s not allowed for snapshot with " +"status %(current)s." msgstr "" -#: cinder/utils.py:1028 -#, python-format -msgid "Cleaned lockfile %(file)s with link count %(count)d" +#: cinder/api/contrib/snapshot_actions.py:79 +msgid "progress must be an integer percentage" msgstr "" -#: cinder/utils.py:1138 -#, python-format -msgid "Expected object of type: %s" +#: cinder/api/contrib/types_extra_specs.py:101 +msgid "Request body empty" msgstr "" -#: cinder/utils.py:1169 -#, python-format -msgid "Invalid server_string: %s" +#: cinder/api/contrib/types_extra_specs.py:105 +#: cinder/api/v1/snapshot_metadata.py:75 cinder/api/v1/volume_metadata.py:75 +#: cinder/api/v2/snapshot_metadata.py:75 cinder/api/v2/volume_metadata.py:74 +msgid "Request body and URI mismatch" msgstr "" -#: cinder/utils.py:1298 -#, python-format -msgid "timefunc: '%(name)s' took %(total_time).2f secs" +#: cinder/api/contrib/types_extra_specs.py:108 +#: cinder/api/v1/snapshot_metadata.py:79 cinder/api/v1/volume_metadata.py:79 +#: cinder/api/v2/snapshot_metadata.py:79 cinder/api/v2/volume_metadata.py:78 +msgid "Request body contains too many items" msgstr "" -#: cinder/utils.py:1330 -msgid "Original exception being dropped" +#: cinder/api/contrib/types_extra_specs.py:150 +msgid "" +"Key names can only contain alphanumeric characters, underscores, periods," +" colons and hyphens." msgstr "" -#: cinder/utils.py:1461 +#: cinder/api/contrib/volume_actions.py:99 #, python-format -msgid "Class %(fullname)s is deprecated: %(msg)s" +msgid "" +"Invalid request to attach volume to an instance %(instance_uuid)s and a " +"host %(host_name)s simultaneously" msgstr "" -#: cinder/utils.py:1463 -#, python-format -msgid "Class %(fullname)s is deprecated" +#: cinder/api/contrib/volume_actions.py:107 +msgid "Invalid request to attach volume to an invalid target" msgstr "" -#: cinder/utils.py:1495 -#, python-format -msgid "Function %(name)s in %(location)s is deprecated: %(msg)s" +#: cinder/api/contrib/volume_actions.py:111 +msgid "" +"Invalid request to attach volume with an invalid mode. Attaching mode " +"should be 'rw' or 'ro'" msgstr "" -#: cinder/utils.py:1497 -#, python-format -msgid "Function %(name)s in %(location)s is deprecated" +#: cinder/api/contrib/volume_actions.py:196 +msgid "Unable to fetch connection information from backend." msgstr "" -#: cinder/utils.py:1681 -#, python-format -msgid "Could not remove tmpdir: %s" +#: cinder/api/contrib/volume_actions.py:216 +msgid "Unable to terminate volume connection from backend." msgstr "" -#: cinder/wsgi.py:97 -#, python-format -msgid "Started %(name)s on %(host)s:%(port)s" +#: cinder/api/contrib/volume_actions.py:229 +msgid "No image_name was specified in request." msgstr "" -#: cinder/wsgi.py:108 -msgid "Stopping WSGI server." +#: cinder/api/contrib/volume_actions.py:237 +msgid "Bad value for 'force' parameter." msgstr "" -#: cinder/wsgi.py:111 -msgid "Stopping raw TCP server." +#: cinder/api/contrib/volume_actions.py:240 +msgid "'force' is not string or bool." 
msgstr "" -#: cinder/wsgi.py:117 -#, python-format -msgid "Starting TCP server %(arg0)s on %(host)s:%(port)s" +#: cinder/api/contrib/volume_actions.py:280 +msgid "New volume size must be specified as an integer." msgstr "" -#: cinder/wsgi.py:133 -msgid "WSGI server has stopped." +#: cinder/api/contrib/volume_actions.py:299 +msgid "Must specify readonly in request." msgstr "" -#: cinder/wsgi.py:211 -msgid "You must implement __call__" +#: cinder/api/contrib/volume_actions.py:307 +msgid "Bad value for 'readonly'" msgstr "" -#: cinder/api/direct.py:218 -msgid "not available" +#: cinder/api/contrib/volume_actions.py:311 +msgid "'readonly' not string or bool" msgstr "" -#: cinder/api/direct.py:299 -#, python-format -msgid "Returned non-serializeable type: %s" +#: cinder/api/contrib/volume_actions.py:325 +msgid "New volume type must be specified." msgstr "" -#: cinder/api/sizelimit.py:51 -msgid "Request is too large." +#: cinder/api/contrib/volume_transfer.py:131 +msgid "Listing volume transfers" msgstr "" -#: cinder/api/validator.py:142 +#: cinder/api/contrib/volume_transfer.py:147 #, python-format -msgid "%(key)s with value %(value)s failed validator %(validator)s" +msgid "Creating new volume transfer %s" msgstr "" -#: cinder/api/ec2/__init__.py:73 +#: cinder/api/contrib/volume_transfer.py:162 +#, fuzzy, python-format +msgid "Creating transfer of volume %s" +msgstr "Impossibile localizzare il volume %s" + +#: cinder/api/contrib/volume_transfer.py:183 #, python-format -msgid "%(code)s: %(message)s" +msgid "Accepting volume transfer %s" msgstr "" -#: cinder/api/ec2/__init__.py:95 +#: cinder/api/contrib/volume_transfer.py:196 #, python-format -msgid "FaultWrapper: %s" +msgid "Accepting transfer %s" msgstr "" -#: cinder/api/ec2/__init__.py:170 -msgid "Too many failed authentications." +#: cinder/api/contrib/volume_transfer.py:217 +#, python-format +msgid "Delete transfer with id: %s" msgstr "" -#: cinder/api/ec2/__init__.py:180 -#, python-format -msgid "" -"Access key %(access_key)s has had %(failures)d failed authentications and" -" will be locked out for %(lock_mins)d minutes." +#: cinder/api/contrib/volume_type_encryption.py:64 +msgid "key_size must be non-negative" msgstr "" -#: cinder/api/ec2/__init__.py:267 -msgid "Signature not provided" +#: cinder/api/contrib/volume_type_encryption.py:67 +msgid "key_size must be an integer" msgstr "" -#: cinder/api/ec2/__init__.py:271 -msgid "Access key not provided" +#: cinder/api/contrib/volume_type_encryption.py:73 +msgid "provider must be defined" msgstr "" -#: cinder/api/ec2/__init__.py:306 cinder/api/ec2/__init__.py:319 -msgid "Failure communicating with keystone" +#: cinder/api/contrib/volume_type_encryption.py:75 +msgid "control_location must be defined" msgstr "" -#: cinder/api/ec2/__init__.py:388 +#: cinder/api/contrib/volume_type_encryption.py:83 #, python-format -msgid "Authentication Failure: %s" +msgid "Valid control location are: %s" msgstr "" -#: cinder/api/ec2/__init__.py:404 -#, python-format -msgid "Authenticated Request For %(uname)s:%(pname)s)" +#: cinder/api/contrib/volume_type_encryption.py:111 +msgid "Cannot create encryption specs. Volume type in use." msgstr "" -#: cinder/api/ec2/__init__.py:435 -#, python-format -msgid "action: %s" +#: cinder/api/contrib/volume_type_encryption.py:115 +msgid "Create body is not valid." msgstr "" -#: cinder/api/ec2/__init__.py:437 -#, python-format -msgid "arg: %(key)s\t\tval: %(value)s" +#: cinder/api/contrib/volume_type_encryption.py:157 +msgid "Cannot delete encryption specs. 
Volume type in use." msgstr "" -#: cinder/api/ec2/__init__.py:512 -#, python-format -msgid "Unauthorized request for controller=%(controller)s and action=%(action)s" +#: cinder/api/middleware/auth.py:108 +msgid "Invalid service catalog json." msgstr "" -#: cinder/api/ec2/__init__.py:584 +#: cinder/api/middleware/fault.py:44 #, python-format -msgid "InstanceNotFound raised: %s" +msgid "Caught error: %s" msgstr "" -#: cinder/api/ec2/__init__.py:590 +#: cinder/api/middleware/fault.py:53 cinder/api/openstack/wsgi.py:984 #, python-format -msgid "VolumeNotFound raised: %s" +msgid "%(url)s returned with HTTP %(status)d" msgstr "" -#: cinder/api/ec2/__init__.py:596 +#: cinder/api/middleware/fault.py:69 #, python-format -msgid "SnapshotNotFound raised: %s" +msgid "%(exception)s: %(explanation)s" msgstr "" -#: cinder/api/ec2/__init__.py:602 -#, python-format -msgid "NotFound raised: %s" +#: cinder/api/middleware/sizelimit.py:55 cinder/api/middleware/sizelimit.py:64 +#: cinder/api/middleware/sizelimit.py:78 +msgid "Request is too large." msgstr "" -#: cinder/api/ec2/__init__.py:605 -#, python-format -msgid "EC2APIError raised: %s" +#: cinder/api/openstack/__init__.py:69 +msgid "Must specify an ExtensionManager class" msgstr "" -#: cinder/api/ec2/__init__.py:613 +#: cinder/api/openstack/__init__.py:80 #, python-format -msgid "KeyPairExists raised: %s" +msgid "Extended resource: %s" msgstr "" -#: cinder/api/ec2/__init__.py:617 +#: cinder/api/openstack/__init__.py:104 #, python-format -msgid "InvalidParameterValue raised: %s" +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" msgstr "" -#: cinder/api/ec2/__init__.py:621 +#: cinder/api/openstack/__init__.py:110 #, python-format -msgid "InvalidPortRange raised: %s" +msgid "Extension %(ext_name)s extending resource: %(collection)s" msgstr "" -#: cinder/api/ec2/__init__.py:625 -#, python-format -msgid "NotAuthorized raised: %s" +#: cinder/api/openstack/__init__.py:126 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." msgstr "" -#: cinder/api/ec2/__init__.py:629 -#, python-format -msgid "InvalidRequest raised: %s" +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." msgstr "" -#: cinder/api/ec2/__init__.py:633 -#, python-format -msgid "QuotaError raised: %s" +#: cinder/api/openstack/wsgi.py:220 cinder/api/openstack/wsgi.py:634 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:639 +msgid "too many body keys" msgstr "" -#: cinder/api/ec2/__init__.py:637 +#: cinder/api/openstack/wsgi.py:677 #, python-format -msgid "Invalid id: bogus (expecting \"i-...\"): %s" +msgid "Exception handling resource: %s" msgstr "" -#: cinder/api/ec2/__init__.py:646 +#: cinder/api/openstack/wsgi.py:682 #, python-format -msgid "Unexpected error raised: %s" +msgid "Fault thrown: %s" msgstr "" -#: cinder/api/ec2/__init__.py:647 +#: cinder/api/openstack/wsgi.py:685 #, python-format -msgid "Environment: %s" +msgid "HTTP exception thrown: %s" msgstr "" -#: cinder/api/ec2/__init__.py:649 cinder/api/metadata/handler.py:248 -msgid "An unknown error has occurred. Please try your request again." 
+#: cinder/api/openstack/wsgi.py:793 +msgid "Empty body provided in request" msgstr "" -#: cinder/api/ec2/apirequest.py:64 -#, python-format -msgid "Unsupported API request: controller = %(controller)s, action = %(action)s" +#: cinder/api/openstack/wsgi.py:799 +msgid "Unrecognized Content-Type provided in request" msgstr "" -#: cinder/api/ec2/cloud.py:336 -#, python-format -msgid "Create snapshot of volume %s" +#: cinder/api/openstack/wsgi.py:803 +msgid "No Content-Type provided in request" msgstr "" -#: cinder/api/ec2/cloud.py:372 +#: cinder/api/openstack/wsgi.py:914 #, python-format -msgid "" -"Value (%s) for KeyName is invalid. Content limited to Alphanumeric " -"character, spaces, dashes, and underscore." +msgid "There is no such action: %s" msgstr "" -#: cinder/api/ec2/cloud.py:378 -#, python-format -msgid "Value (%s) for Keyname is invalid. Length exceeds maximum of 255." +#: cinder/api/openstack/wsgi.py:917 cinder/api/openstack/wsgi.py:930 +#: cinder/api/v1/snapshot_metadata.py:53 cinder/api/v1/snapshot_metadata.py:71 +#: cinder/api/v1/snapshot_metadata.py:96 cinder/api/v1/snapshot_metadata.py:121 +#: cinder/api/v1/volume_metadata.py:53 cinder/api/v1/volume_metadata.py:71 +#: cinder/api/v1/volume_metadata.py:96 cinder/api/v1/volume_metadata.py:121 +#: cinder/api/v2/snapshot_metadata.py:53 cinder/api/v2/snapshot_metadata.py:71 +#: cinder/api/v2/snapshot_metadata.py:96 cinder/api/v2/snapshot_metadata.py:121 +#: cinder/api/v2/volume_metadata.py:52 cinder/api/v2/volume_metadata.py:70 +#: cinder/api/v2/volume_metadata.py:95 cinder/api/v2/volume_metadata.py:120 +msgid "Malformed request body" msgstr "" -#: cinder/api/ec2/cloud.py:382 -#, python-format -msgid "Create key pair %s" +#: cinder/api/openstack/wsgi.py:927 +msgid "Unsupported Content-Type" msgstr "" -#: cinder/api/ec2/cloud.py:391 -#, python-format -msgid "Import key %s" +#: cinder/api/openstack/wsgi.py:939 +msgid "Malformed request url" msgstr "" -#: cinder/api/ec2/cloud.py:409 +#: cinder/api/openstack/wsgi.py:987 #, python-format -msgid "Delete key pair %s" +msgid "%(url)s returned a fault: %(e)s" msgstr "" -#: cinder/api/ec2/cloud.py:551 -msgid "Invalid CIDR" +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." msgstr "" -#: cinder/api/ec2/cloud.py:639 cinder/api/ec2/cloud.py:693 -#: cinder/api/ec2/cloud.py:800 -msgid "Not enough parameters, need group_name or group_id" +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." msgstr "" -#: cinder/api/ec2/cloud.py:654 -#: cinder/api/openstack/compute/contrib/security_groups.py:517 +#: cinder/api/v1/limits.py:139 cinder/api/v2/limits.py:138 #, python-format -msgid "Revoke security group ingress %s" +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." msgstr "" -#: cinder/api/ec2/cloud.py:666 cinder/api/ec2/cloud.py:719 -#, python-format -msgid "%s Not enough parameters to build a valid rule" +#: cinder/api/v1/limits.py:264 cinder/api/v2/limits.py:261 +msgid "This request was rate-limited." msgstr "" -#: cinder/api/ec2/cloud.py:684 cinder/api/ec2/cloud.py:744 -msgid "No rule for the specified parameters." 
+#: cinder/api/v1/snapshot_metadata.py:37 cinder/api/v1/snapshot_metadata.py:117 +#: cinder/api/v1/snapshot_metadata.py:156 cinder/api/v2/snapshot_metadata.py:37 +#: cinder/api/v2/snapshot_metadata.py:117 +#: cinder/api/v2/snapshot_metadata.py:156 +#, fuzzy +msgid "snapshot does not exist" +msgstr "istanza %s: creazione snapshot in corso" + +#: cinder/api/v1/snapshot_metadata.py:139 +#: cinder/api/v1/snapshot_metadata.py:149 cinder/api/v1/volume_metadata.py:139 +#: cinder/api/v1/volume_metadata.py:149 cinder/api/v2/snapshot_metadata.py:139 +#: cinder/api/v2/snapshot_metadata.py:149 cinder/api/v2/volume_metadata.py:138 +#: cinder/api/v2/volume_metadata.py:148 +msgid "Metadata item was not found" msgstr "" -#: cinder/api/ec2/cloud.py:708 -#: cinder/api/openstack/compute/contrib/security_groups.py:354 +#: cinder/api/v1/snapshots.py:119 cinder/api/v2/snapshots.py:120 #, python-format -msgid "Authorize security group ingress %s" +msgid "Delete snapshot with id: %s" msgstr "" -#: cinder/api/ec2/cloud.py:725 -#, python-format -msgid "%s - This rule already exists in group" +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:184 +msgid "'volume_id' must be specified" msgstr "" -#: cinder/api/ec2/cloud.py:769 +#: cinder/api/v1/snapshots.py:182 cinder/api/v2/snapshots.py:193 #, python-format -msgid "" -"Value (%s) for parameter GroupName is invalid. Content limited to " -"Alphanumeric characters, spaces, dashes, and underscores." +msgid "Create snapshot from volume %s" msgstr "" -#: cinder/api/ec2/cloud.py:776 +#: cinder/api/v1/snapshots.py:186 cinder/api/v2/snapshots.py:202 #, python-format -msgid "" -"Value (%s) for parameter GroupName is invalid. Length exceeds maximum of " -"255." +msgid "Invalid value '%s' for force. " msgstr "" -#: cinder/api/ec2/cloud.py:780 -#: cinder/api/openstack/compute/contrib/security_groups.py:292 -#, python-format -msgid "Create Security Group %s" +#: cinder/api/v1/volume_metadata.py:37 cinder/api/v1/volume_metadata.py:117 +#: cinder/api/v1/volume_metadata.py:156 cinder/api/v2/volume_metadata.py:36 +#: cinder/api/v2/volume_metadata.py:116 cinder/api/v2/volume_metadata.py:155 +msgid "volume does not exist" msgstr "" -#: cinder/api/ec2/cloud.py:783 +#: cinder/api/v1/volumes.py:111 #, python-format -msgid "group %s already exists" +msgid "vol=%s" msgstr "" -#: cinder/api/ec2/cloud.py:815 -#: cinder/api/openstack/compute/contrib/security_groups.py:245 +#: cinder/api/v1/volumes.py:290 cinder/api/v2/volumes.py:228 #, python-format -msgid "Delete security group %s" +msgid "Delete volume with id: %s" msgstr "" -#: cinder/api/ec2/cloud.py:823 cinder/compute/manager.py:1630 -#, python-format -msgid "Get console output for instance %s" +#: cinder/api/v1/volumes.py:344 cinder/api/v1/volumes.py:348 +#: cinder/api/v2/volumes.py:298 cinder/api/v2/volumes.py:302 +msgid "Invalid imageRef provided." 
msgstr "" -#: cinder/api/ec2/cloud.py:894 +#: cinder/api/v1/volumes.py:388 cinder/api/v2/volumes.py:354 #, python-format -msgid "Create volume from snapshot %s" +msgid "snapshot id:%s not found" msgstr "" -#: cinder/api/ec2/cloud.py:898 cinder/api/openstack/compute/contrib/volumes.py:186 -#: cinder/api/openstack/volume/volumes.py:222 +#: cinder/api/v1/volumes.py:401 #, python-format -msgid "Create volume of %s GB" +msgid "source vol id:%s not found" msgstr "" -#: cinder/api/ec2/cloud.py:921 -msgid "Delete Failed" +#: cinder/api/v1/volumes.py:412 cinder/api/v2/volumes.py:377 +#, python-format +msgid "Create volume of %s GB" msgstr "" -#: cinder/api/ec2/cloud.py:931 +#: cinder/api/v1/volumes.py:496 #, python-format -msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" +msgid "Removing options '%(bad_options)s' from query" msgstr "" -#: cinder/api/ec2/cloud.py:939 -msgid "Attach Failed." +#: cinder/api/v2/snapshots.py:111 cinder/api/v2/snapshots.py:126 +#: cinder/api/v2/snapshots.py:267 +msgid "Snapshot could not be found" msgstr "" -#: cinder/api/ec2/cloud.py:952 cinder/api/openstack/compute/contrib/volumes.py:366 +#: cinder/api/v2/snapshots.py:174 cinder/api/v2/snapshots.py:234 +#: cinder/api/v2/volumes.py:313 cinder/api/v2/volumes.py:419 #, python-format -msgid "Detach volume %s" +msgid "Missing required element '%s' in request body" msgstr "" -#: cinder/api/ec2/cloud.py:959 -msgid "Detach Volume Failed." +#: cinder/api/v2/snapshots.py:190 cinder/api/v2/volumes.py:217 +#: cinder/api/v2/volumes.py:234 cinder/api/v2/volumes.py:449 +msgid "Volume could not be found" msgstr "" -#: cinder/api/ec2/cloud.py:984 cinder/api/ec2/cloud.py:1041 -#: cinder/api/ec2/cloud.py:1518 cinder/api/ec2/cloud.py:1533 -#, python-format -msgid "attribute not supported: %s" +#: cinder/api/v2/snapshots.py:230 cinder/api/v2/volumes.py:415 +msgid "Missing request body" msgstr "" -#: cinder/api/ec2/cloud.py:1107 -#, python-format -msgid "vol = %s\n" +#: cinder/api/v2/types.py:70 +msgid "Volume type not found" msgstr "" -#: cinder/api/ec2/cloud.py:1255 -msgid "Allocate address" +#: cinder/api/v2/volumes.py:237 +msgid "Volume cannot be deleted while in attached state" msgstr "" -#: cinder/api/ec2/cloud.py:1267 -#, python-format -msgid "Release address %s" +#: cinder/api/v2/volumes.py:343 +msgid "Volume type not found." 
msgstr "" -#: cinder/api/ec2/cloud.py:1272 +#: cinder/api/v2/volumes.py:366 #, python-format -msgid "Associate address %(public_ip)s to instance %(instance_id)s" +msgid "source volume id:%s not found" msgstr "" -#: cinder/api/ec2/cloud.py:1282 +#: cinder/api/v2/volumes.py:472 #, python-format -msgid "Disassociate address %s" +msgid "Removing options '%s' from query" msgstr "" -#: cinder/api/ec2/cloud.py:1308 -msgid "Image must be available" -msgstr "" +#: cinder/backup/api.py:66 +#, fuzzy +msgid "Backup status must be available or error" +msgstr "Lo stato del volume deve essere disponibile" + +#: cinder/backup/api.py:105 +#, fuzzy +msgid "Volume to be backed up must be available" +msgstr "Lo stato del volume deve essere disponibile" -#: cinder/api/ec2/cloud.py:1329 -msgid "Going to start terminating instances" +#: cinder/backup/api.py:140 +#, fuzzy +msgid "Backup status must be available" +msgstr "Lo stato del volume deve essere disponibile" + +#: cinder/backup/api.py:145 +msgid "Backup to be restored has invalid size" msgstr "" -#: cinder/api/ec2/cloud.py:1343 +#: cinder/backup/api.py:154 #, python-format -msgid "Reboot instance %r" +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" msgstr "" -#: cinder/api/ec2/cloud.py:1354 -msgid "Going to stop instances" -msgstr "" +#: cinder/backup/api.py:170 +#, fuzzy +msgid "Volume to be restored to must be available" +msgstr "Lo stato del volume deve essere disponibile" -#: cinder/api/ec2/cloud.py:1365 -msgid "Going to start instances" +#: cinder/backup/api.py:176 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." msgstr "" -#: cinder/api/ec2/cloud.py:1455 +#: cinder/backup/api.py:181 #, python-format -msgid "De-registering image %s" +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" msgstr "" -#: cinder/api/ec2/cloud.py:1471 -msgid "imageLocation is required" +#: cinder/backup/manager.py:97 +msgid "NULL host not allowed for volume backend lookup." msgstr "" -#: cinder/api/ec2/cloud.py:1490 +#: cinder/backup/manager.py:100 #, python-format -msgid "Registered image %(image_location)s with id %(image_id)s" +msgid "Checking hostname '%s' for backend info." msgstr "" -#: cinder/api/ec2/cloud.py:1536 -msgid "user or group not specified" +#: cinder/backup/manager.py:107 +#, python-format +msgid "Backend not found in hostname (%s) so using default." msgstr "" -#: cinder/api/ec2/cloud.py:1538 -msgid "only group \"all\" is supported" +#: cinder/backup/manager.py:117 +#, python-format +msgid "Manager requested for volume_backend '%s'." msgstr "" -#: cinder/api/ec2/cloud.py:1540 -msgid "operation_type must be add or remove" +#: cinder/backup/manager.py:120 cinder/backup/manager.py:132 +msgid "Fetching default backend." msgstr "" -#: cinder/api/ec2/cloud.py:1542 +#: cinder/backup/manager.py:123 #, python-format -msgid "Updating image %s publicity" +msgid "Volume manager for backend '%s' does not exist." msgstr "" -#: cinder/api/ec2/cloud.py:1555 +#: cinder/backup/manager.py:129 #, python-format -msgid "Not allowed to modify attributes for image %s" +msgid "Driver requested for volume_backend '%s'." msgstr "" -#: cinder/api/ec2/cloud.py:1603 +#: cinder/backup/manager.py:147 #, python-format -msgid "Couldn't stop instance with in %d sec" +msgid "" +"Registering backend %(backend)s (host=%(host)s " +"backend_name=%(backend_name)s)." 
msgstr "" -#: cinder/api/metadata/handler.py:246 cinder/api/metadata/handler.py:253 +#: cinder/backup/manager.py:154 #, python-format -msgid "Failed to get metadata for ip: %s" +msgid "Registering default backend %s." msgstr "" -#: cinder/api/openstack/__init__.py:43 +#: cinder/backup/manager.py:158 #, python-format -msgid "Caught error: %s" +msgid "Starting volume driver %(driver_name)s (%(version)s)." msgstr "" -#: cinder/api/openstack/__init__.py:45 cinder/api/openstack/wsgi.py:886 +#: cinder/backup/manager.py:165 #, python-format -msgid "%(url)s returned with HTTP %(status)d" +msgid "Error encountered during initialization of driver: %(name)s." msgstr "" -#: cinder/api/openstack/__init__.py:94 -msgid "Must specify an ExtensionManager class" +#: cinder/backup/manager.py:184 +msgid "Cleaning up incomplete backup operations." msgstr "" -#: cinder/api/openstack/__init__.py:105 +#: cinder/backup/manager.py:189 #, python-format -msgid "Extended resource: %s" +msgid "Resetting volume %s to available (was backing-up)." msgstr "" -#: cinder/api/openstack/__init__.py:130 +#: cinder/backup/manager.py:194 #, python-format -msgid "" -"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " -"resource" +msgid "Resetting volume %s to error_restoring (was restoring-backup)." msgstr "" -#: cinder/api/openstack/__init__.py:135 +#: cinder/backup/manager.py:206 #, python-format -msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgid "Resetting backup %s to error (was creating)." msgstr "" -#: cinder/api/openstack/auth.py:90 +#: cinder/backup/manager.py:212 #, python-format -msgid "%(user_id)s could not be found with token '%(token)s'" +msgid "Resetting backup %s to available (was restoring)." msgstr "" -#: cinder/api/openstack/auth.py:134 +#: cinder/backup/manager.py:217 #, python-format -msgid "%(user_id)s must be an admin or a member of %(project_id)s" +msgid "Resuming delete on backup: %s." msgstr "" -#: cinder/api/openstack/auth.py:152 -msgid "Authentication requests must be made against a version root (e.g. /v2)." +#: cinder/backup/manager.py:225 +#, python-format +msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." msgstr "" -#: cinder/api/openstack/auth.py:167 +#: cinder/backup/manager.py:237 #, python-format -msgid "Could not find %s in request." +msgid "" +"Create backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s." msgstr "" -#: cinder/api/openstack/auth.py:191 +#: cinder/backup/manager.py:249 #, python-format -msgid "Successfully authenticated '%s'" -msgstr "" - -#: cinder/api/openstack/auth.py:241 -msgid "User not found with provided API key." +msgid "" +"Create backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." msgstr "" -#: cinder/api/openstack/auth.py:258 +#: cinder/backup/manager.py:282 #, python-format -msgid "Provided API key is valid, but not for user '%(username)s'" +msgid "Create backup finished. backup: %s." msgstr "" -#: cinder/api/openstack/common.py:133 cinder/api/openstack/common.py:167 -msgid "limit param must be an integer" +#: cinder/backup/manager.py:286 +#, python-format +msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s." msgstr "" -#: cinder/api/openstack/common.py:136 cinder/api/openstack/common.py:171 -msgid "limit param must be positive" +#: cinder/backup/manager.py:299 +#, python-format +msgid "" +"Restore backup aborted: expected volume status %(expected_status)s but " +"got %(actual_status)s." 
msgstr "" -#: cinder/api/openstack/common.py:161 -msgid "offset param must be an integer" +#: cinder/backup/manager.py:310 +#, python-format +msgid "" +"Restore backup aborted: expected backup status %(expected_status)s but " +"got %(actual_status)s." msgstr "" -#: cinder/api/openstack/common.py:175 -msgid "offset param must be positive" +#: cinder/backup/manager.py:329 +#, python-format +msgid "" +"Restore backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." msgstr "" -#: cinder/api/openstack/common.py:203 +#: cinder/backup/manager.py:360 #, python-format -msgid "marker [%s] not found" +msgid "" +"Restore backup finished, backup %(backup_id)s restored to volume " +"%(volume_id)s." msgstr "" -#: cinder/api/openstack/common.py:243 +#: cinder/backup/manager.py:379 #, python-format -msgid "href %s does not contain version" +msgid "Delete backup started, backup: %s." msgstr "" -#: cinder/api/openstack/common.py:278 -msgid "Image metadata limit exceeded" +#: cinder/backup/manager.py:386 +#, python-format +msgid "" +"Delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." msgstr "" -#: cinder/api/openstack/common.py:295 +#: cinder/backup/manager.py:399 #, python-format -msgid "Converting nw_info: %s" +msgid "" +"Delete backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." msgstr "" -#: cinder/api/openstack/common.py:305 +#: cinder/backup/manager.py:422 #, python-format -msgid "Converted networks: %s" +msgid "Delete backup finished, backup %s deleted." msgstr "" -#: cinder/api/openstack/common.py:338 -#, python-format -msgid "Cannot '%(action)s' while instance is in %(attr)s %(state)s" +#: cinder/backup/drivers/ceph.py:116 +msgid "" +"rbd striping not supported - ignoring configuration settings for rbd " +"striping" msgstr "" -#: cinder/api/openstack/common.py:341 +#: cinder/backup/drivers/ceph.py:147 #, python-format -msgid "Instance is in an invalid state for '%(action)s'" +msgid "invalid user '%s'" msgstr "" -#: cinder/api/openstack/common.py:421 -msgid "Rejecting snapshot request, snapshots currently disabled" +#: cinder/backup/drivers/ceph.py:213 +msgid "backup_id required" msgstr "" -#: cinder/api/openstack/common.py:423 -msgid "Instance snapshots are not permitted at this time." 
+#: cinder/backup/drivers/ceph.py:224 +#, python-format +msgid "discarding %(length)s bytes from offset %(offset)s" msgstr "" -#: cinder/api/openstack/extensions.py:188 +#: cinder/backup/drivers/ceph.py:232 #, python-format -msgid "Loaded extension: %s" +msgid "writing zeroes chunk %d" msgstr "" -#: cinder/api/openstack/extensions.py:225 +#: cinder/backup/drivers/ceph.py:246 #, python-format -msgid "Ext name: %s" +msgid "transferring data between '%(src)s' and '%(dest)s'" msgstr "" -#: cinder/api/openstack/extensions.py:226 +#: cinder/backup/drivers/ceph.py:250 #, python-format -msgid "Ext alias: %s" +msgid "%(chunks)s chunks of %(bytes)s bytes to be transferred" msgstr "" -#: cinder/api/openstack/extensions.py:227 +#: cinder/backup/drivers/ceph.py:269 #, python-format -msgid "Ext description: %s" +msgid "transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s)" msgstr "" -#: cinder/api/openstack/extensions.py:229 +#: cinder/backup/drivers/ceph.py:279 #, python-format -msgid "Ext namespace: %s" +msgid "transferring remaining %s bytes" msgstr "" -#: cinder/api/openstack/extensions.py:230 +#: cinder/backup/drivers/ceph.py:295 #, python-format -msgid "Ext updated: %s" +msgid "creating base image '%s'" msgstr "" -#: cinder/api/openstack/extensions.py:232 +#: cinder/backup/drivers/ceph.py:322 cinder/backup/drivers/ceph.py:603 #, python-format -msgid "Exception loading extension: %s" +msgid "deleting backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:325 +msgid "no backup snapshot to delete" msgstr "" -#: cinder/api/openstack/extensions.py:246 +#: cinder/backup/drivers/ceph.py:361 #, python-format -msgid "Loading extension %s" +msgid "trying diff format name format basename='%s'" msgstr "" -#: cinder/api/openstack/extensions.py:252 +#: cinder/backup/drivers/ceph.py:369 #, python-format -msgid "Calling extension factory %s" +msgid "image %s not found" msgstr "" -#: cinder/api/openstack/extensions.py:264 +#: cinder/backup/drivers/ceph.py:377 #, python-format -msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgid "base image still has %s snapshots so skipping base image delete" msgstr "" -#: cinder/api/openstack/extensions.py:344 +#: cinder/backup/drivers/ceph.py:382 #, python-format -msgid "Failed to load extension %(classpath)s: %(exc)s" +msgid "deleting base image='%s'" msgstr "" -#: cinder/api/openstack/extensions.py:368 +#: cinder/backup/drivers/ceph.py:389 #, python-format -msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgid "image busy, retrying %(retries)s more time(s) in %(delay)ss" msgstr "" -#: cinder/api/openstack/wsgi.py:135 cinder/api/openstack/wsgi.py:538 -msgid "cannot understand JSON" +#: cinder/backup/drivers/ceph.py:394 +msgid "max retries reached - raising error" msgstr "" -#: cinder/api/openstack/wsgi.py:159 -#: cinder/api/openstack/compute/contrib/hosts.py:86 -msgid "cannot understand XML" +#: cinder/backup/drivers/ceph.py:397 +#, python-format +msgid "base backup image='%s' deleted)" msgstr "" -#: cinder/api/openstack/wsgi.py:543 -msgid "too many body keys" +#: cinder/backup/drivers/ceph.py:407 +#, python-format +msgid "deleting source snap '%s'" msgstr "" -#: cinder/api/openstack/wsgi.py:582 +#: cinder/backup/drivers/ceph.py:453 #, python-format -msgid "Exception handling resource: %s" +msgid "performing differential transfer from '%(src)s' to '%(dest)s'" msgstr "" -#: cinder/api/openstack/wsgi.py:586 +#: cinder/backup/drivers/ceph.py:478 #, python-format -msgid "Fault thrown: %s" +msgid "rbd diff op failed - (ret=%(ret)s 
stderr=%(stderr)s)" msgstr "" -#: cinder/api/openstack/wsgi.py:589 +#: cinder/backup/drivers/ceph.py:488 #, python-format -msgid "HTTP exception thrown: %s" +msgid "image '%s' not found - trying diff format name" msgstr "" -#: cinder/api/openstack/wsgi.py:697 -msgid "Unrecognized Content-Type provided in request" +#: cinder/backup/drivers/ceph.py:493 +#, python-format +msgid "diff format image '%s' not found" msgstr "" -#: cinder/api/openstack/wsgi.py:701 -msgid "No Content-Type provided in request" +#: cinder/backup/drivers/ceph.py:528 +#, python-format +msgid "using --from-snap '%s'" msgstr "" -#: cinder/api/openstack/wsgi.py:705 -msgid "Empty body provided in request" +#: cinder/backup/drivers/ceph.py:543 +#, python-format +msgid "source snap '%s' is stale so deleting" msgstr "" -#: cinder/api/openstack/wsgi.py:816 +#: cinder/backup/drivers/ceph.py:555 #, python-format -msgid "There is no such action: %s" +msgid "" +"snap='%(snap)s' does not exist in base image='%(base)s' - aborting " +"incremental backup" msgstr "" -#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 -#: cinder/api/openstack/compute/server_metadata.py:58 -#: cinder/api/openstack/compute/server_metadata.py:76 -#: cinder/api/openstack/compute/server_metadata.py:101 -#: cinder/api/openstack/compute/server_metadata.py:126 -#: cinder/api/openstack/compute/contrib/admin_actions.py:211 -#: cinder/api/openstack/compute/contrib/console_output.py:52 -msgid "Malformed request body" +#: cinder/backup/drivers/ceph.py:566 +#, python-format +msgid "creating backup snapshot='%s'" msgstr "" -#: cinder/api/openstack/wsgi.py:829 -msgid "Unsupported Content-Type" +#: cinder/backup/drivers/ceph.py:586 +#, python-format +msgid "differential backup transfer completed in %.4fs" msgstr "" -#: cinder/api/openstack/wsgi.py:841 -msgid "Malformed request url" +#: cinder/backup/drivers/ceph.py:595 +msgid "differential backup transfer failed" msgstr "" -#: cinder/api/openstack/wsgi.py:889 +#: cinder/backup/drivers/ceph.py:625 #, python-format -msgid "%(url)s returned a fault: %(e)s" +msgid "creating base image='%s'" msgstr "" -#: cinder/api/openstack/xmlutil.py:265 -msgid "element is not a child" +#: cinder/backup/drivers/ceph.py:634 +msgid "copying data" msgstr "" -#: cinder/api/openstack/xmlutil.py:414 -msgid "root element selecting a list" +#: cinder/backup/drivers/ceph.py:694 +#, python-format +msgid "looking for snapshot of backup base '%s'" msgstr "" -#: cinder/api/openstack/xmlutil.py:739 +#: cinder/backup/drivers/ceph.py:697 #, python-format -msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgid "backup base '%s' has no snapshots" msgstr "" -#: cinder/api/openstack/xmlutil.py:858 -msgid "subclasses must implement construct()!" +#: cinder/backup/drivers/ceph.py:704 +#, python-format +msgid "backup '%s' has no snapshot" msgstr "" -#: cinder/api/openstack/compute/extensions.py:29 -#: cinder/api/openstack/volume/extensions.py:29 -msgid "Initializing extension manager." +#: cinder/backup/drivers/ceph.py:708 +#, python-format +msgid "backup should only have one snapshot but instead has %s" msgstr "" -#: cinder/api/openstack/compute/image_metadata.py:40 -#: cinder/api/openstack/compute/images.py:146 -#: cinder/api/openstack/compute/images.py:161 -msgid "Image not found." 
+#: cinder/backup/drivers/ceph.py:713 +#, python-format +msgid "found snapshot '%s'" msgstr "" -#: cinder/api/openstack/compute/image_metadata.py:79 -msgid "Incorrect request body format" +#: cinder/backup/drivers/ceph.py:734 +msgid "need non-zero volume size" msgstr "" -#: cinder/api/openstack/compute/image_metadata.py:83 -#: cinder/api/openstack/compute/server_metadata.py:80 -#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:79 -#: cinder/api/openstack/compute/contrib/volumetypes.py:188 -msgid "Request body and URI mismatch" +#: cinder/backup/drivers/ceph.py:751 +#, python-format +msgid "Starting backup of volume='%s'" msgstr "" -#: cinder/api/openstack/compute/image_metadata.py:86 -#: cinder/api/openstack/compute/server_metadata.py:84 -#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:82 -#: cinder/api/openstack/compute/contrib/volumetypes.py:191 -msgid "Request body contains too many items" +#: cinder/backup/drivers/ceph.py:764 +msgid "forcing full backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:776 +#, python-format +msgid "backup '%s' finished." msgstr "" -#: cinder/api/openstack/compute/image_metadata.py:111 -msgid "Invalid metadata key" +#: cinder/backup/drivers/ceph.py:834 +msgid "adjusting restore vol size" msgstr "" -#: cinder/api/openstack/compute/ips.py:74 -msgid "Instance does not exist" +#: cinder/backup/drivers/ceph.py:846 +#, python-format +msgid "trying incremental restore from base='%(base)s' snap='%(snap)s'" msgstr "" -#: cinder/api/openstack/compute/ips.py:97 -msgid "Instance is not a member of specified network" +#: cinder/backup/drivers/ceph.py:858 +msgid "differential restore failed, trying full restore" msgstr "" -#: cinder/api/openstack/compute/limits.py:140 +#: cinder/backup/drivers/ceph.py:869 #, python-format -msgid "" -"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " -"%(unit_string)s." +msgid "restore transfer completed in %.4fs" msgstr "" -#: cinder/api/openstack/compute/limits.py:266 -msgid "This request was rate-limited." 
+#: cinder/backup/drivers/ceph.py:916 +#, python-format +msgid "rbd has %s extents" msgstr "" -#: cinder/api/openstack/compute/server_metadata.py:38 -#: cinder/api/openstack/compute/server_metadata.py:122 -#: cinder/api/openstack/compute/server_metadata.py:159 -msgid "Server does not exist" +#: cinder/backup/drivers/ceph.py:938 +msgid "dest volume is original volume - forcing full copy" msgstr "" -#: cinder/api/openstack/compute/server_metadata.py:141 -#: cinder/api/openstack/compute/server_metadata.py:152 -msgid "Metadata item was not found" +#: cinder/backup/drivers/ceph.py:959 +msgid "destination has extents - forcing full copy" msgstr "" -#: cinder/api/openstack/compute/servers.py:425 +#: cinder/backup/drivers/ceph.py:964 #, python-format -msgid "Invalid server status: %(status)s" +msgid "no restore point found for backup='%s', forcing full copy" msgstr "" -#: cinder/api/openstack/compute/servers.py:433 -msgid "Invalid changes-since value" +#: cinder/backup/drivers/ceph.py:995 +msgid "forcing full restore" msgstr "" -#: cinder/api/openstack/compute/servers.py:483 -msgid "Personality file limit exceeded" +#: cinder/backup/drivers/ceph.py:1005 +#, python-format +msgid "starting restore from Ceph backup=%(src)s to volume=%(dest)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:485 -msgid "Personality file path too long" +#: cinder/backup/drivers/ceph.py:1016 +msgid "volume_file does not support fileno() so skipping fsync()" msgstr "" -#: cinder/api/openstack/compute/servers.py:487 -msgid "Personality file content too long" +#: cinder/backup/drivers/ceph.py:1021 +msgid "restore finished successfully." msgstr "" -#: cinder/api/openstack/compute/servers.py:501 -msgid "Server name is not a string or unicode" +#: cinder/backup/drivers/ceph.py:1023 +#, python-format +msgid "restore finished with error - %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:505 -msgid "Server name is an empty string" +#: cinder/backup/drivers/ceph.py:1029 +#, python-format +msgid "delete started for backup=%s" msgstr "" -#: cinder/api/openstack/compute/servers.py:509 -msgid "Server name must be less than 256 characters." 
+#: cinder/backup/drivers/ceph.py:1034 +msgid "rbd image not found but continuing anyway so that db entry can be removed" msgstr "" -#: cinder/api/openstack/compute/servers.py:527 +#: cinder/backup/drivers/ceph.py:1037 #, python-format -msgid "Bad personality format: missing %s" +msgid "delete '%s' finished with warning" msgstr "" -#: cinder/api/openstack/compute/servers.py:530 -msgid "Bad personality format" +#: cinder/backup/drivers/ceph.py:1039 +#, python-format +msgid "delete '%s' finished" msgstr "" -#: cinder/api/openstack/compute/servers.py:535 +#: cinder/backup/drivers/swift.py:106 #, python-format -msgid "Personality content for %s cannot be decoded" +msgid "unsupported compression algorithm: %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:550 +#: cinder/backup/drivers/swift.py:123 #, python-format -msgid "Bad networks format: network uuid is not in proper format (%s)" +msgid "single_user auth mode enabled, but %(param)s not set" msgstr "" -#: cinder/api/openstack/compute/servers.py:559 +#: cinder/backup/drivers/swift.py:141 #, python-format -msgid "Invalid fixed IP address (%s)" +msgid "_check_container_exists: container: %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:566 +#: cinder/backup/drivers/swift.py:146 #, python-format -msgid "Duplicate networks (%s) are not allowed" +msgid "container %s does not exist" msgstr "" -#: cinder/api/openstack/compute/servers.py:572 +#: cinder/backup/drivers/swift.py:151 #, python-format -msgid "Bad network format: missing %s" +msgid "container %s exists" msgstr "" -#: cinder/api/openstack/compute/servers.py:575 -msgid "Bad networks format" +#: cinder/backup/drivers/swift.py:157 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:587 -msgid "Userdata content cannot be decoded" +#: cinder/backup/drivers/swift.py:173 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:594 -msgid "accessIPv4 is not proper IPv4 format" +#: cinder/backup/drivers/swift.py:182 +#, python-format +msgid "generated object list: %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:601 -msgid "accessIPv6 is not proper IPv6 format" +#: cinder/backup/drivers/swift.py:192 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:633 -msgid "Server name is not defined" +#: cinder/backup/drivers/swift.py:209 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" msgstr "" -#: cinder/api/openstack/compute/servers.py:675 -#: cinder/api/openstack/compute/servers.py:740 -msgid "Invalid flavorRef provided." +#: cinder/backup/drivers/swift.py:214 +msgid "_write_metadata finished" msgstr "" -#: cinder/api/openstack/compute/servers.py:737 -msgid "Can not find requested image" +#: cinder/backup/drivers/swift.py:219 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:743 -msgid "Invalid key_name provided." 
+#: cinder/backup/drivers/swift.py:224 +#, python-format +msgid "_read_metadata finished (%s)" msgstr "" -#: cinder/api/openstack/compute/servers.py:829 -#: cinder/api/openstack/compute/servers.py:849 -msgid "Instance has not been resized." -msgstr "" +#: cinder/backup/drivers/swift.py:234 +#, fuzzy, python-format +msgid "volume size %d is invalid." +msgstr "La richiesta non è valida." -#: cinder/api/openstack/compute/servers.py:835 +#: cinder/backup/drivers/swift.py:248 #, python-format -msgid "Error in confirm-resize %s" +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:271 +msgid "reading chunk of data from volume" msgstr "" -#: cinder/api/openstack/compute/servers.py:855 +#: cinder/backup/drivers/swift.py:278 #, python-format -msgid "Error in revert-resize %s" +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:868 -msgid "Argument 'type' for reboot is not HARD or SOFT" +#: cinder/backup/drivers/swift.py:287 +msgid "not compressing data" msgstr "" -#: cinder/api/openstack/compute/servers.py:872 -msgid "Missing argument 'type' for reboot" +#: cinder/backup/drivers/swift.py:291 +msgid "About to put_object" msgstr "" -#: cinder/api/openstack/compute/servers.py:885 +#: cinder/backup/drivers/swift.py:297 #, python-format -msgid "Error in reboot %s" +msgid "swift MD5 for %(object_name)s: %(etag)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:897 -msgid "Unable to locate requested flavor." +#: cinder/backup/drivers/swift.py:301 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:900 -msgid "Resize requires a change in size." +#: cinder/backup/drivers/swift.py:304 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:924 -msgid "Malformed server entity" +#: cinder/backup/drivers/swift.py:312 +msgid "Calling eventlet.sleep(0)" msgstr "" -#: cinder/api/openstack/compute/servers.py:931 -msgid "Missing imageRef attribute" +#: cinder/backup/drivers/swift.py:328 cinder/backup/drivers/tsm.py:324 +#, python-format +msgid "backup %s finished." msgstr "" -#: cinder/api/openstack/compute/servers.py:940 -msgid "Invalid imageRef provided." +#: cinder/backup/drivers/swift.py:345 +#, python-format +msgid "v1 swift volume backup restore of %s started" msgstr "" -#: cinder/api/openstack/compute/servers.py:949 -msgid "Missing flavorRef attribute" +#: cinder/backup/drivers/swift.py:350 +#, python-format +msgid "metadata_object_names = %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:962 -msgid "No adminPass was specified" +#: cinder/backup/drivers/swift.py:356 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" msgstr "" -#: cinder/api/openstack/compute/servers.py:966 -#: cinder/api/openstack/compute/servers.py:1144 -msgid "Invalid adminPass" +#: cinder/backup/drivers/swift.py:362 +#, python-format +msgid "" +"restoring object from swift. 
backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:980 -msgid "Unable to parse metadata key/value pairs." +#: cinder/backup/drivers/swift.py:378 +#, python-format +msgid "decompressing data using %s algorithm" msgstr "" -#: cinder/api/openstack/compute/servers.py:993 -msgid "Resize request has invalid 'flavorRef' attribute." +#: cinder/backup/drivers/swift.py:401 +#, python-format +msgid "v1 swift volume backup restore of %s finished" msgstr "" -#: cinder/api/openstack/compute/servers.py:996 -msgid "Resize requests require 'flavorRef' attribute." +#: cinder/backup/drivers/swift.py:409 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:1014 -#: cinder/api/openstack/compute/contrib/aggregates.py:142 -#: cinder/api/openstack/compute/contrib/networks.py:65 -msgid "Invalid request body" +#: cinder/backup/drivers/swift.py:423 +#, python-format +msgid "Restoring swift backup version %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:1019 -msgid "Could not parse imageRef from request." +#: cinder/backup/drivers/swift.py:428 +#, python-format +msgid "No support to restore swift backup version %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:1071 -msgid "Instance could not be found" +#: cinder/backup/drivers/swift.py:432 cinder/backup/drivers/tsm.py:378 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." msgstr "" -#: cinder/api/openstack/compute/servers.py:1074 -msgid "Cannot find image for rebuild" +#: cinder/backup/drivers/swift.py:446 +msgid "swift error while listing objects, continuing with delete" msgstr "" -#: cinder/api/openstack/compute/servers.py:1103 -msgid "createImage entity requires name attribute" +#: cinder/backup/drivers/swift.py:455 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" msgstr "" -#: cinder/api/openstack/compute/servers.py:1112 -#: cinder/api/openstack/compute/contrib/admin_actions.py:238 -msgid "Invalid metadata" +#: cinder/backup/drivers/swift.py:458 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:1167 +#: cinder/backup/drivers/swift.py:468 cinder/backup/drivers/tsm.py:440 #, python-format -msgid "Removing options '%(unk_opt_str)s' from query" +msgid "delete %s finished" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:60 +#: cinder/backup/drivers/tsm.py:85 #, python-format -msgid "Compute.api::pause %s" -msgstr "Compute.api::pause %s" +msgid "" +"backup: %(vol_id)s Failed to create device hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:77 +#: cinder/backup/drivers/tsm.py:143 #, python-format -msgid "Compute.api::unpause %s" -msgstr "Compute.api::unpause %s" +msgid "" +"backup: %(vol_id)s Failed to obtain backup success notification from " +"server.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:94 +#: cinder/backup/drivers/tsm.py:173 #, python-format -msgid "compute.api::suspend %s" -msgstr "compute.api::suspend %s" +msgid "" +"restore: %(vol_id)s Failed.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" -#: 
cinder/api/openstack/compute/contrib/admin_actions.py:111 +#: cinder/backup/drivers/tsm.py:199 #, python-format -msgid "compute.api::resume %s" -msgstr "compute.api::resume %s" +msgid "backup: %(vol_id)s Failed. %(path)s is not a block device." +msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:127 +#: cinder/backup/drivers/tsm.py:206 #, python-format -msgid "Error in migrate %s" +msgid "backup: %(vol_id)s Failed. Cannot obtain real path to device %(path)s." msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:141 +#: cinder/backup/drivers/tsm.py:213 #, python-format -msgid "Compute.api::reset_network %s" -msgstr "Compute.api::reset_network %s" - -#: cinder/api/openstack/compute/contrib/admin_actions.py:154 -#: cinder/api/openstack/compute/contrib/admin_actions.py:170 -#: cinder/api/openstack/compute/contrib/admin_actions.py:186 -#: cinder/api/openstack/compute/contrib/multinic.py:41 -#: cinder/api/openstack/compute/contrib/rescue.py:44 -msgid "Server not found" +msgid "backup: %(vol_id)s Failed. %(path)s is not a file." msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:157 +#: cinder/backup/drivers/tsm.py:260 #, python-format -msgid "Compute.api::inject_network_info %s" +msgid "" +"backup: %(vol_id)s Failed to remove backup hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:173 +#: cinder/backup/drivers/tsm.py:286 #, python-format -msgid "Compute.api::lock %s" -msgstr "Compute.api::lock %s" +msgid "" +"starting backup of volume: %(volume_id)s to TSM, volume path: " +"%(volume_path)s," +msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:189 +#: cinder/backup/drivers/tsm.py:298 #, python-format -msgid "Compute.api::unlock %s" -msgstr "Compute.api::unlock %s" +msgid "" +"backup: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:219 +#: cinder/backup/drivers/tsm.py:308 #, python-format -msgid "createBackup entity requires %s attribute" +msgid "" +"backup: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:223 -msgid "Malformed createBackup entity" +#: cinder/backup/drivers/tsm.py:338 +#, python-format +msgid "" +"restore: starting restore of backup from TSM to volume %(volume_id)s, " +"backup: %(backup_id)s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:229 -msgid "createBackup attribute 'rotation' must be an integer" +#: cinder/backup/drivers/tsm.py:352 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:244 -#: cinder/api/openstack/compute/contrib/console_output.py:47 -#: cinder/api/openstack/compute/contrib/console_output.py:59 -#: cinder/api/openstack/compute/contrib/consoles.py:49 -#: cinder/api/openstack/compute/contrib/consoles.py:60 -#: cinder/api/openstack/compute/contrib/server_action_list.py:49 -#: cinder/api/openstack/compute/contrib/server_diagnostics.py:47 -#: cinder/api/openstack/compute/contrib/server_start_stop.py:38 -msgid "Instance not found" +#: cinder/backup/drivers/tsm.py:362 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: 
%(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:272 -msgid "host and block_migration must be specified." +#: cinder/backup/drivers/tsm.py:413 +#, python-format +msgid "" +"delete: %(vol_id)s Failed to run dsmc with stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:284 +#: cinder/backup/drivers/tsm.py:421 #, python-format -msgid "Live migration of instance %(id)s to host %(host)s failed" +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments with " +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:76 +#: cinder/backup/drivers/tsm.py:432 #, python-format msgid "" -"Cannot create aggregate with name %(name)s and availability zone " -"%(avail_zone)s" +"delete: %(vol_id)s Failed with stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:88 +#: cinder/brick/exception.py:55 #, python-format -msgid "Cannot show aggregate: %(id)s" +msgid "Exception in string format operation. msg='%s'" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:114 -#, python-format -msgid "Cannot update aggregate: %(id)s" +#: cinder/brick/exception.py:85 +msgid "We are unable to locate any Fibre Channel devices." msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:126 -#, python-format -msgid "Cannot delete aggregate: %(id)s" +#: cinder/brick/exception.py:89 +msgid "Unable to find a Fibre Channel volume device." msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:139 +#: cinder/brick/exception.py:93 #, python-format -msgid "Aggregates does not have %s action" +msgid "Volume device not found at %(device)s." msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:152 -#: cinder/api/openstack/compute/contrib/aggregates.py:158 +#: cinder/brick/exception.py:97 #, python-format -msgid "Cannot add host %(host)s in aggregate %(id)s" +msgid "Unable to find Volume Group: %(vg_name)s" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:171 -#: cinder/api/openstack/compute/contrib/aggregates.py:175 +#: cinder/brick/exception.py:101 #, python-format -msgid "Cannot remove host %(host)s in aggregate %(id)s" +msgid "Failed to create Volume Group: %(vg_name)s" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:195 +#: cinder/brick/exception.py:105 #, python-format -msgid "Cannot set metadata %(metadata)s in aggregate %(id)s" +msgid "Failed to create iscsi target for volume %(volume_id)s." msgstr "" -#: cinder/api/openstack/compute/contrib/certificates.py:75 -msgid "Only root certificate can be retrieved." +#: cinder/brick/exception.py:109 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." msgstr "" -#: cinder/api/openstack/compute/contrib/cloudpipe.py:146 -msgid "" -"Unable to claim IP for VPN instances, ensure it isn't running, and try " -"again in a few minutes" +#: cinder/brick/exception.py:113 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." msgstr "" -#: cinder/api/openstack/compute/contrib/consoles.py:44 -msgid "Missing type specification" +#: cinder/brick/exception.py:117 +#, python-format +msgid "Connect to volume via protocol %(protocol)s not supported." 
msgstr "" -#: cinder/api/openstack/compute/contrib/consoles.py:56 -msgid "Invalid type specification" +#: cinder/brick/initiator/connector.py:127 +#, python-format +msgid "Invalid InitiatorConnector protocol specified %(protocol)s" msgstr "" -#: cinder/api/openstack/compute/contrib/disk_config.py:44 +#: cinder/brick/initiator/connector.py:140 #, python-format -msgid "%s must be either 'MANUAL' or 'AUTO'." +msgid "Failed to access the device on the path %(path)s: %(error)s %(info)s." msgstr "" -#: cinder/api/openstack/compute/contrib/extended_server_attributes.py:77 -#: cinder/api/openstack/compute/contrib/extended_status.py:61 -msgid "Server not found." +#: cinder/brick/initiator/connector.py:229 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. Try" +" number: %(tries)s" msgstr "" -#: cinder/api/openstack/compute/contrib/flavorextradata.py:61 -#: cinder/api/openstack/compute/contrib/flavorextradata.py:91 -msgid "Flavor not found." +#: cinder/brick/initiator/connector.py:242 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" msgstr "" -#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:49 -#: cinder/api/openstack/compute/contrib/volumetypes.py:158 -msgid "No Request Body" +#: cinder/brick/initiator/connector.py:317 +#, python-format +msgid "Could not find the iSCSI Initiator File %s" msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:159 -#, python-format -msgid "No more floating ips in pool %s." +#: cinder/brick/initiator/connector.py:609 +msgid "We are unable to locate any Fibre Channel devices" msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:161 -msgid "No more floating ips available." +#: cinder/brick/initiator/connector.py:619 +#, python-format +msgid "Looking for Fibre Channel dev %(device)s" msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:201 -#: cinder/api/openstack/compute/contrib/floating_ips.py:230 -#: cinder/api/openstack/compute/contrib/security_groups.py:571 -#: cinder/api/openstack/compute/contrib/security_groups.py:604 -msgid "Missing parameter dict" +#: cinder/brick/initiator/connector.py:629 +msgid "Fibre Channel volume device not found." msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:204 -#: cinder/api/openstack/compute/contrib/floating_ips.py:233 -msgid "Address not specified" +#: cinder/brick/initiator/connector.py:633 +#, python-format +msgid "Fibre volume not yet found. Will rescan & retry. Try number: %(tries)s" msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:213 -msgid "No fixed ips associated to instance" +#: cinder/brick/initiator/connector.py:649 +#, python-format +msgid "Found Fibre Channel volume %(name)s (after %(tries)s rescans)" msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:216 -msgid "Associate floating ip failed" +#: cinder/brick/initiator/connector.py:658 +#, python-format +msgid "Multipath device discovered %(device)s" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:144 +#: cinder/brick/initiator/connector.py:776 #, python-format -msgid "Invalid status: '%s'" +msgid "AoE volume not yet found at: %(path)s. 
Try number: %(tries)s" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:148 +#: cinder/brick/initiator/connector.py:789 #, python-format -msgid "Invalid mode: '%s'" +msgid "Found AoE device %(path)s (after %(tries)s rediscover)" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:152 +#: cinder/brick/initiator/connector.py:815 #, python-format -msgid "Invalid update setting: '%s'" +msgid "aoe-discover: stdout=%(out)s stderr%(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:170 +#: cinder/brick/initiator/connector.py:825 #, python-format -msgid "Putting host %(host)s in maintenance mode %(mode)s." +msgid "aoe-revalidate %(dev)s: stdout=%(out)s stderr%(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:181 +#: cinder/brick/initiator/connector.py:834 #, python-format -msgid "Setting host %(host)s to %(state)s." +msgid "aoe-flush %(dev)s: stdout=%(out)s stderr%(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:230 -msgid "Describe-resource is admin only functionality" +#: cinder/brick/initiator/connector.py:858 +msgid "" +"Connection details not present. RemoteFsClient may not initialize " +"properly." msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:238 -msgid "Host not found" +#: cinder/brick/initiator/connector.py:915 +msgid "Invalid connection_properties specified no device_path attribute" msgstr "" -#: cinder/api/openstack/compute/contrib/keypairs.py:70 -msgid "Keypair name contains unsafe characters" +#: cinder/brick/initiator/linuxfc.py:50 cinder/brick/initiator/linuxfc.py:56 +msgid "systool is not installed" msgstr "" -#: cinder/api/openstack/compute/contrib/keypairs.py:95 -msgid "Keypair name must be between 1 and 255 characters long" +#: cinder/brick/initiator/linuxscsi.py:99 +#: cinder/brick/initiator/linuxscsi.py:107 +#: cinder/brick/initiator/linuxscsi.py:124 +#, python-format +msgid "multipath call failed exit (%(code)s)" msgstr "" -#: cinder/api/openstack/compute/contrib/keypairs.py:100 +#: cinder/brick/initiator/linuxscsi.py:145 #, python-format -msgid "Key pair '%s' already exists." +msgid "Couldn't find multipath device %(line)s" msgstr "" -#: cinder/api/openstack/compute/contrib/multinic.py:52 -msgid "Missing 'networkId' argument for addFixedIp" +#: cinder/brick/initiator/linuxscsi.py:149 +#, python-format +msgid "Found multipath device = %(mdev)s" msgstr "" -#: cinder/api/openstack/compute/contrib/multinic.py:68 -msgid "Missing 'address' argument for removeFixedIp" +#: cinder/brick/iscsi/iscsi.py:140 +msgid "Attempting recreate of backing lun..." 
msgstr "" -#: cinder/api/openstack/compute/contrib/multinic.py:77 +#: cinder/brick/iscsi/iscsi.py:158 #, python-format -msgid "Unable to find address %r" +msgid "" +"Failed to recover attempt to create iscsi backing lun for volume " +"id:%(vol_id)s: %(e)s" msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:62 +#: cinder/brick/iscsi/iscsi.py:177 #, python-format -msgid "Network does not have %s action" +msgid "Creating iscsi_target for: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:70 +#: cinder/brick/iscsi/iscsi.py:184 #, python-format -msgid "Disassociating network with id %s" +msgid "" +"Created volume path %(vp)s,\n" +"content: %(vc)%" msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:74 -#: cinder/api/openstack/compute/contrib/networks.py:91 -#: cinder/api/openstack/compute/contrib/networks.py:101 -msgid "Network not found" +#: cinder/brick/iscsi/iscsi.py:216 cinder/brick/iscsi/iscsi.py:365 +#, fuzzy, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "Impossibile localizzare il volume %s" + +#: cinder/brick/iscsi/iscsi.py:227 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:87 +#: cinder/brick/iscsi/iscsi.py:258 #, python-format -msgid "Showing network with id %s" +msgid "Removing iscsi_target for: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:97 +#: cinder/brick/iscsi/iscsi.py:262 #, python-format -msgid "Deleting network with id %s" +msgid "Volume path %s does not exist, nothing to remove." msgstr "" -#: cinder/api/openstack/compute/contrib/scheduler_hints.py:41 -msgid "Malformed scheduler_hints attribute" +#: cinder/brick/iscsi/iscsi.py:280 +#, fuzzy, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "Impossibile localizzare il volume %s" + +#: cinder/brick/iscsi/iscsi.py:290 cinder/brick/iscsi/iscsi.py:550 +msgid "valid iqn needed for show_target" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:222 -msgid "Security group id should be integer" +#: cinder/brick/iscsi/iscsi.py:375 +#, python-format +msgid "Removing iscsi_target for volume: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:243 -msgid "Security group is still in use" +#: cinder/brick/iscsi/iscsi.py:469 +msgid "cinder-rtstool is not installed correctly" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:295 +#: cinder/brick/iscsi/iscsi.py:489 #, python-format -msgid "Security group %s already exists" +msgid "Creating iscsi_target for volume: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:315 +#: cinder/brick/iscsi/iscsi.py:513 cinder/brick/iscsi/iscsi.py:522 #, python-format -msgid "Security group %s is not a string or unicode" +msgid "Failed to create iscsi target for volume id:%s." msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:318 +#: cinder/brick/iscsi/iscsi.py:532 +#, fuzzy, python-format +msgid "Removing iscsi_target: %s" +msgstr "Riavviando l'istanza %s" + +#: cinder/brick/iscsi/iscsi.py:542 #, python-format -msgid "Security group %s cannot be empty." +msgid "Failed to remove iscsi target for volume id:%s." 
msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:321 +#: cinder/brick/iscsi/iscsi.py:571 #, python-format -msgid "Security group %s should not be greater than 255 characters." +msgid "Failed to add initiator iqn %s to target" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:348 -msgid "Parent group id is not integer" +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:351 +#: cinder/brick/local_dev/lvm.py:76 cinder/brick/local_dev/lvm.py:158 +#: cinder/brick/local_dev/lvm.py:474 cinder/brick/local_dev/lvm.py:503 +#: cinder/brick/local_dev/lvm.py:546 cinder/brick/local_dev/lvm.py:609 #, python-format -msgid "Security group (%s) not found" +msgid "Cmd :%s" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:369 -msgid "Not enough parameters to build a valid rule." +#: cinder/brick/local_dev/lvm.py:77 cinder/brick/local_dev/lvm.py:159 +#: cinder/brick/local_dev/lvm.py:475 cinder/brick/local_dev/lvm.py:504 +#: cinder/brick/local_dev/lvm.py:547 cinder/brick/local_dev/lvm.py:610 +#, python-format +msgid "StdOut :%s" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:376 +#: cinder/brick/local_dev/lvm.py:78 cinder/brick/local_dev/lvm.py:160 +#: cinder/brick/local_dev/lvm.py:476 cinder/brick/local_dev/lvm.py:505 +#: cinder/brick/local_dev/lvm.py:548 cinder/brick/local_dev/lvm.py:611 #, python-format -msgid "This rule already exists in group %s" +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, fuzzy, python-format +msgid "Unable to locate Volume Group %s" +msgstr "Impossibile localizzare il volume %s" + +#: cinder/brick/local_dev/lvm.py:157 +msgid "Error querying thin pool about data_percent" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:370 +#, fuzzy, python-format +msgid "Unable to find VG: %s" +msgstr "Impossibile smontare il volume %s" + +#: cinder/brick/local_dev/lvm.py:420 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:473 +msgid "Error creating Volume" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:414 -msgid "Parent or group id is not integer" +#: cinder/brick/local_dev/lvm.py:489 +#, fuzzy, python-format +msgid "Unable to find LV: %s" +msgstr "Impossibile smontare il volume %s" + +#: cinder/brick/local_dev/lvm.py:502 +msgid "Error creating snapshot" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:507 -msgid "Rule id is not integer" +#: cinder/brick/local_dev/lvm.py:545 +msgid "Error activating LV" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:510 +#: cinder/brick/local_dev/lvm.py:563 #, python-format -msgid "Rule (%s) not found" +msgid "Error reported running lvremove: CMD: %(command)s, RESPONSE: %(response)s" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:574 -#: cinder/api/openstack/compute/contrib/security_groups.py:607 -msgid "Security group not specified" +#: cinder/brick/local_dev/lvm.py:568 +msgid "Attempting udev settle and retry of lvremove..." 
msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:578 -#: cinder/api/openstack/compute/contrib/security_groups.py:611 -msgid "Security group name cannot be empty" +#: cinder/brick/local_dev/lvm.py:608 +msgid "Error extending Volume" msgstr "" -#: cinder/api/openstack/compute/contrib/server_start_stop.py:45 -#, python-format -msgid "start instance %r" +#: cinder/brick/remotefs/remotefs.py:39 +msgid "nfs_mount_point_base required" msgstr "" -#: cinder/api/openstack/compute/contrib/server_start_stop.py:54 -#, python-format -msgid "stop instance %r" +#: cinder/brick/remotefs/remotefs.py:45 +msgid "glusterfs_mount_point_base required" msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:73 -#: cinder/api/openstack/volume/volumes.py:106 +#: cinder/brick/remotefs/remotefs.py:86 #, python-format -msgid "vol=%s" +msgid "Already mounted: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:146 -#: cinder/api/openstack/volume/volumes.py:184 -#, python-format -msgid "Delete volume with id: %s" +#: cinder/common/config.py:125 +msgid "Deploy v1 of the Cinder API." msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:329 -#, python-format -msgid "Attach volume %(volume_id)s to instance %(server_id)s at %(device)s" +#: cinder/common/config.py:128 +msgid "Deploy v2 of the Cinder API." msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:481 -#: cinder/api/openstack/volume/snapshots.py:110 -#, python-format -msgid "Delete snapshot with id: %s" +#: cinder/common/sqlalchemyutils.py:66 +#: cinder/openstack/common/db/sqlalchemy/utils.py:72 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:114 +#: cinder/openstack/common/db/sqlalchemy/utils.py:120 +msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:524 -#: cinder/api/openstack/volume/snapshots.py:150 +#: cinder/compute/nova.py:97 #, python-format -msgid "Create snapshot from volume %s" +msgid "Novaclient connection created using URL: %s" msgstr "" -#: cinder/auth/fakeldap.py:33 -msgid "Attempted to instantiate singleton" +#: cinder/db/sqlalchemy/api.py:63 +msgid "Use of empty request context is deprecated" msgstr "" -#: cinder/auth/ldapdriver.py:650 +#: cinder/db/sqlalchemy/api.py:190 #, python-format -msgid "" -"Attempted to remove the last member of a group. Deleting the group at %s " -"instead." +msgid "Unrecognized read_deleted value '%s'" msgstr "" -#: cinder/auth/manager.py:298 +#: cinder/db/sqlalchemy/api.py:843 #, python-format -msgid "Looking up user: %r" +msgid "Change will make usage less than 0 for the following resources: %s" msgstr "" -#: cinder/auth/manager.py:302 +#: cinder/db/sqlalchemy/api.py:1842 #, python-format -msgid "Failed authorization for access key %s" +msgid "VolumeType %s deletion failed, VolumeType in use." 
msgstr "" -#: cinder/auth/manager.py:308 +#: cinder/db/sqlalchemy/api.py:2530 #, python-format -msgid "Using project name = user name (%s)" +msgid "No backup with id %s" msgstr "" -#: cinder/auth/manager.py:315 +#: cinder/db/sqlalchemy/api.py:2615 +#, fuzzy +msgid "Volume must be available" +msgstr "Lo stato del volume deve essere disponibile" + +#: cinder/db/sqlalchemy/api.py:2639 #, python-format -msgid "failed authorization: no project named %(pjid)s (user=%(uname)s)" +msgid "Volume in unexpected state %s, expected awaiting-transfer" msgstr "" -#: cinder/auth/manager.py:324 +#: cinder/db/sqlalchemy/api.py:2662 #, python-format msgid "" -"Failed authorization: user %(uname)s not admin and not member of project " -"%(pjname)s" +"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " +"%(status)s, expected awaiting-transfer" msgstr "" -#: cinder/auth/manager.py:331 cinder/auth/manager.py:343 -#, python-format -msgid "user.secret: %s" +#: cinder/db/sqlalchemy/migration.py:37 +msgid "version should be an integer" msgstr "" -#: cinder/auth/manager.py:332 cinder/auth/manager.py:344 -#, python-format -msgid "expected_signature: %s" +#: cinder/db/sqlalchemy/migration.py:64 +msgid "Upgrade DB using Essex release first." msgstr "" -#: cinder/auth/manager.py:333 cinder/auth/manager.py:345 -#, python-format -msgid "signature: %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:240 +msgid "Exception while creating table." msgstr "" -#: cinder/auth/manager.py:335 cinder/auth/manager.py:357 -#, python-format -msgid "Invalid signature for user %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:269 +msgid "Downgrade from initial Cinder install is unsupported." msgstr "" -#: cinder/auth/manager.py:353 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:49 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:74 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:105 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:45 +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:48 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:46 #, python-format -msgid "host_only_signature: %s" +msgid "Table |%s| not created!" msgstr "" -#: cinder/auth/manager.py:449 -msgid "Must specify project" +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:127 +msgid "Dropping foreign key reservations_ibfk_1 failed." 
msgstr "" -#: cinder/auth/manager.py:490 -#, python-format -msgid "Adding role %(role)s to user %(uid)s in project %(pid)s" +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:133 +msgid "quota_classes table not dropped" msgstr "" -#: cinder/auth/manager.py:493 -#, python-format -msgid "Adding sitewide role %(role)s to user %(uid)s" +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:140 +msgid "quota_usages table not dropped" msgstr "" -#: cinder/auth/manager.py:519 -#, python-format -msgid "Removing role %(role)s from user %(uid)s on project %(pid)s" +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:147 +msgid "reservations table not dropped" msgstr "" -#: cinder/auth/manager.py:522 -#, python-format -msgid "Removing sitewide role %(role)s from user %(uid)s" +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:60 +msgid "Exception while creating table 'volume_glance_metadata'" msgstr "" -#: cinder/auth/manager.py:595 -#, python-format -msgid "Created project %(name)s with manager %(manager_user)s" +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:75 +msgid "volume_glance_metadata table not dropped" msgstr "" -#: cinder/auth/manager.py:613 -#, python-format -msgid "modifying project %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:68 +msgid "backups table not dropped" msgstr "" -#: cinder/auth/manager.py:625 -#, python-format -msgid "Adding user %(uid)s to project %(pid)s" +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:58 +msgid "snapshot_metadata table not dropped" msgstr "" -#: cinder/auth/manager.py:646 -#, python-format -msgid "Remove user %(uid)s from project %(pid)s" +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:61 +msgid "transfers table not dropped" msgstr "" -#: cinder/auth/manager.py:676 -#, python-format -msgid "Deleting project %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:31 +msgid "migrations table not dropped" msgstr "" -#: cinder/auth/manager.py:734 +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:61 #, python-format -msgid "Created user %(rvname)s (admin: %(rvadmin)r)" +msgid "Table |%s| not created" msgstr "" -#: cinder/auth/manager.py:743 +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:37 #, python-format -msgid "Deleting user %s" +msgid "Exception while dropping table %s." msgstr "" -#: cinder/auth/manager.py:753 +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:100 #, python-format -msgid "Access Key change for user %s" +msgid "Exception while creating table %s." msgstr "" -#: cinder/auth/manager.py:755 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:34 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:43 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:49 #, python-format -msgid "Secret Key change for user %s" +msgid "Column |%s| not created!" 
msgstr "" -#: cinder/auth/manager.py:757 -#, python-format -msgid "Admin status set to %(admin)r for user %(uid)s" +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:92 +msgid "encryption_key_id column not dropped from volumes" msgstr "" -#: cinder/auth/manager.py:802 -#, python-format -msgid "No vpn data for project %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:100 +msgid "encryption_key_id column not dropped from snapshots" msgstr "" -#: cinder/cloudpipe/pipelib.py:46 -msgid "Instance type for vpn instances" +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:105 +msgid "volume_type_id column not dropped from snapshots" msgstr "" -#: cinder/cloudpipe/pipelib.py:49 -msgid "Template for cloudpipe instance boot script" +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:113 +msgid "encryption table not dropped" msgstr "" -#: cinder/cloudpipe/pipelib.py:52 -msgid "Network to push into openvpn config" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:49 +msgid "Table quality_of_service_specs not created!" msgstr "" -#: cinder/cloudpipe/pipelib.py:55 -msgid "Netmask to push into openvpn config" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:60 +msgid "Added qos_specs_id column to volume type table failed." msgstr "" -#: cinder/cloudpipe/pipelib.py:107 -#, python-format -msgid "Launching VPN for %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:85 +msgid "Dropping foreign key volume_types_ibfk_1 failed" msgstr "" -#: cinder/compute/api.py:141 -msgid "No compute host specified" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:93 +msgid "Dropping qos_specs_id column failed." msgstr "" -#: cinder/compute/api.py:144 -#, python-format -msgid "Unable to find host for Instance %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:100 +msgid "Dropping quality_of_service_specs table failed." msgstr "" -#: cinder/compute/api.py:192 -#, python-format +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:59 +msgid "volume_admin_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:48 msgid "" -"Quota exceeded for %(pid)s, tried to set %(num_metadata)s metadata " -"properties" +"Found existing 'default' entries in the quota_classes table. Skipping " +"insertion of default values." msgstr "" -#: cinder/compute/api.py:203 -#, python-format -msgid "Quota exceeded for %(pid)s, metadata property key or value too long" +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:72 +msgid "Added default quota class data into the DB." msgstr "" -#: cinder/compute/api.py:257 -msgid "Cannot run any more instances of this type." +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:74 +msgid "Default quota class data not inserted into the DB." msgstr "" -#: cinder/compute/api.py:259 +#: cinder/image/glance.py:161 cinder/image/glance.py:169 #, python-format -msgid "Can only run %s more instances of this type." +msgid "Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s." msgstr "" -#: cinder/compute/api.py:261 +#: cinder/image/image_utils.py:94 cinder/image/image_utils.py:199 +msgid "'qemu-img info' parsing failed." 
+msgstr "" + +#: cinder/image/image_utils.py:101 #, python-format -msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances. " +msgid "fmt=%(fmt)s backed by: %(backing_file)s" msgstr "" -#: cinder/compute/api.py:310 -msgid "Creating a raw instance" +#: cinder/image/image_utils.py:109 cinder/image/image_utils.py:192 +#, python-format +msgid "" +"Size is %(image_size)dGB and doesn't fit in a volume of size " +"%(volume_size)dGB." msgstr "" -#: cinder/compute/api.py:312 +#: cinder/image/image_utils.py:157 #, python-format -msgid "Using Kernel=%(kernel_id)s, Ramdisk=%(ramdisk_id)s" +msgid "" +"qemu-img is not installed and image is of type %s. Only RAW images can " +"be used if qemu-img is not installed." +msgstr "" + +#: cinder/image/image_utils.py:164 +msgid "" +"qemu-img is not installed and the disk format is not specified. Only RAW" +" images can be used if qemu-img is not installed." msgstr "" -#: cinder/compute/api.py:383 +#: cinder/image/image_utils.py:178 #, python-format -msgid "Going to run %s instances..." +msgid "Copying image from %(tmp)s to volume %(dest)s - size: %(size)s" msgstr "" -#: cinder/compute/api.py:447 +#: cinder/image/image_utils.py:206 #, python-format -msgid "bdm %s" +msgid "fmt=%(fmt)s backed by:%(backing_file)s" msgstr "" -#: cinder/compute/api.py:474 +#: cinder/image/image_utils.py:224 #, python-format -msgid "block_device_mapping %s" +msgid "Converted to %(vol_format)s, but format is now %(file_format)s" msgstr "" -#: cinder/compute/api.py:591 +#: cinder/image/image_utils.py:260 #, python-format -msgid "Sending create to scheduler for %(pid)s/%(uid)s's" +msgid "Converted to %(f1)s, but format is now %(f2)s" msgstr "" -#: cinder/compute/api.py:871 -msgid "Going to try to soft delete instance" +#: cinder/keymgr/conf_key_mgr.py:78 +msgid "" +"config option keymgr.fixed_key has not been defined: some operations may " +"fail unexpectedly" msgstr "" -#: cinder/compute/api.py:891 -msgid "No host for instance, deleting immediately" +#: cinder/keymgr/conf_key_mgr.py:80 +msgid "keymgr.fixed_key not defined" msgstr "" -#: cinder/compute/api.py:939 -msgid "Going to try to terminate instance" +#: cinder/keymgr/conf_key_mgr.py:134 +#, python-format +msgid "Not deleting key %s" msgstr "" -#: cinder/compute/api.py:977 -#, fuzzy -msgid "Going to try to stop instance" -msgstr "Impossibile riavviare l'istanza" - -#: cinder/compute/api.py:996 -#, fuzzy -msgid "Going to try to start instance" -msgstr "Impossibile riavviare l'istanza" - -#: cinder/compute/api.py:1000 +#: cinder/openstack/common/eventlet_backdoor.py:140 #, python-format -msgid "Instance %(instance_uuid)s is not stopped. (%(vm_state)s" +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" msgstr "" -#: cinder/compute/api.py:1071 cinder/volume/api.py:173 -#: cinder/volume/volume_types.py:64 +#: cinder/openstack/common/excutils.py:48 #, python-format -msgid "Searching by: %s" +msgid "Original exception being dropped: %s" msgstr "" -#: cinder/compute/api.py:1201 +#: cinder/openstack/common/fileutils.py:64 #, python-format -msgid "Image type not recognized %s" +msgid "Reloading cached file %s" msgstr "" -#: cinder/compute/api.py:1369 -msgid "flavor_id is None. Assuming migration." +#: cinder/openstack/common/gettextutils.py:252 +msgid "Message objects do not support addition." 
msgstr "" -#: cinder/compute/api.py:1377 -#, python-format +#: cinder/openstack/common/gettextutils.py:261 msgid "" -"Old instance type %(current_instance_type_name)s, new instance type " -"%(new_instance_type_name)s" -msgstr "" - -#: cinder/compute/api.py:1644 -#, python-format -msgid "multiple fixedips exist, using the first: %s" +"Message objects do not support str() because they may contain non-ascii " +"characters. Please use unicode() or translate() instead." msgstr "" -#: cinder/compute/instance_types.py:57 cinder/compute/instance_types.py:65 -msgid "create arguments must be positive integers" +#: cinder/openstack/common/imageutils.py:96 +msgid "Snapshot list encountered but no header found!" msgstr "" -#: cinder/compute/instance_types.py:76 cinder/volume/volume_types.py:41 +#: cinder/openstack/common/lockutils.py:102 #, python-format -msgid "DB error: %s" +msgid "Could not release the acquired lock `%s`" msgstr "" -#: cinder/compute/instance_types.py:86 +#: cinder/openstack/common/lockutils.py:189 #, python-format -msgid "Instance type %s not found for deletion" +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." msgstr "" -#: cinder/compute/manager.py:138 -#, python-format -msgid "check_instance_lock: decorating: |%s|" -msgstr "check_instance_lock: decorazione: |%s|" - -#: cinder/compute/manager.py:140 +#: cinder/openstack/common/lockutils.py:200 #, python-format -msgid "" -"check_instance_lock: arguments: |%(self)s| |%(context)s| " -"|%(instance_uuid)s|" +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." msgstr "" -#: cinder/compute/manager.py:144 +#: cinder/openstack/common/lockutils.py:227 #, python-format -msgid "check_instance_lock: locked: |%s|" -msgstr "check_instance_lock: bloccato: |%s|" +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" -#: cinder/compute/manager.py:146 +#: cinder/openstack/common/lockutils.py:235 #, python-format -msgid "check_instance_lock: admin: |%s|" -msgstr "check_instance_lock: admin: |%s|" +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" -#: cinder/compute/manager.py:151 -#, python-format -msgid "check_instance_lock: executing: |%s|" -msgstr "check_instance_lock: esecuzione: |%s|" +#: cinder/openstack/common/log.py:301 +#, fuzzy, python-format +msgid "Deprecated: %s" +msgstr "ricevuto %s" -#: cinder/compute/manager.py:155 +#: cinder/openstack/common/log.py:402 #, python-format -msgid "check_instance_lock: not executing |%s|" -msgstr "check_instance_lock: non esecuzione |%s|" +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" -#: cinder/compute/manager.py:201 +#: cinder/openstack/common/log.py:453 #, python-format -msgid "Unable to load the virtualization driver: %s" +msgid "syslog facility must be one of: %s" msgstr "" -#: cinder/compute/manager.py:223 +#: cinder/openstack/common/log.py:623 #, python-format -msgid "" -"Instance %(instance_uuid)s has been destroyed from under us while trying " -"to set it to ERROR" +msgid "Fatal call to deprecated config: %(msg)s" msgstr "" -#: cinder/compute/manager.py:240 +#: cinder/openstack/common/loopingcall.py:82 #, python-format -msgid "Current state is %(drv_state)s, state in DB is %(db_state)s." +msgid "task run outlasted interval by %s sec" msgstr "" -#: cinder/compute/manager.py:245 -msgid "Rebooting instance after cinder-compute restart." 
+#: cinder/openstack/common/loopingcall.py:89 +#: cinder/tests/brick/test_brick_connector.py:466 +msgid "in fixed duration looping call" msgstr "" -#: cinder/compute/manager.py:255 -msgid "Hypervisor driver does not support firewall rules" +#: cinder/openstack/common/loopingcall.py:129 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" msgstr "" -#: cinder/compute/manager.py:260 -msgid "Checking state" +#: cinder/openstack/common/loopingcall.py:136 +msgid "in dynamic looping call" msgstr "" -#: cinder/compute/manager.py:329 +#: cinder/openstack/common/periodic_task.py:43 #, python-format -msgid "Setting up bdm %s" +msgid "Unexpected argument for periodic task creation: %(arg)s." msgstr "" -#: cinder/compute/manager.py:400 +#: cinder/openstack/common/periodic_task.py:134 #, python-format -msgid "Instance %s already deleted from database. Attempting forceful vm deletion" +msgid "Skipping periodic task %(task)s because its interval is negative" msgstr "" -#: cinder/compute/manager.py:406 +#: cinder/openstack/common/periodic_task.py:139 #, python-format -msgid "Exception encountered while terminating the instance %s" +msgid "Skipping periodic task %(task)s because it is disabled" msgstr "" -#: cinder/compute/manager.py:444 +#: cinder/openstack/common/periodic_task.py:177 #, python-format -msgid "Instance %s not found." +msgid "Running periodic task %(full_task_name)s" msgstr "" -#: cinder/compute/manager.py:480 -msgid "Instance has already been created" -msgstr "L'istanza é stata già creata" - -#: cinder/compute/manager.py:523 +#: cinder/openstack/common/periodic_task.py:186 #, python-format -msgid "" -"image_id=%(image_id)s, image_size_bytes=%(size_bytes)d, " -"allowed_size_bytes=%(allowed_size_bytes)d" +msgid "Error during %(full_task_name)s: %(e)s" msgstr "" -#: cinder/compute/manager.py:528 +#: cinder/openstack/common/policy.py:149 #, python-format msgid "" -"Image '%(image_id)s' size %(size_bytes)d exceeded instance_type allowed " -"size %(allowed_size_bytes)d" -msgstr "" - -#: cinder/compute/manager.py:538 -msgid "Starting instance..." -msgstr "" - -#: cinder/compute/manager.py:548 -msgid "Skipping network allocation for instance" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." 
msgstr "" -#: cinder/compute/manager.py:561 -msgid "Instance failed network setup" +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" msgstr "" -#: cinder/compute/manager.py:565 +#: cinder/openstack/common/policy.py:173 #, python-format -msgid "Instance network_info: |%s|" +msgid "Inheritance-based rules are deprecated; update _check_%s" msgstr "" -#: cinder/compute/manager.py:578 -msgid "Instance failed block device setup" +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" msgstr "" -#: cinder/compute/manager.py:594 -msgid "Instance failed to spawn" +#: cinder/openstack/common/processutils.py:127 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" msgstr "" -#: cinder/compute/manager.py:615 -msgid "Deallocating network for instance" -msgstr "" +#: cinder/openstack/common/processutils.py:142 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "Esecuzione del comando (sottoprocesso): %s" -#: cinder/compute/manager.py:672 +#: cinder/openstack/common/processutils.py:167 +#: cinder/openstack/common/processutils.py:239 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:345 #, python-format -msgid "%(action_str)s instance" -msgstr "" +msgid "Result was %s" +msgstr "Il risultato é %s" -#: cinder/compute/manager.py:699 +#: cinder/openstack/common/processutils.py:179 #, python-format -msgid "Ignoring DiskNotFound: %s" +msgid "%r failed. Retrying." msgstr "" -#: cinder/compute/manager.py:708 +#: cinder/openstack/common/processutils.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:318 #, python-format -msgid "terminating bdm %s" +msgid "Running cmd (SSH): %s" +msgstr "Eseguendo cmd (SSH): %s" + +#: cinder/openstack/common/processutils.py:220 +msgid "Environment not supported over SSH" msgstr "" -#: cinder/compute/manager.py:742 cinder/compute/manager.py:1328 -#: cinder/compute/manager.py:1416 cinder/compute/manager.py:2501 -#, python-format -msgid "%s. Setting instance vm_state to ERROR" +#: cinder/openstack/common/processutils.py:224 +msgid "process_input not supported over SSH" msgstr "" -#: cinder/compute/manager.py:811 +#: cinder/openstack/common/service.py:175 +#: cinder/openstack/common/service.py:269 #, python-format -msgid "" -"Cannot rebuild instance [%(instance_uuid)s], because the given image does" -" not exist." +msgid "Caught %s, exiting" msgstr "" -#: cinder/compute/manager.py:816 -#, python-format -msgid "Cannot rebuild instance [%(instance_uuid)s]: %(exc)s" +#: cinder/openstack/common/service.py:187 +msgid "Exception during rpc cleanup." 
msgstr "" -#: cinder/compute/manager.py:823 -#, python-format -msgid "Rebuilding instance %s" +#: cinder/openstack/common/service.py:238 +msgid "Parent process has died unexpectedly, exiting" msgstr "" -#: cinder/compute/manager.py:876 -#, python-format -msgid "Rebooting instance %s" -msgstr "Riavviando l'istanza %s" +#: cinder/openstack/common/service.py:275 +#, fuzzy +msgid "Unhandled exception" +msgstr "Eccezione interna: %s" -#: cinder/compute/manager.py:891 -#, python-format -msgid "" -"trying to reboot a non-running instance: %(instance_uuid)s (state: " -"%(state)s expected: %(running)s)" +#: cinder/openstack/common/service.py:308 +msgid "Forking too fast, sleeping" msgstr "" -#: cinder/compute/manager.py:933 +#: cinder/openstack/common/service.py:327 #, python-format -msgid "instance %s: snapshotting" -msgstr "istanza %s: creazione snapshot in corso" +msgid "Started child %d" +msgstr "" -#: cinder/compute/manager.py:939 +#: cinder/openstack/common/service.py:337 #, python-format -msgid "" -"trying to snapshot a non-running instance: %(instance_uuid)s (state: " -"%(state)s expected: %(running)s)" +msgid "Starting %d workers" msgstr "" -#: cinder/compute/manager.py:995 +#: cinder/openstack/common/service.py:354 #, python-format -msgid "Found %(num_images)d images (rotation: %(rotation)d)" +msgid "Child %(pid)d killed by signal %(sig)d" msgstr "" -#: cinder/compute/manager.py:1001 +#: cinder/openstack/common/service.py:358 #, python-format -msgid "Rotating out %d backups" +msgid "Child %(pid)s exited with status %(code)d" msgstr "" -#: cinder/compute/manager.py:1005 +#: cinder/openstack/common/service.py:362 #, python-format -msgid "Deleting image %s" +msgid "pid %d not in child list" msgstr "" -#: cinder/compute/manager.py:1035 +#: cinder/openstack/common/service.py:392 #, python-format -msgid "Failed to set admin password. Instance %s is not running" +msgid "Caught %s, stopping children" msgstr "" -#: cinder/compute/manager.py:1041 +#: cinder/openstack/common/service.py:410 #, python-format -msgid "Instance %s: Root password set" +msgid "Waiting on %d children to exit" msgstr "" -#: cinder/compute/manager.py:1050 -msgid "set_admin_password is not implemented by this driver." +#: cinder/openstack/common/sslutils.py:98 +#, python-format +msgid "Invalid SSL version : %s" msgstr "" -#: cinder/compute/manager.py:1064 -msgid "Error setting admin password" +#: cinder/openstack/common/strutils.py:86 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" msgstr "" -#: cinder/compute/manager.py:1079 +#: cinder/openstack/common/strutils.py:182 #, python-format -msgid "" -"trying to inject a file into a non-running instance: %(instance_uuid)s " -"(state: %(current_power_state)s expected: %(expected_state)s)" +msgid "Invalid string format: %s" msgstr "" -#: cinder/compute/manager.py:1084 +#: cinder/openstack/common/strutils.py:189 #, python-format -msgid "instance %(instance_uuid)s: injecting file to %(path)s" +msgid "Unknown byte multiplier: %s" msgstr "" -#: cinder/compute/manager.py:1098 +#: cinder/openstack/common/versionutils.py:69 #, python-format msgid "" -"trying to update agent on a non-running instance: %(instance_uuid)s " -"(state: %(current_power_state)s expected: %(expected_state)s)" +"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and " +"may be removed in %(remove_in)s." 
msgstr "" -#: cinder/compute/manager.py:1103 +#: cinder/openstack/common/versionutils.py:73 #, python-format -msgid "instance %(instance_uuid)s: updating agent to %(url)s" +msgid "" +"%(what)s is deprecated as of %(as_of)s and may be removed in " +"%(remove_in)s. It will not be superseded." msgstr "" -#: cinder/compute/manager.py:1116 -#, python-format -msgid "instance %s: rescuing" +#: cinder/openstack/common/crypto/utils.py:29 +msgid "An unknown error occurred in crypto utils." msgstr "" -#: cinder/compute/manager.py:1141 +#: cinder/openstack/common/crypto/utils.py:36 #, python-format -msgid "instance %s: unrescuing" -msgstr "" - -#: cinder/compute/manager.py:1270 -msgid "destination same as source!" +msgid "Block size of %(given)d is too big, max = %(maximum)d" msgstr "" -#: cinder/compute/manager.py:1287 +#: cinder/openstack/common/crypto/utils.py:45 #, python-format -msgid "instance %s: migrating" +msgid "Length of %(given)d is too long, max = %(maximum)d" msgstr "" -#: cinder/compute/manager.py:1471 -#, python-format -msgid "instance %s: pausing" -msgstr "istanza %s: in pausa" +#: cinder/openstack/common/db/exception.py:44 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" -#: cinder/compute/manager.py:1489 -#, python-format -msgid "instance %s: unpausing" -msgstr "istanza %s: fuori pausa" +#: cinder/openstack/common/db/sqlalchemy/session.py:487 +msgid "DB exception wrapped." +msgstr "" -#: cinder/compute/manager.py:1525 +#: cinder/openstack/common/db/sqlalchemy/session.py:538 #, python-format -msgid "instance %s: retrieving diagnostics" -msgstr "istanza %s: ricezione diagnostiche" +msgid "Got mysql server has gone away: %s" +msgstr "" -#: cinder/compute/manager.py:1534 +#: cinder/openstack/common/db/sqlalchemy/session.py:610 #, python-format -msgid "instance %s: suspending" -msgstr "istanza %s: sospensione in corso" +msgid "SQL connection failed. %s attempts left." +msgstr "" -#: cinder/compute/manager.py:1556 -#, python-format -msgid "instance %s: resuming" -msgstr "istanza %s: ripristino" +#: cinder/openstack/common/db/sqlalchemy/utils.py:33 +msgid "Sort key supplied was not valid." +msgstr "" -#: cinder/compute/manager.py:1579 +#: cinder/openstack/common/notifier/api.py:129 #, python-format -msgid "instance %s: locking" -msgstr "istanza %s: bloccato" +msgid "%s not in valid priorities" +msgstr "" -#: cinder/compute/manager.py:1588 +#: cinder/openstack/common/notifier/api.py:145 #, python-format -msgid "instance %s: unlocking" -msgstr "istanza %s: sbloccato" +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" -#: cinder/compute/manager.py:1596 +#: cinder/openstack/common/notifier/api.py:164 #, python-format -msgid "instance %s: getting locked state" +msgid "Failed to load notifier %s. These notifications will not be sent." msgstr "" -#: cinder/compute/manager.py:1606 -#, python-format -msgid "instance %s: reset network" -msgstr "istanza %s: ripristino rete" +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead." +msgstr "" -#: cinder/compute/manager.py:1614 +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 #, python-format -msgid "instance %s: inject network info" +msgid "Could not send notification to %(topic)s. 
Payload=%(message)s" msgstr "" -#: cinder/compute/manager.py:1618 +#: cinder/openstack/common/rpc/__init__.py:105 #, python-format -msgid "network_info to inject: |%s|" +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." msgstr "" -#: cinder/compute/manager.py:1655 -#, python-format -msgid "instance %s: getting vnc console" +#: cinder/openstack/common/rpc/amqp.py:83 +msgid "Pool creating new connection" msgstr "" -#: cinder/compute/manager.py:1685 +#: cinder/openstack/common/rpc/amqp.py:208 #, python-format -msgid "Booting with volume %(volume_id)s at %(mountpoint)s" +msgid "no calling threads waiting for msg_id : %s, message : %s" msgstr "" -#: cinder/compute/manager.py:1703 +#: cinder/openstack/common/rpc/amqp.py:216 #, python-format msgid "" -"instance %(instance_uuid)s: attaching volume %(volume_id)s to " -"%(mountpoint)s" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." msgstr "" -#: cinder/compute/manager.py:1705 +#: cinder/openstack/common/rpc/amqp.py:299 #, python-format -msgid "Attaching volume %(volume_id)s to %(mountpoint)s" -msgstr "" +msgid "unpacked context: %s" +msgstr "contesto decompresso: %s" -#: cinder/compute/manager.py:1714 +#: cinder/openstack/common/rpc/amqp.py:345 #, python-format -msgid "instance %(instance_uuid)s: attach failed %(mountpoint)s, removing" +msgid "UNIQUE_ID is %s." msgstr "" -#: cinder/compute/manager.py:1724 +#: cinder/openstack/common/rpc/amqp.py:414 #, python-format -msgid "Attach failed %(mountpoint)s, removing" -msgstr "" +msgid "received %s" +msgstr "ricevuto %s" -#: cinder/compute/manager.py:1752 +#: cinder/openstack/common/rpc/amqp.py:422 #, python-format -msgid "Detach volume %(volume_id)s from mountpoint %(mp)s" -msgstr "" +msgid "no method for message: %s" +msgstr "nessun metodo per il messaggio: %s" -#: cinder/compute/manager.py:1756 +#: cinder/openstack/common/rpc/amqp.py:423 #, python-format -msgid "Detaching volume from unknown instance %s" -msgstr "" +msgid "No method for message: %s" +msgstr "nessun metodo per il messagggio: %s" -#: cinder/compute/manager.py:1822 +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:280 #, python-format -msgid "" -"Creating tmpfile %s to notify to other compute nodes that they should " -"mount the same storage." +msgid "Expected exception during message handling (%s)" msgstr "" -#: cinder/compute/manager.py:1884 -msgid "Instance has no volume." +#: cinder/openstack/common/rpc/amqp.py:459 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" msgstr "" -#: cinder/compute/manager.py:1916 +#: cinder/openstack/common/rpc/amqp.py:594 #, python-format -msgid "plug_vifs() failed %(cnt)d.Retry up to %(max_retry)d for %(hostname)s." +msgid "Making synchronous call on %s ..." msgstr "" -#: cinder/compute/manager.py:1973 +#: cinder/openstack/common/rpc/amqp.py:597 #, python-format -msgid "Pre live migration failed at %(dest)s" -msgstr "" +msgid "MSG_ID is %s" +msgstr "MSG_ID é %s" -#: cinder/compute/manager.py:2000 -msgid "post_live_migration() is started.." +#: cinder/openstack/common/rpc/amqp.py:631 +#, python-format +msgid "Making asynchronous cast on %s..." msgstr "" -#: cinder/compute/manager.py:2030 -msgid "No floating_ip found" +#: cinder/openstack/common/rpc/amqp.py:640 +msgid "Making asynchronous fanout cast..." 
msgstr "" -#: cinder/compute/manager.py:2038 -msgid "No floating_ip found." +#: cinder/openstack/common/rpc/amqp.py:668 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" msgstr "" -#: cinder/compute/manager.py:2040 +#: cinder/openstack/common/rpc/common.py:74 +#, fuzzy +msgid "An unknown RPC related exception occurred." +msgstr "E' stato riscontrato un errore sconosciuto" + +#: cinder/openstack/common/rpc/common.py:104 #, python-format msgid "" -"Live migration: Unexpected error: cannot inherit floating ip.\n" -"%(e)s" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." msgstr "" -#: cinder/compute/manager.py:2073 +#: cinder/openstack/common/rpc/common.py:121 #, python-format -msgid "Migrating instance to %(dest)s finished successfully." -msgstr "" - -#: cinder/compute/manager.py:2075 msgid "" -"You may see the error \"libvirt: QEMU error: Domain not found: no domain " -"with matching name.\" This error can be safely ignored." +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" msgstr "" -#: cinder/compute/manager.py:2090 -msgid "Post operation of migration started" +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" msgstr "" -#: cinder/compute/manager.py:2226 +#: cinder/openstack/common/rpc/common.py:143 #, python-format -msgid "Updated the info_cache for instance %s" +msgid "Found duplicate message(%(msg_id)s). Skipping it." msgstr "" -#: cinder/compute/manager.py:2255 -msgid "Updating bandwidth usage cache" +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." msgstr "" -#: cinder/compute/manager.py:2277 -msgid "Updating host status" +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." msgstr "" -#: cinder/compute/manager.py:2305 +#: cinder/openstack/common/rpc/common.py:156 #, python-format msgid "" -"Found %(num_db_instances)s in the database and %(num_vm_instances)s on " -"the hypervisor." +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." msgstr "" -#: cinder/compute/manager.py:2331 +#: cinder/openstack/common/rpc/common.py:280 #, python-format -msgid "" -"During the sync_power process the instance %(uuid)s has moved from host " -"%(src)s to host %(dst)s" +msgid "Failed to sanitize %(item)s. Key error %(err)s" msgstr "" -#: cinder/compute/manager.py:2344 +#: cinder/openstack/common/rpc/common.py:302 #, python-format -msgid "" -"Instance %s is in the process of migrating to this host. Wait next " -"sync_power cycle before setting power state to NOSTATE" -msgstr "" +msgid "Returning exception %s to caller" +msgstr "Sollevando eccezione %s al chiamante" -#: cinder/compute/manager.py:2350 -msgid "" -"Instance found in database but not known by hypervisor. Setting power " -"state to NOSTATE" +#: cinder/openstack/common/rpc/impl_kombu.py:166 +#: cinder/openstack/common/rpc/impl_qpid.py:164 +msgid "Failed to process message... skipping it." msgstr "" -#: cinder/compute/manager.py:2380 -msgid "FLAGS.reclaim_instance_interval <= 0, skipping..." 
+#: cinder/openstack/common/rpc/impl_kombu.py:477 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" msgstr "" -#: cinder/compute/manager.py:2392 -msgid "Reclaiming deleted instance" +#: cinder/openstack/common/rpc/impl_kombu.py:499 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" msgstr "" -#: cinder/compute/manager.py:2458 +#: cinder/openstack/common/rpc/impl_kombu.py:536 #, python-format msgid "" -"Detected instance with name label '%(name)s' which is marked as DELETED " -"but still present on host." +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" msgstr "" -#: cinder/compute/manager.py:2465 +#: cinder/openstack/common/rpc/impl_kombu.py:552 #, python-format msgid "" -"Destroying instance with name label '%(name)s' which is marked as DELETED" -" but still present on host." +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." msgstr "" -#: cinder/compute/manager.py:2472 +#: cinder/openstack/common/rpc/impl_kombu.py:606 +#: cinder/openstack/common/rpc/impl_qpid.py:507 #, python-format -msgid "Unrecognized value '%(action)s' for FLAGS.running_deleted_instance_action" +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" msgstr "" -#: cinder/compute/manager.py:2542 +#: cinder/openstack/common/rpc/impl_kombu.py:624 +#: cinder/openstack/common/rpc/impl_qpid.py:522 #, python-format -msgid "" -"Aggregate %(aggregate_id)s: unrecoverable state during operation on " -"%(host)s" -msgstr "" - -#: cinder/compute/utils.py:142 -msgid "v4 subnets are required for legacy nw_info" +msgid "Timed out waiting for RPC response: %s" msgstr "" -#: cinder/console/manager.py:77 cinder/console/vmrc_manager.py:70 -msgid "Adding console" +#: cinder/openstack/common/rpc/impl_kombu.py:628 +#: cinder/openstack/common/rpc/impl_qpid.py:526 +#, python-format +msgid "Failed to consume message from queue: %s" msgstr "" -#: cinder/console/manager.py:97 +#: cinder/openstack/common/rpc/impl_kombu.py:667 +#: cinder/openstack/common/rpc/impl_qpid.py:561 #, python-format -msgid "Tried to remove non-existent console %(console_id)s." +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" msgstr "" -#: cinder/console/vmrc_manager.py:122 +#: cinder/openstack/common/rpc/impl_qpid.py:84 #, python-format -msgid "Tried to remove non-existent console %(console_id)s." +msgid "Invalid value for qpid_topology_version: %d" msgstr "" -#: cinder/console/vmrc_manager.py:125 +#: cinder/openstack/common/rpc/impl_qpid.py:455 #, python-format -msgid "Removing console %(console_id)s." +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" msgstr "" -#: cinder/console/xvp.py:98 -msgid "Rebuilding xvp conf" +#: cinder/openstack/common/rpc/impl_qpid.py:461 +#, python-format +msgid "Connected to AMQP server on %s" msgstr "" -#: cinder/console/xvp.py:116 -#, python-format -msgid "Re-wrote %s" +#: cinder/openstack/common/rpc/impl_qpid.py:474 +msgid "Re-established AMQP queues" msgstr "" -#: cinder/console/xvp.py:121 -msgid "Stopping xvp" +#: cinder/openstack/common/rpc/impl_qpid.py:534 +msgid "Error processing message. Skipping it." msgstr "" -#: cinder/console/xvp.py:134 -msgid "Starting xvp" +#: cinder/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." 
msgstr "" -#: cinder/console/xvp.py:141 +#: cinder/openstack/common/rpc/impl_zmq.py:101 #, python-format -msgid "Error starting xvp: %s" +msgid "Deserializing: %s" msgstr "" -#: cinder/console/xvp.py:144 -msgid "Restarting xvp" +#: cinder/openstack/common/rpc/impl_zmq.py:136 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" msgstr "" -#: cinder/console/xvp.py:146 -msgid "xvp not running..." +#: cinder/openstack/common/rpc/impl_zmq.py:137 +#, python-format +msgid "-> Subscribed to %(subscribe)s" msgstr "" -#: cinder/consoleauth/manager.py:63 +#: cinder/openstack/common/rpc/impl_zmq.py:138 #, python-format -msgid "Deleting Expired Token: (%s)" +msgid "-> bind: %(bind)s" msgstr "" -#: cinder/consoleauth/manager.py:75 -#, python-format -msgid "Received Token: %(token)s, %(token_dict)s)" +#: cinder/openstack/common/rpc/impl_zmq.py:146 +msgid "Could not open socket." msgstr "" -#: cinder/consoleauth/manager.py:79 +#: cinder/openstack/common/rpc/impl_zmq.py:158 #, python-format -msgid "Checking Token: %(token)s, %(token_valid)s)" +msgid "Subscribing to %s" msgstr "" -#: cinder/db/sqlalchemy/api.py:57 -msgid "Use of empty request context is deprecated" +#: cinder/openstack/common/rpc/impl_zmq.py:200 +msgid "You cannot recv on this socket." msgstr "" -#: cinder/db/sqlalchemy/api.py:198 -#, python-format -msgid "Unrecognized read_deleted value '%s'" +#: cinder/openstack/common/rpc/impl_zmq.py:205 +msgid "You cannot send on this socket." msgstr "" -#: cinder/db/sqlalchemy/api.py:516 cinder/db/sqlalchemy/api.py:551 -#, python-format -msgid "No ComputeNode for %(host)s" -msgstr "" +#: cinder/openstack/common/rpc/impl_zmq.py:267 +#, fuzzy, python-format +msgid "Running func with context: %s" +msgstr "contesto decompresso: %s" -#: cinder/db/sqlalchemy/api.py:4019 cinder/db/sqlalchemy/api.py:4045 -#, python-format -msgid "No backend config with id %(sm_backend_id)s" +#: cinder/openstack/common/rpc/impl_zmq.py:305 +msgid "Sending reply" msgstr "" -#: cinder/db/sqlalchemy/api.py:4103 -#, python-format -msgid "No sm_flavor called %(sm_flavor)s" +#: cinder/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." msgstr "" -#: cinder/db/sqlalchemy/api.py:4147 -#, python-format -msgid "No sm_volume with id %(volume_id)s" +#: cinder/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" msgstr "" -#: cinder/db/sqlalchemy/migration.py:66 -msgid "python-migrate is not installed. Exiting." +#: cinder/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" msgstr "" -#: cinder/db/sqlalchemy/migration.py:78 -msgid "version should be an integer" +#: cinder/openstack/common/rpc/impl_zmq.py:387 +msgid "Consuming socket" msgstr "" -#: cinder/db/sqlalchemy/session.py:137 +#: cinder/openstack/common/rpc/impl_zmq.py:437 #, python-format -msgid "SQL connection failed. %s attempts left." +msgid "Creating proxy for topic: %s" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:48 -msgid "interface column not added to networks table" +#: cinder/openstack/common/rpc/impl_zmq.py:443 +msgid "Topic contained dangerous characters." msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:80 -#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:54 -#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:61 -#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:48 -#, python-format -msgid "Table |%s| not created!" 
+#: cinder/openstack/common/rpc/impl_zmq.py:475 +msgid "Topic socket file creation failed." msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:87 -msgid "VIF column not added to fixed_ips table" +#: cinder/openstack/common/rpc/impl_zmq.py:481 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:97 +#: cinder/openstack/common/rpc/impl_zmq.py:497 #, python-format -msgid "join list for moving mac_addresses |%s|" +msgid "Required IPC directory does not exist at %s" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:39 -#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:60 -#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:61 -#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:99 -msgid "foreign key constraint couldn't be added" +#: cinder/openstack/common/rpc/impl_zmq.py:506 +#, python-format +msgid "Permission denied to IPC directory at %s" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:58 -msgid "foreign key constraint couldn't be dropped" +#: cinder/openstack/common/rpc/impl_zmq.py:509 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/045_add_network_priority.py:34 -msgid "priority column not added to networks table" +#: cinder/openstack/common/rpc/impl_zmq.py:543 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:41 -#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:42 -#: cinder/db/sqlalchemy/migrate_repo/versions/064_change_instance_id_to_uuid_in_instance_actions.py:56 -#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:68 -msgid "foreign key constraint couldn't be removed" +#: cinder/openstack/common/rpc/impl_zmq.py:562 +msgid "ZMQ Envelope version unsupported or unknown." msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/049_add_instances_progress.py:34 -msgid "progress column not added to instances table" +#: cinder/openstack/common/rpc/impl_zmq.py:590 +msgid "Skipping topic registration. Already registered." msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/055_convert_flavor_id_to_str.py:97 +#: cinder/openstack/common/rpc/impl_zmq.py:597 #, python-format -msgid "" -"Could not cast flavorid to integer: %s. Set flavorid to an integer-like " -"string to downgrade." 
+msgid "Consumer is a zmq.%s" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:69 -msgid "instance_info_caches tables not dropped" +#: cinder/openstack/common/rpc/impl_zmq.py:649 +msgid "Creating payload" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/069_block_migration.py:41 -msgid "progress column not added to compute_nodes table" +#: cinder/openstack/common/rpc/impl_zmq.py:662 +msgid "Creating queue socket for reply waiter" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:76 -msgid "dns_domains table not dropped" +#: cinder/openstack/common/rpc/impl_zmq.py:675 +msgid "Sending cast" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:60 -msgid "quota_classes table not dropped" +#: cinder/openstack/common/rpc/impl_zmq.py:678 +msgid "Cast sent; Waiting reply" msgstr "" -#: cinder/image/glance.py:147 -msgid "Connection error contacting glance server, retrying" +#: cinder/openstack/common/rpc/impl_zmq.py:681 +#, fuzzy, python-format +msgid "Received message: %s" +msgstr "ricevuto %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:682 +msgid "Unpacking response" msgstr "" -#: cinder/image/glance.py:153 cinder/network/quantum/melange_connection.py:104 -msgid "Maximum attempts reached" +#: cinder/openstack/common/rpc/impl_zmq.py:691 +msgid "Unsupported or unknown ZMQ envelope returned." msgstr "" -#: cinder/image/glance.py:278 +#: cinder/openstack/common/rpc/impl_zmq.py:698 +#, fuzzy +msgid "RPC Message Invalid." +msgstr "La richiesta non è valida." + +#: cinder/openstack/common/rpc/impl_zmq.py:721 #, python-format -msgid "Creating image in Glance. Metadata passed in %s" +msgid "%(msg)s" msgstr "" -#: cinder/image/glance.py:281 +#: cinder/openstack/common/rpc/impl_zmq.py:724 #, python-format -msgid "Metadata after formatting for Glance %s" +msgid "Sending message(s) to: %s" msgstr "" -#: cinder/image/glance.py:289 -#, python-format -msgid "Metadata returned from Glance formatted for Base %s" +#: cinder/openstack/common/rpc/impl_zmq.py:728 +msgid "No matchmaker results. Not casting." msgstr "" -#: cinder/image/glance.py:331 cinder/image/glance.py:335 -msgid "Not the image owner" +#: cinder/openstack/common/rpc/impl_zmq.py:731 +msgid "No match from matchmaker." msgstr "" -#: cinder/image/glance.py:410 +#: cinder/openstack/common/rpc/impl_zmq.py:771 #, python-format -msgid "%(timestamp)s does not follow any of the signatures: %(iso_formats)s" +msgid "topic is %s." msgstr "" -#: cinder/image/s3.py:309 +#: cinder/openstack/common/rpc/impl_zmq.py:815 #, python-format -msgid "Failed to download %(image_location)s to %(image_path)s" +msgid "rpc_zmq_matchmaker = %(orig)s is deprecated; use %(new)s instead" msgstr "" -#: cinder/image/s3.py:328 -#, python-format -msgid "Failed to decrypt %(image_location)s to %(image_path)s" +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." msgstr "" -#: cinder/image/s3.py:340 -#, python-format -msgid "Failed to untar %(image_location)s to %(image_path)s" +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." msgstr "" -#: cinder/image/s3.py:353 +#: cinder/openstack/common/rpc/matchmaker.py:239 #, python-format -msgid "Failed to upload %(image_location)s to %(image_path)s" +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." 
msgstr "" -#: cinder/image/s3.py:379 +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#: cinder/openstack/common/rpc/matchmaker_ring.py:79 +#: cinder/openstack/common/rpc/matchmaker_ring.py:97 #, python-format -msgid "Failed to decrypt private key: %s" +msgid "No key defining hosts for topic '%s', see ringfile" msgstr "" -#: cinder/image/s3.py:387 +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:54 #, python-format -msgid "Failed to decrypt initialization vector: %s" +msgid "extra_spec requirement '%(req)s' does not match '%(cap)s'" msgstr "" -#: cinder/image/s3.py:398 +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:67 #, python-format -msgid "Failed to decrypt image file %(image_file)s: %(err)s" +msgid "%(host_state)s fails resource_type extra_specs requirements" msgstr "" -#: cinder/image/s3.py:410 -msgid "Unsafe filenames in image" +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:43 +msgid "Re-scheduling is disabled." msgstr "" -#: cinder/ipv6/account_identifier.py:38 cinder/ipv6/rfc2462.py:34 +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:52 #, python-format -msgid "Bad mac for to_global_ipv6: %s" +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" msgstr "" -#: cinder/ipv6/account_identifier.py:40 cinder/ipv6/rfc2462.py:36 -#, python-format -msgid "Bad prefix for to_global_ipv6: %s" +#: cinder/scheduler/driver.py:69 +msgid "Must implement host_passes_filters" msgstr "" -#: cinder/ipv6/account_identifier.py:42 -#, python-format -msgid "Bad project_id for to_global_ipv6: %s" +#: cinder/scheduler/driver.py:74 +msgid "Must implement find_retype_host" +msgstr "" + +#: cinder/scheduler/driver.py:78 +msgid "Must implement a fallback schedule" msgstr "" -#: cinder/network/ldapdns.py:321 -msgid "This driver only supports type 'a' entries." 
+#: cinder/scheduler/driver.py:82 +msgid "Must implement schedule_create_volume" msgstr "" -#: cinder/network/linux_net.py:166 +#: cinder/scheduler/filter_scheduler.py:98 #, python-format -msgid "Attempted to remove chain %s which does not exist" +msgid "cannot place volume %(id)s on %(host)s" msgstr "" -#: cinder/network/linux_net.py:192 +#: cinder/scheduler/filter_scheduler.py:114 #, python-format -msgid "Unknown chain: %r" +msgid "No valid hosts for volume %(id)s with type %(type)s" msgstr "" -#: cinder/network/linux_net.py:215 +#: cinder/scheduler/filter_scheduler.py:125 #, python-format msgid "" -"Tried to remove rule that was not there: %(chain)r %(rule)r %(wrap)r " -"%(top)r" +"Current host not valid for volume %(id)s with type %(type)s, migration " +"not allowed" msgstr "" -#: cinder/network/linux_net.py:335 -msgid "IPTablesManager.apply completed with success" +#: cinder/scheduler/filter_scheduler.py:156 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" msgstr "" -#: cinder/network/linux_net.py:694 +#: cinder/scheduler/filter_scheduler.py:174 #, python-format -msgid "Hupping dnsmasq threw %s" +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" msgstr "" -#: cinder/network/linux_net.py:696 +#: cinder/scheduler/filter_scheduler.py:207 #, python-format -msgid "Pid %d is stale, relaunching dnsmasq" +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" msgstr "" -#: cinder/network/linux_net.py:756 +#: cinder/scheduler/filter_scheduler.py:259 #, python-format -msgid "killing radvd threw %s" +msgid "Filtered %s" msgstr "" -#: cinder/network/linux_net.py:758 +#: cinder/scheduler/filter_scheduler.py:276 #, python-format -msgid "Pid %d is stale, relaunching radvd" +msgid "Choosing %s" msgstr "" -#: cinder/network/linux_net.py:967 -#, python-format -msgid "Starting VLAN inteface %s" -msgstr "Avviando l'interfaccia VLAN %s" - -#: cinder/network/linux_net.py:999 +#: cinder/scheduler/host_manager.py:264 #, python-format -msgid "Starting Bridge interface for %s" -msgstr "Avviando l'interfaccia Bridge per %s" - -#: cinder/network/linux_net.py:1142 -#, fuzzy, python-format -msgid "Starting bridge %s " -msgstr "Avviando l'interfaccia Bridge per %s" +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" -#: cinder/network/linux_net.py:1149 +#: cinder/scheduler/host_manager.py:269 #, python-format -msgid "Done starting bridge %s" +msgid "Received %(service_name)s service update from %(host)s." msgstr "" -#: cinder/network/linux_net.py:1167 +#: cinder/scheduler/host_manager.py:294 #, python-format -msgid "Failed unplugging gateway interface '%s'" +msgid "volume service is down or disabled. (host: %s)" msgstr "" -#: cinder/network/linux_net.py:1170 -#, python-format -msgid "Unplugged gateway interface '%s'" +#: cinder/scheduler/manager.py:63 +msgid "" +"ChanceScheduler and SimpleScheduler have been deprecated due to lack of " +"support for advanced features like: volume types, volume encryption, QoS " +"etc. These two schedulers can be fully replaced by FilterScheduler with " +"certain combination of filters and weighers." 
msgstr "" -#: cinder/network/manager.py:291 -#, python-format -msgid "Fixed ip %(fixed_ip_id)s not found" +#: cinder/scheduler/manager.py:98 cinder/scheduler/manager.py:100 +msgid "Failed to create scheduler manager volume flow" msgstr "" -#: cinder/network/manager.py:300 cinder/network/manager.py:496 -#, python-format -msgid "Interface %(interface)s not found" +#: cinder/scheduler/manager.py:159 +msgid "New volume type not specified in request_spec." msgstr "" -#: cinder/network/manager.py:315 +#: cinder/scheduler/manager.py:174 #, python-format -msgid "floating IP allocation for instance |%s|" +msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." msgstr "" -#: cinder/network/manager.py:353 +#: cinder/scheduler/manager.py:192 #, python-format -msgid "floating IP deallocation for instance |%s|" +msgid "Failed to schedule_%(method)s: %(ex)s" msgstr "" -#: cinder/network/manager.py:386 +#: cinder/scheduler/scheduler_options.py:68 #, python-format -msgid "Address |%(address)s| is not allocated" +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" msgstr "" -#: cinder/network/manager.py:390 +#: cinder/scheduler/scheduler_options.py:78 #, python-format -msgid "Address |%(address)s| is not allocated to your project |%(project)s|" +msgid "Could not decode scheduler options: '%s'" msgstr "" -#: cinder/network/manager.py:402 -#, python-format -msgid "Quota exceeded for %s, tried to allocate address" +#: cinder/scheduler/filters/capacity_filter.py:43 +msgid "Free capacity not set: volume node info collection broken." msgstr "" -#: cinder/network/manager.py:614 +#: cinder/scheduler/filters/capacity_filter.py:57 #, python-format msgid "" -"Database inconsistency: DNS domain |%s| is registered in the Cinder db but " -"not visible to either the floating or instance DNS driver. It will be " -"ignored." +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" msgstr "" -#: cinder/network/manager.py:660 -#, python-format -msgid "Domain |%(domain)s| already exists, changing zone to |%(av_zone)s|." +#: cinder/scheduler/flows/create_volume.py:53 +msgid "No volume_id provided to populate a request_spec from" msgstr "" -#: cinder/network/manager.py:670 +#: cinder/scheduler/flows/create_volume.py:116 #, python-format -msgid "Domain |%(domain)s| already exists, changing project to |%(project)s|." 
+msgid "Failed to schedule_create_volume: %(cause)s" msgstr "" -#: cinder/network/manager.py:778 +#: cinder/scheduler/flows/create_volume.py:135 #, python-format -msgid "Disassociated %s stale fixed ip(s)" +msgid "Failed notifying on %(topic)s payload %(payload)s" msgstr "" -#: cinder/network/manager.py:782 -msgid "setting network host" +#: cinder/tests/fake_driver.py:57 cinder/volume/driver.py:784 +#, python-format +msgid "FAKE ISCSI: %s" msgstr "" -#: cinder/network/manager.py:896 +#: cinder/tests/fake_driver.py:76 cinder/volume/driver.py:884 #, python-format -msgid "network allocations for instance |%s|" +msgid "FAKE ISER: %s" msgstr "" -#: cinder/network/manager.py:901 -#, python-format -msgid "networks retrieved for instance |%(instance_id)s|: |%(networks)s|" +#: cinder/tests/fake_driver.py:97 +msgid "local_path not implemented" msgstr "" -#: cinder/network/manager.py:930 +#: cinder/tests/fake_driver.py:124 cinder/tests/fake_driver.py:129 #, python-format -msgid "network deallocation for instance |%s|" +msgid "LoggingVolumeDriver: %s" msgstr "" -#: cinder/network/manager.py:1152 +#: cinder/tests/fake_utils.py:70 #, python-format -msgid "" -"instance-dns-zone is |%(domain)s|, which is in availability zone " -"|%(zone)s|. Instance |%(instance)s| is in zone |%(zone2)s|. No DNS record" -" will be created." +msgid "Faking execution of cmd (subprocess): %s" msgstr "" -#: cinder/network/manager.py:1227 +#: cinder/tests/fake_utils.py:78 #, python-format -msgid "Unable to release %s because vif doesn't exist." +msgid "Faked command matched %s" msgstr "" -#: cinder/network/manager.py:1244 +#: cinder/tests/fake_utils.py:94 #, python-format -msgid "Leased IP |%(address)s|" +msgid "Faked command raised an exception %s" msgstr "" -#: cinder/network/manager.py:1248 +#: cinder/tests/fake_utils.py:97 #, python-format -msgid "IP %s leased that is not associated" +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" msgstr "" -#: cinder/network/manager.py:1256 +#: cinder/tests/test_misc.py:58 #, python-format -msgid "IP |%s| leased that isn't allocated" +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" msgstr "" -#: cinder/network/manager.py:1261 +#: cinder/tests/test_netapp_nfs.py:360 #, python-format -msgid "Released IP |%(address)s|" +msgid "Share %(share)s and file name %(file_name)s" msgstr "" -#: cinder/network/manager.py:1265 -#, python-format -msgid "IP %s released that is not associated" +#: cinder/tests/test_rbd.py:768 cinder/volume/drivers/rbd.py:175 +msgid "flush() not supported in this version of librbd" msgstr "" -#: cinder/network/manager.py:1268 +#: cinder/tests/test_storwize_svc.py:260 #, python-format -msgid "IP %s released that was not leased" +msgid "unrecognized argument %s" msgstr "" -#: cinder/network/manager.py:1331 -msgid "cidr already in use" +#: cinder/tests/test_storwize_svc.py:1507 +#, python-format +msgid "Run CLI command: %s" msgstr "" -#: cinder/network/manager.py:1334 +#: cinder/tests/test_storwize_svc.py:1510 #, python-format -msgid "requested cidr (%(cidr)s) conflicts with existing supernet (%(super)s)" +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" msgstr "" -#: cinder/network/manager.py:1345 +#: cinder/tests/test_storwize_svc.py:1515 #, python-format msgid "" -"requested cidr (%(cidr)s) conflicts with existing smaller cidr " -"(%(smaller)s)" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/network/manager.py:1404 -msgid "Network already exists!" 
+#: cinder/tests/test_volume_types.py:60 +#, python-format +msgid "Given data: %s" msgstr "" -#: cinder/network/manager.py:1423 +#: cinder/tests/test_volume_types.py:61 #, python-format -msgid "Network must be disassociated from project %s before delete" +msgid "Result data: %s" msgstr "" -#: cinder/network/manager.py:1832 -msgid "" -"The sum between the number of networks and the vlan start cannot be " -"greater than 4094" +#: cinder/tests/test_xiv_ds8k.py:102 +#, python-format +msgid "Volume not found for instance %(instance_id)s." msgstr "" -#: cinder/network/manager.py:1839 +#: cinder/tests/api/contrib/test_backups.py:741 +#, fuzzy +msgid "Invalid input" +msgstr "E' stato ricevuto un input non valido" + +#: cinder/tests/integrated/test_login.py:29 +#, fuzzy, python-format +msgid "volume: %s" +msgstr "volume %s: rimuovendo" + +#: cinder/tests/integrated/api/client.py:32 #, python-format msgid "" -"The network range is not big enough to fit %(num_networks)s. Network size" -" is %(network_size)s" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" msgstr "" -#: cinder/network/minidns.py:65 -msgid "This driver only supports type 'a'" +#: cinder/tests/integrated/api/client.py:42 +msgid "Authentication error" msgstr "" -#: cinder/network/quantum/client.py:154 -msgid "Tenant ID not set" +#: cinder/tests/integrated/api/client.py:50 +msgid "Authorization error" msgstr "" -#: cinder/network/quantum/client.py:180 -#, python-format -msgid "Quantum Client Request: %(method)s %(action)s" +#: cinder/tests/integrated/api/client.py:58 +msgid "Item not found" msgstr "" -#: cinder/network/quantum/client.py:196 +#: cinder/tests/integrated/api/client.py:100 #, python-format -msgid "Quantum entity not found: %s" +msgid "Doing %(method)s on %(relative_url)s" msgstr "" -#: cinder/network/quantum/client.py:206 +#: cinder/tests/integrated/api/client.py:103 #, python-format -msgid "Server %(status_code)s error: %(data)s" +msgid "Body: %s" msgstr "" -#: cinder/network/quantum/client.py:210 +#: cinder/tests/integrated/api/client.py:121 #, python-format -msgid "Unable to connect to server. Got error: %s" +msgid "%(auth_uri)s => code %(http_status)s" msgstr "" -#: cinder/network/quantum/client.py:228 +#: cinder/tests/integrated/api/client.py:148 #, python-format -msgid "unable to deserialize object of type = '%s'" -msgstr "" - -#: cinder/network/quantum/manager.py:162 -msgid "QuantumManager does not use 'multi_host' parameter." +msgid "%(relative_uri)s => code %(http_status)s" msgstr "" -#: cinder/network/quantum/manager.py:166 -msgid "QuantumManager requires that only one network is created per call" +#: cinder/tests/integrated/api/client.py:159 +msgid "Unexpected status code" msgstr "" -#: cinder/network/quantum/manager.py:176 -msgid "QuantumManager does not use 'vlan_start' parameter." +#: cinder/tests/integrated/api/client.py:166 +#, python-format +msgid "Decoding JSON: %s" msgstr "" -#: cinder/network/quantum/manager.py:182 -msgid "QuantumManager does not use 'vpn_start' parameter." +#: cinder/transfer/api.py:68 +msgid "Volume in unexpected state" msgstr "" -#: cinder/network/quantum/manager.py:186 -msgid "QuantumManager does not use 'bridge' parameter." +#: cinder/transfer/api.py:102 cinder/volume/api.py:367 +msgid "status must be available" msgstr "" -#: cinder/network/quantum/manager.py:190 -msgid "QuantumManager does not use 'bridge_interface' parameter." 
-msgstr "" +#: cinder/transfer/api.py:119 +#, fuzzy, python-format +msgid "Failed to create transfer record for %s" +msgstr "Impossibile localizzare il volume %s" -#: cinder/network/quantum/manager.py:195 -msgid "QuantumManager requires a valid (.1) gateway address." +#: cinder/transfer/api.py:136 +#, python-format +msgid "Attempt to transfer %s with invalid auth key." msgstr "" -#: cinder/network/quantum/manager.py:204 +#: cinder/transfer/api.py:156 cinder/volume/flows/api/create_volume.py:508 #, python-format msgid "" -"Unable to find existing quantum network for tenant '%(q_tenant_id)s' with" -" net-id '%(quantum_net_id)s'" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" msgstr "" -#: cinder/network/quantum/manager.py:301 +#: cinder/transfer/api.py:182 #, python-format -msgid "network allocations for instance %s" +msgid "Failed to update quota donating volumetransfer id %s" msgstr "" -#: cinder/network/quantum/manager.py:588 +#: cinder/transfer/api.py:199 #, python-format -msgid "" -"port deallocation failed for instance: |%(instance_id)s|, port_id: " -"|%(port_id)s|" +msgid "Volume %s has been transferred." msgstr "" -#: cinder/network/quantum/manager.py:606 +#: cinder/volume/api.py:143 #, python-format -msgid "" -"ipam deallocation failed for instance: |%(instance_id)s|, vif_uuid: " -"|%(vif_uuid)s|" +msgid "Unable to query if %s is in the availability zone set" msgstr "" -#: cinder/network/quantum/melange_connection.py:96 -#, python-format -msgid "Server returned error: %s" +#: cinder/volume/api.py:171 cinder/volume/api.py:173 +msgid "Failed to create api volume flow" msgstr "" -#: cinder/network/quantum/melange_connection.py:98 -msgid "Connection error contacting melange service, retrying" +#: cinder/volume/api.py:202 +msgid "Failed to update quota for deleting volume" msgstr "" -#: cinder/network/quantum/melange_connection.py:108 +#: cinder/volume/api.py:214 #, python-format -msgid "" -"allocate IP on network |%(network_id)s| belonging to " -"|%(network_tenant_id)s| to this vif |%(vif_id)s| with mac " -"|%(mac_address)s| belonging to |%(project_id)s| " +msgid "Volume status must be available or error, but current status is: %s" msgstr "" -#: cinder/network/quantum/melange_ipam_lib.py:133 -msgid "get_project_and_global_net_ids must be called with a non-null project_id" +#: cinder/volume/api.py:224 +msgid "Volume cannot be deleted while migrating" msgstr "" -#: cinder/network/quantum/cinder_ipam_lib.py:75 -msgid "Error creating network entry" +#: cinder/volume/api.py:229 +#, python-format +msgid "Volume still has %d dependent snapshots" msgstr "" -#: cinder/network/quantum/cinder_ipam_lib.py:90 +#: cinder/volume/api.py:285 cinder/volume/api.py:350 +#: cinder/volume/qos_specs.py:240 cinder/volume/volume_types.py:67 #, python-format -msgid "No network with net_id = %s" +msgid "Searching by: %s" msgstr "" -#: cinder/network/quantum/cinder_ipam_lib.py:221 -#, python-format -msgid "No fixed IPs to deallocate for vif %s" +#: cinder/volume/api.py:370 +msgid "already attached" msgstr "" -#: cinder/network/quantum/quantum_connection.py:99 -#, python-format -msgid "Connecting interface %(interface_id)s to net %(net_id)s for %(tenant_id)s" +#: cinder/volume/api.py:377 +msgid "status must be in-use to detach" msgstr "" -#: cinder/network/quantum/quantum_connection.py:113 -#, python-format -msgid "Deleting port %(port_id)s on net %(net_id)s for %(tenant_id)s" +#: cinder/volume/api.py:388 +#, fuzzy +msgid "Volume status 
must be available to reserve" +msgstr "Lo stato del volume deve essere disponibile" + +#: cinder/volume/api.py:464 +msgid "Snapshot cannot be created while volume is migrating" msgstr "" -#: cinder/notifier/api.py:115 -#, python-format -msgid "%s not in valid priorities" +#: cinder/volume/api.py:468 +msgid "must be available" msgstr "" -#: cinder/notifier/api.py:130 +#: cinder/volume/api.py:490 #, python-format msgid "" -"Problem '%(e)s' attempting to send to notification system. " -"Payload=%(payload)s" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" msgstr "" -#: cinder/notifier/list_notifier.py:65 +#: cinder/volume/api.py:502 #, python-format -msgid "Problem '%(e)s' attempting to send to notification driver %(driver)s." +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" msgstr "" -#: cinder/notifier/rabbit_notifier.py:46 -#, python-format -msgid "Could not send notification to %(topic)s. Payload=%(message)s" +#: cinder/volume/api.py:553 +#, fuzzy +msgid "Volume Snapshot status must be available or error" +msgstr "Lo stato del volume deve essere disponibile" + +#: cinder/volume/api.py:581 cinder/volume/flows/api/create_volume.py:208 +msgid "Metadata property key blank" msgstr "" -#: cinder/rpc/amqp.py:146 -#, python-format -msgid "Returning exception %s to caller" -msgstr "Sollevando eccezione %s al chiamante" +#: cinder/volume/api.py:585 +msgid "Metadata property key greater than 255 characters" +msgstr "" -#: cinder/rpc/amqp.py:188 -#, python-format -msgid "unpacked context: %s" -msgstr "contesto decompresso: %s" +#: cinder/volume/api.py:589 +msgid "Metadata property value greater than 255 characters" +msgstr "" -#: cinder/rpc/amqp.py:231 -#, python-format -msgid "received %s" -msgstr "ricevuto %s" +#: cinder/volume/api.py:720 cinder/volume/api.py:772 +#, fuzzy +msgid "Volume status must be available/in-use." +msgstr "Lo stato del volume deve essere disponibile" -#: cinder/rpc/amqp.py:236 -#, python-format -msgid "no method for message: %s" -msgstr "nessun metodo per il messaggio: %s" +#: cinder/volume/api.py:723 +msgid "Volume status is in-use." +msgstr "" -#: cinder/rpc/amqp.py:237 -#, python-format -msgid "No method for message: %s" -msgstr "nessun metodo per il messagggio: %s" +#: cinder/volume/api.py:752 +msgid "Volume status must be available to extend." +msgstr "" -#: cinder/rpc/amqp.py:321 +#: cinder/volume/api.py:757 #, python-format -msgid "Making asynchronous call on %s ..." +msgid "" +"New size for extend must be greater than current size. (current: " +"%(size)s, extended: %(new_size)s)" msgstr "" -#: cinder/rpc/amqp.py:324 -#, python-format -msgid "MSG_ID is %s" -msgstr "MSG_ID é %s" +#: cinder/volume/api.py:778 +msgid "Volume is already part of an active migration" +msgstr "" + +#: cinder/volume/api.py:784 +msgid "volume must not have snapshots" +msgstr "" -#: cinder/rpc/amqp.py:346 +#: cinder/volume/api.py:797 #, python-format -msgid "Making asynchronous cast on %s..." +msgid "No available service named %s" msgstr "" -#: cinder/rpc/amqp.py:354 -msgid "Making asynchronous fanout cast..." +#: cinder/volume/api.py:803 +msgid "Destination host must be different than current host" msgstr "" -#: cinder/rpc/amqp.py:379 -#, python-format -msgid "Sending notification on %s..." +#: cinder/volume/api.py:833 +msgid "Source volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:837 +msgid "Destination volume not mid-migration." 
msgstr "" -#: cinder/rpc/common.py:54 +#: cinder/volume/api.py:842 #, python-format -msgid "" -"Remote error: %(exc_type)s %(value)s\n" -"%(traceback)s." +msgid "Destination has migration_status %(stat)s, expected %(exp)s." msgstr "" -#: cinder/rpc/common.py:71 -msgid "Timeout while waiting on RPC response." +#: cinder/volume/api.py:853 +msgid "Volume status must be available to update readonly flag." msgstr "" -#: cinder/rpc/impl_kombu.py:111 -msgid "Failed to process message... skipping it." +#: cinder/volume/api.py:862 +#, python-format +msgid "Unable to update type due to incorrect status on volume: %s" msgstr "" -#: cinder/rpc/impl_kombu.py:407 +#: cinder/volume/api.py:868 #, python-format -msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgid "Volume %s is already part of an active migration." msgstr "" -#: cinder/rpc/impl_kombu.py:430 +#: cinder/volume/api.py:874 #, python-format -msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgid "migration_policy must be 'on-demand' or 'never', passed: %s" msgstr "" -#: cinder/rpc/impl_kombu.py:466 +#: cinder/volume/api.py:887 #, python-format -msgid "" -"Unable to connect to AMQP server on %(hostname)s:%(port)d after " -"%(max_retries)d tries: %(err_str)s" +msgid "Invalid volume_type passed: %s" msgstr "" -#: cinder/rpc/impl_kombu.py:482 +#: cinder/volume/api.py:900 #, python-format -msgid "" -"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " -"again in %(sleep_time)d seconds." +msgid "New volume_type same as original: %s" +msgstr "" + +#: cinder/volume/api.py:915 +msgid "Retype cannot change encryption requirements" msgstr "" -#: cinder/rpc/impl_kombu.py:533 cinder/rpc/impl_qpid.py:385 +#: cinder/volume/api.py:927 +msgid "Retype cannot change front-end qos specs for in-use volumes" +msgstr "" + +#: cinder/volume/driver.py:189 cinder/volume/drivers/netapp/nfs.py:174 #, python-format -msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgid "Recovering from a failed execute. Try number %s" msgstr "" -#: cinder/rpc/impl_kombu.py:551 cinder/rpc/impl_qpid.py:400 +#: cinder/volume/driver.py:282 #, python-format -msgid "Timed out waiting for RPC response: %s" +msgid "copy_data_between_volumes %(src)s -> %(dest)s." msgstr "" -#: cinder/rpc/impl_kombu.py:555 cinder/rpc/impl_qpid.py:404 +#: cinder/volume/driver.py:295 cinder/volume/driver.py:309 #, python-format -msgid "Failed to consume message from queue: %s" +msgid "Failed to attach volume %(vol)s" msgstr "" -#: cinder/rpc/impl_kombu.py:589 cinder/rpc/impl_qpid.py:434 +#: cinder/volume/driver.py:327 #, python-format -msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgid "Failed to copy volume %(src)s to %(dest)d" msgstr "" -#: cinder/rpc/impl_qpid.py:341 +#: cinder/volume/driver.py:340 #, python-format -msgid "Unable to connect to AMQP server: %s " +msgid "copy_image_to_volume %s." msgstr "" -#: cinder/rpc/impl_qpid.py:346 +#: cinder/volume/driver.py:358 #, python-format -msgid "Connected to AMQP server on %s" +msgid "copy_volume_to_image %s." msgstr "" -#: cinder/rpc/impl_qpid.py:354 -msgid "Re-established AMQP queues" +#: cinder/volume/driver.py:394 +#, python-format +msgid "Unable to access the backend storage via the path %(path)s." msgstr "" -#: cinder/rpc/impl_qpid.py:412 -msgid "Error processing message. Skipping it." +#: cinder/volume/driver.py:433 +#, python-format +msgid "Creating a new backup for volume %s." 
msgstr "" -#: cinder/scheduler/chance.py:47 cinder/scheduler/simple.py:91 -#: cinder/scheduler/simple.py:143 -msgid "Is the appropriate service running?" +#: cinder/volume/driver.py:451 +#, python-format +msgid "Restoring backup %(backup)s to volume %(volume)s." msgstr "" -#: cinder/scheduler/chance.py:52 -msgid "Could not find another compute" +#: cinder/volume/driver.py:474 +msgid "Extend volume not implemented" msgstr "" -#: cinder/scheduler/driver.py:63 -#, python-format -msgid "Casted '%(method)s' to volume '%(host)s'" +#: cinder/volume/driver.py:533 cinder/volume/drivers/emc/emc_smis_iscsi.py:113 +msgid "ISCSI provider_location not stored, using discovery" msgstr "" -#: cinder/scheduler/driver.py:80 +#: cinder/volume/driver.py:546 #, python-format -msgid "Casted '%(method)s' to compute '%(host)s'" +msgid "ISCSI discovery attempt failed for:%s" msgstr "" -#: cinder/scheduler/driver.py:89 +#: cinder/volume/driver.py:548 #, python-format -msgid "Casted '%(method)s' to network '%(host)s'" +msgid "Error from iscsiadm -m discovery: %s" msgstr "" -#: cinder/scheduler/driver.py:107 +#: cinder/volume/driver.py:595 +#, fuzzy, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "Impossibile localizzare il volume %s" + +#: cinder/volume/driver.py:599 cinder/volume/drivers/emc/emc_smis_iscsi.py:156 #, python-format -msgid "Casted '%(method)s' to %(topic)s '%(host)s'" +msgid "ISCSI Discovery: Found %s" msgstr "" -#: cinder/scheduler/driver.py:181 -msgid "Must implement a fallback schedule" +#: cinder/volume/driver.py:696 +msgid "The volume driver requires the iSCSI initiator name in the connector." msgstr "" -#: cinder/scheduler/driver.py:185 -msgid "Driver must implement schedule_prep_resize" +#: cinder/volume/driver.py:726 cinder/volume/driver.py:845 +#: cinder/volume/drivers/eqlx.py:247 cinder/volume/drivers/lvm.py:359 +#: cinder/volume/drivers/zadara.py:650 +#: cinder/volume/drivers/emc/emc_smis_common.py:859 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:235 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:602 +#: cinder/volume/drivers/netapp/iscsi.py:1032 +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/nexenta/iscsi.py:538 +#: cinder/volume/drivers/windows/windows.py:205 +msgid "Updating volume stats" msgstr "" -#: cinder/scheduler/driver.py:190 -msgid "Driver must implement schedule_run_instance" +#: cinder/volume/driver.py:924 +msgid "Driver must implement initialize_connection" msgstr "" -#: cinder/scheduler/driver.py:325 -msgid "Block migration can not be used with shared storage." +#: cinder/volume/manager.py:203 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." msgstr "" -#: cinder/scheduler/driver.py:330 -msgid "Live migration can not be used without shared storage." +#: cinder/volume/manager.py:209 +msgid "" +"ThinLVMVolumeDriver is deprecated, please configure LVMISCSIDriver and " +"lvm_type=thin. Continuing with those settings." msgstr "" -#: cinder/scheduler/driver.py:367 cinder/scheduler/driver.py:457 +#: cinder/volume/manager.py:228 #, python-format -msgid "host %(dest)s is not compatible with original host %(src)s." 
+msgid "Starting volume driver %(driver_name)s (%(version)s)" msgstr "" -#: cinder/scheduler/driver.py:416 +#: cinder/volume/manager.py:235 #, python-format -msgid "" -"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " -"memory(host:%(avail)s <= instance:%(mem_inst)s)" +msgid "Error encountered during initialization of driver: %(name)s" msgstr "" -#: cinder/scheduler/driver.py:472 +#: cinder/volume/manager.py:244 #, python-format -msgid "" -"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " -"disk(host:%(available)s <= instance:%(necessary)s)" +msgid "Re-exporting %s volumes" msgstr "" -#: cinder/scheduler/filter_scheduler.py:51 +#: cinder/volume/manager.py:257 #, python-format -msgid "No host selection for %s defined." +msgid "Failed to re-export volume %s: setting to error state" msgstr "" -#: cinder/scheduler/filter_scheduler.py:64 +#: cinder/volume/manager.py:264 #, python-format -msgid "Attempting to build %(num_instances)d instance(s)" +msgid "volume %s stuck in a downloading state" msgstr "" -#: cinder/scheduler/filter_scheduler.py:157 -msgid "Scheduler only understands Compute nodes (for now)" +#: cinder/volume/manager.py:271 +#, python-format +msgid "volume %s: skipping export" msgstr "" -#: cinder/scheduler/filter_scheduler.py:199 +#: cinder/volume/manager.py:273 #, python-format -msgid "Filtered %(hosts)s" +msgid "" +"Error encountered during re-exporting phase of driver initialization: " +"%(name)s" msgstr "" -#: cinder/scheduler/filter_scheduler.py:209 -#, python-format -msgid "Weighted %(weighted_host)s" +#: cinder/volume/manager.py:283 +msgid "Resuming any in progress delete operations" msgstr "" -#: cinder/scheduler/host_manager.py:144 +#: cinder/volume/manager.py:286 #, python-format -msgid "Host filter fails for ignored host %(host)s" +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:328 cinder/volume/manager.py:330 +msgid "Failed to create manager volume flow" msgstr "" -#: cinder/scheduler/host_manager.py:151 +#: cinder/volume/manager.py:374 cinder/volume/manager.py:391 +#, python-format +msgid "volume %s: deleting" +msgstr "volume %s: rimuovendo" + +#: cinder/volume/manager.py:380 +#, fuzzy +msgid "volume is not local to this node" +msgstr "Volume ancora collegato" + +#: cinder/volume/manager.py:389 #, python-format -msgid "Host filter fails for non-forced host %(host)s" +msgid "volume %s: removing export" msgstr "" -#: cinder/scheduler/host_manager.py:157 +#: cinder/volume/manager.py:394 #, python-format -msgid "Host filter function %(func)s failed for %(host)s" +msgid "Cannot delete volume %s: volume is busy" msgstr "" -#: cinder/scheduler/host_manager.py:163 +#: cinder/volume/manager.py:422 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:427 #, python-format -msgid "Host filter passes for %(host)s" +msgid "volume %s: glance metadata deleted" msgstr "" -#: cinder/scheduler/host_manager.py:272 +#: cinder/volume/manager.py:430 #, python-format -msgid "Received %(service_name)s service update from %(host)s." 
+msgid "no glance metadata found for volume %s" msgstr "" -#: cinder/scheduler/host_manager.py:313 -msgid "host_manager only implemented for 'compute'" +#: cinder/volume/manager.py:434 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "volume %s: rimosso con successo" + +#: cinder/volume/manager.py:451 +#, python-format +msgid "snapshot %s: creating" msgstr "" -#: cinder/scheduler/host_manager.py:323 +#: cinder/volume/manager.py:462 #, python-format -msgid "No service for compute ID %s" +msgid "snapshot %(snap_id)s: creating" msgstr "" -#: cinder/scheduler/manager.py:85 +#: cinder/volume/manager.py:490 #, python-format msgid "" -"Driver Method %(driver_method_name)s missing: %(e)s. Reverting to " -"schedule()" +"Failed updating %(snapshot_id)s metadata using the provided volumes " +"%(volume_id)s metadata" msgstr "" -#: cinder/scheduler/manager.py:150 +#: cinder/volume/manager.py:496 #, python-format -msgid "Failed to schedule_%(method)s: %(ex)s" +msgid "snapshot %s: created successfully" msgstr "" -#: cinder/scheduler/manager.py:159 +#: cinder/volume/manager.py:508 cinder/volume/manager.py:518 #, python-format -msgid "Setting instance %(instance_uuid)s to %(state)s state." +msgid "snapshot %s: deleting" msgstr "" -#: cinder/scheduler/scheduler_options.py:66 +#: cinder/volume/manager.py:526 #, python-format -msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgid "Cannot delete snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:556 +msgid "Failed to update usages deleting snapshot" msgstr "" -#: cinder/scheduler/scheduler_options.py:75 +#: cinder/volume/manager.py:559 #, python-format -msgid "Could not decode scheduler options: '%(e)s'" +msgid "snapshot %s: deleted successfully" msgstr "" -#: cinder/scheduler/simple.py:87 -msgid "Not enough allocatable CPU cores remaining" +#: cinder/volume/manager.py:579 +msgid "being attached by another instance" msgstr "" -#: cinder/scheduler/simple.py:137 -msgid "Not enough allocatable volume gigabytes remaining" +#: cinder/volume/manager.py:583 +msgid "being attached by another host" msgstr "" -#: cinder/scheduler/filters/core_filter.py:45 -msgid "VCPUs not set; assuming CPU collection broken" +#: cinder/volume/manager.py:587 +msgid "being attached by different mode" msgstr "" -#: cinder/tests/fake_utils.py:72 -#, python-format -msgid "Faking execution of cmd (subprocess): %s" +#: cinder/volume/manager.py:590 +msgid "status must be available or attaching" msgstr "" -#: cinder/tests/fake_utils.py:80 +#: cinder/volume/manager.py:698 #, python-format -msgid "Faked command matched %s" +msgid "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" msgstr "" -#: cinder/tests/fake_utils.py:96 +#: cinder/volume/manager.py:760 #, python-format -msgid "Faked command raised an exception %s" +msgid "Unable to fetch connection information from backend: %(err)s" msgstr "" -#: cinder/tests/fake_utils.py:101 +#: cinder/volume/manager.py:807 #, python-format -msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgid "Unable to terminate volume connection: %(err)s" msgstr "" -#: cinder/tests/fakelibvirt.py:784 -msgid "Please extend mock libvirt module to support flags" +#: cinder/volume/manager.py:854 +msgid "failed to create new_volume on destination host" msgstr "" -#: cinder/tests/fakelibvirt.py:790 -msgid "Please extend fake libvirt module to support this auth method" +#: cinder/volume/manager.py:857 +msgid "timeout creating new_volume on destination host" msgstr "" -#: 
cinder/tests/test_compute.py:365 cinder/tests/test_compute.py:1419 +#: cinder/volume/manager.py:880 #, python-format -msgid "Running instances: %s" +msgid "Failed to copy volume %(vol1)s to %(vol2)s" msgstr "" -#: cinder/tests/test_compute.py:371 +#: cinder/volume/manager.py:909 #, python-format -msgid "After terminating instances: %s" +msgid "" +"migrate_volume_completion: completing migration for volume %(vol1)s " +"(temporary volume %(vol2)s" msgstr "" -#: cinder/tests/test_compute.py:589 -msgid "Internal error" +#: cinder/volume/manager.py:921 +#, python-format +msgid "" +"migrate_volume_completion is cleaning up an error for volume %(vol1)s " +"(temporary volume %(vol2)s" msgstr "" -#: cinder/tests/test_compute.py:1430 +#: cinder/volume/manager.py:940 #, python-format -msgid "After force-killing instances: %s" +msgid "Failed to delete migration source vol %(vol)s: %(err)s" msgstr "" -#: cinder/tests/test_misc.py:92 +#: cinder/volume/manager.py:976 #, python-format -msgid "" -"The following migrations are missing a downgrade:\n" -"\t%s" +msgid "volume %s: calling driver migrate_volume" msgstr "" -#: cinder/tests/test_cinder_manage.py:169 -msgid "id" +#: cinder/volume/manager.py:1016 +msgid "Updating volume status" msgstr "" -#: cinder/tests/test_cinder_manage.py:170 -msgid "IPv4" +#: cinder/volume/manager.py:1024 +#, python-format +msgid "" +"Unable to update stats, %(driver_name)s -%(driver_version)s " +"%(config_group)s driver is uninitialized." msgstr "" -#: cinder/tests/test_cinder_manage.py:171 -msgid "IPv6" +#: cinder/volume/manager.py:1044 +#, python-format +msgid "Notification {%s} received" msgstr "" -#: cinder/tests/test_cinder_manage.py:172 -msgid "start address" +#: cinder/volume/manager.py:1091 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to extend volume by %(s_size)sG, " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" msgstr "" -#: cinder/tests/test_cinder_manage.py:173 -msgid "DNS1" +#: cinder/volume/manager.py:1103 +#, python-format +msgid "volume %s: extending" msgstr "" -#: cinder/tests/test_cinder_manage.py:174 -msgid "DNS2" +#: cinder/volume/manager.py:1105 +#, python-format +msgid "volume %s: extended successfully" msgstr "" -#: cinder/tests/test_cinder_manage.py:175 -msgid "VlanID" +#: cinder/volume/manager.py:1107 +#, python-format +msgid "volume %s: Error trying to extend volume" msgstr "" -#: cinder/tests/test_cinder_manage.py:176 -msgid "project" +#: cinder/volume/manager.py:1169 +msgid "Failed to update usages while retyping volume." msgstr "" -#: cinder/tests/test_cinder_manage.py:177 -msgid "uuid" +#: cinder/volume/manager.py:1170 +msgid "Failed to get old volume type quota reservations" msgstr "" -#: cinder/tests/test_volume.py:216 +#: cinder/volume/manager.py:1190 #, python-format -msgid "Target %s allocated" +msgid "Volume %s: retyped successfully" msgstr "" -#: cinder/tests/test_volume.py:468 +#: cinder/volume/manager.py:1193 #, python-format -msgid "Cannot confirm exported volume id:%s." +msgid "" +"Volume %s: driver error when trying to retype, falling back to generic " +"mechanism." msgstr "" -#: cinder/tests/test_volume_types.py:58 -#, python-format -msgid "Given data: %s" +#: cinder/volume/manager.py:1204 +msgid "Retype requires migration but is not allowed." msgstr "" -#: cinder/tests/test_volume_types.py:59 -#, python-format -msgid "Result data: %s" +#: cinder/volume/manager.py:1212 +msgid "Volume must not have snapshots."
msgstr "" -#: cinder/tests/test_xenapi.py:626 +#: cinder/volume/qos_specs.py:57 #, python-format -msgid "Creating files in %s to simulate guest agent" +msgid "Valid consumer of QoS specs are: %s" msgstr "" -#: cinder/tests/test_xenapi.py:637 +#: cinder/volume/qos_specs.py:84 cinder/volume/qos_specs.py:105 +#: cinder/volume/qos_specs.py:155 cinder/volume/qos_specs.py:197 +#: cinder/volume/qos_specs.py:211 cinder/volume/qos_specs.py:225 +#: cinder/volume/volume_types.py:43 #, python-format -msgid "Removing simulated guest agent files in %s" +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:123 cinder/volume/qos_specs.py:140 +#: cinder/volume/qos_specs.py:272 cinder/volume/volume_types.py:52 +#: cinder/volume/volume_types.py:99 +msgid "id cannot be None" msgstr "" -#: cinder/tests/api/openstack/compute/test_servers.py:2144 +#: cinder/volume/qos_specs.py:156 #, python-format -msgid "Quota exceeded: code=%(code)s" +msgid "Failed to get all associations of qos specs %s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:57 +#: cinder/volume/qos_specs.py:189 #, python-format -msgid "_create: %s" +msgid "" +"Type %(type_id)s is already associated with another qos specs: " +"%(qos_specs_id)s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:66 +#: cinder/volume/qos_specs.py:198 #, python-format -msgid "_delete: %s" +msgid "Failed to associate qos specs %(id)s with type: %(vol_type_id)s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:75 +#: cinder/volume/qos_specs.py:212 #, python-format -msgid "_get: %s" +msgid "Failed to disassociate qos specs %(id)s with type: %(vol_type_id)s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:85 +#: cinder/volume/qos_specs.py:226 #, python-format -msgid "_get_all: %s" +msgid "Failed to disassociate qos specs %s." msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:125 +#: cinder/volume/qos_specs.py:284 cinder/volume/volume_types.py:111 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/utils.py:144 #, python-format -msgid "test_snapshot_create: param=%s" +msgid "" +"Incorrect value error: %(blocksize)s, it may indicate that " +"'volume_dd_blocksize' was configured incorrectly. Fall back to default." msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:134 +#: cinder/volume/volume_types.py:130 #, python-format -msgid "test_snapshot_create: resp_dict=%s" +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:156 +#: cinder/volume/drivers/block_device.py:131 +#: cinder/volume/drivers/block_device.py:143 cinder/volume/drivers/lvm.py:654 +#: cinder/volume/drivers/lvm.py:669 #, python-format -msgid "test_snapshot_create_force: param=%s" +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:165 +#: cinder/volume/drivers/block_device.py:157 cinder/volume/drivers/lvm.py:687 #, python-format -msgid "test_snapshot_create_force: resp_dict=%s" +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:205 +#: cinder/volume/drivers/block_device.py:183 cinder/volume/drivers/lvm.py:483 #, python-format -msgid "test_snapshot_show: resp=%s" +msgid "Skipping ensure_export. 
No iscsi_target provision for volume: %s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:231 +#: cinder/volume/drivers/block_device.py:200 cinder/volume/drivers/lvm.py:504 #, python-format -msgid "test_snapshot_detail: resp_dict=%s" +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" msgstr "" -#: cinder/tests/integrated/test_login.py:31 +#: cinder/volume/drivers/block_device.py:272 cinder/volume/drivers/lvm.py:227 #, python-format -msgid "flavor: %s" +msgid "Performing secure delete on volume: %s" msgstr "" -#: cinder/tests/integrated/api/client.py:38 +#: cinder/volume/drivers/block_device.py:287 #, python-format -msgid "" -"%(message)s\n" -"Status Code: %(_status)s\n" -"Body: %(_body)s" +msgid "Error unrecognized volume_clear option: %s" msgstr "" -#: cinder/tests/integrated/api/client.py:47 -msgid "Authentication error" +#: cinder/volume/drivers/block_device.py:311 cinder/volume/drivers/lvm.py:300 +#: cinder/volume/drivers/zadara.py:509 cinder/volume/drivers/nexenta/nfs.py:189 +#, python-format +msgid "Creating clone of volume: %s" msgstr "" -#: cinder/tests/integrated/api/client.py:55 -msgid "Authorization error" +#: cinder/volume/drivers/block_device.py:380 +msgid "No free disk" msgstr "" -#: cinder/tests/integrated/api/client.py:63 -msgid "Item not found" +#: cinder/volume/drivers/block_device.py:393 +msgid "No big enough free disk" msgstr "" -#: cinder/tests/integrated/api/client.py:105 +#: cinder/volume/drivers/coraid.py:84 #, python-format -msgid "Doing %(method)s on %(relative_url)s" +msgid "Invalid ESM url scheme \"%s\". Supported https only." msgstr "" -#: cinder/tests/integrated/api/client.py:107 -#, python-format -msgid "Body: %s" +#: cinder/volume/drivers/coraid.py:111 +msgid "Invalid REST handle name. Expected path." msgstr "" -#: cinder/tests/integrated/api/client.py:125 +#: cinder/volume/drivers/coraid.py:134 #, python-format -msgid "%(auth_uri)s => code %(http_status)s" +msgid "Call to json.loads() failed: %(ex)s. Response: %(resp)s" msgstr "" -#: cinder/tests/integrated/api/client.py:151 -#, python-format -msgid "%(relative_uri)s => code %(http_status)s" +#: cinder/volume/drivers/coraid.py:224 +msgid "Session is expired. Relogin on ESM." msgstr "" -#: cinder/tests/integrated/api/client.py:161 -msgid "Unexpected status code" +#: cinder/volume/drivers/coraid.py:244 +msgid "Reply is empty." msgstr "" -#: cinder/tests/integrated/api/client.py:168 -#, python-format -msgid "Decoding JSON: %s" +#: cinder/volume/drivers/coraid.py:246 +msgid "Error message is empty." msgstr "" -#: cinder/tests/rpc/common.py:133 +#: cinder/volume/drivers/coraid.py:284 #, python-format -msgid "Nested received %(queue)s, %(value)s" +msgid "Coraid Appliance ping failed: %s" msgstr "" -#: cinder/tests/rpc/common.py:142 +#: cinder/volume/drivers/coraid.py:297 #, python-format -msgid "Nested return %s" +msgid "Volume \"%(name)s\" created with VSX LUN \"%(lun)s\"" msgstr "" -#: cinder/tests/rpc/common.py:160 -msgid "RPC backend does not support timeouts" +#: cinder/volume/drivers/coraid.py:311 +#, python-format +msgid "Volume \"%s\" deleted." msgstr "" -#: cinder/tests/rpc/common.py:227 cinder/tests/rpc/common.py:233 +#: cinder/volume/drivers/coraid.py:315 #, python-format -msgid "Received %s" +msgid "Resize volume \"%(name)s\" to %(size)s GB." 
msgstr "" -#: cinder/virt/connection.py:85 -msgid "Failed to open connection to the hypervisor" -msgstr "Fallita l'apertura della connessione verso l'hypervisor" - -#: cinder/virt/fake.py:270 cinder/virt/xenapi_conn.py:396 -#: cinder/virt/baremetal/proxy.py:716 cinder/virt/libvirt/connection.py:2045 +#: cinder/volume/drivers/coraid.py:319 #, python-format -msgid "Compute_service record created for %s " +msgid "Repository for volume \"%(name)s\" found: \"%(repo)s\"" msgstr "" -#: cinder/virt/fake.py:273 cinder/virt/xenapi_conn.py:399 -#: cinder/virt/baremetal/proxy.py:720 cinder/virt/libvirt/connection.py:2048 +#: cinder/volume/drivers/coraid.py:333 #, python-format -msgid "Compute_service record updated for %s " +msgid "Volume \"%(name)s\" resized. New size is %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:385 +msgid "Cannot create clone volume in different repository." msgstr "" -#: cinder/virt/firewall.py:130 +#: cinder/volume/drivers/coraid.py:505 #, python-format -msgid "Attempted to unfilter instance %s which is not filtered" +msgid "Initialize connection %(shelf)s/%(lun)s for %(name)s" msgstr "" -#: cinder/virt/firewall.py:137 +#: cinder/volume/drivers/eqlx.py:139 #, python-format -msgid "Filters added to instance %s" +msgid "" +"CLI output\n" +"%s" msgstr "" -#: cinder/virt/firewall.py:139 -msgid "Provider Firewall Rules refreshed" +#: cinder/volume/drivers/eqlx.py:154 +msgid "Reading CLI MOTD" msgstr "" -#: cinder/virt/firewall.py:291 +#: cinder/volume/drivers/eqlx.py:158 #, python-format -msgid "Adding security group rule: %r" +msgid "Setting CLI terminal width: '%s'" msgstr "" -#: cinder/virt/firewall.py:403 cinder/virt/xenapi/firewall.py:87 +#: cinder/volume/drivers/eqlx.py:162 #, python-format -msgid "Adding provider rule: %s" +msgid "Sending CLI command: '%s'" msgstr "" -#: cinder/virt/images.py:86 -msgid "'qemu-img info' parsing failed." +#: cinder/volume/drivers/eqlx.py:169 +msgid "Error executing EQL command" msgstr "" -#: cinder/virt/images.py:92 +#: cinder/volume/drivers/eqlx.py:199 #, python-format -msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgid "EQL-driver: executing \"%s\"" msgstr "" -#: cinder/virt/images.py:104 +#: cinder/volume/drivers/eqlx.py:208 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:383 #, python-format -msgid "Converted to raw, but format is now %s" +msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" msgstr "" -#: cinder/virt/vmwareapi_conn.py:105 -msgid "" -"Must specify vmwareapi_host_ip,vmwareapi_host_username and " -"vmwareapi_host_password to useconnection_type=vmwareapi" +#: cinder/volume/drivers/eqlx.py:215 cinder/volume/drivers/san/san.py:149 +#, fuzzy, python-format +msgid "Error running SSH command: %s" +msgstr "Si e' verificato un errore inatteso durante l'esecuzione del comando." 
+ +#: cinder/volume/drivers/eqlx.py:282 +#, python-format +msgid "Volume %s does not exist, it may have already been deleted" msgstr "" -#: cinder/virt/vmwareapi_conn.py:276 +#: cinder/volume/drivers/eqlx.py:300 #, python-format -msgid "In vmwareapi:_create_session, got this exception: %s" +msgid "EQL-driver: Setup is complete, group IP is %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:304 +msgid "Failed to setup the Dell EqualLogic driver" msgstr "" -#: cinder/virt/vmwareapi_conn.py:359 +#: cinder/volume/drivers/eqlx.py:320 #, python-format -msgid "In vmwareapi:_call_method, got this exception: %s" +msgid "Failed to create volume %s" msgstr "" -#: cinder/virt/vmwareapi_conn.py:398 +#: cinder/volume/drivers/eqlx.py:329 #, python-format -msgid "Task [%(task_name)s] %(task_ref)s status: success" +msgid "Volume %s was not found while trying to delete it" msgstr "" -#: cinder/virt/vmwareapi_conn.py:404 +#: cinder/volume/drivers/eqlx.py:333 #, python-format -msgid "Task [%(task_name)s] %(task_ref)s status: error %(error_info)s" +msgid "Failed to delete volume %s" msgstr "" -#: cinder/virt/vmwareapi_conn.py:409 +#: cinder/volume/drivers/eqlx.py:348 #, python-format -msgid "In vmwareapi:_poll_task, Got this error %s" +msgid "Failed to create snapshot of volume %s" msgstr "" -#: cinder/virt/xenapi_conn.py:140 -msgid "" -"Must specify xenapi_connection_url, xenapi_connection_username " -"(optionally), and xenapi_connection_password to use " -"connection_type=xenapi" +#: cinder/volume/drivers/eqlx.py:361 +#, python-format +msgid "Failed to create volume from snapshot %s" msgstr "" -#: cinder/virt/xenapi_conn.py:329 cinder/virt/libvirt/connection.py:472 -msgid "Could not determine iscsi initiator name" +#: cinder/volume/drivers/eqlx.py:374 +#, python-format +msgid "Failed to create clone of volume %s" msgstr "" -#: cinder/virt/xenapi_conn.py:460 -msgid "Host startup on XenServer is not supported." +#: cinder/volume/drivers/eqlx.py:384 +#, python-format +msgid "Failed to delete snapshot %(snap)s of volume %(vol)s" msgstr "" -#: cinder/virt/xenapi_conn.py:489 -msgid "Unable to log in to XenAPI (is the Dom0 disk full?)" +#: cinder/volume/drivers/eqlx.py:405 +#, python-format +msgid "Failed to initialize connection to volume %s" msgstr "" -#: cinder/virt/xenapi_conn.py:527 -msgid "Host is member of a pool, but DB says otherwise" +#: cinder/volume/drivers/eqlx.py:415 +#, python-format +msgid "Failed to terminate connection to volume %s" msgstr "" -#: cinder/virt/xenapi_conn.py:599 cinder/virt/xenapi_conn.py:612 +#: cinder/volume/drivers/eqlx.py:436 #, python-format -msgid "Got exception: %s" +msgid "Volume %s is not found!, it may have been deleted" msgstr "" -#: cinder/virt/baremetal/dom.py:93 -msgid "No domains exist."
+#: cinder/volume/drivers/eqlx.py:440 +#, python-format +msgid "Failed to ensure export of volume %s" msgstr "" -#: cinder/virt/baremetal/dom.py:95 +#: cinder/volume/drivers/eqlx.py:459 #, python-format -msgid "============= initial domains =========== : %s" +msgid "Failed to extend_volume %(name)s from %(current_size)sGB to %(new_size)sGB" msgstr "" -#: cinder/virt/baremetal/dom.py:99 -msgid "Building domain: to be removed" +#: cinder/volume/drivers/glusterfs.py:86 +#, python-format +msgid "There's no Gluster config file configured (%s)" msgstr "" -#: cinder/virt/baremetal/dom.py:103 -msgid "Not running domain: remove" +#: cinder/volume/drivers/glusterfs.py:91 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" msgstr "" -#: cinder/virt/baremetal/dom.py:111 -msgid "domain running on an unknown node: discarded" +#: cinder/volume/drivers/glusterfs.py:103 +msgid "mount.glusterfs is not installed" msgstr "" -#: cinder/virt/baremetal/dom.py:127 +#: cinder/volume/drivers/glusterfs.py:161 #, python-format -msgid "No such domain (%s)" +msgid "Cloning volume %(src)s to volume %(dst)s" msgstr "" -#: cinder/virt/baremetal/dom.py:134 -#, python-format -msgid "Failed power down Bare-metal node %s" +#: cinder/volume/drivers/glusterfs.py:166 +msgid "Volume status must be 'available'." msgstr "" -#: cinder/virt/baremetal/dom.py:143 -msgid "deactivate -> activate fails" +#: cinder/volume/drivers/glusterfs.py:202 cinder/volume/drivers/nfs.py:122 +#: cinder/volume/drivers/netapp/nfs.py:753 +#, python-format +msgid "casted to %s" msgstr "" -#: cinder/virt/baremetal/dom.py:153 -msgid "destroy_domain: no such domain" +#: cinder/volume/drivers/glusterfs.py:216 +msgid "Snapshot status must be \"available\" to clone." msgstr "" -#: cinder/virt/baremetal/dom.py:154 +#: cinder/volume/drivers/glusterfs.py:238 #, python-format -msgid "No such domain %s" +msgid "snapshot: %(snap)s, volume: %(vol)s, volume_size: %(size)s" msgstr "" -#: cinder/virt/baremetal/dom.py:161 +#: cinder/volume/drivers/glusterfs.py:257 #, python-format -msgid "Domains: %s" +msgid "will copy from snapshot at %s" msgstr "" -#: cinder/virt/baremetal/dom.py:163 +#: cinder/volume/drivers/glusterfs.py:275 cinder/volume/drivers/nfs.py:172 #, python-format -msgid "Nodes: %s" +msgid "Volume %s does not have provider_location specified, skipping" msgstr "" -#: cinder/virt/baremetal/dom.py:166 +#: cinder/volume/drivers/glusterfs.py:373 #, python-format -msgid "After storing domains: %s" +msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" msgstr "" -#: cinder/virt/baremetal/dom.py:169 -msgid "deactivation/removing domain failed" +#: cinder/volume/drivers/glusterfs.py:403 +#, python-format +msgid "nova call result: %s" msgstr "" -#: cinder/virt/baremetal/dom.py:176 -msgid "===== Domain is being created =====" +#: cinder/volume/drivers/glusterfs.py:405 +msgid "Call to Nova to create snapshot failed" msgstr "" -#: cinder/virt/baremetal/dom.py:179 -msgid "Same domain name already exists" +#: cinder/volume/drivers/glusterfs.py:427 +msgid "Nova returned \"error\" status while creating snapshot." msgstr "" -#: cinder/virt/baremetal/dom.py:181 -msgid "create_domain: before get_idle_node" +#: cinder/volume/drivers/glusterfs.py:431 +#, python-format +msgid "Status of snapshot %(id)s is now %(status)s" msgstr "" -#: cinder/virt/baremetal/dom.py:198 +#: cinder/volume/drivers/glusterfs.py:444 #, python-format -msgid "Created new domain: %s" +msgid "Timed out while waiting for Nova update for creation of snapshot %s." 
msgstr "" -#: cinder/virt/baremetal/dom.py:213 +#: cinder/volume/drivers/glusterfs.py:456 #, python-format -msgid "Failed to boot Bare-metal node %s" +msgid "create snapshot: %s" msgstr "" -#: cinder/virt/baremetal/dom.py:222 -msgid "No such domain exists" +#: cinder/volume/drivers/glusterfs.py:457 +#, python-format +msgid "volume id: %s" msgstr "" -#: cinder/virt/baremetal/dom.py:226 -#, python-format -msgid "change_domain_state: to new state %s" +#: cinder/volume/drivers/glusterfs.py:532 +msgid "'active' must be present when writing snap_info." msgstr "" -#: cinder/virt/baremetal/dom.py:233 +#: cinder/volume/drivers/glusterfs.py:562 #, python-format -msgid "Stored fake domains to the file: %s" +msgid "deleting snapshot %s" msgstr "" -#: cinder/virt/baremetal/dom.py:244 -msgid "domain does not exist" +#: cinder/volume/drivers/glusterfs.py:566 +msgid "Volume status must be \"available\" or \"in-use\"." msgstr "" -#: cinder/virt/baremetal/nodes.py:42 +#: cinder/volume/drivers/glusterfs.py:582 #, python-format -msgid "Unknown baremetal driver %(d)s" +msgid "" +"Snapshot record for %s is not present, allowing snapshot_delete to " +"proceed." msgstr "" -#: cinder/virt/baremetal/proxy.py:148 +#: cinder/volume/drivers/glusterfs.py:587 #, python-format -msgid "Error encountered when destroying instance '%(name)s': %(ex)s" +msgid "snapshot_file for this snap is %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:162 +#: cinder/volume/drivers/glusterfs.py:608 #, python-format -msgid "instance %(instance_name)s: deleting instance files %(target)s" +msgid "No base file found for %s." msgstr "" -#: cinder/virt/baremetal/proxy.py:189 +#: cinder/volume/drivers/glusterfs.py:625 #, python-format -msgid "instance %s: rebooted" +msgid "No %(base_id)s found for %(file)s" msgstr "" -#: cinder/virt/baremetal/proxy.py:192 -msgid "_wait_for_reboot failed" -msgstr "" - -#: cinder/virt/baremetal/proxy.py:222 +#: cinder/volume/drivers/glusterfs.py:680 #, python-format -msgid "instance %s: rescued" +msgid "No file found with %s as backing file." msgstr "" -#: cinder/virt/baremetal/proxy.py:225 -msgid "_wait_for_rescue failed" +#: cinder/volume/drivers/glusterfs.py:690 +#, python-format +msgid "No snap found with %s as backing file." msgstr "" -#: cinder/virt/baremetal/proxy.py:242 -msgid "<============= spawn of baremetal =============>" +#: cinder/volume/drivers/glusterfs.py:701 +#, python-format +msgid "No file depends on %s." msgstr "" -#: cinder/virt/baremetal/proxy.py:255 +#: cinder/volume/drivers/glusterfs.py:727 #, python-format -msgid "instance %s: is building" +msgid "Check condition failed: %s expected to be None." msgstr "" -#: cinder/virt/baremetal/proxy.py:260 -msgid "Key is injected but instance is not running yet" +#: cinder/volume/drivers/glusterfs.py:778 +msgid "Call to Nova delete snapshot failed" msgstr "" -#: cinder/virt/baremetal/proxy.py:265 +#: cinder/volume/drivers/glusterfs.py:796 #, python-format -msgid "instance %s: booted" +msgid "status of snapshot %s is still \"deleting\"... waiting" msgstr "" -#: cinder/virt/baremetal/proxy.py:268 +#: cinder/volume/drivers/glusterfs.py:802 #, python-format -msgid "~~~~~~ current state = %s ~~~~~~" +msgid "Unable to delete snapshot %(id)s, status: %(status)s." msgstr "" -#: cinder/virt/baremetal/proxy.py:269 +#: cinder/volume/drivers/glusterfs.py:815 #, python-format -msgid "instance %s spawned successfully" +msgid "Timed out while waiting for Nova update for deletion of snapshot %(id)s." 
msgstr "" -#: cinder/virt/baremetal/proxy.py:272 +#: cinder/volume/drivers/glusterfs.py:904 #, python-format -msgid "instance %s:not booted" +msgid "%s must be a valid raw or qcow2 image." msgstr "" -#: cinder/virt/baremetal/proxy.py:274 -msgid "Bremetal assignment is overcommitted." +#: cinder/volume/drivers/glusterfs.py:967 +msgid "Extend volume is only supported for this driver when no snapshots exist." msgstr "" -#: cinder/virt/baremetal/proxy.py:354 +#: cinder/volume/drivers/glusterfs.py:975 #, python-format -msgid "instance %s: Creating image" +msgid "Unrecognized backing format: %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:473 +#: cinder/volume/drivers/glusterfs.py:990 #, python-format -msgid "instance %(inst_name)s: injecting %(injection)s into image %(img_id)s" +msgid "creating new volume at %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:484 +#: cinder/volume/drivers/glusterfs.py:993 #, python-format -msgid "" -"instance %(inst_name)s: ignoring error injecting data into image " -"%(img_id)s (%(e)s)" +msgid "file already exists at %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:529 +#: cinder/volume/drivers/glusterfs.py:1019 cinder/volume/drivers/nfs.py:159 #, python-format -msgid "instance %s: starting toXML method" +msgid "Exception during mounting %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:531 +#: cinder/volume/drivers/glusterfs.py:1021 #, python-format -msgid "instance %s: finished toXML method" +msgid "Available shares: %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:574 cinder/virt/libvirt/connection.py:1815 +#: cinder/volume/drivers/glusterfs.py:1038 +#, python-format msgid "" -"Cannot get the number of cpu, because this function is not implemented " -"for this platform. This error can be safely ignored for now." +"GlusterFS share at %(dir)s is not writable by the Cinder volume service. " +"Snapshot operations will not be supported." msgstr "" -#: cinder/virt/baremetal/proxy.py:714 +#: cinder/volume/drivers/gpfs.py:96 #, python-format -msgid "#### RLK: cpu_arch = %s " -msgstr "" - -#: cinder/virt/baremetal/proxy.py:746 -msgid "Updating!" +msgid "GPFS is not active. Detailed output: %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:773 cinder/virt/libvirt/connection.py:2609 -#: cinder/virt/xenapi/host.py:129 -msgid "Updating host stats" +#: cinder/volume/drivers/gpfs.py:97 +#, python-format +msgid "GPFS is not running - state: %s" msgstr "" -#: cinder/virt/baremetal/tilera.py:185 -msgid "free_node...." +#: cinder/volume/drivers/gpfs.py:140 +msgid "Option gpfs_mount_point_base is not set correctly." msgstr "" -#: cinder/virt/baremetal/tilera.py:216 -#, python-format -msgid "deactivate_node is called for node_id = %(id)s node_ip = %(ip)s" +#: cinder/volume/drivers/gpfs.py:147 +msgid "Option gpfs_images_share_mode is not set correctly." msgstr "" -#: cinder/virt/baremetal/tilera.py:221 -msgid "status of node is set to 0" +#: cinder/volume/drivers/gpfs.py:153 +msgid "Option gpfs_images_dir is not set correctly." msgstr "" -#: cinder/virt/baremetal/tilera.py:232 -msgid "rootfs is already removed" +#: cinder/volume/drivers/gpfs.py:160 +#, python-format +msgid "" +"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " +"belong to different file systems" msgstr "" -#: cinder/virt/baremetal/tilera.py:264 -msgid "Before ping to the bare-metal node" +#: cinder/volume/drivers/gpfs.py:169 +#, python-format +msgid "" +"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in " +"cluster daemon level %(cur)s - must be at least at level %(min)s." 
msgstr "" -#: cinder/virt/baremetal/tilera.py:275 +#: cinder/volume/drivers/gpfs.py:183 #, python-format -msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is ready" +msgid "%s must be an absolute path." msgstr "" -#: cinder/virt/baremetal/tilera.py:279 +#: cinder/volume/drivers/gpfs.py:188 #, python-format -msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is not ready, out_msg=%(out_msg)s" +msgid "%s is not a directory." msgstr "" -#: cinder/virt/baremetal/tilera.py:292 -msgid "Noting to do for tilera nodes: vmlinux is in CF" +#: cinder/volume/drivers/gpfs.py:197 +#, python-format +msgid "" +"The GPFS filesystem %(fs)s is not at the required release level. Current" +" level is %(cur)s, must be at least %(min)s." msgstr "" -#: cinder/virt/baremetal/tilera.py:316 -msgid "activate_node" +#: cinder/volume/drivers/gpfs.py:556 +#, python-format +msgid "Failed to resize volume %(volume_id)s, error: %(error)s" msgstr "" -#: cinder/virt/baremetal/tilera.py:330 -msgid "Node is unknown error state." +#: cinder/volume/drivers/gpfs.py:604 +#, python-format +msgid "mkfs failed on volume %(vol)s, error message was: %(err)s" msgstr "" -#: cinder/virt/disk/api.py:165 -msgid "no capable image handler configured" +#: cinder/volume/drivers/gpfs.py:637 +#, python-format +msgid "" +"%s cannot be accessed. Verify that GPFS is active and file system is " +"mounted." msgstr "" -#: cinder/virt/disk/api.py:178 +#: cinder/volume/drivers/lvm.py:189 #, python-format -msgid "unknown disk image handler: %s" +msgid "Unabled to delete due to existing snapshot for volume: %s" msgstr "" -#: cinder/virt/disk/api.py:189 -msgid "image already mounted" +#: cinder/volume/drivers/lvm.py:215 +#, python-format +msgid "Volume device file path %s does not exist." msgstr "" -#: cinder/virt/disk/api.py:276 cinder/virt/disk/guestfs.py:64 -#: cinder/virt/disk/guestfs.py:78 cinder/virt/disk/mount.py:100 +#: cinder/volume/drivers/lvm.py:221 #, python-format -msgid "Failed to mount filesystem: %s" +msgid "Size for volume: %s not found, cannot secure delete." msgstr "" -#: cinder/virt/disk/api.py:291 +#: cinder/volume/drivers/lvm.py:262 #, python-format -msgid "Failed to remove container: %s" +msgid "snapshot: %s not found, skipping delete operations" msgstr "" -#: cinder/virt/disk/api.py:441 +#: cinder/volume/drivers/lvm.py:361 #, python-format -msgid "User %(username)s not found in password file." +msgid "Unable to update stats on non-initialized Volume Group: %s" msgstr "" -#: cinder/virt/disk/api.py:457 +#: cinder/volume/drivers/lvm.py:462 #, python-format -msgid "User %(username)s not found in shadow file." 
+msgid "Error creating iSCSI target, retrying creation for target: %s" msgstr "" -#: cinder/virt/disk/guestfs.py:39 +#: cinder/volume/drivers/lvm.py:482 #, python-format -msgid "unsupported partition: %s" +msgid "volume_info:%s" msgstr "" -#: cinder/virt/disk/guestfs.py:77 -msgid "unknown guestmount error" +#: cinder/volume/drivers/lvm.py:518 +msgid "Detected inconsistency in provider_location id" msgstr "" -#: cinder/virt/disk/loop.py:30 +#: cinder/volume/drivers/lvm.py:519 cinder/volume/drivers/lvm.py:724 +#: cinder/volume/drivers/huawei/rest_common.py:1225 #, python-format -msgid "Could not attach image to loopback: %s" -msgstr "" - -#: cinder/virt/disk/mount.py:76 -msgid "no partitions found" +msgid "%s" msgstr "" -#: cinder/virt/disk/mount.py:77 +#: cinder/volume/drivers/lvm.py:573 #, python-format -msgid "Failed to map partitions: %s" +msgid "Symbolic link %s not found" msgstr "" -#: cinder/virt/disk/nbd.py:58 -msgid "nbd unavailable: module not loaded" +#: cinder/volume/drivers/nfs.py:109 +msgid "Driver specific implementation needs to return mount_point_base." msgstr "" -#: cinder/virt/disk/nbd.py:63 -msgid "No free nbd devices" +#: cinder/volume/drivers/nfs.py:263 +#, python-format +msgid "Expected volume size was %d" msgstr "" -#: cinder/virt/disk/nbd.py:81 +#: cinder/volume/drivers/nfs.py:264 #, python-format -msgid "qemu-nbd error: %s" +msgid " but size is now %d" msgstr "" -#: cinder/virt/disk/nbd.py:93 +#: cinder/volume/drivers/nfs.py:361 #, python-format -msgid "nbd device %s did not show up" +msgid "%s is already mounted" msgstr "" -#: cinder/virt/libvirt/connection.py:265 +#: cinder/volume/drivers/nfs.py:421 #, python-format -msgid "Connecting to libvirt: %s" +msgid "There's no NFS config file configured (%s)" msgstr "" -#: cinder/virt/libvirt/connection.py:286 -msgid "Connection to libvirt broke" +#: cinder/volume/drivers/nfs.py:426 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" msgstr "" -#: cinder/virt/libvirt/connection.py:388 +#: cinder/volume/drivers/nfs.py:431 #, python-format -msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s" +msgid "NFS config 'nfs_oversub_ratio' invalid. Must be > 0: %s" msgstr "" -#: cinder/virt/libvirt/connection.py:400 +#: cinder/volume/drivers/nfs.py:439 #, python-format -msgid "" -"Error from libvirt during saved instance removal. Code=%(errcode)s " -"Error=%(e)s" +msgid "NFS config 'nfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" msgstr "" -#: cinder/virt/libvirt/connection.py:411 +#: cinder/volume/drivers/nfs.py:493 #, python-format -msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s" +msgid "Selected %s as target nfs share." msgstr "" -#: cinder/virt/libvirt/connection.py:423 -msgid "Instance destroyed successfully." +#: cinder/volume/drivers/nfs.py:526 +#, python-format +msgid "%s is above nfs_used_ratio" msgstr "" -#: cinder/virt/libvirt/connection.py:435 +#: cinder/volume/drivers/nfs.py:529 #, python-format -msgid "Error from libvirt during unfilter. 
Code=%(errcode)s Error=%(e)s" +msgid "%s is above nfs_oversub_ratio" msgstr "" -#: cinder/virt/libvirt/connection.py:461 +#: cinder/volume/drivers/nfs.py:532 #, python-format -msgid "Deleting instance files %(target)s" +msgid "%s reserved space is above nfs_oversub_ratio" msgstr "" -#: cinder/virt/libvirt/connection.py:554 -msgid "attaching LXC block device" +#: cinder/volume/drivers/rbd.py:160 +#, python-format +msgid "Invalid argument - whence=%s not supported" msgstr "" -#: cinder/virt/libvirt/connection.py:567 -msgid "detaching LXC block device" +#: cinder/volume/drivers/rbd.py:164 +msgid "Invalid argument" msgstr "" -#: cinder/virt/libvirt/connection.py:692 -#, fuzzy, python-format -msgid "Instance soft rebooted successfully." -msgstr "volume %s: creato con successo" +#: cinder/volume/drivers/rbd.py:183 +msgid "fileno() not supported by RBD()" +msgstr "" -#: cinder/virt/libvirt/connection.py:696 -#, fuzzy -msgid "Failed to soft reboot instance." -msgstr "Impossibile riavviare l'istanza" +#: cinder/volume/drivers/rbd.py:210 +#, python-format +msgid "error opening rbd image %s" +msgstr "" -#: cinder/virt/libvirt/connection.py:725 -msgid "Instance shutdown successfully." +#: cinder/volume/drivers/rbd.py:259 +msgid "rados and rbd python libraries not found" msgstr "" -#: cinder/virt/libvirt/connection.py:761 cinder/virt/libvirt/connection.py:905 -msgid "During reboot, instance disappeared." +#: cinder/volume/drivers/rbd.py:265 +msgid "error connecting to ceph cluster" msgstr "" -#: cinder/virt/libvirt/connection.py:766 -msgid "Instance rebooted successfully." +#: cinder/volume/drivers/rbd.py:346 cinder/volume/drivers/sheepdog.py:178 +msgid "error refreshing volume stats" msgstr "" -#: cinder/virt/libvirt/connection.py:867 cinder/virt/xenapi/vmops.py:1358 +#: cinder/volume/drivers/rbd.py:377 #, python-format -msgid "" -"Found %(migration_count)d unconfirmed migrations older than " -"%(confirm_window)d seconds" +msgid "clone depth exceeds limit of %s" msgstr "" -#: cinder/virt/libvirt/connection.py:871 +#: cinder/volume/drivers/rbd.py:411 #, python-format -msgid "Automatically confirming migration %d" +msgid "maximum clone depth (%d) has been reached - flattening source volume" msgstr "" -#: cinder/virt/libvirt/connection.py:896 -msgid "Instance is running" +#: cinder/volume/drivers/rbd.py:423 +#, python-format +msgid "flattening source volume %s" msgstr "" -#: cinder/virt/libvirt/connection.py:910 -msgid "Instance spawned successfully." 
+#: cinder/volume/drivers/rbd.py:435 +#, python-format +msgid "creating snapshot='%s'" msgstr "" -#: cinder/virt/libvirt/connection.py:926 +#: cinder/volume/drivers/rbd.py:445 #, python-format -msgid "data: %(data)r, fpath: %(fpath)r" +msgid "cloning '%(src_vol)s@%(src_snap)s' to '%(dest)s'" msgstr "" -#: cinder/virt/libvirt/connection.py:978 -#, fuzzy -msgid "Guest does not have a console available" -msgstr "L'utente non ha i privilegi dell'amministratore" +#: cinder/volume/drivers/rbd.py:459 +msgid "clone created successfully" +msgstr "" -#: cinder/virt/libvirt/connection.py:1020 +#: cinder/volume/drivers/rbd.py:468 #, python-format -msgid "Path '%(path)s' supports direct I/O" +msgid "creating volume '%s'" msgstr "" -#: cinder/virt/libvirt/connection.py:1024 +#: cinder/volume/drivers/rbd.py:484 #, python-format -msgid "Path '%(path)s' does not support direct I/O: '%(ex)s'" +msgid "flattening %(pool)s/%(img)s" msgstr "" -#: cinder/virt/libvirt/connection.py:1028 cinder/virt/libvirt/connection.py:1032 +#: cinder/volume/drivers/rbd.py:490 #, python-format -msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'" +msgid "cloning %(pool)s/%(img)s@%(snap)s to %(dst)s" msgstr "" -#: cinder/virt/libvirt/connection.py:1153 -msgid "Creating image" +#: cinder/volume/drivers/rbd.py:527 +msgid "volume has no backup snaps" msgstr "" -#: cinder/virt/libvirt/connection.py:1339 +#: cinder/volume/drivers/rbd.py:550 #, python-format -msgid "Injecting %(injection)s into image %(img_id)s" +msgid "volume %s is not a clone" msgstr "" -#: cinder/virt/libvirt/connection.py:1349 +#: cinder/volume/drivers/rbd.py:568 #, python-format -msgid "Ignoring error injecting data into image %(img_id)s (%(e)s)" +msgid "deleting parent snapshot %s" msgstr "" -#: cinder/virt/libvirt/connection.py:1381 +#: cinder/volume/drivers/rbd.py:579 #, python-format -msgid "block_device_list %s" +msgid "deleting parent %s" msgstr "" -#: cinder/virt/libvirt/connection.py:1658 -msgid "Starting toXML method" +#: cinder/volume/drivers/rbd.py:593 +#, python-format +msgid "volume %s no longer exists in backend" msgstr "" -#: cinder/virt/libvirt/connection.py:1662 -msgid "Finished toXML method" +#: cinder/volume/drivers/rbd.py:609 +msgid "volume has clone snapshot(s)" msgstr "" -#: cinder/virt/libvirt/connection.py:1679 +#: cinder/volume/drivers/rbd.py:625 #, python-format +msgid "deleting rbd volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:629 msgid "" -"Error from libvirt while looking up %(instance_name)s: [Error Code " -"%(error_code)s] %(ex)s" +"ImageBusy error raised while deleting rbd volume. This may have been " +"caused by a connection from a client that has crashed and, if so, may be " +"resolved by retrying the delete after 30 seconds has elapsed." 
msgstr "" -#: cinder/virt/libvirt/connection.py:1920 -msgid "libvirt version is too old (does not support getVersion)" +#: cinder/volume/drivers/rbd.py:642 +msgid "volume is a clone so cleaning references" msgstr "" -#: cinder/virt/libvirt/connection.py:1942 +#: cinder/volume/drivers/rbd.py:696 #, python-format -msgid "'' must be 1, but %d\n" +msgid "connection data: %s" msgstr "" -#: cinder/virt/libvirt/connection.py:1969 -#, python-format -msgid "topology (%(topology)s) must have %(ks)s" +#: cinder/volume/drivers/rbd.py:705 +msgid "Not stored in rbd" msgstr "" -#: cinder/virt/libvirt/connection.py:2067 -#, python-format -msgid "" -"Instance launched has CPU info:\n" -"%s" +#: cinder/volume/drivers/rbd.py:709 +msgid "Blank components" msgstr "" -#: cinder/virt/libvirt/connection.py:2079 -#, python-format -msgid "" -"CPU doesn't have compatibility.\n" -"\n" -"%(ret)s\n" -"\n" -"Refer to %(u)s" -msgstr "" +#: cinder/volume/drivers/rbd.py:712 +#, fuzzy +msgid "Not an rbd snapshot" +msgstr "istanza %s: creazione snapshot in corso" -#: cinder/virt/libvirt/connection.py:2136 -#, python-format -msgid "Timeout migrating for %s. nwfilter not found." -msgstr "" +#: cinder/volume/drivers/rbd.py:724 +#, fuzzy, python-format +msgid "not cloneable: %s" +msgstr "risposta %s" -#: cinder/virt/libvirt/connection.py:2352 +#: cinder/volume/drivers/rbd.py:728 #, python-format -msgid "skipping %(path)s since it looks like volume" +msgid "%s is in a different ceph cluster" msgstr "" -#: cinder/virt/libvirt/connection.py:2407 -#, python-format -msgid "Getting disk size of %(i_name)s: %(e)s" +#: cinder/volume/drivers/rbd.py:733 +msgid "rbd image clone requires image format to be 'raw' but image {0} is '{1}'" msgstr "" -#: cinder/virt/libvirt/connection.py:2458 -#, python-format -msgid "Instance %s: Starting migrate_disk_and_power_off" -msgstr "" +#: cinder/volume/drivers/rbd.py:747 +#, fuzzy, python-format +msgid "Unable to open image %(loc)s: %(err)s" +msgstr "Impossibile localizzare il volume %s" -#: cinder/virt/libvirt/connection.py:2513 -msgid "During wait running, instance disappeared." +#: cinder/volume/drivers/rbd.py:817 +msgid "volume backup complete." msgstr "" -#: cinder/virt/libvirt/connection.py:2518 -msgid "Instance running successfully." +#: cinder/volume/drivers/rbd.py:830 +msgid "volume restore complete." msgstr "" -#: cinder/virt/libvirt/connection.py:2525 +#: cinder/volume/drivers/rbd.py:840 cinder/volume/drivers/sheepdog.py:195 #, python-format -msgid "Instance %s: Starting finish_migration" +msgid "Failed to Extend Volume %(volname)s" msgstr "" -#: cinder/virt/libvirt/connection.py:2565 +#: cinder/volume/drivers/rbd.py:845 cinder/volume/drivers/sheepdog.py:200 +#: cinder/volume/drivers/windows/windows.py:223 #, python-format -msgid "Instance %s: Starting finish_revert_migration" +msgid "Extend volume from %(old_size)s GB to %(new_size)s GB." msgstr "" -#: cinder/virt/libvirt/firewall.py:42 -msgid "" -"Libvirt module could not be loaded. NWFilterFirewall will not work " -"correctly." 
+#: cinder/volume/drivers/scality.py:67 +msgid "Value required for 'scality_sofs_config'" msgstr "" -#: cinder/virt/libvirt/firewall.py:93 -msgid "Called setup_basic_filtering in nwfilter" +#: cinder/volume/drivers/scality.py:78 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" msgstr "" -#: cinder/virt/libvirt/firewall.py:101 -msgid "Ensuring static filters" +#: cinder/volume/drivers/scality.py:84 +msgid "Cannot execute /sbin/mount.sofs" msgstr "" -#: cinder/virt/libvirt/firewall.py:171 -#, python-format -msgid "The nwfilter(%(instance_filter_name)s) is not found." +#: cinder/volume/drivers/scality.py:105 +msgid "Cannot mount Scality SOFS, check syslog for errors" msgstr "" -#: cinder/virt/libvirt/firewall.py:217 +#: cinder/volume/drivers/scality.py:139 #, python-format -msgid "The nwfilter(%(instance_filter_name)s) for%(name)s is not found." +msgid "Cannot find volume dir for Scality SOFS at '%s'" msgstr "" -#: cinder/virt/libvirt/firewall.py:233 -msgid "iptables firewall: Setup Basic Filtering" +#: cinder/volume/drivers/sheepdog.py:59 +#, python-format +msgid "Sheepdog is not working: %s" msgstr "" -#: cinder/virt/libvirt/firewall.py:252 -msgid "Attempted to unfilter instance which is not filtered" +#: cinder/volume/drivers/sheepdog.py:64 +msgid "Sheepdog is not working" msgstr "" -#: cinder/virt/libvirt/imagecache.py:170 +#: cinder/volume/drivers/solidfire.py:144 #, python-format -msgid "%s is a valid instance name" +msgid "Payload for SolidFire API call: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:173 +#: cinder/volume/drivers/solidfire.py:151 #, python-format -msgid "%s has a disk file" +msgid "" +"Failed to make httplib connection to SolidFire Cluster: %s (verify san_ip " +"settings)" msgstr "" -#: cinder/virt/libvirt/imagecache.py:175 +#: cinder/volume/drivers/solidfire.py:154 #, python-format -msgid "Instance %(instance)s is backed by %(backing)s" +msgid "Failed to make httplib connection: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:186 +#: cinder/volume/drivers/solidfire.py:161 #, python-format msgid "" -"Instance %(instance)s is using a backing file %(backing)s which does not " -"appear in the image service" +"Request to SolidFire cluster returned bad status: %(status)s / %(reason)s" +" (check san_login/san_password settings)" msgstr "" -#: cinder/virt/libvirt/imagecache.py:237 +#: cinder/volume/drivers/solidfire.py:166 #, python-format -msgid "%(id)s (%(base_file)s): image verification failed" +msgid "HTTP request failed, with status: %(status)s and reason: %(reason)s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:247 +#: cinder/volume/drivers/solidfire.py:177 #, python-format -msgid "%(id)s (%(base_file)s): image verification skipped, no hash stored" +msgid "Call to json.loads() raised an exception: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:266 +#: cinder/volume/drivers/solidfire.py:183 #, python-format -msgid "Cannot remove %(base_file)s, it does not exist" +msgid "Results of SolidFire API call: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:278 +#: cinder/volume/drivers/solidfire.py:187 #, python-format -msgid "Base file too young to remove: %s" +msgid "Clone operation encountered: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:281 +#: cinder/volume/drivers/solidfire.py:189 #, python-format -msgid "Removing base file: %s" +msgid "Waiting for outstanding operation before retrying snapshot: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:288 +#: cinder/volume/drivers/solidfire.py:195 #, python-format -msgid "Failed to
remove %(base_file)s, error was %(error)s" +msgid "Detected xDBVersionMismatch, retry %s of 5" msgstr "" -#: cinder/virt/libvirt/imagecache.py:299 +#: cinder/volume/drivers/solidfire.py:202 +#: cinder/volume/drivers/solidfire.py:271 +#: cinder/volume/drivers/solidfire.py:366 +#, fuzzy, python-format +msgid "API response: %s" +msgstr "risposta %s" + +#: cinder/volume/drivers/solidfire.py:222 #, python-format -msgid "%(id)s (%(base_file)s): checking" +msgid "Found solidfire account: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:318 +#: cinder/volume/drivers/solidfire.py:253 #, python-format -msgid "" -"%(id)s (%(base_file)s): in use: on this node %(local)d local, %(remote)d " -"on other nodes" +msgid "solidfire account: %s does not exist, create it..." msgstr "" -#: cinder/virt/libvirt/imagecache.py:330 +#: cinder/volume/drivers/solidfire.py:315 #, python-format -msgid "" -"%(id)s (%(base_file)s): warning -- an absent base file is in use! " -"instances: %(instance_list)s" +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" msgstr "" -#: cinder/virt/libvirt/imagecache.py:338 -#, python-format -msgid "%(id)s (%(base_file)s): in use on (%(remote)d on other nodes)" +#: cinder/volume/drivers/solidfire.py:398 +msgid "Failed to get model update from clone" msgstr "" -#: cinder/virt/libvirt/imagecache.py:348 +#: cinder/volume/drivers/solidfire.py:410 #, python-format -msgid "%(id)s (%(base_file)s): image is not in use" +msgid "Failed volume create: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:354 +#: cinder/volume/drivers/solidfire.py:425 #, python-format -msgid "%(id)s (%(base_file)s): image is in use" +msgid "More than one valid preset was detected, using %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:377 +#: cinder/volume/drivers/solidfire.py:460 #, python-format -msgid "Skipping verification, no base directory at %s" +msgid "Failed to get SolidFire Volume: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:381 -msgid "Verify base images" +#: cinder/volume/drivers/solidfire.py:469 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." msgstr "" -#: cinder/virt/libvirt/imagecache.py:388 +#: cinder/volume/drivers/solidfire.py:478 #, python-format -msgid "Image id %(id)s yields fingerprint %(fingerprint)s" +msgid "Volume %s, not found on SF Cluster." msgstr "" -#: cinder/virt/libvirt/imagecache.py:406 +#: cinder/volume/drivers/solidfire.py:481 #, python-format -msgid "Unknown base file: %s" +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:550 +msgid "Enter SolidFire delete_volume..." msgstr "" -#: cinder/virt/libvirt/imagecache.py:411 +#: cinder/volume/drivers/solidfire.py:554 #, python-format -msgid "Active base files: %s" +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:556 +msgid "This usually means the volume was never successfully created." msgstr "" -#: cinder/virt/libvirt/imagecache.py:414 +#: cinder/volume/drivers/solidfire.py:569 #, python-format -msgid "Corrupt base files: %s" +msgid "Failed to delete SolidFire Volume: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:418 +#: cinder/volume/drivers/solidfire.py:572 +#: cinder/volume/drivers/solidfire.py:646 +#: cinder/volume/drivers/solidfire.py:709 +#: cinder/volume/drivers/solidfire.py:734 #, python-format -msgid "Removable base files: %s" +msgid "Volume ID %s was not found on the SolidFire Cluster!" 
msgstr "" -#: cinder/virt/libvirt/imagecache.py:426 -msgid "Verification complete" +#: cinder/volume/drivers/solidfire.py:575 +msgid "Leaving SolidFire delete_volume" msgstr "" -#: cinder/virt/libvirt/utils.py:264 -msgid "Unable to find an open port" +#: cinder/volume/drivers/solidfire.py:579 +msgid "Executing SolidFire ensure_export..." msgstr "" -#: cinder/virt/libvirt/vif.py:90 -#, python-format -msgid "Ensuring vlan %(vlan)s and bridge %(bridge)s" +#: cinder/volume/drivers/solidfire.py:587 +msgid "Executing SolidFire create_export..." msgstr "" -#: cinder/virt/libvirt/vif.py:99 -#, python-format -msgid "Ensuring bridge %s" +#: cinder/volume/drivers/solidfire.py:638 +msgid "Entering SolidFire extend_volume..." msgstr "" -#: cinder/virt/libvirt/vif.py:165 cinder/virt/libvirt/vif.py:220 -#, python-format -msgid "Failed while unplugging vif of instance '%s'" +#: cinder/volume/drivers/solidfire.py:660 +msgid "Leaving SolidFire extend_volume" msgstr "" -#: cinder/virt/libvirt/volume.py:163 -#, python-format -msgid "iSCSI device not found at %s" +#: cinder/volume/drivers/solidfire.py:665 +msgid "Updating cluster status info" msgstr "" -#: cinder/virt/libvirt/volume.py:166 -#, python-format -msgid "" -"ISCSI volume not yet found at: %(mount_device)s. Will rescan & retry. " -"Try number: %(tries)s" +#: cinder/volume/drivers/solidfire.py:673 +#, fuzzy +msgid "Failed to get updated stats" +msgstr "Impossibile sospendere l'istanza" + +#: cinder/volume/drivers/solidfire.py:703 +#: cinder/volume/drivers/solidfire.py:728 +msgid "Entering SolidFire attach_volume..." msgstr "" -#: cinder/virt/libvirt/volume.py:178 +#: cinder/volume/drivers/solidfire.py:773 +msgid "Leaving SolidFire transfer volume" +msgstr "" + +#: cinder/volume/drivers/zadara.py:236 #, python-format -msgid "Found iSCSI node %(mount_device)s (after %(tries)s rescans)" +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" msgstr "" -#: cinder/virt/vmwareapi/error_util.py:93 +#: cinder/volume/drivers/zadara.py:260 #, python-format -msgid "Error(s) %s occurred in the call to RetrieveProperties" +msgid "Operation completed. %(data)s" msgstr "" -#: cinder/virt/vmwareapi/fake.py:44 cinder/virt/xenapi/fake.py:77 +#: cinder/volume/drivers/zadara.py:357 #, python-format -msgid "%(text)s: _db_content => %(content)s" +msgid "Pool %(name)s: %(total)sGB total, %(free)sGB free" msgstr "" -#: cinder/virt/vmwareapi/fake.py:131 +#: cinder/volume/drivers/zadara.py:408 cinder/volume/drivers/zadara.py:531 #, python-format -msgid "Property %(attr)s not set for the managed object %(objName)s" +msgid "Volume %(name)s could not be found. 
It might be already deleted" msgstr "" -#: cinder/virt/vmwareapi/fake.py:437 -msgid "There is no VM registered" +#: cinder/volume/drivers/zadara.py:438 +#, python-format +msgid "Create snapshot: %s" msgstr "" -#: cinder/virt/vmwareapi/fake.py:439 cinder/virt/vmwareapi/fake.py:609 +#: cinder/volume/drivers/zadara.py:445 cinder/volume/drivers/zadara.py:490 +#: cinder/volume/drivers/zadara.py:516 #, python-format -msgid "Virtual Machine with ref %s is not there" +msgid "Volume %(name)s not found" msgstr "" -#: cinder/virt/vmwareapi/fake.py:502 +#: cinder/volume/drivers/zadara.py:456 #, python-format -msgid "Logging out a session that is invalid or already logged out: %s" +msgid "Delete snapshot: %s" msgstr "" -#: cinder/virt/vmwareapi/fake.py:517 -msgid "Session is faulty" +#: cinder/volume/drivers/zadara.py:464 +#, python-format +msgid "snapshot: original volume %s not found, skipping delete operation" msgstr "" -#: cinder/virt/vmwareapi/fake.py:520 -msgid "Session Invalid" +#: cinder/volume/drivers/zadara.py:472 +#, python-format +msgid "snapshot: snapshot %s not found, skipping delete operation" msgstr "" -#: cinder/virt/vmwareapi/fake.py:606 -msgid " No Virtual Machine has been registered yet" +#: cinder/volume/drivers/zadara.py:483 +#, python-format +msgid "Creating volume from snapshot: %s" msgstr "" -#: cinder/virt/vmwareapi/io_util.py:99 +#: cinder/volume/drivers/zadara.py:496 #, python-format -msgid "Glance image %s is in killed state" +msgid "Snapshot %(name)s not found" msgstr "" -#: cinder/virt/vmwareapi/io_util.py:107 +#: cinder/volume/drivers/zadara.py:614 #, python-format -msgid "Glance image %(image_id)s is in unknown state - %(state)s" +msgid "Attach properties: %(properties)s" msgstr "" -#: cinder/virt/vmwareapi/network_utils.py:128 +#: cinder/volume/drivers/emc/emc_smis_common.py:40 msgid "" -"ESX SOAP server returned an empty port group for the host system in its " -"response" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." msgstr "" -#: cinder/virt/vmwareapi/network_utils.py:155 -#, python-format -msgid "Creating Port Group with name %s on the ESX host" +#: cinder/volume/drivers/emc/emc_smis_common.py:79 +msgid "Entering create_volume." msgstr "" -#: cinder/virt/vmwareapi/network_utils.py:169 +#: cinder/volume/drivers/emc/emc_smis_common.py:83 #, python-format -msgid "Created Port Group with name %s on the ESX host" +msgid "Create Volume: %(volume)s Size: %(size)lu" msgstr "" -#: cinder/virt/vmwareapi/read_write_util.py:150 +#: cinder/volume/drivers/emc/emc_smis_common.py:91 #, python-format -msgid "Exception during HTTP connection close in VMWareHTTpWrite. Exception is %s" -msgstr "" - -#: cinder/virt/vmwareapi/vim.py:84 -msgid "Unable to import suds." +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" msgstr "" -#: cinder/virt/vmwareapi/vim.py:90 -msgid "Must specify vmwareapi_wsdl_loc" +#: cinder/volume/drivers/emc/emc_smis_common.py:98 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" msgstr "" -#: cinder/virt/vmwareapi/vim.py:145 +#: cinder/volume/drivers/emc/emc_smis_common.py:107 #, python-format -msgid "No such SOAP method '%s' provided by VI SDK" +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." 
msgstr "" -#: cinder/virt/vmwareapi/vim.py:150 +#: cinder/volume/drivers/emc/emc_smis_common.py:115 #, python-format -msgid "httplib error in %s: " +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" msgstr "" -#: cinder/virt/vmwareapi/vim.py:157 +#: cinder/volume/drivers/emc/emc_smis_common.py:130 #, python-format -msgid "Socket error in %s: " +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vim.py:162 +#: cinder/volume/drivers/emc/emc_smis_common.py:137 #, python-format -msgid "Type error in %s: " +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" msgstr "" -#: cinder/virt/vmwareapi/vim.py:166 +#: cinder/volume/drivers/emc/emc_smis_common.py:144 #, python-format -msgid "Exception in %s " +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:66 -msgid "Getting list of instances" +#: cinder/volume/drivers/emc/emc_smis_common.py:152 +msgid "Entering create_volume_from_snapshot." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:82 +#: cinder/volume/drivers/emc/emc_smis_common.py:157 #, python-format -msgid "Got total of %s instances" +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:126 -msgid "Couldn't get a local Datastore reference" +#: cinder/volume/drivers/emc/emc_smis_common.py:167 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:196 +#: cinder/volume/drivers/emc/emc_smis_common.py:177 #, python-format -msgid "Creating VM with the name %s on the ESX host" +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:205 +#: cinder/volume/drivers/emc/emc_smis_common.py:188 #, python-format -msgid "Created VM with the name %s on the ESX host" +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:234 +#: cinder/volume/drivers/emc/emc_smis_common.py:197 #, python-format msgid "" -"Creating Virtual Disk of size %(vmdk_file_size_in_kb)s KB and adapter " -"type %(adapter_type)s on the ESX host local store %(data_store_name)s" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:251 +#: cinder/volume/drivers/emc/emc_smis_common.py:218 #, python-format msgid "" -"Created Virtual Disk of size %(vmdk_file_size_in_kb)s KB on the ESX host " -"local store %(data_store_name)s" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. 
Return code: %(rc)lu.Error: %(error)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:260 +#: cinder/volume/drivers/emc/emc_smis_common.py:230 #, python-format msgid "" -"Deleting the file %(flat_uploaded_vmdk_path)s on the ESX host localstore " -"%(data_store_name)s" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully cloned volume from snapshot. Finding the " +"clone relationship." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:272 +#: cinder/volume/drivers/emc/emc_smis_common.py:241 #, python-format msgid "" -"Deleted the file %(flat_uploaded_vmdk_path)s on the ESX host local store " -"%(data_store_name)s" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:283 +#: cinder/volume/drivers/emc/emc_smis_common.py:257 #, python-format msgid "" -"Downloading image file data %(image_ref)s to the ESX data store " -"%(data_store_name)s" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:298 +#: cinder/volume/drivers/emc/emc_smis_common.py:266 #, python-format msgid "" -"Downloaded image file data %(image_ref)s to the ESX data store " -"%(data_store_name)s" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:315 +#: cinder/volume/drivers/emc/emc_smis_common.py:278 #, python-format -msgid "Reconfiguring VM instance %s to attach the image disk" +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:322 -#, python-format -msgid "Reconfigured VM instance %s to attach the image disk" +#: cinder/volume/drivers/emc/emc_smis_common.py:287 +msgid "Entering create_cloned_volume." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:329 +#: cinder/volume/drivers/emc/emc_smis_common.py:292 #, python-format -msgid "Powering on the VM instance %s" +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:335 +#: cinder/volume/drivers/emc/emc_smis_common.py:302 #, python-format -msgid "Powered on the VM instance %s" +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:381 +#: cinder/volume/drivers/emc/emc_smis_common.py:312 #, python-format -msgid "Creating Snapshot of the VM instance %s " +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume."
msgstr "" -#: cinder/virt/vmwareapi/vmops.py:391 +#: cinder/volume/drivers/emc/emc_smis_common.py:321 #, python-format -msgid "Created Snapshot of the VM instance %s " +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:434 +#: cinder/volume/drivers/emc/emc_smis_common.py:342 #, python-format -msgid "Copying disk data before snapshot of the VM instance %s" +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. Return code: %(rc)lu.Error: %(error)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:447 +#: cinder/volume/drivers/emc/emc_smis_common.py:354 #, python-format -msgid "Copied disk data before snapshot of the VM instance %s" +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:456 +#: cinder/volume/drivers/emc/emc_smis_common.py:365 #, python-format -msgid "Uploading image %s" +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:469 +#: cinder/volume/drivers/emc/emc_smis_common.py:381 #, python-format -msgid "Uploaded image %s" +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:479 +#: cinder/volume/drivers/emc/emc_smis_common.py:390 #, python-format -msgid "Deleting temporary vmdk file %s" +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:488 +#: cinder/volume/drivers/emc/emc_smis_common.py:402 #, python-format -msgid "Deleted temporary vmdk file %s" +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:520 -msgid "instance is not powered on" +#: cinder/volume/drivers/emc/emc_smis_common.py:411 +msgid "Entering delete_volume." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:527 +#: cinder/volume/drivers/emc/emc_smis_common.py:413 #, python-format -msgid "Rebooting guest OS of VM %s" +msgid "Delete Volume: %(volume)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:530 +#: cinder/volume/drivers/emc/emc_smis_common.py:420 #, python-format -msgid "Rebooted guest OS of VM %s" +msgid "Volume %(name)s not found on the array. No volume to delete." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:532 +#: cinder/volume/drivers/emc/emc_smis_common.py:430 #, python-format -msgid "Doing hard reboot of VM %s" +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." 
msgstr "" -#: cinder/virt/vmwareapi/vmops.py:536 +#: cinder/volume/drivers/emc/emc_smis_common.py:438 #, python-format -msgid "Did hard reboot of VM %s" +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:548 +#: cinder/volume/drivers/emc/emc_smis_common.py:442 #, python-format -msgid "instance - %s not present" +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:567 +#: cinder/volume/drivers/emc/emc_smis_common.py:456 #, python-format -msgid "Powering off the VM %s" +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:572 +#: cinder/volume/drivers/emc/emc_smis_common.py:465 #, python-format -msgid "Powered off the VM %s" +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:576 -#, python-format -msgid "Unregistering the VM %s" +#: cinder/volume/drivers/emc/emc_smis_common.py:472 +msgid "Entering create_snapshot." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:579 +#: cinder/volume/drivers/emc/emc_smis_common.py:476 #, python-format -msgid "Unregistered the VM %s" +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:581 +#: cinder/volume/drivers/emc/emc_smis_common.py:488 #, python-format -msgid "" -"In vmwareapi:vmops:destroy, got this exception while un-registering the " -"VM: %s" +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:592 +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:497 +#: cinder/volume/drivers/emc/emc_smis_common.py:567 #, python-format -msgid "Deleting contents of the VM %(name)s from datastore %(datastore_name)s" +msgid "Cannot find Replication Service to create snapshot for volume %s." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:602 +#: cinder/volume/drivers/emc/emc_smis_common.py:502 #, python-format -msgid "Deleted contents of the VM %(name)s from datastore %(datastore_name)s" +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:607 +#: cinder/volume/drivers/emc/emc_smis_common.py:518 #, python-format msgid "" -"In vmwareapi:vmops:destroy, got this exception while deleting the VM " -"contents from the disk: %s" -msgstr "" - -#: cinder/virt/vmwareapi/vmops.py:615 -msgid "pause not supported for vmwareapi" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:619 -msgid "unpause not supported for vmwareapi" -msgstr "" - -#: cinder/virt/vmwareapi/vmops.py:633 +#: cinder/volume/drivers/emc/emc_smis_common.py:527 #, python-format -msgid "Suspending the VM %s " +msgid "" +"Error Create Snapshot: %(snapshot)s Volume: %(volume)s Error: " +"%(errordesc)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:637 +#: cinder/volume/drivers/emc/emc_smis_common.py:535 #, python-format -msgid "Suspended the VM %s " +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:640 -msgid "instance is powered off and can not be suspended." 
+#: cinder/volume/drivers/emc/emc_smis_common.py:541 +msgid "Entering delete_snapshot." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:643 +#: cinder/volume/drivers/emc/emc_smis_common.py:545 #, python-format -msgid "VM %s was already in suspended state. So returning without doing anything" +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:656 +#: cinder/volume/drivers/emc/emc_smis_common.py:551 #, python-format -msgid "Resuming the VM %s" +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSynchronization_SV_SV." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:661 +#: cinder/volume/drivers/emc/emc_smis_common.py:559 #, python-format -msgid "Resumed the VM %s " -msgstr "" - -#: cinder/virt/vmwareapi/vmops.py:663 -msgid "instance is not in a suspended state" +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:699 -msgid "get_diagnostics not implemented for vmwareapi" +#: cinder/volume/drivers/emc/emc_smis_common.py:574 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:757 +#: cinder/volume/drivers/emc/emc_smis_common.py:590 #, python-format msgid "" -"Reconfiguring VM instance %(name)s to set the machine id with ip - " -"%(ip_addr)s" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:765 +#: cinder/volume/drivers/emc/emc_smis_common.py:599 #, python-format msgid "" -"Reconfigured VM instance %(name)s to set the machine id with ip - " -"%(ip_addr)s" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:802 +#: cinder/volume/drivers/emc/emc_smis_common.py:611 #, python-format -msgid "Creating directory with path %s" +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:806 +#: cinder/volume/drivers/emc/emc_smis_common.py:621 #, python-format -msgid "Created directory with path %s" +msgid "Create export: %(volume)s" msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:89 +#: cinder/volume/drivers/emc/emc_smis_common.py:626 #, python-format -msgid "Downloading image %s from glance image server" +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:103 +#: cinder/volume/drivers/emc/emc_smis_common.py:648 #, python-format -msgid "Downloaded image %s from glance image server" +msgid "" +"ExposePaths: %(vol)s ConfigService: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:108 +#: cinder/volume/drivers/emc/emc_smis_common.py:663 #, python-format -msgid "Uploading image %s to the Glance image server" +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:129 +#: cinder/volume/drivers/emc/emc_smis_common.py:674 #, python-format -msgid "Uploaded image %s to the Glance image server" +msgid "Error mapping volume %s."
msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:139 +#: cinder/volume/drivers/emc/emc_smis_common.py:678 +#, fuzzy, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "volume %s: rimosso con successo" + +#: cinder/volume/drivers/emc/emc_smis_common.py:694 #, python-format -msgid "Getting image size for the image %s" +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:143 +#: cinder/volume/drivers/emc/emc_smis_common.py:707 #, python-format -msgid "Got image size of %(size)s for the image %(image)s" +msgid "Error unmapping volume %s." msgstr "" -#: cinder/virt/xenapi/fake.py:553 cinder/virt/xenapi/fake.py:652 -#: cinder/virt/xenapi/fake.py:670 cinder/virt/xenapi/fake.py:732 -msgid "Raising NotImplemented" -msgstr "Sollevando NotImplemented" +#: cinder/volume/drivers/emc/emc_smis_common.py:711 +#, fuzzy, python-format +msgid "HidePaths for volume %s completed successfully." +msgstr "volume %s: rimosso con successo" -#: cinder/virt/xenapi/fake.py:555 +#: cinder/volume/drivers/emc/emc_smis_common.py:724 #, python-format -msgid "xenapi.fake does not have an implementation for %s" +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" msgstr "" -#: cinder/virt/xenapi/fake.py:589 -#, python-format -msgid "Calling %(localname)s %(impl)s" -msgstr "Chiamando %(localname)s %(impl)s" - -#: cinder/virt/xenapi/fake.py:594 +#: cinder/volume/drivers/emc/emc_smis_common.py:739 #, python-format -msgid "Calling getter %s" +msgid "Error mapping volume %(vol)s. %(error)s" msgstr "" -#: cinder/virt/xenapi/fake.py:654 +#: cinder/volume/drivers/emc/emc_smis_common.py:744 +#, fuzzy, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "volume %s: rimosso con successo" + +#: cinder/volume/drivers/emc/emc_smis_common.py:757 #, python-format msgid "" -"xenapi.fake does not have an implementation for %s or it has been called " -"with the wrong number of arguments" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" msgstr "" -#: cinder/virt/xenapi/host.py:67 +#: cinder/volume/drivers/emc/emc_smis_common.py:770 #, python-format -msgid "" -"Instance %(name)s running on %(host)s could not be found in the database:" -" assuming it is a worker VM and skipping migration to a new host" +msgid "Error unmapping volume %(vol)s. %(error)s" msgstr "" -#: cinder/virt/xenapi/host.py:137 +#: cinder/volume/drivers/emc/emc_smis_common.py:775 +#, fuzzy, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "volume %s: rimosso con successo" + +#: cinder/volume/drivers/emc/emc_smis_common.py:781 #, python-format -msgid "Unable to get SR for this host: %s" +msgid "Map volume: %(volume)s" msgstr "" -#: cinder/virt/xenapi/host.py:169 -msgid "Unable to get updated status" +#: cinder/volume/drivers/emc/emc_smis_common.py:790 +#: cinder/volume/drivers/emc/emc_smis_common.py:820 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" msgstr "" -#: cinder/virt/xenapi/host.py:172 +#: cinder/volume/drivers/emc/emc_smis_common.py:804 #, python-format -msgid "The call to %(method)s returned an error: %(e)s." 
+msgid "Unmap volume: %(volume)s" msgstr "" -#: cinder/virt/xenapi/network_utils.py:37 +#: cinder/volume/drivers/emc/emc_smis_common.py:810 #, python-format -msgid "Found non-unique network for name_label %s" +msgid "Volume %s is not mapped. No volume to unmap." msgstr "" -#: cinder/virt/xenapi/network_utils.py:55 +#: cinder/volume/drivers/emc/emc_smis_common.py:834 #, python-format -msgid "Found non-unique network for bridge %s" +msgid "Initialize connection: %(volume)s" msgstr "" -#: cinder/virt/xenapi/network_utils.py:58 +#: cinder/volume/drivers/emc/emc_smis_common.py:840 #, python-format -msgid "Found no network for bridge %s" +msgid "Volume %s is already mapped." msgstr "" -#: cinder/virt/xenapi/pool.py:111 +#: cinder/volume/drivers/emc/emc_smis_common.py:852 #, python-format -msgid "Unable to eject %(host)s from the pool; pool not empty" +msgid "Terminate connection: %(volume)s" msgstr "" -#: cinder/virt/xenapi/pool.py:126 +#: cinder/volume/drivers/emc/emc_smis_common.py:884 #, python-format -msgid "Unable to eject %(host)s from the pool; No master found" +msgid "Found Storage Type: %s" msgstr "" -#: cinder/virt/xenapi/pool.py:143 -#, python-format -msgid "Pool-Join failed: %(e)s" +#: cinder/volume/drivers/emc/emc_smis_common.py:887 +msgid "Storage type not found." msgstr "" -#: cinder/virt/xenapi/pool.py:146 +#: cinder/volume/drivers/emc/emc_smis_common.py:903 #, python-format -msgid "Unable to join %(host)s in the pool" +msgid "Found Masking View: %s" msgstr "" -#: cinder/virt/xenapi/pool.py:162 -#, python-format -msgid "Pool-eject failed: %(e)s" +#: cinder/volume/drivers/emc/emc_smis_common.py:906 +msgid "Masking View not found." msgstr "" -#: cinder/virt/xenapi/pool.py:174 -#, fuzzy, python-format -msgid "Unable to set up pool: %(e)s." -msgstr "Impossibile smontare il volume %s" +#: cinder/volume/drivers/emc/emc_smis_common.py:928 +msgid "Ecom user not found." +msgstr "" -#: cinder/virt/xenapi/pool.py:185 +#: cinder/volume/drivers/emc/emc_smis_common.py:948 #, python-format -msgid "Pool-set_name_label failed: %(e)s" +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" msgstr "" -#: cinder/virt/xenapi/vif.py:103 -#, python-format -msgid "Found no PIF for device %s" +#: cinder/volume/drivers/emc/emc_smis_common.py:952 +msgid "Ecom server not found." msgstr "" -#: cinder/virt/xenapi/vif.py:122 +#: cinder/volume/drivers/emc/emc_smis_common.py:959 +#, fuzzy +msgid "Cannot connect to ECOM server" +msgstr "Riconnesso alla coda" + +#: cinder/volume/drivers/emc/emc_smis_common.py:971 #, python-format -msgid "" -"PIF %(pif_rec['uuid'])s for network %(bridge)s has VLAN id %(pif_vlan)d. " -"Expected %(vlan_num)d" +msgid "Found Replication Service: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:218 -msgid "Created VM" +#: cinder/volume/drivers/emc/emc_smis_common.py:984 +#, python-format +msgid "Found Storage Configuration Service: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:245 +#: cinder/volume/drivers/emc/emc_smis_common.py:997 #, python-format -msgid "VBD not found in instance %s" +msgid "Found Controller Configuration Service: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:262 +#: cinder/volume/drivers/emc/emc_smis_common.py:1010 #, python-format -msgid "VBD %s already detached" +msgid "Found Storage Hardware ID Management Service: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:265 +#: cinder/volume/drivers/emc/emc_smis_common.py:1054 #, python-format -msgid "VBD %(vbd_ref)s detach rejected, attempt %(num_attempt)d/%(max_attempts)d" +msgid "Pool %(storage_type)s is not found." 
msgstr "" -#: cinder/virt/xenapi/vm_utils.py:270 +#: cinder/volume/drivers/emc/emc_smis_common.py:1060 #, python-format -msgid "Unable to unplug VBD %s" +msgid "Storage system not found for pool %(storage_type)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:275 +#: cinder/volume/drivers/emc/emc_smis_common.py:1066 #, python-format -msgid "Reached maximum number of retries trying to unplug VBD %s" +msgid "Pool: %(pool)s SystemName: %(systemname)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:286 +#: cinder/volume/drivers/emc/emc_smis_common.py:1082 #, python-format -msgid "Unable to destroy VBD %s" +msgid "Pool name: %(poolname)s System name: %(systemname)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:305 +#: cinder/volume/drivers/emc/emc_smis_common.py:1114 #, python-format -msgid "Creating %(vbd_type)s-type VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +msgid "Volume %(volumename)s not found on the array." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:308 +#: cinder/volume/drivers/emc/emc_smis_common.py:1117 #, python-format -msgid "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s." +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:319 +#: cinder/volume/drivers/emc/emc_smis_common.py:1130 #, python-format -msgid "Unable to destroy VDI %s" +msgid "Source: %(volumename)s Target: %(snapshotname)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:336 +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 #, python-format msgid "" -"Created VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s)" -" on %(sr_ref)s." +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " msgstr "" -#: cinder/virt/xenapi/vm_utils.py:345 +#: cinder/volume/drivers/emc/emc_smis_common.py:1158 #, python-format -msgid "Copied VDI %(vdi_ref)s from VDI %(vdi_to_copy_ref)s on %(sr_ref)s." +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:353 +#: cinder/volume/drivers/emc/emc_smis_common.py:1184 #, python-format -msgid "Cloned VDI %(vdi_ref)s from VDI %(vdi_to_clone_ref)s" +msgid "Error finding %s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:372 +#: cinder/volume/drivers/emc/emc_smis_common.py:1188 #, python-format -msgid "No primary VDI found for %(vm_ref)s" +msgid "Found %(name)s: %(initiator)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:379 +#: cinder/volume/drivers/emc/emc_smis_common.py:1248 #, python-format -msgid "Snapshotting with label '%(label)s'" +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:392 +#: cinder/volume/drivers/emc/emc_smis_common.py:1289 #, python-format -msgid "Created snapshot %(template_vm_ref)s" +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:431 +#: cinder/volume/drivers/emc/emc_smis_common.py:1302 #, python-format -msgid "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s" +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." 
msgstr "" -#: cinder/virt/xenapi/vm_utils.py:583 +#: cinder/volume/drivers/emc/emc_smis_common.py:1314 #, python-format -msgid "Creating blank HD of size %(req_size)d gigs" +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:634 +#: cinder/volume/drivers/emc/emc_smis_common.py:1326 #, python-format msgid "" -"Fast cloning is only supported on default local SR of type ext. SR on " -"this system was found to be of type %(sr_type)s. Ignoring the cow flag." +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:724 +#: cinder/volume/drivers/emc/emc_smis_common.py:1361 #, python-format -msgid "" -"download_vhd %(image)s attempt %(attempt_num)d/%(max_attempts)d from " -"%(glance_host)s:%(glance_port)s" +msgid "Available device number on %(storage)s: %(device)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:734 +#: cinder/volume/drivers/emc/emc_smis_common.py:1404 #, python-format -msgid "download_vhd failed: %r" +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:750 +#: cinder/volume/drivers/emc/emc_smis_common.py:1409 #, python-format -msgid "Asking xapi to fetch vhd image %(image)s" +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:760 +#: cinder/volume/drivers/emc/emc_smis_common.py:1419 #, python-format -msgid "" -"xapi 'download_vhd' returned VDI of type '%(vdi_type)s' with UUID " -"'%(vdi_uuid)s'" +msgid "Device info: %(data)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:789 +#: cinder/volume/drivers/emc/emc_smis_common.py:1441 #, python-format -msgid "vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes=%(vdi_size_bytes)d" +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:805 +#: cinder/volume/drivers/emc/emc_smis_common.py:1463 #, python-format -msgid "image_size_bytes=%(size_bytes)d, allowed_size_bytes=%(allowed_size_bytes)d" +msgid "Found Storage Processor System: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:809 +#: cinder/volume/drivers/emc/emc_smis_common.py:1491 #, python-format msgid "" -"Image size %(size_bytes)d exceeded instance_type allowed size " -"%(allowed_size_bytes)d" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:831 -#, python-format -msgid "Fetching image %(image)s, type %(image_type_str)" +#: cinder/volume/drivers/emc/emc_smis_common.py:1520 +msgid "Error finding Storage Hardware ID Service." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:844 +#: cinder/volume/drivers/emc/emc_smis_common.py:1526 #, python-format -msgid "Size for image %(image)s: %(virtual_size)d" +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:853 -#, python-format -msgid "" -"Kernel/Ramdisk image is too large: %(vdi_size)d bytes, max %(max_size)d " -"bytes" +#: cinder/volume/drivers/emc/emc_smis_common.py:1538 +msgid "Error finding Target WWNs." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:870 +#: cinder/volume/drivers/emc/emc_smis_common.py:1548 #, python-format -msgid "Copying VDI %s to /boot/guest on dom0" +msgid "Add target WWN: %s." 
msgstr "" -#: cinder/virt/xenapi/vm_utils.py:884 +#: cinder/volume/drivers/emc/emc_smis_common.py:1550 #, python-format -msgid "Kernel/Ramdisk VDI %s destroyed" -msgstr "Kernel/Ramdisk VDI %s distrutti" +msgid "Target WWNs: %s." +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:895 -#, fuzzy -msgid "Failed to fetch glance image" -msgstr "Impossibile riavviare l'istanza" +#: cinder/volume/drivers/emc/emc_smis_common.py:1566 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:934 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:152 #, python-format -msgid "Detected %(image_type_str)s format for image %(image_ref)s" +msgid "Could not find iSCSI export for volume %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:955 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:161 +#, fuzzy, python-format +msgid "Cannot find device number for volume %s" +msgstr "Impossibile localizzare il volume %s" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:191 #, python-format -msgid "Looking up vdi %s for PV kernel" -msgstr "Cercando vdi %s per kernel PV" +msgid "Found iSCSI endpoint: %s" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:973 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:198 #, python-format -msgid "Unknown image format %(disk_image_type)s" +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1016 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:215 #, python-format -msgid "VDI %s is still available" +msgid "ISCSI properties: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1059 +#: cinder/volume/drivers/hds/hds.py:70 #, python-format -msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgid "Range: start LU: %(start)s, end LU: %(end)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1061 +#: cinder/volume/drivers/hds/hds.py:84 #, python-format -msgid "(VM_UTILS) xenapi power_state -> |%s|" +msgid "setting LU upper (end) limit to %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1088 +#: cinder/volume/drivers/hds/hds.py:92 #, python-format -msgid "Unable to parse rrd of %(vm_uuid)s" +msgid "%(element)s: %(val)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1108 +#: cinder/volume/drivers/hds/hds.py:103 cinder/volume/drivers/hds/hds.py:105 #, python-format -msgid "Re-scanning SR %s" +msgid "XML exception reading parameter: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1136 +#: cinder/volume/drivers/hds/hds.py:178 #, python-format -msgid "Flag sr_matching_filter '%s' does not respect formatting convention" +msgid "portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1154 -msgid "" -"XenAPI is unable to find a Storage Repository to install guest instances " -"on. Please check your configuration and/or configure the flag " -"'sr_matching_filter'" +#: cinder/volume/drivers/hds/hds.py:197 +#, python-format +msgid "No configuration found for service: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1167 -msgid "Cannot find SR of content-type ISO" +#: cinder/volume/drivers/hds/hds.py:250 +#, python-format +msgid "HDP not found: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1175 +#: cinder/volume/drivers/hds/hds.py:289 #, python-format -msgid "ISO: looking at SR %(sr_rec)s" +msgid "iSCSI portal not found for service: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1177 -msgid "ISO: not iso content" +#: cinder/volume/drivers/hds/hds.py:327 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created." 
msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1180 -msgid "ISO: iso content_type, no 'i18n-key' key" +#: cinder/volume/drivers/hds/hds.py:355 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is cloned." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1183 -msgid "ISO: iso content_type, i18n-key value not 'local-storage-iso'" +#: cinder/volume/drivers/hds/hds.py:372 +#, python-format +msgid "LUN %(lun)s extended to %(size)s GB." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1187 -msgid "ISO: SR MATCHing our criteria" +#: cinder/volume/drivers/hds/hds.py:395 +#, python-format +msgid "delete lun %(lun)s on %(name)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1189 -msgid "ISO: ISO, looking to see if it is host local" +#: cinder/volume/drivers/hds/hds.py:480 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created from snapshot." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1192 +#: cinder/volume/drivers/hds/hds.py:503 #, python-format -msgid "ISO: PBD %(pbd_ref)s disappeared" +msgid "LUN %(lun)s of size %(size)s MB is created as snapshot." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1195 +#: cinder/volume/drivers/hds/hds.py:522 #, python-format -msgid "ISO: PBD matching, want %(pbd_rec)s, have %(host)s" +msgid "LUN %s is deleted." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1198 -msgid "ISO: SR with local PBD" +#: cinder/volume/drivers/huawei/__init__.py:57 +msgid "_instantiate_driver: configuration not found." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1220 +#: cinder/volume/drivers/huawei/__init__.py:64 #, python-format msgid "" -"Unable to obtain RRD XML for VM %(vm_uuid)s with server details: " -"%(server)s." +"_instantiate_driver: Loading %(protocol)s driver for Huawei OceanStor " +"%(product)s series storage arrays." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1236 +#: cinder/volume/drivers/huawei/__init__.py:84 #, python-format -msgid "Unable to obtain RRD XML updates with server details: %(server)s." +msgid "" +"\"Product\" or \"Protocol\" is illegal. \"Product\" should be set to " +"either T, Dorado or HVS. \"Protocol\" should be set to either iSCSI or " +"FC. Product: %(product)s Protocol: %(protocol)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1290 +#: cinder/volume/drivers/huawei/huawei_dorado.py:74 #, python-format -msgid "Invalid statistics data from Xenserver: %s" +msgid "" +"initialize_connection: volume name: %(vol)s host: %(host)s initiator: " +"%(wwn)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1343 +#: cinder/volume/drivers/huawei/huawei_dorado.py:92 +#: cinder/volume/drivers/huawei/huawei_t.py:461 #, python-format -msgid "VHD %(vdi_uuid)s has parent %(parent_ref)s" +msgid "initialize_connection: Target FC ports WWNS: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1417 +#: cinder/volume/drivers/huawei/huawei_t.py:101 #, python-format msgid "" -"Parent %(parent_uuid)s doesn't match original parent " -"%(original_parent_uuid)s, waiting for coalesce..." +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(ini)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1427 +#: cinder/volume/drivers/huawei/huawei_t.py:159 +#: cinder/volume/drivers/huawei/rest_common.py:1278 #, python-format -msgid "VHD coalesce attempts exceeded (%(max_attempts)d), giving up..." +msgid "" +"_get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " +"check config file." 
msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1462 +#: cinder/volume/drivers/huawei/huawei_t.py:206 +#: cinder/volume/drivers/huawei/rest_common.py:1083 #, python-format -msgid "Timeout waiting for device %s to be created" +msgid "_get_tgt_iqn: iSCSI IP is %s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1473 +#: cinder/volume/drivers/huawei/huawei_t.py:234 #, python-format -msgid "Plugging VBD %s ... " +msgid "_get_tgt_iqn: iSCSI target iqn is %s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1476 +#: cinder/volume/drivers/huawei/huawei_t.py:248 #, python-format -msgid "Plugging VBD %s done." +msgid "" +"_get_iscsi_tgt_port_info: Failed to get iSCSI port info. Please make sure" +" the iSCSI port IP %s is configured in array." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1478 +#: cinder/volume/drivers/huawei/huawei_t.py:323 +#: cinder/volume/drivers/huawei/huawei_t.py:552 #, python-format -msgid "VBD %(vbd_ref)s plugged as %(orig_dev)s" +msgid "" +"terminate_connection: volume: %(vol)s, host: %(host)s, connector: " +"%(initiator)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1481 +#: cinder/volume/drivers/huawei/huawei_t.py:351 #, python-format -msgid "VBD %(vbd_ref)s plugged into wrong dev, remapping to %(dev)s" +msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1490 -#, python-format -msgid "Destroying VBD for VDI %s ... " +#: cinder/volume/drivers/huawei/huawei_t.py:436 msgid "validate_connector: The FC driver requires the wwpns in the connector." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1498 +#: cinder/volume/drivers/huawei/huawei_t.py:443 #, python-format -msgid "Destroying VBD for VDI %s done." +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(wwn)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1511 +#: cinder/volume/drivers/huawei/huawei_t.py:578 #, python-format -msgid "Running pygrub against %s" +msgid "_remove_fc_ports: FC port was not found on host %(hostid)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1518 +#: cinder/volume/drivers/huawei/huawei_utils.py:40 #, python-format -msgid "Found Xen kernel %s" +msgid "parse_xml_file: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1520 -msgid "No Xen kernel found. Booting HVM." +#: cinder/volume/drivers/huawei/huawei_utils.py:129 +#, python-format +msgid "_get_host_os_type: Host %(ip)s OS type is %(os)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1533 -msgid "Partitions:" +#: cinder/volume/drivers/huawei/rest_common.py:59 +#, python-format +msgid "HVS Request URL: %(url)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1539 +#: cinder/volume/drivers/huawei/rest_common.py:60 #, python-format -msgid " %(num)s: %(ptype)s %(size)d sectors" +msgid "HVS Request Data: %(data)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1565 +#: cinder/volume/drivers/huawei/rest_common.py:73 #, python-format -msgid "" -"Writing partition table %(primary_first)d %(primary_last)d to " -"%(dev_path)s..." +msgid "HVS Response Data: %(res)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1578 +#: cinder/volume/drivers/huawei/rest_common.py:75 #, python-format -msgid "Writing partition table %s done."
+msgid "Bad response from server: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1632 -#, python-format -msgid "" -"Starting sparse_copy src=%(src_path)s dst=%(dst_path)s " -"virtual_size=%(virtual_size)d block_size=%(block_size)d" +#: cinder/volume/drivers/huawei/rest_common.py:82 +msgid "JSON transfer error" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1664 +#: cinder/volume/drivers/huawei/rest_common.py:102 #, python-format -msgid "" -"Finished sparse_copy in %(duration).2f secs, %(compression_pct).2f%% " -"reduction in size" +msgid "Login error, reason is %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1714 +#: cinder/volume/drivers/huawei/rest_common.py:166 +#, python-format msgid "" -"XenServer tools installed in this image are capable of network injection." -" Networking files will not bemanipulated" +"%(err)s\n" +"result: %(res)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1722 -msgid "" -"XenServer tools are present in this image but are not capable of network " -"injection" +#: cinder/volume/drivers/huawei/rest_common.py:173 +#, python-format +msgid "%s \"data\" was not in result." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1726 -msgid "XenServer tools are not installed in this image" +#: cinder/volume/drivers/huawei/rest_common.py:208 +msgid "Can't find the Qos policy in array" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1742 -msgid "Manipulating interface files directly" +#: cinder/volume/drivers/huawei/rest_common.py:246 +msgid "Can't find lun or lun group in array" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1751 +#: cinder/volume/drivers/huawei/rest_common.py:280 #, python-format -msgid "Failed to mount filesystem (expected for non-linux instances): %s" +msgid "Invalid resource pool: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:131 cinder/virt/xenapi/vmops.py:722 +#: cinder/volume/drivers/huawei/rest_common.py:298 #, python-format -msgid "Updating progress to %(progress)d" +msgid "Get pool info error, pool name is:%s" msgstr "" -#: cinder/virt/xenapi/vmops.py:231 +#: cinder/volume/drivers/huawei/rest_common.py:327 #, python-format -msgid "Attempted to power on non-existent instance bad instance id %s" +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:233 -#, fuzzy, python-format -msgid "Starting instance" -msgstr "Riavviando l'istanza %s" +#: cinder/volume/drivers/huawei/rest_common.py:354 +#, python-format +msgid "_stop_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" -#: cinder/virt/xenapi/vmops.py:303 -msgid "Removing kernel/ramdisk files from dom0" +#: cinder/volume/drivers/huawei/rest_common.py:474 +#, python-format +msgid "" +"_mapping_hostgroup_and_lungroup: lun_group: %(lun_group)sview_id: " +"%(view_id)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:358 -#, fuzzy -msgid "Failed to spawn, rolling back" -msgstr "Impossibile sospendere l'istanza" +#: cinder/volume/drivers/huawei/rest_common.py:511 +#: cinder/volume/drivers/huawei/rest_common.py:543 +#, python-format +msgid "initiator name:%(initiator_name)s, volume name:%(volume)s." +msgstr "" -#: cinder/virt/xenapi/vmops.py:443 -msgid "Detected ISO image type, creating blank VM for install" +#: cinder/volume/drivers/huawei/rest_common.py:527 +#, python-format +msgid "host lun id is %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:462 -msgid "Auto configuring disk, attempting to resize partition..." 
+#: cinder/volume/drivers/huawei/rest_common.py:553 +#, python-format +msgid "the free wwns %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:515 +#: cinder/volume/drivers/huawei/rest_common.py:574 #, python-format -msgid "Invalid value for injected_files: %r" +msgid "the fc server properties are: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:520 +#: cinder/volume/drivers/huawei/rest_common.py:688 #, python-format -msgid "Injecting file path: '%s'" +msgid "JSON transfer data error. %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:527 -msgid "Setting admin password" +#: cinder/volume/drivers/huawei/rest_common.py:874 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:531 -msgid "Resetting network" +#: cinder/volume/drivers/huawei/rest_common.py:937 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(fetchtype)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:538 -msgid "Setting VCPU weight" +#: cinder/volume/drivers/huawei/rest_common.py:964 +#, python-format +msgid "" +"PrefetchType config is wrong. PrefetchType must be in 1,2,3,4. fetchtype " +"is:%(fetchtype)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:544 -msgid "Starting VM" +#: cinder/volume/drivers/huawei/rest_common.py:970 +msgid "Use default prefetch fetchtype. Prefetch fetchtype:Intelligent." msgstr "" -#: cinder/virt/xenapi/vmops.py:551 +#: cinder/volume/drivers/huawei/rest_common.py:982 #, python-format msgid "" -"Latest agent build for %(hypervisor)s/%(os)s/%(architecture)s is " -"%(version)s" +"_wait_for_luncopy:LUNcopy status is not normal.LUNcopy name: " +"%(luncopyname)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:554 +#: cinder/volume/drivers/huawei/rest_common.py:1056 #, python-format -msgid "No agent build found for %(hypervisor)s/%(os)s/%(architecture)s" +msgid "" +"_get_iscsi_port_info: Failed to get iscsi port info through config IP " +"%(ip)s, please check config file." msgstr "" -#: cinder/virt/xenapi/vmops.py:561 -msgid "Waiting for instance state to become running" +#: cinder/volume/drivers/huawei/rest_common.py:1101 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:573 -msgid "Querying agent version" +#: cinder/volume/drivers/huawei/rest_common.py:1124 +#, python-format +msgid "_parse_volume_type: type id: %(type_id)s config parameter is: %(params)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:576 +#: cinder/volume/drivers/huawei/rest_common.py:1157 #, python-format -msgid "Instance agent version: %s" +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the configuration file " +"%(conf)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:581 +#: cinder/volume/drivers/huawei/rest_common.py:1162 #, python-format -msgid "Updating Agent to %s" +msgid "The config parameters are: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:616 +#: cinder/volume/drivers/huawei/rest_common.py:1239 +#: cinder/volume/drivers/huawei/ssh_common.py:118 +#: cinder/volume/drivers/huawei/ssh_common.py:1265 #, python-format -msgid "No opaque_ref could be determined for '%s'." +msgid "_check_conf_file: Config file invalid. %s must be set." msgstr "" -#: cinder/virt/xenapi/vmops.py:670 -msgid "Finished snapshot and upload for VM" +#: cinder/volume/drivers/huawei/rest_common.py:1246 +#: cinder/volume/drivers/huawei/ssh_common.py:125 +msgid "_check_conf_file: Config file invalid. 
StoragePool must be set." msgstr "" -#: cinder/virt/xenapi/vmops.py:677 -msgid "Starting snapshot for VM" +#: cinder/volume/drivers/huawei/rest_common.py:1256 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType invalid.\n" +"The valid values are: %(os_list)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:686 -#, fuzzy, python-format -msgid "Unable to Snapshot instance: %(exc)s" -msgstr "Impossibile montare il volume all'istanza %s" - -#: cinder/virt/xenapi/vmops.py:702 -msgid "Failed to transfer vhd to new host" +#: cinder/volume/drivers/huawei/rest_common.py:1300 +msgid "Can not find lun in array" msgstr "" -#: cinder/virt/xenapi/vmops.py:770 +#: cinder/volume/drivers/huawei/ssh_common.py:54 #, python-format -msgid "Resizing down VDI %(cow_uuid)s from %(old_gb)dGB to %(new_gb)dGB" +msgid "ssh_read: Read SSH timeout. %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:893 -#, python-format -msgid "Resizing up VDI %(vdi_uuid)s from %(old_gb)dGB to %(new_gb)dGB" +#: cinder/volume/drivers/huawei/ssh_common.py:70 +msgid "No response message. Please check system status." msgstr "" -#: cinder/virt/xenapi/vmops.py:901 -msgid "Resize complete" +#: cinder/volume/drivers/huawei/ssh_common.py:101 +#: cinder/volume/drivers/huawei/ssh_common.py:1249 +msgid "do_setup" msgstr "" -#: cinder/virt/xenapi/vmops.py:928 +#: cinder/volume/drivers/huawei/ssh_common.py:135 +#: cinder/volume/drivers/huawei/ssh_common.py:1287 #, python-format -msgid "Failed to query agent version: %(resp)r" +msgid "" +"_check_conf_file: Config file invalid. Host OSType is invalid.\n" +"The valid values are: %(os_list)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:949 +#: cinder/volume/drivers/huawei/ssh_common.py:169 #, python-format -msgid "domid changed from %(domid)s to %(newdomid)s" +msgid "_get_login_info: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:962 +#: cinder/volume/drivers/huawei/ssh_common.py:224 #, python-format -msgid "Failed to update agent: %(resp)r" +msgid "create_volume: volume name: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:983 +#: cinder/volume/drivers/huawei/ssh_common.py:242 #, python-format -msgid "Failed to exchange keys: %(resp)r" +msgid "" +"_name_translate: Name in cinder: %(old)s, new name in storage system: " +"%(new)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:998 +#: cinder/volume/drivers/huawei/ssh_common.py:279 #, python-format -msgid "Failed to update password: %(resp)r" +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the element in configuration " +"file %(conf)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1023 +#: cinder/volume/drivers/huawei/ssh_common.py:373 +#: cinder/volume/drivers/huawei/ssh_common.py:1451 #, python-format -msgid "Failed to inject file: %(resp)r" +msgid "LUNType must be \"Thin\" or \"Thick\". LUNType:%(type)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1032 -msgid "VM already halted, skipping shutdown..." +#: cinder/volume/drivers/huawei/ssh_common.py:395 +msgid "" +"_parse_conf_lun_params: Use default prefetch type. Prefetch type: " +"Intelligent" msgstr "" -#: cinder/virt/xenapi/vmops.py:1036 -msgid "Shutting down VM" +#: cinder/volume/drivers/huawei/ssh_common.py:421 +#, python-format +msgid "" +"_get_maximum_capacity_pool_id: Failed to get pool id. Please check config" +" file and make sure the StoragePool %s is created in storage array." 
msgstr "" -#: cinder/virt/xenapi/vmops.py:1054 -msgid "Unable to find VBD for VM" +#: cinder/volume/drivers/huawei/ssh_common.py:436 +#, python-format +msgid "CLI command: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1097 -msgid "Using RAW or VHD, skipping kernel and ramdisk deletion" +#: cinder/volume/drivers/huawei/ssh_common.py:466 +#, python-format +msgid "" +"_execute_cli: Can not connect to IP %(old)s, try to connect to the other " +"IP %(new)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1104 -msgid "instance has a kernel or ramdisk but not both" +#: cinder/volume/drivers/huawei/ssh_common.py:501 +#, python-format +msgid "_execute_cli: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1111 -msgid "kernel/ramdisk files removed" +#: cinder/volume/drivers/huawei/ssh_common.py:511 +#, python-format +msgid "delete_volume: volume name: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1121 -msgid "VM destroyed" +#: cinder/volume/drivers/huawei/ssh_common.py:516 +#, python-format +msgid "delete_volume: Volume %(name)s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:1147 -msgid "Destroying VM" +#: cinder/volume/drivers/huawei/ssh_common.py:570 +#, python-format +msgid "" +"create_volume_from_snapshot: snapshot name: %(snapshot)s, volume name: " +"%(volume)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1169 -msgid "VM is not present, skipping destroy..." +#: cinder/volume/drivers/huawei/ssh_common.py:580 +#, python-format +msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:1222 +#: cinder/volume/drivers/huawei/ssh_common.py:650 #, python-format -msgid "Instance is already in Rescue Mode: %s" +msgid "_wait_for_luncopy: LUNcopy %(luncopyname)s status is %(status)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1296 +#: cinder/volume/drivers/huawei/ssh_common.py:688 #, python-format -msgid "Found %(instance_count)d hung reboots older than %(timeout)d seconds" +msgid "create_cloned_volume: src volume: %(src)s, tgt volume: %(tgt)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1300 -msgid "Automatically hard rebooting" +#: cinder/volume/drivers/huawei/ssh_common.py:697 +#, python-format +msgid "Source volume %(name)s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:1363 +#: cinder/volume/drivers/huawei/ssh_common.py:739 #, python-format -msgid "Setting migration %(migration_id)s to error: %(reason)s" +msgid "" +"extend_volume: extended volume name: %(extended_name)s new added volume " +"name: %(added_name)s new added volume size: %(added_size)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1374 +#: cinder/volume/drivers/huawei/ssh_common.py:747 #, python-format -msgid "" -"Automatically confirming migration %(migration_id)s for instance " -"%(instance_uuid)s" +msgid "extend_volume: volume %s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:1379 +#: cinder/volume/drivers/huawei/ssh_common.py:779 #, python-format -msgid "Instance %(instance_uuid)s not found" +msgid "create_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1383 -msgid "In ERROR state" +#: cinder/volume/drivers/huawei/ssh_common.py:785 +msgid "create_snapshot: Resource pool needs 1GB valid size at least." msgstr "" -#: cinder/virt/xenapi/vmops.py:1389 +#: cinder/volume/drivers/huawei/ssh_common.py:792 #, python-format -msgid "In %(task_state)s task_state, not RESIZE_VERIFY" +msgid "create_snapshot: Volume %(name)s does not exist." 
msgstr "" -#: cinder/virt/xenapi/vmops.py:1396 +#: cinder/volume/drivers/huawei/ssh_common.py:855 #, python-format -msgid "Error auto-confirming resize: %(e)s. Will retry later." +msgid "delete_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1418 -msgid "Could not get bandwidth info." +#: cinder/volume/drivers/huawei/ssh_common.py:865 +#, python-format +msgid "" +"delete_snapshot: Can not delete snapshot %s for it is a source LUN of " +"LUNCopy." msgstr "" -#: cinder/virt/xenapi/vmops.py:1469 -msgid "Injecting network info to xenstore" +#: cinder/volume/drivers/huawei/ssh_common.py:873 +#, python-format +msgid "delete_snapshot: Snapshot %(snap)s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:1483 -msgid "Creating vifs" +#: cinder/volume/drivers/huawei/ssh_common.py:916 +#, python-format +msgid "" +"%(func)s: %(msg)s\n" +"CLI command: %(cmd)s\n" +"CLI out: %(out)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1492 +#: cinder/volume/drivers/huawei/ssh_common.py:933 #, python-format -msgid "Creating VIF for network %(network_ref)s" +msgid "map_volume: Volume %s was not found." msgstr "" -#: cinder/virt/xenapi/vmops.py:1495 +#: cinder/volume/drivers/huawei/ssh_common.py:1079 #, python-format -msgid "Created VIF %(vif_ref)s, network %(network_ref)s" +msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1520 -msgid "Injecting hostname to xenstore" +#: cinder/volume/drivers/huawei/ssh_common.py:1102 +#, python-format +msgid "remove_map: Host %s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:1545 +#: cinder/volume/drivers/huawei/ssh_common.py:1106 #, python-format -msgid "" -"The agent call to %(method)s returned an invalid response: %(ret)r. " -"path=%(path)s; args=%(args)r" +msgid "remove_map: Volume %s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:1566 +#: cinder/volume/drivers/huawei/ssh_common.py:1119 #, python-format -msgid "TIMEOUT: The call to %(method)s timed out. args=%(args)r" +msgid "remove_map: No map between host %(host)s and volume %(volume)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1570 +#: cinder/volume/drivers/huawei/ssh_common.py:1138 #, python-format msgid "" -"NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. " -"args=%(args)r" +"_delete_map: There are IOs accessing the system. Retry to delete host map" +" %(mapid)s 10s later." msgstr "" -#: cinder/virt/xenapi/vmops.py:1575 +#: cinder/volume/drivers/huawei/ssh_common.py:1146 #, python-format -msgid "The call to %(method)s returned an error: %(e)s. args=%(args)r" +msgid "" +"_delete_map: Failed to delete host map %(mapid)s.\n" +"CLI out: %(out)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1661 -#, python-format -msgid "OpenSSL error: %s" +#: cinder/volume/drivers/huawei/ssh_common.py:1185 +msgid "_update_volume_stats: Updating volume stats." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1277 +msgid "_check_conf_file: Config file invalid. StoragePool must be specified." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:52 -msgid "creating sr within volume_utils" +#: cinder/volume/drivers/huawei/ssh_common.py:1311 +msgid "" +"_get_device_type: The driver only supports Dorado5100 and Dorado 2100 G2 " +"now." 
msgstr "" -#: cinder/virt/xenapi/volume_utils.py:55 cinder/virt/xenapi/volume_utils.py:83 +#: cinder/volume/drivers/huawei/ssh_common.py:1389 #, python-format -msgid "type is = %s" +msgid "" +"create_volume_from_snapshot: %(device)s does not support create volume " +"from snapshot." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:58 cinder/virt/xenapi/volume_utils.py:86 +#: cinder/volume/drivers/huawei/ssh_common.py:1396 #, python-format -msgid "name = %s" +msgid "create_cloned_volume: %(device)s does not support clone volume." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:71 +#: cinder/volume/drivers/huawei/ssh_common.py:1404 #, python-format -msgid "Created %(label)s as %(sr_ref)s." +msgid "extend_volume: %(device)s does not support extend volume." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:76 cinder/virt/xenapi/volume_utils.py:174 -msgid "Unable to create Storage Repository" +#: cinder/volume/drivers/huawei/ssh_common.py:1413 +#, python-format +msgid "create_snapshot: %(device)s does not support snapshot." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:80 -msgid "introducing sr within volume_utils" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:132 +msgid "enter: do_setup" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:103 cinder/virt/xenapi/volume_utils.py:170 -#: cinder/virt/xenapi/volumeops.py:156 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:142 #, python-format -msgid "Introduced %(label)s as %(sr_ref)s." +msgid "Failed getting details for pool %s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:106 -msgid "Creating pbd for SR" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:179 +msgid "do_setup: No configured nodes." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:108 -msgid "Plugging SR" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:182 +msgid "leave: do_setup" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:116 cinder/virt/xenapi/volumeops.py:160 -msgid "Unable to introduce Storage Repository" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:186 +msgid "enter: check_for_setup_error" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:127 cinder/virt/xenapi/volumeops.py:50 -msgid "Unable to get SR using uuid" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:190 +msgid "Unable to determine system name" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:129 -#, python-format -msgid "Forgetting SR %s..." +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:193 +msgid "Unable to determine system id" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:137 -msgid "Unable to forget Storage Repository" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:196 +msgid "Unable to determine pool extent size" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:157 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:203 +#: cinder/volume/drivers/netapp/iscsi.py:122 +#: cinder/volume/drivers/netapp/nfs.py:639 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:157 #, python-format -msgid "Introducing %s..." 
+msgid "%s is not set" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:186 -#, python-format -msgid "Unable to find SR from VBD %s" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:209 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:204 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:217 #, python-format -msgid "Ignoring exception %(exc)s when getting PBDs for %(sr_ref)s" +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:210 -#, python-format -msgid "Ignoring exception %(exc)s when unplugging PBD %(pbd)s" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:225 +msgid "leave: check_for_setup_error" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:234 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:235 #, python-format -msgid "Unable to introduce VDI on SR %s" +msgid "ensure_export: Volume %s not found on storage" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:242 -#, python-format -msgid "Unable to get record of VDI %s on" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:254 +msgid "The connector does not contain the required information." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:264 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:277 #, python-format -msgid "Unable to introduce VDI for SR %s" +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:274 -#, python-format -msgid "Error finding vdis in SR %s" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:297 +msgid "CHAP secret exists for host but CHAP is disabled" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:281 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:302 #, python-format -msgid "Unable to find vbd for vdi %s" +msgid "initialize_connection: Failed to get attributes for volume %s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:315 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:314 #, python-format -msgid "Unable to obtain target information %(data)s, %(mountpoint)s" +msgid "Did not find expected column name in lsvdisk: %s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:341 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:316 #, python-format -msgid "Mountpoint cannot be translated: %s" +msgid "initialize_connection: Missing volume attribute for volume %s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:64 -msgid "Could not find VDI ref" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:333 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:69 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:342 #, python-format -msgid "Creating SR %s" +msgid "initialize_connection: Did not find a preferred node for volume %s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:73 -msgid "Could not create SR" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:365 +msgid "" +"Could not get FC connection information for the host-volume connection. " +"Is the host configured properly for FC connections?" 
msgstr "" -#: cinder/virt/xenapi/volumeops.py:76 -msgid "Could not retrieve SR record" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:380 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" msgstr "" -#: cinder/virt/xenapi/volumeops.py:81 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:385 #, python-format -msgid "Introducing SR %s" +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:85 -msgid "SR found in xapi database. No need to introduce" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:403 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:90 -msgid "Could not introduce SR" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:411 +msgid "terminate_connection: Failed to get host name from connector." msgstr "" -#: cinder/virt/xenapi/volumeops.py:94 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:421 #, python-format -msgid "Checking for SR %s" +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:106 -#, python-format -msgid "SR %s not found in the xapi database" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:447 +msgid "create_volume_from_snapshot: Source and destination size differ." msgstr "" -#: cinder/virt/xenapi/volumeops.py:112 -msgid "Could not forget SR" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:459 +msgid "create_cloned_volume: Source and destination size differ." msgstr "" -#: cinder/virt/xenapi/volumeops.py:121 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:470 #, python-format -msgid "Attach_volume: %(connection_info)s, %(instance_name)s, %(mountpoint)s" +msgid "enter: extend_volume: volume %s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:178 -#, python-format -msgid "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s" -msgstr "Impossible creare il VDI su SR %(sr_ref)s per l'istanza %(instance_name)s" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:474 +msgid "extend_volume: Extending a volume with snapshots is not supported." 
+msgstr "" -#: cinder/virt/xenapi/volumeops.py:189 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:481 #, python-format -msgid "Unable to use SR %(sr_ref)s for instance %(instance_name)s" -msgstr "Impossibile usare SR %(sr_ref)s per l'istanza %(instance_name)s" +msgid "leave: extend_volume: volume %s" +msgstr "" -#: cinder/virt/xenapi/volumeops.py:197 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:497 #, python-format -msgid "Unable to attach volume to instance %s" -msgstr "Impossibile montare il volume all'istanza %s" +msgid "enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" -#: cinder/virt/xenapi/volumeops.py:200 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:523 #, python-format -msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s" -msgstr "Mountpoint %(mountpoint)s montato all'istanza %(instance_name)s" +msgid "leave: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" -#: cinder/virt/xenapi/volumeops.py:210 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:540 #, python-format -msgid "Detach_volume: %(instance_name)s, %(mountpoint)s" -msgstr "Detach_volume: %(instance_name)s, %(mountpoint)s" +msgid "" +"enter: retype: id=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" -#: cinder/virt/xenapi/volumeops.py:219 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:581 #, python-format -msgid "Unable to locate volume %s" -msgstr "Impossibile localizzare il volume %s" +msgid "" +"exit: retype: id=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:622 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:623 +msgid "_update_volume_stats: Could not get storage pool data" +msgstr "" -#: cinder/virt/xenapi/volumeops.py:227 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:44 #, python-format -msgid "Unable to detach volume %s" -msgstr "Impossibile smontare il volume %s" +msgid "Could not find key in output of command %(cmd)s: %(out)s" +msgstr "" -#: cinder/virt/xenapi/volumeops.py:232 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:64 #, python-format -msgid "Unable to destroy vbd %s" +msgid "Failed to get code level (%s)."
msgstr "" -#: cinder/virt/xenapi/volumeops.py:239 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:86 #, python-format -msgid "Error purging SR %s" +msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:241 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:143 #, python-format -msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s" -msgstr "Mountpoint %(mountpoint)s smontato dall'istanza %(instance_name)s" +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" -#: cinder/vnc/xvp_proxy.py:98 cinder/vnc/xvp_proxy.py:103 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:165 #, python-format -msgid "Error in handshake: %s" +msgid "Failed to find host %s" msgstr "" -#: cinder/vnc/xvp_proxy.py:119 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:178 #, python-format -msgid "Invalid request: %s" +msgid "enter: get_host_from_connector: %s" msgstr "" -#: cinder/vnc/xvp_proxy.py:139 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:207 #, python-format -msgid "Request: %s" +msgid "leave: get_host_from_connector: host %s" msgstr "" -#: cinder/vnc/xvp_proxy.py:142 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:218 #, python-format -msgid "Request made with missing token: %s" +msgid "enter: create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:224 +msgid "create_host: Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:235 +msgid "create_host: No initiators or wwpns supplied." msgstr "" -#: cinder/vnc/xvp_proxy.py:153 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:265 #, python-format -msgid "Request made with invalid token: %s" +msgid "leave: create_host: host %(host)s - %(host_name)s" msgstr "" -#: cinder/vnc/xvp_proxy.py:160 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:275 #, python-format -msgid "Unexpected error: %s" +msgid "enter: map_vol_to_host: volume %(volume_name)s to host %(host_name)s" msgstr "" -#: cinder/vnc/xvp_proxy.py:180 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:301 #, python-format -msgid "Starting cinder-xvpvncproxy node (version %s)" +msgid "" +"leave: map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host " +"%(host_name)s" msgstr "" -#: cinder/volume/api.py:74 cinder/volume/api.py:220 -msgid "status must be available" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:311 +#, python-format +msgid "enter: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" msgstr "" -#: cinder/volume/api.py:85 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:318 #, python-format -msgid "Quota exceeded for %(pid)s, tried to create %(size)sG volume" +msgid "unmap_vol_from_host: No mapping of volume %(vol_name)s to any host found." msgstr "" -#: cinder/volume/api.py:137 -#, fuzzy -msgid "Volume status must be available or error" -msgstr "Lo stato del volume deve essere disponibile" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:324 +#, python-format +msgid "" +"unmap_vol_from_host: Multiple mappings of volume %(vol_name)s found, no " +"host specified." +msgstr "" -#: cinder/volume/api.py:142 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:336 #, python-format -msgid "Volume still has %d dependent snapshots" +msgid "" +"unmap_vol_from_host: No mapping of volume %(vol_name)s to host %(host)s " +"found."
msgstr "" -#: cinder/volume/api.py:223 -msgid "already attached" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:348 +#, python-format +msgid "leave: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" msgstr "" -#: cinder/volume/api.py:230 -msgid "already detached" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:377 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" msgstr "" -#: cinder/volume/api.py:292 -msgid "must be available" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:383 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" msgstr "" -#: cinder/volume/api.py:325 -#, fuzzy -msgid "Volume Snapshot status must be available or error" -msgstr "Lo stato del volume deve essere disponibile" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:390 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:397 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:402 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" -#: cinder/volume/driver.py:96 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:408 #, python-format -msgid "Recovering from a failed execute. Try number %s" +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" msgstr "" -#: cinder/volume/driver.py:106 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:417 #, python-format -msgid "volume group %s doesn't exist" +msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:452 +msgid "Protocol must be specified as ' iSCSI' or ' FC'." msgstr "" -#: cinder/volume/driver.py:270 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:495 #, python-format -msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %d" +msgid "enter: create_vdisk: vdisk %s " msgstr "" -#: cinder/volume/driver.py:318 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:498 #, python-format -msgid "Skipping remove_export. No iscsi_target provisioned for volume: %d" +msgid "leave: _create_vdisk: volume %s " msgstr "" -#: cinder/volume/driver.py:327 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:525 #, python-format msgid "" -"Skipping remove_export. No iscsi_target is presently exported for volume:" -" %d" +"Unexpected mapping status %(status)s for mapping %(id)s. Attributes: " +"%(attr)s" msgstr "" -#: cinder/volume/driver.py:337 -msgid "ISCSI provider_location not stored, using discovery" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:535 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within the allotted %(to)d " +"seconds timeout. Terminating."
msgstr "" -#: cinder/volume/driver.py:384 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:544 #, python-format -msgid "Could not find iSCSI export for volume %s" +msgid "" +"enter: run_flashcopy: execute FlashCopy from source %(source)s to target " +"%(target)s" msgstr "" -#: cinder/volume/driver.py:388 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:552 #, python-format -msgid "ISCSI Discovery: Found %s" +msgid "leave: run_flashcopy: FlashCopy started from %(source)s to %(target)s" msgstr "" -#: cinder/volume/driver.py:466 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:572 #, python-format -msgid "Cannot confirm exported volume id:%(volume_id)s." +msgid "Loopcall: _check_vdisk_fc_mappings(), vdisk %s" msgstr "" -#: cinder/volume/driver.py:493 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:595 #, python-format -msgid "FAKE ISCSI: %s" +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" msgstr "" -#: cinder/volume/driver.py:505 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:631 #, python-format -msgid "rbd has no pool %s" +msgid "Calling _ensure_vdisk_no_fc_mappings: vdisk %s" msgstr "" -#: cinder/volume/driver.py:579 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:639 #, python-format -msgid "Sheepdog is not working: %s" +msgid "enter: delete_vdisk: vdisk %s" msgstr "" -#: cinder/volume/driver.py:581 -msgid "Sheepdog is not working" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:641 +#, python-format +msgid "Tried to delete non-existent vdisk %s." msgstr "" -#: cinder/volume/driver.py:680 cinder/volume/driver.py:685 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:645 #, python-format -msgid "LoggingVolumeDriver: %s" +msgid "leave: delete_vdisk: vdisk %s" msgstr "" -#: cinder/volume/manager.py:96 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:649 #, python-format -msgid "Re-exporting %s volumes" +msgid "enter: create_copy: snapshot %(src)s to %(tgt)s" msgstr "" -#: cinder/volume/manager.py:101 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:654 #, python-format -msgid "volume %s: skipping export" +msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist" msgstr "" -#: cinder/volume/manager.py:107 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:669 #, python-format -msgid "volume %s: creating" -msgstr "volume %s: creazione in corso" +msgid "leave: _create_copy: snapshot %(tgt)s from vdisk %(src)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:691 +msgid "migrate_volume started without a vdisk copy in the expected pool."
+msgstr "" -#: cinder/volume/manager.py:119 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:743 #, python-format -msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgid "" +"Ignore change IO group as storage code level is %(code_level)s, below " +"then 6.4.0.0" msgstr "" -#: cinder/volume/manager.py:131 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:35 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:211 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:244 +#, fuzzy, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" +"%(description)s\n" +"Comando: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:53 #, python-format -msgid "volume %s: creating export" -msgstr "volume %s: creazione in corso per l'esportazione" +msgid "Expected no output from CLI command %(cmd)s, got %(out)s" +msgstr "" -#: cinder/volume/manager.py:144 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:65 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:256 #, python-format -msgid "volume %s: created successfully" -msgstr "volume %s: creato con successo" +msgid "" +"Failed to parse CLI output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" -#: cinder/volume/manager.py:153 -msgid "Volume is still attached" -msgstr "Volume ancora collegato" +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:142 +msgid "Must pass wwpn or host to lsfabric." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:158 +#, python-format +msgid "Did not find success message nor error for %(fun)s: %(out)s" +msgstr "" -#: cinder/volume/manager.py:155 -msgid "Volume is not local to this node" +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:163 +msgid "" +"storwize_svc_multihostmap_enabled is set to False, not allowing multi " +"host mapping." msgstr "" -#: cinder/volume/manager.py:159 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:347 #, python-format -msgid "volume %s: removing export" +msgid "Did not find expected key %(key)s in %(fun)s: %(raw)s" msgstr "" -#: cinder/volume/manager.py:161 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:382 #, python-format -msgid "volume %s: deleting" -msgstr "volume %s: rimuovendo" +msgid "" +"Unexpected CLI response: header/row mismatch. header: %(header)s, row: " +"%(row)s" +msgstr "" -#: cinder/volume/manager.py:164 +#: cinder/volume/drivers/netapp/api.py:419 #, python-format -msgid "volume %s: volume is busy" +msgid "No element by given name %s." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:440 +msgid "Not a valid value for NaElement." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:444 +msgid "NaElement name cannot be null." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:468 +msgid "Type cannot be converted into NaElement." 
+msgstr "" + +#: cinder/volume/drivers/netapp/common.py:75 +msgid "Required configuration not found" msgstr "" -#: cinder/volume/manager.py:176 +#: cinder/volume/drivers/netapp/common.py:103 #, python-format -msgid "volume %s: deleted successfully" -msgstr "volume %s: rimosso con successo" +msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s" +msgstr "" -#: cinder/volume/manager.py:183 +#: cinder/volume/drivers/netapp/common.py:109 #, python-format -msgid "snapshot %s: creating" +msgid "Storage family %s is not supported" msgstr "" -#: cinder/volume/manager.py:187 +#: cinder/volume/drivers/netapp/common.py:116 #, python-format -msgid "snapshot %(snap_name)s: creating" +msgid "No default storage protocol found for storage family %(storage_family)s" msgstr "" -#: cinder/volume/manager.py:202 +#: cinder/volume/drivers/netapp/common.py:123 #, python-format -msgid "snapshot %s: created successfully" +msgid "" +"Protocol %(storage_protocol)s is not supported for storage family " +"%(storage_family)s" msgstr "" -#: cinder/volume/manager.py:211 +#: cinder/volume/drivers/netapp/common.py:130 #, python-format -msgid "snapshot %s: deleting" +msgid "" +"NetApp driver of family %(storage_family)s and protocol " +"%(storage_protocol)s loaded" msgstr "" -#: cinder/volume/manager.py:214 -#, fuzzy, python-format -msgid "snapshot %s: snapshot is busy" -msgstr "istanza %s: creazione snapshot in corso" +#: cinder/volume/drivers/netapp/common.py:139 +msgid "Only loading netapp drivers supported." +msgstr "" -#: cinder/volume/manager.py:226 +#: cinder/volume/drivers/netapp/common.py:158 #, python-format -msgid "snapshot %s: deleted successfully" +msgid "" +"The configured NetApp driver is deprecated. Please refer the link to " +"resolve the issue '%s'." msgstr "" -#: cinder/volume/manager.py:310 -msgid "Checking volume capabilities" +#: cinder/volume/drivers/netapp/iscsi.py:69 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" msgstr "" -#: cinder/volume/manager.py:314 +#: cinder/volume/drivers/netapp/iscsi.py:105 #, python-format -msgid "New capabilities found: %s" +msgid "Using NetApp filer: %s" msgstr "" -#: cinder/volume/manager.py:325 -msgid "Clear capabilities" +#: cinder/volume/drivers/netapp/iscsi.py:150 +msgid "Success getting LUN list from server" msgstr "" -#: cinder/volume/manager.py:329 +#: cinder/volume/drivers/netapp/iscsi.py:166 #, python-format -msgid "Notification {%s} received" +msgid "Created LUN with name %s" msgstr "" -#: cinder/volume/netapp.py:79 +#: cinder/volume/drivers/netapp/iscsi.py:175 #, python-format -msgid "API %(name)sfailed: %(reason)s" +msgid "No entry in LUN table for volume/snapshot %(name)s." 
msgstr "" -#: cinder/volume/netapp.py:109 +#: cinder/volume/drivers/netapp/iscsi.py:191 #, python-format -msgid "%s is not set" +msgid "Destroyed LUN %s" msgstr "" -#: cinder/volume/netapp.py:128 -#, fuzzy -msgid "Connected to DFM server" -msgstr "Riconnesso alla coda" +#: cinder/volume/drivers/netapp/iscsi.py:227 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:232 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" -#: cinder/volume/netapp.py:159 +#: cinder/volume/drivers/netapp/iscsi.py:238 #, python-format -msgid "Job failed: %s" +msgid "Failed to get LUN target details for the LUN %s" msgstr "" -#: cinder/volume/netapp.py:240 -msgid "Failed to provision dataset member" +#: cinder/volume/drivers/netapp/iscsi.py:249 +#, python-format +msgid "Failed to get target portal for the LUN %s" msgstr "" -#: cinder/volume/netapp.py:252 -msgid "No LUN was created by the provision job" +#: cinder/volume/drivers/netapp/iscsi.py:252 +#, python-format +msgid "Failed to get target IQN for the LUN %s" msgstr "" -#: cinder/volume/netapp.py:261 cinder/volume/netapp.py:433 +#: cinder/volume/drivers/netapp/iscsi.py:290 #, fuzzy, python-format -msgid "Failed to find LUN ID for volume %s" -msgstr "Impossibile localizzare il volume %s" +msgid "Snapshot %s deletion successful" +msgstr "volume %s: rimosso con successo" + +#: cinder/volume/drivers/netapp/iscsi.py:310 +#: cinder/volume/drivers/netapp/iscsi.py:565 +#: cinder/volume/drivers/netapp/nfs.py:99 +#: cinder/volume/drivers/netapp/nfs.py:206 +#, python-format +msgid "Resizing %s failed. Cleaning volume." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:325 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:412 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:431 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:511 +msgid "Object is not a NetApp LUN." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:543 +#, python-format +msgid "Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:545 +#, python-format +msgid "Error getting lun attribute. Exception: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:600 +#, python-format +msgid "No need to extend volume %s as it is already the requested new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:606 +#, python-format +msgid "Resizing lun %s directly to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:633 +#, python-format +msgid "Lun %(path)s geometry failed. Message - %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:662 +#, python-format +msgid "Moving lun %(name)s to %(new_name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:677 +#, python-format +msgid "Resizing lun %s using sub clone to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:684 +#, python-format +msgid "%s cannot be sub clone resized as it is hosted on compressed volume" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:690 +#, python-format +msgid "%s cannot be sub clone resized as it contains no blocks." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:707 +#, python-format +msgid "Post clone resize lun %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:718 +#, python-format +msgid "Failure staging lun %s to tmp." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:723 +#, python-format +msgid "Failure moving new cloned lun to %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:727 +#, python-format +msgid "Failure deleting staged tmp lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:730 +#, python-format +msgid "Unknown exception in post clone resize lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:732 +#, python-format +msgid "Exception details: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:736 +msgid "Getting lun block count." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:741 +#, python-format +msgid "Failure getting lun info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:785 +#, python-format +msgid "Failed to get vol with required size and extra specs for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:796 +#, python-format +msgid "Error provisioning vol %(name)s on %(volume)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:841 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:982 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:986 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1087 +msgid "Cluster ssc is not updated. No volume stats found." +msgstr "" -#: cinder/volume/netapp.py:280 -msgid "Failed to remove and delete dataset member" +#: cinder/volume/drivers/netapp/iscsi.py:1149 +#: cinder/volume/drivers/netapp/nfs.py:1080 +msgid "Unsupported ONTAP version. ONTAP version 7.3.1 and above is supported." msgstr "" -#: cinder/volume/netapp.py:603 cinder/volume/netapp.py:657 +#: cinder/volume/drivers/netapp/iscsi.py:1153 +#: cinder/volume/drivers/netapp/nfs.py:1084 +#: cinder/volume/drivers/netapp/utils.py:320 +msgid "Api version could not be determined." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1164 #, fuzzy, python-format -msgid "No LUN ID for volume %s" +msgid "Failed to get vol with required size for volume: %s" msgstr "Impossibile localizzare il volume %s" -#: cinder/volume/netapp.py:607 cinder/volume/netapp.py:661 -#, python-format -msgid "Failed to get LUN details for LUN ID %s" -msgstr "" +#: cinder/volume/drivers/netapp/iscsi.py:1273 +#, python-format +msgid "Error finding luns for volume %s. Verify volume exists." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1390 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1393 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1456 +msgid "Volume refresh job already running. Returning..." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1462 +#, python-format +msgid "Error refreshing vol capacity. Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1470 +#, python-format +msgid "Refreshing capacity info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:104 +#: cinder/volume/drivers/netapp/nfs.py:211 +#, python-format +msgid "NFS file %s not discovered." 
+msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:224 +#, python-format +msgid "Copied image to volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:230 +#, python-format +msgid "Registering image in cache %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:237 +#, python-format +msgid "" +"Exception while registering image %(image_id)s in cache. Exception: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:250 +#, python-format +msgid "Found cache file for image %(image_id)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:263 +#, python-format +msgid "Cloning img from cache for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:271 +msgid "Image cache cleaning in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:282 +msgid "Image cache cleaning in progress." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:293 +#, python-format +msgid "Cleaning cache for share %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:298 +#, python-format +msgid "Files to be queued for deletion %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:305 +#, python-format +msgid "Exception during cache cleaning %(share)s. Message - %(ex)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:311 +msgid "Image cache cleaning done." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:336 +#, python-format +msgid "Bytes to free %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:343 +#, python-format +msgid "Delete file path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:358 +#, python-format +msgid "Deleting file at path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:363 +#, python-format +msgid "Exception during deleting %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:395 +#, python-format +msgid "Unexpected exception in cloning image %(image_id)s. Message: %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:411 +#, python-format +msgid "Cloning image %s from cache" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:415 +#, python-format +msgid "Cache share: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:425 +#, python-format +msgid "Unexpected exception during image cloning in share %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:431 +#, python-format +msgid "Cloning image %s directly in share" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:436 +#, python-format +msgid "Share is cloneable %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:443 +#, python-format +msgid "Image is raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:450 +#, python-format +msgid "Image will locally be converted to raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:457 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:467 +#, python-format +msgid "Performing post clone for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:474 +msgid "NFS file could not be discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:478 +msgid "Checking file for resize" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:482 +#, python-format +msgid "Resizing file to %sG" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:488 +msgid "Resizing image file failed." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:510 +msgid "Discover file retries exhausted." 
+msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:529 +#, python-format +msgid "Image location not in the expected format %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:557 +#, python-format +msgid "Found possible share matches %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:561 +msgid "Unexpected exception while short listing used share." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:599 +#, python-format +msgid "Extending volume %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:710 +#, python-format +msgid "Shares on vserver %s will only be used for provisioning." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:714 +#: cinder/volume/drivers/netapp/nfs.py:892 +msgid "No vserver set in config. SSC will be disabled." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:757 +#, python-format +msgid "Exception creating vol %(name)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:765 +#, python-format +msgid "Volume %s could not be created on shares." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:815 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:856 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:862 +#, python-format +msgid "" +"Cloning with params volume %(volume)s, src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:924 +msgid "No cluster ssc stats found. Wait for next volume stats update." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:941 +msgid "No shares found hence skipping ssc refresh." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:978 +#: cinder/volume/drivers/netapp/nfs.py:1221 +#, python-format +msgid "Shortlisted del elg files %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:983 +#: cinder/volume/drivers/netapp/nfs.py:1226 +#, python-format +msgid "Getting file usage for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:988 +#: cinder/volume/drivers/netapp/nfs.py:1231 +#, python-format +msgid "file-usage for path %(path)s is %(bytes)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1005 +#: cinder/volume/drivers/netapp/nfs.py:1268 +#, python-format +msgid "Share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1007 +#: cinder/volume/drivers/netapp/nfs.py:1270 +#, python-format +msgid "No share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1038 +#, python-format +msgid "Found volume %(vol)s for share %(share)s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1129 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1139 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:241 +#, python-format +msgid "Unexpected error while creating ssc vol list. Message - %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:272 +#, python-format +msgid "Exception querying aggr options. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:313 +#, python-format +msgid "Exception querying sis information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:347 +#, python-format +msgid "Exception querying mirror information. 
%s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:379 +#, python-format +msgid "Exception querying storage disk. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:421 +#, python-format +msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:455 +#, python-format +msgid "Successfully completed stale refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:482 +#, python-format +msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:488 +#, python-format +msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:500 +msgid "Backend not a VolumeDriver." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:502 +msgid "Backend server not NaServer." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:505 +msgid "ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:517 +msgid "refresh stale ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:616 +msgid "Fatal error: User not permitted to query NetApp volumes." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:623 +#, python-format +msgid "" +"The user does not have access or sufficient privileges to use all ssc " +"apis. The ssc features %s may not work as expected." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:122 +msgid "ems executed successfully." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:124 +#, python-format +msgid "Failed to invoke ems. Message : %s" +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:137 +msgid "" +"It is not the recommended way to use drivers by NetApp. Please use " +"NetAppDriver to achieve the functionality." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:160 +msgid "Requires an NaServer instance." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:317 +msgid "Unsupported Clustered Data ONTAP version." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:99 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:150 +#, python-format +msgid "Extending volume: %(id)s New size: %(size)s GB" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:166 +#, python-format +msgid "Volume %s does not exist, it seems it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:179 +#, python-format +msgid "Cannot delete snapshot %(origin): %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:190 +#, python-format +msgid "Creating temp snapshot of the original volume: %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:200 +#: cinder/volume/drivers/nexenta/nfs.py:200 +#, python-format +msgid "Volume creation failed, deleting created snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:205 +#: cinder/volume/drivers/nexenta/nfs.py:205 +#, python-format +msgid "Failed to delete zfs snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:223 +#, python-format +msgid "Enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:250 +#, python-format +msgid "Remote NexentaStor appliance at %s should be SSH-bound." 
+msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:267 +#, python-format +msgid "" +"Cannot send source snapshot %(src)s to destination %(dst)s. Reason: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:275 +#, python-format +msgid "" +"Cannot delete temporary source snapshot %(src)s on NexentaStor Appliance:" +" %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:281 +#, python-format +msgid "Cannot delete source volume %(volume)s on NexentaStor Appliance: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:318 +#, python-format +msgid "Snapshot %s does not exist, it seems it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:439 +#: cinder/volume/drivers/windows/windows_utils.py:230 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:449 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:461 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:471 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:481 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:514 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:522 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:83 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:88 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:89 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:90 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:96 +#, fuzzy, python-format +msgid "Got response: %s" +msgstr "risposta %s" + +#: cinder/volume/drivers/nexenta/nfs.py:85 +#, python-format +msgid "Volume %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:89 +#, python-format +msgid "Folder %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:114 +#, python-format +msgid "Creating folder on Nexenta Store %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:146 +#, python-format +msgid "Cannot destroy created folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:176 +#, python-format +msgid "Cannot destroy cloned folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:227 +#, python-format +msgid "Folder %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:237 +#: cinder/volume/drivers/nexenta/nfs.py:268 +#, python-format +msgid "Snapshot %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:302 +#, python-format +msgid "Creating regular file: %s.This may take some time." 
+msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:313 +#, python-format +msgid "Regular file: %s created." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:365 +#, python-format +msgid "Sharing folder %s on Nexenta Store" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:393 +#, python-format +msgid "Shares loaded: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/utils.py:46 +#, python-format +msgid "Invalid value: \"%s\"" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:93 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:99 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:107 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:137 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:190 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:246 +#, python-format +msgid "Snapshot info: %(name)s => %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:321 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:79 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:166 +#, python-format +msgid "Invalid hp3parclient version. Version %s or greater required." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:179 +#, python-format +msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:193 +#, python-format +msgid "HP3PARCommon %(common_ver)s, hp3parclient %(rest_ver)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:212 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:494 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:228 +#, python-format +msgid "Failed to get domain because CPG (%s) doesn't exist on array." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:247 +#, python-format +msgid "Error extending volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:347 +#, python-format +msgid "command %s failed" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:390 +#, fuzzy, python-format +msgid "Error running ssh command: %s" +msgstr "Si e' verificato un errore inatteso durante l'esecuzione del comando." + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:586 +#, python-format +msgid "VV Set %s does not exist." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:633 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:684 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:752 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1004 +#, python-format +msgid "Failure in update_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1019 +#, python-format +msgid "Failure in clear_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1031 +#, python-format +msgid "Error attaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1039 +#, python-format +msgid "Error detaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:124 +#, python-format +msgid "Invalid IP address format '%s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:158 +#, python-format +msgid "" +"Found invalid iSCSI IP address(s) in configuration option(s) " +"hp3par_iscsi_ips or iscsi_ip_address '%s.'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:164 +msgid "At least one valid iSCSI IP address must be set." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:266 +msgid "Least busy iSCSI port not found, using first iSCSI port in list." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:75 +#, python-format +msgid "Failure while invoking function: %(func)s. Error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:162 +#, python-format +msgid "Error while terminating session: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:165 +msgid "Successfully established connection to the server." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:172 +#, python-format +msgid "Error while logging out the user: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:218 +#, python-format +msgid "" +"Not authenticated error occurred. Will create session and try API call " +"again: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:258 +#, python-format +msgid "Task: %(task)s progress: %(prog)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:262 +#, python-format +msgid "Task %s status: success." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:266 +#: cinder/volume/drivers/vmware/api.py:271 +#, python-format +msgid "Task: %(task)s failed with error: %(err)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:290 +msgid "Lease is ready." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:294 +msgid "Lease initializing..." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:304 +#, python-format +msgid "Error: unknown lease state %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:51 +#, python-format +msgid "Read %(bytes)s out of %(max)s from ThreadSafePipe." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:56 +#, python-format +msgid "Completed transfer of size %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:102 +#, python-format +msgid "Initiating image service update on image: %(image)s with meta: %(meta)s" +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:117 +#, python-format +msgid "Glance image: %s is now active." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:123 +#, python-format +msgid "Glance image: %s is in killed state." 
+msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:132 +#, python-format +msgid "Glance image %(id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:171 +#, python-format +msgid "" +"Exception during HTTP connection close in VMwareHTTPWrite. Exception is " +"%s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:203 +#: cinder/volume/drivers/vmware/read_write_util.py:292 +msgid "Could not retrieve URL from lease." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:206 +#, python-format +msgid "Opening vmdk url: %s for write." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:231 +#, python-format +msgid "Written %s bytes to vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:242 +#: cinder/volume/drivers/vmware/read_write_util.py:318 +#, python-format +msgid "Updating progress to %s percent." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:258 +#: cinder/volume/drivers/vmware/read_write_util.py:334 +msgid "Lease released." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:260 +#: cinder/volume/drivers/vmware/read_write_util.py:336 +#, python-format +msgid "Lease is already in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:295 +#, python-format +msgid "Opening vmdk url: %s for read." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:307 +#, python-format +msgid "Read %s bytes from vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:150 +#, python-format +msgid "Error(s): %s occurred in the call to RetrievePropertiesEx." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:189 +#, python-format +msgid "No such SOAP method %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:198 +#, python-format +msgid "httplib error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:209 +#, python-format +msgid "Socket error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:218 +#, python-format +msgid "Type error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:225 +#, python-format +msgid "Error in %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:112 +#, python-format +msgid "Returning spec value %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:115 +#, python-format +msgid "Invalid spec value: %s specified." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:118 +#, python-format +msgid "Returning default spec value: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:169 +#, python-format +msgid "%s not set." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:174 +#, python-format +msgid "Successfully setup driver: %(driver)s for server: %(ip)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:219 +msgid "Backing not available, no operation to be performed." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:287 +#, python-format +msgid "" +"Unable to pick datastore to accommodate %(size)s bytes from the " +"datastores: %(dss)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:293 +#, python-format +msgid "" +"Selected datastore: %(datastore)s with %(host_count)d connected host(s) " +"for the volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:375 +#, python-format +msgid "" +"Unable to find suitable datastore for volume of size: %(vol)s GB under " +"host: %(host)s. 
More details: %(excep)s"
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:385
+#, python-format
+msgid "Unable to find host to accommodate a disk of size: %s in the inventory."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:412
+#, python-format
+msgid ""
+"Unable to find suitable datastore for volume: %(vol)s under host: "
+"%(host)s. More details: %(excep)s"
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:422
+#, python-format
+msgid "Unable to create volume: %s in the inventory."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:441
+#, python-format
+msgid "The instance: %s for which initialize connection is called, exists."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:448
+#, python-format
+msgid "There is no backing for the volume: %s. Need to create one."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:456
+msgid "The instance for which initialize connection is called, does not exist."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:461
+#, python-format
+msgid "Trying to boot from an empty volume: %s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:470
+#, python-format
+msgid ""
+"Returning connection_info: %(info)s for volume: %(volume)s with "
+"connector: %(connector)s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:518
+#, python-format
+msgid "Snapshot of volume not supported in state: %s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:523
+#, python-format
+msgid "There is no backing, so will not create snapshot: %s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:528
+#, python-format
+msgid "Successfully created snapshot: %s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:549
+#, python-format
+msgid "Delete snapshot of volume not supported in state: %s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:554
+#, python-format
+msgid "There is no backing, and so there is no snapshot: %s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:558
+#, python-format
+msgid "Successfully deleted snapshot: %s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:586
+#, python-format
+msgid "Successfully cloned new backing: %(back)s from source VMDK file: %(vmdk)s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:603
+#, python-format
+msgid ""
+"There is no backing for the source volume: %(svol)s. Not creating any "
+"backing for the volume: %(vol)s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:633
+#, python-format
+msgid ""
+"There is no backing for the source snapshot: %(snap)s. Not creating any "
+"backing for the volume: %(vol)s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:642
+#: cinder/volume/drivers/vmware/vmdk.py:982
+#, python-format
+msgid ""
+"There is no snapshot point for the snapshotted volume: %(snap)s. Not "
+"creating any backing for the volume: %(vol)s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:678
+#, python-format
+msgid "Cannot create image of disk format: %s. Only vmdk disk format is accepted."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:713
+#: cinder/volume/drivers/vmware/vmdk.py:771
+#, python-format
+msgid "Fetching glance image: %(id)s to server: %(host)s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:722
+#: cinder/volume/drivers/vmware/vmdk.py:792
+#, python-format
+msgid "Done copying image: %(id)s to volume: %(vol)s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:725
+#, python-format
+msgid ""
+"Exception in copy_image_to_volume: %(excep)s. Deleting the backing: "
+"%(back)s."
+msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:746 +#, python-format +msgid "Exception in _select_ds_for_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:749 +#, python-format +msgid "Selected datastore %(ds)s for new volume of size %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:784 +#, python-format +msgid "Exception in copy_image_to_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:787 +#, python-format +msgid "Deleting the backing: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:808 +#, python-format +msgid "Copy glance image: %s to create new volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:842 +msgid "Upload to glance of attached volume is not supported." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:847 +#, python-format +msgid "Copy Volume: %s to new image." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:853 +#, python-format +msgid "Backing not found, creating for volume: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:872 +#, python-format +msgid "Done copying volume %(vol)s to a new image %(img)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:922 +#, python-format +msgid "Relocating volume: %(backing)s to %(ds)s and %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:961 +#: cinder/volume/drivers/vmware/volumeops.py:630 +#, python-format +msgid "Successfully created clone: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:974 +#, python-format +msgid "" +"There is no backing for the snapshoted volume: %(snap)s. Not creating any" +" backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1010 +#, python-format +msgid "" +"There is no backing for the source volume: %(src)s. Not creating any " +"backing for volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1018 +#, python-format +msgid "Linked clone of source volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:94 +#, python-format +msgid "Downloading image: %s from glance image server as a flat vmdk file." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:107 +#: cinder/volume/drivers/vmware/vmware_images.py:126 +#, python-format +msgid "Downloaded image: %s from glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:113 +#, python-format +msgid "Downloading image: %s from glance image server using HttpNfc import." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:132 +#, python-format +msgid "Uploading image: %s to the Glance image server using HttpNfc export." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:158 +#, python-format +msgid "Uploaded image: %s to the Glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:87 +#, python-format +msgid "Did not find any backing with name: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:94 +#, python-format +msgid "Deleting the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:97 +#, python-format +msgid "Initiated deletion of VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:99 +#, python-format +msgid "Deleted the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:237 +#, python-format +msgid "There are no valid datastores attached to %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:289 +#, python-format +msgid "" +"Creating folder: %(child_folder_name)s under parent folder: " +"%(parent_folder)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:306 +#, python-format +msgid "Child folder already present: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:314 +#, python-format +msgid "Created child folder: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:365 +#, python-format +msgid "Spec for creating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:383 +#, python-format +msgid "" +"Creating volume backing name: %(name)s disk_type: %(disk_type)s size_kb: " +"%(size_kb)s at folder: %(folder)s resourse pool: %(resource_pool)s " +"datastore name: %(ds_name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:395 +#, python-format +msgid "Initiated creation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:398 +#, python-format +msgid "Successfully created volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:438 +#, python-format +msgid "Spec for relocating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:452 +#, python-format +msgid "" +"Relocating backing: %(backing)s to datastore: %(ds)s and resource pool: " +"%(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:462 +#, python-format +msgid "Initiated relocation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:464 +#, python-format +msgid "" +"Successfully relocated volume backing: %(backing)s to datastore: %(ds)s " +"and resource pool: %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:474 +#, python-format +msgid "Moving backing: %(backing)s to folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:479 +#, python-format +msgid "Initiated move of volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:482 +#, python-format +msgid "Successfully moved volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:494 +#, python-format +msgid "Snapshoting backing: %(backing)s with name: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:501 +#, python-format +msgid "Initiated snapshot of volume backing: %(backing)s named: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:505 +#, python-format +msgid "Successfully created snapshot: %(snap)s for volume backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:553 +#, python-format +msgid "Deleting the snapshot: %(name)s from backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:558 +#, python-format +msgid "" +"Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " +"delete anything." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:565 +#, python-format +msgid "Initiated snapshot: %(name)s deletion for backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:569 +#, python-format +msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:597 +#, python-format +msgid "Spec for cloning the backing: %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:613 +#, python-format +msgid "" +"Creating a clone of backing: %(back)s, named: %(name)s, clone type: " +"%(type)s from snapshot: %(snap)s on datastore: %(ds)s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:627 +#, python-format +msgid "Initiated clone of backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:638 +#, python-format +msgid "Deleting file: %(file)s under datacenter: %(dc)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:646 +#, python-format +msgid "Initiated deletion via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:648 +#, python-format +msgid "Successfully deleted file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:701 +msgid "Copying disk data before snapshot of the VM" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:711 +#, python-format +msgid "Initiated copying disk data via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:713 +#, python-format +msgid "Successfully copied disk at: %(src)s to: %(dest)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:722 +#, python-format +msgid "Deleting vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:729 +#, python-format +msgid "Initiated deleting vmdk file via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:731 +#, python-format +msgid "Deleted vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/windows/windows.py:102 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:47 +#, python-format +msgid "" +"check_for_setup_error: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:53 +msgid "check_for_setup_error: there is no ISCSI traffic listening." +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:63 +#, python-format +msgid "" +"get_host_information: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:73 +#, python-format +msgid "" +"get_host_information: the ISCSI target information could not be " +"retrieved. WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:105 +#, python-format +msgid "" +"associate_initiator_with_iscsi_target: an association between initiator: " +"%(init)s and target name: %(target)s could not be established. WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:123 +#, python-format +msgid "" +"delete_iscsi_target: error when deleting the iscsi target associated with" +" target name: %(target)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:139 +#, python-format +msgid "" +"create_volume: error when creating the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:157 +#, python-format +msgid "" +"delete_volume: error when deleting the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:177 +#, python-format +msgid "" +"create_snapshot: error when creating the snapshot name: %(vol_name)s . 
" +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:193 +#, python-format +msgid "" +"create_volume_from_snapshot: error when creating the volume name: " +"%(vol_name)s from snapshot name: %(snap_name)s. WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:208 +#, python-format +msgid "" +"delete_snapshot: error when deleting the snapshot name: %(snap_name)s . " +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:223 +#, python-format +msgid "" +"create_iscsi_target: error when creating iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:240 +#, python-format +msgid "" +"remove_iscsi_target: error when deleting iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:255 +#, python-format +msgid "" +"add_disk_to_target: error adding disk associated to volume : %(vol_name)s" +" to the target name: %(tar_name)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:273 +#, python-format +msgid "" +"copy_vhd_disk: error when copying disk from source path : %(src_path)s to" +" destination path: %(dest_path)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:290 +#, python-format +msgid "" +"extend: error when extending the volume: %(vol_name)s .WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/flows/common.py:52 +#, python-format +msgid "Restoring source %(source_volid)s status to %(status)s" +msgstr "" + +#: cinder/volume/flows/common.py:58 +#, python-format +msgid "" +"Failed setting source volume %(source_volid)s back to its initial " +"%(source_status)s status" +msgstr "" + +#: cinder/volume/flows/common.py:83 +#, python-format +msgid "Updating volume: %(volume_id)s with %(update)s due to: %(reason)s" +msgstr "" + +#: cinder/volume/flows/common.py:90 +#: cinder/volume/flows/manager/create_volume.py:676 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(update)s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:81 +#, python-format +msgid "Originating snapshot status must be one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:103 +#, python-format +msgid "" +"Unable to create a volume from an originating source volume when its " +"status is not one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:126 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than the snapshot size " +"%(snap_size)sGB. They must be >= original snapshot size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:135 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than original volume size " +"%(source_size)sGB. They must be >= original volume size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:144 +#, python-format +msgid "Volume size %(size)s must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:186 +#, python-format +msgid "" +"Size of specified image %(image_size)sGB is larger than volume size " +"%(volume_size)sGB." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:194 +#, python-format +msgid "" +"Volume size %(volume_size)sGB cannot be smaller than the image minDisk " +"size %(min_disk)sGB." 
+msgstr "" + +#: cinder/volume/flows/api/create_volume.py:212 +#, python-format +msgid "Metadata property key %s greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:217 +#, python-format +msgid "Metadata property key %s value greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:254 +#, python-format +msgid "Availability zone '%s' is invalid" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:267 +msgid "Volume must be in the same availability zone as the snapshot" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:276 +msgid "Volume must be in the same availability zone as the source volume" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:315 +msgid "Volume type will be changed to be the same as the source volume." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:463 +#, python-format +msgid "Failed destroying volume entry %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:546 +#, python-format +msgid "Failed rolling back quota for %s reservations" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:590 +#, python-format +msgid "Failed to update quota for deleting volume: %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:678 +#: cinder/volume/flows/manager/create_volume.py:192 +#, python-format +msgid "Volume %s: create failed" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:682 +msgid "Unexpected build error:" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:105 +#, python-format +msgid "" +"Volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d due to " +"%(reason)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:124 +#, python-format +msgid "Volume %s: re-scheduled" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:141 +#, python-format +msgid "Updating volume %(volume_id)s with %(update)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:146 +#, python-format +msgid "Volume %s: resetting 'creating' status failed." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:165 +#, python-format +msgid "Volume %s: rescheduling failed" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:308 +#, python-format +msgid "" +"Failed notifying about the volume action %(event)s for volume " +"%(volume_id)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:345 +#, python-format +msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:347 +#, python-format +msgid "" +"Failed updating volume %(vol_id)s metadata using the provided " +"%(src_type)s %(src_id)s metadata" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:405 +#, python-format +msgid "" +"Failed fetching snapshot %(snapshot_id)s bootable flag using the provided" +" glance snapshot %(snapshot_ref_id)s volume reference" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:418 +#, python-format +msgid "Marking volume %s as bootable." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:421 +#, python-format +msgid "Failed updating volume %(volume_id)s bootable flag to true" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:448 +#, python-format +msgid "" +"Attempting download of %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s." 
+msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:455 +#: cinder/volume/flows/manager/create_volume.py:466 +#, python-format +msgid "" +"Failed to copy image %(image_id)s to volume: %(volume_id)s, error: " +"%(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:461 +#, python-format +msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:475 +#, python-format +msgid "" +"Downloaded image %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s successfully." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:512 +#, python-format +msgid "" +"Creating volume glance metadata for volume %(volume_id)s backed by image " +"%(image_id)s with: %(vol_metadata)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:526 +#, python-format +msgid "" +"Cloning %(volume_id)s from image %(image_id)s at location " +"%(image_location)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:552 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(updates)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:574 +#, python-format +msgid "Unable to create volume. Volume driver %s not initialized" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:588 +#, python-format +msgid "" +"Volume %(volume_id)s: being created using %(functor)s with specification:" +" %(volume_spec)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:611 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with creation provided " +"model %(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:619 +#, python-format +msgid "Volume %s: creating export" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:633 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with driver provided model " +"%(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:680 +#, python-format +msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" +msgstr "" + +#~ msgid "Error retrieving volume status: %s" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get system name" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get storage pool data" +#~ msgstr "" + +#~ msgid "Cannot find any Fibre Channel HBAs" +#~ msgstr "" + +#~ msgid "Volume status must be available or error" +#~ msgstr "Lo stato del volume deve essere disponibile" + +#~ msgid "No backend config with id %s" +#~ msgstr "" + +#~ msgid "No sm_flavor called %s" +#~ msgstr "" + +#~ msgid "No sm_volume with id %s" +#~ msgstr "" + +#~ msgid "Error: %s" +#~ msgstr "" + +#~ msgid "Unexpected state while cloning %s" +#~ msgstr "Si e' verificato un errore inatteso durante l'esecuzione del comando." + +#~ msgid "iSCSI device not found at %s" +#~ msgstr "" + +#~ msgid "Fibre Channel device not found." 
+#~ msgstr "" + +#~ msgid "Uncaught exception" +#~ msgstr "" + +#~ msgid "Out reactor registered" +#~ msgstr "" + +#~ msgid "CONSUMER GOT %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT QUEUED %(data)s" +#~ msgstr "" + +#~ msgid "Could not create IPC directory %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT %(data)s" +#~ msgstr "" + +#~ msgid "May specify only one of snapshot, imageRef or source volume" +#~ msgstr "" + +#~ msgid "Volume size cannot be lesser than the Snapshot size" +#~ msgstr "" + +#~ msgid "Unable to clone volumes that are in an error state" +#~ msgstr "" + +#~ msgid "Clones currently must be >= original volume size." +#~ msgstr "" + +#~ msgid "Volume size '%s' must be an integer and greater than 0" +#~ msgstr "" + +#~ msgid "Size of specified image is larger than volume size." +#~ msgstr "" + +#~ msgid "Image minDisk size is larger than the volume size." +#~ msgstr "" + +#~ msgid "" +#~ msgstr "" + +#~ msgid "Availability zone is invalid" +#~ msgstr "" + +#~ msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +#~ msgstr "" + +#~ msgid "volume %s: creating from snapshot" +#~ msgstr "volume %s: creazione in corso per l'esportazione" + +#~ msgid "volume %s: creating from existing volume" +#~ msgstr "volume %s: creazione in corso per l'esportazione" + +#~ msgid "volume %s: creating from image" +#~ msgstr "volume %s: creazione in corso" + +#~ msgid "volume %s: creating" +#~ msgstr "volume %s: creazione in corso" + +#~ msgid "Setting volume: %s status to error after failed image copy." +#~ msgstr "" + +#~ msgid "Unexpected Error: " +#~ msgstr "" + +#~ msgid "volume %s: creating export" +#~ msgstr "volume %s: creazione in corso per l'esportazione" + +#~ msgid "volume %s: create failed" +#~ msgstr "volume %s: creazione in corso" + +#~ msgid "volume %s: created successfully" +#~ msgstr "volume %s: creato con successo" + +#~ msgid "volume %s: Error trying to reschedule create" +#~ msgstr "" + +#~ msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +#~ msgstr "" + +#~ msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +#~ msgstr "" + +#~ msgid "Downloaded image %(image_id)s to %(volume_id)s successfully." +#~ msgstr "" + +#~ msgid "Array Mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "LUN %(lun)s of size %(size)s MB is created." +#~ msgstr "" + +#~ msgid "Array mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "Failed to attach iser target for volume %(volume_id)s." 
+#~ msgstr "" + +#~ msgid "Fetching %s" +#~ msgstr "Prelievo %s" + +#~ msgid "Link Local address is not found.:%s" +#~ msgstr "" + +#~ msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +#~ msgstr "" + +#~ msgid "Started %(name)s on %(_host)s:%(_port)s" +#~ msgstr "" + +#~ msgid "Unable to find a Fibre Channel volume device" +#~ msgstr "" + +#~ msgid "Volume device not found at %s" +#~ msgstr "" + +#~ msgid "Unable to find Volume Group: %s" +#~ msgstr "Impossibile smontare il volume %s" + +#~ msgid "Failed to create Volume Group: %s" +#~ msgstr "Impossibile localizzare il volume %s" + +#~ msgid "snapshot %(snap_name)s: creating" +#~ msgstr "" + +#~ msgid "Running with CoraidDriver for ESM EtherCLoud" +#~ msgstr "" + +#~ msgid "Update session cookie %(session)s" +#~ msgstr "" + +#~ msgid "Message : %(message)s" +#~ msgstr "" + +#~ msgid "Error while trying to set group: %(message)s" +#~ msgstr "" + +#~ msgid "Unable to find group: %(group)s" +#~ msgstr "Impossibile smontare il volume %s" + +#~ msgid "ESM urlOpen error" +#~ msgstr "" + +#~ msgid "JSON Error" +#~ msgstr "" + +#~ msgid "Request without URL" +#~ msgstr "" + +#~ msgid "Configure data : %s" +#~ msgstr "" + +#~ msgid "Configure response : %s" +#~ msgstr "risposta %s" + +#~ msgid "Unable to retrive volume infos for volume %(volname)s" +#~ msgstr "" + +#~ msgid "Cannot login on Coraid ESM" +#~ msgstr "" + +#~ msgid "Fail to create volume %(volname)s" +#~ msgstr "Impossibile localizzare il volume %s" + +#~ msgid "Failed to delete volume %(volname)s" +#~ msgstr "Impossibile localizzare il volume %s" + +#~ msgid "Failed to Create Snapshot %(snapname)s" +#~ msgstr "Impossibile riavviare l'istanza" + +#~ msgid "Failed to Delete Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "Failed to Create Volume from Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "fmt = %(fmt)s backed by: %(backing_file)s" +#~ msgstr "" + +#~ msgid "Expected image to be in raw format, but is %s" +#~ msgstr "" + +#~ msgid "volume group %s doesn't exist" +#~ msgstr "" + +#~ msgid "Error retrieving volume stats: %s" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Could not get system name" +#~ msgstr "" + +#~ msgid "CPG (%s) must be in a domain" +#~ msgstr "" + +#~ msgid "Error populating default encryption types!" +#~ msgstr "" + +#~ msgid "Unexpected error while running command." +#~ msgstr "Si e' verificato un errore inatteso durante l'esecuzione del comando." 
+ +#~ msgid "Nexenta SA returned the error" +#~ msgstr "" + +#~ msgid "Ignored target group creation error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group member addition error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LU creation error \"%s\" while ensuring export" +#~ msgstr "" -#: cinder/volume/netapp.py:614 -#, python-format -msgid "Failed to get host details for host ID %s" -msgstr "" +#~ msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +#~ msgstr "" -#: cinder/volume/netapp.py:620 -#, python-format -msgid "Failed to get target portal for filer: %s" -msgstr "" +#~ msgid "Copying metadata from snapshot %(snap_volume_id)s to %(volume_id)s" +#~ msgstr "" -#: cinder/volume/netapp.py:625 -#, python-format -msgid "Failed to get target IQN for filer: %s" -msgstr "" +#~ msgid "Connection to glance failed" +#~ msgstr "" -#: cinder/volume/san.py:113 cinder/volume/san.py:151 -msgid "Specify san_password or san_private_key" -msgstr "" +#~ msgid "Invalid snapshot" +#~ msgstr "" -#: cinder/volume/san.py:156 -msgid "san_ip must be set" -msgstr "" +#~ msgid "Invalid input received" +#~ msgstr "E' stato ricevuto un input non valido" -#: cinder/volume/san.py:320 -#, python-format -msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" -msgstr "" +#~ msgid "Invalid volume type" +#~ msgstr "Tipo del volume non valido" -#: cinder/volume/san.py:452 -#, python-format -msgid "CLIQ command returned %s" -msgstr "" +#~ msgid "Invalid volume" +#~ msgstr "Volume non valido" -#: cinder/volume/san.py:458 -#, python-format -msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" -msgstr "" +#~ msgid "Invalid host" +#~ msgstr "" -#: cinder/volume/san.py:466 -#, python-format -msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" -msgstr "" +#~ msgid "Invalid auth key" +#~ msgstr "E' stato ricevuto un input non valido" -#: cinder/volume/san.py:496 -#, python-format -msgid "" -"Unexpected number of virtual ips for cluster %(cluster_name)s. " -"Result=%(_xml)s" -msgstr "" +#~ msgid "Invalid metadata" +#~ msgstr "" -#: cinder/volume/san.py:549 -#, python-format -msgid "Volume info: %(volume_name)s => %(volume_attributes)s" -msgstr "" +#~ msgid "Invalid metadata size" +#~ msgstr "" -#: cinder/volume/san.py:594 -msgid "local_path not supported" -msgstr "" +#~ msgid "Migration error" +#~ msgstr "" -#: cinder/volume/san.py:626 -#, python-format -msgid "Could not determine project for volume %s, can't export" -msgstr "" +#~ msgid "Quota exceeded" +#~ msgstr "" -#: cinder/volume/san.py:696 -#, python-format -msgid "Payload for SolidFire API call: %s" -msgstr "" +#~ msgid "Connection to swift failed" +#~ msgstr "" -#: cinder/volume/san.py:713 -#, python-format -msgid "Call to json.loads() raised an exception: %s" -msgstr "" +#~ msgid "Volume migration failed" +#~ msgstr "" -#: cinder/volume/san.py:718 -#, python-format -msgid "Results of SolidFire API call: %s" -msgstr "" +#~ msgid "SSH command injection detected" +#~ msgstr "" -#: cinder/volume/san.py:732 -#, python-format -msgid "Found solidfire account: %s" -msgstr "" +#~ msgid "Invalid qos specs" +#~ msgstr "" -#: cinder/volume/san.py:746 -#, python-format -msgid "solidfire account: %s does not exist, create it..." -msgstr "" +#~ msgid "debug in callback: %s" +#~ msgstr "debug in callback: %s" -#: cinder/volume/san.py:804 -msgid "Enter SolidFire create_volume..." 
-msgstr "" +#~ msgid "Expected object of type: %s" +#~ msgstr "" -#: cinder/volume/san.py:846 -msgid "Leaving SolidFire create_volume" -msgstr "" +#~ msgid "timefunc: '%(name)s' took %(total_time).2f secs" +#~ msgstr "" -#: cinder/volume/san.py:861 -msgid "Enter SolidFire delete_volume..." -msgstr "" +#~ msgid "base image still has %s snapshots so not deleting base image" +#~ msgstr "" -#: cinder/volume/san.py:880 -#, python-format -msgid "Deleting volumeID: %s " -msgstr "" +#~ msgid "Failed to rename migration destination volume %(vol)s to %(name)s" +#~ msgstr "" -#: cinder/volume/san.py:888 -msgid "Leaving SolidFire delete_volume" -msgstr "" +#~ msgid "Resize volume \"%(name)s\" to %(size)s" +#~ msgstr "" -#: cinder/volume/san.py:891 -msgid "Executing SolidFire ensure_export..." -msgstr "" +#~ msgid "Volume \"%(name)s\" resized. New size is %(size)s" +#~ msgstr "" -#: cinder/volume/san.py:895 -msgid "Executing SolidFire create_export..." -msgstr "" +#~ msgid "Invalid snapshot backing file format: %s" +#~ msgstr "" -#: cinder/volume/volume_types.py:49 cinder/volume/volume_types.py:108 -msgid "name cannot be None" -msgstr "" +#~ msgid "Extend volume from %(old_size) to %(new_size)" +#~ msgstr "" -#: cinder/volume/volume_types.py:96 -msgid "id cannot be None" -msgstr "" +#~ msgid "pool %s doesn't exist" +#~ msgstr "" -#: cinder/volume/xensm.py:55 -#, python-format -msgid "SR name = %s" -msgstr "" +#~ msgid "_update_volume_stats: Could not get system name." +#~ msgstr "" -#: cinder/volume/xensm.py:56 -#, python-format -msgid "Params: %s" -msgstr "" +#~ msgid "Disk not found: %s" +#~ msgstr "" -#: cinder/volume/xensm.py:60 -#, python-format -msgid "Failed to create sr %s...continuing" -msgstr "" +#~ msgid "read timed out" +#~ msgstr "" -#: cinder/volume/xensm.py:62 -msgid "Create failed" -msgstr "" +#~ msgid "check_for_setup_error." +#~ msgstr "" -#: cinder/volume/xensm.py:64 -#, python-format -msgid "SR UUID of new SR is: %s" -msgstr "" +#~ msgid "check_for_setup_error: Can not get device type." +#~ msgstr "" -#: cinder/volume/xensm.py:71 -msgid "Failed to update db" -msgstr "" +#~ msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +#~ msgstr "" -#: cinder/volume/xensm.py:80 -#, python-format -msgid "Failed to introduce sr %s...continuing" -msgstr "" +#~ msgid "_get_device_type: Storage Pool must be configured." +#~ msgstr "" -#: cinder/volume/xensm.py:91 -#, python-format -msgid "Failed to reach backend %d" -msgstr "" +#~ msgid "create_volume:volume name: %s." +#~ msgstr "" -#: cinder/volume/xensm.py:100 -msgid "XenSMDriver requires xenapi connection" -msgstr "" +#~ msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +#~ msgstr "" -#: cinder/volume/xensm.py:110 -msgid "Failed to initiate session" -msgstr "" +#~ msgid "create_export: volume name:%s" +#~ msgstr "" -#: cinder/volume/xensm.py:142 -#, python-format -msgid "Volume will be created in backend - %d" -msgstr "" +#~ msgid "create_export:Volume %(name)s does not exist." +#~ msgstr "" -#: cinder/volume/xensm.py:154 -msgid "Failed to update volume in db" -msgstr "" +#~ msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +#~ msgstr "" -#: cinder/volume/xensm.py:157 -msgid "Unable to create volume" -msgstr "" +#~ msgid "terminate_connection:Host does not exist. Host name:%(host)s." +#~ msgstr "" -#: cinder/volume/xensm.py:171 -msgid "Failed to delete vdi" -msgstr "" +#~ msgid "terminate_connection:volume does not exist. 
volume name:%(volume)s" +#~ msgstr "" -#: cinder/volume/xensm.py:177 -msgid "Failed to delete volume in db" -msgstr "" +#~ msgid "create_snapshot:Device does not support snapshot." +#~ msgstr "" -#: cinder/volume/xensm.py:210 -msgid "Failed to find volume in db" -msgstr "" +#~ msgid "create_snapshot:Resource pool needs 1GB valid size at least." +#~ msgstr "" -#: cinder/volume/xensm.py:221 -msgid "Failed to find backend in db" -msgstr "" +#~ msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +#~ msgstr "" -#: cinder/volume/nexenta/__init__.py:27 -msgid "Nexenta SA returned the error" -msgstr "" +#~ msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s" +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:64 -#, python-format -msgid "Sending JSON data: %s" -msgstr "" +#~ msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:69 -#, python-format -msgid "Auto switching to HTTPS connection to %s" -msgstr "" +#~ msgid "delete_snapshot:Device does not support snapshot." +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:75 -msgid "No headers in server response" -msgstr "" +#~ msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:76 -msgid "Bad response from server" -msgstr "" +#~ msgid "_check_conf_file: %s" +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:79 -#, fuzzy, python-format -msgid "Got response: %s" -msgstr "risposta %s" +#~ msgid "Write login information to xml error. %s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:96 -#, python-format -msgid "Volume %s does not exist in Nexenta SA" -msgstr "" +#~ msgid "_get_login_info error. %s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:180 -msgid "" -"Call to local_path should not happen. Verify that use_local_volumes flag " -"is turned off." -msgstr "" +#~ msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:202 -#, python-format -msgid "Ignored target creation error \"%s\" while ensuring export" -msgstr "" +#~ msgid "_get_lun_set_info:%s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:210 -#, python-format -msgid "Ignored target group creation error \"%s\" while ensuring export" -msgstr "" +#~ msgid "_get_iscsi_info:%s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:219 -#, python-format -msgid "Ignored target group member addition error \"%s\" while ensuring export" -msgstr "" +#~ msgid "CLI command:%s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:227 -#, python-format -msgid "Ignored LU creation error \"%s\" while ensuring export" -msgstr "" +#~ msgid "_execute_cli:%s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:237 -#, python-format -msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" -msgstr "" +#~ msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:273 -#, python-format -msgid "" -"Got error trying to destroy target group %(target_group)s, assuming it is" -" already gone: %(exc)s" -msgstr "" +#~ msgid "_get_tgt_iqn:iSCSI IP is %s." 
+#~ msgstr "" -#: cinder/volume/nexenta/volume.py:280 -#, python-format -msgid "" -"Got error trying to delete target %(target)s, assuming it is already " -"gone: %(exc)s" -msgstr "" +#~ msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +#~ msgstr "" -#~ msgid "Unable to locate account %(account_name) on Solidfire device" +#~ msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" #~ msgstr "" -#~ msgid "Zone %(zone_id)s could not be found." +#~ msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" #~ msgstr "" -#~ msgid "Cinder access parameters were not specified." +#~ msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." #~ msgstr "" -#~ msgid "Virtual Storage Array %(id)d could not be found." +#~ msgid "Ignored target creation error while ensuring export" #~ msgstr "" -#~ msgid "Virtual Storage Array %(name)s could not be found." +#~ msgid "Ignored target group creation error while ensuring export" #~ msgstr "" -#~ msgid "Detected more than one volume with name %(vol_name)" +#~ msgid "Ignored target group member addition error while ensuring export" #~ msgstr "" -#~ msgid "Detected existing vlan with id %(vlan)" +#~ msgid "Ignored LU creation error while ensuring export" #~ msgstr "" -#~ msgid "" -#~ "Attempting to grab semaphore \"%(lock)s\" " -#~ "for method \"%(method)s\"...lock" +#~ msgid "Ignored LUN mapping entry addition error while ensuring export" #~ msgstr "" -#~ msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgid "Invalid source volume %(reason)s." #~ msgstr "" -#~ msgid "" -#~ "Attempting to grab file lock " -#~ "\"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgid "The request is invalid." +#~ msgstr "La richiesta non è valida." + +#~ msgid "Volume %(volume_id)s persistence file could not be found." #~ msgstr "" -#~ msgid "Got file lock \"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgid "No disk at %(location)s" #~ msgstr "" -#~ msgid "Parent group id and group id cannot be same" +#~ msgid "Class %(class_name)s could not be found: %(exception)s" #~ msgstr "" -#~ msgid "No body provided" +#~ msgid "Action not allowed." #~ msgstr "" -#~ msgid "Create VSA %(display_name)s of type %(vc_type)s" +#~ msgid "Key pair %(key_name)s already exists." #~ msgstr "" -#~ msgid "Delete VSA with id: %s" +#~ msgid "Migration error: %(reason)s" #~ msgstr "" -#~ msgid "Associate address %(ip)s to VSA %(id)s" +#~ msgid "Maximum volume/snapshot size exceeded" #~ msgstr "" -#~ msgid "Disassociate address from VSA %(id)s" +#~ msgid "3PAR Host already exists: %(err)s. %(info)s" #~ msgstr "" -#~ msgid "%(obj)s with ID %(id)s not found" +#~ msgid "Backup volume %(volume_id)s type not recognised." #~ msgstr "" -#~ msgid "" -#~ "%(obj)s with ID %(id)s belongs to " -#~ "VSA %(own_vsa_id)s and not to VSA " -#~ "%(vsa_id)s." +#~ msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s" #~ msgstr "" -#~ msgid "Index. vsa_id=%(vsa_id)s" +#~ msgid "ssh_read: Read SSH timeout" #~ msgstr "" -#~ msgid "Detail. vsa_id=%(vsa_id)s" +#~ msgid "do_setup." #~ msgstr "" -#~ msgid "Create. vsa_id=%(vsa_id)s, body=%(body)s" +#~ msgid "create_volume: volume name: %s." #~ msgstr "" -#~ msgid "Create volume of %(size)s GB from VSA ID %(vsa_id)s" +#~ msgid "delete_volume: volume name: %s." #~ msgstr "" -#~ msgid "Update %(obj)s with id: %(id)s, changes: %(changes)s" +#~ msgid "create_cloned_volume: src volume: %(src)s tgt volume: %(tgt)s" #~ msgstr "" -#~ msgid "Delete. 
vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgid "create_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" #~ msgstr "" -#~ msgid "Show. vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgid "delete_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" #~ msgstr "" -#~ msgid "Index instances for VSA %s" +#~ msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s" #~ msgstr "" -#~ msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances" +#~ msgid "_update_volume_stats: Updating volume stats" #~ msgstr "" -#~ msgid "" -#~ "Instance quota exceeded. You cannot run" -#~ " any more instances of this type." +#~ msgid "restore finished." #~ msgstr "" -#~ msgid "" -#~ "Instance quota exceeded. You can only" -#~ " run %s more instances of this " -#~ "type." +#~ msgid "Error encountered during initialization of driver: %s" #~ msgstr "" -#~ msgid "Going to try to soft delete %s" +#~ msgid "Unabled to update stats, driver is uninitialized" #~ msgstr "" -#~ msgid "No host for instance %s, deleting immediately" +#~ msgid "Snapshot file at %s does not exist." #~ msgstr "" -#~ msgid "Going to try to terminate %s" +#~ msgid "_create_copy: Source vdisk %s does not exist" #~ msgstr "" -#~ msgid "Going to try to stop %s" +#~ msgid "Login to 3PAR array invalid" #~ msgstr "" -#~ msgid "Going to try to start %s" +#~ msgid "There are no datastores present under %s." #~ msgstr "" -#~ msgid "" -#~ "Going to force the deletion of the" -#~ " vm %(instance_uuid)s, even if it is" -#~ " deleted" +#~ msgid "Size for volume: %s not found, skipping secure delete." #~ msgstr "" -#~ msgid "" -#~ "Instance %(instance_uuid)s did not exist " -#~ "in the DB, but I will shut " -#~ "it down anyway using a special " -#~ "context" +#~ msgid "Could not find attribute for LUN named %s" #~ msgstr "" -#~ msgid "exception terminating the instance %(instance_uuid)s" +#~ msgid "Cleaning up incomplete backup operations" #~ msgstr "" -#~ msgid "trying to destroy already destroyed instance: %s" -#~ msgstr "Provando a distruggere una istanza già distrutta: %s" +#~ msgid "Resetting volume %s to available (was backing-up)" +#~ msgstr "" -#~ msgid "" -#~ "Instance %(name)s found in database but" -#~ " not known by hypervisor. Setting " -#~ "power state to NOSTATE" +#~ msgid "Resetting volume %s to error_restoring (was restoring-backup)" #~ msgstr "" -#~ msgid "" -#~ "Detected instance with name label " -#~ "'%(name_label)s' which is marked as " -#~ "DELETED but still present on host." +#~ msgid "Resetting backup %s to error (was creating)" #~ msgstr "" -#~ msgid "" -#~ "Destroying instance with name label " -#~ "'%(name_label)s' which is marked as " -#~ "DELETED but still present on host." +#~ msgid "Resetting backup %s to available (was restoring)" #~ msgstr "" -#~ msgid "SQL connection failed (%(connstring)s). %(attempts)d attempts left." +#~ msgid "Resuming delete on backup: %s" #~ msgstr "" -#~ msgid "Can't downgrade without losing data" +#~ msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" #~ msgstr "" -#~ msgid "Instance %(instance_id)s not found" +#~ msgid "create_backup finished. backup: %s" #~ msgstr "" -#~ msgid "Network %s has active ports, cannot delete" +#~ msgid "delete_backup started, backup: %s" #~ msgstr "" -#~ msgid "No fixed IPs to deallocate for vif %sid" +#~ msgid "delete_backup finished, backup %s deleted" #~ msgstr "" -#~ msgid "" -#~ "AMQP server on %(fl_host)s:%(fl_port)d is " -#~ "unreachable: %(e)s. Trying again in " -#~ "%(fl_intv)d seconds." 
+#~ msgid "JSON transfer Error" #~ msgstr "" -#~ msgid "Unable to connect to AMQP server after %(tries)d tries. Shutting down." +#~ msgid "create volume error: %(err)s" #~ msgstr "" -#~ msgid "Failed to fetch message from queue: %s" +#~ msgid "Create snapshot error." #~ msgstr "" -#~ msgid "Initing the Adapter Consumer for %s" -#~ msgstr "Inizializzando il Consumer Adapter per %s" +#~ msgid "Create luncopy error." +#~ msgstr "" -#~ msgid "Created \"%(exchange)s\" fanout exchange with \"%(key)s\" routing key" +#~ msgid "_find_host_lun_id transfer data error! " #~ msgstr "" -#~ msgid "Exception while processing consumer" +#~ msgid "ssh_read: Read SSH timeout." #~ msgstr "" -#~ msgid "Creating \"%(exchange)s\" fanout exchange" +#~ msgid "There are no hosts in the inventory." #~ msgstr "" -#~ msgid "topic is %s" -#~ msgstr "argomento é %s" +#~ msgid "Unable to create volume: %(vol)s on the hosts: %(hosts)s." +#~ msgstr "" -#~ msgid "message %s" -#~ msgstr "messaggio %s" +#~ msgid "Successfully cloned new backing: %s." +#~ msgstr "" -#~ msgid "" -#~ "Cannot confirm tmpfile at %(ipath)s is" -#~ " on same shared storage between " -#~ "%(src)s and %(dest)s." +#~ msgid "Successfully reverted clone: %(clone)s to snapshot: %(snapshot)s." #~ msgstr "" -#~ msgid "" -#~ "Unable to migrate %(instance_id)s to " -#~ "%(dest)s: Lack of memory(host:%(avail)s <= " -#~ "instance:%(mem_inst)s)" +#~ msgid "Copying backing files from %(src)s to %(dest)s." #~ msgstr "" -#~ msgid "" -#~ "Unable to migrate %(instance_id)s to " -#~ "%(dest)s: Lack of disk(host:%(available)s <=" -#~ " instance:%(necessary)s)" +#~ msgid "Initiated copying of backing via task: %s." #~ msgstr "" -#~ msgid "Driver Method %(driver_method)s missing: %(e)s.Reverting to schedule()" +#~ msgid "Successfully copied backing to %s." #~ msgstr "" -#~ msgid "Setting instance %(instance_uuid)s to ERROR state." +#~ msgid "Registering backing at path: %s to inventory." #~ msgstr "" -#~ msgid "_filter_hosts: %(request_spec)s" +#~ msgid "Initiated registring backing, task: %s." #~ msgstr "" -#~ msgid "Filter hosts for drive type %s" +#~ msgid "Successfully registered backing: %s." #~ msgstr "" -#~ msgid "Host %s has no free capacity. Skip" +#~ msgid "Reverting backing to snapshot: %s." #~ msgstr "" -#~ msgid "Filter hosts: %s" +#~ msgid "Initiated reverting snapshot via task: %s." #~ msgstr "" -#~ msgid "Must implement host selection mechanism" +#~ msgid "Successfully reverted to snapshot: %s." #~ msgstr "" -#~ msgid "Maximum number of hosts selected (%d)" +#~ msgid "Successfully copied disk data to: %s." #~ msgstr "" -#~ msgid "Selected excessive host %(host)s" +#~ msgid "Error(s): %s occurred in the call to RetrieveProperties." #~ msgstr "" -#~ msgid "Provision volume %(name)s of size %(size)s GB on host %(host)s" +#~ msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" #~ msgstr "" -#~ msgid "volume_params %(volume_params)s" +#~ msgid "Deploy v1 of the Cinder API. " #~ msgstr "" -#~ msgid "%(i)d: Volume %(name)s" +#~ msgid "Deploy v2 of the Cinder API. " #~ msgstr "" -#~ msgid "Attempting to spawn %(num_volumes)d volume(s)" +#~ msgid "_read_xml:%s" #~ msgstr "" -#~ msgid "Error creating volumes" +#~ msgid "request ip info is %s." #~ msgstr "" -#~ msgid "Non-VSA volume %d" +#~ msgid "new str info is %s." #~ msgstr "" -#~ msgid "Spawning volume %(volume_id)s with drive type %(drive_type)s" +#~ msgid "Failed to create iser target for volume %(volume_id)s." 
#~ msgstr "" -#~ msgid "Error creating volume" +#~ msgid "Failed to remove iser target for volume %(volume_id)s." #~ msgstr "" -#~ msgid "No capability selected for volume of size %(size)s" +#~ msgid "rtstool is not installed correctly" #~ msgstr "" -#~ msgid "Host %s:" +#~ msgid "Creating iser_target for: %s" #~ msgstr "" -#~ msgid "" -#~ "\tDrive %(qosgrp)-25s: total %(total)2s, " -#~ "used %(used)2s, free %(free)2s. Available " -#~ "capacity %(avail)-5s" +#~ msgid "Failed to create iser target for volume id:%(vol_id)s: %(e)s" #~ msgstr "" -#~ msgid "" -#~ "\t LeastUsedHost: Best host: %(best_host)s." -#~ " (used capacity %(min_used)s)" +#~ msgid "Removing iser_target for: %s" #~ msgstr "" -#~ msgid "" -#~ "\t MostAvailCap: Best host: %(best_host)s. " -#~ "(available %(max_avail)s %(type_str)s)" +#~ msgid "Failed to remove iser target for volume id:%(vol_id)s: %(e)s" #~ msgstr "" -#~ msgid "(%(nm)s) publish (key: %(routing_key)s) %(message)s" +#~ msgid "Volume %s does not exist, it seems it was already deleted" #~ msgstr "" -#~ msgid "Publishing to route %s" -#~ msgstr "Pubblicando sulla route %s" +#~ msgid "Executing zfs send/recv on the appliance" +#~ msgstr "" -#~ msgid "Declaring queue %s" -#~ msgstr "Dichiarando la coda %s" +#~ msgid "zfs send/recv done, new volume %s created" +#~ msgstr "" -#~ msgid "Declaring exchange %s" -#~ msgstr "Dichiarando il centralino %s" +#~ msgid "Failed to delete temp snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" -#~ msgid "Binding %(queue)s to %(exchange)s with key %(routing_key)s" +#~ msgid "Failed to delete zfs recv snapshot %(volume)s@%(snapshot)s" #~ msgstr "" -#~ msgid "Getting from %(queue)s: %(message)s" +#~ msgid "rbd export-diff failed - %s" #~ msgstr "" -#~ msgid "Test: Emulate wrong VSA name. Raise" +#~ msgid "rbd import-diff failed - %s" #~ msgstr "" -#~ msgid "Test: Emulate DB error. Raise" +#~ msgid "%s is not on GPFS. Perhaps GPFS not mounted." #~ msgstr "" -#~ msgid "Test: user_data = %s" +#~ msgid "Folder %s does not exist, it seems it was already deleted." #~ msgstr "" -#~ msgid "_create: param=%s" +#~ msgid "No 'os-update_readonly_flag' was specified in request." #~ msgstr "" -#~ msgid "Host %s" +#~ msgid "Volume 'readonly' flag must be specified in request as a boolean." #~ msgstr "" -#~ msgid "Test: provision vol %(name)s on host %(host)s" +#~ msgid "ISER provider_location not stored, using discovery" #~ msgstr "" -#~ msgid "\t vol=%(vol)s" +#~ msgid "Could not find iSER export for volume %s" #~ msgstr "" -#~ msgid "Test: VSA update request: vsa_id=%(vsa_id)s values=%(values)s" +#~ msgid "ISER Discovery: Found %s" #~ msgstr "" -#~ msgid "Test: Volume create: %s" +#~ msgid "Failed to access the device on the path %(path)s: %(error)s." #~ msgstr "" -#~ msgid "Test: Volume get request: id=%(volume_id)s" +#~ msgid "iSER device not found at %s" #~ msgstr "" -#~ msgid "Test: Volume update request: id=%(volume_id)s values=%(values)s" +#~ msgid "Found iSER node %(host_device)s (after %(tries)s rescans)." #~ msgstr "" -#~ msgid "Test: Volume get: id=%(volume_id)s" +#~ msgid "Skipping ensure_export. No iser_target provisioned for volume: %s" #~ msgstr "" -#~ msgid "Task [%(name)s] %(task)s status: success %(result)s" +#~ msgid "Skipping remove_export. No iser_target provisioned for volume: %s" #~ msgstr "" -#~ msgid "Task [%(name)s] %(task)s status: %(status)s %(error_info)s" +#~ msgid "Downloading image: %s from glance image server." 
#~ msgstr "" -#~ msgid "Unable to get updated status: %s" +#~ msgid "Uploading image: %s to the Glance image server." #~ msgstr "" -#~ msgid "" -#~ "deactivate_node is called for " -#~ "node_id = %(id)s node_ip = %(ip)s" +#~ msgid "Invalid request body" #~ msgstr "" -#~ msgid "virsh said: %r" +#~ msgid "enter: _get_host_from_connector: prefix %s" #~ msgstr "" -#~ msgid "cool, it's a device" +#~ msgid "Schedule volume flow not retrieved" #~ msgstr "" -#~ msgid "Unable to read LXC console" +#~ msgid "Failed to successfully complete schedule volume using flow: %s" #~ msgstr "" -#~ msgid "" -#~ "to xml...\n" -#~ ":%s " +#~ msgid "Create volume flow not retrieved" #~ msgstr "" -#~ msgid "During wait running, %s disappeared." +#~ msgid "Failed to successfully complete create volume workflow" #~ msgstr "" -#~ msgid "Instance %s running successfully." +#~ msgid "Expected volume result not found" #~ msgstr "" -#~ msgid "The nwfilter(%(instance_secgroup_filter_name)s) is not found." +#~ msgid "Manager volume flow not retrieved" #~ msgstr "" -#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image verification failed" +#~ msgid "Failed to successfully complete manager volume workflow" #~ msgstr "" -#~ msgid "" -#~ "%(container_format)s-%(id)s (%(base_file)s): image " -#~ "verification skipped, no hash stored" +#~ msgid "Unable to update stats, driver is uninitialized" #~ msgstr "" -#~ msgid "%(container_format)s-%(id)s (%(base_file)s): checking" +#~ msgid "Bad reponse from server: %s" #~ msgstr "" -#~ msgid "" -#~ "%(container_format)s-%(id)s (%(base_file)s): in use:" -#~ " on this node %(local)d local, " -#~ "%(remote)d on other nodes" +#~ msgid "%(flow)s has moved into state %(state)s from state %(old_state)s" #~ msgstr "" -#~ msgid "" -#~ "%(container_format)s-%(id)s (%(base_file)s): warning " -#~ "-- an absent base file is in " -#~ "use! instances: %(instance_list)s" +#~ msgid "No request spec, will not reschedule" #~ msgstr "" -#~ msgid "" -#~ "%(container_format)s-%(id)s (%(base_file)s): in: on" -#~ " other nodes (%(remote)d on other " -#~ "nodes)" +#~ msgid "No retry filter property or associated retry info, will not reschedule" #~ msgstr "" -#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image is not in use" +#~ msgid "Retry info not present, will not reschedule" #~ msgstr "" -#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image is in use" +#~ msgid "Clear capabilities" #~ msgstr "" -#~ msgid "Created VM %s..." -#~ msgstr "Creata VM %s.." +#~ msgid "This usually means the volume was never succesfully created." +#~ msgstr "" -#~ msgid "Created VM %(instance_name)s as %(vm_ref)s." -#~ msgstr "Creata VM %(instance_name)s come %(vm_ref)s" +#~ msgid "setting LU uppper (end) limit to %s" +#~ msgstr "" -#~ msgid "Creating a CDROM-specific VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +#~ msgid "Can't find lun or lun goup in array" #~ msgstr "" -#~ msgid "" -#~ "Created a CDROM-specific VBD %(vbd_ref)s" -#~ " for VM %(vm_ref)s, VDI %(vdi_ref)s." +#~ msgid "Volume to be restored to is smaller than the backup to be restored" #~ msgstr "" -#~ msgid "No primary VDI found for%(vm_ref)s" +#~ msgid "Volume driver '%(driver)s' not initialized." #~ msgstr "" -#~ msgid "Snapshotting VM %(vm_ref)s with label '%(label)s'..." +#~ msgid "in looping call" #~ msgstr "" -#~ msgid "Created snapshot %(template_vm_ref)s from VM %(vm_ref)s." +#~ msgid "Is the appropriate service running?" 
#~ msgstr "" -#~ msgid "Fetching image %(image)s" +#~ msgid "Could not find another host" #~ msgstr "" -#~ msgid "Image Type: %s" +#~ msgid "Not enough allocatable volume gigabytes remaining" #~ msgstr "" -#~ msgid "ISO: Found sr possibly containing the ISO image" +#~ msgid "Unable to update stats on non-intialized Volume Group: %s" #~ msgstr "" -#~ msgid "Size for image %(image)s:%(virtual_size)d" +#~ msgid "do_setup: Pool %s does not exist" #~ msgstr "" -#~ msgid "instance %s: Failed to fetch glance image" +#~ msgid "migrate_volume started with more than one vdisk copy" #~ msgstr "" -#~ msgid "Creating VBD for VDI %s ... " +#~ msgid "migrate_volume: Could not get vdisk copy data" #~ msgstr "" -#~ msgid "Creating VBD for VDI %s done." +#~ msgid "Selected datastore: %s for the volume." #~ msgstr "" -#~ msgid "VBD.unplug successful first time." +#~ msgid "There are no valid datastores present under %s." #~ msgstr "" -#~ msgid "VBD.unplug rejected: retrying..." +#~ msgid "Unable to create volume, driver not initialized" #~ msgstr "" -#~ msgid "Not sleeping anymore!" +#~ msgid "Migration %(migration_id)s could not be found." #~ msgstr "" -#~ msgid "VBD.unplug successful eventually." +#~ msgid "Bad driver response status: %(status)s" #~ msgstr "" -#~ msgid "Ignoring XenAPI.Failure in VBD.unplug: %s" +#~ msgid "Instance %(instance_id)s could not be found." #~ msgstr "" -#~ msgid "Ignoring XenAPI.Failure %s" +#~ msgid "Volume retype failed: %(reason)s" #~ msgstr "" -#~ msgid "Starting instance %s" +#~ msgid "SIGTERM received" #~ msgstr "" -#~ msgid "instance %s: Failed to spawn" -#~ msgstr "Istanza %s: esecuzione fallita..." +#~ msgid "Child %(pid)d exited with status %(code)d" +#~ msgstr "" -#~ msgid "Instance %s failed to spawn - performing clean-up" +#~ msgid "_wait_child %d" #~ msgstr "" -#~ msgid "instance %s: Failed to spawn - Unable to create VM" +#~ msgid "wait wrap.failed %s" #~ msgstr "" #~ msgid "" -#~ "Auto configuring disk for instance " -#~ "%(instance_uuid)s, attempting to resize " -#~ "partition..." +#~ "Report interval must be less than " +#~ "service down time. Current config: " +#~ ". Setting " +#~ "service_down_time to: %(new_service_down_time)s" #~ msgstr "" -#~ msgid "Invalid value for injected_files: '%s'" +#~ msgid "Failed to update iscsi target for volume %(name)s." #~ msgstr "" -#~ msgid "Starting VM %s..." +#~ msgid "Updating iscsi target: %s" #~ msgstr "" -#~ msgid "Spawning VM %(instance_uuid)s created %(vm_ref)s." +#~ msgid "Failed to update iscsi target %(name)s: %(e)s" #~ msgstr "" -#~ msgid "Instance %s: waiting for running" +#~ msgid "Caught '%(exception)s' exception." #~ msgstr "" -#~ msgid "Instance %s: running" +#~ msgid "Get code level failed" #~ msgstr "" -#~ msgid "Resources to remove:%s" +#~ msgid "do_setup: Could not get system name" #~ msgstr "" -#~ msgid "Removing VDI %(vdi_ref)s(uuid:%(vdi_to_remove)s)" +#~ msgid "Failed to get license information." #~ msgstr "" -#~ msgid "Skipping VDI destroy for %s" +#~ msgid "do_setup: No configured nodes" #~ msgstr "" -#~ msgid "Finished snapshot and upload for VM %s" +#~ msgid "enter: _get_chap_secret_for_host: host name %s" #~ msgstr "" -#~ msgid "Starting snapshot for VM %s" +#~ msgid "" +#~ "leave: _get_chap_secret_for_host: host name " +#~ "%(host_name)s with secret %(chap_secret)s" #~ msgstr "" -#~ msgid "Unable to Snapshot instance %(instance_uuid)s: %(exc)s" +#~ msgid "" +#~ "_create_host: Cannot clean host name. 
" +#~ "Host name is not unicode or string" #~ msgstr "" -#~ msgid "Updating instance '%(instance_uuid)s' progress to %(progress)d" +#~ msgid "enter: _get_host_from_connector: %s" #~ msgstr "" -#~ msgid "Resize instance %s complete" +#~ msgid "leave: _get_host_from_connector: host %s" #~ msgstr "" -#~ msgid "domid changed from %(olddomid)s to %(newdomid)s" +#~ msgid "enter: _create_host: host %s" #~ msgstr "" -#~ msgid "VM %(instance_uuid)s already halted,skipping shutdown..." +#~ msgid "_create_host: No connector ports" #~ msgstr "" -#~ msgid "Shutting down VM for Instance %(instance_uuid)s" +#~ msgid "leave: _create_host: host %(host)s - %(host_name)s" #~ msgstr "" -#~ msgid "Destroying VDIs for Instance %(instance_uuid)s" +#~ msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" #~ msgstr "" #~ msgid "" -#~ "Instance %(instance_uuid)s using RAW or " -#~ "VHD, skipping kernel and ramdisk " -#~ "deletion" +#~ "storwize_svc_multihostmap_enabled is set to " +#~ "False, Not allow multi host mapping" #~ msgstr "" -#~ msgid "Instance %(instance_uuid)s VM destroyed" +#~ msgid "volume %s mapping to multi host" #~ msgstr "" -#~ msgid "Destroying VM for Instance %(instance_uuid)s" +#~ msgid "" +#~ "leave: _map_vol_to_host: LUN %(result_lun)s, " +#~ "volume %(volume_name)s, host %(host_name)s" #~ msgstr "" -#~ msgid "Automatically hard rebooting %d" +#~ msgid "enter: _delete_host: host %s " #~ msgstr "" -#~ msgid "Instance for migration %d not found, skipping" +#~ msgid "leave: _delete_host: host %s " #~ msgstr "" -#~ msgid "injecting network info to xs for vm: |%s|" +#~ msgid "_create_host failed to return the host name." #~ msgstr "" -#~ msgid "creating vif(s) for vm: |%s|" +#~ msgid "_get_host_from_connector failed to return the host name for connector" #~ msgstr "" -#~ msgid "Creating VIF for VM %(vm_ref)s, network %(network_ref)s." +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to any host found." #~ msgstr "" -#~ msgid "Created VIF %(vif_ref)s for VM %(vm_ref)s, network %(network_ref)s." +#~ msgid "" +#~ "terminate_connection: Multiple mappings of " +#~ "volume %(vol_name)s found, no host " +#~ "specified." #~ msgstr "" -#~ msgid "injecting hostname to xs for vm: |%s|" +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to host %(host_name)s found" #~ msgstr "" -#~ msgid "" -#~ "The agent call to %(method)s returned" -#~ " an invalid response: %(ret)r. VM " -#~ "id=%(instance_uuid)s; path=%(path)s; args=%(addl_args)r" +#~ msgid "protocol must be specified as ' iSCSI' or ' FC'" +#~ msgstr "" + +#~ msgid "enter: _create_vdisk: vdisk %s " #~ msgstr "" #~ msgid "" -#~ "TIMEOUT: The call to %(method)s timed" -#~ " out. VM id=%(instance_uuid)s; args=%(args)r" +#~ "_create_vdisk %(name)s - did not find success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" #~ msgid "" -#~ "NOT IMPLEMENTED: The call to %(method)s" -#~ " is not supported by the agent. " -#~ "VM id=%(instance_uuid)s; args=%(args)r" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find success " +#~ "message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" #~ msgstr "" #~ msgid "" -#~ "The call to %(method)s returned an " -#~ "error: %(e)s. 
VM id=%(instance_uuid)s; " -#~ "args=%(args)r" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find mapping " +#~ "id in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" #~ msgstr "" -#~ msgid "Creating VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +#~ msgid "" +#~ "_prepare_fc_map: Failed to prepare FlashCopy" +#~ " from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" -#~ msgid "Error destroying VDI" +#~ msgid "" +#~ "Unexecpted mapping status %(status)s for " +#~ "mapping %(id)s. Attributes: %(attr)s" #~ msgstr "" -#~ msgid "\tVolume %s is NOT VSA volume" +#~ msgid "" +#~ "Mapping %(id)s prepare failed to " +#~ "complete within the allotted %(to)d " +#~ "seconds timeout. Terminating." #~ msgstr "" -#~ msgid "\tFE VSA Volume %s creation - do nothing" +#~ msgid "" +#~ "_prepare_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s with " +#~ "exception %(ex)s" #~ msgstr "" -#~ msgid "VSA BE create_volume for %s failed" +#~ msgid "_prepare_fc_map: %s" #~ msgstr "" -#~ msgid "VSA BE create_volume for %s succeeded" +#~ msgid "" +#~ "_start_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s deletion - do nothing" +#~ msgid "" +#~ "enter: _run_flashcopy: execute FlashCopy from" +#~ " source %(source)s to target %(target)s" #~ msgstr "" -#~ msgid "VSA BE delete_volume for %s failed" +#~ msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" #~ msgstr "" -#~ msgid "VSA BE delete_volume for %s suceeded" +#~ msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s local path call - call discover" +#~ msgid "_create_copy: Source vdisk %(src_vdisk)s (%(src_id)s) does not exist" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s ensure export - do nothing" +#~ msgid "" +#~ "_create_copy: cannot get source vdisk " +#~ "%(src)s capacity from vdisk attributes " +#~ "%(attr)s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s create export - do nothing" +#~ msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s remove export - do nothing" +#~ msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" #~ msgstr "" -#~ msgid "VSA BE remove_export for %s failed" +#~ msgid "" +#~ "leave: _get_flashcopy_mapping_attributes: mapping " +#~ "%(fc_map_id)s, attributes %(attributes)s" #~ msgstr "" -#~ msgid "Failed to retrieve QoS info" +#~ msgid "enter: _is_vdisk_defined: vdisk %s " #~ msgstr "" -#~ msgid "invalid drive data" +#~ msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " #~ msgstr "" -#~ msgid "drive_name not defined" +#~ msgid "enter: _delete_vdisk: vdisk %s" #~ msgstr "" -#~ msgid "invalid drive type name %s" +#~ msgid "warning: Tried to delete vdisk %s but it does not exist." #~ msgstr "" -#~ msgid "*** Experimental VSA code ***" +#~ msgid "leave: _delete_vdisk: vdisk %s" #~ msgstr "" -#~ msgid "Requested number of VCs (%d) is too high. 
Setting to default" +#~ msgid "" +#~ "_add_vdisk_copy %(name)s - did not find" +#~ " success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" -#~ msgid "Creating VSA: %s" +#~ msgid "_get_vdisk_copy_attrs: Could not get vdisk copy data" #~ msgstr "" -#~ msgid "" -#~ "VSA ID %(vsa_id)d %(vsa_name)s: Create " -#~ "volume %(vol_name)s, %(vol_size)d GB, type " -#~ "%(vol_type_id)s" +#~ msgid "_get_pool_attrs: Pool %s does not exist" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)d: Update VSA status to %(status)s" +#~ msgid "enter: _execute_command_and_parse_attributes: command %s" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)d: Update VSA call" +#~ msgid "" +#~ "leave: _execute_command_and_parse_attributes:\n" +#~ "command: %(cmd)s\n" +#~ "attributes: %(attr)s" #~ msgstr "" -#~ msgid "Adding %(add_cnt)s VCs to VSA %(vsa_name)s." +#~ msgid "" +#~ "_get_hdr_dic: attribute headers and values do not match.\n" +#~ " Headers: %(header)s\n" +#~ " Values: %(row)s" #~ msgstr "" -#~ msgid "Deleting %(del_cnt)s VCs from VSA %(vsa_name)s." +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ "stdout: %(out)s\n" +#~ "stderr: %(err)s\n" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Deleting %(direction)s volume %(vol_name)s" +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" -#~ msgid "Unable to delete volume %s" +#~ msgid "Did not find expected column in %(fun)s: %(hdr)s" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Forced delete. %(direction)s volume %(vol_name)s" +#~ msgid "" +#~ "Volume size %(size)s cannot be lesser" +#~ " than the snapshot size %(snap_size)s. " +#~ "They must be >= original snapshot " +#~ "size." #~ msgstr "" -#~ msgid "Going to try to terminate VSA ID %s" +#~ msgid "" +#~ "Clones currently disallowed when %(size)s " +#~ "< %(source_size)s. They must be >= " +#~ "original volume size." #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Delete instance %(name)s" +#~ msgid "" +#~ "Size of specified image %(image_size)s " +#~ "is larger than volume size " +#~ "%(volume_size)s." #~ msgstr "" -#~ msgid "Create call received for VSA %s" +#~ msgid "" +#~ "Image minDisk size %(min_disk)s is " +#~ "larger than the volume size " +#~ "%(volume_size)s." #~ msgstr "" -#~ msgid "Failed to find VSA %(vsa_id)d" +#~ msgid "Updating volume %(volume_id)s with %(update)s" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Drive %(vol_id)s created. Status %(status)s" +#~ msgid "Volume %s: resetting 'creating' status failed" #~ msgstr "" -#~ msgid "Drive %(vol_name)s (%(vol_disp_name)s) still in creating phase - wait" +#~ msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s" #~ msgstr "" -#~ msgid "" -#~ "VSA ID %(vsa_id)d: Not all volumes " -#~ "are created (%(cvol_real)d of %(cvol_exp)d)" +#~ msgid "Marking volume %s as bootable" #~ msgstr "" #~ msgid "" -#~ "VSA ID %(vsa_id)d: Drive %(vol_name)s " -#~ "(%(vol_disp_name)s) is in %(status)s state" +#~ "Attempting download of %(image_id)s " +#~ "(%(image_location)s) to volume %(volume_id)s" #~ msgstr "" -#~ msgid "Failed to update attach status for volume %(vol_name)s. 
%(ex)s" +#~ msgid "" +#~ "Downloaded image %(image_id)s (%(image_location)s)" +#~ " to volume %(volume_id)s successfully" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)d: Delete all BE volumes" +#~ msgid "" +#~ "Creating volume glance metadata for " +#~ "volume %(volume_id)s backed by image " +#~ "%(image_id)s with: %(vol_metadata)s" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)d: Start %(vc_count)d instances" +#~ msgid "" +#~ "Cloning %(volume_id)s from image %(image_id)s" +#~ " at location %(image_location)s" #~ msgstr "" diff --git a/cinder/locale/it_IT/LC_MESSAGES/cinder.po b/cinder/locale/it_IT/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..7683b81c99 --- /dev/null +++ b/cinder/locale/it_IT/LC_MESSAGES/cinder.po @@ -0,0 +1,10736 @@ +# Italian (Italy) translations for cinder. +# Copyright (C) 2013 ORGANIZATION +# This file is distributed under the same license as the cinder project. +# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Cinder\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-02-03 06:16+0000\n" +"PO-Revision-Date: 2013-05-29 08:13+0000\n" +"Last-Translator: openstackjenkins \n" +"Language-Team: Italian (Italy) " +"(http://www.transifex.com/projects/p/openstack/language/it_IT/)\n" +"Plural-Forms: nplurals=2; plural=(n != 1)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:102 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:66 cinder/brick/exception.py:33 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:88 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:107 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:112 +#, python-format +msgid "Volume driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:116 +#, python-format +msgid "Backup driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:120 +#, python-format +msgid "Connection to glance failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:124 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:129 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:133 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:137 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:141 +msgid "Volume driver not ready." +msgstr "" + +#: cinder/exception.py:145 cinder/brick/exception.py:74 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:150 +#, python-format +msgid "Invalid snapshot: %(reason)s" +msgstr "" + +#: cinder/exception.py:154 +#, python-format +msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:159 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:163 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:167 +msgid "The results are invalid." 
+msgstr "" + +#: cinder/exception.py:171 +#, python-format +msgid "Invalid input received: %(reason)s" +msgstr "" + +#: cinder/exception.py:175 +#, python-format +msgid "Invalid volume type: %(reason)s" +msgstr "" + +#: cinder/exception.py:179 +#, python-format +msgid "Invalid volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:183 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:187 +#, python-format +msgid "Invalid host: %(reason)s" +msgstr "" + +#: cinder/exception.py:193 cinder/brick/exception.py:81 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:197 +#, python-format +msgid "Invalid auth key: %(reason)s" +msgstr "" + +#: cinder/exception.py:201 +#, python-format +msgid "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" +msgstr "" + +#: cinder/exception.py:206 +msgid "Service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:210 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:214 +#, python-format +msgid "The device in the path %(path)s is unavailable: %(reason)s" +msgstr "" + +#: cinder/exception.py:218 +#, python-format +msgid "Expected a uuid but received %(uuid)s." +msgstr "" + +#: cinder/exception.py:222 cinder/brick/exception.py:68 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:228 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:232 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "" +"Volume %(volume_id)s has no administration metadata with key " +"%(metadata_key)s." +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Invalid metadata: %(reason)s" +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Invalid metadata size: %(reason)s" +msgstr "" + +#: cinder/exception.py:250 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:255 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:264 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:269 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s deletion is not allowed with volumes " +"present with the type." +msgstr "" + +#: cinder/exception.py:274 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:278 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:282 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:287 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:291 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:295 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "Service %(service_id)s could not be found." 
+msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:328 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:332 +#, python-format +msgid "Unknown quota resources %(unknown)s." +msgstr "" + +#: cinder/exception.py:336 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:340 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:344 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:348 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:352 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:356 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:365 +#, python-format +msgid "Volume Type %(id)s already exists." +msgstr "" + +#: cinder/exception.py:369 +#, python-format +msgid "Volume type encryption for type %(type_id)s already exists." +msgstr "" + +#: cinder/exception.py:373 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:377 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:381 +#, python-format +msgid "Could not find parameter %(param)s" +msgstr "" + +#: cinder/exception.py:385 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:389 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:398 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:402 +#, python-format +msgid "Quota exceeded: code=%(code)s" +msgstr "" + +#: cinder/exception.py:409 +#, python-format +msgid "" +"Requested volume or snapshot exceeds allowed Gigabytes quota. Requested " +"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." 
+msgstr "" + +#: cinder/exception.py:415 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:419 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:423 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:427 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:432 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:436 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:440 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:444 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:449 +#, python-format +msgid "Glance metadata for volume/snapshot %(id)s cannot be found." +msgstr "" + +#: cinder/exception.py:453 +#, python-format +msgid "Failed to export for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:457 +#, python-format +msgid "Failed to create metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:461 +#, python-format +msgid "Failed to update metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:465 +#, python-format +msgid "Failed to copy metadata to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:469 +#, python-format +msgid "Failed to copy image to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:473 +msgid "Invalid Ceph args provided for backup rbd operation" +msgstr "" + +#: cinder/exception.py:477 +msgid "An error has occurred during backup operation" +msgstr "" + +#: cinder/exception.py:481 +msgid "Backup RBD operation failed" +msgstr "" + +#: cinder/exception.py:485 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:489 +msgid "Failed to identify volume backend." +msgstr "" + +#: cinder/exception.py:493 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "" + +#: cinder/exception.py:497 +#, python-format +msgid "Connection to swift failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:501 +#, python-format +msgid "Transfer %(transfer_id)s could not be found." +msgstr "" + +#: cinder/exception.py:505 +#, python-format +msgid "Volume migration failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:509 +#, python-format +msgid "SSH command injection detected: %(command)s" +msgstr "" + +#: cinder/exception.py:513 +#, python-format +msgid "QoS Specs %(specs_id)s already exists." +msgstr "" + +#: cinder/exception.py:517 +#, python-format +msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:522 +#, python-format +msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "No such QoS spec %(specs_id)s." +msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:536 +#, python-format +msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." 
+msgstr "" + +#: cinder/exception.py:541 +#, python-format +msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." +msgstr "" + +#: cinder/exception.py:546 +#, python-format +msgid "Invalid qos specs: %(reason)s" +msgstr "" + +#: cinder/exception.py:550 +#, python-format +msgid "QoS Specs %(specs_id)s is still associated with entities." +msgstr "" + +#: cinder/exception.py:554 +#, python-format +msgid "key manager error: %(reason)s" +msgstr "" + +#: cinder/exception.py:560 +msgid "Coraid Cinder Driver exception." +msgstr "" + +#: cinder/exception.py:564 +msgid "Failed to encode json data." +msgstr "" + +#: cinder/exception.py:568 +msgid "Login on ESM failed." +msgstr "" + +#: cinder/exception.py:572 +msgid "Relogin on ESM failed." +msgstr "" + +#: cinder/exception.py:576 +#, python-format +msgid "Group with name \"%(group_name)s\" not found." +msgstr "" + +#: cinder/exception.py:580 +#, python-format +msgid "ESM configure request failed: %(message)s." +msgstr "" + +#: cinder/exception.py:584 +#, python-format +msgid "Coraid ESM not available with reason: %(reason)s." +msgstr "" + +#: cinder/exception.py:589 +msgid "Zadara Cinder Driver exception." +msgstr "" + +#: cinder/exception.py:593 +#, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:597 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:601 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:605 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:609 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:613 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:618 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:622 +msgid "SolidFire Cinder Driver exception" +msgstr "" + +#: cinder/exception.py:626 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:630 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:636 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:641 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:645 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:649 cinder/exception.py:662 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:654 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:658 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/manager.py:133 +msgid "Notifying Schedulers of capabilities ..." +msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:105 +#, python-format +msgid "" +"Default quota for resource: %(res)s is set by the default quota flag: " +"quota_%(res)s, it is now deprecated. Please use the the default quota " +"class for default quota." 
+msgstr "" + +#: cinder/quota.py:748 +#, python-format +msgid "Created reservations %s" +msgstr "" + +#: cinder/quota.py:770 +#, python-format +msgid "Failed to commit reservations %s" +msgstr "" + +#: cinder/quota.py:790 +#, python-format +msgid "Failed to roll back reservations %s" +msgstr "" + +#: cinder/quota.py:876 +msgid "Cannot register resource" +msgstr "" + +#: cinder/quota.py:879 +msgid "Cannot register resources" +msgstr "" + +#: cinder/quota_utils.py:46 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume - " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/quota_utils.py:56 cinder/transfer/api.py:168 +#: cinder/volume/flows/api/create_volume.py:520 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/service.py:95 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:108 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:148 +#, python-format +msgid "" +"Report interval must be less than service down time. Current config " +"service_down_time: %(service_down_time)s, report_interval for this: " +"service is: %(report_interval)s. Setting global service_down_time to: " +"%(new_down_time)s" +msgstr "" + +#: cinder/service.py:216 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:255 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:270 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:276 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:298 +#, python-format +msgid "" +"Value of config option %(name)s_workers must be integer greater than 1. " +"Input value ignored." +msgstr "" + +#: cinder/service.py:373 +msgid "serve() can only be called once" +msgstr "" + +#: cinder/service.py:379 cinder/openstack/common/service.py:166 +#: cinder/openstack/common/service.py:384 +msgid "Full set of CONF:" +msgstr "" + +#: cinder/service.py:387 +#, python-format +msgid "%s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Can not translate %s to integer." 
+msgstr "" + +#: cinder/utils.py:127 +#, python-format +msgid "May specify only one of %s" +msgstr "" + +#: cinder/utils.py:212 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:228 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:412 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:423 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:698 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:759 +#, python-format +msgid "Volume driver %s not initialized" +msgstr "" + +#: cinder/wsgi.py:127 cinder/openstack/common/sslutils.py:50 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: cinder/wsgi.py:130 cinder/openstack/common/sslutils.py:53 +#, python-format +msgid "Unable to find ca_file : %s" +msgstr "" + +#: cinder/wsgi.py:133 cinder/openstack/common/sslutils.py:56 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: cinder/wsgi.py:136 cinder/openstack/common/sslutils.py:59 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:169 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:206 +#, python-format +msgid "Started %(name)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:244 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:313 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." +msgstr "" + +#: cinder/api/common.py:92 cinder/api/common.py:126 cinder/volume/api.py:266 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:95 cinder/api/common.py:130 cinder/volume/api.py:263 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:120 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:134 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:162 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:189 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:182 +msgid "Initializing extension manager." 
+msgstr "" + +#: cinder/api/extensions.py:197 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:235 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:236 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:240 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:256 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:262 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:276 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:287 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:356 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:266 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:463 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:786 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:907 +msgid "subclasses must implement construct()!" +msgstr "" + +#: cinder/api/contrib/admin_actions.py:81 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:227 +#: cinder/api/contrib/volume_transfer.py:157 +#: cinder/api/contrib/volume_transfer.py:193 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:224 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:60 +msgid "Snapshot not found." 
+msgstr "" + +#: cinder/api/contrib/hosts.py:86 cinder/api/openstack/wsgi.py:245 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:136 +#, python-format +msgid "Host '%s' could not be found." +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:168 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:180 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:206 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:214 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:111 +msgid "Please specify a name for QoS specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:220 +msgid "Failed to disassociate qos specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:222 +msgid "Qos specs still in use." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:298 +#: cinder/api/contrib/qos_specs_manage.py:351 +msgid "Volume Type id must not be None." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:72 +msgid "Missing required element quota_class_set in request body." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:81 +msgid "Quota class limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:85 +msgid "Quota class limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:60 +msgid "Quota limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quotas.py:65 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:100 +msgid "Missing required element quota_set in request body." +msgstr "" + +#: cinder/api/contrib/quotas.py:111 +#, python-format +msgid "Bad key(s) in quota set: %s" +msgstr "" + +#: cinder/api/contrib/scheduler_hints.py:36 +msgid "Malformed scheduler_hints attribute" +msgstr "" + +#: cinder/api/contrib/services.py:84 +msgid "" +"Query by service parameter is deprecated. Please use binary parameter " +"instead." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:51 +msgid "'status' must be specified." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:61 +#, python-format +msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:67 +#, python-format +msgid "" +"Provided snapshot status %(provided)s not allowed for snapshot with " +"status %(current)s." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:79 +msgid "progress must be an integer percentage" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:101 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:105 +#: cinder/api/v1/snapshot_metadata.py:75 cinder/api/v1/volume_metadata.py:75 +#: cinder/api/v2/snapshot_metadata.py:75 cinder/api/v2/volume_metadata.py:74 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:108 +#: cinder/api/v1/snapshot_metadata.py:79 cinder/api/v1/volume_metadata.py:79 +#: cinder/api/v2/snapshot_metadata.py:79 cinder/api/v2/volume_metadata.py:78 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:150 +msgid "" +"Key names can only contain alphanumeric characters, underscores, periods," +" colons and hyphens." 
+msgstr "" + +#: cinder/api/contrib/volume_actions.py:99 +#, python-format +msgid "" +"Invalid request to attach volume to an instance %(instance_uuid)s and a " +"host %(host_name)s simultaneously" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:107 +msgid "Invalid request to attach volume to an invalid target" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:111 +msgid "" +"Invalid request to attach volume with an invalid mode. Attaching mode " +"should be 'rw' or 'ro'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:196 +msgid "Unable to fetch connection information from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:216 +msgid "Unable to terminate volume connection from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:229 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:237 +msgid "Bad value for 'force' parameter." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:240 +msgid "'force' is not string or bool." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:280 +msgid "New volume size must be specified as an integer." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:299 +msgid "Must specify readonly in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:307 +msgid "Bad value for 'readonly'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:311 +msgid "'readonly' not string or bool" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:325 +msgid "New volume type must be specified." +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:131 +msgid "Listing volume transfers" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:147 +#, python-format +msgid "Creating new volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:162 +#, python-format +msgid "Creating transfer of volume %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:183 +#, python-format +msgid "Accepting volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:196 +#, python-format +msgid "Accepting transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:217 +#, python-format +msgid "Delete transfer with id: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:64 +msgid "key_size must be non-negative" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:67 +msgid "key_size must be an integer" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:73 +msgid "provider must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:75 +msgid "control_location must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:83 +#, python-format +msgid "Valid control location are: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:111 +msgid "Cannot create encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:115 +msgid "Create body is not valid." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:157 +msgid "Cannot delete encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/middleware/auth.py:108 +msgid "Invalid service catalog json." 
+msgstr "" + +#: cinder/api/middleware/fault.py:44 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:53 cinder/api/openstack/wsgi.py:984 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/fault.py:69 +#, python-format +msgid "%(exception)s: %(explanation)s" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:55 cinder/api/middleware/sizelimit.py:64 +#: cinder/api/middleware/sizelimit.py:78 +msgid "Request is too large." +msgstr "" + +#: cinder/api/openstack/__init__.py:69 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:80 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:104 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:126 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." +msgstr "" + +#: cinder/api/openstack/wsgi.py:220 cinder/api/openstack/wsgi.py:634 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:639 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:677 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:682 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:685 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:793 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:799 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:803 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:914 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:917 cinder/api/openstack/wsgi.py:930 +#: cinder/api/v1/snapshot_metadata.py:53 cinder/api/v1/snapshot_metadata.py:71 +#: cinder/api/v1/snapshot_metadata.py:96 cinder/api/v1/snapshot_metadata.py:121 +#: cinder/api/v1/volume_metadata.py:53 cinder/api/v1/volume_metadata.py:71 +#: cinder/api/v1/volume_metadata.py:96 cinder/api/v1/volume_metadata.py:121 +#: cinder/api/v2/snapshot_metadata.py:53 cinder/api/v2/snapshot_metadata.py:71 +#: cinder/api/v2/snapshot_metadata.py:96 cinder/api/v2/snapshot_metadata.py:121 +#: cinder/api/v2/volume_metadata.py:52 cinder/api/v2/volume_metadata.py:70 +#: cinder/api/v2/volume_metadata.py:95 cinder/api/v2/volume_metadata.py:120 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:927 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:939 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:987 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." 
+msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:139 cinder/api/v2/limits.py:138 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:264 cinder/api/v2/limits.py:261 +msgid "This request was rate-limited." +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:37 cinder/api/v1/snapshot_metadata.py:117 +#: cinder/api/v1/snapshot_metadata.py:156 cinder/api/v2/snapshot_metadata.py:37 +#: cinder/api/v2/snapshot_metadata.py:117 +#: cinder/api/v2/snapshot_metadata.py:156 +msgid "snapshot does not exist" +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:139 +#: cinder/api/v1/snapshot_metadata.py:149 cinder/api/v1/volume_metadata.py:139 +#: cinder/api/v1/volume_metadata.py:149 cinder/api/v2/snapshot_metadata.py:139 +#: cinder/api/v2/snapshot_metadata.py:149 cinder/api/v2/volume_metadata.py:138 +#: cinder/api/v2/volume_metadata.py:148 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:119 cinder/api/v2/snapshots.py:120 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:184 +msgid "'volume_id' must be specified" +msgstr "" + +#: cinder/api/v1/snapshots.py:182 cinder/api/v2/snapshots.py:193 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:186 cinder/api/v2/snapshots.py:202 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:37 cinder/api/v1/volume_metadata.py:117 +#: cinder/api/v1/volume_metadata.py:156 cinder/api/v2/volume_metadata.py:36 +#: cinder/api/v2/volume_metadata.py:116 cinder/api/v2/volume_metadata.py:155 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:111 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:290 cinder/api/v2/volumes.py:228 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:344 cinder/api/v1/volumes.py:348 +#: cinder/api/v2/volumes.py:298 cinder/api/v2/volumes.py:302 +msgid "Invalid imageRef provided." 
+msgstr "" + +#: cinder/api/v1/volumes.py:388 cinder/api/v2/volumes.py:354 +#, python-format +msgid "snapshot id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:401 +#, python-format +msgid "source vol id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:412 cinder/api/v2/volumes.py:377 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/v1/volumes.py:496 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/snapshots.py:111 cinder/api/v2/snapshots.py:126 +#: cinder/api/v2/snapshots.py:267 +msgid "Snapshot could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:174 cinder/api/v2/snapshots.py:234 +#: cinder/api/v2/volumes.py:313 cinder/api/v2/volumes.py:419 +#, python-format +msgid "Missing required element '%s' in request body" +msgstr "" + +#: cinder/api/v2/snapshots.py:190 cinder/api/v2/volumes.py:217 +#: cinder/api/v2/volumes.py:234 cinder/api/v2/volumes.py:449 +msgid "Volume could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:230 cinder/api/v2/volumes.py:415 +msgid "Missing request body" +msgstr "" + +#: cinder/api/v2/types.py:70 +msgid "Volume type not found" +msgstr "" + +#: cinder/api/v2/volumes.py:237 +msgid "Volume cannot be deleted while in attached state" +msgstr "" + +#: cinder/api/v2/volumes.py:343 +msgid "Volume type not found." +msgstr "" + +#: cinder/api/v2/volumes.py:366 +#, python-format +msgid "source volume id:%s not found" +msgstr "" + +#: cinder/api/v2/volumes.py:472 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:66 +msgid "Backup status must be available or error" +msgstr "" + +#: cinder/backup/api.py:105 +msgid "Volume to be backed up must be available" +msgstr "" + +#: cinder/backup/api.py:140 +msgid "Backup status must be available" +msgstr "" + +#: cinder/backup/api.py:145 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:154 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:170 +msgid "Volume to be restored to must be available" +msgstr "" + +#: cinder/backup/api.py:176 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." +msgstr "" + +#: cinder/backup/api.py:181 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:97 +msgid "NULL host not allowed for volume backend lookup." +msgstr "" + +#: cinder/backup/manager.py:100 +#, python-format +msgid "Checking hostname '%s' for backend info." +msgstr "" + +#: cinder/backup/manager.py:107 +#, python-format +msgid "Backend not found in hostname (%s) so using default." +msgstr "" + +#: cinder/backup/manager.py:117 +#, python-format +msgid "Manager requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:120 cinder/backup/manager.py:132 +msgid "Fetching default backend." +msgstr "" + +#: cinder/backup/manager.py:123 +#, python-format +msgid "Volume manager for backend '%s' does not exist." +msgstr "" + +#: cinder/backup/manager.py:129 +#, python-format +msgid "Driver requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:147 +#, python-format +msgid "" +"Registering backend %(backend)s (host=%(host)s " +"backend_name=%(backend_name)s)." +msgstr "" + +#: cinder/backup/manager.py:154 +#, python-format +msgid "Registering default backend %s." 
+msgstr "" + +#: cinder/backup/manager.py:158 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)." +msgstr "" + +#: cinder/backup/manager.py:165 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s." +msgstr "" + +#: cinder/backup/manager.py:184 +msgid "Cleaning up incomplete backup operations." +msgstr "" + +#: cinder/backup/manager.py:189 +#, python-format +msgid "Resetting volume %s to available (was backing-up)." +msgstr "" + +#: cinder/backup/manager.py:194 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)." +msgstr "" + +#: cinder/backup/manager.py:206 +#, python-format +msgid "Resetting backup %s to error (was creating)." +msgstr "" + +#: cinder/backup/manager.py:212 +#, python-format +msgid "Resetting backup %s to available (was restoring)." +msgstr "" + +#: cinder/backup/manager.py:217 +#, python-format +msgid "Resuming delete on backup: %s." +msgstr "" + +#: cinder/backup/manager.py:225 +#, python-format +msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:237 +#, python-format +msgid "" +"Create backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:249 +#, python-format +msgid "" +"Create backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:282 +#, python-format +msgid "Create backup finished. backup: %s." +msgstr "" + +#: cinder/backup/manager.py:286 +#, python-format +msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:299 +#, python-format +msgid "" +"Restore backup aborted: expected volume status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:310 +#, python-format +msgid "" +"Restore backup aborted: expected backup status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:329 +#, python-format +msgid "" +"Restore backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:360 +#, python-format +msgid "" +"Restore backup finished, backup %(backup_id)s restored to volume " +"%(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:379 +#, python-format +msgid "Delete backup started, backup: %s." +msgstr "" + +#: cinder/backup/manager.py:386 +#, python-format +msgid "" +"Delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:399 +#, python-format +msgid "" +"Delete backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:422 +#, python-format +msgid "Delete backup finished, backup %s deleted." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:116 +msgid "" +"rbd striping not supported - ignoring configuration settings for rbd " +"striping" +msgstr "" + +#: cinder/backup/drivers/ceph.py:147 +#, python-format +msgid "invalid user '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:213 +msgid "backup_id required" +msgstr "" + +#: cinder/backup/drivers/ceph.py:224 +#, python-format +msgid "discarding %(length)s bytes from offset %(offset)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:232 +#, python-format +msgid "writing zeroes chunk %d" +msgstr "" + +#: cinder/backup/drivers/ceph.py:246 +#, python-format +msgid "transferring data between '%(src)s' and '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:250 +#, python-format +msgid "%(chunks)s chunks of %(bytes)s bytes to be transferred" +msgstr "" + +#: cinder/backup/drivers/ceph.py:269 +#, python-format +msgid "transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:279 +#, python-format +msgid "transferring remaining %s bytes" +msgstr "" + +#: cinder/backup/drivers/ceph.py:295 +#, python-format +msgid "creating base image '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:322 cinder/backup/drivers/ceph.py:603 +#, python-format +msgid "deleting backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:325 +msgid "no backup snapshot to delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:361 +#, python-format +msgid "trying diff format name format basename='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:369 +#, python-format +msgid "image %s not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:377 +#, python-format +msgid "base image still has %s snapshots so skipping base image delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:382 +#, python-format +msgid "deleting base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:389 +#, python-format +msgid "image busy, retrying %(retries)s more time(s) in %(delay)ss" +msgstr "" + +#: cinder/backup/drivers/ceph.py:394 +msgid "max retries reached - raising error" +msgstr "" + +#: cinder/backup/drivers/ceph.py:397 +#, python-format +msgid "base backup image='%s' deleted)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:407 +#, python-format +msgid "deleting source snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:453 +#, python-format +msgid "performing differential transfer from '%(src)s' to '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:478 +#, python-format +msgid "rbd diff op failed - (ret=%(ret)s stderr=%(stderr)s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:488 +#, python-format +msgid "image '%s' not found - trying diff format name" +msgstr "" + +#: cinder/backup/drivers/ceph.py:493 +#, python-format +msgid "diff format image '%s' not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:528 +#, python-format +msgid "using --from-snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:543 +#, python-format +msgid "source snap '%s' is stale so deleting" +msgstr "" + +#: cinder/backup/drivers/ceph.py:555 +#, python-format +msgid "" +"snap='%(snap)s' does not exist in base image='%(base)s' - aborting " +"incremental backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:566 +#, python-format +msgid "creating backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:586 +#, python-format +msgid "differential backup transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:595 +msgid "differential backup transfer failed" +msgstr "" + +#: 
cinder/backup/drivers/ceph.py:625 +#, python-format +msgid "creating base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:634 +msgid "copying data" +msgstr "" + +#: cinder/backup/drivers/ceph.py:694 +#, python-format +msgid "looking for snapshot of backup base '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:697 +#, python-format +msgid "backup base '%s' has no snapshots" +msgstr "" + +#: cinder/backup/drivers/ceph.py:704 +#, python-format +msgid "backup '%s' has no snapshot" +msgstr "" + +#: cinder/backup/drivers/ceph.py:708 +#, python-format +msgid "backup should only have one snapshot but instead has %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:713 +#, python-format +msgid "found snapshot '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:734 +msgid "need non-zero volume size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:751 +#, python-format +msgid "Starting backup of volume='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:764 +msgid "forcing full backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:776 +#, python-format +msgid "backup '%s' finished." +msgstr "" + +#: cinder/backup/drivers/ceph.py:834 +msgid "adjusting restore vol size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:846 +#, python-format +msgid "trying incremental restore from base='%(base)s' snap='%(snap)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:858 +msgid "differential restore failed, trying full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:869 +#, python-format +msgid "restore transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:916 +#, python-format +msgid "rbd has %s extents" +msgstr "" + +#: cinder/backup/drivers/ceph.py:938 +msgid "dest volume is original volume - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:959 +msgid "destination has extents - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:964 +#, python-format +msgid "no restore point found for backup='%s', forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:995 +msgid "forcing full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1005 +#, python-format +msgid "starting restore from Ceph backup=%(src)s to volume=%(dest)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1016 +msgid "volume_file does not support fileno() so skipping fsync()" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1021 +msgid "restore finished successfully." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:1023 +#, python-format +msgid "restore finished with error - %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1029 +#, python-format +msgid "delete started for backup=%s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1034 +msgid "rbd image not found but continuing anyway so that db entry can be removed" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1037 +#, python-format +msgid "delete '%s' finished with warning" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1039 +#, python-format +msgid "delete '%s' finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:106 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:123 +#, python-format +msgid "single_user auth mode enabled, but %(param)s not set" +msgstr "" + +#: cinder/backup/drivers/swift.py:141 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:146 +#, python-format +msgid "container %s does not exist" +msgstr "" + +#: cinder/backup/drivers/swift.py:151 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/drivers/swift.py:157 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:173 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:182 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:192 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:209 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/drivers/swift.py:214 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:219 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:224 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/drivers/swift.py:234 +#, python-format +msgid "volume size %d is invalid." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:248 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:271 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/drivers/swift.py:278 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:287 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/drivers/swift.py:291 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/drivers/swift.py:297 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:301 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:304 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:312 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/drivers/swift.py:328 cinder/backup/drivers/tsm.py:324 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/drivers/swift.py:345 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/drivers/swift.py:350 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:356 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/drivers/swift.py:362 +#, python-format +msgid "" +"restoring object from swift. backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:378 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/drivers/swift.py:401 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:409 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:423 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:428 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:432 cinder/backup/drivers/tsm.py:378 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:446 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:455 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:458 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:468 cinder/backup/drivers/tsm.py:440 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/backup/drivers/tsm.py:85 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to create device hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:143 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to obtain backup success notification from " +"server.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:173 +#, python-format +msgid "" +"restore: %(vol_id)s Failed.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:199 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a block device." +msgstr "" + +#: cinder/backup/drivers/tsm.py:206 +#, python-format +msgid "backup: %(vol_id)s Failed. Cannot obtain real path to device %(path)s." +msgstr "" + +#: cinder/backup/drivers/tsm.py:213 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a file." +msgstr "" + +#: cinder/backup/drivers/tsm.py:260 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to remove backup hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:286 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to TSM, volume path: " +"%(volume_path)s," +msgstr "" + +#: cinder/backup/drivers/tsm.py:298 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:308 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:338 +#, python-format +msgid "" +"restore: starting restore of backup from TSM to volume %(volume_id)s, " +"backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:352 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:362 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:413 +#, python-format +msgid "" +"delete: %(vol_id)s Failed to run dsmc with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:421 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments with " +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:432 +#, python-format +msgid "" +"delete: %(vol_id)s Failed with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/brick/exception.py:55 +#, python-format +msgid "Exception in string format operation. 
msg='%s'" +msgstr "" + +#: cinder/brick/exception.py:85 +msgid "We are unable to locate any Fibre Channel devices." +msgstr "" + +#: cinder/brick/exception.py:89 +msgid "Unable to find a Fibre Channel volume device." +msgstr "" + +#: cinder/brick/exception.py:93 +#, python-format +msgid "Volume device not found at %(device)s." +msgstr "" + +#: cinder/brick/exception.py:97 +#, python-format +msgid "Unable to find Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:101 +#, python-format +msgid "Failed to create Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:105 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:109 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:113 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:117 +#, python-format +msgid "Connect to volume via protocol %(protocol)s not supported." +msgstr "" + +#: cinder/brick/initiator/connector.py:127 +#, python-format +msgid "Invalid InitiatorConnector protocol specified %(protocol)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:140 +#, python-format +msgid "Failed to access the device on the path %(path)s: %(error)s %(info)s." +msgstr "" + +#: cinder/brick/initiator/connector.py:229 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. Try" +" number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:242 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:317 +#, python-format +msgid "Could not find the iSCSI Initiator File %s" +msgstr "" + +#: cinder/brick/initiator/connector.py:609 +msgid "We are unable to locate any Fibre Channel devices" +msgstr "" + +#: cinder/brick/initiator/connector.py:619 +#, python-format +msgid "Looking for Fibre Channel dev %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:629 +msgid "Fibre Channel volume device not found." +msgstr "" + +#: cinder/brick/initiator/connector.py:633 +#, python-format +msgid "Fibre volume not yet found. Will rescan & retry. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:649 +#, python-format +msgid "Found Fibre Channel volume %(name)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:658 +#, python-format +msgid "Multipath device discovered %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:776 +#, python-format +msgid "AoE volume not yet found at: %(path)s. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:789 +#, python-format +msgid "Found AoE device %(path)s (after %(tries)s rediscover)" +msgstr "" + +#: cinder/brick/initiator/connector.py:815 +#, python-format +msgid "aoe-discover: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:825 +#, python-format +msgid "aoe-revalidate %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:834 +#, python-format +msgid "aoe-flush %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:858 +msgid "" +"Connection details not present. RemoteFsClient may not initialize " +"properly." 
+msgstr "" + +#: cinder/brick/initiator/connector.py:915 +msgid "Invalid connection_properties specified no device_path attribute" +msgstr "" + +#: cinder/brick/initiator/linuxfc.py:50 cinder/brick/initiator/linuxfc.py:56 +msgid "systool is not installed" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:99 +#: cinder/brick/initiator/linuxscsi.py:107 +#: cinder/brick/initiator/linuxscsi.py:124 +#, python-format +msgid "multipath call failed exit (%(code)s)" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:145 +#, python-format +msgid "Couldn't find multipath device %(line)s" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:149 +#, python-format +msgid "Found multipath device = %(mdev)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:140 +msgid "Attempting recreate of backing lun..." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:158 +#, python-format +msgid "" +"Failed to recover attempt to create iscsi backing lun for volume " +"id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:177 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:184 +#, python-format +msgid "" +"Created volume path %(vp)s,\n" +"content: %(vc)%" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:216 cinder/brick/iscsi/iscsi.py:365 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:227 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:258 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:262 +#, python-format +msgid "Volume path %s does not exist, nothing to remove." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:280 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:290 cinder/brick/iscsi/iscsi.py:550 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:375 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:469 +msgid "cinder-rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:489 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:513 cinder/brick/iscsi/iscsi.py:522 +#, python-format +msgid "Failed to create iscsi target for volume id:%s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:532 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:542 +#, python-format +msgid "Failed to remove iscsi target for volume id:%s." 
+msgstr "" + +#: cinder/brick/iscsi/iscsi.py:571 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 cinder/brick/local_dev/lvm.py:158 +#: cinder/brick/local_dev/lvm.py:474 cinder/brick/local_dev/lvm.py:503 +#: cinder/brick/local_dev/lvm.py:546 cinder/brick/local_dev/lvm.py:609 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 cinder/brick/local_dev/lvm.py:159 +#: cinder/brick/local_dev/lvm.py:475 cinder/brick/local_dev/lvm.py:504 +#: cinder/brick/local_dev/lvm.py:547 cinder/brick/local_dev/lvm.py:610 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 cinder/brick/local_dev/lvm.py:160 +#: cinder/brick/local_dev/lvm.py:476 cinder/brick/local_dev/lvm.py:505 +#: cinder/brick/local_dev/lvm.py:548 cinder/brick/local_dev/lvm.py:611 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, python-format +msgid "Unable to locate Volume Group %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:157 +msgid "Error querying thin pool about data_percent" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:370 +#, python-format +msgid "Unable to find VG: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:420 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:473 +msgid "Error creating Volume" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:489 +#, python-format +msgid "Unable to find LV: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:502 +msgid "Error creating snapshot" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:545 +msgid "Error activating LV" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:563 +#, python-format +msgid "Error reported running lvremove: CMD: %(command)s, RESPONSE: %(response)s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:568 +msgid "Attempting udev settle and retry of lvremove..." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:608 +msgid "Error extending Volume" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:39 +msgid "nfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:45 +msgid "glusterfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:86 +#, python-format +msgid "Already mounted: %s" +msgstr "" + +#: cinder/common/config.py:125 +msgid "Deploy v1 of the Cinder API." +msgstr "" + +#: cinder/common/config.py:128 +msgid "Deploy v2 of the Cinder API." +msgstr "" + +#: cinder/common/sqlalchemyutils.py:66 +#: cinder/openstack/common/db/sqlalchemy/utils.py:72 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:114 +#: cinder/openstack/common/db/sqlalchemy/utils.py:120 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/compute/nova.py:97 +#, python-format +msgid "Novaclient connection created using URL: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:63 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:190 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:843 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1842 +#, python-format +msgid "VolumeType %s deletion failed, VolumeType in use." 
+msgstr "" + +#: cinder/db/sqlalchemy/api.py:2530 +#, python-format +msgid "No backup with id %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2615 +msgid "Volume must be available" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2639 +#, python-format +msgid "Volume in unexpected state %s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2662 +#, python-format +msgid "" +"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " +"%(status)s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:37 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:64 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:240 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:269 +msgid "Downgrade from initial Cinder install is unsupported." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:49 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:74 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:105 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:45 +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:48 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:46 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:127 +msgid "Dropping foreign key reservations_ibfk_1 failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:133 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:140 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:147 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:60 +msgid "Exception while creating table 'volume_glance_metadata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:75 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:68 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:58 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:61 +msgid "transfers table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:31 +msgid "migrations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:61 +#, python-format +msgid "Table |%s| not created" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:37 +#, python-format +msgid "Exception while dropping table %s." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:100 +#, python-format +msgid "Exception while creating table %s." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:34 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:43 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:49 +#, python-format +msgid "Column |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:92 +msgid "encryption_key_id column not dropped from volumes" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:100 +msgid "encryption_key_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:105 +msgid "volume_type_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:113 +msgid "encryption table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:49 +msgid "Table quality_of_service_specs not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:60 +msgid "Added qos_specs_id column to volume type table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:85 +msgid "Dropping foreign key volume_types_ibfk_1 failed" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:93 +msgid "Dropping qos_specs_id column failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:100 +msgid "Dropping quality_of_service_specs table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:59 +msgid "volume_admin_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:48 +msgid "" +"Found existing 'default' entries in the quota_classes table. Skipping " +"insertion of default values." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:72 +msgid "Added default quota class data into the DB." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:74 +msgid "Default quota class data not inserted into the DB." +msgstr "" + +#: cinder/image/glance.py:161 cinder/image/glance.py:169 +#, python-format +msgid "Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:94 cinder/image/image_utils.py:199 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/image/image_utils.py:101 +#, python-format +msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:109 cinder/image/image_utils.py:192 +#, python-format +msgid "" +"Size is %(image_size)dGB and doesn't fit in a volume of size " +"%(volume_size)dGB." +msgstr "" + +#: cinder/image/image_utils.py:157 +#, python-format +msgid "" +"qemu-img is not installed and image is of type %s. Only RAW images can " +"be used if qemu-img is not installed." +msgstr "" + +#: cinder/image/image_utils.py:164 +msgid "" +"qemu-img is not installed and the disk format is not specified. Only RAW" +" images can be used if qemu-img is not installed." 
+msgstr "" + +#: cinder/image/image_utils.py:178 +#, python-format +msgid "Copying image from %(tmp)s to volume %(dest)s - size: %(size)s" +msgstr "" + +#: cinder/image/image_utils.py:206 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:224 +#, python-format +msgid "Converted to %(vol_format)s, but format is now %(file_format)s" +msgstr "" + +#: cinder/image/image_utils.py:260 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:78 +msgid "" +"config option keymgr.fixed_key has not been defined: some operations may " +"fail unexpectedly" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:80 +msgid "keymgr.fixed_key not defined" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:134 +#, python-format +msgid "Not deleting key %s" +msgstr "" + +#: cinder/openstack/common/eventlet_backdoor.py:140 +#, python-format +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/fileutils.py:64 +#, python-format +msgid "Reloading cached file %s" +msgstr "" + +#: cinder/openstack/common/gettextutils.py:252 +msgid "Message objects do not support addition." +msgstr "" + +#: cinder/openstack/common/gettextutils.py:261 +msgid "" +"Message objects do not support str() because they may contain non-ascii " +"characters. Please use unicode() or translate() instead." +msgstr "" + +#: cinder/openstack/common/imageutils.py:96 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:189 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:200 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:227 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:235 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/log.py:301 +#, python-format +msgid "Deprecated: %s" +msgstr "" + +#: cinder/openstack/common/log.py:402 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:453 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:623 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:82 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:89 +#: cinder/tests/brick/test_brick_connector.py:466 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:129 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:136 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:43 +#, python-format +msgid "Unexpected argument for periodic task creation: %(arg)s." 
+msgstr "" + +#: cinder/openstack/common/periodic_task.py:134 +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:139 +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:177 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:186 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." +msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:127 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/openstack/common/processutils.py:142 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:167 +#: cinder/openstack/common/processutils.py:239 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:345 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:179 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/openstack/common/processutils.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:318 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:220 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/openstack/common/processutils.py:224 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/openstack/common/service.py:175 +#: cinder/openstack/common/service.py:269 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:187 +msgid "Exception during rpc cleanup." 
+msgstr "" + +#: cinder/openstack/common/service.py:238 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:275 +msgid "Unhandled exception" +msgstr "" + +#: cinder/openstack/common/service.py:308 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/openstack/common/service.py:327 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/openstack/common/service.py:337 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/openstack/common/service.py:354 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/openstack/common/service.py:358 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/service.py:362 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/openstack/common/service.py:392 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/openstack/common/service.py:410 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/openstack/common/sslutils.py:98 +#, python-format +msgid "Invalid SSL version : %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:86 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/strutils.py:182 +#, python-format +msgid "Invalid string format: %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:189 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/openstack/common/versionutils.py:69 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and " +"may be removed in %(remove_in)s." +msgstr "" + +#: cinder/openstack/common/versionutils.py:73 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s and may be removed in " +"%(remove_in)s. It will not be superseded." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:29 +msgid "An unknown error occurred in crypto utils." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:36 +#, python-format +msgid "Block size of %(given)d is too big, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:45 +#, python-format +msgid "Length of %(given)d is too long, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/db/exception.py:44 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:487 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:538 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:610 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/utils.py:33 +msgid "Sort key supplied was not valid." +msgstr "" + +#: cinder/openstack/common/notifier/api.py:129 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:145 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:164 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. 
Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:105 +#, python-format +msgid "" +"An RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:83 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:216 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshold: %d. There " +"could be a MulticallProxyWaiter leak." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:299 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:345 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "received %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:422 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:423 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:280 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:459 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:594 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:597 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:631 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:640 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:668 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint."
+msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:166 +#: cinder/openstack/common/rpc/impl_qpid.py:164 +msgid "Failed to process message... skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:477 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:499 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:536 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:552 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:606 +#: cinder/openstack/common/rpc/impl_qpid.py:507 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:624 +#: cinder/openstack/common/rpc/impl_qpid.py:522 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:628 +#: cinder/openstack/common/rpc/impl_qpid.py:526 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:667 +#: cinder/openstack/common/rpc/impl_qpid.py:561 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:84 +#, python-format +msgid "Invalid value for qpid_topology_version: %d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:455 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:461 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:474 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:534 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:101 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:136 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:137 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:138 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:146 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:158 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:200 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:205 +msgid "You cannot send on this socket." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:267 +#, python-format +msgid "Running func with context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:305 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:387 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:437 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:443 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:475 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:481 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:497 +#, python-format +msgid "Required IPC directory does not exist at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:506 +#, python-format +msgid "Permission denied to IPC directory at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:509 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:543 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:562 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:590 +msgid "Skipping topic registration. Already registered." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:597 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:649 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:662 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:675 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:678 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:681 +#, python-format +msgid "Received message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:682 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:691 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:698 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:721 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:724 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:728 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:731 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:771 +#, python-format +msgid "topic is %s." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:815 +#, python-format +msgid "rpc_zmq_matchmaker = %(orig)s is deprecated; use %(new)s instead" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#: cinder/openstack/common/rpc/matchmaker_ring.py:79 +#: cinder/openstack/common/rpc/matchmaker_ring.py:97 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:54 +#, python-format +msgid "extra_spec requirement '%(req)s' does not match '%(cap)s'" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:67 +#, python-format +msgid "%(host_state)s fails resource_type extra_specs requirements" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:43 +msgid "Re-scheduling is disabled." +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:52 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/scheduler/driver.py:69 +msgid "Must implement host_passes_filters" +msgstr "" + +#: cinder/scheduler/driver.py:74 +msgid "Must implement find_retype_host" +msgstr "" + +#: cinder/scheduler/driver.py:78 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:82 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:98 +#, python-format +msgid "cannot place volume %(id)s on %(host)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:114 +#, python-format +msgid "No valid hosts for volume %(id)s with type %(type)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:125 +#, python-format +msgid "" +"Current host not valid for volume %(id)s with type %(type)s, migration " +"not allowed" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:156 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:174 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:207 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:259 +#, python-format +msgid "Filtered %s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:276 +#, python-format +msgid "Choosing %s" +msgstr "" + +#: cinder/scheduler/host_manager.py:264 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:269 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:294 +#, python-format +msgid "volume service is down or disabled. (host: %s)" +msgstr "" + +#: cinder/scheduler/manager.py:63 +msgid "" +"ChanceScheduler and SimpleScheduler have been deprecated due to lack of " +"support for advanced features like: volume types, volume encryption, QoS " +"etc. These two schedulers can be fully replaced by FilterScheduler with " +"certain combination of filters and weighers." 
+msgstr "" + +#: cinder/scheduler/manager.py:98 cinder/scheduler/manager.py:100 +msgid "Failed to create scheduler manager volume flow" +msgstr "" + +#: cinder/scheduler/manager.py:159 +msgid "New volume type not specified in request_spec." +msgstr "" + +#: cinder/scheduler/manager.py:174 +#, python-format +msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." +msgstr "" + +#: cinder/scheduler/manager.py:192 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:68 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%s'" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:43 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:57 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:53 +msgid "No volume_id provided to populate a request_spec from" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:116 +#, python-format +msgid "Failed to schedule_create_volume: %(cause)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:135 +#, python-format +msgid "Failed notifying on %(topic)s payload %(payload)s" +msgstr "" + +#: cinder/tests/fake_driver.py:57 cinder/volume/driver.py:784 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:76 cinder/volume/driver.py:884 +#, python-format +msgid "FAKE ISER: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:97 +msgid "local_path not implemented" +msgstr "" + +#: cinder/tests/fake_driver.py:124 cinder/tests/fake_driver.py:129 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:70 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:78 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:94 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:97 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:58 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_netapp_nfs.py:360 +#, python-format +msgid "Share %(share)s and file name %(file_name)s" +msgstr "" + +#: cinder/tests/test_rbd.py:768 cinder/volume/drivers/rbd.py:175 +msgid "flush() not supported in this version of librbd" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:260 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1507 +#, python-format +msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1510 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1515 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:60 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:61 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: 
cinder/tests/test_xiv_ds8k.py:102 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/tests/api/contrib/test_backups.py:741 +msgid "Invalid input" +msgstr "" + +#: cinder/tests/integrated/test_login.py:29 +#, python-format +msgid "volume: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:32 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:42 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:50 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:58 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:100 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:103 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:121 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:148 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:159 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:166 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/transfer/api.py:68 +msgid "Volume in unexpected state" +msgstr "" + +#: cinder/transfer/api.py:102 cinder/volume/api.py:367 +msgid "status must be available" +msgstr "" + +#: cinder/transfer/api.py:119 +#, python-format +msgid "Failed to create transfer record for %s" +msgstr "" + +#: cinder/transfer/api.py:136 +#, python-format +msgid "Attempt to transfer %s with invalid auth key." +msgstr "" + +#: cinder/transfer/api.py:156 cinder/volume/flows/api/create_volume.py:508 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/transfer/api.py:182 +#, python-format +msgid "Failed to update quota donating volume transfer id %s" +msgstr "" + +#: cinder/transfer/api.py:199 +#, python-format +msgid "Volume %s has been transferred."
+msgstr "" + +#: cinder/volume/api.py:143 +#, python-format +msgid "Unable to query if %s is in the availability zone set" +msgstr "" + +#: cinder/volume/api.py:171 cinder/volume/api.py:173 +msgid "Failed to create api volume flow" +msgstr "" + +#: cinder/volume/api.py:202 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:214 +#, python-format +msgid "Volume status must be available or error, but current status is: %s" +msgstr "" + +#: cinder/volume/api.py:224 +msgid "Volume cannot be deleted while migrating" +msgstr "" + +#: cinder/volume/api.py:229 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:285 cinder/volume/api.py:350 +#: cinder/volume/qos_specs.py:240 cinder/volume/volume_types.py:67 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:370 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:377 +msgid "status must be in-use to detach" +msgstr "" + +#: cinder/volume/api.py:388 +msgid "Volume status must be available to reserve" +msgstr "" + +#: cinder/volume/api.py:464 +msgid "Snapshot cannot be created while volume is migrating" +msgstr "" + +#: cinder/volume/api.py:468 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:490 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:502 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:553 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/api.py:581 cinder/volume/flows/api/create_volume.py:208 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:585 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:589 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:720 cinder/volume/api.py:772 +msgid "Volume status must be available/in-use." +msgstr "" + +#: cinder/volume/api.py:723 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/api.py:752 +msgid "Volume status must be available to extend." +msgstr "" + +#: cinder/volume/api.py:757 +#, python-format +msgid "" +"New size for extend must be greater than current size. (current: " +"%(size)s, extended: %(new_size)s)" +msgstr "" + +#: cinder/volume/api.py:778 +msgid "Volume is already part of an active migration" +msgstr "" + +#: cinder/volume/api.py:784 +msgid "volume must not have snapshots" +msgstr "" + +#: cinder/volume/api.py:797 +#, python-format +msgid "No available service named %s" +msgstr "" + +#: cinder/volume/api.py:803 +msgid "Destination host must be different than current host" +msgstr "" + +#: cinder/volume/api.py:833 +msgid "Source volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:837 +msgid "Destination volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:842 +#, python-format +msgid "Destination has migration_status %(stat)s, expected %(exp)s." +msgstr "" + +#: cinder/volume/api.py:853 +msgid "Volume status must be available to update readonly flag." 
+msgstr "" + +#: cinder/volume/api.py:862 +#, python-format +msgid "Unable to update type due to incorrect status on volume: %s" +msgstr "" + +#: cinder/volume/api.py:868 +#, python-format +msgid "Volume %s is already part of an active migration." +msgstr "" + +#: cinder/volume/api.py:874 +#, python-format +msgid "migration_policy must be 'on-demand' or 'never', passed: %s" +msgstr "" + +#: cinder/volume/api.py:887 +#, python-format +msgid "Invalid volume_type passed: %s" +msgstr "" + +#: cinder/volume/api.py:900 +#, python-format +msgid "New volume_type same as original: %s" +msgstr "" + +#: cinder/volume/api.py:915 +msgid "Retype cannot change encryption requirements" +msgstr "" + +#: cinder/volume/api.py:927 +msgid "Retype cannot change front-end qos specs for in-use volumes" +msgstr "" + +#: cinder/volume/driver.py:189 cinder/volume/drivers/netapp/nfs.py:174 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:282 +#, python-format +msgid "copy_data_between_volumes %(src)s -> %(dest)s." +msgstr "" + +#: cinder/volume/driver.py:295 cinder/volume/driver.py:309 +#, python-format +msgid "Failed to attach volume %(vol)s" +msgstr "" + +#: cinder/volume/driver.py:327 +#, python-format +msgid "Failed to copy volume %(src)s to %(dest)d" +msgstr "" + +#: cinder/volume/driver.py:340 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:358 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:394 +#, python-format +msgid "Unable to access the backend storage via the path %(path)s." +msgstr "" + +#: cinder/volume/driver.py:433 +#, python-format +msgid "Creating a new backup for volume %s." +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Restoring backup %(backup)s to volume %(volume)s." +msgstr "" + +#: cinder/volume/driver.py:474 +msgid "Extend volume not implemented" +msgstr "" + +#: cinder/volume/driver.py:533 cinder/volume/drivers/emc/emc_smis_iscsi.py:113 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:546 +#, python-format +msgid "ISCSI discovery attempt failed for:%s" +msgstr "" + +#: cinder/volume/driver.py:548 +#, python-format +msgid "Error from iscsiadm -m discovery: %s" +msgstr "" + +#: cinder/volume/driver.py:595 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:599 cinder/volume/drivers/emc/emc_smis_iscsi.py:156 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:696 +msgid "The volume driver requires the iSCSI initiator name in the connector." +msgstr "" + +#: cinder/volume/driver.py:726 cinder/volume/driver.py:845 +#: cinder/volume/drivers/eqlx.py:247 cinder/volume/drivers/lvm.py:359 +#: cinder/volume/drivers/zadara.py:650 +#: cinder/volume/drivers/emc/emc_smis_common.py:859 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:235 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:602 +#: cinder/volume/drivers/netapp/iscsi.py:1032 +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/nexenta/iscsi.py:538 +#: cinder/volume/drivers/windows/windows.py:205 +msgid "Updating volume stats" +msgstr "" + +#: cinder/volume/driver.py:924 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:203 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." 
+msgstr "" + +#: cinder/volume/manager.py:209 +msgid "" +"ThinLVMVolumeDriver is deprecated, please configure LVMISCSIDriver and " +"lvm_type=thin. Continuing with those settings." +msgstr "" + +#: cinder/volume/manager.py:228 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)" +msgstr "" + +#: cinder/volume/manager.py:235 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s" +msgstr "" + +#: cinder/volume/manager.py:244 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:257 +#, python-format +msgid "Failed to re-export volume %s: setting to error state" +msgstr "" + +#: cinder/volume/manager.py:264 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:271 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:273 +#, python-format +msgid "" +"Error encountered during re-exporting phase of driver initialization: " +"%(name)s" +msgstr "" + +#: cinder/volume/manager.py:283 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:328 cinder/volume/manager.py:330 +msgid "Failed to create manager volume flow" +msgstr "" + +#: cinder/volume/manager.py:374 cinder/volume/manager.py:391 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:380 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:389 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:394 +#, python-format +msgid "Cannot delete volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:422 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:427 +#, python-format +msgid "volume %s: glance metadata deleted" +msgstr "" + +#: cinder/volume/manager.py:430 +#, python-format +msgid "no glance metadata found for volume %s" +msgstr "" + +#: cinder/volume/manager.py:434 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:451 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:462 +#, python-format +msgid "snapshot %(snap_id)s: creating" +msgstr "" + +#: cinder/volume/manager.py:490 +#, python-format +msgid "" +"Failed updating %(snapshot_id)s metadata using the provided volumes " +"%(volume_id)s metadata" +msgstr "" + +#: cinder/volume/manager.py:496 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:508 cinder/volume/manager.py:518 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:526 +#, python-format +msgid "Cannot delete snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:556 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:559 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:579 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:583 +msgid "being attached by another host" +msgstr "" + +#: cinder/volume/manager.py:587 +msgid "being attached by different mode" +msgstr "" + +#: cinder/volume/manager.py:590 +msgid "status must be available or attaching" +msgstr "" + +#: cinder/volume/manager.py:698 +#, python-format +msgid 
"Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "" + +#: cinder/volume/manager.py:760 +#, python-format +msgid "Unable to fetch connection information from backend: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:807 +#, python-format +msgid "Unable to terminate volume connection: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:854 +msgid "failed to create new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:857 +msgid "timeout creating new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:880 +#, python-format +msgid "Failed to copy volume %(vol1)s to %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:909 +#, python-format +msgid "" +"migrate_volume_completion: completing migration for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:921 +#, python-format +msgid "" +"migrate_volume_completion is cleaning up an error for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:940 +#, python-format +msgid "Failed to delete migration source vol %(vol)s: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:976 +#, python-format +msgid "volume %s: calling driver migrate_volume" +msgstr "" + +#: cinder/volume/manager.py:1016 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/manager.py:1024 +#, python-format +msgid "" +"Unable to update stats, %(driver_name)s -%(driver_version)s " +"%(config_group)s driver is uninitialized." +msgstr "" + +#: cinder/volume/manager.py:1044 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/manager.py:1091 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to extend volume by %(s_size)sG, " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/manager.py:1103 +#, python-format +msgid "volume %s: extending" +msgstr "" + +#: cinder/volume/manager.py:1105 +#, python-format +msgid "volume %s: extended successfully" +msgstr "" + +#: cinder/volume/manager.py:1107 +#, python-format +msgid "volume %s: Error trying to extend volume" +msgstr "" + +#: cinder/volume/manager.py:1169 +msgid "Failed to update usages while retyping volume." +msgstr "" + +#: cinder/volume/manager.py:1170 +msgid "Failed to get old volume type quota reservations" +msgstr "" + +#: cinder/volume/manager.py:1190 +#, python-format +msgid "Volume %s: retyped succesfully" +msgstr "" + +#: cinder/volume/manager.py:1193 +#, python-format +msgid "" +"Volume %s: driver error when trying to retype, falling back to generic " +"mechanism." +msgstr "" + +#: cinder/volume/manager.py:1204 +msgid "Retype requires migration but is not allowed." +msgstr "" + +#: cinder/volume/manager.py:1212 +msgid "Volume must not have snapshots." 
+msgstr "" + +#: cinder/volume/qos_specs.py:57 +#, python-format +msgid "Valid consumer of QoS specs are: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:84 cinder/volume/qos_specs.py:105 +#: cinder/volume/qos_specs.py:155 cinder/volume/qos_specs.py:197 +#: cinder/volume/qos_specs.py:211 cinder/volume/qos_specs.py:225 +#: cinder/volume/volume_types.py:43 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:123 cinder/volume/qos_specs.py:140 +#: cinder/volume/qos_specs.py:272 cinder/volume/volume_types.py:52 +#: cinder/volume/volume_types.py:99 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/qos_specs.py:156 +#, python-format +msgid "Failed to get all associations of qos specs %s" +msgstr "" + +#: cinder/volume/qos_specs.py:189 +#, python-format +msgid "" +"Type %(type_id)s is already associated with another qos specs: " +"%(qos_specs_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:198 +#, python-format +msgid "Failed to associate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:212 +#, python-format +msgid "Failed to disassociate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:226 +#, python-format +msgid "Failed to disassociate qos specs %s." +msgstr "" + +#: cinder/volume/qos_specs.py:284 cinder/volume/volume_types.py:111 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/utils.py:144 +#, python-format +msgid "" +"Incorrect value error: %(blocksize)s, it may indicate that " +"'volume_dd_blocksize' was configured incorrectly. Fall back to default." +msgstr "" + +#: cinder/volume/volume_types.py:130 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:131 +#: cinder/volume/drivers/block_device.py:143 cinder/volume/drivers/lvm.py:654 +#: cinder/volume/drivers/lvm.py:669 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:157 cinder/volume/drivers/lvm.py:687 +#, python-format +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:183 cinder/volume/drivers/lvm.py:483 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:200 cinder/volume/drivers/lvm.py:504 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:272 cinder/volume/drivers/lvm.py:227 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:287 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:311 cinder/volume/drivers/lvm.py:300 +#: cinder/volume/drivers/zadara.py:509 cinder/volume/drivers/nexenta/nfs.py:189 +#, python-format +msgid "Creating clone of volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:380 +msgid "No free disk" +msgstr "" + +#: cinder/volume/drivers/block_device.py:393 +msgid "No big enough free disk" +msgstr "" + +#: cinder/volume/drivers/coraid.py:84 +#, python-format +msgid "Invalid ESM url scheme \"%s\". Supported https only." +msgstr "" + +#: cinder/volume/drivers/coraid.py:111 +msgid "Invalid REST handle name. Expected path." 
+msgstr "" + +#: cinder/volume/drivers/coraid.py:134 +#, python-format +msgid "Call to json.loads() failed: %(ex)s. Response: %(resp)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:224 +msgid "Session is expired. Relogin on ESM." +msgstr "" + +#: cinder/volume/drivers/coraid.py:244 +msgid "Reply is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:246 +msgid "Error message is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:284 +#, python-format +msgid "Coraid Appliance ping failed: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:297 +#, python-format +msgid "Volume \"%(name)s\" created with VSX LUN \"%(lun)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:311 +#, python-format +msgid "Volume \"%s\" deleted." +msgstr "" + +#: cinder/volume/drivers/coraid.py:315 +#, python-format +msgid "Resize volume \"%(name)s\" to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:319 +#, python-format +msgid "Repository for volume \"%(name)s\" found: \"%(repo)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:333 +#, python-format +msgid "Volume \"%(name)s\" resized. New size is %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:385 +msgid "Cannot create clone volume in different repository." +msgstr "" + +#: cinder/volume/drivers/coraid.py:505 +#, python-format +msgid "Initialize connection %(shelf)s/%(lun)s for %(name)s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:139 +#, python-format +msgid "" +"CLI output\n" +"%s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:154 +msgid "Reading CLI MOTD" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:158 +#, python-format +msgid "Setting CLI terminal width: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:162 +#, python-format +msgid "Sending CLI command: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:169 +msgid "Error executing EQL command" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:199 +#, python-format +msgid "EQL-driver: executing \"%s\"" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:208 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:383 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:215 cinder/volume/drivers/san/san.py:149 +#, python-format +msgid "Error running SSH command: %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:282 +#, python-format +msgid "Volume %s does not exist, it may have already been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:300 +#, python-format +msgid "EQL-driver: Setup is complete, group IP is %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:304 +msgid "Failed to setup the Dell EqualLogic driver" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:320 +#, python-format +msgid "Failed to create volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:329 +#, python-format +msgid "Volume %s was not found while trying to delete it" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:333 +#, python-format +msgid "Failed to delete volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:348 +#, python-format +msgid "Failed to create snapshot of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:361 +#, python-format +msgid "Failed to create volume from snapshot %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:374 +#, python-format +msgid "Failed to create clone of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:384 +#, python-format +msgid "Failed to delete snapshot %(snap)s of volume %(vol)s" +msgstr "" + +#: 
cinder/volume/drivers/eqlx.py:405 +#, python-format +msgid "Failed to initialize connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:415 +#, python-format +msgid "Failed to terminate connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:436 +#, python-format +msgid "Volume %s was not found; it may have been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:440 +#, python-format +msgid "Failed to ensure export of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:459 +#, python-format +msgid "Failed to extend_volume %(name)s from %(current_size)sGB to %(new_size)sGB" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:86 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:91 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:103 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:161 +#, python-format +msgid "Cloning volume %(src)s to volume %(dst)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:166 +msgid "Volume status must be 'available'." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:202 cinder/volume/drivers/nfs.py:122 +#: cinder/volume/drivers/netapp/nfs.py:753 +#, python-format +msgid "casted to %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:216 +msgid "Snapshot status must be \"available\" to clone." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:238 +#, python-format +msgid "snapshot: %(snap)s, volume: %(vol)s, volume_size: %(size)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:257 +#, python-format +msgid "will copy from snapshot at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:275 cinder/volume/drivers/nfs.py:172 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:373 +#, python-format +msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:403 +#, python-format +msgid "nova call result: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:405 +msgid "Call to Nova to create snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:427 +msgid "Nova returned \"error\" status while creating snapshot." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:431 +#, python-format +msgid "Status of snapshot %(id)s is now %(status)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:444 +#, python-format +msgid "Timed out while waiting for Nova update for creation of snapshot %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:456 +#, python-format +msgid "create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:457 +#, python-format +msgid "volume id: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:532 +msgid "'active' must be present when writing snap_info." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:562 +#, python-format +msgid "deleting snapshot %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:566 +msgid "Volume status must be \"available\" or \"in-use\"." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:582 +#, python-format +msgid "" +"Snapshot record for %s is not present, allowing snapshot_delete to " +"proceed."
+msgstr "" + +#: cinder/volume/drivers/glusterfs.py:587 +#, python-format +msgid "snapshot_file for this snap is %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:608 +#, python-format +msgid "No base file found for %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:625 +#, python-format +msgid "No %(base_id)s found for %(file)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:680 +#, python-format +msgid "No file found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:690 +#, python-format +msgid "No snap found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:701 +#, python-format +msgid "No file depends on %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:727 +#, python-format +msgid "Check condition failed: %s expected to be None." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:778 +msgid "Call to Nova delete snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:796 +#, python-format +msgid "status of snapshot %s is still \"deleting\"... waiting" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:802 +#, python-format +msgid "Unable to delete snapshot %(id)s, status: %(status)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:815 +#, python-format +msgid "Timed out while waiting for Nova update for deletion of snapshot %(id)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:904 +#, python-format +msgid "%s must be a valid raw or qcow2 image." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:967 +msgid "Extend volume is only supported for this driver when no snapshots exist." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:975 +#, python-format +msgid "Unrecognized backing format: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:990 +#, python-format +msgid "creating new volume at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:993 +#, python-format +msgid "file already exists at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1019 cinder/volume/drivers/nfs.py:159 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1021 +#, python-format +msgid "Available shares: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1038 +#, python-format +msgid "" +"GlusterFS share at %(dir)s is not writable by the Cinder volume service. " +"Snapshot operations will not be supported." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:96 +#, python-format +msgid "GPFS is not active. Detailed output: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:97 +#, python-format +msgid "GPFS is not running - state: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:140 +msgid "Option gpfs_mount_point_base is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:147 +msgid "Option gpfs_images_share_mode is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:153 +msgid "Option gpfs_images_dir is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:160 +#, python-format +msgid "" +"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " +"belong to different file systems" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:169 +#, python-format +msgid "" +"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in " +"cluster daemon level %(cur)s - must be at least at level %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:183 +#, python-format +msgid "%s must be an absolute path." 
+msgstr "" + +#: cinder/volume/drivers/gpfs.py:188 +#, python-format +msgid "%s is not a directory." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:197 +#, python-format +msgid "" +"The GPFS filesystem %(fs)s is not at the required release level. Current" +" level is %(cur)s, must be at least %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:556 +#, python-format +msgid "Failed to resize volume %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:604 +#, python-format +msgid "mkfs failed on volume %(vol)s, error message was: %(err)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:637 +#, python-format +msgid "" +"%s cannot be accessed. Verify that GPFS is active and file system is " +"mounted." +msgstr "" + +#: cinder/volume/drivers/lvm.py:189 +#, python-format +msgid "Unabled to delete due to existing snapshot for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:215 +#, python-format +msgid "Volume device file path %s does not exist." +msgstr "" + +#: cinder/volume/drivers/lvm.py:221 +#, python-format +msgid "Size for volume: %s not found, cannot secure delete." +msgstr "" + +#: cinder/volume/drivers/lvm.py:262 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:361 +#, python-format +msgid "Unable to update stats on non-initialized Volume Group: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:462 +#, python-format +msgid "Error creating iSCSI target, retrying creation for target: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:482 +#, python-format +msgid "volume_info:%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:518 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:519 cinder/volume/drivers/lvm.py:724 +#: cinder/volume/drivers/huawei/rest_common.py:1225 +#, python-format +msgid "%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:573 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/nfs.py:109 +msgid "Driver specific implementation needs to return mount_point_base." +msgstr "" + +#: cinder/volume/drivers/nfs.py:263 +#, python-format +msgid "Expected volume size was %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:264 +#, python-format +msgid " but size is now %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:361 +#, python-format +msgid "%s is already mounted" +msgstr "" + +#: cinder/volume/drivers/nfs.py:421 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:426 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/nfs.py:431 +#, python-format +msgid "NFS config 'nfs_oversub_ratio' invalid. Must be > 0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:439 +#, python-format +msgid "NFS config 'nfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:493 +#, python-format +msgid "Selected %s as target nfs share." 
+msgstr "" + +#: cinder/volume/drivers/nfs.py:526 +#, python-format +msgid "%s is above nfs_used_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:529 +#, python-format +msgid "%s is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:532 +#, python-format +msgid "%s reserved space is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/rbd.py:160 +#, python-format +msgid "Invalid argument - whence=%s not supported" +msgstr "" + +#: cinder/volume/drivers/rbd.py:164 +msgid "Invalid argument" +msgstr "" + +#: cinder/volume/drivers/rbd.py:183 +msgid "fileno() not supported by RBD()" +msgstr "" + +#: cinder/volume/drivers/rbd.py:210 +#, python-format +msgid "error opening rbd image %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:259 +msgid "rados and rbd python libraries not found" +msgstr "" + +#: cinder/volume/drivers/rbd.py:265 +msgid "error connecting to ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:346 cinder/volume/drivers/sheepdog.py:178 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:377 +#, python-format +msgid "clone depth exceeds limit of %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:411 +#, python-format +msgid "maximum clone depth (%d) has been reached - flattening source volume" +msgstr "" + +#: cinder/volume/drivers/rbd.py:423 +#, python-format +msgid "flattening source volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:435 +#, python-format +msgid "creating snapshot='%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:445 +#, python-format +msgid "cloning '%(src_vol)s@%(src_snap)s' to '%(dest)s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:459 +msgid "clone created successfully" +msgstr "" + +#: cinder/volume/drivers/rbd.py:468 +#, python-format +msgid "creating volume '%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:484 +#, python-format +msgid "flattening %(pool)s/%(img)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:490 +#, python-format +msgid "cloning %(pool)s/%(img)s@%(snap)s to %(dst)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:527 +msgid "volume has no backup snaps" +msgstr "" + +#: cinder/volume/drivers/rbd.py:550 +#, python-format +msgid "volume %s is not a clone" +msgstr "" + +#: cinder/volume/drivers/rbd.py:568 +#, python-format +msgid "deleting parent snapshot %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:579 +#, python-format +msgid "deleting parent %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:593 +#, python-format +msgid "volume %s no longer exists in backend" +msgstr "" + +#: cinder/volume/drivers/rbd.py:609 +msgid "volume has clone snapshot(s)" +msgstr "" + +#: cinder/volume/drivers/rbd.py:625 +#, python-format +msgid "deleting rbd volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:629 +msgid "" +"ImageBusy error raised while deleting rbd volume. This may have been " +"caused by a connection from a client that has crashed and, if so, may be " +"resolved by retrying the delete after 30 seconds has elapsed." 
+msgstr "" + +#: cinder/volume/drivers/rbd.py:642 +msgid "volume is a clone so cleaning references" +msgstr "" + +#: cinder/volume/drivers/rbd.py:696 +#, python-format +msgid "connection data: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:705 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:709 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:712 +msgid "Not an rbd snapshot" +msgstr "" + +#: cinder/volume/drivers/rbd.py:724 +#, python-format +msgid "not cloneable: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:728 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:733 +msgid "rbd image clone requires image format to be 'raw' but image {0} is '{1}'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:747 +#, python-format +msgid "Unable to open image %(loc)s: %(err)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:817 +msgid "volume backup complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:830 +msgid "volume restore complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:840 cinder/volume/drivers/sheepdog.py:195 +#, python-format +msgid "Failed to Extend Volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:845 cinder/volume/drivers/sheepdog.py:200 +#: cinder/volume/drivers/windows/windows.py:223 +#, python-format +msgid "Extend volume from %(old_size)s GB to %(new_size)s GB." +msgstr "" + +#: cinder/volume/drivers/scality.py:67 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:78 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:84 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:105 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:139 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:59 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:64 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:144 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:151 +#, python-format +msgid "" +"Failed to make httplib connection SolidFire Cluster: %s (verify san_ip " +"settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:154 +#, python-format +msgid "Failed to make httplib connection: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:161 +#, python-format +msgid "" +"Request to SolidFire cluster returned bad status: %(status)s / %(reason)s" +" (check san_login/san_password settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:166 +#, python-format +msgid "HTTP request failed, with status: %(status)s and reason: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:177 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:183 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:187 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:189 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:195 +#, python-format +msgid "Detected 
xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:202 +#: cinder/volume/drivers/solidfire.py:271 +#: cinder/volume/drivers/solidfire.py:366 +#, python-format +msgid "API response: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:222 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:253 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:315 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:398 +msgid "Failed to get model update from clone" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:410 +#, python-format +msgid "Failed volume create: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:425 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:460 +#, python-format +msgid "Failed to get SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:469 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:478 +#, python-format +msgid "Volume %s, not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:481 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:550 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:554 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:556 +msgid "This usually means the volume was never successfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:569 +#, python-format +msgid "Failed to delete SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:572 +#: cinder/volume/drivers/solidfire.py:646 +#: cinder/volume/drivers/solidfire.py:709 +#: cinder/volume/drivers/solidfire.py:734 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:575 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:579 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:587 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:638 +msgid "Entering SolidFire extend_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:660 +msgid "Leaving SolidFire extend_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:665 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:673 +msgid "Failed to get updated stats" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:703 +#: cinder/volume/drivers/solidfire.py:728 +msgid "Entering SolidFire attach_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:773 +msgid "Leaving SolidFire transfer volume" +msgstr "" + +#: cinder/volume/drivers/zadara.py:236 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:260 +#, python-format +msgid "Operation completed. 
%(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:357 +#, python-format +msgid "Pool %(name)s: %(total)sGB total, %(free)sGB free" +msgstr "" + +#: cinder/volume/drivers/zadara.py:408 cinder/volume/drivers/zadara.py:531 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:438 +#, python-format +msgid "Create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:445 cinder/volume/drivers/zadara.py:490 +#: cinder/volume/drivers/zadara.py:516 +#, python-format +msgid "Volume %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:456 +#, python-format +msgid "Delete snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:464 +#, python-format +msgid "snapshot: original volume %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:472 +#, python-format +msgid "snapshot: snapshot %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:483 +#, python-format +msgid "Creating volume from snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:496 +#, python-format +msgid "Snapshot %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:614 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:40 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:79 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:83 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:91 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:98 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:107 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:115 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:130 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:137 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:144 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:152 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:157 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:167 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:177 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:188 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:197 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:218 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:230 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:241 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:257 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:266 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:278 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:287 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:292 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:302 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:312 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:321 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:342 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. 
Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:354 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:365 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:381 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:390 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:402 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:411 +msgid "Entering delete_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:413 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:420 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:430 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:438 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:442 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:456 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:465 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:472 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:476 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:488 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:497 +#: cinder/volume/drivers/emc/emc_smis_common.py:567 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:502 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:518 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:527 +#, python-format +msgid "" +"Error Create Snapshot: %(snapshot)s Volume: %(volume)s Error: " +"%(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:535 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:541 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:545 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:551 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:559 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:574 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:590 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:599 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:611 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:621 +#, python-format +msgid "Create export: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:626 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:648 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:663 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:674 +#, python-format +msgid "Error mapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:678 +#, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:694 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:707 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:711 +#, python-format +msgid "HidePaths for volume %s completed successfully." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:724 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:739 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:744 +#, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:757 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:770 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:775 +#, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:781 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:790 +#: cinder/volume/drivers/emc/emc_smis_common.py:820 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:804 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:810 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:834 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:840 +#, python-format +msgid "Volume %s is already mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:852 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:884 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:887 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:903 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:906 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:928 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:948 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:952 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:959 +msgid "Cannot connect to ECOM server" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:971 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:984 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:997 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1010 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1054 +#, python-format +msgid "Pool %(storage_type)s is not found." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1060 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1066 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1082 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1114 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1117 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1130 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1158 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1184 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1188 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1248 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1289 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1302 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1314 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1326 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1361 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1404 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1409 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1419 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1441 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1463 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1491 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1520 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1526 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1538 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1548 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1550 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1566 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:152 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:161 +#, python-format +msgid "Cannot find device number for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:191 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:198 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:215 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:70 +#, python-format +msgid "Range: start LU: %(start)s, end LU: %(end)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:84 +#, python-format +msgid "setting LU upper (end) limit to %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:92 +#, python-format +msgid "%(element)s: %(val)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:103 cinder/volume/drivers/hds/hds.py:105 +#, python-format +msgid "XML exception reading parameter: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:178 +#, python-format +msgid "portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:197 +#, python-format +msgid "No configuration found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:250 +#, python-format +msgid "HDP not found: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:289 +#, python-format +msgid "iSCSI portal not found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:327 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:355 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is cloned." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:372 +#, python-format +msgid "LUN %(lun)s extended to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:395 +#, python-format +msgid "delete lun %(lun)s on %(name)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:480 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created from snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:503 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is created as snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:522 +#, python-format +msgid "LUN %s is deleted." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:57 +msgid "_instantiate_driver: configuration not found." 
+msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:64 +#, python-format +msgid "" +"_instantiate_driver: Loading %(protocol)s driver for Huawei OceanStor " +"%(product)s series storage arrays." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:84 +#, python-format +msgid "" +"\"Product\" or \"Protocol\" is illegal. \"Product\" should be set to " +"either T, Dorado or HVS. \"Protocol\" should be set to either iSCSI or " +"FC. Product: %(product)s Protocol: %(protocol)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:74 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s host: %(host)s initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:92 +#: cinder/volume/drivers/huawei/huawei_t.py:461 +#, python-format +msgid "initialize_connection: Target FC ports WWNS: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:101 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(ini)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:159 +#: cinder/volume/drivers/huawei/rest_common.py:1278 +#, python-format +msgid "" +"_get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " +"check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:206 +#: cinder/volume/drivers/huawei/rest_common.py:1083 +#, python-format +msgid "_get_tgt_iqn: iSCSI IP is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:234 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:248 +#, python-format +msgid "" +"_get_iscsi_tgt_port_info: Failed to get iSCSI port info. Please make sure" +" the iSCSI port IP %s is configured in array." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:323 +#: cinder/volume/drivers/huawei/huawei_t.py:552 +#, python-format +msgid "" +"terminate_connection: volume: %(vol)s, host: %(host)s, connector: " +"%(initiator)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:351 +#, python-format +msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:436 +msgid "validate_connector: The FC driver requires thewwpns in the connector." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:443 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:578 +#, python-format +msgid "_remove_fc_ports: FC port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:40 +#, python-format +msgid "parse_xml_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:129 +#, python-format +msgid "_get_host_os_type: Host %(ip)s OS type is %(os)s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:59 +#, python-format +msgid "HVS Request URL: %(url)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:60 +#, python-format +msgid "HVS Request Data: %(data)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:73 +#, python-format +msgid "HVS Response Data: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:75 +#, python-format +msgid "Bad response from server: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:82 +msgid "JSON transfer error" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:102 +#, python-format +msgid "Login error, reason is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:166 +#, python-format +msgid "" +"%(err)s\n" +"result: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:173 +#, python-format +msgid "%s \"data\" was not in result." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:208 +msgid "Can't find the Qos policy in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:246 +msgid "Can't find lun or lun group in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:280 +#, python-format +msgid "Invalid resource pool: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:298 +#, python-format +msgid "Get pool info error, pool name is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:327 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:354 +#, python-format +msgid "_stop_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:474 +#, python-format +msgid "" +"_mapping_hostgroup_and_lungroup: lun_group: %(lun_group)sview_id: " +"%(view_id)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:511 +#: cinder/volume/drivers/huawei/rest_common.py:543 +#, python-format +msgid "initiator name:%(initiator_name)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:527 +#, python-format +msgid "host lun id is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:553 +#, python-format +msgid "the free wwns %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:574 +#, python-format +msgid "the fc server properties is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:688 +#, python-format +msgid "JSON transfer data error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:874 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:937 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:964 +#, python-format +msgid "" +"PrefetchType config is wrong. PrefetchType must in 1,2,3,4. fetchtype " +"is:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:970 +msgid "Use default prefetch fetchtype. Prefetch fetchtype:Intelligent." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:982 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal.LUNcopy name: " +"%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1056 +#, python-format +msgid "" +"_get_iscsi_port_info: Failed to get iscsi port info through config IP " +"%(ip)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1101 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1124 +#, python-format +msgid "_parse_volume_type: type id: %(type_id)s config parameter is: %(params)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1157 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the configuration file " +"%(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1162 +#, python-format +msgid "The config parameters are: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1239 +#: cinder/volume/drivers/huawei/ssh_common.py:118 +#: cinder/volume/drivers/huawei/ssh_common.py:1265 +#, python-format +msgid "_check_conf_file: Config file invalid. %s must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1246 +#: cinder/volume/drivers/huawei/ssh_common.py:125 +msgid "_check_conf_file: Config file invalid. StoragePool must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1256 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1300 +msgid "Can not find lun in array" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:54 +#, python-format +msgid "ssh_read: Read SSH timeout. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:70 +msgid "No response message. Please check system status." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:101 +#: cinder/volume/drivers/huawei/ssh_common.py:1249 +msgid "do_setup" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:135 +#: cinder/volume/drivers/huawei/ssh_common.py:1287 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType is invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:169 +#, python-format +msgid "_get_login_info: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:224 +#, python-format +msgid "create_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:242 +#, python-format +msgid "" +"_name_translate: Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:279 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the element in configuration " +"file %(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:373 +#: cinder/volume/drivers/huawei/ssh_common.py:1451 +#, python-format +msgid "LUNType must be \"Thin\" or \"Thick\". LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:395 +msgid "" +"_parse_conf_lun_params: Use default prefetch type. 
Prefetch type: " +"Intelligent" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:421 +#, python-format +msgid "" +"_get_maximum_capacity_pool_id: Failed to get pool id. Please check config" +" file and make sure the StoragePool %s is created in storage array." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:436 +#, python-format +msgid "CLI command: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:466 +#, python-format +msgid "" +"_execute_cli: Can not connect to IP %(old)s, try to connect to the other " +"IP %(new)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:501 +#, python-format +msgid "_execute_cli: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:511 +#, python-format +msgid "delete_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:516 +#, python-format +msgid "delete_volume: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:570 +#, python-format +msgid "" +"create_volume_from_snapshot: snapshot name: %(snapshot)s, volume name: " +"%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:580 +#, python-format +msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:650 +#, python-format +msgid "_wait_for_luncopy: LUNcopy %(luncopyname)s status is %(status)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:688 +#, python-format +msgid "create_cloned_volume: src volume: %(src)s, tgt volume: %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:697 +#, python-format +msgid "Source volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:739 +#, python-format +msgid "" +"extend_volume: extended volume name: %(extended_name)s new added volume " +"name: %(added_name)s new added volume size: %(added_size)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:747 +#, python-format +msgid "extend_volume: volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:779 +#, python-format +msgid "create_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:785 +msgid "create_snapshot: Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:792 +#, python-format +msgid "create_snapshot: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:855 +#, python-format +msgid "delete_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:865 +#, python-format +msgid "" +"delete_snapshot: Can not delete snapshot %s for it is a source LUN of " +"LUNCopy." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:873 +#, python-format +msgid "delete_snapshot: Snapshot %(snap)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:916 +#, python-format +msgid "" +"%(func)s: %(msg)s\n" +"CLI command: %(cmd)s\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:933 +#, python-format +msgid "map_volume: Volume %s was not found." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1079 +#, python-format +msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1102 +#, python-format +msgid "remove_map: Host %s does not exist." 
+msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1106 +#, python-format +msgid "remove_map: Volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1119 +#, python-format +msgid "remove_map: No map between host %(host)s and volume %(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1138 +#, python-format +msgid "" +"_delete_map: There are IOs accessing the system. Retry to delete host map" +" %(mapid)s 10s later." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1146 +#, python-format +msgid "" +"_delete_map: Failed to delete host map %(mapid)s.\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1185 +msgid "_update_volume_stats: Updating volume stats." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1277 +msgid "_check_conf_file: Config file invalid. StoragePool must be specified." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1311 +msgid "" +"_get_device_type: The driver only supports Dorado5100 and Dorado 2100 G2 " +"now." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1389 +#, python-format +msgid "" +"create_volume_from_snapshot: %(device)s does not support create volume " +"from snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1396 +#, python-format +msgid "create_cloned_volume: %(device)s does not support clone volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1404 +#, python-format +msgid "extend_volume: %(device)s does not support extend volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1413 +#, python-format +msgid "create_snapshot: %(device)s does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:132 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:142 +#, python-format +msgid "Failed getting details for pool %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:179 +msgid "do_setup: No configured nodes." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:182 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:186 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:190 +msgid "Unable to determine system name" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:193 +msgid "Unable to determine system id" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:196 +msgid "Unable to determine pool extent size" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:203 +#: cinder/volume/drivers/netapp/iscsi.py:122 +#: cinder/volume/drivers/netapp/nfs.py:639 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:157 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:209 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:217 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:225 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:235 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:254 +msgid "The connector does not contain the required information." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:277 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:297 +msgid "CHAP secret exists for host but CHAP is disabled" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:302 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:314 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:316 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:333 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:342 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:365 +msgid "" +"Could not get FC connection information for the host-volume connection. " +"Is the host configured properly for FC connections?" 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:380 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:385 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:403 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:411 +msgid "terminate_connection: Failed to get host name from connector." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:421 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:447 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:459 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:470 +#, python-format +msgid "enter: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:474 +msgid "extend_volume: Extending a volume with snapshots is not supported." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:481 +#, python-format +msgid "leave: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:497 +#, python-format +msgid "enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:523 +#, python-format +msgid "leave: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:540 +#, python-format +msgid "" +"enter: retype: id=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:581 +#, python-format +msgid "" +"exit: retype: ild=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:622 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:623 +msgid "_update_volume_stats: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:44 +#, python-format +msgid "Could not find key in output of command %(cmd)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:64 +#, python-format +msgid "Failed to get code level (%s)." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:86 +#, python-format +msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:143 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:165 +#, python-format +msgid "Failed to find host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:178 +#, python-format +msgid "enter: get_host_from_connector: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:207 +#, python-format +msgid "leave: get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:218 +#, python-format +msgid "enter: create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:224 +msgid "create_host: Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:235 +msgid "create_host: No initiators or wwpns supplied." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:265 +#, python-format +msgid "leave: create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:275 +#, python-format +msgid "enter: map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:301 +#, python-format +msgid "" +"leave: map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host " +"%(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:311 +#, python-format +msgid "enter: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:318 +#, python-format +msgid "unmap_vol_from_host: No mapping of volume %(vol_name)s to any host found." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:324 +#, python-format +msgid "" +"unmap_vol_from_host: Multiple mappings of volume %(vol_name)s found, no " +"host specified." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:336 +#, python-format +msgid "" +"unmap_vol_from_host: No mapping of volume %(vol_name)s to host %(host) " +"found." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:348 +#, python-format +msgid "leave: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:377 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:383 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:390 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:397 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:402 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:408 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:417 +#, python-format +msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:452 +msgid "Protocol must be specified as ' iSCSI' or ' FC'." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:495 +#, python-format +msgid "enter: create_vdisk: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:498 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:525 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping%(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:535 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within theallotted %(to)d " +"seconds timeout. Terminating." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:544 +#, python-format +msgid "" +"enter: run_flashcopy: execute FlashCopy from source %(source)s to target " +"%(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:552 +#, python-format +msgid "leave: run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:572 +#, python-format +msgid "Loopcall: _check_vdisk_fc_mappings(), vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:595 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:631 +#, python-format +msgid "Calling _ensure_vdisk_no_fc_mappings: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:639 +#, python-format +msgid "enter: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:641 +#, python-format +msgid "Tried to delete non-existant vdisk %s." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:645 +#, python-format +msgid "leave: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:649 +#, python-format +msgid "enter: create_copy: snapshot %(src)s to %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:654 +#, python-format +msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:669 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt)s from vdisk %(src)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:691 +msgid "migrate_volume started without a vdisk copy in the expected pool." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:743 +#, python-format +msgid "" +"Ignore change IO group as storage code level is %(code_level)s, below " +"then 6.4.0.0" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:35 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:211 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:244 +#, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:53 +#, python-format +msgid "Expected no output from CLI command %(cmd)s, got %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:65 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:256 +#, python-format +msgid "" +"Failed to parse CLI output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:142 +msgid "Must pass wwpn or host to lsfabric." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:158 +#, python-format +msgid "Did not find success message nor error for %(fun)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:163 +msgid "" +"storwize_svc_multihostmap_enabled is set to False, not allowing multi " +"host mapping." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:347 +#, python-format +msgid "Did not find expected key %(key)s in %(fun)s: %(raw)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:382 +#, python-format +msgid "" +"Unexpected CLI response: header/row mismatch. header: %(header)s, row: " +"%(row)s" +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:419 +#, python-format +msgid "No element by given name %s." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:440 +msgid "Not a valid value for NaElement." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:444 +msgid "NaElement name cannot be null." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:468 +msgid "Type cannot be converted into NaElement." 
+msgstr "" + +#: cinder/volume/drivers/netapp/common.py:75 +msgid "Required configuration not found" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:103 +#, python-format +msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:109 +#, python-format +msgid "Storage family %s is not supported" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:116 +#, python-format +msgid "No default storage protocol found for storage family %(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:123 +#, python-format +msgid "" +"Protocol %(storage_protocol)s is not supported for storage family " +"%(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:130 +#, python-format +msgid "" +"NetApp driver of family %(storage_family)s and protocol " +"%(storage_protocol)s loaded" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:139 +msgid "Only loading netapp drivers supported." +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:158 +#, python-format +msgid "" +"The configured NetApp driver is deprecated. Please refer the link to " +"resolve the issue '%s'." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:69 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:105 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:150 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:166 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:175 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:191 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:227 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:232 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:238 +#, python-format +msgid "Failed to get LUN target details for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:249 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:252 +#, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:290 +#, python-format +msgid "Snapshot %s deletion successful" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:310 +#: cinder/volume/drivers/netapp/iscsi.py:565 +#: cinder/volume/drivers/netapp/nfs.py:99 +#: cinder/volume/drivers/netapp/nfs.py:206 +#, python-format +msgid "Resizing %s failed. Cleaning volume." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:325 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:412 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:431 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:511 +msgid "Object is not a NetApp LUN." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:543 +#, python-format +msgid "Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:545 +#, python-format +msgid "Error getting lun attribute. Exception: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:600 +#, python-format +msgid "No need to extend volume %s as it is already the requested new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:606 +#, python-format +msgid "Resizing lun %s directly to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:633 +#, python-format +msgid "Lun %(path)s geometry failed. Message - %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:662 +#, python-format +msgid "Moving lun %(name)s to %(new_name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:677 +#, python-format +msgid "Resizing lun %s using sub clone to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:684 +#, python-format +msgid "%s cannot be sub clone resized as it is hosted on compressed volume" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:690 +#, python-format +msgid "%s cannot be sub clone resized as it contains no blocks." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:707 +#, python-format +msgid "Post clone resize lun %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:718 +#, python-format +msgid "Failure staging lun %s to tmp." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:723 +#, python-format +msgid "Failure moving new cloned lun to %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:727 +#, python-format +msgid "Failure deleting staged tmp lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:730 +#, python-format +msgid "Unknown exception in post clone resize lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:732 +#, python-format +msgid "Exception details: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:736 +msgid "Getting lun block count." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:741 +#, python-format +msgid "Failure getting lun info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:785 +#, python-format +msgid "Failed to get vol with required size and extra specs for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:796 +#, python-format +msgid "Error provisioning vol %(name)s on %(volume)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:841 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:982 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:986 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1087 +msgid "Cluster ssc is not updated. No volume stats found." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1149 +#: cinder/volume/drivers/netapp/nfs.py:1080 +msgid "Unsupported ONTAP version. ONTAP version 7.3.1 and above is supported." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1153 +#: cinder/volume/drivers/netapp/nfs.py:1084 +#: cinder/volume/drivers/netapp/utils.py:320 +msgid "Api version could not be determined." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1164 +#, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1273 +#, python-format +msgid "Error finding luns for volume %s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1390 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1393 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1456 +msgid "Volume refresh job already running. Returning..." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1462 +#, python-format +msgid "Error refreshing vol capacity. Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1470 +#, python-format +msgid "Refreshing capacity info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:104 +#: cinder/volume/drivers/netapp/nfs.py:211 +#, python-format +msgid "NFS file %s not discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:224 +#, python-format +msgid "Copied image to volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:230 +#, python-format +msgid "Registering image in cache %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:237 +#, python-format +msgid "" +"Exception while registering image %(image_id)s in cache. Exception: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:250 +#, python-format +msgid "Found cache file for image %(image_id)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:263 +#, python-format +msgid "Cloning img from cache for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:271 +msgid "Image cache cleaning in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:282 +msgid "Image cache cleaning in progress." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:293 +#, python-format +msgid "Cleaning cache for share %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:298 +#, python-format +msgid "Files to be queued for deletion %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:305 +#, python-format +msgid "Exception during cache cleaning %(share)s. Message - %(ex)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:311 +msgid "Image cache cleaning done." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:336 +#, python-format +msgid "Bytes to free %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:343 +#, python-format +msgid "Delete file path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:358 +#, python-format +msgid "Deleting file at path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:363 +#, python-format +msgid "Exception during deleting %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:395 +#, python-format +msgid "Unexpected exception in cloning image %(image_id)s. 
Message: %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:411 +#, python-format +msgid "Cloning image %s from cache" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:415 +#, python-format +msgid "Cache share: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:425 +#, python-format +msgid "Unexpected exception during image cloning in share %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:431 +#, python-format +msgid "Cloning image %s directly in share" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:436 +#, python-format +msgid "Share is cloneable %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:443 +#, python-format +msgid "Image is raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:450 +#, python-format +msgid "Image will locally be converted to raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:457 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:467 +#, python-format +msgid "Performing post clone for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:474 +msgid "NFS file could not be discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:478 +msgid "Checking file for resize" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:482 +#, python-format +msgid "Resizing file to %sG" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:488 +msgid "Resizing image file failed." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:510 +msgid "Discover file retries exhausted." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:529 +#, python-format +msgid "Image location not in the expected format %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:557 +#, python-format +msgid "Found possible share matches %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:561 +msgid "Unexpected exception while short listing used share." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:599 +#, python-format +msgid "Extending volume %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:710 +#, python-format +msgid "Shares on vserver %s will only be used for provisioning." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:714 +#: cinder/volume/drivers/netapp/nfs.py:892 +msgid "No vserver set in config. SSC will be disabled." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:757 +#, python-format +msgid "Exception creating vol %(name)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:765 +#, python-format +msgid "Volume %s could not be created on shares." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:815 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:856 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:862 +#, python-format +msgid "" +"Cloning with params volume %(volume)s, src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:924 +msgid "No cluster ssc stats found. Wait for next volume stats update." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:941 +msgid "No shares found hence skipping ssc refresh." 
+msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:978 +#: cinder/volume/drivers/netapp/nfs.py:1221 +#, python-format +msgid "Shortlisted del elg files %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:983 +#: cinder/volume/drivers/netapp/nfs.py:1226 +#, python-format +msgid "Getting file usage for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:988 +#: cinder/volume/drivers/netapp/nfs.py:1231 +#, python-format +msgid "file-usage for path %(path)s is %(bytes)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1005 +#: cinder/volume/drivers/netapp/nfs.py:1268 +#, python-format +msgid "Share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1007 +#: cinder/volume/drivers/netapp/nfs.py:1270 +#, python-format +msgid "No share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1038 +#, python-format +msgid "Found volume %(vol)s for share %(share)s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1129 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1139 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:241 +#, python-format +msgid "Unexpected error while creating ssc vol list. Message - %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:272 +#, python-format +msgid "Exception querying aggr options. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:313 +#, python-format +msgid "Exception querying sis information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:347 +#, python-format +msgid "Exception querying mirror information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:379 +#, python-format +msgid "Exception querying storage disk. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:421 +#, python-format +msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:455 +#, python-format +msgid "Successfully completed stale refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:482 +#, python-format +msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:488 +#, python-format +msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:500 +msgid "Backend not a VolumeDriver." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:502 +msgid "Backend server not NaServer." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:505 +msgid "ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:517 +msgid "refresh stale ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:616 +msgid "Fatal error: User not permitted to query NetApp volumes." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:623 +#, python-format +msgid "" +"The user does not have access or sufficient privileges to use all ssc " +"apis. The ssc features %s may not work as expected." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:122 +msgid "ems executed successfully." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:124 +#, python-format +msgid "Failed to invoke ems. 
Message : %s" +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:137 +msgid "" +"It is not the recommended way to use drivers by NetApp. Please use " +"NetAppDriver to achieve the functionality." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:160 +msgid "Requires an NaServer instance." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:317 +msgid "Unsupported Clustered Data ONTAP version." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:99 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:150 +#, python-format +msgid "Extending volume: %(id)s New size: %(size)s GB" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:166 +#, python-format +msgid "Volume %s does not exist, it seems it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:179 +#, python-format +msgid "Cannot delete snapshot %(origin): %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:190 +#, python-format +msgid "Creating temp snapshot of the original volume: %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:200 +#: cinder/volume/drivers/nexenta/nfs.py:200 +#, python-format +msgid "Volume creation failed, deleting created snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:205 +#: cinder/volume/drivers/nexenta/nfs.py:205 +#, python-format +msgid "Failed to delete zfs snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:223 +#, python-format +msgid "Enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:250 +#, python-format +msgid "Remote NexentaStor appliance at %s should be SSH-bound." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:267 +#, python-format +msgid "" +"Cannot send source snapshot %(src)s to destination %(dst)s. Reason: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:275 +#, python-format +msgid "" +"Cannot delete temporary source snapshot %(src)s on NexentaStor Appliance:" +" %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:281 +#, python-format +msgid "Cannot delete source volume %(volume)s on NexentaStor Appliance: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:318 +#, python-format +msgid "Snapshot %s does not exist, it seems it was already deleted." 
+msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:439 +#: cinder/volume/drivers/windows/windows_utils.py:230 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:449 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:461 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:471 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:481 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:514 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:522 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:83 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:88 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:89 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:90 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:96 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:85 +#, python-format +msgid "Volume %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:89 +#, python-format +msgid "Folder %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:114 +#, python-format +msgid "Creating folder on Nexenta Store %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:146 +#, python-format +msgid "Cannot destroy created folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:176 +#, python-format +msgid "Cannot destroy cloned folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:227 +#, python-format +msgid "Folder %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:237 +#: cinder/volume/drivers/nexenta/nfs.py:268 +#, python-format +msgid "Snapshot %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:302 +#, python-format +msgid "Creating regular file: %s.This may take some time." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:313 +#, python-format +msgid "Regular file: %s created." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:365 +#, python-format +msgid "Sharing folder %s on Nexenta Store" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:393 +#, python-format +msgid "Shares loaded: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/utils.py:46 +#, python-format +msgid "Invalid value: \"%s\"" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:93 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:99 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:107 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:137 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:190 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:246 +#, python-format +msgid "Snapshot info: %(name)s => %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:321 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:79 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:166 +#, python-format +msgid "Invalid hp3parclient version. Version %s or greater required." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:179 +#, python-format +msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:193 +#, python-format +msgid "HP3PARCommon %(common_ver)s, hp3parclient %(rest_ver)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:212 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:494 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:228 +#, python-format +msgid "Failed to get domain because CPG (%s) doesn't exist on array." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:247 +#, python-format +msgid "Error extending volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:347 +#, python-format +msgid "command %s failed" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:390 +#, python-format +msgid "Error running ssh command: %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:586 +#, python-format +msgid "VV Set %s does not exist." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:633 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:684 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:752 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1004 +#, python-format +msgid "Failure in update_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1019 +#, python-format +msgid "Failure in clear_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1031 +#, python-format +msgid "Error attaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1039 +#, python-format +msgid "Error detaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:124 +#, python-format +msgid "Invalid IP address format '%s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:158 +#, python-format +msgid "" +"Found invalid iSCSI IP address(s) in configuration option(s) " +"hp3par_iscsi_ips or iscsi_ip_address '%s.'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:164 +msgid "At least one valid iSCSI IP address must be set." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:266 +msgid "Least busy iSCSI port not found, using first iSCSI port in list." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:75 +#, python-format +msgid "Failure while invoking function: %(func)s. Error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:162 +#, python-format +msgid "Error while terminating session: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:165 +msgid "Successfully established connection to the server." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:172 +#, python-format +msgid "Error while logging out the user: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:218 +#, python-format +msgid "" +"Not authenticated error occurred. Will create session and try API call " +"again: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:258 +#, python-format +msgid "Task: %(task)s progress: %(prog)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:262 +#, python-format +msgid "Task %s status: success." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:266 +#: cinder/volume/drivers/vmware/api.py:271 +#, python-format +msgid "Task: %(task)s failed with error: %(err)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:290 +msgid "Lease is ready." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:294 +msgid "Lease initializing..." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:304 +#, python-format +msgid "Error: unknown lease state %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:51 +#, python-format +msgid "Read %(bytes)s out of %(max)s from ThreadSafePipe." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:56 +#, python-format +msgid "Completed transfer of size %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:102 +#, python-format +msgid "Initiating image service update on image: %(image)s with meta: %(meta)s" +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:117 +#, python-format +msgid "Glance image: %s is now active." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:123 +#, python-format +msgid "Glance image: %s is in killed state." 
+msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:132 +#, python-format +msgid "Glance image %(id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:171 +#, python-format +msgid "" +"Exception during HTTP connection close in VMwareHTTPWrite. Exception is " +"%s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:203 +#: cinder/volume/drivers/vmware/read_write_util.py:292 +msgid "Could not retrieve URL from lease." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:206 +#, python-format +msgid "Opening vmdk url: %s for write." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:231 +#, python-format +msgid "Written %s bytes to vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:242 +#: cinder/volume/drivers/vmware/read_write_util.py:318 +#, python-format +msgid "Updating progress to %s percent." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:258 +#: cinder/volume/drivers/vmware/read_write_util.py:334 +msgid "Lease released." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:260 +#: cinder/volume/drivers/vmware/read_write_util.py:336 +#, python-format +msgid "Lease is already in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:295 +#, python-format +msgid "Opening vmdk url: %s for read." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:307 +#, python-format +msgid "Read %s bytes from vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:150 +#, python-format +msgid "Error(s): %s occurred in the call to RetrievePropertiesEx." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:189 +#, python-format +msgid "No such SOAP method %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:198 +#, python-format +msgid "httplib error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:209 +#, python-format +msgid "Socket error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:218 +#, python-format +msgid "Type error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:225 +#, python-format +msgid "Error in %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:112 +#, python-format +msgid "Returning spec value %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:115 +#, python-format +msgid "Invalid spec value: %s specified." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:118 +#, python-format +msgid "Returning default spec value: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:169 +#, python-format +msgid "%s not set." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:174 +#, python-format +msgid "Successfully setup driver: %(driver)s for server: %(ip)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:219 +msgid "Backing not available, no operation to be performed." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:287 +#, python-format +msgid "" +"Unable to pick datastore to accommodate %(size)s bytes from the " +"datastores: %(dss)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:293 +#, python-format +msgid "" +"Selected datastore: %(datastore)s with %(host_count)d connected host(s) " +"for the volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:375 +#, python-format +msgid "" +"Unable to find suitable datastore for volume of size: %(vol)s GB under " +"host: %(host)s. 
More details: %(excep)s"
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:385
+#, python-format
+msgid "Unable to find host to accommodate a disk of size: %s in the inventory."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:412
+#, python-format
+msgid ""
+"Unable to find suitable datastore for volume: %(vol)s under host: "
+"%(host)s. More details: %(excep)s"
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:422
+#, python-format
+msgid "Unable to create volume: %s in the inventory."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:441
+#, python-format
+msgid "The instance %s, for which initialize connection is called, exists."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:448
+#, python-format
+msgid "There is no backing for the volume: %s. Need to create one."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:456
+msgid "The instance for which initialize connection is called does not exist."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:461
+#, python-format
+msgid "Trying to boot from an empty volume: %s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:470
+#, python-format
+msgid ""
+"Returning connection_info: %(info)s for volume: %(volume)s with "
+"connector: %(connector)s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:518
+#, python-format
+msgid "Snapshot of volume not supported in state: %s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:523
+#, python-format
+msgid "There is no backing, so will not create snapshot: %s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:528
+#, python-format
+msgid "Successfully created snapshot: %s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:549
+#, python-format
+msgid "Delete snapshot of volume not supported in state: %s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:554
+#, python-format
+msgid "There is no backing, and so there is no snapshot: %s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:558
+#, python-format
+msgid "Successfully deleted snapshot: %s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:586
+#, python-format
+msgid "Successfully cloned new backing: %(back)s from source VMDK file: %(vmdk)s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:603
+#, python-format
+msgid ""
+"There is no backing for the source volume: %(svol)s. Not creating any "
+"backing for the volume: %(vol)s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:633
+#, python-format
+msgid ""
+"There is no backing for the source snapshot: %(snap)s. Not creating any "
+"backing for the volume: %(vol)s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:642
+#: cinder/volume/drivers/vmware/vmdk.py:982
+#, python-format
+msgid ""
+"There is no snapshot point for the snapshotted volume: %(snap)s. Not "
+"creating any backing for the volume: %(vol)s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:678
+#, python-format
+msgid "Cannot create image of disk format: %s. Only vmdk disk format is accepted."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:713
+#: cinder/volume/drivers/vmware/vmdk.py:771
+#, python-format
+msgid "Fetching glance image: %(id)s to server: %(host)s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:722
+#: cinder/volume/drivers/vmware/vmdk.py:792
+#, python-format
+msgid "Done copying image: %(id)s to volume: %(vol)s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:725
+#, python-format
+msgid ""
+"Exception in copy_image_to_volume: %(excep)s. Deleting the backing: "
+"%(back)s."
+msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:746 +#, python-format +msgid "Exception in _select_ds_for_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:749 +#, python-format +msgid "Selected datastore %(ds)s for new volume of size %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:784 +#, python-format +msgid "Exception in copy_image_to_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:787 +#, python-format +msgid "Deleting the backing: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:808 +#, python-format +msgid "Copy glance image: %s to create new volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:842 +msgid "Upload to glance of attached volume is not supported." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:847 +#, python-format +msgid "Copy Volume: %s to new image." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:853 +#, python-format +msgid "Backing not found, creating for volume: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:872 +#, python-format +msgid "Done copying volume %(vol)s to a new image %(img)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:922 +#, python-format +msgid "Relocating volume: %(backing)s to %(ds)s and %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:961 +#: cinder/volume/drivers/vmware/volumeops.py:630 +#, python-format +msgid "Successfully created clone: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:974 +#, python-format +msgid "" +"There is no backing for the snapshoted volume: %(snap)s. Not creating any" +" backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1010 +#, python-format +msgid "" +"There is no backing for the source volume: %(src)s. Not creating any " +"backing for volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1018 +#, python-format +msgid "Linked clone of source volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:94 +#, python-format +msgid "Downloading image: %s from glance image server as a flat vmdk file." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:107 +#: cinder/volume/drivers/vmware/vmware_images.py:126 +#, python-format +msgid "Downloaded image: %s from glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:113 +#, python-format +msgid "Downloading image: %s from glance image server using HttpNfc import." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:132 +#, python-format +msgid "Uploading image: %s to the Glance image server using HttpNfc export." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:158 +#, python-format +msgid "Uploaded image: %s to the Glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:87 +#, python-format +msgid "Did not find any backing with name: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:94 +#, python-format +msgid "Deleting the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:97 +#, python-format +msgid "Initiated deletion of VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:99 +#, python-format +msgid "Deleted the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:237 +#, python-format +msgid "There are no valid datastores attached to %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:289 +#, python-format +msgid "" +"Creating folder: %(child_folder_name)s under parent folder: " +"%(parent_folder)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:306 +#, python-format +msgid "Child folder already present: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:314 +#, python-format +msgid "Created child folder: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:365 +#, python-format +msgid "Spec for creating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:383 +#, python-format +msgid "" +"Creating volume backing name: %(name)s disk_type: %(disk_type)s size_kb: " +"%(size_kb)s at folder: %(folder)s resourse pool: %(resource_pool)s " +"datastore name: %(ds_name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:395 +#, python-format +msgid "Initiated creation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:398 +#, python-format +msgid "Successfully created volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:438 +#, python-format +msgid "Spec for relocating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:452 +#, python-format +msgid "" +"Relocating backing: %(backing)s to datastore: %(ds)s and resource pool: " +"%(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:462 +#, python-format +msgid "Initiated relocation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:464 +#, python-format +msgid "" +"Successfully relocated volume backing: %(backing)s to datastore: %(ds)s " +"and resource pool: %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:474 +#, python-format +msgid "Moving backing: %(backing)s to folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:479 +#, python-format +msgid "Initiated move of volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:482 +#, python-format +msgid "Successfully moved volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:494 +#, python-format +msgid "Snapshoting backing: %(backing)s with name: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:501 +#, python-format +msgid "Initiated snapshot of volume backing: %(backing)s named: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:505 +#, python-format +msgid "Successfully created snapshot: %(snap)s for volume backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:553 +#, python-format +msgid "Deleting the snapshot: %(name)s from backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:558 +#, python-format +msgid "" +"Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " +"delete anything." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:565 +#, python-format +msgid "Initiated snapshot: %(name)s deletion for backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:569 +#, python-format +msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:597 +#, python-format +msgid "Spec for cloning the backing: %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:613 +#, python-format +msgid "" +"Creating a clone of backing: %(back)s, named: %(name)s, clone type: " +"%(type)s from snapshot: %(snap)s on datastore: %(ds)s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:627 +#, python-format +msgid "Initiated clone of backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:638 +#, python-format +msgid "Deleting file: %(file)s under datacenter: %(dc)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:646 +#, python-format +msgid "Initiated deletion via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:648 +#, python-format +msgid "Successfully deleted file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:701 +msgid "Copying disk data before snapshot of the VM" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:711 +#, python-format +msgid "Initiated copying disk data via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:713 +#, python-format +msgid "Successfully copied disk at: %(src)s to: %(dest)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:722 +#, python-format +msgid "Deleting vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:729 +#, python-format +msgid "Initiated deleting vmdk file via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:731 +#, python-format +msgid "Deleted vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/windows/windows.py:102 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:47 +#, python-format +msgid "" +"check_for_setup_error: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:53 +msgid "check_for_setup_error: there is no ISCSI traffic listening." +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:63 +#, python-format +msgid "" +"get_host_information: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:73 +#, python-format +msgid "" +"get_host_information: the ISCSI target information could not be " +"retrieved. WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:105 +#, python-format +msgid "" +"associate_initiator_with_iscsi_target: an association between initiator: " +"%(init)s and target name: %(target)s could not be established. WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:123 +#, python-format +msgid "" +"delete_iscsi_target: error when deleting the iscsi target associated with" +" target name: %(target)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:139 +#, python-format +msgid "" +"create_volume: error when creating the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:157 +#, python-format +msgid "" +"delete_volume: error when deleting the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:177 +#, python-format +msgid "" +"create_snapshot: error when creating the snapshot name: %(vol_name)s . 
" +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:193 +#, python-format +msgid "" +"create_volume_from_snapshot: error when creating the volume name: " +"%(vol_name)s from snapshot name: %(snap_name)s. WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:208 +#, python-format +msgid "" +"delete_snapshot: error when deleting the snapshot name: %(snap_name)s . " +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:223 +#, python-format +msgid "" +"create_iscsi_target: error when creating iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:240 +#, python-format +msgid "" +"remove_iscsi_target: error when deleting iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:255 +#, python-format +msgid "" +"add_disk_to_target: error adding disk associated to volume : %(vol_name)s" +" to the target name: %(tar_name)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:273 +#, python-format +msgid "" +"copy_vhd_disk: error when copying disk from source path : %(src_path)s to" +" destination path: %(dest_path)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:290 +#, python-format +msgid "" +"extend: error when extending the volume: %(vol_name)s .WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/flows/common.py:52 +#, python-format +msgid "Restoring source %(source_volid)s status to %(status)s" +msgstr "" + +#: cinder/volume/flows/common.py:58 +#, python-format +msgid "" +"Failed setting source volume %(source_volid)s back to its initial " +"%(source_status)s status" +msgstr "" + +#: cinder/volume/flows/common.py:83 +#, python-format +msgid "Updating volume: %(volume_id)s with %(update)s due to: %(reason)s" +msgstr "" + +#: cinder/volume/flows/common.py:90 +#: cinder/volume/flows/manager/create_volume.py:676 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(update)s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:81 +#, python-format +msgid "Originating snapshot status must be one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:103 +#, python-format +msgid "" +"Unable to create a volume from an originating source volume when its " +"status is not one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:126 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than the snapshot size " +"%(snap_size)sGB. They must be >= original snapshot size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:135 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than original volume size " +"%(source_size)sGB. They must be >= original volume size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:144 +#, python-format +msgid "Volume size %(size)s must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:186 +#, python-format +msgid "" +"Size of specified image %(image_size)sGB is larger than volume size " +"%(volume_size)sGB." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:194 +#, python-format +msgid "" +"Volume size %(volume_size)sGB cannot be smaller than the image minDisk " +"size %(min_disk)sGB." 
+msgstr "" + +#: cinder/volume/flows/api/create_volume.py:212 +#, python-format +msgid "Metadata property key %s greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:217 +#, python-format +msgid "Metadata property key %s value greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:254 +#, python-format +msgid "Availability zone '%s' is invalid" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:267 +msgid "Volume must be in the same availability zone as the snapshot" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:276 +msgid "Volume must be in the same availability zone as the source volume" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:315 +msgid "Volume type will be changed to be the same as the source volume." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:463 +#, python-format +msgid "Failed destroying volume entry %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:546 +#, python-format +msgid "Failed rolling back quota for %s reservations" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:590 +#, python-format +msgid "Failed to update quota for deleting volume: %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:678 +#: cinder/volume/flows/manager/create_volume.py:192 +#, python-format +msgid "Volume %s: create failed" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:682 +msgid "Unexpected build error:" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:105 +#, python-format +msgid "" +"Volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d due to " +"%(reason)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:124 +#, python-format +msgid "Volume %s: re-scheduled" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:141 +#, python-format +msgid "Updating volume %(volume_id)s with %(update)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:146 +#, python-format +msgid "Volume %s: resetting 'creating' status failed." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:165 +#, python-format +msgid "Volume %s: rescheduling failed" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:308 +#, python-format +msgid "" +"Failed notifying about the volume action %(event)s for volume " +"%(volume_id)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:345 +#, python-format +msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:347 +#, python-format +msgid "" +"Failed updating volume %(vol_id)s metadata using the provided " +"%(src_type)s %(src_id)s metadata" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:405 +#, python-format +msgid "" +"Failed fetching snapshot %(snapshot_id)s bootable flag using the provided" +" glance snapshot %(snapshot_ref_id)s volume reference" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:418 +#, python-format +msgid "Marking volume %s as bootable." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:421 +#, python-format +msgid "Failed updating volume %(volume_id)s bootable flag to true" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:448 +#, python-format +msgid "" +"Attempting download of %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s." 
+msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:455 +#: cinder/volume/flows/manager/create_volume.py:466 +#, python-format +msgid "" +"Failed to copy image %(image_id)s to volume: %(volume_id)s, error: " +"%(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:461 +#, python-format +msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:475 +#, python-format +msgid "" +"Downloaded image %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s successfully." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:512 +#, python-format +msgid "" +"Creating volume glance metadata for volume %(volume_id)s backed by image " +"%(image_id)s with: %(vol_metadata)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:526 +#, python-format +msgid "" +"Cloning %(volume_id)s from image %(image_id)s at location " +"%(image_location)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:552 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(updates)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:574 +#, python-format +msgid "Unable to create volume. Volume driver %s not initialized" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:588 +#, python-format +msgid "" +"Volume %(volume_id)s: being created using %(functor)s with specification:" +" %(volume_spec)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:611 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with creation provided " +"model %(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:619 +#, python-format +msgid "Volume %s: creating export" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:633 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with driver provided model " +"%(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:680 +#, python-format +msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" +msgstr "" + +#~ msgid "Error retrieving volume status: %s" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get system name" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get storage pool data" +#~ msgstr "" + +#~ msgid "Cannot find any Fibre Channel HBAs" +#~ msgstr "" + +#~ msgid "Volume status must be available or error" +#~ msgstr "" + +#~ msgid "No backend config with id %s" +#~ msgstr "" + +#~ msgid "No sm_flavor called %s" +#~ msgstr "" + +#~ msgid "No sm_volume with id %s" +#~ msgstr "" + +#~ msgid "Error: %s" +#~ msgstr "" + +#~ msgid "Unexpected state while cloning %s" +#~ msgstr "" + +#~ msgid "iSCSI device not found at %s" +#~ msgstr "" + +#~ msgid "Fibre Channel device not found." +#~ msgstr "" + +#~ msgid "Uncaught exception" +#~ msgstr "" + +#~ msgid "Out reactor registered" +#~ msgstr "" + +#~ msgid "CONSUMER GOT %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT QUEUED %(data)s" +#~ msgstr "" + +#~ msgid "Could not create IPC directory %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT %(data)s" +#~ msgstr "" + +#~ msgid "May specify only one of snapshot, imageRef or source volume" +#~ msgstr "" + +#~ msgid "Volume size cannot be lesser than the Snapshot size" +#~ msgstr "" + +#~ msgid "Unable to clone volumes that are in an error state" +#~ msgstr "" + +#~ msgid "Clones currently must be >= original volume size." 
+#~ msgstr "" + +#~ msgid "Volume size '%s' must be an integer and greater than 0" +#~ msgstr "" + +#~ msgid "Size of specified image is larger than volume size." +#~ msgstr "" + +#~ msgid "Image minDisk size is larger than the volume size." +#~ msgstr "" + +#~ msgid "" +#~ msgstr "" + +#~ msgid "Availability zone is invalid" +#~ msgstr "" + +#~ msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +#~ msgstr "" + +#~ msgid "volume %s: creating from snapshot" +#~ msgstr "" + +#~ msgid "volume %s: creating from existing volume" +#~ msgstr "" + +#~ msgid "volume %s: creating from image" +#~ msgstr "" + +#~ msgid "volume %s: creating" +#~ msgstr "" + +#~ msgid "Setting volume: %s status to error after failed image copy." +#~ msgstr "" + +#~ msgid "Unexpected Error: " +#~ msgstr "" + +#~ msgid "volume %s: creating export" +#~ msgstr "" + +#~ msgid "volume %s: create failed" +#~ msgstr "" + +#~ msgid "volume %s: created successfully" +#~ msgstr "" + +#~ msgid "volume %s: Error trying to reschedule create" +#~ msgstr "" + +#~ msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +#~ msgstr "" + +#~ msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +#~ msgstr "" + +#~ msgid "Downloaded image %(image_id)s to %(volume_id)s successfully." +#~ msgstr "" + +#~ msgid "Array Mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "LUN %(lun)s of size %(size)s MB is created." +#~ msgstr "" + +#~ msgid "Array mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "Failed to attach iser target for volume %(volume_id)s." +#~ msgstr "" + +#~ msgid "Fetching %s" +#~ msgstr "" + +#~ msgid "Link Local address is not found.:%s" +#~ msgstr "" + +#~ msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +#~ msgstr "" + +#~ msgid "Started %(name)s on %(_host)s:%(_port)s" +#~ msgstr "" + +#~ msgid "Unable to find a Fibre Channel volume device" +#~ msgstr "" + +#~ msgid "Volume device not found at %s" +#~ msgstr "" + +#~ msgid "Unable to find Volume Group: %s" +#~ msgstr "" + +#~ msgid "Failed to create Volume Group: %s" +#~ msgstr "" + +#~ msgid "snapshot %(snap_name)s: creating" +#~ msgstr "" + +#~ msgid "Running with CoraidDriver for ESM EtherCLoud" +#~ msgstr "" + +#~ msgid "Update session cookie %(session)s" +#~ msgstr "" + +#~ msgid "Message : %(message)s" +#~ msgstr "" + +#~ msgid "Error while trying to set group: %(message)s" +#~ msgstr "" + +#~ msgid "Unable to find group: %(group)s" +#~ msgstr "" + +#~ msgid "ESM urlOpen error" +#~ msgstr "" + +#~ msgid "JSON Error" +#~ msgstr "" + +#~ msgid "Request without URL" +#~ msgstr "" + +#~ msgid "Configure data : %s" +#~ msgstr "" + +#~ msgid "Configure response : %s" +#~ msgstr "" + +#~ msgid "Unable to retrive volume infos for volume %(volname)s" +#~ msgstr "" + +#~ msgid "Cannot login on Coraid ESM" +#~ msgstr "" + +#~ msgid "Fail to create volume %(volname)s" +#~ msgstr "" + +#~ msgid "Failed to delete volume %(volname)s" +#~ msgstr "" + +#~ msgid "Failed to Create Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "Failed to Delete Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "Failed to Create Volume from Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "fmt = %(fmt)s backed by: %(backing_file)s" +#~ msgstr "" + +#~ msgid "Expected image to be in raw format, but is %s" +#~ msgstr "" + +#~ msgid "volume group %s doesn't exist" +#~ msgstr "" + +#~ msgid "Error retrieving volume stats: %s" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Could not get system name" +#~ msgstr "" + +#~ msgid "CPG (%s) must 
be in a domain" +#~ msgstr "" + +#~ msgid "Error populating default encryption types!" +#~ msgstr "" + +#~ msgid "Unexpected error while running command." +#~ msgstr "" + +#~ msgid "Nexenta SA returned the error" +#~ msgstr "" + +#~ msgid "Ignored target group creation error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group member addition error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LU creation error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Copying metadata from snapshot %(snap_volume_id)s to %(volume_id)s" +#~ msgstr "" + +#~ msgid "Connection to glance failed" +#~ msgstr "" + +#~ msgid "Invalid snapshot" +#~ msgstr "" + +#~ msgid "Invalid input received" +#~ msgstr "" + +#~ msgid "Invalid volume type" +#~ msgstr "" + +#~ msgid "Invalid volume" +#~ msgstr "" + +#~ msgid "Invalid host" +#~ msgstr "" + +#~ msgid "Invalid auth key" +#~ msgstr "" + +#~ msgid "Invalid metadata" +#~ msgstr "" + +#~ msgid "Invalid metadata size" +#~ msgstr "" + +#~ msgid "Migration error" +#~ msgstr "" + +#~ msgid "Quota exceeded" +#~ msgstr "" + +#~ msgid "Connection to swift failed" +#~ msgstr "" + +#~ msgid "Volume migration failed" +#~ msgstr "" + +#~ msgid "SSH command injection detected" +#~ msgstr "" + +#~ msgid "Invalid qos specs" +#~ msgstr "" + +#~ msgid "debug in callback: %s" +#~ msgstr "" + +#~ msgid "Expected object of type: %s" +#~ msgstr "" + +#~ msgid "timefunc: '%(name)s' took %(total_time).2f secs" +#~ msgstr "" + +#~ msgid "base image still has %s snapshots so not deleting base image" +#~ msgstr "" + +#~ msgid "Failed to rename migration destination volume %(vol)s to %(name)s" +#~ msgstr "" + +#~ msgid "Resize volume \"%(name)s\" to %(size)s" +#~ msgstr "" + +#~ msgid "Volume \"%(name)s\" resized. New size is %(size)s" +#~ msgstr "" + +#~ msgid "Invalid snapshot backing file format: %s" +#~ msgstr "" + +#~ msgid "Extend volume from %(old_size) to %(new_size)" +#~ msgstr "" + +#~ msgid "pool %s doesn't exist" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Could not get system name." +#~ msgstr "" + +#~ msgid "Disk not found: %s" +#~ msgstr "" + +#~ msgid "read timed out" +#~ msgstr "" + +#~ msgid "check_for_setup_error." +#~ msgstr "" + +#~ msgid "check_for_setup_error: Can not get device type." +#~ msgstr "" + +#~ msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +#~ msgstr "" + +#~ msgid "_get_device_type: Storage Pool must be configured." +#~ msgstr "" + +#~ msgid "create_volume:volume name: %s." +#~ msgstr "" + +#~ msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +#~ msgstr "" + +#~ msgid "create_export: volume name:%s" +#~ msgstr "" + +#~ msgid "create_export:Volume %(name)s does not exist." +#~ msgstr "" + +#~ msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +#~ msgstr "" + +#~ msgid "terminate_connection:Host does not exist. Host name:%(host)s." +#~ msgstr "" + +#~ msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +#~ msgstr "" + +#~ msgid "create_snapshot:Device does not support snapshot." +#~ msgstr "" + +#~ msgid "create_snapshot:Resource pool needs 1GB valid size at least." +#~ msgstr "" + +#~ msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +#~ msgstr "" + +#~ msgid "create_snapshot:Snapshot does not exist. 
Snapshot name:%(name)s" +#~ msgstr "" + +#~ msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +#~ msgstr "" + +#~ msgid "delete_snapshot:Device does not support snapshot." +#~ msgstr "" + +#~ msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +#~ msgstr "" + +#~ msgid "_check_conf_file: %s" +#~ msgstr "" + +#~ msgid "Write login information to xml error. %s" +#~ msgstr "" + +#~ msgid "_get_login_info error. %s" +#~ msgstr "" + +#~ msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +#~ msgstr "" + +#~ msgid "_get_lun_set_info:%s" +#~ msgstr "" + +#~ msgid "_get_iscsi_info:%s" +#~ msgstr "" + +#~ msgid "CLI command:%s" +#~ msgstr "" + +#~ msgid "_execute_cli:%s" +#~ msgstr "" + +#~ msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +#~ msgstr "" + +#~ msgid "_get_tgt_iqn:iSCSI IP is %s." +#~ msgstr "" + +#~ msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +#~ msgstr "" + +#~ msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +#~ msgstr "" + +#~ msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +#~ msgstr "" + +#~ msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." +#~ msgstr "" + +#~ msgid "Ignored target creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group member addition error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LU creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LUN mapping entry addition error while ensuring export" +#~ msgstr "" + +#~ msgid "Invalid source volume %(reason)s." +#~ msgstr "" + +#~ msgid "The request is invalid." +#~ msgstr "" + +#~ msgid "Volume %(volume_id)s persistence file could not be found." +#~ msgstr "" + +#~ msgid "No disk at %(location)s" +#~ msgstr "" + +#~ msgid "Class %(class_name)s could not be found: %(exception)s" +#~ msgstr "" + +#~ msgid "Action not allowed." +#~ msgstr "" + +#~ msgid "Key pair %(key_name)s already exists." +#~ msgstr "" + +#~ msgid "Migration error: %(reason)s" +#~ msgstr "" + +#~ msgid "Maximum volume/snapshot size exceeded" +#~ msgstr "" + +#~ msgid "3PAR Host already exists: %(err)s. %(info)s" +#~ msgstr "" + +#~ msgid "Backup volume %(volume_id)s type not recognised." +#~ msgstr "" + +#~ msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s" +#~ msgstr "" + +#~ msgid "ssh_read: Read SSH timeout" +#~ msgstr "" + +#~ msgid "do_setup." +#~ msgstr "" + +#~ msgid "create_volume: volume name: %s." +#~ msgstr "" + +#~ msgid "delete_volume: volume name: %s." +#~ msgstr "" + +#~ msgid "create_cloned_volume: src volume: %(src)s tgt volume: %(tgt)s" +#~ msgstr "" + +#~ msgid "create_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" +#~ msgstr "" + +#~ msgid "delete_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" +#~ msgstr "" + +#~ msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Updating volume stats" +#~ msgstr "" + +#~ msgid "restore finished." +#~ msgstr "" + +#~ msgid "Error encountered during initialization of driver: %s" +#~ msgstr "" + +#~ msgid "Unabled to update stats, driver is uninitialized" +#~ msgstr "" + +#~ msgid "Snapshot file at %s does not exist." 
+#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %s does not exist" +#~ msgstr "" + +#~ msgid "Login to 3PAR array invalid" +#~ msgstr "" + +#~ msgid "There are no datastores present under %s." +#~ msgstr "" + +#~ msgid "Size for volume: %s not found, skipping secure delete." +#~ msgstr "" + +#~ msgid "Could not find attribute for LUN named %s" +#~ msgstr "" + +#~ msgid "Cleaning up incomplete backup operations" +#~ msgstr "" + +#~ msgid "Resetting volume %s to available (was backing-up)" +#~ msgstr "" + +#~ msgid "Resetting volume %s to error_restoring (was restoring-backup)" +#~ msgstr "" + +#~ msgid "Resetting backup %s to error (was creating)" +#~ msgstr "" + +#~ msgid "Resetting backup %s to available (was restoring)" +#~ msgstr "" + +#~ msgid "Resuming delete on backup: %s" +#~ msgstr "" + +#~ msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +#~ msgstr "" + +#~ msgid "create_backup finished. backup: %s" +#~ msgstr "" + +#~ msgid "delete_backup started, backup: %s" +#~ msgstr "" + +#~ msgid "delete_backup finished, backup %s deleted" +#~ msgstr "" + +#~ msgid "JSON transfer Error" +#~ msgstr "" + +#~ msgid "create volume error: %(err)s" +#~ msgstr "" + +#~ msgid "Create snapshot error." +#~ msgstr "" + +#~ msgid "Create luncopy error." +#~ msgstr "" + +#~ msgid "_find_host_lun_id transfer data error! " +#~ msgstr "" + +#~ msgid "ssh_read: Read SSH timeout." +#~ msgstr "" + +#~ msgid "There are no hosts in the inventory." +#~ msgstr "" + +#~ msgid "Unable to create volume: %(vol)s on the hosts: %(hosts)s." +#~ msgstr "" + +#~ msgid "Successfully cloned new backing: %s." +#~ msgstr "" + +#~ msgid "Successfully reverted clone: %(clone)s to snapshot: %(snapshot)s." +#~ msgstr "" + +#~ msgid "Copying backing files from %(src)s to %(dest)s." +#~ msgstr "" + +#~ msgid "Initiated copying of backing via task: %s." +#~ msgstr "" + +#~ msgid "Successfully copied backing to %s." +#~ msgstr "" + +#~ msgid "Registering backing at path: %s to inventory." +#~ msgstr "" + +#~ msgid "Initiated registring backing, task: %s." +#~ msgstr "" + +#~ msgid "Successfully registered backing: %s." +#~ msgstr "" + +#~ msgid "Reverting backing to snapshot: %s." +#~ msgstr "" + +#~ msgid "Initiated reverting snapshot via task: %s." +#~ msgstr "" + +#~ msgid "Successfully reverted to snapshot: %s." +#~ msgstr "" + +#~ msgid "Successfully copied disk data to: %s." +#~ msgstr "" + +#~ msgid "Error(s): %s occurred in the call to RetrieveProperties." +#~ msgstr "" + +#~ msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +#~ msgstr "" + +#~ msgid "Deploy v1 of the Cinder API. " +#~ msgstr "" + +#~ msgid "Deploy v2 of the Cinder API. " +#~ msgstr "" + +#~ msgid "_read_xml:%s" +#~ msgstr "" + +#~ msgid "request ip info is %s." +#~ msgstr "" + +#~ msgid "new str info is %s." +#~ msgstr "" + +#~ msgid "Failed to create iser target for volume %(volume_id)s." +#~ msgstr "" + +#~ msgid "Failed to remove iser target for volume %(volume_id)s." 
+#~ msgstr "" + +#~ msgid "rtstool is not installed correctly" +#~ msgstr "" + +#~ msgid "Creating iser_target for: %s" +#~ msgstr "" + +#~ msgid "Failed to create iser target for volume id:%(vol_id)s: %(e)s" +#~ msgstr "" + +#~ msgid "Removing iser_target for: %s" +#~ msgstr "" + +#~ msgid "Failed to remove iser target for volume id:%(vol_id)s: %(e)s" +#~ msgstr "" + +#~ msgid "Volume %s does not exist, it seems it was already deleted" +#~ msgstr "" + +#~ msgid "Executing zfs send/recv on the appliance" +#~ msgstr "" + +#~ msgid "zfs send/recv done, new volume %s created" +#~ msgstr "" + +#~ msgid "Failed to delete temp snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" + +#~ msgid "Failed to delete zfs recv snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" + +#~ msgid "rbd export-diff failed - %s" +#~ msgstr "" + +#~ msgid "rbd import-diff failed - %s" +#~ msgstr "" + +#~ msgid "%s is not on GPFS. Perhaps GPFS not mounted." +#~ msgstr "" + +#~ msgid "Folder %s does not exist, it seems it was already deleted." +#~ msgstr "" + +#~ msgid "No 'os-update_readonly_flag' was specified in request." +#~ msgstr "" + +#~ msgid "Volume 'readonly' flag must be specified in request as a boolean." +#~ msgstr "" + +#~ msgid "ISER provider_location not stored, using discovery" +#~ msgstr "" + +#~ msgid "Could not find iSER export for volume %s" +#~ msgstr "" + +#~ msgid "ISER Discovery: Found %s" +#~ msgstr "" + +#~ msgid "Failed to access the device on the path %(path)s: %(error)s." +#~ msgstr "" + +#~ msgid "iSER device not found at %s" +#~ msgstr "" + +#~ msgid "Found iSER node %(host_device)s (after %(tries)s rescans)." +#~ msgstr "" + +#~ msgid "Skipping ensure_export. No iser_target provisioned for volume: %s" +#~ msgstr "" + +#~ msgid "Skipping remove_export. No iser_target provisioned for volume: %s" +#~ msgstr "" + +#~ msgid "Downloading image: %s from glance image server." +#~ msgstr "" + +#~ msgid "Uploading image: %s to the Glance image server." +#~ msgstr "" + +#~ msgid "Invalid request body" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: prefix %s" +#~ msgstr "" + +#~ msgid "Schedule volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete schedule volume using flow: %s" +#~ msgstr "" + +#~ msgid "Create volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete create volume workflow" +#~ msgstr "" + +#~ msgid "Expected volume result not found" +#~ msgstr "" + +#~ msgid "Manager volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete manager volume workflow" +#~ msgstr "" + +#~ msgid "Unable to update stats, driver is uninitialized" +#~ msgstr "" + +#~ msgid "Bad reponse from server: %s" +#~ msgstr "" + +#~ msgid "%(flow)s has moved into state %(state)s from state %(old_state)s" +#~ msgstr "" + +#~ msgid "No request spec, will not reschedule" +#~ msgstr "" + +#~ msgid "No retry filter property or associated retry info, will not reschedule" +#~ msgstr "" + +#~ msgid "Retry info not present, will not reschedule" +#~ msgstr "" + +#~ msgid "Clear capabilities" +#~ msgstr "" + +#~ msgid "This usually means the volume was never succesfully created." +#~ msgstr "" + +#~ msgid "setting LU uppper (end) limit to %s" +#~ msgstr "" + +#~ msgid "Can't find lun or lun goup in array" +#~ msgstr "" + +#~ msgid "Volume to be restored to is smaller than the backup to be restored" +#~ msgstr "" + +#~ msgid "Volume driver '%(driver)s' not initialized." 
+#~ msgstr "" + +#~ msgid "in looping call" +#~ msgstr "" + +#~ msgid "Is the appropriate service running?" +#~ msgstr "" + +#~ msgid "Could not find another host" +#~ msgstr "" + +#~ msgid "Not enough allocatable volume gigabytes remaining" +#~ msgstr "" + +#~ msgid "Unable to update stats on non-intialized Volume Group: %s" +#~ msgstr "" + +#~ msgid "do_setup: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "migrate_volume started with more than one vdisk copy" +#~ msgstr "" + +#~ msgid "migrate_volume: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "Selected datastore: %s for the volume." +#~ msgstr "" + +#~ msgid "There are no valid datastores present under %s." +#~ msgstr "" + +#~ msgid "Unable to create volume, driver not initialized" +#~ msgstr "" + +#~ msgid "Migration %(migration_id)s could not be found." +#~ msgstr "" + +#~ msgid "Bad driver response status: %(status)s" +#~ msgstr "" + +#~ msgid "Instance %(instance_id)s could not be found." +#~ msgstr "" + +#~ msgid "Volume retype failed: %(reason)s" +#~ msgstr "" + +#~ msgid "SIGTERM received" +#~ msgstr "" + +#~ msgid "Child %(pid)d exited with status %(code)d" +#~ msgstr "" + +#~ msgid "_wait_child %d" +#~ msgstr "" + +#~ msgid "wait wrap.failed %s" +#~ msgstr "" + +#~ msgid "" +#~ "Report interval must be less than " +#~ "service down time. Current config: " +#~ ". Setting " +#~ "service_down_time to: %(new_service_down_time)s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target for volume %(name)s." +#~ msgstr "" + +#~ msgid "Updating iscsi target: %s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target %(name)s: %(e)s" +#~ msgstr "" + +#~ msgid "Caught '%(exception)s' exception." +#~ msgstr "" + +#~ msgid "Get code level failed" +#~ msgstr "" + +#~ msgid "do_setup: Could not get system name" +#~ msgstr "" + +#~ msgid "Failed to get license information." +#~ msgstr "" + +#~ msgid "do_setup: No configured nodes" +#~ msgstr "" + +#~ msgid "enter: _get_chap_secret_for_host: host name %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_chap_secret_for_host: host name " +#~ "%(host_name)s with secret %(chap_secret)s" +#~ msgstr "" + +#~ msgid "" +#~ "_create_host: Cannot clean host name. " +#~ "Host name is not unicode or string" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: %s" +#~ msgstr "" + +#~ msgid "leave: _get_host_from_connector: host %s" +#~ msgstr "" + +#~ msgid "enter: _create_host: host %s" +#~ msgstr "" + +#~ msgid "_create_host: No connector ports" +#~ msgstr "" + +#~ msgid "leave: _create_host: host %(host)s - %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +#~ msgstr "" + +#~ msgid "" +#~ "storwize_svc_multihostmap_enabled is set to " +#~ "False, Not allow multi host mapping" +#~ msgstr "" + +#~ msgid "volume %s mapping to multi host" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _map_vol_to_host: LUN %(result_lun)s, " +#~ "volume %(volume_name)s, host %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "leave: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "_create_host failed to return the host name." +#~ msgstr "" + +#~ msgid "_get_host_from_connector failed to return the host name for connector" +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to any host found." +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: Multiple mappings of " +#~ "volume %(vol_name)s found, no host " +#~ "specified." 
+#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to host %(host_name)s found" +#~ msgstr "" + +#~ msgid "protocol must be specified as ' iSCSI' or ' FC'" +#~ msgstr "" + +#~ msgid "enter: _create_vdisk: vdisk %s " +#~ msgstr "" + +#~ msgid "" +#~ "_create_vdisk %(name)s - did not find success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find success " +#~ "message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find mapping " +#~ "id in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to prepare FlashCopy" +#~ " from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "Unexecpted mapping status %(status)s for " +#~ "mapping %(id)s. Attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Mapping %(id)s prepare failed to " +#~ "complete within the allotted %(to)d " +#~ "seconds timeout. Terminating." +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s with " +#~ "exception %(ex)s" +#~ msgstr "" + +#~ msgid "_prepare_fc_map: %s" +#~ msgstr "" + +#~ msgid "" +#~ "_start_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "enter: _run_flashcopy: execute FlashCopy from" +#~ " source %(source)s to target %(target)s" +#~ msgstr "" + +#~ msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +#~ msgstr "" + +#~ msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %(src_vdisk)s (%(src_id)s) does not exist" +#~ msgstr "" + +#~ msgid "" +#~ "_create_copy: cannot get source vdisk " +#~ "%(src)s capacity from vdisk attributes " +#~ "%(attr)s" +#~ msgstr "" + +#~ msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_flashcopy_mapping_attributes: mapping " +#~ "%(fc_map_id)s, attributes %(attributes)s" +#~ msgstr "" + +#~ msgid "enter: _is_vdisk_defined: vdisk %s " +#~ msgstr "" + +#~ msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +#~ msgstr "" + +#~ msgid "enter: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "warning: Tried to delete vdisk %s but it does not exist." 
+#~ msgstr "" + +#~ msgid "leave: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "" +#~ "_add_vdisk_copy %(name)s - did not find" +#~ " success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "_get_vdisk_copy_attrs: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "_get_pool_attrs: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "enter: _execute_command_and_parse_attributes: command %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _execute_command_and_parse_attributes:\n" +#~ "command: %(cmd)s\n" +#~ "attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "_get_hdr_dic: attribute headers and values do not match.\n" +#~ " Headers: %(header)s\n" +#~ " Values: %(row)s" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ "stdout: %(out)s\n" +#~ "stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "Did not find expected column in %(fun)s: %(hdr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Volume size %(size)s cannot be lesser" +#~ " than the snapshot size %(snap_size)s. " +#~ "They must be >= original snapshot " +#~ "size." +#~ msgstr "" + +#~ msgid "" +#~ "Clones currently disallowed when %(size)s " +#~ "< %(source_size)s. They must be >= " +#~ "original volume size." +#~ msgstr "" + +#~ msgid "" +#~ "Size of specified image %(image_size)s " +#~ "is larger than volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "" +#~ "Image minDisk size %(min_disk)s is " +#~ "larger than the volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "Updating volume %(volume_id)s with %(update)s" +#~ msgstr "" + +#~ msgid "Volume %s: resetting 'creating' status failed" +#~ msgstr "" + +#~ msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s" +#~ msgstr "" + +#~ msgid "Marking volume %s as bootable" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting download of %(image_id)s " +#~ "(%(image_location)s) to volume %(volume_id)s" +#~ msgstr "" + +#~ msgid "" +#~ "Downloaded image %(image_id)s (%(image_location)s)" +#~ " to volume %(volume_id)s successfully" +#~ msgstr "" + +#~ msgid "" +#~ "Creating volume glance metadata for " +#~ "volume %(volume_id)s backed by image " +#~ "%(image_id)s with: %(vol_metadata)s" +#~ msgstr "" + +#~ msgid "" +#~ "Cloning %(volume_id)s from image %(image_id)s" +#~ " at location %(image_location)s" +#~ msgstr "" + diff --git a/cinder/locale/ja/LC_MESSAGES/cinder.po b/cinder/locale/ja/LC_MESSAGES/cinder.po index c81827f250..cc930823d2 100644 --- a/cinder/locale/ja/LC_MESSAGES/cinder.po +++ b/cinder/locale/ja/LC_MESSAGES/cinder.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: cinder\n" "Report-Msgid-Bugs-To: FULL NAME \n" -"POT-Creation-Date: 2012-04-08 23:04+0000\n" +"POT-Creation-Date: 2014-02-03 06:16+0000\n" "PO-Revision-Date: 2011-08-23 11:22+0000\n" "Last-Translator: Thierry Carrez \n" "Language-Team: \n" @@ -15,8182 +15,10747 @@ msgstr "" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 0.9.6\n" +"Generated-By: Babel 1.3\n" -#: cinder/context.py:59 +#: cinder/context.py:61 #, python-format msgid "Arguments dropped when creating context: %s" msgstr "" -#: cinder/context.py:90 +#: cinder/context.py:102 #, python-format msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" msgstr "" -#: 
cinder/crypto.py:48 -msgid "Filename of root CA" -msgstr "ルートCAのファイル名" - -#: cinder/crypto.py:51 -msgid "Filename of private key" -msgstr "プライベートキーのファイル名" - -#: cinder/crypto.py:54 -msgid "Filename of root Certificate Revocation List" -msgstr "" - -#: cinder/crypto.py:57 -msgid "Where we keep our keys" -msgstr "キーを格納するパス" - -#: cinder/crypto.py:60 -msgid "Where we keep our root CA" -msgstr "ルートCAを格納するパス" - -#: cinder/crypto.py:63 -msgid "Should we use a CA for each project?" -msgstr "プロジェクトごとにCAを使用するか否かのフラグ" - -#: cinder/crypto.py:67 -#, python-format -msgid "Subject for certificate for users, %s for project, user, timestamp" -msgstr "ユーザの証明書のサブジェクト、%s はプロジェクト、ユーザ、タイムスタンプ" - -#: cinder/crypto.py:72 -#, python-format -msgid "Subject for certificate for projects, %s for project, timestamp" -msgstr "プロジェクトの証明書のサブジェクト、%s はプロジェクト、およびタイムスタンプ" - -#: cinder/crypto.py:292 -#, python-format -msgid "Flags path: %s" -msgstr "Flags のパス: %s" - -#: cinder/exception.py:56 -msgid "Unexpected error while running command." -msgstr "コマンド実行において予期しないエラーが発生しました。" - -#: cinder/exception.py:59 -#, python-format -msgid "" -"%(description)s\n" -"Command: %(cmd)s\n" -"Exit code: %(exit_code)s\n" -"Stdout: %(stdout)r\n" -"Stderr: %(stderr)r" -msgstr "" -"%(description)s\n" -"コマンド: %(cmd)s\n" -"終了コード: %(exit_code)s\n" -"標準出力: %(stdout)r\n" -"標準エラー出力: %(stderr)r" - -#: cinder/exception.py:94 -msgid "DB exception wrapped." -msgstr "" - -#: cinder/exception.py:155 +#: cinder/exception.py:66 cinder/brick/exception.py:33 msgid "An unknown exception occurred." msgstr "" -#: cinder/exception.py:178 -msgid "Failed to decrypt text" +#: cinder/exception.py:88 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" msgstr "" -#: cinder/exception.py:182 -msgid "Failed to paginate through images from image service" -msgstr "" - -#: cinder/exception.py:186 -msgid "Virtual Interface creation failed" +#: cinder/exception.py:107 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" msgstr "" -#: cinder/exception.py:190 -msgid "5 attempts to create virtual interfacewith unique mac address failed" +#: cinder/exception.py:112 +#, python-format +msgid "Volume driver reported an error: %(message)s" msgstr "" -#: cinder/exception.py:195 -msgid "Connection to glance failed" +#: cinder/exception.py:116 +#, python-format +msgid "Backup driver reported an error: %(message)s" msgstr "" -#: cinder/exception.py:199 -msgid "Connection to melange failed" +#: cinder/exception.py:120 +#, python-format +msgid "Connection to glance failed: %(reason)s" msgstr "" -#: cinder/exception.py:203 +#: cinder/exception.py:124 msgid "Not authorized." msgstr "" -#: cinder/exception.py:208 +#: cinder/exception.py:129 msgid "User does not have admin privileges" msgstr "" -#: cinder/exception.py:212 +#: cinder/exception.py:133 #, python-format msgid "Policy doesn't allow %(action)s to be performed." msgstr "" -#: cinder/exception.py:216 +#: cinder/exception.py:137 #, fuzzy, python-format msgid "Not authorized for image %(image_id)s." msgstr "メッセージ %s に対するメソッドが存在しません。" -#: cinder/exception.py:220 +#: cinder/exception.py:141 +msgid "Volume driver not ready." +msgstr "" + +#: cinder/exception.py:145 cinder/brick/exception.py:74 msgid "Unacceptable parameters." 
msgstr "" -#: cinder/exception.py:225 -msgid "Invalid snapshot" +#: cinder/exception.py:150 +#, python-format +msgid "Invalid snapshot: %(reason)s" msgstr "" -#: cinder/exception.py:229 +#: cinder/exception.py:154 #, python-format -msgid "Volume %(volume_id)s is not attached to anything" +msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." msgstr "" -#: cinder/exception.py:233 cinder/api/openstack/compute/contrib/keypairs.py:113 -msgid "Keypair data is invalid" +#: cinder/exception.py:159 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." msgstr "" -#: cinder/exception.py:237 +#: cinder/exception.py:163 msgid "Failed to load data into json format" msgstr "" -#: cinder/exception.py:241 -msgid "The request is invalid." +#: cinder/exception.py:167 +msgid "The results are invalid." msgstr "" -#: cinder/exception.py:245 +#: cinder/exception.py:171 #, python-format -msgid "Invalid signature %(signature)s for user %(user)s." +msgid "Invalid input received: %(reason)s" msgstr "" -#: cinder/exception.py:249 -msgid "Invalid input received" +#: cinder/exception.py:175 +#, python-format +msgid "Invalid volume type: %(reason)s" msgstr "" -#: cinder/exception.py:253 +#: cinder/exception.py:179 #, python-format -msgid "Invalid instance type %(instance_type)s." +msgid "Invalid volume: %(reason)s" msgstr "" -#: cinder/exception.py:257 -msgid "Invalid volume type" +#: cinder/exception.py:183 +#, python-format +msgid "Invalid content type %(content_type)s." msgstr "" -#: cinder/exception.py:261 -msgid "Invalid volume" +#: cinder/exception.py:187 +#, python-format +msgid "Invalid host: %(reason)s" msgstr "" -#: cinder/exception.py:265 +#: cinder/exception.py:193 cinder/brick/exception.py:81 #, python-format -msgid "Invalid port range %(from_port)s:%(to_port)s. %(msg)s" +msgid "%(err)s" msgstr "" -#: cinder/exception.py:269 +#: cinder/exception.py:197 #, python-format -msgid "Invalid IP protocol %(protocol)s." +msgid "Invalid auth key: %(reason)s" msgstr "" -#: cinder/exception.py:273 +#: cinder/exception.py:201 #, python-format -msgid "Invalid content type %(content_type)s." +msgid "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" msgstr "" -#: cinder/exception.py:277 -#, python-format -msgid "Invalid cidr %(cidr)s." +#: cinder/exception.py:206 +msgid "Service is unavailable at this time." msgstr "" -#: cinder/exception.py:281 -msgid "Invalid reuse of an RPC connection." +#: cinder/exception.py:210 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" msgstr "" -#: cinder/exception.py:285 -msgid "Invalid Parameter: Unicode is not supported by the current database." +#: cinder/exception.py:214 +#, python-format +msgid "The device in the path %(path)s is unavailable: %(reason)s" msgstr "" -#: cinder/exception.py:292 +#: cinder/exception.py:218 #, python-format -msgid "%(err)s" +msgid "Expected a uuid but received %(uuid)s." +msgstr "" + +#: cinder/exception.py:222 cinder/brick/exception.py:68 +msgid "Resource could not be found." msgstr "" -#: cinder/exception.py:296 +#: cinder/exception.py:228 #, python-format -msgid "" -"Cannot perform action '%(action)s' on aggregate %(aggregate_id)s. Reason:" -" %(reason)s." +msgid "Volume %(volume_id)s could not be found." msgstr "" -#: cinder/exception.py:301 +#: cinder/exception.py:232 #, python-format -msgid "Group not valid. Reason: %(reason)s" +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." 
msgstr "" -#: cinder/exception.py:305 +#: cinder/exception.py:237 #, python-format msgid "" -"Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot %(method)s while" -" the instance is in this state." +"Volume %(volume_id)s has no administration metadata with key " +"%(metadata_key)s." msgstr "" -#: cinder/exception.py:310 +#: cinder/exception.py:242 #, python-format -msgid "Instance %(instance_id)s is not running." +msgid "Invalid metadata: %(reason)s" msgstr "" -#: cinder/exception.py:314 +#: cinder/exception.py:246 #, python-format -msgid "Instance %(instance_id)s is not suspended." +msgid "Invalid metadata size: %(reason)s" msgstr "" -#: cinder/exception.py:318 +#: cinder/exception.py:250 #, python-format -msgid "Instance %(instance_id)s is not in rescue mode" +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." msgstr "" -#: cinder/exception.py:322 -msgid "Failed to suspend instance" +#: cinder/exception.py:255 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." msgstr "" -#: cinder/exception.py:326 -msgid "Failed to resume server" +#: cinder/exception.py:259 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." msgstr "" -#: cinder/exception.py:330 -msgid "Failed to reboot instance" +#: cinder/exception.py:264 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." msgstr "" -#: cinder/exception.py:334 -#, fuzzy -msgid "Failed to terminate instance" -msgstr "インスタンス終了処理を開始します。" - -#: cinder/exception.py:338 -msgid "Service is unavailable at this time." +#: cinder/exception.py:269 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s deletion is not allowed with volumes " +"present with the type." msgstr "" -#: cinder/exception.py:342 -msgid "Volume service is unavailable at this time." +#: cinder/exception.py:274 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." msgstr "" -#: cinder/exception.py:346 -msgid "Compute service is unavailable at this time." +#: cinder/exception.py:278 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" msgstr "" -#: cinder/exception.py:350 +#: cinder/exception.py:282 #, python-format -msgid "Unable to migrate instance (%(instance_id)s) to current host (%(host)s)." +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" msgstr "" -#: cinder/exception.py:355 -msgid "Destination compute host is unavailable at this time." +#: cinder/exception.py:287 +#, python-format +msgid "No target id found for volume %(volume_id)s." msgstr "" -#: cinder/exception.py:359 -msgid "Original compute host is unavailable at this time." +#: cinder/exception.py:291 +#, python-format +msgid "Invalid image href %(image_href)s." msgstr "" -#: cinder/exception.py:363 -msgid "The supplied hypervisor type of is invalid." +#: cinder/exception.py:295 +#, python-format +msgid "Image %(image_id)s could not be found." msgstr "" -#: cinder/exception.py:367 -msgid "The instance requires a newer hypervisor version than has been provided." +#: cinder/exception.py:299 +#, python-format +msgid "Service %(service_id)s could not be found." msgstr "" -#: cinder/exception.py:372 +#: cinder/exception.py:303 #, python-format -msgid "" -"The supplied disk path (%(path)s) already exists, it is expected not to " -"exist." +msgid "Host %(host)s could not be found." 
msgstr "" -#: cinder/exception.py:377 +#: cinder/exception.py:307 #, python-format -msgid "The supplied device path (%(path)s) is invalid." +msgid "Scheduler Host Filter %(filter_name)s could not be found." msgstr "" -#: cinder/exception.py:381 +#: cinder/exception.py:311 #, python-format -msgid "The supplied device (%(device)s) is busy." +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." msgstr "" -#: cinder/exception.py:385 -msgid "Unacceptable CPU info" +#: cinder/exception.py:315 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." msgstr "" -#: cinder/exception.py:389 +#: cinder/exception.py:319 #, python-format -msgid "%(address)s is not a valid IP v4/6 address." +msgid "Invalid reservation expiration %(expire)s." msgstr "" -#: cinder/exception.py:393 +#: cinder/exception.py:323 #, python-format msgid "" -"VLAN tag is not appropriate for the port group %(bridge)s. Expected VLAN " -"tag is %(tag)s, but the one associated with the port group is %(pgroup)s." +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:328 +msgid "Quota could not be found" msgstr "" -#: cinder/exception.py:399 +#: cinder/exception.py:332 #, python-format -msgid "" -"vSwitch which contains the port group %(bridge)s is not associated with " -"the desired physical adapter. Expected vSwitch is %(expected)s, but the " -"one associated is %(actual)s." +msgid "Unknown quota resources %(unknown)s." msgstr "" -#: cinder/exception.py:406 +#: cinder/exception.py:336 #, python-format -msgid "Disk format %(disk_format)s is not acceptable" +msgid "Quota for project %(project_id)s could not be found." msgstr "" -#: cinder/exception.py:410 +#: cinder/exception.py:340 #, python-format -msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgid "Quota class %(class_name)s could not be found." msgstr "" -#: cinder/exception.py:414 +#: cinder/exception.py:344 #, python-format -msgid "Instance %(instance_id)s is unacceptable: %(reason)s" +msgid "Quota usage for project %(project_id)s could not be found." msgstr "" -#: cinder/exception.py:418 +#: cinder/exception.py:348 #, python-format -msgid "Ec2 id %(ec2_id)s is unacceptable." +msgid "Quota reservation %(uuid)s could not be found." msgstr "" -#: cinder/exception.py:422 -msgid "Resource could not be found." +#: cinder/exception.py:352 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" msgstr "" -#: cinder/exception.py:427 +#: cinder/exception.py:356 #, python-format -msgid "Required flag %(flag)s not set." +msgid "File %(file_path)s could not be found." msgstr "" -#: cinder/exception.py:431 +#: cinder/exception.py:365 +#, fuzzy, python-format +msgid "Volume Type %(id)s already exists." +msgstr "グループ %s は既に存在しています。" + +#: cinder/exception.py:369 #, python-format -msgid "Volume %(volume_id)s could not be found." +msgid "Volume type encryption for type %(type_id)s already exists." msgstr "" -#: cinder/exception.py:435 +#: cinder/exception.py:373 #, python-format -msgid "Unable to locate account %(account_name)s on Solidfire device" +msgid "Malformed message body: %(reason)s" msgstr "" -#: cinder/exception.py:440 +#: cinder/exception.py:377 #, python-format -msgid "Volume not found for instance %(instance_id)s." 
+msgid "Could not find config at %(path)s" msgstr "" -#: cinder/exception.py:444 +#: cinder/exception.py:381 +#, fuzzy, python-format +msgid "Could not find parameter %(param)s" +msgstr "ボリューム %s 用の iSCSI エクスポートが見つかりません" + +#: cinder/exception.py:385 #, python-format -msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgid "Could not load paste app '%(name)s' from %(path)s" msgstr "" -#: cinder/exception.py:449 -msgid "Zero volume types found." +#: cinder/exception.py:389 +#, python-format +msgid "No valid host was found. %(reason)s" msgstr "" -#: cinder/exception.py:453 +#: cinder/exception.py:398 #, python-format -msgid "Volume type %(volume_type_id)s could not be found." +msgid "Host %(host)s is not up or doesn't exist." msgstr "" -#: cinder/exception.py:457 +#: cinder/exception.py:402 #, python-format -msgid "Volume type with name %(volume_type_name)s could not be found." +msgid "Quota exceeded: code=%(code)s" msgstr "" -#: cinder/exception.py:462 +#: cinder/exception.py:409 #, python-format msgid "" -"Volume Type %(volume_type_id)s has no extra specs with key " -"%(extra_specs_key)s." +"Requested volume or snapshot exceeds allowed Gigabytes quota. Requested " +"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." msgstr "" -#: cinder/exception.py:467 +#: cinder/exception.py:415 #, python-format -msgid "Snapshot %(snapshot_id)s could not be found." +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" msgstr "" -#: cinder/exception.py:471 +#: cinder/exception.py:419 #, python-format -msgid "deleting volume %(volume_name)s that has snapshot" +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" msgstr "" -#: cinder/exception.py:475 +#: cinder/exception.py:423 #, python-format -msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgid "Detected more than one volume with name %(vol_name)s" msgstr "" -#: cinder/exception.py:480 +#: cinder/exception.py:427 #, python-format -msgid "No target id found for volume %(volume_id)s." +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" msgstr "" -#: cinder/exception.py:484 +#: cinder/exception.py:432 #, python-format -msgid "No disk at %(location)s" +msgid "Unknown or unsupported command %(cmd)s" msgstr "" -#: cinder/exception.py:488 +#: cinder/exception.py:436 #, python-format -msgid "Could not find a handler for %(driver_type)s volume." +msgid "Malformed response to command %(cmd)s: %(reason)s" msgstr "" -#: cinder/exception.py:492 +#: cinder/exception.py:440 #, python-format -msgid "Invalid image href %(image_href)s." +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" msgstr "" -#: cinder/exception.py:496 +#: cinder/exception.py:444 +#, python-format msgid "" -"Some images have been stored via hrefs. This version of the api does not " -"support displaying image hrefs." +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" msgstr "" -#: cinder/exception.py:501 +#: cinder/exception.py:449 #, python-format -msgid "Image %(image_id)s could not be found." +msgid "Glance metadata for volume/snapshot %(id)s cannot be found." msgstr "" -#: cinder/exception.py:505 +#: cinder/exception.py:453 #, python-format -msgid "Kernel not found for image %(image_id)s." +msgid "Failed to export for volume: %(reason)s" msgstr "" -#: cinder/exception.py:509 +#: cinder/exception.py:457 #, python-format -msgid "User %(user_id)s could not be found." 
+msgid "Failed to create metadata for volume: %(reason)s" msgstr "" -#: cinder/exception.py:513 +#: cinder/exception.py:461 #, python-format -msgid "Project %(project_id)s could not be found." +msgid "Failed to update metadata for volume: %(reason)s" msgstr "" -#: cinder/exception.py:517 +#: cinder/exception.py:465 #, python-format -msgid "User %(user_id)s is not a member of project %(project_id)s." +msgid "Failed to copy metadata to volume: %(reason)s" msgstr "" -#: cinder/exception.py:521 -#, python-format -msgid "Role %(role_id)s could not be found." -msgstr "" +#: cinder/exception.py:469 +#, fuzzy, python-format +msgid "Failed to copy image to volume: %(reason)s" +msgstr "ボリューム %s の存在が確認できません。" -#: cinder/exception.py:525 -msgid "Cannot find SR to read/write VDI." +#: cinder/exception.py:473 +msgid "Invalid Ceph args provided for backup rbd operation" msgstr "" -#: cinder/exception.py:529 -#, python-format -msgid "%(req)s is required to create a network." +#: cinder/exception.py:477 +msgid "An error has occurred during backup operation" msgstr "" -#: cinder/exception.py:533 -#, python-format -msgid "Network %(network_id)s could not be found." +#: cinder/exception.py:481 +msgid "Backup RBD operation failed" msgstr "" -#: cinder/exception.py:537 +#: cinder/exception.py:485 #, python-format -msgid "Network could not be found for bridge %(bridge)s" +msgid "Backup %(backup_id)s could not be found." msgstr "" -#: cinder/exception.py:541 -#, python-format -msgid "Network could not be found for uuid %(uuid)s" +#: cinder/exception.py:489 +msgid "Failed to identify volume backend." msgstr "" -#: cinder/exception.py:545 -#, python-format -msgid "Network could not be found with cidr %(cidr)s." -msgstr "" +#: cinder/exception.py:493 +#, fuzzy, python-format +msgid "Invalid backup: %(reason)s" +msgstr "不正なバックエンドです: %s" -#: cinder/exception.py:549 +#: cinder/exception.py:497 #, python-format -msgid "Network could not be found for instance %(instance_id)s." +msgid "Connection to swift failed: %(reason)s" msgstr "" -#: cinder/exception.py:553 -msgid "No networks defined." +#: cinder/exception.py:501 +#, python-format +msgid "Transfer %(transfer_id)s could not be found." msgstr "" -#: cinder/exception.py:557 +#: cinder/exception.py:505 #, python-format -msgid "" -"Either Network uuid %(network_uuid)s is not present or is not assigned to" -" the project %(project_id)s." +msgid "Volume migration failed: %(reason)s" msgstr "" -#: cinder/exception.py:562 +#: cinder/exception.py:509 #, python-format -msgid "Host is not set to the network (%(network_id)s)." +msgid "SSH command injection detected: %(command)s" msgstr "" -#: cinder/exception.py:566 +#: cinder/exception.py:513 #, python-format -msgid "Network %(network)s has active ports, cannot delete." +msgid "QoS Specs %(specs_id)s already exists." msgstr "" -#: cinder/exception.py:570 -msgid "Could not find the datastore reference(s) which the VM uses." +#: cinder/exception.py:517 +#, python-format +msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." msgstr "" -#: cinder/exception.py:574 +#: cinder/exception.py:522 #, python-format -msgid "No fixed IP associated with id %(id)s." +msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." msgstr "" -#: cinder/exception.py:578 +#: cinder/exception.py:527 #, python-format -msgid "Fixed ip not found for address %(address)s." +msgid "No such QoS spec %(specs_id)s." 
msgstr "" -#: cinder/exception.py:582 +#: cinder/exception.py:531 #, python-format -msgid "Instance %(instance_id)s has zero fixed ips." +msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." msgstr "" -#: cinder/exception.py:586 +#: cinder/exception.py:536 #, python-format -msgid "Network host %(host)s has zero fixed ips in network %(network_id)s." +msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." msgstr "" -#: cinder/exception.py:591 +#: cinder/exception.py:541 #, python-format -msgid "Instance %(instance_id)s doesn't have fixed ip '%(ip)s'." +msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." msgstr "" -#: cinder/exception.py:595 +#: cinder/exception.py:546 #, python-format -msgid "Host %(host)s has zero fixed ips." +msgid "Invalid qos specs: %(reason)s" msgstr "" -#: cinder/exception.py:599 +#: cinder/exception.py:550 #, python-format -msgid "" -"Fixed IP address (%(address)s) does not exist in network " -"(%(network_uuid)s)." +msgid "QoS Specs %(specs_id)s is still associated with entities." msgstr "" -#: cinder/exception.py:604 +#: cinder/exception.py:554 #, python-format -msgid "Fixed IP address %(address)s is already in use." +msgid "key manager error: %(reason)s" msgstr "" -#: cinder/exception.py:608 -#, python-format -msgid "Fixed IP address %(address)s is invalid." +#: cinder/exception.py:560 +msgid "Coraid Cinder Driver exception." msgstr "" -#: cinder/exception.py:612 -msgid "Zero fixed ips available." +#: cinder/exception.py:564 +msgid "Failed to encode json data." msgstr "" -#: cinder/exception.py:616 -msgid "Zero fixed ips could be found." +#: cinder/exception.py:568 +msgid "Login on ESM failed." msgstr "" -#: cinder/exception.py:620 -#, python-format -msgid "Floating ip not found for id %(id)s." +#: cinder/exception.py:572 +msgid "Relogin on ESM failed." msgstr "" -#: cinder/exception.py:624 +#: cinder/exception.py:576 #, python-format -msgid "The DNS entry %(name)s already exists in domain %(domain)s." +msgid "Group with name \"%(group_name)s\" not found." msgstr "" -#: cinder/exception.py:628 +#: cinder/exception.py:580 #, python-format -msgid "Floating ip not found for address %(address)s." +msgid "ESM configure request failed: %(message)s." msgstr "" -#: cinder/exception.py:632 +#: cinder/exception.py:584 #, python-format -msgid "Floating ip not found for host %(host)s." +msgid "Coraid ESM not available with reason: %(reason)s." msgstr "" -#: cinder/exception.py:636 -msgid "Zero floating ips available." +#: cinder/exception.py:589 +msgid "Zadara Cinder Driver exception." msgstr "" -#: cinder/exception.py:640 -#, python-format -msgid "Floating ip %(address)s is associated." -msgstr "" +#: cinder/exception.py:593 +#, fuzzy, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "インスタンス %(instance_name)s 用のSR %(sr_ref)s における VDI を作成できません" -#: cinder/exception.py:644 +#: cinder/exception.py:597 #, python-format -msgid "Floating ip %(address)s is not associated." +msgid "Unable to find server object for initiator %(name)s" msgstr "" -#: cinder/exception.py:648 -msgid "Zero floating ips exist." +#: cinder/exception.py:601 +msgid "Unable to find any active VPSA controller" msgstr "" -#: cinder/exception.py:652 +#: cinder/exception.py:605 #, python-format -msgid "Interface %(interface)s not found." 
+msgid "Failed to retrieve attachments for volume %(name)s" msgstr "" -#: cinder/exception.py:656 +#: cinder/exception.py:609 #, python-format -msgid "Keypair %(name)s not found for user %(user_id)s" +msgid "Invalid attachment info for volume %(name)s: %(reason)s" msgstr "" -#: cinder/exception.py:660 +#: cinder/exception.py:613 #, python-format -msgid "Certificate %(certificate_id)s not found." +msgid "Bad HTTP response status %(status)s" msgstr "" -#: cinder/exception.py:664 -#, python-format -msgid "Service %(service_id)s could not be found." +#: cinder/exception.py:618 +msgid "Bad response from SolidFire API" msgstr "" -#: cinder/exception.py:668 -#, python-format -msgid "Host %(host)s could not be found." +#: cinder/exception.py:622 +msgid "SolidFire Cinder Driver exception" msgstr "" -#: cinder/exception.py:672 +#: cinder/exception.py:626 #, python-format -msgid "Compute host %(host)s could not be found." +msgid "Error in SolidFire API response: data=%(data)s" msgstr "" -#: cinder/exception.py:676 +#: cinder/exception.py:630 #, python-format -msgid "Could not find binary %(binary)s on host %(host)s." +msgid "Unable to locate account %(account_name)s on Solidfire device" msgstr "" -#: cinder/exception.py:680 +#: cinder/exception.py:636 #, python-format -msgid "Auth token %(token)s could not be found." +msgid "Invalid 3PAR Domain: %(err)s" msgstr "" -#: cinder/exception.py:684 -#, python-format -msgid "Access Key %(access_key)s could not be found." +#: cinder/exception.py:641 +msgid "Unknown NFS exception" msgstr "" -#: cinder/exception.py:688 -msgid "Quota could not be found" +#: cinder/exception.py:645 +msgid "No mounted NFS shares found" msgstr "" -#: cinder/exception.py:692 +#: cinder/exception.py:649 cinder/exception.py:662 #, python-format -msgid "Quota for project %(project_id)s could not be found." +msgid "There is no share which can host %(volume_size)sG" msgstr "" -#: cinder/exception.py:696 -#, python-format -msgid "Quota class %(class_name)s could not be found." +#: cinder/exception.py:654 +msgid "Unknown Gluster exception" msgstr "" -#: cinder/exception.py:700 -#, python-format -msgid "Security group %(security_group_id)s not found." +#: cinder/exception.py:658 +msgid "No mounted Gluster shares found" msgstr "" -#: cinder/exception.py:704 -#, python-format -msgid "Security group %(security_group_id)s not found for project %(project_id)s." +#: cinder/manager.py:133 +msgid "Notifying Schedulers of capabilities ..." msgstr "" -#: cinder/exception.py:709 -#, python-format -msgid "Security group with rule %(rule_id)s not found." +#: cinder/policy.py:30 +msgid "JSON file representing policy" msgstr "" -#: cinder/exception.py:713 -#, python-format -msgid "" -"Security group %(security_group_id)s is already associated with the " -"instance %(instance_id)s" +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" msgstr "" -#: cinder/exception.py:718 +#: cinder/quota.py:105 #, python-format msgid "" -"Security group %(security_group_id)s is not associated with the instance " -"%(instance_id)s" +"Default quota for resource: %(res)s is set by the default quota flag: " +"quota_%(res)s, it is now deprecated. Please use the the default quota " +"class for default quota." msgstr "" -#: cinder/exception.py:723 +#: cinder/quota.py:748 #, python-format -msgid "Migration %(migration_id)s could not be found." 
+msgid "Created reservations %s" msgstr "" -#: cinder/exception.py:727 +#: cinder/quota.py:770 #, python-format -msgid "Migration not found for instance %(instance_id)s with status %(status)s." +msgid "Failed to commit reservations %s" msgstr "" -#: cinder/exception.py:732 +#: cinder/quota.py:790 #, python-format -msgid "Console pool %(pool_id)s could not be found." +msgid "Failed to roll back reservations %s" msgstr "" -#: cinder/exception.py:736 -#, python-format -msgid "" -"Console pool of type %(console_type)s for compute host %(compute_host)s " -"on proxy host %(host)s not found." +#: cinder/quota.py:876 +msgid "Cannot register resource" msgstr "" -#: cinder/exception.py:742 -#, python-format -msgid "Console %(console_id)s could not be found." +#: cinder/quota.py:879 +msgid "Cannot register resources" msgstr "" -#: cinder/exception.py:746 +#: cinder/quota_utils.py:46 #, python-format -msgid "Console for instance %(instance_id)s could not be found." +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume - " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" msgstr "" -#: cinder/exception.py:750 +#: cinder/quota_utils.py:56 cinder/transfer/api.py:168 +#: cinder/volume/flows/api/create_volume.py:520 #, python-format msgid "" -"Console for instance %(instance_id)s in pool %(pool_id)s could not be " -"found." +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" msgstr "" -#: cinder/exception.py:755 -#, python-format -msgid "Invalid console type %(console_type)s " -msgstr "" +#: cinder/service.py:95 +#, fuzzy, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "%(topic)s ノードを開始しています (バージョン %(vcs_string)s)" -#: cinder/exception.py:759 -msgid "Zero instance types found." +#: cinder/service.py:108 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" msgstr "" -#: cinder/exception.py:763 +#: cinder/service.py:148 #, python-format -msgid "Instance type %(instance_type_id)s could not be found." +msgid "" +"Report interval must be less than service down time. Current config " +"service_down_time: %(service_down_time)s, report_interval for this: " +"service is: %(report_interval)s. Setting global service_down_time to: " +"%(new_down_time)s" msgstr "" -#: cinder/exception.py:767 +#: cinder/service.py:216 +msgid "Service killed that has no database entry" +msgstr "データベースにエントリの存在しないサービスを終了します。" + +#: cinder/service.py:255 +msgid "The service database object disappeared, Recreating it." +msgstr "サービスデータベースオブジェクトが消滅しました。再作成します。" + +#: cinder/service.py:270 +msgid "Recovered model server connection!" +msgstr "モデルサーバへの接続を復旧しました。" + +#: cinder/service.py:276 +msgid "model server went away" +msgstr "モデルサーバが消滅しました。" + +#: cinder/service.py:298 #, python-format -msgid "Instance type with name %(instance_type_name)s could not be found." +msgid "" +"Value of config option %(name)s_workers must be integer greater than 1. " +"Input value ignored." msgstr "" -#: cinder/exception.py:772 -#, python-format -msgid "Flavor %(flavor_id)s could not be found." +#: cinder/service.py:373 +msgid "serve() can only be called once" msgstr "" -#: cinder/exception.py:776 +#: cinder/service.py:379 cinder/openstack/common/service.py:166 +#: cinder/openstack/common/service.py:384 +#, fuzzy +msgid "Full set of CONF:" +msgstr "FLAGSの一覧:" + +#: cinder/service.py:387 #, python-format -msgid "Cell %(cell_id)s could not be found." 
+msgid "%s : FLAG SET " msgstr "" -#: cinder/exception.py:780 +#: cinder/utils.py:96 #, python-format -msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgid "Can not translate %s to integer." msgstr "" -#: cinder/exception.py:784 +#: cinder/utils.py:127 #, python-format -msgid "Scheduler cost function %(cost_fn_str)s could not be found." +msgid "May specify only one of %s" msgstr "" -#: cinder/exception.py:789 -#, python-format -msgid "Scheduler weight flag not found: %(flag_name)s" +#: cinder/utils.py:212 +msgid "Specify a password or private_key" msgstr "" -#: cinder/exception.py:793 +#: cinder/utils.py:228 +#, fuzzy, python-format +msgid "Error connecting via ssh: %s" +msgstr "libvirt %s へ接続します。" + +#: cinder/utils.py:412 #, python-format -msgid "Instance %(instance_id)s has no metadata with key %(metadata_key)s." -msgstr "" +msgid "Invalid backend: %s" +msgstr "不正なバックエンドです: %s" -#: cinder/exception.py:798 +#: cinder/utils.py:423 #, python-format -msgid "" -"Instance Type %(instance_type_id)s has no extra specs with key " -"%(extra_specs_key)s." -msgstr "" +msgid "backend %s" +msgstr "バックエンドは %s です。" -#: cinder/exception.py:803 -msgid "LDAP object could not be found" +#: cinder/utils.py:698 +#, python-format +msgid "Could not remove tmpdir: %s" msgstr "" -#: cinder/exception.py:807 +#: cinder/utils.py:759 #, python-format -msgid "LDAP user %(user_id)s could not be found." +msgid "Volume driver %s not initialized" msgstr "" -#: cinder/exception.py:811 -#, python-format -msgid "LDAP group %(group_id)s could not be found." +#: cinder/wsgi.py:127 cinder/openstack/common/sslutils.py:50 +#, fuzzy, python-format +msgid "Unable to find cert_file : %s" +msgstr "VBD %s から SRを取得できません。" + +#: cinder/wsgi.py:130 cinder/openstack/common/sslutils.py:53 +#, fuzzy, python-format +msgid "Unable to find ca_file : %s" +msgstr "VBD %s から SRを取得できません。" + +#: cinder/wsgi.py:133 cinder/openstack/common/sslutils.py:56 +#, fuzzy, python-format +msgid "Unable to find key_file : %s" +msgstr "VBD %s から SRを取得できません。" + +#: cinder/wsgi.py:136 cinder/openstack/common/sslutils.py:59 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" msgstr "" -#: cinder/exception.py:815 +#: cinder/wsgi.py:169 #, python-format -msgid "LDAP user %(user_id)s is not a member of group %(group_id)s." +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" msgstr "" -#: cinder/exception.py:819 +#: cinder/wsgi.py:206 #, python-format -msgid "File %(file_path)s could not be found." +msgid "Started %(name)s on %(host)s:%(port)s" msgstr "" -#: cinder/exception.py:823 -msgid "Zero files could be found." +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." msgstr "" -#: cinder/exception.py:827 -#, python-format -msgid "Virtual switch associated with the network adapter %(adapter)s not found." +#: cinder/wsgi.py:244 +msgid "WSGI server has stopped." msgstr "" -#: cinder/exception.py:832 -#, python-format -msgid "Network adapter %(adapter)s could not be found." -msgstr "" +#: cinder/wsgi.py:313 +msgid "You must implement __call__" +msgstr "__call__ を実装しなければなりません" -#: cinder/exception.py:836 -#, python-format -msgid "Class %(class_name)s could not be found: %(exception)s" +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." msgstr "" -#: cinder/exception.py:840 -msgid "Action not allowed." 
+#: cinder/api/auth.py:34
+msgid ""
+"cinder.api.auth:pipeline_factory is deprecated. Please use "
+"cinder.api.middleware.auth:pipeline_factory instead."
 msgstr ""

-#: cinder/exception.py:844
-#, python-format
-msgid "Unable to use global role %(role_id)s"
+#: cinder/api/common.py:92 cinder/api/common.py:126 cinder/volume/api.py:266
+msgid "limit param must be an integer"
 msgstr ""

-#: cinder/exception.py:848
-msgid "Rotation is not allowed for snapshots"
+#: cinder/api/common.py:95 cinder/api/common.py:130 cinder/volume/api.py:263
+msgid "limit param must be positive"
 msgstr ""

-#: cinder/exception.py:852
-msgid "Rotation param is required for backup image_type"
+#: cinder/api/common.py:120
+msgid "offset param must be an integer"
 msgstr ""

-#: cinder/exception.py:861
-#, python-format
-msgid "Key pair %(key_name)s already exists."
+#: cinder/api/common.py:134
+msgid "offset param must be positive"
 msgstr ""

-#: cinder/exception.py:865
+#: cinder/api/common.py:162
 #, python-format
-msgid "User %(user)s already exists."
+msgid "marker [%s] not found"
 msgstr ""

-#: cinder/exception.py:869
+#: cinder/api/common.py:189
 #, python-format
-msgid "LDAP user %(user)s already exists."
+msgid "href %s does not contain version"
 msgstr ""

-#: cinder/exception.py:873
-#, python-format
-msgid "LDAP group %(group)s already exists."
+#: cinder/api/extensions.py:182
+msgid "Initializing extension manager."
 msgstr ""

-#: cinder/exception.py:877
-#, python-format
-msgid "User %(uid)s is already a member of the group %(group_dn)s"
-msgstr "ユーザ %(uid)s はすでにグループ %(group_dn)s のメンバです。"
-
-#: cinder/exception.py:882
+#: cinder/api/extensions.py:197
 #, python-format
-msgid "Project %(project)s already exists."
+msgid "Loaded extension: %s"
 msgstr ""

-#: cinder/exception.py:886
+#: cinder/api/extensions.py:235
 #, python-format
-msgid "Instance %(name)s already exists."
+msgid "Ext name: %s"
 msgstr ""

-#: cinder/exception.py:890
+#: cinder/api/extensions.py:236
 #, python-format
-msgid "Instance Type %(name)s already exists."
+msgid "Ext alias: %s"
 msgstr ""

-#: cinder/exception.py:894
+#: cinder/api/extensions.py:237
 #, python-format
-msgid "Volume Type %(name)s already exists."
+msgid "Ext description: %s"
 msgstr ""

-#: cinder/exception.py:898
+#: cinder/api/extensions.py:239
 #, python-format
-msgid "%(path)s is on shared storage: %(reason)s"
+msgid "Ext namespace: %s"
 msgstr ""

-#: cinder/exception.py:902
-msgid "Migration error"
+#: cinder/api/extensions.py:240
+#, python-format
+msgid "Ext updated: %s"
 msgstr ""

-#: cinder/exception.py:906
+#: cinder/api/extensions.py:242
 #, python-format
-msgid "Malformed message body: %(reason)s"
+msgid "Exception loading extension: %s"
 msgstr ""

-#: cinder/exception.py:910
+#: cinder/api/extensions.py:256
 #, python-format
-msgid "Could not find config at %(path)s"
+msgid "Loading extension %s"
 msgstr ""

-#: cinder/exception.py:914
+#: cinder/api/extensions.py:262
 #, python-format
-msgid "Could not load paste app '%(name)s' from %(path)s"
+msgid "Calling extension factory %s"
 msgstr ""

-#: cinder/exception.py:918
-msgid "When resizing, instances must change size!"
+#: cinder/api/extensions.py:276
+#, python-format
+msgid "osapi_volume_extension is set to deprecated path: %s"
 msgstr ""

-#: cinder/exception.py:922
-msgid "Image is larger than instance type allows"
+#: cinder/api/extensions.py:278
+#, python-format
+msgid ""
+"Please set your flag or cinder.conf settings for osapi_volume_extension "
+"to: %s"
 msgstr ""

-#: cinder/exception.py:926
-msgid "1 or more Zones could not complete the request"
+#: cinder/api/extensions.py:287
+#, python-format
+msgid "Failed to load extension %(ext_factory)s: %(exc)s"
 msgstr ""

-#: cinder/exception.py:930
-msgid "Instance type's memory is too small for requested image."
+#: cinder/api/extensions.py:356
+#, python-format
+msgid "Failed to load extension %(classpath)s: %(exc)s"
 msgstr ""

-#: cinder/exception.py:934
-msgid "Instance type's disk is too small for requested image."
+#: cinder/api/extensions.py:381
+#, python-format
+msgid "Failed to load extension %(ext_name)s: %(exc)s"
 msgstr ""

-#: cinder/exception.py:938
-#, python-format
-msgid "Insufficient free memory on compute node to start %(uuid)s."
+#: cinder/api/sizelimit.py:25
+msgid ""
+"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use "
+"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead"
 msgstr ""

-#: cinder/exception.py:942
-msgid "Could not fetch bandwidth/cpu/disk metrics for this host."
+#: cinder/api/xmlutil.py:266
+msgid "element is not a child"
 msgstr ""

-#: cinder/exception.py:946
-#, python-format
-msgid "No valid host was found. %(reason)s"
+#: cinder/api/xmlutil.py:463
+msgid "root element selecting a list"
 msgstr ""

-#: cinder/exception.py:950
+#: cinder/api/xmlutil.py:786
 #, python-format
-msgid "Host %(host)s is not up or doesn't exist."
+msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s"
 msgstr ""

-#: cinder/exception.py:954
-msgid "Quota exceeded"
+#: cinder/api/xmlutil.py:907
+msgid "subclasses must implement construct()!"
 msgstr ""

-#: cinder/exception.py:958
+#: cinder/api/contrib/admin_actions.py:81
 #, python-format
-msgid ""
-"Aggregate %(aggregate_id)s: action '%(action)s' caused an error: "
-"%(reason)s."
+msgid "Updating %(resource)s '%(id)s' with '%(update)r'"
 msgstr ""

-#: cinder/exception.py:963
+#: cinder/api/contrib/backups.py:128
 #, python-format
-msgid "Aggregate %(aggregate_id)s could not be found."
+msgid "show called for member %s"
 msgstr ""

-#: cinder/exception.py:967
-#, python-format
-msgid "Aggregate %(aggregate_name)s already exists."
-msgstr ""
+#: cinder/api/contrib/backups.py:140
+#, fuzzy, python-format
+msgid "delete called for member %s"
+msgstr "Secret Key change: ユーザ %s のシークレットキーを更新します。"

-#: cinder/exception.py:971
+#: cinder/api/contrib/backups.py:143
 #, python-format
-msgid "Aggregate %(aggregate_id)s has no host %(host)s."
+msgid "Delete backup with id: %s"
 msgstr ""

-#: cinder/exception.py:975
+#: cinder/api/contrib/backups.py:185
 #, python-format
-msgid "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s."
+msgid "Creating new backup %s"
 msgstr ""

-#: cinder/exception.py:980
-#, python-format
-msgid "Host %(host)s already member of another aggregate."
+#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:227
+#: cinder/api/contrib/volume_transfer.py:157
+#: cinder/api/contrib/volume_transfer.py:193
+msgid "Incorrect request body format"
 msgstr ""

-#: cinder/exception.py:984
+#: cinder/api/contrib/backups.py:201
 #, python-format
-msgid "Aggregate %(aggregate_id)s already has host %(host)s."
+msgid "Creating backup of volume %(volume_id)s in container %(container)s" msgstr "" -#: cinder/exception.py:988 +#: cinder/api/contrib/backups.py:224 #, python-format -msgid "Detected more than one volume with name %(vol_name)s" +msgid "Restoring backup %(backup_id)s (%(body)s)" msgstr "" -#: cinder/exception.py:992 +#: cinder/api/contrib/backups.py:234 #, python-format -msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" msgstr "" -#: cinder/exception.py:997 -msgid "Unable to create instance type" +#: cinder/api/contrib/extended_snapshot_attributes.py:60 +msgid "Snapshot not found." msgstr "" -#: cinder/exception.py:1001 -msgid "Bad response from SolidFire API" +#: cinder/api/contrib/hosts.py:86 cinder/api/openstack/wsgi.py:245 +msgid "cannot understand XML" msgstr "" -#: cinder/exception.py:1005 +#: cinder/api/contrib/hosts.py:136 #, python-format -msgid "Error in SolidFire API response: status=%(status)s" +msgid "Host '%s' could not be found." msgstr "" -#: cinder/exception.py:1009 +#: cinder/api/contrib/hosts.py:165 #, python-format -msgid "Error in SolidFire API response: data=%(data)s" +msgid "Invalid status: '%s'" msgstr "" -#: cinder/exception.py:1013 +#: cinder/api/contrib/hosts.py:168 #, python-format -msgid "Detected existing vlan with id %(vlan)d" +msgid "Invalid update setting: '%s'" msgstr "" -#: cinder/exception.py:1017 +#: cinder/api/contrib/hosts.py:180 #, python-format -msgid "Instance %(instance_id)s could not be found." +msgid "Setting host %(host)s to %(state)s." msgstr "" -#: cinder/exception.py:1021 -#, python-format -msgid "Invalid id: %(val)s (expecting \"i-...\")." +#: cinder/api/contrib/hosts.py:206 +msgid "Describe-resource is admin only functionality" msgstr "" -#: cinder/exception.py:1025 -#, fuzzy, python-format -msgid "Could not fetch image %(image)s" -msgstr "イメージをループバック %s にアタッチできません。" - -#: cinder/log.py:315 -#, python-format -msgid "syslog facility must be one of: %s" +#: cinder/api/contrib/hosts.py:214 +msgid "Host not found" msgstr "" -#: cinder/manager.py:146 -#, python-format -msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +#: cinder/api/contrib/qos_specs_manage.py:111 +msgid "Please specify a name for QoS specs." msgstr "" -#: cinder/manager.py:152 -#, python-format -msgid "Running periodic task %(full_task_name)s" +#: cinder/api/contrib/qos_specs_manage.py:220 +msgid "Failed to disassociate qos specs." msgstr "" -#: cinder/manager.py:159 -#, python-format -msgid "Error during %(full_task_name)s: %(e)s" +#: cinder/api/contrib/qos_specs_manage.py:222 +msgid "Qos specs still in use." msgstr "" -#: cinder/manager.py:203 -msgid "Notifying Schedulers of capabilities ..." +#: cinder/api/contrib/qos_specs_manage.py:298 +#: cinder/api/contrib/qos_specs_manage.py:351 +msgid "Volume Type id must not be None." msgstr "" -#: cinder/policy.py:30 -msgid "JSON file representing policy" +#: cinder/api/contrib/quota_classes.py:72 +msgid "Missing required element quota_class_set in request body." msgstr "" -#: cinder/policy.py:33 -msgid "Rule checked when requested rule is not found" +#: cinder/api/contrib/quota_classes.py:81 +msgid "Quota class limit must be specified as an integer value." msgstr "" -#: cinder/service.py:137 -msgid "SIGTERM received" +#: cinder/api/contrib/quota_classes.py:85 +msgid "Quota class limit must be -1 or greater." 
msgstr "" -#: cinder/service.py:177 -#, python-format -msgid "Starting %(topic)s node (version %(vcs_string)s)" -msgstr "%(topic)s ノードを開始しています (バージョン %(vcs_string)s)" - -#: cinder/service.py:195 -#, python-format -msgid "Creating Consumer connection for Service %s" +#: cinder/api/contrib/quotas.py:60 +msgid "Quota limit must be specified as an integer value." msgstr "" -#: cinder/service.py:282 -msgid "Service killed that has no database entry" -msgstr "データベースにエントリの存在しないサービスを終了します。" - -#: cinder/service.py:319 -msgid "The service database object disappeared, Recreating it." -msgstr "サービスデータベースオブジェクトが消滅しました。再作成します。" - -#: cinder/service.py:334 -msgid "Recovered model server connection!" -msgstr "モデルサーバへの接続を復旧しました。" - -#: cinder/service.py:340 -msgid "model server went away" -msgstr "モデルサーバが消滅しました。" - -#: cinder/service.py:433 -msgid "Full set of FLAGS:" -msgstr "FLAGSの一覧:" +#: cinder/api/contrib/quotas.py:65 +msgid "Quota limit must be -1 or greater." +msgstr "" -#: cinder/service.py:440 -#, python-format -msgid "%(flag)s : FLAG SET " +#: cinder/api/contrib/quotas.py:100 +msgid "Missing required element quota_set in request body." msgstr "" -#: cinder/utils.py:79 +#: cinder/api/contrib/quotas.py:111 #, python-format -msgid "Inner Exception: %s" -msgstr "内側で発生した例外: %s" +msgid "Bad key(s) in quota set: %s" +msgstr "" -#: cinder/utils.py:165 -#, python-format -msgid "Fetching %s" -msgstr "ファイルをフェッチ: %s" +#: cinder/api/contrib/scheduler_hints.py:36 +msgid "Malformed scheduler_hints attribute" +msgstr "" -#: cinder/utils.py:210 -#, python-format -msgid "Got unknown keyword args to utils.execute: %r" +#: cinder/api/contrib/services.py:84 +msgid "" +"Query by service parameter is deprecated. Please use binary parameter " +"instead." msgstr "" -#: cinder/utils.py:220 -#, python-format -msgid "Running cmd (subprocess): %s" -msgstr "コマンド実行(subprocess): %s" +#: cinder/api/contrib/snapshot_actions.py:51 +msgid "'status' must be specified." +msgstr "" -#: cinder/utils.py:236 cinder/utils.py:315 +#: cinder/api/contrib/snapshot_actions.py:61 #, python-format -msgid "Result was %s" -msgstr "コマンド実行結果: %s" +msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" +msgstr "" -#: cinder/utils.py:249 +#: cinder/api/contrib/snapshot_actions.py:67 #, python-format -msgid "%r failed. Retrying." +msgid "" +"Provided snapshot status %(provided)s not allowed for snapshot with " +"status %(current)s." 
msgstr "" -#: cinder/utils.py:291 -#, python-format -msgid "Running cmd (SSH): %s" -msgstr "コマンド(SSH)を実行: %s" +#: cinder/api/contrib/snapshot_actions.py:79 +msgid "progress must be an integer percentage" +msgstr "" -#: cinder/utils.py:293 -msgid "Environment not supported over SSH" +#: cinder/api/contrib/types_extra_specs.py:101 +msgid "Request body empty" msgstr "" -#: cinder/utils.py:297 -msgid "process_input not supported over SSH" +#: cinder/api/contrib/types_extra_specs.py:105 +#: cinder/api/v1/snapshot_metadata.py:75 cinder/api/v1/volume_metadata.py:75 +#: cinder/api/v2/snapshot_metadata.py:75 cinder/api/v2/volume_metadata.py:74 +msgid "Request body and URI mismatch" msgstr "" -#: cinder/utils.py:352 -#, python-format -msgid "debug in callback: %s" -msgstr "コールバック中のデバッグ: %s" +#: cinder/api/contrib/types_extra_specs.py:108 +#: cinder/api/v1/snapshot_metadata.py:79 cinder/api/v1/volume_metadata.py:79 +#: cinder/api/v2/snapshot_metadata.py:79 cinder/api/v2/volume_metadata.py:78 +msgid "Request body contains too many items" +msgstr "" -#: cinder/utils.py:534 -#, python-format -msgid "Link Local address is not found.:%s" -msgstr "リンクローカルアドレスが見つかりません: %s" +#: cinder/api/contrib/types_extra_specs.py:150 +msgid "" +"Key names can only contain alphanumeric characters, underscores, periods," +" colons and hyphens." +msgstr "" -#: cinder/utils.py:537 +#: cinder/api/contrib/volume_actions.py:99 #, python-format -msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" -msgstr "%(interface)s のローカルIPアドレスのリンクが取得できません:%(ex)s" +msgid "" +"Invalid request to attach volume to an instance %(instance_uuid)s and a " +"host %(host_name)s simultaneously" +msgstr "" -#: cinder/utils.py:648 -#, python-format -msgid "Invalid backend: %s" -msgstr "不正なバックエンドです: %s" +#: cinder/api/contrib/volume_actions.py:107 +msgid "Invalid request to attach volume to an invalid target" +msgstr "" -#: cinder/utils.py:659 -#, python-format -msgid "backend %s" -msgstr "バックエンドは %s です。" +#: cinder/api/contrib/volume_actions.py:111 +msgid "" +"Invalid request to attach volume with an invalid mode. Attaching mode " +"should be 'rw' or 'ro'" +msgstr "" -#: cinder/utils.py:709 -msgid "in looping call" +#: cinder/api/contrib/volume_actions.py:196 +msgid "Unable to fetch connection information from backend." msgstr "" -#: cinder/utils.py:927 -#, python-format -msgid "Attempting to grab semaphore \"%(lock)s\" for method \"%(method)s\"..." +#: cinder/api/contrib/volume_actions.py:216 +msgid "Unable to terminate volume connection from backend." msgstr "" -#: cinder/utils.py:931 -#, python-format -msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +#: cinder/api/contrib/volume_actions.py:229 +msgid "No image_name was specified in request." msgstr "" -#: cinder/utils.py:935 -#, python-format -msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +#: cinder/api/contrib/volume_actions.py:237 +msgid "Bad value for 'force' parameter." msgstr "" -#: cinder/utils.py:942 -#, python-format -msgid "Got file lock \"%(lock)s\" for method \"%(method)s\"..." +#: cinder/api/contrib/volume_actions.py:240 +msgid "'force' is not string or bool." msgstr "" -#: cinder/utils.py:1001 -#, python-format -msgid "Found sentinel %(filename)s for pid %(pid)s" +#: cinder/api/contrib/volume_actions.py:280 +msgid "New volume size must be specified as an integer." 
msgstr "" -#: cinder/utils.py:1008 -#, python-format -msgid "Cleaned sentinel %(filename)s for pid %(pid)s" +#: cinder/api/contrib/volume_actions.py:299 +msgid "Must specify readonly in request." msgstr "" -#: cinder/utils.py:1023 -#, python-format -msgid "Found lockfile %(file)s with link count %(count)d" +#: cinder/api/contrib/volume_actions.py:307 +msgid "Bad value for 'readonly'" msgstr "" -#: cinder/utils.py:1028 -#, python-format -msgid "Cleaned lockfile %(file)s with link count %(count)d" +#: cinder/api/contrib/volume_actions.py:311 +msgid "'readonly' not string or bool" msgstr "" -#: cinder/utils.py:1138 -#, python-format -msgid "Expected object of type: %s" +#: cinder/api/contrib/volume_actions.py:325 +msgid "New volume type must be specified." msgstr "" -#: cinder/utils.py:1169 -#, python-format -msgid "Invalid server_string: %s" +#: cinder/api/contrib/volume_transfer.py:131 +msgid "Listing volume transfers" msgstr "" -#: cinder/utils.py:1298 +#: cinder/api/contrib/volume_transfer.py:147 +#, fuzzy, python-format +msgid "Creating new volume transfer %s" +msgstr "Create volume: %s GBのボリュームを作成します。" + +#: cinder/api/contrib/volume_transfer.py:162 +#, fuzzy, python-format +msgid "Creating transfer of volume %s" +msgstr "存在しないコンソール %(console_id)s を削除しようとしました" + +#: cinder/api/contrib/volume_transfer.py:183 #, python-format -msgid "timefunc: '%(name)s' took %(total_time).2f secs" +msgid "Accepting volume transfer %s" msgstr "" -#: cinder/utils.py:1330 -msgid "Original exception being dropped" -msgstr "" +#: cinder/api/contrib/volume_transfer.py:196 +#, fuzzy, python-format +msgid "Accepting transfer %s" +msgstr "Create volume: %s GBのボリュームを作成します。" -#: cinder/utils.py:1461 +#: cinder/api/contrib/volume_transfer.py:217 #, python-format -msgid "Class %(fullname)s is deprecated: %(msg)s" +msgid "Delete transfer with id: %s" msgstr "" -#: cinder/utils.py:1463 -#, python-format -msgid "Class %(fullname)s is deprecated" +#: cinder/api/contrib/volume_type_encryption.py:64 +msgid "key_size must be non-negative" msgstr "" -#: cinder/utils.py:1495 -#, python-format -msgid "Function %(name)s in %(location)s is deprecated: %(msg)s" +#: cinder/api/contrib/volume_type_encryption.py:67 +msgid "key_size must be an integer" msgstr "" -#: cinder/utils.py:1497 -#, python-format -msgid "Function %(name)s in %(location)s is deprecated" +#: cinder/api/contrib/volume_type_encryption.py:73 +msgid "provider must be defined" msgstr "" -#: cinder/utils.py:1681 -#, python-format -msgid "Could not remove tmpdir: %s" +#: cinder/api/contrib/volume_type_encryption.py:75 +msgid "control_location must be defined" msgstr "" -#: cinder/wsgi.py:97 +#: cinder/api/contrib/volume_type_encryption.py:83 #, python-format -msgid "Started %(name)s on %(host)s:%(port)s" +msgid "Valid control location are: %s" msgstr "" -#: cinder/wsgi.py:108 -msgid "Stopping WSGI server." +#: cinder/api/contrib/volume_type_encryption.py:111 +msgid "Cannot create encryption specs. Volume type in use." msgstr "" -#: cinder/wsgi.py:111 -msgid "Stopping raw TCP server." +#: cinder/api/contrib/volume_type_encryption.py:115 +msgid "Create body is not valid." msgstr "" -#: cinder/wsgi.py:117 -#, python-format -msgid "Starting TCP server %(arg0)s on %(host)s:%(port)s" +#: cinder/api/contrib/volume_type_encryption.py:157 +msgid "Cannot delete encryption specs. Volume type in use." msgstr "" -#: cinder/wsgi.py:133 -msgid "WSGI server has stopped." +#: cinder/api/middleware/auth.py:108 +msgid "Invalid service catalog json." 
msgstr "" -#: cinder/wsgi.py:211 -msgid "You must implement __call__" -msgstr "__call__ を実装しなければなりません" +#: cinder/api/middleware/fault.py:44 +#, python-format +msgid "Caught error: %s" +msgstr "エラー %s をキャッチしました。" -#: cinder/api/direct.py:218 -msgid "not available" -msgstr "利用できません" +#: cinder/api/middleware/fault.py:53 cinder/api/openstack/wsgi.py:984 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" -#: cinder/api/direct.py:299 +#: cinder/api/middleware/fault.py:69 #, python-format -msgid "Returned non-serializeable type: %s" +msgid "%(exception)s: %(explanation)s" msgstr "" -#: cinder/api/sizelimit.py:51 +#: cinder/api/middleware/sizelimit.py:55 cinder/api/middleware/sizelimit.py:64 +#: cinder/api/middleware/sizelimit.py:78 msgid "Request is too large." msgstr "" -#: cinder/api/validator.py:142 -#, python-format -msgid "%(key)s with value %(value)s failed validator %(validator)s" +#: cinder/api/openstack/__init__.py:69 +msgid "Must specify an ExtensionManager class" msgstr "" -#: cinder/api/ec2/__init__.py:73 +#: cinder/api/openstack/__init__.py:80 #, python-format -msgid "%(code)s: %(message)s" +msgid "Extended resource: %s" msgstr "" -#: cinder/api/ec2/__init__.py:95 +#: cinder/api/openstack/__init__.py:104 #, python-format -msgid "FaultWrapper: %s" +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" msgstr "" -#: cinder/api/ec2/__init__.py:170 -msgid "Too many failed authentications." -msgstr "認証失敗の回数が多すぎます。" - -#: cinder/api/ec2/__init__.py:180 +#: cinder/api/openstack/__init__.py:110 #, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:126 msgid "" -"Access key %(access_key)s has had %(failures)d failed authentications and" -" will be locked out for %(lock_mins)d minutes." -msgstr "アクセスキー %(access_key)s は %(failures)d 回認証に失敗しましたので、%(lock_mins)d 分間ロックします。" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" -#: cinder/api/ec2/__init__.py:267 -msgid "Signature not provided" +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." 
msgstr "" -#: cinder/api/ec2/__init__.py:271 -msgid "Access key not provided" +#: cinder/api/openstack/wsgi.py:220 cinder/api/openstack/wsgi.py:634 +msgid "cannot understand JSON" msgstr "" -#: cinder/api/ec2/__init__.py:306 cinder/api/ec2/__init__.py:319 -msgid "Failure communicating with keystone" +#: cinder/api/openstack/wsgi.py:639 +msgid "too many body keys" msgstr "" -#: cinder/api/ec2/__init__.py:388 +#: cinder/api/openstack/wsgi.py:677 #, python-format -msgid "Authentication Failure: %s" -msgstr "%s の認証に失敗しました。" +msgid "Exception handling resource: %s" +msgstr "" -#: cinder/api/ec2/__init__.py:404 +#: cinder/api/openstack/wsgi.py:682 #, python-format -msgid "Authenticated Request For %(uname)s:%(pname)s)" -msgstr "%(uname)s 用の認証リクエスト:%(pname)s)" +msgid "Fault thrown: %s" +msgstr "" -#: cinder/api/ec2/__init__.py:435 +#: cinder/api/openstack/wsgi.py:685 #, python-format -msgid "action: %s" -msgstr "アクション(action): %s" +msgid "HTTP exception thrown: %s" +msgstr "" -#: cinder/api/ec2/__init__.py:437 -#, python-format -msgid "arg: %(key)s\t\tval: %(value)s" -msgstr "引数: %(key)s\t\t値: %(value)s" +#: cinder/api/openstack/wsgi.py:793 +msgid "Empty body provided in request" +msgstr "" -#: cinder/api/ec2/__init__.py:512 -#, python-format -msgid "Unauthorized request for controller=%(controller)s and action=%(action)s" -msgstr "コントローラ=%(controller)s とアクション=%(action)s 用の許可されていないリクエスト" +#: cinder/api/openstack/wsgi.py:799 +msgid "Unrecognized Content-Type provided in request" +msgstr "" -#: cinder/api/ec2/__init__.py:584 -#, python-format -msgid "InstanceNotFound raised: %s" -msgstr "InstanceNotFound が発行されました: %s" +#: cinder/api/openstack/wsgi.py:803 +msgid "No Content-Type provided in request" +msgstr "" -#: cinder/api/ec2/__init__.py:590 +#: cinder/api/openstack/wsgi.py:914 #, python-format -msgid "VolumeNotFound raised: %s" -msgstr "VolumeNotFound が発行されました: %s" +msgid "There is no such action: %s" +msgstr "" -#: cinder/api/ec2/__init__.py:596 -#, python-format -msgid "SnapshotNotFound raised: %s" +#: cinder/api/openstack/wsgi.py:917 cinder/api/openstack/wsgi.py:930 +#: cinder/api/v1/snapshot_metadata.py:53 cinder/api/v1/snapshot_metadata.py:71 +#: cinder/api/v1/snapshot_metadata.py:96 cinder/api/v1/snapshot_metadata.py:121 +#: cinder/api/v1/volume_metadata.py:53 cinder/api/v1/volume_metadata.py:71 +#: cinder/api/v1/volume_metadata.py:96 cinder/api/v1/volume_metadata.py:121 +#: cinder/api/v2/snapshot_metadata.py:53 cinder/api/v2/snapshot_metadata.py:71 +#: cinder/api/v2/snapshot_metadata.py:96 cinder/api/v2/snapshot_metadata.py:121 +#: cinder/api/v2/volume_metadata.py:52 cinder/api/v2/volume_metadata.py:70 +#: cinder/api/v2/volume_metadata.py:95 cinder/api/v2/volume_metadata.py:120 +msgid "Malformed request body" msgstr "" -#: cinder/api/ec2/__init__.py:602 -#, python-format -msgid "NotFound raised: %s" -msgstr "NotFound 発生: %s" +#: cinder/api/openstack/wsgi.py:927 +msgid "Unsupported Content-Type" +msgstr "" -#: cinder/api/ec2/__init__.py:605 -#, python-format -msgid "EC2APIError raised: %s" +#: cinder/api/openstack/wsgi.py:939 +msgid "Malformed request url" msgstr "" -#: cinder/api/ec2/__init__.py:613 +#: cinder/api/openstack/wsgi.py:987 #, python-format -msgid "KeyPairExists raised: %s" +msgid "%(url)s returned a fault: %(e)s" msgstr "" -#: cinder/api/ec2/__init__.py:617 -#, python-format -msgid "InvalidParameterValue raised: %s" +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. 
Please use " +"cinder.api.v1.router:APIRouter instead." msgstr "" -#: cinder/api/ec2/__init__.py:621 -#, python-format -msgid "InvalidPortRange raised: %s" +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." msgstr "" -#: cinder/api/ec2/__init__.py:625 +#: cinder/api/v1/limits.py:139 cinder/api/v2/limits.py:138 #, python-format -msgid "NotAuthorized raised: %s" +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." msgstr "" -#: cinder/api/ec2/__init__.py:629 -#, python-format -msgid "InvalidRequest raised: %s" +#: cinder/api/v1/limits.py:264 cinder/api/v2/limits.py:261 +msgid "This request was rate-limited." msgstr "" -#: cinder/api/ec2/__init__.py:633 -#, fuzzy, python-format -msgid "QuotaError raised: %s" -msgstr "予期しないエラー発生: %s" +#: cinder/api/v1/snapshot_metadata.py:37 cinder/api/v1/snapshot_metadata.py:117 +#: cinder/api/v1/snapshot_metadata.py:156 cinder/api/v2/snapshot_metadata.py:37 +#: cinder/api/v2/snapshot_metadata.py:117 +#: cinder/api/v2/snapshot_metadata.py:156 +#, fuzzy +msgid "snapshot does not exist" +msgstr "snapshotting: インスタンス %s のスナップショットを取得中" -#: cinder/api/ec2/__init__.py:637 -#, python-format -msgid "Invalid id: bogus (expecting \"i-...\"): %s" +#: cinder/api/v1/snapshot_metadata.py:139 +#: cinder/api/v1/snapshot_metadata.py:149 cinder/api/v1/volume_metadata.py:139 +#: cinder/api/v1/volume_metadata.py:149 cinder/api/v2/snapshot_metadata.py:139 +#: cinder/api/v2/snapshot_metadata.py:149 cinder/api/v2/volume_metadata.py:138 +#: cinder/api/v2/volume_metadata.py:148 +msgid "Metadata item was not found" msgstr "" -#: cinder/api/ec2/__init__.py:646 +#: cinder/api/v1/snapshots.py:119 cinder/api/v2/snapshots.py:120 #, python-format -msgid "Unexpected error raised: %s" -msgstr "予期しないエラー発生: %s" - -#: cinder/api/ec2/__init__.py:647 -#, python-format -msgid "Environment: %s" +msgid "Delete snapshot with id: %s" msgstr "" -#: cinder/api/ec2/__init__.py:649 cinder/api/metadata/handler.py:248 -msgid "An unknown error has occurred. Please try your request again." -msgstr "未知のエラーが発生しました。再度リクエストを実行してください。" +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:184 +msgid "'volume_id' must be specified" +msgstr "" -#: cinder/api/ec2/apirequest.py:64 +#: cinder/api/v1/snapshots.py:182 cinder/api/v2/snapshots.py:193 #, python-format -msgid "Unsupported API request: controller = %(controller)s, action = %(action)s" -msgstr "未サポートの API リクエスト: コントローラ = %(controller)s, アクション = %(action)s" +msgid "Create snapshot from volume %s" +msgstr "" -#: cinder/api/ec2/cloud.py:336 +#: cinder/api/v1/snapshots.py:186 cinder/api/v2/snapshots.py:202 #, python-format -msgid "Create snapshot of volume %s" +msgid "Invalid value '%s' for force. " msgstr "" -#: cinder/api/ec2/cloud.py:372 +#: cinder/api/v1/volume_metadata.py:37 cinder/api/v1/volume_metadata.py:117 +#: cinder/api/v1/volume_metadata.py:156 cinder/api/v2/volume_metadata.py:36 +#: cinder/api/v2/volume_metadata.py:116 cinder/api/v2/volume_metadata.py:155 +#, fuzzy +msgid "volume does not exist" +msgstr "ボリュームグループ%sが存在しません。" + +#: cinder/api/v1/volumes.py:111 #, python-format -msgid "" -"Value (%s) for KeyName is invalid. Content limited to Alphanumeric " -"character, spaces, dashes, and underscore." +msgid "vol=%s" msgstr "" -#: cinder/api/ec2/cloud.py:378 +#: cinder/api/v1/volumes.py:290 cinder/api/v2/volumes.py:228 #, python-format -msgid "Value (%s) for Keyname is invalid. 
Length exceeds maximum of 255." +msgid "Delete volume with id: %s" msgstr "" -#: cinder/api/ec2/cloud.py:382 +#: cinder/api/v1/volumes.py:344 cinder/api/v1/volumes.py:348 +#: cinder/api/v2/volumes.py:298 cinder/api/v2/volumes.py:302 +msgid "Invalid imageRef provided." +msgstr "" + +#: cinder/api/v1/volumes.py:388 cinder/api/v2/volumes.py:354 #, python-format -msgid "Create key pair %s" -msgstr "Create key pair: キーペア %s を作成します。" +msgid "snapshot id:%s not found" +msgstr "" -#: cinder/api/ec2/cloud.py:391 +#: cinder/api/v1/volumes.py:401 #, python-format -msgid "Import key %s" +msgid "source vol id:%s not found" msgstr "" -#: cinder/api/ec2/cloud.py:409 +#: cinder/api/v1/volumes.py:412 cinder/api/v2/volumes.py:377 #, python-format -msgid "Delete key pair %s" -msgstr "Delete key pair: キーペア %s を削除します。" +msgid "Create volume of %s GB" +msgstr "Create volume: %s GBのボリュームを作成します。" -#: cinder/api/ec2/cloud.py:551 -msgid "Invalid CIDR" +#: cinder/api/v1/volumes.py:496 +#, python-format +msgid "Removing options '%(bad_options)s' from query" msgstr "" -#: cinder/api/ec2/cloud.py:639 cinder/api/ec2/cloud.py:693 -#: cinder/api/ec2/cloud.py:800 -msgid "Not enough parameters, need group_name or group_id" +#: cinder/api/v2/snapshots.py:111 cinder/api/v2/snapshots.py:126 +#: cinder/api/v2/snapshots.py:267 +msgid "Snapshot could not be found" msgstr "" -#: cinder/api/ec2/cloud.py:654 -#: cinder/api/openstack/compute/contrib/security_groups.py:517 +#: cinder/api/v2/snapshots.py:174 cinder/api/v2/snapshots.py:234 +#: cinder/api/v2/volumes.py:313 cinder/api/v2/volumes.py:419 #, python-format -msgid "Revoke security group ingress %s" -msgstr "Revoke security group ingress: セキュリティグループ許可 %s の取消" +msgid "Missing required element '%s' in request body" +msgstr "" -#: cinder/api/ec2/cloud.py:666 cinder/api/ec2/cloud.py:719 -#, fuzzy, python-format -msgid "%s Not enough parameters to build a valid rule" -msgstr "有効なルールを作成する為の十分なパラメータがありません" +#: cinder/api/v2/snapshots.py:190 cinder/api/v2/volumes.py:217 +#: cinder/api/v2/volumes.py:234 cinder/api/v2/volumes.py:449 +msgid "Volume could not be found" +msgstr "" -#: cinder/api/ec2/cloud.py:684 cinder/api/ec2/cloud.py:744 -msgid "No rule for the specified parameters." -msgstr "指定されたパラメータに該当するルールがありません。" +#: cinder/api/v2/snapshots.py:230 cinder/api/v2/volumes.py:415 +msgid "Missing request body" +msgstr "" -#: cinder/api/ec2/cloud.py:708 -#: cinder/api/openstack/compute/contrib/security_groups.py:354 -#, python-format -msgid "Authorize security group ingress %s" -msgstr "Authorize security group ingress: セキュリティグループ許可 %s" +#: cinder/api/v2/types.py:70 +msgid "Volume type not found" +msgstr "" -#: cinder/api/ec2/cloud.py:725 -#, fuzzy, python-format -msgid "%s - This rule already exists in group" -msgstr "指定されたルールは既にグループ %s に存在しています。" +#: cinder/api/v2/volumes.py:237 +msgid "Volume cannot be deleted while in attached state" +msgstr "" -#: cinder/api/ec2/cloud.py:769 -#, python-format -msgid "" -"Value (%s) for parameter GroupName is invalid. Content limited to " -"Alphanumeric characters, spaces, dashes, and underscores." +#: cinder/api/v2/volumes.py:343 +msgid "Volume type not found." msgstr "" -#: cinder/api/ec2/cloud.py:776 +#: cinder/api/v2/volumes.py:366 #, python-format -msgid "" -"Value (%s) for parameter GroupName is invalid. Length exceeds maximum of " -"255." 
+msgid "source volume id:%s not found" msgstr "" -#: cinder/api/ec2/cloud.py:780 -#: cinder/api/openstack/compute/contrib/security_groups.py:292 +#: cinder/api/v2/volumes.py:472 #, python-format -msgid "Create Security Group %s" -msgstr "Create Security Group: セキュリティグループ %s を作成します。" +msgid "Removing options '%s' from query" +msgstr "" -#: cinder/api/ec2/cloud.py:783 -#, python-format -msgid "group %s already exists" -msgstr "グループ %s は既に存在しています。" +#: cinder/backup/api.py:66 +#, fuzzy +msgid "Backup status must be available or error" +msgstr "ボリュームのステータス(status)は available でなければなりません。" -#: cinder/api/ec2/cloud.py:815 -#: cinder/api/openstack/compute/contrib/security_groups.py:245 -#, python-format -msgid "Delete security group %s" -msgstr "Delete security group: セキュリティグループ %s を削除します。" +#: cinder/backup/api.py:105 +#, fuzzy +msgid "Volume to be backed up must be available" +msgstr "ボリュームのステータス(status)は available でなければなりません。" -#: cinder/api/ec2/cloud.py:823 cinder/compute/manager.py:1630 -#, python-format -msgid "Get console output for instance %s" -msgstr "Get console output: インスタンス %s のコンソール出力を取得します。" +#: cinder/backup/api.py:140 +#, fuzzy +msgid "Backup status must be available" +msgstr "ボリュームのステータス(status)は available でなければなりません。" -#: cinder/api/ec2/cloud.py:894 -#, python-format -msgid "Create volume from snapshot %s" +#: cinder/backup/api.py:145 +msgid "Backup to be restored has invalid size" msgstr "" -#: cinder/api/ec2/cloud.py:898 cinder/api/openstack/compute/contrib/volumes.py:186 -#: cinder/api/openstack/volume/volumes.py:222 +#: cinder/backup/api.py:154 #, python-format -msgid "Create volume of %s GB" -msgstr "Create volume: %s GBのボリュームを作成します。" +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:170 +#, fuzzy +msgid "Volume to be restored to must be available" +msgstr "ボリュームのステータス(status)は available でなければなりません。" -#: cinder/api/ec2/cloud.py:921 -msgid "Delete Failed" +#: cinder/backup/api.py:176 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." msgstr "" -#: cinder/api/ec2/cloud.py:931 +#: cinder/backup/api.py:181 #, python-format -msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" -msgstr "ボリューム %(volume_id)s をインスタンス %(instance_id)s のデバイス %(device)s に接続" +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" -#: cinder/api/ec2/cloud.py:939 -msgid "Attach Failed." +#: cinder/backup/manager.py:97 +msgid "NULL host not allowed for volume backend lookup." msgstr "" -#: cinder/api/ec2/cloud.py:952 cinder/api/openstack/compute/contrib/volumes.py:366 +#: cinder/backup/manager.py:100 #, python-format -msgid "Detach volume %s" -msgstr "Detach volume: ボリューム %s をデタッチします" - -#: cinder/api/ec2/cloud.py:959 -#, fuzzy, python-format -msgid "Detach Volume Failed." -msgstr "Detach volume: ボリューム %s をデタッチします" +msgid "Checking hostname '%s' for backend info." +msgstr "" -#: cinder/api/ec2/cloud.py:984 cinder/api/ec2/cloud.py:1041 -#: cinder/api/ec2/cloud.py:1518 cinder/api/ec2/cloud.py:1533 +#: cinder/backup/manager.py:107 #, python-format -msgid "attribute not supported: %s" -msgstr "アトリビュート %s はサポートされていません。" +msgid "Backend not found in hostname (%s) so using default." +msgstr "" -#: cinder/api/ec2/cloud.py:1107 +#: cinder/backup/manager.py:117 #, python-format -msgid "vol = %s\n" +msgid "Manager requested for volume_backend '%s'." 
msgstr "" -#: cinder/api/ec2/cloud.py:1255 -msgid "Allocate address" -msgstr "Allocate address: アドレスを割り当てます。" +#: cinder/backup/manager.py:120 cinder/backup/manager.py:132 +msgid "Fetching default backend." +msgstr "" -#: cinder/api/ec2/cloud.py:1267 +#: cinder/backup/manager.py:123 #, python-format -msgid "Release address %s" -msgstr "Release address: アドレス %s を開放します。" +msgid "Volume manager for backend '%s' does not exist." +msgstr "" -#: cinder/api/ec2/cloud.py:1272 +#: cinder/backup/manager.py:129 #, python-format -msgid "Associate address %(public_ip)s to instance %(instance_id)s" -msgstr "インスタンス %(instance_id)s にアドレス %(public_ip)s を割り当て" +msgid "Driver requested for volume_backend '%s'." +msgstr "" -#: cinder/api/ec2/cloud.py:1282 +#: cinder/backup/manager.py:147 #, python-format -msgid "Disassociate address %s" -msgstr "Disassociate address: アドレス %s の関連付けを解除します。" - -#: cinder/api/ec2/cloud.py:1308 -msgid "Image must be available" +msgid "" +"Registering backend %(backend)s (host=%(host)s " +"backend_name=%(backend_name)s)." msgstr "" -#: cinder/api/ec2/cloud.py:1329 -msgid "Going to start terminating instances" -msgstr "インスタンス終了処理を開始します。" - -#: cinder/api/ec2/cloud.py:1343 +#: cinder/backup/manager.py:154 #, python-format -msgid "Reboot instance %r" -msgstr "Reboot instance: インスタンス %r を再起動します。" - -#: cinder/api/ec2/cloud.py:1354 -msgid "Going to stop instances" +msgid "Registering default backend %s." msgstr "" -#: cinder/api/ec2/cloud.py:1365 -msgid "Going to start instances" +#: cinder/backup/manager.py:158 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)." msgstr "" -#: cinder/api/ec2/cloud.py:1455 +#: cinder/backup/manager.py:165 #, python-format -msgid "De-registering image %s" -msgstr "De-registering image: イメージ %s を登録解除します。" +msgid "Error encountered during initialization of driver: %(name)s." +msgstr "" -#: cinder/api/ec2/cloud.py:1471 -msgid "imageLocation is required" +#: cinder/backup/manager.py:184 +msgid "Cleaning up incomplete backup operations." msgstr "" -#: cinder/api/ec2/cloud.py:1490 +#: cinder/backup/manager.py:189 #, python-format -msgid "Registered image %(image_location)s with id %(image_id)s" -msgstr "イメージ %(image_location)s が ID %(image_id)s で登録されました" - -#: cinder/api/ec2/cloud.py:1536 -msgid "user or group not specified" -msgstr "ユーザまたはグループが指定されていません。" - -#: cinder/api/ec2/cloud.py:1538 -msgid "only group \"all\" is supported" -msgstr "グループ \"all\" のみサポートされています。" - -#: cinder/api/ec2/cloud.py:1540 -msgid "operation_type must be add or remove" -msgstr "operation_type は add または remove の何れかである必要があります。" +msgid "Resetting volume %s to available (was backing-up)." +msgstr "" -#: cinder/api/ec2/cloud.py:1542 +#: cinder/backup/manager.py:194 #, python-format -msgid "Updating image %s publicity" -msgstr "イメージ %s の公開設定を更新します。" +msgid "Resetting volume %s to error_restoring (was restoring-backup)." +msgstr "" -#: cinder/api/ec2/cloud.py:1555 +#: cinder/backup/manager.py:206 #, python-format -msgid "Not allowed to modify attributes for image %s" +msgid "Resetting backup %s to error (was creating)." msgstr "" -#: cinder/api/ec2/cloud.py:1603 +#: cinder/backup/manager.py:212 #, python-format -msgid "Couldn't stop instance with in %d sec" +msgid "Resetting backup %s to available (was restoring)." 
msgstr "" -#: cinder/api/metadata/handler.py:246 cinder/api/metadata/handler.py:253 +#: cinder/backup/manager.py:217 #, python-format -msgid "Failed to get metadata for ip: %s" -msgstr "ip %s に対するメタデータの取得に失敗しました。" +msgid "Resuming delete on backup: %s." +msgstr "" -#: cinder/api/openstack/__init__.py:43 +#: cinder/backup/manager.py:225 #, python-format -msgid "Caught error: %s" -msgstr "エラー %s をキャッチしました。" +msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" -#: cinder/api/openstack/__init__.py:45 cinder/api/openstack/wsgi.py:886 +#: cinder/backup/manager.py:237 #, python-format -msgid "%(url)s returned with HTTP %(status)d" +msgid "" +"Create backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s." msgstr "" -#: cinder/api/openstack/__init__.py:94 -msgid "Must specify an ExtensionManager class" +#: cinder/backup/manager.py:249 +#, python-format +msgid "" +"Create backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." msgstr "" -#: cinder/api/openstack/__init__.py:105 +#: cinder/backup/manager.py:282 #, python-format -msgid "Extended resource: %s" +msgid "Create backup finished. backup: %s." msgstr "" -#: cinder/api/openstack/__init__.py:130 +#: cinder/backup/manager.py:286 #, python-format -msgid "" -"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " -"resource" +msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s." msgstr "" -#: cinder/api/openstack/__init__.py:135 +#: cinder/backup/manager.py:299 #, python-format -msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgid "" +"Restore backup aborted: expected volume status %(expected_status)s but " +"got %(actual_status)s." msgstr "" -#: cinder/api/openstack/auth.py:90 +#: cinder/backup/manager.py:310 #, python-format -msgid "%(user_id)s could not be found with token '%(token)s'" +msgid "" +"Restore backup aborted: expected backup status %(expected_status)s but " +"got %(actual_status)s." msgstr "" -#: cinder/api/openstack/auth.py:134 +#: cinder/backup/manager.py:329 #, python-format -msgid "%(user_id)s must be an admin or a member of %(project_id)s" +msgid "" +"Restore backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." msgstr "" -#: cinder/api/openstack/auth.py:152 -msgid "Authentication requests must be made against a version root (e.g. /v2)." +#: cinder/backup/manager.py:360 +#, python-format +msgid "" +"Restore backup finished, backup %(backup_id)s restored to volume " +"%(volume_id)s." msgstr "" -#: cinder/api/openstack/auth.py:167 +#: cinder/backup/manager.py:379 #, python-format -msgid "Could not find %s in request." +msgid "Delete backup started, backup: %s." msgstr "" -#: cinder/api/openstack/auth.py:191 +#: cinder/backup/manager.py:386 #, python-format -msgid "Successfully authenticated '%s'" +msgid "" +"Delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." msgstr "" -#: cinder/api/openstack/auth.py:241 -msgid "User not found with provided API key." +#: cinder/backup/manager.py:399 +#, python-format +msgid "" +"Delete backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." 
msgstr "" -#: cinder/api/openstack/auth.py:258 +#: cinder/backup/manager.py:422 #, python-format -msgid "Provided API key is valid, but not for user '%(username)s'" +msgid "Delete backup finished, backup %s deleted." msgstr "" -#: cinder/api/openstack/common.py:133 cinder/api/openstack/common.py:167 -msgid "limit param must be an integer" +#: cinder/backup/drivers/ceph.py:116 +msgid "" +"rbd striping not supported - ignoring configuration settings for rbd " +"striping" msgstr "" -#: cinder/api/openstack/common.py:136 cinder/api/openstack/common.py:171 -msgid "limit param must be positive" +#: cinder/backup/drivers/ceph.py:147 +#, python-format +msgid "invalid user '%s'" msgstr "" -#: cinder/api/openstack/common.py:161 -msgid "offset param must be an integer" +#: cinder/backup/drivers/ceph.py:213 +msgid "backup_id required" msgstr "" -#: cinder/api/openstack/common.py:175 -msgid "offset param must be positive" +#: cinder/backup/drivers/ceph.py:224 +#, python-format +msgid "discarding %(length)s bytes from offset %(offset)s" msgstr "" -#: cinder/api/openstack/common.py:203 +#: cinder/backup/drivers/ceph.py:232 #, python-format -msgid "marker [%s] not found" +msgid "writing zeroes chunk %d" msgstr "" -#: cinder/api/openstack/common.py:243 +#: cinder/backup/drivers/ceph.py:246 #, python-format -msgid "href %s does not contain version" +msgid "transferring data between '%(src)s' and '%(dest)s'" msgstr "" -#: cinder/api/openstack/common.py:278 -msgid "Image metadata limit exceeded" +#: cinder/backup/drivers/ceph.py:250 +#, python-format +msgid "%(chunks)s chunks of %(bytes)s bytes to be transferred" msgstr "" -#: cinder/api/openstack/common.py:295 +#: cinder/backup/drivers/ceph.py:269 #, python-format -msgid "Converting nw_info: %s" +msgid "transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s)" msgstr "" -#: cinder/api/openstack/common.py:305 +#: cinder/backup/drivers/ceph.py:279 #, python-format -msgid "Converted networks: %s" +msgid "transferring remaining %s bytes" msgstr "" -#: cinder/api/openstack/common.py:338 +#: cinder/backup/drivers/ceph.py:295 #, python-format -msgid "Cannot '%(action)s' while instance is in %(attr)s %(state)s" +msgid "creating base image '%s'" msgstr "" -#: cinder/api/openstack/common.py:341 +#: cinder/backup/drivers/ceph.py:322 cinder/backup/drivers/ceph.py:603 #, python-format -msgid "Instance is in an invalid state for '%(action)s'" +msgid "deleting backup snapshot='%s'" msgstr "" -#: cinder/api/openstack/common.py:421 -msgid "Rejecting snapshot request, snapshots currently disabled" +#: cinder/backup/drivers/ceph.py:325 +msgid "no backup snapshot to delete" msgstr "" -#: cinder/api/openstack/common.py:423 -msgid "Instance snapshots are not permitted at this time." 
+#: cinder/backup/drivers/ceph.py:361 +#, python-format +msgid "trying diff format name format basename='%s'" msgstr "" -#: cinder/api/openstack/extensions.py:188 +#: cinder/backup/drivers/ceph.py:369 #, python-format -msgid "Loaded extension: %s" +msgid "image %s not found" msgstr "" -#: cinder/api/openstack/extensions.py:225 +#: cinder/backup/drivers/ceph.py:377 #, python-format -msgid "Ext name: %s" +msgid "base image still has %s snapshots so skipping base image delete" msgstr "" -#: cinder/api/openstack/extensions.py:226 +#: cinder/backup/drivers/ceph.py:382 #, python-format -msgid "Ext alias: %s" +msgid "deleting base image='%s'" msgstr "" -#: cinder/api/openstack/extensions.py:227 +#: cinder/backup/drivers/ceph.py:389 #, python-format -msgid "Ext description: %s" +msgid "image busy, retrying %(retries)s more time(s) in %(delay)ss" msgstr "" -#: cinder/api/openstack/extensions.py:229 -#, python-format -msgid "Ext namespace: %s" +#: cinder/backup/drivers/ceph.py:394 +msgid "max retries reached - raising error" msgstr "" -#: cinder/api/openstack/extensions.py:230 +#: cinder/backup/drivers/ceph.py:397 #, python-format -msgid "Ext updated: %s" +msgid "base backup image='%s' deleted)" msgstr "" -#: cinder/api/openstack/extensions.py:232 +#: cinder/backup/drivers/ceph.py:407 #, python-format -msgid "Exception loading extension: %s" +msgid "deleting source snap '%s'" msgstr "" -#: cinder/api/openstack/extensions.py:246 +#: cinder/backup/drivers/ceph.py:453 #, python-format -msgid "Loading extension %s" +msgid "performing differential transfer from '%(src)s' to '%(dest)s'" msgstr "" -#: cinder/api/openstack/extensions.py:252 +#: cinder/backup/drivers/ceph.py:478 #, python-format -msgid "Calling extension factory %s" +msgid "rbd diff op failed - (ret=%(ret)s stderr=%(stderr)s)" msgstr "" -#: cinder/api/openstack/extensions.py:264 +#: cinder/backup/drivers/ceph.py:488 #, python-format -msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgid "image '%s' not found - trying diff format name" msgstr "" -#: cinder/api/openstack/extensions.py:344 +#: cinder/backup/drivers/ceph.py:493 #, python-format -msgid "Failed to load extension %(classpath)s: %(exc)s" +msgid "diff format image '%s' not found" msgstr "" -#: cinder/api/openstack/extensions.py:368 +#: cinder/backup/drivers/ceph.py:528 #, python-format -msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgid "using --from-snap '%s'" msgstr "" -#: cinder/api/openstack/wsgi.py:135 cinder/api/openstack/wsgi.py:538 -msgid "cannot understand JSON" +#: cinder/backup/drivers/ceph.py:543 +#, python-format +msgid "source snap '%s' is stale so deleting" msgstr "" -#: cinder/api/openstack/wsgi.py:159 -#: cinder/api/openstack/compute/contrib/hosts.py:86 -msgid "cannot understand XML" +#: cinder/backup/drivers/ceph.py:555 +#, python-format +msgid "" +"snap='%(snap)s' does not exist in base image='%(base)s' - aborting " +"incremental backup" msgstr "" -#: cinder/api/openstack/wsgi.py:543 -msgid "too many body keys" +#: cinder/backup/drivers/ceph.py:566 +#, python-format +msgid "creating backup snapshot='%s'" msgstr "" -#: cinder/api/openstack/wsgi.py:582 +#: cinder/backup/drivers/ceph.py:586 #, python-format -msgid "Exception handling resource: %s" +msgid "differential backup transfer completed in %.4fs" msgstr "" -#: cinder/api/openstack/wsgi.py:586 -#, python-format -msgid "Fault thrown: %s" +#: cinder/backup/drivers/ceph.py:595 +msgid "differential backup transfer failed" msgstr "" -#: cinder/api/openstack/wsgi.py:589 +#: 
cinder/backup/drivers/ceph.py:625 #, python-format -msgid "HTTP exception thrown: %s" +msgid "creating base image='%s'" msgstr "" -#: cinder/api/openstack/wsgi.py:697 -msgid "Unrecognized Content-Type provided in request" +#: cinder/backup/drivers/ceph.py:634 +msgid "copying data" msgstr "" -#: cinder/api/openstack/wsgi.py:701 -msgid "No Content-Type provided in request" +#: cinder/backup/drivers/ceph.py:694 +#, python-format +msgid "looking for snapshot of backup base '%s'" msgstr "" -#: cinder/api/openstack/wsgi.py:705 -msgid "Empty body provided in request" +#: cinder/backup/drivers/ceph.py:697 +#, python-format +msgid "backup base '%s' has no snapshots" msgstr "" -#: cinder/api/openstack/wsgi.py:816 +#: cinder/backup/drivers/ceph.py:704 #, python-format -msgid "There is no such action: %s" +msgid "backup '%s' has no snapshot" msgstr "" -#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 -#: cinder/api/openstack/compute/server_metadata.py:58 -#: cinder/api/openstack/compute/server_metadata.py:76 -#: cinder/api/openstack/compute/server_metadata.py:101 -#: cinder/api/openstack/compute/server_metadata.py:126 -#: cinder/api/openstack/compute/contrib/admin_actions.py:211 -#: cinder/api/openstack/compute/contrib/console_output.py:52 -msgid "Malformed request body" +#: cinder/backup/drivers/ceph.py:708 +#, python-format +msgid "backup should only have one snapshot but instead has %s" msgstr "" -#: cinder/api/openstack/wsgi.py:829 -msgid "Unsupported Content-Type" +#: cinder/backup/drivers/ceph.py:713 +#, python-format +msgid "found snapshot '%s'" msgstr "" -#: cinder/api/openstack/wsgi.py:841 -msgid "Malformed request url" +#: cinder/backup/drivers/ceph.py:734 +msgid "need non-zero volume size" msgstr "" -#: cinder/api/openstack/wsgi.py:889 +#: cinder/backup/drivers/ceph.py:751 #, python-format -msgid "%(url)s returned a fault: %(e)s" +msgid "Starting backup of volume='%s'" msgstr "" -#: cinder/api/openstack/xmlutil.py:265 -msgid "element is not a child" -msgstr "" - -#: cinder/api/openstack/xmlutil.py:414 -msgid "root element selecting a list" +#: cinder/backup/drivers/ceph.py:764 +msgid "forcing full backup" msgstr "" -#: cinder/api/openstack/xmlutil.py:739 +#: cinder/backup/drivers/ceph.py:776 #, python-format -msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgid "backup '%s' finished." msgstr "" -#: cinder/api/openstack/xmlutil.py:858 -msgid "subclasses must implement construct()!" +#: cinder/backup/drivers/ceph.py:834 +msgid "adjusting restore vol size" msgstr "" -#: cinder/api/openstack/compute/extensions.py:29 -#: cinder/api/openstack/volume/extensions.py:29 -msgid "Initializing extension manager." +#: cinder/backup/drivers/ceph.py:846 +#, python-format +msgid "trying incremental restore from base='%(base)s' snap='%(snap)s'" msgstr "" -#: cinder/api/openstack/compute/image_metadata.py:40 -#: cinder/api/openstack/compute/images.py:146 -#: cinder/api/openstack/compute/images.py:161 -msgid "Image not found." 
+#: cinder/backup/drivers/ceph.py:858 +msgid "differential restore failed, trying full restore" msgstr "" -#: cinder/api/openstack/compute/image_metadata.py:79 -msgid "Incorrect request body format" +#: cinder/backup/drivers/ceph.py:869 +#, python-format +msgid "restore transfer completed in %.4fs" msgstr "" -#: cinder/api/openstack/compute/image_metadata.py:83 -#: cinder/api/openstack/compute/server_metadata.py:80 -#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:79 -#: cinder/api/openstack/compute/contrib/volumetypes.py:188 -msgid "Request body and URI mismatch" +#: cinder/backup/drivers/ceph.py:916 +#, python-format +msgid "rbd has %s extents" msgstr "" -#: cinder/api/openstack/compute/image_metadata.py:86 -#: cinder/api/openstack/compute/server_metadata.py:84 -#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:82 -#: cinder/api/openstack/compute/contrib/volumetypes.py:191 -msgid "Request body contains too many items" +#: cinder/backup/drivers/ceph.py:938 +msgid "dest volume is original volume - forcing full copy" msgstr "" -#: cinder/api/openstack/compute/image_metadata.py:111 -msgid "Invalid metadata key" +#: cinder/backup/drivers/ceph.py:959 +msgid "destination has extents - forcing full copy" msgstr "" -#: cinder/api/openstack/compute/ips.py:74 -msgid "Instance does not exist" +#: cinder/backup/drivers/ceph.py:964 +#, python-format +msgid "no restore point found for backup='%s', forcing full copy" msgstr "" -#: cinder/api/openstack/compute/ips.py:97 -msgid "Instance is not a member of specified network" +#: cinder/backup/drivers/ceph.py:995 +msgid "forcing full restore" msgstr "" -#: cinder/api/openstack/compute/limits.py:140 +#: cinder/backup/drivers/ceph.py:1005 #, python-format -msgid "" -"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " -"%(unit_string)s." +msgid "starting restore from Ceph backup=%(src)s to volume=%(dest)s" msgstr "" -#: cinder/api/openstack/compute/limits.py:266 -msgid "This request was rate-limited." +#: cinder/backup/drivers/ceph.py:1016 +msgid "volume_file does not support fileno() so skipping fsync()" msgstr "" -#: cinder/api/openstack/compute/server_metadata.py:38 -#: cinder/api/openstack/compute/server_metadata.py:122 -#: cinder/api/openstack/compute/server_metadata.py:159 -msgid "Server does not exist" +#: cinder/backup/drivers/ceph.py:1021 +msgid "restore finished successfully." 
msgstr "" -#: cinder/api/openstack/compute/server_metadata.py:141 -#: cinder/api/openstack/compute/server_metadata.py:152 -msgid "Metadata item was not found" +#: cinder/backup/drivers/ceph.py:1023 +#, python-format +msgid "restore finished with error - %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:425 +#: cinder/backup/drivers/ceph.py:1029 #, python-format -msgid "Invalid server status: %(status)s" +msgid "delete started for backup=%s" msgstr "" -#: cinder/api/openstack/compute/servers.py:433 -msgid "Invalid changes-since value" +#: cinder/backup/drivers/ceph.py:1034 +msgid "rbd image not found but continuing anyway so that db entry can be removed" msgstr "" -#: cinder/api/openstack/compute/servers.py:483 -msgid "Personality file limit exceeded" +#: cinder/backup/drivers/ceph.py:1037 +#, python-format +msgid "delete '%s' finished with warning" msgstr "" -#: cinder/api/openstack/compute/servers.py:485 -msgid "Personality file path too long" +#: cinder/backup/drivers/ceph.py:1039 +#, python-format +msgid "delete '%s' finished" msgstr "" -#: cinder/api/openstack/compute/servers.py:487 -msgid "Personality file content too long" +#: cinder/backup/drivers/swift.py:106 +#, python-format +msgid "unsupported compression algorithm: %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:501 -msgid "Server name is not a string or unicode" +#: cinder/backup/drivers/swift.py:123 +#, python-format +msgid "single_user auth mode enabled, but %(param)s not set" msgstr "" -#: cinder/api/openstack/compute/servers.py:505 -msgid "Server name is an empty string" +#: cinder/backup/drivers/swift.py:141 +#, python-format +msgid "_check_container_exists: container: %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:509 -msgid "Server name must be less than 256 characters." 
-msgstr "" +#: cinder/backup/drivers/swift.py:146 +#, fuzzy, python-format +msgid "container %s does not exist" +msgstr "ボリュームグループ%sが存在しません。" -#: cinder/api/openstack/compute/servers.py:527 +#: cinder/backup/drivers/swift.py:151 #, python-format -msgid "Bad personality format: missing %s" +msgid "container %s exists" msgstr "" -#: cinder/api/openstack/compute/servers.py:530 -msgid "Bad personality format" -msgstr "" - -#: cinder/api/openstack/compute/servers.py:535 +#: cinder/backup/drivers/swift.py:157 #, python-format -msgid "Personality content for %s cannot be decoded" +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:550 +#: cinder/backup/drivers/swift.py:173 #, python-format -msgid "Bad networks format: network uuid is not in proper format (%s)" +msgid "_generate_swift_object_name_prefix: %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:559 +#: cinder/backup/drivers/swift.py:182 #, python-format -msgid "Invalid fixed IP address (%s)" +msgid "generated object list: %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:566 +#: cinder/backup/drivers/swift.py:192 #, python-format -msgid "Duplicate networks (%s) are not allowed" +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:572 +#: cinder/backup/drivers/swift.py:209 #, python-format -msgid "Bad network format: missing %s" +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" msgstr "" -#: cinder/api/openstack/compute/servers.py:575 -msgid "Bad networks format" +#: cinder/backup/drivers/swift.py:214 +msgid "_write_metadata finished" msgstr "" -#: cinder/api/openstack/compute/servers.py:587 -msgid "Userdata content cannot be decoded" +#: cinder/backup/drivers/swift.py:219 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:594 -msgid "accessIPv4 is not proper IPv4 format" +#: cinder/backup/drivers/swift.py:224 +#, python-format +msgid "_read_metadata finished (%s)" msgstr "" -#: cinder/api/openstack/compute/servers.py:601 -msgid "accessIPv6 is not proper IPv6 format" +#: cinder/backup/drivers/swift.py:234 +#, python-format +msgid "volume size %d is invalid." msgstr "" -#: cinder/api/openstack/compute/servers.py:633 -msgid "Server name is not defined" +#: cinder/backup/drivers/swift.py:248 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:675 -#: cinder/api/openstack/compute/servers.py:740 -msgid "Invalid flavorRef provided." +#: cinder/backup/drivers/swift.py:271 +msgid "reading chunk of data from volume" msgstr "" -#: cinder/api/openstack/compute/servers.py:737 -msgid "Can not find requested image" +#: cinder/backup/drivers/swift.py:278 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:743 -msgid "Invalid key_name provided." 
+#: cinder/backup/drivers/swift.py:287 +msgid "not compressing data" msgstr "" -#: cinder/api/openstack/compute/servers.py:829 -#: cinder/api/openstack/compute/servers.py:849 -msgid "Instance has not been resized." +#: cinder/backup/drivers/swift.py:291 +msgid "About to put_object" msgstr "" -#: cinder/api/openstack/compute/servers.py:835 +#: cinder/backup/drivers/swift.py:297 #, python-format -msgid "Error in confirm-resize %s" +msgid "swift MD5 for %(object_name)s: %(etag)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:855 +#: cinder/backup/drivers/swift.py:301 #, python-format -msgid "Error in revert-resize %s" +msgid "backup MD5 for %(object_name)s: %(md5)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:868 -msgid "Argument 'type' for reboot is not HARD or SOFT" +#: cinder/backup/drivers/swift.py:304 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:872 -msgid "Missing argument 'type' for reboot" +#: cinder/backup/drivers/swift.py:312 +msgid "Calling eventlet.sleep(0)" msgstr "" -#: cinder/api/openstack/compute/servers.py:885 +#: cinder/backup/drivers/swift.py:328 cinder/backup/drivers/tsm.py:324 #, python-format -msgid "Error in reboot %s" +msgid "backup %s finished." msgstr "" -#: cinder/api/openstack/compute/servers.py:897 -msgid "Unable to locate requested flavor." +#: cinder/backup/drivers/swift.py:345 +#, python-format +msgid "v1 swift volume backup restore of %s started" msgstr "" -#: cinder/api/openstack/compute/servers.py:900 -msgid "Resize requires a change in size." +#: cinder/backup/drivers/swift.py:350 +#, python-format +msgid "metadata_object_names = %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:924 -msgid "Malformed server entity" +#: cinder/backup/drivers/swift.py:356 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" msgstr "" -#: cinder/api/openstack/compute/servers.py:931 -msgid "Missing imageRef attribute" +#: cinder/backup/drivers/swift.py:362 +#, python-format +msgid "" +"restoring object from swift. backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:940 -msgid "Invalid imageRef provided." +#: cinder/backup/drivers/swift.py:378 +#, python-format +msgid "decompressing data using %s algorithm" msgstr "" -#: cinder/api/openstack/compute/servers.py:949 -msgid "Missing flavorRef attribute" +#: cinder/backup/drivers/swift.py:401 +#, python-format +msgid "v1 swift volume backup restore of %s finished" msgstr "" -#: cinder/api/openstack/compute/servers.py:962 -msgid "No adminPass was specified" +#: cinder/backup/drivers/swift.py:409 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:966 -#: cinder/api/openstack/compute/servers.py:1144 -msgid "Invalid adminPass" +#: cinder/backup/drivers/swift.py:423 +#, python-format +msgid "Restoring swift backup version %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:980 -msgid "Unable to parse metadata key/value pairs." 
+#: cinder/backup/drivers/swift.py:428 +#, python-format +msgid "No support to restore swift backup version %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:993 -msgid "Resize request has invalid 'flavorRef' attribute." +#: cinder/backup/drivers/swift.py:432 cinder/backup/drivers/tsm.py:378 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." msgstr "" -#: cinder/api/openstack/compute/servers.py:996 -msgid "Resize requests require 'flavorRef' attribute." +#: cinder/backup/drivers/swift.py:446 +msgid "swift error while listing objects, continuing with delete" msgstr "" -#: cinder/api/openstack/compute/servers.py:1014 -#: cinder/api/openstack/compute/contrib/aggregates.py:142 -#: cinder/api/openstack/compute/contrib/networks.py:65 -msgid "Invalid request body" +#: cinder/backup/drivers/swift.py:455 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" msgstr "" -#: cinder/api/openstack/compute/servers.py:1019 -msgid "Could not parse imageRef from request." +#: cinder/backup/drivers/swift.py:458 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:1071 -msgid "Instance could not be found" +#: cinder/backup/drivers/swift.py:468 cinder/backup/drivers/tsm.py:440 +#, python-format +msgid "delete %s finished" msgstr "" -#: cinder/api/openstack/compute/servers.py:1074 -msgid "Cannot find image for rebuild" +#: cinder/backup/drivers/tsm.py:85 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to create device hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:1103 -msgid "createImage entity requires name attribute" +#: cinder/backup/drivers/tsm.py:143 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to obtain backup success notification from " +"server.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:1112 -#: cinder/api/openstack/compute/contrib/admin_actions.py:238 -msgid "Invalid metadata" +#: cinder/backup/drivers/tsm.py:173 +#, python-format +msgid "" +"restore: %(vol_id)s Failed.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:1167 +#: cinder/backup/drivers/tsm.py:199 #, python-format -msgid "Removing options '%(unk_opt_str)s' from query" +msgid "backup: %(vol_id)s Failed. %(path)s is not a block device." msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:60 +#: cinder/backup/drivers/tsm.py:206 #, python-format -msgid "Compute.api::pause %s" -msgstr "例外: Compute.api::pause %s" +msgid "backup: %(vol_id)s Failed. Cannot obtain real path to device %(path)s." +msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:77 +#: cinder/backup/drivers/tsm.py:213 #, python-format -msgid "Compute.api::unpause %s" -msgstr "例外: Compute.api::unpause %s" +msgid "backup: %(vol_id)s Failed. %(path)s is not a file." 
+msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:94 +#: cinder/backup/drivers/tsm.py:260 #, python-format -msgid "compute.api::suspend %s" -msgstr "例外: compute.api::suspend %s" +msgid "" +"backup: %(vol_id)s Failed to remove backup hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:111 +#: cinder/backup/drivers/tsm.py:286 #, python-format -msgid "compute.api::resume %s" -msgstr "例外: compute.api::resume %s" +msgid "" +"starting backup of volume: %(volume_id)s to TSM, volume path: " +"%(volume_path)s," +msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:127 +#: cinder/backup/drivers/tsm.py:298 #, python-format -msgid "Error in migrate %s" +msgid "" +"backup: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:141 +#: cinder/backup/drivers/tsm.py:308 #, python-format -msgid "Compute.api::reset_network %s" -msgstr "例外: Compute.api::reset_network %s" +msgid "" +"backup: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:154 -#: cinder/api/openstack/compute/contrib/admin_actions.py:170 -#: cinder/api/openstack/compute/contrib/admin_actions.py:186 -#: cinder/api/openstack/compute/contrib/multinic.py:41 -#: cinder/api/openstack/compute/contrib/rescue.py:44 -msgid "Server not found" +#: cinder/backup/drivers/tsm.py:338 +#, python-format +msgid "" +"restore: starting restore of backup from TSM to volume %(volume_id)s, " +"backup: %(backup_id)s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:157 +#: cinder/backup/drivers/tsm.py:352 #, python-format -msgid "Compute.api::inject_network_info %s" +msgid "" +"restore: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:173 +#: cinder/backup/drivers/tsm.py:362 #, python-format -msgid "Compute.api::lock %s" -msgstr "例外: Compute.api::lock %s" +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:189 +#: cinder/backup/drivers/tsm.py:413 #, python-format -msgid "Compute.api::unlock %s" -msgstr "例外: Compute.api::unlock %s" +msgid "" +"delete: %(vol_id)s Failed to run dsmc with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:219 +#: cinder/backup/drivers/tsm.py:421 #, python-format -msgid "createBackup entity requires %s attribute" +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments with " +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:223 -msgid "Malformed createBackup entity" +#: cinder/backup/drivers/tsm.py:432 +#, python-format +msgid "" +"delete: %(vol_id)s Failed with stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:229 -msgid "createBackup attribute 'rotation' must be an integer" +#: cinder/brick/exception.py:55 +#, python-format +msgid "Exception in string format operation. 
msg='%s'" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:244 -#: cinder/api/openstack/compute/contrib/console_output.py:47 -#: cinder/api/openstack/compute/contrib/console_output.py:59 -#: cinder/api/openstack/compute/contrib/consoles.py:49 -#: cinder/api/openstack/compute/contrib/consoles.py:60 -#: cinder/api/openstack/compute/contrib/server_action_list.py:49 -#: cinder/api/openstack/compute/contrib/server_diagnostics.py:47 -#: cinder/api/openstack/compute/contrib/server_start_stop.py:38 -msgid "Instance not found" +#: cinder/brick/exception.py:85 +msgid "We are unable to locate any Fibre Channel devices." msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:272 -msgid "host and block_migration must be specified." +#: cinder/brick/exception.py:89 +msgid "Unable to find a Fibre Channel volume device." msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:284 +#: cinder/brick/exception.py:93 #, python-format -msgid "Live migration of instance %(id)s to host %(host)s failed" +msgid "Volume device not found at %(device)s." msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:76 +#: cinder/brick/exception.py:97 #, python-format -msgid "" -"Cannot create aggregate with name %(name)s and availability zone " -"%(avail_zone)s" +msgid "Unable to find Volume Group: %(vg_name)s" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:88 +#: cinder/brick/exception.py:101 #, python-format -msgid "Cannot show aggregate: %(id)s" +msgid "Failed to create Volume Group: %(vg_name)s" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:114 +#: cinder/brick/exception.py:105 +#, fuzzy, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "存在しないコンソール %(console_id)s を削除しようとしました" + +#: cinder/brick/exception.py:109 +#, fuzzy, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "存在しないコンソール %(console_id)s を削除しようとしました" + +#: cinder/brick/exception.py:113 #, python-format -msgid "Cannot update aggregate: %(id)s" +msgid "Failed to attach iSCSI target for volume %(volume_id)s." msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:126 +#: cinder/brick/exception.py:117 #, python-format -msgid "Cannot delete aggregate: %(id)s" +msgid "Connect to volume via protocol %(protocol)s not supported." msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:139 +#: cinder/brick/initiator/connector.py:127 #, python-format -msgid "Aggregates does not have %s action" +msgid "Invalid InitiatorConnector protocol specified %(protocol)s" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:152 -#: cinder/api/openstack/compute/contrib/aggregates.py:158 +#: cinder/brick/initiator/connector.py:140 #, python-format -msgid "Cannot add host %(host)s in aggregate %(id)s" +msgid "Failed to access the device on the path %(path)s: %(error)s %(info)s." msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:171 -#: cinder/api/openstack/compute/contrib/aggregates.py:175 +#: cinder/brick/initiator/connector.py:229 #, python-format -msgid "Cannot remove host %(host)s in aggregate %(id)s" +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. 
Try" +" number: %(tries)s" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:195 +#: cinder/brick/initiator/connector.py:242 #, python-format -msgid "Cannot set metadata %(metadata)s in aggregate %(id)s" +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" msgstr "" -#: cinder/api/openstack/compute/contrib/certificates.py:75 -msgid "Only root certificate can be retrieved." +#: cinder/brick/initiator/connector.py:317 +#, python-format +msgid "Could not find the iSCSI Initiator File %s" msgstr "" -#: cinder/api/openstack/compute/contrib/cloudpipe.py:146 -msgid "" -"Unable to claim IP for VPN instances, ensure it isn't running, and try " -"again in a few minutes" +#: cinder/brick/initiator/connector.py:609 +msgid "We are unable to locate any Fibre Channel devices" msgstr "" -#: cinder/api/openstack/compute/contrib/consoles.py:44 -msgid "Missing type specification" +#: cinder/brick/initiator/connector.py:619 +#, python-format +msgid "Looking for Fibre Channel dev %(device)s" msgstr "" -#: cinder/api/openstack/compute/contrib/consoles.py:56 -msgid "Invalid type specification" +#: cinder/brick/initiator/connector.py:629 +msgid "Fibre Channel volume device not found." msgstr "" -#: cinder/api/openstack/compute/contrib/disk_config.py:44 +#: cinder/brick/initiator/connector.py:633 #, python-format -msgid "%s must be either 'MANUAL' or 'AUTO'." +msgid "Fibre volume not yet found. Will rescan & retry. Try number: %(tries)s" msgstr "" -#: cinder/api/openstack/compute/contrib/extended_server_attributes.py:77 -#: cinder/api/openstack/compute/contrib/extended_status.py:61 -msgid "Server not found." +#: cinder/brick/initiator/connector.py:649 +#, python-format +msgid "Found Fibre Channel volume %(name)s (after %(tries)s rescans)" msgstr "" -#: cinder/api/openstack/compute/contrib/flavorextradata.py:61 -#: cinder/api/openstack/compute/contrib/flavorextradata.py:91 -msgid "Flavor not found." +#: cinder/brick/initiator/connector.py:658 +#, python-format +msgid "Multipath device discovered %(device)s" msgstr "" -#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:49 -#: cinder/api/openstack/compute/contrib/volumetypes.py:158 -msgid "No Request Body" +#: cinder/brick/initiator/connector.py:776 +#, python-format +msgid "AoE volume not yet found at: %(path)s. Try number: %(tries)s" msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:159 +#: cinder/brick/initiator/connector.py:789 #, python-format -msgid "No more floating ips in pool %s." +msgid "Found AoE device %(path)s (after %(tries)s rediscover)" msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:161 -msgid "No more floating ips available." 
+#: cinder/brick/initiator/connector.py:815 +#, python-format +msgid "aoe-discover: stdout=%(out)s stderr%(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:201 -#: cinder/api/openstack/compute/contrib/floating_ips.py:230 -#: cinder/api/openstack/compute/contrib/security_groups.py:571 -#: cinder/api/openstack/compute/contrib/security_groups.py:604 -msgid "Missing parameter dict" +#: cinder/brick/initiator/connector.py:825 +#, python-format +msgid "aoe-revalidate %(dev)s: stdout=%(out)s stderr%(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:204 -#: cinder/api/openstack/compute/contrib/floating_ips.py:233 -msgid "Address not specified" +#: cinder/brick/initiator/connector.py:834 +#, python-format +msgid "aoe-flush %(dev)s: stdout=%(out)s stderr%(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:213 -msgid "No fixed ips associated to instance" +#: cinder/brick/initiator/connector.py:858 +msgid "" +"Connection details not present. RemoteFsClient may not initialize " +"properly." msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:216 -msgid "Associate floating ip failed" +#: cinder/brick/initiator/connector.py:915 +msgid "Invalid connection_properties specified no device_path attribute" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:144 -#, python-format -msgid "Invalid status: '%s'" +#: cinder/brick/initiator/linuxfc.py:50 cinder/brick/initiator/linuxfc.py:56 +msgid "systool is not installed" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:148 -#, fuzzy, python-format -msgid "Invalid mode: '%s'" -msgstr "不正なバックエンドです: %s" - -#: cinder/api/openstack/compute/contrib/hosts.py:152 +#: cinder/brick/initiator/linuxscsi.py:99 +#: cinder/brick/initiator/linuxscsi.py:107 +#: cinder/brick/initiator/linuxscsi.py:124 #, python-format -msgid "Invalid update setting: '%s'" +msgid "multipath call failed exit (%(code)s)" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:170 +#: cinder/brick/initiator/linuxscsi.py:145 #, python-format -msgid "Putting host %(host)s in maintenance mode %(mode)s." +msgid "Couldn't find multipath device %(line)s" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:181 +#: cinder/brick/initiator/linuxscsi.py:149 #, python-format -msgid "Setting host %(host)s to %(state)s." +msgid "Found multipath device = %(mdev)s" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:230 -msgid "Describe-resource is admin only functionality" +#: cinder/brick/iscsi/iscsi.py:140 +msgid "Attempting recreate of backing lun..." msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:238 -msgid "Host not found" +#: cinder/brick/iscsi/iscsi.py:158 +#, python-format +msgid "" +"Failed to recover attempt to create iscsi backing lun for volume " +"id:%(vol_id)s: %(e)s" msgstr "" -#: cinder/api/openstack/compute/contrib/keypairs.py:70 -msgid "Keypair name contains unsafe characters" +#: cinder/brick/iscsi/iscsi.py:177 +#, python-format +msgid "Creating iscsi_target for: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/keypairs.py:95 -msgid "Keypair name must be between 1 and 255 characters long" +#: cinder/brick/iscsi/iscsi.py:184 +#, python-format +msgid "" +"Created volume path %(vp)s,\n" +"content: %(vc)%" msgstr "" -#: cinder/api/openstack/compute/contrib/keypairs.py:100 -#, python-format -msgid "Key pair '%s' already exists." 
-msgstr "" - -#: cinder/api/openstack/compute/contrib/multinic.py:52 -msgid "Missing 'networkId' argument for addFixedIp" -msgstr "" - -#: cinder/api/openstack/compute/contrib/multinic.py:68 -msgid "Missing 'address' argument for removeFixedIp" -msgstr "" +#: cinder/brick/iscsi/iscsi.py:216 cinder/brick/iscsi/iscsi.py:365 +#, fuzzy, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "存在しないコンソール %(console_id)s を削除しようとしました" -#: cinder/api/openstack/compute/contrib/multinic.py:77 +#: cinder/brick/iscsi/iscsi.py:227 #, python-format -msgid "Unable to find address %r" +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:62 +#: cinder/brick/iscsi/iscsi.py:258 #, python-format -msgid "Network does not have %s action" +msgid "Removing iscsi_target for: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:70 +#: cinder/brick/iscsi/iscsi.py:262 #, python-format -msgid "Disassociating network with id %s" +msgid "Volume path %s does not exist, nothing to remove." msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:74 -#: cinder/api/openstack/compute/contrib/networks.py:91 -#: cinder/api/openstack/compute/contrib/networks.py:101 -msgid "Network not found" +#: cinder/brick/iscsi/iscsi.py:280 +#, fuzzy, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "存在しないコンソール %(console_id)s を削除しようとしました" + +#: cinder/brick/iscsi/iscsi.py:290 cinder/brick/iscsi/iscsi.py:550 +msgid "valid iqn needed for show_target" msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:87 +#: cinder/brick/iscsi/iscsi.py:375 #, python-format -msgid "Showing network with id %s" +msgid "Removing iscsi_target for volume: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:97 -#, python-format -msgid "Deleting network with id %s" +#: cinder/brick/iscsi/iscsi.py:469 +msgid "cinder-rtstool is not installed correctly" msgstr "" -#: cinder/api/openstack/compute/contrib/scheduler_hints.py:41 -msgid "Malformed scheduler_hints attribute" +#: cinder/brick/iscsi/iscsi.py:489 +#, python-format +msgid "Creating iscsi_target for volume: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:222 -msgid "Security group id should be integer" +#: cinder/brick/iscsi/iscsi.py:513 cinder/brick/iscsi/iscsi.py:522 +#, python-format +msgid "Failed to create iscsi target for volume id:%s." msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:243 +#: cinder/brick/iscsi/iscsi.py:532 #, fuzzy, python-format -msgid "Security group is still in use" -msgstr "Revoke security group ingress: セキュリティグループ許可 %s の取消" +msgid "Removing iscsi_target: %s" +msgstr "Rebooting instance: インスタンス %s を再起動します。" -#: cinder/api/openstack/compute/contrib/security_groups.py:295 +#: cinder/brick/iscsi/iscsi.py:542 #, python-format -msgid "Security group %s already exists" +msgid "Failed to remove iscsi target for volume id:%s." msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:315 +#: cinder/brick/iscsi/iscsi.py:571 #, python-format -msgid "Security group %s is not a string or unicode" +msgid "Failed to add initiator iqn %s to target" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:318 -#, python-format -msgid "Security group %s cannot be empty." 
+#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:321 +#: cinder/brick/local_dev/lvm.py:76 cinder/brick/local_dev/lvm.py:158 +#: cinder/brick/local_dev/lvm.py:474 cinder/brick/local_dev/lvm.py:503 +#: cinder/brick/local_dev/lvm.py:546 cinder/brick/local_dev/lvm.py:609 #, python-format -msgid "Security group %s should not be greater than 255 characters." +msgid "Cmd :%s" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:348 -msgid "Parent group id is not integer" +#: cinder/brick/local_dev/lvm.py:77 cinder/brick/local_dev/lvm.py:159 +#: cinder/brick/local_dev/lvm.py:475 cinder/brick/local_dev/lvm.py:504 +#: cinder/brick/local_dev/lvm.py:547 cinder/brick/local_dev/lvm.py:610 +#, python-format +msgid "StdOut :%s" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:351 +#: cinder/brick/local_dev/lvm.py:78 cinder/brick/local_dev/lvm.py:160 +#: cinder/brick/local_dev/lvm.py:476 cinder/brick/local_dev/lvm.py:505 +#: cinder/brick/local_dev/lvm.py:548 cinder/brick/local_dev/lvm.py:611 #, python-format -msgid "Security group (%s) not found" +msgid "StdErr :%s" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:369 -msgid "Not enough parameters to build a valid rule." -msgstr "有効なルールを作成する為の十分なパラメータがありません" - -#: cinder/api/openstack/compute/contrib/security_groups.py:376 -#, python-format -msgid "This rule already exists in group %s" -msgstr "指定されたルールは既にグループ %s に存在しています。" +#: cinder/brick/local_dev/lvm.py:82 +#, fuzzy, python-format +msgid "Unable to locate Volume Group %s" +msgstr "ボリューム %s の存在が確認できません。" -#: cinder/api/openstack/compute/contrib/security_groups.py:414 -msgid "Parent or group id is not integer" +#: cinder/brick/local_dev/lvm.py:157 +msgid "Error querying thin pool about data_percent" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:507 -msgid "Rule id is not integer" -msgstr "" +#: cinder/brick/local_dev/lvm.py:370 +#, fuzzy, python-format +msgid "Unable to find VG: %s" +msgstr "VBD %s から SRを取得できません。" -#: cinder/api/openstack/compute/contrib/security_groups.py:510 -#, python-format -msgid "Rule (%s) not found" +#: cinder/brick/local_dev/lvm.py:420 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." 
msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:574 -#: cinder/api/openstack/compute/contrib/security_groups.py:607 -msgid "Security group not specified" +#: cinder/brick/local_dev/lvm.py:473 +msgid "Error creating Volume" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:578 -#: cinder/api/openstack/compute/contrib/security_groups.py:611 -msgid "Security group name cannot be empty" +#: cinder/brick/local_dev/lvm.py:489 +#, fuzzy, python-format +msgid "Unable to find LV: %s" +msgstr "VBD %s から SRを取得できません。" + +#: cinder/brick/local_dev/lvm.py:502 +msgid "Error creating snapshot" msgstr "" -#: cinder/api/openstack/compute/contrib/server_start_stop.py:45 -#, python-format -msgid "start instance %r" +#: cinder/brick/local_dev/lvm.py:545 +msgid "Error activating LV" msgstr "" -#: cinder/api/openstack/compute/contrib/server_start_stop.py:54 +#: cinder/brick/local_dev/lvm.py:563 #, python-format -msgid "stop instance %r" +msgid "Error reported running lvremove: CMD: %(command)s, RESPONSE: %(response)s" msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:73 -#: cinder/api/openstack/volume/volumes.py:106 -#, python-format -msgid "vol=%s" +#: cinder/brick/local_dev/lvm.py:568 +msgid "Attempting udev settle and retry of lvremove..." msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:146 -#: cinder/api/openstack/volume/volumes.py:184 -#, python-format -msgid "Delete volume with id: %s" +#: cinder/brick/local_dev/lvm.py:608 +msgid "Error extending Volume" msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:329 -#, python-format -msgid "Attach volume %(volume_id)s to instance %(server_id)s at %(device)s" +#: cinder/brick/remotefs/remotefs.py:39 +msgid "nfs_mount_point_base required" msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:481 -#: cinder/api/openstack/volume/snapshots.py:110 -#, python-format -msgid "Delete snapshot with id: %s" +#: cinder/brick/remotefs/remotefs.py:45 +msgid "glusterfs_mount_point_base required" msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:524 -#: cinder/api/openstack/volume/snapshots.py:150 +#: cinder/brick/remotefs/remotefs.py:86 #, python-format -msgid "Create snapshot from volume %s" +msgid "Already mounted: %s" msgstr "" -#: cinder/auth/fakeldap.py:33 -msgid "Attempted to instantiate singleton" -msgstr "シングルトンをインスタンス化しようとしました。" - -#: cinder/auth/ldapdriver.py:650 -#, python-format -msgid "" -"Attempted to remove the last member of a group. Deleting the group at %s " -"instead." -msgstr "グループの最後のメンバーを削除しようとしました。代わりにグループ %s を削除してください。" +#: cinder/common/config.py:125 +msgid "Deploy v1 of the Cinder API." +msgstr "" -#: cinder/auth/manager.py:298 -#, python-format -msgid "Looking up user: %r" -msgstr "ユーザ %r を検索します。" +#: cinder/common/config.py:128 +msgid "Deploy v2 of the Cinder API." +msgstr "" -#: cinder/auth/manager.py:302 -#, python-format -msgid "Failed authorization for access key %s" -msgstr "Failed authorization: アクセスキー %s の認証に失敗しました。" +#: cinder/common/sqlalchemyutils.py:66 +#: cinder/openstack/common/db/sqlalchemy/utils.py:72 +msgid "Id not in sort_keys; is sort_keys unique?" 
+msgstr "" -#: cinder/auth/manager.py:308 -#, python-format -msgid "Using project name = user name (%s)" -msgstr "ユーザ名 (%s) をプロジェクト名として使用します。" +#: cinder/common/sqlalchemyutils.py:114 +#: cinder/openstack/common/db/sqlalchemy/utils.py:120 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" -#: cinder/auth/manager.py:315 +#: cinder/compute/nova.py:97 #, python-format -msgid "failed authorization: no project named %(pjid)s (user=%(uname)s)" -msgstr "許可されません: %(pjid)s という名称のプロジェクトはありません (ユーザ=%(uname)s)" +msgid "Novaclient connection created using URL: %s" +msgstr "" -#: cinder/auth/manager.py:324 -#, python-format -msgid "" -"Failed authorization: user %(uname)s not admin and not member of project " -"%(pjname)s" -msgstr "許可されません: ユーザ %(uname)s は管理者でもプロジェクト %(pjname)s のメンバでもありません。" +#: cinder/db/sqlalchemy/api.py:63 +msgid "Use of empty request context is deprecated" +msgstr "Request context を空とすることは非推奨です。" -#: cinder/auth/manager.py:331 cinder/auth/manager.py:343 +#: cinder/db/sqlalchemy/api.py:190 #, python-format -msgid "user.secret: %s" +msgid "Unrecognized read_deleted value '%s'" msgstr "" -#: cinder/auth/manager.py:332 cinder/auth/manager.py:344 +#: cinder/db/sqlalchemy/api.py:843 #, python-format -msgid "expected_signature: %s" +msgid "Change will make usage less than 0 for the following resources: %s" msgstr "" -#: cinder/auth/manager.py:333 cinder/auth/manager.py:345 +#: cinder/db/sqlalchemy/api.py:1842 #, python-format -msgid "signature: %s" +msgid "VolumeType %s deletion failed, VolumeType in use." msgstr "" -#: cinder/auth/manager.py:335 cinder/auth/manager.py:357 -#, python-format -msgid "Invalid signature for user %s" -msgstr "Invalid signature: ユーザ %s の署名が不正です。" - -#: cinder/auth/manager.py:353 +#: cinder/db/sqlalchemy/api.py:2530 #, python-format -msgid "host_only_signature: %s" +msgid "No backup with id %s" msgstr "" -#: cinder/auth/manager.py:449 -msgid "Must specify project" -msgstr "プロジェクトを指定してください。" - -#: cinder/auth/manager.py:490 -#, python-format -msgid "Adding role %(role)s to user %(uid)s in project %(pid)s" -msgstr "プロジェクト %(pid)s のユーザ %(uid)s にロール %(role)s を付与します。" - -#: cinder/auth/manager.py:493 -#, python-format -msgid "Adding sitewide role %(role)s to user %(uid)s" -msgstr "サイト共通のロール %(role)s をユーザ %(uid)s に付与します。" +#: cinder/db/sqlalchemy/api.py:2615 +#, fuzzy +msgid "Volume must be available" +msgstr "ボリュームのステータス(status)は available でなければなりません。" -#: cinder/auth/manager.py:519 +#: cinder/db/sqlalchemy/api.py:2639 #, python-format -msgid "Removing role %(role)s from user %(uid)s on project %(pid)s" -msgstr "プロジェクト %(pid)s のユーザ %(uid)s からロール %(role)s を削除します。" +msgid "Volume in unexpected state %s, expected awaiting-transfer" +msgstr "" -#: cinder/auth/manager.py:522 +#: cinder/db/sqlalchemy/api.py:2662 #, python-format -msgid "Removing sitewide role %(role)s from user %(uid)s" -msgstr "ユーザ %(uid)s からサイト共通のロール %(role)s を削除します。" +msgid "" +"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " +"%(status)s, expected awaiting-transfer" +msgstr "" -#: cinder/auth/manager.py:595 -#, python-format -msgid "Created project %(name)s with manager %(manager_user)s" -msgstr "プロジェクト %(name)s を管理者 %(manager_user)s で作成しました。" +#: cinder/db/sqlalchemy/migration.py:37 +msgid "version should be an integer" +msgstr "" -#: cinder/auth/manager.py:613 -#, python-format -msgid "modifying project %s" -msgstr "modifying project: プロジェクト %s を更新します。" +#: cinder/db/sqlalchemy/migration.py:64 +msgid "Upgrade DB using Essex release first." 
+msgstr "" -#: cinder/auth/manager.py:625 -#, python-format -msgid "Adding user %(uid)s to project %(pid)s" -msgstr "ユーザ %(uid)s をプロジェクト %(pid)s に追加します。" +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:240 +msgid "Exception while creating table." +msgstr "" -#: cinder/auth/manager.py:646 -#, python-format -msgid "Remove user %(uid)s from project %(pid)s" -msgstr "ユーザ %(uid)s をプロジェクト %(pid)s から削除します。" +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:269 +msgid "Downgrade from initial Cinder install is unsupported." +msgstr "" -#: cinder/auth/manager.py:676 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:49 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:74 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:105 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:45 +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:48 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:46 #, python-format -msgid "Deleting project %s" -msgstr "Deleting project: プロジェクト %s を削除します。" +msgid "Table |%s| not created!" +msgstr "" -#: cinder/auth/manager.py:734 -#, python-format -msgid "Created user %(rvname)s (admin: %(rvadmin)r)" -msgstr "ユーザ %(rvname)s を作成しました。(管理者: %(rvadmin)r)" +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:127 +msgid "Dropping foreign key reservations_ibfk_1 failed." +msgstr "" -#: cinder/auth/manager.py:743 -#, python-format -msgid "Deleting user %s" -msgstr "Deleting user: ユーザ %s を削除します。" +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:133 +msgid "quota_classes table not dropped" +msgstr "" -#: cinder/auth/manager.py:753 -#, python-format -msgid "Access Key change for user %s" -msgstr "Access Key change: ユーザ %s のアクセスキーを更新します。" +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:140 +msgid "quota_usages table not dropped" +msgstr "" -#: cinder/auth/manager.py:755 -#, python-format -msgid "Secret Key change for user %s" -msgstr "Secret Key change: ユーザ %s のシークレットキーを更新します。" +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:147 +msgid "reservations table not dropped" +msgstr "" -#: cinder/auth/manager.py:757 -#, python-format -msgid "Admin status set to %(admin)r for user %(uid)s" -msgstr "ユーザ %(uid)s に対して管理者状態が %(admin)r に設定されました。" +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:60 +msgid "Exception while creating table 'volume_glance_metadata'" +msgstr "" -#: cinder/auth/manager.py:802 -#, python-format -msgid "No vpn data for project %s" -msgstr "プロジェクト %s に関するvpnデータがありません。" +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:75 +msgid "volume_glance_metadata table not dropped" +msgstr "" -#: cinder/cloudpipe/pipelib.py:46 -#, fuzzy, python-format -msgid "Instance type for vpn instances" -msgstr "Get console output: インスタンス %s のコンソール出力を取得します。" +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:68 +msgid "backups table not dropped" +msgstr "" -#: cinder/cloudpipe/pipelib.py:49 -msgid "Template for cloudpipe instance boot script" +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:58 +msgid "snapshot_metadata table not dropped" msgstr "" -#: cinder/cloudpipe/pipelib.py:52 -msgid "Network to push into openvpn config" -msgstr 
"openvpnの設定に入れるネットワークの値" +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:61 +msgid "transfers table not dropped" +msgstr "" -#: cinder/cloudpipe/pipelib.py:55 -msgid "Netmask to push into openvpn config" -msgstr "openvpnの設定に入れるネットマスクの値" +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:31 +msgid "migrations table not dropped" +msgstr "" -#: cinder/cloudpipe/pipelib.py:107 +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:61 #, python-format -msgid "Launching VPN for %s" -msgstr "%s 用のVPNを起動します。" - -#: cinder/compute/api.py:141 -msgid "No compute host specified" +msgid "Table |%s| not created" msgstr "" -#: cinder/compute/api.py:144 +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:37 #, python-format -msgid "Unable to find host for Instance %s" +msgid "Exception while dropping table %s." msgstr "" -#: cinder/compute/api.py:192 +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:100 #, python-format -msgid "" -"Quota exceeded for %(pid)s, tried to set %(num_metadata)s metadata " -"properties" +msgid "Exception while creating table %s." msgstr "" -#: cinder/compute/api.py:203 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:34 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:43 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:49 #, python-format -msgid "Quota exceeded for %(pid)s, metadata property key or value too long" +msgid "Column |%s| not created!" msgstr "" -#: cinder/compute/api.py:257 -#, fuzzy, python-format -msgid "Cannot run any more instances of this type." -msgstr "インスタンスのクオータを超えました。このタイプにおいてはあと %s インスタンスしか実行できません。" - -#: cinder/compute/api.py:259 -#, fuzzy, python-format -msgid "Can only run %s more instances of this type." -msgstr "インスタンスのクオータを超えました。このタイプにおいてはあと %s インスタンスしか実行できません。" - -#: cinder/compute/api.py:261 -#, python-format -msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances. " +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:92 +msgid "encryption_key_id column not dropped from volumes" msgstr "" -#: cinder/compute/api.py:310 -msgid "Creating a raw instance" -msgstr "raw instanceを生成します。" +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:100 +msgid "encryption_key_id column not dropped from snapshots" +msgstr "" -#: cinder/compute/api.py:312 -#, python-format -msgid "Using Kernel=%(kernel_id)s, Ramdisk=%(ramdisk_id)s" +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:105 +msgid "volume_type_id column not dropped from snapshots" msgstr "" -#: cinder/compute/api.py:383 -#, python-format -msgid "Going to run %s instances..." -msgstr "%s 個のインスタンスの起動を始めます…" +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:113 +msgid "encryption table not dropped" +msgstr "" -#: cinder/compute/api.py:447 -#, python-format -msgid "bdm %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:49 +msgid "Table quality_of_service_specs not created!" msgstr "" -#: cinder/compute/api.py:474 -#, python-format -msgid "block_device_mapping %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:60 +msgid "Added qos_specs_id column to volume type table failed." 
msgstr "" -#: cinder/compute/api.py:591 -#, python-format -msgid "Sending create to scheduler for %(pid)s/%(uid)s's" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:85 +msgid "Dropping foreign key volume_types_ibfk_1 failed" msgstr "" -#: cinder/compute/api.py:871 -#, fuzzy, python-format -msgid "Going to try to soft delete instance" -msgstr "%s を停止しようとしています" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:93 +msgid "Dropping qos_specs_id column failed." +msgstr "" -#: cinder/compute/api.py:891 -msgid "No host for instance, deleting immediately" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:100 +msgid "Dropping quality_of_service_specs table failed." msgstr "" -#: cinder/compute/api.py:939 -#, fuzzy, python-format -msgid "Going to try to terminate instance" -msgstr "%s を停止しようとしています" +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:59 +msgid "volume_admin_metadata table not dropped" +msgstr "" -#: cinder/compute/api.py:977 -#, fuzzy, python-format -msgid "Going to try to stop instance" -msgstr "%s を停止しようとしています" +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:48 +msgid "" +"Found existing 'default' entries in the quota_classes table. Skipping " +"insertion of default values." +msgstr "" -#: cinder/compute/api.py:996 -#, fuzzy, python-format -msgid "Going to try to start instance" -msgstr "%s を停止しようとしています" +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:72 +msgid "Added default quota class data into the DB." +msgstr "" -#: cinder/compute/api.py:1000 -#, python-format -msgid "Instance %(instance_uuid)s is not stopped. (%(vm_state)s" +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:74 +msgid "Default quota class data not inserted into the DB." msgstr "" -#: cinder/compute/api.py:1071 cinder/volume/api.py:173 -#: cinder/volume/volume_types.py:64 +#: cinder/image/glance.py:161 cinder/image/glance.py:169 #, python-format -msgid "Searching by: %s" +msgid "Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s." msgstr "" -#: cinder/compute/api.py:1201 -#, python-format -msgid "Image type not recognized %s" +#: cinder/image/image_utils.py:94 cinder/image/image_utils.py:199 +msgid "'qemu-img info' parsing failed." msgstr "" -#: cinder/compute/api.py:1369 -msgid "flavor_id is None. Assuming migration." +#: cinder/image/image_utils.py:101 +#, python-format +msgid "fmt=%(fmt)s backed by: %(backing_file)s" msgstr "" -#: cinder/compute/api.py:1377 +#: cinder/image/image_utils.py:109 cinder/image/image_utils.py:192 #, python-format msgid "" -"Old instance type %(current_instance_type_name)s, new instance type " -"%(new_instance_type_name)s" +"Size is %(image_size)dGB and doesn't fit in a volume of size " +"%(volume_size)dGB." msgstr "" -#: cinder/compute/api.py:1644 +#: cinder/image/image_utils.py:157 #, python-format -msgid "multiple fixedips exist, using the first: %s" +msgid "" +"qemu-img is not installed and image is of type %s. Only RAW images can " +"be used if qemu-img is not installed." msgstr "" -#: cinder/compute/instance_types.py:57 cinder/compute/instance_types.py:65 -msgid "create arguments must be positive integers" +#: cinder/image/image_utils.py:164 +msgid "" +"qemu-img is not installed and the disk format is not specified. Only RAW" +" images can be used if qemu-img is not installed." 
msgstr "" -#: cinder/compute/instance_types.py:76 cinder/volume/volume_types.py:41 +#: cinder/image/image_utils.py:178 #, python-format -msgid "DB error: %s" +msgid "Copying image from %(tmp)s to volume %(dest)s - size: %(size)s" msgstr "" -#: cinder/compute/instance_types.py:86 +#: cinder/image/image_utils.py:206 #, python-format -msgid "Instance type %s not found for deletion" +msgid "fmt=%(fmt)s backed by:%(backing_file)s" msgstr "" -#: cinder/compute/manager.py:138 +#: cinder/image/image_utils.py:224 #, python-format -msgid "check_instance_lock: decorating: |%s|" -msgstr "check_instance_lock: decorating: |%s|" +msgid "Converted to %(vol_format)s, but format is now %(file_format)s" +msgstr "" -#: cinder/compute/manager.py:140 +#: cinder/image/image_utils.py:260 #, python-format -msgid "" -"check_instance_lock: arguments: |%(self)s| |%(context)s| " -"|%(instance_uuid)s|" +msgid "Converted to %(f1)s, but format is now %(f2)s" msgstr "" -#: cinder/compute/manager.py:144 -#, python-format -msgid "check_instance_lock: locked: |%s|" -msgstr "check_instance_lock: locked: |%s|" +#: cinder/keymgr/conf_key_mgr.py:78 +msgid "" +"config option keymgr.fixed_key has not been defined: some operations may " +"fail unexpectedly" +msgstr "" -#: cinder/compute/manager.py:146 -#, python-format -msgid "check_instance_lock: admin: |%s|" -msgstr "check_instance_lock: admin: |%s|" +#: cinder/keymgr/conf_key_mgr.py:80 +msgid "keymgr.fixed_key not defined" +msgstr "" -#: cinder/compute/manager.py:151 +#: cinder/keymgr/conf_key_mgr.py:134 #, python-format -msgid "check_instance_lock: executing: |%s|" -msgstr "check_instance_lock: executing: |%s|" +msgid "Not deleting key %s" +msgstr "" -#: cinder/compute/manager.py:155 +#: cinder/openstack/common/eventlet_backdoor.py:140 #, python-format -msgid "check_instance_lock: not executing |%s|" -msgstr "check_instance_lock: not executing |%s|" +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" +msgstr "" -#: cinder/compute/manager.py:201 +#: cinder/openstack/common/excutils.py:48 #, python-format -msgid "Unable to load the virtualization driver: %s" +msgid "Original exception being dropped: %s" msgstr "" -#: cinder/compute/manager.py:223 +#: cinder/openstack/common/fileutils.py:64 #, python-format -msgid "" -"Instance %(instance_uuid)s has been destroyed from under us while trying " -"to set it to ERROR" +msgid "Reloading cached file %s" msgstr "" -#: cinder/compute/manager.py:240 -#, python-format -msgid "Current state is %(drv_state)s, state in DB is %(db_state)s." +#: cinder/openstack/common/gettextutils.py:252 +msgid "Message objects do not support addition." msgstr "" -#: cinder/compute/manager.py:245 -msgid "Rebooting instance after cinder-compute restart." +#: cinder/openstack/common/gettextutils.py:261 +msgid "" +"Message objects do not support str() because they may contain non-ascii " +"characters. Please use unicode() or translate() instead." msgstr "" -#: cinder/compute/manager.py:255 -msgid "Hypervisor driver does not support firewall rules" +#: cinder/openstack/common/imageutils.py:96 +msgid "Snapshot list encountered but no header found!" msgstr "" -#: cinder/compute/manager.py:260 -msgid "Checking state" +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" msgstr "" -#: cinder/compute/manager.py:329 +#: cinder/openstack/common/lockutils.py:189 #, python-format -msgid "Setting up bdm %s" +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." 
msgstr "" -#: cinder/compute/manager.py:400 +#: cinder/openstack/common/lockutils.py:200 #, python-format -msgid "Instance %s already deleted from database. Attempting forceful vm deletion" +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." msgstr "" -#: cinder/compute/manager.py:406 -#, fuzzy, python-format -msgid "Exception encountered while terminating the instance %s" -msgstr "インスタンス %s を終了した後です。" +#: cinder/openstack/common/lockutils.py:227 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" -#: cinder/compute/manager.py:444 +#: cinder/openstack/common/lockutils.py:235 #, python-format -msgid "Instance %s not found." +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." msgstr "" -#: cinder/compute/manager.py:480 -msgid "Instance has already been created" -msgstr "インスタンスは既に生成されています。" +#: cinder/openstack/common/log.py:301 +#, fuzzy, python-format +msgid "Deprecated: %s" +msgstr "受信: %s" -#: cinder/compute/manager.py:523 +#: cinder/openstack/common/log.py:402 #, python-format -msgid "" -"image_id=%(image_id)s, image_size_bytes=%(size_bytes)d, " -"allowed_size_bytes=%(allowed_size_bytes)d" +msgid "Error loading logging config %(log_config)s: %(err_msg)s" msgstr "" -#: cinder/compute/manager.py:528 +#: cinder/openstack/common/log.py:453 #, python-format -msgid "" -"Image '%(image_id)s' size %(size_bytes)d exceeded instance_type allowed " -"size %(allowed_size_bytes)d" +msgid "syslog facility must be one of: %s" msgstr "" -#: cinder/compute/manager.py:538 -msgid "Starting instance..." +#: cinder/openstack/common/log.py:623 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" msgstr "" -#: cinder/compute/manager.py:548 -msgid "Skipping network allocation for instance" +#: cinder/openstack/common/loopingcall.py:82 +#, python-format +msgid "task run outlasted interval by %s sec" msgstr "" -#: cinder/compute/manager.py:561 -msgid "Instance failed network setup" +#: cinder/openstack/common/loopingcall.py:89 +#: cinder/tests/brick/test_brick_connector.py:466 +msgid "in fixed duration looping call" msgstr "" -#: cinder/compute/manager.py:565 +#: cinder/openstack/common/loopingcall.py:129 #, python-format -msgid "Instance network_info: |%s|" -msgstr "" - -#: cinder/compute/manager.py:578 -msgid "Instance failed block device setup" +msgid "Dynamic looping call sleeping for %.02f seconds" msgstr "" -#: cinder/compute/manager.py:594 -msgid "Instance failed to spawn" +#: cinder/openstack/common/loopingcall.py:136 +msgid "in dynamic looping call" msgstr "" -#: cinder/compute/manager.py:615 -msgid "Deallocating network for instance" +#: cinder/openstack/common/periodic_task.py:43 +#, python-format +msgid "Unexpected argument for periodic task creation: %(arg)s." 
msgstr "" -#: cinder/compute/manager.py:672 +#: cinder/openstack/common/periodic_task.py:134 #, python-format -msgid "%(action_str)s instance" +msgid "Skipping periodic task %(task)s because its interval is negative" msgstr "" -#: cinder/compute/manager.py:699 +#: cinder/openstack/common/periodic_task.py:139 #, python-format -msgid "Ignoring DiskNotFound: %s" +msgid "Skipping periodic task %(task)s because it is disabled" msgstr "" -#: cinder/compute/manager.py:708 +#: cinder/openstack/common/periodic_task.py:177 #, python-format -msgid "terminating bdm %s" +msgid "Running periodic task %(full_task_name)s" msgstr "" -#: cinder/compute/manager.py:742 cinder/compute/manager.py:1328 -#: cinder/compute/manager.py:1416 cinder/compute/manager.py:2501 +#: cinder/openstack/common/periodic_task.py:186 #, python-format -msgid "%s. Setting instance vm_state to ERROR" +msgid "Error during %(full_task_name)s: %(e)s" msgstr "" -#: cinder/compute/manager.py:811 +#: cinder/openstack/common/policy.py:149 #, python-format msgid "" -"Cannot rebuild instance [%(instance_uuid)s], because the given image does" -" not exist." +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." msgstr "" -#: cinder/compute/manager.py:816 +#: cinder/openstack/common/policy.py:163 #, python-format -msgid "Cannot rebuild instance [%(instance_uuid)s]: %(exc)s" +msgid "Failed to understand rule %(match)r" msgstr "" -#: cinder/compute/manager.py:823 +#: cinder/openstack/common/policy.py:173 #, python-format -msgid "Rebuilding instance %s" +msgid "Inheritance-based rules are deprecated; update _check_%s" msgstr "" -#: cinder/compute/manager.py:876 +#: cinder/openstack/common/policy.py:180 #, python-format -msgid "Rebooting instance %s" -msgstr "Rebooting instance: インスタンス %s を再起動します。" +msgid "No handler for matches of kind %s" +msgstr "" -#: cinder/compute/manager.py:891 +#: cinder/openstack/common/processutils.py:127 #, python-format -msgid "" -"trying to reboot a non-running instance: %(instance_uuid)s (state: " -"%(state)s expected: %(running)s)" +msgid "Got unknown keyword args to utils.execute: %r" msgstr "" -#: cinder/compute/manager.py:933 +#: cinder/openstack/common/processutils.py:142 #, python-format -msgid "instance %s: snapshotting" -msgstr "snapshotting: インスタンス %s のスナップショットを取得中" +msgid "Running cmd (subprocess): %s" +msgstr "コマンド実行(subprocess): %s" -#: cinder/compute/manager.py:939 +#: cinder/openstack/common/processutils.py:167 +#: cinder/openstack/common/processutils.py:239 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:345 #, python-format -msgid "" -"trying to snapshot a non-running instance: %(instance_uuid)s (state: " -"%(state)s expected: %(running)s)" -msgstr "" +msgid "Result was %s" +msgstr "コマンド実行結果: %s" -#: cinder/compute/manager.py:995 +#: cinder/openstack/common/processutils.py:179 #, python-format -msgid "Found %(num_images)d images (rotation: %(rotation)d)" +msgid "%r failed. Retrying." msgstr "" -#: cinder/compute/manager.py:1001 +#: cinder/openstack/common/processutils.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:318 #, python-format -msgid "Rotating out %d backups" -msgstr "" +msgid "Running cmd (SSH): %s" +msgstr "コマンド(SSH)を実行: %s" -#: cinder/compute/manager.py:1005 -#, python-format -msgid "Deleting image %s" +#: cinder/openstack/common/processutils.py:220 +msgid "Environment not supported over SSH" msgstr "" -#: cinder/compute/manager.py:1035 -#, python-format -msgid "Failed to set admin password. 
Instance %s is not running" +#: cinder/openstack/common/processutils.py:224 +msgid "process_input not supported over SSH" msgstr "" -#: cinder/compute/manager.py:1041 +#: cinder/openstack/common/service.py:175 +#: cinder/openstack/common/service.py:269 #, python-format -msgid "Instance %s: Root password set" +msgid "Caught %s, exiting" msgstr "" -#: cinder/compute/manager.py:1050 -msgid "set_admin_password is not implemented by this driver." +#: cinder/openstack/common/service.py:187 +msgid "Exception during rpc cleanup." msgstr "" -#: cinder/compute/manager.py:1064 -msgid "Error setting admin password" +#: cinder/openstack/common/service.py:238 +msgid "Parent process has died unexpectedly, exiting" msgstr "" -#: cinder/compute/manager.py:1079 -#, python-format -msgid "" -"trying to inject a file into a non-running instance: %(instance_uuid)s " -"(state: %(current_power_state)s expected: %(expected_state)s)" +#: cinder/openstack/common/service.py:275 +#, fuzzy +msgid "Unhandled exception" +msgstr "内側で発生した例外: %s" + +#: cinder/openstack/common/service.py:308 +msgid "Forking too fast, sleeping" msgstr "" -#: cinder/compute/manager.py:1084 +#: cinder/openstack/common/service.py:327 #, python-format -msgid "instance %(instance_uuid)s: injecting file to %(path)s" +msgid "Started child %d" msgstr "" -#: cinder/compute/manager.py:1098 +#: cinder/openstack/common/service.py:337 +#, fuzzy, python-format +msgid "Starting %d workers" +msgstr "開始アドレス" + +#: cinder/openstack/common/service.py:354 #, python-format -msgid "" -"trying to update agent on a non-running instance: %(instance_uuid)s " -"(state: %(current_power_state)s expected: %(expected_state)s)" +msgid "Child %(pid)d killed by signal %(sig)d" msgstr "" -#: cinder/compute/manager.py:1103 +#: cinder/openstack/common/service.py:358 #, python-format -msgid "instance %(instance_uuid)s: updating agent to %(url)s" +msgid "Child %(pid)s exited with status %(code)d" msgstr "" -#: cinder/compute/manager.py:1116 +#: cinder/openstack/common/service.py:362 #, python-format -msgid "instance %s: rescuing" -msgstr "Rescuing: インスタンス %s をレスキューします。" +msgid "pid %d not in child list" +msgstr "" -#: cinder/compute/manager.py:1141 +#: cinder/openstack/common/service.py:392 #, python-format -msgid "instance %s: unrescuing" -msgstr "Unrescuing: インスタンス %s をアンレスキューします。" - -#: cinder/compute/manager.py:1270 -msgid "destination same as source!" 
+msgid "Caught %s, stopping children" msgstr "" -#: cinder/compute/manager.py:1287 +#: cinder/openstack/common/service.py:410 #, python-format -msgid "instance %s: migrating" +msgid "Waiting on %d children to exit" msgstr "" -#: cinder/compute/manager.py:1471 +#: cinder/openstack/common/sslutils.py:98 #, python-format -msgid "instance %s: pausing" -msgstr "pausing: インスタンス %s を一時停止します。" +msgid "Invalid SSL version : %s" +msgstr "" -#: cinder/compute/manager.py:1489 +#: cinder/openstack/common/strutils.py:86 #, python-format -msgid "instance %s: unpausing" -msgstr "unpausing: インスタンス %s の一時停止を解除します。" +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" -#: cinder/compute/manager.py:1525 +#: cinder/openstack/common/strutils.py:182 #, python-format -msgid "instance %s: retrieving diagnostics" -msgstr "retrieving diagnostics: インスタンス %s の診断情報を取得します。" +msgid "Invalid string format: %s" +msgstr "" -#: cinder/compute/manager.py:1534 +#: cinder/openstack/common/strutils.py:189 #, python-format -msgid "instance %s: suspending" -msgstr "suspending: インスタンス %s をサスペンドします。" +msgid "Unknown byte multiplier: %s" +msgstr "" -#: cinder/compute/manager.py:1556 +#: cinder/openstack/common/versionutils.py:69 #, python-format -msgid "instance %s: resuming" -msgstr "resuming: インスタンス %s をレジュームします。" +msgid "" +"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and " +"may be removed in %(remove_in)s." +msgstr "" -#: cinder/compute/manager.py:1579 +#: cinder/openstack/common/versionutils.py:73 #, python-format -msgid "instance %s: locking" -msgstr "locking: インスタンス %s をロックします。" +msgid "" +"%(what)s is deprecated as of %(as_of)s and may be removed in " +"%(remove_in)s. It will not be superseded." +msgstr "" -#: cinder/compute/manager.py:1588 -#, python-format -msgid "instance %s: unlocking" -msgstr "unlocking: インスタンス %s のロックを解除します。" +#: cinder/openstack/common/crypto/utils.py:29 +msgid "An unknown error occurred in crypto utils." +msgstr "" -#: cinder/compute/manager.py:1596 +#: cinder/openstack/common/crypto/utils.py:36 #, python-format -msgid "instance %s: getting locked state" -msgstr "getting locked state: インスタンス %s のロックを取得しました。" +msgid "Block size of %(given)d is too big, max = %(maximum)d" +msgstr "" -#: cinder/compute/manager.py:1606 +#: cinder/openstack/common/crypto/utils.py:45 #, python-format -msgid "instance %s: reset network" -msgstr "インスタンス %s: ネットワークをリセットします" +msgid "Length of %(given)d is too long, max = %(maximum)d" +msgstr "" -#: cinder/compute/manager.py:1614 -#, python-format -msgid "instance %s: inject network info" +#: cinder/openstack/common/db/exception.py:44 +msgid "Invalid Parameter: Unicode is not supported by the current database." msgstr "" -#: cinder/compute/manager.py:1618 -#, python-format -msgid "network_info to inject: |%s|" +#: cinder/openstack/common/db/sqlalchemy/session.py:487 +msgid "DB exception wrapped." msgstr "" -#: cinder/compute/manager.py:1655 +#: cinder/openstack/common/db/sqlalchemy/session.py:538 #, python-format -msgid "instance %s: getting vnc console" +msgid "Got mysql server has gone away: %s" msgstr "" -#: cinder/compute/manager.py:1685 +#: cinder/openstack/common/db/sqlalchemy/session.py:610 #, python-format -msgid "Booting with volume %(volume_id)s at %(mountpoint)s" +msgid "SQL connection failed. %s attempts left." 
msgstr "" -#: cinder/compute/manager.py:1703 -#, python-format -msgid "" -"instance %(instance_uuid)s: attaching volume %(volume_id)s to " -"%(mountpoint)s" +#: cinder/openstack/common/db/sqlalchemy/utils.py:33 +msgid "Sort key supplied was not valid." msgstr "" -#: cinder/compute/manager.py:1705 +#: cinder/openstack/common/notifier/api.py:129 #, python-format -msgid "Attaching volume %(volume_id)s to %(mountpoint)s" +msgid "%s not in valid priorities" msgstr "" -#: cinder/compute/manager.py:1714 +#: cinder/openstack/common/notifier/api.py:145 #, python-format -msgid "instance %(instance_uuid)s: attach failed %(mountpoint)s, removing" +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" msgstr "" -#: cinder/compute/manager.py:1724 +#: cinder/openstack/common/notifier/api.py:164 #, python-format -msgid "Attach failed %(mountpoint)s, removing" +msgid "Failed to load notifier %s. These notifications will not be sent." msgstr "" -#: cinder/compute/manager.py:1752 -#, python-format -msgid "Detach volume %(volume_id)s from mountpoint %(mp)s" +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead." msgstr "" -#: cinder/compute/manager.py:1756 +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 #, python-format -msgid "Detaching volume from unknown instance %s" -msgstr "ボリュームを未知のインスタンス %s からデタッチします。" +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" -#: cinder/compute/manager.py:1822 +#: cinder/openstack/common/rpc/__init__.py:105 #, python-format msgid "" -"Creating tmpfile %s to notify to other compute nodes that they should " -"mount the same storage." +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." msgstr "" -#: cinder/compute/manager.py:1884 -msgid "Instance has no volume." +#: cinder/openstack/common/rpc/amqp.py:83 +msgid "Pool creating new connection" msgstr "" -#: cinder/compute/manager.py:1916 +#: cinder/openstack/common/rpc/amqp.py:208 #, python-format -msgid "plug_vifs() failed %(cnt)d.Retry up to %(max_retry)d for %(hostname)s." +msgid "no calling threads waiting for msg_id : %s, message : %s" msgstr "" -#: cinder/compute/manager.py:1973 +#: cinder/openstack/common/rpc/amqp.py:216 #, python-format -msgid "Pre live migration failed at %(dest)s" +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." msgstr "" -#: cinder/compute/manager.py:2000 -msgid "post_live_migration() is started.." -msgstr "" +#: cinder/openstack/common/rpc/amqp.py:299 +#, python-format +msgid "unpacked context: %s" +msgstr "context %s をアンパックしました。" -#: cinder/compute/manager.py:2030 -msgid "No floating_ip found" +#: cinder/openstack/common/rpc/amqp.py:345 +#, python-format +msgid "UNIQUE_ID is %s." msgstr "" -#: cinder/compute/manager.py:2038 -msgid "No floating_ip found." 
-msgstr "" +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "received %s" +msgstr "受信: %s" -#: cinder/compute/manager.py:2040 +#: cinder/openstack/common/rpc/amqp.py:422 #, python-format -msgid "" -"Live migration: Unexpected error: cannot inherit floating ip.\n" -"%(e)s" -msgstr "" +msgid "no method for message: %s" +msgstr "メッセージ %s に対するメソッドが存在しません。" -#: cinder/compute/manager.py:2073 +#: cinder/openstack/common/rpc/amqp.py:423 #, python-format -msgid "Migrating instance to %(dest)s finished successfully." -msgstr "" +msgid "No method for message: %s" +msgstr "メッセージ %s に対するメソッドが存在しません。" -#: cinder/compute/manager.py:2075 -msgid "" -"You may see the error \"libvirt: QEMU error: Domain not found: no domain " -"with matching name.\" This error can be safely ignored." +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:280 +#, python-format +msgid "Expected exception during message handling (%s)" msgstr "" -#: cinder/compute/manager.py:2090 -msgid "Post operation of migration started" +#: cinder/openstack/common/rpc/amqp.py:459 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" msgstr "" -#: cinder/compute/manager.py:2226 +#: cinder/openstack/common/rpc/amqp.py:594 #, python-format -msgid "Updated the info_cache for instance %s" +msgid "Making synchronous call on %s ..." msgstr "" -#: cinder/compute/manager.py:2255 -msgid "Updating bandwidth usage cache" +#: cinder/openstack/common/rpc/amqp.py:597 +#, python-format +msgid "MSG_ID is %s" +msgstr "MSG_IDは %s です。" + +#: cinder/openstack/common/rpc/amqp.py:631 +#, python-format +msgid "Making asynchronous cast on %s..." msgstr "" -#: cinder/compute/manager.py:2277 -msgid "Updating host status" +#: cinder/openstack/common/rpc/amqp.py:640 +msgid "Making asynchronous fanout cast..." msgstr "" -#: cinder/compute/manager.py:2305 +#: cinder/openstack/common/rpc/amqp.py:668 #, python-format -msgid "" -"Found %(num_db_instances)s in the database and %(num_vm_instances)s on " -"the hypervisor." +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." msgstr "" -#: cinder/compute/manager.py:2331 +#: cinder/openstack/common/rpc/common.py:104 #, python-format msgid "" -"During the sync_power process the instance %(uuid)s has moved from host " -"%(src)s to host %(dst)s" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." msgstr "" -#: cinder/compute/manager.py:2344 +#: cinder/openstack/common/rpc/common.py:121 #, python-format msgid "" -"Instance %s is in the process of migrating to this host. Wait next " -"sync_power cycle before setting power state to NOSTATE" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" msgstr "" -#: cinder/compute/manager.py:2350 -msgid "" -"Instance found in database but not known by hypervisor. Setting power " -"state to NOSTATE" +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" msgstr "" -#: cinder/compute/manager.py:2380 -msgid "FLAGS.reclaim_instance_interval <= 0, skipping..." +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." msgstr "" -#: cinder/compute/manager.py:2392 -msgid "Reclaiming deleted instance" +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." 
msgstr "" -#: cinder/compute/manager.py:2458 +#: cinder/openstack/common/rpc/common.py:151 #, python-format -msgid "" -"Detected instance with name label '%(name)s' which is marked as DELETED " -"but still present on host." +msgid "Specified RPC version, %(version)s, not supported by this endpoint." msgstr "" -#: cinder/compute/manager.py:2465 +#: cinder/openstack/common/rpc/common.py:156 #, python-format msgid "" -"Destroying instance with name label '%(name)s' which is marked as DELETED" -" but still present on host." +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." msgstr "" -#: cinder/compute/manager.py:2472 +#: cinder/openstack/common/rpc/common.py:280 #, python-format -msgid "Unrecognized value '%(action)s' for FLAGS.running_deleted_instance_action" +msgid "Failed to sanitize %(item)s. Key error %(err)s" msgstr "" -#: cinder/compute/manager.py:2542 +#: cinder/openstack/common/rpc/common.py:302 #, python-format -msgid "" -"Aggregate %(aggregate_id)s: unrecoverable state during operation on " -"%(host)s" -msgstr "" +msgid "Returning exception %s to caller" +msgstr "呼び出し元に 例外 %s を返却します。" -#: cinder/compute/utils.py:142 -msgid "v4 subnets are required for legacy nw_info" +#: cinder/openstack/common/rpc/impl_kombu.py:166 +#: cinder/openstack/common/rpc/impl_qpid.py:164 +msgid "Failed to process message... skipping it." msgstr "" -#: cinder/console/manager.py:77 cinder/console/vmrc_manager.py:70 -msgid "Adding console" -msgstr "コンソールを追加しています" - -#: cinder/console/manager.py:97 +#: cinder/openstack/common/rpc/impl_kombu.py:477 #, python-format -msgid "Tried to remove non-existent console %(console_id)s." -msgstr "存在しないコンソール %(console_id)s を削除しようとしました" +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" -#: cinder/console/vmrc_manager.py:122 +#: cinder/openstack/common/rpc/impl_kombu.py:499 #, python-format -msgid "Tried to remove non-existent console %(console_id)s." +msgid "Connected to AMQP server on %(hostname)s:%(port)d" msgstr "" -#: cinder/console/vmrc_manager.py:125 +#: cinder/openstack/common/rpc/impl_kombu.py:536 #, python-format -msgid "Removing console %(console_id)s." +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" msgstr "" -#: cinder/console/xvp.py:98 -msgid "Rebuilding xvp conf" -msgstr "xvp 設定を再構築しています" - -#: cinder/console/xvp.py:116 +#: cinder/openstack/common/rpc/impl_kombu.py:552 #, python-format -msgid "Re-wrote %s" -msgstr "%s を再度書き込みました" - -#: cinder/console/xvp.py:121 -msgid "Stopping xvp" -msgstr "xvp を停止しています" - -#: cinder/console/xvp.py:134 -msgid "Starting xvp" -msgstr "xvp を開始しています" +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" -#: cinder/console/xvp.py:141 +#: cinder/openstack/common/rpc/impl_kombu.py:606 +#: cinder/openstack/common/rpc/impl_qpid.py:507 #, python-format -msgid "Error starting xvp: %s" -msgstr "xvp の開始中にエラー: %s" - -#: cinder/console/xvp.py:144 -msgid "Restarting xvp" -msgstr "xvp を再起動しています" - -#: cinder/console/xvp.py:146 -msgid "xvp not running..." 
-msgstr "xvp が実行されていません…" +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" -#: cinder/consoleauth/manager.py:63 +#: cinder/openstack/common/rpc/impl_kombu.py:624 +#: cinder/openstack/common/rpc/impl_qpid.py:522 #, python-format -msgid "Deleting Expired Token: (%s)" +msgid "Timed out waiting for RPC response: %s" msgstr "" -#: cinder/consoleauth/manager.py:75 +#: cinder/openstack/common/rpc/impl_kombu.py:628 +#: cinder/openstack/common/rpc/impl_qpid.py:526 #, python-format -msgid "Received Token: %(token)s, %(token_dict)s)" +msgid "Failed to consume message from queue: %s" msgstr "" -#: cinder/consoleauth/manager.py:79 +#: cinder/openstack/common/rpc/impl_kombu.py:667 +#: cinder/openstack/common/rpc/impl_qpid.py:561 #, python-format -msgid "Checking Token: %(token)s, %(token_valid)s)" +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" msgstr "" -#: cinder/db/sqlalchemy/api.py:57 -msgid "Use of empty request context is deprecated" -msgstr "Request context を空とすることは非推奨です。" - -#: cinder/db/sqlalchemy/api.py:198 +#: cinder/openstack/common/rpc/impl_qpid.py:84 #, python-format -msgid "Unrecognized read_deleted value '%s'" +msgid "Invalid value for qpid_topology_version: %d" msgstr "" -#: cinder/db/sqlalchemy/api.py:516 cinder/db/sqlalchemy/api.py:551 +#: cinder/openstack/common/rpc/impl_qpid.py:455 #, python-format -msgid "No ComputeNode for %(host)s" +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" msgstr "" -#: cinder/db/sqlalchemy/api.py:4019 cinder/db/sqlalchemy/api.py:4045 +#: cinder/openstack/common/rpc/impl_qpid.py:461 #, python-format -msgid "No backend config with id %(sm_backend_id)s" +msgid "Connected to AMQP server on %s" msgstr "" -#: cinder/db/sqlalchemy/api.py:4103 -#, python-format -msgid "No sm_flavor called %(sm_flavor)s" +#: cinder/openstack/common/rpc/impl_qpid.py:474 +msgid "Re-established AMQP queues" msgstr "" -#: cinder/db/sqlalchemy/api.py:4147 -#, python-format -msgid "No sm_volume with id %(volume_id)s" +#: cinder/openstack/common/rpc/impl_qpid.py:534 +msgid "Error processing message. Skipping it." msgstr "" -#: cinder/db/sqlalchemy/migration.py:66 -msgid "python-migrate is not installed. Exiting." -msgstr "python-migrate がインストールされていません。終了します。" +#: cinder/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." +msgstr "" -#: cinder/db/sqlalchemy/migration.py:78 -msgid "version should be an integer" +#: cinder/openstack/common/rpc/impl_zmq.py:101 +#, python-format +msgid "Deserializing: %s" msgstr "" -#: cinder/db/sqlalchemy/session.py:137 +#: cinder/openstack/common/rpc/impl_zmq.py:136 #, python-format -msgid "SQL connection failed. %s attempts left." +msgid "Connecting to %(addr)s with %(type)s" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:48 -msgid "interface column not added to networks table" +#: cinder/openstack/common/rpc/impl_zmq.py:137 +#, python-format +msgid "-> Subscribed to %(subscribe)s" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:80 -#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:54 -#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:61 -#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:48 +#: cinder/openstack/common/rpc/impl_zmq.py:138 #, python-format -msgid "Table |%s| not created!" 
+msgid "-> bind: %(bind)s" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:87 -msgid "VIF column not added to fixed_ips table" +#: cinder/openstack/common/rpc/impl_zmq.py:146 +msgid "Could not open socket." msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:97 +#: cinder/openstack/common/rpc/impl_zmq.py:158 #, python-format -msgid "join list for moving mac_addresses |%s|" +msgid "Subscribing to %s" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:39 -#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:60 -#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:61 -#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:99 -msgid "foreign key constraint couldn't be added" +#: cinder/openstack/common/rpc/impl_zmq.py:200 +msgid "You cannot recv on this socket." msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:58 -msgid "foreign key constraint couldn't be dropped" +#: cinder/openstack/common/rpc/impl_zmq.py:205 +msgid "You cannot send on this socket." msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/045_add_network_priority.py:34 -msgid "priority column not added to networks table" -msgstr "" +#: cinder/openstack/common/rpc/impl_zmq.py:267 +#, fuzzy, python-format +msgid "Running func with context: %s" +msgstr "context %s をアンパックしました。" -#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:41 -#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:42 -#: cinder/db/sqlalchemy/migrate_repo/versions/064_change_instance_id_to_uuid_in_instance_actions.py:56 -#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:68 -msgid "foreign key constraint couldn't be removed" +#: cinder/openstack/common/rpc/impl_zmq.py:305 +msgid "Sending reply" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/049_add_instances_progress.py:34 -msgid "progress column not added to instances table" +#: cinder/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/055_convert_flavor_id_to_str.py:97 -#, python-format -msgid "" -"Could not cast flavorid to integer: %s. Set flavorid to an integer-like " -"string to downgrade." -msgstr "" +#: cinder/openstack/common/rpc/impl_zmq.py:371 +#, fuzzy +msgid "Registering reactor" +msgstr "De-registering image: イメージ %s を登録解除します。" -#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:69 -msgid "instance_info_caches tables not dropped" +#: cinder/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/069_block_migration.py:41 -msgid "progress column not added to compute_nodes table" +#: cinder/openstack/common/rpc/impl_zmq.py:387 +msgid "Consuming socket" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:76 -msgid "dns_domains table not dropped" +#: cinder/openstack/common/rpc/impl_zmq.py:437 +#, python-format +msgid "Creating proxy for topic: %s" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:60 -msgid "quota_classes table not dropped" +#: cinder/openstack/common/rpc/impl_zmq.py:443 +msgid "Topic contained dangerous characters." 
msgstr "" -#: cinder/image/glance.py:147 -msgid "Connection error contacting glance server, retrying" +#: cinder/openstack/common/rpc/impl_zmq.py:475 +msgid "Topic socket file creation failed." msgstr "" -#: cinder/image/glance.py:153 cinder/network/quantum/melange_connection.py:104 -msgid "Maximum attempts reached" +#: cinder/openstack/common/rpc/impl_zmq.py:481 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." msgstr "" -#: cinder/image/glance.py:278 +#: cinder/openstack/common/rpc/impl_zmq.py:497 #, python-format -msgid "Creating image in Glance. Metadata passed in %s" +msgid "Required IPC directory does not exist at %s" msgstr "" -#: cinder/image/glance.py:281 +#: cinder/openstack/common/rpc/impl_zmq.py:506 #, python-format -msgid "Metadata after formatting for Glance %s" +msgid "Permission denied to IPC directory at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:509 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." msgstr "" -#: cinder/image/glance.py:289 +#: cinder/openstack/common/rpc/impl_zmq.py:543 #, python-format -msgid "Metadata returned from Glance formatted for Base %s" +msgid "CONSUMER RECEIVED DATA: %s" msgstr "" -#: cinder/image/glance.py:331 cinder/image/glance.py:335 -msgid "Not the image owner" +#: cinder/openstack/common/rpc/impl_zmq.py:562 +msgid "ZMQ Envelope version unsupported or unknown." msgstr "" -#: cinder/image/glance.py:410 -#, python-format -msgid "%(timestamp)s does not follow any of the signatures: %(iso_formats)s" +#: cinder/openstack/common/rpc/impl_zmq.py:590 +msgid "Skipping topic registration. Already registered." msgstr "" -#: cinder/image/s3.py:309 +#: cinder/openstack/common/rpc/impl_zmq.py:597 #, python-format -msgid "Failed to download %(image_location)s to %(image_path)s" +msgid "Consumer is a zmq.%s" msgstr "" -#: cinder/image/s3.py:328 -#, python-format -msgid "Failed to decrypt %(image_location)s to %(image_path)s" +#: cinder/openstack/common/rpc/impl_zmq.py:649 +msgid "Creating payload" msgstr "" -#: cinder/image/s3.py:340 -#, python-format -msgid "Failed to untar %(image_location)s to %(image_path)s" +#: cinder/openstack/common/rpc/impl_zmq.py:662 +msgid "Creating queue socket for reply waiter" msgstr "" -#: cinder/image/s3.py:353 -#, python-format -msgid "Failed to upload %(image_location)s to %(image_path)s" +#: cinder/openstack/common/rpc/impl_zmq.py:675 +msgid "Sending cast" msgstr "" -#: cinder/image/s3.py:379 -#, python-format -msgid "Failed to decrypt private key: %s" -msgstr "プライベートキーの復号に失敗しました: %s" +#: cinder/openstack/common/rpc/impl_zmq.py:678 +msgid "Cast sent; Waiting reply" +msgstr "" -#: cinder/image/s3.py:387 -#, python-format -msgid "Failed to decrypt initialization vector: %s" -msgstr "初期化ベクタの復号に失敗しました: %s" +#: cinder/openstack/common/rpc/impl_zmq.py:681 +#, fuzzy, python-format +msgid "Received message: %s" +msgstr "受信: %s" -#: cinder/image/s3.py:398 -#, python-format -msgid "Failed to decrypt image file %(image_file)s: %(err)s" -msgstr "イメージファイル %(image_file)s の復号に失敗しました: %(err)s" +#: cinder/openstack/common/rpc/impl_zmq.py:682 +msgid "Unpacking response" +msgstr "" -#: cinder/image/s3.py:410 -msgid "Unsafe filenames in image" +#: cinder/openstack/common/rpc/impl_zmq.py:691 +msgid "Unsupported or unknown ZMQ envelope returned." 
msgstr "" -#: cinder/ipv6/account_identifier.py:38 cinder/ipv6/rfc2462.py:34 -#, python-format -msgid "Bad mac for to_global_ipv6: %s" +#: cinder/openstack/common/rpc/impl_zmq.py:698 +msgid "RPC Message Invalid." msgstr "" -#: cinder/ipv6/account_identifier.py:40 cinder/ipv6/rfc2462.py:36 +#: cinder/openstack/common/rpc/impl_zmq.py:721 #, python-format -msgid "Bad prefix for to_global_ipv6: %s" +msgid "%(msg)s" msgstr "" -#: cinder/ipv6/account_identifier.py:42 +#: cinder/openstack/common/rpc/impl_zmq.py:724 #, python-format -msgid "Bad project_id for to_global_ipv6: %s" +msgid "Sending message(s) to: %s" msgstr "" -#: cinder/network/ldapdns.py:321 -msgid "This driver only supports type 'a' entries." +#: cinder/openstack/common/rpc/impl_zmq.py:728 +msgid "No matchmaker results. Not casting." msgstr "" -#: cinder/network/linux_net.py:166 -#, python-format -msgid "Attempted to remove chain %s which does not exist" +#: cinder/openstack/common/rpc/impl_zmq.py:731 +msgid "No match from matchmaker." msgstr "" -#: cinder/network/linux_net.py:192 +#: cinder/openstack/common/rpc/impl_zmq.py:771 #, python-format -msgid "Unknown chain: %r" +msgid "topic is %s." msgstr "" -#: cinder/network/linux_net.py:215 +#: cinder/openstack/common/rpc/impl_zmq.py:815 #, python-format -msgid "" -"Tried to remove rule that was not there: %(chain)r %(rule)r %(wrap)r " -"%(top)r" +msgid "rpc_zmq_matchmaker = %(orig)s is deprecated; use %(new)s instead" msgstr "" -#: cinder/network/linux_net.py:335 -msgid "IPTablesManager.apply completed with success" +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." msgstr "" -#: cinder/network/linux_net.py:694 -#, python-format -msgid "Hupping dnsmasq threw %s" -msgstr "dnsmasqに対してhupを送信しましたが %s が発生しました。" - -#: cinder/network/linux_net.py:696 -#, python-format -msgid "Pid %d is stale, relaunching dnsmasq" -msgstr "Pid %d は無効です。dnsmasqを再実行します。" +#: cinder/openstack/common/rpc/matchmaker.py:89 +#, fuzzy +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "xenapi.fake には %s が実装されていません。" -#: cinder/network/linux_net.py:756 +#: cinder/openstack/common/rpc/matchmaker.py:239 #, python-format -msgid "killing radvd threw %s" -msgstr "radvd 停止が %s 例外を発行しました" +msgid "Matchmaker unregistered: %s, %s" +msgstr "" -#: cinder/network/linux_net.py:758 -#, python-format -msgid "Pid %d is stale, relaunching radvd" -msgstr "Pid %d がストールしているので radvd を再実行しています…" +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." 
+msgstr "" -#: cinder/network/linux_net.py:967 +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#: cinder/openstack/common/rpc/matchmaker_ring.py:79 +#: cinder/openstack/common/rpc/matchmaker_ring.py:97 #, python-format -msgid "Starting VLAN inteface %s" -msgstr "VLANインタフェース %s を開始します。" +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" -#: cinder/network/linux_net.py:999 +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:54 #, python-format -msgid "Starting Bridge interface for %s" -msgstr "%s 用のブリッジインタフェースを開始します。" - -#: cinder/network/linux_net.py:1142 -#, fuzzy, python-format -msgid "Starting bridge %s " -msgstr "%s 用のブリッジインタフェースを開始します。" - -#: cinder/network/linux_net.py:1149 -#, fuzzy, python-format -msgid "Done starting bridge %s" -msgstr "xvp の開始中にエラー: %s" +msgid "extra_spec requirement '%(req)s' does not match '%(cap)s'" +msgstr "" -#: cinder/network/linux_net.py:1167 +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:67 #, python-format -msgid "Failed unplugging gateway interface '%s'" +msgid "%(host_state)s fails resource_type extra_specs requirements" msgstr "" -#: cinder/network/linux_net.py:1170 -#, python-format -msgid "Unplugged gateway interface '%s'" +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:43 +msgid "Re-scheduling is disabled." msgstr "" -#: cinder/network/manager.py:291 +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:52 #, python-format -msgid "Fixed ip %(fixed_ip_id)s not found" +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" msgstr "" -#: cinder/network/manager.py:300 cinder/network/manager.py:496 -#, python-format -msgid "Interface %(interface)s not found" +#: cinder/scheduler/driver.py:69 +msgid "Must implement host_passes_filters" msgstr "" -#: cinder/network/manager.py:315 -#, python-format -msgid "floating IP allocation for instance |%s|" +#: cinder/scheduler/driver.py:74 +msgid "Must implement find_retype_host" msgstr "" -#: cinder/network/manager.py:353 +#: cinder/scheduler/driver.py:78 +msgid "Must implement a fallback schedule" +msgstr "予備の(fallback)スケジューラを実装する必要があります。" + +#: cinder/scheduler/driver.py:82 +#, fuzzy +msgid "Must implement schedule_create_volume" +msgstr "予備の(fallback)スケジューラを実装する必要があります。" + +#: cinder/scheduler/filter_scheduler.py:98 #, python-format -msgid "floating IP deallocation for instance |%s|" +msgid "cannot place volume %(id)s on %(host)s" msgstr "" -#: cinder/network/manager.py:386 +#: cinder/scheduler/filter_scheduler.py:114 #, python-format -msgid "Address |%(address)s| is not allocated" +msgid "No valid hosts for volume %(id)s with type %(type)s" msgstr "" -#: cinder/network/manager.py:390 +#: cinder/scheduler/filter_scheduler.py:125 #, python-format -msgid "Address |%(address)s| is not allocated to your project |%(project)s|" +msgid "" +"Current host not valid for volume %(id)s with type %(type)s, migration " +"not allowed" msgstr "" -#: cinder/network/manager.py:402 -#, python-format -msgid "Quota exceeded for %s, tried to allocate address" +#: cinder/scheduler/filter_scheduler.py:156 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" msgstr "" -#: cinder/network/manager.py:614 +#: cinder/scheduler/filter_scheduler.py:174 #, python-format msgid "" -"Database inconsistency: DNS domain |%s| is registered in the Cinder db but " -"not visible to either the floating or instance DNS driver. It will be " -"ignored." 
+"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" msgstr "" -#: cinder/network/manager.py:660 +#: cinder/scheduler/filter_scheduler.py:207 #, python-format -msgid "Domain |%(domain)s| already exists, changing zone to |%(av_zone)s|." +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" msgstr "" -#: cinder/network/manager.py:670 +#: cinder/scheduler/filter_scheduler.py:259 #, python-format -msgid "Domain |%(domain)s| already exists, changing project to |%(project)s|." +msgid "Filtered %s" msgstr "" -#: cinder/network/manager.py:778 +#: cinder/scheduler/filter_scheduler.py:276 #, python-format -msgid "Disassociated %s stale fixed ip(s)" +msgid "Choosing %s" msgstr "" -#: cinder/network/manager.py:782 -msgid "setting network host" -msgstr "ネットワークホストの設定をします。" - -#: cinder/network/manager.py:896 +#: cinder/scheduler/host_manager.py:264 #, python-format -msgid "network allocations for instance |%s|" +msgid "Ignoring %(service_name)s service update from %(host)s" msgstr "" -#: cinder/network/manager.py:901 +#: cinder/scheduler/host_manager.py:269 #, python-format -msgid "networks retrieved for instance |%(instance_id)s|: |%(networks)s|" +msgid "Received %(service_name)s service update from %(host)s." msgstr "" -#: cinder/network/manager.py:930 +#: cinder/scheduler/host_manager.py:294 #, python-format -msgid "network deallocation for instance |%s|" +msgid "volume service is down or disabled. (host: %s)" msgstr "" -#: cinder/network/manager.py:1152 -#, python-format +#: cinder/scheduler/manager.py:63 msgid "" -"instance-dns-zone is |%(domain)s|, which is in availability zone " -"|%(zone)s|. Instance |%(instance)s| is in zone |%(zone2)s|. No DNS record" -" will be created." -msgstr "" - -#: cinder/network/manager.py:1227 -#, python-format -msgid "Unable to release %s because vif doesn't exist." +"ChanceScheduler and SimpleScheduler have been deprecated due to lack of " +"support for advanced features like: volume types, volume encryption, QoS " +"etc. These two schedulers can be fully replaced by FilterScheduler with " +"certain combination of filters and weighers." msgstr "" -#: cinder/network/manager.py:1244 -#, python-format -msgid "Leased IP |%(address)s|" +#: cinder/scheduler/manager.py:98 cinder/scheduler/manager.py:100 +msgid "Failed to create scheduler manager volume flow" msgstr "" -#: cinder/network/manager.py:1248 -#, python-format -msgid "IP %s leased that is not associated" +#: cinder/scheduler/manager.py:159 +msgid "New volume type not specified in request_spec." msgstr "" -#: cinder/network/manager.py:1256 +#: cinder/scheduler/manager.py:174 #, python-format -msgid "IP |%s| leased that isn't allocated" +msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." 
msgstr "" -#: cinder/network/manager.py:1261 +#: cinder/scheduler/manager.py:192 #, python-format -msgid "Released IP |%(address)s|" +msgid "Failed to schedule_%(method)s: %(ex)s" msgstr "" -#: cinder/network/manager.py:1265 +#: cinder/scheduler/scheduler_options.py:68 #, python-format -msgid "IP %s released that is not associated" +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" msgstr "" -#: cinder/network/manager.py:1268 +#: cinder/scheduler/scheduler_options.py:78 #, python-format -msgid "IP %s released that was not leased" -msgstr "リースしていないIP %s が開放されました。" - -#: cinder/network/manager.py:1331 -msgid "cidr already in use" +msgid "Could not decode scheduler options: '%s'" msgstr "" -#: cinder/network/manager.py:1334 -#, python-format -msgid "requested cidr (%(cidr)s) conflicts with existing supernet (%(super)s)" +#: cinder/scheduler/filters/capacity_filter.py:43 +msgid "Free capacity not set: volume node info collection broken." msgstr "" -#: cinder/network/manager.py:1345 +#: cinder/scheduler/filters/capacity_filter.py:57 #, python-format msgid "" -"requested cidr (%(cidr)s) conflicts with existing smaller cidr " -"(%(smaller)s)" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" msgstr "" -#: cinder/network/manager.py:1404 -msgid "Network already exists!" +#: cinder/scheduler/flows/create_volume.py:53 +msgid "No volume_id provided to populate a request_spec from" msgstr "" -#: cinder/network/manager.py:1423 +#: cinder/scheduler/flows/create_volume.py:116 #, python-format -msgid "Network must be disassociated from project %s before delete" +msgid "Failed to schedule_create_volume: %(cause)s" msgstr "" -#: cinder/network/manager.py:1832 -msgid "" -"The sum between the number of networks and the vlan start cannot be " -"greater than 4094" -msgstr "ネットワークの数とVLANの開始番号の和は 4094 より大きくできません。" - -#: cinder/network/manager.py:1839 +#: cinder/scheduler/flows/create_volume.py:135 #, python-format -msgid "" -"The network range is not big enough to fit %(num_networks)s. Network size" -" is %(network_size)s" -msgstr "" - -#: cinder/network/minidns.py:65 -msgid "This driver only supports type 'a'" +msgid "Failed notifying on %(topic)s payload %(payload)s" msgstr "" -#: cinder/network/quantum/client.py:154 -msgid "Tenant ID not set" -msgstr "" +#: cinder/tests/fake_driver.py:57 cinder/volume/driver.py:784 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "偽のISCSI: %s" -#: cinder/network/quantum/client.py:180 +#: cinder/tests/fake_driver.py:76 cinder/volume/driver.py:884 #, python-format -msgid "Quantum Client Request: %(method)s %(action)s" +msgid "FAKE ISER: %s" msgstr "" -#: cinder/network/quantum/client.py:196 -#, python-format -msgid "Quantum entity not found: %s" +#: cinder/tests/fake_driver.py:97 +msgid "local_path not implemented" msgstr "" -#: cinder/network/quantum/client.py:206 +#: cinder/tests/fake_driver.py:124 cinder/tests/fake_driver.py:129 #, python-format -msgid "Server %(status_code)s error: %(data)s" +msgid "LoggingVolumeDriver: %s" msgstr "" -#: cinder/network/quantum/client.py:210 +#: cinder/tests/fake_utils.py:70 #, python-format -msgid "Unable to connect to server. 
Got error: %s" +msgid "Faking execution of cmd (subprocess): %s" msgstr "" -#: cinder/network/quantum/client.py:228 +#: cinder/tests/fake_utils.py:78 #, python-format -msgid "unable to deserialize object of type = '%s'" +msgid "Faked command matched %s" msgstr "" -#: cinder/network/quantum/manager.py:162 -msgid "QuantumManager does not use 'multi_host' parameter." +#: cinder/tests/fake_utils.py:94 +#, python-format +msgid "Faked command raised an exception %s" msgstr "" -#: cinder/network/quantum/manager.py:166 -msgid "QuantumManager requires that only one network is created per call" +#: cinder/tests/fake_utils.py:97 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" msgstr "" -#: cinder/network/quantum/manager.py:176 -msgid "QuantumManager does not use 'vlan_start' parameter." +#: cinder/tests/test_misc.py:58 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" msgstr "" -#: cinder/network/quantum/manager.py:182 -msgid "QuantumManager does not use 'vpn_start' parameter." +#: cinder/tests/test_netapp_nfs.py:360 +#, python-format +msgid "Share %(share)s and file name %(file_name)s" msgstr "" -#: cinder/network/quantum/manager.py:186 -msgid "QuantumManager does not use 'bridge' parameter." +#: cinder/tests/test_rbd.py:768 cinder/volume/drivers/rbd.py:175 +msgid "flush() not supported in this version of librbd" msgstr "" -#: cinder/network/quantum/manager.py:190 -msgid "QuantumManager does not use 'bridge_interface' parameter." +#: cinder/tests/test_storwize_svc.py:260 +#, python-format +msgid "unrecognized argument %s" msgstr "" -#: cinder/network/quantum/manager.py:195 -msgid "QuantumManager requires a valid (.1) gateway address." +#: cinder/tests/test_storwize_svc.py:1507 +#, python-format +msgid "Run CLI command: %s" msgstr "" -#: cinder/network/quantum/manager.py:204 +#: cinder/tests/test_storwize_svc.py:1510 #, python-format msgid "" -"Unable to find existing quantum network for tenant '%(q_tenant_id)s' with" -" net-id '%(quantum_net_id)s'" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" msgstr "" -#: cinder/network/quantum/manager.py:301 +#: cinder/tests/test_storwize_svc.py:1515 #, python-format -msgid "network allocations for instance %s" +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/network/quantum/manager.py:588 +#: cinder/tests/test_volume_types.py:60 #, python-format -msgid "" -"port deallocation failed for instance: |%(instance_id)s|, port_id: " -"|%(port_id)s|" +msgid "Given data: %s" msgstr "" -#: cinder/network/quantum/manager.py:606 +#: cinder/tests/test_volume_types.py:61 #, python-format -msgid "" -"ipam deallocation failed for instance: |%(instance_id)s|, vif_uuid: " -"|%(vif_uuid)s|" +msgid "Result data: %s" msgstr "" -#: cinder/network/quantum/melange_connection.py:96 +#: cinder/tests/test_xiv_ds8k.py:102 #, python-format -msgid "Server returned error: %s" +msgid "Volume not found for instance %(instance_id)s." 
msgstr "" -#: cinder/network/quantum/melange_connection.py:98 -msgid "Connection error contacting melange service, retrying" +#: cinder/tests/api/contrib/test_backups.py:741 +msgid "Invalid input" msgstr "" -#: cinder/network/quantum/melange_connection.py:108 +#: cinder/tests/integrated/test_login.py:29 +#, fuzzy, python-format +msgid "volume: %s" +msgstr "Detach volume: ボリューム %s をデタッチします" + +#: cinder/tests/integrated/api/client.py:32 #, python-format msgid "" -"allocate IP on network |%(network_id)s| belonging to " -"|%(network_tenant_id)s| to this vif |%(vif_id)s| with mac " -"|%(mac_address)s| belonging to |%(project_id)s| " +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" msgstr "" -#: cinder/network/quantum/melange_ipam_lib.py:133 -msgid "get_project_and_global_net_ids must be called with a non-null project_id" +#: cinder/tests/integrated/api/client.py:42 +msgid "Authentication error" msgstr "" -#: cinder/network/quantum/cinder_ipam_lib.py:75 -msgid "Error creating network entry" +#: cinder/tests/integrated/api/client.py:50 +msgid "Authorization error" msgstr "" -#: cinder/network/quantum/cinder_ipam_lib.py:90 -#, python-format -msgid "No network with net_id = %s" +#: cinder/tests/integrated/api/client.py:58 +msgid "Item not found" msgstr "" -#: cinder/network/quantum/cinder_ipam_lib.py:221 +#: cinder/tests/integrated/api/client.py:100 #, python-format -msgid "No fixed IPs to deallocate for vif %s" +msgid "Doing %(method)s on %(relative_url)s" msgstr "" -#: cinder/network/quantum/quantum_connection.py:99 +#: cinder/tests/integrated/api/client.py:103 #, python-format -msgid "Connecting interface %(interface_id)s to net %(net_id)s for %(tenant_id)s" +msgid "Body: %s" msgstr "" -#: cinder/network/quantum/quantum_connection.py:113 +#: cinder/tests/integrated/api/client.py:121 #, python-format -msgid "Deleting port %(port_id)s on net %(net_id)s for %(tenant_id)s" +msgid "%(auth_uri)s => code %(http_status)s" msgstr "" -#: cinder/notifier/api.py:115 +#: cinder/tests/integrated/api/client.py:148 #, python-format -msgid "%s not in valid priorities" +msgid "%(relative_uri)s => code %(http_status)s" msgstr "" -#: cinder/notifier/api.py:130 -#, python-format -msgid "" -"Problem '%(e)s' attempting to send to notification system. " -"Payload=%(payload)s" +#: cinder/tests/integrated/api/client.py:159 +msgid "Unexpected status code" msgstr "" -#: cinder/notifier/list_notifier.py:65 +#: cinder/tests/integrated/api/client.py:166 #, python-format -msgid "Problem '%(e)s' attempting to send to notification driver %(driver)s." +msgid "Decoding JSON: %s" msgstr "" -#: cinder/notifier/rabbit_notifier.py:46 -#, python-format -msgid "Could not send notification to %(topic)s. 
Payload=%(message)s" +#: cinder/transfer/api.py:68 +msgid "Volume in unexpected state" msgstr "" -#: cinder/rpc/amqp.py:146 -#, python-format -msgid "Returning exception %s to caller" -msgstr "呼び出し元に 例外 %s を返却します。" - -#: cinder/rpc/amqp.py:188 -#, python-format -msgid "unpacked context: %s" -msgstr "context %s をアンパックしました。" +#: cinder/transfer/api.py:102 cinder/volume/api.py:367 +msgid "status must be available" +msgstr "" -#: cinder/rpc/amqp.py:231 -#, python-format -msgid "received %s" -msgstr "受信: %s" +#: cinder/transfer/api.py:119 +#, fuzzy, python-format +msgid "Failed to create transfer record for %s" +msgstr "ip %s に対するメタデータの取得に失敗しました。" -#: cinder/rpc/amqp.py:236 +#: cinder/transfer/api.py:136 #, python-format -msgid "no method for message: %s" -msgstr "メッセージ %s に対するメソッドが存在しません。" +msgid "Attempt to transfer %s with invalid auth key." +msgstr "" -#: cinder/rpc/amqp.py:237 +#: cinder/transfer/api.py:156 cinder/volume/flows/api/create_volume.py:508 #, python-format -msgid "No method for message: %s" -msgstr "メッセージ %s に対するメソッドが存在しません。" +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" -#: cinder/rpc/amqp.py:321 +#: cinder/transfer/api.py:182 #, python-format -msgid "Making asynchronous call on %s ..." +msgid "Failed to update quota donating volumetransfer id %s" msgstr "" -#: cinder/rpc/amqp.py:324 +#: cinder/transfer/api.py:199 #, python-format -msgid "MSG_ID is %s" -msgstr "MSG_IDは %s です。" +msgid "Volume %s has been transferred." +msgstr "" -#: cinder/rpc/amqp.py:346 +#: cinder/volume/api.py:143 #, python-format -msgid "Making asynchronous cast on %s..." +msgid "Unable to query if %s is in the availability zone set" msgstr "" -#: cinder/rpc/amqp.py:354 -msgid "Making asynchronous fanout cast..." +#: cinder/volume/api.py:171 cinder/volume/api.py:173 +msgid "Failed to create api volume flow" msgstr "" -#: cinder/rpc/amqp.py:379 -#, python-format -msgid "Sending notification on %s..." +#: cinder/volume/api.py:202 +msgid "Failed to update quota for deleting volume" msgstr "" -#: cinder/rpc/common.py:54 +#: cinder/volume/api.py:214 #, python-format -msgid "" -"Remote error: %(exc_type)s %(value)s\n" -"%(traceback)s." -msgstr "" - -#: cinder/rpc/common.py:71 -msgid "Timeout while waiting on RPC response." +msgid "Volume status must be available or error, but current status is: %s" msgstr "" -#: cinder/rpc/impl_kombu.py:111 -msgid "Failed to process message... skipping it." +#: cinder/volume/api.py:224 +msgid "Volume cannot be deleted while migrating" msgstr "" -#: cinder/rpc/impl_kombu.py:407 +#: cinder/volume/api.py:229 #, python-format -msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgid "Volume still has %d dependent snapshots" msgstr "" -#: cinder/rpc/impl_kombu.py:430 +#: cinder/volume/api.py:285 cinder/volume/api.py:350 +#: cinder/volume/qos_specs.py:240 cinder/volume/volume_types.py:67 #, python-format -msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgid "Searching by: %s" msgstr "" -#: cinder/rpc/impl_kombu.py:466 -#, python-format -msgid "" -"Unable to connect to AMQP server on %(hostname)s:%(port)d after " -"%(max_retries)d tries: %(err_str)s" +#: cinder/volume/api.py:370 +msgid "already attached" msgstr "" -#: cinder/rpc/impl_kombu.py:482 -#, python-format -msgid "" -"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " -"again in %(sleep_time)d seconds." 
+#: cinder/volume/api.py:377 +msgid "status must be in-use to detach" msgstr "" -#: cinder/rpc/impl_kombu.py:533 cinder/rpc/impl_qpid.py:385 -#, python-format -msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" -msgstr "" +#: cinder/volume/api.py:388 +#, fuzzy +msgid "Volume status must be available to reserve" +msgstr "ボリュームのステータス(status)は available でなければなりません。" -#: cinder/rpc/impl_kombu.py:551 cinder/rpc/impl_qpid.py:400 -#, python-format -msgid "Timed out waiting for RPC response: %s" +#: cinder/volume/api.py:464 +msgid "Snapshot cannot be created while volume is migrating" msgstr "" -#: cinder/rpc/impl_kombu.py:555 cinder/rpc/impl_qpid.py:404 -#, python-format -msgid "Failed to consume message from queue: %s" +#: cinder/volume/api.py:468 +msgid "must be available" msgstr "" -#: cinder/rpc/impl_kombu.py:589 cinder/rpc/impl_qpid.py:434 +#: cinder/volume/api.py:490 #, python-format -msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" msgstr "" -#: cinder/rpc/impl_qpid.py:341 +#: cinder/volume/api.py:502 #, python-format -msgid "Unable to connect to AMQP server: %s " +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" msgstr "" -#: cinder/rpc/impl_qpid.py:346 -#, python-format -msgid "Connected to AMQP server on %s" +#: cinder/volume/api.py:553 +#, fuzzy +msgid "Volume Snapshot status must be available or error" +msgstr "ボリュームのステータス(status)は available でなければなりません。" + +#: cinder/volume/api.py:581 cinder/volume/flows/api/create_volume.py:208 +msgid "Metadata property key blank" msgstr "" -#: cinder/rpc/impl_qpid.py:354 -msgid "Re-established AMQP queues" +#: cinder/volume/api.py:585 +msgid "Metadata property key greater than 255 characters" msgstr "" -#: cinder/rpc/impl_qpid.py:412 -msgid "Error processing message. Skipping it." +#: cinder/volume/api.py:589 +msgid "Metadata property value greater than 255 characters" msgstr "" -#: cinder/scheduler/chance.py:47 cinder/scheduler/simple.py:91 -#: cinder/scheduler/simple.py:143 -msgid "Is the appropriate service running?" +#: cinder/volume/api.py:720 cinder/volume/api.py:772 +#, fuzzy +msgid "Volume status must be available/in-use." +msgstr "ボリュームのステータス(status)は available でなければなりません。" + +#: cinder/volume/api.py:723 +msgid "Volume status is in-use." msgstr "" -#: cinder/scheduler/chance.py:52 -msgid "Could not find another compute" +#: cinder/volume/api.py:752 +msgid "Volume status must be available to extend." msgstr "" -#: cinder/scheduler/driver.py:63 +#: cinder/volume/api.py:757 #, python-format -msgid "Casted '%(method)s' to volume '%(host)s'" +msgid "" +"New size for extend must be greater than current size. 
(current: " +"%(size)s, extended: %(new_size)s)" msgstr "" -#: cinder/scheduler/driver.py:80 -#, python-format -msgid "Casted '%(method)s' to compute '%(host)s'" +#: cinder/volume/api.py:778 +msgid "Volume is already part of an active migration" msgstr "" -#: cinder/scheduler/driver.py:89 -#, python-format -msgid "Casted '%(method)s' to network '%(host)s'" +#: cinder/volume/api.py:784 +msgid "volume must not have snapshots" msgstr "" -#: cinder/scheduler/driver.py:107 +#: cinder/volume/api.py:797 #, python-format -msgid "Casted '%(method)s' to %(topic)s '%(host)s'" +msgid "No available service named %s" msgstr "" -#: cinder/scheduler/driver.py:181 -msgid "Must implement a fallback schedule" -msgstr "予備の(fallback)スケジューラを実装する必要があります。" +#: cinder/volume/api.py:803 +msgid "Destination host must be different than current host" +msgstr "" -#: cinder/scheduler/driver.py:185 -msgid "Driver must implement schedule_prep_resize" +#: cinder/volume/api.py:833 +msgid "Source volume not mid-migration." msgstr "" -#: cinder/scheduler/driver.py:190 -msgid "Driver must implement schedule_run_instance" +#: cinder/volume/api.py:837 +msgid "Destination volume not mid-migration." msgstr "" -#: cinder/scheduler/driver.py:325 -msgid "Block migration can not be used with shared storage." +#: cinder/volume/api.py:842 +#, python-format +msgid "Destination has migration_status %(stat)s, expected %(exp)s." msgstr "" -#: cinder/scheduler/driver.py:330 -msgid "Live migration can not be used without shared storage." +#: cinder/volume/api.py:853 +msgid "Volume status must be available to update readonly flag." msgstr "" -#: cinder/scheduler/driver.py:367 cinder/scheduler/driver.py:457 +#: cinder/volume/api.py:862 #, python-format -msgid "host %(dest)s is not compatible with original host %(src)s." +msgid "Unable to update type due to incorrect status on volume: %s" msgstr "" -#: cinder/scheduler/driver.py:416 +#: cinder/volume/api.py:868 #, python-format -msgid "" -"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " -"memory(host:%(avail)s <= instance:%(mem_inst)s)" +msgid "Volume %s is already part of an active migration." msgstr "" -#: cinder/scheduler/driver.py:472 +#: cinder/volume/api.py:874 #, python-format -msgid "" -"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " -"disk(host:%(available)s <= instance:%(necessary)s)" +msgid "migration_policy must be 'on-demand' or 'never', passed: %s" msgstr "" -#: cinder/scheduler/filter_scheduler.py:51 +#: cinder/volume/api.py:887 #, python-format -msgid "No host selection for %s defined." +msgid "Invalid volume_type passed: %s" msgstr "" -#: cinder/scheduler/filter_scheduler.py:64 +#: cinder/volume/api.py:900 #, python-format -msgid "Attempting to build %(num_instances)d instance(s)" +msgid "New volume_type same as original: %s" msgstr "" -#: cinder/scheduler/filter_scheduler.py:157 -msgid "Scheduler only understands Compute nodes (for now)" +#: cinder/volume/api.py:915 +msgid "Retype cannot change encryption requirements" msgstr "" -#: cinder/scheduler/filter_scheduler.py:199 -#, python-format -msgid "Filtered %(hosts)s" +#: cinder/volume/api.py:927 +msgid "Retype cannot change front-end qos specs for in-use volumes" msgstr "" -#: cinder/scheduler/filter_scheduler.py:209 +#: cinder/volume/driver.py:189 cinder/volume/drivers/netapp/nfs.py:174 #, python-format -msgid "Weighted %(weighted_host)s" -msgstr "" +msgid "Recovering from a failed execute. 
Try number %s" +msgstr "実行失敗からリカバリーします。%s 回目のトライ。" -#: cinder/scheduler/host_manager.py:144 +#: cinder/volume/driver.py:282 #, python-format -msgid "Host filter fails for ignored host %(host)s" +msgid "copy_data_between_volumes %(src)s -> %(dest)s." msgstr "" -#: cinder/scheduler/host_manager.py:151 +#: cinder/volume/driver.py:295 cinder/volume/driver.py:309 #, python-format -msgid "Host filter fails for non-forced host %(host)s" +msgid "Failed to attach volume %(vol)s" msgstr "" -#: cinder/scheduler/host_manager.py:157 +#: cinder/volume/driver.py:327 #, python-format -msgid "Host filter function %(func)s failed for %(host)s" +msgid "Failed to copy volume %(src)s to %(dest)d" msgstr "" -#: cinder/scheduler/host_manager.py:163 +#: cinder/volume/driver.py:340 #, python-format -msgid "Host filter passes for %(host)s" +msgid "copy_image_to_volume %s." msgstr "" -#: cinder/scheduler/host_manager.py:272 +#: cinder/volume/driver.py:358 #, python-format -msgid "Received %(service_name)s service update from %(host)s." +msgid "copy_volume_to_image %s." msgstr "" -#: cinder/scheduler/host_manager.py:313 -msgid "host_manager only implemented for 'compute'" +#: cinder/volume/driver.py:394 +#, python-format +msgid "Unable to access the backend storage via the path %(path)s." msgstr "" -#: cinder/scheduler/host_manager.py:323 +#: cinder/volume/driver.py:433 #, python-format -msgid "No service for compute ID %s" +msgid "Creating a new backup for volume %s." msgstr "" -#: cinder/scheduler/manager.py:85 +#: cinder/volume/driver.py:451 #, python-format -msgid "" -"Driver Method %(driver_method_name)s missing: %(e)s. Reverting to " -"schedule()" +msgid "Restoring backup %(backup)s to volume %(volume)s." msgstr "" -#: cinder/scheduler/manager.py:150 -#, python-format -msgid "Failed to schedule_%(method)s: %(ex)s" +#: cinder/volume/driver.py:474 +msgid "Extend volume not implemented" msgstr "" -#: cinder/scheduler/manager.py:159 +#: cinder/volume/driver.py:533 cinder/volume/drivers/emc/emc_smis_iscsi.py:113 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:546 #, python-format -msgid "Setting instance %(instance_uuid)s to %(state)s state." +msgid "ISCSI discovery attempt failed for:%s" msgstr "" -#: cinder/scheduler/scheduler_options.py:66 +#: cinder/volume/driver.py:548 #, python-format -msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgid "Error from iscsiadm -m discovery: %s" msgstr "" -#: cinder/scheduler/scheduler_options.py:75 +#: cinder/volume/driver.py:595 +#, fuzzy, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "ボリューム %s 用の iSCSI エクスポートが見つかりません" + +#: cinder/volume/driver.py:599 cinder/volume/drivers/emc/emc_smis_iscsi.py:156 #, python-format -msgid "Could not decode scheduler options: '%(e)s'" +msgid "ISCSI Discovery: Found %s" msgstr "" -#: cinder/scheduler/simple.py:87 -msgid "Not enough allocatable CPU cores remaining" +#: cinder/volume/driver.py:696 +msgid "The volume driver requires the iSCSI initiator name in the connector." 
msgstr "" -#: cinder/scheduler/simple.py:137 -msgid "Not enough allocatable volume gigabytes remaining" +#: cinder/volume/driver.py:726 cinder/volume/driver.py:845 +#: cinder/volume/drivers/eqlx.py:247 cinder/volume/drivers/lvm.py:359 +#: cinder/volume/drivers/zadara.py:650 +#: cinder/volume/drivers/emc/emc_smis_common.py:859 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:235 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:602 +#: cinder/volume/drivers/netapp/iscsi.py:1032 +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/nexenta/iscsi.py:538 +#: cinder/volume/drivers/windows/windows.py:205 +msgid "Updating volume stats" msgstr "" -#: cinder/scheduler/filters/core_filter.py:45 -msgid "VCPUs not set; assuming CPU collection broken" +#: cinder/volume/driver.py:924 +msgid "Driver must implement initialize_connection" msgstr "" -#: cinder/tests/fake_utils.py:72 +#: cinder/volume/manager.py:203 #, python-format -msgid "Faking execution of cmd (subprocess): %s" +msgid "Driver path %s is deprecated, update your configuration to the new path." msgstr "" -#: cinder/tests/fake_utils.py:80 -#, python-format -msgid "Faked command matched %s" +#: cinder/volume/manager.py:209 +msgid "" +"ThinLVMVolumeDriver is deprecated, please configure LVMISCSIDriver and " +"lvm_type=thin. Continuing with those settings." msgstr "" -#: cinder/tests/fake_utils.py:96 +#: cinder/volume/manager.py:228 #, python-format -msgid "Faked command raised an exception %s" +msgid "Starting volume driver %(driver_name)s (%(version)s)" msgstr "" -#: cinder/tests/fake_utils.py:101 +#: cinder/volume/manager.py:235 #, python-format -msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" -msgstr "" - -#: cinder/tests/fakelibvirt.py:784 -msgid "Please extend mock libvirt module to support flags" -msgstr "" - -#: cinder/tests/fakelibvirt.py:790 -msgid "Please extend fake libvirt module to support this auth method" +msgid "Error encountered during initialization of driver: %(name)s" msgstr "" -#: cinder/tests/test_compute.py:365 cinder/tests/test_compute.py:1419 +#: cinder/volume/manager.py:244 #, python-format -msgid "Running instances: %s" -msgstr "インスタンス %s は実行中です。" +msgid "Re-exporting %s volumes" +msgstr "%s 個のボリュームを再エクスポートします。" -#: cinder/tests/test_compute.py:371 +#: cinder/volume/manager.py:257 #, python-format -msgid "After terminating instances: %s" -msgstr "インスタンス %s を終了した後です。" - -#: cinder/tests/test_compute.py:589 -msgid "Internal error" +msgid "Failed to re-export volume %s: setting to error state" msgstr "" -#: cinder/tests/test_compute.py:1430 +#: cinder/volume/manager.py:264 #, python-format -msgid "After force-killing instances: %s" +msgid "volume %s stuck in a downloading state" msgstr "" -#: cinder/tests/test_misc.py:92 +#: cinder/volume/manager.py:271 +#, python-format +msgid "volume %s: skipping export" +msgstr "ボリューム %s のエキスポートをスキップします。" + +#: cinder/volume/manager.py:273 #, python-format msgid "" -"The following migrations are missing a downgrade:\n" -"\t%s" +"Error encountered during re-exporting phase of driver initialization: " +"%(name)s" msgstr "" -#: cinder/tests/test_cinder_manage.py:169 -msgid "id" +#: cinder/volume/manager.py:283 +msgid "Resuming any in progress delete operations" msgstr "" -#: cinder/tests/test_cinder_manage.py:170 -msgid "IPv4" +#: cinder/volume/manager.py:286 +#, python-format +msgid "Resuming delete on volume: %s" msgstr "" -#: cinder/tests/test_cinder_manage.py:171 -msgid "IPv6" +#: cinder/volume/manager.py:328 
cinder/volume/manager.py:330 +msgid "Failed to create manager volume flow" msgstr "" -#: cinder/tests/test_cinder_manage.py:172 -msgid "start address" -msgstr "開始アドレス" +#: cinder/volume/manager.py:374 cinder/volume/manager.py:391 +#, python-format +msgid "volume %s: deleting" +msgstr "ボリューム %s を削除します。" -#: cinder/tests/test_cinder_manage.py:173 -msgid "DNS1" -msgstr "" +#: cinder/volume/manager.py:380 +#, fuzzy +msgid "volume is not local to this node" +msgstr "ボリュームはこのノードのローカルではありません。" + +#: cinder/volume/manager.py:389 +#, python-format +msgid "volume %s: removing export" +msgstr "ボリューム %s のエクスポートを解除します。" -#: cinder/tests/test_cinder_manage.py:174 -msgid "DNS2" +#: cinder/volume/manager.py:394 +#, python-format +msgid "Cannot delete volume %s: volume is busy" msgstr "" -#: cinder/tests/test_cinder_manage.py:175 -msgid "VlanID" +#: cinder/volume/manager.py:422 +msgid "Failed to update usages deleting volume" msgstr "" -#: cinder/tests/test_cinder_manage.py:176 -msgid "project" +#: cinder/volume/manager.py:427 +#, python-format +msgid "volume %s: glance metadata deleted" msgstr "" -#: cinder/tests/test_cinder_manage.py:177 -msgid "uuid" +#: cinder/volume/manager.py:430 +#, python-format +msgid "no glance metadata found for volume %s" msgstr "" -#: cinder/tests/test_volume.py:216 +#: cinder/volume/manager.py:434 #, python-format -msgid "Target %s allocated" -msgstr "ターゲット %s をアロケートしました。" +msgid "volume %s: deleted successfully" +msgstr "ボリューム %s の削除に成功しました。" -#: cinder/tests/test_volume.py:468 +#: cinder/volume/manager.py:451 #, python-format -msgid "Cannot confirm exported volume id:%s." +msgid "snapshot %s: creating" msgstr "" -#: cinder/tests/test_volume_types.py:58 +#: cinder/volume/manager.py:462 #, python-format -msgid "Given data: %s" +msgid "snapshot %(snap_id)s: creating" msgstr "" -#: cinder/tests/test_volume_types.py:59 +#: cinder/volume/manager.py:490 #, python-format -msgid "Result data: %s" +msgid "" +"Failed updating %(snapshot_id)s metadata using the provided volumes " +"%(volume_id)s metadata" msgstr "" -#: cinder/tests/test_xenapi.py:626 +#: cinder/volume/manager.py:496 #, python-format -msgid "Creating files in %s to simulate guest agent" +msgid "snapshot %s: created successfully" msgstr "" -#: cinder/tests/test_xenapi.py:637 +#: cinder/volume/manager.py:508 cinder/volume/manager.py:518 #, python-format -msgid "Removing simulated guest agent files in %s" +msgid "snapshot %s: deleting" msgstr "" -#: cinder/tests/api/openstack/compute/test_servers.py:2144 +#: cinder/volume/manager.py:526 #, python-format -msgid "Quota exceeded: code=%(code)s" +msgid "Cannot delete snapshot %s: snapshot is busy" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:57 -#, python-format -msgid "_create: %s" +#: cinder/volume/manager.py:556 +msgid "Failed to update usages deleting snapshot" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:66 +#: cinder/volume/manager.py:559 #, python-format -msgid "_delete: %s" +msgid "snapshot %s: deleted successfully" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:75 -#, python-format -msgid "_get: %s" +#: cinder/volume/manager.py:579 +msgid "being attached by another instance" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:85 -#, python-format -msgid "_get_all: %s" +#: cinder/volume/manager.py:583 +msgid "being attached by another host" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:125 -#, python-format -msgid 
"test_snapshot_create: param=%s" +#: cinder/volume/manager.py:587 +msgid "being attached by different mode" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:134 -#, python-format -msgid "test_snapshot_create: resp_dict=%s" +#: cinder/volume/manager.py:590 +msgid "status must be available or attaching" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:156 +#: cinder/volume/manager.py:698 +#, fuzzy, python-format +msgid "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "ボリューム %(volume_id)s をインスタンス %(instance_id)s のデバイス %(device)s に接続" + +#: cinder/volume/manager.py:760 #, python-format -msgid "test_snapshot_create_force: param=%s" +msgid "Unable to fetch connection information from backend: %(err)s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:165 +#: cinder/volume/manager.py:807 #, python-format -msgid "test_snapshot_create_force: resp_dict=%s" +msgid "Unable to terminate volume connection: %(err)s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:205 -#, python-format -msgid "test_snapshot_show: resp=%s" +#: cinder/volume/manager.py:854 +msgid "failed to create new_volume on destination host" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:231 -#, python-format -msgid "test_snapshot_detail: resp_dict=%s" +#: cinder/volume/manager.py:857 +msgid "timeout creating new_volume on destination host" msgstr "" -#: cinder/tests/integrated/test_login.py:31 +#: cinder/volume/manager.py:880 #, python-format -msgid "flavor: %s" +msgid "Failed to copy volume %(vol1)s to %(vol2)s" msgstr "" -#: cinder/tests/integrated/api/client.py:38 +#: cinder/volume/manager.py:909 #, python-format msgid "" -"%(message)s\n" -"Status Code: %(_status)s\n" -"Body: %(_body)s" -msgstr "" - -#: cinder/tests/integrated/api/client.py:47 -msgid "Authentication error" +"migrate_volume_completion: completing migration for volume %(vol1)s " +"(temporary volume %(vol2)s" msgstr "" -#: cinder/tests/integrated/api/client.py:55 -msgid "Authorization error" -msgstr "" - -#: cinder/tests/integrated/api/client.py:63 -msgid "Item not found" +#: cinder/volume/manager.py:921 +#, python-format +msgid "" +"migrate_volume_completion is cleaning up an error for volume %(vol1)s " +"(temporary volume %(vol2)s" msgstr "" -#: cinder/tests/integrated/api/client.py:105 +#: cinder/volume/manager.py:940 #, python-format -msgid "Doing %(method)s on %(relative_url)s" +msgid "Failed to delete migration source vol %(vol)s: %(err)s" msgstr "" -#: cinder/tests/integrated/api/client.py:107 +#: cinder/volume/manager.py:976 #, python-format -msgid "Body: %s" +msgid "volume %s: calling driver migrate_volume" msgstr "" -#: cinder/tests/integrated/api/client.py:125 -#, python-format -msgid "%(auth_uri)s => code %(http_status)s" +#: cinder/volume/manager.py:1016 +msgid "Updating volume status" msgstr "" -#: cinder/tests/integrated/api/client.py:151 +#: cinder/volume/manager.py:1024 #, python-format -msgid "%(relative_uri)s => code %(http_status)s" +msgid "" +"Unable to update stats, %(driver_name)s -%(driver_version)s " +"%(config_group)s driver is uninitialized." 
msgstr "" -#: cinder/tests/integrated/api/client.py:161 -msgid "Unexpected status code" +#: cinder/volume/manager.py:1044 +#, python-format +msgid "Notification {%s} received" msgstr "" -#: cinder/tests/integrated/api/client.py:168 +#: cinder/volume/manager.py:1091 #, python-format -msgid "Decoding JSON: %s" +msgid "" +"Quota exceeded for %(s_pid)s, tried to extend volume by %(s_size)sG, " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" msgstr "" -#: cinder/tests/rpc/common.py:133 +#: cinder/volume/manager.py:1103 #, python-format -msgid "Nested received %(queue)s, %(value)s" -msgstr "Nested received %(queue)s, %(value)s" +msgid "volume %s: extending" +msgstr "" -#: cinder/tests/rpc/common.py:142 +#: cinder/volume/manager.py:1105 #, python-format -msgid "Nested return %s" -msgstr "ネストした戻り値: %s" - -#: cinder/tests/rpc/common.py:160 -msgid "RPC backend does not support timeouts" +msgid "volume %s: extended successfully" msgstr "" -#: cinder/tests/rpc/common.py:227 cinder/tests/rpc/common.py:233 +#: cinder/volume/manager.py:1107 #, python-format -msgid "Received %s" -msgstr "%s を受信。" +msgid "volume %s: Error trying to extend volume" +msgstr "" -#: cinder/virt/connection.py:85 -msgid "Failed to open connection to the hypervisor" -msgstr "ハイパーバイザへの接続に失敗しました。" +#: cinder/volume/manager.py:1169 +msgid "Failed to update usages while retyping volume." +msgstr "" -#: cinder/virt/fake.py:270 cinder/virt/xenapi_conn.py:396 -#: cinder/virt/baremetal/proxy.py:716 cinder/virt/libvirt/connection.py:2045 -#, python-format -msgid "Compute_service record created for %s " +#: cinder/volume/manager.py:1170 +msgid "Failed to get old volume type quota reservations" msgstr "" -#: cinder/virt/fake.py:273 cinder/virt/xenapi_conn.py:399 -#: cinder/virt/baremetal/proxy.py:720 cinder/virt/libvirt/connection.py:2048 +#: cinder/volume/manager.py:1190 #, python-format -msgid "Compute_service record updated for %s " +msgid "Volume %s: retyped succesfully" msgstr "" -#: cinder/virt/firewall.py:130 +#: cinder/volume/manager.py:1193 #, python-format -msgid "Attempted to unfilter instance %s which is not filtered" -msgstr "フィルタされていないインスタンス %s のフィルタ解除を試行しました" +msgid "" +"Volume %s: driver error when trying to retype, falling back to generic " +"mechanism." +msgstr "" -#: cinder/virt/firewall.py:137 -#, python-format -msgid "Filters added to instance %s" +#: cinder/volume/manager.py:1204 +msgid "Retype requires migration but is not allowed." msgstr "" -#: cinder/virt/firewall.py:139 -msgid "Provider Firewall Rules refreshed" +#: cinder/volume/manager.py:1212 +msgid "Volume must not have snapshots." msgstr "" -#: cinder/virt/firewall.py:291 +#: cinder/volume/qos_specs.py:57 #, python-format -msgid "Adding security group rule: %r" +msgid "Valid consumer of QoS specs are: %s" msgstr "" -#: cinder/virt/firewall.py:403 cinder/virt/xenapi/firewall.py:87 +#: cinder/volume/qos_specs.py:84 cinder/volume/qos_specs.py:105 +#: cinder/volume/qos_specs.py:155 cinder/volume/qos_specs.py:197 +#: cinder/volume/qos_specs.py:211 cinder/volume/qos_specs.py:225 +#: cinder/volume/volume_types.py:43 #, python-format -msgid "Adding provider rule: %s" +msgid "DB error: %s" msgstr "" -#: cinder/virt/images.py:86 -msgid "'qemu-img info' parsing failed." 
+#: cinder/volume/qos_specs.py:123 cinder/volume/qos_specs.py:140 +#: cinder/volume/qos_specs.py:272 cinder/volume/volume_types.py:52 +#: cinder/volume/volume_types.py:99 +msgid "id cannot be None" msgstr "" -#: cinder/virt/images.py:92 +#: cinder/volume/qos_specs.py:156 #, python-format -msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgid "Failed to get all associations of qos specs %s" msgstr "" -#: cinder/virt/images.py:104 +#: cinder/volume/qos_specs.py:189 #, python-format -msgid "Converted to raw, but format is now %s" -msgstr "" - -#: cinder/virt/vmwareapi_conn.py:105 msgid "" -"Must specify vmwareapi_host_ip,vmwareapi_host_username and " -"vmwareapi_host_password to useconnection_type=vmwareapi" +"Type %(type_id)s is already associated with another qos specs: " +"%(qos_specs_id)s" msgstr "" -#: cinder/virt/vmwareapi_conn.py:276 +#: cinder/volume/qos_specs.py:198 #, python-format -msgid "In vmwareapi:_create_session, got this exception: %s" +msgid "Failed to associate qos specs %(id)s with type: %(vol_type_id)s" msgstr "" -#: cinder/virt/vmwareapi_conn.py:359 +#: cinder/volume/qos_specs.py:212 #, python-format -msgid "In vmwareapi:_call_method, got this exception: %s" +msgid "Failed to disassociate qos specs %(id)s with type: %(vol_type_id)s" msgstr "" -#: cinder/virt/vmwareapi_conn.py:398 +#: cinder/volume/qos_specs.py:226 #, python-format -msgid "Task [%(task_name)s] %(task_ref)s status: success" +msgid "Failed to disassociate qos specs %s." msgstr "" -#: cinder/virt/vmwareapi_conn.py:404 -#, python-format -msgid "Task [%(task_name)s] %(task_ref)s status: error %(error_info)s" +#: cinder/volume/qos_specs.py:284 cinder/volume/volume_types.py:111 +msgid "name cannot be None" msgstr "" -#: cinder/virt/vmwareapi_conn.py:409 +#: cinder/volume/utils.py:144 #, python-format -msgid "In vmwareapi:_poll_task, Got this error %s" +msgid "" +"Incorrect value error: %(blocksize)s, it may indicate that " +"'volume_dd_blocksize' was configured incorrectly. Fall back to default." msgstr "" -#: cinder/virt/xenapi_conn.py:140 +#: cinder/volume/volume_types.py:130 +#, python-format msgid "" -"Must specify xenapi_connection_url, xenapi_connection_username " -"(optionally), and xenapi_connection_password to use " -"connection_type=xenapi" +"Default volume type is not found, please check default_volume_type " +"config: %s" msgstr "" -"connection_type=xenapi を使用するには、以下の指定が必要です: xenapi_connection_url, " -"xenapi_connection_username (オプション), xenapi_connection_password" -#: cinder/virt/xenapi_conn.py:329 cinder/virt/libvirt/connection.py:472 -msgid "Could not determine iscsi initiator name" +#: cinder/volume/drivers/block_device.py:131 +#: cinder/volume/drivers/block_device.py:143 cinder/volume/drivers/lvm.py:654 +#: cinder/volume/drivers/lvm.py:669 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" msgstr "" -#: cinder/virt/xenapi_conn.py:460 -msgid "Host startup on XenServer is not supported." +#: cinder/volume/drivers/block_device.py:157 cinder/volume/drivers/lvm.py:687 +#, python-format +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %s" msgstr "" -#: cinder/virt/xenapi_conn.py:489 -msgid "Unable to log in to XenAPI (is the Dom0 disk full?)" +#: cinder/volume/drivers/block_device.py:183 cinder/volume/drivers/lvm.py:483 +#, python-format +msgid "Skipping ensure_export. 
No iscsi_target provision for volume: %s" msgstr "" -#: cinder/virt/xenapi_conn.py:527 -msgid "Host is member of a pool, but DB says otherwise" +#: cinder/volume/drivers/block_device.py:200 cinder/volume/drivers/lvm.py:504 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" msgstr "" -#: cinder/virt/xenapi_conn.py:599 cinder/virt/xenapi_conn.py:612 +#: cinder/volume/drivers/block_device.py:272 cinder/volume/drivers/lvm.py:227 #, python-format -msgid "Got exception: %s" -msgstr "例外 %s が発生しました。" - -#: cinder/virt/baremetal/dom.py:93 -msgid "No domains exist." +msgid "Performing secure delete on volume: %s" msgstr "" -#: cinder/virt/baremetal/dom.py:95 +#: cinder/volume/drivers/block_device.py:287 #, python-format -msgid "============= initial domains =========== : %s" +msgid "Error unrecognized volume_clear option: %s" msgstr "" -#: cinder/virt/baremetal/dom.py:99 -msgid "Building domain: to be removed" -msgstr "" +#: cinder/volume/drivers/block_device.py:311 cinder/volume/drivers/lvm.py:300 +#: cinder/volume/drivers/zadara.py:509 cinder/volume/drivers/nexenta/nfs.py:189 +#, fuzzy, python-format +msgid "Creating clone of volume: %s" +msgstr "Create volume: %s GBのボリュームを作成します。" -#: cinder/virt/baremetal/dom.py:103 -msgid "Not running domain: remove" +#: cinder/volume/drivers/block_device.py:380 +msgid "No free disk" msgstr "" -#: cinder/virt/baremetal/dom.py:111 -msgid "domain running on an unknown node: discarded" +#: cinder/volume/drivers/block_device.py:393 +msgid "No big enough free disk" msgstr "" -#: cinder/virt/baremetal/dom.py:127 +#: cinder/volume/drivers/coraid.py:84 #, python-format -msgid "No such domain (%s)" +msgid "Invalid ESM url scheme \"%s\". Supported https only." +msgstr "" + +#: cinder/volume/drivers/coraid.py:111 +msgid "Invalid REST handle name. Expected path." msgstr "" -#: cinder/virt/baremetal/dom.py:134 +#: cinder/volume/drivers/coraid.py:134 #, python-format -msgid "Failed power down Bare-metal node %s" +msgid "Call to json.loads() failed: %(ex)s. Response: %(resp)s" msgstr "" -#: cinder/virt/baremetal/dom.py:143 -msgid "deactivate -> activate fails" +#: cinder/volume/drivers/coraid.py:224 +msgid "Session is expired. Relogin on ESM." msgstr "" -#: cinder/virt/baremetal/dom.py:153 -msgid "destroy_domain: no such domain" +#: cinder/volume/drivers/coraid.py:244 +msgid "Reply is empty." msgstr "" -#: cinder/virt/baremetal/dom.py:154 -#, python-format -msgid "No such domain %s" +#: cinder/volume/drivers/coraid.py:246 +msgid "Error message is empty." msgstr "" -#: cinder/virt/baremetal/dom.py:161 +#: cinder/volume/drivers/coraid.py:284 #, python-format -msgid "Domains: %s" +msgid "Coraid Appliance ping failed: %s" msgstr "" -#: cinder/virt/baremetal/dom.py:163 +#: cinder/volume/drivers/coraid.py:297 #, python-format -msgid "Nodes: %s" +msgid "Volume \"%(name)s\" created with VSX LUN \"%(lun)s\"" msgstr "" -#: cinder/virt/baremetal/dom.py:166 +#: cinder/volume/drivers/coraid.py:311 #, python-format -msgid "After storing domains: %s" +msgid "Volume \"%s\" deleted." msgstr "" -#: cinder/virt/baremetal/dom.py:169 -msgid "deactivation/removing domain failed" +#: cinder/volume/drivers/coraid.py:315 +#, python-format +msgid "Resize volume \"%(name)s\" to %(size)s GB." 
msgstr "" -#: cinder/virt/baremetal/dom.py:176 -msgid "===== Domain is being created =====" +#: cinder/volume/drivers/coraid.py:319 +#, python-format +msgid "Repository for volume \"%(name)s\" found: \"%(repo)s\"" msgstr "" -#: cinder/virt/baremetal/dom.py:179 -msgid "Same domain name already exists" +#: cinder/volume/drivers/coraid.py:333 +#, python-format +msgid "Volume \"%(name)s\" resized. New size is %(size)s GB." msgstr "" -#: cinder/virt/baremetal/dom.py:181 -msgid "create_domain: before get_idle_node" +#: cinder/volume/drivers/coraid.py:385 +msgid "Cannot create clone volume in different repository." msgstr "" -#: cinder/virt/baremetal/dom.py:198 +#: cinder/volume/drivers/coraid.py:505 #, python-format -msgid "Created new domain: %s" +msgid "Initialize connection %(shelf)s/%(lun)s for %(name)s" msgstr "" -#: cinder/virt/baremetal/dom.py:213 +#: cinder/volume/drivers/eqlx.py:139 #, python-format -msgid "Failed to boot Bare-metal node %s" +msgid "" +"CLI output\n" +"%s" msgstr "" -#: cinder/virt/baremetal/dom.py:222 -msgid "No such domain exists" +#: cinder/volume/drivers/eqlx.py:154 +msgid "Reading CLI MOTD" msgstr "" -#: cinder/virt/baremetal/dom.py:226 +#: cinder/volume/drivers/eqlx.py:158 #, python-format -msgid "change_domain_state: to new state %s" +msgid "Setting CLI terminal width: '%s'" msgstr "" -#: cinder/virt/baremetal/dom.py:233 +#: cinder/volume/drivers/eqlx.py:162 #, python-format -msgid "Stored fake domains to the file: %s" +msgid "Sending CLI command: '%s'" msgstr "" -#: cinder/virt/baremetal/dom.py:244 -msgid "domain does not exist" +#: cinder/volume/drivers/eqlx.py:169 +msgid "Error executing EQL command" msgstr "" -#: cinder/virt/baremetal/nodes.py:42 +#: cinder/volume/drivers/eqlx.py:199 #, python-format -msgid "Unknown baremetal driver %(d)s" +msgid "EQL-driver: executing \"%s\"" msgstr "" -#: cinder/virt/baremetal/proxy.py:148 +#: cinder/volume/drivers/eqlx.py:208 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:383 #, python-format -msgid "Error encountered when destroying instance '%(name)s': %(ex)s" +msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" msgstr "" -#: cinder/virt/baremetal/proxy.py:162 -#, python-format -msgid "instance %(instance_name)s: deleting instance files %(target)s" -msgstr "インスタンス %(instance_name)s: インスタンスファイル群 %(target)s を削除しています" +#: cinder/volume/drivers/eqlx.py:215 cinder/volume/drivers/san/san.py:149 +#, fuzzy, python-format +msgid "Error running SSH command: %s" +msgstr "コマンド実行において予期しないエラーが発生しました。" -#: cinder/virt/baremetal/proxy.py:189 +#: cinder/volume/drivers/eqlx.py:282 #, python-format -msgid "instance %s: rebooted" -msgstr "インスタンス%s: 再起動しました。" - -#: cinder/virt/baremetal/proxy.py:192 -msgid "_wait_for_reboot failed" +msgid "Volume %s does not exist, it may have already been deleted" msgstr "" -#: cinder/virt/baremetal/proxy.py:222 +#: cinder/volume/drivers/eqlx.py:300 #, python-format -msgid "instance %s: rescued" -msgstr "インスタンス %s: rescued" - -#: cinder/virt/baremetal/proxy.py:225 -msgid "_wait_for_rescue failed" +msgid "EQL-driver: Setup is complete, group IP is %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:242 -msgid "<============= spawn of baremetal =============>" +#: cinder/volume/drivers/eqlx.py:304 +msgid "Failed to setup the Dell EqualLogic driver" msgstr "" -#: cinder/virt/baremetal/proxy.py:255 +#: cinder/volume/drivers/eqlx.py:320 #, python-format -msgid "instance %s: is building" +msgid "Failed to create volume %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:260 -msgid "Key is 
injected but instance is not running yet" -msgstr "" - -#: cinder/virt/baremetal/proxy.py:265 +#: cinder/volume/drivers/eqlx.py:329 #, python-format -msgid "instance %s: booted" -msgstr "インスタンス %s: 起動しました。" +msgid "Volume %s was not found while trying to delete it" +msgstr "" -#: cinder/virt/baremetal/proxy.py:268 +#: cinder/volume/drivers/eqlx.py:333 #, python-format -msgid "~~~~~~ current state = %s ~~~~~~" +msgid "Failed to delete volume %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:269 +#: cinder/volume/drivers/eqlx.py:348 #, python-format -msgid "instance %s spawned successfully" +msgid "Failed to create snapshot of volume %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:272 +#: cinder/volume/drivers/eqlx.py:361 #, python-format -msgid "instance %s:not booted" +msgid "Failed to create volume from snapshot %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:274 -msgid "Bremetal assignment is overcommitted." +#: cinder/volume/drivers/eqlx.py:374 +#, python-format +msgid "Failed to create clone of volume %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:354 +#: cinder/volume/drivers/eqlx.py:384 #, python-format -msgid "instance %s: Creating image" -msgstr "インスタンス %s のイメージを生成します。" +msgid "Failed to delete snapshot %(snap)s of volume %(vol)s" +msgstr "" -#: cinder/virt/baremetal/proxy.py:473 +#: cinder/volume/drivers/eqlx.py:405 #, python-format -msgid "instance %(inst_name)s: injecting %(injection)s into image %(img_id)s" +msgid "Failed to initialize connection to volume %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:484 +#: cinder/volume/drivers/eqlx.py:415 #, python-format -msgid "" -"instance %(inst_name)s: ignoring error injecting data into image " -"%(img_id)s (%(e)s)" -msgstr "インスタンス %(inst_name)s: イメージ %(img_id)s へのデータ埋め込みのエラーを無視しています (%(e)s)" +msgid "Failed to terminate connection to volume %s" +msgstr "" -#: cinder/virt/baremetal/proxy.py:529 +#: cinder/volume/drivers/eqlx.py:436 #, python-format -msgid "instance %s: starting toXML method" -msgstr "インスタンス %s: toXML メソッドを開始。" +msgid "Volume %s is not found!, it may have been deleted" +msgstr "" -#: cinder/virt/baremetal/proxy.py:531 +#: cinder/volume/drivers/eqlx.py:440 #, python-format -msgid "instance %s: finished toXML method" -msgstr "インスタンス %s: toXML メソッドを完了。" - -#: cinder/virt/baremetal/proxy.py:574 cinder/virt/libvirt/connection.py:1815 -msgid "" -"Cannot get the number of cpu, because this function is not implemented " -"for this platform. This error can be safely ignored for now." +msgid "Failed to ensure export of volume %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:714 +#: cinder/volume/drivers/eqlx.py:459 #, python-format -msgid "#### RLK: cpu_arch = %s " +msgid "Failed to extend_volume %(name)s from %(current_size)sGB to %(new_size)sGB" msgstr "" -#: cinder/virt/baremetal/proxy.py:746 -msgid "Updating!" +#: cinder/volume/drivers/glusterfs.py:86 +#, python-format +msgid "There's no Gluster config file configured (%s)" msgstr "" -#: cinder/virt/baremetal/proxy.py:773 cinder/virt/libvirt/connection.py:2609 -#: cinder/virt/xenapi/host.py:129 -msgid "Updating host stats" +#: cinder/volume/drivers/glusterfs.py:91 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" msgstr "" -#: cinder/virt/baremetal/tilera.py:185 -msgid "free_node...." 
+#: cinder/volume/drivers/glusterfs.py:103 +msgid "mount.glusterfs is not installed" msgstr "" -#: cinder/virt/baremetal/tilera.py:216 +#: cinder/volume/drivers/glusterfs.py:161 #, python-format -msgid "deactivate_node is called for node_id = %(id)s node_ip = %(ip)s" +msgid "Cloning volume %(src)s to volume %(dst)s" msgstr "" -#: cinder/virt/baremetal/tilera.py:221 -msgid "status of node is set to 0" +#: cinder/volume/drivers/glusterfs.py:166 +msgid "Volume status must be 'available'." msgstr "" -#: cinder/virt/baremetal/tilera.py:232 -msgid "rootfs is already removed" -msgstr "" +#: cinder/volume/drivers/glusterfs.py:202 cinder/volume/drivers/nfs.py:122 +#: cinder/volume/drivers/netapp/nfs.py:753 +#, fuzzy, python-format +msgid "casted to %s" +msgstr "ネストした戻り値: %s" -#: cinder/virt/baremetal/tilera.py:264 -msgid "Before ping to the bare-metal node" +#: cinder/volume/drivers/glusterfs.py:216 +msgid "Snapshot status must be \"available\" to clone." msgstr "" -#: cinder/virt/baremetal/tilera.py:275 +#: cinder/volume/drivers/glusterfs.py:238 #, python-format -msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is ready" +msgid "snapshot: %(snap)s, volume: %(vol)s, volume_size: %(size)s" msgstr "" -#: cinder/virt/baremetal/tilera.py:279 +#: cinder/volume/drivers/glusterfs.py:257 #, python-format -msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is not ready, out_msg=%(out_msg)s" -msgstr "" - -#: cinder/virt/baremetal/tilera.py:292 -msgid "Noting to do for tilera nodes: vmlinux is in CF" -msgstr "" - -#: cinder/virt/baremetal/tilera.py:316 -msgid "activate_node" -msgstr "" - -#: cinder/virt/baremetal/tilera.py:330 -msgid "Node is unknown error state." -msgstr "" - -#: cinder/virt/disk/api.py:165 -msgid "no capable image handler configured" +msgid "will copy from snapshot at %s" msgstr "" -#: cinder/virt/disk/api.py:178 +#: cinder/volume/drivers/glusterfs.py:275 cinder/volume/drivers/nfs.py:172 #, python-format -msgid "unknown disk image handler: %s" -msgstr "" - -#: cinder/virt/disk/api.py:189 -msgid "image already mounted" +msgid "Volume %s does not have provider_location specified, skipping" msgstr "" -#: cinder/virt/disk/api.py:276 cinder/virt/disk/guestfs.py:64 -#: cinder/virt/disk/guestfs.py:78 cinder/virt/disk/mount.py:100 +#: cinder/volume/drivers/glusterfs.py:373 #, python-format -msgid "Failed to mount filesystem: %s" -msgstr "ファイルシステム %s のマウントに失敗しました。" +msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" +msgstr "" -#: cinder/virt/disk/api.py:291 +#: cinder/volume/drivers/glusterfs.py:403 #, python-format -msgid "Failed to remove container: %s" +msgid "nova call result: %s" msgstr "" -#: cinder/virt/disk/api.py:441 -#, python-format -msgid "User %(username)s not found in password file." +#: cinder/volume/drivers/glusterfs.py:405 +msgid "Call to Nova to create snapshot failed" msgstr "" -#: cinder/virt/disk/api.py:457 -#, python-format -msgid "User %(username)s not found in shadow file." +#: cinder/volume/drivers/glusterfs.py:427 +msgid "Nova returned \"error\" status while creating snapshot." msgstr "" -#: cinder/virt/disk/guestfs.py:39 +#: cinder/volume/drivers/glusterfs.py:431 #, python-format -msgid "unsupported partition: %s" +msgid "Status of snapshot %(id)s is now %(status)s" msgstr "" -#: cinder/virt/disk/guestfs.py:77 -msgid "unknown guestmount error" +#: cinder/volume/drivers/glusterfs.py:444 +#, python-format +msgid "Timed out while waiting for Nova update for creation of snapshot %s." 
msgstr "" -#: cinder/virt/disk/loop.py:30 +#: cinder/volume/drivers/glusterfs.py:456 #, python-format -msgid "Could not attach image to loopback: %s" -msgstr "イメージをループバック %s にアタッチできません。" - -#: cinder/virt/disk/mount.py:76 -msgid "no partitions found" +msgid "create snapshot: %s" msgstr "" -#: cinder/virt/disk/mount.py:77 +#: cinder/volume/drivers/glusterfs.py:457 #, python-format -msgid "Failed to map partitions: %s" +msgid "volume id: %s" msgstr "" -#: cinder/virt/disk/nbd.py:58 -msgid "nbd unavailable: module not loaded" +#: cinder/volume/drivers/glusterfs.py:532 +msgid "'active' must be present when writing snap_info." msgstr "" -#: cinder/virt/disk/nbd.py:63 -msgid "No free nbd devices" -msgstr "空きの nbd デバイスがありません" - -#: cinder/virt/disk/nbd.py:81 +#: cinder/volume/drivers/glusterfs.py:562 #, python-format -msgid "qemu-nbd error: %s" +msgid "deleting snapshot %s" msgstr "" -#: cinder/virt/disk/nbd.py:93 -#, python-format -msgid "nbd device %s did not show up" -msgstr "nbd デバイス %s が出現しません" - -#: cinder/virt/libvirt/connection.py:265 -#, python-format -msgid "Connecting to libvirt: %s" -msgstr "libvirt %s へ接続します。" - -#: cinder/virt/libvirt/connection.py:286 -msgid "Connection to libvirt broke" -msgstr "libvirtへの接続が切れています。" - -#: cinder/virt/libvirt/connection.py:388 -#, python-format -msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s" +#: cinder/volume/drivers/glusterfs.py:566 +msgid "Volume status must be \"available\" or \"in-use\"." msgstr "" -#: cinder/virt/libvirt/connection.py:400 +#: cinder/volume/drivers/glusterfs.py:582 #, python-format msgid "" -"Error from libvirt during saved instance removal. Code=%(errcode)s " -"Error=%(e)s" +"Snapshot record for %s is not present, allowing snapshot_delete to " +"proceed." msgstr "" -#: cinder/virt/libvirt/connection.py:411 +#: cinder/volume/drivers/glusterfs.py:587 #, python-format -msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s" +msgid "snapshot_file for this snap is %s" msgstr "" -#: cinder/virt/libvirt/connection.py:423 -msgid "Instance destroyed successfully." +#: cinder/volume/drivers/glusterfs.py:608 +#, python-format +msgid "No base file found for %s." msgstr "" -#: cinder/virt/libvirt/connection.py:435 +#: cinder/volume/drivers/glusterfs.py:625 #, python-format -msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s" +msgid "No %(base_id)s found for %(file)s" msgstr "" -#: cinder/virt/libvirt/connection.py:461 +#: cinder/volume/drivers/glusterfs.py:680 #, python-format -msgid "Deleting instance files %(target)s" +msgid "No file found with %s as backing file." msgstr "" -#: cinder/virt/libvirt/connection.py:554 -msgid "attaching LXC block device" +#: cinder/volume/drivers/glusterfs.py:690 +#, python-format +msgid "No snap found with %s as backing file." msgstr "" -#: cinder/virt/libvirt/connection.py:567 -msgid "detaching LXC block device" +#: cinder/volume/drivers/glusterfs.py:701 +#, python-format +msgid "No file depends on %s." msgstr "" -#: cinder/virt/libvirt/connection.py:692 -#, fuzzy, python-format -msgid "Instance soft rebooted successfully." -msgstr "インスタンス%s: 再起動しました。" - -#: cinder/virt/libvirt/connection.py:696 -msgid "Failed to soft reboot instance." +#: cinder/volume/drivers/glusterfs.py:727 +#, python-format +msgid "Check condition failed: %s expected to be None." msgstr "" -#: cinder/virt/libvirt/connection.py:725 -msgid "Instance shutdown successfully." 
+#: cinder/volume/drivers/glusterfs.py:778 +msgid "Call to Nova delete snapshot failed" msgstr "" -#: cinder/virt/libvirt/connection.py:761 cinder/virt/libvirt/connection.py:905 -msgid "During reboot, instance disappeared." +#: cinder/volume/drivers/glusterfs.py:796 +#, python-format +msgid "status of snapshot %s is still \"deleting\"... waiting" msgstr "" -#: cinder/virt/libvirt/connection.py:766 -msgid "Instance rebooted successfully." +#: cinder/volume/drivers/glusterfs.py:802 +#, python-format +msgid "Unable to delete snapshot %(id)s, status: %(status)s." msgstr "" -#: cinder/virt/libvirt/connection.py:867 cinder/virt/xenapi/vmops.py:1358 +#: cinder/volume/drivers/glusterfs.py:815 #, python-format -msgid "" -"Found %(migration_count)d unconfirmed migrations older than " -"%(confirm_window)d seconds" +msgid "Timed out while waiting for Nova update for deletion of snapshot %(id)s." msgstr "" -#: cinder/virt/libvirt/connection.py:871 +#: cinder/volume/drivers/glusterfs.py:904 #, python-format -msgid "Automatically confirming migration %d" +msgid "%s must be a valid raw or qcow2 image." msgstr "" -#: cinder/virt/libvirt/connection.py:896 -msgid "Instance is running" +#: cinder/volume/drivers/glusterfs.py:967 +msgid "Extend volume is only supported for this driver when no snapshots exist." msgstr "" -#: cinder/virt/libvirt/connection.py:910 -msgid "Instance spawned successfully." +#: cinder/volume/drivers/glusterfs.py:975 +#, python-format +msgid "Unrecognized backing format: %s" msgstr "" -#: cinder/virt/libvirt/connection.py:926 +#: cinder/volume/drivers/glusterfs.py:990 #, python-format -msgid "data: %(data)r, fpath: %(fpath)r" -msgstr "データ: %(data)r, ファイルパス: %(fpath)r" - -#: cinder/virt/libvirt/connection.py:978 -msgid "Guest does not have a console available" +msgid "creating new volume at %s" msgstr "" -#: cinder/virt/libvirt/connection.py:1020 +#: cinder/volume/drivers/glusterfs.py:993 #, python-format -msgid "Path '%(path)s' supports direct I/O" +msgid "file already exists at %s" msgstr "" -#: cinder/virt/libvirt/connection.py:1024 +#: cinder/volume/drivers/glusterfs.py:1019 cinder/volume/drivers/nfs.py:159 #, python-format -msgid "Path '%(path)s' does not support direct I/O: '%(ex)s'" +msgid "Exception during mounting %s" msgstr "" -#: cinder/virt/libvirt/connection.py:1028 cinder/virt/libvirt/connection.py:1032 +#: cinder/volume/drivers/glusterfs.py:1021 #, python-format -msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'" +msgid "Available shares: %s" msgstr "" -#: cinder/virt/libvirt/connection.py:1153 -msgid "Creating image" +#: cinder/volume/drivers/glusterfs.py:1038 +#, python-format +msgid "" +"GlusterFS share at %(dir)s is not writable by the Cinder volume service. " +"Snapshot operations will not be supported." msgstr "" -#: cinder/virt/libvirt/connection.py:1339 +#: cinder/volume/drivers/gpfs.py:96 #, python-format -msgid "Injecting %(injection)s into image %(img_id)s" +msgid "GPFS is not active. Detailed output: %s" msgstr "" -#: cinder/virt/libvirt/connection.py:1349 +#: cinder/volume/drivers/gpfs.py:97 #, python-format -msgid "Ignoring error injecting data into image %(img_id)s (%(e)s)" +msgid "GPFS is not running - state: %s" msgstr "" -#: cinder/virt/libvirt/connection.py:1381 -#, python-format -msgid "block_device_list %s" +#: cinder/volume/drivers/gpfs.py:140 +msgid "Option gpfs_mount_point_base is not set correctly." 
msgstr "" -#: cinder/virt/libvirt/connection.py:1658 -msgid "Starting toXML method" +#: cinder/volume/drivers/gpfs.py:147 +msgid "Option gpfs_images_share_mode is not set correctly." msgstr "" -#: cinder/virt/libvirt/connection.py:1662 -msgid "Finished toXML method" +#: cinder/volume/drivers/gpfs.py:153 +msgid "Option gpfs_images_dir is not set correctly." msgstr "" -#: cinder/virt/libvirt/connection.py:1679 +#: cinder/volume/drivers/gpfs.py:160 #, python-format msgid "" -"Error from libvirt while looking up %(instance_name)s: [Error Code " -"%(error_code)s] %(ex)s" +"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " +"belong to different file systems" msgstr "" -#: cinder/virt/libvirt/connection.py:1920 -msgid "libvirt version is too old (does not support getVersion)" -msgstr "" - -#: cinder/virt/libvirt/connection.py:1942 +#: cinder/volume/drivers/gpfs.py:169 #, python-format -msgid "'' must be 1, but %d\n" +msgid "" +"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in " +"cluster daemon level %(cur)s - must be at least at level %(min)s." msgstr "" -#: cinder/virt/libvirt/connection.py:1969 +#: cinder/volume/drivers/gpfs.py:183 #, python-format -msgid "topology (%(topology)s) must have %(ks)s" +msgid "%s must be an absolute path." msgstr "" -#: cinder/virt/libvirt/connection.py:2067 +#: cinder/volume/drivers/gpfs.py:188 #, python-format -msgid "" -"Instance launched has CPU info:\n" -"%s" +msgid "%s is not a directory." msgstr "" -#: cinder/virt/libvirt/connection.py:2079 +#: cinder/volume/drivers/gpfs.py:197 #, python-format msgid "" -"CPU doesn't have compatibility.\n" -"\n" -"%(ret)s\n" -"\n" -"Refer to %(u)s" +"The GPFS filesystem %(fs)s is not at the required release level. Current" +" level is %(cur)s, must be at least %(min)s." msgstr "" -#: cinder/virt/libvirt/connection.py:2136 +#: cinder/volume/drivers/gpfs.py:556 #, python-format -msgid "Timeout migrating for %s. nwfilter not found." +msgid "Failed to resize volume %(volume_id)s, error: %(error)s" msgstr "" -#: cinder/virt/libvirt/connection.py:2352 +#: cinder/volume/drivers/gpfs.py:604 #, python-format -msgid "skipping %(path)s since it looks like volume" +msgid "mkfs failed on volume %(vol)s, error message was: %(err)s" msgstr "" -#: cinder/virt/libvirt/connection.py:2407 +#: cinder/volume/drivers/gpfs.py:637 #, python-format -msgid "Getting disk size of %(i_name)s: %(e)s" +msgid "" +"%s cannot be accessed. Verify that GPFS is active and file system is " +"mounted." msgstr "" -#: cinder/virt/libvirt/connection.py:2458 +#: cinder/volume/drivers/lvm.py:189 #, python-format -msgid "Instance %s: Starting migrate_disk_and_power_off" +msgid "Unabled to delete due to existing snapshot for volume: %s" msgstr "" -#: cinder/virt/libvirt/connection.py:2513 -msgid "During wait running, instance disappeared." +#: cinder/volume/drivers/lvm.py:215 +#, python-format +msgid "Volume device file path %s does not exist." msgstr "" -#: cinder/virt/libvirt/connection.py:2518 -msgid "Instance running successfully." +#: cinder/volume/drivers/lvm.py:221 +#, python-format +msgid "Size for volume: %s not found, cannot secure delete." 
msgstr "" -#: cinder/virt/libvirt/connection.py:2525 +#: cinder/volume/drivers/lvm.py:262 #, python-format -msgid "Instance %s: Starting finish_migration" +msgid "snapshot: %s not found, skipping delete operations" msgstr "" -#: cinder/virt/libvirt/connection.py:2565 +#: cinder/volume/drivers/lvm.py:361 #, python-format -msgid "Instance %s: Starting finish_revert_migration" +msgid "Unable to update stats on non-initialized Volume Group: %s" msgstr "" -#: cinder/virt/libvirt/firewall.py:42 -msgid "" -"Libvirt module could not be loaded. NWFilterFirewall will not work " -"correctly." +#: cinder/volume/drivers/lvm.py:462 +#, python-format +msgid "Error creating iSCSI target, retrying creation for target: %s" msgstr "" -#: cinder/virt/libvirt/firewall.py:93 -msgid "Called setup_basic_filtering in nwfilter" +#: cinder/volume/drivers/lvm.py:482 +#, python-format +msgid "volume_info:%s" msgstr "" -#: cinder/virt/libvirt/firewall.py:101 -msgid "Ensuring static filters" +#: cinder/volume/drivers/lvm.py:518 +msgid "Detected inconsistency in provider_location id" msgstr "" -#: cinder/virt/libvirt/firewall.py:171 +#: cinder/volume/drivers/lvm.py:519 cinder/volume/drivers/lvm.py:724 +#: cinder/volume/drivers/huawei/rest_common.py:1225 #, python-format -msgid "The nwfilter(%(instance_filter_name)s) is not found." +msgid "%s" msgstr "" -#: cinder/virt/libvirt/firewall.py:217 +#: cinder/volume/drivers/lvm.py:573 #, python-format -msgid "The nwfilter(%(instance_filter_name)s) for%(name)s is not found." -msgstr "" - -#: cinder/virt/libvirt/firewall.py:233 -msgid "iptables firewall: Setup Basic Filtering" +msgid "Symbolic link %s not found" msgstr "" -#: cinder/virt/libvirt/firewall.py:252 -msgid "Attempted to unfilter instance which is not filtered" +#: cinder/volume/drivers/nfs.py:109 +msgid "Driver specific implementation needs to return mount_point_base." msgstr "" -#: cinder/virt/libvirt/imagecache.py:170 +#: cinder/volume/drivers/nfs.py:263 #, python-format -msgid "%s is a valid instance name" +msgid "Expected volume size was %d" msgstr "" -#: cinder/virt/libvirt/imagecache.py:173 +#: cinder/volume/drivers/nfs.py:264 #, python-format -msgid "%s has a disk file" +msgid " but size is now %d" msgstr "" -#: cinder/virt/libvirt/imagecache.py:175 -#, python-format -msgid "Instance %(instance)s is backed by %(backing)s" -msgstr "" +#: cinder/volume/drivers/nfs.py:361 +#, fuzzy, python-format +msgid "%s is already mounted" +msgstr "グループ %s は既に存在しています。" -#: cinder/virt/libvirt/imagecache.py:186 +#: cinder/volume/drivers/nfs.py:421 #, python-format -msgid "" -"Instance %(instance)s is using a backing file %(backing)s which does not " -"appear in the image service" +msgid "There's no NFS config file configured (%s)" msgstr "" -#: cinder/virt/libvirt/imagecache.py:237 +#: cinder/volume/drivers/nfs.py:426 #, python-format -msgid "%(id)s (%(base_file)s): image verification failed" +msgid "NFS config file at %(config)s doesn't exist" msgstr "" -#: cinder/virt/libvirt/imagecache.py:247 +#: cinder/volume/drivers/nfs.py:431 #, python-format -msgid "%(id)s (%(base_file)s): image verification skipped, no hash stored" +msgid "NFS config 'nfs_oversub_ratio' invalid. Must be > 0: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:266 +#: cinder/volume/drivers/nfs.py:439 #, python-format -msgid "Cannot remove %(base_file)s, it does not exist" +msgid "NFS config 'nfs_used_ratio' invalid. 
Must be > 0 and <= 1.0: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:278 +#: cinder/volume/drivers/nfs.py:493 #, python-format -msgid "Base file too young to remove: %s" +msgid "Selected %s as target nfs share." msgstr "" -#: cinder/virt/libvirt/imagecache.py:281 +#: cinder/volume/drivers/nfs.py:526 #, python-format -msgid "Removing base file: %s" +msgid "%s is above nfs_used_ratio" msgstr "" -#: cinder/virt/libvirt/imagecache.py:288 +#: cinder/volume/drivers/nfs.py:529 #, python-format -msgid "Failed to remove %(base_file)s, error was %(error)s" +msgid "%s is above nfs_oversub_ratio" msgstr "" -#: cinder/virt/libvirt/imagecache.py:299 +#: cinder/volume/drivers/nfs.py:532 #, python-format -msgid "%(id)s (%(base_file)s): checking" +msgid "%s reserved space is above nfs_oversub_ratio" msgstr "" -#: cinder/virt/libvirt/imagecache.py:318 +#: cinder/volume/drivers/rbd.py:160 #, python-format -msgid "" -"%(id)s (%(base_file)s): in use: on this node %(local)d local, %(remote)d " -"on other nodes" +msgid "Invalid argument - whence=%s not supported" msgstr "" -#: cinder/virt/libvirt/imagecache.py:330 -#, python-format -msgid "" -"%(id)s (%(base_file)s): warning -- an absent base file is in use! " -"instances: %(instance_list)s" +#: cinder/volume/drivers/rbd.py:164 +msgid "Invalid argument" msgstr "" -#: cinder/virt/libvirt/imagecache.py:338 -#, python-format -msgid "%(id)s (%(base_file)s): in use on (%(remote)d on other nodes)" +#: cinder/volume/drivers/rbd.py:183 +msgid "fileno() not supported by RBD()" msgstr "" -#: cinder/virt/libvirt/imagecache.py:348 -#, python-format -msgid "%(id)s (%(base_file)s): image is not in use" -msgstr "" +#: cinder/volume/drivers/rbd.py:210 +#, fuzzy, python-format +msgid "error opening rbd image %s" +msgstr "xvp の開始中にエラー: %s" -#: cinder/virt/libvirt/imagecache.py:354 -#, python-format -msgid "%(id)s (%(base_file)s): image is in use" +#: cinder/volume/drivers/rbd.py:259 +msgid "rados and rbd python libraries not found" msgstr "" -#: cinder/virt/libvirt/imagecache.py:377 -#, python-format -msgid "Skipping verification, no base directory at %s" -msgstr "" +#: cinder/volume/drivers/rbd.py:265 +#, fuzzy +msgid "error connecting to ceph cluster" +msgstr "libvirt %s へ接続します。" -#: cinder/virt/libvirt/imagecache.py:381 -msgid "Verify base images" +#: cinder/volume/drivers/rbd.py:346 cinder/volume/drivers/sheepdog.py:178 +msgid "error refreshing volume stats" msgstr "" -#: cinder/virt/libvirt/imagecache.py:388 +#: cinder/volume/drivers/rbd.py:377 #, python-format -msgid "Image id %(id)s yields fingerprint %(fingerprint)s" +msgid "clone depth exceeds limit of %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:406 +#: cinder/volume/drivers/rbd.py:411 #, python-format -msgid "Unknown base file: %s" +msgid "maximum clone depth (%d) has been reached - flattening source volume" msgstr "" -#: cinder/virt/libvirt/imagecache.py:411 +#: cinder/volume/drivers/rbd.py:423 #, python-format -msgid "Active base files: %s" +msgid "flattening source volume %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:414 +#: cinder/volume/drivers/rbd.py:435 #, python-format -msgid "Corrupt base files: %s" +msgid "creating snapshot='%s'" msgstr "" -#: cinder/virt/libvirt/imagecache.py:418 +#: cinder/volume/drivers/rbd.py:445 #, python-format -msgid "Removable base files: %s" +msgid "cloning '%(src_vol)s@%(src_snap)s' to '%(dest)s'" msgstr "" -#: cinder/virt/libvirt/imagecache.py:426 -msgid "Verification complete" +#: cinder/volume/drivers/rbd.py:459 +msgid "clone created successfully" msgstr "" 
-#: cinder/virt/libvirt/utils.py:264 -msgid "Unable to find an open port" -msgstr "開いたポートが見つかりません" - -#: cinder/virt/libvirt/vif.py:90 +#: cinder/volume/drivers/rbd.py:468 #, python-format -msgid "Ensuring vlan %(vlan)s and bridge %(bridge)s" +msgid "creating volume '%s'" msgstr "" -#: cinder/virt/libvirt/vif.py:99 +#: cinder/volume/drivers/rbd.py:484 #, python-format -msgid "Ensuring bridge %s" +msgid "flattening %(pool)s/%(img)s" msgstr "" -#: cinder/virt/libvirt/vif.py:165 cinder/virt/libvirt/vif.py:220 +#: cinder/volume/drivers/rbd.py:490 #, python-format -msgid "Failed while unplugging vif of instance '%s'" +msgid "cloning %(pool)s/%(img)s@%(snap)s to %(dst)s" msgstr "" -#: cinder/virt/libvirt/volume.py:163 -#, python-format -msgid "iSCSI device not found at %s" +#: cinder/volume/drivers/rbd.py:527 +msgid "volume has no backup snaps" msgstr "" -#: cinder/virt/libvirt/volume.py:166 +#: cinder/volume/drivers/rbd.py:550 #, python-format -msgid "" -"ISCSI volume not yet found at: %(mount_device)s. Will rescan & retry. " -"Try number: %(tries)s" +msgid "volume %s is not a clone" msgstr "" -#: cinder/virt/libvirt/volume.py:178 +#: cinder/volume/drivers/rbd.py:568 #, python-format -msgid "Found iSCSI node %(mount_device)s (after %(tries)s rescans)" +msgid "deleting parent snapshot %s" msgstr "" -#: cinder/virt/vmwareapi/error_util.py:93 +#: cinder/volume/drivers/rbd.py:579 #, python-format -msgid "Error(s) %s occurred in the call to RetrieveProperties" +msgid "deleting parent %s" msgstr "" -#: cinder/virt/vmwareapi/fake.py:44 cinder/virt/xenapi/fake.py:77 -#, python-format -msgid "%(text)s: _db_content => %(content)s" -msgstr "%(text)s: _db_content => %(content)s" - -#: cinder/virt/vmwareapi/fake.py:131 +#: cinder/volume/drivers/rbd.py:593 #, python-format -msgid "Property %(attr)s not set for the managed object %(objName)s" +msgid "volume %s no longer exists in backend" msgstr "" -#: cinder/virt/vmwareapi/fake.py:437 -msgid "There is no VM registered" +#: cinder/volume/drivers/rbd.py:609 +msgid "volume has clone snapshot(s)" msgstr "" -#: cinder/virt/vmwareapi/fake.py:439 cinder/virt/vmwareapi/fake.py:609 +#: cinder/volume/drivers/rbd.py:625 #, python-format -msgid "Virtual Machine with ref %s is not there" +msgid "deleting rbd volume %s" msgstr "" -#: cinder/virt/vmwareapi/fake.py:502 -#, python-format -msgid "Logging out a session that is invalid or already logged out: %s" +#: cinder/volume/drivers/rbd.py:629 +msgid "" +"ImageBusy error raised while deleting rbd volume. This may have been " +"caused by a connection from a client that has crashed and, if so, may be " +"resolved by retrying the delete after 30 seconds has elapsed." 
msgstr "" -#: cinder/virt/vmwareapi/fake.py:517 -msgid "Session is faulty" +#: cinder/volume/drivers/rbd.py:642 +msgid "volume is a clone so cleaning references" msgstr "" -#: cinder/virt/vmwareapi/fake.py:520 -msgid "Session Invalid" -msgstr "" +#: cinder/volume/drivers/rbd.py:696 +#, fuzzy, python-format +msgid "connection data: %s" +msgstr "libvirt %s へ接続します。" -#: cinder/virt/vmwareapi/fake.py:606 -msgid " No Virtual Machine has been registered yet" +#: cinder/volume/drivers/rbd.py:705 +msgid "Not stored in rbd" msgstr "" -#: cinder/virt/vmwareapi/io_util.py:99 -#, python-format -msgid "Glance image %s is in killed state" +#: cinder/volume/drivers/rbd.py:709 +msgid "Blank components" msgstr "" -#: cinder/virt/vmwareapi/io_util.py:107 -#, python-format -msgid "Glance image %(image_id)s is in unknown state - %(state)s" -msgstr "" +#: cinder/volume/drivers/rbd.py:712 +#, fuzzy +msgid "Not an rbd snapshot" +msgstr "snapshotting: インスタンス %s のスナップショットを取得中" -#: cinder/virt/vmwareapi/network_utils.py:128 -msgid "" -"ESX SOAP server returned an empty port group for the host system in its " -"response" -msgstr "" +#: cinder/volume/drivers/rbd.py:724 +#, fuzzy, python-format +msgid "not cloneable: %s" +msgstr "応答 %s" -#: cinder/virt/vmwareapi/network_utils.py:155 +#: cinder/volume/drivers/rbd.py:728 #, python-format -msgid "Creating Port Group with name %s on the ESX host" +msgid "%s is in a different ceph cluster" msgstr "" -#: cinder/virt/vmwareapi/network_utils.py:169 -#, python-format -msgid "Created Port Group with name %s on the ESX host" +#: cinder/volume/drivers/rbd.py:733 +msgid "rbd image clone requires image format to be 'raw' but image {0} is '{1}'" msgstr "" -#: cinder/virt/vmwareapi/read_write_util.py:150 -#, python-format -msgid "Exception during HTTP connection close in VMWareHTTpWrite. Exception is %s" -msgstr "" +#: cinder/volume/drivers/rbd.py:747 +#, fuzzy, python-format +msgid "Unable to open image %(loc)s: %(err)s" +msgstr "ボリューム %s の存在が確認できません。" -#: cinder/virt/vmwareapi/vim.py:84 -msgid "Unable to import suds." +#: cinder/volume/drivers/rbd.py:817 +msgid "volume backup complete." msgstr "" -#: cinder/virt/vmwareapi/vim.py:90 -msgid "Must specify vmwareapi_wsdl_loc" +#: cinder/volume/drivers/rbd.py:830 +msgid "volume restore complete." msgstr "" -#: cinder/virt/vmwareapi/vim.py:145 +#: cinder/volume/drivers/rbd.py:840 cinder/volume/drivers/sheepdog.py:195 #, python-format -msgid "No such SOAP method '%s' provided by VI SDK" +msgid "Failed to Extend Volume %(volname)s" msgstr "" -#: cinder/virt/vmwareapi/vim.py:150 +#: cinder/volume/drivers/rbd.py:845 cinder/volume/drivers/sheepdog.py:200 +#: cinder/volume/drivers/windows/windows.py:223 #, python-format -msgid "httplib error in %s: " +msgid "Extend volume from %(old_size)s GB to %(new_size)s GB." 
msgstr "" -#: cinder/virt/vmwareapi/vim.py:157 -#, python-format -msgid "Socket error in %s: " +#: cinder/volume/drivers/scality.py:67 +msgid "Value required for 'scality_sofs_config'" msgstr "" -#: cinder/virt/vmwareapi/vim.py:162 +#: cinder/volume/drivers/scality.py:78 #, python-format -msgid "Type error in %s: " +msgid "Cannot access 'scality_sofs_config': %s" msgstr "" -#: cinder/virt/vmwareapi/vim.py:166 -#, python-format -msgid "Exception in %s " +#: cinder/volume/drivers/scality.py:84 +msgid "Cannot execute /sbin/mount.sofs" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:66 -msgid "Getting list of instances" +#: cinder/volume/drivers/scality.py:105 +msgid "Cannot mount Scality SOFS, check syslog for errors" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:82 +#: cinder/volume/drivers/scality.py:139 #, python-format -msgid "Got total of %s instances" -msgstr "" - -#: cinder/virt/vmwareapi/vmops.py:126 -msgid "Couldn't get a local Datastore reference" +msgid "Cannot find volume dir for Scality SOFS at '%s'" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:196 +#: cinder/volume/drivers/sheepdog.py:59 #, python-format -msgid "Creating VM with the name %s on the ESX host" -msgstr "" +msgid "Sheepdog is not working: %s" +msgstr "Sheepdog が動作していません: %s" + +#: cinder/volume/drivers/sheepdog.py:64 +msgid "Sheepdog is not working" +msgstr "Sheepdog が機能していません" -#: cinder/virt/vmwareapi/vmops.py:205 +#: cinder/volume/drivers/solidfire.py:144 #, python-format -msgid "Created VM with the name %s on the ESX host" +msgid "Payload for SolidFire API call: %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:234 +#: cinder/volume/drivers/solidfire.py:151 #, python-format msgid "" -"Creating Virtual Disk of size %(vmdk_file_size_in_kb)s KB and adapter " -"type %(adapter_type)s on the ESX host local store %(data_store_name)s" +"Failed to make httplib connection SolidFire Cluster: %s (verify san_ip " +"settings)" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:251 +#: cinder/volume/drivers/solidfire.py:154 #, python-format -msgid "" -"Created Virtual Disk of size %(vmdk_file_size_in_kb)s KB on the ESX host " -"local store %(data_store_name)s" +msgid "Failed to make httplib connection: %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:260 +#: cinder/volume/drivers/solidfire.py:161 #, python-format msgid "" -"Deleting the file %(flat_uploaded_vmdk_path)s on the ESX host localstore " -"%(data_store_name)s" +"Request to SolidFire cluster returned bad status: %(status)s / %(reason)s" +" (check san_login/san_password settings)" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:272 +#: cinder/volume/drivers/solidfire.py:166 #, python-format -msgid "" -"Deleted the file %(flat_uploaded_vmdk_path)s on the ESX host local store " -"%(data_store_name)s" +msgid "HTTP request failed, with status: %(status)s and reason: %(reason)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:283 +#: cinder/volume/drivers/solidfire.py:177 #, python-format -msgid "" -"Downloading image file data %(image_ref)s to the ESX data store " -"%(data_store_name)s" +msgid "Call to json.loads() raised an exception: %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:298 +#: cinder/volume/drivers/solidfire.py:183 #, python-format -msgid "" -"Downloaded image file data %(image_ref)s to the ESX data store " -"%(data_store_name)s" +msgid "Results of SolidFire API call: %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:315 +#: cinder/volume/drivers/solidfire.py:187 #, python-format -msgid "Reconfiguring VM instance %s to attach the image disk" +msgid "Clone operation 
encountered: %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:322 +#: cinder/volume/drivers/solidfire.py:189 #, python-format -msgid "Reconfigured VM instance %s to attach the image disk" +msgid "Waiting for outstanding operation before retrying snapshot: %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:329 +#: cinder/volume/drivers/solidfire.py:195 #, python-format -msgid "Powering on the VM instance %s" +msgid "Detected xDBVersionMismatch, retry %s of 5" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:335 +#: cinder/volume/drivers/solidfire.py:202 +#: cinder/volume/drivers/solidfire.py:271 +#: cinder/volume/drivers/solidfire.py:366 +#, fuzzy, python-format +msgid "API response: %s" +msgstr "応答 %s" + +#: cinder/volume/drivers/solidfire.py:222 #, python-format -msgid "Powered on the VM instance %s" +msgid "Found solidfire account: %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:381 +#: cinder/volume/drivers/solidfire.py:253 #, python-format -msgid "Creating Snapshot of the VM instance %s " +msgid "solidfire account: %s does not exist, create it..." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:391 +#: cinder/volume/drivers/solidfire.py:315 #, python-format -msgid "Created Snapshot of the VM instance %s " +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:434 +#: cinder/volume/drivers/solidfire.py:398 +#, fuzzy +msgid "Failed to get model update from clone" +msgstr "ip %s に対するメタデータの取得に失敗しました。" + +#: cinder/volume/drivers/solidfire.py:410 #, python-format -msgid "Copying disk data before snapshot of the VM instance %s" +msgid "Failed volume create: %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:447 +#: cinder/volume/drivers/solidfire.py:425 #, python-format -msgid "Copied disk data before snapshot of the VM instance %s" +msgid "More than one valid preset was detected, using %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:456 +#: cinder/volume/drivers/solidfire.py:460 #, python-format -msgid "Uploading image %s" +msgid "Failed to get SolidFire Volume: %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:469 +#: cinder/volume/drivers/solidfire.py:469 #, python-format -msgid "Uploaded image %s" +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:479 +#: cinder/volume/drivers/solidfire.py:478 #, python-format -msgid "Deleting temporary vmdk file %s" +msgid "Volume %s, not found on SF Cluster." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:488 +#: cinder/volume/drivers/solidfire.py:481 #, python-format -msgid "Deleted temporary vmdk file %s" +msgid "Found %(count)s volumes mapped to id: %(uuid)s." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:520 -msgid "instance is not powered on" +#: cinder/volume/drivers/solidfire.py:550 +msgid "Enter SolidFire delete_volume..." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:527 +#: cinder/volume/drivers/solidfire.py:554 #, python-format -msgid "Rebooting guest OS of VM %s" +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:530 -#, python-format -msgid "Rebooted guest OS of VM %s" +#: cinder/volume/drivers/solidfire.py:556 +msgid "This usually means the volume was never successfully created." 
msgstr "" -#: cinder/virt/vmwareapi/vmops.py:532 +#: cinder/volume/drivers/solidfire.py:569 #, python-format -msgid "Doing hard reboot of VM %s" +msgid "Failed to delete SolidFire Volume: %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:536 +#: cinder/volume/drivers/solidfire.py:572 +#: cinder/volume/drivers/solidfire.py:646 +#: cinder/volume/drivers/solidfire.py:709 +#: cinder/volume/drivers/solidfire.py:734 #, python-format -msgid "Did hard reboot of VM %s" +msgid "Volume ID %s was not found on the SolidFire Cluster!" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:548 -#, python-format -msgid "instance - %s not present" +#: cinder/volume/drivers/solidfire.py:575 +msgid "Leaving SolidFire delete_volume" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:567 -#, python-format -msgid "Powering off the VM %s" +#: cinder/volume/drivers/solidfire.py:579 +msgid "Executing SolidFire ensure_export..." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:572 -#, python-format -msgid "Powered off the VM %s" +#: cinder/volume/drivers/solidfire.py:587 +msgid "Executing SolidFire create_export..." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:576 -#, python-format -msgid "Unregistering the VM %s" +#: cinder/volume/drivers/solidfire.py:638 +msgid "Entering SolidFire extend_volume..." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:579 -#, python-format -msgid "Unregistered the VM %s" +#: cinder/volume/drivers/solidfire.py:660 +msgid "Leaving SolidFire extend_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:665 +msgid "Updating cluster status info" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:581 +#: cinder/volume/drivers/solidfire.py:673 +#, fuzzy +msgid "Failed to get updated stats" +msgstr "インスタンス終了処理を開始します。" + +#: cinder/volume/drivers/solidfire.py:703 +#: cinder/volume/drivers/solidfire.py:728 +msgid "Entering SolidFire attach_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:773 +msgid "Leaving SolidFire transfer volume" +msgstr "" + +#: cinder/volume/drivers/zadara.py:236 #, python-format -msgid "" -"In vmwareapi:vmops:destroy, got this exception while un-registering the " -"VM: %s" +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:592 +#: cinder/volume/drivers/zadara.py:260 #, python-format -msgid "Deleting contents of the VM %(name)s from datastore %(datastore_name)s" +msgid "Operation completed. %(data)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:602 +#: cinder/volume/drivers/zadara.py:357 #, python-format -msgid "Deleted contents of the VM %(name)s from datastore %(datastore_name)s" +msgid "Pool %(name)s: %(total)sGB total, %(free)sGB free" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:607 +#: cinder/volume/drivers/zadara.py:408 cinder/volume/drivers/zadara.py:531 #, python-format -msgid "" -"In vmwareapi:vmops:destroy, got this exception while deleting the VM " -"contents from the disk: %s" +msgid "Volume %(name)s could not be found. 
It might be already deleted" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:615 -msgid "pause not supported for vmwareapi" +#: cinder/volume/drivers/zadara.py:438 +#, python-format +msgid "Create snapshot: %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:619 -msgid "unpause not supported for vmwareapi" +#: cinder/volume/drivers/zadara.py:445 cinder/volume/drivers/zadara.py:490 +#: cinder/volume/drivers/zadara.py:516 +#, python-format +msgid "Volume %(name)s not found" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:633 +#: cinder/volume/drivers/zadara.py:456 #, python-format -msgid "Suspending the VM %s " +msgid "Delete snapshot: %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:637 +#: cinder/volume/drivers/zadara.py:464 #, python-format -msgid "Suspended the VM %s " +msgid "snapshot: original volume %s not found, skipping delete operation" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:640 -msgid "instance is powered off and can not be suspended." +#: cinder/volume/drivers/zadara.py:472 +#, python-format +msgid "snapshot: snapshot %s not found, skipping delete operation" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:643 +#: cinder/volume/drivers/zadara.py:483 #, python-format -msgid "VM %s was already in suspended state. So returning without doing anything" +msgid "Creating volume from snapshot: %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:656 +#: cinder/volume/drivers/zadara.py:496 #, python-format -msgid "Resuming the VM %s" +msgid "Snapshot %(name)s not found" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:661 +#: cinder/volume/drivers/zadara.py:614 #, python-format -msgid "Resumed the VM %s " +msgid "Attach properties: %(properties)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:663 -msgid "instance is not in a suspended state" +#: cinder/volume/drivers/emc/emc_smis_common.py:40 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:699 -msgid "get_diagnostics not implemented for vmwareapi" +#: cinder/volume/drivers/emc/emc_smis_common.py:79 +msgid "Entering create_volume." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:757 +#: cinder/volume/drivers/emc/emc_smis_common.py:83 +#, fuzzy, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "ボリューム %(vol_name)s: サイズ %(vol_size)sG のlvを作成します。" + +#: cinder/volume/drivers/emc/emc_smis_common.py:91 #, python-format -msgid "" -"Reconfiguring VM instance %(name)s to set the machine id with ip - " -"%(ip_addr)s" +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:765 +#: cinder/volume/drivers/emc/emc_smis_common.py:98 #, python-format msgid "" -"Reconfigured VM instance %(name)s to set the machine id with ip - " -"%(ip_addr)s" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:802 +#: cinder/volume/drivers/emc/emc_smis_common.py:107 #, python-format -msgid "Creating directory with path %s" +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." 
msgstr "" -#: cinder/virt/vmwareapi/vmops.py:806 +#: cinder/volume/drivers/emc/emc_smis_common.py:115 #, python-format -msgid "Created directory with path %s" +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:89 +#: cinder/volume/drivers/emc/emc_smis_common.py:130 #, python-format -msgid "Downloading image %s from glance image server" +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:103 +#: cinder/volume/drivers/emc/emc_smis_common.py:137 #, python-format -msgid "Downloaded image %s from glance image server" +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:108 +#: cinder/volume/drivers/emc/emc_smis_common.py:144 #, python-format -msgid "Uploading image %s to the Glance image server" +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:129 -#, python-format -msgid "Uploaded image %s to the Glance image server" +#: cinder/volume/drivers/emc/emc_smis_common.py:152 +msgid "Entering create_volume_from_snapshot." msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:139 +#: cinder/volume/drivers/emc/emc_smis_common.py:157 #, python-format -msgid "Getting image size for the image %s" +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:143 +#: cinder/volume/drivers/emc/emc_smis_common.py:167 #, python-format -msgid "Got image size of %(size)s for the image %(image)s" +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." msgstr "" -#: cinder/virt/xenapi/fake.py:553 cinder/virt/xenapi/fake.py:652 -#: cinder/virt/xenapi/fake.py:670 cinder/virt/xenapi/fake.py:732 -msgid "Raising NotImplemented" -msgstr "NotImplemented 例外を発生させます。" - -#: cinder/virt/xenapi/fake.py:555 +#: cinder/volume/drivers/emc/emc_smis_common.py:177 #, python-format -msgid "xenapi.fake does not have an implementation for %s" -msgstr "xenapi.fake には %s が実装されていません。" +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" -#: cinder/virt/xenapi/fake.py:589 +#: cinder/volume/drivers/emc/emc_smis_common.py:188 #, python-format -msgid "Calling %(localname)s %(impl)s" -msgstr "%(localname)s %(impl)s を呼び出します。" +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." 
+msgstr "" -#: cinder/virt/xenapi/fake.py:594 +#: cinder/volume/drivers/emc/emc_smis_common.py:197 #, python-format -msgid "Calling getter %s" -msgstr "getter %s をコールします。" +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" -#: cinder/virt/xenapi/fake.py:654 +#: cinder/volume/drivers/emc/emc_smis_common.py:218 #, python-format msgid "" -"xenapi.fake does not have an implementation for %s or it has been called " -"with the wrong number of arguments" -msgstr "xenapi.fake に %s に関する実装がないか、引数の数が誤っています。" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" -#: cinder/virt/xenapi/host.py:67 +#: cinder/volume/drivers/emc/emc_smis_common.py:230 #, python-format msgid "" -"Instance %(name)s running on %(host)s could not be found in the database:" -" assuming it is a worker VM and skipping migration to a new host" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." msgstr "" -#: cinder/virt/xenapi/host.py:137 +#: cinder/volume/drivers/emc/emc_smis_common.py:241 #, python-format -msgid "Unable to get SR for this host: %s" +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" msgstr "" -#: cinder/virt/xenapi/host.py:169 -msgid "Unable to get updated status" +#: cinder/volume/drivers/emc/emc_smis_common.py:257 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" msgstr "" -#: cinder/virt/xenapi/host.py:172 +#: cinder/volume/drivers/emc/emc_smis_common.py:266 #, python-format -msgid "The call to %(method)s returned an error: %(e)s." +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" msgstr "" -#: cinder/virt/xenapi/network_utils.py:37 +#: cinder/volume/drivers/emc/emc_smis_common.py:278 #, python-format -msgid "Found non-unique network for name_label %s" +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." msgstr "" -#: cinder/virt/xenapi/network_utils.py:55 -#, python-format -msgid "Found non-unique network for bridge %s" -msgstr "ブリッジ %s に対してブリッジが複数存在します。" +#: cinder/volume/drivers/emc/emc_smis_common.py:287 +msgid "Entering create_cloned_volume." +msgstr "" -#: cinder/virt/xenapi/network_utils.py:58 +#: cinder/volume/drivers/emc/emc_smis_common.py:292 #, python-format -msgid "Found no network for bridge %s" -msgstr "ブリッジ %s に対するネットワークが存在しません。" +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" -#: cinder/virt/xenapi/pool.py:111 +#: cinder/volume/drivers/emc/emc_smis_common.py:302 #, python-format -msgid "Unable to eject %(host)s from the pool; pool not empty" +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." 
msgstr "" -#: cinder/virt/xenapi/pool.py:126 +#: cinder/volume/drivers/emc/emc_smis_common.py:312 #, python-format -msgid "Unable to eject %(host)s from the pool; No master found" +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." msgstr "" -#: cinder/virt/xenapi/pool.py:143 +#: cinder/volume/drivers/emc/emc_smis_common.py:321 #, python-format -msgid "Pool-Join failed: %(e)s" +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" msgstr "" -#: cinder/virt/xenapi/pool.py:146 +#: cinder/volume/drivers/emc/emc_smis_common.py:342 #, python-format -msgid "Unable to join %(host)s in the pool" +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. Return code: %(rc)lu.Error: %(error)s" msgstr "" -#: cinder/virt/xenapi/pool.py:162 +#: cinder/volume/drivers/emc/emc_smis_common.py:354 #, python-format -msgid "Pool-eject failed: %(e)s" +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." msgstr "" -#: cinder/virt/xenapi/pool.py:174 -#, fuzzy, python-format -msgid "Unable to set up pool: %(e)s." -msgstr "ボリューム %s を切断(detach)できません" +#: cinder/volume/drivers/emc/emc_smis_common.py:365 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" -#: cinder/virt/xenapi/pool.py:185 +#: cinder/volume/drivers/emc/emc_smis_common.py:381 #, python-format -msgid "Pool-set_name_label failed: %(e)s" +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" msgstr "" -#: cinder/virt/xenapi/vif.py:103 +#: cinder/volume/drivers/emc/emc_smis_common.py:390 #, python-format -msgid "Found no PIF for device %s" +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" msgstr "" -#: cinder/virt/xenapi/vif.py:122 +#: cinder/volume/drivers/emc/emc_smis_common.py:402 #, python-format msgid "" -"PIF %(pif_rec['uuid'])s for network %(bridge)s has VLAN id %(pif_vlan)d. " -"Expected %(vlan_num)d" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:218 -msgid "Created VM" +#: cinder/volume/drivers/emc/emc_smis_common.py:411 +msgid "Entering delete_volume." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:245 +#: cinder/volume/drivers/emc/emc_smis_common.py:413 #, python-format -msgid "VBD not found in instance %s" -msgstr "インスタンス %s のVBDが見つかりません。" - -#: cinder/virt/xenapi/vm_utils.py:262 -#, fuzzy, python-format -msgid "VBD %s already detached" -msgstr "グループ %s は既に存在しています。" +msgid "Delete Volume: %(volume)s" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:265 +#: cinder/volume/drivers/emc/emc_smis_common.py:420 #, python-format -msgid "VBD %(vbd_ref)s detach rejected, attempt %(num_attempt)d/%(max_attempts)d" +msgid "Volume %(name)s not found on the array. No volume to delete." 
msgstr "" -#: cinder/virt/xenapi/vm_utils.py:270 +#: cinder/volume/drivers/emc/emc_smis_common.py:430 #, python-format -msgid "Unable to unplug VBD %s" -msgstr "VBD %s の unplug に失敗しました。" +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:275 +#: cinder/volume/drivers/emc/emc_smis_common.py:438 #, python-format -msgid "Reached maximum number of retries trying to unplug VBD %s" +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:286 +#: cinder/volume/drivers/emc/emc_smis_common.py:442 #, python-format -msgid "Unable to destroy VBD %s" -msgstr "VBD %s の削除に失敗しました。" - -#: cinder/virt/xenapi/vm_utils.py:305 -#, fuzzy, python-format -msgid "Creating %(vbd_type)s-type VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " -msgstr "VM %(vm_ref)s, VDI %(vdi_ref)s 用仮想ブロックデバイス(VBD) %(vbd_ref)s を作成しました。" +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:308 +#: cinder/volume/drivers/emc/emc_smis_common.py:456 #, python-format -msgid "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s." -msgstr "VM %(vm_ref)s, VDI %(vdi_ref)s 用仮想ブロックデバイス(VBD) %(vbd_ref)s を作成しました。" +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:319 +#: cinder/volume/drivers/emc/emc_smis_common.py:465 #, python-format -msgid "Unable to destroy VDI %s" +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:336 -#, python-format -msgid "" -"Created VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s)" -" on %(sr_ref)s." +#: cinder/volume/drivers/emc/emc_smis_common.py:472 +msgid "Entering create_snapshot." msgstr "" -"%(sr_ref)s 上に VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, " -"%(read_only)s) を作成しました。" -#: cinder/virt/xenapi/vm_utils.py:345 +#: cinder/volume/drivers/emc/emc_smis_common.py:476 #, python-format -msgid "Copied VDI %(vdi_ref)s from VDI %(vdi_to_copy_ref)s on %(sr_ref)s." +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:353 +#: cinder/volume/drivers/emc/emc_smis_common.py:488 #, python-format -msgid "Cloned VDI %(vdi_ref)s from VDI %(vdi_to_clone_ref)s" +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:372 +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:497 +#: cinder/volume/drivers/emc/emc_smis_common.py:567 #, python-format -msgid "No primary VDI found for %(vm_ref)s" +msgid "Cannot find Replication Service to create snapshot for volume %s." 
msgstr "" -#: cinder/virt/xenapi/vm_utils.py:379 -#, fuzzy, python-format -msgid "Snapshotting with label '%(label)s'" -msgstr "ラベル '%(label)s' 付き VM %(vm_ref)s のスナップショットを作成しています…" - -#: cinder/virt/xenapi/vm_utils.py:392 -#, fuzzy, python-format -msgid "Created snapshot %(template_vm_ref)s" -msgstr "VM %(vm_ref)s からスナップショット %(template_vm_ref)s を作成しました。" - -#: cinder/virt/xenapi/vm_utils.py:431 +#: cinder/volume/drivers/emc/emc_smis_common.py:502 #, python-format -msgid "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s" -msgstr "ID %(image_id)s として %(vdi_uuids)s のアップロードの為に xapi を問い合わせしています" +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:583 +#: cinder/volume/drivers/emc/emc_smis_common.py:518 #, python-format -msgid "Creating blank HD of size %(req_size)d gigs" +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:634 +#: cinder/volume/drivers/emc/emc_smis_common.py:527 #, python-format msgid "" -"Fast cloning is only supported on default local SR of type ext. SR on " -"this system was found to be of type %(sr_type)s. Ignoring the cow flag." +"Error Create Snapshot: %(snapshot)s Volume: %(volume)s Error: " +"%(errordesc)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:724 +#: cinder/volume/drivers/emc/emc_smis_common.py:535 #, python-format msgid "" -"download_vhd %(image)s attempt %(attempt_num)d/%(max_attempts)d from " -"%(glance_host)s:%(glance_port)s" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:541 +msgid "Entering delete_snapshot." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:734 +#: cinder/volume/drivers/emc/emc_smis_common.py:545 #, python-format -msgid "download_vhd failed: %r" +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:750 +#: cinder/volume/drivers/emc/emc_smis_common.py:551 #, python-format -msgid "Asking xapi to fetch vhd image %(image)s" +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:760 +#: cinder/volume/drivers/emc/emc_smis_common.py:559 #, python-format msgid "" -"xapi 'download_vhd' returned VDI of type '%(vdi_type)s' with UUID " -"'%(vdi_uuid)s'" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:789 +#: cinder/volume/drivers/emc/emc_smis_common.py:574 #, python-format -msgid "vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes=%(vdi_size_bytes)d" +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." 
msgstr "" -#: cinder/virt/xenapi/vm_utils.py:805 +#: cinder/volume/drivers/emc/emc_smis_common.py:590 #, python-format -msgid "image_size_bytes=%(size_bytes)d, allowed_size_bytes=%(allowed_size_bytes)d" +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:809 +#: cinder/volume/drivers/emc/emc_smis_common.py:599 #, python-format msgid "" -"Image size %(size_bytes)d exceeded instance_type allowed size " -"%(allowed_size_bytes)d" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:831 +#: cinder/volume/drivers/emc/emc_smis_common.py:611 #, python-format -msgid "Fetching image %(image)s, type %(image_type_str)" +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:844 +#: cinder/volume/drivers/emc/emc_smis_common.py:621 #, fuzzy, python-format -msgid "Size for image %(image)s: %(virtual_size)d" -msgstr "イメージ %(image)s のサイズ:%(virtual_size)d" +msgid "Create export: %(volume)s" +msgstr "%s 個のボリュームを再エクスポートします。" + +#: cinder/volume/drivers/emc/emc_smis_common.py:626 +#, fuzzy, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "ボリューム %(volume_id)s をインスタンス %(instance_id)s のデバイス %(device)s に接続" -#: cinder/virt/xenapi/vm_utils.py:853 +#: cinder/volume/drivers/emc/emc_smis_common.py:648 #, python-format msgid "" -"Kernel/Ramdisk image is too large: %(vdi_size)d bytes, max %(max_size)d " -"bytes" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:870 +#: cinder/volume/drivers/emc/emc_smis_common.py:663 #, python-format -msgid "Copying VDI %s to /boot/guest on dom0" -msgstr "ドメイン0 上の /boot/guest に VDI %s をコピー中" +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:884 -#, python-format -msgid "Kernel/Ramdisk VDI %s destroyed" -msgstr "カーネル/RAMディスク VDI %s が削除されました" +#: cinder/volume/drivers/emc/emc_smis_common.py:674 +#, fuzzy, python-format +msgid "Error mapping volume %s." +msgstr "xvp の開始中にエラー: %s" -#: cinder/virt/xenapi/vm_utils.py:895 -msgid "Failed to fetch glance image" -msgstr "" +#: cinder/volume/drivers/emc/emc_smis_common.py:678 +#, fuzzy, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "ボリューム %s の削除に成功しました。" -#: cinder/virt/xenapi/vm_utils.py:934 +#: cinder/volume/drivers/emc/emc_smis_common.py:694 #, python-format -msgid "Detected %(image_type_str)s format for image %(image_ref)s" +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:955 +#: cinder/volume/drivers/emc/emc_smis_common.py:707 #, python-format -msgid "Looking up vdi %s for PV kernel" -msgstr "PV kernelのvdi %s を取得します。" +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:711 +#, fuzzy, python-format +msgid "HidePaths for volume %s completed successfully." 
+msgstr "ボリューム %s の削除に成功しました。" -#: cinder/virt/xenapi/vm_utils.py:973 +#: cinder/volume/drivers/emc/emc_smis_common.py:724 #, python-format -msgid "Unknown image format %(disk_image_type)s" +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1016 +#: cinder/volume/drivers/emc/emc_smis_common.py:739 #, python-format -msgid "VDI %s is still available" -msgstr "VDI %s は依然として存在しています。" +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1059 -#, python-format -msgid "(VM_UTILS) xenserver vm state -> |%s|" -msgstr "(VM_UTILS) xenserver の vm state -> |%s|" +#: cinder/volume/drivers/emc/emc_smis_common.py:744 +#, fuzzy, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "ボリューム %s の削除に成功しました。" -#: cinder/virt/xenapi/vm_utils.py:1061 +#: cinder/volume/drivers/emc/emc_smis_common.py:757 #, python-format -msgid "(VM_UTILS) xenapi power_state -> |%s|" -msgstr "(VM_UTILS) xenapi の power_state -> |%s|" +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1088 +#: cinder/volume/drivers/emc/emc_smis_common.py:770 #, python-format -msgid "Unable to parse rrd of %(vm_uuid)s" +msgid "Error unmapping volume %(vol)s. %(error)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1108 -#, python-format -msgid "Re-scanning SR %s" -msgstr "SR %s を再スキャンします。" +#: cinder/volume/drivers/emc/emc_smis_common.py:775 +#, fuzzy, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "ボリューム %s の削除に成功しました。" -#: cinder/virt/xenapi/vm_utils.py:1136 +#: cinder/volume/drivers/emc/emc_smis_common.py:781 #, python-format -msgid "Flag sr_matching_filter '%s' does not respect formatting convention" +msgid "Map volume: %(volume)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1154 -msgid "" -"XenAPI is unable to find a Storage Repository to install guest instances " -"on. Please check your configuration and/or configure the flag " -"'sr_matching_filter'" +#: cinder/volume/drivers/emc/emc_smis_common.py:790 +#: cinder/volume/drivers/emc/emc_smis_common.py:820 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1167 -msgid "Cannot find SR of content-type ISO" +#: cinder/volume/drivers/emc/emc_smis_common.py:804 +#, python-format +msgid "Unmap volume: %(volume)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1175 +#: cinder/volume/drivers/emc/emc_smis_common.py:810 #, python-format -msgid "ISO: looking at SR %(sr_rec)s" +msgid "Volume %s is not mapped. No volume to unmap." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1177 -msgid "ISO: not iso content" +#: cinder/volume/drivers/emc/emc_smis_common.py:834 +#, python-format +msgid "Initialize connection: %(volume)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1180 -msgid "ISO: iso content_type, no 'i18n-key' key" +#: cinder/volume/drivers/emc/emc_smis_common.py:840 +#, python-format +msgid "Volume %s is already mapped." 
msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1183 -msgid "ISO: iso content_type, i18n-key value not 'local-storage-iso'" +#: cinder/volume/drivers/emc/emc_smis_common.py:852 +#, python-format +msgid "Terminate connection: %(volume)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1187 -msgid "ISO: SR MATCHing our criteria" +#: cinder/volume/drivers/emc/emc_smis_common.py:884 +#, python-format +msgid "Found Storage Type: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1189 -msgid "ISO: ISO, looking to see if it is host local" +#: cinder/volume/drivers/emc/emc_smis_common.py:887 +msgid "Storage type not found." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1192 +#: cinder/volume/drivers/emc/emc_smis_common.py:903 #, python-format -msgid "ISO: PBD %(pbd_ref)s disappeared" +msgid "Found Masking View: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1195 -#, python-format -msgid "ISO: PBD matching, want %(pbd_rec)s, have %(host)s" +#: cinder/volume/drivers/emc/emc_smis_common.py:906 +msgid "Masking View not found." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1198 -msgid "ISO: SR with local PBD" +#: cinder/volume/drivers/emc/emc_smis_common.py:928 +msgid "Ecom user not found." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1220 +#: cinder/volume/drivers/emc/emc_smis_common.py:948 #, python-format -msgid "" -"Unable to obtain RRD XML for VM %(vm_uuid)s with server details: " -"%(server)s." +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1236 -#, python-format -msgid "Unable to obtain RRD XML updates with server details: %(server)s." +#: cinder/volume/drivers/emc/emc_smis_common.py:952 +msgid "Ecom server not found." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1290 +#: cinder/volume/drivers/emc/emc_smis_common.py:959 +#, fuzzy +msgid "Cannot connect to ECOM server" +msgstr "キューに再接続しました。" + +#: cinder/volume/drivers/emc/emc_smis_common.py:971 #, python-format -msgid "Invalid statistics data from Xenserver: %s" +msgid "Found Replication Service: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1343 +#: cinder/volume/drivers/emc/emc_smis_common.py:984 #, python-format -msgid "VHD %(vdi_uuid)s has parent %(parent_ref)s" -msgstr "VHD %(vdi_uuid)s の親は %(parent_ref)s です" +msgid "Found Storage Configuration Service: %s" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1417 +#: cinder/volume/drivers/emc/emc_smis_common.py:997 #, python-format -msgid "" -"Parent %(parent_uuid)s doesn't match original parent " -"%(original_parent_uuid)s, waiting for coalesce..." -msgstr "親 %(parent_uuid)s が元々の親 %(original_parent_uuid)s と一致しません。作成を待機しています…" +msgid "Found Controller Configuration Service: %s" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1427 +#: cinder/volume/drivers/emc/emc_smis_common.py:1010 #, python-format -msgid "VHD coalesce attempts exceeded (%(max_attempts)d), giving up..." +msgid "Found Storage Hardware ID Management Service: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1462 +#: cinder/volume/drivers/emc/emc_smis_common.py:1054 #, python-format -msgid "Timeout waiting for device %s to be created" +msgid "Pool %(storage_type)s is not found." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1473 +#: cinder/volume/drivers/emc/emc_smis_common.py:1060 #, python-format -msgid "Plugging VBD %s ... " -msgstr "VBD %s を接続しています… " +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1476 +#: cinder/volume/drivers/emc/emc_smis_common.py:1066 #, python-format -msgid "Plugging VBD %s done." 
-msgstr "仮想ブロックデバイス(VBD) %s の接続が完了しました。" +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1478 +#: cinder/volume/drivers/emc/emc_smis_common.py:1082 #, python-format -msgid "VBD %(vbd_ref)s plugged as %(orig_dev)s" +msgid "Pool name: %(poolname)s System name: %(systemname)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1481 +#: cinder/volume/drivers/emc/emc_smis_common.py:1114 #, python-format -msgid "VBD %(vbd_ref)s plugged into wrong dev, remapping to %(dev)s" +msgid "Volume %(volumename)s not found on the array." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1490 -#, python-format -msgid "Destroying VBD for VDI %s ... " -msgstr "VDI %s 用の仮想ブロックデバイス(VBD)を削除しています… " +#: cinder/volume/drivers/emc/emc_smis_common.py:1117 +#, fuzzy, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "ボリューム %(volume_id)s をインスタンス %(instance_id)s のデバイス %(device)s に接続" -#: cinder/virt/xenapi/vm_utils.py:1498 +#: cinder/volume/drivers/emc/emc_smis_common.py:1130 #, python-format -msgid "Destroying VBD for VDI %s done." -msgstr "VDI %s 用の仮想ブロックデバイス(VBD)の削除が完了しました。" +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1511 +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 #, python-format -msgid "Running pygrub against %s" -msgstr "%s に対して pygrub を実行しています" +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1518 +#: cinder/volume/drivers/emc/emc_smis_common.py:1158 #, python-format -msgid "Found Xen kernel %s" -msgstr "Xen Kernel %s が見つかりました。" - -#: cinder/virt/xenapi/vm_utils.py:1520 -msgid "No Xen kernel found. Booting HVM." +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." msgstr "" -"No Xen kernel found. Booting HVM.\r\n" -"Xen 用カーネルが見つかりません。完全仮想化モード(HVM)で起動しています。" -#: cinder/virt/xenapi/vm_utils.py:1533 -msgid "Partitions:" +#: cinder/volume/drivers/emc/emc_smis_common.py:1184 +#, python-format +msgid "Error finding %s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1539 +#: cinder/volume/drivers/emc/emc_smis_common.py:1188 #, python-format -msgid " %(num)s: %(ptype)s %(size)d sectors" +msgid "Found %(name)s: %(initiator)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1565 +#: cinder/volume/drivers/emc/emc_smis_common.py:1248 #, python-format msgid "" -"Writing partition table %(primary_first)d %(primary_last)d to " -"%(dev_path)s..." +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1578 -#, python-format -msgid "Writing partition table %s done." -msgstr "パーティションテーブル %s の書き込みが完了しました。" - -#: cinder/virt/xenapi/vm_utils.py:1632 +#: cinder/volume/drivers/emc/emc_smis_common.py:1289 #, python-format msgid "" -"Starting sparse_copy src=%(src_path)s dst=%(dst_path)s " -"virtual_size=%(virtual_size)d block_size=%(block_size)d" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1664 +#: cinder/volume/drivers/emc/emc_smis_common.py:1302 #, python-format msgid "" -"Finished sparse_copy in %(duration).2f secs, %(compression_pct).2f%% " -"reduction in size" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." 
msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1714 +#: cinder/volume/drivers/emc/emc_smis_common.py:1314 +#, python-format msgid "" -"XenServer tools installed in this image are capable of network injection." -" Networking files will not bemanipulated" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1722 +#: cinder/volume/drivers/emc/emc_smis_common.py:1326 +#, python-format msgid "" -"XenServer tools are present in this image but are not capable of network " -"injection" -msgstr "" - -#: cinder/virt/xenapi/vm_utils.py:1726 -msgid "XenServer tools are not installed in this image" -msgstr "" - -#: cinder/virt/xenapi/vm_utils.py:1742 -msgid "Manipulating interface files directly" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1751 +#: cinder/volume/drivers/emc/emc_smis_common.py:1361 #, python-format -msgid "Failed to mount filesystem (expected for non-linux instances): %s" +msgid "Available device number on %(storage)s: %(device)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:131 cinder/virt/xenapi/vmops.py:722 +#: cinder/volume/drivers/emc/emc_smis_common.py:1404 #, python-format -msgid "Updating progress to %(progress)d" +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:231 +#: cinder/volume/drivers/emc/emc_smis_common.py:1409 #, python-format -msgid "Attempted to power on non-existent instance bad instance id %s" -msgstr "" - -#: cinder/virt/xenapi/vmops.py:233 -#, fuzzy -msgid "Starting instance" -msgstr "raw instanceを生成します。" - -#: cinder/virt/xenapi/vmops.py:303 -msgid "Removing kernel/ramdisk files from dom0" -msgstr "" - -#: cinder/virt/xenapi/vmops.py:358 -msgid "Failed to spawn, rolling back" -msgstr "" - -#: cinder/virt/xenapi/vmops.py:443 -msgid "Detected ISO image type, creating blank VM for install" -msgstr "" - -#: cinder/virt/xenapi/vmops.py:462 -msgid "Auto configuring disk, attempting to resize partition..." +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:515 +#: cinder/volume/drivers/emc/emc_smis_common.py:1419 #, python-format -msgid "Invalid value for injected_files: %r" +msgid "Device info: %(data)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:520 +#: cinder/volume/drivers/emc/emc_smis_common.py:1441 #, python-format -msgid "Injecting file path: '%s'" -msgstr "ファイルパス '%s' を埋め込んでいます" - -#: cinder/virt/xenapi/vmops.py:527 -msgid "Setting admin password" +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:531 -msgid "Resetting network" -msgstr "" - -#: cinder/virt/xenapi/vmops.py:538 -msgid "Setting VCPU weight" +#: cinder/volume/drivers/emc/emc_smis_common.py:1463 +#, python-format +msgid "Found Storage Processor System: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:544 -#, fuzzy -msgid "Starting VM" -msgstr "xvp を再起動しています" - -#: cinder/virt/xenapi/vmops.py:551 +#: cinder/volume/drivers/emc/emc_smis_common.py:1491 #, python-format msgid "" -"Latest agent build for %(hypervisor)s/%(os)s/%(architecture)s is " -"%(version)s" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." 
msgstr "" -#: cinder/virt/xenapi/vmops.py:554 -#, python-format -msgid "No agent build found for %(hypervisor)s/%(os)s/%(architecture)s" +#: cinder/volume/drivers/emc/emc_smis_common.py:1520 +msgid "Error finding Storage Hardware ID Service." msgstr "" -#: cinder/virt/xenapi/vmops.py:561 -msgid "Waiting for instance state to become running" +#: cinder/volume/drivers/emc/emc_smis_common.py:1526 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:573 -msgid "Querying agent version" +#: cinder/volume/drivers/emc/emc_smis_common.py:1538 +msgid "Error finding Target WWNs." msgstr "" -#: cinder/virt/xenapi/vmops.py:576 +#: cinder/volume/drivers/emc/emc_smis_common.py:1548 #, python-format -msgid "Instance agent version: %s" +msgid "Add target WWN: %s." msgstr "" -#: cinder/virt/xenapi/vmops.py:581 +#: cinder/volume/drivers/emc/emc_smis_common.py:1550 #, python-format -msgid "Updating Agent to %s" +msgid "Target WWNs: %s." msgstr "" -#: cinder/virt/xenapi/vmops.py:616 +#: cinder/volume/drivers/emc/emc_smis_common.py:1566 #, python-format -msgid "No opaque_ref could be determined for '%s'." +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:670 -#, fuzzy, python-format -msgid "Finished snapshot and upload for VM" -msgstr "VM %s のスナップショットとアップロードが完了しました。" +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:152 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "ボリューム %s 用の iSCSI エクスポートが見つかりません" -#: cinder/virt/xenapi/vmops.py:677 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:161 #, fuzzy, python-format -msgid "Starting snapshot for VM" -msgstr "VM %s に対するスナップショットを開始します。" +msgid "Cannot find device number for volume %s" +msgstr "ボリューム %s 用の iSCSI エクスポートが見つかりません" -#: cinder/virt/xenapi/vmops.py:686 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:191 #, fuzzy, python-format -msgid "Unable to Snapshot instance: %(exc)s" -msgstr "インスタンス %s にボリュームを接続(attach)できません。" +msgid "Found iSCSI endpoint: %s" +msgstr "NotFound 発生: %s" -#: cinder/virt/xenapi/vmops.py:702 -msgid "Failed to transfer vhd to new host" +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:198 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." 
msgstr "" -#: cinder/virt/xenapi/vmops.py:770 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:215 #, python-format -msgid "Resizing down VDI %(cow_uuid)s from %(old_gb)dGB to %(new_gb)dGB" +msgid "ISCSI properties: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:893 +#: cinder/volume/drivers/hds/hds.py:70 #, python-format -msgid "Resizing up VDI %(vdi_uuid)s from %(old_gb)dGB to %(new_gb)dGB" +msgid "Range: start LU: %(start)s, end LU: %(end)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:901 -msgid "Resize complete" +#: cinder/volume/drivers/hds/hds.py:84 +#, python-format +msgid "setting LU upper (end) limit to %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:928 +#: cinder/volume/drivers/hds/hds.py:92 #, python-format -msgid "Failed to query agent version: %(resp)r" +msgid "%(element)s: %(val)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:949 +#: cinder/volume/drivers/hds/hds.py:103 cinder/volume/drivers/hds/hds.py:105 #, python-format -msgid "domid changed from %(domid)s to %(newdomid)s" +msgid "XML exception reading parameter: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:962 +#: cinder/volume/drivers/hds/hds.py:178 #, python-format -msgid "Failed to update agent: %(resp)r" +msgid "portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:983 +#: cinder/volume/drivers/hds/hds.py:197 #, python-format -msgid "Failed to exchange keys: %(resp)r" +msgid "No configuration found for service: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:998 +#: cinder/volume/drivers/hds/hds.py:250 +#, fuzzy, python-format +msgid "HDP not found: %s" +msgstr "Sheepdog が動作していません: %s" + +#: cinder/volume/drivers/hds/hds.py:289 #, python-format -msgid "Failed to update password: %(resp)r" +msgid "iSCSI portal not found for service: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1023 +#: cinder/volume/drivers/hds/hds.py:327 #, python-format -msgid "Failed to inject file: %(resp)r" +msgid "LUN %(lun)s of size %(sz)s MB is created." msgstr "" -#: cinder/virt/xenapi/vmops.py:1032 -msgid "VM already halted, skipping shutdown..." +#: cinder/volume/drivers/hds/hds.py:355 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is cloned." msgstr "" -#: cinder/virt/xenapi/vmops.py:1036 -msgid "Shutting down VM" +#: cinder/volume/drivers/hds/hds.py:372 +#, python-format +msgid "LUN %(lun)s extended to %(size)s GB." msgstr "" -#: cinder/virt/xenapi/vmops.py:1054 -msgid "Unable to find VBD for VM" +#: cinder/volume/drivers/hds/hds.py:395 +#, python-format +msgid "delete lun %(lun)s on %(name)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1097 -msgid "Using RAW or VHD, skipping kernel and ramdisk deletion" +#: cinder/volume/drivers/hds/hds.py:480 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created from snapshot." msgstr "" -#: cinder/virt/xenapi/vmops.py:1104 -msgid "instance has a kernel or ramdisk but not both" +#: cinder/volume/drivers/hds/hds.py:503 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is created as snapshot." msgstr "" -#: cinder/virt/xenapi/vmops.py:1111 -msgid "kernel/ramdisk files removed" -msgstr "カーネル/RAMディスクファイルが削除されました" - -#: cinder/virt/xenapi/vmops.py:1121 -msgid "VM destroyed" +#: cinder/volume/drivers/hds/hds.py:522 +#, python-format +msgid "LUN %s is deleted." msgstr "" -#: cinder/virt/xenapi/vmops.py:1147 -#, fuzzy -msgid "Destroying VM" -msgstr "xvp を再起動しています" - -#: cinder/virt/xenapi/vmops.py:1169 -msgid "VM is not present, skipping destroy..." 
+#: cinder/volume/drivers/huawei/__init__.py:57
+msgid "_instantiate_driver: configuration not found."
msgstr ""

-#: cinder/virt/xenapi/vmops.py:1222
+#: cinder/volume/drivers/huawei/__init__.py:64
#, python-format
-msgid "Instance is already in Rescue Mode: %s"
+msgid ""
+"_instantiate_driver: Loading %(protocol)s driver for Huawei OceanStor "
+"%(product)s series storage arrays."
msgstr ""

-#: cinder/virt/xenapi/vmops.py:1296
+#: cinder/volume/drivers/huawei/__init__.py:84
#, python-format
-msgid "Found %(instance_count)d hung reboots older than %(timeout)d seconds"
+msgid ""
+"\"Product\" or \"Protocol\" is illegal. \"Product\" should be set to "
+"either T, Dorado or HVS. \"Protocol\" should be set to either iSCSI or "
+"FC. Product: %(product)s Protocol: %(protocol)s"
msgstr ""

-#: cinder/virt/xenapi/vmops.py:1300
-msgid "Automatically hard rebooting"
+#: cinder/volume/drivers/huawei/huawei_dorado.py:74
+#, python-format
+msgid ""
+"initialize_connection: volume name: %(vol)s host: %(host)s initiator: "
+"%(wwn)s"
msgstr ""

-#: cinder/virt/xenapi/vmops.py:1363
+#: cinder/volume/drivers/huawei/huawei_dorado.py:92
+#: cinder/volume/drivers/huawei/huawei_t.py:461
#, python-format
-msgid "Setting migration %(migration_id)s to error: %(reason)s"
+msgid "initialize_connection: Target FC ports WWNS: %s"
msgstr ""

-#: cinder/virt/xenapi/vmops.py:1374
+#: cinder/volume/drivers/huawei/huawei_t.py:101
#, python-format
msgid ""
-"Automatically confirming migration %(migration_id)s for instance "
-"%(instance_uuid)s"
+"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: "
+"%(ini)s"
msgstr ""

-#: cinder/virt/xenapi/vmops.py:1379
+#: cinder/volume/drivers/huawei/huawei_t.py:159
+#: cinder/volume/drivers/huawei/rest_common.py:1278
#, python-format
-msgid "Instance %(instance_uuid)s not found"
+msgid ""
+"_get_iscsi_params: Failed to get target IP for initiator %(ini)s, please "
+"check config file."
msgstr ""

-#: cinder/virt/xenapi/vmops.py:1383
-msgid "In ERROR state"
+#: cinder/volume/drivers/huawei/huawei_t.py:206
+#: cinder/volume/drivers/huawei/rest_common.py:1083
+#, python-format
+msgid "_get_tgt_iqn: iSCSI IP is %s."
msgstr ""

-#: cinder/virt/xenapi/vmops.py:1389
+#: cinder/volume/drivers/huawei/huawei_t.py:234
#, python-format
-msgid "In %(task_state)s task_state, not RESIZE_VERIFY"
+msgid "_get_tgt_iqn: iSCSI target iqn is %s."
msgstr ""

-#: cinder/virt/xenapi/vmops.py:1396
+#: cinder/volume/drivers/huawei/huawei_t.py:248
#, python-format
-msgid "Error auto-confirming resize: %(e)s. Will retry later."
+msgid ""
+"_get_iscsi_tgt_port_info: Failed to get iSCSI port info. Please make sure"
+" the iSCSI port IP %s is configured in array."
msgstr ""

-#: cinder/virt/xenapi/vmops.py:1418
-msgid "Could not get bandwidth info."
+#: cinder/volume/drivers/huawei/huawei_t.py:323
+#: cinder/volume/drivers/huawei/huawei_t.py:552
+#, python-format
+msgid ""
+"terminate_connection: volume: %(vol)s, host: %(host)s, connector: "
+"%(initiator)s"
msgstr ""

-#: cinder/virt/xenapi/vmops.py:1469
-#, fuzzy
-msgid "Injecting network info to xenstore"
-msgstr "ネットワークホストの設定をします。"
-
-#: cinder/virt/xenapi/vmops.py:1483
-msgid "Creating vifs"
+#: cinder/volume/drivers/huawei/huawei_t.py:351
+#, python-format
+msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s."
msgstr "" -#: cinder/virt/xenapi/vmops.py:1492 -#, fuzzy, python-format -msgid "Creating VIF for network %(network_ref)s" -msgstr "VM %(vm_ref)s, network %(network_ref)s 用仮想インターフェース(VIF)を作成しています。" - -#: cinder/virt/xenapi/vmops.py:1495 -#, fuzzy, python-format -msgid "Created VIF %(vif_ref)s, network %(network_ref)s" -msgstr "VM %(vm_ref)s, network %(network_ref)s 用仮想インターフェース(VIF)を作成しています。" - -#: cinder/virt/xenapi/vmops.py:1520 -msgid "Injecting hostname to xenstore" +#: cinder/volume/drivers/huawei/huawei_t.py:436 +msgid "validate_connector: The FC driver requires thewwpns in the connector." msgstr "" -#: cinder/virt/xenapi/vmops.py:1545 +#: cinder/volume/drivers/huawei/huawei_t.py:443 #, python-format msgid "" -"The agent call to %(method)s returned an invalid response: %(ret)r. " -"path=%(path)s; args=%(args)r" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(wwn)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1566 +#: cinder/volume/drivers/huawei/huawei_t.py:578 #, python-format -msgid "TIMEOUT: The call to %(method)s timed out. args=%(args)r" +msgid "_remove_fc_ports: FC port was not found on host %(hostid)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1570 +#: cinder/volume/drivers/huawei/huawei_utils.py:40 #, python-format -msgid "" -"NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. " -"args=%(args)r" +msgid "parse_xml_file: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1575 +#: cinder/volume/drivers/huawei/huawei_utils.py:129 #, python-format -msgid "The call to %(method)s returned an error: %(e)s. args=%(args)r" +msgid "_get_host_os_type: Host %(ip)s OS type is %(os)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1661 +#: cinder/volume/drivers/huawei/rest_common.py:59 #, python-format -msgid "OpenSSL error: %s" -msgstr "OpenSSL エラー: %s" - -#: cinder/virt/xenapi/volume_utils.py:52 -msgid "creating sr within volume_utils" +msgid "HVS Request URL: %(url)s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:55 cinder/virt/xenapi/volume_utils.py:83 +#: cinder/volume/drivers/huawei/rest_common.py:60 #, python-format -msgid "type is = %s" +msgid "HVS Request Data: %(data)s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:58 cinder/virt/xenapi/volume_utils.py:86 +#: cinder/volume/drivers/huawei/rest_common.py:73 #, python-format -msgid "name = %s" +msgid "HVS Response Data: %(res)s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:71 +#: cinder/volume/drivers/huawei/rest_common.py:75 #, python-format -msgid "Created %(label)s as %(sr_ref)s." +msgid "Bad response from server: %s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:76 cinder/virt/xenapi/volume_utils.py:174 -msgid "Unable to create Storage Repository" -msgstr "Storage Repository を作成できません。" - -#: cinder/virt/xenapi/volume_utils.py:80 -msgid "introducing sr within volume_utils" +#: cinder/volume/drivers/huawei/rest_common.py:82 +msgid "JSON transfer error" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:103 cinder/virt/xenapi/volume_utils.py:170 -#: cinder/virt/xenapi/volumeops.py:156 +#: cinder/volume/drivers/huawei/rest_common.py:102 #, python-format -msgid "Introduced %(label)s as %(sr_ref)s." 
-msgstr "%(sr_ref)s として %(label)s を導入しました" +msgid "Login error, reason is %s" +msgstr "" -#: cinder/virt/xenapi/volume_utils.py:106 -msgid "Creating pbd for SR" +#: cinder/volume/drivers/huawei/rest_common.py:166 +#, python-format +msgid "" +"%(err)s\n" +"result: %(res)s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:108 -msgid "Plugging SR" +#: cinder/volume/drivers/huawei/rest_common.py:173 +#, python-format +msgid "%s \"data\" was not in result." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:116 cinder/virt/xenapi/volumeops.py:160 -msgid "Unable to introduce Storage Repository" +#: cinder/volume/drivers/huawei/rest_common.py:208 +msgid "Can't find the Qos policy in array" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:127 cinder/virt/xenapi/volumeops.py:50 -msgid "Unable to get SR using uuid" +#: cinder/volume/drivers/huawei/rest_common.py:246 +msgid "Can't find lun or lun group in array" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:129 +#: cinder/volume/drivers/huawei/rest_common.py:280 #, python-format -msgid "Forgetting SR %s..." +msgid "Invalid resource pool: %s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:137 -msgid "Unable to forget Storage Repository" +#: cinder/volume/drivers/huawei/rest_common.py:298 +#, python-format +msgid "Get pool info error, pool name is:%s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:157 +#: cinder/volume/drivers/huawei/rest_common.py:327 #, python-format -msgid "Introducing %s..." -msgstr "%s を introduce します…" +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" -#: cinder/virt/xenapi/volume_utils.py:186 +#: cinder/volume/drivers/huawei/rest_common.py:354 #, python-format -msgid "Unable to find SR from VBD %s" -msgstr "VBD %s から SRを取得できません。" +msgid "_stop_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" -#: cinder/virt/xenapi/volume_utils.py:204 +#: cinder/volume/drivers/huawei/rest_common.py:474 #, python-format -msgid "Ignoring exception %(exc)s when getting PBDs for %(sr_ref)s" -msgstr "%(sr_ref)s 用の物理ブロックデバイス(PBD)取得時に例外 %(exc)s を無視しています" +msgid "" +"_mapping_hostgroup_and_lungroup: lun_group: %(lun_group)sview_id: " +"%(view_id)s" +msgstr "" -#: cinder/virt/xenapi/volume_utils.py:210 +#: cinder/volume/drivers/huawei/rest_common.py:511 +#: cinder/volume/drivers/huawei/rest_common.py:543 #, python-format -msgid "Ignoring exception %(exc)s when unplugging PBD %(pbd)s" -msgstr "物理ブロックデバイス(PBD) %(pbd)s の取り外し時に例外 %(exc)s を無視しています" +msgid "initiator name:%(initiator_name)s, volume name:%(volume)s." +msgstr "" -#: cinder/virt/xenapi/volume_utils.py:234 +#: cinder/volume/drivers/huawei/rest_common.py:527 #, python-format -msgid "Unable to introduce VDI on SR %s" -msgstr "SR %s のVDIのintroduceができません。" +msgid "host lun id is %s" +msgstr "" -#: cinder/virt/xenapi/volume_utils.py:242 +#: cinder/volume/drivers/huawei/rest_common.py:553 #, python-format -msgid "Unable to get record of VDI %s on" -msgstr "VDI %s のレコードを取得できません。" +msgid "the free wwns %s" +msgstr "" -#: cinder/virt/xenapi/volume_utils.py:264 +#: cinder/volume/drivers/huawei/rest_common.py:574 #, python-format -msgid "Unable to introduce VDI for SR %s" -msgstr "SR %s のVDIをintroduceできません。" +msgid "the fc server properties is:%s" +msgstr "" -#: cinder/virt/xenapi/volume_utils.py:274 +#: cinder/volume/drivers/huawei/rest_common.py:688 #, python-format -msgid "Error finding vdis in SR %s" +msgid "JSON transfer data error. 
%s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:281 +#: cinder/volume/drivers/huawei/rest_common.py:874 #, python-format -msgid "Unable to find vbd for vdi %s" +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:315 +#: cinder/volume/drivers/huawei/rest_common.py:937 #, python-format -msgid "Unable to obtain target information %(data)s, %(mountpoint)s" +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(fetchtype)s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:341 +#: cinder/volume/drivers/huawei/rest_common.py:964 #, python-format -msgid "Mountpoint cannot be translated: %s" -msgstr "マウントポイントを変換できません。 %s" +msgid "" +"PrefetchType config is wrong. PrefetchType must in 1,2,3,4. fetchtype " +"is:%(fetchtype)s" +msgstr "" -#: cinder/virt/xenapi/volumeops.py:64 -msgid "Could not find VDI ref" +#: cinder/volume/drivers/huawei/rest_common.py:970 +msgid "Use default prefetch fetchtype. Prefetch fetchtype:Intelligent." msgstr "" -#: cinder/virt/xenapi/volumeops.py:69 +#: cinder/volume/drivers/huawei/rest_common.py:982 #, python-format -msgid "Creating SR %s" +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal.LUNcopy name: " +"%(luncopyname)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:73 -msgid "Could not create SR" +#: cinder/volume/drivers/huawei/rest_common.py:1056 +#, python-format +msgid "" +"_get_iscsi_port_info: Failed to get iscsi port info through config IP " +"%(ip)s, please check config file." msgstr "" -#: cinder/virt/xenapi/volumeops.py:76 -msgid "Could not retrieve SR record" +#: cinder/volume/drivers/huawei/rest_common.py:1101 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:81 +#: cinder/volume/drivers/huawei/rest_common.py:1124 #, python-format -msgid "Introducing SR %s" +msgid "_parse_volume_type: type id: %(type_id)s config parameter is: %(params)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:85 -msgid "SR found in xapi database. No need to introduce" +#: cinder/volume/drivers/huawei/rest_common.py:1157 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the configuration file " +"%(conf)s." msgstr "" -#: cinder/virt/xenapi/volumeops.py:90 -msgid "Could not introduce SR" +#: cinder/volume/drivers/huawei/rest_common.py:1162 +#, python-format +msgid "The config parameters are: %s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:94 +#: cinder/volume/drivers/huawei/rest_common.py:1239 +#: cinder/volume/drivers/huawei/ssh_common.py:118 +#: cinder/volume/drivers/huawei/ssh_common.py:1265 #, python-format -msgid "Checking for SR %s" +msgid "_check_conf_file: Config file invalid. %s must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1246 +#: cinder/volume/drivers/huawei/ssh_common.py:125 +msgid "_check_conf_file: Config file invalid. StoragePool must be set." msgstr "" -#: cinder/virt/xenapi/volumeops.py:106 +#: cinder/volume/drivers/huawei/rest_common.py:1256 #, python-format -msgid "SR %s not found in the xapi database" +msgid "" +"_check_conf_file: Config file invalid. 
Host OSType invalid.\n" +"The valid values are: %(os_list)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:112 -msgid "Could not forget SR" +#: cinder/volume/drivers/huawei/rest_common.py:1300 +msgid "Can not find lun in array" msgstr "" -#: cinder/virt/xenapi/volumeops.py:121 +#: cinder/volume/drivers/huawei/ssh_common.py:54 #, python-format -msgid "Attach_volume: %(connection_info)s, %(instance_name)s, %(mountpoint)s" +msgid "ssh_read: Read SSH timeout. %s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:178 -#, python-format -msgid "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s" -msgstr "インスタンス %(instance_name)s 用のSR %(sr_ref)s における VDI を作成できません" +#: cinder/volume/drivers/huawei/ssh_common.py:70 +msgid "No response message. Please check system status." +msgstr "" -#: cinder/virt/xenapi/volumeops.py:189 -#, python-format -msgid "Unable to use SR %(sr_ref)s for instance %(instance_name)s" -msgstr "インスタンス %(instance_name)s 用のSR %(sr_ref)s が使用できません" +#: cinder/volume/drivers/huawei/ssh_common.py:101 +#: cinder/volume/drivers/huawei/ssh_common.py:1249 +msgid "do_setup" +msgstr "" -#: cinder/virt/xenapi/volumeops.py:197 +#: cinder/volume/drivers/huawei/ssh_common.py:135 +#: cinder/volume/drivers/huawei/ssh_common.py:1287 #, python-format -msgid "Unable to attach volume to instance %s" -msgstr "インスタンス %s にボリュームを接続(attach)できません。" +msgid "" +"_check_conf_file: Config file invalid. Host OSType is invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" -#: cinder/virt/xenapi/volumeops.py:200 +#: cinder/volume/drivers/huawei/ssh_common.py:169 #, python-format -msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s" -msgstr "インスタンス %(instance_name)s にマウントポイント %(mountpoint)s を接続(attach)しました" +msgid "_get_login_info: %s" +msgstr "" -#: cinder/virt/xenapi/volumeops.py:210 +#: cinder/volume/drivers/huawei/ssh_common.py:224 #, python-format -msgid "Detach_volume: %(instance_name)s, %(mountpoint)s" -msgstr "ボリューム切断: %(instance_name)s, %(mountpoint)s" +msgid "create_volume: volume name: %s" +msgstr "" -#: cinder/virt/xenapi/volumeops.py:219 +#: cinder/volume/drivers/huawei/ssh_common.py:242 #, python-format -msgid "Unable to locate volume %s" -msgstr "ボリューム %s の存在が確認できません。" +msgid "" +"_name_translate: Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" -#: cinder/virt/xenapi/volumeops.py:227 +#: cinder/volume/drivers/huawei/ssh_common.py:279 #, python-format -msgid "Unable to detach volume %s" -msgstr "ボリューム %s を切断(detach)できません" +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the element in configuration " +"file %(conf)s." +msgstr "" -#: cinder/virt/xenapi/volumeops.py:232 +#: cinder/volume/drivers/huawei/ssh_common.py:373 +#: cinder/volume/drivers/huawei/ssh_common.py:1451 #, python-format -msgid "Unable to destroy vbd %s" +msgid "LUNType must be \"Thin\" or \"Thick\". LUNType:%(type)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:239 -#, python-format -msgid "Error purging SR %s" +#: cinder/volume/drivers/huawei/ssh_common.py:395 +msgid "" +"_parse_conf_lun_params: Use default prefetch type. 
Prefetch type: " +"Intelligent" msgstr "" -#: cinder/virt/xenapi/volumeops.py:241 +#: cinder/volume/drivers/huawei/ssh_common.py:421 #, python-format -msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s" -msgstr "インスタンス %(instance_name)s からマウントポイント %(mountpoint)s を切断(detach)しました" +msgid "" +"_get_maximum_capacity_pool_id: Failed to get pool id. Please check config" +" file and make sure the StoragePool %s is created in storage array." +msgstr "" -#: cinder/vnc/xvp_proxy.py:98 cinder/vnc/xvp_proxy.py:103 +#: cinder/volume/drivers/huawei/ssh_common.py:436 #, python-format -msgid "Error in handshake: %s" +msgid "CLI command: %s" msgstr "" -#: cinder/vnc/xvp_proxy.py:119 +#: cinder/volume/drivers/huawei/ssh_common.py:466 #, python-format -msgid "Invalid request: %s" +msgid "" +"_execute_cli: Can not connect to IP %(old)s, try to connect to the other " +"IP %(new)s." msgstr "" -#: cinder/vnc/xvp_proxy.py:139 +#: cinder/volume/drivers/huawei/ssh_common.py:501 #, python-format -msgid "Request: %s" +msgid "_execute_cli: %s" msgstr "" -#: cinder/vnc/xvp_proxy.py:142 +#: cinder/volume/drivers/huawei/ssh_common.py:511 #, python-format -msgid "Request made with missing token: %s" +msgid "delete_volume: volume name: %s" msgstr "" -#: cinder/vnc/xvp_proxy.py:153 +#: cinder/volume/drivers/huawei/ssh_common.py:516 #, python-format -msgid "Request made with invalid token: %s" +msgid "delete_volume: Volume %(name)s does not exist." msgstr "" -#: cinder/vnc/xvp_proxy.py:160 +#: cinder/volume/drivers/huawei/ssh_common.py:570 #, python-format -msgid "Unexpected error: %s" +msgid "" +"create_volume_from_snapshot: snapshot name: %(snapshot)s, volume name: " +"%(volume)s" msgstr "" -#: cinder/vnc/xvp_proxy.py:180 +#: cinder/volume/drivers/huawei/ssh_common.py:580 #, python-format -msgid "Starting cinder-xvpvncproxy node (version %s)" +msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." msgstr "" -#: cinder/volume/api.py:74 cinder/volume/api.py:220 -msgid "status must be available" +#: cinder/volume/drivers/huawei/ssh_common.py:650 +#, python-format +msgid "_wait_for_luncopy: LUNcopy %(luncopyname)s status is %(status)s." msgstr "" -#: cinder/volume/api.py:85 +#: cinder/volume/drivers/huawei/ssh_common.py:688 #, python-format -msgid "Quota exceeded for %(pid)s, tried to create %(size)sG volume" +msgid "create_cloned_volume: src volume: %(src)s, tgt volume: %(tgt)s" msgstr "" -#: cinder/volume/api.py:137 -#, fuzzy -msgid "Volume status must be available or error" -msgstr "ボリュームのステータス(status)は available でなければなりません。" +#: cinder/volume/drivers/huawei/ssh_common.py:697 +#, python-format +msgid "Source volume %(name)s does not exist." +msgstr "" -#: cinder/volume/api.py:142 +#: cinder/volume/drivers/huawei/ssh_common.py:739 #, python-format -msgid "Volume still has %d dependent snapshots" +msgid "" +"extend_volume: extended volume name: %(extended_name)s new added volume " +"name: %(added_name)s new added volume size: %(added_size)s" msgstr "" -#: cinder/volume/api.py:223 -msgid "already attached" +#: cinder/volume/drivers/huawei/ssh_common.py:747 +#, python-format +msgid "extend_volume: volume %s does not exist." 
msgstr "" -#: cinder/volume/api.py:230 -msgid "already detached" +#: cinder/volume/drivers/huawei/ssh_common.py:779 +#, python-format +msgid "create_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" msgstr "" -#: cinder/volume/api.py:292 -msgid "must be available" +#: cinder/volume/drivers/huawei/ssh_common.py:785 +msgid "create_snapshot: Resource pool needs 1GB valid size at least." msgstr "" -#: cinder/volume/api.py:325 -#, fuzzy -msgid "Volume Snapshot status must be available or error" -msgstr "ボリュームのステータス(status)は available でなければなりません。" +#: cinder/volume/drivers/huawei/ssh_common.py:792 +#, python-format +msgid "create_snapshot: Volume %(name)s does not exist." +msgstr "" -#: cinder/volume/driver.py:96 +#: cinder/volume/drivers/huawei/ssh_common.py:855 #, python-format -msgid "Recovering from a failed execute. Try number %s" -msgstr "実行失敗からリカバリーします。%s 回目のトライ。" +msgid "delete_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" -#: cinder/volume/driver.py:106 +#: cinder/volume/drivers/huawei/ssh_common.py:865 #, python-format -msgid "volume group %s doesn't exist" -msgstr "ボリュームグループ%sが存在しません。" +msgid "" +"delete_snapshot: Can not delete snapshot %s for it is a source LUN of " +"LUNCopy." +msgstr "" -#: cinder/volume/driver.py:270 +#: cinder/volume/drivers/huawei/ssh_common.py:873 #, python-format -msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %d" +msgid "delete_snapshot: Snapshot %(snap)s does not exist." msgstr "" -#: cinder/volume/driver.py:318 +#: cinder/volume/drivers/huawei/ssh_common.py:916 #, python-format -msgid "Skipping remove_export. No iscsi_target provisioned for volume: %d" +msgid "" +"%(func)s: %(msg)s\n" +"CLI command: %(cmd)s\n" +"CLI out: %(out)s" msgstr "" -#: cinder/volume/driver.py:327 +#: cinder/volume/drivers/huawei/ssh_common.py:933 +#, python-format +msgid "map_volume: Volume %s was not found." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1079 +#, python-format +msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1102 +#, python-format +msgid "remove_map: Host %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1106 +#, python-format +msgid "remove_map: Volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1119 +#, python-format +msgid "remove_map: No map between host %(host)s and volume %(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1138 #, python-format msgid "" -"Skipping remove_export. No iscsi_target is presently exported for volume:" -" %d" +"_delete_map: There are IOs accessing the system. Retry to delete host map" +" %(mapid)s 10s later." msgstr "" -#: cinder/volume/driver.py:337 -msgid "ISCSI provider_location not stored, using discovery" +#: cinder/volume/drivers/huawei/ssh_common.py:1146 +#, python-format +msgid "" +"_delete_map: Failed to delete host map %(mapid)s.\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1185 +msgid "_update_volume_stats: Updating volume stats." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1277 +msgid "_check_conf_file: Config file invalid. StoragePool must be specified." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1311 +msgid "" +"_get_device_type: The driver only supports Dorado5100 and Dorado 2100 G2 " +"now." 
msgstr "" -#: cinder/volume/driver.py:384 +#: cinder/volume/drivers/huawei/ssh_common.py:1389 #, python-format -msgid "Could not find iSCSI export for volume %s" -msgstr "ボリューム %s 用の iSCSI エクスポートが見つかりません" +msgid "" +"create_volume_from_snapshot: %(device)s does not support create volume " +"from snapshot." +msgstr "" -#: cinder/volume/driver.py:388 +#: cinder/volume/drivers/huawei/ssh_common.py:1396 #, python-format -msgid "ISCSI Discovery: Found %s" +msgid "create_cloned_volume: %(device)s does not support clone volume." msgstr "" -#: cinder/volume/driver.py:466 +#: cinder/volume/drivers/huawei/ssh_common.py:1404 #, python-format -msgid "Cannot confirm exported volume id:%(volume_id)s." +msgid "extend_volume: %(device)s does not support extend volume." msgstr "" -#: cinder/volume/driver.py:493 +#: cinder/volume/drivers/huawei/ssh_common.py:1413 #, python-format -msgid "FAKE ISCSI: %s" -msgstr "偽のISCSI: %s" +msgid "create_snapshot: %(device)s does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:132 +msgid "enter: do_setup" +msgstr "" -#: cinder/volume/driver.py:505 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:142 #, python-format -msgid "rbd has no pool %s" -msgstr "rbd にプール %s がありません。" +msgid "Failed getting details for pool %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:179 +msgid "do_setup: No configured nodes." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:182 +msgid "leave: do_setup" +msgstr "" -#: cinder/volume/driver.py:579 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:186 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:190 +msgid "Unable to determine system name" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:193 +msgid "Unable to determine system id" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:196 +msgid "Unable to determine pool extent size" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:203 +#: cinder/volume/drivers/netapp/iscsi.py:122 +#: cinder/volume/drivers/netapp/nfs.py:639 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:157 #, python-format -msgid "Sheepdog is not working: %s" -msgstr "Sheepdog が動作していません: %s" +msgid "%s is not set" +msgstr "" -#: cinder/volume/driver.py:581 -msgid "Sheepdog is not working" -msgstr "Sheepdog が機能していません" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:209 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" -#: cinder/volume/driver.py:680 cinder/volume/driver.py:685 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:217 #, python-format -msgid "LoggingVolumeDriver: %s" +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:225 +msgid "leave: check_for_setup_error" msgstr "" -#: cinder/volume/manager.py:96 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:235 #, python-format -msgid "Re-exporting %s volumes" -msgstr "%s 個のボリュームを再エクスポートします。" +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:254 +msgid "The connector does not contain the required information." 
+msgstr "" -#: cinder/volume/manager.py:101 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:277 #, python-format -msgid "volume %s: skipping export" -msgstr "ボリューム %s のエキスポートをスキップします。" +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" -#: cinder/volume/manager.py:107 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:297 +msgid "CHAP secret exists for host but CHAP is disabled" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:302 #, python-format -msgid "volume %s: creating" -msgstr "ボリューム%sを作成します。" +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" -#: cinder/volume/manager.py:119 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:314 #, python-format -msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" -msgstr "ボリューム %(vol_name)s: サイズ %(vol_size)sG のlvを作成します。" +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" -#: cinder/volume/manager.py:131 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:316 #, python-format -msgid "volume %s: creating export" -msgstr "ボリューム %s をエクスポートします。" +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" -#: cinder/volume/manager.py:144 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:333 #, python-format -msgid "volume %s: created successfully" -msgstr "ボリューム %s の作成に成功しました。" +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" -#: cinder/volume/manager.py:153 -msgid "Volume is still attached" -msgstr "ボリュームはアタッチされたままです。" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:342 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" -#: cinder/volume/manager.py:155 -msgid "Volume is not local to this node" -msgstr "ボリュームはこのノードのローカルではありません。" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:365 +msgid "" +"Could not get FC connection information for the host-volume connection. " +"Is the host configured properly for FC connections?" +msgstr "" -#: cinder/volume/manager.py:159 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:380 #, python-format -msgid "volume %s: removing export" -msgstr "ボリューム %s のエクスポートを解除します。" +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" -#: cinder/volume/manager.py:161 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:385 #, python-format -msgid "volume %s: deleting" -msgstr "ボリューム %s を削除します。" +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" -#: cinder/volume/manager.py:164 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:403 #, python-format -msgid "volume %s: volume is busy" +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:411 +msgid "terminate_connection: Failed to get host name from connector." msgstr "" -#: cinder/volume/manager.py:176 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:421 #, python-format -msgid "volume %s: deleted successfully" -msgstr "ボリューム %s の削除に成功しました。" +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:447 +msgid "create_volume_from_snapshot: Source and destination size differ." 
+msgstr "" -#: cinder/volume/manager.py:183 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:459 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:470 #, python-format -msgid "snapshot %s: creating" +msgid "enter: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:474 +msgid "extend_volume: Extending a volume with snapshots is not supported." msgstr "" -#: cinder/volume/manager.py:187 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:481 #, python-format -msgid "snapshot %(snap_name)s: creating" +msgid "leave: extend_volume: volume %s" msgstr "" -#: cinder/volume/manager.py:202 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:497 #, python-format -msgid "snapshot %s: created successfully" +msgid "enter: migrate_volume: id=%(id)s, host=%(host)s" msgstr "" -#: cinder/volume/manager.py:211 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:523 #, python-format -msgid "snapshot %s: deleting" +msgid "leave: migrate_volume: id=%(id)s, host=%(host)s" msgstr "" -#: cinder/volume/manager.py:214 -#, fuzzy, python-format -msgid "snapshot %s: snapshot is busy" -msgstr "snapshotting: インスタンス %s のスナップショットを取得中" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:540 +#, python-format +msgid "" +"enter: retype: id=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" -#: cinder/volume/manager.py:226 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:581 #, python-format -msgid "snapshot %s: deleted successfully" +msgid "" +"exit: retype: ild=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:622 +msgid "Could not get pool data from the storage" msgstr "" -#: cinder/volume/manager.py:310 -msgid "Checking volume capabilities" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:623 +msgid "_update_volume_stats: Could not get storage pool data" msgstr "" -#: cinder/volume/manager.py:314 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:44 #, python-format -msgid "New capabilities found: %s" +msgid "Could not find key in output of command %(cmd)s: %(out)s" msgstr "" -#: cinder/volume/manager.py:325 -msgid "Clear capabilities" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:64 +#, python-format +msgid "Failed to get code level (%s)." 
msgstr "" -#: cinder/volume/manager.py:329 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:86 #, python-format -msgid "Notification {%s} received" +msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s" msgstr "" -#: cinder/volume/netapp.py:79 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:143 #, python-format -msgid "API %(name)sfailed: %(reason)s" +msgid "WWPN on node %(node)s: %(wwpn)s" msgstr "" -#: cinder/volume/netapp.py:109 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:165 #, python-format -msgid "%s is not set" +msgid "Failed to find host %s" msgstr "" -#: cinder/volume/netapp.py:128 -#, fuzzy -msgid "Connected to DFM server" -msgstr "キューに再接続しました。" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:178 +#, python-format +msgid "enter: get_host_from_connector: %s" +msgstr "" -#: cinder/volume/netapp.py:159 -#, fuzzy, python-format -msgid "Job failed: %s" -msgstr "NotFound 発生: %s" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:207 +#, python-format +msgid "leave: get_host_from_connector: host %s" +msgstr "" -#: cinder/volume/netapp.py:240 -msgid "Failed to provision dataset member" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:218 +#, python-format +msgid "enter: create_host: host %s" msgstr "" -#: cinder/volume/netapp.py:252 -msgid "No LUN was created by the provision job" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:224 +msgid "create_host: Host name is not unicode or string" msgstr "" -#: cinder/volume/netapp.py:261 cinder/volume/netapp.py:433 -#, fuzzy, python-format -msgid "Failed to find LUN ID for volume %s" -msgstr "ボリューム %s 用の iSCSI エクスポートが見つかりません" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:235 +msgid "create_host: No initiators or wwpns supplied." +msgstr "" -#: cinder/volume/netapp.py:280 -msgid "Failed to remove and delete dataset member" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:265 +#, python-format +msgid "leave: create_host: host %(host)s - %(host_name)s" msgstr "" -#: cinder/volume/netapp.py:603 cinder/volume/netapp.py:657 -#, fuzzy, python-format -msgid "No LUN ID for volume %s" -msgstr "ボリューム %s 用の iSCSI エクスポートが見つかりません" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:275 +#, python-format +msgid "enter: map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" -#: cinder/volume/netapp.py:607 cinder/volume/netapp.py:661 -#, fuzzy, python-format -msgid "Failed to get LUN details for LUN ID %s" -msgstr "ip %s に対するメタデータの取得に失敗しました。" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:301 +#, python-format +msgid "" +"leave: map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host " +"%(host_name)s" +msgstr "" -#: cinder/volume/netapp.py:614 -#, fuzzy, python-format -msgid "Failed to get host details for host ID %s" -msgstr "ip %s に対するメタデータの取得に失敗しました。" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:311 +#, python-format +msgid "enter: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" -#: cinder/volume/netapp.py:620 -#, fuzzy, python-format -msgid "Failed to get target portal for filer: %s" -msgstr "ip %s に対するメタデータの取得に失敗しました。" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:318 +#, python-format +msgid "unmap_vol_from_host: No mapping of volume %(vol_name)s to any host found." 
+msgstr "" -#: cinder/volume/netapp.py:625 -#, fuzzy, python-format -msgid "Failed to get target IQN for filer: %s" -msgstr "ip %s に対するメタデータの取得に失敗しました。" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:324 +#, python-format +msgid "" +"unmap_vol_from_host: Multiple mappings of volume %(vol_name)s found, no " +"host specified." +msgstr "" -#: cinder/volume/san.py:113 cinder/volume/san.py:151 -msgid "Specify san_password or san_private_key" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:336 +#, python-format +msgid "" +"unmap_vol_from_host: No mapping of volume %(vol_name)s to host %(host) " +"found." msgstr "" -#: cinder/volume/san.py:156 -msgid "san_ip must be set" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:348 +#, python-format +msgid "leave: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:377 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:383 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:390 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:397 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:402 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" msgstr "" -#: cinder/volume/san.py:320 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:408 #, python-format -msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" msgstr "" -#: cinder/volume/san.py:452 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:417 #, python-format -msgid "CLIQ command returned %s" +msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:452 +msgid "Protocol must be specified as ' iSCSI' or ' FC'." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:495 +#, python-format +msgid "enter: create_vdisk: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:498 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:525 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping%(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:535 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within theallotted %(to)d " +"seconds timeout. Terminating." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:544 +#, python-format +msgid "" +"enter: run_flashcopy: execute FlashCopy from source %(source)s to target " +"%(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:552 +#, python-format +msgid "leave: run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:572 +#, python-format +msgid "Loopcall: _check_vdisk_fc_mappings(), vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:595 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:631 +#, python-format +msgid "Calling _ensure_vdisk_no_fc_mappings: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:639 +#, python-format +msgid "enter: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:641 +#, python-format +msgid "Tried to delete non-existant vdisk %s." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:645 +#, python-format +msgid "leave: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:649 +#, python-format +msgid "enter: create_copy: snapshot %(src)s to %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:654 +#, python-format +msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:669 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt)s from vdisk %(src)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:691 +msgid "migrate_volume started without a vdisk copy in the expected pool." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:743 +#, python-format +msgid "" +"Ignore change IO group as storage code level is %(code_level)s, below " +"then 6.4.0.0" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:35 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:211 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:244 +#, fuzzy, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" +"%(description)s\n" +"コマンド: %(cmd)s\n" +"終了コード: %(exit_code)s\n" +"標準出力: %(stdout)r\n" +"標準エラー出力: %(stderr)r" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:53 +#, python-format +msgid "Expected no output from CLI command %(cmd)s, got %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:65 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:256 +#, python-format +msgid "" +"Failed to parse CLI output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:142 +msgid "Must pass wwpn or host to lsfabric." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:158 +#, python-format +msgid "Did not find success message nor error for %(fun)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:163 +msgid "" +"storwize_svc_multihostmap_enabled is set to False, not allowing multi " +"host mapping." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:347 +#, python-format +msgid "Did not find expected key %(key)s in %(fun)s: %(raw)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:382 +#, python-format +msgid "" +"Unexpected CLI response: header/row mismatch. 
header: %(header)s, row: "
+"%(row)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/api.py:419
+#, python-format
+msgid "No element by given name %s."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/api.py:440
+msgid "Not a valid value for NaElement."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/api.py:444
+msgid "NaElement name cannot be null."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/api.py:468
+msgid "Type cannot be converted into NaElement."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/common.py:75
+msgid "Required configuration not found"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/common.py:103
+#, python-format
+msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/common.py:109
+#, python-format
+msgid "Storage family %s is not supported"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/common.py:116
+#, python-format
+msgid "No default storage protocol found for storage family %(storage_family)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/common.py:123
+#, python-format
+msgid ""
+"Protocol %(storage_protocol)s is not supported for storage family "
+"%(storage_family)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/common.py:130
+#, python-format
+msgid ""
+"NetApp driver of family %(storage_family)s and protocol "
+"%(storage_protocol)s loaded"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/common.py:139
+msgid "Only loading netapp drivers supported."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/common.py:158
+#, python-format
+msgid ""
+"The configured NetApp driver is deprecated. Please refer to the link to "
+"resolve the issue '%s'."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:69
+#, python-format
+msgid "No metadata property %(prop)s defined for the LUN %(name)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:105
+#, fuzzy, python-format
+msgid "Using NetApp filer: %s"
+msgstr "インスタンス %s は実行中です。"
+
+#: cinder/volume/drivers/netapp/iscsi.py:150
+msgid "Success getting LUN list from server"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:166
+#, python-format
+msgid "Created LUN with name %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:175
+#, python-format
+msgid "No entry in LUN table for volume/snapshot %(name)s." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:191 +#, fuzzy, python-format +msgid "Destroyed LUN %s" +msgstr "ネストした戻り値: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:227 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:232 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:238 +#, fuzzy, python-format +msgid "Failed to get LUN target details for the LUN %s" +msgstr "ip %s に対するメタデータの取得に失敗しました。" + +#: cinder/volume/drivers/netapp/iscsi.py:249 +#, fuzzy, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "ip %s に対するメタデータの取得に失敗しました。" + +#: cinder/volume/drivers/netapp/iscsi.py:252 +#, fuzzy, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "ip %s に対するメタデータの取得に失敗しました。" + +#: cinder/volume/drivers/netapp/iscsi.py:290 +#, fuzzy, python-format +msgid "Snapshot %s deletion successful" +msgstr "ボリューム %s の削除に成功しました。" + +#: cinder/volume/drivers/netapp/iscsi.py:310 +#: cinder/volume/drivers/netapp/iscsi.py:565 +#: cinder/volume/drivers/netapp/nfs.py:99 +#: cinder/volume/drivers/netapp/nfs.py:206 +#, python-format +msgid "Resizing %s failed. Cleaning volume." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:325 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:412 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:431 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:511 +msgid "Object is not a NetApp LUN." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:543 +#, python-format +msgid "Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:545 +#, python-format +msgid "Error getting lun attribute. Exception: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:600 +#, python-format +msgid "No need to extend volume %s as it is already the requested new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:606 +#, python-format +msgid "Resizing lun %s directly to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:633 +#, python-format +msgid "Lun %(path)s geometry failed. Message - %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:662 +#, python-format +msgid "Moving lun %(name)s to %(new_name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:677 +#, python-format +msgid "Resizing lun %s using sub clone to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:684 +#, python-format +msgid "%s cannot be sub clone resized as it is hosted on compressed volume" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:690 +#, python-format +msgid "%s cannot be sub clone resized as it contains no blocks." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:707 +#, python-format +msgid "Post clone resize lun %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:718 +#, python-format +msgid "Failure staging lun %s to tmp." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:723 +#, python-format +msgid "Failure moving new cloned lun to %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:727 +#, python-format +msgid "Failure deleting staged tmp lun %s." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:730 +#, python-format +msgid "Unknown exception in post clone resize lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:732 +#, python-format +msgid "Exception details: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:736 +msgid "Getting lun block count." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:741 +#, python-format +msgid "Failure getting lun info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:785 +#, python-format +msgid "Failed to get vol with required size and extra specs for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:796 +#, python-format +msgid "Error provisioning vol %(name)s on %(volume)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:841 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:982 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:986 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1087 +msgid "Cluster ssc is not updated. No volume stats found." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1149 +#: cinder/volume/drivers/netapp/nfs.py:1080 +msgid "Unsupported ONTAP version. ONTAP version 7.3.1 and above is supported." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1153 +#: cinder/volume/drivers/netapp/nfs.py:1084 +#: cinder/volume/drivers/netapp/utils.py:320 +msgid "Api version could not be determined." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1164 +#, fuzzy, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "ip %s に対するメタデータの取得に失敗しました。" + +#: cinder/volume/drivers/netapp/iscsi.py:1273 +#, python-format +msgid "Error finding luns for volume %s. Verify volume exists." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1390 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1393 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1456 +msgid "Volume refresh job already running. Returning..." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1462 +#, python-format +msgid "Error refreshing vol capacity. Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1470 +#, python-format +msgid "Refreshing capacity info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:104 +#: cinder/volume/drivers/netapp/nfs.py:211 +#, python-format +msgid "NFS file %s not discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:224 +#, python-format +msgid "Copied image to volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:230 +#, python-format +msgid "Registering image in cache %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:237 +#, python-format +msgid "" +"Exception while registering image %(image_id)s in cache. Exception: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:250 +#, python-format +msgid "Found cache file for image %(image_id)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:263 +#, python-format +msgid "Cloning img from cache for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:271 +msgid "Image cache cleaning in progress. Returning... 
" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:282 +msgid "Image cache cleaning in progress." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:293 +#, python-format +msgid "Cleaning cache for share %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:298 +#, python-format +msgid "Files to be queued for deletion %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:305 +#, python-format +msgid "Exception during cache cleaning %(share)s. Message - %(ex)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:311 +msgid "Image cache cleaning done." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:336 +#, python-format +msgid "Bytes to free %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:343 +#, python-format +msgid "Delete file path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:358 +#, python-format +msgid "Deleting file at path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:363 +#, python-format +msgid "Exception during deleting %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:395 +#, python-format +msgid "Unexpected exception in cloning image %(image_id)s. Message: %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:411 +#, python-format +msgid "Cloning image %s from cache" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:415 +#, python-format +msgid "Cache share: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:425 +#, python-format +msgid "Unexpected exception during image cloning in share %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:431 +#, python-format +msgid "Cloning image %s directly in share" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:436 +#, python-format +msgid "Share is cloneable %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:443 +#, python-format +msgid "Image is raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:450 +#, python-format +msgid "Image will locally be converted to raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:457 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:467 +#, python-format +msgid "Performing post clone for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:474 +msgid "NFS file could not be discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:478 +msgid "Checking file for resize" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:482 +#, python-format +msgid "Resizing file to %sG" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:488 +msgid "Resizing image file failed." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:510 +msgid "Discover file retries exhausted." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:529 +#, python-format +msgid "Image location not in the expected format %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:557 +#, python-format +msgid "Found possible share matches %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:561 +msgid "Unexpected exception while short listing used share." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:599 +#, python-format +msgid "Extending volume %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:710 +#, python-format +msgid "Shares on vserver %s will only be used for provisioning." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:714 +#: cinder/volume/drivers/netapp/nfs.py:892 +msgid "No vserver set in config. SSC will be disabled." 
+msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:757 +#, python-format +msgid "Exception creating vol %(name)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:765 +#, python-format +msgid "Volume %s could not be created on shares." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:815 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:856 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:862 +#, python-format +msgid "" +"Cloning with params volume %(volume)s, src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:924 +msgid "No cluster ssc stats found. Wait for next volume stats update." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:941 +msgid "No shares found hence skipping ssc refresh." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:978 +#: cinder/volume/drivers/netapp/nfs.py:1221 +#, python-format +msgid "Shortlisted del elg files %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:983 +#: cinder/volume/drivers/netapp/nfs.py:1226 +#, python-format +msgid "Getting file usage for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:988 +#: cinder/volume/drivers/netapp/nfs.py:1231 +#, python-format +msgid "file-usage for path %(path)s is %(bytes)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1005 +#: cinder/volume/drivers/netapp/nfs.py:1268 +#, python-format +msgid "Share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1007 +#: cinder/volume/drivers/netapp/nfs.py:1270 +#, python-format +msgid "No share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1038 +#, python-format +msgid "Found volume %(vol)s for share %(share)s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1129 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1139 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:241 +#, python-format +msgid "Unexpected error while creating ssc vol list. Message - %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:272 +#, python-format +msgid "Exception querying aggr options. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:313 +#, python-format +msgid "Exception querying sis information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:347 +#, python-format +msgid "Exception querying mirror information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:379 +#, python-format +msgid "Exception querying storage disk. 
%s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:421 +#, python-format +msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:455 +#, python-format +msgid "Successfully completed stale refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:482 +#, python-format +msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:488 +#, python-format +msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:500 +msgid "Backend not a VolumeDriver." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:502 +msgid "Backend server not NaServer." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:505 +msgid "ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:517 +msgid "refresh stale ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:616 +msgid "Fatal error: User not permitted to query NetApp volumes." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:623 +#, python-format +msgid "" +"The user does not have access or sufficient privileges to use all ssc " +"apis. The ssc features %s may not work as expected." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:122 +msgid "ems executed successfully." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:124 +#, python-format +msgid "Failed to invoke ems. Message : %s" +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:137 +msgid "" +"It is not the recommended way to use drivers by NetApp. Please use " +"NetAppDriver to achieve the functionality." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:160 +msgid "Requires an NaServer instance." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:317 +msgid "Unsupported Clustered Data ONTAP version." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:99 +#, fuzzy, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "ボリュームグループ%sが存在しません。" + +#: cinder/volume/drivers/nexenta/iscsi.py:150 +#, python-format +msgid "Extending volume: %(id)s New size: %(size)s GB" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:166 +#, python-format +msgid "Volume %s does not exist, it seems it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:179 +#, python-format +msgid "Cannot delete snapshot %(origin): %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:190 +#, python-format +msgid "Creating temp snapshot of the original volume: %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:200 +#: cinder/volume/drivers/nexenta/nfs.py:200 +#, python-format +msgid "Volume creation failed, deleting created snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:205 +#: cinder/volume/drivers/nexenta/nfs.py:205 +#, python-format +msgid "Failed to delete zfs snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:223 +#, python-format +msgid "Enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:250 +#, python-format +msgid "Remote NexentaStor appliance at %s should be SSH-bound." 
+msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:267 +#, python-format +msgid "" +"Cannot send source snapshot %(src)s to destination %(dst)s. Reason: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:275 +#, python-format +msgid "" +"Cannot delete temporary source snapshot %(src)s on NexentaStor Appliance:" +" %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:281 +#, python-format +msgid "Cannot delete source volume %(volume)s on NexentaStor Appliance: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:318 +#, python-format +msgid "Snapshot %s does not exist, it seems it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:439 +#: cinder/volume/drivers/windows/windows_utils.py:230 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:449 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:461 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:471 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:481 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:514 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:522 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:83 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:88 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:89 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:90 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:96 +#, fuzzy, python-format +msgid "Got response: %s" +msgstr "応答 %s" + +#: cinder/volume/drivers/nexenta/nfs.py:85 +#, python-format +msgid "Volume %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:89 +#, python-format +msgid "Folder %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:114 +#, python-format +msgid "Creating folder on Nexenta Store %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:146 +#, python-format +msgid "Cannot destroy created folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:176 +#, python-format +msgid "Cannot destroy cloned folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:227 +#, python-format +msgid "Folder %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:237 +#: cinder/volume/drivers/nexenta/nfs.py:268 +#, python-format +msgid "Snapshot %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:302 +#, python-format +msgid "Creating regular file: %s.This may take some time." 
+msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:313 +#, python-format +msgid "Regular file: %s created." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:365 +#, python-format +msgid "Sharing folder %s on Nexenta Store" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:393 +#, python-format +msgid "Shares loaded: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/utils.py:46 +#, python-format +msgid "Invalid value: \"%s\"" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:93 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:99 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:107 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:137 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:190 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:246 +#, python-format +msgid "Snapshot info: %(name)s => %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:321 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:79 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:166 +#, python-format +msgid "Invalid hp3parclient version. Version %s or greater required." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:179 +#, python-format +msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:193 +#, python-format +msgid "HP3PARCommon %(common_ver)s, hp3parclient %(rest_ver)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:212 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:494 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:228 +#, python-format +msgid "Failed to get domain because CPG (%s) doesn't exist on array." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:247 +#, python-format +msgid "Error extending volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:347 +#, python-format +msgid "command %s failed" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:390 +#, fuzzy, python-format +msgid "Error running ssh command: %s" +msgstr "コマンド実行において予期しないエラーが発生しました。" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:586 +#, python-format +msgid "VV Set %s does not exist." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:633 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:684 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:752 +#, fuzzy, python-format +msgid "Volume (%s) already exists on array" +msgstr "グループ %s は既に存在しています。" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1004 +#, python-format +msgid "Failure in update_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1019 +#, python-format +msgid "Failure in clear_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1031 +#, python-format +msgid "Error attaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1039 +#, python-format +msgid "Error detaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:124 +#, python-format +msgid "Invalid IP address format '%s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:158 +#, python-format +msgid "" +"Found invalid iSCSI IP address(s) in configuration option(s) " +"hp3par_iscsi_ips or iscsi_ip_address '%s.'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:164 +msgid "At least one valid iSCSI IP address must be set." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:266 +msgid "Least busy iSCSI port not found, using first iSCSI port in list." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:75 +#, python-format +msgid "Failure while invoking function: %(func)s. Error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:162 +#, python-format +msgid "Error while terminating session: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:165 +msgid "Successfully established connection to the server." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:172 +#, python-format +msgid "Error while logging out the user: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:218 +#, python-format +msgid "" +"Not authenticated error occurred. Will create session and try API call " +"again: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:258 +#, python-format +msgid "Task: %(task)s progress: %(prog)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:262 +#, python-format +msgid "Task %s status: success." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:266 +#: cinder/volume/drivers/vmware/api.py:271 +#, python-format +msgid "Task: %(task)s failed with error: %(err)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:290 +msgid "Lease is ready." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:294 +msgid "Lease initializing..." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:304 +#, python-format +msgid "Error: unknown lease state %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:51 +#, python-format +msgid "Read %(bytes)s out of %(max)s from ThreadSafePipe." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:56 +#, python-format +msgid "Completed transfer of size %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:102 +#, python-format +msgid "Initiating image service update on image: %(image)s with meta: %(meta)s" +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:117 +#, python-format +msgid "Glance image: %s is now active." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:123 +#, python-format +msgid "Glance image: %s is in killed state." 
+msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:132 +#, python-format +msgid "Glance image %(id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:171 +#, python-format +msgid "" +"Exception during HTTP connection close in VMwareHTTPWrite. Exception is " +"%s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:203 +#: cinder/volume/drivers/vmware/read_write_util.py:292 +msgid "Could not retrieve URL from lease." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:206 +#, python-format +msgid "Opening vmdk url: %s for write." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:231 +#, python-format +msgid "Written %s bytes to vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:242 +#: cinder/volume/drivers/vmware/read_write_util.py:318 +#, python-format +msgid "Updating progress to %s percent." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:258 +#: cinder/volume/drivers/vmware/read_write_util.py:334 +msgid "Lease released." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:260 +#: cinder/volume/drivers/vmware/read_write_util.py:336 +#, python-format +msgid "Lease is already in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:295 +#, python-format +msgid "Opening vmdk url: %s for read." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:307 +#, python-format +msgid "Read %s bytes from vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:150 +#, python-format +msgid "Error(s): %s occurred in the call to RetrievePropertiesEx." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:189 +#, python-format +msgid "No such SOAP method %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:198 +#, python-format +msgid "httplib error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:209 +#, python-format +msgid "Socket error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:218 +#, python-format +msgid "Type error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:225 +#, python-format +msgid "Error in %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:112 +#, python-format +msgid "Returning spec value %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:115 +#, python-format +msgid "Invalid spec value: %s specified." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:118 +#, python-format +msgid "Returning default spec value: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:169 +#, python-format +msgid "%s not set." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:174 +#, python-format +msgid "Successfully setup driver: %(driver)s for server: %(ip)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:219 +msgid "Backing not available, no operation to be performed." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:287 +#, python-format +msgid "" +"Unable to pick datastore to accommodate %(size)s bytes from the " +"datastores: %(dss)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:293 +#, python-format +msgid "" +"Selected datastore: %(datastore)s with %(host_count)d connected host(s) " +"for the volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:375 +#, python-format +msgid "" +"Unable to find suitable datastore for volume of size: %(vol)s GB under " +"host: %(host)s. 
More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:385 +#, python-format +msgid "Unable to find host to accommodate a disk of size: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:412 +#, python-format +msgid "" +"Unable to find suitable datastore for volume: %(vol)s under host: " +"%(host)s. More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:422 +#, python-format +msgid "Unable to create volume: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:441 +#, python-format +msgid "The instance: %s for which initialize connection is called, exists." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:448 +#, python-format +msgid "There is no backing for the volume: %s. Need to create one." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:456 +msgid "The instance for which initialize connection is called, does not exist." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:461 +#, python-format +msgid "Trying to boot from an empty volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:470 +#, python-format +msgid "" +"Returning connection_info: %(info)s for volume: %(volume)s with " +"connector: %(connector)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:518 +#, python-format +msgid "Snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:523 +#, python-format +msgid "There is no backing, so will not create snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:528 +#, python-format +msgid "Successfully created snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:549 +#, python-format +msgid "Delete snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:554 +#, python-format +msgid "There is no backing, and so there is no snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:558 +#, python-format +msgid "Successfully deleted snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:586 +#, python-format +msgid "Successfully cloned new backing: %(back)s from source VMDK file: %(vmdk)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:603 +#, python-format +msgid "" +"There is no backing for the source volume: %(svol)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:633 +#, python-format +msgid "" +"There is no backing for the source snapshot: %(snap)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:642 +#: cinder/volume/drivers/vmware/vmdk.py:982 +#, python-format +msgid "" +"There is no snapshot point for the snapshoted volume: %(snap)s. Not " +"creating any backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:678 +#, python-format +msgid "Cannot create image of disk format: %s. Only vmdk disk format is accepted." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:713 +#: cinder/volume/drivers/vmware/vmdk.py:771 +#, python-format +msgid "Fetching glance image: %(id)s to server: %(host)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:722 +#: cinder/volume/drivers/vmware/vmdk.py:792 +#, python-format +msgid "Done copying image: %(id)s to volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:725 +#, python-format +msgid "" +"Exception in copy_image_to_volume: %(excep)s. Deleting the backing: " +"%(back)s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:746 +#, python-format +msgid "Exception in _select_ds_for_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:749 +#, python-format +msgid "Selected datastore %(ds)s for new volume of size %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:784 +#, python-format +msgid "Exception in copy_image_to_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:787 +#, python-format +msgid "Deleting the backing: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:808 +#, python-format +msgid "Copy glance image: %s to create new volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:842 +msgid "Upload to glance of attached volume is not supported." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:847 +#, python-format +msgid "Copy Volume: %s to new image." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:853 +#, python-format +msgid "Backing not found, creating for volume: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:872 +#, python-format +msgid "Done copying volume %(vol)s to a new image %(img)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:922 +#, python-format +msgid "Relocating volume: %(backing)s to %(ds)s and %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:961 +#: cinder/volume/drivers/vmware/volumeops.py:630 +#, python-format +msgid "Successfully created clone: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:974 +#, python-format +msgid "" +"There is no backing for the snapshoted volume: %(snap)s. Not creating any" +" backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1010 +#, python-format +msgid "" +"There is no backing for the source volume: %(src)s. Not creating any " +"backing for volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1018 +#, python-format +msgid "Linked clone of source volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:94 +#, python-format +msgid "Downloading image: %s from glance image server as a flat vmdk file." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:107 +#: cinder/volume/drivers/vmware/vmware_images.py:126 +#, python-format +msgid "Downloaded image: %s from glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:113 +#, python-format +msgid "Downloading image: %s from glance image server using HttpNfc import." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:132 +#, python-format +msgid "Uploading image: %s to the Glance image server using HttpNfc export." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:158 +#, python-format +msgid "Uploaded image: %s to the Glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:87 +#, python-format +msgid "Did not find any backing with name: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:94 +#, python-format +msgid "Deleting the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:97 +#, python-format +msgid "Initiated deletion of VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:99 +#, python-format +msgid "Deleted the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:237 +#, python-format +msgid "There are no valid datastores attached to %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:289 +#, python-format +msgid "" +"Creating folder: %(child_folder_name)s under parent folder: " +"%(parent_folder)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:306 +#, python-format +msgid "Child folder already present: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:314 +#, python-format +msgid "Created child folder: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:365 +#, python-format +msgid "Spec for creating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:383 +#, python-format +msgid "" +"Creating volume backing name: %(name)s disk_type: %(disk_type)s size_kb: " +"%(size_kb)s at folder: %(folder)s resourse pool: %(resource_pool)s " +"datastore name: %(ds_name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:395 +#, python-format +msgid "Initiated creation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:398 +#, python-format +msgid "Successfully created volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:438 +#, python-format +msgid "Spec for relocating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:452 +#, python-format +msgid "" +"Relocating backing: %(backing)s to datastore: %(ds)s and resource pool: " +"%(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:462 +#, python-format +msgid "Initiated relocation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:464 +#, python-format +msgid "" +"Successfully relocated volume backing: %(backing)s to datastore: %(ds)s " +"and resource pool: %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:474 +#, python-format +msgid "Moving backing: %(backing)s to folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:479 +#, python-format +msgid "Initiated move of volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:482 +#, python-format +msgid "Successfully moved volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:494 +#, python-format +msgid "Snapshoting backing: %(backing)s with name: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:501 +#, python-format +msgid "Initiated snapshot of volume backing: %(backing)s named: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:505 +#, python-format +msgid "Successfully created snapshot: %(snap)s for volume backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:553 +#, python-format +msgid "Deleting the snapshot: %(name)s from backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:558 +#, python-format +msgid "" +"Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " +"delete anything." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:565 +#, python-format +msgid "Initiated snapshot: %(name)s deletion for backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:569 +#, python-format +msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:597 +#, python-format +msgid "Spec for cloning the backing: %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:613 +#, python-format +msgid "" +"Creating a clone of backing: %(back)s, named: %(name)s, clone type: " +"%(type)s from snapshot: %(snap)s on datastore: %(ds)s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:627 +#, python-format +msgid "Initiated clone of backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:638 +#, python-format +msgid "Deleting file: %(file)s under datacenter: %(dc)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:646 +#, python-format +msgid "Initiated deletion via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:648 +#, python-format +msgid "Successfully deleted file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:701 +msgid "Copying disk data before snapshot of the VM" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:711 +#, python-format +msgid "Initiated copying disk data via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:713 +#, python-format +msgid "Successfully copied disk at: %(src)s to: %(dest)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:722 +#, python-format +msgid "Deleting vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:729 +#, python-format +msgid "Initiated deleting vmdk file via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:731 +#, python-format +msgid "Deleted vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/windows/windows.py:102 +#, fuzzy, python-format +msgid "Creating folder %s " +msgstr "Deleting user: ユーザ %s を削除します。" + +#: cinder/volume/drivers/windows/windows_utils.py:47 +#, python-format +msgid "" +"check_for_setup_error: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:53 +msgid "check_for_setup_error: there is no ISCSI traffic listening." +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:63 +#, python-format +msgid "" +"get_host_information: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:73 +#, python-format +msgid "" +"get_host_information: the ISCSI target information could not be " +"retrieved. WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:105 +#, python-format +msgid "" +"associate_initiator_with_iscsi_target: an association between initiator: " +"%(init)s and target name: %(target)s could not be established. WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:123 +#, python-format +msgid "" +"delete_iscsi_target: error when deleting the iscsi target associated with" +" target name: %(target)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:139 +#, python-format +msgid "" +"create_volume: error when creating the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:157 +#, python-format +msgid "" +"delete_volume: error when deleting the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:177 +#, python-format +msgid "" +"create_snapshot: error when creating the snapshot name: %(vol_name)s . 
" +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:193 +#, python-format +msgid "" +"create_volume_from_snapshot: error when creating the volume name: " +"%(vol_name)s from snapshot name: %(snap_name)s. WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:208 +#, python-format +msgid "" +"delete_snapshot: error when deleting the snapshot name: %(snap_name)s . " +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:223 +#, python-format +msgid "" +"create_iscsi_target: error when creating iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:240 +#, python-format +msgid "" +"remove_iscsi_target: error when deleting iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:255 +#, python-format +msgid "" +"add_disk_to_target: error adding disk associated to volume : %(vol_name)s" +" to the target name: %(tar_name)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:273 +#, python-format +msgid "" +"copy_vhd_disk: error when copying disk from source path : %(src_path)s to" +" destination path: %(dest_path)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:290 +#, python-format +msgid "" +"extend: error when extending the volume: %(vol_name)s .WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/flows/common.py:52 +#, python-format +msgid "Restoring source %(source_volid)s status to %(status)s" +msgstr "" + +#: cinder/volume/flows/common.py:58 +#, python-format +msgid "" +"Failed setting source volume %(source_volid)s back to its initial " +"%(source_status)s status" +msgstr "" + +#: cinder/volume/flows/common.py:83 +#, python-format +msgid "Updating volume: %(volume_id)s with %(update)s due to: %(reason)s" +msgstr "" + +#: cinder/volume/flows/common.py:90 +#: cinder/volume/flows/manager/create_volume.py:676 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(update)s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:81 +#, python-format +msgid "Originating snapshot status must be one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:103 +#, python-format +msgid "" +"Unable to create a volume from an originating source volume when its " +"status is not one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:126 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than the snapshot size " +"%(snap_size)sGB. They must be >= original snapshot size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:135 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than original volume size " +"%(source_size)sGB. They must be >= original volume size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:144 +#, python-format +msgid "Volume size %(size)s must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:186 +#, python-format +msgid "" +"Size of specified image %(image_size)sGB is larger than volume size " +"%(volume_size)sGB." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:194 +#, python-format +msgid "" +"Volume size %(volume_size)sGB cannot be smaller than the image minDisk " +"size %(min_disk)sGB." 
+msgstr "" + +#: cinder/volume/flows/api/create_volume.py:212 +#, python-format +msgid "Metadata property key %s greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:217 +#, python-format +msgid "Metadata property key %s value greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:254 +#, python-format +msgid "Availability zone '%s' is invalid" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:267 +msgid "Volume must be in the same availability zone as the snapshot" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:276 +msgid "Volume must be in the same availability zone as the source volume" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:315 +msgid "Volume type will be changed to be the same as the source volume." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:463 +#, python-format +msgid "Failed destroying volume entry %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:546 +#, python-format +msgid "Failed rolling back quota for %s reservations" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:590 +#, python-format +msgid "Failed to update quota for deleting volume: %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:678 +#: cinder/volume/flows/manager/create_volume.py:192 +#, python-format +msgid "Volume %s: create failed" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:682 +msgid "Unexpected build error:" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:105 +#, python-format +msgid "" +"Volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d due to " +"%(reason)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:124 +#, python-format +msgid "Volume %s: re-scheduled" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:141 +#, python-format +msgid "Updating volume %(volume_id)s with %(update)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:146 +#, python-format +msgid "Volume %s: resetting 'creating' status failed." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:165 +#, python-format +msgid "Volume %s: rescheduling failed" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:308 +#, python-format +msgid "" +"Failed notifying about the volume action %(event)s for volume " +"%(volume_id)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:345 +#, python-format +msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:347 +#, python-format +msgid "" +"Failed updating volume %(vol_id)s metadata using the provided " +"%(src_type)s %(src_id)s metadata" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:405 +#, python-format +msgid "" +"Failed fetching snapshot %(snapshot_id)s bootable flag using the provided" +" glance snapshot %(snapshot_ref_id)s volume reference" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:418 +#, python-format +msgid "Marking volume %s as bootable." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:421 +#, python-format +msgid "Failed updating volume %(volume_id)s bootable flag to true" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:448 +#, python-format +msgid "" +"Attempting download of %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s." 
+msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:455 +#: cinder/volume/flows/manager/create_volume.py:466 +#, python-format +msgid "" +"Failed to copy image %(image_id)s to volume: %(volume_id)s, error: " +"%(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:461 +#, python-format +msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:475 +#, python-format +msgid "" +"Downloaded image %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s successfully." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:512 +#, python-format +msgid "" +"Creating volume glance metadata for volume %(volume_id)s backed by image " +"%(image_id)s with: %(vol_metadata)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:526 +#, python-format +msgid "" +"Cloning %(volume_id)s from image %(image_id)s at location " +"%(image_location)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:552 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(updates)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:574 +#, python-format +msgid "Unable to create volume. Volume driver %s not initialized" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:588 +#, python-format +msgid "" +"Volume %(volume_id)s: being created using %(functor)s with specification:" +" %(volume_spec)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:611 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with creation provided " +"model %(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:619 +#, python-format +msgid "Volume %s: creating export" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:633 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with driver provided model " +"%(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:680 +#, python-format +msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" +msgstr "" + +#~ msgid "Error retrieving volume status: %s" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get system name" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get storage pool data" +#~ msgstr "" + +#~ msgid "Cannot find any Fibre Channel HBAs" +#~ msgstr "" + +#~ msgid "Volume status must be available or error" +#~ msgstr "ボリュームのステータス(status)は available でなければなりません。" + +#~ msgid "No backend config with id %s" +#~ msgstr "" + +#~ msgid "No sm_flavor called %s" +#~ msgstr "" + +#~ msgid "No sm_volume with id %s" +#~ msgstr "" + +#~ msgid "Error: %s" +#~ msgstr "エラー %s をキャッチしました。" + +#~ msgid "Unexpected state while cloning %s" +#~ msgstr "コマンド実行において予期しないエラーが発生しました。" + +#~ msgid "iSCSI device not found at %s" +#~ msgstr "" + +#~ msgid "Fibre Channel device not found." 
+#~ msgstr "" + +#~ msgid "Uncaught exception" +#~ msgstr "例外 %s が発生しました。" + +#~ msgid "Out reactor registered" +#~ msgstr "" + +#~ msgid "CONSUMER GOT %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT QUEUED %(data)s" +#~ msgstr "" + +#~ msgid "Could not create IPC directory %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT %(data)s" +#~ msgstr "" + +#~ msgid "May specify only one of snapshot, imageRef or source volume" +#~ msgstr "" + +#~ msgid "Volume size cannot be lesser than the Snapshot size" +#~ msgstr "" + +#~ msgid "Unable to clone volumes that are in an error state" +#~ msgstr "" + +#~ msgid "Clones currently must be >= original volume size." +#~ msgstr "" + +#~ msgid "Volume size '%s' must be an integer and greater than 0" +#~ msgstr "" + +#~ msgid "Size of specified image is larger than volume size." +#~ msgstr "" + +#~ msgid "Image minDisk size is larger than the volume size." +#~ msgstr "" + +#~ msgid "" +#~ msgstr "" + +#~ msgid "Availability zone is invalid" +#~ msgstr "" + +#~ msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +#~ msgstr "ボリューム %(vol_name)s: サイズ %(vol_size)sG のlvを作成します。" + +#~ msgid "volume %s: creating from snapshot" +#~ msgstr "ボリューム %s をエクスポートします。" + +#~ msgid "volume %s: creating from existing volume" +#~ msgstr "ボリューム %s をエクスポートします。" + +#~ msgid "volume %s: creating from image" +#~ msgstr "ボリューム%sを作成します。" + +#~ msgid "volume %s: creating" +#~ msgstr "ボリューム%sを作成します。" + +#~ msgid "Setting volume: %s status to error after failed image copy." +#~ msgstr "" + +#~ msgid "Unexpected Error: " +#~ msgstr "" + +#~ msgid "volume %s: creating export" +#~ msgstr "ボリューム %s をエクスポートします。" + +#~ msgid "volume %s: create failed" +#~ msgstr "ボリューム%sを作成します。" + +#~ msgid "volume %s: created successfully" +#~ msgstr "ボリューム %s の作成に成功しました。" + +#~ msgid "volume %s: Error trying to reschedule create" +#~ msgstr "" + +#~ msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +#~ msgstr "" + +#~ msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +#~ msgstr "存在しないコンソール %(console_id)s を削除しようとしました" + +#~ msgid "Downloaded image %(image_id)s to %(volume_id)s successfully." +#~ msgstr "" + +#~ msgid "Array Mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "LUN %(lun)s of size %(size)s MB is created." +#~ msgstr "" + +#~ msgid "Array mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "Failed to attach iser target for volume %(volume_id)s." 
+#~ msgstr "" + +#~ msgid "Fetching %s" +#~ msgstr "ファイルをフェッチ: %s" + +#~ msgid "Link Local address is not found.:%s" +#~ msgstr "リンクローカルアドレスが見つかりません: %s" + +#~ msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +#~ msgstr "%(interface)s のローカルIPアドレスのリンクが取得できません:%(ex)s" + +#~ msgid "Started %(name)s on %(_host)s:%(_port)s" +#~ msgstr "" + +#~ msgid "Unable to find a Fibre Channel volume device" +#~ msgstr "" + +#~ msgid "Volume device not found at %s" +#~ msgstr "" + +#~ msgid "Unable to find Volume Group: %s" +#~ msgstr "ボリューム %s を切断(detach)できません" + +#~ msgid "Failed to create Volume Group: %s" +#~ msgstr "ボリューム %s の存在が確認できません。" + +#~ msgid "snapshot %(snap_name)s: creating" +#~ msgstr "" + +#~ msgid "Running with CoraidDriver for ESM EtherCLoud" +#~ msgstr "" + +#~ msgid "Update session cookie %(session)s" +#~ msgstr "" + +#~ msgid "Message : %(message)s" +#~ msgstr "" + +#~ msgid "Error while trying to set group: %(message)s" +#~ msgstr "" + +#~ msgid "Unable to find group: %(group)s" +#~ msgstr "VBD %s から SRを取得できません。" + +#~ msgid "ESM urlOpen error" +#~ msgstr "" + +#~ msgid "JSON Error" +#~ msgstr "" + +#~ msgid "Request without URL" +#~ msgstr "" + +#~ msgid "Configure data : %s" +#~ msgstr "" + +#~ msgid "Configure response : %s" +#~ msgstr "応答 %s" + +#~ msgid "Unable to retrive volume infos for volume %(volname)s" +#~ msgstr "" + +#~ msgid "Cannot login on Coraid ESM" +#~ msgstr "" + +#~ msgid "Fail to create volume %(volname)s" +#~ msgstr "ボリューム %s の存在が確認できません。" + +#~ msgid "Failed to delete volume %(volname)s" +#~ msgstr "ボリューム %s の存在が確認できません。" + +#~ msgid "Failed to Create Snapshot %(snapname)s" +#~ msgstr "インスタンス終了処理を開始します。" + +#~ msgid "Failed to Delete Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "Failed to Create Volume from Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "fmt = %(fmt)s backed by: %(backing_file)s" +#~ msgstr "" + +#~ msgid "Expected image to be in raw format, but is %s" +#~ msgstr "" + +#~ msgid "volume group %s doesn't exist" +#~ msgstr "ボリュームグループ%sが存在しません。" + +#~ msgid "Error retrieving volume stats: %s" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Could not get system name" +#~ msgstr "" + +#~ msgid "CPG (%s) must be in a domain" +#~ msgstr "" + +#~ msgid "Error populating default encryption types!" +#~ msgstr "" + +#~ msgid "Unexpected error while running command." +#~ msgstr "コマンド実行において予期しないエラーが発生しました。" + +#~ msgid "Nexenta SA returned the error" +#~ msgstr "" + +#~ msgid "Ignored target group creation error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group member addition error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LU creation error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Copying metadata from snapshot %(snap_volume_id)s to %(volume_id)s" +#~ msgstr "" + +#~ msgid "Connection to glance failed" +#~ msgstr "" + +#~ msgid "Invalid snapshot" +#~ msgstr "" + +#~ msgid "Invalid input received" +#~ msgstr "" + +#~ msgid "Invalid volume type" +#~ msgstr "" + +#~ msgid "Invalid volume" +#~ msgstr "" + +#~ msgid "Invalid host" +#~ msgstr "" + +#~ msgid "Invalid auth key" +#~ msgstr "" + +#~ msgid "Invalid metadata" +#~ msgstr "" -#: cinder/volume/san.py:458 -#, python-format -msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s" -msgstr "" +#~ msgid "Invalid metadata size" +#~ msgstr "" -#: cinder/volume/san.py:466 -#, python-format -msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" -msgstr "" +#~ msgid "Migration error" +#~ msgstr "" -#: cinder/volume/san.py:496 -#, python-format -msgid "" -"Unexpected number of virtual ips for cluster %(cluster_name)s. " -"Result=%(_xml)s" -msgstr "" +#~ msgid "Quota exceeded" +#~ msgstr "" -#: cinder/volume/san.py:549 -#, python-format -msgid "Volume info: %(volume_name)s => %(volume_attributes)s" -msgstr "" +#~ msgid "Connection to swift failed" +#~ msgstr "libvirtへの接続が切れています。" -#: cinder/volume/san.py:594 -msgid "local_path not supported" -msgstr "" +#~ msgid "Volume migration failed" +#~ msgstr "" -#: cinder/volume/san.py:626 -#, python-format -msgid "Could not determine project for volume %s, can't export" -msgstr "" +#~ msgid "SSH command injection detected" +#~ msgstr "" -#: cinder/volume/san.py:696 -#, python-format -msgid "Payload for SolidFire API call: %s" -msgstr "" +#~ msgid "Invalid qos specs" +#~ msgstr "" -#: cinder/volume/san.py:713 -#, python-format -msgid "Call to json.loads() raised an exception: %s" -msgstr "" +#~ msgid "debug in callback: %s" +#~ msgstr "コールバック中のデバッグ: %s" -#: cinder/volume/san.py:718 -#, python-format -msgid "Results of SolidFire API call: %s" -msgstr "" +#~ msgid "Expected object of type: %s" +#~ msgstr "" -#: cinder/volume/san.py:732 -#, python-format -msgid "Found solidfire account: %s" -msgstr "" +#~ msgid "timefunc: '%(name)s' took %(total_time).2f secs" +#~ msgstr "" -#: cinder/volume/san.py:746 -#, python-format -msgid "solidfire account: %s does not exist, create it..." -msgstr "" +#~ msgid "base image still has %s snapshots so not deleting base image" +#~ msgstr "" -#: cinder/volume/san.py:804 -msgid "Enter SolidFire create_volume..." -msgstr "" +#~ msgid "Failed to rename migration destination volume %(vol)s to %(name)s" +#~ msgstr "" -#: cinder/volume/san.py:846 -msgid "Leaving SolidFire create_volume" -msgstr "" +#~ msgid "Resize volume \"%(name)s\" to %(size)s" +#~ msgstr "" -#: cinder/volume/san.py:861 -msgid "Enter SolidFire delete_volume..." -msgstr "" +#~ msgid "Volume \"%(name)s\" resized. New size is %(size)s" +#~ msgstr "" -#: cinder/volume/san.py:880 -#, python-format -msgid "Deleting volumeID: %s " -msgstr "" +#~ msgid "Invalid snapshot backing file format: %s" +#~ msgstr "" -#: cinder/volume/san.py:888 -msgid "Leaving SolidFire delete_volume" -msgstr "" +#~ msgid "Extend volume from %(old_size) to %(new_size)" +#~ msgstr "" -#: cinder/volume/san.py:891 -msgid "Executing SolidFire ensure_export..." -msgstr "" +#~ msgid "pool %s doesn't exist" +#~ msgstr "ボリュームグループ%sが存在しません。" -#: cinder/volume/san.py:895 -msgid "Executing SolidFire create_export..." -msgstr "" +#~ msgid "_update_volume_stats: Could not get system name." +#~ msgstr "" -#: cinder/volume/volume_types.py:49 cinder/volume/volume_types.py:108 -msgid "name cannot be None" -msgstr "" +#~ msgid "Disk not found: %s" +#~ msgstr "Sheepdog が動作していません: %s" -#: cinder/volume/volume_types.py:96 -msgid "id cannot be None" -msgstr "" +#~ msgid "read timed out" +#~ msgstr "" -#: cinder/volume/xensm.py:55 -#, python-format -msgid "SR name = %s" -msgstr "" +#~ msgid "check_for_setup_error." +#~ msgstr "" -#: cinder/volume/xensm.py:56 -#, python-format -msgid "Params: %s" -msgstr "" +#~ msgid "check_for_setup_error: Can not get device type." 
+#~ msgstr "" -#: cinder/volume/xensm.py:60 -#, python-format -msgid "Failed to create sr %s...continuing" -msgstr "" +#~ msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +#~ msgstr "" -#: cinder/volume/xensm.py:62 -msgid "Create failed" -msgstr "" +#~ msgid "_get_device_type: Storage Pool must be configured." +#~ msgstr "" -#: cinder/volume/xensm.py:64 -#, python-format -msgid "SR UUID of new SR is: %s" -msgstr "" +#~ msgid "create_volume:volume name: %s." +#~ msgstr "" -#: cinder/volume/xensm.py:71 -msgid "Failed to update db" -msgstr "" +#~ msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +#~ msgstr "" -#: cinder/volume/xensm.py:80 -#, python-format -msgid "Failed to introduce sr %s...continuing" -msgstr "" +#~ msgid "create_export: volume name:%s" +#~ msgstr "" -#: cinder/volume/xensm.py:91 -#, python-format -msgid "Failed to reach backend %d" -msgstr "" +#~ msgid "create_export:Volume %(name)s does not exist." +#~ msgstr "" -#: cinder/volume/xensm.py:100 -msgid "XenSMDriver requires xenapi connection" -msgstr "" +#~ msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +#~ msgstr "" -#: cinder/volume/xensm.py:110 -msgid "Failed to initiate session" -msgstr "" +#~ msgid "terminate_connection:Host does not exist. Host name:%(host)s." +#~ msgstr "" -#: cinder/volume/xensm.py:142 -#, python-format -msgid "Volume will be created in backend - %d" -msgstr "" +#~ msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +#~ msgstr "" -#: cinder/volume/xensm.py:154 -msgid "Failed to update volume in db" -msgstr "" +#~ msgid "create_snapshot:Device does not support snapshot." +#~ msgstr "" -#: cinder/volume/xensm.py:157 -msgid "Unable to create volume" -msgstr "" +#~ msgid "create_snapshot:Resource pool needs 1GB valid size at least." +#~ msgstr "" -#: cinder/volume/xensm.py:171 -msgid "Failed to delete vdi" -msgstr "" +#~ msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +#~ msgstr "" -#: cinder/volume/xensm.py:177 -msgid "Failed to delete volume in db" -msgstr "" +#~ msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s" +#~ msgstr "" -#: cinder/volume/xensm.py:210 -msgid "Failed to find volume in db" -msgstr "" +#~ msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +#~ msgstr "" -#: cinder/volume/xensm.py:221 -msgid "Failed to find backend in db" -msgstr "" +#~ msgid "delete_snapshot:Device does not support snapshot." +#~ msgstr "" -#: cinder/volume/nexenta/__init__.py:27 -msgid "Nexenta SA returned the error" -msgstr "" +#~ msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:64 -#, python-format -msgid "Sending JSON data: %s" -msgstr "" +#~ msgid "_check_conf_file: %s" +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:69 -#, python-format -msgid "Auto switching to HTTPS connection to %s" -msgstr "" +#~ msgid "Write login information to xml error. %s" +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:75 -msgid "No headers in server response" -msgstr "" +#~ msgid "_get_login_info error. %s" +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:76 -msgid "Bad response from server" -msgstr "" +#~ msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." 
+#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:79 -#, fuzzy, python-format -msgid "Got response: %s" -msgstr "応答 %s" +#~ msgid "_get_lun_set_info:%s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:96 -#, fuzzy, python-format -msgid "Volume %s does not exist in Nexenta SA" -msgstr "ボリュームグループ%sが存在しません。" +#~ msgid "_get_iscsi_info:%s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:180 -msgid "" -"Call to local_path should not happen. Verify that use_local_volumes flag " -"is turned off." -msgstr "" +#~ msgid "CLI command:%s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:202 -#, python-format -msgid "Ignored target creation error \"%s\" while ensuring export" -msgstr "" +#~ msgid "_execute_cli:%s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:210 -#, python-format -msgid "Ignored target group creation error \"%s\" while ensuring export" -msgstr "" +#~ msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:219 -#, python-format -msgid "Ignored target group member addition error \"%s\" while ensuring export" -msgstr "" +#~ msgid "_get_tgt_iqn:iSCSI IP is %s." +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:227 -#, python-format -msgid "Ignored LU creation error \"%s\" while ensuring export" -msgstr "" +#~ msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:237 -#, python-format -msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" -msgstr "" +#~ msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:273 -#, python-format -msgid "" -"Got error trying to destroy target group %(target_group)s, assuming it is" -" already gone: %(exc)s" -msgstr "" +#~ msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:280 -#, python-format -msgid "" -"Got error trying to delete target %(target)s, assuming it is already " -"gone: %(exc)s" -msgstr "" +#~ msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." +#~ msgstr "" -#~ msgid "Unable to locate account %(account_name) on Solidfire device" +#~ msgid "Ignored target creation error while ensuring export" #~ msgstr "" -#~ msgid "Zone %(zone_id)s could not be found." +#~ msgid "Ignored target group creation error while ensuring export" #~ msgstr "" -#~ msgid "Cinder access parameters were not specified." +#~ msgid "Ignored target group member addition error while ensuring export" #~ msgstr "" -#~ msgid "Virtual Storage Array %(id)d could not be found." +#~ msgid "Ignored LU creation error while ensuring export" #~ msgstr "" -#~ msgid "Virtual Storage Array %(name)s could not be found." +#~ msgid "Ignored LUN mapping entry addition error while ensuring export" #~ msgstr "" -#~ msgid "Detected more than one volume with name %(vol_name)" +#~ msgid "Invalid source volume %(reason)s." #~ msgstr "" -#~ msgid "Detected existing vlan with id %(vlan)" +#~ msgid "The request is invalid." #~ msgstr "" -#~ msgid "" -#~ "Attempting to grab semaphore \"%(lock)s\" " -#~ "for method \"%(method)s\"...lock" +#~ msgid "Volume %(volume_id)s persistence file could not be found." 
#~ msgstr "" -#~ msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgid "No disk at %(location)s" #~ msgstr "" -#~ msgid "" -#~ "Attempting to grab file lock " -#~ "\"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgid "Class %(class_name)s could not be found: %(exception)s" #~ msgstr "" -#~ msgid "Got file lock \"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgid "Action not allowed." #~ msgstr "" -#~ msgid "Parent group id and group id cannot be same" +#~ msgid "Key pair %(key_name)s already exists." #~ msgstr "" -#~ msgid "No body provided" +#~ msgid "Migration error: %(reason)s" #~ msgstr "" -#~ msgid "Create VSA %(display_name)s of type %(vc_type)s" +#~ msgid "Maximum volume/snapshot size exceeded" #~ msgstr "" -#~ msgid "Delete VSA with id: %s" +#~ msgid "3PAR Host already exists: %(err)s. %(info)s" #~ msgstr "" -#~ msgid "Associate address %(ip)s to VSA %(id)s" +#~ msgid "Backup volume %(volume_id)s type not recognised." #~ msgstr "" -#~ msgid "Disassociate address from VSA %(id)s" +#~ msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s" #~ msgstr "" -#~ msgid "%(obj)s with ID %(id)s not found" +#~ msgid "ssh_read: Read SSH timeout" #~ msgstr "" -#~ msgid "" -#~ "%(obj)s with ID %(id)s belongs to " -#~ "VSA %(own_vsa_id)s and not to VSA " -#~ "%(vsa_id)s." +#~ msgid "do_setup." #~ msgstr "" -#~ msgid "Index. vsa_id=%(vsa_id)s" +#~ msgid "create_volume: volume name: %s." #~ msgstr "" -#~ msgid "Detail. vsa_id=%(vsa_id)s" +#~ msgid "delete_volume: volume name: %s." #~ msgstr "" -#~ msgid "Create. vsa_id=%(vsa_id)s, body=%(body)s" +#~ msgid "create_cloned_volume: src volume: %(src)s tgt volume: %(tgt)s" #~ msgstr "" -#~ msgid "Create volume of %(size)s GB from VSA ID %(vsa_id)s" +#~ msgid "create_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" #~ msgstr "" -#~ msgid "Update %(obj)s with id: %(id)s, changes: %(changes)s" +#~ msgid "delete_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" #~ msgstr "" -#~ msgid "Delete. vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s" #~ msgstr "" -#~ msgid "Show. vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgid "_update_volume_stats: Updating volume stats" #~ msgstr "" -#~ msgid "Index instances for VSA %s" +#~ msgid "restore finished." #~ msgstr "" -#~ msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances" +#~ msgid "Error encountered during initialization of driver: %s" #~ msgstr "" -#~ msgid "" -#~ "Instance quota exceeded. You cannot run" -#~ " any more instances of this type." +#~ msgid "Unabled to update stats, driver is uninitialized" #~ msgstr "" -#~ msgid "Going to try to soft delete %s" +#~ msgid "Snapshot file at %s does not exist." #~ msgstr "" -#~ msgid "No host for instance %s, deleting immediately" +#~ msgid "_create_copy: Source vdisk %s does not exist" #~ msgstr "" -#~ msgid "Going to try to stop %s" +#~ msgid "Login to 3PAR array invalid" #~ msgstr "" -#~ msgid "Going to try to start %s" +#~ msgid "There are no datastores present under %s." #~ msgstr "" -#~ msgid "" -#~ "Going to force the deletion of the" -#~ " vm %(instance_uuid)s, even if it is" -#~ " deleted" +#~ msgid "Size for volume: %s not found, skipping secure delete." 
#~ msgstr "" -#~ msgid "" -#~ "Instance %(instance_uuid)s did not exist " -#~ "in the DB, but I will shut " -#~ "it down anyway using a special " -#~ "context" +#~ msgid "Could not find attribute for LUN named %s" +#~ msgstr "ボリューム %s 用の iSCSI エクスポートが見つかりません" + +#~ msgid "Cleaning up incomplete backup operations" #~ msgstr "" -#~ msgid "exception terminating the instance %(instance_uuid)s" +#~ msgid "Resetting volume %s to available (was backing-up)" #~ msgstr "" -#~ msgid "trying to destroy already destroyed instance: %s" -#~ msgstr "既に消去済みのインスタンス%sを消去しようとしました。" +#~ msgid "Resetting volume %s to error_restoring (was restoring-backup)" +#~ msgstr "" -#~ msgid "" -#~ "Instance %(name)s found in database but" -#~ " not known by hypervisor. Setting " -#~ "power state to NOSTATE" +#~ msgid "Resetting backup %s to error (was creating)" #~ msgstr "" -#~ msgid "" -#~ "Detected instance with name label " -#~ "'%(name_label)s' which is marked as " -#~ "DELETED but still present on host." +#~ msgid "Resetting backup %s to available (was restoring)" #~ msgstr "" -#~ msgid "" -#~ "Destroying instance with name label " -#~ "'%(name_label)s' which is marked as " -#~ "DELETED but still present on host." +#~ msgid "Resuming delete on backup: %s" #~ msgstr "" -#~ msgid "SQL connection failed (%(connstring)s). %(attempts)d attempts left." +#~ msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" #~ msgstr "" -#~ msgid "Can't downgrade without losing data" +#~ msgid "create_backup finished. backup: %s" #~ msgstr "" -#~ msgid "Instance %(instance_id)s not found" +#~ msgid "delete_backup started, backup: %s" #~ msgstr "" -#~ msgid "Network %s has active ports, cannot delete" +#~ msgid "delete_backup finished, backup %s deleted" #~ msgstr "" -#~ msgid "No fixed IPs to deallocate for vif %sid" +#~ msgid "JSON transfer Error" #~ msgstr "" -#~ msgid "" -#~ "AMQP server on %(fl_host)s:%(fl_port)d is " -#~ "unreachable: %(e)s. Trying again in " -#~ "%(fl_intv)d seconds." +#~ msgid "create volume error: %(err)s" #~ msgstr "" -#~ msgid "Unable to connect to AMQP server after %(tries)d tries. Shutting down." +#~ msgid "Create snapshot error." #~ msgstr "" -#~ msgid "Failed to fetch message from queue: %s" +#~ msgid "Create luncopy error." #~ msgstr "" -#~ msgid "Initing the Adapter Consumer for %s" -#~ msgstr "%sのアダプターコンシューマー(Adapter Consumer)を初期化しています。" +#~ msgid "_find_host_lun_id transfer data error! " +#~ msgstr "" -#~ msgid "Created \"%(exchange)s\" fanout exchange with \"%(key)s\" routing key" +#~ msgid "ssh_read: Read SSH timeout." #~ msgstr "" -#~ msgid "Exception while processing consumer" +#~ msgid "There are no hosts in the inventory." #~ msgstr "" -#~ msgid "Creating \"%(exchange)s\" fanout exchange" +#~ msgid "Unable to create volume: %(vol)s on the hosts: %(hosts)s." #~ msgstr "" -#~ msgid "topic is %s" -#~ msgstr "topic は %s です。" +#~ msgid "Successfully cloned new backing: %s." +#~ msgstr "" -#~ msgid "message %s" -#~ msgstr "メッセージ %s" +#~ msgid "Successfully reverted clone: %(clone)s to snapshot: %(snapshot)s." +#~ msgstr "" -#~ msgid "" -#~ "Cannot confirm tmpfile at %(ipath)s is" -#~ " on same shared storage between " -#~ "%(src)s and %(dest)s." +#~ msgid "Copying backing files from %(src)s to %(dest)s." #~ msgstr "" -#~ msgid "" -#~ "Unable to migrate %(instance_id)s to " -#~ "%(dest)s: Lack of memory(host:%(avail)s <= " -#~ "instance:%(mem_inst)s)" +#~ msgid "Initiated copying of backing via task: %s." 
#~ msgstr "" -#~ msgid "" -#~ "Unable to migrate %(instance_id)s to " -#~ "%(dest)s: Lack of disk(host:%(available)s <=" -#~ " instance:%(necessary)s)" +#~ msgid "Successfully copied backing to %s." #~ msgstr "" -#~ msgid "Driver Method %(driver_method)s missing: %(e)s.Reverting to schedule()" +#~ msgid "Registering backing at path: %s to inventory." #~ msgstr "" -#~ msgid "Setting instance %(instance_uuid)s to ERROR state." +#~ msgid "Initiated registring backing, task: %s." #~ msgstr "" -#~ msgid "_filter_hosts: %(request_spec)s" +#~ msgid "Successfully registered backing: %s." #~ msgstr "" -#~ msgid "Filter hosts for drive type %s" +#~ msgid "Reverting backing to snapshot: %s." #~ msgstr "" -#~ msgid "Host %s has no free capacity. Skip" +#~ msgid "Initiated reverting snapshot via task: %s." #~ msgstr "" -#~ msgid "Filter hosts: %s" +#~ msgid "Successfully reverted to snapshot: %s." #~ msgstr "" -#~ msgid "Must implement host selection mechanism" +#~ msgid "Successfully copied disk data to: %s." #~ msgstr "" -#~ msgid "Maximum number of hosts selected (%d)" +#~ msgid "Error(s): %s occurred in the call to RetrieveProperties." #~ msgstr "" -#~ msgid "Selected excessive host %(host)s" +#~ msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" #~ msgstr "" -#~ msgid "Provision volume %(name)s of size %(size)s GB on host %(host)s" +#~ msgid "Deploy v1 of the Cinder API. " #~ msgstr "" -#~ msgid "volume_params %(volume_params)s" +#~ msgid "Deploy v2 of the Cinder API. " #~ msgstr "" -#~ msgid "%(i)d: Volume %(name)s" +#~ msgid "_read_xml:%s" #~ msgstr "" -#~ msgid "Attempting to spawn %(num_volumes)d volume(s)" +#~ msgid "request ip info is %s." #~ msgstr "" -#~ msgid "Error creating volumes" +#~ msgid "new str info is %s." #~ msgstr "" -#~ msgid "Non-VSA volume %d" +#~ msgid "Failed to create iser target for volume %(volume_id)s." #~ msgstr "" -#~ msgid "Spawning volume %(volume_id)s with drive type %(drive_type)s" +#~ msgid "Failed to remove iser target for volume %(volume_id)s." #~ msgstr "" -#~ msgid "Error creating volume" +#~ msgid "rtstool is not installed correctly" #~ msgstr "" -#~ msgid "No capability selected for volume of size %(size)s" +#~ msgid "Creating iser_target for: %s" #~ msgstr "" -#~ msgid "Host %s:" +#~ msgid "Failed to create iser target for volume id:%(vol_id)s: %(e)s" #~ msgstr "" -#~ msgid "" -#~ "\tDrive %(qosgrp)-25s: total %(total)2s, " -#~ "used %(used)2s, free %(free)2s. Available " -#~ "capacity %(avail)-5s" +#~ msgid "Removing iser_target for: %s" #~ msgstr "" -#~ msgid "" -#~ "\t LeastUsedHost: Best host: %(best_host)s." -#~ " (used capacity %(min_used)s)" +#~ msgid "Failed to remove iser target for volume id:%(vol_id)s: %(e)s" #~ msgstr "" -#~ msgid "" -#~ "\t MostAvailCap: Best host: %(best_host)s. 
" -#~ "(available %(max_avail)s %(type_str)s)" +#~ msgid "Volume %s does not exist, it seems it was already deleted" +#~ msgstr "" + +#~ msgid "Executing zfs send/recv on the appliance" #~ msgstr "" -#~ msgid "(%(nm)s) publish (key: %(routing_key)s) %(message)s" -#~ msgstr "(%(nm)s) 公開 (キー: %(routing_key)s) %(message)s" +#~ msgid "zfs send/recv done, new volume %s created" +#~ msgstr "" -#~ msgid "Publishing to route %s" -#~ msgstr "ルート %s へパブリッシュ" +#~ msgid "Failed to delete temp snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" -#~ msgid "Declaring queue %s" -#~ msgstr "queue %s の宣言" +#~ msgid "Failed to delete zfs recv snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" -#~ msgid "Declaring exchange %s" -#~ msgstr "exchange %s の宣言" +#~ msgid "rbd export-diff failed - %s" +#~ msgstr "" -#~ msgid "Binding %(queue)s to %(exchange)s with key %(routing_key)s" -#~ msgstr "キー %(routing_key)s 付きで %(exchange)s に %(queue)s をバインドしています" +#~ msgid "rbd import-diff failed - %s" +#~ msgstr "" -#~ msgid "Getting from %(queue)s: %(message)s" -#~ msgstr "%(queue)s から取得しています: %(message)s" +#~ msgid "%s is not on GPFS. Perhaps GPFS not mounted." +#~ msgstr "" -#~ msgid "Test: Emulate wrong VSA name. Raise" +#~ msgid "Folder %s does not exist, it seems it was already deleted." #~ msgstr "" -#~ msgid "Test: Emulate DB error. Raise" +#~ msgid "No 'os-update_readonly_flag' was specified in request." #~ msgstr "" -#~ msgid "Test: user_data = %s" +#~ msgid "Volume 'readonly' flag must be specified in request as a boolean." #~ msgstr "" -#~ msgid "_create: param=%s" +#~ msgid "ISER provider_location not stored, using discovery" #~ msgstr "" -#~ msgid "Host %s" +#~ msgid "Could not find iSER export for volume %s" #~ msgstr "" -#~ msgid "Test: provision vol %(name)s on host %(host)s" +#~ msgid "ISER Discovery: Found %s" #~ msgstr "" -#~ msgid "\t vol=%(vol)s" +#~ msgid "Failed to access the device on the path %(path)s: %(error)s." #~ msgstr "" -#~ msgid "Test: VSA update request: vsa_id=%(vsa_id)s values=%(values)s" +#~ msgid "iSER device not found at %s" #~ msgstr "" -#~ msgid "Test: Volume create: %s" +#~ msgid "Found iSER node %(host_device)s (after %(tries)s rescans)." #~ msgstr "" -#~ msgid "Test: Volume get request: id=%(volume_id)s" +#~ msgid "Skipping ensure_export. No iser_target provisioned for volume: %s" #~ msgstr "" -#~ msgid "Test: Volume update request: id=%(volume_id)s values=%(values)s" +#~ msgid "Skipping remove_export. No iser_target provisioned for volume: %s" #~ msgstr "" -#~ msgid "Test: Volume get: id=%(volume_id)s" +#~ msgid "Downloading image: %s from glance image server." #~ msgstr "" -#~ msgid "Task [%(name)s] %(task)s status: success %(result)s" -#~ msgstr "タスク [%(name)s] %(task)s 状態: 成功 %(result)s" +#~ msgid "Uploading image: %s to the Glance image server." 
+#~ msgstr "" -#~ msgid "Task [%(name)s] %(task)s status: %(status)s %(error_info)s" -#~ msgstr "タスク [%(name)s] %(task)s 状態: %(status)s %(error_info)s" +#~ msgid "Invalid request body" +#~ msgstr "" -#~ msgid "Unable to get updated status: %s" +#~ msgid "enter: _get_host_from_connector: prefix %s" #~ msgstr "" -#~ msgid "" -#~ "deactivate_node is called for " -#~ "node_id = %(id)s node_ip = %(ip)s" +#~ msgid "Schedule volume flow not retrieved" #~ msgstr "" -#~ msgid "virsh said: %r" -#~ msgstr "virsh の出力: %r" +#~ msgid "Failed to successfully complete schedule volume using flow: %s" +#~ msgstr "" -#~ msgid "cool, it's a device" -#~ msgstr "デバイスです。" +#~ msgid "Create volume flow not retrieved" +#~ msgstr "" -#~ msgid "Unable to read LXC console" +#~ msgid "Failed to successfully complete create volume workflow" #~ msgstr "" -#~ msgid "" -#~ "to xml...\n" -#~ ":%s " +#~ msgid "Expected volume result not found" #~ msgstr "" -#~ msgid "During wait running, %s disappeared." +#~ msgid "Manager volume flow not retrieved" #~ msgstr "" -#~ msgid "Instance %s running successfully." +#~ msgid "Failed to successfully complete manager volume workflow" #~ msgstr "" -#~ msgid "The nwfilter(%(instance_secgroup_filter_name)s) is not found." +#~ msgid "Unable to update stats, driver is uninitialized" #~ msgstr "" -#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image verification failed" +#~ msgid "Bad reponse from server: %s" #~ msgstr "" -#~ msgid "" -#~ "%(container_format)s-%(id)s (%(base_file)s): image " -#~ "verification skipped, no hash stored" +#~ msgid "%(flow)s has moved into state %(state)s from state %(old_state)s" #~ msgstr "" -#~ msgid "%(container_format)s-%(id)s (%(base_file)s): checking" +#~ msgid "No request spec, will not reschedule" #~ msgstr "" -#~ msgid "" -#~ "%(container_format)s-%(id)s (%(base_file)s): in use:" -#~ " on this node %(local)d local, " -#~ "%(remote)d on other nodes" +#~ msgid "No retry filter property or associated retry info, will not reschedule" #~ msgstr "" -#~ msgid "" -#~ "%(container_format)s-%(id)s (%(base_file)s): warning " -#~ "-- an absent base file is in " -#~ "use! instances: %(instance_list)s" +#~ msgid "Retry info not present, will not reschedule" #~ msgstr "" -#~ msgid "" -#~ "%(container_format)s-%(id)s (%(base_file)s): in: on" -#~ " other nodes (%(remote)d on other " -#~ "nodes)" +#~ msgid "Clear capabilities" #~ msgstr "" -#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image is not in use" +#~ msgid "This usually means the volume was never succesfully created." #~ msgstr "" -#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image is in use" +#~ msgid "setting LU uppper (end) limit to %s" #~ msgstr "" -#~ msgid "Created VM %s..." -#~ msgstr "VM %s を作成します。" +#~ msgid "Can't find lun or lun goup in array" +#~ msgstr "" -#~ msgid "Created VM %(instance_name)s as %(vm_ref)s." -#~ msgstr "%(vm_ref)s として VM %(instance_name)s を作成しています" +#~ msgid "Volume to be restored to is smaller than the backup to be restored" +#~ msgstr "" -#~ msgid "Creating a CDROM-specific VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +#~ msgid "Volume driver '%(driver)s' not initialized." #~ msgstr "" -#~ msgid "" -#~ "Created a CDROM-specific VBD %(vbd_ref)s" -#~ " for VM %(vm_ref)s, VDI %(vdi_ref)s." +#~ msgid "in looping call" #~ msgstr "" -#~ msgid "No primary VDI found for%(vm_ref)s" +#~ msgid "Is the appropriate service running?" 
#~ msgstr "" -#~ msgid "Fetching image %(image)s" +#~ msgid "Could not find another host" #~ msgstr "" -#~ msgid "Image Type: %s" +#~ msgid "Not enough allocatable volume gigabytes remaining" #~ msgstr "" -#~ msgid "ISO: Found sr possibly containing the ISO image" +#~ msgid "Unable to update stats on non-intialized Volume Group: %s" #~ msgstr "" -#~ msgid "instance %s: Failed to fetch glance image" +#~ msgid "do_setup: Pool %s does not exist" #~ msgstr "" -#~ msgid "Creating VBD for VDI %s ... " -#~ msgstr "VDI %s 用に VBD を作成しています… " +#~ msgid "migrate_volume started with more than one vdisk copy" +#~ msgstr "" -#~ msgid "Creating VBD for VDI %s done." -#~ msgstr "VDI %s 用 VBD の作成が完了しました。" +#~ msgid "migrate_volume: Could not get vdisk copy data" +#~ msgstr "" -#~ msgid "VBD.unplug successful first time." -#~ msgstr "VBD.unplug は1回目で成功しました。" +#~ msgid "Selected datastore: %s for the volume." +#~ msgstr "" -#~ msgid "VBD.unplug rejected: retrying..." -#~ msgstr "VBD.unplug が拒否されました: 再試行しています…" +#~ msgid "There are no valid datastores present under %s." +#~ msgstr "" -#~ msgid "Not sleeping anymore!" +#~ msgid "Unable to create volume, driver not initialized" #~ msgstr "" -#~ msgid "VBD.unplug successful eventually." -#~ msgstr "VBD.unplug は最終的に成功しました。" +#~ msgid "Migration %(migration_id)s could not be found." +#~ msgstr "" -#~ msgid "Ignoring XenAPI.Failure in VBD.unplug: %s" -#~ msgstr "VBD.unplug 中の XenAPI.Failure を無視しています: %s" +#~ msgid "Bad driver response status: %(status)s" +#~ msgstr "" -#~ msgid "Ignoring XenAPI.Failure %s" -#~ msgstr "XenAPI.Failure %s を無視しています" +#~ msgid "Instance %(instance_id)s could not be found." +#~ msgstr "" -#~ msgid "Starting instance %s" +#~ msgid "Volume retype failed: %(reason)s" #~ msgstr "" -#~ msgid "instance %s: Failed to spawn" -#~ msgstr "インスタンス %s: 起動に失敗しました" +#~ msgid "SIGTERM received" +#~ msgstr "" -#~ msgid "Instance %s failed to spawn - performing clean-up" +#~ msgid "Child %(pid)d exited with status %(code)d" #~ msgstr "" -#~ msgid "instance %s: Failed to spawn - Unable to create VM" +#~ msgid "_wait_child %d" #~ msgstr "" +#~ msgid "wait wrap.failed %s" +#~ msgstr "NotFound 発生: %s" + #~ msgid "" -#~ "Auto configuring disk for instance " -#~ "%(instance_uuid)s, attempting to resize " -#~ "partition..." +#~ "Report interval must be less than " +#~ "service down time. Current config: " +#~ ". Setting " +#~ "service_down_time to: %(new_service_down_time)s" #~ msgstr "" -#~ msgid "Invalid value for injected_files: '%s'" +#~ msgid "Failed to update iscsi target for volume %(name)s." #~ msgstr "" -#~ msgid "Starting VM %s..." -#~ msgstr "VM %s を開始します…" - -#~ msgid "Spawning VM %(instance_uuid)s created %(vm_ref)s." +#~ msgid "Updating iscsi target: %s" #~ msgstr "" -#~ msgid "Instance %s: waiting for running" +#~ msgid "Failed to update iscsi target %(name)s: %(e)s" #~ msgstr "" -#~ msgid "Instance %s: running" +#~ msgid "Caught '%(exception)s' exception." #~ msgstr "" -#~ msgid "Resources to remove:%s" +#~ msgid "Get code level failed" #~ msgstr "" -#~ msgid "Removing VDI %(vdi_ref)s(uuid:%(vdi_to_remove)s)" +#~ msgid "do_setup: Could not get system name" #~ msgstr "" -#~ msgid "Skipping VDI destroy for %s" +#~ msgid "Failed to get license information." 
#~ msgstr "" -#~ msgid "Unable to Snapshot instance %(instance_uuid)s: %(exc)s" +#~ msgid "do_setup: No configured nodes" #~ msgstr "" -#~ msgid "Updating instance '%(instance_uuid)s' progress to %(progress)d" +#~ msgid "enter: _get_chap_secret_for_host: host name %s" #~ msgstr "" -#~ msgid "Resize instance %s complete" +#~ msgid "" +#~ "leave: _get_chap_secret_for_host: host name " +#~ "%(host_name)s with secret %(chap_secret)s" #~ msgstr "" -#~ msgid "domid changed from %(olddomid)s to %(newdomid)s" +#~ msgid "" +#~ "_create_host: Cannot clean host name. " +#~ "Host name is not unicode or string" #~ msgstr "" -#~ msgid "VM %(instance_uuid)s already halted,skipping shutdown..." +#~ msgid "enter: _get_host_from_connector: %s" #~ msgstr "" -#~ msgid "Shutting down VM for Instance %(instance_uuid)s" +#~ msgid "leave: _get_host_from_connector: host %s" #~ msgstr "" -#~ msgid "Destroying VDIs for Instance %(instance_uuid)s" +#~ msgid "enter: _create_host: host %s" #~ msgstr "" -#~ msgid "" -#~ "Instance %(instance_uuid)s using RAW or " -#~ "VHD, skipping kernel and ramdisk " -#~ "deletion" +#~ msgid "_create_host: No connector ports" #~ msgstr "" -#~ msgid "Instance %(instance_uuid)s VM destroyed" +#~ msgid "leave: _create_host: host %(host)s - %(host_name)s" #~ msgstr "" -#~ msgid "Destroying VM for Instance %(instance_uuid)s" +#~ msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" #~ msgstr "" -#~ msgid "Automatically hard rebooting %d" +#~ msgid "" +#~ "storwize_svc_multihostmap_enabled is set to " +#~ "False, Not allow multi host mapping" #~ msgstr "" -#~ msgid "Instance for migration %d not found, skipping" +#~ msgid "volume %s mapping to multi host" +#~ msgstr "ボリューム %s のエキスポートをスキップします。" + +#~ msgid "" +#~ "leave: _map_vol_to_host: LUN %(result_lun)s, " +#~ "volume %(volume_name)s, host %(host_name)s" #~ msgstr "" -#~ msgid "injecting network info to xs for vm: |%s|" +#~ msgid "enter: _delete_host: host %s " #~ msgstr "" -#~ msgid "creating vif(s) for vm: |%s|" +#~ msgid "leave: _delete_host: host %s " #~ msgstr "" -#~ msgid "Created VIF %(vif_ref)s for VM %(vm_ref)s, network %(network_ref)s." -#~ msgstr "VM %(vm_ref)s, network %(network_ref)s 用 VIF %(vif_ref)s を作成しました。" +#~ msgid "_create_host failed to return the host name." +#~ msgstr "" -#~ msgid "injecting hostname to xs for vm: |%s|" +#~ msgid "_get_host_from_connector failed to return the host name for connector" #~ msgstr "" #~ msgid "" -#~ "The agent call to %(method)s returned" -#~ " an invalid response: %(ret)r. VM " -#~ "id=%(instance_uuid)s; path=%(path)s; args=%(addl_args)r" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to any host found." #~ msgstr "" #~ msgid "" -#~ "TIMEOUT: The call to %(method)s timed" -#~ " out. VM id=%(instance_uuid)s; args=%(args)r" +#~ "terminate_connection: Multiple mappings of " +#~ "volume %(vol_name)s found, no host " +#~ "specified." #~ msgstr "" #~ msgid "" -#~ "NOT IMPLEMENTED: The call to %(method)s" -#~ " is not supported by the agent. " -#~ "VM id=%(instance_uuid)s; args=%(args)r" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to host %(host_name)s found" +#~ msgstr "" + +#~ msgid "protocol must be specified as ' iSCSI' or ' FC'" +#~ msgstr "" + +#~ msgid "enter: _create_vdisk: vdisk %s " #~ msgstr "" #~ msgid "" -#~ "The call to %(method)s returned an " -#~ "error: %(e)s. 
VM id=%(instance_uuid)s; " -#~ "args=%(args)r" +#~ "_create_vdisk %(name)s - did not find success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" -#~ msgid "Creating VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " -#~ msgstr "VM %(vm_ref)s, VDI %(vdi_ref)s 用 VBD を作成しています… " +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find success " +#~ "message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" -#~ msgid "Error destroying VDI" +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find mapping " +#~ "id in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" #~ msgstr "" -#~ msgid "\tVolume %s is NOT VSA volume" +#~ msgid "" +#~ "_prepare_fc_map: Failed to prepare FlashCopy" +#~ " from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s creation - do nothing" +#~ msgid "" +#~ "Unexecpted mapping status %(status)s for " +#~ "mapping %(id)s. Attributes: %(attr)s" #~ msgstr "" -#~ msgid "VSA BE create_volume for %s failed" +#~ msgid "" +#~ "Mapping %(id)s prepare failed to " +#~ "complete within the allotted %(to)d " +#~ "seconds timeout. Terminating." #~ msgstr "" -#~ msgid "VSA BE create_volume for %s succeeded" +#~ msgid "" +#~ "_prepare_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s with " +#~ "exception %(ex)s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s deletion - do nothing" +#~ msgid "_prepare_fc_map: %s" #~ msgstr "" -#~ msgid "VSA BE delete_volume for %s failed" +#~ msgid "" +#~ "_start_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" -#~ msgid "VSA BE delete_volume for %s suceeded" +#~ msgid "" +#~ "enter: _run_flashcopy: execute FlashCopy from" +#~ " source %(source)s to target %(target)s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s local path call - call discover" +#~ msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s ensure export - do nothing" +#~ msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s create export - do nothing" +#~ msgid "_create_copy: Source vdisk %(src_vdisk)s (%(src_id)s) does not exist" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s remove export - do nothing" +#~ msgid "" +#~ "_create_copy: cannot get source vdisk " +#~ "%(src)s capacity from vdisk attributes " +#~ "%(attr)s" #~ msgstr "" -#~ msgid "VSA BE remove_export for %s failed" +#~ msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" #~ msgstr "" -#~ msgid "Failed to retrieve QoS info" +#~ msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" #~ msgstr "" -#~ msgid "invalid drive data" +#~ msgid "" +#~ "leave: _get_flashcopy_mapping_attributes: mapping " +#~ "%(fc_map_id)s, attributes %(attributes)s" #~ msgstr "" -#~ msgid "drive_name not defined" +#~ msgid "enter: _is_vdisk_defined: vdisk %s " #~ msgstr "" -#~ msgid "invalid drive type name %s" +#~ msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " #~ msgstr "" -#~ msgid "*** Experimental VSA code ***" +#~ msgid "enter: _delete_vdisk: vdisk %s" #~ msgstr "" -#~ msgid "Requested number of VCs (%d) is too high. Setting to default" +#~ msgid "warning: Tried to delete vdisk %s but it does not exist." 
#~ msgstr "" -#~ msgid "Creating VSA: %s" +#~ msgid "leave: _delete_vdisk: vdisk %s" #~ msgstr "" #~ msgid "" -#~ "VSA ID %(vsa_id)d %(vsa_name)s: Create " -#~ "volume %(vol_name)s, %(vol_size)d GB, type " -#~ "%(vol_type_id)s" +#~ "_add_vdisk_copy %(name)s - did not find" +#~ " success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)d: Update VSA status to %(status)s" +#~ msgid "_get_vdisk_copy_attrs: Could not get vdisk copy data" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)d: Update VSA call" +#~ msgid "_get_pool_attrs: Pool %s does not exist" #~ msgstr "" -#~ msgid "Adding %(add_cnt)s VCs to VSA %(vsa_name)s." +#~ msgid "enter: _execute_command_and_parse_attributes: command %s" #~ msgstr "" -#~ msgid "Deleting %(del_cnt)s VCs from VSA %(vsa_name)s." +#~ msgid "" +#~ "leave: _execute_command_and_parse_attributes:\n" +#~ "command: %(cmd)s\n" +#~ "attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "_get_hdr_dic: attribute headers and values do not match.\n" +#~ " Headers: %(header)s\n" +#~ " Values: %(row)s" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Deleting %(direction)s volume %(vol_name)s" +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ "stdout: %(out)s\n" +#~ "stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" -#~ msgid "Unable to delete volume %s" +#~ msgid "Did not find expected column in %(fun)s: %(hdr)s" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Forced delete. %(direction)s volume %(vol_name)s" +#~ msgid "" +#~ "Volume size %(size)s cannot be lesser" +#~ " than the snapshot size %(snap_size)s. " +#~ "They must be >= original snapshot " +#~ "size." #~ msgstr "" -#~ msgid "Going to try to terminate VSA ID %s" +#~ msgid "" +#~ "Clones currently disallowed when %(size)s " +#~ "< %(source_size)s. They must be >= " +#~ "original volume size." #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Delete instance %(name)s" +#~ msgid "" +#~ "Size of specified image %(image_size)s " +#~ "is larger than volume size " +#~ "%(volume_size)s." #~ msgstr "" -#~ msgid "Create call received for VSA %s" +#~ msgid "" +#~ "Image minDisk size %(min_disk)s is " +#~ "larger than the volume size " +#~ "%(volume_size)s." #~ msgstr "" -#~ msgid "Failed to find VSA %(vsa_id)d" +#~ msgid "Updating volume %(volume_id)s with %(update)s" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Drive %(vol_id)s created. Status %(status)s" +#~ msgid "Volume %s: resetting 'creating' status failed" #~ msgstr "" -#~ msgid "Drive %(vol_name)s (%(vol_disp_name)s) still in creating phase - wait" +#~ msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s" #~ msgstr "" -#~ msgid "" -#~ "VSA ID %(vsa_id)d: Not all volumes " -#~ "are created (%(cvol_real)d of %(cvol_exp)d)" +#~ msgid "Marking volume %s as bootable" #~ msgstr "" #~ msgid "" -#~ "VSA ID %(vsa_id)d: Drive %(vol_name)s " -#~ "(%(vol_disp_name)s) is in %(status)s state" +#~ "Attempting download of %(image_id)s " +#~ "(%(image_location)s) to volume %(volume_id)s" #~ msgstr "" -#~ msgid "Failed to update attach status for volume %(vol_name)s. 
%(ex)s" +#~ msgid "" +#~ "Downloaded image %(image_id)s (%(image_location)s)" +#~ " to volume %(volume_id)s successfully" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)d: Delete all BE volumes" +#~ msgid "" +#~ "Creating volume glance metadata for " +#~ "volume %(volume_id)s backed by image " +#~ "%(image_id)s with: %(vol_metadata)s" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)d: Start %(vc_count)d instances" +#~ msgid "" +#~ "Cloning %(volume_id)s from image %(image_id)s" +#~ " at location %(image_location)s" #~ msgstr "" diff --git a/cinder/locale/ka_GE/LC_MESSAGES/cinder.po b/cinder/locale/ka_GE/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..1e02f52707 --- /dev/null +++ b/cinder/locale/ka_GE/LC_MESSAGES/cinder.po @@ -0,0 +1,10736 @@ +# Georgian (Georgia) translations for cinder. +# Copyright (C) 2013 ORGANIZATION +# This file is distributed under the same license as the cinder project. +# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Cinder\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-02-03 06:16+0000\n" +"PO-Revision-Date: 2013-05-29 08:13+0000\n" +"Last-Translator: openstackjenkins \n" +"Language-Team: Georgian (Georgia) " +"(http://www.transifex.com/projects/p/openstack/language/ka_GE/)\n" +"Plural-Forms: nplurals=1; plural=0\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:102 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:66 cinder/brick/exception.py:33 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:88 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:107 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:112 +#, python-format +msgid "Volume driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:116 +#, python-format +msgid "Backup driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:120 +#, python-format +msgid "Connection to glance failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:124 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:129 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:133 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:137 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:141 +msgid "Volume driver not ready." +msgstr "" + +#: cinder/exception.py:145 cinder/brick/exception.py:74 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:150 +#, python-format +msgid "Invalid snapshot: %(reason)s" +msgstr "" + +#: cinder/exception.py:154 +#, python-format +msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:159 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:163 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:167 +msgid "The results are invalid." 
+msgstr "" + +#: cinder/exception.py:171 +#, python-format +msgid "Invalid input received: %(reason)s" +msgstr "" + +#: cinder/exception.py:175 +#, python-format +msgid "Invalid volume type: %(reason)s" +msgstr "" + +#: cinder/exception.py:179 +#, python-format +msgid "Invalid volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:183 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:187 +#, python-format +msgid "Invalid host: %(reason)s" +msgstr "" + +#: cinder/exception.py:193 cinder/brick/exception.py:81 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:197 +#, python-format +msgid "Invalid auth key: %(reason)s" +msgstr "" + +#: cinder/exception.py:201 +#, python-format +msgid "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" +msgstr "" + +#: cinder/exception.py:206 +msgid "Service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:210 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:214 +#, python-format +msgid "The device in the path %(path)s is unavailable: %(reason)s" +msgstr "" + +#: cinder/exception.py:218 +#, python-format +msgid "Expected a uuid but received %(uuid)s." +msgstr "" + +#: cinder/exception.py:222 cinder/brick/exception.py:68 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:228 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:232 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "" +"Volume %(volume_id)s has no administration metadata with key " +"%(metadata_key)s." +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Invalid metadata: %(reason)s" +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Invalid metadata size: %(reason)s" +msgstr "" + +#: cinder/exception.py:250 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:255 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:264 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:269 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s deletion is not allowed with volumes " +"present with the type." +msgstr "" + +#: cinder/exception.py:274 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:278 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:282 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:287 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:291 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:295 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "Service %(service_id)s could not be found." 
+msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:328 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:332 +#, python-format +msgid "Unknown quota resources %(unknown)s." +msgstr "" + +#: cinder/exception.py:336 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:340 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:344 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:348 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:352 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:356 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:365 +#, python-format +msgid "Volume Type %(id)s already exists." +msgstr "" + +#: cinder/exception.py:369 +#, python-format +msgid "Volume type encryption for type %(type_id)s already exists." +msgstr "" + +#: cinder/exception.py:373 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:377 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:381 +#, python-format +msgid "Could not find parameter %(param)s" +msgstr "" + +#: cinder/exception.py:385 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:389 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:398 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:402 +#, python-format +msgid "Quota exceeded: code=%(code)s" +msgstr "" + +#: cinder/exception.py:409 +#, python-format +msgid "" +"Requested volume or snapshot exceeds allowed Gigabytes quota. Requested " +"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." 
+msgstr "" + +#: cinder/exception.py:415 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:419 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:423 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:427 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:432 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:436 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:440 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:444 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:449 +#, python-format +msgid "Glance metadata for volume/snapshot %(id)s cannot be found." +msgstr "" + +#: cinder/exception.py:453 +#, python-format +msgid "Failed to export for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:457 +#, python-format +msgid "Failed to create metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:461 +#, python-format +msgid "Failed to update metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:465 +#, python-format +msgid "Failed to copy metadata to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:469 +#, python-format +msgid "Failed to copy image to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:473 +msgid "Invalid Ceph args provided for backup rbd operation" +msgstr "" + +#: cinder/exception.py:477 +msgid "An error has occurred during backup operation" +msgstr "" + +#: cinder/exception.py:481 +msgid "Backup RBD operation failed" +msgstr "" + +#: cinder/exception.py:485 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:489 +msgid "Failed to identify volume backend." +msgstr "" + +#: cinder/exception.py:493 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "" + +#: cinder/exception.py:497 +#, python-format +msgid "Connection to swift failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:501 +#, python-format +msgid "Transfer %(transfer_id)s could not be found." +msgstr "" + +#: cinder/exception.py:505 +#, python-format +msgid "Volume migration failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:509 +#, python-format +msgid "SSH command injection detected: %(command)s" +msgstr "" + +#: cinder/exception.py:513 +#, python-format +msgid "QoS Specs %(specs_id)s already exists." +msgstr "" + +#: cinder/exception.py:517 +#, python-format +msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:522 +#, python-format +msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "No such QoS spec %(specs_id)s." +msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:536 +#, python-format +msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." 
+msgstr "" + +#: cinder/exception.py:541 +#, python-format +msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." +msgstr "" + +#: cinder/exception.py:546 +#, python-format +msgid "Invalid qos specs: %(reason)s" +msgstr "" + +#: cinder/exception.py:550 +#, python-format +msgid "QoS Specs %(specs_id)s is still associated with entities." +msgstr "" + +#: cinder/exception.py:554 +#, python-format +msgid "key manager error: %(reason)s" +msgstr "" + +#: cinder/exception.py:560 +msgid "Coraid Cinder Driver exception." +msgstr "" + +#: cinder/exception.py:564 +msgid "Failed to encode json data." +msgstr "" + +#: cinder/exception.py:568 +msgid "Login on ESM failed." +msgstr "" + +#: cinder/exception.py:572 +msgid "Relogin on ESM failed." +msgstr "" + +#: cinder/exception.py:576 +#, python-format +msgid "Group with name \"%(group_name)s\" not found." +msgstr "" + +#: cinder/exception.py:580 +#, python-format +msgid "ESM configure request failed: %(message)s." +msgstr "" + +#: cinder/exception.py:584 +#, python-format +msgid "Coraid ESM not available with reason: %(reason)s." +msgstr "" + +#: cinder/exception.py:589 +msgid "Zadara Cinder Driver exception." +msgstr "" + +#: cinder/exception.py:593 +#, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:597 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:601 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:605 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:609 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:613 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:618 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:622 +msgid "SolidFire Cinder Driver exception" +msgstr "" + +#: cinder/exception.py:626 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:630 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:636 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:641 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:645 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:649 cinder/exception.py:662 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:654 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:658 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/manager.py:133 +msgid "Notifying Schedulers of capabilities ..." +msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:105 +#, python-format +msgid "" +"Default quota for resource: %(res)s is set by the default quota flag: " +"quota_%(res)s, it is now deprecated. Please use the the default quota " +"class for default quota." 
+msgstr "" + +#: cinder/quota.py:748 +#, python-format +msgid "Created reservations %s" +msgstr "" + +#: cinder/quota.py:770 +#, python-format +msgid "Failed to commit reservations %s" +msgstr "" + +#: cinder/quota.py:790 +#, python-format +msgid "Failed to roll back reservations %s" +msgstr "" + +#: cinder/quota.py:876 +msgid "Cannot register resource" +msgstr "" + +#: cinder/quota.py:879 +msgid "Cannot register resources" +msgstr "" + +#: cinder/quota_utils.py:46 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume - " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/quota_utils.py:56 cinder/transfer/api.py:168 +#: cinder/volume/flows/api/create_volume.py:520 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/service.py:95 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:108 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:148 +#, python-format +msgid "" +"Report interval must be less than service down time. Current config " +"service_down_time: %(service_down_time)s, report_interval for this: " +"service is: %(report_interval)s. Setting global service_down_time to: " +"%(new_down_time)s" +msgstr "" + +#: cinder/service.py:216 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:255 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:270 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:276 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:298 +#, python-format +msgid "" +"Value of config option %(name)s_workers must be integer greater than 1. " +"Input value ignored." +msgstr "" + +#: cinder/service.py:373 +msgid "serve() can only be called once" +msgstr "" + +#: cinder/service.py:379 cinder/openstack/common/service.py:166 +#: cinder/openstack/common/service.py:384 +msgid "Full set of CONF:" +msgstr "" + +#: cinder/service.py:387 +#, python-format +msgid "%s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Can not translate %s to integer." 
+msgstr "" + +#: cinder/utils.py:127 +#, python-format +msgid "May specify only one of %s" +msgstr "" + +#: cinder/utils.py:212 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:228 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:412 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:423 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:698 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:759 +#, python-format +msgid "Volume driver %s not initialized" +msgstr "" + +#: cinder/wsgi.py:127 cinder/openstack/common/sslutils.py:50 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: cinder/wsgi.py:130 cinder/openstack/common/sslutils.py:53 +#, python-format +msgid "Unable to find ca_file : %s" +msgstr "" + +#: cinder/wsgi.py:133 cinder/openstack/common/sslutils.py:56 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: cinder/wsgi.py:136 cinder/openstack/common/sslutils.py:59 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:169 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:206 +#, python-format +msgid "Started %(name)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:244 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:313 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." +msgstr "" + +#: cinder/api/common.py:92 cinder/api/common.py:126 cinder/volume/api.py:266 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:95 cinder/api/common.py:130 cinder/volume/api.py:263 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:120 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:134 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:162 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:189 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:182 +msgid "Initializing extension manager." 
+msgstr "" + +#: cinder/api/extensions.py:197 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:235 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:236 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:240 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:256 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:262 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:276 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:287 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:356 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:266 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:463 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:786 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:907 +msgid "subclasses must implement construct()!" +msgstr "" + +#: cinder/api/contrib/admin_actions.py:81 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:227 +#: cinder/api/contrib/volume_transfer.py:157 +#: cinder/api/contrib/volume_transfer.py:193 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:224 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:60 +msgid "Snapshot not found." 
+msgstr "" + +#: cinder/api/contrib/hosts.py:86 cinder/api/openstack/wsgi.py:245 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:136 +#, python-format +msgid "Host '%s' could not be found." +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:168 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:180 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:206 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:214 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:111 +msgid "Please specify a name for QoS specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:220 +msgid "Failed to disassociate qos specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:222 +msgid "Qos specs still in use." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:298 +#: cinder/api/contrib/qos_specs_manage.py:351 +msgid "Volume Type id must not be None." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:72 +msgid "Missing required element quota_class_set in request body." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:81 +msgid "Quota class limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:85 +msgid "Quota class limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:60 +msgid "Quota limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quotas.py:65 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:100 +msgid "Missing required element quota_set in request body." +msgstr "" + +#: cinder/api/contrib/quotas.py:111 +#, python-format +msgid "Bad key(s) in quota set: %s" +msgstr "" + +#: cinder/api/contrib/scheduler_hints.py:36 +msgid "Malformed scheduler_hints attribute" +msgstr "" + +#: cinder/api/contrib/services.py:84 +msgid "" +"Query by service parameter is deprecated. Please use binary parameter " +"instead." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:51 +msgid "'status' must be specified." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:61 +#, python-format +msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:67 +#, python-format +msgid "" +"Provided snapshot status %(provided)s not allowed for snapshot with " +"status %(current)s." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:79 +msgid "progress must be an integer percentage" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:101 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:105 +#: cinder/api/v1/snapshot_metadata.py:75 cinder/api/v1/volume_metadata.py:75 +#: cinder/api/v2/snapshot_metadata.py:75 cinder/api/v2/volume_metadata.py:74 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:108 +#: cinder/api/v1/snapshot_metadata.py:79 cinder/api/v1/volume_metadata.py:79 +#: cinder/api/v2/snapshot_metadata.py:79 cinder/api/v2/volume_metadata.py:78 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:150 +msgid "" +"Key names can only contain alphanumeric characters, underscores, periods," +" colons and hyphens." 
+msgstr "" + +#: cinder/api/contrib/volume_actions.py:99 +#, python-format +msgid "" +"Invalid request to attach volume to an instance %(instance_uuid)s and a " +"host %(host_name)s simultaneously" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:107 +msgid "Invalid request to attach volume to an invalid target" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:111 +msgid "" +"Invalid request to attach volume with an invalid mode. Attaching mode " +"should be 'rw' or 'ro'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:196 +msgid "Unable to fetch connection information from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:216 +msgid "Unable to terminate volume connection from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:229 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:237 +msgid "Bad value for 'force' parameter." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:240 +msgid "'force' is not string or bool." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:280 +msgid "New volume size must be specified as an integer." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:299 +msgid "Must specify readonly in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:307 +msgid "Bad value for 'readonly'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:311 +msgid "'readonly' not string or bool" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:325 +msgid "New volume type must be specified." +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:131 +msgid "Listing volume transfers" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:147 +#, python-format +msgid "Creating new volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:162 +#, python-format +msgid "Creating transfer of volume %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:183 +#, python-format +msgid "Accepting volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:196 +#, python-format +msgid "Accepting transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:217 +#, python-format +msgid "Delete transfer with id: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:64 +msgid "key_size must be non-negative" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:67 +msgid "key_size must be an integer" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:73 +msgid "provider must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:75 +msgid "control_location must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:83 +#, python-format +msgid "Valid control location are: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:111 +msgid "Cannot create encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:115 +msgid "Create body is not valid." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:157 +msgid "Cannot delete encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/middleware/auth.py:108 +msgid "Invalid service catalog json." 
+msgstr "" + +#: cinder/api/middleware/fault.py:44 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:53 cinder/api/openstack/wsgi.py:984 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/fault.py:69 +#, python-format +msgid "%(exception)s: %(explanation)s" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:55 cinder/api/middleware/sizelimit.py:64 +#: cinder/api/middleware/sizelimit.py:78 +msgid "Request is too large." +msgstr "" + +#: cinder/api/openstack/__init__.py:69 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:80 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:104 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:126 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." +msgstr "" + +#: cinder/api/openstack/wsgi.py:220 cinder/api/openstack/wsgi.py:634 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:639 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:677 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:682 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:685 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:793 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:799 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:803 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:914 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:917 cinder/api/openstack/wsgi.py:930 +#: cinder/api/v1/snapshot_metadata.py:53 cinder/api/v1/snapshot_metadata.py:71 +#: cinder/api/v1/snapshot_metadata.py:96 cinder/api/v1/snapshot_metadata.py:121 +#: cinder/api/v1/volume_metadata.py:53 cinder/api/v1/volume_metadata.py:71 +#: cinder/api/v1/volume_metadata.py:96 cinder/api/v1/volume_metadata.py:121 +#: cinder/api/v2/snapshot_metadata.py:53 cinder/api/v2/snapshot_metadata.py:71 +#: cinder/api/v2/snapshot_metadata.py:96 cinder/api/v2/snapshot_metadata.py:121 +#: cinder/api/v2/volume_metadata.py:52 cinder/api/v2/volume_metadata.py:70 +#: cinder/api/v2/volume_metadata.py:95 cinder/api/v2/volume_metadata.py:120 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:927 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:939 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:987 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." 
+msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:139 cinder/api/v2/limits.py:138 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:264 cinder/api/v2/limits.py:261 +msgid "This request was rate-limited." +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:37 cinder/api/v1/snapshot_metadata.py:117 +#: cinder/api/v1/snapshot_metadata.py:156 cinder/api/v2/snapshot_metadata.py:37 +#: cinder/api/v2/snapshot_metadata.py:117 +#: cinder/api/v2/snapshot_metadata.py:156 +msgid "snapshot does not exist" +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:139 +#: cinder/api/v1/snapshot_metadata.py:149 cinder/api/v1/volume_metadata.py:139 +#: cinder/api/v1/volume_metadata.py:149 cinder/api/v2/snapshot_metadata.py:139 +#: cinder/api/v2/snapshot_metadata.py:149 cinder/api/v2/volume_metadata.py:138 +#: cinder/api/v2/volume_metadata.py:148 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:119 cinder/api/v2/snapshots.py:120 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:184 +msgid "'volume_id' must be specified" +msgstr "" + +#: cinder/api/v1/snapshots.py:182 cinder/api/v2/snapshots.py:193 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:186 cinder/api/v2/snapshots.py:202 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:37 cinder/api/v1/volume_metadata.py:117 +#: cinder/api/v1/volume_metadata.py:156 cinder/api/v2/volume_metadata.py:36 +#: cinder/api/v2/volume_metadata.py:116 cinder/api/v2/volume_metadata.py:155 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:111 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:290 cinder/api/v2/volumes.py:228 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:344 cinder/api/v1/volumes.py:348 +#: cinder/api/v2/volumes.py:298 cinder/api/v2/volumes.py:302 +msgid "Invalid imageRef provided." 
+msgstr "" + +#: cinder/api/v1/volumes.py:388 cinder/api/v2/volumes.py:354 +#, python-format +msgid "snapshot id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:401 +#, python-format +msgid "source vol id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:412 cinder/api/v2/volumes.py:377 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/v1/volumes.py:496 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/snapshots.py:111 cinder/api/v2/snapshots.py:126 +#: cinder/api/v2/snapshots.py:267 +msgid "Snapshot could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:174 cinder/api/v2/snapshots.py:234 +#: cinder/api/v2/volumes.py:313 cinder/api/v2/volumes.py:419 +#, python-format +msgid "Missing required element '%s' in request body" +msgstr "" + +#: cinder/api/v2/snapshots.py:190 cinder/api/v2/volumes.py:217 +#: cinder/api/v2/volumes.py:234 cinder/api/v2/volumes.py:449 +msgid "Volume could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:230 cinder/api/v2/volumes.py:415 +msgid "Missing request body" +msgstr "" + +#: cinder/api/v2/types.py:70 +msgid "Volume type not found" +msgstr "" + +#: cinder/api/v2/volumes.py:237 +msgid "Volume cannot be deleted while in attached state" +msgstr "" + +#: cinder/api/v2/volumes.py:343 +msgid "Volume type not found." +msgstr "" + +#: cinder/api/v2/volumes.py:366 +#, python-format +msgid "source volume id:%s not found" +msgstr "" + +#: cinder/api/v2/volumes.py:472 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:66 +msgid "Backup status must be available or error" +msgstr "" + +#: cinder/backup/api.py:105 +msgid "Volume to be backed up must be available" +msgstr "" + +#: cinder/backup/api.py:140 +msgid "Backup status must be available" +msgstr "" + +#: cinder/backup/api.py:145 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:154 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:170 +msgid "Volume to be restored to must be available" +msgstr "" + +#: cinder/backup/api.py:176 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." +msgstr "" + +#: cinder/backup/api.py:181 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:97 +msgid "NULL host not allowed for volume backend lookup." +msgstr "" + +#: cinder/backup/manager.py:100 +#, python-format +msgid "Checking hostname '%s' for backend info." +msgstr "" + +#: cinder/backup/manager.py:107 +#, python-format +msgid "Backend not found in hostname (%s) so using default." +msgstr "" + +#: cinder/backup/manager.py:117 +#, python-format +msgid "Manager requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:120 cinder/backup/manager.py:132 +msgid "Fetching default backend." +msgstr "" + +#: cinder/backup/manager.py:123 +#, python-format +msgid "Volume manager for backend '%s' does not exist." +msgstr "" + +#: cinder/backup/manager.py:129 +#, python-format +msgid "Driver requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:147 +#, python-format +msgid "" +"Registering backend %(backend)s (host=%(host)s " +"backend_name=%(backend_name)s)." +msgstr "" + +#: cinder/backup/manager.py:154 +#, python-format +msgid "Registering default backend %s." 
+msgstr "" + +#: cinder/backup/manager.py:158 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)." +msgstr "" + +#: cinder/backup/manager.py:165 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s." +msgstr "" + +#: cinder/backup/manager.py:184 +msgid "Cleaning up incomplete backup operations." +msgstr "" + +#: cinder/backup/manager.py:189 +#, python-format +msgid "Resetting volume %s to available (was backing-up)." +msgstr "" + +#: cinder/backup/manager.py:194 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)." +msgstr "" + +#: cinder/backup/manager.py:206 +#, python-format +msgid "Resetting backup %s to error (was creating)." +msgstr "" + +#: cinder/backup/manager.py:212 +#, python-format +msgid "Resetting backup %s to available (was restoring)." +msgstr "" + +#: cinder/backup/manager.py:217 +#, python-format +msgid "Resuming delete on backup: %s." +msgstr "" + +#: cinder/backup/manager.py:225 +#, python-format +msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:237 +#, python-format +msgid "" +"Create backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:249 +#, python-format +msgid "" +"Create backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:282 +#, python-format +msgid "Create backup finished. backup: %s." +msgstr "" + +#: cinder/backup/manager.py:286 +#, python-format +msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:299 +#, python-format +msgid "" +"Restore backup aborted: expected volume status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:310 +#, python-format +msgid "" +"Restore backup aborted: expected backup status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:329 +#, python-format +msgid "" +"Restore backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:360 +#, python-format +msgid "" +"Restore backup finished, backup %(backup_id)s restored to volume " +"%(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:379 +#, python-format +msgid "Delete backup started, backup: %s." +msgstr "" + +#: cinder/backup/manager.py:386 +#, python-format +msgid "" +"Delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:399 +#, python-format +msgid "" +"Delete backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:422 +#, python-format +msgid "Delete backup finished, backup %s deleted." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:116 +msgid "" +"rbd striping not supported - ignoring configuration settings for rbd " +"striping" +msgstr "" + +#: cinder/backup/drivers/ceph.py:147 +#, python-format +msgid "invalid user '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:213 +msgid "backup_id required" +msgstr "" + +#: cinder/backup/drivers/ceph.py:224 +#, python-format +msgid "discarding %(length)s bytes from offset %(offset)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:232 +#, python-format +msgid "writing zeroes chunk %d" +msgstr "" + +#: cinder/backup/drivers/ceph.py:246 +#, python-format +msgid "transferring data between '%(src)s' and '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:250 +#, python-format +msgid "%(chunks)s chunks of %(bytes)s bytes to be transferred" +msgstr "" + +#: cinder/backup/drivers/ceph.py:269 +#, python-format +msgid "transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:279 +#, python-format +msgid "transferring remaining %s bytes" +msgstr "" + +#: cinder/backup/drivers/ceph.py:295 +#, python-format +msgid "creating base image '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:322 cinder/backup/drivers/ceph.py:603 +#, python-format +msgid "deleting backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:325 +msgid "no backup snapshot to delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:361 +#, python-format +msgid "trying diff format name format basename='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:369 +#, python-format +msgid "image %s not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:377 +#, python-format +msgid "base image still has %s snapshots so skipping base image delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:382 +#, python-format +msgid "deleting base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:389 +#, python-format +msgid "image busy, retrying %(retries)s more time(s) in %(delay)ss" +msgstr "" + +#: cinder/backup/drivers/ceph.py:394 +msgid "max retries reached - raising error" +msgstr "" + +#: cinder/backup/drivers/ceph.py:397 +#, python-format +msgid "base backup image='%s' deleted)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:407 +#, python-format +msgid "deleting source snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:453 +#, python-format +msgid "performing differential transfer from '%(src)s' to '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:478 +#, python-format +msgid "rbd diff op failed - (ret=%(ret)s stderr=%(stderr)s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:488 +#, python-format +msgid "image '%s' not found - trying diff format name" +msgstr "" + +#: cinder/backup/drivers/ceph.py:493 +#, python-format +msgid "diff format image '%s' not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:528 +#, python-format +msgid "using --from-snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:543 +#, python-format +msgid "source snap '%s' is stale so deleting" +msgstr "" + +#: cinder/backup/drivers/ceph.py:555 +#, python-format +msgid "" +"snap='%(snap)s' does not exist in base image='%(base)s' - aborting " +"incremental backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:566 +#, python-format +msgid "creating backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:586 +#, python-format +msgid "differential backup transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:595 +msgid "differential backup transfer failed" +msgstr "" + +#: 
cinder/backup/drivers/ceph.py:625 +#, python-format +msgid "creating base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:634 +msgid "copying data" +msgstr "" + +#: cinder/backup/drivers/ceph.py:694 +#, python-format +msgid "looking for snapshot of backup base '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:697 +#, python-format +msgid "backup base '%s' has no snapshots" +msgstr "" + +#: cinder/backup/drivers/ceph.py:704 +#, python-format +msgid "backup '%s' has no snapshot" +msgstr "" + +#: cinder/backup/drivers/ceph.py:708 +#, python-format +msgid "backup should only have one snapshot but instead has %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:713 +#, python-format +msgid "found snapshot '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:734 +msgid "need non-zero volume size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:751 +#, python-format +msgid "Starting backup of volume='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:764 +msgid "forcing full backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:776 +#, python-format +msgid "backup '%s' finished." +msgstr "" + +#: cinder/backup/drivers/ceph.py:834 +msgid "adjusting restore vol size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:846 +#, python-format +msgid "trying incremental restore from base='%(base)s' snap='%(snap)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:858 +msgid "differential restore failed, trying full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:869 +#, python-format +msgid "restore transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:916 +#, python-format +msgid "rbd has %s extents" +msgstr "" + +#: cinder/backup/drivers/ceph.py:938 +msgid "dest volume is original volume - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:959 +msgid "destination has extents - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:964 +#, python-format +msgid "no restore point found for backup='%s', forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:995 +msgid "forcing full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1005 +#, python-format +msgid "starting restore from Ceph backup=%(src)s to volume=%(dest)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1016 +msgid "volume_file does not support fileno() so skipping fsync()" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1021 +msgid "restore finished successfully." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:1023 +#, python-format +msgid "restore finished with error - %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1029 +#, python-format +msgid "delete started for backup=%s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1034 +msgid "rbd image not found but continuing anyway so that db entry can be removed" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1037 +#, python-format +msgid "delete '%s' finished with warning" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1039 +#, python-format +msgid "delete '%s' finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:106 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:123 +#, python-format +msgid "single_user auth mode enabled, but %(param)s not set" +msgstr "" + +#: cinder/backup/drivers/swift.py:141 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:146 +#, python-format +msgid "container %s does not exist" +msgstr "" + +#: cinder/backup/drivers/swift.py:151 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/drivers/swift.py:157 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:173 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:182 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:192 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:209 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/drivers/swift.py:214 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:219 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:224 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/drivers/swift.py:234 +#, python-format +msgid "volume size %d is invalid." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:248 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:271 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/drivers/swift.py:278 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:287 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/drivers/swift.py:291 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/drivers/swift.py:297 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:301 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:304 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:312 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/drivers/swift.py:328 cinder/backup/drivers/tsm.py:324 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/drivers/swift.py:345 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/drivers/swift.py:350 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:356 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/drivers/swift.py:362 +#, python-format +msgid "" +"restoring object from swift. backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:378 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/drivers/swift.py:401 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:409 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:423 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:428 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:432 cinder/backup/drivers/tsm.py:378 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:446 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:455 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:458 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:468 cinder/backup/drivers/tsm.py:440 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/backup/drivers/tsm.py:85 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to create device hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:143 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to obtain backup success notification from " +"server.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:173 +#, python-format +msgid "" +"restore: %(vol_id)s Failed.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:199 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a block device." +msgstr "" + +#: cinder/backup/drivers/tsm.py:206 +#, python-format +msgid "backup: %(vol_id)s Failed. Cannot obtain real path to device %(path)s." +msgstr "" + +#: cinder/backup/drivers/tsm.py:213 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a file." +msgstr "" + +#: cinder/backup/drivers/tsm.py:260 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to remove backup hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:286 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to TSM, volume path: " +"%(volume_path)s," +msgstr "" + +#: cinder/backup/drivers/tsm.py:298 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:308 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:338 +#, python-format +msgid "" +"restore: starting restore of backup from TSM to volume %(volume_id)s, " +"backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:352 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:362 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:413 +#, python-format +msgid "" +"delete: %(vol_id)s Failed to run dsmc with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:421 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments with " +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:432 +#, python-format +msgid "" +"delete: %(vol_id)s Failed with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/brick/exception.py:55 +#, python-format +msgid "Exception in string format operation. 
msg='%s'" +msgstr "" + +#: cinder/brick/exception.py:85 +msgid "We are unable to locate any Fibre Channel devices." +msgstr "" + +#: cinder/brick/exception.py:89 +msgid "Unable to find a Fibre Channel volume device." +msgstr "" + +#: cinder/brick/exception.py:93 +#, python-format +msgid "Volume device not found at %(device)s." +msgstr "" + +#: cinder/brick/exception.py:97 +#, python-format +msgid "Unable to find Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:101 +#, python-format +msgid "Failed to create Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:105 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:109 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:113 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:117 +#, python-format +msgid "Connect to volume via protocol %(protocol)s not supported." +msgstr "" + +#: cinder/brick/initiator/connector.py:127 +#, python-format +msgid "Invalid InitiatorConnector protocol specified %(protocol)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:140 +#, python-format +msgid "Failed to access the device on the path %(path)s: %(error)s %(info)s." +msgstr "" + +#: cinder/brick/initiator/connector.py:229 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. Try" +" number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:242 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:317 +#, python-format +msgid "Could not find the iSCSI Initiator File %s" +msgstr "" + +#: cinder/brick/initiator/connector.py:609 +msgid "We are unable to locate any Fibre Channel devices" +msgstr "" + +#: cinder/brick/initiator/connector.py:619 +#, python-format +msgid "Looking for Fibre Channel dev %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:629 +msgid "Fibre Channel volume device not found." +msgstr "" + +#: cinder/brick/initiator/connector.py:633 +#, python-format +msgid "Fibre volume not yet found. Will rescan & retry. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:649 +#, python-format +msgid "Found Fibre Channel volume %(name)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:658 +#, python-format +msgid "Multipath device discovered %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:776 +#, python-format +msgid "AoE volume not yet found at: %(path)s. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:789 +#, python-format +msgid "Found AoE device %(path)s (after %(tries)s rediscover)" +msgstr "" + +#: cinder/brick/initiator/connector.py:815 +#, python-format +msgid "aoe-discover: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:825 +#, python-format +msgid "aoe-revalidate %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:834 +#, python-format +msgid "aoe-flush %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:858 +msgid "" +"Connection details not present. RemoteFsClient may not initialize " +"properly." 
+msgstr "" + +#: cinder/brick/initiator/connector.py:915 +msgid "Invalid connection_properties specified no device_path attribute" +msgstr "" + +#: cinder/brick/initiator/linuxfc.py:50 cinder/brick/initiator/linuxfc.py:56 +msgid "systool is not installed" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:99 +#: cinder/brick/initiator/linuxscsi.py:107 +#: cinder/brick/initiator/linuxscsi.py:124 +#, python-format +msgid "multipath call failed exit (%(code)s)" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:145 +#, python-format +msgid "Couldn't find multipath device %(line)s" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:149 +#, python-format +msgid "Found multipath device = %(mdev)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:140 +msgid "Attempting recreate of backing lun..." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:158 +#, python-format +msgid "" +"Failed to recover attempt to create iscsi backing lun for volume " +"id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:177 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:184 +#, python-format +msgid "" +"Created volume path %(vp)s,\n" +"content: %(vc)%" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:216 cinder/brick/iscsi/iscsi.py:365 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:227 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:258 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:262 +#, python-format +msgid "Volume path %s does not exist, nothing to remove." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:280 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:290 cinder/brick/iscsi/iscsi.py:550 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:375 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:469 +msgid "cinder-rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:489 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:513 cinder/brick/iscsi/iscsi.py:522 +#, python-format +msgid "Failed to create iscsi target for volume id:%s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:532 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:542 +#, python-format +msgid "Failed to remove iscsi target for volume id:%s." 
+msgstr "" + +#: cinder/brick/iscsi/iscsi.py:571 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 cinder/brick/local_dev/lvm.py:158 +#: cinder/brick/local_dev/lvm.py:474 cinder/brick/local_dev/lvm.py:503 +#: cinder/brick/local_dev/lvm.py:546 cinder/brick/local_dev/lvm.py:609 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 cinder/brick/local_dev/lvm.py:159 +#: cinder/brick/local_dev/lvm.py:475 cinder/brick/local_dev/lvm.py:504 +#: cinder/brick/local_dev/lvm.py:547 cinder/brick/local_dev/lvm.py:610 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 cinder/brick/local_dev/lvm.py:160 +#: cinder/brick/local_dev/lvm.py:476 cinder/brick/local_dev/lvm.py:505 +#: cinder/brick/local_dev/lvm.py:548 cinder/brick/local_dev/lvm.py:611 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, python-format +msgid "Unable to locate Volume Group %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:157 +msgid "Error querying thin pool about data_percent" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:370 +#, python-format +msgid "Unable to find VG: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:420 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:473 +msgid "Error creating Volume" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:489 +#, python-format +msgid "Unable to find LV: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:502 +msgid "Error creating snapshot" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:545 +msgid "Error activating LV" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:563 +#, python-format +msgid "Error reported running lvremove: CMD: %(command)s, RESPONSE: %(response)s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:568 +msgid "Attempting udev settle and retry of lvremove..." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:608 +msgid "Error extending Volume" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:39 +msgid "nfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:45 +msgid "glusterfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:86 +#, python-format +msgid "Already mounted: %s" +msgstr "" + +#: cinder/common/config.py:125 +msgid "Deploy v1 of the Cinder API." +msgstr "" + +#: cinder/common/config.py:128 +msgid "Deploy v2 of the Cinder API." +msgstr "" + +#: cinder/common/sqlalchemyutils.py:66 +#: cinder/openstack/common/db/sqlalchemy/utils.py:72 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:114 +#: cinder/openstack/common/db/sqlalchemy/utils.py:120 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/compute/nova.py:97 +#, python-format +msgid "Novaclient connection created using URL: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:63 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:190 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:843 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1842 +#, python-format +msgid "VolumeType %s deletion failed, VolumeType in use." 
+msgstr "" + +#: cinder/db/sqlalchemy/api.py:2530 +#, python-format +msgid "No backup with id %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2615 +msgid "Volume must be available" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2639 +#, python-format +msgid "Volume in unexpected state %s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2662 +#, python-format +msgid "" +"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " +"%(status)s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:37 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:64 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:240 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:269 +msgid "Downgrade from initial Cinder install is unsupported." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:49 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:74 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:105 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:45 +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:48 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:46 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:127 +msgid "Dropping foreign key reservations_ibfk_1 failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:133 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:140 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:147 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:60 +msgid "Exception while creating table 'volume_glance_metadata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:75 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:68 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:58 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:61 +msgid "transfers table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:31 +msgid "migrations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:61 +#, python-format +msgid "Table |%s| not created" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:37 +#, python-format +msgid "Exception while dropping table %s." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:100 +#, python-format +msgid "Exception while creating table %s." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:34 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:43 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:49 +#, python-format +msgid "Column |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:92 +msgid "encryption_key_id column not dropped from volumes" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:100 +msgid "encryption_key_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:105 +msgid "volume_type_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:113 +msgid "encryption table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:49 +msgid "Table quality_of_service_specs not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:60 +msgid "Added qos_specs_id column to volume type table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:85 +msgid "Dropping foreign key volume_types_ibfk_1 failed" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:93 +msgid "Dropping qos_specs_id column failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:100 +msgid "Dropping quality_of_service_specs table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:59 +msgid "volume_admin_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:48 +msgid "" +"Found existing 'default' entries in the quota_classes table. Skipping " +"insertion of default values." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:72 +msgid "Added default quota class data into the DB." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:74 +msgid "Default quota class data not inserted into the DB." +msgstr "" + +#: cinder/image/glance.py:161 cinder/image/glance.py:169 +#, python-format +msgid "Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:94 cinder/image/image_utils.py:199 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/image/image_utils.py:101 +#, python-format +msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:109 cinder/image/image_utils.py:192 +#, python-format +msgid "" +"Size is %(image_size)dGB and doesn't fit in a volume of size " +"%(volume_size)dGB." +msgstr "" + +#: cinder/image/image_utils.py:157 +#, python-format +msgid "" +"qemu-img is not installed and image is of type %s. Only RAW images can " +"be used if qemu-img is not installed." +msgstr "" + +#: cinder/image/image_utils.py:164 +msgid "" +"qemu-img is not installed and the disk format is not specified. Only RAW" +" images can be used if qemu-img is not installed." 
+msgstr "" + +#: cinder/image/image_utils.py:178 +#, python-format +msgid "Copying image from %(tmp)s to volume %(dest)s - size: %(size)s" +msgstr "" + +#: cinder/image/image_utils.py:206 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:224 +#, python-format +msgid "Converted to %(vol_format)s, but format is now %(file_format)s" +msgstr "" + +#: cinder/image/image_utils.py:260 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:78 +msgid "" +"config option keymgr.fixed_key has not been defined: some operations may " +"fail unexpectedly" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:80 +msgid "keymgr.fixed_key not defined" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:134 +#, python-format +msgid "Not deleting key %s" +msgstr "" + +#: cinder/openstack/common/eventlet_backdoor.py:140 +#, python-format +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/fileutils.py:64 +#, python-format +msgid "Reloading cached file %s" +msgstr "" + +#: cinder/openstack/common/gettextutils.py:252 +msgid "Message objects do not support addition." +msgstr "" + +#: cinder/openstack/common/gettextutils.py:261 +msgid "" +"Message objects do not support str() because they may contain non-ascii " +"characters. Please use unicode() or translate() instead." +msgstr "" + +#: cinder/openstack/common/imageutils.py:96 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:189 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:200 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:227 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:235 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/log.py:301 +#, python-format +msgid "Deprecated: %s" +msgstr "" + +#: cinder/openstack/common/log.py:402 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:453 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:623 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:82 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:89 +#: cinder/tests/brick/test_brick_connector.py:466 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:129 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:136 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:43 +#, python-format +msgid "Unexpected argument for periodic task creation: %(arg)s." 
+msgstr "" + +#: cinder/openstack/common/periodic_task.py:134 +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:139 +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:177 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:186 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." +msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:127 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/openstack/common/processutils.py:142 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:167 +#: cinder/openstack/common/processutils.py:239 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:345 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:179 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/openstack/common/processutils.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:318 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:220 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/openstack/common/processutils.py:224 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/openstack/common/service.py:175 +#: cinder/openstack/common/service.py:269 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:187 +msgid "Exception during rpc cleanup." 
+msgstr "" + +#: cinder/openstack/common/service.py:238 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:275 +msgid "Unhandled exception" +msgstr "" + +#: cinder/openstack/common/service.py:308 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/openstack/common/service.py:327 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/openstack/common/service.py:337 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/openstack/common/service.py:354 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/openstack/common/service.py:358 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/service.py:362 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/openstack/common/service.py:392 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/openstack/common/service.py:410 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/openstack/common/sslutils.py:98 +#, python-format +msgid "Invalid SSL version : %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:86 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/strutils.py:182 +#, python-format +msgid "Invalid string format: %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:189 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/openstack/common/versionutils.py:69 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and " +"may be removed in %(remove_in)s." +msgstr "" + +#: cinder/openstack/common/versionutils.py:73 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s and may be removed in " +"%(remove_in)s. It will not be superseded." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:29 +msgid "An unknown error occurred in crypto utils." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:36 +#, python-format +msgid "Block size of %(given)d is too big, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:45 +#, python-format +msgid "Length of %(given)d is too long, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/db/exception.py:44 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:487 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:538 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:610 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/utils.py:33 +msgid "Sort key supplied was not valid." +msgstr "" + +#: cinder/openstack/common/notifier/api.py:129 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:145 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:164 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. 
Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:105 +#, python-format +msgid "" +"An RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:83 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "no calling threads waiting for msg_id: %s, message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:216 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshold: %d. There " +"could be a MulticallProxyWaiter leak." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:299 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:345 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "received %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:422 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:423 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:280 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:459 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:594 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:597 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:631 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:640 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:668 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." 
+msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:166 +#: cinder/openstack/common/rpc/impl_qpid.py:164 +msgid "Failed to process message... skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:477 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:499 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:536 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:552 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:606 +#: cinder/openstack/common/rpc/impl_qpid.py:507 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:624 +#: cinder/openstack/common/rpc/impl_qpid.py:522 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:628 +#: cinder/openstack/common/rpc/impl_qpid.py:526 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:667 +#: cinder/openstack/common/rpc/impl_qpid.py:561 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:84 +#, python-format +msgid "Invalid value for qpid_topology_version: %d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:455 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:461 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:474 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:534 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:101 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:136 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:137 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:138 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:146 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:158 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:200 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:205 +msgid "You cannot send on this socket." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:267 +#, python-format +msgid "Running func with context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:305 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:387 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:437 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:443 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:475 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:481 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:497 +#, python-format +msgid "Required IPC directory does not exist at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:506 +#, python-format +msgid "Permission denied to IPC directory at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:509 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:543 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:562 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:590 +msgid "Skipping topic registration. Already registered." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:597 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:649 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:662 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:675 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:678 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:681 +#, python-format +msgid "Received message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:682 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:691 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:698 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:721 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:724 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:728 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:731 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:771 +#, python-format +msgid "topic is %s." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:815 +#, python-format +msgid "rpc_zmq_matchmaker = %(orig)s is deprecated; use %(new)s instead" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#: cinder/openstack/common/rpc/matchmaker_ring.py:79 +#: cinder/openstack/common/rpc/matchmaker_ring.py:97 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:54 +#, python-format +msgid "extra_spec requirement '%(req)s' does not match '%(cap)s'" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:67 +#, python-format +msgid "%(host_state)s fails resource_type extra_specs requirements" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:43 +msgid "Re-scheduling is disabled." +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:52 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/scheduler/driver.py:69 +msgid "Must implement host_passes_filters" +msgstr "" + +#: cinder/scheduler/driver.py:74 +msgid "Must implement find_retype_host" +msgstr "" + +#: cinder/scheduler/driver.py:78 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:82 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:98 +#, python-format +msgid "cannot place volume %(id)s on %(host)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:114 +#, python-format +msgid "No valid hosts for volume %(id)s with type %(type)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:125 +#, python-format +msgid "" +"Current host not valid for volume %(id)s with type %(type)s, migration " +"not allowed" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:156 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:174 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:207 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:259 +#, python-format +msgid "Filtered %s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:276 +#, python-format +msgid "Choosing %s" +msgstr "" + +#: cinder/scheduler/host_manager.py:264 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:269 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:294 +#, python-format +msgid "volume service is down or disabled. (host: %s)" +msgstr "" + +#: cinder/scheduler/manager.py:63 +msgid "" +"ChanceScheduler and SimpleScheduler have been deprecated due to lack of " +"support for advanced features like: volume types, volume encryption, QoS " +"etc. These two schedulers can be fully replaced by FilterScheduler with " +"certain combination of filters and weighers." 
+msgstr "" + +#: cinder/scheduler/manager.py:98 cinder/scheduler/manager.py:100 +msgid "Failed to create scheduler manager volume flow" +msgstr "" + +#: cinder/scheduler/manager.py:159 +msgid "New volume type not specified in request_spec." +msgstr "" + +#: cinder/scheduler/manager.py:174 +#, python-format +msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." +msgstr "" + +#: cinder/scheduler/manager.py:192 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:68 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%s'" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:43 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:57 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:53 +msgid "No volume_id provided to populate a request_spec from" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:116 +#, python-format +msgid "Failed to schedule_create_volume: %(cause)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:135 +#, python-format +msgid "Failed notifying on %(topic)s payload %(payload)s" +msgstr "" + +#: cinder/tests/fake_driver.py:57 cinder/volume/driver.py:784 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:76 cinder/volume/driver.py:884 +#, python-format +msgid "FAKE ISER: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:97 +msgid "local_path not implemented" +msgstr "" + +#: cinder/tests/fake_driver.py:124 cinder/tests/fake_driver.py:129 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:70 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:78 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:94 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:97 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:58 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_netapp_nfs.py:360 +#, python-format +msgid "Share %(share)s and file name %(file_name)s" +msgstr "" + +#: cinder/tests/test_rbd.py:768 cinder/volume/drivers/rbd.py:175 +msgid "flush() not supported in this version of librbd" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:260 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1507 +#, python-format +msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1510 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1515 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:60 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:61 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: 
cinder/tests/test_xiv_ds8k.py:102 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/tests/api/contrib/test_backups.py:741 +msgid "Invalid input" +msgstr "" + +#: cinder/tests/integrated/test_login.py:29 +#, python-format +msgid "volume: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:32 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:42 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:50 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:58 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:100 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:103 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:121 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:148 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:159 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:166 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/transfer/api.py:68 +msgid "Volume in unexpected state" +msgstr "" + +#: cinder/transfer/api.py:102 cinder/volume/api.py:367 +msgid "status must be available" +msgstr "" + +#: cinder/transfer/api.py:119 +#, python-format +msgid "Failed to create transfer record for %s" +msgstr "" + +#: cinder/transfer/api.py:136 +#, python-format +msgid "Attempt to transfer %s with invalid auth key." +msgstr "" + +#: cinder/transfer/api.py:156 cinder/volume/flows/api/create_volume.py:508 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/transfer/api.py:182 +#, python-format +msgid "Failed to update quota donating volumetransfer id %s" +msgstr "" + +#: cinder/transfer/api.py:199 +#, python-format +msgid "Volume %s has been transferred." 
+msgstr "" + +#: cinder/volume/api.py:143 +#, python-format +msgid "Unable to query if %s is in the availability zone set" +msgstr "" + +#: cinder/volume/api.py:171 cinder/volume/api.py:173 +msgid "Failed to create api volume flow" +msgstr "" + +#: cinder/volume/api.py:202 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:214 +#, python-format +msgid "Volume status must be available or error, but current status is: %s" +msgstr "" + +#: cinder/volume/api.py:224 +msgid "Volume cannot be deleted while migrating" +msgstr "" + +#: cinder/volume/api.py:229 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:285 cinder/volume/api.py:350 +#: cinder/volume/qos_specs.py:240 cinder/volume/volume_types.py:67 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:370 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:377 +msgid "status must be in-use to detach" +msgstr "" + +#: cinder/volume/api.py:388 +msgid "Volume status must be available to reserve" +msgstr "" + +#: cinder/volume/api.py:464 +msgid "Snapshot cannot be created while volume is migrating" +msgstr "" + +#: cinder/volume/api.py:468 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:490 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:502 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:553 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/api.py:581 cinder/volume/flows/api/create_volume.py:208 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:585 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:589 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:720 cinder/volume/api.py:772 +msgid "Volume status must be available/in-use." +msgstr "" + +#: cinder/volume/api.py:723 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/api.py:752 +msgid "Volume status must be available to extend." +msgstr "" + +#: cinder/volume/api.py:757 +#, python-format +msgid "" +"New size for extend must be greater than current size. (current: " +"%(size)s, extended: %(new_size)s)" +msgstr "" + +#: cinder/volume/api.py:778 +msgid "Volume is already part of an active migration" +msgstr "" + +#: cinder/volume/api.py:784 +msgid "volume must not have snapshots" +msgstr "" + +#: cinder/volume/api.py:797 +#, python-format +msgid "No available service named %s" +msgstr "" + +#: cinder/volume/api.py:803 +msgid "Destination host must be different than current host" +msgstr "" + +#: cinder/volume/api.py:833 +msgid "Source volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:837 +msgid "Destination volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:842 +#, python-format +msgid "Destination has migration_status %(stat)s, expected %(exp)s." +msgstr "" + +#: cinder/volume/api.py:853 +msgid "Volume status must be available to update readonly flag." 
+msgstr "" + +#: cinder/volume/api.py:862 +#, python-format +msgid "Unable to update type due to incorrect status on volume: %s" +msgstr "" + +#: cinder/volume/api.py:868 +#, python-format +msgid "Volume %s is already part of an active migration." +msgstr "" + +#: cinder/volume/api.py:874 +#, python-format +msgid "migration_policy must be 'on-demand' or 'never', passed: %s" +msgstr "" + +#: cinder/volume/api.py:887 +#, python-format +msgid "Invalid volume_type passed: %s" +msgstr "" + +#: cinder/volume/api.py:900 +#, python-format +msgid "New volume_type same as original: %s" +msgstr "" + +#: cinder/volume/api.py:915 +msgid "Retype cannot change encryption requirements" +msgstr "" + +#: cinder/volume/api.py:927 +msgid "Retype cannot change front-end qos specs for in-use volumes" +msgstr "" + +#: cinder/volume/driver.py:189 cinder/volume/drivers/netapp/nfs.py:174 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:282 +#, python-format +msgid "copy_data_between_volumes %(src)s -> %(dest)s." +msgstr "" + +#: cinder/volume/driver.py:295 cinder/volume/driver.py:309 +#, python-format +msgid "Failed to attach volume %(vol)s" +msgstr "" + +#: cinder/volume/driver.py:327 +#, python-format +msgid "Failed to copy volume %(src)s to %(dest)d" +msgstr "" + +#: cinder/volume/driver.py:340 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:358 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:394 +#, python-format +msgid "Unable to access the backend storage via the path %(path)s." +msgstr "" + +#: cinder/volume/driver.py:433 +#, python-format +msgid "Creating a new backup for volume %s." +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Restoring backup %(backup)s to volume %(volume)s." +msgstr "" + +#: cinder/volume/driver.py:474 +msgid "Extend volume not implemented" +msgstr "" + +#: cinder/volume/driver.py:533 cinder/volume/drivers/emc/emc_smis_iscsi.py:113 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:546 +#, python-format +msgid "ISCSI discovery attempt failed for:%s" +msgstr "" + +#: cinder/volume/driver.py:548 +#, python-format +msgid "Error from iscsiadm -m discovery: %s" +msgstr "" + +#: cinder/volume/driver.py:595 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:599 cinder/volume/drivers/emc/emc_smis_iscsi.py:156 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:696 +msgid "The volume driver requires the iSCSI initiator name in the connector." +msgstr "" + +#: cinder/volume/driver.py:726 cinder/volume/driver.py:845 +#: cinder/volume/drivers/eqlx.py:247 cinder/volume/drivers/lvm.py:359 +#: cinder/volume/drivers/zadara.py:650 +#: cinder/volume/drivers/emc/emc_smis_common.py:859 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:235 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:602 +#: cinder/volume/drivers/netapp/iscsi.py:1032 +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/nexenta/iscsi.py:538 +#: cinder/volume/drivers/windows/windows.py:205 +msgid "Updating volume stats" +msgstr "" + +#: cinder/volume/driver.py:924 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:203 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." 
+msgstr "" + +#: cinder/volume/manager.py:209 +msgid "" +"ThinLVMVolumeDriver is deprecated, please configure LVMISCSIDriver and " +"lvm_type=thin. Continuing with those settings." +msgstr "" + +#: cinder/volume/manager.py:228 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)" +msgstr "" + +#: cinder/volume/manager.py:235 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s" +msgstr "" + +#: cinder/volume/manager.py:244 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:257 +#, python-format +msgid "Failed to re-export volume %s: setting to error state" +msgstr "" + +#: cinder/volume/manager.py:264 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:271 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:273 +#, python-format +msgid "" +"Error encountered during re-exporting phase of driver initialization: " +"%(name)s" +msgstr "" + +#: cinder/volume/manager.py:283 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:328 cinder/volume/manager.py:330 +msgid "Failed to create manager volume flow" +msgstr "" + +#: cinder/volume/manager.py:374 cinder/volume/manager.py:391 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:380 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:389 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:394 +#, python-format +msgid "Cannot delete volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:422 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:427 +#, python-format +msgid "volume %s: glance metadata deleted" +msgstr "" + +#: cinder/volume/manager.py:430 +#, python-format +msgid "no glance metadata found for volume %s" +msgstr "" + +#: cinder/volume/manager.py:434 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:451 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:462 +#, python-format +msgid "snapshot %(snap_id)s: creating" +msgstr "" + +#: cinder/volume/manager.py:490 +#, python-format +msgid "" +"Failed updating %(snapshot_id)s metadata using the provided volumes " +"%(volume_id)s metadata" +msgstr "" + +#: cinder/volume/manager.py:496 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:508 cinder/volume/manager.py:518 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:526 +#, python-format +msgid "Cannot delete snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:556 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:559 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:579 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:583 +msgid "being attached by another host" +msgstr "" + +#: cinder/volume/manager.py:587 +msgid "being attached by different mode" +msgstr "" + +#: cinder/volume/manager.py:590 +msgid "status must be available or attaching" +msgstr "" + +#: cinder/volume/manager.py:698 +#, python-format +msgid 
"Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "" + +#: cinder/volume/manager.py:760 +#, python-format +msgid "Unable to fetch connection information from backend: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:807 +#, python-format +msgid "Unable to terminate volume connection: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:854 +msgid "failed to create new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:857 +msgid "timeout creating new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:880 +#, python-format +msgid "Failed to copy volume %(vol1)s to %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:909 +#, python-format +msgid "" +"migrate_volume_completion: completing migration for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:921 +#, python-format +msgid "" +"migrate_volume_completion is cleaning up an error for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:940 +#, python-format +msgid "Failed to delete migration source vol %(vol)s: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:976 +#, python-format +msgid "volume %s: calling driver migrate_volume" +msgstr "" + +#: cinder/volume/manager.py:1016 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/manager.py:1024 +#, python-format +msgid "" +"Unable to update stats, %(driver_name)s -%(driver_version)s " +"%(config_group)s driver is uninitialized." +msgstr "" + +#: cinder/volume/manager.py:1044 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/manager.py:1091 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to extend volume by %(s_size)sG, " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/manager.py:1103 +#, python-format +msgid "volume %s: extending" +msgstr "" + +#: cinder/volume/manager.py:1105 +#, python-format +msgid "volume %s: extended successfully" +msgstr "" + +#: cinder/volume/manager.py:1107 +#, python-format +msgid "volume %s: Error trying to extend volume" +msgstr "" + +#: cinder/volume/manager.py:1169 +msgid "Failed to update usages while retyping volume." +msgstr "" + +#: cinder/volume/manager.py:1170 +msgid "Failed to get old volume type quota reservations" +msgstr "" + +#: cinder/volume/manager.py:1190 +#, python-format +msgid "Volume %s: retyped succesfully" +msgstr "" + +#: cinder/volume/manager.py:1193 +#, python-format +msgid "" +"Volume %s: driver error when trying to retype, falling back to generic " +"mechanism." +msgstr "" + +#: cinder/volume/manager.py:1204 +msgid "Retype requires migration but is not allowed." +msgstr "" + +#: cinder/volume/manager.py:1212 +msgid "Volume must not have snapshots." 
+msgstr "" + +#: cinder/volume/qos_specs.py:57 +#, python-format +msgid "Valid consumer of QoS specs are: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:84 cinder/volume/qos_specs.py:105 +#: cinder/volume/qos_specs.py:155 cinder/volume/qos_specs.py:197 +#: cinder/volume/qos_specs.py:211 cinder/volume/qos_specs.py:225 +#: cinder/volume/volume_types.py:43 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:123 cinder/volume/qos_specs.py:140 +#: cinder/volume/qos_specs.py:272 cinder/volume/volume_types.py:52 +#: cinder/volume/volume_types.py:99 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/qos_specs.py:156 +#, python-format +msgid "Failed to get all associations of qos specs %s" +msgstr "" + +#: cinder/volume/qos_specs.py:189 +#, python-format +msgid "" +"Type %(type_id)s is already associated with another qos specs: " +"%(qos_specs_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:198 +#, python-format +msgid "Failed to associate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:212 +#, python-format +msgid "Failed to disassociate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:226 +#, python-format +msgid "Failed to disassociate qos specs %s." +msgstr "" + +#: cinder/volume/qos_specs.py:284 cinder/volume/volume_types.py:111 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/utils.py:144 +#, python-format +msgid "" +"Incorrect value error: %(blocksize)s, it may indicate that " +"'volume_dd_blocksize' was configured incorrectly. Fall back to default." +msgstr "" + +#: cinder/volume/volume_types.py:130 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:131 +#: cinder/volume/drivers/block_device.py:143 cinder/volume/drivers/lvm.py:654 +#: cinder/volume/drivers/lvm.py:669 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:157 cinder/volume/drivers/lvm.py:687 +#, python-format +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:183 cinder/volume/drivers/lvm.py:483 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:200 cinder/volume/drivers/lvm.py:504 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:272 cinder/volume/drivers/lvm.py:227 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:287 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:311 cinder/volume/drivers/lvm.py:300 +#: cinder/volume/drivers/zadara.py:509 cinder/volume/drivers/nexenta/nfs.py:189 +#, python-format +msgid "Creating clone of volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:380 +msgid "No free disk" +msgstr "" + +#: cinder/volume/drivers/block_device.py:393 +msgid "No big enough free disk" +msgstr "" + +#: cinder/volume/drivers/coraid.py:84 +#, python-format +msgid "Invalid ESM url scheme \"%s\". Supported https only." +msgstr "" + +#: cinder/volume/drivers/coraid.py:111 +msgid "Invalid REST handle name. Expected path." 
+msgstr "" + +#: cinder/volume/drivers/coraid.py:134 +#, python-format +msgid "Call to json.loads() failed: %(ex)s. Response: %(resp)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:224 +msgid "Session is expired. Relogin on ESM." +msgstr "" + +#: cinder/volume/drivers/coraid.py:244 +msgid "Reply is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:246 +msgid "Error message is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:284 +#, python-format +msgid "Coraid Appliance ping failed: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:297 +#, python-format +msgid "Volume \"%(name)s\" created with VSX LUN \"%(lun)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:311 +#, python-format +msgid "Volume \"%s\" deleted." +msgstr "" + +#: cinder/volume/drivers/coraid.py:315 +#, python-format +msgid "Resize volume \"%(name)s\" to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:319 +#, python-format +msgid "Repository for volume \"%(name)s\" found: \"%(repo)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:333 +#, python-format +msgid "Volume \"%(name)s\" resized. New size is %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:385 +msgid "Cannot create clone volume in different repository." +msgstr "" + +#: cinder/volume/drivers/coraid.py:505 +#, python-format +msgid "Initialize connection %(shelf)s/%(lun)s for %(name)s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:139 +#, python-format +msgid "" +"CLI output\n" +"%s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:154 +msgid "Reading CLI MOTD" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:158 +#, python-format +msgid "Setting CLI terminal width: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:162 +#, python-format +msgid "Sending CLI command: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:169 +msgid "Error executing EQL command" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:199 +#, python-format +msgid "EQL-driver: executing \"%s\"" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:208 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:383 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:215 cinder/volume/drivers/san/san.py:149 +#, python-format +msgid "Error running SSH command: %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:282 +#, python-format +msgid "Volume %s does not exist, it may have already been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:300 +#, python-format +msgid "EQL-driver: Setup is complete, group IP is %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:304 +msgid "Failed to setup the Dell EqualLogic driver" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:320 +#, python-format +msgid "Failed to create volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:329 +#, python-format +msgid "Volume %s was not found while trying to delete it" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:333 +#, python-format +msgid "Failed to delete volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:348 +#, python-format +msgid "Failed to create snapshot of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:361 +#, python-format +msgid "Failed to create volume from snapshot %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:374 +#, python-format +msgid "Failed to create clone of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:384 +#, python-format +msgid "Failed to delete snapshot %(snap)s of volume %(vol)s" +msgstr "" + +#: 
cinder/volume/drivers/eqlx.py:405 +#, python-format +msgid "Failed to initialize connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:415 +#, python-format +msgid "Failed to terminate connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:436 +#, python-format +msgid "Volume %s is not found; it may have been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:440 +#, python-format +msgid "Failed to ensure export of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:459 +#, python-format +msgid "Failed to extend_volume %(name)s from %(current_size)sGB to %(new_size)sGB" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:86 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:91 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:103 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:161 +#, python-format +msgid "Cloning volume %(src)s to volume %(dst)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:166 +msgid "Volume status must be 'available'." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:202 cinder/volume/drivers/nfs.py:122 +#: cinder/volume/drivers/netapp/nfs.py:753 +#, python-format +msgid "casted to %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:216 +msgid "Snapshot status must be \"available\" to clone." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:238 +#, python-format +msgid "snapshot: %(snap)s, volume: %(vol)s, volume_size: %(size)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:257 +#, python-format +msgid "will copy from snapshot at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:275 cinder/volume/drivers/nfs.py:172 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:373 +#, python-format +msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:403 +#, python-format +msgid "nova call result: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:405 +msgid "Call to Nova to create snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:427 +msgid "Nova returned \"error\" status while creating snapshot." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:431 +#, python-format +msgid "Status of snapshot %(id)s is now %(status)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:444 +#, python-format +msgid "Timed out while waiting for Nova update for creation of snapshot %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:456 +#, python-format +msgid "create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:457 +#, python-format +msgid "volume id: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:532 +msgid "'active' must be present when writing snap_info." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:562 +#, python-format +msgid "deleting snapshot %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:566 +msgid "Volume status must be \"available\" or \"in-use\"." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:582 +#, python-format +msgid "" +"Snapshot record for %s is not present, allowing snapshot_delete to " +"proceed."
+msgstr "" + +#: cinder/volume/drivers/glusterfs.py:587 +#, python-format +msgid "snapshot_file for this snap is %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:608 +#, python-format +msgid "No base file found for %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:625 +#, python-format +msgid "No %(base_id)s found for %(file)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:680 +#, python-format +msgid "No file found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:690 +#, python-format +msgid "No snap found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:701 +#, python-format +msgid "No file depends on %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:727 +#, python-format +msgid "Check condition failed: %s expected to be None." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:778 +msgid "Call to Nova delete snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:796 +#, python-format +msgid "status of snapshot %s is still \"deleting\"... waiting" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:802 +#, python-format +msgid "Unable to delete snapshot %(id)s, status: %(status)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:815 +#, python-format +msgid "Timed out while waiting for Nova update for deletion of snapshot %(id)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:904 +#, python-format +msgid "%s must be a valid raw or qcow2 image." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:967 +msgid "Extend volume is only supported for this driver when no snapshots exist." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:975 +#, python-format +msgid "Unrecognized backing format: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:990 +#, python-format +msgid "creating new volume at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:993 +#, python-format +msgid "file already exists at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1019 cinder/volume/drivers/nfs.py:159 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1021 +#, python-format +msgid "Available shares: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1038 +#, python-format +msgid "" +"GlusterFS share at %(dir)s is not writable by the Cinder volume service. " +"Snapshot operations will not be supported." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:96 +#, python-format +msgid "GPFS is not active. Detailed output: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:97 +#, python-format +msgid "GPFS is not running - state: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:140 +msgid "Option gpfs_mount_point_base is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:147 +msgid "Option gpfs_images_share_mode is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:153 +msgid "Option gpfs_images_dir is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:160 +#, python-format +msgid "" +"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " +"belong to different file systems" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:169 +#, python-format +msgid "" +"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in " +"cluster daemon level %(cur)s - must be at least at level %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:183 +#, python-format +msgid "%s must be an absolute path." 
+msgstr "" + +#: cinder/volume/drivers/gpfs.py:188 +#, python-format +msgid "%s is not a directory." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:197 +#, python-format +msgid "" +"The GPFS filesystem %(fs)s is not at the required release level. Current" +" level is %(cur)s, must be at least %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:556 +#, python-format +msgid "Failed to resize volume %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:604 +#, python-format +msgid "mkfs failed on volume %(vol)s, error message was: %(err)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:637 +#, python-format +msgid "" +"%s cannot be accessed. Verify that GPFS is active and file system is " +"mounted." +msgstr "" + +#: cinder/volume/drivers/lvm.py:189 +#, python-format +msgid "Unabled to delete due to existing snapshot for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:215 +#, python-format +msgid "Volume device file path %s does not exist." +msgstr "" + +#: cinder/volume/drivers/lvm.py:221 +#, python-format +msgid "Size for volume: %s not found, cannot secure delete." +msgstr "" + +#: cinder/volume/drivers/lvm.py:262 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:361 +#, python-format +msgid "Unable to update stats on non-initialized Volume Group: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:462 +#, python-format +msgid "Error creating iSCSI target, retrying creation for target: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:482 +#, python-format +msgid "volume_info:%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:518 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:519 cinder/volume/drivers/lvm.py:724 +#: cinder/volume/drivers/huawei/rest_common.py:1225 +#, python-format +msgid "%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:573 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/nfs.py:109 +msgid "Driver specific implementation needs to return mount_point_base." +msgstr "" + +#: cinder/volume/drivers/nfs.py:263 +#, python-format +msgid "Expected volume size was %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:264 +#, python-format +msgid " but size is now %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:361 +#, python-format +msgid "%s is already mounted" +msgstr "" + +#: cinder/volume/drivers/nfs.py:421 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:426 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/nfs.py:431 +#, python-format +msgid "NFS config 'nfs_oversub_ratio' invalid. Must be > 0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:439 +#, python-format +msgid "NFS config 'nfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:493 +#, python-format +msgid "Selected %s as target nfs share." 
+msgstr "" + +#: cinder/volume/drivers/nfs.py:526 +#, python-format +msgid "%s is above nfs_used_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:529 +#, python-format +msgid "%s is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:532 +#, python-format +msgid "%s reserved space is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/rbd.py:160 +#, python-format +msgid "Invalid argument - whence=%s not supported" +msgstr "" + +#: cinder/volume/drivers/rbd.py:164 +msgid "Invalid argument" +msgstr "" + +#: cinder/volume/drivers/rbd.py:183 +msgid "fileno() not supported by RBD()" +msgstr "" + +#: cinder/volume/drivers/rbd.py:210 +#, python-format +msgid "error opening rbd image %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:259 +msgid "rados and rbd python libraries not found" +msgstr "" + +#: cinder/volume/drivers/rbd.py:265 +msgid "error connecting to ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:346 cinder/volume/drivers/sheepdog.py:178 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:377 +#, python-format +msgid "clone depth exceeds limit of %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:411 +#, python-format +msgid "maximum clone depth (%d) has been reached - flattening source volume" +msgstr "" + +#: cinder/volume/drivers/rbd.py:423 +#, python-format +msgid "flattening source volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:435 +#, python-format +msgid "creating snapshot='%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:445 +#, python-format +msgid "cloning '%(src_vol)s@%(src_snap)s' to '%(dest)s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:459 +msgid "clone created successfully" +msgstr "" + +#: cinder/volume/drivers/rbd.py:468 +#, python-format +msgid "creating volume '%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:484 +#, python-format +msgid "flattening %(pool)s/%(img)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:490 +#, python-format +msgid "cloning %(pool)s/%(img)s@%(snap)s to %(dst)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:527 +msgid "volume has no backup snaps" +msgstr "" + +#: cinder/volume/drivers/rbd.py:550 +#, python-format +msgid "volume %s is not a clone" +msgstr "" + +#: cinder/volume/drivers/rbd.py:568 +#, python-format +msgid "deleting parent snapshot %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:579 +#, python-format +msgid "deleting parent %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:593 +#, python-format +msgid "volume %s no longer exists in backend" +msgstr "" + +#: cinder/volume/drivers/rbd.py:609 +msgid "volume has clone snapshot(s)" +msgstr "" + +#: cinder/volume/drivers/rbd.py:625 +#, python-format +msgid "deleting rbd volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:629 +msgid "" +"ImageBusy error raised while deleting rbd volume. This may have been " +"caused by a connection from a client that has crashed and, if so, may be " +"resolved by retrying the delete after 30 seconds has elapsed." 
+msgstr "" + +#: cinder/volume/drivers/rbd.py:642 +msgid "volume is a clone so cleaning references" +msgstr "" + +#: cinder/volume/drivers/rbd.py:696 +#, python-format +msgid "connection data: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:705 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:709 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:712 +msgid "Not an rbd snapshot" +msgstr "" + +#: cinder/volume/drivers/rbd.py:724 +#, python-format +msgid "not cloneable: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:728 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:733 +msgid "rbd image clone requires image format to be 'raw' but image {0} is '{1}'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:747 +#, python-format +msgid "Unable to open image %(loc)s: %(err)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:817 +msgid "volume backup complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:830 +msgid "volume restore complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:840 cinder/volume/drivers/sheepdog.py:195 +#, python-format +msgid "Failed to Extend Volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:845 cinder/volume/drivers/sheepdog.py:200 +#: cinder/volume/drivers/windows/windows.py:223 +#, python-format +msgid "Extend volume from %(old_size)s GB to %(new_size)s GB." +msgstr "" + +#: cinder/volume/drivers/scality.py:67 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:78 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:84 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:105 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:139 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:59 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:64 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:144 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:151 +#, python-format +msgid "" +"Failed to make httplib connection SolidFire Cluster: %s (verify san_ip " +"settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:154 +#, python-format +msgid "Failed to make httplib connection: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:161 +#, python-format +msgid "" +"Request to SolidFire cluster returned bad status: %(status)s / %(reason)s" +" (check san_login/san_password settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:166 +#, python-format +msgid "HTTP request failed, with status: %(status)s and reason: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:177 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:183 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:187 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:189 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:195 +#, python-format +msgid "Detected 
xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:202 +#: cinder/volume/drivers/solidfire.py:271 +#: cinder/volume/drivers/solidfire.py:366 +#, python-format +msgid "API response: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:222 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:253 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:315 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:398 +msgid "Failed to get model update from clone" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:410 +#, python-format +msgid "Failed volume create: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:425 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:460 +#, python-format +msgid "Failed to get SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:469 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:478 +#, python-format +msgid "Volume %s, not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:481 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:550 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:554 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:556 +msgid "This usually means the volume was never successfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:569 +#, python-format +msgid "Failed to delete SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:572 +#: cinder/volume/drivers/solidfire.py:646 +#: cinder/volume/drivers/solidfire.py:709 +#: cinder/volume/drivers/solidfire.py:734 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:575 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:579 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:587 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:638 +msgid "Entering SolidFire extend_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:660 +msgid "Leaving SolidFire extend_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:665 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:673 +msgid "Failed to get updated stats" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:703 +#: cinder/volume/drivers/solidfire.py:728 +msgid "Entering SolidFire attach_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:773 +msgid "Leaving SolidFire transfer volume" +msgstr "" + +#: cinder/volume/drivers/zadara.py:236 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:260 +#, python-format +msgid "Operation completed. 
%(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:357 +#, python-format +msgid "Pool %(name)s: %(total)sGB total, %(free)sGB free" +msgstr "" + +#: cinder/volume/drivers/zadara.py:408 cinder/volume/drivers/zadara.py:531 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:438 +#, python-format +msgid "Create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:445 cinder/volume/drivers/zadara.py:490 +#: cinder/volume/drivers/zadara.py:516 +#, python-format +msgid "Volume %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:456 +#, python-format +msgid "Delete snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:464 +#, python-format +msgid "snapshot: original volume %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:472 +#, python-format +msgid "snapshot: snapshot %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:483 +#, python-format +msgid "Creating volume from snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:496 +#, python-format +msgid "Snapshot %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:614 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:40 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:79 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:83 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:91 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:98 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:107 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:115 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:130 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:137 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:144 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:152 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:157 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:167 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:177 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:188 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:197 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:218 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:230 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:241 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:257 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:266 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:278 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:287 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:292 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:302 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:312 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:321 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:342 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. 
Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:354 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:365 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:381 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:390 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:402 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:411 +msgid "Entering delete_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:413 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:420 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:430 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:438 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:442 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigService: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:456 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:465 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:472 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:476 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:488 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:497 +#: cinder/volume/drivers/emc/emc_smis_common.py:567 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:502 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s."
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:518 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:527 +#, python-format +msgid "" +"Error Create Snapshot: %(snapshot)s Volume: %(volume)s Error: " +"%(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:535 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:541 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:545 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:551 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:559 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:574 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:590 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:599 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:611 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:621 +#, python-format +msgid "Create export: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:626 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:648 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:663 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:674 +#, python-format +msgid "Error mapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:678 +#, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:694 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:707 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:711 +#, python-format +msgid "HidePaths for volume %s completed successfully." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:724 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:739 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:744 +#, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:757 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:770 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:775 +#, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:781 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:790 +#: cinder/volume/drivers/emc/emc_smis_common.py:820 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:804 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:810 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:834 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:840 +#, python-format +msgid "Volume %s is already mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:852 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:884 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:887 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:903 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:906 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:928 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:948 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:952 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:959 +msgid "Cannot connect to ECOM server" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:971 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:984 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:997 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1010 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1054 +#, python-format +msgid "Pool %(storage_type)s is not found." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1060 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1066 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1082 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1114 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1117 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1130 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1158 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1184 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1188 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1248 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1289 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1302 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1314 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1326 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1361 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1404 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1409 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1419 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1441 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1463 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1491 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1520 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1526 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1538 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1548 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1550 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1566 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:152 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:161 +#, python-format +msgid "Cannot find device number for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:191 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:198 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:215 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:70 +#, python-format +msgid "Range: start LU: %(start)s, end LU: %(end)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:84 +#, python-format +msgid "setting LU upper (end) limit to %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:92 +#, python-format +msgid "%(element)s: %(val)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:103 cinder/volume/drivers/hds/hds.py:105 +#, python-format +msgid "XML exception reading parameter: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:178 +#, python-format +msgid "portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:197 +#, python-format +msgid "No configuration found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:250 +#, python-format +msgid "HDP not found: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:289 +#, python-format +msgid "iSCSI portal not found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:327 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:355 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is cloned." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:372 +#, python-format +msgid "LUN %(lun)s extended to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:395 +#, python-format +msgid "delete lun %(lun)s on %(name)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:480 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created from snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:503 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is created as snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:522 +#, python-format +msgid "LUN %s is deleted." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:57 +msgid "_instantiate_driver: configuration not found." 
+msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:64 +#, python-format +msgid "" +"_instantiate_driver: Loading %(protocol)s driver for Huawei OceanStor " +"%(product)s series storage arrays." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:84 +#, python-format +msgid "" +"\"Product\" or \"Protocol\" is illegal. \"Product\" should be set to " +"either T, Dorado or HVS. \"Protocol\" should be set to either iSCSI or " +"FC. Product: %(product)s Protocol: %(protocol)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:74 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s host: %(host)s initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:92 +#: cinder/volume/drivers/huawei/huawei_t.py:461 +#, python-format +msgid "initialize_connection: Target FC ports WWNS: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:101 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(ini)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:159 +#: cinder/volume/drivers/huawei/rest_common.py:1278 +#, python-format +msgid "" +"_get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " +"check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:206 +#: cinder/volume/drivers/huawei/rest_common.py:1083 +#, python-format +msgid "_get_tgt_iqn: iSCSI IP is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:234 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:248 +#, python-format +msgid "" +"_get_iscsi_tgt_port_info: Failed to get iSCSI port info. Please make sure" +" the iSCSI port IP %s is configured in array." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:323 +#: cinder/volume/drivers/huawei/huawei_t.py:552 +#, python-format +msgid "" +"terminate_connection: volume: %(vol)s, host: %(host)s, connector: " +"%(initiator)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:351 +#, python-format +msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:436 +msgid "validate_connector: The FC driver requires thewwpns in the connector." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:443 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:578 +#, python-format +msgid "_remove_fc_ports: FC port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:40 +#, python-format +msgid "parse_xml_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:129 +#, python-format +msgid "_get_host_os_type: Host %(ip)s OS type is %(os)s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:59 +#, python-format +msgid "HVS Request URL: %(url)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:60 +#, python-format +msgid "HVS Request Data: %(data)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:73 +#, python-format +msgid "HVS Response Data: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:75 +#, python-format +msgid "Bad response from server: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:82 +msgid "JSON transfer error" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:102 +#, python-format +msgid "Login error, reason is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:166 +#, python-format +msgid "" +"%(err)s\n" +"result: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:173 +#, python-format +msgid "%s \"data\" was not in result." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:208 +msgid "Can't find the Qos policy in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:246 +msgid "Can't find lun or lun group in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:280 +#, python-format +msgid "Invalid resource pool: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:298 +#, python-format +msgid "Get pool info error, pool name is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:327 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:354 +#, python-format +msgid "_stop_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:474 +#, python-format +msgid "" +"_mapping_hostgroup_and_lungroup: lun_group: %(lun_group)sview_id: " +"%(view_id)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:511 +#: cinder/volume/drivers/huawei/rest_common.py:543 +#, python-format +msgid "initiator name:%(initiator_name)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:527 +#, python-format +msgid "host lun id is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:553 +#, python-format +msgid "the free wwns %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:574 +#, python-format +msgid "the fc server properties is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:688 +#, python-format +msgid "JSON transfer data error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:874 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:937 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:964 +#, python-format +msgid "" +"PrefetchType config is wrong. PrefetchType must in 1,2,3,4. fetchtype " +"is:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:970 +msgid "Use default prefetch fetchtype. Prefetch fetchtype:Intelligent." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:982 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal.LUNcopy name: " +"%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1056 +#, python-format +msgid "" +"_get_iscsi_port_info: Failed to get iscsi port info through config IP " +"%(ip)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1101 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1124 +#, python-format +msgid "_parse_volume_type: type id: %(type_id)s config parameter is: %(params)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1157 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the configuration file " +"%(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1162 +#, python-format +msgid "The config parameters are: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1239 +#: cinder/volume/drivers/huawei/ssh_common.py:118 +#: cinder/volume/drivers/huawei/ssh_common.py:1265 +#, python-format +msgid "_check_conf_file: Config file invalid. %s must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1246 +#: cinder/volume/drivers/huawei/ssh_common.py:125 +msgid "_check_conf_file: Config file invalid. StoragePool must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1256 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1300 +msgid "Can not find lun in array" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:54 +#, python-format +msgid "ssh_read: Read SSH timeout. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:70 +msgid "No response message. Please check system status." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:101 +#: cinder/volume/drivers/huawei/ssh_common.py:1249 +msgid "do_setup" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:135 +#: cinder/volume/drivers/huawei/ssh_common.py:1287 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType is invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:169 +#, python-format +msgid "_get_login_info: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:224 +#, python-format +msgid "create_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:242 +#, python-format +msgid "" +"_name_translate: Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:279 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the element in configuration " +"file %(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:373 +#: cinder/volume/drivers/huawei/ssh_common.py:1451 +#, python-format +msgid "LUNType must be \"Thin\" or \"Thick\". LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:395 +msgid "" +"_parse_conf_lun_params: Use default prefetch type. 
Prefetch type: " +"Intelligent" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:421 +#, python-format +msgid "" +"_get_maximum_capacity_pool_id: Failed to get pool id. Please check config" +" file and make sure the StoragePool %s is created in storage array." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:436 +#, python-format +msgid "CLI command: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:466 +#, python-format +msgid "" +"_execute_cli: Can not connect to IP %(old)s, try to connect to the other " +"IP %(new)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:501 +#, python-format +msgid "_execute_cli: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:511 +#, python-format +msgid "delete_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:516 +#, python-format +msgid "delete_volume: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:570 +#, python-format +msgid "" +"create_volume_from_snapshot: snapshot name: %(snapshot)s, volume name: " +"%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:580 +#, python-format +msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:650 +#, python-format +msgid "_wait_for_luncopy: LUNcopy %(luncopyname)s status is %(status)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:688 +#, python-format +msgid "create_cloned_volume: src volume: %(src)s, tgt volume: %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:697 +#, python-format +msgid "Source volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:739 +#, python-format +msgid "" +"extend_volume: extended volume name: %(extended_name)s new added volume " +"name: %(added_name)s new added volume size: %(added_size)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:747 +#, python-format +msgid "extend_volume: volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:779 +#, python-format +msgid "create_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:785 +msgid "create_snapshot: Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:792 +#, python-format +msgid "create_snapshot: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:855 +#, python-format +msgid "delete_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:865 +#, python-format +msgid "" +"delete_snapshot: Can not delete snapshot %s for it is a source LUN of " +"LUNCopy." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:873 +#, python-format +msgid "delete_snapshot: Snapshot %(snap)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:916 +#, python-format +msgid "" +"%(func)s: %(msg)s\n" +"CLI command: %(cmd)s\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:933 +#, python-format +msgid "map_volume: Volume %s was not found." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1079 +#, python-format +msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1102 +#, python-format +msgid "remove_map: Host %s does not exist." 
+msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1106 +#, python-format +msgid "remove_map: Volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1119 +#, python-format +msgid "remove_map: No map between host %(host)s and volume %(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1138 +#, python-format +msgid "" +"_delete_map: There are IOs accessing the system. Retry to delete host map" +" %(mapid)s 10s later." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1146 +#, python-format +msgid "" +"_delete_map: Failed to delete host map %(mapid)s.\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1185 +msgid "_update_volume_stats: Updating volume stats." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1277 +msgid "_check_conf_file: Config file invalid. StoragePool must be specified." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1311 +msgid "" +"_get_device_type: The driver only supports Dorado5100 and Dorado 2100 G2 " +"now." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1389 +#, python-format +msgid "" +"create_volume_from_snapshot: %(device)s does not support create volume " +"from snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1396 +#, python-format +msgid "create_cloned_volume: %(device)s does not support clone volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1404 +#, python-format +msgid "extend_volume: %(device)s does not support extend volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1413 +#, python-format +msgid "create_snapshot: %(device)s does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:132 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:142 +#, python-format +msgid "Failed getting details for pool %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:179 +msgid "do_setup: No configured nodes." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:182 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:186 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:190 +msgid "Unable to determine system name" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:193 +msgid "Unable to determine system id" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:196 +msgid "Unable to determine pool extent size" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:203 +#: cinder/volume/drivers/netapp/iscsi.py:122 +#: cinder/volume/drivers/netapp/nfs.py:639 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:157 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:209 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:217 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:225 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:235 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:254 +msgid "The connector does not contain the required information." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:277 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:297 +msgid "CHAP secret exists for host but CHAP is disabled" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:302 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:314 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:316 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:333 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:342 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:365 +msgid "" +"Could not get FC connection information for the host-volume connection. " +"Is the host configured properly for FC connections?" 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:380 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:385 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:403 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:411 +msgid "terminate_connection: Failed to get host name from connector." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:421 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:447 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:459 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:470 +#, python-format +msgid "enter: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:474 +msgid "extend_volume: Extending a volume with snapshots is not supported." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:481 +#, python-format +msgid "leave: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:497 +#, python-format +msgid "enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:523 +#, python-format +msgid "leave: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:540 +#, python-format +msgid "" +"enter: retype: id=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:581 +#, python-format +msgid "" +"exit: retype: ild=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:622 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:623 +msgid "_update_volume_stats: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:44 +#, python-format +msgid "Could not find key in output of command %(cmd)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:64 +#, python-format +msgid "Failed to get code level (%s)." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:86 +#, python-format +msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:143 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:165 +#, python-format +msgid "Failed to find host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:178 +#, python-format +msgid "enter: get_host_from_connector: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:207 +#, python-format +msgid "leave: get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:218 +#, python-format +msgid "enter: create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:224 +msgid "create_host: Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:235 +msgid "create_host: No initiators or wwpns supplied." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:265 +#, python-format +msgid "leave: create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:275 +#, python-format +msgid "enter: map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:301 +#, python-format +msgid "" +"leave: map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host " +"%(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:311 +#, python-format +msgid "enter: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:318 +#, python-format +msgid "unmap_vol_from_host: No mapping of volume %(vol_name)s to any host found." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:324 +#, python-format +msgid "" +"unmap_vol_from_host: Multiple mappings of volume %(vol_name)s found, no " +"host specified." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:336 +#, python-format +msgid "" +"unmap_vol_from_host: No mapping of volume %(vol_name)s to host %(host) " +"found." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:348 +#, python-format +msgid "leave: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:377 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:383 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:390 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:397 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:402 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:408 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:417 +#, python-format +msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:452 +msgid "Protocol must be specified as ' iSCSI' or ' FC'." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:495 +#, python-format +msgid "enter: create_vdisk: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:498 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:525 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping%(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:535 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within theallotted %(to)d " +"seconds timeout. Terminating." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:544 +#, python-format +msgid "" +"enter: run_flashcopy: execute FlashCopy from source %(source)s to target " +"%(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:552 +#, python-format +msgid "leave: run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:572 +#, python-format +msgid "Loopcall: _check_vdisk_fc_mappings(), vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:595 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:631 +#, python-format +msgid "Calling _ensure_vdisk_no_fc_mappings: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:639 +#, python-format +msgid "enter: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:641 +#, python-format +msgid "Tried to delete non-existant vdisk %s." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:645 +#, python-format +msgid "leave: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:649 +#, python-format +msgid "enter: create_copy: snapshot %(src)s to %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:654 +#, python-format +msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:669 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt)s from vdisk %(src)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:691 +msgid "migrate_volume started without a vdisk copy in the expected pool." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:743 +#, python-format +msgid "" +"Ignore change IO group as storage code level is %(code_level)s, below " +"then 6.4.0.0" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:35 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:211 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:244 +#, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:53 +#, python-format +msgid "Expected no output from CLI command %(cmd)s, got %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:65 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:256 +#, python-format +msgid "" +"Failed to parse CLI output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:142 +msgid "Must pass wwpn or host to lsfabric." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:158 +#, python-format +msgid "Did not find success message nor error for %(fun)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:163 +msgid "" +"storwize_svc_multihostmap_enabled is set to False, not allowing multi " +"host mapping." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:347 +#, python-format +msgid "Did not find expected key %(key)s in %(fun)s: %(raw)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:382 +#, python-format +msgid "" +"Unexpected CLI response: header/row mismatch. header: %(header)s, row: " +"%(row)s" +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:419 +#, python-format +msgid "No element by given name %s." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:440 +msgid "Not a valid value for NaElement." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:444 +msgid "NaElement name cannot be null." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:468 +msgid "Type cannot be converted into NaElement." 
+msgstr "" + +#: cinder/volume/drivers/netapp/common.py:75 +msgid "Required configuration not found" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:103 +#, python-format +msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:109 +#, python-format +msgid "Storage family %s is not supported" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:116 +#, python-format +msgid "No default storage protocol found for storage family %(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:123 +#, python-format +msgid "" +"Protocol %(storage_protocol)s is not supported for storage family " +"%(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:130 +#, python-format +msgid "" +"NetApp driver of family %(storage_family)s and protocol " +"%(storage_protocol)s loaded" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:139 +msgid "Only loading netapp drivers supported." +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:158 +#, python-format +msgid "" +"The configured NetApp driver is deprecated. Please refer the link to " +"resolve the issue '%s'." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:69 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:105 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:150 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:166 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:175 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:191 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:227 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:232 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:238 +#, python-format +msgid "Failed to get LUN target details for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:249 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:252 +#, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:290 +#, python-format +msgid "Snapshot %s deletion successful" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:310 +#: cinder/volume/drivers/netapp/iscsi.py:565 +#: cinder/volume/drivers/netapp/nfs.py:99 +#: cinder/volume/drivers/netapp/nfs.py:206 +#, python-format +msgid "Resizing %s failed. Cleaning volume." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:325 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:412 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:431 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:511 +msgid "Object is not a NetApp LUN." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:543 +#, python-format +msgid "Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:545 +#, python-format +msgid "Error getting lun attribute. Exception: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:600 +#, python-format +msgid "No need to extend volume %s as it is already the requested new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:606 +#, python-format +msgid "Resizing lun %s directly to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:633 +#, python-format +msgid "Lun %(path)s geometry failed. Message - %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:662 +#, python-format +msgid "Moving lun %(name)s to %(new_name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:677 +#, python-format +msgid "Resizing lun %s using sub clone to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:684 +#, python-format +msgid "%s cannot be sub clone resized as it is hosted on compressed volume" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:690 +#, python-format +msgid "%s cannot be sub clone resized as it contains no blocks." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:707 +#, python-format +msgid "Post clone resize lun %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:718 +#, python-format +msgid "Failure staging lun %s to tmp." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:723 +#, python-format +msgid "Failure moving new cloned lun to %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:727 +#, python-format +msgid "Failure deleting staged tmp lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:730 +#, python-format +msgid "Unknown exception in post clone resize lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:732 +#, python-format +msgid "Exception details: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:736 +msgid "Getting lun block count." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:741 +#, python-format +msgid "Failure getting lun info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:785 +#, python-format +msgid "Failed to get vol with required size and extra specs for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:796 +#, python-format +msgid "Error provisioning vol %(name)s on %(volume)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:841 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:982 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:986 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1087 +msgid "Cluster ssc is not updated. No volume stats found." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1149 +#: cinder/volume/drivers/netapp/nfs.py:1080 +msgid "Unsupported ONTAP version. ONTAP version 7.3.1 and above is supported." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1153 +#: cinder/volume/drivers/netapp/nfs.py:1084 +#: cinder/volume/drivers/netapp/utils.py:320 +msgid "Api version could not be determined." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1164 +#, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1273 +#, python-format +msgid "Error finding luns for volume %s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1390 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1393 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1456 +msgid "Volume refresh job already running. Returning..." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1462 +#, python-format +msgid "Error refreshing vol capacity. Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1470 +#, python-format +msgid "Refreshing capacity info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:104 +#: cinder/volume/drivers/netapp/nfs.py:211 +#, python-format +msgid "NFS file %s not discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:224 +#, python-format +msgid "Copied image to volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:230 +#, python-format +msgid "Registering image in cache %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:237 +#, python-format +msgid "" +"Exception while registering image %(image_id)s in cache. Exception: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:250 +#, python-format +msgid "Found cache file for image %(image_id)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:263 +#, python-format +msgid "Cloning img from cache for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:271 +msgid "Image cache cleaning in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:282 +msgid "Image cache cleaning in progress." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:293 +#, python-format +msgid "Cleaning cache for share %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:298 +#, python-format +msgid "Files to be queued for deletion %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:305 +#, python-format +msgid "Exception during cache cleaning %(share)s. Message - %(ex)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:311 +msgid "Image cache cleaning done." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:336 +#, python-format +msgid "Bytes to free %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:343 +#, python-format +msgid "Delete file path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:358 +#, python-format +msgid "Deleting file at path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:363 +#, python-format +msgid "Exception during deleting %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:395 +#, python-format +msgid "Unexpected exception in cloning image %(image_id)s. 
Message: %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:411 +#, python-format +msgid "Cloning image %s from cache" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:415 +#, python-format +msgid "Cache share: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:425 +#, python-format +msgid "Unexpected exception during image cloning in share %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:431 +#, python-format +msgid "Cloning image %s directly in share" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:436 +#, python-format +msgid "Share is cloneable %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:443 +#, python-format +msgid "Image is raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:450 +#, python-format +msgid "Image will locally be converted to raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:457 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:467 +#, python-format +msgid "Performing post clone for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:474 +msgid "NFS file could not be discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:478 +msgid "Checking file for resize" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:482 +#, python-format +msgid "Resizing file to %sG" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:488 +msgid "Resizing image file failed." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:510 +msgid "Discover file retries exhausted." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:529 +#, python-format +msgid "Image location not in the expected format %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:557 +#, python-format +msgid "Found possible share matches %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:561 +msgid "Unexpected exception while short listing used share." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:599 +#, python-format +msgid "Extending volume %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:710 +#, python-format +msgid "Shares on vserver %s will only be used for provisioning." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:714 +#: cinder/volume/drivers/netapp/nfs.py:892 +msgid "No vserver set in config. SSC will be disabled." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:757 +#, python-format +msgid "Exception creating vol %(name)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:765 +#, python-format +msgid "Volume %s could not be created on shares." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:815 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:856 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:862 +#, python-format +msgid "" +"Cloning with params volume %(volume)s, src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:924 +msgid "No cluster ssc stats found. Wait for next volume stats update." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:941 +msgid "No shares found hence skipping ssc refresh." 
+msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:978 +#: cinder/volume/drivers/netapp/nfs.py:1221 +#, python-format +msgid "Shortlisted del elg files %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:983 +#: cinder/volume/drivers/netapp/nfs.py:1226 +#, python-format +msgid "Getting file usage for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:988 +#: cinder/volume/drivers/netapp/nfs.py:1231 +#, python-format +msgid "file-usage for path %(path)s is %(bytes)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1005 +#: cinder/volume/drivers/netapp/nfs.py:1268 +#, python-format +msgid "Share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1007 +#: cinder/volume/drivers/netapp/nfs.py:1270 +#, python-format +msgid "No share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1038 +#, python-format +msgid "Found volume %(vol)s for share %(share)s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1129 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1139 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:241 +#, python-format +msgid "Unexpected error while creating ssc vol list. Message - %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:272 +#, python-format +msgid "Exception querying aggr options. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:313 +#, python-format +msgid "Exception querying sis information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:347 +#, python-format +msgid "Exception querying mirror information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:379 +#, python-format +msgid "Exception querying storage disk. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:421 +#, python-format +msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:455 +#, python-format +msgid "Successfully completed stale refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:482 +#, python-format +msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:488 +#, python-format +msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:500 +msgid "Backend not a VolumeDriver." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:502 +msgid "Backend server not NaServer." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:505 +msgid "ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:517 +msgid "refresh stale ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:616 +msgid "Fatal error: User not permitted to query NetApp volumes." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:623 +#, python-format +msgid "" +"The user does not have access or sufficient privileges to use all ssc " +"apis. The ssc features %s may not work as expected." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:122 +msgid "ems executed successfully." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:124 +#, python-format +msgid "Failed to invoke ems. 
Message : %s" +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:137 +msgid "" +"It is not the recommended way to use drivers by NetApp. Please use " +"NetAppDriver to achieve the functionality." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:160 +msgid "Requires an NaServer instance." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:317 +msgid "Unsupported Clustered Data ONTAP version." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:99 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:150 +#, python-format +msgid "Extending volume: %(id)s New size: %(size)s GB" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:166 +#, python-format +msgid "Volume %s does not exist, it seems it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:179 +#, python-format +msgid "Cannot delete snapshot %(origin): %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:190 +#, python-format +msgid "Creating temp snapshot of the original volume: %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:200 +#: cinder/volume/drivers/nexenta/nfs.py:200 +#, python-format +msgid "Volume creation failed, deleting created snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:205 +#: cinder/volume/drivers/nexenta/nfs.py:205 +#, python-format +msgid "Failed to delete zfs snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:223 +#, python-format +msgid "Enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:250 +#, python-format +msgid "Remote NexentaStor appliance at %s should be SSH-bound." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:267 +#, python-format +msgid "" +"Cannot send source snapshot %(src)s to destination %(dst)s. Reason: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:275 +#, python-format +msgid "" +"Cannot delete temporary source snapshot %(src)s on NexentaStor Appliance:" +" %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:281 +#, python-format +msgid "Cannot delete source volume %(volume)s on NexentaStor Appliance: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:318 +#, python-format +msgid "Snapshot %s does not exist, it seems it was already deleted." 
+msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:439 +#: cinder/volume/drivers/windows/windows_utils.py:230 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:449 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:461 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:471 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:481 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:514 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:522 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:83 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:88 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:89 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:90 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:96 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:85 +#, python-format +msgid "Volume %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:89 +#, python-format +msgid "Folder %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:114 +#, python-format +msgid "Creating folder on Nexenta Store %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:146 +#, python-format +msgid "Cannot destroy created folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:176 +#, python-format +msgid "Cannot destroy cloned folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:227 +#, python-format +msgid "Folder %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:237 +#: cinder/volume/drivers/nexenta/nfs.py:268 +#, python-format +msgid "Snapshot %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:302 +#, python-format +msgid "Creating regular file: %s.This may take some time." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:313 +#, python-format +msgid "Regular file: %s created." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:365 +#, python-format +msgid "Sharing folder %s on Nexenta Store" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:393 +#, python-format +msgid "Shares loaded: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/utils.py:46 +#, python-format +msgid "Invalid value: \"%s\"" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:93 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:99 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:107 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:137 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:190 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:246 +#, python-format +msgid "Snapshot info: %(name)s => %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:321 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:79 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:166 +#, python-format +msgid "Invalid hp3parclient version. Version %s or greater required." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:179 +#, python-format +msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:193 +#, python-format +msgid "HP3PARCommon %(common_ver)s, hp3parclient %(rest_ver)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:212 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:494 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:228 +#, python-format +msgid "Failed to get domain because CPG (%s) doesn't exist on array." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:247 +#, python-format +msgid "Error extending volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:347 +#, python-format +msgid "command %s failed" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:390 +#, python-format +msgid "Error running ssh command: %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:586 +#, python-format +msgid "VV Set %s does not exist." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:633 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:684 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:752 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1004 +#, python-format +msgid "Failure in update_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1019 +#, python-format +msgid "Failure in clear_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1031 +#, python-format +msgid "Error attaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1039 +#, python-format +msgid "Error detaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:124 +#, python-format +msgid "Invalid IP address format '%s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:158 +#, python-format +msgid "" +"Found invalid iSCSI IP address(s) in configuration option(s) " +"hp3par_iscsi_ips or iscsi_ip_address '%s.'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:164 +msgid "At least one valid iSCSI IP address must be set." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:266 +msgid "Least busy iSCSI port not found, using first iSCSI port in list." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:75 +#, python-format +msgid "Failure while invoking function: %(func)s. Error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:162 +#, python-format +msgid "Error while terminating session: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:165 +msgid "Successfully established connection to the server." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:172 +#, python-format +msgid "Error while logging out the user: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:218 +#, python-format +msgid "" +"Not authenticated error occurred. Will create session and try API call " +"again: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:258 +#, python-format +msgid "Task: %(task)s progress: %(prog)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:262 +#, python-format +msgid "Task %s status: success." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:266 +#: cinder/volume/drivers/vmware/api.py:271 +#, python-format +msgid "Task: %(task)s failed with error: %(err)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:290 +msgid "Lease is ready." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:294 +msgid "Lease initializing..." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:304 +#, python-format +msgid "Error: unknown lease state %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:51 +#, python-format +msgid "Read %(bytes)s out of %(max)s from ThreadSafePipe." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:56 +#, python-format +msgid "Completed transfer of size %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:102 +#, python-format +msgid "Initiating image service update on image: %(image)s with meta: %(meta)s" +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:117 +#, python-format +msgid "Glance image: %s is now active." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:123 +#, python-format +msgid "Glance image: %s is in killed state." 
+msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:132 +#, python-format +msgid "Glance image %(id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:171 +#, python-format +msgid "" +"Exception during HTTP connection close in VMwareHTTPWrite. Exception is " +"%s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:203 +#: cinder/volume/drivers/vmware/read_write_util.py:292 +msgid "Could not retrieve URL from lease." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:206 +#, python-format +msgid "Opening vmdk url: %s for write." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:231 +#, python-format +msgid "Written %s bytes to vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:242 +#: cinder/volume/drivers/vmware/read_write_util.py:318 +#, python-format +msgid "Updating progress to %s percent." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:258 +#: cinder/volume/drivers/vmware/read_write_util.py:334 +msgid "Lease released." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:260 +#: cinder/volume/drivers/vmware/read_write_util.py:336 +#, python-format +msgid "Lease is already in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:295 +#, python-format +msgid "Opening vmdk url: %s for read." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:307 +#, python-format +msgid "Read %s bytes from vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:150 +#, python-format +msgid "Error(s): %s occurred in the call to RetrievePropertiesEx." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:189 +#, python-format +msgid "No such SOAP method %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:198 +#, python-format +msgid "httplib error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:209 +#, python-format +msgid "Socket error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:218 +#, python-format +msgid "Type error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:225 +#, python-format +msgid "Error in %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:112 +#, python-format +msgid "Returning spec value %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:115 +#, python-format +msgid "Invalid spec value: %s specified." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:118 +#, python-format +msgid "Returning default spec value: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:169 +#, python-format +msgid "%s not set." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:174 +#, python-format +msgid "Successfully setup driver: %(driver)s for server: %(ip)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:219 +msgid "Backing not available, no operation to be performed." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:287 +#, python-format +msgid "" +"Unable to pick datastore to accommodate %(size)s bytes from the " +"datastores: %(dss)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:293 +#, python-format +msgid "" +"Selected datastore: %(datastore)s with %(host_count)d connected host(s) " +"for the volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:375 +#, python-format +msgid "" +"Unable to find suitable datastore for volume of size: %(vol)s GB under " +"host: %(host)s. 
More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:385 +#, python-format +msgid "Unable to find host to accommodate a disk of size: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:412 +#, python-format +msgid "" +"Unable to find suitable datastore for volume: %(vol)s under host: " +"%(host)s. More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:422 +#, python-format +msgid "Unable to create volume: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:441 +#, python-format +msgid "The instance: %s for which initialize connection is called, exists." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:448 +#, python-format +msgid "There is no backing for the volume: %s. Need to create one." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:456 +msgid "The instance for which initialize connection is called, does not exist." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:461 +#, python-format +msgid "Trying to boot from an empty volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:470 +#, python-format +msgid "" +"Returning connection_info: %(info)s for volume: %(volume)s with " +"connector: %(connector)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:518 +#, python-format +msgid "Snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:523 +#, python-format +msgid "There is no backing, so will not create snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:528 +#, python-format +msgid "Successfully created snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:549 +#, python-format +msgid "Delete snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:554 +#, python-format +msgid "There is no backing, and so there is no snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:558 +#, python-format +msgid "Successfully deleted snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:586 +#, python-format +msgid "Successfully cloned new backing: %(back)s from source VMDK file: %(vmdk)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:603 +#, python-format +msgid "" +"There is no backing for the source volume: %(svol)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:633 +#, python-format +msgid "" +"There is no backing for the source snapshot: %(snap)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:642 +#: cinder/volume/drivers/vmware/vmdk.py:982 +#, python-format +msgid "" +"There is no snapshot point for the snapshoted volume: %(snap)s. Not " +"creating any backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:678 +#, python-format +msgid "Cannot create image of disk format: %s. Only vmdk disk format is accepted." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:713 +#: cinder/volume/drivers/vmware/vmdk.py:771 +#, python-format +msgid "Fetching glance image: %(id)s to server: %(host)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:722 +#: cinder/volume/drivers/vmware/vmdk.py:792 +#, python-format +msgid "Done copying image: %(id)s to volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:725 +#, python-format +msgid "" +"Exception in copy_image_to_volume: %(excep)s. Deleting the backing: " +"%(back)s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:746 +#, python-format +msgid "Exception in _select_ds_for_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:749 +#, python-format +msgid "Selected datastore %(ds)s for new volume of size %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:784 +#, python-format +msgid "Exception in copy_image_to_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:787 +#, python-format +msgid "Deleting the backing: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:808 +#, python-format +msgid "Copy glance image: %s to create new volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:842 +msgid "Upload to glance of attached volume is not supported." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:847 +#, python-format +msgid "Copy Volume: %s to new image." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:853 +#, python-format +msgid "Backing not found, creating for volume: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:872 +#, python-format +msgid "Done copying volume %(vol)s to a new image %(img)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:922 +#, python-format +msgid "Relocating volume: %(backing)s to %(ds)s and %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:961 +#: cinder/volume/drivers/vmware/volumeops.py:630 +#, python-format +msgid "Successfully created clone: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:974 +#, python-format +msgid "" +"There is no backing for the snapshoted volume: %(snap)s. Not creating any" +" backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1010 +#, python-format +msgid "" +"There is no backing for the source volume: %(src)s. Not creating any " +"backing for volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1018 +#, python-format +msgid "Linked clone of source volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:94 +#, python-format +msgid "Downloading image: %s from glance image server as a flat vmdk file." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:107 +#: cinder/volume/drivers/vmware/vmware_images.py:126 +#, python-format +msgid "Downloaded image: %s from glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:113 +#, python-format +msgid "Downloading image: %s from glance image server using HttpNfc import." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:132 +#, python-format +msgid "Uploading image: %s to the Glance image server using HttpNfc export." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:158 +#, python-format +msgid "Uploaded image: %s to the Glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:87 +#, python-format +msgid "Did not find any backing with name: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:94 +#, python-format +msgid "Deleting the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:97 +#, python-format +msgid "Initiated deletion of VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:99 +#, python-format +msgid "Deleted the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:237 +#, python-format +msgid "There are no valid datastores attached to %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:289 +#, python-format +msgid "" +"Creating folder: %(child_folder_name)s under parent folder: " +"%(parent_folder)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:306 +#, python-format +msgid "Child folder already present: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:314 +#, python-format +msgid "Created child folder: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:365 +#, python-format +msgid "Spec for creating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:383 +#, python-format +msgid "" +"Creating volume backing name: %(name)s disk_type: %(disk_type)s size_kb: " +"%(size_kb)s at folder: %(folder)s resourse pool: %(resource_pool)s " +"datastore name: %(ds_name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:395 +#, python-format +msgid "Initiated creation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:398 +#, python-format +msgid "Successfully created volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:438 +#, python-format +msgid "Spec for relocating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:452 +#, python-format +msgid "" +"Relocating backing: %(backing)s to datastore: %(ds)s and resource pool: " +"%(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:462 +#, python-format +msgid "Initiated relocation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:464 +#, python-format +msgid "" +"Successfully relocated volume backing: %(backing)s to datastore: %(ds)s " +"and resource pool: %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:474 +#, python-format +msgid "Moving backing: %(backing)s to folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:479 +#, python-format +msgid "Initiated move of volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:482 +#, python-format +msgid "Successfully moved volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:494 +#, python-format +msgid "Snapshoting backing: %(backing)s with name: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:501 +#, python-format +msgid "Initiated snapshot of volume backing: %(backing)s named: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:505 +#, python-format +msgid "Successfully created snapshot: %(snap)s for volume backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:553 +#, python-format +msgid "Deleting the snapshot: %(name)s from backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:558 +#, python-format +msgid "" +"Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " +"delete anything." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:565 +#, python-format +msgid "Initiated snapshot: %(name)s deletion for backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:569 +#, python-format +msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:597 +#, python-format +msgid "Spec for cloning the backing: %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:613 +#, python-format +msgid "" +"Creating a clone of backing: %(back)s, named: %(name)s, clone type: " +"%(type)s from snapshot: %(snap)s on datastore: %(ds)s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:627 +#, python-format +msgid "Initiated clone of backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:638 +#, python-format +msgid "Deleting file: %(file)s under datacenter: %(dc)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:646 +#, python-format +msgid "Initiated deletion via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:648 +#, python-format +msgid "Successfully deleted file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:701 +msgid "Copying disk data before snapshot of the VM" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:711 +#, python-format +msgid "Initiated copying disk data via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:713 +#, python-format +msgid "Successfully copied disk at: %(src)s to: %(dest)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:722 +#, python-format +msgid "Deleting vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:729 +#, python-format +msgid "Initiated deleting vmdk file via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:731 +#, python-format +msgid "Deleted vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/windows/windows.py:102 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:47 +#, python-format +msgid "" +"check_for_setup_error: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:53 +msgid "check_for_setup_error: there is no ISCSI traffic listening." +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:63 +#, python-format +msgid "" +"get_host_information: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:73 +#, python-format +msgid "" +"get_host_information: the ISCSI target information could not be " +"retrieved. WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:105 +#, python-format +msgid "" +"associate_initiator_with_iscsi_target: an association between initiator: " +"%(init)s and target name: %(target)s could not be established. WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:123 +#, python-format +msgid "" +"delete_iscsi_target: error when deleting the iscsi target associated with" +" target name: %(target)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:139 +#, python-format +msgid "" +"create_volume: error when creating the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:157 +#, python-format +msgid "" +"delete_volume: error when deleting the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:177 +#, python-format +msgid "" +"create_snapshot: error when creating the snapshot name: %(vol_name)s . 
" +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:193 +#, python-format +msgid "" +"create_volume_from_snapshot: error when creating the volume name: " +"%(vol_name)s from snapshot name: %(snap_name)s. WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:208 +#, python-format +msgid "" +"delete_snapshot: error when deleting the snapshot name: %(snap_name)s . " +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:223 +#, python-format +msgid "" +"create_iscsi_target: error when creating iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:240 +#, python-format +msgid "" +"remove_iscsi_target: error when deleting iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:255 +#, python-format +msgid "" +"add_disk_to_target: error adding disk associated to volume : %(vol_name)s" +" to the target name: %(tar_name)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:273 +#, python-format +msgid "" +"copy_vhd_disk: error when copying disk from source path : %(src_path)s to" +" destination path: %(dest_path)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:290 +#, python-format +msgid "" +"extend: error when extending the volume: %(vol_name)s .WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/flows/common.py:52 +#, python-format +msgid "Restoring source %(source_volid)s status to %(status)s" +msgstr "" + +#: cinder/volume/flows/common.py:58 +#, python-format +msgid "" +"Failed setting source volume %(source_volid)s back to its initial " +"%(source_status)s status" +msgstr "" + +#: cinder/volume/flows/common.py:83 +#, python-format +msgid "Updating volume: %(volume_id)s with %(update)s due to: %(reason)s" +msgstr "" + +#: cinder/volume/flows/common.py:90 +#: cinder/volume/flows/manager/create_volume.py:676 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(update)s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:81 +#, python-format +msgid "Originating snapshot status must be one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:103 +#, python-format +msgid "" +"Unable to create a volume from an originating source volume when its " +"status is not one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:126 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than the snapshot size " +"%(snap_size)sGB. They must be >= original snapshot size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:135 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than original volume size " +"%(source_size)sGB. They must be >= original volume size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:144 +#, python-format +msgid "Volume size %(size)s must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:186 +#, python-format +msgid "" +"Size of specified image %(image_size)sGB is larger than volume size " +"%(volume_size)sGB." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:194 +#, python-format +msgid "" +"Volume size %(volume_size)sGB cannot be smaller than the image minDisk " +"size %(min_disk)sGB." 
+msgstr "" + +#: cinder/volume/flows/api/create_volume.py:212 +#, python-format +msgid "Metadata property key %s greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:217 +#, python-format +msgid "Metadata property key %s value greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:254 +#, python-format +msgid "Availability zone '%s' is invalid" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:267 +msgid "Volume must be in the same availability zone as the snapshot" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:276 +msgid "Volume must be in the same availability zone as the source volume" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:315 +msgid "Volume type will be changed to be the same as the source volume." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:463 +#, python-format +msgid "Failed destroying volume entry %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:546 +#, python-format +msgid "Failed rolling back quota for %s reservations" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:590 +#, python-format +msgid "Failed to update quota for deleting volume: %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:678 +#: cinder/volume/flows/manager/create_volume.py:192 +#, python-format +msgid "Volume %s: create failed" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:682 +msgid "Unexpected build error:" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:105 +#, python-format +msgid "" +"Volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d due to " +"%(reason)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:124 +#, python-format +msgid "Volume %s: re-scheduled" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:141 +#, python-format +msgid "Updating volume %(volume_id)s with %(update)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:146 +#, python-format +msgid "Volume %s: resetting 'creating' status failed." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:165 +#, python-format +msgid "Volume %s: rescheduling failed" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:308 +#, python-format +msgid "" +"Failed notifying about the volume action %(event)s for volume " +"%(volume_id)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:345 +#, python-format +msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:347 +#, python-format +msgid "" +"Failed updating volume %(vol_id)s metadata using the provided " +"%(src_type)s %(src_id)s metadata" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:405 +#, python-format +msgid "" +"Failed fetching snapshot %(snapshot_id)s bootable flag using the provided" +" glance snapshot %(snapshot_ref_id)s volume reference" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:418 +#, python-format +msgid "Marking volume %s as bootable." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:421 +#, python-format +msgid "Failed updating volume %(volume_id)s bootable flag to true" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:448 +#, python-format +msgid "" +"Attempting download of %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s." 
+msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:455 +#: cinder/volume/flows/manager/create_volume.py:466 +#, python-format +msgid "" +"Failed to copy image %(image_id)s to volume: %(volume_id)s, error: " +"%(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:461 +#, python-format +msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:475 +#, python-format +msgid "" +"Downloaded image %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s successfully." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:512 +#, python-format +msgid "" +"Creating volume glance metadata for volume %(volume_id)s backed by image " +"%(image_id)s with: %(vol_metadata)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:526 +#, python-format +msgid "" +"Cloning %(volume_id)s from image %(image_id)s at location " +"%(image_location)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:552 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(updates)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:574 +#, python-format +msgid "Unable to create volume. Volume driver %s not initialized" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:588 +#, python-format +msgid "" +"Volume %(volume_id)s: being created using %(functor)s with specification:" +" %(volume_spec)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:611 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with creation provided " +"model %(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:619 +#, python-format +msgid "Volume %s: creating export" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:633 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with driver provided model " +"%(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:680 +#, python-format +msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" +msgstr "" + +#~ msgid "Error retrieving volume status: %s" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get system name" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get storage pool data" +#~ msgstr "" + +#~ msgid "Cannot find any Fibre Channel HBAs" +#~ msgstr "" + +#~ msgid "Volume status must be available or error" +#~ msgstr "" + +#~ msgid "No backend config with id %s" +#~ msgstr "" + +#~ msgid "No sm_flavor called %s" +#~ msgstr "" + +#~ msgid "No sm_volume with id %s" +#~ msgstr "" + +#~ msgid "Error: %s" +#~ msgstr "" + +#~ msgid "Unexpected state while cloning %s" +#~ msgstr "" + +#~ msgid "iSCSI device not found at %s" +#~ msgstr "" + +#~ msgid "Fibre Channel device not found." +#~ msgstr "" + +#~ msgid "Uncaught exception" +#~ msgstr "" + +#~ msgid "Out reactor registered" +#~ msgstr "" + +#~ msgid "CONSUMER GOT %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT QUEUED %(data)s" +#~ msgstr "" + +#~ msgid "Could not create IPC directory %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT %(data)s" +#~ msgstr "" + +#~ msgid "May specify only one of snapshot, imageRef or source volume" +#~ msgstr "" + +#~ msgid "Volume size cannot be lesser than the Snapshot size" +#~ msgstr "" + +#~ msgid "Unable to clone volumes that are in an error state" +#~ msgstr "" + +#~ msgid "Clones currently must be >= original volume size." 
+#~ msgstr "" + +#~ msgid "Volume size '%s' must be an integer and greater than 0" +#~ msgstr "" + +#~ msgid "Size of specified image is larger than volume size." +#~ msgstr "" + +#~ msgid "Image minDisk size is larger than the volume size." +#~ msgstr "" + +#~ msgid "" +#~ msgstr "" + +#~ msgid "Availability zone is invalid" +#~ msgstr "" + +#~ msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +#~ msgstr "" + +#~ msgid "volume %s: creating from snapshot" +#~ msgstr "" + +#~ msgid "volume %s: creating from existing volume" +#~ msgstr "" + +#~ msgid "volume %s: creating from image" +#~ msgstr "" + +#~ msgid "volume %s: creating" +#~ msgstr "" + +#~ msgid "Setting volume: %s status to error after failed image copy." +#~ msgstr "" + +#~ msgid "Unexpected Error: " +#~ msgstr "" + +#~ msgid "volume %s: creating export" +#~ msgstr "" + +#~ msgid "volume %s: create failed" +#~ msgstr "" + +#~ msgid "volume %s: created successfully" +#~ msgstr "" + +#~ msgid "volume %s: Error trying to reschedule create" +#~ msgstr "" + +#~ msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +#~ msgstr "" + +#~ msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +#~ msgstr "" + +#~ msgid "Downloaded image %(image_id)s to %(volume_id)s successfully." +#~ msgstr "" + +#~ msgid "Array Mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "LUN %(lun)s of size %(size)s MB is created." +#~ msgstr "" + +#~ msgid "Array mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "Failed to attach iser target for volume %(volume_id)s." +#~ msgstr "" + +#~ msgid "Fetching %s" +#~ msgstr "" + +#~ msgid "Link Local address is not found.:%s" +#~ msgstr "" + +#~ msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +#~ msgstr "" + +#~ msgid "Started %(name)s on %(_host)s:%(_port)s" +#~ msgstr "" + +#~ msgid "Unable to find a Fibre Channel volume device" +#~ msgstr "" + +#~ msgid "Volume device not found at %s" +#~ msgstr "" + +#~ msgid "Unable to find Volume Group: %s" +#~ msgstr "" + +#~ msgid "Failed to create Volume Group: %s" +#~ msgstr "" + +#~ msgid "snapshot %(snap_name)s: creating" +#~ msgstr "" + +#~ msgid "Running with CoraidDriver for ESM EtherCLoud" +#~ msgstr "" + +#~ msgid "Update session cookie %(session)s" +#~ msgstr "" + +#~ msgid "Message : %(message)s" +#~ msgstr "" + +#~ msgid "Error while trying to set group: %(message)s" +#~ msgstr "" + +#~ msgid "Unable to find group: %(group)s" +#~ msgstr "" + +#~ msgid "ESM urlOpen error" +#~ msgstr "" + +#~ msgid "JSON Error" +#~ msgstr "" + +#~ msgid "Request without URL" +#~ msgstr "" + +#~ msgid "Configure data : %s" +#~ msgstr "" + +#~ msgid "Configure response : %s" +#~ msgstr "" + +#~ msgid "Unable to retrive volume infos for volume %(volname)s" +#~ msgstr "" + +#~ msgid "Cannot login on Coraid ESM" +#~ msgstr "" + +#~ msgid "Fail to create volume %(volname)s" +#~ msgstr "" + +#~ msgid "Failed to delete volume %(volname)s" +#~ msgstr "" + +#~ msgid "Failed to Create Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "Failed to Delete Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "Failed to Create Volume from Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "fmt = %(fmt)s backed by: %(backing_file)s" +#~ msgstr "" + +#~ msgid "Expected image to be in raw format, but is %s" +#~ msgstr "" + +#~ msgid "volume group %s doesn't exist" +#~ msgstr "" + +#~ msgid "Error retrieving volume stats: %s" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Could not get system name" +#~ msgstr "" + +#~ msgid "CPG (%s) must 
be in a domain" +#~ msgstr "" + +#~ msgid "Error populating default encryption types!" +#~ msgstr "" + +#~ msgid "Unexpected error while running command." +#~ msgstr "" + +#~ msgid "Nexenta SA returned the error" +#~ msgstr "" + +#~ msgid "Ignored target group creation error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group member addition error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LU creation error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Copying metadata from snapshot %(snap_volume_id)s to %(volume_id)s" +#~ msgstr "" + +#~ msgid "Connection to glance failed" +#~ msgstr "" + +#~ msgid "Invalid snapshot" +#~ msgstr "" + +#~ msgid "Invalid input received" +#~ msgstr "" + +#~ msgid "Invalid volume type" +#~ msgstr "" + +#~ msgid "Invalid volume" +#~ msgstr "" + +#~ msgid "Invalid host" +#~ msgstr "" + +#~ msgid "Invalid auth key" +#~ msgstr "" + +#~ msgid "Invalid metadata" +#~ msgstr "" + +#~ msgid "Invalid metadata size" +#~ msgstr "" + +#~ msgid "Migration error" +#~ msgstr "" + +#~ msgid "Quota exceeded" +#~ msgstr "" + +#~ msgid "Connection to swift failed" +#~ msgstr "" + +#~ msgid "Volume migration failed" +#~ msgstr "" + +#~ msgid "SSH command injection detected" +#~ msgstr "" + +#~ msgid "Invalid qos specs" +#~ msgstr "" + +#~ msgid "debug in callback: %s" +#~ msgstr "" + +#~ msgid "Expected object of type: %s" +#~ msgstr "" + +#~ msgid "timefunc: '%(name)s' took %(total_time).2f secs" +#~ msgstr "" + +#~ msgid "base image still has %s snapshots so not deleting base image" +#~ msgstr "" + +#~ msgid "Failed to rename migration destination volume %(vol)s to %(name)s" +#~ msgstr "" + +#~ msgid "Resize volume \"%(name)s\" to %(size)s" +#~ msgstr "" + +#~ msgid "Volume \"%(name)s\" resized. New size is %(size)s" +#~ msgstr "" + +#~ msgid "Invalid snapshot backing file format: %s" +#~ msgstr "" + +#~ msgid "Extend volume from %(old_size) to %(new_size)" +#~ msgstr "" + +#~ msgid "pool %s doesn't exist" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Could not get system name." +#~ msgstr "" + +#~ msgid "Disk not found: %s" +#~ msgstr "" + +#~ msgid "read timed out" +#~ msgstr "" + +#~ msgid "check_for_setup_error." +#~ msgstr "" + +#~ msgid "check_for_setup_error: Can not get device type." +#~ msgstr "" + +#~ msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +#~ msgstr "" + +#~ msgid "_get_device_type: Storage Pool must be configured." +#~ msgstr "" + +#~ msgid "create_volume:volume name: %s." +#~ msgstr "" + +#~ msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +#~ msgstr "" + +#~ msgid "create_export: volume name:%s" +#~ msgstr "" + +#~ msgid "create_export:Volume %(name)s does not exist." +#~ msgstr "" + +#~ msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +#~ msgstr "" + +#~ msgid "terminate_connection:Host does not exist. Host name:%(host)s." +#~ msgstr "" + +#~ msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +#~ msgstr "" + +#~ msgid "create_snapshot:Device does not support snapshot." +#~ msgstr "" + +#~ msgid "create_snapshot:Resource pool needs 1GB valid size at least." +#~ msgstr "" + +#~ msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +#~ msgstr "" + +#~ msgid "create_snapshot:Snapshot does not exist. 
Snapshot name:%(name)s" +#~ msgstr "" + +#~ msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +#~ msgstr "" + +#~ msgid "delete_snapshot:Device does not support snapshot." +#~ msgstr "" + +#~ msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +#~ msgstr "" + +#~ msgid "_check_conf_file: %s" +#~ msgstr "" + +#~ msgid "Write login information to xml error. %s" +#~ msgstr "" + +#~ msgid "_get_login_info error. %s" +#~ msgstr "" + +#~ msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +#~ msgstr "" + +#~ msgid "_get_lun_set_info:%s" +#~ msgstr "" + +#~ msgid "_get_iscsi_info:%s" +#~ msgstr "" + +#~ msgid "CLI command:%s" +#~ msgstr "" + +#~ msgid "_execute_cli:%s" +#~ msgstr "" + +#~ msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +#~ msgstr "" + +#~ msgid "_get_tgt_iqn:iSCSI IP is %s." +#~ msgstr "" + +#~ msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +#~ msgstr "" + +#~ msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +#~ msgstr "" + +#~ msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +#~ msgstr "" + +#~ msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." +#~ msgstr "" + +#~ msgid "Ignored target creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group member addition error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LU creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LUN mapping entry addition error while ensuring export" +#~ msgstr "" + +#~ msgid "Invalid source volume %(reason)s." +#~ msgstr "" + +#~ msgid "The request is invalid." +#~ msgstr "" + +#~ msgid "Volume %(volume_id)s persistence file could not be found." +#~ msgstr "" + +#~ msgid "No disk at %(location)s" +#~ msgstr "" + +#~ msgid "Class %(class_name)s could not be found: %(exception)s" +#~ msgstr "" + +#~ msgid "Action not allowed." +#~ msgstr "" + +#~ msgid "Key pair %(key_name)s already exists." +#~ msgstr "" + +#~ msgid "Migration error: %(reason)s" +#~ msgstr "" + +#~ msgid "Maximum volume/snapshot size exceeded" +#~ msgstr "" + +#~ msgid "3PAR Host already exists: %(err)s. %(info)s" +#~ msgstr "" + +#~ msgid "Backup volume %(volume_id)s type not recognised." +#~ msgstr "" + +#~ msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s" +#~ msgstr "" + +#~ msgid "ssh_read: Read SSH timeout" +#~ msgstr "" + +#~ msgid "do_setup." +#~ msgstr "" + +#~ msgid "create_volume: volume name: %s." +#~ msgstr "" + +#~ msgid "delete_volume: volume name: %s." +#~ msgstr "" + +#~ msgid "create_cloned_volume: src volume: %(src)s tgt volume: %(tgt)s" +#~ msgstr "" + +#~ msgid "create_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" +#~ msgstr "" + +#~ msgid "delete_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" +#~ msgstr "" + +#~ msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Updating volume stats" +#~ msgstr "" + +#~ msgid "restore finished." +#~ msgstr "" + +#~ msgid "Error encountered during initialization of driver: %s" +#~ msgstr "" + +#~ msgid "Unabled to update stats, driver is uninitialized" +#~ msgstr "" + +#~ msgid "Snapshot file at %s does not exist." 
+#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %s does not exist" +#~ msgstr "" + +#~ msgid "Login to 3PAR array invalid" +#~ msgstr "" + +#~ msgid "There are no datastores present under %s." +#~ msgstr "" + +#~ msgid "Size for volume: %s not found, skipping secure delete." +#~ msgstr "" + +#~ msgid "Could not find attribute for LUN named %s" +#~ msgstr "" + +#~ msgid "Cleaning up incomplete backup operations" +#~ msgstr "" + +#~ msgid "Resetting volume %s to available (was backing-up)" +#~ msgstr "" + +#~ msgid "Resetting volume %s to error_restoring (was restoring-backup)" +#~ msgstr "" + +#~ msgid "Resetting backup %s to error (was creating)" +#~ msgstr "" + +#~ msgid "Resetting backup %s to available (was restoring)" +#~ msgstr "" + +#~ msgid "Resuming delete on backup: %s" +#~ msgstr "" + +#~ msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +#~ msgstr "" + +#~ msgid "create_backup finished. backup: %s" +#~ msgstr "" + +#~ msgid "delete_backup started, backup: %s" +#~ msgstr "" + +#~ msgid "delete_backup finished, backup %s deleted" +#~ msgstr "" + +#~ msgid "JSON transfer Error" +#~ msgstr "" + +#~ msgid "create volume error: %(err)s" +#~ msgstr "" + +#~ msgid "Create snapshot error." +#~ msgstr "" + +#~ msgid "Create luncopy error." +#~ msgstr "" + +#~ msgid "_find_host_lun_id transfer data error! " +#~ msgstr "" + +#~ msgid "ssh_read: Read SSH timeout." +#~ msgstr "" + +#~ msgid "There are no hosts in the inventory." +#~ msgstr "" + +#~ msgid "Unable to create volume: %(vol)s on the hosts: %(hosts)s." +#~ msgstr "" + +#~ msgid "Successfully cloned new backing: %s." +#~ msgstr "" + +#~ msgid "Successfully reverted clone: %(clone)s to snapshot: %(snapshot)s." +#~ msgstr "" + +#~ msgid "Copying backing files from %(src)s to %(dest)s." +#~ msgstr "" + +#~ msgid "Initiated copying of backing via task: %s." +#~ msgstr "" + +#~ msgid "Successfully copied backing to %s." +#~ msgstr "" + +#~ msgid "Registering backing at path: %s to inventory." +#~ msgstr "" + +#~ msgid "Initiated registring backing, task: %s." +#~ msgstr "" + +#~ msgid "Successfully registered backing: %s." +#~ msgstr "" + +#~ msgid "Reverting backing to snapshot: %s." +#~ msgstr "" + +#~ msgid "Initiated reverting snapshot via task: %s." +#~ msgstr "" + +#~ msgid "Successfully reverted to snapshot: %s." +#~ msgstr "" + +#~ msgid "Successfully copied disk data to: %s." +#~ msgstr "" + +#~ msgid "Error(s): %s occurred in the call to RetrieveProperties." +#~ msgstr "" + +#~ msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +#~ msgstr "" + +#~ msgid "Deploy v1 of the Cinder API. " +#~ msgstr "" + +#~ msgid "Deploy v2 of the Cinder API. " +#~ msgstr "" + +#~ msgid "_read_xml:%s" +#~ msgstr "" + +#~ msgid "request ip info is %s." +#~ msgstr "" + +#~ msgid "new str info is %s." +#~ msgstr "" + +#~ msgid "Failed to create iser target for volume %(volume_id)s." +#~ msgstr "" + +#~ msgid "Failed to remove iser target for volume %(volume_id)s." 
+#~ msgstr "" + +#~ msgid "rtstool is not installed correctly" +#~ msgstr "" + +#~ msgid "Creating iser_target for: %s" +#~ msgstr "" + +#~ msgid "Failed to create iser target for volume id:%(vol_id)s: %(e)s" +#~ msgstr "" + +#~ msgid "Removing iser_target for: %s" +#~ msgstr "" + +#~ msgid "Failed to remove iser target for volume id:%(vol_id)s: %(e)s" +#~ msgstr "" + +#~ msgid "Volume %s does not exist, it seems it was already deleted" +#~ msgstr "" + +#~ msgid "Executing zfs send/recv on the appliance" +#~ msgstr "" + +#~ msgid "zfs send/recv done, new volume %s created" +#~ msgstr "" + +#~ msgid "Failed to delete temp snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" + +#~ msgid "Failed to delete zfs recv snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" + +#~ msgid "rbd export-diff failed - %s" +#~ msgstr "" + +#~ msgid "rbd import-diff failed - %s" +#~ msgstr "" + +#~ msgid "%s is not on GPFS. Perhaps GPFS not mounted." +#~ msgstr "" + +#~ msgid "Folder %s does not exist, it seems it was already deleted." +#~ msgstr "" + +#~ msgid "No 'os-update_readonly_flag' was specified in request." +#~ msgstr "" + +#~ msgid "Volume 'readonly' flag must be specified in request as a boolean." +#~ msgstr "" + +#~ msgid "ISER provider_location not stored, using discovery" +#~ msgstr "" + +#~ msgid "Could not find iSER export for volume %s" +#~ msgstr "" + +#~ msgid "ISER Discovery: Found %s" +#~ msgstr "" + +#~ msgid "Failed to access the device on the path %(path)s: %(error)s." +#~ msgstr "" + +#~ msgid "iSER device not found at %s" +#~ msgstr "" + +#~ msgid "Found iSER node %(host_device)s (after %(tries)s rescans)." +#~ msgstr "" + +#~ msgid "Skipping ensure_export. No iser_target provisioned for volume: %s" +#~ msgstr "" + +#~ msgid "Skipping remove_export. No iser_target provisioned for volume: %s" +#~ msgstr "" + +#~ msgid "Downloading image: %s from glance image server." +#~ msgstr "" + +#~ msgid "Uploading image: %s to the Glance image server." +#~ msgstr "" + +#~ msgid "Invalid request body" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: prefix %s" +#~ msgstr "" + +#~ msgid "Schedule volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete schedule volume using flow: %s" +#~ msgstr "" + +#~ msgid "Create volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete create volume workflow" +#~ msgstr "" + +#~ msgid "Expected volume result not found" +#~ msgstr "" + +#~ msgid "Manager volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete manager volume workflow" +#~ msgstr "" + +#~ msgid "Unable to update stats, driver is uninitialized" +#~ msgstr "" + +#~ msgid "Bad reponse from server: %s" +#~ msgstr "" + +#~ msgid "%(flow)s has moved into state %(state)s from state %(old_state)s" +#~ msgstr "" + +#~ msgid "No request spec, will not reschedule" +#~ msgstr "" + +#~ msgid "No retry filter property or associated retry info, will not reschedule" +#~ msgstr "" + +#~ msgid "Retry info not present, will not reschedule" +#~ msgstr "" + +#~ msgid "Clear capabilities" +#~ msgstr "" + +#~ msgid "This usually means the volume was never succesfully created." +#~ msgstr "" + +#~ msgid "setting LU uppper (end) limit to %s" +#~ msgstr "" + +#~ msgid "Can't find lun or lun goup in array" +#~ msgstr "" + +#~ msgid "Volume to be restored to is smaller than the backup to be restored" +#~ msgstr "" + +#~ msgid "Volume driver '%(driver)s' not initialized." 
+#~ msgstr "" + +#~ msgid "in looping call" +#~ msgstr "" + +#~ msgid "Is the appropriate service running?" +#~ msgstr "" + +#~ msgid "Could not find another host" +#~ msgstr "" + +#~ msgid "Not enough allocatable volume gigabytes remaining" +#~ msgstr "" + +#~ msgid "Unable to update stats on non-intialized Volume Group: %s" +#~ msgstr "" + +#~ msgid "do_setup: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "migrate_volume started with more than one vdisk copy" +#~ msgstr "" + +#~ msgid "migrate_volume: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "Selected datastore: %s for the volume." +#~ msgstr "" + +#~ msgid "There are no valid datastores present under %s." +#~ msgstr "" + +#~ msgid "Unable to create volume, driver not initialized" +#~ msgstr "" + +#~ msgid "Migration %(migration_id)s could not be found." +#~ msgstr "" + +#~ msgid "Bad driver response status: %(status)s" +#~ msgstr "" + +#~ msgid "Instance %(instance_id)s could not be found." +#~ msgstr "" + +#~ msgid "Volume retype failed: %(reason)s" +#~ msgstr "" + +#~ msgid "SIGTERM received" +#~ msgstr "" + +#~ msgid "Child %(pid)d exited with status %(code)d" +#~ msgstr "" + +#~ msgid "_wait_child %d" +#~ msgstr "" + +#~ msgid "wait wrap.failed %s" +#~ msgstr "" + +#~ msgid "" +#~ "Report interval must be less than " +#~ "service down time. Current config: " +#~ ". Setting " +#~ "service_down_time to: %(new_service_down_time)s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target for volume %(name)s." +#~ msgstr "" + +#~ msgid "Updating iscsi target: %s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target %(name)s: %(e)s" +#~ msgstr "" + +#~ msgid "Caught '%(exception)s' exception." +#~ msgstr "" + +#~ msgid "Get code level failed" +#~ msgstr "" + +#~ msgid "do_setup: Could not get system name" +#~ msgstr "" + +#~ msgid "Failed to get license information." +#~ msgstr "" + +#~ msgid "do_setup: No configured nodes" +#~ msgstr "" + +#~ msgid "enter: _get_chap_secret_for_host: host name %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_chap_secret_for_host: host name " +#~ "%(host_name)s with secret %(chap_secret)s" +#~ msgstr "" + +#~ msgid "" +#~ "_create_host: Cannot clean host name. " +#~ "Host name is not unicode or string" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: %s" +#~ msgstr "" + +#~ msgid "leave: _get_host_from_connector: host %s" +#~ msgstr "" + +#~ msgid "enter: _create_host: host %s" +#~ msgstr "" + +#~ msgid "_create_host: No connector ports" +#~ msgstr "" + +#~ msgid "leave: _create_host: host %(host)s - %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +#~ msgstr "" + +#~ msgid "" +#~ "storwize_svc_multihostmap_enabled is set to " +#~ "False, Not allow multi host mapping" +#~ msgstr "" + +#~ msgid "volume %s mapping to multi host" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _map_vol_to_host: LUN %(result_lun)s, " +#~ "volume %(volume_name)s, host %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "leave: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "_create_host failed to return the host name." +#~ msgstr "" + +#~ msgid "_get_host_from_connector failed to return the host name for connector" +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to any host found." +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: Multiple mappings of " +#~ "volume %(vol_name)s found, no host " +#~ "specified." 
+#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to host %(host_name)s found" +#~ msgstr "" + +#~ msgid "protocol must be specified as ' iSCSI' or ' FC'" +#~ msgstr "" + +#~ msgid "enter: _create_vdisk: vdisk %s " +#~ msgstr "" + +#~ msgid "" +#~ "_create_vdisk %(name)s - did not find success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find success " +#~ "message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find mapping " +#~ "id in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to prepare FlashCopy" +#~ " from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "Unexpected mapping status %(status)s for " +#~ "mapping %(id)s. Attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Mapping %(id)s prepare failed to " +#~ "complete within the allotted %(to)d " +#~ "seconds timeout. Terminating." +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s with " +#~ "exception %(ex)s" +#~ msgstr "" + +#~ msgid "_prepare_fc_map: %s" +#~ msgstr "" + +#~ msgid "" +#~ "_start_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "enter: _run_flashcopy: execute FlashCopy from" +#~ " source %(source)s to target %(target)s" +#~ msgstr "" + +#~ msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +#~ msgstr "" + +#~ msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %(src_vdisk)s (%(src_id)s) does not exist" +#~ msgstr "" + +#~ msgid "" +#~ "_create_copy: cannot get source vdisk " +#~ "%(src)s capacity from vdisk attributes " +#~ "%(attr)s" +#~ msgstr "" + +#~ msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_flashcopy_mapping_attributes: mapping " +#~ "%(fc_map_id)s, attributes %(attributes)s" +#~ msgstr "" + +#~ msgid "enter: _is_vdisk_defined: vdisk %s " +#~ msgstr "" + +#~ msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +#~ msgstr "" + +#~ msgid "enter: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "warning: Tried to delete vdisk %s but it does not exist." 
+#~ msgstr "" + +#~ msgid "leave: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "" +#~ "_add_vdisk_copy %(name)s - did not find" +#~ " success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "_get_vdisk_copy_attrs: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "_get_pool_attrs: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "enter: _execute_command_and_parse_attributes: command %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _execute_command_and_parse_attributes:\n" +#~ "command: %(cmd)s\n" +#~ "attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "_get_hdr_dic: attribute headers and values do not match.\n" +#~ " Headers: %(header)s\n" +#~ " Values: %(row)s" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ "stdout: %(out)s\n" +#~ "stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "Did not find expected column in %(fun)s: %(hdr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Volume size %(size)s cannot be lesser" +#~ " than the snapshot size %(snap_size)s. " +#~ "They must be >= original snapshot " +#~ "size." +#~ msgstr "" + +#~ msgid "" +#~ "Clones currently disallowed when %(size)s " +#~ "< %(source_size)s. They must be >= " +#~ "original volume size." +#~ msgstr "" + +#~ msgid "" +#~ "Size of specified image %(image_size)s " +#~ "is larger than volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "" +#~ "Image minDisk size %(min_disk)s is " +#~ "larger than the volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "Updating volume %(volume_id)s with %(update)s" +#~ msgstr "" + +#~ msgid "Volume %s: resetting 'creating' status failed" +#~ msgstr "" + +#~ msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s" +#~ msgstr "" + +#~ msgid "Marking volume %s as bootable" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting download of %(image_id)s " +#~ "(%(image_location)s) to volume %(volume_id)s" +#~ msgstr "" + +#~ msgid "" +#~ "Downloaded image %(image_id)s (%(image_location)s)" +#~ " to volume %(volume_id)s successfully" +#~ msgstr "" + +#~ msgid "" +#~ "Creating volume glance metadata for " +#~ "volume %(volume_id)s backed by image " +#~ "%(image_id)s with: %(vol_metadata)s" +#~ msgstr "" + +#~ msgid "" +#~ "Cloning %(volume_id)s from image %(image_id)s" +#~ " at location %(image_location)s" +#~ msgstr "" + diff --git a/cinder/locale/km/LC_MESSAGES/cinder.po b/cinder/locale/km/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..9b9a92207b --- /dev/null +++ b/cinder/locale/km/LC_MESSAGES/cinder.po @@ -0,0 +1,10001 @@ +# Khmer translations for cinder. +# Copyright (C) 2013 ORGANIZATION +# This file is distributed under the same license as the cinder project. 
+# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Cinder\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-02-03 06:16+0000\n" +"PO-Revision-Date: 2013-11-26 20:45+0000\n" +"Last-Translator: openstackjenkins \n" +"Language-Team: Khmer " +"(http://www.transifex.com/projects/p/openstack/language/km/)\n" +"Plural-Forms: nplurals=1; plural=0\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:102 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:66 cinder/brick/exception.py:33 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:88 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:107 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:112 +#, python-format +msgid "Volume driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:116 +#, python-format +msgid "Backup driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:120 +#, python-format +msgid "Connection to glance failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:124 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:129 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:133 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:137 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:141 +msgid "Volume driver not ready." +msgstr "" + +#: cinder/exception.py:145 cinder/brick/exception.py:74 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:150 +#, python-format +msgid "Invalid snapshot: %(reason)s" +msgstr "" + +#: cinder/exception.py:154 +#, python-format +msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:159 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:163 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:167 +msgid "The results are invalid." +msgstr "" + +#: cinder/exception.py:171 +#, python-format +msgid "Invalid input received: %(reason)s" +msgstr "" + +#: cinder/exception.py:175 +#, python-format +msgid "Invalid volume type: %(reason)s" +msgstr "" + +#: cinder/exception.py:179 +#, python-format +msgid "Invalid volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:183 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:187 +#, python-format +msgid "Invalid host: %(reason)s" +msgstr "" + +#: cinder/exception.py:193 cinder/brick/exception.py:81 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:197 +#, python-format +msgid "Invalid auth key: %(reason)s" +msgstr "" + +#: cinder/exception.py:201 +#, python-format +msgid "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" +msgstr "" + +#: cinder/exception.py:206 +msgid "Service is unavailable at this time." 
+msgstr "" + +#: cinder/exception.py:210 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:214 +#, python-format +msgid "The device in the path %(path)s is unavailable: %(reason)s" +msgstr "" + +#: cinder/exception.py:218 +#, python-format +msgid "Expected a uuid but received %(uuid)s." +msgstr "" + +#: cinder/exception.py:222 cinder/brick/exception.py:68 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:228 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:232 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "" +"Volume %(volume_id)s has no administration metadata with key " +"%(metadata_key)s." +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Invalid metadata: %(reason)s" +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Invalid metadata size: %(reason)s" +msgstr "" + +#: cinder/exception.py:250 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:255 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:264 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:269 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s deletion is not allowed with volumes " +"present with the type." +msgstr "" + +#: cinder/exception.py:274 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:278 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:282 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:287 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:291 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:295 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:328 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:332 +#, python-format +msgid "Unknown quota resources %(unknown)s." 
+msgstr "" + +#: cinder/exception.py:336 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:340 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:344 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:348 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:352 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:356 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:365 +#, python-format +msgid "Volume Type %(id)s already exists." +msgstr "" + +#: cinder/exception.py:369 +#, python-format +msgid "Volume type encryption for type %(type_id)s already exists." +msgstr "" + +#: cinder/exception.py:373 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:377 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:381 +#, python-format +msgid "Could not find parameter %(param)s" +msgstr "" + +#: cinder/exception.py:385 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:389 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:398 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:402 +#, python-format +msgid "Quota exceeded: code=%(code)s" +msgstr "" + +#: cinder/exception.py:409 +#, python-format +msgid "" +"Requested volume or snapshot exceeds allowed Gigabytes quota. Requested " +"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." +msgstr "" + +#: cinder/exception.py:415 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:419 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:423 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:427 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:432 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:436 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:440 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:444 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:449 +#, python-format +msgid "Glance metadata for volume/snapshot %(id)s cannot be found." 
+msgstr "" + +#: cinder/exception.py:453 +#, python-format +msgid "Failed to export for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:457 +#, python-format +msgid "Failed to create metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:461 +#, python-format +msgid "Failed to update metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:465 +#, python-format +msgid "Failed to copy metadata to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:469 +#, python-format +msgid "Failed to copy image to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:473 +msgid "Invalid Ceph args provided for backup rbd operation" +msgstr "" + +#: cinder/exception.py:477 +msgid "An error has occurred during backup operation" +msgstr "" + +#: cinder/exception.py:481 +msgid "Backup RBD operation failed" +msgstr "" + +#: cinder/exception.py:485 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:489 +msgid "Failed to identify volume backend." +msgstr "" + +#: cinder/exception.py:493 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "" + +#: cinder/exception.py:497 +#, python-format +msgid "Connection to swift failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:501 +#, python-format +msgid "Transfer %(transfer_id)s could not be found." +msgstr "" + +#: cinder/exception.py:505 +#, python-format +msgid "Volume migration failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:509 +#, python-format +msgid "SSH command injection detected: %(command)s" +msgstr "" + +#: cinder/exception.py:513 +#, python-format +msgid "QoS Specs %(specs_id)s already exists." +msgstr "" + +#: cinder/exception.py:517 +#, python-format +msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:522 +#, python-format +msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "No such QoS spec %(specs_id)s." +msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:536 +#, python-format +msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:541 +#, python-format +msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." +msgstr "" + +#: cinder/exception.py:546 +#, python-format +msgid "Invalid qos specs: %(reason)s" +msgstr "" + +#: cinder/exception.py:550 +#, python-format +msgid "QoS Specs %(specs_id)s is still associated with entities." +msgstr "" + +#: cinder/exception.py:554 +#, python-format +msgid "key manager error: %(reason)s" +msgstr "" + +#: cinder/exception.py:560 +msgid "Coraid Cinder Driver exception." +msgstr "" + +#: cinder/exception.py:564 +msgid "Failed to encode json data." +msgstr "" + +#: cinder/exception.py:568 +msgid "Login on ESM failed." +msgstr "" + +#: cinder/exception.py:572 +msgid "Relogin on ESM failed." +msgstr "" + +#: cinder/exception.py:576 +#, python-format +msgid "Group with name \"%(group_name)s\" not found." +msgstr "" + +#: cinder/exception.py:580 +#, python-format +msgid "ESM configure request failed: %(message)s." +msgstr "" + +#: cinder/exception.py:584 +#, python-format +msgid "Coraid ESM not available with reason: %(reason)s." +msgstr "" + +#: cinder/exception.py:589 +msgid "Zadara Cinder Driver exception." 
+msgstr "" + +#: cinder/exception.py:593 +#, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:597 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:601 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:605 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:609 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:613 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:618 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:622 +msgid "SolidFire Cinder Driver exception" +msgstr "" + +#: cinder/exception.py:626 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:630 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:636 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:641 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:645 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:649 cinder/exception.py:662 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:654 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:658 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/manager.py:133 +msgid "Notifying Schedulers of capabilities ..." +msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:105 +#, python-format +msgid "" +"Default quota for resource: %(res)s is set by the default quota flag: " +"quota_%(res)s, it is now deprecated. Please use the the default quota " +"class for default quota." +msgstr "" + +#: cinder/quota.py:748 +#, python-format +msgid "Created reservations %s" +msgstr "" + +#: cinder/quota.py:770 +#, python-format +msgid "Failed to commit reservations %s" +msgstr "" + +#: cinder/quota.py:790 +#, python-format +msgid "Failed to roll back reservations %s" +msgstr "" + +#: cinder/quota.py:876 +msgid "Cannot register resource" +msgstr "" + +#: cinder/quota.py:879 +msgid "Cannot register resources" +msgstr "" + +#: cinder/quota_utils.py:46 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume - " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/quota_utils.py:56 cinder/transfer/api.py:168 +#: cinder/volume/flows/api/create_volume.py:520 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/service.py:95 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:108 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:148 +#, python-format +msgid "" +"Report interval must be less than service down time. Current config " +"service_down_time: %(service_down_time)s, report_interval for this: " +"service is: %(report_interval)s. 
Setting global service_down_time to: " +"%(new_down_time)s" +msgstr "" + +#: cinder/service.py:216 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:255 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:270 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:276 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:298 +#, python-format +msgid "" +"Value of config option %(name)s_workers must be integer greater than 1. " +"Input value ignored." +msgstr "" + +#: cinder/service.py:373 +msgid "serve() can only be called once" +msgstr "" + +#: cinder/service.py:379 cinder/openstack/common/service.py:166 +#: cinder/openstack/common/service.py:384 +msgid "Full set of CONF:" +msgstr "" + +#: cinder/service.py:387 +#, python-format +msgid "%s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Can not translate %s to integer." +msgstr "" + +#: cinder/utils.py:127 +#, python-format +msgid "May specify only one of %s" +msgstr "" + +#: cinder/utils.py:212 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:228 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:412 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:423 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:698 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:759 +#, python-format +msgid "Volume driver %s not initialized" +msgstr "" + +#: cinder/wsgi.py:127 cinder/openstack/common/sslutils.py:50 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: cinder/wsgi.py:130 cinder/openstack/common/sslutils.py:53 +#, python-format +msgid "Unable to find ca_file : %s" +msgstr "" + +#: cinder/wsgi.py:133 cinder/openstack/common/sslutils.py:56 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: cinder/wsgi.py:136 cinder/openstack/common/sslutils.py:59 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:169 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:206 +#, python-format +msgid "Started %(name)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:244 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:313 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." 
+msgstr "" + +#: cinder/api/common.py:92 cinder/api/common.py:126 cinder/volume/api.py:266 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:95 cinder/api/common.py:130 cinder/volume/api.py:263 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:120 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:134 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:162 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:189 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:182 +msgid "Initializing extension manager." +msgstr "" + +#: cinder/api/extensions.py:197 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:235 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:236 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:240 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:256 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:262 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:276 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:287 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:356 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:266 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:463 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:786 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:907 +msgid "subclasses must implement construct()!" 
+msgstr "" + +#: cinder/api/contrib/admin_actions.py:81 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:227 +#: cinder/api/contrib/volume_transfer.py:157 +#: cinder/api/contrib/volume_transfer.py:193 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:224 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:60 +msgid "Snapshot not found." +msgstr "" + +#: cinder/api/contrib/hosts.py:86 cinder/api/openstack/wsgi.py:245 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:136 +#, python-format +msgid "Host '%s' could not be found." +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:168 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:180 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:206 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:214 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:111 +msgid "Please specify a name for QoS specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:220 +msgid "Failed to disassociate qos specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:222 +msgid "Qos specs still in use." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:298 +#: cinder/api/contrib/qos_specs_manage.py:351 +msgid "Volume Type id must not be None." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:72 +msgid "Missing required element quota_class_set in request body." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:81 +msgid "Quota class limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:85 +msgid "Quota class limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:60 +msgid "Quota limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quotas.py:65 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:100 +msgid "Missing required element quota_set in request body." +msgstr "" + +#: cinder/api/contrib/quotas.py:111 +#, python-format +msgid "Bad key(s) in quota set: %s" +msgstr "" + +#: cinder/api/contrib/scheduler_hints.py:36 +msgid "Malformed scheduler_hints attribute" +msgstr "" + +#: cinder/api/contrib/services.py:84 +msgid "" +"Query by service parameter is deprecated. Please use binary parameter " +"instead." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:51 +msgid "'status' must be specified." 
+msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:61 +#, python-format +msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:67 +#, python-format +msgid "" +"Provided snapshot status %(provided)s not allowed for snapshot with " +"status %(current)s." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:79 +msgid "progress must be an integer percentage" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:101 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:105 +#: cinder/api/v1/snapshot_metadata.py:75 cinder/api/v1/volume_metadata.py:75 +#: cinder/api/v2/snapshot_metadata.py:75 cinder/api/v2/volume_metadata.py:74 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:108 +#: cinder/api/v1/snapshot_metadata.py:79 cinder/api/v1/volume_metadata.py:79 +#: cinder/api/v2/snapshot_metadata.py:79 cinder/api/v2/volume_metadata.py:78 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:150 +msgid "" +"Key names can only contain alphanumeric characters, underscores, periods," +" colons and hyphens." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:99 +#, python-format +msgid "" +"Invalid request to attach volume to an instance %(instance_uuid)s and a " +"host %(host_name)s simultaneously" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:107 +msgid "Invalid request to attach volume to an invalid target" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:111 +msgid "" +"Invalid request to attach volume with an invalid mode. Attaching mode " +"should be 'rw' or 'ro'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:196 +msgid "Unable to fetch connection information from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:216 +msgid "Unable to terminate volume connection from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:229 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:237 +msgid "Bad value for 'force' parameter." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:240 +msgid "'force' is not string or bool." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:280 +msgid "New volume size must be specified as an integer." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:299 +msgid "Must specify readonly in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:307 +msgid "Bad value for 'readonly'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:311 +msgid "'readonly' not string or bool" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:325 +msgid "New volume type must be specified." 
+msgstr "" + +#: cinder/api/contrib/volume_transfer.py:131 +msgid "Listing volume transfers" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:147 +#, python-format +msgid "Creating new volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:162 +#, python-format +msgid "Creating transfer of volume %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:183 +#, python-format +msgid "Accepting volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:196 +#, python-format +msgid "Accepting transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:217 +#, python-format +msgid "Delete transfer with id: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:64 +msgid "key_size must be non-negative" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:67 +msgid "key_size must be an integer" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:73 +msgid "provider must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:75 +msgid "control_location must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:83 +#, python-format +msgid "Valid control location are: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:111 +msgid "Cannot create encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:115 +msgid "Create body is not valid." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:157 +msgid "Cannot delete encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/middleware/auth.py:108 +msgid "Invalid service catalog json." +msgstr "" + +#: cinder/api/middleware/fault.py:44 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:53 cinder/api/openstack/wsgi.py:984 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/fault.py:69 +#, python-format +msgid "%(exception)s: %(explanation)s" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:55 cinder/api/middleware/sizelimit.py:64 +#: cinder/api/middleware/sizelimit.py:78 +msgid "Request is too large." +msgstr "" + +#: cinder/api/openstack/__init__.py:69 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:80 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:104 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:126 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." 
+msgstr "" + +#: cinder/api/openstack/wsgi.py:220 cinder/api/openstack/wsgi.py:634 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:639 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:677 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:682 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:685 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:793 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:799 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:803 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:914 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:917 cinder/api/openstack/wsgi.py:930 +#: cinder/api/v1/snapshot_metadata.py:53 cinder/api/v1/snapshot_metadata.py:71 +#: cinder/api/v1/snapshot_metadata.py:96 cinder/api/v1/snapshot_metadata.py:121 +#: cinder/api/v1/volume_metadata.py:53 cinder/api/v1/volume_metadata.py:71 +#: cinder/api/v1/volume_metadata.py:96 cinder/api/v1/volume_metadata.py:121 +#: cinder/api/v2/snapshot_metadata.py:53 cinder/api/v2/snapshot_metadata.py:71 +#: cinder/api/v2/snapshot_metadata.py:96 cinder/api/v2/snapshot_metadata.py:121 +#: cinder/api/v2/volume_metadata.py:52 cinder/api/v2/volume_metadata.py:70 +#: cinder/api/v2/volume_metadata.py:95 cinder/api/v2/volume_metadata.py:120 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:927 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:939 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:987 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:139 cinder/api/v2/limits.py:138 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:264 cinder/api/v2/limits.py:261 +msgid "This request was rate-limited." 
+msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:37 cinder/api/v1/snapshot_metadata.py:117 +#: cinder/api/v1/snapshot_metadata.py:156 cinder/api/v2/snapshot_metadata.py:37 +#: cinder/api/v2/snapshot_metadata.py:117 +#: cinder/api/v2/snapshot_metadata.py:156 +msgid "snapshot does not exist" +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:139 +#: cinder/api/v1/snapshot_metadata.py:149 cinder/api/v1/volume_metadata.py:139 +#: cinder/api/v1/volume_metadata.py:149 cinder/api/v2/snapshot_metadata.py:139 +#: cinder/api/v2/snapshot_metadata.py:149 cinder/api/v2/volume_metadata.py:138 +#: cinder/api/v2/volume_metadata.py:148 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:119 cinder/api/v2/snapshots.py:120 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:184 +msgid "'volume_id' must be specified" +msgstr "" + +#: cinder/api/v1/snapshots.py:182 cinder/api/v2/snapshots.py:193 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:186 cinder/api/v2/snapshots.py:202 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:37 cinder/api/v1/volume_metadata.py:117 +#: cinder/api/v1/volume_metadata.py:156 cinder/api/v2/volume_metadata.py:36 +#: cinder/api/v2/volume_metadata.py:116 cinder/api/v2/volume_metadata.py:155 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:111 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:290 cinder/api/v2/volumes.py:228 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:344 cinder/api/v1/volumes.py:348 +#: cinder/api/v2/volumes.py:298 cinder/api/v2/volumes.py:302 +msgid "Invalid imageRef provided." +msgstr "" + +#: cinder/api/v1/volumes.py:388 cinder/api/v2/volumes.py:354 +#, python-format +msgid "snapshot id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:401 +#, python-format +msgid "source vol id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:412 cinder/api/v2/volumes.py:377 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/v1/volumes.py:496 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/snapshots.py:111 cinder/api/v2/snapshots.py:126 +#: cinder/api/v2/snapshots.py:267 +msgid "Snapshot could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:174 cinder/api/v2/snapshots.py:234 +#: cinder/api/v2/volumes.py:313 cinder/api/v2/volumes.py:419 +#, python-format +msgid "Missing required element '%s' in request body" +msgstr "" + +#: cinder/api/v2/snapshots.py:190 cinder/api/v2/volumes.py:217 +#: cinder/api/v2/volumes.py:234 cinder/api/v2/volumes.py:449 +msgid "Volume could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:230 cinder/api/v2/volumes.py:415 +msgid "Missing request body" +msgstr "" + +#: cinder/api/v2/types.py:70 +msgid "Volume type not found" +msgstr "" + +#: cinder/api/v2/volumes.py:237 +msgid "Volume cannot be deleted while in attached state" +msgstr "" + +#: cinder/api/v2/volumes.py:343 +msgid "Volume type not found." 
+msgstr "" + +#: cinder/api/v2/volumes.py:366 +#, python-format +msgid "source volume id:%s not found" +msgstr "" + +#: cinder/api/v2/volumes.py:472 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:66 +msgid "Backup status must be available or error" +msgstr "" + +#: cinder/backup/api.py:105 +msgid "Volume to be backed up must be available" +msgstr "" + +#: cinder/backup/api.py:140 +msgid "Backup status must be available" +msgstr "" + +#: cinder/backup/api.py:145 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:154 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:170 +msgid "Volume to be restored to must be available" +msgstr "" + +#: cinder/backup/api.py:176 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." +msgstr "" + +#: cinder/backup/api.py:181 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:97 +msgid "NULL host not allowed for volume backend lookup." +msgstr "" + +#: cinder/backup/manager.py:100 +#, python-format +msgid "Checking hostname '%s' for backend info." +msgstr "" + +#: cinder/backup/manager.py:107 +#, python-format +msgid "Backend not found in hostname (%s) so using default." +msgstr "" + +#: cinder/backup/manager.py:117 +#, python-format +msgid "Manager requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:120 cinder/backup/manager.py:132 +msgid "Fetching default backend." +msgstr "" + +#: cinder/backup/manager.py:123 +#, python-format +msgid "Volume manager for backend '%s' does not exist." +msgstr "" + +#: cinder/backup/manager.py:129 +#, python-format +msgid "Driver requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:147 +#, python-format +msgid "" +"Registering backend %(backend)s (host=%(host)s " +"backend_name=%(backend_name)s)." +msgstr "" + +#: cinder/backup/manager.py:154 +#, python-format +msgid "Registering default backend %s." +msgstr "" + +#: cinder/backup/manager.py:158 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)." +msgstr "" + +#: cinder/backup/manager.py:165 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s." +msgstr "" + +#: cinder/backup/manager.py:184 +msgid "Cleaning up incomplete backup operations." +msgstr "" + +#: cinder/backup/manager.py:189 +#, python-format +msgid "Resetting volume %s to available (was backing-up)." +msgstr "" + +#: cinder/backup/manager.py:194 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)." +msgstr "" + +#: cinder/backup/manager.py:206 +#, python-format +msgid "Resetting backup %s to error (was creating)." +msgstr "" + +#: cinder/backup/manager.py:212 +#, python-format +msgid "Resetting backup %s to available (was restoring)." +msgstr "" + +#: cinder/backup/manager.py:217 +#, python-format +msgid "Resuming delete on backup: %s." +msgstr "" + +#: cinder/backup/manager.py:225 +#, python-format +msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:237 +#, python-format +msgid "" +"Create backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s." 
+msgstr "" + +#: cinder/backup/manager.py:249 +#, python-format +msgid "" +"Create backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:282 +#, python-format +msgid "Create backup finished. backup: %s." +msgstr "" + +#: cinder/backup/manager.py:286 +#, python-format +msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:299 +#, python-format +msgid "" +"Restore backup aborted: expected volume status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:310 +#, python-format +msgid "" +"Restore backup aborted: expected backup status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:329 +#, python-format +msgid "" +"Restore backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:360 +#, python-format +msgid "" +"Restore backup finished, backup %(backup_id)s restored to volume " +"%(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:379 +#, python-format +msgid "Delete backup started, backup: %s." +msgstr "" + +#: cinder/backup/manager.py:386 +#, python-format +msgid "" +"Delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:399 +#, python-format +msgid "" +"Delete backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:422 +#, python-format +msgid "Delete backup finished, backup %s deleted." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:116 +msgid "" +"rbd striping not supported - ignoring configuration settings for rbd " +"striping" +msgstr "" + +#: cinder/backup/drivers/ceph.py:147 +#, python-format +msgid "invalid user '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:213 +msgid "backup_id required" +msgstr "" + +#: cinder/backup/drivers/ceph.py:224 +#, python-format +msgid "discarding %(length)s bytes from offset %(offset)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:232 +#, python-format +msgid "writing zeroes chunk %d" +msgstr "" + +#: cinder/backup/drivers/ceph.py:246 +#, python-format +msgid "transferring data between '%(src)s' and '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:250 +#, python-format +msgid "%(chunks)s chunks of %(bytes)s bytes to be transferred" +msgstr "" + +#: cinder/backup/drivers/ceph.py:269 +#, python-format +msgid "transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:279 +#, python-format +msgid "transferring remaining %s bytes" +msgstr "" + +#: cinder/backup/drivers/ceph.py:295 +#, python-format +msgid "creating base image '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:322 cinder/backup/drivers/ceph.py:603 +#, python-format +msgid "deleting backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:325 +msgid "no backup snapshot to delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:361 +#, python-format +msgid "trying diff format name format basename='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:369 +#, python-format +msgid "image %s not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:377 +#, python-format +msgid "base image still has %s snapshots so skipping base image delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:382 +#, python-format +msgid "deleting base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:389 +#, python-format +msgid "image busy, retrying %(retries)s more time(s) in %(delay)ss" +msgstr "" + +#: cinder/backup/drivers/ceph.py:394 +msgid "max retries reached - raising error" +msgstr "" + +#: cinder/backup/drivers/ceph.py:397 +#, python-format +msgid "base backup image='%s' deleted)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:407 +#, python-format +msgid "deleting source snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:453 +#, python-format +msgid "performing differential transfer from '%(src)s' to '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:478 +#, python-format +msgid "rbd diff op failed - (ret=%(ret)s stderr=%(stderr)s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:488 +#, python-format +msgid "image '%s' not found - trying diff format name" +msgstr "" + +#: cinder/backup/drivers/ceph.py:493 +#, python-format +msgid "diff format image '%s' not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:528 +#, python-format +msgid "using --from-snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:543 +#, python-format +msgid "source snap '%s' is stale so deleting" +msgstr "" + +#: cinder/backup/drivers/ceph.py:555 +#, python-format +msgid "" +"snap='%(snap)s' does not exist in base image='%(base)s' - aborting " +"incremental backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:566 +#, python-format +msgid "creating backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:586 +#, python-format +msgid "differential backup transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:595 +msgid "differential backup transfer failed" +msgstr "" + +#: 
cinder/backup/drivers/ceph.py:625 +#, python-format +msgid "creating base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:634 +msgid "copying data" +msgstr "" + +#: cinder/backup/drivers/ceph.py:694 +#, python-format +msgid "looking for snapshot of backup base '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:697 +#, python-format +msgid "backup base '%s' has no snapshots" +msgstr "" + +#: cinder/backup/drivers/ceph.py:704 +#, python-format +msgid "backup '%s' has no snapshot" +msgstr "" + +#: cinder/backup/drivers/ceph.py:708 +#, python-format +msgid "backup should only have one snapshot but instead has %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:713 +#, python-format +msgid "found snapshot '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:734 +msgid "need non-zero volume size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:751 +#, python-format +msgid "Starting backup of volume='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:764 +msgid "forcing full backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:776 +#, python-format +msgid "backup '%s' finished." +msgstr "" + +#: cinder/backup/drivers/ceph.py:834 +msgid "adjusting restore vol size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:846 +#, python-format +msgid "trying incremental restore from base='%(base)s' snap='%(snap)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:858 +msgid "differential restore failed, trying full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:869 +#, python-format +msgid "restore transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:916 +#, python-format +msgid "rbd has %s extents" +msgstr "" + +#: cinder/backup/drivers/ceph.py:938 +msgid "dest volume is original volume - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:959 +msgid "destination has extents - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:964 +#, python-format +msgid "no restore point found for backup='%s', forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:995 +msgid "forcing full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1005 +#, python-format +msgid "starting restore from Ceph backup=%(src)s to volume=%(dest)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1016 +msgid "volume_file does not support fileno() so skipping fsync()" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1021 +msgid "restore finished successfully." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:1023 +#, python-format +msgid "restore finished with error - %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1029 +#, python-format +msgid "delete started for backup=%s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1034 +msgid "rbd image not found but continuing anyway so that db entry can be removed" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1037 +#, python-format +msgid "delete '%s' finished with warning" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1039 +#, python-format +msgid "delete '%s' finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:106 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:123 +#, python-format +msgid "single_user auth mode enabled, but %(param)s not set" +msgstr "" + +#: cinder/backup/drivers/swift.py:141 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:146 +#, python-format +msgid "container %s does not exist" +msgstr "" + +#: cinder/backup/drivers/swift.py:151 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/drivers/swift.py:157 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:173 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:182 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:192 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:209 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/drivers/swift.py:214 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:219 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:224 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/drivers/swift.py:234 +#, python-format +msgid "volume size %d is invalid." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:248 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:271 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/drivers/swift.py:278 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:287 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/drivers/swift.py:291 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/drivers/swift.py:297 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:301 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:304 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:312 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/drivers/swift.py:328 cinder/backup/drivers/tsm.py:324 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/drivers/swift.py:345 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/drivers/swift.py:350 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:356 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/drivers/swift.py:362 +#, python-format +msgid "" +"restoring object from swift. backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:378 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/drivers/swift.py:401 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:409 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:423 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:428 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:432 cinder/backup/drivers/tsm.py:378 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:446 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:455 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:458 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:468 cinder/backup/drivers/tsm.py:440 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/backup/drivers/tsm.py:85 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to create device hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:143 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to obtain backup success notification from " +"server.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:173 +#, python-format +msgid "" +"restore: %(vol_id)s Failed.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:199 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a block device." +msgstr "" + +#: cinder/backup/drivers/tsm.py:206 +#, python-format +msgid "backup: %(vol_id)s Failed. Cannot obtain real path to device %(path)s." +msgstr "" + +#: cinder/backup/drivers/tsm.py:213 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a file." +msgstr "" + +#: cinder/backup/drivers/tsm.py:260 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to remove backup hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:286 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to TSM, volume path: " +"%(volume_path)s," +msgstr "" + +#: cinder/backup/drivers/tsm.py:298 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:308 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:338 +#, python-format +msgid "" +"restore: starting restore of backup from TSM to volume %(volume_id)s, " +"backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:352 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:362 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:413 +#, python-format +msgid "" +"delete: %(vol_id)s Failed to run dsmc with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:421 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments with " +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:432 +#, python-format +msgid "" +"delete: %(vol_id)s Failed with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/brick/exception.py:55 +#, python-format +msgid "Exception in string format operation. 
msg='%s'" +msgstr "" + +#: cinder/brick/exception.py:85 +msgid "We are unable to locate any Fibre Channel devices." +msgstr "" + +#: cinder/brick/exception.py:89 +msgid "Unable to find a Fibre Channel volume device." +msgstr "" + +#: cinder/brick/exception.py:93 +#, python-format +msgid "Volume device not found at %(device)s." +msgstr "" + +#: cinder/brick/exception.py:97 +#, python-format +msgid "Unable to find Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:101 +#, python-format +msgid "Failed to create Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:105 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:109 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:113 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:117 +#, python-format +msgid "Connect to volume via protocol %(protocol)s not supported." +msgstr "" + +#: cinder/brick/initiator/connector.py:127 +#, python-format +msgid "Invalid InitiatorConnector protocol specified %(protocol)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:140 +#, python-format +msgid "Failed to access the device on the path %(path)s: %(error)s %(info)s." +msgstr "" + +#: cinder/brick/initiator/connector.py:229 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. Try" +" number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:242 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:317 +#, python-format +msgid "Could not find the iSCSI Initiator File %s" +msgstr "" + +#: cinder/brick/initiator/connector.py:609 +msgid "We are unable to locate any Fibre Channel devices" +msgstr "" + +#: cinder/brick/initiator/connector.py:619 +#, python-format +msgid "Looking for Fibre Channel dev %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:629 +msgid "Fibre Channel volume device not found." +msgstr "" + +#: cinder/brick/initiator/connector.py:633 +#, python-format +msgid "Fibre volume not yet found. Will rescan & retry. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:649 +#, python-format +msgid "Found Fibre Channel volume %(name)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:658 +#, python-format +msgid "Multipath device discovered %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:776 +#, python-format +msgid "AoE volume not yet found at: %(path)s. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:789 +#, python-format +msgid "Found AoE device %(path)s (after %(tries)s rediscover)" +msgstr "" + +#: cinder/brick/initiator/connector.py:815 +#, python-format +msgid "aoe-discover: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:825 +#, python-format +msgid "aoe-revalidate %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:834 +#, python-format +msgid "aoe-flush %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:858 +msgid "" +"Connection details not present. RemoteFsClient may not initialize " +"properly." 
+msgstr "" + +#: cinder/brick/initiator/connector.py:915 +msgid "Invalid connection_properties specified no device_path attribute" +msgstr "" + +#: cinder/brick/initiator/linuxfc.py:50 cinder/brick/initiator/linuxfc.py:56 +msgid "systool is not installed" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:99 +#: cinder/brick/initiator/linuxscsi.py:107 +#: cinder/brick/initiator/linuxscsi.py:124 +#, python-format +msgid "multipath call failed exit (%(code)s)" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:145 +#, python-format +msgid "Couldn't find multipath device %(line)s" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:149 +#, python-format +msgid "Found multipath device = %(mdev)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:140 +msgid "Attempting recreate of backing lun..." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:158 +#, python-format +msgid "" +"Failed to recover attempt to create iscsi backing lun for volume " +"id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:177 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:184 +#, python-format +msgid "" +"Created volume path %(vp)s,\n" +"content: %(vc)%" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:216 cinder/brick/iscsi/iscsi.py:365 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:227 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:258 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:262 +#, python-format +msgid "Volume path %s does not exist, nothing to remove." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:280 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:290 cinder/brick/iscsi/iscsi.py:550 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:375 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:469 +msgid "cinder-rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:489 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:513 cinder/brick/iscsi/iscsi.py:522 +#, python-format +msgid "Failed to create iscsi target for volume id:%s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:532 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:542 +#, python-format +msgid "Failed to remove iscsi target for volume id:%s." 
+msgstr "" + +#: cinder/brick/iscsi/iscsi.py:571 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 cinder/brick/local_dev/lvm.py:158 +#: cinder/brick/local_dev/lvm.py:474 cinder/brick/local_dev/lvm.py:503 +#: cinder/brick/local_dev/lvm.py:546 cinder/brick/local_dev/lvm.py:609 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 cinder/brick/local_dev/lvm.py:159 +#: cinder/brick/local_dev/lvm.py:475 cinder/brick/local_dev/lvm.py:504 +#: cinder/brick/local_dev/lvm.py:547 cinder/brick/local_dev/lvm.py:610 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 cinder/brick/local_dev/lvm.py:160 +#: cinder/brick/local_dev/lvm.py:476 cinder/brick/local_dev/lvm.py:505 +#: cinder/brick/local_dev/lvm.py:548 cinder/brick/local_dev/lvm.py:611 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, python-format +msgid "Unable to locate Volume Group %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:157 +msgid "Error querying thin pool about data_percent" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:370 +#, python-format +msgid "Unable to find VG: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:420 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:473 +msgid "Error creating Volume" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:489 +#, python-format +msgid "Unable to find LV: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:502 +msgid "Error creating snapshot" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:545 +msgid "Error activating LV" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:563 +#, python-format +msgid "Error reported running lvremove: CMD: %(command)s, RESPONSE: %(response)s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:568 +msgid "Attempting udev settle and retry of lvremove..." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:608 +msgid "Error extending Volume" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:39 +msgid "nfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:45 +msgid "glusterfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:86 +#, python-format +msgid "Already mounted: %s" +msgstr "" + +#: cinder/common/config.py:125 +msgid "Deploy v1 of the Cinder API." +msgstr "" + +#: cinder/common/config.py:128 +msgid "Deploy v2 of the Cinder API." +msgstr "" + +#: cinder/common/sqlalchemyutils.py:66 +#: cinder/openstack/common/db/sqlalchemy/utils.py:72 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:114 +#: cinder/openstack/common/db/sqlalchemy/utils.py:120 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/compute/nova.py:97 +#, python-format +msgid "Novaclient connection created using URL: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:63 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:190 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:843 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1842 +#, python-format +msgid "VolumeType %s deletion failed, VolumeType in use." 
+msgstr "" + +#: cinder/db/sqlalchemy/api.py:2530 +#, python-format +msgid "No backup with id %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2615 +msgid "Volume must be available" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2639 +#, python-format +msgid "Volume in unexpected state %s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2662 +#, python-format +msgid "" +"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " +"%(status)s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:37 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:64 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:240 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:269 +msgid "Downgrade from initial Cinder install is unsupported." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:49 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:74 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:105 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:45 +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:48 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:46 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:127 +msgid "Dropping foreign key reservations_ibfk_1 failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:133 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:140 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:147 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:60 +msgid "Exception while creating table 'volume_glance_metadata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:75 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:68 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:58 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:61 +msgid "transfers table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:31 +msgid "migrations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:61 +#, python-format +msgid "Table |%s| not created" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:37 +#, python-format +msgid "Exception while dropping table %s." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:100 +#, python-format +msgid "Exception while creating table %s." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:34 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:43 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:49 +#, python-format +msgid "Column |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:92 +msgid "encryption_key_id column not dropped from volumes" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:100 +msgid "encryption_key_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:105 +msgid "volume_type_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:113 +msgid "encryption table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:49 +msgid "Table quality_of_service_specs not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:60 +msgid "Added qos_specs_id column to volume type table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:85 +msgid "Dropping foreign key volume_types_ibfk_1 failed" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:93 +msgid "Dropping qos_specs_id column failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:100 +msgid "Dropping quality_of_service_specs table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:59 +msgid "volume_admin_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:48 +msgid "" +"Found existing 'default' entries in the quota_classes table. Skipping " +"insertion of default values." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:72 +msgid "Added default quota class data into the DB." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:74 +msgid "Default quota class data not inserted into the DB." +msgstr "" + +#: cinder/image/glance.py:161 cinder/image/glance.py:169 +#, python-format +msgid "Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:94 cinder/image/image_utils.py:199 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/image/image_utils.py:101 +#, python-format +msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:109 cinder/image/image_utils.py:192 +#, python-format +msgid "" +"Size is %(image_size)dGB and doesn't fit in a volume of size " +"%(volume_size)dGB." +msgstr "" + +#: cinder/image/image_utils.py:157 +#, python-format +msgid "" +"qemu-img is not installed and image is of type %s. Only RAW images can " +"be used if qemu-img is not installed." +msgstr "" + +#: cinder/image/image_utils.py:164 +msgid "" +"qemu-img is not installed and the disk format is not specified. Only RAW" +" images can be used if qemu-img is not installed." 
+msgstr "" + +#: cinder/image/image_utils.py:178 +#, python-format +msgid "Copying image from %(tmp)s to volume %(dest)s - size: %(size)s" +msgstr "" + +#: cinder/image/image_utils.py:206 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:224 +#, python-format +msgid "Converted to %(vol_format)s, but format is now %(file_format)s" +msgstr "" + +#: cinder/image/image_utils.py:260 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:78 +msgid "" +"config option keymgr.fixed_key has not been defined: some operations may " +"fail unexpectedly" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:80 +msgid "keymgr.fixed_key not defined" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:134 +#, python-format +msgid "Not deleting key %s" +msgstr "" + +#: cinder/openstack/common/eventlet_backdoor.py:140 +#, python-format +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/fileutils.py:64 +#, python-format +msgid "Reloading cached file %s" +msgstr "" + +#: cinder/openstack/common/gettextutils.py:252 +msgid "Message objects do not support addition." +msgstr "" + +#: cinder/openstack/common/gettextutils.py:261 +msgid "" +"Message objects do not support str() because they may contain non-ascii " +"characters. Please use unicode() or translate() instead." +msgstr "" + +#: cinder/openstack/common/imageutils.py:96 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:189 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:200 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:227 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:235 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/log.py:301 +#, python-format +msgid "Deprecated: %s" +msgstr "" + +#: cinder/openstack/common/log.py:402 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:453 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:623 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:82 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:89 +#: cinder/tests/brick/test_brick_connector.py:466 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:129 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:136 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:43 +#, python-format +msgid "Unexpected argument for periodic task creation: %(arg)s." 
+msgstr "" + +#: cinder/openstack/common/periodic_task.py:134 +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:139 +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:177 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:186 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." +msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:127 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/openstack/common/processutils.py:142 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:167 +#: cinder/openstack/common/processutils.py:239 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:345 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:179 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/openstack/common/processutils.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:318 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:220 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/openstack/common/processutils.py:224 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/openstack/common/service.py:175 +#: cinder/openstack/common/service.py:269 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:187 +msgid "Exception during rpc cleanup." 
+msgstr "" + +#: cinder/openstack/common/service.py:238 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:275 +msgid "Unhandled exception" +msgstr "" + +#: cinder/openstack/common/service.py:308 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/openstack/common/service.py:327 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/openstack/common/service.py:337 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/openstack/common/service.py:354 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/openstack/common/service.py:358 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/service.py:362 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/openstack/common/service.py:392 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/openstack/common/service.py:410 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/openstack/common/sslutils.py:98 +#, python-format +msgid "Invalid SSL version : %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:86 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/strutils.py:182 +#, python-format +msgid "Invalid string format: %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:189 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/openstack/common/versionutils.py:69 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and " +"may be removed in %(remove_in)s." +msgstr "" + +#: cinder/openstack/common/versionutils.py:73 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s and may be removed in " +"%(remove_in)s. It will not be superseded." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:29 +msgid "An unknown error occurred in crypto utils." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:36 +#, python-format +msgid "Block size of %(given)d is too big, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:45 +#, python-format +msgid "Length of %(given)d is too long, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/db/exception.py:44 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:487 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:538 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:610 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/utils.py:33 +msgid "Sort key supplied was not valid." +msgstr "" + +#: cinder/openstack/common/notifier/api.py:129 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:145 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:164 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. 
Please use rpc_notifier instead."
+msgstr ""
+
+#: cinder/openstack/common/notifier/rpc_notifier.py:45
+#: cinder/openstack/common/notifier/rpc_notifier2.py:51
+#, python-format
+msgid "Could not send notification to %(topic)s. Payload=%(message)s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/__init__.py:105
+#, python-format
+msgid ""
+"An RPC is being made while holding a lock. The locks currently held are "
+"%(locks)s. This is probably a bug. Please report it. Include the "
+"following: [%(stack)s]."
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:83
+msgid "Pool creating new connection"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:208
+#, python-format
+msgid "no calling threads waiting for msg_id: %s, message: %s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:216
+#, python-format
+msgid ""
+"Number of call waiters is greater than warning threshold: %d. There "
+"could be a MulticallProxyWaiter leak."
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:299
+#, python-format
+msgid "unpacked context: %s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:345
+#, python-format
+msgid "UNIQUE_ID is %s."
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:414
+#, python-format
+msgid "received %s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:422
+#, python-format
+msgid "no method for message: %s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:423
+#, python-format
+msgid "No method for message: %s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:451
+#: cinder/openstack/common/rpc/impl_zmq.py:280
+#, python-format
+msgid "Expected exception during message handling (%s)"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:459
+#: cinder/openstack/common/rpc/impl_zmq.py:286
+msgid "Exception during message handling"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:594
+#, python-format
+msgid "Making synchronous call on %s ..."
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:597
+#, python-format
+msgid "MSG_ID is %s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:631
+#, python-format
+msgid "Making asynchronous cast on %s..."
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:640
+msgid "Making asynchronous fanout cast..."
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:668
+#, python-format
+msgid "Sending %(event_type)s on %(topic)s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:74
+msgid "An unknown RPC related exception occurred."
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:104
+#, python-format
+msgid ""
+"Remote error: %(exc_type)s %(value)s\n"
+"%(traceback)s."
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:121
+#, python-format
+msgid ""
+"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:"
+" \"%(method)s\" info: \"%(info)s\""
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:137
+#: cinder/openstack/common/rpc/common.py:138
+#: cinder/openstack/common/rpc/common.py:139
+msgid ""
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:143
+#, python-format
+msgid "Found duplicate message (%(msg_id)s). Skipping it."
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:147
+msgid "Invalid reuse of an RPC connection."
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:151
+#, python-format
+msgid "Specified RPC version, %(version)s, not supported by this endpoint."
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:156
+#, python-format
+msgid ""
+"Specified RPC envelope version, %(version)s, not supported by this "
+"endpoint."
+msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:166 +#: cinder/openstack/common/rpc/impl_qpid.py:164 +msgid "Failed to process message... skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:477 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:499 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:536 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:552 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:606 +#: cinder/openstack/common/rpc/impl_qpid.py:507 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:624 +#: cinder/openstack/common/rpc/impl_qpid.py:522 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:628 +#: cinder/openstack/common/rpc/impl_qpid.py:526 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:667 +#: cinder/openstack/common/rpc/impl_qpid.py:561 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:84 +#, python-format +msgid "Invalid value for qpid_topology_version: %d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:455 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:461 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:474 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:534 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:101 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:136 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:137 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:138 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:146 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:158 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:200 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:205 +msgid "You cannot send on this socket." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:267 +#, python-format +msgid "Running func with context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:305 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:387 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:437 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:443 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:475 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:481 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:497 +#, python-format +msgid "Required IPC directory does not exist at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:506 +#, python-format +msgid "Permission denied to IPC directory at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:509 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:543 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:562 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:590 +msgid "Skipping topic registration. Already registered." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:597 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:649 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:662 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:675 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:678 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:681 +#, python-format +msgid "Received message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:682 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:691 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:698 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:721 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:724 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:728 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:731 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:771 +#, python-format +msgid "topic is %s." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:815 +#, python-format +msgid "rpc_zmq_matchmaker = %(orig)s is deprecated; use %(new)s instead" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#: cinder/openstack/common/rpc/matchmaker_ring.py:79 +#: cinder/openstack/common/rpc/matchmaker_ring.py:97 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:54 +#, python-format +msgid "extra_spec requirement '%(req)s' does not match '%(cap)s'" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:67 +#, python-format +msgid "%(host_state)s fails resource_type extra_specs requirements" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:43 +msgid "Re-scheduling is disabled." +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:52 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/scheduler/driver.py:69 +msgid "Must implement host_passes_filters" +msgstr "" + +#: cinder/scheduler/driver.py:74 +msgid "Must implement find_retype_host" +msgstr "" + +#: cinder/scheduler/driver.py:78 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:82 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:98 +#, python-format +msgid "cannot place volume %(id)s on %(host)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:114 +#, python-format +msgid "No valid hosts for volume %(id)s with type %(type)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:125 +#, python-format +msgid "" +"Current host not valid for volume %(id)s with type %(type)s, migration " +"not allowed" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:156 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:174 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:207 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:259 +#, python-format +msgid "Filtered %s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:276 +#, python-format +msgid "Choosing %s" +msgstr "" + +#: cinder/scheduler/host_manager.py:264 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:269 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:294 +#, python-format +msgid "volume service is down or disabled. (host: %s)" +msgstr "" + +#: cinder/scheduler/manager.py:63 +msgid "" +"ChanceScheduler and SimpleScheduler have been deprecated due to lack of " +"support for advanced features like: volume types, volume encryption, QoS " +"etc. These two schedulers can be fully replaced by FilterScheduler with " +"certain combination of filters and weighers." 
+msgstr "" + +#: cinder/scheduler/manager.py:98 cinder/scheduler/manager.py:100 +msgid "Failed to create scheduler manager volume flow" +msgstr "" + +#: cinder/scheduler/manager.py:159 +msgid "New volume type not specified in request_spec." +msgstr "" + +#: cinder/scheduler/manager.py:174 +#, python-format +msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." +msgstr "" + +#: cinder/scheduler/manager.py:192 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:68 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%s'" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:43 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:57 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:53 +msgid "No volume_id provided to populate a request_spec from" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:116 +#, python-format +msgid "Failed to schedule_create_volume: %(cause)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:135 +#, python-format +msgid "Failed notifying on %(topic)s payload %(payload)s" +msgstr "" + +#: cinder/tests/fake_driver.py:57 cinder/volume/driver.py:784 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:76 cinder/volume/driver.py:884 +#, python-format +msgid "FAKE ISER: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:97 +msgid "local_path not implemented" +msgstr "" + +#: cinder/tests/fake_driver.py:124 cinder/tests/fake_driver.py:129 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:70 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:78 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:94 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:97 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:58 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_netapp_nfs.py:360 +#, python-format +msgid "Share %(share)s and file name %(file_name)s" +msgstr "" + +#: cinder/tests/test_rbd.py:768 cinder/volume/drivers/rbd.py:175 +msgid "flush() not supported in this version of librbd" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:260 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1507 +#, python-format +msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1510 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1515 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:60 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:61 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: 
cinder/tests/test_xiv_ds8k.py:102
+#, python-format
+msgid "Volume not found for instance %(instance_id)s."
+msgstr ""
+
+#: cinder/tests/api/contrib/test_backups.py:741
+msgid "Invalid input"
+msgstr ""
+
+#: cinder/tests/integrated/test_login.py:29
+#, python-format
+msgid "volume: %s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:32
+#, python-format
+msgid ""
+"%(message)s\n"
+"Status Code: %(_status)s\n"
+"Body: %(_body)s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:42
+msgid "Authentication error"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:50
+msgid "Authorization error"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:58
+msgid "Item not found"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:100
+#, python-format
+msgid "Doing %(method)s on %(relative_url)s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:103
+#, python-format
+msgid "Body: %s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:121
+#, python-format
+msgid "%(auth_uri)s => code %(http_status)s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:148
+#, python-format
+msgid "%(relative_uri)s => code %(http_status)s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:159
+msgid "Unexpected status code"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:166
+#, python-format
+msgid "Decoding JSON: %s"
+msgstr ""
+
+#: cinder/transfer/api.py:68
+msgid "Volume in unexpected state"
+msgstr ""
+
+#: cinder/transfer/api.py:102 cinder/volume/api.py:367
+msgid "status must be available"
+msgstr ""
+
+#: cinder/transfer/api.py:119
+#, python-format
+msgid "Failed to create transfer record for %s"
+msgstr ""
+
+#: cinder/transfer/api.py:136
+#, python-format
+msgid "Attempt to transfer %s with invalid auth key."
+msgstr ""
+
+#: cinder/transfer/api.py:156 cinder/volume/flows/api/create_volume.py:508
+#, python-format
+msgid ""
+"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume "
+"(%(d_consumed)dG of %(d_quota)dG already consumed)"
+msgstr ""
+
+#: cinder/transfer/api.py:182
+#, python-format
+msgid "Failed to update quota donating volume transfer id %s"
+msgstr ""
+
+#: cinder/transfer/api.py:199
+#, python-format
+msgid "Volume %s has been transferred."
+msgstr "" + +#: cinder/volume/api.py:143 +#, python-format +msgid "Unable to query if %s is in the availability zone set" +msgstr "" + +#: cinder/volume/api.py:171 cinder/volume/api.py:173 +msgid "Failed to create api volume flow" +msgstr "" + +#: cinder/volume/api.py:202 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:214 +#, python-format +msgid "Volume status must be available or error, but current status is: %s" +msgstr "" + +#: cinder/volume/api.py:224 +msgid "Volume cannot be deleted while migrating" +msgstr "" + +#: cinder/volume/api.py:229 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:285 cinder/volume/api.py:350 +#: cinder/volume/qos_specs.py:240 cinder/volume/volume_types.py:67 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:370 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:377 +msgid "status must be in-use to detach" +msgstr "" + +#: cinder/volume/api.py:388 +msgid "Volume status must be available to reserve" +msgstr "" + +#: cinder/volume/api.py:464 +msgid "Snapshot cannot be created while volume is migrating" +msgstr "" + +#: cinder/volume/api.py:468 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:490 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:502 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:553 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/api.py:581 cinder/volume/flows/api/create_volume.py:208 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:585 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:589 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:720 cinder/volume/api.py:772 +msgid "Volume status must be available/in-use." +msgstr "" + +#: cinder/volume/api.py:723 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/api.py:752 +msgid "Volume status must be available to extend." +msgstr "" + +#: cinder/volume/api.py:757 +#, python-format +msgid "" +"New size for extend must be greater than current size. (current: " +"%(size)s, extended: %(new_size)s)" +msgstr "" + +#: cinder/volume/api.py:778 +msgid "Volume is already part of an active migration" +msgstr "" + +#: cinder/volume/api.py:784 +msgid "volume must not have snapshots" +msgstr "" + +#: cinder/volume/api.py:797 +#, python-format +msgid "No available service named %s" +msgstr "" + +#: cinder/volume/api.py:803 +msgid "Destination host must be different than current host" +msgstr "" + +#: cinder/volume/api.py:833 +msgid "Source volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:837 +msgid "Destination volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:842 +#, python-format +msgid "Destination has migration_status %(stat)s, expected %(exp)s." +msgstr "" + +#: cinder/volume/api.py:853 +msgid "Volume status must be available to update readonly flag." 
+msgstr "" + +#: cinder/volume/api.py:862 +#, python-format +msgid "Unable to update type due to incorrect status on volume: %s" +msgstr "" + +#: cinder/volume/api.py:868 +#, python-format +msgid "Volume %s is already part of an active migration." +msgstr "" + +#: cinder/volume/api.py:874 +#, python-format +msgid "migration_policy must be 'on-demand' or 'never', passed: %s" +msgstr "" + +#: cinder/volume/api.py:887 +#, python-format +msgid "Invalid volume_type passed: %s" +msgstr "" + +#: cinder/volume/api.py:900 +#, python-format +msgid "New volume_type same as original: %s" +msgstr "" + +#: cinder/volume/api.py:915 +msgid "Retype cannot change encryption requirements" +msgstr "" + +#: cinder/volume/api.py:927 +msgid "Retype cannot change front-end qos specs for in-use volumes" +msgstr "" + +#: cinder/volume/driver.py:189 cinder/volume/drivers/netapp/nfs.py:174 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:282 +#, python-format +msgid "copy_data_between_volumes %(src)s -> %(dest)s." +msgstr "" + +#: cinder/volume/driver.py:295 cinder/volume/driver.py:309 +#, python-format +msgid "Failed to attach volume %(vol)s" +msgstr "" + +#: cinder/volume/driver.py:327 +#, python-format +msgid "Failed to copy volume %(src)s to %(dest)d" +msgstr "" + +#: cinder/volume/driver.py:340 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:358 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:394 +#, python-format +msgid "Unable to access the backend storage via the path %(path)s." +msgstr "" + +#: cinder/volume/driver.py:433 +#, python-format +msgid "Creating a new backup for volume %s." +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Restoring backup %(backup)s to volume %(volume)s." +msgstr "" + +#: cinder/volume/driver.py:474 +msgid "Extend volume not implemented" +msgstr "" + +#: cinder/volume/driver.py:533 cinder/volume/drivers/emc/emc_smis_iscsi.py:113 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:546 +#, python-format +msgid "ISCSI discovery attempt failed for:%s" +msgstr "" + +#: cinder/volume/driver.py:548 +#, python-format +msgid "Error from iscsiadm -m discovery: %s" +msgstr "" + +#: cinder/volume/driver.py:595 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:599 cinder/volume/drivers/emc/emc_smis_iscsi.py:156 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:696 +msgid "The volume driver requires the iSCSI initiator name in the connector." +msgstr "" + +#: cinder/volume/driver.py:726 cinder/volume/driver.py:845 +#: cinder/volume/drivers/eqlx.py:247 cinder/volume/drivers/lvm.py:359 +#: cinder/volume/drivers/zadara.py:650 +#: cinder/volume/drivers/emc/emc_smis_common.py:859 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:235 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:602 +#: cinder/volume/drivers/netapp/iscsi.py:1032 +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/nexenta/iscsi.py:538 +#: cinder/volume/drivers/windows/windows.py:205 +msgid "Updating volume stats" +msgstr "" + +#: cinder/volume/driver.py:924 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:203 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." 
+msgstr "" + +#: cinder/volume/manager.py:209 +msgid "" +"ThinLVMVolumeDriver is deprecated, please configure LVMISCSIDriver and " +"lvm_type=thin. Continuing with those settings." +msgstr "" + +#: cinder/volume/manager.py:228 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)" +msgstr "" + +#: cinder/volume/manager.py:235 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s" +msgstr "" + +#: cinder/volume/manager.py:244 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:257 +#, python-format +msgid "Failed to re-export volume %s: setting to error state" +msgstr "" + +#: cinder/volume/manager.py:264 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:271 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:273 +#, python-format +msgid "" +"Error encountered during re-exporting phase of driver initialization: " +"%(name)s" +msgstr "" + +#: cinder/volume/manager.py:283 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:328 cinder/volume/manager.py:330 +msgid "Failed to create manager volume flow" +msgstr "" + +#: cinder/volume/manager.py:374 cinder/volume/manager.py:391 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:380 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:389 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:394 +#, python-format +msgid "Cannot delete volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:422 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:427 +#, python-format +msgid "volume %s: glance metadata deleted" +msgstr "" + +#: cinder/volume/manager.py:430 +#, python-format +msgid "no glance metadata found for volume %s" +msgstr "" + +#: cinder/volume/manager.py:434 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:451 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:462 +#, python-format +msgid "snapshot %(snap_id)s: creating" +msgstr "" + +#: cinder/volume/manager.py:490 +#, python-format +msgid "" +"Failed updating %(snapshot_id)s metadata using the provided volumes " +"%(volume_id)s metadata" +msgstr "" + +#: cinder/volume/manager.py:496 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:508 cinder/volume/manager.py:518 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:526 +#, python-format +msgid "Cannot delete snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:556 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:559 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:579 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:583 +msgid "being attached by another host" +msgstr "" + +#: cinder/volume/manager.py:587 +msgid "being attached by different mode" +msgstr "" + +#: cinder/volume/manager.py:590 +msgid "status must be available or attaching" +msgstr "" + +#: cinder/volume/manager.py:698 +#, python-format +msgid 
"Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "" + +#: cinder/volume/manager.py:760 +#, python-format +msgid "Unable to fetch connection information from backend: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:807 +#, python-format +msgid "Unable to terminate volume connection: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:854 +msgid "failed to create new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:857 +msgid "timeout creating new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:880 +#, python-format +msgid "Failed to copy volume %(vol1)s to %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:909 +#, python-format +msgid "" +"migrate_volume_completion: completing migration for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:921 +#, python-format +msgid "" +"migrate_volume_completion is cleaning up an error for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:940 +#, python-format +msgid "Failed to delete migration source vol %(vol)s: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:976 +#, python-format +msgid "volume %s: calling driver migrate_volume" +msgstr "" + +#: cinder/volume/manager.py:1016 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/manager.py:1024 +#, python-format +msgid "" +"Unable to update stats, %(driver_name)s -%(driver_version)s " +"%(config_group)s driver is uninitialized." +msgstr "" + +#: cinder/volume/manager.py:1044 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/manager.py:1091 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to extend volume by %(s_size)sG, " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/manager.py:1103 +#, python-format +msgid "volume %s: extending" +msgstr "" + +#: cinder/volume/manager.py:1105 +#, python-format +msgid "volume %s: extended successfully" +msgstr "" + +#: cinder/volume/manager.py:1107 +#, python-format +msgid "volume %s: Error trying to extend volume" +msgstr "" + +#: cinder/volume/manager.py:1169 +msgid "Failed to update usages while retyping volume." +msgstr "" + +#: cinder/volume/manager.py:1170 +msgid "Failed to get old volume type quota reservations" +msgstr "" + +#: cinder/volume/manager.py:1190 +#, python-format +msgid "Volume %s: retyped succesfully" +msgstr "" + +#: cinder/volume/manager.py:1193 +#, python-format +msgid "" +"Volume %s: driver error when trying to retype, falling back to generic " +"mechanism." +msgstr "" + +#: cinder/volume/manager.py:1204 +msgid "Retype requires migration but is not allowed." +msgstr "" + +#: cinder/volume/manager.py:1212 +msgid "Volume must not have snapshots." 
+msgstr "" + +#: cinder/volume/qos_specs.py:57 +#, python-format +msgid "Valid consumer of QoS specs are: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:84 cinder/volume/qos_specs.py:105 +#: cinder/volume/qos_specs.py:155 cinder/volume/qos_specs.py:197 +#: cinder/volume/qos_specs.py:211 cinder/volume/qos_specs.py:225 +#: cinder/volume/volume_types.py:43 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:123 cinder/volume/qos_specs.py:140 +#: cinder/volume/qos_specs.py:272 cinder/volume/volume_types.py:52 +#: cinder/volume/volume_types.py:99 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/qos_specs.py:156 +#, python-format +msgid "Failed to get all associations of qos specs %s" +msgstr "" + +#: cinder/volume/qos_specs.py:189 +#, python-format +msgid "" +"Type %(type_id)s is already associated with another qos specs: " +"%(qos_specs_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:198 +#, python-format +msgid "Failed to associate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:212 +#, python-format +msgid "Failed to disassociate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:226 +#, python-format +msgid "Failed to disassociate qos specs %s." +msgstr "" + +#: cinder/volume/qos_specs.py:284 cinder/volume/volume_types.py:111 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/utils.py:144 +#, python-format +msgid "" +"Incorrect value error: %(blocksize)s, it may indicate that " +"'volume_dd_blocksize' was configured incorrectly. Fall back to default." +msgstr "" + +#: cinder/volume/volume_types.py:130 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:131 +#: cinder/volume/drivers/block_device.py:143 cinder/volume/drivers/lvm.py:654 +#: cinder/volume/drivers/lvm.py:669 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:157 cinder/volume/drivers/lvm.py:687 +#, python-format +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:183 cinder/volume/drivers/lvm.py:483 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:200 cinder/volume/drivers/lvm.py:504 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:272 cinder/volume/drivers/lvm.py:227 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:287 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:311 cinder/volume/drivers/lvm.py:300 +#: cinder/volume/drivers/zadara.py:509 cinder/volume/drivers/nexenta/nfs.py:189 +#, python-format +msgid "Creating clone of volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:380 +msgid "No free disk" +msgstr "" + +#: cinder/volume/drivers/block_device.py:393 +msgid "No big enough free disk" +msgstr "" + +#: cinder/volume/drivers/coraid.py:84 +#, python-format +msgid "Invalid ESM url scheme \"%s\". Supported https only." +msgstr "" + +#: cinder/volume/drivers/coraid.py:111 +msgid "Invalid REST handle name. Expected path." 
+msgstr "" + +#: cinder/volume/drivers/coraid.py:134 +#, python-format +msgid "Call to json.loads() failed: %(ex)s. Response: %(resp)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:224 +msgid "Session is expired. Relogin on ESM." +msgstr "" + +#: cinder/volume/drivers/coraid.py:244 +msgid "Reply is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:246 +msgid "Error message is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:284 +#, python-format +msgid "Coraid Appliance ping failed: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:297 +#, python-format +msgid "Volume \"%(name)s\" created with VSX LUN \"%(lun)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:311 +#, python-format +msgid "Volume \"%s\" deleted." +msgstr "" + +#: cinder/volume/drivers/coraid.py:315 +#, python-format +msgid "Resize volume \"%(name)s\" to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:319 +#, python-format +msgid "Repository for volume \"%(name)s\" found: \"%(repo)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:333 +#, python-format +msgid "Volume \"%(name)s\" resized. New size is %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:385 +msgid "Cannot create clone volume in different repository." +msgstr "" + +#: cinder/volume/drivers/coraid.py:505 +#, python-format +msgid "Initialize connection %(shelf)s/%(lun)s for %(name)s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:139 +#, python-format +msgid "" +"CLI output\n" +"%s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:154 +msgid "Reading CLI MOTD" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:158 +#, python-format +msgid "Setting CLI terminal width: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:162 +#, python-format +msgid "Sending CLI command: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:169 +msgid "Error executing EQL command" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:199 +#, python-format +msgid "EQL-driver: executing \"%s\"" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:208 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:383 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:215 cinder/volume/drivers/san/san.py:149 +#, python-format +msgid "Error running SSH command: %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:282 +#, python-format +msgid "Volume %s does not exist, it may have already been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:300 +#, python-format +msgid "EQL-driver: Setup is complete, group IP is %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:304 +msgid "Failed to setup the Dell EqualLogic driver" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:320 +#, python-format +msgid "Failed to create volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:329 +#, python-format +msgid "Volume %s was not found while trying to delete it" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:333 +#, python-format +msgid "Failed to delete volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:348 +#, python-format +msgid "Failed to create snapshot of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:361 +#, python-format +msgid "Failed to create volume from snapshot %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:374 +#, python-format +msgid "Failed to create clone of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:384 +#, python-format +msgid "Failed to delete snapshot %(snap)s of volume %(vol)s" +msgstr "" + +#: 
cinder/volume/drivers/eqlx.py:405 +#, python-format +msgid "Failed to initialize connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:415 +#, python-format +msgid "Failed to terminate connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:436 +#, python-format +msgid "Volume %s is not found!, it may have been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:440 +#, python-format +msgid "Failed to ensure export of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:459 +#, python-format +msgid "Failed to extend_volume %(name)s from %(current_size)sGB to %(new_size)sGB" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:86 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:91 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:103 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:161 +#, python-format +msgid "Cloning volume %(src)s to volume %(dst)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:166 +msgid "Volume status must be 'available'." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:202 cinder/volume/drivers/nfs.py:122 +#: cinder/volume/drivers/netapp/nfs.py:753 +#, python-format +msgid "casted to %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:216 +msgid "Snapshot status must be \"available\" to clone." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:238 +#, python-format +msgid "snapshot: %(snap)s, volume: %(vol)s, volume_size: %(size)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:257 +#, python-format +msgid "will copy from snapshot at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:275 cinder/volume/drivers/nfs.py:172 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:373 +#, python-format +msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:403 +#, python-format +msgid "nova call result: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:405 +msgid "Call to Nova to create snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:427 +msgid "Nova returned \"error\" status while creating snapshot." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:431 +#, python-format +msgid "Status of snapshot %(id)s is now %(status)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:444 +#, python-format +msgid "Timed out while waiting for Nova update for creation of snapshot %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:456 +#, python-format +msgid "create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:457 +#, python-format +msgid "volume id: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:532 +msgid "'active' must be present when writing snap_info." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:562 +#, python-format +msgid "deleting snapshot %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:566 +msgid "Volume status must be \"available\" or \"in-use\"." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:582 +#, python-format +msgid "" +"Snapshot record for %s is not present, allowing snapshot_delete to " +"proceed." 
+msgstr "" + +#: cinder/volume/drivers/glusterfs.py:587 +#, python-format +msgid "snapshot_file for this snap is %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:608 +#, python-format +msgid "No base file found for %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:625 +#, python-format +msgid "No %(base_id)s found for %(file)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:680 +#, python-format +msgid "No file found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:690 +#, python-format +msgid "No snap found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:701 +#, python-format +msgid "No file depends on %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:727 +#, python-format +msgid "Check condition failed: %s expected to be None." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:778 +msgid "Call to Nova delete snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:796 +#, python-format +msgid "status of snapshot %s is still \"deleting\"... waiting" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:802 +#, python-format +msgid "Unable to delete snapshot %(id)s, status: %(status)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:815 +#, python-format +msgid "Timed out while waiting for Nova update for deletion of snapshot %(id)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:904 +#, python-format +msgid "%s must be a valid raw or qcow2 image." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:967 +msgid "Extend volume is only supported for this driver when no snapshots exist." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:975 +#, python-format +msgid "Unrecognized backing format: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:990 +#, python-format +msgid "creating new volume at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:993 +#, python-format +msgid "file already exists at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1019 cinder/volume/drivers/nfs.py:159 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1021 +#, python-format +msgid "Available shares: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1038 +#, python-format +msgid "" +"GlusterFS share at %(dir)s is not writable by the Cinder volume service. " +"Snapshot operations will not be supported." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:96 +#, python-format +msgid "GPFS is not active. Detailed output: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:97 +#, python-format +msgid "GPFS is not running - state: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:140 +msgid "Option gpfs_mount_point_base is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:147 +msgid "Option gpfs_images_share_mode is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:153 +msgid "Option gpfs_images_dir is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:160 +#, python-format +msgid "" +"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " +"belong to different file systems" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:169 +#, python-format +msgid "" +"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in " +"cluster daemon level %(cur)s - must be at least at level %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:183 +#, python-format +msgid "%s must be an absolute path." 
+msgstr "" + +#: cinder/volume/drivers/gpfs.py:188 +#, python-format +msgid "%s is not a directory." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:197 +#, python-format +msgid "" +"The GPFS filesystem %(fs)s is not at the required release level. Current" +" level is %(cur)s, must be at least %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:556 +#, python-format +msgid "Failed to resize volume %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:604 +#, python-format +msgid "mkfs failed on volume %(vol)s, error message was: %(err)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:637 +#, python-format +msgid "" +"%s cannot be accessed. Verify that GPFS is active and file system is " +"mounted." +msgstr "" + +#: cinder/volume/drivers/lvm.py:189 +#, python-format +msgid "Unabled to delete due to existing snapshot for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:215 +#, python-format +msgid "Volume device file path %s does not exist." +msgstr "" + +#: cinder/volume/drivers/lvm.py:221 +#, python-format +msgid "Size for volume: %s not found, cannot secure delete." +msgstr "" + +#: cinder/volume/drivers/lvm.py:262 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:361 +#, python-format +msgid "Unable to update stats on non-initialized Volume Group: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:462 +#, python-format +msgid "Error creating iSCSI target, retrying creation for target: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:482 +#, python-format +msgid "volume_info:%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:518 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:519 cinder/volume/drivers/lvm.py:724 +#: cinder/volume/drivers/huawei/rest_common.py:1225 +#, python-format +msgid "%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:573 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/nfs.py:109 +msgid "Driver specific implementation needs to return mount_point_base." +msgstr "" + +#: cinder/volume/drivers/nfs.py:263 +#, python-format +msgid "Expected volume size was %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:264 +#, python-format +msgid " but size is now %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:361 +#, python-format +msgid "%s is already mounted" +msgstr "" + +#: cinder/volume/drivers/nfs.py:421 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:426 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/nfs.py:431 +#, python-format +msgid "NFS config 'nfs_oversub_ratio' invalid. Must be > 0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:439 +#, python-format +msgid "NFS config 'nfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:493 +#, python-format +msgid "Selected %s as target nfs share." 
+msgstr "" + +#: cinder/volume/drivers/nfs.py:526 +#, python-format +msgid "%s is above nfs_used_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:529 +#, python-format +msgid "%s is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:532 +#, python-format +msgid "%s reserved space is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/rbd.py:160 +#, python-format +msgid "Invalid argument - whence=%s not supported" +msgstr "" + +#: cinder/volume/drivers/rbd.py:164 +msgid "Invalid argument" +msgstr "" + +#: cinder/volume/drivers/rbd.py:183 +msgid "fileno() not supported by RBD()" +msgstr "" + +#: cinder/volume/drivers/rbd.py:210 +#, python-format +msgid "error opening rbd image %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:259 +msgid "rados and rbd python libraries not found" +msgstr "" + +#: cinder/volume/drivers/rbd.py:265 +msgid "error connecting to ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:346 cinder/volume/drivers/sheepdog.py:178 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:377 +#, python-format +msgid "clone depth exceeds limit of %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:411 +#, python-format +msgid "maximum clone depth (%d) has been reached - flattening source volume" +msgstr "" + +#: cinder/volume/drivers/rbd.py:423 +#, python-format +msgid "flattening source volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:435 +#, python-format +msgid "creating snapshot='%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:445 +#, python-format +msgid "cloning '%(src_vol)s@%(src_snap)s' to '%(dest)s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:459 +msgid "clone created successfully" +msgstr "" + +#: cinder/volume/drivers/rbd.py:468 +#, python-format +msgid "creating volume '%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:484 +#, python-format +msgid "flattening %(pool)s/%(img)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:490 +#, python-format +msgid "cloning %(pool)s/%(img)s@%(snap)s to %(dst)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:527 +msgid "volume has no backup snaps" +msgstr "" + +#: cinder/volume/drivers/rbd.py:550 +#, python-format +msgid "volume %s is not a clone" +msgstr "" + +#: cinder/volume/drivers/rbd.py:568 +#, python-format +msgid "deleting parent snapshot %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:579 +#, python-format +msgid "deleting parent %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:593 +#, python-format +msgid "volume %s no longer exists in backend" +msgstr "" + +#: cinder/volume/drivers/rbd.py:609 +msgid "volume has clone snapshot(s)" +msgstr "" + +#: cinder/volume/drivers/rbd.py:625 +#, python-format +msgid "deleting rbd volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:629 +msgid "" +"ImageBusy error raised while deleting rbd volume. This may have been " +"caused by a connection from a client that has crashed and, if so, may be " +"resolved by retrying the delete after 30 seconds has elapsed." 
+msgstr "" + +#: cinder/volume/drivers/rbd.py:642 +msgid "volume is a clone so cleaning references" +msgstr "" + +#: cinder/volume/drivers/rbd.py:696 +#, python-format +msgid "connection data: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:705 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:709 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:712 +msgid "Not an rbd snapshot" +msgstr "" + +#: cinder/volume/drivers/rbd.py:724 +#, python-format +msgid "not cloneable: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:728 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:733 +msgid "rbd image clone requires image format to be 'raw' but image {0} is '{1}'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:747 +#, python-format +msgid "Unable to open image %(loc)s: %(err)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:817 +msgid "volume backup complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:830 +msgid "volume restore complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:840 cinder/volume/drivers/sheepdog.py:195 +#, python-format +msgid "Failed to Extend Volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:845 cinder/volume/drivers/sheepdog.py:200 +#: cinder/volume/drivers/windows/windows.py:223 +#, python-format +msgid "Extend volume from %(old_size)s GB to %(new_size)s GB." +msgstr "" + +#: cinder/volume/drivers/scality.py:67 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:78 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:84 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:105 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:139 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:59 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:64 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:144 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:151 +#, python-format +msgid "" +"Failed to make httplib connection SolidFire Cluster: %s (verify san_ip " +"settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:154 +#, python-format +msgid "Failed to make httplib connection: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:161 +#, python-format +msgid "" +"Request to SolidFire cluster returned bad status: %(status)s / %(reason)s" +" (check san_login/san_password settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:166 +#, python-format +msgid "HTTP request failed, with status: %(status)s and reason: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:177 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:183 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:187 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:189 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:195 +#, python-format +msgid "Detected 
xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:202 +#: cinder/volume/drivers/solidfire.py:271 +#: cinder/volume/drivers/solidfire.py:366 +#, python-format +msgid "API response: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:222 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:253 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:315 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:398 +msgid "Failed to get model update from clone" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:410 +#, python-format +msgid "Failed volume create: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:425 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:460 +#, python-format +msgid "Failed to get SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:469 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:478 +#, python-format +msgid "Volume %s, not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:481 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:550 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:554 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:556 +msgid "This usually means the volume was never successfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:569 +#, python-format +msgid "Failed to delete SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:572 +#: cinder/volume/drivers/solidfire.py:646 +#: cinder/volume/drivers/solidfire.py:709 +#: cinder/volume/drivers/solidfire.py:734 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:575 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:579 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:587 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:638 +msgid "Entering SolidFire extend_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:660 +msgid "Leaving SolidFire extend_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:665 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:673 +msgid "Failed to get updated stats" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:703 +#: cinder/volume/drivers/solidfire.py:728 +msgid "Entering SolidFire attach_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:773 +msgid "Leaving SolidFire transfer volume" +msgstr "" + +#: cinder/volume/drivers/zadara.py:236 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:260 +#, python-format +msgid "Operation completed. 
%(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:357 +#, python-format +msgid "Pool %(name)s: %(total)sGB total, %(free)sGB free" +msgstr "" + +#: cinder/volume/drivers/zadara.py:408 cinder/volume/drivers/zadara.py:531 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:438 +#, python-format +msgid "Create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:445 cinder/volume/drivers/zadara.py:490 +#: cinder/volume/drivers/zadara.py:516 +#, python-format +msgid "Volume %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:456 +#, python-format +msgid "Delete snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:464 +#, python-format +msgid "snapshot: original volume %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:472 +#, python-format +msgid "snapshot: snapshot %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:483 +#, python-format +msgid "Creating volume from snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:496 +#, python-format +msgid "Snapshot %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:614 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:40 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:79 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:83 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:91 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:98 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:107 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:115 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:130 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:137 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:144 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:152 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:157 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:167 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:177 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:188 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:197 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:218 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:230 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:241 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:257 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:266 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:278 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:287 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:292 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:302 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:312 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:321 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:342 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. 
Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:354 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:365 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:381 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:390 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:402 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:411 +msgid "Entering delete_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:413 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:420 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:430 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:438 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:442 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:456 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:465 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:472 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:476 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:488 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:497 +#: cinder/volume/drivers/emc/emc_smis_common.py:567 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:502 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:518 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:527 +#, python-format +msgid "" +"Error Create Snapshot: %(snapshot)s Volume: %(volume)s Error: " +"%(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:535 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:541 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:545 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:551 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:559 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:574 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:590 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:599 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:611 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:621 +#, python-format +msgid "Create export: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:626 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:648 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:663 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:674 +#, python-format +msgid "Error mapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:678 +#, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:694 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:707 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:711 +#, python-format +msgid "HidePaths for volume %s completed successfully." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:724 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:739 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:744 +#, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:757 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:770 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:775 +#, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:781 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:790 +#: cinder/volume/drivers/emc/emc_smis_common.py:820 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:804 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:810 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:834 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:840 +#, python-format +msgid "Volume %s is already mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:852 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:884 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:887 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:903 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:906 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:928 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:948 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:952 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:959 +msgid "Cannot connect to ECOM server" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:971 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:984 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:997 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1010 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1054 +#, python-format +msgid "Pool %(storage_type)s is not found." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1060 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1066 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1082 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1114 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1117 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1130 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1158 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1184 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1188 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1248 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1289 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1302 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1314 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1326 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1361 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1404 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1409 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1419 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1441 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1463 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1491 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1520 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1526 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1538 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1548 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1550 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1566 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:152 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:161 +#, python-format +msgid "Cannot find device number for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:191 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:198 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:215 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:70 +#, python-format +msgid "Range: start LU: %(start)s, end LU: %(end)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:84 +#, python-format +msgid "setting LU upper (end) limit to %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:92 +#, python-format +msgid "%(element)s: %(val)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:103 cinder/volume/drivers/hds/hds.py:105 +#, python-format +msgid "XML exception reading parameter: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:178 +#, python-format +msgid "portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:197 +#, python-format +msgid "No configuration found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:250 +#, python-format +msgid "HDP not found: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:289 +#, python-format +msgid "iSCSI portal not found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:327 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:355 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is cloned." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:372 +#, python-format +msgid "LUN %(lun)s extended to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:395 +#, python-format +msgid "delete lun %(lun)s on %(name)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:480 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created from snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:503 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is created as snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:522 +#, python-format +msgid "LUN %s is deleted." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:57 +msgid "_instantiate_driver: configuration not found." 
+msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:64 +#, python-format +msgid "" +"_instantiate_driver: Loading %(protocol)s driver for Huawei OceanStor " +"%(product)s series storage arrays." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:84 +#, python-format +msgid "" +"\"Product\" or \"Protocol\" is illegal. \"Product\" should be set to " +"either T, Dorado or HVS. \"Protocol\" should be set to either iSCSI or " +"FC. Product: %(product)s Protocol: %(protocol)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:74 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s host: %(host)s initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:92 +#: cinder/volume/drivers/huawei/huawei_t.py:461 +#, python-format +msgid "initialize_connection: Target FC ports WWNS: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:101 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(ini)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:159 +#: cinder/volume/drivers/huawei/rest_common.py:1278 +#, python-format +msgid "" +"_get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " +"check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:206 +#: cinder/volume/drivers/huawei/rest_common.py:1083 +#, python-format +msgid "_get_tgt_iqn: iSCSI IP is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:234 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:248 +#, python-format +msgid "" +"_get_iscsi_tgt_port_info: Failed to get iSCSI port info. Please make sure" +" the iSCSI port IP %s is configured in array." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:323 +#: cinder/volume/drivers/huawei/huawei_t.py:552 +#, python-format +msgid "" +"terminate_connection: volume: %(vol)s, host: %(host)s, connector: " +"%(initiator)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:351 +#, python-format +msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:436 +msgid "validate_connector: The FC driver requires thewwpns in the connector." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:443 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:578 +#, python-format +msgid "_remove_fc_ports: FC port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:40 +#, python-format +msgid "parse_xml_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:129 +#, python-format +msgid "_get_host_os_type: Host %(ip)s OS type is %(os)s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:59 +#, python-format +msgid "HVS Request URL: %(url)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:60 +#, python-format +msgid "HVS Request Data: %(data)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:73 +#, python-format +msgid "HVS Response Data: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:75 +#, python-format +msgid "Bad response from server: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:82 +msgid "JSON transfer error" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:102 +#, python-format +msgid "Login error, reason is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:166 +#, python-format +msgid "" +"%(err)s\n" +"result: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:173 +#, python-format +msgid "%s \"data\" was not in result." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:208 +msgid "Can't find the Qos policy in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:246 +msgid "Can't find lun or lun group in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:280 +#, python-format +msgid "Invalid resource pool: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:298 +#, python-format +msgid "Get pool info error, pool name is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:327 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:354 +#, python-format +msgid "_stop_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:474 +#, python-format +msgid "" +"_mapping_hostgroup_and_lungroup: lun_group: %(lun_group)sview_id: " +"%(view_id)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:511 +#: cinder/volume/drivers/huawei/rest_common.py:543 +#, python-format +msgid "initiator name:%(initiator_name)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:527 +#, python-format +msgid "host lun id is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:553 +#, python-format +msgid "the free wwns %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:574 +#, python-format +msgid "the fc server properties is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:688 +#, python-format +msgid "JSON transfer data error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:874 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:937 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:964 +#, python-format +msgid "" +"PrefetchType config is wrong. PrefetchType must in 1,2,3,4. fetchtype " +"is:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:970 +msgid "Use default prefetch fetchtype. Prefetch fetchtype:Intelligent." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:982 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal.LUNcopy name: " +"%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1056 +#, python-format +msgid "" +"_get_iscsi_port_info: Failed to get iscsi port info through config IP " +"%(ip)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1101 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1124 +#, python-format +msgid "_parse_volume_type: type id: %(type_id)s config parameter is: %(params)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1157 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the configuration file " +"%(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1162 +#, python-format +msgid "The config parameters are: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1239 +#: cinder/volume/drivers/huawei/ssh_common.py:118 +#: cinder/volume/drivers/huawei/ssh_common.py:1265 +#, python-format +msgid "_check_conf_file: Config file invalid. %s must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1246 +#: cinder/volume/drivers/huawei/ssh_common.py:125 +msgid "_check_conf_file: Config file invalid. StoragePool must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1256 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1300 +msgid "Can not find lun in array" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:54 +#, python-format +msgid "ssh_read: Read SSH timeout. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:70 +msgid "No response message. Please check system status." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:101 +#: cinder/volume/drivers/huawei/ssh_common.py:1249 +msgid "do_setup" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:135 +#: cinder/volume/drivers/huawei/ssh_common.py:1287 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType is invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:169 +#, python-format +msgid "_get_login_info: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:224 +#, python-format +msgid "create_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:242 +#, python-format +msgid "" +"_name_translate: Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:279 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the element in configuration " +"file %(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:373 +#: cinder/volume/drivers/huawei/ssh_common.py:1451 +#, python-format +msgid "LUNType must be \"Thin\" or \"Thick\". LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:395 +msgid "" +"_parse_conf_lun_params: Use default prefetch type. 
Prefetch type: " +"Intelligent" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:421 +#, python-format +msgid "" +"_get_maximum_capacity_pool_id: Failed to get pool id. Please check config" +" file and make sure the StoragePool %s is created in storage array." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:436 +#, python-format +msgid "CLI command: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:466 +#, python-format +msgid "" +"_execute_cli: Can not connect to IP %(old)s, try to connect to the other " +"IP %(new)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:501 +#, python-format +msgid "_execute_cli: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:511 +#, python-format +msgid "delete_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:516 +#, python-format +msgid "delete_volume: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:570 +#, python-format +msgid "" +"create_volume_from_snapshot: snapshot name: %(snapshot)s, volume name: " +"%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:580 +#, python-format +msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:650 +#, python-format +msgid "_wait_for_luncopy: LUNcopy %(luncopyname)s status is %(status)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:688 +#, python-format +msgid "create_cloned_volume: src volume: %(src)s, tgt volume: %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:697 +#, python-format +msgid "Source volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:739 +#, python-format +msgid "" +"extend_volume: extended volume name: %(extended_name)s new added volume " +"name: %(added_name)s new added volume size: %(added_size)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:747 +#, python-format +msgid "extend_volume: volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:779 +#, python-format +msgid "create_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:785 +msgid "create_snapshot: Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:792 +#, python-format +msgid "create_snapshot: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:855 +#, python-format +msgid "delete_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:865 +#, python-format +msgid "" +"delete_snapshot: Can not delete snapshot %s for it is a source LUN of " +"LUNCopy." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:873 +#, python-format +msgid "delete_snapshot: Snapshot %(snap)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:916 +#, python-format +msgid "" +"%(func)s: %(msg)s\n" +"CLI command: %(cmd)s\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:933 +#, python-format +msgid "map_volume: Volume %s was not found." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1079 +#, python-format +msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1102 +#, python-format +msgid "remove_map: Host %s does not exist." 
+msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1106 +#, python-format +msgid "remove_map: Volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1119 +#, python-format +msgid "remove_map: No map between host %(host)s and volume %(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1138 +#, python-format +msgid "" +"_delete_map: There are IOs accessing the system. Retry to delete host map" +" %(mapid)s 10s later." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1146 +#, python-format +msgid "" +"_delete_map: Failed to delete host map %(mapid)s.\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1185 +msgid "_update_volume_stats: Updating volume stats." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1277 +msgid "_check_conf_file: Config file invalid. StoragePool must be specified." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1311 +msgid "" +"_get_device_type: The driver only supports Dorado5100 and Dorado 2100 G2 " +"now." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1389 +#, python-format +msgid "" +"create_volume_from_snapshot: %(device)s does not support create volume " +"from snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1396 +#, python-format +msgid "create_cloned_volume: %(device)s does not support clone volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1404 +#, python-format +msgid "extend_volume: %(device)s does not support extend volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1413 +#, python-format +msgid "create_snapshot: %(device)s does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:132 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:142 +#, python-format +msgid "Failed getting details for pool %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:179 +msgid "do_setup: No configured nodes." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:182 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:186 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:190 +msgid "Unable to determine system name" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:193 +msgid "Unable to determine system id" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:196 +msgid "Unable to determine pool extent size" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:203 +#: cinder/volume/drivers/netapp/iscsi.py:122 +#: cinder/volume/drivers/netapp/nfs.py:639 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:157 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:209 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:217 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:225 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:235 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:254 +msgid "The connector does not contain the required information." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:277 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:297 +msgid "CHAP secret exists for host but CHAP is disabled" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:302 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:314 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:316 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:333 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:342 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:365 +msgid "" +"Could not get FC connection information for the host-volume connection. " +"Is the host configured properly for FC connections?" 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:380 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:385 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:403 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:411 +msgid "terminate_connection: Failed to get host name from connector." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:421 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:447 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:459 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:470 +#, python-format +msgid "enter: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:474 +msgid "extend_volume: Extending a volume with snapshots is not supported." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:481 +#, python-format +msgid "leave: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:497 +#, python-format +msgid "enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:523 +#, python-format +msgid "leave: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:540 +#, python-format +msgid "" +"enter: retype: id=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:581 +#, python-format +msgid "" +"exit: retype: ild=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:622 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:623 +msgid "_update_volume_stats: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:44 +#, python-format +msgid "Could not find key in output of command %(cmd)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:64 +#, python-format +msgid "Failed to get code level (%s)." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:86 +#, python-format +msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:143 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:165 +#, python-format +msgid "Failed to find host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:178 +#, python-format +msgid "enter: get_host_from_connector: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:207 +#, python-format +msgid "leave: get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:218 +#, python-format +msgid "enter: create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:224 +msgid "create_host: Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:235 +msgid "create_host: No initiators or wwpns supplied." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:265 +#, python-format +msgid "leave: create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:275 +#, python-format +msgid "enter: map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:301 +#, python-format +msgid "" +"leave: map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host " +"%(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:311 +#, python-format +msgid "enter: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:318 +#, python-format +msgid "unmap_vol_from_host: No mapping of volume %(vol_name)s to any host found." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:324 +#, python-format +msgid "" +"unmap_vol_from_host: Multiple mappings of volume %(vol_name)s found, no " +"host specified." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:336 +#, python-format +msgid "" +"unmap_vol_from_host: No mapping of volume %(vol_name)s to host %(host) " +"found." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:348 +#, python-format +msgid "leave: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:377 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:383 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:390 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:397 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:402 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:408 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:417 +#, python-format +msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:452 +msgid "Protocol must be specified as ' iSCSI' or ' FC'." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:495 +#, python-format +msgid "enter: create_vdisk: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:498 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:525 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping%(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:535 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within theallotted %(to)d " +"seconds timeout. Terminating." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:544 +#, python-format +msgid "" +"enter: run_flashcopy: execute FlashCopy from source %(source)s to target " +"%(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:552 +#, python-format +msgid "leave: run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:572 +#, python-format +msgid "Loopcall: _check_vdisk_fc_mappings(), vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:595 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:631 +#, python-format +msgid "Calling _ensure_vdisk_no_fc_mappings: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:639 +#, python-format +msgid "enter: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:641 +#, python-format +msgid "Tried to delete non-existant vdisk %s." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:645 +#, python-format +msgid "leave: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:649 +#, python-format +msgid "enter: create_copy: snapshot %(src)s to %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:654 +#, python-format +msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:669 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt)s from vdisk %(src)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:691 +msgid "migrate_volume started without a vdisk copy in the expected pool." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:743 +#, python-format +msgid "" +"Ignore change IO group as storage code level is %(code_level)s, below " +"then 6.4.0.0" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:35 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:211 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:244 +#, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:53 +#, python-format +msgid "Expected no output from CLI command %(cmd)s, got %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:65 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:256 +#, python-format +msgid "" +"Failed to parse CLI output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:142 +msgid "Must pass wwpn or host to lsfabric." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:158 +#, python-format +msgid "Did not find success message nor error for %(fun)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:163 +msgid "" +"storwize_svc_multihostmap_enabled is set to False, not allowing multi " +"host mapping." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:347 +#, python-format +msgid "Did not find expected key %(key)s in %(fun)s: %(raw)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:382 +#, python-format +msgid "" +"Unexpected CLI response: header/row mismatch. header: %(header)s, row: " +"%(row)s" +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:419 +#, python-format +msgid "No element by given name %s." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:440 +msgid "Not a valid value for NaElement." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:444 +msgid "NaElement name cannot be null." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:468 +msgid "Type cannot be converted into NaElement." 
+msgstr "" + +#: cinder/volume/drivers/netapp/common.py:75 +msgid "Required configuration not found" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:103 +#, python-format +msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:109 +#, python-format +msgid "Storage family %s is not supported" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:116 +#, python-format +msgid "No default storage protocol found for storage family %(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:123 +#, python-format +msgid "" +"Protocol %(storage_protocol)s is not supported for storage family " +"%(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:130 +#, python-format +msgid "" +"NetApp driver of family %(storage_family)s and protocol " +"%(storage_protocol)s loaded" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:139 +msgid "Only loading netapp drivers supported." +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:158 +#, python-format +msgid "" +"The configured NetApp driver is deprecated. Please refer the link to " +"resolve the issue '%s'." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:69 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:105 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:150 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:166 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:175 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:191 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:227 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:232 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:238 +#, python-format +msgid "Failed to get LUN target details for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:249 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:252 +#, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:290 +#, python-format +msgid "Snapshot %s deletion successful" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:310 +#: cinder/volume/drivers/netapp/iscsi.py:565 +#: cinder/volume/drivers/netapp/nfs.py:99 +#: cinder/volume/drivers/netapp/nfs.py:206 +#, python-format +msgid "Resizing %s failed. Cleaning volume." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:325 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:412 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:431 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:511 +msgid "Object is not a NetApp LUN." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:543 +#, python-format +msgid "Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:545 +#, python-format +msgid "Error getting lun attribute. Exception: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:600 +#, python-format +msgid "No need to extend volume %s as it is already the requested new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:606 +#, python-format +msgid "Resizing lun %s directly to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:633 +#, python-format +msgid "Lun %(path)s geometry failed. Message - %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:662 +#, python-format +msgid "Moving lun %(name)s to %(new_name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:677 +#, python-format +msgid "Resizing lun %s using sub clone to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:684 +#, python-format +msgid "%s cannot be sub clone resized as it is hosted on compressed volume" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:690 +#, python-format +msgid "%s cannot be sub clone resized as it contains no blocks." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:707 +#, python-format +msgid "Post clone resize lun %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:718 +#, python-format +msgid "Failure staging lun %s to tmp." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:723 +#, python-format +msgid "Failure moving new cloned lun to %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:727 +#, python-format +msgid "Failure deleting staged tmp lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:730 +#, python-format +msgid "Unknown exception in post clone resize lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:732 +#, python-format +msgid "Exception details: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:736 +msgid "Getting lun block count." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:741 +#, python-format +msgid "Failure getting lun info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:785 +#, python-format +msgid "Failed to get vol with required size and extra specs for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:796 +#, python-format +msgid "Error provisioning vol %(name)s on %(volume)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:841 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:982 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:986 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1087 +msgid "Cluster ssc is not updated. No volume stats found." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1149 +#: cinder/volume/drivers/netapp/nfs.py:1080 +msgid "Unsupported ONTAP version. ONTAP version 7.3.1 and above is supported." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1153 +#: cinder/volume/drivers/netapp/nfs.py:1084 +#: cinder/volume/drivers/netapp/utils.py:320 +msgid "Api version could not be determined." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1164 +#, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1273 +#, python-format +msgid "Error finding luns for volume %s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1390 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1393 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1456 +msgid "Volume refresh job already running. Returning..." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1462 +#, python-format +msgid "Error refreshing vol capacity. Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1470 +#, python-format +msgid "Refreshing capacity info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:104 +#: cinder/volume/drivers/netapp/nfs.py:211 +#, python-format +msgid "NFS file %s not discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:224 +#, python-format +msgid "Copied image to volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:230 +#, python-format +msgid "Registering image in cache %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:237 +#, python-format +msgid "" +"Exception while registering image %(image_id)s in cache. Exception: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:250 +#, python-format +msgid "Found cache file for image %(image_id)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:263 +#, python-format +msgid "Cloning img from cache for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:271 +msgid "Image cache cleaning in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:282 +msgid "Image cache cleaning in progress." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:293 +#, python-format +msgid "Cleaning cache for share %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:298 +#, python-format +msgid "Files to be queued for deletion %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:305 +#, python-format +msgid "Exception during cache cleaning %(share)s. Message - %(ex)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:311 +msgid "Image cache cleaning done." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:336 +#, python-format +msgid "Bytes to free %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:343 +#, python-format +msgid "Delete file path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:358 +#, python-format +msgid "Deleting file at path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:363 +#, python-format +msgid "Exception during deleting %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:395 +#, python-format +msgid "Unexpected exception in cloning image %(image_id)s. 
Message: %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:411 +#, python-format +msgid "Cloning image %s from cache" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:415 +#, python-format +msgid "Cache share: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:425 +#, python-format +msgid "Unexpected exception during image cloning in share %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:431 +#, python-format +msgid "Cloning image %s directly in share" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:436 +#, python-format +msgid "Share is cloneable %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:443 +#, python-format +msgid "Image is raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:450 +#, python-format +msgid "Image will locally be converted to raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:457 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:467 +#, python-format +msgid "Performing post clone for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:474 +msgid "NFS file could not be discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:478 +msgid "Checking file for resize" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:482 +#, python-format +msgid "Resizing file to %sG" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:488 +msgid "Resizing image file failed." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:510 +msgid "Discover file retries exhausted." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:529 +#, python-format +msgid "Image location not in the expected format %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:557 +#, python-format +msgid "Found possible share matches %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:561 +msgid "Unexpected exception while short listing used share." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:599 +#, python-format +msgid "Extending volume %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:710 +#, python-format +msgid "Shares on vserver %s will only be used for provisioning." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:714 +#: cinder/volume/drivers/netapp/nfs.py:892 +msgid "No vserver set in config. SSC will be disabled." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:757 +#, python-format +msgid "Exception creating vol %(name)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:765 +#, python-format +msgid "Volume %s could not be created on shares." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:815 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:856 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:862 +#, python-format +msgid "" +"Cloning with params volume %(volume)s, src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:924 +msgid "No cluster ssc stats found. Wait for next volume stats update." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:941 +msgid "No shares found hence skipping ssc refresh." 
+msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:978 +#: cinder/volume/drivers/netapp/nfs.py:1221 +#, python-format +msgid "Shortlisted del elg files %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:983 +#: cinder/volume/drivers/netapp/nfs.py:1226 +#, python-format +msgid "Getting file usage for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:988 +#: cinder/volume/drivers/netapp/nfs.py:1231 +#, python-format +msgid "file-usage for path %(path)s is %(bytes)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1005 +#: cinder/volume/drivers/netapp/nfs.py:1268 +#, python-format +msgid "Share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1007 +#: cinder/volume/drivers/netapp/nfs.py:1270 +#, python-format +msgid "No share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1038 +#, python-format +msgid "Found volume %(vol)s for share %(share)s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1129 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1139 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:241 +#, python-format +msgid "Unexpected error while creating ssc vol list. Message - %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:272 +#, python-format +msgid "Exception querying aggr options. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:313 +#, python-format +msgid "Exception querying sis information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:347 +#, python-format +msgid "Exception querying mirror information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:379 +#, python-format +msgid "Exception querying storage disk. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:421 +#, python-format +msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:455 +#, python-format +msgid "Successfully completed stale refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:482 +#, python-format +msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:488 +#, python-format +msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:500 +msgid "Backend not a VolumeDriver." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:502 +msgid "Backend server not NaServer." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:505 +msgid "ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:517 +msgid "refresh stale ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:616 +msgid "Fatal error: User not permitted to query NetApp volumes." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:623 +#, python-format +msgid "" +"The user does not have access or sufficient privileges to use all ssc " +"apis. The ssc features %s may not work as expected." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:122 +msgid "ems executed successfully." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:124 +#, python-format +msgid "Failed to invoke ems. 
Message: %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/utils.py:137
+msgid ""
+"This is not the recommended way to use NetApp drivers. Please use "
+"NetAppDriver to achieve the functionality."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/utils.py:160
+msgid "Requires an NaServer instance."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/utils.py:317
+msgid "Unsupported Clustered Data ONTAP version."
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:99
+#, python-format
+msgid "Volume %s does not exist in Nexenta SA"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:150
+#, python-format
+msgid "Extending volume: %(id)s New size: %(size)s GB"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:166
+#, python-format
+msgid "Volume %s does not exist, it seems it was already deleted."
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:179
+#, python-format
+msgid "Cannot delete snapshot %(origin)s: %(exc)s"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:190
+#, python-format
+msgid "Creating temp snapshot of the original volume: %(volume_name)s@%(name)s"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:200
+#: cinder/volume/drivers/nexenta/nfs.py:200
+#, python-format
+msgid "Volume creation failed, deleting created snapshot %(volume_name)s@%(name)s"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:205
+#: cinder/volume/drivers/nexenta/nfs.py:205
+#, python-format
+msgid "Failed to delete zfs snapshot %(volume_name)s@%(name)s"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:223
+#, python-format
+msgid "Enter: migrate_volume: id=%(id)s, host=%(host)s"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:250
+#, python-format
+msgid "Remote NexentaStor appliance at %s should be SSH-bound."
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:267
+#, python-format
+msgid ""
+"Cannot send source snapshot %(src)s to destination %(dst)s. Reason: "
+"%(exc)s"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:275
+#, python-format
+msgid ""
+"Cannot delete temporary source snapshot %(src)s on NexentaStor Appliance:"
+" %(exc)s"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:281
+#, python-format
+msgid "Cannot delete source volume %(volume)s on NexentaStor Appliance: %(exc)s"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:318
+#, python-format
+msgid "Snapshot %s does not exist, it seems it was already deleted."
+msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:439 +#: cinder/volume/drivers/windows/windows_utils.py:230 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:449 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:461 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:471 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:481 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:514 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:522 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:83 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:88 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:89 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:90 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:96 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:85 +#, python-format +msgid "Volume %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:89 +#, python-format +msgid "Folder %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:114 +#, python-format +msgid "Creating folder on Nexenta Store %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:146 +#, python-format +msgid "Cannot destroy created folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:176 +#, python-format +msgid "Cannot destroy cloned folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:227 +#, python-format +msgid "Folder %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:237 +#: cinder/volume/drivers/nexenta/nfs.py:268 +#, python-format +msgid "Snapshot %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:302 +#, python-format +msgid "Creating regular file: %s.This may take some time." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:313 +#, python-format +msgid "Regular file: %s created." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:365 +#, python-format +msgid "Sharing folder %s on Nexenta Store" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:393 +#, python-format +msgid "Shares loaded: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/utils.py:46 +#, python-format +msgid "Invalid value: \"%s\"" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:93 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:99 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/san/hp_lefthand.py:107
+#, python-format
+msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/san/hp_lefthand.py:137
+#, python-format
+msgid ""
+"Unexpected number of virtual ips for cluster %(cluster_name)s. "
+"Result=%(_xml)s"
+msgstr ""
+
+#: cinder/volume/drivers/san/hp_lefthand.py:190
+#, python-format
+msgid "Volume info: %(volume_name)s => %(volume_attributes)s"
+msgstr ""
+
+#: cinder/volume/drivers/san/hp_lefthand.py:246
+#, python-format
+msgid "Snapshot info: %(name)s => %(attributes)s"
+msgstr ""
+
+#: cinder/volume/drivers/san/hp_lefthand.py:321
+msgid "local_path not supported"
+msgstr ""
+
+#: cinder/volume/drivers/san/san.py:169
+msgid "Specify san_password or san_private_key"
+msgstr ""
+
+#: cinder/volume/drivers/san/san.py:173
+msgid "san_ip must be set"
+msgstr ""
+
+#: cinder/volume/drivers/san/solaris.py:79
+#, python-format
+msgid "Cannot parse list-view output: %s"
+msgstr ""
+
+#: cinder/volume/drivers/san/solaris.py:174
+#, python-format
+msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:166
+#, python-format
+msgid "Invalid hp3parclient version. Version %s or greater required."
+msgstr ""
+
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:179
+#, python-format
+msgid "Failed to log in to 3PAR (%(url)s) because %(err)s"
+msgstr ""
+
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:193
+#, python-format
+msgid "HP3PARCommon %(common_ver)s, hp3parclient %(rest_ver)s"
+msgstr ""
+
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:212
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:494
+#, python-format
+msgid "CPG (%s) doesn't exist on array"
+msgstr ""
+
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:228
+#, python-format
+msgid "Failed to get domain because CPG (%s) doesn't exist on array."
+msgstr ""
+
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:247
+#, python-format
+msgid "Error extending volume %s"
+msgstr ""
+
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:347
+#, python-format
+msgid "command %s failed"
+msgstr ""
+
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:390
+#, python-format
+msgid "Error running ssh command: %s"
+msgstr ""
+
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:586
+#, python-format
+msgid "VV Set %s does not exist."
+msgstr ""
+
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:633
+#, python-format
+msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid."
+msgstr ""
+
+#: cinder/volume/drivers/san/hp/hp_3par_common.py:684
+#, python-format
+msgid ""
+"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is "
+"invalid."
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:752 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1004 +#, python-format +msgid "Failure in update_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1019 +#, python-format +msgid "Failure in clear_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1031 +#, python-format +msgid "Error attaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1039 +#, python-format +msgid "Error detaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:124 +#, python-format +msgid "Invalid IP address format '%s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:158 +#, python-format +msgid "" +"Found invalid iSCSI IP address(s) in configuration option(s) " +"hp3par_iscsi_ips or iscsi_ip_address '%s.'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:164 +msgid "At least one valid iSCSI IP address must be set." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:266 +msgid "Least busy iSCSI port not found, using first iSCSI port in list." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:75 +#, python-format +msgid "Failure while invoking function: %(func)s. Error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:162 +#, python-format +msgid "Error while terminating session: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:165 +msgid "Successfully established connection to the server." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:172 +#, python-format +msgid "Error while logging out the user: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:218 +#, python-format +msgid "" +"Not authenticated error occurred. Will create session and try API call " +"again: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:258 +#, python-format +msgid "Task: %(task)s progress: %(prog)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:262 +#, python-format +msgid "Task %s status: success." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:266 +#: cinder/volume/drivers/vmware/api.py:271 +#, python-format +msgid "Task: %(task)s failed with error: %(err)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:290 +msgid "Lease is ready." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:294 +msgid "Lease initializing..." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:304 +#, python-format +msgid "Error: unknown lease state %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:51 +#, python-format +msgid "Read %(bytes)s out of %(max)s from ThreadSafePipe." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:56 +#, python-format +msgid "Completed transfer of size %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:102 +#, python-format +msgid "Initiating image service update on image: %(image)s with meta: %(meta)s" +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:117 +#, python-format +msgid "Glance image: %s is now active." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:123 +#, python-format +msgid "Glance image: %s is in killed state." 
+msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:132 +#, python-format +msgid "Glance image %(id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:171 +#, python-format +msgid "" +"Exception during HTTP connection close in VMwareHTTPWrite. Exception is " +"%s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:203 +#: cinder/volume/drivers/vmware/read_write_util.py:292 +msgid "Could not retrieve URL from lease." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:206 +#, python-format +msgid "Opening vmdk url: %s for write." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:231 +#, python-format +msgid "Written %s bytes to vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:242 +#: cinder/volume/drivers/vmware/read_write_util.py:318 +#, python-format +msgid "Updating progress to %s percent." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:258 +#: cinder/volume/drivers/vmware/read_write_util.py:334 +msgid "Lease released." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:260 +#: cinder/volume/drivers/vmware/read_write_util.py:336 +#, python-format +msgid "Lease is already in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:295 +#, python-format +msgid "Opening vmdk url: %s for read." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:307 +#, python-format +msgid "Read %s bytes from vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:150 +#, python-format +msgid "Error(s): %s occurred in the call to RetrievePropertiesEx." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:189 +#, python-format +msgid "No such SOAP method %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:198 +#, python-format +msgid "httplib error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:209 +#, python-format +msgid "Socket error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:218 +#, python-format +msgid "Type error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:225 +#, python-format +msgid "Error in %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:112 +#, python-format +msgid "Returning spec value %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:115 +#, python-format +msgid "Invalid spec value: %s specified." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:118 +#, python-format +msgid "Returning default spec value: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:169 +#, python-format +msgid "%s not set." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:174 +#, python-format +msgid "Successfully setup driver: %(driver)s for server: %(ip)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:219 +msgid "Backing not available, no operation to be performed." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:287 +#, python-format +msgid "" +"Unable to pick datastore to accommodate %(size)s bytes from the " +"datastores: %(dss)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:293 +#, python-format +msgid "" +"Selected datastore: %(datastore)s with %(host_count)d connected host(s) " +"for the volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:375 +#, python-format +msgid "" +"Unable to find suitable datastore for volume of size: %(vol)s GB under " +"host: %(host)s. 
More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:385 +#, python-format +msgid "Unable to find host to accommodate a disk of size: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:412 +#, python-format +msgid "" +"Unable to find suitable datastore for volume: %(vol)s under host: " +"%(host)s. More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:422 +#, python-format +msgid "Unable to create volume: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:441 +#, python-format +msgid "The instance: %s for which initialize connection is called, exists." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:448 +#, python-format +msgid "There is no backing for the volume: %s. Need to create one." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:456 +msgid "The instance for which initialize connection is called, does not exist." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:461 +#, python-format +msgid "Trying to boot from an empty volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:470 +#, python-format +msgid "" +"Returning connection_info: %(info)s for volume: %(volume)s with " +"connector: %(connector)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:518 +#, python-format +msgid "Snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:523 +#, python-format +msgid "There is no backing, so will not create snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:528 +#, python-format +msgid "Successfully created snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:549 +#, python-format +msgid "Delete snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:554 +#, python-format +msgid "There is no backing, and so there is no snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:558 +#, python-format +msgid "Successfully deleted snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:586 +#, python-format +msgid "Successfully cloned new backing: %(back)s from source VMDK file: %(vmdk)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:603 +#, python-format +msgid "" +"There is no backing for the source volume: %(svol)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:633 +#, python-format +msgid "" +"There is no backing for the source snapshot: %(snap)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:642 +#: cinder/volume/drivers/vmware/vmdk.py:982 +#, python-format +msgid "" +"There is no snapshot point for the snapshoted volume: %(snap)s. Not " +"creating any backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:678 +#, python-format +msgid "Cannot create image of disk format: %s. Only vmdk disk format is accepted." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:713 +#: cinder/volume/drivers/vmware/vmdk.py:771 +#, python-format +msgid "Fetching glance image: %(id)s to server: %(host)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:722 +#: cinder/volume/drivers/vmware/vmdk.py:792 +#, python-format +msgid "Done copying image: %(id)s to volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:725 +#, python-format +msgid "" +"Exception in copy_image_to_volume: %(excep)s. Deleting the backing: " +"%(back)s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:746 +#, python-format +msgid "Exception in _select_ds_for_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:749 +#, python-format +msgid "Selected datastore %(ds)s for new volume of size %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:784 +#, python-format +msgid "Exception in copy_image_to_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:787 +#, python-format +msgid "Deleting the backing: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:808 +#, python-format +msgid "Copy glance image: %s to create new volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:842 +msgid "Upload to glance of attached volume is not supported." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:847 +#, python-format +msgid "Copy Volume: %s to new image." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:853 +#, python-format +msgid "Backing not found, creating for volume: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:872 +#, python-format +msgid "Done copying volume %(vol)s to a new image %(img)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:922 +#, python-format +msgid "Relocating volume: %(backing)s to %(ds)s and %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:961 +#: cinder/volume/drivers/vmware/volumeops.py:630 +#, python-format +msgid "Successfully created clone: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:974 +#, python-format +msgid "" +"There is no backing for the snapshoted volume: %(snap)s. Not creating any" +" backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1010 +#, python-format +msgid "" +"There is no backing for the source volume: %(src)s. Not creating any " +"backing for volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1018 +#, python-format +msgid "Linked clone of source volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:94 +#, python-format +msgid "Downloading image: %s from glance image server as a flat vmdk file." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:107 +#: cinder/volume/drivers/vmware/vmware_images.py:126 +#, python-format +msgid "Downloaded image: %s from glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:113 +#, python-format +msgid "Downloading image: %s from glance image server using HttpNfc import." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:132 +#, python-format +msgid "Uploading image: %s to the Glance image server using HttpNfc export." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:158 +#, python-format +msgid "Uploaded image: %s to the Glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:87 +#, python-format +msgid "Did not find any backing with name: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:94 +#, python-format +msgid "Deleting the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:97 +#, python-format +msgid "Initiated deletion of VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:99 +#, python-format +msgid "Deleted the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:237 +#, python-format +msgid "There are no valid datastores attached to %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:289 +#, python-format +msgid "" +"Creating folder: %(child_folder_name)s under parent folder: " +"%(parent_folder)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:306 +#, python-format +msgid "Child folder already present: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:314 +#, python-format +msgid "Created child folder: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:365 +#, python-format +msgid "Spec for creating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:383 +#, python-format +msgid "" +"Creating volume backing name: %(name)s disk_type: %(disk_type)s size_kb: " +"%(size_kb)s at folder: %(folder)s resourse pool: %(resource_pool)s " +"datastore name: %(ds_name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:395 +#, python-format +msgid "Initiated creation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:398 +#, python-format +msgid "Successfully created volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:438 +#, python-format +msgid "Spec for relocating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:452 +#, python-format +msgid "" +"Relocating backing: %(backing)s to datastore: %(ds)s and resource pool: " +"%(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:462 +#, python-format +msgid "Initiated relocation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:464 +#, python-format +msgid "" +"Successfully relocated volume backing: %(backing)s to datastore: %(ds)s " +"and resource pool: %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:474 +#, python-format +msgid "Moving backing: %(backing)s to folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:479 +#, python-format +msgid "Initiated move of volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:482 +#, python-format +msgid "Successfully moved volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:494 +#, python-format +msgid "Snapshoting backing: %(backing)s with name: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:501 +#, python-format +msgid "Initiated snapshot of volume backing: %(backing)s named: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:505 +#, python-format +msgid "Successfully created snapshot: %(snap)s for volume backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:553 +#, python-format +msgid "Deleting the snapshot: %(name)s from backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:558 +#, python-format +msgid "" +"Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " +"delete anything." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:565 +#, python-format +msgid "Initiated snapshot: %(name)s deletion for backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:569 +#, python-format +msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:597 +#, python-format +msgid "Spec for cloning the backing: %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:613 +#, python-format +msgid "" +"Creating a clone of backing: %(back)s, named: %(name)s, clone type: " +"%(type)s from snapshot: %(snap)s on datastore: %(ds)s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:627 +#, python-format +msgid "Initiated clone of backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:638 +#, python-format +msgid "Deleting file: %(file)s under datacenter: %(dc)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:646 +#, python-format +msgid "Initiated deletion via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:648 +#, python-format +msgid "Successfully deleted file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:701 +msgid "Copying disk data before snapshot of the VM" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:711 +#, python-format +msgid "Initiated copying disk data via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:713 +#, python-format +msgid "Successfully copied disk at: %(src)s to: %(dest)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:722 +#, python-format +msgid "Deleting vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:729 +#, python-format +msgid "Initiated deleting vmdk file via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:731 +#, python-format +msgid "Deleted vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/windows/windows.py:102 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:47 +#, python-format +msgid "" +"check_for_setup_error: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:53 +msgid "check_for_setup_error: there is no ISCSI traffic listening." +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:63 +#, python-format +msgid "" +"get_host_information: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:73 +#, python-format +msgid "" +"get_host_information: the ISCSI target information could not be " +"retrieved. WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:105 +#, python-format +msgid "" +"associate_initiator_with_iscsi_target: an association between initiator: " +"%(init)s and target name: %(target)s could not be established. WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:123 +#, python-format +msgid "" +"delete_iscsi_target: error when deleting the iscsi target associated with" +" target name: %(target)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:139 +#, python-format +msgid "" +"create_volume: error when creating the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:157 +#, python-format +msgid "" +"delete_volume: error when deleting the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:177 +#, python-format +msgid "" +"create_snapshot: error when creating the snapshot name: %(vol_name)s . 
" +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:193 +#, python-format +msgid "" +"create_volume_from_snapshot: error when creating the volume name: " +"%(vol_name)s from snapshot name: %(snap_name)s. WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:208 +#, python-format +msgid "" +"delete_snapshot: error when deleting the snapshot name: %(snap_name)s . " +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:223 +#, python-format +msgid "" +"create_iscsi_target: error when creating iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:240 +#, python-format +msgid "" +"remove_iscsi_target: error when deleting iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:255 +#, python-format +msgid "" +"add_disk_to_target: error adding disk associated to volume : %(vol_name)s" +" to the target name: %(tar_name)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:273 +#, python-format +msgid "" +"copy_vhd_disk: error when copying disk from source path : %(src_path)s to" +" destination path: %(dest_path)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:290 +#, python-format +msgid "" +"extend: error when extending the volume: %(vol_name)s .WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/flows/common.py:52 +#, python-format +msgid "Restoring source %(source_volid)s status to %(status)s" +msgstr "" + +#: cinder/volume/flows/common.py:58 +#, python-format +msgid "" +"Failed setting source volume %(source_volid)s back to its initial " +"%(source_status)s status" +msgstr "" + +#: cinder/volume/flows/common.py:83 +#, python-format +msgid "Updating volume: %(volume_id)s with %(update)s due to: %(reason)s" +msgstr "" + +#: cinder/volume/flows/common.py:90 +#: cinder/volume/flows/manager/create_volume.py:676 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(update)s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:81 +#, python-format +msgid "Originating snapshot status must be one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:103 +#, python-format +msgid "" +"Unable to create a volume from an originating source volume when its " +"status is not one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:126 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than the snapshot size " +"%(snap_size)sGB. They must be >= original snapshot size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:135 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than original volume size " +"%(source_size)sGB. They must be >= original volume size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:144 +#, python-format +msgid "Volume size %(size)s must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:186 +#, python-format +msgid "" +"Size of specified image %(image_size)sGB is larger than volume size " +"%(volume_size)sGB." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:194 +#, python-format +msgid "" +"Volume size %(volume_size)sGB cannot be smaller than the image minDisk " +"size %(min_disk)sGB." 
+msgstr "" + +#: cinder/volume/flows/api/create_volume.py:212 +#, python-format +msgid "Metadata property key %s greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:217 +#, python-format +msgid "Metadata property key %s value greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:254 +#, python-format +msgid "Availability zone '%s' is invalid" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:267 +msgid "Volume must be in the same availability zone as the snapshot" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:276 +msgid "Volume must be in the same availability zone as the source volume" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:315 +msgid "Volume type will be changed to be the same as the source volume." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:463 +#, python-format +msgid "Failed destroying volume entry %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:546 +#, python-format +msgid "Failed rolling back quota for %s reservations" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:590 +#, python-format +msgid "Failed to update quota for deleting volume: %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:678 +#: cinder/volume/flows/manager/create_volume.py:192 +#, python-format +msgid "Volume %s: create failed" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:682 +msgid "Unexpected build error:" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:105 +#, python-format +msgid "" +"Volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d due to " +"%(reason)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:124 +#, python-format +msgid "Volume %s: re-scheduled" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:141 +#, python-format +msgid "Updating volume %(volume_id)s with %(update)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:146 +#, python-format +msgid "Volume %s: resetting 'creating' status failed." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:165 +#, python-format +msgid "Volume %s: rescheduling failed" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:308 +#, python-format +msgid "" +"Failed notifying about the volume action %(event)s for volume " +"%(volume_id)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:345 +#, python-format +msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:347 +#, python-format +msgid "" +"Failed updating volume %(vol_id)s metadata using the provided " +"%(src_type)s %(src_id)s metadata" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:405 +#, python-format +msgid "" +"Failed fetching snapshot %(snapshot_id)s bootable flag using the provided" +" glance snapshot %(snapshot_ref_id)s volume reference" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:418 +#, python-format +msgid "Marking volume %s as bootable." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:421 +#, python-format +msgid "Failed updating volume %(volume_id)s bootable flag to true" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:448 +#, python-format +msgid "" +"Attempting download of %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s." 
+msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:455 +#: cinder/volume/flows/manager/create_volume.py:466 +#, python-format +msgid "" +"Failed to copy image %(image_id)s to volume: %(volume_id)s, error: " +"%(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:461 +#, python-format +msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:475 +#, python-format +msgid "" +"Downloaded image %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s successfully." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:512 +#, python-format +msgid "" +"Creating volume glance metadata for volume %(volume_id)s backed by image " +"%(image_id)s with: %(vol_metadata)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:526 +#, python-format +msgid "" +"Cloning %(volume_id)s from image %(image_id)s at location " +"%(image_location)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:552 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(updates)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:574 +#, python-format +msgid "Unable to create volume. Volume driver %s not initialized" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:588 +#, python-format +msgid "" +"Volume %(volume_id)s: being created using %(functor)s with specification:" +" %(volume_spec)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:611 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with creation provided " +"model %(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:619 +#, python-format +msgid "Volume %s: creating export" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:633 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with driver provided model " +"%(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:680 +#, python-format +msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" +msgstr "" + +#~ msgid "Invalid request body" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: prefix %s" +#~ msgstr "" + +#~ msgid "Schedule volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete schedule volume using flow: %s" +#~ msgstr "" + +#~ msgid "Create volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete create volume workflow" +#~ msgstr "" + +#~ msgid "Expected volume result not found" +#~ msgstr "" + +#~ msgid "Manager volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete manager volume workflow" +#~ msgstr "" + +#~ msgid "Unable to update stats, driver is uninitialized" +#~ msgstr "" + +#~ msgid "Bad reponse from server: %s" +#~ msgstr "" + +#~ msgid "" +#~ msgstr "" + +#~ msgid "%(flow)s has moved into state %(state)s from state %(old_state)s" +#~ msgstr "" + +#~ msgid "No request spec, will not reschedule" +#~ msgstr "" + +#~ msgid "No retry filter property or associated retry info, will not reschedule" +#~ msgstr "" + +#~ msgid "Retry info not present, will not reschedule" +#~ msgstr "" + +#~ msgid "Clear capabilities" +#~ msgstr "" + +#~ msgid "This usually means the volume was never succesfully created." 
+#~ msgstr "" + +#~ msgid "setting LU uppper (end) limit to %s" +#~ msgstr "" + +#~ msgid "Can't find lun or lun goup in array" +#~ msgstr "" + +#~ msgid "Volume to be restored to is smaller than the backup to be restored" +#~ msgstr "" + +#~ msgid "Volume driver '%(driver)s' not initialized." +#~ msgstr "" + +#~ msgid "in looping call" +#~ msgstr "" + +#~ msgid "Is the appropriate service running?" +#~ msgstr "" + +#~ msgid "Could not find another host" +#~ msgstr "" + +#~ msgid "Not enough allocatable volume gigabytes remaining" +#~ msgstr "" + +#~ msgid "Unable to update stats on non-intialized Volume Group: %s" +#~ msgstr "" + +#~ msgid "do_setup: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "migrate_volume started with more than one vdisk copy" +#~ msgstr "" + +#~ msgid "migrate_volume: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "Selected datastore: %s for the volume." +#~ msgstr "" + +#~ msgid "There are no valid datastores present under %s." +#~ msgstr "" + +#~ msgid "Unable to create volume, driver not initialized" +#~ msgstr "" + +#~ msgid "Migration %(migration_id)s could not be found." +#~ msgstr "" + +#~ msgid "Bad driver response status: %(status)s" +#~ msgstr "" + +#~ msgid "Instance %(instance_id)s could not be found." +#~ msgstr "" + +#~ msgid "Volume retype failed: %(reason)s" +#~ msgstr "" + +#~ msgid "SIGTERM received" +#~ msgstr "" + +#~ msgid "Child %(pid)d exited with status %(code)d" +#~ msgstr "" + +#~ msgid "_wait_child %d" +#~ msgstr "" + +#~ msgid "wait wrap.failed %s" +#~ msgstr "" + +#~ msgid "" +#~ "Report interval must be less than " +#~ "service down time. Current config: " +#~ ". Setting " +#~ "service_down_time to: %(new_service_down_time)s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target for volume %(name)s." +#~ msgstr "" + +#~ msgid "Updating iscsi target: %s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target %(name)s: %(e)s" +#~ msgstr "" + +#~ msgid "Caught '%(exception)s' exception." +#~ msgstr "" + +#~ msgid "Get code level failed" +#~ msgstr "" + +#~ msgid "do_setup: Could not get system name" +#~ msgstr "" + +#~ msgid "Failed to get license information." +#~ msgstr "" + +#~ msgid "do_setup: No configured nodes" +#~ msgstr "" + +#~ msgid "enter: _get_chap_secret_for_host: host name %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_chap_secret_for_host: host name " +#~ "%(host_name)s with secret %(chap_secret)s" +#~ msgstr "" + +#~ msgid "" +#~ "_create_host: Cannot clean host name. " +#~ "Host name is not unicode or string" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: %s" +#~ msgstr "" + +#~ msgid "leave: _get_host_from_connector: host %s" +#~ msgstr "" + +#~ msgid "enter: _create_host: host %s" +#~ msgstr "" + +#~ msgid "_create_host: No connector ports" +#~ msgstr "" + +#~ msgid "leave: _create_host: host %(host)s - %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +#~ msgstr "" + +#~ msgid "" +#~ "storwize_svc_multihostmap_enabled is set to " +#~ "False, Not allow multi host mapping" +#~ msgstr "" + +#~ msgid "volume %s mapping to multi host" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _map_vol_to_host: LUN %(result_lun)s, " +#~ "volume %(volume_name)s, host %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "leave: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "_create_host failed to return the host name." 
+#~ msgstr "" + +#~ msgid "_get_host_from_connector failed to return the host name for connector" +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to any host found." +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: Multiple mappings of " +#~ "volume %(vol_name)s found, no host " +#~ "specified." +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to host %(host_name)s found" +#~ msgstr "" + +#~ msgid "protocol must be specified as ' iSCSI' or ' FC'" +#~ msgstr "" + +#~ msgid "enter: _create_vdisk: vdisk %s " +#~ msgstr "" + +#~ msgid "" +#~ "_create_vdisk %(name)s - did not find success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find success " +#~ "message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find mapping " +#~ "id in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to prepare FlashCopy" +#~ " from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "Unexecpted mapping status %(status)s for " +#~ "mapping %(id)s. Attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Mapping %(id)s prepare failed to " +#~ "complete within the allotted %(to)d " +#~ "seconds timeout. Terminating." +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s with " +#~ "exception %(ex)s" +#~ msgstr "" + +#~ msgid "_prepare_fc_map: %s" +#~ msgstr "" + +#~ msgid "" +#~ "_start_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "enter: _run_flashcopy: execute FlashCopy from" +#~ " source %(source)s to target %(target)s" +#~ msgstr "" + +#~ msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +#~ msgstr "" + +#~ msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %(src_vdisk)s (%(src_id)s) does not exist" +#~ msgstr "" + +#~ msgid "" +#~ "_create_copy: cannot get source vdisk " +#~ "%(src)s capacity from vdisk attributes " +#~ "%(attr)s" +#~ msgstr "" + +#~ msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_flashcopy_mapping_attributes: mapping " +#~ "%(fc_map_id)s, attributes %(attributes)s" +#~ msgstr "" + +#~ msgid "enter: _is_vdisk_defined: vdisk %s " +#~ msgstr "" + +#~ msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +#~ msgstr "" + +#~ msgid "enter: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "warning: Tried to delete vdisk %s but it does not exist." 
+#~ msgstr "" + +#~ msgid "leave: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "" +#~ "_add_vdisk_copy %(name)s - did not find" +#~ " success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "_get_vdisk_copy_attrs: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "_get_pool_attrs: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "enter: _execute_command_and_parse_attributes: command %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _execute_command_and_parse_attributes:\n" +#~ "command: %(cmd)s\n" +#~ "attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "_get_hdr_dic: attribute headers and values do not match.\n" +#~ " Headers: %(header)s\n" +#~ " Values: %(row)s" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ "stdout: %(out)s\n" +#~ "stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "Did not find expected column in %(fun)s: %(hdr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Volume size %(size)s cannot be lesser" +#~ " than the snapshot size %(snap_size)s. " +#~ "They must be >= original snapshot " +#~ "size." +#~ msgstr "" + +#~ msgid "" +#~ "Clones currently disallowed when %(size)s " +#~ "< %(source_size)s. They must be >= " +#~ "original volume size." +#~ msgstr "" + +#~ msgid "" +#~ "Size of specified image %(image_size)s " +#~ "is larger than volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "" +#~ "Image minDisk size %(min_disk)s is " +#~ "larger than the volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "Updating volume %(volume_id)s with %(update)s" +#~ msgstr "" + +#~ msgid "Volume %s: resetting 'creating' status failed" +#~ msgstr "" + +#~ msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s" +#~ msgstr "" + +#~ msgid "Marking volume %s as bootable" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting download of %(image_id)s " +#~ "(%(image_location)s) to volume %(volume_id)s" +#~ msgstr "" + +#~ msgid "" +#~ "Downloaded image %(image_id)s (%(image_location)s)" +#~ " to volume %(volume_id)s successfully" +#~ msgstr "" + +#~ msgid "" +#~ "Creating volume glance metadata for " +#~ "volume %(volume_id)s backed by image " +#~ "%(image_id)s with: %(vol_metadata)s" +#~ msgstr "" + +#~ msgid "" +#~ "Cloning %(volume_id)s from image %(image_id)s" +#~ " at location %(image_location)s" +#~ msgstr "" + diff --git a/cinder/locale/kn/LC_MESSAGES/cinder.po b/cinder/locale/kn/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..ff41008fd3 --- /dev/null +++ b/cinder/locale/kn/LC_MESSAGES/cinder.po @@ -0,0 +1,10412 @@ +# Kannada translations for cinder. +# Copyright (C) 2013 ORGANIZATION +# This file is distributed under the same license as the cinder project. 
+# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Cinder\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-02-03 06:16+0000\n" +"PO-Revision-Date: 2013-09-17 14:44+0000\n" +"Last-Translator: Tom Fifield \n" +"Language-Team: Kannada " +"(http://www.transifex.com/projects/p/openstack/language/kn/)\n" +"Plural-Forms: nplurals=1; plural=0\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:102 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:66 cinder/brick/exception.py:33 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:88 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:107 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:112 +#, python-format +msgid "Volume driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:116 +#, python-format +msgid "Backup driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:120 +#, python-format +msgid "Connection to glance failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:124 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:129 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:133 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:137 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:141 +msgid "Volume driver not ready." +msgstr "" + +#: cinder/exception.py:145 cinder/brick/exception.py:74 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:150 +#, python-format +msgid "Invalid snapshot: %(reason)s" +msgstr "" + +#: cinder/exception.py:154 +#, python-format +msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:159 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:163 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:167 +msgid "The results are invalid." +msgstr "" + +#: cinder/exception.py:171 +#, python-format +msgid "Invalid input received: %(reason)s" +msgstr "" + +#: cinder/exception.py:175 +#, python-format +msgid "Invalid volume type: %(reason)s" +msgstr "" + +#: cinder/exception.py:179 +#, python-format +msgid "Invalid volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:183 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:187 +#, python-format +msgid "Invalid host: %(reason)s" +msgstr "" + +#: cinder/exception.py:193 cinder/brick/exception.py:81 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:197 +#, python-format +msgid "Invalid auth key: %(reason)s" +msgstr "" + +#: cinder/exception.py:201 +#, python-format +msgid "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" +msgstr "" + +#: cinder/exception.py:206 +msgid "Service is unavailable at this time." 
+msgstr "" + +#: cinder/exception.py:210 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:214 +#, python-format +msgid "The device in the path %(path)s is unavailable: %(reason)s" +msgstr "" + +#: cinder/exception.py:218 +#, python-format +msgid "Expected a uuid but received %(uuid)s." +msgstr "" + +#: cinder/exception.py:222 cinder/brick/exception.py:68 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:228 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:232 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "" +"Volume %(volume_id)s has no administration metadata with key " +"%(metadata_key)s." +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Invalid metadata: %(reason)s" +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Invalid metadata size: %(reason)s" +msgstr "" + +#: cinder/exception.py:250 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:255 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:264 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:269 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s deletion is not allowed with volumes " +"present with the type." +msgstr "" + +#: cinder/exception.py:274 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:278 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:282 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:287 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:291 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:295 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:328 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:332 +#, python-format +msgid "Unknown quota resources %(unknown)s." 
+msgstr "" + +#: cinder/exception.py:336 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:340 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:344 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:348 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:352 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:356 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:365 +#, python-format +msgid "Volume Type %(id)s already exists." +msgstr "" + +#: cinder/exception.py:369 +#, python-format +msgid "Volume type encryption for type %(type_id)s already exists." +msgstr "" + +#: cinder/exception.py:373 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:377 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:381 +#, python-format +msgid "Could not find parameter %(param)s" +msgstr "" + +#: cinder/exception.py:385 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:389 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:398 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:402 +#, python-format +msgid "Quota exceeded: code=%(code)s" +msgstr "" + +#: cinder/exception.py:409 +#, python-format +msgid "" +"Requested volume or snapshot exceeds allowed Gigabytes quota. Requested " +"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." +msgstr "" + +#: cinder/exception.py:415 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:419 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:423 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:427 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:432 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:436 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:440 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:444 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:449 +#, python-format +msgid "Glance metadata for volume/snapshot %(id)s cannot be found." 
+msgstr "" + +#: cinder/exception.py:453 +#, python-format +msgid "Failed to export for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:457 +#, python-format +msgid "Failed to create metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:461 +#, python-format +msgid "Failed to update metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:465 +#, python-format +msgid "Failed to copy metadata to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:469 +#, python-format +msgid "Failed to copy image to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:473 +msgid "Invalid Ceph args provided for backup rbd operation" +msgstr "" + +#: cinder/exception.py:477 +msgid "An error has occurred during backup operation" +msgstr "" + +#: cinder/exception.py:481 +msgid "Backup RBD operation failed" +msgstr "" + +#: cinder/exception.py:485 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:489 +msgid "Failed to identify volume backend." +msgstr "" + +#: cinder/exception.py:493 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "" + +#: cinder/exception.py:497 +#, python-format +msgid "Connection to swift failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:501 +#, python-format +msgid "Transfer %(transfer_id)s could not be found." +msgstr "" + +#: cinder/exception.py:505 +#, python-format +msgid "Volume migration failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:509 +#, python-format +msgid "SSH command injection detected: %(command)s" +msgstr "" + +#: cinder/exception.py:513 +#, python-format +msgid "QoS Specs %(specs_id)s already exists." +msgstr "" + +#: cinder/exception.py:517 +#, python-format +msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:522 +#, python-format +msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "No such QoS spec %(specs_id)s." +msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:536 +#, python-format +msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:541 +#, python-format +msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." +msgstr "" + +#: cinder/exception.py:546 +#, python-format +msgid "Invalid qos specs: %(reason)s" +msgstr "" + +#: cinder/exception.py:550 +#, python-format +msgid "QoS Specs %(specs_id)s is still associated with entities." +msgstr "" + +#: cinder/exception.py:554 +#, python-format +msgid "key manager error: %(reason)s" +msgstr "" + +#: cinder/exception.py:560 +msgid "Coraid Cinder Driver exception." +msgstr "" + +#: cinder/exception.py:564 +msgid "Failed to encode json data." +msgstr "" + +#: cinder/exception.py:568 +msgid "Login on ESM failed." +msgstr "" + +#: cinder/exception.py:572 +msgid "Relogin on ESM failed." +msgstr "" + +#: cinder/exception.py:576 +#, python-format +msgid "Group with name \"%(group_name)s\" not found." +msgstr "" + +#: cinder/exception.py:580 +#, python-format +msgid "ESM configure request failed: %(message)s." +msgstr "" + +#: cinder/exception.py:584 +#, python-format +msgid "Coraid ESM not available with reason: %(reason)s." +msgstr "" + +#: cinder/exception.py:589 +msgid "Zadara Cinder Driver exception." 
+msgstr "" + +#: cinder/exception.py:593 +#, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:597 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:601 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:605 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:609 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:613 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:618 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:622 +msgid "SolidFire Cinder Driver exception" +msgstr "" + +#: cinder/exception.py:626 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:630 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:636 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:641 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:645 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:649 cinder/exception.py:662 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:654 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:658 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/manager.py:133 +msgid "Notifying Schedulers of capabilities ..." +msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:105 +#, python-format +msgid "" +"Default quota for resource: %(res)s is set by the default quota flag: " +"quota_%(res)s, it is now deprecated. Please use the the default quota " +"class for default quota." +msgstr "" + +#: cinder/quota.py:748 +#, python-format +msgid "Created reservations %s" +msgstr "" + +#: cinder/quota.py:770 +#, python-format +msgid "Failed to commit reservations %s" +msgstr "" + +#: cinder/quota.py:790 +#, python-format +msgid "Failed to roll back reservations %s" +msgstr "" + +#: cinder/quota.py:876 +msgid "Cannot register resource" +msgstr "" + +#: cinder/quota.py:879 +msgid "Cannot register resources" +msgstr "" + +#: cinder/quota_utils.py:46 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume - " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/quota_utils.py:56 cinder/transfer/api.py:168 +#: cinder/volume/flows/api/create_volume.py:520 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/service.py:95 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:108 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:148 +#, python-format +msgid "" +"Report interval must be less than service down time. Current config " +"service_down_time: %(service_down_time)s, report_interval for this: " +"service is: %(report_interval)s. 
Setting global service_down_time to: " +"%(new_down_time)s" +msgstr "" + +#: cinder/service.py:216 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:255 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:270 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:276 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:298 +#, python-format +msgid "" +"Value of config option %(name)s_workers must be integer greater than 1. " +"Input value ignored." +msgstr "" + +#: cinder/service.py:373 +msgid "serve() can only be called once" +msgstr "" + +#: cinder/service.py:379 cinder/openstack/common/service.py:166 +#: cinder/openstack/common/service.py:384 +msgid "Full set of CONF:" +msgstr "" + +#: cinder/service.py:387 +#, python-format +msgid "%s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Can not translate %s to integer." +msgstr "" + +#: cinder/utils.py:127 +#, python-format +msgid "May specify only one of %s" +msgstr "" + +#: cinder/utils.py:212 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:228 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:412 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:423 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:698 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:759 +#, python-format +msgid "Volume driver %s not initialized" +msgstr "" + +#: cinder/wsgi.py:127 cinder/openstack/common/sslutils.py:50 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: cinder/wsgi.py:130 cinder/openstack/common/sslutils.py:53 +#, python-format +msgid "Unable to find ca_file : %s" +msgstr "" + +#: cinder/wsgi.py:133 cinder/openstack/common/sslutils.py:56 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: cinder/wsgi.py:136 cinder/openstack/common/sslutils.py:59 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:169 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:206 +#, python-format +msgid "Started %(name)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:244 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:313 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." 
+msgstr "" + +#: cinder/api/common.py:92 cinder/api/common.py:126 cinder/volume/api.py:266 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:95 cinder/api/common.py:130 cinder/volume/api.py:263 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:120 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:134 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:162 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:189 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:182 +msgid "Initializing extension manager." +msgstr "" + +#: cinder/api/extensions.py:197 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:235 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:236 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:240 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:256 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:262 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:276 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:287 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:356 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:266 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:463 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:786 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:907 +msgid "subclasses must implement construct()!" 
+msgstr "" + +#: cinder/api/contrib/admin_actions.py:81 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:227 +#: cinder/api/contrib/volume_transfer.py:157 +#: cinder/api/contrib/volume_transfer.py:193 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:224 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:60 +msgid "Snapshot not found." +msgstr "" + +#: cinder/api/contrib/hosts.py:86 cinder/api/openstack/wsgi.py:245 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:136 +#, python-format +msgid "Host '%s' could not be found." +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:168 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:180 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:206 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:214 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:111 +msgid "Please specify a name for QoS specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:220 +msgid "Failed to disassociate qos specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:222 +msgid "Qos specs still in use." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:298 +#: cinder/api/contrib/qos_specs_manage.py:351 +msgid "Volume Type id must not be None." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:72 +msgid "Missing required element quota_class_set in request body." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:81 +msgid "Quota class limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:85 +msgid "Quota class limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:60 +msgid "Quota limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quotas.py:65 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:100 +msgid "Missing required element quota_set in request body." +msgstr "" + +#: cinder/api/contrib/quotas.py:111 +#, python-format +msgid "Bad key(s) in quota set: %s" +msgstr "" + +#: cinder/api/contrib/scheduler_hints.py:36 +msgid "Malformed scheduler_hints attribute" +msgstr "" + +#: cinder/api/contrib/services.py:84 +msgid "" +"Query by service parameter is deprecated. Please use binary parameter " +"instead." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:51 +msgid "'status' must be specified." 
+msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:61 +#, python-format +msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:67 +#, python-format +msgid "" +"Provided snapshot status %(provided)s not allowed for snapshot with " +"status %(current)s." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:79 +msgid "progress must be an integer percentage" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:101 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:105 +#: cinder/api/v1/snapshot_metadata.py:75 cinder/api/v1/volume_metadata.py:75 +#: cinder/api/v2/snapshot_metadata.py:75 cinder/api/v2/volume_metadata.py:74 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:108 +#: cinder/api/v1/snapshot_metadata.py:79 cinder/api/v1/volume_metadata.py:79 +#: cinder/api/v2/snapshot_metadata.py:79 cinder/api/v2/volume_metadata.py:78 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:150 +msgid "" +"Key names can only contain alphanumeric characters, underscores, periods," +" colons and hyphens." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:99 +#, python-format +msgid "" +"Invalid request to attach volume to an instance %(instance_uuid)s and a " +"host %(host_name)s simultaneously" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:107 +msgid "Invalid request to attach volume to an invalid target" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:111 +msgid "" +"Invalid request to attach volume with an invalid mode. Attaching mode " +"should be 'rw' or 'ro'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:196 +msgid "Unable to fetch connection information from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:216 +msgid "Unable to terminate volume connection from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:229 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:237 +msgid "Bad value for 'force' parameter." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:240 +msgid "'force' is not string or bool." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:280 +msgid "New volume size must be specified as an integer." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:299 +msgid "Must specify readonly in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:307 +msgid "Bad value for 'readonly'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:311 +msgid "'readonly' not string or bool" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:325 +msgid "New volume type must be specified." 
+msgstr "" + +#: cinder/api/contrib/volume_transfer.py:131 +msgid "Listing volume transfers" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:147 +#, python-format +msgid "Creating new volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:162 +#, python-format +msgid "Creating transfer of volume %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:183 +#, python-format +msgid "Accepting volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:196 +#, python-format +msgid "Accepting transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:217 +#, python-format +msgid "Delete transfer with id: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:64 +msgid "key_size must be non-negative" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:67 +msgid "key_size must be an integer" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:73 +msgid "provider must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:75 +msgid "control_location must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:83 +#, python-format +msgid "Valid control location are: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:111 +msgid "Cannot create encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:115 +msgid "Create body is not valid." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:157 +msgid "Cannot delete encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/middleware/auth.py:108 +msgid "Invalid service catalog json." +msgstr "" + +#: cinder/api/middleware/fault.py:44 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:53 cinder/api/openstack/wsgi.py:984 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/fault.py:69 +#, python-format +msgid "%(exception)s: %(explanation)s" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:55 cinder/api/middleware/sizelimit.py:64 +#: cinder/api/middleware/sizelimit.py:78 +msgid "Request is too large." +msgstr "" + +#: cinder/api/openstack/__init__.py:69 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:80 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:104 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:126 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." 
+msgstr "" + +#: cinder/api/openstack/wsgi.py:220 cinder/api/openstack/wsgi.py:634 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:639 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:677 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:682 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:685 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:793 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:799 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:803 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:914 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:917 cinder/api/openstack/wsgi.py:930 +#: cinder/api/v1/snapshot_metadata.py:53 cinder/api/v1/snapshot_metadata.py:71 +#: cinder/api/v1/snapshot_metadata.py:96 cinder/api/v1/snapshot_metadata.py:121 +#: cinder/api/v1/volume_metadata.py:53 cinder/api/v1/volume_metadata.py:71 +#: cinder/api/v1/volume_metadata.py:96 cinder/api/v1/volume_metadata.py:121 +#: cinder/api/v2/snapshot_metadata.py:53 cinder/api/v2/snapshot_metadata.py:71 +#: cinder/api/v2/snapshot_metadata.py:96 cinder/api/v2/snapshot_metadata.py:121 +#: cinder/api/v2/volume_metadata.py:52 cinder/api/v2/volume_metadata.py:70 +#: cinder/api/v2/volume_metadata.py:95 cinder/api/v2/volume_metadata.py:120 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:927 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:939 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:987 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:139 cinder/api/v2/limits.py:138 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:264 cinder/api/v2/limits.py:261 +msgid "This request was rate-limited." 
+msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:37 cinder/api/v1/snapshot_metadata.py:117 +#: cinder/api/v1/snapshot_metadata.py:156 cinder/api/v2/snapshot_metadata.py:37 +#: cinder/api/v2/snapshot_metadata.py:117 +#: cinder/api/v2/snapshot_metadata.py:156 +msgid "snapshot does not exist" +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:139 +#: cinder/api/v1/snapshot_metadata.py:149 cinder/api/v1/volume_metadata.py:139 +#: cinder/api/v1/volume_metadata.py:149 cinder/api/v2/snapshot_metadata.py:139 +#: cinder/api/v2/snapshot_metadata.py:149 cinder/api/v2/volume_metadata.py:138 +#: cinder/api/v2/volume_metadata.py:148 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:119 cinder/api/v2/snapshots.py:120 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:184 +msgid "'volume_id' must be specified" +msgstr "" + +#: cinder/api/v1/snapshots.py:182 cinder/api/v2/snapshots.py:193 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:186 cinder/api/v2/snapshots.py:202 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:37 cinder/api/v1/volume_metadata.py:117 +#: cinder/api/v1/volume_metadata.py:156 cinder/api/v2/volume_metadata.py:36 +#: cinder/api/v2/volume_metadata.py:116 cinder/api/v2/volume_metadata.py:155 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:111 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:290 cinder/api/v2/volumes.py:228 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:344 cinder/api/v1/volumes.py:348 +#: cinder/api/v2/volumes.py:298 cinder/api/v2/volumes.py:302 +msgid "Invalid imageRef provided." +msgstr "" + +#: cinder/api/v1/volumes.py:388 cinder/api/v2/volumes.py:354 +#, python-format +msgid "snapshot id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:401 +#, python-format +msgid "source vol id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:412 cinder/api/v2/volumes.py:377 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/v1/volumes.py:496 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/snapshots.py:111 cinder/api/v2/snapshots.py:126 +#: cinder/api/v2/snapshots.py:267 +msgid "Snapshot could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:174 cinder/api/v2/snapshots.py:234 +#: cinder/api/v2/volumes.py:313 cinder/api/v2/volumes.py:419 +#, python-format +msgid "Missing required element '%s' in request body" +msgstr "" + +#: cinder/api/v2/snapshots.py:190 cinder/api/v2/volumes.py:217 +#: cinder/api/v2/volumes.py:234 cinder/api/v2/volumes.py:449 +msgid "Volume could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:230 cinder/api/v2/volumes.py:415 +msgid "Missing request body" +msgstr "" + +#: cinder/api/v2/types.py:70 +msgid "Volume type not found" +msgstr "" + +#: cinder/api/v2/volumes.py:237 +msgid "Volume cannot be deleted while in attached state" +msgstr "" + +#: cinder/api/v2/volumes.py:343 +msgid "Volume type not found." 
+msgstr "" + +#: cinder/api/v2/volumes.py:366 +#, python-format +msgid "source volume id:%s not found" +msgstr "" + +#: cinder/api/v2/volumes.py:472 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:66 +msgid "Backup status must be available or error" +msgstr "" + +#: cinder/backup/api.py:105 +msgid "Volume to be backed up must be available" +msgstr "" + +#: cinder/backup/api.py:140 +msgid "Backup status must be available" +msgstr "" + +#: cinder/backup/api.py:145 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:154 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:170 +msgid "Volume to be restored to must be available" +msgstr "" + +#: cinder/backup/api.py:176 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." +msgstr "" + +#: cinder/backup/api.py:181 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:97 +msgid "NULL host not allowed for volume backend lookup." +msgstr "" + +#: cinder/backup/manager.py:100 +#, python-format +msgid "Checking hostname '%s' for backend info." +msgstr "" + +#: cinder/backup/manager.py:107 +#, python-format +msgid "Backend not found in hostname (%s) so using default." +msgstr "" + +#: cinder/backup/manager.py:117 +#, python-format +msgid "Manager requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:120 cinder/backup/manager.py:132 +msgid "Fetching default backend." +msgstr "" + +#: cinder/backup/manager.py:123 +#, python-format +msgid "Volume manager for backend '%s' does not exist." +msgstr "" + +#: cinder/backup/manager.py:129 +#, python-format +msgid "Driver requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:147 +#, python-format +msgid "" +"Registering backend %(backend)s (host=%(host)s " +"backend_name=%(backend_name)s)." +msgstr "" + +#: cinder/backup/manager.py:154 +#, python-format +msgid "Registering default backend %s." +msgstr "" + +#: cinder/backup/manager.py:158 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)." +msgstr "" + +#: cinder/backup/manager.py:165 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s." +msgstr "" + +#: cinder/backup/manager.py:184 +msgid "Cleaning up incomplete backup operations." +msgstr "" + +#: cinder/backup/manager.py:189 +#, python-format +msgid "Resetting volume %s to available (was backing-up)." +msgstr "" + +#: cinder/backup/manager.py:194 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)." +msgstr "" + +#: cinder/backup/manager.py:206 +#, python-format +msgid "Resetting backup %s to error (was creating)." +msgstr "" + +#: cinder/backup/manager.py:212 +#, python-format +msgid "Resetting backup %s to available (was restoring)." +msgstr "" + +#: cinder/backup/manager.py:217 +#, python-format +msgid "Resuming delete on backup: %s." +msgstr "" + +#: cinder/backup/manager.py:225 +#, python-format +msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:237 +#, python-format +msgid "" +"Create backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s." 
+msgstr "" + +#: cinder/backup/manager.py:249 +#, python-format +msgid "" +"Create backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:282 +#, python-format +msgid "Create backup finished. backup: %s." +msgstr "" + +#: cinder/backup/manager.py:286 +#, python-format +msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:299 +#, python-format +msgid "" +"Restore backup aborted: expected volume status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:310 +#, python-format +msgid "" +"Restore backup aborted: expected backup status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:329 +#, python-format +msgid "" +"Restore backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:360 +#, python-format +msgid "" +"Restore backup finished, backup %(backup_id)s restored to volume " +"%(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:379 +#, python-format +msgid "Delete backup started, backup: %s." +msgstr "" + +#: cinder/backup/manager.py:386 +#, python-format +msgid "" +"Delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:399 +#, python-format +msgid "" +"Delete backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:422 +#, python-format +msgid "Delete backup finished, backup %s deleted." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:116 +msgid "" +"rbd striping not supported - ignoring configuration settings for rbd " +"striping" +msgstr "" + +#: cinder/backup/drivers/ceph.py:147 +#, python-format +msgid "invalid user '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:213 +msgid "backup_id required" +msgstr "" + +#: cinder/backup/drivers/ceph.py:224 +#, python-format +msgid "discarding %(length)s bytes from offset %(offset)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:232 +#, python-format +msgid "writing zeroes chunk %d" +msgstr "" + +#: cinder/backup/drivers/ceph.py:246 +#, python-format +msgid "transferring data between '%(src)s' and '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:250 +#, python-format +msgid "%(chunks)s chunks of %(bytes)s bytes to be transferred" +msgstr "" + +#: cinder/backup/drivers/ceph.py:269 +#, python-format +msgid "transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:279 +#, python-format +msgid "transferring remaining %s bytes" +msgstr "" + +#: cinder/backup/drivers/ceph.py:295 +#, python-format +msgid "creating base image '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:322 cinder/backup/drivers/ceph.py:603 +#, python-format +msgid "deleting backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:325 +msgid "no backup snapshot to delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:361 +#, python-format +msgid "trying diff format name format basename='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:369 +#, python-format +msgid "image %s not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:377 +#, python-format +msgid "base image still has %s snapshots so skipping base image delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:382 +#, python-format +msgid "deleting base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:389 +#, python-format +msgid "image busy, retrying %(retries)s more time(s) in %(delay)ss" +msgstr "" + +#: cinder/backup/drivers/ceph.py:394 +msgid "max retries reached - raising error" +msgstr "" + +#: cinder/backup/drivers/ceph.py:397 +#, python-format +msgid "base backup image='%s' deleted)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:407 +#, python-format +msgid "deleting source snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:453 +#, python-format +msgid "performing differential transfer from '%(src)s' to '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:478 +#, python-format +msgid "rbd diff op failed - (ret=%(ret)s stderr=%(stderr)s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:488 +#, python-format +msgid "image '%s' not found - trying diff format name" +msgstr "" + +#: cinder/backup/drivers/ceph.py:493 +#, python-format +msgid "diff format image '%s' not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:528 +#, python-format +msgid "using --from-snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:543 +#, python-format +msgid "source snap '%s' is stale so deleting" +msgstr "" + +#: cinder/backup/drivers/ceph.py:555 +#, python-format +msgid "" +"snap='%(snap)s' does not exist in base image='%(base)s' - aborting " +"incremental backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:566 +#, python-format +msgid "creating backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:586 +#, python-format +msgid "differential backup transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:595 +msgid "differential backup transfer failed" +msgstr "" + +#: 
cinder/backup/drivers/ceph.py:625 +#, python-format +msgid "creating base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:634 +msgid "copying data" +msgstr "" + +#: cinder/backup/drivers/ceph.py:694 +#, python-format +msgid "looking for snapshot of backup base '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:697 +#, python-format +msgid "backup base '%s' has no snapshots" +msgstr "" + +#: cinder/backup/drivers/ceph.py:704 +#, python-format +msgid "backup '%s' has no snapshot" +msgstr "" + +#: cinder/backup/drivers/ceph.py:708 +#, python-format +msgid "backup should only have one snapshot but instead has %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:713 +#, python-format +msgid "found snapshot '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:734 +msgid "need non-zero volume size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:751 +#, python-format +msgid "Starting backup of volume='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:764 +msgid "forcing full backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:776 +#, python-format +msgid "backup '%s' finished." +msgstr "" + +#: cinder/backup/drivers/ceph.py:834 +msgid "adjusting restore vol size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:846 +#, python-format +msgid "trying incremental restore from base='%(base)s' snap='%(snap)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:858 +msgid "differential restore failed, trying full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:869 +#, python-format +msgid "restore transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:916 +#, python-format +msgid "rbd has %s extents" +msgstr "" + +#: cinder/backup/drivers/ceph.py:938 +msgid "dest volume is original volume - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:959 +msgid "destination has extents - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:964 +#, python-format +msgid "no restore point found for backup='%s', forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:995 +msgid "forcing full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1005 +#, python-format +msgid "starting restore from Ceph backup=%(src)s to volume=%(dest)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1016 +msgid "volume_file does not support fileno() so skipping fsync()" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1021 +msgid "restore finished successfully." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:1023 +#, python-format +msgid "restore finished with error - %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1029 +#, python-format +msgid "delete started for backup=%s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1034 +msgid "rbd image not found but continuing anyway so that db entry can be removed" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1037 +#, python-format +msgid "delete '%s' finished with warning" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1039 +#, python-format +msgid "delete '%s' finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:106 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:123 +#, python-format +msgid "single_user auth mode enabled, but %(param)s not set" +msgstr "" + +#: cinder/backup/drivers/swift.py:141 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:146 +#, python-format +msgid "container %s does not exist" +msgstr "" + +#: cinder/backup/drivers/swift.py:151 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/drivers/swift.py:157 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:173 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:182 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:192 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:209 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/drivers/swift.py:214 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:219 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:224 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/drivers/swift.py:234 +#, python-format +msgid "volume size %d is invalid." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:248 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:271 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/drivers/swift.py:278 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:287 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/drivers/swift.py:291 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/drivers/swift.py:297 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:301 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:304 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:312 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/drivers/swift.py:328 cinder/backup/drivers/tsm.py:324 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/drivers/swift.py:345 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/drivers/swift.py:350 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:356 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/drivers/swift.py:362 +#, python-format +msgid "" +"restoring object from swift. backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:378 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/drivers/swift.py:401 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:409 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:423 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:428 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:432 cinder/backup/drivers/tsm.py:378 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:446 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:455 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:458 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:468 cinder/backup/drivers/tsm.py:440 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/backup/drivers/tsm.py:85 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to create device hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:143 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to obtain backup success notification from " +"server.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:173 +#, python-format +msgid "" +"restore: %(vol_id)s Failed.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:199 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a block device." +msgstr "" + +#: cinder/backup/drivers/tsm.py:206 +#, python-format +msgid "backup: %(vol_id)s Failed. Cannot obtain real path to device %(path)s." +msgstr "" + +#: cinder/backup/drivers/tsm.py:213 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a file." +msgstr "" + +#: cinder/backup/drivers/tsm.py:260 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to remove backup hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:286 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to TSM, volume path: " +"%(volume_path)s," +msgstr "" + +#: cinder/backup/drivers/tsm.py:298 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:308 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:338 +#, python-format +msgid "" +"restore: starting restore of backup from TSM to volume %(volume_id)s, " +"backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:352 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:362 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:413 +#, python-format +msgid "" +"delete: %(vol_id)s Failed to run dsmc with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:421 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments with " +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:432 +#, python-format +msgid "" +"delete: %(vol_id)s Failed with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/brick/exception.py:55 +#, python-format +msgid "Exception in string format operation. 
msg='%s'" +msgstr "" + +#: cinder/brick/exception.py:85 +msgid "We are unable to locate any Fibre Channel devices." +msgstr "" + +#: cinder/brick/exception.py:89 +msgid "Unable to find a Fibre Channel volume device." +msgstr "" + +#: cinder/brick/exception.py:93 +#, python-format +msgid "Volume device not found at %(device)s." +msgstr "" + +#: cinder/brick/exception.py:97 +#, python-format +msgid "Unable to find Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:101 +#, python-format +msgid "Failed to create Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:105 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:109 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:113 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:117 +#, python-format +msgid "Connect to volume via protocol %(protocol)s not supported." +msgstr "" + +#: cinder/brick/initiator/connector.py:127 +#, python-format +msgid "Invalid InitiatorConnector protocol specified %(protocol)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:140 +#, python-format +msgid "Failed to access the device on the path %(path)s: %(error)s %(info)s." +msgstr "" + +#: cinder/brick/initiator/connector.py:229 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. Try" +" number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:242 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:317 +#, python-format +msgid "Could not find the iSCSI Initiator File %s" +msgstr "" + +#: cinder/brick/initiator/connector.py:609 +msgid "We are unable to locate any Fibre Channel devices" +msgstr "" + +#: cinder/brick/initiator/connector.py:619 +#, python-format +msgid "Looking for Fibre Channel dev %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:629 +msgid "Fibre Channel volume device not found." +msgstr "" + +#: cinder/brick/initiator/connector.py:633 +#, python-format +msgid "Fibre volume not yet found. Will rescan & retry. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:649 +#, python-format +msgid "Found Fibre Channel volume %(name)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:658 +#, python-format +msgid "Multipath device discovered %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:776 +#, python-format +msgid "AoE volume not yet found at: %(path)s. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:789 +#, python-format +msgid "Found AoE device %(path)s (after %(tries)s rediscover)" +msgstr "" + +#: cinder/brick/initiator/connector.py:815 +#, python-format +msgid "aoe-discover: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:825 +#, python-format +msgid "aoe-revalidate %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:834 +#, python-format +msgid "aoe-flush %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:858 +msgid "" +"Connection details not present. RemoteFsClient may not initialize " +"properly." 
+msgstr "" + +#: cinder/brick/initiator/connector.py:915 +msgid "Invalid connection_properties specified no device_path attribute" +msgstr "" + +#: cinder/brick/initiator/linuxfc.py:50 cinder/brick/initiator/linuxfc.py:56 +msgid "systool is not installed" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:99 +#: cinder/brick/initiator/linuxscsi.py:107 +#: cinder/brick/initiator/linuxscsi.py:124 +#, python-format +msgid "multipath call failed exit (%(code)s)" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:145 +#, python-format +msgid "Couldn't find multipath device %(line)s" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:149 +#, python-format +msgid "Found multipath device = %(mdev)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:140 +msgid "Attempting recreate of backing lun..." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:158 +#, python-format +msgid "" +"Failed to recover attempt to create iscsi backing lun for volume " +"id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:177 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:184 +#, python-format +msgid "" +"Created volume path %(vp)s,\n" +"content: %(vc)%" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:216 cinder/brick/iscsi/iscsi.py:365 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:227 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:258 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:262 +#, python-format +msgid "Volume path %s does not exist, nothing to remove." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:280 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:290 cinder/brick/iscsi/iscsi.py:550 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:375 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:469 +msgid "cinder-rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:489 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:513 cinder/brick/iscsi/iscsi.py:522 +#, python-format +msgid "Failed to create iscsi target for volume id:%s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:532 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:542 +#, python-format +msgid "Failed to remove iscsi target for volume id:%s." 
+msgstr "" + +#: cinder/brick/iscsi/iscsi.py:571 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 cinder/brick/local_dev/lvm.py:158 +#: cinder/brick/local_dev/lvm.py:474 cinder/brick/local_dev/lvm.py:503 +#: cinder/brick/local_dev/lvm.py:546 cinder/brick/local_dev/lvm.py:609 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 cinder/brick/local_dev/lvm.py:159 +#: cinder/brick/local_dev/lvm.py:475 cinder/brick/local_dev/lvm.py:504 +#: cinder/brick/local_dev/lvm.py:547 cinder/brick/local_dev/lvm.py:610 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 cinder/brick/local_dev/lvm.py:160 +#: cinder/brick/local_dev/lvm.py:476 cinder/brick/local_dev/lvm.py:505 +#: cinder/brick/local_dev/lvm.py:548 cinder/brick/local_dev/lvm.py:611 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, python-format +msgid "Unable to locate Volume Group %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:157 +msgid "Error querying thin pool about data_percent" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:370 +#, python-format +msgid "Unable to find VG: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:420 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:473 +msgid "Error creating Volume" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:489 +#, python-format +msgid "Unable to find LV: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:502 +msgid "Error creating snapshot" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:545 +msgid "Error activating LV" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:563 +#, python-format +msgid "Error reported running lvremove: CMD: %(command)s, RESPONSE: %(response)s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:568 +msgid "Attempting udev settle and retry of lvremove..." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:608 +msgid "Error extending Volume" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:39 +msgid "nfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:45 +msgid "glusterfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:86 +#, python-format +msgid "Already mounted: %s" +msgstr "" + +#: cinder/common/config.py:125 +msgid "Deploy v1 of the Cinder API." +msgstr "" + +#: cinder/common/config.py:128 +msgid "Deploy v2 of the Cinder API." +msgstr "" + +#: cinder/common/sqlalchemyutils.py:66 +#: cinder/openstack/common/db/sqlalchemy/utils.py:72 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:114 +#: cinder/openstack/common/db/sqlalchemy/utils.py:120 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/compute/nova.py:97 +#, python-format +msgid "Novaclient connection created using URL: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:63 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:190 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:843 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1842 +#, python-format +msgid "VolumeType %s deletion failed, VolumeType in use." 
+msgstr "" + +#: cinder/db/sqlalchemy/api.py:2530 +#, python-format +msgid "No backup with id %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2615 +msgid "Volume must be available" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2639 +#, python-format +msgid "Volume in unexpected state %s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2662 +#, python-format +msgid "" +"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " +"%(status)s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:37 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:64 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:240 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:269 +msgid "Downgrade from initial Cinder install is unsupported." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:49 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:74 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:105 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:45 +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:48 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:46 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:127 +msgid "Dropping foreign key reservations_ibfk_1 failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:133 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:140 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:147 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:60 +msgid "Exception while creating table 'volume_glance_metadata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:75 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:68 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:58 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:61 +msgid "transfers table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:31 +msgid "migrations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:61 +#, python-format +msgid "Table |%s| not created" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:37 +#, python-format +msgid "Exception while dropping table %s." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:100 +#, python-format +msgid "Exception while creating table %s." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:34 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:43 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:49 +#, python-format +msgid "Column |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:92 +msgid "encryption_key_id column not dropped from volumes" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:100 +msgid "encryption_key_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:105 +msgid "volume_type_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:113 +msgid "encryption table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:49 +msgid "Table quality_of_service_specs not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:60 +msgid "Added qos_specs_id column to volume type table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:85 +msgid "Dropping foreign key volume_types_ibfk_1 failed" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:93 +msgid "Dropping qos_specs_id column failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:100 +msgid "Dropping quality_of_service_specs table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:59 +msgid "volume_admin_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:48 +msgid "" +"Found existing 'default' entries in the quota_classes table. Skipping " +"insertion of default values." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:72 +msgid "Added default quota class data into the DB." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:74 +msgid "Default quota class data not inserted into the DB." +msgstr "" + +#: cinder/image/glance.py:161 cinder/image/glance.py:169 +#, python-format +msgid "Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:94 cinder/image/image_utils.py:199 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/image/image_utils.py:101 +#, python-format +msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:109 cinder/image/image_utils.py:192 +#, python-format +msgid "" +"Size is %(image_size)dGB and doesn't fit in a volume of size " +"%(volume_size)dGB." +msgstr "" + +#: cinder/image/image_utils.py:157 +#, python-format +msgid "" +"qemu-img is not installed and image is of type %s. Only RAW images can " +"be used if qemu-img is not installed." +msgstr "" + +#: cinder/image/image_utils.py:164 +msgid "" +"qemu-img is not installed and the disk format is not specified. Only RAW" +" images can be used if qemu-img is not installed." 
+msgstr "" + +#: cinder/image/image_utils.py:178 +#, python-format +msgid "Copying image from %(tmp)s to volume %(dest)s - size: %(size)s" +msgstr "" + +#: cinder/image/image_utils.py:206 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:224 +#, python-format +msgid "Converted to %(vol_format)s, but format is now %(file_format)s" +msgstr "" + +#: cinder/image/image_utils.py:260 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:78 +msgid "" +"config option keymgr.fixed_key has not been defined: some operations may " +"fail unexpectedly" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:80 +msgid "keymgr.fixed_key not defined" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:134 +#, python-format +msgid "Not deleting key %s" +msgstr "" + +#: cinder/openstack/common/eventlet_backdoor.py:140 +#, python-format +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/fileutils.py:64 +#, python-format +msgid "Reloading cached file %s" +msgstr "" + +#: cinder/openstack/common/gettextutils.py:252 +msgid "Message objects do not support addition." +msgstr "" + +#: cinder/openstack/common/gettextutils.py:261 +msgid "" +"Message objects do not support str() because they may contain non-ascii " +"characters. Please use unicode() or translate() instead." +msgstr "" + +#: cinder/openstack/common/imageutils.py:96 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:189 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:200 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:227 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:235 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/log.py:301 +#, python-format +msgid "Deprecated: %s" +msgstr "" + +#: cinder/openstack/common/log.py:402 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:453 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:623 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:82 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:89 +#: cinder/tests/brick/test_brick_connector.py:466 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:129 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:136 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:43 +#, python-format +msgid "Unexpected argument for periodic task creation: %(arg)s." 
+msgstr "" + +#: cinder/openstack/common/periodic_task.py:134 +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:139 +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:177 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:186 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." +msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:127 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/openstack/common/processutils.py:142 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:167 +#: cinder/openstack/common/processutils.py:239 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:345 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:179 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/openstack/common/processutils.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:318 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:220 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/openstack/common/processutils.py:224 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/openstack/common/service.py:175 +#: cinder/openstack/common/service.py:269 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:187 +msgid "Exception during rpc cleanup." 
+msgstr "" + +#: cinder/openstack/common/service.py:238 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:275 +msgid "Unhandled exception" +msgstr "" + +#: cinder/openstack/common/service.py:308 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/openstack/common/service.py:327 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/openstack/common/service.py:337 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/openstack/common/service.py:354 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/openstack/common/service.py:358 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/service.py:362 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/openstack/common/service.py:392 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/openstack/common/service.py:410 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/openstack/common/sslutils.py:98 +#, python-format +msgid "Invalid SSL version : %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:86 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/strutils.py:182 +#, python-format +msgid "Invalid string format: %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:189 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/openstack/common/versionutils.py:69 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and " +"may be removed in %(remove_in)s." +msgstr "" + +#: cinder/openstack/common/versionutils.py:73 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s and may be removed in " +"%(remove_in)s. It will not be superseded." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:29 +msgid "An unknown error occurred in crypto utils." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:36 +#, python-format +msgid "Block size of %(given)d is too big, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:45 +#, python-format +msgid "Length of %(given)d is too long, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/db/exception.py:44 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:487 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:538 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:610 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/utils.py:33 +msgid "Sort key supplied was not valid." +msgstr "" + +#: cinder/openstack/common/notifier/api.py:129 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:145 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:164 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. 
Please use rpc_notifier instead."
+msgstr ""
+
+#: cinder/openstack/common/notifier/rpc_notifier.py:45
+#: cinder/openstack/common/notifier/rpc_notifier2.py:51
+#, python-format
+msgid "Could not send notification to %(topic)s. Payload=%(message)s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/__init__.py:105
+#, python-format
+msgid ""
+"A RPC is being made while holding a lock. The locks currently held are "
+"%(locks)s. This is probably a bug. Please report it. Include the "
+"following: [%(stack)s]."
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:83
+msgid "Pool creating new connection"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:208
+#, python-format
+msgid "no calling threads waiting for msg_id : %s, message : %s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:216
+#, python-format
+msgid ""
+"Number of call waiters is greater than warning threshold: %d. There "
+"could be a MulticallProxyWaiter leak."
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:299
+#, python-format
+msgid "unpacked context: %s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:345
+#, python-format
+msgid "UNIQUE_ID is %s."
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:414
+#, python-format
+msgid "received %s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:422
+#, python-format
+msgid "no method for message: %s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:423
+#, python-format
+msgid "No method for message: %s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:451
+#: cinder/openstack/common/rpc/impl_zmq.py:280
+#, python-format
+msgid "Expected exception during message handling (%s)"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:459
+#: cinder/openstack/common/rpc/impl_zmq.py:286
+msgid "Exception during message handling"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:594
+#, python-format
+msgid "Making synchronous call on %s ..."
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:597
+#, python-format
+msgid "MSG_ID is %s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:631
+#, python-format
+msgid "Making asynchronous cast on %s..."
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:640
+msgid "Making asynchronous fanout cast..."
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:668
+#, python-format
+msgid "Sending %(event_type)s on %(topic)s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:74
+msgid "An unknown RPC related exception occurred."
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:104
+#, python-format
+msgid ""
+"Remote error: %(exc_type)s %(value)s\n"
+"%(traceback)s."
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:121
+#, python-format
+msgid ""
+"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:"
+" \"%(method)s\" info: \"%(info)s\""
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:137
+#: cinder/openstack/common/rpc/common.py:138
+#: cinder/openstack/common/rpc/common.py:139
+msgid "<unknown>"
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:143
+#, python-format
+msgid "Found duplicate message(%(msg_id)s). Skipping it."
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:147
+msgid "Invalid reuse of an RPC connection."
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:151
+#, python-format
+msgid "Specified RPC version, %(version)s, not supported by this endpoint."
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:156
+#, python-format
+msgid ""
+"Specified RPC envelope version, %(version)s, not supported by this "
+"endpoint."
+msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:166 +#: cinder/openstack/common/rpc/impl_qpid.py:164 +msgid "Failed to process message... skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:477 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:499 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:536 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:552 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:606 +#: cinder/openstack/common/rpc/impl_qpid.py:507 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:624 +#: cinder/openstack/common/rpc/impl_qpid.py:522 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:628 +#: cinder/openstack/common/rpc/impl_qpid.py:526 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:667 +#: cinder/openstack/common/rpc/impl_qpid.py:561 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:84 +#, python-format +msgid "Invalid value for qpid_topology_version: %d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:455 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:461 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:474 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:534 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:101 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:136 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:137 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:138 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:146 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:158 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:200 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:205 +msgid "You cannot send on this socket." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:267 +#, python-format +msgid "Running func with context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:305 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:387 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:437 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:443 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:475 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:481 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:497 +#, python-format +msgid "Required IPC directory does not exist at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:506 +#, python-format +msgid "Permission denied to IPC directory at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:509 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:543 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:562 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:590 +msgid "Skipping topic registration. Already registered." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:597 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:649 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:662 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:675 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:678 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:681 +#, python-format +msgid "Received message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:682 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:691 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:698 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:721 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:724 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:728 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:731 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:771 +#, python-format +msgid "topic is %s." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:815 +#, python-format +msgid "rpc_zmq_matchmaker = %(orig)s is deprecated; use %(new)s instead" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#: cinder/openstack/common/rpc/matchmaker_ring.py:79 +#: cinder/openstack/common/rpc/matchmaker_ring.py:97 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:54 +#, python-format +msgid "extra_spec requirement '%(req)s' does not match '%(cap)s'" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:67 +#, python-format +msgid "%(host_state)s fails resource_type extra_specs requirements" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:43 +msgid "Re-scheduling is disabled." +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:52 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/scheduler/driver.py:69 +msgid "Must implement host_passes_filters" +msgstr "" + +#: cinder/scheduler/driver.py:74 +msgid "Must implement find_retype_host" +msgstr "" + +#: cinder/scheduler/driver.py:78 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:82 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:98 +#, python-format +msgid "cannot place volume %(id)s on %(host)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:114 +#, python-format +msgid "No valid hosts for volume %(id)s with type %(type)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:125 +#, python-format +msgid "" +"Current host not valid for volume %(id)s with type %(type)s, migration " +"not allowed" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:156 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:174 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:207 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:259 +#, python-format +msgid "Filtered %s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:276 +#, python-format +msgid "Choosing %s" +msgstr "" + +#: cinder/scheduler/host_manager.py:264 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:269 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:294 +#, python-format +msgid "volume service is down or disabled. (host: %s)" +msgstr "" + +#: cinder/scheduler/manager.py:63 +msgid "" +"ChanceScheduler and SimpleScheduler have been deprecated due to lack of " +"support for advanced features like: volume types, volume encryption, QoS " +"etc. These two schedulers can be fully replaced by FilterScheduler with " +"certain combination of filters and weighers." 
+msgstr "" + +#: cinder/scheduler/manager.py:98 cinder/scheduler/manager.py:100 +msgid "Failed to create scheduler manager volume flow" +msgstr "" + +#: cinder/scheduler/manager.py:159 +msgid "New volume type not specified in request_spec." +msgstr "" + +#: cinder/scheduler/manager.py:174 +#, python-format +msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." +msgstr "" + +#: cinder/scheduler/manager.py:192 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:68 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%s'" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:43 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:57 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:53 +msgid "No volume_id provided to populate a request_spec from" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:116 +#, python-format +msgid "Failed to schedule_create_volume: %(cause)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:135 +#, python-format +msgid "Failed notifying on %(topic)s payload %(payload)s" +msgstr "" + +#: cinder/tests/fake_driver.py:57 cinder/volume/driver.py:784 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:76 cinder/volume/driver.py:884 +#, python-format +msgid "FAKE ISER: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:97 +msgid "local_path not implemented" +msgstr "" + +#: cinder/tests/fake_driver.py:124 cinder/tests/fake_driver.py:129 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:70 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:78 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:94 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:97 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:58 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_netapp_nfs.py:360 +#, python-format +msgid "Share %(share)s and file name %(file_name)s" +msgstr "" + +#: cinder/tests/test_rbd.py:768 cinder/volume/drivers/rbd.py:175 +msgid "flush() not supported in this version of librbd" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:260 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1507 +#, python-format +msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1510 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1515 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:60 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:61 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: 
cinder/tests/test_xiv_ds8k.py:102
+#, python-format
+msgid "Volume not found for instance %(instance_id)s."
+msgstr ""
+
+#: cinder/tests/api/contrib/test_backups.py:741
+msgid "Invalid input"
+msgstr ""
+
+#: cinder/tests/integrated/test_login.py:29
+#, python-format
+msgid "volume: %s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:32
+#, python-format
+msgid ""
+"%(message)s\n"
+"Status Code: %(_status)s\n"
+"Body: %(_body)s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:42
+msgid "Authentication error"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:50
+msgid "Authorization error"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:58
+msgid "Item not found"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:100
+#, python-format
+msgid "Doing %(method)s on %(relative_url)s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:103
+#, python-format
+msgid "Body: %s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:121
+#, python-format
+msgid "%(auth_uri)s => code %(http_status)s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:148
+#, python-format
+msgid "%(relative_uri)s => code %(http_status)s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:159
+msgid "Unexpected status code"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:166
+#, python-format
+msgid "Decoding JSON: %s"
+msgstr ""
+
+#: cinder/transfer/api.py:68
+msgid "Volume in unexpected state"
+msgstr ""
+
+#: cinder/transfer/api.py:102 cinder/volume/api.py:367
+msgid "status must be available"
+msgstr ""
+
+#: cinder/transfer/api.py:119
+#, python-format
+msgid "Failed to create transfer record for %s"
+msgstr ""
+
+#: cinder/transfer/api.py:136
+#, python-format
+msgid "Attempt to transfer %s with invalid auth key."
+msgstr ""
+
+#: cinder/transfer/api.py:156 cinder/volume/flows/api/create_volume.py:508
+#, python-format
+msgid ""
+"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume "
+"(%(d_consumed)dG of %(d_quota)dG already consumed)"
+msgstr ""
+
+#: cinder/transfer/api.py:182
+#, python-format
+msgid "Failed to update quota donating volume transfer id %s"
+msgstr ""
+
+#: cinder/transfer/api.py:199
+#, python-format
+msgid "Volume %s has been transferred."
+msgstr "" + +#: cinder/volume/api.py:143 +#, python-format +msgid "Unable to query if %s is in the availability zone set" +msgstr "" + +#: cinder/volume/api.py:171 cinder/volume/api.py:173 +msgid "Failed to create api volume flow" +msgstr "" + +#: cinder/volume/api.py:202 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:214 +#, python-format +msgid "Volume status must be available or error, but current status is: %s" +msgstr "" + +#: cinder/volume/api.py:224 +msgid "Volume cannot be deleted while migrating" +msgstr "" + +#: cinder/volume/api.py:229 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:285 cinder/volume/api.py:350 +#: cinder/volume/qos_specs.py:240 cinder/volume/volume_types.py:67 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:370 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:377 +msgid "status must be in-use to detach" +msgstr "" + +#: cinder/volume/api.py:388 +msgid "Volume status must be available to reserve" +msgstr "" + +#: cinder/volume/api.py:464 +msgid "Snapshot cannot be created while volume is migrating" +msgstr "" + +#: cinder/volume/api.py:468 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:490 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:502 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:553 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/api.py:581 cinder/volume/flows/api/create_volume.py:208 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:585 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:589 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:720 cinder/volume/api.py:772 +msgid "Volume status must be available/in-use." +msgstr "" + +#: cinder/volume/api.py:723 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/api.py:752 +msgid "Volume status must be available to extend." +msgstr "" + +#: cinder/volume/api.py:757 +#, python-format +msgid "" +"New size for extend must be greater than current size. (current: " +"%(size)s, extended: %(new_size)s)" +msgstr "" + +#: cinder/volume/api.py:778 +msgid "Volume is already part of an active migration" +msgstr "" + +#: cinder/volume/api.py:784 +msgid "volume must not have snapshots" +msgstr "" + +#: cinder/volume/api.py:797 +#, python-format +msgid "No available service named %s" +msgstr "" + +#: cinder/volume/api.py:803 +msgid "Destination host must be different than current host" +msgstr "" + +#: cinder/volume/api.py:833 +msgid "Source volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:837 +msgid "Destination volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:842 +#, python-format +msgid "Destination has migration_status %(stat)s, expected %(exp)s." +msgstr "" + +#: cinder/volume/api.py:853 +msgid "Volume status must be available to update readonly flag." 
+msgstr "" + +#: cinder/volume/api.py:862 +#, python-format +msgid "Unable to update type due to incorrect status on volume: %s" +msgstr "" + +#: cinder/volume/api.py:868 +#, python-format +msgid "Volume %s is already part of an active migration." +msgstr "" + +#: cinder/volume/api.py:874 +#, python-format +msgid "migration_policy must be 'on-demand' or 'never', passed: %s" +msgstr "" + +#: cinder/volume/api.py:887 +#, python-format +msgid "Invalid volume_type passed: %s" +msgstr "" + +#: cinder/volume/api.py:900 +#, python-format +msgid "New volume_type same as original: %s" +msgstr "" + +#: cinder/volume/api.py:915 +msgid "Retype cannot change encryption requirements" +msgstr "" + +#: cinder/volume/api.py:927 +msgid "Retype cannot change front-end qos specs for in-use volumes" +msgstr "" + +#: cinder/volume/driver.py:189 cinder/volume/drivers/netapp/nfs.py:174 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:282 +#, python-format +msgid "copy_data_between_volumes %(src)s -> %(dest)s." +msgstr "" + +#: cinder/volume/driver.py:295 cinder/volume/driver.py:309 +#, python-format +msgid "Failed to attach volume %(vol)s" +msgstr "" + +#: cinder/volume/driver.py:327 +#, python-format +msgid "Failed to copy volume %(src)s to %(dest)d" +msgstr "" + +#: cinder/volume/driver.py:340 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:358 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:394 +#, python-format +msgid "Unable to access the backend storage via the path %(path)s." +msgstr "" + +#: cinder/volume/driver.py:433 +#, python-format +msgid "Creating a new backup for volume %s." +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Restoring backup %(backup)s to volume %(volume)s." +msgstr "" + +#: cinder/volume/driver.py:474 +msgid "Extend volume not implemented" +msgstr "" + +#: cinder/volume/driver.py:533 cinder/volume/drivers/emc/emc_smis_iscsi.py:113 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:546 +#, python-format +msgid "ISCSI discovery attempt failed for:%s" +msgstr "" + +#: cinder/volume/driver.py:548 +#, python-format +msgid "Error from iscsiadm -m discovery: %s" +msgstr "" + +#: cinder/volume/driver.py:595 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:599 cinder/volume/drivers/emc/emc_smis_iscsi.py:156 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:696 +msgid "The volume driver requires the iSCSI initiator name in the connector." +msgstr "" + +#: cinder/volume/driver.py:726 cinder/volume/driver.py:845 +#: cinder/volume/drivers/eqlx.py:247 cinder/volume/drivers/lvm.py:359 +#: cinder/volume/drivers/zadara.py:650 +#: cinder/volume/drivers/emc/emc_smis_common.py:859 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:235 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:602 +#: cinder/volume/drivers/netapp/iscsi.py:1032 +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/nexenta/iscsi.py:538 +#: cinder/volume/drivers/windows/windows.py:205 +msgid "Updating volume stats" +msgstr "" + +#: cinder/volume/driver.py:924 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:203 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." 
+msgstr "" + +#: cinder/volume/manager.py:209 +msgid "" +"ThinLVMVolumeDriver is deprecated, please configure LVMISCSIDriver and " +"lvm_type=thin. Continuing with those settings." +msgstr "" + +#: cinder/volume/manager.py:228 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)" +msgstr "" + +#: cinder/volume/manager.py:235 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s" +msgstr "" + +#: cinder/volume/manager.py:244 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:257 +#, python-format +msgid "Failed to re-export volume %s: setting to error state" +msgstr "" + +#: cinder/volume/manager.py:264 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:271 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:273 +#, python-format +msgid "" +"Error encountered during re-exporting phase of driver initialization: " +"%(name)s" +msgstr "" + +#: cinder/volume/manager.py:283 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:328 cinder/volume/manager.py:330 +msgid "Failed to create manager volume flow" +msgstr "" + +#: cinder/volume/manager.py:374 cinder/volume/manager.py:391 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:380 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:389 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:394 +#, python-format +msgid "Cannot delete volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:422 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:427 +#, python-format +msgid "volume %s: glance metadata deleted" +msgstr "" + +#: cinder/volume/manager.py:430 +#, python-format +msgid "no glance metadata found for volume %s" +msgstr "" + +#: cinder/volume/manager.py:434 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:451 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:462 +#, python-format +msgid "snapshot %(snap_id)s: creating" +msgstr "" + +#: cinder/volume/manager.py:490 +#, python-format +msgid "" +"Failed updating %(snapshot_id)s metadata using the provided volumes " +"%(volume_id)s metadata" +msgstr "" + +#: cinder/volume/manager.py:496 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:508 cinder/volume/manager.py:518 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:526 +#, python-format +msgid "Cannot delete snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:556 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:559 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:579 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:583 +msgid "being attached by another host" +msgstr "" + +#: cinder/volume/manager.py:587 +msgid "being attached by different mode" +msgstr "" + +#: cinder/volume/manager.py:590 +msgid "status must be available or attaching" +msgstr "" + +#: cinder/volume/manager.py:698 +#, python-format +msgid 
"Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "" + +#: cinder/volume/manager.py:760 +#, python-format +msgid "Unable to fetch connection information from backend: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:807 +#, python-format +msgid "Unable to terminate volume connection: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:854 +msgid "failed to create new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:857 +msgid "timeout creating new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:880 +#, python-format +msgid "Failed to copy volume %(vol1)s to %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:909 +#, python-format +msgid "" +"migrate_volume_completion: completing migration for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:921 +#, python-format +msgid "" +"migrate_volume_completion is cleaning up an error for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:940 +#, python-format +msgid "Failed to delete migration source vol %(vol)s: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:976 +#, python-format +msgid "volume %s: calling driver migrate_volume" +msgstr "" + +#: cinder/volume/manager.py:1016 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/manager.py:1024 +#, python-format +msgid "" +"Unable to update stats, %(driver_name)s -%(driver_version)s " +"%(config_group)s driver is uninitialized." +msgstr "" + +#: cinder/volume/manager.py:1044 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/manager.py:1091 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to extend volume by %(s_size)sG, " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/manager.py:1103 +#, python-format +msgid "volume %s: extending" +msgstr "" + +#: cinder/volume/manager.py:1105 +#, python-format +msgid "volume %s: extended successfully" +msgstr "" + +#: cinder/volume/manager.py:1107 +#, python-format +msgid "volume %s: Error trying to extend volume" +msgstr "" + +#: cinder/volume/manager.py:1169 +msgid "Failed to update usages while retyping volume." +msgstr "" + +#: cinder/volume/manager.py:1170 +msgid "Failed to get old volume type quota reservations" +msgstr "" + +#: cinder/volume/manager.py:1190 +#, python-format +msgid "Volume %s: retyped succesfully" +msgstr "" + +#: cinder/volume/manager.py:1193 +#, python-format +msgid "" +"Volume %s: driver error when trying to retype, falling back to generic " +"mechanism." +msgstr "" + +#: cinder/volume/manager.py:1204 +msgid "Retype requires migration but is not allowed." +msgstr "" + +#: cinder/volume/manager.py:1212 +msgid "Volume must not have snapshots." 
+msgstr "" + +#: cinder/volume/qos_specs.py:57 +#, python-format +msgid "Valid consumer of QoS specs are: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:84 cinder/volume/qos_specs.py:105 +#: cinder/volume/qos_specs.py:155 cinder/volume/qos_specs.py:197 +#: cinder/volume/qos_specs.py:211 cinder/volume/qos_specs.py:225 +#: cinder/volume/volume_types.py:43 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:123 cinder/volume/qos_specs.py:140 +#: cinder/volume/qos_specs.py:272 cinder/volume/volume_types.py:52 +#: cinder/volume/volume_types.py:99 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/qos_specs.py:156 +#, python-format +msgid "Failed to get all associations of qos specs %s" +msgstr "" + +#: cinder/volume/qos_specs.py:189 +#, python-format +msgid "" +"Type %(type_id)s is already associated with another qos specs: " +"%(qos_specs_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:198 +#, python-format +msgid "Failed to associate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:212 +#, python-format +msgid "Failed to disassociate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:226 +#, python-format +msgid "Failed to disassociate qos specs %s." +msgstr "" + +#: cinder/volume/qos_specs.py:284 cinder/volume/volume_types.py:111 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/utils.py:144 +#, python-format +msgid "" +"Incorrect value error: %(blocksize)s, it may indicate that " +"'volume_dd_blocksize' was configured incorrectly. Fall back to default." +msgstr "" + +#: cinder/volume/volume_types.py:130 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:131 +#: cinder/volume/drivers/block_device.py:143 cinder/volume/drivers/lvm.py:654 +#: cinder/volume/drivers/lvm.py:669 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:157 cinder/volume/drivers/lvm.py:687 +#, python-format +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:183 cinder/volume/drivers/lvm.py:483 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:200 cinder/volume/drivers/lvm.py:504 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:272 cinder/volume/drivers/lvm.py:227 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:287 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:311 cinder/volume/drivers/lvm.py:300 +#: cinder/volume/drivers/zadara.py:509 cinder/volume/drivers/nexenta/nfs.py:189 +#, python-format +msgid "Creating clone of volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:380 +msgid "No free disk" +msgstr "" + +#: cinder/volume/drivers/block_device.py:393 +msgid "No big enough free disk" +msgstr "" + +#: cinder/volume/drivers/coraid.py:84 +#, python-format +msgid "Invalid ESM url scheme \"%s\". Supported https only." +msgstr "" + +#: cinder/volume/drivers/coraid.py:111 +msgid "Invalid REST handle name. Expected path." 
+msgstr "" + +#: cinder/volume/drivers/coraid.py:134 +#, python-format +msgid "Call to json.loads() failed: %(ex)s. Response: %(resp)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:224 +msgid "Session is expired. Relogin on ESM." +msgstr "" + +#: cinder/volume/drivers/coraid.py:244 +msgid "Reply is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:246 +msgid "Error message is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:284 +#, python-format +msgid "Coraid Appliance ping failed: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:297 +#, python-format +msgid "Volume \"%(name)s\" created with VSX LUN \"%(lun)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:311 +#, python-format +msgid "Volume \"%s\" deleted." +msgstr "" + +#: cinder/volume/drivers/coraid.py:315 +#, python-format +msgid "Resize volume \"%(name)s\" to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:319 +#, python-format +msgid "Repository for volume \"%(name)s\" found: \"%(repo)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:333 +#, python-format +msgid "Volume \"%(name)s\" resized. New size is %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:385 +msgid "Cannot create clone volume in different repository." +msgstr "" + +#: cinder/volume/drivers/coraid.py:505 +#, python-format +msgid "Initialize connection %(shelf)s/%(lun)s for %(name)s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:139 +#, python-format +msgid "" +"CLI output\n" +"%s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:154 +msgid "Reading CLI MOTD" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:158 +#, python-format +msgid "Setting CLI terminal width: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:162 +#, python-format +msgid "Sending CLI command: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:169 +msgid "Error executing EQL command" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:199 +#, python-format +msgid "EQL-driver: executing \"%s\"" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:208 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:383 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:215 cinder/volume/drivers/san/san.py:149 +#, python-format +msgid "Error running SSH command: %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:282 +#, python-format +msgid "Volume %s does not exist, it may have already been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:300 +#, python-format +msgid "EQL-driver: Setup is complete, group IP is %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:304 +msgid "Failed to setup the Dell EqualLogic driver" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:320 +#, python-format +msgid "Failed to create volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:329 +#, python-format +msgid "Volume %s was not found while trying to delete it" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:333 +#, python-format +msgid "Failed to delete volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:348 +#, python-format +msgid "Failed to create snapshot of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:361 +#, python-format +msgid "Failed to create volume from snapshot %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:374 +#, python-format +msgid "Failed to create clone of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:384 +#, python-format +msgid "Failed to delete snapshot %(snap)s of volume %(vol)s" +msgstr "" + +#: 
cinder/volume/drivers/eqlx.py:405 +#, python-format +msgid "Failed to initialize connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:415 +#, python-format +msgid "Failed to terminate connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:436 +#, python-format +msgid "Volume %s is not found!, it may have been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:440 +#, python-format +msgid "Failed to ensure export of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:459 +#, python-format +msgid "Failed to extend_volume %(name)s from %(current_size)sGB to %(new_size)sGB" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:86 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:91 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:103 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:161 +#, python-format +msgid "Cloning volume %(src)s to volume %(dst)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:166 +msgid "Volume status must be 'available'." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:202 cinder/volume/drivers/nfs.py:122 +#: cinder/volume/drivers/netapp/nfs.py:753 +#, python-format +msgid "casted to %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:216 +msgid "Snapshot status must be \"available\" to clone." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:238 +#, python-format +msgid "snapshot: %(snap)s, volume: %(vol)s, volume_size: %(size)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:257 +#, python-format +msgid "will copy from snapshot at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:275 cinder/volume/drivers/nfs.py:172 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:373 +#, python-format +msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:403 +#, python-format +msgid "nova call result: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:405 +msgid "Call to Nova to create snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:427 +msgid "Nova returned \"error\" status while creating snapshot." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:431 +#, python-format +msgid "Status of snapshot %(id)s is now %(status)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:444 +#, python-format +msgid "Timed out while waiting for Nova update for creation of snapshot %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:456 +#, python-format +msgid "create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:457 +#, python-format +msgid "volume id: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:532 +msgid "'active' must be present when writing snap_info." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:562 +#, python-format +msgid "deleting snapshot %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:566 +msgid "Volume status must be \"available\" or \"in-use\"." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:582 +#, python-format +msgid "" +"Snapshot record for %s is not present, allowing snapshot_delete to " +"proceed." 
+msgstr "" + +#: cinder/volume/drivers/glusterfs.py:587 +#, python-format +msgid "snapshot_file for this snap is %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:608 +#, python-format +msgid "No base file found for %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:625 +#, python-format +msgid "No %(base_id)s found for %(file)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:680 +#, python-format +msgid "No file found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:690 +#, python-format +msgid "No snap found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:701 +#, python-format +msgid "No file depends on %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:727 +#, python-format +msgid "Check condition failed: %s expected to be None." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:778 +msgid "Call to Nova delete snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:796 +#, python-format +msgid "status of snapshot %s is still \"deleting\"... waiting" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:802 +#, python-format +msgid "Unable to delete snapshot %(id)s, status: %(status)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:815 +#, python-format +msgid "Timed out while waiting for Nova update for deletion of snapshot %(id)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:904 +#, python-format +msgid "%s must be a valid raw or qcow2 image." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:967 +msgid "Extend volume is only supported for this driver when no snapshots exist." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:975 +#, python-format +msgid "Unrecognized backing format: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:990 +#, python-format +msgid "creating new volume at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:993 +#, python-format +msgid "file already exists at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1019 cinder/volume/drivers/nfs.py:159 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1021 +#, python-format +msgid "Available shares: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1038 +#, python-format +msgid "" +"GlusterFS share at %(dir)s is not writable by the Cinder volume service. " +"Snapshot operations will not be supported." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:96 +#, python-format +msgid "GPFS is not active. Detailed output: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:97 +#, python-format +msgid "GPFS is not running - state: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:140 +msgid "Option gpfs_mount_point_base is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:147 +msgid "Option gpfs_images_share_mode is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:153 +msgid "Option gpfs_images_dir is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:160 +#, python-format +msgid "" +"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " +"belong to different file systems" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:169 +#, python-format +msgid "" +"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in " +"cluster daemon level %(cur)s - must be at least at level %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:183 +#, python-format +msgid "%s must be an absolute path." 
+msgstr "" + +#: cinder/volume/drivers/gpfs.py:188 +#, python-format +msgid "%s is not a directory." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:197 +#, python-format +msgid "" +"The GPFS filesystem %(fs)s is not at the required release level. Current" +" level is %(cur)s, must be at least %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:556 +#, python-format +msgid "Failed to resize volume %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:604 +#, python-format +msgid "mkfs failed on volume %(vol)s, error message was: %(err)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:637 +#, python-format +msgid "" +"%s cannot be accessed. Verify that GPFS is active and file system is " +"mounted." +msgstr "" + +#: cinder/volume/drivers/lvm.py:189 +#, python-format +msgid "Unabled to delete due to existing snapshot for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:215 +#, python-format +msgid "Volume device file path %s does not exist." +msgstr "" + +#: cinder/volume/drivers/lvm.py:221 +#, python-format +msgid "Size for volume: %s not found, cannot secure delete." +msgstr "" + +#: cinder/volume/drivers/lvm.py:262 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:361 +#, python-format +msgid "Unable to update stats on non-initialized Volume Group: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:462 +#, python-format +msgid "Error creating iSCSI target, retrying creation for target: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:482 +#, python-format +msgid "volume_info:%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:518 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:519 cinder/volume/drivers/lvm.py:724 +#: cinder/volume/drivers/huawei/rest_common.py:1225 +#, python-format +msgid "%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:573 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/nfs.py:109 +msgid "Driver specific implementation needs to return mount_point_base." +msgstr "" + +#: cinder/volume/drivers/nfs.py:263 +#, python-format +msgid "Expected volume size was %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:264 +#, python-format +msgid " but size is now %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:361 +#, python-format +msgid "%s is already mounted" +msgstr "" + +#: cinder/volume/drivers/nfs.py:421 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:426 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/nfs.py:431 +#, python-format +msgid "NFS config 'nfs_oversub_ratio' invalid. Must be > 0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:439 +#, python-format +msgid "NFS config 'nfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:493 +#, python-format +msgid "Selected %s as target nfs share." 
+msgstr "" + +#: cinder/volume/drivers/nfs.py:526 +#, python-format +msgid "%s is above nfs_used_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:529 +#, python-format +msgid "%s is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:532 +#, python-format +msgid "%s reserved space is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/rbd.py:160 +#, python-format +msgid "Invalid argument - whence=%s not supported" +msgstr "" + +#: cinder/volume/drivers/rbd.py:164 +msgid "Invalid argument" +msgstr "" + +#: cinder/volume/drivers/rbd.py:183 +msgid "fileno() not supported by RBD()" +msgstr "" + +#: cinder/volume/drivers/rbd.py:210 +#, python-format +msgid "error opening rbd image %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:259 +msgid "rados and rbd python libraries not found" +msgstr "" + +#: cinder/volume/drivers/rbd.py:265 +msgid "error connecting to ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:346 cinder/volume/drivers/sheepdog.py:178 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:377 +#, python-format +msgid "clone depth exceeds limit of %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:411 +#, python-format +msgid "maximum clone depth (%d) has been reached - flattening source volume" +msgstr "" + +#: cinder/volume/drivers/rbd.py:423 +#, python-format +msgid "flattening source volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:435 +#, python-format +msgid "creating snapshot='%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:445 +#, python-format +msgid "cloning '%(src_vol)s@%(src_snap)s' to '%(dest)s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:459 +msgid "clone created successfully" +msgstr "" + +#: cinder/volume/drivers/rbd.py:468 +#, python-format +msgid "creating volume '%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:484 +#, python-format +msgid "flattening %(pool)s/%(img)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:490 +#, python-format +msgid "cloning %(pool)s/%(img)s@%(snap)s to %(dst)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:527 +msgid "volume has no backup snaps" +msgstr "" + +#: cinder/volume/drivers/rbd.py:550 +#, python-format +msgid "volume %s is not a clone" +msgstr "" + +#: cinder/volume/drivers/rbd.py:568 +#, python-format +msgid "deleting parent snapshot %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:579 +#, python-format +msgid "deleting parent %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:593 +#, python-format +msgid "volume %s no longer exists in backend" +msgstr "" + +#: cinder/volume/drivers/rbd.py:609 +msgid "volume has clone snapshot(s)" +msgstr "" + +#: cinder/volume/drivers/rbd.py:625 +#, python-format +msgid "deleting rbd volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:629 +msgid "" +"ImageBusy error raised while deleting rbd volume. This may have been " +"caused by a connection from a client that has crashed and, if so, may be " +"resolved by retrying the delete after 30 seconds has elapsed." 
+msgstr "" + +#: cinder/volume/drivers/rbd.py:642 +msgid "volume is a clone so cleaning references" +msgstr "" + +#: cinder/volume/drivers/rbd.py:696 +#, python-format +msgid "connection data: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:705 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:709 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:712 +msgid "Not an rbd snapshot" +msgstr "" + +#: cinder/volume/drivers/rbd.py:724 +#, python-format +msgid "not cloneable: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:728 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:733 +msgid "rbd image clone requires image format to be 'raw' but image {0} is '{1}'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:747 +#, python-format +msgid "Unable to open image %(loc)s: %(err)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:817 +msgid "volume backup complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:830 +msgid "volume restore complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:840 cinder/volume/drivers/sheepdog.py:195 +#, python-format +msgid "Failed to Extend Volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:845 cinder/volume/drivers/sheepdog.py:200 +#: cinder/volume/drivers/windows/windows.py:223 +#, python-format +msgid "Extend volume from %(old_size)s GB to %(new_size)s GB." +msgstr "" + +#: cinder/volume/drivers/scality.py:67 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:78 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:84 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:105 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:139 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:59 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:64 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:144 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:151 +#, python-format +msgid "" +"Failed to make httplib connection SolidFire Cluster: %s (verify san_ip " +"settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:154 +#, python-format +msgid "Failed to make httplib connection: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:161 +#, python-format +msgid "" +"Request to SolidFire cluster returned bad status: %(status)s / %(reason)s" +" (check san_login/san_password settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:166 +#, python-format +msgid "HTTP request failed, with status: %(status)s and reason: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:177 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:183 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:187 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:189 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:195 +#, python-format +msgid "Detected 
xDBVersionMismatch, retry %s of 5"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:202
+#: cinder/volume/drivers/solidfire.py:271
+#: cinder/volume/drivers/solidfire.py:366
+#, python-format
+msgid "API response: %s"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:222
+#, python-format
+msgid "Found solidfire account: %s"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:253
+#, python-format
+msgid "solidfire account: %s does not exist, creating it..."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:315
+#, python-format
+msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:398
+msgid "Failed to get model update from clone"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:410
+#, python-format
+msgid "Failed volume create: %s"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:425
+#, python-format
+msgid "More than one valid preset was detected, using %s"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:460
+#, python-format
+msgid "Failed to get SolidFire Volume: %s"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:469
+#, python-format
+msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:478
+#, python-format
+msgid "Volume %s not found on SF Cluster."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:481
+#, python-format
+msgid "Found %(count)s volumes mapped to id: %(uuid)s."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:550
+msgid "Enter SolidFire delete_volume..."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:554
+#, python-format
+msgid "Account for Volume ID %s was not found on the SolidFire Cluster!"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:556
+msgid "This usually means the volume was never successfully created."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:569
+#, python-format
+msgid "Failed to delete SolidFire Volume: %s"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:572
+#: cinder/volume/drivers/solidfire.py:646
+#: cinder/volume/drivers/solidfire.py:709
+#: cinder/volume/drivers/solidfire.py:734
+#, python-format
+msgid "Volume ID %s was not found on the SolidFire Cluster!"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:575
+msgid "Leaving SolidFire delete_volume"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:579
+msgid "Executing SolidFire ensure_export..."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:587
+msgid "Executing SolidFire create_export..."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:638
+msgid "Entering SolidFire extend_volume..."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:660
+msgid "Leaving SolidFire extend_volume"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:665
+msgid "Updating cluster status info"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:673
+msgid "Failed to get updated stats"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:703
+#: cinder/volume/drivers/solidfire.py:728
+msgid "Entering SolidFire attach_volume..."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:773
+msgid "Leaving SolidFire transfer volume"
+msgstr ""
+
+#: cinder/volume/drivers/zadara.py:236
+#, python-format
+msgid "Sending %(method)s to %(url)s. Body \"%(body)s\""
+msgstr ""
+
+#: cinder/volume/drivers/zadara.py:260
+#, python-format
+msgid "Operation completed. 
%(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:357 +#, python-format +msgid "Pool %(name)s: %(total)sGB total, %(free)sGB free" +msgstr "" + +#: cinder/volume/drivers/zadara.py:408 cinder/volume/drivers/zadara.py:531 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:438 +#, python-format +msgid "Create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:445 cinder/volume/drivers/zadara.py:490 +#: cinder/volume/drivers/zadara.py:516 +#, python-format +msgid "Volume %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:456 +#, python-format +msgid "Delete snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:464 +#, python-format +msgid "snapshot: original volume %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:472 +#, python-format +msgid "snapshot: snapshot %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:483 +#, python-format +msgid "Creating volume from snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:496 +#, python-format +msgid "Snapshot %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:614 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:40 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:79 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:83 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:91 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:98 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:107 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:115 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:130 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:137 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:144 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:152 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:157 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:167 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:177 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:188 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:197 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:218 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:230 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:241 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:257 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:266 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:278 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:287 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:292 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:302 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:312 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:321 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:342 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. 
Return code: %(rc)lu. Error: %(error)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:354
+#, python-format
+msgid ""
+"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s."
+" Successfully cloned volume from source volume. Finding the clone "
+"relationship."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:365
+#, python-format
+msgid ""
+"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s."
+" Remove the clone relationship. Method: ModifyReplicaSynchronization "
+"ReplicationService: %(service)s Operation: 8 Synchronization: "
+"%(sync_name)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:381
+#, python-format
+msgid ""
+"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s"
+" Return code: %(rc)lu"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:390
+#, python-format
+msgid ""
+"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: "
+"%(srcname)s. Return code: %(rc)lu. Error: %(error)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:402
+#, python-format
+msgid ""
+"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: "
+"%(srcname)s Return code: %(rc)lu."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:411
+msgid "Entering delete_volume."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:413
+#, python-format
+msgid "Delete Volume: %(volume)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:420
+#, python-format
+msgid "Volume %(name)s not found on the array. No volume to delete."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:430
+#, python-format
+msgid ""
+"Error Delete Volume: %(volumename)s. Storage Configuration Service not "
+"found."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:438
+#, python-format
+msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:442
+#, python-format
+msgid ""
+"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigService: "
+"%(service)s TheElement: %(vol_instance)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:456
+#, python-format
+msgid ""
+"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: "
+"%(error)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:465
+#, python-format
+msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:472
+msgid "Entering create_snapshot."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:476
+#, python-format
+msgid "Create snapshot: %(snapshot)s: volume: %(volume)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:488
+#, python-format
+msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:495
+#: cinder/volume/drivers/emc/emc_smis_common.py:497
+#: cinder/volume/drivers/emc/emc_smis_common.py:567
+#, python-format
+msgid "Cannot find Replication Service to create snapshot for volume %s."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:502
+#, python-format
+msgid ""
+"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s "
+"Source: %(volume)s Replication Service: %(service)s ElementName: "
+"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:518 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:527 +#, python-format +msgid "" +"Error Create Snapshot: %(snapshot)s Volume: %(volume)s Error: " +"%(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:535 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:541 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:545 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:551 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:559 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:574 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:590 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:599 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:611 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:621 +#, python-format +msgid "Create export: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:626 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:648 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:663 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:674 +#, python-format +msgid "Error mapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:678 +#, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:694 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:707 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:711 +#, python-format +msgid "HidePaths for volume %s completed successfully." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:724 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:739 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:744 +#, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:757 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:770 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:775 +#, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:781 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:790 +#: cinder/volume/drivers/emc/emc_smis_common.py:820 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:804 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:810 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:834 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:840 +#, python-format +msgid "Volume %s is already mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:852 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:884 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:887 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:903 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:906 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:928 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:948 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:952 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:959 +msgid "Cannot connect to ECOM server" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:971 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:984 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:997 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1010 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1054 +#, python-format +msgid "Pool %(storage_type)s is not found." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1060 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1066 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1082 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1114 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1117 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1130 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1158 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1184 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1188 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1248 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1289 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1302 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1314 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1326 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1361 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1404 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1409 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1419 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1441 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1463 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1491 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1520 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1526 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1538 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1548 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1550 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1566 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:152 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:161 +#, python-format +msgid "Cannot find device number for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:191 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:198 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:215 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:70 +#, python-format +msgid "Range: start LU: %(start)s, end LU: %(end)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:84 +#, python-format +msgid "setting LU upper (end) limit to %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:92 +#, python-format +msgid "%(element)s: %(val)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:103 cinder/volume/drivers/hds/hds.py:105 +#, python-format +msgid "XML exception reading parameter: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:178 +#, python-format +msgid "portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:197 +#, python-format +msgid "No configuration found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:250 +#, python-format +msgid "HDP not found: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:289 +#, python-format +msgid "iSCSI portal not found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:327 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:355 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is cloned." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:372 +#, python-format +msgid "LUN %(lun)s extended to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:395 +#, python-format +msgid "delete lun %(lun)s on %(name)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:480 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created from snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:503 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is created as snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:522 +#, python-format +msgid "LUN %s is deleted." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:57 +msgid "_instantiate_driver: configuration not found." 
+msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:64 +#, python-format +msgid "" +"_instantiate_driver: Loading %(protocol)s driver for Huawei OceanStor " +"%(product)s series storage arrays." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:84 +#, python-format +msgid "" +"\"Product\" or \"Protocol\" is illegal. \"Product\" should be set to " +"either T, Dorado or HVS. \"Protocol\" should be set to either iSCSI or " +"FC. Product: %(product)s Protocol: %(protocol)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:74 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s host: %(host)s initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:92 +#: cinder/volume/drivers/huawei/huawei_t.py:461 +#, python-format +msgid "initialize_connection: Target FC ports WWNS: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:101 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(ini)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:159 +#: cinder/volume/drivers/huawei/rest_common.py:1278 +#, python-format +msgid "" +"_get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " +"check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:206 +#: cinder/volume/drivers/huawei/rest_common.py:1083 +#, python-format +msgid "_get_tgt_iqn: iSCSI IP is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:234 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:248 +#, python-format +msgid "" +"_get_iscsi_tgt_port_info: Failed to get iSCSI port info. Please make sure" +" the iSCSI port IP %s is configured in array." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:323 +#: cinder/volume/drivers/huawei/huawei_t.py:552 +#, python-format +msgid "" +"terminate_connection: volume: %(vol)s, host: %(host)s, connector: " +"%(initiator)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:351 +#, python-format +msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:436 +msgid "validate_connector: The FC driver requires thewwpns in the connector." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:443 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:578 +#, python-format +msgid "_remove_fc_ports: FC port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:40 +#, python-format +msgid "parse_xml_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:129 +#, python-format +msgid "_get_host_os_type: Host %(ip)s OS type is %(os)s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:59 +#, python-format +msgid "HVS Request URL: %(url)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:60 +#, python-format +msgid "HVS Request Data: %(data)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:73 +#, python-format +msgid "HVS Response Data: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:75 +#, python-format +msgid "Bad response from server: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:82 +msgid "JSON transfer error" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:102 +#, python-format +msgid "Login error, reason is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:166 +#, python-format +msgid "" +"%(err)s\n" +"result: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:173 +#, python-format +msgid "%s \"data\" was not in result." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:208 +msgid "Can't find the Qos policy in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:246 +msgid "Can't find lun or lun group in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:280 +#, python-format +msgid "Invalid resource pool: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:298 +#, python-format +msgid "Get pool info error, pool name is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:327 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:354 +#, python-format +msgid "_stop_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:474 +#, python-format +msgid "" +"_mapping_hostgroup_and_lungroup: lun_group: %(lun_group)sview_id: " +"%(view_id)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:511 +#: cinder/volume/drivers/huawei/rest_common.py:543 +#, python-format +msgid "initiator name:%(initiator_name)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:527 +#, python-format +msgid "host lun id is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:553 +#, python-format +msgid "the free wwns %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:574 +#, python-format +msgid "the fc server properties is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:688 +#, python-format +msgid "JSON transfer data error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:874 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:937 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:964 +#, python-format +msgid "" +"PrefetchType config is wrong. PrefetchType must in 1,2,3,4. fetchtype " +"is:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:970 +msgid "Use default prefetch fetchtype. Prefetch fetchtype:Intelligent." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:982 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal.LUNcopy name: " +"%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1056 +#, python-format +msgid "" +"_get_iscsi_port_info: Failed to get iscsi port info through config IP " +"%(ip)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1101 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1124 +#, python-format +msgid "_parse_volume_type: type id: %(type_id)s config parameter is: %(params)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1157 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the configuration file " +"%(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1162 +#, python-format +msgid "The config parameters are: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1239 +#: cinder/volume/drivers/huawei/ssh_common.py:118 +#: cinder/volume/drivers/huawei/ssh_common.py:1265 +#, python-format +msgid "_check_conf_file: Config file invalid. %s must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1246 +#: cinder/volume/drivers/huawei/ssh_common.py:125 +msgid "_check_conf_file: Config file invalid. StoragePool must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1256 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1300 +msgid "Can not find lun in array" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:54 +#, python-format +msgid "ssh_read: Read SSH timeout. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:70 +msgid "No response message. Please check system status." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:101 +#: cinder/volume/drivers/huawei/ssh_common.py:1249 +msgid "do_setup" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:135 +#: cinder/volume/drivers/huawei/ssh_common.py:1287 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType is invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:169 +#, python-format +msgid "_get_login_info: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:224 +#, python-format +msgid "create_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:242 +#, python-format +msgid "" +"_name_translate: Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:279 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the element in configuration " +"file %(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:373 +#: cinder/volume/drivers/huawei/ssh_common.py:1451 +#, python-format +msgid "LUNType must be \"Thin\" or \"Thick\". LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:395 +msgid "" +"_parse_conf_lun_params: Use default prefetch type. 
Prefetch type: " +"Intelligent" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:421 +#, python-format +msgid "" +"_get_maximum_capacity_pool_id: Failed to get pool id. Please check config" +" file and make sure the StoragePool %s is created in storage array." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:436 +#, python-format +msgid "CLI command: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:466 +#, python-format +msgid "" +"_execute_cli: Can not connect to IP %(old)s, try to connect to the other " +"IP %(new)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:501 +#, python-format +msgid "_execute_cli: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:511 +#, python-format +msgid "delete_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:516 +#, python-format +msgid "delete_volume: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:570 +#, python-format +msgid "" +"create_volume_from_snapshot: snapshot name: %(snapshot)s, volume name: " +"%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:580 +#, python-format +msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:650 +#, python-format +msgid "_wait_for_luncopy: LUNcopy %(luncopyname)s status is %(status)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:688 +#, python-format +msgid "create_cloned_volume: src volume: %(src)s, tgt volume: %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:697 +#, python-format +msgid "Source volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:739 +#, python-format +msgid "" +"extend_volume: extended volume name: %(extended_name)s new added volume " +"name: %(added_name)s new added volume size: %(added_size)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:747 +#, python-format +msgid "extend_volume: volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:779 +#, python-format +msgid "create_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:785 +msgid "create_snapshot: Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:792 +#, python-format +msgid "create_snapshot: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:855 +#, python-format +msgid "delete_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:865 +#, python-format +msgid "" +"delete_snapshot: Can not delete snapshot %s for it is a source LUN of " +"LUNCopy." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:873 +#, python-format +msgid "delete_snapshot: Snapshot %(snap)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:916 +#, python-format +msgid "" +"%(func)s: %(msg)s\n" +"CLI command: %(cmd)s\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:933 +#, python-format +msgid "map_volume: Volume %s was not found." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1079 +#, python-format +msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1102 +#, python-format +msgid "remove_map: Host %s does not exist." 
+msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1106 +#, python-format +msgid "remove_map: Volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1119 +#, python-format +msgid "remove_map: No map between host %(host)s and volume %(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1138 +#, python-format +msgid "" +"_delete_map: There are IOs accessing the system. Retry to delete host map" +" %(mapid)s 10s later." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1146 +#, python-format +msgid "" +"_delete_map: Failed to delete host map %(mapid)s.\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1185 +msgid "_update_volume_stats: Updating volume stats." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1277 +msgid "_check_conf_file: Config file invalid. StoragePool must be specified." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1311 +msgid "" +"_get_device_type: The driver only supports Dorado5100 and Dorado 2100 G2 " +"now." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1389 +#, python-format +msgid "" +"create_volume_from_snapshot: %(device)s does not support create volume " +"from snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1396 +#, python-format +msgid "create_cloned_volume: %(device)s does not support clone volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1404 +#, python-format +msgid "extend_volume: %(device)s does not support extend volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1413 +#, python-format +msgid "create_snapshot: %(device)s does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:132 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:142 +#, python-format +msgid "Failed getting details for pool %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:179 +msgid "do_setup: No configured nodes." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:182 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:186 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:190 +msgid "Unable to determine system name" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:193 +msgid "Unable to determine system id" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:196 +msgid "Unable to determine pool extent size" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:203 +#: cinder/volume/drivers/netapp/iscsi.py:122 +#: cinder/volume/drivers/netapp/nfs.py:639 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:157 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:209 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:217 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:225 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:235 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:254 +msgid "The connector does not contain the required information." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:277 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:297 +msgid "CHAP secret exists for host but CHAP is disabled" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:302 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:314 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:316 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:333 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:342 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:365 +msgid "" +"Could not get FC connection information for the host-volume connection. " +"Is the host configured properly for FC connections?" 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:380 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:385 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:403 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:411 +msgid "terminate_connection: Failed to get host name from connector." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:421 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:447 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:459 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:470 +#, python-format +msgid "enter: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:474 +msgid "extend_volume: Extending a volume with snapshots is not supported." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:481 +#, python-format +msgid "leave: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:497 +#, python-format +msgid "enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:523 +#, python-format +msgid "leave: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:540 +#, python-format +msgid "" +"enter: retype: id=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:581 +#, python-format +msgid "" +"exit: retype: ild=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:622 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:623 +msgid "_update_volume_stats: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:44 +#, python-format +msgid "Could not find key in output of command %(cmd)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:64 +#, python-format +msgid "Failed to get code level (%s)." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:86 +#, python-format +msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:143 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:165 +#, python-format +msgid "Failed to find host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:178 +#, python-format +msgid "enter: get_host_from_connector: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:207 +#, python-format +msgid "leave: get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:218 +#, python-format +msgid "enter: create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:224 +msgid "create_host: Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:235 +msgid "create_host: No initiators or wwpns supplied." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:265 +#, python-format +msgid "leave: create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:275 +#, python-format +msgid "enter: map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:301 +#, python-format +msgid "" +"leave: map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host " +"%(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:311 +#, python-format +msgid "enter: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:318 +#, python-format +msgid "unmap_vol_from_host: No mapping of volume %(vol_name)s to any host found." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:324 +#, python-format +msgid "" +"unmap_vol_from_host: Multiple mappings of volume %(vol_name)s found, no " +"host specified." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:336 +#, python-format +msgid "" +"unmap_vol_from_host: No mapping of volume %(vol_name)s to host %(host) " +"found." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:348 +#, python-format +msgid "leave: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:377 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:383 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:390 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:397 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:402 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:408 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:417 +#, python-format +msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:452 +msgid "Protocol must be specified as ' iSCSI' or ' FC'." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:495 +#, python-format +msgid "enter: create_vdisk: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:498 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:525 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping%(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:535 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within theallotted %(to)d " +"seconds timeout. Terminating." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:544 +#, python-format +msgid "" +"enter: run_flashcopy: execute FlashCopy from source %(source)s to target " +"%(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:552 +#, python-format +msgid "leave: run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:572 +#, python-format +msgid "Loopcall: _check_vdisk_fc_mappings(), vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:595 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:631 +#, python-format +msgid "Calling _ensure_vdisk_no_fc_mappings: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:639 +#, python-format +msgid "enter: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:641 +#, python-format +msgid "Tried to delete non-existant vdisk %s." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:645 +#, python-format +msgid "leave: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:649 +#, python-format +msgid "enter: create_copy: snapshot %(src)s to %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:654 +#, python-format +msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:669 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt)s from vdisk %(src)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:691 +msgid "migrate_volume started without a vdisk copy in the expected pool." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:743 +#, python-format +msgid "" +"Ignore change IO group as storage code level is %(code_level)s, below " +"then 6.4.0.0" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:35 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:211 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:244 +#, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:53 +#, python-format +msgid "Expected no output from CLI command %(cmd)s, got %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:65 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:256 +#, python-format +msgid "" +"Failed to parse CLI output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:142 +msgid "Must pass wwpn or host to lsfabric." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:158 +#, python-format +msgid "Did not find success message nor error for %(fun)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:163 +msgid "" +"storwize_svc_multihostmap_enabled is set to False, not allowing multi " +"host mapping." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:347 +#, python-format +msgid "Did not find expected key %(key)s in %(fun)s: %(raw)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:382 +#, python-format +msgid "" +"Unexpected CLI response: header/row mismatch. header: %(header)s, row: " +"%(row)s" +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:419 +#, python-format +msgid "No element by given name %s." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:440 +msgid "Not a valid value for NaElement." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:444 +msgid "NaElement name cannot be null." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:468 +msgid "Type cannot be converted into NaElement." 
+msgstr "" + +#: cinder/volume/drivers/netapp/common.py:75 +msgid "Required configuration not found" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:103 +#, python-format +msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:109 +#, python-format +msgid "Storage family %s is not supported" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:116 +#, python-format +msgid "No default storage protocol found for storage family %(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:123 +#, python-format +msgid "" +"Protocol %(storage_protocol)s is not supported for storage family " +"%(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:130 +#, python-format +msgid "" +"NetApp driver of family %(storage_family)s and protocol " +"%(storage_protocol)s loaded" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:139 +msgid "Only loading netapp drivers supported." +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:158 +#, python-format +msgid "" +"The configured NetApp driver is deprecated. Please refer the link to " +"resolve the issue '%s'." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:69 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:105 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:150 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:166 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:175 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:191 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:227 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:232 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:238 +#, python-format +msgid "Failed to get LUN target details for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:249 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:252 +#, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:290 +#, python-format +msgid "Snapshot %s deletion successful" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:310 +#: cinder/volume/drivers/netapp/iscsi.py:565 +#: cinder/volume/drivers/netapp/nfs.py:99 +#: cinder/volume/drivers/netapp/nfs.py:206 +#, python-format +msgid "Resizing %s failed. Cleaning volume." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:325 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:412 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:431 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:511 +msgid "Object is not a NetApp LUN." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:543 +#, python-format +msgid "Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:545 +#, python-format +msgid "Error getting lun attribute. Exception: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:600 +#, python-format +msgid "No need to extend volume %s as it is already the requested new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:606 +#, python-format +msgid "Resizing lun %s directly to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:633 +#, python-format +msgid "Lun %(path)s geometry failed. Message - %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:662 +#, python-format +msgid "Moving lun %(name)s to %(new_name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:677 +#, python-format +msgid "Resizing lun %s using sub clone to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:684 +#, python-format +msgid "%s cannot be sub clone resized as it is hosted on compressed volume" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:690 +#, python-format +msgid "%s cannot be sub clone resized as it contains no blocks." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:707 +#, python-format +msgid "Post clone resize lun %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:718 +#, python-format +msgid "Failure staging lun %s to tmp." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:723 +#, python-format +msgid "Failure moving new cloned lun to %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:727 +#, python-format +msgid "Failure deleting staged tmp lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:730 +#, python-format +msgid "Unknown exception in post clone resize lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:732 +#, python-format +msgid "Exception details: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:736 +msgid "Getting lun block count." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:741 +#, python-format +msgid "Failure getting lun info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:785 +#, python-format +msgid "Failed to get vol with required size and extra specs for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:796 +#, python-format +msgid "Error provisioning vol %(name)s on %(volume)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:841 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:982 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:986 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1087 +msgid "Cluster ssc is not updated. No volume stats found." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1149 +#: cinder/volume/drivers/netapp/nfs.py:1080 +msgid "Unsupported ONTAP version. ONTAP version 7.3.1 and above is supported." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1153 +#: cinder/volume/drivers/netapp/nfs.py:1084 +#: cinder/volume/drivers/netapp/utils.py:320 +msgid "Api version could not be determined." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1164 +#, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1273 +#, python-format +msgid "Error finding luns for volume %s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1390 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1393 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1456 +msgid "Volume refresh job already running. Returning..." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1462 +#, python-format +msgid "Error refreshing vol capacity. Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1470 +#, python-format +msgid "Refreshing capacity info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:104 +#: cinder/volume/drivers/netapp/nfs.py:211 +#, python-format +msgid "NFS file %s not discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:224 +#, python-format +msgid "Copied image to volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:230 +#, python-format +msgid "Registering image in cache %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:237 +#, python-format +msgid "" +"Exception while registering image %(image_id)s in cache. Exception: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:250 +#, python-format +msgid "Found cache file for image %(image_id)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:263 +#, python-format +msgid "Cloning img from cache for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:271 +msgid "Image cache cleaning in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:282 +msgid "Image cache cleaning in progress." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:293 +#, python-format +msgid "Cleaning cache for share %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:298 +#, python-format +msgid "Files to be queued for deletion %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:305 +#, python-format +msgid "Exception during cache cleaning %(share)s. Message - %(ex)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:311 +msgid "Image cache cleaning done." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:336 +#, python-format +msgid "Bytes to free %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:343 +#, python-format +msgid "Delete file path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:358 +#, python-format +msgid "Deleting file at path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:363 +#, python-format +msgid "Exception during deleting %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:395 +#, python-format +msgid "Unexpected exception in cloning image %(image_id)s. 
Message: %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:411 +#, python-format +msgid "Cloning image %s from cache" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:415 +#, python-format +msgid "Cache share: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:425 +#, python-format +msgid "Unexpected exception during image cloning in share %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:431 +#, python-format +msgid "Cloning image %s directly in share" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:436 +#, python-format +msgid "Share is cloneable %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:443 +#, python-format +msgid "Image is raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:450 +#, python-format +msgid "Image will locally be converted to raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:457 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:467 +#, python-format +msgid "Performing post clone for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:474 +msgid "NFS file could not be discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:478 +msgid "Checking file for resize" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:482 +#, python-format +msgid "Resizing file to %sG" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:488 +msgid "Resizing image file failed." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:510 +msgid "Discover file retries exhausted." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:529 +#, python-format +msgid "Image location not in the expected format %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:557 +#, python-format +msgid "Found possible share matches %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:561 +msgid "Unexpected exception while short listing used share." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:599 +#, python-format +msgid "Extending volume %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:710 +#, python-format +msgid "Shares on vserver %s will only be used for provisioning." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:714 +#: cinder/volume/drivers/netapp/nfs.py:892 +msgid "No vserver set in config. SSC will be disabled." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:757 +#, python-format +msgid "Exception creating vol %(name)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:765 +#, python-format +msgid "Volume %s could not be created on shares." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:815 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:856 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:862 +#, python-format +msgid "" +"Cloning with params volume %(volume)s, src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:924 +msgid "No cluster ssc stats found. Wait for next volume stats update." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:941 +msgid "No shares found hence skipping ssc refresh." 
+msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:978 +#: cinder/volume/drivers/netapp/nfs.py:1221 +#, python-format +msgid "Shortlisted del elg files %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:983 +#: cinder/volume/drivers/netapp/nfs.py:1226 +#, python-format +msgid "Getting file usage for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:988 +#: cinder/volume/drivers/netapp/nfs.py:1231 +#, python-format +msgid "file-usage for path %(path)s is %(bytes)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1005 +#: cinder/volume/drivers/netapp/nfs.py:1268 +#, python-format +msgid "Share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1007 +#: cinder/volume/drivers/netapp/nfs.py:1270 +#, python-format +msgid "No share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1038 +#, python-format +msgid "Found volume %(vol)s for share %(share)s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1129 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1139 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:241 +#, python-format +msgid "Unexpected error while creating ssc vol list. Message - %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:272 +#, python-format +msgid "Exception querying aggr options. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:313 +#, python-format +msgid "Exception querying sis information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:347 +#, python-format +msgid "Exception querying mirror information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:379 +#, python-format +msgid "Exception querying storage disk. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:421 +#, python-format +msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:455 +#, python-format +msgid "Successfully completed stale refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:482 +#, python-format +msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:488 +#, python-format +msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:500 +msgid "Backend not a VolumeDriver." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:502 +msgid "Backend server not NaServer." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:505 +msgid "ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:517 +msgid "refresh stale ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:616 +msgid "Fatal error: User not permitted to query NetApp volumes." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:623 +#, python-format +msgid "" +"The user does not have access or sufficient privileges to use all ssc " +"apis. The ssc features %s may not work as expected." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:122 +msgid "ems executed successfully." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:124 +#, python-format +msgid "Failed to invoke ems. 
Message : %s" +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:137 +msgid "" +"It is not the recommended way to use drivers by NetApp. Please use " +"NetAppDriver to achieve the functionality." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:160 +msgid "Requires an NaServer instance." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:317 +msgid "Unsupported Clustered Data ONTAP version." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:99 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:150 +#, python-format +msgid "Extending volume: %(id)s New size: %(size)s GB" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:166 +#, python-format +msgid "Volume %s does not exist, it seems it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:179 +#, python-format +msgid "Cannot delete snapshot %(origin): %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:190 +#, python-format +msgid "Creating temp snapshot of the original volume: %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:200 +#: cinder/volume/drivers/nexenta/nfs.py:200 +#, python-format +msgid "Volume creation failed, deleting created snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:205 +#: cinder/volume/drivers/nexenta/nfs.py:205 +#, python-format +msgid "Failed to delete zfs snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:223 +#, python-format +msgid "Enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:250 +#, python-format +msgid "Remote NexentaStor appliance at %s should be SSH-bound." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:267 +#, python-format +msgid "" +"Cannot send source snapshot %(src)s to destination %(dst)s. Reason: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:275 +#, python-format +msgid "" +"Cannot delete temporary source snapshot %(src)s on NexentaStor Appliance:" +" %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:281 +#, python-format +msgid "Cannot delete source volume %(volume)s on NexentaStor Appliance: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:318 +#, python-format +msgid "Snapshot %s does not exist, it seems it was already deleted." 
+msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:439 +#: cinder/volume/drivers/windows/windows_utils.py:230 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:449 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:461 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:471 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:481 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:514 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:522 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:83 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:88 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:89 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:90 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:96 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:85 +#, python-format +msgid "Volume %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:89 +#, python-format +msgid "Folder %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:114 +#, python-format +msgid "Creating folder on Nexenta Store %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:146 +#, python-format +msgid "Cannot destroy created folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:176 +#, python-format +msgid "Cannot destroy cloned folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:227 +#, python-format +msgid "Folder %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:237 +#: cinder/volume/drivers/nexenta/nfs.py:268 +#, python-format +msgid "Snapshot %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:302 +#, python-format +msgid "Creating regular file: %s.This may take some time." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:313 +#, python-format +msgid "Regular file: %s created." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:365 +#, python-format +msgid "Sharing folder %s on Nexenta Store" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:393 +#, python-format +msgid "Shares loaded: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/utils.py:46 +#, python-format +msgid "Invalid value: \"%s\"" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:93 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:99 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:107 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:137 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:190 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:246 +#, python-format +msgid "Snapshot info: %(name)s => %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:321 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:79 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:166 +#, python-format +msgid "Invalid hp3parclient version. Version %s or greater required." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:179 +#, python-format +msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:193 +#, python-format +msgid "HP3PARCommon %(common_ver)s, hp3parclient %(rest_ver)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:212 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:494 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:228 +#, python-format +msgid "Failed to get domain because CPG (%s) doesn't exist on array." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:247 +#, python-format +msgid "Error extending volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:347 +#, python-format +msgid "command %s failed" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:390 +#, python-format +msgid "Error running ssh command: %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:586 +#, python-format +msgid "VV Set %s does not exist." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:633 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:684 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:752 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1004 +#, python-format +msgid "Failure in update_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1019 +#, python-format +msgid "Failure in clear_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1031 +#, python-format +msgid "Error attaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1039 +#, python-format +msgid "Error detaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:124 +#, python-format +msgid "Invalid IP address format '%s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:158 +#, python-format +msgid "" +"Found invalid iSCSI IP address(s) in configuration option(s) " +"hp3par_iscsi_ips or iscsi_ip_address '%s.'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:164 +msgid "At least one valid iSCSI IP address must be set." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:266 +msgid "Least busy iSCSI port not found, using first iSCSI port in list." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:75 +#, python-format +msgid "Failure while invoking function: %(func)s. Error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:162 +#, python-format +msgid "Error while terminating session: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:165 +msgid "Successfully established connection to the server." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:172 +#, python-format +msgid "Error while logging out the user: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:218 +#, python-format +msgid "" +"Not authenticated error occurred. Will create session and try API call " +"again: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:258 +#, python-format +msgid "Task: %(task)s progress: %(prog)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:262 +#, python-format +msgid "Task %s status: success." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:266 +#: cinder/volume/drivers/vmware/api.py:271 +#, python-format +msgid "Task: %(task)s failed with error: %(err)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:290 +msgid "Lease is ready." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:294 +msgid "Lease initializing..." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:304 +#, python-format +msgid "Error: unknown lease state %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:51 +#, python-format +msgid "Read %(bytes)s out of %(max)s from ThreadSafePipe." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:56 +#, python-format +msgid "Completed transfer of size %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:102 +#, python-format +msgid "Initiating image service update on image: %(image)s with meta: %(meta)s" +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:117 +#, python-format +msgid "Glance image: %s is now active." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:123 +#, python-format +msgid "Glance image: %s is in killed state." 
+msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:132 +#, python-format +msgid "Glance image %(id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:171 +#, python-format +msgid "" +"Exception during HTTP connection close in VMwareHTTPWrite. Exception is " +"%s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:203 +#: cinder/volume/drivers/vmware/read_write_util.py:292 +msgid "Could not retrieve URL from lease." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:206 +#, python-format +msgid "Opening vmdk url: %s for write." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:231 +#, python-format +msgid "Written %s bytes to vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:242 +#: cinder/volume/drivers/vmware/read_write_util.py:318 +#, python-format +msgid "Updating progress to %s percent." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:258 +#: cinder/volume/drivers/vmware/read_write_util.py:334 +msgid "Lease released." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:260 +#: cinder/volume/drivers/vmware/read_write_util.py:336 +#, python-format +msgid "Lease is already in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:295 +#, python-format +msgid "Opening vmdk url: %s for read." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:307 +#, python-format +msgid "Read %s bytes from vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:150 +#, python-format +msgid "Error(s): %s occurred in the call to RetrievePropertiesEx." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:189 +#, python-format +msgid "No such SOAP method %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:198 +#, python-format +msgid "httplib error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:209 +#, python-format +msgid "Socket error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:218 +#, python-format +msgid "Type error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:225 +#, python-format +msgid "Error in %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:112 +#, python-format +msgid "Returning spec value %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:115 +#, python-format +msgid "Invalid spec value: %s specified." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:118 +#, python-format +msgid "Returning default spec value: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:169 +#, python-format +msgid "%s not set." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:174 +#, python-format +msgid "Successfully setup driver: %(driver)s for server: %(ip)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:219 +msgid "Backing not available, no operation to be performed." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:287 +#, python-format +msgid "" +"Unable to pick datastore to accommodate %(size)s bytes from the " +"datastores: %(dss)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:293 +#, python-format +msgid "" +"Selected datastore: %(datastore)s with %(host_count)d connected host(s) " +"for the volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:375 +#, python-format +msgid "" +"Unable to find suitable datastore for volume of size: %(vol)s GB under " +"host: %(host)s. 
More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:385 +#, python-format +msgid "Unable to find host to accommodate a disk of size: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:412 +#, python-format +msgid "" +"Unable to find suitable datastore for volume: %(vol)s under host: " +"%(host)s. More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:422 +#, python-format +msgid "Unable to create volume: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:441 +#, python-format +msgid "The instance: %s for which initialize connection is called, exists." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:448 +#, python-format +msgid "There is no backing for the volume: %s. Need to create one." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:456 +msgid "The instance for which initialize connection is called, does not exist." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:461 +#, python-format +msgid "Trying to boot from an empty volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:470 +#, python-format +msgid "" +"Returning connection_info: %(info)s for volume: %(volume)s with " +"connector: %(connector)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:518 +#, python-format +msgid "Snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:523 +#, python-format +msgid "There is no backing, so will not create snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:528 +#, python-format +msgid "Successfully created snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:549 +#, python-format +msgid "Delete snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:554 +#, python-format +msgid "There is no backing, and so there is no snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:558 +#, python-format +msgid "Successfully deleted snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:586 +#, python-format +msgid "Successfully cloned new backing: %(back)s from source VMDK file: %(vmdk)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:603 +#, python-format +msgid "" +"There is no backing for the source volume: %(svol)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:633 +#, python-format +msgid "" +"There is no backing for the source snapshot: %(snap)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:642 +#: cinder/volume/drivers/vmware/vmdk.py:982 +#, python-format +msgid "" +"There is no snapshot point for the snapshoted volume: %(snap)s. Not " +"creating any backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:678 +#, python-format +msgid "Cannot create image of disk format: %s. Only vmdk disk format is accepted." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:713 +#: cinder/volume/drivers/vmware/vmdk.py:771 +#, python-format +msgid "Fetching glance image: %(id)s to server: %(host)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:722 +#: cinder/volume/drivers/vmware/vmdk.py:792 +#, python-format +msgid "Done copying image: %(id)s to volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:725 +#, python-format +msgid "" +"Exception in copy_image_to_volume: %(excep)s. Deleting the backing: " +"%(back)s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:746 +#, python-format +msgid "Exception in _select_ds_for_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:749 +#, python-format +msgid "Selected datastore %(ds)s for new volume of size %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:784 +#, python-format +msgid "Exception in copy_image_to_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:787 +#, python-format +msgid "Deleting the backing: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:808 +#, python-format +msgid "Copy glance image: %s to create new volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:842 +msgid "Upload to glance of attached volume is not supported." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:847 +#, python-format +msgid "Copy Volume: %s to new image." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:853 +#, python-format +msgid "Backing not found, creating for volume: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:872 +#, python-format +msgid "Done copying volume %(vol)s to a new image %(img)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:922 +#, python-format +msgid "Relocating volume: %(backing)s to %(ds)s and %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:961 +#: cinder/volume/drivers/vmware/volumeops.py:630 +#, python-format +msgid "Successfully created clone: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:974 +#, python-format +msgid "" +"There is no backing for the snapshoted volume: %(snap)s. Not creating any" +" backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1010 +#, python-format +msgid "" +"There is no backing for the source volume: %(src)s. Not creating any " +"backing for volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1018 +#, python-format +msgid "Linked clone of source volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:94 +#, python-format +msgid "Downloading image: %s from glance image server as a flat vmdk file." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:107 +#: cinder/volume/drivers/vmware/vmware_images.py:126 +#, python-format +msgid "Downloaded image: %s from glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:113 +#, python-format +msgid "Downloading image: %s from glance image server using HttpNfc import." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:132 +#, python-format +msgid "Uploading image: %s to the Glance image server using HttpNfc export." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:158 +#, python-format +msgid "Uploaded image: %s to the Glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:87 +#, python-format +msgid "Did not find any backing with name: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:94 +#, python-format +msgid "Deleting the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:97 +#, python-format +msgid "Initiated deletion of VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:99 +#, python-format +msgid "Deleted the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:237 +#, python-format +msgid "There are no valid datastores attached to %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:289 +#, python-format +msgid "" +"Creating folder: %(child_folder_name)s under parent folder: " +"%(parent_folder)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:306 +#, python-format +msgid "Child folder already present: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:314 +#, python-format +msgid "Created child folder: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:365 +#, python-format +msgid "Spec for creating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:383 +#, python-format +msgid "" +"Creating volume backing name: %(name)s disk_type: %(disk_type)s size_kb: " +"%(size_kb)s at folder: %(folder)s resourse pool: %(resource_pool)s " +"datastore name: %(ds_name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:395 +#, python-format +msgid "Initiated creation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:398 +#, python-format +msgid "Successfully created volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:438 +#, python-format +msgid "Spec for relocating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:452 +#, python-format +msgid "" +"Relocating backing: %(backing)s to datastore: %(ds)s and resource pool: " +"%(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:462 +#, python-format +msgid "Initiated relocation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:464 +#, python-format +msgid "" +"Successfully relocated volume backing: %(backing)s to datastore: %(ds)s " +"and resource pool: %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:474 +#, python-format +msgid "Moving backing: %(backing)s to folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:479 +#, python-format +msgid "Initiated move of volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:482 +#, python-format +msgid "Successfully moved volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:494 +#, python-format +msgid "Snapshoting backing: %(backing)s with name: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:501 +#, python-format +msgid "Initiated snapshot of volume backing: %(backing)s named: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:505 +#, python-format +msgid "Successfully created snapshot: %(snap)s for volume backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:553 +#, python-format +msgid "Deleting the snapshot: %(name)s from backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:558 +#, python-format +msgid "" +"Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " +"delete anything." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:565 +#, python-format +msgid "Initiated snapshot: %(name)s deletion for backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:569 +#, python-format +msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:597 +#, python-format +msgid "Spec for cloning the backing: %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:613 +#, python-format +msgid "" +"Creating a clone of backing: %(back)s, named: %(name)s, clone type: " +"%(type)s from snapshot: %(snap)s on datastore: %(ds)s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:627 +#, python-format +msgid "Initiated clone of backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:638 +#, python-format +msgid "Deleting file: %(file)s under datacenter: %(dc)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:646 +#, python-format +msgid "Initiated deletion via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:648 +#, python-format +msgid "Successfully deleted file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:701 +msgid "Copying disk data before snapshot of the VM" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:711 +#, python-format +msgid "Initiated copying disk data via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:713 +#, python-format +msgid "Successfully copied disk at: %(src)s to: %(dest)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:722 +#, python-format +msgid "Deleting vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:729 +#, python-format +msgid "Initiated deleting vmdk file via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:731 +#, python-format +msgid "Deleted vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/windows/windows.py:102 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:47 +#, python-format +msgid "" +"check_for_setup_error: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:53 +msgid "check_for_setup_error: there is no ISCSI traffic listening." +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:63 +#, python-format +msgid "" +"get_host_information: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:73 +#, python-format +msgid "" +"get_host_information: the ISCSI target information could not be " +"retrieved. WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:105 +#, python-format +msgid "" +"associate_initiator_with_iscsi_target: an association between initiator: " +"%(init)s and target name: %(target)s could not be established. WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:123 +#, python-format +msgid "" +"delete_iscsi_target: error when deleting the iscsi target associated with" +" target name: %(target)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:139 +#, python-format +msgid "" +"create_volume: error when creating the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:157 +#, python-format +msgid "" +"delete_volume: error when deleting the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:177 +#, python-format +msgid "" +"create_snapshot: error when creating the snapshot name: %(vol_name)s . 
" +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:193 +#, python-format +msgid "" +"create_volume_from_snapshot: error when creating the volume name: " +"%(vol_name)s from snapshot name: %(snap_name)s. WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:208 +#, python-format +msgid "" +"delete_snapshot: error when deleting the snapshot name: %(snap_name)s . " +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:223 +#, python-format +msgid "" +"create_iscsi_target: error when creating iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:240 +#, python-format +msgid "" +"remove_iscsi_target: error when deleting iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:255 +#, python-format +msgid "" +"add_disk_to_target: error adding disk associated to volume : %(vol_name)s" +" to the target name: %(tar_name)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:273 +#, python-format +msgid "" +"copy_vhd_disk: error when copying disk from source path : %(src_path)s to" +" destination path: %(dest_path)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:290 +#, python-format +msgid "" +"extend: error when extending the volume: %(vol_name)s .WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/flows/common.py:52 +#, python-format +msgid "Restoring source %(source_volid)s status to %(status)s" +msgstr "" + +#: cinder/volume/flows/common.py:58 +#, python-format +msgid "" +"Failed setting source volume %(source_volid)s back to its initial " +"%(source_status)s status" +msgstr "" + +#: cinder/volume/flows/common.py:83 +#, python-format +msgid "Updating volume: %(volume_id)s with %(update)s due to: %(reason)s" +msgstr "" + +#: cinder/volume/flows/common.py:90 +#: cinder/volume/flows/manager/create_volume.py:676 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(update)s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:81 +#, python-format +msgid "Originating snapshot status must be one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:103 +#, python-format +msgid "" +"Unable to create a volume from an originating source volume when its " +"status is not one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:126 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than the snapshot size " +"%(snap_size)sGB. They must be >= original snapshot size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:135 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than original volume size " +"%(source_size)sGB. They must be >= original volume size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:144 +#, python-format +msgid "Volume size %(size)s must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:186 +#, python-format +msgid "" +"Size of specified image %(image_size)sGB is larger than volume size " +"%(volume_size)sGB." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:194 +#, python-format +msgid "" +"Volume size %(volume_size)sGB cannot be smaller than the image minDisk " +"size %(min_disk)sGB." 
+msgstr "" + +#: cinder/volume/flows/api/create_volume.py:212 +#, python-format +msgid "Metadata property key %s greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:217 +#, python-format +msgid "Metadata property key %s value greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:254 +#, python-format +msgid "Availability zone '%s' is invalid" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:267 +msgid "Volume must be in the same availability zone as the snapshot" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:276 +msgid "Volume must be in the same availability zone as the source volume" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:315 +msgid "Volume type will be changed to be the same as the source volume." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:463 +#, python-format +msgid "Failed destroying volume entry %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:546 +#, python-format +msgid "Failed rolling back quota for %s reservations" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:590 +#, python-format +msgid "Failed to update quota for deleting volume: %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:678 +#: cinder/volume/flows/manager/create_volume.py:192 +#, python-format +msgid "Volume %s: create failed" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:682 +msgid "Unexpected build error:" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:105 +#, python-format +msgid "" +"Volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d due to " +"%(reason)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:124 +#, python-format +msgid "Volume %s: re-scheduled" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:141 +#, python-format +msgid "Updating volume %(volume_id)s with %(update)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:146 +#, python-format +msgid "Volume %s: resetting 'creating' status failed." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:165 +#, python-format +msgid "Volume %s: rescheduling failed" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:308 +#, python-format +msgid "" +"Failed notifying about the volume action %(event)s for volume " +"%(volume_id)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:345 +#, python-format +msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:347 +#, python-format +msgid "" +"Failed updating volume %(vol_id)s metadata using the provided " +"%(src_type)s %(src_id)s metadata" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:405 +#, python-format +msgid "" +"Failed fetching snapshot %(snapshot_id)s bootable flag using the provided" +" glance snapshot %(snapshot_ref_id)s volume reference" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:418 +#, python-format +msgid "Marking volume %s as bootable." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:421 +#, python-format +msgid "Failed updating volume %(volume_id)s bootable flag to true" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:448 +#, python-format +msgid "" +"Attempting download of %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s." 
+msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:455 +#: cinder/volume/flows/manager/create_volume.py:466 +#, python-format +msgid "" +"Failed to copy image %(image_id)s to volume: %(volume_id)s, error: " +"%(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:461 +#, python-format +msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:475 +#, python-format +msgid "" +"Downloaded image %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s successfully." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:512 +#, python-format +msgid "" +"Creating volume glance metadata for volume %(volume_id)s backed by image " +"%(image_id)s with: %(vol_metadata)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:526 +#, python-format +msgid "" +"Cloning %(volume_id)s from image %(image_id)s at location " +"%(image_location)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:552 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(updates)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:574 +#, python-format +msgid "Unable to create volume. Volume driver %s not initialized" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:588 +#, python-format +msgid "" +"Volume %(volume_id)s: being created using %(functor)s with specification:" +" %(volume_spec)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:611 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with creation provided " +"model %(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:619 +#, python-format +msgid "Volume %s: creating export" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:633 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with driver provided model " +"%(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:680 +#, python-format +msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" +msgstr "" + +#~ msgid "Connection to glance failed" +#~ msgstr "" + +#~ msgid "Invalid snapshot" +#~ msgstr "" + +#~ msgid "Invalid source volume %(reason)s." +#~ msgstr "" + +#~ msgid "The request is invalid." +#~ msgstr "" + +#~ msgid "Invalid input received" +#~ msgstr "" + +#~ msgid "Invalid volume type" +#~ msgstr "" + +#~ msgid "Invalid volume" +#~ msgstr "" + +#~ msgid "Invalid host" +#~ msgstr "" + +#~ msgid "Invalid auth key" +#~ msgstr "" + +#~ msgid "Volume %(volume_id)s persistence file could not be found." +#~ msgstr "" + +#~ msgid "Invalid metadata" +#~ msgstr "" + +#~ msgid "Invalid metadata size" +#~ msgstr "" + +#~ msgid "No disk at %(location)s" +#~ msgstr "" + +#~ msgid "" +#~ msgstr "" + +#~ msgid "Class %(class_name)s could not be found: %(exception)s" +#~ msgstr "" + +#~ msgid "Action not allowed." +#~ msgstr "" + +#~ msgid "Key pair %(key_name)s already exists." +#~ msgstr "" + +#~ msgid "Migration error" +#~ msgstr "" + +#~ msgid "Quota exceeded" +#~ msgstr "" + +#~ msgid "Maximum volume/snapshot size exceeded" +#~ msgstr "" + +#~ msgid "3PAR Host already exists: %(err)s. %(info)s" +#~ msgstr "" + +#~ msgid "Backup volume %(volume_id)s type not recognised." 
+#~ msgstr "" + +#~ msgid "Connection to swift failed" +#~ msgstr "" + +#~ msgid "Volume migration failed" +#~ msgstr "" + +#~ msgid "SSH command injection detected" +#~ msgstr "" + +#~ msgid "Invalid qos specs" +#~ msgstr "" + +#~ msgid "base image still has %s snapshots so not deleting base image" +#~ msgstr "" + +#~ msgid "restore finished." +#~ msgstr "" + +#~ msgid "Resize volume \"%(name)s\" to %(size)s" +#~ msgstr "" + +#~ msgid "Volume \"%(name)s\" resized. New size is %(size)s" +#~ msgstr "" + +#~ msgid "Snapshot file at %s does not exist." +#~ msgstr "" + +#~ msgid "Invalid snapshot backing file format: %s" +#~ msgstr "" + +#~ msgid "Extend volume from %(old_size) to %(new_size)" +#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %s does not exist" +#~ msgstr "" + +#~ msgid "Disk not found: %s" +#~ msgstr "" + +#~ msgid "read timed out" +#~ msgstr "" + +#~ msgid "do_setup." +#~ msgstr "" + +#~ msgid "check_for_setup_error." +#~ msgstr "" + +#~ msgid "check_for_setup_error: Can not get device type." +#~ msgstr "" + +#~ msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +#~ msgstr "" + +#~ msgid "_get_device_type: Storage Pool must be configured." +#~ msgstr "" + +#~ msgid "create_volume:volume name: %s." +#~ msgstr "" + +#~ msgid "delete_volume: volume name: %s." +#~ msgstr "" + +#~ msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +#~ msgstr "" + +#~ msgid "create_export: volume name:%s" +#~ msgstr "" + +#~ msgid "create_export:Volume %(name)s does not exist." +#~ msgstr "" + +#~ msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +#~ msgstr "" + +#~ msgid "terminate_connection:Host does not exist. Host name:%(host)s." +#~ msgstr "" + +#~ msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +#~ msgstr "" + +#~ msgid "create_snapshot:Device does not support snapshot." +#~ msgstr "" + +#~ msgid "create_snapshot:Resource pool needs 1GB valid size at least." +#~ msgstr "" + +#~ msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +#~ msgstr "" + +#~ msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s" +#~ msgstr "" + +#~ msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +#~ msgstr "" + +#~ msgid "delete_snapshot:Device does not support snapshot." +#~ msgstr "" + +#~ msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +#~ msgstr "" + +#~ msgid "_check_conf_file: %s" +#~ msgstr "" + +#~ msgid "Write login information to xml error. %s" +#~ msgstr "" + +#~ msgid "_get_login_info error. %s" +#~ msgstr "" + +#~ msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +#~ msgstr "" + +#~ msgid "_get_lun_set_info:%s" +#~ msgstr "" + +#~ msgid "_get_iscsi_info:%s" +#~ msgstr "" + +#~ msgid "CLI command:%s" +#~ msgstr "" + +#~ msgid "_execute_cli:%s" +#~ msgstr "" + +#~ msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +#~ msgstr "" + +#~ msgid "_get_tgt_iqn:iSCSI IP is %s." +#~ msgstr "" + +#~ msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +#~ msgstr "" + +#~ msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +#~ msgstr "" + +#~ msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +#~ msgstr "" + +#~ msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." +#~ msgstr "" + +#~ msgid "Login to 3PAR array invalid" +#~ msgstr "" + +#~ msgid "Size for volume: %s not found, skipping secure delete." 
+#~ msgstr "" + +#~ msgid "Could not find attribute for LUN named %s" +#~ msgstr "" + +#~ msgid "Cleaning up incomplete backup operations" +#~ msgstr "" + +#~ msgid "Resetting volume %s to available (was backing-up)" +#~ msgstr "" + +#~ msgid "Resetting volume %s to error_restoring (was restoring-backup)" +#~ msgstr "" + +#~ msgid "Resetting backup %s to error (was creating)" +#~ msgstr "" + +#~ msgid "Resetting backup %s to available (was restoring)" +#~ msgstr "" + +#~ msgid "Resuming delete on backup: %s" +#~ msgstr "" + +#~ msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +#~ msgstr "" + +#~ msgid "create_backup finished. backup: %s" +#~ msgstr "" + +#~ msgid "delete_backup started, backup: %s" +#~ msgstr "" + +#~ msgid "delete_backup finished, backup %s deleted" +#~ msgstr "" + +#~ msgid "JSON transfer Error" +#~ msgstr "" + +#~ msgid "create volume error: %(err)s" +#~ msgstr "" + +#~ msgid "Create snapshot error." +#~ msgstr "" + +#~ msgid "Create luncopy error." +#~ msgstr "" + +#~ msgid "_find_host_lun_id transfer data error! " +#~ msgstr "" + +#~ msgid "ssh_read: Read SSH timeout." +#~ msgstr "" + +#~ msgid "There are no hosts in the inventory." +#~ msgstr "" + +#~ msgid "Unable to create volume: %(vol)s on the hosts: %(hosts)s." +#~ msgstr "" + +#~ msgid "Successfully cloned new backing: %s." +#~ msgstr "" + +#~ msgid "Successfully reverted clone: %(clone)s to snapshot: %(snapshot)s." +#~ msgstr "" + +#~ msgid "Copying backing files from %(src)s to %(dest)s." +#~ msgstr "" + +#~ msgid "Initiated copying of backing via task: %s." +#~ msgstr "" + +#~ msgid "Successfully copied backing to %s." +#~ msgstr "" + +#~ msgid "Registering backing at path: %s to inventory." +#~ msgstr "" + +#~ msgid "Initiated registring backing, task: %s." +#~ msgstr "" + +#~ msgid "Successfully registered backing: %s." +#~ msgstr "" + +#~ msgid "Reverting backing to snapshot: %s." +#~ msgstr "" + +#~ msgid "Initiated reverting snapshot via task: %s." +#~ msgstr "" + +#~ msgid "Successfully reverted to snapshot: %s." +#~ msgstr "" + +#~ msgid "Successfully copied disk data to: %s." +#~ msgstr "" + +#~ msgid "Error(s): %s occurred in the call to RetrieveProperties." +#~ msgstr "" + +#~ msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +#~ msgstr "" + +#~ msgid "Deploy v1 of the Cinder API. " +#~ msgstr "" + +#~ msgid "Deploy v2 of the Cinder API. " +#~ msgstr "" + +#~ msgid "_read_xml:%s" +#~ msgstr "" + +#~ msgid "request ip info is %s." +#~ msgstr "" + +#~ msgid "new str info is %s." +#~ msgstr "" + +#~ msgid "Failed to create iser target for volume %(volume_id)s." +#~ msgstr "" + +#~ msgid "Failed to remove iser target for volume %(volume_id)s." 
+#~ msgstr "" + +#~ msgid "rtstool is not installed correctly" +#~ msgstr "" + +#~ msgid "Creating iser_target for: %s" +#~ msgstr "" + +#~ msgid "Failed to create iser target for volume id:%(vol_id)s: %(e)s" +#~ msgstr "" + +#~ msgid "Removing iser_target for: %s" +#~ msgstr "" + +#~ msgid "Failed to remove iser target for volume id:%(vol_id)s: %(e)s" +#~ msgstr "" + +#~ msgid "Volume %s does not exist, it seems it was already deleted" +#~ msgstr "" + +#~ msgid "Executing zfs send/recv on the appliance" +#~ msgstr "" + +#~ msgid "zfs send/recv done, new volume %s created" +#~ msgstr "" + +#~ msgid "Failed to delete temp snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" + +#~ msgid "Failed to delete zfs recv snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" + +#~ msgid "rbd export-diff failed - %s" +#~ msgstr "" + +#~ msgid "rbd import-diff failed - %s" +#~ msgstr "" + +#~ msgid "%s is not on GPFS. Perhaps GPFS not mounted." +#~ msgstr "" + +#~ msgid "Folder %s does not exist, it seems it was already deleted." +#~ msgstr "" + +#~ msgid "No 'os-update_readonly_flag' was specified in request." +#~ msgstr "" + +#~ msgid "Volume 'readonly' flag must be specified in request as a boolean." +#~ msgstr "" + +#~ msgid "ISER provider_location not stored, using discovery" +#~ msgstr "" + +#~ msgid "Could not find iSER export for volume %s" +#~ msgstr "" + +#~ msgid "ISER Discovery: Found %s" +#~ msgstr "" + +#~ msgid "Failed to access the device on the path %(path)s: %(error)s." +#~ msgstr "" + +#~ msgid "iSER device not found at %s" +#~ msgstr "" + +#~ msgid "Found iSER node %(host_device)s (after %(tries)s rescans)." +#~ msgstr "" + +#~ msgid "Skipping ensure_export. No iser_target provisioned for volume: %s" +#~ msgstr "" + +#~ msgid "Skipping remove_export. No iser_target provisioned for volume: %s" +#~ msgstr "" + +#~ msgid "Downloading image: %s from glance image server." +#~ msgstr "" + +#~ msgid "Uploading image: %s to the Glance image server." +#~ msgstr "" + +#~ msgid "Invalid request body" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: prefix %s" +#~ msgstr "" + +#~ msgid "Schedule volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete schedule volume using flow: %s" +#~ msgstr "" + +#~ msgid "Create volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete create volume workflow" +#~ msgstr "" + +#~ msgid "Expected volume result not found" +#~ msgstr "" + +#~ msgid "Manager volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete manager volume workflow" +#~ msgstr "" + +#~ msgid "Unable to update stats, driver is uninitialized" +#~ msgstr "" + +#~ msgid "Bad reponse from server: %s" +#~ msgstr "" + +#~ msgid "%(flow)s has moved into state %(state)s from state %(old_state)s" +#~ msgstr "" + +#~ msgid "No request spec, will not reschedule" +#~ msgstr "" + +#~ msgid "No retry filter property or associated retry info, will not reschedule" +#~ msgstr "" + +#~ msgid "Retry info not present, will not reschedule" +#~ msgstr "" + +#~ msgid "Clear capabilities" +#~ msgstr "" + +#~ msgid "This usually means the volume was never succesfully created." +#~ msgstr "" + +#~ msgid "setting LU uppper (end) limit to %s" +#~ msgstr "" + +#~ msgid "Can't find lun or lun goup in array" +#~ msgstr "" + +#~ msgid "Volume to be restored to is smaller than the backup to be restored" +#~ msgstr "" + +#~ msgid "Volume driver '%(driver)s' not initialized." 
+#~ msgstr "" + +#~ msgid "in looping call" +#~ msgstr "" + +#~ msgid "Is the appropriate service running?" +#~ msgstr "" + +#~ msgid "Could not find another host" +#~ msgstr "" + +#~ msgid "Not enough allocatable volume gigabytes remaining" +#~ msgstr "" + +#~ msgid "Unable to update stats on non-intialized Volume Group: %s" +#~ msgstr "" + +#~ msgid "do_setup: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "migrate_volume started with more than one vdisk copy" +#~ msgstr "" + +#~ msgid "migrate_volume: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "Selected datastore: %s for the volume." +#~ msgstr "" + +#~ msgid "There are no valid datastores present under %s." +#~ msgstr "" + +#~ msgid "Unable to create volume, driver not initialized" +#~ msgstr "" + +#~ msgid "Migration %(migration_id)s could not be found." +#~ msgstr "" + +#~ msgid "Bad driver response status: %(status)s" +#~ msgstr "" + +#~ msgid "Instance %(instance_id)s could not be found." +#~ msgstr "" + +#~ msgid "Volume retype failed: %(reason)s" +#~ msgstr "" + +#~ msgid "SIGTERM received" +#~ msgstr "" + +#~ msgid "Child %(pid)d exited with status %(code)d" +#~ msgstr "" + +#~ msgid "_wait_child %d" +#~ msgstr "" + +#~ msgid "wait wrap.failed %s" +#~ msgstr "" + +#~ msgid "" +#~ "Report interval must be less than " +#~ "service down time. Current config: " +#~ ". Setting " +#~ "service_down_time to: %(new_service_down_time)s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target for volume %(name)s." +#~ msgstr "" + +#~ msgid "Updating iscsi target: %s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target %(name)s: %(e)s" +#~ msgstr "" + +#~ msgid "Caught '%(exception)s' exception." +#~ msgstr "" + +#~ msgid "Get code level failed" +#~ msgstr "" + +#~ msgid "do_setup: Could not get system name" +#~ msgstr "" + +#~ msgid "Failed to get license information." +#~ msgstr "" + +#~ msgid "do_setup: No configured nodes" +#~ msgstr "" + +#~ msgid "enter: _get_chap_secret_for_host: host name %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_chap_secret_for_host: host name " +#~ "%(host_name)s with secret %(chap_secret)s" +#~ msgstr "" + +#~ msgid "" +#~ "_create_host: Cannot clean host name. " +#~ "Host name is not unicode or string" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: %s" +#~ msgstr "" + +#~ msgid "leave: _get_host_from_connector: host %s" +#~ msgstr "" + +#~ msgid "enter: _create_host: host %s" +#~ msgstr "" + +#~ msgid "_create_host: No connector ports" +#~ msgstr "" + +#~ msgid "leave: _create_host: host %(host)s - %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +#~ msgstr "" + +#~ msgid "" +#~ "storwize_svc_multihostmap_enabled is set to " +#~ "False, Not allow multi host mapping" +#~ msgstr "" + +#~ msgid "volume %s mapping to multi host" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _map_vol_to_host: LUN %(result_lun)s, " +#~ "volume %(volume_name)s, host %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "leave: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "_create_host failed to return the host name." +#~ msgstr "" + +#~ msgid "_get_host_from_connector failed to return the host name for connector" +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to any host found." +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: Multiple mappings of " +#~ "volume %(vol_name)s found, no host " +#~ "specified." 
+#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to host %(host_name)s found" +#~ msgstr "" + +#~ msgid "protocol must be specified as ' iSCSI' or ' FC'" +#~ msgstr "" + +#~ msgid "enter: _create_vdisk: vdisk %s " +#~ msgstr "" + +#~ msgid "" +#~ "_create_vdisk %(name)s - did not find success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find success " +#~ "message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find mapping " +#~ "id in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to prepare FlashCopy" +#~ " from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "Unexecpted mapping status %(status)s for " +#~ "mapping %(id)s. Attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Mapping %(id)s prepare failed to " +#~ "complete within the allotted %(to)d " +#~ "seconds timeout. Terminating." +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s with " +#~ "exception %(ex)s" +#~ msgstr "" + +#~ msgid "_prepare_fc_map: %s" +#~ msgstr "" + +#~ msgid "" +#~ "_start_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "enter: _run_flashcopy: execute FlashCopy from" +#~ " source %(source)s to target %(target)s" +#~ msgstr "" + +#~ msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +#~ msgstr "" + +#~ msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %(src_vdisk)s (%(src_id)s) does not exist" +#~ msgstr "" + +#~ msgid "" +#~ "_create_copy: cannot get source vdisk " +#~ "%(src)s capacity from vdisk attributes " +#~ "%(attr)s" +#~ msgstr "" + +#~ msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_flashcopy_mapping_attributes: mapping " +#~ "%(fc_map_id)s, attributes %(attributes)s" +#~ msgstr "" + +#~ msgid "enter: _is_vdisk_defined: vdisk %s " +#~ msgstr "" + +#~ msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +#~ msgstr "" + +#~ msgid "enter: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "warning: Tried to delete vdisk %s but it does not exist." 
+#~ msgstr "" + +#~ msgid "leave: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "" +#~ "_add_vdisk_copy %(name)s - did not find" +#~ " success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "_get_vdisk_copy_attrs: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "_get_pool_attrs: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "enter: _execute_command_and_parse_attributes: command %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _execute_command_and_parse_attributes:\n" +#~ "command: %(cmd)s\n" +#~ "attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "_get_hdr_dic: attribute headers and values do not match.\n" +#~ " Headers: %(header)s\n" +#~ " Values: %(row)s" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ "stdout: %(out)s\n" +#~ "stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "Did not find expected column in %(fun)s: %(hdr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Volume size %(size)s cannot be lesser" +#~ " than the snapshot size %(snap_size)s. " +#~ "They must be >= original snapshot " +#~ "size." +#~ msgstr "" + +#~ msgid "" +#~ "Clones currently disallowed when %(size)s " +#~ "< %(source_size)s. They must be >= " +#~ "original volume size." +#~ msgstr "" + +#~ msgid "" +#~ "Size of specified image %(image_size)s " +#~ "is larger than volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "" +#~ "Image minDisk size %(min_disk)s is " +#~ "larger than the volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "Updating volume %(volume_id)s with %(update)s" +#~ msgstr "" + +#~ msgid "Volume %s: resetting 'creating' status failed" +#~ msgstr "" + +#~ msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s" +#~ msgstr "" + +#~ msgid "Marking volume %s as bootable" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting download of %(image_id)s " +#~ "(%(image_location)s) to volume %(volume_id)s" +#~ msgstr "" + +#~ msgid "" +#~ "Downloaded image %(image_id)s (%(image_location)s)" +#~ " to volume %(volume_id)s successfully" +#~ msgstr "" + +#~ msgid "" +#~ "Creating volume glance metadata for " +#~ "volume %(volume_id)s backed by image " +#~ "%(image_id)s with: %(vol_metadata)s" +#~ msgstr "" + +#~ msgid "" +#~ "Cloning %(volume_id)s from image %(image_id)s" +#~ " at location %(image_location)s" +#~ msgstr "" + diff --git a/cinder/locale/ko/LC_MESSAGES/cinder.po b/cinder/locale/ko/LC_MESSAGES/cinder.po index ed2ab5d9ca..f5f54da64e 100644 --- a/cinder/locale/ko/LC_MESSAGES/cinder.po +++ b/cinder/locale/ko/LC_MESSAGES/cinder.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: cinder\n" "Report-Msgid-Bugs-To: FULL NAME \n" -"POT-Creation-Date: 2012-04-08 23:04+0000\n" +"POT-Creation-Date: 2014-02-03 06:16+0000\n" "PO-Revision-Date: 2011-12-16 04:42+0000\n" "Last-Translator: Zhongyue Luo \n" "Language-Team: Korean \n" @@ -15,8193 +15,10736 @@ msgstr "" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 0.9.6\n" +"Generated-By: Babel 1.3\n" -#: cinder/context.py:59 +#: cinder/context.py:61 #, python-format msgid "Arguments dropped when creating context: %s" msgstr "" -#: cinder/context.py:90 +#: cinder/context.py:102 #, python-format msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" msgstr "" -#: 
cinder/crypto.py:48 -msgid "Filename of root CA" -msgstr "CA 루트의 파일이름" - -#: cinder/crypto.py:51 -msgid "Filename of private key" -msgstr "비밀키의 파일명" +#: cinder/exception.py:66 cinder/brick/exception.py:33 +msgid "An unknown exception occurred." +msgstr "" -#: cinder/crypto.py:54 -msgid "Filename of root Certificate Revocation List" +#: cinder/exception.py:88 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" msgstr "" -#: cinder/crypto.py:57 -msgid "Where we keep our keys" -msgstr "키를 저장하는 경로" +#: cinder/exception.py:107 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" -#: cinder/crypto.py:60 -msgid "Where we keep our root CA" -msgstr "CA 루트를 저장하는 경로" +#: cinder/exception.py:112 +#, python-format +msgid "Volume driver reported an error: %(message)s" +msgstr "" -#: cinder/crypto.py:63 -msgid "Should we use a CA for each project?" -msgstr "각 프로젝트마다 CA를 사용하시겠습니까?" +#: cinder/exception.py:116 +#, python-format +msgid "Backup driver reported an error: %(message)s" +msgstr "" -#: cinder/crypto.py:67 +#: cinder/exception.py:120 #, python-format -msgid "Subject for certificate for users, %s for project, user, timestamp" +msgid "Connection to glance failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:124 +msgid "Not authorized." msgstr "" -#: cinder/crypto.py:72 +#: cinder/exception.py:129 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:133 #, python-format -msgid "Subject for certificate for projects, %s for project, timestamp" +msgid "Policy doesn't allow %(action)s to be performed." msgstr "" -#: cinder/crypto.py:292 +#: cinder/exception.py:137 #, python-format -msgid "Flags path: %s" -msgstr "플래그 경로: %s" +msgid "Not authorized for image %(image_id)s." +msgstr "" -#: cinder/exception.py:56 -msgid "Unexpected error while running command." -msgstr "명령 실행도중 예측하지 못한 에러가 발생했습니다" +#: cinder/exception.py:141 +msgid "Volume driver not ready." +msgstr "" -#: cinder/exception.py:59 +#: cinder/exception.py:145 cinder/brick/exception.py:74 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:150 #, python-format -msgid "" -"%(description)s\n" -"Command: %(cmd)s\n" -"Exit code: %(exit_code)s\n" -"Stdout: %(stdout)r\n" -"Stderr: %(stderr)r" +msgid "Invalid snapshot: %(reason)s" msgstr "" -"%(description)s\n" -"Command: %(cmd)s\n" -"Exit code: %(exit_code)s\n" -"Stdout: %(stdout)r\n" -"Stderr: %(stderr)r" -#: cinder/exception.py:94 -msgid "DB exception wrapped." +#: cinder/exception.py:154 +#, python-format +msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." msgstr "" -#: cinder/exception.py:155 -msgid "An unknown exception occurred." +#: cinder/exception.py:159 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." msgstr "" -#: cinder/exception.py:178 -msgid "Failed to decrypt text" +#: cinder/exception.py:163 +msgid "Failed to load data into json format" msgstr "" -#: cinder/exception.py:182 -msgid "Failed to paginate through images from image service" +#: cinder/exception.py:167 +msgid "The results are invalid." 
msgstr "" -#: cinder/exception.py:186 -msgid "Virtual Interface creation failed" +#: cinder/exception.py:171 +#, python-format +msgid "Invalid input received: %(reason)s" msgstr "" -#: cinder/exception.py:190 -msgid "5 attempts to create virtual interfacewith unique mac address failed" +#: cinder/exception.py:175 +#, python-format +msgid "Invalid volume type: %(reason)s" msgstr "" -#: cinder/exception.py:195 -msgid "Connection to glance failed" +#: cinder/exception.py:179 +#, python-format +msgid "Invalid volume: %(reason)s" msgstr "" -#: cinder/exception.py:199 -msgid "Connection to melange failed" +#: cinder/exception.py:183 +#, python-format +msgid "Invalid content type %(content_type)s." msgstr "" -#: cinder/exception.py:203 -msgid "Not authorized." +#: cinder/exception.py:187 +#, python-format +msgid "Invalid host: %(reason)s" msgstr "" -#: cinder/exception.py:208 -msgid "User does not have admin privileges" +#: cinder/exception.py:193 cinder/brick/exception.py:81 +#, python-format +msgid "%(err)s" msgstr "" -#: cinder/exception.py:212 +#: cinder/exception.py:197 #, python-format -msgid "Policy doesn't allow %(action)s to be performed." +msgid "Invalid auth key: %(reason)s" msgstr "" -#: cinder/exception.py:216 +#: cinder/exception.py:201 #, python-format -msgid "Not authorized for image %(image_id)s." +msgid "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" msgstr "" -#: cinder/exception.py:220 -msgid "Unacceptable parameters." +#: cinder/exception.py:206 +msgid "Service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:210 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" msgstr "" -#: cinder/exception.py:225 -msgid "Invalid snapshot" +#: cinder/exception.py:214 +#, python-format +msgid "The device in the path %(path)s is unavailable: %(reason)s" msgstr "" -#: cinder/exception.py:229 +#: cinder/exception.py:218 #, python-format -msgid "Volume %(volume_id)s is not attached to anything" +msgid "Expected a uuid but received %(uuid)s." msgstr "" -#: cinder/exception.py:233 cinder/api/openstack/compute/contrib/keypairs.py:113 -msgid "Keypair data is invalid" +#: cinder/exception.py:222 cinder/brick/exception.py:68 +msgid "Resource could not be found." msgstr "" -#: cinder/exception.py:237 -msgid "Failed to load data into json format" +#: cinder/exception.py:228 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:232 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." msgstr "" -#: cinder/exception.py:241 -msgid "The request is invalid." +#: cinder/exception.py:237 +#, python-format +msgid "" +"Volume %(volume_id)s has no administration metadata with key " +"%(metadata_key)s." msgstr "" -#: cinder/exception.py:245 +#: cinder/exception.py:242 #, python-format -msgid "Invalid signature %(signature)s for user %(user)s." +msgid "Invalid metadata: %(reason)s" msgstr "" -#: cinder/exception.py:249 -msgid "Invalid input received" +#: cinder/exception.py:246 +#, python-format +msgid "Invalid metadata size: %(reason)s" msgstr "" -#: cinder/exception.py:253 +#: cinder/exception.py:250 #, python-format -msgid "Invalid instance type %(instance_type)s." +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." msgstr "" -#: cinder/exception.py:257 -msgid "Invalid volume type" +#: cinder/exception.py:255 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." 
msgstr "" -#: cinder/exception.py:261 -msgid "Invalid volume" +#: cinder/exception.py:259 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." msgstr "" -#: cinder/exception.py:265 +#: cinder/exception.py:264 #, python-format -msgid "Invalid port range %(from_port)s:%(to_port)s. %(msg)s" +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." msgstr "" #: cinder/exception.py:269 #, python-format -msgid "Invalid IP protocol %(protocol)s." +msgid "" +"Volume Type %(volume_type_id)s deletion is not allowed with volumes " +"present with the type." msgstr "" -#: cinder/exception.py:273 +#: cinder/exception.py:274 #, python-format -msgid "Invalid content type %(content_type)s." +msgid "Snapshot %(snapshot_id)s could not be found." msgstr "" -#: cinder/exception.py:277 +#: cinder/exception.py:278 #, python-format -msgid "Invalid cidr %(cidr)s." +msgid "deleting volume %(volume_name)s that has snapshot" msgstr "" -#: cinder/exception.py:281 -msgid "Invalid reuse of an RPC connection." +#: cinder/exception.py:282 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" msgstr "" -#: cinder/exception.py:285 -msgid "Invalid Parameter: Unicode is not supported by the current database." +#: cinder/exception.py:287 +#, python-format +msgid "No target id found for volume %(volume_id)s." msgstr "" -#: cinder/exception.py:292 +#: cinder/exception.py:291 #, python-format -msgid "%(err)s" +msgid "Invalid image href %(image_href)s." msgstr "" -#: cinder/exception.py:296 +#: cinder/exception.py:295 #, python-format -msgid "" -"Cannot perform action '%(action)s' on aggregate %(aggregate_id)s. Reason:" -" %(reason)s." +msgid "Image %(image_id)s could not be found." msgstr "" -#: cinder/exception.py:301 +#: cinder/exception.py:299 #, python-format -msgid "Group not valid. Reason: %(reason)s" +msgid "Service %(service_id)s could not be found." msgstr "" -#: cinder/exception.py:305 +#: cinder/exception.py:303 #, python-format -msgid "" -"Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot %(method)s while" -" the instance is in this state." +msgid "Host %(host)s could not be found." msgstr "" -#: cinder/exception.py:310 +#: cinder/exception.py:307 #, python-format -msgid "Instance %(instance_id)s is not running." +msgid "Scheduler Host Filter %(filter_name)s could not be found." msgstr "" -#: cinder/exception.py:314 +#: cinder/exception.py:311 #, python-format -msgid "Instance %(instance_id)s is not suspended." +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." msgstr "" -#: cinder/exception.py:318 +#: cinder/exception.py:315 #, python-format -msgid "Instance %(instance_id)s is not in rescue mode" +msgid "Could not find binary %(binary)s on host %(host)s." msgstr "" -#: cinder/exception.py:322 -msgid "Failed to suspend instance" +#: cinder/exception.py:319 +#, python-format +msgid "Invalid reservation expiration %(expire)s." msgstr "" -#: cinder/exception.py:326 -msgid "Failed to resume server" +#: cinder/exception.py:323 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" msgstr "" -#: cinder/exception.py:330 -msgid "Failed to reboot instance" +#: cinder/exception.py:328 +msgid "Quota could not be found" msgstr "" -#: cinder/exception.py:334 -msgid "Failed to terminate instance" +#: cinder/exception.py:332 +#, python-format +msgid "Unknown quota resources %(unknown)s." 
msgstr "" -#: cinder/exception.py:338 -msgid "Service is unavailable at this time." +#: cinder/exception.py:336 +#, python-format +msgid "Quota for project %(project_id)s could not be found." msgstr "" -#: cinder/exception.py:342 -msgid "Volume service is unavailable at this time." +#: cinder/exception.py:340 +#, python-format +msgid "Quota class %(class_name)s could not be found." msgstr "" -#: cinder/exception.py:346 -msgid "Compute service is unavailable at this time." +#: cinder/exception.py:344 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." msgstr "" -#: cinder/exception.py:350 +#: cinder/exception.py:348 #, python-format -msgid "Unable to migrate instance (%(instance_id)s) to current host (%(host)s)." +msgid "Quota reservation %(uuid)s could not be found." msgstr "" -#: cinder/exception.py:355 -msgid "Destination compute host is unavailable at this time." +#: cinder/exception.py:352 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" msgstr "" -#: cinder/exception.py:359 -msgid "Original compute host is unavailable at this time." +#: cinder/exception.py:356 +#, python-format +msgid "File %(file_path)s could not be found." msgstr "" -#: cinder/exception.py:363 -msgid "The supplied hypervisor type of is invalid." +#: cinder/exception.py:365 +#, python-format +msgid "Volume Type %(id)s already exists." msgstr "" -#: cinder/exception.py:367 -msgid "The instance requires a newer hypervisor version than has been provided." +#: cinder/exception.py:369 +#, python-format +msgid "Volume type encryption for type %(type_id)s already exists." msgstr "" -#: cinder/exception.py:372 +#: cinder/exception.py:373 #, python-format -msgid "" -"The supplied disk path (%(path)s) already exists, it is expected not to " -"exist." +msgid "Malformed message body: %(reason)s" msgstr "" #: cinder/exception.py:377 #, python-format -msgid "The supplied device path (%(path)s) is invalid." +msgid "Could not find config at %(path)s" msgstr "" #: cinder/exception.py:381 -#, python-format -msgid "The supplied device (%(device)s) is busy." -msgstr "" +#, fuzzy, python-format +msgid "Could not find parameter %(param)s" +msgstr "%s 볼륨을 찾을수 없습니다" #: cinder/exception.py:385 -msgid "Unacceptable CPU info" +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" msgstr "" #: cinder/exception.py:389 #, python-format -msgid "%(address)s is not a valid IP v4/6 address." +msgid "No valid host was found. %(reason)s" msgstr "" -#: cinder/exception.py:393 +#: cinder/exception.py:398 #, python-format -msgid "" -"VLAN tag is not appropriate for the port group %(bridge)s. Expected VLAN " -"tag is %(tag)s, but the one associated with the port group is %(pgroup)s." +msgid "Host %(host)s is not up or doesn't exist." msgstr "" -#: cinder/exception.py:399 +#: cinder/exception.py:402 #, python-format -msgid "" -"vSwitch which contains the port group %(bridge)s is not associated with " -"the desired physical adapter. Expected vSwitch is %(expected)s, but the " -"one associated is %(actual)s." +msgid "Quota exceeded: code=%(code)s" msgstr "" -#: cinder/exception.py:406 +#: cinder/exception.py:409 #, python-format -msgid "Disk format %(disk_format)s is not acceptable" +msgid "" +"Requested volume or snapshot exceeds allowed Gigabytes quota. Requested " +"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." 
msgstr "" -#: cinder/exception.py:410 +#: cinder/exception.py:415 #, python-format -msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" msgstr "" -#: cinder/exception.py:414 +#: cinder/exception.py:419 #, python-format -msgid "Instance %(instance_id)s is unacceptable: %(reason)s" +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" msgstr "" -#: cinder/exception.py:418 +#: cinder/exception.py:423 #, python-format -msgid "Ec2 id %(ec2_id)s is unacceptable." -msgstr "" - -#: cinder/exception.py:422 -msgid "Resource could not be found." +msgid "Detected more than one volume with name %(vol_name)s" msgstr "" #: cinder/exception.py:427 #, python-format -msgid "Required flag %(flag)s not set." +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" msgstr "" -#: cinder/exception.py:431 +#: cinder/exception.py:432 #, python-format -msgid "Volume %(volume_id)s could not be found." +msgid "Unknown or unsupported command %(cmd)s" msgstr "" -#: cinder/exception.py:435 +#: cinder/exception.py:436 #, python-format -msgid "Unable to locate account %(account_name)s on Solidfire device" +msgid "Malformed response to command %(cmd)s: %(reason)s" msgstr "" #: cinder/exception.py:440 #, python-format -msgid "Volume not found for instance %(instance_id)s." +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" msgstr "" #: cinder/exception.py:444 #, python-format -msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" msgstr "" #: cinder/exception.py:449 -msgid "Zero volume types found." +#, python-format +msgid "Glance metadata for volume/snapshot %(id)s cannot be found." msgstr "" #: cinder/exception.py:453 #, python-format -msgid "Volume type %(volume_type_id)s could not be found." +msgid "Failed to export for volume: %(reason)s" msgstr "" #: cinder/exception.py:457 #, python-format -msgid "Volume type with name %(volume_type_name)s could not be found." +msgid "Failed to create metadata for volume: %(reason)s" msgstr "" -#: cinder/exception.py:462 +#: cinder/exception.py:461 #, python-format -msgid "" -"Volume Type %(volume_type_id)s has no extra specs with key " -"%(extra_specs_key)s." +msgid "Failed to update metadata for volume: %(reason)s" msgstr "" -#: cinder/exception.py:467 +#: cinder/exception.py:465 #, python-format -msgid "Snapshot %(snapshot_id)s could not be found." +msgid "Failed to copy metadata to volume: %(reason)s" msgstr "" -#: cinder/exception.py:471 -#, python-format -msgid "deleting volume %(volume_name)s that has snapshot" +#: cinder/exception.py:469 +#, fuzzy, python-format +msgid "Failed to copy image to volume: %(reason)s" +msgstr "%s 볼륨을 찾을수 없습니다" + +#: cinder/exception.py:473 +msgid "Invalid Ceph args provided for backup rbd operation" msgstr "" -#: cinder/exception.py:475 -#, python-format -msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +#: cinder/exception.py:477 +msgid "An error has occurred during backup operation" msgstr "" -#: cinder/exception.py:480 -#, python-format -msgid "No target id found for volume %(volume_id)s." +#: cinder/exception.py:481 +msgid "Backup RBD operation failed" msgstr "" -#: cinder/exception.py:484 +#: cinder/exception.py:485 #, python-format -msgid "No disk at %(location)s" +msgid "Backup %(backup_id)s could not be found." 
msgstr "" -#: cinder/exception.py:488 -#, python-format -msgid "Could not find a handler for %(driver_type)s volume." +#: cinder/exception.py:489 +msgid "Failed to identify volume backend." msgstr "" -#: cinder/exception.py:492 +#: cinder/exception.py:493 #, python-format -msgid "Invalid image href %(image_href)s." +msgid "Invalid backup: %(reason)s" msgstr "" -#: cinder/exception.py:496 -msgid "" -"Some images have been stored via hrefs. This version of the api does not " -"support displaying image hrefs." +#: cinder/exception.py:497 +#, python-format +msgid "Connection to swift failed: %(reason)s" msgstr "" #: cinder/exception.py:501 #, python-format -msgid "Image %(image_id)s could not be found." +msgid "Transfer %(transfer_id)s could not be found." msgstr "" #: cinder/exception.py:505 #, python-format -msgid "Kernel not found for image %(image_id)s." +msgid "Volume migration failed: %(reason)s" msgstr "" #: cinder/exception.py:509 #, python-format -msgid "User %(user_id)s could not be found." +msgid "SSH command injection detected: %(command)s" msgstr "" #: cinder/exception.py:513 #, python-format -msgid "Project %(project_id)s could not be found." +msgid "QoS Specs %(specs_id)s already exists." msgstr "" #: cinder/exception.py:517 #, python-format -msgid "User %(user_id)s is not a member of project %(project_id)s." +msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." msgstr "" -#: cinder/exception.py:521 +#: cinder/exception.py:522 #, python-format -msgid "Role %(role_id)s could not be found." +msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." msgstr "" -#: cinder/exception.py:525 -msgid "Cannot find SR to read/write VDI." -msgstr "" - -#: cinder/exception.py:529 +#: cinder/exception.py:527 #, python-format -msgid "%(req)s is required to create a network." +msgid "No such QoS spec %(specs_id)s." msgstr "" -#: cinder/exception.py:533 +#: cinder/exception.py:531 #, python-format -msgid "Network %(network_id)s could not be found." +msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." msgstr "" -#: cinder/exception.py:537 +#: cinder/exception.py:536 #, python-format -msgid "Network could not be found for bridge %(bridge)s" +msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." msgstr "" #: cinder/exception.py:541 #, python-format -msgid "Network could not be found for uuid %(uuid)s" +msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." msgstr "" -#: cinder/exception.py:545 +#: cinder/exception.py:546 #, python-format -msgid "Network could not be found with cidr %(cidr)s." +msgid "Invalid qos specs: %(reason)s" msgstr "" -#: cinder/exception.py:549 +#: cinder/exception.py:550 #, python-format -msgid "Network could not be found for instance %(instance_id)s." -msgstr "" - -#: cinder/exception.py:553 -msgid "No networks defined." +msgid "QoS Specs %(specs_id)s is still associated with entities." msgstr "" -#: cinder/exception.py:557 +#: cinder/exception.py:554 #, python-format -msgid "" -"Either Network uuid %(network_uuid)s is not present or is not assigned to" -" the project %(project_id)s." +msgid "key manager error: %(reason)s" msgstr "" -#: cinder/exception.py:562 -#, python-format -msgid "Host is not set to the network (%(network_id)s)." +#: cinder/exception.py:560 +msgid "Coraid Cinder Driver exception." msgstr "" -#: cinder/exception.py:566 -#, python-format -msgid "Network %(network)s has active ports, cannot delete." +#: cinder/exception.py:564 +msgid "Failed to encode json data." 
msgstr "" -#: cinder/exception.py:570 -msgid "Could not find the datastore reference(s) which the VM uses." +#: cinder/exception.py:568 +msgid "Login on ESM failed." msgstr "" -#: cinder/exception.py:574 -#, python-format -msgid "No fixed IP associated with id %(id)s." +#: cinder/exception.py:572 +msgid "Relogin on ESM failed." msgstr "" -#: cinder/exception.py:578 +#: cinder/exception.py:576 #, python-format -msgid "Fixed ip not found for address %(address)s." +msgid "Group with name \"%(group_name)s\" not found." msgstr "" -#: cinder/exception.py:582 +#: cinder/exception.py:580 #, python-format -msgid "Instance %(instance_id)s has zero fixed ips." +msgid "ESM configure request failed: %(message)s." msgstr "" -#: cinder/exception.py:586 +#: cinder/exception.py:584 #, python-format -msgid "Network host %(host)s has zero fixed ips in network %(network_id)s." +msgid "Coraid ESM not available with reason: %(reason)s." msgstr "" -#: cinder/exception.py:591 -#, python-format -msgid "Instance %(instance_id)s doesn't have fixed ip '%(ip)s'." +#: cinder/exception.py:589 +msgid "Zadara Cinder Driver exception." msgstr "" -#: cinder/exception.py:595 +#: cinder/exception.py:593 +#, fuzzy, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "%(instance_name)s 인스턴스의 %(sr_ref)s SR에 대한 VDI 생성이 실패했습니다" + +#: cinder/exception.py:597 #, python-format -msgid "Host %(host)s has zero fixed ips." +msgid "Unable to find server object for initiator %(name)s" msgstr "" -#: cinder/exception.py:599 -#, python-format -msgid "" -"Fixed IP address (%(address)s) does not exist in network " -"(%(network_uuid)s)." +#: cinder/exception.py:601 +msgid "Unable to find any active VPSA controller" msgstr "" -#: cinder/exception.py:604 +#: cinder/exception.py:605 #, python-format -msgid "Fixed IP address %(address)s is already in use." +msgid "Failed to retrieve attachments for volume %(name)s" msgstr "" -#: cinder/exception.py:608 +#: cinder/exception.py:609 #, python-format -msgid "Fixed IP address %(address)s is invalid." +msgid "Invalid attachment info for volume %(name)s: %(reason)s" msgstr "" -#: cinder/exception.py:612 -msgid "Zero fixed ips available." +#: cinder/exception.py:613 +#, python-format +msgid "Bad HTTP response status %(status)s" msgstr "" -#: cinder/exception.py:616 -msgid "Zero fixed ips could be found." +#: cinder/exception.py:618 +msgid "Bad response from SolidFire API" msgstr "" -#: cinder/exception.py:620 -#, python-format -msgid "Floating ip not found for id %(id)s." +#: cinder/exception.py:622 +msgid "SolidFire Cinder Driver exception" msgstr "" -#: cinder/exception.py:624 +#: cinder/exception.py:626 #, python-format -msgid "The DNS entry %(name)s already exists in domain %(domain)s." +msgid "Error in SolidFire API response: data=%(data)s" msgstr "" -#: cinder/exception.py:628 +#: cinder/exception.py:630 #, python-format -msgid "Floating ip not found for address %(address)s." +msgid "Unable to locate account %(account_name)s on Solidfire device" msgstr "" -#: cinder/exception.py:632 +#: cinder/exception.py:636 #, python-format -msgid "Floating ip not found for host %(host)s." +msgid "Invalid 3PAR Domain: %(err)s" msgstr "" -#: cinder/exception.py:636 -msgid "Zero floating ips available." +#: cinder/exception.py:641 +msgid "Unknown NFS exception" msgstr "" -#: cinder/exception.py:640 -#, python-format -msgid "Floating ip %(address)s is associated." 
+#: cinder/exception.py:645 +msgid "No mounted NFS shares found" msgstr "" -#: cinder/exception.py:644 +#: cinder/exception.py:649 cinder/exception.py:662 #, python-format -msgid "Floating ip %(address)s is not associated." -msgstr "" - -#: cinder/exception.py:648 -msgid "Zero floating ips exist." +msgid "There is no share which can host %(volume_size)sG" msgstr "" -#: cinder/exception.py:652 -#, python-format -msgid "Interface %(interface)s not found." +#: cinder/exception.py:654 +msgid "Unknown Gluster exception" msgstr "" -#: cinder/exception.py:656 -#, python-format -msgid "Keypair %(name)s not found for user %(user_id)s" +#: cinder/exception.py:658 +msgid "No mounted Gluster shares found" msgstr "" -#: cinder/exception.py:660 -#, python-format -msgid "Certificate %(certificate_id)s not found." +#: cinder/manager.py:133 +msgid "Notifying Schedulers of capabilities ..." msgstr "" -#: cinder/exception.py:664 -#, python-format -msgid "Service %(service_id)s could not be found." +#: cinder/policy.py:30 +msgid "JSON file representing policy" msgstr "" -#: cinder/exception.py:668 -#, python-format -msgid "Host %(host)s could not be found." +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" msgstr "" -#: cinder/exception.py:672 +#: cinder/quota.py:105 #, python-format -msgid "Compute host %(host)s could not be found." +msgid "" +"Default quota for resource: %(res)s is set by the default quota flag: " +"quota_%(res)s, it is now deprecated. Please use the the default quota " +"class for default quota." msgstr "" -#: cinder/exception.py:676 +#: cinder/quota.py:748 #, python-format -msgid "Could not find binary %(binary)s on host %(host)s." +msgid "Created reservations %s" msgstr "" -#: cinder/exception.py:680 +#: cinder/quota.py:770 #, python-format -msgid "Auth token %(token)s could not be found." +msgid "Failed to commit reservations %s" msgstr "" -#: cinder/exception.py:684 +#: cinder/quota.py:790 #, python-format -msgid "Access Key %(access_key)s could not be found." +msgid "Failed to roll back reservations %s" msgstr "" -#: cinder/exception.py:688 -msgid "Quota could not be found" +#: cinder/quota.py:876 +msgid "Cannot register resource" msgstr "" -#: cinder/exception.py:692 -#, python-format -msgid "Quota for project %(project_id)s could not be found." +#: cinder/quota.py:879 +msgid "Cannot register resources" msgstr "" -#: cinder/exception.py:696 +#: cinder/quota_utils.py:46 #, python-format -msgid "Quota class %(class_name)s could not be found." +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume - " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" msgstr "" -#: cinder/exception.py:700 +#: cinder/quota_utils.py:56 cinder/transfer/api.py:168 +#: cinder/volume/flows/api/create_volume.py:520 #, python-format -msgid "Security group %(security_group_id)s not found." +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" msgstr "" -#: cinder/exception.py:704 +#: cinder/service.py:95 #, python-format -msgid "Security group %(security_group_id)s not found for project %(project_id)s." +msgid "Starting %(topic)s node (version %(version_string)s)" msgstr "" -#: cinder/exception.py:709 +#: cinder/service.py:108 cinder/openstack/common/rpc/service.py:47 #, python-format -msgid "Security group with rule %(rule_id)s not found." 
+msgid "Creating Consumer connection for Service %s" msgstr "" -#: cinder/exception.py:713 +#: cinder/service.py:148 #, python-format msgid "" -"Security group %(security_group_id)s is already associated with the " -"instance %(instance_id)s" +"Report interval must be less than service down time. Current config " +"service_down_time: %(service_down_time)s, report_interval for this: " +"service is: %(report_interval)s. Setting global service_down_time to: " +"%(new_down_time)s" msgstr "" -#: cinder/exception.py:718 -#, python-format -msgid "" -"Security group %(security_group_id)s is not associated with the instance " -"%(instance_id)s" +#: cinder/service.py:216 +msgid "Service killed that has no database entry" msgstr "" -#: cinder/exception.py:723 -#, python-format -msgid "Migration %(migration_id)s could not be found." +#: cinder/service.py:255 +msgid "The service database object disappeared, Recreating it." msgstr "" -#: cinder/exception.py:727 -#, python-format -msgid "Migration not found for instance %(instance_id)s with status %(status)s." +#: cinder/service.py:270 +msgid "Recovered model server connection!" msgstr "" -#: cinder/exception.py:732 -#, python-format -msgid "Console pool %(pool_id)s could not be found." +#: cinder/service.py:276 +msgid "model server went away" msgstr "" -#: cinder/exception.py:736 +#: cinder/service.py:298 #, python-format msgid "" -"Console pool of type %(console_type)s for compute host %(compute_host)s " -"on proxy host %(host)s not found." +"Value of config option %(name)s_workers must be integer greater than 1. " +"Input value ignored." msgstr "" -#: cinder/exception.py:742 -#, python-format -msgid "Console %(console_id)s could not be found." +#: cinder/service.py:373 +msgid "serve() can only be called once" msgstr "" -#: cinder/exception.py:746 -#, python-format -msgid "Console for instance %(instance_id)s could not be found." +#: cinder/service.py:379 cinder/openstack/common/service.py:166 +#: cinder/openstack/common/service.py:384 +msgid "Full set of CONF:" msgstr "" -#: cinder/exception.py:750 +#: cinder/service.py:387 #, python-format -msgid "" -"Console for instance %(instance_id)s in pool %(pool_id)s could not be " -"found." +msgid "%s : FLAG SET " msgstr "" -#: cinder/exception.py:755 +#: cinder/utils.py:96 #, python-format -msgid "Invalid console type %(console_type)s " -msgstr "" - -#: cinder/exception.py:759 -msgid "Zero instance types found." +msgid "Can not translate %s to integer." msgstr "" -#: cinder/exception.py:763 +#: cinder/utils.py:127 #, python-format -msgid "Instance type %(instance_type_id)s could not be found." +msgid "May specify only one of %s" msgstr "" -#: cinder/exception.py:767 -#, python-format -msgid "Instance type with name %(instance_type_name)s could not be found." +#: cinder/utils.py:212 +msgid "Specify a password or private_key" msgstr "" -#: cinder/exception.py:772 +#: cinder/utils.py:228 #, python-format -msgid "Flavor %(flavor_id)s could not be found." +msgid "Error connecting via ssh: %s" msgstr "" -#: cinder/exception.py:776 +#: cinder/utils.py:412 #, python-format -msgid "Cell %(cell_id)s could not be found." +msgid "Invalid backend: %s" msgstr "" -#: cinder/exception.py:780 +#: cinder/utils.py:423 #, python-format -msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgid "backend %s" msgstr "" -#: cinder/exception.py:784 +#: cinder/utils.py:698 #, python-format -msgid "Scheduler cost function %(cost_fn_str)s could not be found." 
+msgid "Could not remove tmpdir: %s" msgstr "" -#: cinder/exception.py:789 +#: cinder/utils.py:759 #, python-format -msgid "Scheduler weight flag not found: %(flag_name)s" +msgid "Volume driver %s not initialized" msgstr "" -#: cinder/exception.py:793 +#: cinder/wsgi.py:127 cinder/openstack/common/sslutils.py:50 #, python-format -msgid "Instance %(instance_id)s has no metadata with key %(metadata_key)s." +msgid "Unable to find cert_file : %s" msgstr "" -#: cinder/exception.py:798 +#: cinder/wsgi.py:130 cinder/openstack/common/sslutils.py:53 +#, fuzzy, python-format +msgid "Unable to find ca_file : %s" +msgstr "%s 볼륨을 찾을수 없습니다" + +#: cinder/wsgi.py:133 cinder/openstack/common/sslutils.py:56 #, python-format -msgid "" -"Instance Type %(instance_type_id)s has no extra specs with key " -"%(extra_specs_key)s." +msgid "Unable to find key_file : %s" msgstr "" -#: cinder/exception.py:803 -msgid "LDAP object could not be found" +#: cinder/wsgi.py:136 cinder/openstack/common/sslutils.py:59 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" msgstr "" -#: cinder/exception.py:807 +#: cinder/wsgi.py:169 #, python-format -msgid "LDAP user %(user_id)s could not be found." +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" msgstr "" -#: cinder/exception.py:811 +#: cinder/wsgi.py:206 #, python-format -msgid "LDAP group %(group_id)s could not be found." +msgid "Started %(name)s on %(host)s:%(port)s" msgstr "" -#: cinder/exception.py:815 -#, python-format -msgid "LDAP user %(user_id)s is not a member of group %(group_id)s." +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." msgstr "" -#: cinder/exception.py:819 -#, python-format -msgid "File %(file_path)s could not be found." +#: cinder/wsgi.py:244 +msgid "WSGI server has stopped." msgstr "" -#: cinder/exception.py:823 -msgid "Zero files could be found." +#: cinder/wsgi.py:313 +msgid "You must implement __call__" msgstr "" -#: cinder/exception.py:827 -#, python-format -msgid "Virtual switch associated with the network adapter %(adapter)s not found." +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." msgstr "" -#: cinder/exception.py:832 -#, python-format -msgid "Network adapter %(adapter)s could not be found." +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." msgstr "" -#: cinder/exception.py:836 -#, python-format -msgid "Class %(class_name)s could not be found: %(exception)s" +#: cinder/api/common.py:92 cinder/api/common.py:126 cinder/volume/api.py:266 +msgid "limit param must be an integer" msgstr "" -#: cinder/exception.py:840 -msgid "Action not allowed." 
+#: cinder/api/common.py:95 cinder/api/common.py:130 cinder/volume/api.py:263 +msgid "limit param must be positive" msgstr "" -#: cinder/exception.py:844 -#, python-format -msgid "Unable to use global role %(role_id)s" +#: cinder/api/common.py:120 +msgid "offset param must be an integer" msgstr "" -#: cinder/exception.py:848 -msgid "Rotation is not allowed for snapshots" +#: cinder/api/common.py:134 +msgid "offset param must be positive" msgstr "" -#: cinder/exception.py:852 -msgid "Rotation param is required for backup image_type" +#: cinder/api/common.py:162 +#, python-format +msgid "marker [%s] not found" msgstr "" -#: cinder/exception.py:861 +#: cinder/api/common.py:189 #, python-format -msgid "Key pair %(key_name)s already exists." +msgid "href %s does not contain version" msgstr "" -#: cinder/exception.py:865 -#, python-format -msgid "User %(user)s already exists." +#: cinder/api/extensions.py:182 +msgid "Initializing extension manager." msgstr "" -#: cinder/exception.py:869 +#: cinder/api/extensions.py:197 #, python-format -msgid "LDAP user %(user)s already exists." +msgid "Loaded extension: %s" msgstr "" -#: cinder/exception.py:873 +#: cinder/api/extensions.py:235 #, python-format -msgid "LDAP group %(group)s already exists." +msgid "Ext name: %s" msgstr "" -#: cinder/exception.py:877 +#: cinder/api/extensions.py:236 #, python-format -msgid "User %(uid)s is already a member of the group %(group_dn)s" +msgid "Ext alias: %s" msgstr "" -#: cinder/exception.py:882 +#: cinder/api/extensions.py:237 #, python-format -msgid "Project %(project)s already exists." +msgid "Ext description: %s" msgstr "" -#: cinder/exception.py:886 +#: cinder/api/extensions.py:239 #, python-format -msgid "Instance %(name)s already exists." +msgid "Ext namespace: %s" msgstr "" -#: cinder/exception.py:890 +#: cinder/api/extensions.py:240 #, python-format -msgid "Instance Type %(name)s already exists." +msgid "Ext updated: %s" msgstr "" -#: cinder/exception.py:894 +#: cinder/api/extensions.py:242 #, python-format -msgid "Volume Type %(name)s already exists." +msgid "Exception loading extension: %s" msgstr "" -#: cinder/exception.py:898 +#: cinder/api/extensions.py:256 #, python-format -msgid "%(path)s is on shared storage: %(reason)s" +msgid "Loading extension %s" msgstr "" -#: cinder/exception.py:902 -msgid "Migration error" +#: cinder/api/extensions.py:262 +#, python-format +msgid "Calling extension factory %s" msgstr "" -#: cinder/exception.py:906 +#: cinder/api/extensions.py:276 #, python-format -msgid "Malformed message body: %(reason)s" +msgid "osapi_volume_extension is set to deprecated path: %s" msgstr "" -#: cinder/exception.py:910 +#: cinder/api/extensions.py:278 #, python-format -msgid "Could not find config at %(path)s" +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" msgstr "" -#: cinder/exception.py:914 +#: cinder/api/extensions.py:287 #, python-format -msgid "Could not load paste app '%(name)s' from %(path)s" +msgid "Failed to load extension %(ext_factory)s: %(exc)s" msgstr "" -#: cinder/exception.py:918 -msgid "When resizing, instances must change size!" 
+#: cinder/api/extensions.py:356 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" msgstr "" -#: cinder/exception.py:922 -msgid "Image is larger than instance type allows" +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" msgstr "" -#: cinder/exception.py:926 -msgid "1 or more Zones could not complete the request" +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" msgstr "" -#: cinder/exception.py:930 -msgid "Instance type's memory is too small for requested image." +#: cinder/api/xmlutil.py:266 +msgid "element is not a child" msgstr "" -#: cinder/exception.py:934 -msgid "Instance type's disk is too small for requested image." +#: cinder/api/xmlutil.py:463 +msgid "root element selecting a list" msgstr "" -#: cinder/exception.py:938 +#: cinder/api/xmlutil.py:786 #, python-format -msgid "Insufficient free memory on compute node to start %(uuid)s." +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" msgstr "" -#: cinder/exception.py:942 -msgid "Could not fetch bandwidth/cpu/disk metrics for this host." +#: cinder/api/xmlutil.py:907 +msgid "subclasses must implement construct()!" msgstr "" -#: cinder/exception.py:946 +#: cinder/api/contrib/admin_actions.py:81 #, python-format -msgid "No valid host was found. %(reason)s" +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" msgstr "" -#: cinder/exception.py:950 +#: cinder/api/contrib/backups.py:128 #, python-format -msgid "Host %(host)s is not up or doesn't exist." +msgid "show called for member %s" msgstr "" -#: cinder/exception.py:954 -msgid "Quota exceeded" +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" msgstr "" -#: cinder/exception.py:958 +#: cinder/api/contrib/backups.py:143 #, python-format -msgid "" -"Aggregate %(aggregate_id)s: action '%(action)s' caused an error: " -"%(reason)s." +msgid "Delete backup with id: %s" msgstr "" -#: cinder/exception.py:963 +#: cinder/api/contrib/backups.py:185 #, python-format -msgid "Aggregate %(aggregate_id)s could not be found." +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:227 +#: cinder/api/contrib/volume_transfer.py:157 +#: cinder/api/contrib/volume_transfer.py:193 +msgid "Incorrect request body format" msgstr "" -#: cinder/exception.py:967 +#: cinder/api/contrib/backups.py:201 #, python-format -msgid "Aggregate %(aggregate_name)s already exists." +msgid "Creating backup of volume %(volume_id)s in container %(container)s" msgstr "" -#: cinder/exception.py:971 +#: cinder/api/contrib/backups.py:224 #, python-format -msgid "Aggregate %(aggregate_id)s has no host %(host)s." +msgid "Restoring backup %(backup_id)s (%(body)s)" msgstr "" -#: cinder/exception.py:975 +#: cinder/api/contrib/backups.py:234 #, python-format -msgid "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s." +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:60 +msgid "Snapshot not found." +msgstr "" + +#: cinder/api/contrib/hosts.py:86 cinder/api/openstack/wsgi.py:245 +msgid "cannot understand XML" msgstr "" -#: cinder/exception.py:980 +#: cinder/api/contrib/hosts.py:136 #, python-format -msgid "Host %(host)s already member of another aggregate." +msgid "Host '%s' could not be found." 
msgstr "" -#: cinder/exception.py:984 +#: cinder/api/contrib/hosts.py:165 #, python-format -msgid "Aggregate %(aggregate_id)s already has host %(host)s." +msgid "Invalid status: '%s'" msgstr "" -#: cinder/exception.py:988 +#: cinder/api/contrib/hosts.py:168 #, python-format -msgid "Detected more than one volume with name %(vol_name)s" +msgid "Invalid update setting: '%s'" msgstr "" -#: cinder/exception.py:992 +#: cinder/api/contrib/hosts.py:180 #, python-format -msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgid "Setting host %(host)s to %(state)s." msgstr "" -#: cinder/exception.py:997 -msgid "Unable to create instance type" +#: cinder/api/contrib/hosts.py:206 +msgid "Describe-resource is admin only functionality" msgstr "" -#: cinder/exception.py:1001 -msgid "Bad response from SolidFire API" +#: cinder/api/contrib/hosts.py:214 +msgid "Host not found" msgstr "" -#: cinder/exception.py:1005 -#, python-format -msgid "Error in SolidFire API response: status=%(status)s" +#: cinder/api/contrib/qos_specs_manage.py:111 +msgid "Please specify a name for QoS specs." msgstr "" -#: cinder/exception.py:1009 -#, python-format -msgid "Error in SolidFire API response: data=%(data)s" +#: cinder/api/contrib/qos_specs_manage.py:220 +msgid "Failed to disassociate qos specs." msgstr "" -#: cinder/exception.py:1013 -#, python-format -msgid "Detected existing vlan with id %(vlan)d" +#: cinder/api/contrib/qos_specs_manage.py:222 +msgid "Qos specs still in use." msgstr "" -#: cinder/exception.py:1017 -#, python-format -msgid "Instance %(instance_id)s could not be found." +#: cinder/api/contrib/qos_specs_manage.py:298 +#: cinder/api/contrib/qos_specs_manage.py:351 +msgid "Volume Type id must not be None." msgstr "" -#: cinder/exception.py:1021 -#, python-format -msgid "Invalid id: %(val)s (expecting \"i-...\")." +#: cinder/api/contrib/quota_classes.py:72 +msgid "Missing required element quota_class_set in request body." msgstr "" -#: cinder/exception.py:1025 -#, python-format -msgid "Could not fetch image %(image)s" +#: cinder/api/contrib/quota_classes.py:81 +msgid "Quota class limit must be specified as an integer value." msgstr "" -#: cinder/log.py:315 -#, python-format -msgid "syslog facility must be one of: %s" +#: cinder/api/contrib/quota_classes.py:85 +msgid "Quota class limit must be -1 or greater." msgstr "" -#: cinder/manager.py:146 -#, python-format -msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +#: cinder/api/contrib/quotas.py:60 +msgid "Quota limit must be specified as an integer value." msgstr "" -#: cinder/manager.py:152 -#, python-format -msgid "Running periodic task %(full_task_name)s" +#: cinder/api/contrib/quotas.py:65 +msgid "Quota limit must be -1 or greater." msgstr "" -#: cinder/manager.py:159 -#, python-format -msgid "Error during %(full_task_name)s: %(e)s" +#: cinder/api/contrib/quotas.py:100 +msgid "Missing required element quota_set in request body." msgstr "" -#: cinder/manager.py:203 -msgid "Notifying Schedulers of capabilities ..." +#: cinder/api/contrib/quotas.py:111 +#, python-format +msgid "Bad key(s) in quota set: %s" msgstr "" -#: cinder/policy.py:30 -msgid "JSON file representing policy" +#: cinder/api/contrib/scheduler_hints.py:36 +msgid "Malformed scheduler_hints attribute" msgstr "" -#: cinder/policy.py:33 -msgid "Rule checked when requested rule is not found" +#: cinder/api/contrib/services.py:84 +msgid "" +"Query by service parameter is deprecated. Please use binary parameter " +"instead." 
msgstr "" -#: cinder/service.py:137 -msgid "SIGTERM received" +#: cinder/api/contrib/snapshot_actions.py:51 +msgid "'status' must be specified." msgstr "" -#: cinder/service.py:177 +#: cinder/api/contrib/snapshot_actions.py:61 #, python-format -msgid "Starting %(topic)s node (version %(vcs_string)s)" +msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" msgstr "" -#: cinder/service.py:195 +#: cinder/api/contrib/snapshot_actions.py:67 #, python-format -msgid "Creating Consumer connection for Service %s" +msgid "" +"Provided snapshot status %(provided)s not allowed for snapshot with " +"status %(current)s." msgstr "" -#: cinder/service.py:282 -msgid "Service killed that has no database entry" +#: cinder/api/contrib/snapshot_actions.py:79 +msgid "progress must be an integer percentage" msgstr "" -#: cinder/service.py:319 -msgid "The service database object disappeared, Recreating it." +#: cinder/api/contrib/types_extra_specs.py:101 +msgid "Request body empty" msgstr "" -#: cinder/service.py:334 -msgid "Recovered model server connection!" +#: cinder/api/contrib/types_extra_specs.py:105 +#: cinder/api/v1/snapshot_metadata.py:75 cinder/api/v1/volume_metadata.py:75 +#: cinder/api/v2/snapshot_metadata.py:75 cinder/api/v2/volume_metadata.py:74 +msgid "Request body and URI mismatch" msgstr "" -#: cinder/service.py:340 -msgid "model server went away" +#: cinder/api/contrib/types_extra_specs.py:108 +#: cinder/api/v1/snapshot_metadata.py:79 cinder/api/v1/volume_metadata.py:79 +#: cinder/api/v2/snapshot_metadata.py:79 cinder/api/v2/volume_metadata.py:78 +msgid "Request body contains too many items" msgstr "" -#: cinder/service.py:433 -msgid "Full set of FLAGS:" +#: cinder/api/contrib/types_extra_specs.py:150 +msgid "" +"Key names can only contain alphanumeric characters, underscores, periods," +" colons and hyphens." msgstr "" -#: cinder/service.py:440 +#: cinder/api/contrib/volume_actions.py:99 #, python-format -msgid "%(flag)s : FLAG SET " +msgid "" +"Invalid request to attach volume to an instance %(instance_uuid)s and a " +"host %(host_name)s simultaneously" msgstr "" -#: cinder/utils.py:79 -#, python-format -msgid "Inner Exception: %s" +#: cinder/api/contrib/volume_actions.py:107 +msgid "Invalid request to attach volume to an invalid target" msgstr "" -#: cinder/utils.py:165 -#, python-format -msgid "Fetching %s" +#: cinder/api/contrib/volume_actions.py:111 +msgid "" +"Invalid request to attach volume with an invalid mode. Attaching mode " +"should be 'rw' or 'ro'" msgstr "" -#: cinder/utils.py:210 -#, python-format -msgid "Got unknown keyword args to utils.execute: %r" +#: cinder/api/contrib/volume_actions.py:196 +msgid "Unable to fetch connection information from backend." msgstr "" -#: cinder/utils.py:220 -#, python-format -msgid "Running cmd (subprocess): %s" +#: cinder/api/contrib/volume_actions.py:216 +msgid "Unable to terminate volume connection from backend." msgstr "" -#: cinder/utils.py:236 cinder/utils.py:315 -#, python-format -msgid "Result was %s" +#: cinder/api/contrib/volume_actions.py:229 +msgid "No image_name was specified in request." msgstr "" -#: cinder/utils.py:249 -#, python-format -msgid "%r failed. Retrying." +#: cinder/api/contrib/volume_actions.py:237 +msgid "Bad value for 'force' parameter." msgstr "" -#: cinder/utils.py:291 -#, python-format -msgid "Running cmd (SSH): %s" +#: cinder/api/contrib/volume_actions.py:240 +msgid "'force' is not string or bool." 
msgstr "" -#: cinder/utils.py:293 -msgid "Environment not supported over SSH" +#: cinder/api/contrib/volume_actions.py:280 +msgid "New volume size must be specified as an integer." msgstr "" -#: cinder/utils.py:297 -msgid "process_input not supported over SSH" +#: cinder/api/contrib/volume_actions.py:299 +msgid "Must specify readonly in request." msgstr "" -#: cinder/utils.py:352 -#, python-format -msgid "debug in callback: %s" +#: cinder/api/contrib/volume_actions.py:307 +msgid "Bad value for 'readonly'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:311 +msgid "'readonly' not string or bool" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:325 +msgid "New volume type must be specified." +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:131 +msgid "Listing volume transfers" msgstr "" -#: cinder/utils.py:534 +#: cinder/api/contrib/volume_transfer.py:147 #, python-format -msgid "Link Local address is not found.:%s" +msgid "Creating new volume transfer %s" msgstr "" -#: cinder/utils.py:537 +#: cinder/api/contrib/volume_transfer.py:162 +#, fuzzy, python-format +msgid "Creating transfer of volume %s" +msgstr "%s 볼륨을 찾을수 없습니다" + +#: cinder/api/contrib/volume_transfer.py:183 #, python-format -msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgid "Accepting volume transfer %s" msgstr "" -#: cinder/utils.py:648 +#: cinder/api/contrib/volume_transfer.py:196 #, python-format -msgid "Invalid backend: %s" +msgid "Accepting transfer %s" msgstr "" -#: cinder/utils.py:659 +#: cinder/api/contrib/volume_transfer.py:217 #, python-format -msgid "backend %s" +msgid "Delete transfer with id: %s" msgstr "" -#: cinder/utils.py:709 -msgid "in looping call" +#: cinder/api/contrib/volume_type_encryption.py:64 +msgid "key_size must be non-negative" msgstr "" -#: cinder/utils.py:927 -#, python-format -msgid "Attempting to grab semaphore \"%(lock)s\" for method \"%(method)s\"..." +#: cinder/api/contrib/volume_type_encryption.py:67 +msgid "key_size must be an integer" msgstr "" -#: cinder/utils.py:931 -#, python-format -msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +#: cinder/api/contrib/volume_type_encryption.py:73 +msgid "provider must be defined" msgstr "" -#: cinder/utils.py:935 -#, python-format -msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +#: cinder/api/contrib/volume_type_encryption.py:75 +msgid "control_location must be defined" msgstr "" -#: cinder/utils.py:942 +#: cinder/api/contrib/volume_type_encryption.py:83 #, python-format -msgid "Got file lock \"%(lock)s\" for method \"%(method)s\"..." +msgid "Valid control location are: %s" msgstr "" -#: cinder/utils.py:1001 -#, python-format -msgid "Found sentinel %(filename)s for pid %(pid)s" +#: cinder/api/contrib/volume_type_encryption.py:111 +msgid "Cannot create encryption specs. Volume type in use." msgstr "" -#: cinder/utils.py:1008 -#, python-format -msgid "Cleaned sentinel %(filename)s for pid %(pid)s" +#: cinder/api/contrib/volume_type_encryption.py:115 +msgid "Create body is not valid." msgstr "" -#: cinder/utils.py:1023 -#, python-format -msgid "Found lockfile %(file)s with link count %(count)d" +#: cinder/api/contrib/volume_type_encryption.py:157 +msgid "Cannot delete encryption specs. Volume type in use." msgstr "" -#: cinder/utils.py:1028 -#, python-format -msgid "Cleaned lockfile %(file)s with link count %(count)d" +#: cinder/api/middleware/auth.py:108 +msgid "Invalid service catalog json." 
msgstr "" -#: cinder/utils.py:1138 +#: cinder/api/middleware/fault.py:44 #, python-format -msgid "Expected object of type: %s" +msgid "Caught error: %s" msgstr "" -#: cinder/utils.py:1169 +#: cinder/api/middleware/fault.py:53 cinder/api/openstack/wsgi.py:984 #, python-format -msgid "Invalid server_string: %s" +msgid "%(url)s returned with HTTP %(status)d" msgstr "" -#: cinder/utils.py:1298 +#: cinder/api/middleware/fault.py:69 #, python-format -msgid "timefunc: '%(name)s' took %(total_time).2f secs" +msgid "%(exception)s: %(explanation)s" msgstr "" -#: cinder/utils.py:1330 -msgid "Original exception being dropped" +#: cinder/api/middleware/sizelimit.py:55 cinder/api/middleware/sizelimit.py:64 +#: cinder/api/middleware/sizelimit.py:78 +msgid "Request is too large." msgstr "" -#: cinder/utils.py:1461 -#, python-format -msgid "Class %(fullname)s is deprecated: %(msg)s" +#: cinder/api/openstack/__init__.py:69 +msgid "Must specify an ExtensionManager class" msgstr "" -#: cinder/utils.py:1463 +#: cinder/api/openstack/__init__.py:80 #, python-format -msgid "Class %(fullname)s is deprecated" +msgid "Extended resource: %s" msgstr "" -#: cinder/utils.py:1495 +#: cinder/api/openstack/__init__.py:104 #, python-format -msgid "Function %(name)s in %(location)s is deprecated: %(msg)s" +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" msgstr "" -#: cinder/utils.py:1497 +#: cinder/api/openstack/__init__.py:110 #, python-format -msgid "Function %(name)s in %(location)s is deprecated" +msgid "Extension %(ext_name)s extending resource: %(collection)s" msgstr "" -#: cinder/utils.py:1681 -#, python-format -msgid "Could not remove tmpdir: %s" +#: cinder/api/openstack/__init__.py:126 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." msgstr "" -#: cinder/wsgi.py:97 -#, python-format -msgid "Started %(name)s on %(host)s:%(port)s" +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." msgstr "" -#: cinder/wsgi.py:108 -msgid "Stopping WSGI server." +#: cinder/api/openstack/wsgi.py:220 cinder/api/openstack/wsgi.py:634 +msgid "cannot understand JSON" msgstr "" -#: cinder/wsgi.py:111 -msgid "Stopping raw TCP server." +#: cinder/api/openstack/wsgi.py:639 +msgid "too many body keys" msgstr "" -#: cinder/wsgi.py:117 +#: cinder/api/openstack/wsgi.py:677 #, python-format -msgid "Starting TCP server %(arg0)s on %(host)s:%(port)s" +msgid "Exception handling resource: %s" msgstr "" -#: cinder/wsgi.py:133 -msgid "WSGI server has stopped." +#: cinder/api/openstack/wsgi.py:682 +#, python-format +msgid "Fault thrown: %s" msgstr "" -#: cinder/wsgi.py:211 -msgid "You must implement __call__" +#: cinder/api/openstack/wsgi.py:685 +#, python-format +msgid "HTTP exception thrown: %s" msgstr "" -#: cinder/api/direct.py:218 -msgid "not available" +#: cinder/api/openstack/wsgi.py:793 +msgid "Empty body provided in request" msgstr "" -#: cinder/api/direct.py:299 -#, python-format -msgid "Returned non-serializeable type: %s" +#: cinder/api/openstack/wsgi.py:799 +msgid "Unrecognized Content-Type provided in request" msgstr "" -#: cinder/api/sizelimit.py:51 -msgid "Request is too large." 
+#: cinder/api/openstack/wsgi.py:803 +msgid "No Content-Type provided in request" msgstr "" -#: cinder/api/validator.py:142 +#: cinder/api/openstack/wsgi.py:914 #, python-format -msgid "%(key)s with value %(value)s failed validator %(validator)s" +msgid "There is no such action: %s" msgstr "" -#: cinder/api/ec2/__init__.py:73 -#, python-format -msgid "%(code)s: %(message)s" +#: cinder/api/openstack/wsgi.py:917 cinder/api/openstack/wsgi.py:930 +#: cinder/api/v1/snapshot_metadata.py:53 cinder/api/v1/snapshot_metadata.py:71 +#: cinder/api/v1/snapshot_metadata.py:96 cinder/api/v1/snapshot_metadata.py:121 +#: cinder/api/v1/volume_metadata.py:53 cinder/api/v1/volume_metadata.py:71 +#: cinder/api/v1/volume_metadata.py:96 cinder/api/v1/volume_metadata.py:121 +#: cinder/api/v2/snapshot_metadata.py:53 cinder/api/v2/snapshot_metadata.py:71 +#: cinder/api/v2/snapshot_metadata.py:96 cinder/api/v2/snapshot_metadata.py:121 +#: cinder/api/v2/volume_metadata.py:52 cinder/api/v2/volume_metadata.py:70 +#: cinder/api/v2/volume_metadata.py:95 cinder/api/v2/volume_metadata.py:120 +msgid "Malformed request body" msgstr "" -#: cinder/api/ec2/__init__.py:95 -#, python-format -msgid "FaultWrapper: %s" +#: cinder/api/openstack/wsgi.py:927 +msgid "Unsupported Content-Type" msgstr "" -#: cinder/api/ec2/__init__.py:170 -msgid "Too many failed authentications." +#: cinder/api/openstack/wsgi.py:939 +msgid "Malformed request url" msgstr "" -#: cinder/api/ec2/__init__.py:180 +#: cinder/api/openstack/wsgi.py:987 #, python-format -msgid "" -"Access key %(access_key)s has had %(failures)d failed authentications and" -" will be locked out for %(lock_mins)d minutes." +msgid "%(url)s returned a fault: %(e)s" msgstr "" -#: cinder/api/ec2/__init__.py:267 -msgid "Signature not provided" +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." msgstr "" -#: cinder/api/ec2/__init__.py:271 -msgid "Access key not provided" +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." msgstr "" -#: cinder/api/ec2/__init__.py:306 cinder/api/ec2/__init__.py:319 -msgid "Failure communicating with keystone" +#: cinder/api/v1/limits.py:139 cinder/api/v2/limits.py:138 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." msgstr "" -#: cinder/api/ec2/__init__.py:388 -#, python-format -msgid "Authentication Failure: %s" +#: cinder/api/v1/limits.py:264 cinder/api/v2/limits.py:261 +msgid "This request was rate-limited." 
msgstr "" -#: cinder/api/ec2/__init__.py:404 -#, python-format -msgid "Authenticated Request For %(uname)s:%(pname)s)" +#: cinder/api/v1/snapshot_metadata.py:37 cinder/api/v1/snapshot_metadata.py:117 +#: cinder/api/v1/snapshot_metadata.py:156 cinder/api/v2/snapshot_metadata.py:37 +#: cinder/api/v2/snapshot_metadata.py:117 +#: cinder/api/v2/snapshot_metadata.py:156 +#, fuzzy +msgid "snapshot does not exist" +msgstr "인스턴스 %s: 스냅샷 저장중" + +#: cinder/api/v1/snapshot_metadata.py:139 +#: cinder/api/v1/snapshot_metadata.py:149 cinder/api/v1/volume_metadata.py:139 +#: cinder/api/v1/volume_metadata.py:149 cinder/api/v2/snapshot_metadata.py:139 +#: cinder/api/v2/snapshot_metadata.py:149 cinder/api/v2/volume_metadata.py:138 +#: cinder/api/v2/volume_metadata.py:148 +msgid "Metadata item was not found" msgstr "" -#: cinder/api/ec2/__init__.py:435 +#: cinder/api/v1/snapshots.py:119 cinder/api/v2/snapshots.py:120 #, python-format -msgid "action: %s" +msgid "Delete snapshot with id: %s" msgstr "" -#: cinder/api/ec2/__init__.py:437 -#, python-format -msgid "arg: %(key)s\t\tval: %(value)s" +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:184 +msgid "'volume_id' must be specified" msgstr "" -#: cinder/api/ec2/__init__.py:512 +#: cinder/api/v1/snapshots.py:182 cinder/api/v2/snapshots.py:193 #, python-format -msgid "Unauthorized request for controller=%(controller)s and action=%(action)s" +msgid "Create snapshot from volume %s" msgstr "" -#: cinder/api/ec2/__init__.py:584 +#: cinder/api/v1/snapshots.py:186 cinder/api/v2/snapshots.py:202 #, python-format -msgid "InstanceNotFound raised: %s" +msgid "Invalid value '%s' for force. " msgstr "" -#: cinder/api/ec2/__init__.py:590 -#, python-format -msgid "VolumeNotFound raised: %s" +#: cinder/api/v1/volume_metadata.py:37 cinder/api/v1/volume_metadata.py:117 +#: cinder/api/v1/volume_metadata.py:156 cinder/api/v2/volume_metadata.py:36 +#: cinder/api/v2/volume_metadata.py:116 cinder/api/v2/volume_metadata.py:155 +msgid "volume does not exist" msgstr "" -#: cinder/api/ec2/__init__.py:596 +#: cinder/api/v1/volumes.py:111 #, python-format -msgid "SnapshotNotFound raised: %s" +msgid "vol=%s" msgstr "" -#: cinder/api/ec2/__init__.py:602 +#: cinder/api/v1/volumes.py:290 cinder/api/v2/volumes.py:228 #, python-format -msgid "NotFound raised: %s" +msgid "Delete volume with id: %s" msgstr "" -#: cinder/api/ec2/__init__.py:605 -#, python-format -msgid "EC2APIError raised: %s" +#: cinder/api/v1/volumes.py:344 cinder/api/v1/volumes.py:348 +#: cinder/api/v2/volumes.py:298 cinder/api/v2/volumes.py:302 +msgid "Invalid imageRef provided." 
msgstr "" -#: cinder/api/ec2/__init__.py:613 +#: cinder/api/v1/volumes.py:388 cinder/api/v2/volumes.py:354 #, python-format -msgid "KeyPairExists raised: %s" +msgid "snapshot id:%s not found" msgstr "" -#: cinder/api/ec2/__init__.py:617 +#: cinder/api/v1/volumes.py:401 #, python-format -msgid "InvalidParameterValue raised: %s" +msgid "source vol id:%s not found" msgstr "" -#: cinder/api/ec2/__init__.py:621 +#: cinder/api/v1/volumes.py:412 cinder/api/v2/volumes.py:377 #, python-format -msgid "InvalidPortRange raised: %s" +msgid "Create volume of %s GB" msgstr "" -#: cinder/api/ec2/__init__.py:625 +#: cinder/api/v1/volumes.py:496 #, python-format -msgid "NotAuthorized raised: %s" +msgid "Removing options '%(bad_options)s' from query" msgstr "" -#: cinder/api/ec2/__init__.py:629 -#, python-format -msgid "InvalidRequest raised: %s" +#: cinder/api/v2/snapshots.py:111 cinder/api/v2/snapshots.py:126 +#: cinder/api/v2/snapshots.py:267 +msgid "Snapshot could not be found" msgstr "" -#: cinder/api/ec2/__init__.py:633 +#: cinder/api/v2/snapshots.py:174 cinder/api/v2/snapshots.py:234 +#: cinder/api/v2/volumes.py:313 cinder/api/v2/volumes.py:419 #, python-format -msgid "QuotaError raised: %s" +msgid "Missing required element '%s' in request body" msgstr "" -#: cinder/api/ec2/__init__.py:637 -#, python-format -msgid "Invalid id: bogus (expecting \"i-...\"): %s" +#: cinder/api/v2/snapshots.py:190 cinder/api/v2/volumes.py:217 +#: cinder/api/v2/volumes.py:234 cinder/api/v2/volumes.py:449 +msgid "Volume could not be found" msgstr "" -#: cinder/api/ec2/__init__.py:646 -#, python-format -msgid "Unexpected error raised: %s" +#: cinder/api/v2/snapshots.py:230 cinder/api/v2/volumes.py:415 +msgid "Missing request body" msgstr "" -#: cinder/api/ec2/__init__.py:647 -#, python-format -msgid "Environment: %s" +#: cinder/api/v2/types.py:70 +msgid "Volume type not found" msgstr "" -#: cinder/api/ec2/__init__.py:649 cinder/api/metadata/handler.py:248 -msgid "An unknown error has occurred. Please try your request again." +#: cinder/api/v2/volumes.py:237 +msgid "Volume cannot be deleted while in attached state" msgstr "" -#: cinder/api/ec2/apirequest.py:64 -#, python-format -msgid "Unsupported API request: controller = %(controller)s, action = %(action)s" +#: cinder/api/v2/volumes.py:343 +msgid "Volume type not found." msgstr "" -#: cinder/api/ec2/cloud.py:336 +#: cinder/api/v2/volumes.py:366 #, python-format -msgid "Create snapshot of volume %s" +msgid "source volume id:%s not found" msgstr "" -#: cinder/api/ec2/cloud.py:372 +#: cinder/api/v2/volumes.py:472 #, python-format -msgid "" -"Value (%s) for KeyName is invalid. Content limited to Alphanumeric " -"character, spaces, dashes, and underscore." +msgid "Removing options '%s' from query" msgstr "" -#: cinder/api/ec2/cloud.py:378 -#, python-format -msgid "Value (%s) for Keyname is invalid. Length exceeds maximum of 255." 
+#: cinder/backup/api.py:66 +#, fuzzy +msgid "Backup status must be available or error" +msgstr "볼륨의 상태를 알 수 없습니다" + +#: cinder/backup/api.py:105 +#, fuzzy +msgid "Volume to be backed up must be available" +msgstr "볼륨의 상태를 알 수 없습니다" + +#: cinder/backup/api.py:140 +#, fuzzy +msgid "Backup status must be available" +msgstr "볼륨의 상태를 알 수 없습니다" + +#: cinder/backup/api.py:145 +msgid "Backup to be restored has invalid size" msgstr "" -#: cinder/api/ec2/cloud.py:382 +#: cinder/backup/api.py:154 #, python-format -msgid "Create key pair %s" +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" msgstr "" -#: cinder/api/ec2/cloud.py:391 +#: cinder/backup/api.py:170 +#, fuzzy +msgid "Volume to be restored to must be available" +msgstr "볼륨의 상태를 알 수 없습니다" + +#: cinder/backup/api.py:176 #, python-format -msgid "Import key %s" +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." msgstr "" -#: cinder/api/ec2/cloud.py:409 +#: cinder/backup/api.py:181 #, python-format -msgid "Delete key pair %s" +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" msgstr "" -#: cinder/api/ec2/cloud.py:551 -msgid "Invalid CIDR" +#: cinder/backup/manager.py:97 +msgid "NULL host not allowed for volume backend lookup." msgstr "" -#: cinder/api/ec2/cloud.py:639 cinder/api/ec2/cloud.py:693 -#: cinder/api/ec2/cloud.py:800 -msgid "Not enough parameters, need group_name or group_id" +#: cinder/backup/manager.py:100 +#, python-format +msgid "Checking hostname '%s' for backend info." msgstr "" -#: cinder/api/ec2/cloud.py:654 -#: cinder/api/openstack/compute/contrib/security_groups.py:517 +#: cinder/backup/manager.py:107 #, python-format -msgid "Revoke security group ingress %s" +msgid "Backend not found in hostname (%s) so using default." msgstr "" -#: cinder/api/ec2/cloud.py:666 cinder/api/ec2/cloud.py:719 +#: cinder/backup/manager.py:117 #, python-format -msgid "%s Not enough parameters to build a valid rule" +msgid "Manager requested for volume_backend '%s'." msgstr "" -#: cinder/api/ec2/cloud.py:684 cinder/api/ec2/cloud.py:744 -msgid "No rule for the specified parameters." +#: cinder/backup/manager.py:120 cinder/backup/manager.py:132 +msgid "Fetching default backend." msgstr "" -#: cinder/api/ec2/cloud.py:708 -#: cinder/api/openstack/compute/contrib/security_groups.py:354 +#: cinder/backup/manager.py:123 #, python-format -msgid "Authorize security group ingress %s" +msgid "Volume manager for backend '%s' does not exist." msgstr "" -#: cinder/api/ec2/cloud.py:725 +#: cinder/backup/manager.py:129 #, python-format -msgid "%s - This rule already exists in group" +msgid "Driver requested for volume_backend '%s'." msgstr "" -#: cinder/api/ec2/cloud.py:769 +#: cinder/backup/manager.py:147 #, python-format msgid "" -"Value (%s) for parameter GroupName is invalid. Content limited to " -"Alphanumeric characters, spaces, dashes, and underscores." +"Registering backend %(backend)s (host=%(host)s " +"backend_name=%(backend_name)s)." msgstr "" -#: cinder/api/ec2/cloud.py:776 +#: cinder/backup/manager.py:154 #, python-format -msgid "" -"Value (%s) for parameter GroupName is invalid. Length exceeds maximum of " -"255." +msgid "Registering default backend %s." msgstr "" -#: cinder/api/ec2/cloud.py:780 -#: cinder/api/openstack/compute/contrib/security_groups.py:292 +#: cinder/backup/manager.py:158 #, python-format -msgid "Create Security Group %s" +msgid "Starting volume driver %(driver_name)s (%(version)s)." 
msgstr "" -#: cinder/api/ec2/cloud.py:783 +#: cinder/backup/manager.py:165 #, python-format -msgid "group %s already exists" +msgid "Error encountered during initialization of driver: %(name)s." msgstr "" -#: cinder/api/ec2/cloud.py:815 -#: cinder/api/openstack/compute/contrib/security_groups.py:245 -#, python-format -msgid "Delete security group %s" +#: cinder/backup/manager.py:184 +msgid "Cleaning up incomplete backup operations." msgstr "" -#: cinder/api/ec2/cloud.py:823 cinder/compute/manager.py:1630 +#: cinder/backup/manager.py:189 #, python-format -msgid "Get console output for instance %s" +msgid "Resetting volume %s to available (was backing-up)." msgstr "" -#: cinder/api/ec2/cloud.py:894 +#: cinder/backup/manager.py:194 #, python-format -msgid "Create volume from snapshot %s" +msgid "Resetting volume %s to error_restoring (was restoring-backup)." msgstr "" -#: cinder/api/ec2/cloud.py:898 cinder/api/openstack/compute/contrib/volumes.py:186 -#: cinder/api/openstack/volume/volumes.py:222 +#: cinder/backup/manager.py:206 #, python-format -msgid "Create volume of %s GB" +msgid "Resetting backup %s to error (was creating)." msgstr "" -#: cinder/api/ec2/cloud.py:921 -msgid "Delete Failed" +#: cinder/backup/manager.py:212 +#, python-format +msgid "Resetting backup %s to available (was restoring)." msgstr "" -#: cinder/api/ec2/cloud.py:931 +#: cinder/backup/manager.py:217 #, python-format -msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" +msgid "Resuming delete on backup: %s." msgstr "" -#: cinder/api/ec2/cloud.py:939 -msgid "Attach Failed." +#: cinder/backup/manager.py:225 +#, python-format +msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." msgstr "" -#: cinder/api/ec2/cloud.py:952 cinder/api/openstack/compute/contrib/volumes.py:366 +#: cinder/backup/manager.py:237 #, python-format -msgid "Detach volume %s" +msgid "" +"Create backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s." msgstr "" -#: cinder/api/ec2/cloud.py:959 -msgid "Detach Volume Failed." +#: cinder/backup/manager.py:249 +#, python-format +msgid "" +"Create backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." msgstr "" -#: cinder/api/ec2/cloud.py:984 cinder/api/ec2/cloud.py:1041 -#: cinder/api/ec2/cloud.py:1518 cinder/api/ec2/cloud.py:1533 +#: cinder/backup/manager.py:282 #, python-format -msgid "attribute not supported: %s" +msgid "Create backup finished. backup: %s." msgstr "" -#: cinder/api/ec2/cloud.py:1107 +#: cinder/backup/manager.py:286 #, python-format -msgid "vol = %s\n" +msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s." msgstr "" -#: cinder/api/ec2/cloud.py:1255 -msgid "Allocate address" +#: cinder/backup/manager.py:299 +#, python-format +msgid "" +"Restore backup aborted: expected volume status %(expected_status)s but " +"got %(actual_status)s." msgstr "" -#: cinder/api/ec2/cloud.py:1267 +#: cinder/backup/manager.py:310 #, python-format -msgid "Release address %s" +msgid "" +"Restore backup aborted: expected backup status %(expected_status)s but " +"got %(actual_status)s." msgstr "" -#: cinder/api/ec2/cloud.py:1272 +#: cinder/backup/manager.py:329 #, python-format -msgid "Associate address %(public_ip)s to instance %(instance_id)s" +msgid "" +"Restore backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." 
msgstr "" -#: cinder/api/ec2/cloud.py:1282 +#: cinder/backup/manager.py:360 #, python-format -msgid "Disassociate address %s" +msgid "" +"Restore backup finished, backup %(backup_id)s restored to volume " +"%(volume_id)s." msgstr "" -#: cinder/api/ec2/cloud.py:1308 -msgid "Image must be available" +#: cinder/backup/manager.py:379 +#, python-format +msgid "Delete backup started, backup: %s." msgstr "" -#: cinder/api/ec2/cloud.py:1329 -msgid "Going to start terminating instances" +#: cinder/backup/manager.py:386 +#, python-format +msgid "" +"Delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." msgstr "" -#: cinder/api/ec2/cloud.py:1343 +#: cinder/backup/manager.py:399 #, python-format -msgid "Reboot instance %r" +msgid "" +"Delete backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." msgstr "" -#: cinder/api/ec2/cloud.py:1354 -msgid "Going to stop instances" +#: cinder/backup/manager.py:422 +#, python-format +msgid "Delete backup finished, backup %s deleted." msgstr "" -#: cinder/api/ec2/cloud.py:1365 -msgid "Going to start instances" +#: cinder/backup/drivers/ceph.py:116 +msgid "" +"rbd striping not supported - ignoring configuration settings for rbd " +"striping" msgstr "" -#: cinder/api/ec2/cloud.py:1455 +#: cinder/backup/drivers/ceph.py:147 #, python-format -msgid "De-registering image %s" +msgid "invalid user '%s'" msgstr "" -#: cinder/api/ec2/cloud.py:1471 -msgid "imageLocation is required" +#: cinder/backup/drivers/ceph.py:213 +msgid "backup_id required" msgstr "" -#: cinder/api/ec2/cloud.py:1490 +#: cinder/backup/drivers/ceph.py:224 #, python-format -msgid "Registered image %(image_location)s with id %(image_id)s" +msgid "discarding %(length)s bytes from offset %(offset)s" msgstr "" -#: cinder/api/ec2/cloud.py:1536 -msgid "user or group not specified" +#: cinder/backup/drivers/ceph.py:232 +#, python-format +msgid "writing zeroes chunk %d" msgstr "" -#: cinder/api/ec2/cloud.py:1538 -msgid "only group \"all\" is supported" +#: cinder/backup/drivers/ceph.py:246 +#, python-format +msgid "transferring data between '%(src)s' and '%(dest)s'" msgstr "" -#: cinder/api/ec2/cloud.py:1540 -msgid "operation_type must be add or remove" +#: cinder/backup/drivers/ceph.py:250 +#, python-format +msgid "%(chunks)s chunks of %(bytes)s bytes to be transferred" msgstr "" -#: cinder/api/ec2/cloud.py:1542 +#: cinder/backup/drivers/ceph.py:269 #, python-format -msgid "Updating image %s publicity" +msgid "transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s)" msgstr "" -#: cinder/api/ec2/cloud.py:1555 +#: cinder/backup/drivers/ceph.py:279 #, python-format -msgid "Not allowed to modify attributes for image %s" +msgid "transferring remaining %s bytes" msgstr "" -#: cinder/api/ec2/cloud.py:1603 +#: cinder/backup/drivers/ceph.py:295 #, python-format -msgid "Couldn't stop instance with in %d sec" +msgid "creating base image '%s'" msgstr "" -#: cinder/api/metadata/handler.py:246 cinder/api/metadata/handler.py:253 +#: cinder/backup/drivers/ceph.py:322 cinder/backup/drivers/ceph.py:603 #, python-format -msgid "Failed to get metadata for ip: %s" +msgid "deleting backup snapshot='%s'" msgstr "" -#: cinder/api/openstack/__init__.py:43 -#, python-format -msgid "Caught error: %s" +#: cinder/backup/drivers/ceph.py:325 +msgid "no backup snapshot to delete" msgstr "" -#: cinder/api/openstack/__init__.py:45 cinder/api/openstack/wsgi.py:886 +#: 
cinder/backup/drivers/ceph.py:361 #, python-format -msgid "%(url)s returned with HTTP %(status)d" +msgid "trying diff format name format basename='%s'" msgstr "" -#: cinder/api/openstack/__init__.py:94 -msgid "Must specify an ExtensionManager class" +#: cinder/backup/drivers/ceph.py:369 +#, python-format +msgid "image %s not found" msgstr "" -#: cinder/api/openstack/__init__.py:105 +#: cinder/backup/drivers/ceph.py:377 #, python-format -msgid "Extended resource: %s" +msgid "base image still has %s snapshots so skipping base image delete" msgstr "" -#: cinder/api/openstack/__init__.py:130 +#: cinder/backup/drivers/ceph.py:382 #, python-format -msgid "" -"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " -"resource" +msgid "deleting base image='%s'" msgstr "" -#: cinder/api/openstack/__init__.py:135 +#: cinder/backup/drivers/ceph.py:389 #, python-format -msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgid "image busy, retrying %(retries)s more time(s) in %(delay)ss" msgstr "" -#: cinder/api/openstack/auth.py:90 -#, python-format -msgid "%(user_id)s could not be found with token '%(token)s'" +#: cinder/backup/drivers/ceph.py:394 +msgid "max retries reached - raising error" msgstr "" -#: cinder/api/openstack/auth.py:134 +#: cinder/backup/drivers/ceph.py:397 #, python-format -msgid "%(user_id)s must be an admin or a member of %(project_id)s" +msgid "base backup image='%s' deleted)" msgstr "" -#: cinder/api/openstack/auth.py:152 -msgid "Authentication requests must be made against a version root (e.g. /v2)." +#: cinder/backup/drivers/ceph.py:407 +#, python-format +msgid "deleting source snap '%s'" msgstr "" -#: cinder/api/openstack/auth.py:167 +#: cinder/backup/drivers/ceph.py:453 #, python-format -msgid "Could not find %s in request." +msgid "performing differential transfer from '%(src)s' to '%(dest)s'" msgstr "" -#: cinder/api/openstack/auth.py:191 +#: cinder/backup/drivers/ceph.py:478 #, python-format -msgid "Successfully authenticated '%s'" +msgid "rbd diff op failed - (ret=%(ret)s stderr=%(stderr)s)" msgstr "" -#: cinder/api/openstack/auth.py:241 -msgid "User not found with provided API key." 
+#: cinder/backup/drivers/ceph.py:488 +#, python-format +msgid "image '%s' not found - trying diff format name" msgstr "" -#: cinder/api/openstack/auth.py:258 +#: cinder/backup/drivers/ceph.py:493 #, python-format -msgid "Provided API key is valid, but not for user '%(username)s'" +msgid "diff format image '%s' not found" msgstr "" -#: cinder/api/openstack/common.py:133 cinder/api/openstack/common.py:167 -msgid "limit param must be an integer" +#: cinder/backup/drivers/ceph.py:528 +#, python-format +msgid "using --from-snap '%s'" msgstr "" -#: cinder/api/openstack/common.py:136 cinder/api/openstack/common.py:171 -msgid "limit param must be positive" +#: cinder/backup/drivers/ceph.py:543 +#, python-format +msgid "source snap '%s' is stale so deleting" msgstr "" -#: cinder/api/openstack/common.py:161 -msgid "offset param must be an integer" +#: cinder/backup/drivers/ceph.py:555 +#, python-format +msgid "" +"snap='%(snap)s' does not exist in base image='%(base)s' - aborting " +"incremental backup" msgstr "" -#: cinder/api/openstack/common.py:175 -msgid "offset param must be positive" +#: cinder/backup/drivers/ceph.py:566 +#, python-format +msgid "creating backup snapshot='%s'" msgstr "" -#: cinder/api/openstack/common.py:203 +#: cinder/backup/drivers/ceph.py:586 #, python-format -msgid "marker [%s] not found" +msgid "differential backup transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:595 +msgid "differential backup transfer failed" msgstr "" -#: cinder/api/openstack/common.py:243 +#: cinder/backup/drivers/ceph.py:625 #, python-format -msgid "href %s does not contain version" +msgid "creating base image='%s'" msgstr "" -#: cinder/api/openstack/common.py:278 -msgid "Image metadata limit exceeded" +#: cinder/backup/drivers/ceph.py:634 +msgid "copying data" msgstr "" -#: cinder/api/openstack/common.py:295 +#: cinder/backup/drivers/ceph.py:694 #, python-format -msgid "Converting nw_info: %s" +msgid "looking for snapshot of backup base '%s'" msgstr "" -#: cinder/api/openstack/common.py:305 +#: cinder/backup/drivers/ceph.py:697 #, python-format -msgid "Converted networks: %s" +msgid "backup base '%s' has no snapshots" msgstr "" -#: cinder/api/openstack/common.py:338 +#: cinder/backup/drivers/ceph.py:704 #, python-format -msgid "Cannot '%(action)s' while instance is in %(attr)s %(state)s" +msgid "backup '%s' has no snapshot" msgstr "" -#: cinder/api/openstack/common.py:341 +#: cinder/backup/drivers/ceph.py:708 #, python-format -msgid "Instance is in an invalid state for '%(action)s'" +msgid "backup should only have one snapshot but instead has %s" msgstr "" -#: cinder/api/openstack/common.py:421 -msgid "Rejecting snapshot request, snapshots currently disabled" +#: cinder/backup/drivers/ceph.py:713 +#, python-format +msgid "found snapshot '%s'" msgstr "" -#: cinder/api/openstack/common.py:423 -msgid "Instance snapshots are not permitted at this time." +#: cinder/backup/drivers/ceph.py:734 +msgid "need non-zero volume size" msgstr "" -#: cinder/api/openstack/extensions.py:188 +#: cinder/backup/drivers/ceph.py:751 #, python-format -msgid "Loaded extension: %s" +msgid "Starting backup of volume='%s'" msgstr "" -#: cinder/api/openstack/extensions.py:225 -#, python-format -msgid "Ext name: %s" +#: cinder/backup/drivers/ceph.py:764 +msgid "forcing full backup" msgstr "" -#: cinder/api/openstack/extensions.py:226 +#: cinder/backup/drivers/ceph.py:776 #, python-format -msgid "Ext alias: %s" +msgid "backup '%s' finished." 
msgstr "" -#: cinder/api/openstack/extensions.py:227 -#, python-format -msgid "Ext description: %s" +#: cinder/backup/drivers/ceph.py:834 +msgid "adjusting restore vol size" msgstr "" -#: cinder/api/openstack/extensions.py:229 +#: cinder/backup/drivers/ceph.py:846 #, python-format -msgid "Ext namespace: %s" +msgid "trying incremental restore from base='%(base)s' snap='%(snap)s'" msgstr "" -#: cinder/api/openstack/extensions.py:230 -#, python-format -msgid "Ext updated: %s" +#: cinder/backup/drivers/ceph.py:858 +msgid "differential restore failed, trying full restore" msgstr "" -#: cinder/api/openstack/extensions.py:232 +#: cinder/backup/drivers/ceph.py:869 #, python-format -msgid "Exception loading extension: %s" +msgid "restore transfer completed in %.4fs" msgstr "" -#: cinder/api/openstack/extensions.py:246 +#: cinder/backup/drivers/ceph.py:916 #, python-format -msgid "Loading extension %s" +msgid "rbd has %s extents" msgstr "" -#: cinder/api/openstack/extensions.py:252 -#, python-format -msgid "Calling extension factory %s" +#: cinder/backup/drivers/ceph.py:938 +msgid "dest volume is original volume - forcing full copy" msgstr "" -#: cinder/api/openstack/extensions.py:264 -#, python-format -msgid "Failed to load extension %(ext_factory)s: %(exc)s" +#: cinder/backup/drivers/ceph.py:959 +msgid "destination has extents - forcing full copy" msgstr "" -#: cinder/api/openstack/extensions.py:344 +#: cinder/backup/drivers/ceph.py:964 #, python-format -msgid "Failed to load extension %(classpath)s: %(exc)s" +msgid "no restore point found for backup='%s', forcing full copy" msgstr "" -#: cinder/api/openstack/extensions.py:368 -#, python-format -msgid "Failed to load extension %(ext_name)s: %(exc)s" +#: cinder/backup/drivers/ceph.py:995 +msgid "forcing full restore" msgstr "" -#: cinder/api/openstack/wsgi.py:135 cinder/api/openstack/wsgi.py:538 -msgid "cannot understand JSON" +#: cinder/backup/drivers/ceph.py:1005 +#, python-format +msgid "starting restore from Ceph backup=%(src)s to volume=%(dest)s" msgstr "" -#: cinder/api/openstack/wsgi.py:159 -#: cinder/api/openstack/compute/contrib/hosts.py:86 -msgid "cannot understand XML" +#: cinder/backup/drivers/ceph.py:1016 +msgid "volume_file does not support fileno() so skipping fsync()" msgstr "" -#: cinder/api/openstack/wsgi.py:543 -msgid "too many body keys" +#: cinder/backup/drivers/ceph.py:1021 +msgid "restore finished successfully." 
msgstr "" -#: cinder/api/openstack/wsgi.py:582 +#: cinder/backup/drivers/ceph.py:1023 #, python-format -msgid "Exception handling resource: %s" +msgid "restore finished with error - %s" msgstr "" -#: cinder/api/openstack/wsgi.py:586 +#: cinder/backup/drivers/ceph.py:1029 #, python-format -msgid "Fault thrown: %s" +msgid "delete started for backup=%s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1034 +msgid "rbd image not found but continuing anyway so that db entry can be removed" msgstr "" -#: cinder/api/openstack/wsgi.py:589 +#: cinder/backup/drivers/ceph.py:1037 #, python-format -msgid "HTTP exception thrown: %s" +msgid "delete '%s' finished with warning" msgstr "" -#: cinder/api/openstack/wsgi.py:697 -msgid "Unrecognized Content-Type provided in request" +#: cinder/backup/drivers/ceph.py:1039 +#, python-format +msgid "delete '%s' finished" msgstr "" -#: cinder/api/openstack/wsgi.py:701 -msgid "No Content-Type provided in request" +#: cinder/backup/drivers/swift.py:106 +#, python-format +msgid "unsupported compression algorithm: %s" msgstr "" -#: cinder/api/openstack/wsgi.py:705 -msgid "Empty body provided in request" +#: cinder/backup/drivers/swift.py:123 +#, python-format +msgid "single_user auth mode enabled, but %(param)s not set" msgstr "" -#: cinder/api/openstack/wsgi.py:816 +#: cinder/backup/drivers/swift.py:141 #, python-format -msgid "There is no such action: %s" +msgid "_check_container_exists: container: %s" msgstr "" -#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 -#: cinder/api/openstack/compute/server_metadata.py:58 -#: cinder/api/openstack/compute/server_metadata.py:76 -#: cinder/api/openstack/compute/server_metadata.py:101 -#: cinder/api/openstack/compute/server_metadata.py:126 -#: cinder/api/openstack/compute/contrib/admin_actions.py:211 -#: cinder/api/openstack/compute/contrib/console_output.py:52 -msgid "Malformed request body" +#: cinder/backup/drivers/swift.py:146 +#, python-format +msgid "container %s does not exist" msgstr "" -#: cinder/api/openstack/wsgi.py:829 -msgid "Unsupported Content-Type" +#: cinder/backup/drivers/swift.py:151 +#, python-format +msgid "container %s exists" msgstr "" -#: cinder/api/openstack/wsgi.py:841 -msgid "Malformed request url" +#: cinder/backup/drivers/swift.py:157 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" msgstr "" -#: cinder/api/openstack/wsgi.py:889 +#: cinder/backup/drivers/swift.py:173 #, python-format -msgid "%(url)s returned a fault: %(e)s" +msgid "_generate_swift_object_name_prefix: %s" msgstr "" -#: cinder/api/openstack/xmlutil.py:265 -msgid "element is not a child" +#: cinder/backup/drivers/swift.py:182 +#, python-format +msgid "generated object list: %s" msgstr "" -#: cinder/api/openstack/xmlutil.py:414 -msgid "root element selecting a list" +#: cinder/backup/drivers/swift.py:192 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" msgstr "" -#: cinder/api/openstack/xmlutil.py:739 +#: cinder/backup/drivers/swift.py:209 #, python-format -msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" msgstr "" -#: cinder/api/openstack/xmlutil.py:858 -msgid "subclasses must implement construct()!" 
+#: cinder/backup/drivers/swift.py:214 +msgid "_write_metadata finished" msgstr "" -#: cinder/api/openstack/compute/extensions.py:29 -#: cinder/api/openstack/volume/extensions.py:29 -msgid "Initializing extension manager." +#: cinder/backup/drivers/swift.py:219 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" msgstr "" -#: cinder/api/openstack/compute/image_metadata.py:40 -#: cinder/api/openstack/compute/images.py:146 -#: cinder/api/openstack/compute/images.py:161 -msgid "Image not found." +#: cinder/backup/drivers/swift.py:224 +#, python-format +msgid "_read_metadata finished (%s)" msgstr "" -#: cinder/api/openstack/compute/image_metadata.py:79 -msgid "Incorrect request body format" +#: cinder/backup/drivers/swift.py:234 +#, python-format +msgid "volume size %d is invalid." msgstr "" -#: cinder/api/openstack/compute/image_metadata.py:83 -#: cinder/api/openstack/compute/server_metadata.py:80 -#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:79 -#: cinder/api/openstack/compute/contrib/volumetypes.py:188 -msgid "Request body and URI mismatch" +#: cinder/backup/drivers/swift.py:248 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" msgstr "" -#: cinder/api/openstack/compute/image_metadata.py:86 -#: cinder/api/openstack/compute/server_metadata.py:84 -#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:82 -#: cinder/api/openstack/compute/contrib/volumetypes.py:191 -msgid "Request body contains too many items" +#: cinder/backup/drivers/swift.py:271 +msgid "reading chunk of data from volume" msgstr "" -#: cinder/api/openstack/compute/image_metadata.py:111 -msgid "Invalid metadata key" +#: cinder/backup/drivers/swift.py:278 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" msgstr "" -#: cinder/api/openstack/compute/ips.py:74 -msgid "Instance does not exist" +#: cinder/backup/drivers/swift.py:287 +msgid "not compressing data" msgstr "" -#: cinder/api/openstack/compute/ips.py:97 -msgid "Instance is not a member of specified network" +#: cinder/backup/drivers/swift.py:291 +msgid "About to put_object" msgstr "" -#: cinder/api/openstack/compute/limits.py:140 +#: cinder/backup/drivers/swift.py:297 #, python-format -msgid "" -"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " -"%(unit_string)s." +msgid "swift MD5 for %(object_name)s: %(etag)s" msgstr "" -#: cinder/api/openstack/compute/limits.py:266 -msgid "This request was rate-limited." 
+#: cinder/backup/drivers/swift.py:301 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" msgstr "" -#: cinder/api/openstack/compute/server_metadata.py:38 -#: cinder/api/openstack/compute/server_metadata.py:122 -#: cinder/api/openstack/compute/server_metadata.py:159 -msgid "Server does not exist" +#: cinder/backup/drivers/swift.py:304 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" msgstr "" -#: cinder/api/openstack/compute/server_metadata.py:141 -#: cinder/api/openstack/compute/server_metadata.py:152 -msgid "Metadata item was not found" +#: cinder/backup/drivers/swift.py:312 +msgid "Calling eventlet.sleep(0)" msgstr "" -#: cinder/api/openstack/compute/servers.py:425 +#: cinder/backup/drivers/swift.py:328 cinder/backup/drivers/tsm.py:324 #, python-format -msgid "Invalid server status: %(status)s" +msgid "backup %s finished." msgstr "" -#: cinder/api/openstack/compute/servers.py:433 -msgid "Invalid changes-since value" +#: cinder/backup/drivers/swift.py:345 +#, python-format +msgid "v1 swift volume backup restore of %s started" msgstr "" -#: cinder/api/openstack/compute/servers.py:483 -msgid "Personality file limit exceeded" +#: cinder/backup/drivers/swift.py:350 +#, python-format +msgid "metadata_object_names = %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:485 -msgid "Personality file path too long" +#: cinder/backup/drivers/swift.py:356 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" msgstr "" -#: cinder/api/openstack/compute/servers.py:487 -msgid "Personality file content too long" +#: cinder/backup/drivers/swift.py:362 +#, python-format +msgid "" +"restoring object from swift. backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:501 -msgid "Server name is not a string or unicode" +#: cinder/backup/drivers/swift.py:378 +#, python-format +msgid "decompressing data using %s algorithm" msgstr "" -#: cinder/api/openstack/compute/servers.py:505 -msgid "Server name is an empty string" +#: cinder/backup/drivers/swift.py:401 +#, python-format +msgid "v1 swift volume backup restore of %s finished" msgstr "" -#: cinder/api/openstack/compute/servers.py:509 -msgid "Server name must be less than 256 characters." +#: cinder/backup/drivers/swift.py:409 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:527 +#: cinder/backup/drivers/swift.py:423 #, python-format -msgid "Bad personality format: missing %s" +msgid "Restoring swift backup version %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:530 -msgid "Bad personality format" +#: cinder/backup/drivers/swift.py:428 +#, python-format +msgid "No support to restore swift backup version %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:535 +#: cinder/backup/drivers/swift.py:432 cinder/backup/drivers/tsm.py:378 #, python-format -msgid "Personality content for %s cannot be decoded" +msgid "restore %(backup_id)s to %(volume_id)s finished." 
msgstr "" -#: cinder/api/openstack/compute/servers.py:550 +#: cinder/backup/drivers/swift.py:446 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:455 #, python-format -msgid "Bad networks format: network uuid is not in proper format (%s)" +msgid "swift error while deleting object %s, continuing with delete" msgstr "" -#: cinder/api/openstack/compute/servers.py:559 +#: cinder/backup/drivers/swift.py:458 #, python-format -msgid "Invalid fixed IP address (%s)" +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:566 +#: cinder/backup/drivers/swift.py:468 cinder/backup/drivers/tsm.py:440 #, python-format -msgid "Duplicate networks (%s) are not allowed" +msgid "delete %s finished" msgstr "" -#: cinder/api/openstack/compute/servers.py:572 +#: cinder/backup/drivers/tsm.py:85 #, python-format -msgid "Bad network format: missing %s" +msgid "" +"backup: %(vol_id)s Failed to create device hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:575 -msgid "Bad networks format" +#: cinder/backup/drivers/tsm.py:143 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to obtain backup success notification from " +"server.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:587 -msgid "Userdata content cannot be decoded" +#: cinder/backup/drivers/tsm.py:173 +#, python-format +msgid "" +"restore: %(vol_id)s Failed.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:594 -msgid "accessIPv4 is not proper IPv4 format" +#: cinder/backup/drivers/tsm.py:199 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a block device." msgstr "" -#: cinder/api/openstack/compute/servers.py:601 -msgid "accessIPv6 is not proper IPv6 format" +#: cinder/backup/drivers/tsm.py:206 +#, python-format +msgid "backup: %(vol_id)s Failed. Cannot obtain real path to device %(path)s." msgstr "" -#: cinder/api/openstack/compute/servers.py:633 -msgid "Server name is not defined" +#: cinder/backup/drivers/tsm.py:213 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a file." msgstr "" -#: cinder/api/openstack/compute/servers.py:675 -#: cinder/api/openstack/compute/servers.py:740 -msgid "Invalid flavorRef provided." +#: cinder/backup/drivers/tsm.py:260 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to remove backup hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:737 -msgid "Can not find requested image" +#: cinder/backup/drivers/tsm.py:286 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to TSM, volume path: " +"%(volume_path)s," msgstr "" -#: cinder/api/openstack/compute/servers.py:743 -msgid "Invalid key_name provided." +#: cinder/backup/drivers/tsm.py:298 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:829 -#: cinder/api/openstack/compute/servers.py:849 -msgid "Instance has not been resized." 
+#: cinder/backup/drivers/tsm.py:308 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:835 +#: cinder/backup/drivers/tsm.py:338 #, python-format -msgid "Error in confirm-resize %s" +msgid "" +"restore: starting restore of backup from TSM to volume %(volume_id)s, " +"backup: %(backup_id)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:855 +#: cinder/backup/drivers/tsm.py:352 #, python-format -msgid "Error in revert-resize %s" +msgid "" +"restore: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:868 -msgid "Argument 'type' for reboot is not HARD or SOFT" +#: cinder/backup/drivers/tsm.py:362 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:872 -msgid "Missing argument 'type' for reboot" +#: cinder/backup/drivers/tsm.py:413 +#, python-format +msgid "" +"delete: %(vol_id)s Failed to run dsmc with stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:885 +#: cinder/backup/drivers/tsm.py:421 #, python-format -msgid "Error in reboot %s" +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments with " +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:897 -msgid "Unable to locate requested flavor." +#: cinder/backup/drivers/tsm.py:432 +#, python-format +msgid "" +"delete: %(vol_id)s Failed with stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:900 -msgid "Resize requires a change in size." +#: cinder/brick/exception.py:55 +#, python-format +msgid "Exception in string format operation. msg='%s'" msgstr "" -#: cinder/api/openstack/compute/servers.py:924 -msgid "Malformed server entity" +#: cinder/brick/exception.py:85 +msgid "We are unable to locate any Fibre Channel devices." msgstr "" -#: cinder/api/openstack/compute/servers.py:931 -msgid "Missing imageRef attribute" +#: cinder/brick/exception.py:89 +msgid "Unable to find a Fibre Channel volume device." msgstr "" -#: cinder/api/openstack/compute/servers.py:940 -msgid "Invalid imageRef provided." +#: cinder/brick/exception.py:93 +#, python-format +msgid "Volume device not found at %(device)s." msgstr "" -#: cinder/api/openstack/compute/servers.py:949 -msgid "Missing flavorRef attribute" +#: cinder/brick/exception.py:97 +#, python-format +msgid "Unable to find Volume Group: %(vg_name)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:962 -msgid "No adminPass was specified" +#: cinder/brick/exception.py:101 +#, python-format +msgid "Failed to create Volume Group: %(vg_name)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:966 -#: cinder/api/openstack/compute/servers.py:1144 -msgid "Invalid adminPass" +#: cinder/brick/exception.py:105 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." msgstr "" -#: cinder/api/openstack/compute/servers.py:980 -msgid "Unable to parse metadata key/value pairs." +#: cinder/brick/exception.py:109 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." msgstr "" -#: cinder/api/openstack/compute/servers.py:993 -msgid "Resize request has invalid 'flavorRef' attribute." 
+#: cinder/brick/exception.py:113 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." msgstr "" -#: cinder/api/openstack/compute/servers.py:996 -msgid "Resize requests require 'flavorRef' attribute." +#: cinder/brick/exception.py:117 +#, python-format +msgid "Connect to volume via protocol %(protocol)s not supported." msgstr "" -#: cinder/api/openstack/compute/servers.py:1014 -#: cinder/api/openstack/compute/contrib/aggregates.py:142 -#: cinder/api/openstack/compute/contrib/networks.py:65 -msgid "Invalid request body" +#: cinder/brick/initiator/connector.py:127 +#, python-format +msgid "Invalid InitiatorConnector protocol specified %(protocol)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:1019 -msgid "Could not parse imageRef from request." +#: cinder/brick/initiator/connector.py:140 +#, python-format +msgid "Failed to access the device on the path %(path)s: %(error)s %(info)s." msgstr "" -#: cinder/api/openstack/compute/servers.py:1071 -msgid "Instance could not be found" +#: cinder/brick/initiator/connector.py:229 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. Try" +" number: %(tries)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:1074 -msgid "Cannot find image for rebuild" +#: cinder/brick/initiator/connector.py:242 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" msgstr "" -#: cinder/api/openstack/compute/servers.py:1103 -msgid "createImage entity requires name attribute" +#: cinder/brick/initiator/connector.py:317 +#, python-format +msgid "Could not find the iSCSI Initiator File %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:1112 -#: cinder/api/openstack/compute/contrib/admin_actions.py:238 -msgid "Invalid metadata" +#: cinder/brick/initiator/connector.py:609 +msgid "We are unable to locate any Fibre Channel devices" msgstr "" -#: cinder/api/openstack/compute/servers.py:1167 +#: cinder/brick/initiator/connector.py:619 #, python-format -msgid "Removing options '%(unk_opt_str)s' from query" +msgid "Looking for Fibre Channel dev %(device)s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:60 -#, python-format -msgid "Compute.api::pause %s" -msgstr "Compute.api::pause %s" +#: cinder/brick/initiator/connector.py:629 +msgid "Fibre Channel volume device not found." +msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:77 +#: cinder/brick/initiator/connector.py:633 #, python-format -msgid "Compute.api::unpause %s" -msgstr "Compute.api::unpause %s" +msgid "Fibre volume not yet found. Will rescan & retry. Try number: %(tries)s" +msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:94 +#: cinder/brick/initiator/connector.py:649 #, python-format -msgid "compute.api::suspend %s" -msgstr "compute.api::suspend %s" +msgid "Found Fibre Channel volume %(name)s (after %(tries)s rescans)" +msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:111 +#: cinder/brick/initiator/connector.py:658 #, python-format -msgid "compute.api::resume %s" -msgstr "compute.api::resume %s" +msgid "Multipath device discovered %(device)s" +msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:127 +#: cinder/brick/initiator/connector.py:776 #, python-format -msgid "Error in migrate %s" +msgid "AoE volume not yet found at: %(path)s. 
Try number: %(tries)s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:141 +#: cinder/brick/initiator/connector.py:789 #, python-format -msgid "Compute.api::reset_network %s" -msgstr "Compute.api::reset_network %s" - -#: cinder/api/openstack/compute/contrib/admin_actions.py:154 -#: cinder/api/openstack/compute/contrib/admin_actions.py:170 -#: cinder/api/openstack/compute/contrib/admin_actions.py:186 -#: cinder/api/openstack/compute/contrib/multinic.py:41 -#: cinder/api/openstack/compute/contrib/rescue.py:44 -msgid "Server not found" +msgid "Found AoE device %(path)s (after %(tries)s rediscover)" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:157 +#: cinder/brick/initiator/connector.py:815 #, python-format -msgid "Compute.api::inject_network_info %s" +msgid "aoe-discover: stdout=%(out)s stderr=%(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:173 +#: cinder/brick/initiator/connector.py:825 #, python-format -msgid "Compute.api::lock %s" -msgstr "Compute.api::lock %s" +msgid "aoe-revalidate %(dev)s: stdout=%(out)s stderr=%(err)s" +msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:189 +#: cinder/brick/initiator/connector.py:834 #, python-format -msgid "Compute.api::unlock %s" -msgstr "Compute.api::unlock %s" +msgid "aoe-flush %(dev)s: stdout=%(out)s stderr=%(err)s" +msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:219 -#, python-format -msgid "createBackup entity requires %s attribute" +#: cinder/brick/initiator/connector.py:858 +msgid "" +"Connection details not present. RemoteFsClient may not initialize " +"properly." msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:223 -msgid "Malformed createBackup entity" +#: cinder/brick/initiator/connector.py:915 +msgid "Invalid connection_properties specified, no device_path attribute" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:229 -msgid "createBackup attribute 'rotation' must be an integer" +#: cinder/brick/initiator/linuxfc.py:50 cinder/brick/initiator/linuxfc.py:56 +msgid "systool is not installed" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:244 -#: cinder/api/openstack/compute/contrib/console_output.py:47 -#: cinder/api/openstack/compute/contrib/console_output.py:59 -#: cinder/api/openstack/compute/contrib/consoles.py:49 -#: cinder/api/openstack/compute/contrib/consoles.py:60 -#: cinder/api/openstack/compute/contrib/server_action_list.py:49 -#: cinder/api/openstack/compute/contrib/server_diagnostics.py:47 -#: cinder/api/openstack/compute/contrib/server_start_stop.py:38 -msgid "Instance not found" +#: cinder/brick/initiator/linuxscsi.py:99 +#: cinder/brick/initiator/linuxscsi.py:107 +#: cinder/brick/initiator/linuxscsi.py:124 +#, python-format +msgid "multipath call failed exit (%(code)s)" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:272 -msgid "host and block_migration must be specified." +#: cinder/brick/initiator/linuxscsi.py:145 +#, python-format +msgid "Couldn't find multipath device %(line)s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:284 +#: cinder/brick/initiator/linuxscsi.py:149 #, python-format -msgid "Live migration of instance %(id)s to host %(host)s failed" +msgid "Found multipath device = %(mdev)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:140 +msgid "Attempting recreate of backing lun..."
msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:76 +#: cinder/brick/iscsi/iscsi.py:158 #, python-format msgid "" -"Cannot create aggregate with name %(name)s and availability zone " -"%(avail_zone)s" +"Failed to recover attempt to create iscsi backing lun for volume " +"id:%(vol_id)s: %(e)s" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:88 +#: cinder/brick/iscsi/iscsi.py:177 #, python-format -msgid "Cannot show aggregate: %(id)s" +msgid "Creating iscsi_target for: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:114 +#: cinder/brick/iscsi/iscsi.py:184 #, python-format -msgid "Cannot update aggregate: %(id)s" +msgid "" +"Created volume path %(vp)s,\n" +"content: %(vc)%" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:126 +#: cinder/brick/iscsi/iscsi.py:216 cinder/brick/iscsi/iscsi.py:365 +#, fuzzy, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "%s 볼륨을 찾을수 없습니다" + +#: cinder/brick/iscsi/iscsi.py:227 #, python-format -msgid "Cannot delete aggregate: %(id)s" +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:139 +#: cinder/brick/iscsi/iscsi.py:258 #, python-format -msgid "Aggregates does not have %s action" +msgid "Removing iscsi_target for: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:152 -#: cinder/api/openstack/compute/contrib/aggregates.py:158 +#: cinder/brick/iscsi/iscsi.py:262 #, python-format -msgid "Cannot add host %(host)s in aggregate %(id)s" +msgid "Volume path %s does not exist, nothing to remove." msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:171 -#: cinder/api/openstack/compute/contrib/aggregates.py:175 -#, python-format -msgid "Cannot remove host %(host)s in aggregate %(id)s" +#: cinder/brick/iscsi/iscsi.py:280 +#, fuzzy, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "%s 볼륨을 찾을수 없습니다" + +#: cinder/brick/iscsi/iscsi.py:290 cinder/brick/iscsi/iscsi.py:550 +msgid "valid iqn needed for show_target" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:195 +#: cinder/brick/iscsi/iscsi.py:375 #, python-format -msgid "Cannot set metadata %(metadata)s in aggregate %(id)s" +msgid "Removing iscsi_target for volume: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/certificates.py:75 -msgid "Only root certificate can be retrieved." +#: cinder/brick/iscsi/iscsi.py:469 +msgid "cinder-rtstool is not installed correctly" msgstr "" -#: cinder/api/openstack/compute/contrib/cloudpipe.py:146 -msgid "" -"Unable to claim IP for VPN instances, ensure it isn't running, and try " -"again in a few minutes" +#: cinder/brick/iscsi/iscsi.py:489 +#, python-format +msgid "Creating iscsi_target for volume: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/consoles.py:44 -msgid "Missing type specification" +#: cinder/brick/iscsi/iscsi.py:513 cinder/brick/iscsi/iscsi.py:522 +#, python-format +msgid "Failed to create iscsi target for volume id:%s." msgstr "" -#: cinder/api/openstack/compute/contrib/consoles.py:56 -msgid "Invalid type specification" +#: cinder/brick/iscsi/iscsi.py:532 +#, fuzzy, python-format +msgid "Removing iscsi_target: %s" +msgstr "인스턴스 %s를 재부팅합니다" + +#: cinder/brick/iscsi/iscsi.py:542 +#, python-format +msgid "Failed to remove iscsi target for volume id:%s." 
msgstr "" -#: cinder/api/openstack/compute/contrib/disk_config.py:44 +#: cinder/brick/iscsi/iscsi.py:571 #, python-format -msgid "%s must be either 'MANUAL' or 'AUTO'." +msgid "Failed to add initiator iqn %s to target" msgstr "" -#: cinder/api/openstack/compute/contrib/extended_server_attributes.py:77 -#: cinder/api/openstack/compute/contrib/extended_status.py:61 -msgid "Server not found." +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" msgstr "" -#: cinder/api/openstack/compute/contrib/flavorextradata.py:61 -#: cinder/api/openstack/compute/contrib/flavorextradata.py:91 -msgid "Flavor not found." +#: cinder/brick/local_dev/lvm.py:76 cinder/brick/local_dev/lvm.py:158 +#: cinder/brick/local_dev/lvm.py:474 cinder/brick/local_dev/lvm.py:503 +#: cinder/brick/local_dev/lvm.py:546 cinder/brick/local_dev/lvm.py:609 +#, python-format +msgid "Cmd :%s" msgstr "" -#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:49 -#: cinder/api/openstack/compute/contrib/volumetypes.py:158 -msgid "No Request Body" +#: cinder/brick/local_dev/lvm.py:77 cinder/brick/local_dev/lvm.py:159 +#: cinder/brick/local_dev/lvm.py:475 cinder/brick/local_dev/lvm.py:504 +#: cinder/brick/local_dev/lvm.py:547 cinder/brick/local_dev/lvm.py:610 +#, python-format +msgid "StdOut :%s" msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:159 +#: cinder/brick/local_dev/lvm.py:78 cinder/brick/local_dev/lvm.py:160 +#: cinder/brick/local_dev/lvm.py:476 cinder/brick/local_dev/lvm.py:505 +#: cinder/brick/local_dev/lvm.py:548 cinder/brick/local_dev/lvm.py:611 #, python-format -msgid "No more floating ips in pool %s." +msgid "StdErr :%s" msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:161 -msgid "No more floating ips available." +#: cinder/brick/local_dev/lvm.py:82 +#, fuzzy, python-format +msgid "Unable to locate Volume Group %s" +msgstr "%s 볼륨을 찾을수 없습니다" + +#: cinder/brick/local_dev/lvm.py:157 +msgid "Error querying thin pool about data_percent" msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:201 -#: cinder/api/openstack/compute/contrib/floating_ips.py:230 -#: cinder/api/openstack/compute/contrib/security_groups.py:571 -#: cinder/api/openstack/compute/contrib/security_groups.py:604 -msgid "Missing parameter dict" +#: cinder/brick/local_dev/lvm.py:370 +#, fuzzy, python-format +msgid "Unable to find VG: %s" +msgstr "%s 볼륨 탈착에 실패했습니다" + +#: cinder/brick/local_dev/lvm.py:420 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." 
msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:204 -#: cinder/api/openstack/compute/contrib/floating_ips.py:233 -msgid "Address not specified" +#: cinder/brick/local_dev/lvm.py:473 +msgid "Error creating Volume" msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:213 -msgid "No fixed ips associated to instance" +#: cinder/brick/local_dev/lvm.py:489 +#, fuzzy, python-format +msgid "Unable to find LV: %s" +msgstr "%s 볼륨 탈착에 실패했습니다" + +#: cinder/brick/local_dev/lvm.py:502 +msgid "Error creating snapshot" msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:216 -msgid "Associate floating ip failed" +#: cinder/brick/local_dev/lvm.py:545 +msgid "Error activating LV" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:144 +#: cinder/brick/local_dev/lvm.py:563 #, python-format -msgid "Invalid status: '%s'" +msgid "Error reported running lvremove: CMD: %(command)s, RESPONSE: %(response)s" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:148 -#, python-format -msgid "Invalid mode: '%s'" +#: cinder/brick/local_dev/lvm.py:568 +msgid "Attempting udev settle and retry of lvremove..." msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:152 -#, python-format -msgid "Invalid update setting: '%s'" +#: cinder/brick/local_dev/lvm.py:608 +msgid "Error extending Volume" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:170 -#, python-format -msgid "Putting host %(host)s in maintenance mode %(mode)s." +#: cinder/brick/remotefs/remotefs.py:39 +msgid "nfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:45 +msgid "glusterfs_mount_point_base required" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:181 +#: cinder/brick/remotefs/remotefs.py:86 #, python-format -msgid "Setting host %(host)s to %(state)s." +msgid "Already mounted: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:230 -msgid "Describe-resource is admin only functionality" +#: cinder/common/config.py:125 +msgid "Deploy v1 of the Cinder API." msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:238 -msgid "Host not found" +#: cinder/common/config.py:128 +msgid "Deploy v2 of the Cinder API." msgstr "" -#: cinder/api/openstack/compute/contrib/keypairs.py:70 -msgid "Keypair name contains unsafe characters" +#: cinder/common/sqlalchemyutils.py:66 +#: cinder/openstack/common/db/sqlalchemy/utils.py:72 +msgid "Id not in sort_keys; is sort_keys unique?" msgstr "" -#: cinder/api/openstack/compute/contrib/keypairs.py:95 -msgid "Keypair name must be between 1 and 255 characters long" +#: cinder/common/sqlalchemyutils.py:114 +#: cinder/openstack/common/db/sqlalchemy/utils.py:120 +msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "" -#: cinder/api/openstack/compute/contrib/keypairs.py:100 +#: cinder/compute/nova.py:97 #, python-format -msgid "Key pair '%s' already exists." 
+msgid "Novaclient connection created using URL: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/multinic.py:52 -msgid "Missing 'networkId' argument for addFixedIp" +#: cinder/db/sqlalchemy/api.py:63 +msgid "Use of empty request context is deprecated" msgstr "" -#: cinder/api/openstack/compute/contrib/multinic.py:68 -msgid "Missing 'address' argument for removeFixedIp" +#: cinder/db/sqlalchemy/api.py:190 +#, python-format +msgid "Unrecognized read_deleted value '%s'" msgstr "" -#: cinder/api/openstack/compute/contrib/multinic.py:77 +#: cinder/db/sqlalchemy/api.py:843 #, python-format -msgid "Unable to find address %r" +msgid "Change will make usage less than 0 for the following resources: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:62 +#: cinder/db/sqlalchemy/api.py:1842 #, python-format -msgid "Network does not have %s action" +msgid "VolumeType %s deletion failed, VolumeType in use." msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:70 +#: cinder/db/sqlalchemy/api.py:2530 #, python-format -msgid "Disassociating network with id %s" +msgid "No backup with id %s" msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:74 -#: cinder/api/openstack/compute/contrib/networks.py:91 -#: cinder/api/openstack/compute/contrib/networks.py:101 -msgid "Network not found" -msgstr "" +#: cinder/db/sqlalchemy/api.py:2615 +#, fuzzy +msgid "Volume must be available" +msgstr "볼륨의 상태를 알 수 없습니다" -#: cinder/api/openstack/compute/contrib/networks.py:87 +#: cinder/db/sqlalchemy/api.py:2639 #, python-format -msgid "Showing network with id %s" +msgid "Volume in unexpected state %s, expected awaiting-transfer" msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:97 +#: cinder/db/sqlalchemy/api.py:2662 #, python-format -msgid "Deleting network with id %s" +msgid "" +"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " +"%(status)s, expected awaiting-transfer" msgstr "" -#: cinder/api/openstack/compute/contrib/scheduler_hints.py:41 -msgid "Malformed scheduler_hints attribute" +#: cinder/db/sqlalchemy/migration.py:37 +msgid "version should be an integer" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:222 -msgid "Security group id should be integer" +#: cinder/db/sqlalchemy/migration.py:64 +msgid "Upgrade DB using Essex release first." msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:243 -msgid "Security group is still in use" +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:240 +msgid "Exception while creating table." msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:295 -#, python-format -msgid "Security group %s already exists" +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:269 +msgid "Downgrade from initial Cinder install is unsupported." 
msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:315 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:49 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:74 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:105 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:45 +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:48 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:46 #, python-format -msgid "Security group %s is not a string or unicode" +msgid "Table |%s| not created!" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:318 -#, python-format -msgid "Security group %s cannot be empty." +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:127 +msgid "Dropping foreign key reservations_ibfk_1 failed." msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:321 -#, python-format -msgid "Security group %s should not be greater than 255 characters." +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:133 +msgid "quota_classes table not dropped" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:348 -msgid "Parent group id is not integer" +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:140 +msgid "quota_usages table not dropped" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:351 -#, python-format -msgid "Security group (%s) not found" +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:147 +msgid "reservations table not dropped" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:369 -msgid "Not enough parameters to build a valid rule." 
+#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:60 +msgid "Exception while creating table 'volume_glance_metadata'" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:376 -#, python-format -msgid "This rule already exists in group %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:75 +msgid "volume_glance_metadata table not dropped" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:414 -msgid "Parent or group id is not integer" +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:68 +msgid "backups table not dropped" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:507 -msgid "Rule id is not integer" +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:58 +msgid "snapshot_metadata table not dropped" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:510 -#, python-format -msgid "Rule (%s) not found" +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:61 +msgid "transfers table not dropped" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:574 -#: cinder/api/openstack/compute/contrib/security_groups.py:607 -msgid "Security group not specified" +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:31 +msgid "migrations table not dropped" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:578 -#: cinder/api/openstack/compute/contrib/security_groups.py:611 -msgid "Security group name cannot be empty" +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:61 +#, python-format +msgid "Table |%s| not created" msgstr "" -#: cinder/api/openstack/compute/contrib/server_start_stop.py:45 +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:37 #, python-format -msgid "start instance %r" +msgid "Exception while dropping table %s." msgstr "" -#: cinder/api/openstack/compute/contrib/server_start_stop.py:54 +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:100 #, python-format -msgid "stop instance %r" +msgid "Exception while creating table %s." msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:73 -#: cinder/api/openstack/volume/volumes.py:106 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:34 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:43 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:49 #, python-format -msgid "vol=%s" +msgid "Column |%s| not created!" 
msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:146 -#: cinder/api/openstack/volume/volumes.py:184 -#, python-format -msgid "Delete volume with id: %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:92 +msgid "encryption_key_id column not dropped from volumes" msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:329 -#, python-format -msgid "Attach volume %(volume_id)s to instance %(server_id)s at %(device)s" +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:100 +msgid "encryption_key_id column not dropped from snapshots" msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:481 -#: cinder/api/openstack/volume/snapshots.py:110 -#, python-format -msgid "Delete snapshot with id: %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:105 +msgid "volume_type_id column not dropped from snapshots" msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:524 -#: cinder/api/openstack/volume/snapshots.py:150 -#, python-format -msgid "Create snapshot from volume %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:113 +msgid "encryption table not dropped" msgstr "" -#: cinder/auth/fakeldap.py:33 -msgid "Attempted to instantiate singleton" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:49 +msgid "Table quality_of_service_specs not created!" msgstr "" -#: cinder/auth/ldapdriver.py:650 -#, python-format +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:60 +msgid "Added qos_specs_id column to volume type table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:85 +msgid "Dropping foreign key volume_types_ibfk_1 failed" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:93 +msgid "Dropping qos_specs_id column failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:100 +msgid "Dropping quality_of_service_specs table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:59 +msgid "volume_admin_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:48 msgid "" -"Attempted to remove the last member of a group. Deleting the group at %s " -"instead." +"Found existing 'default' entries in the quota_classes table. Skipping " +"insertion of default values." msgstr "" -#: cinder/auth/manager.py:298 -#, python-format -msgid "Looking up user: %r" +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:72 +msgid "Added default quota class data into the DB." msgstr "" -#: cinder/auth/manager.py:302 -#, python-format -msgid "Failed authorization for access key %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:74 +msgid "Default quota class data not inserted into the DB." msgstr "" -#: cinder/auth/manager.py:308 +#: cinder/image/glance.py:161 cinder/image/glance.py:169 #, python-format -msgid "Using project name = user name (%s)" +msgid "Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:94 cinder/image/image_utils.py:199 +msgid "'qemu-img info' parsing failed." 
msgstr "" -#: cinder/auth/manager.py:315 +#: cinder/image/image_utils.py:101 #, python-format -msgid "failed authorization: no project named %(pjid)s (user=%(uname)s)" +msgid "fmt=%(fmt)s backed by: %(backing_file)s" msgstr "" -#: cinder/auth/manager.py:324 +#: cinder/image/image_utils.py:109 cinder/image/image_utils.py:192 #, python-format msgid "" -"Failed authorization: user %(uname)s not admin and not member of project " -"%(pjname)s" +"Size is %(image_size)dGB and doesn't fit in a volume of size " +"%(volume_size)dGB." msgstr "" -#: cinder/auth/manager.py:331 cinder/auth/manager.py:343 +#: cinder/image/image_utils.py:157 #, python-format -msgid "user.secret: %s" +msgid "" +"qemu-img is not installed and image is of type %s. Only RAW images can " +"be used if qemu-img is not installed." +msgstr "" + +#: cinder/image/image_utils.py:164 +msgid "" +"qemu-img is not installed and the disk format is not specified. Only RAW" +" images can be used if qemu-img is not installed." msgstr "" -#: cinder/auth/manager.py:332 cinder/auth/manager.py:344 +#: cinder/image/image_utils.py:178 #, python-format -msgid "expected_signature: %s" +msgid "Copying image from %(tmp)s to volume %(dest)s - size: %(size)s" msgstr "" -#: cinder/auth/manager.py:333 cinder/auth/manager.py:345 +#: cinder/image/image_utils.py:206 #, python-format -msgid "signature: %s" +msgid "fmt=%(fmt)s backed by:%(backing_file)s" msgstr "" -#: cinder/auth/manager.py:335 cinder/auth/manager.py:357 +#: cinder/image/image_utils.py:224 #, python-format -msgid "Invalid signature for user %s" +msgid "Converted to %(vol_format)s, but format is now %(file_format)s" msgstr "" -#: cinder/auth/manager.py:353 +#: cinder/image/image_utils.py:260 #, python-format -msgid "host_only_signature: %s" +msgid "Converted to %(f1)s, but format is now %(f2)s" msgstr "" -#: cinder/auth/manager.py:449 -msgid "Must specify project" +#: cinder/keymgr/conf_key_mgr.py:78 +msgid "" +"config option keymgr.fixed_key has not been defined: some operations may " +"fail unexpectedly" msgstr "" -#: cinder/auth/manager.py:490 -#, python-format -msgid "Adding role %(role)s to user %(uid)s in project %(pid)s" +#: cinder/keymgr/conf_key_mgr.py:80 +msgid "keymgr.fixed_key not defined" msgstr "" -#: cinder/auth/manager.py:493 +#: cinder/keymgr/conf_key_mgr.py:134 #, python-format -msgid "Adding sitewide role %(role)s to user %(uid)s" +msgid "Not deleting key %s" msgstr "" -#: cinder/auth/manager.py:519 +#: cinder/openstack/common/eventlet_backdoor.py:140 #, python-format -msgid "Removing role %(role)s from user %(uid)s on project %(pid)s" +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" msgstr "" -#: cinder/auth/manager.py:522 +#: cinder/openstack/common/excutils.py:48 #, python-format -msgid "Removing sitewide role %(role)s from user %(uid)s" +msgid "Original exception being dropped: %s" msgstr "" -#: cinder/auth/manager.py:595 +#: cinder/openstack/common/fileutils.py:64 #, python-format -msgid "Created project %(name)s with manager %(manager_user)s" +msgid "Reloading cached file %s" msgstr "" -#: cinder/auth/manager.py:613 -#, python-format -msgid "modifying project %s" +#: cinder/openstack/common/gettextutils.py:252 +msgid "Message objects do not support addition." msgstr "" -#: cinder/auth/manager.py:625 -#, python-format -msgid "Adding user %(uid)s to project %(pid)s" +#: cinder/openstack/common/gettextutils.py:261 +msgid "" +"Message objects do not support str() because they may contain non-ascii " +"characters. 
Please use unicode() or translate() instead." msgstr "" -#: cinder/auth/manager.py:646 -#, python-format -msgid "Remove user %(uid)s from project %(pid)s" +#: cinder/openstack/common/imageutils.py:96 +msgid "Snapshot list encountered but no header found!" msgstr "" -#: cinder/auth/manager.py:676 +#: cinder/openstack/common/lockutils.py:102 #, python-format -msgid "Deleting project %s" +msgid "Could not release the acquired lock `%s`" msgstr "" -#: cinder/auth/manager.py:734 +#: cinder/openstack/common/lockutils.py:189 #, python-format -msgid "Created user %(rvname)s (admin: %(rvadmin)r)" +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." msgstr "" -#: cinder/auth/manager.py:743 +#: cinder/openstack/common/lockutils.py:200 #, python-format -msgid "Deleting user %s" +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." msgstr "" -#: cinder/auth/manager.py:753 +#: cinder/openstack/common/lockutils.py:227 #, python-format -msgid "Access Key change for user %s" +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." msgstr "" -#: cinder/auth/manager.py:755 +#: cinder/openstack/common/lockutils.py:235 #, python-format -msgid "Secret Key change for user %s" +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." msgstr "" -#: cinder/auth/manager.py:757 +#: cinder/openstack/common/log.py:301 #, python-format -msgid "Admin status set to %(admin)r for user %(uid)s" +msgid "Deprecated: %s" msgstr "" -#: cinder/auth/manager.py:802 +#: cinder/openstack/common/log.py:402 #, python-format -msgid "No vpn data for project %s" +msgid "Error loading logging config %(log_config)s: %(err_msg)s" msgstr "" -#: cinder/cloudpipe/pipelib.py:46 -msgid "Instance type for vpn instances" +#: cinder/openstack/common/log.py:453 +#, python-format +msgid "syslog facility must be one of: %s" msgstr "" -#: cinder/cloudpipe/pipelib.py:49 -msgid "Template for cloudpipe instance boot script" +#: cinder/openstack/common/log.py:623 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" msgstr "" -#: cinder/cloudpipe/pipelib.py:52 -msgid "Network to push into openvpn config" +#: cinder/openstack/common/loopingcall.py:82 +#, python-format +msgid "task run outlasted interval by %s sec" msgstr "" -#: cinder/cloudpipe/pipelib.py:55 -msgid "Netmask to push into openvpn config" +#: cinder/openstack/common/loopingcall.py:89 +#: cinder/tests/brick/test_brick_connector.py:466 +msgid "in fixed duration looping call" msgstr "" -#: cinder/cloudpipe/pipelib.py:107 +#: cinder/openstack/common/loopingcall.py:129 #, python-format -msgid "Launching VPN for %s" +msgid "Dynamic looping call sleeping for %.02f seconds" msgstr "" -#: cinder/compute/api.py:141 -msgid "No compute host specified" +#: cinder/openstack/common/loopingcall.py:136 +msgid "in dynamic looping call" msgstr "" -#: cinder/compute/api.py:144 +#: cinder/openstack/common/periodic_task.py:43 #, python-format -msgid "Unable to find host for Instance %s" +msgid "Unexpected argument for periodic task creation: %(arg)s." 
msgstr "" -#: cinder/compute/api.py:192 +#: cinder/openstack/common/periodic_task.py:134 #, python-format -msgid "" -"Quota exceeded for %(pid)s, tried to set %(num_metadata)s metadata " -"properties" +msgid "Skipping periodic task %(task)s because its interval is negative" msgstr "" -#: cinder/compute/api.py:203 +#: cinder/openstack/common/periodic_task.py:139 #, python-format -msgid "Quota exceeded for %(pid)s, metadata property key or value too long" +msgid "Skipping periodic task %(task)s because it is disabled" msgstr "" -#: cinder/compute/api.py:257 -msgid "Cannot run any more instances of this type." +#: cinder/openstack/common/periodic_task.py:177 +#, python-format +msgid "Running periodic task %(full_task_name)s" msgstr "" -#: cinder/compute/api.py:259 +#: cinder/openstack/common/periodic_task.py:186 #, python-format -msgid "Can only run %s more instances of this type." +msgid "Error during %(full_task_name)s: %(e)s" msgstr "" -#: cinder/compute/api.py:261 +#: cinder/openstack/common/policy.py:149 #, python-format -msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances. " +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." msgstr "" -#: cinder/compute/api.py:310 -msgid "Creating a raw instance" +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" msgstr "" -#: cinder/compute/api.py:312 +#: cinder/openstack/common/policy.py:173 #, python-format -msgid "Using Kernel=%(kernel_id)s, Ramdisk=%(ramdisk_id)s" +msgid "Inheritance-based rules are deprecated; update _check_%s" msgstr "" -#: cinder/compute/api.py:383 +#: cinder/openstack/common/policy.py:180 #, python-format -msgid "Going to run %s instances..." +msgid "No handler for matches of kind %s" msgstr "" -#: cinder/compute/api.py:447 +#: cinder/openstack/common/processutils.py:127 #, python-format -msgid "bdm %s" +msgid "Got unknown keyword args to utils.execute: %r" msgstr "" -#: cinder/compute/api.py:474 +#: cinder/openstack/common/processutils.py:142 #, python-format -msgid "block_device_mapping %s" +msgid "Running cmd (subprocess): %s" msgstr "" -#: cinder/compute/api.py:591 +#: cinder/openstack/common/processutils.py:167 +#: cinder/openstack/common/processutils.py:239 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:345 #, python-format -msgid "Sending create to scheduler for %(pid)s/%(uid)s's" +msgid "Result was %s" msgstr "" -#: cinder/compute/api.py:871 -msgid "Going to try to soft delete instance" +#: cinder/openstack/common/processutils.py:179 +#, python-format +msgid "%r failed. Retrying." msgstr "" -#: cinder/compute/api.py:891 -msgid "No host for instance, deleting immediately" +#: cinder/openstack/common/processutils.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:318 +#, python-format +msgid "Running cmd (SSH): %s" msgstr "" -#: cinder/compute/api.py:939 -msgid "Going to try to terminate instance" +#: cinder/openstack/common/processutils.py:220 +msgid "Environment not supported over SSH" msgstr "" -#: cinder/compute/api.py:977 -msgid "Going to try to stop instance" +#: cinder/openstack/common/processutils.py:224 +msgid "process_input not supported over SSH" msgstr "" -#: cinder/compute/api.py:996 -msgid "Going to try to start instance" +#: cinder/openstack/common/service.py:175 +#: cinder/openstack/common/service.py:269 +#, python-format +msgid "Caught %s, exiting" msgstr "" -#: cinder/compute/api.py:1000 -#, python-format -msgid "Instance %(instance_uuid)s is not stopped. 
(%(vm_state)s" +#: cinder/openstack/common/service.py:187 +msgid "Exception during rpc cleanup." msgstr "" -#: cinder/compute/api.py:1071 cinder/volume/api.py:173 -#: cinder/volume/volume_types.py:64 -#, python-format -msgid "Searching by: %s" +#: cinder/openstack/common/service.py:238 +msgid "Parent process has died unexpectedly, exiting" msgstr "" -#: cinder/compute/api.py:1201 -#, python-format -msgid "Image type not recognized %s" +#: cinder/openstack/common/service.py:275 +msgid "Unhandled exception" msgstr "" -#: cinder/compute/api.py:1369 -msgid "flavor_id is None. Assuming migration." +#: cinder/openstack/common/service.py:308 +msgid "Forking too fast, sleeping" msgstr "" -#: cinder/compute/api.py:1377 +#: cinder/openstack/common/service.py:327 #, python-format -msgid "" -"Old instance type %(current_instance_type_name)s, new instance type " -"%(new_instance_type_name)s" +msgid "Started child %d" msgstr "" -#: cinder/compute/api.py:1644 +#: cinder/openstack/common/service.py:337 #, python-format -msgid "multiple fixedips exist, using the first: %s" +msgid "Starting %d workers" msgstr "" -#: cinder/compute/instance_types.py:57 cinder/compute/instance_types.py:65 -msgid "create arguments must be positive integers" +#: cinder/openstack/common/service.py:354 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" msgstr "" -#: cinder/compute/instance_types.py:76 cinder/volume/volume_types.py:41 +#: cinder/openstack/common/service.py:358 #, python-format -msgid "DB error: %s" +msgid "Child %(pid)s exited with status %(code)d" msgstr "" -#: cinder/compute/instance_types.py:86 +#: cinder/openstack/common/service.py:362 #, python-format -msgid "Instance type %s not found for deletion" +msgid "pid %d not in child list" msgstr "" -#: cinder/compute/manager.py:138 +#: cinder/openstack/common/service.py:392 #, python-format -msgid "check_instance_lock: decorating: |%s|" -msgstr "check_instance_lock: decorating: |%s|" +msgid "Caught %s, stopping children" +msgstr "" -#: cinder/compute/manager.py:140 +#: cinder/openstack/common/service.py:410 #, python-format -msgid "" -"check_instance_lock: arguments: |%(self)s| |%(context)s| " -"|%(instance_uuid)s|" +msgid "Waiting on %d children to exit" msgstr "" -#: cinder/compute/manager.py:144 +#: cinder/openstack/common/sslutils.py:98 #, python-format -msgid "check_instance_lock: locked: |%s|" -msgstr "check_instance_lock: locked: |%s|" +msgid "Invalid SSL version : %s" +msgstr "" -#: cinder/compute/manager.py:146 +#: cinder/openstack/common/strutils.py:86 #, python-format -msgid "check_instance_lock: admin: |%s|" -msgstr "check_instance_lock: admin: |%s|" +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" -#: cinder/compute/manager.py:151 +#: cinder/openstack/common/strutils.py:182 #, python-format -msgid "check_instance_lock: executing: |%s|" -msgstr "check_instance_lock: executing: |%s|" +msgid "Invalid string format: %s" +msgstr "" -#: cinder/compute/manager.py:155 +#: cinder/openstack/common/strutils.py:189 #, python-format -msgid "check_instance_lock: not executing |%s|" -msgstr "check_instance_lock: not executing |%s|" +msgid "Unknown byte multiplier: %s" +msgstr "" -#: cinder/compute/manager.py:201 +#: cinder/openstack/common/versionutils.py:69 #, python-format -msgid "Unable to load the virtualization driver: %s" +msgid "" +"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and " +"may be removed in %(remove_in)s." 
msgstr "" -#: cinder/compute/manager.py:223 +#: cinder/openstack/common/versionutils.py:73 #, python-format msgid "" -"Instance %(instance_uuid)s has been destroyed from under us while trying " -"to set it to ERROR" +"%(what)s is deprecated as of %(as_of)s and may be removed in " +"%(remove_in)s. It will not be superseded." msgstr "" -#: cinder/compute/manager.py:240 +#: cinder/openstack/common/crypto/utils.py:29 +msgid "An unknown error occurred in crypto utils." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:36 #, python-format -msgid "Current state is %(drv_state)s, state in DB is %(db_state)s." +msgid "Block size of %(given)d is too big, max = %(maximum)d" msgstr "" -#: cinder/compute/manager.py:245 -msgid "Rebooting instance after cinder-compute restart." +#: cinder/openstack/common/crypto/utils.py:45 +#, python-format +msgid "Length of %(given)d is too long, max = %(maximum)d" msgstr "" -#: cinder/compute/manager.py:255 -msgid "Hypervisor driver does not support firewall rules" +#: cinder/openstack/common/db/exception.py:44 +msgid "Invalid Parameter: Unicode is not supported by the current database." msgstr "" -#: cinder/compute/manager.py:260 -msgid "Checking state" +#: cinder/openstack/common/db/sqlalchemy/session.py:487 +msgid "DB exception wrapped." msgstr "" -#: cinder/compute/manager.py:329 +#: cinder/openstack/common/db/sqlalchemy/session.py:538 #, python-format -msgid "Setting up bdm %s" +msgid "Got mysql server has gone away: %s" msgstr "" -#: cinder/compute/manager.py:400 +#: cinder/openstack/common/db/sqlalchemy/session.py:610 #, python-format -msgid "Instance %s already deleted from database. Attempting forceful vm deletion" +msgid "SQL connection failed. %s attempts left." msgstr "" -#: cinder/compute/manager.py:406 -#, python-format -msgid "Exception encountered while terminating the instance %s" +#: cinder/openstack/common/db/sqlalchemy/utils.py:33 +msgid "Sort key supplied was not valid." msgstr "" -#: cinder/compute/manager.py:444 +#: cinder/openstack/common/notifier/api.py:129 #, python-format -msgid "Instance %s not found." +msgid "%s not in valid priorities" msgstr "" -#: cinder/compute/manager.py:480 -msgid "Instance has already been created" -msgstr "인스턴스가 이미 생성되었습니다" - -#: cinder/compute/manager.py:523 +#: cinder/openstack/common/notifier/api.py:145 #, python-format msgid "" -"image_id=%(image_id)s, image_size_bytes=%(size_bytes)d, " -"allowed_size_bytes=%(allowed_size_bytes)d" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" msgstr "" -#: cinder/compute/manager.py:528 +#: cinder/openstack/common/notifier/api.py:164 #, python-format -msgid "" -"Image '%(image_id)s' size %(size_bytes)d exceeded instance_type allowed " -"size %(allowed_size_bytes)d" -msgstr "" - -#: cinder/compute/manager.py:538 -msgid "Starting instance..." -msgstr "" - -#: cinder/compute/manager.py:548 -msgid "Skipping network allocation for instance" +msgid "Failed to load notifier %s. These notifications will not be sent." msgstr "" -#: cinder/compute/manager.py:561 -msgid "Instance failed network setup" +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead." 
msgstr "" -#: cinder/compute/manager.py:565 +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 #, python-format -msgid "Instance network_info: |%s|" -msgstr "" - -#: cinder/compute/manager.py:578 -msgid "Instance failed block device setup" +msgid "Could not send notification to %(topic)s. Payload=%(message)s" msgstr "" -#: cinder/compute/manager.py:594 -msgid "Instance failed to spawn" +#: cinder/openstack/common/rpc/__init__.py:105 +#, python-format +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." msgstr "" -#: cinder/compute/manager.py:615 -msgid "Deallocating network for instance" +#: cinder/openstack/common/rpc/amqp.py:83 +msgid "Pool creating new connection" msgstr "" -#: cinder/compute/manager.py:672 +#: cinder/openstack/common/rpc/amqp.py:208 #, python-format -msgid "%(action_str)s instance" +msgid "no calling threads waiting for msg_id : %s, message : %s" msgstr "" -#: cinder/compute/manager.py:699 +#: cinder/openstack/common/rpc/amqp.py:216 #, python-format -msgid "Ignoring DiskNotFound: %s" +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." msgstr "" -#: cinder/compute/manager.py:708 +#: cinder/openstack/common/rpc/amqp.py:299 #, python-format -msgid "terminating bdm %s" +msgid "unpacked context: %s" msgstr "" -#: cinder/compute/manager.py:742 cinder/compute/manager.py:1328 -#: cinder/compute/manager.py:1416 cinder/compute/manager.py:2501 +#: cinder/openstack/common/rpc/amqp.py:345 #, python-format -msgid "%s. Setting instance vm_state to ERROR" +msgid "UNIQUE_ID is %s." msgstr "" -#: cinder/compute/manager.py:811 +#: cinder/openstack/common/rpc/amqp.py:414 #, python-format -msgid "" -"Cannot rebuild instance [%(instance_uuid)s], because the given image does" -" not exist." +msgid "received %s" msgstr "" -#: cinder/compute/manager.py:816 +#: cinder/openstack/common/rpc/amqp.py:422 #, python-format -msgid "Cannot rebuild instance [%(instance_uuid)s]: %(exc)s" +msgid "no method for message: %s" msgstr "" -#: cinder/compute/manager.py:823 +#: cinder/openstack/common/rpc/amqp.py:423 #, python-format -msgid "Rebuilding instance %s" +msgid "No method for message: %s" msgstr "" -#: cinder/compute/manager.py:876 -#, python-format -msgid "Rebooting instance %s" -msgstr "인스턴스 %s를 재부팅합니다" - -#: cinder/compute/manager.py:891 +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:280 #, python-format -msgid "" -"trying to reboot a non-running instance: %(instance_uuid)s (state: " -"%(state)s expected: %(running)s)" +msgid "Expected exception during message handling (%s)" msgstr "" -#: cinder/compute/manager.py:933 -#, python-format -msgid "instance %s: snapshotting" -msgstr "인스턴스 %s: 스냅샷 저장중" - -#: cinder/compute/manager.py:939 -#, python-format -msgid "" -"trying to snapshot a non-running instance: %(instance_uuid)s (state: " -"%(state)s expected: %(running)s)" +#: cinder/openstack/common/rpc/amqp.py:459 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" msgstr "" -#: cinder/compute/manager.py:995 +#: cinder/openstack/common/rpc/amqp.py:594 #, python-format -msgid "Found %(num_images)d images (rotation: %(rotation)d)" +msgid "Making synchronous call on %s ..." 
msgstr "" -#: cinder/compute/manager.py:1001 +#: cinder/openstack/common/rpc/amqp.py:597 #, python-format -msgid "Rotating out %d backups" +msgid "MSG_ID is %s" msgstr "" -#: cinder/compute/manager.py:1005 +#: cinder/openstack/common/rpc/amqp.py:631 #, python-format -msgid "Deleting image %s" +msgid "Making asynchronous cast on %s..." msgstr "" -#: cinder/compute/manager.py:1035 -#, python-format -msgid "Failed to set admin password. Instance %s is not running" +#: cinder/openstack/common/rpc/amqp.py:640 +msgid "Making asynchronous fanout cast..." msgstr "" -#: cinder/compute/manager.py:1041 +#: cinder/openstack/common/rpc/amqp.py:668 #, python-format -msgid "Instance %s: Root password set" +msgid "Sending %(event_type)s on %(topic)s" msgstr "" -#: cinder/compute/manager.py:1050 -msgid "set_admin_password is not implemented by this driver." +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." msgstr "" -#: cinder/compute/manager.py:1064 -msgid "Error setting admin password" -msgstr "" - -#: cinder/compute/manager.py:1079 +#: cinder/openstack/common/rpc/common.py:104 #, python-format msgid "" -"trying to inject a file into a non-running instance: %(instance_uuid)s " -"(state: %(current_power_state)s expected: %(expected_state)s)" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." msgstr "" -#: cinder/compute/manager.py:1084 +#: cinder/openstack/common/rpc/common.py:121 #, python-format -msgid "instance %(instance_uuid)s: injecting file to %(path)s" +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" msgstr "" -#: cinder/compute/manager.py:1098 -#, python-format -msgid "" -"trying to update agent on a non-running instance: %(instance_uuid)s " -"(state: %(current_power_state)s expected: %(expected_state)s)" +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" msgstr "" -#: cinder/compute/manager.py:1103 +#: cinder/openstack/common/rpc/common.py:143 #, python-format -msgid "instance %(instance_uuid)s: updating agent to %(url)s" +msgid "Found duplicate message(%(msg_id)s). Skipping it." msgstr "" -#: cinder/compute/manager.py:1116 -#, python-format -msgid "instance %s: rescuing" +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." msgstr "" -#: cinder/compute/manager.py:1141 +#: cinder/openstack/common/rpc/common.py:151 #, python-format -msgid "instance %s: unrescuing" +msgid "Specified RPC version, %(version)s, not supported by this endpoint." msgstr "" -#: cinder/compute/manager.py:1270 -msgid "destination same as source!" +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." msgstr "" -#: cinder/compute/manager.py:1287 +#: cinder/openstack/common/rpc/common.py:280 #, python-format -msgid "instance %s: migrating" +msgid "Failed to sanitize %(item)s. Key error %(err)s" msgstr "" -#: cinder/compute/manager.py:1471 +#: cinder/openstack/common/rpc/common.py:302 #, python-format -msgid "instance %s: pausing" +msgid "Returning exception %s to caller" msgstr "" -#: cinder/compute/manager.py:1489 -#, python-format -msgid "instance %s: unpausing" +#: cinder/openstack/common/rpc/impl_kombu.py:166 +#: cinder/openstack/common/rpc/impl_qpid.py:164 +msgid "Failed to process message... skipping it." 
msgstr "" -#: cinder/compute/manager.py:1525 +#: cinder/openstack/common/rpc/impl_kombu.py:477 #, python-format -msgid "instance %s: retrieving diagnostics" +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" msgstr "" -#: cinder/compute/manager.py:1534 +#: cinder/openstack/common/rpc/impl_kombu.py:499 #, python-format -msgid "instance %s: suspending" +msgid "Connected to AMQP server on %(hostname)s:%(port)d" msgstr "" -#: cinder/compute/manager.py:1556 +#: cinder/openstack/common/rpc/impl_kombu.py:536 #, python-format -msgid "instance %s: resuming" +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" msgstr "" -#: cinder/compute/manager.py:1579 +#: cinder/openstack/common/rpc/impl_kombu.py:552 #, python-format -msgid "instance %s: locking" +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." msgstr "" -#: cinder/compute/manager.py:1588 +#: cinder/openstack/common/rpc/impl_kombu.py:606 +#: cinder/openstack/common/rpc/impl_qpid.py:507 #, python-format -msgid "instance %s: unlocking" +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" msgstr "" -#: cinder/compute/manager.py:1596 +#: cinder/openstack/common/rpc/impl_kombu.py:624 +#: cinder/openstack/common/rpc/impl_qpid.py:522 #, python-format -msgid "instance %s: getting locked state" +msgid "Timed out waiting for RPC response: %s" msgstr "" -#: cinder/compute/manager.py:1606 +#: cinder/openstack/common/rpc/impl_kombu.py:628 +#: cinder/openstack/common/rpc/impl_qpid.py:526 #, python-format -msgid "instance %s: reset network" +msgid "Failed to consume message from queue: %s" msgstr "" -#: cinder/compute/manager.py:1614 +#: cinder/openstack/common/rpc/impl_kombu.py:667 +#: cinder/openstack/common/rpc/impl_qpid.py:561 #, python-format -msgid "instance %s: inject network info" +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" msgstr "" -#: cinder/compute/manager.py:1618 +#: cinder/openstack/common/rpc/impl_qpid.py:84 #, python-format -msgid "network_info to inject: |%s|" +msgid "Invalid value for qpid_topology_version: %d" msgstr "" -#: cinder/compute/manager.py:1655 +#: cinder/openstack/common/rpc/impl_qpid.py:455 #, python-format -msgid "instance %s: getting vnc console" +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" msgstr "" -#: cinder/compute/manager.py:1685 +#: cinder/openstack/common/rpc/impl_qpid.py:461 #, python-format -msgid "Booting with volume %(volume_id)s at %(mountpoint)s" +msgid "Connected to AMQP server on %s" msgstr "" -#: cinder/compute/manager.py:1703 -#, python-format -msgid "" -"instance %(instance_uuid)s: attaching volume %(volume_id)s to " -"%(mountpoint)s" +#: cinder/openstack/common/rpc/impl_qpid.py:474 +msgid "Re-established AMQP queues" msgstr "" -#: cinder/compute/manager.py:1705 -#, python-format -msgid "Attaching volume %(volume_id)s to %(mountpoint)s" +#: cinder/openstack/common/rpc/impl_qpid.py:534 +msgid "Error processing message. Skipping it." msgstr "" -#: cinder/compute/manager.py:1714 -#, python-format -msgid "instance %(instance_uuid)s: attach failed %(mountpoint)s, removing" +#: cinder/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." 
msgstr "" -#: cinder/compute/manager.py:1724 +#: cinder/openstack/common/rpc/impl_zmq.py:101 #, python-format -msgid "Attach failed %(mountpoint)s, removing" +msgid "Deserializing: %s" msgstr "" -#: cinder/compute/manager.py:1752 +#: cinder/openstack/common/rpc/impl_zmq.py:136 #, python-format -msgid "Detach volume %(volume_id)s from mountpoint %(mp)s" +msgid "Connecting to %(addr)s with %(type)s" msgstr "" -#: cinder/compute/manager.py:1756 +#: cinder/openstack/common/rpc/impl_zmq.py:137 #, python-format -msgid "Detaching volume from unknown instance %s" +msgid "-> Subscribed to %(subscribe)s" msgstr "" -#: cinder/compute/manager.py:1822 +#: cinder/openstack/common/rpc/impl_zmq.py:138 #, python-format -msgid "" -"Creating tmpfile %s to notify to other compute nodes that they should " -"mount the same storage." +msgid "-> bind: %(bind)s" msgstr "" -#: cinder/compute/manager.py:1884 -msgid "Instance has no volume." +#: cinder/openstack/common/rpc/impl_zmq.py:146 +msgid "Could not open socket." msgstr "" -#: cinder/compute/manager.py:1916 +#: cinder/openstack/common/rpc/impl_zmq.py:158 #, python-format -msgid "plug_vifs() failed %(cnt)d.Retry up to %(max_retry)d for %(hostname)s." +msgid "Subscribing to %s" msgstr "" -#: cinder/compute/manager.py:1973 -#, python-format -msgid "Pre live migration failed at %(dest)s" +#: cinder/openstack/common/rpc/impl_zmq.py:200 +msgid "You cannot recv on this socket." msgstr "" -#: cinder/compute/manager.py:2000 -msgid "post_live_migration() is started.." +#: cinder/openstack/common/rpc/impl_zmq.py:205 +msgid "You cannot send on this socket." msgstr "" -#: cinder/compute/manager.py:2030 -msgid "No floating_ip found" +#: cinder/openstack/common/rpc/impl_zmq.py:267 +#, python-format +msgid "Running func with context: %s" msgstr "" -#: cinder/compute/manager.py:2038 -msgid "No floating_ip found." +#: cinder/openstack/common/rpc/impl_zmq.py:305 +msgid "Sending reply" msgstr "" -#: cinder/compute/manager.py:2040 -#, python-format -msgid "" -"Live migration: Unexpected error: cannot inherit floating ip.\n" -"%(e)s" +#: cinder/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." msgstr "" -#: cinder/compute/manager.py:2073 -#, python-format -msgid "Migrating instance to %(dest)s finished successfully." +#: cinder/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" msgstr "" -#: cinder/compute/manager.py:2075 -msgid "" -"You may see the error \"libvirt: QEMU error: Domain not found: no domain " -"with matching name.\" This error can be safely ignored." +#: cinder/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" msgstr "" -#: cinder/compute/manager.py:2090 -msgid "Post operation of migration started" +#: cinder/openstack/common/rpc/impl_zmq.py:387 +msgid "Consuming socket" msgstr "" -#: cinder/compute/manager.py:2226 +#: cinder/openstack/common/rpc/impl_zmq.py:437 #, python-format -msgid "Updated the info_cache for instance %s" +msgid "Creating proxy for topic: %s" msgstr "" -#: cinder/compute/manager.py:2255 -msgid "Updating bandwidth usage cache" +#: cinder/openstack/common/rpc/impl_zmq.py:443 +msgid "Topic contained dangerous characters." msgstr "" -#: cinder/compute/manager.py:2277 -msgid "Updating host status" +#: cinder/openstack/common/rpc/impl_zmq.py:475 +msgid "Topic socket file creation failed." 
msgstr "" -#: cinder/compute/manager.py:2305 +#: cinder/openstack/common/rpc/impl_zmq.py:481 #, python-format -msgid "" -"Found %(num_db_instances)s in the database and %(num_vm_instances)s on " -"the hypervisor." +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." msgstr "" -#: cinder/compute/manager.py:2331 +#: cinder/openstack/common/rpc/impl_zmq.py:497 #, python-format -msgid "" -"During the sync_power process the instance %(uuid)s has moved from host " -"%(src)s to host %(dst)s" +msgid "Required IPC directory does not exist at %s" msgstr "" -#: cinder/compute/manager.py:2344 +#: cinder/openstack/common/rpc/impl_zmq.py:506 #, python-format -msgid "" -"Instance %s is in the process of migrating to this host. Wait next " -"sync_power cycle before setting power state to NOSTATE" -msgstr "" - -#: cinder/compute/manager.py:2350 -msgid "" -"Instance found in database but not known by hypervisor. Setting power " -"state to NOSTATE" -msgstr "" - -#: cinder/compute/manager.py:2380 -msgid "FLAGS.reclaim_instance_interval <= 0, skipping..." +msgid "Permission denied to IPC directory at %s" msgstr "" -#: cinder/compute/manager.py:2392 -msgid "Reclaiming deleted instance" +#: cinder/openstack/common/rpc/impl_zmq.py:509 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." msgstr "" -#: cinder/compute/manager.py:2458 +#: cinder/openstack/common/rpc/impl_zmq.py:543 #, python-format -msgid "" -"Detected instance with name label '%(name)s' which is marked as DELETED " -"but still present on host." +msgid "CONSUMER RECEIVED DATA: %s" msgstr "" -#: cinder/compute/manager.py:2465 -#, python-format -msgid "" -"Destroying instance with name label '%(name)s' which is marked as DELETED" -" but still present on host." +#: cinder/openstack/common/rpc/impl_zmq.py:562 +msgid "ZMQ Envelope version unsupported or unknown." msgstr "" -#: cinder/compute/manager.py:2472 -#, python-format -msgid "Unrecognized value '%(action)s' for FLAGS.running_deleted_instance_action" +#: cinder/openstack/common/rpc/impl_zmq.py:590 +msgid "Skipping topic registration. Already registered." msgstr "" -#: cinder/compute/manager.py:2542 +#: cinder/openstack/common/rpc/impl_zmq.py:597 #, python-format -msgid "" -"Aggregate %(aggregate_id)s: unrecoverable state during operation on " -"%(host)s" +msgid "Consumer is a zmq.%s" msgstr "" -#: cinder/compute/utils.py:142 -msgid "v4 subnets are required for legacy nw_info" +#: cinder/openstack/common/rpc/impl_zmq.py:649 +msgid "Creating payload" msgstr "" -#: cinder/console/manager.py:77 cinder/console/vmrc_manager.py:70 -msgid "Adding console" +#: cinder/openstack/common/rpc/impl_zmq.py:662 +msgid "Creating queue socket for reply waiter" msgstr "" -#: cinder/console/manager.py:97 -#, python-format -msgid "Tried to remove non-existent console %(console_id)s." +#: cinder/openstack/common/rpc/impl_zmq.py:675 +msgid "Sending cast" msgstr "" -#: cinder/console/vmrc_manager.py:122 -#, python-format -msgid "Tried to remove non-existent console %(console_id)s." +#: cinder/openstack/common/rpc/impl_zmq.py:678 +msgid "Cast sent; Waiting reply" msgstr "" -#: cinder/console/vmrc_manager.py:125 +#: cinder/openstack/common/rpc/impl_zmq.py:681 #, python-format -msgid "Removing console %(console_id)s." 
+msgid "Received message: %s" msgstr "" -#: cinder/console/xvp.py:98 -msgid "Rebuilding xvp conf" +#: cinder/openstack/common/rpc/impl_zmq.py:682 +msgid "Unpacking response" msgstr "" -#: cinder/console/xvp.py:116 -#, python-format -msgid "Re-wrote %s" +#: cinder/openstack/common/rpc/impl_zmq.py:691 +msgid "Unsupported or unknown ZMQ envelope returned." msgstr "" -#: cinder/console/xvp.py:121 -msgid "Stopping xvp" +#: cinder/openstack/common/rpc/impl_zmq.py:698 +msgid "RPC Message Invalid." msgstr "" -#: cinder/console/xvp.py:134 -msgid "Starting xvp" +#: cinder/openstack/common/rpc/impl_zmq.py:721 +#, python-format +msgid "%(msg)s" msgstr "" -#: cinder/console/xvp.py:141 +#: cinder/openstack/common/rpc/impl_zmq.py:724 #, python-format -msgid "Error starting xvp: %s" +msgid "Sending message(s) to: %s" msgstr "" -#: cinder/console/xvp.py:144 -msgid "Restarting xvp" +#: cinder/openstack/common/rpc/impl_zmq.py:728 +msgid "No matchmaker results. Not casting." msgstr "" -#: cinder/console/xvp.py:146 -msgid "xvp not running..." +#: cinder/openstack/common/rpc/impl_zmq.py:731 +msgid "No match from matchmaker." msgstr "" -#: cinder/consoleauth/manager.py:63 +#: cinder/openstack/common/rpc/impl_zmq.py:771 #, python-format -msgid "Deleting Expired Token: (%s)" +msgid "topic is %s." msgstr "" -#: cinder/consoleauth/manager.py:75 +#: cinder/openstack/common/rpc/impl_zmq.py:815 #, python-format -msgid "Received Token: %(token)s, %(token_dict)s)" +msgid "rpc_zmq_matchmaker = %(orig)s is deprecated; use %(new)s instead" msgstr "" -#: cinder/consoleauth/manager.py:79 -#, python-format -msgid "Checking Token: %(token)s, %(token_valid)s)" +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." msgstr "" -#: cinder/db/sqlalchemy/api.py:57 -msgid "Use of empty request context is deprecated" +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." msgstr "" -#: cinder/db/sqlalchemy/api.py:198 +#: cinder/openstack/common/rpc/matchmaker.py:239 #, python-format -msgid "Unrecognized read_deleted value '%s'" +msgid "Matchmaker unregistered: %s, %s" msgstr "" -#: cinder/db/sqlalchemy/api.py:516 cinder/db/sqlalchemy/api.py:551 -#, python-format -msgid "No ComputeNode for %(host)s" +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." msgstr "" -#: cinder/db/sqlalchemy/api.py:4019 cinder/db/sqlalchemy/api.py:4045 +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#: cinder/openstack/common/rpc/matchmaker_ring.py:79 +#: cinder/openstack/common/rpc/matchmaker_ring.py:97 #, python-format -msgid "No backend config with id %(sm_backend_id)s" +msgid "No key defining hosts for topic '%s', see ringfile" msgstr "" -#: cinder/db/sqlalchemy/api.py:4103 +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:54 #, python-format -msgid "No sm_flavor called %(sm_flavor)s" +msgid "extra_spec requirement '%(req)s' does not match '%(cap)s'" msgstr "" -#: cinder/db/sqlalchemy/api.py:4147 +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:67 #, python-format -msgid "No sm_volume with id %(volume_id)s" +msgid "%(host_state)s fails resource_type extra_specs requirements" msgstr "" -#: cinder/db/sqlalchemy/migration.py:66 -msgid "python-migrate is not installed. Exiting." +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:43 +msgid "Re-scheduling is disabled." 
msgstr "" -#: cinder/db/sqlalchemy/migration.py:78 -msgid "version should be an integer" +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:52 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" msgstr "" -#: cinder/db/sqlalchemy/session.py:137 -#, python-format -msgid "SQL connection failed. %s attempts left." +#: cinder/scheduler/driver.py:69 +msgid "Must implement host_passes_filters" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:48 -msgid "interface column not added to networks table" +#: cinder/scheduler/driver.py:74 +msgid "Must implement find_retype_host" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:80 -#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:54 -#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:61 -#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:48 -#, python-format -msgid "Table |%s| not created!" +#: cinder/scheduler/driver.py:78 +msgid "Must implement a fallback schedule" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:87 -msgid "VIF column not added to fixed_ips table" +#: cinder/scheduler/driver.py:82 +msgid "Must implement schedule_create_volume" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:97 +#: cinder/scheduler/filter_scheduler.py:98 #, python-format -msgid "join list for moving mac_addresses |%s|" +msgid "cannot place volume %(id)s on %(host)s" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:39 -#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:60 -#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:61 -#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:99 -msgid "foreign key constraint couldn't be added" +#: cinder/scheduler/filter_scheduler.py:114 +#, python-format +msgid "No valid hosts for volume %(id)s with type %(type)s" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:58 -msgid "foreign key constraint couldn't be dropped" +#: cinder/scheduler/filter_scheduler.py:125 +#, python-format +msgid "" +"Current host not valid for volume %(id)s with type %(type)s, migration " +"not allowed" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/045_add_network_priority.py:34 -msgid "priority column not added to networks table" +#: cinder/scheduler/filter_scheduler.py:156 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:41 -#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:42 -#: cinder/db/sqlalchemy/migrate_repo/versions/064_change_instance_id_to_uuid_in_instance_actions.py:56 -#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:68 -msgid "foreign key constraint couldn't be removed" +#: cinder/scheduler/filter_scheduler.py:174 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/049_add_instances_progress.py:34 -msgid "progress column not added to instances table" +#: cinder/scheduler/filter_scheduler.py:207 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" msgstr "" -#: 
cinder/db/sqlalchemy/migrate_repo/versions/055_convert_flavor_id_to_str.py:97 +#: cinder/scheduler/filter_scheduler.py:259 #, python-format -msgid "" -"Could not cast flavorid to integer: %s. Set flavorid to an integer-like " -"string to downgrade." +msgid "Filtered %s" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:69 -msgid "instance_info_caches tables not dropped" +#: cinder/scheduler/filter_scheduler.py:276 +#, python-format +msgid "Choosing %s" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/069_block_migration.py:41 -msgid "progress column not added to compute_nodes table" +#: cinder/scheduler/host_manager.py:264 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:76 -msgid "dns_domains table not dropped" +#: cinder/scheduler/host_manager.py:269 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:60 -msgid "quota_classes table not dropped" +#: cinder/scheduler/host_manager.py:294 +#, python-format +msgid "volume service is down or disabled. (host: %s)" msgstr "" -#: cinder/image/glance.py:147 -msgid "Connection error contacting glance server, retrying" +#: cinder/scheduler/manager.py:63 +msgid "" +"ChanceScheduler and SimpleScheduler have been deprecated due to lack of " +"support for advanced features like: volume types, volume encryption, QoS " +"etc. These two schedulers can be fully replaced by FilterScheduler with " +"certain combination of filters and weighers." msgstr "" -#: cinder/image/glance.py:153 cinder/network/quantum/melange_connection.py:104 -msgid "Maximum attempts reached" +#: cinder/scheduler/manager.py:98 cinder/scheduler/manager.py:100 +msgid "Failed to create scheduler manager volume flow" msgstr "" -#: cinder/image/glance.py:278 -#, python-format -msgid "Creating image in Glance. Metadata passed in %s" +#: cinder/scheduler/manager.py:159 +msgid "New volume type not specified in request_spec." msgstr "" -#: cinder/image/glance.py:281 +#: cinder/scheduler/manager.py:174 #, python-format -msgid "Metadata after formatting for Glance %s" +msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." msgstr "" -#: cinder/image/glance.py:289 +#: cinder/scheduler/manager.py:192 #, python-format -msgid "Metadata returned from Glance formatted for Base %s" +msgid "Failed to schedule_%(method)s: %(ex)s" msgstr "" -#: cinder/image/glance.py:331 cinder/image/glance.py:335 -msgid "Not the image owner" +#: cinder/scheduler/scheduler_options.py:68 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" msgstr "" -#: cinder/image/glance.py:410 +#: cinder/scheduler/scheduler_options.py:78 #, python-format -msgid "%(timestamp)s does not follow any of the signatures: %(iso_formats)s" +msgid "Could not decode scheduler options: '%s'" msgstr "" -#: cinder/image/s3.py:309 -#, python-format -msgid "Failed to download %(image_location)s to %(image_path)s" +#: cinder/scheduler/filters/capacity_filter.py:43 +msgid "Free capacity not set: volume node info collection broken." 
msgstr "" -#: cinder/image/s3.py:328 +#: cinder/scheduler/filters/capacity_filter.py:57 #, python-format -msgid "Failed to decrypt %(image_location)s to %(image_path)s" +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" msgstr "" -#: cinder/image/s3.py:340 -#, python-format -msgid "Failed to untar %(image_location)s to %(image_path)s" +#: cinder/scheduler/flows/create_volume.py:53 +msgid "No volume_id provided to populate a request_spec from" msgstr "" -#: cinder/image/s3.py:353 +#: cinder/scheduler/flows/create_volume.py:116 #, python-format -msgid "Failed to upload %(image_location)s to %(image_path)s" +msgid "Failed to schedule_create_volume: %(cause)s" msgstr "" -#: cinder/image/s3.py:379 +#: cinder/scheduler/flows/create_volume.py:135 #, python-format -msgid "Failed to decrypt private key: %s" +msgid "Failed notifying on %(topic)s payload %(payload)s" msgstr "" -#: cinder/image/s3.py:387 +#: cinder/tests/fake_driver.py:57 cinder/volume/driver.py:784 #, python-format -msgid "Failed to decrypt initialization vector: %s" +msgid "FAKE ISCSI: %s" msgstr "" -#: cinder/image/s3.py:398 +#: cinder/tests/fake_driver.py:76 cinder/volume/driver.py:884 #, python-format -msgid "Failed to decrypt image file %(image_file)s: %(err)s" +msgid "FAKE ISER: %s" msgstr "" -#: cinder/image/s3.py:410 -msgid "Unsafe filenames in image" +#: cinder/tests/fake_driver.py:97 +msgid "local_path not implemented" msgstr "" -#: cinder/ipv6/account_identifier.py:38 cinder/ipv6/rfc2462.py:34 +#: cinder/tests/fake_driver.py:124 cinder/tests/fake_driver.py:129 #, python-format -msgid "Bad mac for to_global_ipv6: %s" +msgid "LoggingVolumeDriver: %s" msgstr "" -#: cinder/ipv6/account_identifier.py:40 cinder/ipv6/rfc2462.py:36 +#: cinder/tests/fake_utils.py:70 #, python-format -msgid "Bad prefix for to_global_ipv6: %s" +msgid "Faking execution of cmd (subprocess): %s" msgstr "" -#: cinder/ipv6/account_identifier.py:42 +#: cinder/tests/fake_utils.py:78 #, python-format -msgid "Bad project_id for to_global_ipv6: %s" -msgstr "" - -#: cinder/network/ldapdns.py:321 -msgid "This driver only supports type 'a' entries." 
+msgid "Faked command matched %s" msgstr "" -#: cinder/network/linux_net.py:166 +#: cinder/tests/fake_utils.py:94 #, python-format -msgid "Attempted to remove chain %s which does not exist" +msgid "Faked command raised an exception %s" msgstr "" -#: cinder/network/linux_net.py:192 +#: cinder/tests/fake_utils.py:97 #, python-format -msgid "Unknown chain: %r" +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" msgstr "" -#: cinder/network/linux_net.py:215 +#: cinder/tests/test_misc.py:58 #, python-format msgid "" -"Tried to remove rule that was not there: %(chain)r %(rule)r %(wrap)r " -"%(top)r" +"The following migrations are missing a downgrade:\n" +"\t%s" msgstr "" -#: cinder/network/linux_net.py:335 -msgid "IPTablesManager.apply completed with success" +#: cinder/tests/test_netapp_nfs.py:360 +#, python-format +msgid "Share %(share)s and file name %(file_name)s" msgstr "" -#: cinder/network/linux_net.py:694 -#, python-format -msgid "Hupping dnsmasq threw %s" +#: cinder/tests/test_rbd.py:768 cinder/volume/drivers/rbd.py:175 +msgid "flush() not supported in this version of librbd" msgstr "" -#: cinder/network/linux_net.py:696 +#: cinder/tests/test_storwize_svc.py:260 #, python-format -msgid "Pid %d is stale, relaunching dnsmasq" +msgid "unrecognized argument %s" msgstr "" -#: cinder/network/linux_net.py:756 +#: cinder/tests/test_storwize_svc.py:1507 #, python-format -msgid "killing radvd threw %s" +msgid "Run CLI command: %s" msgstr "" -#: cinder/network/linux_net.py:758 +#: cinder/tests/test_storwize_svc.py:1510 #, python-format -msgid "Pid %d is stale, relaunching radvd" +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" msgstr "" -#: cinder/network/linux_net.py:967 +#: cinder/tests/test_storwize_svc.py:1515 #, python-format -msgid "Starting VLAN inteface %s" +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/network/linux_net.py:999 +#: cinder/tests/test_volume_types.py:60 #, python-format -msgid "Starting Bridge interface for %s" +msgid "Given data: %s" msgstr "" -#: cinder/network/linux_net.py:1142 +#: cinder/tests/test_volume_types.py:61 #, python-format -msgid "Starting bridge %s " +msgid "Result data: %s" msgstr "" -#: cinder/network/linux_net.py:1149 +#: cinder/tests/test_xiv_ds8k.py:102 #, python-format -msgid "Done starting bridge %s" +msgid "Volume not found for instance %(instance_id)s." 
msgstr "" -#: cinder/network/linux_net.py:1167 -#, python-format -msgid "Failed unplugging gateway interface '%s'" +#: cinder/tests/api/contrib/test_backups.py:741 +msgid "Invalid input" msgstr "" -#: cinder/network/linux_net.py:1170 +#: cinder/tests/integrated/test_login.py:29 #, python-format -msgid "Unplugged gateway interface '%s'" +msgid "volume: %s" msgstr "" -#: cinder/network/manager.py:291 +#: cinder/tests/integrated/api/client.py:32 #, python-format -msgid "Fixed ip %(fixed_ip_id)s not found" +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" msgstr "" -#: cinder/network/manager.py:300 cinder/network/manager.py:496 -#, python-format -msgid "Interface %(interface)s not found" +#: cinder/tests/integrated/api/client.py:42 +msgid "Authentication error" msgstr "" -#: cinder/network/manager.py:315 -#, python-format -msgid "floating IP allocation for instance |%s|" +#: cinder/tests/integrated/api/client.py:50 +msgid "Authorization error" msgstr "" -#: cinder/network/manager.py:353 -#, python-format -msgid "floating IP deallocation for instance |%s|" +#: cinder/tests/integrated/api/client.py:58 +msgid "Item not found" msgstr "" -#: cinder/network/manager.py:386 +#: cinder/tests/integrated/api/client.py:100 #, python-format -msgid "Address |%(address)s| is not allocated" +msgid "Doing %(method)s on %(relative_url)s" msgstr "" -#: cinder/network/manager.py:390 +#: cinder/tests/integrated/api/client.py:103 #, python-format -msgid "Address |%(address)s| is not allocated to your project |%(project)s|" +msgid "Body: %s" msgstr "" -#: cinder/network/manager.py:402 +#: cinder/tests/integrated/api/client.py:121 #, python-format -msgid "Quota exceeded for %s, tried to allocate address" +msgid "%(auth_uri)s => code %(http_status)s" msgstr "" -#: cinder/network/manager.py:614 +#: cinder/tests/integrated/api/client.py:148 #, python-format -msgid "" -"Database inconsistency: DNS domain |%s| is registered in the Cinder db but " -"not visible to either the floating or instance DNS driver. It will be " -"ignored." +msgid "%(relative_uri)s => code %(http_status)s" msgstr "" -#: cinder/network/manager.py:660 -#, python-format -msgid "Domain |%(domain)s| already exists, changing zone to |%(av_zone)s|." +#: cinder/tests/integrated/api/client.py:159 +msgid "Unexpected status code" msgstr "" -#: cinder/network/manager.py:670 +#: cinder/tests/integrated/api/client.py:166 #, python-format -msgid "Domain |%(domain)s| already exists, changing project to |%(project)s|." +msgid "Decoding JSON: %s" msgstr "" -#: cinder/network/manager.py:778 -#, python-format -msgid "Disassociated %s stale fixed ip(s)" +#: cinder/transfer/api.py:68 +msgid "Volume in unexpected state" msgstr "" -#: cinder/network/manager.py:782 -msgid "setting network host" +#: cinder/transfer/api.py:102 cinder/volume/api.py:367 +msgid "status must be available" msgstr "" -#: cinder/network/manager.py:896 +#: cinder/transfer/api.py:119 +#, fuzzy, python-format +msgid "Failed to create transfer record for %s" +msgstr "%s 볼륨을 찾을수 없습니다" + +#: cinder/transfer/api.py:136 #, python-format -msgid "network allocations for instance |%s|" +msgid "Attempt to transfer %s with invalid auth key." 
msgstr "" -#: cinder/network/manager.py:901 +#: cinder/transfer/api.py:156 cinder/volume/flows/api/create_volume.py:508 #, python-format -msgid "networks retrieved for instance |%(instance_id)s|: |%(networks)s|" +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" msgstr "" -#: cinder/network/manager.py:930 +#: cinder/transfer/api.py:182 #, python-format -msgid "network deallocation for instance |%s|" +msgid "Failed to update quota donating volumetransfer id %s" msgstr "" -#: cinder/network/manager.py:1152 +#: cinder/transfer/api.py:199 #, python-format -msgid "" -"instance-dns-zone is |%(domain)s|, which is in availability zone " -"|%(zone)s|. Instance |%(instance)s| is in zone |%(zone2)s|. No DNS record" -" will be created." +msgid "Volume %s has been transferred." msgstr "" -#: cinder/network/manager.py:1227 +#: cinder/volume/api.py:143 #, python-format -msgid "Unable to release %s because vif doesn't exist." +msgid "Unable to query if %s is in the availability zone set" msgstr "" -#: cinder/network/manager.py:1244 -#, python-format -msgid "Leased IP |%(address)s|" +#: cinder/volume/api.py:171 cinder/volume/api.py:173 +msgid "Failed to create api volume flow" msgstr "" -#: cinder/network/manager.py:1248 -#, python-format -msgid "IP %s leased that is not associated" +#: cinder/volume/api.py:202 +msgid "Failed to update quota for deleting volume" msgstr "" -#: cinder/network/manager.py:1256 +#: cinder/volume/api.py:214 #, python-format -msgid "IP |%s| leased that isn't allocated" +msgid "Volume status must be available or error, but current status is: %s" msgstr "" -#: cinder/network/manager.py:1261 -#, python-format -msgid "Released IP |%(address)s|" +#: cinder/volume/api.py:224 +msgid "Volume cannot be deleted while migrating" msgstr "" -#: cinder/network/manager.py:1265 +#: cinder/volume/api.py:229 #, python-format -msgid "IP %s released that is not associated" +msgid "Volume still has %d dependent snapshots" msgstr "" -#: cinder/network/manager.py:1268 +#: cinder/volume/api.py:285 cinder/volume/api.py:350 +#: cinder/volume/qos_specs.py:240 cinder/volume/volume_types.py:67 #, python-format -msgid "IP %s released that was not leased" +msgid "Searching by: %s" msgstr "" -#: cinder/network/manager.py:1331 -msgid "cidr already in use" +#: cinder/volume/api.py:370 +msgid "already attached" msgstr "" -#: cinder/network/manager.py:1334 -#, python-format -msgid "requested cidr (%(cidr)s) conflicts with existing supernet (%(super)s)" +#: cinder/volume/api.py:377 +msgid "status must be in-use to detach" msgstr "" -#: cinder/network/manager.py:1345 -#, python-format -msgid "" -"requested cidr (%(cidr)s) conflicts with existing smaller cidr " -"(%(smaller)s)" -msgstr "" +#: cinder/volume/api.py:388 +#, fuzzy +msgid "Volume status must be available to reserve" +msgstr "볼륨의 상태를 알 수 없습니다" -#: cinder/network/manager.py:1404 -msgid "Network already exists!" 
+#: cinder/volume/api.py:464 +msgid "Snapshot cannot be created while volume is migrating" msgstr "" -#: cinder/network/manager.py:1423 -#, python-format -msgid "Network must be disassociated from project %s before delete" +#: cinder/volume/api.py:468 +msgid "must be available" msgstr "" -#: cinder/network/manager.py:1832 +#: cinder/volume/api.py:490 +#, python-format msgid "" -"The sum between the number of networks and the vlan start cannot be " -"greater than 4094" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" msgstr "" -#: cinder/network/manager.py:1839 +#: cinder/volume/api.py:502 #, python-format msgid "" -"The network range is not big enough to fit %(num_networks)s. Network size" -" is %(network_size)s" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" msgstr "" -#: cinder/network/minidns.py:65 -msgid "This driver only supports type 'a'" -msgstr "" +#: cinder/volume/api.py:553 +#, fuzzy +msgid "Volume Snapshot status must be available or error" +msgstr "볼륨의 상태를 알 수 없습니다" -#: cinder/network/quantum/client.py:154 -msgid "Tenant ID not set" +#: cinder/volume/api.py:581 cinder/volume/flows/api/create_volume.py:208 +msgid "Metadata property key blank" msgstr "" -#: cinder/network/quantum/client.py:180 -#, python-format -msgid "Quantum Client Request: %(method)s %(action)s" +#: cinder/volume/api.py:585 +msgid "Metadata property key greater than 255 characters" msgstr "" -#: cinder/network/quantum/client.py:196 -#, python-format -msgid "Quantum entity not found: %s" +#: cinder/volume/api.py:589 +msgid "Metadata property value greater than 255 characters" msgstr "" -#: cinder/network/quantum/client.py:206 -#, python-format -msgid "Server %(status_code)s error: %(data)s" -msgstr "" +#: cinder/volume/api.py:720 cinder/volume/api.py:772 +#, fuzzy +msgid "Volume status must be available/in-use." +msgstr "볼륨의 상태를 알 수 없습니다" -#: cinder/network/quantum/client.py:210 -#, python-format -msgid "Unable to connect to server. Got error: %s" +#: cinder/volume/api.py:723 +msgid "Volume status is in-use." msgstr "" -#: cinder/network/quantum/client.py:228 -#, python-format -msgid "unable to deserialize object of type = '%s'" +#: cinder/volume/api.py:752 +msgid "Volume status must be available to extend." msgstr "" -#: cinder/network/quantum/manager.py:162 -msgid "QuantumManager does not use 'multi_host' parameter." +#: cinder/volume/api.py:757 +#, python-format +msgid "" +"New size for extend must be greater than current size. (current: " +"%(size)s, extended: %(new_size)s)" msgstr "" -#: cinder/network/quantum/manager.py:166 -msgid "QuantumManager requires that only one network is created per call" +#: cinder/volume/api.py:778 +msgid "Volume is already part of an active migration" msgstr "" -#: cinder/network/quantum/manager.py:176 -msgid "QuantumManager does not use 'vlan_start' parameter." +#: cinder/volume/api.py:784 +msgid "volume must not have snapshots" msgstr "" -#: cinder/network/quantum/manager.py:182 -msgid "QuantumManager does not use 'vpn_start' parameter." +#: cinder/volume/api.py:797 +#, python-format +msgid "No available service named %s" msgstr "" -#: cinder/network/quantum/manager.py:186 -msgid "QuantumManager does not use 'bridge' parameter." +#: cinder/volume/api.py:803 +msgid "Destination host must be different than current host" msgstr "" -#: cinder/network/quantum/manager.py:190 -msgid "QuantumManager does not use 'bridge_interface' parameter." 
+#: cinder/volume/api.py:833 +msgid "Source volume not mid-migration." msgstr "" -#: cinder/network/quantum/manager.py:195 -msgid "QuantumManager requires a valid (.1) gateway address." +#: cinder/volume/api.py:837 +msgid "Destination volume not mid-migration." msgstr "" -#: cinder/network/quantum/manager.py:204 +#: cinder/volume/api.py:842 #, python-format -msgid "" -"Unable to find existing quantum network for tenant '%(q_tenant_id)s' with" -" net-id '%(quantum_net_id)s'" +msgid "Destination has migration_status %(stat)s, expected %(exp)s." msgstr "" -#: cinder/network/quantum/manager.py:301 -#, python-format -msgid "network allocations for instance %s" +#: cinder/volume/api.py:853 +msgid "Volume status must be available to update readonly flag." msgstr "" -#: cinder/network/quantum/manager.py:588 +#: cinder/volume/api.py:862 #, python-format -msgid "" -"port deallocation failed for instance: |%(instance_id)s|, port_id: " -"|%(port_id)s|" +msgid "Unable to update type due to incorrect status on volume: %s" msgstr "" -#: cinder/network/quantum/manager.py:606 +#: cinder/volume/api.py:868 #, python-format -msgid "" -"ipam deallocation failed for instance: |%(instance_id)s|, vif_uuid: " -"|%(vif_uuid)s|" +msgid "Volume %s is already part of an active migration." msgstr "" -#: cinder/network/quantum/melange_connection.py:96 +#: cinder/volume/api.py:874 #, python-format -msgid "Server returned error: %s" +msgid "migration_policy must be 'on-demand' or 'never', passed: %s" msgstr "" -#: cinder/network/quantum/melange_connection.py:98 -msgid "Connection error contacting melange service, retrying" +#: cinder/volume/api.py:887 +#, python-format +msgid "Invalid volume_type passed: %s" msgstr "" -#: cinder/network/quantum/melange_connection.py:108 +#: cinder/volume/api.py:900 #, python-format -msgid "" -"allocate IP on network |%(network_id)s| belonging to " -"|%(network_tenant_id)s| to this vif |%(vif_id)s| with mac " -"|%(mac_address)s| belonging to |%(project_id)s| " +msgid "New volume_type same as original: %s" msgstr "" -#: cinder/network/quantum/melange_ipam_lib.py:133 -msgid "get_project_and_global_net_ids must be called with a non-null project_id" +#: cinder/volume/api.py:915 +msgid "Retype cannot change encryption requirements" msgstr "" -#: cinder/network/quantum/cinder_ipam_lib.py:75 -msgid "Error creating network entry" +#: cinder/volume/api.py:927 +msgid "Retype cannot change front-end qos specs for in-use volumes" msgstr "" -#: cinder/network/quantum/cinder_ipam_lib.py:90 +#: cinder/volume/driver.py:189 cinder/volume/drivers/netapp/nfs.py:174 #, python-format -msgid "No network with net_id = %s" +msgid "Recovering from a failed execute. Try number %s" msgstr "" -#: cinder/network/quantum/cinder_ipam_lib.py:221 +#: cinder/volume/driver.py:282 #, python-format -msgid "No fixed IPs to deallocate for vif %s" +msgid "copy_data_between_volumes %(src)s -> %(dest)s." 
msgstr "" -#: cinder/network/quantum/quantum_connection.py:99 +#: cinder/volume/driver.py:295 cinder/volume/driver.py:309 #, python-format -msgid "Connecting interface %(interface_id)s to net %(net_id)s for %(tenant_id)s" +msgid "Failed to attach volume %(vol)s" msgstr "" -#: cinder/network/quantum/quantum_connection.py:113 +#: cinder/volume/driver.py:327 #, python-format -msgid "Deleting port %(port_id)s on net %(net_id)s for %(tenant_id)s" +msgid "Failed to copy volume %(src)s to %(dest)d" msgstr "" -#: cinder/notifier/api.py:115 +#: cinder/volume/driver.py:340 #, python-format -msgid "%s not in valid priorities" +msgid "copy_image_to_volume %s." msgstr "" -#: cinder/notifier/api.py:130 +#: cinder/volume/driver.py:358 #, python-format -msgid "" -"Problem '%(e)s' attempting to send to notification system. " -"Payload=%(payload)s" +msgid "copy_volume_to_image %s." msgstr "" -#: cinder/notifier/list_notifier.py:65 +#: cinder/volume/driver.py:394 #, python-format -msgid "Problem '%(e)s' attempting to send to notification driver %(driver)s." +msgid "Unable to access the backend storage via the path %(path)s." msgstr "" -#: cinder/notifier/rabbit_notifier.py:46 +#: cinder/volume/driver.py:433 #, python-format -msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgid "Creating a new backup for volume %s." msgstr "" -#: cinder/rpc/amqp.py:146 +#: cinder/volume/driver.py:451 #, python-format -msgid "Returning exception %s to caller" +msgid "Restoring backup %(backup)s to volume %(volume)s." msgstr "" -#: cinder/rpc/amqp.py:188 -#, python-format -msgid "unpacked context: %s" +#: cinder/volume/driver.py:474 +msgid "Extend volume not implemented" msgstr "" -#: cinder/rpc/amqp.py:231 -#, python-format -msgid "received %s" +#: cinder/volume/driver.py:533 cinder/volume/drivers/emc/emc_smis_iscsi.py:113 +msgid "ISCSI provider_location not stored, using discovery" msgstr "" -#: cinder/rpc/amqp.py:236 +#: cinder/volume/driver.py:546 #, python-format -msgid "no method for message: %s" +msgid "ISCSI discovery attempt failed for:%s" msgstr "" -#: cinder/rpc/amqp.py:237 +#: cinder/volume/driver.py:548 #, python-format -msgid "No method for message: %s" +msgid "Error from iscsiadm -m discovery: %s" msgstr "" -#: cinder/rpc/amqp.py:321 -#, python-format -msgid "Making asynchronous call on %s ..." -msgstr "" +#: cinder/volume/driver.py:595 +#, fuzzy, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "%s 볼륨을 찾을수 없습니다" -#: cinder/rpc/amqp.py:324 +#: cinder/volume/driver.py:599 cinder/volume/drivers/emc/emc_smis_iscsi.py:156 #, python-format -msgid "MSG_ID is %s" +msgid "ISCSI Discovery: Found %s" msgstr "" -#: cinder/rpc/amqp.py:346 -#, python-format -msgid "Making asynchronous cast on %s..." +#: cinder/volume/driver.py:696 +msgid "The volume driver requires the iSCSI initiator name in the connector." msgstr "" -#: cinder/rpc/amqp.py:354 -msgid "Making asynchronous fanout cast..." 
+#: cinder/volume/driver.py:726 cinder/volume/driver.py:845 +#: cinder/volume/drivers/eqlx.py:247 cinder/volume/drivers/lvm.py:359 +#: cinder/volume/drivers/zadara.py:650 +#: cinder/volume/drivers/emc/emc_smis_common.py:859 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:235 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:602 +#: cinder/volume/drivers/netapp/iscsi.py:1032 +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/nexenta/iscsi.py:538 +#: cinder/volume/drivers/windows/windows.py:205 +msgid "Updating volume stats" msgstr "" -#: cinder/rpc/amqp.py:379 -#, python-format -msgid "Sending notification on %s..." +#: cinder/volume/driver.py:924 +msgid "Driver must implement initialize_connection" msgstr "" -#: cinder/rpc/common.py:54 +#: cinder/volume/manager.py:203 #, python-format -msgid "" -"Remote error: %(exc_type)s %(value)s\n" -"%(traceback)s." +msgid "Driver path %s is deprecated, update your configuration to the new path." msgstr "" -#: cinder/rpc/common.py:71 -msgid "Timeout while waiting on RPC response." +#: cinder/volume/manager.py:209 +msgid "" +"ThinLVMVolumeDriver is deprecated, please configure LVMISCSIDriver and " +"lvm_type=thin. Continuing with those settings." msgstr "" -#: cinder/rpc/impl_kombu.py:111 -msgid "Failed to process message... skipping it." +#: cinder/volume/manager.py:228 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)" msgstr "" -#: cinder/rpc/impl_kombu.py:407 +#: cinder/volume/manager.py:235 #, python-format -msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgid "Error encountered during initialization of driver: %(name)s" msgstr "" -#: cinder/rpc/impl_kombu.py:430 +#: cinder/volume/manager.py:244 #, python-format -msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgid "Re-exporting %s volumes" msgstr "" -#: cinder/rpc/impl_kombu.py:466 +#: cinder/volume/manager.py:257 #, python-format -msgid "" -"Unable to connect to AMQP server on %(hostname)s:%(port)d after " -"%(max_retries)d tries: %(err_str)s" +msgid "Failed to re-export volume %s: setting to error state" msgstr "" -#: cinder/rpc/impl_kombu.py:482 +#: cinder/volume/manager.py:264 #, python-format -msgid "" -"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " -"again in %(sleep_time)d seconds." 
+msgid "volume %s stuck in a downloading state" msgstr "" -#: cinder/rpc/impl_kombu.py:533 cinder/rpc/impl_qpid.py:385 +#: cinder/volume/manager.py:271 #, python-format -msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgid "volume %s: skipping export" msgstr "" -#: cinder/rpc/impl_kombu.py:551 cinder/rpc/impl_qpid.py:400 +#: cinder/volume/manager.py:273 #, python-format -msgid "Timed out waiting for RPC response: %s" +msgid "" +"Error encountered during re-exporting phase of driver initialization: " +"%(name)s" msgstr "" -#: cinder/rpc/impl_kombu.py:555 cinder/rpc/impl_qpid.py:404 -#, python-format -msgid "Failed to consume message from queue: %s" +#: cinder/volume/manager.py:283 +msgid "Resuming any in progress delete operations" msgstr "" -#: cinder/rpc/impl_kombu.py:589 cinder/rpc/impl_qpid.py:434 +#: cinder/volume/manager.py:286 #, python-format -msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgid "Resuming delete on volume: %s" msgstr "" -#: cinder/rpc/impl_qpid.py:341 -#, python-format -msgid "Unable to connect to AMQP server: %s " +#: cinder/volume/manager.py:328 cinder/volume/manager.py:330 +msgid "Failed to create manager volume flow" msgstr "" -#: cinder/rpc/impl_qpid.py:346 +#: cinder/volume/manager.py:374 cinder/volume/manager.py:391 #, python-format -msgid "Connected to AMQP server on %s" +msgid "volume %s: deleting" msgstr "" -#: cinder/rpc/impl_qpid.py:354 -msgid "Re-established AMQP queues" +#: cinder/volume/manager.py:380 +msgid "volume is not local to this node" msgstr "" -#: cinder/rpc/impl_qpid.py:412 -msgid "Error processing message. Skipping it." +#: cinder/volume/manager.py:389 +#, python-format +msgid "volume %s: removing export" msgstr "" -#: cinder/scheduler/chance.py:47 cinder/scheduler/simple.py:91 -#: cinder/scheduler/simple.py:143 -msgid "Is the appropriate service running?" +#: cinder/volume/manager.py:394 +#, python-format +msgid "Cannot delete volume %s: volume is busy" msgstr "" -#: cinder/scheduler/chance.py:52 -msgid "Could not find another compute" +#: cinder/volume/manager.py:422 +msgid "Failed to update usages deleting volume" msgstr "" -#: cinder/scheduler/driver.py:63 +#: cinder/volume/manager.py:427 #, python-format -msgid "Casted '%(method)s' to volume '%(host)s'" +msgid "volume %s: glance metadata deleted" msgstr "" -#: cinder/scheduler/driver.py:80 +#: cinder/volume/manager.py:430 #, python-format -msgid "Casted '%(method)s' to compute '%(host)s'" +msgid "no glance metadata found for volume %s" msgstr "" -#: cinder/scheduler/driver.py:89 +#: cinder/volume/manager.py:434 #, python-format -msgid "Casted '%(method)s' to network '%(host)s'" +msgid "volume %s: deleted successfully" msgstr "" -#: cinder/scheduler/driver.py:107 +#: cinder/volume/manager.py:451 #, python-format -msgid "Casted '%(method)s' to %(topic)s '%(host)s'" -msgstr "" - -#: cinder/scheduler/driver.py:181 -msgid "Must implement a fallback schedule" -msgstr "" - -#: cinder/scheduler/driver.py:185 -msgid "Driver must implement schedule_prep_resize" -msgstr "" - -#: cinder/scheduler/driver.py:190 -msgid "Driver must implement schedule_run_instance" -msgstr "" - -#: cinder/scheduler/driver.py:325 -msgid "Block migration can not be used with shared storage." -msgstr "" - -#: cinder/scheduler/driver.py:330 -msgid "Live migration can not be used without shared storage." 
+msgid "snapshot %s: creating" msgstr "" -#: cinder/scheduler/driver.py:367 cinder/scheduler/driver.py:457 +#: cinder/volume/manager.py:462 #, python-format -msgid "host %(dest)s is not compatible with original host %(src)s." +msgid "snapshot %(snap_id)s: creating" msgstr "" -#: cinder/scheduler/driver.py:416 +#: cinder/volume/manager.py:490 #, python-format msgid "" -"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " -"memory(host:%(avail)s <= instance:%(mem_inst)s)" +"Failed updating %(snapshot_id)s metadata using the provided volumes " +"%(volume_id)s metadata" msgstr "" -#: cinder/scheduler/driver.py:472 +#: cinder/volume/manager.py:496 #, python-format -msgid "" -"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " -"disk(host:%(available)s <= instance:%(necessary)s)" +msgid "snapshot %s: created successfully" msgstr "" -#: cinder/scheduler/filter_scheduler.py:51 +#: cinder/volume/manager.py:508 cinder/volume/manager.py:518 #, python-format -msgid "No host selection for %s defined." +msgid "snapshot %s: deleting" msgstr "" -#: cinder/scheduler/filter_scheduler.py:64 +#: cinder/volume/manager.py:526 #, python-format -msgid "Attempting to build %(num_instances)d instance(s)" +msgid "Cannot delete snapshot %s: snapshot is busy" msgstr "" -#: cinder/scheduler/filter_scheduler.py:157 -msgid "Scheduler only understands Compute nodes (for now)" +#: cinder/volume/manager.py:556 +msgid "Failed to update usages deleting snapshot" msgstr "" -#: cinder/scheduler/filter_scheduler.py:199 +#: cinder/volume/manager.py:559 #, python-format -msgid "Filtered %(hosts)s" +msgid "snapshot %s: deleted successfully" msgstr "" -#: cinder/scheduler/filter_scheduler.py:209 -#, python-format -msgid "Weighted %(weighted_host)s" +#: cinder/volume/manager.py:579 +msgid "being attached by another instance" msgstr "" -#: cinder/scheduler/host_manager.py:144 -#, python-format -msgid "Host filter fails for ignored host %(host)s" +#: cinder/volume/manager.py:583 +msgid "being attached by another host" msgstr "" -#: cinder/scheduler/host_manager.py:151 -#, python-format -msgid "Host filter fails for non-forced host %(host)s" +#: cinder/volume/manager.py:587 +msgid "being attached by different mode" msgstr "" -#: cinder/scheduler/host_manager.py:157 -#, python-format -msgid "Host filter function %(func)s failed for %(host)s" +#: cinder/volume/manager.py:590 +msgid "status must be available or attaching" msgstr "" -#: cinder/scheduler/host_manager.py:163 +#: cinder/volume/manager.py:698 #, python-format -msgid "Host filter passes for %(host)s" +msgid "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" msgstr "" -#: cinder/scheduler/host_manager.py:272 +#: cinder/volume/manager.py:760 #, python-format -msgid "Received %(service_name)s service update from %(host)s." -msgstr "" - -#: cinder/scheduler/host_manager.py:313 -msgid "host_manager only implemented for 'compute'" +msgid "Unable to fetch connection information from backend: %(err)s" msgstr "" -#: cinder/scheduler/host_manager.py:323 +#: cinder/volume/manager.py:807 #, python-format -msgid "No service for compute ID %s" +msgid "Unable to terminate volume connection: %(err)s" msgstr "" -#: cinder/scheduler/manager.py:85 -#, python-format -msgid "" -"Driver Method %(driver_method_name)s missing: %(e)s. 
Reverting to " -"schedule()" +#: cinder/volume/manager.py:854 +msgid "failed to create new_volume on destination host" msgstr "" -#: cinder/scheduler/manager.py:150 -#, python-format -msgid "Failed to schedule_%(method)s: %(ex)s" +#: cinder/volume/manager.py:857 +msgid "timeout creating new_volume on destination host" msgstr "" -#: cinder/scheduler/manager.py:159 +#: cinder/volume/manager.py:880 #, python-format -msgid "Setting instance %(instance_uuid)s to %(state)s state." +msgid "Failed to copy volume %(vol1)s to %(vol2)s" msgstr "" -#: cinder/scheduler/scheduler_options.py:66 +#: cinder/volume/manager.py:909 #, python-format -msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgid "" +"migrate_volume_completion: completing migration for volume %(vol1)s " +"(temporary volume %(vol2)s" msgstr "" -#: cinder/scheduler/scheduler_options.py:75 +#: cinder/volume/manager.py:921 #, python-format -msgid "Could not decode scheduler options: '%(e)s'" +msgid "" +"migrate_volume_completion is cleaning up an error for volume %(vol1)s " +"(temporary volume %(vol2)s" msgstr "" -#: cinder/scheduler/simple.py:87 -msgid "Not enough allocatable CPU cores remaining" +#: cinder/volume/manager.py:940 +#, python-format +msgid "Failed to delete migration source vol %(vol)s: %(err)s" msgstr "" -#: cinder/scheduler/simple.py:137 -msgid "Not enough allocatable volume gigabytes remaining" +#: cinder/volume/manager.py:976 +#, python-format +msgid "volume %s: calling driver migrate_volume" msgstr "" -#: cinder/scheduler/filters/core_filter.py:45 -msgid "VCPUs not set; assuming CPU collection broken" +#: cinder/volume/manager.py:1016 +msgid "Updating volume status" msgstr "" -#: cinder/tests/fake_utils.py:72 +#: cinder/volume/manager.py:1024 #, python-format -msgid "Faking execution of cmd (subprocess): %s" +msgid "" +"Unable to update stats, %(driver_name)s -%(driver_version)s " +"%(config_group)s driver is uninitialized." msgstr "" -#: cinder/tests/fake_utils.py:80 +#: cinder/volume/manager.py:1044 #, python-format -msgid "Faked command matched %s" +msgid "Notification {%s} received" msgstr "" -#: cinder/tests/fake_utils.py:96 +#: cinder/volume/manager.py:1091 #, python-format -msgid "Faked command raised an exception %s" +msgid "" +"Quota exceeded for %(s_pid)s, tried to extend volume by %(s_size)sG, " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" msgstr "" -#: cinder/tests/fake_utils.py:101 +#: cinder/volume/manager.py:1103 #, python-format -msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" -msgstr "" - -#: cinder/tests/fakelibvirt.py:784 -msgid "Please extend mock libvirt module to support flags" +msgid "volume %s: extending" msgstr "" -#: cinder/tests/fakelibvirt.py:790 -msgid "Please extend fake libvirt module to support this auth method" +#: cinder/volume/manager.py:1105 +#, python-format +msgid "volume %s: extended successfully" msgstr "" -#: cinder/tests/test_compute.py:365 cinder/tests/test_compute.py:1419 +#: cinder/volume/manager.py:1107 #, python-format -msgid "Running instances: %s" +msgid "volume %s: Error trying to extend volume" msgstr "" -#: cinder/tests/test_compute.py:371 -#, python-format -msgid "After terminating instances: %s" +#: cinder/volume/manager.py:1169 +msgid "Failed to update usages while retyping volume." 
msgstr "" -#: cinder/tests/test_compute.py:589 -msgid "Internal error" +#: cinder/volume/manager.py:1170 +msgid "Failed to get old volume type quota reservations" msgstr "" -#: cinder/tests/test_compute.py:1430 +#: cinder/volume/manager.py:1190 #, python-format -msgid "After force-killing instances: %s" +msgid "Volume %s: retyped succesfully" msgstr "" -#: cinder/tests/test_misc.py:92 +#: cinder/volume/manager.py:1193 #, python-format msgid "" -"The following migrations are missing a downgrade:\n" -"\t%s" -msgstr "" - -#: cinder/tests/test_cinder_manage.py:169 -msgid "id" -msgstr "" - -#: cinder/tests/test_cinder_manage.py:170 -msgid "IPv4" -msgstr "" - -#: cinder/tests/test_cinder_manage.py:171 -msgid "IPv6" -msgstr "" - -#: cinder/tests/test_cinder_manage.py:172 -msgid "start address" +"Volume %s: driver error when trying to retype, falling back to generic " +"mechanism." msgstr "" -#: cinder/tests/test_cinder_manage.py:173 -msgid "DNS1" +#: cinder/volume/manager.py:1204 +msgid "Retype requires migration but is not allowed." msgstr "" -#: cinder/tests/test_cinder_manage.py:174 -msgid "DNS2" +#: cinder/volume/manager.py:1212 +msgid "Volume must not have snapshots." msgstr "" -#: cinder/tests/test_cinder_manage.py:175 -msgid "VlanID" +#: cinder/volume/qos_specs.py:57 +#, python-format +msgid "Valid consumer of QoS specs are: %s" msgstr "" -#: cinder/tests/test_cinder_manage.py:176 -msgid "project" +#: cinder/volume/qos_specs.py:84 cinder/volume/qos_specs.py:105 +#: cinder/volume/qos_specs.py:155 cinder/volume/qos_specs.py:197 +#: cinder/volume/qos_specs.py:211 cinder/volume/qos_specs.py:225 +#: cinder/volume/volume_types.py:43 +#, python-format +msgid "DB error: %s" msgstr "" -#: cinder/tests/test_cinder_manage.py:177 -msgid "uuid" +#: cinder/volume/qos_specs.py:123 cinder/volume/qos_specs.py:140 +#: cinder/volume/qos_specs.py:272 cinder/volume/volume_types.py:52 +#: cinder/volume/volume_types.py:99 +msgid "id cannot be None" msgstr "" -#: cinder/tests/test_volume.py:216 +#: cinder/volume/qos_specs.py:156 #, python-format -msgid "Target %s allocated" +msgid "Failed to get all associations of qos specs %s" msgstr "" -#: cinder/tests/test_volume.py:468 +#: cinder/volume/qos_specs.py:189 #, python-format -msgid "Cannot confirm exported volume id:%s." +msgid "" +"Type %(type_id)s is already associated with another qos specs: " +"%(qos_specs_id)s" msgstr "" -#: cinder/tests/test_volume_types.py:58 +#: cinder/volume/qos_specs.py:198 #, python-format -msgid "Given data: %s" +msgid "Failed to associate qos specs %(id)s with type: %(vol_type_id)s" msgstr "" -#: cinder/tests/test_volume_types.py:59 +#: cinder/volume/qos_specs.py:212 #, python-format -msgid "Result data: %s" +msgid "Failed to disassociate qos specs %(id)s with type: %(vol_type_id)s" msgstr "" -#: cinder/tests/test_xenapi.py:626 +#: cinder/volume/qos_specs.py:226 #, python-format -msgid "Creating files in %s to simulate guest agent" +msgid "Failed to disassociate qos specs %s." msgstr "" -#: cinder/tests/test_xenapi.py:637 -#, python-format -msgid "Removing simulated guest agent files in %s" +#: cinder/volume/qos_specs.py:284 cinder/volume/volume_types.py:111 +msgid "name cannot be None" msgstr "" -#: cinder/tests/api/openstack/compute/test_servers.py:2144 +#: cinder/volume/utils.py:144 #, python-format -msgid "Quota exceeded: code=%(code)s" +msgid "" +"Incorrect value error: %(blocksize)s, it may indicate that " +"'volume_dd_blocksize' was configured incorrectly. Fall back to default." 
msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:57 +#: cinder/volume/volume_types.py:130 #, python-format -msgid "_create: %s" +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:66 +#: cinder/volume/drivers/block_device.py:131 +#: cinder/volume/drivers/block_device.py:143 cinder/volume/drivers/lvm.py:654 +#: cinder/volume/drivers/lvm.py:669 #, python-format -msgid "_delete: %s" +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:75 +#: cinder/volume/drivers/block_device.py:157 cinder/volume/drivers/lvm.py:687 #, python-format -msgid "_get: %s" +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:85 +#: cinder/volume/drivers/block_device.py:183 cinder/volume/drivers/lvm.py:483 #, python-format -msgid "_get_all: %s" +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:125 +#: cinder/volume/drivers/block_device.py:200 cinder/volume/drivers/lvm.py:504 #, python-format -msgid "test_snapshot_create: param=%s" +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:134 +#: cinder/volume/drivers/block_device.py:272 cinder/volume/drivers/lvm.py:227 #, python-format -msgid "test_snapshot_create: resp_dict=%s" +msgid "Performing secure delete on volume: %s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:156 +#: cinder/volume/drivers/block_device.py:287 #, python-format -msgid "test_snapshot_create_force: param=%s" +msgid "Error unrecognized volume_clear option: %s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:165 +#: cinder/volume/drivers/block_device.py:311 cinder/volume/drivers/lvm.py:300 +#: cinder/volume/drivers/zadara.py:509 cinder/volume/drivers/nexenta/nfs.py:189 #, python-format -msgid "test_snapshot_create_force: resp_dict=%s" +msgid "Creating clone of volume: %s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:205 -#, python-format -msgid "test_snapshot_show: resp=%s" +#: cinder/volume/drivers/block_device.py:380 +msgid "No free disk" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:231 -#, python-format -msgid "test_snapshot_detail: resp_dict=%s" +#: cinder/volume/drivers/block_device.py:393 +msgid "No big enough free disk" msgstr "" -#: cinder/tests/integrated/test_login.py:31 +#: cinder/volume/drivers/coraid.py:84 #, python-format -msgid "flavor: %s" +msgid "Invalid ESM url scheme \"%s\". Supported https only." msgstr "" -#: cinder/tests/integrated/api/client.py:38 -#, python-format -msgid "" -"%(message)s\n" -"Status Code: %(_status)s\n" -"Body: %(_body)s" +#: cinder/volume/drivers/coraid.py:111 +msgid "Invalid REST handle name. Expected path." msgstr "" -#: cinder/tests/integrated/api/client.py:47 -msgid "Authentication error" +#: cinder/volume/drivers/coraid.py:134 +#, python-format +msgid "Call to json.loads() failed: %(ex)s. Response: %(resp)s" msgstr "" -#: cinder/tests/integrated/api/client.py:55 -msgid "Authorization error" +#: cinder/volume/drivers/coraid.py:224 +msgid "Session is expired. Relogin on ESM." 
msgstr "" -#: cinder/tests/integrated/api/client.py:63 -msgid "Item not found" +#: cinder/volume/drivers/coraid.py:244 +msgid "Reply is empty." msgstr "" -#: cinder/tests/integrated/api/client.py:105 -#, python-format -msgid "Doing %(method)s on %(relative_url)s" +#: cinder/volume/drivers/coraid.py:246 +msgid "Error message is empty." msgstr "" -#: cinder/tests/integrated/api/client.py:107 +#: cinder/volume/drivers/coraid.py:284 #, python-format -msgid "Body: %s" +msgid "Coraid Appliance ping failed: %s" msgstr "" -#: cinder/tests/integrated/api/client.py:125 +#: cinder/volume/drivers/coraid.py:297 #, python-format -msgid "%(auth_uri)s => code %(http_status)s" +msgid "Volume \"%(name)s\" created with VSX LUN \"%(lun)s\"" msgstr "" -#: cinder/tests/integrated/api/client.py:151 +#: cinder/volume/drivers/coraid.py:311 #, python-format -msgid "%(relative_uri)s => code %(http_status)s" -msgstr "" - -#: cinder/tests/integrated/api/client.py:161 -msgid "Unexpected status code" +msgid "Volume \"%s\" deleted." msgstr "" -#: cinder/tests/integrated/api/client.py:168 +#: cinder/volume/drivers/coraid.py:315 #, python-format -msgid "Decoding JSON: %s" +msgid "Resize volume \"%(name)s\" to %(size)s GB." msgstr "" -#: cinder/tests/rpc/common.py:133 +#: cinder/volume/drivers/coraid.py:319 #, python-format -msgid "Nested received %(queue)s, %(value)s" +msgid "Repository for volume \"%(name)s\" found: \"%(repo)s\"" msgstr "" -#: cinder/tests/rpc/common.py:142 +#: cinder/volume/drivers/coraid.py:333 #, python-format -msgid "Nested return %s" +msgid "Volume \"%(name)s\" resized. New size is %(size)s GB." msgstr "" -#: cinder/tests/rpc/common.py:160 -msgid "RPC backend does not support timeouts" +#: cinder/volume/drivers/coraid.py:385 +msgid "Cannot create clone volume in different repository." 
msgstr "" -#: cinder/tests/rpc/common.py:227 cinder/tests/rpc/common.py:233 +#: cinder/volume/drivers/coraid.py:505 #, python-format -msgid "Received %s" -msgstr "" - -#: cinder/virt/connection.py:85 -msgid "Failed to open connection to the hypervisor" +msgid "Initialize connection %(shelf)s/%(lun)s for %(name)s" msgstr "" -#: cinder/virt/fake.py:270 cinder/virt/xenapi_conn.py:396 -#: cinder/virt/baremetal/proxy.py:716 cinder/virt/libvirt/connection.py:2045 +#: cinder/volume/drivers/eqlx.py:139 #, python-format -msgid "Compute_service record created for %s " +msgid "" +"CLI output\n" +"%s" msgstr "" -#: cinder/virt/fake.py:273 cinder/virt/xenapi_conn.py:399 -#: cinder/virt/baremetal/proxy.py:720 cinder/virt/libvirt/connection.py:2048 -#, python-format -msgid "Compute_service record updated for %s " +#: cinder/volume/drivers/eqlx.py:154 +msgid "Reading CLI MOTD" msgstr "" -#: cinder/virt/firewall.py:130 +#: cinder/volume/drivers/eqlx.py:158 #, python-format -msgid "Attempted to unfilter instance %s which is not filtered" +msgid "Setting CLI terminal width: '%s'" msgstr "" -#: cinder/virt/firewall.py:137 +#: cinder/volume/drivers/eqlx.py:162 #, python-format -msgid "Filters added to instance %s" +msgid "Sending CLI command: '%s'" msgstr "" -#: cinder/virt/firewall.py:139 -msgid "Provider Firewall Rules refreshed" +#: cinder/volume/drivers/eqlx.py:169 +msgid "Error executing EQL command" msgstr "" -#: cinder/virt/firewall.py:291 +#: cinder/volume/drivers/eqlx.py:199 #, python-format -msgid "Adding security group rule: %r" +msgid "EQL-driver: executing \"%s\"" msgstr "" -#: cinder/virt/firewall.py:403 cinder/virt/xenapi/firewall.py:87 +#: cinder/volume/drivers/eqlx.py:208 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:383 #, python-format -msgid "Adding provider rule: %s" +msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" msgstr "" -#: cinder/virt/images.py:86 -msgid "'qemu-img info' parsing failed." 
-msgstr "" +#: cinder/volume/drivers/eqlx.py:215 cinder/volume/drivers/san/san.py:149 +#, fuzzy, python-format +msgid "Error running SSH command: %s" +msgstr "명령 실행도중 예측하지 못한 에러가 발생했습니다" -#: cinder/virt/images.py:92 +#: cinder/volume/drivers/eqlx.py:282 #, python-format -msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgid "Volume %s does not exist, it may have already been deleted" msgstr "" -#: cinder/virt/images.py:104 +#: cinder/volume/drivers/eqlx.py:300 #, python-format -msgid "Converted to raw, but format is now %s" +msgid "EQL-driver: Setup is complete, group IP is %s" msgstr "" -#: cinder/virt/vmwareapi_conn.py:105 -msgid "" -"Must specify vmwareapi_host_ip,vmwareapi_host_username and " -"vmwareapi_host_password to useconnection_type=vmwareapi" +#: cinder/volume/drivers/eqlx.py:304 +msgid "Failed to setup the Dell EqualLogic driver" msgstr "" -#: cinder/virt/vmwareapi_conn.py:276 +#: cinder/volume/drivers/eqlx.py:320 #, python-format -msgid "In vmwareapi:_create_session, got this exception: %s" +msgid "Failed to create volume %s" msgstr "" -#: cinder/virt/vmwareapi_conn.py:359 +#: cinder/volume/drivers/eqlx.py:329 #, python-format -msgid "In vmwareapi:_call_method, got this exception: %s" +msgid "Volume %s was not found while trying to delete it" msgstr "" -#: cinder/virt/vmwareapi_conn.py:398 +#: cinder/volume/drivers/eqlx.py:333 #, python-format -msgid "Task [%(task_name)s] %(task_ref)s status: success" +msgid "Failed to delete volume %s" msgstr "" -#: cinder/virt/vmwareapi_conn.py:404 +#: cinder/volume/drivers/eqlx.py:348 #, python-format -msgid "Task [%(task_name)s] %(task_ref)s status: error %(error_info)s" +msgid "Failed to create snapshot of volume %s" msgstr "" -#: cinder/virt/vmwareapi_conn.py:409 +#: cinder/volume/drivers/eqlx.py:361 #, python-format -msgid "In vmwareapi:_poll_task, Got this error %s" -msgstr "" - -#: cinder/virt/xenapi_conn.py:140 -msgid "" -"Must specify xenapi_connection_url, xenapi_connection_username " -"(optionally), and xenapi_connection_password to use " -"connection_type=xenapi" +msgid "Failed to create volume from snapshot %s" msgstr "" -#: cinder/virt/xenapi_conn.py:329 cinder/virt/libvirt/connection.py:472 -msgid "Could not determine iscsi initiator name" +#: cinder/volume/drivers/eqlx.py:374 +#, python-format +msgid "Failed to create clone of volume %s" msgstr "" -#: cinder/virt/xenapi_conn.py:460 -msgid "Host startup on XenServer is not supported." +#: cinder/volume/drivers/eqlx.py:384 +#, python-format +msgid "Failed to delete snapshot %(snap)s of volume %(vol)s" msgstr "" -#: cinder/virt/xenapi_conn.py:489 -msgid "Unable to log in to XenAPI (is the Dom0 disk full?)" +#: cinder/volume/drivers/eqlx.py:405 +#, python-format +msgid "Failed to initialize connection to volume %s" msgstr "" -#: cinder/virt/xenapi_conn.py:527 -msgid "Host is member of a pool, but DB says otherwise" +#: cinder/volume/drivers/eqlx.py:415 +#, python-format +msgid "Failed to terminate connection to volume %s" msgstr "" -#: cinder/virt/xenapi_conn.py:599 cinder/virt/xenapi_conn.py:612 +#: cinder/volume/drivers/eqlx.py:436 #, python-format -msgid "Got exception: %s" +msgid "Volume %s is not found!, it may have been deleted" msgstr "" -#: cinder/virt/baremetal/dom.py:93 -msgid "No domains exist." 
+#: cinder/volume/drivers/eqlx.py:440 +#, python-format +msgid "Failed to ensure export of volume %s" msgstr "" -#: cinder/virt/baremetal/dom.py:95 +#: cinder/volume/drivers/eqlx.py:459 #, python-format -msgid "============= initial domains =========== : %s" +msgid "Failed to extend_volume %(name)s from %(current_size)sGB to %(new_size)sGB" msgstr "" -#: cinder/virt/baremetal/dom.py:99 -msgid "Building domain: to be removed" +#: cinder/volume/drivers/glusterfs.py:86 +#, python-format +msgid "There's no Gluster config file configured (%s)" msgstr "" -#: cinder/virt/baremetal/dom.py:103 -msgid "Not running domain: remove" +#: cinder/volume/drivers/glusterfs.py:91 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" msgstr "" -#: cinder/virt/baremetal/dom.py:111 -msgid "domain running on an unknown node: discarded" +#: cinder/volume/drivers/glusterfs.py:103 +msgid "mount.glusterfs is not installed" msgstr "" -#: cinder/virt/baremetal/dom.py:127 +#: cinder/volume/drivers/glusterfs.py:161 #, python-format -msgid "No such domain (%s)" +msgid "Cloning volume %(src)s to volume %(dst)s" msgstr "" -#: cinder/virt/baremetal/dom.py:134 -#, python-format -msgid "Failed power down Bare-metal node %s" +#: cinder/volume/drivers/glusterfs.py:166 +msgid "Volume status must be 'available'." msgstr "" -#: cinder/virt/baremetal/dom.py:143 -msgid "deactivate -> activate fails" +#: cinder/volume/drivers/glusterfs.py:202 cinder/volume/drivers/nfs.py:122 +#: cinder/volume/drivers/netapp/nfs.py:753 +#, python-format +msgid "casted to %s" msgstr "" -#: cinder/virt/baremetal/dom.py:153 -msgid "destroy_domain: no such domain" +#: cinder/volume/drivers/glusterfs.py:216 +msgid "Snapshot status must be \"available\" to clone." msgstr "" -#: cinder/virt/baremetal/dom.py:154 +#: cinder/volume/drivers/glusterfs.py:238 #, python-format -msgid "No such domain %s" +msgid "snapshot: %(snap)s, volume: %(vol)s, volume_size: %(size)s" msgstr "" -#: cinder/virt/baremetal/dom.py:161 +#: cinder/volume/drivers/glusterfs.py:257 #, python-format -msgid "Domains: %s" +msgid "will copy from snapshot at %s" msgstr "" -#: cinder/virt/baremetal/dom.py:163 +#: cinder/volume/drivers/glusterfs.py:275 cinder/volume/drivers/nfs.py:172 #, python-format -msgid "Nodes: %s" +msgid "Volume %s does not have provider_location specified, skipping" msgstr "" -#: cinder/virt/baremetal/dom.py:166 +#: cinder/volume/drivers/glusterfs.py:373 #, python-format -msgid "After storing domains: %s" +msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" msgstr "" -#: cinder/virt/baremetal/dom.py:169 -msgid "deactivation/removing domain failed" +#: cinder/volume/drivers/glusterfs.py:403 +#, python-format +msgid "nova call result: %s" msgstr "" -#: cinder/virt/baremetal/dom.py:176 -msgid "===== Domain is being created =====" +#: cinder/volume/drivers/glusterfs.py:405 +msgid "Call to Nova to create snapshot failed" msgstr "" -#: cinder/virt/baremetal/dom.py:179 -msgid "Same domain name already exists" +#: cinder/volume/drivers/glusterfs.py:427 +msgid "Nova returned \"error\" status while creating snapshot." msgstr "" -#: cinder/virt/baremetal/dom.py:181 -msgid "create_domain: before get_idle_node" +#: cinder/volume/drivers/glusterfs.py:431 +#, python-format +msgid "Status of snapshot %(id)s is now %(status)s" msgstr "" -#: cinder/virt/baremetal/dom.py:198 +#: cinder/volume/drivers/glusterfs.py:444 #, python-format -msgid "Created new domain: %s" +msgid "Timed out while waiting for Nova update for creation of snapshot %s." 
msgstr "" -#: cinder/virt/baremetal/dom.py:213 +#: cinder/volume/drivers/glusterfs.py:456 #, python-format -msgid "Failed to boot Bare-metal node %s" +msgid "create snapshot: %s" msgstr "" -#: cinder/virt/baremetal/dom.py:222 -msgid "No such domain exists" +#: cinder/volume/drivers/glusterfs.py:457 +#, python-format +msgid "volume id: %s" msgstr "" -#: cinder/virt/baremetal/dom.py:226 -#, python-format -msgid "change_domain_state: to new state %s" +#: cinder/volume/drivers/glusterfs.py:532 +msgid "'active' must be present when writing snap_info." msgstr "" -#: cinder/virt/baremetal/dom.py:233 +#: cinder/volume/drivers/glusterfs.py:562 #, python-format -msgid "Stored fake domains to the file: %s" +msgid "deleting snapshot %s" msgstr "" -#: cinder/virt/baremetal/dom.py:244 -msgid "domain does not exist" +#: cinder/volume/drivers/glusterfs.py:566 +msgid "Volume status must be \"available\" or \"in-use\"." msgstr "" -#: cinder/virt/baremetal/nodes.py:42 +#: cinder/volume/drivers/glusterfs.py:582 #, python-format -msgid "Unknown baremetal driver %(d)s" +msgid "" +"Snapshot record for %s is not present, allowing snapshot_delete to " +"proceed." msgstr "" -#: cinder/virt/baremetal/proxy.py:148 +#: cinder/volume/drivers/glusterfs.py:587 #, python-format -msgid "Error encountered when destroying instance '%(name)s': %(ex)s" +msgid "snapshot_file for this snap is %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:162 +#: cinder/volume/drivers/glusterfs.py:608 #, python-format -msgid "instance %(instance_name)s: deleting instance files %(target)s" +msgid "No base file found for %s." msgstr "" -#: cinder/virt/baremetal/proxy.py:189 +#: cinder/volume/drivers/glusterfs.py:625 #, python-format -msgid "instance %s: rebooted" -msgstr "" - -#: cinder/virt/baremetal/proxy.py:192 -msgid "_wait_for_reboot failed" +msgid "No %(base_id)s found for %(file)s" msgstr "" -#: cinder/virt/baremetal/proxy.py:222 +#: cinder/volume/drivers/glusterfs.py:680 #, python-format -msgid "instance %s: rescued" +msgid "No file found with %s as backing file." msgstr "" -#: cinder/virt/baremetal/proxy.py:225 -msgid "_wait_for_rescue failed" +#: cinder/volume/drivers/glusterfs.py:690 +#, python-format +msgid "No snap found with %s as backing file." msgstr "" -#: cinder/virt/baremetal/proxy.py:242 -msgid "<============= spawn of baremetal =============>" +#: cinder/volume/drivers/glusterfs.py:701 +#, python-format +msgid "No file depends on %s." msgstr "" -#: cinder/virt/baremetal/proxy.py:255 +#: cinder/volume/drivers/glusterfs.py:727 #, python-format -msgid "instance %s: is building" +msgid "Check condition failed: %s expected to be None." msgstr "" -#: cinder/virt/baremetal/proxy.py:260 -msgid "Key is injected but instance is not running yet" +#: cinder/volume/drivers/glusterfs.py:778 +msgid "Call to Nova delete snapshot failed" msgstr "" -#: cinder/virt/baremetal/proxy.py:265 +#: cinder/volume/drivers/glusterfs.py:796 #, python-format -msgid "instance %s: booted" +msgid "status of snapshot %s is still \"deleting\"... waiting" msgstr "" -#: cinder/virt/baremetal/proxy.py:268 +#: cinder/volume/drivers/glusterfs.py:802 #, python-format -msgid "~~~~~~ current state = %s ~~~~~~" +msgid "Unable to delete snapshot %(id)s, status: %(status)s." msgstr "" -#: cinder/virt/baremetal/proxy.py:269 +#: cinder/volume/drivers/glusterfs.py:815 #, python-format -msgid "instance %s spawned successfully" +msgid "Timed out while waiting for Nova update for deletion of snapshot %(id)s." 
msgstr "" -#: cinder/virt/baremetal/proxy.py:272 +#: cinder/volume/drivers/glusterfs.py:904 #, python-format -msgid "instance %s:not booted" +msgid "%s must be a valid raw or qcow2 image." msgstr "" -#: cinder/virt/baremetal/proxy.py:274 -msgid "Bremetal assignment is overcommitted." +#: cinder/volume/drivers/glusterfs.py:967 +msgid "Extend volume is only supported for this driver when no snapshots exist." msgstr "" -#: cinder/virt/baremetal/proxy.py:354 +#: cinder/volume/drivers/glusterfs.py:975 #, python-format -msgid "instance %s: Creating image" +msgid "Unrecognized backing format: %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:473 +#: cinder/volume/drivers/glusterfs.py:990 #, python-format -msgid "instance %(inst_name)s: injecting %(injection)s into image %(img_id)s" +msgid "creating new volume at %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:484 +#: cinder/volume/drivers/glusterfs.py:993 #, python-format -msgid "" -"instance %(inst_name)s: ignoring error injecting data into image " -"%(img_id)s (%(e)s)" +msgid "file already exists at %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:529 +#: cinder/volume/drivers/glusterfs.py:1019 cinder/volume/drivers/nfs.py:159 #, python-format -msgid "instance %s: starting toXML method" +msgid "Exception during mounting %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:531 +#: cinder/volume/drivers/glusterfs.py:1021 #, python-format -msgid "instance %s: finished toXML method" +msgid "Available shares: %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:574 cinder/virt/libvirt/connection.py:1815 +#: cinder/volume/drivers/glusterfs.py:1038 +#, python-format msgid "" -"Cannot get the number of cpu, because this function is not implemented " -"for this platform. This error can be safely ignored for now." +"GlusterFS share at %(dir)s is not writable by the Cinder volume service. " +"Snapshot operations will not be supported." msgstr "" -#: cinder/virt/baremetal/proxy.py:714 +#: cinder/volume/drivers/gpfs.py:96 #, python-format -msgid "#### RLK: cpu_arch = %s " +msgid "GPFS is not active. Detailed output: %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:746 -msgid "Updating!" -msgstr "" - -#: cinder/virt/baremetal/proxy.py:773 cinder/virt/libvirt/connection.py:2609 -#: cinder/virt/xenapi/host.py:129 -msgid "Updating host stats" +#: cinder/volume/drivers/gpfs.py:97 +#, python-format +msgid "GPFS is not running - state: %s" msgstr "" -#: cinder/virt/baremetal/tilera.py:185 -msgid "free_node...." +#: cinder/volume/drivers/gpfs.py:140 +msgid "Option gpfs_mount_point_base is not set correctly." msgstr "" -#: cinder/virt/baremetal/tilera.py:216 -#, python-format -msgid "deactivate_node is called for node_id = %(id)s node_ip = %(ip)s" +#: cinder/volume/drivers/gpfs.py:147 +msgid "Option gpfs_images_share_mode is not set correctly." msgstr "" -#: cinder/virt/baremetal/tilera.py:221 -msgid "status of node is set to 0" +#: cinder/volume/drivers/gpfs.py:153 +msgid "Option gpfs_images_dir is not set correctly." msgstr "" -#: cinder/virt/baremetal/tilera.py:232 -msgid "rootfs is already removed" +#: cinder/volume/drivers/gpfs.py:160 +#, python-format +msgid "" +"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " +"belong to different file systems" msgstr "" -#: cinder/virt/baremetal/tilera.py:264 -msgid "Before ping to the bare-metal node" +#: cinder/volume/drivers/gpfs.py:169 +#, python-format +msgid "" +"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in " +"cluster daemon level %(cur)s - must be at least at level %(min)s." 
msgstr "" -#: cinder/virt/baremetal/tilera.py:275 +#: cinder/volume/drivers/gpfs.py:183 #, python-format -msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is ready" +msgid "%s must be an absolute path." msgstr "" -#: cinder/virt/baremetal/tilera.py:279 +#: cinder/volume/drivers/gpfs.py:188 #, python-format -msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is not ready, out_msg=%(out_msg)s" +msgid "%s is not a directory." msgstr "" -#: cinder/virt/baremetal/tilera.py:292 -msgid "Noting to do for tilera nodes: vmlinux is in CF" +#: cinder/volume/drivers/gpfs.py:197 +#, python-format +msgid "" +"The GPFS filesystem %(fs)s is not at the required release level. Current" +" level is %(cur)s, must be at least %(min)s." msgstr "" -#: cinder/virt/baremetal/tilera.py:316 -msgid "activate_node" +#: cinder/volume/drivers/gpfs.py:556 +#, python-format +msgid "Failed to resize volume %(volume_id)s, error: %(error)s" msgstr "" -#: cinder/virt/baremetal/tilera.py:330 -msgid "Node is unknown error state." +#: cinder/volume/drivers/gpfs.py:604 +#, python-format +msgid "mkfs failed on volume %(vol)s, error message was: %(err)s" msgstr "" -#: cinder/virt/disk/api.py:165 -msgid "no capable image handler configured" +#: cinder/volume/drivers/gpfs.py:637 +#, python-format +msgid "" +"%s cannot be accessed. Verify that GPFS is active and file system is " +"mounted." msgstr "" -#: cinder/virt/disk/api.py:178 +#: cinder/volume/drivers/lvm.py:189 #, python-format -msgid "unknown disk image handler: %s" +msgid "Unabled to delete due to existing snapshot for volume: %s" msgstr "" -#: cinder/virt/disk/api.py:189 -msgid "image already mounted" +#: cinder/volume/drivers/lvm.py:215 +#, python-format +msgid "Volume device file path %s does not exist." msgstr "" -#: cinder/virt/disk/api.py:276 cinder/virt/disk/guestfs.py:64 -#: cinder/virt/disk/guestfs.py:78 cinder/virt/disk/mount.py:100 +#: cinder/volume/drivers/lvm.py:221 #, python-format -msgid "Failed to mount filesystem: %s" +msgid "Size for volume: %s not found, cannot secure delete." msgstr "" -#: cinder/virt/disk/api.py:291 +#: cinder/volume/drivers/lvm.py:262 #, python-format -msgid "Failed to remove container: %s" +msgid "snapshot: %s not found, skipping delete operations" msgstr "" -#: cinder/virt/disk/api.py:441 +#: cinder/volume/drivers/lvm.py:361 #, python-format -msgid "User %(username)s not found in password file." +msgid "Unable to update stats on non-initialized Volume Group: %s" msgstr "" -#: cinder/virt/disk/api.py:457 +#: cinder/volume/drivers/lvm.py:462 #, python-format -msgid "User %(username)s not found in shadow file." 
+msgid "Error creating iSCSI target, retrying creation for target: %s" msgstr "" -#: cinder/virt/disk/guestfs.py:39 +#: cinder/volume/drivers/lvm.py:482 #, python-format -msgid "unsupported partition: %s" +msgid "volume_info:%s" msgstr "" -#: cinder/virt/disk/guestfs.py:77 -msgid "unknown guestmount error" +#: cinder/volume/drivers/lvm.py:518 +msgid "Detected inconsistency in provider_location id" msgstr "" -#: cinder/virt/disk/loop.py:30 +#: cinder/volume/drivers/lvm.py:519 cinder/volume/drivers/lvm.py:724 +#: cinder/volume/drivers/huawei/rest_common.py:1225 #, python-format -msgid "Could not attach image to loopback: %s" -msgstr "" - -#: cinder/virt/disk/mount.py:76 -msgid "no partitions found" +msgid "%s" msgstr "" -#: cinder/virt/disk/mount.py:77 +#: cinder/volume/drivers/lvm.py:573 #, python-format -msgid "Failed to map partitions: %s" +msgid "Symbolic link %s not found" msgstr "" -#: cinder/virt/disk/nbd.py:58 -msgid "nbd unavailable: module not loaded" +#: cinder/volume/drivers/nfs.py:109 +msgid "Driver specific implementation needs to return mount_point_base." msgstr "" -#: cinder/virt/disk/nbd.py:63 -msgid "No free nbd devices" +#: cinder/volume/drivers/nfs.py:263 +#, python-format +msgid "Expected volume size was %d" msgstr "" -#: cinder/virt/disk/nbd.py:81 +#: cinder/volume/drivers/nfs.py:264 #, python-format -msgid "qemu-nbd error: %s" +msgid " but size is now %d" msgstr "" -#: cinder/virt/disk/nbd.py:93 +#: cinder/volume/drivers/nfs.py:361 #, python-format -msgid "nbd device %s did not show up" +msgid "%s is already mounted" msgstr "" -#: cinder/virt/libvirt/connection.py:265 +#: cinder/volume/drivers/nfs.py:421 #, python-format -msgid "Connecting to libvirt: %s" +msgid "There's no NFS config file configured (%s)" msgstr "" -#: cinder/virt/libvirt/connection.py:286 -msgid "Connection to libvirt broke" +#: cinder/volume/drivers/nfs.py:426 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" msgstr "" -#: cinder/virt/libvirt/connection.py:388 +#: cinder/volume/drivers/nfs.py:431 #, python-format -msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s" +msgid "NFS config 'nfs_oversub_ratio' invalid. Must be > 0: %s" msgstr "" -#: cinder/virt/libvirt/connection.py:400 +#: cinder/volume/drivers/nfs.py:439 #, python-format -msgid "" -"Error from libvirt during saved instance removal. Code=%(errcode)s " -"Error=%(e)s" +msgid "NFS config 'nfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" msgstr "" -#: cinder/virt/libvirt/connection.py:411 +#: cinder/volume/drivers/nfs.py:493 #, python-format -msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s" +msgid "Selected %s as target nfs share." msgstr "" -#: cinder/virt/libvirt/connection.py:423 -msgid "Instance destroyed successfully." +#: cinder/volume/drivers/nfs.py:526 +#, python-format +msgid "%s is above nfs_used_ratio" msgstr "" -#: cinder/virt/libvirt/connection.py:435 +#: cinder/volume/drivers/nfs.py:529 #, python-format -msgid "Error from libvirt during unfilter. 
Code=%(errcode)s Error=%(e)s" +msgid "%s is above nfs_oversub_ratio" msgstr "" -#: cinder/virt/libvirt/connection.py:461 +#: cinder/volume/drivers/nfs.py:532 #, python-format -msgid "Deleting instance files %(target)s" +msgid "%s reserved space is above nfs_oversub_ratio" msgstr "" -#: cinder/virt/libvirt/connection.py:554 -msgid "attaching LXC block device" +#: cinder/volume/drivers/rbd.py:160 +#, python-format +msgid "Invalid argument - whence=%s not supported" msgstr "" -#: cinder/virt/libvirt/connection.py:567 -msgid "detaching LXC block device" +#: cinder/volume/drivers/rbd.py:164 +msgid "Invalid argument" msgstr "" -#: cinder/virt/libvirt/connection.py:692 -msgid "Instance soft rebooted successfully." +#: cinder/volume/drivers/rbd.py:183 +msgid "fileno() not supported by RBD()" msgstr "" -#: cinder/virt/libvirt/connection.py:696 -msgid "Failed to soft reboot instance." +#: cinder/volume/drivers/rbd.py:210 +#, python-format +msgid "error opening rbd image %s" msgstr "" -#: cinder/virt/libvirt/connection.py:725 -msgid "Instance shutdown successfully." +#: cinder/volume/drivers/rbd.py:259 +msgid "rados and rbd python libraries not found" msgstr "" -#: cinder/virt/libvirt/connection.py:761 cinder/virt/libvirt/connection.py:905 -msgid "During reboot, instance disappeared." +#: cinder/volume/drivers/rbd.py:265 +msgid "error connecting to ceph cluster" msgstr "" -#: cinder/virt/libvirt/connection.py:766 -msgid "Instance rebooted successfully." +#: cinder/volume/drivers/rbd.py:346 cinder/volume/drivers/sheepdog.py:178 +msgid "error refreshing volume stats" msgstr "" -#: cinder/virt/libvirt/connection.py:867 cinder/virt/xenapi/vmops.py:1358 +#: cinder/volume/drivers/rbd.py:377 #, python-format -msgid "" -"Found %(migration_count)d unconfirmed migrations older than " -"%(confirm_window)d seconds" +msgid "clone depth exceeds limit of %s" msgstr "" -#: cinder/virt/libvirt/connection.py:871 +#: cinder/volume/drivers/rbd.py:411 #, python-format -msgid "Automatically confirming migration %d" +msgid "maximum clone depth (%d) has been reached - flattening source volume" msgstr "" -#: cinder/virt/libvirt/connection.py:896 -msgid "Instance is running" +#: cinder/volume/drivers/rbd.py:423 +#, python-format +msgid "flattening source volume %s" msgstr "" -#: cinder/virt/libvirt/connection.py:910 -msgid "Instance spawned successfully." 
+#: cinder/volume/drivers/rbd.py:435 +#, python-format +msgid "creating snapshot='%s'" msgstr "" -#: cinder/virt/libvirt/connection.py:926 +#: cinder/volume/drivers/rbd.py:445 #, python-format -msgid "data: %(data)r, fpath: %(fpath)r" +msgid "cloning '%(src_vol)s@%(src_snap)s' to '%(dest)s'" msgstr "" -#: cinder/virt/libvirt/connection.py:978 -msgid "Guest does not have a console available" +#: cinder/volume/drivers/rbd.py:459 +msgid "clone created successfully" msgstr "" -#: cinder/virt/libvirt/connection.py:1020 +#: cinder/volume/drivers/rbd.py:468 #, python-format -msgid "Path '%(path)s' supports direct I/O" +msgid "creating volume '%s'" msgstr "" -#: cinder/virt/libvirt/connection.py:1024 +#: cinder/volume/drivers/rbd.py:484 #, python-format -msgid "Path '%(path)s' does not support direct I/O: '%(ex)s'" +msgid "flattening %(pool)s/%(img)s" msgstr "" -#: cinder/virt/libvirt/connection.py:1028 cinder/virt/libvirt/connection.py:1032 +#: cinder/volume/drivers/rbd.py:490 #, python-format -msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'" +msgid "cloning %(pool)s/%(img)s@%(snap)s to %(dst)s" msgstr "" -#: cinder/virt/libvirt/connection.py:1153 -msgid "Creating image" +#: cinder/volume/drivers/rbd.py:527 +msgid "volume has no backup snaps" msgstr "" -#: cinder/virt/libvirt/connection.py:1339 +#: cinder/volume/drivers/rbd.py:550 #, python-format -msgid "Injecting %(injection)s into image %(img_id)s" +msgid "volume %s is not a clone" msgstr "" -#: cinder/virt/libvirt/connection.py:1349 +#: cinder/volume/drivers/rbd.py:568 #, python-format -msgid "Ignoring error injecting data into image %(img_id)s (%(e)s)" +msgid "deleting parent snapshot %s" msgstr "" -#: cinder/virt/libvirt/connection.py:1381 +#: cinder/volume/drivers/rbd.py:579 #, python-format -msgid "block_device_list %s" +msgid "deleting parent %s" msgstr "" -#: cinder/virt/libvirt/connection.py:1658 -msgid "Starting toXML method" +#: cinder/volume/drivers/rbd.py:593 +#, python-format +msgid "volume %s no longer exists in backend" msgstr "" -#: cinder/virt/libvirt/connection.py:1662 -msgid "Finished toXML method" +#: cinder/volume/drivers/rbd.py:609 +msgid "volume has clone snapshot(s)" msgstr "" -#: cinder/virt/libvirt/connection.py:1679 +#: cinder/volume/drivers/rbd.py:625 #, python-format -msgid "" -"Error from libvirt while looking up %(instance_name)s: [Error Code " -"%(error_code)s] %(ex)s" +msgid "deleting rbd volume %s" msgstr "" -#: cinder/virt/libvirt/connection.py:1920 -msgid "libvirt version is too old (does not support getVersion)" +#: cinder/volume/drivers/rbd.py:629 +msgid "" +"ImageBusy error raised while deleting rbd volume. This may have been " +"caused by a connection from a client that has crashed and, if so, may be " +"resolved by retrying the delete after 30 seconds has elapsed." 
msgstr "" -#: cinder/virt/libvirt/connection.py:1942 -#, python-format -msgid "'' must be 1, but %d\n" +#: cinder/volume/drivers/rbd.py:642 +msgid "volume is a clone so cleaning references" msgstr "" -#: cinder/virt/libvirt/connection.py:1969 +#: cinder/volume/drivers/rbd.py:696 #, python-format -msgid "topology (%(topology)s) must have %(ks)s" +msgid "connection data: %s" msgstr "" -#: cinder/virt/libvirt/connection.py:2067 -#, python-format -msgid "" -"Instance launched has CPU info:\n" -"%s" +#: cinder/volume/drivers/rbd.py:705 +msgid "Not stored in rbd" msgstr "" -#: cinder/virt/libvirt/connection.py:2079 -#, python-format -msgid "" -"CPU doesn't have compatibility.\n" -"\n" -"%(ret)s\n" -"\n" -"Refer to %(u)s" +#: cinder/volume/drivers/rbd.py:709 +msgid "Blank components" msgstr "" -#: cinder/virt/libvirt/connection.py:2136 -#, python-format -msgid "Timeout migrating for %s. nwfilter not found." -msgstr "" +#: cinder/volume/drivers/rbd.py:712 +#, fuzzy +msgid "Not an rbd snapshot" +msgstr "인스턴스 %s: 스냅샷 저장중" -#: cinder/virt/libvirt/connection.py:2352 +#: cinder/volume/drivers/rbd.py:724 #, python-format -msgid "skipping %(path)s since it looks like volume" +msgid "not cloneable: %s" msgstr "" -#: cinder/virt/libvirt/connection.py:2407 +#: cinder/volume/drivers/rbd.py:728 #, python-format -msgid "Getting disk size of %(i_name)s: %(e)s" +msgid "%s is in a different ceph cluster" msgstr "" -#: cinder/virt/libvirt/connection.py:2458 -#, python-format -msgid "Instance %s: Starting migrate_disk_and_power_off" +#: cinder/volume/drivers/rbd.py:733 +msgid "rbd image clone requires image format to be 'raw' but image {0} is '{1}'" msgstr "" -#: cinder/virt/libvirt/connection.py:2513 -msgid "During wait running, instance disappeared." +#: cinder/volume/drivers/rbd.py:747 +#, fuzzy, python-format +msgid "Unable to open image %(loc)s: %(err)s" +msgstr "%s 볼륨을 찾을수 없습니다" + +#: cinder/volume/drivers/rbd.py:817 +msgid "volume backup complete." msgstr "" -#: cinder/virt/libvirt/connection.py:2518 -msgid "Instance running successfully." +#: cinder/volume/drivers/rbd.py:830 +msgid "volume restore complete." msgstr "" -#: cinder/virt/libvirt/connection.py:2525 +#: cinder/volume/drivers/rbd.py:840 cinder/volume/drivers/sheepdog.py:195 #, python-format -msgid "Instance %s: Starting finish_migration" +msgid "Failed to Extend Volume %(volname)s" msgstr "" -#: cinder/virt/libvirt/connection.py:2565 +#: cinder/volume/drivers/rbd.py:845 cinder/volume/drivers/sheepdog.py:200 +#: cinder/volume/drivers/windows/windows.py:223 #, python-format -msgid "Instance %s: Starting finish_revert_migration" +msgid "Extend volume from %(old_size)s GB to %(new_size)s GB." msgstr "" -#: cinder/virt/libvirt/firewall.py:42 -msgid "" -"Libvirt module could not be loaded. NWFilterFirewall will not work " -"correctly." +#: cinder/volume/drivers/scality.py:67 +msgid "Value required for 'scality_sofs_config'" msgstr "" -#: cinder/virt/libvirt/firewall.py:93 -msgid "Called setup_basic_filtering in nwfilter" +#: cinder/volume/drivers/scality.py:78 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" msgstr "" -#: cinder/virt/libvirt/firewall.py:101 -msgid "Ensuring static filters" +#: cinder/volume/drivers/scality.py:84 +msgid "Cannot execute /sbin/mount.sofs" msgstr "" -#: cinder/virt/libvirt/firewall.py:171 -#, python-format -msgid "The nwfilter(%(instance_filter_name)s) is not found." 
+#: cinder/volume/drivers/scality.py:105 +msgid "Cannot mount Scality SOFS, check syslog for errors" msgstr "" -#: cinder/virt/libvirt/firewall.py:217 +#: cinder/volume/drivers/scality.py:139 #, python-format -msgid "The nwfilter(%(instance_filter_name)s) for%(name)s is not found." +msgid "Cannot find volume dir for Scality SOFS at '%s'" msgstr "" -#: cinder/virt/libvirt/firewall.py:233 -msgid "iptables firewall: Setup Basic Filtering" +#: cinder/volume/drivers/sheepdog.py:59 +#, python-format +msgid "Sheepdog is not working: %s" msgstr "" -#: cinder/virt/libvirt/firewall.py:252 -msgid "Attempted to unfilter instance which is not filtered" +#: cinder/volume/drivers/sheepdog.py:64 +msgid "Sheepdog is not working" msgstr "" -#: cinder/virt/libvirt/imagecache.py:170 +#: cinder/volume/drivers/solidfire.py:144 #, python-format -msgid "%s is a valid instance name" +msgid "Payload for SolidFire API call: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:173 +#: cinder/volume/drivers/solidfire.py:151 #, python-format -msgid "%s has a disk file" +msgid "" +"Failed to make httplib connection SolidFire Cluster: %s (verify san_ip " +"settings)" msgstr "" -#: cinder/virt/libvirt/imagecache.py:175 +#: cinder/volume/drivers/solidfire.py:154 #, python-format -msgid "Instance %(instance)s is backed by %(backing)s" +msgid "Failed to make httplib connection: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:186 +#: cinder/volume/drivers/solidfire.py:161 #, python-format msgid "" -"Instance %(instance)s is using a backing file %(backing)s which does not " -"appear in the image service" +"Request to SolidFire cluster returned bad status: %(status)s / %(reason)s" +" (check san_login/san_password settings)" msgstr "" -#: cinder/virt/libvirt/imagecache.py:237 +#: cinder/volume/drivers/solidfire.py:166 #, python-format -msgid "%(id)s (%(base_file)s): image verification failed" +msgid "HTTP request failed, with status: %(status)s and reason: %(reason)s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:247 +#: cinder/volume/drivers/solidfire.py:177 #, python-format -msgid "%(id)s (%(base_file)s): image verification skipped, no hash stored" +msgid "Call to json.loads() raised an exception: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:266 +#: cinder/volume/drivers/solidfire.py:183 #, python-format -msgid "Cannot remove %(base_file)s, it does not exist" +msgid "Results of SolidFire API call: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:278 +#: cinder/volume/drivers/solidfire.py:187 #, python-format -msgid "Base file too young to remove: %s" +msgid "Clone operation encountered: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:281 +#: cinder/volume/drivers/solidfire.py:189 #, python-format -msgid "Removing base file: %s" +msgid "Waiting for outstanding operation before retrying snapshot: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:288 +#: cinder/volume/drivers/solidfire.py:195 #, python-format -msgid "Failed to remove %(base_file)s, error was %(error)s" +msgid "Detected xDBVersionMismatch, retry %s of 5" msgstr "" -#: cinder/virt/libvirt/imagecache.py:299 +#: cinder/volume/drivers/solidfire.py:202 +#: cinder/volume/drivers/solidfire.py:271 +#: cinder/volume/drivers/solidfire.py:366 #, python-format -msgid "%(id)s (%(base_file)s): checking" +msgid "API response: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:318 +#: cinder/volume/drivers/solidfire.py:222 #, python-format -msgid "" -"%(id)s (%(base_file)s): in use: on this node %(local)d local, %(remote)d " -"on other nodes" +msgid "Found 
solidfire account: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:330 +#: cinder/volume/drivers/solidfire.py:253 #, python-format -msgid "" -"%(id)s (%(base_file)s): warning -- an absent base file is in use! " -"instances: %(instance_list)s" +msgid "solidfire account: %s does not exist, create it..." msgstr "" -#: cinder/virt/libvirt/imagecache.py:338 +#: cinder/volume/drivers/solidfire.py:315 #, python-format -msgid "%(id)s (%(base_file)s): in use on (%(remote)d on other nodes)" +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" msgstr "" -#: cinder/virt/libvirt/imagecache.py:348 -#, python-format -msgid "%(id)s (%(base_file)s): image is not in use" +#: cinder/volume/drivers/solidfire.py:398 +msgid "Failed to get model update from clone" msgstr "" -#: cinder/virt/libvirt/imagecache.py:354 +#: cinder/volume/drivers/solidfire.py:410 #, python-format -msgid "%(id)s (%(base_file)s): image is in use" +msgid "Failed volume create: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:377 +#: cinder/volume/drivers/solidfire.py:425 #, python-format -msgid "Skipping verification, no base directory at %s" +msgid "More than one valid preset was detected, using %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:381 -msgid "Verify base images" +#: cinder/volume/drivers/solidfire.py:460 +#, python-format +msgid "Failed to get SolidFire Volume: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:388 +#: cinder/volume/drivers/solidfire.py:469 #, python-format -msgid "Image id %(id)s yields fingerprint %(fingerprint)s" +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." msgstr "" -#: cinder/virt/libvirt/imagecache.py:406 +#: cinder/volume/drivers/solidfire.py:478 #, python-format -msgid "Unknown base file: %s" +msgid "Volume %s, not found on SF Cluster." msgstr "" -#: cinder/virt/libvirt/imagecache.py:411 +#: cinder/volume/drivers/solidfire.py:481 #, python-format -msgid "Active base files: %s" +msgid "Found %(count)s volumes mapped to id: %(uuid)s." msgstr "" -#: cinder/virt/libvirt/imagecache.py:414 -#, python-format -msgid "Corrupt base files: %s" +#: cinder/volume/drivers/solidfire.py:550 +msgid "Enter SolidFire delete_volume..." msgstr "" -#: cinder/virt/libvirt/imagecache.py:418 +#: cinder/volume/drivers/solidfire.py:554 #, python-format -msgid "Removable base files: %s" +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" msgstr "" -#: cinder/virt/libvirt/imagecache.py:426 -msgid "Verification complete" +#: cinder/volume/drivers/solidfire.py:556 +msgid "This usually means the volume was never successfully created." msgstr "" -#: cinder/virt/libvirt/utils.py:264 -msgid "Unable to find an open port" +#: cinder/volume/drivers/solidfire.py:569 +#, python-format +msgid "Failed to delete SolidFire Volume: %s" msgstr "" -#: cinder/virt/libvirt/vif.py:90 +#: cinder/volume/drivers/solidfire.py:572 +#: cinder/volume/drivers/solidfire.py:646 +#: cinder/volume/drivers/solidfire.py:709 +#: cinder/volume/drivers/solidfire.py:734 #, python-format -msgid "Ensuring vlan %(vlan)s and bridge %(bridge)s" +msgid "Volume ID %s was not found on the SolidFire Cluster!" 
msgstr "" -#: cinder/virt/libvirt/vif.py:99 -#, python-format -msgid "Ensuring bridge %s" +#: cinder/volume/drivers/solidfire.py:575 +msgid "Leaving SolidFire delete_volume" msgstr "" -#: cinder/virt/libvirt/vif.py:165 cinder/virt/libvirt/vif.py:220 -#, python-format -msgid "Failed while unplugging vif of instance '%s'" +#: cinder/volume/drivers/solidfire.py:579 +msgid "Executing SolidFire ensure_export..." msgstr "" -#: cinder/virt/libvirt/volume.py:163 -#, python-format -msgid "iSCSI device not found at %s" +#: cinder/volume/drivers/solidfire.py:587 +msgid "Executing SolidFire create_export..." msgstr "" -#: cinder/virt/libvirt/volume.py:166 -#, python-format -msgid "" -"ISCSI volume not yet found at: %(mount_device)s. Will rescan & retry. " -"Try number: %(tries)s" +#: cinder/volume/drivers/solidfire.py:638 +msgid "Entering SolidFire extend_volume..." msgstr "" -#: cinder/virt/libvirt/volume.py:178 -#, python-format -msgid "Found iSCSI node %(mount_device)s (after %(tries)s rescans)" +#: cinder/volume/drivers/solidfire.py:660 +msgid "Leaving SolidFire extend_volume" msgstr "" -#: cinder/virt/vmwareapi/error_util.py:93 -#, python-format -msgid "Error(s) %s occurred in the call to RetrieveProperties" +#: cinder/volume/drivers/solidfire.py:665 +msgid "Updating cluster status info" msgstr "" -#: cinder/virt/vmwareapi/fake.py:44 cinder/virt/xenapi/fake.py:77 -#, python-format -msgid "%(text)s: _db_content => %(content)s" +#: cinder/volume/drivers/solidfire.py:673 +msgid "Failed to get updated stats" msgstr "" -#: cinder/virt/vmwareapi/fake.py:131 -#, python-format -msgid "Property %(attr)s not set for the managed object %(objName)s" +#: cinder/volume/drivers/solidfire.py:703 +#: cinder/volume/drivers/solidfire.py:728 +msgid "Entering SolidFire attach_volume..." msgstr "" -#: cinder/virt/vmwareapi/fake.py:437 -msgid "There is no VM registered" +#: cinder/volume/drivers/solidfire.py:773 +msgid "Leaving SolidFire transfer volume" msgstr "" -#: cinder/virt/vmwareapi/fake.py:439 cinder/virt/vmwareapi/fake.py:609 +#: cinder/volume/drivers/zadara.py:236 #, python-format -msgid "Virtual Machine with ref %s is not there" +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" msgstr "" -#: cinder/virt/vmwareapi/fake.py:502 +#: cinder/volume/drivers/zadara.py:260 #, python-format -msgid "Logging out a session that is invalid or already logged out: %s" +msgid "Operation completed. %(data)s" msgstr "" -#: cinder/virt/vmwareapi/fake.py:517 -msgid "Session is faulty" +#: cinder/volume/drivers/zadara.py:357 +#, python-format +msgid "Pool %(name)s: %(total)sGB total, %(free)sGB free" msgstr "" -#: cinder/virt/vmwareapi/fake.py:520 -msgid "Session Invalid" +#: cinder/volume/drivers/zadara.py:408 cinder/volume/drivers/zadara.py:531 +#, python-format +msgid "Volume %(name)s could not be found. 
It might be already deleted" msgstr "" -#: cinder/virt/vmwareapi/fake.py:606 -msgid " No Virtual Machine has been registered yet" +#: cinder/volume/drivers/zadara.py:438 +#, python-format +msgid "Create snapshot: %s" msgstr "" -#: cinder/virt/vmwareapi/io_util.py:99 +#: cinder/volume/drivers/zadara.py:445 cinder/volume/drivers/zadara.py:490 +#: cinder/volume/drivers/zadara.py:516 #, python-format -msgid "Glance image %s is in killed state" +msgid "Volume %(name)s not found" msgstr "" -#: cinder/virt/vmwareapi/io_util.py:107 +#: cinder/volume/drivers/zadara.py:456 #, python-format -msgid "Glance image %(image_id)s is in unknown state - %(state)s" +msgid "Delete snapshot: %s" msgstr "" -#: cinder/virt/vmwareapi/network_utils.py:128 -msgid "" -"ESX SOAP server returned an empty port group for the host system in its " -"response" +#: cinder/volume/drivers/zadara.py:464 +#, python-format +msgid "snapshot: original volume %s not found, skipping delete operation" msgstr "" -#: cinder/virt/vmwareapi/network_utils.py:155 +#: cinder/volume/drivers/zadara.py:472 #, python-format -msgid "Creating Port Group with name %s on the ESX host" +msgid "snapshot: snapshot %s not found, skipping delete operation" msgstr "" -#: cinder/virt/vmwareapi/network_utils.py:169 +#: cinder/volume/drivers/zadara.py:483 #, python-format -msgid "Created Port Group with name %s on the ESX host" +msgid "Creating volume from snapshot: %s" msgstr "" -#: cinder/virt/vmwareapi/read_write_util.py:150 +#: cinder/volume/drivers/zadara.py:496 #, python-format -msgid "Exception during HTTP connection close in VMWareHTTpWrite. Exception is %s" +msgid "Snapshot %(name)s not found" msgstr "" -#: cinder/virt/vmwareapi/vim.py:84 -msgid "Unable to import suds." +#: cinder/volume/drivers/zadara.py:614 +#, python-format +msgid "Attach properties: %(properties)s" msgstr "" -#: cinder/virt/vmwareapi/vim.py:90 -msgid "Must specify vmwareapi_wsdl_loc" +#: cinder/volume/drivers/emc/emc_smis_common.py:40 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." msgstr "" -#: cinder/virt/vmwareapi/vim.py:145 -#, python-format -msgid "No such SOAP method '%s' provided by VI SDK" +#: cinder/volume/drivers/emc/emc_smis_common.py:79 +msgid "Entering create_volume." msgstr "" -#: cinder/virt/vmwareapi/vim.py:150 +#: cinder/volume/drivers/emc/emc_smis_common.py:83 #, python-format -msgid "httplib error in %s: " +msgid "Create Volume: %(volume)s Size: %(size)lu" msgstr "" -#: cinder/virt/vmwareapi/vim.py:157 +#: cinder/volume/drivers/emc/emc_smis_common.py:91 #, python-format -msgid "Socket error in %s: " +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" msgstr "" -#: cinder/virt/vmwareapi/vim.py:162 +#: cinder/volume/drivers/emc/emc_smis_common.py:98 #, python-format -msgid "Type error in %s: " +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" msgstr "" -#: cinder/virt/vmwareapi/vim.py:166 +#: cinder/volume/drivers/emc/emc_smis_common.py:107 #, python-format -msgid "Exception in %s " +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." 
msgstr "" -#: cinder/virt/vmwareapi/vmops.py:66 -msgid "Getting list of instances" +#: cinder/volume/drivers/emc/emc_smis_common.py:115 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:82 +#: cinder/volume/drivers/emc/emc_smis_common.py:130 #, python-format -msgid "Got total of %s instances" +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:126 -msgid "Couldn't get a local Datastore reference" +#: cinder/volume/drivers/emc/emc_smis_common.py:137 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:196 +#: cinder/volume/drivers/emc/emc_smis_common.py:144 #, python-format -msgid "Creating VM with the name %s on the ESX host" +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:205 -#, python-format -msgid "Created VM with the name %s on the ESX host" +#: cinder/volume/drivers/emc/emc_smis_common.py:152 +msgid "Entering create_volume_from_snapshot." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:234 +#: cinder/volume/drivers/emc/emc_smis_common.py:157 #, python-format msgid "" -"Creating Virtual Disk of size %(vmdk_file_size_in_kb)s KB and adapter " -"type %(adapter_type)s on the ESX host local store %(data_store_name)s" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:251 +#: cinder/volume/drivers/emc/emc_smis_common.py:167 #, python-format msgid "" -"Created Virtual Disk of size %(vmdk_file_size_in_kb)s KB on the ESX host " -"local store %(data_store_name)s" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:260 +#: cinder/volume/drivers/emc/emc_smis_common.py:177 #, python-format msgid "" -"Deleting the file %(flat_uploaded_vmdk_path)s on the ESX host localstore " -"%(data_store_name)s" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:272 +#: cinder/volume/drivers/emc/emc_smis_common.py:188 #, python-format msgid "" -"Deleted the file %(flat_uploaded_vmdk_path)s on the ESX host local store " -"%(data_store_name)s" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." 
msgstr "" -#: cinder/virt/vmwareapi/vmops.py:283 +#: cinder/volume/drivers/emc/emc_smis_common.py:197 #, python-format msgid "" -"Downloading image file data %(image_ref)s to the ESX data store " -"%(data_store_name)s" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:298 +#: cinder/volume/drivers/emc/emc_smis_common.py:218 #, python-format msgid "" -"Downloaded image file data %(image_ref)s to the ESX data store " -"%(data_store_name)s" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:315 +#: cinder/volume/drivers/emc/emc_smis_common.py:230 #, python-format -msgid "Reconfiguring VM instance %s to attach the image disk" +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:322 +#: cinder/volume/drivers/emc/emc_smis_common.py:241 #, python-format -msgid "Reconfigured VM instance %s to attach the image disk" +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:329 +#: cinder/volume/drivers/emc/emc_smis_common.py:257 #, python-format -msgid "Powering on the VM instance %s" +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:335 +#: cinder/volume/drivers/emc/emc_smis_common.py:266 #, python-format -msgid "Powered on the VM instance %s" +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:381 +#: cinder/volume/drivers/emc/emc_smis_common.py:278 #, python-format -msgid "Creating Snapshot of the VM instance %s " +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:391 -#, python-format -msgid "Created Snapshot of the VM instance %s " +#: cinder/volume/drivers/emc/emc_smis_common.py:287 +msgid "Entering create_cloned_volume." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:434 +#: cinder/volume/drivers/emc/emc_smis_common.py:292 #, python-format -msgid "Copying disk data before snapshot of the VM instance %s" +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:447 +#: cinder/volume/drivers/emc/emc_smis_common.py:302 #, python-format -msgid "Copied disk data before snapshot of the VM instance %s" +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:456 +#: cinder/volume/drivers/emc/emc_smis_common.py:312 #, python-format -msgid "Uploading image %s" +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. 
Cannot find Replication Service to create cloned volume." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:469 +#: cinder/volume/drivers/emc/emc_smis_common.py:321 #, python-format -msgid "Uploaded image %s" +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:479 +#: cinder/volume/drivers/emc/emc_smis_common.py:342 #, python-format -msgid "Deleting temporary vmdk file %s" +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. Return code: %(rc)lu.Error: %(error)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:488 +#: cinder/volume/drivers/emc/emc_smis_common.py:354 #, python-format -msgid "Deleted temporary vmdk file %s" -msgstr "" - -#: cinder/virt/vmwareapi/vmops.py:520 -msgid "instance is not powered on" +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:527 +#: cinder/volume/drivers/emc/emc_smis_common.py:365 #, python-format -msgid "Rebooting guest OS of VM %s" +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:530 +#: cinder/volume/drivers/emc/emc_smis_common.py:381 #, python-format -msgid "Rebooted guest OS of VM %s" +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:532 +#: cinder/volume/drivers/emc/emc_smis_common.py:390 #, python-format -msgid "Doing hard reboot of VM %s" +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:536 +#: cinder/volume/drivers/emc/emc_smis_common.py:402 #, python-format -msgid "Did hard reboot of VM %s" +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:548 -#, python-format -msgid "instance - %s not present" +#: cinder/volume/drivers/emc/emc_smis_common.py:411 +msgid "Entering delete_volume." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:567 +#: cinder/volume/drivers/emc/emc_smis_common.py:413 #, python-format -msgid "Powering off the VM %s" +msgid "Delete Volume: %(volume)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:572 +#: cinder/volume/drivers/emc/emc_smis_common.py:420 #, python-format -msgid "Powered off the VM %s" +msgid "Volume %(name)s not found on the array. No volume to delete." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:576 +#: cinder/volume/drivers/emc/emc_smis_common.py:430 #, python-format -msgid "Unregistering the VM %s" +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." 
msgstr "" -#: cinder/virt/vmwareapi/vmops.py:579 +#: cinder/volume/drivers/emc/emc_smis_common.py:438 #, python-format -msgid "Unregistered the VM %s" +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:581 +#: cinder/volume/drivers/emc/emc_smis_common.py:442 #, python-format msgid "" -"In vmwareapi:vmops:destroy, got this exception while un-registering the " -"VM: %s" -msgstr "" - -#: cinder/virt/vmwareapi/vmops.py:592 -#, python-format -msgid "Deleting contents of the VM %(name)s from datastore %(datastore_name)s" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:602 +#: cinder/volume/drivers/emc/emc_smis_common.py:456 #, python-format -msgid "Deleted contents of the VM %(name)s from datastore %(datastore_name)s" +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:607 +#: cinder/volume/drivers/emc/emc_smis_common.py:465 #, python-format -msgid "" -"In vmwareapi:vmops:destroy, got this exception while deleting the VM " -"contents from the disk: %s" +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:615 -msgid "pause not supported for vmwareapi" +#: cinder/volume/drivers/emc/emc_smis_common.py:472 +msgid "Entering create_snapshot." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:619 -msgid "unpause not supported for vmwareapi" +#: cinder/volume/drivers/emc/emc_smis_common.py:476 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:633 +#: cinder/volume/drivers/emc/emc_smis_common.py:488 #, python-format -msgid "Suspending the VM %s " +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:637 +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:497 +#: cinder/volume/drivers/emc/emc_smis_common.py:567 #, python-format -msgid "Suspended the VM %s " +msgid "Cannot find Replication Service to create snapshot for volume %s." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:640 -msgid "instance is powered off and can not be suspended." +#: cinder/volume/drivers/emc/emc_smis_common.py:502 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:643 +#: cinder/volume/drivers/emc/emc_smis_common.py:518 #, python-format -msgid "VM %s was already in suspended state. So returning without doing anything" +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:656 +#: cinder/volume/drivers/emc/emc_smis_common.py:527 #, python-format -msgid "Resuming the VM %s" +msgid "" +"Error Create Snapshot: %(snapshot)s Volume: %(volume)s Error: " +"%(errordesc)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:661 +#: cinder/volume/drivers/emc/emc_smis_common.py:535 #, python-format -msgid "Resumed the VM %s " +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." 
msgstr "" -#: cinder/virt/vmwareapi/vmops.py:663 -msgid "instance is not in a suspended state" +#: cinder/volume/drivers/emc/emc_smis_common.py:541 +msgid "Entering delete_snapshot." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:699 -msgid "get_diagnostics not implemented for vmwareapi" +#: cinder/volume/drivers/emc/emc_smis_common.py:545 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:757 +#: cinder/volume/drivers/emc/emc_smis_common.py:551 #, python-format msgid "" -"Reconfiguring VM instance %(name)s to set the machine id with ip - " -"%(ip_addr)s" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:765 +#: cinder/volume/drivers/emc/emc_smis_common.py:559 #, python-format msgid "" -"Reconfigured VM instance %(name)s to set the machine id with ip - " -"%(ip_addr)s" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:802 +#: cinder/volume/drivers/emc/emc_smis_common.py:574 #, python-format -msgid "Creating directory with path %s" +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:806 +#: cinder/volume/drivers/emc/emc_smis_common.py:590 #, python-format -msgid "Created directory with path %s" +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:89 +#: cinder/volume/drivers/emc/emc_smis_common.py:599 #, python-format -msgid "Downloading image %s from glance image server" +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:103 +#: cinder/volume/drivers/emc/emc_smis_common.py:611 #, python-format -msgid "Downloaded image %s from glance image server" +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." 
msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:108 +#: cinder/volume/drivers/emc/emc_smis_common.py:621 #, python-format -msgid "Uploading image %s to the Glance image server" +msgid "Create export: %(volume)s" msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:129 +#: cinder/volume/drivers/emc/emc_smis_common.py:626 #, python-format -msgid "Uploaded image %s to the Glance image server" +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:139 +#: cinder/volume/drivers/emc/emc_smis_common.py:648 #, python-format -msgid "Getting image size for the image %s" +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:143 +#: cinder/volume/drivers/emc/emc_smis_common.py:663 #, python-format -msgid "Got image size of %(size)s for the image %(image)s" +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" msgstr "" -#: cinder/virt/xenapi/fake.py:553 cinder/virt/xenapi/fake.py:652 -#: cinder/virt/xenapi/fake.py:670 cinder/virt/xenapi/fake.py:732 -msgid "Raising NotImplemented" +#: cinder/volume/drivers/emc/emc_smis_common.py:674 +#, python-format +msgid "Error mapping volume %s." msgstr "" -#: cinder/virt/xenapi/fake.py:555 +#: cinder/volume/drivers/emc/emc_smis_common.py:678 #, python-format -msgid "xenapi.fake does not have an implementation for %s" +msgid "ExposePaths for volume %s completed successfully." msgstr "" -#: cinder/virt/xenapi/fake.py:589 +#: cinder/volume/drivers/emc/emc_smis_common.py:694 #, python-format -msgid "Calling %(localname)s %(impl)s" +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" msgstr "" -#: cinder/virt/xenapi/fake.py:594 +#: cinder/volume/drivers/emc/emc_smis_common.py:707 #, python-format -msgid "Calling getter %s" +msgid "Error unmapping volume %s." msgstr "" -#: cinder/virt/xenapi/fake.py:654 +#: cinder/volume/drivers/emc/emc_smis_common.py:711 #, python-format -msgid "" -"xenapi.fake does not have an implementation for %s or it has been called " -"with the wrong number of arguments" +msgid "HidePaths for volume %s completed successfully." msgstr "" -#: cinder/virt/xenapi/host.py:67 +#: cinder/volume/drivers/emc/emc_smis_common.py:724 #, python-format msgid "" -"Instance %(name)s running on %(host)s could not be found in the database:" -" assuming it is a worker VM and skipping migration to a new host" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" msgstr "" -#: cinder/virt/xenapi/host.py:137 +#: cinder/volume/drivers/emc/emc_smis_common.py:739 #, python-format -msgid "Unable to get SR for this host: %s" +msgid "Error mapping volume %(vol)s. %(error)s" msgstr "" -#: cinder/virt/xenapi/host.py:169 -msgid "Unable to get updated status" +#: cinder/volume/drivers/emc/emc_smis_common.py:744 +#, python-format +msgid "AddMembers for volume %s completed successfully." msgstr "" -#: cinder/virt/xenapi/host.py:172 +#: cinder/volume/drivers/emc/emc_smis_common.py:757 #, python-format -msgid "The call to %(method)s returned an error: %(e)s." 
+msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" msgstr "" -#: cinder/virt/xenapi/network_utils.py:37 +#: cinder/volume/drivers/emc/emc_smis_common.py:770 #, python-format -msgid "Found non-unique network for name_label %s" +msgid "Error unmapping volume %(vol)s. %(error)s" msgstr "" -#: cinder/virt/xenapi/network_utils.py:55 +#: cinder/volume/drivers/emc/emc_smis_common.py:775 #, python-format -msgid "Found non-unique network for bridge %s" +msgid "RemoveMembers for volume %s completed successfully." msgstr "" -#: cinder/virt/xenapi/network_utils.py:58 +#: cinder/volume/drivers/emc/emc_smis_common.py:781 #, python-format -msgid "Found no network for bridge %s" +msgid "Map volume: %(volume)s" msgstr "" -#: cinder/virt/xenapi/pool.py:111 +#: cinder/volume/drivers/emc/emc_smis_common.py:790 +#: cinder/volume/drivers/emc/emc_smis_common.py:820 #, python-format -msgid "Unable to eject %(host)s from the pool; pool not empty" +msgid "Cannot find Controller Configuration Service for storage system %s" msgstr "" -#: cinder/virt/xenapi/pool.py:126 +#: cinder/volume/drivers/emc/emc_smis_common.py:804 #, python-format -msgid "Unable to eject %(host)s from the pool; No master found" +msgid "Unmap volume: %(volume)s" msgstr "" -#: cinder/virt/xenapi/pool.py:143 +#: cinder/volume/drivers/emc/emc_smis_common.py:810 #, python-format -msgid "Pool-Join failed: %(e)s" +msgid "Volume %s is not mapped. No volume to unmap." msgstr "" -#: cinder/virt/xenapi/pool.py:146 +#: cinder/volume/drivers/emc/emc_smis_common.py:834 #, python-format -msgid "Unable to join %(host)s in the pool" +msgid "Initialize connection: %(volume)s" msgstr "" -#: cinder/virt/xenapi/pool.py:162 +#: cinder/volume/drivers/emc/emc_smis_common.py:840 #, python-format -msgid "Pool-eject failed: %(e)s" +msgid "Volume %s is already mapped." msgstr "" -#: cinder/virt/xenapi/pool.py:174 -#, fuzzy, python-format -msgid "Unable to set up pool: %(e)s." -msgstr "%s 볼륨 탈착에 실패했습니다" +#: cinder/volume/drivers/emc/emc_smis_common.py:852 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" -#: cinder/virt/xenapi/pool.py:185 +#: cinder/volume/drivers/emc/emc_smis_common.py:884 #, python-format -msgid "Pool-set_name_label failed: %(e)s" +msgid "Found Storage Type: %s" msgstr "" -#: cinder/virt/xenapi/vif.py:103 +#: cinder/volume/drivers/emc/emc_smis_common.py:887 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:903 #, python-format -msgid "Found no PIF for device %s" +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:906 +msgid "Masking View not found." msgstr "" -#: cinder/virt/xenapi/vif.py:122 +#: cinder/volume/drivers/emc/emc_smis_common.py:928 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:948 #, python-format -msgid "" -"PIF %(pif_rec['uuid'])s for network %(bridge)s has VLAN id %(pif_vlan)d. " -"Expected %(vlan_num)d" +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:952 +msgid "Ecom server not found." 
msgstr "" -#: cinder/virt/xenapi/vm_utils.py:218 -msgid "Created VM" +#: cinder/volume/drivers/emc/emc_smis_common.py:959 +msgid "Cannot connect to ECOM server" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:245 +#: cinder/volume/drivers/emc/emc_smis_common.py:971 #, python-format -msgid "VBD not found in instance %s" +msgid "Found Replication Service: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:262 +#: cinder/volume/drivers/emc/emc_smis_common.py:984 #, python-format -msgid "VBD %s already detached" +msgid "Found Storage Configuration Service: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:265 +#: cinder/volume/drivers/emc/emc_smis_common.py:997 #, python-format -msgid "VBD %(vbd_ref)s detach rejected, attempt %(num_attempt)d/%(max_attempts)d" +msgid "Found Controller Configuration Service: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:270 +#: cinder/volume/drivers/emc/emc_smis_common.py:1010 #, python-format -msgid "Unable to unplug VBD %s" +msgid "Found Storage Hardware ID Management Service: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:275 +#: cinder/volume/drivers/emc/emc_smis_common.py:1054 #, python-format -msgid "Reached maximum number of retries trying to unplug VBD %s" +msgid "Pool %(storage_type)s is not found." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:286 +#: cinder/volume/drivers/emc/emc_smis_common.py:1060 #, python-format -msgid "Unable to destroy VBD %s" +msgid "Storage system not found for pool %(storage_type)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:305 +#: cinder/volume/drivers/emc/emc_smis_common.py:1066 #, python-format -msgid "Creating %(vbd_type)s-type VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +msgid "Pool: %(pool)s SystemName: %(systemname)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:308 +#: cinder/volume/drivers/emc/emc_smis_common.py:1082 #, python-format -msgid "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s." +msgid "Pool name: %(poolname)s System name: %(systemname)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:319 +#: cinder/volume/drivers/emc/emc_smis_common.py:1114 #, python-format -msgid "Unable to destroy VDI %s" +msgid "Volume %(volumename)s not found on the array." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:336 +#: cinder/volume/drivers/emc/emc_smis_common.py:1117 #, python-format -msgid "" -"Created VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s)" -" on %(sr_ref)s." +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:345 +#: cinder/volume/drivers/emc/emc_smis_common.py:1130 #, python-format -msgid "Copied VDI %(vdi_ref)s from VDI %(vdi_to_copy_ref)s on %(sr_ref)s." +msgid "Source: %(volumename)s Target: %(snapshotname)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:353 +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 #, python-format -msgid "Cloned VDI %(vdi_ref)s from VDI %(vdi_to_clone_ref)s" +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " msgstr "" -#: cinder/virt/xenapi/vm_utils.py:372 +#: cinder/volume/drivers/emc/emc_smis_common.py:1158 #, python-format -msgid "No primary VDI found for %(vm_ref)s" +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:379 +#: cinder/volume/drivers/emc/emc_smis_common.py:1184 #, python-format -msgid "Snapshotting with label '%(label)s'" +msgid "Error finding %s." 
msgstr "" -#: cinder/virt/xenapi/vm_utils.py:392 +#: cinder/volume/drivers/emc/emc_smis_common.py:1188 #, python-format -msgid "Created snapshot %(template_vm_ref)s" +msgid "Found %(name)s: %(initiator)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:431 +#: cinder/volume/drivers/emc/emc_smis_common.py:1248 #, python-format -msgid "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s" +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:583 +#: cinder/volume/drivers/emc/emc_smis_common.py:1289 #, python-format -msgid "Creating blank HD of size %(req_size)d gigs" +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:634 +#: cinder/volume/drivers/emc/emc_smis_common.py:1302 #, python-format msgid "" -"Fast cloning is only supported on default local SR of type ext. SR on " -"this system was found to be of type %(sr_type)s. Ignoring the cow flag." +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:724 +#: cinder/volume/drivers/emc/emc_smis_common.py:1314 #, python-format msgid "" -"download_vhd %(image)s attempt %(attempt_num)d/%(max_attempts)d from " -"%(glance_host)s:%(glance_port)s" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:734 +#: cinder/volume/drivers/emc/emc_smis_common.py:1326 #, python-format -msgid "download_vhd failed: %r" +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:750 +#: cinder/volume/drivers/emc/emc_smis_common.py:1361 #, python-format -msgid "Asking xapi to fetch vhd image %(image)s" +msgid "Available device number on %(storage)s: %(device)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:760 +#: cinder/volume/drivers/emc/emc_smis_common.py:1404 #, python-format -msgid "" -"xapi 'download_vhd' returned VDI of type '%(vdi_type)s' with UUID " -"'%(vdi_uuid)s'" +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:789 +#: cinder/volume/drivers/emc/emc_smis_common.py:1409 #, python-format -msgid "vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes=%(vdi_size_bytes)d" +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:805 +#: cinder/volume/drivers/emc/emc_smis_common.py:1419 #, python-format -msgid "image_size_bytes=%(size_bytes)d, allowed_size_bytes=%(allowed_size_bytes)d" +msgid "Device info: %(data)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:809 +#: cinder/volume/drivers/emc/emc_smis_common.py:1441 #, python-format -msgid "" -"Image size %(size_bytes)d exceeded instance_type allowed size " -"%(allowed_size_bytes)d" +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." 
msgstr "" -#: cinder/virt/xenapi/vm_utils.py:831 +#: cinder/volume/drivers/emc/emc_smis_common.py:1463 #, python-format -msgid "Fetching image %(image)s, type %(image_type_str)" +msgid "Found Storage Processor System: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:844 +#: cinder/volume/drivers/emc/emc_smis_common.py:1491 #, python-format -msgid "Size for image %(image)s: %(virtual_size)d" +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1520 +msgid "Error finding Storage Hardware ID Service." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:853 +#: cinder/volume/drivers/emc/emc_smis_common.py:1526 #, python-format msgid "" -"Kernel/Ramdisk image is too large: %(vdi_size)d bytes, max %(max_size)d " -"bytes" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:870 +#: cinder/volume/drivers/emc/emc_smis_common.py:1538 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1548 #, python-format -msgid "Copying VDI %s to /boot/guest on dom0" +msgid "Add target WWN: %s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:884 +#: cinder/volume/drivers/emc/emc_smis_common.py:1550 #, python-format -msgid "Kernel/Ramdisk VDI %s destroyed" +msgid "Target WWNs: %s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:895 -msgid "Failed to fetch glance image" +#: cinder/volume/drivers/emc/emc_smis_common.py:1566 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:934 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:152 #, python-format -msgid "Detected %(image_type_str)s format for image %(image_ref)s" +msgid "Could not find iSCSI export for volume %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:955 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:161 +#, fuzzy, python-format +msgid "Cannot find device number for volume %s" +msgstr "%s 볼륨을 찾을수 없습니다" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:191 #, python-format -msgid "Looking up vdi %s for PV kernel" +msgid "Found iSCSI endpoint: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:973 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:198 #, python-format -msgid "Unknown image format %(disk_image_type)s" +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." 
msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1016 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:215 #, python-format -msgid "VDI %s is still available" +msgid "ISCSI properties: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1059 +#: cinder/volume/drivers/hds/hds.py:70 #, python-format -msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgid "Range: start LU: %(start)s, end LU: %(end)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1061 +#: cinder/volume/drivers/hds/hds.py:84 #, python-format -msgid "(VM_UTILS) xenapi power_state -> |%s|" +msgid "setting LU upper (end) limit to %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1088 +#: cinder/volume/drivers/hds/hds.py:92 #, python-format -msgid "Unable to parse rrd of %(vm_uuid)s" +msgid "%(element)s: %(val)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1108 +#: cinder/volume/drivers/hds/hds.py:103 cinder/volume/drivers/hds/hds.py:105 #, python-format -msgid "Re-scanning SR %s" +msgid "XML exception reading parameter: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1136 +#: cinder/volume/drivers/hds/hds.py:178 #, python-format -msgid "Flag sr_matching_filter '%s' does not respect formatting convention" +msgid "portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1154 -msgid "" -"XenAPI is unable to find a Storage Repository to install guest instances " -"on. Please check your configuration and/or configure the flag " -"'sr_matching_filter'" +#: cinder/volume/drivers/hds/hds.py:197 +#, python-format +msgid "No configuration found for service: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1167 -msgid "Cannot find SR of content-type ISO" +#: cinder/volume/drivers/hds/hds.py:250 +#, python-format +msgid "HDP not found: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1175 +#: cinder/volume/drivers/hds/hds.py:289 #, python-format -msgid "ISO: looking at SR %(sr_rec)s" +msgid "iSCSI portal not found for service: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1177 -msgid "ISO: not iso content" +#: cinder/volume/drivers/hds/hds.py:327 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1180 -msgid "ISO: iso content_type, no 'i18n-key' key" +#: cinder/volume/drivers/hds/hds.py:355 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is cloned." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1183 -msgid "ISO: iso content_type, i18n-key value not 'local-storage-iso'" +#: cinder/volume/drivers/hds/hds.py:372 +#, python-format +msgid "LUN %(lun)s extended to %(size)s GB." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1187 -msgid "ISO: SR MATCHing our criteria" +#: cinder/volume/drivers/hds/hds.py:395 +#, python-format +msgid "delete lun %(lun)s on %(name)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1189 -msgid "ISO: ISO, looking to see if it is host local" +#: cinder/volume/drivers/hds/hds.py:480 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created from snapshot." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1192 +#: cinder/volume/drivers/hds/hds.py:503 #, python-format -msgid "ISO: PBD %(pbd_ref)s disappeared" +msgid "LUN %(lun)s of size %(size)s MB is created as snapshot." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1195 +#: cinder/volume/drivers/hds/hds.py:522 #, python-format -msgid "ISO: PBD matching, want %(pbd_rec)s, have %(host)s" +msgid "LUN %s is deleted." 
msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1198 -msgid "ISO: SR with local PBD" +#: cinder/volume/drivers/huawei/__init__.py:57 +msgid "_instantiate_driver: configuration not found." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1220 +#: cinder/volume/drivers/huawei/__init__.py:64 #, python-format msgid "" -"Unable to obtain RRD XML for VM %(vm_uuid)s with server details: " -"%(server)s." +"_instantiate_driver: Loading %(protocol)s driver for Huawei OceanStor " +"%(product)s series storage arrays." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1236 +#: cinder/volume/drivers/huawei/__init__.py:84 #, python-format -msgid "Unable to obtain RRD XML updates with server details: %(server)s." +msgid "" +"\"Product\" or \"Protocol\" is illegal. \"Product\" should be set to " +"either T, Dorado or HVS. \"Protocol\" should be set to either iSCSI or " +"FC. Product: %(product)s Protocol: %(protocol)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1290 +#: cinder/volume/drivers/huawei/huawei_dorado.py:74 #, python-format -msgid "Invalid statistics data from Xenserver: %s" +msgid "" +"initialize_connection: volume name: %(vol)s host: %(host)s initiator: " +"%(wwn)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1343 +#: cinder/volume/drivers/huawei/huawei_dorado.py:92 +#: cinder/volume/drivers/huawei/huawei_t.py:461 #, python-format -msgid "VHD %(vdi_uuid)s has parent %(parent_ref)s" +msgid "initialize_connection: Target FC ports WWNS: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1417 +#: cinder/volume/drivers/huawei/huawei_t.py:101 #, python-format msgid "" -"Parent %(parent_uuid)s doesn't match original parent " -"%(original_parent_uuid)s, waiting for coalesce..." +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(ini)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1427 +#: cinder/volume/drivers/huawei/huawei_t.py:159 +#: cinder/volume/drivers/huawei/rest_common.py:1278 #, python-format -msgid "VHD coalesce attempts exceeded (%(max_attempts)d), giving up..." +msgid "" +"_get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " +"check config file." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1462 +#: cinder/volume/drivers/huawei/huawei_t.py:206 +#: cinder/volume/drivers/huawei/rest_common.py:1083 #, python-format -msgid "Timeout waiting for device %s to be created" +msgid "_get_tgt_iqn: iSCSI IP is %s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1473 +#: cinder/volume/drivers/huawei/huawei_t.py:234 #, python-format -msgid "Plugging VBD %s ... " +msgid "_get_tgt_iqn: iSCSI target iqn is %s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1476 +#: cinder/volume/drivers/huawei/huawei_t.py:248 #, python-format -msgid "Plugging VBD %s done." +msgid "" +"_get_iscsi_tgt_port_info: Failed to get iSCSI port info. Please make sure" +" the iSCSI port IP %s is configured in array." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1478 +#: cinder/volume/drivers/huawei/huawei_t.py:323 +#: cinder/volume/drivers/huawei/huawei_t.py:552 #, python-format -msgid "VBD %(vbd_ref)s plugged as %(orig_dev)s" +msgid "" +"terminate_connection: volume: %(vol)s, host: %(host)s, connector: " +"%(initiator)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1481 +#: cinder/volume/drivers/huawei/huawei_t.py:351 #, python-format -msgid "VBD %(vbd_ref)s plugged into wrong dev, remapping to %(dev)s" +msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1490 -#, python-format -msgid "Destroying VBD for VDI %s ... 
" +#: cinder/volume/drivers/huawei/huawei_t.py:436 +msgid "validate_connector: The FC driver requires thewwpns in the connector." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1498 +#: cinder/volume/drivers/huawei/huawei_t.py:443 #, python-format -msgid "Destroying VBD for VDI %s done." +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(wwn)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1511 +#: cinder/volume/drivers/huawei/huawei_t.py:578 #, python-format -msgid "Running pygrub against %s" +msgid "_remove_fc_ports: FC port was not found on host %(hostid)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1518 +#: cinder/volume/drivers/huawei/huawei_utils.py:40 #, python-format -msgid "Found Xen kernel %s" +msgid "parse_xml_file: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1520 -msgid "No Xen kernel found. Booting HVM." +#: cinder/volume/drivers/huawei/huawei_utils.py:129 +#, python-format +msgid "_get_host_os_type: Host %(ip)s OS type is %(os)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1533 -msgid "Partitions:" +#: cinder/volume/drivers/huawei/rest_common.py:59 +#, python-format +msgid "HVS Request URL: %(url)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1539 +#: cinder/volume/drivers/huawei/rest_common.py:60 #, python-format -msgid " %(num)s: %(ptype)s %(size)d sectors" +msgid "HVS Request Data: %(data)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1565 +#: cinder/volume/drivers/huawei/rest_common.py:73 #, python-format -msgid "" -"Writing partition table %(primary_first)d %(primary_last)d to " -"%(dev_path)s..." +msgid "HVS Response Data: %(res)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1578 +#: cinder/volume/drivers/huawei/rest_common.py:75 #, python-format -msgid "Writing partition table %s done." +msgid "Bad response from server: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1632 -#, python-format -msgid "" -"Starting sparse_copy src=%(src_path)s dst=%(dst_path)s " -"virtual_size=%(virtual_size)d block_size=%(block_size)d" +#: cinder/volume/drivers/huawei/rest_common.py:82 +msgid "JSON transfer error" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1664 +#: cinder/volume/drivers/huawei/rest_common.py:102 #, python-format -msgid "" -"Finished sparse_copy in %(duration).2f secs, %(compression_pct).2f%% " -"reduction in size" +msgid "Login error, reason is %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1714 +#: cinder/volume/drivers/huawei/rest_common.py:166 +#, python-format msgid "" -"XenServer tools installed in this image are capable of network injection." -" Networking files will not bemanipulated" +"%(err)s\n" +"result: %(res)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1722 -msgid "" -"XenServer tools are present in this image but are not capable of network " -"injection" +#: cinder/volume/drivers/huawei/rest_common.py:173 +#, python-format +msgid "%s \"data\" was not in result." 
msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1726 -msgid "XenServer tools are not installed in this image" +#: cinder/volume/drivers/huawei/rest_common.py:208 +msgid "Can't find the Qos policy in array" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1742 -msgid "Manipulating interface files directly" +#: cinder/volume/drivers/huawei/rest_common.py:246 +msgid "Can't find lun or lun group in array" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1751 +#: cinder/volume/drivers/huawei/rest_common.py:280 #, python-format -msgid "Failed to mount filesystem (expected for non-linux instances): %s" +msgid "Invalid resource pool: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:131 cinder/virt/xenapi/vmops.py:722 +#: cinder/volume/drivers/huawei/rest_common.py:298 #, python-format -msgid "Updating progress to %(progress)d" +msgid "Get pool info error, pool name is:%s" msgstr "" -#: cinder/virt/xenapi/vmops.py:231 +#: cinder/volume/drivers/huawei/rest_common.py:327 #, python-format -msgid "Attempted to power on non-existent instance bad instance id %s" +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:233 -#, fuzzy, python-format -msgid "Starting instance" -msgstr "인스턴스 %s를 재부팅합니다" +#: cinder/volume/drivers/huawei/rest_common.py:354 +#, python-format +msgid "_stop_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" -#: cinder/virt/xenapi/vmops.py:303 -msgid "Removing kernel/ramdisk files from dom0" +#: cinder/volume/drivers/huawei/rest_common.py:474 +#, python-format +msgid "" +"_mapping_hostgroup_and_lungroup: lun_group: %(lun_group)sview_id: " +"%(view_id)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:358 -msgid "Failed to spawn, rolling back" +#: cinder/volume/drivers/huawei/rest_common.py:511 +#: cinder/volume/drivers/huawei/rest_common.py:543 +#, python-format +msgid "initiator name:%(initiator_name)s, volume name:%(volume)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:443 -msgid "Detected ISO image type, creating blank VM for install" +#: cinder/volume/drivers/huawei/rest_common.py:527 +#, python-format +msgid "host lun id is %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:462 -msgid "Auto configuring disk, attempting to resize partition..." +#: cinder/volume/drivers/huawei/rest_common.py:553 +#, python-format +msgid "the free wwns %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:515 +#: cinder/volume/drivers/huawei/rest_common.py:574 #, python-format -msgid "Invalid value for injected_files: %r" +msgid "the fc server properties is:%s" msgstr "" -#: cinder/virt/xenapi/vmops.py:520 +#: cinder/volume/drivers/huawei/rest_common.py:688 #, python-format -msgid "Injecting file path: '%s'" +msgid "JSON transfer data error. %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:527 -msgid "Setting admin password" +#: cinder/volume/drivers/huawei/rest_common.py:874 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:531 -msgid "Resetting network" +#: cinder/volume/drivers/huawei/rest_common.py:937 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(fetchtype)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:538 -msgid "Setting VCPU weight" +#: cinder/volume/drivers/huawei/rest_common.py:964 +#, python-format +msgid "" +"PrefetchType config is wrong. PrefetchType must in 1,2,3,4. 
fetchtype " +"is:%(fetchtype)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:544 -msgid "Starting VM" +#: cinder/volume/drivers/huawei/rest_common.py:970 +msgid "Use default prefetch fetchtype. Prefetch fetchtype:Intelligent." msgstr "" -#: cinder/virt/xenapi/vmops.py:551 +#: cinder/volume/drivers/huawei/rest_common.py:982 #, python-format msgid "" -"Latest agent build for %(hypervisor)s/%(os)s/%(architecture)s is " -"%(version)s" +"_wait_for_luncopy:LUNcopy status is not normal.LUNcopy name: " +"%(luncopyname)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:554 +#: cinder/volume/drivers/huawei/rest_common.py:1056 #, python-format -msgid "No agent build found for %(hypervisor)s/%(os)s/%(architecture)s" +msgid "" +"_get_iscsi_port_info: Failed to get iscsi port info through config IP " +"%(ip)s, please check config file." msgstr "" -#: cinder/virt/xenapi/vmops.py:561 -msgid "Waiting for instance state to become running" +#: cinder/volume/drivers/huawei/rest_common.py:1101 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:573 -msgid "Querying agent version" +#: cinder/volume/drivers/huawei/rest_common.py:1124 +#, python-format +msgid "_parse_volume_type: type id: %(type_id)s config parameter is: %(params)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:576 +#: cinder/volume/drivers/huawei/rest_common.py:1157 #, python-format -msgid "Instance agent version: %s" +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the configuration file " +"%(conf)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:581 +#: cinder/volume/drivers/huawei/rest_common.py:1162 #, python-format -msgid "Updating Agent to %s" +msgid "The config parameters are: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:616 +#: cinder/volume/drivers/huawei/rest_common.py:1239 +#: cinder/volume/drivers/huawei/ssh_common.py:118 +#: cinder/volume/drivers/huawei/ssh_common.py:1265 #, python-format -msgid "No opaque_ref could be determined for '%s'." +msgid "_check_conf_file: Config file invalid. %s must be set." msgstr "" -#: cinder/virt/xenapi/vmops.py:670 -msgid "Finished snapshot and upload for VM" +#: cinder/volume/drivers/huawei/rest_common.py:1246 +#: cinder/volume/drivers/huawei/ssh_common.py:125 +msgid "_check_conf_file: Config file invalid. StoragePool must be set." msgstr "" -#: cinder/virt/xenapi/vmops.py:677 -msgid "Starting snapshot for VM" +#: cinder/volume/drivers/huawei/rest_common.py:1256 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType invalid.\n" +"The valid values are: %(os_list)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:686 -#, fuzzy, python-format -msgid "Unable to Snapshot instance: %(exc)s" -msgstr "%s 인스턴스에 볼륨장착 할 수 없습니다" - -#: cinder/virt/xenapi/vmops.py:702 -msgid "Failed to transfer vhd to new host" +#: cinder/volume/drivers/huawei/rest_common.py:1300 +msgid "Can not find lun in array" msgstr "" -#: cinder/virt/xenapi/vmops.py:770 +#: cinder/volume/drivers/huawei/ssh_common.py:54 #, python-format -msgid "Resizing down VDI %(cow_uuid)s from %(old_gb)dGB to %(new_gb)dGB" +msgid "ssh_read: Read SSH timeout. %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:893 -#, python-format -msgid "Resizing up VDI %(vdi_uuid)s from %(old_gb)dGB to %(new_gb)dGB" +#: cinder/volume/drivers/huawei/ssh_common.py:70 +msgid "No response message. Please check system status." 
msgstr "" -#: cinder/virt/xenapi/vmops.py:901 -msgid "Resize complete" +#: cinder/volume/drivers/huawei/ssh_common.py:101 +#: cinder/volume/drivers/huawei/ssh_common.py:1249 +msgid "do_setup" msgstr "" -#: cinder/virt/xenapi/vmops.py:928 +#: cinder/volume/drivers/huawei/ssh_common.py:135 +#: cinder/volume/drivers/huawei/ssh_common.py:1287 #, python-format -msgid "Failed to query agent version: %(resp)r" +msgid "" +"_check_conf_file: Config file invalid. Host OSType is invalid.\n" +"The valid values are: %(os_list)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:949 +#: cinder/volume/drivers/huawei/ssh_common.py:169 #, python-format -msgid "domid changed from %(domid)s to %(newdomid)s" +msgid "_get_login_info: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:962 +#: cinder/volume/drivers/huawei/ssh_common.py:224 #, python-format -msgid "Failed to update agent: %(resp)r" +msgid "create_volume: volume name: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:983 +#: cinder/volume/drivers/huawei/ssh_common.py:242 #, python-format -msgid "Failed to exchange keys: %(resp)r" +msgid "" +"_name_translate: Name in cinder: %(old)s, new name in storage system: " +"%(new)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:998 +#: cinder/volume/drivers/huawei/ssh_common.py:279 #, python-format -msgid "Failed to update password: %(resp)r" +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the element in configuration " +"file %(conf)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1023 +#: cinder/volume/drivers/huawei/ssh_common.py:373 +#: cinder/volume/drivers/huawei/ssh_common.py:1451 #, python-format -msgid "Failed to inject file: %(resp)r" +msgid "LUNType must be \"Thin\" or \"Thick\". LUNType:%(type)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1032 -msgid "VM already halted, skipping shutdown..." +#: cinder/volume/drivers/huawei/ssh_common.py:395 +msgid "" +"_parse_conf_lun_params: Use default prefetch type. Prefetch type: " +"Intelligent" msgstr "" -#: cinder/virt/xenapi/vmops.py:1036 -msgid "Shutting down VM" +#: cinder/volume/drivers/huawei/ssh_common.py:421 +#, python-format +msgid "" +"_get_maximum_capacity_pool_id: Failed to get pool id. Please check config" +" file and make sure the StoragePool %s is created in storage array." msgstr "" -#: cinder/virt/xenapi/vmops.py:1054 -msgid "Unable to find VBD for VM" +#: cinder/volume/drivers/huawei/ssh_common.py:436 +#, python-format +msgid "CLI command: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1097 -msgid "Using RAW or VHD, skipping kernel and ramdisk deletion" +#: cinder/volume/drivers/huawei/ssh_common.py:466 +#, python-format +msgid "" +"_execute_cli: Can not connect to IP %(old)s, try to connect to the other " +"IP %(new)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1104 -msgid "instance has a kernel or ramdisk but not both" +#: cinder/volume/drivers/huawei/ssh_common.py:501 +#, python-format +msgid "_execute_cli: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1111 -msgid "kernel/ramdisk files removed" +#: cinder/volume/drivers/huawei/ssh_common.py:511 +#, python-format +msgid "delete_volume: volume name: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1121 -msgid "VM destroyed" +#: cinder/volume/drivers/huawei/ssh_common.py:516 +#, python-format +msgid "delete_volume: Volume %(name)s does not exist." 
msgstr "" -#: cinder/virt/xenapi/vmops.py:1147 -msgid "Destroying VM" +#: cinder/volume/drivers/huawei/ssh_common.py:570 +#, python-format +msgid "" +"create_volume_from_snapshot: snapshot name: %(snapshot)s, volume name: " +"%(volume)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1169 -msgid "VM is not present, skipping destroy..." +#: cinder/volume/drivers/huawei/ssh_common.py:580 +#, python-format +msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:1222 +#: cinder/volume/drivers/huawei/ssh_common.py:650 #, python-format -msgid "Instance is already in Rescue Mode: %s" +msgid "_wait_for_luncopy: LUNcopy %(luncopyname)s status is %(status)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1296 +#: cinder/volume/drivers/huawei/ssh_common.py:688 #, python-format -msgid "Found %(instance_count)d hung reboots older than %(timeout)d seconds" +msgid "create_cloned_volume: src volume: %(src)s, tgt volume: %(tgt)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1300 -msgid "Automatically hard rebooting" +#: cinder/volume/drivers/huawei/ssh_common.py:697 +#, python-format +msgid "Source volume %(name)s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:1363 +#: cinder/volume/drivers/huawei/ssh_common.py:739 #, python-format -msgid "Setting migration %(migration_id)s to error: %(reason)s" +msgid "" +"extend_volume: extended volume name: %(extended_name)s new added volume " +"name: %(added_name)s new added volume size: %(added_size)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1374 +#: cinder/volume/drivers/huawei/ssh_common.py:747 #, python-format -msgid "" -"Automatically confirming migration %(migration_id)s for instance " -"%(instance_uuid)s" +msgid "extend_volume: volume %s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:1379 +#: cinder/volume/drivers/huawei/ssh_common.py:779 #, python-format -msgid "Instance %(instance_uuid)s not found" +msgid "create_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1383 -msgid "In ERROR state" +#: cinder/volume/drivers/huawei/ssh_common.py:785 +msgid "create_snapshot: Resource pool needs 1GB valid size at least." msgstr "" -#: cinder/virt/xenapi/vmops.py:1389 +#: cinder/volume/drivers/huawei/ssh_common.py:792 #, python-format -msgid "In %(task_state)s task_state, not RESIZE_VERIFY" +msgid "create_snapshot: Volume %(name)s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:1396 +#: cinder/volume/drivers/huawei/ssh_common.py:855 #, python-format -msgid "Error auto-confirming resize: %(e)s. Will retry later." +msgid "delete_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1418 -msgid "Could not get bandwidth info." +#: cinder/volume/drivers/huawei/ssh_common.py:865 +#, python-format +msgid "" +"delete_snapshot: Can not delete snapshot %s for it is a source LUN of " +"LUNCopy." msgstr "" -#: cinder/virt/xenapi/vmops.py:1469 -msgid "Injecting network info to xenstore" +#: cinder/volume/drivers/huawei/ssh_common.py:873 +#, python-format +msgid "delete_snapshot: Snapshot %(snap)s does not exist." 
msgstr "" -#: cinder/virt/xenapi/vmops.py:1483 -msgid "Creating vifs" +#: cinder/volume/drivers/huawei/ssh_common.py:916 +#, python-format +msgid "" +"%(func)s: %(msg)s\n" +"CLI command: %(cmd)s\n" +"CLI out: %(out)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1492 +#: cinder/volume/drivers/huawei/ssh_common.py:933 #, python-format -msgid "Creating VIF for network %(network_ref)s" +msgid "map_volume: Volume %s was not found." msgstr "" -#: cinder/virt/xenapi/vmops.py:1495 +#: cinder/volume/drivers/huawei/ssh_common.py:1079 #, python-format -msgid "Created VIF %(vif_ref)s, network %(network_ref)s" +msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1520 -msgid "Injecting hostname to xenstore" +#: cinder/volume/drivers/huawei/ssh_common.py:1102 +#, python-format +msgid "remove_map: Host %s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:1545 +#: cinder/volume/drivers/huawei/ssh_common.py:1106 #, python-format -msgid "" -"The agent call to %(method)s returned an invalid response: %(ret)r. " -"path=%(path)s; args=%(args)r" +msgid "remove_map: Volume %s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:1566 +#: cinder/volume/drivers/huawei/ssh_common.py:1119 #, python-format -msgid "TIMEOUT: The call to %(method)s timed out. args=%(args)r" +msgid "remove_map: No map between host %(host)s and volume %(volume)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1570 +#: cinder/volume/drivers/huawei/ssh_common.py:1138 #, python-format msgid "" -"NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. " -"args=%(args)r" +"_delete_map: There are IOs accessing the system. Retry to delete host map" +" %(mapid)s 10s later." msgstr "" -#: cinder/virt/xenapi/vmops.py:1575 +#: cinder/volume/drivers/huawei/ssh_common.py:1146 #, python-format -msgid "The call to %(method)s returned an error: %(e)s. args=%(args)r" +msgid "" +"_delete_map: Failed to delete host map %(mapid)s.\n" +"CLI out: %(out)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1661 -#, python-format -msgid "OpenSSL error: %s" +#: cinder/volume/drivers/huawei/ssh_common.py:1185 +msgid "_update_volume_stats: Updating volume stats." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1277 +msgid "_check_conf_file: Config file invalid. StoragePool must be specified." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:52 -msgid "creating sr within volume_utils" +#: cinder/volume/drivers/huawei/ssh_common.py:1311 +msgid "" +"_get_device_type: The driver only supports Dorado5100 and Dorado 2100 G2 " +"now." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:55 cinder/virt/xenapi/volume_utils.py:83 +#: cinder/volume/drivers/huawei/ssh_common.py:1389 #, python-format -msgid "type is = %s" +msgid "" +"create_volume_from_snapshot: %(device)s does not support create volume " +"from snapshot." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:58 cinder/virt/xenapi/volume_utils.py:86 +#: cinder/volume/drivers/huawei/ssh_common.py:1396 #, python-format -msgid "name = %s" +msgid "create_cloned_volume: %(device)s does not support clone volume." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:71 +#: cinder/volume/drivers/huawei/ssh_common.py:1404 #, python-format -msgid "Created %(label)s as %(sr_ref)s." +msgid "extend_volume: %(device)s does not support extend volume." 
msgstr "" -#: cinder/virt/xenapi/volume_utils.py:76 cinder/virt/xenapi/volume_utils.py:174 -msgid "Unable to create Storage Repository" +#: cinder/volume/drivers/huawei/ssh_common.py:1413 +#, python-format +msgid "create_snapshot: %(device)s does not support snapshot." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:80 -msgid "introducing sr within volume_utils" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:132 +msgid "enter: do_setup" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:103 cinder/virt/xenapi/volume_utils.py:170 -#: cinder/virt/xenapi/volumeops.py:156 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:142 #, python-format -msgid "Introduced %(label)s as %(sr_ref)s." +msgid "Failed getting details for pool %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:179 +msgid "do_setup: No configured nodes." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:106 -msgid "Creating pbd for SR" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:182 +msgid "leave: do_setup" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:108 -msgid "Plugging SR" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:186 +msgid "enter: check_for_setup_error" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:116 cinder/virt/xenapi/volumeops.py:160 -msgid "Unable to introduce Storage Repository" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:190 +msgid "Unable to determine system name" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:127 cinder/virt/xenapi/volumeops.py:50 -msgid "Unable to get SR using uuid" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:193 +msgid "Unable to determine system id" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:129 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:196 +msgid "Unable to determine pool extent size" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:203 +#: cinder/volume/drivers/netapp/iscsi.py:122 +#: cinder/volume/drivers/netapp/nfs.py:639 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:157 #, python-format -msgid "Forgetting SR %s..." +msgid "%s is not set" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:137 -msgid "Unable to forget Storage Repository" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:209 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:157 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:217 #, python-format -msgid "Introducing %s..." +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:225 +msgid "leave: check_for_setup_error" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:186 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:235 #, python-format -msgid "Unable to find SR from VBD %s" +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:254 +msgid "The connector does not contain the required information." 
msgstr "" -#: cinder/virt/xenapi/volume_utils.py:204 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:277 #, python-format -msgid "Ignoring exception %(exc)s when getting PBDs for %(sr_ref)s" +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:297 +msgid "CHAP secret exists for host but CHAP is disabled" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:210 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:302 #, python-format -msgid "Ignoring exception %(exc)s when unplugging PBD %(pbd)s" +msgid "initialize_connection: Failed to get attributes for volume %s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:234 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:314 #, python-format -msgid "Unable to introduce VDI on SR %s" +msgid "Did not find expected column name in lsvdisk: %s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:242 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:316 #, python-format -msgid "Unable to get record of VDI %s on" +msgid "initialize_connection: Missing volume attribute for volume %s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:264 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:333 #, python-format -msgid "Unable to introduce VDI for SR %s" +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:274 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:342 #, python-format -msgid "Error finding vdis in SR %s" +msgid "initialize_connection: Did not find a preferred node for volume %s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:281 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:365 +msgid "" +"Could not get FC connection information for the host-volume connection. " +"Is the host configured properly for FC connections?" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:380 #, python-format -msgid "Unable to find vbd for vdi %s" +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:315 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:385 #, python-format -msgid "Unable to obtain target information %(data)s, %(mountpoint)s" +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:341 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:403 #, python-format -msgid "Mountpoint cannot be translated: %s" +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:64 -msgid "Could not find VDI ref" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:411 +msgid "terminate_connection: Failed to get host name from connector." msgstr "" -#: cinder/virt/xenapi/volumeops.py:69 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:421 #, python-format -msgid "Creating SR %s" +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:73 -msgid "Could not create SR" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:447 +msgid "create_volume_from_snapshot: Source and destination size differ." 
msgstr "" -#: cinder/virt/xenapi/volumeops.py:76 -msgid "Could not retrieve SR record" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:459 +msgid "create_cloned_volume: Source and destination size differ." msgstr "" -#: cinder/virt/xenapi/volumeops.py:81 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:470 #, python-format -msgid "Introducing SR %s" +msgid "enter: extend_volume: volume %s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:85 -msgid "SR found in xapi database. No need to introduce" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:474 +msgid "extend_volume: Extending a volume with snapshots is not supported." msgstr "" -#: cinder/virt/xenapi/volumeops.py:90 -msgid "Could not introduce SR" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:481 +#, python-format +msgid "leave: extend_volume: volume %s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:94 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:497 #, python-format -msgid "Checking for SR %s" +msgid "enter: migrate_volume: id=%(id)s, host=%(host)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:106 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:523 #, python-format -msgid "SR %s not found in the xapi database" +msgid "leave: migrate_volume: id=%(id)s, host=%(host)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:112 -msgid "Could not forget SR" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:540 +#, python-format +msgid "" +"enter: retype: id=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:121 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:581 #, python-format -msgid "Attach_volume: %(connection_info)s, %(instance_name)s, %(mountpoint)s" +msgid "" +"exit: retype: ild=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:178 -#, python-format -msgid "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s" -msgstr "%(instance_name)s 인스턴스의 %(sr_ref)s SR에 대한 VDI 생성이 실패했습니다" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:622 +msgid "Could not get pool data from the storage" +msgstr "" -#: cinder/virt/xenapi/volumeops.py:189 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:623 +msgid "_update_volume_stats: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:44 #, python-format -msgid "Unable to use SR %(sr_ref)s for instance %(instance_name)s" -msgstr "%(instance_name)s 인스턴스의 %(sr_ref)s SR을 사용 할 수 없습니다" +msgid "Could not find key in output of command %(cmd)s: %(out)s" +msgstr "" -#: cinder/virt/xenapi/volumeops.py:197 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:64 #, python-format -msgid "Unable to attach volume to instance %s" -msgstr "%s 인스턴스에 볼륨장착 할 수 없습니다" +msgid "Failed to get code level (%s)." 
+msgstr "" -#: cinder/virt/xenapi/volumeops.py:200 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:86 #, python-format -msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s" -msgstr "%(instance_name)s 인스턴스에 %(mountpoint)s 마운트지점이 장착되었습니다" +msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s" +msgstr "" -#: cinder/virt/xenapi/volumeops.py:210 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:143 #, python-format -msgid "Detach_volume: %(instance_name)s, %(mountpoint)s" -msgstr "볼륨 탈착: %(instance_name)s, %(mountpoint)s" +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" -#: cinder/virt/xenapi/volumeops.py:219 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:165 #, python-format -msgid "Unable to locate volume %s" -msgstr "%s 볼륨을 찾을수 없습니다" +msgid "Failed to find host %s" +msgstr "" -#: cinder/virt/xenapi/volumeops.py:227 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:178 #, python-format -msgid "Unable to detach volume %s" -msgstr "%s 볼륨 탈착에 실패했습니다" +msgid "enter: get_host_from_connector: %s" +msgstr "" -#: cinder/virt/xenapi/volumeops.py:232 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:207 #, python-format -msgid "Unable to destroy vbd %s" +msgid "leave: get_host_from_connector: host %s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:239 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:218 #, python-format -msgid "Error purging SR %s" +msgid "enter: create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:224 +msgid "create_host: Host name is not unicode or string" msgstr "" -#: cinder/virt/xenapi/volumeops.py:241 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:235 +msgid "create_host: No initiators or wwpns supplied." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:265 #, python-format -msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s" -msgstr "%(instance_name)s 인스턴스에 %(mountpoint)s 마운트지점이 탈착되었습니다" +msgid "leave: create_host: host %(host)s - %(host_name)s" +msgstr "" -#: cinder/vnc/xvp_proxy.py:98 cinder/vnc/xvp_proxy.py:103 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:275 #, python-format -msgid "Error in handshake: %s" +msgid "enter: map_vol_to_host: volume %(volume_name)s to host %(host_name)s" msgstr "" -#: cinder/vnc/xvp_proxy.py:119 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:301 #, python-format -msgid "Invalid request: %s" +msgid "" +"leave: map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host " +"%(host_name)s" msgstr "" -#: cinder/vnc/xvp_proxy.py:139 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:311 #, python-format -msgid "Request: %s" +msgid "enter: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" msgstr "" -#: cinder/vnc/xvp_proxy.py:142 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:318 #, python-format -msgid "Request made with missing token: %s" +msgid "unmap_vol_from_host: No mapping of volume %(vol_name)s to any host found." msgstr "" -#: cinder/vnc/xvp_proxy.py:153 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:324 #, python-format -msgid "Request made with invalid token: %s" +msgid "" +"unmap_vol_from_host: Multiple mappings of volume %(vol_name)s found, no " +"host specified." msgstr "" -#: cinder/vnc/xvp_proxy.py:160 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:336 #, python-format -msgid "Unexpected error: %s" +msgid "" +"unmap_vol_from_host: No mapping of volume %(vol_name)s to host %(host) " +"found." 
msgstr "" -#: cinder/vnc/xvp_proxy.py:180 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:348 #, python-format -msgid "Starting cinder-xvpvncproxy node (version %s)" +msgid "leave: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" msgstr "" -#: cinder/volume/api.py:74 cinder/volume/api.py:220 -msgid "status must be available" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:377 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" msgstr "" -#: cinder/volume/api.py:85 -#, python-format -msgid "Quota exceeded for %(pid)s, tried to create %(size)sG volume" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:383 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" msgstr "" -#: cinder/volume/api.py:137 -#, fuzzy -msgid "Volume status must be available or error" -msgstr "볼륨의 상태를 알 수 없습니다" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:390 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" -#: cinder/volume/api.py:142 -#, python-format -msgid "Volume still has %d dependent snapshots" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:397 +msgid "System does not support compression" msgstr "" -#: cinder/volume/api.py:223 -msgid "already attached" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:402 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" msgstr "" -#: cinder/volume/api.py:230 -msgid "already detached" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:408 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" msgstr "" -#: cinder/volume/api.py:292 -msgid "must be available" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:417 +#, python-format +msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s" msgstr "" -#: cinder/volume/api.py:325 -#, fuzzy -msgid "Volume Snapshot status must be available or error" -msgstr "볼륨의 상태를 알 수 없습니다" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:452 +msgid "Protocol must be specified as ' iSCSI' or ' FC'." +msgstr "" -#: cinder/volume/driver.py:96 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:495 #, python-format -msgid "Recovering from a failed execute. Try number %s" +msgid "enter: create_vdisk: vdisk %s " msgstr "" -#: cinder/volume/driver.py:106 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:498 #, python-format -msgid "volume group %s doesn't exist" +msgid "leave: _create_vdisk: volume %s " msgstr "" -#: cinder/volume/driver.py:270 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:525 #, python-format -msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %d" +msgid "" +"Unexecpted mapping status %(status)s for mapping%(id)s. Attributes: " +"%(attr)s" msgstr "" -#: cinder/volume/driver.py:318 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:535 #, python-format -msgid "Skipping remove_export. No iscsi_target provisioned for volume: %d" +msgid "" +"Mapping %(id)s prepare failed to complete within theallotted %(to)d " +"seconds timeout. Terminating." msgstr "" -#: cinder/volume/driver.py:327 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:544 #, python-format msgid "" -"Skipping remove_export. 
No iscsi_target is presently exported for volume:"
-" %d"
+"enter: run_flashcopy: execute FlashCopy from source %(source)s to target "
+"%(target)s"
 msgstr ""

-#: cinder/volume/driver.py:337
-msgid "ISCSI provider_location not stored, using discovery"
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:552
+#, python-format
+msgid "leave: run_flashcopy: FlashCopy started from %(source)s to %(target)s"
 msgstr ""

-#: cinder/volume/driver.py:384
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:572
 #, python-format
-msgid "Could not find iSCSI export for volume %s"
+msgid "Loopcall: _check_vdisk_fc_mappings(), vdisk %s"
 msgstr ""

-#: cinder/volume/driver.py:388
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:595
 #, python-format
-msgid "ISCSI Discovery: Found %s"
+msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s"
 msgstr ""

-#: cinder/volume/driver.py:466
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:631
 #, python-format
-msgid "Cannot confirm exported volume id:%(volume_id)s."
+msgid "Calling _ensure_vdisk_no_fc_mappings: vdisk %s"
 msgstr ""

-#: cinder/volume/driver.py:493
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:639
 #, python-format
-msgid "FAKE ISCSI: %s"
+msgid "enter: delete_vdisk: vdisk %s"
 msgstr ""

-#: cinder/volume/driver.py:505
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:641
 #, python-format
-msgid "rbd has no pool %s"
+msgid "Tried to delete non-existent vdisk %s."
 msgstr ""

-#: cinder/volume/driver.py:579
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:645
 #, python-format
-msgid "Sheepdog is not working: %s"
+msgid "leave: delete_vdisk: vdisk %s"
 msgstr ""

-#: cinder/volume/driver.py:581
-msgid "Sheepdog is not working"
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:649
+#, python-format
+msgid "enter: create_copy: snapshot %(src)s to %(tgt)s"
 msgstr ""

-#: cinder/volume/driver.py:680 cinder/volume/driver.py:685
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:654
 #, python-format
-msgid "LoggingVolumeDriver: %s"
+msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist"
 msgstr ""

-#: cinder/volume/manager.py:96
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:669
 #, python-format
-msgid "Re-exporting %s volumes"
+msgid "leave: _create_copy: snapshot %(tgt)s from vdisk %(src)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:691
+msgid "migrate_volume started without a vdisk copy in the expected pool."
msgstr "" -#: cinder/volume/manager.py:101 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:743 #, python-format -msgid "volume %s: skipping export" +msgid "" +"Ignore change IO group as storage code level is %(code_level)s, below " +"then 6.4.0.0" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:35 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:211 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:244 +#, fuzzy, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:53 +#, python-format +msgid "Expected no output from CLI command %(cmd)s, got %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:65 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:256 +#, python-format +msgid "" +"Failed to parse CLI output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:142 +msgid "Must pass wwpn or host to lsfabric." msgstr "" -#: cinder/volume/manager.py:107 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:158 #, python-format -msgid "volume %s: creating" +msgid "Did not find success message nor error for %(fun)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:163 +msgid "" +"storwize_svc_multihostmap_enabled is set to False, not allowing multi " +"host mapping." msgstr "" -#: cinder/volume/manager.py:119 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:347 #, python-format -msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgid "Did not find expected key %(key)s in %(fun)s: %(raw)s" msgstr "" -#: cinder/volume/manager.py:131 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:382 #, python-format -msgid "volume %s: creating export" +msgid "" +"Unexpected CLI response: header/row mismatch. header: %(header)s, row: " +"%(row)s" msgstr "" -#: cinder/volume/manager.py:144 +#: cinder/volume/drivers/netapp/api.py:419 #, python-format -msgid "volume %s: created successfully" +msgid "No element by given name %s." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:440 +msgid "Not a valid value for NaElement." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:444 +msgid "NaElement name cannot be null." msgstr "" -#: cinder/volume/manager.py:153 -msgid "Volume is still attached" +#: cinder/volume/drivers/netapp/api.py:468 +msgid "Type cannot be converted into NaElement." 
msgstr "" -#: cinder/volume/manager.py:155 -msgid "Volume is not local to this node" +#: cinder/volume/drivers/netapp/common.py:75 +msgid "Required configuration not found" msgstr "" -#: cinder/volume/manager.py:159 +#: cinder/volume/drivers/netapp/common.py:103 #, python-format -msgid "volume %s: removing export" +msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s" msgstr "" -#: cinder/volume/manager.py:161 +#: cinder/volume/drivers/netapp/common.py:109 #, python-format -msgid "volume %s: deleting" +msgid "Storage family %s is not supported" msgstr "" -#: cinder/volume/manager.py:164 +#: cinder/volume/drivers/netapp/common.py:116 #, python-format -msgid "volume %s: volume is busy" +msgid "No default storage protocol found for storage family %(storage_family)s" msgstr "" -#: cinder/volume/manager.py:176 +#: cinder/volume/drivers/netapp/common.py:123 #, python-format -msgid "volume %s: deleted successfully" +msgid "" +"Protocol %(storage_protocol)s is not supported for storage family " +"%(storage_family)s" msgstr "" -#: cinder/volume/manager.py:183 +#: cinder/volume/drivers/netapp/common.py:130 #, python-format -msgid "snapshot %s: creating" +msgid "" +"NetApp driver of family %(storage_family)s and protocol " +"%(storage_protocol)s loaded" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:139 +msgid "Only loading netapp drivers supported." msgstr "" -#: cinder/volume/manager.py:187 +#: cinder/volume/drivers/netapp/common.py:158 #, python-format -msgid "snapshot %(snap_name)s: creating" +msgid "" +"The configured NetApp driver is deprecated. Please refer the link to " +"resolve the issue '%s'." msgstr "" -#: cinder/volume/manager.py:202 +#: cinder/volume/drivers/netapp/iscsi.py:69 #, python-format -msgid "snapshot %s: created successfully" +msgid "No metadata property %(prop)s defined for the LUN %(name)s" msgstr "" -#: cinder/volume/manager.py:211 +#: cinder/volume/drivers/netapp/iscsi.py:105 #, python-format -msgid "snapshot %s: deleting" +msgid "Using NetApp filer: %s" msgstr "" -#: cinder/volume/manager.py:214 -#, fuzzy, python-format -msgid "snapshot %s: snapshot is busy" -msgstr "인스턴스 %s: 스냅샷 저장중" +#: cinder/volume/drivers/netapp/iscsi.py:150 +msgid "Success getting LUN list from server" +msgstr "" -#: cinder/volume/manager.py:226 +#: cinder/volume/drivers/netapp/iscsi.py:166 #, python-format -msgid "snapshot %s: deleted successfully" +msgid "Created LUN with name %s" msgstr "" -#: cinder/volume/manager.py:310 -msgid "Checking volume capabilities" +#: cinder/volume/drivers/netapp/iscsi.py:175 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." 
msgstr "" -#: cinder/volume/manager.py:314 +#: cinder/volume/drivers/netapp/iscsi.py:191 #, python-format -msgid "New capabilities found: %s" +msgid "Destroyed LUN %s" msgstr "" -#: cinder/volume/manager.py:325 -msgid "Clear capabilities" +#: cinder/volume/drivers/netapp/iscsi.py:227 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" msgstr "" -#: cinder/volume/manager.py:329 +#: cinder/volume/drivers/netapp/iscsi.py:232 #, python-format -msgid "Notification {%s} received" +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" msgstr "" -#: cinder/volume/netapp.py:79 +#: cinder/volume/drivers/netapp/iscsi.py:238 #, python-format -msgid "API %(name)sfailed: %(reason)s" +msgid "Failed to get LUN target details for the LUN %s" msgstr "" -#: cinder/volume/netapp.py:109 +#: cinder/volume/drivers/netapp/iscsi.py:249 #, python-format -msgid "%s is not set" +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:252 +#, python-format +msgid "Failed to get target IQN for the LUN %s" msgstr "" -#: cinder/volume/netapp.py:128 -msgid "Connected to DFM server" +#: cinder/volume/drivers/netapp/iscsi.py:290 +#, python-format +msgid "Snapshot %s deletion successful" msgstr "" -#: cinder/volume/netapp.py:159 +#: cinder/volume/drivers/netapp/iscsi.py:310 +#: cinder/volume/drivers/netapp/iscsi.py:565 +#: cinder/volume/drivers/netapp/nfs.py:99 +#: cinder/volume/drivers/netapp/nfs.py:206 #, python-format -msgid "Job failed: %s" +msgid "Resizing %s failed. Cleaning volume." msgstr "" -#: cinder/volume/netapp.py:240 -msgid "Failed to provision dataset member" +#: cinder/volume/drivers/netapp/iscsi.py:325 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" msgstr "" -#: cinder/volume/netapp.py:252 -msgid "No LUN was created by the provision job" +#: cinder/volume/drivers/netapp/iscsi.py:412 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" msgstr "" -#: cinder/volume/netapp.py:261 cinder/volume/netapp.py:433 -#, fuzzy, python-format -msgid "Failed to find LUN ID for volume %s" -msgstr "%s 볼륨을 찾을수 없습니다" +#: cinder/volume/drivers/netapp/iscsi.py:431 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:511 +msgid "Object is not a NetApp LUN." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:543 +#, python-format +msgid "Message: %s" +msgstr "" -#: cinder/volume/netapp.py:280 -msgid "Failed to remove and delete dataset member" +#: cinder/volume/drivers/netapp/iscsi.py:545 +#, python-format +msgid "Error getting lun attribute. Exception: %s" msgstr "" -#: cinder/volume/netapp.py:603 cinder/volume/netapp.py:657 -#, fuzzy, python-format -msgid "No LUN ID for volume %s" -msgstr "%s 볼륨을 찾을수 없습니다" +#: cinder/volume/drivers/netapp/iscsi.py:600 +#, python-format +msgid "No need to extend volume %s as it is already the requested new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:606 +#, python-format +msgid "Resizing lun %s directly to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:633 +#, python-format +msgid "Lun %(path)s geometry failed. Message - %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:662 +#, python-format +msgid "Moving lun %(name)s to %(new_name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:677 +#, python-format +msgid "Resizing lun %s using sub clone to new size." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:684 +#, python-format +msgid "%s cannot be sub clone resized as it is hosted on compressed volume" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:690 +#, python-format +msgid "%s cannot be sub clone resized as it contains no blocks." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:707 +#, python-format +msgid "Post clone resize lun %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:718 +#, python-format +msgid "Failure staging lun %s to tmp." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:723 +#, python-format +msgid "Failure moving new cloned lun to %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:727 +#, python-format +msgid "Failure deleting staged tmp lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:730 +#, python-format +msgid "Unknown exception in post clone resize lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:732 +#, python-format +msgid "Exception details: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:736 +msgid "Getting lun block count." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:741 +#, python-format +msgid "Failure getting lun info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:785 +#, python-format +msgid "Failed to get vol with required size and extra specs for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:796 +#, python-format +msgid "Error provisioning vol %(name)s on %(volume)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:841 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:982 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:986 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1087 +msgid "Cluster ssc is not updated. No volume stats found." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1149 +#: cinder/volume/drivers/netapp/nfs.py:1080 +msgid "Unsupported ONTAP version. ONTAP version 7.3.1 and above is supported." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1153 +#: cinder/volume/drivers/netapp/nfs.py:1084 +#: cinder/volume/drivers/netapp/utils.py:320 +msgid "Api version could not be determined." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1164 +#, fuzzy, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "%s 볼륨을 찾을수 없습니다" + +#: cinder/volume/drivers/netapp/iscsi.py:1273 +#, python-format +msgid "Error finding luns for volume %s. Verify volume exists." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1390 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1393 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1456 +msgid "Volume refresh job already running. Returning..." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1462 +#, python-format +msgid "Error refreshing vol capacity. Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1470 +#, python-format +msgid "Refreshing capacity info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:104 +#: cinder/volume/drivers/netapp/nfs.py:211 +#, python-format +msgid "NFS file %s not discovered." 
+msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:224 +#, python-format +msgid "Copied image to volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:230 +#, python-format +msgid "Registering image in cache %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:237 +#, python-format +msgid "" +"Exception while registering image %(image_id)s in cache. Exception: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:250 +#, python-format +msgid "Found cache file for image %(image_id)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:263 +#, python-format +msgid "Cloning img from cache for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:271 +msgid "Image cache cleaning in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:282 +msgid "Image cache cleaning in progress." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:293 +#, python-format +msgid "Cleaning cache for share %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:298 +#, python-format +msgid "Files to be queued for deletion %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:305 +#, python-format +msgid "Exception during cache cleaning %(share)s. Message - %(ex)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:311 +msgid "Image cache cleaning done." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:336 +#, python-format +msgid "Bytes to free %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:343 +#, python-format +msgid "Delete file path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:358 +#, python-format +msgid "Deleting file at path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:363 +#, python-format +msgid "Exception during deleting %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:395 +#, python-format +msgid "Unexpected exception in cloning image %(image_id)s. Message: %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:411 +#, python-format +msgid "Cloning image %s from cache" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:415 +#, python-format +msgid "Cache share: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:425 +#, python-format +msgid "Unexpected exception during image cloning in share %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:431 +#, python-format +msgid "Cloning image %s directly in share" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:436 +#, python-format +msgid "Share is cloneable %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:443 +#, python-format +msgid "Image is raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:450 +#, python-format +msgid "Image will locally be converted to raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:457 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:467 +#, python-format +msgid "Performing post clone for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:474 +msgid "NFS file could not be discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:478 +msgid "Checking file for resize" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:482 +#, python-format +msgid "Resizing file to %sG" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:488 +msgid "Resizing image file failed." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:510 +msgid "Discover file retries exhausted." 
+msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:529 +#, python-format +msgid "Image location not in the expected format %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:557 +#, python-format +msgid "Found possible share matches %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:561 +msgid "Unexpected exception while short listing used share." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:599 +#, python-format +msgid "Extending volume %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:710 +#, python-format +msgid "Shares on vserver %s will only be used for provisioning." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:714 +#: cinder/volume/drivers/netapp/nfs.py:892 +msgid "No vserver set in config. SSC will be disabled." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:757 +#, python-format +msgid "Exception creating vol %(name)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:765 +#, python-format +msgid "Volume %s could not be created on shares." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:815 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:856 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:862 +#, python-format +msgid "" +"Cloning with params volume %(volume)s, src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:924 +msgid "No cluster ssc stats found. Wait for next volume stats update." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:941 +msgid "No shares found hence skipping ssc refresh." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:978 +#: cinder/volume/drivers/netapp/nfs.py:1221 +#, python-format +msgid "Shortlisted del elg files %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:983 +#: cinder/volume/drivers/netapp/nfs.py:1226 +#, python-format +msgid "Getting file usage for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:988 +#: cinder/volume/drivers/netapp/nfs.py:1231 +#, python-format +msgid "file-usage for path %(path)s is %(bytes)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1005 +#: cinder/volume/drivers/netapp/nfs.py:1268 +#, python-format +msgid "Share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1007 +#: cinder/volume/drivers/netapp/nfs.py:1270 +#, python-format +msgid "No share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1038 +#, python-format +msgid "Found volume %(vol)s for share %(share)s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1129 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1139 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:241 +#, python-format +msgid "Unexpected error while creating ssc vol list. Message - %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:272 +#, python-format +msgid "Exception querying aggr options. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:313 +#, python-format +msgid "Exception querying sis information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:347 +#, python-format +msgid "Exception querying mirror information. 
%s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:379 +#, python-format +msgid "Exception querying storage disk. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:421 +#, python-format +msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:455 +#, python-format +msgid "Successfully completed stale refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:482 +#, python-format +msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:488 +#, python-format +msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:500 +msgid "Backend not a VolumeDriver." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:502 +msgid "Backend server not NaServer." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:505 +msgid "ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:517 +msgid "refresh stale ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:616 +msgid "Fatal error: User not permitted to query NetApp volumes." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:623 +#, python-format +msgid "" +"The user does not have access or sufficient privileges to use all ssc " +"apis. The ssc features %s may not work as expected." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:122 +msgid "ems executed successfully." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:124 +#, python-format +msgid "Failed to invoke ems. Message : %s" +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:137 +msgid "" +"It is not the recommended way to use drivers by NetApp. Please use " +"NetAppDriver to achieve the functionality." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:160 +msgid "Requires an NaServer instance." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:317 +msgid "Unsupported Clustered Data ONTAP version." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:99 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:150 +#, python-format +msgid "Extending volume: %(id)s New size: %(size)s GB" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:166 +#, python-format +msgid "Volume %s does not exist, it seems it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:179 +#, python-format +msgid "Cannot delete snapshot %(origin): %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:190 +#, python-format +msgid "Creating temp snapshot of the original volume: %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:200 +#: cinder/volume/drivers/nexenta/nfs.py:200 +#, python-format +msgid "Volume creation failed, deleting created snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:205 +#: cinder/volume/drivers/nexenta/nfs.py:205 +#, python-format +msgid "Failed to delete zfs snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:223 +#, python-format +msgid "Enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:250 +#, python-format +msgid "Remote NexentaStor appliance at %s should be SSH-bound." 
+msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:267 +#, python-format +msgid "" +"Cannot send source snapshot %(src)s to destination %(dst)s. Reason: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:275 +#, python-format +msgid "" +"Cannot delete temporary source snapshot %(src)s on NexentaStor Appliance:" +" %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:281 +#, python-format +msgid "Cannot delete source volume %(volume)s on NexentaStor Appliance: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:318 +#, python-format +msgid "Snapshot %s does not exist, it seems it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:439 +#: cinder/volume/drivers/windows/windows_utils.py:230 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:449 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:461 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:471 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:481 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:514 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:522 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:83 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:88 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:89 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:90 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:96 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:85 +#, python-format +msgid "Volume %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:89 +#, python-format +msgid "Folder %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:114 +#, python-format +msgid "Creating folder on Nexenta Store %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:146 +#, python-format +msgid "Cannot destroy created folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:176 +#, python-format +msgid "Cannot destroy cloned folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:227 +#, python-format +msgid "Folder %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:237 +#: cinder/volume/drivers/nexenta/nfs.py:268 +#, python-format +msgid "Snapshot %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:302 +#, python-format +msgid "Creating regular file: %s.This may take some time." 
+msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:313 +#, python-format +msgid "Regular file: %s created." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:365 +#, python-format +msgid "Sharing folder %s on Nexenta Store" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:393 +#, python-format +msgid "Shares loaded: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/utils.py:46 +#, python-format +msgid "Invalid value: \"%s\"" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:93 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:99 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:107 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:137 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:190 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:246 +#, python-format +msgid "Snapshot info: %(name)s => %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:321 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:79 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:166 +#, python-format +msgid "Invalid hp3parclient version. Version %s or greater required." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:179 +#, python-format +msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:193 +#, python-format +msgid "HP3PARCommon %(common_ver)s, hp3parclient %(rest_ver)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:212 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:494 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:228 +#, python-format +msgid "Failed to get domain because CPG (%s) doesn't exist on array." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:247 +#, python-format +msgid "Error extending volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:347 +#, python-format +msgid "command %s failed" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:390 +#, fuzzy, python-format +msgid "Error running ssh command: %s" +msgstr "명령 실행도중 예측하지 못한 에러가 발생했습니다" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:586 +#, python-format +msgid "VV Set %s does not exist." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:633 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:684 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:752 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1004 +#, python-format +msgid "Failure in update_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1019 +#, python-format +msgid "Failure in clear_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1031 +#, python-format +msgid "Error attaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1039 +#, python-format +msgid "Error detaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:124 +#, python-format +msgid "Invalid IP address format '%s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:158 +#, python-format +msgid "" +"Found invalid iSCSI IP address(s) in configuration option(s) " +"hp3par_iscsi_ips or iscsi_ip_address '%s.'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:164 +msgid "At least one valid iSCSI IP address must be set." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:266 +msgid "Least busy iSCSI port not found, using first iSCSI port in list." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:75 +#, python-format +msgid "Failure while invoking function: %(func)s. Error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:162 +#, python-format +msgid "Error while terminating session: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:165 +msgid "Successfully established connection to the server." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:172 +#, python-format +msgid "Error while logging out the user: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:218 +#, python-format +msgid "" +"Not authenticated error occurred. Will create session and try API call " +"again: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:258 +#, python-format +msgid "Task: %(task)s progress: %(prog)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:262 +#, python-format +msgid "Task %s status: success." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:266 +#: cinder/volume/drivers/vmware/api.py:271 +#, python-format +msgid "Task: %(task)s failed with error: %(err)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:290 +msgid "Lease is ready." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:294 +msgid "Lease initializing..." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:304 +#, python-format +msgid "Error: unknown lease state %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:51 +#, python-format +msgid "Read %(bytes)s out of %(max)s from ThreadSafePipe." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:56 +#, python-format +msgid "Completed transfer of size %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:102 +#, python-format +msgid "Initiating image service update on image: %(image)s with meta: %(meta)s" +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:117 +#, python-format +msgid "Glance image: %s is now active." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:123 +#, python-format +msgid "Glance image: %s is in killed state." 
+msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:132 +#, python-format +msgid "Glance image %(id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:171 +#, python-format +msgid "" +"Exception during HTTP connection close in VMwareHTTPWrite. Exception is " +"%s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:203 +#: cinder/volume/drivers/vmware/read_write_util.py:292 +msgid "Could not retrieve URL from lease." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:206 +#, python-format +msgid "Opening vmdk url: %s for write." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:231 +#, python-format +msgid "Written %s bytes to vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:242 +#: cinder/volume/drivers/vmware/read_write_util.py:318 +#, python-format +msgid "Updating progress to %s percent." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:258 +#: cinder/volume/drivers/vmware/read_write_util.py:334 +msgid "Lease released." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:260 +#: cinder/volume/drivers/vmware/read_write_util.py:336 +#, python-format +msgid "Lease is already in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:295 +#, python-format +msgid "Opening vmdk url: %s for read." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:307 +#, python-format +msgid "Read %s bytes from vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:150 +#, python-format +msgid "Error(s): %s occurred in the call to RetrievePropertiesEx." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:189 +#, python-format +msgid "No such SOAP method %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:198 +#, python-format +msgid "httplib error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:209 +#, python-format +msgid "Socket error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:218 +#, python-format +msgid "Type error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:225 +#, python-format +msgid "Error in %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:112 +#, python-format +msgid "Returning spec value %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:115 +#, python-format +msgid "Invalid spec value: %s specified." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:118 +#, python-format +msgid "Returning default spec value: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:169 +#, python-format +msgid "%s not set." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:174 +#, python-format +msgid "Successfully setup driver: %(driver)s for server: %(ip)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:219 +msgid "Backing not available, no operation to be performed." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:287 +#, python-format +msgid "" +"Unable to pick datastore to accommodate %(size)s bytes from the " +"datastores: %(dss)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:293 +#, python-format +msgid "" +"Selected datastore: %(datastore)s with %(host_count)d connected host(s) " +"for the volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:375 +#, python-format +msgid "" +"Unable to find suitable datastore for volume of size: %(vol)s GB under " +"host: %(host)s. 
More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:385 +#, python-format +msgid "Unable to find host to accommodate a disk of size: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:412 +#, python-format +msgid "" +"Unable to find suitable datastore for volume: %(vol)s under host: " +"%(host)s. More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:422 +#, python-format +msgid "Unable to create volume: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:441 +#, python-format +msgid "The instance: %s for which initialize connection is called, exists." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:448 +#, python-format +msgid "There is no backing for the volume: %s. Need to create one." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:456 +msgid "The instance for which initialize connection is called, does not exist." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:461 +#, python-format +msgid "Trying to boot from an empty volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:470 +#, python-format +msgid "" +"Returning connection_info: %(info)s for volume: %(volume)s with " +"connector: %(connector)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:518 +#, python-format +msgid "Snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:523 +#, python-format +msgid "There is no backing, so will not create snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:528 +#, python-format +msgid "Successfully created snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:549 +#, python-format +msgid "Delete snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:554 +#, python-format +msgid "There is no backing, and so there is no snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:558 +#, python-format +msgid "Successfully deleted snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:586 +#, python-format +msgid "Successfully cloned new backing: %(back)s from source VMDK file: %(vmdk)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:603 +#, python-format +msgid "" +"There is no backing for the source volume: %(svol)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:633 +#, python-format +msgid "" +"There is no backing for the source snapshot: %(snap)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:642 +#: cinder/volume/drivers/vmware/vmdk.py:982 +#, python-format +msgid "" +"There is no snapshot point for the snapshoted volume: %(snap)s. Not " +"creating any backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:678 +#, python-format +msgid "Cannot create image of disk format: %s. Only vmdk disk format is accepted." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:713 +#: cinder/volume/drivers/vmware/vmdk.py:771 +#, python-format +msgid "Fetching glance image: %(id)s to server: %(host)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:722 +#: cinder/volume/drivers/vmware/vmdk.py:792 +#, python-format +msgid "Done copying image: %(id)s to volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:725 +#, python-format +msgid "" +"Exception in copy_image_to_volume: %(excep)s. Deleting the backing: " +"%(back)s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:746 +#, python-format +msgid "Exception in _select_ds_for_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:749 +#, python-format +msgid "Selected datastore %(ds)s for new volume of size %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:784 +#, python-format +msgid "Exception in copy_image_to_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:787 +#, python-format +msgid "Deleting the backing: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:808 +#, python-format +msgid "Copy glance image: %s to create new volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:842 +msgid "Upload to glance of attached volume is not supported." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:847 +#, python-format +msgid "Copy Volume: %s to new image." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:853 +#, python-format +msgid "Backing not found, creating for volume: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:872 +#, python-format +msgid "Done copying volume %(vol)s to a new image %(img)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:922 +#, python-format +msgid "Relocating volume: %(backing)s to %(ds)s and %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:961 +#: cinder/volume/drivers/vmware/volumeops.py:630 +#, python-format +msgid "Successfully created clone: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:974 +#, python-format +msgid "" +"There is no backing for the snapshoted volume: %(snap)s. Not creating any" +" backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1010 +#, python-format +msgid "" +"There is no backing for the source volume: %(src)s. Not creating any " +"backing for volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1018 +#, python-format +msgid "Linked clone of source volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:94 +#, python-format +msgid "Downloading image: %s from glance image server as a flat vmdk file." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:107 +#: cinder/volume/drivers/vmware/vmware_images.py:126 +#, python-format +msgid "Downloaded image: %s from glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:113 +#, python-format +msgid "Downloading image: %s from glance image server using HttpNfc import." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:132 +#, python-format +msgid "Uploading image: %s to the Glance image server using HttpNfc export." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:158 +#, python-format +msgid "Uploaded image: %s to the Glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:87 +#, python-format +msgid "Did not find any backing with name: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:94 +#, python-format +msgid "Deleting the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:97 +#, python-format +msgid "Initiated deletion of VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:99 +#, python-format +msgid "Deleted the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:237 +#, python-format +msgid "There are no valid datastores attached to %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:289 +#, python-format +msgid "" +"Creating folder: %(child_folder_name)s under parent folder: " +"%(parent_folder)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:306 +#, python-format +msgid "Child folder already present: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:314 +#, python-format +msgid "Created child folder: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:365 +#, python-format +msgid "Spec for creating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:383 +#, python-format +msgid "" +"Creating volume backing name: %(name)s disk_type: %(disk_type)s size_kb: " +"%(size_kb)s at folder: %(folder)s resourse pool: %(resource_pool)s " +"datastore name: %(ds_name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:395 +#, python-format +msgid "Initiated creation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:398 +#, python-format +msgid "Successfully created volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:438 +#, python-format +msgid "Spec for relocating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:452 +#, python-format +msgid "" +"Relocating backing: %(backing)s to datastore: %(ds)s and resource pool: " +"%(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:462 +#, python-format +msgid "Initiated relocation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:464 +#, python-format +msgid "" +"Successfully relocated volume backing: %(backing)s to datastore: %(ds)s " +"and resource pool: %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:474 +#, python-format +msgid "Moving backing: %(backing)s to folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:479 +#, python-format +msgid "Initiated move of volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:482 +#, python-format +msgid "Successfully moved volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:494 +#, python-format +msgid "Snapshoting backing: %(backing)s with name: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:501 +#, python-format +msgid "Initiated snapshot of volume backing: %(backing)s named: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:505 +#, python-format +msgid "Successfully created snapshot: %(snap)s for volume backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:553 +#, python-format +msgid "Deleting the snapshot: %(name)s from backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:558 +#, python-format +msgid "" +"Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " +"delete anything." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:565 +#, python-format +msgid "Initiated snapshot: %(name)s deletion for backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:569 +#, python-format +msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:597 +#, python-format +msgid "Spec for cloning the backing: %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:613 +#, python-format +msgid "" +"Creating a clone of backing: %(back)s, named: %(name)s, clone type: " +"%(type)s from snapshot: %(snap)s on datastore: %(ds)s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:627 +#, python-format +msgid "Initiated clone of backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:638 +#, python-format +msgid "Deleting file: %(file)s under datacenter: %(dc)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:646 +#, python-format +msgid "Initiated deletion via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:648 +#, python-format +msgid "Successfully deleted file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:701 +msgid "Copying disk data before snapshot of the VM" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:711 +#, python-format +msgid "Initiated copying disk data via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:713 +#, python-format +msgid "Successfully copied disk at: %(src)s to: %(dest)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:722 +#, python-format +msgid "Deleting vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:729 +#, python-format +msgid "Initiated deleting vmdk file via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:731 +#, python-format +msgid "Deleted vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/windows/windows.py:102 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:47 +#, python-format +msgid "" +"check_for_setup_error: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:53 +msgid "check_for_setup_error: there is no ISCSI traffic listening." +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:63 +#, python-format +msgid "" +"get_host_information: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:73 +#, python-format +msgid "" +"get_host_information: the ISCSI target information could not be " +"retrieved. WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:105 +#, python-format +msgid "" +"associate_initiator_with_iscsi_target: an association between initiator: " +"%(init)s and target name: %(target)s could not be established. WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:123 +#, python-format +msgid "" +"delete_iscsi_target: error when deleting the iscsi target associated with" +" target name: %(target)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:139 +#, python-format +msgid "" +"create_volume: error when creating the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:157 +#, python-format +msgid "" +"delete_volume: error when deleting the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:177 +#, python-format +msgid "" +"create_snapshot: error when creating the snapshot name: %(vol_name)s . 
" +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:193 +#, python-format +msgid "" +"create_volume_from_snapshot: error when creating the volume name: " +"%(vol_name)s from snapshot name: %(snap_name)s. WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:208 +#, python-format +msgid "" +"delete_snapshot: error when deleting the snapshot name: %(snap_name)s . " +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:223 +#, python-format +msgid "" +"create_iscsi_target: error when creating iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:240 +#, python-format +msgid "" +"remove_iscsi_target: error when deleting iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:255 +#, python-format +msgid "" +"add_disk_to_target: error adding disk associated to volume : %(vol_name)s" +" to the target name: %(tar_name)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:273 +#, python-format +msgid "" +"copy_vhd_disk: error when copying disk from source path : %(src_path)s to" +" destination path: %(dest_path)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:290 +#, python-format +msgid "" +"extend: error when extending the volume: %(vol_name)s .WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/flows/common.py:52 +#, python-format +msgid "Restoring source %(source_volid)s status to %(status)s" +msgstr "" + +#: cinder/volume/flows/common.py:58 +#, python-format +msgid "" +"Failed setting source volume %(source_volid)s back to its initial " +"%(source_status)s status" +msgstr "" + +#: cinder/volume/flows/common.py:83 +#, python-format +msgid "Updating volume: %(volume_id)s with %(update)s due to: %(reason)s" +msgstr "" + +#: cinder/volume/flows/common.py:90 +#: cinder/volume/flows/manager/create_volume.py:676 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(update)s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:81 +#, python-format +msgid "Originating snapshot status must be one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:103 +#, python-format +msgid "" +"Unable to create a volume from an originating source volume when its " +"status is not one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:126 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than the snapshot size " +"%(snap_size)sGB. They must be >= original snapshot size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:135 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than original volume size " +"%(source_size)sGB. They must be >= original volume size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:144 +#, python-format +msgid "Volume size %(size)s must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:186 +#, python-format +msgid "" +"Size of specified image %(image_size)sGB is larger than volume size " +"%(volume_size)sGB." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:194 +#, python-format +msgid "" +"Volume size %(volume_size)sGB cannot be smaller than the image minDisk " +"size %(min_disk)sGB." 
+msgstr "" + +#: cinder/volume/flows/api/create_volume.py:212 +#, python-format +msgid "Metadata property key %s greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:217 +#, python-format +msgid "Metadata property key %s value greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:254 +#, python-format +msgid "Availability zone '%s' is invalid" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:267 +msgid "Volume must be in the same availability zone as the snapshot" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:276 +msgid "Volume must be in the same availability zone as the source volume" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:315 +msgid "Volume type will be changed to be the same as the source volume." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:463 +#, python-format +msgid "Failed destroying volume entry %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:546 +#, python-format +msgid "Failed rolling back quota for %s reservations" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:590 +#, python-format +msgid "Failed to update quota for deleting volume: %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:678 +#: cinder/volume/flows/manager/create_volume.py:192 +#, python-format +msgid "Volume %s: create failed" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:682 +msgid "Unexpected build error:" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:105 +#, python-format +msgid "" +"Volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d due to " +"%(reason)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:124 +#, python-format +msgid "Volume %s: re-scheduled" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:141 +#, python-format +msgid "Updating volume %(volume_id)s with %(update)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:146 +#, python-format +msgid "Volume %s: resetting 'creating' status failed." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:165 +#, python-format +msgid "Volume %s: rescheduling failed" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:308 +#, python-format +msgid "" +"Failed notifying about the volume action %(event)s for volume " +"%(volume_id)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:345 +#, python-format +msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:347 +#, python-format +msgid "" +"Failed updating volume %(vol_id)s metadata using the provided " +"%(src_type)s %(src_id)s metadata" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:405 +#, python-format +msgid "" +"Failed fetching snapshot %(snapshot_id)s bootable flag using the provided" +" glance snapshot %(snapshot_ref_id)s volume reference" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:418 +#, python-format +msgid "Marking volume %s as bootable." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:421 +#, python-format +msgid "Failed updating volume %(volume_id)s bootable flag to true" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:448 +#, python-format +msgid "" +"Attempting download of %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s." 
+msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:455 +#: cinder/volume/flows/manager/create_volume.py:466 +#, python-format +msgid "" +"Failed to copy image %(image_id)s to volume: %(volume_id)s, error: " +"%(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:461 +#, python-format +msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:475 +#, python-format +msgid "" +"Downloaded image %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s successfully." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:512 +#, python-format +msgid "" +"Creating volume glance metadata for volume %(volume_id)s backed by image " +"%(image_id)s with: %(vol_metadata)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:526 +#, python-format +msgid "" +"Cloning %(volume_id)s from image %(image_id)s at location " +"%(image_location)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:552 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(updates)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:574 +#, python-format +msgid "Unable to create volume. Volume driver %s not initialized" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:588 +#, python-format +msgid "" +"Volume %(volume_id)s: being created using %(functor)s with specification:" +" %(volume_spec)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:611 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with creation provided " +"model %(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:619 +#, python-format +msgid "Volume %s: creating export" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:633 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with driver provided model " +"%(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:680 +#, python-format +msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" +msgstr "" + +#~ msgid "Error retrieving volume status: %s" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get system name" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get storage pool data" +#~ msgstr "" + +#~ msgid "Cannot find any Fibre Channel HBAs" +#~ msgstr "" + +#~ msgid "Volume status must be available or error" +#~ msgstr "볼륨의 상태를 알 수 없습니다" + +#~ msgid "No backend config with id %s" +#~ msgstr "" + +#~ msgid "No sm_flavor called %s" +#~ msgstr "" + +#~ msgid "No sm_volume with id %s" +#~ msgstr "" + +#~ msgid "Error: %s" +#~ msgstr "" + +#~ msgid "Unexpected state while cloning %s" +#~ msgstr "명령 실행도중 예측하지 못한 에러가 발생했습니다" + +#~ msgid "iSCSI device not found at %s" +#~ msgstr "" + +#~ msgid "Fibre Channel device not found." 
+#~ msgstr "" + +#~ msgid "Uncaught exception" +#~ msgstr "" + +#~ msgid "Out reactor registered" +#~ msgstr "" + +#~ msgid "CONSUMER GOT %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT QUEUED %(data)s" +#~ msgstr "" + +#~ msgid "Could not create IPC directory %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT %(data)s" +#~ msgstr "" + +#~ msgid "May specify only one of snapshot, imageRef or source volume" +#~ msgstr "" + +#~ msgid "Volume size cannot be lesser than the Snapshot size" +#~ msgstr "" + +#~ msgid "Unable to clone volumes that are in an error state" +#~ msgstr "" + +#~ msgid "Clones currently must be >= original volume size." +#~ msgstr "" + +#~ msgid "Volume size '%s' must be an integer and greater than 0" +#~ msgstr "" + +#~ msgid "Size of specified image is larger than volume size." +#~ msgstr "" + +#~ msgid "Image minDisk size is larger than the volume size." +#~ msgstr "" + +#~ msgid "" +#~ msgstr "" + +#~ msgid "Availability zone is invalid" +#~ msgstr "" + +#~ msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +#~ msgstr "" + +#~ msgid "volume %s: creating from snapshot" +#~ msgstr "" + +#~ msgid "volume %s: creating from existing volume" +#~ msgstr "" + +#~ msgid "volume %s: creating from image" +#~ msgstr "" + +#~ msgid "volume %s: creating" +#~ msgstr "" + +#~ msgid "Setting volume: %s status to error after failed image copy." +#~ msgstr "" + +#~ msgid "Unexpected Error: " +#~ msgstr "" + +#~ msgid "volume %s: creating export" +#~ msgstr "" + +#~ msgid "volume %s: create failed" +#~ msgstr "" + +#~ msgid "volume %s: created successfully" +#~ msgstr "" + +#~ msgid "volume %s: Error trying to reschedule create" +#~ msgstr "" + +#~ msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +#~ msgstr "" + +#~ msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +#~ msgstr "" + +#~ msgid "Downloaded image %(image_id)s to %(volume_id)s successfully." +#~ msgstr "" + +#~ msgid "Array Mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "LUN %(lun)s of size %(size)s MB is created." +#~ msgstr "" + +#~ msgid "Array mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "Failed to attach iser target for volume %(volume_id)s." 
+#~ msgstr "" + +#~ msgid "Fetching %s" +#~ msgstr "" + +#~ msgid "Link Local address is not found.:%s" +#~ msgstr "" + +#~ msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +#~ msgstr "" + +#~ msgid "Started %(name)s on %(_host)s:%(_port)s" +#~ msgstr "" + +#~ msgid "Unable to find a Fibre Channel volume device" +#~ msgstr "" + +#~ msgid "Volume device not found at %s" +#~ msgstr "" + +#~ msgid "Unable to find Volume Group: %s" +#~ msgstr "%s 볼륨 탈착에 실패했습니다" + +#~ msgid "Failed to create Volume Group: %s" +#~ msgstr "%s 볼륨을 찾을수 없습니다" + +#~ msgid "snapshot %(snap_name)s: creating" +#~ msgstr "" + +#~ msgid "Running with CoraidDriver for ESM EtherCLoud" +#~ msgstr "" + +#~ msgid "Update session cookie %(session)s" +#~ msgstr "" + +#~ msgid "Message : %(message)s" +#~ msgstr "" + +#~ msgid "Error while trying to set group: %(message)s" +#~ msgstr "" + +#~ msgid "Unable to find group: %(group)s" +#~ msgstr "%s 볼륨 탈착에 실패했습니다" + +#~ msgid "ESM urlOpen error" +#~ msgstr "" + +#~ msgid "JSON Error" +#~ msgstr "" + +#~ msgid "Request without URL" +#~ msgstr "" + +#~ msgid "Configure data : %s" +#~ msgstr "" + +#~ msgid "Configure response : %s" +#~ msgstr "" + +#~ msgid "Unable to retrive volume infos for volume %(volname)s" +#~ msgstr "" + +#~ msgid "Cannot login on Coraid ESM" +#~ msgstr "" + +#~ msgid "Fail to create volume %(volname)s" +#~ msgstr "%s 볼륨을 찾을수 없습니다" + +#~ msgid "Failed to delete volume %(volname)s" +#~ msgstr "%s 볼륨을 찾을수 없습니다" + +#~ msgid "Failed to Create Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "Failed to Delete Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "Failed to Create Volume from Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "fmt = %(fmt)s backed by: %(backing_file)s" +#~ msgstr "" + +#~ msgid "Expected image to be in raw format, but is %s" +#~ msgstr "" + +#~ msgid "volume group %s doesn't exist" +#~ msgstr "" + +#~ msgid "Error retrieving volume stats: %s" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Could not get system name" +#~ msgstr "" + +#~ msgid "CPG (%s) must be in a domain" +#~ msgstr "" + +#~ msgid "Error populating default encryption types!" +#~ msgstr "" + +#~ msgid "Unexpected error while running command." 
+#~ msgstr "명령 실행도중 예측하지 못한 에러가 발생했습니다" + +#~ msgid "Nexenta SA returned the error" +#~ msgstr "" + +#~ msgid "Ignored target group creation error \"%s\" while ensuring export" +#~ msgstr "" -#: cinder/volume/netapp.py:607 cinder/volume/netapp.py:661 -#, python-format -msgid "Failed to get LUN details for LUN ID %s" -msgstr "" +#~ msgid "Ignored target group member addition error \"%s\" while ensuring export" +#~ msgstr "" -#: cinder/volume/netapp.py:614 -#, python-format -msgid "Failed to get host details for host ID %s" -msgstr "" +#~ msgid "Ignored LU creation error \"%s\" while ensuring export" +#~ msgstr "" -#: cinder/volume/netapp.py:620 -#, python-format -msgid "Failed to get target portal for filer: %s" -msgstr "" +#~ msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +#~ msgstr "" -#: cinder/volume/netapp.py:625 -#, python-format -msgid "Failed to get target IQN for filer: %s" -msgstr "" +#~ msgid "Copying metadata from snapshot %(snap_volume_id)s to %(volume_id)s" +#~ msgstr "" -#: cinder/volume/san.py:113 cinder/volume/san.py:151 -msgid "Specify san_password or san_private_key" -msgstr "" +#~ msgid "Connection to glance failed" +#~ msgstr "" -#: cinder/volume/san.py:156 -msgid "san_ip must be set" -msgstr "" +#~ msgid "Invalid snapshot" +#~ msgstr "" -#: cinder/volume/san.py:320 -#, python-format -msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" -msgstr "" +#~ msgid "Invalid input received" +#~ msgstr "" -#: cinder/volume/san.py:452 -#, python-format -msgid "CLIQ command returned %s" -msgstr "" +#~ msgid "Invalid volume type" +#~ msgstr "" -#: cinder/volume/san.py:458 -#, python-format -msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" -msgstr "" +#~ msgid "Invalid volume" +#~ msgstr "" -#: cinder/volume/san.py:466 -#, python-format -msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" -msgstr "" +#~ msgid "Invalid host" +#~ msgstr "" -#: cinder/volume/san.py:496 -#, python-format -msgid "" -"Unexpected number of virtual ips for cluster %(cluster_name)s. " -"Result=%(_xml)s" -msgstr "" +#~ msgid "Invalid auth key" +#~ msgstr "" -#: cinder/volume/san.py:549 -#, python-format -msgid "Volume info: %(volume_name)s => %(volume_attributes)s" -msgstr "" +#~ msgid "Invalid metadata" +#~ msgstr "" -#: cinder/volume/san.py:594 -msgid "local_path not supported" -msgstr "" +#~ msgid "Invalid metadata size" +#~ msgstr "" -#: cinder/volume/san.py:626 -#, python-format -msgid "Could not determine project for volume %s, can't export" -msgstr "" +#~ msgid "Migration error" +#~ msgstr "" -#: cinder/volume/san.py:696 -#, python-format -msgid "Payload for SolidFire API call: %s" -msgstr "" +#~ msgid "Quota exceeded" +#~ msgstr "" -#: cinder/volume/san.py:713 -#, python-format -msgid "Call to json.loads() raised an exception: %s" -msgstr "" +#~ msgid "Connection to swift failed" +#~ msgstr "" -#: cinder/volume/san.py:718 -#, python-format -msgid "Results of SolidFire API call: %s" -msgstr "" +#~ msgid "Volume migration failed" +#~ msgstr "" -#: cinder/volume/san.py:732 -#, python-format -msgid "Found solidfire account: %s" -msgstr "" +#~ msgid "SSH command injection detected" +#~ msgstr "" -#: cinder/volume/san.py:746 -#, python-format -msgid "solidfire account: %s does not exist, create it..." -msgstr "" +#~ msgid "Invalid qos specs" +#~ msgstr "" -#: cinder/volume/san.py:804 -msgid "Enter SolidFire create_volume..." 
-msgstr "" +#~ msgid "debug in callback: %s" +#~ msgstr "" -#: cinder/volume/san.py:846 -msgid "Leaving SolidFire create_volume" -msgstr "" +#~ msgid "Expected object of type: %s" +#~ msgstr "" -#: cinder/volume/san.py:861 -msgid "Enter SolidFire delete_volume..." -msgstr "" +#~ msgid "timefunc: '%(name)s' took %(total_time).2f secs" +#~ msgstr "" -#: cinder/volume/san.py:880 -#, python-format -msgid "Deleting volumeID: %s " -msgstr "" +#~ msgid "base image still has %s snapshots so not deleting base image" +#~ msgstr "" -#: cinder/volume/san.py:888 -msgid "Leaving SolidFire delete_volume" -msgstr "" +#~ msgid "Failed to rename migration destination volume %(vol)s to %(name)s" +#~ msgstr "" -#: cinder/volume/san.py:891 -msgid "Executing SolidFire ensure_export..." -msgstr "" +#~ msgid "Resize volume \"%(name)s\" to %(size)s" +#~ msgstr "" -#: cinder/volume/san.py:895 -msgid "Executing SolidFire create_export..." -msgstr "" +#~ msgid "Volume \"%(name)s\" resized. New size is %(size)s" +#~ msgstr "" -#: cinder/volume/volume_types.py:49 cinder/volume/volume_types.py:108 -msgid "name cannot be None" -msgstr "" +#~ msgid "Invalid snapshot backing file format: %s" +#~ msgstr "" -#: cinder/volume/volume_types.py:96 -msgid "id cannot be None" -msgstr "" +#~ msgid "Extend volume from %(old_size) to %(new_size)" +#~ msgstr "" -#: cinder/volume/xensm.py:55 -#, python-format -msgid "SR name = %s" -msgstr "" +#~ msgid "pool %s doesn't exist" +#~ msgstr "" -#: cinder/volume/xensm.py:56 -#, python-format -msgid "Params: %s" -msgstr "" +#~ msgid "_update_volume_stats: Could not get system name." +#~ msgstr "" -#: cinder/volume/xensm.py:60 -#, python-format -msgid "Failed to create sr %s...continuing" -msgstr "" +#~ msgid "Disk not found: %s" +#~ msgstr "" -#: cinder/volume/xensm.py:62 -msgid "Create failed" -msgstr "" +#~ msgid "read timed out" +#~ msgstr "" -#: cinder/volume/xensm.py:64 -#, python-format -msgid "SR UUID of new SR is: %s" -msgstr "" +#~ msgid "check_for_setup_error." +#~ msgstr "" -#: cinder/volume/xensm.py:71 -msgid "Failed to update db" -msgstr "" +#~ msgid "check_for_setup_error: Can not get device type." +#~ msgstr "" -#: cinder/volume/xensm.py:80 -#, python-format -msgid "Failed to introduce sr %s...continuing" -msgstr "" +#~ msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +#~ msgstr "" -#: cinder/volume/xensm.py:91 -#, python-format -msgid "Failed to reach backend %d" -msgstr "" +#~ msgid "_get_device_type: Storage Pool must be configured." +#~ msgstr "" -#: cinder/volume/xensm.py:100 -msgid "XenSMDriver requires xenapi connection" -msgstr "" +#~ msgid "create_volume:volume name: %s." +#~ msgstr "" -#: cinder/volume/xensm.py:110 -msgid "Failed to initiate session" -msgstr "" +#~ msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +#~ msgstr "" -#: cinder/volume/xensm.py:142 -#, python-format -msgid "Volume will be created in backend - %d" -msgstr "" +#~ msgid "create_export: volume name:%s" +#~ msgstr "" -#: cinder/volume/xensm.py:154 -msgid "Failed to update volume in db" -msgstr "" +#~ msgid "create_export:Volume %(name)s does not exist." +#~ msgstr "" -#: cinder/volume/xensm.py:157 -msgid "Unable to create volume" -msgstr "" +#~ msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +#~ msgstr "" -#: cinder/volume/xensm.py:171 -msgid "Failed to delete vdi" -msgstr "" +#~ msgid "terminate_connection:Host does not exist. Host name:%(host)s." 
+#~ msgstr "" -#: cinder/volume/xensm.py:177 -msgid "Failed to delete volume in db" -msgstr "" +#~ msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +#~ msgstr "" -#: cinder/volume/xensm.py:210 -msgid "Failed to find volume in db" -msgstr "" +#~ msgid "create_snapshot:Device does not support snapshot." +#~ msgstr "" -#: cinder/volume/xensm.py:221 -msgid "Failed to find backend in db" -msgstr "" +#~ msgid "create_snapshot:Resource pool needs 1GB valid size at least." +#~ msgstr "" -#: cinder/volume/nexenta/__init__.py:27 -msgid "Nexenta SA returned the error" -msgstr "" +#~ msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:64 -#, python-format -msgid "Sending JSON data: %s" -msgstr "" +#~ msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s" +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:69 -#, python-format -msgid "Auto switching to HTTPS connection to %s" -msgstr "" +#~ msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:75 -msgid "No headers in server response" -msgstr "" +#~ msgid "delete_snapshot:Device does not support snapshot." +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:76 -msgid "Bad response from server" -msgstr "" +#~ msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:79 -#, python-format -msgid "Got response: %s" -msgstr "" +#~ msgid "_check_conf_file: %s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:96 -#, python-format -msgid "Volume %s does not exist in Nexenta SA" -msgstr "" +#~ msgid "Write login information to xml error. %s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:180 -msgid "" -"Call to local_path should not happen. Verify that use_local_volumes flag " -"is turned off." -msgstr "" +#~ msgid "_get_login_info error. %s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:202 -#, python-format -msgid "Ignored target creation error \"%s\" while ensuring export" -msgstr "" +#~ msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:210 -#, python-format -msgid "Ignored target group creation error \"%s\" while ensuring export" -msgstr "" +#~ msgid "_get_lun_set_info:%s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:219 -#, python-format -msgid "Ignored target group member addition error \"%s\" while ensuring export" -msgstr "" +#~ msgid "_get_iscsi_info:%s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:227 -#, python-format -msgid "Ignored LU creation error \"%s\" while ensuring export" -msgstr "" +#~ msgid "CLI command:%s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:237 -#, python-format -msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" -msgstr "" +#~ msgid "_execute_cli:%s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:273 -#, python-format -msgid "" -"Got error trying to destroy target group %(target_group)s, assuming it is" -" already gone: %(exc)s" -msgstr "" +#~ msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:280 -#, python-format -msgid "" -"Got error trying to delete target %(target)s, assuming it is already " -"gone: %(exc)s" -msgstr "" +#~ msgid "_get_tgt_iqn:iSCSI IP is %s." 
+#~ msgstr "" -#~ msgid "Unable to locate account %(account_name) on Solidfire device" +#~ msgid "_get_tgt_iqn:iSCSI target iqn is:%s" #~ msgstr "" -#~ msgid "Zone %(zone_id)s could not be found." +#~ msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" #~ msgstr "" -#~ msgid "Cinder access parameters were not specified." +#~ msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" #~ msgstr "" -#~ msgid "Virtual Storage Array %(id)d could not be found." +#~ msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." #~ msgstr "" -#~ msgid "Virtual Storage Array %(name)s could not be found." +#~ msgid "Ignored target creation error while ensuring export" #~ msgstr "" -#~ msgid "Detected more than one volume with name %(vol_name)" +#~ msgid "Ignored target group creation error while ensuring export" #~ msgstr "" -#~ msgid "Detected existing vlan with id %(vlan)" +#~ msgid "Ignored target group member addition error while ensuring export" #~ msgstr "" -#~ msgid "" -#~ "Attempting to grab semaphore \"%(lock)s\" " -#~ "for method \"%(method)s\"...lock" +#~ msgid "Ignored LU creation error while ensuring export" #~ msgstr "" -#~ msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgid "Ignored LUN mapping entry addition error while ensuring export" #~ msgstr "" -#~ msgid "" -#~ "Attempting to grab file lock " -#~ "\"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgid "Invalid source volume %(reason)s." #~ msgstr "" -#~ msgid "Got file lock \"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgid "The request is invalid." #~ msgstr "" -#~ msgid "Parent group id and group id cannot be same" +#~ msgid "Volume %(volume_id)s persistence file could not be found." #~ msgstr "" -#~ msgid "No body provided" +#~ msgid "No disk at %(location)s" #~ msgstr "" -#~ msgid "Create VSA %(display_name)s of type %(vc_type)s" +#~ msgid "Class %(class_name)s could not be found: %(exception)s" #~ msgstr "" -#~ msgid "Delete VSA with id: %s" +#~ msgid "Action not allowed." #~ msgstr "" -#~ msgid "Associate address %(ip)s to VSA %(id)s" +#~ msgid "Key pair %(key_name)s already exists." #~ msgstr "" -#~ msgid "Disassociate address from VSA %(id)s" +#~ msgid "Migration error: %(reason)s" #~ msgstr "" -#~ msgid "%(obj)s with ID %(id)s not found" +#~ msgid "Maximum volume/snapshot size exceeded" #~ msgstr "" -#~ msgid "" -#~ "%(obj)s with ID %(id)s belongs to " -#~ "VSA %(own_vsa_id)s and not to VSA " -#~ "%(vsa_id)s." +#~ msgid "3PAR Host already exists: %(err)s. %(info)s" #~ msgstr "" -#~ msgid "Index. vsa_id=%(vsa_id)s" +#~ msgid "Backup volume %(volume_id)s type not recognised." #~ msgstr "" -#~ msgid "Detail. vsa_id=%(vsa_id)s" +#~ msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s" #~ msgstr "" -#~ msgid "Create. vsa_id=%(vsa_id)s, body=%(body)s" +#~ msgid "ssh_read: Read SSH timeout" #~ msgstr "" -#~ msgid "Create volume of %(size)s GB from VSA ID %(vsa_id)s" +#~ msgid "do_setup." #~ msgstr "" -#~ msgid "Update %(obj)s with id: %(id)s, changes: %(changes)s" +#~ msgid "create_volume: volume name: %s." #~ msgstr "" -#~ msgid "Delete. vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgid "delete_volume: volume name: %s." #~ msgstr "" -#~ msgid "Show. 
vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgid "create_cloned_volume: src volume: %(src)s tgt volume: %(tgt)s" #~ msgstr "" -#~ msgid "Index instances for VSA %s" +#~ msgid "create_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" #~ msgstr "" -#~ msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances" +#~ msgid "delete_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" #~ msgstr "" -#~ msgid "" -#~ "Instance quota exceeded. You cannot run" -#~ " any more instances of this type." +#~ msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s" #~ msgstr "" -#~ msgid "" -#~ "Instance quota exceeded. You can only" -#~ " run %s more instances of this " -#~ "type." +#~ msgid "_update_volume_stats: Updating volume stats" #~ msgstr "" -#~ msgid "Going to try to soft delete %s" +#~ msgid "restore finished." #~ msgstr "" -#~ msgid "No host for instance %s, deleting immediately" +#~ msgid "Error encountered during initialization of driver: %s" #~ msgstr "" -#~ msgid "Going to try to terminate %s" +#~ msgid "Unabled to update stats, driver is uninitialized" #~ msgstr "" -#~ msgid "Going to try to stop %s" +#~ msgid "Snapshot file at %s does not exist." #~ msgstr "" -#~ msgid "Going to try to start %s" +#~ msgid "_create_copy: Source vdisk %s does not exist" #~ msgstr "" -#~ msgid "" -#~ "Going to force the deletion of the" -#~ " vm %(instance_uuid)s, even if it is" -#~ " deleted" +#~ msgid "Login to 3PAR array invalid" #~ msgstr "" -#~ msgid "" -#~ "Instance %(instance_uuid)s did not exist " -#~ "in the DB, but I will shut " -#~ "it down anyway using a special " -#~ "context" +#~ msgid "There are no datastores present under %s." #~ msgstr "" -#~ msgid "exception terminating the instance %(instance_uuid)s" +#~ msgid "Size for volume: %s not found, skipping secure delete." #~ msgstr "" -#~ msgid "trying to destroy already destroyed instance: %s" -#~ msgstr "인스턴스 %s가 이미 삭제되었습니다" +#~ msgid "Could not find attribute for LUN named %s" +#~ msgstr "" -#~ msgid "" -#~ "Instance %(name)s found in database but" -#~ " not known by hypervisor. Setting " -#~ "power state to NOSTATE" +#~ msgid "Cleaning up incomplete backup operations" #~ msgstr "" -#~ msgid "" -#~ "Detected instance with name label " -#~ "'%(name_label)s' which is marked as " -#~ "DELETED but still present on host." +#~ msgid "Resetting volume %s to available (was backing-up)" #~ msgstr "" -#~ msgid "" -#~ "Destroying instance with name label " -#~ "'%(name_label)s' which is marked as " -#~ "DELETED but still present on host." +#~ msgid "Resetting volume %s to error_restoring (was restoring-backup)" #~ msgstr "" -#~ msgid "SQL connection failed (%(connstring)s). %(attempts)d attempts left." +#~ msgid "Resetting backup %s to error (was creating)" #~ msgstr "" -#~ msgid "Can't downgrade without losing data" +#~ msgid "Resetting backup %s to available (was restoring)" #~ msgstr "" -#~ msgid "Instance %(instance_id)s not found" +#~ msgid "Resuming delete on backup: %s" #~ msgstr "" -#~ msgid "Network %s has active ports, cannot delete" +#~ msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" #~ msgstr "" -#~ msgid "No fixed IPs to deallocate for vif %sid" +#~ msgid "create_backup finished. backup: %s" #~ msgstr "" -#~ msgid "" -#~ "AMQP server on %(fl_host)s:%(fl_port)d is " -#~ "unreachable: %(e)s. Trying again in " -#~ "%(fl_intv)d seconds." +#~ msgid "delete_backup started, backup: %s" #~ msgstr "" -#~ msgid "Unable to connect to AMQP server after %(tries)d tries. Shutting down." 
+#~ msgid "delete_backup finished, backup %s deleted" #~ msgstr "" -#~ msgid "Reconnected to queue" +#~ msgid "JSON transfer Error" #~ msgstr "" -#~ msgid "Failed to fetch message from queue: %s" +#~ msgid "create volume error: %(err)s" #~ msgstr "" -#~ msgid "Initing the Adapter Consumer for %s" +#~ msgid "Create snapshot error." #~ msgstr "" -#~ msgid "Created \"%(exchange)s\" fanout exchange with \"%(key)s\" routing key" +#~ msgid "Create luncopy error." #~ msgstr "" -#~ msgid "Exception while processing consumer" +#~ msgid "_find_host_lun_id transfer data error! " #~ msgstr "" -#~ msgid "Creating \"%(exchange)s\" fanout exchange" +#~ msgid "ssh_read: Read SSH timeout." #~ msgstr "" -#~ msgid "response %s" +#~ msgid "There are no hosts in the inventory." #~ msgstr "" -#~ msgid "topic is %s" +#~ msgid "Unable to create volume: %(vol)s on the hosts: %(hosts)s." #~ msgstr "" -#~ msgid "message %s" +#~ msgid "Successfully cloned new backing: %s." #~ msgstr "" -#~ msgid "" -#~ "Cannot confirm tmpfile at %(ipath)s is" -#~ " on same shared storage between " -#~ "%(src)s and %(dest)s." +#~ msgid "Successfully reverted clone: %(clone)s to snapshot: %(snapshot)s." #~ msgstr "" -#~ msgid "" -#~ "Unable to migrate %(instance_id)s to " -#~ "%(dest)s: Lack of memory(host:%(avail)s <= " -#~ "instance:%(mem_inst)s)" +#~ msgid "Copying backing files from %(src)s to %(dest)s." #~ msgstr "" -#~ msgid "" -#~ "Unable to migrate %(instance_id)s to " -#~ "%(dest)s: Lack of disk(host:%(available)s <=" -#~ " instance:%(necessary)s)" +#~ msgid "Initiated copying of backing via task: %s." #~ msgstr "" -#~ msgid "Driver Method %(driver_method)s missing: %(e)s.Reverting to schedule()" +#~ msgid "Successfully copied backing to %s." #~ msgstr "" -#~ msgid "Setting instance %(instance_uuid)s to ERROR state." +#~ msgid "Registering backing at path: %s to inventory." #~ msgstr "" -#~ msgid "_filter_hosts: %(request_spec)s" +#~ msgid "Initiated registring backing, task: %s." #~ msgstr "" -#~ msgid "Filter hosts for drive type %s" +#~ msgid "Successfully registered backing: %s." #~ msgstr "" -#~ msgid "Host %s has no free capacity. Skip" +#~ msgid "Reverting backing to snapshot: %s." #~ msgstr "" -#~ msgid "Filter hosts: %s" +#~ msgid "Initiated reverting snapshot via task: %s." #~ msgstr "" -#~ msgid "Must implement host selection mechanism" +#~ msgid "Successfully reverted to snapshot: %s." #~ msgstr "" -#~ msgid "Maximum number of hosts selected (%d)" +#~ msgid "Successfully copied disk data to: %s." #~ msgstr "" -#~ msgid "Selected excessive host %(host)s" +#~ msgid "Error(s): %s occurred in the call to RetrieveProperties." #~ msgstr "" -#~ msgid "Provision volume %(name)s of size %(size)s GB on host %(host)s" +#~ msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" #~ msgstr "" -#~ msgid "volume_params %(volume_params)s" +#~ msgid "Deploy v1 of the Cinder API. " #~ msgstr "" -#~ msgid "%(i)d: Volume %(name)s" +#~ msgid "Deploy v2 of the Cinder API. " #~ msgstr "" -#~ msgid "Attempting to spawn %(num_volumes)d volume(s)" +#~ msgid "_read_xml:%s" #~ msgstr "" -#~ msgid "Error creating volumes" +#~ msgid "request ip info is %s." #~ msgstr "" -#~ msgid "Non-VSA volume %d" +#~ msgid "new str info is %s." #~ msgstr "" -#~ msgid "Spawning volume %(volume_id)s with drive type %(drive_type)s" +#~ msgid "Failed to create iser target for volume %(volume_id)s." #~ msgstr "" -#~ msgid "Error creating volume" +#~ msgid "Failed to remove iser target for volume %(volume_id)s." 
#~ msgstr "" -#~ msgid "No capability selected for volume of size %(size)s" +#~ msgid "rtstool is not installed correctly" #~ msgstr "" -#~ msgid "Host %s:" +#~ msgid "Creating iser_target for: %s" #~ msgstr "" -#~ msgid "" -#~ "\tDrive %(qosgrp)-25s: total %(total)2s, " -#~ "used %(used)2s, free %(free)2s. Available " -#~ "capacity %(avail)-5s" +#~ msgid "Failed to create iser target for volume id:%(vol_id)s: %(e)s" #~ msgstr "" -#~ msgid "" -#~ "\t LeastUsedHost: Best host: %(best_host)s." -#~ " (used capacity %(min_used)s)" +#~ msgid "Removing iser_target for: %s" #~ msgstr "" -#~ msgid "" -#~ "\t MostAvailCap: Best host: %(best_host)s. " -#~ "(available %(max_avail)s %(type_str)s)" +#~ msgid "Failed to remove iser target for volume id:%(vol_id)s: %(e)s" #~ msgstr "" -#~ msgid "(%(nm)s) publish (key: %(routing_key)s) %(message)s" +#~ msgid "Volume %s does not exist, it seems it was already deleted" #~ msgstr "" -#~ msgid "Publishing to route %s" +#~ msgid "Executing zfs send/recv on the appliance" #~ msgstr "" -#~ msgid "Declaring queue %s" +#~ msgid "zfs send/recv done, new volume %s created" #~ msgstr "" -#~ msgid "Declaring exchange %s" +#~ msgid "Failed to delete temp snapshot %(volume)s@%(snapshot)s" #~ msgstr "" -#~ msgid "Binding %(queue)s to %(exchange)s with key %(routing_key)s" +#~ msgid "Failed to delete zfs recv snapshot %(volume)s@%(snapshot)s" #~ msgstr "" -#~ msgid "Getting from %(queue)s: %(message)s" +#~ msgid "rbd export-diff failed - %s" #~ msgstr "" -#~ msgid "Test: Emulate wrong VSA name. Raise" +#~ msgid "rbd import-diff failed - %s" #~ msgstr "" -#~ msgid "Test: Emulate DB error. Raise" +#~ msgid "%s is not on GPFS. Perhaps GPFS not mounted." #~ msgstr "" -#~ msgid "Test: user_data = %s" +#~ msgid "Folder %s does not exist, it seems it was already deleted." #~ msgstr "" -#~ msgid "_create: param=%s" +#~ msgid "No 'os-update_readonly_flag' was specified in request." #~ msgstr "" -#~ msgid "Host %s" +#~ msgid "Volume 'readonly' flag must be specified in request as a boolean." #~ msgstr "" -#~ msgid "Test: provision vol %(name)s on host %(host)s" +#~ msgid "ISER provider_location not stored, using discovery" #~ msgstr "" -#~ msgid "\t vol=%(vol)s" +#~ msgid "Could not find iSER export for volume %s" #~ msgstr "" -#~ msgid "Test: VSA update request: vsa_id=%(vsa_id)s values=%(values)s" +#~ msgid "ISER Discovery: Found %s" #~ msgstr "" -#~ msgid "Test: Volume create: %s" +#~ msgid "Failed to access the device on the path %(path)s: %(error)s." #~ msgstr "" -#~ msgid "Test: Volume get request: id=%(volume_id)s" +#~ msgid "iSER device not found at %s" #~ msgstr "" -#~ msgid "Test: Volume update request: id=%(volume_id)s values=%(values)s" +#~ msgid "Found iSER node %(host_device)s (after %(tries)s rescans)." #~ msgstr "" -#~ msgid "Test: Volume get: id=%(volume_id)s" +#~ msgid "Skipping ensure_export. No iser_target provisioned for volume: %s" #~ msgstr "" -#~ msgid "Task [%(name)s] %(task)s status: success %(result)s" +#~ msgid "Skipping remove_export. No iser_target provisioned for volume: %s" #~ msgstr "" -#~ msgid "Task [%(name)s] %(task)s status: %(status)s %(error_info)s" +#~ msgid "Downloading image: %s from glance image server." #~ msgstr "" -#~ msgid "Unable to get updated status: %s" +#~ msgid "Uploading image: %s to the Glance image server." 
#~ msgstr "" -#~ msgid "" -#~ "deactivate_node is called for " -#~ "node_id = %(id)s node_ip = %(ip)s" +#~ msgid "Invalid request body" #~ msgstr "" -#~ msgid "virsh said: %r" +#~ msgid "enter: _get_host_from_connector: prefix %s" #~ msgstr "" -#~ msgid "cool, it's a device" +#~ msgid "Schedule volume flow not retrieved" #~ msgstr "" -#~ msgid "Unable to read LXC console" +#~ msgid "Failed to successfully complete schedule volume using flow: %s" #~ msgstr "" -#~ msgid "" -#~ "to xml...\n" -#~ ":%s " +#~ msgid "Create volume flow not retrieved" #~ msgstr "" -#~ msgid "During wait running, %s disappeared." +#~ msgid "Failed to successfully complete create volume workflow" #~ msgstr "" -#~ msgid "Instance %s running successfully." +#~ msgid "Expected volume result not found" #~ msgstr "" -#~ msgid "The nwfilter(%(instance_secgroup_filter_name)s) is not found." +#~ msgid "Manager volume flow not retrieved" #~ msgstr "" -#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image verification failed" +#~ msgid "Failed to successfully complete manager volume workflow" #~ msgstr "" -#~ msgid "" -#~ "%(container_format)s-%(id)s (%(base_file)s): image " -#~ "verification skipped, no hash stored" +#~ msgid "Unable to update stats, driver is uninitialized" #~ msgstr "" -#~ msgid "%(container_format)s-%(id)s (%(base_file)s): checking" +#~ msgid "Bad reponse from server: %s" #~ msgstr "" -#~ msgid "" -#~ "%(container_format)s-%(id)s (%(base_file)s): in use:" -#~ " on this node %(local)d local, " -#~ "%(remote)d on other nodes" +#~ msgid "%(flow)s has moved into state %(state)s from state %(old_state)s" #~ msgstr "" -#~ msgid "" -#~ "%(container_format)s-%(id)s (%(base_file)s): warning " -#~ "-- an absent base file is in " -#~ "use! instances: %(instance_list)s" +#~ msgid "No request spec, will not reschedule" #~ msgstr "" -#~ msgid "" -#~ "%(container_format)s-%(id)s (%(base_file)s): in: on" -#~ " other nodes (%(remote)d on other " -#~ "nodes)" +#~ msgid "No retry filter property or associated retry info, will not reschedule" #~ msgstr "" -#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image is not in use" +#~ msgid "Retry info not present, will not reschedule" #~ msgstr "" -#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image is in use" +#~ msgid "Clear capabilities" #~ msgstr "" -#~ msgid "Created VM %s..." +#~ msgid "This usually means the volume was never succesfully created." #~ msgstr "" -#~ msgid "Created VM %(instance_name)s as %(vm_ref)s." +#~ msgid "setting LU uppper (end) limit to %s" #~ msgstr "" -#~ msgid "Creating a CDROM-specific VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +#~ msgid "Can't find lun or lun goup in array" #~ msgstr "" -#~ msgid "" -#~ "Created a CDROM-specific VBD %(vbd_ref)s" -#~ " for VM %(vm_ref)s, VDI %(vdi_ref)s." +#~ msgid "Volume to be restored to is smaller than the backup to be restored" #~ msgstr "" -#~ msgid "No primary VDI found for%(vm_ref)s" +#~ msgid "Volume driver '%(driver)s' not initialized." #~ msgstr "" -#~ msgid "Snapshotting VM %(vm_ref)s with label '%(label)s'..." +#~ msgid "in looping call" #~ msgstr "" -#~ msgid "Created snapshot %(template_vm_ref)s from VM %(vm_ref)s." +#~ msgid "Is the appropriate service running?" 
#~ msgstr "" -#~ msgid "Fetching image %(image)s" +#~ msgid "Could not find another host" #~ msgstr "" -#~ msgid "Image Type: %s" +#~ msgid "Not enough allocatable volume gigabytes remaining" #~ msgstr "" -#~ msgid "ISO: Found sr possibly containing the ISO image" +#~ msgid "Unable to update stats on non-intialized Volume Group: %s" #~ msgstr "" -#~ msgid "Size for image %(image)s:%(virtual_size)d" +#~ msgid "do_setup: Pool %s does not exist" #~ msgstr "" -#~ msgid "instance %s: Failed to fetch glance image" +#~ msgid "migrate_volume started with more than one vdisk copy" #~ msgstr "" -#~ msgid "Creating VBD for VDI %s ... " +#~ msgid "migrate_volume: Could not get vdisk copy data" #~ msgstr "" -#~ msgid "Creating VBD for VDI %s done." +#~ msgid "Selected datastore: %s for the volume." #~ msgstr "" -#~ msgid "VBD.unplug successful first time." +#~ msgid "There are no valid datastores present under %s." #~ msgstr "" -#~ msgid "VBD.unplug rejected: retrying..." +#~ msgid "Unable to create volume, driver not initialized" #~ msgstr "" -#~ msgid "Not sleeping anymore!" +#~ msgid "Migration %(migration_id)s could not be found." #~ msgstr "" -#~ msgid "VBD.unplug successful eventually." +#~ msgid "Bad driver response status: %(status)s" #~ msgstr "" -#~ msgid "Ignoring XenAPI.Failure in VBD.unplug: %s" +#~ msgid "Instance %(instance_id)s could not be found." #~ msgstr "" -#~ msgid "Ignoring XenAPI.Failure %s" +#~ msgid "Volume retype failed: %(reason)s" #~ msgstr "" -#~ msgid "Starting instance %s" +#~ msgid "SIGTERM received" #~ msgstr "" -#~ msgid "instance %s: Failed to spawn" -#~ msgstr "인스턴스 %s: 생성에 실패했습니다" +#~ msgid "Child %(pid)d exited with status %(code)d" +#~ msgstr "" -#~ msgid "Instance %s failed to spawn - performing clean-up" +#~ msgid "_wait_child %d" #~ msgstr "" -#~ msgid "instance %s: Failed to spawn - Unable to create VM" +#~ msgid "wait wrap.failed %s" #~ msgstr "" #~ msgid "" -#~ "Auto configuring disk for instance " -#~ "%(instance_uuid)s, attempting to resize " -#~ "partition..." +#~ "Report interval must be less than " +#~ "service down time. Current config: " +#~ ". Setting " +#~ "service_down_time to: %(new_service_down_time)s" #~ msgstr "" -#~ msgid "Invalid value for injected_files: '%s'" +#~ msgid "Failed to update iscsi target for volume %(name)s." #~ msgstr "" -#~ msgid "Starting VM %s..." +#~ msgid "Updating iscsi target: %s" #~ msgstr "" -#~ msgid "Spawning VM %(instance_uuid)s created %(vm_ref)s." +#~ msgid "Failed to update iscsi target %(name)s: %(e)s" #~ msgstr "" -#~ msgid "Instance %s: waiting for running" +#~ msgid "Caught '%(exception)s' exception." #~ msgstr "" -#~ msgid "Instance %s: running" +#~ msgid "Get code level failed" #~ msgstr "" -#~ msgid "Resources to remove:%s" +#~ msgid "do_setup: Could not get system name" #~ msgstr "" -#~ msgid "Removing VDI %(vdi_ref)s(uuid:%(vdi_to_remove)s)" +#~ msgid "Failed to get license information." #~ msgstr "" -#~ msgid "Skipping VDI destroy for %s" +#~ msgid "do_setup: No configured nodes" #~ msgstr "" -#~ msgid "Finished snapshot and upload for VM %s" +#~ msgid "enter: _get_chap_secret_for_host: host name %s" #~ msgstr "" -#~ msgid "Starting snapshot for VM %s" +#~ msgid "" +#~ "leave: _get_chap_secret_for_host: host name " +#~ "%(host_name)s with secret %(chap_secret)s" #~ msgstr "" -#~ msgid "Unable to Snapshot instance %(instance_uuid)s: %(exc)s" +#~ msgid "" +#~ "_create_host: Cannot clean host name. 
" +#~ "Host name is not unicode or string" #~ msgstr "" -#~ msgid "Updating instance '%(instance_uuid)s' progress to %(progress)d" +#~ msgid "enter: _get_host_from_connector: %s" #~ msgstr "" -#~ msgid "Resize instance %s complete" +#~ msgid "leave: _get_host_from_connector: host %s" #~ msgstr "" -#~ msgid "domid changed from %(olddomid)s to %(newdomid)s" +#~ msgid "enter: _create_host: host %s" #~ msgstr "" -#~ msgid "VM %(instance_uuid)s already halted,skipping shutdown..." +#~ msgid "_create_host: No connector ports" #~ msgstr "" -#~ msgid "Shutting down VM for Instance %(instance_uuid)s" +#~ msgid "leave: _create_host: host %(host)s - %(host_name)s" #~ msgstr "" -#~ msgid "Destroying VDIs for Instance %(instance_uuid)s" +#~ msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" #~ msgstr "" #~ msgid "" -#~ "Instance %(instance_uuid)s using RAW or " -#~ "VHD, skipping kernel and ramdisk " -#~ "deletion" +#~ "storwize_svc_multihostmap_enabled is set to " +#~ "False, Not allow multi host mapping" #~ msgstr "" -#~ msgid "Instance %(instance_uuid)s VM destroyed" +#~ msgid "volume %s mapping to multi host" #~ msgstr "" -#~ msgid "Destroying VM for Instance %(instance_uuid)s" +#~ msgid "" +#~ "leave: _map_vol_to_host: LUN %(result_lun)s, " +#~ "volume %(volume_name)s, host %(host_name)s" #~ msgstr "" -#~ msgid "Automatically hard rebooting %d" +#~ msgid "enter: _delete_host: host %s " #~ msgstr "" -#~ msgid "Instance for migration %d not found, skipping" +#~ msgid "leave: _delete_host: host %s " #~ msgstr "" -#~ msgid "injecting network info to xs for vm: |%s|" +#~ msgid "_create_host failed to return the host name." #~ msgstr "" -#~ msgid "creating vif(s) for vm: |%s|" +#~ msgid "_get_host_from_connector failed to return the host name for connector" #~ msgstr "" -#~ msgid "Creating VIF for VM %(vm_ref)s, network %(network_ref)s." +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to any host found." #~ msgstr "" -#~ msgid "Created VIF %(vif_ref)s for VM %(vm_ref)s, network %(network_ref)s." +#~ msgid "" +#~ "terminate_connection: Multiple mappings of " +#~ "volume %(vol_name)s found, no host " +#~ "specified." #~ msgstr "" -#~ msgid "injecting hostname to xs for vm: |%s|" +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to host %(host_name)s found" #~ msgstr "" -#~ msgid "" -#~ "The agent call to %(method)s returned" -#~ " an invalid response: %(ret)r. VM " -#~ "id=%(instance_uuid)s; path=%(path)s; args=%(addl_args)r" +#~ msgid "protocol must be specified as ' iSCSI' or ' FC'" +#~ msgstr "" + +#~ msgid "enter: _create_vdisk: vdisk %s " #~ msgstr "" #~ msgid "" -#~ "TIMEOUT: The call to %(method)s timed" -#~ " out. VM id=%(instance_uuid)s; args=%(args)r" +#~ "_create_vdisk %(name)s - did not find success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" #~ msgid "" -#~ "NOT IMPLEMENTED: The call to %(method)s" -#~ " is not supported by the agent. " -#~ "VM id=%(instance_uuid)s; args=%(args)r" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find success " +#~ "message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" #~ msgstr "" #~ msgid "" -#~ "The call to %(method)s returned an " -#~ "error: %(e)s. 
VM id=%(instance_uuid)s; " -#~ "args=%(args)r" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find mapping " +#~ "id in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" #~ msgstr "" -#~ msgid "Creating VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +#~ msgid "" +#~ "_prepare_fc_map: Failed to prepare FlashCopy" +#~ " from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" -#~ msgid "Error destroying VDI" +#~ msgid "" +#~ "Unexecpted mapping status %(status)s for " +#~ "mapping %(id)s. Attributes: %(attr)s" #~ msgstr "" -#~ msgid "\tVolume %s is NOT VSA volume" +#~ msgid "" +#~ "Mapping %(id)s prepare failed to " +#~ "complete within the allotted %(to)d " +#~ "seconds timeout. Terminating." #~ msgstr "" -#~ msgid "\tFE VSA Volume %s creation - do nothing" +#~ msgid "" +#~ "_prepare_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s with " +#~ "exception %(ex)s" #~ msgstr "" -#~ msgid "VSA BE create_volume for %s failed" +#~ msgid "_prepare_fc_map: %s" #~ msgstr "" -#~ msgid "VSA BE create_volume for %s succeeded" +#~ msgid "" +#~ "_start_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s deletion - do nothing" +#~ msgid "" +#~ "enter: _run_flashcopy: execute FlashCopy from" +#~ " source %(source)s to target %(target)s" #~ msgstr "" -#~ msgid "VSA BE delete_volume for %s failed" +#~ msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" #~ msgstr "" -#~ msgid "VSA BE delete_volume for %s suceeded" +#~ msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s local path call - call discover" +#~ msgid "_create_copy: Source vdisk %(src_vdisk)s (%(src_id)s) does not exist" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s ensure export - do nothing" +#~ msgid "" +#~ "_create_copy: cannot get source vdisk " +#~ "%(src)s capacity from vdisk attributes " +#~ "%(attr)s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s create export - do nothing" +#~ msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s remove export - do nothing" +#~ msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" #~ msgstr "" -#~ msgid "VSA BE remove_export for %s failed" +#~ msgid "" +#~ "leave: _get_flashcopy_mapping_attributes: mapping " +#~ "%(fc_map_id)s, attributes %(attributes)s" #~ msgstr "" -#~ msgid "Failed to retrieve QoS info" +#~ msgid "enter: _is_vdisk_defined: vdisk %s " #~ msgstr "" -#~ msgid "invalid drive data" +#~ msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " #~ msgstr "" -#~ msgid "drive_name not defined" +#~ msgid "enter: _delete_vdisk: vdisk %s" #~ msgstr "" -#~ msgid "invalid drive type name %s" +#~ msgid "warning: Tried to delete vdisk %s but it does not exist." #~ msgstr "" -#~ msgid "*** Experimental VSA code ***" +#~ msgid "leave: _delete_vdisk: vdisk %s" #~ msgstr "" -#~ msgid "Requested number of VCs (%d) is too high. 
Setting to default" +#~ msgid "" +#~ "_add_vdisk_copy %(name)s - did not find" +#~ " success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" -#~ msgid "Creating VSA: %s" +#~ msgid "_get_vdisk_copy_attrs: Could not get vdisk copy data" #~ msgstr "" -#~ msgid "" -#~ "VSA ID %(vsa_id)d %(vsa_name)s: Create " -#~ "volume %(vol_name)s, %(vol_size)d GB, type " -#~ "%(vol_type_id)s" +#~ msgid "_get_pool_attrs: Pool %s does not exist" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)d: Update VSA status to %(status)s" +#~ msgid "enter: _execute_command_and_parse_attributes: command %s" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)d: Update VSA call" +#~ msgid "" +#~ "leave: _execute_command_and_parse_attributes:\n" +#~ "command: %(cmd)s\n" +#~ "attributes: %(attr)s" #~ msgstr "" -#~ msgid "Adding %(add_cnt)s VCs to VSA %(vsa_name)s." +#~ msgid "" +#~ "_get_hdr_dic: attribute headers and values do not match.\n" +#~ " Headers: %(header)s\n" +#~ " Values: %(row)s" #~ msgstr "" -#~ msgid "Deleting %(del_cnt)s VCs from VSA %(vsa_name)s." +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ "stdout: %(out)s\n" +#~ "stderr: %(err)s\n" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Deleting %(direction)s volume %(vol_name)s" +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" -#~ msgid "Unable to delete volume %s" +#~ msgid "Did not find expected column in %(fun)s: %(hdr)s" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Forced delete. %(direction)s volume %(vol_name)s" +#~ msgid "" +#~ "Volume size %(size)s cannot be lesser" +#~ " than the snapshot size %(snap_size)s. " +#~ "They must be >= original snapshot " +#~ "size." #~ msgstr "" -#~ msgid "Going to try to terminate VSA ID %s" +#~ msgid "" +#~ "Clones currently disallowed when %(size)s " +#~ "< %(source_size)s. They must be >= " +#~ "original volume size." #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Delete instance %(name)s" +#~ msgid "" +#~ "Size of specified image %(image_size)s " +#~ "is larger than volume size " +#~ "%(volume_size)s." #~ msgstr "" -#~ msgid "Create call received for VSA %s" +#~ msgid "" +#~ "Image minDisk size %(min_disk)s is " +#~ "larger than the volume size " +#~ "%(volume_size)s." #~ msgstr "" -#~ msgid "Failed to find VSA %(vsa_id)d" +#~ msgid "Updating volume %(volume_id)s with %(update)s" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Drive %(vol_id)s created. Status %(status)s" +#~ msgid "Volume %s: resetting 'creating' status failed" #~ msgstr "" -#~ msgid "Drive %(vol_name)s (%(vol_disp_name)s) still in creating phase - wait" +#~ msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s" #~ msgstr "" -#~ msgid "" -#~ "VSA ID %(vsa_id)d: Not all volumes " -#~ "are created (%(cvol_real)d of %(cvol_exp)d)" +#~ msgid "Marking volume %s as bootable" #~ msgstr "" #~ msgid "" -#~ "VSA ID %(vsa_id)d: Drive %(vol_name)s " -#~ "(%(vol_disp_name)s) is in %(status)s state" +#~ "Attempting download of %(image_id)s " +#~ "(%(image_location)s) to volume %(volume_id)s" #~ msgstr "" -#~ msgid "Failed to update attach status for volume %(vol_name)s. 
%(ex)s" +#~ msgid "" +#~ "Downloaded image %(image_id)s (%(image_location)s)" +#~ " to volume %(volume_id)s successfully" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)d: Delete all BE volumes" +#~ msgid "" +#~ "Creating volume glance metadata for " +#~ "volume %(volume_id)s backed by image " +#~ "%(image_id)s with: %(vol_metadata)s" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)d: Start %(vc_count)d instances" +#~ msgid "" +#~ "Cloning %(volume_id)s from image %(image_id)s" +#~ " at location %(image_location)s" #~ msgstr "" diff --git a/cinder/locale/ko_KR/LC_MESSAGES/cinder.po b/cinder/locale/ko_KR/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..5bea4e9fde --- /dev/null +++ b/cinder/locale/ko_KR/LC_MESSAGES/cinder.po @@ -0,0 +1,10736 @@ +# Korean (South Korea) translations for cinder. +# Copyright (C) 2013 ORGANIZATION +# This file is distributed under the same license as the cinder project. +# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Cinder\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-02-03 06:16+0000\n" +"PO-Revision-Date: 2013-05-08 11:44+0000\n" +"Last-Translator: FULL NAME \n" +"Language-Team: Korean (Korea) " +"(http://www.transifex.com/projects/p/openstack/language/ko_KR/)\n" +"Plural-Forms: nplurals=1; plural=0\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:102 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:66 cinder/brick/exception.py:33 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:88 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:107 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:112 +#, python-format +msgid "Volume driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:116 +#, python-format +msgid "Backup driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:120 +#, python-format +msgid "Connection to glance failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:124 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:129 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:133 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:137 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:141 +msgid "Volume driver not ready." +msgstr "" + +#: cinder/exception.py:145 cinder/brick/exception.py:74 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:150 +#, python-format +msgid "Invalid snapshot: %(reason)s" +msgstr "" + +#: cinder/exception.py:154 +#, python-format +msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:159 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:163 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:167 +msgid "The results are invalid." 
+msgstr "" + +#: cinder/exception.py:171 +#, python-format +msgid "Invalid input received: %(reason)s" +msgstr "" + +#: cinder/exception.py:175 +#, python-format +msgid "Invalid volume type: %(reason)s" +msgstr "" + +#: cinder/exception.py:179 +#, python-format +msgid "Invalid volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:183 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:187 +#, python-format +msgid "Invalid host: %(reason)s" +msgstr "" + +#: cinder/exception.py:193 cinder/brick/exception.py:81 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:197 +#, python-format +msgid "Invalid auth key: %(reason)s" +msgstr "" + +#: cinder/exception.py:201 +#, python-format +msgid "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" +msgstr "" + +#: cinder/exception.py:206 +msgid "Service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:210 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:214 +#, python-format +msgid "The device in the path %(path)s is unavailable: %(reason)s" +msgstr "" + +#: cinder/exception.py:218 +#, python-format +msgid "Expected a uuid but received %(uuid)s." +msgstr "" + +#: cinder/exception.py:222 cinder/brick/exception.py:68 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:228 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:232 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "" +"Volume %(volume_id)s has no administration metadata with key " +"%(metadata_key)s." +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Invalid metadata: %(reason)s" +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Invalid metadata size: %(reason)s" +msgstr "" + +#: cinder/exception.py:250 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:255 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:264 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:269 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s deletion is not allowed with volumes " +"present with the type." +msgstr "" + +#: cinder/exception.py:274 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:278 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:282 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:287 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:291 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:295 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "Service %(service_id)s could not be found." 
+msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:328 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:332 +#, python-format +msgid "Unknown quota resources %(unknown)s." +msgstr "" + +#: cinder/exception.py:336 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:340 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:344 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:348 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:352 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:356 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:365 +#, python-format +msgid "Volume Type %(id)s already exists." +msgstr "" + +#: cinder/exception.py:369 +#, python-format +msgid "Volume type encryption for type %(type_id)s already exists." +msgstr "" + +#: cinder/exception.py:373 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:377 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:381 +#, python-format +msgid "Could not find parameter %(param)s" +msgstr "" + +#: cinder/exception.py:385 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:389 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:398 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:402 +#, python-format +msgid "Quota exceeded: code=%(code)s" +msgstr "" + +#: cinder/exception.py:409 +#, python-format +msgid "" +"Requested volume or snapshot exceeds allowed Gigabytes quota. Requested " +"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." 
+msgstr "" + +#: cinder/exception.py:415 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:419 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:423 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:427 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:432 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:436 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:440 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:444 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:449 +#, python-format +msgid "Glance metadata for volume/snapshot %(id)s cannot be found." +msgstr "" + +#: cinder/exception.py:453 +#, python-format +msgid "Failed to export for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:457 +#, python-format +msgid "Failed to create metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:461 +#, python-format +msgid "Failed to update metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:465 +#, python-format +msgid "Failed to copy metadata to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:469 +#, python-format +msgid "Failed to copy image to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:473 +msgid "Invalid Ceph args provided for backup rbd operation" +msgstr "" + +#: cinder/exception.py:477 +msgid "An error has occurred during backup operation" +msgstr "" + +#: cinder/exception.py:481 +msgid "Backup RBD operation failed" +msgstr "" + +#: cinder/exception.py:485 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:489 +msgid "Failed to identify volume backend." +msgstr "" + +#: cinder/exception.py:493 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "" + +#: cinder/exception.py:497 +#, python-format +msgid "Connection to swift failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:501 +#, python-format +msgid "Transfer %(transfer_id)s could not be found." +msgstr "" + +#: cinder/exception.py:505 +#, python-format +msgid "Volume migration failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:509 +#, python-format +msgid "SSH command injection detected: %(command)s" +msgstr "" + +#: cinder/exception.py:513 +#, python-format +msgid "QoS Specs %(specs_id)s already exists." +msgstr "" + +#: cinder/exception.py:517 +#, python-format +msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:522 +#, python-format +msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "No such QoS spec %(specs_id)s." +msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:536 +#, python-format +msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." 
+msgstr "" + +#: cinder/exception.py:541 +#, python-format +msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." +msgstr "" + +#: cinder/exception.py:546 +#, python-format +msgid "Invalid qos specs: %(reason)s" +msgstr "" + +#: cinder/exception.py:550 +#, python-format +msgid "QoS Specs %(specs_id)s is still associated with entities." +msgstr "" + +#: cinder/exception.py:554 +#, python-format +msgid "key manager error: %(reason)s" +msgstr "" + +#: cinder/exception.py:560 +msgid "Coraid Cinder Driver exception." +msgstr "" + +#: cinder/exception.py:564 +msgid "Failed to encode json data." +msgstr "" + +#: cinder/exception.py:568 +msgid "Login on ESM failed." +msgstr "" + +#: cinder/exception.py:572 +msgid "Relogin on ESM failed." +msgstr "" + +#: cinder/exception.py:576 +#, python-format +msgid "Group with name \"%(group_name)s\" not found." +msgstr "" + +#: cinder/exception.py:580 +#, python-format +msgid "ESM configure request failed: %(message)s." +msgstr "" + +#: cinder/exception.py:584 +#, python-format +msgid "Coraid ESM not available with reason: %(reason)s." +msgstr "" + +#: cinder/exception.py:589 +msgid "Zadara Cinder Driver exception." +msgstr "" + +#: cinder/exception.py:593 +#, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:597 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:601 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:605 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:609 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:613 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:618 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:622 +msgid "SolidFire Cinder Driver exception" +msgstr "" + +#: cinder/exception.py:626 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:630 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:636 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:641 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:645 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:649 cinder/exception.py:662 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:654 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:658 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/manager.py:133 +msgid "Notifying Schedulers of capabilities ..." +msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:105 +#, python-format +msgid "" +"Default quota for resource: %(res)s is set by the default quota flag: " +"quota_%(res)s, it is now deprecated. Please use the the default quota " +"class for default quota." 
+msgstr "" + +#: cinder/quota.py:748 +#, python-format +msgid "Created reservations %s" +msgstr "" + +#: cinder/quota.py:770 +#, python-format +msgid "Failed to commit reservations %s" +msgstr "" + +#: cinder/quota.py:790 +#, python-format +msgid "Failed to roll back reservations %s" +msgstr "" + +#: cinder/quota.py:876 +msgid "Cannot register resource" +msgstr "" + +#: cinder/quota.py:879 +msgid "Cannot register resources" +msgstr "" + +#: cinder/quota_utils.py:46 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume - " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/quota_utils.py:56 cinder/transfer/api.py:168 +#: cinder/volume/flows/api/create_volume.py:520 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/service.py:95 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:108 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:148 +#, python-format +msgid "" +"Report interval must be less than service down time. Current config " +"service_down_time: %(service_down_time)s, report_interval for this: " +"service is: %(report_interval)s. Setting global service_down_time to: " +"%(new_down_time)s" +msgstr "" + +#: cinder/service.py:216 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:255 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:270 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:276 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:298 +#, python-format +msgid "" +"Value of config option %(name)s_workers must be integer greater than 1. " +"Input value ignored." +msgstr "" + +#: cinder/service.py:373 +msgid "serve() can only be called once" +msgstr "" + +#: cinder/service.py:379 cinder/openstack/common/service.py:166 +#: cinder/openstack/common/service.py:384 +msgid "Full set of CONF:" +msgstr "" + +#: cinder/service.py:387 +#, python-format +msgid "%s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Can not translate %s to integer." 
+msgstr "" + +#: cinder/utils.py:127 +#, python-format +msgid "May specify only one of %s" +msgstr "" + +#: cinder/utils.py:212 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:228 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:412 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:423 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:698 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:759 +#, python-format +msgid "Volume driver %s not initialized" +msgstr "" + +#: cinder/wsgi.py:127 cinder/openstack/common/sslutils.py:50 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: cinder/wsgi.py:130 cinder/openstack/common/sslutils.py:53 +#, python-format +msgid "Unable to find ca_file : %s" +msgstr "" + +#: cinder/wsgi.py:133 cinder/openstack/common/sslutils.py:56 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: cinder/wsgi.py:136 cinder/openstack/common/sslutils.py:59 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:169 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:206 +#, python-format +msgid "Started %(name)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:244 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:313 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." +msgstr "" + +#: cinder/api/common.py:92 cinder/api/common.py:126 cinder/volume/api.py:266 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:95 cinder/api/common.py:130 cinder/volume/api.py:263 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:120 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:134 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:162 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:189 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:182 +msgid "Initializing extension manager." 
+msgstr "" + +#: cinder/api/extensions.py:197 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:235 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:236 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:240 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:256 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:262 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:276 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:287 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:356 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:266 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:463 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:786 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:907 +msgid "subclasses must implement construct()!" +msgstr "" + +#: cinder/api/contrib/admin_actions.py:81 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:227 +#: cinder/api/contrib/volume_transfer.py:157 +#: cinder/api/contrib/volume_transfer.py:193 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:224 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:60 +msgid "Snapshot not found." 
+msgstr "" + +#: cinder/api/contrib/hosts.py:86 cinder/api/openstack/wsgi.py:245 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:136 +#, python-format +msgid "Host '%s' could not be found." +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:168 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:180 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:206 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:214 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:111 +msgid "Please specify a name for QoS specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:220 +msgid "Failed to disassociate qos specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:222 +msgid "Qos specs still in use." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:298 +#: cinder/api/contrib/qos_specs_manage.py:351 +msgid "Volume Type id must not be None." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:72 +msgid "Missing required element quota_class_set in request body." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:81 +msgid "Quota class limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:85 +msgid "Quota class limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:60 +msgid "Quota limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quotas.py:65 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:100 +msgid "Missing required element quota_set in request body." +msgstr "" + +#: cinder/api/contrib/quotas.py:111 +#, python-format +msgid "Bad key(s) in quota set: %s" +msgstr "" + +#: cinder/api/contrib/scheduler_hints.py:36 +msgid "Malformed scheduler_hints attribute" +msgstr "" + +#: cinder/api/contrib/services.py:84 +msgid "" +"Query by service parameter is deprecated. Please use binary parameter " +"instead." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:51 +msgid "'status' must be specified." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:61 +#, python-format +msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:67 +#, python-format +msgid "" +"Provided snapshot status %(provided)s not allowed for snapshot with " +"status %(current)s." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:79 +msgid "progress must be an integer percentage" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:101 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:105 +#: cinder/api/v1/snapshot_metadata.py:75 cinder/api/v1/volume_metadata.py:75 +#: cinder/api/v2/snapshot_metadata.py:75 cinder/api/v2/volume_metadata.py:74 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:108 +#: cinder/api/v1/snapshot_metadata.py:79 cinder/api/v1/volume_metadata.py:79 +#: cinder/api/v2/snapshot_metadata.py:79 cinder/api/v2/volume_metadata.py:78 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:150 +msgid "" +"Key names can only contain alphanumeric characters, underscores, periods," +" colons and hyphens." 
+msgstr "" + +#: cinder/api/contrib/volume_actions.py:99 +#, python-format +msgid "" +"Invalid request to attach volume to an instance %(instance_uuid)s and a " +"host %(host_name)s simultaneously" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:107 +msgid "Invalid request to attach volume to an invalid target" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:111 +msgid "" +"Invalid request to attach volume with an invalid mode. Attaching mode " +"should be 'rw' or 'ro'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:196 +msgid "Unable to fetch connection information from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:216 +msgid "Unable to terminate volume connection from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:229 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:237 +msgid "Bad value for 'force' parameter." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:240 +msgid "'force' is not string or bool." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:280 +msgid "New volume size must be specified as an integer." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:299 +msgid "Must specify readonly in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:307 +msgid "Bad value for 'readonly'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:311 +msgid "'readonly' not string or bool" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:325 +msgid "New volume type must be specified." +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:131 +msgid "Listing volume transfers" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:147 +#, python-format +msgid "Creating new volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:162 +#, python-format +msgid "Creating transfer of volume %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:183 +#, python-format +msgid "Accepting volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:196 +#, python-format +msgid "Accepting transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:217 +#, python-format +msgid "Delete transfer with id: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:64 +msgid "key_size must be non-negative" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:67 +msgid "key_size must be an integer" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:73 +msgid "provider must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:75 +msgid "control_location must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:83 +#, python-format +msgid "Valid control location are: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:111 +msgid "Cannot create encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:115 +msgid "Create body is not valid." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:157 +msgid "Cannot delete encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/middleware/auth.py:108 +msgid "Invalid service catalog json." 
+msgstr "" + +#: cinder/api/middleware/fault.py:44 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:53 cinder/api/openstack/wsgi.py:984 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/fault.py:69 +#, python-format +msgid "%(exception)s: %(explanation)s" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:55 cinder/api/middleware/sizelimit.py:64 +#: cinder/api/middleware/sizelimit.py:78 +msgid "Request is too large." +msgstr "" + +#: cinder/api/openstack/__init__.py:69 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:80 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:104 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:126 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." +msgstr "" + +#: cinder/api/openstack/wsgi.py:220 cinder/api/openstack/wsgi.py:634 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:639 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:677 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:682 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:685 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:793 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:799 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:803 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:914 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:917 cinder/api/openstack/wsgi.py:930 +#: cinder/api/v1/snapshot_metadata.py:53 cinder/api/v1/snapshot_metadata.py:71 +#: cinder/api/v1/snapshot_metadata.py:96 cinder/api/v1/snapshot_metadata.py:121 +#: cinder/api/v1/volume_metadata.py:53 cinder/api/v1/volume_metadata.py:71 +#: cinder/api/v1/volume_metadata.py:96 cinder/api/v1/volume_metadata.py:121 +#: cinder/api/v2/snapshot_metadata.py:53 cinder/api/v2/snapshot_metadata.py:71 +#: cinder/api/v2/snapshot_metadata.py:96 cinder/api/v2/snapshot_metadata.py:121 +#: cinder/api/v2/volume_metadata.py:52 cinder/api/v2/volume_metadata.py:70 +#: cinder/api/v2/volume_metadata.py:95 cinder/api/v2/volume_metadata.py:120 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:927 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:939 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:987 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." 
+msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:139 cinder/api/v2/limits.py:138 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:264 cinder/api/v2/limits.py:261 +msgid "This request was rate-limited." +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:37 cinder/api/v1/snapshot_metadata.py:117 +#: cinder/api/v1/snapshot_metadata.py:156 cinder/api/v2/snapshot_metadata.py:37 +#: cinder/api/v2/snapshot_metadata.py:117 +#: cinder/api/v2/snapshot_metadata.py:156 +msgid "snapshot does not exist" +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:139 +#: cinder/api/v1/snapshot_metadata.py:149 cinder/api/v1/volume_metadata.py:139 +#: cinder/api/v1/volume_metadata.py:149 cinder/api/v2/snapshot_metadata.py:139 +#: cinder/api/v2/snapshot_metadata.py:149 cinder/api/v2/volume_metadata.py:138 +#: cinder/api/v2/volume_metadata.py:148 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:119 cinder/api/v2/snapshots.py:120 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:184 +msgid "'volume_id' must be specified" +msgstr "" + +#: cinder/api/v1/snapshots.py:182 cinder/api/v2/snapshots.py:193 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:186 cinder/api/v2/snapshots.py:202 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:37 cinder/api/v1/volume_metadata.py:117 +#: cinder/api/v1/volume_metadata.py:156 cinder/api/v2/volume_metadata.py:36 +#: cinder/api/v2/volume_metadata.py:116 cinder/api/v2/volume_metadata.py:155 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:111 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:290 cinder/api/v2/volumes.py:228 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:344 cinder/api/v1/volumes.py:348 +#: cinder/api/v2/volumes.py:298 cinder/api/v2/volumes.py:302 +msgid "Invalid imageRef provided." 
+msgstr "" + +#: cinder/api/v1/volumes.py:388 cinder/api/v2/volumes.py:354 +#, python-format +msgid "snapshot id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:401 +#, python-format +msgid "source vol id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:412 cinder/api/v2/volumes.py:377 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/v1/volumes.py:496 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/snapshots.py:111 cinder/api/v2/snapshots.py:126 +#: cinder/api/v2/snapshots.py:267 +msgid "Snapshot could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:174 cinder/api/v2/snapshots.py:234 +#: cinder/api/v2/volumes.py:313 cinder/api/v2/volumes.py:419 +#, python-format +msgid "Missing required element '%s' in request body" +msgstr "" + +#: cinder/api/v2/snapshots.py:190 cinder/api/v2/volumes.py:217 +#: cinder/api/v2/volumes.py:234 cinder/api/v2/volumes.py:449 +msgid "Volume could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:230 cinder/api/v2/volumes.py:415 +msgid "Missing request body" +msgstr "" + +#: cinder/api/v2/types.py:70 +msgid "Volume type not found" +msgstr "" + +#: cinder/api/v2/volumes.py:237 +msgid "Volume cannot be deleted while in attached state" +msgstr "" + +#: cinder/api/v2/volumes.py:343 +msgid "Volume type not found." +msgstr "" + +#: cinder/api/v2/volumes.py:366 +#, python-format +msgid "source volume id:%s not found" +msgstr "" + +#: cinder/api/v2/volumes.py:472 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:66 +msgid "Backup status must be available or error" +msgstr "" + +#: cinder/backup/api.py:105 +msgid "Volume to be backed up must be available" +msgstr "" + +#: cinder/backup/api.py:140 +msgid "Backup status must be available" +msgstr "" + +#: cinder/backup/api.py:145 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:154 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:170 +msgid "Volume to be restored to must be available" +msgstr "" + +#: cinder/backup/api.py:176 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." +msgstr "" + +#: cinder/backup/api.py:181 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:97 +msgid "NULL host not allowed for volume backend lookup." +msgstr "" + +#: cinder/backup/manager.py:100 +#, python-format +msgid "Checking hostname '%s' for backend info." +msgstr "" + +#: cinder/backup/manager.py:107 +#, python-format +msgid "Backend not found in hostname (%s) so using default." +msgstr "" + +#: cinder/backup/manager.py:117 +#, python-format +msgid "Manager requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:120 cinder/backup/manager.py:132 +msgid "Fetching default backend." +msgstr "" + +#: cinder/backup/manager.py:123 +#, python-format +msgid "Volume manager for backend '%s' does not exist." +msgstr "" + +#: cinder/backup/manager.py:129 +#, python-format +msgid "Driver requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:147 +#, python-format +msgid "" +"Registering backend %(backend)s (host=%(host)s " +"backend_name=%(backend_name)s)." +msgstr "" + +#: cinder/backup/manager.py:154 +#, python-format +msgid "Registering default backend %s." 
+msgstr "" + +#: cinder/backup/manager.py:158 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)." +msgstr "" + +#: cinder/backup/manager.py:165 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s." +msgstr "" + +#: cinder/backup/manager.py:184 +msgid "Cleaning up incomplete backup operations." +msgstr "" + +#: cinder/backup/manager.py:189 +#, python-format +msgid "Resetting volume %s to available (was backing-up)." +msgstr "" + +#: cinder/backup/manager.py:194 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)." +msgstr "" + +#: cinder/backup/manager.py:206 +#, python-format +msgid "Resetting backup %s to error (was creating)." +msgstr "" + +#: cinder/backup/manager.py:212 +#, python-format +msgid "Resetting backup %s to available (was restoring)." +msgstr "" + +#: cinder/backup/manager.py:217 +#, python-format +msgid "Resuming delete on backup: %s." +msgstr "" + +#: cinder/backup/manager.py:225 +#, python-format +msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:237 +#, python-format +msgid "" +"Create backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:249 +#, python-format +msgid "" +"Create backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:282 +#, python-format +msgid "Create backup finished. backup: %s." +msgstr "" + +#: cinder/backup/manager.py:286 +#, python-format +msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:299 +#, python-format +msgid "" +"Restore backup aborted: expected volume status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:310 +#, python-format +msgid "" +"Restore backup aborted: expected backup status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:329 +#, python-format +msgid "" +"Restore backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:360 +#, python-format +msgid "" +"Restore backup finished, backup %(backup_id)s restored to volume " +"%(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:379 +#, python-format +msgid "Delete backup started, backup: %s." +msgstr "" + +#: cinder/backup/manager.py:386 +#, python-format +msgid "" +"Delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:399 +#, python-format +msgid "" +"Delete backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:422 +#, python-format +msgid "Delete backup finished, backup %s deleted." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:116 +msgid "" +"rbd striping not supported - ignoring configuration settings for rbd " +"striping" +msgstr "" + +#: cinder/backup/drivers/ceph.py:147 +#, python-format +msgid "invalid user '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:213 +msgid "backup_id required" +msgstr "" + +#: cinder/backup/drivers/ceph.py:224 +#, python-format +msgid "discarding %(length)s bytes from offset %(offset)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:232 +#, python-format +msgid "writing zeroes chunk %d" +msgstr "" + +#: cinder/backup/drivers/ceph.py:246 +#, python-format +msgid "transferring data between '%(src)s' and '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:250 +#, python-format +msgid "%(chunks)s chunks of %(bytes)s bytes to be transferred" +msgstr "" + +#: cinder/backup/drivers/ceph.py:269 +#, python-format +msgid "transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:279 +#, python-format +msgid "transferring remaining %s bytes" +msgstr "" + +#: cinder/backup/drivers/ceph.py:295 +#, python-format +msgid "creating base image '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:322 cinder/backup/drivers/ceph.py:603 +#, python-format +msgid "deleting backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:325 +msgid "no backup snapshot to delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:361 +#, python-format +msgid "trying diff format name format basename='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:369 +#, python-format +msgid "image %s not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:377 +#, python-format +msgid "base image still has %s snapshots so skipping base image delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:382 +#, python-format +msgid "deleting base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:389 +#, python-format +msgid "image busy, retrying %(retries)s more time(s) in %(delay)ss" +msgstr "" + +#: cinder/backup/drivers/ceph.py:394 +msgid "max retries reached - raising error" +msgstr "" + +#: cinder/backup/drivers/ceph.py:397 +#, python-format +msgid "base backup image='%s' deleted)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:407 +#, python-format +msgid "deleting source snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:453 +#, python-format +msgid "performing differential transfer from '%(src)s' to '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:478 +#, python-format +msgid "rbd diff op failed - (ret=%(ret)s stderr=%(stderr)s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:488 +#, python-format +msgid "image '%s' not found - trying diff format name" +msgstr "" + +#: cinder/backup/drivers/ceph.py:493 +#, python-format +msgid "diff format image '%s' not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:528 +#, python-format +msgid "using --from-snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:543 +#, python-format +msgid "source snap '%s' is stale so deleting" +msgstr "" + +#: cinder/backup/drivers/ceph.py:555 +#, python-format +msgid "" +"snap='%(snap)s' does not exist in base image='%(base)s' - aborting " +"incremental backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:566 +#, python-format +msgid "creating backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:586 +#, python-format +msgid "differential backup transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:595 +msgid "differential backup transfer failed" +msgstr "" + +#: 
cinder/backup/drivers/ceph.py:625 +#, python-format +msgid "creating base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:634 +msgid "copying data" +msgstr "" + +#: cinder/backup/drivers/ceph.py:694 +#, python-format +msgid "looking for snapshot of backup base '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:697 +#, python-format +msgid "backup base '%s' has no snapshots" +msgstr "" + +#: cinder/backup/drivers/ceph.py:704 +#, python-format +msgid "backup '%s' has no snapshot" +msgstr "" + +#: cinder/backup/drivers/ceph.py:708 +#, python-format +msgid "backup should only have one snapshot but instead has %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:713 +#, python-format +msgid "found snapshot '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:734 +msgid "need non-zero volume size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:751 +#, python-format +msgid "Starting backup of volume='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:764 +msgid "forcing full backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:776 +#, python-format +msgid "backup '%s' finished." +msgstr "" + +#: cinder/backup/drivers/ceph.py:834 +msgid "adjusting restore vol size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:846 +#, python-format +msgid "trying incremental restore from base='%(base)s' snap='%(snap)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:858 +msgid "differential restore failed, trying full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:869 +#, python-format +msgid "restore transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:916 +#, python-format +msgid "rbd has %s extents" +msgstr "" + +#: cinder/backup/drivers/ceph.py:938 +msgid "dest volume is original volume - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:959 +msgid "destination has extents - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:964 +#, python-format +msgid "no restore point found for backup='%s', forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:995 +msgid "forcing full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1005 +#, python-format +msgid "starting restore from Ceph backup=%(src)s to volume=%(dest)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1016 +msgid "volume_file does not support fileno() so skipping fsync()" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1021 +msgid "restore finished successfully." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:1023 +#, python-format +msgid "restore finished with error - %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1029 +#, python-format +msgid "delete started for backup=%s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1034 +msgid "rbd image not found but continuing anyway so that db entry can be removed" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1037 +#, python-format +msgid "delete '%s' finished with warning" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1039 +#, python-format +msgid "delete '%s' finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:106 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:123 +#, python-format +msgid "single_user auth mode enabled, but %(param)s not set" +msgstr "" + +#: cinder/backup/drivers/swift.py:141 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:146 +#, python-format +msgid "container %s does not exist" +msgstr "" + +#: cinder/backup/drivers/swift.py:151 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/drivers/swift.py:157 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:173 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:182 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:192 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:209 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/drivers/swift.py:214 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:219 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:224 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/drivers/swift.py:234 +#, python-format +msgid "volume size %d is invalid." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:248 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:271 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/drivers/swift.py:278 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:287 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/drivers/swift.py:291 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/drivers/swift.py:297 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:301 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:304 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:312 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/drivers/swift.py:328 cinder/backup/drivers/tsm.py:324 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/drivers/swift.py:345 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/drivers/swift.py:350 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:356 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/drivers/swift.py:362 +#, python-format +msgid "" +"restoring object from swift. backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:378 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/drivers/swift.py:401 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:409 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:423 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:428 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:432 cinder/backup/drivers/tsm.py:378 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:446 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:455 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:458 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:468 cinder/backup/drivers/tsm.py:440 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/backup/drivers/tsm.py:85 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to create device hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:143 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to obtain backup success notification from " +"server.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:173 +#, python-format +msgid "" +"restore: %(vol_id)s Failed.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:199 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a block device." +msgstr "" + +#: cinder/backup/drivers/tsm.py:206 +#, python-format +msgid "backup: %(vol_id)s Failed. Cannot obtain real path to device %(path)s." +msgstr "" + +#: cinder/backup/drivers/tsm.py:213 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a file." +msgstr "" + +#: cinder/backup/drivers/tsm.py:260 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to remove backup hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:286 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to TSM, volume path: " +"%(volume_path)s," +msgstr "" + +#: cinder/backup/drivers/tsm.py:298 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:308 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:338 +#, python-format +msgid "" +"restore: starting restore of backup from TSM to volume %(volume_id)s, " +"backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:352 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:362 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:413 +#, python-format +msgid "" +"delete: %(vol_id)s Failed to run dsmc with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:421 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments with " +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:432 +#, python-format +msgid "" +"delete: %(vol_id)s Failed with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/brick/exception.py:55 +#, python-format +msgid "Exception in string format operation. 
msg='%s'" +msgstr "" + +#: cinder/brick/exception.py:85 +msgid "We are unable to locate any Fibre Channel devices." +msgstr "" + +#: cinder/brick/exception.py:89 +msgid "Unable to find a Fibre Channel volume device." +msgstr "" + +#: cinder/brick/exception.py:93 +#, python-format +msgid "Volume device not found at %(device)s." +msgstr "" + +#: cinder/brick/exception.py:97 +#, python-format +msgid "Unable to find Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:101 +#, python-format +msgid "Failed to create Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:105 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:109 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:113 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:117 +#, python-format +msgid "Connect to volume via protocol %(protocol)s not supported." +msgstr "" + +#: cinder/brick/initiator/connector.py:127 +#, python-format +msgid "Invalid InitiatorConnector protocol specified %(protocol)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:140 +#, python-format +msgid "Failed to access the device on the path %(path)s: %(error)s %(info)s." +msgstr "" + +#: cinder/brick/initiator/connector.py:229 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. Try" +" number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:242 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:317 +#, python-format +msgid "Could not find the iSCSI Initiator File %s" +msgstr "" + +#: cinder/brick/initiator/connector.py:609 +msgid "We are unable to locate any Fibre Channel devices" +msgstr "" + +#: cinder/brick/initiator/connector.py:619 +#, python-format +msgid "Looking for Fibre Channel dev %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:629 +msgid "Fibre Channel volume device not found." +msgstr "" + +#: cinder/brick/initiator/connector.py:633 +#, python-format +msgid "Fibre volume not yet found. Will rescan & retry. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:649 +#, python-format +msgid "Found Fibre Channel volume %(name)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:658 +#, python-format +msgid "Multipath device discovered %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:776 +#, python-format +msgid "AoE volume not yet found at: %(path)s. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:789 +#, python-format +msgid "Found AoE device %(path)s (after %(tries)s rediscover)" +msgstr "" + +#: cinder/brick/initiator/connector.py:815 +#, python-format +msgid "aoe-discover: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:825 +#, python-format +msgid "aoe-revalidate %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:834 +#, python-format +msgid "aoe-flush %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:858 +msgid "" +"Connection details not present. RemoteFsClient may not initialize " +"properly." 
+msgstr "" + +#: cinder/brick/initiator/connector.py:915 +msgid "Invalid connection_properties specified no device_path attribute" +msgstr "" + +#: cinder/brick/initiator/linuxfc.py:50 cinder/brick/initiator/linuxfc.py:56 +msgid "systool is not installed" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:99 +#: cinder/brick/initiator/linuxscsi.py:107 +#: cinder/brick/initiator/linuxscsi.py:124 +#, python-format +msgid "multipath call failed exit (%(code)s)" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:145 +#, python-format +msgid "Couldn't find multipath device %(line)s" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:149 +#, python-format +msgid "Found multipath device = %(mdev)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:140 +msgid "Attempting recreate of backing lun..." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:158 +#, python-format +msgid "" +"Failed to recover attempt to create iscsi backing lun for volume " +"id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:177 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:184 +#, python-format +msgid "" +"Created volume path %(vp)s,\n" +"content: %(vc)%" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:216 cinder/brick/iscsi/iscsi.py:365 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:227 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:258 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:262 +#, python-format +msgid "Volume path %s does not exist, nothing to remove." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:280 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:290 cinder/brick/iscsi/iscsi.py:550 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:375 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:469 +msgid "cinder-rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:489 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:513 cinder/brick/iscsi/iscsi.py:522 +#, python-format +msgid "Failed to create iscsi target for volume id:%s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:532 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:542 +#, python-format +msgid "Failed to remove iscsi target for volume id:%s." 
+msgstr "" + +#: cinder/brick/iscsi/iscsi.py:571 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 cinder/brick/local_dev/lvm.py:158 +#: cinder/brick/local_dev/lvm.py:474 cinder/brick/local_dev/lvm.py:503 +#: cinder/brick/local_dev/lvm.py:546 cinder/brick/local_dev/lvm.py:609 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 cinder/brick/local_dev/lvm.py:159 +#: cinder/brick/local_dev/lvm.py:475 cinder/brick/local_dev/lvm.py:504 +#: cinder/brick/local_dev/lvm.py:547 cinder/brick/local_dev/lvm.py:610 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 cinder/brick/local_dev/lvm.py:160 +#: cinder/brick/local_dev/lvm.py:476 cinder/brick/local_dev/lvm.py:505 +#: cinder/brick/local_dev/lvm.py:548 cinder/brick/local_dev/lvm.py:611 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, python-format +msgid "Unable to locate Volume Group %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:157 +msgid "Error querying thin pool about data_percent" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:370 +#, python-format +msgid "Unable to find VG: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:420 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:473 +msgid "Error creating Volume" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:489 +#, python-format +msgid "Unable to find LV: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:502 +msgid "Error creating snapshot" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:545 +msgid "Error activating LV" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:563 +#, python-format +msgid "Error reported running lvremove: CMD: %(command)s, RESPONSE: %(response)s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:568 +msgid "Attempting udev settle and retry of lvremove..." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:608 +msgid "Error extending Volume" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:39 +msgid "nfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:45 +msgid "glusterfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:86 +#, python-format +msgid "Already mounted: %s" +msgstr "" + +#: cinder/common/config.py:125 +msgid "Deploy v1 of the Cinder API." +msgstr "" + +#: cinder/common/config.py:128 +msgid "Deploy v2 of the Cinder API." +msgstr "" + +#: cinder/common/sqlalchemyutils.py:66 +#: cinder/openstack/common/db/sqlalchemy/utils.py:72 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:114 +#: cinder/openstack/common/db/sqlalchemy/utils.py:120 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/compute/nova.py:97 +#, python-format +msgid "Novaclient connection created using URL: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:63 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:190 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:843 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1842 +#, python-format +msgid "VolumeType %s deletion failed, VolumeType in use." 
+msgstr "" + +#: cinder/db/sqlalchemy/api.py:2530 +#, python-format +msgid "No backup with id %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2615 +msgid "Volume must be available" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2639 +#, python-format +msgid "Volume in unexpected state %s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2662 +#, python-format +msgid "" +"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " +"%(status)s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:37 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:64 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:240 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:269 +msgid "Downgrade from initial Cinder install is unsupported." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:49 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:74 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:105 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:45 +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:48 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:46 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:127 +msgid "Dropping foreign key reservations_ibfk_1 failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:133 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:140 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:147 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:60 +msgid "Exception while creating table 'volume_glance_metadata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:75 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:68 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:58 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:61 +msgid "transfers table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:31 +msgid "migrations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:61 +#, python-format +msgid "Table |%s| not created" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:37 +#, python-format +msgid "Exception while dropping table %s." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:100 +#, python-format +msgid "Exception while creating table %s." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:34 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:43 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:49 +#, python-format +msgid "Column |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:92 +msgid "encryption_key_id column not dropped from volumes" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:100 +msgid "encryption_key_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:105 +msgid "volume_type_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:113 +msgid "encryption table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:49 +msgid "Table quality_of_service_specs not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:60 +msgid "Added qos_specs_id column to volume type table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:85 +msgid "Dropping foreign key volume_types_ibfk_1 failed" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:93 +msgid "Dropping qos_specs_id column failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:100 +msgid "Dropping quality_of_service_specs table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:59 +msgid "volume_admin_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:48 +msgid "" +"Found existing 'default' entries in the quota_classes table. Skipping " +"insertion of default values." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:72 +msgid "Added default quota class data into the DB." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:74 +msgid "Default quota class data not inserted into the DB." +msgstr "" + +#: cinder/image/glance.py:161 cinder/image/glance.py:169 +#, python-format +msgid "Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:94 cinder/image/image_utils.py:199 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/image/image_utils.py:101 +#, python-format +msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:109 cinder/image/image_utils.py:192 +#, python-format +msgid "" +"Size is %(image_size)dGB and doesn't fit in a volume of size " +"%(volume_size)dGB." +msgstr "" + +#: cinder/image/image_utils.py:157 +#, python-format +msgid "" +"qemu-img is not installed and image is of type %s. Only RAW images can " +"be used if qemu-img is not installed." +msgstr "" + +#: cinder/image/image_utils.py:164 +msgid "" +"qemu-img is not installed and the disk format is not specified. Only RAW" +" images can be used if qemu-img is not installed." 
+msgstr "" + +#: cinder/image/image_utils.py:178 +#, python-format +msgid "Copying image from %(tmp)s to volume %(dest)s - size: %(size)s" +msgstr "" + +#: cinder/image/image_utils.py:206 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:224 +#, python-format +msgid "Converted to %(vol_format)s, but format is now %(file_format)s" +msgstr "" + +#: cinder/image/image_utils.py:260 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:78 +msgid "" +"config option keymgr.fixed_key has not been defined: some operations may " +"fail unexpectedly" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:80 +msgid "keymgr.fixed_key not defined" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:134 +#, python-format +msgid "Not deleting key %s" +msgstr "" + +#: cinder/openstack/common/eventlet_backdoor.py:140 +#, python-format +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/fileutils.py:64 +#, python-format +msgid "Reloading cached file %s" +msgstr "" + +#: cinder/openstack/common/gettextutils.py:252 +msgid "Message objects do not support addition." +msgstr "" + +#: cinder/openstack/common/gettextutils.py:261 +msgid "" +"Message objects do not support str() because they may contain non-ascii " +"characters. Please use unicode() or translate() instead." +msgstr "" + +#: cinder/openstack/common/imageutils.py:96 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:189 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:200 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:227 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:235 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/log.py:301 +#, python-format +msgid "Deprecated: %s" +msgstr "" + +#: cinder/openstack/common/log.py:402 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:453 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:623 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:82 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:89 +#: cinder/tests/brick/test_brick_connector.py:466 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:129 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:136 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:43 +#, python-format +msgid "Unexpected argument for periodic task creation: %(arg)s." 
+msgstr "" + +#: cinder/openstack/common/periodic_task.py:134 +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:139 +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:177 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:186 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." +msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:127 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/openstack/common/processutils.py:142 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:167 +#: cinder/openstack/common/processutils.py:239 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:345 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:179 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/openstack/common/processutils.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:318 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:220 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/openstack/common/processutils.py:224 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/openstack/common/service.py:175 +#: cinder/openstack/common/service.py:269 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:187 +msgid "Exception during rpc cleanup." 
+msgstr "" + +#: cinder/openstack/common/service.py:238 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:275 +msgid "Unhandled exception" +msgstr "" + +#: cinder/openstack/common/service.py:308 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/openstack/common/service.py:327 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/openstack/common/service.py:337 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/openstack/common/service.py:354 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/openstack/common/service.py:358 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/service.py:362 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/openstack/common/service.py:392 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/openstack/common/service.py:410 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/openstack/common/sslutils.py:98 +#, python-format +msgid "Invalid SSL version : %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:86 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/strutils.py:182 +#, python-format +msgid "Invalid string format: %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:189 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/openstack/common/versionutils.py:69 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and " +"may be removed in %(remove_in)s." +msgstr "" + +#: cinder/openstack/common/versionutils.py:73 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s and may be removed in " +"%(remove_in)s. It will not be superseded." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:29 +msgid "An unknown error occurred in crypto utils." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:36 +#, python-format +msgid "Block size of %(given)d is too big, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:45 +#, python-format +msgid "Length of %(given)d is too long, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/db/exception.py:44 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:487 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:538 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:610 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/utils.py:33 +msgid "Sort key supplied was not valid." +msgstr "" + +#: cinder/openstack/common/notifier/api.py:129 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:145 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:164 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. 
Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:105 +#, python-format +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:83 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:216 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:299 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:345 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "received %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:422 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:423 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:280 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:459 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:594 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:597 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:631 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:640 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:668 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." 
+msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:166 +#: cinder/openstack/common/rpc/impl_qpid.py:164 +msgid "Failed to process message... skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:477 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:499 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:536 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:552 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:606 +#: cinder/openstack/common/rpc/impl_qpid.py:507 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:624 +#: cinder/openstack/common/rpc/impl_qpid.py:522 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:628 +#: cinder/openstack/common/rpc/impl_qpid.py:526 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:667 +#: cinder/openstack/common/rpc/impl_qpid.py:561 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:84 +#, python-format +msgid "Invalid value for qpid_topology_version: %d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:455 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:461 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:474 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:534 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:101 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:136 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:137 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:138 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:146 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:158 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:200 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:205 +msgid "You cannot send on this socket." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:267 +#, python-format +msgid "Running func with context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:305 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:387 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:437 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:443 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:475 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:481 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:497 +#, python-format +msgid "Required IPC directory does not exist at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:506 +#, python-format +msgid "Permission denied to IPC directory at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:509 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:543 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:562 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:590 +msgid "Skipping topic registration. Already registered." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:597 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:649 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:662 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:675 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:678 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:681 +#, python-format +msgid "Received message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:682 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:691 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:698 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:721 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:724 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:728 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:731 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:771 +#, python-format +msgid "topic is %s." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:815 +#, python-format +msgid "rpc_zmq_matchmaker = %(orig)s is deprecated; use %(new)s instead" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#: cinder/openstack/common/rpc/matchmaker_ring.py:79 +#: cinder/openstack/common/rpc/matchmaker_ring.py:97 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:54 +#, python-format +msgid "extra_spec requirement '%(req)s' does not match '%(cap)s'" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:67 +#, python-format +msgid "%(host_state)s fails resource_type extra_specs requirements" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:43 +msgid "Re-scheduling is disabled." +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:52 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/scheduler/driver.py:69 +msgid "Must implement host_passes_filters" +msgstr "" + +#: cinder/scheduler/driver.py:74 +msgid "Must implement find_retype_host" +msgstr "" + +#: cinder/scheduler/driver.py:78 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:82 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:98 +#, python-format +msgid "cannot place volume %(id)s on %(host)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:114 +#, python-format +msgid "No valid hosts for volume %(id)s with type %(type)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:125 +#, python-format +msgid "" +"Current host not valid for volume %(id)s with type %(type)s, migration " +"not allowed" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:156 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:174 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:207 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:259 +#, python-format +msgid "Filtered %s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:276 +#, python-format +msgid "Choosing %s" +msgstr "" + +#: cinder/scheduler/host_manager.py:264 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:269 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:294 +#, python-format +msgid "volume service is down or disabled. (host: %s)" +msgstr "" + +#: cinder/scheduler/manager.py:63 +msgid "" +"ChanceScheduler and SimpleScheduler have been deprecated due to lack of " +"support for advanced features like: volume types, volume encryption, QoS " +"etc. These two schedulers can be fully replaced by FilterScheduler with " +"certain combination of filters and weighers." 
+msgstr "" + +#: cinder/scheduler/manager.py:98 cinder/scheduler/manager.py:100 +msgid "Failed to create scheduler manager volume flow" +msgstr "" + +#: cinder/scheduler/manager.py:159 +msgid "New volume type not specified in request_spec." +msgstr "" + +#: cinder/scheduler/manager.py:174 +#, python-format +msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." +msgstr "" + +#: cinder/scheduler/manager.py:192 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:68 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%s'" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:43 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:57 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:53 +msgid "No volume_id provided to populate a request_spec from" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:116 +#, python-format +msgid "Failed to schedule_create_volume: %(cause)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:135 +#, python-format +msgid "Failed notifying on %(topic)s payload %(payload)s" +msgstr "" + +#: cinder/tests/fake_driver.py:57 cinder/volume/driver.py:784 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:76 cinder/volume/driver.py:884 +#, python-format +msgid "FAKE ISER: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:97 +msgid "local_path not implemented" +msgstr "" + +#: cinder/tests/fake_driver.py:124 cinder/tests/fake_driver.py:129 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:70 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:78 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:94 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:97 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:58 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_netapp_nfs.py:360 +#, python-format +msgid "Share %(share)s and file name %(file_name)s" +msgstr "" + +#: cinder/tests/test_rbd.py:768 cinder/volume/drivers/rbd.py:175 +msgid "flush() not supported in this version of librbd" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:260 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1507 +#, python-format +msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1510 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1515 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:60 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:61 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: 
cinder/tests/test_xiv_ds8k.py:102 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/tests/api/contrib/test_backups.py:741 +msgid "Invalid input" +msgstr "" + +#: cinder/tests/integrated/test_login.py:29 +#, python-format +msgid "volume: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:32 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:42 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:50 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:58 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:100 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:103 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:121 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:148 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:159 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:166 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/transfer/api.py:68 +msgid "Volume in unexpected state" +msgstr "" + +#: cinder/transfer/api.py:102 cinder/volume/api.py:367 +msgid "status must be available" +msgstr "" + +#: cinder/transfer/api.py:119 +#, python-format +msgid "Failed to create transfer record for %s" +msgstr "" + +#: cinder/transfer/api.py:136 +#, python-format +msgid "Attempt to transfer %s with invalid auth key." +msgstr "" + +#: cinder/transfer/api.py:156 cinder/volume/flows/api/create_volume.py:508 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/transfer/api.py:182 +#, python-format +msgid "Failed to update quota donating volumetransfer id %s" +msgstr "" + +#: cinder/transfer/api.py:199 +#, python-format +msgid "Volume %s has been transferred." 
+msgstr "" + +#: cinder/volume/api.py:143 +#, python-format +msgid "Unable to query if %s is in the availability zone set" +msgstr "" + +#: cinder/volume/api.py:171 cinder/volume/api.py:173 +msgid "Failed to create api volume flow" +msgstr "" + +#: cinder/volume/api.py:202 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:214 +#, python-format +msgid "Volume status must be available or error, but current status is: %s" +msgstr "" + +#: cinder/volume/api.py:224 +msgid "Volume cannot be deleted while migrating" +msgstr "" + +#: cinder/volume/api.py:229 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:285 cinder/volume/api.py:350 +#: cinder/volume/qos_specs.py:240 cinder/volume/volume_types.py:67 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:370 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:377 +msgid "status must be in-use to detach" +msgstr "" + +#: cinder/volume/api.py:388 +msgid "Volume status must be available to reserve" +msgstr "" + +#: cinder/volume/api.py:464 +msgid "Snapshot cannot be created while volume is migrating" +msgstr "" + +#: cinder/volume/api.py:468 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:490 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:502 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:553 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/api.py:581 cinder/volume/flows/api/create_volume.py:208 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:585 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:589 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:720 cinder/volume/api.py:772 +msgid "Volume status must be available/in-use." +msgstr "" + +#: cinder/volume/api.py:723 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/api.py:752 +msgid "Volume status must be available to extend." +msgstr "" + +#: cinder/volume/api.py:757 +#, python-format +msgid "" +"New size for extend must be greater than current size. (current: " +"%(size)s, extended: %(new_size)s)" +msgstr "" + +#: cinder/volume/api.py:778 +msgid "Volume is already part of an active migration" +msgstr "" + +#: cinder/volume/api.py:784 +msgid "volume must not have snapshots" +msgstr "" + +#: cinder/volume/api.py:797 +#, python-format +msgid "No available service named %s" +msgstr "" + +#: cinder/volume/api.py:803 +msgid "Destination host must be different than current host" +msgstr "" + +#: cinder/volume/api.py:833 +msgid "Source volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:837 +msgid "Destination volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:842 +#, python-format +msgid "Destination has migration_status %(stat)s, expected %(exp)s." +msgstr "" + +#: cinder/volume/api.py:853 +msgid "Volume status must be available to update readonly flag." 
+msgstr "" + +#: cinder/volume/api.py:862 +#, python-format +msgid "Unable to update type due to incorrect status on volume: %s" +msgstr "" + +#: cinder/volume/api.py:868 +#, python-format +msgid "Volume %s is already part of an active migration." +msgstr "" + +#: cinder/volume/api.py:874 +#, python-format +msgid "migration_policy must be 'on-demand' or 'never', passed: %s" +msgstr "" + +#: cinder/volume/api.py:887 +#, python-format +msgid "Invalid volume_type passed: %s" +msgstr "" + +#: cinder/volume/api.py:900 +#, python-format +msgid "New volume_type same as original: %s" +msgstr "" + +#: cinder/volume/api.py:915 +msgid "Retype cannot change encryption requirements" +msgstr "" + +#: cinder/volume/api.py:927 +msgid "Retype cannot change front-end qos specs for in-use volumes" +msgstr "" + +#: cinder/volume/driver.py:189 cinder/volume/drivers/netapp/nfs.py:174 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:282 +#, python-format +msgid "copy_data_between_volumes %(src)s -> %(dest)s." +msgstr "" + +#: cinder/volume/driver.py:295 cinder/volume/driver.py:309 +#, python-format +msgid "Failed to attach volume %(vol)s" +msgstr "" + +#: cinder/volume/driver.py:327 +#, python-format +msgid "Failed to copy volume %(src)s to %(dest)d" +msgstr "" + +#: cinder/volume/driver.py:340 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:358 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:394 +#, python-format +msgid "Unable to access the backend storage via the path %(path)s." +msgstr "" + +#: cinder/volume/driver.py:433 +#, python-format +msgid "Creating a new backup for volume %s." +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Restoring backup %(backup)s to volume %(volume)s." +msgstr "" + +#: cinder/volume/driver.py:474 +msgid "Extend volume not implemented" +msgstr "" + +#: cinder/volume/driver.py:533 cinder/volume/drivers/emc/emc_smis_iscsi.py:113 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:546 +#, python-format +msgid "ISCSI discovery attempt failed for:%s" +msgstr "" + +#: cinder/volume/driver.py:548 +#, python-format +msgid "Error from iscsiadm -m discovery: %s" +msgstr "" + +#: cinder/volume/driver.py:595 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:599 cinder/volume/drivers/emc/emc_smis_iscsi.py:156 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:696 +msgid "The volume driver requires the iSCSI initiator name in the connector." +msgstr "" + +#: cinder/volume/driver.py:726 cinder/volume/driver.py:845 +#: cinder/volume/drivers/eqlx.py:247 cinder/volume/drivers/lvm.py:359 +#: cinder/volume/drivers/zadara.py:650 +#: cinder/volume/drivers/emc/emc_smis_common.py:859 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:235 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:602 +#: cinder/volume/drivers/netapp/iscsi.py:1032 +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/nexenta/iscsi.py:538 +#: cinder/volume/drivers/windows/windows.py:205 +msgid "Updating volume stats" +msgstr "" + +#: cinder/volume/driver.py:924 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:203 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." 
+msgstr "" + +#: cinder/volume/manager.py:209 +msgid "" +"ThinLVMVolumeDriver is deprecated, please configure LVMISCSIDriver and " +"lvm_type=thin. Continuing with those settings." +msgstr "" + +#: cinder/volume/manager.py:228 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)" +msgstr "" + +#: cinder/volume/manager.py:235 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s" +msgstr "" + +#: cinder/volume/manager.py:244 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:257 +#, python-format +msgid "Failed to re-export volume %s: setting to error state" +msgstr "" + +#: cinder/volume/manager.py:264 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:271 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:273 +#, python-format +msgid "" +"Error encountered during re-exporting phase of driver initialization: " +"%(name)s" +msgstr "" + +#: cinder/volume/manager.py:283 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:328 cinder/volume/manager.py:330 +msgid "Failed to create manager volume flow" +msgstr "" + +#: cinder/volume/manager.py:374 cinder/volume/manager.py:391 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:380 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:389 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:394 +#, python-format +msgid "Cannot delete volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:422 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:427 +#, python-format +msgid "volume %s: glance metadata deleted" +msgstr "" + +#: cinder/volume/manager.py:430 +#, python-format +msgid "no glance metadata found for volume %s" +msgstr "" + +#: cinder/volume/manager.py:434 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:451 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:462 +#, python-format +msgid "snapshot %(snap_id)s: creating" +msgstr "" + +#: cinder/volume/manager.py:490 +#, python-format +msgid "" +"Failed updating %(snapshot_id)s metadata using the provided volumes " +"%(volume_id)s metadata" +msgstr "" + +#: cinder/volume/manager.py:496 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:508 cinder/volume/manager.py:518 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:526 +#, python-format +msgid "Cannot delete snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:556 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:559 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:579 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:583 +msgid "being attached by another host" +msgstr "" + +#: cinder/volume/manager.py:587 +msgid "being attached by different mode" +msgstr "" + +#: cinder/volume/manager.py:590 +msgid "status must be available or attaching" +msgstr "" + +#: cinder/volume/manager.py:698 +#, python-format +msgid 
"Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "" + +#: cinder/volume/manager.py:760 +#, python-format +msgid "Unable to fetch connection information from backend: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:807 +#, python-format +msgid "Unable to terminate volume connection: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:854 +msgid "failed to create new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:857 +msgid "timeout creating new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:880 +#, python-format +msgid "Failed to copy volume %(vol1)s to %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:909 +#, python-format +msgid "" +"migrate_volume_completion: completing migration for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:921 +#, python-format +msgid "" +"migrate_volume_completion is cleaning up an error for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:940 +#, python-format +msgid "Failed to delete migration source vol %(vol)s: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:976 +#, python-format +msgid "volume %s: calling driver migrate_volume" +msgstr "" + +#: cinder/volume/manager.py:1016 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/manager.py:1024 +#, python-format +msgid "" +"Unable to update stats, %(driver_name)s -%(driver_version)s " +"%(config_group)s driver is uninitialized." +msgstr "" + +#: cinder/volume/manager.py:1044 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/manager.py:1091 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to extend volume by %(s_size)sG, " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/manager.py:1103 +#, python-format +msgid "volume %s: extending" +msgstr "" + +#: cinder/volume/manager.py:1105 +#, python-format +msgid "volume %s: extended successfully" +msgstr "" + +#: cinder/volume/manager.py:1107 +#, python-format +msgid "volume %s: Error trying to extend volume" +msgstr "" + +#: cinder/volume/manager.py:1169 +msgid "Failed to update usages while retyping volume." +msgstr "" + +#: cinder/volume/manager.py:1170 +msgid "Failed to get old volume type quota reservations" +msgstr "" + +#: cinder/volume/manager.py:1190 +#, python-format +msgid "Volume %s: retyped succesfully" +msgstr "" + +#: cinder/volume/manager.py:1193 +#, python-format +msgid "" +"Volume %s: driver error when trying to retype, falling back to generic " +"mechanism." +msgstr "" + +#: cinder/volume/manager.py:1204 +msgid "Retype requires migration but is not allowed." +msgstr "" + +#: cinder/volume/manager.py:1212 +msgid "Volume must not have snapshots." 
+msgstr "" + +#: cinder/volume/qos_specs.py:57 +#, python-format +msgid "Valid consumer of QoS specs are: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:84 cinder/volume/qos_specs.py:105 +#: cinder/volume/qos_specs.py:155 cinder/volume/qos_specs.py:197 +#: cinder/volume/qos_specs.py:211 cinder/volume/qos_specs.py:225 +#: cinder/volume/volume_types.py:43 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:123 cinder/volume/qos_specs.py:140 +#: cinder/volume/qos_specs.py:272 cinder/volume/volume_types.py:52 +#: cinder/volume/volume_types.py:99 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/qos_specs.py:156 +#, python-format +msgid "Failed to get all associations of qos specs %s" +msgstr "" + +#: cinder/volume/qos_specs.py:189 +#, python-format +msgid "" +"Type %(type_id)s is already associated with another qos specs: " +"%(qos_specs_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:198 +#, python-format +msgid "Failed to associate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:212 +#, python-format +msgid "Failed to disassociate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:226 +#, python-format +msgid "Failed to disassociate qos specs %s." +msgstr "" + +#: cinder/volume/qos_specs.py:284 cinder/volume/volume_types.py:111 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/utils.py:144 +#, python-format +msgid "" +"Incorrect value error: %(blocksize)s, it may indicate that " +"'volume_dd_blocksize' was configured incorrectly. Fall back to default." +msgstr "" + +#: cinder/volume/volume_types.py:130 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:131 +#: cinder/volume/drivers/block_device.py:143 cinder/volume/drivers/lvm.py:654 +#: cinder/volume/drivers/lvm.py:669 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:157 cinder/volume/drivers/lvm.py:687 +#, python-format +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:183 cinder/volume/drivers/lvm.py:483 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:200 cinder/volume/drivers/lvm.py:504 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:272 cinder/volume/drivers/lvm.py:227 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:287 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:311 cinder/volume/drivers/lvm.py:300 +#: cinder/volume/drivers/zadara.py:509 cinder/volume/drivers/nexenta/nfs.py:189 +#, python-format +msgid "Creating clone of volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:380 +msgid "No free disk" +msgstr "" + +#: cinder/volume/drivers/block_device.py:393 +msgid "No big enough free disk" +msgstr "" + +#: cinder/volume/drivers/coraid.py:84 +#, python-format +msgid "Invalid ESM url scheme \"%s\". Supported https only." +msgstr "" + +#: cinder/volume/drivers/coraid.py:111 +msgid "Invalid REST handle name. Expected path." 
+msgstr "" + +#: cinder/volume/drivers/coraid.py:134 +#, python-format +msgid "Call to json.loads() failed: %(ex)s. Response: %(resp)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:224 +msgid "Session is expired. Relogin on ESM." +msgstr "" + +#: cinder/volume/drivers/coraid.py:244 +msgid "Reply is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:246 +msgid "Error message is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:284 +#, python-format +msgid "Coraid Appliance ping failed: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:297 +#, python-format +msgid "Volume \"%(name)s\" created with VSX LUN \"%(lun)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:311 +#, python-format +msgid "Volume \"%s\" deleted." +msgstr "" + +#: cinder/volume/drivers/coraid.py:315 +#, python-format +msgid "Resize volume \"%(name)s\" to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:319 +#, python-format +msgid "Repository for volume \"%(name)s\" found: \"%(repo)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:333 +#, python-format +msgid "Volume \"%(name)s\" resized. New size is %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:385 +msgid "Cannot create clone volume in different repository." +msgstr "" + +#: cinder/volume/drivers/coraid.py:505 +#, python-format +msgid "Initialize connection %(shelf)s/%(lun)s for %(name)s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:139 +#, python-format +msgid "" +"CLI output\n" +"%s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:154 +msgid "Reading CLI MOTD" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:158 +#, python-format +msgid "Setting CLI terminal width: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:162 +#, python-format +msgid "Sending CLI command: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:169 +msgid "Error executing EQL command" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:199 +#, python-format +msgid "EQL-driver: executing \"%s\"" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:208 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:383 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:215 cinder/volume/drivers/san/san.py:149 +#, python-format +msgid "Error running SSH command: %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:282 +#, python-format +msgid "Volume %s does not exist, it may have already been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:300 +#, python-format +msgid "EQL-driver: Setup is complete, group IP is %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:304 +msgid "Failed to setup the Dell EqualLogic driver" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:320 +#, python-format +msgid "Failed to create volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:329 +#, python-format +msgid "Volume %s was not found while trying to delete it" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:333 +#, python-format +msgid "Failed to delete volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:348 +#, python-format +msgid "Failed to create snapshot of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:361 +#, python-format +msgid "Failed to create volume from snapshot %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:374 +#, python-format +msgid "Failed to create clone of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:384 +#, python-format +msgid "Failed to delete snapshot %(snap)s of volume %(vol)s" +msgstr "" + +#: 
cinder/volume/drivers/eqlx.py:405 +#, python-format +msgid "Failed to initialize connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:415 +#, python-format +msgid "Failed to terminate connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:436 +#, python-format +msgid "Volume %s is not found!, it may have been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:440 +#, python-format +msgid "Failed to ensure export of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:459 +#, python-format +msgid "Failed to extend_volume %(name)s from %(current_size)sGB to %(new_size)sGB" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:86 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:91 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:103 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:161 +#, python-format +msgid "Cloning volume %(src)s to volume %(dst)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:166 +msgid "Volume status must be 'available'." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:202 cinder/volume/drivers/nfs.py:122 +#: cinder/volume/drivers/netapp/nfs.py:753 +#, python-format +msgid "casted to %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:216 +msgid "Snapshot status must be \"available\" to clone." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:238 +#, python-format +msgid "snapshot: %(snap)s, volume: %(vol)s, volume_size: %(size)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:257 +#, python-format +msgid "will copy from snapshot at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:275 cinder/volume/drivers/nfs.py:172 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:373 +#, python-format +msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:403 +#, python-format +msgid "nova call result: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:405 +msgid "Call to Nova to create snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:427 +msgid "Nova returned \"error\" status while creating snapshot." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:431 +#, python-format +msgid "Status of snapshot %(id)s is now %(status)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:444 +#, python-format +msgid "Timed out while waiting for Nova update for creation of snapshot %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:456 +#, python-format +msgid "create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:457 +#, python-format +msgid "volume id: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:532 +msgid "'active' must be present when writing snap_info." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:562 +#, python-format +msgid "deleting snapshot %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:566 +msgid "Volume status must be \"available\" or \"in-use\"." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:582 +#, python-format +msgid "" +"Snapshot record for %s is not present, allowing snapshot_delete to " +"proceed." 
+msgstr "" + +#: cinder/volume/drivers/glusterfs.py:587 +#, python-format +msgid "snapshot_file for this snap is %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:608 +#, python-format +msgid "No base file found for %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:625 +#, python-format +msgid "No %(base_id)s found for %(file)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:680 +#, python-format +msgid "No file found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:690 +#, python-format +msgid "No snap found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:701 +#, python-format +msgid "No file depends on %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:727 +#, python-format +msgid "Check condition failed: %s expected to be None." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:778 +msgid "Call to Nova delete snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:796 +#, python-format +msgid "status of snapshot %s is still \"deleting\"... waiting" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:802 +#, python-format +msgid "Unable to delete snapshot %(id)s, status: %(status)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:815 +#, python-format +msgid "Timed out while waiting for Nova update for deletion of snapshot %(id)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:904 +#, python-format +msgid "%s must be a valid raw or qcow2 image." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:967 +msgid "Extend volume is only supported for this driver when no snapshots exist." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:975 +#, python-format +msgid "Unrecognized backing format: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:990 +#, python-format +msgid "creating new volume at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:993 +#, python-format +msgid "file already exists at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1019 cinder/volume/drivers/nfs.py:159 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1021 +#, python-format +msgid "Available shares: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1038 +#, python-format +msgid "" +"GlusterFS share at %(dir)s is not writable by the Cinder volume service. " +"Snapshot operations will not be supported." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:96 +#, python-format +msgid "GPFS is not active. Detailed output: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:97 +#, python-format +msgid "GPFS is not running - state: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:140 +msgid "Option gpfs_mount_point_base is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:147 +msgid "Option gpfs_images_share_mode is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:153 +msgid "Option gpfs_images_dir is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:160 +#, python-format +msgid "" +"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " +"belong to different file systems" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:169 +#, python-format +msgid "" +"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in " +"cluster daemon level %(cur)s - must be at least at level %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:183 +#, python-format +msgid "%s must be an absolute path." 
+msgstr "" + +#: cinder/volume/drivers/gpfs.py:188 +#, python-format +msgid "%s is not a directory." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:197 +#, python-format +msgid "" +"The GPFS filesystem %(fs)s is not at the required release level. Current" +" level is %(cur)s, must be at least %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:556 +#, python-format +msgid "Failed to resize volume %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:604 +#, python-format +msgid "mkfs failed on volume %(vol)s, error message was: %(err)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:637 +#, python-format +msgid "" +"%s cannot be accessed. Verify that GPFS is active and file system is " +"mounted." +msgstr "" + +#: cinder/volume/drivers/lvm.py:189 +#, python-format +msgid "Unabled to delete due to existing snapshot for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:215 +#, python-format +msgid "Volume device file path %s does not exist." +msgstr "" + +#: cinder/volume/drivers/lvm.py:221 +#, python-format +msgid "Size for volume: %s not found, cannot secure delete." +msgstr "" + +#: cinder/volume/drivers/lvm.py:262 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:361 +#, python-format +msgid "Unable to update stats on non-initialized Volume Group: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:462 +#, python-format +msgid "Error creating iSCSI target, retrying creation for target: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:482 +#, python-format +msgid "volume_info:%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:518 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:519 cinder/volume/drivers/lvm.py:724 +#: cinder/volume/drivers/huawei/rest_common.py:1225 +#, python-format +msgid "%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:573 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/nfs.py:109 +msgid "Driver specific implementation needs to return mount_point_base." +msgstr "" + +#: cinder/volume/drivers/nfs.py:263 +#, python-format +msgid "Expected volume size was %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:264 +#, python-format +msgid " but size is now %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:361 +#, python-format +msgid "%s is already mounted" +msgstr "" + +#: cinder/volume/drivers/nfs.py:421 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:426 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/nfs.py:431 +#, python-format +msgid "NFS config 'nfs_oversub_ratio' invalid. Must be > 0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:439 +#, python-format +msgid "NFS config 'nfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:493 +#, python-format +msgid "Selected %s as target nfs share." 
+msgstr "" + +#: cinder/volume/drivers/nfs.py:526 +#, python-format +msgid "%s is above nfs_used_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:529 +#, python-format +msgid "%s is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:532 +#, python-format +msgid "%s reserved space is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/rbd.py:160 +#, python-format +msgid "Invalid argument - whence=%s not supported" +msgstr "" + +#: cinder/volume/drivers/rbd.py:164 +msgid "Invalid argument" +msgstr "" + +#: cinder/volume/drivers/rbd.py:183 +msgid "fileno() not supported by RBD()" +msgstr "" + +#: cinder/volume/drivers/rbd.py:210 +#, python-format +msgid "error opening rbd image %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:259 +msgid "rados and rbd python libraries not found" +msgstr "" + +#: cinder/volume/drivers/rbd.py:265 +msgid "error connecting to ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:346 cinder/volume/drivers/sheepdog.py:178 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:377 +#, python-format +msgid "clone depth exceeds limit of %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:411 +#, python-format +msgid "maximum clone depth (%d) has been reached - flattening source volume" +msgstr "" + +#: cinder/volume/drivers/rbd.py:423 +#, python-format +msgid "flattening source volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:435 +#, python-format +msgid "creating snapshot='%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:445 +#, python-format +msgid "cloning '%(src_vol)s@%(src_snap)s' to '%(dest)s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:459 +msgid "clone created successfully" +msgstr "" + +#: cinder/volume/drivers/rbd.py:468 +#, python-format +msgid "creating volume '%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:484 +#, python-format +msgid "flattening %(pool)s/%(img)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:490 +#, python-format +msgid "cloning %(pool)s/%(img)s@%(snap)s to %(dst)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:527 +msgid "volume has no backup snaps" +msgstr "" + +#: cinder/volume/drivers/rbd.py:550 +#, python-format +msgid "volume %s is not a clone" +msgstr "" + +#: cinder/volume/drivers/rbd.py:568 +#, python-format +msgid "deleting parent snapshot %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:579 +#, python-format +msgid "deleting parent %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:593 +#, python-format +msgid "volume %s no longer exists in backend" +msgstr "" + +#: cinder/volume/drivers/rbd.py:609 +msgid "volume has clone snapshot(s)" +msgstr "" + +#: cinder/volume/drivers/rbd.py:625 +#, python-format +msgid "deleting rbd volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:629 +msgid "" +"ImageBusy error raised while deleting rbd volume. This may have been " +"caused by a connection from a client that has crashed and, if so, may be " +"resolved by retrying the delete after 30 seconds has elapsed." 
+msgstr "" + +#: cinder/volume/drivers/rbd.py:642 +msgid "volume is a clone so cleaning references" +msgstr "" + +#: cinder/volume/drivers/rbd.py:696 +#, python-format +msgid "connection data: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:705 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:709 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:712 +msgid "Not an rbd snapshot" +msgstr "" + +#: cinder/volume/drivers/rbd.py:724 +#, python-format +msgid "not cloneable: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:728 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:733 +msgid "rbd image clone requires image format to be 'raw' but image {0} is '{1}'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:747 +#, python-format +msgid "Unable to open image %(loc)s: %(err)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:817 +msgid "volume backup complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:830 +msgid "volume restore complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:840 cinder/volume/drivers/sheepdog.py:195 +#, python-format +msgid "Failed to Extend Volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:845 cinder/volume/drivers/sheepdog.py:200 +#: cinder/volume/drivers/windows/windows.py:223 +#, python-format +msgid "Extend volume from %(old_size)s GB to %(new_size)s GB." +msgstr "" + +#: cinder/volume/drivers/scality.py:67 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:78 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:84 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:105 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:139 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:59 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:64 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:144 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:151 +#, python-format +msgid "" +"Failed to make httplib connection SolidFire Cluster: %s (verify san_ip " +"settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:154 +#, python-format +msgid "Failed to make httplib connection: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:161 +#, python-format +msgid "" +"Request to SolidFire cluster returned bad status: %(status)s / %(reason)s" +" (check san_login/san_password settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:166 +#, python-format +msgid "HTTP request failed, with status: %(status)s and reason: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:177 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:183 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:187 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:189 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:195 +#, python-format +msgid "Detected 
xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:202 +#: cinder/volume/drivers/solidfire.py:271 +#: cinder/volume/drivers/solidfire.py:366 +#, python-format +msgid "API response: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:222 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:253 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:315 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:398 +msgid "Failed to get model update from clone" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:410 +#, python-format +msgid "Failed volume create: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:425 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:460 +#, python-format +msgid "Failed to get SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:469 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:478 +#, python-format +msgid "Volume %s, not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:481 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:550 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:554 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:556 +msgid "This usually means the volume was never successfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:569 +#, python-format +msgid "Failed to delete SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:572 +#: cinder/volume/drivers/solidfire.py:646 +#: cinder/volume/drivers/solidfire.py:709 +#: cinder/volume/drivers/solidfire.py:734 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:575 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:579 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:587 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:638 +msgid "Entering SolidFire extend_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:660 +msgid "Leaving SolidFire extend_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:665 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:673 +msgid "Failed to get updated stats" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:703 +#: cinder/volume/drivers/solidfire.py:728 +msgid "Entering SolidFire attach_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:773 +msgid "Leaving SolidFire transfer volume" +msgstr "" + +#: cinder/volume/drivers/zadara.py:236 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:260 +#, python-format +msgid "Operation completed. 
%(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:357 +#, python-format +msgid "Pool %(name)s: %(total)sGB total, %(free)sGB free" +msgstr "" + +#: cinder/volume/drivers/zadara.py:408 cinder/volume/drivers/zadara.py:531 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:438 +#, python-format +msgid "Create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:445 cinder/volume/drivers/zadara.py:490 +#: cinder/volume/drivers/zadara.py:516 +#, python-format +msgid "Volume %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:456 +#, python-format +msgid "Delete snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:464 +#, python-format +msgid "snapshot: original volume %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:472 +#, python-format +msgid "snapshot: snapshot %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:483 +#, python-format +msgid "Creating volume from snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:496 +#, python-format +msgid "Snapshot %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:614 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:40 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:79 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:83 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:91 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:98 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:107 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:115 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:130 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:137 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:144 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:152 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:157 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:167 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:177 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:188 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:197 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:218 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:230 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:241 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:257 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:266 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:278 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:287 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:292 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:302 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:312 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:321 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:342 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. 
Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:354 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:365 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:381 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:390 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:402 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:411 +msgid "Entering delete_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:413 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:420 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:430 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:438 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:442 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:456 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:465 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:472 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:476 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:488 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:497 +#: cinder/volume/drivers/emc/emc_smis_common.py:567 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:502 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:518 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:527 +#, python-format +msgid "" +"Error Create Snapshot: %(snapshot)s Volume: %(volume)s Error: " +"%(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:535 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:541 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:545 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:551 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:559 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:574 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:590 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:599 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:611 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:621 +#, python-format +msgid "Create export: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:626 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:648 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:663 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:674 +#, python-format +msgid "Error mapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:678 +#, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:694 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:707 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:711 +#, python-format +msgid "HidePaths for volume %s completed successfully." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:724 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:739 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:744 +#, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:757 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:770 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:775 +#, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:781 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:790 +#: cinder/volume/drivers/emc/emc_smis_common.py:820 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:804 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:810 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:834 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:840 +#, python-format +msgid "Volume %s is already mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:852 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:884 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:887 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:903 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:906 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:928 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:948 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:952 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:959 +msgid "Cannot connect to ECOM server" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:971 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:984 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:997 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1010 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1054 +#, python-format +msgid "Pool %(storage_type)s is not found." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1060 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1066 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1082 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1114 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1117 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1130 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1158 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1184 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1188 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1248 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1289 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1302 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1314 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1326 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1361 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1404 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1409 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1419 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1441 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1463 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1491 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1520 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1526 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1538 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1548 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1550 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1566 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:152 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:161 +#, python-format +msgid "Cannot find device number for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:191 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:198 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:215 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:70 +#, python-format +msgid "Range: start LU: %(start)s, end LU: %(end)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:84 +#, python-format +msgid "setting LU upper (end) limit to %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:92 +#, python-format +msgid "%(element)s: %(val)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:103 cinder/volume/drivers/hds/hds.py:105 +#, python-format +msgid "XML exception reading parameter: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:178 +#, python-format +msgid "portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:197 +#, python-format +msgid "No configuration found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:250 +#, python-format +msgid "HDP not found: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:289 +#, python-format +msgid "iSCSI portal not found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:327 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:355 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is cloned." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:372 +#, python-format +msgid "LUN %(lun)s extended to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:395 +#, python-format +msgid "delete lun %(lun)s on %(name)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:480 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created from snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:503 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is created as snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:522 +#, python-format +msgid "LUN %s is deleted." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:57 +msgid "_instantiate_driver: configuration not found." 
+msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:64 +#, python-format +msgid "" +"_instantiate_driver: Loading %(protocol)s driver for Huawei OceanStor " +"%(product)s series storage arrays." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:84 +#, python-format +msgid "" +"\"Product\" or \"Protocol\" is illegal. \"Product\" should be set to " +"either T, Dorado or HVS. \"Protocol\" should be set to either iSCSI or " +"FC. Product: %(product)s Protocol: %(protocol)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:74 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s host: %(host)s initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:92 +#: cinder/volume/drivers/huawei/huawei_t.py:461 +#, python-format +msgid "initialize_connection: Target FC ports WWNS: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:101 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(ini)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:159 +#: cinder/volume/drivers/huawei/rest_common.py:1278 +#, python-format +msgid "" +"_get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " +"check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:206 +#: cinder/volume/drivers/huawei/rest_common.py:1083 +#, python-format +msgid "_get_tgt_iqn: iSCSI IP is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:234 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:248 +#, python-format +msgid "" +"_get_iscsi_tgt_port_info: Failed to get iSCSI port info. Please make sure" +" the iSCSI port IP %s is configured in array." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:323 +#: cinder/volume/drivers/huawei/huawei_t.py:552 +#, python-format +msgid "" +"terminate_connection: volume: %(vol)s, host: %(host)s, connector: " +"%(initiator)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:351 +#, python-format +msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:436 +msgid "validate_connector: The FC driver requires thewwpns in the connector." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:443 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:578 +#, python-format +msgid "_remove_fc_ports: FC port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:40 +#, python-format +msgid "parse_xml_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:129 +#, python-format +msgid "_get_host_os_type: Host %(ip)s OS type is %(os)s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:59 +#, python-format +msgid "HVS Request URL: %(url)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:60 +#, python-format +msgid "HVS Request Data: %(data)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:73 +#, python-format +msgid "HVS Response Data: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:75 +#, python-format +msgid "Bad response from server: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:82 +msgid "JSON transfer error" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:102 +#, python-format +msgid "Login error, reason is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:166 +#, python-format +msgid "" +"%(err)s\n" +"result: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:173 +#, python-format +msgid "%s \"data\" was not in result." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:208 +msgid "Can't find the Qos policy in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:246 +msgid "Can't find lun or lun group in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:280 +#, python-format +msgid "Invalid resource pool: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:298 +#, python-format +msgid "Get pool info error, pool name is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:327 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:354 +#, python-format +msgid "_stop_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:474 +#, python-format +msgid "" +"_mapping_hostgroup_and_lungroup: lun_group: %(lun_group)sview_id: " +"%(view_id)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:511 +#: cinder/volume/drivers/huawei/rest_common.py:543 +#, python-format +msgid "initiator name:%(initiator_name)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:527 +#, python-format +msgid "host lun id is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:553 +#, python-format +msgid "the free wwns %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:574 +#, python-format +msgid "the fc server properties is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:688 +#, python-format +msgid "JSON transfer data error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:874 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:937 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:964 +#, python-format +msgid "" +"PrefetchType config is wrong. PrefetchType must in 1,2,3,4. fetchtype " +"is:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:970 +msgid "Use default prefetch fetchtype. Prefetch fetchtype:Intelligent." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:982 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal.LUNcopy name: " +"%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1056 +#, python-format +msgid "" +"_get_iscsi_port_info: Failed to get iscsi port info through config IP " +"%(ip)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1101 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1124 +#, python-format +msgid "_parse_volume_type: type id: %(type_id)s config parameter is: %(params)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1157 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the configuration file " +"%(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1162 +#, python-format +msgid "The config parameters are: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1239 +#: cinder/volume/drivers/huawei/ssh_common.py:118 +#: cinder/volume/drivers/huawei/ssh_common.py:1265 +#, python-format +msgid "_check_conf_file: Config file invalid. %s must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1246 +#: cinder/volume/drivers/huawei/ssh_common.py:125 +msgid "_check_conf_file: Config file invalid. StoragePool must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1256 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1300 +msgid "Can not find lun in array" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:54 +#, python-format +msgid "ssh_read: Read SSH timeout. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:70 +msgid "No response message. Please check system status." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:101 +#: cinder/volume/drivers/huawei/ssh_common.py:1249 +msgid "do_setup" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:135 +#: cinder/volume/drivers/huawei/ssh_common.py:1287 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType is invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:169 +#, python-format +msgid "_get_login_info: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:224 +#, python-format +msgid "create_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:242 +#, python-format +msgid "" +"_name_translate: Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:279 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the element in configuration " +"file %(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:373 +#: cinder/volume/drivers/huawei/ssh_common.py:1451 +#, python-format +msgid "LUNType must be \"Thin\" or \"Thick\". LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:395 +msgid "" +"_parse_conf_lun_params: Use default prefetch type. 
Prefetch type: " +"Intelligent" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:421 +#, python-format +msgid "" +"_get_maximum_capacity_pool_id: Failed to get pool id. Please check config" +" file and make sure the StoragePool %s is created in storage array." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:436 +#, python-format +msgid "CLI command: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:466 +#, python-format +msgid "" +"_execute_cli: Can not connect to IP %(old)s, try to connect to the other " +"IP %(new)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:501 +#, python-format +msgid "_execute_cli: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:511 +#, python-format +msgid "delete_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:516 +#, python-format +msgid "delete_volume: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:570 +#, python-format +msgid "" +"create_volume_from_snapshot: snapshot name: %(snapshot)s, volume name: " +"%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:580 +#, python-format +msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:650 +#, python-format +msgid "_wait_for_luncopy: LUNcopy %(luncopyname)s status is %(status)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:688 +#, python-format +msgid "create_cloned_volume: src volume: %(src)s, tgt volume: %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:697 +#, python-format +msgid "Source volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:739 +#, python-format +msgid "" +"extend_volume: extended volume name: %(extended_name)s new added volume " +"name: %(added_name)s new added volume size: %(added_size)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:747 +#, python-format +msgid "extend_volume: volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:779 +#, python-format +msgid "create_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:785 +msgid "create_snapshot: Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:792 +#, python-format +msgid "create_snapshot: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:855 +#, python-format +msgid "delete_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:865 +#, python-format +msgid "" +"delete_snapshot: Can not delete snapshot %s for it is a source LUN of " +"LUNCopy." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:873 +#, python-format +msgid "delete_snapshot: Snapshot %(snap)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:916 +#, python-format +msgid "" +"%(func)s: %(msg)s\n" +"CLI command: %(cmd)s\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:933 +#, python-format +msgid "map_volume: Volume %s was not found." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1079 +#, python-format +msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1102 +#, python-format +msgid "remove_map: Host %s does not exist." 
+msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1106 +#, python-format +msgid "remove_map: Volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1119 +#, python-format +msgid "remove_map: No map between host %(host)s and volume %(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1138 +#, python-format +msgid "" +"_delete_map: There are IOs accessing the system. Retry to delete host map" +" %(mapid)s 10s later." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1146 +#, python-format +msgid "" +"_delete_map: Failed to delete host map %(mapid)s.\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1185 +msgid "_update_volume_stats: Updating volume stats." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1277 +msgid "_check_conf_file: Config file invalid. StoragePool must be specified." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1311 +msgid "" +"_get_device_type: The driver only supports Dorado5100 and Dorado 2100 G2 " +"now." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1389 +#, python-format +msgid "" +"create_volume_from_snapshot: %(device)s does not support create volume " +"from snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1396 +#, python-format +msgid "create_cloned_volume: %(device)s does not support clone volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1404 +#, python-format +msgid "extend_volume: %(device)s does not support extend volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1413 +#, python-format +msgid "create_snapshot: %(device)s does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:132 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:142 +#, python-format +msgid "Failed getting details for pool %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:179 +msgid "do_setup: No configured nodes." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:182 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:186 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:190 +msgid "Unable to determine system name" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:193 +msgid "Unable to determine system id" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:196 +msgid "Unable to determine pool extent size" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:203 +#: cinder/volume/drivers/netapp/iscsi.py:122 +#: cinder/volume/drivers/netapp/nfs.py:639 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:157 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:209 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:217 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:225 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:235 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:254 +msgid "The connector does not contain the required information." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:277 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:297 +msgid "CHAP secret exists for host but CHAP is disabled" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:302 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:314 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:316 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:333 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:342 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:365 +msgid "" +"Could not get FC connection information for the host-volume connection. " +"Is the host configured properly for FC connections?" 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:380 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:385 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:403 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:411 +msgid "terminate_connection: Failed to get host name from connector." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:421 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:447 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:459 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:470 +#, python-format +msgid "enter: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:474 +msgid "extend_volume: Extending a volume with snapshots is not supported." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:481 +#, python-format +msgid "leave: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:497 +#, python-format +msgid "enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:523 +#, python-format +msgid "leave: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:540 +#, python-format +msgid "" +"enter: retype: id=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:581 +#, python-format +msgid "" +"exit: retype: ild=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:622 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:623 +msgid "_update_volume_stats: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:44 +#, python-format +msgid "Could not find key in output of command %(cmd)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:64 +#, python-format +msgid "Failed to get code level (%s)." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:86 +#, python-format +msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:143 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:165 +#, python-format +msgid "Failed to find host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:178 +#, python-format +msgid "enter: get_host_from_connector: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:207 +#, python-format +msgid "leave: get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:218 +#, python-format +msgid "enter: create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:224 +msgid "create_host: Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:235 +msgid "create_host: No initiators or wwpns supplied." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:265 +#, python-format +msgid "leave: create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:275 +#, python-format +msgid "enter: map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:301 +#, python-format +msgid "" +"leave: map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host " +"%(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:311 +#, python-format +msgid "enter: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:318 +#, python-format +msgid "unmap_vol_from_host: No mapping of volume %(vol_name)s to any host found." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:324 +#, python-format +msgid "" +"unmap_vol_from_host: Multiple mappings of volume %(vol_name)s found, no " +"host specified." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:336 +#, python-format +msgid "" +"unmap_vol_from_host: No mapping of volume %(vol_name)s to host %(host) " +"found." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:348 +#, python-format +msgid "leave: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:377 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:383 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:390 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:397 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:402 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:408 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:417 +#, python-format +msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:452 +msgid "Protocol must be specified as ' iSCSI' or ' FC'." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:495 +#, python-format +msgid "enter: create_vdisk: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:498 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:525 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping%(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:535 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within theallotted %(to)d " +"seconds timeout. Terminating." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:544 +#, python-format +msgid "" +"enter: run_flashcopy: execute FlashCopy from source %(source)s to target " +"%(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:552 +#, python-format +msgid "leave: run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:572 +#, python-format +msgid "Loopcall: _check_vdisk_fc_mappings(), vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:595 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:631 +#, python-format +msgid "Calling _ensure_vdisk_no_fc_mappings: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:639 +#, python-format +msgid "enter: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:641 +#, python-format +msgid "Tried to delete non-existant vdisk %s." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:645 +#, python-format +msgid "leave: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:649 +#, python-format +msgid "enter: create_copy: snapshot %(src)s to %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:654 +#, python-format +msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:669 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt)s from vdisk %(src)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:691 +msgid "migrate_volume started without a vdisk copy in the expected pool." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:743 +#, python-format +msgid "" +"Ignore change IO group as storage code level is %(code_level)s, below " +"then 6.4.0.0" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:35 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:211 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:244 +#, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:53 +#, python-format +msgid "Expected no output from CLI command %(cmd)s, got %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:65 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:256 +#, python-format +msgid "" +"Failed to parse CLI output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:142 +msgid "Must pass wwpn or host to lsfabric." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:158 +#, python-format +msgid "Did not find success message nor error for %(fun)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:163 +msgid "" +"storwize_svc_multihostmap_enabled is set to False, not allowing multi " +"host mapping." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:347 +#, python-format +msgid "Did not find expected key %(key)s in %(fun)s: %(raw)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:382 +#, python-format +msgid "" +"Unexpected CLI response: header/row mismatch. header: %(header)s, row: " +"%(row)s" +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:419 +#, python-format +msgid "No element by given name %s." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:440 +msgid "Not a valid value for NaElement." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:444 +msgid "NaElement name cannot be null." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:468 +msgid "Type cannot be converted into NaElement." 
+msgstr "" + +#: cinder/volume/drivers/netapp/common.py:75 +msgid "Required configuration not found" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:103 +#, python-format +msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:109 +#, python-format +msgid "Storage family %s is not supported" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:116 +#, python-format +msgid "No default storage protocol found for storage family %(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:123 +#, python-format +msgid "" +"Protocol %(storage_protocol)s is not supported for storage family " +"%(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:130 +#, python-format +msgid "" +"NetApp driver of family %(storage_family)s and protocol " +"%(storage_protocol)s loaded" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:139 +msgid "Only loading netapp drivers supported." +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:158 +#, python-format +msgid "" +"The configured NetApp driver is deprecated. Please refer the link to " +"resolve the issue '%s'." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:69 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:105 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:150 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:166 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:175 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:191 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:227 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:232 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:238 +#, python-format +msgid "Failed to get LUN target details for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:249 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:252 +#, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:290 +#, python-format +msgid "Snapshot %s deletion successful" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:310 +#: cinder/volume/drivers/netapp/iscsi.py:565 +#: cinder/volume/drivers/netapp/nfs.py:99 +#: cinder/volume/drivers/netapp/nfs.py:206 +#, python-format +msgid "Resizing %s failed. Cleaning volume." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:325 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:412 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:431 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:511 +msgid "Object is not a NetApp LUN." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:543 +#, python-format +msgid "Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:545 +#, python-format +msgid "Error getting lun attribute. Exception: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:600 +#, python-format +msgid "No need to extend volume %s as it is already the requested new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:606 +#, python-format +msgid "Resizing lun %s directly to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:633 +#, python-format +msgid "Lun %(path)s geometry failed. Message - %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:662 +#, python-format +msgid "Moving lun %(name)s to %(new_name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:677 +#, python-format +msgid "Resizing lun %s using sub clone to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:684 +#, python-format +msgid "%s cannot be sub clone resized as it is hosted on compressed volume" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:690 +#, python-format +msgid "%s cannot be sub clone resized as it contains no blocks." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:707 +#, python-format +msgid "Post clone resize lun %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:718 +#, python-format +msgid "Failure staging lun %s to tmp." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:723 +#, python-format +msgid "Failure moving new cloned lun to %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:727 +#, python-format +msgid "Failure deleting staged tmp lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:730 +#, python-format +msgid "Unknown exception in post clone resize lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:732 +#, python-format +msgid "Exception details: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:736 +msgid "Getting lun block count." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:741 +#, python-format +msgid "Failure getting lun info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:785 +#, python-format +msgid "Failed to get vol with required size and extra specs for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:796 +#, python-format +msgid "Error provisioning vol %(name)s on %(volume)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:841 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:982 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:986 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1087 +msgid "Cluster ssc is not updated. No volume stats found." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1149 +#: cinder/volume/drivers/netapp/nfs.py:1080 +msgid "Unsupported ONTAP version. ONTAP version 7.3.1 and above is supported." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1153 +#: cinder/volume/drivers/netapp/nfs.py:1084 +#: cinder/volume/drivers/netapp/utils.py:320 +msgid "Api version could not be determined." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1164 +#, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1273 +#, python-format +msgid "Error finding luns for volume %s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1390 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1393 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1456 +msgid "Volume refresh job already running. Returning..." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1462 +#, python-format +msgid "Error refreshing vol capacity. Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1470 +#, python-format +msgid "Refreshing capacity info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:104 +#: cinder/volume/drivers/netapp/nfs.py:211 +#, python-format +msgid "NFS file %s not discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:224 +#, python-format +msgid "Copied image to volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:230 +#, python-format +msgid "Registering image in cache %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:237 +#, python-format +msgid "" +"Exception while registering image %(image_id)s in cache. Exception: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:250 +#, python-format +msgid "Found cache file for image %(image_id)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:263 +#, python-format +msgid "Cloning img from cache for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:271 +msgid "Image cache cleaning in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:282 +msgid "Image cache cleaning in progress." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:293 +#, python-format +msgid "Cleaning cache for share %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:298 +#, python-format +msgid "Files to be queued for deletion %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:305 +#, python-format +msgid "Exception during cache cleaning %(share)s. Message - %(ex)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:311 +msgid "Image cache cleaning done." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:336 +#, python-format +msgid "Bytes to free %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:343 +#, python-format +msgid "Delete file path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:358 +#, python-format +msgid "Deleting file at path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:363 +#, python-format +msgid "Exception during deleting %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:395 +#, python-format +msgid "Unexpected exception in cloning image %(image_id)s. 
Message: %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:411 +#, python-format +msgid "Cloning image %s from cache" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:415 +#, python-format +msgid "Cache share: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:425 +#, python-format +msgid "Unexpected exception during image cloning in share %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:431 +#, python-format +msgid "Cloning image %s directly in share" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:436 +#, python-format +msgid "Share is cloneable %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:443 +#, python-format +msgid "Image is raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:450 +#, python-format +msgid "Image will locally be converted to raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:457 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:467 +#, python-format +msgid "Performing post clone for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:474 +msgid "NFS file could not be discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:478 +msgid "Checking file for resize" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:482 +#, python-format +msgid "Resizing file to %sG" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:488 +msgid "Resizing image file failed." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:510 +msgid "Discover file retries exhausted." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:529 +#, python-format +msgid "Image location not in the expected format %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:557 +#, python-format +msgid "Found possible share matches %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:561 +msgid "Unexpected exception while short listing used share." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:599 +#, python-format +msgid "Extending volume %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:710 +#, python-format +msgid "Shares on vserver %s will only be used for provisioning." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:714 +#: cinder/volume/drivers/netapp/nfs.py:892 +msgid "No vserver set in config. SSC will be disabled." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:757 +#, python-format +msgid "Exception creating vol %(name)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:765 +#, python-format +msgid "Volume %s could not be created on shares." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:815 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:856 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:862 +#, python-format +msgid "" +"Cloning with params volume %(volume)s, src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:924 +msgid "No cluster ssc stats found. Wait for next volume stats update." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:941 +msgid "No shares found hence skipping ssc refresh." 
+msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:978 +#: cinder/volume/drivers/netapp/nfs.py:1221 +#, python-format +msgid "Shortlisted del elg files %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:983 +#: cinder/volume/drivers/netapp/nfs.py:1226 +#, python-format +msgid "Getting file usage for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:988 +#: cinder/volume/drivers/netapp/nfs.py:1231 +#, python-format +msgid "file-usage for path %(path)s is %(bytes)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1005 +#: cinder/volume/drivers/netapp/nfs.py:1268 +#, python-format +msgid "Share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1007 +#: cinder/volume/drivers/netapp/nfs.py:1270 +#, python-format +msgid "No share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1038 +#, python-format +msgid "Found volume %(vol)s for share %(share)s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1129 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1139 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:241 +#, python-format +msgid "Unexpected error while creating ssc vol list. Message - %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:272 +#, python-format +msgid "Exception querying aggr options. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:313 +#, python-format +msgid "Exception querying sis information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:347 +#, python-format +msgid "Exception querying mirror information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:379 +#, python-format +msgid "Exception querying storage disk. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:421 +#, python-format +msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:455 +#, python-format +msgid "Successfully completed stale refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:482 +#, python-format +msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:488 +#, python-format +msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:500 +msgid "Backend not a VolumeDriver." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:502 +msgid "Backend server not NaServer." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:505 +msgid "ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:517 +msgid "refresh stale ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:616 +msgid "Fatal error: User not permitted to query NetApp volumes." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:623 +#, python-format +msgid "" +"The user does not have access or sufficient privileges to use all ssc " +"apis. The ssc features %s may not work as expected." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:122 +msgid "ems executed successfully." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:124 +#, python-format +msgid "Failed to invoke ems. 
Message : %s" +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:137 +msgid "" +"It is not the recommended way to use drivers by NetApp. Please use " +"NetAppDriver to achieve the functionality." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:160 +msgid "Requires an NaServer instance." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:317 +msgid "Unsupported Clustered Data ONTAP version." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:99 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:150 +#, python-format +msgid "Extending volume: %(id)s New size: %(size)s GB" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:166 +#, python-format +msgid "Volume %s does not exist, it seems it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:179 +#, python-format +msgid "Cannot delete snapshot %(origin): %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:190 +#, python-format +msgid "Creating temp snapshot of the original volume: %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:200 +#: cinder/volume/drivers/nexenta/nfs.py:200 +#, python-format +msgid "Volume creation failed, deleting created snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:205 +#: cinder/volume/drivers/nexenta/nfs.py:205 +#, python-format +msgid "Failed to delete zfs snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:223 +#, python-format +msgid "Enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:250 +#, python-format +msgid "Remote NexentaStor appliance at %s should be SSH-bound." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:267 +#, python-format +msgid "" +"Cannot send source snapshot %(src)s to destination %(dst)s. Reason: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:275 +#, python-format +msgid "" +"Cannot delete temporary source snapshot %(src)s on NexentaStor Appliance:" +" %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:281 +#, python-format +msgid "Cannot delete source volume %(volume)s on NexentaStor Appliance: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:318 +#, python-format +msgid "Snapshot %s does not exist, it seems it was already deleted." 
+msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:439 +#: cinder/volume/drivers/windows/windows_utils.py:230 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:449 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:461 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:471 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:481 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:514 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:522 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:83 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:88 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:89 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:90 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:96 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:85 +#, python-format +msgid "Volume %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:89 +#, python-format +msgid "Folder %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:114 +#, python-format +msgid "Creating folder on Nexenta Store %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:146 +#, python-format +msgid "Cannot destroy created folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:176 +#, python-format +msgid "Cannot destroy cloned folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:227 +#, python-format +msgid "Folder %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:237 +#: cinder/volume/drivers/nexenta/nfs.py:268 +#, python-format +msgid "Snapshot %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:302 +#, python-format +msgid "Creating regular file: %s.This may take some time." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:313 +#, python-format +msgid "Regular file: %s created." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:365 +#, python-format +msgid "Sharing folder %s on Nexenta Store" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:393 +#, python-format +msgid "Shares loaded: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/utils.py:46 +#, python-format +msgid "Invalid value: \"%s\"" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:93 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:99 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:107 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:137 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:190 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:246 +#, python-format +msgid "Snapshot info: %(name)s => %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:321 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:79 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:166 +#, python-format +msgid "Invalid hp3parclient version. Version %s or greater required." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:179 +#, python-format +msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:193 +#, python-format +msgid "HP3PARCommon %(common_ver)s, hp3parclient %(rest_ver)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:212 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:494 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:228 +#, python-format +msgid "Failed to get domain because CPG (%s) doesn't exist on array." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:247 +#, python-format +msgid "Error extending volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:347 +#, python-format +msgid "command %s failed" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:390 +#, python-format +msgid "Error running ssh command: %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:586 +#, python-format +msgid "VV Set %s does not exist." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:633 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:684 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:752 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1004 +#, python-format +msgid "Failure in update_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1019 +#, python-format +msgid "Failure in clear_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1031 +#, python-format +msgid "Error attaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1039 +#, python-format +msgid "Error detaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:124 +#, python-format +msgid "Invalid IP address format '%s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:158 +#, python-format +msgid "" +"Found invalid iSCSI IP address(s) in configuration option(s) " +"hp3par_iscsi_ips or iscsi_ip_address '%s.'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:164 +msgid "At least one valid iSCSI IP address must be set." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:266 +msgid "Least busy iSCSI port not found, using first iSCSI port in list." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:75 +#, python-format +msgid "Failure while invoking function: %(func)s. Error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:162 +#, python-format +msgid "Error while terminating session: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:165 +msgid "Successfully established connection to the server." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:172 +#, python-format +msgid "Error while logging out the user: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:218 +#, python-format +msgid "" +"Not authenticated error occurred. Will create session and try API call " +"again: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:258 +#, python-format +msgid "Task: %(task)s progress: %(prog)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:262 +#, python-format +msgid "Task %s status: success." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:266 +#: cinder/volume/drivers/vmware/api.py:271 +#, python-format +msgid "Task: %(task)s failed with error: %(err)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:290 +msgid "Lease is ready." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:294 +msgid "Lease initializing..." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:304 +#, python-format +msgid "Error: unknown lease state %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:51 +#, python-format +msgid "Read %(bytes)s out of %(max)s from ThreadSafePipe." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:56 +#, python-format +msgid "Completed transfer of size %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:102 +#, python-format +msgid "Initiating image service update on image: %(image)s with meta: %(meta)s" +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:117 +#, python-format +msgid "Glance image: %s is now active." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:123 +#, python-format +msgid "Glance image: %s is in killed state." 
+msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:132 +#, python-format +msgid "Glance image %(id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:171 +#, python-format +msgid "" +"Exception during HTTP connection close in VMwareHTTPWrite. Exception is " +"%s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:203 +#: cinder/volume/drivers/vmware/read_write_util.py:292 +msgid "Could not retrieve URL from lease." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:206 +#, python-format +msgid "Opening vmdk url: %s for write." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:231 +#, python-format +msgid "Written %s bytes to vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:242 +#: cinder/volume/drivers/vmware/read_write_util.py:318 +#, python-format +msgid "Updating progress to %s percent." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:258 +#: cinder/volume/drivers/vmware/read_write_util.py:334 +msgid "Lease released." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:260 +#: cinder/volume/drivers/vmware/read_write_util.py:336 +#, python-format +msgid "Lease is already in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:295 +#, python-format +msgid "Opening vmdk url: %s for read." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:307 +#, python-format +msgid "Read %s bytes from vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:150 +#, python-format +msgid "Error(s): %s occurred in the call to RetrievePropertiesEx." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:189 +#, python-format +msgid "No such SOAP method %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:198 +#, python-format +msgid "httplib error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:209 +#, python-format +msgid "Socket error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:218 +#, python-format +msgid "Type error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:225 +#, python-format +msgid "Error in %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:112 +#, python-format +msgid "Returning spec value %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:115 +#, python-format +msgid "Invalid spec value: %s specified." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:118 +#, python-format +msgid "Returning default spec value: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:169 +#, python-format +msgid "%s not set." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:174 +#, python-format +msgid "Successfully setup driver: %(driver)s for server: %(ip)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:219 +msgid "Backing not available, no operation to be performed." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:287 +#, python-format +msgid "" +"Unable to pick datastore to accommodate %(size)s bytes from the " +"datastores: %(dss)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:293 +#, python-format +msgid "" +"Selected datastore: %(datastore)s with %(host_count)d connected host(s) " +"for the volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:375 +#, python-format +msgid "" +"Unable to find suitable datastore for volume of size: %(vol)s GB under " +"host: %(host)s. 
More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:385 +#, python-format +msgid "Unable to find host to accommodate a disk of size: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:412 +#, python-format +msgid "" +"Unable to find suitable datastore for volume: %(vol)s under host: " +"%(host)s. More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:422 +#, python-format +msgid "Unable to create volume: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:441 +#, python-format +msgid "The instance: %s for which initialize connection is called, exists." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:448 +#, python-format +msgid "There is no backing for the volume: %s. Need to create one." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:456 +msgid "The instance for which initialize connection is called, does not exist." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:461 +#, python-format +msgid "Trying to boot from an empty volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:470 +#, python-format +msgid "" +"Returning connection_info: %(info)s for volume: %(volume)s with " +"connector: %(connector)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:518 +#, python-format +msgid "Snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:523 +#, python-format +msgid "There is no backing, so will not create snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:528 +#, python-format +msgid "Successfully created snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:549 +#, python-format +msgid "Delete snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:554 +#, python-format +msgid "There is no backing, and so there is no snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:558 +#, python-format +msgid "Successfully deleted snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:586 +#, python-format +msgid "Successfully cloned new backing: %(back)s from source VMDK file: %(vmdk)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:603 +#, python-format +msgid "" +"There is no backing for the source volume: %(svol)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:633 +#, python-format +msgid "" +"There is no backing for the source snapshot: %(snap)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:642 +#: cinder/volume/drivers/vmware/vmdk.py:982 +#, python-format +msgid "" +"There is no snapshot point for the snapshoted volume: %(snap)s. Not " +"creating any backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:678 +#, python-format +msgid "Cannot create image of disk format: %s. Only vmdk disk format is accepted." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:713 +#: cinder/volume/drivers/vmware/vmdk.py:771 +#, python-format +msgid "Fetching glance image: %(id)s to server: %(host)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:722 +#: cinder/volume/drivers/vmware/vmdk.py:792 +#, python-format +msgid "Done copying image: %(id)s to volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:725 +#, python-format +msgid "" +"Exception in copy_image_to_volume: %(excep)s. Deleting the backing: " +"%(back)s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:746 +#, python-format +msgid "Exception in _select_ds_for_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:749 +#, python-format +msgid "Selected datastore %(ds)s for new volume of size %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:784 +#, python-format +msgid "Exception in copy_image_to_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:787 +#, python-format +msgid "Deleting the backing: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:808 +#, python-format +msgid "Copy glance image: %s to create new volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:842 +msgid "Upload to glance of attached volume is not supported." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:847 +#, python-format +msgid "Copy Volume: %s to new image." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:853 +#, python-format +msgid "Backing not found, creating for volume: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:872 +#, python-format +msgid "Done copying volume %(vol)s to a new image %(img)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:922 +#, python-format +msgid "Relocating volume: %(backing)s to %(ds)s and %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:961 +#: cinder/volume/drivers/vmware/volumeops.py:630 +#, python-format +msgid "Successfully created clone: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:974 +#, python-format +msgid "" +"There is no backing for the snapshoted volume: %(snap)s. Not creating any" +" backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1010 +#, python-format +msgid "" +"There is no backing for the source volume: %(src)s. Not creating any " +"backing for volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1018 +#, python-format +msgid "Linked clone of source volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:94 +#, python-format +msgid "Downloading image: %s from glance image server as a flat vmdk file." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:107 +#: cinder/volume/drivers/vmware/vmware_images.py:126 +#, python-format +msgid "Downloaded image: %s from glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:113 +#, python-format +msgid "Downloading image: %s from glance image server using HttpNfc import." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:132 +#, python-format +msgid "Uploading image: %s to the Glance image server using HttpNfc export." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:158 +#, python-format +msgid "Uploaded image: %s to the Glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:87 +#, python-format +msgid "Did not find any backing with name: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:94 +#, python-format +msgid "Deleting the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:97 +#, python-format +msgid "Initiated deletion of VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:99 +#, python-format +msgid "Deleted the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:237 +#, python-format +msgid "There are no valid datastores attached to %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:289 +#, python-format +msgid "" +"Creating folder: %(child_folder_name)s under parent folder: " +"%(parent_folder)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:306 +#, python-format +msgid "Child folder already present: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:314 +#, python-format +msgid "Created child folder: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:365 +#, python-format +msgid "Spec for creating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:383 +#, python-format +msgid "" +"Creating volume backing name: %(name)s disk_type: %(disk_type)s size_kb: " +"%(size_kb)s at folder: %(folder)s resourse pool: %(resource_pool)s " +"datastore name: %(ds_name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:395 +#, python-format +msgid "Initiated creation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:398 +#, python-format +msgid "Successfully created volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:438 +#, python-format +msgid "Spec for relocating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:452 +#, python-format +msgid "" +"Relocating backing: %(backing)s to datastore: %(ds)s and resource pool: " +"%(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:462 +#, python-format +msgid "Initiated relocation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:464 +#, python-format +msgid "" +"Successfully relocated volume backing: %(backing)s to datastore: %(ds)s " +"and resource pool: %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:474 +#, python-format +msgid "Moving backing: %(backing)s to folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:479 +#, python-format +msgid "Initiated move of volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:482 +#, python-format +msgid "Successfully moved volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:494 +#, python-format +msgid "Snapshoting backing: %(backing)s with name: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:501 +#, python-format +msgid "Initiated snapshot of volume backing: %(backing)s named: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:505 +#, python-format +msgid "Successfully created snapshot: %(snap)s for volume backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:553 +#, python-format +msgid "Deleting the snapshot: %(name)s from backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:558 +#, python-format +msgid "" +"Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " +"delete anything." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:565 +#, python-format +msgid "Initiated snapshot: %(name)s deletion for backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:569 +#, python-format +msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:597 +#, python-format +msgid "Spec for cloning the backing: %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:613 +#, python-format +msgid "" +"Creating a clone of backing: %(back)s, named: %(name)s, clone type: " +"%(type)s from snapshot: %(snap)s on datastore: %(ds)s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:627 +#, python-format +msgid "Initiated clone of backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:638 +#, python-format +msgid "Deleting file: %(file)s under datacenter: %(dc)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:646 +#, python-format +msgid "Initiated deletion via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:648 +#, python-format +msgid "Successfully deleted file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:701 +msgid "Copying disk data before snapshot of the VM" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:711 +#, python-format +msgid "Initiated copying disk data via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:713 +#, python-format +msgid "Successfully copied disk at: %(src)s to: %(dest)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:722 +#, python-format +msgid "Deleting vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:729 +#, python-format +msgid "Initiated deleting vmdk file via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:731 +#, python-format +msgid "Deleted vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/windows/windows.py:102 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:47 +#, python-format +msgid "" +"check_for_setup_error: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:53 +msgid "check_for_setup_error: there is no ISCSI traffic listening." +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:63 +#, python-format +msgid "" +"get_host_information: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:73 +#, python-format +msgid "" +"get_host_information: the ISCSI target information could not be " +"retrieved. WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:105 +#, python-format +msgid "" +"associate_initiator_with_iscsi_target: an association between initiator: " +"%(init)s and target name: %(target)s could not be established. WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:123 +#, python-format +msgid "" +"delete_iscsi_target: error when deleting the iscsi target associated with" +" target name: %(target)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:139 +#, python-format +msgid "" +"create_volume: error when creating the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:157 +#, python-format +msgid "" +"delete_volume: error when deleting the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:177 +#, python-format +msgid "" +"create_snapshot: error when creating the snapshot name: %(vol_name)s . 
" +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:193 +#, python-format +msgid "" +"create_volume_from_snapshot: error when creating the volume name: " +"%(vol_name)s from snapshot name: %(snap_name)s. WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:208 +#, python-format +msgid "" +"delete_snapshot: error when deleting the snapshot name: %(snap_name)s . " +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:223 +#, python-format +msgid "" +"create_iscsi_target: error when creating iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:240 +#, python-format +msgid "" +"remove_iscsi_target: error when deleting iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:255 +#, python-format +msgid "" +"add_disk_to_target: error adding disk associated to volume : %(vol_name)s" +" to the target name: %(tar_name)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:273 +#, python-format +msgid "" +"copy_vhd_disk: error when copying disk from source path : %(src_path)s to" +" destination path: %(dest_path)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:290 +#, python-format +msgid "" +"extend: error when extending the volume: %(vol_name)s .WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/flows/common.py:52 +#, python-format +msgid "Restoring source %(source_volid)s status to %(status)s" +msgstr "" + +#: cinder/volume/flows/common.py:58 +#, python-format +msgid "" +"Failed setting source volume %(source_volid)s back to its initial " +"%(source_status)s status" +msgstr "" + +#: cinder/volume/flows/common.py:83 +#, python-format +msgid "Updating volume: %(volume_id)s with %(update)s due to: %(reason)s" +msgstr "" + +#: cinder/volume/flows/common.py:90 +#: cinder/volume/flows/manager/create_volume.py:676 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(update)s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:81 +#, python-format +msgid "Originating snapshot status must be one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:103 +#, python-format +msgid "" +"Unable to create a volume from an originating source volume when its " +"status is not one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:126 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than the snapshot size " +"%(snap_size)sGB. They must be >= original snapshot size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:135 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than original volume size " +"%(source_size)sGB. They must be >= original volume size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:144 +#, python-format +msgid "Volume size %(size)s must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:186 +#, python-format +msgid "" +"Size of specified image %(image_size)sGB is larger than volume size " +"%(volume_size)sGB." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:194 +#, python-format +msgid "" +"Volume size %(volume_size)sGB cannot be smaller than the image minDisk " +"size %(min_disk)sGB." 
+msgstr "" + +#: cinder/volume/flows/api/create_volume.py:212 +#, python-format +msgid "Metadata property key %s greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:217 +#, python-format +msgid "Metadata property key %s value greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:254 +#, python-format +msgid "Availability zone '%s' is invalid" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:267 +msgid "Volume must be in the same availability zone as the snapshot" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:276 +msgid "Volume must be in the same availability zone as the source volume" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:315 +msgid "Volume type will be changed to be the same as the source volume." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:463 +#, python-format +msgid "Failed destroying volume entry %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:546 +#, python-format +msgid "Failed rolling back quota for %s reservations" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:590 +#, python-format +msgid "Failed to update quota for deleting volume: %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:678 +#: cinder/volume/flows/manager/create_volume.py:192 +#, python-format +msgid "Volume %s: create failed" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:682 +msgid "Unexpected build error:" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:105 +#, python-format +msgid "" +"Volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d due to " +"%(reason)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:124 +#, python-format +msgid "Volume %s: re-scheduled" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:141 +#, python-format +msgid "Updating volume %(volume_id)s with %(update)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:146 +#, python-format +msgid "Volume %s: resetting 'creating' status failed." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:165 +#, python-format +msgid "Volume %s: rescheduling failed" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:308 +#, python-format +msgid "" +"Failed notifying about the volume action %(event)s for volume " +"%(volume_id)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:345 +#, python-format +msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:347 +#, python-format +msgid "" +"Failed updating volume %(vol_id)s metadata using the provided " +"%(src_type)s %(src_id)s metadata" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:405 +#, python-format +msgid "" +"Failed fetching snapshot %(snapshot_id)s bootable flag using the provided" +" glance snapshot %(snapshot_ref_id)s volume reference" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:418 +#, python-format +msgid "Marking volume %s as bootable." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:421 +#, python-format +msgid "Failed updating volume %(volume_id)s bootable flag to true" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:448 +#, python-format +msgid "" +"Attempting download of %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s." 
+msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:455 +#: cinder/volume/flows/manager/create_volume.py:466 +#, python-format +msgid "" +"Failed to copy image %(image_id)s to volume: %(volume_id)s, error: " +"%(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:461 +#, python-format +msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:475 +#, python-format +msgid "" +"Downloaded image %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s successfully." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:512 +#, python-format +msgid "" +"Creating volume glance metadata for volume %(volume_id)s backed by image " +"%(image_id)s with: %(vol_metadata)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:526 +#, python-format +msgid "" +"Cloning %(volume_id)s from image %(image_id)s at location " +"%(image_location)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:552 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(updates)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:574 +#, python-format +msgid "Unable to create volume. Volume driver %s not initialized" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:588 +#, python-format +msgid "" +"Volume %(volume_id)s: being created using %(functor)s with specification:" +" %(volume_spec)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:611 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with creation provided " +"model %(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:619 +#, python-format +msgid "Volume %s: creating export" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:633 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with driver provided model " +"%(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:680 +#, python-format +msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" +msgstr "" + +#~ msgid "Error retrieving volume status: %s" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get system name" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get storage pool data" +#~ msgstr "" + +#~ msgid "Cannot find any Fibre Channel HBAs" +#~ msgstr "" + +#~ msgid "Volume status must be available or error" +#~ msgstr "" + +#~ msgid "No backend config with id %s" +#~ msgstr "" + +#~ msgid "No sm_flavor called %s" +#~ msgstr "" + +#~ msgid "No sm_volume with id %s" +#~ msgstr "" + +#~ msgid "Error: %s" +#~ msgstr "" + +#~ msgid "Unexpected state while cloning %s" +#~ msgstr "" + +#~ msgid "iSCSI device not found at %s" +#~ msgstr "" + +#~ msgid "Fibre Channel device not found." +#~ msgstr "" + +#~ msgid "Uncaught exception" +#~ msgstr "" + +#~ msgid "Out reactor registered" +#~ msgstr "" + +#~ msgid "CONSUMER GOT %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT QUEUED %(data)s" +#~ msgstr "" + +#~ msgid "Could not create IPC directory %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT %(data)s" +#~ msgstr "" + +#~ msgid "May specify only one of snapshot, imageRef or source volume" +#~ msgstr "" + +#~ msgid "Volume size cannot be lesser than the Snapshot size" +#~ msgstr "" + +#~ msgid "Unable to clone volumes that are in an error state" +#~ msgstr "" + +#~ msgid "Clones currently must be >= original volume size." 
+#~ msgstr "" + +#~ msgid "Volume size '%s' must be an integer and greater than 0" +#~ msgstr "" + +#~ msgid "Size of specified image is larger than volume size." +#~ msgstr "" + +#~ msgid "Image minDisk size is larger than the volume size." +#~ msgstr "" + +#~ msgid "" +#~ msgstr "" + +#~ msgid "Availability zone is invalid" +#~ msgstr "" + +#~ msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +#~ msgstr "" + +#~ msgid "volume %s: creating from snapshot" +#~ msgstr "" + +#~ msgid "volume %s: creating from existing volume" +#~ msgstr "" + +#~ msgid "volume %s: creating from image" +#~ msgstr "" + +#~ msgid "volume %s: creating" +#~ msgstr "" + +#~ msgid "Setting volume: %s status to error after failed image copy." +#~ msgstr "" + +#~ msgid "Unexpected Error: " +#~ msgstr "" + +#~ msgid "volume %s: creating export" +#~ msgstr "" + +#~ msgid "volume %s: create failed" +#~ msgstr "" + +#~ msgid "volume %s: created successfully" +#~ msgstr "" + +#~ msgid "volume %s: Error trying to reschedule create" +#~ msgstr "" + +#~ msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +#~ msgstr "" + +#~ msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +#~ msgstr "" + +#~ msgid "Downloaded image %(image_id)s to %(volume_id)s successfully." +#~ msgstr "" + +#~ msgid "Array Mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "LUN %(lun)s of size %(size)s MB is created." +#~ msgstr "" + +#~ msgid "Array mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "Failed to attach iser target for volume %(volume_id)s." +#~ msgstr "" + +#~ msgid "Fetching %s" +#~ msgstr "" + +#~ msgid "Link Local address is not found.:%s" +#~ msgstr "" + +#~ msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +#~ msgstr "" + +#~ msgid "Started %(name)s on %(_host)s:%(_port)s" +#~ msgstr "" + +#~ msgid "Unable to find a Fibre Channel volume device" +#~ msgstr "" + +#~ msgid "Volume device not found at %s" +#~ msgstr "" + +#~ msgid "Unable to find Volume Group: %s" +#~ msgstr "" + +#~ msgid "Failed to create Volume Group: %s" +#~ msgstr "" + +#~ msgid "snapshot %(snap_name)s: creating" +#~ msgstr "" + +#~ msgid "Running with CoraidDriver for ESM EtherCLoud" +#~ msgstr "" + +#~ msgid "Update session cookie %(session)s" +#~ msgstr "" + +#~ msgid "Message : %(message)s" +#~ msgstr "" + +#~ msgid "Error while trying to set group: %(message)s" +#~ msgstr "" + +#~ msgid "Unable to find group: %(group)s" +#~ msgstr "" + +#~ msgid "ESM urlOpen error" +#~ msgstr "" + +#~ msgid "JSON Error" +#~ msgstr "" + +#~ msgid "Request without URL" +#~ msgstr "" + +#~ msgid "Configure data : %s" +#~ msgstr "" + +#~ msgid "Configure response : %s" +#~ msgstr "" + +#~ msgid "Unable to retrive volume infos for volume %(volname)s" +#~ msgstr "" + +#~ msgid "Cannot login on Coraid ESM" +#~ msgstr "" + +#~ msgid "Fail to create volume %(volname)s" +#~ msgstr "" + +#~ msgid "Failed to delete volume %(volname)s" +#~ msgstr "" + +#~ msgid "Failed to Create Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "Failed to Delete Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "Failed to Create Volume from Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "fmt = %(fmt)s backed by: %(backing_file)s" +#~ msgstr "" + +#~ msgid "Expected image to be in raw format, but is %s" +#~ msgstr "" + +#~ msgid "volume group %s doesn't exist" +#~ msgstr "" + +#~ msgid "Error retrieving volume stats: %s" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Could not get system name" +#~ msgstr "" + +#~ msgid "CPG (%s) must 
be in a domain" +#~ msgstr "" + +#~ msgid "Error populating default encryption types!" +#~ msgstr "" + +#~ msgid "Unexpected error while running command." +#~ msgstr "" + +#~ msgid "Nexenta SA returned the error" +#~ msgstr "" + +#~ msgid "Ignored target group creation error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group member addition error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LU creation error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Copying metadata from snapshot %(snap_volume_id)s to %(volume_id)s" +#~ msgstr "" + +#~ msgid "Connection to glance failed" +#~ msgstr "" + +#~ msgid "Invalid snapshot" +#~ msgstr "" + +#~ msgid "Invalid input received" +#~ msgstr "" + +#~ msgid "Invalid volume type" +#~ msgstr "" + +#~ msgid "Invalid volume" +#~ msgstr "" + +#~ msgid "Invalid host" +#~ msgstr "" + +#~ msgid "Invalid auth key" +#~ msgstr "" + +#~ msgid "Invalid metadata" +#~ msgstr "" + +#~ msgid "Invalid metadata size" +#~ msgstr "" + +#~ msgid "Migration error" +#~ msgstr "" + +#~ msgid "Quota exceeded" +#~ msgstr "" + +#~ msgid "Connection to swift failed" +#~ msgstr "" + +#~ msgid "Volume migration failed" +#~ msgstr "" + +#~ msgid "SSH command injection detected" +#~ msgstr "" + +#~ msgid "Invalid qos specs" +#~ msgstr "" + +#~ msgid "debug in callback: %s" +#~ msgstr "" + +#~ msgid "Expected object of type: %s" +#~ msgstr "" + +#~ msgid "timefunc: '%(name)s' took %(total_time).2f secs" +#~ msgstr "" + +#~ msgid "base image still has %s snapshots so not deleting base image" +#~ msgstr "" + +#~ msgid "Failed to rename migration destination volume %(vol)s to %(name)s" +#~ msgstr "" + +#~ msgid "Resize volume \"%(name)s\" to %(size)s" +#~ msgstr "" + +#~ msgid "Volume \"%(name)s\" resized. New size is %(size)s" +#~ msgstr "" + +#~ msgid "Invalid snapshot backing file format: %s" +#~ msgstr "" + +#~ msgid "Extend volume from %(old_size) to %(new_size)" +#~ msgstr "" + +#~ msgid "pool %s doesn't exist" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Could not get system name." +#~ msgstr "" + +#~ msgid "Disk not found: %s" +#~ msgstr "" + +#~ msgid "read timed out" +#~ msgstr "" + +#~ msgid "check_for_setup_error." +#~ msgstr "" + +#~ msgid "check_for_setup_error: Can not get device type." +#~ msgstr "" + +#~ msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +#~ msgstr "" + +#~ msgid "_get_device_type: Storage Pool must be configured." +#~ msgstr "" + +#~ msgid "create_volume:volume name: %s." +#~ msgstr "" + +#~ msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +#~ msgstr "" + +#~ msgid "create_export: volume name:%s" +#~ msgstr "" + +#~ msgid "create_export:Volume %(name)s does not exist." +#~ msgstr "" + +#~ msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +#~ msgstr "" + +#~ msgid "terminate_connection:Host does not exist. Host name:%(host)s." +#~ msgstr "" + +#~ msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +#~ msgstr "" + +#~ msgid "create_snapshot:Device does not support snapshot." +#~ msgstr "" + +#~ msgid "create_snapshot:Resource pool needs 1GB valid size at least." +#~ msgstr "" + +#~ msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +#~ msgstr "" + +#~ msgid "create_snapshot:Snapshot does not exist. 
Snapshot name:%(name)s" +#~ msgstr "" + +#~ msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +#~ msgstr "" + +#~ msgid "delete_snapshot:Device does not support snapshot." +#~ msgstr "" + +#~ msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +#~ msgstr "" + +#~ msgid "_check_conf_file: %s" +#~ msgstr "" + +#~ msgid "Write login information to xml error. %s" +#~ msgstr "" + +#~ msgid "_get_login_info error. %s" +#~ msgstr "" + +#~ msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +#~ msgstr "" + +#~ msgid "_get_lun_set_info:%s" +#~ msgstr "" + +#~ msgid "_get_iscsi_info:%s" +#~ msgstr "" + +#~ msgid "CLI command:%s" +#~ msgstr "" + +#~ msgid "_execute_cli:%s" +#~ msgstr "" + +#~ msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +#~ msgstr "" + +#~ msgid "_get_tgt_iqn:iSCSI IP is %s." +#~ msgstr "" + +#~ msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +#~ msgstr "" + +#~ msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +#~ msgstr "" + +#~ msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +#~ msgstr "" + +#~ msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." +#~ msgstr "" + +#~ msgid "Ignored target creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group member addition error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LU creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LUN mapping entry addition error while ensuring export" +#~ msgstr "" + +#~ msgid "Invalid source volume %(reason)s." +#~ msgstr "" + +#~ msgid "The request is invalid." +#~ msgstr "" + +#~ msgid "Volume %(volume_id)s persistence file could not be found." +#~ msgstr "" + +#~ msgid "No disk at %(location)s" +#~ msgstr "" + +#~ msgid "Class %(class_name)s could not be found: %(exception)s" +#~ msgstr "" + +#~ msgid "Action not allowed." +#~ msgstr "" + +#~ msgid "Key pair %(key_name)s already exists." +#~ msgstr "" + +#~ msgid "Migration error: %(reason)s" +#~ msgstr "" + +#~ msgid "Maximum volume/snapshot size exceeded" +#~ msgstr "" + +#~ msgid "3PAR Host already exists: %(err)s. %(info)s" +#~ msgstr "" + +#~ msgid "Backup volume %(volume_id)s type not recognised." +#~ msgstr "" + +#~ msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s" +#~ msgstr "" + +#~ msgid "ssh_read: Read SSH timeout" +#~ msgstr "" + +#~ msgid "do_setup." +#~ msgstr "" + +#~ msgid "create_volume: volume name: %s." +#~ msgstr "" + +#~ msgid "delete_volume: volume name: %s." +#~ msgstr "" + +#~ msgid "create_cloned_volume: src volume: %(src)s tgt volume: %(tgt)s" +#~ msgstr "" + +#~ msgid "create_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" +#~ msgstr "" + +#~ msgid "delete_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" +#~ msgstr "" + +#~ msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Updating volume stats" +#~ msgstr "" + +#~ msgid "restore finished." +#~ msgstr "" + +#~ msgid "Error encountered during initialization of driver: %s" +#~ msgstr "" + +#~ msgid "Unabled to update stats, driver is uninitialized" +#~ msgstr "" + +#~ msgid "Snapshot file at %s does not exist." 
+#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %s does not exist" +#~ msgstr "" + +#~ msgid "Login to 3PAR array invalid" +#~ msgstr "" + +#~ msgid "There are no datastores present under %s." +#~ msgstr "" + +#~ msgid "Size for volume: %s not found, skipping secure delete." +#~ msgstr "" + +#~ msgid "Could not find attribute for LUN named %s" +#~ msgstr "" + +#~ msgid "Cleaning up incomplete backup operations" +#~ msgstr "" + +#~ msgid "Resetting volume %s to available (was backing-up)" +#~ msgstr "" + +#~ msgid "Resetting volume %s to error_restoring (was restoring-backup)" +#~ msgstr "" + +#~ msgid "Resetting backup %s to error (was creating)" +#~ msgstr "" + +#~ msgid "Resetting backup %s to available (was restoring)" +#~ msgstr "" + +#~ msgid "Resuming delete on backup: %s" +#~ msgstr "" + +#~ msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +#~ msgstr "" + +#~ msgid "create_backup finished. backup: %s" +#~ msgstr "" + +#~ msgid "delete_backup started, backup: %s" +#~ msgstr "" + +#~ msgid "delete_backup finished, backup %s deleted" +#~ msgstr "" + +#~ msgid "JSON transfer Error" +#~ msgstr "" + +#~ msgid "create volume error: %(err)s" +#~ msgstr "" + +#~ msgid "Create snapshot error." +#~ msgstr "" + +#~ msgid "Create luncopy error." +#~ msgstr "" + +#~ msgid "_find_host_lun_id transfer data error! " +#~ msgstr "" + +#~ msgid "ssh_read: Read SSH timeout." +#~ msgstr "" + +#~ msgid "There are no hosts in the inventory." +#~ msgstr "" + +#~ msgid "Unable to create volume: %(vol)s on the hosts: %(hosts)s." +#~ msgstr "" + +#~ msgid "Successfully cloned new backing: %s." +#~ msgstr "" + +#~ msgid "Successfully reverted clone: %(clone)s to snapshot: %(snapshot)s." +#~ msgstr "" + +#~ msgid "Copying backing files from %(src)s to %(dest)s." +#~ msgstr "" + +#~ msgid "Initiated copying of backing via task: %s." +#~ msgstr "" + +#~ msgid "Successfully copied backing to %s." +#~ msgstr "" + +#~ msgid "Registering backing at path: %s to inventory." +#~ msgstr "" + +#~ msgid "Initiated registring backing, task: %s." +#~ msgstr "" + +#~ msgid "Successfully registered backing: %s." +#~ msgstr "" + +#~ msgid "Reverting backing to snapshot: %s." +#~ msgstr "" + +#~ msgid "Initiated reverting snapshot via task: %s." +#~ msgstr "" + +#~ msgid "Successfully reverted to snapshot: %s." +#~ msgstr "" + +#~ msgid "Successfully copied disk data to: %s." +#~ msgstr "" + +#~ msgid "Error(s): %s occurred in the call to RetrieveProperties." +#~ msgstr "" + +#~ msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +#~ msgstr "" + +#~ msgid "Deploy v1 of the Cinder API. " +#~ msgstr "" + +#~ msgid "Deploy v2 of the Cinder API. " +#~ msgstr "" + +#~ msgid "_read_xml:%s" +#~ msgstr "" + +#~ msgid "request ip info is %s." +#~ msgstr "" + +#~ msgid "new str info is %s." +#~ msgstr "" + +#~ msgid "Failed to create iser target for volume %(volume_id)s." +#~ msgstr "" + +#~ msgid "Failed to remove iser target for volume %(volume_id)s." 
+#~ msgstr "" + +#~ msgid "rtstool is not installed correctly" +#~ msgstr "" + +#~ msgid "Creating iser_target for: %s" +#~ msgstr "" + +#~ msgid "Failed to create iser target for volume id:%(vol_id)s: %(e)s" +#~ msgstr "" + +#~ msgid "Removing iser_target for: %s" +#~ msgstr "" + +#~ msgid "Failed to remove iser target for volume id:%(vol_id)s: %(e)s" +#~ msgstr "" + +#~ msgid "Volume %s does not exist, it seems it was already deleted" +#~ msgstr "" + +#~ msgid "Executing zfs send/recv on the appliance" +#~ msgstr "" + +#~ msgid "zfs send/recv done, new volume %s created" +#~ msgstr "" + +#~ msgid "Failed to delete temp snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" + +#~ msgid "Failed to delete zfs recv snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" + +#~ msgid "rbd export-diff failed - %s" +#~ msgstr "" + +#~ msgid "rbd import-diff failed - %s" +#~ msgstr "" + +#~ msgid "%s is not on GPFS. Perhaps GPFS not mounted." +#~ msgstr "" + +#~ msgid "Folder %s does not exist, it seems it was already deleted." +#~ msgstr "" + +#~ msgid "No 'os-update_readonly_flag' was specified in request." +#~ msgstr "" + +#~ msgid "Volume 'readonly' flag must be specified in request as a boolean." +#~ msgstr "" + +#~ msgid "ISER provider_location not stored, using discovery" +#~ msgstr "" + +#~ msgid "Could not find iSER export for volume %s" +#~ msgstr "" + +#~ msgid "ISER Discovery: Found %s" +#~ msgstr "" + +#~ msgid "Failed to access the device on the path %(path)s: %(error)s." +#~ msgstr "" + +#~ msgid "iSER device not found at %s" +#~ msgstr "" + +#~ msgid "Found iSER node %(host_device)s (after %(tries)s rescans)." +#~ msgstr "" + +#~ msgid "Skipping ensure_export. No iser_target provisioned for volume: %s" +#~ msgstr "" + +#~ msgid "Skipping remove_export. No iser_target provisioned for volume: %s" +#~ msgstr "" + +#~ msgid "Downloading image: %s from glance image server." +#~ msgstr "" + +#~ msgid "Uploading image: %s to the Glance image server." +#~ msgstr "" + +#~ msgid "Invalid request body" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: prefix %s" +#~ msgstr "" + +#~ msgid "Schedule volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete schedule volume using flow: %s" +#~ msgstr "" + +#~ msgid "Create volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete create volume workflow" +#~ msgstr "" + +#~ msgid "Expected volume result not found" +#~ msgstr "" + +#~ msgid "Manager volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete manager volume workflow" +#~ msgstr "" + +#~ msgid "Unable to update stats, driver is uninitialized" +#~ msgstr "" + +#~ msgid "Bad reponse from server: %s" +#~ msgstr "" + +#~ msgid "%(flow)s has moved into state %(state)s from state %(old_state)s" +#~ msgstr "" + +#~ msgid "No request spec, will not reschedule" +#~ msgstr "" + +#~ msgid "No retry filter property or associated retry info, will not reschedule" +#~ msgstr "" + +#~ msgid "Retry info not present, will not reschedule" +#~ msgstr "" + +#~ msgid "Clear capabilities" +#~ msgstr "" + +#~ msgid "This usually means the volume was never succesfully created." +#~ msgstr "" + +#~ msgid "setting LU uppper (end) limit to %s" +#~ msgstr "" + +#~ msgid "Can't find lun or lun goup in array" +#~ msgstr "" + +#~ msgid "Volume to be restored to is smaller than the backup to be restored" +#~ msgstr "" + +#~ msgid "Volume driver '%(driver)s' not initialized." 
+#~ msgstr "" + +#~ msgid "in looping call" +#~ msgstr "" + +#~ msgid "Is the appropriate service running?" +#~ msgstr "" + +#~ msgid "Could not find another host" +#~ msgstr "" + +#~ msgid "Not enough allocatable volume gigabytes remaining" +#~ msgstr "" + +#~ msgid "Unable to update stats on non-intialized Volume Group: %s" +#~ msgstr "" + +#~ msgid "do_setup: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "migrate_volume started with more than one vdisk copy" +#~ msgstr "" + +#~ msgid "migrate_volume: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "Selected datastore: %s for the volume." +#~ msgstr "" + +#~ msgid "There are no valid datastores present under %s." +#~ msgstr "" + +#~ msgid "Unable to create volume, driver not initialized" +#~ msgstr "" + +#~ msgid "Migration %(migration_id)s could not be found." +#~ msgstr "" + +#~ msgid "Bad driver response status: %(status)s" +#~ msgstr "" + +#~ msgid "Instance %(instance_id)s could not be found." +#~ msgstr "" + +#~ msgid "Volume retype failed: %(reason)s" +#~ msgstr "" + +#~ msgid "SIGTERM received" +#~ msgstr "" + +#~ msgid "Child %(pid)d exited with status %(code)d" +#~ msgstr "" + +#~ msgid "_wait_child %d" +#~ msgstr "" + +#~ msgid "wait wrap.failed %s" +#~ msgstr "" + +#~ msgid "" +#~ "Report interval must be less than " +#~ "service down time. Current config: " +#~ ". Setting " +#~ "service_down_time to: %(new_service_down_time)s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target for volume %(name)s." +#~ msgstr "" + +#~ msgid "Updating iscsi target: %s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target %(name)s: %(e)s" +#~ msgstr "" + +#~ msgid "Caught '%(exception)s' exception." +#~ msgstr "" + +#~ msgid "Get code level failed" +#~ msgstr "" + +#~ msgid "do_setup: Could not get system name" +#~ msgstr "" + +#~ msgid "Failed to get license information." +#~ msgstr "" + +#~ msgid "do_setup: No configured nodes" +#~ msgstr "" + +#~ msgid "enter: _get_chap_secret_for_host: host name %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_chap_secret_for_host: host name " +#~ "%(host_name)s with secret %(chap_secret)s" +#~ msgstr "" + +#~ msgid "" +#~ "_create_host: Cannot clean host name. " +#~ "Host name is not unicode or string" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: %s" +#~ msgstr "" + +#~ msgid "leave: _get_host_from_connector: host %s" +#~ msgstr "" + +#~ msgid "enter: _create_host: host %s" +#~ msgstr "" + +#~ msgid "_create_host: No connector ports" +#~ msgstr "" + +#~ msgid "leave: _create_host: host %(host)s - %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +#~ msgstr "" + +#~ msgid "" +#~ "storwize_svc_multihostmap_enabled is set to " +#~ "False, Not allow multi host mapping" +#~ msgstr "" + +#~ msgid "volume %s mapping to multi host" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _map_vol_to_host: LUN %(result_lun)s, " +#~ "volume %(volume_name)s, host %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "leave: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "_create_host failed to return the host name." +#~ msgstr "" + +#~ msgid "_get_host_from_connector failed to return the host name for connector" +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to any host found." +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: Multiple mappings of " +#~ "volume %(vol_name)s found, no host " +#~ "specified." 
+#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to host %(host_name)s found" +#~ msgstr "" + +#~ msgid "protocol must be specified as ' iSCSI' or ' FC'" +#~ msgstr "" + +#~ msgid "enter: _create_vdisk: vdisk %s " +#~ msgstr "" + +#~ msgid "" +#~ "_create_vdisk %(name)s - did not find success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find success " +#~ "message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find mapping " +#~ "id in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to prepare FlashCopy" +#~ " from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "Unexecpted mapping status %(status)s for " +#~ "mapping %(id)s. Attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Mapping %(id)s prepare failed to " +#~ "complete within the allotted %(to)d " +#~ "seconds timeout. Terminating." +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s with " +#~ "exception %(ex)s" +#~ msgstr "" + +#~ msgid "_prepare_fc_map: %s" +#~ msgstr "" + +#~ msgid "" +#~ "_start_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "enter: _run_flashcopy: execute FlashCopy from" +#~ " source %(source)s to target %(target)s" +#~ msgstr "" + +#~ msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +#~ msgstr "" + +#~ msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %(src_vdisk)s (%(src_id)s) does not exist" +#~ msgstr "" + +#~ msgid "" +#~ "_create_copy: cannot get source vdisk " +#~ "%(src)s capacity from vdisk attributes " +#~ "%(attr)s" +#~ msgstr "" + +#~ msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_flashcopy_mapping_attributes: mapping " +#~ "%(fc_map_id)s, attributes %(attributes)s" +#~ msgstr "" + +#~ msgid "enter: _is_vdisk_defined: vdisk %s " +#~ msgstr "" + +#~ msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +#~ msgstr "" + +#~ msgid "enter: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "warning: Tried to delete vdisk %s but it does not exist." 
+#~ msgstr "" + +#~ msgid "leave: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "" +#~ "_add_vdisk_copy %(name)s - did not find" +#~ " success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "_get_vdisk_copy_attrs: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "_get_pool_attrs: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "enter: _execute_command_and_parse_attributes: command %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _execute_command_and_parse_attributes:\n" +#~ "command: %(cmd)s\n" +#~ "attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "_get_hdr_dic: attribute headers and values do not match.\n" +#~ " Headers: %(header)s\n" +#~ " Values: %(row)s" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ "stdout: %(out)s\n" +#~ "stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "Did not find expected column in %(fun)s: %(hdr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Volume size %(size)s cannot be lesser" +#~ " than the snapshot size %(snap_size)s. " +#~ "They must be >= original snapshot " +#~ "size." +#~ msgstr "" + +#~ msgid "" +#~ "Clones currently disallowed when %(size)s " +#~ "< %(source_size)s. They must be >= " +#~ "original volume size." +#~ msgstr "" + +#~ msgid "" +#~ "Size of specified image %(image_size)s " +#~ "is larger than volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "" +#~ "Image minDisk size %(min_disk)s is " +#~ "larger than the volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "Updating volume %(volume_id)s with %(update)s" +#~ msgstr "" + +#~ msgid "Volume %s: resetting 'creating' status failed" +#~ msgstr "" + +#~ msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s" +#~ msgstr "" + +#~ msgid "Marking volume %s as bootable" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting download of %(image_id)s " +#~ "(%(image_location)s) to volume %(volume_id)s" +#~ msgstr "" + +#~ msgid "" +#~ "Downloaded image %(image_id)s (%(image_location)s)" +#~ " to volume %(volume_id)s successfully" +#~ msgstr "" + +#~ msgid "" +#~ "Creating volume glance metadata for " +#~ "volume %(volume_id)s backed by image " +#~ "%(image_id)s with: %(vol_metadata)s" +#~ msgstr "" + +#~ msgid "" +#~ "Cloning %(volume_id)s from image %(image_id)s" +#~ " at location %(image_location)s" +#~ msgstr "" + diff --git a/cinder/locale/ml_IN/LC_MESSAGES/cinder.po b/cinder/locale/ml_IN/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..c388d181b8 --- /dev/null +++ b/cinder/locale/ml_IN/LC_MESSAGES/cinder.po @@ -0,0 +1,10001 @@ +# Malayalam (India) translations for cinder. +# Copyright (C) 2013 ORGANIZATION +# This file is distributed under the same license as the cinder project. 
+# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Cinder\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-02-03 06:16+0000\n" +"PO-Revision-Date: 2013-11-26 20:45+0000\n" +"Last-Translator: openstackjenkins \n" +"Language-Team: Malayalam (India) " +"(http://www.transifex.com/projects/p/openstack/language/ml_IN/)\n" +"Plural-Forms: nplurals=2; plural=(n != 1)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:102 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:66 cinder/brick/exception.py:33 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:88 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:107 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:112 +#, python-format +msgid "Volume driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:116 +#, python-format +msgid "Backup driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:120 +#, python-format +msgid "Connection to glance failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:124 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:129 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:133 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:137 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:141 +msgid "Volume driver not ready." +msgstr "" + +#: cinder/exception.py:145 cinder/brick/exception.py:74 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:150 +#, python-format +msgid "Invalid snapshot: %(reason)s" +msgstr "" + +#: cinder/exception.py:154 +#, python-format +msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:159 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:163 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:167 +msgid "The results are invalid." +msgstr "" + +#: cinder/exception.py:171 +#, python-format +msgid "Invalid input received: %(reason)s" +msgstr "" + +#: cinder/exception.py:175 +#, python-format +msgid "Invalid volume type: %(reason)s" +msgstr "" + +#: cinder/exception.py:179 +#, python-format +msgid "Invalid volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:183 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:187 +#, python-format +msgid "Invalid host: %(reason)s" +msgstr "" + +#: cinder/exception.py:193 cinder/brick/exception.py:81 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:197 +#, python-format +msgid "Invalid auth key: %(reason)s" +msgstr "" + +#: cinder/exception.py:201 +#, python-format +msgid "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" +msgstr "" + +#: cinder/exception.py:206 +msgid "Service is unavailable at this time." 
+msgstr "" + +#: cinder/exception.py:210 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:214 +#, python-format +msgid "The device in the path %(path)s is unavailable: %(reason)s" +msgstr "" + +#: cinder/exception.py:218 +#, python-format +msgid "Expected a uuid but received %(uuid)s." +msgstr "" + +#: cinder/exception.py:222 cinder/brick/exception.py:68 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:228 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:232 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "" +"Volume %(volume_id)s has no administration metadata with key " +"%(metadata_key)s." +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Invalid metadata: %(reason)s" +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Invalid metadata size: %(reason)s" +msgstr "" + +#: cinder/exception.py:250 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:255 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:264 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:269 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s deletion is not allowed with volumes " +"present with the type." +msgstr "" + +#: cinder/exception.py:274 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:278 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:282 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:287 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:291 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:295 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:328 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:332 +#, python-format +msgid "Unknown quota resources %(unknown)s." 
+msgstr "" + +#: cinder/exception.py:336 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:340 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:344 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:348 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:352 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:356 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:365 +#, python-format +msgid "Volume Type %(id)s already exists." +msgstr "" + +#: cinder/exception.py:369 +#, python-format +msgid "Volume type encryption for type %(type_id)s already exists." +msgstr "" + +#: cinder/exception.py:373 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:377 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:381 +#, python-format +msgid "Could not find parameter %(param)s" +msgstr "" + +#: cinder/exception.py:385 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:389 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:398 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:402 +#, python-format +msgid "Quota exceeded: code=%(code)s" +msgstr "" + +#: cinder/exception.py:409 +#, python-format +msgid "" +"Requested volume or snapshot exceeds allowed Gigabytes quota. Requested " +"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." +msgstr "" + +#: cinder/exception.py:415 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:419 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:423 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:427 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:432 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:436 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:440 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:444 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:449 +#, python-format +msgid "Glance metadata for volume/snapshot %(id)s cannot be found." 
+msgstr "" + +#: cinder/exception.py:453 +#, python-format +msgid "Failed to export for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:457 +#, python-format +msgid "Failed to create metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:461 +#, python-format +msgid "Failed to update metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:465 +#, python-format +msgid "Failed to copy metadata to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:469 +#, python-format +msgid "Failed to copy image to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:473 +msgid "Invalid Ceph args provided for backup rbd operation" +msgstr "" + +#: cinder/exception.py:477 +msgid "An error has occurred during backup operation" +msgstr "" + +#: cinder/exception.py:481 +msgid "Backup RBD operation failed" +msgstr "" + +#: cinder/exception.py:485 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:489 +msgid "Failed to identify volume backend." +msgstr "" + +#: cinder/exception.py:493 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "" + +#: cinder/exception.py:497 +#, python-format +msgid "Connection to swift failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:501 +#, python-format +msgid "Transfer %(transfer_id)s could not be found." +msgstr "" + +#: cinder/exception.py:505 +#, python-format +msgid "Volume migration failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:509 +#, python-format +msgid "SSH command injection detected: %(command)s" +msgstr "" + +#: cinder/exception.py:513 +#, python-format +msgid "QoS Specs %(specs_id)s already exists." +msgstr "" + +#: cinder/exception.py:517 +#, python-format +msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:522 +#, python-format +msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "No such QoS spec %(specs_id)s." +msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:536 +#, python-format +msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:541 +#, python-format +msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." +msgstr "" + +#: cinder/exception.py:546 +#, python-format +msgid "Invalid qos specs: %(reason)s" +msgstr "" + +#: cinder/exception.py:550 +#, python-format +msgid "QoS Specs %(specs_id)s is still associated with entities." +msgstr "" + +#: cinder/exception.py:554 +#, python-format +msgid "key manager error: %(reason)s" +msgstr "" + +#: cinder/exception.py:560 +msgid "Coraid Cinder Driver exception." +msgstr "" + +#: cinder/exception.py:564 +msgid "Failed to encode json data." +msgstr "" + +#: cinder/exception.py:568 +msgid "Login on ESM failed." +msgstr "" + +#: cinder/exception.py:572 +msgid "Relogin on ESM failed." +msgstr "" + +#: cinder/exception.py:576 +#, python-format +msgid "Group with name \"%(group_name)s\" not found." +msgstr "" + +#: cinder/exception.py:580 +#, python-format +msgid "ESM configure request failed: %(message)s." +msgstr "" + +#: cinder/exception.py:584 +#, python-format +msgid "Coraid ESM not available with reason: %(reason)s." +msgstr "" + +#: cinder/exception.py:589 +msgid "Zadara Cinder Driver exception." 
+msgstr "" + +#: cinder/exception.py:593 +#, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:597 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:601 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:605 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:609 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:613 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:618 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:622 +msgid "SolidFire Cinder Driver exception" +msgstr "" + +#: cinder/exception.py:626 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:630 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:636 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:641 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:645 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:649 cinder/exception.py:662 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:654 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:658 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/manager.py:133 +msgid "Notifying Schedulers of capabilities ..." +msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:105 +#, python-format +msgid "" +"Default quota for resource: %(res)s is set by the default quota flag: " +"quota_%(res)s, it is now deprecated. Please use the the default quota " +"class for default quota." +msgstr "" + +#: cinder/quota.py:748 +#, python-format +msgid "Created reservations %s" +msgstr "" + +#: cinder/quota.py:770 +#, python-format +msgid "Failed to commit reservations %s" +msgstr "" + +#: cinder/quota.py:790 +#, python-format +msgid "Failed to roll back reservations %s" +msgstr "" + +#: cinder/quota.py:876 +msgid "Cannot register resource" +msgstr "" + +#: cinder/quota.py:879 +msgid "Cannot register resources" +msgstr "" + +#: cinder/quota_utils.py:46 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume - " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/quota_utils.py:56 cinder/transfer/api.py:168 +#: cinder/volume/flows/api/create_volume.py:520 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/service.py:95 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:108 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:148 +#, python-format +msgid "" +"Report interval must be less than service down time. Current config " +"service_down_time: %(service_down_time)s, report_interval for this: " +"service is: %(report_interval)s. 
Setting global service_down_time to: " +"%(new_down_time)s" +msgstr "" + +#: cinder/service.py:216 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:255 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:270 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:276 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:298 +#, python-format +msgid "" +"Value of config option %(name)s_workers must be integer greater than 1. " +"Input value ignored." +msgstr "" + +#: cinder/service.py:373 +msgid "serve() can only be called once" +msgstr "" + +#: cinder/service.py:379 cinder/openstack/common/service.py:166 +#: cinder/openstack/common/service.py:384 +msgid "Full set of CONF:" +msgstr "" + +#: cinder/service.py:387 +#, python-format +msgid "%s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Can not translate %s to integer." +msgstr "" + +#: cinder/utils.py:127 +#, python-format +msgid "May specify only one of %s" +msgstr "" + +#: cinder/utils.py:212 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:228 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:412 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:423 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:698 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:759 +#, python-format +msgid "Volume driver %s not initialized" +msgstr "" + +#: cinder/wsgi.py:127 cinder/openstack/common/sslutils.py:50 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: cinder/wsgi.py:130 cinder/openstack/common/sslutils.py:53 +#, python-format +msgid "Unable to find ca_file : %s" +msgstr "" + +#: cinder/wsgi.py:133 cinder/openstack/common/sslutils.py:56 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: cinder/wsgi.py:136 cinder/openstack/common/sslutils.py:59 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:169 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:206 +#, python-format +msgid "Started %(name)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:244 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:313 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." 
+msgstr "" + +#: cinder/api/common.py:92 cinder/api/common.py:126 cinder/volume/api.py:266 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:95 cinder/api/common.py:130 cinder/volume/api.py:263 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:120 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:134 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:162 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:189 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:182 +msgid "Initializing extension manager." +msgstr "" + +#: cinder/api/extensions.py:197 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:235 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:236 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:240 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:256 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:262 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:276 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:287 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:356 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:266 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:463 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:786 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:907 +msgid "subclasses must implement construct()!" 
+msgstr "" + +#: cinder/api/contrib/admin_actions.py:81 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:227 +#: cinder/api/contrib/volume_transfer.py:157 +#: cinder/api/contrib/volume_transfer.py:193 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:224 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:60 +msgid "Snapshot not found." +msgstr "" + +#: cinder/api/contrib/hosts.py:86 cinder/api/openstack/wsgi.py:245 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:136 +#, python-format +msgid "Host '%s' could not be found." +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:168 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:180 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:206 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:214 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:111 +msgid "Please specify a name for QoS specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:220 +msgid "Failed to disassociate qos specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:222 +msgid "Qos specs still in use." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:298 +#: cinder/api/contrib/qos_specs_manage.py:351 +msgid "Volume Type id must not be None." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:72 +msgid "Missing required element quota_class_set in request body." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:81 +msgid "Quota class limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:85 +msgid "Quota class limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:60 +msgid "Quota limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quotas.py:65 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:100 +msgid "Missing required element quota_set in request body." +msgstr "" + +#: cinder/api/contrib/quotas.py:111 +#, python-format +msgid "Bad key(s) in quota set: %s" +msgstr "" + +#: cinder/api/contrib/scheduler_hints.py:36 +msgid "Malformed scheduler_hints attribute" +msgstr "" + +#: cinder/api/contrib/services.py:84 +msgid "" +"Query by service parameter is deprecated. Please use binary parameter " +"instead." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:51 +msgid "'status' must be specified." 
+msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:61 +#, python-format +msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:67 +#, python-format +msgid "" +"Provided snapshot status %(provided)s not allowed for snapshot with " +"status %(current)s." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:79 +msgid "progress must be an integer percentage" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:101 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:105 +#: cinder/api/v1/snapshot_metadata.py:75 cinder/api/v1/volume_metadata.py:75 +#: cinder/api/v2/snapshot_metadata.py:75 cinder/api/v2/volume_metadata.py:74 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:108 +#: cinder/api/v1/snapshot_metadata.py:79 cinder/api/v1/volume_metadata.py:79 +#: cinder/api/v2/snapshot_metadata.py:79 cinder/api/v2/volume_metadata.py:78 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:150 +msgid "" +"Key names can only contain alphanumeric characters, underscores, periods," +" colons and hyphens." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:99 +#, python-format +msgid "" +"Invalid request to attach volume to an instance %(instance_uuid)s and a " +"host %(host_name)s simultaneously" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:107 +msgid "Invalid request to attach volume to an invalid target" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:111 +msgid "" +"Invalid request to attach volume with an invalid mode. Attaching mode " +"should be 'rw' or 'ro'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:196 +msgid "Unable to fetch connection information from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:216 +msgid "Unable to terminate volume connection from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:229 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:237 +msgid "Bad value for 'force' parameter." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:240 +msgid "'force' is not string or bool." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:280 +msgid "New volume size must be specified as an integer." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:299 +msgid "Must specify readonly in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:307 +msgid "Bad value for 'readonly'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:311 +msgid "'readonly' not string or bool" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:325 +msgid "New volume type must be specified." 
+msgstr "" + +#: cinder/api/contrib/volume_transfer.py:131 +msgid "Listing volume transfers" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:147 +#, python-format +msgid "Creating new volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:162 +#, python-format +msgid "Creating transfer of volume %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:183 +#, python-format +msgid "Accepting volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:196 +#, python-format +msgid "Accepting transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:217 +#, python-format +msgid "Delete transfer with id: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:64 +msgid "key_size must be non-negative" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:67 +msgid "key_size must be an integer" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:73 +msgid "provider must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:75 +msgid "control_location must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:83 +#, python-format +msgid "Valid control location are: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:111 +msgid "Cannot create encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:115 +msgid "Create body is not valid." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:157 +msgid "Cannot delete encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/middleware/auth.py:108 +msgid "Invalid service catalog json." +msgstr "" + +#: cinder/api/middleware/fault.py:44 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:53 cinder/api/openstack/wsgi.py:984 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/fault.py:69 +#, python-format +msgid "%(exception)s: %(explanation)s" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:55 cinder/api/middleware/sizelimit.py:64 +#: cinder/api/middleware/sizelimit.py:78 +msgid "Request is too large." +msgstr "" + +#: cinder/api/openstack/__init__.py:69 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:80 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:104 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:126 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." 
+msgstr "" + +#: cinder/api/openstack/wsgi.py:220 cinder/api/openstack/wsgi.py:634 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:639 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:677 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:682 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:685 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:793 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:799 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:803 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:914 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:917 cinder/api/openstack/wsgi.py:930 +#: cinder/api/v1/snapshot_metadata.py:53 cinder/api/v1/snapshot_metadata.py:71 +#: cinder/api/v1/snapshot_metadata.py:96 cinder/api/v1/snapshot_metadata.py:121 +#: cinder/api/v1/volume_metadata.py:53 cinder/api/v1/volume_metadata.py:71 +#: cinder/api/v1/volume_metadata.py:96 cinder/api/v1/volume_metadata.py:121 +#: cinder/api/v2/snapshot_metadata.py:53 cinder/api/v2/snapshot_metadata.py:71 +#: cinder/api/v2/snapshot_metadata.py:96 cinder/api/v2/snapshot_metadata.py:121 +#: cinder/api/v2/volume_metadata.py:52 cinder/api/v2/volume_metadata.py:70 +#: cinder/api/v2/volume_metadata.py:95 cinder/api/v2/volume_metadata.py:120 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:927 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:939 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:987 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:139 cinder/api/v2/limits.py:138 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:264 cinder/api/v2/limits.py:261 +msgid "This request was rate-limited." 
+msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:37 cinder/api/v1/snapshot_metadata.py:117 +#: cinder/api/v1/snapshot_metadata.py:156 cinder/api/v2/snapshot_metadata.py:37 +#: cinder/api/v2/snapshot_metadata.py:117 +#: cinder/api/v2/snapshot_metadata.py:156 +msgid "snapshot does not exist" +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:139 +#: cinder/api/v1/snapshot_metadata.py:149 cinder/api/v1/volume_metadata.py:139 +#: cinder/api/v1/volume_metadata.py:149 cinder/api/v2/snapshot_metadata.py:139 +#: cinder/api/v2/snapshot_metadata.py:149 cinder/api/v2/volume_metadata.py:138 +#: cinder/api/v2/volume_metadata.py:148 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:119 cinder/api/v2/snapshots.py:120 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:184 +msgid "'volume_id' must be specified" +msgstr "" + +#: cinder/api/v1/snapshots.py:182 cinder/api/v2/snapshots.py:193 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:186 cinder/api/v2/snapshots.py:202 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:37 cinder/api/v1/volume_metadata.py:117 +#: cinder/api/v1/volume_metadata.py:156 cinder/api/v2/volume_metadata.py:36 +#: cinder/api/v2/volume_metadata.py:116 cinder/api/v2/volume_metadata.py:155 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:111 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:290 cinder/api/v2/volumes.py:228 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:344 cinder/api/v1/volumes.py:348 +#: cinder/api/v2/volumes.py:298 cinder/api/v2/volumes.py:302 +msgid "Invalid imageRef provided." +msgstr "" + +#: cinder/api/v1/volumes.py:388 cinder/api/v2/volumes.py:354 +#, python-format +msgid "snapshot id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:401 +#, python-format +msgid "source vol id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:412 cinder/api/v2/volumes.py:377 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/v1/volumes.py:496 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/snapshots.py:111 cinder/api/v2/snapshots.py:126 +#: cinder/api/v2/snapshots.py:267 +msgid "Snapshot could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:174 cinder/api/v2/snapshots.py:234 +#: cinder/api/v2/volumes.py:313 cinder/api/v2/volumes.py:419 +#, python-format +msgid "Missing required element '%s' in request body" +msgstr "" + +#: cinder/api/v2/snapshots.py:190 cinder/api/v2/volumes.py:217 +#: cinder/api/v2/volumes.py:234 cinder/api/v2/volumes.py:449 +msgid "Volume could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:230 cinder/api/v2/volumes.py:415 +msgid "Missing request body" +msgstr "" + +#: cinder/api/v2/types.py:70 +msgid "Volume type not found" +msgstr "" + +#: cinder/api/v2/volumes.py:237 +msgid "Volume cannot be deleted while in attached state" +msgstr "" + +#: cinder/api/v2/volumes.py:343 +msgid "Volume type not found." 
+msgstr "" + +#: cinder/api/v2/volumes.py:366 +#, python-format +msgid "source volume id:%s not found" +msgstr "" + +#: cinder/api/v2/volumes.py:472 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:66 +msgid "Backup status must be available or error" +msgstr "" + +#: cinder/backup/api.py:105 +msgid "Volume to be backed up must be available" +msgstr "" + +#: cinder/backup/api.py:140 +msgid "Backup status must be available" +msgstr "" + +#: cinder/backup/api.py:145 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:154 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:170 +msgid "Volume to be restored to must be available" +msgstr "" + +#: cinder/backup/api.py:176 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." +msgstr "" + +#: cinder/backup/api.py:181 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:97 +msgid "NULL host not allowed for volume backend lookup." +msgstr "" + +#: cinder/backup/manager.py:100 +#, python-format +msgid "Checking hostname '%s' for backend info." +msgstr "" + +#: cinder/backup/manager.py:107 +#, python-format +msgid "Backend not found in hostname (%s) so using default." +msgstr "" + +#: cinder/backup/manager.py:117 +#, python-format +msgid "Manager requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:120 cinder/backup/manager.py:132 +msgid "Fetching default backend." +msgstr "" + +#: cinder/backup/manager.py:123 +#, python-format +msgid "Volume manager for backend '%s' does not exist." +msgstr "" + +#: cinder/backup/manager.py:129 +#, python-format +msgid "Driver requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:147 +#, python-format +msgid "" +"Registering backend %(backend)s (host=%(host)s " +"backend_name=%(backend_name)s)." +msgstr "" + +#: cinder/backup/manager.py:154 +#, python-format +msgid "Registering default backend %s." +msgstr "" + +#: cinder/backup/manager.py:158 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)." +msgstr "" + +#: cinder/backup/manager.py:165 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s." +msgstr "" + +#: cinder/backup/manager.py:184 +msgid "Cleaning up incomplete backup operations." +msgstr "" + +#: cinder/backup/manager.py:189 +#, python-format +msgid "Resetting volume %s to available (was backing-up)." +msgstr "" + +#: cinder/backup/manager.py:194 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)." +msgstr "" + +#: cinder/backup/manager.py:206 +#, python-format +msgid "Resetting backup %s to error (was creating)." +msgstr "" + +#: cinder/backup/manager.py:212 +#, python-format +msgid "Resetting backup %s to available (was restoring)." +msgstr "" + +#: cinder/backup/manager.py:217 +#, python-format +msgid "Resuming delete on backup: %s." +msgstr "" + +#: cinder/backup/manager.py:225 +#, python-format +msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:237 +#, python-format +msgid "" +"Create backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s." 
+msgstr "" + +#: cinder/backup/manager.py:249 +#, python-format +msgid "" +"Create backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:282 +#, python-format +msgid "Create backup finished. backup: %s." +msgstr "" + +#: cinder/backup/manager.py:286 +#, python-format +msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:299 +#, python-format +msgid "" +"Restore backup aborted: expected volume status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:310 +#, python-format +msgid "" +"Restore backup aborted: expected backup status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:329 +#, python-format +msgid "" +"Restore backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:360 +#, python-format +msgid "" +"Restore backup finished, backup %(backup_id)s restored to volume " +"%(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:379 +#, python-format +msgid "Delete backup started, backup: %s." +msgstr "" + +#: cinder/backup/manager.py:386 +#, python-format +msgid "" +"Delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:399 +#, python-format +msgid "" +"Delete backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:422 +#, python-format +msgid "Delete backup finished, backup %s deleted." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:116 +msgid "" +"rbd striping not supported - ignoring configuration settings for rbd " +"striping" +msgstr "" + +#: cinder/backup/drivers/ceph.py:147 +#, python-format +msgid "invalid user '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:213 +msgid "backup_id required" +msgstr "" + +#: cinder/backup/drivers/ceph.py:224 +#, python-format +msgid "discarding %(length)s bytes from offset %(offset)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:232 +#, python-format +msgid "writing zeroes chunk %d" +msgstr "" + +#: cinder/backup/drivers/ceph.py:246 +#, python-format +msgid "transferring data between '%(src)s' and '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:250 +#, python-format +msgid "%(chunks)s chunks of %(bytes)s bytes to be transferred" +msgstr "" + +#: cinder/backup/drivers/ceph.py:269 +#, python-format +msgid "transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:279 +#, python-format +msgid "transferring remaining %s bytes" +msgstr "" + +#: cinder/backup/drivers/ceph.py:295 +#, python-format +msgid "creating base image '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:322 cinder/backup/drivers/ceph.py:603 +#, python-format +msgid "deleting backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:325 +msgid "no backup snapshot to delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:361 +#, python-format +msgid "trying diff format name format basename='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:369 +#, python-format +msgid "image %s not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:377 +#, python-format +msgid "base image still has %s snapshots so skipping base image delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:382 +#, python-format +msgid "deleting base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:389 +#, python-format +msgid "image busy, retrying %(retries)s more time(s) in %(delay)ss" +msgstr "" + +#: cinder/backup/drivers/ceph.py:394 +msgid "max retries reached - raising error" +msgstr "" + +#: cinder/backup/drivers/ceph.py:397 +#, python-format +msgid "base backup image='%s' deleted)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:407 +#, python-format +msgid "deleting source snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:453 +#, python-format +msgid "performing differential transfer from '%(src)s' to '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:478 +#, python-format +msgid "rbd diff op failed - (ret=%(ret)s stderr=%(stderr)s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:488 +#, python-format +msgid "image '%s' not found - trying diff format name" +msgstr "" + +#: cinder/backup/drivers/ceph.py:493 +#, python-format +msgid "diff format image '%s' not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:528 +#, python-format +msgid "using --from-snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:543 +#, python-format +msgid "source snap '%s' is stale so deleting" +msgstr "" + +#: cinder/backup/drivers/ceph.py:555 +#, python-format +msgid "" +"snap='%(snap)s' does not exist in base image='%(base)s' - aborting " +"incremental backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:566 +#, python-format +msgid "creating backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:586 +#, python-format +msgid "differential backup transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:595 +msgid "differential backup transfer failed" +msgstr "" + +#: 
cinder/backup/drivers/ceph.py:625 +#, python-format +msgid "creating base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:634 +msgid "copying data" +msgstr "" + +#: cinder/backup/drivers/ceph.py:694 +#, python-format +msgid "looking for snapshot of backup base '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:697 +#, python-format +msgid "backup base '%s' has no snapshots" +msgstr "" + +#: cinder/backup/drivers/ceph.py:704 +#, python-format +msgid "backup '%s' has no snapshot" +msgstr "" + +#: cinder/backup/drivers/ceph.py:708 +#, python-format +msgid "backup should only have one snapshot but instead has %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:713 +#, python-format +msgid "found snapshot '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:734 +msgid "need non-zero volume size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:751 +#, python-format +msgid "Starting backup of volume='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:764 +msgid "forcing full backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:776 +#, python-format +msgid "backup '%s' finished." +msgstr "" + +#: cinder/backup/drivers/ceph.py:834 +msgid "adjusting restore vol size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:846 +#, python-format +msgid "trying incremental restore from base='%(base)s' snap='%(snap)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:858 +msgid "differential restore failed, trying full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:869 +#, python-format +msgid "restore transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:916 +#, python-format +msgid "rbd has %s extents" +msgstr "" + +#: cinder/backup/drivers/ceph.py:938 +msgid "dest volume is original volume - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:959 +msgid "destination has extents - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:964 +#, python-format +msgid "no restore point found for backup='%s', forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:995 +msgid "forcing full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1005 +#, python-format +msgid "starting restore from Ceph backup=%(src)s to volume=%(dest)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1016 +msgid "volume_file does not support fileno() so skipping fsync()" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1021 +msgid "restore finished successfully." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:1023 +#, python-format +msgid "restore finished with error - %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1029 +#, python-format +msgid "delete started for backup=%s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1034 +msgid "rbd image not found but continuing anyway so that db entry can be removed" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1037 +#, python-format +msgid "delete '%s' finished with warning" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1039 +#, python-format +msgid "delete '%s' finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:106 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:123 +#, python-format +msgid "single_user auth mode enabled, but %(param)s not set" +msgstr "" + +#: cinder/backup/drivers/swift.py:141 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:146 +#, python-format +msgid "container %s does not exist" +msgstr "" + +#: cinder/backup/drivers/swift.py:151 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/drivers/swift.py:157 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:173 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:182 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:192 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:209 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/drivers/swift.py:214 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:219 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:224 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/drivers/swift.py:234 +#, python-format +msgid "volume size %d is invalid." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:248 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:271 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/drivers/swift.py:278 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:287 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/drivers/swift.py:291 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/drivers/swift.py:297 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:301 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:304 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:312 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/drivers/swift.py:328 cinder/backup/drivers/tsm.py:324 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/drivers/swift.py:345 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/drivers/swift.py:350 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:356 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/drivers/swift.py:362 +#, python-format +msgid "" +"restoring object from swift. backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:378 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/drivers/swift.py:401 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:409 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:423 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:428 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:432 cinder/backup/drivers/tsm.py:378 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:446 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:455 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:458 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:468 cinder/backup/drivers/tsm.py:440 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/backup/drivers/tsm.py:85 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to create device hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:143 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to obtain backup success notification from " +"server.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:173 +#, python-format +msgid "" +"restore: %(vol_id)s Failed.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:199 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a block device." +msgstr "" + +#: cinder/backup/drivers/tsm.py:206 +#, python-format +msgid "backup: %(vol_id)s Failed. Cannot obtain real path to device %(path)s." +msgstr "" + +#: cinder/backup/drivers/tsm.py:213 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a file." +msgstr "" + +#: cinder/backup/drivers/tsm.py:260 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to remove backup hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:286 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to TSM, volume path: " +"%(volume_path)s," +msgstr "" + +#: cinder/backup/drivers/tsm.py:298 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:308 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:338 +#, python-format +msgid "" +"restore: starting restore of backup from TSM to volume %(volume_id)s, " +"backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:352 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:362 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:413 +#, python-format +msgid "" +"delete: %(vol_id)s Failed to run dsmc with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:421 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments with " +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:432 +#, python-format +msgid "" +"delete: %(vol_id)s Failed with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/brick/exception.py:55 +#, python-format +msgid "Exception in string format operation. 
msg='%s'" +msgstr "" + +#: cinder/brick/exception.py:85 +msgid "We are unable to locate any Fibre Channel devices." +msgstr "" + +#: cinder/brick/exception.py:89 +msgid "Unable to find a Fibre Channel volume device." +msgstr "" + +#: cinder/brick/exception.py:93 +#, python-format +msgid "Volume device not found at %(device)s." +msgstr "" + +#: cinder/brick/exception.py:97 +#, python-format +msgid "Unable to find Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:101 +#, python-format +msgid "Failed to create Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:105 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:109 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:113 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:117 +#, python-format +msgid "Connect to volume via protocol %(protocol)s not supported." +msgstr "" + +#: cinder/brick/initiator/connector.py:127 +#, python-format +msgid "Invalid InitiatorConnector protocol specified %(protocol)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:140 +#, python-format +msgid "Failed to access the device on the path %(path)s: %(error)s %(info)s." +msgstr "" + +#: cinder/brick/initiator/connector.py:229 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. Try" +" number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:242 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:317 +#, python-format +msgid "Could not find the iSCSI Initiator File %s" +msgstr "" + +#: cinder/brick/initiator/connector.py:609 +msgid "We are unable to locate any Fibre Channel devices" +msgstr "" + +#: cinder/brick/initiator/connector.py:619 +#, python-format +msgid "Looking for Fibre Channel dev %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:629 +msgid "Fibre Channel volume device not found." +msgstr "" + +#: cinder/brick/initiator/connector.py:633 +#, python-format +msgid "Fibre volume not yet found. Will rescan & retry. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:649 +#, python-format +msgid "Found Fibre Channel volume %(name)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:658 +#, python-format +msgid "Multipath device discovered %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:776 +#, python-format +msgid "AoE volume not yet found at: %(path)s. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:789 +#, python-format +msgid "Found AoE device %(path)s (after %(tries)s rediscover)" +msgstr "" + +#: cinder/brick/initiator/connector.py:815 +#, python-format +msgid "aoe-discover: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:825 +#, python-format +msgid "aoe-revalidate %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:834 +#, python-format +msgid "aoe-flush %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:858 +msgid "" +"Connection details not present. RemoteFsClient may not initialize " +"properly." 
+msgstr "" + +#: cinder/brick/initiator/connector.py:915 +msgid "Invalid connection_properties specified no device_path attribute" +msgstr "" + +#: cinder/brick/initiator/linuxfc.py:50 cinder/brick/initiator/linuxfc.py:56 +msgid "systool is not installed" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:99 +#: cinder/brick/initiator/linuxscsi.py:107 +#: cinder/brick/initiator/linuxscsi.py:124 +#, python-format +msgid "multipath call failed exit (%(code)s)" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:145 +#, python-format +msgid "Couldn't find multipath device %(line)s" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:149 +#, python-format +msgid "Found multipath device = %(mdev)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:140 +msgid "Attempting recreate of backing lun..." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:158 +#, python-format +msgid "" +"Failed to recover attempt to create iscsi backing lun for volume " +"id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:177 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:184 +#, python-format +msgid "" +"Created volume path %(vp)s,\n" +"content: %(vc)%" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:216 cinder/brick/iscsi/iscsi.py:365 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:227 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:258 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:262 +#, python-format +msgid "Volume path %s does not exist, nothing to remove." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:280 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:290 cinder/brick/iscsi/iscsi.py:550 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:375 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:469 +msgid "cinder-rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:489 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:513 cinder/brick/iscsi/iscsi.py:522 +#, python-format +msgid "Failed to create iscsi target for volume id:%s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:532 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:542 +#, python-format +msgid "Failed to remove iscsi target for volume id:%s." 
+msgstr "" + +#: cinder/brick/iscsi/iscsi.py:571 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 cinder/brick/local_dev/lvm.py:158 +#: cinder/brick/local_dev/lvm.py:474 cinder/brick/local_dev/lvm.py:503 +#: cinder/brick/local_dev/lvm.py:546 cinder/brick/local_dev/lvm.py:609 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 cinder/brick/local_dev/lvm.py:159 +#: cinder/brick/local_dev/lvm.py:475 cinder/brick/local_dev/lvm.py:504 +#: cinder/brick/local_dev/lvm.py:547 cinder/brick/local_dev/lvm.py:610 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 cinder/brick/local_dev/lvm.py:160 +#: cinder/brick/local_dev/lvm.py:476 cinder/brick/local_dev/lvm.py:505 +#: cinder/brick/local_dev/lvm.py:548 cinder/brick/local_dev/lvm.py:611 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, python-format +msgid "Unable to locate Volume Group %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:157 +msgid "Error querying thin pool about data_percent" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:370 +#, python-format +msgid "Unable to find VG: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:420 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:473 +msgid "Error creating Volume" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:489 +#, python-format +msgid "Unable to find LV: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:502 +msgid "Error creating snapshot" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:545 +msgid "Error activating LV" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:563 +#, python-format +msgid "Error reported running lvremove: CMD: %(command)s, RESPONSE: %(response)s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:568 +msgid "Attempting udev settle and retry of lvremove..." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:608 +msgid "Error extending Volume" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:39 +msgid "nfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:45 +msgid "glusterfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:86 +#, python-format +msgid "Already mounted: %s" +msgstr "" + +#: cinder/common/config.py:125 +msgid "Deploy v1 of the Cinder API." +msgstr "" + +#: cinder/common/config.py:128 +msgid "Deploy v2 of the Cinder API." +msgstr "" + +#: cinder/common/sqlalchemyutils.py:66 +#: cinder/openstack/common/db/sqlalchemy/utils.py:72 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:114 +#: cinder/openstack/common/db/sqlalchemy/utils.py:120 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/compute/nova.py:97 +#, python-format +msgid "Novaclient connection created using URL: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:63 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:190 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:843 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1842 +#, python-format +msgid "VolumeType %s deletion failed, VolumeType in use." 
+msgstr "" + +#: cinder/db/sqlalchemy/api.py:2530 +#, python-format +msgid "No backup with id %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2615 +msgid "Volume must be available" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2639 +#, python-format +msgid "Volume in unexpected state %s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2662 +#, python-format +msgid "" +"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " +"%(status)s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:37 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:64 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:240 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:269 +msgid "Downgrade from initial Cinder install is unsupported." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:49 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:74 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:105 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:45 +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:48 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:46 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:127 +msgid "Dropping foreign key reservations_ibfk_1 failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:133 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:140 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:147 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:60 +msgid "Exception while creating table 'volume_glance_metadata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:75 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:68 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:58 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:61 +msgid "transfers table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:31 +msgid "migrations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:61 +#, python-format +msgid "Table |%s| not created" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:37 +#, python-format +msgid "Exception while dropping table %s." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:100 +#, python-format +msgid "Exception while creating table %s." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:34 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:43 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:49 +#, python-format +msgid "Column |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:92 +msgid "encryption_key_id column not dropped from volumes" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:100 +msgid "encryption_key_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:105 +msgid "volume_type_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:113 +msgid "encryption table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:49 +msgid "Table quality_of_service_specs not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:60 +msgid "Added qos_specs_id column to volume type table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:85 +msgid "Dropping foreign key volume_types_ibfk_1 failed" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:93 +msgid "Dropping qos_specs_id column failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:100 +msgid "Dropping quality_of_service_specs table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:59 +msgid "volume_admin_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:48 +msgid "" +"Found existing 'default' entries in the quota_classes table. Skipping " +"insertion of default values." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:72 +msgid "Added default quota class data into the DB." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:74 +msgid "Default quota class data not inserted into the DB." +msgstr "" + +#: cinder/image/glance.py:161 cinder/image/glance.py:169 +#, python-format +msgid "Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:94 cinder/image/image_utils.py:199 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/image/image_utils.py:101 +#, python-format +msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:109 cinder/image/image_utils.py:192 +#, python-format +msgid "" +"Size is %(image_size)dGB and doesn't fit in a volume of size " +"%(volume_size)dGB." +msgstr "" + +#: cinder/image/image_utils.py:157 +#, python-format +msgid "" +"qemu-img is not installed and image is of type %s. Only RAW images can " +"be used if qemu-img is not installed." +msgstr "" + +#: cinder/image/image_utils.py:164 +msgid "" +"qemu-img is not installed and the disk format is not specified. Only RAW" +" images can be used if qemu-img is not installed." 
+msgstr "" + +#: cinder/image/image_utils.py:178 +#, python-format +msgid "Copying image from %(tmp)s to volume %(dest)s - size: %(size)s" +msgstr "" + +#: cinder/image/image_utils.py:206 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:224 +#, python-format +msgid "Converted to %(vol_format)s, but format is now %(file_format)s" +msgstr "" + +#: cinder/image/image_utils.py:260 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:78 +msgid "" +"config option keymgr.fixed_key has not been defined: some operations may " +"fail unexpectedly" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:80 +msgid "keymgr.fixed_key not defined" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:134 +#, python-format +msgid "Not deleting key %s" +msgstr "" + +#: cinder/openstack/common/eventlet_backdoor.py:140 +#, python-format +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/fileutils.py:64 +#, python-format +msgid "Reloading cached file %s" +msgstr "" + +#: cinder/openstack/common/gettextutils.py:252 +msgid "Message objects do not support addition." +msgstr "" + +#: cinder/openstack/common/gettextutils.py:261 +msgid "" +"Message objects do not support str() because they may contain non-ascii " +"characters. Please use unicode() or translate() instead." +msgstr "" + +#: cinder/openstack/common/imageutils.py:96 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:189 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:200 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:227 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:235 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/log.py:301 +#, python-format +msgid "Deprecated: %s" +msgstr "" + +#: cinder/openstack/common/log.py:402 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:453 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:623 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:82 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:89 +#: cinder/tests/brick/test_brick_connector.py:466 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:129 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:136 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:43 +#, python-format +msgid "Unexpected argument for periodic task creation: %(arg)s." 
+msgstr "" + +#: cinder/openstack/common/periodic_task.py:134 +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:139 +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:177 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:186 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." +msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:127 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/openstack/common/processutils.py:142 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:167 +#: cinder/openstack/common/processutils.py:239 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:345 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:179 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/openstack/common/processutils.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:318 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:220 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/openstack/common/processutils.py:224 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/openstack/common/service.py:175 +#: cinder/openstack/common/service.py:269 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:187 +msgid "Exception during rpc cleanup." 
+msgstr "" + +#: cinder/openstack/common/service.py:238 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:275 +msgid "Unhandled exception" +msgstr "" + +#: cinder/openstack/common/service.py:308 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/openstack/common/service.py:327 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/openstack/common/service.py:337 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/openstack/common/service.py:354 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/openstack/common/service.py:358 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/service.py:362 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/openstack/common/service.py:392 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/openstack/common/service.py:410 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/openstack/common/sslutils.py:98 +#, python-format +msgid "Invalid SSL version : %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:86 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/strutils.py:182 +#, python-format +msgid "Invalid string format: %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:189 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/openstack/common/versionutils.py:69 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and " +"may be removed in %(remove_in)s." +msgstr "" + +#: cinder/openstack/common/versionutils.py:73 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s and may be removed in " +"%(remove_in)s. It will not be superseded." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:29 +msgid "An unknown error occurred in crypto utils." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:36 +#, python-format +msgid "Block size of %(given)d is too big, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:45 +#, python-format +msgid "Length of %(given)d is too long, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/db/exception.py:44 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:487 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:538 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:610 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/utils.py:33 +msgid "Sort key supplied was not valid." +msgstr "" + +#: cinder/openstack/common/notifier/api.py:129 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:145 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:164 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. 
Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:105 +#, python-format +msgid "" +"An RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:83 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:216 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshold: %d. There " +"could be a MulticallProxyWaiter leak." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:299 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:345 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "received %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:422 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:423 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:280 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:459 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:594 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:597 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:631 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:640 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:668 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint."
+msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:166 +#: cinder/openstack/common/rpc/impl_qpid.py:164 +msgid "Failed to process message... skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:477 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:499 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:536 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:552 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:606 +#: cinder/openstack/common/rpc/impl_qpid.py:507 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:624 +#: cinder/openstack/common/rpc/impl_qpid.py:522 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:628 +#: cinder/openstack/common/rpc/impl_qpid.py:526 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:667 +#: cinder/openstack/common/rpc/impl_qpid.py:561 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:84 +#, python-format +msgid "Invalid value for qpid_topology_version: %d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:455 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:461 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:474 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:534 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:101 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:136 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:137 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:138 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:146 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:158 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:200 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:205 +msgid "You cannot send on this socket." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:267 +#, python-format +msgid "Running func with context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:305 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:387 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:437 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:443 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:475 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:481 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:497 +#, python-format +msgid "Required IPC directory does not exist at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:506 +#, python-format +msgid "Permission denied to IPC directory at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:509 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:543 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:562 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:590 +msgid "Skipping topic registration. Already registered." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:597 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:649 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:662 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:675 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:678 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:681 +#, python-format +msgid "Received message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:682 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:691 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:698 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:721 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:724 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:728 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:731 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:771 +#, python-format +msgid "topic is %s." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:815 +#, python-format +msgid "rpc_zmq_matchmaker = %(orig)s is deprecated; use %(new)s instead" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#: cinder/openstack/common/rpc/matchmaker_ring.py:79 +#: cinder/openstack/common/rpc/matchmaker_ring.py:97 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:54 +#, python-format +msgid "extra_spec requirement '%(req)s' does not match '%(cap)s'" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:67 +#, python-format +msgid "%(host_state)s fails resource_type extra_specs requirements" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:43 +msgid "Re-scheduling is disabled." +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:52 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/scheduler/driver.py:69 +msgid "Must implement host_passes_filters" +msgstr "" + +#: cinder/scheduler/driver.py:74 +msgid "Must implement find_retype_host" +msgstr "" + +#: cinder/scheduler/driver.py:78 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:82 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:98 +#, python-format +msgid "cannot place volume %(id)s on %(host)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:114 +#, python-format +msgid "No valid hosts for volume %(id)s with type %(type)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:125 +#, python-format +msgid "" +"Current host not valid for volume %(id)s with type %(type)s, migration " +"not allowed" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:156 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:174 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:207 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:259 +#, python-format +msgid "Filtered %s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:276 +#, python-format +msgid "Choosing %s" +msgstr "" + +#: cinder/scheduler/host_manager.py:264 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:269 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:294 +#, python-format +msgid "volume service is down or disabled. (host: %s)" +msgstr "" + +#: cinder/scheduler/manager.py:63 +msgid "" +"ChanceScheduler and SimpleScheduler have been deprecated due to lack of " +"support for advanced features like: volume types, volume encryption, QoS " +"etc. These two schedulers can be fully replaced by FilterScheduler with " +"certain combination of filters and weighers." 
+msgstr "" + +#: cinder/scheduler/manager.py:98 cinder/scheduler/manager.py:100 +msgid "Failed to create scheduler manager volume flow" +msgstr "" + +#: cinder/scheduler/manager.py:159 +msgid "New volume type not specified in request_spec." +msgstr "" + +#: cinder/scheduler/manager.py:174 +#, python-format +msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." +msgstr "" + +#: cinder/scheduler/manager.py:192 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:68 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%s'" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:43 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:57 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:53 +msgid "No volume_id provided to populate a request_spec from" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:116 +#, python-format +msgid "Failed to schedule_create_volume: %(cause)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:135 +#, python-format +msgid "Failed notifying on %(topic)s payload %(payload)s" +msgstr "" + +#: cinder/tests/fake_driver.py:57 cinder/volume/driver.py:784 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:76 cinder/volume/driver.py:884 +#, python-format +msgid "FAKE ISER: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:97 +msgid "local_path not implemented" +msgstr "" + +#: cinder/tests/fake_driver.py:124 cinder/tests/fake_driver.py:129 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:70 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:78 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:94 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:97 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:58 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_netapp_nfs.py:360 +#, python-format +msgid "Share %(share)s and file name %(file_name)s" +msgstr "" + +#: cinder/tests/test_rbd.py:768 cinder/volume/drivers/rbd.py:175 +msgid "flush() not supported in this version of librbd" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:260 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1507 +#, python-format +msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1510 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1515 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:60 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:61 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: 
cinder/tests/test_xiv_ds8k.py:102 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/tests/api/contrib/test_backups.py:741 +msgid "Invalid input" +msgstr "" + +#: cinder/tests/integrated/test_login.py:29 +#, python-format +msgid "volume: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:32 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:42 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:50 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:58 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:100 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:103 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:121 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:148 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:159 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:166 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/transfer/api.py:68 +msgid "Volume in unexpected state" +msgstr "" + +#: cinder/transfer/api.py:102 cinder/volume/api.py:367 +msgid "status must be available" +msgstr "" + +#: cinder/transfer/api.py:119 +#, python-format +msgid "Failed to create transfer record for %s" +msgstr "" + +#: cinder/transfer/api.py:136 +#, python-format +msgid "Attempt to transfer %s with invalid auth key." +msgstr "" + +#: cinder/transfer/api.py:156 cinder/volume/flows/api/create_volume.py:508 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/transfer/api.py:182 +#, python-format +msgid "Failed to update quota donating volume transfer id %s" +msgstr "" + +#: cinder/transfer/api.py:199 +#, python-format +msgid "Volume %s has been transferred."
+msgstr "" + +#: cinder/volume/api.py:143 +#, python-format +msgid "Unable to query if %s is in the availability zone set" +msgstr "" + +#: cinder/volume/api.py:171 cinder/volume/api.py:173 +msgid "Failed to create api volume flow" +msgstr "" + +#: cinder/volume/api.py:202 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:214 +#, python-format +msgid "Volume status must be available or error, but current status is: %s" +msgstr "" + +#: cinder/volume/api.py:224 +msgid "Volume cannot be deleted while migrating" +msgstr "" + +#: cinder/volume/api.py:229 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:285 cinder/volume/api.py:350 +#: cinder/volume/qos_specs.py:240 cinder/volume/volume_types.py:67 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:370 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:377 +msgid "status must be in-use to detach" +msgstr "" + +#: cinder/volume/api.py:388 +msgid "Volume status must be available to reserve" +msgstr "" + +#: cinder/volume/api.py:464 +msgid "Snapshot cannot be created while volume is migrating" +msgstr "" + +#: cinder/volume/api.py:468 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:490 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:502 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:553 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/api.py:581 cinder/volume/flows/api/create_volume.py:208 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:585 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:589 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:720 cinder/volume/api.py:772 +msgid "Volume status must be available/in-use." +msgstr "" + +#: cinder/volume/api.py:723 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/api.py:752 +msgid "Volume status must be available to extend." +msgstr "" + +#: cinder/volume/api.py:757 +#, python-format +msgid "" +"New size for extend must be greater than current size. (current: " +"%(size)s, extended: %(new_size)s)" +msgstr "" + +#: cinder/volume/api.py:778 +msgid "Volume is already part of an active migration" +msgstr "" + +#: cinder/volume/api.py:784 +msgid "volume must not have snapshots" +msgstr "" + +#: cinder/volume/api.py:797 +#, python-format +msgid "No available service named %s" +msgstr "" + +#: cinder/volume/api.py:803 +msgid "Destination host must be different than current host" +msgstr "" + +#: cinder/volume/api.py:833 +msgid "Source volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:837 +msgid "Destination volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:842 +#, python-format +msgid "Destination has migration_status %(stat)s, expected %(exp)s." +msgstr "" + +#: cinder/volume/api.py:853 +msgid "Volume status must be available to update readonly flag." 
+msgstr "" + +#: cinder/volume/api.py:862 +#, python-format +msgid "Unable to update type due to incorrect status on volume: %s" +msgstr "" + +#: cinder/volume/api.py:868 +#, python-format +msgid "Volume %s is already part of an active migration." +msgstr "" + +#: cinder/volume/api.py:874 +#, python-format +msgid "migration_policy must be 'on-demand' or 'never', passed: %s" +msgstr "" + +#: cinder/volume/api.py:887 +#, python-format +msgid "Invalid volume_type passed: %s" +msgstr "" + +#: cinder/volume/api.py:900 +#, python-format +msgid "New volume_type same as original: %s" +msgstr "" + +#: cinder/volume/api.py:915 +msgid "Retype cannot change encryption requirements" +msgstr "" + +#: cinder/volume/api.py:927 +msgid "Retype cannot change front-end qos specs for in-use volumes" +msgstr "" + +#: cinder/volume/driver.py:189 cinder/volume/drivers/netapp/nfs.py:174 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:282 +#, python-format +msgid "copy_data_between_volumes %(src)s -> %(dest)s." +msgstr "" + +#: cinder/volume/driver.py:295 cinder/volume/driver.py:309 +#, python-format +msgid "Failed to attach volume %(vol)s" +msgstr "" + +#: cinder/volume/driver.py:327 +#, python-format +msgid "Failed to copy volume %(src)s to %(dest)d" +msgstr "" + +#: cinder/volume/driver.py:340 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:358 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:394 +#, python-format +msgid "Unable to access the backend storage via the path %(path)s." +msgstr "" + +#: cinder/volume/driver.py:433 +#, python-format +msgid "Creating a new backup for volume %s." +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Restoring backup %(backup)s to volume %(volume)s." +msgstr "" + +#: cinder/volume/driver.py:474 +msgid "Extend volume not implemented" +msgstr "" + +#: cinder/volume/driver.py:533 cinder/volume/drivers/emc/emc_smis_iscsi.py:113 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:546 +#, python-format +msgid "ISCSI discovery attempt failed for:%s" +msgstr "" + +#: cinder/volume/driver.py:548 +#, python-format +msgid "Error from iscsiadm -m discovery: %s" +msgstr "" + +#: cinder/volume/driver.py:595 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:599 cinder/volume/drivers/emc/emc_smis_iscsi.py:156 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:696 +msgid "The volume driver requires the iSCSI initiator name in the connector." +msgstr "" + +#: cinder/volume/driver.py:726 cinder/volume/driver.py:845 +#: cinder/volume/drivers/eqlx.py:247 cinder/volume/drivers/lvm.py:359 +#: cinder/volume/drivers/zadara.py:650 +#: cinder/volume/drivers/emc/emc_smis_common.py:859 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:235 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:602 +#: cinder/volume/drivers/netapp/iscsi.py:1032 +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/nexenta/iscsi.py:538 +#: cinder/volume/drivers/windows/windows.py:205 +msgid "Updating volume stats" +msgstr "" + +#: cinder/volume/driver.py:924 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:203 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." 
+msgstr "" + +#: cinder/volume/manager.py:209 +msgid "" +"ThinLVMVolumeDriver is deprecated, please configure LVMISCSIDriver and " +"lvm_type=thin. Continuing with those settings." +msgstr "" + +#: cinder/volume/manager.py:228 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)" +msgstr "" + +#: cinder/volume/manager.py:235 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s" +msgstr "" + +#: cinder/volume/manager.py:244 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:257 +#, python-format +msgid "Failed to re-export volume %s: setting to error state" +msgstr "" + +#: cinder/volume/manager.py:264 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:271 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:273 +#, python-format +msgid "" +"Error encountered during re-exporting phase of driver initialization: " +"%(name)s" +msgstr "" + +#: cinder/volume/manager.py:283 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:328 cinder/volume/manager.py:330 +msgid "Failed to create manager volume flow" +msgstr "" + +#: cinder/volume/manager.py:374 cinder/volume/manager.py:391 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:380 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:389 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:394 +#, python-format +msgid "Cannot delete volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:422 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:427 +#, python-format +msgid "volume %s: glance metadata deleted" +msgstr "" + +#: cinder/volume/manager.py:430 +#, python-format +msgid "no glance metadata found for volume %s" +msgstr "" + +#: cinder/volume/manager.py:434 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:451 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:462 +#, python-format +msgid "snapshot %(snap_id)s: creating" +msgstr "" + +#: cinder/volume/manager.py:490 +#, python-format +msgid "" +"Failed updating %(snapshot_id)s metadata using the provided volumes " +"%(volume_id)s metadata" +msgstr "" + +#: cinder/volume/manager.py:496 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:508 cinder/volume/manager.py:518 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:526 +#, python-format +msgid "Cannot delete snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:556 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:559 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:579 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:583 +msgid "being attached by another host" +msgstr "" + +#: cinder/volume/manager.py:587 +msgid "being attached by different mode" +msgstr "" + +#: cinder/volume/manager.py:590 +msgid "status must be available or attaching" +msgstr "" + +#: cinder/volume/manager.py:698 +#, python-format +msgid 
"Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "" + +#: cinder/volume/manager.py:760 +#, python-format +msgid "Unable to fetch connection information from backend: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:807 +#, python-format +msgid "Unable to terminate volume connection: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:854 +msgid "failed to create new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:857 +msgid "timeout creating new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:880 +#, python-format +msgid "Failed to copy volume %(vol1)s to %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:909 +#, python-format +msgid "" +"migrate_volume_completion: completing migration for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:921 +#, python-format +msgid "" +"migrate_volume_completion is cleaning up an error for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:940 +#, python-format +msgid "Failed to delete migration source vol %(vol)s: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:976 +#, python-format +msgid "volume %s: calling driver migrate_volume" +msgstr "" + +#: cinder/volume/manager.py:1016 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/manager.py:1024 +#, python-format +msgid "" +"Unable to update stats, %(driver_name)s -%(driver_version)s " +"%(config_group)s driver is uninitialized." +msgstr "" + +#: cinder/volume/manager.py:1044 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/manager.py:1091 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to extend volume by %(s_size)sG, " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/manager.py:1103 +#, python-format +msgid "volume %s: extending" +msgstr "" + +#: cinder/volume/manager.py:1105 +#, python-format +msgid "volume %s: extended successfully" +msgstr "" + +#: cinder/volume/manager.py:1107 +#, python-format +msgid "volume %s: Error trying to extend volume" +msgstr "" + +#: cinder/volume/manager.py:1169 +msgid "Failed to update usages while retyping volume." +msgstr "" + +#: cinder/volume/manager.py:1170 +msgid "Failed to get old volume type quota reservations" +msgstr "" + +#: cinder/volume/manager.py:1190 +#, python-format +msgid "Volume %s: retyped succesfully" +msgstr "" + +#: cinder/volume/manager.py:1193 +#, python-format +msgid "" +"Volume %s: driver error when trying to retype, falling back to generic " +"mechanism." +msgstr "" + +#: cinder/volume/manager.py:1204 +msgid "Retype requires migration but is not allowed." +msgstr "" + +#: cinder/volume/manager.py:1212 +msgid "Volume must not have snapshots." 
+msgstr "" + +#: cinder/volume/qos_specs.py:57 +#, python-format +msgid "Valid consumer of QoS specs are: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:84 cinder/volume/qos_specs.py:105 +#: cinder/volume/qos_specs.py:155 cinder/volume/qos_specs.py:197 +#: cinder/volume/qos_specs.py:211 cinder/volume/qos_specs.py:225 +#: cinder/volume/volume_types.py:43 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:123 cinder/volume/qos_specs.py:140 +#: cinder/volume/qos_specs.py:272 cinder/volume/volume_types.py:52 +#: cinder/volume/volume_types.py:99 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/qos_specs.py:156 +#, python-format +msgid "Failed to get all associations of qos specs %s" +msgstr "" + +#: cinder/volume/qos_specs.py:189 +#, python-format +msgid "" +"Type %(type_id)s is already associated with another qos specs: " +"%(qos_specs_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:198 +#, python-format +msgid "Failed to associate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:212 +#, python-format +msgid "Failed to disassociate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:226 +#, python-format +msgid "Failed to disassociate qos specs %s." +msgstr "" + +#: cinder/volume/qos_specs.py:284 cinder/volume/volume_types.py:111 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/utils.py:144 +#, python-format +msgid "" +"Incorrect value error: %(blocksize)s, it may indicate that " +"'volume_dd_blocksize' was configured incorrectly. Fall back to default." +msgstr "" + +#: cinder/volume/volume_types.py:130 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:131 +#: cinder/volume/drivers/block_device.py:143 cinder/volume/drivers/lvm.py:654 +#: cinder/volume/drivers/lvm.py:669 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:157 cinder/volume/drivers/lvm.py:687 +#, python-format +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:183 cinder/volume/drivers/lvm.py:483 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:200 cinder/volume/drivers/lvm.py:504 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:272 cinder/volume/drivers/lvm.py:227 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:287 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:311 cinder/volume/drivers/lvm.py:300 +#: cinder/volume/drivers/zadara.py:509 cinder/volume/drivers/nexenta/nfs.py:189 +#, python-format +msgid "Creating clone of volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:380 +msgid "No free disk" +msgstr "" + +#: cinder/volume/drivers/block_device.py:393 +msgid "No big enough free disk" +msgstr "" + +#: cinder/volume/drivers/coraid.py:84 +#, python-format +msgid "Invalid ESM url scheme \"%s\". Supported https only." +msgstr "" + +#: cinder/volume/drivers/coraid.py:111 +msgid "Invalid REST handle name. Expected path." 
+msgstr "" + +#: cinder/volume/drivers/coraid.py:134 +#, python-format +msgid "Call to json.loads() failed: %(ex)s. Response: %(resp)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:224 +msgid "Session is expired. Relogin on ESM." +msgstr "" + +#: cinder/volume/drivers/coraid.py:244 +msgid "Reply is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:246 +msgid "Error message is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:284 +#, python-format +msgid "Coraid Appliance ping failed: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:297 +#, python-format +msgid "Volume \"%(name)s\" created with VSX LUN \"%(lun)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:311 +#, python-format +msgid "Volume \"%s\" deleted." +msgstr "" + +#: cinder/volume/drivers/coraid.py:315 +#, python-format +msgid "Resize volume \"%(name)s\" to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:319 +#, python-format +msgid "Repository for volume \"%(name)s\" found: \"%(repo)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:333 +#, python-format +msgid "Volume \"%(name)s\" resized. New size is %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:385 +msgid "Cannot create clone volume in different repository." +msgstr "" + +#: cinder/volume/drivers/coraid.py:505 +#, python-format +msgid "Initialize connection %(shelf)s/%(lun)s for %(name)s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:139 +#, python-format +msgid "" +"CLI output\n" +"%s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:154 +msgid "Reading CLI MOTD" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:158 +#, python-format +msgid "Setting CLI terminal width: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:162 +#, python-format +msgid "Sending CLI command: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:169 +msgid "Error executing EQL command" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:199 +#, python-format +msgid "EQL-driver: executing \"%s\"" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:208 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:383 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:215 cinder/volume/drivers/san/san.py:149 +#, python-format +msgid "Error running SSH command: %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:282 +#, python-format +msgid "Volume %s does not exist, it may have already been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:300 +#, python-format +msgid "EQL-driver: Setup is complete, group IP is %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:304 +msgid "Failed to setup the Dell EqualLogic driver" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:320 +#, python-format +msgid "Failed to create volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:329 +#, python-format +msgid "Volume %s was not found while trying to delete it" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:333 +#, python-format +msgid "Failed to delete volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:348 +#, python-format +msgid "Failed to create snapshot of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:361 +#, python-format +msgid "Failed to create volume from snapshot %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:374 +#, python-format +msgid "Failed to create clone of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:384 +#, python-format +msgid "Failed to delete snapshot %(snap)s of volume %(vol)s" +msgstr "" + +#: 
cinder/volume/drivers/eqlx.py:405 +#, python-format +msgid "Failed to initialize connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:415 +#, python-format +msgid "Failed to terminate connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:436 +#, python-format +msgid "Volume %s was not found! It may have been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:440 +#, python-format +msgid "Failed to ensure export of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:459 +#, python-format +msgid "Failed to extend_volume %(name)s from %(current_size)sGB to %(new_size)sGB" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:86 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:91 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:103 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:161 +#, python-format +msgid "Cloning volume %(src)s to volume %(dst)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:166 +msgid "Volume status must be 'available'." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:202 cinder/volume/drivers/nfs.py:122 +#: cinder/volume/drivers/netapp/nfs.py:753 +#, python-format +msgid "casted to %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:216 +msgid "Snapshot status must be \"available\" to clone." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:238 +#, python-format +msgid "snapshot: %(snap)s, volume: %(vol)s, volume_size: %(size)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:257 +#, python-format +msgid "will copy from snapshot at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:275 cinder/volume/drivers/nfs.py:172 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:373 +#, python-format +msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:403 +#, python-format +msgid "nova call result: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:405 +msgid "Call to Nova to create snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:427 +msgid "Nova returned \"error\" status while creating snapshot." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:431 +#, python-format +msgid "Status of snapshot %(id)s is now %(status)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:444 +#, python-format +msgid "Timed out while waiting for Nova update for creation of snapshot %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:456 +#, python-format +msgid "create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:457 +#, python-format +msgid "volume id: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:532 +msgid "'active' must be present when writing snap_info." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:562 +#, python-format +msgid "deleting snapshot %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:566 +msgid "Volume status must be \"available\" or \"in-use\"." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:582 +#, python-format +msgid "" +"Snapshot record for %s is not present, allowing snapshot_delete to " +"proceed."
+msgstr "" + +#: cinder/volume/drivers/glusterfs.py:587 +#, python-format +msgid "snapshot_file for this snap is %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:608 +#, python-format +msgid "No base file found for %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:625 +#, python-format +msgid "No %(base_id)s found for %(file)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:680 +#, python-format +msgid "No file found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:690 +#, python-format +msgid "No snap found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:701 +#, python-format +msgid "No file depends on %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:727 +#, python-format +msgid "Check condition failed: %s expected to be None." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:778 +msgid "Call to Nova delete snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:796 +#, python-format +msgid "status of snapshot %s is still \"deleting\"... waiting" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:802 +#, python-format +msgid "Unable to delete snapshot %(id)s, status: %(status)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:815 +#, python-format +msgid "Timed out while waiting for Nova update for deletion of snapshot %(id)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:904 +#, python-format +msgid "%s must be a valid raw or qcow2 image." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:967 +msgid "Extend volume is only supported for this driver when no snapshots exist." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:975 +#, python-format +msgid "Unrecognized backing format: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:990 +#, python-format +msgid "creating new volume at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:993 +#, python-format +msgid "file already exists at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1019 cinder/volume/drivers/nfs.py:159 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1021 +#, python-format +msgid "Available shares: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1038 +#, python-format +msgid "" +"GlusterFS share at %(dir)s is not writable by the Cinder volume service. " +"Snapshot operations will not be supported." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:96 +#, python-format +msgid "GPFS is not active. Detailed output: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:97 +#, python-format +msgid "GPFS is not running - state: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:140 +msgid "Option gpfs_mount_point_base is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:147 +msgid "Option gpfs_images_share_mode is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:153 +msgid "Option gpfs_images_dir is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:160 +#, python-format +msgid "" +"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " +"belong to different file systems" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:169 +#, python-format +msgid "" +"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in " +"cluster daemon level %(cur)s - must be at least at level %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:183 +#, python-format +msgid "%s must be an absolute path." 
+msgstr "" + +#: cinder/volume/drivers/gpfs.py:188 +#, python-format +msgid "%s is not a directory." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:197 +#, python-format +msgid "" +"The GPFS filesystem %(fs)s is not at the required release level. Current" +" level is %(cur)s, must be at least %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:556 +#, python-format +msgid "Failed to resize volume %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:604 +#, python-format +msgid "mkfs failed on volume %(vol)s, error message was: %(err)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:637 +#, python-format +msgid "" +"%s cannot be accessed. Verify that GPFS is active and file system is " +"mounted." +msgstr "" + +#: cinder/volume/drivers/lvm.py:189 +#, python-format +msgid "Unabled to delete due to existing snapshot for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:215 +#, python-format +msgid "Volume device file path %s does not exist." +msgstr "" + +#: cinder/volume/drivers/lvm.py:221 +#, python-format +msgid "Size for volume: %s not found, cannot secure delete." +msgstr "" + +#: cinder/volume/drivers/lvm.py:262 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:361 +#, python-format +msgid "Unable to update stats on non-initialized Volume Group: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:462 +#, python-format +msgid "Error creating iSCSI target, retrying creation for target: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:482 +#, python-format +msgid "volume_info:%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:518 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:519 cinder/volume/drivers/lvm.py:724 +#: cinder/volume/drivers/huawei/rest_common.py:1225 +#, python-format +msgid "%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:573 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/nfs.py:109 +msgid "Driver specific implementation needs to return mount_point_base." +msgstr "" + +#: cinder/volume/drivers/nfs.py:263 +#, python-format +msgid "Expected volume size was %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:264 +#, python-format +msgid " but size is now %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:361 +#, python-format +msgid "%s is already mounted" +msgstr "" + +#: cinder/volume/drivers/nfs.py:421 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:426 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/nfs.py:431 +#, python-format +msgid "NFS config 'nfs_oversub_ratio' invalid. Must be > 0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:439 +#, python-format +msgid "NFS config 'nfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:493 +#, python-format +msgid "Selected %s as target nfs share." 
+msgstr "" + +#: cinder/volume/drivers/nfs.py:526 +#, python-format +msgid "%s is above nfs_used_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:529 +#, python-format +msgid "%s is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:532 +#, python-format +msgid "%s reserved space is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/rbd.py:160 +#, python-format +msgid "Invalid argument - whence=%s not supported" +msgstr "" + +#: cinder/volume/drivers/rbd.py:164 +msgid "Invalid argument" +msgstr "" + +#: cinder/volume/drivers/rbd.py:183 +msgid "fileno() not supported by RBD()" +msgstr "" + +#: cinder/volume/drivers/rbd.py:210 +#, python-format +msgid "error opening rbd image %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:259 +msgid "rados and rbd python libraries not found" +msgstr "" + +#: cinder/volume/drivers/rbd.py:265 +msgid "error connecting to ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:346 cinder/volume/drivers/sheepdog.py:178 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:377 +#, python-format +msgid "clone depth exceeds limit of %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:411 +#, python-format +msgid "maximum clone depth (%d) has been reached - flattening source volume" +msgstr "" + +#: cinder/volume/drivers/rbd.py:423 +#, python-format +msgid "flattening source volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:435 +#, python-format +msgid "creating snapshot='%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:445 +#, python-format +msgid "cloning '%(src_vol)s@%(src_snap)s' to '%(dest)s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:459 +msgid "clone created successfully" +msgstr "" + +#: cinder/volume/drivers/rbd.py:468 +#, python-format +msgid "creating volume '%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:484 +#, python-format +msgid "flattening %(pool)s/%(img)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:490 +#, python-format +msgid "cloning %(pool)s/%(img)s@%(snap)s to %(dst)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:527 +msgid "volume has no backup snaps" +msgstr "" + +#: cinder/volume/drivers/rbd.py:550 +#, python-format +msgid "volume %s is not a clone" +msgstr "" + +#: cinder/volume/drivers/rbd.py:568 +#, python-format +msgid "deleting parent snapshot %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:579 +#, python-format +msgid "deleting parent %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:593 +#, python-format +msgid "volume %s no longer exists in backend" +msgstr "" + +#: cinder/volume/drivers/rbd.py:609 +msgid "volume has clone snapshot(s)" +msgstr "" + +#: cinder/volume/drivers/rbd.py:625 +#, python-format +msgid "deleting rbd volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:629 +msgid "" +"ImageBusy error raised while deleting rbd volume. This may have been " +"caused by a connection from a client that has crashed and, if so, may be " +"resolved by retrying the delete after 30 seconds has elapsed." 
+msgstr "" + +#: cinder/volume/drivers/rbd.py:642 +msgid "volume is a clone so cleaning references" +msgstr "" + +#: cinder/volume/drivers/rbd.py:696 +#, python-format +msgid "connection data: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:705 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:709 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:712 +msgid "Not an rbd snapshot" +msgstr "" + +#: cinder/volume/drivers/rbd.py:724 +#, python-format +msgid "not cloneable: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:728 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:733 +msgid "rbd image clone requires image format to be 'raw' but image {0} is '{1}'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:747 +#, python-format +msgid "Unable to open image %(loc)s: %(err)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:817 +msgid "volume backup complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:830 +msgid "volume restore complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:840 cinder/volume/drivers/sheepdog.py:195 +#, python-format +msgid "Failed to Extend Volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:845 cinder/volume/drivers/sheepdog.py:200 +#: cinder/volume/drivers/windows/windows.py:223 +#, python-format +msgid "Extend volume from %(old_size)s GB to %(new_size)s GB." +msgstr "" + +#: cinder/volume/drivers/scality.py:67 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:78 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:84 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:105 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:139 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:59 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:64 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:144 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:151 +#, python-format +msgid "" +"Failed to make httplib connection SolidFire Cluster: %s (verify san_ip " +"settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:154 +#, python-format +msgid "Failed to make httplib connection: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:161 +#, python-format +msgid "" +"Request to SolidFire cluster returned bad status: %(status)s / %(reason)s" +" (check san_login/san_password settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:166 +#, python-format +msgid "HTTP request failed, with status: %(status)s and reason: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:177 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:183 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:187 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:189 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:195 +#, python-format +msgid "Detected 
xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:202 +#: cinder/volume/drivers/solidfire.py:271 +#: cinder/volume/drivers/solidfire.py:366 +#, python-format +msgid "API response: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:222 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:253 +#, python-format +msgid "solidfire account: %s does not exist, creating it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:315 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:398 +msgid "Failed to get model update from clone" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:410 +#, python-format +msgid "Failed volume create: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:425 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:460 +#, python-format +msgid "Failed to get SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:469 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:478 +#, python-format +msgid "Volume %s not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:481 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:550 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:554 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:556 +msgid "This usually means the volume was never successfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:569 +#, python-format +msgid "Failed to delete SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:572 +#: cinder/volume/drivers/solidfire.py:646 +#: cinder/volume/drivers/solidfire.py:709 +#: cinder/volume/drivers/solidfire.py:734 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:575 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:579 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:587 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:638 +msgid "Entering SolidFire extend_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:660 +msgid "Leaving SolidFire extend_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:665 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:673 +msgid "Failed to get updated stats" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:703 +#: cinder/volume/drivers/solidfire.py:728 +msgid "Entering SolidFire attach_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:773 +msgid "Leaving SolidFire transfer volume" +msgstr "" + +#: cinder/volume/drivers/zadara.py:236 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:260 +#, python-format +msgid "Operation completed. 
%(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:357 +#, python-format +msgid "Pool %(name)s: %(total)sGB total, %(free)sGB free" +msgstr "" + +#: cinder/volume/drivers/zadara.py:408 cinder/volume/drivers/zadara.py:531 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:438 +#, python-format +msgid "Create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:445 cinder/volume/drivers/zadara.py:490 +#: cinder/volume/drivers/zadara.py:516 +#, python-format +msgid "Volume %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:456 +#, python-format +msgid "Delete snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:464 +#, python-format +msgid "snapshot: original volume %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:472 +#, python-format +msgid "snapshot: snapshot %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:483 +#, python-format +msgid "Creating volume from snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:496 +#, python-format +msgid "Snapshot %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:614 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:40 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:79 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:83 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:91 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:98 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:107 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:115 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:130 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:137 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:144 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:152 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:157 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:167 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:177 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:188 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:197 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:218 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:230 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:241 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:257 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:266 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:278 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:287 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:292 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:302 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:312 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:321 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:342 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. 
Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:354 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:365 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:381 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:390 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:402 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:411 +msgid "Entering delete_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:413 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:420 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:430 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:438 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:442 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigService: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:456 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:465 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:472 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:476 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:488 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:497 +#: cinder/volume/drivers/emc/emc_smis_common.py:567 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:502 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:518 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:527 +#, python-format +msgid "" +"Error Create Snapshot: %(snapshot)s Volume: %(volume)s Error: " +"%(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:535 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:541 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:545 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:551 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:559 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:574 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:590 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:599 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:611 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:621 +#, python-format +msgid "Create export: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:626 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:648 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:663 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:674 +#, python-format +msgid "Error mapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:678 +#, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:694 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:707 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:711 +#, python-format +msgid "HidePaths for volume %s completed successfully." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:724 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:739 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:744 +#, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:757 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:770 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:775 +#, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:781 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:790 +#: cinder/volume/drivers/emc/emc_smis_common.py:820 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:804 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:810 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:834 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:840 +#, python-format +msgid "Volume %s is already mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:852 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:884 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:887 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:903 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:906 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:928 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:948 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:952 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:959 +msgid "Cannot connect to ECOM server" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:971 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:984 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:997 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1010 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1054 +#, python-format +msgid "Pool %(storage_type)s is not found." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1060 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1066 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1082 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1114 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1117 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1130 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1158 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1184 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1188 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1248 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1289 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1302 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1314 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1326 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1361 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1404 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1409 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1419 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1441 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1463 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1491 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1520 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1526 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1538 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1548 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1550 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1566 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:152 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:161 +#, python-format +msgid "Cannot find device number for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:191 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:198 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:215 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:70 +#, python-format +msgid "Range: start LU: %(start)s, end LU: %(end)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:84 +#, python-format +msgid "setting LU upper (end) limit to %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:92 +#, python-format +msgid "%(element)s: %(val)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:103 cinder/volume/drivers/hds/hds.py:105 +#, python-format +msgid "XML exception reading parameter: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:178 +#, python-format +msgid "portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:197 +#, python-format +msgid "No configuration found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:250 +#, python-format +msgid "HDP not found: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:289 +#, python-format +msgid "iSCSI portal not found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:327 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:355 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is cloned." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:372 +#, python-format +msgid "LUN %(lun)s extended to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:395 +#, python-format +msgid "delete lun %(lun)s on %(name)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:480 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created from snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:503 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is created as snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:522 +#, python-format +msgid "LUN %s is deleted." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:57 +msgid "_instantiate_driver: configuration not found." 
+msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:64 +#, python-format +msgid "" +"_instantiate_driver: Loading %(protocol)s driver for Huawei OceanStor " +"%(product)s series storage arrays." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:84 +#, python-format +msgid "" +"\"Product\" or \"Protocol\" is illegal. \"Product\" should be set to " +"either T, Dorado or HVS. \"Protocol\" should be set to either iSCSI or " +"FC. Product: %(product)s Protocol: %(protocol)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:74 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s host: %(host)s initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:92 +#: cinder/volume/drivers/huawei/huawei_t.py:461 +#, python-format +msgid "initialize_connection: Target FC ports WWNS: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:101 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(ini)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:159 +#: cinder/volume/drivers/huawei/rest_common.py:1278 +#, python-format +msgid "" +"_get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " +"check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:206 +#: cinder/volume/drivers/huawei/rest_common.py:1083 +#, python-format +msgid "_get_tgt_iqn: iSCSI IP is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:234 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:248 +#, python-format +msgid "" +"_get_iscsi_tgt_port_info: Failed to get iSCSI port info. Please make sure" +" the iSCSI port IP %s is configured in array." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:323 +#: cinder/volume/drivers/huawei/huawei_t.py:552 +#, python-format +msgid "" +"terminate_connection: volume: %(vol)s, host: %(host)s, connector: " +"%(initiator)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:351 +#, python-format +msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:436 +msgid "validate_connector: The FC driver requires thewwpns in the connector." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:443 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:578 +#, python-format +msgid "_remove_fc_ports: FC port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:40 +#, python-format +msgid "parse_xml_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:129 +#, python-format +msgid "_get_host_os_type: Host %(ip)s OS type is %(os)s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:59 +#, python-format +msgid "HVS Request URL: %(url)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:60 +#, python-format +msgid "HVS Request Data: %(data)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:73 +#, python-format +msgid "HVS Response Data: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:75 +#, python-format +msgid "Bad response from server: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:82 +msgid "JSON transfer error" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:102 +#, python-format +msgid "Login error, reason is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:166 +#, python-format +msgid "" +"%(err)s\n" +"result: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:173 +#, python-format +msgid "%s \"data\" was not in result." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:208 +msgid "Can't find the Qos policy in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:246 +msgid "Can't find lun or lun group in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:280 +#, python-format +msgid "Invalid resource pool: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:298 +#, python-format +msgid "Get pool info error, pool name is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:327 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:354 +#, python-format +msgid "_stop_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:474 +#, python-format +msgid "" +"_mapping_hostgroup_and_lungroup: lun_group: %(lun_group)sview_id: " +"%(view_id)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:511 +#: cinder/volume/drivers/huawei/rest_common.py:543 +#, python-format +msgid "initiator name:%(initiator_name)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:527 +#, python-format +msgid "host lun id is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:553 +#, python-format +msgid "the free wwns %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:574 +#, python-format +msgid "the fc server properties is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:688 +#, python-format +msgid "JSON transfer data error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:874 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:937 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:964 +#, python-format +msgid "" +"PrefetchType config is wrong. PrefetchType must in 1,2,3,4. fetchtype " +"is:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:970 +msgid "Use default prefetch fetchtype. Prefetch fetchtype:Intelligent." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:982 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal.LUNcopy name: " +"%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1056 +#, python-format +msgid "" +"_get_iscsi_port_info: Failed to get iscsi port info through config IP " +"%(ip)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1101 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1124 +#, python-format +msgid "_parse_volume_type: type id: %(type_id)s config parameter is: %(params)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1157 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the configuration file " +"%(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1162 +#, python-format +msgid "The config parameters are: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1239 +#: cinder/volume/drivers/huawei/ssh_common.py:118 +#: cinder/volume/drivers/huawei/ssh_common.py:1265 +#, python-format +msgid "_check_conf_file: Config file invalid. %s must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1246 +#: cinder/volume/drivers/huawei/ssh_common.py:125 +msgid "_check_conf_file: Config file invalid. StoragePool must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1256 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1300 +msgid "Can not find lun in array" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:54 +#, python-format +msgid "ssh_read: Read SSH timeout. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:70 +msgid "No response message. Please check system status." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:101 +#: cinder/volume/drivers/huawei/ssh_common.py:1249 +msgid "do_setup" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:135 +#: cinder/volume/drivers/huawei/ssh_common.py:1287 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType is invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:169 +#, python-format +msgid "_get_login_info: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:224 +#, python-format +msgid "create_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:242 +#, python-format +msgid "" +"_name_translate: Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:279 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the element in configuration " +"file %(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:373 +#: cinder/volume/drivers/huawei/ssh_common.py:1451 +#, python-format +msgid "LUNType must be \"Thin\" or \"Thick\". LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:395 +msgid "" +"_parse_conf_lun_params: Use default prefetch type. 
Prefetch type: " +"Intelligent" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:421 +#, python-format +msgid "" +"_get_maximum_capacity_pool_id: Failed to get pool id. Please check config" +" file and make sure the StoragePool %s is created in storage array." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:436 +#, python-format +msgid "CLI command: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:466 +#, python-format +msgid "" +"_execute_cli: Can not connect to IP %(old)s, try to connect to the other " +"IP %(new)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:501 +#, python-format +msgid "_execute_cli: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:511 +#, python-format +msgid "delete_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:516 +#, python-format +msgid "delete_volume: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:570 +#, python-format +msgid "" +"create_volume_from_snapshot: snapshot name: %(snapshot)s, volume name: " +"%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:580 +#, python-format +msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:650 +#, python-format +msgid "_wait_for_luncopy: LUNcopy %(luncopyname)s status is %(status)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:688 +#, python-format +msgid "create_cloned_volume: src volume: %(src)s, tgt volume: %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:697 +#, python-format +msgid "Source volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:739 +#, python-format +msgid "" +"extend_volume: extended volume name: %(extended_name)s new added volume " +"name: %(added_name)s new added volume size: %(added_size)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:747 +#, python-format +msgid "extend_volume: volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:779 +#, python-format +msgid "create_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:785 +msgid "create_snapshot: Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:792 +#, python-format +msgid "create_snapshot: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:855 +#, python-format +msgid "delete_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:865 +#, python-format +msgid "" +"delete_snapshot: Can not delete snapshot %s for it is a source LUN of " +"LUNCopy." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:873 +#, python-format +msgid "delete_snapshot: Snapshot %(snap)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:916 +#, python-format +msgid "" +"%(func)s: %(msg)s\n" +"CLI command: %(cmd)s\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:933 +#, python-format +msgid "map_volume: Volume %s was not found." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1079 +#, python-format +msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1102 +#, python-format +msgid "remove_map: Host %s does not exist." 
+msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1106 +#, python-format +msgid "remove_map: Volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1119 +#, python-format +msgid "remove_map: No map between host %(host)s and volume %(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1138 +#, python-format +msgid "" +"_delete_map: There are IOs accessing the system. Retry to delete host map" +" %(mapid)s 10s later." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1146 +#, python-format +msgid "" +"_delete_map: Failed to delete host map %(mapid)s.\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1185 +msgid "_update_volume_stats: Updating volume stats." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1277 +msgid "_check_conf_file: Config file invalid. StoragePool must be specified." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1311 +msgid "" +"_get_device_type: The driver only supports Dorado5100 and Dorado 2100 G2 " +"now." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1389 +#, python-format +msgid "" +"create_volume_from_snapshot: %(device)s does not support create volume " +"from snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1396 +#, python-format +msgid "create_cloned_volume: %(device)s does not support clone volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1404 +#, python-format +msgid "extend_volume: %(device)s does not support extend volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1413 +#, python-format +msgid "create_snapshot: %(device)s does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:132 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:142 +#, python-format +msgid "Failed getting details for pool %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:179 +msgid "do_setup: No configured nodes." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:182 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:186 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:190 +msgid "Unable to determine system name" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:193 +msgid "Unable to determine system id" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:196 +msgid "Unable to determine pool extent size" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:203 +#: cinder/volume/drivers/netapp/iscsi.py:122 +#: cinder/volume/drivers/netapp/nfs.py:639 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:157 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:209 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:217 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:225 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:235 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:254 +msgid "The connector does not contain the required information." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:277 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:297 +msgid "CHAP secret exists for host but CHAP is disabled" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:302 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:314 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:316 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:333 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:342 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:365 +msgid "" +"Could not get FC connection information for the host-volume connection. " +"Is the host configured properly for FC connections?" 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:380 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:385 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:403 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:411 +msgid "terminate_connection: Failed to get host name from connector." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:421 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:447 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:459 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:470 +#, python-format +msgid "enter: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:474 +msgid "extend_volume: Extending a volume with snapshots is not supported." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:481 +#, python-format +msgid "leave: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:497 +#, python-format +msgid "enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:523 +#, python-format +msgid "leave: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:540 +#, python-format +msgid "" +"enter: retype: id=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:581 +#, python-format +msgid "" +"exit: retype: ild=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:622 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:623 +msgid "_update_volume_stats: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:44 +#, python-format +msgid "Could not find key in output of command %(cmd)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:64 +#, python-format +msgid "Failed to get code level (%s)." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:86 +#, python-format +msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:143 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:165 +#, python-format +msgid "Failed to find host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:178 +#, python-format +msgid "enter: get_host_from_connector: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:207 +#, python-format +msgid "leave: get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:218 +#, python-format +msgid "enter: create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:224 +msgid "create_host: Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:235 +msgid "create_host: No initiators or wwpns supplied." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:265 +#, python-format +msgid "leave: create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:275 +#, python-format +msgid "enter: map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:301 +#, python-format +msgid "" +"leave: map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host " +"%(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:311 +#, python-format +msgid "enter: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:318 +#, python-format +msgid "unmap_vol_from_host: No mapping of volume %(vol_name)s to any host found." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:324 +#, python-format +msgid "" +"unmap_vol_from_host: Multiple mappings of volume %(vol_name)s found, no " +"host specified." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:336 +#, python-format +msgid "" +"unmap_vol_from_host: No mapping of volume %(vol_name)s to host %(host) " +"found." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:348 +#, python-format +msgid "leave: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:377 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:383 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:390 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:397 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:402 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:408 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:417 +#, python-format +msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:452 +msgid "Protocol must be specified as ' iSCSI' or ' FC'." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:495 +#, python-format +msgid "enter: create_vdisk: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:498 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:525 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping%(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:535 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within theallotted %(to)d " +"seconds timeout. Terminating." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:544 +#, python-format +msgid "" +"enter: run_flashcopy: execute FlashCopy from source %(source)s to target " +"%(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:552 +#, python-format +msgid "leave: run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:572 +#, python-format +msgid "Loopcall: _check_vdisk_fc_mappings(), vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:595 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:631 +#, python-format +msgid "Calling _ensure_vdisk_no_fc_mappings: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:639 +#, python-format +msgid "enter: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:641 +#, python-format +msgid "Tried to delete non-existant vdisk %s." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:645 +#, python-format +msgid "leave: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:649 +#, python-format +msgid "enter: create_copy: snapshot %(src)s to %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:654 +#, python-format +msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:669 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt)s from vdisk %(src)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:691 +msgid "migrate_volume started without a vdisk copy in the expected pool." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:743 +#, python-format +msgid "" +"Ignore change IO group as storage code level is %(code_level)s, below " +"then 6.4.0.0" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:35 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:211 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:244 +#, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:53 +#, python-format +msgid "Expected no output from CLI command %(cmd)s, got %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:65 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:256 +#, python-format +msgid "" +"Failed to parse CLI output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:142 +msgid "Must pass wwpn or host to lsfabric." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:158 +#, python-format +msgid "Did not find success message nor error for %(fun)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:163 +msgid "" +"storwize_svc_multihostmap_enabled is set to False, not allowing multi " +"host mapping." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:347 +#, python-format +msgid "Did not find expected key %(key)s in %(fun)s: %(raw)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:382 +#, python-format +msgid "" +"Unexpected CLI response: header/row mismatch. header: %(header)s, row: " +"%(row)s" +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:419 +#, python-format +msgid "No element by given name %s." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:440 +msgid "Not a valid value for NaElement." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:444 +msgid "NaElement name cannot be null." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:468 +msgid "Type cannot be converted into NaElement." 
+msgstr "" + +#: cinder/volume/drivers/netapp/common.py:75 +msgid "Required configuration not found" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:103 +#, python-format +msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:109 +#, python-format +msgid "Storage family %s is not supported" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:116 +#, python-format +msgid "No default storage protocol found for storage family %(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:123 +#, python-format +msgid "" +"Protocol %(storage_protocol)s is not supported for storage family " +"%(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:130 +#, python-format +msgid "" +"NetApp driver of family %(storage_family)s and protocol " +"%(storage_protocol)s loaded" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:139 +msgid "Only loading netapp drivers supported." +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:158 +#, python-format +msgid "" +"The configured NetApp driver is deprecated. Please refer the link to " +"resolve the issue '%s'." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:69 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:105 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:150 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:166 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:175 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:191 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:227 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:232 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:238 +#, python-format +msgid "Failed to get LUN target details for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:249 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:252 +#, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:290 +#, python-format +msgid "Snapshot %s deletion successful" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:310 +#: cinder/volume/drivers/netapp/iscsi.py:565 +#: cinder/volume/drivers/netapp/nfs.py:99 +#: cinder/volume/drivers/netapp/nfs.py:206 +#, python-format +msgid "Resizing %s failed. Cleaning volume." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:325 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:412 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:431 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:511 +msgid "Object is not a NetApp LUN." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:543 +#, python-format +msgid "Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:545 +#, python-format +msgid "Error getting lun attribute. Exception: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:600 +#, python-format +msgid "No need to extend volume %s as it is already the requested new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:606 +#, python-format +msgid "Resizing lun %s directly to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:633 +#, python-format +msgid "Lun %(path)s geometry failed. Message - %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:662 +#, python-format +msgid "Moving lun %(name)s to %(new_name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:677 +#, python-format +msgid "Resizing lun %s using sub clone to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:684 +#, python-format +msgid "%s cannot be sub clone resized as it is hosted on compressed volume" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:690 +#, python-format +msgid "%s cannot be sub clone resized as it contains no blocks." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:707 +#, python-format +msgid "Post clone resize lun %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:718 +#, python-format +msgid "Failure staging lun %s to tmp." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:723 +#, python-format +msgid "Failure moving new cloned lun to %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:727 +#, python-format +msgid "Failure deleting staged tmp lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:730 +#, python-format +msgid "Unknown exception in post clone resize lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:732 +#, python-format +msgid "Exception details: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:736 +msgid "Getting lun block count." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:741 +#, python-format +msgid "Failure getting lun info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:785 +#, python-format +msgid "Failed to get vol with required size and extra specs for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:796 +#, python-format +msgid "Error provisioning vol %(name)s on %(volume)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:841 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:982 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:986 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1087 +msgid "Cluster ssc is not updated. No volume stats found." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1149 +#: cinder/volume/drivers/netapp/nfs.py:1080 +msgid "Unsupported ONTAP version. ONTAP version 7.3.1 and above is supported." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1153 +#: cinder/volume/drivers/netapp/nfs.py:1084 +#: cinder/volume/drivers/netapp/utils.py:320 +msgid "Api version could not be determined." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1164 +#, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1273 +#, python-format +msgid "Error finding luns for volume %s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1390 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1393 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1456 +msgid "Volume refresh job already running. Returning..." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1462 +#, python-format +msgid "Error refreshing vol capacity. Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1470 +#, python-format +msgid "Refreshing capacity info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:104 +#: cinder/volume/drivers/netapp/nfs.py:211 +#, python-format +msgid "NFS file %s not discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:224 +#, python-format +msgid "Copied image to volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:230 +#, python-format +msgid "Registering image in cache %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:237 +#, python-format +msgid "" +"Exception while registering image %(image_id)s in cache. Exception: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:250 +#, python-format +msgid "Found cache file for image %(image_id)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:263 +#, python-format +msgid "Cloning img from cache for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:271 +msgid "Image cache cleaning in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:282 +msgid "Image cache cleaning in progress." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:293 +#, python-format +msgid "Cleaning cache for share %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:298 +#, python-format +msgid "Files to be queued for deletion %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:305 +#, python-format +msgid "Exception during cache cleaning %(share)s. Message - %(ex)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:311 +msgid "Image cache cleaning done." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:336 +#, python-format +msgid "Bytes to free %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:343 +#, python-format +msgid "Delete file path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:358 +#, python-format +msgid "Deleting file at path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:363 +#, python-format +msgid "Exception during deleting %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:395 +#, python-format +msgid "Unexpected exception in cloning image %(image_id)s. 
Message: %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:411 +#, python-format +msgid "Cloning image %s from cache" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:415 +#, python-format +msgid "Cache share: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:425 +#, python-format +msgid "Unexpected exception during image cloning in share %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:431 +#, python-format +msgid "Cloning image %s directly in share" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:436 +#, python-format +msgid "Share is cloneable %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:443 +#, python-format +msgid "Image is raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:450 +#, python-format +msgid "Image will locally be converted to raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:457 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:467 +#, python-format +msgid "Performing post clone for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:474 +msgid "NFS file could not be discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:478 +msgid "Checking file for resize" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:482 +#, python-format +msgid "Resizing file to %sG" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:488 +msgid "Resizing image file failed." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:510 +msgid "Discover file retries exhausted." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:529 +#, python-format +msgid "Image location not in the expected format %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:557 +#, python-format +msgid "Found possible share matches %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:561 +msgid "Unexpected exception while short listing used share." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:599 +#, python-format +msgid "Extending volume %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:710 +#, python-format +msgid "Shares on vserver %s will only be used for provisioning." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:714 +#: cinder/volume/drivers/netapp/nfs.py:892 +msgid "No vserver set in config. SSC will be disabled." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:757 +#, python-format +msgid "Exception creating vol %(name)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:765 +#, python-format +msgid "Volume %s could not be created on shares." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:815 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:856 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:862 +#, python-format +msgid "" +"Cloning with params volume %(volume)s, src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:924 +msgid "No cluster ssc stats found. Wait for next volume stats update." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:941 +msgid "No shares found hence skipping ssc refresh." 
+msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:978 +#: cinder/volume/drivers/netapp/nfs.py:1221 +#, python-format +msgid "Shortlisted del elg files %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:983 +#: cinder/volume/drivers/netapp/nfs.py:1226 +#, python-format +msgid "Getting file usage for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:988 +#: cinder/volume/drivers/netapp/nfs.py:1231 +#, python-format +msgid "file-usage for path %(path)s is %(bytes)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1005 +#: cinder/volume/drivers/netapp/nfs.py:1268 +#, python-format +msgid "Share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1007 +#: cinder/volume/drivers/netapp/nfs.py:1270 +#, python-format +msgid "No share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1038 +#, python-format +msgid "Found volume %(vol)s for share %(share)s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1129 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1139 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:241 +#, python-format +msgid "Unexpected error while creating ssc vol list. Message - %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:272 +#, python-format +msgid "Exception querying aggr options. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:313 +#, python-format +msgid "Exception querying sis information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:347 +#, python-format +msgid "Exception querying mirror information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:379 +#, python-format +msgid "Exception querying storage disk. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:421 +#, python-format +msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:455 +#, python-format +msgid "Successfully completed stale refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:482 +#, python-format +msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:488 +#, python-format +msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:500 +msgid "Backend not a VolumeDriver." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:502 +msgid "Backend server not NaServer." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:505 +msgid "ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:517 +msgid "refresh stale ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:616 +msgid "Fatal error: User not permitted to query NetApp volumes." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:623 +#, python-format +msgid "" +"The user does not have access or sufficient privileges to use all ssc " +"apis. The ssc features %s may not work as expected." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:122 +msgid "ems executed successfully." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:124 +#, python-format +msgid "Failed to invoke ems. 
Message : %s" +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:137 +msgid "" +"It is not the recommended way to use drivers by NetApp. Please use " +"NetAppDriver to achieve the functionality." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:160 +msgid "Requires an NaServer instance." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:317 +msgid "Unsupported Clustered Data ONTAP version." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:99 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:150 +#, python-format +msgid "Extending volume: %(id)s New size: %(size)s GB" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:166 +#, python-format +msgid "Volume %s does not exist, it seems it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:179 +#, python-format +msgid "Cannot delete snapshot %(origin): %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:190 +#, python-format +msgid "Creating temp snapshot of the original volume: %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:200 +#: cinder/volume/drivers/nexenta/nfs.py:200 +#, python-format +msgid "Volume creation failed, deleting created snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:205 +#: cinder/volume/drivers/nexenta/nfs.py:205 +#, python-format +msgid "Failed to delete zfs snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:223 +#, python-format +msgid "Enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:250 +#, python-format +msgid "Remote NexentaStor appliance at %s should be SSH-bound." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:267 +#, python-format +msgid "" +"Cannot send source snapshot %(src)s to destination %(dst)s. Reason: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:275 +#, python-format +msgid "" +"Cannot delete temporary source snapshot %(src)s on NexentaStor Appliance:" +" %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:281 +#, python-format +msgid "Cannot delete source volume %(volume)s on NexentaStor Appliance: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:318 +#, python-format +msgid "Snapshot %s does not exist, it seems it was already deleted." 
+msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:439 +#: cinder/volume/drivers/windows/windows_utils.py:230 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:449 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:461 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:471 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:481 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:514 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:522 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:83 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:88 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:89 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:90 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:96 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:85 +#, python-format +msgid "Volume %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:89 +#, python-format +msgid "Folder %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:114 +#, python-format +msgid "Creating folder on Nexenta Store %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:146 +#, python-format +msgid "Cannot destroy created folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:176 +#, python-format +msgid "Cannot destroy cloned folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:227 +#, python-format +msgid "Folder %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:237 +#: cinder/volume/drivers/nexenta/nfs.py:268 +#, python-format +msgid "Snapshot %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:302 +#, python-format +msgid "Creating regular file: %s.This may take some time." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:313 +#, python-format +msgid "Regular file: %s created." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:365 +#, python-format +msgid "Sharing folder %s on Nexenta Store" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:393 +#, python-format +msgid "Shares loaded: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/utils.py:46 +#, python-format +msgid "Invalid value: \"%s\"" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:93 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:99 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:107 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:137 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:190 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:246 +#, python-format +msgid "Snapshot info: %(name)s => %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:321 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:79 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:166 +#, python-format +msgid "Invalid hp3parclient version. Version %s or greater required." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:179 +#, python-format +msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:193 +#, python-format +msgid "HP3PARCommon %(common_ver)s, hp3parclient %(rest_ver)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:212 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:494 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:228 +#, python-format +msgid "Failed to get domain because CPG (%s) doesn't exist on array." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:247 +#, python-format +msgid "Error extending volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:347 +#, python-format +msgid "command %s failed" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:390 +#, python-format +msgid "Error running ssh command: %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:586 +#, python-format +msgid "VV Set %s does not exist." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:633 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:684 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:752 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1004 +#, python-format +msgid "Failure in update_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1019 +#, python-format +msgid "Failure in clear_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1031 +#, python-format +msgid "Error attaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1039 +#, python-format +msgid "Error detaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:124 +#, python-format +msgid "Invalid IP address format '%s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:158 +#, python-format +msgid "" +"Found invalid iSCSI IP address(s) in configuration option(s) " +"hp3par_iscsi_ips or iscsi_ip_address '%s.'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:164 +msgid "At least one valid iSCSI IP address must be set." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:266 +msgid "Least busy iSCSI port not found, using first iSCSI port in list." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:75 +#, python-format +msgid "Failure while invoking function: %(func)s. Error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:162 +#, python-format +msgid "Error while terminating session: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:165 +msgid "Successfully established connection to the server." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:172 +#, python-format +msgid "Error while logging out the user: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:218 +#, python-format +msgid "" +"Not authenticated error occurred. Will create session and try API call " +"again: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:258 +#, python-format +msgid "Task: %(task)s progress: %(prog)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:262 +#, python-format +msgid "Task %s status: success." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:266 +#: cinder/volume/drivers/vmware/api.py:271 +#, python-format +msgid "Task: %(task)s failed with error: %(err)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:290 +msgid "Lease is ready." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:294 +msgid "Lease initializing..." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:304 +#, python-format +msgid "Error: unknown lease state %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:51 +#, python-format +msgid "Read %(bytes)s out of %(max)s from ThreadSafePipe." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:56 +#, python-format +msgid "Completed transfer of size %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:102 +#, python-format +msgid "Initiating image service update on image: %(image)s with meta: %(meta)s" +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:117 +#, python-format +msgid "Glance image: %s is now active." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:123 +#, python-format +msgid "Glance image: %s is in killed state." 
+msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:132 +#, python-format +msgid "Glance image %(id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:171 +#, python-format +msgid "" +"Exception during HTTP connection close in VMwareHTTPWrite. Exception is " +"%s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:203 +#: cinder/volume/drivers/vmware/read_write_util.py:292 +msgid "Could not retrieve URL from lease." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:206 +#, python-format +msgid "Opening vmdk url: %s for write." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:231 +#, python-format +msgid "Written %s bytes to vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:242 +#: cinder/volume/drivers/vmware/read_write_util.py:318 +#, python-format +msgid "Updating progress to %s percent." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:258 +#: cinder/volume/drivers/vmware/read_write_util.py:334 +msgid "Lease released." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:260 +#: cinder/volume/drivers/vmware/read_write_util.py:336 +#, python-format +msgid "Lease is already in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:295 +#, python-format +msgid "Opening vmdk url: %s for read." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:307 +#, python-format +msgid "Read %s bytes from vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:150 +#, python-format +msgid "Error(s): %s occurred in the call to RetrievePropertiesEx." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:189 +#, python-format +msgid "No such SOAP method %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:198 +#, python-format +msgid "httplib error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:209 +#, python-format +msgid "Socket error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:218 +#, python-format +msgid "Type error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:225 +#, python-format +msgid "Error in %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:112 +#, python-format +msgid "Returning spec value %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:115 +#, python-format +msgid "Invalid spec value: %s specified." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:118 +#, python-format +msgid "Returning default spec value: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:169 +#, python-format +msgid "%s not set." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:174 +#, python-format +msgid "Successfully setup driver: %(driver)s for server: %(ip)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:219 +msgid "Backing not available, no operation to be performed." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:287 +#, python-format +msgid "" +"Unable to pick datastore to accommodate %(size)s bytes from the " +"datastores: %(dss)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:293 +#, python-format +msgid "" +"Selected datastore: %(datastore)s with %(host_count)d connected host(s) " +"for the volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:375 +#, python-format +msgid "" +"Unable to find suitable datastore for volume of size: %(vol)s GB under " +"host: %(host)s. 
More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:385 +#, python-format +msgid "Unable to find host to accommodate a disk of size: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:412 +#, python-format +msgid "" +"Unable to find suitable datastore for volume: %(vol)s under host: " +"%(host)s. More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:422 +#, python-format +msgid "Unable to create volume: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:441 +#, python-format +msgid "The instance: %s for which initialize connection is called, exists." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:448 +#, python-format +msgid "There is no backing for the volume: %s. Need to create one." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:456 +msgid "The instance for which initialize connection is called, does not exist." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:461 +#, python-format +msgid "Trying to boot from an empty volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:470 +#, python-format +msgid "" +"Returning connection_info: %(info)s for volume: %(volume)s with " +"connector: %(connector)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:518 +#, python-format +msgid "Snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:523 +#, python-format +msgid "There is no backing, so will not create snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:528 +#, python-format +msgid "Successfully created snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:549 +#, python-format +msgid "Delete snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:554 +#, python-format +msgid "There is no backing, and so there is no snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:558 +#, python-format +msgid "Successfully deleted snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:586 +#, python-format +msgid "Successfully cloned new backing: %(back)s from source VMDK file: %(vmdk)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:603 +#, python-format +msgid "" +"There is no backing for the source volume: %(svol)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:633 +#, python-format +msgid "" +"There is no backing for the source snapshot: %(snap)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:642 +#: cinder/volume/drivers/vmware/vmdk.py:982 +#, python-format +msgid "" +"There is no snapshot point for the snapshoted volume: %(snap)s. Not " +"creating any backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:678 +#, python-format +msgid "Cannot create image of disk format: %s. Only vmdk disk format is accepted." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:713 +#: cinder/volume/drivers/vmware/vmdk.py:771 +#, python-format +msgid "Fetching glance image: %(id)s to server: %(host)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:722 +#: cinder/volume/drivers/vmware/vmdk.py:792 +#, python-format +msgid "Done copying image: %(id)s to volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:725 +#, python-format +msgid "" +"Exception in copy_image_to_volume: %(excep)s. Deleting the backing: " +"%(back)s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:746 +#, python-format +msgid "Exception in _select_ds_for_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:749 +#, python-format +msgid "Selected datastore %(ds)s for new volume of size %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:784 +#, python-format +msgid "Exception in copy_image_to_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:787 +#, python-format +msgid "Deleting the backing: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:808 +#, python-format +msgid "Copy glance image: %s to create new volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:842 +msgid "Upload to glance of attached volume is not supported." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:847 +#, python-format +msgid "Copy Volume: %s to new image." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:853 +#, python-format +msgid "Backing not found, creating for volume: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:872 +#, python-format +msgid "Done copying volume %(vol)s to a new image %(img)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:922 +#, python-format +msgid "Relocating volume: %(backing)s to %(ds)s and %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:961 +#: cinder/volume/drivers/vmware/volumeops.py:630 +#, python-format +msgid "Successfully created clone: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:974 +#, python-format +msgid "" +"There is no backing for the snapshoted volume: %(snap)s. Not creating any" +" backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1010 +#, python-format +msgid "" +"There is no backing for the source volume: %(src)s. Not creating any " +"backing for volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1018 +#, python-format +msgid "Linked clone of source volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:94 +#, python-format +msgid "Downloading image: %s from glance image server as a flat vmdk file." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:107 +#: cinder/volume/drivers/vmware/vmware_images.py:126 +#, python-format +msgid "Downloaded image: %s from glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:113 +#, python-format +msgid "Downloading image: %s from glance image server using HttpNfc import." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:132 +#, python-format +msgid "Uploading image: %s to the Glance image server using HttpNfc export." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:158 +#, python-format +msgid "Uploaded image: %s to the Glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:87 +#, python-format +msgid "Did not find any backing with name: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:94 +#, python-format +msgid "Deleting the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:97 +#, python-format +msgid "Initiated deletion of VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:99 +#, python-format +msgid "Deleted the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:237 +#, python-format +msgid "There are no valid datastores attached to %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:289 +#, python-format +msgid "" +"Creating folder: %(child_folder_name)s under parent folder: " +"%(parent_folder)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:306 +#, python-format +msgid "Child folder already present: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:314 +#, python-format +msgid "Created child folder: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:365 +#, python-format +msgid "Spec for creating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:383 +#, python-format +msgid "" +"Creating volume backing name: %(name)s disk_type: %(disk_type)s size_kb: " +"%(size_kb)s at folder: %(folder)s resourse pool: %(resource_pool)s " +"datastore name: %(ds_name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:395 +#, python-format +msgid "Initiated creation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:398 +#, python-format +msgid "Successfully created volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:438 +#, python-format +msgid "Spec for relocating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:452 +#, python-format +msgid "" +"Relocating backing: %(backing)s to datastore: %(ds)s and resource pool: " +"%(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:462 +#, python-format +msgid "Initiated relocation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:464 +#, python-format +msgid "" +"Successfully relocated volume backing: %(backing)s to datastore: %(ds)s " +"and resource pool: %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:474 +#, python-format +msgid "Moving backing: %(backing)s to folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:479 +#, python-format +msgid "Initiated move of volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:482 +#, python-format +msgid "Successfully moved volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:494 +#, python-format +msgid "Snapshoting backing: %(backing)s with name: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:501 +#, python-format +msgid "Initiated snapshot of volume backing: %(backing)s named: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:505 +#, python-format +msgid "Successfully created snapshot: %(snap)s for volume backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:553 +#, python-format +msgid "Deleting the snapshot: %(name)s from backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:558 +#, python-format +msgid "" +"Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " +"delete anything." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:565 +#, python-format +msgid "Initiated snapshot: %(name)s deletion for backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:569 +#, python-format +msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:597 +#, python-format +msgid "Spec for cloning the backing: %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:613 +#, python-format +msgid "" +"Creating a clone of backing: %(back)s, named: %(name)s, clone type: " +"%(type)s from snapshot: %(snap)s on datastore: %(ds)s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:627 +#, python-format +msgid "Initiated clone of backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:638 +#, python-format +msgid "Deleting file: %(file)s under datacenter: %(dc)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:646 +#, python-format +msgid "Initiated deletion via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:648 +#, python-format +msgid "Successfully deleted file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:701 +msgid "Copying disk data before snapshot of the VM" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:711 +#, python-format +msgid "Initiated copying disk data via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:713 +#, python-format +msgid "Successfully copied disk at: %(src)s to: %(dest)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:722 +#, python-format +msgid "Deleting vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:729 +#, python-format +msgid "Initiated deleting vmdk file via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:731 +#, python-format +msgid "Deleted vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/windows/windows.py:102 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:47 +#, python-format +msgid "" +"check_for_setup_error: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:53 +msgid "check_for_setup_error: there is no ISCSI traffic listening." +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:63 +#, python-format +msgid "" +"get_host_information: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:73 +#, python-format +msgid "" +"get_host_information: the ISCSI target information could not be " +"retrieved. WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:105 +#, python-format +msgid "" +"associate_initiator_with_iscsi_target: an association between initiator: " +"%(init)s and target name: %(target)s could not be established. WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:123 +#, python-format +msgid "" +"delete_iscsi_target: error when deleting the iscsi target associated with" +" target name: %(target)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:139 +#, python-format +msgid "" +"create_volume: error when creating the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:157 +#, python-format +msgid "" +"delete_volume: error when deleting the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:177 +#, python-format +msgid "" +"create_snapshot: error when creating the snapshot name: %(vol_name)s . 
" +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:193 +#, python-format +msgid "" +"create_volume_from_snapshot: error when creating the volume name: " +"%(vol_name)s from snapshot name: %(snap_name)s. WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:208 +#, python-format +msgid "" +"delete_snapshot: error when deleting the snapshot name: %(snap_name)s . " +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:223 +#, python-format +msgid "" +"create_iscsi_target: error when creating iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:240 +#, python-format +msgid "" +"remove_iscsi_target: error when deleting iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:255 +#, python-format +msgid "" +"add_disk_to_target: error adding disk associated to volume : %(vol_name)s" +" to the target name: %(tar_name)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:273 +#, python-format +msgid "" +"copy_vhd_disk: error when copying disk from source path : %(src_path)s to" +" destination path: %(dest_path)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:290 +#, python-format +msgid "" +"extend: error when extending the volume: %(vol_name)s .WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/flows/common.py:52 +#, python-format +msgid "Restoring source %(source_volid)s status to %(status)s" +msgstr "" + +#: cinder/volume/flows/common.py:58 +#, python-format +msgid "" +"Failed setting source volume %(source_volid)s back to its initial " +"%(source_status)s status" +msgstr "" + +#: cinder/volume/flows/common.py:83 +#, python-format +msgid "Updating volume: %(volume_id)s with %(update)s due to: %(reason)s" +msgstr "" + +#: cinder/volume/flows/common.py:90 +#: cinder/volume/flows/manager/create_volume.py:676 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(update)s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:81 +#, python-format +msgid "Originating snapshot status must be one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:103 +#, python-format +msgid "" +"Unable to create a volume from an originating source volume when its " +"status is not one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:126 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than the snapshot size " +"%(snap_size)sGB. They must be >= original snapshot size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:135 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than original volume size " +"%(source_size)sGB. They must be >= original volume size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:144 +#, python-format +msgid "Volume size %(size)s must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:186 +#, python-format +msgid "" +"Size of specified image %(image_size)sGB is larger than volume size " +"%(volume_size)sGB." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:194 +#, python-format +msgid "" +"Volume size %(volume_size)sGB cannot be smaller than the image minDisk " +"size %(min_disk)sGB." 
+msgstr "" + +#: cinder/volume/flows/api/create_volume.py:212 +#, python-format +msgid "Metadata property key %s greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:217 +#, python-format +msgid "Metadata property key %s value greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:254 +#, python-format +msgid "Availability zone '%s' is invalid" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:267 +msgid "Volume must be in the same availability zone as the snapshot" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:276 +msgid "Volume must be in the same availability zone as the source volume" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:315 +msgid "Volume type will be changed to be the same as the source volume." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:463 +#, python-format +msgid "Failed destroying volume entry %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:546 +#, python-format +msgid "Failed rolling back quota for %s reservations" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:590 +#, python-format +msgid "Failed to update quota for deleting volume: %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:678 +#: cinder/volume/flows/manager/create_volume.py:192 +#, python-format +msgid "Volume %s: create failed" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:682 +msgid "Unexpected build error:" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:105 +#, python-format +msgid "" +"Volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d due to " +"%(reason)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:124 +#, python-format +msgid "Volume %s: re-scheduled" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:141 +#, python-format +msgid "Updating volume %(volume_id)s with %(update)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:146 +#, python-format +msgid "Volume %s: resetting 'creating' status failed." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:165 +#, python-format +msgid "Volume %s: rescheduling failed" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:308 +#, python-format +msgid "" +"Failed notifying about the volume action %(event)s for volume " +"%(volume_id)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:345 +#, python-format +msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:347 +#, python-format +msgid "" +"Failed updating volume %(vol_id)s metadata using the provided " +"%(src_type)s %(src_id)s metadata" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:405 +#, python-format +msgid "" +"Failed fetching snapshot %(snapshot_id)s bootable flag using the provided" +" glance snapshot %(snapshot_ref_id)s volume reference" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:418 +#, python-format +msgid "Marking volume %s as bootable." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:421 +#, python-format +msgid "Failed updating volume %(volume_id)s bootable flag to true" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:448 +#, python-format +msgid "" +"Attempting download of %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s." 
+msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:455 +#: cinder/volume/flows/manager/create_volume.py:466 +#, python-format +msgid "" +"Failed to copy image %(image_id)s to volume: %(volume_id)s, error: " +"%(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:461 +#, python-format +msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:475 +#, python-format +msgid "" +"Downloaded image %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s successfully." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:512 +#, python-format +msgid "" +"Creating volume glance metadata for volume %(volume_id)s backed by image " +"%(image_id)s with: %(vol_metadata)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:526 +#, python-format +msgid "" +"Cloning %(volume_id)s from image %(image_id)s at location " +"%(image_location)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:552 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(updates)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:574 +#, python-format +msgid "Unable to create volume. Volume driver %s not initialized" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:588 +#, python-format +msgid "" +"Volume %(volume_id)s: being created using %(functor)s with specification:" +" %(volume_spec)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:611 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with creation provided " +"model %(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:619 +#, python-format +msgid "Volume %s: creating export" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:633 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with driver provided model " +"%(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:680 +#, python-format +msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" +msgstr "" + +#~ msgid "Invalid request body" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: prefix %s" +#~ msgstr "" + +#~ msgid "Schedule volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete schedule volume using flow: %s" +#~ msgstr "" + +#~ msgid "Create volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete create volume workflow" +#~ msgstr "" + +#~ msgid "Expected volume result not found" +#~ msgstr "" + +#~ msgid "Manager volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete manager volume workflow" +#~ msgstr "" + +#~ msgid "Unable to update stats, driver is uninitialized" +#~ msgstr "" + +#~ msgid "Bad reponse from server: %s" +#~ msgstr "" + +#~ msgid "" +#~ msgstr "" + +#~ msgid "%(flow)s has moved into state %(state)s from state %(old_state)s" +#~ msgstr "" + +#~ msgid "No request spec, will not reschedule" +#~ msgstr "" + +#~ msgid "No retry filter property or associated retry info, will not reschedule" +#~ msgstr "" + +#~ msgid "Retry info not present, will not reschedule" +#~ msgstr "" + +#~ msgid "Clear capabilities" +#~ msgstr "" + +#~ msgid "This usually means the volume was never succesfully created." 
+#~ msgstr "" + +#~ msgid "setting LU uppper (end) limit to %s" +#~ msgstr "" + +#~ msgid "Can't find lun or lun goup in array" +#~ msgstr "" + +#~ msgid "Volume to be restored to is smaller than the backup to be restored" +#~ msgstr "" + +#~ msgid "Volume driver '%(driver)s' not initialized." +#~ msgstr "" + +#~ msgid "in looping call" +#~ msgstr "" + +#~ msgid "Is the appropriate service running?" +#~ msgstr "" + +#~ msgid "Could not find another host" +#~ msgstr "" + +#~ msgid "Not enough allocatable volume gigabytes remaining" +#~ msgstr "" + +#~ msgid "Unable to update stats on non-intialized Volume Group: %s" +#~ msgstr "" + +#~ msgid "do_setup: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "migrate_volume started with more than one vdisk copy" +#~ msgstr "" + +#~ msgid "migrate_volume: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "Selected datastore: %s for the volume." +#~ msgstr "" + +#~ msgid "There are no valid datastores present under %s." +#~ msgstr "" + +#~ msgid "Unable to create volume, driver not initialized" +#~ msgstr "" + +#~ msgid "Migration %(migration_id)s could not be found." +#~ msgstr "" + +#~ msgid "Bad driver response status: %(status)s" +#~ msgstr "" + +#~ msgid "Instance %(instance_id)s could not be found." +#~ msgstr "" + +#~ msgid "Volume retype failed: %(reason)s" +#~ msgstr "" + +#~ msgid "SIGTERM received" +#~ msgstr "" + +#~ msgid "Child %(pid)d exited with status %(code)d" +#~ msgstr "" + +#~ msgid "_wait_child %d" +#~ msgstr "" + +#~ msgid "wait wrap.failed %s" +#~ msgstr "" + +#~ msgid "" +#~ "Report interval must be less than " +#~ "service down time. Current config: " +#~ ". Setting " +#~ "service_down_time to: %(new_service_down_time)s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target for volume %(name)s." +#~ msgstr "" + +#~ msgid "Updating iscsi target: %s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target %(name)s: %(e)s" +#~ msgstr "" + +#~ msgid "Caught '%(exception)s' exception." +#~ msgstr "" + +#~ msgid "Get code level failed" +#~ msgstr "" + +#~ msgid "do_setup: Could not get system name" +#~ msgstr "" + +#~ msgid "Failed to get license information." +#~ msgstr "" + +#~ msgid "do_setup: No configured nodes" +#~ msgstr "" + +#~ msgid "enter: _get_chap_secret_for_host: host name %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_chap_secret_for_host: host name " +#~ "%(host_name)s with secret %(chap_secret)s" +#~ msgstr "" + +#~ msgid "" +#~ "_create_host: Cannot clean host name. " +#~ "Host name is not unicode or string" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: %s" +#~ msgstr "" + +#~ msgid "leave: _get_host_from_connector: host %s" +#~ msgstr "" + +#~ msgid "enter: _create_host: host %s" +#~ msgstr "" + +#~ msgid "_create_host: No connector ports" +#~ msgstr "" + +#~ msgid "leave: _create_host: host %(host)s - %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +#~ msgstr "" + +#~ msgid "" +#~ "storwize_svc_multihostmap_enabled is set to " +#~ "False, Not allow multi host mapping" +#~ msgstr "" + +#~ msgid "volume %s mapping to multi host" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _map_vol_to_host: LUN %(result_lun)s, " +#~ "volume %(volume_name)s, host %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "leave: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "_create_host failed to return the host name." 
+#~ msgstr "" + +#~ msgid "_get_host_from_connector failed to return the host name for connector" +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to any host found." +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: Multiple mappings of " +#~ "volume %(vol_name)s found, no host " +#~ "specified." +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to host %(host_name)s found" +#~ msgstr "" + +#~ msgid "protocol must be specified as ' iSCSI' or ' FC'" +#~ msgstr "" + +#~ msgid "enter: _create_vdisk: vdisk %s " +#~ msgstr "" + +#~ msgid "" +#~ "_create_vdisk %(name)s - did not find success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find success " +#~ "message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find mapping " +#~ "id in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to prepare FlashCopy" +#~ " from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "Unexecpted mapping status %(status)s for " +#~ "mapping %(id)s. Attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Mapping %(id)s prepare failed to " +#~ "complete within the allotted %(to)d " +#~ "seconds timeout. Terminating." +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s with " +#~ "exception %(ex)s" +#~ msgstr "" + +#~ msgid "_prepare_fc_map: %s" +#~ msgstr "" + +#~ msgid "" +#~ "_start_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "enter: _run_flashcopy: execute FlashCopy from" +#~ " source %(source)s to target %(target)s" +#~ msgstr "" + +#~ msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +#~ msgstr "" + +#~ msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %(src_vdisk)s (%(src_id)s) does not exist" +#~ msgstr "" + +#~ msgid "" +#~ "_create_copy: cannot get source vdisk " +#~ "%(src)s capacity from vdisk attributes " +#~ "%(attr)s" +#~ msgstr "" + +#~ msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_flashcopy_mapping_attributes: mapping " +#~ "%(fc_map_id)s, attributes %(attributes)s" +#~ msgstr "" + +#~ msgid "enter: _is_vdisk_defined: vdisk %s " +#~ msgstr "" + +#~ msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +#~ msgstr "" + +#~ msgid "enter: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "warning: Tried to delete vdisk %s but it does not exist." 
+#~ msgstr "" + +#~ msgid "leave: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "" +#~ "_add_vdisk_copy %(name)s - did not find" +#~ " success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "_get_vdisk_copy_attrs: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "_get_pool_attrs: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "enter: _execute_command_and_parse_attributes: command %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _execute_command_and_parse_attributes:\n" +#~ "command: %(cmd)s\n" +#~ "attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "_get_hdr_dic: attribute headers and values do not match.\n" +#~ " Headers: %(header)s\n" +#~ " Values: %(row)s" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ "stdout: %(out)s\n" +#~ "stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "Did not find expected column in %(fun)s: %(hdr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Volume size %(size)s cannot be lesser" +#~ " than the snapshot size %(snap_size)s. " +#~ "They must be >= original snapshot " +#~ "size." +#~ msgstr "" + +#~ msgid "" +#~ "Clones currently disallowed when %(size)s " +#~ "< %(source_size)s. They must be >= " +#~ "original volume size." +#~ msgstr "" + +#~ msgid "" +#~ "Size of specified image %(image_size)s " +#~ "is larger than volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "" +#~ "Image minDisk size %(min_disk)s is " +#~ "larger than the volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "Updating volume %(volume_id)s with %(update)s" +#~ msgstr "" + +#~ msgid "Volume %s: resetting 'creating' status failed" +#~ msgstr "" + +#~ msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s" +#~ msgstr "" + +#~ msgid "Marking volume %s as bootable" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting download of %(image_id)s " +#~ "(%(image_location)s) to volume %(volume_id)s" +#~ msgstr "" + +#~ msgid "" +#~ "Downloaded image %(image_id)s (%(image_location)s)" +#~ " to volume %(volume_id)s successfully" +#~ msgstr "" + +#~ msgid "" +#~ "Creating volume glance metadata for " +#~ "volume %(volume_id)s backed by image " +#~ "%(image_id)s with: %(vol_metadata)s" +#~ msgstr "" + +#~ msgid "" +#~ "Cloning %(volume_id)s from image %(image_id)s" +#~ " at location %(image_location)s" +#~ msgstr "" + diff --git a/cinder/locale/mr_IN/LC_MESSAGES/cinder.po b/cinder/locale/mr_IN/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..b6b362fb3f --- /dev/null +++ b/cinder/locale/mr_IN/LC_MESSAGES/cinder.po @@ -0,0 +1,10085 @@ +# Marathi (India) translations for cinder. +# Copyright (C) 2013 ORGANIZATION +# This file is distributed under the same license as the cinder project. 
+# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Cinder\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-02-03 06:16+0000\n" +"PO-Revision-Date: 2013-10-16 22:17+0000\n" +"Last-Translator: Tom Fifield \n" +"Language-Team: Marathi (India) " +"(http://www.transifex.com/projects/p/openstack/language/mr_IN/)\n" +"Plural-Forms: nplurals=2; plural=(n != 1)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:102 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:66 cinder/brick/exception.py:33 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:88 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:107 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:112 +#, python-format +msgid "Volume driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:116 +#, python-format +msgid "Backup driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:120 +#, python-format +msgid "Connection to glance failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:124 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:129 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:133 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:137 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:141 +msgid "Volume driver not ready." +msgstr "" + +#: cinder/exception.py:145 cinder/brick/exception.py:74 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:150 +#, python-format +msgid "Invalid snapshot: %(reason)s" +msgstr "" + +#: cinder/exception.py:154 +#, python-format +msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:159 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:163 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:167 +msgid "The results are invalid." +msgstr "" + +#: cinder/exception.py:171 +#, python-format +msgid "Invalid input received: %(reason)s" +msgstr "" + +#: cinder/exception.py:175 +#, python-format +msgid "Invalid volume type: %(reason)s" +msgstr "" + +#: cinder/exception.py:179 +#, python-format +msgid "Invalid volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:183 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:187 +#, python-format +msgid "Invalid host: %(reason)s" +msgstr "" + +#: cinder/exception.py:193 cinder/brick/exception.py:81 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:197 +#, python-format +msgid "Invalid auth key: %(reason)s" +msgstr "" + +#: cinder/exception.py:201 +#, python-format +msgid "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" +msgstr "" + +#: cinder/exception.py:206 +msgid "Service is unavailable at this time." 
+msgstr "" + +#: cinder/exception.py:210 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:214 +#, python-format +msgid "The device in the path %(path)s is unavailable: %(reason)s" +msgstr "" + +#: cinder/exception.py:218 +#, python-format +msgid "Expected a uuid but received %(uuid)s." +msgstr "" + +#: cinder/exception.py:222 cinder/brick/exception.py:68 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:228 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:232 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "" +"Volume %(volume_id)s has no administration metadata with key " +"%(metadata_key)s." +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Invalid metadata: %(reason)s" +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Invalid metadata size: %(reason)s" +msgstr "" + +#: cinder/exception.py:250 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:255 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:264 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:269 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s deletion is not allowed with volumes " +"present with the type." +msgstr "" + +#: cinder/exception.py:274 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:278 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:282 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:287 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:291 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:295 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:328 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:332 +#, python-format +msgid "Unknown quota resources %(unknown)s." 
+msgstr "" + +#: cinder/exception.py:336 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:340 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:344 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:348 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:352 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:356 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:365 +#, python-format +msgid "Volume Type %(id)s already exists." +msgstr "" + +#: cinder/exception.py:369 +#, python-format +msgid "Volume type encryption for type %(type_id)s already exists." +msgstr "" + +#: cinder/exception.py:373 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:377 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:381 +#, python-format +msgid "Could not find parameter %(param)s" +msgstr "" + +#: cinder/exception.py:385 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:389 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:398 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:402 +#, python-format +msgid "Quota exceeded: code=%(code)s" +msgstr "" + +#: cinder/exception.py:409 +#, python-format +msgid "" +"Requested volume or snapshot exceeds allowed Gigabytes quota. Requested " +"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." +msgstr "" + +#: cinder/exception.py:415 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:419 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:423 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:427 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:432 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:436 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:440 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:444 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:449 +#, python-format +msgid "Glance metadata for volume/snapshot %(id)s cannot be found." 
+msgstr "" + +#: cinder/exception.py:453 +#, python-format +msgid "Failed to export for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:457 +#, python-format +msgid "Failed to create metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:461 +#, python-format +msgid "Failed to update metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:465 +#, python-format +msgid "Failed to copy metadata to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:469 +#, python-format +msgid "Failed to copy image to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:473 +msgid "Invalid Ceph args provided for backup rbd operation" +msgstr "" + +#: cinder/exception.py:477 +msgid "An error has occurred during backup operation" +msgstr "" + +#: cinder/exception.py:481 +msgid "Backup RBD operation failed" +msgstr "" + +#: cinder/exception.py:485 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:489 +msgid "Failed to identify volume backend." +msgstr "" + +#: cinder/exception.py:493 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "" + +#: cinder/exception.py:497 +#, python-format +msgid "Connection to swift failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:501 +#, python-format +msgid "Transfer %(transfer_id)s could not be found." +msgstr "" + +#: cinder/exception.py:505 +#, python-format +msgid "Volume migration failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:509 +#, python-format +msgid "SSH command injection detected: %(command)s" +msgstr "" + +#: cinder/exception.py:513 +#, python-format +msgid "QoS Specs %(specs_id)s already exists." +msgstr "" + +#: cinder/exception.py:517 +#, python-format +msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:522 +#, python-format +msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "No such QoS spec %(specs_id)s." +msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:536 +#, python-format +msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:541 +#, python-format +msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." +msgstr "" + +#: cinder/exception.py:546 +#, python-format +msgid "Invalid qos specs: %(reason)s" +msgstr "" + +#: cinder/exception.py:550 +#, python-format +msgid "QoS Specs %(specs_id)s is still associated with entities." +msgstr "" + +#: cinder/exception.py:554 +#, python-format +msgid "key manager error: %(reason)s" +msgstr "" + +#: cinder/exception.py:560 +msgid "Coraid Cinder Driver exception." +msgstr "" + +#: cinder/exception.py:564 +msgid "Failed to encode json data." +msgstr "" + +#: cinder/exception.py:568 +msgid "Login on ESM failed." +msgstr "" + +#: cinder/exception.py:572 +msgid "Relogin on ESM failed." +msgstr "" + +#: cinder/exception.py:576 +#, python-format +msgid "Group with name \"%(group_name)s\" not found." +msgstr "" + +#: cinder/exception.py:580 +#, python-format +msgid "ESM configure request failed: %(message)s." +msgstr "" + +#: cinder/exception.py:584 +#, python-format +msgid "Coraid ESM not available with reason: %(reason)s." +msgstr "" + +#: cinder/exception.py:589 +msgid "Zadara Cinder Driver exception." 
+msgstr "" + +#: cinder/exception.py:593 +#, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:597 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:601 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:605 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:609 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:613 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:618 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:622 +msgid "SolidFire Cinder Driver exception" +msgstr "" + +#: cinder/exception.py:626 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:630 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:636 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:641 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:645 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:649 cinder/exception.py:662 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:654 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:658 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/manager.py:133 +msgid "Notifying Schedulers of capabilities ..." +msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:105 +#, python-format +msgid "" +"Default quota for resource: %(res)s is set by the default quota flag: " +"quota_%(res)s, it is now deprecated. Please use the the default quota " +"class for default quota." +msgstr "" + +#: cinder/quota.py:748 +#, python-format +msgid "Created reservations %s" +msgstr "" + +#: cinder/quota.py:770 +#, python-format +msgid "Failed to commit reservations %s" +msgstr "" + +#: cinder/quota.py:790 +#, python-format +msgid "Failed to roll back reservations %s" +msgstr "" + +#: cinder/quota.py:876 +msgid "Cannot register resource" +msgstr "" + +#: cinder/quota.py:879 +msgid "Cannot register resources" +msgstr "" + +#: cinder/quota_utils.py:46 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume - " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/quota_utils.py:56 cinder/transfer/api.py:168 +#: cinder/volume/flows/api/create_volume.py:520 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/service.py:95 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:108 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:148 +#, python-format +msgid "" +"Report interval must be less than service down time. Current config " +"service_down_time: %(service_down_time)s, report_interval for this: " +"service is: %(report_interval)s. 
Setting global service_down_time to: " +"%(new_down_time)s" +msgstr "" + +#: cinder/service.py:216 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:255 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:270 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:276 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:298 +#, python-format +msgid "" +"Value of config option %(name)s_workers must be integer greater than 1. " +"Input value ignored." +msgstr "" + +#: cinder/service.py:373 +msgid "serve() can only be called once" +msgstr "" + +#: cinder/service.py:379 cinder/openstack/common/service.py:166 +#: cinder/openstack/common/service.py:384 +msgid "Full set of CONF:" +msgstr "" + +#: cinder/service.py:387 +#, python-format +msgid "%s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Can not translate %s to integer." +msgstr "" + +#: cinder/utils.py:127 +#, python-format +msgid "May specify only one of %s" +msgstr "" + +#: cinder/utils.py:212 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:228 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:412 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:423 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:698 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:759 +#, python-format +msgid "Volume driver %s not initialized" +msgstr "" + +#: cinder/wsgi.py:127 cinder/openstack/common/sslutils.py:50 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: cinder/wsgi.py:130 cinder/openstack/common/sslutils.py:53 +#, python-format +msgid "Unable to find ca_file : %s" +msgstr "" + +#: cinder/wsgi.py:133 cinder/openstack/common/sslutils.py:56 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: cinder/wsgi.py:136 cinder/openstack/common/sslutils.py:59 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:169 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:206 +#, python-format +msgid "Started %(name)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:244 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:313 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." 
+msgstr "" + +#: cinder/api/common.py:92 cinder/api/common.py:126 cinder/volume/api.py:266 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:95 cinder/api/common.py:130 cinder/volume/api.py:263 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:120 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:134 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:162 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:189 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:182 +msgid "Initializing extension manager." +msgstr "" + +#: cinder/api/extensions.py:197 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:235 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:236 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:240 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:256 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:262 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:276 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:287 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:356 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:266 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:463 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:786 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:907 +msgid "subclasses must implement construct()!" 
+msgstr "" + +#: cinder/api/contrib/admin_actions.py:81 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:227 +#: cinder/api/contrib/volume_transfer.py:157 +#: cinder/api/contrib/volume_transfer.py:193 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:224 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:60 +msgid "Snapshot not found." +msgstr "" + +#: cinder/api/contrib/hosts.py:86 cinder/api/openstack/wsgi.py:245 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:136 +#, python-format +msgid "Host '%s' could not be found." +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:168 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:180 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:206 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:214 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:111 +msgid "Please specify a name for QoS specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:220 +msgid "Failed to disassociate qos specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:222 +msgid "Qos specs still in use." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:298 +#: cinder/api/contrib/qos_specs_manage.py:351 +msgid "Volume Type id must not be None." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:72 +msgid "Missing required element quota_class_set in request body." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:81 +msgid "Quota class limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:85 +msgid "Quota class limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:60 +msgid "Quota limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quotas.py:65 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:100 +msgid "Missing required element quota_set in request body." +msgstr "" + +#: cinder/api/contrib/quotas.py:111 +#, python-format +msgid "Bad key(s) in quota set: %s" +msgstr "" + +#: cinder/api/contrib/scheduler_hints.py:36 +msgid "Malformed scheduler_hints attribute" +msgstr "" + +#: cinder/api/contrib/services.py:84 +msgid "" +"Query by service parameter is deprecated. Please use binary parameter " +"instead." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:51 +msgid "'status' must be specified." 
+msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:61 +#, python-format +msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:67 +#, python-format +msgid "" +"Provided snapshot status %(provided)s not allowed for snapshot with " +"status %(current)s." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:79 +msgid "progress must be an integer percentage" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:101 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:105 +#: cinder/api/v1/snapshot_metadata.py:75 cinder/api/v1/volume_metadata.py:75 +#: cinder/api/v2/snapshot_metadata.py:75 cinder/api/v2/volume_metadata.py:74 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:108 +#: cinder/api/v1/snapshot_metadata.py:79 cinder/api/v1/volume_metadata.py:79 +#: cinder/api/v2/snapshot_metadata.py:79 cinder/api/v2/volume_metadata.py:78 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:150 +msgid "" +"Key names can only contain alphanumeric characters, underscores, periods," +" colons and hyphens." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:99 +#, python-format +msgid "" +"Invalid request to attach volume to an instance %(instance_uuid)s and a " +"host %(host_name)s simultaneously" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:107 +msgid "Invalid request to attach volume to an invalid target" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:111 +msgid "" +"Invalid request to attach volume with an invalid mode. Attaching mode " +"should be 'rw' or 'ro'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:196 +msgid "Unable to fetch connection information from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:216 +msgid "Unable to terminate volume connection from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:229 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:237 +msgid "Bad value for 'force' parameter." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:240 +msgid "'force' is not string or bool." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:280 +msgid "New volume size must be specified as an integer." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:299 +msgid "Must specify readonly in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:307 +msgid "Bad value for 'readonly'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:311 +msgid "'readonly' not string or bool" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:325 +msgid "New volume type must be specified." 
+msgstr "" + +#: cinder/api/contrib/volume_transfer.py:131 +msgid "Listing volume transfers" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:147 +#, python-format +msgid "Creating new volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:162 +#, python-format +msgid "Creating transfer of volume %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:183 +#, python-format +msgid "Accepting volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:196 +#, python-format +msgid "Accepting transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:217 +#, python-format +msgid "Delete transfer with id: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:64 +msgid "key_size must be non-negative" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:67 +msgid "key_size must be an integer" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:73 +msgid "provider must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:75 +msgid "control_location must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:83 +#, python-format +msgid "Valid control location are: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:111 +msgid "Cannot create encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:115 +msgid "Create body is not valid." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:157 +msgid "Cannot delete encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/middleware/auth.py:108 +msgid "Invalid service catalog json." +msgstr "" + +#: cinder/api/middleware/fault.py:44 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:53 cinder/api/openstack/wsgi.py:984 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/fault.py:69 +#, python-format +msgid "%(exception)s: %(explanation)s" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:55 cinder/api/middleware/sizelimit.py:64 +#: cinder/api/middleware/sizelimit.py:78 +msgid "Request is too large." +msgstr "" + +#: cinder/api/openstack/__init__.py:69 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:80 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:104 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:126 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." 
+msgstr "" + +#: cinder/api/openstack/wsgi.py:220 cinder/api/openstack/wsgi.py:634 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:639 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:677 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:682 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:685 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:793 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:799 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:803 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:914 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:917 cinder/api/openstack/wsgi.py:930 +#: cinder/api/v1/snapshot_metadata.py:53 cinder/api/v1/snapshot_metadata.py:71 +#: cinder/api/v1/snapshot_metadata.py:96 cinder/api/v1/snapshot_metadata.py:121 +#: cinder/api/v1/volume_metadata.py:53 cinder/api/v1/volume_metadata.py:71 +#: cinder/api/v1/volume_metadata.py:96 cinder/api/v1/volume_metadata.py:121 +#: cinder/api/v2/snapshot_metadata.py:53 cinder/api/v2/snapshot_metadata.py:71 +#: cinder/api/v2/snapshot_metadata.py:96 cinder/api/v2/snapshot_metadata.py:121 +#: cinder/api/v2/volume_metadata.py:52 cinder/api/v2/volume_metadata.py:70 +#: cinder/api/v2/volume_metadata.py:95 cinder/api/v2/volume_metadata.py:120 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:927 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:939 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:987 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:139 cinder/api/v2/limits.py:138 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:264 cinder/api/v2/limits.py:261 +msgid "This request was rate-limited." 
+msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:37 cinder/api/v1/snapshot_metadata.py:117 +#: cinder/api/v1/snapshot_metadata.py:156 cinder/api/v2/snapshot_metadata.py:37 +#: cinder/api/v2/snapshot_metadata.py:117 +#: cinder/api/v2/snapshot_metadata.py:156 +msgid "snapshot does not exist" +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:139 +#: cinder/api/v1/snapshot_metadata.py:149 cinder/api/v1/volume_metadata.py:139 +#: cinder/api/v1/volume_metadata.py:149 cinder/api/v2/snapshot_metadata.py:139 +#: cinder/api/v2/snapshot_metadata.py:149 cinder/api/v2/volume_metadata.py:138 +#: cinder/api/v2/volume_metadata.py:148 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:119 cinder/api/v2/snapshots.py:120 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:184 +msgid "'volume_id' must be specified" +msgstr "" + +#: cinder/api/v1/snapshots.py:182 cinder/api/v2/snapshots.py:193 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:186 cinder/api/v2/snapshots.py:202 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:37 cinder/api/v1/volume_metadata.py:117 +#: cinder/api/v1/volume_metadata.py:156 cinder/api/v2/volume_metadata.py:36 +#: cinder/api/v2/volume_metadata.py:116 cinder/api/v2/volume_metadata.py:155 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:111 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:290 cinder/api/v2/volumes.py:228 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:344 cinder/api/v1/volumes.py:348 +#: cinder/api/v2/volumes.py:298 cinder/api/v2/volumes.py:302 +msgid "Invalid imageRef provided." +msgstr "" + +#: cinder/api/v1/volumes.py:388 cinder/api/v2/volumes.py:354 +#, python-format +msgid "snapshot id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:401 +#, python-format +msgid "source vol id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:412 cinder/api/v2/volumes.py:377 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/v1/volumes.py:496 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/snapshots.py:111 cinder/api/v2/snapshots.py:126 +#: cinder/api/v2/snapshots.py:267 +msgid "Snapshot could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:174 cinder/api/v2/snapshots.py:234 +#: cinder/api/v2/volumes.py:313 cinder/api/v2/volumes.py:419 +#, python-format +msgid "Missing required element '%s' in request body" +msgstr "" + +#: cinder/api/v2/snapshots.py:190 cinder/api/v2/volumes.py:217 +#: cinder/api/v2/volumes.py:234 cinder/api/v2/volumes.py:449 +msgid "Volume could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:230 cinder/api/v2/volumes.py:415 +msgid "Missing request body" +msgstr "" + +#: cinder/api/v2/types.py:70 +msgid "Volume type not found" +msgstr "" + +#: cinder/api/v2/volumes.py:237 +msgid "Volume cannot be deleted while in attached state" +msgstr "" + +#: cinder/api/v2/volumes.py:343 +msgid "Volume type not found." 
+msgstr "" + +#: cinder/api/v2/volumes.py:366 +#, python-format +msgid "source volume id:%s not found" +msgstr "" + +#: cinder/api/v2/volumes.py:472 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:66 +msgid "Backup status must be available or error" +msgstr "" + +#: cinder/backup/api.py:105 +msgid "Volume to be backed up must be available" +msgstr "" + +#: cinder/backup/api.py:140 +msgid "Backup status must be available" +msgstr "" + +#: cinder/backup/api.py:145 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:154 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:170 +msgid "Volume to be restored to must be available" +msgstr "" + +#: cinder/backup/api.py:176 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." +msgstr "" + +#: cinder/backup/api.py:181 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:97 +msgid "NULL host not allowed for volume backend lookup." +msgstr "" + +#: cinder/backup/manager.py:100 +#, python-format +msgid "Checking hostname '%s' for backend info." +msgstr "" + +#: cinder/backup/manager.py:107 +#, python-format +msgid "Backend not found in hostname (%s) so using default." +msgstr "" + +#: cinder/backup/manager.py:117 +#, python-format +msgid "Manager requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:120 cinder/backup/manager.py:132 +msgid "Fetching default backend." +msgstr "" + +#: cinder/backup/manager.py:123 +#, python-format +msgid "Volume manager for backend '%s' does not exist." +msgstr "" + +#: cinder/backup/manager.py:129 +#, python-format +msgid "Driver requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:147 +#, python-format +msgid "" +"Registering backend %(backend)s (host=%(host)s " +"backend_name=%(backend_name)s)." +msgstr "" + +#: cinder/backup/manager.py:154 +#, python-format +msgid "Registering default backend %s." +msgstr "" + +#: cinder/backup/manager.py:158 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)." +msgstr "" + +#: cinder/backup/manager.py:165 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s." +msgstr "" + +#: cinder/backup/manager.py:184 +msgid "Cleaning up incomplete backup operations." +msgstr "" + +#: cinder/backup/manager.py:189 +#, python-format +msgid "Resetting volume %s to available (was backing-up)." +msgstr "" + +#: cinder/backup/manager.py:194 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)." +msgstr "" + +#: cinder/backup/manager.py:206 +#, python-format +msgid "Resetting backup %s to error (was creating)." +msgstr "" + +#: cinder/backup/manager.py:212 +#, python-format +msgid "Resetting backup %s to available (was restoring)." +msgstr "" + +#: cinder/backup/manager.py:217 +#, python-format +msgid "Resuming delete on backup: %s." +msgstr "" + +#: cinder/backup/manager.py:225 +#, python-format +msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:237 +#, python-format +msgid "" +"Create backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s." 
+msgstr "" + +#: cinder/backup/manager.py:249 +#, python-format +msgid "" +"Create backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:282 +#, python-format +msgid "Create backup finished. backup: %s." +msgstr "" + +#: cinder/backup/manager.py:286 +#, python-format +msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:299 +#, python-format +msgid "" +"Restore backup aborted: expected volume status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:310 +#, python-format +msgid "" +"Restore backup aborted: expected backup status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:329 +#, python-format +msgid "" +"Restore backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:360 +#, python-format +msgid "" +"Restore backup finished, backup %(backup_id)s restored to volume " +"%(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:379 +#, python-format +msgid "Delete backup started, backup: %s." +msgstr "" + +#: cinder/backup/manager.py:386 +#, python-format +msgid "" +"Delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:399 +#, python-format +msgid "" +"Delete backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:422 +#, python-format +msgid "Delete backup finished, backup %s deleted." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:116 +msgid "" +"rbd striping not supported - ignoring configuration settings for rbd " +"striping" +msgstr "" + +#: cinder/backup/drivers/ceph.py:147 +#, python-format +msgid "invalid user '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:213 +msgid "backup_id required" +msgstr "" + +#: cinder/backup/drivers/ceph.py:224 +#, python-format +msgid "discarding %(length)s bytes from offset %(offset)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:232 +#, python-format +msgid "writing zeroes chunk %d" +msgstr "" + +#: cinder/backup/drivers/ceph.py:246 +#, python-format +msgid "transferring data between '%(src)s' and '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:250 +#, python-format +msgid "%(chunks)s chunks of %(bytes)s bytes to be transferred" +msgstr "" + +#: cinder/backup/drivers/ceph.py:269 +#, python-format +msgid "transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:279 +#, python-format +msgid "transferring remaining %s bytes" +msgstr "" + +#: cinder/backup/drivers/ceph.py:295 +#, python-format +msgid "creating base image '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:322 cinder/backup/drivers/ceph.py:603 +#, python-format +msgid "deleting backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:325 +msgid "no backup snapshot to delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:361 +#, python-format +msgid "trying diff format name format basename='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:369 +#, python-format +msgid "image %s not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:377 +#, python-format +msgid "base image still has %s snapshots so skipping base image delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:382 +#, python-format +msgid "deleting base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:389 +#, python-format +msgid "image busy, retrying %(retries)s more time(s) in %(delay)ss" +msgstr "" + +#: cinder/backup/drivers/ceph.py:394 +msgid "max retries reached - raising error" +msgstr "" + +#: cinder/backup/drivers/ceph.py:397 +#, python-format +msgid "base backup image='%s' deleted)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:407 +#, python-format +msgid "deleting source snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:453 +#, python-format +msgid "performing differential transfer from '%(src)s' to '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:478 +#, python-format +msgid "rbd diff op failed - (ret=%(ret)s stderr=%(stderr)s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:488 +#, python-format +msgid "image '%s' not found - trying diff format name" +msgstr "" + +#: cinder/backup/drivers/ceph.py:493 +#, python-format +msgid "diff format image '%s' not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:528 +#, python-format +msgid "using --from-snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:543 +#, python-format +msgid "source snap '%s' is stale so deleting" +msgstr "" + +#: cinder/backup/drivers/ceph.py:555 +#, python-format +msgid "" +"snap='%(snap)s' does not exist in base image='%(base)s' - aborting " +"incremental backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:566 +#, python-format +msgid "creating backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:586 +#, python-format +msgid "differential backup transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:595 +msgid "differential backup transfer failed" +msgstr "" + +#: 
cinder/backup/drivers/ceph.py:625 +#, python-format +msgid "creating base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:634 +msgid "copying data" +msgstr "" + +#: cinder/backup/drivers/ceph.py:694 +#, python-format +msgid "looking for snapshot of backup base '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:697 +#, python-format +msgid "backup base '%s' has no snapshots" +msgstr "" + +#: cinder/backup/drivers/ceph.py:704 +#, python-format +msgid "backup '%s' has no snapshot" +msgstr "" + +#: cinder/backup/drivers/ceph.py:708 +#, python-format +msgid "backup should only have one snapshot but instead has %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:713 +#, python-format +msgid "found snapshot '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:734 +msgid "need non-zero volume size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:751 +#, python-format +msgid "Starting backup of volume='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:764 +msgid "forcing full backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:776 +#, python-format +msgid "backup '%s' finished." +msgstr "" + +#: cinder/backup/drivers/ceph.py:834 +msgid "adjusting restore vol size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:846 +#, python-format +msgid "trying incremental restore from base='%(base)s' snap='%(snap)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:858 +msgid "differential restore failed, trying full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:869 +#, python-format +msgid "restore transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:916 +#, python-format +msgid "rbd has %s extents" +msgstr "" + +#: cinder/backup/drivers/ceph.py:938 +msgid "dest volume is original volume - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:959 +msgid "destination has extents - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:964 +#, python-format +msgid "no restore point found for backup='%s', forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:995 +msgid "forcing full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1005 +#, python-format +msgid "starting restore from Ceph backup=%(src)s to volume=%(dest)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1016 +msgid "volume_file does not support fileno() so skipping fsync()" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1021 +msgid "restore finished successfully." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:1023 +#, python-format +msgid "restore finished with error - %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1029 +#, python-format +msgid "delete started for backup=%s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1034 +msgid "rbd image not found but continuing anyway so that db entry can be removed" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1037 +#, python-format +msgid "delete '%s' finished with warning" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1039 +#, python-format +msgid "delete '%s' finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:106 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:123 +#, python-format +msgid "single_user auth mode enabled, but %(param)s not set" +msgstr "" + +#: cinder/backup/drivers/swift.py:141 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:146 +#, python-format +msgid "container %s does not exist" +msgstr "" + +#: cinder/backup/drivers/swift.py:151 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/drivers/swift.py:157 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:173 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:182 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:192 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:209 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/drivers/swift.py:214 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:219 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:224 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/drivers/swift.py:234 +#, python-format +msgid "volume size %d is invalid." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:248 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:271 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/drivers/swift.py:278 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:287 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/drivers/swift.py:291 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/drivers/swift.py:297 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:301 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:304 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:312 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/drivers/swift.py:328 cinder/backup/drivers/tsm.py:324 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/drivers/swift.py:345 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/drivers/swift.py:350 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:356 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/drivers/swift.py:362 +#, python-format +msgid "" +"restoring object from swift. backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:378 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/drivers/swift.py:401 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:409 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:423 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:428 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:432 cinder/backup/drivers/tsm.py:378 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:446 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:455 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:458 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:468 cinder/backup/drivers/tsm.py:440 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/backup/drivers/tsm.py:85 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to create device hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:143 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to obtain backup success notification from " +"server.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:173 +#, python-format +msgid "" +"restore: %(vol_id)s Failed.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:199 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a block device." +msgstr "" + +#: cinder/backup/drivers/tsm.py:206 +#, python-format +msgid "backup: %(vol_id)s Failed. Cannot obtain real path to device %(path)s." +msgstr "" + +#: cinder/backup/drivers/tsm.py:213 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a file." +msgstr "" + +#: cinder/backup/drivers/tsm.py:260 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to remove backup hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:286 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to TSM, volume path: " +"%(volume_path)s," +msgstr "" + +#: cinder/backup/drivers/tsm.py:298 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:308 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:338 +#, python-format +msgid "" +"restore: starting restore of backup from TSM to volume %(volume_id)s, " +"backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:352 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:362 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:413 +#, python-format +msgid "" +"delete: %(vol_id)s Failed to run dsmc with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:421 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments with " +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:432 +#, python-format +msgid "" +"delete: %(vol_id)s Failed with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/brick/exception.py:55 +#, python-format +msgid "Exception in string format operation. 
msg='%s'" +msgstr "" + +#: cinder/brick/exception.py:85 +msgid "We are unable to locate any Fibre Channel devices." +msgstr "" + +#: cinder/brick/exception.py:89 +msgid "Unable to find a Fibre Channel volume device." +msgstr "" + +#: cinder/brick/exception.py:93 +#, python-format +msgid "Volume device not found at %(device)s." +msgstr "" + +#: cinder/brick/exception.py:97 +#, python-format +msgid "Unable to find Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:101 +#, python-format +msgid "Failed to create Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:105 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:109 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:113 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:117 +#, python-format +msgid "Connect to volume via protocol %(protocol)s not supported." +msgstr "" + +#: cinder/brick/initiator/connector.py:127 +#, python-format +msgid "Invalid InitiatorConnector protocol specified %(protocol)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:140 +#, python-format +msgid "Failed to access the device on the path %(path)s: %(error)s %(info)s." +msgstr "" + +#: cinder/brick/initiator/connector.py:229 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. Try" +" number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:242 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:317 +#, python-format +msgid "Could not find the iSCSI Initiator File %s" +msgstr "" + +#: cinder/brick/initiator/connector.py:609 +msgid "We are unable to locate any Fibre Channel devices" +msgstr "" + +#: cinder/brick/initiator/connector.py:619 +#, python-format +msgid "Looking for Fibre Channel dev %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:629 +msgid "Fibre Channel volume device not found." +msgstr "" + +#: cinder/brick/initiator/connector.py:633 +#, python-format +msgid "Fibre volume not yet found. Will rescan & retry. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:649 +#, python-format +msgid "Found Fibre Channel volume %(name)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:658 +#, python-format +msgid "Multipath device discovered %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:776 +#, python-format +msgid "AoE volume not yet found at: %(path)s. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:789 +#, python-format +msgid "Found AoE device %(path)s (after %(tries)s rediscover)" +msgstr "" + +#: cinder/brick/initiator/connector.py:815 +#, python-format +msgid "aoe-discover: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:825 +#, python-format +msgid "aoe-revalidate %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:834 +#, python-format +msgid "aoe-flush %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:858 +msgid "" +"Connection details not present. RemoteFsClient may not initialize " +"properly." 
+msgstr "" + +#: cinder/brick/initiator/connector.py:915 +msgid "Invalid connection_properties specified no device_path attribute" +msgstr "" + +#: cinder/brick/initiator/linuxfc.py:50 cinder/brick/initiator/linuxfc.py:56 +msgid "systool is not installed" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:99 +#: cinder/brick/initiator/linuxscsi.py:107 +#: cinder/brick/initiator/linuxscsi.py:124 +#, python-format +msgid "multipath call failed exit (%(code)s)" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:145 +#, python-format +msgid "Couldn't find multipath device %(line)s" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:149 +#, python-format +msgid "Found multipath device = %(mdev)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:140 +msgid "Attempting recreate of backing lun..." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:158 +#, python-format +msgid "" +"Failed to recover attempt to create iscsi backing lun for volume " +"id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:177 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:184 +#, python-format +msgid "" +"Created volume path %(vp)s,\n" +"content: %(vc)%" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:216 cinder/brick/iscsi/iscsi.py:365 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:227 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:258 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:262 +#, python-format +msgid "Volume path %s does not exist, nothing to remove." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:280 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:290 cinder/brick/iscsi/iscsi.py:550 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:375 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:469 +msgid "cinder-rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:489 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:513 cinder/brick/iscsi/iscsi.py:522 +#, python-format +msgid "Failed to create iscsi target for volume id:%s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:532 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:542 +#, python-format +msgid "Failed to remove iscsi target for volume id:%s." 
+msgstr "" + +#: cinder/brick/iscsi/iscsi.py:571 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 cinder/brick/local_dev/lvm.py:158 +#: cinder/brick/local_dev/lvm.py:474 cinder/brick/local_dev/lvm.py:503 +#: cinder/brick/local_dev/lvm.py:546 cinder/brick/local_dev/lvm.py:609 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 cinder/brick/local_dev/lvm.py:159 +#: cinder/brick/local_dev/lvm.py:475 cinder/brick/local_dev/lvm.py:504 +#: cinder/brick/local_dev/lvm.py:547 cinder/brick/local_dev/lvm.py:610 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 cinder/brick/local_dev/lvm.py:160 +#: cinder/brick/local_dev/lvm.py:476 cinder/brick/local_dev/lvm.py:505 +#: cinder/brick/local_dev/lvm.py:548 cinder/brick/local_dev/lvm.py:611 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, python-format +msgid "Unable to locate Volume Group %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:157 +msgid "Error querying thin pool about data_percent" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:370 +#, python-format +msgid "Unable to find VG: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:420 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:473 +msgid "Error creating Volume" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:489 +#, python-format +msgid "Unable to find LV: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:502 +msgid "Error creating snapshot" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:545 +msgid "Error activating LV" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:563 +#, python-format +msgid "Error reported running lvremove: CMD: %(command)s, RESPONSE: %(response)s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:568 +msgid "Attempting udev settle and retry of lvremove..." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:608 +msgid "Error extending Volume" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:39 +msgid "nfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:45 +msgid "glusterfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:86 +#, python-format +msgid "Already mounted: %s" +msgstr "" + +#: cinder/common/config.py:125 +msgid "Deploy v1 of the Cinder API." +msgstr "" + +#: cinder/common/config.py:128 +msgid "Deploy v2 of the Cinder API." +msgstr "" + +#: cinder/common/sqlalchemyutils.py:66 +#: cinder/openstack/common/db/sqlalchemy/utils.py:72 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:114 +#: cinder/openstack/common/db/sqlalchemy/utils.py:120 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/compute/nova.py:97 +#, python-format +msgid "Novaclient connection created using URL: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:63 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:190 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:843 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1842 +#, python-format +msgid "VolumeType %s deletion failed, VolumeType in use." 
+msgstr "" + +#: cinder/db/sqlalchemy/api.py:2530 +#, python-format +msgid "No backup with id %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2615 +msgid "Volume must be available" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2639 +#, python-format +msgid "Volume in unexpected state %s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2662 +#, python-format +msgid "" +"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " +"%(status)s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:37 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:64 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:240 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:269 +msgid "Downgrade from initial Cinder install is unsupported." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:49 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:74 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:105 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:45 +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:48 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:46 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:127 +msgid "Dropping foreign key reservations_ibfk_1 failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:133 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:140 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:147 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:60 +msgid "Exception while creating table 'volume_glance_metadata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:75 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:68 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:58 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:61 +msgid "transfers table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:31 +msgid "migrations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:61 +#, python-format +msgid "Table |%s| not created" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:37 +#, python-format +msgid "Exception while dropping table %s." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:100 +#, python-format +msgid "Exception while creating table %s." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:34 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:43 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:49 +#, python-format +msgid "Column |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:92 +msgid "encryption_key_id column not dropped from volumes" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:100 +msgid "encryption_key_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:105 +msgid "volume_type_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:113 +msgid "encryption table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:49 +msgid "Table quality_of_service_specs not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:60 +msgid "Added qos_specs_id column to volume type table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:85 +msgid "Dropping foreign key volume_types_ibfk_1 failed" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:93 +msgid "Dropping qos_specs_id column failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:100 +msgid "Dropping quality_of_service_specs table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:59 +msgid "volume_admin_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:48 +msgid "" +"Found existing 'default' entries in the quota_classes table. Skipping " +"insertion of default values." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:72 +msgid "Added default quota class data into the DB." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:74 +msgid "Default quota class data not inserted into the DB." +msgstr "" + +#: cinder/image/glance.py:161 cinder/image/glance.py:169 +#, python-format +msgid "Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:94 cinder/image/image_utils.py:199 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/image/image_utils.py:101 +#, python-format +msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:109 cinder/image/image_utils.py:192 +#, python-format +msgid "" +"Size is %(image_size)dGB and doesn't fit in a volume of size " +"%(volume_size)dGB." +msgstr "" + +#: cinder/image/image_utils.py:157 +#, python-format +msgid "" +"qemu-img is not installed and image is of type %s. Only RAW images can " +"be used if qemu-img is not installed." +msgstr "" + +#: cinder/image/image_utils.py:164 +msgid "" +"qemu-img is not installed and the disk format is not specified. Only RAW" +" images can be used if qemu-img is not installed." 
+msgstr "" + +#: cinder/image/image_utils.py:178 +#, python-format +msgid "Copying image from %(tmp)s to volume %(dest)s - size: %(size)s" +msgstr "" + +#: cinder/image/image_utils.py:206 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:224 +#, python-format +msgid "Converted to %(vol_format)s, but format is now %(file_format)s" +msgstr "" + +#: cinder/image/image_utils.py:260 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:78 +msgid "" +"config option keymgr.fixed_key has not been defined: some operations may " +"fail unexpectedly" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:80 +msgid "keymgr.fixed_key not defined" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:134 +#, python-format +msgid "Not deleting key %s" +msgstr "" + +#: cinder/openstack/common/eventlet_backdoor.py:140 +#, python-format +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/fileutils.py:64 +#, python-format +msgid "Reloading cached file %s" +msgstr "" + +#: cinder/openstack/common/gettextutils.py:252 +msgid "Message objects do not support addition." +msgstr "" + +#: cinder/openstack/common/gettextutils.py:261 +msgid "" +"Message objects do not support str() because they may contain non-ascii " +"characters. Please use unicode() or translate() instead." +msgstr "" + +#: cinder/openstack/common/imageutils.py:96 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:189 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:200 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:227 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:235 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/log.py:301 +#, python-format +msgid "Deprecated: %s" +msgstr "" + +#: cinder/openstack/common/log.py:402 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:453 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:623 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:82 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:89 +#: cinder/tests/brick/test_brick_connector.py:466 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:129 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:136 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:43 +#, python-format +msgid "Unexpected argument for periodic task creation: %(arg)s." 
+msgstr "" + +#: cinder/openstack/common/periodic_task.py:134 +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:139 +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:177 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:186 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." +msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:127 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/openstack/common/processutils.py:142 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:167 +#: cinder/openstack/common/processutils.py:239 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:345 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:179 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/openstack/common/processutils.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:318 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:220 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/openstack/common/processutils.py:224 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/openstack/common/service.py:175 +#: cinder/openstack/common/service.py:269 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:187 +msgid "Exception during rpc cleanup." 
+msgstr "" + +#: cinder/openstack/common/service.py:238 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:275 +msgid "Unhandled exception" +msgstr "" + +#: cinder/openstack/common/service.py:308 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/openstack/common/service.py:327 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/openstack/common/service.py:337 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/openstack/common/service.py:354 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/openstack/common/service.py:358 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/service.py:362 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/openstack/common/service.py:392 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/openstack/common/service.py:410 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/openstack/common/sslutils.py:98 +#, python-format +msgid "Invalid SSL version : %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:86 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/strutils.py:182 +#, python-format +msgid "Invalid string format: %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:189 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/openstack/common/versionutils.py:69 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and " +"may be removed in %(remove_in)s." +msgstr "" + +#: cinder/openstack/common/versionutils.py:73 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s and may be removed in " +"%(remove_in)s. It will not be superseded." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:29 +msgid "An unknown error occurred in crypto utils." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:36 +#, python-format +msgid "Block size of %(given)d is too big, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:45 +#, python-format +msgid "Length of %(given)d is too long, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/db/exception.py:44 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:487 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:538 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:610 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/utils.py:33 +msgid "Sort key supplied was not valid." +msgstr "" + +#: cinder/openstack/common/notifier/api.py:129 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:145 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:164 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. 
Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:105 +#, python-format +msgid "" +"An RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:83 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "no calling threads waiting for msg_id: %s, message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:216 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshold: %d. There " +"could be a MulticallProxyWaiter leak." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:299 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:345 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "received %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:422 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:423 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:280 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:459 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:594 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:597 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:631 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:640 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:668 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message (%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint."
+msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:166 +#: cinder/openstack/common/rpc/impl_qpid.py:164 +msgid "Failed to process message... skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:477 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:499 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:536 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:552 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:606 +#: cinder/openstack/common/rpc/impl_qpid.py:507 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:624 +#: cinder/openstack/common/rpc/impl_qpid.py:522 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:628 +#: cinder/openstack/common/rpc/impl_qpid.py:526 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:667 +#: cinder/openstack/common/rpc/impl_qpid.py:561 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:84 +#, python-format +msgid "Invalid value for qpid_topology_version: %d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:455 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:461 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:474 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:534 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:101 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:136 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:137 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:138 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:146 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:158 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:200 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:205 +msgid "You cannot send on this socket." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:267 +#, python-format +msgid "Running func with context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:305 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:387 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:437 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:443 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:475 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:481 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:497 +#, python-format +msgid "Required IPC directory does not exist at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:506 +#, python-format +msgid "Permission denied to IPC directory at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:509 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:543 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:562 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:590 +msgid "Skipping topic registration. Already registered." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:597 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:649 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:662 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:675 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:678 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:681 +#, python-format +msgid "Received message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:682 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:691 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:698 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:721 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:724 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:728 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:731 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:771 +#, python-format +msgid "topic is %s." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:815 +#, python-format +msgid "rpc_zmq_matchmaker = %(orig)s is deprecated; use %(new)s instead" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#: cinder/openstack/common/rpc/matchmaker_ring.py:79 +#: cinder/openstack/common/rpc/matchmaker_ring.py:97 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:54 +#, python-format +msgid "extra_spec requirement '%(req)s' does not match '%(cap)s'" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:67 +#, python-format +msgid "%(host_state)s fails resource_type extra_specs requirements" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:43 +msgid "Re-scheduling is disabled." +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:52 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/scheduler/driver.py:69 +msgid "Must implement host_passes_filters" +msgstr "" + +#: cinder/scheduler/driver.py:74 +msgid "Must implement find_retype_host" +msgstr "" + +#: cinder/scheduler/driver.py:78 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:82 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:98 +#, python-format +msgid "cannot place volume %(id)s on %(host)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:114 +#, python-format +msgid "No valid hosts for volume %(id)s with type %(type)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:125 +#, python-format +msgid "" +"Current host not valid for volume %(id)s with type %(type)s, migration " +"not allowed" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:156 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:174 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:207 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:259 +#, python-format +msgid "Filtered %s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:276 +#, python-format +msgid "Choosing %s" +msgstr "" + +#: cinder/scheduler/host_manager.py:264 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:269 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:294 +#, python-format +msgid "volume service is down or disabled. (host: %s)" +msgstr "" + +#: cinder/scheduler/manager.py:63 +msgid "" +"ChanceScheduler and SimpleScheduler have been deprecated due to lack of " +"support for advanced features like: volume types, volume encryption, QoS " +"etc. These two schedulers can be fully replaced by FilterScheduler with " +"certain combination of filters and weighers." 
+msgstr "" + +#: cinder/scheduler/manager.py:98 cinder/scheduler/manager.py:100 +msgid "Failed to create scheduler manager volume flow" +msgstr "" + +#: cinder/scheduler/manager.py:159 +msgid "New volume type not specified in request_spec." +msgstr "" + +#: cinder/scheduler/manager.py:174 +#, python-format +msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." +msgstr "" + +#: cinder/scheduler/manager.py:192 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:68 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%s'" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:43 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:57 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:53 +msgid "No volume_id provided to populate a request_spec from" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:116 +#, python-format +msgid "Failed to schedule_create_volume: %(cause)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:135 +#, python-format +msgid "Failed notifying on %(topic)s payload %(payload)s" +msgstr "" + +#: cinder/tests/fake_driver.py:57 cinder/volume/driver.py:784 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:76 cinder/volume/driver.py:884 +#, python-format +msgid "FAKE ISER: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:97 +msgid "local_path not implemented" +msgstr "" + +#: cinder/tests/fake_driver.py:124 cinder/tests/fake_driver.py:129 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:70 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:78 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:94 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:97 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:58 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_netapp_nfs.py:360 +#, python-format +msgid "Share %(share)s and file name %(file_name)s" +msgstr "" + +#: cinder/tests/test_rbd.py:768 cinder/volume/drivers/rbd.py:175 +msgid "flush() not supported in this version of librbd" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:260 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1507 +#, python-format +msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1510 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1515 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:60 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:61 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: 
cinder/tests/test_xiv_ds8k.py:102 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/tests/api/contrib/test_backups.py:741 +msgid "Invalid input" +msgstr "" + +#: cinder/tests/integrated/test_login.py:29 +#, python-format +msgid "volume: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:32 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:42 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:50 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:58 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:100 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:103 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:121 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:148 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:159 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:166 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/transfer/api.py:68 +msgid "Volume in unexpected state" +msgstr "" + +#: cinder/transfer/api.py:102 cinder/volume/api.py:367 +msgid "status must be available" +msgstr "" + +#: cinder/transfer/api.py:119 +#, python-format +msgid "Failed to create transfer record for %s" +msgstr "" + +#: cinder/transfer/api.py:136 +#, python-format +msgid "Attempt to transfer %s with invalid auth key." +msgstr "" + +#: cinder/transfer/api.py:156 cinder/volume/flows/api/create_volume.py:508 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/transfer/api.py:182 +#, python-format +msgid "Failed to update quota donating volume transfer id %s" +msgstr "" + +#: cinder/transfer/api.py:199 +#, python-format +msgid "Volume %s has been transferred."
+msgstr "" + +#: cinder/volume/api.py:143 +#, python-format +msgid "Unable to query if %s is in the availability zone set" +msgstr "" + +#: cinder/volume/api.py:171 cinder/volume/api.py:173 +msgid "Failed to create api volume flow" +msgstr "" + +#: cinder/volume/api.py:202 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:214 +#, python-format +msgid "Volume status must be available or error, but current status is: %s" +msgstr "" + +#: cinder/volume/api.py:224 +msgid "Volume cannot be deleted while migrating" +msgstr "" + +#: cinder/volume/api.py:229 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:285 cinder/volume/api.py:350 +#: cinder/volume/qos_specs.py:240 cinder/volume/volume_types.py:67 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:370 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:377 +msgid "status must be in-use to detach" +msgstr "" + +#: cinder/volume/api.py:388 +msgid "Volume status must be available to reserve" +msgstr "" + +#: cinder/volume/api.py:464 +msgid "Snapshot cannot be created while volume is migrating" +msgstr "" + +#: cinder/volume/api.py:468 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:490 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:502 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:553 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/api.py:581 cinder/volume/flows/api/create_volume.py:208 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:585 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:589 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:720 cinder/volume/api.py:772 +msgid "Volume status must be available/in-use." +msgstr "" + +#: cinder/volume/api.py:723 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/api.py:752 +msgid "Volume status must be available to extend." +msgstr "" + +#: cinder/volume/api.py:757 +#, python-format +msgid "" +"New size for extend must be greater than current size. (current: " +"%(size)s, extended: %(new_size)s)" +msgstr "" + +#: cinder/volume/api.py:778 +msgid "Volume is already part of an active migration" +msgstr "" + +#: cinder/volume/api.py:784 +msgid "volume must not have snapshots" +msgstr "" + +#: cinder/volume/api.py:797 +#, python-format +msgid "No available service named %s" +msgstr "" + +#: cinder/volume/api.py:803 +msgid "Destination host must be different than current host" +msgstr "" + +#: cinder/volume/api.py:833 +msgid "Source volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:837 +msgid "Destination volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:842 +#, python-format +msgid "Destination has migration_status %(stat)s, expected %(exp)s." +msgstr "" + +#: cinder/volume/api.py:853 +msgid "Volume status must be available to update readonly flag." 
+msgstr "" + +#: cinder/volume/api.py:862 +#, python-format +msgid "Unable to update type due to incorrect status on volume: %s" +msgstr "" + +#: cinder/volume/api.py:868 +#, python-format +msgid "Volume %s is already part of an active migration." +msgstr "" + +#: cinder/volume/api.py:874 +#, python-format +msgid "migration_policy must be 'on-demand' or 'never', passed: %s" +msgstr "" + +#: cinder/volume/api.py:887 +#, python-format +msgid "Invalid volume_type passed: %s" +msgstr "" + +#: cinder/volume/api.py:900 +#, python-format +msgid "New volume_type same as original: %s" +msgstr "" + +#: cinder/volume/api.py:915 +msgid "Retype cannot change encryption requirements" +msgstr "" + +#: cinder/volume/api.py:927 +msgid "Retype cannot change front-end qos specs for in-use volumes" +msgstr "" + +#: cinder/volume/driver.py:189 cinder/volume/drivers/netapp/nfs.py:174 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:282 +#, python-format +msgid "copy_data_between_volumes %(src)s -> %(dest)s." +msgstr "" + +#: cinder/volume/driver.py:295 cinder/volume/driver.py:309 +#, python-format +msgid "Failed to attach volume %(vol)s" +msgstr "" + +#: cinder/volume/driver.py:327 +#, python-format +msgid "Failed to copy volume %(src)s to %(dest)d" +msgstr "" + +#: cinder/volume/driver.py:340 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:358 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:394 +#, python-format +msgid "Unable to access the backend storage via the path %(path)s." +msgstr "" + +#: cinder/volume/driver.py:433 +#, python-format +msgid "Creating a new backup for volume %s." +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Restoring backup %(backup)s to volume %(volume)s." +msgstr "" + +#: cinder/volume/driver.py:474 +msgid "Extend volume not implemented" +msgstr "" + +#: cinder/volume/driver.py:533 cinder/volume/drivers/emc/emc_smis_iscsi.py:113 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:546 +#, python-format +msgid "ISCSI discovery attempt failed for:%s" +msgstr "" + +#: cinder/volume/driver.py:548 +#, python-format +msgid "Error from iscsiadm -m discovery: %s" +msgstr "" + +#: cinder/volume/driver.py:595 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:599 cinder/volume/drivers/emc/emc_smis_iscsi.py:156 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:696 +msgid "The volume driver requires the iSCSI initiator name in the connector." +msgstr "" + +#: cinder/volume/driver.py:726 cinder/volume/driver.py:845 +#: cinder/volume/drivers/eqlx.py:247 cinder/volume/drivers/lvm.py:359 +#: cinder/volume/drivers/zadara.py:650 +#: cinder/volume/drivers/emc/emc_smis_common.py:859 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:235 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:602 +#: cinder/volume/drivers/netapp/iscsi.py:1032 +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/nexenta/iscsi.py:538 +#: cinder/volume/drivers/windows/windows.py:205 +msgid "Updating volume stats" +msgstr "" + +#: cinder/volume/driver.py:924 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:203 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." 
+msgstr "" + +#: cinder/volume/manager.py:209 +msgid "" +"ThinLVMVolumeDriver is deprecated, please configure LVMISCSIDriver and " +"lvm_type=thin. Continuing with those settings." +msgstr "" + +#: cinder/volume/manager.py:228 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)" +msgstr "" + +#: cinder/volume/manager.py:235 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s" +msgstr "" + +#: cinder/volume/manager.py:244 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:257 +#, python-format +msgid "Failed to re-export volume %s: setting to error state" +msgstr "" + +#: cinder/volume/manager.py:264 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:271 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:273 +#, python-format +msgid "" +"Error encountered during re-exporting phase of driver initialization: " +"%(name)s" +msgstr "" + +#: cinder/volume/manager.py:283 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:328 cinder/volume/manager.py:330 +msgid "Failed to create manager volume flow" +msgstr "" + +#: cinder/volume/manager.py:374 cinder/volume/manager.py:391 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:380 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:389 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:394 +#, python-format +msgid "Cannot delete volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:422 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:427 +#, python-format +msgid "volume %s: glance metadata deleted" +msgstr "" + +#: cinder/volume/manager.py:430 +#, python-format +msgid "no glance metadata found for volume %s" +msgstr "" + +#: cinder/volume/manager.py:434 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:451 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:462 +#, python-format +msgid "snapshot %(snap_id)s: creating" +msgstr "" + +#: cinder/volume/manager.py:490 +#, python-format +msgid "" +"Failed updating %(snapshot_id)s metadata using the provided volumes " +"%(volume_id)s metadata" +msgstr "" + +#: cinder/volume/manager.py:496 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:508 cinder/volume/manager.py:518 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:526 +#, python-format +msgid "Cannot delete snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:556 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:559 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:579 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:583 +msgid "being attached by another host" +msgstr "" + +#: cinder/volume/manager.py:587 +msgid "being attached by different mode" +msgstr "" + +#: cinder/volume/manager.py:590 +msgid "status must be available or attaching" +msgstr "" + +#: cinder/volume/manager.py:698 +#, python-format +msgid 
"Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "" + +#: cinder/volume/manager.py:760 +#, python-format +msgid "Unable to fetch connection information from backend: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:807 +#, python-format +msgid "Unable to terminate volume connection: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:854 +msgid "failed to create new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:857 +msgid "timeout creating new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:880 +#, python-format +msgid "Failed to copy volume %(vol1)s to %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:909 +#, python-format +msgid "" +"migrate_volume_completion: completing migration for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:921 +#, python-format +msgid "" +"migrate_volume_completion is cleaning up an error for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:940 +#, python-format +msgid "Failed to delete migration source vol %(vol)s: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:976 +#, python-format +msgid "volume %s: calling driver migrate_volume" +msgstr "" + +#: cinder/volume/manager.py:1016 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/manager.py:1024 +#, python-format +msgid "" +"Unable to update stats, %(driver_name)s -%(driver_version)s " +"%(config_group)s driver is uninitialized." +msgstr "" + +#: cinder/volume/manager.py:1044 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/manager.py:1091 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to extend volume by %(s_size)sG, " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/manager.py:1103 +#, python-format +msgid "volume %s: extending" +msgstr "" + +#: cinder/volume/manager.py:1105 +#, python-format +msgid "volume %s: extended successfully" +msgstr "" + +#: cinder/volume/manager.py:1107 +#, python-format +msgid "volume %s: Error trying to extend volume" +msgstr "" + +#: cinder/volume/manager.py:1169 +msgid "Failed to update usages while retyping volume." +msgstr "" + +#: cinder/volume/manager.py:1170 +msgid "Failed to get old volume type quota reservations" +msgstr "" + +#: cinder/volume/manager.py:1190 +#, python-format +msgid "Volume %s: retyped succesfully" +msgstr "" + +#: cinder/volume/manager.py:1193 +#, python-format +msgid "" +"Volume %s: driver error when trying to retype, falling back to generic " +"mechanism." +msgstr "" + +#: cinder/volume/manager.py:1204 +msgid "Retype requires migration but is not allowed." +msgstr "" + +#: cinder/volume/manager.py:1212 +msgid "Volume must not have snapshots." 
+msgstr "" + +#: cinder/volume/qos_specs.py:57 +#, python-format +msgid "Valid consumer of QoS specs are: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:84 cinder/volume/qos_specs.py:105 +#: cinder/volume/qos_specs.py:155 cinder/volume/qos_specs.py:197 +#: cinder/volume/qos_specs.py:211 cinder/volume/qos_specs.py:225 +#: cinder/volume/volume_types.py:43 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:123 cinder/volume/qos_specs.py:140 +#: cinder/volume/qos_specs.py:272 cinder/volume/volume_types.py:52 +#: cinder/volume/volume_types.py:99 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/qos_specs.py:156 +#, python-format +msgid "Failed to get all associations of qos specs %s" +msgstr "" + +#: cinder/volume/qos_specs.py:189 +#, python-format +msgid "" +"Type %(type_id)s is already associated with another qos specs: " +"%(qos_specs_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:198 +#, python-format +msgid "Failed to associate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:212 +#, python-format +msgid "Failed to disassociate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:226 +#, python-format +msgid "Failed to disassociate qos specs %s." +msgstr "" + +#: cinder/volume/qos_specs.py:284 cinder/volume/volume_types.py:111 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/utils.py:144 +#, python-format +msgid "" +"Incorrect value error: %(blocksize)s, it may indicate that " +"'volume_dd_blocksize' was configured incorrectly. Fall back to default." +msgstr "" + +#: cinder/volume/volume_types.py:130 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:131 +#: cinder/volume/drivers/block_device.py:143 cinder/volume/drivers/lvm.py:654 +#: cinder/volume/drivers/lvm.py:669 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:157 cinder/volume/drivers/lvm.py:687 +#, python-format +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:183 cinder/volume/drivers/lvm.py:483 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:200 cinder/volume/drivers/lvm.py:504 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:272 cinder/volume/drivers/lvm.py:227 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:287 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:311 cinder/volume/drivers/lvm.py:300 +#: cinder/volume/drivers/zadara.py:509 cinder/volume/drivers/nexenta/nfs.py:189 +#, python-format +msgid "Creating clone of volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:380 +msgid "No free disk" +msgstr "" + +#: cinder/volume/drivers/block_device.py:393 +msgid "No big enough free disk" +msgstr "" + +#: cinder/volume/drivers/coraid.py:84 +#, python-format +msgid "Invalid ESM url scheme \"%s\". Supported https only." +msgstr "" + +#: cinder/volume/drivers/coraid.py:111 +msgid "Invalid REST handle name. Expected path." 
+msgstr "" + +#: cinder/volume/drivers/coraid.py:134 +#, python-format +msgid "Call to json.loads() failed: %(ex)s. Response: %(resp)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:224 +msgid "Session is expired. Relogin on ESM." +msgstr "" + +#: cinder/volume/drivers/coraid.py:244 +msgid "Reply is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:246 +msgid "Error message is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:284 +#, python-format +msgid "Coraid Appliance ping failed: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:297 +#, python-format +msgid "Volume \"%(name)s\" created with VSX LUN \"%(lun)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:311 +#, python-format +msgid "Volume \"%s\" deleted." +msgstr "" + +#: cinder/volume/drivers/coraid.py:315 +#, python-format +msgid "Resize volume \"%(name)s\" to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:319 +#, python-format +msgid "Repository for volume \"%(name)s\" found: \"%(repo)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:333 +#, python-format +msgid "Volume \"%(name)s\" resized. New size is %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:385 +msgid "Cannot create clone volume in different repository." +msgstr "" + +#: cinder/volume/drivers/coraid.py:505 +#, python-format +msgid "Initialize connection %(shelf)s/%(lun)s for %(name)s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:139 +#, python-format +msgid "" +"CLI output\n" +"%s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:154 +msgid "Reading CLI MOTD" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:158 +#, python-format +msgid "Setting CLI terminal width: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:162 +#, python-format +msgid "Sending CLI command: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:169 +msgid "Error executing EQL command" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:199 +#, python-format +msgid "EQL-driver: executing \"%s\"" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:208 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:383 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:215 cinder/volume/drivers/san/san.py:149 +#, python-format +msgid "Error running SSH command: %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:282 +#, python-format +msgid "Volume %s does not exist, it may have already been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:300 +#, python-format +msgid "EQL-driver: Setup is complete, group IP is %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:304 +msgid "Failed to setup the Dell EqualLogic driver" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:320 +#, python-format +msgid "Failed to create volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:329 +#, python-format +msgid "Volume %s was not found while trying to delete it" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:333 +#, python-format +msgid "Failed to delete volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:348 +#, python-format +msgid "Failed to create snapshot of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:361 +#, python-format +msgid "Failed to create volume from snapshot %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:374 +#, python-format +msgid "Failed to create clone of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:384 +#, python-format +msgid "Failed to delete snapshot %(snap)s of volume %(vol)s" +msgstr "" + +#: 
cinder/volume/drivers/eqlx.py:405
+#, python-format
+msgid "Failed to initialize connection to volume %s"
+msgstr ""
+
+#: cinder/volume/drivers/eqlx.py:415
+#, python-format
+msgid "Failed to terminate connection to volume %s"
+msgstr ""
+
+#: cinder/volume/drivers/eqlx.py:436
+#, python-format
+msgid "Volume %s was not found; it may have been deleted"
+msgstr ""
+
+#: cinder/volume/drivers/eqlx.py:440
+#, python-format
+msgid "Failed to ensure export of volume %s"
+msgstr ""
+
+#: cinder/volume/drivers/eqlx.py:459
+#, python-format
+msgid "Failed to extend_volume %(name)s from %(current_size)sGB to %(new_size)sGB"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:86
+#, python-format
+msgid "There's no Gluster config file configured (%s)"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:91
+#, python-format
+msgid "Gluster config file at %(config)s doesn't exist"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:103
+msgid "mount.glusterfs is not installed"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:161
+#, python-format
+msgid "Cloning volume %(src)s to volume %(dst)s"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:166
+msgid "Volume status must be 'available'."
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:202 cinder/volume/drivers/nfs.py:122
+#: cinder/volume/drivers/netapp/nfs.py:753
+#, python-format
+msgid "casted to %s"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:216
+msgid "Snapshot status must be \"available\" to clone."
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:238
+#, python-format
+msgid "snapshot: %(snap)s, volume: %(vol)s, volume_size: %(size)s"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:257
+#, python-format
+msgid "will copy from snapshot at %s"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:275 cinder/volume/drivers/nfs.py:172
+#, python-format
+msgid "Volume %s does not have provider_location specified, skipping"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:373
+#, python-format
+msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:403
+#, python-format
+msgid "nova call result: %s"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:405
+msgid "Call to Nova to create snapshot failed"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:427
+msgid "Nova returned \"error\" status while creating snapshot."
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:431
+#, python-format
+msgid "Status of snapshot %(id)s is now %(status)s"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:444
+#, python-format
+msgid "Timed out while waiting for Nova update for creation of snapshot %s."
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:456
+#, python-format
+msgid "create snapshot: %s"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:457
+#, python-format
+msgid "volume id: %s"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:532
+msgid "'active' must be present when writing snap_info."
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:562
+#, python-format
+msgid "deleting snapshot %s"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:566
+msgid "Volume status must be \"available\" or \"in-use\"."
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:582
+#, python-format
+msgid ""
+"Snapshot record for %s is not present, allowing snapshot_delete to "
+"proceed."
+msgstr "" + +#: cinder/volume/drivers/glusterfs.py:587 +#, python-format +msgid "snapshot_file for this snap is %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:608 +#, python-format +msgid "No base file found for %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:625 +#, python-format +msgid "No %(base_id)s found for %(file)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:680 +#, python-format +msgid "No file found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:690 +#, python-format +msgid "No snap found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:701 +#, python-format +msgid "No file depends on %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:727 +#, python-format +msgid "Check condition failed: %s expected to be None." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:778 +msgid "Call to Nova delete snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:796 +#, python-format +msgid "status of snapshot %s is still \"deleting\"... waiting" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:802 +#, python-format +msgid "Unable to delete snapshot %(id)s, status: %(status)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:815 +#, python-format +msgid "Timed out while waiting for Nova update for deletion of snapshot %(id)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:904 +#, python-format +msgid "%s must be a valid raw or qcow2 image." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:967 +msgid "Extend volume is only supported for this driver when no snapshots exist." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:975 +#, python-format +msgid "Unrecognized backing format: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:990 +#, python-format +msgid "creating new volume at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:993 +#, python-format +msgid "file already exists at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1019 cinder/volume/drivers/nfs.py:159 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1021 +#, python-format +msgid "Available shares: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1038 +#, python-format +msgid "" +"GlusterFS share at %(dir)s is not writable by the Cinder volume service. " +"Snapshot operations will not be supported." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:96 +#, python-format +msgid "GPFS is not active. Detailed output: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:97 +#, python-format +msgid "GPFS is not running - state: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:140 +msgid "Option gpfs_mount_point_base is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:147 +msgid "Option gpfs_images_share_mode is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:153 +msgid "Option gpfs_images_dir is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:160 +#, python-format +msgid "" +"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " +"belong to different file systems" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:169 +#, python-format +msgid "" +"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in " +"cluster daemon level %(cur)s - must be at least at level %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:183 +#, python-format +msgid "%s must be an absolute path." 
+msgstr "" + +#: cinder/volume/drivers/gpfs.py:188 +#, python-format +msgid "%s is not a directory." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:197 +#, python-format +msgid "" +"The GPFS filesystem %(fs)s is not at the required release level. Current" +" level is %(cur)s, must be at least %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:556 +#, python-format +msgid "Failed to resize volume %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:604 +#, python-format +msgid "mkfs failed on volume %(vol)s, error message was: %(err)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:637 +#, python-format +msgid "" +"%s cannot be accessed. Verify that GPFS is active and file system is " +"mounted." +msgstr "" + +#: cinder/volume/drivers/lvm.py:189 +#, python-format +msgid "Unabled to delete due to existing snapshot for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:215 +#, python-format +msgid "Volume device file path %s does not exist." +msgstr "" + +#: cinder/volume/drivers/lvm.py:221 +#, python-format +msgid "Size for volume: %s not found, cannot secure delete." +msgstr "" + +#: cinder/volume/drivers/lvm.py:262 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:361 +#, python-format +msgid "Unable to update stats on non-initialized Volume Group: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:462 +#, python-format +msgid "Error creating iSCSI target, retrying creation for target: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:482 +#, python-format +msgid "volume_info:%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:518 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:519 cinder/volume/drivers/lvm.py:724 +#: cinder/volume/drivers/huawei/rest_common.py:1225 +#, python-format +msgid "%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:573 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/nfs.py:109 +msgid "Driver specific implementation needs to return mount_point_base." +msgstr "" + +#: cinder/volume/drivers/nfs.py:263 +#, python-format +msgid "Expected volume size was %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:264 +#, python-format +msgid " but size is now %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:361 +#, python-format +msgid "%s is already mounted" +msgstr "" + +#: cinder/volume/drivers/nfs.py:421 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:426 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/nfs.py:431 +#, python-format +msgid "NFS config 'nfs_oversub_ratio' invalid. Must be > 0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:439 +#, python-format +msgid "NFS config 'nfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:493 +#, python-format +msgid "Selected %s as target nfs share." 
+msgstr "" + +#: cinder/volume/drivers/nfs.py:526 +#, python-format +msgid "%s is above nfs_used_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:529 +#, python-format +msgid "%s is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:532 +#, python-format +msgid "%s reserved space is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/rbd.py:160 +#, python-format +msgid "Invalid argument - whence=%s not supported" +msgstr "" + +#: cinder/volume/drivers/rbd.py:164 +msgid "Invalid argument" +msgstr "" + +#: cinder/volume/drivers/rbd.py:183 +msgid "fileno() not supported by RBD()" +msgstr "" + +#: cinder/volume/drivers/rbd.py:210 +#, python-format +msgid "error opening rbd image %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:259 +msgid "rados and rbd python libraries not found" +msgstr "" + +#: cinder/volume/drivers/rbd.py:265 +msgid "error connecting to ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:346 cinder/volume/drivers/sheepdog.py:178 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:377 +#, python-format +msgid "clone depth exceeds limit of %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:411 +#, python-format +msgid "maximum clone depth (%d) has been reached - flattening source volume" +msgstr "" + +#: cinder/volume/drivers/rbd.py:423 +#, python-format +msgid "flattening source volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:435 +#, python-format +msgid "creating snapshot='%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:445 +#, python-format +msgid "cloning '%(src_vol)s@%(src_snap)s' to '%(dest)s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:459 +msgid "clone created successfully" +msgstr "" + +#: cinder/volume/drivers/rbd.py:468 +#, python-format +msgid "creating volume '%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:484 +#, python-format +msgid "flattening %(pool)s/%(img)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:490 +#, python-format +msgid "cloning %(pool)s/%(img)s@%(snap)s to %(dst)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:527 +msgid "volume has no backup snaps" +msgstr "" + +#: cinder/volume/drivers/rbd.py:550 +#, python-format +msgid "volume %s is not a clone" +msgstr "" + +#: cinder/volume/drivers/rbd.py:568 +#, python-format +msgid "deleting parent snapshot %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:579 +#, python-format +msgid "deleting parent %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:593 +#, python-format +msgid "volume %s no longer exists in backend" +msgstr "" + +#: cinder/volume/drivers/rbd.py:609 +msgid "volume has clone snapshot(s)" +msgstr "" + +#: cinder/volume/drivers/rbd.py:625 +#, python-format +msgid "deleting rbd volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:629 +msgid "" +"ImageBusy error raised while deleting rbd volume. This may have been " +"caused by a connection from a client that has crashed and, if so, may be " +"resolved by retrying the delete after 30 seconds has elapsed." 
+msgstr "" + +#: cinder/volume/drivers/rbd.py:642 +msgid "volume is a clone so cleaning references" +msgstr "" + +#: cinder/volume/drivers/rbd.py:696 +#, python-format +msgid "connection data: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:705 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:709 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:712 +msgid "Not an rbd snapshot" +msgstr "" + +#: cinder/volume/drivers/rbd.py:724 +#, python-format +msgid "not cloneable: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:728 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:733 +msgid "rbd image clone requires image format to be 'raw' but image {0} is '{1}'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:747 +#, python-format +msgid "Unable to open image %(loc)s: %(err)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:817 +msgid "volume backup complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:830 +msgid "volume restore complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:840 cinder/volume/drivers/sheepdog.py:195 +#, python-format +msgid "Failed to Extend Volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:845 cinder/volume/drivers/sheepdog.py:200 +#: cinder/volume/drivers/windows/windows.py:223 +#, python-format +msgid "Extend volume from %(old_size)s GB to %(new_size)s GB." +msgstr "" + +#: cinder/volume/drivers/scality.py:67 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:78 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:84 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:105 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:139 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:59 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:64 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:144 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:151 +#, python-format +msgid "" +"Failed to make httplib connection SolidFire Cluster: %s (verify san_ip " +"settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:154 +#, python-format +msgid "Failed to make httplib connection: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:161 +#, python-format +msgid "" +"Request to SolidFire cluster returned bad status: %(status)s / %(reason)s" +" (check san_login/san_password settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:166 +#, python-format +msgid "HTTP request failed, with status: %(status)s and reason: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:177 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:183 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:187 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:189 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:195 +#, python-format +msgid "Detected 
xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:202 +#: cinder/volume/drivers/solidfire.py:271 +#: cinder/volume/drivers/solidfire.py:366 +#, python-format +msgid "API response: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:222 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:253 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:315 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:398 +msgid "Failed to get model update from clone" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:410 +#, python-format +msgid "Failed volume create: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:425 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:460 +#, python-format +msgid "Failed to get SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:469 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:478 +#, python-format +msgid "Volume %s, not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:481 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:550 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:554 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:556 +msgid "This usually means the volume was never successfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:569 +#, python-format +msgid "Failed to delete SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:572 +#: cinder/volume/drivers/solidfire.py:646 +#: cinder/volume/drivers/solidfire.py:709 +#: cinder/volume/drivers/solidfire.py:734 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:575 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:579 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:587 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:638 +msgid "Entering SolidFire extend_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:660 +msgid "Leaving SolidFire extend_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:665 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:673 +msgid "Failed to get updated stats" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:703 +#: cinder/volume/drivers/solidfire.py:728 +msgid "Entering SolidFire attach_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:773 +msgid "Leaving SolidFire transfer volume" +msgstr "" + +#: cinder/volume/drivers/zadara.py:236 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:260 +#, python-format +msgid "Operation completed. 
%(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:357 +#, python-format +msgid "Pool %(name)s: %(total)sGB total, %(free)sGB free" +msgstr "" + +#: cinder/volume/drivers/zadara.py:408 cinder/volume/drivers/zadara.py:531 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:438 +#, python-format +msgid "Create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:445 cinder/volume/drivers/zadara.py:490 +#: cinder/volume/drivers/zadara.py:516 +#, python-format +msgid "Volume %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:456 +#, python-format +msgid "Delete snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:464 +#, python-format +msgid "snapshot: original volume %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:472 +#, python-format +msgid "snapshot: snapshot %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:483 +#, python-format +msgid "Creating volume from snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:496 +#, python-format +msgid "Snapshot %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:614 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:40 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:79 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:83 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:91 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:98 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:107 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:115 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:130 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:137 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:144 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:152 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:157 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:167 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:177 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:188 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:197 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:218 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:230 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:241 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:257 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:266 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:278 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:287 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:292 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:302 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:312 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:321 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:342 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. 
Return code: %(rc)lu.Error: %(error)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:354
+#, python-format
+msgid ""
+"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s."
+" Successfully cloned volume from source volume. Finding the clone "
+"relationship."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:365
+#, python-format
+msgid ""
+"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s."
+" Remove the clone relationship. Method: ModifyReplicaSynchronization "
+"ReplicationService: %(service)s Operation: 8 Synchronization: "
+"%(sync_name)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:381
+#, python-format
+msgid ""
+"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s"
+" Return code: %(rc)lu"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:390
+#, python-format
+msgid ""
+"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: "
+"%(srcname)s. Return code: %(rc)lu. Error: %(error)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:402
+#, python-format
+msgid ""
+"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: "
+"%(srcname)s Return code: %(rc)lu."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:411
+msgid "Entering delete_volume."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:413
+#, python-format
+msgid "Delete Volume: %(volume)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:420
+#, python-format
+msgid "Volume %(name)s not found on the array. No volume to delete."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:430
+#, python-format
+msgid ""
+"Error Delete Volume: %(volumename)s. Storage Configuration Service not "
+"found."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:438
+#, python-format
+msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:442
+#, python-format
+msgid ""
+"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigService: "
+"%(service)s TheElement: %(vol_instance)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:456
+#, python-format
+msgid ""
+"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: "
+"%(error)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:465
+#, python-format
+msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:472
+msgid "Entering create_snapshot."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:476
+#, python-format
+msgid "Create snapshot: %(snapshot)s: volume: %(volume)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:488
+#, python-format
+msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:495
+#: cinder/volume/drivers/emc/emc_smis_common.py:497
+#: cinder/volume/drivers/emc/emc_smis_common.py:567
+#, python-format
+msgid "Cannot find Replication Service to create snapshot for volume %s."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:502
+#, python-format
+msgid ""
+"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s "
+"Source: %(volume)s Replication Service: %(service)s ElementName: "
+"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s."
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:518 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:527 +#, python-format +msgid "" +"Error Create Snapshot: %(snapshot)s Volume: %(volume)s Error: " +"%(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:535 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:541 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:545 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:551 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:559 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:574 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:590 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:599 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:611 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:621 +#, python-format +msgid "Create export: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:626 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:648 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:663 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:674 +#, python-format +msgid "Error mapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:678 +#, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:694 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:707 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:711 +#, python-format +msgid "HidePaths for volume %s completed successfully." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:724 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:739 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:744 +#, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:757 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:770 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:775 +#, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:781 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:790 +#: cinder/volume/drivers/emc/emc_smis_common.py:820 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:804 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:810 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:834 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:840 +#, python-format +msgid "Volume %s is already mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:852 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:884 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:887 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:903 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:906 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:928 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:948 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:952 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:959 +msgid "Cannot connect to ECOM server" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:971 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:984 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:997 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1010 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1054 +#, python-format +msgid "Pool %(storage_type)s is not found." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1060 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1066 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1082 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1114 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1117 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1130 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1158 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1184 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1188 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1248 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1289 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1302 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1314 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1326 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1361 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1404 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1409 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1419 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1441 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1463 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1491 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1520 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1526 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1538 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1548 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1550 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1566 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:152 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:161 +#, python-format +msgid "Cannot find device number for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:191 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:198 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:215 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:70 +#, python-format +msgid "Range: start LU: %(start)s, end LU: %(end)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:84 +#, python-format +msgid "setting LU upper (end) limit to %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:92 +#, python-format +msgid "%(element)s: %(val)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:103 cinder/volume/drivers/hds/hds.py:105 +#, python-format +msgid "XML exception reading parameter: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:178 +#, python-format +msgid "portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:197 +#, python-format +msgid "No configuration found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:250 +#, python-format +msgid "HDP not found: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:289 +#, python-format +msgid "iSCSI portal not found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:327 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:355 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is cloned." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:372 +#, python-format +msgid "LUN %(lun)s extended to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:395 +#, python-format +msgid "delete lun %(lun)s on %(name)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:480 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created from snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:503 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is created as snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:522 +#, python-format +msgid "LUN %s is deleted." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:57 +msgid "_instantiate_driver: configuration not found." 
+msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:64 +#, python-format +msgid "" +"_instantiate_driver: Loading %(protocol)s driver for Huawei OceanStor " +"%(product)s series storage arrays." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:84 +#, python-format +msgid "" +"\"Product\" or \"Protocol\" is illegal. \"Product\" should be set to " +"either T, Dorado or HVS. \"Protocol\" should be set to either iSCSI or " +"FC. Product: %(product)s Protocol: %(protocol)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:74 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s host: %(host)s initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:92 +#: cinder/volume/drivers/huawei/huawei_t.py:461 +#, python-format +msgid "initialize_connection: Target FC ports WWNS: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:101 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(ini)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:159 +#: cinder/volume/drivers/huawei/rest_common.py:1278 +#, python-format +msgid "" +"_get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " +"check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:206 +#: cinder/volume/drivers/huawei/rest_common.py:1083 +#, python-format +msgid "_get_tgt_iqn: iSCSI IP is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:234 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:248 +#, python-format +msgid "" +"_get_iscsi_tgt_port_info: Failed to get iSCSI port info. Please make sure" +" the iSCSI port IP %s is configured in array." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:323 +#: cinder/volume/drivers/huawei/huawei_t.py:552 +#, python-format +msgid "" +"terminate_connection: volume: %(vol)s, host: %(host)s, connector: " +"%(initiator)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:351 +#, python-format +msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:436 +msgid "validate_connector: The FC driver requires thewwpns in the connector." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:443 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:578 +#, python-format +msgid "_remove_fc_ports: FC port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:40 +#, python-format +msgid "parse_xml_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:129 +#, python-format +msgid "_get_host_os_type: Host %(ip)s OS type is %(os)s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:59 +#, python-format +msgid "HVS Request URL: %(url)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:60 +#, python-format +msgid "HVS Request Data: %(data)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:73 +#, python-format +msgid "HVS Response Data: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:75 +#, python-format +msgid "Bad response from server: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:82 +msgid "JSON transfer error" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:102 +#, python-format +msgid "Login error, reason is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:166 +#, python-format +msgid "" +"%(err)s\n" +"result: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:173 +#, python-format +msgid "%s \"data\" was not in result." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:208 +msgid "Can't find the Qos policy in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:246 +msgid "Can't find lun or lun group in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:280 +#, python-format +msgid "Invalid resource pool: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:298 +#, python-format +msgid "Get pool info error, pool name is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:327 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:354 +#, python-format +msgid "_stop_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:474 +#, python-format +msgid "" +"_mapping_hostgroup_and_lungroup: lun_group: %(lun_group)sview_id: " +"%(view_id)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:511 +#: cinder/volume/drivers/huawei/rest_common.py:543 +#, python-format +msgid "initiator name:%(initiator_name)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:527 +#, python-format +msgid "host lun id is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:553 +#, python-format +msgid "the free wwns %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:574 +#, python-format +msgid "the fc server properties is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:688 +#, python-format +msgid "JSON transfer data error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:874 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:937 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:964 +#, python-format +msgid "" +"PrefetchType config is wrong. PrefetchType must in 1,2,3,4. fetchtype " +"is:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:970 +msgid "Use default prefetch fetchtype. Prefetch fetchtype:Intelligent." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:982 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal.LUNcopy name: " +"%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1056 +#, python-format +msgid "" +"_get_iscsi_port_info: Failed to get iscsi port info through config IP " +"%(ip)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1101 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1124 +#, python-format +msgid "_parse_volume_type: type id: %(type_id)s config parameter is: %(params)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1157 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the configuration file " +"%(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1162 +#, python-format +msgid "The config parameters are: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1239 +#: cinder/volume/drivers/huawei/ssh_common.py:118 +#: cinder/volume/drivers/huawei/ssh_common.py:1265 +#, python-format +msgid "_check_conf_file: Config file invalid. %s must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1246 +#: cinder/volume/drivers/huawei/ssh_common.py:125 +msgid "_check_conf_file: Config file invalid. StoragePool must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1256 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1300 +msgid "Can not find lun in array" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:54 +#, python-format +msgid "ssh_read: Read SSH timeout. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:70 +msgid "No response message. Please check system status." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:101 +#: cinder/volume/drivers/huawei/ssh_common.py:1249 +msgid "do_setup" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:135 +#: cinder/volume/drivers/huawei/ssh_common.py:1287 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType is invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:169 +#, python-format +msgid "_get_login_info: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:224 +#, python-format +msgid "create_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:242 +#, python-format +msgid "" +"_name_translate: Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:279 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the element in configuration " +"file %(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:373 +#: cinder/volume/drivers/huawei/ssh_common.py:1451 +#, python-format +msgid "LUNType must be \"Thin\" or \"Thick\". LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:395 +msgid "" +"_parse_conf_lun_params: Use default prefetch type. 
Prefetch type: " +"Intelligent" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:421 +#, python-format +msgid "" +"_get_maximum_capacity_pool_id: Failed to get pool id. Please check config" +" file and make sure the StoragePool %s is created in storage array." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:436 +#, python-format +msgid "CLI command: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:466 +#, python-format +msgid "" +"_execute_cli: Can not connect to IP %(old)s, try to connect to the other " +"IP %(new)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:501 +#, python-format +msgid "_execute_cli: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:511 +#, python-format +msgid "delete_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:516 +#, python-format +msgid "delete_volume: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:570 +#, python-format +msgid "" +"create_volume_from_snapshot: snapshot name: %(snapshot)s, volume name: " +"%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:580 +#, python-format +msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:650 +#, python-format +msgid "_wait_for_luncopy: LUNcopy %(luncopyname)s status is %(status)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:688 +#, python-format +msgid "create_cloned_volume: src volume: %(src)s, tgt volume: %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:697 +#, python-format +msgid "Source volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:739 +#, python-format +msgid "" +"extend_volume: extended volume name: %(extended_name)s new added volume " +"name: %(added_name)s new added volume size: %(added_size)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:747 +#, python-format +msgid "extend_volume: volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:779 +#, python-format +msgid "create_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:785 +msgid "create_snapshot: Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:792 +#, python-format +msgid "create_snapshot: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:855 +#, python-format +msgid "delete_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:865 +#, python-format +msgid "" +"delete_snapshot: Can not delete snapshot %s for it is a source LUN of " +"LUNCopy." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:873 +#, python-format +msgid "delete_snapshot: Snapshot %(snap)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:916 +#, python-format +msgid "" +"%(func)s: %(msg)s\n" +"CLI command: %(cmd)s\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:933 +#, python-format +msgid "map_volume: Volume %s was not found." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1079 +#, python-format +msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1102 +#, python-format +msgid "remove_map: Host %s does not exist." 
+msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1106 +#, python-format +msgid "remove_map: Volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1119 +#, python-format +msgid "remove_map: No map between host %(host)s and volume %(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1138 +#, python-format +msgid "" +"_delete_map: There are IOs accessing the system. Retry to delete host map" +" %(mapid)s 10s later." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1146 +#, python-format +msgid "" +"_delete_map: Failed to delete host map %(mapid)s.\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1185 +msgid "_update_volume_stats: Updating volume stats." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1277 +msgid "_check_conf_file: Config file invalid. StoragePool must be specified." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1311 +msgid "" +"_get_device_type: The driver only supports Dorado5100 and Dorado 2100 G2 " +"now." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1389 +#, python-format +msgid "" +"create_volume_from_snapshot: %(device)s does not support create volume " +"from snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1396 +#, python-format +msgid "create_cloned_volume: %(device)s does not support clone volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1404 +#, python-format +msgid "extend_volume: %(device)s does not support extend volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1413 +#, python-format +msgid "create_snapshot: %(device)s does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:132 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:142 +#, python-format +msgid "Failed getting details for pool %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:179 +msgid "do_setup: No configured nodes." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:182 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:186 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:190 +msgid "Unable to determine system name" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:193 +msgid "Unable to determine system id" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:196 +msgid "Unable to determine pool extent size" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:203 +#: cinder/volume/drivers/netapp/iscsi.py:122 +#: cinder/volume/drivers/netapp/nfs.py:639 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:157 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:209 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:217 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:225 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:235 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:254 +msgid "The connector does not contain the required information." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:277 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:297 +msgid "CHAP secret exists for host but CHAP is disabled" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:302 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:314 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:316 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:333 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:342 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:365 +msgid "" +"Could not get FC connection information for the host-volume connection. " +"Is the host configured properly for FC connections?" 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:380 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:385 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:403 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:411 +msgid "terminate_connection: Failed to get host name from connector." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:421 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:447 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:459 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:470 +#, python-format +msgid "enter: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:474 +msgid "extend_volume: Extending a volume with snapshots is not supported." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:481 +#, python-format +msgid "leave: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:497 +#, python-format +msgid "enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:523 +#, python-format +msgid "leave: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:540 +#, python-format +msgid "" +"enter: retype: id=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:581 +#, python-format +msgid "" +"exit: retype: ild=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:622 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:623 +msgid "_update_volume_stats: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:44 +#, python-format +msgid "Could not find key in output of command %(cmd)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:64 +#, python-format +msgid "Failed to get code level (%s)." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:86 +#, python-format +msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:143 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:165 +#, python-format +msgid "Failed to find host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:178 +#, python-format +msgid "enter: get_host_from_connector: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:207 +#, python-format +msgid "leave: get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:218 +#, python-format +msgid "enter: create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:224 +msgid "create_host: Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:235 +msgid "create_host: No initiators or wwpns supplied." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:265 +#, python-format +msgid "leave: create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:275 +#, python-format +msgid "enter: map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:301 +#, python-format +msgid "" +"leave: map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host " +"%(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:311 +#, python-format +msgid "enter: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:318 +#, python-format +msgid "unmap_vol_from_host: No mapping of volume %(vol_name)s to any host found." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:324 +#, python-format +msgid "" +"unmap_vol_from_host: Multiple mappings of volume %(vol_name)s found, no " +"host specified." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:336 +#, python-format +msgid "" +"unmap_vol_from_host: No mapping of volume %(vol_name)s to host %(host) " +"found." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:348 +#, python-format +msgid "leave: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:377 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:383 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:390 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:397 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:402 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:408 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:417 +#, python-format +msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:452 +msgid "Protocol must be specified as ' iSCSI' or ' FC'." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:495 +#, python-format +msgid "enter: create_vdisk: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:498 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:525 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping%(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:535 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within theallotted %(to)d " +"seconds timeout. Terminating." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:544 +#, python-format +msgid "" +"enter: run_flashcopy: execute FlashCopy from source %(source)s to target " +"%(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:552 +#, python-format +msgid "leave: run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:572 +#, python-format +msgid "Loopcall: _check_vdisk_fc_mappings(), vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:595 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:631 +#, python-format +msgid "Calling _ensure_vdisk_no_fc_mappings: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:639 +#, python-format +msgid "enter: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:641 +#, python-format +msgid "Tried to delete non-existant vdisk %s." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:645 +#, python-format +msgid "leave: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:649 +#, python-format +msgid "enter: create_copy: snapshot %(src)s to %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:654 +#, python-format +msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:669 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt)s from vdisk %(src)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:691 +msgid "migrate_volume started without a vdisk copy in the expected pool." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:743 +#, python-format +msgid "" +"Ignore change IO group as storage code level is %(code_level)s, below " +"then 6.4.0.0" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:35 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:211 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:244 +#, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:53 +#, python-format +msgid "Expected no output from CLI command %(cmd)s, got %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:65 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:256 +#, python-format +msgid "" +"Failed to parse CLI output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:142 +msgid "Must pass wwpn or host to lsfabric." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:158 +#, python-format +msgid "Did not find success message nor error for %(fun)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:163 +msgid "" +"storwize_svc_multihostmap_enabled is set to False, not allowing multi " +"host mapping." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:347 +#, python-format +msgid "Did not find expected key %(key)s in %(fun)s: %(raw)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:382 +#, python-format +msgid "" +"Unexpected CLI response: header/row mismatch. header: %(header)s, row: " +"%(row)s" +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:419 +#, python-format +msgid "No element by given name %s." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:440 +msgid "Not a valid value for NaElement." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:444 +msgid "NaElement name cannot be null." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:468 +msgid "Type cannot be converted into NaElement." 
+msgstr "" + +#: cinder/volume/drivers/netapp/common.py:75 +msgid "Required configuration not found" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:103 +#, python-format +msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:109 +#, python-format +msgid "Storage family %s is not supported" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:116 +#, python-format +msgid "No default storage protocol found for storage family %(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:123 +#, python-format +msgid "" +"Protocol %(storage_protocol)s is not supported for storage family " +"%(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:130 +#, python-format +msgid "" +"NetApp driver of family %(storage_family)s and protocol " +"%(storage_protocol)s loaded" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:139 +msgid "Only loading netapp drivers supported." +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:158 +#, python-format +msgid "" +"The configured NetApp driver is deprecated. Please refer the link to " +"resolve the issue '%s'." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:69 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:105 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:150 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:166 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:175 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:191 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:227 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:232 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:238 +#, python-format +msgid "Failed to get LUN target details for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:249 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:252 +#, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:290 +#, python-format +msgid "Snapshot %s deletion successful" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:310 +#: cinder/volume/drivers/netapp/iscsi.py:565 +#: cinder/volume/drivers/netapp/nfs.py:99 +#: cinder/volume/drivers/netapp/nfs.py:206 +#, python-format +msgid "Resizing %s failed. Cleaning volume." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:325 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:412 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:431 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:511 +msgid "Object is not a NetApp LUN." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:543 +#, python-format +msgid "Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:545 +#, python-format +msgid "Error getting lun attribute. Exception: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:600 +#, python-format +msgid "No need to extend volume %s as it is already the requested new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:606 +#, python-format +msgid "Resizing lun %s directly to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:633 +#, python-format +msgid "Lun %(path)s geometry failed. Message - %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:662 +#, python-format +msgid "Moving lun %(name)s to %(new_name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:677 +#, python-format +msgid "Resizing lun %s using sub clone to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:684 +#, python-format +msgid "%s cannot be sub clone resized as it is hosted on compressed volume" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:690 +#, python-format +msgid "%s cannot be sub clone resized as it contains no blocks." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:707 +#, python-format +msgid "Post clone resize lun %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:718 +#, python-format +msgid "Failure staging lun %s to tmp." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:723 +#, python-format +msgid "Failure moving new cloned lun to %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:727 +#, python-format +msgid "Failure deleting staged tmp lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:730 +#, python-format +msgid "Unknown exception in post clone resize lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:732 +#, python-format +msgid "Exception details: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:736 +msgid "Getting lun block count." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:741 +#, python-format +msgid "Failure getting lun info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:785 +#, python-format +msgid "Failed to get vol with required size and extra specs for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:796 +#, python-format +msgid "Error provisioning vol %(name)s on %(volume)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:841 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:982 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:986 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1087 +msgid "Cluster ssc is not updated. No volume stats found." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1149 +#: cinder/volume/drivers/netapp/nfs.py:1080 +msgid "Unsupported ONTAP version. ONTAP version 7.3.1 and above is supported." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1153 +#: cinder/volume/drivers/netapp/nfs.py:1084 +#: cinder/volume/drivers/netapp/utils.py:320 +msgid "Api version could not be determined." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1164 +#, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1273 +#, python-format +msgid "Error finding luns for volume %s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1390 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1393 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1456 +msgid "Volume refresh job already running. Returning..." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1462 +#, python-format +msgid "Error refreshing vol capacity. Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1470 +#, python-format +msgid "Refreshing capacity info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:104 +#: cinder/volume/drivers/netapp/nfs.py:211 +#, python-format +msgid "NFS file %s not discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:224 +#, python-format +msgid "Copied image to volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:230 +#, python-format +msgid "Registering image in cache %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:237 +#, python-format +msgid "" +"Exception while registering image %(image_id)s in cache. Exception: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:250 +#, python-format +msgid "Found cache file for image %(image_id)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:263 +#, python-format +msgid "Cloning img from cache for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:271 +msgid "Image cache cleaning in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:282 +msgid "Image cache cleaning in progress." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:293 +#, python-format +msgid "Cleaning cache for share %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:298 +#, python-format +msgid "Files to be queued for deletion %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:305 +#, python-format +msgid "Exception during cache cleaning %(share)s. Message - %(ex)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:311 +msgid "Image cache cleaning done." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:336 +#, python-format +msgid "Bytes to free %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:343 +#, python-format +msgid "Delete file path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:358 +#, python-format +msgid "Deleting file at path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:363 +#, python-format +msgid "Exception during deleting %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:395 +#, python-format +msgid "Unexpected exception in cloning image %(image_id)s. 
Message: %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:411 +#, python-format +msgid "Cloning image %s from cache" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:415 +#, python-format +msgid "Cache share: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:425 +#, python-format +msgid "Unexpected exception during image cloning in share %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:431 +#, python-format +msgid "Cloning image %s directly in share" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:436 +#, python-format +msgid "Share is cloneable %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:443 +#, python-format +msgid "Image is raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:450 +#, python-format +msgid "Image will locally be converted to raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:457 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:467 +#, python-format +msgid "Performing post clone for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:474 +msgid "NFS file could not be discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:478 +msgid "Checking file for resize" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:482 +#, python-format +msgid "Resizing file to %sG" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:488 +msgid "Resizing image file failed." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:510 +msgid "Discover file retries exhausted." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:529 +#, python-format +msgid "Image location not in the expected format %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:557 +#, python-format +msgid "Found possible share matches %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:561 +msgid "Unexpected exception while short listing used share." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:599 +#, python-format +msgid "Extending volume %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:710 +#, python-format +msgid "Shares on vserver %s will only be used for provisioning." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:714 +#: cinder/volume/drivers/netapp/nfs.py:892 +msgid "No vserver set in config. SSC will be disabled." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:757 +#, python-format +msgid "Exception creating vol %(name)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:765 +#, python-format +msgid "Volume %s could not be created on shares." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:815 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:856 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:862 +#, python-format +msgid "" +"Cloning with params volume %(volume)s, src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:924 +msgid "No cluster ssc stats found. Wait for next volume stats update." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:941 +msgid "No shares found hence skipping ssc refresh." 
+msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:978 +#: cinder/volume/drivers/netapp/nfs.py:1221 +#, python-format +msgid "Shortlisted del elg files %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:983 +#: cinder/volume/drivers/netapp/nfs.py:1226 +#, python-format +msgid "Getting file usage for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:988 +#: cinder/volume/drivers/netapp/nfs.py:1231 +#, python-format +msgid "file-usage for path %(path)s is %(bytes)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1005 +#: cinder/volume/drivers/netapp/nfs.py:1268 +#, python-format +msgid "Share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1007 +#: cinder/volume/drivers/netapp/nfs.py:1270 +#, python-format +msgid "No share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1038 +#, python-format +msgid "Found volume %(vol)s for share %(share)s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1129 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1139 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:241 +#, python-format +msgid "Unexpected error while creating ssc vol list. Message - %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:272 +#, python-format +msgid "Exception querying aggr options. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:313 +#, python-format +msgid "Exception querying sis information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:347 +#, python-format +msgid "Exception querying mirror information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:379 +#, python-format +msgid "Exception querying storage disk. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:421 +#, python-format +msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:455 +#, python-format +msgid "Successfully completed stale refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:482 +#, python-format +msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:488 +#, python-format +msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:500 +msgid "Backend not a VolumeDriver." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:502 +msgid "Backend server not NaServer." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:505 +msgid "ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:517 +msgid "refresh stale ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:616 +msgid "Fatal error: User not permitted to query NetApp volumes." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:623 +#, python-format +msgid "" +"The user does not have access or sufficient privileges to use all ssc " +"apis. The ssc features %s may not work as expected." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:122 +msgid "ems executed successfully." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:124 +#, python-format +msgid "Failed to invoke ems. 
Message : %s" +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:137 +msgid "" +"It is not the recommended way to use drivers by NetApp. Please use " +"NetAppDriver to achieve the functionality." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:160 +msgid "Requires an NaServer instance." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:317 +msgid "Unsupported Clustered Data ONTAP version." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:99 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:150 +#, python-format +msgid "Extending volume: %(id)s New size: %(size)s GB" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:166 +#, python-format +msgid "Volume %s does not exist, it seems it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:179 +#, python-format +msgid "Cannot delete snapshot %(origin): %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:190 +#, python-format +msgid "Creating temp snapshot of the original volume: %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:200 +#: cinder/volume/drivers/nexenta/nfs.py:200 +#, python-format +msgid "Volume creation failed, deleting created snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:205 +#: cinder/volume/drivers/nexenta/nfs.py:205 +#, python-format +msgid "Failed to delete zfs snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:223 +#, python-format +msgid "Enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:250 +#, python-format +msgid "Remote NexentaStor appliance at %s should be SSH-bound." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:267 +#, python-format +msgid "" +"Cannot send source snapshot %(src)s to destination %(dst)s. Reason: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:275 +#, python-format +msgid "" +"Cannot delete temporary source snapshot %(src)s on NexentaStor Appliance:" +" %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:281 +#, python-format +msgid "Cannot delete source volume %(volume)s on NexentaStor Appliance: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:318 +#, python-format +msgid "Snapshot %s does not exist, it seems it was already deleted." 
+msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:439 +#: cinder/volume/drivers/windows/windows_utils.py:230 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:449 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:461 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:471 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:481 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:514 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:522 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:83 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:88 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:89 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:90 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:96 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:85 +#, python-format +msgid "Volume %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:89 +#, python-format +msgid "Folder %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:114 +#, python-format +msgid "Creating folder on Nexenta Store %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:146 +#, python-format +msgid "Cannot destroy created folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:176 +#, python-format +msgid "Cannot destroy cloned folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:227 +#, python-format +msgid "Folder %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:237 +#: cinder/volume/drivers/nexenta/nfs.py:268 +#, python-format +msgid "Snapshot %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:302 +#, python-format +msgid "Creating regular file: %s.This may take some time." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:313 +#, python-format +msgid "Regular file: %s created." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:365 +#, python-format +msgid "Sharing folder %s on Nexenta Store" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:393 +#, python-format +msgid "Shares loaded: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/utils.py:46 +#, python-format +msgid "Invalid value: \"%s\"" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:93 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:99 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:107 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:137 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:190 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:246 +#, python-format +msgid "Snapshot info: %(name)s => %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:321 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:79 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:166 +#, python-format +msgid "Invalid hp3parclient version. Version %s or greater required." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:179 +#, python-format +msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:193 +#, python-format +msgid "HP3PARCommon %(common_ver)s, hp3parclient %(rest_ver)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:212 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:494 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:228 +#, python-format +msgid "Failed to get domain because CPG (%s) doesn't exist on array." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:247 +#, python-format +msgid "Error extending volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:347 +#, python-format +msgid "command %s failed" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:390 +#, python-format +msgid "Error running ssh command: %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:586 +#, python-format +msgid "VV Set %s does not exist." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:633 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:684 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:752 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1004 +#, python-format +msgid "Failure in update_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1019 +#, python-format +msgid "Failure in clear_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1031 +#, python-format +msgid "Error attaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1039 +#, python-format +msgid "Error detaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:124 +#, python-format +msgid "Invalid IP address format '%s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:158 +#, python-format +msgid "" +"Found invalid iSCSI IP address(s) in configuration option(s) " +"hp3par_iscsi_ips or iscsi_ip_address '%s.'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:164 +msgid "At least one valid iSCSI IP address must be set." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:266 +msgid "Least busy iSCSI port not found, using first iSCSI port in list." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:75 +#, python-format +msgid "Failure while invoking function: %(func)s. Error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:162 +#, python-format +msgid "Error while terminating session: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:165 +msgid "Successfully established connection to the server." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:172 +#, python-format +msgid "Error while logging out the user: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:218 +#, python-format +msgid "" +"Not authenticated error occurred. Will create session and try API call " +"again: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:258 +#, python-format +msgid "Task: %(task)s progress: %(prog)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:262 +#, python-format +msgid "Task %s status: success." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:266 +#: cinder/volume/drivers/vmware/api.py:271 +#, python-format +msgid "Task: %(task)s failed with error: %(err)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:290 +msgid "Lease is ready." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:294 +msgid "Lease initializing..." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:304 +#, python-format +msgid "Error: unknown lease state %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:51 +#, python-format +msgid "Read %(bytes)s out of %(max)s from ThreadSafePipe." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:56 +#, python-format +msgid "Completed transfer of size %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:102 +#, python-format +msgid "Initiating image service update on image: %(image)s with meta: %(meta)s" +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:117 +#, python-format +msgid "Glance image: %s is now active." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:123 +#, python-format +msgid "Glance image: %s is in killed state." 
+msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:132 +#, python-format +msgid "Glance image %(id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:171 +#, python-format +msgid "" +"Exception during HTTP connection close in VMwareHTTPWrite. Exception is " +"%s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:203 +#: cinder/volume/drivers/vmware/read_write_util.py:292 +msgid "Could not retrieve URL from lease." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:206 +#, python-format +msgid "Opening vmdk url: %s for write." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:231 +#, python-format +msgid "Written %s bytes to vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:242 +#: cinder/volume/drivers/vmware/read_write_util.py:318 +#, python-format +msgid "Updating progress to %s percent." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:258 +#: cinder/volume/drivers/vmware/read_write_util.py:334 +msgid "Lease released." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:260 +#: cinder/volume/drivers/vmware/read_write_util.py:336 +#, python-format +msgid "Lease is already in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:295 +#, python-format +msgid "Opening vmdk url: %s for read." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:307 +#, python-format +msgid "Read %s bytes from vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:150 +#, python-format +msgid "Error(s): %s occurred in the call to RetrievePropertiesEx." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:189 +#, python-format +msgid "No such SOAP method %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:198 +#, python-format +msgid "httplib error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:209 +#, python-format +msgid "Socket error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:218 +#, python-format +msgid "Type error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:225 +#, python-format +msgid "Error in %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:112 +#, python-format +msgid "Returning spec value %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:115 +#, python-format +msgid "Invalid spec value: %s specified." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:118 +#, python-format +msgid "Returning default spec value: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:169 +#, python-format +msgid "%s not set." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:174 +#, python-format +msgid "Successfully setup driver: %(driver)s for server: %(ip)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:219 +msgid "Backing not available, no operation to be performed." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:287 +#, python-format +msgid "" +"Unable to pick datastore to accommodate %(size)s bytes from the " +"datastores: %(dss)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:293 +#, python-format +msgid "" +"Selected datastore: %(datastore)s with %(host_count)d connected host(s) " +"for the volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:375 +#, python-format +msgid "" +"Unable to find suitable datastore for volume of size: %(vol)s GB under " +"host: %(host)s. 
More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:385 +#, python-format +msgid "Unable to find host to accommodate a disk of size: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:412 +#, python-format +msgid "" +"Unable to find suitable datastore for volume: %(vol)s under host: " +"%(host)s. More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:422 +#, python-format +msgid "Unable to create volume: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:441 +#, python-format +msgid "The instance: %s for which initialize connection is called, exists." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:448 +#, python-format +msgid "There is no backing for the volume: %s. Need to create one." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:456 +msgid "The instance for which initialize connection is called, does not exist." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:461 +#, python-format +msgid "Trying to boot from an empty volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:470 +#, python-format +msgid "" +"Returning connection_info: %(info)s for volume: %(volume)s with " +"connector: %(connector)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:518 +#, python-format +msgid "Snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:523 +#, python-format +msgid "There is no backing, so will not create snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:528 +#, python-format +msgid "Successfully created snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:549 +#, python-format +msgid "Delete snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:554 +#, python-format +msgid "There is no backing, and so there is no snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:558 +#, python-format +msgid "Successfully deleted snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:586 +#, python-format +msgid "Successfully cloned new backing: %(back)s from source VMDK file: %(vmdk)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:603 +#, python-format +msgid "" +"There is no backing for the source volume: %(svol)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:633 +#, python-format +msgid "" +"There is no backing for the source snapshot: %(snap)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:642 +#: cinder/volume/drivers/vmware/vmdk.py:982 +#, python-format +msgid "" +"There is no snapshot point for the snapshoted volume: %(snap)s. Not " +"creating any backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:678 +#, python-format +msgid "Cannot create image of disk format: %s. Only vmdk disk format is accepted." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:713 +#: cinder/volume/drivers/vmware/vmdk.py:771 +#, python-format +msgid "Fetching glance image: %(id)s to server: %(host)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:722 +#: cinder/volume/drivers/vmware/vmdk.py:792 +#, python-format +msgid "Done copying image: %(id)s to volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:725 +#, python-format +msgid "" +"Exception in copy_image_to_volume: %(excep)s. Deleting the backing: " +"%(back)s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:746 +#, python-format +msgid "Exception in _select_ds_for_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:749 +#, python-format +msgid "Selected datastore %(ds)s for new volume of size %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:784 +#, python-format +msgid "Exception in copy_image_to_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:787 +#, python-format +msgid "Deleting the backing: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:808 +#, python-format +msgid "Copy glance image: %s to create new volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:842 +msgid "Upload to glance of attached volume is not supported." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:847 +#, python-format +msgid "Copy Volume: %s to new image." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:853 +#, python-format +msgid "Backing not found, creating for volume: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:872 +#, python-format +msgid "Done copying volume %(vol)s to a new image %(img)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:922 +#, python-format +msgid "Relocating volume: %(backing)s to %(ds)s and %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:961 +#: cinder/volume/drivers/vmware/volumeops.py:630 +#, python-format +msgid "Successfully created clone: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:974 +#, python-format +msgid "" +"There is no backing for the snapshoted volume: %(snap)s. Not creating any" +" backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1010 +#, python-format +msgid "" +"There is no backing for the source volume: %(src)s. Not creating any " +"backing for volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1018 +#, python-format +msgid "Linked clone of source volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:94 +#, python-format +msgid "Downloading image: %s from glance image server as a flat vmdk file." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:107 +#: cinder/volume/drivers/vmware/vmware_images.py:126 +#, python-format +msgid "Downloaded image: %s from glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:113 +#, python-format +msgid "Downloading image: %s from glance image server using HttpNfc import." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:132 +#, python-format +msgid "Uploading image: %s to the Glance image server using HttpNfc export." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:158 +#, python-format +msgid "Uploaded image: %s to the Glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:87 +#, python-format +msgid "Did not find any backing with name: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:94 +#, python-format +msgid "Deleting the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:97 +#, python-format +msgid "Initiated deletion of VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:99 +#, python-format +msgid "Deleted the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:237 +#, python-format +msgid "There are no valid datastores attached to %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:289 +#, python-format +msgid "" +"Creating folder: %(child_folder_name)s under parent folder: " +"%(parent_folder)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:306 +#, python-format +msgid "Child folder already present: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:314 +#, python-format +msgid "Created child folder: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:365 +#, python-format +msgid "Spec for creating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:383 +#, python-format +msgid "" +"Creating volume backing name: %(name)s disk_type: %(disk_type)s size_kb: " +"%(size_kb)s at folder: %(folder)s resourse pool: %(resource_pool)s " +"datastore name: %(ds_name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:395 +#, python-format +msgid "Initiated creation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:398 +#, python-format +msgid "Successfully created volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:438 +#, python-format +msgid "Spec for relocating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:452 +#, python-format +msgid "" +"Relocating backing: %(backing)s to datastore: %(ds)s and resource pool: " +"%(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:462 +#, python-format +msgid "Initiated relocation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:464 +#, python-format +msgid "" +"Successfully relocated volume backing: %(backing)s to datastore: %(ds)s " +"and resource pool: %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:474 +#, python-format +msgid "Moving backing: %(backing)s to folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:479 +#, python-format +msgid "Initiated move of volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:482 +#, python-format +msgid "Successfully moved volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:494 +#, python-format +msgid "Snapshoting backing: %(backing)s with name: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:501 +#, python-format +msgid "Initiated snapshot of volume backing: %(backing)s named: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:505 +#, python-format +msgid "Successfully created snapshot: %(snap)s for volume backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:553 +#, python-format +msgid "Deleting the snapshot: %(name)s from backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:558 +#, python-format +msgid "" +"Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " +"delete anything." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:565 +#, python-format +msgid "Initiated snapshot: %(name)s deletion for backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:569 +#, python-format +msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:597 +#, python-format +msgid "Spec for cloning the backing: %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:613 +#, python-format +msgid "" +"Creating a clone of backing: %(back)s, named: %(name)s, clone type: " +"%(type)s from snapshot: %(snap)s on datastore: %(ds)s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:627 +#, python-format +msgid "Initiated clone of backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:638 +#, python-format +msgid "Deleting file: %(file)s under datacenter: %(dc)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:646 +#, python-format +msgid "Initiated deletion via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:648 +#, python-format +msgid "Successfully deleted file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:701 +msgid "Copying disk data before snapshot of the VM" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:711 +#, python-format +msgid "Initiated copying disk data via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:713 +#, python-format +msgid "Successfully copied disk at: %(src)s to: %(dest)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:722 +#, python-format +msgid "Deleting vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:729 +#, python-format +msgid "Initiated deleting vmdk file via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:731 +#, python-format +msgid "Deleted vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/windows/windows.py:102 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:47 +#, python-format +msgid "" +"check_for_setup_error: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:53 +msgid "check_for_setup_error: there is no ISCSI traffic listening." +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:63 +#, python-format +msgid "" +"get_host_information: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:73 +#, python-format +msgid "" +"get_host_information: the ISCSI target information could not be " +"retrieved. WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:105 +#, python-format +msgid "" +"associate_initiator_with_iscsi_target: an association between initiator: " +"%(init)s and target name: %(target)s could not be established. WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:123 +#, python-format +msgid "" +"delete_iscsi_target: error when deleting the iscsi target associated with" +" target name: %(target)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:139 +#, python-format +msgid "" +"create_volume: error when creating the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:157 +#, python-format +msgid "" +"delete_volume: error when deleting the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:177 +#, python-format +msgid "" +"create_snapshot: error when creating the snapshot name: %(vol_name)s . 
" +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:193 +#, python-format +msgid "" +"create_volume_from_snapshot: error when creating the volume name: " +"%(vol_name)s from snapshot name: %(snap_name)s. WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:208 +#, python-format +msgid "" +"delete_snapshot: error when deleting the snapshot name: %(snap_name)s . " +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:223 +#, python-format +msgid "" +"create_iscsi_target: error when creating iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:240 +#, python-format +msgid "" +"remove_iscsi_target: error when deleting iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:255 +#, python-format +msgid "" +"add_disk_to_target: error adding disk associated to volume : %(vol_name)s" +" to the target name: %(tar_name)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:273 +#, python-format +msgid "" +"copy_vhd_disk: error when copying disk from source path : %(src_path)s to" +" destination path: %(dest_path)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:290 +#, python-format +msgid "" +"extend: error when extending the volume: %(vol_name)s .WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/flows/common.py:52 +#, python-format +msgid "Restoring source %(source_volid)s status to %(status)s" +msgstr "" + +#: cinder/volume/flows/common.py:58 +#, python-format +msgid "" +"Failed setting source volume %(source_volid)s back to its initial " +"%(source_status)s status" +msgstr "" + +#: cinder/volume/flows/common.py:83 +#, python-format +msgid "Updating volume: %(volume_id)s with %(update)s due to: %(reason)s" +msgstr "" + +#: cinder/volume/flows/common.py:90 +#: cinder/volume/flows/manager/create_volume.py:676 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(update)s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:81 +#, python-format +msgid "Originating snapshot status must be one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:103 +#, python-format +msgid "" +"Unable to create a volume from an originating source volume when its " +"status is not one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:126 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than the snapshot size " +"%(snap_size)sGB. They must be >= original snapshot size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:135 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than original volume size " +"%(source_size)sGB. They must be >= original volume size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:144 +#, python-format +msgid "Volume size %(size)s must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:186 +#, python-format +msgid "" +"Size of specified image %(image_size)sGB is larger than volume size " +"%(volume_size)sGB." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:194 +#, python-format +msgid "" +"Volume size %(volume_size)sGB cannot be smaller than the image minDisk " +"size %(min_disk)sGB." 
+msgstr "" + +#: cinder/volume/flows/api/create_volume.py:212 +#, python-format +msgid "Metadata property key %s greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:217 +#, python-format +msgid "Metadata property key %s value greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:254 +#, python-format +msgid "Availability zone '%s' is invalid" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:267 +msgid "Volume must be in the same availability zone as the snapshot" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:276 +msgid "Volume must be in the same availability zone as the source volume" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:315 +msgid "Volume type will be changed to be the same as the source volume." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:463 +#, python-format +msgid "Failed destroying volume entry %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:546 +#, python-format +msgid "Failed rolling back quota for %s reservations" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:590 +#, python-format +msgid "Failed to update quota for deleting volume: %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:678 +#: cinder/volume/flows/manager/create_volume.py:192 +#, python-format +msgid "Volume %s: create failed" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:682 +msgid "Unexpected build error:" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:105 +#, python-format +msgid "" +"Volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d due to " +"%(reason)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:124 +#, python-format +msgid "Volume %s: re-scheduled" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:141 +#, python-format +msgid "Updating volume %(volume_id)s with %(update)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:146 +#, python-format +msgid "Volume %s: resetting 'creating' status failed." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:165 +#, python-format +msgid "Volume %s: rescheduling failed" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:308 +#, python-format +msgid "" +"Failed notifying about the volume action %(event)s for volume " +"%(volume_id)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:345 +#, python-format +msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:347 +#, python-format +msgid "" +"Failed updating volume %(vol_id)s metadata using the provided " +"%(src_type)s %(src_id)s metadata" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:405 +#, python-format +msgid "" +"Failed fetching snapshot %(snapshot_id)s bootable flag using the provided" +" glance snapshot %(snapshot_ref_id)s volume reference" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:418 +#, python-format +msgid "Marking volume %s as bootable." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:421 +#, python-format +msgid "Failed updating volume %(volume_id)s bootable flag to true" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:448 +#, python-format +msgid "" +"Attempting download of %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s." 
+msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:455 +#: cinder/volume/flows/manager/create_volume.py:466 +#, python-format +msgid "" +"Failed to copy image %(image_id)s to volume: %(volume_id)s, error: " +"%(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:461 +#, python-format +msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:475 +#, python-format +msgid "" +"Downloaded image %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s successfully." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:512 +#, python-format +msgid "" +"Creating volume glance metadata for volume %(volume_id)s backed by image " +"%(image_id)s with: %(vol_metadata)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:526 +#, python-format +msgid "" +"Cloning %(volume_id)s from image %(image_id)s at location " +"%(image_location)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:552 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(updates)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:574 +#, python-format +msgid "Unable to create volume. Volume driver %s not initialized" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:588 +#, python-format +msgid "" +"Volume %(volume_id)s: being created using %(functor)s with specification:" +" %(volume_spec)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:611 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with creation provided " +"model %(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:619 +#, python-format +msgid "Volume %s: creating export" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:633 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with driver provided model " +"%(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:680 +#, python-format +msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" +msgstr "" + +#~ msgid "Failed to create iser target for volume %(volume_id)s." +#~ msgstr "" + +#~ msgid "Failed to remove iser target for volume %(volume_id)s." +#~ msgstr "" + +#~ msgid "rtstool is not installed correctly" +#~ msgstr "" + +#~ msgid "Creating iser_target for: %s" +#~ msgstr "" + +#~ msgid "Failed to create iser target for volume id:%(vol_id)s: %(e)s" +#~ msgstr "" + +#~ msgid "" +#~ msgstr "" + +#~ msgid "Removing iser_target for: %s" +#~ msgstr "" + +#~ msgid "Failed to remove iser target for volume id:%(vol_id)s: %(e)s" +#~ msgstr "" + +#~ msgid "Volume %s does not exist, it seems it was already deleted" +#~ msgstr "" + +#~ msgid "Executing zfs send/recv on the appliance" +#~ msgstr "" + +#~ msgid "zfs send/recv done, new volume %s created" +#~ msgstr "" + +#~ msgid "Failed to delete temp snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" + +#~ msgid "Failed to delete zfs recv snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" + +#~ msgid "rbd export-diff failed - %s" +#~ msgstr "" + +#~ msgid "rbd import-diff failed - %s" +#~ msgstr "" + +#~ msgid "%s is not on GPFS. Perhaps GPFS not mounted." +#~ msgstr "" + +#~ msgid "Folder %s does not exist, it seems it was already deleted." +#~ msgstr "" + +#~ msgid "No 'os-update_readonly_flag' was specified in request." +#~ msgstr "" + +#~ msgid "Volume 'readonly' flag must be specified in request as a boolean." 
+#~ msgstr "" + +#~ msgid "ISER provider_location not stored, using discovery" +#~ msgstr "" + +#~ msgid "Could not find iSER export for volume %s" +#~ msgstr "" + +#~ msgid "ISER Discovery: Found %s" +#~ msgstr "" + +#~ msgid "Failed to access the device on the path %(path)s: %(error)s." +#~ msgstr "" + +#~ msgid "iSER device not found at %s" +#~ msgstr "" + +#~ msgid "Found iSER node %(host_device)s (after %(tries)s rescans)." +#~ msgstr "" + +#~ msgid "Skipping ensure_export. No iser_target provisioned for volume: %s" +#~ msgstr "" + +#~ msgid "Skipping remove_export. No iser_target provisioned for volume: %s" +#~ msgstr "" + +#~ msgid "Downloading image: %s from glance image server." +#~ msgstr "" + +#~ msgid "Uploading image: %s to the Glance image server." +#~ msgstr "" + +#~ msgid "Invalid request body" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: prefix %s" +#~ msgstr "" + +#~ msgid "Schedule volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete schedule volume using flow: %s" +#~ msgstr "" + +#~ msgid "Create volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete create volume workflow" +#~ msgstr "" + +#~ msgid "Expected volume result not found" +#~ msgstr "" + +#~ msgid "Manager volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete manager volume workflow" +#~ msgstr "" + +#~ msgid "Unable to update stats, driver is uninitialized" +#~ msgstr "" + +#~ msgid "Bad reponse from server: %s" +#~ msgstr "" + +#~ msgid "%(flow)s has moved into state %(state)s from state %(old_state)s" +#~ msgstr "" + +#~ msgid "No request spec, will not reschedule" +#~ msgstr "" + +#~ msgid "No retry filter property or associated retry info, will not reschedule" +#~ msgstr "" + +#~ msgid "Retry info not present, will not reschedule" +#~ msgstr "" + +#~ msgid "Clear capabilities" +#~ msgstr "" + +#~ msgid "This usually means the volume was never succesfully created." +#~ msgstr "" + +#~ msgid "setting LU uppper (end) limit to %s" +#~ msgstr "" + +#~ msgid "Can't find lun or lun goup in array" +#~ msgstr "" + +#~ msgid "Volume to be restored to is smaller than the backup to be restored" +#~ msgstr "" + +#~ msgid "Volume driver '%(driver)s' not initialized." +#~ msgstr "" + +#~ msgid "in looping call" +#~ msgstr "" + +#~ msgid "Is the appropriate service running?" +#~ msgstr "" + +#~ msgid "Could not find another host" +#~ msgstr "" + +#~ msgid "Not enough allocatable volume gigabytes remaining" +#~ msgstr "" + +#~ msgid "Unable to update stats on non-intialized Volume Group: %s" +#~ msgstr "" + +#~ msgid "do_setup: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "migrate_volume started with more than one vdisk copy" +#~ msgstr "" + +#~ msgid "migrate_volume: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "Selected datastore: %s for the volume." +#~ msgstr "" + +#~ msgid "There are no valid datastores present under %s." +#~ msgstr "" + +#~ msgid "Unable to create volume, driver not initialized" +#~ msgstr "" + +#~ msgid "Migration %(migration_id)s could not be found." +#~ msgstr "" + +#~ msgid "Bad driver response status: %(status)s" +#~ msgstr "" + +#~ msgid "Instance %(instance_id)s could not be found." 
+#~ msgstr "" + +#~ msgid "Volume retype failed: %(reason)s" +#~ msgstr "" + +#~ msgid "SIGTERM received" +#~ msgstr "" + +#~ msgid "Child %(pid)d exited with status %(code)d" +#~ msgstr "" + +#~ msgid "_wait_child %d" +#~ msgstr "" + +#~ msgid "wait wrap.failed %s" +#~ msgstr "" + +#~ msgid "" +#~ "Report interval must be less than " +#~ "service down time. Current config: " +#~ ". Setting " +#~ "service_down_time to: %(new_service_down_time)s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target for volume %(name)s." +#~ msgstr "" + +#~ msgid "Updating iscsi target: %s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target %(name)s: %(e)s" +#~ msgstr "" + +#~ msgid "Caught '%(exception)s' exception." +#~ msgstr "" + +#~ msgid "Get code level failed" +#~ msgstr "" + +#~ msgid "do_setup: Could not get system name" +#~ msgstr "" + +#~ msgid "Failed to get license information." +#~ msgstr "" + +#~ msgid "do_setup: No configured nodes" +#~ msgstr "" + +#~ msgid "enter: _get_chap_secret_for_host: host name %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_chap_secret_for_host: host name " +#~ "%(host_name)s with secret %(chap_secret)s" +#~ msgstr "" + +#~ msgid "" +#~ "_create_host: Cannot clean host name. " +#~ "Host name is not unicode or string" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: %s" +#~ msgstr "" + +#~ msgid "leave: _get_host_from_connector: host %s" +#~ msgstr "" + +#~ msgid "enter: _create_host: host %s" +#~ msgstr "" + +#~ msgid "_create_host: No connector ports" +#~ msgstr "" + +#~ msgid "leave: _create_host: host %(host)s - %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +#~ msgstr "" + +#~ msgid "" +#~ "storwize_svc_multihostmap_enabled is set to " +#~ "False, Not allow multi host mapping" +#~ msgstr "" + +#~ msgid "volume %s mapping to multi host" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _map_vol_to_host: LUN %(result_lun)s, " +#~ "volume %(volume_name)s, host %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "leave: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "_create_host failed to return the host name." +#~ msgstr "" + +#~ msgid "_get_host_from_connector failed to return the host name for connector" +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to any host found." +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: Multiple mappings of " +#~ "volume %(vol_name)s found, no host " +#~ "specified." 
+#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to host %(host_name)s found" +#~ msgstr "" + +#~ msgid "protocol must be specified as ' iSCSI' or ' FC'" +#~ msgstr "" + +#~ msgid "enter: _create_vdisk: vdisk %s " +#~ msgstr "" + +#~ msgid "" +#~ "_create_vdisk %(name)s - did not find success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find success " +#~ "message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find mapping " +#~ "id in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to prepare FlashCopy" +#~ " from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "Unexpected mapping status %(status)s for " +#~ "mapping %(id)s. Attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Mapping %(id)s prepare failed to " +#~ "complete within the allotted %(to)d " +#~ "seconds timeout. Terminating." +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s with " +#~ "exception %(ex)s" +#~ msgstr "" + +#~ msgid "_prepare_fc_map: %s" +#~ msgstr "" + +#~ msgid "" +#~ "_start_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "enter: _run_flashcopy: execute FlashCopy from" +#~ " source %(source)s to target %(target)s" +#~ msgstr "" + +#~ msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +#~ msgstr "" + +#~ msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %(src_vdisk)s (%(src_id)s) does not exist" +#~ msgstr "" + +#~ msgid "" +#~ "_create_copy: cannot get source vdisk " +#~ "%(src)s capacity from vdisk attributes " +#~ "%(attr)s" +#~ msgstr "" + +#~ msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_flashcopy_mapping_attributes: mapping " +#~ "%(fc_map_id)s, attributes %(attributes)s" +#~ msgstr "" + +#~ msgid "enter: _is_vdisk_defined: vdisk %s " +#~ msgstr "" + +#~ msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +#~ msgstr "" + +#~ msgid "enter: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "warning: Tried to delete vdisk %s but it does not exist."
+#~ msgstr "" + +#~ msgid "leave: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "" +#~ "_add_vdisk_copy %(name)s - did not find" +#~ " success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "_get_vdisk_copy_attrs: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "_get_pool_attrs: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "enter: _execute_command_and_parse_attributes: command %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _execute_command_and_parse_attributes:\n" +#~ "command: %(cmd)s\n" +#~ "attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "_get_hdr_dic: attribute headers and values do not match.\n" +#~ " Headers: %(header)s\n" +#~ " Values: %(row)s" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ "stdout: %(out)s\n" +#~ "stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "Did not find expected column in %(fun)s: %(hdr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Volume size %(size)s cannot be lesser" +#~ " than the snapshot size %(snap_size)s. " +#~ "They must be >= original snapshot " +#~ "size." +#~ msgstr "" + +#~ msgid "" +#~ "Clones currently disallowed when %(size)s " +#~ "< %(source_size)s. They must be >= " +#~ "original volume size." +#~ msgstr "" + +#~ msgid "" +#~ "Size of specified image %(image_size)s " +#~ "is larger than volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "" +#~ "Image minDisk size %(min_disk)s is " +#~ "larger than the volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "Updating volume %(volume_id)s with %(update)s" +#~ msgstr "" + +#~ msgid "Volume %s: resetting 'creating' status failed" +#~ msgstr "" + +#~ msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s" +#~ msgstr "" + +#~ msgid "Marking volume %s as bootable" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting download of %(image_id)s " +#~ "(%(image_location)s) to volume %(volume_id)s" +#~ msgstr "" + +#~ msgid "" +#~ "Downloaded image %(image_id)s (%(image_location)s)" +#~ " to volume %(volume_id)s successfully" +#~ msgstr "" + +#~ msgid "" +#~ "Creating volume glance metadata for " +#~ "volume %(volume_id)s backed by image " +#~ "%(image_id)s with: %(vol_metadata)s" +#~ msgstr "" + +#~ msgid "" +#~ "Cloning %(volume_id)s from image %(image_id)s" +#~ " at location %(image_location)s" +#~ msgstr "" + diff --git a/cinder/locale/ms/LC_MESSAGES/cinder.po b/cinder/locale/ms/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..4f29c4c21e --- /dev/null +++ b/cinder/locale/ms/LC_MESSAGES/cinder.po @@ -0,0 +1,10736 @@ +# Malay translations for cinder. +# Copyright (C) 2013 ORGANIZATION +# This file is distributed under the same license as the cinder project. 
+# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Cinder\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-02-03 06:16+0000\n" +"PO-Revision-Date: 2013-05-29 08:13+0000\n" +"Last-Translator: openstackjenkins \n" +"Language-Team: Malay " +"(http://www.transifex.com/projects/p/openstack/language/ms/)\n" +"Plural-Forms: nplurals=1; plural=0\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:102 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:66 cinder/brick/exception.py:33 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:88 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:107 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:112 +#, python-format +msgid "Volume driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:116 +#, python-format +msgid "Backup driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:120 +#, python-format +msgid "Connection to glance failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:124 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:129 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:133 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:137 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:141 +msgid "Volume driver not ready." +msgstr "" + +#: cinder/exception.py:145 cinder/brick/exception.py:74 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:150 +#, python-format +msgid "Invalid snapshot: %(reason)s" +msgstr "" + +#: cinder/exception.py:154 +#, python-format +msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:159 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:163 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:167 +msgid "The results are invalid." +msgstr "" + +#: cinder/exception.py:171 +#, python-format +msgid "Invalid input received: %(reason)s" +msgstr "" + +#: cinder/exception.py:175 +#, python-format +msgid "Invalid volume type: %(reason)s" +msgstr "" + +#: cinder/exception.py:179 +#, python-format +msgid "Invalid volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:183 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:187 +#, python-format +msgid "Invalid host: %(reason)s" +msgstr "" + +#: cinder/exception.py:193 cinder/brick/exception.py:81 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:197 +#, python-format +msgid "Invalid auth key: %(reason)s" +msgstr "" + +#: cinder/exception.py:201 +#, python-format +msgid "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" +msgstr "" + +#: cinder/exception.py:206 +msgid "Service is unavailable at this time." 
+msgstr "" + +#: cinder/exception.py:210 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:214 +#, python-format +msgid "The device in the path %(path)s is unavailable: %(reason)s" +msgstr "" + +#: cinder/exception.py:218 +#, python-format +msgid "Expected a uuid but received %(uuid)s." +msgstr "" + +#: cinder/exception.py:222 cinder/brick/exception.py:68 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:228 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:232 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "" +"Volume %(volume_id)s has no administration metadata with key " +"%(metadata_key)s." +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Invalid metadata: %(reason)s" +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Invalid metadata size: %(reason)s" +msgstr "" + +#: cinder/exception.py:250 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:255 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:264 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:269 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s deletion is not allowed with volumes " +"present with the type." +msgstr "" + +#: cinder/exception.py:274 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:278 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:282 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:287 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:291 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:295 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:328 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:332 +#, python-format +msgid "Unknown quota resources %(unknown)s." 
+msgstr "" + +#: cinder/exception.py:336 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:340 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:344 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:348 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:352 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:356 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:365 +#, python-format +msgid "Volume Type %(id)s already exists." +msgstr "" + +#: cinder/exception.py:369 +#, python-format +msgid "Volume type encryption for type %(type_id)s already exists." +msgstr "" + +#: cinder/exception.py:373 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:377 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:381 +#, python-format +msgid "Could not find parameter %(param)s" +msgstr "" + +#: cinder/exception.py:385 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:389 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:398 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:402 +#, python-format +msgid "Quota exceeded: code=%(code)s" +msgstr "" + +#: cinder/exception.py:409 +#, python-format +msgid "" +"Requested volume or snapshot exceeds allowed Gigabytes quota. Requested " +"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." +msgstr "" + +#: cinder/exception.py:415 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:419 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:423 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:427 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:432 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:436 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:440 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:444 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:449 +#, python-format +msgid "Glance metadata for volume/snapshot %(id)s cannot be found." 
+msgstr "" + +#: cinder/exception.py:453 +#, python-format +msgid "Failed to export for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:457 +#, python-format +msgid "Failed to create metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:461 +#, python-format +msgid "Failed to update metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:465 +#, python-format +msgid "Failed to copy metadata to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:469 +#, python-format +msgid "Failed to copy image to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:473 +msgid "Invalid Ceph args provided for backup rbd operation" +msgstr "" + +#: cinder/exception.py:477 +msgid "An error has occurred during backup operation" +msgstr "" + +#: cinder/exception.py:481 +msgid "Backup RBD operation failed" +msgstr "" + +#: cinder/exception.py:485 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:489 +msgid "Failed to identify volume backend." +msgstr "" + +#: cinder/exception.py:493 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "" + +#: cinder/exception.py:497 +#, python-format +msgid "Connection to swift failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:501 +#, python-format +msgid "Transfer %(transfer_id)s could not be found." +msgstr "" + +#: cinder/exception.py:505 +#, python-format +msgid "Volume migration failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:509 +#, python-format +msgid "SSH command injection detected: %(command)s" +msgstr "" + +#: cinder/exception.py:513 +#, python-format +msgid "QoS Specs %(specs_id)s already exists." +msgstr "" + +#: cinder/exception.py:517 +#, python-format +msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:522 +#, python-format +msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "No such QoS spec %(specs_id)s." +msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:536 +#, python-format +msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:541 +#, python-format +msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." +msgstr "" + +#: cinder/exception.py:546 +#, python-format +msgid "Invalid qos specs: %(reason)s" +msgstr "" + +#: cinder/exception.py:550 +#, python-format +msgid "QoS Specs %(specs_id)s is still associated with entities." +msgstr "" + +#: cinder/exception.py:554 +#, python-format +msgid "key manager error: %(reason)s" +msgstr "" + +#: cinder/exception.py:560 +msgid "Coraid Cinder Driver exception." +msgstr "" + +#: cinder/exception.py:564 +msgid "Failed to encode json data." +msgstr "" + +#: cinder/exception.py:568 +msgid "Login on ESM failed." +msgstr "" + +#: cinder/exception.py:572 +msgid "Relogin on ESM failed." +msgstr "" + +#: cinder/exception.py:576 +#, python-format +msgid "Group with name \"%(group_name)s\" not found." +msgstr "" + +#: cinder/exception.py:580 +#, python-format +msgid "ESM configure request failed: %(message)s." +msgstr "" + +#: cinder/exception.py:584 +#, python-format +msgid "Coraid ESM not available with reason: %(reason)s." +msgstr "" + +#: cinder/exception.py:589 +msgid "Zadara Cinder Driver exception." 
+msgstr "" + +#: cinder/exception.py:593 +#, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:597 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:601 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:605 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:609 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:613 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:618 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:622 +msgid "SolidFire Cinder Driver exception" +msgstr "" + +#: cinder/exception.py:626 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:630 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:636 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:641 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:645 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:649 cinder/exception.py:662 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:654 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:658 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/manager.py:133 +msgid "Notifying Schedulers of capabilities ..." +msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:105 +#, python-format +msgid "" +"Default quota for resource: %(res)s is set by the default quota flag: " +"quota_%(res)s, it is now deprecated. Please use the the default quota " +"class for default quota." +msgstr "" + +#: cinder/quota.py:748 +#, python-format +msgid "Created reservations %s" +msgstr "" + +#: cinder/quota.py:770 +#, python-format +msgid "Failed to commit reservations %s" +msgstr "" + +#: cinder/quota.py:790 +#, python-format +msgid "Failed to roll back reservations %s" +msgstr "" + +#: cinder/quota.py:876 +msgid "Cannot register resource" +msgstr "" + +#: cinder/quota.py:879 +msgid "Cannot register resources" +msgstr "" + +#: cinder/quota_utils.py:46 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume - " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/quota_utils.py:56 cinder/transfer/api.py:168 +#: cinder/volume/flows/api/create_volume.py:520 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/service.py:95 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:108 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:148 +#, python-format +msgid "" +"Report interval must be less than service down time. Current config " +"service_down_time: %(service_down_time)s, report_interval for this: " +"service is: %(report_interval)s. 
Setting global service_down_time to: " +"%(new_down_time)s" +msgstr "" + +#: cinder/service.py:216 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:255 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:270 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:276 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:298 +#, python-format +msgid "" +"Value of config option %(name)s_workers must be integer greater than 1. " +"Input value ignored." +msgstr "" + +#: cinder/service.py:373 +msgid "serve() can only be called once" +msgstr "" + +#: cinder/service.py:379 cinder/openstack/common/service.py:166 +#: cinder/openstack/common/service.py:384 +msgid "Full set of CONF:" +msgstr "" + +#: cinder/service.py:387 +#, python-format +msgid "%s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Can not translate %s to integer." +msgstr "" + +#: cinder/utils.py:127 +#, python-format +msgid "May specify only one of %s" +msgstr "" + +#: cinder/utils.py:212 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:228 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:412 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:423 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:698 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:759 +#, python-format +msgid "Volume driver %s not initialized" +msgstr "" + +#: cinder/wsgi.py:127 cinder/openstack/common/sslutils.py:50 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: cinder/wsgi.py:130 cinder/openstack/common/sslutils.py:53 +#, python-format +msgid "Unable to find ca_file : %s" +msgstr "" + +#: cinder/wsgi.py:133 cinder/openstack/common/sslutils.py:56 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: cinder/wsgi.py:136 cinder/openstack/common/sslutils.py:59 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:169 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:206 +#, python-format +msgid "Started %(name)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:244 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:313 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." 
+msgstr "" + +#: cinder/api/common.py:92 cinder/api/common.py:126 cinder/volume/api.py:266 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:95 cinder/api/common.py:130 cinder/volume/api.py:263 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:120 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:134 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:162 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:189 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:182 +msgid "Initializing extension manager." +msgstr "" + +#: cinder/api/extensions.py:197 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:235 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:236 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:240 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:256 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:262 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:276 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:287 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:356 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:266 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:463 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:786 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:907 +msgid "subclasses must implement construct()!" 
+msgstr "" + +#: cinder/api/contrib/admin_actions.py:81 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:227 +#: cinder/api/contrib/volume_transfer.py:157 +#: cinder/api/contrib/volume_transfer.py:193 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:224 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:60 +msgid "Snapshot not found." +msgstr "" + +#: cinder/api/contrib/hosts.py:86 cinder/api/openstack/wsgi.py:245 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:136 +#, python-format +msgid "Host '%s' could not be found." +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:168 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:180 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:206 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:214 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:111 +msgid "Please specify a name for QoS specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:220 +msgid "Failed to disassociate qos specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:222 +msgid "Qos specs still in use." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:298 +#: cinder/api/contrib/qos_specs_manage.py:351 +msgid "Volume Type id must not be None." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:72 +msgid "Missing required element quota_class_set in request body." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:81 +msgid "Quota class limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:85 +msgid "Quota class limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:60 +msgid "Quota limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quotas.py:65 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:100 +msgid "Missing required element quota_set in request body." +msgstr "" + +#: cinder/api/contrib/quotas.py:111 +#, python-format +msgid "Bad key(s) in quota set: %s" +msgstr "" + +#: cinder/api/contrib/scheduler_hints.py:36 +msgid "Malformed scheduler_hints attribute" +msgstr "" + +#: cinder/api/contrib/services.py:84 +msgid "" +"Query by service parameter is deprecated. Please use binary parameter " +"instead." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:51 +msgid "'status' must be specified." 
+msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:61 +#, python-format +msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:67 +#, python-format +msgid "" +"Provided snapshot status %(provided)s not allowed for snapshot with " +"status %(current)s." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:79 +msgid "progress must be an integer percentage" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:101 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:105 +#: cinder/api/v1/snapshot_metadata.py:75 cinder/api/v1/volume_metadata.py:75 +#: cinder/api/v2/snapshot_metadata.py:75 cinder/api/v2/volume_metadata.py:74 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:108 +#: cinder/api/v1/snapshot_metadata.py:79 cinder/api/v1/volume_metadata.py:79 +#: cinder/api/v2/snapshot_metadata.py:79 cinder/api/v2/volume_metadata.py:78 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:150 +msgid "" +"Key names can only contain alphanumeric characters, underscores, periods," +" colons and hyphens." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:99 +#, python-format +msgid "" +"Invalid request to attach volume to an instance %(instance_uuid)s and a " +"host %(host_name)s simultaneously" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:107 +msgid "Invalid request to attach volume to an invalid target" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:111 +msgid "" +"Invalid request to attach volume with an invalid mode. Attaching mode " +"should be 'rw' or 'ro'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:196 +msgid "Unable to fetch connection information from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:216 +msgid "Unable to terminate volume connection from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:229 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:237 +msgid "Bad value for 'force' parameter." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:240 +msgid "'force' is not string or bool." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:280 +msgid "New volume size must be specified as an integer." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:299 +msgid "Must specify readonly in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:307 +msgid "Bad value for 'readonly'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:311 +msgid "'readonly' not string or bool" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:325 +msgid "New volume type must be specified." 
+msgstr "" + +#: cinder/api/contrib/volume_transfer.py:131 +msgid "Listing volume transfers" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:147 +#, python-format +msgid "Creating new volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:162 +#, python-format +msgid "Creating transfer of volume %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:183 +#, python-format +msgid "Accepting volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:196 +#, python-format +msgid "Accepting transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:217 +#, python-format +msgid "Delete transfer with id: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:64 +msgid "key_size must be non-negative" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:67 +msgid "key_size must be an integer" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:73 +msgid "provider must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:75 +msgid "control_location must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:83 +#, python-format +msgid "Valid control locations are: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:111 +msgid "Cannot create encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:115 +msgid "Create body is not valid." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:157 +msgid "Cannot delete encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/middleware/auth.py:108 +msgid "Invalid service catalog JSON." +msgstr "" + +#: cinder/api/middleware/fault.py:44 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:53 cinder/api/openstack/wsgi.py:984 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/fault.py:69 +#, python-format +msgid "%(exception)s: %(explanation)s" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:55 cinder/api/middleware/sizelimit.py:64 +#: cinder/api/middleware/sizelimit.py:78 +msgid "Request is too large." +msgstr "" + +#: cinder/api/openstack/__init__.py:69 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:80 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:104 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:126 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead."
+msgstr "" + +#: cinder/api/openstack/wsgi.py:220 cinder/api/openstack/wsgi.py:634 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:639 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:677 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:682 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:685 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:793 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:799 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:803 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:914 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:917 cinder/api/openstack/wsgi.py:930 +#: cinder/api/v1/snapshot_metadata.py:53 cinder/api/v1/snapshot_metadata.py:71 +#: cinder/api/v1/snapshot_metadata.py:96 cinder/api/v1/snapshot_metadata.py:121 +#: cinder/api/v1/volume_metadata.py:53 cinder/api/v1/volume_metadata.py:71 +#: cinder/api/v1/volume_metadata.py:96 cinder/api/v1/volume_metadata.py:121 +#: cinder/api/v2/snapshot_metadata.py:53 cinder/api/v2/snapshot_metadata.py:71 +#: cinder/api/v2/snapshot_metadata.py:96 cinder/api/v2/snapshot_metadata.py:121 +#: cinder/api/v2/volume_metadata.py:52 cinder/api/v2/volume_metadata.py:70 +#: cinder/api/v2/volume_metadata.py:95 cinder/api/v2/volume_metadata.py:120 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:927 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:939 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:987 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:139 cinder/api/v2/limits.py:138 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:264 cinder/api/v2/limits.py:261 +msgid "This request was rate-limited." 
+msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:37 cinder/api/v1/snapshot_metadata.py:117 +#: cinder/api/v1/snapshot_metadata.py:156 cinder/api/v2/snapshot_metadata.py:37 +#: cinder/api/v2/snapshot_metadata.py:117 +#: cinder/api/v2/snapshot_metadata.py:156 +msgid "snapshot does not exist" +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:139 +#: cinder/api/v1/snapshot_metadata.py:149 cinder/api/v1/volume_metadata.py:139 +#: cinder/api/v1/volume_metadata.py:149 cinder/api/v2/snapshot_metadata.py:139 +#: cinder/api/v2/snapshot_metadata.py:149 cinder/api/v2/volume_metadata.py:138 +#: cinder/api/v2/volume_metadata.py:148 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:119 cinder/api/v2/snapshots.py:120 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:184 +msgid "'volume_id' must be specified" +msgstr "" + +#: cinder/api/v1/snapshots.py:182 cinder/api/v2/snapshots.py:193 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:186 cinder/api/v2/snapshots.py:202 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:37 cinder/api/v1/volume_metadata.py:117 +#: cinder/api/v1/volume_metadata.py:156 cinder/api/v2/volume_metadata.py:36 +#: cinder/api/v2/volume_metadata.py:116 cinder/api/v2/volume_metadata.py:155 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:111 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:290 cinder/api/v2/volumes.py:228 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:344 cinder/api/v1/volumes.py:348 +#: cinder/api/v2/volumes.py:298 cinder/api/v2/volumes.py:302 +msgid "Invalid imageRef provided." +msgstr "" + +#: cinder/api/v1/volumes.py:388 cinder/api/v2/volumes.py:354 +#, python-format +msgid "snapshot id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:401 +#, python-format +msgid "source vol id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:412 cinder/api/v2/volumes.py:377 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/v1/volumes.py:496 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/snapshots.py:111 cinder/api/v2/snapshots.py:126 +#: cinder/api/v2/snapshots.py:267 +msgid "Snapshot could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:174 cinder/api/v2/snapshots.py:234 +#: cinder/api/v2/volumes.py:313 cinder/api/v2/volumes.py:419 +#, python-format +msgid "Missing required element '%s' in request body" +msgstr "" + +#: cinder/api/v2/snapshots.py:190 cinder/api/v2/volumes.py:217 +#: cinder/api/v2/volumes.py:234 cinder/api/v2/volumes.py:449 +msgid "Volume could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:230 cinder/api/v2/volumes.py:415 +msgid "Missing request body" +msgstr "" + +#: cinder/api/v2/types.py:70 +msgid "Volume type not found" +msgstr "" + +#: cinder/api/v2/volumes.py:237 +msgid "Volume cannot be deleted while in attached state" +msgstr "" + +#: cinder/api/v2/volumes.py:343 +msgid "Volume type not found." 
+msgstr "" + +#: cinder/api/v2/volumes.py:366 +#, python-format +msgid "source volume id:%s not found" +msgstr "" + +#: cinder/api/v2/volumes.py:472 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:66 +msgid "Backup status must be available or error" +msgstr "" + +#: cinder/backup/api.py:105 +msgid "Volume to be backed up must be available" +msgstr "" + +#: cinder/backup/api.py:140 +msgid "Backup status must be available" +msgstr "" + +#: cinder/backup/api.py:145 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:154 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:170 +msgid "Volume to be restored to must be available" +msgstr "" + +#: cinder/backup/api.py:176 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." +msgstr "" + +#: cinder/backup/api.py:181 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:97 +msgid "NULL host not allowed for volume backend lookup." +msgstr "" + +#: cinder/backup/manager.py:100 +#, python-format +msgid "Checking hostname '%s' for backend info." +msgstr "" + +#: cinder/backup/manager.py:107 +#, python-format +msgid "Backend not found in hostname (%s) so using default." +msgstr "" + +#: cinder/backup/manager.py:117 +#, python-format +msgid "Manager requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:120 cinder/backup/manager.py:132 +msgid "Fetching default backend." +msgstr "" + +#: cinder/backup/manager.py:123 +#, python-format +msgid "Volume manager for backend '%s' does not exist." +msgstr "" + +#: cinder/backup/manager.py:129 +#, python-format +msgid "Driver requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:147 +#, python-format +msgid "" +"Registering backend %(backend)s (host=%(host)s " +"backend_name=%(backend_name)s)." +msgstr "" + +#: cinder/backup/manager.py:154 +#, python-format +msgid "Registering default backend %s." +msgstr "" + +#: cinder/backup/manager.py:158 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)." +msgstr "" + +#: cinder/backup/manager.py:165 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s." +msgstr "" + +#: cinder/backup/manager.py:184 +msgid "Cleaning up incomplete backup operations." +msgstr "" + +#: cinder/backup/manager.py:189 +#, python-format +msgid "Resetting volume %s to available (was backing-up)." +msgstr "" + +#: cinder/backup/manager.py:194 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)." +msgstr "" + +#: cinder/backup/manager.py:206 +#, python-format +msgid "Resetting backup %s to error (was creating)." +msgstr "" + +#: cinder/backup/manager.py:212 +#, python-format +msgid "Resetting backup %s to available (was restoring)." +msgstr "" + +#: cinder/backup/manager.py:217 +#, python-format +msgid "Resuming delete on backup: %s." +msgstr "" + +#: cinder/backup/manager.py:225 +#, python-format +msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:237 +#, python-format +msgid "" +"Create backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s." 
+msgstr "" + +#: cinder/backup/manager.py:249 +#, python-format +msgid "" +"Create backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:282 +#, python-format +msgid "Create backup finished. backup: %s." +msgstr "" + +#: cinder/backup/manager.py:286 +#, python-format +msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:299 +#, python-format +msgid "" +"Restore backup aborted: expected volume status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:310 +#, python-format +msgid "" +"Restore backup aborted: expected backup status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:329 +#, python-format +msgid "" +"Restore backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:360 +#, python-format +msgid "" +"Restore backup finished, backup %(backup_id)s restored to volume " +"%(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:379 +#, python-format +msgid "Delete backup started, backup: %s." +msgstr "" + +#: cinder/backup/manager.py:386 +#, python-format +msgid "" +"Delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:399 +#, python-format +msgid "" +"Delete backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:422 +#, python-format +msgid "Delete backup finished, backup %s deleted." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:116 +msgid "" +"rbd striping not supported - ignoring configuration settings for rbd " +"striping" +msgstr "" + +#: cinder/backup/drivers/ceph.py:147 +#, python-format +msgid "invalid user '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:213 +msgid "backup_id required" +msgstr "" + +#: cinder/backup/drivers/ceph.py:224 +#, python-format +msgid "discarding %(length)s bytes from offset %(offset)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:232 +#, python-format +msgid "writing zeroes chunk %d" +msgstr "" + +#: cinder/backup/drivers/ceph.py:246 +#, python-format +msgid "transferring data between '%(src)s' and '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:250 +#, python-format +msgid "%(chunks)s chunks of %(bytes)s bytes to be transferred" +msgstr "" + +#: cinder/backup/drivers/ceph.py:269 +#, python-format +msgid "transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:279 +#, python-format +msgid "transferring remaining %s bytes" +msgstr "" + +#: cinder/backup/drivers/ceph.py:295 +#, python-format +msgid "creating base image '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:322 cinder/backup/drivers/ceph.py:603 +#, python-format +msgid "deleting backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:325 +msgid "no backup snapshot to delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:361 +#, python-format +msgid "trying diff format name basename='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:369 +#, python-format +msgid "image %s not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:377 +#, python-format +msgid "base image still has %s snapshots so skipping base image delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:382 +#, python-format +msgid "deleting base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:389 +#, python-format +msgid "image busy, retrying %(retries)s more time(s) in %(delay)ss" +msgstr "" + +#: cinder/backup/drivers/ceph.py:394 +msgid "max retries reached - raising error" +msgstr "" + +#: cinder/backup/drivers/ceph.py:397 +#, python-format +msgid "base backup image='%s' deleted" +msgstr "" + +#: cinder/backup/drivers/ceph.py:407 +#, python-format +msgid "deleting source snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:453 +#, python-format +msgid "performing differential transfer from '%(src)s' to '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:478 +#, python-format +msgid "rbd diff op failed - (ret=%(ret)s stderr=%(stderr)s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:488 +#, python-format +msgid "image '%s' not found - trying diff format name" +msgstr "" + +#: cinder/backup/drivers/ceph.py:493 +#, python-format +msgid "diff format image '%s' not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:528 +#, python-format +msgid "using --from-snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:543 +#, python-format +msgid "source snap '%s' is stale so deleting" +msgstr "" + +#: cinder/backup/drivers/ceph.py:555 +#, python-format +msgid "" +"snap='%(snap)s' does not exist in base image='%(base)s' - aborting " +"incremental backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:566 +#, python-format +msgid "creating backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:586 +#, python-format +msgid "differential backup transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:595 +msgid "differential backup transfer failed" +msgstr "" + +#: 
cinder/backup/drivers/ceph.py:625 +#, python-format +msgid "creating base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:634 +msgid "copying data" +msgstr "" + +#: cinder/backup/drivers/ceph.py:694 +#, python-format +msgid "looking for snapshot of backup base '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:697 +#, python-format +msgid "backup base '%s' has no snapshots" +msgstr "" + +#: cinder/backup/drivers/ceph.py:704 +#, python-format +msgid "backup '%s' has no snapshot" +msgstr "" + +#: cinder/backup/drivers/ceph.py:708 +#, python-format +msgid "backup should only have one snapshot but instead has %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:713 +#, python-format +msgid "found snapshot '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:734 +msgid "need non-zero volume size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:751 +#, python-format +msgid "Starting backup of volume='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:764 +msgid "forcing full backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:776 +#, python-format +msgid "backup '%s' finished." +msgstr "" + +#: cinder/backup/drivers/ceph.py:834 +msgid "adjusting restore vol size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:846 +#, python-format +msgid "trying incremental restore from base='%(base)s' snap='%(snap)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:858 +msgid "differential restore failed, trying full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:869 +#, python-format +msgid "restore transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:916 +#, python-format +msgid "rbd has %s extents" +msgstr "" + +#: cinder/backup/drivers/ceph.py:938 +msgid "dest volume is original volume - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:959 +msgid "destination has extents - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:964 +#, python-format +msgid "no restore point found for backup='%s', forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:995 +msgid "forcing full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1005 +#, python-format +msgid "starting restore from Ceph backup=%(src)s to volume=%(dest)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1016 +msgid "volume_file does not support fileno() so skipping fsync()" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1021 +msgid "restore finished successfully." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:1023 +#, python-format +msgid "restore finished with error - %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1029 +#, python-format +msgid "delete started for backup=%s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1034 +msgid "rbd image not found but continuing anyway so that db entry can be removed" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1037 +#, python-format +msgid "delete '%s' finished with warning" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1039 +#, python-format +msgid "delete '%s' finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:106 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:123 +#, python-format +msgid "single_user auth mode enabled, but %(param)s not set" +msgstr "" + +#: cinder/backup/drivers/swift.py:141 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:146 +#, python-format +msgid "container %s does not exist" +msgstr "" + +#: cinder/backup/drivers/swift.py:151 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/drivers/swift.py:157 +#, python-format +msgid "_create_container started, container: %(container)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:173 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:182 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:192 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:209 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/drivers/swift.py:214 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:219 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:224 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/drivers/swift.py:234 +#, python-format +msgid "volume size %d is invalid."
+msgstr "" + +#: cinder/backup/drivers/swift.py:248 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:271 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/drivers/swift.py:278 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:287 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/drivers/swift.py:291 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/drivers/swift.py:297 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:301 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:304 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:312 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/drivers/swift.py:328 cinder/backup/drivers/tsm.py:324 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/drivers/swift.py:345 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/drivers/swift.py:350 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:356 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/drivers/swift.py:362 +#, python-format +msgid "" +"restoring object from swift. backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:378 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/drivers/swift.py:401 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:409 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:423 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:428 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:432 cinder/backup/drivers/tsm.py:378 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:446 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:455 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:458 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:468 cinder/backup/drivers/tsm.py:440 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/backup/drivers/tsm.py:85 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to create device hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:143 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to obtain backup success notification from " +"server.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:173 +#, python-format +msgid "" +"restore: %(vol_id)s Failed.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:199 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a block device." +msgstr "" + +#: cinder/backup/drivers/tsm.py:206 +#, python-format +msgid "backup: %(vol_id)s Failed. Cannot obtain real path to device %(path)s." +msgstr "" + +#: cinder/backup/drivers/tsm.py:213 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a file." +msgstr "" + +#: cinder/backup/drivers/tsm.py:260 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to remove backup hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:286 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to TSM, volume path: " +"%(volume_path)s," +msgstr "" + +#: cinder/backup/drivers/tsm.py:298 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:308 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:338 +#, python-format +msgid "" +"restore: starting restore of backup from TSM to volume %(volume_id)s, " +"backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:352 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:362 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:413 +#, python-format +msgid "" +"delete: %(vol_id)s Failed to run dsmc with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:421 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments with " +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:432 +#, python-format +msgid "" +"delete: %(vol_id)s Failed with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/brick/exception.py:55 +#, python-format +msgid "Exception in string format operation. 
msg='%s'" +msgstr "" + +#: cinder/brick/exception.py:85 +msgid "We are unable to locate any Fibre Channel devices." +msgstr "" + +#: cinder/brick/exception.py:89 +msgid "Unable to find a Fibre Channel volume device." +msgstr "" + +#: cinder/brick/exception.py:93 +#, python-format +msgid "Volume device not found at %(device)s." +msgstr "" + +#: cinder/brick/exception.py:97 +#, python-format +msgid "Unable to find Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:101 +#, python-format +msgid "Failed to create Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:105 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:109 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:113 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:117 +#, python-format +msgid "Connect to volume via protocol %(protocol)s not supported." +msgstr "" + +#: cinder/brick/initiator/connector.py:127 +#, python-format +msgid "Invalid InitiatorConnector protocol specified: %(protocol)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:140 +#, python-format +msgid "Failed to access the device on the path %(path)s: %(error)s %(info)s." +msgstr "" + +#: cinder/brick/initiator/connector.py:229 +#, python-format +msgid "" +"iSCSI volume not yet found at: %(host_device)s. Will rescan & retry. Try" +" number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:242 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:317 +#, python-format +msgid "Could not find the iSCSI Initiator File %s" +msgstr "" + +#: cinder/brick/initiator/connector.py:609 +msgid "We are unable to locate any Fibre Channel devices" +msgstr "" + +#: cinder/brick/initiator/connector.py:619 +#, python-format +msgid "Looking for Fibre Channel dev %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:629 +msgid "Fibre Channel volume device not found." +msgstr "" + +#: cinder/brick/initiator/connector.py:633 +#, python-format +msgid "Fibre volume not yet found. Will rescan & retry. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:649 +#, python-format +msgid "Found Fibre Channel volume %(name)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:658 +#, python-format +msgid "Multipath device discovered %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:776 +#, python-format +msgid "AoE volume not yet found at: %(path)s. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:789 +#, python-format +msgid "Found AoE device %(path)s (after %(tries)s rediscover)" +msgstr "" + +#: cinder/brick/initiator/connector.py:815 +#, python-format +msgid "aoe-discover: stdout=%(out)s stderr=%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:825 +#, python-format +msgid "aoe-revalidate %(dev)s: stdout=%(out)s stderr=%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:834 +#, python-format +msgid "aoe-flush %(dev)s: stdout=%(out)s stderr=%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:858 +msgid "" +"Connection details not present. RemoteFsClient may not initialize " +"properly."
+msgstr "" + +#: cinder/brick/initiator/connector.py:915 +msgid "Invalid connection_properties specified: no device_path attribute" +msgstr "" + +#: cinder/brick/initiator/linuxfc.py:50 cinder/brick/initiator/linuxfc.py:56 +msgid "systool is not installed" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:99 +#: cinder/brick/initiator/linuxscsi.py:107 +#: cinder/brick/initiator/linuxscsi.py:124 +#, python-format +msgid "multipath call failed exit (%(code)s)" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:145 +#, python-format +msgid "Couldn't find multipath device %(line)s" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:149 +#, python-format +msgid "Found multipath device = %(mdev)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:140 +msgid "Attempting recreate of backing lun..." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:158 +#, python-format +msgid "" +"Failed to recover attempt to create iscsi backing lun for volume " +"id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:177 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:184 +#, python-format +msgid "" +"Created volume path %(vp)s,\n" +"content: %(vc)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:216 cinder/brick/iscsi/iscsi.py:365 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:227 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:258 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:262 +#, python-format +msgid "Volume path %s does not exist, nothing to remove." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:280 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:290 cinder/brick/iscsi/iscsi.py:550 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:375 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:469 +msgid "cinder-rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:489 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:513 cinder/brick/iscsi/iscsi.py:522 +#, python-format +msgid "Failed to create iscsi target for volume id:%s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:532 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:542 +#, python-format +msgid "Failed to remove iscsi target for volume id:%s."
+msgstr "" + +#: cinder/brick/iscsi/iscsi.py:571 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 cinder/brick/local_dev/lvm.py:158 +#: cinder/brick/local_dev/lvm.py:474 cinder/brick/local_dev/lvm.py:503 +#: cinder/brick/local_dev/lvm.py:546 cinder/brick/local_dev/lvm.py:609 +#, python-format +msgid "Cmd: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 cinder/brick/local_dev/lvm.py:159 +#: cinder/brick/local_dev/lvm.py:475 cinder/brick/local_dev/lvm.py:504 +#: cinder/brick/local_dev/lvm.py:547 cinder/brick/local_dev/lvm.py:610 +#, python-format +msgid "StdOut: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 cinder/brick/local_dev/lvm.py:160 +#: cinder/brick/local_dev/lvm.py:476 cinder/brick/local_dev/lvm.py:505 +#: cinder/brick/local_dev/lvm.py:548 cinder/brick/local_dev/lvm.py:611 +#, python-format +msgid "StdErr: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, python-format +msgid "Unable to locate Volume Group %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:157 +msgid "Error querying thin pool about data_percent" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:370 +#, python-format +msgid "Unable to find VG: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:420 +msgid "" +"Requested to set up thin provisioning, however the current LVM version " +"does not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:473 +msgid "Error creating Volume" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:489 +#, python-format +msgid "Unable to find LV: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:502 +msgid "Error creating snapshot" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:545 +msgid "Error activating LV" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:563 +#, python-format +msgid "Error reported running lvremove: CMD: %(command)s, RESPONSE: %(response)s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:568 +msgid "Attempting udev settle and retry of lvremove..." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:608 +msgid "Error extending Volume" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:39 +msgid "nfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:45 +msgid "glusterfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:86 +#, python-format +msgid "Already mounted: %s" +msgstr "" + +#: cinder/common/config.py:125 +msgid "Deploy v1 of the Cinder API." +msgstr "" + +#: cinder/common/config.py:128 +msgid "Deploy v2 of the Cinder API." +msgstr "" + +#: cinder/common/sqlalchemyutils.py:66 +#: cinder/openstack/common/db/sqlalchemy/utils.py:72 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:114 +#: cinder/openstack/common/db/sqlalchemy/utils.py:120 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/compute/nova.py:97 +#, python-format +msgid "Novaclient connection created using URL: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:63 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:190 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:843 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1842 +#, python-format +msgid "VolumeType %s deletion failed, VolumeType in use."
+msgstr "" + +#: cinder/db/sqlalchemy/api.py:2530 +#, python-format +msgid "No backup with id %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2615 +msgid "Volume must be available" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2639 +#, python-format +msgid "Volume in unexpected state %s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2662 +#, python-format +msgid "" +"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " +"%(status)s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:37 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:64 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:240 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:269 +msgid "Downgrade from initial Cinder install is unsupported." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:49 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:74 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:105 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:45 +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:48 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:46 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:127 +msgid "Dropping foreign key reservations_ibfk_1 failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:133 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:140 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:147 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:60 +msgid "Exception while creating table 'volume_glance_metadata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:75 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:68 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:58 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:61 +msgid "transfers table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:31 +msgid "migrations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:61 +#, python-format +msgid "Table |%s| not created" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:37 +#, python-format +msgid "Exception while dropping table %s." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:100 +#, python-format +msgid "Exception while creating table %s." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:34 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:43 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:49 +#, python-format +msgid "Column |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:92 +msgid "encryption_key_id column not dropped from volumes" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:100 +msgid "encryption_key_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:105 +msgid "volume_type_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:113 +msgid "encryption table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:49 +msgid "Table quality_of_service_specs not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:60 +msgid "Added qos_specs_id column to volume type table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:85 +msgid "Dropping foreign key volume_types_ibfk_1 failed" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:93 +msgid "Dropping qos_specs_id column failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:100 +msgid "Dropping quality_of_service_specs table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:59 +msgid "volume_admin_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:48 +msgid "" +"Found existing 'default' entries in the quota_classes table. Skipping " +"insertion of default values." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:72 +msgid "Added default quota class data into the DB." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:74 +msgid "Default quota class data not inserted into the DB." +msgstr "" + +#: cinder/image/glance.py:161 cinder/image/glance.py:169 +#, python-format +msgid "Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:94 cinder/image/image_utils.py:199 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/image/image_utils.py:101 +#, python-format +msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:109 cinder/image/image_utils.py:192 +#, python-format +msgid "" +"Size is %(image_size)dGB and doesn't fit in a volume of size " +"%(volume_size)dGB." +msgstr "" + +#: cinder/image/image_utils.py:157 +#, python-format +msgid "" +"qemu-img is not installed and image is of type %s. Only RAW images can " +"be used if qemu-img is not installed." +msgstr "" + +#: cinder/image/image_utils.py:164 +msgid "" +"qemu-img is not installed and the disk format is not specified. Only RAW" +" images can be used if qemu-img is not installed." 
+msgstr "" + +#: cinder/image/image_utils.py:178 +#, python-format +msgid "Copying image from %(tmp)s to volume %(dest)s - size: %(size)s" +msgstr "" + +#: cinder/image/image_utils.py:206 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:224 +#, python-format +msgid "Converted to %(vol_format)s, but format is now %(file_format)s" +msgstr "" + +#: cinder/image/image_utils.py:260 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:78 +msgid "" +"config option keymgr.fixed_key has not been defined: some operations may " +"fail unexpectedly" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:80 +msgid "keymgr.fixed_key not defined" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:134 +#, python-format +msgid "Not deleting key %s" +msgstr "" + +#: cinder/openstack/common/eventlet_backdoor.py:140 +#, python-format +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/fileutils.py:64 +#, python-format +msgid "Reloading cached file %s" +msgstr "" + +#: cinder/openstack/common/gettextutils.py:252 +msgid "Message objects do not support addition." +msgstr "" + +#: cinder/openstack/common/gettextutils.py:261 +msgid "" +"Message objects do not support str() because they may contain non-ascii " +"characters. Please use unicode() or translate() instead." +msgstr "" + +#: cinder/openstack/common/imageutils.py:96 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:189 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:200 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:227 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:235 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/log.py:301 +#, python-format +msgid "Deprecated: %s" +msgstr "" + +#: cinder/openstack/common/log.py:402 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:453 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:623 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:82 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:89 +#: cinder/tests/brick/test_brick_connector.py:466 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:129 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:136 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:43 +#, python-format +msgid "Unexpected argument for periodic task creation: %(arg)s." 
+msgstr "" + +#: cinder/openstack/common/periodic_task.py:134 +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:139 +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:177 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:186 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." +msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:127 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/openstack/common/processutils.py:142 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:167 +#: cinder/openstack/common/processutils.py:239 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:345 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:179 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/openstack/common/processutils.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:318 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:220 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/openstack/common/processutils.py:224 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/openstack/common/service.py:175 +#: cinder/openstack/common/service.py:269 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:187 +msgid "Exception during rpc cleanup." 
+msgstr "" + +#: cinder/openstack/common/service.py:238 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:275 +msgid "Unhandled exception" +msgstr "" + +#: cinder/openstack/common/service.py:308 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/openstack/common/service.py:327 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/openstack/common/service.py:337 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/openstack/common/service.py:354 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/openstack/common/service.py:358 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/service.py:362 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/openstack/common/service.py:392 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/openstack/common/service.py:410 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/openstack/common/sslutils.py:98 +#, python-format +msgid "Invalid SSL version : %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:86 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/strutils.py:182 +#, python-format +msgid "Invalid string format: %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:189 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/openstack/common/versionutils.py:69 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and " +"may be removed in %(remove_in)s." +msgstr "" + +#: cinder/openstack/common/versionutils.py:73 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s and may be removed in " +"%(remove_in)s. It will not be superseded." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:29 +msgid "An unknown error occurred in crypto utils." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:36 +#, python-format +msgid "Block size of %(given)d is too big, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:45 +#, python-format +msgid "Length of %(given)d is too long, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/db/exception.py:44 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:487 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:538 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:610 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/utils.py:33 +msgid "Sort key supplied was not valid." +msgstr "" + +#: cinder/openstack/common/notifier/api.py:129 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:145 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:164 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. 
Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:105 +#, python-format +msgid "" +"An RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:83 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:216 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshold: %d. There " +"could be a MulticallProxyWaiter leak." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:299 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:345 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "received %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:422 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:423 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:280 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:459 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:594 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:597 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:631 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:640 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:668 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint."
+msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:166 +#: cinder/openstack/common/rpc/impl_qpid.py:164 +msgid "Failed to process message... skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:477 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:499 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:536 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:552 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:606 +#: cinder/openstack/common/rpc/impl_qpid.py:507 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:624 +#: cinder/openstack/common/rpc/impl_qpid.py:522 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:628 +#: cinder/openstack/common/rpc/impl_qpid.py:526 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:667 +#: cinder/openstack/common/rpc/impl_qpid.py:561 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:84 +#, python-format +msgid "Invalid value for qpid_topology_version: %d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:455 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:461 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:474 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:534 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:101 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:136 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:137 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:138 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:146 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:158 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:200 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:205 +msgid "You cannot send on this socket." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:267 +#, python-format +msgid "Running func with context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:305 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:387 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:437 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:443 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:475 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:481 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:497 +#, python-format +msgid "Required IPC directory does not exist at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:506 +#, python-format +msgid "Permission denied to IPC directory at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:509 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:543 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:562 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:590 +msgid "Skipping topic registration. Already registered." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:597 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:649 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:662 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:675 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:678 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:681 +#, python-format +msgid "Received message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:682 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:691 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:698 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:721 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:724 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:728 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:731 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:771 +#, python-format +msgid "topic is %s." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:815 +#, python-format +msgid "rpc_zmq_matchmaker = %(orig)s is deprecated; use %(new)s instead" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#: cinder/openstack/common/rpc/matchmaker_ring.py:79 +#: cinder/openstack/common/rpc/matchmaker_ring.py:97 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:54 +#, python-format +msgid "extra_spec requirement '%(req)s' does not match '%(cap)s'" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:67 +#, python-format +msgid "%(host_state)s fails resource_type extra_specs requirements" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:43 +msgid "Re-scheduling is disabled." +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:52 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/scheduler/driver.py:69 +msgid "Must implement host_passes_filters" +msgstr "" + +#: cinder/scheduler/driver.py:74 +msgid "Must implement find_retype_host" +msgstr "" + +#: cinder/scheduler/driver.py:78 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:82 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:98 +#, python-format +msgid "cannot place volume %(id)s on %(host)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:114 +#, python-format +msgid "No valid hosts for volume %(id)s with type %(type)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:125 +#, python-format +msgid "" +"Current host not valid for volume %(id)s with type %(type)s, migration " +"not allowed" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:156 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:174 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:207 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:259 +#, python-format +msgid "Filtered %s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:276 +#, python-format +msgid "Choosing %s" +msgstr "" + +#: cinder/scheduler/host_manager.py:264 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:269 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:294 +#, python-format +msgid "volume service is down or disabled. (host: %s)" +msgstr "" + +#: cinder/scheduler/manager.py:63 +msgid "" +"ChanceScheduler and SimpleScheduler have been deprecated due to lack of " +"support for advanced features like: volume types, volume encryption, QoS " +"etc. These two schedulers can be fully replaced by FilterScheduler with " +"certain combination of filters and weighers." 
+msgstr "" + +#: cinder/scheduler/manager.py:98 cinder/scheduler/manager.py:100 +msgid "Failed to create scheduler manager volume flow" +msgstr "" + +#: cinder/scheduler/manager.py:159 +msgid "New volume type not specified in request_spec." +msgstr "" + +#: cinder/scheduler/manager.py:174 +#, python-format +msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." +msgstr "" + +#: cinder/scheduler/manager.py:192 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:68 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%s'" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:43 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:57 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:53 +msgid "No volume_id provided to populate a request_spec from" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:116 +#, python-format +msgid "Failed to schedule_create_volume: %(cause)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:135 +#, python-format +msgid "Failed notifying on %(topic)s payload %(payload)s" +msgstr "" + +#: cinder/tests/fake_driver.py:57 cinder/volume/driver.py:784 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:76 cinder/volume/driver.py:884 +#, python-format +msgid "FAKE ISER: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:97 +msgid "local_path not implemented" +msgstr "" + +#: cinder/tests/fake_driver.py:124 cinder/tests/fake_driver.py:129 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:70 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:78 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:94 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:97 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:58 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_netapp_nfs.py:360 +#, python-format +msgid "Share %(share)s and file name %(file_name)s" +msgstr "" + +#: cinder/tests/test_rbd.py:768 cinder/volume/drivers/rbd.py:175 +msgid "flush() not supported in this version of librbd" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:260 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1507 +#, python-format +msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1510 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1515 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:60 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:61 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: 
cinder/tests/test_xiv_ds8k.py:102 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/tests/api/contrib/test_backups.py:741 +msgid "Invalid input" +msgstr "" + +#: cinder/tests/integrated/test_login.py:29 +#, python-format +msgid "volume: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:32 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:42 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:50 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:58 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:100 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:103 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:121 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:148 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:159 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:166 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/transfer/api.py:68 +msgid "Volume in unexpected state" +msgstr "" + +#: cinder/transfer/api.py:102 cinder/volume/api.py:367 +msgid "status must be available" +msgstr "" + +#: cinder/transfer/api.py:119 +#, python-format +msgid "Failed to create transfer record for %s" +msgstr "" + +#: cinder/transfer/api.py:136 +#, python-format +msgid "Attempt to transfer %s with invalid auth key." +msgstr "" + +#: cinder/transfer/api.py:156 cinder/volume/flows/api/create_volume.py:508 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/transfer/api.py:182 +#, python-format +msgid "Failed to update quota donating volume transfer id %s" +msgstr "" + +#: cinder/transfer/api.py:199 +#, python-format +msgid "Volume %s has been transferred."
+msgstr "" + +#: cinder/volume/api.py:143 +#, python-format +msgid "Unable to query if %s is in the availability zone set" +msgstr "" + +#: cinder/volume/api.py:171 cinder/volume/api.py:173 +msgid "Failed to create api volume flow" +msgstr "" + +#: cinder/volume/api.py:202 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:214 +#, python-format +msgid "Volume status must be available or error, but current status is: %s" +msgstr "" + +#: cinder/volume/api.py:224 +msgid "Volume cannot be deleted while migrating" +msgstr "" + +#: cinder/volume/api.py:229 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:285 cinder/volume/api.py:350 +#: cinder/volume/qos_specs.py:240 cinder/volume/volume_types.py:67 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:370 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:377 +msgid "status must be in-use to detach" +msgstr "" + +#: cinder/volume/api.py:388 +msgid "Volume status must be available to reserve" +msgstr "" + +#: cinder/volume/api.py:464 +msgid "Snapshot cannot be created while volume is migrating" +msgstr "" + +#: cinder/volume/api.py:468 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:490 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:502 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:553 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/api.py:581 cinder/volume/flows/api/create_volume.py:208 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:585 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:589 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:720 cinder/volume/api.py:772 +msgid "Volume status must be available/in-use." +msgstr "" + +#: cinder/volume/api.py:723 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/api.py:752 +msgid "Volume status must be available to extend." +msgstr "" + +#: cinder/volume/api.py:757 +#, python-format +msgid "" +"New size for extend must be greater than current size. (current: " +"%(size)s, extended: %(new_size)s)" +msgstr "" + +#: cinder/volume/api.py:778 +msgid "Volume is already part of an active migration" +msgstr "" + +#: cinder/volume/api.py:784 +msgid "volume must not have snapshots" +msgstr "" + +#: cinder/volume/api.py:797 +#, python-format +msgid "No available service named %s" +msgstr "" + +#: cinder/volume/api.py:803 +msgid "Destination host must be different than current host" +msgstr "" + +#: cinder/volume/api.py:833 +msgid "Source volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:837 +msgid "Destination volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:842 +#, python-format +msgid "Destination has migration_status %(stat)s, expected %(exp)s." +msgstr "" + +#: cinder/volume/api.py:853 +msgid "Volume status must be available to update readonly flag." 
+msgstr "" + +#: cinder/volume/api.py:862 +#, python-format +msgid "Unable to update type due to incorrect status on volume: %s" +msgstr "" + +#: cinder/volume/api.py:868 +#, python-format +msgid "Volume %s is already part of an active migration." +msgstr "" + +#: cinder/volume/api.py:874 +#, python-format +msgid "migration_policy must be 'on-demand' or 'never', passed: %s" +msgstr "" + +#: cinder/volume/api.py:887 +#, python-format +msgid "Invalid volume_type passed: %s" +msgstr "" + +#: cinder/volume/api.py:900 +#, python-format +msgid "New volume_type same as original: %s" +msgstr "" + +#: cinder/volume/api.py:915 +msgid "Retype cannot change encryption requirements" +msgstr "" + +#: cinder/volume/api.py:927 +msgid "Retype cannot change front-end qos specs for in-use volumes" +msgstr "" + +#: cinder/volume/driver.py:189 cinder/volume/drivers/netapp/nfs.py:174 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:282 +#, python-format +msgid "copy_data_between_volumes %(src)s -> %(dest)s." +msgstr "" + +#: cinder/volume/driver.py:295 cinder/volume/driver.py:309 +#, python-format +msgid "Failed to attach volume %(vol)s" +msgstr "" + +#: cinder/volume/driver.py:327 +#, python-format +msgid "Failed to copy volume %(src)s to %(dest)d" +msgstr "" + +#: cinder/volume/driver.py:340 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:358 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:394 +#, python-format +msgid "Unable to access the backend storage via the path %(path)s." +msgstr "" + +#: cinder/volume/driver.py:433 +#, python-format +msgid "Creating a new backup for volume %s." +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Restoring backup %(backup)s to volume %(volume)s." +msgstr "" + +#: cinder/volume/driver.py:474 +msgid "Extend volume not implemented" +msgstr "" + +#: cinder/volume/driver.py:533 cinder/volume/drivers/emc/emc_smis_iscsi.py:113 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:546 +#, python-format +msgid "ISCSI discovery attempt failed for:%s" +msgstr "" + +#: cinder/volume/driver.py:548 +#, python-format +msgid "Error from iscsiadm -m discovery: %s" +msgstr "" + +#: cinder/volume/driver.py:595 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:599 cinder/volume/drivers/emc/emc_smis_iscsi.py:156 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:696 +msgid "The volume driver requires the iSCSI initiator name in the connector." +msgstr "" + +#: cinder/volume/driver.py:726 cinder/volume/driver.py:845 +#: cinder/volume/drivers/eqlx.py:247 cinder/volume/drivers/lvm.py:359 +#: cinder/volume/drivers/zadara.py:650 +#: cinder/volume/drivers/emc/emc_smis_common.py:859 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:235 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:602 +#: cinder/volume/drivers/netapp/iscsi.py:1032 +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/nexenta/iscsi.py:538 +#: cinder/volume/drivers/windows/windows.py:205 +msgid "Updating volume stats" +msgstr "" + +#: cinder/volume/driver.py:924 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:203 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." 
+msgstr "" + +#: cinder/volume/manager.py:209 +msgid "" +"ThinLVMVolumeDriver is deprecated, please configure LVMISCSIDriver and " +"lvm_type=thin. Continuing with those settings." +msgstr "" + +#: cinder/volume/manager.py:228 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)" +msgstr "" + +#: cinder/volume/manager.py:235 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s" +msgstr "" + +#: cinder/volume/manager.py:244 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:257 +#, python-format +msgid "Failed to re-export volume %s: setting to error state" +msgstr "" + +#: cinder/volume/manager.py:264 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:271 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:273 +#, python-format +msgid "" +"Error encountered during re-exporting phase of driver initialization: " +"%(name)s" +msgstr "" + +#: cinder/volume/manager.py:283 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:328 cinder/volume/manager.py:330 +msgid "Failed to create manager volume flow" +msgstr "" + +#: cinder/volume/manager.py:374 cinder/volume/manager.py:391 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:380 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:389 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:394 +#, python-format +msgid "Cannot delete volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:422 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:427 +#, python-format +msgid "volume %s: glance metadata deleted" +msgstr "" + +#: cinder/volume/manager.py:430 +#, python-format +msgid "no glance metadata found for volume %s" +msgstr "" + +#: cinder/volume/manager.py:434 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:451 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:462 +#, python-format +msgid "snapshot %(snap_id)s: creating" +msgstr "" + +#: cinder/volume/manager.py:490 +#, python-format +msgid "" +"Failed updating %(snapshot_id)s metadata using the provided volumes " +"%(volume_id)s metadata" +msgstr "" + +#: cinder/volume/manager.py:496 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:508 cinder/volume/manager.py:518 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:526 +#, python-format +msgid "Cannot delete snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:556 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:559 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:579 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:583 +msgid "being attached by another host" +msgstr "" + +#: cinder/volume/manager.py:587 +msgid "being attached by different mode" +msgstr "" + +#: cinder/volume/manager.py:590 +msgid "status must be available or attaching" +msgstr "" + +#: cinder/volume/manager.py:698 +#, python-format +msgid 
"Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "" + +#: cinder/volume/manager.py:760 +#, python-format +msgid "Unable to fetch connection information from backend: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:807 +#, python-format +msgid "Unable to terminate volume connection: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:854 +msgid "failed to create new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:857 +msgid "timeout creating new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:880 +#, python-format +msgid "Failed to copy volume %(vol1)s to %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:909 +#, python-format +msgid "" +"migrate_volume_completion: completing migration for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:921 +#, python-format +msgid "" +"migrate_volume_completion is cleaning up an error for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:940 +#, python-format +msgid "Failed to delete migration source vol %(vol)s: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:976 +#, python-format +msgid "volume %s: calling driver migrate_volume" +msgstr "" + +#: cinder/volume/manager.py:1016 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/manager.py:1024 +#, python-format +msgid "" +"Unable to update stats, %(driver_name)s -%(driver_version)s " +"%(config_group)s driver is uninitialized." +msgstr "" + +#: cinder/volume/manager.py:1044 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/manager.py:1091 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to extend volume by %(s_size)sG, " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/manager.py:1103 +#, python-format +msgid "volume %s: extending" +msgstr "" + +#: cinder/volume/manager.py:1105 +#, python-format +msgid "volume %s: extended successfully" +msgstr "" + +#: cinder/volume/manager.py:1107 +#, python-format +msgid "volume %s: Error trying to extend volume" +msgstr "" + +#: cinder/volume/manager.py:1169 +msgid "Failed to update usages while retyping volume." +msgstr "" + +#: cinder/volume/manager.py:1170 +msgid "Failed to get old volume type quota reservations" +msgstr "" + +#: cinder/volume/manager.py:1190 +#, python-format +msgid "Volume %s: retyped succesfully" +msgstr "" + +#: cinder/volume/manager.py:1193 +#, python-format +msgid "" +"Volume %s: driver error when trying to retype, falling back to generic " +"mechanism." +msgstr "" + +#: cinder/volume/manager.py:1204 +msgid "Retype requires migration but is not allowed." +msgstr "" + +#: cinder/volume/manager.py:1212 +msgid "Volume must not have snapshots." 
+msgstr "" + +#: cinder/volume/qos_specs.py:57 +#, python-format +msgid "Valid consumer of QoS specs are: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:84 cinder/volume/qos_specs.py:105 +#: cinder/volume/qos_specs.py:155 cinder/volume/qos_specs.py:197 +#: cinder/volume/qos_specs.py:211 cinder/volume/qos_specs.py:225 +#: cinder/volume/volume_types.py:43 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:123 cinder/volume/qos_specs.py:140 +#: cinder/volume/qos_specs.py:272 cinder/volume/volume_types.py:52 +#: cinder/volume/volume_types.py:99 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/qos_specs.py:156 +#, python-format +msgid "Failed to get all associations of qos specs %s" +msgstr "" + +#: cinder/volume/qos_specs.py:189 +#, python-format +msgid "" +"Type %(type_id)s is already associated with another qos specs: " +"%(qos_specs_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:198 +#, python-format +msgid "Failed to associate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:212 +#, python-format +msgid "Failed to disassociate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:226 +#, python-format +msgid "Failed to disassociate qos specs %s." +msgstr "" + +#: cinder/volume/qos_specs.py:284 cinder/volume/volume_types.py:111 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/utils.py:144 +#, python-format +msgid "" +"Incorrect value error: %(blocksize)s, it may indicate that " +"'volume_dd_blocksize' was configured incorrectly. Fall back to default." +msgstr "" + +#: cinder/volume/volume_types.py:130 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:131 +#: cinder/volume/drivers/block_device.py:143 cinder/volume/drivers/lvm.py:654 +#: cinder/volume/drivers/lvm.py:669 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:157 cinder/volume/drivers/lvm.py:687 +#, python-format +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:183 cinder/volume/drivers/lvm.py:483 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:200 cinder/volume/drivers/lvm.py:504 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:272 cinder/volume/drivers/lvm.py:227 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:287 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:311 cinder/volume/drivers/lvm.py:300 +#: cinder/volume/drivers/zadara.py:509 cinder/volume/drivers/nexenta/nfs.py:189 +#, python-format +msgid "Creating clone of volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:380 +msgid "No free disk" +msgstr "" + +#: cinder/volume/drivers/block_device.py:393 +msgid "No big enough free disk" +msgstr "" + +#: cinder/volume/drivers/coraid.py:84 +#, python-format +msgid "Invalid ESM url scheme \"%s\". Supported https only." +msgstr "" + +#: cinder/volume/drivers/coraid.py:111 +msgid "Invalid REST handle name. Expected path." 
+msgstr "" + +#: cinder/volume/drivers/coraid.py:134 +#, python-format +msgid "Call to json.loads() failed: %(ex)s. Response: %(resp)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:224 +msgid "Session is expired. Relogin on ESM." +msgstr "" + +#: cinder/volume/drivers/coraid.py:244 +msgid "Reply is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:246 +msgid "Error message is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:284 +#, python-format +msgid "Coraid Appliance ping failed: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:297 +#, python-format +msgid "Volume \"%(name)s\" created with VSX LUN \"%(lun)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:311 +#, python-format +msgid "Volume \"%s\" deleted." +msgstr "" + +#: cinder/volume/drivers/coraid.py:315 +#, python-format +msgid "Resize volume \"%(name)s\" to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:319 +#, python-format +msgid "Repository for volume \"%(name)s\" found: \"%(repo)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:333 +#, python-format +msgid "Volume \"%(name)s\" resized. New size is %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:385 +msgid "Cannot create clone volume in different repository." +msgstr "" + +#: cinder/volume/drivers/coraid.py:505 +#, python-format +msgid "Initialize connection %(shelf)s/%(lun)s for %(name)s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:139 +#, python-format +msgid "" +"CLI output\n" +"%s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:154 +msgid "Reading CLI MOTD" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:158 +#, python-format +msgid "Setting CLI terminal width: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:162 +#, python-format +msgid "Sending CLI command: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:169 +msgid "Error executing EQL command" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:199 +#, python-format +msgid "EQL-driver: executing \"%s\"" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:208 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:383 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:215 cinder/volume/drivers/san/san.py:149 +#, python-format +msgid "Error running SSH command: %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:282 +#, python-format +msgid "Volume %s does not exist, it may have already been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:300 +#, python-format +msgid "EQL-driver: Setup is complete, group IP is %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:304 +msgid "Failed to setup the Dell EqualLogic driver" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:320 +#, python-format +msgid "Failed to create volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:329 +#, python-format +msgid "Volume %s was not found while trying to delete it" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:333 +#, python-format +msgid "Failed to delete volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:348 +#, python-format +msgid "Failed to create snapshot of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:361 +#, python-format +msgid "Failed to create volume from snapshot %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:374 +#, python-format +msgid "Failed to create clone of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:384 +#, python-format +msgid "Failed to delete snapshot %(snap)s of volume %(vol)s" +msgstr "" + +#: 
cinder/volume/drivers/eqlx.py:405 +#, python-format +msgid "Failed to initialize connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:415 +#, python-format +msgid "Failed to terminate connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:436 +#, python-format +msgid "Volume %s is not found! It may have been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:440 +#, python-format +msgid "Failed to ensure export of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:459 +#, python-format +msgid "Failed to extend_volume %(name)s from %(current_size)sGB to %(new_size)sGB" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:86 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:91 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:103 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:161 +#, python-format +msgid "Cloning volume %(src)s to volume %(dst)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:166 +msgid "Volume status must be 'available'." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:202 cinder/volume/drivers/nfs.py:122 +#: cinder/volume/drivers/netapp/nfs.py:753 +#, python-format +msgid "casted to %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:216 +msgid "Snapshot status must be \"available\" to clone." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:238 +#, python-format +msgid "snapshot: %(snap)s, volume: %(vol)s, volume_size: %(size)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:257 +#, python-format +msgid "will copy from snapshot at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:275 cinder/volume/drivers/nfs.py:172 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:373 +#, python-format +msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:403 +#, python-format +msgid "nova call result: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:405 +msgid "Call to Nova to create snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:427 +msgid "Nova returned \"error\" status while creating snapshot." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:431 +#, python-format +msgid "Status of snapshot %(id)s is now %(status)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:444 +#, python-format +msgid "Timed out while waiting for Nova update for creation of snapshot %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:456 +#, python-format +msgid "create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:457 +#, python-format +msgid "volume id: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:532 +msgid "'active' must be present when writing snap_info." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:562 +#, python-format +msgid "deleting snapshot %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:566 +msgid "Volume status must be \"available\" or \"in-use\"." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:582 +#, python-format +msgid "" +"Snapshot record for %s is not present, allowing snapshot_delete to " +"proceed."
+msgstr "" + +#: cinder/volume/drivers/glusterfs.py:587 +#, python-format +msgid "snapshot_file for this snap is %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:608 +#, python-format +msgid "No base file found for %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:625 +#, python-format +msgid "No %(base_id)s found for %(file)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:680 +#, python-format +msgid "No file found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:690 +#, python-format +msgid "No snap found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:701 +#, python-format +msgid "No file depends on %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:727 +#, python-format +msgid "Check condition failed: %s expected to be None." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:778 +msgid "Call to Nova delete snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:796 +#, python-format +msgid "status of snapshot %s is still \"deleting\"... waiting" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:802 +#, python-format +msgid "Unable to delete snapshot %(id)s, status: %(status)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:815 +#, python-format +msgid "Timed out while waiting for Nova update for deletion of snapshot %(id)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:904 +#, python-format +msgid "%s must be a valid raw or qcow2 image." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:967 +msgid "Extend volume is only supported for this driver when no snapshots exist." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:975 +#, python-format +msgid "Unrecognized backing format: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:990 +#, python-format +msgid "creating new volume at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:993 +#, python-format +msgid "file already exists at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1019 cinder/volume/drivers/nfs.py:159 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1021 +#, python-format +msgid "Available shares: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1038 +#, python-format +msgid "" +"GlusterFS share at %(dir)s is not writable by the Cinder volume service. " +"Snapshot operations will not be supported." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:96 +#, python-format +msgid "GPFS is not active. Detailed output: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:97 +#, python-format +msgid "GPFS is not running - state: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:140 +msgid "Option gpfs_mount_point_base is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:147 +msgid "Option gpfs_images_share_mode is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:153 +msgid "Option gpfs_images_dir is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:160 +#, python-format +msgid "" +"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " +"belong to different file systems" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:169 +#, python-format +msgid "" +"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in " +"cluster daemon level %(cur)s - must be at least at level %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:183 +#, python-format +msgid "%s must be an absolute path." 
+msgstr "" + +#: cinder/volume/drivers/gpfs.py:188 +#, python-format +msgid "%s is not a directory." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:197 +#, python-format +msgid "" +"The GPFS filesystem %(fs)s is not at the required release level. Current" +" level is %(cur)s, must be at least %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:556 +#, python-format +msgid "Failed to resize volume %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:604 +#, python-format +msgid "mkfs failed on volume %(vol)s, error message was: %(err)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:637 +#, python-format +msgid "" +"%s cannot be accessed. Verify that GPFS is active and file system is " +"mounted." +msgstr "" + +#: cinder/volume/drivers/lvm.py:189 +#, python-format +msgid "Unabled to delete due to existing snapshot for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:215 +#, python-format +msgid "Volume device file path %s does not exist." +msgstr "" + +#: cinder/volume/drivers/lvm.py:221 +#, python-format +msgid "Size for volume: %s not found, cannot secure delete." +msgstr "" + +#: cinder/volume/drivers/lvm.py:262 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:361 +#, python-format +msgid "Unable to update stats on non-initialized Volume Group: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:462 +#, python-format +msgid "Error creating iSCSI target, retrying creation for target: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:482 +#, python-format +msgid "volume_info:%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:518 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:519 cinder/volume/drivers/lvm.py:724 +#: cinder/volume/drivers/huawei/rest_common.py:1225 +#, python-format +msgid "%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:573 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/nfs.py:109 +msgid "Driver specific implementation needs to return mount_point_base." +msgstr "" + +#: cinder/volume/drivers/nfs.py:263 +#, python-format +msgid "Expected volume size was %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:264 +#, python-format +msgid " but size is now %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:361 +#, python-format +msgid "%s is already mounted" +msgstr "" + +#: cinder/volume/drivers/nfs.py:421 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:426 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/nfs.py:431 +#, python-format +msgid "NFS config 'nfs_oversub_ratio' invalid. Must be > 0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:439 +#, python-format +msgid "NFS config 'nfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:493 +#, python-format +msgid "Selected %s as target nfs share." 
+msgstr "" + +#: cinder/volume/drivers/nfs.py:526 +#, python-format +msgid "%s is above nfs_used_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:529 +#, python-format +msgid "%s is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:532 +#, python-format +msgid "%s reserved space is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/rbd.py:160 +#, python-format +msgid "Invalid argument - whence=%s not supported" +msgstr "" + +#: cinder/volume/drivers/rbd.py:164 +msgid "Invalid argument" +msgstr "" + +#: cinder/volume/drivers/rbd.py:183 +msgid "fileno() not supported by RBD()" +msgstr "" + +#: cinder/volume/drivers/rbd.py:210 +#, python-format +msgid "error opening rbd image %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:259 +msgid "rados and rbd python libraries not found" +msgstr "" + +#: cinder/volume/drivers/rbd.py:265 +msgid "error connecting to ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:346 cinder/volume/drivers/sheepdog.py:178 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:377 +#, python-format +msgid "clone depth exceeds limit of %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:411 +#, python-format +msgid "maximum clone depth (%d) has been reached - flattening source volume" +msgstr "" + +#: cinder/volume/drivers/rbd.py:423 +#, python-format +msgid "flattening source volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:435 +#, python-format +msgid "creating snapshot='%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:445 +#, python-format +msgid "cloning '%(src_vol)s@%(src_snap)s' to '%(dest)s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:459 +msgid "clone created successfully" +msgstr "" + +#: cinder/volume/drivers/rbd.py:468 +#, python-format +msgid "creating volume '%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:484 +#, python-format +msgid "flattening %(pool)s/%(img)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:490 +#, python-format +msgid "cloning %(pool)s/%(img)s@%(snap)s to %(dst)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:527 +msgid "volume has no backup snaps" +msgstr "" + +#: cinder/volume/drivers/rbd.py:550 +#, python-format +msgid "volume %s is not a clone" +msgstr "" + +#: cinder/volume/drivers/rbd.py:568 +#, python-format +msgid "deleting parent snapshot %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:579 +#, python-format +msgid "deleting parent %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:593 +#, python-format +msgid "volume %s no longer exists in backend" +msgstr "" + +#: cinder/volume/drivers/rbd.py:609 +msgid "volume has clone snapshot(s)" +msgstr "" + +#: cinder/volume/drivers/rbd.py:625 +#, python-format +msgid "deleting rbd volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:629 +msgid "" +"ImageBusy error raised while deleting rbd volume. This may have been " +"caused by a connection from a client that has crashed and, if so, may be " +"resolved by retrying the delete after 30 seconds has elapsed." 
+msgstr "" + +#: cinder/volume/drivers/rbd.py:642 +msgid "volume is a clone so cleaning references" +msgstr "" + +#: cinder/volume/drivers/rbd.py:696 +#, python-format +msgid "connection data: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:705 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:709 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:712 +msgid "Not an rbd snapshot" +msgstr "" + +#: cinder/volume/drivers/rbd.py:724 +#, python-format +msgid "not cloneable: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:728 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:733 +msgid "rbd image clone requires image format to be 'raw' but image {0} is '{1}'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:747 +#, python-format +msgid "Unable to open image %(loc)s: %(err)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:817 +msgid "volume backup complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:830 +msgid "volume restore complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:840 cinder/volume/drivers/sheepdog.py:195 +#, python-format +msgid "Failed to Extend Volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:845 cinder/volume/drivers/sheepdog.py:200 +#: cinder/volume/drivers/windows/windows.py:223 +#, python-format +msgid "Extend volume from %(old_size)s GB to %(new_size)s GB." +msgstr "" + +#: cinder/volume/drivers/scality.py:67 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:78 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:84 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:105 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:139 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:59 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:64 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:144 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:151 +#, python-format +msgid "" +"Failed to make httplib connection SolidFire Cluster: %s (verify san_ip " +"settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:154 +#, python-format +msgid "Failed to make httplib connection: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:161 +#, python-format +msgid "" +"Request to SolidFire cluster returned bad status: %(status)s / %(reason)s" +" (check san_login/san_password settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:166 +#, python-format +msgid "HTTP request failed, with status: %(status)s and reason: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:177 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:183 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:187 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:189 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:195 +#, python-format +msgid "Detected 
xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:202 +#: cinder/volume/drivers/solidfire.py:271 +#: cinder/volume/drivers/solidfire.py:366 +#, python-format +msgid "API response: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:222 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:253 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:315 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:398 +msgid "Failed to get model update from clone" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:410 +#, python-format +msgid "Failed volume create: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:425 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:460 +#, python-format +msgid "Failed to get SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:469 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:478 +#, python-format +msgid "Volume %s, not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:481 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:550 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:554 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:556 +msgid "This usually means the volume was never successfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:569 +#, python-format +msgid "Failed to delete SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:572 +#: cinder/volume/drivers/solidfire.py:646 +#: cinder/volume/drivers/solidfire.py:709 +#: cinder/volume/drivers/solidfire.py:734 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:575 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:579 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:587 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:638 +msgid "Entering SolidFire extend_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:660 +msgid "Leaving SolidFire extend_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:665 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:673 +msgid "Failed to get updated stats" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:703 +#: cinder/volume/drivers/solidfire.py:728 +msgid "Entering SolidFire attach_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:773 +msgid "Leaving SolidFire transfer volume" +msgstr "" + +#: cinder/volume/drivers/zadara.py:236 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:260 +#, python-format +msgid "Operation completed. 
%(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:357 +#, python-format +msgid "Pool %(name)s: %(total)sGB total, %(free)sGB free" +msgstr "" + +#: cinder/volume/drivers/zadara.py:408 cinder/volume/drivers/zadara.py:531 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:438 +#, python-format +msgid "Create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:445 cinder/volume/drivers/zadara.py:490 +#: cinder/volume/drivers/zadara.py:516 +#, python-format +msgid "Volume %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:456 +#, python-format +msgid "Delete snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:464 +#, python-format +msgid "snapshot: original volume %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:472 +#, python-format +msgid "snapshot: snapshot %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:483 +#, python-format +msgid "Creating volume from snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:496 +#, python-format +msgid "Snapshot %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:614 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:40 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:79 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:83 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:91 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:98 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:107 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:115 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:130 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:137 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:144 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:152 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:157 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:167 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:177 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:188 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:197 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:218 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:230 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:241 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:257 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:266 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:278 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:287 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:292 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:302 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:312 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:321 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:342 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. 
Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:354 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:365 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:381 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:390 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:402 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:411 +msgid "Entering delete_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:413 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:420 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:430 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:438 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:442 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:456 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:465 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:472 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:476 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:488 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:497 +#: cinder/volume/drivers/emc/emc_smis_common.py:567 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:502 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:518 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:527 +#, python-format +msgid "" +"Error Create Snapshot: %(snapshot)s Volume: %(volume)s Error: " +"%(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:535 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:541 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:545 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:551 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:559 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:574 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:590 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:599 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:611 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:621 +#, python-format +msgid "Create export: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:626 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:648 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:663 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:674 +#, python-format +msgid "Error mapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:678 +#, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:694 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:707 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:711 +#, python-format +msgid "HidePaths for volume %s completed successfully." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:724 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:739 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:744 +#, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:757 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:770 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:775 +#, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:781 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:790 +#: cinder/volume/drivers/emc/emc_smis_common.py:820 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:804 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:810 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:834 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:840 +#, python-format +msgid "Volume %s is already mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:852 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:884 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:887 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:903 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:906 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:928 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:948 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:952 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:959 +msgid "Cannot connect to ECOM server" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:971 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:984 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:997 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1010 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1054 +#, python-format +msgid "Pool %(storage_type)s is not found." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1060 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1066 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1082 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1114 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1117 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1130 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1158 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1184 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1188 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1248 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1289 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1302 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1314 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1326 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1361 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1404 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1409 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1419 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1441 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1463 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1491 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1520 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1526 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1538 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1548 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1550 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1566 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:152 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:161 +#, python-format +msgid "Cannot find device number for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:191 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:198 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:215 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:70 +#, python-format +msgid "Range: start LU: %(start)s, end LU: %(end)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:84 +#, python-format +msgid "setting LU upper (end) limit to %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:92 +#, python-format +msgid "%(element)s: %(val)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:103 cinder/volume/drivers/hds/hds.py:105 +#, python-format +msgid "XML exception reading parameter: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:178 +#, python-format +msgid "portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:197 +#, python-format +msgid "No configuration found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:250 +#, python-format +msgid "HDP not found: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:289 +#, python-format +msgid "iSCSI portal not found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:327 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:355 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is cloned." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:372 +#, python-format +msgid "LUN %(lun)s extended to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:395 +#, python-format +msgid "delete lun %(lun)s on %(name)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:480 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created from snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:503 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is created as snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:522 +#, python-format +msgid "LUN %s is deleted." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:57 +msgid "_instantiate_driver: configuration not found." 
+msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:64 +#, python-format +msgid "" +"_instantiate_driver: Loading %(protocol)s driver for Huawei OceanStor " +"%(product)s series storage arrays." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:84 +#, python-format +msgid "" +"\"Product\" or \"Protocol\" is illegal. \"Product\" should be set to " +"either T, Dorado or HVS. \"Protocol\" should be set to either iSCSI or " +"FC. Product: %(product)s Protocol: %(protocol)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:74 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s host: %(host)s initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:92 +#: cinder/volume/drivers/huawei/huawei_t.py:461 +#, python-format +msgid "initialize_connection: Target FC ports WWNS: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:101 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(ini)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:159 +#: cinder/volume/drivers/huawei/rest_common.py:1278 +#, python-format +msgid "" +"_get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " +"check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:206 +#: cinder/volume/drivers/huawei/rest_common.py:1083 +#, python-format +msgid "_get_tgt_iqn: iSCSI IP is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:234 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:248 +#, python-format +msgid "" +"_get_iscsi_tgt_port_info: Failed to get iSCSI port info. Please make sure" +" the iSCSI port IP %s is configured in array." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:323 +#: cinder/volume/drivers/huawei/huawei_t.py:552 +#, python-format +msgid "" +"terminate_connection: volume: %(vol)s, host: %(host)s, connector: " +"%(initiator)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:351 +#, python-format +msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:436 +msgid "validate_connector: The FC driver requires thewwpns in the connector." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:443 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:578 +#, python-format +msgid "_remove_fc_ports: FC port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:40 +#, python-format +msgid "parse_xml_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:129 +#, python-format +msgid "_get_host_os_type: Host %(ip)s OS type is %(os)s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:59 +#, python-format +msgid "HVS Request URL: %(url)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:60 +#, python-format +msgid "HVS Request Data: %(data)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:73 +#, python-format +msgid "HVS Response Data: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:75 +#, python-format +msgid "Bad response from server: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:82 +msgid "JSON transfer error" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:102 +#, python-format +msgid "Login error, reason is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:166 +#, python-format +msgid "" +"%(err)s\n" +"result: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:173 +#, python-format +msgid "%s \"data\" was not in result." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:208 +msgid "Can't find the Qos policy in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:246 +msgid "Can't find lun or lun group in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:280 +#, python-format +msgid "Invalid resource pool: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:298 +#, python-format +msgid "Get pool info error, pool name is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:327 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:354 +#, python-format +msgid "_stop_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:474 +#, python-format +msgid "" +"_mapping_hostgroup_and_lungroup: lun_group: %(lun_group)sview_id: " +"%(view_id)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:511 +#: cinder/volume/drivers/huawei/rest_common.py:543 +#, python-format +msgid "initiator name:%(initiator_name)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:527 +#, python-format +msgid "host lun id is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:553 +#, python-format +msgid "the free wwns %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:574 +#, python-format +msgid "the fc server properties is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:688 +#, python-format +msgid "JSON transfer data error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:874 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:937 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:964 +#, python-format +msgid "" +"PrefetchType config is wrong. PrefetchType must in 1,2,3,4. fetchtype " +"is:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:970 +msgid "Use default prefetch fetchtype. Prefetch fetchtype:Intelligent." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:982 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal.LUNcopy name: " +"%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1056 +#, python-format +msgid "" +"_get_iscsi_port_info: Failed to get iscsi port info through config IP " +"%(ip)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1101 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1124 +#, python-format +msgid "_parse_volume_type: type id: %(type_id)s config parameter is: %(params)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1157 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the configuration file " +"%(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1162 +#, python-format +msgid "The config parameters are: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1239 +#: cinder/volume/drivers/huawei/ssh_common.py:118 +#: cinder/volume/drivers/huawei/ssh_common.py:1265 +#, python-format +msgid "_check_conf_file: Config file invalid. %s must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1246 +#: cinder/volume/drivers/huawei/ssh_common.py:125 +msgid "_check_conf_file: Config file invalid. StoragePool must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1256 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1300 +msgid "Can not find lun in array" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:54 +#, python-format +msgid "ssh_read: Read SSH timeout. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:70 +msgid "No response message. Please check system status." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:101 +#: cinder/volume/drivers/huawei/ssh_common.py:1249 +msgid "do_setup" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:135 +#: cinder/volume/drivers/huawei/ssh_common.py:1287 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType is invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:169 +#, python-format +msgid "_get_login_info: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:224 +#, python-format +msgid "create_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:242 +#, python-format +msgid "" +"_name_translate: Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:279 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the element in configuration " +"file %(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:373 +#: cinder/volume/drivers/huawei/ssh_common.py:1451 +#, python-format +msgid "LUNType must be \"Thin\" or \"Thick\". LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:395 +msgid "" +"_parse_conf_lun_params: Use default prefetch type. 
Prefetch type: " +"Intelligent" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:421 +#, python-format +msgid "" +"_get_maximum_capacity_pool_id: Failed to get pool id. Please check config" +" file and make sure the StoragePool %s is created in storage array." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:436 +#, python-format +msgid "CLI command: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:466 +#, python-format +msgid "" +"_execute_cli: Can not connect to IP %(old)s, try to connect to the other " +"IP %(new)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:501 +#, python-format +msgid "_execute_cli: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:511 +#, python-format +msgid "delete_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:516 +#, python-format +msgid "delete_volume: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:570 +#, python-format +msgid "" +"create_volume_from_snapshot: snapshot name: %(snapshot)s, volume name: " +"%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:580 +#, python-format +msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:650 +#, python-format +msgid "_wait_for_luncopy: LUNcopy %(luncopyname)s status is %(status)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:688 +#, python-format +msgid "create_cloned_volume: src volume: %(src)s, tgt volume: %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:697 +#, python-format +msgid "Source volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:739 +#, python-format +msgid "" +"extend_volume: extended volume name: %(extended_name)s new added volume " +"name: %(added_name)s new added volume size: %(added_size)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:747 +#, python-format +msgid "extend_volume: volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:779 +#, python-format +msgid "create_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:785 +msgid "create_snapshot: Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:792 +#, python-format +msgid "create_snapshot: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:855 +#, python-format +msgid "delete_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:865 +#, python-format +msgid "" +"delete_snapshot: Can not delete snapshot %s for it is a source LUN of " +"LUNCopy." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:873 +#, python-format +msgid "delete_snapshot: Snapshot %(snap)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:916 +#, python-format +msgid "" +"%(func)s: %(msg)s\n" +"CLI command: %(cmd)s\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:933 +#, python-format +msgid "map_volume: Volume %s was not found." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1079 +#, python-format +msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1102 +#, python-format +msgid "remove_map: Host %s does not exist." 
+msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1106 +#, python-format +msgid "remove_map: Volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1119 +#, python-format +msgid "remove_map: No map between host %(host)s and volume %(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1138 +#, python-format +msgid "" +"_delete_map: There are IOs accessing the system. Retry to delete host map" +" %(mapid)s 10s later." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1146 +#, python-format +msgid "" +"_delete_map: Failed to delete host map %(mapid)s.\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1185 +msgid "_update_volume_stats: Updating volume stats." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1277 +msgid "_check_conf_file: Config file invalid. StoragePool must be specified." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1311 +msgid "" +"_get_device_type: The driver only supports Dorado5100 and Dorado 2100 G2 " +"now." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1389 +#, python-format +msgid "" +"create_volume_from_snapshot: %(device)s does not support create volume " +"from snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1396 +#, python-format +msgid "create_cloned_volume: %(device)s does not support clone volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1404 +#, python-format +msgid "extend_volume: %(device)s does not support extend volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1413 +#, python-format +msgid "create_snapshot: %(device)s does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:132 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:142 +#, python-format +msgid "Failed getting details for pool %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:179 +msgid "do_setup: No configured nodes." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:182 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:186 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:190 +msgid "Unable to determine system name" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:193 +msgid "Unable to determine system id" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:196 +msgid "Unable to determine pool extent size" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:203 +#: cinder/volume/drivers/netapp/iscsi.py:122 +#: cinder/volume/drivers/netapp/nfs.py:639 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:157 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:209 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:217 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:225 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:235 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:254 +msgid "The connector does not contain the required information." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:277 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:297 +msgid "CHAP secret exists for host but CHAP is disabled" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:302 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:314 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:316 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:333 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:342 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:365 +msgid "" +"Could not get FC connection information for the host-volume connection. " +"Is the host configured properly for FC connections?" 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:380 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:385 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:403 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:411 +msgid "terminate_connection: Failed to get host name from connector." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:421 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:447 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:459 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:470 +#, python-format +msgid "enter: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:474 +msgid "extend_volume: Extending a volume with snapshots is not supported." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:481 +#, python-format +msgid "leave: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:497 +#, python-format +msgid "enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:523 +#, python-format +msgid "leave: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:540 +#, python-format +msgid "" +"enter: retype: id=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:581 +#, python-format +msgid "" +"exit: retype: ild=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:622 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:623 +msgid "_update_volume_stats: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:44 +#, python-format +msgid "Could not find key in output of command %(cmd)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:64 +#, python-format +msgid "Failed to get code level (%s)." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:86 +#, python-format +msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:143 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:165 +#, python-format +msgid "Failed to find host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:178 +#, python-format +msgid "enter: get_host_from_connector: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:207 +#, python-format +msgid "leave: get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:218 +#, python-format +msgid "enter: create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:224 +msgid "create_host: Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:235 +msgid "create_host: No initiators or wwpns supplied." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:265 +#, python-format +msgid "leave: create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:275 +#, python-format +msgid "enter: map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:301 +#, python-format +msgid "" +"leave: map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host " +"%(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:311 +#, python-format +msgid "enter: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:318 +#, python-format +msgid "unmap_vol_from_host: No mapping of volume %(vol_name)s to any host found." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:324 +#, python-format +msgid "" +"unmap_vol_from_host: Multiple mappings of volume %(vol_name)s found, no " +"host specified." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:336 +#, python-format +msgid "" +"unmap_vol_from_host: No mapping of volume %(vol_name)s to host %(host) " +"found." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:348 +#, python-format +msgid "leave: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:377 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:383 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:390 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:397 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:402 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:408 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:417 +#, python-format +msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:452 +msgid "Protocol must be specified as ' iSCSI' or ' FC'." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:495 +#, python-format +msgid "enter: create_vdisk: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:498 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:525 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping%(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:535 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within theallotted %(to)d " +"seconds timeout. Terminating." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:544 +#, python-format +msgid "" +"enter: run_flashcopy: execute FlashCopy from source %(source)s to target " +"%(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:552 +#, python-format +msgid "leave: run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:572 +#, python-format +msgid "Loopcall: _check_vdisk_fc_mappings(), vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:595 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:631 +#, python-format +msgid "Calling _ensure_vdisk_no_fc_mappings: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:639 +#, python-format +msgid "enter: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:641 +#, python-format +msgid "Tried to delete non-existant vdisk %s." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:645 +#, python-format +msgid "leave: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:649 +#, python-format +msgid "enter: create_copy: snapshot %(src)s to %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:654 +#, python-format +msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:669 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt)s from vdisk %(src)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:691 +msgid "migrate_volume started without a vdisk copy in the expected pool." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:743 +#, python-format +msgid "" +"Ignore change IO group as storage code level is %(code_level)s, below " +"then 6.4.0.0" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:35 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:211 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:244 +#, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:53 +#, python-format +msgid "Expected no output from CLI command %(cmd)s, got %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:65 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:256 +#, python-format +msgid "" +"Failed to parse CLI output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:142 +msgid "Must pass wwpn or host to lsfabric." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:158 +#, python-format +msgid "Did not find success message nor error for %(fun)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:163 +msgid "" +"storwize_svc_multihostmap_enabled is set to False, not allowing multi " +"host mapping." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:347 +#, python-format +msgid "Did not find expected key %(key)s in %(fun)s: %(raw)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:382 +#, python-format +msgid "" +"Unexpected CLI response: header/row mismatch. header: %(header)s, row: " +"%(row)s" +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:419 +#, python-format +msgid "No element by given name %s." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:440 +msgid "Not a valid value for NaElement." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:444 +msgid "NaElement name cannot be null." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:468 +msgid "Type cannot be converted into NaElement." 
+msgstr "" + +#: cinder/volume/drivers/netapp/common.py:75 +msgid "Required configuration not found" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:103 +#, python-format +msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:109 +#, python-format +msgid "Storage family %s is not supported" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:116 +#, python-format +msgid "No default storage protocol found for storage family %(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:123 +#, python-format +msgid "" +"Protocol %(storage_protocol)s is not supported for storage family " +"%(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:130 +#, python-format +msgid "" +"NetApp driver of family %(storage_family)s and protocol " +"%(storage_protocol)s loaded" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:139 +msgid "Only loading netapp drivers supported." +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:158 +#, python-format +msgid "" +"The configured NetApp driver is deprecated. Please refer the link to " +"resolve the issue '%s'." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:69 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:105 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:150 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:166 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:175 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:191 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:227 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:232 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:238 +#, python-format +msgid "Failed to get LUN target details for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:249 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:252 +#, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:290 +#, python-format +msgid "Snapshot %s deletion successful" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:310 +#: cinder/volume/drivers/netapp/iscsi.py:565 +#: cinder/volume/drivers/netapp/nfs.py:99 +#: cinder/volume/drivers/netapp/nfs.py:206 +#, python-format +msgid "Resizing %s failed. Cleaning volume." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:325 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:412 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:431 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:511 +msgid "Object is not a NetApp LUN." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:543 +#, python-format +msgid "Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:545 +#, python-format +msgid "Error getting lun attribute. Exception: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:600 +#, python-format +msgid "No need to extend volume %s as it is already the requested new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:606 +#, python-format +msgid "Resizing lun %s directly to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:633 +#, python-format +msgid "Lun %(path)s geometry failed. Message - %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:662 +#, python-format +msgid "Moving lun %(name)s to %(new_name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:677 +#, python-format +msgid "Resizing lun %s using sub clone to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:684 +#, python-format +msgid "%s cannot be sub clone resized as it is hosted on compressed volume" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:690 +#, python-format +msgid "%s cannot be sub clone resized as it contains no blocks." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:707 +#, python-format +msgid "Post clone resize lun %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:718 +#, python-format +msgid "Failure staging lun %s to tmp." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:723 +#, python-format +msgid "Failure moving new cloned lun to %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:727 +#, python-format +msgid "Failure deleting staged tmp lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:730 +#, python-format +msgid "Unknown exception in post clone resize lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:732 +#, python-format +msgid "Exception details: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:736 +msgid "Getting lun block count." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:741 +#, python-format +msgid "Failure getting lun info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:785 +#, python-format +msgid "Failed to get vol with required size and extra specs for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:796 +#, python-format +msgid "Error provisioning vol %(name)s on %(volume)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:841 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:982 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:986 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1087 +msgid "Cluster ssc is not updated. No volume stats found." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1149 +#: cinder/volume/drivers/netapp/nfs.py:1080 +msgid "Unsupported ONTAP version. ONTAP version 7.3.1 and above is supported." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1153 +#: cinder/volume/drivers/netapp/nfs.py:1084 +#: cinder/volume/drivers/netapp/utils.py:320 +msgid "Api version could not be determined." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1164 +#, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1273 +#, python-format +msgid "Error finding luns for volume %s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1390 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1393 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1456 +msgid "Volume refresh job already running. Returning..." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1462 +#, python-format +msgid "Error refreshing vol capacity. Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1470 +#, python-format +msgid "Refreshing capacity info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:104 +#: cinder/volume/drivers/netapp/nfs.py:211 +#, python-format +msgid "NFS file %s not discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:224 +#, python-format +msgid "Copied image to volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:230 +#, python-format +msgid "Registering image in cache %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:237 +#, python-format +msgid "" +"Exception while registering image %(image_id)s in cache. Exception: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:250 +#, python-format +msgid "Found cache file for image %(image_id)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:263 +#, python-format +msgid "Cloning img from cache for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:271 +msgid "Image cache cleaning in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:282 +msgid "Image cache cleaning in progress." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:293 +#, python-format +msgid "Cleaning cache for share %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:298 +#, python-format +msgid "Files to be queued for deletion %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:305 +#, python-format +msgid "Exception during cache cleaning %(share)s. Message - %(ex)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:311 +msgid "Image cache cleaning done." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:336 +#, python-format +msgid "Bytes to free %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:343 +#, python-format +msgid "Delete file path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:358 +#, python-format +msgid "Deleting file at path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:363 +#, python-format +msgid "Exception during deleting %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:395 +#, python-format +msgid "Unexpected exception in cloning image %(image_id)s. 
Message: %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:411 +#, python-format +msgid "Cloning image %s from cache" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:415 +#, python-format +msgid "Cache share: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:425 +#, python-format +msgid "Unexpected exception during image cloning in share %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:431 +#, python-format +msgid "Cloning image %s directly in share" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:436 +#, python-format +msgid "Share is cloneable %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:443 +#, python-format +msgid "Image is raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:450 +#, python-format +msgid "Image will locally be converted to raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:457 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:467 +#, python-format +msgid "Performing post clone for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:474 +msgid "NFS file could not be discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:478 +msgid "Checking file for resize" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:482 +#, python-format +msgid "Resizing file to %sG" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:488 +msgid "Resizing image file failed." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:510 +msgid "Discover file retries exhausted." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:529 +#, python-format +msgid "Image location not in the expected format %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:557 +#, python-format +msgid "Found possible share matches %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:561 +msgid "Unexpected exception while short listing used share." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:599 +#, python-format +msgid "Extending volume %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:710 +#, python-format +msgid "Shares on vserver %s will only be used for provisioning." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:714 +#: cinder/volume/drivers/netapp/nfs.py:892 +msgid "No vserver set in config. SSC will be disabled." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:757 +#, python-format +msgid "Exception creating vol %(name)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:765 +#, python-format +msgid "Volume %s could not be created on shares." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:815 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:856 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:862 +#, python-format +msgid "" +"Cloning with params volume %(volume)s, src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:924 +msgid "No cluster ssc stats found. Wait for next volume stats update." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:941 +msgid "No shares found hence skipping ssc refresh." 
+msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:978 +#: cinder/volume/drivers/netapp/nfs.py:1221 +#, python-format +msgid "Shortlisted del elg files %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:983 +#: cinder/volume/drivers/netapp/nfs.py:1226 +#, python-format +msgid "Getting file usage for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:988 +#: cinder/volume/drivers/netapp/nfs.py:1231 +#, python-format +msgid "file-usage for path %(path)s is %(bytes)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1005 +#: cinder/volume/drivers/netapp/nfs.py:1268 +#, python-format +msgid "Share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1007 +#: cinder/volume/drivers/netapp/nfs.py:1270 +#, python-format +msgid "No share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1038 +#, python-format +msgid "Found volume %(vol)s for share %(share)s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1129 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1139 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:241 +#, python-format +msgid "Unexpected error while creating ssc vol list. Message - %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:272 +#, python-format +msgid "Exception querying aggr options. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:313 +#, python-format +msgid "Exception querying sis information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:347 +#, python-format +msgid "Exception querying mirror information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:379 +#, python-format +msgid "Exception querying storage disk. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:421 +#, python-format +msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:455 +#, python-format +msgid "Successfully completed stale refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:482 +#, python-format +msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:488 +#, python-format +msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:500 +msgid "Backend not a VolumeDriver." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:502 +msgid "Backend server not NaServer." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:505 +msgid "ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:517 +msgid "refresh stale ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:616 +msgid "Fatal error: User not permitted to query NetApp volumes." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:623 +#, python-format +msgid "" +"The user does not have access or sufficient privileges to use all ssc " +"apis. The ssc features %s may not work as expected." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:122 +msgid "ems executed successfully." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:124 +#, python-format +msgid "Failed to invoke ems. 
Message: %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/utils.py:137
+msgid ""
+"It is not recommended to use the NetApp drivers directly. Please use "
+"NetAppDriver to achieve this functionality."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/utils.py:160
+msgid "Requires an NaServer instance."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/utils.py:317
+msgid "Unsupported Clustered Data ONTAP version."
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:99
+#, python-format
+msgid "Volume %s does not exist in Nexenta SA"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:150
+#, python-format
+msgid "Extending volume: %(id)s, new size: %(size)s GB"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:166
+#, python-format
+msgid "Volume %s does not exist, it seems it was already deleted."
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:179
+#, python-format
+msgid "Cannot delete snapshot %(origin)s: %(exc)s"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:190
+#, python-format
+msgid "Creating temp snapshot of the original volume: %(volume_name)s@%(name)s"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:200
+#: cinder/volume/drivers/nexenta/nfs.py:200
+#, python-format
+msgid "Volume creation failed, deleting created snapshot %(volume_name)s@%(name)s"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:205
+#: cinder/volume/drivers/nexenta/nfs.py:205
+#, python-format
+msgid "Failed to delete zfs snapshot %(volume_name)s@%(name)s"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:223
+#, python-format
+msgid "Enter: migrate_volume: id=%(id)s, host=%(host)s"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:250
+#, python-format
+msgid "Remote NexentaStor appliance at %s should be SSH-bound."
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:267
+#, python-format
+msgid ""
+"Cannot send source snapshot %(src)s to destination %(dst)s. Reason: "
+"%(exc)s"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:275
+#, python-format
+msgid ""
+"Cannot delete temporary source snapshot %(src)s on NexentaStor Appliance:"
+" %(exc)s"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:281
+#, python-format
+msgid "Cannot delete source volume %(volume)s on NexentaStor Appliance: %(exc)s"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:318
+#, python-format
+msgid "Snapshot %s does not exist, it seems it was already deleted."
+msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:439 +#: cinder/volume/drivers/windows/windows_utils.py:230 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:449 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:461 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:471 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:481 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:514 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:522 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:83 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:88 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:89 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:90 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:96 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:85 +#, python-format +msgid "Volume %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:89 +#, python-format +msgid "Folder %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:114 +#, python-format +msgid "Creating folder on Nexenta Store %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:146 +#, python-format +msgid "Cannot destroy created folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:176 +#, python-format +msgid "Cannot destroy cloned folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:227 +#, python-format +msgid "Folder %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:237 +#: cinder/volume/drivers/nexenta/nfs.py:268 +#, python-format +msgid "Snapshot %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:302 +#, python-format +msgid "Creating regular file: %s.This may take some time." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:313 +#, python-format +msgid "Regular file: %s created." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:365 +#, python-format +msgid "Sharing folder %s on Nexenta Store" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:393 +#, python-format +msgid "Shares loaded: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/utils.py:46 +#, python-format +msgid "Invalid value: \"%s\"" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:93 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:99 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:107 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:137 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:190 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:246 +#, python-format +msgid "Snapshot info: %(name)s => %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:321 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:79 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:166 +#, python-format +msgid "Invalid hp3parclient version. Version %s or greater required." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:179 +#, python-format +msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:193 +#, python-format +msgid "HP3PARCommon %(common_ver)s, hp3parclient %(rest_ver)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:212 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:494 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:228 +#, python-format +msgid "Failed to get domain because CPG (%s) doesn't exist on array." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:247 +#, python-format +msgid "Error extending volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:347 +#, python-format +msgid "command %s failed" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:390 +#, python-format +msgid "Error running ssh command: %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:586 +#, python-format +msgid "VV Set %s does not exist." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:633 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:684 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:752 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1004 +#, python-format +msgid "Failure in update_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1019 +#, python-format +msgid "Failure in clear_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1031 +#, python-format +msgid "Error attaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1039 +#, python-format +msgid "Error detaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:124 +#, python-format +msgid "Invalid IP address format '%s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:158 +#, python-format +msgid "" +"Found invalid iSCSI IP address(s) in configuration option(s) " +"hp3par_iscsi_ips or iscsi_ip_address '%s.'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:164 +msgid "At least one valid iSCSI IP address must be set." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:266 +msgid "Least busy iSCSI port not found, using first iSCSI port in list." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:75 +#, python-format +msgid "Failure while invoking function: %(func)s. Error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:162 +#, python-format +msgid "Error while terminating session: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:165 +msgid "Successfully established connection to the server." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:172 +#, python-format +msgid "Error while logging out the user: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:218 +#, python-format +msgid "" +"Not authenticated error occurred. Will create session and try API call " +"again: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:258 +#, python-format +msgid "Task: %(task)s progress: %(prog)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:262 +#, python-format +msgid "Task %s status: success." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:266 +#: cinder/volume/drivers/vmware/api.py:271 +#, python-format +msgid "Task: %(task)s failed with error: %(err)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:290 +msgid "Lease is ready." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:294 +msgid "Lease initializing..." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:304 +#, python-format +msgid "Error: unknown lease state %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:51 +#, python-format +msgid "Read %(bytes)s out of %(max)s from ThreadSafePipe." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:56 +#, python-format +msgid "Completed transfer of size %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:102 +#, python-format +msgid "Initiating image service update on image: %(image)s with meta: %(meta)s" +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:117 +#, python-format +msgid "Glance image: %s is now active." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:123 +#, python-format +msgid "Glance image: %s is in killed state." 
+msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:132 +#, python-format +msgid "Glance image %(id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:171 +#, python-format +msgid "" +"Exception during HTTP connection close in VMwareHTTPWrite. Exception is " +"%s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:203 +#: cinder/volume/drivers/vmware/read_write_util.py:292 +msgid "Could not retrieve URL from lease." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:206 +#, python-format +msgid "Opening vmdk url: %s for write." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:231 +#, python-format +msgid "Written %s bytes to vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:242 +#: cinder/volume/drivers/vmware/read_write_util.py:318 +#, python-format +msgid "Updating progress to %s percent." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:258 +#: cinder/volume/drivers/vmware/read_write_util.py:334 +msgid "Lease released." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:260 +#: cinder/volume/drivers/vmware/read_write_util.py:336 +#, python-format +msgid "Lease is already in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:295 +#, python-format +msgid "Opening vmdk url: %s for read." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:307 +#, python-format +msgid "Read %s bytes from vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:150 +#, python-format +msgid "Error(s): %s occurred in the call to RetrievePropertiesEx." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:189 +#, python-format +msgid "No such SOAP method %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:198 +#, python-format +msgid "httplib error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:209 +#, python-format +msgid "Socket error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:218 +#, python-format +msgid "Type error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:225 +#, python-format +msgid "Error in %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:112 +#, python-format +msgid "Returning spec value %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:115 +#, python-format +msgid "Invalid spec value: %s specified." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:118 +#, python-format +msgid "Returning default spec value: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:169 +#, python-format +msgid "%s not set." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:174 +#, python-format +msgid "Successfully setup driver: %(driver)s for server: %(ip)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:219 +msgid "Backing not available, no operation to be performed." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:287 +#, python-format +msgid "" +"Unable to pick datastore to accommodate %(size)s bytes from the " +"datastores: %(dss)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:293 +#, python-format +msgid "" +"Selected datastore: %(datastore)s with %(host_count)d connected host(s) " +"for the volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:375 +#, python-format +msgid "" +"Unable to find suitable datastore for volume of size: %(vol)s GB under " +"host: %(host)s. 
More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:385 +#, python-format +msgid "Unable to find host to accommodate a disk of size: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:412 +#, python-format +msgid "" +"Unable to find suitable datastore for volume: %(vol)s under host: " +"%(host)s. More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:422 +#, python-format +msgid "Unable to create volume: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:441 +#, python-format +msgid "The instance: %s for which initialize connection is called, exists." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:448 +#, python-format +msgid "There is no backing for the volume: %s. Need to create one." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:456 +msgid "The instance for which initialize connection is called, does not exist." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:461 +#, python-format +msgid "Trying to boot from an empty volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:470 +#, python-format +msgid "" +"Returning connection_info: %(info)s for volume: %(volume)s with " +"connector: %(connector)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:518 +#, python-format +msgid "Snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:523 +#, python-format +msgid "There is no backing, so will not create snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:528 +#, python-format +msgid "Successfully created snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:549 +#, python-format +msgid "Delete snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:554 +#, python-format +msgid "There is no backing, and so there is no snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:558 +#, python-format +msgid "Successfully deleted snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:586 +#, python-format +msgid "Successfully cloned new backing: %(back)s from source VMDK file: %(vmdk)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:603 +#, python-format +msgid "" +"There is no backing for the source volume: %(svol)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:633 +#, python-format +msgid "" +"There is no backing for the source snapshot: %(snap)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:642 +#: cinder/volume/drivers/vmware/vmdk.py:982 +#, python-format +msgid "" +"There is no snapshot point for the snapshoted volume: %(snap)s. Not " +"creating any backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:678 +#, python-format +msgid "Cannot create image of disk format: %s. Only vmdk disk format is accepted." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:713 +#: cinder/volume/drivers/vmware/vmdk.py:771 +#, python-format +msgid "Fetching glance image: %(id)s to server: %(host)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:722 +#: cinder/volume/drivers/vmware/vmdk.py:792 +#, python-format +msgid "Done copying image: %(id)s to volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:725 +#, python-format +msgid "" +"Exception in copy_image_to_volume: %(excep)s. Deleting the backing: " +"%(back)s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:746 +#, python-format +msgid "Exception in _select_ds_for_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:749 +#, python-format +msgid "Selected datastore %(ds)s for new volume of size %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:784 +#, python-format +msgid "Exception in copy_image_to_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:787 +#, python-format +msgid "Deleting the backing: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:808 +#, python-format +msgid "Copy glance image: %s to create new volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:842 +msgid "Upload to glance of attached volume is not supported." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:847 +#, python-format +msgid "Copy Volume: %s to new image." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:853 +#, python-format +msgid "Backing not found, creating for volume: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:872 +#, python-format +msgid "Done copying volume %(vol)s to a new image %(img)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:922 +#, python-format +msgid "Relocating volume: %(backing)s to %(ds)s and %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:961 +#: cinder/volume/drivers/vmware/volumeops.py:630 +#, python-format +msgid "Successfully created clone: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:974 +#, python-format +msgid "" +"There is no backing for the snapshoted volume: %(snap)s. Not creating any" +" backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1010 +#, python-format +msgid "" +"There is no backing for the source volume: %(src)s. Not creating any " +"backing for volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1018 +#, python-format +msgid "Linked clone of source volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:94 +#, python-format +msgid "Downloading image: %s from glance image server as a flat vmdk file." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:107 +#: cinder/volume/drivers/vmware/vmware_images.py:126 +#, python-format +msgid "Downloaded image: %s from glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:113 +#, python-format +msgid "Downloading image: %s from glance image server using HttpNfc import." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:132 +#, python-format +msgid "Uploading image: %s to the Glance image server using HttpNfc export." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:158 +#, python-format +msgid "Uploaded image: %s to the Glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:87 +#, python-format +msgid "Did not find any backing with name: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:94 +#, python-format +msgid "Deleting the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:97 +#, python-format +msgid "Initiated deletion of VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:99 +#, python-format +msgid "Deleted the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:237 +#, python-format +msgid "There are no valid datastores attached to %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:289 +#, python-format +msgid "" +"Creating folder: %(child_folder_name)s under parent folder: " +"%(parent_folder)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:306 +#, python-format +msgid "Child folder already present: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:314 +#, python-format +msgid "Created child folder: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:365 +#, python-format +msgid "Spec for creating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:383 +#, python-format +msgid "" +"Creating volume backing name: %(name)s disk_type: %(disk_type)s size_kb: " +"%(size_kb)s at folder: %(folder)s resourse pool: %(resource_pool)s " +"datastore name: %(ds_name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:395 +#, python-format +msgid "Initiated creation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:398 +#, python-format +msgid "Successfully created volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:438 +#, python-format +msgid "Spec for relocating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:452 +#, python-format +msgid "" +"Relocating backing: %(backing)s to datastore: %(ds)s and resource pool: " +"%(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:462 +#, python-format +msgid "Initiated relocation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:464 +#, python-format +msgid "" +"Successfully relocated volume backing: %(backing)s to datastore: %(ds)s " +"and resource pool: %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:474 +#, python-format +msgid "Moving backing: %(backing)s to folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:479 +#, python-format +msgid "Initiated move of volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:482 +#, python-format +msgid "Successfully moved volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:494 +#, python-format +msgid "Snapshoting backing: %(backing)s with name: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:501 +#, python-format +msgid "Initiated snapshot of volume backing: %(backing)s named: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:505 +#, python-format +msgid "Successfully created snapshot: %(snap)s for volume backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:553 +#, python-format +msgid "Deleting the snapshot: %(name)s from backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:558 +#, python-format +msgid "" +"Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " +"delete anything." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:565 +#, python-format +msgid "Initiated snapshot: %(name)s deletion for backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:569 +#, python-format +msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:597 +#, python-format +msgid "Spec for cloning the backing: %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:613 +#, python-format +msgid "" +"Creating a clone of backing: %(back)s, named: %(name)s, clone type: " +"%(type)s from snapshot: %(snap)s on datastore: %(ds)s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:627 +#, python-format +msgid "Initiated clone of backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:638 +#, python-format +msgid "Deleting file: %(file)s under datacenter: %(dc)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:646 +#, python-format +msgid "Initiated deletion via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:648 +#, python-format +msgid "Successfully deleted file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:701 +msgid "Copying disk data before snapshot of the VM" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:711 +#, python-format +msgid "Initiated copying disk data via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:713 +#, python-format +msgid "Successfully copied disk at: %(src)s to: %(dest)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:722 +#, python-format +msgid "Deleting vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:729 +#, python-format +msgid "Initiated deleting vmdk file via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:731 +#, python-format +msgid "Deleted vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/windows/windows.py:102 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:47 +#, python-format +msgid "" +"check_for_setup_error: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:53 +msgid "check_for_setup_error: there is no ISCSI traffic listening." +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:63 +#, python-format +msgid "" +"get_host_information: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:73 +#, python-format +msgid "" +"get_host_information: the ISCSI target information could not be " +"retrieved. WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:105 +#, python-format +msgid "" +"associate_initiator_with_iscsi_target: an association between initiator: " +"%(init)s and target name: %(target)s could not be established. WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:123 +#, python-format +msgid "" +"delete_iscsi_target: error when deleting the iscsi target associated with" +" target name: %(target)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:139 +#, python-format +msgid "" +"create_volume: error when creating the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:157 +#, python-format +msgid "" +"delete_volume: error when deleting the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:177 +#, python-format +msgid "" +"create_snapshot: error when creating the snapshot name: %(vol_name)s . 
" +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:193 +#, python-format +msgid "" +"create_volume_from_snapshot: error when creating the volume name: " +"%(vol_name)s from snapshot name: %(snap_name)s. WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:208 +#, python-format +msgid "" +"delete_snapshot: error when deleting the snapshot name: %(snap_name)s . " +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:223 +#, python-format +msgid "" +"create_iscsi_target: error when creating iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:240 +#, python-format +msgid "" +"remove_iscsi_target: error when deleting iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:255 +#, python-format +msgid "" +"add_disk_to_target: error adding disk associated to volume : %(vol_name)s" +" to the target name: %(tar_name)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:273 +#, python-format +msgid "" +"copy_vhd_disk: error when copying disk from source path : %(src_path)s to" +" destination path: %(dest_path)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:290 +#, python-format +msgid "" +"extend: error when extending the volume: %(vol_name)s .WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/flows/common.py:52 +#, python-format +msgid "Restoring source %(source_volid)s status to %(status)s" +msgstr "" + +#: cinder/volume/flows/common.py:58 +#, python-format +msgid "" +"Failed setting source volume %(source_volid)s back to its initial " +"%(source_status)s status" +msgstr "" + +#: cinder/volume/flows/common.py:83 +#, python-format +msgid "Updating volume: %(volume_id)s with %(update)s due to: %(reason)s" +msgstr "" + +#: cinder/volume/flows/common.py:90 +#: cinder/volume/flows/manager/create_volume.py:676 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(update)s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:81 +#, python-format +msgid "Originating snapshot status must be one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:103 +#, python-format +msgid "" +"Unable to create a volume from an originating source volume when its " +"status is not one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:126 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than the snapshot size " +"%(snap_size)sGB. They must be >= original snapshot size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:135 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than original volume size " +"%(source_size)sGB. They must be >= original volume size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:144 +#, python-format +msgid "Volume size %(size)s must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:186 +#, python-format +msgid "" +"Size of specified image %(image_size)sGB is larger than volume size " +"%(volume_size)sGB." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:194 +#, python-format +msgid "" +"Volume size %(volume_size)sGB cannot be smaller than the image minDisk " +"size %(min_disk)sGB." 
+msgstr "" + +#: cinder/volume/flows/api/create_volume.py:212 +#, python-format +msgid "Metadata property key %s greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:217 +#, python-format +msgid "Metadata property key %s value greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:254 +#, python-format +msgid "Availability zone '%s' is invalid" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:267 +msgid "Volume must be in the same availability zone as the snapshot" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:276 +msgid "Volume must be in the same availability zone as the source volume" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:315 +msgid "Volume type will be changed to be the same as the source volume." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:463 +#, python-format +msgid "Failed destroying volume entry %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:546 +#, python-format +msgid "Failed rolling back quota for %s reservations" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:590 +#, python-format +msgid "Failed to update quota for deleting volume: %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:678 +#: cinder/volume/flows/manager/create_volume.py:192 +#, python-format +msgid "Volume %s: create failed" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:682 +msgid "Unexpected build error:" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:105 +#, python-format +msgid "" +"Volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d due to " +"%(reason)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:124 +#, python-format +msgid "Volume %s: re-scheduled" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:141 +#, python-format +msgid "Updating volume %(volume_id)s with %(update)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:146 +#, python-format +msgid "Volume %s: resetting 'creating' status failed." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:165 +#, python-format +msgid "Volume %s: rescheduling failed" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:308 +#, python-format +msgid "" +"Failed notifying about the volume action %(event)s for volume " +"%(volume_id)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:345 +#, python-format +msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:347 +#, python-format +msgid "" +"Failed updating volume %(vol_id)s metadata using the provided " +"%(src_type)s %(src_id)s metadata" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:405 +#, python-format +msgid "" +"Failed fetching snapshot %(snapshot_id)s bootable flag using the provided" +" glance snapshot %(snapshot_ref_id)s volume reference" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:418 +#, python-format +msgid "Marking volume %s as bootable." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:421 +#, python-format +msgid "Failed updating volume %(volume_id)s bootable flag to true" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:448 +#, python-format +msgid "" +"Attempting download of %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s." 
+msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:455 +#: cinder/volume/flows/manager/create_volume.py:466 +#, python-format +msgid "" +"Failed to copy image %(image_id)s to volume: %(volume_id)s, error: " +"%(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:461 +#, python-format +msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:475 +#, python-format +msgid "" +"Downloaded image %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s successfully." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:512 +#, python-format +msgid "" +"Creating volume glance metadata for volume %(volume_id)s backed by image " +"%(image_id)s with: %(vol_metadata)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:526 +#, python-format +msgid "" +"Cloning %(volume_id)s from image %(image_id)s at location " +"%(image_location)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:552 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(updates)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:574 +#, python-format +msgid "Unable to create volume. Volume driver %s not initialized" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:588 +#, python-format +msgid "" +"Volume %(volume_id)s: being created using %(functor)s with specification:" +" %(volume_spec)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:611 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with creation provided " +"model %(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:619 +#, python-format +msgid "Volume %s: creating export" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:633 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with driver provided model " +"%(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:680 +#, python-format +msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" +msgstr "" + +#~ msgid "Error retrieving volume status: %s" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get system name" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get storage pool data" +#~ msgstr "" + +#~ msgid "Cannot find any Fibre Channel HBAs" +#~ msgstr "" + +#~ msgid "Volume status must be available or error" +#~ msgstr "" + +#~ msgid "No backend config with id %s" +#~ msgstr "" + +#~ msgid "No sm_flavor called %s" +#~ msgstr "" + +#~ msgid "No sm_volume with id %s" +#~ msgstr "" + +#~ msgid "Error: %s" +#~ msgstr "" + +#~ msgid "Unexpected state while cloning %s" +#~ msgstr "" + +#~ msgid "iSCSI device not found at %s" +#~ msgstr "" + +#~ msgid "Fibre Channel device not found." +#~ msgstr "" + +#~ msgid "Uncaught exception" +#~ msgstr "" + +#~ msgid "Out reactor registered" +#~ msgstr "" + +#~ msgid "CONSUMER GOT %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT QUEUED %(data)s" +#~ msgstr "" + +#~ msgid "Could not create IPC directory %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT %(data)s" +#~ msgstr "" + +#~ msgid "May specify only one of snapshot, imageRef or source volume" +#~ msgstr "" + +#~ msgid "Volume size cannot be lesser than the Snapshot size" +#~ msgstr "" + +#~ msgid "Unable to clone volumes that are in an error state" +#~ msgstr "" + +#~ msgid "Clones currently must be >= original volume size." 
+#~ msgstr "" + +#~ msgid "Volume size '%s' must be an integer and greater than 0" +#~ msgstr "" + +#~ msgid "Size of specified image is larger than volume size." +#~ msgstr "" + +#~ msgid "Image minDisk size is larger than the volume size." +#~ msgstr "" + +#~ msgid "" +#~ msgstr "" + +#~ msgid "Availability zone is invalid" +#~ msgstr "" + +#~ msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +#~ msgstr "" + +#~ msgid "volume %s: creating from snapshot" +#~ msgstr "" + +#~ msgid "volume %s: creating from existing volume" +#~ msgstr "" + +#~ msgid "volume %s: creating from image" +#~ msgstr "" + +#~ msgid "volume %s: creating" +#~ msgstr "" + +#~ msgid "Setting volume: %s status to error after failed image copy." +#~ msgstr "" + +#~ msgid "Unexpected Error: " +#~ msgstr "" + +#~ msgid "volume %s: creating export" +#~ msgstr "" + +#~ msgid "volume %s: create failed" +#~ msgstr "" + +#~ msgid "volume %s: created successfully" +#~ msgstr "" + +#~ msgid "volume %s: Error trying to reschedule create" +#~ msgstr "" + +#~ msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +#~ msgstr "" + +#~ msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +#~ msgstr "" + +#~ msgid "Downloaded image %(image_id)s to %(volume_id)s successfully." +#~ msgstr "" + +#~ msgid "Array Mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "LUN %(lun)s of size %(size)s MB is created." +#~ msgstr "" + +#~ msgid "Array mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "Failed to attach iser target for volume %(volume_id)s." +#~ msgstr "" + +#~ msgid "Fetching %s" +#~ msgstr "" + +#~ msgid "Link Local address is not found.:%s" +#~ msgstr "" + +#~ msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +#~ msgstr "" + +#~ msgid "Started %(name)s on %(_host)s:%(_port)s" +#~ msgstr "" + +#~ msgid "Unable to find a Fibre Channel volume device" +#~ msgstr "" + +#~ msgid "Volume device not found at %s" +#~ msgstr "" + +#~ msgid "Unable to find Volume Group: %s" +#~ msgstr "" + +#~ msgid "Failed to create Volume Group: %s" +#~ msgstr "" + +#~ msgid "snapshot %(snap_name)s: creating" +#~ msgstr "" + +#~ msgid "Running with CoraidDriver for ESM EtherCLoud" +#~ msgstr "" + +#~ msgid "Update session cookie %(session)s" +#~ msgstr "" + +#~ msgid "Message : %(message)s" +#~ msgstr "" + +#~ msgid "Error while trying to set group: %(message)s" +#~ msgstr "" + +#~ msgid "Unable to find group: %(group)s" +#~ msgstr "" + +#~ msgid "ESM urlOpen error" +#~ msgstr "" + +#~ msgid "JSON Error" +#~ msgstr "" + +#~ msgid "Request without URL" +#~ msgstr "" + +#~ msgid "Configure data : %s" +#~ msgstr "" + +#~ msgid "Configure response : %s" +#~ msgstr "" + +#~ msgid "Unable to retrive volume infos for volume %(volname)s" +#~ msgstr "" + +#~ msgid "Cannot login on Coraid ESM" +#~ msgstr "" + +#~ msgid "Fail to create volume %(volname)s" +#~ msgstr "" + +#~ msgid "Failed to delete volume %(volname)s" +#~ msgstr "" + +#~ msgid "Failed to Create Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "Failed to Delete Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "Failed to Create Volume from Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "fmt = %(fmt)s backed by: %(backing_file)s" +#~ msgstr "" + +#~ msgid "Expected image to be in raw format, but is %s" +#~ msgstr "" + +#~ msgid "volume group %s doesn't exist" +#~ msgstr "" + +#~ msgid "Error retrieving volume stats: %s" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Could not get system name" +#~ msgstr "" + +#~ msgid "CPG (%s) must 
be in a domain" +#~ msgstr "" + +#~ msgid "Error populating default encryption types!" +#~ msgstr "" + +#~ msgid "Unexpected error while running command." +#~ msgstr "" + +#~ msgid "Nexenta SA returned the error" +#~ msgstr "" + +#~ msgid "Ignored target group creation error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group member addition error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LU creation error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Copying metadata from snapshot %(snap_volume_id)s to %(volume_id)s" +#~ msgstr "" + +#~ msgid "Connection to glance failed" +#~ msgstr "" + +#~ msgid "Invalid snapshot" +#~ msgstr "" + +#~ msgid "Invalid input received" +#~ msgstr "" + +#~ msgid "Invalid volume type" +#~ msgstr "" + +#~ msgid "Invalid volume" +#~ msgstr "" + +#~ msgid "Invalid host" +#~ msgstr "" + +#~ msgid "Invalid auth key" +#~ msgstr "" + +#~ msgid "Invalid metadata" +#~ msgstr "" + +#~ msgid "Invalid metadata size" +#~ msgstr "" + +#~ msgid "Migration error" +#~ msgstr "" + +#~ msgid "Quota exceeded" +#~ msgstr "" + +#~ msgid "Connection to swift failed" +#~ msgstr "" + +#~ msgid "Volume migration failed" +#~ msgstr "" + +#~ msgid "SSH command injection detected" +#~ msgstr "" + +#~ msgid "Invalid qos specs" +#~ msgstr "" + +#~ msgid "debug in callback: %s" +#~ msgstr "" + +#~ msgid "Expected object of type: %s" +#~ msgstr "" + +#~ msgid "timefunc: '%(name)s' took %(total_time).2f secs" +#~ msgstr "" + +#~ msgid "base image still has %s snapshots so not deleting base image" +#~ msgstr "" + +#~ msgid "Failed to rename migration destination volume %(vol)s to %(name)s" +#~ msgstr "" + +#~ msgid "Resize volume \"%(name)s\" to %(size)s" +#~ msgstr "" + +#~ msgid "Volume \"%(name)s\" resized. New size is %(size)s" +#~ msgstr "" + +#~ msgid "Invalid snapshot backing file format: %s" +#~ msgstr "" + +#~ msgid "Extend volume from %(old_size) to %(new_size)" +#~ msgstr "" + +#~ msgid "pool %s doesn't exist" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Could not get system name." +#~ msgstr "" + +#~ msgid "Disk not found: %s" +#~ msgstr "" + +#~ msgid "read timed out" +#~ msgstr "" + +#~ msgid "check_for_setup_error." +#~ msgstr "" + +#~ msgid "check_for_setup_error: Can not get device type." +#~ msgstr "" + +#~ msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +#~ msgstr "" + +#~ msgid "_get_device_type: Storage Pool must be configured." +#~ msgstr "" + +#~ msgid "create_volume:volume name: %s." +#~ msgstr "" + +#~ msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +#~ msgstr "" + +#~ msgid "create_export: volume name:%s" +#~ msgstr "" + +#~ msgid "create_export:Volume %(name)s does not exist." +#~ msgstr "" + +#~ msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +#~ msgstr "" + +#~ msgid "terminate_connection:Host does not exist. Host name:%(host)s." +#~ msgstr "" + +#~ msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +#~ msgstr "" + +#~ msgid "create_snapshot:Device does not support snapshot." +#~ msgstr "" + +#~ msgid "create_snapshot:Resource pool needs 1GB valid size at least." +#~ msgstr "" + +#~ msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +#~ msgstr "" + +#~ msgid "create_snapshot:Snapshot does not exist. 
Snapshot name:%(name)s" +#~ msgstr "" + +#~ msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +#~ msgstr "" + +#~ msgid "delete_snapshot:Device does not support snapshot." +#~ msgstr "" + +#~ msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +#~ msgstr "" + +#~ msgid "_check_conf_file: %s" +#~ msgstr "" + +#~ msgid "Write login information to xml error. %s" +#~ msgstr "" + +#~ msgid "_get_login_info error. %s" +#~ msgstr "" + +#~ msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +#~ msgstr "" + +#~ msgid "_get_lun_set_info:%s" +#~ msgstr "" + +#~ msgid "_get_iscsi_info:%s" +#~ msgstr "" + +#~ msgid "CLI command:%s" +#~ msgstr "" + +#~ msgid "_execute_cli:%s" +#~ msgstr "" + +#~ msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +#~ msgstr "" + +#~ msgid "_get_tgt_iqn:iSCSI IP is %s." +#~ msgstr "" + +#~ msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +#~ msgstr "" + +#~ msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +#~ msgstr "" + +#~ msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +#~ msgstr "" + +#~ msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." +#~ msgstr "" + +#~ msgid "Ignored target creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group member addition error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LU creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LUN mapping entry addition error while ensuring export" +#~ msgstr "" + +#~ msgid "Invalid source volume %(reason)s." +#~ msgstr "" + +#~ msgid "The request is invalid." +#~ msgstr "" + +#~ msgid "Volume %(volume_id)s persistence file could not be found." +#~ msgstr "" + +#~ msgid "No disk at %(location)s" +#~ msgstr "" + +#~ msgid "Class %(class_name)s could not be found: %(exception)s" +#~ msgstr "" + +#~ msgid "Action not allowed." +#~ msgstr "" + +#~ msgid "Key pair %(key_name)s already exists." +#~ msgstr "" + +#~ msgid "Migration error: %(reason)s" +#~ msgstr "" + +#~ msgid "Maximum volume/snapshot size exceeded" +#~ msgstr "" + +#~ msgid "3PAR Host already exists: %(err)s. %(info)s" +#~ msgstr "" + +#~ msgid "Backup volume %(volume_id)s type not recognised." +#~ msgstr "" + +#~ msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s" +#~ msgstr "" + +#~ msgid "ssh_read: Read SSH timeout" +#~ msgstr "" + +#~ msgid "do_setup." +#~ msgstr "" + +#~ msgid "create_volume: volume name: %s." +#~ msgstr "" + +#~ msgid "delete_volume: volume name: %s." +#~ msgstr "" + +#~ msgid "create_cloned_volume: src volume: %(src)s tgt volume: %(tgt)s" +#~ msgstr "" + +#~ msgid "create_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" +#~ msgstr "" + +#~ msgid "delete_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" +#~ msgstr "" + +#~ msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Updating volume stats" +#~ msgstr "" + +#~ msgid "restore finished." +#~ msgstr "" + +#~ msgid "Error encountered during initialization of driver: %s" +#~ msgstr "" + +#~ msgid "Unabled to update stats, driver is uninitialized" +#~ msgstr "" + +#~ msgid "Snapshot file at %s does not exist." 
+#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %s does not exist" +#~ msgstr "" + +#~ msgid "Login to 3PAR array invalid" +#~ msgstr "" + +#~ msgid "There are no datastores present under %s." +#~ msgstr "" + +#~ msgid "Size for volume: %s not found, skipping secure delete." +#~ msgstr "" + +#~ msgid "Could not find attribute for LUN named %s" +#~ msgstr "" + +#~ msgid "Cleaning up incomplete backup operations" +#~ msgstr "" + +#~ msgid "Resetting volume %s to available (was backing-up)" +#~ msgstr "" + +#~ msgid "Resetting volume %s to error_restoring (was restoring-backup)" +#~ msgstr "" + +#~ msgid "Resetting backup %s to error (was creating)" +#~ msgstr "" + +#~ msgid "Resetting backup %s to available (was restoring)" +#~ msgstr "" + +#~ msgid "Resuming delete on backup: %s" +#~ msgstr "" + +#~ msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +#~ msgstr "" + +#~ msgid "create_backup finished. backup: %s" +#~ msgstr "" + +#~ msgid "delete_backup started, backup: %s" +#~ msgstr "" + +#~ msgid "delete_backup finished, backup %s deleted" +#~ msgstr "" + +#~ msgid "JSON transfer Error" +#~ msgstr "" + +#~ msgid "create volume error: %(err)s" +#~ msgstr "" + +#~ msgid "Create snapshot error." +#~ msgstr "" + +#~ msgid "Create luncopy error." +#~ msgstr "" + +#~ msgid "_find_host_lun_id transfer data error! " +#~ msgstr "" + +#~ msgid "ssh_read: Read SSH timeout." +#~ msgstr "" + +#~ msgid "There are no hosts in the inventory." +#~ msgstr "" + +#~ msgid "Unable to create volume: %(vol)s on the hosts: %(hosts)s." +#~ msgstr "" + +#~ msgid "Successfully cloned new backing: %s." +#~ msgstr "" + +#~ msgid "Successfully reverted clone: %(clone)s to snapshot: %(snapshot)s." +#~ msgstr "" + +#~ msgid "Copying backing files from %(src)s to %(dest)s." +#~ msgstr "" + +#~ msgid "Initiated copying of backing via task: %s." +#~ msgstr "" + +#~ msgid "Successfully copied backing to %s." +#~ msgstr "" + +#~ msgid "Registering backing at path: %s to inventory." +#~ msgstr "" + +#~ msgid "Initiated registring backing, task: %s." +#~ msgstr "" + +#~ msgid "Successfully registered backing: %s." +#~ msgstr "" + +#~ msgid "Reverting backing to snapshot: %s." +#~ msgstr "" + +#~ msgid "Initiated reverting snapshot via task: %s." +#~ msgstr "" + +#~ msgid "Successfully reverted to snapshot: %s." +#~ msgstr "" + +#~ msgid "Successfully copied disk data to: %s." +#~ msgstr "" + +#~ msgid "Error(s): %s occurred in the call to RetrieveProperties." +#~ msgstr "" + +#~ msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +#~ msgstr "" + +#~ msgid "Deploy v1 of the Cinder API. " +#~ msgstr "" + +#~ msgid "Deploy v2 of the Cinder API. " +#~ msgstr "" + +#~ msgid "_read_xml:%s" +#~ msgstr "" + +#~ msgid "request ip info is %s." +#~ msgstr "" + +#~ msgid "new str info is %s." +#~ msgstr "" + +#~ msgid "Failed to create iser target for volume %(volume_id)s." +#~ msgstr "" + +#~ msgid "Failed to remove iser target for volume %(volume_id)s." 
+#~ msgstr "" + +#~ msgid "rtstool is not installed correctly" +#~ msgstr "" + +#~ msgid "Creating iser_target for: %s" +#~ msgstr "" + +#~ msgid "Failed to create iser target for volume id:%(vol_id)s: %(e)s" +#~ msgstr "" + +#~ msgid "Removing iser_target for: %s" +#~ msgstr "" + +#~ msgid "Failed to remove iser target for volume id:%(vol_id)s: %(e)s" +#~ msgstr "" + +#~ msgid "Volume %s does not exist, it seems it was already deleted" +#~ msgstr "" + +#~ msgid "Executing zfs send/recv on the appliance" +#~ msgstr "" + +#~ msgid "zfs send/recv done, new volume %s created" +#~ msgstr "" + +#~ msgid "Failed to delete temp snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" + +#~ msgid "Failed to delete zfs recv snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" + +#~ msgid "rbd export-diff failed - %s" +#~ msgstr "" + +#~ msgid "rbd import-diff failed - %s" +#~ msgstr "" + +#~ msgid "%s is not on GPFS. Perhaps GPFS not mounted." +#~ msgstr "" + +#~ msgid "Folder %s does not exist, it seems it was already deleted." +#~ msgstr "" + +#~ msgid "No 'os-update_readonly_flag' was specified in request." +#~ msgstr "" + +#~ msgid "Volume 'readonly' flag must be specified in request as a boolean." +#~ msgstr "" + +#~ msgid "ISER provider_location not stored, using discovery" +#~ msgstr "" + +#~ msgid "Could not find iSER export for volume %s" +#~ msgstr "" + +#~ msgid "ISER Discovery: Found %s" +#~ msgstr "" + +#~ msgid "Failed to access the device on the path %(path)s: %(error)s." +#~ msgstr "" + +#~ msgid "iSER device not found at %s" +#~ msgstr "" + +#~ msgid "Found iSER node %(host_device)s (after %(tries)s rescans)." +#~ msgstr "" + +#~ msgid "Skipping ensure_export. No iser_target provisioned for volume: %s" +#~ msgstr "" + +#~ msgid "Skipping remove_export. No iser_target provisioned for volume: %s" +#~ msgstr "" + +#~ msgid "Downloading image: %s from glance image server." +#~ msgstr "" + +#~ msgid "Uploading image: %s to the Glance image server." +#~ msgstr "" + +#~ msgid "Invalid request body" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: prefix %s" +#~ msgstr "" + +#~ msgid "Schedule volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete schedule volume using flow: %s" +#~ msgstr "" + +#~ msgid "Create volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete create volume workflow" +#~ msgstr "" + +#~ msgid "Expected volume result not found" +#~ msgstr "" + +#~ msgid "Manager volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete manager volume workflow" +#~ msgstr "" + +#~ msgid "Unable to update stats, driver is uninitialized" +#~ msgstr "" + +#~ msgid "Bad reponse from server: %s" +#~ msgstr "" + +#~ msgid "%(flow)s has moved into state %(state)s from state %(old_state)s" +#~ msgstr "" + +#~ msgid "No request spec, will not reschedule" +#~ msgstr "" + +#~ msgid "No retry filter property or associated retry info, will not reschedule" +#~ msgstr "" + +#~ msgid "Retry info not present, will not reschedule" +#~ msgstr "" + +#~ msgid "Clear capabilities" +#~ msgstr "" + +#~ msgid "This usually means the volume was never succesfully created." +#~ msgstr "" + +#~ msgid "setting LU uppper (end) limit to %s" +#~ msgstr "" + +#~ msgid "Can't find lun or lun goup in array" +#~ msgstr "" + +#~ msgid "Volume to be restored to is smaller than the backup to be restored" +#~ msgstr "" + +#~ msgid "Volume driver '%(driver)s' not initialized." 
+#~ msgstr "" + +#~ msgid "in looping call" +#~ msgstr "" + +#~ msgid "Is the appropriate service running?" +#~ msgstr "" + +#~ msgid "Could not find another host" +#~ msgstr "" + +#~ msgid "Not enough allocatable volume gigabytes remaining" +#~ msgstr "" + +#~ msgid "Unable to update stats on non-intialized Volume Group: %s" +#~ msgstr "" + +#~ msgid "do_setup: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "migrate_volume started with more than one vdisk copy" +#~ msgstr "" + +#~ msgid "migrate_volume: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "Selected datastore: %s for the volume." +#~ msgstr "" + +#~ msgid "There are no valid datastores present under %s." +#~ msgstr "" + +#~ msgid "Unable to create volume, driver not initialized" +#~ msgstr "" + +#~ msgid "Migration %(migration_id)s could not be found." +#~ msgstr "" + +#~ msgid "Bad driver response status: %(status)s" +#~ msgstr "" + +#~ msgid "Instance %(instance_id)s could not be found." +#~ msgstr "" + +#~ msgid "Volume retype failed: %(reason)s" +#~ msgstr "" + +#~ msgid "SIGTERM received" +#~ msgstr "" + +#~ msgid "Child %(pid)d exited with status %(code)d" +#~ msgstr "" + +#~ msgid "_wait_child %d" +#~ msgstr "" + +#~ msgid "wait wrap.failed %s" +#~ msgstr "" + +#~ msgid "" +#~ "Report interval must be less than " +#~ "service down time. Current config: " +#~ ". Setting " +#~ "service_down_time to: %(new_service_down_time)s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target for volume %(name)s." +#~ msgstr "" + +#~ msgid "Updating iscsi target: %s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target %(name)s: %(e)s" +#~ msgstr "" + +#~ msgid "Caught '%(exception)s' exception." +#~ msgstr "" + +#~ msgid "Get code level failed" +#~ msgstr "" + +#~ msgid "do_setup: Could not get system name" +#~ msgstr "" + +#~ msgid "Failed to get license information." +#~ msgstr "" + +#~ msgid "do_setup: No configured nodes" +#~ msgstr "" + +#~ msgid "enter: _get_chap_secret_for_host: host name %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_chap_secret_for_host: host name " +#~ "%(host_name)s with secret %(chap_secret)s" +#~ msgstr "" + +#~ msgid "" +#~ "_create_host: Cannot clean host name. " +#~ "Host name is not unicode or string" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: %s" +#~ msgstr "" + +#~ msgid "leave: _get_host_from_connector: host %s" +#~ msgstr "" + +#~ msgid "enter: _create_host: host %s" +#~ msgstr "" + +#~ msgid "_create_host: No connector ports" +#~ msgstr "" + +#~ msgid "leave: _create_host: host %(host)s - %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +#~ msgstr "" + +#~ msgid "" +#~ "storwize_svc_multihostmap_enabled is set to " +#~ "False, Not allow multi host mapping" +#~ msgstr "" + +#~ msgid "volume %s mapping to multi host" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _map_vol_to_host: LUN %(result_lun)s, " +#~ "volume %(volume_name)s, host %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "leave: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "_create_host failed to return the host name." +#~ msgstr "" + +#~ msgid "_get_host_from_connector failed to return the host name for connector" +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to any host found." +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: Multiple mappings of " +#~ "volume %(vol_name)s found, no host " +#~ "specified." 
+#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to host %(host_name)s found" +#~ msgstr "" + +#~ msgid "protocol must be specified as ' iSCSI' or ' FC'" +#~ msgstr "" + +#~ msgid "enter: _create_vdisk: vdisk %s " +#~ msgstr "" + +#~ msgid "" +#~ "_create_vdisk %(name)s - did not find success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find success " +#~ "message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find mapping " +#~ "id in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to prepare FlashCopy" +#~ " from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "Unexecpted mapping status %(status)s for " +#~ "mapping %(id)s. Attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Mapping %(id)s prepare failed to " +#~ "complete within the allotted %(to)d " +#~ "seconds timeout. Terminating." +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s with " +#~ "exception %(ex)s" +#~ msgstr "" + +#~ msgid "_prepare_fc_map: %s" +#~ msgstr "" + +#~ msgid "" +#~ "_start_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "enter: _run_flashcopy: execute FlashCopy from" +#~ " source %(source)s to target %(target)s" +#~ msgstr "" + +#~ msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +#~ msgstr "" + +#~ msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %(src_vdisk)s (%(src_id)s) does not exist" +#~ msgstr "" + +#~ msgid "" +#~ "_create_copy: cannot get source vdisk " +#~ "%(src)s capacity from vdisk attributes " +#~ "%(attr)s" +#~ msgstr "" + +#~ msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_flashcopy_mapping_attributes: mapping " +#~ "%(fc_map_id)s, attributes %(attributes)s" +#~ msgstr "" + +#~ msgid "enter: _is_vdisk_defined: vdisk %s " +#~ msgstr "" + +#~ msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +#~ msgstr "" + +#~ msgid "enter: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "warning: Tried to delete vdisk %s but it does not exist." 
+#~ msgstr "" + +#~ msgid "leave: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "" +#~ "_add_vdisk_copy %(name)s - did not find" +#~ " success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "_get_vdisk_copy_attrs: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "_get_pool_attrs: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "enter: _execute_command_and_parse_attributes: command %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _execute_command_and_parse_attributes:\n" +#~ "command: %(cmd)s\n" +#~ "attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "_get_hdr_dic: attribute headers and values do not match.\n" +#~ " Headers: %(header)s\n" +#~ " Values: %(row)s" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ "stdout: %(out)s\n" +#~ "stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "Did not find expected column in %(fun)s: %(hdr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Volume size %(size)s cannot be lesser" +#~ " than the snapshot size %(snap_size)s. " +#~ "They must be >= original snapshot " +#~ "size." +#~ msgstr "" + +#~ msgid "" +#~ "Clones currently disallowed when %(size)s " +#~ "< %(source_size)s. They must be >= " +#~ "original volume size." +#~ msgstr "" + +#~ msgid "" +#~ "Size of specified image %(image_size)s " +#~ "is larger than volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "" +#~ "Image minDisk size %(min_disk)s is " +#~ "larger than the volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "Updating volume %(volume_id)s with %(update)s" +#~ msgstr "" + +#~ msgid "Volume %s: resetting 'creating' status failed" +#~ msgstr "" + +#~ msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s" +#~ msgstr "" + +#~ msgid "Marking volume %s as bootable" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting download of %(image_id)s " +#~ "(%(image_location)s) to volume %(volume_id)s" +#~ msgstr "" + +#~ msgid "" +#~ "Downloaded image %(image_id)s (%(image_location)s)" +#~ " to volume %(volume_id)s successfully" +#~ msgstr "" + +#~ msgid "" +#~ "Creating volume glance metadata for " +#~ "volume %(volume_id)s backed by image " +#~ "%(image_id)s with: %(vol_metadata)s" +#~ msgstr "" + +#~ msgid "" +#~ "Cloning %(volume_id)s from image %(image_id)s" +#~ " at location %(image_location)s" +#~ msgstr "" + diff --git a/cinder/locale/nb/LC_MESSAGES/cinder.po b/cinder/locale/nb/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..5622a6d6d2 --- /dev/null +++ b/cinder/locale/nb/LC_MESSAGES/cinder.po @@ -0,0 +1,10736 @@ +# Norwegian Bokmål translations for cinder. +# Copyright (C) 2013 ORGANIZATION +# This file is distributed under the same license as the cinder project. 
+# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Cinder\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-02-03 06:16+0000\n" +"PO-Revision-Date: 2013-05-29 08:13+0000\n" +"Last-Translator: openstackjenkins \n" +"Language-Team: Norwegian Bokmål " +"(http://www.transifex.com/projects/p/openstack/language/nb/)\n" +"Plural-Forms: nplurals=2; plural=(n != 1)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:102 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:66 cinder/brick/exception.py:33 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:88 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:107 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:112 +#, python-format +msgid "Volume driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:116 +#, python-format +msgid "Backup driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:120 +#, python-format +msgid "Connection to glance failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:124 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:129 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:133 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:137 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:141 +msgid "Volume driver not ready." +msgstr "" + +#: cinder/exception.py:145 cinder/brick/exception.py:74 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:150 +#, python-format +msgid "Invalid snapshot: %(reason)s" +msgstr "" + +#: cinder/exception.py:154 +#, python-format +msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:159 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:163 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:167 +msgid "The results are invalid." +msgstr "" + +#: cinder/exception.py:171 +#, python-format +msgid "Invalid input received: %(reason)s" +msgstr "" + +#: cinder/exception.py:175 +#, python-format +msgid "Invalid volume type: %(reason)s" +msgstr "" + +#: cinder/exception.py:179 +#, python-format +msgid "Invalid volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:183 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:187 +#, python-format +msgid "Invalid host: %(reason)s" +msgstr "" + +#: cinder/exception.py:193 cinder/brick/exception.py:81 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:197 +#, python-format +msgid "Invalid auth key: %(reason)s" +msgstr "" + +#: cinder/exception.py:201 +#, python-format +msgid "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" +msgstr "" + +#: cinder/exception.py:206 +msgid "Service is unavailable at this time." 
+msgstr "" + +#: cinder/exception.py:210 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:214 +#, python-format +msgid "The device in the path %(path)s is unavailable: %(reason)s" +msgstr "" + +#: cinder/exception.py:218 +#, python-format +msgid "Expected a uuid but received %(uuid)s." +msgstr "" + +#: cinder/exception.py:222 cinder/brick/exception.py:68 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:228 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:232 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "" +"Volume %(volume_id)s has no administration metadata with key " +"%(metadata_key)s." +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Invalid metadata: %(reason)s" +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Invalid metadata size: %(reason)s" +msgstr "" + +#: cinder/exception.py:250 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:255 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:264 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:269 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s deletion is not allowed with volumes " +"present with the type." +msgstr "" + +#: cinder/exception.py:274 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:278 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:282 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:287 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:291 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:295 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:328 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:332 +#, python-format +msgid "Unknown quota resources %(unknown)s." 
+msgstr "" + +#: cinder/exception.py:336 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:340 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:344 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:348 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:352 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:356 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:365 +#, python-format +msgid "Volume Type %(id)s already exists." +msgstr "" + +#: cinder/exception.py:369 +#, python-format +msgid "Volume type encryption for type %(type_id)s already exists." +msgstr "" + +#: cinder/exception.py:373 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:377 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:381 +#, python-format +msgid "Could not find parameter %(param)s" +msgstr "" + +#: cinder/exception.py:385 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:389 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:398 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:402 +#, python-format +msgid "Quota exceeded: code=%(code)s" +msgstr "" + +#: cinder/exception.py:409 +#, python-format +msgid "" +"Requested volume or snapshot exceeds allowed Gigabytes quota. Requested " +"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." +msgstr "" + +#: cinder/exception.py:415 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:419 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:423 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:427 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:432 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:436 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:440 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:444 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:449 +#, python-format +msgid "Glance metadata for volume/snapshot %(id)s cannot be found." 
+msgstr "" + +#: cinder/exception.py:453 +#, python-format +msgid "Failed to export for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:457 +#, python-format +msgid "Failed to create metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:461 +#, python-format +msgid "Failed to update metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:465 +#, python-format +msgid "Failed to copy metadata to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:469 +#, python-format +msgid "Failed to copy image to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:473 +msgid "Invalid Ceph args provided for backup rbd operation" +msgstr "" + +#: cinder/exception.py:477 +msgid "An error has occurred during backup operation" +msgstr "" + +#: cinder/exception.py:481 +msgid "Backup RBD operation failed" +msgstr "" + +#: cinder/exception.py:485 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:489 +msgid "Failed to identify volume backend." +msgstr "" + +#: cinder/exception.py:493 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "" + +#: cinder/exception.py:497 +#, python-format +msgid "Connection to swift failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:501 +#, python-format +msgid "Transfer %(transfer_id)s could not be found." +msgstr "" + +#: cinder/exception.py:505 +#, python-format +msgid "Volume migration failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:509 +#, python-format +msgid "SSH command injection detected: %(command)s" +msgstr "" + +#: cinder/exception.py:513 +#, python-format +msgid "QoS Specs %(specs_id)s already exists." +msgstr "" + +#: cinder/exception.py:517 +#, python-format +msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:522 +#, python-format +msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "No such QoS spec %(specs_id)s." +msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:536 +#, python-format +msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:541 +#, python-format +msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." +msgstr "" + +#: cinder/exception.py:546 +#, python-format +msgid "Invalid qos specs: %(reason)s" +msgstr "" + +#: cinder/exception.py:550 +#, python-format +msgid "QoS Specs %(specs_id)s is still associated with entities." +msgstr "" + +#: cinder/exception.py:554 +#, python-format +msgid "key manager error: %(reason)s" +msgstr "" + +#: cinder/exception.py:560 +msgid "Coraid Cinder Driver exception." +msgstr "" + +#: cinder/exception.py:564 +msgid "Failed to encode json data." +msgstr "" + +#: cinder/exception.py:568 +msgid "Login on ESM failed." +msgstr "" + +#: cinder/exception.py:572 +msgid "Relogin on ESM failed." +msgstr "" + +#: cinder/exception.py:576 +#, python-format +msgid "Group with name \"%(group_name)s\" not found." +msgstr "" + +#: cinder/exception.py:580 +#, python-format +msgid "ESM configure request failed: %(message)s." +msgstr "" + +#: cinder/exception.py:584 +#, python-format +msgid "Coraid ESM not available with reason: %(reason)s." +msgstr "" + +#: cinder/exception.py:589 +msgid "Zadara Cinder Driver exception." 
+msgstr "" + +#: cinder/exception.py:593 +#, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:597 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:601 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:605 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:609 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:613 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:618 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:622 +msgid "SolidFire Cinder Driver exception" +msgstr "" + +#: cinder/exception.py:626 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:630 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:636 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:641 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:645 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:649 cinder/exception.py:662 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:654 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:658 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/manager.py:133 +msgid "Notifying Schedulers of capabilities ..." +msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:105 +#, python-format +msgid "" +"Default quota for resource: %(res)s is set by the default quota flag: " +"quota_%(res)s, it is now deprecated. Please use the the default quota " +"class for default quota." +msgstr "" + +#: cinder/quota.py:748 +#, python-format +msgid "Created reservations %s" +msgstr "" + +#: cinder/quota.py:770 +#, python-format +msgid "Failed to commit reservations %s" +msgstr "" + +#: cinder/quota.py:790 +#, python-format +msgid "Failed to roll back reservations %s" +msgstr "" + +#: cinder/quota.py:876 +msgid "Cannot register resource" +msgstr "" + +#: cinder/quota.py:879 +msgid "Cannot register resources" +msgstr "" + +#: cinder/quota_utils.py:46 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume - " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/quota_utils.py:56 cinder/transfer/api.py:168 +#: cinder/volume/flows/api/create_volume.py:520 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/service.py:95 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:108 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:148 +#, python-format +msgid "" +"Report interval must be less than service down time. Current config " +"service_down_time: %(service_down_time)s, report_interval for this: " +"service is: %(report_interval)s. 
Setting global service_down_time to: " +"%(new_down_time)s" +msgstr "" + +#: cinder/service.py:216 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:255 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:270 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:276 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:298 +#, python-format +msgid "" +"Value of config option %(name)s_workers must be integer greater than 1. " +"Input value ignored." +msgstr "" + +#: cinder/service.py:373 +msgid "serve() can only be called once" +msgstr "" + +#: cinder/service.py:379 cinder/openstack/common/service.py:166 +#: cinder/openstack/common/service.py:384 +msgid "Full set of CONF:" +msgstr "" + +#: cinder/service.py:387 +#, python-format +msgid "%s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Can not translate %s to integer." +msgstr "" + +#: cinder/utils.py:127 +#, python-format +msgid "May specify only one of %s" +msgstr "" + +#: cinder/utils.py:212 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:228 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:412 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:423 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:698 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:759 +#, python-format +msgid "Volume driver %s not initialized" +msgstr "" + +#: cinder/wsgi.py:127 cinder/openstack/common/sslutils.py:50 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: cinder/wsgi.py:130 cinder/openstack/common/sslutils.py:53 +#, python-format +msgid "Unable to find ca_file : %s" +msgstr "" + +#: cinder/wsgi.py:133 cinder/openstack/common/sslutils.py:56 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: cinder/wsgi.py:136 cinder/openstack/common/sslutils.py:59 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:169 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:206 +#, python-format +msgid "Started %(name)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:244 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:313 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." 
+msgstr "" + +#: cinder/api/common.py:92 cinder/api/common.py:126 cinder/volume/api.py:266 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:95 cinder/api/common.py:130 cinder/volume/api.py:263 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:120 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:134 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:162 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:189 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:182 +msgid "Initializing extension manager." +msgstr "" + +#: cinder/api/extensions.py:197 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:235 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:236 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:240 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:256 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:262 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:276 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:287 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:356 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:266 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:463 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:786 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:907 +msgid "subclasses must implement construct()!" 
+msgstr "" + +#: cinder/api/contrib/admin_actions.py:81 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:227 +#: cinder/api/contrib/volume_transfer.py:157 +#: cinder/api/contrib/volume_transfer.py:193 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:224 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:60 +msgid "Snapshot not found." +msgstr "" + +#: cinder/api/contrib/hosts.py:86 cinder/api/openstack/wsgi.py:245 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:136 +#, python-format +msgid "Host '%s' could not be found." +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:168 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:180 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:206 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:214 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:111 +msgid "Please specify a name for QoS specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:220 +msgid "Failed to disassociate qos specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:222 +msgid "Qos specs still in use." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:298 +#: cinder/api/contrib/qos_specs_manage.py:351 +msgid "Volume Type id must not be None." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:72 +msgid "Missing required element quota_class_set in request body." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:81 +msgid "Quota class limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:85 +msgid "Quota class limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:60 +msgid "Quota limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quotas.py:65 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:100 +msgid "Missing required element quota_set in request body." +msgstr "" + +#: cinder/api/contrib/quotas.py:111 +#, python-format +msgid "Bad key(s) in quota set: %s" +msgstr "" + +#: cinder/api/contrib/scheduler_hints.py:36 +msgid "Malformed scheduler_hints attribute" +msgstr "" + +#: cinder/api/contrib/services.py:84 +msgid "" +"Query by service parameter is deprecated. Please use binary parameter " +"instead." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:51 +msgid "'status' must be specified." 
+msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:61 +#, python-format +msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:67 +#, python-format +msgid "" +"Provided snapshot status %(provided)s not allowed for snapshot with " +"status %(current)s." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:79 +msgid "progress must be an integer percentage" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:101 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:105 +#: cinder/api/v1/snapshot_metadata.py:75 cinder/api/v1/volume_metadata.py:75 +#: cinder/api/v2/snapshot_metadata.py:75 cinder/api/v2/volume_metadata.py:74 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:108 +#: cinder/api/v1/snapshot_metadata.py:79 cinder/api/v1/volume_metadata.py:79 +#: cinder/api/v2/snapshot_metadata.py:79 cinder/api/v2/volume_metadata.py:78 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:150 +msgid "" +"Key names can only contain alphanumeric characters, underscores, periods," +" colons and hyphens." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:99 +#, python-format +msgid "" +"Invalid request to attach volume to an instance %(instance_uuid)s and a " +"host %(host_name)s simultaneously" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:107 +msgid "Invalid request to attach volume to an invalid target" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:111 +msgid "" +"Invalid request to attach volume with an invalid mode. Attaching mode " +"should be 'rw' or 'ro'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:196 +msgid "Unable to fetch connection information from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:216 +msgid "Unable to terminate volume connection from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:229 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:237 +msgid "Bad value for 'force' parameter." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:240 +msgid "'force' is not string or bool." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:280 +msgid "New volume size must be specified as an integer." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:299 +msgid "Must specify readonly in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:307 +msgid "Bad value for 'readonly'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:311 +msgid "'readonly' not string or bool" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:325 +msgid "New volume type must be specified." 
+msgstr "" + +#: cinder/api/contrib/volume_transfer.py:131 +msgid "Listing volume transfers" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:147 +#, python-format +msgid "Creating new volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:162 +#, python-format +msgid "Creating transfer of volume %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:183 +#, python-format +msgid "Accepting volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:196 +#, python-format +msgid "Accepting transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:217 +#, python-format +msgid "Delete transfer with id: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:64 +msgid "key_size must be non-negative" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:67 +msgid "key_size must be an integer" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:73 +msgid "provider must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:75 +msgid "control_location must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:83 +#, python-format +msgid "Valid control location are: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:111 +msgid "Cannot create encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:115 +msgid "Create body is not valid." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:157 +msgid "Cannot delete encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/middleware/auth.py:108 +msgid "Invalid service catalog json." +msgstr "" + +#: cinder/api/middleware/fault.py:44 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:53 cinder/api/openstack/wsgi.py:984 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/fault.py:69 +#, python-format +msgid "%(exception)s: %(explanation)s" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:55 cinder/api/middleware/sizelimit.py:64 +#: cinder/api/middleware/sizelimit.py:78 +msgid "Request is too large." +msgstr "" + +#: cinder/api/openstack/__init__.py:69 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:80 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:104 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:126 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." 
+msgstr "" + +#: cinder/api/openstack/wsgi.py:220 cinder/api/openstack/wsgi.py:634 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:639 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:677 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:682 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:685 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:793 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:799 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:803 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:914 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:917 cinder/api/openstack/wsgi.py:930 +#: cinder/api/v1/snapshot_metadata.py:53 cinder/api/v1/snapshot_metadata.py:71 +#: cinder/api/v1/snapshot_metadata.py:96 cinder/api/v1/snapshot_metadata.py:121 +#: cinder/api/v1/volume_metadata.py:53 cinder/api/v1/volume_metadata.py:71 +#: cinder/api/v1/volume_metadata.py:96 cinder/api/v1/volume_metadata.py:121 +#: cinder/api/v2/snapshot_metadata.py:53 cinder/api/v2/snapshot_metadata.py:71 +#: cinder/api/v2/snapshot_metadata.py:96 cinder/api/v2/snapshot_metadata.py:121 +#: cinder/api/v2/volume_metadata.py:52 cinder/api/v2/volume_metadata.py:70 +#: cinder/api/v2/volume_metadata.py:95 cinder/api/v2/volume_metadata.py:120 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:927 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:939 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:987 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:139 cinder/api/v2/limits.py:138 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:264 cinder/api/v2/limits.py:261 +msgid "This request was rate-limited." 
+msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:37 cinder/api/v1/snapshot_metadata.py:117 +#: cinder/api/v1/snapshot_metadata.py:156 cinder/api/v2/snapshot_metadata.py:37 +#: cinder/api/v2/snapshot_metadata.py:117 +#: cinder/api/v2/snapshot_metadata.py:156 +msgid "snapshot does not exist" +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:139 +#: cinder/api/v1/snapshot_metadata.py:149 cinder/api/v1/volume_metadata.py:139 +#: cinder/api/v1/volume_metadata.py:149 cinder/api/v2/snapshot_metadata.py:139 +#: cinder/api/v2/snapshot_metadata.py:149 cinder/api/v2/volume_metadata.py:138 +#: cinder/api/v2/volume_metadata.py:148 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:119 cinder/api/v2/snapshots.py:120 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:184 +msgid "'volume_id' must be specified" +msgstr "" + +#: cinder/api/v1/snapshots.py:182 cinder/api/v2/snapshots.py:193 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:186 cinder/api/v2/snapshots.py:202 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:37 cinder/api/v1/volume_metadata.py:117 +#: cinder/api/v1/volume_metadata.py:156 cinder/api/v2/volume_metadata.py:36 +#: cinder/api/v2/volume_metadata.py:116 cinder/api/v2/volume_metadata.py:155 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:111 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:290 cinder/api/v2/volumes.py:228 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:344 cinder/api/v1/volumes.py:348 +#: cinder/api/v2/volumes.py:298 cinder/api/v2/volumes.py:302 +msgid "Invalid imageRef provided." +msgstr "" + +#: cinder/api/v1/volumes.py:388 cinder/api/v2/volumes.py:354 +#, python-format +msgid "snapshot id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:401 +#, python-format +msgid "source vol id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:412 cinder/api/v2/volumes.py:377 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/v1/volumes.py:496 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/snapshots.py:111 cinder/api/v2/snapshots.py:126 +#: cinder/api/v2/snapshots.py:267 +msgid "Snapshot could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:174 cinder/api/v2/snapshots.py:234 +#: cinder/api/v2/volumes.py:313 cinder/api/v2/volumes.py:419 +#, python-format +msgid "Missing required element '%s' in request body" +msgstr "" + +#: cinder/api/v2/snapshots.py:190 cinder/api/v2/volumes.py:217 +#: cinder/api/v2/volumes.py:234 cinder/api/v2/volumes.py:449 +msgid "Volume could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:230 cinder/api/v2/volumes.py:415 +msgid "Missing request body" +msgstr "" + +#: cinder/api/v2/types.py:70 +msgid "Volume type not found" +msgstr "" + +#: cinder/api/v2/volumes.py:237 +msgid "Volume cannot be deleted while in attached state" +msgstr "" + +#: cinder/api/v2/volumes.py:343 +msgid "Volume type not found." 
+msgstr "" + +#: cinder/api/v2/volumes.py:366 +#, python-format +msgid "source volume id:%s not found" +msgstr "" + +#: cinder/api/v2/volumes.py:472 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:66 +msgid "Backup status must be available or error" +msgstr "" + +#: cinder/backup/api.py:105 +msgid "Volume to be backed up must be available" +msgstr "" + +#: cinder/backup/api.py:140 +msgid "Backup status must be available" +msgstr "" + +#: cinder/backup/api.py:145 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:154 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:170 +msgid "Volume to be restored to must be available" +msgstr "" + +#: cinder/backup/api.py:176 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." +msgstr "" + +#: cinder/backup/api.py:181 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:97 +msgid "NULL host not allowed for volume backend lookup." +msgstr "" + +#: cinder/backup/manager.py:100 +#, python-format +msgid "Checking hostname '%s' for backend info." +msgstr "" + +#: cinder/backup/manager.py:107 +#, python-format +msgid "Backend not found in hostname (%s) so using default." +msgstr "" + +#: cinder/backup/manager.py:117 +#, python-format +msgid "Manager requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:120 cinder/backup/manager.py:132 +msgid "Fetching default backend." +msgstr "" + +#: cinder/backup/manager.py:123 +#, python-format +msgid "Volume manager for backend '%s' does not exist." +msgstr "" + +#: cinder/backup/manager.py:129 +#, python-format +msgid "Driver requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:147 +#, python-format +msgid "" +"Registering backend %(backend)s (host=%(host)s " +"backend_name=%(backend_name)s)." +msgstr "" + +#: cinder/backup/manager.py:154 +#, python-format +msgid "Registering default backend %s." +msgstr "" + +#: cinder/backup/manager.py:158 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)." +msgstr "" + +#: cinder/backup/manager.py:165 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s." +msgstr "" + +#: cinder/backup/manager.py:184 +msgid "Cleaning up incomplete backup operations." +msgstr "" + +#: cinder/backup/manager.py:189 +#, python-format +msgid "Resetting volume %s to available (was backing-up)." +msgstr "" + +#: cinder/backup/manager.py:194 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)." +msgstr "" + +#: cinder/backup/manager.py:206 +#, python-format +msgid "Resetting backup %s to error (was creating)." +msgstr "" + +#: cinder/backup/manager.py:212 +#, python-format +msgid "Resetting backup %s to available (was restoring)." +msgstr "" + +#: cinder/backup/manager.py:217 +#, python-format +msgid "Resuming delete on backup: %s." +msgstr "" + +#: cinder/backup/manager.py:225 +#, python-format +msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:237 +#, python-format +msgid "" +"Create backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s." 
+msgstr "" + +#: cinder/backup/manager.py:249 +#, python-format +msgid "" +"Create backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:282 +#, python-format +msgid "Create backup finished. backup: %s." +msgstr "" + +#: cinder/backup/manager.py:286 +#, python-format +msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:299 +#, python-format +msgid "" +"Restore backup aborted: expected volume status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:310 +#, python-format +msgid "" +"Restore backup aborted: expected backup status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:329 +#, python-format +msgid "" +"Restore backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:360 +#, python-format +msgid "" +"Restore backup finished, backup %(backup_id)s restored to volume " +"%(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:379 +#, python-format +msgid "Delete backup started, backup: %s." +msgstr "" + +#: cinder/backup/manager.py:386 +#, python-format +msgid "" +"Delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:399 +#, python-format +msgid "" +"Delete backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:422 +#, python-format +msgid "Delete backup finished, backup %s deleted." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:116 +msgid "" +"rbd striping not supported - ignoring configuration settings for rbd " +"striping" +msgstr "" + +#: cinder/backup/drivers/ceph.py:147 +#, python-format +msgid "invalid user '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:213 +msgid "backup_id required" +msgstr "" + +#: cinder/backup/drivers/ceph.py:224 +#, python-format +msgid "discarding %(length)s bytes from offset %(offset)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:232 +#, python-format +msgid "writing zeroes chunk %d" +msgstr "" + +#: cinder/backup/drivers/ceph.py:246 +#, python-format +msgid "transferring data between '%(src)s' and '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:250 +#, python-format +msgid "%(chunks)s chunks of %(bytes)s bytes to be transferred" +msgstr "" + +#: cinder/backup/drivers/ceph.py:269 +#, python-format +msgid "transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:279 +#, python-format +msgid "transferring remaining %s bytes" +msgstr "" + +#: cinder/backup/drivers/ceph.py:295 +#, python-format +msgid "creating base image '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:322 cinder/backup/drivers/ceph.py:603 +#, python-format +msgid "deleting backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:325 +msgid "no backup snapshot to delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:361 +#, python-format +msgid "trying diff format name format basename='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:369 +#, python-format +msgid "image %s not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:377 +#, python-format +msgid "base image still has %s snapshots so skipping base image delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:382 +#, python-format +msgid "deleting base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:389 +#, python-format +msgid "image busy, retrying %(retries)s more time(s) in %(delay)ss" +msgstr "" + +#: cinder/backup/drivers/ceph.py:394 +msgid "max retries reached - raising error" +msgstr "" + +#: cinder/backup/drivers/ceph.py:397 +#, python-format +msgid "base backup image='%s' deleted)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:407 +#, python-format +msgid "deleting source snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:453 +#, python-format +msgid "performing differential transfer from '%(src)s' to '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:478 +#, python-format +msgid "rbd diff op failed - (ret=%(ret)s stderr=%(stderr)s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:488 +#, python-format +msgid "image '%s' not found - trying diff format name" +msgstr "" + +#: cinder/backup/drivers/ceph.py:493 +#, python-format +msgid "diff format image '%s' not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:528 +#, python-format +msgid "using --from-snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:543 +#, python-format +msgid "source snap '%s' is stale so deleting" +msgstr "" + +#: cinder/backup/drivers/ceph.py:555 +#, python-format +msgid "" +"snap='%(snap)s' does not exist in base image='%(base)s' - aborting " +"incremental backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:566 +#, python-format +msgid "creating backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:586 +#, python-format +msgid "differential backup transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:595 +msgid "differential backup transfer failed" +msgstr "" + +#: 
cinder/backup/drivers/ceph.py:625 +#, python-format +msgid "creating base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:634 +msgid "copying data" +msgstr "" + +#: cinder/backup/drivers/ceph.py:694 +#, python-format +msgid "looking for snapshot of backup base '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:697 +#, python-format +msgid "backup base '%s' has no snapshots" +msgstr "" + +#: cinder/backup/drivers/ceph.py:704 +#, python-format +msgid "backup '%s' has no snapshot" +msgstr "" + +#: cinder/backup/drivers/ceph.py:708 +#, python-format +msgid "backup should only have one snapshot but instead has %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:713 +#, python-format +msgid "found snapshot '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:734 +msgid "need non-zero volume size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:751 +#, python-format +msgid "Starting backup of volume='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:764 +msgid "forcing full backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:776 +#, python-format +msgid "backup '%s' finished." +msgstr "" + +#: cinder/backup/drivers/ceph.py:834 +msgid "adjusting restore vol size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:846 +#, python-format +msgid "trying incremental restore from base='%(base)s' snap='%(snap)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:858 +msgid "differential restore failed, trying full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:869 +#, python-format +msgid "restore transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:916 +#, python-format +msgid "rbd has %s extents" +msgstr "" + +#: cinder/backup/drivers/ceph.py:938 +msgid "dest volume is original volume - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:959 +msgid "destination has extents - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:964 +#, python-format +msgid "no restore point found for backup='%s', forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:995 +msgid "forcing full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1005 +#, python-format +msgid "starting restore from Ceph backup=%(src)s to volume=%(dest)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1016 +msgid "volume_file does not support fileno() so skipping fsync()" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1021 +msgid "restore finished successfully." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:1023 +#, python-format +msgid "restore finished with error - %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1029 +#, python-format +msgid "delete started for backup=%s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1034 +msgid "rbd image not found but continuing anyway so that db entry can be removed" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1037 +#, python-format +msgid "delete '%s' finished with warning" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1039 +#, python-format +msgid "delete '%s' finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:106 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:123 +#, python-format +msgid "single_user auth mode enabled, but %(param)s not set" +msgstr "" + +#: cinder/backup/drivers/swift.py:141 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:146 +#, python-format +msgid "container %s does not exist" +msgstr "" + +#: cinder/backup/drivers/swift.py:151 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/drivers/swift.py:157 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:173 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:182 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:192 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:209 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/drivers/swift.py:214 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:219 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:224 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/drivers/swift.py:234 +#, python-format +msgid "volume size %d is invalid." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:248 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:271 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/drivers/swift.py:278 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:287 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/drivers/swift.py:291 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/drivers/swift.py:297 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:301 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:304 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:312 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/drivers/swift.py:328 cinder/backup/drivers/tsm.py:324 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/drivers/swift.py:345 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/drivers/swift.py:350 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:356 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/drivers/swift.py:362 +#, python-format +msgid "" +"restoring object from swift. backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:378 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/drivers/swift.py:401 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:409 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:423 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:428 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:432 cinder/backup/drivers/tsm.py:378 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:446 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:455 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:458 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:468 cinder/backup/drivers/tsm.py:440 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/backup/drivers/tsm.py:85 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to create device hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:143 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to obtain backup success notification from " +"server.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:173 +#, python-format +msgid "" +"restore: %(vol_id)s Failed.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:199 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a block device." +msgstr "" + +#: cinder/backup/drivers/tsm.py:206 +#, python-format +msgid "backup: %(vol_id)s Failed. Cannot obtain real path to device %(path)s." +msgstr "" + +#: cinder/backup/drivers/tsm.py:213 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a file." +msgstr "" + +#: cinder/backup/drivers/tsm.py:260 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to remove backup hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:286 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to TSM, volume path: " +"%(volume_path)s," +msgstr "" + +#: cinder/backup/drivers/tsm.py:298 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:308 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:338 +#, python-format +msgid "" +"restore: starting restore of backup from TSM to volume %(volume_id)s, " +"backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:352 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:362 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:413 +#, python-format +msgid "" +"delete: %(vol_id)s Failed to run dsmc with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:421 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments with " +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:432 +#, python-format +msgid "" +"delete: %(vol_id)s Failed with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/brick/exception.py:55 +#, python-format +msgid "Exception in string format operation. 
msg='%s'" +msgstr "" + +#: cinder/brick/exception.py:85 +msgid "We are unable to locate any Fibre Channel devices." +msgstr "" + +#: cinder/brick/exception.py:89 +msgid "Unable to find a Fibre Channel volume device." +msgstr "" + +#: cinder/brick/exception.py:93 +#, python-format +msgid "Volume device not found at %(device)s." +msgstr "" + +#: cinder/brick/exception.py:97 +#, python-format +msgid "Unable to find Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:101 +#, python-format +msgid "Failed to create Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:105 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:109 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:113 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:117 +#, python-format +msgid "Connect to volume via protocol %(protocol)s not supported." +msgstr "" + +#: cinder/brick/initiator/connector.py:127 +#, python-format +msgid "Invalid InitiatorConnector protocol specified %(protocol)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:140 +#, python-format +msgid "Failed to access the device on the path %(path)s: %(error)s %(info)s." +msgstr "" + +#: cinder/brick/initiator/connector.py:229 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. Try" +" number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:242 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:317 +#, python-format +msgid "Could not find the iSCSI Initiator File %s" +msgstr "" + +#: cinder/brick/initiator/connector.py:609 +msgid "We are unable to locate any Fibre Channel devices" +msgstr "" + +#: cinder/brick/initiator/connector.py:619 +#, python-format +msgid "Looking for Fibre Channel dev %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:629 +msgid "Fibre Channel volume device not found." +msgstr "" + +#: cinder/brick/initiator/connector.py:633 +#, python-format +msgid "Fibre volume not yet found. Will rescan & retry. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:649 +#, python-format +msgid "Found Fibre Channel volume %(name)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:658 +#, python-format +msgid "Multipath device discovered %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:776 +#, python-format +msgid "AoE volume not yet found at: %(path)s. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:789 +#, python-format +msgid "Found AoE device %(path)s (after %(tries)s rediscover)" +msgstr "" + +#: cinder/brick/initiator/connector.py:815 +#, python-format +msgid "aoe-discover: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:825 +#, python-format +msgid "aoe-revalidate %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:834 +#, python-format +msgid "aoe-flush %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:858 +msgid "" +"Connection details not present. RemoteFsClient may not initialize " +"properly." 
+msgstr "" + +#: cinder/brick/initiator/connector.py:915 +msgid "Invalid connection_properties specified no device_path attribute" +msgstr "" + +#: cinder/brick/initiator/linuxfc.py:50 cinder/brick/initiator/linuxfc.py:56 +msgid "systool is not installed" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:99 +#: cinder/brick/initiator/linuxscsi.py:107 +#: cinder/brick/initiator/linuxscsi.py:124 +#, python-format +msgid "multipath call failed exit (%(code)s)" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:145 +#, python-format +msgid "Couldn't find multipath device %(line)s" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:149 +#, python-format +msgid "Found multipath device = %(mdev)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:140 +msgid "Attempting recreate of backing lun..." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:158 +#, python-format +msgid "" +"Failed to recover attempt to create iscsi backing lun for volume " +"id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:177 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:184 +#, python-format +msgid "" +"Created volume path %(vp)s,\n" +"content: %(vc)%" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:216 cinder/brick/iscsi/iscsi.py:365 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:227 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:258 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:262 +#, python-format +msgid "Volume path %s does not exist, nothing to remove." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:280 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:290 cinder/brick/iscsi/iscsi.py:550 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:375 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:469 +msgid "cinder-rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:489 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:513 cinder/brick/iscsi/iscsi.py:522 +#, python-format +msgid "Failed to create iscsi target for volume id:%s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:532 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:542 +#, python-format +msgid "Failed to remove iscsi target for volume id:%s." 
+msgstr "" + +#: cinder/brick/iscsi/iscsi.py:571 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 cinder/brick/local_dev/lvm.py:158 +#: cinder/brick/local_dev/lvm.py:474 cinder/brick/local_dev/lvm.py:503 +#: cinder/brick/local_dev/lvm.py:546 cinder/brick/local_dev/lvm.py:609 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 cinder/brick/local_dev/lvm.py:159 +#: cinder/brick/local_dev/lvm.py:475 cinder/brick/local_dev/lvm.py:504 +#: cinder/brick/local_dev/lvm.py:547 cinder/brick/local_dev/lvm.py:610 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 cinder/brick/local_dev/lvm.py:160 +#: cinder/brick/local_dev/lvm.py:476 cinder/brick/local_dev/lvm.py:505 +#: cinder/brick/local_dev/lvm.py:548 cinder/brick/local_dev/lvm.py:611 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, python-format +msgid "Unable to locate Volume Group %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:157 +msgid "Error querying thin pool about data_percent" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:370 +#, python-format +msgid "Unable to find VG: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:420 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:473 +msgid "Error creating Volume" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:489 +#, python-format +msgid "Unable to find LV: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:502 +msgid "Error creating snapshot" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:545 +msgid "Error activating LV" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:563 +#, python-format +msgid "Error reported running lvremove: CMD: %(command)s, RESPONSE: %(response)s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:568 +msgid "Attempting udev settle and retry of lvremove..." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:608 +msgid "Error extending Volume" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:39 +msgid "nfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:45 +msgid "glusterfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:86 +#, python-format +msgid "Already mounted: %s" +msgstr "" + +#: cinder/common/config.py:125 +msgid "Deploy v1 of the Cinder API." +msgstr "" + +#: cinder/common/config.py:128 +msgid "Deploy v2 of the Cinder API." +msgstr "" + +#: cinder/common/sqlalchemyutils.py:66 +#: cinder/openstack/common/db/sqlalchemy/utils.py:72 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:114 +#: cinder/openstack/common/db/sqlalchemy/utils.py:120 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/compute/nova.py:97 +#, python-format +msgid "Novaclient connection created using URL: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:63 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:190 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:843 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1842 +#, python-format +msgid "VolumeType %s deletion failed, VolumeType in use." 
+msgstr "" + +#: cinder/db/sqlalchemy/api.py:2530 +#, python-format +msgid "No backup with id %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2615 +msgid "Volume must be available" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2639 +#, python-format +msgid "Volume in unexpected state %s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2662 +#, python-format +msgid "" +"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " +"%(status)s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:37 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:64 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:240 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:269 +msgid "Downgrade from initial Cinder install is unsupported." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:49 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:74 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:105 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:45 +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:48 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:46 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:127 +msgid "Dropping foreign key reservations_ibfk_1 failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:133 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:140 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:147 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:60 +msgid "Exception while creating table 'volume_glance_metadata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:75 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:68 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:58 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:61 +msgid "transfers table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:31 +msgid "migrations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:61 +#, python-format +msgid "Table |%s| not created" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:37 +#, python-format +msgid "Exception while dropping table %s." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:100 +#, python-format +msgid "Exception while creating table %s." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:34 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:43 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:49 +#, python-format +msgid "Column |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:92 +msgid "encryption_key_id column not dropped from volumes" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:100 +msgid "encryption_key_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:105 +msgid "volume_type_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:113 +msgid "encryption table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:49 +msgid "Table quality_of_service_specs not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:60 +msgid "Added qos_specs_id column to volume type table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:85 +msgid "Dropping foreign key volume_types_ibfk_1 failed" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:93 +msgid "Dropping qos_specs_id column failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:100 +msgid "Dropping quality_of_service_specs table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:59 +msgid "volume_admin_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:48 +msgid "" +"Found existing 'default' entries in the quota_classes table. Skipping " +"insertion of default values." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:72 +msgid "Added default quota class data into the DB." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:74 +msgid "Default quota class data not inserted into the DB." +msgstr "" + +#: cinder/image/glance.py:161 cinder/image/glance.py:169 +#, python-format +msgid "Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:94 cinder/image/image_utils.py:199 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/image/image_utils.py:101 +#, python-format +msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:109 cinder/image/image_utils.py:192 +#, python-format +msgid "" +"Size is %(image_size)dGB and doesn't fit in a volume of size " +"%(volume_size)dGB." +msgstr "" + +#: cinder/image/image_utils.py:157 +#, python-format +msgid "" +"qemu-img is not installed and image is of type %s. Only RAW images can " +"be used if qemu-img is not installed." +msgstr "" + +#: cinder/image/image_utils.py:164 +msgid "" +"qemu-img is not installed and the disk format is not specified. Only RAW" +" images can be used if qemu-img is not installed." 
+msgstr "" + +#: cinder/image/image_utils.py:178 +#, python-format +msgid "Copying image from %(tmp)s to volume %(dest)s - size: %(size)s" +msgstr "" + +#: cinder/image/image_utils.py:206 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:224 +#, python-format +msgid "Converted to %(vol_format)s, but format is now %(file_format)s" +msgstr "" + +#: cinder/image/image_utils.py:260 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:78 +msgid "" +"config option keymgr.fixed_key has not been defined: some operations may " +"fail unexpectedly" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:80 +msgid "keymgr.fixed_key not defined" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:134 +#, python-format +msgid "Not deleting key %s" +msgstr "" + +#: cinder/openstack/common/eventlet_backdoor.py:140 +#, python-format +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/fileutils.py:64 +#, python-format +msgid "Reloading cached file %s" +msgstr "" + +#: cinder/openstack/common/gettextutils.py:252 +msgid "Message objects do not support addition." +msgstr "" + +#: cinder/openstack/common/gettextutils.py:261 +msgid "" +"Message objects do not support str() because they may contain non-ascii " +"characters. Please use unicode() or translate() instead." +msgstr "" + +#: cinder/openstack/common/imageutils.py:96 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:189 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:200 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:227 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:235 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/log.py:301 +#, python-format +msgid "Deprecated: %s" +msgstr "" + +#: cinder/openstack/common/log.py:402 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:453 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:623 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:82 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:89 +#: cinder/tests/brick/test_brick_connector.py:466 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:129 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:136 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:43 +#, python-format +msgid "Unexpected argument for periodic task creation: %(arg)s." 
+msgstr "" + +#: cinder/openstack/common/periodic_task.py:134 +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:139 +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:177 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:186 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." +msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:127 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/openstack/common/processutils.py:142 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:167 +#: cinder/openstack/common/processutils.py:239 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:345 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:179 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/openstack/common/processutils.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:318 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:220 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/openstack/common/processutils.py:224 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/openstack/common/service.py:175 +#: cinder/openstack/common/service.py:269 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:187 +msgid "Exception during rpc cleanup." 
+msgstr "" + +#: cinder/openstack/common/service.py:238 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:275 +msgid "Unhandled exception" +msgstr "" + +#: cinder/openstack/common/service.py:308 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/openstack/common/service.py:327 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/openstack/common/service.py:337 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/openstack/common/service.py:354 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/openstack/common/service.py:358 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/service.py:362 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/openstack/common/service.py:392 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/openstack/common/service.py:410 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/openstack/common/sslutils.py:98 +#, python-format +msgid "Invalid SSL version : %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:86 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/strutils.py:182 +#, python-format +msgid "Invalid string format: %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:189 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/openstack/common/versionutils.py:69 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and " +"may be removed in %(remove_in)s." +msgstr "" + +#: cinder/openstack/common/versionutils.py:73 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s and may be removed in " +"%(remove_in)s. It will not be superseded." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:29 +msgid "An unknown error occurred in crypto utils." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:36 +#, python-format +msgid "Block size of %(given)d is too big, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:45 +#, python-format +msgid "Length of %(given)d is too long, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/db/exception.py:44 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:487 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:538 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:610 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/utils.py:33 +msgid "Sort key supplied was not valid." +msgstr "" + +#: cinder/openstack/common/notifier/api.py:129 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:145 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:164 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. 
Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:105 +#, python-format +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:83 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:216 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:299 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:345 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "received %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:422 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:423 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:280 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:459 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:594 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:597 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:631 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:640 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:668 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." 
+msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:166 +#: cinder/openstack/common/rpc/impl_qpid.py:164 +msgid "Failed to process message... skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:477 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:499 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:536 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:552 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:606 +#: cinder/openstack/common/rpc/impl_qpid.py:507 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:624 +#: cinder/openstack/common/rpc/impl_qpid.py:522 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:628 +#: cinder/openstack/common/rpc/impl_qpid.py:526 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:667 +#: cinder/openstack/common/rpc/impl_qpid.py:561 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:84 +#, python-format +msgid "Invalid value for qpid_topology_version: %d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:455 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:461 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:474 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:534 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:101 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:136 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:137 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:138 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:146 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:158 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:200 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:205 +msgid "You cannot send on this socket." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:267 +#, python-format +msgid "Running func with context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:305 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:387 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:437 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:443 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:475 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:481 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:497 +#, python-format +msgid "Required IPC directory does not exist at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:506 +#, python-format +msgid "Permission denied to IPC directory at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:509 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:543 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:562 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:590 +msgid "Skipping topic registration. Already registered." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:597 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:649 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:662 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:675 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:678 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:681 +#, python-format +msgid "Received message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:682 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:691 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:698 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:721 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:724 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:728 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:731 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:771 +#, python-format +msgid "topic is %s." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:815 +#, python-format +msgid "rpc_zmq_matchmaker = %(orig)s is deprecated; use %(new)s instead" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#: cinder/openstack/common/rpc/matchmaker_ring.py:79 +#: cinder/openstack/common/rpc/matchmaker_ring.py:97 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:54 +#, python-format +msgid "extra_spec requirement '%(req)s' does not match '%(cap)s'" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:67 +#, python-format +msgid "%(host_state)s fails resource_type extra_specs requirements" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:43 +msgid "Re-scheduling is disabled." +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:52 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/scheduler/driver.py:69 +msgid "Must implement host_passes_filters" +msgstr "" + +#: cinder/scheduler/driver.py:74 +msgid "Must implement find_retype_host" +msgstr "" + +#: cinder/scheduler/driver.py:78 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:82 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:98 +#, python-format +msgid "cannot place volume %(id)s on %(host)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:114 +#, python-format +msgid "No valid hosts for volume %(id)s with type %(type)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:125 +#, python-format +msgid "" +"Current host not valid for volume %(id)s with type %(type)s, migration " +"not allowed" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:156 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:174 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:207 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:259 +#, python-format +msgid "Filtered %s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:276 +#, python-format +msgid "Choosing %s" +msgstr "" + +#: cinder/scheduler/host_manager.py:264 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:269 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:294 +#, python-format +msgid "volume service is down or disabled. (host: %s)" +msgstr "" + +#: cinder/scheduler/manager.py:63 +msgid "" +"ChanceScheduler and SimpleScheduler have been deprecated due to lack of " +"support for advanced features like: volume types, volume encryption, QoS " +"etc. These two schedulers can be fully replaced by FilterScheduler with " +"certain combination of filters and weighers." 
+msgstr "" + +#: cinder/scheduler/manager.py:98 cinder/scheduler/manager.py:100 +msgid "Failed to create scheduler manager volume flow" +msgstr "" + +#: cinder/scheduler/manager.py:159 +msgid "New volume type not specified in request_spec." +msgstr "" + +#: cinder/scheduler/manager.py:174 +#, python-format +msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." +msgstr "" + +#: cinder/scheduler/manager.py:192 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:68 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%s'" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:43 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:57 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:53 +msgid "No volume_id provided to populate a request_spec from" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:116 +#, python-format +msgid "Failed to schedule_create_volume: %(cause)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:135 +#, python-format +msgid "Failed notifying on %(topic)s payload %(payload)s" +msgstr "" + +#: cinder/tests/fake_driver.py:57 cinder/volume/driver.py:784 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:76 cinder/volume/driver.py:884 +#, python-format +msgid "FAKE ISER: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:97 +msgid "local_path not implemented" +msgstr "" + +#: cinder/tests/fake_driver.py:124 cinder/tests/fake_driver.py:129 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:70 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:78 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:94 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:97 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:58 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_netapp_nfs.py:360 +#, python-format +msgid "Share %(share)s and file name %(file_name)s" +msgstr "" + +#: cinder/tests/test_rbd.py:768 cinder/volume/drivers/rbd.py:175 +msgid "flush() not supported in this version of librbd" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:260 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1507 +#, python-format +msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1510 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1515 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:60 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:61 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: 
cinder/tests/test_xiv_ds8k.py:102 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/tests/api/contrib/test_backups.py:741 +msgid "Invalid input" +msgstr "" + +#: cinder/tests/integrated/test_login.py:29 +#, python-format +msgid "volume: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:32 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:42 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:50 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:58 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:100 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:103 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:121 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:148 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:159 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:166 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/transfer/api.py:68 +msgid "Volume in unexpected state" +msgstr "" + +#: cinder/transfer/api.py:102 cinder/volume/api.py:367 +msgid "status must be available" +msgstr "" + +#: cinder/transfer/api.py:119 +#, python-format +msgid "Failed to create transfer record for %s" +msgstr "" + +#: cinder/transfer/api.py:136 +#, python-format +msgid "Attempt to transfer %s with invalid auth key." +msgstr "" + +#: cinder/transfer/api.py:156 cinder/volume/flows/api/create_volume.py:508 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/transfer/api.py:182 +#, python-format +msgid "Failed to update quota donating volumetransfer id %s" +msgstr "" + +#: cinder/transfer/api.py:199 +#, python-format +msgid "Volume %s has been transferred." 
+msgstr "" + +#: cinder/volume/api.py:143 +#, python-format +msgid "Unable to query if %s is in the availability zone set" +msgstr "" + +#: cinder/volume/api.py:171 cinder/volume/api.py:173 +msgid "Failed to create api volume flow" +msgstr "" + +#: cinder/volume/api.py:202 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:214 +#, python-format +msgid "Volume status must be available or error, but current status is: %s" +msgstr "" + +#: cinder/volume/api.py:224 +msgid "Volume cannot be deleted while migrating" +msgstr "" + +#: cinder/volume/api.py:229 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:285 cinder/volume/api.py:350 +#: cinder/volume/qos_specs.py:240 cinder/volume/volume_types.py:67 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:370 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:377 +msgid "status must be in-use to detach" +msgstr "" + +#: cinder/volume/api.py:388 +msgid "Volume status must be available to reserve" +msgstr "" + +#: cinder/volume/api.py:464 +msgid "Snapshot cannot be created while volume is migrating" +msgstr "" + +#: cinder/volume/api.py:468 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:490 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:502 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:553 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/api.py:581 cinder/volume/flows/api/create_volume.py:208 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:585 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:589 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:720 cinder/volume/api.py:772 +msgid "Volume status must be available/in-use." +msgstr "" + +#: cinder/volume/api.py:723 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/api.py:752 +msgid "Volume status must be available to extend." +msgstr "" + +#: cinder/volume/api.py:757 +#, python-format +msgid "" +"New size for extend must be greater than current size. (current: " +"%(size)s, extended: %(new_size)s)" +msgstr "" + +#: cinder/volume/api.py:778 +msgid "Volume is already part of an active migration" +msgstr "" + +#: cinder/volume/api.py:784 +msgid "volume must not have snapshots" +msgstr "" + +#: cinder/volume/api.py:797 +#, python-format +msgid "No available service named %s" +msgstr "" + +#: cinder/volume/api.py:803 +msgid "Destination host must be different than current host" +msgstr "" + +#: cinder/volume/api.py:833 +msgid "Source volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:837 +msgid "Destination volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:842 +#, python-format +msgid "Destination has migration_status %(stat)s, expected %(exp)s." +msgstr "" + +#: cinder/volume/api.py:853 +msgid "Volume status must be available to update readonly flag." 
+msgstr "" + +#: cinder/volume/api.py:862 +#, python-format +msgid "Unable to update type due to incorrect status on volume: %s" +msgstr "" + +#: cinder/volume/api.py:868 +#, python-format +msgid "Volume %s is already part of an active migration." +msgstr "" + +#: cinder/volume/api.py:874 +#, python-format +msgid "migration_policy must be 'on-demand' or 'never', passed: %s" +msgstr "" + +#: cinder/volume/api.py:887 +#, python-format +msgid "Invalid volume_type passed: %s" +msgstr "" + +#: cinder/volume/api.py:900 +#, python-format +msgid "New volume_type same as original: %s" +msgstr "" + +#: cinder/volume/api.py:915 +msgid "Retype cannot change encryption requirements" +msgstr "" + +#: cinder/volume/api.py:927 +msgid "Retype cannot change front-end qos specs for in-use volumes" +msgstr "" + +#: cinder/volume/driver.py:189 cinder/volume/drivers/netapp/nfs.py:174 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:282 +#, python-format +msgid "copy_data_between_volumes %(src)s -> %(dest)s." +msgstr "" + +#: cinder/volume/driver.py:295 cinder/volume/driver.py:309 +#, python-format +msgid "Failed to attach volume %(vol)s" +msgstr "" + +#: cinder/volume/driver.py:327 +#, python-format +msgid "Failed to copy volume %(src)s to %(dest)d" +msgstr "" + +#: cinder/volume/driver.py:340 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:358 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:394 +#, python-format +msgid "Unable to access the backend storage via the path %(path)s." +msgstr "" + +#: cinder/volume/driver.py:433 +#, python-format +msgid "Creating a new backup for volume %s." +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Restoring backup %(backup)s to volume %(volume)s." +msgstr "" + +#: cinder/volume/driver.py:474 +msgid "Extend volume not implemented" +msgstr "" + +#: cinder/volume/driver.py:533 cinder/volume/drivers/emc/emc_smis_iscsi.py:113 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:546 +#, python-format +msgid "ISCSI discovery attempt failed for:%s" +msgstr "" + +#: cinder/volume/driver.py:548 +#, python-format +msgid "Error from iscsiadm -m discovery: %s" +msgstr "" + +#: cinder/volume/driver.py:595 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:599 cinder/volume/drivers/emc/emc_smis_iscsi.py:156 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:696 +msgid "The volume driver requires the iSCSI initiator name in the connector." +msgstr "" + +#: cinder/volume/driver.py:726 cinder/volume/driver.py:845 +#: cinder/volume/drivers/eqlx.py:247 cinder/volume/drivers/lvm.py:359 +#: cinder/volume/drivers/zadara.py:650 +#: cinder/volume/drivers/emc/emc_smis_common.py:859 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:235 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:602 +#: cinder/volume/drivers/netapp/iscsi.py:1032 +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/nexenta/iscsi.py:538 +#: cinder/volume/drivers/windows/windows.py:205 +msgid "Updating volume stats" +msgstr "" + +#: cinder/volume/driver.py:924 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:203 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." 
+msgstr "" + +#: cinder/volume/manager.py:209 +msgid "" +"ThinLVMVolumeDriver is deprecated, please configure LVMISCSIDriver and " +"lvm_type=thin. Continuing with those settings." +msgstr "" + +#: cinder/volume/manager.py:228 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)" +msgstr "" + +#: cinder/volume/manager.py:235 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s" +msgstr "" + +#: cinder/volume/manager.py:244 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:257 +#, python-format +msgid "Failed to re-export volume %s: setting to error state" +msgstr "" + +#: cinder/volume/manager.py:264 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:271 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:273 +#, python-format +msgid "" +"Error encountered during re-exporting phase of driver initialization: " +"%(name)s" +msgstr "" + +#: cinder/volume/manager.py:283 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:328 cinder/volume/manager.py:330 +msgid "Failed to create manager volume flow" +msgstr "" + +#: cinder/volume/manager.py:374 cinder/volume/manager.py:391 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:380 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:389 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:394 +#, python-format +msgid "Cannot delete volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:422 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:427 +#, python-format +msgid "volume %s: glance metadata deleted" +msgstr "" + +#: cinder/volume/manager.py:430 +#, python-format +msgid "no glance metadata found for volume %s" +msgstr "" + +#: cinder/volume/manager.py:434 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:451 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:462 +#, python-format +msgid "snapshot %(snap_id)s: creating" +msgstr "" + +#: cinder/volume/manager.py:490 +#, python-format +msgid "" +"Failed updating %(snapshot_id)s metadata using the provided volumes " +"%(volume_id)s metadata" +msgstr "" + +#: cinder/volume/manager.py:496 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:508 cinder/volume/manager.py:518 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:526 +#, python-format +msgid "Cannot delete snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:556 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:559 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:579 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:583 +msgid "being attached by another host" +msgstr "" + +#: cinder/volume/manager.py:587 +msgid "being attached by different mode" +msgstr "" + +#: cinder/volume/manager.py:590 +msgid "status must be available or attaching" +msgstr "" + +#: cinder/volume/manager.py:698 +#, python-format +msgid 
"Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "" + +#: cinder/volume/manager.py:760 +#, python-format +msgid "Unable to fetch connection information from backend: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:807 +#, python-format +msgid "Unable to terminate volume connection: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:854 +msgid "failed to create new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:857 +msgid "timeout creating new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:880 +#, python-format +msgid "Failed to copy volume %(vol1)s to %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:909 +#, python-format +msgid "" +"migrate_volume_completion: completing migration for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:921 +#, python-format +msgid "" +"migrate_volume_completion is cleaning up an error for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:940 +#, python-format +msgid "Failed to delete migration source vol %(vol)s: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:976 +#, python-format +msgid "volume %s: calling driver migrate_volume" +msgstr "" + +#: cinder/volume/manager.py:1016 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/manager.py:1024 +#, python-format +msgid "" +"Unable to update stats, %(driver_name)s -%(driver_version)s " +"%(config_group)s driver is uninitialized." +msgstr "" + +#: cinder/volume/manager.py:1044 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/manager.py:1091 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to extend volume by %(s_size)sG, " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/manager.py:1103 +#, python-format +msgid "volume %s: extending" +msgstr "" + +#: cinder/volume/manager.py:1105 +#, python-format +msgid "volume %s: extended successfully" +msgstr "" + +#: cinder/volume/manager.py:1107 +#, python-format +msgid "volume %s: Error trying to extend volume" +msgstr "" + +#: cinder/volume/manager.py:1169 +msgid "Failed to update usages while retyping volume." +msgstr "" + +#: cinder/volume/manager.py:1170 +msgid "Failed to get old volume type quota reservations" +msgstr "" + +#: cinder/volume/manager.py:1190 +#, python-format +msgid "Volume %s: retyped succesfully" +msgstr "" + +#: cinder/volume/manager.py:1193 +#, python-format +msgid "" +"Volume %s: driver error when trying to retype, falling back to generic " +"mechanism." +msgstr "" + +#: cinder/volume/manager.py:1204 +msgid "Retype requires migration but is not allowed." +msgstr "" + +#: cinder/volume/manager.py:1212 +msgid "Volume must not have snapshots." 
+msgstr "" + +#: cinder/volume/qos_specs.py:57 +#, python-format +msgid "Valid consumer of QoS specs are: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:84 cinder/volume/qos_specs.py:105 +#: cinder/volume/qos_specs.py:155 cinder/volume/qos_specs.py:197 +#: cinder/volume/qos_specs.py:211 cinder/volume/qos_specs.py:225 +#: cinder/volume/volume_types.py:43 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:123 cinder/volume/qos_specs.py:140 +#: cinder/volume/qos_specs.py:272 cinder/volume/volume_types.py:52 +#: cinder/volume/volume_types.py:99 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/qos_specs.py:156 +#, python-format +msgid "Failed to get all associations of qos specs %s" +msgstr "" + +#: cinder/volume/qos_specs.py:189 +#, python-format +msgid "" +"Type %(type_id)s is already associated with another qos specs: " +"%(qos_specs_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:198 +#, python-format +msgid "Failed to associate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:212 +#, python-format +msgid "Failed to disassociate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:226 +#, python-format +msgid "Failed to disassociate qos specs %s." +msgstr "" + +#: cinder/volume/qos_specs.py:284 cinder/volume/volume_types.py:111 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/utils.py:144 +#, python-format +msgid "" +"Incorrect value error: %(blocksize)s, it may indicate that " +"'volume_dd_blocksize' was configured incorrectly. Fall back to default." +msgstr "" + +#: cinder/volume/volume_types.py:130 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:131 +#: cinder/volume/drivers/block_device.py:143 cinder/volume/drivers/lvm.py:654 +#: cinder/volume/drivers/lvm.py:669 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:157 cinder/volume/drivers/lvm.py:687 +#, python-format +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:183 cinder/volume/drivers/lvm.py:483 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:200 cinder/volume/drivers/lvm.py:504 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:272 cinder/volume/drivers/lvm.py:227 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:287 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:311 cinder/volume/drivers/lvm.py:300 +#: cinder/volume/drivers/zadara.py:509 cinder/volume/drivers/nexenta/nfs.py:189 +#, python-format +msgid "Creating clone of volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:380 +msgid "No free disk" +msgstr "" + +#: cinder/volume/drivers/block_device.py:393 +msgid "No big enough free disk" +msgstr "" + +#: cinder/volume/drivers/coraid.py:84 +#, python-format +msgid "Invalid ESM url scheme \"%s\". Supported https only." +msgstr "" + +#: cinder/volume/drivers/coraid.py:111 +msgid "Invalid REST handle name. Expected path." 
+msgstr "" + +#: cinder/volume/drivers/coraid.py:134 +#, python-format +msgid "Call to json.loads() failed: %(ex)s. Response: %(resp)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:224 +msgid "Session is expired. Relogin on ESM." +msgstr "" + +#: cinder/volume/drivers/coraid.py:244 +msgid "Reply is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:246 +msgid "Error message is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:284 +#, python-format +msgid "Coraid Appliance ping failed: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:297 +#, python-format +msgid "Volume \"%(name)s\" created with VSX LUN \"%(lun)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:311 +#, python-format +msgid "Volume \"%s\" deleted." +msgstr "" + +#: cinder/volume/drivers/coraid.py:315 +#, python-format +msgid "Resize volume \"%(name)s\" to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:319 +#, python-format +msgid "Repository for volume \"%(name)s\" found: \"%(repo)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:333 +#, python-format +msgid "Volume \"%(name)s\" resized. New size is %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:385 +msgid "Cannot create clone volume in different repository." +msgstr "" + +#: cinder/volume/drivers/coraid.py:505 +#, python-format +msgid "Initialize connection %(shelf)s/%(lun)s for %(name)s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:139 +#, python-format +msgid "" +"CLI output\n" +"%s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:154 +msgid "Reading CLI MOTD" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:158 +#, python-format +msgid "Setting CLI terminal width: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:162 +#, python-format +msgid "Sending CLI command: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:169 +msgid "Error executing EQL command" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:199 +#, python-format +msgid "EQL-driver: executing \"%s\"" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:208 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:383 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:215 cinder/volume/drivers/san/san.py:149 +#, python-format +msgid "Error running SSH command: %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:282 +#, python-format +msgid "Volume %s does not exist, it may have already been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:300 +#, python-format +msgid "EQL-driver: Setup is complete, group IP is %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:304 +msgid "Failed to setup the Dell EqualLogic driver" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:320 +#, python-format +msgid "Failed to create volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:329 +#, python-format +msgid "Volume %s was not found while trying to delete it" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:333 +#, python-format +msgid "Failed to delete volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:348 +#, python-format +msgid "Failed to create snapshot of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:361 +#, python-format +msgid "Failed to create volume from snapshot %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:374 +#, python-format +msgid "Failed to create clone of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:384 +#, python-format +msgid "Failed to delete snapshot %(snap)s of volume %(vol)s" +msgstr "" + +#: 
cinder/volume/drivers/eqlx.py:405
+#, python-format
+msgid "Failed to initialize connection to volume %s"
+msgstr ""
+
+#: cinder/volume/drivers/eqlx.py:415
+#, python-format
+msgid "Failed to terminate connection to volume %s"
+msgstr ""
+
+#: cinder/volume/drivers/eqlx.py:436
+#, python-format
+msgid "Volume %s is not found! It may have been deleted"
+msgstr ""
+
+#: cinder/volume/drivers/eqlx.py:440
+#, python-format
+msgid "Failed to ensure export of volume %s"
+msgstr ""
+
+#: cinder/volume/drivers/eqlx.py:459
+#, python-format
+msgid "Failed to extend_volume %(name)s from %(current_size)sGB to %(new_size)sGB"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:86
+#, python-format
+msgid "There's no Gluster config file configured (%s)"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:91
+#, python-format
+msgid "Gluster config file at %(config)s doesn't exist"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:103
+msgid "mount.glusterfs is not installed"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:161
+#, python-format
+msgid "Cloning volume %(src)s to volume %(dst)s"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:166
+msgid "Volume status must be 'available'."
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:202 cinder/volume/drivers/nfs.py:122
+#: cinder/volume/drivers/netapp/nfs.py:753
+#, python-format
+msgid "casted to %s"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:216
+msgid "Snapshot status must be \"available\" to clone."
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:238
+#, python-format
+msgid "snapshot: %(snap)s, volume: %(vol)s, volume_size: %(size)s"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:257
+#, python-format
+msgid "will copy from snapshot at %s"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:275 cinder/volume/drivers/nfs.py:172
+#, python-format
+msgid "Volume %s does not have provider_location specified, skipping"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:373
+#, python-format
+msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:403
+#, python-format
+msgid "nova call result: %s"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:405
+msgid "Call to Nova to create snapshot failed"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:427
+msgid "Nova returned \"error\" status while creating snapshot."
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:431
+#, python-format
+msgid "Status of snapshot %(id)s is now %(status)s"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:444
+#, python-format
+msgid "Timed out while waiting for Nova update for creation of snapshot %s."
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:456
+#, python-format
+msgid "create snapshot: %s"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:457
+#, python-format
+msgid "volume id: %s"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:532
+msgid "'active' must be present when writing snap_info."
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:562
+#, python-format
+msgid "deleting snapshot %s"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:566
+msgid "Volume status must be \"available\" or \"in-use\"."
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:582
+#, python-format
+msgid ""
+"Snapshot record for %s is not present, allowing snapshot_delete to "
+"proceed."
+msgstr "" + +#: cinder/volume/drivers/glusterfs.py:587 +#, python-format +msgid "snapshot_file for this snap is %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:608 +#, python-format +msgid "No base file found for %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:625 +#, python-format +msgid "No %(base_id)s found for %(file)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:680 +#, python-format +msgid "No file found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:690 +#, python-format +msgid "No snap found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:701 +#, python-format +msgid "No file depends on %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:727 +#, python-format +msgid "Check condition failed: %s expected to be None." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:778 +msgid "Call to Nova delete snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:796 +#, python-format +msgid "status of snapshot %s is still \"deleting\"... waiting" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:802 +#, python-format +msgid "Unable to delete snapshot %(id)s, status: %(status)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:815 +#, python-format +msgid "Timed out while waiting for Nova update for deletion of snapshot %(id)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:904 +#, python-format +msgid "%s must be a valid raw or qcow2 image." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:967 +msgid "Extend volume is only supported for this driver when no snapshots exist." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:975 +#, python-format +msgid "Unrecognized backing format: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:990 +#, python-format +msgid "creating new volume at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:993 +#, python-format +msgid "file already exists at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1019 cinder/volume/drivers/nfs.py:159 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1021 +#, python-format +msgid "Available shares: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1038 +#, python-format +msgid "" +"GlusterFS share at %(dir)s is not writable by the Cinder volume service. " +"Snapshot operations will not be supported." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:96 +#, python-format +msgid "GPFS is not active. Detailed output: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:97 +#, python-format +msgid "GPFS is not running - state: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:140 +msgid "Option gpfs_mount_point_base is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:147 +msgid "Option gpfs_images_share_mode is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:153 +msgid "Option gpfs_images_dir is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:160 +#, python-format +msgid "" +"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " +"belong to different file systems" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:169 +#, python-format +msgid "" +"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in " +"cluster daemon level %(cur)s - must be at least at level %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:183 +#, python-format +msgid "%s must be an absolute path." 
+msgstr "" + +#: cinder/volume/drivers/gpfs.py:188 +#, python-format +msgid "%s is not a directory." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:197 +#, python-format +msgid "" +"The GPFS filesystem %(fs)s is not at the required release level. Current" +" level is %(cur)s, must be at least %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:556 +#, python-format +msgid "Failed to resize volume %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:604 +#, python-format +msgid "mkfs failed on volume %(vol)s, error message was: %(err)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:637 +#, python-format +msgid "" +"%s cannot be accessed. Verify that GPFS is active and file system is " +"mounted." +msgstr "" + +#: cinder/volume/drivers/lvm.py:189 +#, python-format +msgid "Unabled to delete due to existing snapshot for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:215 +#, python-format +msgid "Volume device file path %s does not exist." +msgstr "" + +#: cinder/volume/drivers/lvm.py:221 +#, python-format +msgid "Size for volume: %s not found, cannot secure delete." +msgstr "" + +#: cinder/volume/drivers/lvm.py:262 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:361 +#, python-format +msgid "Unable to update stats on non-initialized Volume Group: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:462 +#, python-format +msgid "Error creating iSCSI target, retrying creation for target: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:482 +#, python-format +msgid "volume_info:%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:518 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:519 cinder/volume/drivers/lvm.py:724 +#: cinder/volume/drivers/huawei/rest_common.py:1225 +#, python-format +msgid "%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:573 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/nfs.py:109 +msgid "Driver specific implementation needs to return mount_point_base." +msgstr "" + +#: cinder/volume/drivers/nfs.py:263 +#, python-format +msgid "Expected volume size was %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:264 +#, python-format +msgid " but size is now %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:361 +#, python-format +msgid "%s is already mounted" +msgstr "" + +#: cinder/volume/drivers/nfs.py:421 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:426 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/nfs.py:431 +#, python-format +msgid "NFS config 'nfs_oversub_ratio' invalid. Must be > 0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:439 +#, python-format +msgid "NFS config 'nfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:493 +#, python-format +msgid "Selected %s as target nfs share." 
+msgstr "" + +#: cinder/volume/drivers/nfs.py:526 +#, python-format +msgid "%s is above nfs_used_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:529 +#, python-format +msgid "%s is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:532 +#, python-format +msgid "%s reserved space is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/rbd.py:160 +#, python-format +msgid "Invalid argument - whence=%s not supported" +msgstr "" + +#: cinder/volume/drivers/rbd.py:164 +msgid "Invalid argument" +msgstr "" + +#: cinder/volume/drivers/rbd.py:183 +msgid "fileno() not supported by RBD()" +msgstr "" + +#: cinder/volume/drivers/rbd.py:210 +#, python-format +msgid "error opening rbd image %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:259 +msgid "rados and rbd python libraries not found" +msgstr "" + +#: cinder/volume/drivers/rbd.py:265 +msgid "error connecting to ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:346 cinder/volume/drivers/sheepdog.py:178 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:377 +#, python-format +msgid "clone depth exceeds limit of %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:411 +#, python-format +msgid "maximum clone depth (%d) has been reached - flattening source volume" +msgstr "" + +#: cinder/volume/drivers/rbd.py:423 +#, python-format +msgid "flattening source volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:435 +#, python-format +msgid "creating snapshot='%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:445 +#, python-format +msgid "cloning '%(src_vol)s@%(src_snap)s' to '%(dest)s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:459 +msgid "clone created successfully" +msgstr "" + +#: cinder/volume/drivers/rbd.py:468 +#, python-format +msgid "creating volume '%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:484 +#, python-format +msgid "flattening %(pool)s/%(img)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:490 +#, python-format +msgid "cloning %(pool)s/%(img)s@%(snap)s to %(dst)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:527 +msgid "volume has no backup snaps" +msgstr "" + +#: cinder/volume/drivers/rbd.py:550 +#, python-format +msgid "volume %s is not a clone" +msgstr "" + +#: cinder/volume/drivers/rbd.py:568 +#, python-format +msgid "deleting parent snapshot %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:579 +#, python-format +msgid "deleting parent %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:593 +#, python-format +msgid "volume %s no longer exists in backend" +msgstr "" + +#: cinder/volume/drivers/rbd.py:609 +msgid "volume has clone snapshot(s)" +msgstr "" + +#: cinder/volume/drivers/rbd.py:625 +#, python-format +msgid "deleting rbd volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:629 +msgid "" +"ImageBusy error raised while deleting rbd volume. This may have been " +"caused by a connection from a client that has crashed and, if so, may be " +"resolved by retrying the delete after 30 seconds has elapsed." 
+msgstr "" + +#: cinder/volume/drivers/rbd.py:642 +msgid "volume is a clone so cleaning references" +msgstr "" + +#: cinder/volume/drivers/rbd.py:696 +#, python-format +msgid "connection data: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:705 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:709 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:712 +msgid "Not an rbd snapshot" +msgstr "" + +#: cinder/volume/drivers/rbd.py:724 +#, python-format +msgid "not cloneable: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:728 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:733 +msgid "rbd image clone requires image format to be 'raw' but image {0} is '{1}'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:747 +#, python-format +msgid "Unable to open image %(loc)s: %(err)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:817 +msgid "volume backup complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:830 +msgid "volume restore complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:840 cinder/volume/drivers/sheepdog.py:195 +#, python-format +msgid "Failed to Extend Volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:845 cinder/volume/drivers/sheepdog.py:200 +#: cinder/volume/drivers/windows/windows.py:223 +#, python-format +msgid "Extend volume from %(old_size)s GB to %(new_size)s GB." +msgstr "" + +#: cinder/volume/drivers/scality.py:67 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:78 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:84 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:105 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:139 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:59 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:64 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:144 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:151 +#, python-format +msgid "" +"Failed to make httplib connection SolidFire Cluster: %s (verify san_ip " +"settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:154 +#, python-format +msgid "Failed to make httplib connection: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:161 +#, python-format +msgid "" +"Request to SolidFire cluster returned bad status: %(status)s / %(reason)s" +" (check san_login/san_password settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:166 +#, python-format +msgid "HTTP request failed, with status: %(status)s and reason: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:177 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:183 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:187 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:189 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:195 +#, python-format +msgid "Detected 
xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:202 +#: cinder/volume/drivers/solidfire.py:271 +#: cinder/volume/drivers/solidfire.py:366 +#, python-format +msgid "API response: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:222 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:253 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:315 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:398 +msgid "Failed to get model update from clone" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:410 +#, python-format +msgid "Failed volume create: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:425 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:460 +#, python-format +msgid "Failed to get SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:469 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:478 +#, python-format +msgid "Volume %s, not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:481 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:550 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:554 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:556 +msgid "This usually means the volume was never successfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:569 +#, python-format +msgid "Failed to delete SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:572 +#: cinder/volume/drivers/solidfire.py:646 +#: cinder/volume/drivers/solidfire.py:709 +#: cinder/volume/drivers/solidfire.py:734 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:575 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:579 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:587 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:638 +msgid "Entering SolidFire extend_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:660 +msgid "Leaving SolidFire extend_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:665 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:673 +msgid "Failed to get updated stats" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:703 +#: cinder/volume/drivers/solidfire.py:728 +msgid "Entering SolidFire attach_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:773 +msgid "Leaving SolidFire transfer volume" +msgstr "" + +#: cinder/volume/drivers/zadara.py:236 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:260 +#, python-format +msgid "Operation completed. 
%(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:357 +#, python-format +msgid "Pool %(name)s: %(total)sGB total, %(free)sGB free" +msgstr "" + +#: cinder/volume/drivers/zadara.py:408 cinder/volume/drivers/zadara.py:531 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:438 +#, python-format +msgid "Create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:445 cinder/volume/drivers/zadara.py:490 +#: cinder/volume/drivers/zadara.py:516 +#, python-format +msgid "Volume %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:456 +#, python-format +msgid "Delete snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:464 +#, python-format +msgid "snapshot: original volume %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:472 +#, python-format +msgid "snapshot: snapshot %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:483 +#, python-format +msgid "Creating volume from snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:496 +#, python-format +msgid "Snapshot %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:614 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:40 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:79 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:83 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:91 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:98 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:107 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:115 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:130 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:137 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:144 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:152 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:157 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:167 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:177 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:188 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:197 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:218 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:230 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:241 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:257 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:266 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:278 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:287 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:292 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:302 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:312 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:321 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:342 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. 
Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:354 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:365 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:381 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:390 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:402 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:411 +msgid "Entering delete_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:413 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:420 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:430 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:438 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:442 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:456 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:465 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:472 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:476 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:488 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:497 +#: cinder/volume/drivers/emc/emc_smis_common.py:567 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:502 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:518 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:527 +#, python-format +msgid "" +"Error Create Snapshot: %(snapshot)s Volume: %(volume)s Error: " +"%(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:535 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:541 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:545 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:551 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:559 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:574 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:590 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:599 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:611 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:621 +#, python-format +msgid "Create export: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:626 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:648 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:663 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:674 +#, python-format +msgid "Error mapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:678 +#, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:694 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:707 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:711 +#, python-format +msgid "HidePaths for volume %s completed successfully." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:724 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:739 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:744 +#, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:757 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:770 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:775 +#, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:781 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:790 +#: cinder/volume/drivers/emc/emc_smis_common.py:820 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:804 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:810 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:834 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:840 +#, python-format +msgid "Volume %s is already mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:852 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:884 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:887 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:903 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:906 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:928 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:948 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:952 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:959 +msgid "Cannot connect to ECOM server" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:971 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:984 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:997 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1010 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1054 +#, python-format +msgid "Pool %(storage_type)s is not found." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1060 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1066 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1082 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1114 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1117 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1130 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1158 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1184 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1188 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1248 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1289 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1302 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1314 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1326 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1361 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1404 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1409 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1419 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1441 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1463 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1491 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1520 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1526 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1538 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1548 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1550 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1566 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:152 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:161 +#, python-format +msgid "Cannot find device number for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:191 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:198 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:215 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:70 +#, python-format +msgid "Range: start LU: %(start)s, end LU: %(end)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:84 +#, python-format +msgid "setting LU upper (end) limit to %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:92 +#, python-format +msgid "%(element)s: %(val)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:103 cinder/volume/drivers/hds/hds.py:105 +#, python-format +msgid "XML exception reading parameter: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:178 +#, python-format +msgid "portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:197 +#, python-format +msgid "No configuration found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:250 +#, python-format +msgid "HDP not found: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:289 +#, python-format +msgid "iSCSI portal not found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:327 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:355 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is cloned." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:372 +#, python-format +msgid "LUN %(lun)s extended to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:395 +#, python-format +msgid "delete lun %(lun)s on %(name)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:480 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created from snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:503 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is created as snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:522 +#, python-format +msgid "LUN %s is deleted." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:57 +msgid "_instantiate_driver: configuration not found." 
+msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:64 +#, python-format +msgid "" +"_instantiate_driver: Loading %(protocol)s driver for Huawei OceanStor " +"%(product)s series storage arrays." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:84 +#, python-format +msgid "" +"\"Product\" or \"Protocol\" is illegal. \"Product\" should be set to " +"either T, Dorado or HVS. \"Protocol\" should be set to either iSCSI or " +"FC. Product: %(product)s Protocol: %(protocol)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:74 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s host: %(host)s initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:92 +#: cinder/volume/drivers/huawei/huawei_t.py:461 +#, python-format +msgid "initialize_connection: Target FC ports WWNS: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:101 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(ini)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:159 +#: cinder/volume/drivers/huawei/rest_common.py:1278 +#, python-format +msgid "" +"_get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " +"check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:206 +#: cinder/volume/drivers/huawei/rest_common.py:1083 +#, python-format +msgid "_get_tgt_iqn: iSCSI IP is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:234 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:248 +#, python-format +msgid "" +"_get_iscsi_tgt_port_info: Failed to get iSCSI port info. Please make sure" +" the iSCSI port IP %s is configured in array." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:323 +#: cinder/volume/drivers/huawei/huawei_t.py:552 +#, python-format +msgid "" +"terminate_connection: volume: %(vol)s, host: %(host)s, connector: " +"%(initiator)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:351 +#, python-format +msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:436 +msgid "validate_connector: The FC driver requires thewwpns in the connector." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:443 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:578 +#, python-format +msgid "_remove_fc_ports: FC port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:40 +#, python-format +msgid "parse_xml_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:129 +#, python-format +msgid "_get_host_os_type: Host %(ip)s OS type is %(os)s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:59 +#, python-format +msgid "HVS Request URL: %(url)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:60 +#, python-format +msgid "HVS Request Data: %(data)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:73 +#, python-format +msgid "HVS Response Data: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:75 +#, python-format +msgid "Bad response from server: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:82 +msgid "JSON transfer error" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:102 +#, python-format +msgid "Login error, reason is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:166 +#, python-format +msgid "" +"%(err)s\n" +"result: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:173 +#, python-format +msgid "%s \"data\" was not in result." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:208 +msgid "Can't find the Qos policy in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:246 +msgid "Can't find lun or lun group in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:280 +#, python-format +msgid "Invalid resource pool: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:298 +#, python-format +msgid "Get pool info error, pool name is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:327 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:354 +#, python-format +msgid "_stop_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:474 +#, python-format +msgid "" +"_mapping_hostgroup_and_lungroup: lun_group: %(lun_group)sview_id: " +"%(view_id)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:511 +#: cinder/volume/drivers/huawei/rest_common.py:543 +#, python-format +msgid "initiator name:%(initiator_name)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:527 +#, python-format +msgid "host lun id is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:553 +#, python-format +msgid "the free wwns %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:574 +#, python-format +msgid "the fc server properties is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:688 +#, python-format +msgid "JSON transfer data error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:874 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:937 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:964 +#, python-format +msgid "" +"PrefetchType config is wrong. PrefetchType must in 1,2,3,4. fetchtype " +"is:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:970 +msgid "Use default prefetch fetchtype. Prefetch fetchtype:Intelligent." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:982 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal.LUNcopy name: " +"%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1056 +#, python-format +msgid "" +"_get_iscsi_port_info: Failed to get iscsi port info through config IP " +"%(ip)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1101 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1124 +#, python-format +msgid "_parse_volume_type: type id: %(type_id)s config parameter is: %(params)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1157 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the configuration file " +"%(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1162 +#, python-format +msgid "The config parameters are: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1239 +#: cinder/volume/drivers/huawei/ssh_common.py:118 +#: cinder/volume/drivers/huawei/ssh_common.py:1265 +#, python-format +msgid "_check_conf_file: Config file invalid. %s must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1246 +#: cinder/volume/drivers/huawei/ssh_common.py:125 +msgid "_check_conf_file: Config file invalid. StoragePool must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1256 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1300 +msgid "Can not find lun in array" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:54 +#, python-format +msgid "ssh_read: Read SSH timeout. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:70 +msgid "No response message. Please check system status." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:101 +#: cinder/volume/drivers/huawei/ssh_common.py:1249 +msgid "do_setup" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:135 +#: cinder/volume/drivers/huawei/ssh_common.py:1287 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType is invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:169 +#, python-format +msgid "_get_login_info: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:224 +#, python-format +msgid "create_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:242 +#, python-format +msgid "" +"_name_translate: Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:279 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the element in configuration " +"file %(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:373 +#: cinder/volume/drivers/huawei/ssh_common.py:1451 +#, python-format +msgid "LUNType must be \"Thin\" or \"Thick\". LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:395 +msgid "" +"_parse_conf_lun_params: Use default prefetch type. 
Prefetch type: " +"Intelligent" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:421 +#, python-format +msgid "" +"_get_maximum_capacity_pool_id: Failed to get pool id. Please check config" +" file and make sure the StoragePool %s is created in storage array." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:436 +#, python-format +msgid "CLI command: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:466 +#, python-format +msgid "" +"_execute_cli: Can not connect to IP %(old)s, try to connect to the other " +"IP %(new)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:501 +#, python-format +msgid "_execute_cli: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:511 +#, python-format +msgid "delete_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:516 +#, python-format +msgid "delete_volume: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:570 +#, python-format +msgid "" +"create_volume_from_snapshot: snapshot name: %(snapshot)s, volume name: " +"%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:580 +#, python-format +msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:650 +#, python-format +msgid "_wait_for_luncopy: LUNcopy %(luncopyname)s status is %(status)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:688 +#, python-format +msgid "create_cloned_volume: src volume: %(src)s, tgt volume: %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:697 +#, python-format +msgid "Source volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:739 +#, python-format +msgid "" +"extend_volume: extended volume name: %(extended_name)s new added volume " +"name: %(added_name)s new added volume size: %(added_size)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:747 +#, python-format +msgid "extend_volume: volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:779 +#, python-format +msgid "create_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:785 +msgid "create_snapshot: Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:792 +#, python-format +msgid "create_snapshot: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:855 +#, python-format +msgid "delete_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:865 +#, python-format +msgid "" +"delete_snapshot: Can not delete snapshot %s for it is a source LUN of " +"LUNCopy." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:873 +#, python-format +msgid "delete_snapshot: Snapshot %(snap)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:916 +#, python-format +msgid "" +"%(func)s: %(msg)s\n" +"CLI command: %(cmd)s\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:933 +#, python-format +msgid "map_volume: Volume %s was not found." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1079 +#, python-format +msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1102 +#, python-format +msgid "remove_map: Host %s does not exist." 
+msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1106 +#, python-format +msgid "remove_map: Volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1119 +#, python-format +msgid "remove_map: No map between host %(host)s and volume %(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1138 +#, python-format +msgid "" +"_delete_map: There are IOs accessing the system. Retry to delete host map" +" %(mapid)s 10s later." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1146 +#, python-format +msgid "" +"_delete_map: Failed to delete host map %(mapid)s.\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1185 +msgid "_update_volume_stats: Updating volume stats." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1277 +msgid "_check_conf_file: Config file invalid. StoragePool must be specified." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1311 +msgid "" +"_get_device_type: The driver only supports Dorado5100 and Dorado 2100 G2 " +"now." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1389 +#, python-format +msgid "" +"create_volume_from_snapshot: %(device)s does not support create volume " +"from snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1396 +#, python-format +msgid "create_cloned_volume: %(device)s does not support clone volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1404 +#, python-format +msgid "extend_volume: %(device)s does not support extend volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1413 +#, python-format +msgid "create_snapshot: %(device)s does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:132 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:142 +#, python-format +msgid "Failed getting details for pool %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:179 +msgid "do_setup: No configured nodes." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:182 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:186 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:190 +msgid "Unable to determine system name" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:193 +msgid "Unable to determine system id" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:196 +msgid "Unable to determine pool extent size" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:203 +#: cinder/volume/drivers/netapp/iscsi.py:122 +#: cinder/volume/drivers/netapp/nfs.py:639 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:157 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:209 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:217 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:225 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:235 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:254 +msgid "The connector does not contain the required information." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:277 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:297 +msgid "CHAP secret exists for host but CHAP is disabled" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:302 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:314 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:316 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:333 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:342 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:365 +msgid "" +"Could not get FC connection information for the host-volume connection. " +"Is the host configured properly for FC connections?" 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:380 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:385 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:403 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:411 +msgid "terminate_connection: Failed to get host name from connector." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:421 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:447 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:459 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:470 +#, python-format +msgid "enter: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:474 +msgid "extend_volume: Extending a volume with snapshots is not supported." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:481 +#, python-format +msgid "leave: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:497 +#, python-format +msgid "enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:523 +#, python-format +msgid "leave: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:540 +#, python-format +msgid "" +"enter: retype: id=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:581 +#, python-format +msgid "" +"exit: retype: ild=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:622 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:623 +msgid "_update_volume_stats: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:44 +#, python-format +msgid "Could not find key in output of command %(cmd)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:64 +#, python-format +msgid "Failed to get code level (%s)." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:86 +#, python-format +msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:143 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:165 +#, python-format +msgid "Failed to find host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:178 +#, python-format +msgid "enter: get_host_from_connector: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:207 +#, python-format +msgid "leave: get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:218 +#, python-format +msgid "enter: create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:224 +msgid "create_host: Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:235 +msgid "create_host: No initiators or wwpns supplied." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:265 +#, python-format +msgid "leave: create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:275 +#, python-format +msgid "enter: map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:301 +#, python-format +msgid "" +"leave: map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host " +"%(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:311 +#, python-format +msgid "enter: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:318 +#, python-format +msgid "unmap_vol_from_host: No mapping of volume %(vol_name)s to any host found." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:324 +#, python-format +msgid "" +"unmap_vol_from_host: Multiple mappings of volume %(vol_name)s found, no " +"host specified." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:336 +#, python-format +msgid "" +"unmap_vol_from_host: No mapping of volume %(vol_name)s to host %(host) " +"found." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:348 +#, python-format +msgid "leave: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:377 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:383 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:390 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:397 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:402 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:408 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:417 +#, python-format +msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:452 +msgid "Protocol must be specified as ' iSCSI' or ' FC'." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:495 +#, python-format +msgid "enter: create_vdisk: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:498 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:525 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping%(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:535 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within theallotted %(to)d " +"seconds timeout. Terminating." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:544 +#, python-format +msgid "" +"enter: run_flashcopy: execute FlashCopy from source %(source)s to target " +"%(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:552 +#, python-format +msgid "leave: run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:572 +#, python-format +msgid "Loopcall: _check_vdisk_fc_mappings(), vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:595 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:631 +#, python-format +msgid "Calling _ensure_vdisk_no_fc_mappings: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:639 +#, python-format +msgid "enter: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:641 +#, python-format +msgid "Tried to delete non-existant vdisk %s." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:645 +#, python-format +msgid "leave: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:649 +#, python-format +msgid "enter: create_copy: snapshot %(src)s to %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:654 +#, python-format +msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:669 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt)s from vdisk %(src)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:691 +msgid "migrate_volume started without a vdisk copy in the expected pool." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:743 +#, python-format +msgid "" +"Ignore change IO group as storage code level is %(code_level)s, below " +"then 6.4.0.0" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:35 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:211 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:244 +#, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:53 +#, python-format +msgid "Expected no output from CLI command %(cmd)s, got %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:65 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:256 +#, python-format +msgid "" +"Failed to parse CLI output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:142 +msgid "Must pass wwpn or host to lsfabric." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:158 +#, python-format +msgid "Did not find success message nor error for %(fun)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:163 +msgid "" +"storwize_svc_multihostmap_enabled is set to False, not allowing multi " +"host mapping." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:347 +#, python-format +msgid "Did not find expected key %(key)s in %(fun)s: %(raw)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:382 +#, python-format +msgid "" +"Unexpected CLI response: header/row mismatch. header: %(header)s, row: " +"%(row)s" +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:419 +#, python-format +msgid "No element by given name %s." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:440 +msgid "Not a valid value for NaElement." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:444 +msgid "NaElement name cannot be null." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:468 +msgid "Type cannot be converted into NaElement." 
+msgstr "" + +#: cinder/volume/drivers/netapp/common.py:75 +msgid "Required configuration not found" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:103 +#, python-format +msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:109 +#, python-format +msgid "Storage family %s is not supported" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:116 +#, python-format +msgid "No default storage protocol found for storage family %(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:123 +#, python-format +msgid "" +"Protocol %(storage_protocol)s is not supported for storage family " +"%(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:130 +#, python-format +msgid "" +"NetApp driver of family %(storage_family)s and protocol " +"%(storage_protocol)s loaded" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:139 +msgid "Only loading netapp drivers supported." +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:158 +#, python-format +msgid "" +"The configured NetApp driver is deprecated. Please refer the link to " +"resolve the issue '%s'." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:69 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:105 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:150 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:166 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:175 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:191 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:227 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:232 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:238 +#, python-format +msgid "Failed to get LUN target details for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:249 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:252 +#, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:290 +#, python-format +msgid "Snapshot %s deletion successful" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:310 +#: cinder/volume/drivers/netapp/iscsi.py:565 +#: cinder/volume/drivers/netapp/nfs.py:99 +#: cinder/volume/drivers/netapp/nfs.py:206 +#, python-format +msgid "Resizing %s failed. Cleaning volume." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:325 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:412 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:431 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:511 +msgid "Object is not a NetApp LUN." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:543 +#, python-format +msgid "Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:545 +#, python-format +msgid "Error getting lun attribute. Exception: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:600 +#, python-format +msgid "No need to extend volume %s as it is already the requested new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:606 +#, python-format +msgid "Resizing lun %s directly to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:633 +#, python-format +msgid "Lun %(path)s geometry failed. Message - %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:662 +#, python-format +msgid "Moving lun %(name)s to %(new_name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:677 +#, python-format +msgid "Resizing lun %s using sub clone to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:684 +#, python-format +msgid "%s cannot be sub clone resized as it is hosted on compressed volume" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:690 +#, python-format +msgid "%s cannot be sub clone resized as it contains no blocks." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:707 +#, python-format +msgid "Post clone resize lun %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:718 +#, python-format +msgid "Failure staging lun %s to tmp." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:723 +#, python-format +msgid "Failure moving new cloned lun to %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:727 +#, python-format +msgid "Failure deleting staged tmp lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:730 +#, python-format +msgid "Unknown exception in post clone resize lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:732 +#, python-format +msgid "Exception details: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:736 +msgid "Getting lun block count." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:741 +#, python-format +msgid "Failure getting lun info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:785 +#, python-format +msgid "Failed to get vol with required size and extra specs for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:796 +#, python-format +msgid "Error provisioning vol %(name)s on %(volume)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:841 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:982 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:986 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1087 +msgid "Cluster ssc is not updated. No volume stats found." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1149 +#: cinder/volume/drivers/netapp/nfs.py:1080 +msgid "Unsupported ONTAP version. ONTAP version 7.3.1 and above is supported." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1153 +#: cinder/volume/drivers/netapp/nfs.py:1084 +#: cinder/volume/drivers/netapp/utils.py:320 +msgid "Api version could not be determined." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1164 +#, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1273 +#, python-format +msgid "Error finding luns for volume %s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1390 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1393 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1456 +msgid "Volume refresh job already running. Returning..." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1462 +#, python-format +msgid "Error refreshing vol capacity. Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1470 +#, python-format +msgid "Refreshing capacity info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:104 +#: cinder/volume/drivers/netapp/nfs.py:211 +#, python-format +msgid "NFS file %s not discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:224 +#, python-format +msgid "Copied image to volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:230 +#, python-format +msgid "Registering image in cache %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:237 +#, python-format +msgid "" +"Exception while registering image %(image_id)s in cache. Exception: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:250 +#, python-format +msgid "Found cache file for image %(image_id)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:263 +#, python-format +msgid "Cloning img from cache for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:271 +msgid "Image cache cleaning in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:282 +msgid "Image cache cleaning in progress." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:293 +#, python-format +msgid "Cleaning cache for share %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:298 +#, python-format +msgid "Files to be queued for deletion %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:305 +#, python-format +msgid "Exception during cache cleaning %(share)s. Message - %(ex)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:311 +msgid "Image cache cleaning done." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:336 +#, python-format +msgid "Bytes to free %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:343 +#, python-format +msgid "Delete file path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:358 +#, python-format +msgid "Deleting file at path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:363 +#, python-format +msgid "Exception during deleting %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:395 +#, python-format +msgid "Unexpected exception in cloning image %(image_id)s. 
Message: %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:411 +#, python-format +msgid "Cloning image %s from cache" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:415 +#, python-format +msgid "Cache share: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:425 +#, python-format +msgid "Unexpected exception during image cloning in share %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:431 +#, python-format +msgid "Cloning image %s directly in share" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:436 +#, python-format +msgid "Share is cloneable %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:443 +#, python-format +msgid "Image is raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:450 +#, python-format +msgid "Image will locally be converted to raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:457 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:467 +#, python-format +msgid "Performing post clone for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:474 +msgid "NFS file could not be discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:478 +msgid "Checking file for resize" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:482 +#, python-format +msgid "Resizing file to %sG" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:488 +msgid "Resizing image file failed." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:510 +msgid "Discover file retries exhausted." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:529 +#, python-format +msgid "Image location not in the expected format %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:557 +#, python-format +msgid "Found possible share matches %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:561 +msgid "Unexpected exception while short listing used share." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:599 +#, python-format +msgid "Extending volume %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:710 +#, python-format +msgid "Shares on vserver %s will only be used for provisioning." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:714 +#: cinder/volume/drivers/netapp/nfs.py:892 +msgid "No vserver set in config. SSC will be disabled." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:757 +#, python-format +msgid "Exception creating vol %(name)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:765 +#, python-format +msgid "Volume %s could not be created on shares." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:815 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:856 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:862 +#, python-format +msgid "" +"Cloning with params volume %(volume)s, src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:924 +msgid "No cluster ssc stats found. Wait for next volume stats update." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:941 +msgid "No shares found hence skipping ssc refresh." 
+msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:978 +#: cinder/volume/drivers/netapp/nfs.py:1221 +#, python-format +msgid "Shortlisted del elg files %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:983 +#: cinder/volume/drivers/netapp/nfs.py:1226 +#, python-format +msgid "Getting file usage for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:988 +#: cinder/volume/drivers/netapp/nfs.py:1231 +#, python-format +msgid "file-usage for path %(path)s is %(bytes)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1005 +#: cinder/volume/drivers/netapp/nfs.py:1268 +#, python-format +msgid "Share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1007 +#: cinder/volume/drivers/netapp/nfs.py:1270 +#, python-format +msgid "No share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1038 +#, python-format +msgid "Found volume %(vol)s for share %(share)s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1129 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1139 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:241 +#, python-format +msgid "Unexpected error while creating ssc vol list. Message - %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:272 +#, python-format +msgid "Exception querying aggr options. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:313 +#, python-format +msgid "Exception querying sis information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:347 +#, python-format +msgid "Exception querying mirror information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:379 +#, python-format +msgid "Exception querying storage disk. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:421 +#, python-format +msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:455 +#, python-format +msgid "Successfully completed stale refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:482 +#, python-format +msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:488 +#, python-format +msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:500 +msgid "Backend not a VolumeDriver." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:502 +msgid "Backend server not NaServer." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:505 +msgid "ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:517 +msgid "refresh stale ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:616 +msgid "Fatal error: User not permitted to query NetApp volumes." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:623 +#, python-format +msgid "" +"The user does not have access or sufficient privileges to use all ssc " +"apis. The ssc features %s may not work as expected." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:122 +msgid "ems executed successfully." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:124 +#, python-format +msgid "Failed to invoke ems. 
Message : %s" +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:137 +msgid "" +"It is not the recommended way to use drivers by NetApp. Please use " +"NetAppDriver to achieve the functionality." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:160 +msgid "Requires an NaServer instance." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:317 +msgid "Unsupported Clustered Data ONTAP version." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:99 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:150 +#, python-format +msgid "Extending volume: %(id)s New size: %(size)s GB" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:166 +#, python-format +msgid "Volume %s does not exist, it seems it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:179 +#, python-format +msgid "Cannot delete snapshot %(origin): %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:190 +#, python-format +msgid "Creating temp snapshot of the original volume: %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:200 +#: cinder/volume/drivers/nexenta/nfs.py:200 +#, python-format +msgid "Volume creation failed, deleting created snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:205 +#: cinder/volume/drivers/nexenta/nfs.py:205 +#, python-format +msgid "Failed to delete zfs snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:223 +#, python-format +msgid "Enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:250 +#, python-format +msgid "Remote NexentaStor appliance at %s should be SSH-bound." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:267 +#, python-format +msgid "" +"Cannot send source snapshot %(src)s to destination %(dst)s. Reason: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:275 +#, python-format +msgid "" +"Cannot delete temporary source snapshot %(src)s on NexentaStor Appliance:" +" %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:281 +#, python-format +msgid "Cannot delete source volume %(volume)s on NexentaStor Appliance: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:318 +#, python-format +msgid "Snapshot %s does not exist, it seems it was already deleted." 
+msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:439 +#: cinder/volume/drivers/windows/windows_utils.py:230 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:449 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:461 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:471 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:481 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:514 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:522 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:83 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:88 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:89 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:90 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:96 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:85 +#, python-format +msgid "Volume %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:89 +#, python-format +msgid "Folder %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:114 +#, python-format +msgid "Creating folder on Nexenta Store %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:146 +#, python-format +msgid "Cannot destroy created folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:176 +#, python-format +msgid "Cannot destroy cloned folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:227 +#, python-format +msgid "Folder %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:237 +#: cinder/volume/drivers/nexenta/nfs.py:268 +#, python-format +msgid "Snapshot %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:302 +#, python-format +msgid "Creating regular file: %s.This may take some time." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:313 +#, python-format +msgid "Regular file: %s created." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:365 +#, python-format +msgid "Sharing folder %s on Nexenta Store" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:393 +#, python-format +msgid "Shares loaded: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/utils.py:46 +#, python-format +msgid "Invalid value: \"%s\"" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:93 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:99 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:107 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:137 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:190 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:246 +#, python-format +msgid "Snapshot info: %(name)s => %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:321 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:79 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:166 +#, python-format +msgid "Invalid hp3parclient version. Version %s or greater required." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:179 +#, python-format +msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:193 +#, python-format +msgid "HP3PARCommon %(common_ver)s, hp3parclient %(rest_ver)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:212 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:494 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:228 +#, python-format +msgid "Failed to get domain because CPG (%s) doesn't exist on array." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:247 +#, python-format +msgid "Error extending volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:347 +#, python-format +msgid "command %s failed" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:390 +#, python-format +msgid "Error running ssh command: %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:586 +#, python-format +msgid "VV Set %s does not exist." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:633 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:684 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:752 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1004 +#, python-format +msgid "Failure in update_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1019 +#, python-format +msgid "Failure in clear_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1031 +#, python-format +msgid "Error attaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1039 +#, python-format +msgid "Error detaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:124 +#, python-format +msgid "Invalid IP address format '%s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:158 +#, python-format +msgid "" +"Found invalid iSCSI IP address(s) in configuration option(s) " +"hp3par_iscsi_ips or iscsi_ip_address '%s.'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:164 +msgid "At least one valid iSCSI IP address must be set." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:266 +msgid "Least busy iSCSI port not found, using first iSCSI port in list." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:75 +#, python-format +msgid "Failure while invoking function: %(func)s. Error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:162 +#, python-format +msgid "Error while terminating session: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:165 +msgid "Successfully established connection to the server." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:172 +#, python-format +msgid "Error while logging out the user: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:218 +#, python-format +msgid "" +"Not authenticated error occurred. Will create session and try API call " +"again: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:258 +#, python-format +msgid "Task: %(task)s progress: %(prog)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:262 +#, python-format +msgid "Task %s status: success." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:266 +#: cinder/volume/drivers/vmware/api.py:271 +#, python-format +msgid "Task: %(task)s failed with error: %(err)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:290 +msgid "Lease is ready." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:294 +msgid "Lease initializing..." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:304 +#, python-format +msgid "Error: unknown lease state %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:51 +#, python-format +msgid "Read %(bytes)s out of %(max)s from ThreadSafePipe." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:56 +#, python-format +msgid "Completed transfer of size %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:102 +#, python-format +msgid "Initiating image service update on image: %(image)s with meta: %(meta)s" +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:117 +#, python-format +msgid "Glance image: %s is now active." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:123 +#, python-format +msgid "Glance image: %s is in killed state." 
+msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:132 +#, python-format +msgid "Glance image %(id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:171 +#, python-format +msgid "" +"Exception during HTTP connection close in VMwareHTTPWrite. Exception is " +"%s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:203 +#: cinder/volume/drivers/vmware/read_write_util.py:292 +msgid "Could not retrieve URL from lease." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:206 +#, python-format +msgid "Opening vmdk url: %s for write." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:231 +#, python-format +msgid "Written %s bytes to vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:242 +#: cinder/volume/drivers/vmware/read_write_util.py:318 +#, python-format +msgid "Updating progress to %s percent." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:258 +#: cinder/volume/drivers/vmware/read_write_util.py:334 +msgid "Lease released." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:260 +#: cinder/volume/drivers/vmware/read_write_util.py:336 +#, python-format +msgid "Lease is already in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:295 +#, python-format +msgid "Opening vmdk url: %s for read." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:307 +#, python-format +msgid "Read %s bytes from vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:150 +#, python-format +msgid "Error(s): %s occurred in the call to RetrievePropertiesEx." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:189 +#, python-format +msgid "No such SOAP method %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:198 +#, python-format +msgid "httplib error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:209 +#, python-format +msgid "Socket error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:218 +#, python-format +msgid "Type error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:225 +#, python-format +msgid "Error in %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:112 +#, python-format +msgid "Returning spec value %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:115 +#, python-format +msgid "Invalid spec value: %s specified." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:118 +#, python-format +msgid "Returning default spec value: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:169 +#, python-format +msgid "%s not set." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:174 +#, python-format +msgid "Successfully setup driver: %(driver)s for server: %(ip)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:219 +msgid "Backing not available, no operation to be performed." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:287 +#, python-format +msgid "" +"Unable to pick datastore to accommodate %(size)s bytes from the " +"datastores: %(dss)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:293 +#, python-format +msgid "" +"Selected datastore: %(datastore)s with %(host_count)d connected host(s) " +"for the volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:375 +#, python-format +msgid "" +"Unable to find suitable datastore for volume of size: %(vol)s GB under " +"host: %(host)s. 
More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:385 +#, python-format +msgid "Unable to find host to accommodate a disk of size: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:412 +#, python-format +msgid "" +"Unable to find suitable datastore for volume: %(vol)s under host: " +"%(host)s. More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:422 +#, python-format +msgid "Unable to create volume: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:441 +#, python-format +msgid "The instance: %s for which initialize connection is called, exists." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:448 +#, python-format +msgid "There is no backing for the volume: %s. Need to create one." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:456 +msgid "The instance for which initialize connection is called, does not exist." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:461 +#, python-format +msgid "Trying to boot from an empty volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:470 +#, python-format +msgid "" +"Returning connection_info: %(info)s for volume: %(volume)s with " +"connector: %(connector)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:518 +#, python-format +msgid "Snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:523 +#, python-format +msgid "There is no backing, so will not create snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:528 +#, python-format +msgid "Successfully created snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:549 +#, python-format +msgid "Delete snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:554 +#, python-format +msgid "There is no backing, and so there is no snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:558 +#, python-format +msgid "Successfully deleted snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:586 +#, python-format +msgid "Successfully cloned new backing: %(back)s from source VMDK file: %(vmdk)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:603 +#, python-format +msgid "" +"There is no backing for the source volume: %(svol)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:633 +#, python-format +msgid "" +"There is no backing for the source snapshot: %(snap)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:642 +#: cinder/volume/drivers/vmware/vmdk.py:982 +#, python-format +msgid "" +"There is no snapshot point for the snapshoted volume: %(snap)s. Not " +"creating any backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:678 +#, python-format +msgid "Cannot create image of disk format: %s. Only vmdk disk format is accepted." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:713 +#: cinder/volume/drivers/vmware/vmdk.py:771 +#, python-format +msgid "Fetching glance image: %(id)s to server: %(host)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:722 +#: cinder/volume/drivers/vmware/vmdk.py:792 +#, python-format +msgid "Done copying image: %(id)s to volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:725 +#, python-format +msgid "" +"Exception in copy_image_to_volume: %(excep)s. Deleting the backing: " +"%(back)s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:746 +#, python-format +msgid "Exception in _select_ds_for_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:749 +#, python-format +msgid "Selected datastore %(ds)s for new volume of size %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:784 +#, python-format +msgid "Exception in copy_image_to_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:787 +#, python-format +msgid "Deleting the backing: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:808 +#, python-format +msgid "Copy glance image: %s to create new volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:842 +msgid "Upload to glance of attached volume is not supported." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:847 +#, python-format +msgid "Copy Volume: %s to new image." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:853 +#, python-format +msgid "Backing not found, creating for volume: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:872 +#, python-format +msgid "Done copying volume %(vol)s to a new image %(img)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:922 +#, python-format +msgid "Relocating volume: %(backing)s to %(ds)s and %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:961 +#: cinder/volume/drivers/vmware/volumeops.py:630 +#, python-format +msgid "Successfully created clone: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:974 +#, python-format +msgid "" +"There is no backing for the snapshoted volume: %(snap)s. Not creating any" +" backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1010 +#, python-format +msgid "" +"There is no backing for the source volume: %(src)s. Not creating any " +"backing for volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1018 +#, python-format +msgid "Linked clone of source volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:94 +#, python-format +msgid "Downloading image: %s from glance image server as a flat vmdk file." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:107 +#: cinder/volume/drivers/vmware/vmware_images.py:126 +#, python-format +msgid "Downloaded image: %s from glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:113 +#, python-format +msgid "Downloading image: %s from glance image server using HttpNfc import." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:132 +#, python-format +msgid "Uploading image: %s to the Glance image server using HttpNfc export." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:158 +#, python-format +msgid "Uploaded image: %s to the Glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:87 +#, python-format +msgid "Did not find any backing with name: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:94 +#, python-format +msgid "Deleting the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:97 +#, python-format +msgid "Initiated deletion of VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:99 +#, python-format +msgid "Deleted the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:237 +#, python-format +msgid "There are no valid datastores attached to %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:289 +#, python-format +msgid "" +"Creating folder: %(child_folder_name)s under parent folder: " +"%(parent_folder)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:306 +#, python-format +msgid "Child folder already present: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:314 +#, python-format +msgid "Created child folder: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:365 +#, python-format +msgid "Spec for creating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:383 +#, python-format +msgid "" +"Creating volume backing name: %(name)s disk_type: %(disk_type)s size_kb: " +"%(size_kb)s at folder: %(folder)s resourse pool: %(resource_pool)s " +"datastore name: %(ds_name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:395 +#, python-format +msgid "Initiated creation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:398 +#, python-format +msgid "Successfully created volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:438 +#, python-format +msgid "Spec for relocating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:452 +#, python-format +msgid "" +"Relocating backing: %(backing)s to datastore: %(ds)s and resource pool: " +"%(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:462 +#, python-format +msgid "Initiated relocation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:464 +#, python-format +msgid "" +"Successfully relocated volume backing: %(backing)s to datastore: %(ds)s " +"and resource pool: %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:474 +#, python-format +msgid "Moving backing: %(backing)s to folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:479 +#, python-format +msgid "Initiated move of volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:482 +#, python-format +msgid "Successfully moved volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:494 +#, python-format +msgid "Snapshoting backing: %(backing)s with name: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:501 +#, python-format +msgid "Initiated snapshot of volume backing: %(backing)s named: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:505 +#, python-format +msgid "Successfully created snapshot: %(snap)s for volume backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:553 +#, python-format +msgid "Deleting the snapshot: %(name)s from backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:558 +#, python-format +msgid "" +"Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " +"delete anything." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:565 +#, python-format +msgid "Initiated snapshot: %(name)s deletion for backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:569 +#, python-format +msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:597 +#, python-format +msgid "Spec for cloning the backing: %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:613 +#, python-format +msgid "" +"Creating a clone of backing: %(back)s, named: %(name)s, clone type: " +"%(type)s from snapshot: %(snap)s on datastore: %(ds)s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:627 +#, python-format +msgid "Initiated clone of backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:638 +#, python-format +msgid "Deleting file: %(file)s under datacenter: %(dc)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:646 +#, python-format +msgid "Initiated deletion via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:648 +#, python-format +msgid "Successfully deleted file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:701 +msgid "Copying disk data before snapshot of the VM" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:711 +#, python-format +msgid "Initiated copying disk data via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:713 +#, python-format +msgid "Successfully copied disk at: %(src)s to: %(dest)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:722 +#, python-format +msgid "Deleting vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:729 +#, python-format +msgid "Initiated deleting vmdk file via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:731 +#, python-format +msgid "Deleted vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/windows/windows.py:102 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:47 +#, python-format +msgid "" +"check_for_setup_error: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:53 +msgid "check_for_setup_error: there is no ISCSI traffic listening." +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:63 +#, python-format +msgid "" +"get_host_information: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:73 +#, python-format +msgid "" +"get_host_information: the ISCSI target information could not be " +"retrieved. WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:105 +#, python-format +msgid "" +"associate_initiator_with_iscsi_target: an association between initiator: " +"%(init)s and target name: %(target)s could not be established. WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:123 +#, python-format +msgid "" +"delete_iscsi_target: error when deleting the iscsi target associated with" +" target name: %(target)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:139 +#, python-format +msgid "" +"create_volume: error when creating the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:157 +#, python-format +msgid "" +"delete_volume: error when deleting the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:177 +#, python-format +msgid "" +"create_snapshot: error when creating the snapshot name: %(vol_name)s . 
" +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:193 +#, python-format +msgid "" +"create_volume_from_snapshot: error when creating the volume name: " +"%(vol_name)s from snapshot name: %(snap_name)s. WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:208 +#, python-format +msgid "" +"delete_snapshot: error when deleting the snapshot name: %(snap_name)s . " +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:223 +#, python-format +msgid "" +"create_iscsi_target: error when creating iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:240 +#, python-format +msgid "" +"remove_iscsi_target: error when deleting iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:255 +#, python-format +msgid "" +"add_disk_to_target: error adding disk associated to volume : %(vol_name)s" +" to the target name: %(tar_name)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:273 +#, python-format +msgid "" +"copy_vhd_disk: error when copying disk from source path : %(src_path)s to" +" destination path: %(dest_path)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:290 +#, python-format +msgid "" +"extend: error when extending the volume: %(vol_name)s .WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/flows/common.py:52 +#, python-format +msgid "Restoring source %(source_volid)s status to %(status)s" +msgstr "" + +#: cinder/volume/flows/common.py:58 +#, python-format +msgid "" +"Failed setting source volume %(source_volid)s back to its initial " +"%(source_status)s status" +msgstr "" + +#: cinder/volume/flows/common.py:83 +#, python-format +msgid "Updating volume: %(volume_id)s with %(update)s due to: %(reason)s" +msgstr "" + +#: cinder/volume/flows/common.py:90 +#: cinder/volume/flows/manager/create_volume.py:676 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(update)s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:81 +#, python-format +msgid "Originating snapshot status must be one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:103 +#, python-format +msgid "" +"Unable to create a volume from an originating source volume when its " +"status is not one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:126 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than the snapshot size " +"%(snap_size)sGB. They must be >= original snapshot size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:135 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than original volume size " +"%(source_size)sGB. They must be >= original volume size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:144 +#, python-format +msgid "Volume size %(size)s must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:186 +#, python-format +msgid "" +"Size of specified image %(image_size)sGB is larger than volume size " +"%(volume_size)sGB." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:194 +#, python-format +msgid "" +"Volume size %(volume_size)sGB cannot be smaller than the image minDisk " +"size %(min_disk)sGB." 
+msgstr "" + +#: cinder/volume/flows/api/create_volume.py:212 +#, python-format +msgid "Metadata property key %s greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:217 +#, python-format +msgid "Metadata property key %s value greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:254 +#, python-format +msgid "Availability zone '%s' is invalid" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:267 +msgid "Volume must be in the same availability zone as the snapshot" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:276 +msgid "Volume must be in the same availability zone as the source volume" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:315 +msgid "Volume type will be changed to be the same as the source volume." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:463 +#, python-format +msgid "Failed destroying volume entry %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:546 +#, python-format +msgid "Failed rolling back quota for %s reservations" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:590 +#, python-format +msgid "Failed to update quota for deleting volume: %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:678 +#: cinder/volume/flows/manager/create_volume.py:192 +#, python-format +msgid "Volume %s: create failed" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:682 +msgid "Unexpected build error:" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:105 +#, python-format +msgid "" +"Volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d due to " +"%(reason)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:124 +#, python-format +msgid "Volume %s: re-scheduled" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:141 +#, python-format +msgid "Updating volume %(volume_id)s with %(update)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:146 +#, python-format +msgid "Volume %s: resetting 'creating' status failed." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:165 +#, python-format +msgid "Volume %s: rescheduling failed" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:308 +#, python-format +msgid "" +"Failed notifying about the volume action %(event)s for volume " +"%(volume_id)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:345 +#, python-format +msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:347 +#, python-format +msgid "" +"Failed updating volume %(vol_id)s metadata using the provided " +"%(src_type)s %(src_id)s metadata" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:405 +#, python-format +msgid "" +"Failed fetching snapshot %(snapshot_id)s bootable flag using the provided" +" glance snapshot %(snapshot_ref_id)s volume reference" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:418 +#, python-format +msgid "Marking volume %s as bootable." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:421 +#, python-format +msgid "Failed updating volume %(volume_id)s bootable flag to true" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:448 +#, python-format +msgid "" +"Attempting download of %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s." 
+msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:455 +#: cinder/volume/flows/manager/create_volume.py:466 +#, python-format +msgid "" +"Failed to copy image %(image_id)s to volume: %(volume_id)s, error: " +"%(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:461 +#, python-format +msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:475 +#, python-format +msgid "" +"Downloaded image %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s successfully." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:512 +#, python-format +msgid "" +"Creating volume glance metadata for volume %(volume_id)s backed by image " +"%(image_id)s with: %(vol_metadata)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:526 +#, python-format +msgid "" +"Cloning %(volume_id)s from image %(image_id)s at location " +"%(image_location)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:552 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(updates)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:574 +#, python-format +msgid "Unable to create volume. Volume driver %s not initialized" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:588 +#, python-format +msgid "" +"Volume %(volume_id)s: being created using %(functor)s with specification:" +" %(volume_spec)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:611 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with creation provided " +"model %(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:619 +#, python-format +msgid "Volume %s: creating export" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:633 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with driver provided model " +"%(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:680 +#, python-format +msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" +msgstr "" + +#~ msgid "Error retrieving volume status: %s" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get system name" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get storage pool data" +#~ msgstr "" + +#~ msgid "Cannot find any Fibre Channel HBAs" +#~ msgstr "" + +#~ msgid "Volume status must be available or error" +#~ msgstr "" + +#~ msgid "No backend config with id %s" +#~ msgstr "" + +#~ msgid "No sm_flavor called %s" +#~ msgstr "" + +#~ msgid "No sm_volume with id %s" +#~ msgstr "" + +#~ msgid "Error: %s" +#~ msgstr "" + +#~ msgid "Unexpected state while cloning %s" +#~ msgstr "" + +#~ msgid "iSCSI device not found at %s" +#~ msgstr "" + +#~ msgid "Fibre Channel device not found." +#~ msgstr "" + +#~ msgid "Uncaught exception" +#~ msgstr "" + +#~ msgid "Out reactor registered" +#~ msgstr "" + +#~ msgid "CONSUMER GOT %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT QUEUED %(data)s" +#~ msgstr "" + +#~ msgid "Could not create IPC directory %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT %(data)s" +#~ msgstr "" + +#~ msgid "May specify only one of snapshot, imageRef or source volume" +#~ msgstr "" + +#~ msgid "Volume size cannot be lesser than the Snapshot size" +#~ msgstr "" + +#~ msgid "Unable to clone volumes that are in an error state" +#~ msgstr "" + +#~ msgid "Clones currently must be >= original volume size." 
+#~ msgstr "" + +#~ msgid "Volume size '%s' must be an integer and greater than 0" +#~ msgstr "" + +#~ msgid "Size of specified image is larger than volume size." +#~ msgstr "" + +#~ msgid "Image minDisk size is larger than the volume size." +#~ msgstr "" + +#~ msgid "" +#~ msgstr "" + +#~ msgid "Availability zone is invalid" +#~ msgstr "" + +#~ msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +#~ msgstr "" + +#~ msgid "volume %s: creating from snapshot" +#~ msgstr "" + +#~ msgid "volume %s: creating from existing volume" +#~ msgstr "" + +#~ msgid "volume %s: creating from image" +#~ msgstr "" + +#~ msgid "volume %s: creating" +#~ msgstr "" + +#~ msgid "Setting volume: %s status to error after failed image copy." +#~ msgstr "" + +#~ msgid "Unexpected Error: " +#~ msgstr "" + +#~ msgid "volume %s: creating export" +#~ msgstr "" + +#~ msgid "volume %s: create failed" +#~ msgstr "" + +#~ msgid "volume %s: created successfully" +#~ msgstr "" + +#~ msgid "volume %s: Error trying to reschedule create" +#~ msgstr "" + +#~ msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +#~ msgstr "" + +#~ msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +#~ msgstr "" + +#~ msgid "Downloaded image %(image_id)s to %(volume_id)s successfully." +#~ msgstr "" + +#~ msgid "Array Mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "LUN %(lun)s of size %(size)s MB is created." +#~ msgstr "" + +#~ msgid "Array mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "Failed to attach iser target for volume %(volume_id)s." +#~ msgstr "" + +#~ msgid "Fetching %s" +#~ msgstr "" + +#~ msgid "Link Local address is not found.:%s" +#~ msgstr "" + +#~ msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +#~ msgstr "" + +#~ msgid "Started %(name)s on %(_host)s:%(_port)s" +#~ msgstr "" + +#~ msgid "Unable to find a Fibre Channel volume device" +#~ msgstr "" + +#~ msgid "Volume device not found at %s" +#~ msgstr "" + +#~ msgid "Unable to find Volume Group: %s" +#~ msgstr "" + +#~ msgid "Failed to create Volume Group: %s" +#~ msgstr "" + +#~ msgid "snapshot %(snap_name)s: creating" +#~ msgstr "" + +#~ msgid "Running with CoraidDriver for ESM EtherCLoud" +#~ msgstr "" + +#~ msgid "Update session cookie %(session)s" +#~ msgstr "" + +#~ msgid "Message : %(message)s" +#~ msgstr "" + +#~ msgid "Error while trying to set group: %(message)s" +#~ msgstr "" + +#~ msgid "Unable to find group: %(group)s" +#~ msgstr "" + +#~ msgid "ESM urlOpen error" +#~ msgstr "" + +#~ msgid "JSON Error" +#~ msgstr "" + +#~ msgid "Request without URL" +#~ msgstr "" + +#~ msgid "Configure data : %s" +#~ msgstr "" + +#~ msgid "Configure response : %s" +#~ msgstr "" + +#~ msgid "Unable to retrive volume infos for volume %(volname)s" +#~ msgstr "" + +#~ msgid "Cannot login on Coraid ESM" +#~ msgstr "" + +#~ msgid "Fail to create volume %(volname)s" +#~ msgstr "" + +#~ msgid "Failed to delete volume %(volname)s" +#~ msgstr "" + +#~ msgid "Failed to Create Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "Failed to Delete Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "Failed to Create Volume from Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "fmt = %(fmt)s backed by: %(backing_file)s" +#~ msgstr "" + +#~ msgid "Expected image to be in raw format, but is %s" +#~ msgstr "" + +#~ msgid "volume group %s doesn't exist" +#~ msgstr "" + +#~ msgid "Error retrieving volume stats: %s" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Could not get system name" +#~ msgstr "" + +#~ msgid "CPG (%s) must 
be in a domain" +#~ msgstr "" + +#~ msgid "Error populating default encryption types!" +#~ msgstr "" + +#~ msgid "Unexpected error while running command." +#~ msgstr "" + +#~ msgid "Nexenta SA returned the error" +#~ msgstr "" + +#~ msgid "Ignored target group creation error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group member addition error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LU creation error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Copying metadata from snapshot %(snap_volume_id)s to %(volume_id)s" +#~ msgstr "" + +#~ msgid "Connection to glance failed" +#~ msgstr "" + +#~ msgid "Invalid snapshot" +#~ msgstr "" + +#~ msgid "Invalid input received" +#~ msgstr "" + +#~ msgid "Invalid volume type" +#~ msgstr "" + +#~ msgid "Invalid volume" +#~ msgstr "" + +#~ msgid "Invalid host" +#~ msgstr "" + +#~ msgid "Invalid auth key" +#~ msgstr "" + +#~ msgid "Invalid metadata" +#~ msgstr "" + +#~ msgid "Invalid metadata size" +#~ msgstr "" + +#~ msgid "Migration error" +#~ msgstr "" + +#~ msgid "Quota exceeded" +#~ msgstr "" + +#~ msgid "Connection to swift failed" +#~ msgstr "" + +#~ msgid "Volume migration failed" +#~ msgstr "" + +#~ msgid "SSH command injection detected" +#~ msgstr "" + +#~ msgid "Invalid qos specs" +#~ msgstr "" + +#~ msgid "debug in callback: %s" +#~ msgstr "" + +#~ msgid "Expected object of type: %s" +#~ msgstr "" + +#~ msgid "timefunc: '%(name)s' took %(total_time).2f secs" +#~ msgstr "" + +#~ msgid "base image still has %s snapshots so not deleting base image" +#~ msgstr "" + +#~ msgid "Failed to rename migration destination volume %(vol)s to %(name)s" +#~ msgstr "" + +#~ msgid "Resize volume \"%(name)s\" to %(size)s" +#~ msgstr "" + +#~ msgid "Volume \"%(name)s\" resized. New size is %(size)s" +#~ msgstr "" + +#~ msgid "Invalid snapshot backing file format: %s" +#~ msgstr "" + +#~ msgid "Extend volume from %(old_size) to %(new_size)" +#~ msgstr "" + +#~ msgid "pool %s doesn't exist" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Could not get system name." +#~ msgstr "" + +#~ msgid "Disk not found: %s" +#~ msgstr "" + +#~ msgid "read timed out" +#~ msgstr "" + +#~ msgid "check_for_setup_error." +#~ msgstr "" + +#~ msgid "check_for_setup_error: Can not get device type." +#~ msgstr "" + +#~ msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +#~ msgstr "" + +#~ msgid "_get_device_type: Storage Pool must be configured." +#~ msgstr "" + +#~ msgid "create_volume:volume name: %s." +#~ msgstr "" + +#~ msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +#~ msgstr "" + +#~ msgid "create_export: volume name:%s" +#~ msgstr "" + +#~ msgid "create_export:Volume %(name)s does not exist." +#~ msgstr "" + +#~ msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +#~ msgstr "" + +#~ msgid "terminate_connection:Host does not exist. Host name:%(host)s." +#~ msgstr "" + +#~ msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +#~ msgstr "" + +#~ msgid "create_snapshot:Device does not support snapshot." +#~ msgstr "" + +#~ msgid "create_snapshot:Resource pool needs 1GB valid size at least." +#~ msgstr "" + +#~ msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +#~ msgstr "" + +#~ msgid "create_snapshot:Snapshot does not exist. 
Snapshot name:%(name)s" +#~ msgstr "" + +#~ msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +#~ msgstr "" + +#~ msgid "delete_snapshot:Device does not support snapshot." +#~ msgstr "" + +#~ msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +#~ msgstr "" + +#~ msgid "_check_conf_file: %s" +#~ msgstr "" + +#~ msgid "Write login information to xml error. %s" +#~ msgstr "" + +#~ msgid "_get_login_info error. %s" +#~ msgstr "" + +#~ msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +#~ msgstr "" + +#~ msgid "_get_lun_set_info:%s" +#~ msgstr "" + +#~ msgid "_get_iscsi_info:%s" +#~ msgstr "" + +#~ msgid "CLI command:%s" +#~ msgstr "" + +#~ msgid "_execute_cli:%s" +#~ msgstr "" + +#~ msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +#~ msgstr "" + +#~ msgid "_get_tgt_iqn:iSCSI IP is %s." +#~ msgstr "" + +#~ msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +#~ msgstr "" + +#~ msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +#~ msgstr "" + +#~ msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +#~ msgstr "" + +#~ msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." +#~ msgstr "" + +#~ msgid "Ignored target creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group member addition error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LU creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LUN mapping entry addition error while ensuring export" +#~ msgstr "" + +#~ msgid "Invalid source volume %(reason)s." +#~ msgstr "" + +#~ msgid "The request is invalid." +#~ msgstr "" + +#~ msgid "Volume %(volume_id)s persistence file could not be found." +#~ msgstr "" + +#~ msgid "No disk at %(location)s" +#~ msgstr "" + +#~ msgid "Class %(class_name)s could not be found: %(exception)s" +#~ msgstr "" + +#~ msgid "Action not allowed." +#~ msgstr "" + +#~ msgid "Key pair %(key_name)s already exists." +#~ msgstr "" + +#~ msgid "Migration error: %(reason)s" +#~ msgstr "" + +#~ msgid "Maximum volume/snapshot size exceeded" +#~ msgstr "" + +#~ msgid "3PAR Host already exists: %(err)s. %(info)s" +#~ msgstr "" + +#~ msgid "Backup volume %(volume_id)s type not recognised." +#~ msgstr "" + +#~ msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s" +#~ msgstr "" + +#~ msgid "ssh_read: Read SSH timeout" +#~ msgstr "" + +#~ msgid "do_setup." +#~ msgstr "" + +#~ msgid "create_volume: volume name: %s." +#~ msgstr "" + +#~ msgid "delete_volume: volume name: %s." +#~ msgstr "" + +#~ msgid "create_cloned_volume: src volume: %(src)s tgt volume: %(tgt)s" +#~ msgstr "" + +#~ msgid "create_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" +#~ msgstr "" + +#~ msgid "delete_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" +#~ msgstr "" + +#~ msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Updating volume stats" +#~ msgstr "" + +#~ msgid "restore finished." +#~ msgstr "" + +#~ msgid "Error encountered during initialization of driver: %s" +#~ msgstr "" + +#~ msgid "Unabled to update stats, driver is uninitialized" +#~ msgstr "" + +#~ msgid "Snapshot file at %s does not exist." 
+#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %s does not exist" +#~ msgstr "" + +#~ msgid "Login to 3PAR array invalid" +#~ msgstr "" + +#~ msgid "There are no datastores present under %s." +#~ msgstr "" + +#~ msgid "Size for volume: %s not found, skipping secure delete." +#~ msgstr "" + +#~ msgid "Could not find attribute for LUN named %s" +#~ msgstr "" + +#~ msgid "Cleaning up incomplete backup operations" +#~ msgstr "" + +#~ msgid "Resetting volume %s to available (was backing-up)" +#~ msgstr "" + +#~ msgid "Resetting volume %s to error_restoring (was restoring-backup)" +#~ msgstr "" + +#~ msgid "Resetting backup %s to error (was creating)" +#~ msgstr "" + +#~ msgid "Resetting backup %s to available (was restoring)" +#~ msgstr "" + +#~ msgid "Resuming delete on backup: %s" +#~ msgstr "" + +#~ msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +#~ msgstr "" + +#~ msgid "create_backup finished. backup: %s" +#~ msgstr "" + +#~ msgid "delete_backup started, backup: %s" +#~ msgstr "" + +#~ msgid "delete_backup finished, backup %s deleted" +#~ msgstr "" + +#~ msgid "JSON transfer Error" +#~ msgstr "" + +#~ msgid "create volume error: %(err)s" +#~ msgstr "" + +#~ msgid "Create snapshot error." +#~ msgstr "" + +#~ msgid "Create luncopy error." +#~ msgstr "" + +#~ msgid "_find_host_lun_id transfer data error! " +#~ msgstr "" + +#~ msgid "ssh_read: Read SSH timeout." +#~ msgstr "" + +#~ msgid "There are no hosts in the inventory." +#~ msgstr "" + +#~ msgid "Unable to create volume: %(vol)s on the hosts: %(hosts)s." +#~ msgstr "" + +#~ msgid "Successfully cloned new backing: %s." +#~ msgstr "" + +#~ msgid "Successfully reverted clone: %(clone)s to snapshot: %(snapshot)s." +#~ msgstr "" + +#~ msgid "Copying backing files from %(src)s to %(dest)s." +#~ msgstr "" + +#~ msgid "Initiated copying of backing via task: %s." +#~ msgstr "" + +#~ msgid "Successfully copied backing to %s." +#~ msgstr "" + +#~ msgid "Registering backing at path: %s to inventory." +#~ msgstr "" + +#~ msgid "Initiated registring backing, task: %s." +#~ msgstr "" + +#~ msgid "Successfully registered backing: %s." +#~ msgstr "" + +#~ msgid "Reverting backing to snapshot: %s." +#~ msgstr "" + +#~ msgid "Initiated reverting snapshot via task: %s." +#~ msgstr "" + +#~ msgid "Successfully reverted to snapshot: %s." +#~ msgstr "" + +#~ msgid "Successfully copied disk data to: %s." +#~ msgstr "" + +#~ msgid "Error(s): %s occurred in the call to RetrieveProperties." +#~ msgstr "" + +#~ msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +#~ msgstr "" + +#~ msgid "Deploy v1 of the Cinder API. " +#~ msgstr "" + +#~ msgid "Deploy v2 of the Cinder API. " +#~ msgstr "" + +#~ msgid "_read_xml:%s" +#~ msgstr "" + +#~ msgid "request ip info is %s." +#~ msgstr "" + +#~ msgid "new str info is %s." +#~ msgstr "" + +#~ msgid "Failed to create iser target for volume %(volume_id)s." +#~ msgstr "" + +#~ msgid "Failed to remove iser target for volume %(volume_id)s." 
+#~ msgstr "" + +#~ msgid "rtstool is not installed correctly" +#~ msgstr "" + +#~ msgid "Creating iser_target for: %s" +#~ msgstr "" + +#~ msgid "Failed to create iser target for volume id:%(vol_id)s: %(e)s" +#~ msgstr "" + +#~ msgid "Removing iser_target for: %s" +#~ msgstr "" + +#~ msgid "Failed to remove iser target for volume id:%(vol_id)s: %(e)s" +#~ msgstr "" + +#~ msgid "Volume %s does not exist, it seems it was already deleted" +#~ msgstr "" + +#~ msgid "Executing zfs send/recv on the appliance" +#~ msgstr "" + +#~ msgid "zfs send/recv done, new volume %s created" +#~ msgstr "" + +#~ msgid "Failed to delete temp snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" + +#~ msgid "Failed to delete zfs recv snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" + +#~ msgid "rbd export-diff failed - %s" +#~ msgstr "" + +#~ msgid "rbd import-diff failed - %s" +#~ msgstr "" + +#~ msgid "%s is not on GPFS. Perhaps GPFS not mounted." +#~ msgstr "" + +#~ msgid "Folder %s does not exist, it seems it was already deleted." +#~ msgstr "" + +#~ msgid "No 'os-update_readonly_flag' was specified in request." +#~ msgstr "" + +#~ msgid "Volume 'readonly' flag must be specified in request as a boolean." +#~ msgstr "" + +#~ msgid "ISER provider_location not stored, using discovery" +#~ msgstr "" + +#~ msgid "Could not find iSER export for volume %s" +#~ msgstr "" + +#~ msgid "ISER Discovery: Found %s" +#~ msgstr "" + +#~ msgid "Failed to access the device on the path %(path)s: %(error)s." +#~ msgstr "" + +#~ msgid "iSER device not found at %s" +#~ msgstr "" + +#~ msgid "Found iSER node %(host_device)s (after %(tries)s rescans)." +#~ msgstr "" + +#~ msgid "Skipping ensure_export. No iser_target provisioned for volume: %s" +#~ msgstr "" + +#~ msgid "Skipping remove_export. No iser_target provisioned for volume: %s" +#~ msgstr "" + +#~ msgid "Downloading image: %s from glance image server." +#~ msgstr "" + +#~ msgid "Uploading image: %s to the Glance image server." +#~ msgstr "" + +#~ msgid "Invalid request body" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: prefix %s" +#~ msgstr "" + +#~ msgid "Schedule volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete schedule volume using flow: %s" +#~ msgstr "" + +#~ msgid "Create volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete create volume workflow" +#~ msgstr "" + +#~ msgid "Expected volume result not found" +#~ msgstr "" + +#~ msgid "Manager volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete manager volume workflow" +#~ msgstr "" + +#~ msgid "Unable to update stats, driver is uninitialized" +#~ msgstr "" + +#~ msgid "Bad reponse from server: %s" +#~ msgstr "" + +#~ msgid "%(flow)s has moved into state %(state)s from state %(old_state)s" +#~ msgstr "" + +#~ msgid "No request spec, will not reschedule" +#~ msgstr "" + +#~ msgid "No retry filter property or associated retry info, will not reschedule" +#~ msgstr "" + +#~ msgid "Retry info not present, will not reschedule" +#~ msgstr "" + +#~ msgid "Clear capabilities" +#~ msgstr "" + +#~ msgid "This usually means the volume was never succesfully created." +#~ msgstr "" + +#~ msgid "setting LU uppper (end) limit to %s" +#~ msgstr "" + +#~ msgid "Can't find lun or lun goup in array" +#~ msgstr "" + +#~ msgid "Volume to be restored to is smaller than the backup to be restored" +#~ msgstr "" + +#~ msgid "Volume driver '%(driver)s' not initialized." 
+#~ msgstr "" + +#~ msgid "in looping call" +#~ msgstr "" + +#~ msgid "Is the appropriate service running?" +#~ msgstr "" + +#~ msgid "Could not find another host" +#~ msgstr "" + +#~ msgid "Not enough allocatable volume gigabytes remaining" +#~ msgstr "" + +#~ msgid "Unable to update stats on non-intialized Volume Group: %s" +#~ msgstr "" + +#~ msgid "do_setup: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "migrate_volume started with more than one vdisk copy" +#~ msgstr "" + +#~ msgid "migrate_volume: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "Selected datastore: %s for the volume." +#~ msgstr "" + +#~ msgid "There are no valid datastores present under %s." +#~ msgstr "" + +#~ msgid "Unable to create volume, driver not initialized" +#~ msgstr "" + +#~ msgid "Migration %(migration_id)s could not be found." +#~ msgstr "" + +#~ msgid "Bad driver response status: %(status)s" +#~ msgstr "" + +#~ msgid "Instance %(instance_id)s could not be found." +#~ msgstr "" + +#~ msgid "Volume retype failed: %(reason)s" +#~ msgstr "" + +#~ msgid "SIGTERM received" +#~ msgstr "" + +#~ msgid "Child %(pid)d exited with status %(code)d" +#~ msgstr "" + +#~ msgid "_wait_child %d" +#~ msgstr "" + +#~ msgid "wait wrap.failed %s" +#~ msgstr "" + +#~ msgid "" +#~ "Report interval must be less than " +#~ "service down time. Current config: " +#~ ". Setting " +#~ "service_down_time to: %(new_service_down_time)s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target for volume %(name)s." +#~ msgstr "" + +#~ msgid "Updating iscsi target: %s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target %(name)s: %(e)s" +#~ msgstr "" + +#~ msgid "Caught '%(exception)s' exception." +#~ msgstr "" + +#~ msgid "Get code level failed" +#~ msgstr "" + +#~ msgid "do_setup: Could not get system name" +#~ msgstr "" + +#~ msgid "Failed to get license information." +#~ msgstr "" + +#~ msgid "do_setup: No configured nodes" +#~ msgstr "" + +#~ msgid "enter: _get_chap_secret_for_host: host name %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_chap_secret_for_host: host name " +#~ "%(host_name)s with secret %(chap_secret)s" +#~ msgstr "" + +#~ msgid "" +#~ "_create_host: Cannot clean host name. " +#~ "Host name is not unicode or string" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: %s" +#~ msgstr "" + +#~ msgid "leave: _get_host_from_connector: host %s" +#~ msgstr "" + +#~ msgid "enter: _create_host: host %s" +#~ msgstr "" + +#~ msgid "_create_host: No connector ports" +#~ msgstr "" + +#~ msgid "leave: _create_host: host %(host)s - %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +#~ msgstr "" + +#~ msgid "" +#~ "storwize_svc_multihostmap_enabled is set to " +#~ "False, Not allow multi host mapping" +#~ msgstr "" + +#~ msgid "volume %s mapping to multi host" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _map_vol_to_host: LUN %(result_lun)s, " +#~ "volume %(volume_name)s, host %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "leave: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "_create_host failed to return the host name." +#~ msgstr "" + +#~ msgid "_get_host_from_connector failed to return the host name for connector" +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to any host found." +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: Multiple mappings of " +#~ "volume %(vol_name)s found, no host " +#~ "specified." 
+#~ msgstr ""
+
+#~ msgid ""
+#~ "terminate_connection: No mapping of volume "
+#~ "%(vol_name)s to host %(host_name)s found"
+#~ msgstr ""
+
+#~ msgid "protocol must be specified as ' iSCSI' or ' FC'"
+#~ msgstr ""
+
+#~ msgid "enter: _create_vdisk: vdisk %s "
+#~ msgstr ""
+
+#~ msgid ""
+#~ "_create_vdisk %(name)s - did not find success message in CLI output.\n"
+#~ " stdout: %(out)s\n"
+#~ " stderr: %(err)s"
+#~ msgstr ""
+
+#~ msgid ""
+#~ "create FC mapping from %(source)s to "
+#~ "%(target)s - did not find success "
+#~ "message in CLI output.\n"
+#~ " stdout: %(out)s\n"
+#~ " stderr: %(err)s\n"
+#~ msgstr ""
+
+#~ msgid ""
+#~ "create FC mapping from %(source)s to "
+#~ "%(target)s - did not find mapping "
+#~ "id in CLI output.\n"
+#~ " stdout: %(out)s\n"
+#~ " stderr: %(err)s\n"
+#~ msgstr ""
+
+#~ msgid ""
+#~ "_prepare_fc_map: Failed to prepare FlashCopy"
+#~ " from %(source)s to %(target)s.\n"
+#~ "stdout: %(out)s\n"
+#~ " stderr: %(err)s"
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Unexecpted mapping status %(status)s for "
+#~ "mapping %(id)s. Attributes: %(attr)s"
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Mapping %(id)s prepare failed to "
+#~ "complete within the allotted %(to)d "
+#~ "seconds timeout. Terminating."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "_prepare_fc_map: Failed to start FlashCopy "
+#~ "from %(source)s to %(target)s with "
+#~ "exception %(ex)s"
+#~ msgstr ""
+
+#~ msgid "_prepare_fc_map: %s"
+#~ msgstr ""
+
+#~ msgid ""
+#~ "_start_fc_map: Failed to start FlashCopy "
+#~ "from %(source)s to %(target)s.\n"
+#~ "stdout: %(out)s\n"
+#~ " stderr: %(err)s"
+#~ msgstr ""
+
+#~ msgid ""
+#~ "enter: _run_flashcopy: execute FlashCopy from"
+#~ " source %(source)s to target %(target)s"
+#~ msgstr ""
+
+#~ msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s"
+#~ msgstr ""
+
+#~ msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s"
+#~ msgstr ""
+
+#~ msgid "_create_copy: Source vdisk %(src_vdisk)s (%(src_id)s) does not exist"
+#~ msgstr ""
+
+#~ msgid ""
+#~ "_create_copy: cannot get source vdisk "
+#~ "%(src)s capacity from vdisk attributes "
+#~ "%(attr)s"
+#~ msgstr ""
+
+#~ msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s"
+#~ msgstr ""
+
+#~ msgid "enter: _get_flashcopy_mapping_attributes: mapping %s"
+#~ msgstr ""
+
+#~ msgid ""
+#~ "leave: _get_flashcopy_mapping_attributes: mapping "
+#~ "%(fc_map_id)s, attributes %(attributes)s"
+#~ msgstr ""
+
+#~ msgid "enter: _is_vdisk_defined: vdisk %s "
+#~ msgstr ""
+
+#~ msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s "
+#~ msgstr ""
+
+#~ msgid "enter: _delete_vdisk: vdisk %s"
+#~ msgstr ""
+
+#~ msgid "warning: Tried to delete vdisk %s but it does not exist."
+#~ msgstr ""
+
+#~ msgid "leave: _delete_vdisk: vdisk %s"
+#~ msgstr ""
+
+#~ msgid ""
+#~ "_add_vdisk_copy %(name)s - did not find"
+#~ " success message in CLI output.\n"
+#~ " stdout: %(out)s\n"
+#~ " stderr: %(err)s"
+#~ msgstr ""
+
+#~ msgid "_get_vdisk_copy_attrs: Could not get vdisk copy data"
+#~ msgstr ""
+
+#~ msgid "_get_pool_attrs: Pool %s does not exist"
+#~ msgstr ""
+
+#~ msgid "enter: _execute_command_and_parse_attributes: command %s"
+#~ msgstr ""
+
+#~ msgid ""
+#~ "leave: _execute_command_and_parse_attributes:\n"
+#~ "command: %(cmd)s\n"
+#~ "attributes: %(attr)s"
+#~ msgstr ""
+
+#~ msgid ""
+#~ "_get_hdr_dic: attribute headers and values do not match.\n"
+#~ " Headers: %(header)s\n"
+#~ " Values: %(row)s"
+#~ msgstr ""
+
+#~ msgid ""
+#~ "%(fun)s: Failed with unexpected CLI output.\n"
+#~ " Command: %(cmd)s\n"
+#~ "stdout: %(out)s\n"
+#~ "stderr: %(err)s\n"
+#~ msgstr ""
+
+#~ msgid ""
+#~ "%(fun)s: Failed with unexpected CLI output.\n"
+#~ " Command: %(cmd)s\n"
+#~ " stdout: %(out)s\n"
+#~ " stderr: %(err)s"
+#~ msgstr ""
+
+#~ msgid "Did not find expected column in %(fun)s: %(hdr)s"
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Volume size %(size)s cannot be lesser"
+#~ " than the snapshot size %(snap_size)s. "
+#~ "They must be >= original snapshot "
+#~ "size."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Clones currently disallowed when %(size)s "
+#~ "< %(source_size)s. They must be >= "
+#~ "original volume size."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Size of specified image %(image_size)s "
+#~ "is larger than volume size "
+#~ "%(volume_size)s."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Image minDisk size %(min_disk)s is "
+#~ "larger than the volume size "
+#~ "%(volume_size)s."
+#~ msgstr ""
+
+#~ msgid "Updating volume %(volume_id)s with %(update)s"
+#~ msgstr ""
+
+#~ msgid "Volume %s: resetting 'creating' status failed"
+#~ msgstr ""
+
+#~ msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s"
+#~ msgstr ""
+
+#~ msgid "Marking volume %s as bootable"
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Attempting download of %(image_id)s "
+#~ "(%(image_location)s) to volume %(volume_id)s"
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Downloaded image %(image_id)s (%(image_location)s)"
+#~ " to volume %(volume_id)s successfully"
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Creating volume glance metadata for "
+#~ "volume %(volume_id)s backed by image "
+#~ "%(image_id)s with: %(vol_metadata)s"
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Cloning %(volume_id)s from image %(image_id)s"
+#~ " at location %(image_location)s"
+#~ msgstr ""
+
diff --git a/cinder/locale/ne/LC_MESSAGES/cinder.po b/cinder/locale/ne/LC_MESSAGES/cinder.po
new file mode 100644
index 0000000000..c8e0604175
--- /dev/null
+++ b/cinder/locale/ne/LC_MESSAGES/cinder.po
@@ -0,0 +1,10448 @@
+# Nepali translations for cinder.
+# Copyright (C) 2013 ORGANIZATION
+# This file is distributed under the same license as the cinder project.
+#
+# Translators:
+msgid ""
+msgstr ""
+"Project-Id-Version: Cinder\n"
+"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
+"POT-Creation-Date: 2014-02-03 06:16+0000\n"
+"PO-Revision-Date: 2013-08-30 22:38+0000\n"
+"Last-Translator: daisy.ycguo \n"
+"Language-Team: Nepali "
+"(http://www.transifex.com/projects/p/openstack/language/ne/)\n"
+"Plural-Forms: nplurals=2; plural=(n != 1)\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=utf-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 1.3\n"
+
+#: cinder/context.py:61
+#, python-format
+msgid "Arguments dropped when creating context: %s"
+msgstr ""
+
+#: cinder/context.py:102
+#, python-format
+msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r"
+msgstr ""
+
+#: cinder/exception.py:66 cinder/brick/exception.py:33
+msgid "An unknown exception occurred."
+msgstr ""
+
+#: cinder/exception.py:88 cinder/openstack/common/rpc/common.py:86
+msgid "Exception in string format operation"
+msgstr ""
+
+#: cinder/exception.py:107
+#, python-format
+msgid "Bad or unexpected response from the storage volume backend API: %(data)s"
+msgstr ""
+
+#: cinder/exception.py:112
+#, python-format
+msgid "Volume driver reported an error: %(message)s"
+msgstr ""
+
+#: cinder/exception.py:116
+#, python-format
+msgid "Backup driver reported an error: %(message)s"
+msgstr ""
+
+#: cinder/exception.py:120
+#, python-format
+msgid "Connection to glance failed: %(reason)s"
+msgstr ""
+
+#: cinder/exception.py:124
+msgid "Not authorized."
+msgstr ""
+
+#: cinder/exception.py:129
+msgid "User does not have admin privileges"
+msgstr ""
+
+#: cinder/exception.py:133
+#, python-format
+msgid "Policy doesn't allow %(action)s to be performed."
+msgstr ""
+
+#: cinder/exception.py:137
+#, python-format
+msgid "Not authorized for image %(image_id)s."
+msgstr ""
+
+#: cinder/exception.py:141
+msgid "Volume driver not ready."
+msgstr ""
+
+#: cinder/exception.py:145 cinder/brick/exception.py:74
+msgid "Unacceptable parameters."
+msgstr ""
+
+#: cinder/exception.py:150
+#, python-format
+msgid "Invalid snapshot: %(reason)s"
+msgstr ""
+
+#: cinder/exception.py:154
+#, python-format
+msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s."
+msgstr ""
+
+#: cinder/exception.py:159
+#, python-format
+msgid "Volume %(volume_id)s is still attached, detach volume first."
+msgstr ""
+
+#: cinder/exception.py:163
+msgid "Failed to load data into json format"
+msgstr ""
+
+#: cinder/exception.py:167
+msgid "The results are invalid."
+msgstr ""
+
+#: cinder/exception.py:171
+#, python-format
+msgid "Invalid input received: %(reason)s"
+msgstr ""
+
+#: cinder/exception.py:175
+#, python-format
+msgid "Invalid volume type: %(reason)s"
+msgstr ""
+
+#: cinder/exception.py:179
+#, python-format
+msgid "Invalid volume: %(reason)s"
+msgstr ""
+
+#: cinder/exception.py:183
+#, python-format
+msgid "Invalid content type %(content_type)s."
+msgstr ""
+
+#: cinder/exception.py:187
+#, python-format
+msgid "Invalid host: %(reason)s"
+msgstr ""
+
+#: cinder/exception.py:193 cinder/brick/exception.py:81
+#, python-format
+msgid "%(err)s"
+msgstr ""
+
+#: cinder/exception.py:197
+#, python-format
+msgid "Invalid auth key: %(reason)s"
+msgstr ""
+
+#: cinder/exception.py:201
+#, python-format
+msgid "Value \"%(value)s\" is not valid for configuration option \"%(option)s\""
+msgstr ""
+
+#: cinder/exception.py:206
+msgid "Service is unavailable at this time."
+msgstr ""
+
+#: cinder/exception.py:210
+#, python-format
+msgid "Image %(image_id)s is unacceptable: %(reason)s"
+msgstr ""
+
+#: cinder/exception.py:214
+#, python-format
+msgid "The device in the path %(path)s is unavailable: %(reason)s"
+msgstr ""
+
+#: cinder/exception.py:218
+#, python-format
+msgid "Expected a uuid but received %(uuid)s."
+msgstr ""
+
+#: cinder/exception.py:222 cinder/brick/exception.py:68
+msgid "Resource could not be found."
+msgstr ""
+
+#: cinder/exception.py:228
+#, python-format
+msgid "Volume %(volume_id)s could not be found."
+msgstr ""
+
+#: cinder/exception.py:232
+#, python-format
+msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s."
+msgstr ""
+
+#: cinder/exception.py:237
+#, python-format
+msgid ""
+"Volume %(volume_id)s has no administration metadata with key "
+"%(metadata_key)s."
+msgstr ""
+
+#: cinder/exception.py:242
+#, python-format
+msgid "Invalid metadata: %(reason)s"
+msgstr ""
+
+#: cinder/exception.py:246
+#, python-format
+msgid "Invalid metadata size: %(reason)s"
+msgstr ""
+
+#: cinder/exception.py:250
+#, python-format
+msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s."
+msgstr ""
+
+#: cinder/exception.py:255
+#, python-format
+msgid "Volume type %(volume_type_id)s could not be found."
+msgstr ""
+
+#: cinder/exception.py:259
+#, python-format
+msgid "Volume type with name %(volume_type_name)s could not be found."
+msgstr ""
+
+#: cinder/exception.py:264
+#, python-format
+msgid ""
+"Volume Type %(volume_type_id)s has no extra specs with key "
+"%(extra_specs_key)s."
+msgstr ""
+
+#: cinder/exception.py:269
+#, python-format
+msgid ""
+"Volume Type %(volume_type_id)s deletion is not allowed with volumes "
+"present with the type."
+msgstr ""
+
+#: cinder/exception.py:274
+#, python-format
+msgid "Snapshot %(snapshot_id)s could not be found."
+msgstr ""
+
+#: cinder/exception.py:278
+#, python-format
+msgid "deleting volume %(volume_name)s that has snapshot"
+msgstr ""
+
+#: cinder/exception.py:282
+#, python-format
+msgid "deleting snapshot %(snapshot_name)s that has dependent volumes"
+msgstr ""
+
+#: cinder/exception.py:287
+#, python-format
+msgid "No target id found for volume %(volume_id)s."
+msgstr ""
+
+#: cinder/exception.py:291
+#, python-format
+msgid "Invalid image href %(image_href)s."
+msgstr ""
+
+#: cinder/exception.py:295
+#, python-format
+msgid "Image %(image_id)s could not be found."
+msgstr ""
+
+#: cinder/exception.py:299
+#, python-format
+msgid "Service %(service_id)s could not be found."
+msgstr ""
+
+#: cinder/exception.py:303
+#, python-format
+msgid "Host %(host)s could not be found."
+msgstr ""
+
+#: cinder/exception.py:307
+#, python-format
+msgid "Scheduler Host Filter %(filter_name)s could not be found."
+msgstr ""
+
+#: cinder/exception.py:311
+#, python-format
+msgid "Scheduler Host Weigher %(weigher_name)s could not be found."
+msgstr ""
+
+#: cinder/exception.py:315
+#, python-format
+msgid "Could not find binary %(binary)s on host %(host)s."
+msgstr ""
+
+#: cinder/exception.py:319
+#, python-format
+msgid "Invalid reservation expiration %(expire)s."
+msgstr ""
+
+#: cinder/exception.py:323
+#, python-format
+msgid ""
+"Change would make usage less than 0 for the following resources: "
+"%(unders)s"
+msgstr ""
+
+#: cinder/exception.py:328
+msgid "Quota could not be found"
+msgstr ""
+
+#: cinder/exception.py:332
+#, python-format
+msgid "Unknown quota resources %(unknown)s."
+msgstr ""
+
+#: cinder/exception.py:336
+#, python-format
+msgid "Quota for project %(project_id)s could not be found."
+msgstr ""
+
+#: cinder/exception.py:340
+#, python-format
+msgid "Quota class %(class_name)s could not be found."
+msgstr ""
+
+#: cinder/exception.py:344
+#, python-format
+msgid "Quota usage for project %(project_id)s could not be found."
+msgstr ""
+
+#: cinder/exception.py:348
+#, python-format
+msgid "Quota reservation %(uuid)s could not be found."
+msgstr ""
+
+#: cinder/exception.py:352
+#, python-format
+msgid "Quota exceeded for resources: %(overs)s"
+msgstr ""
+
+#: cinder/exception.py:356
+#, python-format
+msgid "File %(file_path)s could not be found."
+msgstr ""
+
+#: cinder/exception.py:365
+#, python-format
+msgid "Volume Type %(id)s already exists."
+msgstr ""
+
+#: cinder/exception.py:369
+#, python-format
+msgid "Volume type encryption for type %(type_id)s already exists."
+msgstr ""
+
+#: cinder/exception.py:373
+#, python-format
+msgid "Malformed message body: %(reason)s"
+msgstr ""
+
+#: cinder/exception.py:377
+#, python-format
+msgid "Could not find config at %(path)s"
+msgstr ""
+
+#: cinder/exception.py:381
+#, python-format
+msgid "Could not find parameter %(param)s"
+msgstr ""
+
+#: cinder/exception.py:385
+#, python-format
+msgid "Could not load paste app '%(name)s' from %(path)s"
+msgstr ""
+
+#: cinder/exception.py:389
+#, python-format
+msgid "No valid host was found. %(reason)s"
+msgstr ""
+
+#: cinder/exception.py:398
+#, python-format
+msgid "Host %(host)s is not up or doesn't exist."
+msgstr ""
+
+#: cinder/exception.py:402
+#, python-format
+msgid "Quota exceeded: code=%(code)s"
+msgstr ""
+
+#: cinder/exception.py:409
+#, python-format
+msgid ""
+"Requested volume or snapshot exceeds allowed Gigabytes quota. Requested "
+"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed."
+msgstr ""
+
+#: cinder/exception.py:415
+#, python-format
+msgid "Maximum number of volumes allowed (%(allowed)d) exceeded"
+msgstr ""
+
+#: cinder/exception.py:419
+#, python-format
+msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded"
+msgstr ""
+
+#: cinder/exception.py:423
+#, python-format
+msgid "Detected more than one volume with name %(vol_name)s"
+msgstr ""
+
+#: cinder/exception.py:427
+#, python-format
+msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s"
+msgstr ""
+
+#: cinder/exception.py:432
+#, python-format
+msgid "Unknown or unsupported command %(cmd)s"
+msgstr ""
+
+#: cinder/exception.py:436
+#, python-format
+msgid "Malformed response to command %(cmd)s: %(reason)s"
+msgstr ""
+
+#: cinder/exception.py:440
+#, python-format
+msgid "Operation failed with status=%(status)s. Full dump: %(data)s"
+msgstr ""
+
+#: cinder/exception.py:444
+#, python-format
+msgid ""
+"Glance metadata cannot be updated, key %(key)s exists for volume id "
+"%(volume_id)s"
+msgstr ""
+
+#: cinder/exception.py:449
+#, python-format
+msgid "Glance metadata for volume/snapshot %(id)s cannot be found."
+msgstr ""
+
+#: cinder/exception.py:453
+#, python-format
+msgid "Failed to export for volume: %(reason)s"
+msgstr ""
+
+#: cinder/exception.py:457
+#, python-format
+msgid "Failed to create metadata for volume: %(reason)s"
+msgstr ""
+
+#: cinder/exception.py:461
+#, python-format
+msgid "Failed to update metadata for volume: %(reason)s"
+msgstr ""
+
+#: cinder/exception.py:465
+#, python-format
+msgid "Failed to copy metadata to volume: %(reason)s"
+msgstr ""
+
+#: cinder/exception.py:469
+#, python-format
+msgid "Failed to copy image to volume: %(reason)s"
+msgstr ""
+
+#: cinder/exception.py:473
+msgid "Invalid Ceph args provided for backup rbd operation"
+msgstr ""
+
+#: cinder/exception.py:477
+msgid "An error has occurred during backup operation"
+msgstr ""
+
+#: cinder/exception.py:481
+msgid "Backup RBD operation failed"
+msgstr ""
+
+#: cinder/exception.py:485
+#, python-format
+msgid "Backup %(backup_id)s could not be found."
+msgstr ""
+
+#: cinder/exception.py:489
+msgid "Failed to identify volume backend."
+msgstr ""
+
+#: cinder/exception.py:493
+#, python-format
+msgid "Invalid backup: %(reason)s"
+msgstr ""
+
+#: cinder/exception.py:497
+#, python-format
+msgid "Connection to swift failed: %(reason)s"
+msgstr ""
+
+#: cinder/exception.py:501
+#, python-format
+msgid "Transfer %(transfer_id)s could not be found."
+msgstr ""
+
+#: cinder/exception.py:505
+#, python-format
+msgid "Volume migration failed: %(reason)s"
+msgstr ""
+
+#: cinder/exception.py:509
+#, python-format
+msgid "SSH command injection detected: %(command)s"
+msgstr ""
+
+#: cinder/exception.py:513
+#, python-format
+msgid "QoS Specs %(specs_id)s already exists."
+msgstr ""
+
+#: cinder/exception.py:517
+#, python-format
+msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s."
+msgstr ""
+
+#: cinder/exception.py:522
+#, python-format
+msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s."
+msgstr ""
+
+#: cinder/exception.py:527
+#, python-format
+msgid "No such QoS spec %(specs_id)s."
+msgstr ""
+
+#: cinder/exception.py:531
+#, python-format
+msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s."
+msgstr ""
+
+#: cinder/exception.py:536
+#, python-format
+msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s."
+msgstr ""
+
+#: cinder/exception.py:541
+#, python-format
+msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s."
+msgstr ""
+
+#: cinder/exception.py:546
+#, python-format
+msgid "Invalid qos specs: %(reason)s"
+msgstr ""
+
+#: cinder/exception.py:550
+#, python-format
+msgid "QoS Specs %(specs_id)s is still associated with entities."
+msgstr ""
+
+#: cinder/exception.py:554
+#, python-format
+msgid "key manager error: %(reason)s"
+msgstr ""
+
+#: cinder/exception.py:560
+msgid "Coraid Cinder Driver exception."
+msgstr ""
+
+#: cinder/exception.py:564
+msgid "Failed to encode json data."
+msgstr ""
+
+#: cinder/exception.py:568
+msgid "Login on ESM failed."
+msgstr ""
+
+#: cinder/exception.py:572
+msgid "Relogin on ESM failed."
+msgstr ""
+
+#: cinder/exception.py:576
+#, python-format
+msgid "Group with name \"%(group_name)s\" not found."
+msgstr ""
+
+#: cinder/exception.py:580
+#, python-format
+msgid "ESM configure request failed: %(message)s."
+msgstr ""
+
+#: cinder/exception.py:584
+#, python-format
+msgid "Coraid ESM not available with reason: %(reason)s."
+msgstr ""
+
+#: cinder/exception.py:589
+msgid "Zadara Cinder Driver exception."
+msgstr ""
+
+#: cinder/exception.py:593
+#, python-format
+msgid "Unable to create server object for initiator %(name)s"
+msgstr ""
+
+#: cinder/exception.py:597
+#, python-format
+msgid "Unable to find server object for initiator %(name)s"
+msgstr ""
+
+#: cinder/exception.py:601
+msgid "Unable to find any active VPSA controller"
+msgstr ""
+
+#: cinder/exception.py:605
+#, python-format
+msgid "Failed to retrieve attachments for volume %(name)s"
+msgstr ""
+
+#: cinder/exception.py:609
+#, python-format
+msgid "Invalid attachment info for volume %(name)s: %(reason)s"
+msgstr ""
+
+#: cinder/exception.py:613
+#, python-format
+msgid "Bad HTTP response status %(status)s"
+msgstr ""
+
+#: cinder/exception.py:618
+msgid "Bad response from SolidFire API"
+msgstr ""
+
+#: cinder/exception.py:622
+msgid "SolidFire Cinder Driver exception"
+msgstr ""
+
+#: cinder/exception.py:626
+#, python-format
+msgid "Error in SolidFire API response: data=%(data)s"
+msgstr ""
+
+#: cinder/exception.py:630
+#, python-format
+msgid "Unable to locate account %(account_name)s on Solidfire device"
+msgstr ""
+
+#: cinder/exception.py:636
+#, python-format
+msgid "Invalid 3PAR Domain: %(err)s"
+msgstr ""
+
+#: cinder/exception.py:641
+msgid "Unknown NFS exception"
+msgstr ""
+
+#: cinder/exception.py:645
+msgid "No mounted NFS shares found"
+msgstr ""
+
+#: cinder/exception.py:649 cinder/exception.py:662
+#, python-format
+msgid "There is no share which can host %(volume_size)sG"
+msgstr ""
+
+#: cinder/exception.py:654
+msgid "Unknown Gluster exception"
+msgstr ""
+
+#: cinder/exception.py:658
+msgid "No mounted Gluster shares found"
+msgstr ""
+
+#: cinder/manager.py:133
+msgid "Notifying Schedulers of capabilities ..."
+msgstr ""
+
+#: cinder/policy.py:30
+msgid "JSON file representing policy"
+msgstr ""
+
+#: cinder/policy.py:33
+msgid "Rule checked when requested rule is not found"
+msgstr ""
+
+#: cinder/quota.py:105
+#, python-format
+msgid ""
+"Default quota for resource: %(res)s is set by the default quota flag: "
+"quota_%(res)s, it is now deprecated. Please use the the default quota "
+"class for default quota."
+msgstr ""
+
+#: cinder/quota.py:748
+#, python-format
+msgid "Created reservations %s"
+msgstr ""
+
+#: cinder/quota.py:770
+#, python-format
+msgid "Failed to commit reservations %s"
+msgstr ""
+
+#: cinder/quota.py:790
+#, python-format
+msgid "Failed to roll back reservations %s"
+msgstr ""
+
+#: cinder/quota.py:876
+msgid "Cannot register resource"
+msgstr ""
+
+#: cinder/quota.py:879
+msgid "Cannot register resources"
+msgstr ""
+
+#: cinder/quota_utils.py:46
+#, python-format
+msgid ""
+"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume - "
+"(%(d_consumed)dG of %(d_quota)dG already consumed)"
+msgstr ""
+
+#: cinder/quota_utils.py:56 cinder/transfer/api.py:168
+#: cinder/volume/flows/api/create_volume.py:520
+#, python-format
+msgid ""
+"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d "
+"volumes already consumed)"
+msgstr ""
+
+#: cinder/service.py:95
+#, python-format
+msgid "Starting %(topic)s node (version %(version_string)s)"
+msgstr ""
+
+#: cinder/service.py:108 cinder/openstack/common/rpc/service.py:47
+#, python-format
+msgid "Creating Consumer connection for Service %s"
+msgstr ""
+
+#: cinder/service.py:148
+#, python-format
+msgid ""
+"Report interval must be less than service down time. Current config "
+"service_down_time: %(service_down_time)s, report_interval for this: "
+"service is: %(report_interval)s. Setting global service_down_time to: "
+"%(new_down_time)s"
+msgstr ""
+
+#: cinder/service.py:216
+msgid "Service killed that has no database entry"
+msgstr ""
+
+#: cinder/service.py:255
+msgid "The service database object disappeared, Recreating it."
+msgstr ""
+
+#: cinder/service.py:270
+msgid "Recovered model server connection!"
+msgstr ""
+
+#: cinder/service.py:276
+msgid "model server went away"
+msgstr ""
+
+#: cinder/service.py:298
+#, python-format
+msgid ""
+"Value of config option %(name)s_workers must be integer greater than 1. "
+"Input value ignored."
+msgstr ""
+
+#: cinder/service.py:373
+msgid "serve() can only be called once"
+msgstr ""
+
+#: cinder/service.py:379 cinder/openstack/common/service.py:166
+#: cinder/openstack/common/service.py:384
+msgid "Full set of CONF:"
+msgstr ""
+
+#: cinder/service.py:387
+#, python-format
+msgid "%s : FLAG SET "
+msgstr ""
+
+#: cinder/utils.py:96
+#, python-format
+msgid "Can not translate %s to integer."
+msgstr ""
+
+#: cinder/utils.py:127
+#, python-format
+msgid "May specify only one of %s"
+msgstr ""
+
+#: cinder/utils.py:212
+msgid "Specify a password or private_key"
+msgstr ""
+
+#: cinder/utils.py:228
+#, python-format
+msgid "Error connecting via ssh: %s"
+msgstr ""
+
+#: cinder/utils.py:412
+#, python-format
+msgid "Invalid backend: %s"
+msgstr ""
+
+#: cinder/utils.py:423
+#, python-format
+msgid "backend %s"
+msgstr ""
+
+#: cinder/utils.py:698
+#, python-format
+msgid "Could not remove tmpdir: %s"
+msgstr ""
+
+#: cinder/utils.py:759
+#, python-format
+msgid "Volume driver %s not initialized"
+msgstr ""
+
+#: cinder/wsgi.py:127 cinder/openstack/common/sslutils.py:50
+#, python-format
+msgid "Unable to find cert_file : %s"
+msgstr ""
+
+#: cinder/wsgi.py:130 cinder/openstack/common/sslutils.py:53
+#, python-format
+msgid "Unable to find ca_file : %s"
+msgstr ""
+
+#: cinder/wsgi.py:133 cinder/openstack/common/sslutils.py:56
+#, python-format
+msgid "Unable to find key_file : %s"
+msgstr ""
+
+#: cinder/wsgi.py:136 cinder/openstack/common/sslutils.py:59
+msgid ""
+"When running server in SSL mode, you must specify both a cert_file and "
+"key_file option value in your configuration file"
+msgstr ""
+
+#: cinder/wsgi.py:169
+#, python-format
+msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds"
+msgstr ""
+
+#: cinder/wsgi.py:206
+#, python-format
+msgid "Started %(name)s on %(host)s:%(port)s"
+msgstr ""
+
+#: cinder/wsgi.py:226
+msgid "Stopping WSGI server."
+msgstr ""
+
+#: cinder/wsgi.py:244
+msgid "WSGI server has stopped."
+msgstr ""
+
+#: cinder/wsgi.py:313
+msgid "You must implement __call__"
+msgstr ""
+
+#: cinder/api/auth.py:26
+msgid ""
+"cinder.api.auth:CinderKeystoneContext is deprecated. Please use "
+"cinder.api.middleware.auth:CinderKeystoneContext instead."
+msgstr ""
+
+#: cinder/api/auth.py:34
+msgid ""
+"cinder.api.auth:pipeline_factory is deprecated. Please use "
+"cinder.api.middleware.auth:pipeline_factory instead."
+msgstr ""
+
+#: cinder/api/common.py:92 cinder/api/common.py:126 cinder/volume/api.py:266
+msgid "limit param must be an integer"
+msgstr ""
+
+#: cinder/api/common.py:95 cinder/api/common.py:130 cinder/volume/api.py:263
+msgid "limit param must be positive"
+msgstr ""
+
+#: cinder/api/common.py:120
+msgid "offset param must be an integer"
+msgstr ""
+
+#: cinder/api/common.py:134
+msgid "offset param must be positive"
+msgstr ""
+
+#: cinder/api/common.py:162
+#, python-format
+msgid "marker [%s] not found"
+msgstr ""
+
+#: cinder/api/common.py:189
+#, python-format
+msgid "href %s does not contain version"
+msgstr ""
+
+#: cinder/api/extensions.py:182
+msgid "Initializing extension manager."
+msgstr ""
+
+#: cinder/api/extensions.py:197
+#, python-format
+msgid "Loaded extension: %s"
+msgstr ""
+
+#: cinder/api/extensions.py:235
+#, python-format
+msgid "Ext name: %s"
+msgstr ""
+
+#: cinder/api/extensions.py:236
+#, python-format
+msgid "Ext alias: %s"
+msgstr ""
+
+#: cinder/api/extensions.py:237
+#, python-format
+msgid "Ext description: %s"
+msgstr ""
+
+#: cinder/api/extensions.py:239
+#, python-format
+msgid "Ext namespace: %s"
+msgstr ""
+
+#: cinder/api/extensions.py:240
+#, python-format
+msgid "Ext updated: %s"
+msgstr ""
+
+#: cinder/api/extensions.py:242
+#, python-format
+msgid "Exception loading extension: %s"
+msgstr ""
+
+#: cinder/api/extensions.py:256
+#, python-format
+msgid "Loading extension %s"
+msgstr ""
+
+#: cinder/api/extensions.py:262
+#, python-format
+msgid "Calling extension factory %s"
+msgstr ""
+
+#: cinder/api/extensions.py:276
+#, python-format
+msgid "osapi_volume_extension is set to deprecated path: %s"
+msgstr ""
+
+#: cinder/api/extensions.py:278
+#, python-format
+msgid ""
+"Please set your flag or cinder.conf settings for osapi_volume_extension "
+"to: %s"
+msgstr ""
+
+#: cinder/api/extensions.py:287
+#, python-format
+msgid "Failed to load extension %(ext_factory)s: %(exc)s"
+msgstr ""
+
+#: cinder/api/extensions.py:356
+#, python-format
+msgid "Failed to load extension %(classpath)s: %(exc)s"
+msgstr ""
+
+#: cinder/api/extensions.py:381
+#, python-format
+msgid "Failed to load extension %(ext_name)s: %(exc)s"
+msgstr ""
+
+#: cinder/api/sizelimit.py:25
+msgid ""
+"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use "
+"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead"
+msgstr ""
+
+#: cinder/api/xmlutil.py:266
+msgid "element is not a child"
+msgstr ""
+
+#: cinder/api/xmlutil.py:463
+msgid "root element selecting a list"
+msgstr ""
+
+#: cinder/api/xmlutil.py:786
+#, python-format
+msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s"
+msgstr ""
+
+#: cinder/api/xmlutil.py:907
+msgid "subclasses must implement construct()!"
+msgstr ""
+
+#: cinder/api/contrib/admin_actions.py:81
+#, python-format
+msgid "Updating %(resource)s '%(id)s' with '%(update)r'"
+msgstr ""
+
+#: cinder/api/contrib/backups.py:128
+#, python-format
+msgid "show called for member %s"
+msgstr ""
+
+#: cinder/api/contrib/backups.py:140
+#, python-format
+msgid "delete called for member %s"
+msgstr ""
+
+#: cinder/api/contrib/backups.py:143
+#, python-format
+msgid "Delete backup with id: %s"
+msgstr ""
+
+#: cinder/api/contrib/backups.py:185
+#, python-format
+msgid "Creating new backup %s"
+msgstr ""
+
+#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:227
+#: cinder/api/contrib/volume_transfer.py:157
+#: cinder/api/contrib/volume_transfer.py:193
+msgid "Incorrect request body format"
+msgstr ""
+
+#: cinder/api/contrib/backups.py:201
+#, python-format
+msgid "Creating backup of volume %(volume_id)s in container %(container)s"
+msgstr ""
+
+#: cinder/api/contrib/backups.py:224
+#, python-format
+msgid "Restoring backup %(backup_id)s (%(body)s)"
+msgstr ""
+
+#: cinder/api/contrib/backups.py:234
+#, python-format
+msgid "Restoring backup %(backup_id)s to volume %(volume_id)s"
+msgstr ""
+
+#: cinder/api/contrib/extended_snapshot_attributes.py:60
+msgid "Snapshot not found."
+msgstr ""
+
+#: cinder/api/contrib/hosts.py:86 cinder/api/openstack/wsgi.py:245
+msgid "cannot understand XML"
+msgstr ""
+
+#: cinder/api/contrib/hosts.py:136
+#, python-format
+msgid "Host '%s' could not be found."
+msgstr ""
+
+#: cinder/api/contrib/hosts.py:165
+#, python-format
+msgid "Invalid status: '%s'"
+msgstr ""
+
+#: cinder/api/contrib/hosts.py:168
+#, python-format
+msgid "Invalid update setting: '%s'"
+msgstr ""
+
+#: cinder/api/contrib/hosts.py:180
+#, python-format
+msgid "Setting host %(host)s to %(state)s."
+msgstr ""
+
+#: cinder/api/contrib/hosts.py:206
+msgid "Describe-resource is admin only functionality"
+msgstr ""
+
+#: cinder/api/contrib/hosts.py:214
+msgid "Host not found"
+msgstr ""
+
+#: cinder/api/contrib/qos_specs_manage.py:111
+msgid "Please specify a name for QoS specs."
+msgstr ""
+
+#: cinder/api/contrib/qos_specs_manage.py:220
+msgid "Failed to disassociate qos specs."
+msgstr ""
+
+#: cinder/api/contrib/qos_specs_manage.py:222
+msgid "Qos specs still in use."
+msgstr ""
+
+#: cinder/api/contrib/qos_specs_manage.py:298
+#: cinder/api/contrib/qos_specs_manage.py:351
+msgid "Volume Type id must not be None."
+msgstr ""
+
+#: cinder/api/contrib/quota_classes.py:72
+msgid "Missing required element quota_class_set in request body."
+msgstr ""
+
+#: cinder/api/contrib/quota_classes.py:81
+msgid "Quota class limit must be specified as an integer value."
+msgstr ""
+
+#: cinder/api/contrib/quota_classes.py:85
+msgid "Quota class limit must be -1 or greater."
+msgstr ""
+
+#: cinder/api/contrib/quotas.py:60
+msgid "Quota limit must be specified as an integer value."
+msgstr ""
+
+#: cinder/api/contrib/quotas.py:65
+msgid "Quota limit must be -1 or greater."
+msgstr ""
+
+#: cinder/api/contrib/quotas.py:100
+msgid "Missing required element quota_set in request body."
+msgstr ""
+
+#: cinder/api/contrib/quotas.py:111
+#, python-format
+msgid "Bad key(s) in quota set: %s"
+msgstr ""
+
+#: cinder/api/contrib/scheduler_hints.py:36
+msgid "Malformed scheduler_hints attribute"
+msgstr ""
+
+#: cinder/api/contrib/services.py:84
+msgid ""
+"Query by service parameter is deprecated. Please use binary parameter "
+"instead."
+msgstr ""
+
+#: cinder/api/contrib/snapshot_actions.py:51
+msgid "'status' must be specified."
+msgstr ""
+
+#: cinder/api/contrib/snapshot_actions.py:61
+#, python-format
+msgid "Snapshot status %(cur)s not allowed for update_snapshot_status"
+msgstr ""
+
+#: cinder/api/contrib/snapshot_actions.py:67
+#, python-format
+msgid ""
+"Provided snapshot status %(provided)s not allowed for snapshot with "
+"status %(current)s."
+msgstr ""
+
+#: cinder/api/contrib/snapshot_actions.py:79
+msgid "progress must be an integer percentage"
+msgstr ""
+
+#: cinder/api/contrib/types_extra_specs.py:101
+msgid "Request body empty"
+msgstr ""
+
+#: cinder/api/contrib/types_extra_specs.py:105
+#: cinder/api/v1/snapshot_metadata.py:75 cinder/api/v1/volume_metadata.py:75
+#: cinder/api/v2/snapshot_metadata.py:75 cinder/api/v2/volume_metadata.py:74
+msgid "Request body and URI mismatch"
+msgstr ""
+
+#: cinder/api/contrib/types_extra_specs.py:108
+#: cinder/api/v1/snapshot_metadata.py:79 cinder/api/v1/volume_metadata.py:79
+#: cinder/api/v2/snapshot_metadata.py:79 cinder/api/v2/volume_metadata.py:78
+msgid "Request body contains too many items"
+msgstr ""
+
+#: cinder/api/contrib/types_extra_specs.py:150
+msgid ""
+"Key names can only contain alphanumeric characters, underscores, periods,"
+" colons and hyphens."
+msgstr ""
+
+#: cinder/api/contrib/volume_actions.py:99
+#, python-format
+msgid ""
+"Invalid request to attach volume to an instance %(instance_uuid)s and a "
+"host %(host_name)s simultaneously"
+msgstr ""
+
+#: cinder/api/contrib/volume_actions.py:107
+msgid "Invalid request to attach volume to an invalid target"
+msgstr ""
+
+#: cinder/api/contrib/volume_actions.py:111
+msgid ""
+"Invalid request to attach volume with an invalid mode. Attaching mode "
+"should be 'rw' or 'ro'"
+msgstr ""
+
+#: cinder/api/contrib/volume_actions.py:196
+msgid "Unable to fetch connection information from backend."
+msgstr ""
+
+#: cinder/api/contrib/volume_actions.py:216
+msgid "Unable to terminate volume connection from backend."
+msgstr ""
+
+#: cinder/api/contrib/volume_actions.py:229
+msgid "No image_name was specified in request."
+msgstr ""
+
+#: cinder/api/contrib/volume_actions.py:237
+msgid "Bad value for 'force' parameter."
+msgstr ""
+
+#: cinder/api/contrib/volume_actions.py:240
+msgid "'force' is not string or bool."
+msgstr ""
+
+#: cinder/api/contrib/volume_actions.py:280
+msgid "New volume size must be specified as an integer."
+msgstr ""
+
+#: cinder/api/contrib/volume_actions.py:299
+msgid "Must specify readonly in request."
+msgstr ""
+
+#: cinder/api/contrib/volume_actions.py:307
+msgid "Bad value for 'readonly'"
+msgstr ""
+
+#: cinder/api/contrib/volume_actions.py:311
+msgid "'readonly' not string or bool"
+msgstr ""
+
+#: cinder/api/contrib/volume_actions.py:325
+msgid "New volume type must be specified."
+msgstr ""
+
+#: cinder/api/contrib/volume_transfer.py:131
+msgid "Listing volume transfers"
+msgstr ""
+
+#: cinder/api/contrib/volume_transfer.py:147
+#, python-format
+msgid "Creating new volume transfer %s"
+msgstr ""
+
+#: cinder/api/contrib/volume_transfer.py:162
+#, python-format
+msgid "Creating transfer of volume %s"
+msgstr ""
+
+#: cinder/api/contrib/volume_transfer.py:183
+#, python-format
+msgid "Accepting volume transfer %s"
+msgstr ""
+
+#: cinder/api/contrib/volume_transfer.py:196
+#, python-format
+msgid "Accepting transfer %s"
+msgstr ""
+
+#: cinder/api/contrib/volume_transfer.py:217
+#, python-format
+msgid "Delete transfer with id: %s"
+msgstr ""
+
+#: cinder/api/contrib/volume_type_encryption.py:64
+msgid "key_size must be non-negative"
+msgstr ""
+
+#: cinder/api/contrib/volume_type_encryption.py:67
+msgid "key_size must be an integer"
+msgstr ""
+
+#: cinder/api/contrib/volume_type_encryption.py:73
+msgid "provider must be defined"
+msgstr ""
+
+#: cinder/api/contrib/volume_type_encryption.py:75
+msgid "control_location must be defined"
+msgstr ""
+
+#: cinder/api/contrib/volume_type_encryption.py:83
+#, python-format
+msgid "Valid control location are: %s"
+msgstr ""
+
+#: cinder/api/contrib/volume_type_encryption.py:111
+msgid "Cannot create encryption specs. Volume type in use."
+msgstr ""
+
+#: cinder/api/contrib/volume_type_encryption.py:115
+msgid "Create body is not valid."
+msgstr ""
+
+#: cinder/api/contrib/volume_type_encryption.py:157
+msgid "Cannot delete encryption specs. Volume type in use."
+msgstr ""
+
+#: cinder/api/middleware/auth.py:108
+msgid "Invalid service catalog json."
+msgstr ""
+
+#: cinder/api/middleware/fault.py:44
+#, python-format
+msgid "Caught error: %s"
+msgstr ""
+
+#: cinder/api/middleware/fault.py:53 cinder/api/openstack/wsgi.py:984
+#, python-format
+msgid "%(url)s returned with HTTP %(status)d"
+msgstr ""
+
+#: cinder/api/middleware/fault.py:69
+#, python-format
+msgid "%(exception)s: %(explanation)s"
+msgstr ""
+
+#: cinder/api/middleware/sizelimit.py:55 cinder/api/middleware/sizelimit.py:64
+#: cinder/api/middleware/sizelimit.py:78
+msgid "Request is too large."
+msgstr ""
+
+#: cinder/api/openstack/__init__.py:69
+msgid "Must specify an ExtensionManager class"
+msgstr ""
+
+#: cinder/api/openstack/__init__.py:80
+#, python-format
+msgid "Extended resource: %s"
+msgstr ""
+
+#: cinder/api/openstack/__init__.py:104
+#, python-format
+msgid ""
+"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such "
+"resource"
+msgstr ""
+
+#: cinder/api/openstack/__init__.py:110
+#, python-format
+msgid "Extension %(ext_name)s extending resource: %(collection)s"
+msgstr ""
+
+#: cinder/api/openstack/__init__.py:126
+msgid ""
+"cinder.api.openstack:FaultWrapper is deprecated. Please use "
+"cinder.api.middleware.fault:FaultWrapper instead."
+msgstr ""
+
+#: cinder/api/openstack/urlmap.py:25
+msgid ""
+"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use "
+"cinder.api.urlmap:urlmap_factory instead."
+msgstr ""
+
+#: cinder/api/openstack/wsgi.py:220 cinder/api/openstack/wsgi.py:634
+msgid "cannot understand JSON"
+msgstr ""
+
+#: cinder/api/openstack/wsgi.py:639
+msgid "too many body keys"
+msgstr ""
+
+#: cinder/api/openstack/wsgi.py:677
+#, python-format
+msgid "Exception handling resource: %s"
+msgstr ""
+
+#: cinder/api/openstack/wsgi.py:682
+#, python-format
+msgid "Fault thrown: %s"
+msgstr ""
+
+#: cinder/api/openstack/wsgi.py:685
+#, python-format
+msgid "HTTP exception thrown: %s"
+msgstr ""
+
+#: cinder/api/openstack/wsgi.py:793
+msgid "Empty body provided in request"
+msgstr ""
+
+#: cinder/api/openstack/wsgi.py:799
+msgid "Unrecognized Content-Type provided in request"
+msgstr ""
+
+#: cinder/api/openstack/wsgi.py:803
+msgid "No Content-Type provided in request"
+msgstr ""
+
+#: cinder/api/openstack/wsgi.py:914
+#, python-format
+msgid "There is no such action: %s"
+msgstr ""
+
+#: cinder/api/openstack/wsgi.py:917 cinder/api/openstack/wsgi.py:930
+#: cinder/api/v1/snapshot_metadata.py:53 cinder/api/v1/snapshot_metadata.py:71
+#: cinder/api/v1/snapshot_metadata.py:96 cinder/api/v1/snapshot_metadata.py:121
+#: cinder/api/v1/volume_metadata.py:53 cinder/api/v1/volume_metadata.py:71
+#: cinder/api/v1/volume_metadata.py:96 cinder/api/v1/volume_metadata.py:121
+#: cinder/api/v2/snapshot_metadata.py:53 cinder/api/v2/snapshot_metadata.py:71
+#: cinder/api/v2/snapshot_metadata.py:96 cinder/api/v2/snapshot_metadata.py:121
+#: cinder/api/v2/volume_metadata.py:52 cinder/api/v2/volume_metadata.py:70
+#: cinder/api/v2/volume_metadata.py:95 cinder/api/v2/volume_metadata.py:120
+msgid "Malformed request body"
+msgstr ""
+
+#: cinder/api/openstack/wsgi.py:927
+msgid "Unsupported Content-Type"
+msgstr ""
+
+#: cinder/api/openstack/wsgi.py:939
+msgid "Malformed request url"
+msgstr ""
+
+#: cinder/api/openstack/wsgi.py:987
+#, python-format
+msgid "%(url)s returned a fault: %(e)s"
+msgstr ""
+
+#: cinder/api/openstack/volume/__init__.py:25
+msgid ""
+"cinder.api.openstack.volume:APIRouter is deprecated. Please use "
+"cinder.api.v1.router:APIRouter instead."
+msgstr ""
+
+#: cinder/api/openstack/volume/versions.py:26
+msgid ""
+"cinder.api.openstack.volume.versions.Versions is deprecated. Please use "
+"cinder.api.versions.Versions instead."
+msgstr ""
+
+#: cinder/api/v1/limits.py:139 cinder/api/v2/limits.py:138
+#, python-format
+msgid ""
+"Only %(value)s %(verb)s request(s) can be made to %(uri)s every "
+"%(unit_string)s."
+msgstr ""
+
+#: cinder/api/v1/limits.py:264 cinder/api/v2/limits.py:261
+msgid "This request was rate-limited."
+msgstr ""
+
+#: cinder/api/v1/snapshot_metadata.py:37 cinder/api/v1/snapshot_metadata.py:117
+#: cinder/api/v1/snapshot_metadata.py:156 cinder/api/v2/snapshot_metadata.py:37
+#: cinder/api/v2/snapshot_metadata.py:117
+#: cinder/api/v2/snapshot_metadata.py:156
+msgid "snapshot does not exist"
+msgstr ""
+
+#: cinder/api/v1/snapshot_metadata.py:139
+#: cinder/api/v1/snapshot_metadata.py:149 cinder/api/v1/volume_metadata.py:139
+#: cinder/api/v1/volume_metadata.py:149 cinder/api/v2/snapshot_metadata.py:139
+#: cinder/api/v2/snapshot_metadata.py:149 cinder/api/v2/volume_metadata.py:138
+#: cinder/api/v2/volume_metadata.py:148
+msgid "Metadata item was not found"
+msgstr ""
+
+#: cinder/api/v1/snapshots.py:119 cinder/api/v2/snapshots.py:120
+#, python-format
+msgid "Delete snapshot with id: %s"
+msgstr ""
+
+#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:184
+msgid "'volume_id' must be specified"
+msgstr ""
+
+#: cinder/api/v1/snapshots.py:182 cinder/api/v2/snapshots.py:193
+#, python-format
+msgid "Create snapshot from volume %s"
+msgstr ""
+
+#: cinder/api/v1/snapshots.py:186 cinder/api/v2/snapshots.py:202
+#, python-format
+msgid "Invalid value '%s' for force. "
+msgstr ""
+
+#: cinder/api/v1/volume_metadata.py:37 cinder/api/v1/volume_metadata.py:117
+#: cinder/api/v1/volume_metadata.py:156 cinder/api/v2/volume_metadata.py:36
+#: cinder/api/v2/volume_metadata.py:116 cinder/api/v2/volume_metadata.py:155
+msgid "volume does not exist"
+msgstr ""
+
+#: cinder/api/v1/volumes.py:111
+#, python-format
+msgid "vol=%s"
+msgstr ""
+
+#: cinder/api/v1/volumes.py:290 cinder/api/v2/volumes.py:228
+#, python-format
+msgid "Delete volume with id: %s"
+msgstr ""
+
+#: cinder/api/v1/volumes.py:344 cinder/api/v1/volumes.py:348
+#: cinder/api/v2/volumes.py:298 cinder/api/v2/volumes.py:302
+msgid "Invalid imageRef provided."
+msgstr ""
+
+#: cinder/api/v1/volumes.py:388 cinder/api/v2/volumes.py:354
+#, python-format
+msgid "snapshot id:%s not found"
+msgstr ""
+
+#: cinder/api/v1/volumes.py:401
+#, python-format
+msgid "source vol id:%s not found"
+msgstr ""
+
+#: cinder/api/v1/volumes.py:412 cinder/api/v2/volumes.py:377
+#, python-format
+msgid "Create volume of %s GB"
+msgstr ""
+
+#: cinder/api/v1/volumes.py:496
+#, python-format
+msgid "Removing options '%(bad_options)s' from query"
+msgstr ""
+
+#: cinder/api/v2/snapshots.py:111 cinder/api/v2/snapshots.py:126
+#: cinder/api/v2/snapshots.py:267
+msgid "Snapshot could not be found"
+msgstr ""
+
+#: cinder/api/v2/snapshots.py:174 cinder/api/v2/snapshots.py:234
+#: cinder/api/v2/volumes.py:313 cinder/api/v2/volumes.py:419
+#, python-format
+msgid "Missing required element '%s' in request body"
+msgstr ""
+
+#: cinder/api/v2/snapshots.py:190 cinder/api/v2/volumes.py:217
+#: cinder/api/v2/volumes.py:234 cinder/api/v2/volumes.py:449
+msgid "Volume could not be found"
+msgstr ""
+
+#: cinder/api/v2/snapshots.py:230 cinder/api/v2/volumes.py:415
+msgid "Missing request body"
+msgstr ""
+
+#: cinder/api/v2/types.py:70
+msgid "Volume type not found"
+msgstr ""
+
+#: cinder/api/v2/volumes.py:237
+msgid "Volume cannot be deleted while in attached state"
+msgstr ""
+
+#: cinder/api/v2/volumes.py:343
+msgid "Volume type not found."
+msgstr ""
+
+#: cinder/api/v2/volumes.py:366
+#, python-format
+msgid "source volume id:%s not found"
+msgstr ""
+
+#: cinder/api/v2/volumes.py:472
+#, python-format
+msgid "Removing options '%s' from query"
+msgstr ""
+
+#: cinder/backup/api.py:66
+msgid "Backup status must be available or error"
+msgstr ""
+
+#: cinder/backup/api.py:105
+msgid "Volume to be backed up must be available"
+msgstr ""
+
+#: cinder/backup/api.py:140
+msgid "Backup status must be available"
+msgstr ""
+
+#: cinder/backup/api.py:145
+msgid "Backup to be restored has invalid size"
+msgstr ""
+
+#: cinder/backup/api.py:154
+#, python-format
+msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s"
+msgstr ""
+
+#: cinder/backup/api.py:170
+msgid "Volume to be restored to must be available"
+msgstr ""
+
+#: cinder/backup/api.py:176
+#, python-format
+msgid ""
+"volume size %(volume_size)d is too small to restore backup of size "
+"%(size)d."
+msgstr ""
+
+#: cinder/backup/api.py:181
+#, python-format
+msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s"
+msgstr ""
+
+#: cinder/backup/manager.py:97
+msgid "NULL host not allowed for volume backend lookup."
+msgstr ""
+
+#: cinder/backup/manager.py:100
+#, python-format
+msgid "Checking hostname '%s' for backend info."
+msgstr ""
+
+#: cinder/backup/manager.py:107
+#, python-format
+msgid "Backend not found in hostname (%s) so using default."
+msgstr ""
+
+#: cinder/backup/manager.py:117
+#, python-format
+msgid "Manager requested for volume_backend '%s'."
+msgstr ""
+
+#: cinder/backup/manager.py:120 cinder/backup/manager.py:132
+msgid "Fetching default backend."
+msgstr ""
+
+#: cinder/backup/manager.py:123
+#, python-format
+msgid "Volume manager for backend '%s' does not exist."
+msgstr ""
+
+#: cinder/backup/manager.py:129
+#, python-format
+msgid "Driver requested for volume_backend '%s'."
+msgstr ""
+
+#: cinder/backup/manager.py:147
+#, python-format
+msgid ""
+"Registering backend %(backend)s (host=%(host)s "
+"backend_name=%(backend_name)s)."
+msgstr ""
+
+#: cinder/backup/manager.py:154
+#, python-format
+msgid "Registering default backend %s."
+msgstr ""
+
+#: cinder/backup/manager.py:158
+#, python-format
+msgid "Starting volume driver %(driver_name)s (%(version)s)."
+msgstr ""
+
+#: cinder/backup/manager.py:165
+#, python-format
+msgid "Error encountered during initialization of driver: %(name)s."
+msgstr ""
+
+#: cinder/backup/manager.py:184
+msgid "Cleaning up incomplete backup operations."
+msgstr ""
+
+#: cinder/backup/manager.py:189
+#, python-format
+msgid "Resetting volume %s to available (was backing-up)."
+msgstr ""
+
+#: cinder/backup/manager.py:194
+#, python-format
+msgid "Resetting volume %s to error_restoring (was restoring-backup)."
+msgstr ""
+
+#: cinder/backup/manager.py:206
+#, python-format
+msgid "Resetting backup %s to error (was creating)."
+msgstr ""
+
+#: cinder/backup/manager.py:212
+#, python-format
+msgid "Resetting backup %s to available (was restoring)."
+msgstr ""
+
+#: cinder/backup/manager.py:217
+#, python-format
+msgid "Resuming delete on backup: %s."
+msgstr ""
+
+#: cinder/backup/manager.py:225
+#, python-format
+msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s."
+msgstr ""
+
+#: cinder/backup/manager.py:237
+#, python-format
+msgid ""
+"Create backup aborted, expected volume status %(expected_status)s but got"
+" %(actual_status)s."
+msgstr ""
+
+#: cinder/backup/manager.py:249
+#, python-format
+msgid ""
+"Create backup aborted, expected backup status %(expected_status)s but got"
+" %(actual_status)s."
+msgstr ""
+
+#: cinder/backup/manager.py:282
+#, python-format
+msgid "Create backup finished. backup: %s."
+msgstr ""
+
+#: cinder/backup/manager.py:286
+#, python-format
+msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s."
+msgstr ""
+
+#: cinder/backup/manager.py:299
+#, python-format
+msgid ""
+"Restore backup aborted: expected volume status %(expected_status)s but "
+"got %(actual_status)s."
+msgstr ""
+
+#: cinder/backup/manager.py:310
+#, python-format
+msgid ""
+"Restore backup aborted: expected backup status %(expected_status)s but "
+"got %(actual_status)s."
+msgstr ""
+
+#: cinder/backup/manager.py:329
+#, python-format
+msgid ""
+"Restore backup aborted, the backup service currently configured "
+"[%(configured_service)s] is not the backup service that was used to "
+"create this backup [%(backup_service)s]."
+msgstr ""
+
+#: cinder/backup/manager.py:360
+#, python-format
+msgid ""
+"Restore backup finished, backup %(backup_id)s restored to volume "
+"%(volume_id)s."
+msgstr ""
+
+#: cinder/backup/manager.py:379
+#, python-format
+msgid "Delete backup started, backup: %s."
+msgstr ""
+
+#: cinder/backup/manager.py:386
+#, python-format
+msgid ""
+"Delete_backup aborted, expected backup status %(expected_status)s but got"
+" %(actual_status)s."
+msgstr ""
+
+#: cinder/backup/manager.py:399
+#, python-format
+msgid ""
+"Delete backup aborted, the backup service currently configured "
+"[%(configured_service)s] is not the backup service that was used to "
+"create this backup [%(backup_service)s]."
+msgstr ""
+
+#: cinder/backup/manager.py:422
+#, python-format
+msgid "Delete backup finished, backup %s deleted."
+msgstr ""
+
+#: cinder/backup/drivers/ceph.py:116
+msgid ""
+"rbd striping not supported - ignoring configuration settings for rbd "
+"striping"
+msgstr ""
+
+#: cinder/backup/drivers/ceph.py:147
+#, python-format
+msgid "invalid user '%s'"
+msgstr ""
+
+#: cinder/backup/drivers/ceph.py:213
+msgid "backup_id required"
+msgstr ""
+
+#: cinder/backup/drivers/ceph.py:224
+#, python-format
+msgid "discarding %(length)s bytes from offset %(offset)s"
+msgstr ""
+
+#: cinder/backup/drivers/ceph.py:232
+#, python-format
+msgid "writing zeroes chunk %d"
+msgstr ""
+
+#: cinder/backup/drivers/ceph.py:246
+#, python-format
+msgid "transferring data between '%(src)s' and '%(dest)s'"
+msgstr ""
+
+#: cinder/backup/drivers/ceph.py:250
+#, python-format
+msgid "%(chunks)s chunks of %(bytes)s bytes to be transferred"
+msgstr ""
+
+#: cinder/backup/drivers/ceph.py:269
+#, python-format
+msgid "transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s)"
+msgstr ""
+
+#: cinder/backup/drivers/ceph.py:279
+#, python-format
+msgid "transferring remaining %s bytes"
+msgstr ""
+
+#: cinder/backup/drivers/ceph.py:295
+#, python-format
+msgid "creating base image '%s'"
+msgstr ""
+
+#: cinder/backup/drivers/ceph.py:322 cinder/backup/drivers/ceph.py:603
+#, python-format
+msgid "deleting backup snapshot='%s'"
+msgstr ""
+
+#: cinder/backup/drivers/ceph.py:325
+msgid "no backup snapshot to delete"
+msgstr ""
+
+#: cinder/backup/drivers/ceph.py:361
+#, python-format
+msgid "trying diff format name format basename='%s'"
+msgstr ""
+
+#: cinder/backup/drivers/ceph.py:369
+#, python-format
+msgid "image %s not found"
+msgstr ""
+
+#: cinder/backup/drivers/ceph.py:377
+#, python-format
+msgid "base image still has %s snapshots so skipping base image delete"
+msgstr ""
+
+#: cinder/backup/drivers/ceph.py:382
+#, python-format
+msgid "deleting base image='%s'"
+msgstr ""
+
+#: cinder/backup/drivers/ceph.py:389
+#, python-format
+msgid "image busy, retrying %(retries)s more time(s) in %(delay)ss"
+msgstr ""
+
+#: cinder/backup/drivers/ceph.py:394
+msgid "max retries reached - raising error"
+msgstr ""
+
+#: cinder/backup/drivers/ceph.py:397
+#, python-format
+msgid "base backup image='%s' deleted)"
+msgstr ""
+
+#: cinder/backup/drivers/ceph.py:407
+#, python-format
+msgid "deleting source snap '%s'"
+msgstr ""
+
+#: cinder/backup/drivers/ceph.py:453
+#, python-format
+msgid "performing differential transfer from '%(src)s' to '%(dest)s'"
+msgstr ""
+
+#: cinder/backup/drivers/ceph.py:478
+#, python-format
+msgid "rbd diff op failed - (ret=%(ret)s stderr=%(stderr)s)"
+msgstr ""
+
+#: cinder/backup/drivers/ceph.py:488
+#, python-format
+msgid "image '%s' not found - trying diff format name"
+msgstr ""
+
+#: cinder/backup/drivers/ceph.py:493
+#, python-format
+msgid "diff format image '%s' not found"
+msgstr ""
+
+#: cinder/backup/drivers/ceph.py:528
+#, python-format
+msgid "using --from-snap '%s'"
+msgstr ""
+
+#: cinder/backup/drivers/ceph.py:543
+#, python-format
+msgid "source snap '%s' is stale so deleting"
+msgstr ""
+
+#: cinder/backup/drivers/ceph.py:555
+#, python-format
+msgid ""
+"snap='%(snap)s' does not exist in base image='%(base)s' - aborting "
+"incremental backup"
+msgstr ""
+
+#: cinder/backup/drivers/ceph.py:566
+#, python-format
+msgid "creating backup snapshot='%s'"
+msgstr ""
+
+#: cinder/backup/drivers/ceph.py:586
+#, python-format
+msgid "differential backup transfer completed in %.4fs"
+msgstr ""
+
+#: cinder/backup/drivers/ceph.py:595
+msgid "differential backup transfer failed"
+msgstr ""
+
+#: cinder/backup/drivers/ceph.py:625
+#, python-format
+msgid "creating base image='%s'"
+msgstr ""
+
+#: cinder/backup/drivers/ceph.py:634
+msgid "copying data"
+msgstr ""
+
+#: cinder/backup/drivers/ceph.py:694
+#, python-format
+msgid "looking for snapshot of backup base '%s'"
+msgstr ""
+
+#: cinder/backup/drivers/ceph.py:697
+#, python-format
+msgid "backup base '%s' has no snapshots"
+msgstr ""
+
+#: cinder/backup/drivers/ceph.py:704
+#, python-format
+msgid "backup '%s' has no snapshot"
+msgstr ""
+
+#: cinder/backup/drivers/ceph.py:708
+#, python-format
+msgid "backup should only have one snapshot but instead has %s"
+msgstr ""
+
+#: cinder/backup/drivers/ceph.py:713
+#, python-format
+msgid "found snapshot '%s'"
+msgstr ""
+
+#: cinder/backup/drivers/ceph.py:734
+msgid "need non-zero volume size"
+msgstr ""
+
+#: cinder/backup/drivers/ceph.py:751
+#, python-format
+msgid "Starting backup of volume='%s'"
+msgstr ""
+
+#: cinder/backup/drivers/ceph.py:764
+msgid "forcing full backup"
+msgstr ""
+
+#: cinder/backup/drivers/ceph.py:776
+#, python-format
+msgid "backup '%s' finished."
+msgstr ""
+
+#: cinder/backup/drivers/ceph.py:834
+msgid "adjusting restore vol size"
+msgstr ""
+
+#: cinder/backup/drivers/ceph.py:846
+#, python-format
+msgid "trying incremental restore from base='%(base)s' snap='%(snap)s'"
+msgstr ""
+
+#: cinder/backup/drivers/ceph.py:858
+msgid "differential restore failed, trying full restore"
+msgstr ""
+
+#: cinder/backup/drivers/ceph.py:869
+#, python-format
+msgid "restore transfer completed in %.4fs"
+msgstr ""
+
+#: cinder/backup/drivers/ceph.py:916
+#, python-format
+msgid "rbd has %s extents"
+msgstr ""
+
+#: cinder/backup/drivers/ceph.py:938
+msgid "dest volume is original volume - forcing full copy"
+msgstr ""
+
+#: cinder/backup/drivers/ceph.py:959
+msgid "destination has extents - forcing full copy"
+msgstr ""
+
+#: cinder/backup/drivers/ceph.py:964
+#, python-format
+msgid "no restore point found for backup='%s', forcing full copy"
+msgstr ""
+
+#: cinder/backup/drivers/ceph.py:995
+msgid "forcing full restore"
+msgstr ""
+
+#: cinder/backup/drivers/ceph.py:1005
+#, python-format
+msgid "starting restore from Ceph backup=%(src)s to volume=%(dest)s"
+msgstr ""
+
+#: cinder/backup/drivers/ceph.py:1016
+msgid "volume_file does not support fileno() so skipping fsync()"
+msgstr ""
+
+#: cinder/backup/drivers/ceph.py:1021
+msgid "restore finished successfully."
+msgstr ""
+
+#: cinder/backup/drivers/ceph.py:1023
+#, python-format
+msgid "restore finished with error - %s"
+msgstr ""
+
+#: cinder/backup/drivers/ceph.py:1029
+#, python-format
+msgid "delete started for backup=%s"
+msgstr ""
+
+#: cinder/backup/drivers/ceph.py:1034
+msgid "rbd image not found but continuing anyway so that db entry can be removed"
+msgstr ""
+
+#: cinder/backup/drivers/ceph.py:1037
+#, python-format
+msgid "delete '%s' finished with warning"
+msgstr ""
+
+#: cinder/backup/drivers/ceph.py:1039
+#, python-format
+msgid "delete '%s' finished"
+msgstr ""
+
+#: cinder/backup/drivers/swift.py:106
+#, python-format
+msgid "unsupported compression algorithm: %s"
+msgstr ""
+
+#: cinder/backup/drivers/swift.py:123
+#, python-format
+msgid "single_user auth mode enabled, but %(param)s not set"
+msgstr ""
+
+#: cinder/backup/drivers/swift.py:141
+#, python-format
+msgid "_check_container_exists: container: %s"
+msgstr ""
+
+#: cinder/backup/drivers/swift.py:146
+#, python-format
+msgid "container %s does not exist"
+msgstr ""
+
+#: cinder/backup/drivers/swift.py:151
+#, python-format
+msgid "container %s exists"
+msgstr ""
+
+#: cinder/backup/drivers/swift.py:157
+#, python-format
+msgid "_create_container started, container: %(container)s,backup: %(backup_id)s"
+msgstr ""
+
+#: cinder/backup/drivers/swift.py:173
+#, python-format
+msgid "_generate_swift_object_name_prefix: %s"
+msgstr ""
+
+#: cinder/backup/drivers/swift.py:182
+#, python-format
+msgid "generated object list: %s"
+msgstr ""
+
+#: cinder/backup/drivers/swift.py:192
+#, python-format
+msgid ""
+"_write_metadata started, container name: %(container)s, metadata "
+"filename: %(filename)s"
+msgstr ""
+
+#: cinder/backup/drivers/swift.py:209
+#, python-format
+msgid ""
+"error writing metadata file to swift, MD5 of metadata file in swift "
+"[%(etag)s] is not the same as MD5 of metadata file sent to swift "
+"[%(md5)s]"
+msgstr ""
+
+#: cinder/backup/drivers/swift.py:214
+msgid "_write_metadata finished"
+msgstr ""
+
+#: cinder/backup/drivers/swift.py:219
+#, python-format
+msgid ""
+"_read_metadata started, container name: %(container)s, metadata filename:"
+" %(filename)s"
+msgstr ""
+
+#: cinder/backup/drivers/swift.py:224
+#, python-format
+msgid "_read_metadata finished (%s)"
+msgstr ""
+
+#: cinder/backup/drivers/swift.py:234
+#, python-format
+msgid "volume size %d is invalid."
+msgstr "" + +#: cinder/backup/drivers/swift.py:248 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:271 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/drivers/swift.py:278 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:287 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/drivers/swift.py:291 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/drivers/swift.py:297 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:301 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:304 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:312 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/drivers/swift.py:328 cinder/backup/drivers/tsm.py:324 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/drivers/swift.py:345 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/drivers/swift.py:350 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:356 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/drivers/swift.py:362 +#, python-format +msgid "" +"restoring object from swift. backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:378 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/drivers/swift.py:401 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:409 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:423 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:428 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:432 cinder/backup/drivers/tsm.py:378 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:446 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:455 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:458 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:468 cinder/backup/drivers/tsm.py:440 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/backup/drivers/tsm.py:85 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to create device hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:143 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to obtain backup success notification from " +"server.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:173 +#, python-format +msgid "" +"restore: %(vol_id)s Failed.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:199 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a block device." +msgstr "" + +#: cinder/backup/drivers/tsm.py:206 +#, python-format +msgid "backup: %(vol_id)s Failed. Cannot obtain real path to device %(path)s." +msgstr "" + +#: cinder/backup/drivers/tsm.py:213 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a file." +msgstr "" + +#: cinder/backup/drivers/tsm.py:260 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to remove backup hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:286 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to TSM, volume path: " +"%(volume_path)s," +msgstr "" + +#: cinder/backup/drivers/tsm.py:298 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:308 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:338 +#, python-format +msgid "" +"restore: starting restore of backup from TSM to volume %(volume_id)s, " +"backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:352 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:362 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:413 +#, python-format +msgid "" +"delete: %(vol_id)s Failed to run dsmc with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:421 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments with " +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:432 +#, python-format +msgid "" +"delete: %(vol_id)s Failed with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/brick/exception.py:55 +#, python-format +msgid "Exception in string format operation. 
msg='%s'" +msgstr "" + +#: cinder/brick/exception.py:85 +msgid "We are unable to locate any Fibre Channel devices." +msgstr "" + +#: cinder/brick/exception.py:89 +msgid "Unable to find a Fibre Channel volume device." +msgstr "" + +#: cinder/brick/exception.py:93 +#, python-format +msgid "Volume device not found at %(device)s." +msgstr "" + +#: cinder/brick/exception.py:97 +#, python-format +msgid "Unable to find Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:101 +#, python-format +msgid "Failed to create Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:105 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:109 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:113 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:117 +#, python-format +msgid "Connect to volume via protocol %(protocol)s not supported." +msgstr "" + +#: cinder/brick/initiator/connector.py:127 +#, python-format +msgid "Invalid InitiatorConnector protocol specified %(protocol)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:140 +#, python-format +msgid "Failed to access the device on the path %(path)s: %(error)s %(info)s." +msgstr "" + +#: cinder/brick/initiator/connector.py:229 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. Try" +" number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:242 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:317 +#, python-format +msgid "Could not find the iSCSI Initiator File %s" +msgstr "" + +#: cinder/brick/initiator/connector.py:609 +msgid "We are unable to locate any Fibre Channel devices" +msgstr "" + +#: cinder/brick/initiator/connector.py:619 +#, python-format +msgid "Looking for Fibre Channel dev %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:629 +msgid "Fibre Channel volume device not found." +msgstr "" + +#: cinder/brick/initiator/connector.py:633 +#, python-format +msgid "Fibre volume not yet found. Will rescan & retry. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:649 +#, python-format +msgid "Found Fibre Channel volume %(name)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:658 +#, python-format +msgid "Multipath device discovered %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:776 +#, python-format +msgid "AoE volume not yet found at: %(path)s. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:789 +#, python-format +msgid "Found AoE device %(path)s (after %(tries)s rediscover)" +msgstr "" + +#: cinder/brick/initiator/connector.py:815 +#, python-format +msgid "aoe-discover: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:825 +#, python-format +msgid "aoe-revalidate %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:834 +#, python-format +msgid "aoe-flush %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:858 +msgid "" +"Connection details not present. RemoteFsClient may not initialize " +"properly." 
+msgstr "" + +#: cinder/brick/initiator/connector.py:915 +msgid "Invalid connection_properties specified no device_path attribute" +msgstr "" + +#: cinder/brick/initiator/linuxfc.py:50 cinder/brick/initiator/linuxfc.py:56 +msgid "systool is not installed" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:99 +#: cinder/brick/initiator/linuxscsi.py:107 +#: cinder/brick/initiator/linuxscsi.py:124 +#, python-format +msgid "multipath call failed exit (%(code)s)" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:145 +#, python-format +msgid "Couldn't find multipath device %(line)s" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:149 +#, python-format +msgid "Found multipath device = %(mdev)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:140 +msgid "Attempting recreate of backing lun..." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:158 +#, python-format +msgid "" +"Failed to recover attempt to create iscsi backing lun for volume " +"id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:177 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:184 +#, python-format +msgid "" +"Created volume path %(vp)s,\n" +"content: %(vc)%" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:216 cinder/brick/iscsi/iscsi.py:365 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:227 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:258 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:262 +#, python-format +msgid "Volume path %s does not exist, nothing to remove." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:280 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:290 cinder/brick/iscsi/iscsi.py:550 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:375 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:469 +msgid "cinder-rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:489 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:513 cinder/brick/iscsi/iscsi.py:522 +#, python-format +msgid "Failed to create iscsi target for volume id:%s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:532 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:542 +#, python-format +msgid "Failed to remove iscsi target for volume id:%s." 
+msgstr "" + +#: cinder/brick/iscsi/iscsi.py:571 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 cinder/brick/local_dev/lvm.py:158 +#: cinder/brick/local_dev/lvm.py:474 cinder/brick/local_dev/lvm.py:503 +#: cinder/brick/local_dev/lvm.py:546 cinder/brick/local_dev/lvm.py:609 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 cinder/brick/local_dev/lvm.py:159 +#: cinder/brick/local_dev/lvm.py:475 cinder/brick/local_dev/lvm.py:504 +#: cinder/brick/local_dev/lvm.py:547 cinder/brick/local_dev/lvm.py:610 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 cinder/brick/local_dev/lvm.py:160 +#: cinder/brick/local_dev/lvm.py:476 cinder/brick/local_dev/lvm.py:505 +#: cinder/brick/local_dev/lvm.py:548 cinder/brick/local_dev/lvm.py:611 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, python-format +msgid "Unable to locate Volume Group %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:157 +msgid "Error querying thin pool about data_percent" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:370 +#, python-format +msgid "Unable to find VG: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:420 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:473 +msgid "Error creating Volume" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:489 +#, python-format +msgid "Unable to find LV: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:502 +msgid "Error creating snapshot" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:545 +msgid "Error activating LV" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:563 +#, python-format +msgid "Error reported running lvremove: CMD: %(command)s, RESPONSE: %(response)s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:568 +msgid "Attempting udev settle and retry of lvremove..." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:608 +msgid "Error extending Volume" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:39 +msgid "nfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:45 +msgid "glusterfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:86 +#, python-format +msgid "Already mounted: %s" +msgstr "" + +#: cinder/common/config.py:125 +msgid "Deploy v1 of the Cinder API." +msgstr "" + +#: cinder/common/config.py:128 +msgid "Deploy v2 of the Cinder API." +msgstr "" + +#: cinder/common/sqlalchemyutils.py:66 +#: cinder/openstack/common/db/sqlalchemy/utils.py:72 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:114 +#: cinder/openstack/common/db/sqlalchemy/utils.py:120 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/compute/nova.py:97 +#, python-format +msgid "Novaclient connection created using URL: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:63 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:190 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:843 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1842 +#, python-format +msgid "VolumeType %s deletion failed, VolumeType in use." 
+msgstr "" + +#: cinder/db/sqlalchemy/api.py:2530 +#, python-format +msgid "No backup with id %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2615 +msgid "Volume must be available" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2639 +#, python-format +msgid "Volume in unexpected state %s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2662 +#, python-format +msgid "" +"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " +"%(status)s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:37 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:64 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:240 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:269 +msgid "Downgrade from initial Cinder install is unsupported." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:49 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:74 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:105 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:45 +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:48 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:46 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:127 +msgid "Dropping foreign key reservations_ibfk_1 failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:133 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:140 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:147 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:60 +msgid "Exception while creating table 'volume_glance_metadata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:75 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:68 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:58 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:61 +msgid "transfers table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:31 +msgid "migrations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:61 +#, python-format +msgid "Table |%s| not created" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:37 +#, python-format +msgid "Exception while dropping table %s." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:100 +#, python-format +msgid "Exception while creating table %s." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:34 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:43 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:49 +#, python-format +msgid "Column |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:92 +msgid "encryption_key_id column not dropped from volumes" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:100 +msgid "encryption_key_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:105 +msgid "volume_type_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:113 +msgid "encryption table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:49 +msgid "Table quality_of_service_specs not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:60 +msgid "Added qos_specs_id column to volume type table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:85 +msgid "Dropping foreign key volume_types_ibfk_1 failed" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:93 +msgid "Dropping qos_specs_id column failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:100 +msgid "Dropping quality_of_service_specs table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:59 +msgid "volume_admin_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:48 +msgid "" +"Found existing 'default' entries in the quota_classes table. Skipping " +"insertion of default values." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:72 +msgid "Added default quota class data into the DB." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:74 +msgid "Default quota class data not inserted into the DB." +msgstr "" + +#: cinder/image/glance.py:161 cinder/image/glance.py:169 +#, python-format +msgid "Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:94 cinder/image/image_utils.py:199 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/image/image_utils.py:101 +#, python-format +msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:109 cinder/image/image_utils.py:192 +#, python-format +msgid "" +"Size is %(image_size)dGB and doesn't fit in a volume of size " +"%(volume_size)dGB." +msgstr "" + +#: cinder/image/image_utils.py:157 +#, python-format +msgid "" +"qemu-img is not installed and image is of type %s. Only RAW images can " +"be used if qemu-img is not installed." +msgstr "" + +#: cinder/image/image_utils.py:164 +msgid "" +"qemu-img is not installed and the disk format is not specified. Only RAW" +" images can be used if qemu-img is not installed." 
+msgstr "" + +#: cinder/image/image_utils.py:178 +#, python-format +msgid "Copying image from %(tmp)s to volume %(dest)s - size: %(size)s" +msgstr "" + +#: cinder/image/image_utils.py:206 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:224 +#, python-format +msgid "Converted to %(vol_format)s, but format is now %(file_format)s" +msgstr "" + +#: cinder/image/image_utils.py:260 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:78 +msgid "" +"config option keymgr.fixed_key has not been defined: some operations may " +"fail unexpectedly" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:80 +msgid "keymgr.fixed_key not defined" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:134 +#, python-format +msgid "Not deleting key %s" +msgstr "" + +#: cinder/openstack/common/eventlet_backdoor.py:140 +#, python-format +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/fileutils.py:64 +#, python-format +msgid "Reloading cached file %s" +msgstr "" + +#: cinder/openstack/common/gettextutils.py:252 +msgid "Message objects do not support addition." +msgstr "" + +#: cinder/openstack/common/gettextutils.py:261 +msgid "" +"Message objects do not support str() because they may contain non-ascii " +"characters. Please use unicode() or translate() instead." +msgstr "" + +#: cinder/openstack/common/imageutils.py:96 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:189 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:200 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:227 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:235 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/log.py:301 +#, python-format +msgid "Deprecated: %s" +msgstr "" + +#: cinder/openstack/common/log.py:402 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:453 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:623 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:82 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:89 +#: cinder/tests/brick/test_brick_connector.py:466 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:129 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:136 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:43 +#, python-format +msgid "Unexpected argument for periodic task creation: %(arg)s." 
+msgstr "" + +#: cinder/openstack/common/periodic_task.py:134 +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:139 +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:177 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:186 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." +msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:127 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/openstack/common/processutils.py:142 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:167 +#: cinder/openstack/common/processutils.py:239 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:345 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:179 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/openstack/common/processutils.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:318 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:220 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/openstack/common/processutils.py:224 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/openstack/common/service.py:175 +#: cinder/openstack/common/service.py:269 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:187 +msgid "Exception during rpc cleanup." 
+msgstr "" + +#: cinder/openstack/common/service.py:238 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:275 +msgid "Unhandled exception" +msgstr "" + +#: cinder/openstack/common/service.py:308 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/openstack/common/service.py:327 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/openstack/common/service.py:337 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/openstack/common/service.py:354 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/openstack/common/service.py:358 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/service.py:362 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/openstack/common/service.py:392 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/openstack/common/service.py:410 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/openstack/common/sslutils.py:98 +#, python-format +msgid "Invalid SSL version : %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:86 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/strutils.py:182 +#, python-format +msgid "Invalid string format: %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:189 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/openstack/common/versionutils.py:69 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and " +"may be removed in %(remove_in)s." +msgstr "" + +#: cinder/openstack/common/versionutils.py:73 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s and may be removed in " +"%(remove_in)s. It will not be superseded." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:29 +msgid "An unknown error occurred in crypto utils." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:36 +#, python-format +msgid "Block size of %(given)d is too big, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:45 +#, python-format +msgid "Length of %(given)d is too long, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/db/exception.py:44 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:487 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:538 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:610 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/utils.py:33 +msgid "Sort key supplied was not valid." +msgstr "" + +#: cinder/openstack/common/notifier/api.py:129 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:145 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:164 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. 
Please use rpc_notifier instead."
+msgstr ""
+
+#: cinder/openstack/common/notifier/rpc_notifier.py:45
+#: cinder/openstack/common/notifier/rpc_notifier2.py:51
+#, python-format
+msgid "Could not send notification to %(topic)s. Payload=%(message)s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/__init__.py:105
+#, python-format
+msgid ""
+"A RPC is being made while holding a lock. The locks currently held are "
+"%(locks)s. This is probably a bug. Please report it. Include the "
+"following: [%(stack)s]."
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:83
+msgid "Pool creating new connection"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:208
+#, python-format
+msgid "no calling threads waiting for msg_id : %s, message : %s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:216
+#, python-format
+msgid ""
+"Number of call waiters is greater than warning threshold: %d. There "
+"could be a MulticallProxyWaiter leak."
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:299
+#, python-format
+msgid "unpacked context: %s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:345
+#, python-format
+msgid "UNIQUE_ID is %s."
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:414
+#, python-format
+msgid "received %s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:422
+#, python-format
+msgid "no method for message: %s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:423
+#, python-format
+msgid "No method for message: %s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:451
+#: cinder/openstack/common/rpc/impl_zmq.py:280
+#, python-format
+msgid "Expected exception during message handling (%s)"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:459
+#: cinder/openstack/common/rpc/impl_zmq.py:286
+msgid "Exception during message handling"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:594
+#, python-format
+msgid "Making synchronous call on %s ..."
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:597
+#, python-format
+msgid "MSG_ID is %s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:631
+#, python-format
+msgid "Making asynchronous cast on %s..."
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:640
+msgid "Making asynchronous fanout cast..."
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:668
+#, python-format
+msgid "Sending %(event_type)s on %(topic)s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:74
+msgid "An unknown RPC related exception occurred."
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:104
+#, python-format
+msgid ""
+"Remote error: %(exc_type)s %(value)s\n"
+"%(traceback)s."
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:121
+#, python-format
+msgid ""
+"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:"
+" \"%(method)s\" info: \"%(info)s\""
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:137
+#: cinder/openstack/common/rpc/common.py:138
+#: cinder/openstack/common/rpc/common.py:139
+msgid ""
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:143
+#, python-format
+msgid "Found duplicate message(%(msg_id)s). Skipping it."
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:147
+msgid "Invalid reuse of an RPC connection."
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:151
+#, python-format
+msgid "Specified RPC version, %(version)s, not supported by this endpoint."
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:156
+#, python-format
+msgid ""
+"Specified RPC envelope version, %(version)s, not supported by this "
+"endpoint." 
+msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:166 +#: cinder/openstack/common/rpc/impl_qpid.py:164 +msgid "Failed to process message... skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:477 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:499 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:536 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:552 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:606 +#: cinder/openstack/common/rpc/impl_qpid.py:507 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:624 +#: cinder/openstack/common/rpc/impl_qpid.py:522 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:628 +#: cinder/openstack/common/rpc/impl_qpid.py:526 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:667 +#: cinder/openstack/common/rpc/impl_qpid.py:561 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:84 +#, python-format +msgid "Invalid value for qpid_topology_version: %d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:455 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:461 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:474 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:534 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:101 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:136 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:137 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:138 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:146 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:158 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:200 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:205 +msgid "You cannot send on this socket." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:267 +#, python-format +msgid "Running func with context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:305 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:387 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:437 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:443 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:475 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:481 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:497 +#, python-format +msgid "Required IPC directory does not exist at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:506 +#, python-format +msgid "Permission denied to IPC directory at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:509 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:543 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:562 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:590 +msgid "Skipping topic registration. Already registered." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:597 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:649 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:662 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:675 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:678 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:681 +#, python-format +msgid "Received message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:682 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:691 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:698 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:721 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:724 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:728 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:731 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:771 +#, python-format +msgid "topic is %s." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:815 +#, python-format +msgid "rpc_zmq_matchmaker = %(orig)s is deprecated; use %(new)s instead" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#: cinder/openstack/common/rpc/matchmaker_ring.py:79 +#: cinder/openstack/common/rpc/matchmaker_ring.py:97 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:54 +#, python-format +msgid "extra_spec requirement '%(req)s' does not match '%(cap)s'" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:67 +#, python-format +msgid "%(host_state)s fails resource_type extra_specs requirements" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:43 +msgid "Re-scheduling is disabled." +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:52 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/scheduler/driver.py:69 +msgid "Must implement host_passes_filters" +msgstr "" + +#: cinder/scheduler/driver.py:74 +msgid "Must implement find_retype_host" +msgstr "" + +#: cinder/scheduler/driver.py:78 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:82 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:98 +#, python-format +msgid "cannot place volume %(id)s on %(host)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:114 +#, python-format +msgid "No valid hosts for volume %(id)s with type %(type)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:125 +#, python-format +msgid "" +"Current host not valid for volume %(id)s with type %(type)s, migration " +"not allowed" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:156 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:174 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:207 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:259 +#, python-format +msgid "Filtered %s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:276 +#, python-format +msgid "Choosing %s" +msgstr "" + +#: cinder/scheduler/host_manager.py:264 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:269 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:294 +#, python-format +msgid "volume service is down or disabled. (host: %s)" +msgstr "" + +#: cinder/scheduler/manager.py:63 +msgid "" +"ChanceScheduler and SimpleScheduler have been deprecated due to lack of " +"support for advanced features like: volume types, volume encryption, QoS " +"etc. These two schedulers can be fully replaced by FilterScheduler with " +"certain combination of filters and weighers." 
+msgstr "" + +#: cinder/scheduler/manager.py:98 cinder/scheduler/manager.py:100 +msgid "Failed to create scheduler manager volume flow" +msgstr "" + +#: cinder/scheduler/manager.py:159 +msgid "New volume type not specified in request_spec." +msgstr "" + +#: cinder/scheduler/manager.py:174 +#, python-format +msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." +msgstr "" + +#: cinder/scheduler/manager.py:192 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:68 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%s'" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:43 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:57 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:53 +msgid "No volume_id provided to populate a request_spec from" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:116 +#, python-format +msgid "Failed to schedule_create_volume: %(cause)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:135 +#, python-format +msgid "Failed notifying on %(topic)s payload %(payload)s" +msgstr "" + +#: cinder/tests/fake_driver.py:57 cinder/volume/driver.py:784 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:76 cinder/volume/driver.py:884 +#, python-format +msgid "FAKE ISER: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:97 +msgid "local_path not implemented" +msgstr "" + +#: cinder/tests/fake_driver.py:124 cinder/tests/fake_driver.py:129 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:70 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:78 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:94 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:97 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:58 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_netapp_nfs.py:360 +#, python-format +msgid "Share %(share)s and file name %(file_name)s" +msgstr "" + +#: cinder/tests/test_rbd.py:768 cinder/volume/drivers/rbd.py:175 +msgid "flush() not supported in this version of librbd" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:260 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1507 +#, python-format +msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1510 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1515 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:60 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:61 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: 
cinder/tests/test_xiv_ds8k.py:102
+#, python-format
+msgid "Volume not found for instance %(instance_id)s."
+msgstr ""
+
+#: cinder/tests/api/contrib/test_backups.py:741
+msgid "Invalid input"
+msgstr ""
+
+#: cinder/tests/integrated/test_login.py:29
+#, python-format
+msgid "volume: %s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:32
+#, python-format
+msgid ""
+"%(message)s\n"
+"Status Code: %(_status)s\n"
+"Body: %(_body)s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:42
+msgid "Authentication error"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:50
+msgid "Authorization error"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:58
+msgid "Item not found"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:100
+#, python-format
+msgid "Doing %(method)s on %(relative_url)s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:103
+#, python-format
+msgid "Body: %s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:121
+#, python-format
+msgid "%(auth_uri)s => code %(http_status)s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:148
+#, python-format
+msgid "%(relative_uri)s => code %(http_status)s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:159
+msgid "Unexpected status code"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:166
+#, python-format
+msgid "Decoding JSON: %s"
+msgstr ""
+
+#: cinder/transfer/api.py:68
+msgid "Volume in unexpected state"
+msgstr ""
+
+#: cinder/transfer/api.py:102 cinder/volume/api.py:367
+msgid "status must be available"
+msgstr ""
+
+#: cinder/transfer/api.py:119
+#, python-format
+msgid "Failed to create transfer record for %s"
+msgstr ""
+
+#: cinder/transfer/api.py:136
+#, python-format
+msgid "Attempt to transfer %s with invalid auth key."
+msgstr ""
+
+#: cinder/transfer/api.py:156 cinder/volume/flows/api/create_volume.py:508
+#, python-format
+msgid ""
+"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume "
+"(%(d_consumed)dG of %(d_quota)dG already consumed)"
+msgstr ""
+
+#: cinder/transfer/api.py:182
+#, python-format
+msgid "Failed to update quota donating volume transfer id %s"
+msgstr ""
+
+#: cinder/transfer/api.py:199
+#, python-format
+msgid "Volume %s has been transferred." 
+msgstr "" + +#: cinder/volume/api.py:143 +#, python-format +msgid "Unable to query if %s is in the availability zone set" +msgstr "" + +#: cinder/volume/api.py:171 cinder/volume/api.py:173 +msgid "Failed to create api volume flow" +msgstr "" + +#: cinder/volume/api.py:202 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:214 +#, python-format +msgid "Volume status must be available or error, but current status is: %s" +msgstr "" + +#: cinder/volume/api.py:224 +msgid "Volume cannot be deleted while migrating" +msgstr "" + +#: cinder/volume/api.py:229 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:285 cinder/volume/api.py:350 +#: cinder/volume/qos_specs.py:240 cinder/volume/volume_types.py:67 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:370 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:377 +msgid "status must be in-use to detach" +msgstr "" + +#: cinder/volume/api.py:388 +msgid "Volume status must be available to reserve" +msgstr "" + +#: cinder/volume/api.py:464 +msgid "Snapshot cannot be created while volume is migrating" +msgstr "" + +#: cinder/volume/api.py:468 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:490 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:502 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:553 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/api.py:581 cinder/volume/flows/api/create_volume.py:208 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:585 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:589 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:720 cinder/volume/api.py:772 +msgid "Volume status must be available/in-use." +msgstr "" + +#: cinder/volume/api.py:723 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/api.py:752 +msgid "Volume status must be available to extend." +msgstr "" + +#: cinder/volume/api.py:757 +#, python-format +msgid "" +"New size for extend must be greater than current size. (current: " +"%(size)s, extended: %(new_size)s)" +msgstr "" + +#: cinder/volume/api.py:778 +msgid "Volume is already part of an active migration" +msgstr "" + +#: cinder/volume/api.py:784 +msgid "volume must not have snapshots" +msgstr "" + +#: cinder/volume/api.py:797 +#, python-format +msgid "No available service named %s" +msgstr "" + +#: cinder/volume/api.py:803 +msgid "Destination host must be different than current host" +msgstr "" + +#: cinder/volume/api.py:833 +msgid "Source volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:837 +msgid "Destination volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:842 +#, python-format +msgid "Destination has migration_status %(stat)s, expected %(exp)s." +msgstr "" + +#: cinder/volume/api.py:853 +msgid "Volume status must be available to update readonly flag." 
+msgstr "" + +#: cinder/volume/api.py:862 +#, python-format +msgid "Unable to update type due to incorrect status on volume: %s" +msgstr "" + +#: cinder/volume/api.py:868 +#, python-format +msgid "Volume %s is already part of an active migration." +msgstr "" + +#: cinder/volume/api.py:874 +#, python-format +msgid "migration_policy must be 'on-demand' or 'never', passed: %s" +msgstr "" + +#: cinder/volume/api.py:887 +#, python-format +msgid "Invalid volume_type passed: %s" +msgstr "" + +#: cinder/volume/api.py:900 +#, python-format +msgid "New volume_type same as original: %s" +msgstr "" + +#: cinder/volume/api.py:915 +msgid "Retype cannot change encryption requirements" +msgstr "" + +#: cinder/volume/api.py:927 +msgid "Retype cannot change front-end qos specs for in-use volumes" +msgstr "" + +#: cinder/volume/driver.py:189 cinder/volume/drivers/netapp/nfs.py:174 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:282 +#, python-format +msgid "copy_data_between_volumes %(src)s -> %(dest)s." +msgstr "" + +#: cinder/volume/driver.py:295 cinder/volume/driver.py:309 +#, python-format +msgid "Failed to attach volume %(vol)s" +msgstr "" + +#: cinder/volume/driver.py:327 +#, python-format +msgid "Failed to copy volume %(src)s to %(dest)d" +msgstr "" + +#: cinder/volume/driver.py:340 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:358 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:394 +#, python-format +msgid "Unable to access the backend storage via the path %(path)s." +msgstr "" + +#: cinder/volume/driver.py:433 +#, python-format +msgid "Creating a new backup for volume %s." +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Restoring backup %(backup)s to volume %(volume)s." +msgstr "" + +#: cinder/volume/driver.py:474 +msgid "Extend volume not implemented" +msgstr "" + +#: cinder/volume/driver.py:533 cinder/volume/drivers/emc/emc_smis_iscsi.py:113 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:546 +#, python-format +msgid "ISCSI discovery attempt failed for:%s" +msgstr "" + +#: cinder/volume/driver.py:548 +#, python-format +msgid "Error from iscsiadm -m discovery: %s" +msgstr "" + +#: cinder/volume/driver.py:595 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:599 cinder/volume/drivers/emc/emc_smis_iscsi.py:156 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:696 +msgid "The volume driver requires the iSCSI initiator name in the connector." +msgstr "" + +#: cinder/volume/driver.py:726 cinder/volume/driver.py:845 +#: cinder/volume/drivers/eqlx.py:247 cinder/volume/drivers/lvm.py:359 +#: cinder/volume/drivers/zadara.py:650 +#: cinder/volume/drivers/emc/emc_smis_common.py:859 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:235 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:602 +#: cinder/volume/drivers/netapp/iscsi.py:1032 +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/nexenta/iscsi.py:538 +#: cinder/volume/drivers/windows/windows.py:205 +msgid "Updating volume stats" +msgstr "" + +#: cinder/volume/driver.py:924 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:203 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." 
+msgstr "" + +#: cinder/volume/manager.py:209 +msgid "" +"ThinLVMVolumeDriver is deprecated, please configure LVMISCSIDriver and " +"lvm_type=thin. Continuing with those settings." +msgstr "" + +#: cinder/volume/manager.py:228 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)" +msgstr "" + +#: cinder/volume/manager.py:235 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s" +msgstr "" + +#: cinder/volume/manager.py:244 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:257 +#, python-format +msgid "Failed to re-export volume %s: setting to error state" +msgstr "" + +#: cinder/volume/manager.py:264 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:271 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:273 +#, python-format +msgid "" +"Error encountered during re-exporting phase of driver initialization: " +"%(name)s" +msgstr "" + +#: cinder/volume/manager.py:283 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:328 cinder/volume/manager.py:330 +msgid "Failed to create manager volume flow" +msgstr "" + +#: cinder/volume/manager.py:374 cinder/volume/manager.py:391 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:380 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:389 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:394 +#, python-format +msgid "Cannot delete volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:422 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:427 +#, python-format +msgid "volume %s: glance metadata deleted" +msgstr "" + +#: cinder/volume/manager.py:430 +#, python-format +msgid "no glance metadata found for volume %s" +msgstr "" + +#: cinder/volume/manager.py:434 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:451 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:462 +#, python-format +msgid "snapshot %(snap_id)s: creating" +msgstr "" + +#: cinder/volume/manager.py:490 +#, python-format +msgid "" +"Failed updating %(snapshot_id)s metadata using the provided volumes " +"%(volume_id)s metadata" +msgstr "" + +#: cinder/volume/manager.py:496 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:508 cinder/volume/manager.py:518 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:526 +#, python-format +msgid "Cannot delete snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:556 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:559 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:579 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:583 +msgid "being attached by another host" +msgstr "" + +#: cinder/volume/manager.py:587 +msgid "being attached by different mode" +msgstr "" + +#: cinder/volume/manager.py:590 +msgid "status must be available or attaching" +msgstr "" + +#: cinder/volume/manager.py:698 +#, python-format +msgid 
"Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "" + +#: cinder/volume/manager.py:760 +#, python-format +msgid "Unable to fetch connection information from backend: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:807 +#, python-format +msgid "Unable to terminate volume connection: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:854 +msgid "failed to create new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:857 +msgid "timeout creating new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:880 +#, python-format +msgid "Failed to copy volume %(vol1)s to %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:909 +#, python-format +msgid "" +"migrate_volume_completion: completing migration for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:921 +#, python-format +msgid "" +"migrate_volume_completion is cleaning up an error for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:940 +#, python-format +msgid "Failed to delete migration source vol %(vol)s: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:976 +#, python-format +msgid "volume %s: calling driver migrate_volume" +msgstr "" + +#: cinder/volume/manager.py:1016 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/manager.py:1024 +#, python-format +msgid "" +"Unable to update stats, %(driver_name)s -%(driver_version)s " +"%(config_group)s driver is uninitialized." +msgstr "" + +#: cinder/volume/manager.py:1044 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/manager.py:1091 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to extend volume by %(s_size)sG, " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/manager.py:1103 +#, python-format +msgid "volume %s: extending" +msgstr "" + +#: cinder/volume/manager.py:1105 +#, python-format +msgid "volume %s: extended successfully" +msgstr "" + +#: cinder/volume/manager.py:1107 +#, python-format +msgid "volume %s: Error trying to extend volume" +msgstr "" + +#: cinder/volume/manager.py:1169 +msgid "Failed to update usages while retyping volume." +msgstr "" + +#: cinder/volume/manager.py:1170 +msgid "Failed to get old volume type quota reservations" +msgstr "" + +#: cinder/volume/manager.py:1190 +#, python-format +msgid "Volume %s: retyped succesfully" +msgstr "" + +#: cinder/volume/manager.py:1193 +#, python-format +msgid "" +"Volume %s: driver error when trying to retype, falling back to generic " +"mechanism." +msgstr "" + +#: cinder/volume/manager.py:1204 +msgid "Retype requires migration but is not allowed." +msgstr "" + +#: cinder/volume/manager.py:1212 +msgid "Volume must not have snapshots." 
+msgstr "" + +#: cinder/volume/qos_specs.py:57 +#, python-format +msgid "Valid consumer of QoS specs are: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:84 cinder/volume/qos_specs.py:105 +#: cinder/volume/qos_specs.py:155 cinder/volume/qos_specs.py:197 +#: cinder/volume/qos_specs.py:211 cinder/volume/qos_specs.py:225 +#: cinder/volume/volume_types.py:43 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:123 cinder/volume/qos_specs.py:140 +#: cinder/volume/qos_specs.py:272 cinder/volume/volume_types.py:52 +#: cinder/volume/volume_types.py:99 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/qos_specs.py:156 +#, python-format +msgid "Failed to get all associations of qos specs %s" +msgstr "" + +#: cinder/volume/qos_specs.py:189 +#, python-format +msgid "" +"Type %(type_id)s is already associated with another qos specs: " +"%(qos_specs_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:198 +#, python-format +msgid "Failed to associate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:212 +#, python-format +msgid "Failed to disassociate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:226 +#, python-format +msgid "Failed to disassociate qos specs %s." +msgstr "" + +#: cinder/volume/qos_specs.py:284 cinder/volume/volume_types.py:111 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/utils.py:144 +#, python-format +msgid "" +"Incorrect value error: %(blocksize)s, it may indicate that " +"'volume_dd_blocksize' was configured incorrectly. Fall back to default." +msgstr "" + +#: cinder/volume/volume_types.py:130 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:131 +#: cinder/volume/drivers/block_device.py:143 cinder/volume/drivers/lvm.py:654 +#: cinder/volume/drivers/lvm.py:669 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:157 cinder/volume/drivers/lvm.py:687 +#, python-format +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:183 cinder/volume/drivers/lvm.py:483 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:200 cinder/volume/drivers/lvm.py:504 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:272 cinder/volume/drivers/lvm.py:227 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:287 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:311 cinder/volume/drivers/lvm.py:300 +#: cinder/volume/drivers/zadara.py:509 cinder/volume/drivers/nexenta/nfs.py:189 +#, python-format +msgid "Creating clone of volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:380 +msgid "No free disk" +msgstr "" + +#: cinder/volume/drivers/block_device.py:393 +msgid "No big enough free disk" +msgstr "" + +#: cinder/volume/drivers/coraid.py:84 +#, python-format +msgid "Invalid ESM url scheme \"%s\". Supported https only." +msgstr "" + +#: cinder/volume/drivers/coraid.py:111 +msgid "Invalid REST handle name. Expected path." 
+msgstr "" + +#: cinder/volume/drivers/coraid.py:134 +#, python-format +msgid "Call to json.loads() failed: %(ex)s. Response: %(resp)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:224 +msgid "Session is expired. Relogin on ESM." +msgstr "" + +#: cinder/volume/drivers/coraid.py:244 +msgid "Reply is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:246 +msgid "Error message is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:284 +#, python-format +msgid "Coraid Appliance ping failed: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:297 +#, python-format +msgid "Volume \"%(name)s\" created with VSX LUN \"%(lun)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:311 +#, python-format +msgid "Volume \"%s\" deleted." +msgstr "" + +#: cinder/volume/drivers/coraid.py:315 +#, python-format +msgid "Resize volume \"%(name)s\" to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:319 +#, python-format +msgid "Repository for volume \"%(name)s\" found: \"%(repo)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:333 +#, python-format +msgid "Volume \"%(name)s\" resized. New size is %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:385 +msgid "Cannot create clone volume in different repository." +msgstr "" + +#: cinder/volume/drivers/coraid.py:505 +#, python-format +msgid "Initialize connection %(shelf)s/%(lun)s for %(name)s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:139 +#, python-format +msgid "" +"CLI output\n" +"%s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:154 +msgid "Reading CLI MOTD" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:158 +#, python-format +msgid "Setting CLI terminal width: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:162 +#, python-format +msgid "Sending CLI command: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:169 +msgid "Error executing EQL command" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:199 +#, python-format +msgid "EQL-driver: executing \"%s\"" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:208 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:383 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:215 cinder/volume/drivers/san/san.py:149 +#, python-format +msgid "Error running SSH command: %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:282 +#, python-format +msgid "Volume %s does not exist, it may have already been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:300 +#, python-format +msgid "EQL-driver: Setup is complete, group IP is %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:304 +msgid "Failed to setup the Dell EqualLogic driver" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:320 +#, python-format +msgid "Failed to create volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:329 +#, python-format +msgid "Volume %s was not found while trying to delete it" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:333 +#, python-format +msgid "Failed to delete volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:348 +#, python-format +msgid "Failed to create snapshot of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:361 +#, python-format +msgid "Failed to create volume from snapshot %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:374 +#, python-format +msgid "Failed to create clone of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:384 +#, python-format +msgid "Failed to delete snapshot %(snap)s of volume %(vol)s" +msgstr "" + +#: 
cinder/volume/drivers/eqlx.py:405 +#, python-format +msgid "Failed to initialize connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:415 +#, python-format +msgid "Failed to terminate connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:436 +#, python-format +msgid "Volume %s is not found! It may have been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:440 +#, python-format +msgid "Failed to ensure export of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:459 +#, python-format +msgid "Failed to extend_volume %(name)s from %(current_size)sGB to %(new_size)sGB" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:86 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:91 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:103 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:161 +#, python-format +msgid "Cloning volume %(src)s to volume %(dst)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:166 +msgid "Volume status must be 'available'." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:202 cinder/volume/drivers/nfs.py:122 +#: cinder/volume/drivers/netapp/nfs.py:753 +#, python-format +msgid "cast to %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:216 +msgid "Snapshot status must be \"available\" to clone." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:238 +#, python-format +msgid "snapshot: %(snap)s, volume: %(vol)s, volume_size: %(size)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:257 +#, python-format +msgid "will copy from snapshot at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:275 cinder/volume/drivers/nfs.py:172 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:373 +#, python-format +msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:403 +#, python-format +msgid "nova call result: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:405 +msgid "Call to Nova to create snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:427 +msgid "Nova returned \"error\" status while creating snapshot." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:431 +#, python-format +msgid "Status of snapshot %(id)s is now %(status)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:444 +#, python-format +msgid "Timed out while waiting for Nova update for creation of snapshot %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:456 +#, python-format +msgid "create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:457 +#, python-format +msgid "volume id: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:532 +msgid "'active' must be present when writing snap_info." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:562 +#, python-format +msgid "deleting snapshot %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:566 +msgid "Volume status must be \"available\" or \"in-use\"." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:582 +#, python-format +msgid "" +"Snapshot record for %s is not present, allowing snapshot_delete to " +"proceed." 
+msgstr "" + +#: cinder/volume/drivers/glusterfs.py:587 +#, python-format +msgid "snapshot_file for this snap is %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:608 +#, python-format +msgid "No base file found for %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:625 +#, python-format +msgid "No %(base_id)s found for %(file)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:680 +#, python-format +msgid "No file found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:690 +#, python-format +msgid "No snap found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:701 +#, python-format +msgid "No file depends on %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:727 +#, python-format +msgid "Check condition failed: %s expected to be None." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:778 +msgid "Call to Nova delete snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:796 +#, python-format +msgid "status of snapshot %s is still \"deleting\"... waiting" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:802 +#, python-format +msgid "Unable to delete snapshot %(id)s, status: %(status)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:815 +#, python-format +msgid "Timed out while waiting for Nova update for deletion of snapshot %(id)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:904 +#, python-format +msgid "%s must be a valid raw or qcow2 image." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:967 +msgid "Extend volume is only supported for this driver when no snapshots exist." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:975 +#, python-format +msgid "Unrecognized backing format: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:990 +#, python-format +msgid "creating new volume at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:993 +#, python-format +msgid "file already exists at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1019 cinder/volume/drivers/nfs.py:159 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1021 +#, python-format +msgid "Available shares: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1038 +#, python-format +msgid "" +"GlusterFS share at %(dir)s is not writable by the Cinder volume service. " +"Snapshot operations will not be supported." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:96 +#, python-format +msgid "GPFS is not active. Detailed output: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:97 +#, python-format +msgid "GPFS is not running - state: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:140 +msgid "Option gpfs_mount_point_base is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:147 +msgid "Option gpfs_images_share_mode is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:153 +msgid "Option gpfs_images_dir is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:160 +#, python-format +msgid "" +"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " +"belong to different file systems" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:169 +#, python-format +msgid "" +"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in " +"cluster daemon level %(cur)s - must be at least at level %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:183 +#, python-format +msgid "%s must be an absolute path." 
+msgstr "" + +#: cinder/volume/drivers/gpfs.py:188 +#, python-format +msgid "%s is not a directory." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:197 +#, python-format +msgid "" +"The GPFS filesystem %(fs)s is not at the required release level. Current" +" level is %(cur)s, must be at least %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:556 +#, python-format +msgid "Failed to resize volume %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:604 +#, python-format +msgid "mkfs failed on volume %(vol)s, error message was: %(err)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:637 +#, python-format +msgid "" +"%s cannot be accessed. Verify that GPFS is active and file system is " +"mounted." +msgstr "" + +#: cinder/volume/drivers/lvm.py:189 +#, python-format +msgid "Unabled to delete due to existing snapshot for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:215 +#, python-format +msgid "Volume device file path %s does not exist." +msgstr "" + +#: cinder/volume/drivers/lvm.py:221 +#, python-format +msgid "Size for volume: %s not found, cannot secure delete." +msgstr "" + +#: cinder/volume/drivers/lvm.py:262 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:361 +#, python-format +msgid "Unable to update stats on non-initialized Volume Group: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:462 +#, python-format +msgid "Error creating iSCSI target, retrying creation for target: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:482 +#, python-format +msgid "volume_info:%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:518 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:519 cinder/volume/drivers/lvm.py:724 +#: cinder/volume/drivers/huawei/rest_common.py:1225 +#, python-format +msgid "%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:573 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/nfs.py:109 +msgid "Driver specific implementation needs to return mount_point_base." +msgstr "" + +#: cinder/volume/drivers/nfs.py:263 +#, python-format +msgid "Expected volume size was %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:264 +#, python-format +msgid " but size is now %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:361 +#, python-format +msgid "%s is already mounted" +msgstr "" + +#: cinder/volume/drivers/nfs.py:421 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:426 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/nfs.py:431 +#, python-format +msgid "NFS config 'nfs_oversub_ratio' invalid. Must be > 0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:439 +#, python-format +msgid "NFS config 'nfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:493 +#, python-format +msgid "Selected %s as target nfs share." 
+msgstr "" + +#: cinder/volume/drivers/nfs.py:526 +#, python-format +msgid "%s is above nfs_used_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:529 +#, python-format +msgid "%s is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:532 +#, python-format +msgid "%s reserved space is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/rbd.py:160 +#, python-format +msgid "Invalid argument - whence=%s not supported" +msgstr "" + +#: cinder/volume/drivers/rbd.py:164 +msgid "Invalid argument" +msgstr "" + +#: cinder/volume/drivers/rbd.py:183 +msgid "fileno() not supported by RBD()" +msgstr "" + +#: cinder/volume/drivers/rbd.py:210 +#, python-format +msgid "error opening rbd image %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:259 +msgid "rados and rbd python libraries not found" +msgstr "" + +#: cinder/volume/drivers/rbd.py:265 +msgid "error connecting to ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:346 cinder/volume/drivers/sheepdog.py:178 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:377 +#, python-format +msgid "clone depth exceeds limit of %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:411 +#, python-format +msgid "maximum clone depth (%d) has been reached - flattening source volume" +msgstr "" + +#: cinder/volume/drivers/rbd.py:423 +#, python-format +msgid "flattening source volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:435 +#, python-format +msgid "creating snapshot='%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:445 +#, python-format +msgid "cloning '%(src_vol)s@%(src_snap)s' to '%(dest)s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:459 +msgid "clone created successfully" +msgstr "" + +#: cinder/volume/drivers/rbd.py:468 +#, python-format +msgid "creating volume '%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:484 +#, python-format +msgid "flattening %(pool)s/%(img)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:490 +#, python-format +msgid "cloning %(pool)s/%(img)s@%(snap)s to %(dst)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:527 +msgid "volume has no backup snaps" +msgstr "" + +#: cinder/volume/drivers/rbd.py:550 +#, python-format +msgid "volume %s is not a clone" +msgstr "" + +#: cinder/volume/drivers/rbd.py:568 +#, python-format +msgid "deleting parent snapshot %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:579 +#, python-format +msgid "deleting parent %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:593 +#, python-format +msgid "volume %s no longer exists in backend" +msgstr "" + +#: cinder/volume/drivers/rbd.py:609 +msgid "volume has clone snapshot(s)" +msgstr "" + +#: cinder/volume/drivers/rbd.py:625 +#, python-format +msgid "deleting rbd volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:629 +msgid "" +"ImageBusy error raised while deleting rbd volume. This may have been " +"caused by a connection from a client that has crashed and, if so, may be " +"resolved by retrying the delete after 30 seconds has elapsed." 
+msgstr "" + +#: cinder/volume/drivers/rbd.py:642 +msgid "volume is a clone so cleaning references" +msgstr "" + +#: cinder/volume/drivers/rbd.py:696 +#, python-format +msgid "connection data: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:705 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:709 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:712 +msgid "Not an rbd snapshot" +msgstr "" + +#: cinder/volume/drivers/rbd.py:724 +#, python-format +msgid "not cloneable: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:728 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:733 +msgid "rbd image clone requires image format to be 'raw' but image {0} is '{1}'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:747 +#, python-format +msgid "Unable to open image %(loc)s: %(err)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:817 +msgid "volume backup complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:830 +msgid "volume restore complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:840 cinder/volume/drivers/sheepdog.py:195 +#, python-format +msgid "Failed to Extend Volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:845 cinder/volume/drivers/sheepdog.py:200 +#: cinder/volume/drivers/windows/windows.py:223 +#, python-format +msgid "Extend volume from %(old_size)s GB to %(new_size)s GB." +msgstr "" + +#: cinder/volume/drivers/scality.py:67 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:78 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:84 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:105 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:139 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:59 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:64 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:144 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:151 +#, python-format +msgid "" +"Failed to make httplib connection SolidFire Cluster: %s (verify san_ip " +"settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:154 +#, python-format +msgid "Failed to make httplib connection: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:161 +#, python-format +msgid "" +"Request to SolidFire cluster returned bad status: %(status)s / %(reason)s" +" (check san_login/san_password settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:166 +#, python-format +msgid "HTTP request failed, with status: %(status)s and reason: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:177 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:183 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:187 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:189 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:195 +#, python-format +msgid "Detected 
xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:202 +#: cinder/volume/drivers/solidfire.py:271 +#: cinder/volume/drivers/solidfire.py:366 +#, python-format +msgid "API response: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:222 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:253 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:315 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:398 +msgid "Failed to get model update from clone" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:410 +#, python-format +msgid "Failed volume create: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:425 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:460 +#, python-format +msgid "Failed to get SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:469 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:478 +#, python-format +msgid "Volume %s, not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:481 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:550 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:554 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:556 +msgid "This usually means the volume was never successfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:569 +#, python-format +msgid "Failed to delete SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:572 +#: cinder/volume/drivers/solidfire.py:646 +#: cinder/volume/drivers/solidfire.py:709 +#: cinder/volume/drivers/solidfire.py:734 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:575 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:579 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:587 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:638 +msgid "Entering SolidFire extend_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:660 +msgid "Leaving SolidFire extend_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:665 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:673 +msgid "Failed to get updated stats" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:703 +#: cinder/volume/drivers/solidfire.py:728 +msgid "Entering SolidFire attach_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:773 +msgid "Leaving SolidFire transfer volume" +msgstr "" + +#: cinder/volume/drivers/zadara.py:236 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:260 +#, python-format +msgid "Operation completed. 
%(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:357 +#, python-format +msgid "Pool %(name)s: %(total)sGB total, %(free)sGB free" +msgstr "" + +#: cinder/volume/drivers/zadara.py:408 cinder/volume/drivers/zadara.py:531 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:438 +#, python-format +msgid "Create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:445 cinder/volume/drivers/zadara.py:490 +#: cinder/volume/drivers/zadara.py:516 +#, python-format +msgid "Volume %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:456 +#, python-format +msgid "Delete snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:464 +#, python-format +msgid "snapshot: original volume %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:472 +#, python-format +msgid "snapshot: snapshot %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:483 +#, python-format +msgid "Creating volume from snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:496 +#, python-format +msgid "Snapshot %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:614 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:40 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:79 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:83 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:91 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:98 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:107 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:115 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:130 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:137 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:144 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:152 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:157 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:167 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:177 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:188 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:197 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:218 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:230 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:241 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:257 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:266 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:278 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:287 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:292 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:302 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:312 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:321 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:342 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. 
Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:354 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:365 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:381 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:390 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:402 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:411 +msgid "Entering delete_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:413 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:420 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:430 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:438 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:442 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:456 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:465 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:472 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:476 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:488 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:497 +#: cinder/volume/drivers/emc/emc_smis_common.py:567 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:502 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:518 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:527 +#, python-format +msgid "" +"Error Create Snapshot: %(snapshot)s Volume: %(volume)s Error: " +"%(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:535 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:541 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:545 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:551 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:559 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:574 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:590 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:599 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:611 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:621 +#, python-format +msgid "Create export: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:626 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:648 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:663 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:674 +#, python-format +msgid "Error mapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:678 +#, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:694 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:707 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:711 +#, python-format +msgid "HidePaths for volume %s completed successfully." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:724 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:739 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:744 +#, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:757 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:770 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:775 +#, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:781 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:790 +#: cinder/volume/drivers/emc/emc_smis_common.py:820 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:804 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:810 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:834 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:840 +#, python-format +msgid "Volume %s is already mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:852 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:884 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:887 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:903 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:906 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:928 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:948 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:952 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:959 +msgid "Cannot connect to ECOM server" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:971 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:984 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:997 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1010 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1054 +#, python-format +msgid "Pool %(storage_type)s is not found." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1060 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1066 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1082 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1114 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1117 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1130 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1158 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1184 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1188 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1248 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1289 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1302 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1314 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1326 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1361 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1404 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1409 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1419 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1441 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1463 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1491 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1520 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1526 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1538 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1548 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1550 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1566 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:152 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:161 +#, python-format +msgid "Cannot find device number for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:191 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:198 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:215 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:70 +#, python-format +msgid "Range: start LU: %(start)s, end LU: %(end)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:84 +#, python-format +msgid "setting LU upper (end) limit to %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:92 +#, python-format +msgid "%(element)s: %(val)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:103 cinder/volume/drivers/hds/hds.py:105 +#, python-format +msgid "XML exception reading parameter: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:178 +#, python-format +msgid "portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:197 +#, python-format +msgid "No configuration found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:250 +#, python-format +msgid "HDP not found: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:289 +#, python-format +msgid "iSCSI portal not found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:327 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:355 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is cloned." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:372 +#, python-format +msgid "LUN %(lun)s extended to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:395 +#, python-format +msgid "delete lun %(lun)s on %(name)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:480 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created from snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:503 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is created as snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:522 +#, python-format +msgid "LUN %s is deleted." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:57 +msgid "_instantiate_driver: configuration not found." 
+msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:64 +#, python-format +msgid "" +"_instantiate_driver: Loading %(protocol)s driver for Huawei OceanStor " +"%(product)s series storage arrays." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:84 +#, python-format +msgid "" +"\"Product\" or \"Protocol\" is illegal. \"Product\" should be set to " +"either T, Dorado or HVS. \"Protocol\" should be set to either iSCSI or " +"FC. Product: %(product)s Protocol: %(protocol)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:74 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s host: %(host)s initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:92 +#: cinder/volume/drivers/huawei/huawei_t.py:461 +#, python-format +msgid "initialize_connection: Target FC ports WWNS: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:101 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(ini)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:159 +#: cinder/volume/drivers/huawei/rest_common.py:1278 +#, python-format +msgid "" +"_get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " +"check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:206 +#: cinder/volume/drivers/huawei/rest_common.py:1083 +#, python-format +msgid "_get_tgt_iqn: iSCSI IP is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:234 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:248 +#, python-format +msgid "" +"_get_iscsi_tgt_port_info: Failed to get iSCSI port info. Please make sure" +" the iSCSI port IP %s is configured in array." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:323 +#: cinder/volume/drivers/huawei/huawei_t.py:552 +#, python-format +msgid "" +"terminate_connection: volume: %(vol)s, host: %(host)s, connector: " +"%(initiator)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:351 +#, python-format +msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:436 +msgid "validate_connector: The FC driver requires thewwpns in the connector." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:443 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:578 +#, python-format +msgid "_remove_fc_ports: FC port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:40 +#, python-format +msgid "parse_xml_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:129 +#, python-format +msgid "_get_host_os_type: Host %(ip)s OS type is %(os)s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:59 +#, python-format +msgid "HVS Request URL: %(url)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:60 +#, python-format +msgid "HVS Request Data: %(data)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:73 +#, python-format +msgid "HVS Response Data: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:75 +#, python-format +msgid "Bad response from server: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:82 +msgid "JSON transfer error" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:102 +#, python-format +msgid "Login error, reason is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:166 +#, python-format +msgid "" +"%(err)s\n" +"result: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:173 +#, python-format +msgid "%s \"data\" was not in result." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:208 +msgid "Can't find the Qos policy in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:246 +msgid "Can't find lun or lun group in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:280 +#, python-format +msgid "Invalid resource pool: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:298 +#, python-format +msgid "Get pool info error, pool name is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:327 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:354 +#, python-format +msgid "_stop_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:474 +#, python-format +msgid "" +"_mapping_hostgroup_and_lungroup: lun_group: %(lun_group)sview_id: " +"%(view_id)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:511 +#: cinder/volume/drivers/huawei/rest_common.py:543 +#, python-format +msgid "initiator name:%(initiator_name)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:527 +#, python-format +msgid "host lun id is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:553 +#, python-format +msgid "the free wwns %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:574 +#, python-format +msgid "the fc server properties is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:688 +#, python-format +msgid "JSON transfer data error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:874 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:937 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:964 +#, python-format +msgid "" +"PrefetchType config is wrong. PrefetchType must in 1,2,3,4. fetchtype " +"is:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:970 +msgid "Use default prefetch fetchtype. Prefetch fetchtype:Intelligent." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:982 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal.LUNcopy name: " +"%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1056 +#, python-format +msgid "" +"_get_iscsi_port_info: Failed to get iscsi port info through config IP " +"%(ip)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1101 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1124 +#, python-format +msgid "_parse_volume_type: type id: %(type_id)s config parameter is: %(params)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1157 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the configuration file " +"%(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1162 +#, python-format +msgid "The config parameters are: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1239 +#: cinder/volume/drivers/huawei/ssh_common.py:118 +#: cinder/volume/drivers/huawei/ssh_common.py:1265 +#, python-format +msgid "_check_conf_file: Config file invalid. %s must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1246 +#: cinder/volume/drivers/huawei/ssh_common.py:125 +msgid "_check_conf_file: Config file invalid. StoragePool must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1256 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1300 +msgid "Can not find lun in array" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:54 +#, python-format +msgid "ssh_read: Read SSH timeout. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:70 +msgid "No response message. Please check system status." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:101 +#: cinder/volume/drivers/huawei/ssh_common.py:1249 +msgid "do_setup" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:135 +#: cinder/volume/drivers/huawei/ssh_common.py:1287 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType is invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:169 +#, python-format +msgid "_get_login_info: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:224 +#, python-format +msgid "create_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:242 +#, python-format +msgid "" +"_name_translate: Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:279 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the element in configuration " +"file %(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:373 +#: cinder/volume/drivers/huawei/ssh_common.py:1451 +#, python-format +msgid "LUNType must be \"Thin\" or \"Thick\". LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:395 +msgid "" +"_parse_conf_lun_params: Use default prefetch type. 
Prefetch type: " +"Intelligent" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:421 +#, python-format +msgid "" +"_get_maximum_capacity_pool_id: Failed to get pool id. Please check config" +" file and make sure the StoragePool %s is created in storage array." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:436 +#, python-format +msgid "CLI command: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:466 +#, python-format +msgid "" +"_execute_cli: Can not connect to IP %(old)s, try to connect to the other " +"IP %(new)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:501 +#, python-format +msgid "_execute_cli: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:511 +#, python-format +msgid "delete_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:516 +#, python-format +msgid "delete_volume: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:570 +#, python-format +msgid "" +"create_volume_from_snapshot: snapshot name: %(snapshot)s, volume name: " +"%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:580 +#, python-format +msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:650 +#, python-format +msgid "_wait_for_luncopy: LUNcopy %(luncopyname)s status is %(status)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:688 +#, python-format +msgid "create_cloned_volume: src volume: %(src)s, tgt volume: %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:697 +#, python-format +msgid "Source volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:739 +#, python-format +msgid "" +"extend_volume: extended volume name: %(extended_name)s new added volume " +"name: %(added_name)s new added volume size: %(added_size)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:747 +#, python-format +msgid "extend_volume: volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:779 +#, python-format +msgid "create_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:785 +msgid "create_snapshot: Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:792 +#, python-format +msgid "create_snapshot: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:855 +#, python-format +msgid "delete_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:865 +#, python-format +msgid "" +"delete_snapshot: Can not delete snapshot %s for it is a source LUN of " +"LUNCopy." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:873 +#, python-format +msgid "delete_snapshot: Snapshot %(snap)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:916 +#, python-format +msgid "" +"%(func)s: %(msg)s\n" +"CLI command: %(cmd)s\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:933 +#, python-format +msgid "map_volume: Volume %s was not found." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1079 +#, python-format +msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1102 +#, python-format +msgid "remove_map: Host %s does not exist." 
+msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1106 +#, python-format +msgid "remove_map: Volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1119 +#, python-format +msgid "remove_map: No map between host %(host)s and volume %(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1138 +#, python-format +msgid "" +"_delete_map: There are IOs accessing the system. Retry to delete host map" +" %(mapid)s 10s later." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1146 +#, python-format +msgid "" +"_delete_map: Failed to delete host map %(mapid)s.\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1185 +msgid "_update_volume_stats: Updating volume stats." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1277 +msgid "_check_conf_file: Config file invalid. StoragePool must be specified." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1311 +msgid "" +"_get_device_type: The driver only supports Dorado5100 and Dorado 2100 G2 " +"now." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1389 +#, python-format +msgid "" +"create_volume_from_snapshot: %(device)s does not support create volume " +"from snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1396 +#, python-format +msgid "create_cloned_volume: %(device)s does not support clone volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1404 +#, python-format +msgid "extend_volume: %(device)s does not support extend volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1413 +#, python-format +msgid "create_snapshot: %(device)s does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:132 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:142 +#, python-format +msgid "Failed getting details for pool %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:179 +msgid "do_setup: No configured nodes." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:182 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:186 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:190 +msgid "Unable to determine system name" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:193 +msgid "Unable to determine system id" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:196 +msgid "Unable to determine pool extent size" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:203 +#: cinder/volume/drivers/netapp/iscsi.py:122 +#: cinder/volume/drivers/netapp/nfs.py:639 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:157 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:209 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:217 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:225 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:235 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:254 +msgid "The connector does not contain the required information." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:277 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:297 +msgid "CHAP secret exists for host but CHAP is disabled" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:302 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:314 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:316 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:333 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:342 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:365 +msgid "" +"Could not get FC connection information for the host-volume connection. " +"Is the host configured properly for FC connections?" 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:380 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:385 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:403 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:411 +msgid "terminate_connection: Failed to get host name from connector." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:421 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:447 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:459 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:470 +#, python-format +msgid "enter: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:474 +msgid "extend_volume: Extending a volume with snapshots is not supported." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:481 +#, python-format +msgid "leave: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:497 +#, python-format +msgid "enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:523 +#, python-format +msgid "leave: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:540 +#, python-format +msgid "" +"enter: retype: id=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:581 +#, python-format +msgid "" +"exit: retype: ild=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:622 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:623 +msgid "_update_volume_stats: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:44 +#, python-format +msgid "Could not find key in output of command %(cmd)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:64 +#, python-format +msgid "Failed to get code level (%s)." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:86 +#, python-format +msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:143 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:165 +#, python-format +msgid "Failed to find host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:178 +#, python-format +msgid "enter: get_host_from_connector: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:207 +#, python-format +msgid "leave: get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:218 +#, python-format +msgid "enter: create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:224 +msgid "create_host: Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:235 +msgid "create_host: No initiators or wwpns supplied." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:265 +#, python-format +msgid "leave: create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:275 +#, python-format +msgid "enter: map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:301 +#, python-format +msgid "" +"leave: map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host " +"%(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:311 +#, python-format +msgid "enter: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:318 +#, python-format +msgid "unmap_vol_from_host: No mapping of volume %(vol_name)s to any host found." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:324 +#, python-format +msgid "" +"unmap_vol_from_host: Multiple mappings of volume %(vol_name)s found, no " +"host specified." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:336 +#, python-format +msgid "" +"unmap_vol_from_host: No mapping of volume %(vol_name)s to host %(host) " +"found." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:348 +#, python-format +msgid "leave: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:377 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:383 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:390 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:397 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:402 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:408 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:417 +#, python-format +msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:452 +msgid "Protocol must be specified as ' iSCSI' or ' FC'." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:495 +#, python-format +msgid "enter: create_vdisk: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:498 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:525 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping%(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:535 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within theallotted %(to)d " +"seconds timeout. Terminating." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:544 +#, python-format +msgid "" +"enter: run_flashcopy: execute FlashCopy from source %(source)s to target " +"%(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:552 +#, python-format +msgid "leave: run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:572 +#, python-format +msgid "Loopcall: _check_vdisk_fc_mappings(), vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:595 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:631 +#, python-format +msgid "Calling _ensure_vdisk_no_fc_mappings: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:639 +#, python-format +msgid "enter: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:641 +#, python-format +msgid "Tried to delete non-existant vdisk %s." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:645 +#, python-format +msgid "leave: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:649 +#, python-format +msgid "enter: create_copy: snapshot %(src)s to %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:654 +#, python-format +msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:669 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt)s from vdisk %(src)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:691 +msgid "migrate_volume started without a vdisk copy in the expected pool." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:743 +#, python-format +msgid "" +"Ignore change IO group as storage code level is %(code_level)s, below " +"then 6.4.0.0" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:35 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:211 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:244 +#, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:53 +#, python-format +msgid "Expected no output from CLI command %(cmd)s, got %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:65 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:256 +#, python-format +msgid "" +"Failed to parse CLI output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:142 +msgid "Must pass wwpn or host to lsfabric." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:158 +#, python-format +msgid "Did not find success message nor error for %(fun)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:163 +msgid "" +"storwize_svc_multihostmap_enabled is set to False, not allowing multi " +"host mapping." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:347 +#, python-format +msgid "Did not find expected key %(key)s in %(fun)s: %(raw)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:382 +#, python-format +msgid "" +"Unexpected CLI response: header/row mismatch. header: %(header)s, row: " +"%(row)s" +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:419 +#, python-format +msgid "No element by given name %s." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:440 +msgid "Not a valid value for NaElement." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:444 +msgid "NaElement name cannot be null." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:468 +msgid "Type cannot be converted into NaElement." 
+msgstr "" + +#: cinder/volume/drivers/netapp/common.py:75 +msgid "Required configuration not found" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:103 +#, python-format +msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:109 +#, python-format +msgid "Storage family %s is not supported" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:116 +#, python-format +msgid "No default storage protocol found for storage family %(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:123 +#, python-format +msgid "" +"Protocol %(storage_protocol)s is not supported for storage family " +"%(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:130 +#, python-format +msgid "" +"NetApp driver of family %(storage_family)s and protocol " +"%(storage_protocol)s loaded" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:139 +msgid "Only loading netapp drivers supported." +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:158 +#, python-format +msgid "" +"The configured NetApp driver is deprecated. Please refer the link to " +"resolve the issue '%s'." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:69 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:105 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:150 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:166 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:175 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:191 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:227 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:232 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:238 +#, python-format +msgid "Failed to get LUN target details for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:249 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:252 +#, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:290 +#, python-format +msgid "Snapshot %s deletion successful" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:310 +#: cinder/volume/drivers/netapp/iscsi.py:565 +#: cinder/volume/drivers/netapp/nfs.py:99 +#: cinder/volume/drivers/netapp/nfs.py:206 +#, python-format +msgid "Resizing %s failed. Cleaning volume." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:325 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:412 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:431 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:511 +msgid "Object is not a NetApp LUN." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:543 +#, python-format +msgid "Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:545 +#, python-format +msgid "Error getting lun attribute. Exception: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:600 +#, python-format +msgid "No need to extend volume %s as it is already the requested new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:606 +#, python-format +msgid "Resizing lun %s directly to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:633 +#, python-format +msgid "Lun %(path)s geometry failed. Message - %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:662 +#, python-format +msgid "Moving lun %(name)s to %(new_name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:677 +#, python-format +msgid "Resizing lun %s using sub clone to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:684 +#, python-format +msgid "%s cannot be sub clone resized as it is hosted on compressed volume" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:690 +#, python-format +msgid "%s cannot be sub clone resized as it contains no blocks." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:707 +#, python-format +msgid "Post clone resize lun %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:718 +#, python-format +msgid "Failure staging lun %s to tmp." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:723 +#, python-format +msgid "Failure moving new cloned lun to %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:727 +#, python-format +msgid "Failure deleting staged tmp lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:730 +#, python-format +msgid "Unknown exception in post clone resize lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:732 +#, python-format +msgid "Exception details: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:736 +msgid "Getting lun block count." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:741 +#, python-format +msgid "Failure getting lun info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:785 +#, python-format +msgid "Failed to get vol with required size and extra specs for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:796 +#, python-format +msgid "Error provisioning vol %(name)s on %(volume)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:841 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:982 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:986 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1087 +msgid "Cluster ssc is not updated. No volume stats found." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1149 +#: cinder/volume/drivers/netapp/nfs.py:1080 +msgid "Unsupported ONTAP version. ONTAP version 7.3.1 and above is supported." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1153 +#: cinder/volume/drivers/netapp/nfs.py:1084 +#: cinder/volume/drivers/netapp/utils.py:320 +msgid "Api version could not be determined." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1164 +#, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1273 +#, python-format +msgid "Error finding luns for volume %s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1390 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1393 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1456 +msgid "Volume refresh job already running. Returning..." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1462 +#, python-format +msgid "Error refreshing vol capacity. Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1470 +#, python-format +msgid "Refreshing capacity info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:104 +#: cinder/volume/drivers/netapp/nfs.py:211 +#, python-format +msgid "NFS file %s not discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:224 +#, python-format +msgid "Copied image to volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:230 +#, python-format +msgid "Registering image in cache %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:237 +#, python-format +msgid "" +"Exception while registering image %(image_id)s in cache. Exception: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:250 +#, python-format +msgid "Found cache file for image %(image_id)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:263 +#, python-format +msgid "Cloning img from cache for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:271 +msgid "Image cache cleaning in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:282 +msgid "Image cache cleaning in progress." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:293 +#, python-format +msgid "Cleaning cache for share %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:298 +#, python-format +msgid "Files to be queued for deletion %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:305 +#, python-format +msgid "Exception during cache cleaning %(share)s. Message - %(ex)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:311 +msgid "Image cache cleaning done." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:336 +#, python-format +msgid "Bytes to free %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:343 +#, python-format +msgid "Delete file path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:358 +#, python-format +msgid "Deleting file at path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:363 +#, python-format +msgid "Exception during deleting %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:395 +#, python-format +msgid "Unexpected exception in cloning image %(image_id)s. 
Message: %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:411 +#, python-format +msgid "Cloning image %s from cache" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:415 +#, python-format +msgid "Cache share: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:425 +#, python-format +msgid "Unexpected exception during image cloning in share %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:431 +#, python-format +msgid "Cloning image %s directly in share" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:436 +#, python-format +msgid "Share is cloneable %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:443 +#, python-format +msgid "Image is raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:450 +#, python-format +msgid "Image will locally be converted to raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:457 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:467 +#, python-format +msgid "Performing post clone for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:474 +msgid "NFS file could not be discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:478 +msgid "Checking file for resize" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:482 +#, python-format +msgid "Resizing file to %sG" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:488 +msgid "Resizing image file failed." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:510 +msgid "Discover file retries exhausted." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:529 +#, python-format +msgid "Image location not in the expected format %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:557 +#, python-format +msgid "Found possible share matches %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:561 +msgid "Unexpected exception while short listing used share." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:599 +#, python-format +msgid "Extending volume %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:710 +#, python-format +msgid "Shares on vserver %s will only be used for provisioning." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:714 +#: cinder/volume/drivers/netapp/nfs.py:892 +msgid "No vserver set in config. SSC will be disabled." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:757 +#, python-format +msgid "Exception creating vol %(name)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:765 +#, python-format +msgid "Volume %s could not be created on shares." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:815 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:856 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:862 +#, python-format +msgid "" +"Cloning with params volume %(volume)s, src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:924 +msgid "No cluster ssc stats found. Wait for next volume stats update." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:941 +msgid "No shares found hence skipping ssc refresh." 
+msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:978 +#: cinder/volume/drivers/netapp/nfs.py:1221 +#, python-format +msgid "Shortlisted del elg files %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:983 +#: cinder/volume/drivers/netapp/nfs.py:1226 +#, python-format +msgid "Getting file usage for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:988 +#: cinder/volume/drivers/netapp/nfs.py:1231 +#, python-format +msgid "file-usage for path %(path)s is %(bytes)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1005 +#: cinder/volume/drivers/netapp/nfs.py:1268 +#, python-format +msgid "Share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1007 +#: cinder/volume/drivers/netapp/nfs.py:1270 +#, python-format +msgid "No share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1038 +#, python-format +msgid "Found volume %(vol)s for share %(share)s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1129 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1139 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:241 +#, python-format +msgid "Unexpected error while creating ssc vol list. Message - %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:272 +#, python-format +msgid "Exception querying aggr options. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:313 +#, python-format +msgid "Exception querying sis information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:347 +#, python-format +msgid "Exception querying mirror information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:379 +#, python-format +msgid "Exception querying storage disk. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:421 +#, python-format +msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:455 +#, python-format +msgid "Successfully completed stale refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:482 +#, python-format +msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:488 +#, python-format +msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:500 +msgid "Backend not a VolumeDriver." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:502 +msgid "Backend server not NaServer." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:505 +msgid "ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:517 +msgid "refresh stale ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:616 +msgid "Fatal error: User not permitted to query NetApp volumes." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:623 +#, python-format +msgid "" +"The user does not have access or sufficient privileges to use all ssc " +"apis. The ssc features %s may not work as expected." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:122 +msgid "ems executed successfully." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:124 +#, python-format +msgid "Failed to invoke ems. 
Message : %s" +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:137 +msgid "" +"It is not the recommended way to use drivers by NetApp. Please use " +"NetAppDriver to achieve the functionality." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:160 +msgid "Requires an NaServer instance." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:317 +msgid "Unsupported Clustered Data ONTAP version." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:99 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:150 +#, python-format +msgid "Extending volume: %(id)s New size: %(size)s GB" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:166 +#, python-format +msgid "Volume %s does not exist, it seems it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:179 +#, python-format +msgid "Cannot delete snapshot %(origin): %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:190 +#, python-format +msgid "Creating temp snapshot of the original volume: %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:200 +#: cinder/volume/drivers/nexenta/nfs.py:200 +#, python-format +msgid "Volume creation failed, deleting created snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:205 +#: cinder/volume/drivers/nexenta/nfs.py:205 +#, python-format +msgid "Failed to delete zfs snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:223 +#, python-format +msgid "Enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:250 +#, python-format +msgid "Remote NexentaStor appliance at %s should be SSH-bound." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:267 +#, python-format +msgid "" +"Cannot send source snapshot %(src)s to destination %(dst)s. Reason: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:275 +#, python-format +msgid "" +"Cannot delete temporary source snapshot %(src)s on NexentaStor Appliance:" +" %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:281 +#, python-format +msgid "Cannot delete source volume %(volume)s on NexentaStor Appliance: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:318 +#, python-format +msgid "Snapshot %s does not exist, it seems it was already deleted." 
+msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:439 +#: cinder/volume/drivers/windows/windows_utils.py:230 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:449 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:461 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:471 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:481 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:514 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:522 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:83 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:88 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:89 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:90 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:96 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:85 +#, python-format +msgid "Volume %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:89 +#, python-format +msgid "Folder %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:114 +#, python-format +msgid "Creating folder on Nexenta Store %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:146 +#, python-format +msgid "Cannot destroy created folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:176 +#, python-format +msgid "Cannot destroy cloned folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:227 +#, python-format +msgid "Folder %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:237 +#: cinder/volume/drivers/nexenta/nfs.py:268 +#, python-format +msgid "Snapshot %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:302 +#, python-format +msgid "Creating regular file: %s.This may take some time." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:313 +#, python-format +msgid "Regular file: %s created." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:365 +#, python-format +msgid "Sharing folder %s on Nexenta Store" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:393 +#, python-format +msgid "Shares loaded: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/utils.py:46 +#, python-format +msgid "Invalid value: \"%s\"" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:93 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:99 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:107 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:137 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:190 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:246 +#, python-format +msgid "Snapshot info: %(name)s => %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:321 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:79 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:166 +#, python-format +msgid "Invalid hp3parclient version. Version %s or greater required." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:179 +#, python-format +msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:193 +#, python-format +msgid "HP3PARCommon %(common_ver)s, hp3parclient %(rest_ver)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:212 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:494 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:228 +#, python-format +msgid "Failed to get domain because CPG (%s) doesn't exist on array." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:247 +#, python-format +msgid "Error extending volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:347 +#, python-format +msgid "command %s failed" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:390 +#, python-format +msgid "Error running ssh command: %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:586 +#, python-format +msgid "VV Set %s does not exist." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:633 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:684 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:752 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1004 +#, python-format +msgid "Failure in update_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1019 +#, python-format +msgid "Failure in clear_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1031 +#, python-format +msgid "Error attaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1039 +#, python-format +msgid "Error detaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:124 +#, python-format +msgid "Invalid IP address format '%s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:158 +#, python-format +msgid "" +"Found invalid iSCSI IP address(s) in configuration option(s) " +"hp3par_iscsi_ips or iscsi_ip_address '%s.'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:164 +msgid "At least one valid iSCSI IP address must be set." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:266 +msgid "Least busy iSCSI port not found, using first iSCSI port in list." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:75 +#, python-format +msgid "Failure while invoking function: %(func)s. Error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:162 +#, python-format +msgid "Error while terminating session: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:165 +msgid "Successfully established connection to the server." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:172 +#, python-format +msgid "Error while logging out the user: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:218 +#, python-format +msgid "" +"Not authenticated error occurred. Will create session and try API call " +"again: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:258 +#, python-format +msgid "Task: %(task)s progress: %(prog)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:262 +#, python-format +msgid "Task %s status: success." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:266 +#: cinder/volume/drivers/vmware/api.py:271 +#, python-format +msgid "Task: %(task)s failed with error: %(err)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:290 +msgid "Lease is ready." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:294 +msgid "Lease initializing..." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:304 +#, python-format +msgid "Error: unknown lease state %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:51 +#, python-format +msgid "Read %(bytes)s out of %(max)s from ThreadSafePipe." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:56 +#, python-format +msgid "Completed transfer of size %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:102 +#, python-format +msgid "Initiating image service update on image: %(image)s with meta: %(meta)s" +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:117 +#, python-format +msgid "Glance image: %s is now active." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:123 +#, python-format +msgid "Glance image: %s is in killed state." 
+msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:132 +#, python-format +msgid "Glance image %(id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:171 +#, python-format +msgid "" +"Exception during HTTP connection close in VMwareHTTPWrite. Exception is " +"%s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:203 +#: cinder/volume/drivers/vmware/read_write_util.py:292 +msgid "Could not retrieve URL from lease." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:206 +#, python-format +msgid "Opening vmdk url: %s for write." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:231 +#, python-format +msgid "Written %s bytes to vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:242 +#: cinder/volume/drivers/vmware/read_write_util.py:318 +#, python-format +msgid "Updating progress to %s percent." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:258 +#: cinder/volume/drivers/vmware/read_write_util.py:334 +msgid "Lease released." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:260 +#: cinder/volume/drivers/vmware/read_write_util.py:336 +#, python-format +msgid "Lease is already in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:295 +#, python-format +msgid "Opening vmdk url: %s for read." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:307 +#, python-format +msgid "Read %s bytes from vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:150 +#, python-format +msgid "Error(s): %s occurred in the call to RetrievePropertiesEx." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:189 +#, python-format +msgid "No such SOAP method %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:198 +#, python-format +msgid "httplib error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:209 +#, python-format +msgid "Socket error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:218 +#, python-format +msgid "Type error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:225 +#, python-format +msgid "Error in %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:112 +#, python-format +msgid "Returning spec value %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:115 +#, python-format +msgid "Invalid spec value: %s specified." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:118 +#, python-format +msgid "Returning default spec value: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:169 +#, python-format +msgid "%s not set." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:174 +#, python-format +msgid "Successfully setup driver: %(driver)s for server: %(ip)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:219 +msgid "Backing not available, no operation to be performed." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:287 +#, python-format +msgid "" +"Unable to pick datastore to accommodate %(size)s bytes from the " +"datastores: %(dss)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:293 +#, python-format +msgid "" +"Selected datastore: %(datastore)s with %(host_count)d connected host(s) " +"for the volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:375 +#, python-format +msgid "" +"Unable to find suitable datastore for volume of size: %(vol)s GB under " +"host: %(host)s. 
More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:385 +#, python-format +msgid "Unable to find host to accommodate a disk of size: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:412 +#, python-format +msgid "" +"Unable to find suitable datastore for volume: %(vol)s under host: " +"%(host)s. More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:422 +#, python-format +msgid "Unable to create volume: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:441 +#, python-format +msgid "The instance: %s for which initialize connection is called, exists." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:448 +#, python-format +msgid "There is no backing for the volume: %s. Need to create one." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:456 +msgid "The instance for which initialize connection is called, does not exist." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:461 +#, python-format +msgid "Trying to boot from an empty volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:470 +#, python-format +msgid "" +"Returning connection_info: %(info)s for volume: %(volume)s with " +"connector: %(connector)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:518 +#, python-format +msgid "Snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:523 +#, python-format +msgid "There is no backing, so will not create snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:528 +#, python-format +msgid "Successfully created snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:549 +#, python-format +msgid "Delete snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:554 +#, python-format +msgid "There is no backing, and so there is no snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:558 +#, python-format +msgid "Successfully deleted snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:586 +#, python-format +msgid "Successfully cloned new backing: %(back)s from source VMDK file: %(vmdk)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:603 +#, python-format +msgid "" +"There is no backing for the source volume: %(svol)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:633 +#, python-format +msgid "" +"There is no backing for the source snapshot: %(snap)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:642 +#: cinder/volume/drivers/vmware/vmdk.py:982 +#, python-format +msgid "" +"There is no snapshot point for the snapshoted volume: %(snap)s. Not " +"creating any backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:678 +#, python-format +msgid "Cannot create image of disk format: %s. Only vmdk disk format is accepted." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:713 +#: cinder/volume/drivers/vmware/vmdk.py:771 +#, python-format +msgid "Fetching glance image: %(id)s to server: %(host)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:722 +#: cinder/volume/drivers/vmware/vmdk.py:792 +#, python-format +msgid "Done copying image: %(id)s to volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:725 +#, python-format +msgid "" +"Exception in copy_image_to_volume: %(excep)s. Deleting the backing: " +"%(back)s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:746 +#, python-format +msgid "Exception in _select_ds_for_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:749 +#, python-format +msgid "Selected datastore %(ds)s for new volume of size %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:784 +#, python-format +msgid "Exception in copy_image_to_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:787 +#, python-format +msgid "Deleting the backing: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:808 +#, python-format +msgid "Copy glance image: %s to create new volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:842 +msgid "Upload to glance of attached volume is not supported." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:847 +#, python-format +msgid "Copy Volume: %s to new image." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:853 +#, python-format +msgid "Backing not found, creating for volume: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:872 +#, python-format +msgid "Done copying volume %(vol)s to a new image %(img)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:922 +#, python-format +msgid "Relocating volume: %(backing)s to %(ds)s and %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:961 +#: cinder/volume/drivers/vmware/volumeops.py:630 +#, python-format +msgid "Successfully created clone: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:974 +#, python-format +msgid "" +"There is no backing for the snapshoted volume: %(snap)s. Not creating any" +" backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1010 +#, python-format +msgid "" +"There is no backing for the source volume: %(src)s. Not creating any " +"backing for volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1018 +#, python-format +msgid "Linked clone of source volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:94 +#, python-format +msgid "Downloading image: %s from glance image server as a flat vmdk file." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:107 +#: cinder/volume/drivers/vmware/vmware_images.py:126 +#, python-format +msgid "Downloaded image: %s from glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:113 +#, python-format +msgid "Downloading image: %s from glance image server using HttpNfc import." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:132 +#, python-format +msgid "Uploading image: %s to the Glance image server using HttpNfc export." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:158 +#, python-format +msgid "Uploaded image: %s to the Glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:87 +#, python-format +msgid "Did not find any backing with name: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:94 +#, python-format +msgid "Deleting the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:97 +#, python-format +msgid "Initiated deletion of VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:99 +#, python-format +msgid "Deleted the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:237 +#, python-format +msgid "There are no valid datastores attached to %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:289 +#, python-format +msgid "" +"Creating folder: %(child_folder_name)s under parent folder: " +"%(parent_folder)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:306 +#, python-format +msgid "Child folder already present: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:314 +#, python-format +msgid "Created child folder: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:365 +#, python-format +msgid "Spec for creating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:383 +#, python-format +msgid "" +"Creating volume backing name: %(name)s disk_type: %(disk_type)s size_kb: " +"%(size_kb)s at folder: %(folder)s resourse pool: %(resource_pool)s " +"datastore name: %(ds_name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:395 +#, python-format +msgid "Initiated creation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:398 +#, python-format +msgid "Successfully created volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:438 +#, python-format +msgid "Spec for relocating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:452 +#, python-format +msgid "" +"Relocating backing: %(backing)s to datastore: %(ds)s and resource pool: " +"%(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:462 +#, python-format +msgid "Initiated relocation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:464 +#, python-format +msgid "" +"Successfully relocated volume backing: %(backing)s to datastore: %(ds)s " +"and resource pool: %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:474 +#, python-format +msgid "Moving backing: %(backing)s to folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:479 +#, python-format +msgid "Initiated move of volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:482 +#, python-format +msgid "Successfully moved volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:494 +#, python-format +msgid "Snapshoting backing: %(backing)s with name: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:501 +#, python-format +msgid "Initiated snapshot of volume backing: %(backing)s named: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:505 +#, python-format +msgid "Successfully created snapshot: %(snap)s for volume backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:553 +#, python-format +msgid "Deleting the snapshot: %(name)s from backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:558 +#, python-format +msgid "" +"Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " +"delete anything." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:565 +#, python-format +msgid "Initiated snapshot: %(name)s deletion for backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:569 +#, python-format +msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:597 +#, python-format +msgid "Spec for cloning the backing: %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:613 +#, python-format +msgid "" +"Creating a clone of backing: %(back)s, named: %(name)s, clone type: " +"%(type)s from snapshot: %(snap)s on datastore: %(ds)s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:627 +#, python-format +msgid "Initiated clone of backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:638 +#, python-format +msgid "Deleting file: %(file)s under datacenter: %(dc)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:646 +#, python-format +msgid "Initiated deletion via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:648 +#, python-format +msgid "Successfully deleted file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:701 +msgid "Copying disk data before snapshot of the VM" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:711 +#, python-format +msgid "Initiated copying disk data via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:713 +#, python-format +msgid "Successfully copied disk at: %(src)s to: %(dest)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:722 +#, python-format +msgid "Deleting vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:729 +#, python-format +msgid "Initiated deleting vmdk file via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:731 +#, python-format +msgid "Deleted vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/windows/windows.py:102 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:47 +#, python-format +msgid "" +"check_for_setup_error: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:53 +msgid "check_for_setup_error: there is no ISCSI traffic listening." +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:63 +#, python-format +msgid "" +"get_host_information: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:73 +#, python-format +msgid "" +"get_host_information: the ISCSI target information could not be " +"retrieved. WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:105 +#, python-format +msgid "" +"associate_initiator_with_iscsi_target: an association between initiator: " +"%(init)s and target name: %(target)s could not be established. WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:123 +#, python-format +msgid "" +"delete_iscsi_target: error when deleting the iscsi target associated with" +" target name: %(target)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:139 +#, python-format +msgid "" +"create_volume: error when creating the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:157 +#, python-format +msgid "" +"delete_volume: error when deleting the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:177 +#, python-format +msgid "" +"create_snapshot: error when creating the snapshot name: %(vol_name)s . 
" +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:193 +#, python-format +msgid "" +"create_volume_from_snapshot: error when creating the volume name: " +"%(vol_name)s from snapshot name: %(snap_name)s. WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:208 +#, python-format +msgid "" +"delete_snapshot: error when deleting the snapshot name: %(snap_name)s . " +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:223 +#, python-format +msgid "" +"create_iscsi_target: error when creating iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:240 +#, python-format +msgid "" +"remove_iscsi_target: error when deleting iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:255 +#, python-format +msgid "" +"add_disk_to_target: error adding disk associated to volume : %(vol_name)s" +" to the target name: %(tar_name)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:273 +#, python-format +msgid "" +"copy_vhd_disk: error when copying disk from source path : %(src_path)s to" +" destination path: %(dest_path)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:290 +#, python-format +msgid "" +"extend: error when extending the volume: %(vol_name)s .WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/flows/common.py:52 +#, python-format +msgid "Restoring source %(source_volid)s status to %(status)s" +msgstr "" + +#: cinder/volume/flows/common.py:58 +#, python-format +msgid "" +"Failed setting source volume %(source_volid)s back to its initial " +"%(source_status)s status" +msgstr "" + +#: cinder/volume/flows/common.py:83 +#, python-format +msgid "Updating volume: %(volume_id)s with %(update)s due to: %(reason)s" +msgstr "" + +#: cinder/volume/flows/common.py:90 +#: cinder/volume/flows/manager/create_volume.py:676 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(update)s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:81 +#, python-format +msgid "Originating snapshot status must be one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:103 +#, python-format +msgid "" +"Unable to create a volume from an originating source volume when its " +"status is not one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:126 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than the snapshot size " +"%(snap_size)sGB. They must be >= original snapshot size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:135 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than original volume size " +"%(source_size)sGB. They must be >= original volume size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:144 +#, python-format +msgid "Volume size %(size)s must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:186 +#, python-format +msgid "" +"Size of specified image %(image_size)sGB is larger than volume size " +"%(volume_size)sGB." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:194 +#, python-format +msgid "" +"Volume size %(volume_size)sGB cannot be smaller than the image minDisk " +"size %(min_disk)sGB." 
+msgstr "" + +#: cinder/volume/flows/api/create_volume.py:212 +#, python-format +msgid "Metadata property key %s greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:217 +#, python-format +msgid "Metadata property key %s value greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:254 +#, python-format +msgid "Availability zone '%s' is invalid" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:267 +msgid "Volume must be in the same availability zone as the snapshot" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:276 +msgid "Volume must be in the same availability zone as the source volume" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:315 +msgid "Volume type will be changed to be the same as the source volume." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:463 +#, python-format +msgid "Failed destroying volume entry %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:546 +#, python-format +msgid "Failed rolling back quota for %s reservations" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:590 +#, python-format +msgid "Failed to update quota for deleting volume: %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:678 +#: cinder/volume/flows/manager/create_volume.py:192 +#, python-format +msgid "Volume %s: create failed" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:682 +msgid "Unexpected build error:" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:105 +#, python-format +msgid "" +"Volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d due to " +"%(reason)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:124 +#, python-format +msgid "Volume %s: re-scheduled" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:141 +#, python-format +msgid "Updating volume %(volume_id)s with %(update)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:146 +#, python-format +msgid "Volume %s: resetting 'creating' status failed." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:165 +#, python-format +msgid "Volume %s: rescheduling failed" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:308 +#, python-format +msgid "" +"Failed notifying about the volume action %(event)s for volume " +"%(volume_id)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:345 +#, python-format +msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:347 +#, python-format +msgid "" +"Failed updating volume %(vol_id)s metadata using the provided " +"%(src_type)s %(src_id)s metadata" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:405 +#, python-format +msgid "" +"Failed fetching snapshot %(snapshot_id)s bootable flag using the provided" +" glance snapshot %(snapshot_ref_id)s volume reference" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:418 +#, python-format +msgid "Marking volume %s as bootable." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:421 +#, python-format +msgid "Failed updating volume %(volume_id)s bootable flag to true" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:448 +#, python-format +msgid "" +"Attempting download of %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s." 
+msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:455 +#: cinder/volume/flows/manager/create_volume.py:466 +#, python-format +msgid "" +"Failed to copy image %(image_id)s to volume: %(volume_id)s, error: " +"%(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:461 +#, python-format +msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:475 +#, python-format +msgid "" +"Downloaded image %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s successfully." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:512 +#, python-format +msgid "" +"Creating volume glance metadata for volume %(volume_id)s backed by image " +"%(image_id)s with: %(vol_metadata)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:526 +#, python-format +msgid "" +"Cloning %(volume_id)s from image %(image_id)s at location " +"%(image_location)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:552 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(updates)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:574 +#, python-format +msgid "Unable to create volume. Volume driver %s not initialized" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:588 +#, python-format +msgid "" +"Volume %(volume_id)s: being created using %(functor)s with specification:" +" %(volume_spec)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:611 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with creation provided " +"model %(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:619 +#, python-format +msgid "Volume %s: creating export" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:633 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with driver provided model " +"%(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:680 +#, python-format +msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" +msgstr "" + +#~ msgid "Connection to glance failed" +#~ msgstr "" + +#~ msgid "Invalid snapshot" +#~ msgstr "" + +#~ msgid "Invalid input received" +#~ msgstr "" + +#~ msgid "Invalid volume type" +#~ msgstr "" + +#~ msgid "Invalid volume" +#~ msgstr "" + +#~ msgid "Invalid host" +#~ msgstr "" + +#~ msgid "Invalid auth key" +#~ msgstr "" + +#~ msgid "Invalid metadata" +#~ msgstr "" + +#~ msgid "Invalid metadata size" +#~ msgstr "" + +#~ msgid "Migration error" +#~ msgstr "" + +#~ msgid "Quota exceeded" +#~ msgstr "" + +#~ msgid "Connection to swift failed" +#~ msgstr "" + +#~ msgid "Volume migration failed" +#~ msgstr "" + +#~ msgid "SSH command injection detected" +#~ msgstr "" + +#~ msgid "Invalid qos specs" +#~ msgstr "" + +#~ msgid "base image still has %s snapshots so not deleting base image" +#~ msgstr "" + +#~ msgid "Resize volume \"%(name)s\" to %(size)s" +#~ msgstr "" + +#~ msgid "Volume \"%(name)s\" resized. New size is %(size)s" +#~ msgstr "" + +#~ msgid "Invalid snapshot backing file format: %s" +#~ msgstr "" + +#~ msgid "Extend volume from %(old_size) to %(new_size)" +#~ msgstr "" + +#~ msgid "Disk not found: %s" +#~ msgstr "" + +#~ msgid "read timed out" +#~ msgstr "" + +#~ msgid "check_for_setup_error." +#~ msgstr "" + +#~ msgid "check_for_setup_error: Can not get device type." +#~ msgstr "" + +#~ msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." 
+#~ msgstr "" + +#~ msgid "" +#~ msgstr "" + +#~ msgid "_get_device_type: Storage Pool must be configured." +#~ msgstr "" + +#~ msgid "create_volume:volume name: %s." +#~ msgstr "" + +#~ msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +#~ msgstr "" + +#~ msgid "create_export: volume name:%s" +#~ msgstr "" + +#~ msgid "create_export:Volume %(name)s does not exist." +#~ msgstr "" + +#~ msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +#~ msgstr "" + +#~ msgid "terminate_connection:Host does not exist. Host name:%(host)s." +#~ msgstr "" + +#~ msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +#~ msgstr "" + +#~ msgid "create_snapshot:Device does not support snapshot." +#~ msgstr "" + +#~ msgid "create_snapshot:Resource pool needs 1GB valid size at least." +#~ msgstr "" + +#~ msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +#~ msgstr "" + +#~ msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s" +#~ msgstr "" + +#~ msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +#~ msgstr "" + +#~ msgid "delete_snapshot:Device does not support snapshot." +#~ msgstr "" + +#~ msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +#~ msgstr "" + +#~ msgid "_check_conf_file: %s" +#~ msgstr "" + +#~ msgid "Write login information to xml error. %s" +#~ msgstr "" + +#~ msgid "_get_login_info error. %s" +#~ msgstr "" + +#~ msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +#~ msgstr "" + +#~ msgid "_get_lun_set_info:%s" +#~ msgstr "" + +#~ msgid "_get_iscsi_info:%s" +#~ msgstr "" + +#~ msgid "CLI command:%s" +#~ msgstr "" + +#~ msgid "_execute_cli:%s" +#~ msgstr "" + +#~ msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +#~ msgstr "" + +#~ msgid "_get_tgt_iqn:iSCSI IP is %s." +#~ msgstr "" + +#~ msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +#~ msgstr "" + +#~ msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +#~ msgstr "" + +#~ msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +#~ msgstr "" + +#~ msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." +#~ msgstr "" + +#~ msgid "Invalid source volume %(reason)s." +#~ msgstr "" + +#~ msgid "The request is invalid." +#~ msgstr "" + +#~ msgid "Volume %(volume_id)s persistence file could not be found." +#~ msgstr "" + +#~ msgid "No disk at %(location)s" +#~ msgstr "" + +#~ msgid "Class %(class_name)s could not be found: %(exception)s" +#~ msgstr "" + +#~ msgid "Action not allowed." +#~ msgstr "" + +#~ msgid "Key pair %(key_name)s already exists." +#~ msgstr "" + +#~ msgid "Migration error: %(reason)s" +#~ msgstr "" + +#~ msgid "Maximum volume/snapshot size exceeded" +#~ msgstr "" + +#~ msgid "3PAR Host already exists: %(err)s. %(info)s" +#~ msgstr "" + +#~ msgid "Backup volume %(volume_id)s type not recognised." +#~ msgstr "" + +#~ msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s" +#~ msgstr "" + +#~ msgid "ssh_read: Read SSH timeout" +#~ msgstr "" + +#~ msgid "do_setup." +#~ msgstr "" + +#~ msgid "create_volume: volume name: %s." +#~ msgstr "" + +#~ msgid "delete_volume: volume name: %s." 
+#~ msgstr "" + +#~ msgid "create_cloned_volume: src volume: %(src)s tgt volume: %(tgt)s" +#~ msgstr "" + +#~ msgid "create_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" +#~ msgstr "" + +#~ msgid "delete_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" +#~ msgstr "" + +#~ msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Updating volume stats" +#~ msgstr "" + +#~ msgid "restore finished." +#~ msgstr "" + +#~ msgid "Error encountered during initialization of driver: %s" +#~ msgstr "" + +#~ msgid "Unabled to update stats, driver is uninitialized" +#~ msgstr "" + +#~ msgid "Snapshot file at %s does not exist." +#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %s does not exist" +#~ msgstr "" + +#~ msgid "Login to 3PAR array invalid" +#~ msgstr "" + +#~ msgid "There are no datastores present under %s." +#~ msgstr "" + +#~ msgid "Size for volume: %s not found, skipping secure delete." +#~ msgstr "" + +#~ msgid "Could not find attribute for LUN named %s" +#~ msgstr "" + +#~ msgid "Cleaning up incomplete backup operations" +#~ msgstr "" + +#~ msgid "Resetting volume %s to available (was backing-up)" +#~ msgstr "" + +#~ msgid "Resetting volume %s to error_restoring (was restoring-backup)" +#~ msgstr "" + +#~ msgid "Resetting backup %s to error (was creating)" +#~ msgstr "" + +#~ msgid "Resetting backup %s to available (was restoring)" +#~ msgstr "" + +#~ msgid "Resuming delete on backup: %s" +#~ msgstr "" + +#~ msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +#~ msgstr "" + +#~ msgid "create_backup finished. backup: %s" +#~ msgstr "" + +#~ msgid "delete_backup started, backup: %s" +#~ msgstr "" + +#~ msgid "delete_backup finished, backup %s deleted" +#~ msgstr "" + +#~ msgid "JSON transfer Error" +#~ msgstr "" + +#~ msgid "create volume error: %(err)s" +#~ msgstr "" + +#~ msgid "Create snapshot error." +#~ msgstr "" + +#~ msgid "Create luncopy error." +#~ msgstr "" + +#~ msgid "_find_host_lun_id transfer data error! " +#~ msgstr "" + +#~ msgid "ssh_read: Read SSH timeout." +#~ msgstr "" + +#~ msgid "There are no hosts in the inventory." +#~ msgstr "" + +#~ msgid "Unable to create volume: %(vol)s on the hosts: %(hosts)s." +#~ msgstr "" + +#~ msgid "Successfully cloned new backing: %s." +#~ msgstr "" + +#~ msgid "Successfully reverted clone: %(clone)s to snapshot: %(snapshot)s." +#~ msgstr "" + +#~ msgid "Copying backing files from %(src)s to %(dest)s." +#~ msgstr "" + +#~ msgid "Initiated copying of backing via task: %s." +#~ msgstr "" + +#~ msgid "Successfully copied backing to %s." +#~ msgstr "" + +#~ msgid "Registering backing at path: %s to inventory." +#~ msgstr "" + +#~ msgid "Initiated registring backing, task: %s." +#~ msgstr "" + +#~ msgid "Successfully registered backing: %s." +#~ msgstr "" + +#~ msgid "Reverting backing to snapshot: %s." +#~ msgstr "" + +#~ msgid "Initiated reverting snapshot via task: %s." +#~ msgstr "" + +#~ msgid "Successfully reverted to snapshot: %s." +#~ msgstr "" + +#~ msgid "Successfully copied disk data to: %s." +#~ msgstr "" + +#~ msgid "Error(s): %s occurred in the call to RetrieveProperties." +#~ msgstr "" + +#~ msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +#~ msgstr "" + +#~ msgid "Deploy v1 of the Cinder API. " +#~ msgstr "" + +#~ msgid "Deploy v2 of the Cinder API. " +#~ msgstr "" + +#~ msgid "_read_xml:%s" +#~ msgstr "" + +#~ msgid "request ip info is %s." +#~ msgstr "" + +#~ msgid "new str info is %s." 
+#~ msgstr "" + +#~ msgid "Failed to create iser target for volume %(volume_id)s." +#~ msgstr "" + +#~ msgid "Failed to remove iser target for volume %(volume_id)s." +#~ msgstr "" + +#~ msgid "rtstool is not installed correctly" +#~ msgstr "" + +#~ msgid "Creating iser_target for: %s" +#~ msgstr "" + +#~ msgid "Failed to create iser target for volume id:%(vol_id)s: %(e)s" +#~ msgstr "" + +#~ msgid "Removing iser_target for: %s" +#~ msgstr "" + +#~ msgid "Failed to remove iser target for volume id:%(vol_id)s: %(e)s" +#~ msgstr "" + +#~ msgid "Volume %s does not exist, it seems it was already deleted" +#~ msgstr "" + +#~ msgid "Executing zfs send/recv on the appliance" +#~ msgstr "" + +#~ msgid "zfs send/recv done, new volume %s created" +#~ msgstr "" + +#~ msgid "Failed to delete temp snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" + +#~ msgid "Failed to delete zfs recv snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" + +#~ msgid "rbd export-diff failed - %s" +#~ msgstr "" + +#~ msgid "rbd import-diff failed - %s" +#~ msgstr "" + +#~ msgid "%s is not on GPFS. Perhaps GPFS not mounted." +#~ msgstr "" + +#~ msgid "Folder %s does not exist, it seems it was already deleted." +#~ msgstr "" + +#~ msgid "No 'os-update_readonly_flag' was specified in request." +#~ msgstr "" + +#~ msgid "Volume 'readonly' flag must be specified in request as a boolean." +#~ msgstr "" + +#~ msgid "ISER provider_location not stored, using discovery" +#~ msgstr "" + +#~ msgid "Could not find iSER export for volume %s" +#~ msgstr "" + +#~ msgid "ISER Discovery: Found %s" +#~ msgstr "" + +#~ msgid "Failed to access the device on the path %(path)s: %(error)s." +#~ msgstr "" + +#~ msgid "iSER device not found at %s" +#~ msgstr "" + +#~ msgid "Found iSER node %(host_device)s (after %(tries)s rescans)." +#~ msgstr "" + +#~ msgid "Skipping ensure_export. No iser_target provisioned for volume: %s" +#~ msgstr "" + +#~ msgid "Skipping remove_export. No iser_target provisioned for volume: %s" +#~ msgstr "" + +#~ msgid "Downloading image: %s from glance image server." +#~ msgstr "" + +#~ msgid "Uploading image: %s to the Glance image server." +#~ msgstr "" + +#~ msgid "Invalid request body" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: prefix %s" +#~ msgstr "" + +#~ msgid "Schedule volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete schedule volume using flow: %s" +#~ msgstr "" + +#~ msgid "Create volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete create volume workflow" +#~ msgstr "" + +#~ msgid "Expected volume result not found" +#~ msgstr "" + +#~ msgid "Manager volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete manager volume workflow" +#~ msgstr "" + +#~ msgid "Unable to update stats, driver is uninitialized" +#~ msgstr "" + +#~ msgid "Bad reponse from server: %s" +#~ msgstr "" + +#~ msgid "%(flow)s has moved into state %(state)s from state %(old_state)s" +#~ msgstr "" + +#~ msgid "No request spec, will not reschedule" +#~ msgstr "" + +#~ msgid "No retry filter property or associated retry info, will not reschedule" +#~ msgstr "" + +#~ msgid "Retry info not present, will not reschedule" +#~ msgstr "" + +#~ msgid "Clear capabilities" +#~ msgstr "" + +#~ msgid "This usually means the volume was never succesfully created." 
+#~ msgstr "" + +#~ msgid "setting LU uppper (end) limit to %s" +#~ msgstr "" + +#~ msgid "Can't find lun or lun goup in array" +#~ msgstr "" + +#~ msgid "Volume to be restored to is smaller than the backup to be restored" +#~ msgstr "" + +#~ msgid "Volume driver '%(driver)s' not initialized." +#~ msgstr "" + +#~ msgid "in looping call" +#~ msgstr "" + +#~ msgid "Is the appropriate service running?" +#~ msgstr "" + +#~ msgid "Could not find another host" +#~ msgstr "" + +#~ msgid "Not enough allocatable volume gigabytes remaining" +#~ msgstr "" + +#~ msgid "Unable to update stats on non-intialized Volume Group: %s" +#~ msgstr "" + +#~ msgid "do_setup: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "migrate_volume started with more than one vdisk copy" +#~ msgstr "" + +#~ msgid "migrate_volume: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "Selected datastore: %s for the volume." +#~ msgstr "" + +#~ msgid "There are no valid datastores present under %s." +#~ msgstr "" + +#~ msgid "Unable to create volume, driver not initialized" +#~ msgstr "" + +#~ msgid "Migration %(migration_id)s could not be found." +#~ msgstr "" + +#~ msgid "Bad driver response status: %(status)s" +#~ msgstr "" + +#~ msgid "Instance %(instance_id)s could not be found." +#~ msgstr "" + +#~ msgid "Volume retype failed: %(reason)s" +#~ msgstr "" + +#~ msgid "SIGTERM received" +#~ msgstr "" + +#~ msgid "Child %(pid)d exited with status %(code)d" +#~ msgstr "" + +#~ msgid "_wait_child %d" +#~ msgstr "" + +#~ msgid "wait wrap.failed %s" +#~ msgstr "" + +#~ msgid "" +#~ "Report interval must be less than " +#~ "service down time. Current config: " +#~ ". Setting " +#~ "service_down_time to: %(new_service_down_time)s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target for volume %(name)s." +#~ msgstr "" + +#~ msgid "Updating iscsi target: %s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target %(name)s: %(e)s" +#~ msgstr "" + +#~ msgid "Caught '%(exception)s' exception." +#~ msgstr "" + +#~ msgid "Get code level failed" +#~ msgstr "" + +#~ msgid "do_setup: Could not get system name" +#~ msgstr "" + +#~ msgid "Failed to get license information." +#~ msgstr "" + +#~ msgid "do_setup: No configured nodes" +#~ msgstr "" + +#~ msgid "enter: _get_chap_secret_for_host: host name %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_chap_secret_for_host: host name " +#~ "%(host_name)s with secret %(chap_secret)s" +#~ msgstr "" + +#~ msgid "" +#~ "_create_host: Cannot clean host name. " +#~ "Host name is not unicode or string" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: %s" +#~ msgstr "" + +#~ msgid "leave: _get_host_from_connector: host %s" +#~ msgstr "" + +#~ msgid "enter: _create_host: host %s" +#~ msgstr "" + +#~ msgid "_create_host: No connector ports" +#~ msgstr "" + +#~ msgid "leave: _create_host: host %(host)s - %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +#~ msgstr "" + +#~ msgid "" +#~ "storwize_svc_multihostmap_enabled is set to " +#~ "False, Not allow multi host mapping" +#~ msgstr "" + +#~ msgid "volume %s mapping to multi host" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _map_vol_to_host: LUN %(result_lun)s, " +#~ "volume %(volume_name)s, host %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "leave: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "_create_host failed to return the host name." 
+#~ msgstr "" + +#~ msgid "_get_host_from_connector failed to return the host name for connector" +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to any host found." +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: Multiple mappings of " +#~ "volume %(vol_name)s found, no host " +#~ "specified." +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to host %(host_name)s found" +#~ msgstr "" + +#~ msgid "protocol must be specified as ' iSCSI' or ' FC'" +#~ msgstr "" + +#~ msgid "enter: _create_vdisk: vdisk %s " +#~ msgstr "" + +#~ msgid "" +#~ "_create_vdisk %(name)s - did not find success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find success " +#~ "message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find mapping " +#~ "id in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to prepare FlashCopy" +#~ " from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "Unexecpted mapping status %(status)s for " +#~ "mapping %(id)s. Attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Mapping %(id)s prepare failed to " +#~ "complete within the allotted %(to)d " +#~ "seconds timeout. Terminating." +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s with " +#~ "exception %(ex)s" +#~ msgstr "" + +#~ msgid "_prepare_fc_map: %s" +#~ msgstr "" + +#~ msgid "" +#~ "_start_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "enter: _run_flashcopy: execute FlashCopy from" +#~ " source %(source)s to target %(target)s" +#~ msgstr "" + +#~ msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +#~ msgstr "" + +#~ msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %(src_vdisk)s (%(src_id)s) does not exist" +#~ msgstr "" + +#~ msgid "" +#~ "_create_copy: cannot get source vdisk " +#~ "%(src)s capacity from vdisk attributes " +#~ "%(attr)s" +#~ msgstr "" + +#~ msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_flashcopy_mapping_attributes: mapping " +#~ "%(fc_map_id)s, attributes %(attributes)s" +#~ msgstr "" + +#~ msgid "enter: _is_vdisk_defined: vdisk %s " +#~ msgstr "" + +#~ msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +#~ msgstr "" + +#~ msgid "enter: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "warning: Tried to delete vdisk %s but it does not exist." 
+#~ msgstr "" + +#~ msgid "leave: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "" +#~ "_add_vdisk_copy %(name)s - did not find" +#~ " success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "_get_vdisk_copy_attrs: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "_get_pool_attrs: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "enter: _execute_command_and_parse_attributes: command %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _execute_command_and_parse_attributes:\n" +#~ "command: %(cmd)s\n" +#~ "attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "_get_hdr_dic: attribute headers and values do not match.\n" +#~ " Headers: %(header)s\n" +#~ " Values: %(row)s" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ "stdout: %(out)s\n" +#~ "stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "Did not find expected column in %(fun)s: %(hdr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Volume size %(size)s cannot be lesser" +#~ " than the snapshot size %(snap_size)s. " +#~ "They must be >= original snapshot " +#~ "size." +#~ msgstr "" + +#~ msgid "" +#~ "Clones currently disallowed when %(size)s " +#~ "< %(source_size)s. They must be >= " +#~ "original volume size." +#~ msgstr "" + +#~ msgid "" +#~ "Size of specified image %(image_size)s " +#~ "is larger than volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "" +#~ "Image minDisk size %(min_disk)s is " +#~ "larger than the volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "Updating volume %(volume_id)s with %(update)s" +#~ msgstr "" + +#~ msgid "Volume %s: resetting 'creating' status failed" +#~ msgstr "" + +#~ msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s" +#~ msgstr "" + +#~ msgid "Marking volume %s as bootable" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting download of %(image_id)s " +#~ "(%(image_location)s) to volume %(volume_id)s" +#~ msgstr "" + +#~ msgid "" +#~ "Downloaded image %(image_id)s (%(image_location)s)" +#~ " to volume %(volume_id)s successfully" +#~ msgstr "" + +#~ msgid "" +#~ "Creating volume glance metadata for " +#~ "volume %(volume_id)s backed by image " +#~ "%(image_id)s with: %(vol_metadata)s" +#~ msgstr "" + +#~ msgid "" +#~ "Cloning %(volume_id)s from image %(image_id)s" +#~ " at location %(image_location)s" +#~ msgstr "" + diff --git a/cinder/locale/nl_NL/LC_MESSAGES/cinder.po b/cinder/locale/nl_NL/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..249cc37506 --- /dev/null +++ b/cinder/locale/nl_NL/LC_MESSAGES/cinder.po @@ -0,0 +1,10736 @@ +# Dutch (Netherlands) translations for cinder. +# Copyright (C) 2013 ORGANIZATION +# This file is distributed under the same license as the cinder project. 
+# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Cinder\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-02-03 06:16+0000\n" +"PO-Revision-Date: 2013-05-29 08:13+0000\n" +"Last-Translator: openstackjenkins \n" +"Language-Team: Dutch (Netherlands) " +"(http://www.transifex.com/projects/p/openstack/language/nl_NL/)\n" +"Plural-Forms: nplurals=2; plural=(n != 1)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:102 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:66 cinder/brick/exception.py:33 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:88 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:107 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:112 +#, python-format +msgid "Volume driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:116 +#, python-format +msgid "Backup driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:120 +#, python-format +msgid "Connection to glance failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:124 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:129 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:133 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:137 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:141 +msgid "Volume driver not ready." +msgstr "" + +#: cinder/exception.py:145 cinder/brick/exception.py:74 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:150 +#, python-format +msgid "Invalid snapshot: %(reason)s" +msgstr "" + +#: cinder/exception.py:154 +#, python-format +msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:159 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:163 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:167 +msgid "The results are invalid." +msgstr "" + +#: cinder/exception.py:171 +#, python-format +msgid "Invalid input received: %(reason)s" +msgstr "" + +#: cinder/exception.py:175 +#, python-format +msgid "Invalid volume type: %(reason)s" +msgstr "" + +#: cinder/exception.py:179 +#, python-format +msgid "Invalid volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:183 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:187 +#, python-format +msgid "Invalid host: %(reason)s" +msgstr "" + +#: cinder/exception.py:193 cinder/brick/exception.py:81 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:197 +#, python-format +msgid "Invalid auth key: %(reason)s" +msgstr "" + +#: cinder/exception.py:201 +#, python-format +msgid "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" +msgstr "" + +#: cinder/exception.py:206 +msgid "Service is unavailable at this time." 
+msgstr "" + +#: cinder/exception.py:210 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:214 +#, python-format +msgid "The device in the path %(path)s is unavailable: %(reason)s" +msgstr "" + +#: cinder/exception.py:218 +#, python-format +msgid "Expected a uuid but received %(uuid)s." +msgstr "" + +#: cinder/exception.py:222 cinder/brick/exception.py:68 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:228 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:232 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "" +"Volume %(volume_id)s has no administration metadata with key " +"%(metadata_key)s." +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Invalid metadata: %(reason)s" +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Invalid metadata size: %(reason)s" +msgstr "" + +#: cinder/exception.py:250 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:255 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:264 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:269 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s deletion is not allowed with volumes " +"present with the type." +msgstr "" + +#: cinder/exception.py:274 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:278 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:282 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:287 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:291 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:295 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:328 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:332 +#, python-format +msgid "Unknown quota resources %(unknown)s." 
+msgstr "" + +#: cinder/exception.py:336 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:340 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:344 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:348 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:352 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:356 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:365 +#, python-format +msgid "Volume Type %(id)s already exists." +msgstr "" + +#: cinder/exception.py:369 +#, python-format +msgid "Volume type encryption for type %(type_id)s already exists." +msgstr "" + +#: cinder/exception.py:373 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:377 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:381 +#, python-format +msgid "Could not find parameter %(param)s" +msgstr "" + +#: cinder/exception.py:385 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:389 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:398 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:402 +#, python-format +msgid "Quota exceeded: code=%(code)s" +msgstr "" + +#: cinder/exception.py:409 +#, python-format +msgid "" +"Requested volume or snapshot exceeds allowed Gigabytes quota. Requested " +"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." +msgstr "" + +#: cinder/exception.py:415 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:419 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:423 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:427 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:432 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:436 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:440 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:444 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:449 +#, python-format +msgid "Glance metadata for volume/snapshot %(id)s cannot be found." 
+msgstr "" + +#: cinder/exception.py:453 +#, python-format +msgid "Failed to export for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:457 +#, python-format +msgid "Failed to create metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:461 +#, python-format +msgid "Failed to update metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:465 +#, python-format +msgid "Failed to copy metadata to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:469 +#, python-format +msgid "Failed to copy image to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:473 +msgid "Invalid Ceph args provided for backup rbd operation" +msgstr "" + +#: cinder/exception.py:477 +msgid "An error has occurred during backup operation" +msgstr "" + +#: cinder/exception.py:481 +msgid "Backup RBD operation failed" +msgstr "" + +#: cinder/exception.py:485 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:489 +msgid "Failed to identify volume backend." +msgstr "" + +#: cinder/exception.py:493 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "" + +#: cinder/exception.py:497 +#, python-format +msgid "Connection to swift failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:501 +#, python-format +msgid "Transfer %(transfer_id)s could not be found." +msgstr "" + +#: cinder/exception.py:505 +#, python-format +msgid "Volume migration failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:509 +#, python-format +msgid "SSH command injection detected: %(command)s" +msgstr "" + +#: cinder/exception.py:513 +#, python-format +msgid "QoS Specs %(specs_id)s already exists." +msgstr "" + +#: cinder/exception.py:517 +#, python-format +msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:522 +#, python-format +msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "No such QoS spec %(specs_id)s." +msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:536 +#, python-format +msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:541 +#, python-format +msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." +msgstr "" + +#: cinder/exception.py:546 +#, python-format +msgid "Invalid qos specs: %(reason)s" +msgstr "" + +#: cinder/exception.py:550 +#, python-format +msgid "QoS Specs %(specs_id)s is still associated with entities." +msgstr "" + +#: cinder/exception.py:554 +#, python-format +msgid "key manager error: %(reason)s" +msgstr "" + +#: cinder/exception.py:560 +msgid "Coraid Cinder Driver exception." +msgstr "" + +#: cinder/exception.py:564 +msgid "Failed to encode json data." +msgstr "" + +#: cinder/exception.py:568 +msgid "Login on ESM failed." +msgstr "" + +#: cinder/exception.py:572 +msgid "Relogin on ESM failed." +msgstr "" + +#: cinder/exception.py:576 +#, python-format +msgid "Group with name \"%(group_name)s\" not found." +msgstr "" + +#: cinder/exception.py:580 +#, python-format +msgid "ESM configure request failed: %(message)s." +msgstr "" + +#: cinder/exception.py:584 +#, python-format +msgid "Coraid ESM not available with reason: %(reason)s." +msgstr "" + +#: cinder/exception.py:589 +msgid "Zadara Cinder Driver exception." 
+msgstr "" + +#: cinder/exception.py:593 +#, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:597 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:601 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:605 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:609 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:613 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:618 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:622 +msgid "SolidFire Cinder Driver exception" +msgstr "" + +#: cinder/exception.py:626 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:630 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:636 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:641 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:645 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:649 cinder/exception.py:662 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:654 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:658 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/manager.py:133 +msgid "Notifying Schedulers of capabilities ..." +msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:105 +#, python-format +msgid "" +"Default quota for resource: %(res)s is set by the default quota flag: " +"quota_%(res)s, it is now deprecated. Please use the the default quota " +"class for default quota." +msgstr "" + +#: cinder/quota.py:748 +#, python-format +msgid "Created reservations %s" +msgstr "" + +#: cinder/quota.py:770 +#, python-format +msgid "Failed to commit reservations %s" +msgstr "" + +#: cinder/quota.py:790 +#, python-format +msgid "Failed to roll back reservations %s" +msgstr "" + +#: cinder/quota.py:876 +msgid "Cannot register resource" +msgstr "" + +#: cinder/quota.py:879 +msgid "Cannot register resources" +msgstr "" + +#: cinder/quota_utils.py:46 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume - " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/quota_utils.py:56 cinder/transfer/api.py:168 +#: cinder/volume/flows/api/create_volume.py:520 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/service.py:95 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:108 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:148 +#, python-format +msgid "" +"Report interval must be less than service down time. Current config " +"service_down_time: %(service_down_time)s, report_interval for this: " +"service is: %(report_interval)s. 
Setting global service_down_time to: " +"%(new_down_time)s" +msgstr "" + +#: cinder/service.py:216 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:255 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:270 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:276 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:298 +#, python-format +msgid "" +"Value of config option %(name)s_workers must be integer greater than 1. " +"Input value ignored." +msgstr "" + +#: cinder/service.py:373 +msgid "serve() can only be called once" +msgstr "" + +#: cinder/service.py:379 cinder/openstack/common/service.py:166 +#: cinder/openstack/common/service.py:384 +msgid "Full set of CONF:" +msgstr "" + +#: cinder/service.py:387 +#, python-format +msgid "%s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Can not translate %s to integer." +msgstr "" + +#: cinder/utils.py:127 +#, python-format +msgid "May specify only one of %s" +msgstr "" + +#: cinder/utils.py:212 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:228 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:412 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:423 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:698 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:759 +#, python-format +msgid "Volume driver %s not initialized" +msgstr "" + +#: cinder/wsgi.py:127 cinder/openstack/common/sslutils.py:50 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: cinder/wsgi.py:130 cinder/openstack/common/sslutils.py:53 +#, python-format +msgid "Unable to find ca_file : %s" +msgstr "" + +#: cinder/wsgi.py:133 cinder/openstack/common/sslutils.py:56 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: cinder/wsgi.py:136 cinder/openstack/common/sslutils.py:59 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:169 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:206 +#, python-format +msgid "Started %(name)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:244 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:313 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." 
+msgstr "" + +#: cinder/api/common.py:92 cinder/api/common.py:126 cinder/volume/api.py:266 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:95 cinder/api/common.py:130 cinder/volume/api.py:263 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:120 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:134 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:162 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:189 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:182 +msgid "Initializing extension manager." +msgstr "" + +#: cinder/api/extensions.py:197 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:235 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:236 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:240 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:256 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:262 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:276 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:287 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:356 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:266 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:463 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:786 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:907 +msgid "subclasses must implement construct()!" 
+msgstr "" + +#: cinder/api/contrib/admin_actions.py:81 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:227 +#: cinder/api/contrib/volume_transfer.py:157 +#: cinder/api/contrib/volume_transfer.py:193 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:224 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:60 +msgid "Snapshot not found." +msgstr "" + +#: cinder/api/contrib/hosts.py:86 cinder/api/openstack/wsgi.py:245 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:136 +#, python-format +msgid "Host '%s' could not be found." +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:168 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:180 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:206 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:214 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:111 +msgid "Please specify a name for QoS specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:220 +msgid "Failed to disassociate qos specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:222 +msgid "Qos specs still in use." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:298 +#: cinder/api/contrib/qos_specs_manage.py:351 +msgid "Volume Type id must not be None." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:72 +msgid "Missing required element quota_class_set in request body." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:81 +msgid "Quota class limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:85 +msgid "Quota class limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:60 +msgid "Quota limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quotas.py:65 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:100 +msgid "Missing required element quota_set in request body." +msgstr "" + +#: cinder/api/contrib/quotas.py:111 +#, python-format +msgid "Bad key(s) in quota set: %s" +msgstr "" + +#: cinder/api/contrib/scheduler_hints.py:36 +msgid "Malformed scheduler_hints attribute" +msgstr "" + +#: cinder/api/contrib/services.py:84 +msgid "" +"Query by service parameter is deprecated. Please use binary parameter " +"instead." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:51 +msgid "'status' must be specified." 
+msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:61 +#, python-format +msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:67 +#, python-format +msgid "" +"Provided snapshot status %(provided)s not allowed for snapshot with " +"status %(current)s." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:79 +msgid "progress must be an integer percentage" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:101 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:105 +#: cinder/api/v1/snapshot_metadata.py:75 cinder/api/v1/volume_metadata.py:75 +#: cinder/api/v2/snapshot_metadata.py:75 cinder/api/v2/volume_metadata.py:74 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:108 +#: cinder/api/v1/snapshot_metadata.py:79 cinder/api/v1/volume_metadata.py:79 +#: cinder/api/v2/snapshot_metadata.py:79 cinder/api/v2/volume_metadata.py:78 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:150 +msgid "" +"Key names can only contain alphanumeric characters, underscores, periods," +" colons and hyphens." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:99 +#, python-format +msgid "" +"Invalid request to attach volume to an instance %(instance_uuid)s and a " +"host %(host_name)s simultaneously" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:107 +msgid "Invalid request to attach volume to an invalid target" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:111 +msgid "" +"Invalid request to attach volume with an invalid mode. Attaching mode " +"should be 'rw' or 'ro'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:196 +msgid "Unable to fetch connection information from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:216 +msgid "Unable to terminate volume connection from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:229 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:237 +msgid "Bad value for 'force' parameter." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:240 +msgid "'force' is not string or bool." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:280 +msgid "New volume size must be specified as an integer." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:299 +msgid "Must specify readonly in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:307 +msgid "Bad value for 'readonly'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:311 +msgid "'readonly' not string or bool" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:325 +msgid "New volume type must be specified." 
+msgstr "" + +#: cinder/api/contrib/volume_transfer.py:131 +msgid "Listing volume transfers" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:147 +#, python-format +msgid "Creating new volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:162 +#, python-format +msgid "Creating transfer of volume %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:183 +#, python-format +msgid "Accepting volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:196 +#, python-format +msgid "Accepting transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:217 +#, python-format +msgid "Delete transfer with id: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:64 +msgid "key_size must be non-negative" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:67 +msgid "key_size must be an integer" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:73 +msgid "provider must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:75 +msgid "control_location must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:83 +#, python-format +msgid "Valid control location are: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:111 +msgid "Cannot create encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:115 +msgid "Create body is not valid." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:157 +msgid "Cannot delete encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/middleware/auth.py:108 +msgid "Invalid service catalog json." +msgstr "" + +#: cinder/api/middleware/fault.py:44 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:53 cinder/api/openstack/wsgi.py:984 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/fault.py:69 +#, python-format +msgid "%(exception)s: %(explanation)s" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:55 cinder/api/middleware/sizelimit.py:64 +#: cinder/api/middleware/sizelimit.py:78 +msgid "Request is too large." +msgstr "" + +#: cinder/api/openstack/__init__.py:69 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:80 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:104 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:126 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." 
+msgstr "" + +#: cinder/api/openstack/wsgi.py:220 cinder/api/openstack/wsgi.py:634 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:639 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:677 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:682 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:685 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:793 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:799 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:803 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:914 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:917 cinder/api/openstack/wsgi.py:930 +#: cinder/api/v1/snapshot_metadata.py:53 cinder/api/v1/snapshot_metadata.py:71 +#: cinder/api/v1/snapshot_metadata.py:96 cinder/api/v1/snapshot_metadata.py:121 +#: cinder/api/v1/volume_metadata.py:53 cinder/api/v1/volume_metadata.py:71 +#: cinder/api/v1/volume_metadata.py:96 cinder/api/v1/volume_metadata.py:121 +#: cinder/api/v2/snapshot_metadata.py:53 cinder/api/v2/snapshot_metadata.py:71 +#: cinder/api/v2/snapshot_metadata.py:96 cinder/api/v2/snapshot_metadata.py:121 +#: cinder/api/v2/volume_metadata.py:52 cinder/api/v2/volume_metadata.py:70 +#: cinder/api/v2/volume_metadata.py:95 cinder/api/v2/volume_metadata.py:120 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:927 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:939 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:987 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:139 cinder/api/v2/limits.py:138 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:264 cinder/api/v2/limits.py:261 +msgid "This request was rate-limited." 
+msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:37 cinder/api/v1/snapshot_metadata.py:117 +#: cinder/api/v1/snapshot_metadata.py:156 cinder/api/v2/snapshot_metadata.py:37 +#: cinder/api/v2/snapshot_metadata.py:117 +#: cinder/api/v2/snapshot_metadata.py:156 +msgid "snapshot does not exist" +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:139 +#: cinder/api/v1/snapshot_metadata.py:149 cinder/api/v1/volume_metadata.py:139 +#: cinder/api/v1/volume_metadata.py:149 cinder/api/v2/snapshot_metadata.py:139 +#: cinder/api/v2/snapshot_metadata.py:149 cinder/api/v2/volume_metadata.py:138 +#: cinder/api/v2/volume_metadata.py:148 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:119 cinder/api/v2/snapshots.py:120 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:184 +msgid "'volume_id' must be specified" +msgstr "" + +#: cinder/api/v1/snapshots.py:182 cinder/api/v2/snapshots.py:193 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:186 cinder/api/v2/snapshots.py:202 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:37 cinder/api/v1/volume_metadata.py:117 +#: cinder/api/v1/volume_metadata.py:156 cinder/api/v2/volume_metadata.py:36 +#: cinder/api/v2/volume_metadata.py:116 cinder/api/v2/volume_metadata.py:155 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:111 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:290 cinder/api/v2/volumes.py:228 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:344 cinder/api/v1/volumes.py:348 +#: cinder/api/v2/volumes.py:298 cinder/api/v2/volumes.py:302 +msgid "Invalid imageRef provided." +msgstr "" + +#: cinder/api/v1/volumes.py:388 cinder/api/v2/volumes.py:354 +#, python-format +msgid "snapshot id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:401 +#, python-format +msgid "source vol id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:412 cinder/api/v2/volumes.py:377 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/v1/volumes.py:496 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/snapshots.py:111 cinder/api/v2/snapshots.py:126 +#: cinder/api/v2/snapshots.py:267 +msgid "Snapshot could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:174 cinder/api/v2/snapshots.py:234 +#: cinder/api/v2/volumes.py:313 cinder/api/v2/volumes.py:419 +#, python-format +msgid "Missing required element '%s' in request body" +msgstr "" + +#: cinder/api/v2/snapshots.py:190 cinder/api/v2/volumes.py:217 +#: cinder/api/v2/volumes.py:234 cinder/api/v2/volumes.py:449 +msgid "Volume could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:230 cinder/api/v2/volumes.py:415 +msgid "Missing request body" +msgstr "" + +#: cinder/api/v2/types.py:70 +msgid "Volume type not found" +msgstr "" + +#: cinder/api/v2/volumes.py:237 +msgid "Volume cannot be deleted while in attached state" +msgstr "" + +#: cinder/api/v2/volumes.py:343 +msgid "Volume type not found." 
+msgstr "" + +#: cinder/api/v2/volumes.py:366 +#, python-format +msgid "source volume id:%s not found" +msgstr "" + +#: cinder/api/v2/volumes.py:472 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:66 +msgid "Backup status must be available or error" +msgstr "" + +#: cinder/backup/api.py:105 +msgid "Volume to be backed up must be available" +msgstr "" + +#: cinder/backup/api.py:140 +msgid "Backup status must be available" +msgstr "" + +#: cinder/backup/api.py:145 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:154 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:170 +msgid "Volume to be restored to must be available" +msgstr "" + +#: cinder/backup/api.py:176 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." +msgstr "" + +#: cinder/backup/api.py:181 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:97 +msgid "NULL host not allowed for volume backend lookup." +msgstr "" + +#: cinder/backup/manager.py:100 +#, python-format +msgid "Checking hostname '%s' for backend info." +msgstr "" + +#: cinder/backup/manager.py:107 +#, python-format +msgid "Backend not found in hostname (%s) so using default." +msgstr "" + +#: cinder/backup/manager.py:117 +#, python-format +msgid "Manager requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:120 cinder/backup/manager.py:132 +msgid "Fetching default backend." +msgstr "" + +#: cinder/backup/manager.py:123 +#, python-format +msgid "Volume manager for backend '%s' does not exist." +msgstr "" + +#: cinder/backup/manager.py:129 +#, python-format +msgid "Driver requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:147 +#, python-format +msgid "" +"Registering backend %(backend)s (host=%(host)s " +"backend_name=%(backend_name)s)." +msgstr "" + +#: cinder/backup/manager.py:154 +#, python-format +msgid "Registering default backend %s." +msgstr "" + +#: cinder/backup/manager.py:158 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)." +msgstr "" + +#: cinder/backup/manager.py:165 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s." +msgstr "" + +#: cinder/backup/manager.py:184 +msgid "Cleaning up incomplete backup operations." +msgstr "" + +#: cinder/backup/manager.py:189 +#, python-format +msgid "Resetting volume %s to available (was backing-up)." +msgstr "" + +#: cinder/backup/manager.py:194 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)." +msgstr "" + +#: cinder/backup/manager.py:206 +#, python-format +msgid "Resetting backup %s to error (was creating)." +msgstr "" + +#: cinder/backup/manager.py:212 +#, python-format +msgid "Resetting backup %s to available (was restoring)." +msgstr "" + +#: cinder/backup/manager.py:217 +#, python-format +msgid "Resuming delete on backup: %s." +msgstr "" + +#: cinder/backup/manager.py:225 +#, python-format +msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:237 +#, python-format +msgid "" +"Create backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s." 
+msgstr "" + +#: cinder/backup/manager.py:249 +#, python-format +msgid "" +"Create backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:282 +#, python-format +msgid "Create backup finished. backup: %s." +msgstr "" + +#: cinder/backup/manager.py:286 +#, python-format +msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:299 +#, python-format +msgid "" +"Restore backup aborted: expected volume status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:310 +#, python-format +msgid "" +"Restore backup aborted: expected backup status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:329 +#, python-format +msgid "" +"Restore backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:360 +#, python-format +msgid "" +"Restore backup finished, backup %(backup_id)s restored to volume " +"%(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:379 +#, python-format +msgid "Delete backup started, backup: %s." +msgstr "" + +#: cinder/backup/manager.py:386 +#, python-format +msgid "" +"Delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:399 +#, python-format +msgid "" +"Delete backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:422 +#, python-format +msgid "Delete backup finished, backup %s deleted." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:116 +msgid "" +"rbd striping not supported - ignoring configuration settings for rbd " +"striping" +msgstr "" + +#: cinder/backup/drivers/ceph.py:147 +#, python-format +msgid "invalid user '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:213 +msgid "backup_id required" +msgstr "" + +#: cinder/backup/drivers/ceph.py:224 +#, python-format +msgid "discarding %(length)s bytes from offset %(offset)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:232 +#, python-format +msgid "writing zeroes chunk %d" +msgstr "" + +#: cinder/backup/drivers/ceph.py:246 +#, python-format +msgid "transferring data between '%(src)s' and '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:250 +#, python-format +msgid "%(chunks)s chunks of %(bytes)s bytes to be transferred" +msgstr "" + +#: cinder/backup/drivers/ceph.py:269 +#, python-format +msgid "transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:279 +#, python-format +msgid "transferring remaining %s bytes" +msgstr "" + +#: cinder/backup/drivers/ceph.py:295 +#, python-format +msgid "creating base image '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:322 cinder/backup/drivers/ceph.py:603 +#, python-format +msgid "deleting backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:325 +msgid "no backup snapshot to delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:361 +#, python-format +msgid "trying diff format name format basename='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:369 +#, python-format +msgid "image %s not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:377 +#, python-format +msgid "base image still has %s snapshots so skipping base image delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:382 +#, python-format +msgid "deleting base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:389 +#, python-format +msgid "image busy, retrying %(retries)s more time(s) in %(delay)ss" +msgstr "" + +#: cinder/backup/drivers/ceph.py:394 +msgid "max retries reached - raising error" +msgstr "" + +#: cinder/backup/drivers/ceph.py:397 +#, python-format +msgid "base backup image='%s' deleted)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:407 +#, python-format +msgid "deleting source snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:453 +#, python-format +msgid "performing differential transfer from '%(src)s' to '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:478 +#, python-format +msgid "rbd diff op failed - (ret=%(ret)s stderr=%(stderr)s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:488 +#, python-format +msgid "image '%s' not found - trying diff format name" +msgstr "" + +#: cinder/backup/drivers/ceph.py:493 +#, python-format +msgid "diff format image '%s' not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:528 +#, python-format +msgid "using --from-snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:543 +#, python-format +msgid "source snap '%s' is stale so deleting" +msgstr "" + +#: cinder/backup/drivers/ceph.py:555 +#, python-format +msgid "" +"snap='%(snap)s' does not exist in base image='%(base)s' - aborting " +"incremental backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:566 +#, python-format +msgid "creating backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:586 +#, python-format +msgid "differential backup transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:595 +msgid "differential backup transfer failed" +msgstr "" + +#: 
cinder/backup/drivers/ceph.py:625 +#, python-format +msgid "creating base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:634 +msgid "copying data" +msgstr "" + +#: cinder/backup/drivers/ceph.py:694 +#, python-format +msgid "looking for snapshot of backup base '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:697 +#, python-format +msgid "backup base '%s' has no snapshots" +msgstr "" + +#: cinder/backup/drivers/ceph.py:704 +#, python-format +msgid "backup '%s' has no snapshot" +msgstr "" + +#: cinder/backup/drivers/ceph.py:708 +#, python-format +msgid "backup should only have one snapshot but instead has %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:713 +#, python-format +msgid "found snapshot '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:734 +msgid "need non-zero volume size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:751 +#, python-format +msgid "Starting backup of volume='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:764 +msgid "forcing full backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:776 +#, python-format +msgid "backup '%s' finished." +msgstr "" + +#: cinder/backup/drivers/ceph.py:834 +msgid "adjusting restore vol size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:846 +#, python-format +msgid "trying incremental restore from base='%(base)s' snap='%(snap)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:858 +msgid "differential restore failed, trying full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:869 +#, python-format +msgid "restore transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:916 +#, python-format +msgid "rbd has %s extents" +msgstr "" + +#: cinder/backup/drivers/ceph.py:938 +msgid "dest volume is original volume - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:959 +msgid "destination has extents - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:964 +#, python-format +msgid "no restore point found for backup='%s', forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:995 +msgid "forcing full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1005 +#, python-format +msgid "starting restore from Ceph backup=%(src)s to volume=%(dest)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1016 +msgid "volume_file does not support fileno() so skipping fsync()" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1021 +msgid "restore finished successfully." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:1023 +#, python-format +msgid "restore finished with error - %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1029 +#, python-format +msgid "delete started for backup=%s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1034 +msgid "rbd image not found but continuing anyway so that db entry can be removed" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1037 +#, python-format +msgid "delete '%s' finished with warning" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1039 +#, python-format +msgid "delete '%s' finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:106 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:123 +#, python-format +msgid "single_user auth mode enabled, but %(param)s not set" +msgstr "" + +#: cinder/backup/drivers/swift.py:141 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:146 +#, python-format +msgid "container %s does not exist" +msgstr "" + +#: cinder/backup/drivers/swift.py:151 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/drivers/swift.py:157 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:173 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:182 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:192 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:209 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/drivers/swift.py:214 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:219 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:224 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/drivers/swift.py:234 +#, python-format +msgid "volume size %d is invalid." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:248 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:271 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/drivers/swift.py:278 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:287 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/drivers/swift.py:291 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/drivers/swift.py:297 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:301 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:304 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:312 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/drivers/swift.py:328 cinder/backup/drivers/tsm.py:324 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/drivers/swift.py:345 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/drivers/swift.py:350 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:356 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/drivers/swift.py:362 +#, python-format +msgid "" +"restoring object from swift. backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:378 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/drivers/swift.py:401 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:409 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:423 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:428 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:432 cinder/backup/drivers/tsm.py:378 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:446 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:455 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:458 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:468 cinder/backup/drivers/tsm.py:440 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/backup/drivers/tsm.py:85 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to create device hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:143 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to obtain backup success notification from " +"server.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:173 +#, python-format +msgid "" +"restore: %(vol_id)s Failed.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:199 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a block device." +msgstr "" + +#: cinder/backup/drivers/tsm.py:206 +#, python-format +msgid "backup: %(vol_id)s Failed. Cannot obtain real path to device %(path)s." +msgstr "" + +#: cinder/backup/drivers/tsm.py:213 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a file." +msgstr "" + +#: cinder/backup/drivers/tsm.py:260 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to remove backup hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:286 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to TSM, volume path: " +"%(volume_path)s," +msgstr "" + +#: cinder/backup/drivers/tsm.py:298 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:308 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:338 +#, python-format +msgid "" +"restore: starting restore of backup from TSM to volume %(volume_id)s, " +"backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:352 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:362 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:413 +#, python-format +msgid "" +"delete: %(vol_id)s Failed to run dsmc with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:421 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments with " +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:432 +#, python-format +msgid "" +"delete: %(vol_id)s Failed with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/brick/exception.py:55 +#, python-format +msgid "Exception in string format operation. 
msg='%s'" +msgstr "" + +#: cinder/brick/exception.py:85 +msgid "We are unable to locate any Fibre Channel devices." +msgstr "" + +#: cinder/brick/exception.py:89 +msgid "Unable to find a Fibre Channel volume device." +msgstr "" + +#: cinder/brick/exception.py:93 +#, python-format +msgid "Volume device not found at %(device)s." +msgstr "" + +#: cinder/brick/exception.py:97 +#, python-format +msgid "Unable to find Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:101 +#, python-format +msgid "Failed to create Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:105 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:109 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:113 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:117 +#, python-format +msgid "Connect to volume via protocol %(protocol)s not supported." +msgstr "" + +#: cinder/brick/initiator/connector.py:127 +#, python-format +msgid "Invalid InitiatorConnector protocol specified %(protocol)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:140 +#, python-format +msgid "Failed to access the device on the path %(path)s: %(error)s %(info)s." +msgstr "" + +#: cinder/brick/initiator/connector.py:229 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. Try" +" number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:242 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:317 +#, python-format +msgid "Could not find the iSCSI Initiator File %s" +msgstr "" + +#: cinder/brick/initiator/connector.py:609 +msgid "We are unable to locate any Fibre Channel devices" +msgstr "" + +#: cinder/brick/initiator/connector.py:619 +#, python-format +msgid "Looking for Fibre Channel dev %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:629 +msgid "Fibre Channel volume device not found." +msgstr "" + +#: cinder/brick/initiator/connector.py:633 +#, python-format +msgid "Fibre volume not yet found. Will rescan & retry. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:649 +#, python-format +msgid "Found Fibre Channel volume %(name)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:658 +#, python-format +msgid "Multipath device discovered %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:776 +#, python-format +msgid "AoE volume not yet found at: %(path)s. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:789 +#, python-format +msgid "Found AoE device %(path)s (after %(tries)s rediscover)" +msgstr "" + +#: cinder/brick/initiator/connector.py:815 +#, python-format +msgid "aoe-discover: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:825 +#, python-format +msgid "aoe-revalidate %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:834 +#, python-format +msgid "aoe-flush %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:858 +msgid "" +"Connection details not present. RemoteFsClient may not initialize " +"properly." 
+msgstr "" + +#: cinder/brick/initiator/connector.py:915 +msgid "Invalid connection_properties specified no device_path attribute" +msgstr "" + +#: cinder/brick/initiator/linuxfc.py:50 cinder/brick/initiator/linuxfc.py:56 +msgid "systool is not installed" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:99 +#: cinder/brick/initiator/linuxscsi.py:107 +#: cinder/brick/initiator/linuxscsi.py:124 +#, python-format +msgid "multipath call failed exit (%(code)s)" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:145 +#, python-format +msgid "Couldn't find multipath device %(line)s" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:149 +#, python-format +msgid "Found multipath device = %(mdev)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:140 +msgid "Attempting recreate of backing lun..." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:158 +#, python-format +msgid "" +"Failed to recover attempt to create iscsi backing lun for volume " +"id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:177 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:184 +#, python-format +msgid "" +"Created volume path %(vp)s,\n" +"content: %(vc)%" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:216 cinder/brick/iscsi/iscsi.py:365 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:227 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:258 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:262 +#, python-format +msgid "Volume path %s does not exist, nothing to remove." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:280 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:290 cinder/brick/iscsi/iscsi.py:550 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:375 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:469 +msgid "cinder-rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:489 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:513 cinder/brick/iscsi/iscsi.py:522 +#, python-format +msgid "Failed to create iscsi target for volume id:%s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:532 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:542 +#, python-format +msgid "Failed to remove iscsi target for volume id:%s." 
+msgstr "" + +#: cinder/brick/iscsi/iscsi.py:571 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 cinder/brick/local_dev/lvm.py:158 +#: cinder/brick/local_dev/lvm.py:474 cinder/brick/local_dev/lvm.py:503 +#: cinder/brick/local_dev/lvm.py:546 cinder/brick/local_dev/lvm.py:609 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 cinder/brick/local_dev/lvm.py:159 +#: cinder/brick/local_dev/lvm.py:475 cinder/brick/local_dev/lvm.py:504 +#: cinder/brick/local_dev/lvm.py:547 cinder/brick/local_dev/lvm.py:610 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 cinder/brick/local_dev/lvm.py:160 +#: cinder/brick/local_dev/lvm.py:476 cinder/brick/local_dev/lvm.py:505 +#: cinder/brick/local_dev/lvm.py:548 cinder/brick/local_dev/lvm.py:611 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, python-format +msgid "Unable to locate Volume Group %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:157 +msgid "Error querying thin pool about data_percent" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:370 +#, python-format +msgid "Unable to find VG: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:420 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:473 +msgid "Error creating Volume" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:489 +#, python-format +msgid "Unable to find LV: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:502 +msgid "Error creating snapshot" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:545 +msgid "Error activating LV" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:563 +#, python-format +msgid "Error reported running lvremove: CMD: %(command)s, RESPONSE: %(response)s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:568 +msgid "Attempting udev settle and retry of lvremove..." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:608 +msgid "Error extending Volume" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:39 +msgid "nfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:45 +msgid "glusterfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:86 +#, python-format +msgid "Already mounted: %s" +msgstr "" + +#: cinder/common/config.py:125 +msgid "Deploy v1 of the Cinder API." +msgstr "" + +#: cinder/common/config.py:128 +msgid "Deploy v2 of the Cinder API." +msgstr "" + +#: cinder/common/sqlalchemyutils.py:66 +#: cinder/openstack/common/db/sqlalchemy/utils.py:72 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:114 +#: cinder/openstack/common/db/sqlalchemy/utils.py:120 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/compute/nova.py:97 +#, python-format +msgid "Novaclient connection created using URL: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:63 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:190 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:843 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1842 +#, python-format +msgid "VolumeType %s deletion failed, VolumeType in use." 
+msgstr "" + +#: cinder/db/sqlalchemy/api.py:2530 +#, python-format +msgid "No backup with id %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2615 +msgid "Volume must be available" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2639 +#, python-format +msgid "Volume in unexpected state %s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2662 +#, python-format +msgid "" +"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " +"%(status)s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:37 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:64 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:240 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:269 +msgid "Downgrade from initial Cinder install is unsupported." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:49 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:74 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:105 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:45 +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:48 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:46 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:127 +msgid "Dropping foreign key reservations_ibfk_1 failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:133 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:140 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:147 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:60 +msgid "Exception while creating table 'volume_glance_metadata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:75 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:68 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:58 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:61 +msgid "transfers table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:31 +msgid "migrations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:61 +#, python-format +msgid "Table |%s| not created" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:37 +#, python-format +msgid "Exception while dropping table %s." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:100 +#, python-format +msgid "Exception while creating table %s." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:34 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:43 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:49 +#, python-format +msgid "Column |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:92 +msgid "encryption_key_id column not dropped from volumes" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:100 +msgid "encryption_key_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:105 +msgid "volume_type_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:113 +msgid "encryption table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:49 +msgid "Table quality_of_service_specs not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:60 +msgid "Added qos_specs_id column to volume type table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:85 +msgid "Dropping foreign key volume_types_ibfk_1 failed" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:93 +msgid "Dropping qos_specs_id column failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:100 +msgid "Dropping quality_of_service_specs table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:59 +msgid "volume_admin_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:48 +msgid "" +"Found existing 'default' entries in the quota_classes table. Skipping " +"insertion of default values." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:72 +msgid "Added default quota class data into the DB." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:74 +msgid "Default quota class data not inserted into the DB." +msgstr "" + +#: cinder/image/glance.py:161 cinder/image/glance.py:169 +#, python-format +msgid "Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:94 cinder/image/image_utils.py:199 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/image/image_utils.py:101 +#, python-format +msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:109 cinder/image/image_utils.py:192 +#, python-format +msgid "" +"Size is %(image_size)dGB and doesn't fit in a volume of size " +"%(volume_size)dGB." +msgstr "" + +#: cinder/image/image_utils.py:157 +#, python-format +msgid "" +"qemu-img is not installed and image is of type %s. Only RAW images can " +"be used if qemu-img is not installed." +msgstr "" + +#: cinder/image/image_utils.py:164 +msgid "" +"qemu-img is not installed and the disk format is not specified. Only RAW" +" images can be used if qemu-img is not installed." 
+msgstr "" + +#: cinder/image/image_utils.py:178 +#, python-format +msgid "Copying image from %(tmp)s to volume %(dest)s - size: %(size)s" +msgstr "" + +#: cinder/image/image_utils.py:206 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:224 +#, python-format +msgid "Converted to %(vol_format)s, but format is now %(file_format)s" +msgstr "" + +#: cinder/image/image_utils.py:260 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:78 +msgid "" +"config option keymgr.fixed_key has not been defined: some operations may " +"fail unexpectedly" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:80 +msgid "keymgr.fixed_key not defined" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:134 +#, python-format +msgid "Not deleting key %s" +msgstr "" + +#: cinder/openstack/common/eventlet_backdoor.py:140 +#, python-format +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/fileutils.py:64 +#, python-format +msgid "Reloading cached file %s" +msgstr "" + +#: cinder/openstack/common/gettextutils.py:252 +msgid "Message objects do not support addition." +msgstr "" + +#: cinder/openstack/common/gettextutils.py:261 +msgid "" +"Message objects do not support str() because they may contain non-ascii " +"characters. Please use unicode() or translate() instead." +msgstr "" + +#: cinder/openstack/common/imageutils.py:96 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:189 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:200 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:227 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:235 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/log.py:301 +#, python-format +msgid "Deprecated: %s" +msgstr "" + +#: cinder/openstack/common/log.py:402 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:453 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:623 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:82 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:89 +#: cinder/tests/brick/test_brick_connector.py:466 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:129 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:136 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:43 +#, python-format +msgid "Unexpected argument for periodic task creation: %(arg)s." 
+msgstr "" + +#: cinder/openstack/common/periodic_task.py:134 +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:139 +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:177 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:186 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." +msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:127 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/openstack/common/processutils.py:142 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:167 +#: cinder/openstack/common/processutils.py:239 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:345 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:179 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/openstack/common/processutils.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:318 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:220 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/openstack/common/processutils.py:224 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/openstack/common/service.py:175 +#: cinder/openstack/common/service.py:269 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:187 +msgid "Exception during rpc cleanup." 
+msgstr "" + +#: cinder/openstack/common/service.py:238 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:275 +msgid "Unhandled exception" +msgstr "" + +#: cinder/openstack/common/service.py:308 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/openstack/common/service.py:327 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/openstack/common/service.py:337 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/openstack/common/service.py:354 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/openstack/common/service.py:358 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/service.py:362 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/openstack/common/service.py:392 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/openstack/common/service.py:410 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/openstack/common/sslutils.py:98 +#, python-format +msgid "Invalid SSL version : %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:86 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/strutils.py:182 +#, python-format +msgid "Invalid string format: %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:189 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/openstack/common/versionutils.py:69 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and " +"may be removed in %(remove_in)s." +msgstr "" + +#: cinder/openstack/common/versionutils.py:73 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s and may be removed in " +"%(remove_in)s. It will not be superseded." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:29 +msgid "An unknown error occurred in crypto utils." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:36 +#, python-format +msgid "Block size of %(given)d is too big, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:45 +#, python-format +msgid "Length of %(given)d is too long, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/db/exception.py:44 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:487 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:538 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:610 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/utils.py:33 +msgid "Sort key supplied was not valid." +msgstr "" + +#: cinder/openstack/common/notifier/api.py:129 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:145 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:164 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. 
Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:105 +#, python-format +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:83 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:216 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:299 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:345 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "received %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:422 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:423 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:280 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:459 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:594 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:597 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:631 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:640 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:668 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." 
+msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:166 +#: cinder/openstack/common/rpc/impl_qpid.py:164 +msgid "Failed to process message... skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:477 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:499 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:536 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:552 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:606 +#: cinder/openstack/common/rpc/impl_qpid.py:507 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:624 +#: cinder/openstack/common/rpc/impl_qpid.py:522 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:628 +#: cinder/openstack/common/rpc/impl_qpid.py:526 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:667 +#: cinder/openstack/common/rpc/impl_qpid.py:561 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:84 +#, python-format +msgid "Invalid value for qpid_topology_version: %d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:455 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:461 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:474 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:534 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:101 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:136 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:137 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:138 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:146 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:158 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:200 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:205 +msgid "You cannot send on this socket." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:267 +#, python-format +msgid "Running func with context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:305 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:387 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:437 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:443 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:475 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:481 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:497 +#, python-format +msgid "Required IPC directory does not exist at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:506 +#, python-format +msgid "Permission denied to IPC directory at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:509 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:543 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:562 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:590 +msgid "Skipping topic registration. Already registered." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:597 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:649 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:662 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:675 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:678 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:681 +#, python-format +msgid "Received message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:682 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:691 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:698 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:721 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:724 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:728 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:731 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:771 +#, python-format +msgid "topic is %s." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:815 +#, python-format +msgid "rpc_zmq_matchmaker = %(orig)s is deprecated; use %(new)s instead" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#: cinder/openstack/common/rpc/matchmaker_ring.py:79 +#: cinder/openstack/common/rpc/matchmaker_ring.py:97 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:54 +#, python-format +msgid "extra_spec requirement '%(req)s' does not match '%(cap)s'" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:67 +#, python-format +msgid "%(host_state)s fails resource_type extra_specs requirements" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:43 +msgid "Re-scheduling is disabled." +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:52 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/scheduler/driver.py:69 +msgid "Must implement host_passes_filters" +msgstr "" + +#: cinder/scheduler/driver.py:74 +msgid "Must implement find_retype_host" +msgstr "" + +#: cinder/scheduler/driver.py:78 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:82 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:98 +#, python-format +msgid "cannot place volume %(id)s on %(host)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:114 +#, python-format +msgid "No valid hosts for volume %(id)s with type %(type)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:125 +#, python-format +msgid "" +"Current host not valid for volume %(id)s with type %(type)s, migration " +"not allowed" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:156 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:174 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:207 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:259 +#, python-format +msgid "Filtered %s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:276 +#, python-format +msgid "Choosing %s" +msgstr "" + +#: cinder/scheduler/host_manager.py:264 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:269 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:294 +#, python-format +msgid "volume service is down or disabled. (host: %s)" +msgstr "" + +#: cinder/scheduler/manager.py:63 +msgid "" +"ChanceScheduler and SimpleScheduler have been deprecated due to lack of " +"support for advanced features like: volume types, volume encryption, QoS " +"etc. These two schedulers can be fully replaced by FilterScheduler with " +"certain combination of filters and weighers." 
+msgstr "" + +#: cinder/scheduler/manager.py:98 cinder/scheduler/manager.py:100 +msgid "Failed to create scheduler manager volume flow" +msgstr "" + +#: cinder/scheduler/manager.py:159 +msgid "New volume type not specified in request_spec." +msgstr "" + +#: cinder/scheduler/manager.py:174 +#, python-format +msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." +msgstr "" + +#: cinder/scheduler/manager.py:192 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:68 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%s'" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:43 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:57 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:53 +msgid "No volume_id provided to populate a request_spec from" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:116 +#, python-format +msgid "Failed to schedule_create_volume: %(cause)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:135 +#, python-format +msgid "Failed notifying on %(topic)s payload %(payload)s" +msgstr "" + +#: cinder/tests/fake_driver.py:57 cinder/volume/driver.py:784 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:76 cinder/volume/driver.py:884 +#, python-format +msgid "FAKE ISER: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:97 +msgid "local_path not implemented" +msgstr "" + +#: cinder/tests/fake_driver.py:124 cinder/tests/fake_driver.py:129 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:70 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:78 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:94 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:97 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:58 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_netapp_nfs.py:360 +#, python-format +msgid "Share %(share)s and file name %(file_name)s" +msgstr "" + +#: cinder/tests/test_rbd.py:768 cinder/volume/drivers/rbd.py:175 +msgid "flush() not supported in this version of librbd" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:260 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1507 +#, python-format +msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1510 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1515 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:60 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:61 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: 
cinder/tests/test_xiv_ds8k.py:102 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/tests/api/contrib/test_backups.py:741 +msgid "Invalid input" +msgstr "" + +#: cinder/tests/integrated/test_login.py:29 +#, python-format +msgid "volume: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:32 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:42 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:50 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:58 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:100 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:103 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:121 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:148 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:159 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:166 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/transfer/api.py:68 +msgid "Volume in unexpected state" +msgstr "" + +#: cinder/transfer/api.py:102 cinder/volume/api.py:367 +msgid "status must be available" +msgstr "" + +#: cinder/transfer/api.py:119 +#, python-format +msgid "Failed to create transfer record for %s" +msgstr "" + +#: cinder/transfer/api.py:136 +#, python-format +msgid "Attempt to transfer %s with invalid auth key." +msgstr "" + +#: cinder/transfer/api.py:156 cinder/volume/flows/api/create_volume.py:508 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/transfer/api.py:182 +#, python-format +msgid "Failed to update quota donating volumetransfer id %s" +msgstr "" + +#: cinder/transfer/api.py:199 +#, python-format +msgid "Volume %s has been transferred." 
+msgstr "" + +#: cinder/volume/api.py:143 +#, python-format +msgid "Unable to query if %s is in the availability zone set" +msgstr "" + +#: cinder/volume/api.py:171 cinder/volume/api.py:173 +msgid "Failed to create api volume flow" +msgstr "" + +#: cinder/volume/api.py:202 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:214 +#, python-format +msgid "Volume status must be available or error, but current status is: %s" +msgstr "" + +#: cinder/volume/api.py:224 +msgid "Volume cannot be deleted while migrating" +msgstr "" + +#: cinder/volume/api.py:229 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:285 cinder/volume/api.py:350 +#: cinder/volume/qos_specs.py:240 cinder/volume/volume_types.py:67 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:370 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:377 +msgid "status must be in-use to detach" +msgstr "" + +#: cinder/volume/api.py:388 +msgid "Volume status must be available to reserve" +msgstr "" + +#: cinder/volume/api.py:464 +msgid "Snapshot cannot be created while volume is migrating" +msgstr "" + +#: cinder/volume/api.py:468 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:490 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:502 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:553 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/api.py:581 cinder/volume/flows/api/create_volume.py:208 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:585 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:589 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:720 cinder/volume/api.py:772 +msgid "Volume status must be available/in-use." +msgstr "" + +#: cinder/volume/api.py:723 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/api.py:752 +msgid "Volume status must be available to extend." +msgstr "" + +#: cinder/volume/api.py:757 +#, python-format +msgid "" +"New size for extend must be greater than current size. (current: " +"%(size)s, extended: %(new_size)s)" +msgstr "" + +#: cinder/volume/api.py:778 +msgid "Volume is already part of an active migration" +msgstr "" + +#: cinder/volume/api.py:784 +msgid "volume must not have snapshots" +msgstr "" + +#: cinder/volume/api.py:797 +#, python-format +msgid "No available service named %s" +msgstr "" + +#: cinder/volume/api.py:803 +msgid "Destination host must be different than current host" +msgstr "" + +#: cinder/volume/api.py:833 +msgid "Source volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:837 +msgid "Destination volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:842 +#, python-format +msgid "Destination has migration_status %(stat)s, expected %(exp)s." +msgstr "" + +#: cinder/volume/api.py:853 +msgid "Volume status must be available to update readonly flag." 
+msgstr "" + +#: cinder/volume/api.py:862 +#, python-format +msgid "Unable to update type due to incorrect status on volume: %s" +msgstr "" + +#: cinder/volume/api.py:868 +#, python-format +msgid "Volume %s is already part of an active migration." +msgstr "" + +#: cinder/volume/api.py:874 +#, python-format +msgid "migration_policy must be 'on-demand' or 'never', passed: %s" +msgstr "" + +#: cinder/volume/api.py:887 +#, python-format +msgid "Invalid volume_type passed: %s" +msgstr "" + +#: cinder/volume/api.py:900 +#, python-format +msgid "New volume_type same as original: %s" +msgstr "" + +#: cinder/volume/api.py:915 +msgid "Retype cannot change encryption requirements" +msgstr "" + +#: cinder/volume/api.py:927 +msgid "Retype cannot change front-end qos specs for in-use volumes" +msgstr "" + +#: cinder/volume/driver.py:189 cinder/volume/drivers/netapp/nfs.py:174 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:282 +#, python-format +msgid "copy_data_between_volumes %(src)s -> %(dest)s." +msgstr "" + +#: cinder/volume/driver.py:295 cinder/volume/driver.py:309 +#, python-format +msgid "Failed to attach volume %(vol)s" +msgstr "" + +#: cinder/volume/driver.py:327 +#, python-format +msgid "Failed to copy volume %(src)s to %(dest)d" +msgstr "" + +#: cinder/volume/driver.py:340 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:358 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:394 +#, python-format +msgid "Unable to access the backend storage via the path %(path)s." +msgstr "" + +#: cinder/volume/driver.py:433 +#, python-format +msgid "Creating a new backup for volume %s." +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Restoring backup %(backup)s to volume %(volume)s." +msgstr "" + +#: cinder/volume/driver.py:474 +msgid "Extend volume not implemented" +msgstr "" + +#: cinder/volume/driver.py:533 cinder/volume/drivers/emc/emc_smis_iscsi.py:113 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:546 +#, python-format +msgid "ISCSI discovery attempt failed for:%s" +msgstr "" + +#: cinder/volume/driver.py:548 +#, python-format +msgid "Error from iscsiadm -m discovery: %s" +msgstr "" + +#: cinder/volume/driver.py:595 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:599 cinder/volume/drivers/emc/emc_smis_iscsi.py:156 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:696 +msgid "The volume driver requires the iSCSI initiator name in the connector." +msgstr "" + +#: cinder/volume/driver.py:726 cinder/volume/driver.py:845 +#: cinder/volume/drivers/eqlx.py:247 cinder/volume/drivers/lvm.py:359 +#: cinder/volume/drivers/zadara.py:650 +#: cinder/volume/drivers/emc/emc_smis_common.py:859 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:235 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:602 +#: cinder/volume/drivers/netapp/iscsi.py:1032 +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/nexenta/iscsi.py:538 +#: cinder/volume/drivers/windows/windows.py:205 +msgid "Updating volume stats" +msgstr "" + +#: cinder/volume/driver.py:924 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:203 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." 
+msgstr "" + +#: cinder/volume/manager.py:209 +msgid "" +"ThinLVMVolumeDriver is deprecated, please configure LVMISCSIDriver and " +"lvm_type=thin. Continuing with those settings." +msgstr "" + +#: cinder/volume/manager.py:228 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)" +msgstr "" + +#: cinder/volume/manager.py:235 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s" +msgstr "" + +#: cinder/volume/manager.py:244 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:257 +#, python-format +msgid "Failed to re-export volume %s: setting to error state" +msgstr "" + +#: cinder/volume/manager.py:264 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:271 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:273 +#, python-format +msgid "" +"Error encountered during re-exporting phase of driver initialization: " +"%(name)s" +msgstr "" + +#: cinder/volume/manager.py:283 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:328 cinder/volume/manager.py:330 +msgid "Failed to create manager volume flow" +msgstr "" + +#: cinder/volume/manager.py:374 cinder/volume/manager.py:391 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:380 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:389 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:394 +#, python-format +msgid "Cannot delete volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:422 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:427 +#, python-format +msgid "volume %s: glance metadata deleted" +msgstr "" + +#: cinder/volume/manager.py:430 +#, python-format +msgid "no glance metadata found for volume %s" +msgstr "" + +#: cinder/volume/manager.py:434 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:451 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:462 +#, python-format +msgid "snapshot %(snap_id)s: creating" +msgstr "" + +#: cinder/volume/manager.py:490 +#, python-format +msgid "" +"Failed updating %(snapshot_id)s metadata using the provided volumes " +"%(volume_id)s metadata" +msgstr "" + +#: cinder/volume/manager.py:496 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:508 cinder/volume/manager.py:518 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:526 +#, python-format +msgid "Cannot delete snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:556 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:559 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:579 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:583 +msgid "being attached by another host" +msgstr "" + +#: cinder/volume/manager.py:587 +msgid "being attached by different mode" +msgstr "" + +#: cinder/volume/manager.py:590 +msgid "status must be available or attaching" +msgstr "" + +#: cinder/volume/manager.py:698 +#, python-format +msgid 
"Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "" + +#: cinder/volume/manager.py:760 +#, python-format +msgid "Unable to fetch connection information from backend: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:807 +#, python-format +msgid "Unable to terminate volume connection: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:854 +msgid "failed to create new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:857 +msgid "timeout creating new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:880 +#, python-format +msgid "Failed to copy volume %(vol1)s to %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:909 +#, python-format +msgid "" +"migrate_volume_completion: completing migration for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:921 +#, python-format +msgid "" +"migrate_volume_completion is cleaning up an error for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:940 +#, python-format +msgid "Failed to delete migration source vol %(vol)s: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:976 +#, python-format +msgid "volume %s: calling driver migrate_volume" +msgstr "" + +#: cinder/volume/manager.py:1016 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/manager.py:1024 +#, python-format +msgid "" +"Unable to update stats, %(driver_name)s -%(driver_version)s " +"%(config_group)s driver is uninitialized." +msgstr "" + +#: cinder/volume/manager.py:1044 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/manager.py:1091 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to extend volume by %(s_size)sG, " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/manager.py:1103 +#, python-format +msgid "volume %s: extending" +msgstr "" + +#: cinder/volume/manager.py:1105 +#, python-format +msgid "volume %s: extended successfully" +msgstr "" + +#: cinder/volume/manager.py:1107 +#, python-format +msgid "volume %s: Error trying to extend volume" +msgstr "" + +#: cinder/volume/manager.py:1169 +msgid "Failed to update usages while retyping volume." +msgstr "" + +#: cinder/volume/manager.py:1170 +msgid "Failed to get old volume type quota reservations" +msgstr "" + +#: cinder/volume/manager.py:1190 +#, python-format +msgid "Volume %s: retyped succesfully" +msgstr "" + +#: cinder/volume/manager.py:1193 +#, python-format +msgid "" +"Volume %s: driver error when trying to retype, falling back to generic " +"mechanism." +msgstr "" + +#: cinder/volume/manager.py:1204 +msgid "Retype requires migration but is not allowed." +msgstr "" + +#: cinder/volume/manager.py:1212 +msgid "Volume must not have snapshots." 
+msgstr "" + +#: cinder/volume/qos_specs.py:57 +#, python-format +msgid "Valid consumer of QoS specs are: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:84 cinder/volume/qos_specs.py:105 +#: cinder/volume/qos_specs.py:155 cinder/volume/qos_specs.py:197 +#: cinder/volume/qos_specs.py:211 cinder/volume/qos_specs.py:225 +#: cinder/volume/volume_types.py:43 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:123 cinder/volume/qos_specs.py:140 +#: cinder/volume/qos_specs.py:272 cinder/volume/volume_types.py:52 +#: cinder/volume/volume_types.py:99 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/qos_specs.py:156 +#, python-format +msgid "Failed to get all associations of qos specs %s" +msgstr "" + +#: cinder/volume/qos_specs.py:189 +#, python-format +msgid "" +"Type %(type_id)s is already associated with another qos specs: " +"%(qos_specs_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:198 +#, python-format +msgid "Failed to associate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:212 +#, python-format +msgid "Failed to disassociate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:226 +#, python-format +msgid "Failed to disassociate qos specs %s." +msgstr "" + +#: cinder/volume/qos_specs.py:284 cinder/volume/volume_types.py:111 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/utils.py:144 +#, python-format +msgid "" +"Incorrect value error: %(blocksize)s, it may indicate that " +"'volume_dd_blocksize' was configured incorrectly. Fall back to default." +msgstr "" + +#: cinder/volume/volume_types.py:130 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:131 +#: cinder/volume/drivers/block_device.py:143 cinder/volume/drivers/lvm.py:654 +#: cinder/volume/drivers/lvm.py:669 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:157 cinder/volume/drivers/lvm.py:687 +#, python-format +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:183 cinder/volume/drivers/lvm.py:483 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:200 cinder/volume/drivers/lvm.py:504 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:272 cinder/volume/drivers/lvm.py:227 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:287 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:311 cinder/volume/drivers/lvm.py:300 +#: cinder/volume/drivers/zadara.py:509 cinder/volume/drivers/nexenta/nfs.py:189 +#, python-format +msgid "Creating clone of volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:380 +msgid "No free disk" +msgstr "" + +#: cinder/volume/drivers/block_device.py:393 +msgid "No big enough free disk" +msgstr "" + +#: cinder/volume/drivers/coraid.py:84 +#, python-format +msgid "Invalid ESM url scheme \"%s\". Supported https only." +msgstr "" + +#: cinder/volume/drivers/coraid.py:111 +msgid "Invalid REST handle name. Expected path." 
+msgstr "" + +#: cinder/volume/drivers/coraid.py:134 +#, python-format +msgid "Call to json.loads() failed: %(ex)s. Response: %(resp)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:224 +msgid "Session is expired. Relogin on ESM." +msgstr "" + +#: cinder/volume/drivers/coraid.py:244 +msgid "Reply is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:246 +msgid "Error message is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:284 +#, python-format +msgid "Coraid Appliance ping failed: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:297 +#, python-format +msgid "Volume \"%(name)s\" created with VSX LUN \"%(lun)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:311 +#, python-format +msgid "Volume \"%s\" deleted." +msgstr "" + +#: cinder/volume/drivers/coraid.py:315 +#, python-format +msgid "Resize volume \"%(name)s\" to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:319 +#, python-format +msgid "Repository for volume \"%(name)s\" found: \"%(repo)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:333 +#, python-format +msgid "Volume \"%(name)s\" resized. New size is %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:385 +msgid "Cannot create clone volume in different repository." +msgstr "" + +#: cinder/volume/drivers/coraid.py:505 +#, python-format +msgid "Initialize connection %(shelf)s/%(lun)s for %(name)s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:139 +#, python-format +msgid "" +"CLI output\n" +"%s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:154 +msgid "Reading CLI MOTD" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:158 +#, python-format +msgid "Setting CLI terminal width: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:162 +#, python-format +msgid "Sending CLI command: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:169 +msgid "Error executing EQL command" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:199 +#, python-format +msgid "EQL-driver: executing \"%s\"" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:208 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:383 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:215 cinder/volume/drivers/san/san.py:149 +#, python-format +msgid "Error running SSH command: %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:282 +#, python-format +msgid "Volume %s does not exist, it may have already been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:300 +#, python-format +msgid "EQL-driver: Setup is complete, group IP is %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:304 +msgid "Failed to setup the Dell EqualLogic driver" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:320 +#, python-format +msgid "Failed to create volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:329 +#, python-format +msgid "Volume %s was not found while trying to delete it" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:333 +#, python-format +msgid "Failed to delete volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:348 +#, python-format +msgid "Failed to create snapshot of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:361 +#, python-format +msgid "Failed to create volume from snapshot %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:374 +#, python-format +msgid "Failed to create clone of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:384 +#, python-format +msgid "Failed to delete snapshot %(snap)s of volume %(vol)s" +msgstr "" + +#: 
cinder/volume/drivers/eqlx.py:405 +#, python-format +msgid "Failed to initialize connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:415 +#, python-format +msgid "Failed to terminate connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:436 +#, python-format +msgid "Volume %s is not found!, it may have been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:440 +#, python-format +msgid "Failed to ensure export of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:459 +#, python-format +msgid "Failed to extend_volume %(name)s from %(current_size)sGB to %(new_size)sGB" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:86 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:91 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:103 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:161 +#, python-format +msgid "Cloning volume %(src)s to volume %(dst)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:166 +msgid "Volume status must be 'available'." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:202 cinder/volume/drivers/nfs.py:122 +#: cinder/volume/drivers/netapp/nfs.py:753 +#, python-format +msgid "casted to %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:216 +msgid "Snapshot status must be \"available\" to clone." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:238 +#, python-format +msgid "snapshot: %(snap)s, volume: %(vol)s, volume_size: %(size)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:257 +#, python-format +msgid "will copy from snapshot at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:275 cinder/volume/drivers/nfs.py:172 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:373 +#, python-format +msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:403 +#, python-format +msgid "nova call result: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:405 +msgid "Call to Nova to create snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:427 +msgid "Nova returned \"error\" status while creating snapshot." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:431 +#, python-format +msgid "Status of snapshot %(id)s is now %(status)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:444 +#, python-format +msgid "Timed out while waiting for Nova update for creation of snapshot %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:456 +#, python-format +msgid "create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:457 +#, python-format +msgid "volume id: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:532 +msgid "'active' must be present when writing snap_info." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:562 +#, python-format +msgid "deleting snapshot %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:566 +msgid "Volume status must be \"available\" or \"in-use\"." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:582 +#, python-format +msgid "" +"Snapshot record for %s is not present, allowing snapshot_delete to " +"proceed." 
+msgstr "" + +#: cinder/volume/drivers/glusterfs.py:587 +#, python-format +msgid "snapshot_file for this snap is %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:608 +#, python-format +msgid "No base file found for %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:625 +#, python-format +msgid "No %(base_id)s found for %(file)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:680 +#, python-format +msgid "No file found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:690 +#, python-format +msgid "No snap found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:701 +#, python-format +msgid "No file depends on %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:727 +#, python-format +msgid "Check condition failed: %s expected to be None." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:778 +msgid "Call to Nova delete snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:796 +#, python-format +msgid "status of snapshot %s is still \"deleting\"... waiting" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:802 +#, python-format +msgid "Unable to delete snapshot %(id)s, status: %(status)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:815 +#, python-format +msgid "Timed out while waiting for Nova update for deletion of snapshot %(id)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:904 +#, python-format +msgid "%s must be a valid raw or qcow2 image." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:967 +msgid "Extend volume is only supported for this driver when no snapshots exist." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:975 +#, python-format +msgid "Unrecognized backing format: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:990 +#, python-format +msgid "creating new volume at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:993 +#, python-format +msgid "file already exists at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1019 cinder/volume/drivers/nfs.py:159 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1021 +#, python-format +msgid "Available shares: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1038 +#, python-format +msgid "" +"GlusterFS share at %(dir)s is not writable by the Cinder volume service. " +"Snapshot operations will not be supported." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:96 +#, python-format +msgid "GPFS is not active. Detailed output: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:97 +#, python-format +msgid "GPFS is not running - state: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:140 +msgid "Option gpfs_mount_point_base is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:147 +msgid "Option gpfs_images_share_mode is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:153 +msgid "Option gpfs_images_dir is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:160 +#, python-format +msgid "" +"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " +"belong to different file systems" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:169 +#, python-format +msgid "" +"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in " +"cluster daemon level %(cur)s - must be at least at level %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:183 +#, python-format +msgid "%s must be an absolute path." 
+msgstr "" + +#: cinder/volume/drivers/gpfs.py:188 +#, python-format +msgid "%s is not a directory." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:197 +#, python-format +msgid "" +"The GPFS filesystem %(fs)s is not at the required release level. Current" +" level is %(cur)s, must be at least %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:556 +#, python-format +msgid "Failed to resize volume %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:604 +#, python-format +msgid "mkfs failed on volume %(vol)s, error message was: %(err)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:637 +#, python-format +msgid "" +"%s cannot be accessed. Verify that GPFS is active and file system is " +"mounted." +msgstr "" + +#: cinder/volume/drivers/lvm.py:189 +#, python-format +msgid "Unable to delete due to existing snapshot for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:215 +#, python-format +msgid "Volume device file path %s does not exist." +msgstr "" + +#: cinder/volume/drivers/lvm.py:221 +#, python-format +msgid "Size for volume: %s not found, cannot secure delete." +msgstr "" + +#: cinder/volume/drivers/lvm.py:262 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:361 +#, python-format +msgid "Unable to update stats on non-initialized Volume Group: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:462 +#, python-format +msgid "Error creating iSCSI target, retrying creation for target: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:482 +#, python-format +msgid "volume_info:%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:518 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:519 cinder/volume/drivers/lvm.py:724 +#: cinder/volume/drivers/huawei/rest_common.py:1225 +#, python-format +msgid "%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:573 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/nfs.py:109 +msgid "Driver specific implementation needs to return mount_point_base." +msgstr "" + +#: cinder/volume/drivers/nfs.py:263 +#, python-format +msgid "Expected volume size was %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:264 +#, python-format +msgid " but size is now %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:361 +#, python-format +msgid "%s is already mounted" +msgstr "" + +#: cinder/volume/drivers/nfs.py:421 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:426 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/nfs.py:431 +#, python-format +msgid "NFS config 'nfs_oversub_ratio' invalid. Must be > 0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:439 +#, python-format +msgid "NFS config 'nfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:493 +#, python-format +msgid "Selected %s as target nfs share."
+msgstr "" + +#: cinder/volume/drivers/nfs.py:526 +#, python-format +msgid "%s is above nfs_used_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:529 +#, python-format +msgid "%s is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:532 +#, python-format +msgid "%s reserved space is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/rbd.py:160 +#, python-format +msgid "Invalid argument - whence=%s not supported" +msgstr "" + +#: cinder/volume/drivers/rbd.py:164 +msgid "Invalid argument" +msgstr "" + +#: cinder/volume/drivers/rbd.py:183 +msgid "fileno() not supported by RBD()" +msgstr "" + +#: cinder/volume/drivers/rbd.py:210 +#, python-format +msgid "error opening rbd image %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:259 +msgid "rados and rbd python libraries not found" +msgstr "" + +#: cinder/volume/drivers/rbd.py:265 +msgid "error connecting to ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:346 cinder/volume/drivers/sheepdog.py:178 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:377 +#, python-format +msgid "clone depth exceeds limit of %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:411 +#, python-format +msgid "maximum clone depth (%d) has been reached - flattening source volume" +msgstr "" + +#: cinder/volume/drivers/rbd.py:423 +#, python-format +msgid "flattening source volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:435 +#, python-format +msgid "creating snapshot='%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:445 +#, python-format +msgid "cloning '%(src_vol)s@%(src_snap)s' to '%(dest)s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:459 +msgid "clone created successfully" +msgstr "" + +#: cinder/volume/drivers/rbd.py:468 +#, python-format +msgid "creating volume '%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:484 +#, python-format +msgid "flattening %(pool)s/%(img)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:490 +#, python-format +msgid "cloning %(pool)s/%(img)s@%(snap)s to %(dst)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:527 +msgid "volume has no backup snaps" +msgstr "" + +#: cinder/volume/drivers/rbd.py:550 +#, python-format +msgid "volume %s is not a clone" +msgstr "" + +#: cinder/volume/drivers/rbd.py:568 +#, python-format +msgid "deleting parent snapshot %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:579 +#, python-format +msgid "deleting parent %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:593 +#, python-format +msgid "volume %s no longer exists in backend" +msgstr "" + +#: cinder/volume/drivers/rbd.py:609 +msgid "volume has clone snapshot(s)" +msgstr "" + +#: cinder/volume/drivers/rbd.py:625 +#, python-format +msgid "deleting rbd volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:629 +msgid "" +"ImageBusy error raised while deleting rbd volume. This may have been " +"caused by a connection from a client that has crashed and, if so, may be " +"resolved by retrying the delete after 30 seconds has elapsed." 
+msgstr "" + +#: cinder/volume/drivers/rbd.py:642 +msgid "volume is a clone so cleaning references" +msgstr "" + +#: cinder/volume/drivers/rbd.py:696 +#, python-format +msgid "connection data: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:705 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:709 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:712 +msgid "Not an rbd snapshot" +msgstr "" + +#: cinder/volume/drivers/rbd.py:724 +#, python-format +msgid "not cloneable: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:728 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:733 +msgid "rbd image clone requires image format to be 'raw' but image {0} is '{1}'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:747 +#, python-format +msgid "Unable to open image %(loc)s: %(err)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:817 +msgid "volume backup complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:830 +msgid "volume restore complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:840 cinder/volume/drivers/sheepdog.py:195 +#, python-format +msgid "Failed to Extend Volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:845 cinder/volume/drivers/sheepdog.py:200 +#: cinder/volume/drivers/windows/windows.py:223 +#, python-format +msgid "Extend volume from %(old_size)s GB to %(new_size)s GB." +msgstr "" + +#: cinder/volume/drivers/scality.py:67 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:78 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:84 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:105 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:139 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:59 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:64 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:144 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:151 +#, python-format +msgid "" +"Failed to make httplib connection to SolidFire Cluster: %s (verify san_ip " +"settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:154 +#, python-format +msgid "Failed to make httplib connection: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:161 +#, python-format +msgid "" +"Request to SolidFire cluster returned bad status: %(status)s / %(reason)s" +" (check san_login/san_password settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:166 +#, python-format +msgid "HTTP request failed, with status: %(status)s and reason: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:177 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:183 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:187 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:189 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:195 +#, python-format +msgid "Detected 
xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:202 +#: cinder/volume/drivers/solidfire.py:271 +#: cinder/volume/drivers/solidfire.py:366 +#, python-format +msgid "API response: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:222 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:253 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:315 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:398 +msgid "Failed to get model update from clone" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:410 +#, python-format +msgid "Failed volume create: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:425 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:460 +#, python-format +msgid "Failed to get SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:469 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:478 +#, python-format +msgid "Volume %s, not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:481 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:550 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:554 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:556 +msgid "This usually means the volume was never successfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:569 +#, python-format +msgid "Failed to delete SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:572 +#: cinder/volume/drivers/solidfire.py:646 +#: cinder/volume/drivers/solidfire.py:709 +#: cinder/volume/drivers/solidfire.py:734 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:575 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:579 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:587 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:638 +msgid "Entering SolidFire extend_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:660 +msgid "Leaving SolidFire extend_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:665 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:673 +msgid "Failed to get updated stats" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:703 +#: cinder/volume/drivers/solidfire.py:728 +msgid "Entering SolidFire attach_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:773 +msgid "Leaving SolidFire transfer volume" +msgstr "" + +#: cinder/volume/drivers/zadara.py:236 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:260 +#, python-format +msgid "Operation completed. 
%(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:357 +#, python-format +msgid "Pool %(name)s: %(total)sGB total, %(free)sGB free" +msgstr "" + +#: cinder/volume/drivers/zadara.py:408 cinder/volume/drivers/zadara.py:531 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:438 +#, python-format +msgid "Create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:445 cinder/volume/drivers/zadara.py:490 +#: cinder/volume/drivers/zadara.py:516 +#, python-format +msgid "Volume %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:456 +#, python-format +msgid "Delete snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:464 +#, python-format +msgid "snapshot: original volume %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:472 +#, python-format +msgid "snapshot: snapshot %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:483 +#, python-format +msgid "Creating volume from snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:496 +#, python-format +msgid "Snapshot %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:614 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:40 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:79 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:83 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:91 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:98 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:107 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:115 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:130 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:137 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:144 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:152 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:157 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:167 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:177 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:188 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:197 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:218 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:230 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:241 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:257 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:266 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:278 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:287 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:292 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:302 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:312 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:321 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:342 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. 
Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:354 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:365 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:381 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:390 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:402 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:411 +msgid "Entering delete_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:413 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:420 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:430 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:438 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:442 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigService: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:456 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:465 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:472 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:476 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:488 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:497 +#: cinder/volume/drivers/emc/emc_smis_common.py:567 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:502 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s."
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:518 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:527 +#, python-format +msgid "" +"Error Create Snapshot: %(snapshot)s Volume: %(volume)s Error: " +"%(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:535 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:541 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:545 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:551 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:559 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:574 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:590 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:599 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:611 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:621 +#, python-format +msgid "Create export: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:626 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:648 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigService: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:663 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:674 +#, python-format +msgid "Error mapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:678 +#, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:694 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigService: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:707 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:711 +#, python-format +msgid "HidePaths for volume %s completed successfully."
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:724 +#, python-format +msgid "" +"AddMembers: ConfigService: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:739 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:744 +#, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:757 +#, python-format +msgid "" +"RemoveMembers: ConfigService: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:770 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:775 +#, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:781 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:790 +#: cinder/volume/drivers/emc/emc_smis_common.py:820 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:804 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:810 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:834 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:840 +#, python-format +msgid "Volume %s is already mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:852 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:884 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:887 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:903 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:906 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:928 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:948 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:952 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:959 +msgid "Cannot connect to ECOM server" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:971 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:984 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:997 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1010 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1054 +#, python-format +msgid "Pool %(storage_type)s is not found."
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1060 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1066 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1082 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1114 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1117 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1130 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1158 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1184 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1188 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1248 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1289 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1302 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1314 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1326 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1361 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1404 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1409 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1419 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1441 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1463 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1491 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1520 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1526 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1538 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1548 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1550 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1566 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:152 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:161 +#, python-format +msgid "Cannot find device number for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:191 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:198 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:215 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:70 +#, python-format +msgid "Range: start LU: %(start)s, end LU: %(end)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:84 +#, python-format +msgid "setting LU upper (end) limit to %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:92 +#, python-format +msgid "%(element)s: %(val)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:103 cinder/volume/drivers/hds/hds.py:105 +#, python-format +msgid "XML exception reading parameter: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:178 +#, python-format +msgid "portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:197 +#, python-format +msgid "No configuration found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:250 +#, python-format +msgid "HDP not found: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:289 +#, python-format +msgid "iSCSI portal not found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:327 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:355 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is cloned." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:372 +#, python-format +msgid "LUN %(lun)s extended to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:395 +#, python-format +msgid "delete lun %(lun)s on %(name)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:480 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created from snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:503 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is created as snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:522 +#, python-format +msgid "LUN %s is deleted." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:57 +msgid "_instantiate_driver: configuration not found." 
+msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:64 +#, python-format +msgid "" +"_instantiate_driver: Loading %(protocol)s driver for Huawei OceanStor " +"%(product)s series storage arrays." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:84 +#, python-format +msgid "" +"\"Product\" or \"Protocol\" is illegal. \"Product\" should be set to " +"either T, Dorado or HVS. \"Protocol\" should be set to either iSCSI or " +"FC. Product: %(product)s Protocol: %(protocol)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:74 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s host: %(host)s initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:92 +#: cinder/volume/drivers/huawei/huawei_t.py:461 +#, python-format +msgid "initialize_connection: Target FC ports WWNS: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:101 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(ini)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:159 +#: cinder/volume/drivers/huawei/rest_common.py:1278 +#, python-format +msgid "" +"_get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " +"check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:206 +#: cinder/volume/drivers/huawei/rest_common.py:1083 +#, python-format +msgid "_get_tgt_iqn: iSCSI IP is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:234 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:248 +#, python-format +msgid "" +"_get_iscsi_tgt_port_info: Failed to get iSCSI port info. Please make sure" +" the iSCSI port IP %s is configured in array." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:323 +#: cinder/volume/drivers/huawei/huawei_t.py:552 +#, python-format +msgid "" +"terminate_connection: volume: %(vol)s, host: %(host)s, connector: " +"%(initiator)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:351 +#, python-format +msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:436 +msgid "validate_connector: The FC driver requires the wwpns in the connector." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:443 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:578 +#, python-format +msgid "_remove_fc_ports: FC port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:40 +#, python-format +msgid "parse_xml_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:129 +#, python-format +msgid "_get_host_os_type: Host %(ip)s OS type is %(os)s."
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:59 +#, python-format +msgid "HVS Request URL: %(url)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:60 +#, python-format +msgid "HVS Request Data: %(data)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:73 +#, python-format +msgid "HVS Response Data: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:75 +#, python-format +msgid "Bad response from server: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:82 +msgid "JSON transfer error" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:102 +#, python-format +msgid "Login error, reason is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:166 +#, python-format +msgid "" +"%(err)s\n" +"result: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:173 +#, python-format +msgid "%s \"data\" was not in result." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:208 +msgid "Can't find the Qos policy in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:246 +msgid "Can't find lun or lun group in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:280 +#, python-format +msgid "Invalid resource pool: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:298 +#, python-format +msgid "Get pool info error, pool name is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:327 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:354 +#, python-format +msgid "_stop_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:474 +#, python-format +msgid "" +"_mapping_hostgroup_and_lungroup: lun_group: %(lun_group)sview_id: " +"%(view_id)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:511 +#: cinder/volume/drivers/huawei/rest_common.py:543 +#, python-format +msgid "initiator name:%(initiator_name)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:527 +#, python-format +msgid "host lun id is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:553 +#, python-format +msgid "the free wwns %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:574 +#, python-format +msgid "the fc server properties is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:688 +#, python-format +msgid "JSON transfer data error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:874 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:937 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:964 +#, python-format +msgid "" +"PrefetchType config is wrong. PrefetchType must be in 1,2,3,4. fetchtype " +"is:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:970 +msgid "Use default prefetch fetchtype. Prefetch fetchtype:Intelligent."
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:982 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal.LUNcopy name: " +"%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1056 +#, python-format +msgid "" +"_get_iscsi_port_info: Failed to get iscsi port info through config IP " +"%(ip)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1101 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1124 +#, python-format +msgid "_parse_volume_type: type id: %(type_id)s config parameter is: %(params)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1157 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the configuration file " +"%(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1162 +#, python-format +msgid "The config parameters are: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1239 +#: cinder/volume/drivers/huawei/ssh_common.py:118 +#: cinder/volume/drivers/huawei/ssh_common.py:1265 +#, python-format +msgid "_check_conf_file: Config file invalid. %s must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1246 +#: cinder/volume/drivers/huawei/ssh_common.py:125 +msgid "_check_conf_file: Config file invalid. StoragePool must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1256 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1300 +msgid "Can not find lun in array" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:54 +#, python-format +msgid "ssh_read: Read SSH timeout. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:70 +msgid "No response message. Please check system status." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:101 +#: cinder/volume/drivers/huawei/ssh_common.py:1249 +msgid "do_setup" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:135 +#: cinder/volume/drivers/huawei/ssh_common.py:1287 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType is invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:169 +#, python-format +msgid "_get_login_info: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:224 +#, python-format +msgid "create_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:242 +#, python-format +msgid "" +"_name_translate: Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:279 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the element in configuration " +"file %(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:373 +#: cinder/volume/drivers/huawei/ssh_common.py:1451 +#, python-format +msgid "LUNType must be \"Thin\" or \"Thick\". LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:395 +msgid "" +"_parse_conf_lun_params: Use default prefetch type. 
Prefetch type: " +"Intelligent" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:421 +#, python-format +msgid "" +"_get_maximum_capacity_pool_id: Failed to get pool id. Please check config" +" file and make sure the StoragePool %s is created in storage array." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:436 +#, python-format +msgid "CLI command: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:466 +#, python-format +msgid "" +"_execute_cli: Can not connect to IP %(old)s, try to connect to the other " +"IP %(new)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:501 +#, python-format +msgid "_execute_cli: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:511 +#, python-format +msgid "delete_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:516 +#, python-format +msgid "delete_volume: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:570 +#, python-format +msgid "" +"create_volume_from_snapshot: snapshot name: %(snapshot)s, volume name: " +"%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:580 +#, python-format +msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:650 +#, python-format +msgid "_wait_for_luncopy: LUNcopy %(luncopyname)s status is %(status)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:688 +#, python-format +msgid "create_cloned_volume: src volume: %(src)s, tgt volume: %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:697 +#, python-format +msgid "Source volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:739 +#, python-format +msgid "" +"extend_volume: extended volume name: %(extended_name)s new added volume " +"name: %(added_name)s new added volume size: %(added_size)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:747 +#, python-format +msgid "extend_volume: volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:779 +#, python-format +msgid "create_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:785 +msgid "create_snapshot: Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:792 +#, python-format +msgid "create_snapshot: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:855 +#, python-format +msgid "delete_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:865 +#, python-format +msgid "" +"delete_snapshot: Can not delete snapshot %s for it is a source LUN of " +"LUNCopy." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:873 +#, python-format +msgid "delete_snapshot: Snapshot %(snap)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:916 +#, python-format +msgid "" +"%(func)s: %(msg)s\n" +"CLI command: %(cmd)s\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:933 +#, python-format +msgid "map_volume: Volume %s was not found." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1079 +#, python-format +msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1102 +#, python-format +msgid "remove_map: Host %s does not exist." 
+msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1106 +#, python-format +msgid "remove_map: Volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1119 +#, python-format +msgid "remove_map: No map between host %(host)s and volume %(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1138 +#, python-format +msgid "" +"_delete_map: There are IOs accessing the system. Retry to delete host map" +" %(mapid)s 10s later." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1146 +#, python-format +msgid "" +"_delete_map: Failed to delete host map %(mapid)s.\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1185 +msgid "_update_volume_stats: Updating volume stats." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1277 +msgid "_check_conf_file: Config file invalid. StoragePool must be specified." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1311 +msgid "" +"_get_device_type: The driver only supports Dorado5100 and Dorado 2100 G2 " +"now." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1389 +#, python-format +msgid "" +"create_volume_from_snapshot: %(device)s does not support create volume " +"from snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1396 +#, python-format +msgid "create_cloned_volume: %(device)s does not support clone volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1404 +#, python-format +msgid "extend_volume: %(device)s does not support extend volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1413 +#, python-format +msgid "create_snapshot: %(device)s does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:132 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:142 +#, python-format +msgid "Failed getting details for pool %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:179 +msgid "do_setup: No configured nodes." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:182 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:186 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:190 +msgid "Unable to determine system name" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:193 +msgid "Unable to determine system id" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:196 +msgid "Unable to determine pool extent size" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:203 +#: cinder/volume/drivers/netapp/iscsi.py:122 +#: cinder/volume/drivers/netapp/nfs.py:639 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:157 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:209 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:217 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:225 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:235 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:254 +msgid "The connector does not contain the required information." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:277 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:297 +msgid "CHAP secret exists for host but CHAP is disabled" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:302 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:314 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:316 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:333 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:342 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:365 +msgid "" +"Could not get FC connection information for the host-volume connection. " +"Is the host configured properly for FC connections?" 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:380 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:385 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:403 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:411 +msgid "terminate_connection: Failed to get host name from connector." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:421 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:447 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:459 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:470 +#, python-format +msgid "enter: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:474 +msgid "extend_volume: Extending a volume with snapshots is not supported." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:481 +#, python-format +msgid "leave: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:497 +#, python-format +msgid "enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:523 +#, python-format +msgid "leave: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:540 +#, python-format +msgid "" +"enter: retype: id=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:581 +#, python-format +msgid "" +"exit: retype: ild=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:622 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:623 +msgid "_update_volume_stats: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:44 +#, python-format +msgid "Could not find key in output of command %(cmd)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:64 +#, python-format +msgid "Failed to get code level (%s)." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:86 +#, python-format +msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:143 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:165 +#, python-format +msgid "Failed to find host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:178 +#, python-format +msgid "enter: get_host_from_connector: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:207 +#, python-format +msgid "leave: get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:218 +#, python-format +msgid "enter: create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:224 +msgid "create_host: Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:235 +msgid "create_host: No initiators or wwpns supplied." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:265 +#, python-format +msgid "leave: create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:275 +#, python-format +msgid "enter: map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:301 +#, python-format +msgid "" +"leave: map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host " +"%(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:311 +#, python-format +msgid "enter: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:318 +#, python-format +msgid "unmap_vol_from_host: No mapping of volume %(vol_name)s to any host found." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:324 +#, python-format +msgid "" +"unmap_vol_from_host: Multiple mappings of volume %(vol_name)s found, no " +"host specified." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:336 +#, python-format +msgid "" +"unmap_vol_from_host: No mapping of volume %(vol_name)s to host %(host) " +"found." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:348 +#, python-format +msgid "leave: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:377 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:383 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:390 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:397 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:402 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:408 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:417 +#, python-format +msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:452 +msgid "Protocol must be specified as ' iSCSI' or ' FC'." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:495 +#, python-format +msgid "enter: create_vdisk: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:498 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:525 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping%(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:535 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within theallotted %(to)d " +"seconds timeout. Terminating." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:544 +#, python-format +msgid "" +"enter: run_flashcopy: execute FlashCopy from source %(source)s to target " +"%(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:552 +#, python-format +msgid "leave: run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:572 +#, python-format +msgid "Loopcall: _check_vdisk_fc_mappings(), vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:595 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:631 +#, python-format +msgid "Calling _ensure_vdisk_no_fc_mappings: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:639 +#, python-format +msgid "enter: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:641 +#, python-format +msgid "Tried to delete non-existant vdisk %s." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:645 +#, python-format +msgid "leave: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:649 +#, python-format +msgid "enter: create_copy: snapshot %(src)s to %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:654 +#, python-format +msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:669 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt)s from vdisk %(src)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:691 +msgid "migrate_volume started without a vdisk copy in the expected pool." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:743 +#, python-format +msgid "" +"Ignore change IO group as storage code level is %(code_level)s, below " +"then 6.4.0.0" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:35 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:211 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:244 +#, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:53 +#, python-format +msgid "Expected no output from CLI command %(cmd)s, got %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:65 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:256 +#, python-format +msgid "" +"Failed to parse CLI output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:142 +msgid "Must pass wwpn or host to lsfabric." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:158 +#, python-format +msgid "Did not find success message nor error for %(fun)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:163 +msgid "" +"storwize_svc_multihostmap_enabled is set to False, not allowing multi " +"host mapping." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:347 +#, python-format +msgid "Did not find expected key %(key)s in %(fun)s: %(raw)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:382 +#, python-format +msgid "" +"Unexpected CLI response: header/row mismatch. header: %(header)s, row: " +"%(row)s" +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:419 +#, python-format +msgid "No element by given name %s." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:440 +msgid "Not a valid value for NaElement." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:444 +msgid "NaElement name cannot be null." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:468 +msgid "Type cannot be converted into NaElement." 
+msgstr "" + +#: cinder/volume/drivers/netapp/common.py:75 +msgid "Required configuration not found" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:103 +#, python-format +msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:109 +#, python-format +msgid "Storage family %s is not supported" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:116 +#, python-format +msgid "No default storage protocol found for storage family %(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:123 +#, python-format +msgid "" +"Protocol %(storage_protocol)s is not supported for storage family " +"%(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:130 +#, python-format +msgid "" +"NetApp driver of family %(storage_family)s and protocol " +"%(storage_protocol)s loaded" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:139 +msgid "Only loading netapp drivers supported." +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:158 +#, python-format +msgid "" +"The configured NetApp driver is deprecated. Please refer the link to " +"resolve the issue '%s'." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:69 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:105 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:150 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:166 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:175 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:191 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:227 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:232 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:238 +#, python-format +msgid "Failed to get LUN target details for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:249 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:252 +#, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:290 +#, python-format +msgid "Snapshot %s deletion successful" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:310 +#: cinder/volume/drivers/netapp/iscsi.py:565 +#: cinder/volume/drivers/netapp/nfs.py:99 +#: cinder/volume/drivers/netapp/nfs.py:206 +#, python-format +msgid "Resizing %s failed. Cleaning volume." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:325 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:412 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:431 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:511 +msgid "Object is not a NetApp LUN." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:543 +#, python-format +msgid "Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:545 +#, python-format +msgid "Error getting lun attribute. Exception: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:600 +#, python-format +msgid "No need to extend volume %s as it is already the requested new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:606 +#, python-format +msgid "Resizing lun %s directly to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:633 +#, python-format +msgid "Lun %(path)s geometry failed. Message - %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:662 +#, python-format +msgid "Moving lun %(name)s to %(new_name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:677 +#, python-format +msgid "Resizing lun %s using sub clone to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:684 +#, python-format +msgid "%s cannot be sub clone resized as it is hosted on compressed volume" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:690 +#, python-format +msgid "%s cannot be sub clone resized as it contains no blocks." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:707 +#, python-format +msgid "Post clone resize lun %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:718 +#, python-format +msgid "Failure staging lun %s to tmp." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:723 +#, python-format +msgid "Failure moving new cloned lun to %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:727 +#, python-format +msgid "Failure deleting staged tmp lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:730 +#, python-format +msgid "Unknown exception in post clone resize lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:732 +#, python-format +msgid "Exception details: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:736 +msgid "Getting lun block count." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:741 +#, python-format +msgid "Failure getting lun info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:785 +#, python-format +msgid "Failed to get vol with required size and extra specs for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:796 +#, python-format +msgid "Error provisioning vol %(name)s on %(volume)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:841 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:982 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:986 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1087 +msgid "Cluster ssc is not updated. No volume stats found." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1149 +#: cinder/volume/drivers/netapp/nfs.py:1080 +msgid "Unsupported ONTAP version. ONTAP version 7.3.1 and above is supported." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1153 +#: cinder/volume/drivers/netapp/nfs.py:1084 +#: cinder/volume/drivers/netapp/utils.py:320 +msgid "Api version could not be determined." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1164 +#, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1273 +#, python-format +msgid "Error finding luns for volume %s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1390 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1393 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1456 +msgid "Volume refresh job already running. Returning..." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1462 +#, python-format +msgid "Error refreshing vol capacity. Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1470 +#, python-format +msgid "Refreshing capacity info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:104 +#: cinder/volume/drivers/netapp/nfs.py:211 +#, python-format +msgid "NFS file %s not discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:224 +#, python-format +msgid "Copied image to volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:230 +#, python-format +msgid "Registering image in cache %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:237 +#, python-format +msgid "" +"Exception while registering image %(image_id)s in cache. Exception: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:250 +#, python-format +msgid "Found cache file for image %(image_id)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:263 +#, python-format +msgid "Cloning img from cache for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:271 +msgid "Image cache cleaning in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:282 +msgid "Image cache cleaning in progress." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:293 +#, python-format +msgid "Cleaning cache for share %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:298 +#, python-format +msgid "Files to be queued for deletion %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:305 +#, python-format +msgid "Exception during cache cleaning %(share)s. Message - %(ex)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:311 +msgid "Image cache cleaning done." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:336 +#, python-format +msgid "Bytes to free %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:343 +#, python-format +msgid "Delete file path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:358 +#, python-format +msgid "Deleting file at path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:363 +#, python-format +msgid "Exception during deleting %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:395 +#, python-format +msgid "Unexpected exception in cloning image %(image_id)s. 
Message: %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:411 +#, python-format +msgid "Cloning image %s from cache" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:415 +#, python-format +msgid "Cache share: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:425 +#, python-format +msgid "Unexpected exception during image cloning in share %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:431 +#, python-format +msgid "Cloning image %s directly in share" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:436 +#, python-format +msgid "Share is cloneable %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:443 +#, python-format +msgid "Image is raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:450 +#, python-format +msgid "Image will locally be converted to raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:457 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:467 +#, python-format +msgid "Performing post clone for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:474 +msgid "NFS file could not be discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:478 +msgid "Checking file for resize" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:482 +#, python-format +msgid "Resizing file to %sG" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:488 +msgid "Resizing image file failed." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:510 +msgid "Discover file retries exhausted." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:529 +#, python-format +msgid "Image location not in the expected format %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:557 +#, python-format +msgid "Found possible share matches %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:561 +msgid "Unexpected exception while short listing used share." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:599 +#, python-format +msgid "Extending volume %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:710 +#, python-format +msgid "Shares on vserver %s will only be used for provisioning." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:714 +#: cinder/volume/drivers/netapp/nfs.py:892 +msgid "No vserver set in config. SSC will be disabled." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:757 +#, python-format +msgid "Exception creating vol %(name)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:765 +#, python-format +msgid "Volume %s could not be created on shares." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:815 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:856 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:862 +#, python-format +msgid "" +"Cloning with params volume %(volume)s, src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:924 +msgid "No cluster ssc stats found. Wait for next volume stats update." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:941 +msgid "No shares found hence skipping ssc refresh." 
+msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:978 +#: cinder/volume/drivers/netapp/nfs.py:1221 +#, python-format +msgid "Shortlisted del elg files %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:983 +#: cinder/volume/drivers/netapp/nfs.py:1226 +#, python-format +msgid "Getting file usage for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:988 +#: cinder/volume/drivers/netapp/nfs.py:1231 +#, python-format +msgid "file-usage for path %(path)s is %(bytes)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1005 +#: cinder/volume/drivers/netapp/nfs.py:1268 +#, python-format +msgid "Share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1007 +#: cinder/volume/drivers/netapp/nfs.py:1270 +#, python-format +msgid "No share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1038 +#, python-format +msgid "Found volume %(vol)s for share %(share)s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1129 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1139 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:241 +#, python-format +msgid "Unexpected error while creating ssc vol list. Message - %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:272 +#, python-format +msgid "Exception querying aggr options. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:313 +#, python-format +msgid "Exception querying sis information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:347 +#, python-format +msgid "Exception querying mirror information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:379 +#, python-format +msgid "Exception querying storage disk. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:421 +#, python-format +msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:455 +#, python-format +msgid "Successfully completed stale refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:482 +#, python-format +msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:488 +#, python-format +msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:500 +msgid "Backend not a VolumeDriver." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:502 +msgid "Backend server not NaServer." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:505 +msgid "ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:517 +msgid "refresh stale ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:616 +msgid "Fatal error: User not permitted to query NetApp volumes." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:623 +#, python-format +msgid "" +"The user does not have access or sufficient privileges to use all ssc " +"apis. The ssc features %s may not work as expected." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:122 +msgid "ems executed successfully." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:124 +#, python-format +msgid "Failed to invoke ems. 
Message : %s" +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:137 +msgid "" +"It is not the recommended way to use drivers by NetApp. Please use " +"NetAppDriver to achieve the functionality." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:160 +msgid "Requires an NaServer instance." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:317 +msgid "Unsupported Clustered Data ONTAP version." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:99 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:150 +#, python-format +msgid "Extending volume: %(id)s New size: %(size)s GB" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:166 +#, python-format +msgid "Volume %s does not exist, it seems it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:179 +#, python-format +msgid "Cannot delete snapshot %(origin): %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:190 +#, python-format +msgid "Creating temp snapshot of the original volume: %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:200 +#: cinder/volume/drivers/nexenta/nfs.py:200 +#, python-format +msgid "Volume creation failed, deleting created snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:205 +#: cinder/volume/drivers/nexenta/nfs.py:205 +#, python-format +msgid "Failed to delete zfs snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:223 +#, python-format +msgid "Enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:250 +#, python-format +msgid "Remote NexentaStor appliance at %s should be SSH-bound." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:267 +#, python-format +msgid "" +"Cannot send source snapshot %(src)s to destination %(dst)s. Reason: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:275 +#, python-format +msgid "" +"Cannot delete temporary source snapshot %(src)s on NexentaStor Appliance:" +" %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:281 +#, python-format +msgid "Cannot delete source volume %(volume)s on NexentaStor Appliance: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:318 +#, python-format +msgid "Snapshot %s does not exist, it seems it was already deleted." 
+msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:439 +#: cinder/volume/drivers/windows/windows_utils.py:230 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:449 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:461 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:471 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:481 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:514 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:522 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:83 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:88 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:89 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:90 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:96 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:85 +#, python-format +msgid "Volume %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:89 +#, python-format +msgid "Folder %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:114 +#, python-format +msgid "Creating folder on Nexenta Store %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:146 +#, python-format +msgid "Cannot destroy created folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:176 +#, python-format +msgid "Cannot destroy cloned folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:227 +#, python-format +msgid "Folder %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:237 +#: cinder/volume/drivers/nexenta/nfs.py:268 +#, python-format +msgid "Snapshot %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:302 +#, python-format +msgid "Creating regular file: %s.This may take some time." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:313 +#, python-format +msgid "Regular file: %s created." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:365 +#, python-format +msgid "Sharing folder %s on Nexenta Store" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:393 +#, python-format +msgid "Shares loaded: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/utils.py:46 +#, python-format +msgid "Invalid value: \"%s\"" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:93 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:99 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:107 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:137 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:190 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:246 +#, python-format +msgid "Snapshot info: %(name)s => %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:321 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:79 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:166 +#, python-format +msgid "Invalid hp3parclient version. Version %s or greater required." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:179 +#, python-format +msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:193 +#, python-format +msgid "HP3PARCommon %(common_ver)s, hp3parclient %(rest_ver)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:212 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:494 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:228 +#, python-format +msgid "Failed to get domain because CPG (%s) doesn't exist on array." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:247 +#, python-format +msgid "Error extending volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:347 +#, python-format +msgid "command %s failed" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:390 +#, python-format +msgid "Error running ssh command: %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:586 +#, python-format +msgid "VV Set %s does not exist." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:633 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:684 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:752 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1004 +#, python-format +msgid "Failure in update_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1019 +#, python-format +msgid "Failure in clear_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1031 +#, python-format +msgid "Error attaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1039 +#, python-format +msgid "Error detaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:124 +#, python-format +msgid "Invalid IP address format '%s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:158 +#, python-format +msgid "" +"Found invalid iSCSI IP address(s) in configuration option(s) " +"hp3par_iscsi_ips or iscsi_ip_address '%s.'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:164 +msgid "At least one valid iSCSI IP address must be set." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:266 +msgid "Least busy iSCSI port not found, using first iSCSI port in list." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:75 +#, python-format +msgid "Failure while invoking function: %(func)s. Error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:162 +#, python-format +msgid "Error while terminating session: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:165 +msgid "Successfully established connection to the server." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:172 +#, python-format +msgid "Error while logging out the user: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:218 +#, python-format +msgid "" +"Not authenticated error occurred. Will create session and try API call " +"again: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:258 +#, python-format +msgid "Task: %(task)s progress: %(prog)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:262 +#, python-format +msgid "Task %s status: success." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:266 +#: cinder/volume/drivers/vmware/api.py:271 +#, python-format +msgid "Task: %(task)s failed with error: %(err)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:290 +msgid "Lease is ready." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:294 +msgid "Lease initializing..." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:304 +#, python-format +msgid "Error: unknown lease state %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:51 +#, python-format +msgid "Read %(bytes)s out of %(max)s from ThreadSafePipe." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:56 +#, python-format +msgid "Completed transfer of size %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:102 +#, python-format +msgid "Initiating image service update on image: %(image)s with meta: %(meta)s" +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:117 +#, python-format +msgid "Glance image: %s is now active." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:123 +#, python-format +msgid "Glance image: %s is in killed state." 
+msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:132 +#, python-format +msgid "Glance image %(id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:171 +#, python-format +msgid "" +"Exception during HTTP connection close in VMwareHTTPWrite. Exception is " +"%s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:203 +#: cinder/volume/drivers/vmware/read_write_util.py:292 +msgid "Could not retrieve URL from lease." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:206 +#, python-format +msgid "Opening vmdk url: %s for write." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:231 +#, python-format +msgid "Written %s bytes to vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:242 +#: cinder/volume/drivers/vmware/read_write_util.py:318 +#, python-format +msgid "Updating progress to %s percent." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:258 +#: cinder/volume/drivers/vmware/read_write_util.py:334 +msgid "Lease released." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:260 +#: cinder/volume/drivers/vmware/read_write_util.py:336 +#, python-format +msgid "Lease is already in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:295 +#, python-format +msgid "Opening vmdk url: %s for read." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:307 +#, python-format +msgid "Read %s bytes from vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:150 +#, python-format +msgid "Error(s): %s occurred in the call to RetrievePropertiesEx." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:189 +#, python-format +msgid "No such SOAP method %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:198 +#, python-format +msgid "httplib error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:209 +#, python-format +msgid "Socket error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:218 +#, python-format +msgid "Type error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:225 +#, python-format +msgid "Error in %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:112 +#, python-format +msgid "Returning spec value %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:115 +#, python-format +msgid "Invalid spec value: %s specified." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:118 +#, python-format +msgid "Returning default spec value: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:169 +#, python-format +msgid "%s not set." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:174 +#, python-format +msgid "Successfully setup driver: %(driver)s for server: %(ip)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:219 +msgid "Backing not available, no operation to be performed." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:287 +#, python-format +msgid "" +"Unable to pick datastore to accommodate %(size)s bytes from the " +"datastores: %(dss)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:293 +#, python-format +msgid "" +"Selected datastore: %(datastore)s with %(host_count)d connected host(s) " +"for the volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:375 +#, python-format +msgid "" +"Unable to find suitable datastore for volume of size: %(vol)s GB under " +"host: %(host)s. 
More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:385 +#, python-format +msgid "Unable to find host to accommodate a disk of size: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:412 +#, python-format +msgid "" +"Unable to find suitable datastore for volume: %(vol)s under host: " +"%(host)s. More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:422 +#, python-format +msgid "Unable to create volume: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:441 +#, python-format +msgid "The instance: %s, for which initialize connection is called, exists." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:448 +#, python-format +msgid "There is no backing for the volume: %s. Need to create one." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:456 +msgid "The instance for which initialize connection is called does not exist." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:461 +#, python-format +msgid "Trying to boot from an empty volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:470 +#, python-format +msgid "" +"Returning connection_info: %(info)s for volume: %(volume)s with " +"connector: %(connector)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:518 +#, python-format +msgid "Snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:523 +#, python-format +msgid "There is no backing, so will not create snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:528 +#, python-format +msgid "Successfully created snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:549 +#, python-format +msgid "Delete snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:554 +#, python-format +msgid "There is no backing, and so there is no snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:558 +#, python-format +msgid "Successfully deleted snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:586 +#, python-format +msgid "Successfully cloned new backing: %(back)s from source VMDK file: %(vmdk)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:603 +#, python-format +msgid "" +"There is no backing for the source volume: %(svol)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:633 +#, python-format +msgid "" +"There is no backing for the source snapshot: %(snap)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:642 +#: cinder/volume/drivers/vmware/vmdk.py:982 +#, python-format +msgid "" +"There is no snapshot point for the snapshotted volume: %(snap)s. Not " +"creating any backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:678 +#, python-format +msgid "Cannot create image of disk format: %s. Only vmdk disk format is accepted." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:713 +#: cinder/volume/drivers/vmware/vmdk.py:771 +#, python-format +msgid "Fetching glance image: %(id)s to server: %(host)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:722 +#: cinder/volume/drivers/vmware/vmdk.py:792 +#, python-format +msgid "Done copying image: %(id)s to volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:725 +#, python-format +msgid "" +"Exception in copy_image_to_volume: %(excep)s. Deleting the backing: " +"%(back)s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:746 +#, python-format +msgid "Exception in _select_ds_for_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:749 +#, python-format +msgid "Selected datastore %(ds)s for new volume of size %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:784 +#, python-format +msgid "Exception in copy_image_to_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:787 +#, python-format +msgid "Deleting the backing: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:808 +#, python-format +msgid "Copy glance image: %s to create new volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:842 +msgid "Upload to glance of attached volume is not supported." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:847 +#, python-format +msgid "Copy Volume: %s to new image." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:853 +#, python-format +msgid "Backing not found, creating for volume: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:872 +#, python-format +msgid "Done copying volume %(vol)s to a new image %(img)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:922 +#, python-format +msgid "Relocating volume: %(backing)s to %(ds)s and %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:961 +#: cinder/volume/drivers/vmware/volumeops.py:630 +#, python-format +msgid "Successfully created clone: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:974 +#, python-format +msgid "" +"There is no backing for the snapshoted volume: %(snap)s. Not creating any" +" backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1010 +#, python-format +msgid "" +"There is no backing for the source volume: %(src)s. Not creating any " +"backing for volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1018 +#, python-format +msgid "Linked clone of source volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:94 +#, python-format +msgid "Downloading image: %s from glance image server as a flat vmdk file." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:107 +#: cinder/volume/drivers/vmware/vmware_images.py:126 +#, python-format +msgid "Downloaded image: %s from glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:113 +#, python-format +msgid "Downloading image: %s from glance image server using HttpNfc import." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:132 +#, python-format +msgid "Uploading image: %s to the Glance image server using HttpNfc export." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:158 +#, python-format +msgid "Uploaded image: %s to the Glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:87 +#, python-format +msgid "Did not find any backing with name: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:94 +#, python-format +msgid "Deleting the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:97 +#, python-format +msgid "Initiated deletion of VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:99 +#, python-format +msgid "Deleted the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:237 +#, python-format +msgid "There are no valid datastores attached to %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:289 +#, python-format +msgid "" +"Creating folder: %(child_folder_name)s under parent folder: " +"%(parent_folder)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:306 +#, python-format +msgid "Child folder already present: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:314 +#, python-format +msgid "Created child folder: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:365 +#, python-format +msgid "Spec for creating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:383 +#, python-format +msgid "" +"Creating volume backing name: %(name)s disk_type: %(disk_type)s size_kb: " +"%(size_kb)s at folder: %(folder)s resourse pool: %(resource_pool)s " +"datastore name: %(ds_name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:395 +#, python-format +msgid "Initiated creation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:398 +#, python-format +msgid "Successfully created volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:438 +#, python-format +msgid "Spec for relocating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:452 +#, python-format +msgid "" +"Relocating backing: %(backing)s to datastore: %(ds)s and resource pool: " +"%(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:462 +#, python-format +msgid "Initiated relocation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:464 +#, python-format +msgid "" +"Successfully relocated volume backing: %(backing)s to datastore: %(ds)s " +"and resource pool: %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:474 +#, python-format +msgid "Moving backing: %(backing)s to folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:479 +#, python-format +msgid "Initiated move of volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:482 +#, python-format +msgid "Successfully moved volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:494 +#, python-format +msgid "Snapshoting backing: %(backing)s with name: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:501 +#, python-format +msgid "Initiated snapshot of volume backing: %(backing)s named: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:505 +#, python-format +msgid "Successfully created snapshot: %(snap)s for volume backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:553 +#, python-format +msgid "Deleting the snapshot: %(name)s from backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:558 +#, python-format +msgid "" +"Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " +"delete anything." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:565 +#, python-format +msgid "Initiated snapshot: %(name)s deletion for backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:569 +#, python-format +msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:597 +#, python-format +msgid "Spec for cloning the backing: %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:613 +#, python-format +msgid "" +"Creating a clone of backing: %(back)s, named: %(name)s, clone type: " +"%(type)s from snapshot: %(snap)s on datastore: %(ds)s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:627 +#, python-format +msgid "Initiated clone of backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:638 +#, python-format +msgid "Deleting file: %(file)s under datacenter: %(dc)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:646 +#, python-format +msgid "Initiated deletion via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:648 +#, python-format +msgid "Successfully deleted file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:701 +msgid "Copying disk data before snapshot of the VM" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:711 +#, python-format +msgid "Initiated copying disk data via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:713 +#, python-format +msgid "Successfully copied disk at: %(src)s to: %(dest)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:722 +#, python-format +msgid "Deleting vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:729 +#, python-format +msgid "Initiated deleting vmdk file via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:731 +#, python-format +msgid "Deleted vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/windows/windows.py:102 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:47 +#, python-format +msgid "" +"check_for_setup_error: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:53 +msgid "check_for_setup_error: there is no ISCSI traffic listening." +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:63 +#, python-format +msgid "" +"get_host_information: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:73 +#, python-format +msgid "" +"get_host_information: the ISCSI target information could not be " +"retrieved. WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:105 +#, python-format +msgid "" +"associate_initiator_with_iscsi_target: an association between initiator: " +"%(init)s and target name: %(target)s could not be established. WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:123 +#, python-format +msgid "" +"delete_iscsi_target: error when deleting the iscsi target associated with" +" target name: %(target)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:139 +#, python-format +msgid "" +"create_volume: error when creating the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:157 +#, python-format +msgid "" +"delete_volume: error when deleting the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:177 +#, python-format +msgid "" +"create_snapshot: error when creating the snapshot name: %(vol_name)s . 
" +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:193 +#, python-format +msgid "" +"create_volume_from_snapshot: error when creating the volume name: " +"%(vol_name)s from snapshot name: %(snap_name)s. WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:208 +#, python-format +msgid "" +"delete_snapshot: error when deleting the snapshot name: %(snap_name)s . " +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:223 +#, python-format +msgid "" +"create_iscsi_target: error when creating iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:240 +#, python-format +msgid "" +"remove_iscsi_target: error when deleting iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:255 +#, python-format +msgid "" +"add_disk_to_target: error adding disk associated to volume : %(vol_name)s" +" to the target name: %(tar_name)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:273 +#, python-format +msgid "" +"copy_vhd_disk: error when copying disk from source path : %(src_path)s to" +" destination path: %(dest_path)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:290 +#, python-format +msgid "" +"extend: error when extending the volume: %(vol_name)s .WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/flows/common.py:52 +#, python-format +msgid "Restoring source %(source_volid)s status to %(status)s" +msgstr "" + +#: cinder/volume/flows/common.py:58 +#, python-format +msgid "" +"Failed setting source volume %(source_volid)s back to its initial " +"%(source_status)s status" +msgstr "" + +#: cinder/volume/flows/common.py:83 +#, python-format +msgid "Updating volume: %(volume_id)s with %(update)s due to: %(reason)s" +msgstr "" + +#: cinder/volume/flows/common.py:90 +#: cinder/volume/flows/manager/create_volume.py:676 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(update)s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:81 +#, python-format +msgid "Originating snapshot status must be one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:103 +#, python-format +msgid "" +"Unable to create a volume from an originating source volume when its " +"status is not one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:126 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than the snapshot size " +"%(snap_size)sGB. They must be >= original snapshot size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:135 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than original volume size " +"%(source_size)sGB. They must be >= original volume size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:144 +#, python-format +msgid "Volume size %(size)s must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:186 +#, python-format +msgid "" +"Size of specified image %(image_size)sGB is larger than volume size " +"%(volume_size)sGB." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:194 +#, python-format +msgid "" +"Volume size %(volume_size)sGB cannot be smaller than the image minDisk " +"size %(min_disk)sGB." 
+msgstr "" + +#: cinder/volume/flows/api/create_volume.py:212 +#, python-format +msgid "Metadata property key %s greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:217 +#, python-format +msgid "Metadata property key %s value greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:254 +#, python-format +msgid "Availability zone '%s' is invalid" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:267 +msgid "Volume must be in the same availability zone as the snapshot" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:276 +msgid "Volume must be in the same availability zone as the source volume" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:315 +msgid "Volume type will be changed to be the same as the source volume." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:463 +#, python-format +msgid "Failed destroying volume entry %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:546 +#, python-format +msgid "Failed rolling back quota for %s reservations" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:590 +#, python-format +msgid "Failed to update quota for deleting volume: %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:678 +#: cinder/volume/flows/manager/create_volume.py:192 +#, python-format +msgid "Volume %s: create failed" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:682 +msgid "Unexpected build error:" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:105 +#, python-format +msgid "" +"Volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d due to " +"%(reason)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:124 +#, python-format +msgid "Volume %s: re-scheduled" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:141 +#, python-format +msgid "Updating volume %(volume_id)s with %(update)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:146 +#, python-format +msgid "Volume %s: resetting 'creating' status failed." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:165 +#, python-format +msgid "Volume %s: rescheduling failed" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:308 +#, python-format +msgid "" +"Failed notifying about the volume action %(event)s for volume " +"%(volume_id)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:345 +#, python-format +msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:347 +#, python-format +msgid "" +"Failed updating volume %(vol_id)s metadata using the provided " +"%(src_type)s %(src_id)s metadata" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:405 +#, python-format +msgid "" +"Failed fetching snapshot %(snapshot_id)s bootable flag using the provided" +" glance snapshot %(snapshot_ref_id)s volume reference" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:418 +#, python-format +msgid "Marking volume %s as bootable." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:421 +#, python-format +msgid "Failed updating volume %(volume_id)s bootable flag to true" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:448 +#, python-format +msgid "" +"Attempting download of %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s." 
+msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:455 +#: cinder/volume/flows/manager/create_volume.py:466 +#, python-format +msgid "" +"Failed to copy image %(image_id)s to volume: %(volume_id)s, error: " +"%(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:461 +#, python-format +msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:475 +#, python-format +msgid "" +"Downloaded image %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s successfully." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:512 +#, python-format +msgid "" +"Creating volume glance metadata for volume %(volume_id)s backed by image " +"%(image_id)s with: %(vol_metadata)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:526 +#, python-format +msgid "" +"Cloning %(volume_id)s from image %(image_id)s at location " +"%(image_location)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:552 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(updates)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:574 +#, python-format +msgid "Unable to create volume. Volume driver %s not initialized" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:588 +#, python-format +msgid "" +"Volume %(volume_id)s: being created using %(functor)s with specification:" +" %(volume_spec)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:611 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with creation provided " +"model %(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:619 +#, python-format +msgid "Volume %s: creating export" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:633 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with driver provided model " +"%(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:680 +#, python-format +msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" +msgstr "" + +#~ msgid "Error retrieving volume status: %s" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get system name" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get storage pool data" +#~ msgstr "" + +#~ msgid "Cannot find any Fibre Channel HBAs" +#~ msgstr "" + +#~ msgid "Volume status must be available or error" +#~ msgstr "" + +#~ msgid "No backend config with id %s" +#~ msgstr "" + +#~ msgid "No sm_flavor called %s" +#~ msgstr "" + +#~ msgid "No sm_volume with id %s" +#~ msgstr "" + +#~ msgid "Error: %s" +#~ msgstr "" + +#~ msgid "Unexpected state while cloning %s" +#~ msgstr "" + +#~ msgid "iSCSI device not found at %s" +#~ msgstr "" + +#~ msgid "Fibre Channel device not found." +#~ msgstr "" + +#~ msgid "Uncaught exception" +#~ msgstr "" + +#~ msgid "Out reactor registered" +#~ msgstr "" + +#~ msgid "CONSUMER GOT %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT QUEUED %(data)s" +#~ msgstr "" + +#~ msgid "Could not create IPC directory %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT %(data)s" +#~ msgstr "" + +#~ msgid "May specify only one of snapshot, imageRef or source volume" +#~ msgstr "" + +#~ msgid "Volume size cannot be lesser than the Snapshot size" +#~ msgstr "" + +#~ msgid "Unable to clone volumes that are in an error state" +#~ msgstr "" + +#~ msgid "Clones currently must be >= original volume size." 
+#~ msgstr "" + +#~ msgid "Volume size '%s' must be an integer and greater than 0" +#~ msgstr "" + +#~ msgid "Size of specified image is larger than volume size." +#~ msgstr "" + +#~ msgid "Image minDisk size is larger than the volume size." +#~ msgstr "" + +#~ msgid "" +#~ msgstr "" + +#~ msgid "Availability zone is invalid" +#~ msgstr "" + +#~ msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +#~ msgstr "" + +#~ msgid "volume %s: creating from snapshot" +#~ msgstr "" + +#~ msgid "volume %s: creating from existing volume" +#~ msgstr "" + +#~ msgid "volume %s: creating from image" +#~ msgstr "" + +#~ msgid "volume %s: creating" +#~ msgstr "" + +#~ msgid "Setting volume: %s status to error after failed image copy." +#~ msgstr "" + +#~ msgid "Unexpected Error: " +#~ msgstr "" + +#~ msgid "volume %s: creating export" +#~ msgstr "" + +#~ msgid "volume %s: create failed" +#~ msgstr "" + +#~ msgid "volume %s: created successfully" +#~ msgstr "" + +#~ msgid "volume %s: Error trying to reschedule create" +#~ msgstr "" + +#~ msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +#~ msgstr "" + +#~ msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +#~ msgstr "" + +#~ msgid "Downloaded image %(image_id)s to %(volume_id)s successfully." +#~ msgstr "" + +#~ msgid "Array Mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "LUN %(lun)s of size %(size)s MB is created." +#~ msgstr "" + +#~ msgid "Array mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "Failed to attach iser target for volume %(volume_id)s." +#~ msgstr "" + +#~ msgid "Fetching %s" +#~ msgstr "" + +#~ msgid "Link Local address is not found.:%s" +#~ msgstr "" + +#~ msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +#~ msgstr "" + +#~ msgid "Started %(name)s on %(_host)s:%(_port)s" +#~ msgstr "" + +#~ msgid "Unable to find a Fibre Channel volume device" +#~ msgstr "" + +#~ msgid "Volume device not found at %s" +#~ msgstr "" + +#~ msgid "Unable to find Volume Group: %s" +#~ msgstr "" + +#~ msgid "Failed to create Volume Group: %s" +#~ msgstr "" + +#~ msgid "snapshot %(snap_name)s: creating" +#~ msgstr "" + +#~ msgid "Running with CoraidDriver for ESM EtherCLoud" +#~ msgstr "" + +#~ msgid "Update session cookie %(session)s" +#~ msgstr "" + +#~ msgid "Message : %(message)s" +#~ msgstr "" + +#~ msgid "Error while trying to set group: %(message)s" +#~ msgstr "" + +#~ msgid "Unable to find group: %(group)s" +#~ msgstr "" + +#~ msgid "ESM urlOpen error" +#~ msgstr "" + +#~ msgid "JSON Error" +#~ msgstr "" + +#~ msgid "Request without URL" +#~ msgstr "" + +#~ msgid "Configure data : %s" +#~ msgstr "" + +#~ msgid "Configure response : %s" +#~ msgstr "" + +#~ msgid "Unable to retrive volume infos for volume %(volname)s" +#~ msgstr "" + +#~ msgid "Cannot login on Coraid ESM" +#~ msgstr "" + +#~ msgid "Fail to create volume %(volname)s" +#~ msgstr "" + +#~ msgid "Failed to delete volume %(volname)s" +#~ msgstr "" + +#~ msgid "Failed to Create Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "Failed to Delete Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "Failed to Create Volume from Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "fmt = %(fmt)s backed by: %(backing_file)s" +#~ msgstr "" + +#~ msgid "Expected image to be in raw format, but is %s" +#~ msgstr "" + +#~ msgid "volume group %s doesn't exist" +#~ msgstr "" + +#~ msgid "Error retrieving volume stats: %s" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Could not get system name" +#~ msgstr "" + +#~ msgid "CPG (%s) must 
be in a domain" +#~ msgstr "" + +#~ msgid "Error populating default encryption types!" +#~ msgstr "" + +#~ msgid "Unexpected error while running command." +#~ msgstr "" + +#~ msgid "Nexenta SA returned the error" +#~ msgstr "" + +#~ msgid "Ignored target group creation error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group member addition error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LU creation error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Copying metadata from snapshot %(snap_volume_id)s to %(volume_id)s" +#~ msgstr "" + +#~ msgid "Connection to glance failed" +#~ msgstr "" + +#~ msgid "Invalid snapshot" +#~ msgstr "" + +#~ msgid "Invalid input received" +#~ msgstr "" + +#~ msgid "Invalid volume type" +#~ msgstr "" + +#~ msgid "Invalid volume" +#~ msgstr "" + +#~ msgid "Invalid host" +#~ msgstr "" + +#~ msgid "Invalid auth key" +#~ msgstr "" + +#~ msgid "Invalid metadata" +#~ msgstr "" + +#~ msgid "Invalid metadata size" +#~ msgstr "" + +#~ msgid "Migration error" +#~ msgstr "" + +#~ msgid "Quota exceeded" +#~ msgstr "" + +#~ msgid "Connection to swift failed" +#~ msgstr "" + +#~ msgid "Volume migration failed" +#~ msgstr "" + +#~ msgid "SSH command injection detected" +#~ msgstr "" + +#~ msgid "Invalid qos specs" +#~ msgstr "" + +#~ msgid "debug in callback: %s" +#~ msgstr "" + +#~ msgid "Expected object of type: %s" +#~ msgstr "" + +#~ msgid "timefunc: '%(name)s' took %(total_time).2f secs" +#~ msgstr "" + +#~ msgid "base image still has %s snapshots so not deleting base image" +#~ msgstr "" + +#~ msgid "Failed to rename migration destination volume %(vol)s to %(name)s" +#~ msgstr "" + +#~ msgid "Resize volume \"%(name)s\" to %(size)s" +#~ msgstr "" + +#~ msgid "Volume \"%(name)s\" resized. New size is %(size)s" +#~ msgstr "" + +#~ msgid "Invalid snapshot backing file format: %s" +#~ msgstr "" + +#~ msgid "Extend volume from %(old_size) to %(new_size)" +#~ msgstr "" + +#~ msgid "pool %s doesn't exist" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Could not get system name." +#~ msgstr "" + +#~ msgid "Disk not found: %s" +#~ msgstr "" + +#~ msgid "read timed out" +#~ msgstr "" + +#~ msgid "check_for_setup_error." +#~ msgstr "" + +#~ msgid "check_for_setup_error: Can not get device type." +#~ msgstr "" + +#~ msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +#~ msgstr "" + +#~ msgid "_get_device_type: Storage Pool must be configured." +#~ msgstr "" + +#~ msgid "create_volume:volume name: %s." +#~ msgstr "" + +#~ msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +#~ msgstr "" + +#~ msgid "create_export: volume name:%s" +#~ msgstr "" + +#~ msgid "create_export:Volume %(name)s does not exist." +#~ msgstr "" + +#~ msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +#~ msgstr "" + +#~ msgid "terminate_connection:Host does not exist. Host name:%(host)s." +#~ msgstr "" + +#~ msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +#~ msgstr "" + +#~ msgid "create_snapshot:Device does not support snapshot." +#~ msgstr "" + +#~ msgid "create_snapshot:Resource pool needs 1GB valid size at least." +#~ msgstr "" + +#~ msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +#~ msgstr "" + +#~ msgid "create_snapshot:Snapshot does not exist. 
Snapshot name:%(name)s" +#~ msgstr "" + +#~ msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +#~ msgstr "" + +#~ msgid "delete_snapshot:Device does not support snapshot." +#~ msgstr "" + +#~ msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +#~ msgstr "" + +#~ msgid "_check_conf_file: %s" +#~ msgstr "" + +#~ msgid "Write login information to xml error. %s" +#~ msgstr "" + +#~ msgid "_get_login_info error. %s" +#~ msgstr "" + +#~ msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +#~ msgstr "" + +#~ msgid "_get_lun_set_info:%s" +#~ msgstr "" + +#~ msgid "_get_iscsi_info:%s" +#~ msgstr "" + +#~ msgid "CLI command:%s" +#~ msgstr "" + +#~ msgid "_execute_cli:%s" +#~ msgstr "" + +#~ msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +#~ msgstr "" + +#~ msgid "_get_tgt_iqn:iSCSI IP is %s." +#~ msgstr "" + +#~ msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +#~ msgstr "" + +#~ msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +#~ msgstr "" + +#~ msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +#~ msgstr "" + +#~ msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." +#~ msgstr "" + +#~ msgid "Ignored target creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group member addition error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LU creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LUN mapping entry addition error while ensuring export" +#~ msgstr "" + +#~ msgid "Invalid source volume %(reason)s." +#~ msgstr "" + +#~ msgid "The request is invalid." +#~ msgstr "" + +#~ msgid "Volume %(volume_id)s persistence file could not be found." +#~ msgstr "" + +#~ msgid "No disk at %(location)s" +#~ msgstr "" + +#~ msgid "Class %(class_name)s could not be found: %(exception)s" +#~ msgstr "" + +#~ msgid "Action not allowed." +#~ msgstr "" + +#~ msgid "Key pair %(key_name)s already exists." +#~ msgstr "" + +#~ msgid "Migration error: %(reason)s" +#~ msgstr "" + +#~ msgid "Maximum volume/snapshot size exceeded" +#~ msgstr "" + +#~ msgid "3PAR Host already exists: %(err)s. %(info)s" +#~ msgstr "" + +#~ msgid "Backup volume %(volume_id)s type not recognised." +#~ msgstr "" + +#~ msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s" +#~ msgstr "" + +#~ msgid "ssh_read: Read SSH timeout" +#~ msgstr "" + +#~ msgid "do_setup." +#~ msgstr "" + +#~ msgid "create_volume: volume name: %s." +#~ msgstr "" + +#~ msgid "delete_volume: volume name: %s." +#~ msgstr "" + +#~ msgid "create_cloned_volume: src volume: %(src)s tgt volume: %(tgt)s" +#~ msgstr "" + +#~ msgid "create_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" +#~ msgstr "" + +#~ msgid "delete_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" +#~ msgstr "" + +#~ msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Updating volume stats" +#~ msgstr "" + +#~ msgid "restore finished." +#~ msgstr "" + +#~ msgid "Error encountered during initialization of driver: %s" +#~ msgstr "" + +#~ msgid "Unabled to update stats, driver is uninitialized" +#~ msgstr "" + +#~ msgid "Snapshot file at %s does not exist." 
+#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %s does not exist" +#~ msgstr "" + +#~ msgid "Login to 3PAR array invalid" +#~ msgstr "" + +#~ msgid "There are no datastores present under %s." +#~ msgstr "" + +#~ msgid "Size for volume: %s not found, skipping secure delete." +#~ msgstr "" + +#~ msgid "Could not find attribute for LUN named %s" +#~ msgstr "" + +#~ msgid "Cleaning up incomplete backup operations" +#~ msgstr "" + +#~ msgid "Resetting volume %s to available (was backing-up)" +#~ msgstr "" + +#~ msgid "Resetting volume %s to error_restoring (was restoring-backup)" +#~ msgstr "" + +#~ msgid "Resetting backup %s to error (was creating)" +#~ msgstr "" + +#~ msgid "Resetting backup %s to available (was restoring)" +#~ msgstr "" + +#~ msgid "Resuming delete on backup: %s" +#~ msgstr "" + +#~ msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +#~ msgstr "" + +#~ msgid "create_backup finished. backup: %s" +#~ msgstr "" + +#~ msgid "delete_backup started, backup: %s" +#~ msgstr "" + +#~ msgid "delete_backup finished, backup %s deleted" +#~ msgstr "" + +#~ msgid "JSON transfer Error" +#~ msgstr "" + +#~ msgid "create volume error: %(err)s" +#~ msgstr "" + +#~ msgid "Create snapshot error." +#~ msgstr "" + +#~ msgid "Create luncopy error." +#~ msgstr "" + +#~ msgid "_find_host_lun_id transfer data error! " +#~ msgstr "" + +#~ msgid "ssh_read: Read SSH timeout." +#~ msgstr "" + +#~ msgid "There are no hosts in the inventory." +#~ msgstr "" + +#~ msgid "Unable to create volume: %(vol)s on the hosts: %(hosts)s." +#~ msgstr "" + +#~ msgid "Successfully cloned new backing: %s." +#~ msgstr "" + +#~ msgid "Successfully reverted clone: %(clone)s to snapshot: %(snapshot)s." +#~ msgstr "" + +#~ msgid "Copying backing files from %(src)s to %(dest)s." +#~ msgstr "" + +#~ msgid "Initiated copying of backing via task: %s." +#~ msgstr "" + +#~ msgid "Successfully copied backing to %s." +#~ msgstr "" + +#~ msgid "Registering backing at path: %s to inventory." +#~ msgstr "" + +#~ msgid "Initiated registring backing, task: %s." +#~ msgstr "" + +#~ msgid "Successfully registered backing: %s." +#~ msgstr "" + +#~ msgid "Reverting backing to snapshot: %s." +#~ msgstr "" + +#~ msgid "Initiated reverting snapshot via task: %s." +#~ msgstr "" + +#~ msgid "Successfully reverted to snapshot: %s." +#~ msgstr "" + +#~ msgid "Successfully copied disk data to: %s." +#~ msgstr "" + +#~ msgid "Error(s): %s occurred in the call to RetrieveProperties." +#~ msgstr "" + +#~ msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +#~ msgstr "" + +#~ msgid "Deploy v1 of the Cinder API. " +#~ msgstr "" + +#~ msgid "Deploy v2 of the Cinder API. " +#~ msgstr "" + +#~ msgid "_read_xml:%s" +#~ msgstr "" + +#~ msgid "request ip info is %s." +#~ msgstr "" + +#~ msgid "new str info is %s." +#~ msgstr "" + +#~ msgid "Failed to create iser target for volume %(volume_id)s." +#~ msgstr "" + +#~ msgid "Failed to remove iser target for volume %(volume_id)s." 
+#~ msgstr "" + +#~ msgid "rtstool is not installed correctly" +#~ msgstr "" + +#~ msgid "Creating iser_target for: %s" +#~ msgstr "" + +#~ msgid "Failed to create iser target for volume id:%(vol_id)s: %(e)s" +#~ msgstr "" + +#~ msgid "Removing iser_target for: %s" +#~ msgstr "" + +#~ msgid "Failed to remove iser target for volume id:%(vol_id)s: %(e)s" +#~ msgstr "" + +#~ msgid "Volume %s does not exist, it seems it was already deleted" +#~ msgstr "" + +#~ msgid "Executing zfs send/recv on the appliance" +#~ msgstr "" + +#~ msgid "zfs send/recv done, new volume %s created" +#~ msgstr "" + +#~ msgid "Failed to delete temp snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" + +#~ msgid "Failed to delete zfs recv snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" + +#~ msgid "rbd export-diff failed - %s" +#~ msgstr "" + +#~ msgid "rbd import-diff failed - %s" +#~ msgstr "" + +#~ msgid "%s is not on GPFS. Perhaps GPFS not mounted." +#~ msgstr "" + +#~ msgid "Folder %s does not exist, it seems it was already deleted." +#~ msgstr "" + +#~ msgid "No 'os-update_readonly_flag' was specified in request." +#~ msgstr "" + +#~ msgid "Volume 'readonly' flag must be specified in request as a boolean." +#~ msgstr "" + +#~ msgid "ISER provider_location not stored, using discovery" +#~ msgstr "" + +#~ msgid "Could not find iSER export for volume %s" +#~ msgstr "" + +#~ msgid "ISER Discovery: Found %s" +#~ msgstr "" + +#~ msgid "Failed to access the device on the path %(path)s: %(error)s." +#~ msgstr "" + +#~ msgid "iSER device not found at %s" +#~ msgstr "" + +#~ msgid "Found iSER node %(host_device)s (after %(tries)s rescans)." +#~ msgstr "" + +#~ msgid "Skipping ensure_export. No iser_target provisioned for volume: %s" +#~ msgstr "" + +#~ msgid "Skipping remove_export. No iser_target provisioned for volume: %s" +#~ msgstr "" + +#~ msgid "Downloading image: %s from glance image server." +#~ msgstr "" + +#~ msgid "Uploading image: %s to the Glance image server." +#~ msgstr "" + +#~ msgid "Invalid request body" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: prefix %s" +#~ msgstr "" + +#~ msgid "Schedule volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete schedule volume using flow: %s" +#~ msgstr "" + +#~ msgid "Create volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete create volume workflow" +#~ msgstr "" + +#~ msgid "Expected volume result not found" +#~ msgstr "" + +#~ msgid "Manager volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete manager volume workflow" +#~ msgstr "" + +#~ msgid "Unable to update stats, driver is uninitialized" +#~ msgstr "" + +#~ msgid "Bad reponse from server: %s" +#~ msgstr "" + +#~ msgid "%(flow)s has moved into state %(state)s from state %(old_state)s" +#~ msgstr "" + +#~ msgid "No request spec, will not reschedule" +#~ msgstr "" + +#~ msgid "No retry filter property or associated retry info, will not reschedule" +#~ msgstr "" + +#~ msgid "Retry info not present, will not reschedule" +#~ msgstr "" + +#~ msgid "Clear capabilities" +#~ msgstr "" + +#~ msgid "This usually means the volume was never succesfully created." +#~ msgstr "" + +#~ msgid "setting LU uppper (end) limit to %s" +#~ msgstr "" + +#~ msgid "Can't find lun or lun goup in array" +#~ msgstr "" + +#~ msgid "Volume to be restored to is smaller than the backup to be restored" +#~ msgstr "" + +#~ msgid "Volume driver '%(driver)s' not initialized." 
+#~ msgstr "" + +#~ msgid "in looping call" +#~ msgstr "" + +#~ msgid "Is the appropriate service running?" +#~ msgstr "" + +#~ msgid "Could not find another host" +#~ msgstr "" + +#~ msgid "Not enough allocatable volume gigabytes remaining" +#~ msgstr "" + +#~ msgid "Unable to update stats on non-intialized Volume Group: %s" +#~ msgstr "" + +#~ msgid "do_setup: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "migrate_volume started with more than one vdisk copy" +#~ msgstr "" + +#~ msgid "migrate_volume: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "Selected datastore: %s for the volume." +#~ msgstr "" + +#~ msgid "There are no valid datastores present under %s." +#~ msgstr "" + +#~ msgid "Unable to create volume, driver not initialized" +#~ msgstr "" + +#~ msgid "Migration %(migration_id)s could not be found." +#~ msgstr "" + +#~ msgid "Bad driver response status: %(status)s" +#~ msgstr "" + +#~ msgid "Instance %(instance_id)s could not be found." +#~ msgstr "" + +#~ msgid "Volume retype failed: %(reason)s" +#~ msgstr "" + +#~ msgid "SIGTERM received" +#~ msgstr "" + +#~ msgid "Child %(pid)d exited with status %(code)d" +#~ msgstr "" + +#~ msgid "_wait_child %d" +#~ msgstr "" + +#~ msgid "wait wrap.failed %s" +#~ msgstr "" + +#~ msgid "" +#~ "Report interval must be less than " +#~ "service down time. Current config: " +#~ ". Setting " +#~ "service_down_time to: %(new_service_down_time)s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target for volume %(name)s." +#~ msgstr "" + +#~ msgid "Updating iscsi target: %s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target %(name)s: %(e)s" +#~ msgstr "" + +#~ msgid "Caught '%(exception)s' exception." +#~ msgstr "" + +#~ msgid "Get code level failed" +#~ msgstr "" + +#~ msgid "do_setup: Could not get system name" +#~ msgstr "" + +#~ msgid "Failed to get license information." +#~ msgstr "" + +#~ msgid "do_setup: No configured nodes" +#~ msgstr "" + +#~ msgid "enter: _get_chap_secret_for_host: host name %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_chap_secret_for_host: host name " +#~ "%(host_name)s with secret %(chap_secret)s" +#~ msgstr "" + +#~ msgid "" +#~ "_create_host: Cannot clean host name. " +#~ "Host name is not unicode or string" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: %s" +#~ msgstr "" + +#~ msgid "leave: _get_host_from_connector: host %s" +#~ msgstr "" + +#~ msgid "enter: _create_host: host %s" +#~ msgstr "" + +#~ msgid "_create_host: No connector ports" +#~ msgstr "" + +#~ msgid "leave: _create_host: host %(host)s - %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +#~ msgstr "" + +#~ msgid "" +#~ "storwize_svc_multihostmap_enabled is set to " +#~ "False, Not allow multi host mapping" +#~ msgstr "" + +#~ msgid "volume %s mapping to multi host" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _map_vol_to_host: LUN %(result_lun)s, " +#~ "volume %(volume_name)s, host %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "leave: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "_create_host failed to return the host name." +#~ msgstr "" + +#~ msgid "_get_host_from_connector failed to return the host name for connector" +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to any host found." +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: Multiple mappings of " +#~ "volume %(vol_name)s found, no host " +#~ "specified." 
+#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to host %(host_name)s found" +#~ msgstr "" + +#~ msgid "protocol must be specified as ' iSCSI' or ' FC'" +#~ msgstr "" + +#~ msgid "enter: _create_vdisk: vdisk %s " +#~ msgstr "" + +#~ msgid "" +#~ "_create_vdisk %(name)s - did not find success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find success " +#~ "message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find mapping " +#~ "id in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to prepare FlashCopy" +#~ " from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "Unexecpted mapping status %(status)s for " +#~ "mapping %(id)s. Attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Mapping %(id)s prepare failed to " +#~ "complete within the allotted %(to)d " +#~ "seconds timeout. Terminating." +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s with " +#~ "exception %(ex)s" +#~ msgstr "" + +#~ msgid "_prepare_fc_map: %s" +#~ msgstr "" + +#~ msgid "" +#~ "_start_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "enter: _run_flashcopy: execute FlashCopy from" +#~ " source %(source)s to target %(target)s" +#~ msgstr "" + +#~ msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +#~ msgstr "" + +#~ msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %(src_vdisk)s (%(src_id)s) does not exist" +#~ msgstr "" + +#~ msgid "" +#~ "_create_copy: cannot get source vdisk " +#~ "%(src)s capacity from vdisk attributes " +#~ "%(attr)s" +#~ msgstr "" + +#~ msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_flashcopy_mapping_attributes: mapping " +#~ "%(fc_map_id)s, attributes %(attributes)s" +#~ msgstr "" + +#~ msgid "enter: _is_vdisk_defined: vdisk %s " +#~ msgstr "" + +#~ msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +#~ msgstr "" + +#~ msgid "enter: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "warning: Tried to delete vdisk %s but it does not exist." 
+#~ msgstr "" + +#~ msgid "leave: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "" +#~ "_add_vdisk_copy %(name)s - did not find" +#~ " success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "_get_vdisk_copy_attrs: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "_get_pool_attrs: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "enter: _execute_command_and_parse_attributes: command %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _execute_command_and_parse_attributes:\n" +#~ "command: %(cmd)s\n" +#~ "attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "_get_hdr_dic: attribute headers and values do not match.\n" +#~ " Headers: %(header)s\n" +#~ " Values: %(row)s" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ "stdout: %(out)s\n" +#~ "stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "Did not find expected column in %(fun)s: %(hdr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Volume size %(size)s cannot be lesser" +#~ " than the snapshot size %(snap_size)s. " +#~ "They must be >= original snapshot " +#~ "size." +#~ msgstr "" + +#~ msgid "" +#~ "Clones currently disallowed when %(size)s " +#~ "< %(source_size)s. They must be >= " +#~ "original volume size." +#~ msgstr "" + +#~ msgid "" +#~ "Size of specified image %(image_size)s " +#~ "is larger than volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "" +#~ "Image minDisk size %(min_disk)s is " +#~ "larger than the volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "Updating volume %(volume_id)s with %(update)s" +#~ msgstr "" + +#~ msgid "Volume %s: resetting 'creating' status failed" +#~ msgstr "" + +#~ msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s" +#~ msgstr "" + +#~ msgid "Marking volume %s as bootable" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting download of %(image_id)s " +#~ "(%(image_location)s) to volume %(volume_id)s" +#~ msgstr "" + +#~ msgid "" +#~ "Downloaded image %(image_id)s (%(image_location)s)" +#~ " to volume %(volume_id)s successfully" +#~ msgstr "" + +#~ msgid "" +#~ "Creating volume glance metadata for " +#~ "volume %(volume_id)s backed by image " +#~ "%(image_id)s with: %(vol_metadata)s" +#~ msgstr "" + +#~ msgid "" +#~ "Cloning %(volume_id)s from image %(image_id)s" +#~ " at location %(image_location)s" +#~ msgstr "" + diff --git a/cinder/locale/pa_IN/LC_MESSAGES/cinder.po b/cinder/locale/pa_IN/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..0cd17c287f --- /dev/null +++ b/cinder/locale/pa_IN/LC_MESSAGES/cinder.po @@ -0,0 +1,9956 @@ +# Punjabi (Gurmukhi, India) translations for cinder. +# Copyright (C) 2013 ORGANIZATION +# This file is distributed under the same license as the cinder project. 
+# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Cinder\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-02-03 06:16+0000\n" +"PO-Revision-Date: 2013-12-15 11:10+0000\n" +"Last-Translator: openstackjenkins \n" +"Language-Team: Panjabi (Punjabi) (India) " +"(http://www.transifex.com/projects/p/openstack/language/pa_IN/)\n" +"Plural-Forms: nplurals=2; plural=(n != 1)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:102 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:66 cinder/brick/exception.py:33 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:88 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:107 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:112 +#, python-format +msgid "Volume driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:116 +#, python-format +msgid "Backup driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:120 +#, python-format +msgid "Connection to glance failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:124 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:129 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:133 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:137 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:141 +msgid "Volume driver not ready." +msgstr "" + +#: cinder/exception.py:145 cinder/brick/exception.py:74 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:150 +#, python-format +msgid "Invalid snapshot: %(reason)s" +msgstr "" + +#: cinder/exception.py:154 +#, python-format +msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:159 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:163 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:167 +msgid "The results are invalid." +msgstr "" + +#: cinder/exception.py:171 +#, python-format +msgid "Invalid input received: %(reason)s" +msgstr "" + +#: cinder/exception.py:175 +#, python-format +msgid "Invalid volume type: %(reason)s" +msgstr "" + +#: cinder/exception.py:179 +#, python-format +msgid "Invalid volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:183 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:187 +#, python-format +msgid "Invalid host: %(reason)s" +msgstr "" + +#: cinder/exception.py:193 cinder/brick/exception.py:81 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:197 +#, python-format +msgid "Invalid auth key: %(reason)s" +msgstr "" + +#: cinder/exception.py:201 +#, python-format +msgid "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" +msgstr "" + +#: cinder/exception.py:206 +msgid "Service is unavailable at this time." 
+msgstr "" + +#: cinder/exception.py:210 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:214 +#, python-format +msgid "The device in the path %(path)s is unavailable: %(reason)s" +msgstr "" + +#: cinder/exception.py:218 +#, python-format +msgid "Expected a uuid but received %(uuid)s." +msgstr "" + +#: cinder/exception.py:222 cinder/brick/exception.py:68 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:228 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:232 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "" +"Volume %(volume_id)s has no administration metadata with key " +"%(metadata_key)s." +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Invalid metadata: %(reason)s" +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Invalid metadata size: %(reason)s" +msgstr "" + +#: cinder/exception.py:250 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:255 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:264 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:269 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s deletion is not allowed with volumes " +"present with the type." +msgstr "" + +#: cinder/exception.py:274 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:278 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:282 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:287 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:291 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:295 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:328 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:332 +#, python-format +msgid "Unknown quota resources %(unknown)s." 
+msgstr "" + +#: cinder/exception.py:336 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:340 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:344 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:348 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:352 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:356 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:365 +#, python-format +msgid "Volume Type %(id)s already exists." +msgstr "" + +#: cinder/exception.py:369 +#, python-format +msgid "Volume type encryption for type %(type_id)s already exists." +msgstr "" + +#: cinder/exception.py:373 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:377 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:381 +#, python-format +msgid "Could not find parameter %(param)s" +msgstr "" + +#: cinder/exception.py:385 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:389 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:398 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:402 +#, python-format +msgid "Quota exceeded: code=%(code)s" +msgstr "" + +#: cinder/exception.py:409 +#, python-format +msgid "" +"Requested volume or snapshot exceeds allowed Gigabytes quota. Requested " +"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." +msgstr "" + +#: cinder/exception.py:415 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:419 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:423 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:427 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:432 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:436 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:440 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:444 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:449 +#, python-format +msgid "Glance metadata for volume/snapshot %(id)s cannot be found." 
+msgstr "" + +#: cinder/exception.py:453 +#, python-format +msgid "Failed to export for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:457 +#, python-format +msgid "Failed to create metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:461 +#, python-format +msgid "Failed to update metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:465 +#, python-format +msgid "Failed to copy metadata to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:469 +#, python-format +msgid "Failed to copy image to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:473 +msgid "Invalid Ceph args provided for backup rbd operation" +msgstr "" + +#: cinder/exception.py:477 +msgid "An error has occurred during backup operation" +msgstr "" + +#: cinder/exception.py:481 +msgid "Backup RBD operation failed" +msgstr "" + +#: cinder/exception.py:485 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:489 +msgid "Failed to identify volume backend." +msgstr "" + +#: cinder/exception.py:493 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "" + +#: cinder/exception.py:497 +#, python-format +msgid "Connection to swift failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:501 +#, python-format +msgid "Transfer %(transfer_id)s could not be found." +msgstr "" + +#: cinder/exception.py:505 +#, python-format +msgid "Volume migration failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:509 +#, python-format +msgid "SSH command injection detected: %(command)s" +msgstr "" + +#: cinder/exception.py:513 +#, python-format +msgid "QoS Specs %(specs_id)s already exists." +msgstr "" + +#: cinder/exception.py:517 +#, python-format +msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:522 +#, python-format +msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "No such QoS spec %(specs_id)s." +msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:536 +#, python-format +msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:541 +#, python-format +msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." +msgstr "" + +#: cinder/exception.py:546 +#, python-format +msgid "Invalid qos specs: %(reason)s" +msgstr "" + +#: cinder/exception.py:550 +#, python-format +msgid "QoS Specs %(specs_id)s is still associated with entities." +msgstr "" + +#: cinder/exception.py:554 +#, python-format +msgid "key manager error: %(reason)s" +msgstr "" + +#: cinder/exception.py:560 +msgid "Coraid Cinder Driver exception." +msgstr "" + +#: cinder/exception.py:564 +msgid "Failed to encode json data." +msgstr "" + +#: cinder/exception.py:568 +msgid "Login on ESM failed." +msgstr "" + +#: cinder/exception.py:572 +msgid "Relogin on ESM failed." +msgstr "" + +#: cinder/exception.py:576 +#, python-format +msgid "Group with name \"%(group_name)s\" not found." +msgstr "" + +#: cinder/exception.py:580 +#, python-format +msgid "ESM configure request failed: %(message)s." +msgstr "" + +#: cinder/exception.py:584 +#, python-format +msgid "Coraid ESM not available with reason: %(reason)s." +msgstr "" + +#: cinder/exception.py:589 +msgid "Zadara Cinder Driver exception." 
+msgstr "" + +#: cinder/exception.py:593 +#, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:597 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:601 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:605 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:609 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:613 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:618 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:622 +msgid "SolidFire Cinder Driver exception" +msgstr "" + +#: cinder/exception.py:626 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:630 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:636 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:641 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:645 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:649 cinder/exception.py:662 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:654 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:658 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/manager.py:133 +msgid "Notifying Schedulers of capabilities ..." +msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "ਨੀਤੀ ਦੀ ਪ੍ਰਤੀਨਿਧਤਾ ਕਰਦੀ JSON ਫਾਈਲ" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "ਜਦੋਂ ਬੇਨਤੀ ਕੀਤਾ ਗਿਆ ਨਿਯਮ ਨਹੀਂ ਲੱਭਿਆ ਤਾਂ ਨਿਯਮ ਜਾਂਚੇ ਗਏ" + +#: cinder/quota.py:105 +#, python-format +msgid "" +"Default quota for resource: %(res)s is set by the default quota flag: " +"quota_%(res)s, it is now deprecated. Please use the the default quota " +"class for default quota." +msgstr "" + +#: cinder/quota.py:748 +#, python-format +msgid "Created reservations %s" +msgstr "" + +#: cinder/quota.py:770 +#, python-format +msgid "Failed to commit reservations %s" +msgstr "" + +#: cinder/quota.py:790 +#, python-format +msgid "Failed to roll back reservations %s" +msgstr "" + +#: cinder/quota.py:876 +msgid "Cannot register resource" +msgstr "" + +#: cinder/quota.py:879 +msgid "Cannot register resources" +msgstr "" + +#: cinder/quota_utils.py:46 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume - " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/quota_utils.py:56 cinder/transfer/api.py:168 +#: cinder/volume/flows/api/create_volume.py:520 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/service.py:95 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:108 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:148 +#, python-format +msgid "" +"Report interval must be less than service down time. 
Current config " +"service_down_time: %(service_down_time)s, report_interval for this: " +"service is: %(report_interval)s. Setting global service_down_time to: " +"%(new_down_time)s" +msgstr "" + +#: cinder/service.py:216 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:255 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:270 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:276 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:298 +#, python-format +msgid "" +"Value of config option %(name)s_workers must be integer greater than 1. " +"Input value ignored." +msgstr "" + +#: cinder/service.py:373 +msgid "serve() can only be called once" +msgstr "" + +#: cinder/service.py:379 cinder/openstack/common/service.py:166 +#: cinder/openstack/common/service.py:384 +msgid "Full set of CONF:" +msgstr "" + +#: cinder/service.py:387 +#, python-format +msgid "%s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Can not translate %s to integer." +msgstr "" + +#: cinder/utils.py:127 +#, python-format +msgid "May specify only one of %s" +msgstr "" + +#: cinder/utils.py:212 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:228 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:412 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:423 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:698 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:759 +#, python-format +msgid "Volume driver %s not initialized" +msgstr "" + +#: cinder/wsgi.py:127 cinder/openstack/common/sslutils.py:50 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: cinder/wsgi.py:130 cinder/openstack/common/sslutils.py:53 +#, python-format +msgid "Unable to find ca_file : %s" +msgstr "" + +#: cinder/wsgi.py:133 cinder/openstack/common/sslutils.py:56 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: cinder/wsgi.py:136 cinder/openstack/common/sslutils.py:59 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:169 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:206 +#, python-format +msgid "Started %(name)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:244 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:313 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." 
+msgstr "" + +#: cinder/api/common.py:92 cinder/api/common.py:126 cinder/volume/api.py:266 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:95 cinder/api/common.py:130 cinder/volume/api.py:263 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:120 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:134 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:162 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:189 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:182 +msgid "Initializing extension manager." +msgstr "" + +#: cinder/api/extensions.py:197 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:235 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:236 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:240 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:256 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:262 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:276 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:287 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:356 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:266 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:463 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:786 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:907 +msgid "subclasses must implement construct()!" 
+msgstr "" + +#: cinder/api/contrib/admin_actions.py:81 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:227 +#: cinder/api/contrib/volume_transfer.py:157 +#: cinder/api/contrib/volume_transfer.py:193 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:224 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:60 +msgid "Snapshot not found." +msgstr "" + +#: cinder/api/contrib/hosts.py:86 cinder/api/openstack/wsgi.py:245 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:136 +#, python-format +msgid "Host '%s' could not be found." +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:168 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:180 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:206 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:214 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:111 +msgid "Please specify a name for QoS specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:220 +msgid "Failed to disassociate qos specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:222 +msgid "Qos specs still in use." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:298 +#: cinder/api/contrib/qos_specs_manage.py:351 +msgid "Volume Type id must not be None." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:72 +msgid "Missing required element quota_class_set in request body." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:81 +msgid "Quota class limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:85 +msgid "Quota class limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:60 +msgid "Quota limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quotas.py:65 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:100 +msgid "Missing required element quota_set in request body." +msgstr "" + +#: cinder/api/contrib/quotas.py:111 +#, python-format +msgid "Bad key(s) in quota set: %s" +msgstr "" + +#: cinder/api/contrib/scheduler_hints.py:36 +msgid "Malformed scheduler_hints attribute" +msgstr "" + +#: cinder/api/contrib/services.py:84 +msgid "" +"Query by service parameter is deprecated. Please use binary parameter " +"instead." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:51 +msgid "'status' must be specified." 
+msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:61 +#, python-format +msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:67 +#, python-format +msgid "" +"Provided snapshot status %(provided)s not allowed for snapshot with " +"status %(current)s." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:79 +msgid "progress must be an integer percentage" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:101 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:105 +#: cinder/api/v1/snapshot_metadata.py:75 cinder/api/v1/volume_metadata.py:75 +#: cinder/api/v2/snapshot_metadata.py:75 cinder/api/v2/volume_metadata.py:74 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:108 +#: cinder/api/v1/snapshot_metadata.py:79 cinder/api/v1/volume_metadata.py:79 +#: cinder/api/v2/snapshot_metadata.py:79 cinder/api/v2/volume_metadata.py:78 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:150 +msgid "" +"Key names can only contain alphanumeric characters, underscores, periods," +" colons and hyphens." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:99 +#, python-format +msgid "" +"Invalid request to attach volume to an instance %(instance_uuid)s and a " +"host %(host_name)s simultaneously" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:107 +msgid "Invalid request to attach volume to an invalid target" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:111 +msgid "" +"Invalid request to attach volume with an invalid mode. Attaching mode " +"should be 'rw' or 'ro'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:196 +msgid "Unable to fetch connection information from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:216 +msgid "Unable to terminate volume connection from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:229 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:237 +msgid "Bad value for 'force' parameter." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:240 +msgid "'force' is not string or bool." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:280 +msgid "New volume size must be specified as an integer." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:299 +msgid "Must specify readonly in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:307 +msgid "Bad value for 'readonly'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:311 +msgid "'readonly' not string or bool" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:325 +msgid "New volume type must be specified." 
+msgstr "" + +#: cinder/api/contrib/volume_transfer.py:131 +msgid "Listing volume transfers" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:147 +#, python-format +msgid "Creating new volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:162 +#, python-format +msgid "Creating transfer of volume %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:183 +#, python-format +msgid "Accepting volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:196 +#, python-format +msgid "Accepting transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:217 +#, python-format +msgid "Delete transfer with id: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:64 +msgid "key_size must be non-negative" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:67 +msgid "key_size must be an integer" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:73 +msgid "provider must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:75 +msgid "control_location must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:83 +#, python-format +msgid "Valid control location are: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:111 +msgid "Cannot create encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:115 +msgid "Create body is not valid." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:157 +msgid "Cannot delete encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/middleware/auth.py:108 +msgid "Invalid service catalog json." +msgstr "" + +#: cinder/api/middleware/fault.py:44 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:53 cinder/api/openstack/wsgi.py:984 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/fault.py:69 +#, python-format +msgid "%(exception)s: %(explanation)s" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:55 cinder/api/middleware/sizelimit.py:64 +#: cinder/api/middleware/sizelimit.py:78 +msgid "Request is too large." +msgstr "" + +#: cinder/api/openstack/__init__.py:69 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:80 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:104 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:126 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." 
+msgstr "" + +#: cinder/api/openstack/wsgi.py:220 cinder/api/openstack/wsgi.py:634 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:639 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:677 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:682 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:685 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:793 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:799 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:803 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:914 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:917 cinder/api/openstack/wsgi.py:930 +#: cinder/api/v1/snapshot_metadata.py:53 cinder/api/v1/snapshot_metadata.py:71 +#: cinder/api/v1/snapshot_metadata.py:96 cinder/api/v1/snapshot_metadata.py:121 +#: cinder/api/v1/volume_metadata.py:53 cinder/api/v1/volume_metadata.py:71 +#: cinder/api/v1/volume_metadata.py:96 cinder/api/v1/volume_metadata.py:121 +#: cinder/api/v2/snapshot_metadata.py:53 cinder/api/v2/snapshot_metadata.py:71 +#: cinder/api/v2/snapshot_metadata.py:96 cinder/api/v2/snapshot_metadata.py:121 +#: cinder/api/v2/volume_metadata.py:52 cinder/api/v2/volume_metadata.py:70 +#: cinder/api/v2/volume_metadata.py:95 cinder/api/v2/volume_metadata.py:120 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:927 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:939 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:987 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:139 cinder/api/v2/limits.py:138 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:264 cinder/api/v2/limits.py:261 +msgid "This request was rate-limited." 
+msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:37 cinder/api/v1/snapshot_metadata.py:117 +#: cinder/api/v1/snapshot_metadata.py:156 cinder/api/v2/snapshot_metadata.py:37 +#: cinder/api/v2/snapshot_metadata.py:117 +#: cinder/api/v2/snapshot_metadata.py:156 +msgid "snapshot does not exist" +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:139 +#: cinder/api/v1/snapshot_metadata.py:149 cinder/api/v1/volume_metadata.py:139 +#: cinder/api/v1/volume_metadata.py:149 cinder/api/v2/snapshot_metadata.py:139 +#: cinder/api/v2/snapshot_metadata.py:149 cinder/api/v2/volume_metadata.py:138 +#: cinder/api/v2/volume_metadata.py:148 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:119 cinder/api/v2/snapshots.py:120 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:184 +msgid "'volume_id' must be specified" +msgstr "" + +#: cinder/api/v1/snapshots.py:182 cinder/api/v2/snapshots.py:193 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:186 cinder/api/v2/snapshots.py:202 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:37 cinder/api/v1/volume_metadata.py:117 +#: cinder/api/v1/volume_metadata.py:156 cinder/api/v2/volume_metadata.py:36 +#: cinder/api/v2/volume_metadata.py:116 cinder/api/v2/volume_metadata.py:155 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:111 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:290 cinder/api/v2/volumes.py:228 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:344 cinder/api/v1/volumes.py:348 +#: cinder/api/v2/volumes.py:298 cinder/api/v2/volumes.py:302 +msgid "Invalid imageRef provided." +msgstr "" + +#: cinder/api/v1/volumes.py:388 cinder/api/v2/volumes.py:354 +#, python-format +msgid "snapshot id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:401 +#, python-format +msgid "source vol id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:412 cinder/api/v2/volumes.py:377 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/v1/volumes.py:496 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/snapshots.py:111 cinder/api/v2/snapshots.py:126 +#: cinder/api/v2/snapshots.py:267 +msgid "Snapshot could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:174 cinder/api/v2/snapshots.py:234 +#: cinder/api/v2/volumes.py:313 cinder/api/v2/volumes.py:419 +#, python-format +msgid "Missing required element '%s' in request body" +msgstr "" + +#: cinder/api/v2/snapshots.py:190 cinder/api/v2/volumes.py:217 +#: cinder/api/v2/volumes.py:234 cinder/api/v2/volumes.py:449 +msgid "Volume could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:230 cinder/api/v2/volumes.py:415 +msgid "Missing request body" +msgstr "" + +#: cinder/api/v2/types.py:70 +msgid "Volume type not found" +msgstr "" + +#: cinder/api/v2/volumes.py:237 +msgid "Volume cannot be deleted while in attached state" +msgstr "" + +#: cinder/api/v2/volumes.py:343 +msgid "Volume type not found." 
+msgstr "" + +#: cinder/api/v2/volumes.py:366 +#, python-format +msgid "source volume id:%s not found" +msgstr "" + +#: cinder/api/v2/volumes.py:472 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:66 +msgid "Backup status must be available or error" +msgstr "" + +#: cinder/backup/api.py:105 +msgid "Volume to be backed up must be available" +msgstr "" + +#: cinder/backup/api.py:140 +msgid "Backup status must be available" +msgstr "" + +#: cinder/backup/api.py:145 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:154 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:170 +msgid "Volume to be restored to must be available" +msgstr "" + +#: cinder/backup/api.py:176 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." +msgstr "" + +#: cinder/backup/api.py:181 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:97 +msgid "NULL host not allowed for volume backend lookup." +msgstr "" + +#: cinder/backup/manager.py:100 +#, python-format +msgid "Checking hostname '%s' for backend info." +msgstr "" + +#: cinder/backup/manager.py:107 +#, python-format +msgid "Backend not found in hostname (%s) so using default." +msgstr "" + +#: cinder/backup/manager.py:117 +#, python-format +msgid "Manager requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:120 cinder/backup/manager.py:132 +msgid "Fetching default backend." +msgstr "" + +#: cinder/backup/manager.py:123 +#, python-format +msgid "Volume manager for backend '%s' does not exist." +msgstr "" + +#: cinder/backup/manager.py:129 +#, python-format +msgid "Driver requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:147 +#, python-format +msgid "" +"Registering backend %(backend)s (host=%(host)s " +"backend_name=%(backend_name)s)." +msgstr "" + +#: cinder/backup/manager.py:154 +#, python-format +msgid "Registering default backend %s." +msgstr "" + +#: cinder/backup/manager.py:158 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)." +msgstr "" + +#: cinder/backup/manager.py:165 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s." +msgstr "" + +#: cinder/backup/manager.py:184 +msgid "Cleaning up incomplete backup operations." +msgstr "" + +#: cinder/backup/manager.py:189 +#, python-format +msgid "Resetting volume %s to available (was backing-up)." +msgstr "" + +#: cinder/backup/manager.py:194 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)." +msgstr "" + +#: cinder/backup/manager.py:206 +#, python-format +msgid "Resetting backup %s to error (was creating)." +msgstr "" + +#: cinder/backup/manager.py:212 +#, python-format +msgid "Resetting backup %s to available (was restoring)." +msgstr "" + +#: cinder/backup/manager.py:217 +#, python-format +msgid "Resuming delete on backup: %s." +msgstr "" + +#: cinder/backup/manager.py:225 +#, python-format +msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:237 +#, python-format +msgid "" +"Create backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s." 
+msgstr "" + +#: cinder/backup/manager.py:249 +#, python-format +msgid "" +"Create backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:282 +#, python-format +msgid "Create backup finished. backup: %s." +msgstr "" + +#: cinder/backup/manager.py:286 +#, python-format +msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:299 +#, python-format +msgid "" +"Restore backup aborted: expected volume status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:310 +#, python-format +msgid "" +"Restore backup aborted: expected backup status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:329 +#, python-format +msgid "" +"Restore backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:360 +#, python-format +msgid "" +"Restore backup finished, backup %(backup_id)s restored to volume " +"%(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:379 +#, python-format +msgid "Delete backup started, backup: %s." +msgstr "" + +#: cinder/backup/manager.py:386 +#, python-format +msgid "" +"Delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:399 +#, python-format +msgid "" +"Delete backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:422 +#, python-format +msgid "Delete backup finished, backup %s deleted." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:116 +msgid "" +"rbd striping not supported - ignoring configuration settings for rbd " +"striping" +msgstr "" + +#: cinder/backup/drivers/ceph.py:147 +#, python-format +msgid "invalid user '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:213 +msgid "backup_id required" +msgstr "" + +#: cinder/backup/drivers/ceph.py:224 +#, python-format +msgid "discarding %(length)s bytes from offset %(offset)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:232 +#, python-format +msgid "writing zeroes chunk %d" +msgstr "" + +#: cinder/backup/drivers/ceph.py:246 +#, python-format +msgid "transferring data between '%(src)s' and '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:250 +#, python-format +msgid "%(chunks)s chunks of %(bytes)s bytes to be transferred" +msgstr "" + +#: cinder/backup/drivers/ceph.py:269 +#, python-format +msgid "transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:279 +#, python-format +msgid "transferring remaining %s bytes" +msgstr "" + +#: cinder/backup/drivers/ceph.py:295 +#, python-format +msgid "creating base image '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:322 cinder/backup/drivers/ceph.py:603 +#, python-format +msgid "deleting backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:325 +msgid "no backup snapshot to delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:361 +#, python-format +msgid "trying diff format name format basename='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:369 +#, python-format +msgid "image %s not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:377 +#, python-format +msgid "base image still has %s snapshots so skipping base image delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:382 +#, python-format +msgid "deleting base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:389 +#, python-format +msgid "image busy, retrying %(retries)s more time(s) in %(delay)ss" +msgstr "" + +#: cinder/backup/drivers/ceph.py:394 +msgid "max retries reached - raising error" +msgstr "" + +#: cinder/backup/drivers/ceph.py:397 +#, python-format +msgid "base backup image='%s' deleted)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:407 +#, python-format +msgid "deleting source snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:453 +#, python-format +msgid "performing differential transfer from '%(src)s' to '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:478 +#, python-format +msgid "rbd diff op failed - (ret=%(ret)s stderr=%(stderr)s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:488 +#, python-format +msgid "image '%s' not found - trying diff format name" +msgstr "" + +#: cinder/backup/drivers/ceph.py:493 +#, python-format +msgid "diff format image '%s' not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:528 +#, python-format +msgid "using --from-snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:543 +#, python-format +msgid "source snap '%s' is stale so deleting" +msgstr "" + +#: cinder/backup/drivers/ceph.py:555 +#, python-format +msgid "" +"snap='%(snap)s' does not exist in base image='%(base)s' - aborting " +"incremental backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:566 +#, python-format +msgid "creating backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:586 +#, python-format +msgid "differential backup transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:595 +msgid "differential backup transfer failed" +msgstr "" + +#: 
cinder/backup/drivers/ceph.py:625 +#, python-format +msgid "creating base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:634 +msgid "copying data" +msgstr "" + +#: cinder/backup/drivers/ceph.py:694 +#, python-format +msgid "looking for snapshot of backup base '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:697 +#, python-format +msgid "backup base '%s' has no snapshots" +msgstr "" + +#: cinder/backup/drivers/ceph.py:704 +#, python-format +msgid "backup '%s' has no snapshot" +msgstr "" + +#: cinder/backup/drivers/ceph.py:708 +#, python-format +msgid "backup should only have one snapshot but instead has %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:713 +#, python-format +msgid "found snapshot '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:734 +msgid "need non-zero volume size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:751 +#, python-format +msgid "Starting backup of volume='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:764 +msgid "forcing full backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:776 +#, python-format +msgid "backup '%s' finished." +msgstr "" + +#: cinder/backup/drivers/ceph.py:834 +msgid "adjusting restore vol size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:846 +#, python-format +msgid "trying incremental restore from base='%(base)s' snap='%(snap)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:858 +msgid "differential restore failed, trying full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:869 +#, python-format +msgid "restore transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:916 +#, python-format +msgid "rbd has %s extents" +msgstr "" + +#: cinder/backup/drivers/ceph.py:938 +msgid "dest volume is original volume - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:959 +msgid "destination has extents - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:964 +#, python-format +msgid "no restore point found for backup='%s', forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:995 +msgid "forcing full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1005 +#, python-format +msgid "starting restore from Ceph backup=%(src)s to volume=%(dest)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1016 +msgid "volume_file does not support fileno() so skipping fsync()" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1021 +msgid "restore finished successfully." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:1023 +#, python-format +msgid "restore finished with error - %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1029 +#, python-format +msgid "delete started for backup=%s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1034 +msgid "rbd image not found but continuing anyway so that db entry can be removed" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1037 +#, python-format +msgid "delete '%s' finished with warning" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1039 +#, python-format +msgid "delete '%s' finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:106 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:123 +#, python-format +msgid "single_user auth mode enabled, but %(param)s not set" +msgstr "" + +#: cinder/backup/drivers/swift.py:141 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:146 +#, python-format +msgid "container %s does not exist" +msgstr "" + +#: cinder/backup/drivers/swift.py:151 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/drivers/swift.py:157 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:173 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:182 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:192 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:209 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/drivers/swift.py:214 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:219 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:224 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/drivers/swift.py:234 +#, python-format +msgid "volume size %d is invalid." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:248 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:271 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/drivers/swift.py:278 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:287 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/drivers/swift.py:291 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/drivers/swift.py:297 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:301 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:304 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:312 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/drivers/swift.py:328 cinder/backup/drivers/tsm.py:324 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/drivers/swift.py:345 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/drivers/swift.py:350 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:356 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/drivers/swift.py:362 +#, python-format +msgid "" +"restoring object from swift. backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:378 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/drivers/swift.py:401 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:409 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:423 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:428 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:432 cinder/backup/drivers/tsm.py:378 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:446 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:455 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:458 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:468 cinder/backup/drivers/tsm.py:440 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/backup/drivers/tsm.py:85 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to create device hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:143 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to obtain backup success notification from " +"server.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:173 +#, python-format +msgid "" +"restore: %(vol_id)s Failed.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:199 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a block device." +msgstr "" + +#: cinder/backup/drivers/tsm.py:206 +#, python-format +msgid "backup: %(vol_id)s Failed. Cannot obtain real path to device %(path)s." +msgstr "" + +#: cinder/backup/drivers/tsm.py:213 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a file." +msgstr "" + +#: cinder/backup/drivers/tsm.py:260 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to remove backup hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:286 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to TSM, volume path: " +"%(volume_path)s," +msgstr "" + +#: cinder/backup/drivers/tsm.py:298 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:308 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:338 +#, python-format +msgid "" +"restore: starting restore of backup from TSM to volume %(volume_id)s, " +"backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:352 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:362 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:413 +#, python-format +msgid "" +"delete: %(vol_id)s Failed to run dsmc with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:421 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments with " +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:432 +#, python-format +msgid "" +"delete: %(vol_id)s Failed with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/brick/exception.py:55 +#, python-format +msgid "Exception in string format operation. 
msg='%s'" +msgstr "" + +#: cinder/brick/exception.py:85 +msgid "We are unable to locate any Fibre Channel devices." +msgstr "" + +#: cinder/brick/exception.py:89 +msgid "Unable to find a Fibre Channel volume device." +msgstr "" + +#: cinder/brick/exception.py:93 +#, python-format +msgid "Volume device not found at %(device)s." +msgstr "" + +#: cinder/brick/exception.py:97 +#, python-format +msgid "Unable to find Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:101 +#, python-format +msgid "Failed to create Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:105 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:109 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:113 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:117 +#, python-format +msgid "Connect to volume via protocol %(protocol)s not supported." +msgstr "" + +#: cinder/brick/initiator/connector.py:127 +#, python-format +msgid "Invalid InitiatorConnector protocol specified %(protocol)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:140 +#, python-format +msgid "Failed to access the device on the path %(path)s: %(error)s %(info)s." +msgstr "" + +#: cinder/brick/initiator/connector.py:229 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. Try" +" number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:242 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:317 +#, python-format +msgid "Could not find the iSCSI Initiator File %s" +msgstr "" + +#: cinder/brick/initiator/connector.py:609 +msgid "We are unable to locate any Fibre Channel devices" +msgstr "" + +#: cinder/brick/initiator/connector.py:619 +#, python-format +msgid "Looking for Fibre Channel dev %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:629 +msgid "Fibre Channel volume device not found." +msgstr "" + +#: cinder/brick/initiator/connector.py:633 +#, python-format +msgid "Fibre volume not yet found. Will rescan & retry. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:649 +#, python-format +msgid "Found Fibre Channel volume %(name)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:658 +#, python-format +msgid "Multipath device discovered %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:776 +#, python-format +msgid "AoE volume not yet found at: %(path)s. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:789 +#, python-format +msgid "Found AoE device %(path)s (after %(tries)s rediscover)" +msgstr "" + +#: cinder/brick/initiator/connector.py:815 +#, python-format +msgid "aoe-discover: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:825 +#, python-format +msgid "aoe-revalidate %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:834 +#, python-format +msgid "aoe-flush %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:858 +msgid "" +"Connection details not present. RemoteFsClient may not initialize " +"properly." 
+msgstr "" + +#: cinder/brick/initiator/connector.py:915 +msgid "Invalid connection_properties specified no device_path attribute" +msgstr "" + +#: cinder/brick/initiator/linuxfc.py:50 cinder/brick/initiator/linuxfc.py:56 +msgid "systool is not installed" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:99 +#: cinder/brick/initiator/linuxscsi.py:107 +#: cinder/brick/initiator/linuxscsi.py:124 +#, python-format +msgid "multipath call failed exit (%(code)s)" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:145 +#, python-format +msgid "Couldn't find multipath device %(line)s" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:149 +#, python-format +msgid "Found multipath device = %(mdev)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:140 +msgid "Attempting recreate of backing lun..." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:158 +#, python-format +msgid "" +"Failed to recover attempt to create iscsi backing lun for volume " +"id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:177 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:184 +#, python-format +msgid "" +"Created volume path %(vp)s,\n" +"content: %(vc)%" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:216 cinder/brick/iscsi/iscsi.py:365 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:227 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:258 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:262 +#, python-format +msgid "Volume path %s does not exist, nothing to remove." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:280 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:290 cinder/brick/iscsi/iscsi.py:550 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:375 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:469 +msgid "cinder-rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:489 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:513 cinder/brick/iscsi/iscsi.py:522 +#, python-format +msgid "Failed to create iscsi target for volume id:%s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:532 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:542 +#, python-format +msgid "Failed to remove iscsi target for volume id:%s." 
+msgstr "" + +#: cinder/brick/iscsi/iscsi.py:571 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 cinder/brick/local_dev/lvm.py:158 +#: cinder/brick/local_dev/lvm.py:474 cinder/brick/local_dev/lvm.py:503 +#: cinder/brick/local_dev/lvm.py:546 cinder/brick/local_dev/lvm.py:609 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 cinder/brick/local_dev/lvm.py:159 +#: cinder/brick/local_dev/lvm.py:475 cinder/brick/local_dev/lvm.py:504 +#: cinder/brick/local_dev/lvm.py:547 cinder/brick/local_dev/lvm.py:610 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 cinder/brick/local_dev/lvm.py:160 +#: cinder/brick/local_dev/lvm.py:476 cinder/brick/local_dev/lvm.py:505 +#: cinder/brick/local_dev/lvm.py:548 cinder/brick/local_dev/lvm.py:611 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, python-format +msgid "Unable to locate Volume Group %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:157 +msgid "Error querying thin pool about data_percent" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:370 +#, python-format +msgid "Unable to find VG: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:420 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:473 +msgid "Error creating Volume" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:489 +#, python-format +msgid "Unable to find LV: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:502 +msgid "Error creating snapshot" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:545 +msgid "Error activating LV" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:563 +#, python-format +msgid "Error reported running lvremove: CMD: %(command)s, RESPONSE: %(response)s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:568 +msgid "Attempting udev settle and retry of lvremove..." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:608 +msgid "Error extending Volume" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:39 +msgid "nfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:45 +msgid "glusterfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:86 +#, python-format +msgid "Already mounted: %s" +msgstr "" + +#: cinder/common/config.py:125 +msgid "Deploy v1 of the Cinder API." +msgstr "" + +#: cinder/common/config.py:128 +msgid "Deploy v2 of the Cinder API." +msgstr "" + +#: cinder/common/sqlalchemyutils.py:66 +#: cinder/openstack/common/db/sqlalchemy/utils.py:72 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:114 +#: cinder/openstack/common/db/sqlalchemy/utils.py:120 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/compute/nova.py:97 +#, python-format +msgid "Novaclient connection created using URL: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:63 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:190 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:843 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1842 +#, python-format +msgid "VolumeType %s deletion failed, VolumeType in use." 
+msgstr "" + +#: cinder/db/sqlalchemy/api.py:2530 +#, python-format +msgid "No backup with id %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2615 +msgid "Volume must be available" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2639 +#, python-format +msgid "Volume in unexpected state %s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2662 +#, python-format +msgid "" +"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " +"%(status)s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:37 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:64 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:240 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:269 +msgid "Downgrade from initial Cinder install is unsupported." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:49 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:74 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:105 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:45 +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:48 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:46 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:127 +msgid "Dropping foreign key reservations_ibfk_1 failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:133 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:140 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:147 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:60 +msgid "Exception while creating table 'volume_glance_metadata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:75 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:68 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:58 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:61 +msgid "transfers table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:31 +msgid "migrations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:61 +#, python-format +msgid "Table |%s| not created" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:37 +#, python-format +msgid "Exception while dropping table %s." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:100 +#, python-format +msgid "Exception while creating table %s." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:34 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:43 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:49 +#, python-format +msgid "Column |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:92 +msgid "encryption_key_id column not dropped from volumes" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:100 +msgid "encryption_key_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:105 +msgid "volume_type_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:113 +msgid "encryption table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:49 +msgid "Table quality_of_service_specs not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:60 +msgid "Added qos_specs_id column to volume type table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:85 +msgid "Dropping foreign key volume_types_ibfk_1 failed" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:93 +msgid "Dropping qos_specs_id column failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:100 +msgid "Dropping quality_of_service_specs table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:59 +msgid "volume_admin_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:48 +msgid "" +"Found existing 'default' entries in the quota_classes table. Skipping " +"insertion of default values." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:72 +msgid "Added default quota class data into the DB." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:74 +msgid "Default quota class data not inserted into the DB." +msgstr "" + +#: cinder/image/glance.py:161 cinder/image/glance.py:169 +#, python-format +msgid "Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:94 cinder/image/image_utils.py:199 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/image/image_utils.py:101 +#, python-format +msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:109 cinder/image/image_utils.py:192 +#, python-format +msgid "" +"Size is %(image_size)dGB and doesn't fit in a volume of size " +"%(volume_size)dGB." +msgstr "" + +#: cinder/image/image_utils.py:157 +#, python-format +msgid "" +"qemu-img is not installed and image is of type %s. Only RAW images can " +"be used if qemu-img is not installed." +msgstr "" + +#: cinder/image/image_utils.py:164 +msgid "" +"qemu-img is not installed and the disk format is not specified. Only RAW" +" images can be used if qemu-img is not installed." 
+msgstr "" + +#: cinder/image/image_utils.py:178 +#, python-format +msgid "Copying image from %(tmp)s to volume %(dest)s - size: %(size)s" +msgstr "" + +#: cinder/image/image_utils.py:206 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:224 +#, python-format +msgid "Converted to %(vol_format)s, but format is now %(file_format)s" +msgstr "" + +#: cinder/image/image_utils.py:260 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:78 +msgid "" +"config option keymgr.fixed_key has not been defined: some operations may " +"fail unexpectedly" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:80 +msgid "keymgr.fixed_key not defined" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:134 +#, python-format +msgid "Not deleting key %s" +msgstr "" + +#: cinder/openstack/common/eventlet_backdoor.py:140 +#, python-format +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/fileutils.py:64 +#, python-format +msgid "Reloading cached file %s" +msgstr "" + +#: cinder/openstack/common/gettextutils.py:252 +msgid "Message objects do not support addition." +msgstr "" + +#: cinder/openstack/common/gettextutils.py:261 +msgid "" +"Message objects do not support str() because they may contain non-ascii " +"characters. Please use unicode() or translate() instead." +msgstr "" + +#: cinder/openstack/common/imageutils.py:96 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:189 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:200 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:227 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:235 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/log.py:301 +#, python-format +msgid "Deprecated: %s" +msgstr "" + +#: cinder/openstack/common/log.py:402 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:453 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:623 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:82 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:89 +#: cinder/tests/brick/test_brick_connector.py:466 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:129 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:136 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:43 +#, python-format +msgid "Unexpected argument for periodic task creation: %(arg)s." 
+msgstr "" + +#: cinder/openstack/common/periodic_task.py:134 +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:139 +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:177 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:186 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." +msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:127 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/openstack/common/processutils.py:142 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:167 +#: cinder/openstack/common/processutils.py:239 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:345 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:179 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/openstack/common/processutils.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:318 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:220 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/openstack/common/processutils.py:224 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/openstack/common/service.py:175 +#: cinder/openstack/common/service.py:269 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:187 +msgid "Exception during rpc cleanup." 
+msgstr "" + +#: cinder/openstack/common/service.py:238 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:275 +msgid "Unhandled exception" +msgstr "" + +#: cinder/openstack/common/service.py:308 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/openstack/common/service.py:327 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/openstack/common/service.py:337 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/openstack/common/service.py:354 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/openstack/common/service.py:358 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/service.py:362 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/openstack/common/service.py:392 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/openstack/common/service.py:410 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/openstack/common/sslutils.py:98 +#, python-format +msgid "Invalid SSL version : %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:86 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/strutils.py:182 +#, python-format +msgid "Invalid string format: %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:189 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/openstack/common/versionutils.py:69 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and " +"may be removed in %(remove_in)s." +msgstr "" + +#: cinder/openstack/common/versionutils.py:73 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s and may be removed in " +"%(remove_in)s. It will not be superseded." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:29 +msgid "An unknown error occurred in crypto utils." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:36 +#, python-format +msgid "Block size of %(given)d is too big, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:45 +#, python-format +msgid "Length of %(given)d is too long, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/db/exception.py:44 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:487 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:538 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:610 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/utils.py:33 +msgid "Sort key supplied was not valid." +msgstr "" + +#: cinder/openstack/common/notifier/api.py:129 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:145 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:164 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. 
Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:105 +#, python-format +msgid "" +"An RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:83 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "no calling threads waiting for msg_id: %s, message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:216 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshold: %d. There " +"could be a MulticallProxyWaiter leak." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:299 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:345 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "received %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:422 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:423 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:280 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:459 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:594 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:597 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:631 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:640 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:668 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint."
+msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:166 +#: cinder/openstack/common/rpc/impl_qpid.py:164 +msgid "Failed to process message... skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:477 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:499 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:536 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:552 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:606 +#: cinder/openstack/common/rpc/impl_qpid.py:507 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:624 +#: cinder/openstack/common/rpc/impl_qpid.py:522 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:628 +#: cinder/openstack/common/rpc/impl_qpid.py:526 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:667 +#: cinder/openstack/common/rpc/impl_qpid.py:561 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:84 +#, python-format +msgid "Invalid value for qpid_topology_version: %d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:455 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:461 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:474 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:534 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:101 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:136 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:137 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:138 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:146 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:158 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:200 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:205 +msgid "You cannot send on this socket." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:267 +#, python-format +msgid "Running func with context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:305 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:387 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:437 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:443 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:475 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:481 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:497 +#, python-format +msgid "Required IPC directory does not exist at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:506 +#, python-format +msgid "Permission denied to IPC directory at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:509 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:543 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:562 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:590 +msgid "Skipping topic registration. Already registered." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:597 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:649 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:662 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:675 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:678 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:681 +#, python-format +msgid "Received message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:682 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:691 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:698 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:721 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:724 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:728 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:731 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:771 +#, python-format +msgid "topic is %s." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:815 +#, python-format +msgid "rpc_zmq_matchmaker = %(orig)s is deprecated; use %(new)s instead" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#: cinder/openstack/common/rpc/matchmaker_ring.py:79 +#: cinder/openstack/common/rpc/matchmaker_ring.py:97 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:54 +#, python-format +msgid "extra_spec requirement '%(req)s' does not match '%(cap)s'" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:67 +#, python-format +msgid "%(host_state)s fails resource_type extra_specs requirements" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:43 +msgid "Re-scheduling is disabled." +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:52 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/scheduler/driver.py:69 +msgid "Must implement host_passes_filters" +msgstr "" + +#: cinder/scheduler/driver.py:74 +msgid "Must implement find_retype_host" +msgstr "" + +#: cinder/scheduler/driver.py:78 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:82 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:98 +#, python-format +msgid "cannot place volume %(id)s on %(host)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:114 +#, python-format +msgid "No valid hosts for volume %(id)s with type %(type)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:125 +#, python-format +msgid "" +"Current host not valid for volume %(id)s with type %(type)s, migration " +"not allowed" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:156 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:174 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:207 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:259 +#, python-format +msgid "Filtered %s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:276 +#, python-format +msgid "Choosing %s" +msgstr "" + +#: cinder/scheduler/host_manager.py:264 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:269 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:294 +#, python-format +msgid "volume service is down or disabled. (host: %s)" +msgstr "" + +#: cinder/scheduler/manager.py:63 +msgid "" +"ChanceScheduler and SimpleScheduler have been deprecated due to lack of " +"support for advanced features like: volume types, volume encryption, QoS " +"etc. These two schedulers can be fully replaced by FilterScheduler with " +"certain combination of filters and weighers." 
+msgstr "" + +#: cinder/scheduler/manager.py:98 cinder/scheduler/manager.py:100 +msgid "Failed to create scheduler manager volume flow" +msgstr "" + +#: cinder/scheduler/manager.py:159 +msgid "New volume type not specified in request_spec." +msgstr "" + +#: cinder/scheduler/manager.py:174 +#, python-format +msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." +msgstr "" + +#: cinder/scheduler/manager.py:192 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:68 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%s'" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:43 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:57 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:53 +msgid "No volume_id provided to populate a request_spec from" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:116 +#, python-format +msgid "Failed to schedule_create_volume: %(cause)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:135 +#, python-format +msgid "Failed notifying on %(topic)s payload %(payload)s" +msgstr "" + +#: cinder/tests/fake_driver.py:57 cinder/volume/driver.py:784 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:76 cinder/volume/driver.py:884 +#, python-format +msgid "FAKE ISER: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:97 +msgid "local_path not implemented" +msgstr "" + +#: cinder/tests/fake_driver.py:124 cinder/tests/fake_driver.py:129 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:70 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:78 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:94 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:97 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:58 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_netapp_nfs.py:360 +#, python-format +msgid "Share %(share)s and file name %(file_name)s" +msgstr "" + +#: cinder/tests/test_rbd.py:768 cinder/volume/drivers/rbd.py:175 +msgid "flush() not supported in this version of librbd" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:260 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1507 +#, python-format +msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1510 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1515 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:60 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:61 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: 
cinder/tests/test_xiv_ds8k.py:102 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/tests/api/contrib/test_backups.py:741 +msgid "Invalid input" +msgstr "" + +#: cinder/tests/integrated/test_login.py:29 +#, python-format +msgid "volume: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:32 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:42 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:50 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:58 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:100 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:103 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:121 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:148 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:159 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:166 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/transfer/api.py:68 +msgid "Volume in unexpected state" +msgstr "" + +#: cinder/transfer/api.py:102 cinder/volume/api.py:367 +msgid "status must be available" +msgstr "" + +#: cinder/transfer/api.py:119 +#, python-format +msgid "Failed to create transfer record for %s" +msgstr "" + +#: cinder/transfer/api.py:136 +#, python-format +msgid "Attempt to transfer %s with invalid auth key." +msgstr "" + +#: cinder/transfer/api.py:156 cinder/volume/flows/api/create_volume.py:508 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/transfer/api.py:182 +#, python-format +msgid "Failed to update quota when donating volume transfer id %s" +msgstr "" + +#: cinder/transfer/api.py:199 +#, python-format +msgid "Volume %s has been transferred."
+msgstr "" + +#: cinder/volume/api.py:143 +#, python-format +msgid "Unable to query if %s is in the availability zone set" +msgstr "" + +#: cinder/volume/api.py:171 cinder/volume/api.py:173 +msgid "Failed to create api volume flow" +msgstr "" + +#: cinder/volume/api.py:202 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:214 +#, python-format +msgid "Volume status must be available or error, but current status is: %s" +msgstr "" + +#: cinder/volume/api.py:224 +msgid "Volume cannot be deleted while migrating" +msgstr "" + +#: cinder/volume/api.py:229 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:285 cinder/volume/api.py:350 +#: cinder/volume/qos_specs.py:240 cinder/volume/volume_types.py:67 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:370 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:377 +msgid "status must be in-use to detach" +msgstr "" + +#: cinder/volume/api.py:388 +msgid "Volume status must be available to reserve" +msgstr "" + +#: cinder/volume/api.py:464 +msgid "Snapshot cannot be created while volume is migrating" +msgstr "" + +#: cinder/volume/api.py:468 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:490 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:502 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:553 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/api.py:581 cinder/volume/flows/api/create_volume.py:208 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:585 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:589 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:720 cinder/volume/api.py:772 +msgid "Volume status must be available/in-use." +msgstr "" + +#: cinder/volume/api.py:723 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/api.py:752 +msgid "Volume status must be available to extend." +msgstr "" + +#: cinder/volume/api.py:757 +#, python-format +msgid "" +"New size for extend must be greater than current size. (current: " +"%(size)s, extended: %(new_size)s)" +msgstr "" + +#: cinder/volume/api.py:778 +msgid "Volume is already part of an active migration" +msgstr "" + +#: cinder/volume/api.py:784 +msgid "volume must not have snapshots" +msgstr "" + +#: cinder/volume/api.py:797 +#, python-format +msgid "No available service named %s" +msgstr "" + +#: cinder/volume/api.py:803 +msgid "Destination host must be different than current host" +msgstr "" + +#: cinder/volume/api.py:833 +msgid "Source volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:837 +msgid "Destination volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:842 +#, python-format +msgid "Destination has migration_status %(stat)s, expected %(exp)s." +msgstr "" + +#: cinder/volume/api.py:853 +msgid "Volume status must be available to update readonly flag." 
+msgstr "" + +#: cinder/volume/api.py:862 +#, python-format +msgid "Unable to update type due to incorrect status on volume: %s" +msgstr "" + +#: cinder/volume/api.py:868 +#, python-format +msgid "Volume %s is already part of an active migration." +msgstr "" + +#: cinder/volume/api.py:874 +#, python-format +msgid "migration_policy must be 'on-demand' or 'never', passed: %s" +msgstr "" + +#: cinder/volume/api.py:887 +#, python-format +msgid "Invalid volume_type passed: %s" +msgstr "" + +#: cinder/volume/api.py:900 +#, python-format +msgid "New volume_type same as original: %s" +msgstr "" + +#: cinder/volume/api.py:915 +msgid "Retype cannot change encryption requirements" +msgstr "" + +#: cinder/volume/api.py:927 +msgid "Retype cannot change front-end qos specs for in-use volumes" +msgstr "" + +#: cinder/volume/driver.py:189 cinder/volume/drivers/netapp/nfs.py:174 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:282 +#, python-format +msgid "copy_data_between_volumes %(src)s -> %(dest)s." +msgstr "" + +#: cinder/volume/driver.py:295 cinder/volume/driver.py:309 +#, python-format +msgid "Failed to attach volume %(vol)s" +msgstr "" + +#: cinder/volume/driver.py:327 +#, python-format +msgid "Failed to copy volume %(src)s to %(dest)d" +msgstr "" + +#: cinder/volume/driver.py:340 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:358 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:394 +#, python-format +msgid "Unable to access the backend storage via the path %(path)s." +msgstr "" + +#: cinder/volume/driver.py:433 +#, python-format +msgid "Creating a new backup for volume %s." +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Restoring backup %(backup)s to volume %(volume)s." +msgstr "" + +#: cinder/volume/driver.py:474 +msgid "Extend volume not implemented" +msgstr "" + +#: cinder/volume/driver.py:533 cinder/volume/drivers/emc/emc_smis_iscsi.py:113 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:546 +#, python-format +msgid "ISCSI discovery attempt failed for:%s" +msgstr "" + +#: cinder/volume/driver.py:548 +#, python-format +msgid "Error from iscsiadm -m discovery: %s" +msgstr "" + +#: cinder/volume/driver.py:595 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:599 cinder/volume/drivers/emc/emc_smis_iscsi.py:156 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:696 +msgid "The volume driver requires the iSCSI initiator name in the connector." +msgstr "" + +#: cinder/volume/driver.py:726 cinder/volume/driver.py:845 +#: cinder/volume/drivers/eqlx.py:247 cinder/volume/drivers/lvm.py:359 +#: cinder/volume/drivers/zadara.py:650 +#: cinder/volume/drivers/emc/emc_smis_common.py:859 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:235 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:602 +#: cinder/volume/drivers/netapp/iscsi.py:1032 +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/nexenta/iscsi.py:538 +#: cinder/volume/drivers/windows/windows.py:205 +msgid "Updating volume stats" +msgstr "" + +#: cinder/volume/driver.py:924 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:203 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." 
+msgstr "" + +#: cinder/volume/manager.py:209 +msgid "" +"ThinLVMVolumeDriver is deprecated, please configure LVMISCSIDriver and " +"lvm_type=thin. Continuing with those settings." +msgstr "" + +#: cinder/volume/manager.py:228 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)" +msgstr "" + +#: cinder/volume/manager.py:235 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s" +msgstr "" + +#: cinder/volume/manager.py:244 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:257 +#, python-format +msgid "Failed to re-export volume %s: setting to error state" +msgstr "" + +#: cinder/volume/manager.py:264 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:271 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:273 +#, python-format +msgid "" +"Error encountered during re-exporting phase of driver initialization: " +"%(name)s" +msgstr "" + +#: cinder/volume/manager.py:283 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:328 cinder/volume/manager.py:330 +msgid "Failed to create manager volume flow" +msgstr "" + +#: cinder/volume/manager.py:374 cinder/volume/manager.py:391 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:380 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:389 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:394 +#, python-format +msgid "Cannot delete volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:422 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:427 +#, python-format +msgid "volume %s: glance metadata deleted" +msgstr "" + +#: cinder/volume/manager.py:430 +#, python-format +msgid "no glance metadata found for volume %s" +msgstr "" + +#: cinder/volume/manager.py:434 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:451 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:462 +#, python-format +msgid "snapshot %(snap_id)s: creating" +msgstr "" + +#: cinder/volume/manager.py:490 +#, python-format +msgid "" +"Failed updating %(snapshot_id)s metadata using the provided volumes " +"%(volume_id)s metadata" +msgstr "" + +#: cinder/volume/manager.py:496 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:508 cinder/volume/manager.py:518 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:526 +#, python-format +msgid "Cannot delete snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:556 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:559 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:579 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:583 +msgid "being attached by another host" +msgstr "" + +#: cinder/volume/manager.py:587 +msgid "being attached by different mode" +msgstr "" + +#: cinder/volume/manager.py:590 +msgid "status must be available or attaching" +msgstr "" + +#: cinder/volume/manager.py:698 +#, python-format +msgid 
"Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "" + +#: cinder/volume/manager.py:760 +#, python-format +msgid "Unable to fetch connection information from backend: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:807 +#, python-format +msgid "Unable to terminate volume connection: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:854 +msgid "failed to create new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:857 +msgid "timeout creating new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:880 +#, python-format +msgid "Failed to copy volume %(vol1)s to %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:909 +#, python-format +msgid "" +"migrate_volume_completion: completing migration for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:921 +#, python-format +msgid "" +"migrate_volume_completion is cleaning up an error for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:940 +#, python-format +msgid "Failed to delete migration source vol %(vol)s: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:976 +#, python-format +msgid "volume %s: calling driver migrate_volume" +msgstr "" + +#: cinder/volume/manager.py:1016 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/manager.py:1024 +#, python-format +msgid "" +"Unable to update stats, %(driver_name)s -%(driver_version)s " +"%(config_group)s driver is uninitialized." +msgstr "" + +#: cinder/volume/manager.py:1044 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/manager.py:1091 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to extend volume by %(s_size)sG, " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/manager.py:1103 +#, python-format +msgid "volume %s: extending" +msgstr "" + +#: cinder/volume/manager.py:1105 +#, python-format +msgid "volume %s: extended successfully" +msgstr "" + +#: cinder/volume/manager.py:1107 +#, python-format +msgid "volume %s: Error trying to extend volume" +msgstr "" + +#: cinder/volume/manager.py:1169 +msgid "Failed to update usages while retyping volume." +msgstr "" + +#: cinder/volume/manager.py:1170 +msgid "Failed to get old volume type quota reservations" +msgstr "" + +#: cinder/volume/manager.py:1190 +#, python-format +msgid "Volume %s: retyped succesfully" +msgstr "" + +#: cinder/volume/manager.py:1193 +#, python-format +msgid "" +"Volume %s: driver error when trying to retype, falling back to generic " +"mechanism." +msgstr "" + +#: cinder/volume/manager.py:1204 +msgid "Retype requires migration but is not allowed." +msgstr "" + +#: cinder/volume/manager.py:1212 +msgid "Volume must not have snapshots." 
+msgstr "" + +#: cinder/volume/qos_specs.py:57 +#, python-format +msgid "Valid consumer of QoS specs are: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:84 cinder/volume/qos_specs.py:105 +#: cinder/volume/qos_specs.py:155 cinder/volume/qos_specs.py:197 +#: cinder/volume/qos_specs.py:211 cinder/volume/qos_specs.py:225 +#: cinder/volume/volume_types.py:43 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:123 cinder/volume/qos_specs.py:140 +#: cinder/volume/qos_specs.py:272 cinder/volume/volume_types.py:52 +#: cinder/volume/volume_types.py:99 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/qos_specs.py:156 +#, python-format +msgid "Failed to get all associations of qos specs %s" +msgstr "" + +#: cinder/volume/qos_specs.py:189 +#, python-format +msgid "" +"Type %(type_id)s is already associated with another qos specs: " +"%(qos_specs_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:198 +#, python-format +msgid "Failed to associate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:212 +#, python-format +msgid "Failed to disassociate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:226 +#, python-format +msgid "Failed to disassociate qos specs %s." +msgstr "" + +#: cinder/volume/qos_specs.py:284 cinder/volume/volume_types.py:111 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/utils.py:144 +#, python-format +msgid "" +"Incorrect value error: %(blocksize)s, it may indicate that " +"'volume_dd_blocksize' was configured incorrectly. Fall back to default." +msgstr "" + +#: cinder/volume/volume_types.py:130 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:131 +#: cinder/volume/drivers/block_device.py:143 cinder/volume/drivers/lvm.py:654 +#: cinder/volume/drivers/lvm.py:669 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:157 cinder/volume/drivers/lvm.py:687 +#, python-format +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:183 cinder/volume/drivers/lvm.py:483 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:200 cinder/volume/drivers/lvm.py:504 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:272 cinder/volume/drivers/lvm.py:227 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:287 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:311 cinder/volume/drivers/lvm.py:300 +#: cinder/volume/drivers/zadara.py:509 cinder/volume/drivers/nexenta/nfs.py:189 +#, python-format +msgid "Creating clone of volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:380 +msgid "No free disk" +msgstr "" + +#: cinder/volume/drivers/block_device.py:393 +msgid "No big enough free disk" +msgstr "" + +#: cinder/volume/drivers/coraid.py:84 +#, python-format +msgid "Invalid ESM url scheme \"%s\". Supported https only." +msgstr "" + +#: cinder/volume/drivers/coraid.py:111 +msgid "Invalid REST handle name. Expected path." 
+msgstr "" + +#: cinder/volume/drivers/coraid.py:134 +#, python-format +msgid "Call to json.loads() failed: %(ex)s. Response: %(resp)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:224 +msgid "Session is expired. Relogin on ESM." +msgstr "" + +#: cinder/volume/drivers/coraid.py:244 +msgid "Reply is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:246 +msgid "Error message is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:284 +#, python-format +msgid "Coraid Appliance ping failed: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:297 +#, python-format +msgid "Volume \"%(name)s\" created with VSX LUN \"%(lun)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:311 +#, python-format +msgid "Volume \"%s\" deleted." +msgstr "" + +#: cinder/volume/drivers/coraid.py:315 +#, python-format +msgid "Resize volume \"%(name)s\" to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:319 +#, python-format +msgid "Repository for volume \"%(name)s\" found: \"%(repo)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:333 +#, python-format +msgid "Volume \"%(name)s\" resized. New size is %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:385 +msgid "Cannot create clone volume in different repository." +msgstr "" + +#: cinder/volume/drivers/coraid.py:505 +#, python-format +msgid "Initialize connection %(shelf)s/%(lun)s for %(name)s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:139 +#, python-format +msgid "" +"CLI output\n" +"%s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:154 +msgid "Reading CLI MOTD" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:158 +#, python-format +msgid "Setting CLI terminal width: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:162 +#, python-format +msgid "Sending CLI command: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:169 +msgid "Error executing EQL command" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:199 +#, python-format +msgid "EQL-driver: executing \"%s\"" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:208 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:383 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:215 cinder/volume/drivers/san/san.py:149 +#, python-format +msgid "Error running SSH command: %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:282 +#, python-format +msgid "Volume %s does not exist, it may have already been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:300 +#, python-format +msgid "EQL-driver: Setup is complete, group IP is %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:304 +msgid "Failed to setup the Dell EqualLogic driver" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:320 +#, python-format +msgid "Failed to create volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:329 +#, python-format +msgid "Volume %s was not found while trying to delete it" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:333 +#, python-format +msgid "Failed to delete volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:348 +#, python-format +msgid "Failed to create snapshot of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:361 +#, python-format +msgid "Failed to create volume from snapshot %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:374 +#, python-format +msgid "Failed to create clone of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:384 +#, python-format +msgid "Failed to delete snapshot %(snap)s of volume %(vol)s" +msgstr "" + +#: 
cinder/volume/drivers/eqlx.py:405 +#, python-format +msgid "Failed to initialize connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:415 +#, python-format +msgid "Failed to terminate connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:436 +#, python-format +msgid "Volume %s was not found; it may have been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:440 +#, python-format +msgid "Failed to ensure export of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:459 +#, python-format +msgid "Failed to extend_volume %(name)s from %(current_size)sGB to %(new_size)sGB" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:86 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:91 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:103 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:161 +#, python-format +msgid "Cloning volume %(src)s to volume %(dst)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:166 +msgid "Volume status must be 'available'." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:202 cinder/volume/drivers/nfs.py:122 +#: cinder/volume/drivers/netapp/nfs.py:753 +#, python-format +msgid "casted to %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:216 +msgid "Snapshot status must be \"available\" to clone." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:238 +#, python-format +msgid "snapshot: %(snap)s, volume: %(vol)s, volume_size: %(size)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:257 +#, python-format +msgid "will copy from snapshot at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:275 cinder/volume/drivers/nfs.py:172 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:373 +#, python-format +msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:403 +#, python-format +msgid "nova call result: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:405 +msgid "Call to Nova to create snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:427 +msgid "Nova returned \"error\" status while creating snapshot." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:431 +#, python-format +msgid "Status of snapshot %(id)s is now %(status)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:444 +#, python-format +msgid "Timed out while waiting for Nova update for creation of snapshot %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:456 +#, python-format +msgid "create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:457 +#, python-format +msgid "volume id: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:532 +msgid "'active' must be present when writing snap_info." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:562 +#, python-format +msgid "deleting snapshot %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:566 +msgid "Volume status must be \"available\" or \"in-use\"." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:582 +#, python-format +msgid "" +"Snapshot record for %s is not present, allowing snapshot_delete to " +"proceed."
+msgstr "" + +#: cinder/volume/drivers/glusterfs.py:587 +#, python-format +msgid "snapshot_file for this snap is %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:608 +#, python-format +msgid "No base file found for %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:625 +#, python-format +msgid "No %(base_id)s found for %(file)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:680 +#, python-format +msgid "No file found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:690 +#, python-format +msgid "No snap found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:701 +#, python-format +msgid "No file depends on %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:727 +#, python-format +msgid "Check condition failed: %s expected to be None." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:778 +msgid "Call to Nova delete snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:796 +#, python-format +msgid "status of snapshot %s is still \"deleting\"... waiting" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:802 +#, python-format +msgid "Unable to delete snapshot %(id)s, status: %(status)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:815 +#, python-format +msgid "Timed out while waiting for Nova update for deletion of snapshot %(id)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:904 +#, python-format +msgid "%s must be a valid raw or qcow2 image." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:967 +msgid "Extend volume is only supported for this driver when no snapshots exist." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:975 +#, python-format +msgid "Unrecognized backing format: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:990 +#, python-format +msgid "creating new volume at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:993 +#, python-format +msgid "file already exists at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1019 cinder/volume/drivers/nfs.py:159 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1021 +#, python-format +msgid "Available shares: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1038 +#, python-format +msgid "" +"GlusterFS share at %(dir)s is not writable by the Cinder volume service. " +"Snapshot operations will not be supported." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:96 +#, python-format +msgid "GPFS is not active. Detailed output: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:97 +#, python-format +msgid "GPFS is not running - state: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:140 +msgid "Option gpfs_mount_point_base is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:147 +msgid "Option gpfs_images_share_mode is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:153 +msgid "Option gpfs_images_dir is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:160 +#, python-format +msgid "" +"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " +"belong to different file systems" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:169 +#, python-format +msgid "" +"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in " +"cluster daemon level %(cur)s - must be at least at level %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:183 +#, python-format +msgid "%s must be an absolute path." 
+msgstr "" + +#: cinder/volume/drivers/gpfs.py:188 +#, python-format +msgid "%s is not a directory." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:197 +#, python-format +msgid "" +"The GPFS filesystem %(fs)s is not at the required release level. Current" +" level is %(cur)s, must be at least %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:556 +#, python-format +msgid "Failed to resize volume %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:604 +#, python-format +msgid "mkfs failed on volume %(vol)s, error message was: %(err)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:637 +#, python-format +msgid "" +"%s cannot be accessed. Verify that GPFS is active and file system is " +"mounted." +msgstr "" + +#: cinder/volume/drivers/lvm.py:189 +#, python-format +msgid "Unabled to delete due to existing snapshot for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:215 +#, python-format +msgid "Volume device file path %s does not exist." +msgstr "" + +#: cinder/volume/drivers/lvm.py:221 +#, python-format +msgid "Size for volume: %s not found, cannot secure delete." +msgstr "" + +#: cinder/volume/drivers/lvm.py:262 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:361 +#, python-format +msgid "Unable to update stats on non-initialized Volume Group: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:462 +#, python-format +msgid "Error creating iSCSI target, retrying creation for target: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:482 +#, python-format +msgid "volume_info:%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:518 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:519 cinder/volume/drivers/lvm.py:724 +#: cinder/volume/drivers/huawei/rest_common.py:1225 +#, python-format +msgid "%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:573 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/nfs.py:109 +msgid "Driver specific implementation needs to return mount_point_base." +msgstr "" + +#: cinder/volume/drivers/nfs.py:263 +#, python-format +msgid "Expected volume size was %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:264 +#, python-format +msgid " but size is now %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:361 +#, python-format +msgid "%s is already mounted" +msgstr "" + +#: cinder/volume/drivers/nfs.py:421 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:426 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/nfs.py:431 +#, python-format +msgid "NFS config 'nfs_oversub_ratio' invalid. Must be > 0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:439 +#, python-format +msgid "NFS config 'nfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:493 +#, python-format +msgid "Selected %s as target nfs share." 
+msgstr "" + +#: cinder/volume/drivers/nfs.py:526 +#, python-format +msgid "%s is above nfs_used_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:529 +#, python-format +msgid "%s is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:532 +#, python-format +msgid "%s reserved space is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/rbd.py:160 +#, python-format +msgid "Invalid argument - whence=%s not supported" +msgstr "" + +#: cinder/volume/drivers/rbd.py:164 +msgid "Invalid argument" +msgstr "" + +#: cinder/volume/drivers/rbd.py:183 +msgid "fileno() not supported by RBD()" +msgstr "" + +#: cinder/volume/drivers/rbd.py:210 +#, python-format +msgid "error opening rbd image %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:259 +msgid "rados and rbd python libraries not found" +msgstr "" + +#: cinder/volume/drivers/rbd.py:265 +msgid "error connecting to ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:346 cinder/volume/drivers/sheepdog.py:178 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:377 +#, python-format +msgid "clone depth exceeds limit of %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:411 +#, python-format +msgid "maximum clone depth (%d) has been reached - flattening source volume" +msgstr "" + +#: cinder/volume/drivers/rbd.py:423 +#, python-format +msgid "flattening source volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:435 +#, python-format +msgid "creating snapshot='%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:445 +#, python-format +msgid "cloning '%(src_vol)s@%(src_snap)s' to '%(dest)s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:459 +msgid "clone created successfully" +msgstr "" + +#: cinder/volume/drivers/rbd.py:468 +#, python-format +msgid "creating volume '%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:484 +#, python-format +msgid "flattening %(pool)s/%(img)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:490 +#, python-format +msgid "cloning %(pool)s/%(img)s@%(snap)s to %(dst)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:527 +msgid "volume has no backup snaps" +msgstr "" + +#: cinder/volume/drivers/rbd.py:550 +#, python-format +msgid "volume %s is not a clone" +msgstr "" + +#: cinder/volume/drivers/rbd.py:568 +#, python-format +msgid "deleting parent snapshot %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:579 +#, python-format +msgid "deleting parent %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:593 +#, python-format +msgid "volume %s no longer exists in backend" +msgstr "" + +#: cinder/volume/drivers/rbd.py:609 +msgid "volume has clone snapshot(s)" +msgstr "" + +#: cinder/volume/drivers/rbd.py:625 +#, python-format +msgid "deleting rbd volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:629 +msgid "" +"ImageBusy error raised while deleting rbd volume. This may have been " +"caused by a connection from a client that has crashed and, if so, may be " +"resolved by retrying the delete after 30 seconds has elapsed." 
+msgstr "" + +#: cinder/volume/drivers/rbd.py:642 +msgid "volume is a clone so cleaning references" +msgstr "" + +#: cinder/volume/drivers/rbd.py:696 +#, python-format +msgid "connection data: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:705 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:709 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:712 +msgid "Not an rbd snapshot" +msgstr "" + +#: cinder/volume/drivers/rbd.py:724 +#, python-format +msgid "not cloneable: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:728 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:733 +msgid "rbd image clone requires image format to be 'raw' but image {0} is '{1}'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:747 +#, python-format +msgid "Unable to open image %(loc)s: %(err)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:817 +msgid "volume backup complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:830 +msgid "volume restore complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:840 cinder/volume/drivers/sheepdog.py:195 +#, python-format +msgid "Failed to Extend Volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:845 cinder/volume/drivers/sheepdog.py:200 +#: cinder/volume/drivers/windows/windows.py:223 +#, python-format +msgid "Extend volume from %(old_size)s GB to %(new_size)s GB." +msgstr "" + +#: cinder/volume/drivers/scality.py:67 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:78 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:84 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:105 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:139 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:59 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:64 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:144 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:151 +#, python-format +msgid "" +"Failed to make httplib connection SolidFire Cluster: %s (verify san_ip " +"settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:154 +#, python-format +msgid "Failed to make httplib connection: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:161 +#, python-format +msgid "" +"Request to SolidFire cluster returned bad status: %(status)s / %(reason)s" +" (check san_login/san_password settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:166 +#, python-format +msgid "HTTP request failed, with status: %(status)s and reason: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:177 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:183 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:187 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:189 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:195 +#, python-format +msgid "Detected 
xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:202 +#: cinder/volume/drivers/solidfire.py:271 +#: cinder/volume/drivers/solidfire.py:366 +#, python-format +msgid "API response: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:222 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:253 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:315 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:398 +msgid "Failed to get model update from clone" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:410 +#, python-format +msgid "Failed volume create: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:425 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:460 +#, python-format +msgid "Failed to get SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:469 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:478 +#, python-format +msgid "Volume %s, not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:481 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:550 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:554 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:556 +msgid "This usually means the volume was never successfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:569 +#, python-format +msgid "Failed to delete SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:572 +#: cinder/volume/drivers/solidfire.py:646 +#: cinder/volume/drivers/solidfire.py:709 +#: cinder/volume/drivers/solidfire.py:734 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:575 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:579 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:587 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:638 +msgid "Entering SolidFire extend_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:660 +msgid "Leaving SolidFire extend_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:665 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:673 +msgid "Failed to get updated stats" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:703 +#: cinder/volume/drivers/solidfire.py:728 +msgid "Entering SolidFire attach_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:773 +msgid "Leaving SolidFire transfer volume" +msgstr "" + +#: cinder/volume/drivers/zadara.py:236 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:260 +#, python-format +msgid "Operation completed. 
%(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:357 +#, python-format +msgid "Pool %(name)s: %(total)sGB total, %(free)sGB free" +msgstr "" + +#: cinder/volume/drivers/zadara.py:408 cinder/volume/drivers/zadara.py:531 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:438 +#, python-format +msgid "Create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:445 cinder/volume/drivers/zadara.py:490 +#: cinder/volume/drivers/zadara.py:516 +#, python-format +msgid "Volume %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:456 +#, python-format +msgid "Delete snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:464 +#, python-format +msgid "snapshot: original volume %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:472 +#, python-format +msgid "snapshot: snapshot %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:483 +#, python-format +msgid "Creating volume from snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:496 +#, python-format +msgid "Snapshot %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:614 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:40 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:79 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:83 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:91 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:98 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:107 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:115 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:130 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:137 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:144 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:152 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:157 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:167 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:177 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:188 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:197 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:218 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:230 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:241 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:257 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:266 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:278 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:287 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:292 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:302 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:312 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:321 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:342 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. 
Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:354 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:365 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:381 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:390 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:402 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:411 +msgid "Entering delete_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:413 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:420 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:430 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:438 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:442 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:456 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:465 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:472 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:476 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:488 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:497 +#: cinder/volume/drivers/emc/emc_smis_common.py:567 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:502 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:518 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:527 +#, python-format +msgid "" +"Error Create Snapshot: %(snapshot)s Volume: %(volume)s Error: " +"%(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:535 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:541 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:545 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:551 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:559 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:574 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:590 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:599 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:611 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:621 +#, python-format +msgid "Create export: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:626 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:648 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:663 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:674 +#, python-format +msgid "Error mapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:678 +#, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:694 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:707 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:711 +#, python-format +msgid "HidePaths for volume %s completed successfully." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:724 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:739 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:744 +#, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:757 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:770 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:775 +#, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:781 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:790 +#: cinder/volume/drivers/emc/emc_smis_common.py:820 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:804 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:810 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:834 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:840 +#, python-format +msgid "Volume %s is already mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:852 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:884 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:887 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:903 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:906 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:928 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:948 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:952 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:959 +msgid "Cannot connect to ECOM server" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:971 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:984 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:997 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1010 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1054 +#, python-format +msgid "Pool %(storage_type)s is not found." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1060 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1066 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1082 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1114 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1117 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1130 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1158 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1184 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1188 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1248 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1289 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1302 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1314 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1326 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1361 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1404 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1409 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1419 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1441 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1463 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1491 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1520 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1526 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1538 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1548 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1550 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1566 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:152 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:161 +#, python-format +msgid "Cannot find device number for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:191 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:198 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:215 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:70 +#, python-format +msgid "Range: start LU: %(start)s, end LU: %(end)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:84 +#, python-format +msgid "setting LU upper (end) limit to %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:92 +#, python-format +msgid "%(element)s: %(val)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:103 cinder/volume/drivers/hds/hds.py:105 +#, python-format +msgid "XML exception reading parameter: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:178 +#, python-format +msgid "portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:197 +#, python-format +msgid "No configuration found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:250 +#, python-format +msgid "HDP not found: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:289 +#, python-format +msgid "iSCSI portal not found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:327 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:355 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is cloned." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:372 +#, python-format +msgid "LUN %(lun)s extended to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:395 +#, python-format +msgid "delete lun %(lun)s on %(name)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:480 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created from snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:503 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is created as snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:522 +#, python-format +msgid "LUN %s is deleted." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:57 +msgid "_instantiate_driver: configuration not found." 
+msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:64 +#, python-format +msgid "" +"_instantiate_driver: Loading %(protocol)s driver for Huawei OceanStor " +"%(product)s series storage arrays." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:84 +#, python-format +msgid "" +"\"Product\" or \"Protocol\" is illegal. \"Product\" should be set to " +"either T, Dorado or HVS. \"Protocol\" should be set to either iSCSI or " +"FC. Product: %(product)s Protocol: %(protocol)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:74 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s host: %(host)s initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:92 +#: cinder/volume/drivers/huawei/huawei_t.py:461 +#, python-format +msgid "initialize_connection: Target FC ports WWNS: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:101 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(ini)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:159 +#: cinder/volume/drivers/huawei/rest_common.py:1278 +#, python-format +msgid "" +"_get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " +"check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:206 +#: cinder/volume/drivers/huawei/rest_common.py:1083 +#, python-format +msgid "_get_tgt_iqn: iSCSI IP is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:234 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:248 +#, python-format +msgid "" +"_get_iscsi_tgt_port_info: Failed to get iSCSI port info. Please make sure" +" the iSCSI port IP %s is configured in array." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:323 +#: cinder/volume/drivers/huawei/huawei_t.py:552 +#, python-format +msgid "" +"terminate_connection: volume: %(vol)s, host: %(host)s, connector: " +"%(initiator)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:351 +#, python-format +msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:436 +msgid "validate_connector: The FC driver requires thewwpns in the connector." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:443 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:578 +#, python-format +msgid "_remove_fc_ports: FC port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:40 +#, python-format +msgid "parse_xml_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:129 +#, python-format +msgid "_get_host_os_type: Host %(ip)s OS type is %(os)s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:59 +#, python-format +msgid "HVS Request URL: %(url)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:60 +#, python-format +msgid "HVS Request Data: %(data)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:73 +#, python-format +msgid "HVS Response Data: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:75 +#, python-format +msgid "Bad response from server: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:82 +msgid "JSON transfer error" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:102 +#, python-format +msgid "Login error, reason is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:166 +#, python-format +msgid "" +"%(err)s\n" +"result: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:173 +#, python-format +msgid "%s \"data\" was not in result." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:208 +msgid "Can't find the Qos policy in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:246 +msgid "Can't find lun or lun group in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:280 +#, python-format +msgid "Invalid resource pool: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:298 +#, python-format +msgid "Get pool info error, pool name is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:327 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:354 +#, python-format +msgid "_stop_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:474 +#, python-format +msgid "" +"_mapping_hostgroup_and_lungroup: lun_group: %(lun_group)sview_id: " +"%(view_id)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:511 +#: cinder/volume/drivers/huawei/rest_common.py:543 +#, python-format +msgid "initiator name:%(initiator_name)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:527 +#, python-format +msgid "host lun id is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:553 +#, python-format +msgid "the free wwns %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:574 +#, python-format +msgid "the fc server properties is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:688 +#, python-format +msgid "JSON transfer data error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:874 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:937 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:964 +#, python-format +msgid "" +"PrefetchType config is wrong. PrefetchType must in 1,2,3,4. fetchtype " +"is:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:970 +msgid "Use default prefetch fetchtype. Prefetch fetchtype:Intelligent." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:982 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal.LUNcopy name: " +"%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1056 +#, python-format +msgid "" +"_get_iscsi_port_info: Failed to get iscsi port info through config IP " +"%(ip)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1101 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1124 +#, python-format +msgid "_parse_volume_type: type id: %(type_id)s config parameter is: %(params)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1157 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the configuration file " +"%(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1162 +#, python-format +msgid "The config parameters are: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1239 +#: cinder/volume/drivers/huawei/ssh_common.py:118 +#: cinder/volume/drivers/huawei/ssh_common.py:1265 +#, python-format +msgid "_check_conf_file: Config file invalid. %s must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1246 +#: cinder/volume/drivers/huawei/ssh_common.py:125 +msgid "_check_conf_file: Config file invalid. StoragePool must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1256 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1300 +msgid "Can not find lun in array" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:54 +#, python-format +msgid "ssh_read: Read SSH timeout. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:70 +msgid "No response message. Please check system status." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:101 +#: cinder/volume/drivers/huawei/ssh_common.py:1249 +msgid "do_setup" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:135 +#: cinder/volume/drivers/huawei/ssh_common.py:1287 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType is invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:169 +#, python-format +msgid "_get_login_info: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:224 +#, python-format +msgid "create_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:242 +#, python-format +msgid "" +"_name_translate: Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:279 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the element in configuration " +"file %(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:373 +#: cinder/volume/drivers/huawei/ssh_common.py:1451 +#, python-format +msgid "LUNType must be \"Thin\" or \"Thick\". LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:395 +msgid "" +"_parse_conf_lun_params: Use default prefetch type. 
Prefetch type: " +"Intelligent" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:421 +#, python-format +msgid "" +"_get_maximum_capacity_pool_id: Failed to get pool id. Please check config" +" file and make sure the StoragePool %s is created in storage array." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:436 +#, python-format +msgid "CLI command: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:466 +#, python-format +msgid "" +"_execute_cli: Can not connect to IP %(old)s, try to connect to the other " +"IP %(new)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:501 +#, python-format +msgid "_execute_cli: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:511 +#, python-format +msgid "delete_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:516 +#, python-format +msgid "delete_volume: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:570 +#, python-format +msgid "" +"create_volume_from_snapshot: snapshot name: %(snapshot)s, volume name: " +"%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:580 +#, python-format +msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:650 +#, python-format +msgid "_wait_for_luncopy: LUNcopy %(luncopyname)s status is %(status)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:688 +#, python-format +msgid "create_cloned_volume: src volume: %(src)s, tgt volume: %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:697 +#, python-format +msgid "Source volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:739 +#, python-format +msgid "" +"extend_volume: extended volume name: %(extended_name)s new added volume " +"name: %(added_name)s new added volume size: %(added_size)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:747 +#, python-format +msgid "extend_volume: volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:779 +#, python-format +msgid "create_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:785 +msgid "create_snapshot: Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:792 +#, python-format +msgid "create_snapshot: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:855 +#, python-format +msgid "delete_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:865 +#, python-format +msgid "" +"delete_snapshot: Can not delete snapshot %s for it is a source LUN of " +"LUNCopy." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:873 +#, python-format +msgid "delete_snapshot: Snapshot %(snap)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:916 +#, python-format +msgid "" +"%(func)s: %(msg)s\n" +"CLI command: %(cmd)s\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:933 +#, python-format +msgid "map_volume: Volume %s was not found." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1079 +#, python-format +msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1102 +#, python-format +msgid "remove_map: Host %s does not exist." 
+msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1106 +#, python-format +msgid "remove_map: Volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1119 +#, python-format +msgid "remove_map: No map between host %(host)s and volume %(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1138 +#, python-format +msgid "" +"_delete_map: There are IOs accessing the system. Retry to delete host map" +" %(mapid)s 10s later." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1146 +#, python-format +msgid "" +"_delete_map: Failed to delete host map %(mapid)s.\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1185 +msgid "_update_volume_stats: Updating volume stats." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1277 +msgid "_check_conf_file: Config file invalid. StoragePool must be specified." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1311 +msgid "" +"_get_device_type: The driver only supports Dorado5100 and Dorado 2100 G2 " +"now." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1389 +#, python-format +msgid "" +"create_volume_from_snapshot: %(device)s does not support create volume " +"from snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1396 +#, python-format +msgid "create_cloned_volume: %(device)s does not support clone volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1404 +#, python-format +msgid "extend_volume: %(device)s does not support extend volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1413 +#, python-format +msgid "create_snapshot: %(device)s does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:132 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:142 +#, python-format +msgid "Failed getting details for pool %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:179 +msgid "do_setup: No configured nodes." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:182 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:186 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:190 +msgid "Unable to determine system name" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:193 +msgid "Unable to determine system id" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:196 +msgid "Unable to determine pool extent size" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:203 +#: cinder/volume/drivers/netapp/iscsi.py:122 +#: cinder/volume/drivers/netapp/nfs.py:639 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:157 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:209 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:217 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:225 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:235 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:254 +msgid "The connector does not contain the required information." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:277 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:297 +msgid "CHAP secret exists for host but CHAP is disabled" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:302 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:314 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:316 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:333 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:342 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:365 +msgid "" +"Could not get FC connection information for the host-volume connection. " +"Is the host configured properly for FC connections?" 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:380 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:385 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:403 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:411 +msgid "terminate_connection: Failed to get host name from connector." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:421 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:447 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:459 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:470 +#, python-format +msgid "enter: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:474 +msgid "extend_volume: Extending a volume with snapshots is not supported." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:481 +#, python-format +msgid "leave: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:497 +#, python-format +msgid "enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:523 +#, python-format +msgid "leave: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:540 +#, python-format +msgid "" +"enter: retype: id=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:581 +#, python-format +msgid "" +"exit: retype: ild=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:622 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:623 +msgid "_update_volume_stats: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:44 +#, python-format +msgid "Could not find key in output of command %(cmd)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:64 +#, python-format +msgid "Failed to get code level (%s)." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:86 +#, python-format +msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:143 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:165 +#, python-format +msgid "Failed to find host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:178 +#, python-format +msgid "enter: get_host_from_connector: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:207 +#, python-format +msgid "leave: get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:218 +#, python-format +msgid "enter: create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:224 +msgid "create_host: Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:235 +msgid "create_host: No initiators or wwpns supplied." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:265 +#, python-format +msgid "leave: create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:275 +#, python-format +msgid "enter: map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:301 +#, python-format +msgid "" +"leave: map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host " +"%(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:311 +#, python-format +msgid "enter: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:318 +#, python-format +msgid "unmap_vol_from_host: No mapping of volume %(vol_name)s to any host found." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:324 +#, python-format +msgid "" +"unmap_vol_from_host: Multiple mappings of volume %(vol_name)s found, no " +"host specified." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:336 +#, python-format +msgid "" +"unmap_vol_from_host: No mapping of volume %(vol_name)s to host %(host) " +"found." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:348 +#, python-format +msgid "leave: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:377 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:383 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:390 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:397 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:402 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:408 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:417 +#, python-format +msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:452 +msgid "Protocol must be specified as ' iSCSI' or ' FC'." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:495 +#, python-format +msgid "enter: create_vdisk: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:498 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:525 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping%(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:535 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within theallotted %(to)d " +"seconds timeout. Terminating." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:544 +#, python-format +msgid "" +"enter: run_flashcopy: execute FlashCopy from source %(source)s to target " +"%(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:552 +#, python-format +msgid "leave: run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:572 +#, python-format +msgid "Loopcall: _check_vdisk_fc_mappings(), vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:595 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:631 +#, python-format +msgid "Calling _ensure_vdisk_no_fc_mappings: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:639 +#, python-format +msgid "enter: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:641 +#, python-format +msgid "Tried to delete non-existant vdisk %s." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:645 +#, python-format +msgid "leave: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:649 +#, python-format +msgid "enter: create_copy: snapshot %(src)s to %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:654 +#, python-format +msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:669 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt)s from vdisk %(src)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:691 +msgid "migrate_volume started without a vdisk copy in the expected pool." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:743 +#, python-format +msgid "" +"Ignore change IO group as storage code level is %(code_level)s, below " +"then 6.4.0.0" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:35 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:211 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:244 +#, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:53 +#, python-format +msgid "Expected no output from CLI command %(cmd)s, got %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:65 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:256 +#, python-format +msgid "" +"Failed to parse CLI output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:142 +msgid "Must pass wwpn or host to lsfabric." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:158 +#, python-format +msgid "Did not find success message nor error for %(fun)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:163 +msgid "" +"storwize_svc_multihostmap_enabled is set to False, not allowing multi " +"host mapping." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:347 +#, python-format +msgid "Did not find expected key %(key)s in %(fun)s: %(raw)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:382 +#, python-format +msgid "" +"Unexpected CLI response: header/row mismatch. header: %(header)s, row: " +"%(row)s" +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:419 +#, python-format +msgid "No element by given name %s." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:440 +msgid "Not a valid value for NaElement." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:444 +msgid "NaElement name cannot be null." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:468 +msgid "Type cannot be converted into NaElement." 
+msgstr "" + +#: cinder/volume/drivers/netapp/common.py:75 +msgid "Required configuration not found" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:103 +#, python-format +msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:109 +#, python-format +msgid "Storage family %s is not supported" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:116 +#, python-format +msgid "No default storage protocol found for storage family %(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:123 +#, python-format +msgid "" +"Protocol %(storage_protocol)s is not supported for storage family " +"%(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:130 +#, python-format +msgid "" +"NetApp driver of family %(storage_family)s and protocol " +"%(storage_protocol)s loaded" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:139 +msgid "Only loading netapp drivers supported." +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:158 +#, python-format +msgid "" +"The configured NetApp driver is deprecated. Please refer the link to " +"resolve the issue '%s'." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:69 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:105 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:150 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:166 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:175 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:191 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:227 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:232 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:238 +#, python-format +msgid "Failed to get LUN target details for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:249 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:252 +#, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:290 +#, python-format +msgid "Snapshot %s deletion successful" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:310 +#: cinder/volume/drivers/netapp/iscsi.py:565 +#: cinder/volume/drivers/netapp/nfs.py:99 +#: cinder/volume/drivers/netapp/nfs.py:206 +#, python-format +msgid "Resizing %s failed. Cleaning volume." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:325 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:412 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:431 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:511 +msgid "Object is not a NetApp LUN." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:543 +#, python-format +msgid "Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:545 +#, python-format +msgid "Error getting lun attribute. Exception: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:600 +#, python-format +msgid "No need to extend volume %s as it is already the requested new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:606 +#, python-format +msgid "Resizing lun %s directly to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:633 +#, python-format +msgid "Lun %(path)s geometry failed. Message - %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:662 +#, python-format +msgid "Moving lun %(name)s to %(new_name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:677 +#, python-format +msgid "Resizing lun %s using sub clone to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:684 +#, python-format +msgid "%s cannot be sub clone resized as it is hosted on compressed volume" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:690 +#, python-format +msgid "%s cannot be sub clone resized as it contains no blocks." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:707 +#, python-format +msgid "Post clone resize lun %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:718 +#, python-format +msgid "Failure staging lun %s to tmp." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:723 +#, python-format +msgid "Failure moving new cloned lun to %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:727 +#, python-format +msgid "Failure deleting staged tmp lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:730 +#, python-format +msgid "Unknown exception in post clone resize lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:732 +#, python-format +msgid "Exception details: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:736 +msgid "Getting lun block count." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:741 +#, python-format +msgid "Failure getting lun info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:785 +#, python-format +msgid "Failed to get vol with required size and extra specs for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:796 +#, python-format +msgid "Error provisioning vol %(name)s on %(volume)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:841 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:982 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:986 +#, python-format +msgid "No cloned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1087 +msgid "Cluster ssc is not updated. No volume stats found." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1149 +#: cinder/volume/drivers/netapp/nfs.py:1080 +msgid "Unsupported ONTAP version. ONTAP version 7.3.1 and above is supported." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1153 +#: cinder/volume/drivers/netapp/nfs.py:1084 +#: cinder/volume/drivers/netapp/utils.py:320 +msgid "API version could not be determined." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1164 +#, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1273 +#, python-format +msgid "Error finding luns for volume %s. Verify volume exists."
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1390 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1393 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1456 +msgid "Volume refresh job already running. Returning..." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1462 +#, python-format +msgid "Error refreshing vol capacity. Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1470 +#, python-format +msgid "Refreshing capacity info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:104 +#: cinder/volume/drivers/netapp/nfs.py:211 +#, python-format +msgid "NFS file %s not discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:224 +#, python-format +msgid "Copied image to volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:230 +#, python-format +msgid "Registering image in cache %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:237 +#, python-format +msgid "" +"Exception while registering image %(image_id)s in cache. Exception: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:250 +#, python-format +msgid "Found cache file for image %(image_id)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:263 +#, python-format +msgid "Cloning img from cache for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:271 +msgid "Image cache cleaning in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:282 +msgid "Image cache cleaning in progress." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:293 +#, python-format +msgid "Cleaning cache for share %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:298 +#, python-format +msgid "Files to be queued for deletion %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:305 +#, python-format +msgid "Exception during cache cleaning %(share)s. Message - %(ex)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:311 +msgid "Image cache cleaning done." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:336 +#, python-format +msgid "Bytes to free %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:343 +#, python-format +msgid "Delete file path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:358 +#, python-format +msgid "Deleting file at path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:363 +#, python-format +msgid "Exception during deleting %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:395 +#, python-format +msgid "Unexpected exception in cloning image %(image_id)s. 
Message: %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:411 +#, python-format +msgid "Cloning image %s from cache" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:415 +#, python-format +msgid "Cache share: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:425 +#, python-format +msgid "Unexpected exception during image cloning in share %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:431 +#, python-format +msgid "Cloning image %s directly in share" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:436 +#, python-format +msgid "Share is cloneable %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:443 +#, python-format +msgid "Image is raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:450 +#, python-format +msgid "Image will locally be converted to raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:457 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:467 +#, python-format +msgid "Performing post clone for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:474 +msgid "NFS file could not be discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:478 +msgid "Checking file for resize" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:482 +#, python-format +msgid "Resizing file to %sG" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:488 +msgid "Resizing image file failed." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:510 +msgid "Discover file retries exhausted." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:529 +#, python-format +msgid "Image location not in the expected format %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:557 +#, python-format +msgid "Found possible share matches %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:561 +msgid "Unexpected exception while short listing used share." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:599 +#, python-format +msgid "Extending volume %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:710 +#, python-format +msgid "Shares on vserver %s will only be used for provisioning." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:714 +#: cinder/volume/drivers/netapp/nfs.py:892 +msgid "No vserver set in config. SSC will be disabled." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:757 +#, python-format +msgid "Exception creating vol %(name)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:765 +#, python-format +msgid "Volume %s could not be created on shares." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:815 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:856 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:862 +#, python-format +msgid "" +"Cloning with params volume %(volume)s, src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:924 +msgid "No cluster ssc stats found. Wait for next volume stats update." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:941 +msgid "No shares found hence skipping ssc refresh." 
+msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:978 +#: cinder/volume/drivers/netapp/nfs.py:1221 +#, python-format +msgid "Shortlisted del elg files %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:983 +#: cinder/volume/drivers/netapp/nfs.py:1226 +#, python-format +msgid "Getting file usage for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:988 +#: cinder/volume/drivers/netapp/nfs.py:1231 +#, python-format +msgid "file-usage for path %(path)s is %(bytes)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1005 +#: cinder/volume/drivers/netapp/nfs.py:1268 +#, python-format +msgid "Share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1007 +#: cinder/volume/drivers/netapp/nfs.py:1270 +#, python-format +msgid "No share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1038 +#, python-format +msgid "Found volume %(vol)s for share %(share)s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1129 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1139 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:241 +#, python-format +msgid "Unexpected error while creating ssc vol list. Message - %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:272 +#, python-format +msgid "Exception querying aggr options. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:313 +#, python-format +msgid "Exception querying sis information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:347 +#, python-format +msgid "Exception querying mirror information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:379 +#, python-format +msgid "Exception querying storage disk. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:421 +#, python-format +msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:455 +#, python-format +msgid "Successfully completed stale refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:482 +#, python-format +msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:488 +#, python-format +msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:500 +msgid "Backend not a VolumeDriver." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:502 +msgid "Backend server not NaServer." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:505 +msgid "ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:517 +msgid "refresh stale ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:616 +msgid "Fatal error: User not permitted to query NetApp volumes." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:623 +#, python-format +msgid "" +"The user does not have access or sufficient privileges to use all ssc " +"apis. The ssc features %s may not work as expected." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:122 +msgid "ems executed successfully." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:124 +#, python-format +msgid "Failed to invoke ems. 
Message : %s" +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:137 +msgid "" +"It is not the recommended way to use drivers by NetApp. Please use " +"NetAppDriver to achieve the functionality." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:160 +msgid "Requires an NaServer instance." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:317 +msgid "Unsupported Clustered Data ONTAP version." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:99 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:150 +#, python-format +msgid "Extending volume: %(id)s New size: %(size)s GB" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:166 +#, python-format +msgid "Volume %s does not exist, it seems it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:179 +#, python-format +msgid "Cannot delete snapshot %(origin): %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:190 +#, python-format +msgid "Creating temp snapshot of the original volume: %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:200 +#: cinder/volume/drivers/nexenta/nfs.py:200 +#, python-format +msgid "Volume creation failed, deleting created snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:205 +#: cinder/volume/drivers/nexenta/nfs.py:205 +#, python-format +msgid "Failed to delete zfs snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:223 +#, python-format +msgid "Enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:250 +#, python-format +msgid "Remote NexentaStor appliance at %s should be SSH-bound." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:267 +#, python-format +msgid "" +"Cannot send source snapshot %(src)s to destination %(dst)s. Reason: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:275 +#, python-format +msgid "" +"Cannot delete temporary source snapshot %(src)s on NexentaStor Appliance:" +" %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:281 +#, python-format +msgid "Cannot delete source volume %(volume)s on NexentaStor Appliance: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:318 +#, python-format +msgid "Snapshot %s does not exist, it seems it was already deleted." 
+msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:439 +#: cinder/volume/drivers/windows/windows_utils.py:230 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:449 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:461 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:471 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:481 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:514 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:522 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:83 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:88 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:89 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:90 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:96 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:85 +#, python-format +msgid "Volume %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:89 +#, python-format +msgid "Folder %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:114 +#, python-format +msgid "Creating folder on Nexenta Store %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:146 +#, python-format +msgid "Cannot destroy created folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:176 +#, python-format +msgid "Cannot destroy cloned folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:227 +#, python-format +msgid "Folder %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:237 +#: cinder/volume/drivers/nexenta/nfs.py:268 +#, python-format +msgid "Snapshot %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:302 +#, python-format +msgid "Creating regular file: %s. This may take some time." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:313 +#, python-format +msgid "Regular file: %s created." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:365 +#, python-format +msgid "Sharing folder %s on Nexenta Store" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:393 +#, python-format +msgid "Shares loaded: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/utils.py:46 +#, python-format +msgid "Invalid value: \"%s\"" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:93 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:99 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:107 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:137 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:190 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:246 +#, python-format +msgid "Snapshot info: %(name)s => %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:321 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:79 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:166 +#, python-format +msgid "Invalid hp3parclient version. Version %s or greater required." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:179 +#, python-format +msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:193 +#, python-format +msgid "HP3PARCommon %(common_ver)s, hp3parclient %(rest_ver)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:212 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:494 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:228 +#, python-format +msgid "Failed to get domain because CPG (%s) doesn't exist on array." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:247 +#, python-format +msgid "Error extending volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:347 +#, python-format +msgid "command %s failed" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:390 +#, python-format +msgid "Error running ssh command: %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:586 +#, python-format +msgid "VV Set %s does not exist." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:633 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:684 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:752 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1004 +#, python-format +msgid "Failure in update_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1019 +#, python-format +msgid "Failure in clear_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1031 +#, python-format +msgid "Error attaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1039 +#, python-format +msgid "Error detaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:124 +#, python-format +msgid "Invalid IP address format '%s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:158 +#, python-format +msgid "" +"Found invalid iSCSI IP address(es) in configuration option(s) " +"hp3par_iscsi_ips or iscsi_ip_address '%s'." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:164 +msgid "At least one valid iSCSI IP address must be set." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:266 +msgid "Least busy iSCSI port not found, using first iSCSI port in list." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:75 +#, python-format +msgid "Failure while invoking function: %(func)s. Error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:162 +#, python-format +msgid "Error while terminating session: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:165 +msgid "Successfully established connection to the server." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:172 +#, python-format +msgid "Error while logging out the user: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:218 +#, python-format +msgid "" +"Not authenticated error occurred. Will create session and try API call " +"again: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:258 +#, python-format +msgid "Task: %(task)s progress: %(prog)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:262 +#, python-format +msgid "Task %s status: success." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:266 +#: cinder/volume/drivers/vmware/api.py:271 +#, python-format +msgid "Task: %(task)s failed with error: %(err)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:290 +msgid "Lease is ready." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:294 +msgid "Lease initializing..." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:304 +#, python-format +msgid "Error: unknown lease state %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:51 +#, python-format +msgid "Read %(bytes)s out of %(max)s from ThreadSafePipe." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:56 +#, python-format +msgid "Completed transfer of size %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:102 +#, python-format +msgid "Initiating image service update on image: %(image)s with meta: %(meta)s" +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:117 +#, python-format +msgid "Glance image: %s is now active." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:123 +#, python-format +msgid "Glance image: %s is in killed state."
+msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:132 +#, python-format +msgid "Glance image %(id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:171 +#, python-format +msgid "" +"Exception during HTTP connection close in VMwareHTTPWrite. Exception is " +"%s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:203 +#: cinder/volume/drivers/vmware/read_write_util.py:292 +msgid "Could not retrieve URL from lease." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:206 +#, python-format +msgid "Opening vmdk url: %s for write." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:231 +#, python-format +msgid "Written %s bytes to vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:242 +#: cinder/volume/drivers/vmware/read_write_util.py:318 +#, python-format +msgid "Updating progress to %s percent." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:258 +#: cinder/volume/drivers/vmware/read_write_util.py:334 +msgid "Lease released." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:260 +#: cinder/volume/drivers/vmware/read_write_util.py:336 +#, python-format +msgid "Lease is already in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:295 +#, python-format +msgid "Opening vmdk url: %s for read." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:307 +#, python-format +msgid "Read %s bytes from vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:150 +#, python-format +msgid "Error(s): %s occurred in the call to RetrievePropertiesEx." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:189 +#, python-format +msgid "No such SOAP method %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:198 +#, python-format +msgid "httplib error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:209 +#, python-format +msgid "Socket error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:218 +#, python-format +msgid "Type error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:225 +#, python-format +msgid "Error in %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:112 +#, python-format +msgid "Returning spec value %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:115 +#, python-format +msgid "Invalid spec value: %s specified." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:118 +#, python-format +msgid "Returning default spec value: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:169 +#, python-format +msgid "%s not set." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:174 +#, python-format +msgid "Successfully setup driver: %(driver)s for server: %(ip)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:219 +msgid "Backing not available, no operation to be performed." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:287 +#, python-format +msgid "" +"Unable to pick datastore to accommodate %(size)s bytes from the " +"datastores: %(dss)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:293 +#, python-format +msgid "" +"Selected datastore: %(datastore)s with %(host_count)d connected host(s) " +"for the volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:375 +#, python-format +msgid "" +"Unable to find suitable datastore for volume of size: %(vol)s GB under " +"host: %(host)s. 
More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:385 +#, python-format +msgid "Unable to find host to accommodate a disk of size: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:412 +#, python-format +msgid "" +"Unable to find suitable datastore for volume: %(vol)s under host: " +"%(host)s. More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:422 +#, python-format +msgid "Unable to create volume: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:441 +#, python-format +msgid "The instance: %s for which initialize connection is called, exists." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:448 +#, python-format +msgid "There is no backing for the volume: %s. Need to create one." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:456 +msgid "The instance for which initialize connection is called, does not exist." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:461 +#, python-format +msgid "Trying to boot from an empty volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:470 +#, python-format +msgid "" +"Returning connection_info: %(info)s for volume: %(volume)s with " +"connector: %(connector)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:518 +#, python-format +msgid "Snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:523 +#, python-format +msgid "There is no backing, so will not create snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:528 +#, python-format +msgid "Successfully created snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:549 +#, python-format +msgid "Delete snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:554 +#, python-format +msgid "There is no backing, and so there is no snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:558 +#, python-format +msgid "Successfully deleted snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:586 +#, python-format +msgid "Successfully cloned new backing: %(back)s from source VMDK file: %(vmdk)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:603 +#, python-format +msgid "" +"There is no backing for the source volume: %(svol)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:633 +#, python-format +msgid "" +"There is no backing for the source snapshot: %(snap)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:642 +#: cinder/volume/drivers/vmware/vmdk.py:982 +#, python-format +msgid "" +"There is no snapshot point for the snapshotted volume: %(snap)s. Not " +"creating any backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:678 +#, python-format +msgid "Cannot create image of disk format: %s. Only vmdk disk format is accepted." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:713 +#: cinder/volume/drivers/vmware/vmdk.py:771 +#, python-format +msgid "Fetching glance image: %(id)s to server: %(host)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:722 +#: cinder/volume/drivers/vmware/vmdk.py:792 +#, python-format +msgid "Done copying image: %(id)s to volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:725 +#, python-format +msgid "" +"Exception in copy_image_to_volume: %(excep)s. Deleting the backing: " +"%(back)s."
+msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:746 +#, python-format +msgid "Exception in _select_ds_for_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:749 +#, python-format +msgid "Selected datastore %(ds)s for new volume of size %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:784 +#, python-format +msgid "Exception in copy_image_to_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:787 +#, python-format +msgid "Deleting the backing: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:808 +#, python-format +msgid "Copy glance image: %s to create new volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:842 +msgid "Upload to glance of attached volume is not supported." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:847 +#, python-format +msgid "Copy Volume: %s to new image." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:853 +#, python-format +msgid "Backing not found, creating for volume: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:872 +#, python-format +msgid "Done copying volume %(vol)s to a new image %(img)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:922 +#, python-format +msgid "Relocating volume: %(backing)s to %(ds)s and %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:961 +#: cinder/volume/drivers/vmware/volumeops.py:630 +#, python-format +msgid "Successfully created clone: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:974 +#, python-format +msgid "" +"There is no backing for the snapshotted volume: %(snap)s. Not creating any" +" backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1010 +#, python-format +msgid "" +"There is no backing for the source volume: %(src)s. Not creating any " +"backing for volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1018 +#, python-format +msgid "Linked clone of source volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:94 +#, python-format +msgid "Downloading image: %s from glance image server as a flat vmdk file." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:107 +#: cinder/volume/drivers/vmware/vmware_images.py:126 +#, python-format +msgid "Downloaded image: %s from glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:113 +#, python-format +msgid "Downloading image: %s from glance image server using HttpNfc import." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:132 +#, python-format +msgid "Uploading image: %s to the Glance image server using HttpNfc export." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:158 +#, python-format +msgid "Uploaded image: %s to the Glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:87 +#, python-format +msgid "Did not find any backing with name: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:94 +#, python-format +msgid "Deleting the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:97 +#, python-format +msgid "Initiated deletion of VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:99 +#, python-format +msgid "Deleted the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:237 +#, python-format +msgid "There are no valid datastores attached to %s."
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:289 +#, python-format +msgid "" +"Creating folder: %(child_folder_name)s under parent folder: " +"%(parent_folder)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:306 +#, python-format +msgid "Child folder already present: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:314 +#, python-format +msgid "Created child folder: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:365 +#, python-format +msgid "Spec for creating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:383 +#, python-format +msgid "" +"Creating volume backing name: %(name)s disk_type: %(disk_type)s size_kb: " +"%(size_kb)s at folder: %(folder)s resource pool: %(resource_pool)s " +"datastore name: %(ds_name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:395 +#, python-format +msgid "Initiated creation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:398 +#, python-format +msgid "Successfully created volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:438 +#, python-format +msgid "Spec for relocating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:452 +#, python-format +msgid "" +"Relocating backing: %(backing)s to datastore: %(ds)s and resource pool: " +"%(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:462 +#, python-format +msgid "Initiated relocation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:464 +#, python-format +msgid "" +"Successfully relocated volume backing: %(backing)s to datastore: %(ds)s " +"and resource pool: %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:474 +#, python-format +msgid "Moving backing: %(backing)s to folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:479 +#, python-format +msgid "Initiated move of volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:482 +#, python-format +msgid "Successfully moved volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:494 +#, python-format +msgid "Snapshotting backing: %(backing)s with name: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:501 +#, python-format +msgid "Initiated snapshot of volume backing: %(backing)s named: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:505 +#, python-format +msgid "Successfully created snapshot: %(snap)s for volume backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:553 +#, python-format +msgid "Deleting the snapshot: %(name)s from backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:558 +#, python-format +msgid "" +"Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " +"delete anything." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:565 +#, python-format +msgid "Initiated snapshot: %(name)s deletion for backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:569 +#, python-format +msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:597 +#, python-format +msgid "Spec for cloning the backing: %s."
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:613 +#, python-format +msgid "" +"Creating a clone of backing: %(back)s, named: %(name)s, clone type: " +"%(type)s from snapshot: %(snap)s on datastore: %(ds)s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:627 +#, python-format +msgid "Initiated clone of backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:638 +#, python-format +msgid "Deleting file: %(file)s under datacenter: %(dc)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:646 +#, python-format +msgid "Initiated deletion via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:648 +#, python-format +msgid "Successfully deleted file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:701 +msgid "Copying disk data before snapshot of the VM" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:711 +#, python-format +msgid "Initiated copying disk data via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:713 +#, python-format +msgid "Successfully copied disk at: %(src)s to: %(dest)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:722 +#, python-format +msgid "Deleting vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:729 +#, python-format +msgid "Initiated deleting vmdk file via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:731 +#, python-format +msgid "Deleted vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/windows/windows.py:102 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:47 +#, python-format +msgid "" +"check_for_setup_error: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:53 +msgid "check_for_setup_error: there is no ISCSI traffic listening." +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:63 +#, python-format +msgid "" +"get_host_information: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:73 +#, python-format +msgid "" +"get_host_information: the ISCSI target information could not be " +"retrieved. WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:105 +#, python-format +msgid "" +"associate_initiator_with_iscsi_target: an association between initiator: " +"%(init)s and target name: %(target)s could not be established. WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:123 +#, python-format +msgid "" +"delete_iscsi_target: error when deleting the iscsi target associated with" +" target name: %(target)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:139 +#, python-format +msgid "" +"create_volume: error when creating the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:157 +#, python-format +msgid "" +"delete_volume: error when deleting the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:177 +#, python-format +msgid "" +"create_snapshot: error when creating the snapshot name: %(vol_name)s . 
" +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:193 +#, python-format +msgid "" +"create_volume_from_snapshot: error when creating the volume name: " +"%(vol_name)s from snapshot name: %(snap_name)s. WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:208 +#, python-format +msgid "" +"delete_snapshot: error when deleting the snapshot name: %(snap_name)s . " +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:223 +#, python-format +msgid "" +"create_iscsi_target: error when creating iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:240 +#, python-format +msgid "" +"remove_iscsi_target: error when deleting iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:255 +#, python-format +msgid "" +"add_disk_to_target: error adding disk associated to volume : %(vol_name)s" +" to the target name: %(tar_name)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:273 +#, python-format +msgid "" +"copy_vhd_disk: error when copying disk from source path : %(src_path)s to" +" destination path: %(dest_path)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:290 +#, python-format +msgid "" +"extend: error when extending the volume: %(vol_name)s .WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/flows/common.py:52 +#, python-format +msgid "Restoring source %(source_volid)s status to %(status)s" +msgstr "" + +#: cinder/volume/flows/common.py:58 +#, python-format +msgid "" +"Failed setting source volume %(source_volid)s back to its initial " +"%(source_status)s status" +msgstr "" + +#: cinder/volume/flows/common.py:83 +#, python-format +msgid "Updating volume: %(volume_id)s with %(update)s due to: %(reason)s" +msgstr "" + +#: cinder/volume/flows/common.py:90 +#: cinder/volume/flows/manager/create_volume.py:676 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(update)s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:81 +#, python-format +msgid "Originating snapshot status must be one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:103 +#, python-format +msgid "" +"Unable to create a volume from an originating source volume when its " +"status is not one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:126 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than the snapshot size " +"%(snap_size)sGB. They must be >= original snapshot size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:135 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than original volume size " +"%(source_size)sGB. They must be >= original volume size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:144 +#, python-format +msgid "Volume size %(size)s must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:186 +#, python-format +msgid "" +"Size of specified image %(image_size)sGB is larger than volume size " +"%(volume_size)sGB." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:194 +#, python-format +msgid "" +"Volume size %(volume_size)sGB cannot be smaller than the image minDisk " +"size %(min_disk)sGB." 
+msgstr "" + +#: cinder/volume/flows/api/create_volume.py:212 +#, python-format +msgid "Metadata property key %s greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:217 +#, python-format +msgid "Metadata property key %s value greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:254 +#, python-format +msgid "Availability zone '%s' is invalid" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:267 +msgid "Volume must be in the same availability zone as the snapshot" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:276 +msgid "Volume must be in the same availability zone as the source volume" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:315 +msgid "Volume type will be changed to be the same as the source volume." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:463 +#, python-format +msgid "Failed destroying volume entry %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:546 +#, python-format +msgid "Failed rolling back quota for %s reservations" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:590 +#, python-format +msgid "Failed to update quota for deleting volume: %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:678 +#: cinder/volume/flows/manager/create_volume.py:192 +#, python-format +msgid "Volume %s: create failed" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:682 +msgid "Unexpected build error:" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:105 +#, python-format +msgid "" +"Volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d due to " +"%(reason)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:124 +#, python-format +msgid "Volume %s: re-scheduled" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:141 +#, python-format +msgid "Updating volume %(volume_id)s with %(update)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:146 +#, python-format +msgid "Volume %s: resetting 'creating' status failed." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:165 +#, python-format +msgid "Volume %s: rescheduling failed" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:308 +#, python-format +msgid "" +"Failed notifying about the volume action %(event)s for volume " +"%(volume_id)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:345 +#, python-format +msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:347 +#, python-format +msgid "" +"Failed updating volume %(vol_id)s metadata using the provided " +"%(src_type)s %(src_id)s metadata" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:405 +#, python-format +msgid "" +"Failed fetching snapshot %(snapshot_id)s bootable flag using the provided" +" glance snapshot %(snapshot_ref_id)s volume reference" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:418 +#, python-format +msgid "Marking volume %s as bootable." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:421 +#, python-format +msgid "Failed updating volume %(volume_id)s bootable flag to true" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:448 +#, python-format +msgid "" +"Attempting download of %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s." 
+msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:455 +#: cinder/volume/flows/manager/create_volume.py:466 +#, python-format +msgid "" +"Failed to copy image %(image_id)s to volume: %(volume_id)s, error: " +"%(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:461 +#, python-format +msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:475 +#, python-format +msgid "" +"Downloaded image %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s successfully." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:512 +#, python-format +msgid "" +"Creating volume glance metadata for volume %(volume_id)s backed by image " +"%(image_id)s with: %(vol_metadata)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:526 +#, python-format +msgid "" +"Cloning %(volume_id)s from image %(image_id)s at location " +"%(image_location)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:552 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(updates)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:574 +#, python-format +msgid "Unable to create volume. Volume driver %s not initialized" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:588 +#, python-format +msgid "" +"Volume %(volume_id)s: being created using %(functor)s with specification:" +" %(volume_spec)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:611 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with creation provided " +"model %(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:619 +#, python-format +msgid "Volume %s: creating export" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:633 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with driver provided model " +"%(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:680 +#, python-format +msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" +msgstr "" + +#~ msgid "Clear capabilities" +#~ msgstr "" + +#~ msgid "This usually means the volume was never succesfully created." +#~ msgstr "" + +#~ msgid "setting LU uppper (end) limit to %s" +#~ msgstr "" + +#~ msgid "Can't find lun or lun goup in array" +#~ msgstr "" + +#~ msgid "" +#~ msgstr "" + +#~ msgid "Volume to be restored to is smaller than the backup to be restored" +#~ msgstr "" + +#~ msgid "Volume driver '%(driver)s' not initialized." +#~ msgstr "" + +#~ msgid "in looping call" +#~ msgstr "" + +#~ msgid "Is the appropriate service running?" +#~ msgstr "" + +#~ msgid "Could not find another host" +#~ msgstr "" + +#~ msgid "Not enough allocatable volume gigabytes remaining" +#~ msgstr "" + +#~ msgid "Unable to update stats on non-intialized Volume Group: %s" +#~ msgstr "" + +#~ msgid "do_setup: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "migrate_volume started with more than one vdisk copy" +#~ msgstr "" + +#~ msgid "migrate_volume: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "Selected datastore: %s for the volume." +#~ msgstr "" + +#~ msgid "There are no valid datastores present under %s." +#~ msgstr "" + +#~ msgid "Unable to create volume, driver not initialized" +#~ msgstr "" + +#~ msgid "Migration %(migration_id)s could not be found." +#~ msgstr "" + +#~ msgid "Bad driver response status: %(status)s" +#~ msgstr "" + +#~ msgid "Instance %(instance_id)s could not be found." 
+#~ msgstr "" + +#~ msgid "Volume retype failed: %(reason)s" +#~ msgstr "" + +#~ msgid "SIGTERM received" +#~ msgstr "" + +#~ msgid "Child %(pid)d exited with status %(code)d" +#~ msgstr "" + +#~ msgid "_wait_child %d" +#~ msgstr "" + +#~ msgid "wait wrap.failed %s" +#~ msgstr "" + +#~ msgid "" +#~ "Report interval must be less than " +#~ "service down time. Current config: " +#~ ". Setting " +#~ "service_down_time to: %(new_service_down_time)s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target for volume %(name)s." +#~ msgstr "" + +#~ msgid "Updating iscsi target: %s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target %(name)s: %(e)s" +#~ msgstr "" + +#~ msgid "Caught '%(exception)s' exception." +#~ msgstr "" + +#~ msgid "Get code level failed" +#~ msgstr "" + +#~ msgid "do_setup: Could not get system name" +#~ msgstr "" + +#~ msgid "Failed to get license information." +#~ msgstr "" + +#~ msgid "do_setup: No configured nodes" +#~ msgstr "" + +#~ msgid "enter: _get_chap_secret_for_host: host name %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_chap_secret_for_host: host name " +#~ "%(host_name)s with secret %(chap_secret)s" +#~ msgstr "" + +#~ msgid "" +#~ "_create_host: Cannot clean host name. " +#~ "Host name is not unicode or string" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: %s" +#~ msgstr "" + +#~ msgid "leave: _get_host_from_connector: host %s" +#~ msgstr "" + +#~ msgid "enter: _create_host: host %s" +#~ msgstr "" + +#~ msgid "_create_host: No connector ports" +#~ msgstr "" + +#~ msgid "leave: _create_host: host %(host)s - %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +#~ msgstr "" + +#~ msgid "" +#~ "storwize_svc_multihostmap_enabled is set to " +#~ "False, Not allow multi host mapping" +#~ msgstr "" + +#~ msgid "volume %s mapping to multi host" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _map_vol_to_host: LUN %(result_lun)s, " +#~ "volume %(volume_name)s, host %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "leave: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "_create_host failed to return the host name." +#~ msgstr "" + +#~ msgid "_get_host_from_connector failed to return the host name for connector" +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to any host found." +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: Multiple mappings of " +#~ "volume %(vol_name)s found, no host " +#~ "specified." 
+#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to host %(host_name)s found" +#~ msgstr "" + +#~ msgid "protocol must be specified as ' iSCSI' or ' FC'" +#~ msgstr "" + +#~ msgid "enter: _create_vdisk: vdisk %s " +#~ msgstr "" + +#~ msgid "" +#~ "_create_vdisk %(name)s - did not find success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find success " +#~ "message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find mapping " +#~ "id in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to prepare FlashCopy" +#~ " from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "Unexecpted mapping status %(status)s for " +#~ "mapping %(id)s. Attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Mapping %(id)s prepare failed to " +#~ "complete within the allotted %(to)d " +#~ "seconds timeout. Terminating." +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s with " +#~ "exception %(ex)s" +#~ msgstr "" + +#~ msgid "_prepare_fc_map: %s" +#~ msgstr "" + +#~ msgid "" +#~ "_start_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "enter: _run_flashcopy: execute FlashCopy from" +#~ " source %(source)s to target %(target)s" +#~ msgstr "" + +#~ msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +#~ msgstr "" + +#~ msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %(src_vdisk)s (%(src_id)s) does not exist" +#~ msgstr "" + +#~ msgid "" +#~ "_create_copy: cannot get source vdisk " +#~ "%(src)s capacity from vdisk attributes " +#~ "%(attr)s" +#~ msgstr "" + +#~ msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_flashcopy_mapping_attributes: mapping " +#~ "%(fc_map_id)s, attributes %(attributes)s" +#~ msgstr "" + +#~ msgid "enter: _is_vdisk_defined: vdisk %s " +#~ msgstr "" + +#~ msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +#~ msgstr "" + +#~ msgid "enter: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "warning: Tried to delete vdisk %s but it does not exist." 
+#~ msgstr "" + +#~ msgid "leave: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "" +#~ "_add_vdisk_copy %(name)s - did not find" +#~ " success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "_get_vdisk_copy_attrs: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "_get_pool_attrs: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "enter: _execute_command_and_parse_attributes: command %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _execute_command_and_parse_attributes:\n" +#~ "command: %(cmd)s\n" +#~ "attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "_get_hdr_dic: attribute headers and values do not match.\n" +#~ " Headers: %(header)s\n" +#~ " Values: %(row)s" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ "stdout: %(out)s\n" +#~ "stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "Did not find expected column in %(fun)s: %(hdr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Volume size %(size)s cannot be lesser" +#~ " than the snapshot size %(snap_size)s. " +#~ "They must be >= original snapshot " +#~ "size." +#~ msgstr "" + +#~ msgid "" +#~ "Clones currently disallowed when %(size)s " +#~ "< %(source_size)s. They must be >= " +#~ "original volume size." +#~ msgstr "" + +#~ msgid "" +#~ "Size of specified image %(image_size)s " +#~ "is larger than volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "" +#~ "Image minDisk size %(min_disk)s is " +#~ "larger than the volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "Updating volume %(volume_id)s with %(update)s" +#~ msgstr "" + +#~ msgid "Volume %s: resetting 'creating' status failed" +#~ msgstr "" + +#~ msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s" +#~ msgstr "" + +#~ msgid "Marking volume %s as bootable" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting download of %(image_id)s " +#~ "(%(image_location)s) to volume %(volume_id)s" +#~ msgstr "" + +#~ msgid "" +#~ "Downloaded image %(image_id)s (%(image_location)s)" +#~ " to volume %(volume_id)s successfully" +#~ msgstr "" + +#~ msgid "" +#~ "Creating volume glance metadata for " +#~ "volume %(volume_id)s backed by image " +#~ "%(image_id)s with: %(vol_metadata)s" +#~ msgstr "" + +#~ msgid "" +#~ "Cloning %(volume_id)s from image %(image_id)s" +#~ " at location %(image_location)s" +#~ msgstr "" + diff --git a/cinder/locale/pl_PL/LC_MESSAGES/cinder.po b/cinder/locale/pl_PL/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..963b3a7d71 --- /dev/null +++ b/cinder/locale/pl_PL/LC_MESSAGES/cinder.po @@ -0,0 +1,10737 @@ +# Polish (Poland) translations for cinder. +# Copyright (C) 2013 ORGANIZATION +# This file is distributed under the same license as the cinder project. +# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Cinder\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-02-03 06:16+0000\n" +"PO-Revision-Date: 2013-07-01 16:14+0000\n" +"Last-Translator: openstackjenkins \n" +"Language-Team: Polish (Poland) " +"(http://www.transifex.com/projects/p/openstack/language/pl_PL/)\n" +"Plural-Forms: nplurals=3; plural=(n==1 ? 0 : n%10>=2 && n%10<=4 && " +"(n%100<10 || n%100>=20) ? 
1 : 2)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:102 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:66 cinder/brick/exception.py:33 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:88 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:107 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:112 +#, python-format +msgid "Volume driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:116 +#, python-format +msgid "Backup driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:120 +#, python-format +msgid "Connection to glance failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:124 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:129 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:133 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:137 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:141 +msgid "Volume driver not ready." +msgstr "" + +#: cinder/exception.py:145 cinder/brick/exception.py:74 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:150 +#, python-format +msgid "Invalid snapshot: %(reason)s" +msgstr "" + +#: cinder/exception.py:154 +#, python-format +msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:159 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:163 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:167 +msgid "The results are invalid." +msgstr "" + +#: cinder/exception.py:171 +#, python-format +msgid "Invalid input received: %(reason)s" +msgstr "" + +#: cinder/exception.py:175 +#, python-format +msgid "Invalid volume type: %(reason)s" +msgstr "" + +#: cinder/exception.py:179 +#, python-format +msgid "Invalid volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:183 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:187 +#, python-format +msgid "Invalid host: %(reason)s" +msgstr "" + +#: cinder/exception.py:193 cinder/brick/exception.py:81 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:197 +#, python-format +msgid "Invalid auth key: %(reason)s" +msgstr "" + +#: cinder/exception.py:201 +#, python-format +msgid "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" +msgstr "" + +#: cinder/exception.py:206 +msgid "Service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:210 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:214 +#, python-format +msgid "The device in the path %(path)s is unavailable: %(reason)s" +msgstr "" + +#: cinder/exception.py:218 +#, python-format +msgid "Expected a uuid but received %(uuid)s." +msgstr "" + +#: cinder/exception.py:222 cinder/brick/exception.py:68 +msgid "Resource could not be found." 
+msgstr "" + +#: cinder/exception.py:228 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:232 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "" +"Volume %(volume_id)s has no administration metadata with key " +"%(metadata_key)s." +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Invalid metadata: %(reason)s" +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Invalid metadata size: %(reason)s" +msgstr "" + +#: cinder/exception.py:250 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:255 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:264 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:269 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s deletion is not allowed with volumes " +"present with the type." +msgstr "" + +#: cinder/exception.py:274 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:278 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:282 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:287 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:291 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:295 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:328 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:332 +#, python-format +msgid "Unknown quota resources %(unknown)s." +msgstr "" + +#: cinder/exception.py:336 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:340 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:344 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:348 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." 
+msgstr "" + +#: cinder/exception.py:352 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:356 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:365 +#, python-format +msgid "Volume Type %(id)s already exists." +msgstr "" + +#: cinder/exception.py:369 +#, python-format +msgid "Volume type encryption for type %(type_id)s already exists." +msgstr "" + +#: cinder/exception.py:373 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:377 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:381 +#, python-format +msgid "Could not find parameter %(param)s" +msgstr "" + +#: cinder/exception.py:385 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:389 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:398 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:402 +#, python-format +msgid "Quota exceeded: code=%(code)s" +msgstr "" + +#: cinder/exception.py:409 +#, python-format +msgid "" +"Requested volume or snapshot exceeds allowed Gigabytes quota. Requested " +"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." +msgstr "" + +#: cinder/exception.py:415 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:419 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:423 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:427 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:432 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:436 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:440 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:444 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:449 +#, python-format +msgid "Glance metadata for volume/snapshot %(id)s cannot be found." +msgstr "" + +#: cinder/exception.py:453 +#, python-format +msgid "Failed to export for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:457 +#, python-format +msgid "Failed to create metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:461 +#, python-format +msgid "Failed to update metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:465 +#, python-format +msgid "Failed to copy metadata to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:469 +#, python-format +msgid "Failed to copy image to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:473 +msgid "Invalid Ceph args provided for backup rbd operation" +msgstr "" + +#: cinder/exception.py:477 +msgid "An error has occurred during backup operation" +msgstr "" + +#: cinder/exception.py:481 +msgid "Backup RBD operation failed" +msgstr "" + +#: cinder/exception.py:485 +#, python-format +msgid "Backup %(backup_id)s could not be found." 
+msgstr "" + +#: cinder/exception.py:489 +msgid "Failed to identify volume backend." +msgstr "" + +#: cinder/exception.py:493 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "" + +#: cinder/exception.py:497 +#, python-format +msgid "Connection to swift failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:501 +#, python-format +msgid "Transfer %(transfer_id)s could not be found." +msgstr "" + +#: cinder/exception.py:505 +#, python-format +msgid "Volume migration failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:509 +#, python-format +msgid "SSH command injection detected: %(command)s" +msgstr "" + +#: cinder/exception.py:513 +#, python-format +msgid "QoS Specs %(specs_id)s already exists." +msgstr "" + +#: cinder/exception.py:517 +#, python-format +msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:522 +#, python-format +msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "No such QoS spec %(specs_id)s." +msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:536 +#, python-format +msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:541 +#, python-format +msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." +msgstr "" + +#: cinder/exception.py:546 +#, python-format +msgid "Invalid qos specs: %(reason)s" +msgstr "" + +#: cinder/exception.py:550 +#, python-format +msgid "QoS Specs %(specs_id)s is still associated with entities." +msgstr "" + +#: cinder/exception.py:554 +#, python-format +msgid "key manager error: %(reason)s" +msgstr "" + +#: cinder/exception.py:560 +msgid "Coraid Cinder Driver exception." +msgstr "" + +#: cinder/exception.py:564 +msgid "Failed to encode json data." +msgstr "" + +#: cinder/exception.py:568 +msgid "Login on ESM failed." +msgstr "" + +#: cinder/exception.py:572 +msgid "Relogin on ESM failed." +msgstr "" + +#: cinder/exception.py:576 +#, python-format +msgid "Group with name \"%(group_name)s\" not found." +msgstr "" + +#: cinder/exception.py:580 +#, python-format +msgid "ESM configure request failed: %(message)s." +msgstr "" + +#: cinder/exception.py:584 +#, python-format +msgid "Coraid ESM not available with reason: %(reason)s." +msgstr "" + +#: cinder/exception.py:589 +msgid "Zadara Cinder Driver exception." 
+msgstr "" + +#: cinder/exception.py:593 +#, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:597 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:601 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:605 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:609 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:613 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:618 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:622 +msgid "SolidFire Cinder Driver exception" +msgstr "" + +#: cinder/exception.py:626 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:630 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:636 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:641 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:645 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:649 cinder/exception.py:662 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:654 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:658 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/manager.py:133 +msgid "Notifying Schedulers of capabilities ..." +msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:105 +#, python-format +msgid "" +"Default quota for resource: %(res)s is set by the default quota flag: " +"quota_%(res)s, it is now deprecated. Please use the the default quota " +"class for default quota." +msgstr "" + +#: cinder/quota.py:748 +#, python-format +msgid "Created reservations %s" +msgstr "" + +#: cinder/quota.py:770 +#, python-format +msgid "Failed to commit reservations %s" +msgstr "" + +#: cinder/quota.py:790 +#, python-format +msgid "Failed to roll back reservations %s" +msgstr "" + +#: cinder/quota.py:876 +msgid "Cannot register resource" +msgstr "" + +#: cinder/quota.py:879 +msgid "Cannot register resources" +msgstr "" + +#: cinder/quota_utils.py:46 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume - " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/quota_utils.py:56 cinder/transfer/api.py:168 +#: cinder/volume/flows/api/create_volume.py:520 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/service.py:95 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:108 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:148 +#, python-format +msgid "" +"Report interval must be less than service down time. Current config " +"service_down_time: %(service_down_time)s, report_interval for this: " +"service is: %(report_interval)s. 
Setting global service_down_time to: " +"%(new_down_time)s" +msgstr "" + +#: cinder/service.py:216 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:255 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:270 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:276 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:298 +#, python-format +msgid "" +"Value of config option %(name)s_workers must be integer greater than 1. " +"Input value ignored." +msgstr "" + +#: cinder/service.py:373 +msgid "serve() can only be called once" +msgstr "" + +#: cinder/service.py:379 cinder/openstack/common/service.py:166 +#: cinder/openstack/common/service.py:384 +msgid "Full set of CONF:" +msgstr "" + +#: cinder/service.py:387 +#, python-format +msgid "%s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Can not translate %s to integer." +msgstr "" + +#: cinder/utils.py:127 +#, python-format +msgid "May specify only one of %s" +msgstr "" + +#: cinder/utils.py:212 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:228 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:412 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:423 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:698 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:759 +#, python-format +msgid "Volume driver %s not initialized" +msgstr "" + +#: cinder/wsgi.py:127 cinder/openstack/common/sslutils.py:50 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: cinder/wsgi.py:130 cinder/openstack/common/sslutils.py:53 +#, python-format +msgid "Unable to find ca_file : %s" +msgstr "" + +#: cinder/wsgi.py:133 cinder/openstack/common/sslutils.py:56 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: cinder/wsgi.py:136 cinder/openstack/common/sslutils.py:59 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:169 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:206 +#, python-format +msgid "Started %(name)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:244 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:313 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." 
+msgstr "" + +#: cinder/api/common.py:92 cinder/api/common.py:126 cinder/volume/api.py:266 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:95 cinder/api/common.py:130 cinder/volume/api.py:263 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:120 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:134 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:162 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:189 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:182 +msgid "Initializing extension manager." +msgstr "" + +#: cinder/api/extensions.py:197 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:235 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:236 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:240 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:256 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:262 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:276 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:287 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:356 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:266 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:463 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:786 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:907 +msgid "subclasses must implement construct()!" 
+msgstr "" + +#: cinder/api/contrib/admin_actions.py:81 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:227 +#: cinder/api/contrib/volume_transfer.py:157 +#: cinder/api/contrib/volume_transfer.py:193 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:224 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:60 +msgid "Snapshot not found." +msgstr "" + +#: cinder/api/contrib/hosts.py:86 cinder/api/openstack/wsgi.py:245 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:136 +#, python-format +msgid "Host '%s' could not be found." +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:168 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:180 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:206 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:214 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:111 +msgid "Please specify a name for QoS specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:220 +msgid "Failed to disassociate qos specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:222 +msgid "Qos specs still in use." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:298 +#: cinder/api/contrib/qos_specs_manage.py:351 +msgid "Volume Type id must not be None." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:72 +msgid "Missing required element quota_class_set in request body." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:81 +msgid "Quota class limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:85 +msgid "Quota class limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:60 +msgid "Quota limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quotas.py:65 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:100 +msgid "Missing required element quota_set in request body." +msgstr "" + +#: cinder/api/contrib/quotas.py:111 +#, python-format +msgid "Bad key(s) in quota set: %s" +msgstr "" + +#: cinder/api/contrib/scheduler_hints.py:36 +msgid "Malformed scheduler_hints attribute" +msgstr "" + +#: cinder/api/contrib/services.py:84 +msgid "" +"Query by service parameter is deprecated. Please use binary parameter " +"instead." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:51 +msgid "'status' must be specified." 
+msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:61 +#, python-format +msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:67 +#, python-format +msgid "" +"Provided snapshot status %(provided)s not allowed for snapshot with " +"status %(current)s." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:79 +msgid "progress must be an integer percentage" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:101 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:105 +#: cinder/api/v1/snapshot_metadata.py:75 cinder/api/v1/volume_metadata.py:75 +#: cinder/api/v2/snapshot_metadata.py:75 cinder/api/v2/volume_metadata.py:74 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:108 +#: cinder/api/v1/snapshot_metadata.py:79 cinder/api/v1/volume_metadata.py:79 +#: cinder/api/v2/snapshot_metadata.py:79 cinder/api/v2/volume_metadata.py:78 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:150 +msgid "" +"Key names can only contain alphanumeric characters, underscores, periods," +" colons and hyphens." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:99 +#, python-format +msgid "" +"Invalid request to attach volume to an instance %(instance_uuid)s and a " +"host %(host_name)s simultaneously" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:107 +msgid "Invalid request to attach volume to an invalid target" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:111 +msgid "" +"Invalid request to attach volume with an invalid mode. Attaching mode " +"should be 'rw' or 'ro'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:196 +msgid "Unable to fetch connection information from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:216 +msgid "Unable to terminate volume connection from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:229 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:237 +msgid "Bad value for 'force' parameter." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:240 +msgid "'force' is not string or bool." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:280 +msgid "New volume size must be specified as an integer." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:299 +msgid "Must specify readonly in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:307 +msgid "Bad value for 'readonly'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:311 +msgid "'readonly' not string or bool" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:325 +msgid "New volume type must be specified." 
+msgstr "" + +#: cinder/api/contrib/volume_transfer.py:131 +msgid "Listing volume transfers" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:147 +#, python-format +msgid "Creating new volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:162 +#, python-format +msgid "Creating transfer of volume %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:183 +#, python-format +msgid "Accepting volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:196 +#, python-format +msgid "Accepting transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:217 +#, python-format +msgid "Delete transfer with id: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:64 +msgid "key_size must be non-negative" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:67 +msgid "key_size must be an integer" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:73 +msgid "provider must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:75 +msgid "control_location must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:83 +#, python-format +msgid "Valid control location are: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:111 +msgid "Cannot create encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:115 +msgid "Create body is not valid." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:157 +msgid "Cannot delete encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/middleware/auth.py:108 +msgid "Invalid service catalog json." +msgstr "" + +#: cinder/api/middleware/fault.py:44 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:53 cinder/api/openstack/wsgi.py:984 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/fault.py:69 +#, python-format +msgid "%(exception)s: %(explanation)s" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:55 cinder/api/middleware/sizelimit.py:64 +#: cinder/api/middleware/sizelimit.py:78 +msgid "Request is too large." +msgstr "" + +#: cinder/api/openstack/__init__.py:69 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:80 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:104 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:126 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." 
+msgstr "" + +#: cinder/api/openstack/wsgi.py:220 cinder/api/openstack/wsgi.py:634 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:639 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:677 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:682 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:685 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:793 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:799 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:803 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:914 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:917 cinder/api/openstack/wsgi.py:930 +#: cinder/api/v1/snapshot_metadata.py:53 cinder/api/v1/snapshot_metadata.py:71 +#: cinder/api/v1/snapshot_metadata.py:96 cinder/api/v1/snapshot_metadata.py:121 +#: cinder/api/v1/volume_metadata.py:53 cinder/api/v1/volume_metadata.py:71 +#: cinder/api/v1/volume_metadata.py:96 cinder/api/v1/volume_metadata.py:121 +#: cinder/api/v2/snapshot_metadata.py:53 cinder/api/v2/snapshot_metadata.py:71 +#: cinder/api/v2/snapshot_metadata.py:96 cinder/api/v2/snapshot_metadata.py:121 +#: cinder/api/v2/volume_metadata.py:52 cinder/api/v2/volume_metadata.py:70 +#: cinder/api/v2/volume_metadata.py:95 cinder/api/v2/volume_metadata.py:120 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:927 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:939 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:987 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:139 cinder/api/v2/limits.py:138 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:264 cinder/api/v2/limits.py:261 +msgid "This request was rate-limited." 
+msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:37 cinder/api/v1/snapshot_metadata.py:117 +#: cinder/api/v1/snapshot_metadata.py:156 cinder/api/v2/snapshot_metadata.py:37 +#: cinder/api/v2/snapshot_metadata.py:117 +#: cinder/api/v2/snapshot_metadata.py:156 +msgid "snapshot does not exist" +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:139 +#: cinder/api/v1/snapshot_metadata.py:149 cinder/api/v1/volume_metadata.py:139 +#: cinder/api/v1/volume_metadata.py:149 cinder/api/v2/snapshot_metadata.py:139 +#: cinder/api/v2/snapshot_metadata.py:149 cinder/api/v2/volume_metadata.py:138 +#: cinder/api/v2/volume_metadata.py:148 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:119 cinder/api/v2/snapshots.py:120 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:184 +msgid "'volume_id' must be specified" +msgstr "" + +#: cinder/api/v1/snapshots.py:182 cinder/api/v2/snapshots.py:193 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:186 cinder/api/v2/snapshots.py:202 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:37 cinder/api/v1/volume_metadata.py:117 +#: cinder/api/v1/volume_metadata.py:156 cinder/api/v2/volume_metadata.py:36 +#: cinder/api/v2/volume_metadata.py:116 cinder/api/v2/volume_metadata.py:155 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:111 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:290 cinder/api/v2/volumes.py:228 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:344 cinder/api/v1/volumes.py:348 +#: cinder/api/v2/volumes.py:298 cinder/api/v2/volumes.py:302 +msgid "Invalid imageRef provided." +msgstr "" + +#: cinder/api/v1/volumes.py:388 cinder/api/v2/volumes.py:354 +#, python-format +msgid "snapshot id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:401 +#, python-format +msgid "source vol id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:412 cinder/api/v2/volumes.py:377 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/v1/volumes.py:496 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/snapshots.py:111 cinder/api/v2/snapshots.py:126 +#: cinder/api/v2/snapshots.py:267 +msgid "Snapshot could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:174 cinder/api/v2/snapshots.py:234 +#: cinder/api/v2/volumes.py:313 cinder/api/v2/volumes.py:419 +#, python-format +msgid "Missing required element '%s' in request body" +msgstr "" + +#: cinder/api/v2/snapshots.py:190 cinder/api/v2/volumes.py:217 +#: cinder/api/v2/volumes.py:234 cinder/api/v2/volumes.py:449 +msgid "Volume could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:230 cinder/api/v2/volumes.py:415 +msgid "Missing request body" +msgstr "" + +#: cinder/api/v2/types.py:70 +msgid "Volume type not found" +msgstr "" + +#: cinder/api/v2/volumes.py:237 +msgid "Volume cannot be deleted while in attached state" +msgstr "" + +#: cinder/api/v2/volumes.py:343 +msgid "Volume type not found." 
+msgstr "" + +#: cinder/api/v2/volumes.py:366 +#, python-format +msgid "source volume id:%s not found" +msgstr "" + +#: cinder/api/v2/volumes.py:472 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:66 +msgid "Backup status must be available or error" +msgstr "" + +#: cinder/backup/api.py:105 +msgid "Volume to be backed up must be available" +msgstr "" + +#: cinder/backup/api.py:140 +msgid "Backup status must be available" +msgstr "" + +#: cinder/backup/api.py:145 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:154 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:170 +msgid "Volume to be restored to must be available" +msgstr "" + +#: cinder/backup/api.py:176 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." +msgstr "" + +#: cinder/backup/api.py:181 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:97 +msgid "NULL host not allowed for volume backend lookup." +msgstr "" + +#: cinder/backup/manager.py:100 +#, python-format +msgid "Checking hostname '%s' for backend info." +msgstr "" + +#: cinder/backup/manager.py:107 +#, python-format +msgid "Backend not found in hostname (%s) so using default." +msgstr "" + +#: cinder/backup/manager.py:117 +#, python-format +msgid "Manager requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:120 cinder/backup/manager.py:132 +msgid "Fetching default backend." +msgstr "" + +#: cinder/backup/manager.py:123 +#, python-format +msgid "Volume manager for backend '%s' does not exist." +msgstr "" + +#: cinder/backup/manager.py:129 +#, python-format +msgid "Driver requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:147 +#, python-format +msgid "" +"Registering backend %(backend)s (host=%(host)s " +"backend_name=%(backend_name)s)." +msgstr "" + +#: cinder/backup/manager.py:154 +#, python-format +msgid "Registering default backend %s." +msgstr "" + +#: cinder/backup/manager.py:158 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)." +msgstr "" + +#: cinder/backup/manager.py:165 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s." +msgstr "" + +#: cinder/backup/manager.py:184 +msgid "Cleaning up incomplete backup operations." +msgstr "" + +#: cinder/backup/manager.py:189 +#, python-format +msgid "Resetting volume %s to available (was backing-up)." +msgstr "" + +#: cinder/backup/manager.py:194 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)." +msgstr "" + +#: cinder/backup/manager.py:206 +#, python-format +msgid "Resetting backup %s to error (was creating)." +msgstr "" + +#: cinder/backup/manager.py:212 +#, python-format +msgid "Resetting backup %s to available (was restoring)." +msgstr "" + +#: cinder/backup/manager.py:217 +#, python-format +msgid "Resuming delete on backup: %s." +msgstr "" + +#: cinder/backup/manager.py:225 +#, python-format +msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:237 +#, python-format +msgid "" +"Create backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s." 
+msgstr "" + +#: cinder/backup/manager.py:249 +#, python-format +msgid "" +"Create backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:282 +#, python-format +msgid "Create backup finished. backup: %s." +msgstr "" + +#: cinder/backup/manager.py:286 +#, python-format +msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:299 +#, python-format +msgid "" +"Restore backup aborted: expected volume status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:310 +#, python-format +msgid "" +"Restore backup aborted: expected backup status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:329 +#, python-format +msgid "" +"Restore backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:360 +#, python-format +msgid "" +"Restore backup finished, backup %(backup_id)s restored to volume " +"%(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:379 +#, python-format +msgid "Delete backup started, backup: %s." +msgstr "" + +#: cinder/backup/manager.py:386 +#, python-format +msgid "" +"Delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:399 +#, python-format +msgid "" +"Delete backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:422 +#, python-format +msgid "Delete backup finished, backup %s deleted." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:116 +msgid "" +"rbd striping not supported - ignoring configuration settings for rbd " +"striping" +msgstr "" + +#: cinder/backup/drivers/ceph.py:147 +#, python-format +msgid "invalid user '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:213 +msgid "backup_id required" +msgstr "" + +#: cinder/backup/drivers/ceph.py:224 +#, python-format +msgid "discarding %(length)s bytes from offset %(offset)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:232 +#, python-format +msgid "writing zeroes chunk %d" +msgstr "" + +#: cinder/backup/drivers/ceph.py:246 +#, python-format +msgid "transferring data between '%(src)s' and '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:250 +#, python-format +msgid "%(chunks)s chunks of %(bytes)s bytes to be transferred" +msgstr "" + +#: cinder/backup/drivers/ceph.py:269 +#, python-format +msgid "transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:279 +#, python-format +msgid "transferring remaining %s bytes" +msgstr "" + +#: cinder/backup/drivers/ceph.py:295 +#, python-format +msgid "creating base image '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:322 cinder/backup/drivers/ceph.py:603 +#, python-format +msgid "deleting backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:325 +msgid "no backup snapshot to delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:361 +#, python-format +msgid "trying diff format name format basename='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:369 +#, python-format +msgid "image %s not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:377 +#, python-format +msgid "base image still has %s snapshots so skipping base image delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:382 +#, python-format +msgid "deleting base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:389 +#, python-format +msgid "image busy, retrying %(retries)s more time(s) in %(delay)ss" +msgstr "" + +#: cinder/backup/drivers/ceph.py:394 +msgid "max retries reached - raising error" +msgstr "" + +#: cinder/backup/drivers/ceph.py:397 +#, python-format +msgid "base backup image='%s' deleted)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:407 +#, python-format +msgid "deleting source snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:453 +#, python-format +msgid "performing differential transfer from '%(src)s' to '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:478 +#, python-format +msgid "rbd diff op failed - (ret=%(ret)s stderr=%(stderr)s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:488 +#, python-format +msgid "image '%s' not found - trying diff format name" +msgstr "" + +#: cinder/backup/drivers/ceph.py:493 +#, python-format +msgid "diff format image '%s' not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:528 +#, python-format +msgid "using --from-snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:543 +#, python-format +msgid "source snap '%s' is stale so deleting" +msgstr "" + +#: cinder/backup/drivers/ceph.py:555 +#, python-format +msgid "" +"snap='%(snap)s' does not exist in base image='%(base)s' - aborting " +"incremental backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:566 +#, python-format +msgid "creating backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:586 +#, python-format +msgid "differential backup transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:595 +msgid "differential backup transfer failed" +msgstr "" + +#: 
cinder/backup/drivers/ceph.py:625 +#, python-format +msgid "creating base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:634 +msgid "copying data" +msgstr "" + +#: cinder/backup/drivers/ceph.py:694 +#, python-format +msgid "looking for snapshot of backup base '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:697 +#, python-format +msgid "backup base '%s' has no snapshots" +msgstr "" + +#: cinder/backup/drivers/ceph.py:704 +#, python-format +msgid "backup '%s' has no snapshot" +msgstr "" + +#: cinder/backup/drivers/ceph.py:708 +#, python-format +msgid "backup should only have one snapshot but instead has %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:713 +#, python-format +msgid "found snapshot '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:734 +msgid "need non-zero volume size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:751 +#, python-format +msgid "Starting backup of volume='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:764 +msgid "forcing full backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:776 +#, python-format +msgid "backup '%s' finished." +msgstr "" + +#: cinder/backup/drivers/ceph.py:834 +msgid "adjusting restore vol size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:846 +#, python-format +msgid "trying incremental restore from base='%(base)s' snap='%(snap)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:858 +msgid "differential restore failed, trying full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:869 +#, python-format +msgid "restore transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:916 +#, python-format +msgid "rbd has %s extents" +msgstr "" + +#: cinder/backup/drivers/ceph.py:938 +msgid "dest volume is original volume - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:959 +msgid "destination has extents - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:964 +#, python-format +msgid "no restore point found for backup='%s', forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:995 +msgid "forcing full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1005 +#, python-format +msgid "starting restore from Ceph backup=%(src)s to volume=%(dest)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1016 +msgid "volume_file does not support fileno() so skipping fsync()" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1021 +msgid "restore finished successfully." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:1023 +#, python-format +msgid "restore finished with error - %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1029 +#, python-format +msgid "delete started for backup=%s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1034 +msgid "rbd image not found but continuing anyway so that db entry can be removed" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1037 +#, python-format +msgid "delete '%s' finished with warning" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1039 +#, python-format +msgid "delete '%s' finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:106 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:123 +#, python-format +msgid "single_user auth mode enabled, but %(param)s not set" +msgstr "" + +#: cinder/backup/drivers/swift.py:141 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:146 +#, python-format +msgid "container %s does not exist" +msgstr "" + +#: cinder/backup/drivers/swift.py:151 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/drivers/swift.py:157 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:173 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:182 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:192 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:209 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/drivers/swift.py:214 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:219 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:224 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/drivers/swift.py:234 +#, python-format +msgid "volume size %d is invalid." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:248 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:271 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/drivers/swift.py:278 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:287 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/drivers/swift.py:291 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/drivers/swift.py:297 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:301 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:304 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:312 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/drivers/swift.py:328 cinder/backup/drivers/tsm.py:324 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/drivers/swift.py:345 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/drivers/swift.py:350 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:356 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/drivers/swift.py:362 +#, python-format +msgid "" +"restoring object from swift. backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:378 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/drivers/swift.py:401 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:409 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:423 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:428 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:432 cinder/backup/drivers/tsm.py:378 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:446 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:455 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:458 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:468 cinder/backup/drivers/tsm.py:440 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/backup/drivers/tsm.py:85 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to create device hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:143 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to obtain backup success notification from " +"server.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:173 +#, python-format +msgid "" +"restore: %(vol_id)s Failed.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:199 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a block device." +msgstr "" + +#: cinder/backup/drivers/tsm.py:206 +#, python-format +msgid "backup: %(vol_id)s Failed. Cannot obtain real path to device %(path)s." +msgstr "" + +#: cinder/backup/drivers/tsm.py:213 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a file." +msgstr "" + +#: cinder/backup/drivers/tsm.py:260 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to remove backup hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:286 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to TSM, volume path: " +"%(volume_path)s," +msgstr "" + +#: cinder/backup/drivers/tsm.py:298 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:308 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:338 +#, python-format +msgid "" +"restore: starting restore of backup from TSM to volume %(volume_id)s, " +"backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:352 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:362 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:413 +#, python-format +msgid "" +"delete: %(vol_id)s Failed to run dsmc with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:421 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments with " +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:432 +#, python-format +msgid "" +"delete: %(vol_id)s Failed with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/brick/exception.py:55 +#, python-format +msgid "Exception in string format operation. 
msg='%s'" +msgstr "" + +#: cinder/brick/exception.py:85 +msgid "We are unable to locate any Fibre Channel devices." +msgstr "" + +#: cinder/brick/exception.py:89 +msgid "Unable to find a Fibre Channel volume device." +msgstr "" + +#: cinder/brick/exception.py:93 +#, python-format +msgid "Volume device not found at %(device)s." +msgstr "" + +#: cinder/brick/exception.py:97 +#, python-format +msgid "Unable to find Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:101 +#, python-format +msgid "Failed to create Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:105 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:109 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:113 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:117 +#, python-format +msgid "Connect to volume via protocol %(protocol)s not supported." +msgstr "" + +#: cinder/brick/initiator/connector.py:127 +#, python-format +msgid "Invalid InitiatorConnector protocol specified %(protocol)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:140 +#, python-format +msgid "Failed to access the device on the path %(path)s: %(error)s %(info)s." +msgstr "" + +#: cinder/brick/initiator/connector.py:229 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. Try" +" number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:242 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:317 +#, python-format +msgid "Could not find the iSCSI Initiator File %s" +msgstr "" + +#: cinder/brick/initiator/connector.py:609 +msgid "We are unable to locate any Fibre Channel devices" +msgstr "" + +#: cinder/brick/initiator/connector.py:619 +#, python-format +msgid "Looking for Fibre Channel dev %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:629 +msgid "Fibre Channel volume device not found." +msgstr "" + +#: cinder/brick/initiator/connector.py:633 +#, python-format +msgid "Fibre volume not yet found. Will rescan & retry. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:649 +#, python-format +msgid "Found Fibre Channel volume %(name)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:658 +#, python-format +msgid "Multipath device discovered %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:776 +#, python-format +msgid "AoE volume not yet found at: %(path)s. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:789 +#, python-format +msgid "Found AoE device %(path)s (after %(tries)s rediscover)" +msgstr "" + +#: cinder/brick/initiator/connector.py:815 +#, python-format +msgid "aoe-discover: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:825 +#, python-format +msgid "aoe-revalidate %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:834 +#, python-format +msgid "aoe-flush %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:858 +msgid "" +"Connection details not present. RemoteFsClient may not initialize " +"properly." 
+msgstr "" + +#: cinder/brick/initiator/connector.py:915 +msgid "Invalid connection_properties specified no device_path attribute" +msgstr "" + +#: cinder/brick/initiator/linuxfc.py:50 cinder/brick/initiator/linuxfc.py:56 +msgid "systool is not installed" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:99 +#: cinder/brick/initiator/linuxscsi.py:107 +#: cinder/brick/initiator/linuxscsi.py:124 +#, python-format +msgid "multipath call failed exit (%(code)s)" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:145 +#, python-format +msgid "Couldn't find multipath device %(line)s" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:149 +#, python-format +msgid "Found multipath device = %(mdev)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:140 +msgid "Attempting recreate of backing lun..." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:158 +#, python-format +msgid "" +"Failed to recover attempt to create iscsi backing lun for volume " +"id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:177 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:184 +#, python-format +msgid "" +"Created volume path %(vp)s,\n" +"content: %(vc)%" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:216 cinder/brick/iscsi/iscsi.py:365 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:227 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:258 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:262 +#, python-format +msgid "Volume path %s does not exist, nothing to remove." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:280 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:290 cinder/brick/iscsi/iscsi.py:550 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:375 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:469 +msgid "cinder-rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:489 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:513 cinder/brick/iscsi/iscsi.py:522 +#, python-format +msgid "Failed to create iscsi target for volume id:%s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:532 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:542 +#, python-format +msgid "Failed to remove iscsi target for volume id:%s." 
+msgstr "" + +#: cinder/brick/iscsi/iscsi.py:571 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 cinder/brick/local_dev/lvm.py:158 +#: cinder/brick/local_dev/lvm.py:474 cinder/brick/local_dev/lvm.py:503 +#: cinder/brick/local_dev/lvm.py:546 cinder/brick/local_dev/lvm.py:609 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 cinder/brick/local_dev/lvm.py:159 +#: cinder/brick/local_dev/lvm.py:475 cinder/brick/local_dev/lvm.py:504 +#: cinder/brick/local_dev/lvm.py:547 cinder/brick/local_dev/lvm.py:610 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 cinder/brick/local_dev/lvm.py:160 +#: cinder/brick/local_dev/lvm.py:476 cinder/brick/local_dev/lvm.py:505 +#: cinder/brick/local_dev/lvm.py:548 cinder/brick/local_dev/lvm.py:611 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, python-format +msgid "Unable to locate Volume Group %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:157 +msgid "Error querying thin pool about data_percent" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:370 +#, python-format +msgid "Unable to find VG: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:420 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:473 +msgid "Error creating Volume" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:489 +#, python-format +msgid "Unable to find LV: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:502 +msgid "Error creating snapshot" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:545 +msgid "Error activating LV" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:563 +#, python-format +msgid "Error reported running lvremove: CMD: %(command)s, RESPONSE: %(response)s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:568 +msgid "Attempting udev settle and retry of lvremove..." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:608 +msgid "Error extending Volume" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:39 +msgid "nfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:45 +msgid "glusterfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:86 +#, python-format +msgid "Already mounted: %s" +msgstr "" + +#: cinder/common/config.py:125 +msgid "Deploy v1 of the Cinder API." +msgstr "" + +#: cinder/common/config.py:128 +msgid "Deploy v2 of the Cinder API." +msgstr "" + +#: cinder/common/sqlalchemyutils.py:66 +#: cinder/openstack/common/db/sqlalchemy/utils.py:72 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:114 +#: cinder/openstack/common/db/sqlalchemy/utils.py:120 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/compute/nova.py:97 +#, python-format +msgid "Novaclient connection created using URL: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:63 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:190 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:843 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1842 +#, python-format +msgid "VolumeType %s deletion failed, VolumeType in use." 
+msgstr "" + +#: cinder/db/sqlalchemy/api.py:2530 +#, python-format +msgid "No backup with id %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2615 +msgid "Volume must be available" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2639 +#, python-format +msgid "Volume in unexpected state %s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2662 +#, python-format +msgid "" +"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " +"%(status)s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:37 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:64 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:240 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:269 +msgid "Downgrade from initial Cinder install is unsupported." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:49 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:74 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:105 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:45 +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:48 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:46 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:127 +msgid "Dropping foreign key reservations_ibfk_1 failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:133 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:140 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:147 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:60 +msgid "Exception while creating table 'volume_glance_metadata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:75 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:68 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:58 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:61 +msgid "transfers table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:31 +msgid "migrations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:61 +#, python-format +msgid "Table |%s| not created" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:37 +#, python-format +msgid "Exception while dropping table %s." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:100 +#, python-format +msgid "Exception while creating table %s." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:34 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:43 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:49 +#, python-format +msgid "Column |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:92 +msgid "encryption_key_id column not dropped from volumes" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:100 +msgid "encryption_key_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:105 +msgid "volume_type_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:113 +msgid "encryption table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:49 +msgid "Table quality_of_service_specs not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:60 +msgid "Added qos_specs_id column to volume type table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:85 +msgid "Dropping foreign key volume_types_ibfk_1 failed" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:93 +msgid "Dropping qos_specs_id column failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:100 +msgid "Dropping quality_of_service_specs table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:59 +msgid "volume_admin_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:48 +msgid "" +"Found existing 'default' entries in the quota_classes table. Skipping " +"insertion of default values." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:72 +msgid "Added default quota class data into the DB." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:74 +msgid "Default quota class data not inserted into the DB." +msgstr "" + +#: cinder/image/glance.py:161 cinder/image/glance.py:169 +#, python-format +msgid "Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:94 cinder/image/image_utils.py:199 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/image/image_utils.py:101 +#, python-format +msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:109 cinder/image/image_utils.py:192 +#, python-format +msgid "" +"Size is %(image_size)dGB and doesn't fit in a volume of size " +"%(volume_size)dGB." +msgstr "" + +#: cinder/image/image_utils.py:157 +#, python-format +msgid "" +"qemu-img is not installed and image is of type %s. Only RAW images can " +"be used if qemu-img is not installed." +msgstr "" + +#: cinder/image/image_utils.py:164 +msgid "" +"qemu-img is not installed and the disk format is not specified. Only RAW" +" images can be used if qemu-img is not installed." 
+msgstr "" + +#: cinder/image/image_utils.py:178 +#, python-format +msgid "Copying image from %(tmp)s to volume %(dest)s - size: %(size)s" +msgstr "" + +#: cinder/image/image_utils.py:206 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:224 +#, python-format +msgid "Converted to %(vol_format)s, but format is now %(file_format)s" +msgstr "" + +#: cinder/image/image_utils.py:260 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:78 +msgid "" +"config option keymgr.fixed_key has not been defined: some operations may " +"fail unexpectedly" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:80 +msgid "keymgr.fixed_key not defined" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:134 +#, python-format +msgid "Not deleting key %s" +msgstr "" + +#: cinder/openstack/common/eventlet_backdoor.py:140 +#, python-format +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/fileutils.py:64 +#, python-format +msgid "Reloading cached file %s" +msgstr "" + +#: cinder/openstack/common/gettextutils.py:252 +msgid "Message objects do not support addition." +msgstr "" + +#: cinder/openstack/common/gettextutils.py:261 +msgid "" +"Message objects do not support str() because they may contain non-ascii " +"characters. Please use unicode() or translate() instead." +msgstr "" + +#: cinder/openstack/common/imageutils.py:96 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:189 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:200 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:227 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:235 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/log.py:301 +#, python-format +msgid "Deprecated: %s" +msgstr "" + +#: cinder/openstack/common/log.py:402 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:453 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:623 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:82 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:89 +#: cinder/tests/brick/test_brick_connector.py:466 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:129 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:136 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:43 +#, python-format +msgid "Unexpected argument for periodic task creation: %(arg)s." 
+msgstr "" + +#: cinder/openstack/common/periodic_task.py:134 +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:139 +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:177 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:186 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." +msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:127 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/openstack/common/processutils.py:142 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:167 +#: cinder/openstack/common/processutils.py:239 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:345 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:179 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/openstack/common/processutils.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:318 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:220 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/openstack/common/processutils.py:224 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/openstack/common/service.py:175 +#: cinder/openstack/common/service.py:269 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:187 +msgid "Exception during rpc cleanup." 
+msgstr "" + +#: cinder/openstack/common/service.py:238 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:275 +msgid "Unhandled exception" +msgstr "" + +#: cinder/openstack/common/service.py:308 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/openstack/common/service.py:327 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/openstack/common/service.py:337 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/openstack/common/service.py:354 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/openstack/common/service.py:358 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/service.py:362 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/openstack/common/service.py:392 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/openstack/common/service.py:410 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/openstack/common/sslutils.py:98 +#, python-format +msgid "Invalid SSL version : %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:86 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/strutils.py:182 +#, python-format +msgid "Invalid string format: %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:189 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/openstack/common/versionutils.py:69 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and " +"may be removed in %(remove_in)s." +msgstr "" + +#: cinder/openstack/common/versionutils.py:73 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s and may be removed in " +"%(remove_in)s. It will not be superseded." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:29 +msgid "An unknown error occurred in crypto utils." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:36 +#, python-format +msgid "Block size of %(given)d is too big, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:45 +#, python-format +msgid "Length of %(given)d is too long, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/db/exception.py:44 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:487 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:538 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:610 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/utils.py:33 +msgid "Sort key supplied was not valid." +msgstr "" + +#: cinder/openstack/common/notifier/api.py:129 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:145 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:164 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. 
Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:105 +#, python-format +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:83 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:216 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:299 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:345 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "received %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:422 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:423 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:280 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:459 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:594 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:597 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:631 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:640 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:668 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." 
+msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:166 +#: cinder/openstack/common/rpc/impl_qpid.py:164 +msgid "Failed to process message... skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:477 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:499 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:536 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:552 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:606 +#: cinder/openstack/common/rpc/impl_qpid.py:507 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:624 +#: cinder/openstack/common/rpc/impl_qpid.py:522 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:628 +#: cinder/openstack/common/rpc/impl_qpid.py:526 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:667 +#: cinder/openstack/common/rpc/impl_qpid.py:561 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:84 +#, python-format +msgid "Invalid value for qpid_topology_version: %d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:455 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:461 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:474 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:534 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:101 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:136 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:137 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:138 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:146 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:158 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:200 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:205 +msgid "You cannot send on this socket." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:267 +#, python-format +msgid "Running func with context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:305 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:387 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:437 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:443 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:475 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:481 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:497 +#, python-format +msgid "Required IPC directory does not exist at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:506 +#, python-format +msgid "Permission denied to IPC directory at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:509 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:543 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:562 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:590 +msgid "Skipping topic registration. Already registered." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:597 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:649 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:662 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:675 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:678 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:681 +#, python-format +msgid "Received message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:682 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:691 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:698 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:721 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:724 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:728 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:731 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:771 +#, python-format +msgid "topic is %s." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:815 +#, python-format +msgid "rpc_zmq_matchmaker = %(orig)s is deprecated; use %(new)s instead" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#: cinder/openstack/common/rpc/matchmaker_ring.py:79 +#: cinder/openstack/common/rpc/matchmaker_ring.py:97 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:54 +#, python-format +msgid "extra_spec requirement '%(req)s' does not match '%(cap)s'" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:67 +#, python-format +msgid "%(host_state)s fails resource_type extra_specs requirements" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:43 +msgid "Re-scheduling is disabled." +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:52 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/scheduler/driver.py:69 +msgid "Must implement host_passes_filters" +msgstr "" + +#: cinder/scheduler/driver.py:74 +msgid "Must implement find_retype_host" +msgstr "" + +#: cinder/scheduler/driver.py:78 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:82 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:98 +#, python-format +msgid "cannot place volume %(id)s on %(host)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:114 +#, python-format +msgid "No valid hosts for volume %(id)s with type %(type)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:125 +#, python-format +msgid "" +"Current host not valid for volume %(id)s with type %(type)s, migration " +"not allowed" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:156 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:174 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:207 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:259 +#, python-format +msgid "Filtered %s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:276 +#, python-format +msgid "Choosing %s" +msgstr "" + +#: cinder/scheduler/host_manager.py:264 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:269 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:294 +#, python-format +msgid "volume service is down or disabled. (host: %s)" +msgstr "" + +#: cinder/scheduler/manager.py:63 +msgid "" +"ChanceScheduler and SimpleScheduler have been deprecated due to lack of " +"support for advanced features like: volume types, volume encryption, QoS " +"etc. These two schedulers can be fully replaced by FilterScheduler with " +"certain combination of filters and weighers." 
+msgstr "" + +#: cinder/scheduler/manager.py:98 cinder/scheduler/manager.py:100 +msgid "Failed to create scheduler manager volume flow" +msgstr "" + +#: cinder/scheduler/manager.py:159 +msgid "New volume type not specified in request_spec." +msgstr "" + +#: cinder/scheduler/manager.py:174 +#, python-format +msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." +msgstr "" + +#: cinder/scheduler/manager.py:192 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:68 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%s'" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:43 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:57 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:53 +msgid "No volume_id provided to populate a request_spec from" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:116 +#, python-format +msgid "Failed to schedule_create_volume: %(cause)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:135 +#, python-format +msgid "Failed notifying on %(topic)s payload %(payload)s" +msgstr "" + +#: cinder/tests/fake_driver.py:57 cinder/volume/driver.py:784 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:76 cinder/volume/driver.py:884 +#, python-format +msgid "FAKE ISER: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:97 +msgid "local_path not implemented" +msgstr "" + +#: cinder/tests/fake_driver.py:124 cinder/tests/fake_driver.py:129 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:70 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:78 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:94 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:97 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:58 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_netapp_nfs.py:360 +#, python-format +msgid "Share %(share)s and file name %(file_name)s" +msgstr "" + +#: cinder/tests/test_rbd.py:768 cinder/volume/drivers/rbd.py:175 +msgid "flush() not supported in this version of librbd" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:260 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1507 +#, python-format +msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1510 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1515 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:60 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:61 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: 
cinder/tests/test_xiv_ds8k.py:102 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/tests/api/contrib/test_backups.py:741 +msgid "Invalid input" +msgstr "" + +#: cinder/tests/integrated/test_login.py:29 +#, python-format +msgid "volume: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:32 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:42 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:50 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:58 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:100 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:103 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:121 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:148 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:159 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:166 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/transfer/api.py:68 +msgid "Volume in unexpected state" +msgstr "" + +#: cinder/transfer/api.py:102 cinder/volume/api.py:367 +msgid "status must be available" +msgstr "" + +#: cinder/transfer/api.py:119 +#, python-format +msgid "Failed to create transfer record for %s" +msgstr "" + +#: cinder/transfer/api.py:136 +#, python-format +msgid "Attempt to transfer %s with invalid auth key." +msgstr "" + +#: cinder/transfer/api.py:156 cinder/volume/flows/api/create_volume.py:508 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/transfer/api.py:182 +#, python-format +msgid "Failed to update quota donating volumetransfer id %s" +msgstr "" + +#: cinder/transfer/api.py:199 +#, python-format +msgid "Volume %s has been transferred." 
+msgstr "" + +#: cinder/volume/api.py:143 +#, python-format +msgid "Unable to query if %s is in the availability zone set" +msgstr "" + +#: cinder/volume/api.py:171 cinder/volume/api.py:173 +msgid "Failed to create api volume flow" +msgstr "" + +#: cinder/volume/api.py:202 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:214 +#, python-format +msgid "Volume status must be available or error, but current status is: %s" +msgstr "" + +#: cinder/volume/api.py:224 +msgid "Volume cannot be deleted while migrating" +msgstr "" + +#: cinder/volume/api.py:229 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:285 cinder/volume/api.py:350 +#: cinder/volume/qos_specs.py:240 cinder/volume/volume_types.py:67 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:370 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:377 +msgid "status must be in-use to detach" +msgstr "" + +#: cinder/volume/api.py:388 +msgid "Volume status must be available to reserve" +msgstr "" + +#: cinder/volume/api.py:464 +msgid "Snapshot cannot be created while volume is migrating" +msgstr "" + +#: cinder/volume/api.py:468 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:490 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:502 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:553 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/api.py:581 cinder/volume/flows/api/create_volume.py:208 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:585 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:589 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:720 cinder/volume/api.py:772 +msgid "Volume status must be available/in-use." +msgstr "" + +#: cinder/volume/api.py:723 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/api.py:752 +msgid "Volume status must be available to extend." +msgstr "" + +#: cinder/volume/api.py:757 +#, python-format +msgid "" +"New size for extend must be greater than current size. (current: " +"%(size)s, extended: %(new_size)s)" +msgstr "" + +#: cinder/volume/api.py:778 +msgid "Volume is already part of an active migration" +msgstr "" + +#: cinder/volume/api.py:784 +msgid "volume must not have snapshots" +msgstr "" + +#: cinder/volume/api.py:797 +#, python-format +msgid "No available service named %s" +msgstr "" + +#: cinder/volume/api.py:803 +msgid "Destination host must be different than current host" +msgstr "" + +#: cinder/volume/api.py:833 +msgid "Source volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:837 +msgid "Destination volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:842 +#, python-format +msgid "Destination has migration_status %(stat)s, expected %(exp)s." +msgstr "" + +#: cinder/volume/api.py:853 +msgid "Volume status must be available to update readonly flag." 
+msgstr "" + +#: cinder/volume/api.py:862 +#, python-format +msgid "Unable to update type due to incorrect status on volume: %s" +msgstr "" + +#: cinder/volume/api.py:868 +#, python-format +msgid "Volume %s is already part of an active migration." +msgstr "" + +#: cinder/volume/api.py:874 +#, python-format +msgid "migration_policy must be 'on-demand' or 'never', passed: %s" +msgstr "" + +#: cinder/volume/api.py:887 +#, python-format +msgid "Invalid volume_type passed: %s" +msgstr "" + +#: cinder/volume/api.py:900 +#, python-format +msgid "New volume_type same as original: %s" +msgstr "" + +#: cinder/volume/api.py:915 +msgid "Retype cannot change encryption requirements" +msgstr "" + +#: cinder/volume/api.py:927 +msgid "Retype cannot change front-end qos specs for in-use volumes" +msgstr "" + +#: cinder/volume/driver.py:189 cinder/volume/drivers/netapp/nfs.py:174 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:282 +#, python-format +msgid "copy_data_between_volumes %(src)s -> %(dest)s." +msgstr "" + +#: cinder/volume/driver.py:295 cinder/volume/driver.py:309 +#, python-format +msgid "Failed to attach volume %(vol)s" +msgstr "" + +#: cinder/volume/driver.py:327 +#, python-format +msgid "Failed to copy volume %(src)s to %(dest)d" +msgstr "" + +#: cinder/volume/driver.py:340 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:358 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:394 +#, python-format +msgid "Unable to access the backend storage via the path %(path)s." +msgstr "" + +#: cinder/volume/driver.py:433 +#, python-format +msgid "Creating a new backup for volume %s." +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Restoring backup %(backup)s to volume %(volume)s." +msgstr "" + +#: cinder/volume/driver.py:474 +msgid "Extend volume not implemented" +msgstr "" + +#: cinder/volume/driver.py:533 cinder/volume/drivers/emc/emc_smis_iscsi.py:113 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:546 +#, python-format +msgid "ISCSI discovery attempt failed for:%s" +msgstr "" + +#: cinder/volume/driver.py:548 +#, python-format +msgid "Error from iscsiadm -m discovery: %s" +msgstr "" + +#: cinder/volume/driver.py:595 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:599 cinder/volume/drivers/emc/emc_smis_iscsi.py:156 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:696 +msgid "The volume driver requires the iSCSI initiator name in the connector." +msgstr "" + +#: cinder/volume/driver.py:726 cinder/volume/driver.py:845 +#: cinder/volume/drivers/eqlx.py:247 cinder/volume/drivers/lvm.py:359 +#: cinder/volume/drivers/zadara.py:650 +#: cinder/volume/drivers/emc/emc_smis_common.py:859 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:235 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:602 +#: cinder/volume/drivers/netapp/iscsi.py:1032 +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/nexenta/iscsi.py:538 +#: cinder/volume/drivers/windows/windows.py:205 +msgid "Updating volume stats" +msgstr "" + +#: cinder/volume/driver.py:924 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:203 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." 
+msgstr "" + +#: cinder/volume/manager.py:209 +msgid "" +"ThinLVMVolumeDriver is deprecated, please configure LVMISCSIDriver and " +"lvm_type=thin. Continuing with those settings." +msgstr "" + +#: cinder/volume/manager.py:228 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)" +msgstr "" + +#: cinder/volume/manager.py:235 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s" +msgstr "" + +#: cinder/volume/manager.py:244 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:257 +#, python-format +msgid "Failed to re-export volume %s: setting to error state" +msgstr "" + +#: cinder/volume/manager.py:264 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:271 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:273 +#, python-format +msgid "" +"Error encountered during re-exporting phase of driver initialization: " +"%(name)s" +msgstr "" + +#: cinder/volume/manager.py:283 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:328 cinder/volume/manager.py:330 +msgid "Failed to create manager volume flow" +msgstr "" + +#: cinder/volume/manager.py:374 cinder/volume/manager.py:391 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:380 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:389 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:394 +#, python-format +msgid "Cannot delete volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:422 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:427 +#, python-format +msgid "volume %s: glance metadata deleted" +msgstr "" + +#: cinder/volume/manager.py:430 +#, python-format +msgid "no glance metadata found for volume %s" +msgstr "" + +#: cinder/volume/manager.py:434 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:451 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:462 +#, python-format +msgid "snapshot %(snap_id)s: creating" +msgstr "" + +#: cinder/volume/manager.py:490 +#, python-format +msgid "" +"Failed updating %(snapshot_id)s metadata using the provided volumes " +"%(volume_id)s metadata" +msgstr "" + +#: cinder/volume/manager.py:496 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:508 cinder/volume/manager.py:518 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:526 +#, python-format +msgid "Cannot delete snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:556 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:559 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:579 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:583 +msgid "being attached by another host" +msgstr "" + +#: cinder/volume/manager.py:587 +msgid "being attached by different mode" +msgstr "" + +#: cinder/volume/manager.py:590 +msgid "status must be available or attaching" +msgstr "" + +#: cinder/volume/manager.py:698 +#, python-format +msgid 
"Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "" + +#: cinder/volume/manager.py:760 +#, python-format +msgid "Unable to fetch connection information from backend: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:807 +#, python-format +msgid "Unable to terminate volume connection: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:854 +msgid "failed to create new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:857 +msgid "timeout creating new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:880 +#, python-format +msgid "Failed to copy volume %(vol1)s to %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:909 +#, python-format +msgid "" +"migrate_volume_completion: completing migration for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:921 +#, python-format +msgid "" +"migrate_volume_completion is cleaning up an error for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:940 +#, python-format +msgid "Failed to delete migration source vol %(vol)s: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:976 +#, python-format +msgid "volume %s: calling driver migrate_volume" +msgstr "" + +#: cinder/volume/manager.py:1016 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/manager.py:1024 +#, python-format +msgid "" +"Unable to update stats, %(driver_name)s -%(driver_version)s " +"%(config_group)s driver is uninitialized." +msgstr "" + +#: cinder/volume/manager.py:1044 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/manager.py:1091 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to extend volume by %(s_size)sG, " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/manager.py:1103 +#, python-format +msgid "volume %s: extending" +msgstr "" + +#: cinder/volume/manager.py:1105 +#, python-format +msgid "volume %s: extended successfully" +msgstr "" + +#: cinder/volume/manager.py:1107 +#, python-format +msgid "volume %s: Error trying to extend volume" +msgstr "" + +#: cinder/volume/manager.py:1169 +msgid "Failed to update usages while retyping volume." +msgstr "" + +#: cinder/volume/manager.py:1170 +msgid "Failed to get old volume type quota reservations" +msgstr "" + +#: cinder/volume/manager.py:1190 +#, python-format +msgid "Volume %s: retyped succesfully" +msgstr "" + +#: cinder/volume/manager.py:1193 +#, python-format +msgid "" +"Volume %s: driver error when trying to retype, falling back to generic " +"mechanism." +msgstr "" + +#: cinder/volume/manager.py:1204 +msgid "Retype requires migration but is not allowed." +msgstr "" + +#: cinder/volume/manager.py:1212 +msgid "Volume must not have snapshots." 
+msgstr "" + +#: cinder/volume/qos_specs.py:57 +#, python-format +msgid "Valid consumer of QoS specs are: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:84 cinder/volume/qos_specs.py:105 +#: cinder/volume/qos_specs.py:155 cinder/volume/qos_specs.py:197 +#: cinder/volume/qos_specs.py:211 cinder/volume/qos_specs.py:225 +#: cinder/volume/volume_types.py:43 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:123 cinder/volume/qos_specs.py:140 +#: cinder/volume/qos_specs.py:272 cinder/volume/volume_types.py:52 +#: cinder/volume/volume_types.py:99 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/qos_specs.py:156 +#, python-format +msgid "Failed to get all associations of qos specs %s" +msgstr "" + +#: cinder/volume/qos_specs.py:189 +#, python-format +msgid "" +"Type %(type_id)s is already associated with another qos specs: " +"%(qos_specs_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:198 +#, python-format +msgid "Failed to associate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:212 +#, python-format +msgid "Failed to disassociate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:226 +#, python-format +msgid "Failed to disassociate qos specs %s." +msgstr "" + +#: cinder/volume/qos_specs.py:284 cinder/volume/volume_types.py:111 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/utils.py:144 +#, python-format +msgid "" +"Incorrect value error: %(blocksize)s, it may indicate that " +"'volume_dd_blocksize' was configured incorrectly. Fall back to default." +msgstr "" + +#: cinder/volume/volume_types.py:130 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:131 +#: cinder/volume/drivers/block_device.py:143 cinder/volume/drivers/lvm.py:654 +#: cinder/volume/drivers/lvm.py:669 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:157 cinder/volume/drivers/lvm.py:687 +#, python-format +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:183 cinder/volume/drivers/lvm.py:483 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:200 cinder/volume/drivers/lvm.py:504 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:272 cinder/volume/drivers/lvm.py:227 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:287 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:311 cinder/volume/drivers/lvm.py:300 +#: cinder/volume/drivers/zadara.py:509 cinder/volume/drivers/nexenta/nfs.py:189 +#, python-format +msgid "Creating clone of volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:380 +msgid "No free disk" +msgstr "" + +#: cinder/volume/drivers/block_device.py:393 +msgid "No big enough free disk" +msgstr "" + +#: cinder/volume/drivers/coraid.py:84 +#, python-format +msgid "Invalid ESM url scheme \"%s\". Supported https only." +msgstr "" + +#: cinder/volume/drivers/coraid.py:111 +msgid "Invalid REST handle name. Expected path." 
+msgstr "" + +#: cinder/volume/drivers/coraid.py:134 +#, python-format +msgid "Call to json.loads() failed: %(ex)s. Response: %(resp)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:224 +msgid "Session is expired. Relogin on ESM." +msgstr "" + +#: cinder/volume/drivers/coraid.py:244 +msgid "Reply is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:246 +msgid "Error message is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:284 +#, python-format +msgid "Coraid Appliance ping failed: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:297 +#, python-format +msgid "Volume \"%(name)s\" created with VSX LUN \"%(lun)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:311 +#, python-format +msgid "Volume \"%s\" deleted." +msgstr "" + +#: cinder/volume/drivers/coraid.py:315 +#, python-format +msgid "Resize volume \"%(name)s\" to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:319 +#, python-format +msgid "Repository for volume \"%(name)s\" found: \"%(repo)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:333 +#, python-format +msgid "Volume \"%(name)s\" resized. New size is %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:385 +msgid "Cannot create clone volume in different repository." +msgstr "" + +#: cinder/volume/drivers/coraid.py:505 +#, python-format +msgid "Initialize connection %(shelf)s/%(lun)s for %(name)s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:139 +#, python-format +msgid "" +"CLI output\n" +"%s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:154 +msgid "Reading CLI MOTD" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:158 +#, python-format +msgid "Setting CLI terminal width: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:162 +#, python-format +msgid "Sending CLI command: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:169 +msgid "Error executing EQL command" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:199 +#, python-format +msgid "EQL-driver: executing \"%s\"" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:208 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:383 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:215 cinder/volume/drivers/san/san.py:149 +#, python-format +msgid "Error running SSH command: %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:282 +#, python-format +msgid "Volume %s does not exist, it may have already been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:300 +#, python-format +msgid "EQL-driver: Setup is complete, group IP is %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:304 +msgid "Failed to setup the Dell EqualLogic driver" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:320 +#, python-format +msgid "Failed to create volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:329 +#, python-format +msgid "Volume %s was not found while trying to delete it" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:333 +#, python-format +msgid "Failed to delete volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:348 +#, python-format +msgid "Failed to create snapshot of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:361 +#, python-format +msgid "Failed to create volume from snapshot %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:374 +#, python-format +msgid "Failed to create clone of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:384 +#, python-format +msgid "Failed to delete snapshot %(snap)s of volume %(vol)s" +msgstr "" + +#: 
cinder/volume/drivers/eqlx.py:405 +#, python-format +msgid "Failed to initialize connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:415 +#, python-format +msgid "Failed to terminate connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:436 +#, python-format +msgid "Volume %s is not found!, it may have been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:440 +#, python-format +msgid "Failed to ensure export of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:459 +#, python-format +msgid "Failed to extend_volume %(name)s from %(current_size)sGB to %(new_size)sGB" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:86 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:91 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:103 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:161 +#, python-format +msgid "Cloning volume %(src)s to volume %(dst)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:166 +msgid "Volume status must be 'available'." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:202 cinder/volume/drivers/nfs.py:122 +#: cinder/volume/drivers/netapp/nfs.py:753 +#, python-format +msgid "casted to %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:216 +msgid "Snapshot status must be \"available\" to clone." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:238 +#, python-format +msgid "snapshot: %(snap)s, volume: %(vol)s, volume_size: %(size)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:257 +#, python-format +msgid "will copy from snapshot at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:275 cinder/volume/drivers/nfs.py:172 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:373 +#, python-format +msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:403 +#, python-format +msgid "nova call result: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:405 +msgid "Call to Nova to create snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:427 +msgid "Nova returned \"error\" status while creating snapshot." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:431 +#, python-format +msgid "Status of snapshot %(id)s is now %(status)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:444 +#, python-format +msgid "Timed out while waiting for Nova update for creation of snapshot %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:456 +#, python-format +msgid "create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:457 +#, python-format +msgid "volume id: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:532 +msgid "'active' must be present when writing snap_info." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:562 +#, python-format +msgid "deleting snapshot %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:566 +msgid "Volume status must be \"available\" or \"in-use\"." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:582 +#, python-format +msgid "" +"Snapshot record for %s is not present, allowing snapshot_delete to " +"proceed." 
+msgstr "" + +#: cinder/volume/drivers/glusterfs.py:587 +#, python-format +msgid "snapshot_file for this snap is %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:608 +#, python-format +msgid "No base file found for %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:625 +#, python-format +msgid "No %(base_id)s found for %(file)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:680 +#, python-format +msgid "No file found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:690 +#, python-format +msgid "No snap found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:701 +#, python-format +msgid "No file depends on %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:727 +#, python-format +msgid "Check condition failed: %s expected to be None." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:778 +msgid "Call to Nova delete snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:796 +#, python-format +msgid "status of snapshot %s is still \"deleting\"... waiting" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:802 +#, python-format +msgid "Unable to delete snapshot %(id)s, status: %(status)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:815 +#, python-format +msgid "Timed out while waiting for Nova update for deletion of snapshot %(id)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:904 +#, python-format +msgid "%s must be a valid raw or qcow2 image." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:967 +msgid "Extend volume is only supported for this driver when no snapshots exist." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:975 +#, python-format +msgid "Unrecognized backing format: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:990 +#, python-format +msgid "creating new volume at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:993 +#, python-format +msgid "file already exists at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1019 cinder/volume/drivers/nfs.py:159 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1021 +#, python-format +msgid "Available shares: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1038 +#, python-format +msgid "" +"GlusterFS share at %(dir)s is not writable by the Cinder volume service. " +"Snapshot operations will not be supported." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:96 +#, python-format +msgid "GPFS is not active. Detailed output: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:97 +#, python-format +msgid "GPFS is not running - state: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:140 +msgid "Option gpfs_mount_point_base is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:147 +msgid "Option gpfs_images_share_mode is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:153 +msgid "Option gpfs_images_dir is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:160 +#, python-format +msgid "" +"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " +"belong to different file systems" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:169 +#, python-format +msgid "" +"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in " +"cluster daemon level %(cur)s - must be at least at level %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:183 +#, python-format +msgid "%s must be an absolute path." 
+msgstr "" + +#: cinder/volume/drivers/gpfs.py:188 +#, python-format +msgid "%s is not a directory." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:197 +#, python-format +msgid "" +"The GPFS filesystem %(fs)s is not at the required release level. Current" +" level is %(cur)s, must be at least %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:556 +#, python-format +msgid "Failed to resize volume %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:604 +#, python-format +msgid "mkfs failed on volume %(vol)s, error message was: %(err)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:637 +#, python-format +msgid "" +"%s cannot be accessed. Verify that GPFS is active and file system is " +"mounted." +msgstr "" + +#: cinder/volume/drivers/lvm.py:189 +#, python-format +msgid "Unabled to delete due to existing snapshot for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:215 +#, python-format +msgid "Volume device file path %s does not exist." +msgstr "" + +#: cinder/volume/drivers/lvm.py:221 +#, python-format +msgid "Size for volume: %s not found, cannot secure delete." +msgstr "" + +#: cinder/volume/drivers/lvm.py:262 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:361 +#, python-format +msgid "Unable to update stats on non-initialized Volume Group: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:462 +#, python-format +msgid "Error creating iSCSI target, retrying creation for target: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:482 +#, python-format +msgid "volume_info:%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:518 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:519 cinder/volume/drivers/lvm.py:724 +#: cinder/volume/drivers/huawei/rest_common.py:1225 +#, python-format +msgid "%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:573 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/nfs.py:109 +msgid "Driver specific implementation needs to return mount_point_base." +msgstr "" + +#: cinder/volume/drivers/nfs.py:263 +#, python-format +msgid "Expected volume size was %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:264 +#, python-format +msgid " but size is now %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:361 +#, python-format +msgid "%s is already mounted" +msgstr "" + +#: cinder/volume/drivers/nfs.py:421 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:426 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/nfs.py:431 +#, python-format +msgid "NFS config 'nfs_oversub_ratio' invalid. Must be > 0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:439 +#, python-format +msgid "NFS config 'nfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:493 +#, python-format +msgid "Selected %s as target nfs share." 
+msgstr "" + +#: cinder/volume/drivers/nfs.py:526 +#, python-format +msgid "%s is above nfs_used_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:529 +#, python-format +msgid "%s is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:532 +#, python-format +msgid "%s reserved space is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/rbd.py:160 +#, python-format +msgid "Invalid argument - whence=%s not supported" +msgstr "" + +#: cinder/volume/drivers/rbd.py:164 +msgid "Invalid argument" +msgstr "" + +#: cinder/volume/drivers/rbd.py:183 +msgid "fileno() not supported by RBD()" +msgstr "" + +#: cinder/volume/drivers/rbd.py:210 +#, python-format +msgid "error opening rbd image %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:259 +msgid "rados and rbd python libraries not found" +msgstr "" + +#: cinder/volume/drivers/rbd.py:265 +msgid "error connecting to ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:346 cinder/volume/drivers/sheepdog.py:178 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:377 +#, python-format +msgid "clone depth exceeds limit of %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:411 +#, python-format +msgid "maximum clone depth (%d) has been reached - flattening source volume" +msgstr "" + +#: cinder/volume/drivers/rbd.py:423 +#, python-format +msgid "flattening source volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:435 +#, python-format +msgid "creating snapshot='%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:445 +#, python-format +msgid "cloning '%(src_vol)s@%(src_snap)s' to '%(dest)s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:459 +msgid "clone created successfully" +msgstr "" + +#: cinder/volume/drivers/rbd.py:468 +#, python-format +msgid "creating volume '%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:484 +#, python-format +msgid "flattening %(pool)s/%(img)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:490 +#, python-format +msgid "cloning %(pool)s/%(img)s@%(snap)s to %(dst)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:527 +msgid "volume has no backup snaps" +msgstr "" + +#: cinder/volume/drivers/rbd.py:550 +#, python-format +msgid "volume %s is not a clone" +msgstr "" + +#: cinder/volume/drivers/rbd.py:568 +#, python-format +msgid "deleting parent snapshot %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:579 +#, python-format +msgid "deleting parent %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:593 +#, python-format +msgid "volume %s no longer exists in backend" +msgstr "" + +#: cinder/volume/drivers/rbd.py:609 +msgid "volume has clone snapshot(s)" +msgstr "" + +#: cinder/volume/drivers/rbd.py:625 +#, python-format +msgid "deleting rbd volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:629 +msgid "" +"ImageBusy error raised while deleting rbd volume. This may have been " +"caused by a connection from a client that has crashed and, if so, may be " +"resolved by retrying the delete after 30 seconds has elapsed." 
+msgstr "" + +#: cinder/volume/drivers/rbd.py:642 +msgid "volume is a clone so cleaning references" +msgstr "" + +#: cinder/volume/drivers/rbd.py:696 +#, python-format +msgid "connection data: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:705 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:709 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:712 +msgid "Not an rbd snapshot" +msgstr "" + +#: cinder/volume/drivers/rbd.py:724 +#, python-format +msgid "not cloneable: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:728 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:733 +msgid "rbd image clone requires image format to be 'raw' but image {0} is '{1}'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:747 +#, python-format +msgid "Unable to open image %(loc)s: %(err)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:817 +msgid "volume backup complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:830 +msgid "volume restore complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:840 cinder/volume/drivers/sheepdog.py:195 +#, python-format +msgid "Failed to Extend Volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:845 cinder/volume/drivers/sheepdog.py:200 +#: cinder/volume/drivers/windows/windows.py:223 +#, python-format +msgid "Extend volume from %(old_size)s GB to %(new_size)s GB." +msgstr "" + +#: cinder/volume/drivers/scality.py:67 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:78 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:84 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:105 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:139 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:59 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:64 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:144 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:151 +#, python-format +msgid "" +"Failed to make httplib connection SolidFire Cluster: %s (verify san_ip " +"settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:154 +#, python-format +msgid "Failed to make httplib connection: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:161 +#, python-format +msgid "" +"Request to SolidFire cluster returned bad status: %(status)s / %(reason)s" +" (check san_login/san_password settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:166 +#, python-format +msgid "HTTP request failed, with status: %(status)s and reason: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:177 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:183 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:187 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:189 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:195 +#, python-format +msgid "Detected 
xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:202 +#: cinder/volume/drivers/solidfire.py:271 +#: cinder/volume/drivers/solidfire.py:366 +#, python-format +msgid "API response: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:222 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:253 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:315 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:398 +msgid "Failed to get model update from clone" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:410 +#, python-format +msgid "Failed volume create: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:425 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:460 +#, python-format +msgid "Failed to get SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:469 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:478 +#, python-format +msgid "Volume %s, not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:481 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:550 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:554 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:556 +msgid "This usually means the volume was never successfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:569 +#, python-format +msgid "Failed to delete SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:572 +#: cinder/volume/drivers/solidfire.py:646 +#: cinder/volume/drivers/solidfire.py:709 +#: cinder/volume/drivers/solidfire.py:734 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:575 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:579 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:587 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:638 +msgid "Entering SolidFire extend_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:660 +msgid "Leaving SolidFire extend_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:665 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:673 +msgid "Failed to get updated stats" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:703 +#: cinder/volume/drivers/solidfire.py:728 +msgid "Entering SolidFire attach_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:773 +msgid "Leaving SolidFire transfer volume" +msgstr "" + +#: cinder/volume/drivers/zadara.py:236 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:260 +#, python-format +msgid "Operation completed. 
%(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:357 +#, python-format +msgid "Pool %(name)s: %(total)sGB total, %(free)sGB free" +msgstr "" + +#: cinder/volume/drivers/zadara.py:408 cinder/volume/drivers/zadara.py:531 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:438 +#, python-format +msgid "Create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:445 cinder/volume/drivers/zadara.py:490 +#: cinder/volume/drivers/zadara.py:516 +#, python-format +msgid "Volume %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:456 +#, python-format +msgid "Delete snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:464 +#, python-format +msgid "snapshot: original volume %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:472 +#, python-format +msgid "snapshot: snapshot %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:483 +#, python-format +msgid "Creating volume from snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:496 +#, python-format +msgid "Snapshot %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:614 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:40 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:79 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:83 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:91 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:98 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:107 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:115 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:130 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:137 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:144 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:152 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:157 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:167 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:177 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:188 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:197 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:218 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:230 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:241 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:257 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:266 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:278 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:287 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:292 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:302 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:312 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:321 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:342 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. 
Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:354 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:365 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:381 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:390 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:402 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:411 +msgid "Entering delete_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:413 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:420 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:430 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:438 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:442 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:456 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:465 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:472 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:476 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:488 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:497 +#: cinder/volume/drivers/emc/emc_smis_common.py:567 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:502 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:518 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:527 +#, python-format +msgid "" +"Error Create Snapshot: %(snapshot)s Volume: %(volume)s Error: " +"%(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:535 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:541 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:545 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:551 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:559 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:574 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:590 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:599 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:611 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:621 +#, python-format +msgid "Create export: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:626 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:648 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:663 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:674 +#, python-format +msgid "Error mapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:678 +#, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:694 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:707 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:711 +#, python-format +msgid "HidePaths for volume %s completed successfully." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:724 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:739 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:744 +#, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:757 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:770 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:775 +#, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:781 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:790 +#: cinder/volume/drivers/emc/emc_smis_common.py:820 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:804 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:810 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:834 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:840 +#, python-format +msgid "Volume %s is already mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:852 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:884 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:887 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:903 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:906 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:928 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:948 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:952 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:959 +msgid "Cannot connect to ECOM server" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:971 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:984 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:997 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1010 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1054 +#, python-format +msgid "Pool %(storage_type)s is not found." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1060 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1066 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1082 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1114 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1117 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1130 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1158 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1184 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1188 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1248 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1289 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1302 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1314 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1326 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1361 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1404 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1409 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1419 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1441 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1463 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1491 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1520 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1526 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1538 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1548 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1550 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1566 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:152 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:161 +#, python-format +msgid "Cannot find device number for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:191 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:198 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:215 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:70 +#, python-format +msgid "Range: start LU: %(start)s, end LU: %(end)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:84 +#, python-format +msgid "setting LU upper (end) limit to %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:92 +#, python-format +msgid "%(element)s: %(val)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:103 cinder/volume/drivers/hds/hds.py:105 +#, python-format +msgid "XML exception reading parameter: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:178 +#, python-format +msgid "portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:197 +#, python-format +msgid "No configuration found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:250 +#, python-format +msgid "HDP not found: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:289 +#, python-format +msgid "iSCSI portal not found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:327 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:355 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is cloned." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:372 +#, python-format +msgid "LUN %(lun)s extended to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:395 +#, python-format +msgid "delete lun %(lun)s on %(name)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:480 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created from snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:503 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is created as snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:522 +#, python-format +msgid "LUN %s is deleted." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:57 +msgid "_instantiate_driver: configuration not found." 
+msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:64 +#, python-format +msgid "" +"_instantiate_driver: Loading %(protocol)s driver for Huawei OceanStor " +"%(product)s series storage arrays." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:84 +#, python-format +msgid "" +"\"Product\" or \"Protocol\" is illegal. \"Product\" should be set to " +"either T, Dorado or HVS. \"Protocol\" should be set to either iSCSI or " +"FC. Product: %(product)s Protocol: %(protocol)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:74 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s host: %(host)s initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:92 +#: cinder/volume/drivers/huawei/huawei_t.py:461 +#, python-format +msgid "initialize_connection: Target FC ports WWNS: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:101 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(ini)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:159 +#: cinder/volume/drivers/huawei/rest_common.py:1278 +#, python-format +msgid "" +"_get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " +"check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:206 +#: cinder/volume/drivers/huawei/rest_common.py:1083 +#, python-format +msgid "_get_tgt_iqn: iSCSI IP is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:234 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:248 +#, python-format +msgid "" +"_get_iscsi_tgt_port_info: Failed to get iSCSI port info. Please make sure" +" the iSCSI port IP %s is configured in array." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:323 +#: cinder/volume/drivers/huawei/huawei_t.py:552 +#, python-format +msgid "" +"terminate_connection: volume: %(vol)s, host: %(host)s, connector: " +"%(initiator)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:351 +#, python-format +msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:436 +msgid "validate_connector: The FC driver requires thewwpns in the connector." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:443 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:578 +#, python-format +msgid "_remove_fc_ports: FC port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:40 +#, python-format +msgid "parse_xml_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:129 +#, python-format +msgid "_get_host_os_type: Host %(ip)s OS type is %(os)s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:59 +#, python-format +msgid "HVS Request URL: %(url)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:60 +#, python-format +msgid "HVS Request Data: %(data)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:73 +#, python-format +msgid "HVS Response Data: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:75 +#, python-format +msgid "Bad response from server: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:82 +msgid "JSON transfer error" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:102 +#, python-format +msgid "Login error, reason is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:166 +#, python-format +msgid "" +"%(err)s\n" +"result: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:173 +#, python-format +msgid "%s \"data\" was not in result." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:208 +msgid "Can't find the Qos policy in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:246 +msgid "Can't find lun or lun group in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:280 +#, python-format +msgid "Invalid resource pool: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:298 +#, python-format +msgid "Get pool info error, pool name is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:327 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:354 +#, python-format +msgid "_stop_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:474 +#, python-format +msgid "" +"_mapping_hostgroup_and_lungroup: lun_group: %(lun_group)sview_id: " +"%(view_id)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:511 +#: cinder/volume/drivers/huawei/rest_common.py:543 +#, python-format +msgid "initiator name:%(initiator_name)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:527 +#, python-format +msgid "host lun id is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:553 +#, python-format +msgid "the free wwns %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:574 +#, python-format +msgid "the fc server properties is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:688 +#, python-format +msgid "JSON transfer data error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:874 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:937 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:964 +#, python-format +msgid "" +"PrefetchType config is wrong. PrefetchType must in 1,2,3,4. fetchtype " +"is:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:970 +msgid "Use default prefetch fetchtype. Prefetch fetchtype:Intelligent." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:982 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal.LUNcopy name: " +"%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1056 +#, python-format +msgid "" +"_get_iscsi_port_info: Failed to get iscsi port info through config IP " +"%(ip)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1101 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1124 +#, python-format +msgid "_parse_volume_type: type id: %(type_id)s config parameter is: %(params)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1157 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the configuration file " +"%(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1162 +#, python-format +msgid "The config parameters are: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1239 +#: cinder/volume/drivers/huawei/ssh_common.py:118 +#: cinder/volume/drivers/huawei/ssh_common.py:1265 +#, python-format +msgid "_check_conf_file: Config file invalid. %s must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1246 +#: cinder/volume/drivers/huawei/ssh_common.py:125 +msgid "_check_conf_file: Config file invalid. StoragePool must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1256 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1300 +msgid "Can not find lun in array" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:54 +#, python-format +msgid "ssh_read: Read SSH timeout. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:70 +msgid "No response message. Please check system status." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:101 +#: cinder/volume/drivers/huawei/ssh_common.py:1249 +msgid "do_setup" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:135 +#: cinder/volume/drivers/huawei/ssh_common.py:1287 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType is invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:169 +#, python-format +msgid "_get_login_info: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:224 +#, python-format +msgid "create_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:242 +#, python-format +msgid "" +"_name_translate: Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:279 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the element in configuration " +"file %(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:373 +#: cinder/volume/drivers/huawei/ssh_common.py:1451 +#, python-format +msgid "LUNType must be \"Thin\" or \"Thick\". LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:395 +msgid "" +"_parse_conf_lun_params: Use default prefetch type. 
Prefetch type: " +"Intelligent" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:421 +#, python-format +msgid "" +"_get_maximum_capacity_pool_id: Failed to get pool id. Please check config" +" file and make sure the StoragePool %s is created in storage array." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:436 +#, python-format +msgid "CLI command: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:466 +#, python-format +msgid "" +"_execute_cli: Can not connect to IP %(old)s, try to connect to the other " +"IP %(new)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:501 +#, python-format +msgid "_execute_cli: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:511 +#, python-format +msgid "delete_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:516 +#, python-format +msgid "delete_volume: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:570 +#, python-format +msgid "" +"create_volume_from_snapshot: snapshot name: %(snapshot)s, volume name: " +"%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:580 +#, python-format +msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:650 +#, python-format +msgid "_wait_for_luncopy: LUNcopy %(luncopyname)s status is %(status)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:688 +#, python-format +msgid "create_cloned_volume: src volume: %(src)s, tgt volume: %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:697 +#, python-format +msgid "Source volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:739 +#, python-format +msgid "" +"extend_volume: extended volume name: %(extended_name)s new added volume " +"name: %(added_name)s new added volume size: %(added_size)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:747 +#, python-format +msgid "extend_volume: volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:779 +#, python-format +msgid "create_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:785 +msgid "create_snapshot: Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:792 +#, python-format +msgid "create_snapshot: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:855 +#, python-format +msgid "delete_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:865 +#, python-format +msgid "" +"delete_snapshot: Can not delete snapshot %s for it is a source LUN of " +"LUNCopy." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:873 +#, python-format +msgid "delete_snapshot: Snapshot %(snap)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:916 +#, python-format +msgid "" +"%(func)s: %(msg)s\n" +"CLI command: %(cmd)s\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:933 +#, python-format +msgid "map_volume: Volume %s was not found." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1079 +#, python-format +msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1102 +#, python-format +msgid "remove_map: Host %s does not exist." 
+msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1106 +#, python-format +msgid "remove_map: Volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1119 +#, python-format +msgid "remove_map: No map between host %(host)s and volume %(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1138 +#, python-format +msgid "" +"_delete_map: There are IOs accessing the system. Retry to delete host map" +" %(mapid)s 10s later." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1146 +#, python-format +msgid "" +"_delete_map: Failed to delete host map %(mapid)s.\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1185 +msgid "_update_volume_stats: Updating volume stats." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1277 +msgid "_check_conf_file: Config file invalid. StoragePool must be specified." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1311 +msgid "" +"_get_device_type: The driver only supports Dorado5100 and Dorado 2100 G2 " +"now." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1389 +#, python-format +msgid "" +"create_volume_from_snapshot: %(device)s does not support create volume " +"from snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1396 +#, python-format +msgid "create_cloned_volume: %(device)s does not support clone volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1404 +#, python-format +msgid "extend_volume: %(device)s does not support extend volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1413 +#, python-format +msgid "create_snapshot: %(device)s does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:132 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:142 +#, python-format +msgid "Failed getting details for pool %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:179 +msgid "do_setup: No configured nodes." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:182 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:186 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:190 +msgid "Unable to determine system name" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:193 +msgid "Unable to determine system id" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:196 +msgid "Unable to determine pool extent size" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:203 +#: cinder/volume/drivers/netapp/iscsi.py:122 +#: cinder/volume/drivers/netapp/nfs.py:639 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:157 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:209 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:217 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:225 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:235 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:254 +msgid "The connector does not contain the required information." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:277 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:297 +msgid "CHAP secret exists for host but CHAP is disabled" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:302 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:314 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:316 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:333 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:342 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:365 +msgid "" +"Could not get FC connection information for the host-volume connection. " +"Is the host configured properly for FC connections?" 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:380 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:385 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:403 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:411 +msgid "terminate_connection: Failed to get host name from connector." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:421 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:447 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:459 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:470 +#, python-format +msgid "enter: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:474 +msgid "extend_volume: Extending a volume with snapshots is not supported." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:481 +#, python-format +msgid "leave: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:497 +#, python-format +msgid "enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:523 +#, python-format +msgid "leave: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:540 +#, python-format +msgid "" +"enter: retype: id=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:581 +#, python-format +msgid "" +"exit: retype: ild=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:622 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:623 +msgid "_update_volume_stats: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:44 +#, python-format +msgid "Could not find key in output of command %(cmd)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:64 +#, python-format +msgid "Failed to get code level (%s)." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:86 +#, python-format +msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:143 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:165 +#, python-format +msgid "Failed to find host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:178 +#, python-format +msgid "enter: get_host_from_connector: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:207 +#, python-format +msgid "leave: get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:218 +#, python-format +msgid "enter: create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:224 +msgid "create_host: Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:235 +msgid "create_host: No initiators or wwpns supplied." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:265 +#, python-format +msgid "leave: create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:275 +#, python-format +msgid "enter: map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:301 +#, python-format +msgid "" +"leave: map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host " +"%(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:311 +#, python-format +msgid "enter: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:318 +#, python-format +msgid "unmap_vol_from_host: No mapping of volume %(vol_name)s to any host found." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:324 +#, python-format +msgid "" +"unmap_vol_from_host: Multiple mappings of volume %(vol_name)s found, no " +"host specified." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:336 +#, python-format +msgid "" +"unmap_vol_from_host: No mapping of volume %(vol_name)s to host %(host) " +"found." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:348 +#, python-format +msgid "leave: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:377 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:383 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:390 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:397 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:402 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:408 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:417 +#, python-format +msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:452 +msgid "Protocol must be specified as ' iSCSI' or ' FC'." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:495 +#, python-format +msgid "enter: create_vdisk: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:498 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:525 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping%(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:535 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within theallotted %(to)d " +"seconds timeout. Terminating." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:544 +#, python-format +msgid "" +"enter: run_flashcopy: execute FlashCopy from source %(source)s to target " +"%(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:552 +#, python-format +msgid "leave: run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:572 +#, python-format +msgid "Loopcall: _check_vdisk_fc_mappings(), vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:595 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:631 +#, python-format +msgid "Calling _ensure_vdisk_no_fc_mappings: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:639 +#, python-format +msgid "enter: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:641 +#, python-format +msgid "Tried to delete non-existant vdisk %s." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:645 +#, python-format +msgid "leave: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:649 +#, python-format +msgid "enter: create_copy: snapshot %(src)s to %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:654 +#, python-format +msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:669 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt)s from vdisk %(src)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:691 +msgid "migrate_volume started without a vdisk copy in the expected pool." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:743 +#, python-format +msgid "" +"Ignore change IO group as storage code level is %(code_level)s, below " +"then 6.4.0.0" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:35 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:211 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:244 +#, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:53 +#, python-format +msgid "Expected no output from CLI command %(cmd)s, got %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:65 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:256 +#, python-format +msgid "" +"Failed to parse CLI output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:142 +msgid "Must pass wwpn or host to lsfabric." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:158 +#, python-format +msgid "Did not find success message nor error for %(fun)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:163 +msgid "" +"storwize_svc_multihostmap_enabled is set to False, not allowing multi " +"host mapping." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:347 +#, python-format +msgid "Did not find expected key %(key)s in %(fun)s: %(raw)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:382 +#, python-format +msgid "" +"Unexpected CLI response: header/row mismatch. header: %(header)s, row: " +"%(row)s" +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:419 +#, python-format +msgid "No element by given name %s." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:440 +msgid "Not a valid value for NaElement." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:444 +msgid "NaElement name cannot be null." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:468 +msgid "Type cannot be converted into NaElement." 
+msgstr "" + +#: cinder/volume/drivers/netapp/common.py:75 +msgid "Required configuration not found" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:103 +#, python-format +msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:109 +#, python-format +msgid "Storage family %s is not supported" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:116 +#, python-format +msgid "No default storage protocol found for storage family %(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:123 +#, python-format +msgid "" +"Protocol %(storage_protocol)s is not supported for storage family " +"%(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:130 +#, python-format +msgid "" +"NetApp driver of family %(storage_family)s and protocol " +"%(storage_protocol)s loaded" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:139 +msgid "Only loading netapp drivers supported." +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:158 +#, python-format +msgid "" +"The configured NetApp driver is deprecated. Please refer the link to " +"resolve the issue '%s'." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:69 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:105 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:150 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:166 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:175 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:191 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:227 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:232 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:238 +#, python-format +msgid "Failed to get LUN target details for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:249 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:252 +#, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:290 +#, python-format +msgid "Snapshot %s deletion successful" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:310 +#: cinder/volume/drivers/netapp/iscsi.py:565 +#: cinder/volume/drivers/netapp/nfs.py:99 +#: cinder/volume/drivers/netapp/nfs.py:206 +#, python-format +msgid "Resizing %s failed. Cleaning volume." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:325 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:412 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:431 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:511 +msgid "Object is not a NetApp LUN." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:543 +#, python-format +msgid "Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:545 +#, python-format +msgid "Error getting lun attribute. Exception: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:600 +#, python-format +msgid "No need to extend volume %s as it is already the requested new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:606 +#, python-format +msgid "Resizing lun %s directly to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:633 +#, python-format +msgid "Lun %(path)s geometry failed. Message - %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:662 +#, python-format +msgid "Moving lun %(name)s to %(new_name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:677 +#, python-format +msgid "Resizing lun %s using sub clone to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:684 +#, python-format +msgid "%s cannot be sub clone resized as it is hosted on compressed volume" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:690 +#, python-format +msgid "%s cannot be sub clone resized as it contains no blocks." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:707 +#, python-format +msgid "Post clone resize lun %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:718 +#, python-format +msgid "Failure staging lun %s to tmp." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:723 +#, python-format +msgid "Failure moving new cloned lun to %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:727 +#, python-format +msgid "Failure deleting staged tmp lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:730 +#, python-format +msgid "Unknown exception in post clone resize lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:732 +#, python-format +msgid "Exception details: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:736 +msgid "Getting lun block count." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:741 +#, python-format +msgid "Failure getting lun info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:785 +#, python-format +msgid "Failed to get vol with required size and extra specs for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:796 +#, python-format +msgid "Error provisioning vol %(name)s on %(volume)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:841 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:982 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:986 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1087 +msgid "Cluster ssc is not updated. No volume stats found." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1149 +#: cinder/volume/drivers/netapp/nfs.py:1080 +msgid "Unsupported ONTAP version. ONTAP version 7.3.1 and above is supported." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1153 +#: cinder/volume/drivers/netapp/nfs.py:1084 +#: cinder/volume/drivers/netapp/utils.py:320 +msgid "Api version could not be determined." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1164 +#, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1273 +#, python-format +msgid "Error finding luns for volume %s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1390 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1393 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1456 +msgid "Volume refresh job already running. Returning..." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1462 +#, python-format +msgid "Error refreshing vol capacity. Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1470 +#, python-format +msgid "Refreshing capacity info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:104 +#: cinder/volume/drivers/netapp/nfs.py:211 +#, python-format +msgid "NFS file %s not discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:224 +#, python-format +msgid "Copied image to volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:230 +#, python-format +msgid "Registering image in cache %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:237 +#, python-format +msgid "" +"Exception while registering image %(image_id)s in cache. Exception: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:250 +#, python-format +msgid "Found cache file for image %(image_id)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:263 +#, python-format +msgid "Cloning img from cache for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:271 +msgid "Image cache cleaning in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:282 +msgid "Image cache cleaning in progress." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:293 +#, python-format +msgid "Cleaning cache for share %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:298 +#, python-format +msgid "Files to be queued for deletion %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:305 +#, python-format +msgid "Exception during cache cleaning %(share)s. Message - %(ex)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:311 +msgid "Image cache cleaning done." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:336 +#, python-format +msgid "Bytes to free %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:343 +#, python-format +msgid "Delete file path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:358 +#, python-format +msgid "Deleting file at path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:363 +#, python-format +msgid "Exception during deleting %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:395 +#, python-format +msgid "Unexpected exception in cloning image %(image_id)s. 
Message: %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:411 +#, python-format +msgid "Cloning image %s from cache" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:415 +#, python-format +msgid "Cache share: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:425 +#, python-format +msgid "Unexpected exception during image cloning in share %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:431 +#, python-format +msgid "Cloning image %s directly in share" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:436 +#, python-format +msgid "Share is cloneable %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:443 +#, python-format +msgid "Image is raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:450 +#, python-format +msgid "Image will locally be converted to raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:457 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:467 +#, python-format +msgid "Performing post clone for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:474 +msgid "NFS file could not be discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:478 +msgid "Checking file for resize" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:482 +#, python-format +msgid "Resizing file to %sG" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:488 +msgid "Resizing image file failed." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:510 +msgid "Discover file retries exhausted." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:529 +#, python-format +msgid "Image location not in the expected format %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:557 +#, python-format +msgid "Found possible share matches %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:561 +msgid "Unexpected exception while short listing used share." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:599 +#, python-format +msgid "Extending volume %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:710 +#, python-format +msgid "Shares on vserver %s will only be used for provisioning." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:714 +#: cinder/volume/drivers/netapp/nfs.py:892 +msgid "No vserver set in config. SSC will be disabled." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:757 +#, python-format +msgid "Exception creating vol %(name)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:765 +#, python-format +msgid "Volume %s could not be created on shares." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:815 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:856 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:862 +#, python-format +msgid "" +"Cloning with params volume %(volume)s, src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:924 +msgid "No cluster ssc stats found. Wait for next volume stats update." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:941 +msgid "No shares found hence skipping ssc refresh." 
+msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:978 +#: cinder/volume/drivers/netapp/nfs.py:1221 +#, python-format +msgid "Shortlisted del elg files %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:983 +#: cinder/volume/drivers/netapp/nfs.py:1226 +#, python-format +msgid "Getting file usage for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:988 +#: cinder/volume/drivers/netapp/nfs.py:1231 +#, python-format +msgid "file-usage for path %(path)s is %(bytes)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1005 +#: cinder/volume/drivers/netapp/nfs.py:1268 +#, python-format +msgid "Share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1007 +#: cinder/volume/drivers/netapp/nfs.py:1270 +#, python-format +msgid "No share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1038 +#, python-format +msgid "Found volume %(vol)s for share %(share)s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1129 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1139 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:241 +#, python-format +msgid "Unexpected error while creating ssc vol list. Message - %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:272 +#, python-format +msgid "Exception querying aggr options. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:313 +#, python-format +msgid "Exception querying sis information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:347 +#, python-format +msgid "Exception querying mirror information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:379 +#, python-format +msgid "Exception querying storage disk. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:421 +#, python-format +msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:455 +#, python-format +msgid "Successfully completed stale refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:482 +#, python-format +msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:488 +#, python-format +msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:500 +msgid "Backend not a VolumeDriver." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:502 +msgid "Backend server not NaServer." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:505 +msgid "ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:517 +msgid "refresh stale ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:616 +msgid "Fatal error: User not permitted to query NetApp volumes." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:623 +#, python-format +msgid "" +"The user does not have access or sufficient privileges to use all ssc " +"apis. The ssc features %s may not work as expected." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:122 +msgid "ems executed successfully." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:124 +#, python-format +msgid "Failed to invoke ems. 
Message : %s" +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:137 +msgid "" +"It is not the recommended way to use drivers by NetApp. Please use " +"NetAppDriver to achieve the functionality." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:160 +msgid "Requires an NaServer instance." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:317 +msgid "Unsupported Clustered Data ONTAP version." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:99 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:150 +#, python-format +msgid "Extending volume: %(id)s New size: %(size)s GB" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:166 +#, python-format +msgid "Volume %s does not exist, it seems it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:179 +#, python-format +msgid "Cannot delete snapshot %(origin): %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:190 +#, python-format +msgid "Creating temp snapshot of the original volume: %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:200 +#: cinder/volume/drivers/nexenta/nfs.py:200 +#, python-format +msgid "Volume creation failed, deleting created snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:205 +#: cinder/volume/drivers/nexenta/nfs.py:205 +#, python-format +msgid "Failed to delete zfs snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:223 +#, python-format +msgid "Enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:250 +#, python-format +msgid "Remote NexentaStor appliance at %s should be SSH-bound." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:267 +#, python-format +msgid "" +"Cannot send source snapshot %(src)s to destination %(dst)s. Reason: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:275 +#, python-format +msgid "" +"Cannot delete temporary source snapshot %(src)s on NexentaStor Appliance:" +" %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:281 +#, python-format +msgid "Cannot delete source volume %(volume)s on NexentaStor Appliance: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:318 +#, python-format +msgid "Snapshot %s does not exist, it seems it was already deleted." 
+msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:439 +#: cinder/volume/drivers/windows/windows_utils.py:230 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:449 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:461 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:471 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:481 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:514 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:522 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:83 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:88 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:89 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:90 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:96 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:85 +#, python-format +msgid "Volume %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:89 +#, python-format +msgid "Folder %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:114 +#, python-format +msgid "Creating folder on Nexenta Store %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:146 +#, python-format +msgid "Cannot destroy created folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:176 +#, python-format +msgid "Cannot destroy cloned folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:227 +#, python-format +msgid "Folder %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:237 +#: cinder/volume/drivers/nexenta/nfs.py:268 +#, python-format +msgid "Snapshot %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:302 +#, python-format +msgid "Creating regular file: %s.This may take some time." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:313 +#, python-format +msgid "Regular file: %s created." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:365 +#, python-format +msgid "Sharing folder %s on Nexenta Store" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:393 +#, python-format +msgid "Shares loaded: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/utils.py:46 +#, python-format +msgid "Invalid value: \"%s\"" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:93 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:99 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:107 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:137 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:190 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:246 +#, python-format +msgid "Snapshot info: %(name)s => %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:321 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:79 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:166 +#, python-format +msgid "Invalid hp3parclient version. Version %s or greater required." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:179 +#, python-format +msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:193 +#, python-format +msgid "HP3PARCommon %(common_ver)s, hp3parclient %(rest_ver)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:212 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:494 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:228 +#, python-format +msgid "Failed to get domain because CPG (%s) doesn't exist on array." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:247 +#, python-format +msgid "Error extending volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:347 +#, python-format +msgid "command %s failed" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:390 +#, python-format +msgid "Error running ssh command: %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:586 +#, python-format +msgid "VV Set %s does not exist." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:633 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:684 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:752 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1004 +#, python-format +msgid "Failure in update_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1019 +#, python-format +msgid "Failure in clear_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1031 +#, python-format +msgid "Error attaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1039 +#, python-format +msgid "Error detaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:124 +#, python-format +msgid "Invalid IP address format '%s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:158 +#, python-format +msgid "" +"Found invalid iSCSI IP address(s) in configuration option(s) " +"hp3par_iscsi_ips or iscsi_ip_address '%s.'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:164 +msgid "At least one valid iSCSI IP address must be set." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:266 +msgid "Least busy iSCSI port not found, using first iSCSI port in list." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:75 +#, python-format +msgid "Failure while invoking function: %(func)s. Error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:162 +#, python-format +msgid "Error while terminating session: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:165 +msgid "Successfully established connection to the server." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:172 +#, python-format +msgid "Error while logging out the user: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:218 +#, python-format +msgid "" +"Not authenticated error occurred. Will create session and try API call " +"again: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:258 +#, python-format +msgid "Task: %(task)s progress: %(prog)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:262 +#, python-format +msgid "Task %s status: success." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:266 +#: cinder/volume/drivers/vmware/api.py:271 +#, python-format +msgid "Task: %(task)s failed with error: %(err)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:290 +msgid "Lease is ready." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:294 +msgid "Lease initializing..." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:304 +#, python-format +msgid "Error: unknown lease state %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:51 +#, python-format +msgid "Read %(bytes)s out of %(max)s from ThreadSafePipe." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:56 +#, python-format +msgid "Completed transfer of size %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:102 +#, python-format +msgid "Initiating image service update on image: %(image)s with meta: %(meta)s" +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:117 +#, python-format +msgid "Glance image: %s is now active." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:123 +#, python-format +msgid "Glance image: %s is in killed state." 
+msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:132 +#, python-format +msgid "Glance image %(id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:171 +#, python-format +msgid "" +"Exception during HTTP connection close in VMwareHTTPWrite. Exception is " +"%s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:203 +#: cinder/volume/drivers/vmware/read_write_util.py:292 +msgid "Could not retrieve URL from lease." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:206 +#, python-format +msgid "Opening vmdk url: %s for write." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:231 +#, python-format +msgid "Written %s bytes to vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:242 +#: cinder/volume/drivers/vmware/read_write_util.py:318 +#, python-format +msgid "Updating progress to %s percent." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:258 +#: cinder/volume/drivers/vmware/read_write_util.py:334 +msgid "Lease released." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:260 +#: cinder/volume/drivers/vmware/read_write_util.py:336 +#, python-format +msgid "Lease is already in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:295 +#, python-format +msgid "Opening vmdk url: %s for read." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:307 +#, python-format +msgid "Read %s bytes from vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:150 +#, python-format +msgid "Error(s): %s occurred in the call to RetrievePropertiesEx." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:189 +#, python-format +msgid "No such SOAP method %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:198 +#, python-format +msgid "httplib error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:209 +#, python-format +msgid "Socket error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:218 +#, python-format +msgid "Type error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:225 +#, python-format +msgid "Error in %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:112 +#, python-format +msgid "Returning spec value %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:115 +#, python-format +msgid "Invalid spec value: %s specified." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:118 +#, python-format +msgid "Returning default spec value: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:169 +#, python-format +msgid "%s not set." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:174 +#, python-format +msgid "Successfully setup driver: %(driver)s for server: %(ip)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:219 +msgid "Backing not available, no operation to be performed." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:287 +#, python-format +msgid "" +"Unable to pick datastore to accommodate %(size)s bytes from the " +"datastores: %(dss)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:293 +#, python-format +msgid "" +"Selected datastore: %(datastore)s with %(host_count)d connected host(s) " +"for the volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:375 +#, python-format +msgid "" +"Unable to find suitable datastore for volume of size: %(vol)s GB under " +"host: %(host)s. 
More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:385 +#, python-format +msgid "Unable to find host to accommodate a disk of size: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:412 +#, python-format +msgid "" +"Unable to find suitable datastore for volume: %(vol)s under host: " +"%(host)s. More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:422 +#, python-format +msgid "Unable to create volume: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:441 +#, python-format +msgid "The instance: %s for which initialize connection is called, exists." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:448 +#, python-format +msgid "There is no backing for the volume: %s. Need to create one." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:456 +msgid "The instance for which initialize connection is called, does not exist." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:461 +#, python-format +msgid "Trying to boot from an empty volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:470 +#, python-format +msgid "" +"Returning connection_info: %(info)s for volume: %(volume)s with " +"connector: %(connector)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:518 +#, python-format +msgid "Snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:523 +#, python-format +msgid "There is no backing, so will not create snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:528 +#, python-format +msgid "Successfully created snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:549 +#, python-format +msgid "Delete snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:554 +#, python-format +msgid "There is no backing, and so there is no snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:558 +#, python-format +msgid "Successfully deleted snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:586 +#, python-format +msgid "Successfully cloned new backing: %(back)s from source VMDK file: %(vmdk)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:603 +#, python-format +msgid "" +"There is no backing for the source volume: %(svol)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:633 +#, python-format +msgid "" +"There is no backing for the source snapshot: %(snap)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:642 +#: cinder/volume/drivers/vmware/vmdk.py:982 +#, python-format +msgid "" +"There is no snapshot point for the snapshotted volume: %(snap)s. Not " +"creating any backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:678 +#, python-format +msgid "Cannot create image of disk format: %s. Only vmdk disk format is accepted." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:713 +#: cinder/volume/drivers/vmware/vmdk.py:771 +#, python-format +msgid "Fetching glance image: %(id)s to server: %(host)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:722 +#: cinder/volume/drivers/vmware/vmdk.py:792 +#, python-format +msgid "Done copying image: %(id)s to volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:725 +#, python-format +msgid "" +"Exception in copy_image_to_volume: %(excep)s. Deleting the backing: " +"%(back)s."
+msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:746 +#, python-format +msgid "Exception in _select_ds_for_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:749 +#, python-format +msgid "Selected datastore %(ds)s for new volume of size %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:784 +#, python-format +msgid "Exception in copy_image_to_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:787 +#, python-format +msgid "Deleting the backing: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:808 +#, python-format +msgid "Copy glance image: %s to create new volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:842 +msgid "Upload to glance of attached volume is not supported." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:847 +#, python-format +msgid "Copy Volume: %s to new image." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:853 +#, python-format +msgid "Backing not found, creating for volume: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:872 +#, python-format +msgid "Done copying volume %(vol)s to a new image %(img)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:922 +#, python-format +msgid "Relocating volume: %(backing)s to %(ds)s and %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:961 +#: cinder/volume/drivers/vmware/volumeops.py:630 +#, python-format +msgid "Successfully created clone: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:974 +#, python-format +msgid "" +"There is no backing for the snapshotted volume: %(snap)s. Not creating any" +" backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1010 +#, python-format +msgid "" +"There is no backing for the source volume: %(src)s. Not creating any " +"backing for volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1018 +#, python-format +msgid "Linked clone of source volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:94 +#, python-format +msgid "Downloading image: %s from glance image server as a flat vmdk file." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:107 +#: cinder/volume/drivers/vmware/vmware_images.py:126 +#, python-format +msgid "Downloaded image: %s from glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:113 +#, python-format +msgid "Downloading image: %s from glance image server using HttpNfc import." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:132 +#, python-format +msgid "Uploading image: %s to the Glance image server using HttpNfc export." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:158 +#, python-format +msgid "Uploaded image: %s to the Glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:87 +#, python-format +msgid "Did not find any backing with name: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:94 +#, python-format +msgid "Deleting the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:97 +#, python-format +msgid "Initiated deletion of VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:99 +#, python-format +msgid "Deleted the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:237 +#, python-format +msgid "There are no valid datastores attached to %s."
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:289 +#, python-format +msgid "" +"Creating folder: %(child_folder_name)s under parent folder: " +"%(parent_folder)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:306 +#, python-format +msgid "Child folder already present: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:314 +#, python-format +msgid "Created child folder: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:365 +#, python-format +msgid "Spec for creating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:383 +#, python-format +msgid "" +"Creating volume backing name: %(name)s disk_type: %(disk_type)s size_kb: " +"%(size_kb)s at folder: %(folder)s resource pool: %(resource_pool)s " +"datastore name: %(ds_name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:395 +#, python-format +msgid "Initiated creation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:398 +#, python-format +msgid "Successfully created volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:438 +#, python-format +msgid "Spec for relocating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:452 +#, python-format +msgid "" +"Relocating backing: %(backing)s to datastore: %(ds)s and resource pool: " +"%(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:462 +#, python-format +msgid "Initiated relocation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:464 +#, python-format +msgid "" +"Successfully relocated volume backing: %(backing)s to datastore: %(ds)s " +"and resource pool: %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:474 +#, python-format +msgid "Moving backing: %(backing)s to folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:479 +#, python-format +msgid "Initiated move of volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:482 +#, python-format +msgid "Successfully moved volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:494 +#, python-format +msgid "Snapshotting backing: %(backing)s with name: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:501 +#, python-format +msgid "Initiated snapshot of volume backing: %(backing)s named: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:505 +#, python-format +msgid "Successfully created snapshot: %(snap)s for volume backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:553 +#, python-format +msgid "Deleting the snapshot: %(name)s from backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:558 +#, python-format +msgid "" +"Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " +"delete anything." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:565 +#, python-format +msgid "Initiated snapshot: %(name)s deletion for backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:569 +#, python-format +msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:597 +#, python-format +msgid "Spec for cloning the backing: %s."
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:613 +#, python-format +msgid "" +"Creating a clone of backing: %(back)s, named: %(name)s, clone type: " +"%(type)s from snapshot: %(snap)s on datastore: %(ds)s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:627 +#, python-format +msgid "Initiated clone of backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:638 +#, python-format +msgid "Deleting file: %(file)s under datacenter: %(dc)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:646 +#, python-format +msgid "Initiated deletion via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:648 +#, python-format +msgid "Successfully deleted file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:701 +msgid "Copying disk data before snapshot of the VM" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:711 +#, python-format +msgid "Initiated copying disk data via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:713 +#, python-format +msgid "Successfully copied disk at: %(src)s to: %(dest)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:722 +#, python-format +msgid "Deleting vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:729 +#, python-format +msgid "Initiated deleting vmdk file via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:731 +#, python-format +msgid "Deleted vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/windows/windows.py:102 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:47 +#, python-format +msgid "" +"check_for_setup_error: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:53 +msgid "check_for_setup_error: there is no ISCSI traffic listening." +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:63 +#, python-format +msgid "" +"get_host_information: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:73 +#, python-format +msgid "" +"get_host_information: the ISCSI target information could not be " +"retrieved. WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:105 +#, python-format +msgid "" +"associate_initiator_with_iscsi_target: an association between initiator: " +"%(init)s and target name: %(target)s could not be established. WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:123 +#, python-format +msgid "" +"delete_iscsi_target: error when deleting the iscsi target associated with" +" target name: %(target)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:139 +#, python-format +msgid "" +"create_volume: error when creating the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:157 +#, python-format +msgid "" +"delete_volume: error when deleting the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:177 +#, python-format +msgid "" +"create_snapshot: error when creating the snapshot name: %(vol_name)s . 
" +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:193 +#, python-format +msgid "" +"create_volume_from_snapshot: error when creating the volume name: " +"%(vol_name)s from snapshot name: %(snap_name)s. WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:208 +#, python-format +msgid "" +"delete_snapshot: error when deleting the snapshot name: %(snap_name)s . " +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:223 +#, python-format +msgid "" +"create_iscsi_target: error when creating iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:240 +#, python-format +msgid "" +"remove_iscsi_target: error when deleting iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:255 +#, python-format +msgid "" +"add_disk_to_target: error adding disk associated to volume : %(vol_name)s" +" to the target name: %(tar_name)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:273 +#, python-format +msgid "" +"copy_vhd_disk: error when copying disk from source path : %(src_path)s to" +" destination path: %(dest_path)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:290 +#, python-format +msgid "" +"extend: error when extending the volume: %(vol_name)s .WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/flows/common.py:52 +#, python-format +msgid "Restoring source %(source_volid)s status to %(status)s" +msgstr "" + +#: cinder/volume/flows/common.py:58 +#, python-format +msgid "" +"Failed setting source volume %(source_volid)s back to its initial " +"%(source_status)s status" +msgstr "" + +#: cinder/volume/flows/common.py:83 +#, python-format +msgid "Updating volume: %(volume_id)s with %(update)s due to: %(reason)s" +msgstr "" + +#: cinder/volume/flows/common.py:90 +#: cinder/volume/flows/manager/create_volume.py:676 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(update)s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:81 +#, python-format +msgid "Originating snapshot status must be one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:103 +#, python-format +msgid "" +"Unable to create a volume from an originating source volume when its " +"status is not one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:126 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than the snapshot size " +"%(snap_size)sGB. They must be >= original snapshot size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:135 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than original volume size " +"%(source_size)sGB. They must be >= original volume size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:144 +#, python-format +msgid "Volume size %(size)s must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:186 +#, python-format +msgid "" +"Size of specified image %(image_size)sGB is larger than volume size " +"%(volume_size)sGB." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:194 +#, python-format +msgid "" +"Volume size %(volume_size)sGB cannot be smaller than the image minDisk " +"size %(min_disk)sGB." 
+msgstr "" + +#: cinder/volume/flows/api/create_volume.py:212 +#, python-format +msgid "Metadata property key %s greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:217 +#, python-format +msgid "Metadata property key %s value greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:254 +#, python-format +msgid "Availability zone '%s' is invalid" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:267 +msgid "Volume must be in the same availability zone as the snapshot" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:276 +msgid "Volume must be in the same availability zone as the source volume" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:315 +msgid "Volume type will be changed to be the same as the source volume." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:463 +#, python-format +msgid "Failed destroying volume entry %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:546 +#, python-format +msgid "Failed rolling back quota for %s reservations" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:590 +#, python-format +msgid "Failed to update quota for deleting volume: %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:678 +#: cinder/volume/flows/manager/create_volume.py:192 +#, python-format +msgid "Volume %s: create failed" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:682 +msgid "Unexpected build error:" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:105 +#, python-format +msgid "" +"Volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d due to " +"%(reason)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:124 +#, python-format +msgid "Volume %s: re-scheduled" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:141 +#, python-format +msgid "Updating volume %(volume_id)s with %(update)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:146 +#, python-format +msgid "Volume %s: resetting 'creating' status failed." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:165 +#, python-format +msgid "Volume %s: rescheduling failed" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:308 +#, python-format +msgid "" +"Failed notifying about the volume action %(event)s for volume " +"%(volume_id)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:345 +#, python-format +msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:347 +#, python-format +msgid "" +"Failed updating volume %(vol_id)s metadata using the provided " +"%(src_type)s %(src_id)s metadata" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:405 +#, python-format +msgid "" +"Failed fetching snapshot %(snapshot_id)s bootable flag using the provided" +" glance snapshot %(snapshot_ref_id)s volume reference" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:418 +#, python-format +msgid "Marking volume %s as bootable." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:421 +#, python-format +msgid "Failed updating volume %(volume_id)s bootable flag to true" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:448 +#, python-format +msgid "" +"Attempting download of %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s." 
+msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:455 +#: cinder/volume/flows/manager/create_volume.py:466 +#, python-format +msgid "" +"Failed to copy image %(image_id)s to volume: %(volume_id)s, error: " +"%(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:461 +#, python-format +msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:475 +#, python-format +msgid "" +"Downloaded image %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s successfully." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:512 +#, python-format +msgid "" +"Creating volume glance metadata for volume %(volume_id)s backed by image " +"%(image_id)s with: %(vol_metadata)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:526 +#, python-format +msgid "" +"Cloning %(volume_id)s from image %(image_id)s at location " +"%(image_location)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:552 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(updates)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:574 +#, python-format +msgid "Unable to create volume. Volume driver %s not initialized" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:588 +#, python-format +msgid "" +"Volume %(volume_id)s: being created using %(functor)s with specification:" +" %(volume_spec)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:611 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with creation provided " +"model %(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:619 +#, python-format +msgid "Volume %s: creating export" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:633 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with driver provided model " +"%(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:680 +#, python-format +msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" +msgstr "" + +#~ msgid "Error retrieving volume status: %s" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get system name" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get storage pool data" +#~ msgstr "" + +#~ msgid "Cannot find any Fibre Channel HBAs" +#~ msgstr "" + +#~ msgid "Volume status must be available or error" +#~ msgstr "" + +#~ msgid "No backend config with id %s" +#~ msgstr "" + +#~ msgid "No sm_flavor called %s" +#~ msgstr "" + +#~ msgid "No sm_volume with id %s" +#~ msgstr "" + +#~ msgid "Error: %s" +#~ msgstr "" + +#~ msgid "Unexpected state while cloning %s" +#~ msgstr "" + +#~ msgid "iSCSI device not found at %s" +#~ msgstr "" + +#~ msgid "Fibre Channel device not found." +#~ msgstr "" + +#~ msgid "Uncaught exception" +#~ msgstr "" + +#~ msgid "Out reactor registered" +#~ msgstr "" + +#~ msgid "CONSUMER GOT %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT QUEUED %(data)s" +#~ msgstr "" + +#~ msgid "Could not create IPC directory %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT %(data)s" +#~ msgstr "" + +#~ msgid "May specify only one of snapshot, imageRef or source volume" +#~ msgstr "" + +#~ msgid "Volume size cannot be lesser than the Snapshot size" +#~ msgstr "" + +#~ msgid "Unable to clone volumes that are in an error state" +#~ msgstr "" + +#~ msgid "Clones currently must be >= original volume size." 
+#~ msgstr "" + +#~ msgid "Volume size '%s' must be an integer and greater than 0" +#~ msgstr "" + +#~ msgid "Size of specified image is larger than volume size." +#~ msgstr "" + +#~ msgid "Image minDisk size is larger than the volume size." +#~ msgstr "" + +#~ msgid "" +#~ msgstr "" + +#~ msgid "Availability zone is invalid" +#~ msgstr "" + +#~ msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +#~ msgstr "" + +#~ msgid "volume %s: creating from snapshot" +#~ msgstr "" + +#~ msgid "volume %s: creating from existing volume" +#~ msgstr "" + +#~ msgid "volume %s: creating from image" +#~ msgstr "" + +#~ msgid "volume %s: creating" +#~ msgstr "" + +#~ msgid "Setting volume: %s status to error after failed image copy." +#~ msgstr "" + +#~ msgid "Unexpected Error: " +#~ msgstr "" + +#~ msgid "volume %s: creating export" +#~ msgstr "" + +#~ msgid "volume %s: create failed" +#~ msgstr "" + +#~ msgid "volume %s: created successfully" +#~ msgstr "" + +#~ msgid "volume %s: Error trying to reschedule create" +#~ msgstr "" + +#~ msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +#~ msgstr "" + +#~ msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +#~ msgstr "" + +#~ msgid "Downloaded image %(image_id)s to %(volume_id)s successfully." +#~ msgstr "" + +#~ msgid "Array Mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "LUN %(lun)s of size %(size)s MB is created." +#~ msgstr "" + +#~ msgid "Array mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "Failed to attach iser target for volume %(volume_id)s." +#~ msgstr "" + +#~ msgid "Fetching %s" +#~ msgstr "" + +#~ msgid "Link Local address is not found.:%s" +#~ msgstr "" + +#~ msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +#~ msgstr "" + +#~ msgid "Started %(name)s on %(_host)s:%(_port)s" +#~ msgstr "" + +#~ msgid "Unable to find a Fibre Channel volume device" +#~ msgstr "" + +#~ msgid "Volume device not found at %s" +#~ msgstr "" + +#~ msgid "Unable to find Volume Group: %s" +#~ msgstr "" + +#~ msgid "Failed to create Volume Group: %s" +#~ msgstr "" + +#~ msgid "snapshot %(snap_name)s: creating" +#~ msgstr "" + +#~ msgid "Running with CoraidDriver for ESM EtherCloud" +#~ msgstr "" + +#~ msgid "Update session cookie %(session)s" +#~ msgstr "" + +#~ msgid "Message : %(message)s" +#~ msgstr "" + +#~ msgid "Error while trying to set group: %(message)s" +#~ msgstr "" + +#~ msgid "Unable to find group: %(group)s" +#~ msgstr "" + +#~ msgid "ESM urlOpen error" +#~ msgstr "" + +#~ msgid "JSON Error" +#~ msgstr "" + +#~ msgid "Request without URL" +#~ msgstr "" + +#~ msgid "Configure data : %s" +#~ msgstr "" + +#~ msgid "Configure response : %s" +#~ msgstr "" + +#~ msgid "Unable to retrieve volume info for volume %(volname)s" +#~ msgstr "" + +#~ msgid "Cannot login on Coraid ESM" +#~ msgstr "" + +#~ msgid "Failed to create volume %(volname)s" +#~ msgstr "" + +#~ msgid "Failed to delete volume %(volname)s" +#~ msgstr "" + +#~ msgid "Failed to Create Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "Failed to Delete Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "Failed to Create Volume from Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "fmt = %(fmt)s backed by: %(backing_file)s" +#~ msgstr "" + +#~ msgid "Expected image to be in raw format, but is %s" +#~ msgstr "" + +#~ msgid "volume group %s doesn't exist" +#~ msgstr "" + +#~ msgid "Error retrieving volume stats: %s" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Could not get system name" +#~ msgstr "" + +#~ msgid "CPG (%s) must
be in a domain" +#~ msgstr "" + +#~ msgid "Error populating default encryption types!" +#~ msgstr "" + +#~ msgid "Unexpected error while running command." +#~ msgstr "" + +#~ msgid "Nexenta SA returned the error" +#~ msgstr "" + +#~ msgid "Ignored target group creation error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group member addition error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LU creation error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Copying metadata from snapshot %(snap_volume_id)s to %(volume_id)s" +#~ msgstr "" + +#~ msgid "Connection to glance failed" +#~ msgstr "" + +#~ msgid "Invalid snapshot" +#~ msgstr "" + +#~ msgid "Invalid input received" +#~ msgstr "" + +#~ msgid "Invalid volume type" +#~ msgstr "" + +#~ msgid "Invalid volume" +#~ msgstr "" + +#~ msgid "Invalid host" +#~ msgstr "" + +#~ msgid "Invalid auth key" +#~ msgstr "" + +#~ msgid "Invalid metadata" +#~ msgstr "" + +#~ msgid "Invalid metadata size" +#~ msgstr "" + +#~ msgid "Migration error" +#~ msgstr "" + +#~ msgid "Quota exceeded" +#~ msgstr "" + +#~ msgid "Connection to swift failed" +#~ msgstr "" + +#~ msgid "Volume migration failed" +#~ msgstr "" + +#~ msgid "SSH command injection detected" +#~ msgstr "" + +#~ msgid "Invalid qos specs" +#~ msgstr "" + +#~ msgid "debug in callback: %s" +#~ msgstr "" + +#~ msgid "Expected object of type: %s" +#~ msgstr "" + +#~ msgid "timefunc: '%(name)s' took %(total_time).2f secs" +#~ msgstr "" + +#~ msgid "base image still has %s snapshots so not deleting base image" +#~ msgstr "" + +#~ msgid "Failed to rename migration destination volume %(vol)s to %(name)s" +#~ msgstr "" + +#~ msgid "Resize volume \"%(name)s\" to %(size)s" +#~ msgstr "" + +#~ msgid "Volume \"%(name)s\" resized. New size is %(size)s" +#~ msgstr "" + +#~ msgid "Invalid snapshot backing file format: %s" +#~ msgstr "" + +#~ msgid "Extend volume from %(old_size) to %(new_size)" +#~ msgstr "" + +#~ msgid "pool %s doesn't exist" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Could not get system name." +#~ msgstr "" + +#~ msgid "Disk not found: %s" +#~ msgstr "" + +#~ msgid "read timed out" +#~ msgstr "" + +#~ msgid "check_for_setup_error." +#~ msgstr "" + +#~ msgid "check_for_setup_error: Can not get device type." +#~ msgstr "" + +#~ msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +#~ msgstr "" + +#~ msgid "_get_device_type: Storage Pool must be configured." +#~ msgstr "" + +#~ msgid "create_volume:volume name: %s." +#~ msgstr "" + +#~ msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +#~ msgstr "" + +#~ msgid "create_export: volume name:%s" +#~ msgstr "" + +#~ msgid "create_export:Volume %(name)s does not exist." +#~ msgstr "" + +#~ msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +#~ msgstr "" + +#~ msgid "terminate_connection:Host does not exist. Host name:%(host)s." +#~ msgstr "" + +#~ msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +#~ msgstr "" + +#~ msgid "create_snapshot:Device does not support snapshot." +#~ msgstr "" + +#~ msgid "create_snapshot:Resource pool needs 1GB valid size at least." +#~ msgstr "" + +#~ msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +#~ msgstr "" + +#~ msgid "create_snapshot:Snapshot does not exist. 
Snapshot name:%(name)s" +#~ msgstr "" + +#~ msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +#~ msgstr "" + +#~ msgid "delete_snapshot:Device does not support snapshot." +#~ msgstr "" + +#~ msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +#~ msgstr "" + +#~ msgid "_check_conf_file: %s" +#~ msgstr "" + +#~ msgid "Write login information to xml error. %s" +#~ msgstr "" + +#~ msgid "_get_login_info error. %s" +#~ msgstr "" + +#~ msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +#~ msgstr "" + +#~ msgid "_get_lun_set_info:%s" +#~ msgstr "" + +#~ msgid "_get_iscsi_info:%s" +#~ msgstr "" + +#~ msgid "CLI command:%s" +#~ msgstr "" + +#~ msgid "_execute_cli:%s" +#~ msgstr "" + +#~ msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +#~ msgstr "" + +#~ msgid "_get_tgt_iqn:iSCSI IP is %s." +#~ msgstr "" + +#~ msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +#~ msgstr "" + +#~ msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +#~ msgstr "" + +#~ msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +#~ msgstr "" + +#~ msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." +#~ msgstr "" + +#~ msgid "Ignored target creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group member addition error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LU creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LUN mapping entry addition error while ensuring export" +#~ msgstr "" + +#~ msgid "Invalid source volume %(reason)s." +#~ msgstr "" + +#~ msgid "The request is invalid." +#~ msgstr "" + +#~ msgid "Volume %(volume_id)s persistence file could not be found." +#~ msgstr "" + +#~ msgid "No disk at %(location)s" +#~ msgstr "" + +#~ msgid "Class %(class_name)s could not be found: %(exception)s" +#~ msgstr "" + +#~ msgid "Action not allowed." +#~ msgstr "" + +#~ msgid "Key pair %(key_name)s already exists." +#~ msgstr "" + +#~ msgid "Migration error: %(reason)s" +#~ msgstr "" + +#~ msgid "Maximum volume/snapshot size exceeded" +#~ msgstr "" + +#~ msgid "3PAR Host already exists: %(err)s. %(info)s" +#~ msgstr "" + +#~ msgid "Backup volume %(volume_id)s type not recognised." +#~ msgstr "" + +#~ msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s" +#~ msgstr "" + +#~ msgid "ssh_read: Read SSH timeout" +#~ msgstr "" + +#~ msgid "do_setup." +#~ msgstr "" + +#~ msgid "create_volume: volume name: %s." +#~ msgstr "" + +#~ msgid "delete_volume: volume name: %s." +#~ msgstr "" + +#~ msgid "create_cloned_volume: src volume: %(src)s tgt volume: %(tgt)s" +#~ msgstr "" + +#~ msgid "create_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" +#~ msgstr "" + +#~ msgid "delete_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" +#~ msgstr "" + +#~ msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Updating volume stats" +#~ msgstr "" + +#~ msgid "restore finished." +#~ msgstr "" + +#~ msgid "Error encountered during initialization of driver: %s" +#~ msgstr "" + +#~ msgid "Unable to update stats, driver is uninitialized" +#~ msgstr "" + +#~ msgid "Snapshot file at %s does not exist."
+#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %s does not exist" +#~ msgstr "" + +#~ msgid "Login to 3PAR array invalid" +#~ msgstr "" + +#~ msgid "There are no datastores present under %s." +#~ msgstr "" + +#~ msgid "Size for volume: %s not found, skipping secure delete." +#~ msgstr "" + +#~ msgid "Could not find attribute for LUN named %s" +#~ msgstr "" + +#~ msgid "Cleaning up incomplete backup operations" +#~ msgstr "" + +#~ msgid "Resetting volume %s to available (was backing-up)" +#~ msgstr "" + +#~ msgid "Resetting volume %s to error_restoring (was restoring-backup)" +#~ msgstr "" + +#~ msgid "Resetting backup %s to error (was creating)" +#~ msgstr "" + +#~ msgid "Resetting backup %s to available (was restoring)" +#~ msgstr "" + +#~ msgid "Resuming delete on backup: %s" +#~ msgstr "" + +#~ msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +#~ msgstr "" + +#~ msgid "create_backup finished. backup: %s" +#~ msgstr "" + +#~ msgid "delete_backup started, backup: %s" +#~ msgstr "" + +#~ msgid "delete_backup finished, backup %s deleted" +#~ msgstr "" + +#~ msgid "JSON transfer Error" +#~ msgstr "" + +#~ msgid "create volume error: %(err)s" +#~ msgstr "" + +#~ msgid "Create snapshot error." +#~ msgstr "" + +#~ msgid "Create luncopy error." +#~ msgstr "" + +#~ msgid "_find_host_lun_id transfer data error! " +#~ msgstr "" + +#~ msgid "ssh_read: Read SSH timeout." +#~ msgstr "" + +#~ msgid "There are no hosts in the inventory." +#~ msgstr "" + +#~ msgid "Unable to create volume: %(vol)s on the hosts: %(hosts)s." +#~ msgstr "" + +#~ msgid "Successfully cloned new backing: %s." +#~ msgstr "" + +#~ msgid "Successfully reverted clone: %(clone)s to snapshot: %(snapshot)s." +#~ msgstr "" + +#~ msgid "Copying backing files from %(src)s to %(dest)s." +#~ msgstr "" + +#~ msgid "Initiated copying of backing via task: %s." +#~ msgstr "" + +#~ msgid "Successfully copied backing to %s." +#~ msgstr "" + +#~ msgid "Registering backing at path: %s to inventory." +#~ msgstr "" + +#~ msgid "Initiated registering backing, task: %s." +#~ msgstr "" + +#~ msgid "Successfully registered backing: %s." +#~ msgstr "" + +#~ msgid "Reverting backing to snapshot: %s." +#~ msgstr "" + +#~ msgid "Initiated reverting snapshot via task: %s." +#~ msgstr "" + +#~ msgid "Successfully reverted to snapshot: %s." +#~ msgstr "" + +#~ msgid "Successfully copied disk data to: %s." +#~ msgstr "" + +#~ msgid "Error(s): %s occurred in the call to RetrieveProperties." +#~ msgstr "" + +#~ msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +#~ msgstr "" + +#~ msgid "Deploy v1 of the Cinder API. " +#~ msgstr "" + +#~ msgid "Deploy v2 of the Cinder API. " +#~ msgstr "" + +#~ msgid "_read_xml:%s" +#~ msgstr "" + +#~ msgid "request ip info is %s." +#~ msgstr "" + +#~ msgid "new str info is %s." +#~ msgstr "" + +#~ msgid "Failed to create iser target for volume %(volume_id)s." +#~ msgstr "" + +#~ msgid "Failed to remove iser target for volume %(volume_id)s."
+#~ msgstr "" + +#~ msgid "rtstool is not installed correctly" +#~ msgstr "" + +#~ msgid "Creating iser_target for: %s" +#~ msgstr "" + +#~ msgid "Failed to create iser target for volume id:%(vol_id)s: %(e)s" +#~ msgstr "" + +#~ msgid "Removing iser_target for: %s" +#~ msgstr "" + +#~ msgid "Failed to remove iser target for volume id:%(vol_id)s: %(e)s" +#~ msgstr "" + +#~ msgid "Volume %s does not exist, it seems it was already deleted" +#~ msgstr "" + +#~ msgid "Executing zfs send/recv on the appliance" +#~ msgstr "" + +#~ msgid "zfs send/recv done, new volume %s created" +#~ msgstr "" + +#~ msgid "Failed to delete temp snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" + +#~ msgid "Failed to delete zfs recv snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" + +#~ msgid "rbd export-diff failed - %s" +#~ msgstr "" + +#~ msgid "rbd import-diff failed - %s" +#~ msgstr "" + +#~ msgid "%s is not on GPFS. Perhaps GPFS not mounted." +#~ msgstr "" + +#~ msgid "Folder %s does not exist, it seems it was already deleted." +#~ msgstr "" + +#~ msgid "No 'os-update_readonly_flag' was specified in request." +#~ msgstr "" + +#~ msgid "Volume 'readonly' flag must be specified in request as a boolean." +#~ msgstr "" + +#~ msgid "ISER provider_location not stored, using discovery" +#~ msgstr "" + +#~ msgid "Could not find iSER export for volume %s" +#~ msgstr "" + +#~ msgid "ISER Discovery: Found %s" +#~ msgstr "" + +#~ msgid "Failed to access the device on the path %(path)s: %(error)s." +#~ msgstr "" + +#~ msgid "iSER device not found at %s" +#~ msgstr "" + +#~ msgid "Found iSER node %(host_device)s (after %(tries)s rescans)." +#~ msgstr "" + +#~ msgid "Skipping ensure_export. No iser_target provisioned for volume: %s" +#~ msgstr "" + +#~ msgid "Skipping remove_export. No iser_target provisioned for volume: %s" +#~ msgstr "" + +#~ msgid "Downloading image: %s from glance image server." +#~ msgstr "" + +#~ msgid "Uploading image: %s to the Glance image server." +#~ msgstr "" + +#~ msgid "Invalid request body" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: prefix %s" +#~ msgstr "" + +#~ msgid "Schedule volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete schedule volume using flow: %s" +#~ msgstr "" + +#~ msgid "Create volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete create volume workflow" +#~ msgstr "" + +#~ msgid "Expected volume result not found" +#~ msgstr "" + +#~ msgid "Manager volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete manager volume workflow" +#~ msgstr "" + +#~ msgid "Unable to update stats, driver is uninitialized" +#~ msgstr "" + +#~ msgid "Bad response from server: %s" +#~ msgstr "" + +#~ msgid "%(flow)s has moved into state %(state)s from state %(old_state)s" +#~ msgstr "" + +#~ msgid "No request spec, will not reschedule" +#~ msgstr "" + +#~ msgid "No retry filter property or associated retry info, will not reschedule" +#~ msgstr "" + +#~ msgid "Retry info not present, will not reschedule" +#~ msgstr "" + +#~ msgid "Clear capabilities" +#~ msgstr "" + +#~ msgid "This usually means the volume was never successfully created." +#~ msgstr "" + +#~ msgid "setting LU upper (end) limit to %s" +#~ msgstr "" + +#~ msgid "Can't find lun or lun group in array" +#~ msgstr "" + +#~ msgid "Volume to be restored to is smaller than the backup to be restored" +#~ msgstr "" + +#~ msgid "Volume driver '%(driver)s' not initialized."
+#~ msgstr "" + +#~ msgid "in looping call" +#~ msgstr "" + +#~ msgid "Is the appropriate service running?" +#~ msgstr "" + +#~ msgid "Could not find another host" +#~ msgstr "" + +#~ msgid "Not enough allocatable volume gigabytes remaining" +#~ msgstr "" + +#~ msgid "Unable to update stats on non-initialized Volume Group: %s" +#~ msgstr "" + +#~ msgid "do_setup: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "migrate_volume started with more than one vdisk copy" +#~ msgstr "" + +#~ msgid "migrate_volume: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "Selected datastore: %s for the volume." +#~ msgstr "" + +#~ msgid "There are no valid datastores present under %s." +#~ msgstr "" + +#~ msgid "Unable to create volume, driver not initialized" +#~ msgstr "" + +#~ msgid "Migration %(migration_id)s could not be found." +#~ msgstr "" + +#~ msgid "Bad driver response status: %(status)s" +#~ msgstr "" + +#~ msgid "Instance %(instance_id)s could not be found." +#~ msgstr "" + +#~ msgid "Volume retype failed: %(reason)s" +#~ msgstr "" + +#~ msgid "SIGTERM received" +#~ msgstr "" + +#~ msgid "Child %(pid)d exited with status %(code)d" +#~ msgstr "" + +#~ msgid "_wait_child %d" +#~ msgstr "" + +#~ msgid "wait wrap.failed %s" +#~ msgstr "" + +#~ msgid "" +#~ "Report interval must be less than " +#~ "service down time. Current config: " +#~ ". Setting " +#~ "service_down_time to: %(new_service_down_time)s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target for volume %(name)s." +#~ msgstr "" + +#~ msgid "Updating iscsi target: %s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target %(name)s: %(e)s" +#~ msgstr "" + +#~ msgid "Caught '%(exception)s' exception." +#~ msgstr "" + +#~ msgid "Get code level failed" +#~ msgstr "" + +#~ msgid "do_setup: Could not get system name" +#~ msgstr "" + +#~ msgid "Failed to get license information." +#~ msgstr "" + +#~ msgid "do_setup: No configured nodes" +#~ msgstr "" + +#~ msgid "enter: _get_chap_secret_for_host: host name %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_chap_secret_for_host: host name " +#~ "%(host_name)s with secret %(chap_secret)s" +#~ msgstr "" + +#~ msgid "" +#~ "_create_host: Cannot clean host name. " +#~ "Host name is not unicode or string" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: %s" +#~ msgstr "" + +#~ msgid "leave: _get_host_from_connector: host %s" +#~ msgstr "" + +#~ msgid "enter: _create_host: host %s" +#~ msgstr "" + +#~ msgid "_create_host: No connector ports" +#~ msgstr "" + +#~ msgid "leave: _create_host: host %(host)s - %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +#~ msgstr "" + +#~ msgid "" +#~ "storwize_svc_multihostmap_enabled is set to " +#~ "False, Not allow multi host mapping" +#~ msgstr "" + +#~ msgid "volume %s mapping to multi host" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _map_vol_to_host: LUN %(result_lun)s, " +#~ "volume %(volume_name)s, host %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "leave: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "_create_host failed to return the host name." +#~ msgstr "" + +#~ msgid "_get_host_from_connector failed to return the host name for connector" +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to any host found." +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: Multiple mappings of " +#~ "volume %(vol_name)s found, no host " +#~ "specified."
+#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to host %(host_name)s found" +#~ msgstr "" + +#~ msgid "protocol must be specified as ' iSCSI' or ' FC'" +#~ msgstr "" + +#~ msgid "enter: _create_vdisk: vdisk %s " +#~ msgstr "" + +#~ msgid "" +#~ "_create_vdisk %(name)s - did not find success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find success " +#~ "message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find mapping " +#~ "id in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to prepare FlashCopy" +#~ " from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "Unexpected mapping status %(status)s for " +#~ "mapping %(id)s. Attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Mapping %(id)s prepare failed to " +#~ "complete within the allotted %(to)d " +#~ "seconds timeout. Terminating." +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s with " +#~ "exception %(ex)s" +#~ msgstr "" + +#~ msgid "_prepare_fc_map: %s" +#~ msgstr "" + +#~ msgid "" +#~ "_start_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "enter: _run_flashcopy: execute FlashCopy from" +#~ " source %(source)s to target %(target)s" +#~ msgstr "" + +#~ msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +#~ msgstr "" + +#~ msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %(src_vdisk)s (%(src_id)s) does not exist" +#~ msgstr "" + +#~ msgid "" +#~ "_create_copy: cannot get source vdisk " +#~ "%(src)s capacity from vdisk attributes " +#~ "%(attr)s" +#~ msgstr "" + +#~ msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_flashcopy_mapping_attributes: mapping " +#~ "%(fc_map_id)s, attributes %(attributes)s" +#~ msgstr "" + +#~ msgid "enter: _is_vdisk_defined: vdisk %s " +#~ msgstr "" + +#~ msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +#~ msgstr "" + +#~ msgid "enter: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "warning: Tried to delete vdisk %s but it does not exist."
+#~ msgstr "" + +#~ msgid "leave: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "" +#~ "_add_vdisk_copy %(name)s - did not find" +#~ " success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "_get_vdisk_copy_attrs: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "_get_pool_attrs: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "enter: _execute_command_and_parse_attributes: command %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _execute_command_and_parse_attributes:\n" +#~ "command: %(cmd)s\n" +#~ "attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "_get_hdr_dic: attribute headers and values do not match.\n" +#~ " Headers: %(header)s\n" +#~ " Values: %(row)s" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ "stdout: %(out)s\n" +#~ "stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "Did not find expected column in %(fun)s: %(hdr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Volume size %(size)s cannot be lesser" +#~ " than the snapshot size %(snap_size)s. " +#~ "They must be >= original snapshot " +#~ "size." +#~ msgstr "" + +#~ msgid "" +#~ "Clones currently disallowed when %(size)s " +#~ "< %(source_size)s. They must be >= " +#~ "original volume size." +#~ msgstr "" + +#~ msgid "" +#~ "Size of specified image %(image_size)s " +#~ "is larger than volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "" +#~ "Image minDisk size %(min_disk)s is " +#~ "larger than the volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "Updating volume %(volume_id)s with %(update)s" +#~ msgstr "" + +#~ msgid "Volume %s: resetting 'creating' status failed" +#~ msgstr "" + +#~ msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s" +#~ msgstr "" + +#~ msgid "Marking volume %s as bootable" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting download of %(image_id)s " +#~ "(%(image_location)s) to volume %(volume_id)s" +#~ msgstr "" + +#~ msgid "" +#~ "Downloaded image %(image_id)s (%(image_location)s)" +#~ " to volume %(volume_id)s successfully" +#~ msgstr "" + +#~ msgid "" +#~ "Creating volume glance metadata for " +#~ "volume %(volume_id)s backed by image " +#~ "%(image_id)s with: %(vol_metadata)s" +#~ msgstr "" + +#~ msgid "" +#~ "Cloning %(volume_id)s from image %(image_id)s" +#~ " at location %(image_location)s" +#~ msgstr "" + diff --git a/cinder/locale/pt/LC_MESSAGES/cinder.po b/cinder/locale/pt/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..4269494ddd --- /dev/null +++ b/cinder/locale/pt/LC_MESSAGES/cinder.po @@ -0,0 +1,10736 @@ +# Portuguese translations for cinder. +# Copyright (C) 2013 ORGANIZATION +# This file is distributed under the same license as the cinder project. 
+# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Cinder\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-02-03 06:16+0000\n" +"PO-Revision-Date: 2013-05-29 08:13+0000\n" +"Last-Translator: openstackjenkins \n" +"Language-Team: Portuguese " +"(http://www.transifex.com/projects/p/openstack/language/pt/)\n" +"Plural-Forms: nplurals=2; plural=(n != 1)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:102 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:66 cinder/brick/exception.py:33 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:88 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:107 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:112 +#, python-format +msgid "Volume driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:116 +#, python-format +msgid "Backup driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:120 +#, python-format +msgid "Connection to glance failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:124 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:129 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:133 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:137 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:141 +msgid "Volume driver not ready." +msgstr "" + +#: cinder/exception.py:145 cinder/brick/exception.py:74 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:150 +#, python-format +msgid "Invalid snapshot: %(reason)s" +msgstr "" + +#: cinder/exception.py:154 +#, python-format +msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:159 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:163 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:167 +msgid "The results are invalid." +msgstr "" + +#: cinder/exception.py:171 +#, python-format +msgid "Invalid input received: %(reason)s" +msgstr "" + +#: cinder/exception.py:175 +#, python-format +msgid "Invalid volume type: %(reason)s" +msgstr "" + +#: cinder/exception.py:179 +#, python-format +msgid "Invalid volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:183 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:187 +#, python-format +msgid "Invalid host: %(reason)s" +msgstr "" + +#: cinder/exception.py:193 cinder/brick/exception.py:81 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:197 +#, python-format +msgid "Invalid auth key: %(reason)s" +msgstr "" + +#: cinder/exception.py:201 +#, python-format +msgid "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" +msgstr "" + +#: cinder/exception.py:206 +msgid "Service is unavailable at this time." 
+msgstr "" + +#: cinder/exception.py:210 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:214 +#, python-format +msgid "The device in the path %(path)s is unavailable: %(reason)s" +msgstr "" + +#: cinder/exception.py:218 +#, python-format +msgid "Expected a uuid but received %(uuid)s." +msgstr "" + +#: cinder/exception.py:222 cinder/brick/exception.py:68 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:228 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:232 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "" +"Volume %(volume_id)s has no administration metadata with key " +"%(metadata_key)s." +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Invalid metadata: %(reason)s" +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Invalid metadata size: %(reason)s" +msgstr "" + +#: cinder/exception.py:250 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:255 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:264 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:269 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s deletion is not allowed with volumes " +"present with the type." +msgstr "" + +#: cinder/exception.py:274 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:278 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:282 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:287 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:291 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:295 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:328 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:332 +#, python-format +msgid "Unknown quota resources %(unknown)s." 
+msgstr "" + +#: cinder/exception.py:336 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:340 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:344 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:348 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:352 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:356 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:365 +#, python-format +msgid "Volume Type %(id)s already exists." +msgstr "" + +#: cinder/exception.py:369 +#, python-format +msgid "Volume type encryption for type %(type_id)s already exists." +msgstr "" + +#: cinder/exception.py:373 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:377 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:381 +#, python-format +msgid "Could not find parameter %(param)s" +msgstr "" + +#: cinder/exception.py:385 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:389 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:398 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:402 +#, python-format +msgid "Quota exceeded: code=%(code)s" +msgstr "" + +#: cinder/exception.py:409 +#, python-format +msgid "" +"Requested volume or snapshot exceeds allowed Gigabytes quota. Requested " +"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." +msgstr "" + +#: cinder/exception.py:415 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:419 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:423 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:427 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:432 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:436 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:440 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:444 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:449 +#, python-format +msgid "Glance metadata for volume/snapshot %(id)s cannot be found." 
+msgstr "" + +#: cinder/exception.py:453 +#, python-format +msgid "Failed to export for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:457 +#, python-format +msgid "Failed to create metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:461 +#, python-format +msgid "Failed to update metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:465 +#, python-format +msgid "Failed to copy metadata to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:469 +#, python-format +msgid "Failed to copy image to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:473 +msgid "Invalid Ceph args provided for backup rbd operation" +msgstr "" + +#: cinder/exception.py:477 +msgid "An error has occurred during backup operation" +msgstr "" + +#: cinder/exception.py:481 +msgid "Backup RBD operation failed" +msgstr "" + +#: cinder/exception.py:485 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:489 +msgid "Failed to identify volume backend." +msgstr "" + +#: cinder/exception.py:493 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "" + +#: cinder/exception.py:497 +#, python-format +msgid "Connection to swift failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:501 +#, python-format +msgid "Transfer %(transfer_id)s could not be found." +msgstr "" + +#: cinder/exception.py:505 +#, python-format +msgid "Volume migration failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:509 +#, python-format +msgid "SSH command injection detected: %(command)s" +msgstr "" + +#: cinder/exception.py:513 +#, python-format +msgid "QoS Specs %(specs_id)s already exists." +msgstr "" + +#: cinder/exception.py:517 +#, python-format +msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:522 +#, python-format +msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "No such QoS spec %(specs_id)s." +msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:536 +#, python-format +msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:541 +#, python-format +msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." +msgstr "" + +#: cinder/exception.py:546 +#, python-format +msgid "Invalid qos specs: %(reason)s" +msgstr "" + +#: cinder/exception.py:550 +#, python-format +msgid "QoS Specs %(specs_id)s is still associated with entities." +msgstr "" + +#: cinder/exception.py:554 +#, python-format +msgid "key manager error: %(reason)s" +msgstr "" + +#: cinder/exception.py:560 +msgid "Coraid Cinder Driver exception." +msgstr "" + +#: cinder/exception.py:564 +msgid "Failed to encode json data." +msgstr "" + +#: cinder/exception.py:568 +msgid "Login on ESM failed." +msgstr "" + +#: cinder/exception.py:572 +msgid "Relogin on ESM failed." +msgstr "" + +#: cinder/exception.py:576 +#, python-format +msgid "Group with name \"%(group_name)s\" not found." +msgstr "" + +#: cinder/exception.py:580 +#, python-format +msgid "ESM configure request failed: %(message)s." +msgstr "" + +#: cinder/exception.py:584 +#, python-format +msgid "Coraid ESM not available with reason: %(reason)s." +msgstr "" + +#: cinder/exception.py:589 +msgid "Zadara Cinder Driver exception." 
+msgstr "" + +#: cinder/exception.py:593 +#, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:597 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:601 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:605 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:609 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:613 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:618 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:622 +msgid "SolidFire Cinder Driver exception" +msgstr "" + +#: cinder/exception.py:626 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:630 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:636 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:641 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:645 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:649 cinder/exception.py:662 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:654 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:658 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/manager.py:133 +msgid "Notifying Schedulers of capabilities ..." +msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:105 +#, python-format +msgid "" +"Default quota for resource: %(res)s is set by the default quota flag: " +"quota_%(res)s, it is now deprecated. Please use the the default quota " +"class for default quota." +msgstr "" + +#: cinder/quota.py:748 +#, python-format +msgid "Created reservations %s" +msgstr "" + +#: cinder/quota.py:770 +#, python-format +msgid "Failed to commit reservations %s" +msgstr "" + +#: cinder/quota.py:790 +#, python-format +msgid "Failed to roll back reservations %s" +msgstr "" + +#: cinder/quota.py:876 +msgid "Cannot register resource" +msgstr "" + +#: cinder/quota.py:879 +msgid "Cannot register resources" +msgstr "" + +#: cinder/quota_utils.py:46 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume - " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/quota_utils.py:56 cinder/transfer/api.py:168 +#: cinder/volume/flows/api/create_volume.py:520 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/service.py:95 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:108 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:148 +#, python-format +msgid "" +"Report interval must be less than service down time. Current config " +"service_down_time: %(service_down_time)s, report_interval for this: " +"service is: %(report_interval)s. 
Setting global service_down_time to: " +"%(new_down_time)s" +msgstr "" + +#: cinder/service.py:216 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:255 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:270 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:276 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:298 +#, python-format +msgid "" +"Value of config option %(name)s_workers must be integer greater than 1. " +"Input value ignored." +msgstr "" + +#: cinder/service.py:373 +msgid "serve() can only be called once" +msgstr "" + +#: cinder/service.py:379 cinder/openstack/common/service.py:166 +#: cinder/openstack/common/service.py:384 +msgid "Full set of CONF:" +msgstr "" + +#: cinder/service.py:387 +#, python-format +msgid "%s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Can not translate %s to integer." +msgstr "" + +#: cinder/utils.py:127 +#, python-format +msgid "May specify only one of %s" +msgstr "" + +#: cinder/utils.py:212 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:228 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:412 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:423 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:698 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:759 +#, python-format +msgid "Volume driver %s not initialized" +msgstr "" + +#: cinder/wsgi.py:127 cinder/openstack/common/sslutils.py:50 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: cinder/wsgi.py:130 cinder/openstack/common/sslutils.py:53 +#, python-format +msgid "Unable to find ca_file : %s" +msgstr "" + +#: cinder/wsgi.py:133 cinder/openstack/common/sslutils.py:56 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: cinder/wsgi.py:136 cinder/openstack/common/sslutils.py:59 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:169 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:206 +#, python-format +msgid "Started %(name)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:244 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:313 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." 
+msgstr "" + +#: cinder/api/common.py:92 cinder/api/common.py:126 cinder/volume/api.py:266 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:95 cinder/api/common.py:130 cinder/volume/api.py:263 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:120 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:134 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:162 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:189 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:182 +msgid "Initializing extension manager." +msgstr "" + +#: cinder/api/extensions.py:197 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:235 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:236 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:240 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:256 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:262 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:276 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:287 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:356 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:266 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:463 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:786 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:907 +msgid "subclasses must implement construct()!" 
+msgstr "" + +#: cinder/api/contrib/admin_actions.py:81 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:227 +#: cinder/api/contrib/volume_transfer.py:157 +#: cinder/api/contrib/volume_transfer.py:193 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:224 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:60 +msgid "Snapshot not found." +msgstr "" + +#: cinder/api/contrib/hosts.py:86 cinder/api/openstack/wsgi.py:245 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:136 +#, python-format +msgid "Host '%s' could not be found." +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:168 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:180 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:206 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:214 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:111 +msgid "Please specify a name for QoS specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:220 +msgid "Failed to disassociate qos specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:222 +msgid "Qos specs still in use." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:298 +#: cinder/api/contrib/qos_specs_manage.py:351 +msgid "Volume Type id must not be None." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:72 +msgid "Missing required element quota_class_set in request body." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:81 +msgid "Quota class limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:85 +msgid "Quota class limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:60 +msgid "Quota limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quotas.py:65 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:100 +msgid "Missing required element quota_set in request body." +msgstr "" + +#: cinder/api/contrib/quotas.py:111 +#, python-format +msgid "Bad key(s) in quota set: %s" +msgstr "" + +#: cinder/api/contrib/scheduler_hints.py:36 +msgid "Malformed scheduler_hints attribute" +msgstr "" + +#: cinder/api/contrib/services.py:84 +msgid "" +"Query by service parameter is deprecated. Please use binary parameter " +"instead." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:51 +msgid "'status' must be specified." 
+msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:61 +#, python-format +msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:67 +#, python-format +msgid "" +"Provided snapshot status %(provided)s not allowed for snapshot with " +"status %(current)s." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:79 +msgid "progress must be an integer percentage" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:101 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:105 +#: cinder/api/v1/snapshot_metadata.py:75 cinder/api/v1/volume_metadata.py:75 +#: cinder/api/v2/snapshot_metadata.py:75 cinder/api/v2/volume_metadata.py:74 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:108 +#: cinder/api/v1/snapshot_metadata.py:79 cinder/api/v1/volume_metadata.py:79 +#: cinder/api/v2/snapshot_metadata.py:79 cinder/api/v2/volume_metadata.py:78 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:150 +msgid "" +"Key names can only contain alphanumeric characters, underscores, periods," +" colons and hyphens." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:99 +#, python-format +msgid "" +"Invalid request to attach volume to an instance %(instance_uuid)s and a " +"host %(host_name)s simultaneously" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:107 +msgid "Invalid request to attach volume to an invalid target" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:111 +msgid "" +"Invalid request to attach volume with an invalid mode. Attaching mode " +"should be 'rw' or 'ro'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:196 +msgid "Unable to fetch connection information from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:216 +msgid "Unable to terminate volume connection from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:229 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:237 +msgid "Bad value for 'force' parameter." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:240 +msgid "'force' is not string or bool." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:280 +msgid "New volume size must be specified as an integer." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:299 +msgid "Must specify readonly in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:307 +msgid "Bad value for 'readonly'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:311 +msgid "'readonly' not string or bool" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:325 +msgid "New volume type must be specified." 
+msgstr "" + +#: cinder/api/contrib/volume_transfer.py:131 +msgid "Listing volume transfers" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:147 +#, python-format +msgid "Creating new volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:162 +#, python-format +msgid "Creating transfer of volume %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:183 +#, python-format +msgid "Accepting volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:196 +#, python-format +msgid "Accepting transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:217 +#, python-format +msgid "Delete transfer with id: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:64 +msgid "key_size must be non-negative" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:67 +msgid "key_size must be an integer" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:73 +msgid "provider must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:75 +msgid "control_location must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:83 +#, python-format +msgid "Valid control location are: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:111 +msgid "Cannot create encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:115 +msgid "Create body is not valid." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:157 +msgid "Cannot delete encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/middleware/auth.py:108 +msgid "Invalid service catalog json." +msgstr "" + +#: cinder/api/middleware/fault.py:44 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:53 cinder/api/openstack/wsgi.py:984 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/fault.py:69 +#, python-format +msgid "%(exception)s: %(explanation)s" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:55 cinder/api/middleware/sizelimit.py:64 +#: cinder/api/middleware/sizelimit.py:78 +msgid "Request is too large." +msgstr "" + +#: cinder/api/openstack/__init__.py:69 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:80 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:104 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:126 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." 
+msgstr "" + +#: cinder/api/openstack/wsgi.py:220 cinder/api/openstack/wsgi.py:634 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:639 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:677 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:682 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:685 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:793 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:799 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:803 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:914 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:917 cinder/api/openstack/wsgi.py:930 +#: cinder/api/v1/snapshot_metadata.py:53 cinder/api/v1/snapshot_metadata.py:71 +#: cinder/api/v1/snapshot_metadata.py:96 cinder/api/v1/snapshot_metadata.py:121 +#: cinder/api/v1/volume_metadata.py:53 cinder/api/v1/volume_metadata.py:71 +#: cinder/api/v1/volume_metadata.py:96 cinder/api/v1/volume_metadata.py:121 +#: cinder/api/v2/snapshot_metadata.py:53 cinder/api/v2/snapshot_metadata.py:71 +#: cinder/api/v2/snapshot_metadata.py:96 cinder/api/v2/snapshot_metadata.py:121 +#: cinder/api/v2/volume_metadata.py:52 cinder/api/v2/volume_metadata.py:70 +#: cinder/api/v2/volume_metadata.py:95 cinder/api/v2/volume_metadata.py:120 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:927 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:939 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:987 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:139 cinder/api/v2/limits.py:138 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:264 cinder/api/v2/limits.py:261 +msgid "This request was rate-limited." 
+msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:37 cinder/api/v1/snapshot_metadata.py:117 +#: cinder/api/v1/snapshot_metadata.py:156 cinder/api/v2/snapshot_metadata.py:37 +#: cinder/api/v2/snapshot_metadata.py:117 +#: cinder/api/v2/snapshot_metadata.py:156 +msgid "snapshot does not exist" +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:139 +#: cinder/api/v1/snapshot_metadata.py:149 cinder/api/v1/volume_metadata.py:139 +#: cinder/api/v1/volume_metadata.py:149 cinder/api/v2/snapshot_metadata.py:139 +#: cinder/api/v2/snapshot_metadata.py:149 cinder/api/v2/volume_metadata.py:138 +#: cinder/api/v2/volume_metadata.py:148 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:119 cinder/api/v2/snapshots.py:120 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:184 +msgid "'volume_id' must be specified" +msgstr "" + +#: cinder/api/v1/snapshots.py:182 cinder/api/v2/snapshots.py:193 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:186 cinder/api/v2/snapshots.py:202 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:37 cinder/api/v1/volume_metadata.py:117 +#: cinder/api/v1/volume_metadata.py:156 cinder/api/v2/volume_metadata.py:36 +#: cinder/api/v2/volume_metadata.py:116 cinder/api/v2/volume_metadata.py:155 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:111 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:290 cinder/api/v2/volumes.py:228 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:344 cinder/api/v1/volumes.py:348 +#: cinder/api/v2/volumes.py:298 cinder/api/v2/volumes.py:302 +msgid "Invalid imageRef provided." +msgstr "" + +#: cinder/api/v1/volumes.py:388 cinder/api/v2/volumes.py:354 +#, python-format +msgid "snapshot id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:401 +#, python-format +msgid "source vol id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:412 cinder/api/v2/volumes.py:377 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/v1/volumes.py:496 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/snapshots.py:111 cinder/api/v2/snapshots.py:126 +#: cinder/api/v2/snapshots.py:267 +msgid "Snapshot could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:174 cinder/api/v2/snapshots.py:234 +#: cinder/api/v2/volumes.py:313 cinder/api/v2/volumes.py:419 +#, python-format +msgid "Missing required element '%s' in request body" +msgstr "" + +#: cinder/api/v2/snapshots.py:190 cinder/api/v2/volumes.py:217 +#: cinder/api/v2/volumes.py:234 cinder/api/v2/volumes.py:449 +msgid "Volume could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:230 cinder/api/v2/volumes.py:415 +msgid "Missing request body" +msgstr "" + +#: cinder/api/v2/types.py:70 +msgid "Volume type not found" +msgstr "" + +#: cinder/api/v2/volumes.py:237 +msgid "Volume cannot be deleted while in attached state" +msgstr "" + +#: cinder/api/v2/volumes.py:343 +msgid "Volume type not found." 
+msgstr "" + +#: cinder/api/v2/volumes.py:366 +#, python-format +msgid "source volume id:%s not found" +msgstr "" + +#: cinder/api/v2/volumes.py:472 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:66 +msgid "Backup status must be available or error" +msgstr "" + +#: cinder/backup/api.py:105 +msgid "Volume to be backed up must be available" +msgstr "" + +#: cinder/backup/api.py:140 +msgid "Backup status must be available" +msgstr "" + +#: cinder/backup/api.py:145 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:154 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:170 +msgid "Volume to be restored to must be available" +msgstr "" + +#: cinder/backup/api.py:176 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." +msgstr "" + +#: cinder/backup/api.py:181 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:97 +msgid "NULL host not allowed for volume backend lookup." +msgstr "" + +#: cinder/backup/manager.py:100 +#, python-format +msgid "Checking hostname '%s' for backend info." +msgstr "" + +#: cinder/backup/manager.py:107 +#, python-format +msgid "Backend not found in hostname (%s) so using default." +msgstr "" + +#: cinder/backup/manager.py:117 +#, python-format +msgid "Manager requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:120 cinder/backup/manager.py:132 +msgid "Fetching default backend." +msgstr "" + +#: cinder/backup/manager.py:123 +#, python-format +msgid "Volume manager for backend '%s' does not exist." +msgstr "" + +#: cinder/backup/manager.py:129 +#, python-format +msgid "Driver requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:147 +#, python-format +msgid "" +"Registering backend %(backend)s (host=%(host)s " +"backend_name=%(backend_name)s)." +msgstr "" + +#: cinder/backup/manager.py:154 +#, python-format +msgid "Registering default backend %s." +msgstr "" + +#: cinder/backup/manager.py:158 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)." +msgstr "" + +#: cinder/backup/manager.py:165 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s." +msgstr "" + +#: cinder/backup/manager.py:184 +msgid "Cleaning up incomplete backup operations." +msgstr "" + +#: cinder/backup/manager.py:189 +#, python-format +msgid "Resetting volume %s to available (was backing-up)." +msgstr "" + +#: cinder/backup/manager.py:194 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)." +msgstr "" + +#: cinder/backup/manager.py:206 +#, python-format +msgid "Resetting backup %s to error (was creating)." +msgstr "" + +#: cinder/backup/manager.py:212 +#, python-format +msgid "Resetting backup %s to available (was restoring)." +msgstr "" + +#: cinder/backup/manager.py:217 +#, python-format +msgid "Resuming delete on backup: %s." +msgstr "" + +#: cinder/backup/manager.py:225 +#, python-format +msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:237 +#, python-format +msgid "" +"Create backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s." 
+msgstr "" + +#: cinder/backup/manager.py:249 +#, python-format +msgid "" +"Create backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:282 +#, python-format +msgid "Create backup finished. backup: %s." +msgstr "" + +#: cinder/backup/manager.py:286 +#, python-format +msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:299 +#, python-format +msgid "" +"Restore backup aborted: expected volume status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:310 +#, python-format +msgid "" +"Restore backup aborted: expected backup status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:329 +#, python-format +msgid "" +"Restore backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:360 +#, python-format +msgid "" +"Restore backup finished, backup %(backup_id)s restored to volume " +"%(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:379 +#, python-format +msgid "Delete backup started, backup: %s." +msgstr "" + +#: cinder/backup/manager.py:386 +#, python-format +msgid "" +"Delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:399 +#, python-format +msgid "" +"Delete backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:422 +#, python-format +msgid "Delete backup finished, backup %s deleted." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:116 +msgid "" +"rbd striping not supported - ignoring configuration settings for rbd " +"striping" +msgstr "" + +#: cinder/backup/drivers/ceph.py:147 +#, python-format +msgid "invalid user '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:213 +msgid "backup_id required" +msgstr "" + +#: cinder/backup/drivers/ceph.py:224 +#, python-format +msgid "discarding %(length)s bytes from offset %(offset)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:232 +#, python-format +msgid "writing zeroes chunk %d" +msgstr "" + +#: cinder/backup/drivers/ceph.py:246 +#, python-format +msgid "transferring data between '%(src)s' and '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:250 +#, python-format +msgid "%(chunks)s chunks of %(bytes)s bytes to be transferred" +msgstr "" + +#: cinder/backup/drivers/ceph.py:269 +#, python-format +msgid "transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:279 +#, python-format +msgid "transferring remaining %s bytes" +msgstr "" + +#: cinder/backup/drivers/ceph.py:295 +#, python-format +msgid "creating base image '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:322 cinder/backup/drivers/ceph.py:603 +#, python-format +msgid "deleting backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:325 +msgid "no backup snapshot to delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:361 +#, python-format +msgid "trying diff format name format basename='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:369 +#, python-format +msgid "image %s not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:377 +#, python-format +msgid "base image still has %s snapshots so skipping base image delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:382 +#, python-format +msgid "deleting base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:389 +#, python-format +msgid "image busy, retrying %(retries)s more time(s) in %(delay)ss" +msgstr "" + +#: cinder/backup/drivers/ceph.py:394 +msgid "max retries reached - raising error" +msgstr "" + +#: cinder/backup/drivers/ceph.py:397 +#, python-format +msgid "base backup image='%s' deleted)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:407 +#, python-format +msgid "deleting source snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:453 +#, python-format +msgid "performing differential transfer from '%(src)s' to '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:478 +#, python-format +msgid "rbd diff op failed - (ret=%(ret)s stderr=%(stderr)s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:488 +#, python-format +msgid "image '%s' not found - trying diff format name" +msgstr "" + +#: cinder/backup/drivers/ceph.py:493 +#, python-format +msgid "diff format image '%s' not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:528 +#, python-format +msgid "using --from-snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:543 +#, python-format +msgid "source snap '%s' is stale so deleting" +msgstr "" + +#: cinder/backup/drivers/ceph.py:555 +#, python-format +msgid "" +"snap='%(snap)s' does not exist in base image='%(base)s' - aborting " +"incremental backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:566 +#, python-format +msgid "creating backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:586 +#, python-format +msgid "differential backup transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:595 +msgid "differential backup transfer failed" +msgstr "" + +#: 
cinder/backup/drivers/ceph.py:625 +#, python-format +msgid "creating base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:634 +msgid "copying data" +msgstr "" + +#: cinder/backup/drivers/ceph.py:694 +#, python-format +msgid "looking for snapshot of backup base '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:697 +#, python-format +msgid "backup base '%s' has no snapshots" +msgstr "" + +#: cinder/backup/drivers/ceph.py:704 +#, python-format +msgid "backup '%s' has no snapshot" +msgstr "" + +#: cinder/backup/drivers/ceph.py:708 +#, python-format +msgid "backup should only have one snapshot but instead has %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:713 +#, python-format +msgid "found snapshot '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:734 +msgid "need non-zero volume size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:751 +#, python-format +msgid "Starting backup of volume='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:764 +msgid "forcing full backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:776 +#, python-format +msgid "backup '%s' finished." +msgstr "" + +#: cinder/backup/drivers/ceph.py:834 +msgid "adjusting restore vol size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:846 +#, python-format +msgid "trying incremental restore from base='%(base)s' snap='%(snap)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:858 +msgid "differential restore failed, trying full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:869 +#, python-format +msgid "restore transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:916 +#, python-format +msgid "rbd has %s extents" +msgstr "" + +#: cinder/backup/drivers/ceph.py:938 +msgid "dest volume is original volume - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:959 +msgid "destination has extents - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:964 +#, python-format +msgid "no restore point found for backup='%s', forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:995 +msgid "forcing full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1005 +#, python-format +msgid "starting restore from Ceph backup=%(src)s to volume=%(dest)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1016 +msgid "volume_file does not support fileno() so skipping fsync()" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1021 +msgid "restore finished successfully." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:1023 +#, python-format +msgid "restore finished with error - %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1029 +#, python-format +msgid "delete started for backup=%s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1034 +msgid "rbd image not found but continuing anyway so that db entry can be removed" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1037 +#, python-format +msgid "delete '%s' finished with warning" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1039 +#, python-format +msgid "delete '%s' finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:106 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:123 +#, python-format +msgid "single_user auth mode enabled, but %(param)s not set" +msgstr "" + +#: cinder/backup/drivers/swift.py:141 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:146 +#, python-format +msgid "container %s does not exist" +msgstr "" + +#: cinder/backup/drivers/swift.py:151 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/drivers/swift.py:157 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:173 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:182 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:192 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:209 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/drivers/swift.py:214 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:219 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:224 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/drivers/swift.py:234 +#, python-format +msgid "volume size %d is invalid." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:248 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:271 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/drivers/swift.py:278 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:287 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/drivers/swift.py:291 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/drivers/swift.py:297 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:301 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:304 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:312 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/drivers/swift.py:328 cinder/backup/drivers/tsm.py:324 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/drivers/swift.py:345 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/drivers/swift.py:350 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:356 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/drivers/swift.py:362 +#, python-format +msgid "" +"restoring object from swift. backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:378 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/drivers/swift.py:401 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:409 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:423 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:428 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:432 cinder/backup/drivers/tsm.py:378 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:446 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:455 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:458 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:468 cinder/backup/drivers/tsm.py:440 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/backup/drivers/tsm.py:85 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to create device hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:143 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to obtain backup success notification from " +"server.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:173 +#, python-format +msgid "" +"restore: %(vol_id)s Failed.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:199 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a block device." +msgstr "" + +#: cinder/backup/drivers/tsm.py:206 +#, python-format +msgid "backup: %(vol_id)s Failed. Cannot obtain real path to device %(path)s." +msgstr "" + +#: cinder/backup/drivers/tsm.py:213 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a file." +msgstr "" + +#: cinder/backup/drivers/tsm.py:260 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to remove backup hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:286 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to TSM, volume path: " +"%(volume_path)s," +msgstr "" + +#: cinder/backup/drivers/tsm.py:298 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:308 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:338 +#, python-format +msgid "" +"restore: starting restore of backup from TSM to volume %(volume_id)s, " +"backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:352 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:362 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:413 +#, python-format +msgid "" +"delete: %(vol_id)s Failed to run dsmc with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:421 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments with " +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:432 +#, python-format +msgid "" +"delete: %(vol_id)s Failed with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/brick/exception.py:55 +#, python-format +msgid "Exception in string format operation. 
msg='%s'" +msgstr "" + +#: cinder/brick/exception.py:85 +msgid "We are unable to locate any Fibre Channel devices." +msgstr "" + +#: cinder/brick/exception.py:89 +msgid "Unable to find a Fibre Channel volume device." +msgstr "" + +#: cinder/brick/exception.py:93 +#, python-format +msgid "Volume device not found at %(device)s." +msgstr "" + +#: cinder/brick/exception.py:97 +#, python-format +msgid "Unable to find Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:101 +#, python-format +msgid "Failed to create Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:105 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:109 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:113 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:117 +#, python-format +msgid "Connect to volume via protocol %(protocol)s not supported." +msgstr "" + +#: cinder/brick/initiator/connector.py:127 +#, python-format +msgid "Invalid InitiatorConnector protocol specified %(protocol)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:140 +#, python-format +msgid "Failed to access the device on the path %(path)s: %(error)s %(info)s." +msgstr "" + +#: cinder/brick/initiator/connector.py:229 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. Try" +" number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:242 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:317 +#, python-format +msgid "Could not find the iSCSI Initiator File %s" +msgstr "" + +#: cinder/brick/initiator/connector.py:609 +msgid "We are unable to locate any Fibre Channel devices" +msgstr "" + +#: cinder/brick/initiator/connector.py:619 +#, python-format +msgid "Looking for Fibre Channel dev %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:629 +msgid "Fibre Channel volume device not found." +msgstr "" + +#: cinder/brick/initiator/connector.py:633 +#, python-format +msgid "Fibre volume not yet found. Will rescan & retry. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:649 +#, python-format +msgid "Found Fibre Channel volume %(name)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:658 +#, python-format +msgid "Multipath device discovered %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:776 +#, python-format +msgid "AoE volume not yet found at: %(path)s. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:789 +#, python-format +msgid "Found AoE device %(path)s (after %(tries)s rediscover)" +msgstr "" + +#: cinder/brick/initiator/connector.py:815 +#, python-format +msgid "aoe-discover: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:825 +#, python-format +msgid "aoe-revalidate %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:834 +#, python-format +msgid "aoe-flush %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:858 +msgid "" +"Connection details not present. RemoteFsClient may not initialize " +"properly." 
+msgstr "" + +#: cinder/brick/initiator/connector.py:915 +msgid "Invalid connection_properties specified no device_path attribute" +msgstr "" + +#: cinder/brick/initiator/linuxfc.py:50 cinder/brick/initiator/linuxfc.py:56 +msgid "systool is not installed" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:99 +#: cinder/brick/initiator/linuxscsi.py:107 +#: cinder/brick/initiator/linuxscsi.py:124 +#, python-format +msgid "multipath call failed exit (%(code)s)" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:145 +#, python-format +msgid "Couldn't find multipath device %(line)s" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:149 +#, python-format +msgid "Found multipath device = %(mdev)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:140 +msgid "Attempting recreate of backing lun..." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:158 +#, python-format +msgid "" +"Failed to recover attempt to create iscsi backing lun for volume " +"id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:177 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:184 +#, python-format +msgid "" +"Created volume path %(vp)s,\n" +"content: %(vc)%" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:216 cinder/brick/iscsi/iscsi.py:365 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:227 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:258 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:262 +#, python-format +msgid "Volume path %s does not exist, nothing to remove." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:280 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:290 cinder/brick/iscsi/iscsi.py:550 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:375 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:469 +msgid "cinder-rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:489 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:513 cinder/brick/iscsi/iscsi.py:522 +#, python-format +msgid "Failed to create iscsi target for volume id:%s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:532 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:542 +#, python-format +msgid "Failed to remove iscsi target for volume id:%s." 
+msgstr "" + +#: cinder/brick/iscsi/iscsi.py:571 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 cinder/brick/local_dev/lvm.py:158 +#: cinder/brick/local_dev/lvm.py:474 cinder/brick/local_dev/lvm.py:503 +#: cinder/brick/local_dev/lvm.py:546 cinder/brick/local_dev/lvm.py:609 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 cinder/brick/local_dev/lvm.py:159 +#: cinder/brick/local_dev/lvm.py:475 cinder/brick/local_dev/lvm.py:504 +#: cinder/brick/local_dev/lvm.py:547 cinder/brick/local_dev/lvm.py:610 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 cinder/brick/local_dev/lvm.py:160 +#: cinder/brick/local_dev/lvm.py:476 cinder/brick/local_dev/lvm.py:505 +#: cinder/brick/local_dev/lvm.py:548 cinder/brick/local_dev/lvm.py:611 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, python-format +msgid "Unable to locate Volume Group %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:157 +msgid "Error querying thin pool about data_percent" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:370 +#, python-format +msgid "Unable to find VG: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:420 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:473 +msgid "Error creating Volume" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:489 +#, python-format +msgid "Unable to find LV: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:502 +msgid "Error creating snapshot" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:545 +msgid "Error activating LV" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:563 +#, python-format +msgid "Error reported running lvremove: CMD: %(command)s, RESPONSE: %(response)s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:568 +msgid "Attempting udev settle and retry of lvremove..." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:608 +msgid "Error extending Volume" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:39 +msgid "nfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:45 +msgid "glusterfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:86 +#, python-format +msgid "Already mounted: %s" +msgstr "" + +#: cinder/common/config.py:125 +msgid "Deploy v1 of the Cinder API." +msgstr "" + +#: cinder/common/config.py:128 +msgid "Deploy v2 of the Cinder API." +msgstr "" + +#: cinder/common/sqlalchemyutils.py:66 +#: cinder/openstack/common/db/sqlalchemy/utils.py:72 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:114 +#: cinder/openstack/common/db/sqlalchemy/utils.py:120 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/compute/nova.py:97 +#, python-format +msgid "Novaclient connection created using URL: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:63 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:190 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:843 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1842 +#, python-format +msgid "VolumeType %s deletion failed, VolumeType in use." 
+msgstr "" + +#: cinder/db/sqlalchemy/api.py:2530 +#, python-format +msgid "No backup with id %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2615 +msgid "Volume must be available" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2639 +#, python-format +msgid "Volume in unexpected state %s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2662 +#, python-format +msgid "" +"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " +"%(status)s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:37 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:64 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:240 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:269 +msgid "Downgrade from initial Cinder install is unsupported." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:49 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:74 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:105 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:45 +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:48 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:46 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:127 +msgid "Dropping foreign key reservations_ibfk_1 failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:133 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:140 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:147 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:60 +msgid "Exception while creating table 'volume_glance_metadata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:75 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:68 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:58 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:61 +msgid "transfers table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:31 +msgid "migrations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:61 +#, python-format +msgid "Table |%s| not created" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:37 +#, python-format +msgid "Exception while dropping table %s." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:100 +#, python-format +msgid "Exception while creating table %s." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:34 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:43 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:49 +#, python-format +msgid "Column |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:92 +msgid "encryption_key_id column not dropped from volumes" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:100 +msgid "encryption_key_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:105 +msgid "volume_type_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:113 +msgid "encryption table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:49 +msgid "Table quality_of_service_specs not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:60 +msgid "Added qos_specs_id column to volume type table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:85 +msgid "Dropping foreign key volume_types_ibfk_1 failed" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:93 +msgid "Dropping qos_specs_id column failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:100 +msgid "Dropping quality_of_service_specs table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:59 +msgid "volume_admin_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:48 +msgid "" +"Found existing 'default' entries in the quota_classes table. Skipping " +"insertion of default values." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:72 +msgid "Added default quota class data into the DB." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:74 +msgid "Default quota class data not inserted into the DB." +msgstr "" + +#: cinder/image/glance.py:161 cinder/image/glance.py:169 +#, python-format +msgid "Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:94 cinder/image/image_utils.py:199 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/image/image_utils.py:101 +#, python-format +msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:109 cinder/image/image_utils.py:192 +#, python-format +msgid "" +"Size is %(image_size)dGB and doesn't fit in a volume of size " +"%(volume_size)dGB." +msgstr "" + +#: cinder/image/image_utils.py:157 +#, python-format +msgid "" +"qemu-img is not installed and image is of type %s. Only RAW images can " +"be used if qemu-img is not installed." +msgstr "" + +#: cinder/image/image_utils.py:164 +msgid "" +"qemu-img is not installed and the disk format is not specified. Only RAW" +" images can be used if qemu-img is not installed." 
+msgstr "" + +#: cinder/image/image_utils.py:178 +#, python-format +msgid "Copying image from %(tmp)s to volume %(dest)s - size: %(size)s" +msgstr "" + +#: cinder/image/image_utils.py:206 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:224 +#, python-format +msgid "Converted to %(vol_format)s, but format is now %(file_format)s" +msgstr "" + +#: cinder/image/image_utils.py:260 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:78 +msgid "" +"config option keymgr.fixed_key has not been defined: some operations may " +"fail unexpectedly" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:80 +msgid "keymgr.fixed_key not defined" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:134 +#, python-format +msgid "Not deleting key %s" +msgstr "" + +#: cinder/openstack/common/eventlet_backdoor.py:140 +#, python-format +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/fileutils.py:64 +#, python-format +msgid "Reloading cached file %s" +msgstr "" + +#: cinder/openstack/common/gettextutils.py:252 +msgid "Message objects do not support addition." +msgstr "" + +#: cinder/openstack/common/gettextutils.py:261 +msgid "" +"Message objects do not support str() because they may contain non-ascii " +"characters. Please use unicode() or translate() instead." +msgstr "" + +#: cinder/openstack/common/imageutils.py:96 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:189 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:200 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:227 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:235 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/log.py:301 +#, python-format +msgid "Deprecated: %s" +msgstr "" + +#: cinder/openstack/common/log.py:402 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:453 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:623 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:82 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:89 +#: cinder/tests/brick/test_brick_connector.py:466 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:129 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:136 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:43 +#, python-format +msgid "Unexpected argument for periodic task creation: %(arg)s." 
+msgstr "" + +#: cinder/openstack/common/periodic_task.py:134 +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:139 +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:177 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:186 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." +msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:127 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/openstack/common/processutils.py:142 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:167 +#: cinder/openstack/common/processutils.py:239 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:345 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:179 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/openstack/common/processutils.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:318 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:220 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/openstack/common/processutils.py:224 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/openstack/common/service.py:175 +#: cinder/openstack/common/service.py:269 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:187 +msgid "Exception during rpc cleanup." 
+msgstr "" + +#: cinder/openstack/common/service.py:238 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:275 +msgid "Unhandled exception" +msgstr "" + +#: cinder/openstack/common/service.py:308 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/openstack/common/service.py:327 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/openstack/common/service.py:337 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/openstack/common/service.py:354 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/openstack/common/service.py:358 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/service.py:362 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/openstack/common/service.py:392 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/openstack/common/service.py:410 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/openstack/common/sslutils.py:98 +#, python-format +msgid "Invalid SSL version : %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:86 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/strutils.py:182 +#, python-format +msgid "Invalid string format: %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:189 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/openstack/common/versionutils.py:69 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and " +"may be removed in %(remove_in)s." +msgstr "" + +#: cinder/openstack/common/versionutils.py:73 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s and may be removed in " +"%(remove_in)s. It will not be superseded." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:29 +msgid "An unknown error occurred in crypto utils." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:36 +#, python-format +msgid "Block size of %(given)d is too big, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:45 +#, python-format +msgid "Length of %(given)d is too long, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/db/exception.py:44 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:487 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:538 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:610 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/utils.py:33 +msgid "Sort key supplied was not valid." +msgstr "" + +#: cinder/openstack/common/notifier/api.py:129 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:145 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:164 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. 
Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:105 +#, python-format +msgid "" +"An RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:83 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "no calling threads waiting for msg_id: %s, message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:216 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshold: %d. There " +"could be a MulticallProxyWaiter leak." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:299 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:345 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "received %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:422 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:423 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:280 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:459 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:594 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:597 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:631 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:640 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:668 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message (%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint."
+msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:166 +#: cinder/openstack/common/rpc/impl_qpid.py:164 +msgid "Failed to process message... skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:477 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:499 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:536 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:552 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:606 +#: cinder/openstack/common/rpc/impl_qpid.py:507 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:624 +#: cinder/openstack/common/rpc/impl_qpid.py:522 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:628 +#: cinder/openstack/common/rpc/impl_qpid.py:526 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:667 +#: cinder/openstack/common/rpc/impl_qpid.py:561 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:84 +#, python-format +msgid "Invalid value for qpid_topology_version: %d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:455 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:461 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:474 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:534 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:101 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:136 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:137 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:138 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:146 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:158 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:200 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:205 +msgid "You cannot send on this socket." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:267 +#, python-format +msgid "Running func with context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:305 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:387 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:437 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:443 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:475 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:481 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:497 +#, python-format +msgid "Required IPC directory does not exist at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:506 +#, python-format +msgid "Permission denied to IPC directory at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:509 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:543 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:562 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:590 +msgid "Skipping topic registration. Already registered." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:597 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:649 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:662 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:675 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:678 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:681 +#, python-format +msgid "Received message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:682 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:691 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:698 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:721 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:724 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:728 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:731 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:771 +#, python-format +msgid "topic is %s." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:815 +#, python-format +msgid "rpc_zmq_matchmaker = %(orig)s is deprecated; use %(new)s instead" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#: cinder/openstack/common/rpc/matchmaker_ring.py:79 +#: cinder/openstack/common/rpc/matchmaker_ring.py:97 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:54 +#, python-format +msgid "extra_spec requirement '%(req)s' does not match '%(cap)s'" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:67 +#, python-format +msgid "%(host_state)s fails resource_type extra_specs requirements" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:43 +msgid "Re-scheduling is disabled." +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:52 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/scheduler/driver.py:69 +msgid "Must implement host_passes_filters" +msgstr "" + +#: cinder/scheduler/driver.py:74 +msgid "Must implement find_retype_host" +msgstr "" + +#: cinder/scheduler/driver.py:78 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:82 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:98 +#, python-format +msgid "cannot place volume %(id)s on %(host)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:114 +#, python-format +msgid "No valid hosts for volume %(id)s with type %(type)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:125 +#, python-format +msgid "" +"Current host not valid for volume %(id)s with type %(type)s, migration " +"not allowed" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:156 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:174 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:207 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:259 +#, python-format +msgid "Filtered %s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:276 +#, python-format +msgid "Choosing %s" +msgstr "" + +#: cinder/scheduler/host_manager.py:264 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:269 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:294 +#, python-format +msgid "volume service is down or disabled. (host: %s)" +msgstr "" + +#: cinder/scheduler/manager.py:63 +msgid "" +"ChanceScheduler and SimpleScheduler have been deprecated due to lack of " +"support for advanced features like: volume types, volume encryption, QoS " +"etc. These two schedulers can be fully replaced by FilterScheduler with " +"certain combination of filters and weighers." 
+msgstr "" + +#: cinder/scheduler/manager.py:98 cinder/scheduler/manager.py:100 +msgid "Failed to create scheduler manager volume flow" +msgstr "" + +#: cinder/scheduler/manager.py:159 +msgid "New volume type not specified in request_spec." +msgstr "" + +#: cinder/scheduler/manager.py:174 +#, python-format +msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." +msgstr "" + +#: cinder/scheduler/manager.py:192 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:68 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%s'" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:43 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:57 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:53 +msgid "No volume_id provided to populate a request_spec from" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:116 +#, python-format +msgid "Failed to schedule_create_volume: %(cause)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:135 +#, python-format +msgid "Failed notifying on %(topic)s payload %(payload)s" +msgstr "" + +#: cinder/tests/fake_driver.py:57 cinder/volume/driver.py:784 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:76 cinder/volume/driver.py:884 +#, python-format +msgid "FAKE ISER: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:97 +msgid "local_path not implemented" +msgstr "" + +#: cinder/tests/fake_driver.py:124 cinder/tests/fake_driver.py:129 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:70 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:78 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:94 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:97 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:58 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_netapp_nfs.py:360 +#, python-format +msgid "Share %(share)s and file name %(file_name)s" +msgstr "" + +#: cinder/tests/test_rbd.py:768 cinder/volume/drivers/rbd.py:175 +msgid "flush() not supported in this version of librbd" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:260 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1507 +#, python-format +msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1510 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1515 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:60 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:61 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: 
cinder/tests/test_xiv_ds8k.py:102 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/tests/api/contrib/test_backups.py:741 +msgid "Invalid input" +msgstr "" + +#: cinder/tests/integrated/test_login.py:29 +#, python-format +msgid "volume: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:32 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:42 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:50 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:58 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:100 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:103 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:121 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:148 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:159 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:166 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/transfer/api.py:68 +msgid "Volume in unexpected state" +msgstr "" + +#: cinder/transfer/api.py:102 cinder/volume/api.py:367 +msgid "status must be available" +msgstr "" + +#: cinder/transfer/api.py:119 +#, python-format +msgid "Failed to create transfer record for %s" +msgstr "" + +#: cinder/transfer/api.py:136 +#, python-format +msgid "Attempt to transfer %s with invalid auth key." +msgstr "" + +#: cinder/transfer/api.py:156 cinder/volume/flows/api/create_volume.py:508 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/transfer/api.py:182 +#, python-format +msgid "Failed to update quota donating volume transfer id %s" +msgstr "" + +#: cinder/transfer/api.py:199 +#, python-format +msgid "Volume %s has been transferred." 
+msgstr "" + +#: cinder/volume/api.py:143 +#, python-format +msgid "Unable to query if %s is in the availability zone set" +msgstr "" + +#: cinder/volume/api.py:171 cinder/volume/api.py:173 +msgid "Failed to create api volume flow" +msgstr "" + +#: cinder/volume/api.py:202 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:214 +#, python-format +msgid "Volume status must be available or error, but current status is: %s" +msgstr "" + +#: cinder/volume/api.py:224 +msgid "Volume cannot be deleted while migrating" +msgstr "" + +#: cinder/volume/api.py:229 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:285 cinder/volume/api.py:350 +#: cinder/volume/qos_specs.py:240 cinder/volume/volume_types.py:67 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:370 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:377 +msgid "status must be in-use to detach" +msgstr "" + +#: cinder/volume/api.py:388 +msgid "Volume status must be available to reserve" +msgstr "" + +#: cinder/volume/api.py:464 +msgid "Snapshot cannot be created while volume is migrating" +msgstr "" + +#: cinder/volume/api.py:468 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:490 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:502 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:553 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/api.py:581 cinder/volume/flows/api/create_volume.py:208 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:585 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:589 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:720 cinder/volume/api.py:772 +msgid "Volume status must be available/in-use." +msgstr "" + +#: cinder/volume/api.py:723 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/api.py:752 +msgid "Volume status must be available to extend." +msgstr "" + +#: cinder/volume/api.py:757 +#, python-format +msgid "" +"New size for extend must be greater than current size. (current: " +"%(size)s, extended: %(new_size)s)" +msgstr "" + +#: cinder/volume/api.py:778 +msgid "Volume is already part of an active migration" +msgstr "" + +#: cinder/volume/api.py:784 +msgid "volume must not have snapshots" +msgstr "" + +#: cinder/volume/api.py:797 +#, python-format +msgid "No available service named %s" +msgstr "" + +#: cinder/volume/api.py:803 +msgid "Destination host must be different than current host" +msgstr "" + +#: cinder/volume/api.py:833 +msgid "Source volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:837 +msgid "Destination volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:842 +#, python-format +msgid "Destination has migration_status %(stat)s, expected %(exp)s." +msgstr "" + +#: cinder/volume/api.py:853 +msgid "Volume status must be available to update readonly flag." 
+msgstr "" + +#: cinder/volume/api.py:862 +#, python-format +msgid "Unable to update type due to incorrect status on volume: %s" +msgstr "" + +#: cinder/volume/api.py:868 +#, python-format +msgid "Volume %s is already part of an active migration." +msgstr "" + +#: cinder/volume/api.py:874 +#, python-format +msgid "migration_policy must be 'on-demand' or 'never', passed: %s" +msgstr "" + +#: cinder/volume/api.py:887 +#, python-format +msgid "Invalid volume_type passed: %s" +msgstr "" + +#: cinder/volume/api.py:900 +#, python-format +msgid "New volume_type same as original: %s" +msgstr "" + +#: cinder/volume/api.py:915 +msgid "Retype cannot change encryption requirements" +msgstr "" + +#: cinder/volume/api.py:927 +msgid "Retype cannot change front-end qos specs for in-use volumes" +msgstr "" + +#: cinder/volume/driver.py:189 cinder/volume/drivers/netapp/nfs.py:174 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:282 +#, python-format +msgid "copy_data_between_volumes %(src)s -> %(dest)s." +msgstr "" + +#: cinder/volume/driver.py:295 cinder/volume/driver.py:309 +#, python-format +msgid "Failed to attach volume %(vol)s" +msgstr "" + +#: cinder/volume/driver.py:327 +#, python-format +msgid "Failed to copy volume %(src)s to %(dest)d" +msgstr "" + +#: cinder/volume/driver.py:340 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:358 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:394 +#, python-format +msgid "Unable to access the backend storage via the path %(path)s." +msgstr "" + +#: cinder/volume/driver.py:433 +#, python-format +msgid "Creating a new backup for volume %s." +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Restoring backup %(backup)s to volume %(volume)s." +msgstr "" + +#: cinder/volume/driver.py:474 +msgid "Extend volume not implemented" +msgstr "" + +#: cinder/volume/driver.py:533 cinder/volume/drivers/emc/emc_smis_iscsi.py:113 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:546 +#, python-format +msgid "ISCSI discovery attempt failed for:%s" +msgstr "" + +#: cinder/volume/driver.py:548 +#, python-format +msgid "Error from iscsiadm -m discovery: %s" +msgstr "" + +#: cinder/volume/driver.py:595 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:599 cinder/volume/drivers/emc/emc_smis_iscsi.py:156 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:696 +msgid "The volume driver requires the iSCSI initiator name in the connector." +msgstr "" + +#: cinder/volume/driver.py:726 cinder/volume/driver.py:845 +#: cinder/volume/drivers/eqlx.py:247 cinder/volume/drivers/lvm.py:359 +#: cinder/volume/drivers/zadara.py:650 +#: cinder/volume/drivers/emc/emc_smis_common.py:859 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:235 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:602 +#: cinder/volume/drivers/netapp/iscsi.py:1032 +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/nexenta/iscsi.py:538 +#: cinder/volume/drivers/windows/windows.py:205 +msgid "Updating volume stats" +msgstr "" + +#: cinder/volume/driver.py:924 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:203 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." 
+msgstr "" + +#: cinder/volume/manager.py:209 +msgid "" +"ThinLVMVolumeDriver is deprecated, please configure LVMISCSIDriver and " +"lvm_type=thin. Continuing with those settings." +msgstr "" + +#: cinder/volume/manager.py:228 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)" +msgstr "" + +#: cinder/volume/manager.py:235 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s" +msgstr "" + +#: cinder/volume/manager.py:244 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:257 +#, python-format +msgid "Failed to re-export volume %s: setting to error state" +msgstr "" + +#: cinder/volume/manager.py:264 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:271 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:273 +#, python-format +msgid "" +"Error encountered during re-exporting phase of driver initialization: " +"%(name)s" +msgstr "" + +#: cinder/volume/manager.py:283 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:328 cinder/volume/manager.py:330 +msgid "Failed to create manager volume flow" +msgstr "" + +#: cinder/volume/manager.py:374 cinder/volume/manager.py:391 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:380 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:389 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:394 +#, python-format +msgid "Cannot delete volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:422 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:427 +#, python-format +msgid "volume %s: glance metadata deleted" +msgstr "" + +#: cinder/volume/manager.py:430 +#, python-format +msgid "no glance metadata found for volume %s" +msgstr "" + +#: cinder/volume/manager.py:434 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:451 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:462 +#, python-format +msgid "snapshot %(snap_id)s: creating" +msgstr "" + +#: cinder/volume/manager.py:490 +#, python-format +msgid "" +"Failed updating %(snapshot_id)s metadata using the provided volumes " +"%(volume_id)s metadata" +msgstr "" + +#: cinder/volume/manager.py:496 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:508 cinder/volume/manager.py:518 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:526 +#, python-format +msgid "Cannot delete snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:556 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:559 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:579 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:583 +msgid "being attached by another host" +msgstr "" + +#: cinder/volume/manager.py:587 +msgid "being attached by different mode" +msgstr "" + +#: cinder/volume/manager.py:590 +msgid "status must be available or attaching" +msgstr "" + +#: cinder/volume/manager.py:698 +#, python-format +msgid 
"Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "" + +#: cinder/volume/manager.py:760 +#, python-format +msgid "Unable to fetch connection information from backend: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:807 +#, python-format +msgid "Unable to terminate volume connection: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:854 +msgid "failed to create new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:857 +msgid "timeout creating new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:880 +#, python-format +msgid "Failed to copy volume %(vol1)s to %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:909 +#, python-format +msgid "" +"migrate_volume_completion: completing migration for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:921 +#, python-format +msgid "" +"migrate_volume_completion is cleaning up an error for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:940 +#, python-format +msgid "Failed to delete migration source vol %(vol)s: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:976 +#, python-format +msgid "volume %s: calling driver migrate_volume" +msgstr "" + +#: cinder/volume/manager.py:1016 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/manager.py:1024 +#, python-format +msgid "" +"Unable to update stats, %(driver_name)s -%(driver_version)s " +"%(config_group)s driver is uninitialized." +msgstr "" + +#: cinder/volume/manager.py:1044 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/manager.py:1091 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to extend volume by %(s_size)sG, " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/manager.py:1103 +#, python-format +msgid "volume %s: extending" +msgstr "" + +#: cinder/volume/manager.py:1105 +#, python-format +msgid "volume %s: extended successfully" +msgstr "" + +#: cinder/volume/manager.py:1107 +#, python-format +msgid "volume %s: Error trying to extend volume" +msgstr "" + +#: cinder/volume/manager.py:1169 +msgid "Failed to update usages while retyping volume." +msgstr "" + +#: cinder/volume/manager.py:1170 +msgid "Failed to get old volume type quota reservations" +msgstr "" + +#: cinder/volume/manager.py:1190 +#, python-format +msgid "Volume %s: retyped succesfully" +msgstr "" + +#: cinder/volume/manager.py:1193 +#, python-format +msgid "" +"Volume %s: driver error when trying to retype, falling back to generic " +"mechanism." +msgstr "" + +#: cinder/volume/manager.py:1204 +msgid "Retype requires migration but is not allowed." +msgstr "" + +#: cinder/volume/manager.py:1212 +msgid "Volume must not have snapshots." 
+msgstr "" + +#: cinder/volume/qos_specs.py:57 +#, python-format +msgid "Valid consumer of QoS specs are: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:84 cinder/volume/qos_specs.py:105 +#: cinder/volume/qos_specs.py:155 cinder/volume/qos_specs.py:197 +#: cinder/volume/qos_specs.py:211 cinder/volume/qos_specs.py:225 +#: cinder/volume/volume_types.py:43 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:123 cinder/volume/qos_specs.py:140 +#: cinder/volume/qos_specs.py:272 cinder/volume/volume_types.py:52 +#: cinder/volume/volume_types.py:99 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/qos_specs.py:156 +#, python-format +msgid "Failed to get all associations of qos specs %s" +msgstr "" + +#: cinder/volume/qos_specs.py:189 +#, python-format +msgid "" +"Type %(type_id)s is already associated with another qos specs: " +"%(qos_specs_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:198 +#, python-format +msgid "Failed to associate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:212 +#, python-format +msgid "Failed to disassociate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:226 +#, python-format +msgid "Failed to disassociate qos specs %s." +msgstr "" + +#: cinder/volume/qos_specs.py:284 cinder/volume/volume_types.py:111 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/utils.py:144 +#, python-format +msgid "" +"Incorrect value error: %(blocksize)s, it may indicate that " +"'volume_dd_blocksize' was configured incorrectly. Fall back to default." +msgstr "" + +#: cinder/volume/volume_types.py:130 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:131 +#: cinder/volume/drivers/block_device.py:143 cinder/volume/drivers/lvm.py:654 +#: cinder/volume/drivers/lvm.py:669 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:157 cinder/volume/drivers/lvm.py:687 +#, python-format +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:183 cinder/volume/drivers/lvm.py:483 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:200 cinder/volume/drivers/lvm.py:504 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:272 cinder/volume/drivers/lvm.py:227 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:287 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:311 cinder/volume/drivers/lvm.py:300 +#: cinder/volume/drivers/zadara.py:509 cinder/volume/drivers/nexenta/nfs.py:189 +#, python-format +msgid "Creating clone of volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:380 +msgid "No free disk" +msgstr "" + +#: cinder/volume/drivers/block_device.py:393 +msgid "No big enough free disk" +msgstr "" + +#: cinder/volume/drivers/coraid.py:84 +#, python-format +msgid "Invalid ESM url scheme \"%s\". Supported https only." +msgstr "" + +#: cinder/volume/drivers/coraid.py:111 +msgid "Invalid REST handle name. Expected path." 
+msgstr "" + +#: cinder/volume/drivers/coraid.py:134 +#, python-format +msgid "Call to json.loads() failed: %(ex)s. Response: %(resp)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:224 +msgid "Session is expired. Relogin on ESM." +msgstr "" + +#: cinder/volume/drivers/coraid.py:244 +msgid "Reply is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:246 +msgid "Error message is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:284 +#, python-format +msgid "Coraid Appliance ping failed: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:297 +#, python-format +msgid "Volume \"%(name)s\" created with VSX LUN \"%(lun)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:311 +#, python-format +msgid "Volume \"%s\" deleted." +msgstr "" + +#: cinder/volume/drivers/coraid.py:315 +#, python-format +msgid "Resize volume \"%(name)s\" to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:319 +#, python-format +msgid "Repository for volume \"%(name)s\" found: \"%(repo)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:333 +#, python-format +msgid "Volume \"%(name)s\" resized. New size is %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:385 +msgid "Cannot create clone volume in different repository." +msgstr "" + +#: cinder/volume/drivers/coraid.py:505 +#, python-format +msgid "Initialize connection %(shelf)s/%(lun)s for %(name)s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:139 +#, python-format +msgid "" +"CLI output\n" +"%s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:154 +msgid "Reading CLI MOTD" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:158 +#, python-format +msgid "Setting CLI terminal width: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:162 +#, python-format +msgid "Sending CLI command: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:169 +msgid "Error executing EQL command" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:199 +#, python-format +msgid "EQL-driver: executing \"%s\"" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:208 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:383 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:215 cinder/volume/drivers/san/san.py:149 +#, python-format +msgid "Error running SSH command: %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:282 +#, python-format +msgid "Volume %s does not exist, it may have already been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:300 +#, python-format +msgid "EQL-driver: Setup is complete, group IP is %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:304 +msgid "Failed to setup the Dell EqualLogic driver" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:320 +#, python-format +msgid "Failed to create volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:329 +#, python-format +msgid "Volume %s was not found while trying to delete it" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:333 +#, python-format +msgid "Failed to delete volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:348 +#, python-format +msgid "Failed to create snapshot of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:361 +#, python-format +msgid "Failed to create volume from snapshot %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:374 +#, python-format +msgid "Failed to create clone of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:384 +#, python-format +msgid "Failed to delete snapshot %(snap)s of volume %(vol)s" +msgstr "" + +#: 
cinder/volume/drivers/eqlx.py:405 +#, python-format +msgid "Failed to initialize connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:415 +#, python-format +msgid "Failed to terminate connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:436 +#, python-format +msgid "Volume %s is not found!, it may have been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:440 +#, python-format +msgid "Failed to ensure export of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:459 +#, python-format +msgid "Failed to extend_volume %(name)s from %(current_size)sGB to %(new_size)sGB" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:86 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:91 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:103 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:161 +#, python-format +msgid "Cloning volume %(src)s to volume %(dst)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:166 +msgid "Volume status must be 'available'." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:202 cinder/volume/drivers/nfs.py:122 +#: cinder/volume/drivers/netapp/nfs.py:753 +#, python-format +msgid "casted to %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:216 +msgid "Snapshot status must be \"available\" to clone." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:238 +#, python-format +msgid "snapshot: %(snap)s, volume: %(vol)s, volume_size: %(size)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:257 +#, python-format +msgid "will copy from snapshot at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:275 cinder/volume/drivers/nfs.py:172 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:373 +#, python-format +msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:403 +#, python-format +msgid "nova call result: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:405 +msgid "Call to Nova to create snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:427 +msgid "Nova returned \"error\" status while creating snapshot." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:431 +#, python-format +msgid "Status of snapshot %(id)s is now %(status)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:444 +#, python-format +msgid "Timed out while waiting for Nova update for creation of snapshot %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:456 +#, python-format +msgid "create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:457 +#, python-format +msgid "volume id: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:532 +msgid "'active' must be present when writing snap_info." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:562 +#, python-format +msgid "deleting snapshot %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:566 +msgid "Volume status must be \"available\" or \"in-use\"." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:582 +#, python-format +msgid "" +"Snapshot record for %s is not present, allowing snapshot_delete to " +"proceed." 
+msgstr "" + +#: cinder/volume/drivers/glusterfs.py:587 +#, python-format +msgid "snapshot_file for this snap is %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:608 +#, python-format +msgid "No base file found for %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:625 +#, python-format +msgid "No %(base_id)s found for %(file)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:680 +#, python-format +msgid "No file found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:690 +#, python-format +msgid "No snap found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:701 +#, python-format +msgid "No file depends on %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:727 +#, python-format +msgid "Check condition failed: %s expected to be None." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:778 +msgid "Call to Nova delete snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:796 +#, python-format +msgid "status of snapshot %s is still \"deleting\"... waiting" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:802 +#, python-format +msgid "Unable to delete snapshot %(id)s, status: %(status)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:815 +#, python-format +msgid "Timed out while waiting for Nova update for deletion of snapshot %(id)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:904 +#, python-format +msgid "%s must be a valid raw or qcow2 image." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:967 +msgid "Extend volume is only supported for this driver when no snapshots exist." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:975 +#, python-format +msgid "Unrecognized backing format: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:990 +#, python-format +msgid "creating new volume at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:993 +#, python-format +msgid "file already exists at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1019 cinder/volume/drivers/nfs.py:159 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1021 +#, python-format +msgid "Available shares: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1038 +#, python-format +msgid "" +"GlusterFS share at %(dir)s is not writable by the Cinder volume service. " +"Snapshot operations will not be supported." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:96 +#, python-format +msgid "GPFS is not active. Detailed output: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:97 +#, python-format +msgid "GPFS is not running - state: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:140 +msgid "Option gpfs_mount_point_base is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:147 +msgid "Option gpfs_images_share_mode is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:153 +msgid "Option gpfs_images_dir is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:160 +#, python-format +msgid "" +"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " +"belong to different file systems" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:169 +#, python-format +msgid "" +"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in " +"cluster daemon level %(cur)s - must be at least at level %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:183 +#, python-format +msgid "%s must be an absolute path." 
+msgstr "" + +#: cinder/volume/drivers/gpfs.py:188 +#, python-format +msgid "%s is not a directory." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:197 +#, python-format +msgid "" +"The GPFS filesystem %(fs)s is not at the required release level. Current" +" level is %(cur)s, must be at least %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:556 +#, python-format +msgid "Failed to resize volume %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:604 +#, python-format +msgid "mkfs failed on volume %(vol)s, error message was: %(err)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:637 +#, python-format +msgid "" +"%s cannot be accessed. Verify that GPFS is active and file system is " +"mounted." +msgstr "" + +#: cinder/volume/drivers/lvm.py:189 +#, python-format +msgid "Unabled to delete due to existing snapshot for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:215 +#, python-format +msgid "Volume device file path %s does not exist." +msgstr "" + +#: cinder/volume/drivers/lvm.py:221 +#, python-format +msgid "Size for volume: %s not found, cannot secure delete." +msgstr "" + +#: cinder/volume/drivers/lvm.py:262 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:361 +#, python-format +msgid "Unable to update stats on non-initialized Volume Group: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:462 +#, python-format +msgid "Error creating iSCSI target, retrying creation for target: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:482 +#, python-format +msgid "volume_info:%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:518 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:519 cinder/volume/drivers/lvm.py:724 +#: cinder/volume/drivers/huawei/rest_common.py:1225 +#, python-format +msgid "%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:573 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/nfs.py:109 +msgid "Driver specific implementation needs to return mount_point_base." +msgstr "" + +#: cinder/volume/drivers/nfs.py:263 +#, python-format +msgid "Expected volume size was %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:264 +#, python-format +msgid " but size is now %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:361 +#, python-format +msgid "%s is already mounted" +msgstr "" + +#: cinder/volume/drivers/nfs.py:421 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:426 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/nfs.py:431 +#, python-format +msgid "NFS config 'nfs_oversub_ratio' invalid. Must be > 0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:439 +#, python-format +msgid "NFS config 'nfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:493 +#, python-format +msgid "Selected %s as target nfs share." 
+msgstr "" + +#: cinder/volume/drivers/nfs.py:526 +#, python-format +msgid "%s is above nfs_used_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:529 +#, python-format +msgid "%s is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:532 +#, python-format +msgid "%s reserved space is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/rbd.py:160 +#, python-format +msgid "Invalid argument - whence=%s not supported" +msgstr "" + +#: cinder/volume/drivers/rbd.py:164 +msgid "Invalid argument" +msgstr "" + +#: cinder/volume/drivers/rbd.py:183 +msgid "fileno() not supported by RBD()" +msgstr "" + +#: cinder/volume/drivers/rbd.py:210 +#, python-format +msgid "error opening rbd image %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:259 +msgid "rados and rbd python libraries not found" +msgstr "" + +#: cinder/volume/drivers/rbd.py:265 +msgid "error connecting to ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:346 cinder/volume/drivers/sheepdog.py:178 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:377 +#, python-format +msgid "clone depth exceeds limit of %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:411 +#, python-format +msgid "maximum clone depth (%d) has been reached - flattening source volume" +msgstr "" + +#: cinder/volume/drivers/rbd.py:423 +#, python-format +msgid "flattening source volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:435 +#, python-format +msgid "creating snapshot='%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:445 +#, python-format +msgid "cloning '%(src_vol)s@%(src_snap)s' to '%(dest)s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:459 +msgid "clone created successfully" +msgstr "" + +#: cinder/volume/drivers/rbd.py:468 +#, python-format +msgid "creating volume '%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:484 +#, python-format +msgid "flattening %(pool)s/%(img)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:490 +#, python-format +msgid "cloning %(pool)s/%(img)s@%(snap)s to %(dst)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:527 +msgid "volume has no backup snaps" +msgstr "" + +#: cinder/volume/drivers/rbd.py:550 +#, python-format +msgid "volume %s is not a clone" +msgstr "" + +#: cinder/volume/drivers/rbd.py:568 +#, python-format +msgid "deleting parent snapshot %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:579 +#, python-format +msgid "deleting parent %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:593 +#, python-format +msgid "volume %s no longer exists in backend" +msgstr "" + +#: cinder/volume/drivers/rbd.py:609 +msgid "volume has clone snapshot(s)" +msgstr "" + +#: cinder/volume/drivers/rbd.py:625 +#, python-format +msgid "deleting rbd volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:629 +msgid "" +"ImageBusy error raised while deleting rbd volume. This may have been " +"caused by a connection from a client that has crashed and, if so, may be " +"resolved by retrying the delete after 30 seconds has elapsed." 
+msgstr "" + +#: cinder/volume/drivers/rbd.py:642 +msgid "volume is a clone so cleaning references" +msgstr "" + +#: cinder/volume/drivers/rbd.py:696 +#, python-format +msgid "connection data: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:705 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:709 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:712 +msgid "Not an rbd snapshot" +msgstr "" + +#: cinder/volume/drivers/rbd.py:724 +#, python-format +msgid "not cloneable: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:728 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:733 +msgid "rbd image clone requires image format to be 'raw' but image {0} is '{1}'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:747 +#, python-format +msgid "Unable to open image %(loc)s: %(err)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:817 +msgid "volume backup complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:830 +msgid "volume restore complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:840 cinder/volume/drivers/sheepdog.py:195 +#, python-format +msgid "Failed to Extend Volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:845 cinder/volume/drivers/sheepdog.py:200 +#: cinder/volume/drivers/windows/windows.py:223 +#, python-format +msgid "Extend volume from %(old_size)s GB to %(new_size)s GB." +msgstr "" + +#: cinder/volume/drivers/scality.py:67 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:78 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:84 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:105 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:139 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:59 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:64 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:144 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:151 +#, python-format +msgid "" +"Failed to make httplib connection SolidFire Cluster: %s (verify san_ip " +"settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:154 +#, python-format +msgid "Failed to make httplib connection: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:161 +#, python-format +msgid "" +"Request to SolidFire cluster returned bad status: %(status)s / %(reason)s" +" (check san_login/san_password settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:166 +#, python-format +msgid "HTTP request failed, with status: %(status)s and reason: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:177 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:183 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:187 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:189 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:195 +#, python-format +msgid "Detected 
xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:202 +#: cinder/volume/drivers/solidfire.py:271 +#: cinder/volume/drivers/solidfire.py:366 +#, python-format +msgid "API response: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:222 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:253 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:315 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:398 +msgid "Failed to get model update from clone" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:410 +#, python-format +msgid "Failed volume create: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:425 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:460 +#, python-format +msgid "Failed to get SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:469 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:478 +#, python-format +msgid "Volume %s, not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:481 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:550 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:554 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:556 +msgid "This usually means the volume was never successfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:569 +#, python-format +msgid "Failed to delete SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:572 +#: cinder/volume/drivers/solidfire.py:646 +#: cinder/volume/drivers/solidfire.py:709 +#: cinder/volume/drivers/solidfire.py:734 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:575 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:579 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:587 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:638 +msgid "Entering SolidFire extend_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:660 +msgid "Leaving SolidFire extend_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:665 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:673 +msgid "Failed to get updated stats" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:703 +#: cinder/volume/drivers/solidfire.py:728 +msgid "Entering SolidFire attach_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:773 +msgid "Leaving SolidFire transfer volume" +msgstr "" + +#: cinder/volume/drivers/zadara.py:236 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:260 +#, python-format +msgid "Operation completed. 
%(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:357 +#, python-format +msgid "Pool %(name)s: %(total)sGB total, %(free)sGB free" +msgstr "" + +#: cinder/volume/drivers/zadara.py:408 cinder/volume/drivers/zadara.py:531 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:438 +#, python-format +msgid "Create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:445 cinder/volume/drivers/zadara.py:490 +#: cinder/volume/drivers/zadara.py:516 +#, python-format +msgid "Volume %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:456 +#, python-format +msgid "Delete snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:464 +#, python-format +msgid "snapshot: original volume %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:472 +#, python-format +msgid "snapshot: snapshot %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:483 +#, python-format +msgid "Creating volume from snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:496 +#, python-format +msgid "Snapshot %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:614 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:40 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:79 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:83 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:91 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:98 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:107 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:115 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:130 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:137 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:144 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:152 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:157 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:167 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:177 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:188 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:197 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:218 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:230 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:241 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:257 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:266 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:278 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:287 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:292 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:302 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:312 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:321 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:342 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. 
Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:354 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:365 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:381 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:390 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:402 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:411 +msgid "Entering delete_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:413 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:420 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:430 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:438 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:442 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:456 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:465 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:472 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:476 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:488 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:497 +#: cinder/volume/drivers/emc/emc_smis_common.py:567 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:502 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:518 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:527 +#, python-format +msgid "" +"Error Create Snapshot: %(snapshot)s Volume: %(volume)s Error: " +"%(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:535 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:541 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:545 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:551 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:559 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:574 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:590 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:599 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:611 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:621 +#, python-format +msgid "Create export: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:626 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:648 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:663 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:674 +#, python-format +msgid "Error mapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:678 +#, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:694 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:707 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:711 +#, python-format +msgid "HidePaths for volume %s completed successfully." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:724 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:739 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:744 +#, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:757 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:770 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:775 +#, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:781 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:790 +#: cinder/volume/drivers/emc/emc_smis_common.py:820 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:804 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:810 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:834 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:840 +#, python-format +msgid "Volume %s is already mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:852 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:884 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:887 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:903 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:906 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:928 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:948 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:952 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:959 +msgid "Cannot connect to ECOM server" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:971 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:984 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:997 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1010 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1054 +#, python-format +msgid "Pool %(storage_type)s is not found." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1060 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1066 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1082 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1114 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1117 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1130 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1158 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1184 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1188 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1248 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1289 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1302 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1314 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1326 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1361 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1404 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1409 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1419 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1441 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1463 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1491 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1520 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1526 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1538 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1548 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1550 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1566 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:152 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:161 +#, python-format +msgid "Cannot find device number for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:191 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:198 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:215 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:70 +#, python-format +msgid "Range: start LU: %(start)s, end LU: %(end)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:84 +#, python-format +msgid "setting LU upper (end) limit to %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:92 +#, python-format +msgid "%(element)s: %(val)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:103 cinder/volume/drivers/hds/hds.py:105 +#, python-format +msgid "XML exception reading parameter: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:178 +#, python-format +msgid "portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:197 +#, python-format +msgid "No configuration found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:250 +#, python-format +msgid "HDP not found: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:289 +#, python-format +msgid "iSCSI portal not found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:327 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:355 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is cloned." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:372 +#, python-format +msgid "LUN %(lun)s extended to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:395 +#, python-format +msgid "delete lun %(lun)s on %(name)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:480 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created from snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:503 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is created as snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:522 +#, python-format +msgid "LUN %s is deleted." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:57 +msgid "_instantiate_driver: configuration not found." 
+msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:64 +#, python-format +msgid "" +"_instantiate_driver: Loading %(protocol)s driver for Huawei OceanStor " +"%(product)s series storage arrays." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:84 +#, python-format +msgid "" +"\"Product\" or \"Protocol\" is illegal. \"Product\" should be set to " +"either T, Dorado or HVS. \"Protocol\" should be set to either iSCSI or " +"FC. Product: %(product)s Protocol: %(protocol)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:74 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s host: %(host)s initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:92 +#: cinder/volume/drivers/huawei/huawei_t.py:461 +#, python-format +msgid "initialize_connection: Target FC ports WWNS: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:101 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(ini)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:159 +#: cinder/volume/drivers/huawei/rest_common.py:1278 +#, python-format +msgid "" +"_get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " +"check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:206 +#: cinder/volume/drivers/huawei/rest_common.py:1083 +#, python-format +msgid "_get_tgt_iqn: iSCSI IP is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:234 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:248 +#, python-format +msgid "" +"_get_iscsi_tgt_port_info: Failed to get iSCSI port info. Please make sure" +" the iSCSI port IP %s is configured in array." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:323 +#: cinder/volume/drivers/huawei/huawei_t.py:552 +#, python-format +msgid "" +"terminate_connection: volume: %(vol)s, host: %(host)s, connector: " +"%(initiator)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:351 +#, python-format +msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:436 +msgid "validate_connector: The FC driver requires thewwpns in the connector." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:443 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:578 +#, python-format +msgid "_remove_fc_ports: FC port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:40 +#, python-format +msgid "parse_xml_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:129 +#, python-format +msgid "_get_host_os_type: Host %(ip)s OS type is %(os)s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:59 +#, python-format +msgid "HVS Request URL: %(url)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:60 +#, python-format +msgid "HVS Request Data: %(data)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:73 +#, python-format +msgid "HVS Response Data: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:75 +#, python-format +msgid "Bad response from server: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:82 +msgid "JSON transfer error" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:102 +#, python-format +msgid "Login error, reason is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:166 +#, python-format +msgid "" +"%(err)s\n" +"result: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:173 +#, python-format +msgid "%s \"data\" was not in result." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:208 +msgid "Can't find the Qos policy in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:246 +msgid "Can't find lun or lun group in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:280 +#, python-format +msgid "Invalid resource pool: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:298 +#, python-format +msgid "Get pool info error, pool name is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:327 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:354 +#, python-format +msgid "_stop_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:474 +#, python-format +msgid "" +"_mapping_hostgroup_and_lungroup: lun_group: %(lun_group)sview_id: " +"%(view_id)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:511 +#: cinder/volume/drivers/huawei/rest_common.py:543 +#, python-format +msgid "initiator name:%(initiator_name)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:527 +#, python-format +msgid "host lun id is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:553 +#, python-format +msgid "the free wwns %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:574 +#, python-format +msgid "the fc server properties is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:688 +#, python-format +msgid "JSON transfer data error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:874 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:937 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:964 +#, python-format +msgid "" +"PrefetchType config is wrong. PrefetchType must in 1,2,3,4. fetchtype " +"is:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:970 +msgid "Use default prefetch fetchtype. Prefetch fetchtype:Intelligent." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:982 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal.LUNcopy name: " +"%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1056 +#, python-format +msgid "" +"_get_iscsi_port_info: Failed to get iscsi port info through config IP " +"%(ip)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1101 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1124 +#, python-format +msgid "_parse_volume_type: type id: %(type_id)s config parameter is: %(params)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1157 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the configuration file " +"%(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1162 +#, python-format +msgid "The config parameters are: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1239 +#: cinder/volume/drivers/huawei/ssh_common.py:118 +#: cinder/volume/drivers/huawei/ssh_common.py:1265 +#, python-format +msgid "_check_conf_file: Config file invalid. %s must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1246 +#: cinder/volume/drivers/huawei/ssh_common.py:125 +msgid "_check_conf_file: Config file invalid. StoragePool must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1256 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1300 +msgid "Can not find lun in array" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:54 +#, python-format +msgid "ssh_read: Read SSH timeout. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:70 +msgid "No response message. Please check system status." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:101 +#: cinder/volume/drivers/huawei/ssh_common.py:1249 +msgid "do_setup" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:135 +#: cinder/volume/drivers/huawei/ssh_common.py:1287 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType is invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:169 +#, python-format +msgid "_get_login_info: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:224 +#, python-format +msgid "create_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:242 +#, python-format +msgid "" +"_name_translate: Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:279 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the element in configuration " +"file %(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:373 +#: cinder/volume/drivers/huawei/ssh_common.py:1451 +#, python-format +msgid "LUNType must be \"Thin\" or \"Thick\". LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:395 +msgid "" +"_parse_conf_lun_params: Use default prefetch type. 
Prefetch type: " +"Intelligent" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:421 +#, python-format +msgid "" +"_get_maximum_capacity_pool_id: Failed to get pool id. Please check config" +" file and make sure the StoragePool %s is created in storage array." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:436 +#, python-format +msgid "CLI command: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:466 +#, python-format +msgid "" +"_execute_cli: Can not connect to IP %(old)s, try to connect to the other " +"IP %(new)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:501 +#, python-format +msgid "_execute_cli: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:511 +#, python-format +msgid "delete_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:516 +#, python-format +msgid "delete_volume: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:570 +#, python-format +msgid "" +"create_volume_from_snapshot: snapshot name: %(snapshot)s, volume name: " +"%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:580 +#, python-format +msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:650 +#, python-format +msgid "_wait_for_luncopy: LUNcopy %(luncopyname)s status is %(status)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:688 +#, python-format +msgid "create_cloned_volume: src volume: %(src)s, tgt volume: %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:697 +#, python-format +msgid "Source volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:739 +#, python-format +msgid "" +"extend_volume: extended volume name: %(extended_name)s new added volume " +"name: %(added_name)s new added volume size: %(added_size)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:747 +#, python-format +msgid "extend_volume: volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:779 +#, python-format +msgid "create_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:785 +msgid "create_snapshot: Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:792 +#, python-format +msgid "create_snapshot: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:855 +#, python-format +msgid "delete_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:865 +#, python-format +msgid "" +"delete_snapshot: Can not delete snapshot %s for it is a source LUN of " +"LUNCopy." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:873 +#, python-format +msgid "delete_snapshot: Snapshot %(snap)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:916 +#, python-format +msgid "" +"%(func)s: %(msg)s\n" +"CLI command: %(cmd)s\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:933 +#, python-format +msgid "map_volume: Volume %s was not found." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1079 +#, python-format +msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1102 +#, python-format +msgid "remove_map: Host %s does not exist." 
+msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1106 +#, python-format +msgid "remove_map: Volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1119 +#, python-format +msgid "remove_map: No map between host %(host)s and volume %(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1138 +#, python-format +msgid "" +"_delete_map: There are IOs accessing the system. Retry to delete host map" +" %(mapid)s 10s later." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1146 +#, python-format +msgid "" +"_delete_map: Failed to delete host map %(mapid)s.\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1185 +msgid "_update_volume_stats: Updating volume stats." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1277 +msgid "_check_conf_file: Config file invalid. StoragePool must be specified." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1311 +msgid "" +"_get_device_type: The driver only supports Dorado5100 and Dorado 2100 G2 " +"now." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1389 +#, python-format +msgid "" +"create_volume_from_snapshot: %(device)s does not support create volume " +"from snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1396 +#, python-format +msgid "create_cloned_volume: %(device)s does not support clone volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1404 +#, python-format +msgid "extend_volume: %(device)s does not support extend volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1413 +#, python-format +msgid "create_snapshot: %(device)s does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:132 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:142 +#, python-format +msgid "Failed getting details for pool %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:179 +msgid "do_setup: No configured nodes." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:182 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:186 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:190 +msgid "Unable to determine system name" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:193 +msgid "Unable to determine system id" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:196 +msgid "Unable to determine pool extent size" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:203 +#: cinder/volume/drivers/netapp/iscsi.py:122 +#: cinder/volume/drivers/netapp/nfs.py:639 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:157 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:209 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:217 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:225 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:235 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:254 +msgid "The connector does not contain the required information." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:277 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:297 +msgid "CHAP secret exists for host but CHAP is disabled" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:302 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:314 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:316 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:333 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:342 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:365 +msgid "" +"Could not get FC connection information for the host-volume connection. " +"Is the host configured properly for FC connections?" 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:380 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:385 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:403 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:411 +msgid "terminate_connection: Failed to get host name from connector." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:421 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:447 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:459 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:470 +#, python-format +msgid "enter: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:474 +msgid "extend_volume: Extending a volume with snapshots is not supported." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:481 +#, python-format +msgid "leave: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:497 +#, python-format +msgid "enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:523 +#, python-format +msgid "leave: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:540 +#, python-format +msgid "" +"enter: retype: id=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:581 +#, python-format +msgid "" +"exit: retype: ild=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:622 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:623 +msgid "_update_volume_stats: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:44 +#, python-format +msgid "Could not find key in output of command %(cmd)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:64 +#, python-format +msgid "Failed to get code level (%s)." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:86 +#, python-format +msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:143 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:165 +#, python-format +msgid "Failed to find host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:178 +#, python-format +msgid "enter: get_host_from_connector: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:207 +#, python-format +msgid "leave: get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:218 +#, python-format +msgid "enter: create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:224 +msgid "create_host: Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:235 +msgid "create_host: No initiators or wwpns supplied." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:265 +#, python-format +msgid "leave: create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:275 +#, python-format +msgid "enter: map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:301 +#, python-format +msgid "" +"leave: map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host " +"%(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:311 +#, python-format +msgid "enter: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:318 +#, python-format +msgid "unmap_vol_from_host: No mapping of volume %(vol_name)s to any host found." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:324 +#, python-format +msgid "" +"unmap_vol_from_host: Multiple mappings of volume %(vol_name)s found, no " +"host specified." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:336 +#, python-format +msgid "" +"unmap_vol_from_host: No mapping of volume %(vol_name)s to host %(host) " +"found." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:348 +#, python-format +msgid "leave: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:377 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:383 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:390 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:397 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:402 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:408 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:417 +#, python-format +msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:452 +msgid "Protocol must be specified as ' iSCSI' or ' FC'." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:495 +#, python-format +msgid "enter: create_vdisk: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:498 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:525 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping%(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:535 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within theallotted %(to)d " +"seconds timeout. Terminating." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:544 +#, python-format +msgid "" +"enter: run_flashcopy: execute FlashCopy from source %(source)s to target " +"%(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:552 +#, python-format +msgid "leave: run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:572 +#, python-format +msgid "Loopcall: _check_vdisk_fc_mappings(), vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:595 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:631 +#, python-format +msgid "Calling _ensure_vdisk_no_fc_mappings: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:639 +#, python-format +msgid "enter: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:641 +#, python-format +msgid "Tried to delete non-existant vdisk %s." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:645 +#, python-format +msgid "leave: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:649 +#, python-format +msgid "enter: create_copy: snapshot %(src)s to %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:654 +#, python-format +msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:669 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt)s from vdisk %(src)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:691 +msgid "migrate_volume started without a vdisk copy in the expected pool." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:743 +#, python-format +msgid "" +"Ignore change IO group as storage code level is %(code_level)s, below " +"then 6.4.0.0" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:35 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:211 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:244 +#, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:53 +#, python-format +msgid "Expected no output from CLI command %(cmd)s, got %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:65 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:256 +#, python-format +msgid "" +"Failed to parse CLI output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:142 +msgid "Must pass wwpn or host to lsfabric." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:158 +#, python-format +msgid "Did not find success message nor error for %(fun)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:163 +msgid "" +"storwize_svc_multihostmap_enabled is set to False, not allowing multi " +"host mapping." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:347 +#, python-format +msgid "Did not find expected key %(key)s in %(fun)s: %(raw)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:382 +#, python-format +msgid "" +"Unexpected CLI response: header/row mismatch. header: %(header)s, row: " +"%(row)s" +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:419 +#, python-format +msgid "No element by given name %s." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:440 +msgid "Not a valid value for NaElement." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:444 +msgid "NaElement name cannot be null." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:468 +msgid "Type cannot be converted into NaElement." 
+msgstr "" + +#: cinder/volume/drivers/netapp/common.py:75 +msgid "Required configuration not found" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:103 +#, python-format +msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:109 +#, python-format +msgid "Storage family %s is not supported" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:116 +#, python-format +msgid "No default storage protocol found for storage family %(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:123 +#, python-format +msgid "" +"Protocol %(storage_protocol)s is not supported for storage family " +"%(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:130 +#, python-format +msgid "" +"NetApp driver of family %(storage_family)s and protocol " +"%(storage_protocol)s loaded" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:139 +msgid "Only loading netapp drivers supported." +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:158 +#, python-format +msgid "" +"The configured NetApp driver is deprecated. Please refer the link to " +"resolve the issue '%s'." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:69 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:105 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:150 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:166 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:175 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:191 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:227 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:232 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:238 +#, python-format +msgid "Failed to get LUN target details for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:249 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:252 +#, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:290 +#, python-format +msgid "Snapshot %s deletion successful" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:310 +#: cinder/volume/drivers/netapp/iscsi.py:565 +#: cinder/volume/drivers/netapp/nfs.py:99 +#: cinder/volume/drivers/netapp/nfs.py:206 +#, python-format +msgid "Resizing %s failed. Cleaning volume." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:325 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:412 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:431 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:511 +msgid "Object is not a NetApp LUN." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:543 +#, python-format +msgid "Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:545 +#, python-format +msgid "Error getting lun attribute. Exception: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:600 +#, python-format +msgid "No need to extend volume %s as it is already the requested new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:606 +#, python-format +msgid "Resizing lun %s directly to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:633 +#, python-format +msgid "Lun %(path)s geometry failed. Message - %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:662 +#, python-format +msgid "Moving lun %(name)s to %(new_name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:677 +#, python-format +msgid "Resizing lun %s using sub clone to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:684 +#, python-format +msgid "%s cannot be sub clone resized as it is hosted on compressed volume" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:690 +#, python-format +msgid "%s cannot be sub clone resized as it contains no blocks." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:707 +#, python-format +msgid "Post clone resize lun %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:718 +#, python-format +msgid "Failure staging lun %s to tmp." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:723 +#, python-format +msgid "Failure moving new cloned lun to %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:727 +#, python-format +msgid "Failure deleting staged tmp lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:730 +#, python-format +msgid "Unknown exception in post clone resize lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:732 +#, python-format +msgid "Exception details: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:736 +msgid "Getting lun block count." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:741 +#, python-format +msgid "Failure getting lun info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:785 +#, python-format +msgid "Failed to get vol with required size and extra specs for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:796 +#, python-format +msgid "Error provisioning vol %(name)s on %(volume)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:841 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:982 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:986 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1087 +msgid "Cluster ssc is not updated. No volume stats found." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1149 +#: cinder/volume/drivers/netapp/nfs.py:1080 +msgid "Unsupported ONTAP version. ONTAP version 7.3.1 and above is supported." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1153 +#: cinder/volume/drivers/netapp/nfs.py:1084 +#: cinder/volume/drivers/netapp/utils.py:320 +msgid "Api version could not be determined." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1164 +#, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1273 +#, python-format +msgid "Error finding luns for volume %s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1390 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1393 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1456 +msgid "Volume refresh job already running. Returning..." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1462 +#, python-format +msgid "Error refreshing vol capacity. Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1470 +#, python-format +msgid "Refreshing capacity info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:104 +#: cinder/volume/drivers/netapp/nfs.py:211 +#, python-format +msgid "NFS file %s not discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:224 +#, python-format +msgid "Copied image to volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:230 +#, python-format +msgid "Registering image in cache %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:237 +#, python-format +msgid "" +"Exception while registering image %(image_id)s in cache. Exception: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:250 +#, python-format +msgid "Found cache file for image %(image_id)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:263 +#, python-format +msgid "Cloning img from cache for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:271 +msgid "Image cache cleaning in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:282 +msgid "Image cache cleaning in progress." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:293 +#, python-format +msgid "Cleaning cache for share %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:298 +#, python-format +msgid "Files to be queued for deletion %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:305 +#, python-format +msgid "Exception during cache cleaning %(share)s. Message - %(ex)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:311 +msgid "Image cache cleaning done." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:336 +#, python-format +msgid "Bytes to free %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:343 +#, python-format +msgid "Delete file path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:358 +#, python-format +msgid "Deleting file at path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:363 +#, python-format +msgid "Exception during deleting %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:395 +#, python-format +msgid "Unexpected exception in cloning image %(image_id)s. 
Message: %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:411 +#, python-format +msgid "Cloning image %s from cache" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:415 +#, python-format +msgid "Cache share: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:425 +#, python-format +msgid "Unexpected exception during image cloning in share %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:431 +#, python-format +msgid "Cloning image %s directly in share" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:436 +#, python-format +msgid "Share is cloneable %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:443 +#, python-format +msgid "Image is raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:450 +#, python-format +msgid "Image will locally be converted to raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:457 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:467 +#, python-format +msgid "Performing post clone for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:474 +msgid "NFS file could not be discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:478 +msgid "Checking file for resize" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:482 +#, python-format +msgid "Resizing file to %sG" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:488 +msgid "Resizing image file failed." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:510 +msgid "Discover file retries exhausted." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:529 +#, python-format +msgid "Image location not in the expected format %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:557 +#, python-format +msgid "Found possible share matches %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:561 +msgid "Unexpected exception while short listing used share." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:599 +#, python-format +msgid "Extending volume %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:710 +#, python-format +msgid "Shares on vserver %s will only be used for provisioning." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:714 +#: cinder/volume/drivers/netapp/nfs.py:892 +msgid "No vserver set in config. SSC will be disabled." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:757 +#, python-format +msgid "Exception creating vol %(name)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:765 +#, python-format +msgid "Volume %s could not be created on shares." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:815 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:856 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:862 +#, python-format +msgid "" +"Cloning with params volume %(volume)s, src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:924 +msgid "No cluster ssc stats found. Wait for next volume stats update." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:941 +msgid "No shares found hence skipping ssc refresh." 
+msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:978 +#: cinder/volume/drivers/netapp/nfs.py:1221 +#, python-format +msgid "Shortlisted del elg files %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:983 +#: cinder/volume/drivers/netapp/nfs.py:1226 +#, python-format +msgid "Getting file usage for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:988 +#: cinder/volume/drivers/netapp/nfs.py:1231 +#, python-format +msgid "file-usage for path %(path)s is %(bytes)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1005 +#: cinder/volume/drivers/netapp/nfs.py:1268 +#, python-format +msgid "Share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1007 +#: cinder/volume/drivers/netapp/nfs.py:1270 +#, python-format +msgid "No share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1038 +#, python-format +msgid "Found volume %(vol)s for share %(share)s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1129 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1139 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:241 +#, python-format +msgid "Unexpected error while creating ssc vol list. Message - %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:272 +#, python-format +msgid "Exception querying aggr options. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:313 +#, python-format +msgid "Exception querying sis information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:347 +#, python-format +msgid "Exception querying mirror information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:379 +#, python-format +msgid "Exception querying storage disk. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:421 +#, python-format +msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:455 +#, python-format +msgid "Successfully completed stale refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:482 +#, python-format +msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:488 +#, python-format +msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:500 +msgid "Backend not a VolumeDriver." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:502 +msgid "Backend server not NaServer." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:505 +msgid "ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:517 +msgid "refresh stale ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:616 +msgid "Fatal error: User not permitted to query NetApp volumes." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:623 +#, python-format +msgid "" +"The user does not have access or sufficient privileges to use all ssc " +"apis. The ssc features %s may not work as expected." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:122 +msgid "ems executed successfully." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:124 +#, python-format +msgid "Failed to invoke ems. 
Message : %s" +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:137 +msgid "" +"It is not the recommended way to use drivers by NetApp. Please use " +"NetAppDriver to achieve the functionality." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:160 +msgid "Requires an NaServer instance." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:317 +msgid "Unsupported Clustered Data ONTAP version." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:99 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:150 +#, python-format +msgid "Extending volume: %(id)s New size: %(size)s GB" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:166 +#, python-format +msgid "Volume %s does not exist, it seems it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:179 +#, python-format +msgid "Cannot delete snapshot %(origin): %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:190 +#, python-format +msgid "Creating temp snapshot of the original volume: %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:200 +#: cinder/volume/drivers/nexenta/nfs.py:200 +#, python-format +msgid "Volume creation failed, deleting created snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:205 +#: cinder/volume/drivers/nexenta/nfs.py:205 +#, python-format +msgid "Failed to delete zfs snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:223 +#, python-format +msgid "Enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:250 +#, python-format +msgid "Remote NexentaStor appliance at %s should be SSH-bound." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:267 +#, python-format +msgid "" +"Cannot send source snapshot %(src)s to destination %(dst)s. Reason: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:275 +#, python-format +msgid "" +"Cannot delete temporary source snapshot %(src)s on NexentaStor Appliance:" +" %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:281 +#, python-format +msgid "Cannot delete source volume %(volume)s on NexentaStor Appliance: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:318 +#, python-format +msgid "Snapshot %s does not exist, it seems it was already deleted." 
+msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:439 +#: cinder/volume/drivers/windows/windows_utils.py:230 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:449 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:461 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:471 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:481 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:514 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:522 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:83 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:88 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:89 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:90 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:96 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:85 +#, python-format +msgid "Volume %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:89 +#, python-format +msgid "Folder %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:114 +#, python-format +msgid "Creating folder on Nexenta Store %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:146 +#, python-format +msgid "Cannot destroy created folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:176 +#, python-format +msgid "Cannot destroy cloned folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:227 +#, python-format +msgid "Folder %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:237 +#: cinder/volume/drivers/nexenta/nfs.py:268 +#, python-format +msgid "Snapshot %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:302 +#, python-format +msgid "Creating regular file: %s.This may take some time." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:313 +#, python-format +msgid "Regular file: %s created." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:365 +#, python-format +msgid "Sharing folder %s on Nexenta Store" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:393 +#, python-format +msgid "Shares loaded: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/utils.py:46 +#, python-format +msgid "Invalid value: \"%s\"" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:93 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:99 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:107 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:137 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:190 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:246 +#, python-format +msgid "Snapshot info: %(name)s => %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:321 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:79 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:166 +#, python-format +msgid "Invalid hp3parclient version. Version %s or greater required." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:179 +#, python-format +msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:193 +#, python-format +msgid "HP3PARCommon %(common_ver)s, hp3parclient %(rest_ver)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:212 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:494 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:228 +#, python-format +msgid "Failed to get domain because CPG (%s) doesn't exist on array." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:247 +#, python-format +msgid "Error extending volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:347 +#, python-format +msgid "command %s failed" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:390 +#, python-format +msgid "Error running ssh command: %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:586 +#, python-format +msgid "VV Set %s does not exist." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:633 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:684 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:752 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1004 +#, python-format +msgid "Failure in update_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1019 +#, python-format +msgid "Failure in clear_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1031 +#, python-format +msgid "Error attaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1039 +#, python-format +msgid "Error detaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:124 +#, python-format +msgid "Invalid IP address format '%s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:158 +#, python-format +msgid "" +"Found invalid iSCSI IP address(s) in configuration option(s) " +"hp3par_iscsi_ips or iscsi_ip_address '%s.'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:164 +msgid "At least one valid iSCSI IP address must be set." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:266 +msgid "Least busy iSCSI port not found, using first iSCSI port in list." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:75 +#, python-format +msgid "Failure while invoking function: %(func)s. Error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:162 +#, python-format +msgid "Error while terminating session: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:165 +msgid "Successfully established connection to the server." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:172 +#, python-format +msgid "Error while logging out the user: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:218 +#, python-format +msgid "" +"Not authenticated error occurred. Will create session and try API call " +"again: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:258 +#, python-format +msgid "Task: %(task)s progress: %(prog)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:262 +#, python-format +msgid "Task %s status: success." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:266 +#: cinder/volume/drivers/vmware/api.py:271 +#, python-format +msgid "Task: %(task)s failed with error: %(err)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:290 +msgid "Lease is ready." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:294 +msgid "Lease initializing..." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:304 +#, python-format +msgid "Error: unknown lease state %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:51 +#, python-format +msgid "Read %(bytes)s out of %(max)s from ThreadSafePipe." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:56 +#, python-format +msgid "Completed transfer of size %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:102 +#, python-format +msgid "Initiating image service update on image: %(image)s with meta: %(meta)s" +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:117 +#, python-format +msgid "Glance image: %s is now active." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:123 +#, python-format +msgid "Glance image: %s is in killed state." 
+msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:132 +#, python-format +msgid "Glance image %(id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:171 +#, python-format +msgid "" +"Exception during HTTP connection close in VMwareHTTPWrite. Exception is " +"%s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:203 +#: cinder/volume/drivers/vmware/read_write_util.py:292 +msgid "Could not retrieve URL from lease." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:206 +#, python-format +msgid "Opening vmdk url: %s for write." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:231 +#, python-format +msgid "Written %s bytes to vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:242 +#: cinder/volume/drivers/vmware/read_write_util.py:318 +#, python-format +msgid "Updating progress to %s percent." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:258 +#: cinder/volume/drivers/vmware/read_write_util.py:334 +msgid "Lease released." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:260 +#: cinder/volume/drivers/vmware/read_write_util.py:336 +#, python-format +msgid "Lease is already in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:295 +#, python-format +msgid "Opening vmdk url: %s for read." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:307 +#, python-format +msgid "Read %s bytes from vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:150 +#, python-format +msgid "Error(s): %s occurred in the call to RetrievePropertiesEx." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:189 +#, python-format +msgid "No such SOAP method %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:198 +#, python-format +msgid "httplib error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:209 +#, python-format +msgid "Socket error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:218 +#, python-format +msgid "Type error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:225 +#, python-format +msgid "Error in %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:112 +#, python-format +msgid "Returning spec value %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:115 +#, python-format +msgid "Invalid spec value: %s specified." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:118 +#, python-format +msgid "Returning default spec value: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:169 +#, python-format +msgid "%s not set." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:174 +#, python-format +msgid "Successfully setup driver: %(driver)s for server: %(ip)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:219 +msgid "Backing not available, no operation to be performed." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:287 +#, python-format +msgid "" +"Unable to pick datastore to accommodate %(size)s bytes from the " +"datastores: %(dss)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:293 +#, python-format +msgid "" +"Selected datastore: %(datastore)s with %(host_count)d connected host(s) " +"for the volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:375 +#, python-format +msgid "" +"Unable to find suitable datastore for volume of size: %(vol)s GB under " +"host: %(host)s. 
More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:385 +#, python-format +msgid "Unable to find host to accommodate a disk of size: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:412 +#, python-format +msgid "" +"Unable to find suitable datastore for volume: %(vol)s under host: " +"%(host)s. More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:422 +#, python-format +msgid "Unable to create volume: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:441 +#, python-format +msgid "The instance: %s for which initialize connection is called, exists." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:448 +#, python-format +msgid "There is no backing for the volume: %s. Need to create one." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:456 +msgid "The instance for which initialize connection is called, does not exist." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:461 +#, python-format +msgid "Trying to boot from an empty volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:470 +#, python-format +msgid "" +"Returning connection_info: %(info)s for volume: %(volume)s with " +"connector: %(connector)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:518 +#, python-format +msgid "Snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:523 +#, python-format +msgid "There is no backing, so will not create snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:528 +#, python-format +msgid "Successfully created snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:549 +#, python-format +msgid "Delete snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:554 +#, python-format +msgid "There is no backing, and so there is no snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:558 +#, python-format +msgid "Successfully deleted snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:586 +#, python-format +msgid "Successfully cloned new backing: %(back)s from source VMDK file: %(vmdk)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:603 +#, python-format +msgid "" +"There is no backing for the source volume: %(svol)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:633 +#, python-format +msgid "" +"There is no backing for the source snapshot: %(snap)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:642 +#: cinder/volume/drivers/vmware/vmdk.py:982 +#, python-format +msgid "" +"There is no snapshot point for the snapshotted volume: %(snap)s. Not " +"creating any backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:678 +#, python-format +msgid "Cannot create image of disk format: %s. Only vmdk disk format is accepted." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:713 +#: cinder/volume/drivers/vmware/vmdk.py:771 +#, python-format +msgid "Fetching glance image: %(id)s to server: %(host)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:722 +#: cinder/volume/drivers/vmware/vmdk.py:792 +#, python-format +msgid "Done copying image: %(id)s to volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:725 +#, python-format +msgid "" +"Exception in copy_image_to_volume: %(excep)s. Deleting the backing: " +"%(back)s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:746 +#, python-format +msgid "Exception in _select_ds_for_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:749 +#, python-format +msgid "Selected datastore %(ds)s for new volume of size %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:784 +#, python-format +msgid "Exception in copy_image_to_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:787 +#, python-format +msgid "Deleting the backing: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:808 +#, python-format +msgid "Copy glance image: %s to create new volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:842 +msgid "Upload to glance of attached volume is not supported." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:847 +#, python-format +msgid "Copy Volume: %s to new image." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:853 +#, python-format +msgid "Backing not found, creating for volume: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:872 +#, python-format +msgid "Done copying volume %(vol)s to a new image %(img)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:922 +#, python-format +msgid "Relocating volume: %(backing)s to %(ds)s and %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:961 +#: cinder/volume/drivers/vmware/volumeops.py:630 +#, python-format +msgid "Successfully created clone: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:974 +#, python-format +msgid "" +"There is no backing for the snapshoted volume: %(snap)s. Not creating any" +" backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1010 +#, python-format +msgid "" +"There is no backing for the source volume: %(src)s. Not creating any " +"backing for volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1018 +#, python-format +msgid "Linked clone of source volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:94 +#, python-format +msgid "Downloading image: %s from glance image server as a flat vmdk file." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:107 +#: cinder/volume/drivers/vmware/vmware_images.py:126 +#, python-format +msgid "Downloaded image: %s from glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:113 +#, python-format +msgid "Downloading image: %s from glance image server using HttpNfc import." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:132 +#, python-format +msgid "Uploading image: %s to the Glance image server using HttpNfc export." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:158 +#, python-format +msgid "Uploaded image: %s to the Glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:87 +#, python-format +msgid "Did not find any backing with name: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:94 +#, python-format +msgid "Deleting the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:97 +#, python-format +msgid "Initiated deletion of VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:99 +#, python-format +msgid "Deleted the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:237 +#, python-format +msgid "There are no valid datastores attached to %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:289 +#, python-format +msgid "" +"Creating folder: %(child_folder_name)s under parent folder: " +"%(parent_folder)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:306 +#, python-format +msgid "Child folder already present: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:314 +#, python-format +msgid "Created child folder: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:365 +#, python-format +msgid "Spec for creating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:383 +#, python-format +msgid "" +"Creating volume backing name: %(name)s disk_type: %(disk_type)s size_kb: " +"%(size_kb)s at folder: %(folder)s resourse pool: %(resource_pool)s " +"datastore name: %(ds_name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:395 +#, python-format +msgid "Initiated creation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:398 +#, python-format +msgid "Successfully created volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:438 +#, python-format +msgid "Spec for relocating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:452 +#, python-format +msgid "" +"Relocating backing: %(backing)s to datastore: %(ds)s and resource pool: " +"%(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:462 +#, python-format +msgid "Initiated relocation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:464 +#, python-format +msgid "" +"Successfully relocated volume backing: %(backing)s to datastore: %(ds)s " +"and resource pool: %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:474 +#, python-format +msgid "Moving backing: %(backing)s to folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:479 +#, python-format +msgid "Initiated move of volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:482 +#, python-format +msgid "Successfully moved volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:494 +#, python-format +msgid "Snapshoting backing: %(backing)s with name: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:501 +#, python-format +msgid "Initiated snapshot of volume backing: %(backing)s named: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:505 +#, python-format +msgid "Successfully created snapshot: %(snap)s for volume backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:553 +#, python-format +msgid "Deleting the snapshot: %(name)s from backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:558 +#, python-format +msgid "" +"Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " +"delete anything." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:565 +#, python-format +msgid "Initiated snapshot: %(name)s deletion for backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:569 +#, python-format +msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:597 +#, python-format +msgid "Spec for cloning the backing: %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:613 +#, python-format +msgid "" +"Creating a clone of backing: %(back)s, named: %(name)s, clone type: " +"%(type)s from snapshot: %(snap)s on datastore: %(ds)s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:627 +#, python-format +msgid "Initiated clone of backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:638 +#, python-format +msgid "Deleting file: %(file)s under datacenter: %(dc)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:646 +#, python-format +msgid "Initiated deletion via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:648 +#, python-format +msgid "Successfully deleted file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:701 +msgid "Copying disk data before snapshot of the VM" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:711 +#, python-format +msgid "Initiated copying disk data via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:713 +#, python-format +msgid "Successfully copied disk at: %(src)s to: %(dest)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:722 +#, python-format +msgid "Deleting vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:729 +#, python-format +msgid "Initiated deleting vmdk file via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:731 +#, python-format +msgid "Deleted vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/windows/windows.py:102 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:47 +#, python-format +msgid "" +"check_for_setup_error: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:53 +msgid "check_for_setup_error: there is no ISCSI traffic listening." +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:63 +#, python-format +msgid "" +"get_host_information: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:73 +#, python-format +msgid "" +"get_host_information: the ISCSI target information could not be " +"retrieved. WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:105 +#, python-format +msgid "" +"associate_initiator_with_iscsi_target: an association between initiator: " +"%(init)s and target name: %(target)s could not be established. WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:123 +#, python-format +msgid "" +"delete_iscsi_target: error when deleting the iscsi target associated with" +" target name: %(target)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:139 +#, python-format +msgid "" +"create_volume: error when creating the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:157 +#, python-format +msgid "" +"delete_volume: error when deleting the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:177 +#, python-format +msgid "" +"create_snapshot: error when creating the snapshot name: %(vol_name)s . 
" +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:193 +#, python-format +msgid "" +"create_volume_from_snapshot: error when creating the volume name: " +"%(vol_name)s from snapshot name: %(snap_name)s. WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:208 +#, python-format +msgid "" +"delete_snapshot: error when deleting the snapshot name: %(snap_name)s . " +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:223 +#, python-format +msgid "" +"create_iscsi_target: error when creating iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:240 +#, python-format +msgid "" +"remove_iscsi_target: error when deleting iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:255 +#, python-format +msgid "" +"add_disk_to_target: error adding disk associated to volume : %(vol_name)s" +" to the target name: %(tar_name)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:273 +#, python-format +msgid "" +"copy_vhd_disk: error when copying disk from source path : %(src_path)s to" +" destination path: %(dest_path)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:290 +#, python-format +msgid "" +"extend: error when extending the volume: %(vol_name)s .WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/flows/common.py:52 +#, python-format +msgid "Restoring source %(source_volid)s status to %(status)s" +msgstr "" + +#: cinder/volume/flows/common.py:58 +#, python-format +msgid "" +"Failed setting source volume %(source_volid)s back to its initial " +"%(source_status)s status" +msgstr "" + +#: cinder/volume/flows/common.py:83 +#, python-format +msgid "Updating volume: %(volume_id)s with %(update)s due to: %(reason)s" +msgstr "" + +#: cinder/volume/flows/common.py:90 +#: cinder/volume/flows/manager/create_volume.py:676 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(update)s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:81 +#, python-format +msgid "Originating snapshot status must be one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:103 +#, python-format +msgid "" +"Unable to create a volume from an originating source volume when its " +"status is not one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:126 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than the snapshot size " +"%(snap_size)sGB. They must be >= original snapshot size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:135 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than original volume size " +"%(source_size)sGB. They must be >= original volume size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:144 +#, python-format +msgid "Volume size %(size)s must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:186 +#, python-format +msgid "" +"Size of specified image %(image_size)sGB is larger than volume size " +"%(volume_size)sGB." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:194 +#, python-format +msgid "" +"Volume size %(volume_size)sGB cannot be smaller than the image minDisk " +"size %(min_disk)sGB." 
+msgstr "" + +#: cinder/volume/flows/api/create_volume.py:212 +#, python-format +msgid "Metadata property key %s greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:217 +#, python-format +msgid "Metadata property key %s value greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:254 +#, python-format +msgid "Availability zone '%s' is invalid" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:267 +msgid "Volume must be in the same availability zone as the snapshot" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:276 +msgid "Volume must be in the same availability zone as the source volume" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:315 +msgid "Volume type will be changed to be the same as the source volume." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:463 +#, python-format +msgid "Failed destroying volume entry %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:546 +#, python-format +msgid "Failed rolling back quota for %s reservations" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:590 +#, python-format +msgid "Failed to update quota for deleting volume: %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:678 +#: cinder/volume/flows/manager/create_volume.py:192 +#, python-format +msgid "Volume %s: create failed" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:682 +msgid "Unexpected build error:" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:105 +#, python-format +msgid "" +"Volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d due to " +"%(reason)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:124 +#, python-format +msgid "Volume %s: re-scheduled" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:141 +#, python-format +msgid "Updating volume %(volume_id)s with %(update)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:146 +#, python-format +msgid "Volume %s: resetting 'creating' status failed." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:165 +#, python-format +msgid "Volume %s: rescheduling failed" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:308 +#, python-format +msgid "" +"Failed notifying about the volume action %(event)s for volume " +"%(volume_id)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:345 +#, python-format +msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:347 +#, python-format +msgid "" +"Failed updating volume %(vol_id)s metadata using the provided " +"%(src_type)s %(src_id)s metadata" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:405 +#, python-format +msgid "" +"Failed fetching snapshot %(snapshot_id)s bootable flag using the provided" +" glance snapshot %(snapshot_ref_id)s volume reference" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:418 +#, python-format +msgid "Marking volume %s as bootable." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:421 +#, python-format +msgid "Failed updating volume %(volume_id)s bootable flag to true" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:448 +#, python-format +msgid "" +"Attempting download of %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s." 
+msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:455 +#: cinder/volume/flows/manager/create_volume.py:466 +#, python-format +msgid "" +"Failed to copy image %(image_id)s to volume: %(volume_id)s, error: " +"%(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:461 +#, python-format +msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:475 +#, python-format +msgid "" +"Downloaded image %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s successfully." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:512 +#, python-format +msgid "" +"Creating volume glance metadata for volume %(volume_id)s backed by image " +"%(image_id)s with: %(vol_metadata)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:526 +#, python-format +msgid "" +"Cloning %(volume_id)s from image %(image_id)s at location " +"%(image_location)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:552 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(updates)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:574 +#, python-format +msgid "Unable to create volume. Volume driver %s not initialized" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:588 +#, python-format +msgid "" +"Volume %(volume_id)s: being created using %(functor)s with specification:" +" %(volume_spec)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:611 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with creation provided " +"model %(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:619 +#, python-format +msgid "Volume %s: creating export" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:633 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with driver provided model " +"%(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:680 +#, python-format +msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" +msgstr "" + +#~ msgid "Error retrieving volume status: %s" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get system name" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get storage pool data" +#~ msgstr "" + +#~ msgid "Cannot find any Fibre Channel HBAs" +#~ msgstr "" + +#~ msgid "Volume status must be available or error" +#~ msgstr "" + +#~ msgid "No backend config with id %s" +#~ msgstr "" + +#~ msgid "No sm_flavor called %s" +#~ msgstr "" + +#~ msgid "No sm_volume with id %s" +#~ msgstr "" + +#~ msgid "Error: %s" +#~ msgstr "" + +#~ msgid "Unexpected state while cloning %s" +#~ msgstr "" + +#~ msgid "iSCSI device not found at %s" +#~ msgstr "" + +#~ msgid "Fibre Channel device not found." +#~ msgstr "" + +#~ msgid "Uncaught exception" +#~ msgstr "" + +#~ msgid "Out reactor registered" +#~ msgstr "" + +#~ msgid "CONSUMER GOT %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT QUEUED %(data)s" +#~ msgstr "" + +#~ msgid "Could not create IPC directory %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT %(data)s" +#~ msgstr "" + +#~ msgid "May specify only one of snapshot, imageRef or source volume" +#~ msgstr "" + +#~ msgid "Volume size cannot be lesser than the Snapshot size" +#~ msgstr "" + +#~ msgid "Unable to clone volumes that are in an error state" +#~ msgstr "" + +#~ msgid "Clones currently must be >= original volume size." 
+#~ msgstr "" + +#~ msgid "Volume size '%s' must be an integer and greater than 0" +#~ msgstr "" + +#~ msgid "Size of specified image is larger than volume size." +#~ msgstr "" + +#~ msgid "Image minDisk size is larger than the volume size." +#~ msgstr "" + +#~ msgid "" +#~ msgstr "" + +#~ msgid "Availability zone is invalid" +#~ msgstr "" + +#~ msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +#~ msgstr "" + +#~ msgid "volume %s: creating from snapshot" +#~ msgstr "" + +#~ msgid "volume %s: creating from existing volume" +#~ msgstr "" + +#~ msgid "volume %s: creating from image" +#~ msgstr "" + +#~ msgid "volume %s: creating" +#~ msgstr "" + +#~ msgid "Setting volume: %s status to error after failed image copy." +#~ msgstr "" + +#~ msgid "Unexpected Error: " +#~ msgstr "" + +#~ msgid "volume %s: creating export" +#~ msgstr "" + +#~ msgid "volume %s: create failed" +#~ msgstr "" + +#~ msgid "volume %s: created successfully" +#~ msgstr "" + +#~ msgid "volume %s: Error trying to reschedule create" +#~ msgstr "" + +#~ msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +#~ msgstr "" + +#~ msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +#~ msgstr "" + +#~ msgid "Downloaded image %(image_id)s to %(volume_id)s successfully." +#~ msgstr "" + +#~ msgid "Array Mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "LUN %(lun)s of size %(size)s MB is created." +#~ msgstr "" + +#~ msgid "Array mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "Failed to attach iser target for volume %(volume_id)s." +#~ msgstr "" + +#~ msgid "Fetching %s" +#~ msgstr "" + +#~ msgid "Link Local address is not found.:%s" +#~ msgstr "" + +#~ msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +#~ msgstr "" + +#~ msgid "Started %(name)s on %(_host)s:%(_port)s" +#~ msgstr "" + +#~ msgid "Unable to find a Fibre Channel volume device" +#~ msgstr "" + +#~ msgid "Volume device not found at %s" +#~ msgstr "" + +#~ msgid "Unable to find Volume Group: %s" +#~ msgstr "" + +#~ msgid "Failed to create Volume Group: %s" +#~ msgstr "" + +#~ msgid "snapshot %(snap_name)s: creating" +#~ msgstr "" + +#~ msgid "Running with CoraidDriver for ESM EtherCLoud" +#~ msgstr "" + +#~ msgid "Update session cookie %(session)s" +#~ msgstr "" + +#~ msgid "Message : %(message)s" +#~ msgstr "" + +#~ msgid "Error while trying to set group: %(message)s" +#~ msgstr "" + +#~ msgid "Unable to find group: %(group)s" +#~ msgstr "" + +#~ msgid "ESM urlOpen error" +#~ msgstr "" + +#~ msgid "JSON Error" +#~ msgstr "" + +#~ msgid "Request without URL" +#~ msgstr "" + +#~ msgid "Configure data : %s" +#~ msgstr "" + +#~ msgid "Configure response : %s" +#~ msgstr "" + +#~ msgid "Unable to retrive volume infos for volume %(volname)s" +#~ msgstr "" + +#~ msgid "Cannot login on Coraid ESM" +#~ msgstr "" + +#~ msgid "Fail to create volume %(volname)s" +#~ msgstr "" + +#~ msgid "Failed to delete volume %(volname)s" +#~ msgstr "" + +#~ msgid "Failed to Create Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "Failed to Delete Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "Failed to Create Volume from Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "fmt = %(fmt)s backed by: %(backing_file)s" +#~ msgstr "" + +#~ msgid "Expected image to be in raw format, but is %s" +#~ msgstr "" + +#~ msgid "volume group %s doesn't exist" +#~ msgstr "" + +#~ msgid "Error retrieving volume stats: %s" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Could not get system name" +#~ msgstr "" + +#~ msgid "CPG (%s) must 
be in a domain" +#~ msgstr "" + +#~ msgid "Error populating default encryption types!" +#~ msgstr "" + +#~ msgid "Unexpected error while running command." +#~ msgstr "" + +#~ msgid "Nexenta SA returned the error" +#~ msgstr "" + +#~ msgid "Ignored target group creation error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group member addition error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LU creation error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Copying metadata from snapshot %(snap_volume_id)s to %(volume_id)s" +#~ msgstr "" + +#~ msgid "Connection to glance failed" +#~ msgstr "" + +#~ msgid "Invalid snapshot" +#~ msgstr "" + +#~ msgid "Invalid input received" +#~ msgstr "" + +#~ msgid "Invalid volume type" +#~ msgstr "" + +#~ msgid "Invalid volume" +#~ msgstr "" + +#~ msgid "Invalid host" +#~ msgstr "" + +#~ msgid "Invalid auth key" +#~ msgstr "" + +#~ msgid "Invalid metadata" +#~ msgstr "" + +#~ msgid "Invalid metadata size" +#~ msgstr "" + +#~ msgid "Migration error" +#~ msgstr "" + +#~ msgid "Quota exceeded" +#~ msgstr "" + +#~ msgid "Connection to swift failed" +#~ msgstr "" + +#~ msgid "Volume migration failed" +#~ msgstr "" + +#~ msgid "SSH command injection detected" +#~ msgstr "" + +#~ msgid "Invalid qos specs" +#~ msgstr "" + +#~ msgid "debug in callback: %s" +#~ msgstr "" + +#~ msgid "Expected object of type: %s" +#~ msgstr "" + +#~ msgid "timefunc: '%(name)s' took %(total_time).2f secs" +#~ msgstr "" + +#~ msgid "base image still has %s snapshots so not deleting base image" +#~ msgstr "" + +#~ msgid "Failed to rename migration destination volume %(vol)s to %(name)s" +#~ msgstr "" + +#~ msgid "Resize volume \"%(name)s\" to %(size)s" +#~ msgstr "" + +#~ msgid "Volume \"%(name)s\" resized. New size is %(size)s" +#~ msgstr "" + +#~ msgid "Invalid snapshot backing file format: %s" +#~ msgstr "" + +#~ msgid "Extend volume from %(old_size) to %(new_size)" +#~ msgstr "" + +#~ msgid "pool %s doesn't exist" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Could not get system name." +#~ msgstr "" + +#~ msgid "Disk not found: %s" +#~ msgstr "" + +#~ msgid "read timed out" +#~ msgstr "" + +#~ msgid "check_for_setup_error." +#~ msgstr "" + +#~ msgid "check_for_setup_error: Can not get device type." +#~ msgstr "" + +#~ msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +#~ msgstr "" + +#~ msgid "_get_device_type: Storage Pool must be configured." +#~ msgstr "" + +#~ msgid "create_volume:volume name: %s." +#~ msgstr "" + +#~ msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +#~ msgstr "" + +#~ msgid "create_export: volume name:%s" +#~ msgstr "" + +#~ msgid "create_export:Volume %(name)s does not exist." +#~ msgstr "" + +#~ msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +#~ msgstr "" + +#~ msgid "terminate_connection:Host does not exist. Host name:%(host)s." +#~ msgstr "" + +#~ msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +#~ msgstr "" + +#~ msgid "create_snapshot:Device does not support snapshot." +#~ msgstr "" + +#~ msgid "create_snapshot:Resource pool needs 1GB valid size at least." +#~ msgstr "" + +#~ msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +#~ msgstr "" + +#~ msgid "create_snapshot:Snapshot does not exist. 
Snapshot name:%(name)s" +#~ msgstr "" + +#~ msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +#~ msgstr "" + +#~ msgid "delete_snapshot:Device does not support snapshot." +#~ msgstr "" + +#~ msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +#~ msgstr "" + +#~ msgid "_check_conf_file: %s" +#~ msgstr "" + +#~ msgid "Write login information to xml error. %s" +#~ msgstr "" + +#~ msgid "_get_login_info error. %s" +#~ msgstr "" + +#~ msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +#~ msgstr "" + +#~ msgid "_get_lun_set_info:%s" +#~ msgstr "" + +#~ msgid "_get_iscsi_info:%s" +#~ msgstr "" + +#~ msgid "CLI command:%s" +#~ msgstr "" + +#~ msgid "_execute_cli:%s" +#~ msgstr "" + +#~ msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +#~ msgstr "" + +#~ msgid "_get_tgt_iqn:iSCSI IP is %s." +#~ msgstr "" + +#~ msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +#~ msgstr "" + +#~ msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +#~ msgstr "" + +#~ msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +#~ msgstr "" + +#~ msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." +#~ msgstr "" + +#~ msgid "Ignored target creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group member addition error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LU creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LUN mapping entry addition error while ensuring export" +#~ msgstr "" + +#~ msgid "Invalid source volume %(reason)s." +#~ msgstr "" + +#~ msgid "The request is invalid." +#~ msgstr "" + +#~ msgid "Volume %(volume_id)s persistence file could not be found." +#~ msgstr "" + +#~ msgid "No disk at %(location)s" +#~ msgstr "" + +#~ msgid "Class %(class_name)s could not be found: %(exception)s" +#~ msgstr "" + +#~ msgid "Action not allowed." +#~ msgstr "" + +#~ msgid "Key pair %(key_name)s already exists." +#~ msgstr "" + +#~ msgid "Migration error: %(reason)s" +#~ msgstr "" + +#~ msgid "Maximum volume/snapshot size exceeded" +#~ msgstr "" + +#~ msgid "3PAR Host already exists: %(err)s. %(info)s" +#~ msgstr "" + +#~ msgid "Backup volume %(volume_id)s type not recognised." +#~ msgstr "" + +#~ msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s" +#~ msgstr "" + +#~ msgid "ssh_read: Read SSH timeout" +#~ msgstr "" + +#~ msgid "do_setup." +#~ msgstr "" + +#~ msgid "create_volume: volume name: %s." +#~ msgstr "" + +#~ msgid "delete_volume: volume name: %s." +#~ msgstr "" + +#~ msgid "create_cloned_volume: src volume: %(src)s tgt volume: %(tgt)s" +#~ msgstr "" + +#~ msgid "create_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" +#~ msgstr "" + +#~ msgid "delete_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" +#~ msgstr "" + +#~ msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Updating volume stats" +#~ msgstr "" + +#~ msgid "restore finished." +#~ msgstr "" + +#~ msgid "Error encountered during initialization of driver: %s" +#~ msgstr "" + +#~ msgid "Unabled to update stats, driver is uninitialized" +#~ msgstr "" + +#~ msgid "Snapshot file at %s does not exist." 
+#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %s does not exist" +#~ msgstr "" + +#~ msgid "Login to 3PAR array invalid" +#~ msgstr "" + +#~ msgid "There are no datastores present under %s." +#~ msgstr "" + +#~ msgid "Size for volume: %s not found, skipping secure delete." +#~ msgstr "" + +#~ msgid "Could not find attribute for LUN named %s" +#~ msgstr "" + +#~ msgid "Cleaning up incomplete backup operations" +#~ msgstr "" + +#~ msgid "Resetting volume %s to available (was backing-up)" +#~ msgstr "" + +#~ msgid "Resetting volume %s to error_restoring (was restoring-backup)" +#~ msgstr "" + +#~ msgid "Resetting backup %s to error (was creating)" +#~ msgstr "" + +#~ msgid "Resetting backup %s to available (was restoring)" +#~ msgstr "" + +#~ msgid "Resuming delete on backup: %s" +#~ msgstr "" + +#~ msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +#~ msgstr "" + +#~ msgid "create_backup finished. backup: %s" +#~ msgstr "" + +#~ msgid "delete_backup started, backup: %s" +#~ msgstr "" + +#~ msgid "delete_backup finished, backup %s deleted" +#~ msgstr "" + +#~ msgid "JSON transfer Error" +#~ msgstr "" + +#~ msgid "create volume error: %(err)s" +#~ msgstr "" + +#~ msgid "Create snapshot error." +#~ msgstr "" + +#~ msgid "Create luncopy error." +#~ msgstr "" + +#~ msgid "_find_host_lun_id transfer data error! " +#~ msgstr "" + +#~ msgid "ssh_read: Read SSH timeout." +#~ msgstr "" + +#~ msgid "There are no hosts in the inventory." +#~ msgstr "" + +#~ msgid "Unable to create volume: %(vol)s on the hosts: %(hosts)s." +#~ msgstr "" + +#~ msgid "Successfully cloned new backing: %s." +#~ msgstr "" + +#~ msgid "Successfully reverted clone: %(clone)s to snapshot: %(snapshot)s." +#~ msgstr "" + +#~ msgid "Copying backing files from %(src)s to %(dest)s." +#~ msgstr "" + +#~ msgid "Initiated copying of backing via task: %s." +#~ msgstr "" + +#~ msgid "Successfully copied backing to %s." +#~ msgstr "" + +#~ msgid "Registering backing at path: %s to inventory." +#~ msgstr "" + +#~ msgid "Initiated registring backing, task: %s." +#~ msgstr "" + +#~ msgid "Successfully registered backing: %s." +#~ msgstr "" + +#~ msgid "Reverting backing to snapshot: %s." +#~ msgstr "" + +#~ msgid "Initiated reverting snapshot via task: %s." +#~ msgstr "" + +#~ msgid "Successfully reverted to snapshot: %s." +#~ msgstr "" + +#~ msgid "Successfully copied disk data to: %s." +#~ msgstr "" + +#~ msgid "Error(s): %s occurred in the call to RetrieveProperties." +#~ msgstr "" + +#~ msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +#~ msgstr "" + +#~ msgid "Deploy v1 of the Cinder API. " +#~ msgstr "" + +#~ msgid "Deploy v2 of the Cinder API. " +#~ msgstr "" + +#~ msgid "_read_xml:%s" +#~ msgstr "" + +#~ msgid "request ip info is %s." +#~ msgstr "" + +#~ msgid "new str info is %s." +#~ msgstr "" + +#~ msgid "Failed to create iser target for volume %(volume_id)s." +#~ msgstr "" + +#~ msgid "Failed to remove iser target for volume %(volume_id)s." 
+#~ msgstr "" + +#~ msgid "rtstool is not installed correctly" +#~ msgstr "" + +#~ msgid "Creating iser_target for: %s" +#~ msgstr "" + +#~ msgid "Failed to create iser target for volume id:%(vol_id)s: %(e)s" +#~ msgstr "" + +#~ msgid "Removing iser_target for: %s" +#~ msgstr "" + +#~ msgid "Failed to remove iser target for volume id:%(vol_id)s: %(e)s" +#~ msgstr "" + +#~ msgid "Volume %s does not exist, it seems it was already deleted" +#~ msgstr "" + +#~ msgid "Executing zfs send/recv on the appliance" +#~ msgstr "" + +#~ msgid "zfs send/recv done, new volume %s created" +#~ msgstr "" + +#~ msgid "Failed to delete temp snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" + +#~ msgid "Failed to delete zfs recv snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" + +#~ msgid "rbd export-diff failed - %s" +#~ msgstr "" + +#~ msgid "rbd import-diff failed - %s" +#~ msgstr "" + +#~ msgid "%s is not on GPFS. Perhaps GPFS not mounted." +#~ msgstr "" + +#~ msgid "Folder %s does not exist, it seems it was already deleted." +#~ msgstr "" + +#~ msgid "No 'os-update_readonly_flag' was specified in request." +#~ msgstr "" + +#~ msgid "Volume 'readonly' flag must be specified in request as a boolean." +#~ msgstr "" + +#~ msgid "ISER provider_location not stored, using discovery" +#~ msgstr "" + +#~ msgid "Could not find iSER export for volume %s" +#~ msgstr "" + +#~ msgid "ISER Discovery: Found %s" +#~ msgstr "" + +#~ msgid "Failed to access the device on the path %(path)s: %(error)s." +#~ msgstr "" + +#~ msgid "iSER device not found at %s" +#~ msgstr "" + +#~ msgid "Found iSER node %(host_device)s (after %(tries)s rescans)." +#~ msgstr "" + +#~ msgid "Skipping ensure_export. No iser_target provisioned for volume: %s" +#~ msgstr "" + +#~ msgid "Skipping remove_export. No iser_target provisioned for volume: %s" +#~ msgstr "" + +#~ msgid "Downloading image: %s from glance image server." +#~ msgstr "" + +#~ msgid "Uploading image: %s to the Glance image server." +#~ msgstr "" + +#~ msgid "Invalid request body" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: prefix %s" +#~ msgstr "" + +#~ msgid "Schedule volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete schedule volume using flow: %s" +#~ msgstr "" + +#~ msgid "Create volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete create volume workflow" +#~ msgstr "" + +#~ msgid "Expected volume result not found" +#~ msgstr "" + +#~ msgid "Manager volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete manager volume workflow" +#~ msgstr "" + +#~ msgid "Unable to update stats, driver is uninitialized" +#~ msgstr "" + +#~ msgid "Bad reponse from server: %s" +#~ msgstr "" + +#~ msgid "%(flow)s has moved into state %(state)s from state %(old_state)s" +#~ msgstr "" + +#~ msgid "No request spec, will not reschedule" +#~ msgstr "" + +#~ msgid "No retry filter property or associated retry info, will not reschedule" +#~ msgstr "" + +#~ msgid "Retry info not present, will not reschedule" +#~ msgstr "" + +#~ msgid "Clear capabilities" +#~ msgstr "" + +#~ msgid "This usually means the volume was never succesfully created." +#~ msgstr "" + +#~ msgid "setting LU uppper (end) limit to %s" +#~ msgstr "" + +#~ msgid "Can't find lun or lun goup in array" +#~ msgstr "" + +#~ msgid "Volume to be restored to is smaller than the backup to be restored" +#~ msgstr "" + +#~ msgid "Volume driver '%(driver)s' not initialized." 
+#~ msgstr "" + +#~ msgid "in looping call" +#~ msgstr "" + +#~ msgid "Is the appropriate service running?" +#~ msgstr "" + +#~ msgid "Could not find another host" +#~ msgstr "" + +#~ msgid "Not enough allocatable volume gigabytes remaining" +#~ msgstr "" + +#~ msgid "Unable to update stats on non-intialized Volume Group: %s" +#~ msgstr "" + +#~ msgid "do_setup: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "migrate_volume started with more than one vdisk copy" +#~ msgstr "" + +#~ msgid "migrate_volume: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "Selected datastore: %s for the volume." +#~ msgstr "" + +#~ msgid "There are no valid datastores present under %s." +#~ msgstr "" + +#~ msgid "Unable to create volume, driver not initialized" +#~ msgstr "" + +#~ msgid "Migration %(migration_id)s could not be found." +#~ msgstr "" + +#~ msgid "Bad driver response status: %(status)s" +#~ msgstr "" + +#~ msgid "Instance %(instance_id)s could not be found." +#~ msgstr "" + +#~ msgid "Volume retype failed: %(reason)s" +#~ msgstr "" + +#~ msgid "SIGTERM received" +#~ msgstr "" + +#~ msgid "Child %(pid)d exited with status %(code)d" +#~ msgstr "" + +#~ msgid "_wait_child %d" +#~ msgstr "" + +#~ msgid "wait wrap.failed %s" +#~ msgstr "" + +#~ msgid "" +#~ "Report interval must be less than " +#~ "service down time. Current config: " +#~ ". Setting " +#~ "service_down_time to: %(new_service_down_time)s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target for volume %(name)s." +#~ msgstr "" + +#~ msgid "Updating iscsi target: %s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target %(name)s: %(e)s" +#~ msgstr "" + +#~ msgid "Caught '%(exception)s' exception." +#~ msgstr "" + +#~ msgid "Get code level failed" +#~ msgstr "" + +#~ msgid "do_setup: Could not get system name" +#~ msgstr "" + +#~ msgid "Failed to get license information." +#~ msgstr "" + +#~ msgid "do_setup: No configured nodes" +#~ msgstr "" + +#~ msgid "enter: _get_chap_secret_for_host: host name %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_chap_secret_for_host: host name " +#~ "%(host_name)s with secret %(chap_secret)s" +#~ msgstr "" + +#~ msgid "" +#~ "_create_host: Cannot clean host name. " +#~ "Host name is not unicode or string" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: %s" +#~ msgstr "" + +#~ msgid "leave: _get_host_from_connector: host %s" +#~ msgstr "" + +#~ msgid "enter: _create_host: host %s" +#~ msgstr "" + +#~ msgid "_create_host: No connector ports" +#~ msgstr "" + +#~ msgid "leave: _create_host: host %(host)s - %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +#~ msgstr "" + +#~ msgid "" +#~ "storwize_svc_multihostmap_enabled is set to " +#~ "False, Not allow multi host mapping" +#~ msgstr "" + +#~ msgid "volume %s mapping to multi host" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _map_vol_to_host: LUN %(result_lun)s, " +#~ "volume %(volume_name)s, host %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "leave: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "_create_host failed to return the host name." +#~ msgstr "" + +#~ msgid "_get_host_from_connector failed to return the host name for connector" +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to any host found." +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: Multiple mappings of " +#~ "volume %(vol_name)s found, no host " +#~ "specified." 
+#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to host %(host_name)s found" +#~ msgstr "" + +#~ msgid "protocol must be specified as ' iSCSI' or ' FC'" +#~ msgstr "" + +#~ msgid "enter: _create_vdisk: vdisk %s " +#~ msgstr "" + +#~ msgid "" +#~ "_create_vdisk %(name)s - did not find success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find success " +#~ "message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find mapping " +#~ "id in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to prepare FlashCopy" +#~ " from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "Unexecpted mapping status %(status)s for " +#~ "mapping %(id)s. Attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Mapping %(id)s prepare failed to " +#~ "complete within the allotted %(to)d " +#~ "seconds timeout. Terminating." +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s with " +#~ "exception %(ex)s" +#~ msgstr "" + +#~ msgid "_prepare_fc_map: %s" +#~ msgstr "" + +#~ msgid "" +#~ "_start_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "enter: _run_flashcopy: execute FlashCopy from" +#~ " source %(source)s to target %(target)s" +#~ msgstr "" + +#~ msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +#~ msgstr "" + +#~ msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %(src_vdisk)s (%(src_id)s) does not exist" +#~ msgstr "" + +#~ msgid "" +#~ "_create_copy: cannot get source vdisk " +#~ "%(src)s capacity from vdisk attributes " +#~ "%(attr)s" +#~ msgstr "" + +#~ msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_flashcopy_mapping_attributes: mapping " +#~ "%(fc_map_id)s, attributes %(attributes)s" +#~ msgstr "" + +#~ msgid "enter: _is_vdisk_defined: vdisk %s " +#~ msgstr "" + +#~ msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +#~ msgstr "" + +#~ msgid "enter: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "warning: Tried to delete vdisk %s but it does not exist." 
+#~ msgstr "" + +#~ msgid "leave: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "" +#~ "_add_vdisk_copy %(name)s - did not find" +#~ " success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "_get_vdisk_copy_attrs: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "_get_pool_attrs: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "enter: _execute_command_and_parse_attributes: command %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _execute_command_and_parse_attributes:\n" +#~ "command: %(cmd)s\n" +#~ "attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "_get_hdr_dic: attribute headers and values do not match.\n" +#~ " Headers: %(header)s\n" +#~ " Values: %(row)s" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ "stdout: %(out)s\n" +#~ "stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "Did not find expected column in %(fun)s: %(hdr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Volume size %(size)s cannot be lesser" +#~ " than the snapshot size %(snap_size)s. " +#~ "They must be >= original snapshot " +#~ "size." +#~ msgstr "" + +#~ msgid "" +#~ "Clones currently disallowed when %(size)s " +#~ "< %(source_size)s. They must be >= " +#~ "original volume size." +#~ msgstr "" + +#~ msgid "" +#~ "Size of specified image %(image_size)s " +#~ "is larger than volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "" +#~ "Image minDisk size %(min_disk)s is " +#~ "larger than the volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "Updating volume %(volume_id)s with %(update)s" +#~ msgstr "" + +#~ msgid "Volume %s: resetting 'creating' status failed" +#~ msgstr "" + +#~ msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s" +#~ msgstr "" + +#~ msgid "Marking volume %s as bootable" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting download of %(image_id)s " +#~ "(%(image_location)s) to volume %(volume_id)s" +#~ msgstr "" + +#~ msgid "" +#~ "Downloaded image %(image_id)s (%(image_location)s)" +#~ " to volume %(volume_id)s successfully" +#~ msgstr "" + +#~ msgid "" +#~ "Creating volume glance metadata for " +#~ "volume %(volume_id)s backed by image " +#~ "%(image_id)s with: %(vol_metadata)s" +#~ msgstr "" + +#~ msgid "" +#~ "Cloning %(volume_id)s from image %(image_id)s" +#~ " at location %(image_location)s" +#~ msgstr "" + diff --git a/cinder/locale/pt_BR/LC_MESSAGES/cinder.po b/cinder/locale/pt_BR/LC_MESSAGES/cinder.po index 8357b620c6..2deadcfd10 100644 --- a/cinder/locale/pt_BR/LC_MESSAGES/cinder.po +++ b/cinder/locale/pt_BR/LC_MESSAGES/cinder.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: cinder\n" "Report-Msgid-Bugs-To: FULL NAME \n" -"POT-Creation-Date: 2012-04-08 23:04+0000\n" +"POT-Creation-Date: 2014-02-03 06:16+0000\n" "PO-Revision-Date: 2012-02-06 21:07+0000\n" "Last-Translator: Adriano Steffler \n" "Language-Team: Brazilian Portuguese \n" @@ -15,8194 +15,10746 @@ msgstr "" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 0.9.6\n" +"Generated-By: Babel 1.3\n" -#: cinder/context.py:59 +#: cinder/context.py:61 #, python-format msgid "Arguments dropped when creating context: %s" msgstr "" -#: cinder/context.py:90 +#: cinder/context.py:102 #, python-format msgid "read_deleted can only be one of 'no', 'yes' or 'only', not 
%r" msgstr "" -#: cinder/crypto.py:48 -msgid "Filename of root CA" -msgstr "Nome do arquivo da CA raiz" - -#: cinder/crypto.py:51 -msgid "Filename of private key" -msgstr "Nome do arquivo da chave privada" - -#: cinder/crypto.py:54 -msgid "Filename of root Certificate Revocation List" -msgstr "" - -#: cinder/crypto.py:57 -msgid "Where we keep our keys" -msgstr "Aonde armazenamos nossas chaves" - -#: cinder/crypto.py:60 -msgid "Where we keep our root CA" -msgstr "Onde mantemos nosso CA raiz" - -#: cinder/crypto.py:63 -msgid "Should we use a CA for each project?" -msgstr "Devemos usar um CA para cada projeto?" - -#: cinder/crypto.py:67 -#, python-format -msgid "Subject for certificate for users, %s for project, user, timestamp" -msgstr "Assunto do certificado para usuários, %s para projeto, usuário, timestamp" - -#: cinder/crypto.py:72 -#, python-format -msgid "Subject for certificate for projects, %s for project, timestamp" -msgstr "Assunto do certificado para projetos, %s para projeto, timestamp" - -#: cinder/crypto.py:292 -#, python-format -msgid "Flags path: %s" -msgstr "Localização dos sinalizadores: %s" - -#: cinder/exception.py:56 -msgid "Unexpected error while running command." -msgstr "Erro inesperado ao executar o comando." - -#: cinder/exception.py:59 -#, python-format -msgid "" -"%(description)s\n" -"Command: %(cmd)s\n" -"Exit code: %(exit_code)s\n" -"Stdout: %(stdout)r\n" -"Stderr: %(stderr)r" -msgstr "" -"%(description)s\n" -"Comando: %(cmd)s\n" -"Código de saída: %(exit_code)s\n" -"Saída padrão: %(stdout)r\n" -"Erro: %(stderr)r" - -#: cinder/exception.py:94 -msgid "DB exception wrapped." -msgstr "" - -#: cinder/exception.py:155 +#: cinder/exception.py:66 cinder/brick/exception.py:33 msgid "An unknown exception occurred." msgstr "" -#: cinder/exception.py:178 -msgid "Failed to decrypt text" +#: cinder/exception.py:88 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" msgstr "" -#: cinder/exception.py:182 -msgid "Failed to paginate through images from image service" -msgstr "" - -#: cinder/exception.py:186 -msgid "Virtual Interface creation failed" +#: cinder/exception.py:107 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" msgstr "" -#: cinder/exception.py:190 -msgid "5 attempts to create virtual interfacewith unique mac address failed" +#: cinder/exception.py:112 +#, python-format +msgid "Volume driver reported an error: %(message)s" msgstr "" -#: cinder/exception.py:195 -msgid "Connection to glance failed" +#: cinder/exception.py:116 +#, python-format +msgid "Backup driver reported an error: %(message)s" msgstr "" -#: cinder/exception.py:199 -msgid "Connection to melange failed" +#: cinder/exception.py:120 +#, python-format +msgid "Connection to glance failed: %(reason)s" msgstr "" -#: cinder/exception.py:203 +#: cinder/exception.py:124 msgid "Not authorized." msgstr "" -#: cinder/exception.py:208 +#: cinder/exception.py:129 msgid "User does not have admin privileges" msgstr "" -#: cinder/exception.py:212 +#: cinder/exception.py:133 #, python-format msgid "Policy doesn't allow %(action)s to be performed." msgstr "" -#: cinder/exception.py:216 +#: cinder/exception.py:137 #, fuzzy, python-format msgid "Not authorized for image %(image_id)s." msgstr "sem método para mensagem: %s" -#: cinder/exception.py:220 +#: cinder/exception.py:141 +msgid "Volume driver not ready." +msgstr "" + +#: cinder/exception.py:145 cinder/brick/exception.py:74 msgid "Unacceptable parameters." 
msgstr "" -#: cinder/exception.py:225 -msgid "Invalid snapshot" +#: cinder/exception.py:150 +#, python-format +msgid "Invalid snapshot: %(reason)s" msgstr "" -#: cinder/exception.py:229 +#: cinder/exception.py:154 #, python-format -msgid "Volume %(volume_id)s is not attached to anything" +msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." msgstr "" -#: cinder/exception.py:233 cinder/api/openstack/compute/contrib/keypairs.py:113 -msgid "Keypair data is invalid" +#: cinder/exception.py:159 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." msgstr "" -#: cinder/exception.py:237 +#: cinder/exception.py:163 msgid "Failed to load data into json format" msgstr "" -#: cinder/exception.py:241 -msgid "The request is invalid." +#: cinder/exception.py:167 +msgid "The results are invalid." msgstr "" -#: cinder/exception.py:245 +#: cinder/exception.py:171 #, python-format -msgid "Invalid signature %(signature)s for user %(user)s." +msgid "Invalid input received: %(reason)s" msgstr "" -#: cinder/exception.py:249 -msgid "Invalid input received" +#: cinder/exception.py:175 +#, python-format +msgid "Invalid volume type: %(reason)s" msgstr "" -#: cinder/exception.py:253 +#: cinder/exception.py:179 #, python-format -msgid "Invalid instance type %(instance_type)s." +msgid "Invalid volume: %(reason)s" msgstr "" -#: cinder/exception.py:257 -msgid "Invalid volume type" +#: cinder/exception.py:183 +#, python-format +msgid "Invalid content type %(content_type)s." msgstr "" -#: cinder/exception.py:261 -msgid "Invalid volume" +#: cinder/exception.py:187 +#, python-format +msgid "Invalid host: %(reason)s" msgstr "" -#: cinder/exception.py:265 +#: cinder/exception.py:193 cinder/brick/exception.py:81 #, python-format -msgid "Invalid port range %(from_port)s:%(to_port)s. %(msg)s" +msgid "%(err)s" msgstr "" -#: cinder/exception.py:269 +#: cinder/exception.py:197 #, python-format -msgid "Invalid IP protocol %(protocol)s." +msgid "Invalid auth key: %(reason)s" msgstr "" -#: cinder/exception.py:273 +#: cinder/exception.py:201 #, python-format -msgid "Invalid content type %(content_type)s." +msgid "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" msgstr "" -#: cinder/exception.py:277 -#, python-format -msgid "Invalid cidr %(cidr)s." +#: cinder/exception.py:206 +msgid "Service is unavailable at this time." msgstr "" -#: cinder/exception.py:281 -msgid "Invalid reuse of an RPC connection." +#: cinder/exception.py:210 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" msgstr "" -#: cinder/exception.py:285 -msgid "Invalid Parameter: Unicode is not supported by the current database." +#: cinder/exception.py:214 +#, python-format +msgid "The device in the path %(path)s is unavailable: %(reason)s" msgstr "" -#: cinder/exception.py:292 +#: cinder/exception.py:218 #, python-format -msgid "%(err)s" +msgid "Expected a uuid but received %(uuid)s." +msgstr "" + +#: cinder/exception.py:222 cinder/brick/exception.py:68 +msgid "Resource could not be found." msgstr "" -#: cinder/exception.py:296 +#: cinder/exception.py:228 #, python-format -msgid "" -"Cannot perform action '%(action)s' on aggregate %(aggregate_id)s. Reason:" -" %(reason)s." +msgid "Volume %(volume_id)s could not be found." msgstr "" -#: cinder/exception.py:301 +#: cinder/exception.py:232 #, python-format -msgid "Group not valid. Reason: %(reason)s" +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." 
msgstr "" -#: cinder/exception.py:305 +#: cinder/exception.py:237 #, python-format msgid "" -"Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot %(method)s while" -" the instance is in this state." +"Volume %(volume_id)s has no administration metadata with key " +"%(metadata_key)s." msgstr "" -#: cinder/exception.py:310 +#: cinder/exception.py:242 #, python-format -msgid "Instance %(instance_id)s is not running." +msgid "Invalid metadata: %(reason)s" msgstr "" -#: cinder/exception.py:314 +#: cinder/exception.py:246 #, python-format -msgid "Instance %(instance_id)s is not suspended." +msgid "Invalid metadata size: %(reason)s" msgstr "" -#: cinder/exception.py:318 +#: cinder/exception.py:250 #, python-format -msgid "Instance %(instance_id)s is not in rescue mode" +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." msgstr "" -#: cinder/exception.py:322 -msgid "Failed to suspend instance" +#: cinder/exception.py:255 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." msgstr "" -#: cinder/exception.py:326 -msgid "Failed to resume server" +#: cinder/exception.py:259 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." msgstr "" -#: cinder/exception.py:330 -msgid "Failed to reboot instance" +#: cinder/exception.py:264 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." msgstr "" -#: cinder/exception.py:334 -#, fuzzy -msgid "Failed to terminate instance" -msgstr "Começando a terminar instâncias" - -#: cinder/exception.py:338 -msgid "Service is unavailable at this time." +#: cinder/exception.py:269 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s deletion is not allowed with volumes " +"present with the type." msgstr "" -#: cinder/exception.py:342 -msgid "Volume service is unavailable at this time." +#: cinder/exception.py:274 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." msgstr "" -#: cinder/exception.py:346 -msgid "Compute service is unavailable at this time." +#: cinder/exception.py:278 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" msgstr "" -#: cinder/exception.py:350 +#: cinder/exception.py:282 #, python-format -msgid "Unable to migrate instance (%(instance_id)s) to current host (%(host)s)." +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" msgstr "" -#: cinder/exception.py:355 -msgid "Destination compute host is unavailable at this time." +#: cinder/exception.py:287 +#, python-format +msgid "No target id found for volume %(volume_id)s." msgstr "" -#: cinder/exception.py:359 -msgid "Original compute host is unavailable at this time." +#: cinder/exception.py:291 +#, python-format +msgid "Invalid image href %(image_href)s." msgstr "" -#: cinder/exception.py:363 -msgid "The supplied hypervisor type of is invalid." +#: cinder/exception.py:295 +#, python-format +msgid "Image %(image_id)s could not be found." msgstr "" -#: cinder/exception.py:367 -msgid "The instance requires a newer hypervisor version than has been provided." +#: cinder/exception.py:299 +#, python-format +msgid "Service %(service_id)s could not be found." msgstr "" -#: cinder/exception.py:372 +#: cinder/exception.py:303 #, python-format -msgid "" -"The supplied disk path (%(path)s) already exists, it is expected not to " -"exist." +msgid "Host %(host)s could not be found." 
msgstr "" -#: cinder/exception.py:377 +#: cinder/exception.py:307 #, python-format -msgid "The supplied device path (%(path)s) is invalid." +msgid "Scheduler Host Filter %(filter_name)s could not be found." msgstr "" -#: cinder/exception.py:381 +#: cinder/exception.py:311 #, python-format -msgid "The supplied device (%(device)s) is busy." +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." msgstr "" -#: cinder/exception.py:385 -msgid "Unacceptable CPU info" +#: cinder/exception.py:315 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." msgstr "" -#: cinder/exception.py:389 +#: cinder/exception.py:319 #, python-format -msgid "%(address)s is not a valid IP v4/6 address." +msgid "Invalid reservation expiration %(expire)s." msgstr "" -#: cinder/exception.py:393 +#: cinder/exception.py:323 #, python-format msgid "" -"VLAN tag is not appropriate for the port group %(bridge)s. Expected VLAN " -"tag is %(tag)s, but the one associated with the port group is %(pgroup)s." +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:328 +msgid "Quota could not be found" msgstr "" -#: cinder/exception.py:399 +#: cinder/exception.py:332 #, python-format -msgid "" -"vSwitch which contains the port group %(bridge)s is not associated with " -"the desired physical adapter. Expected vSwitch is %(expected)s, but the " -"one associated is %(actual)s." +msgid "Unknown quota resources %(unknown)s." msgstr "" -#: cinder/exception.py:406 +#: cinder/exception.py:336 #, python-format -msgid "Disk format %(disk_format)s is not acceptable" +msgid "Quota for project %(project_id)s could not be found." msgstr "" -#: cinder/exception.py:410 +#: cinder/exception.py:340 #, python-format -msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgid "Quota class %(class_name)s could not be found." msgstr "" -#: cinder/exception.py:414 +#: cinder/exception.py:344 #, python-format -msgid "Instance %(instance_id)s is unacceptable: %(reason)s" +msgid "Quota usage for project %(project_id)s could not be found." msgstr "" -#: cinder/exception.py:418 +#: cinder/exception.py:348 #, python-format -msgid "Ec2 id %(ec2_id)s is unacceptable." +msgid "Quota reservation %(uuid)s could not be found." msgstr "" -#: cinder/exception.py:422 -msgid "Resource could not be found." +#: cinder/exception.py:352 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" msgstr "" -#: cinder/exception.py:427 +#: cinder/exception.py:356 #, python-format -msgid "Required flag %(flag)s not set." +msgid "File %(file_path)s could not be found." msgstr "" -#: cinder/exception.py:431 +#: cinder/exception.py:365 +#, fuzzy, python-format +msgid "Volume Type %(id)s already exists." +msgstr "group %s já existe" + +#: cinder/exception.py:369 #, python-format -msgid "Volume %(volume_id)s could not be found." +msgid "Volume type encryption for type %(type_id)s already exists." msgstr "" -#: cinder/exception.py:435 +#: cinder/exception.py:373 #, python-format -msgid "Unable to locate account %(account_name)s on Solidfire device" +msgid "Malformed message body: %(reason)s" msgstr "" -#: cinder/exception.py:440 +#: cinder/exception.py:377 #, python-format -msgid "Volume not found for instance %(instance_id)s." 
+msgid "Could not find config at %(path)s" msgstr "" -#: cinder/exception.py:444 +#: cinder/exception.py:381 +#, fuzzy, python-format +msgid "Could not find parameter %(param)s" +msgstr "Não é possível localizar o volume %s" + +#: cinder/exception.py:385 #, python-format -msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgid "Could not load paste app '%(name)s' from %(path)s" msgstr "" -#: cinder/exception.py:449 -msgid "Zero volume types found." +#: cinder/exception.py:389 +#, python-format +msgid "No valid host was found. %(reason)s" msgstr "" -#: cinder/exception.py:453 +#: cinder/exception.py:398 #, python-format -msgid "Volume type %(volume_type_id)s could not be found." +msgid "Host %(host)s is not up or doesn't exist." msgstr "" -#: cinder/exception.py:457 +#: cinder/exception.py:402 #, python-format -msgid "Volume type with name %(volume_type_name)s could not be found." +msgid "Quota exceeded: code=%(code)s" msgstr "" -#: cinder/exception.py:462 +#: cinder/exception.py:409 #, python-format msgid "" -"Volume Type %(volume_type_id)s has no extra specs with key " -"%(extra_specs_key)s." +"Requested volume or snapshot exceeds allowed Gigabytes quota. Requested " +"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." msgstr "" -#: cinder/exception.py:467 +#: cinder/exception.py:415 #, python-format -msgid "Snapshot %(snapshot_id)s could not be found." +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" msgstr "" -#: cinder/exception.py:471 +#: cinder/exception.py:419 #, python-format -msgid "deleting volume %(volume_name)s that has snapshot" +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" msgstr "" -#: cinder/exception.py:475 +#: cinder/exception.py:423 #, python-format -msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgid "Detected more than one volume with name %(vol_name)s" msgstr "" -#: cinder/exception.py:480 +#: cinder/exception.py:427 #, python-format -msgid "No target id found for volume %(volume_id)s." +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" msgstr "" -#: cinder/exception.py:484 +#: cinder/exception.py:432 #, python-format -msgid "No disk at %(location)s" +msgid "Unknown or unsupported command %(cmd)s" msgstr "" -#: cinder/exception.py:488 +#: cinder/exception.py:436 #, python-format -msgid "Could not find a handler for %(driver_type)s volume." +msgid "Malformed response to command %(cmd)s: %(reason)s" msgstr "" -#: cinder/exception.py:492 +#: cinder/exception.py:440 #, python-format -msgid "Invalid image href %(image_href)s." +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" msgstr "" -#: cinder/exception.py:496 +#: cinder/exception.py:444 +#, python-format msgid "" -"Some images have been stored via hrefs. This version of the api does not " -"support displaying image hrefs." +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" msgstr "" -#: cinder/exception.py:501 +#: cinder/exception.py:449 #, python-format -msgid "Image %(image_id)s could not be found." +msgid "Glance metadata for volume/snapshot %(id)s cannot be found." msgstr "" -#: cinder/exception.py:505 +#: cinder/exception.py:453 #, python-format -msgid "Kernel not found for image %(image_id)s." +msgid "Failed to export for volume: %(reason)s" msgstr "" -#: cinder/exception.py:509 +#: cinder/exception.py:457 #, python-format -msgid "User %(user_id)s could not be found." 
+msgid "Failed to create metadata for volume: %(reason)s" msgstr "" -#: cinder/exception.py:513 +#: cinder/exception.py:461 #, python-format -msgid "Project %(project_id)s could not be found." +msgid "Failed to update metadata for volume: %(reason)s" msgstr "" -#: cinder/exception.py:517 +#: cinder/exception.py:465 #, python-format -msgid "User %(user_id)s is not a member of project %(project_id)s." +msgid "Failed to copy metadata to volume: %(reason)s" msgstr "" -#: cinder/exception.py:521 -#, python-format -msgid "Role %(role_id)s could not be found." -msgstr "" +#: cinder/exception.py:469 +#, fuzzy, python-format +msgid "Failed to copy image to volume: %(reason)s" +msgstr "Não é possível localizar o volume %s" -#: cinder/exception.py:525 -msgid "Cannot find SR to read/write VDI." +#: cinder/exception.py:473 +msgid "Invalid Ceph args provided for backup rbd operation" msgstr "" -#: cinder/exception.py:529 -#, python-format -msgid "%(req)s is required to create a network." +#: cinder/exception.py:477 +msgid "An error has occurred during backup operation" msgstr "" -#: cinder/exception.py:533 -#, python-format -msgid "Network %(network_id)s could not be found." +#: cinder/exception.py:481 +msgid "Backup RBD operation failed" msgstr "" -#: cinder/exception.py:537 +#: cinder/exception.py:485 #, python-format -msgid "Network could not be found for bridge %(bridge)s" +msgid "Backup %(backup_id)s could not be found." msgstr "" -#: cinder/exception.py:541 -#, python-format -msgid "Network could not be found for uuid %(uuid)s" +#: cinder/exception.py:489 +msgid "Failed to identify volume backend." msgstr "" -#: cinder/exception.py:545 -#, python-format -msgid "Network could not be found with cidr %(cidr)s." -msgstr "" +#: cinder/exception.py:493 +#, fuzzy, python-format +msgid "Invalid backup: %(reason)s" +msgstr "Backend inválido: %s" -#: cinder/exception.py:549 +#: cinder/exception.py:497 #, python-format -msgid "Network could not be found for instance %(instance_id)s." +msgid "Connection to swift failed: %(reason)s" msgstr "" -#: cinder/exception.py:553 -msgid "No networks defined." +#: cinder/exception.py:501 +#, python-format +msgid "Transfer %(transfer_id)s could not be found." msgstr "" -#: cinder/exception.py:557 +#: cinder/exception.py:505 #, python-format -msgid "" -"Either Network uuid %(network_uuid)s is not present or is not assigned to" -" the project %(project_id)s." +msgid "Volume migration failed: %(reason)s" msgstr "" -#: cinder/exception.py:562 +#: cinder/exception.py:509 #, python-format -msgid "Host is not set to the network (%(network_id)s)." +msgid "SSH command injection detected: %(command)s" msgstr "" -#: cinder/exception.py:566 +#: cinder/exception.py:513 #, python-format -msgid "Network %(network)s has active ports, cannot delete." +msgid "QoS Specs %(specs_id)s already exists." msgstr "" -#: cinder/exception.py:570 -msgid "Could not find the datastore reference(s) which the VM uses." +#: cinder/exception.py:517 +#, python-format +msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." msgstr "" -#: cinder/exception.py:574 +#: cinder/exception.py:522 #, python-format -msgid "No fixed IP associated with id %(id)s." +msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." msgstr "" -#: cinder/exception.py:578 +#: cinder/exception.py:527 #, python-format -msgid "Fixed ip not found for address %(address)s." +msgid "No such QoS spec %(specs_id)s." 
msgstr "" -#: cinder/exception.py:582 +#: cinder/exception.py:531 #, python-format -msgid "Instance %(instance_id)s has zero fixed ips." +msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." msgstr "" -#: cinder/exception.py:586 +#: cinder/exception.py:536 #, python-format -msgid "Network host %(host)s has zero fixed ips in network %(network_id)s." +msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." msgstr "" -#: cinder/exception.py:591 +#: cinder/exception.py:541 #, python-format -msgid "Instance %(instance_id)s doesn't have fixed ip '%(ip)s'." +msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." msgstr "" -#: cinder/exception.py:595 +#: cinder/exception.py:546 #, python-format -msgid "Host %(host)s has zero fixed ips." +msgid "Invalid qos specs: %(reason)s" msgstr "" -#: cinder/exception.py:599 +#: cinder/exception.py:550 #, python-format -msgid "" -"Fixed IP address (%(address)s) does not exist in network " -"(%(network_uuid)s)." +msgid "QoS Specs %(specs_id)s is still associated with entities." msgstr "" -#: cinder/exception.py:604 +#: cinder/exception.py:554 #, python-format -msgid "Fixed IP address %(address)s is already in use." +msgid "key manager error: %(reason)s" msgstr "" -#: cinder/exception.py:608 -#, python-format -msgid "Fixed IP address %(address)s is invalid." +#: cinder/exception.py:560 +msgid "Coraid Cinder Driver exception." msgstr "" -#: cinder/exception.py:612 -msgid "Zero fixed ips available." +#: cinder/exception.py:564 +msgid "Failed to encode json data." msgstr "" -#: cinder/exception.py:616 -msgid "Zero fixed ips could be found." +#: cinder/exception.py:568 +msgid "Login on ESM failed." msgstr "" -#: cinder/exception.py:620 -#, python-format -msgid "Floating ip not found for id %(id)s." +#: cinder/exception.py:572 +msgid "Relogin on ESM failed." msgstr "" -#: cinder/exception.py:624 +#: cinder/exception.py:576 #, python-format -msgid "The DNS entry %(name)s already exists in domain %(domain)s." +msgid "Group with name \"%(group_name)s\" not found." msgstr "" -#: cinder/exception.py:628 +#: cinder/exception.py:580 #, python-format -msgid "Floating ip not found for address %(address)s." +msgid "ESM configure request failed: %(message)s." msgstr "" -#: cinder/exception.py:632 +#: cinder/exception.py:584 #, python-format -msgid "Floating ip not found for host %(host)s." +msgid "Coraid ESM not available with reason: %(reason)s." msgstr "" -#: cinder/exception.py:636 -msgid "Zero floating ips available." +#: cinder/exception.py:589 +msgid "Zadara Cinder Driver exception." msgstr "" -#: cinder/exception.py:640 -#, python-format -msgid "Floating ip %(address)s is associated." +#: cinder/exception.py:593 +#, fuzzy, python-format +msgid "Unable to create server object for initiator %(name)s" msgstr "" +"Não é possível criar o VDI no SR %(sr_ref)s para a instância " +"%(instance_name)s" -#: cinder/exception.py:644 +#: cinder/exception.py:597 #, python-format -msgid "Floating ip %(address)s is not associated." +msgid "Unable to find server object for initiator %(name)s" msgstr "" -#: cinder/exception.py:648 -msgid "Zero floating ips exist." +#: cinder/exception.py:601 +msgid "Unable to find any active VPSA controller" msgstr "" -#: cinder/exception.py:652 +#: cinder/exception.py:605 #, python-format -msgid "Interface %(interface)s not found." 
+msgid "Failed to retrieve attachments for volume %(name)s" msgstr "" -#: cinder/exception.py:656 +#: cinder/exception.py:609 #, python-format -msgid "Keypair %(name)s not found for user %(user_id)s" +msgid "Invalid attachment info for volume %(name)s: %(reason)s" msgstr "" -#: cinder/exception.py:660 +#: cinder/exception.py:613 #, python-format -msgid "Certificate %(certificate_id)s not found." +msgid "Bad HTTP response status %(status)s" msgstr "" -#: cinder/exception.py:664 -#, python-format -msgid "Service %(service_id)s could not be found." +#: cinder/exception.py:618 +msgid "Bad response from SolidFire API" msgstr "" -#: cinder/exception.py:668 -#, python-format -msgid "Host %(host)s could not be found." +#: cinder/exception.py:622 +msgid "SolidFire Cinder Driver exception" msgstr "" -#: cinder/exception.py:672 +#: cinder/exception.py:626 #, python-format -msgid "Compute host %(host)s could not be found." +msgid "Error in SolidFire API response: data=%(data)s" msgstr "" -#: cinder/exception.py:676 +#: cinder/exception.py:630 #, python-format -msgid "Could not find binary %(binary)s on host %(host)s." +msgid "Unable to locate account %(account_name)s on Solidfire device" msgstr "" -#: cinder/exception.py:680 +#: cinder/exception.py:636 #, python-format -msgid "Auth token %(token)s could not be found." +msgid "Invalid 3PAR Domain: %(err)s" msgstr "" -#: cinder/exception.py:684 -#, python-format -msgid "Access Key %(access_key)s could not be found." +#: cinder/exception.py:641 +msgid "Unknown NFS exception" msgstr "" -#: cinder/exception.py:688 -msgid "Quota could not be found" +#: cinder/exception.py:645 +msgid "No mounted NFS shares found" msgstr "" -#: cinder/exception.py:692 +#: cinder/exception.py:649 cinder/exception.py:662 #, python-format -msgid "Quota for project %(project_id)s could not be found." +msgid "There is no share which can host %(volume_size)sG" msgstr "" -#: cinder/exception.py:696 -#, python-format -msgid "Quota class %(class_name)s could not be found." +#: cinder/exception.py:654 +msgid "Unknown Gluster exception" msgstr "" -#: cinder/exception.py:700 -#, python-format -msgid "Security group %(security_group_id)s not found." +#: cinder/exception.py:658 +msgid "No mounted Gluster shares found" msgstr "" -#: cinder/exception.py:704 -#, python-format -msgid "Security group %(security_group_id)s not found for project %(project_id)s." +#: cinder/manager.py:133 +msgid "Notifying Schedulers of capabilities ..." msgstr "" -#: cinder/exception.py:709 -#, python-format -msgid "Security group with rule %(rule_id)s not found." +#: cinder/policy.py:30 +msgid "JSON file representing policy" msgstr "" -#: cinder/exception.py:713 -#, python-format -msgid "" -"Security group %(security_group_id)s is already associated with the " -"instance %(instance_id)s" +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" msgstr "" -#: cinder/exception.py:718 +#: cinder/quota.py:105 #, python-format msgid "" -"Security group %(security_group_id)s is not associated with the instance " -"%(instance_id)s" +"Default quota for resource: %(res)s is set by the default quota flag: " +"quota_%(res)s, it is now deprecated. Please use the the default quota " +"class for default quota." msgstr "" -#: cinder/exception.py:723 +#: cinder/quota.py:748 #, python-format -msgid "Migration %(migration_id)s could not be found." 
+msgid "Created reservations %s" msgstr "" -#: cinder/exception.py:727 +#: cinder/quota.py:770 #, python-format -msgid "Migration not found for instance %(instance_id)s with status %(status)s." +msgid "Failed to commit reservations %s" msgstr "" -#: cinder/exception.py:732 +#: cinder/quota.py:790 #, python-format -msgid "Console pool %(pool_id)s could not be found." +msgid "Failed to roll back reservations %s" msgstr "" -#: cinder/exception.py:736 -#, python-format -msgid "" -"Console pool of type %(console_type)s for compute host %(compute_host)s " -"on proxy host %(host)s not found." +#: cinder/quota.py:876 +msgid "Cannot register resource" msgstr "" -#: cinder/exception.py:742 -#, python-format -msgid "Console %(console_id)s could not be found." +#: cinder/quota.py:879 +msgid "Cannot register resources" msgstr "" -#: cinder/exception.py:746 +#: cinder/quota_utils.py:46 #, python-format -msgid "Console for instance %(instance_id)s could not be found." +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume - " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" msgstr "" -#: cinder/exception.py:750 +#: cinder/quota_utils.py:56 cinder/transfer/api.py:168 +#: cinder/volume/flows/api/create_volume.py:520 #, python-format msgid "" -"Console for instance %(instance_id)s in pool %(pool_id)s could not be " -"found." +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" msgstr "" -#: cinder/exception.py:755 +#: cinder/service.py:95 #, python-format -msgid "Invalid console type %(console_type)s " -msgstr "" - -#: cinder/exception.py:759 -msgid "Zero instance types found." +msgid "Starting %(topic)s node (version %(version_string)s)" msgstr "" -#: cinder/exception.py:763 +#: cinder/service.py:108 cinder/openstack/common/rpc/service.py:47 #, python-format -msgid "Instance type %(instance_type_id)s could not be found." +msgid "Creating Consumer connection for Service %s" msgstr "" -#: cinder/exception.py:767 +#: cinder/service.py:148 #, python-format -msgid "Instance type with name %(instance_type_name)s could not be found." +msgid "" +"Report interval must be less than service down time. Current config " +"service_down_time: %(service_down_time)s, report_interval for this: " +"service is: %(report_interval)s. Setting global service_down_time to: " +"%(new_down_time)s" msgstr "" -#: cinder/exception.py:772 -#, python-format -msgid "Flavor %(flavor_id)s could not be found." -msgstr "" +#: cinder/service.py:216 +msgid "Service killed that has no database entry" +msgstr "Encerrado serviço que não tem entrada na base de dados" -#: cinder/exception.py:776 -#, python-format -msgid "Cell %(cell_id)s could not be found." -msgstr "" +#: cinder/service.py:255 +msgid "The service database object disappeared, Recreating it." +msgstr "O objeto da base de dados do serviço desapareceu, Recriando." -#: cinder/exception.py:780 -#, python-format -msgid "Scheduler Host Filter %(filter_name)s could not be found." -msgstr "" +#: cinder/service.py:270 +msgid "Recovered model server connection!" +msgstr "Recuperada conexão servidor de modelo." -#: cinder/exception.py:784 -#, python-format -msgid "Scheduler cost function %(cost_fn_str)s could not be found." 
-msgstr "" +#: cinder/service.py:276 +msgid "model server went away" +msgstr "servidor de modelo perdido" -#: cinder/exception.py:789 +#: cinder/service.py:298 #, python-format -msgid "Scheduler weight flag not found: %(flag_name)s" +msgid "" +"Value of config option %(name)s_workers must be integer greater than 1. " +"Input value ignored." msgstr "" -#: cinder/exception.py:793 -#, python-format -msgid "Instance %(instance_id)s has no metadata with key %(metadata_key)s." +#: cinder/service.py:373 +msgid "serve() can only be called once" msgstr "" -#: cinder/exception.py:798 +#: cinder/service.py:379 cinder/openstack/common/service.py:166 +#: cinder/openstack/common/service.py:384 +#, fuzzy +msgid "Full set of CONF:" +msgstr "Conjunto completo de FLAGS:" + +#: cinder/service.py:387 #, python-format -msgid "" -"Instance Type %(instance_type_id)s has no extra specs with key " -"%(extra_specs_key)s." +msgid "%s : FLAG SET " msgstr "" -#: cinder/exception.py:803 -msgid "LDAP object could not be found" +#: cinder/utils.py:96 +#, python-format +msgid "Can not translate %s to integer." msgstr "" -#: cinder/exception.py:807 +#: cinder/utils.py:127 #, python-format -msgid "LDAP user %(user_id)s could not be found." +msgid "May specify only one of %s" msgstr "" -#: cinder/exception.py:811 -#, python-format -msgid "LDAP group %(group_id)s could not be found." +#: cinder/utils.py:212 +msgid "Specify a password or private_key" msgstr "" -#: cinder/exception.py:815 +#: cinder/utils.py:228 #, python-format -msgid "LDAP user %(user_id)s is not a member of group %(group_id)s." +msgid "Error connecting via ssh: %s" msgstr "" -#: cinder/exception.py:819 +#: cinder/utils.py:412 #, python-format -msgid "File %(file_path)s could not be found." -msgstr "" +msgid "Invalid backend: %s" +msgstr "Backend inválido: %s" -#: cinder/exception.py:823 -msgid "Zero files could be found." -msgstr "" +#: cinder/utils.py:423 +#, python-format +msgid "backend %s" +msgstr "backend %s" -#: cinder/exception.py:827 +#: cinder/utils.py:698 #, python-format -msgid "Virtual switch associated with the network adapter %(adapter)s not found." +msgid "Could not remove tmpdir: %s" msgstr "" -#: cinder/exception.py:832 +#: cinder/utils.py:759 #, python-format -msgid "Network adapter %(adapter)s could not be found." +msgid "Volume driver %s not initialized" msgstr "" -#: cinder/exception.py:836 -#, python-format -msgid "Class %(class_name)s could not be found: %(exception)s" -msgstr "" +#: cinder/wsgi.py:127 cinder/openstack/common/sslutils.py:50 +#, fuzzy, python-format +msgid "Unable to find cert_file : %s" +msgstr "Impossível localizar uma porta aberta" + +#: cinder/wsgi.py:130 cinder/openstack/common/sslutils.py:53 +#, fuzzy, python-format +msgid "Unable to find ca_file : %s" +msgstr "Impossível localizar uma porta aberta" + +#: cinder/wsgi.py:133 cinder/openstack/common/sslutils.py:56 +#, fuzzy, python-format +msgid "Unable to find key_file : %s" +msgstr "Não é possível destruir o VBD %s" -#: cinder/exception.py:840 -msgid "Action not allowed." 
+#: cinder/wsgi.py:136 cinder/openstack/common/sslutils.py:59 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" msgstr "" -#: cinder/exception.py:844 +#: cinder/wsgi.py:169 #, python-format -msgid "Unable to use global role %(role_id)s" +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" msgstr "" -#: cinder/exception.py:848 -msgid "Rotation is not allowed for snapshots" +#: cinder/wsgi.py:206 +#, python-format +msgid "Started %(name)s on %(host)s:%(port)s" msgstr "" -#: cinder/exception.py:852 -msgid "Rotation param is required for backup image_type" +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." msgstr "" -#: cinder/exception.py:861 -#, python-format -msgid "Key pair %(key_name)s already exists." +#: cinder/wsgi.py:244 +msgid "WSGI server has stopped." msgstr "" -#: cinder/exception.py:865 -#, python-format -msgid "User %(user)s already exists." +#: cinder/wsgi.py:313 +msgid "You must implement __call__" msgstr "" -#: cinder/exception.py:869 -#, python-format -msgid "LDAP user %(user)s already exists." +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." msgstr "" -#: cinder/exception.py:873 -#, python-format -msgid "LDAP group %(group)s already exists." +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." msgstr "" -#: cinder/exception.py:877 -#, python-format -msgid "User %(uid)s is already a member of the group %(group_dn)s" -msgstr "Usuário %(uid)s já é um membro do grupo %(group_dn)s" +#: cinder/api/common.py:92 cinder/api/common.py:126 cinder/volume/api.py:266 +msgid "limit param must be an integer" +msgstr "" -#: cinder/exception.py:882 -#, python-format -msgid "Project %(project)s already exists." +#: cinder/api/common.py:95 cinder/api/common.py:130 cinder/volume/api.py:263 +msgid "limit param must be positive" msgstr "" -#: cinder/exception.py:886 -#, python-format -msgid "Instance %(name)s already exists." +#: cinder/api/common.py:120 +msgid "offset param must be an integer" msgstr "" -#: cinder/exception.py:890 -#, python-format -msgid "Instance Type %(name)s already exists." +#: cinder/api/common.py:134 +msgid "offset param must be positive" msgstr "" -#: cinder/exception.py:894 +#: cinder/api/common.py:162 #, python-format -msgid "Volume Type %(name)s already exists." +msgid "marker [%s] not found" msgstr "" -#: cinder/exception.py:898 +#: cinder/api/common.py:189 #, python-format -msgid "%(path)s is on shared storage: %(reason)s" +msgid "href %s does not contain version" msgstr "" -#: cinder/exception.py:902 -msgid "Migration error" +#: cinder/api/extensions.py:182 +msgid "Initializing extension manager." msgstr "" -#: cinder/exception.py:906 +#: cinder/api/extensions.py:197 #, python-format -msgid "Malformed message body: %(reason)s" +msgid "Loaded extension: %s" msgstr "" -#: cinder/exception.py:910 +#: cinder/api/extensions.py:235 #, python-format -msgid "Could not find config at %(path)s" +msgid "Ext name: %s" msgstr "" -#: cinder/exception.py:914 +#: cinder/api/extensions.py:236 #, python-format -msgid "Could not load paste app '%(name)s' from %(path)s" +msgid "Ext alias: %s" msgstr "" -#: cinder/exception.py:918 -msgid "When resizing, instances must change size!" 
+#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext description: %s" msgstr "" -#: cinder/exception.py:922 -msgid "Image is larger than instance type allows" +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext namespace: %s" msgstr "" -#: cinder/exception.py:926 -msgid "1 or more Zones could not complete the request" +#: cinder/api/extensions.py:240 +#, python-format +msgid "Ext updated: %s" msgstr "" -#: cinder/exception.py:930 -msgid "Instance type's memory is too small for requested image." +#: cinder/api/extensions.py:242 +#, python-format +msgid "Exception loading extension: %s" msgstr "" -#: cinder/exception.py:934 -msgid "Instance type's disk is too small for requested image." +#: cinder/api/extensions.py:256 +#, python-format +msgid "Loading extension %s" msgstr "" -#: cinder/exception.py:938 +#: cinder/api/extensions.py:262 #, python-format -msgid "Insufficient free memory on compute node to start %(uuid)s." +msgid "Calling extension factory %s" msgstr "" -#: cinder/exception.py:942 -msgid "Could not fetch bandwidth/cpu/disk metrics for this host." +#: cinder/api/extensions.py:276 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" msgstr "" -#: cinder/exception.py:946 +#: cinder/api/extensions.py:278 #, python-format -msgid "No valid host was found. %(reason)s" +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" msgstr "" -#: cinder/exception.py:950 +#: cinder/api/extensions.py:287 #, python-format -msgid "Host %(host)s is not up or doesn't exist." +msgid "Failed to load extension %(ext_factory)s: %(exc)s" msgstr "" -#: cinder/exception.py:954 -msgid "Quota exceeded" +#: cinder/api/extensions.py:356 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" msgstr "" -#: cinder/exception.py:958 +#: cinder/api/extensions.py:381 #, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 msgid "" -"Aggregate %(aggregate_id)s: action '%(action)s' caused an error: " -"%(reason)s." +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" msgstr "" -#: cinder/exception.py:963 -#, python-format -msgid "Aggregate %(aggregate_id)s could not be found." +#: cinder/api/xmlutil.py:266 +msgid "element is not a child" msgstr "" -#: cinder/exception.py:967 -#, python-format -msgid "Aggregate %(aggregate_name)s already exists." +#: cinder/api/xmlutil.py:463 +msgid "root element selecting a list" msgstr "" -#: cinder/exception.py:971 +#: cinder/api/xmlutil.py:786 #, python-format -msgid "Aggregate %(aggregate_id)s has no host %(host)s." +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" msgstr "" -#: cinder/exception.py:975 -#, python-format -msgid "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s." +#: cinder/api/xmlutil.py:907 +msgid "subclasses must implement construct()!" msgstr "" -#: cinder/exception.py:980 +#: cinder/api/contrib/admin_actions.py:81 #, python-format -msgid "Host %(host)s already member of another aggregate." +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" msgstr "" -#: cinder/exception.py:984 +#: cinder/api/contrib/backups.py:128 #, python-format -msgid "Aggregate %(aggregate_id)s already has host %(host)s." 
+msgid "show called for member %s" msgstr "" -#: cinder/exception.py:988 +#: cinder/api/contrib/backups.py:140 #, python-format -msgid "Detected more than one volume with name %(vol_name)s" +msgid "delete called for member %s" msgstr "" -#: cinder/exception.py:992 +#: cinder/api/contrib/backups.py:143 #, python-format -msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgid "Delete backup with id: %s" msgstr "" -#: cinder/exception.py:997 -msgid "Unable to create instance type" +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" msgstr "" -#: cinder/exception.py:1001 -msgid "Bad response from SolidFire API" +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:227 +#: cinder/api/contrib/volume_transfer.py:157 +#: cinder/api/contrib/volume_transfer.py:193 +msgid "Incorrect request body format" msgstr "" -#: cinder/exception.py:1005 +#: cinder/api/contrib/backups.py:201 #, python-format -msgid "Error in SolidFire API response: status=%(status)s" +msgid "Creating backup of volume %(volume_id)s in container %(container)s" msgstr "" -#: cinder/exception.py:1009 +#: cinder/api/contrib/backups.py:224 #, python-format -msgid "Error in SolidFire API response: data=%(data)s" +msgid "Restoring backup %(backup_id)s (%(body)s)" msgstr "" -#: cinder/exception.py:1013 +#: cinder/api/contrib/backups.py:234 #, python-format -msgid "Detected existing vlan with id %(vlan)d" +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" msgstr "" -#: cinder/exception.py:1017 -#, python-format -msgid "Instance %(instance_id)s could not be found." +#: cinder/api/contrib/extended_snapshot_attributes.py:60 +msgid "Snapshot not found." msgstr "" -#: cinder/exception.py:1021 -#, python-format -msgid "Invalid id: %(val)s (expecting \"i-...\")." +#: cinder/api/contrib/hosts.py:86 cinder/api/openstack/wsgi.py:245 +msgid "cannot understand XML" msgstr "" -#: cinder/exception.py:1025 +#: cinder/api/contrib/hosts.py:136 #, python-format -msgid "Could not fetch image %(image)s" +msgid "Host '%s' could not be found." msgstr "" -#: cinder/log.py:315 +#: cinder/api/contrib/hosts.py:165 #, python-format -msgid "syslog facility must be one of: %s" +msgid "Invalid status: '%s'" msgstr "" -#: cinder/manager.py:146 +#: cinder/api/contrib/hosts.py:168 #, python-format -msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgid "Invalid update setting: '%s'" msgstr "" -#: cinder/manager.py:152 +#: cinder/api/contrib/hosts.py:180 #, python-format -msgid "Running periodic task %(full_task_name)s" +msgid "Setting host %(host)s to %(state)s." msgstr "" -#: cinder/manager.py:159 -#, python-format -msgid "Error during %(full_task_name)s: %(e)s" +#: cinder/api/contrib/hosts.py:206 +msgid "Describe-resource is admin only functionality" msgstr "" -#: cinder/manager.py:203 -msgid "Notifying Schedulers of capabilities ..." +#: cinder/api/contrib/hosts.py:214 +msgid "Host not found" msgstr "" -#: cinder/policy.py:30 -msgid "JSON file representing policy" +#: cinder/api/contrib/qos_specs_manage.py:111 +msgid "Please specify a name for QoS specs." msgstr "" -#: cinder/policy.py:33 -msgid "Rule checked when requested rule is not found" +#: cinder/api/contrib/qos_specs_manage.py:220 +msgid "Failed to disassociate qos specs." msgstr "" -#: cinder/service.py:137 -msgid "SIGTERM received" +#: cinder/api/contrib/qos_specs_manage.py:222 +msgid "Qos specs still in use." 
msgstr "" -#: cinder/service.py:177 -#, python-format -msgid "Starting %(topic)s node (version %(vcs_string)s)" +#: cinder/api/contrib/qos_specs_manage.py:298 +#: cinder/api/contrib/qos_specs_manage.py:351 +msgid "Volume Type id must not be None." msgstr "" -#: cinder/service.py:195 -#, python-format -msgid "Creating Consumer connection for Service %s" +#: cinder/api/contrib/quota_classes.py:72 +msgid "Missing required element quota_class_set in request body." msgstr "" -#: cinder/service.py:282 -msgid "Service killed that has no database entry" -msgstr "Encerrado serviço que não tem entrada na base de dados" - -#: cinder/service.py:319 -msgid "The service database object disappeared, Recreating it." -msgstr "O objeto da base de dados do serviço desapareceu, Recriando." +#: cinder/api/contrib/quota_classes.py:81 +msgid "Quota class limit must be specified as an integer value." +msgstr "" -#: cinder/service.py:334 -msgid "Recovered model server connection!" -msgstr "Recuperada conexão servidor de modelo." +#: cinder/api/contrib/quota_classes.py:85 +msgid "Quota class limit must be -1 or greater." +msgstr "" -#: cinder/service.py:340 -msgid "model server went away" -msgstr "servidor de modelo perdido" +#: cinder/api/contrib/quotas.py:60 +msgid "Quota limit must be specified as an integer value." +msgstr "" -#: cinder/service.py:433 -msgid "Full set of FLAGS:" -msgstr "Conjunto completo de FLAGS:" +#: cinder/api/contrib/quotas.py:65 +msgid "Quota limit must be -1 or greater." +msgstr "" -#: cinder/service.py:440 -#, python-format -msgid "%(flag)s : FLAG SET " +#: cinder/api/contrib/quotas.py:100 +msgid "Missing required element quota_set in request body." msgstr "" -#: cinder/utils.py:79 +#: cinder/api/contrib/quotas.py:111 #, python-format -msgid "Inner Exception: %s" -msgstr "Exceção interna: %s" +msgid "Bad key(s) in quota set: %s" +msgstr "" -#: cinder/utils.py:165 -#, python-format -msgid "Fetching %s" -msgstr "Buscando %s" +#: cinder/api/contrib/scheduler_hints.py:36 +msgid "Malformed scheduler_hints attribute" +msgstr "" -#: cinder/utils.py:210 -#, python-format -msgid "Got unknown keyword args to utils.execute: %r" +#: cinder/api/contrib/services.py:84 +msgid "" +"Query by service parameter is deprecated. Please use binary parameter " +"instead." msgstr "" -#: cinder/utils.py:220 -#, python-format -msgid "Running cmd (subprocess): %s" -msgstr "Executando comando (subprocesso): %s" +#: cinder/api/contrib/snapshot_actions.py:51 +msgid "'status' must be specified." +msgstr "" -#: cinder/utils.py:236 cinder/utils.py:315 +#: cinder/api/contrib/snapshot_actions.py:61 #, python-format -msgid "Result was %s" -msgstr "Resultado foi %s" +msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" +msgstr "" -#: cinder/utils.py:249 +#: cinder/api/contrib/snapshot_actions.py:67 #, python-format -msgid "%r failed. Retrying." +msgid "" +"Provided snapshot status %(provided)s not allowed for snapshot with " +"status %(current)s." 
msgstr "" -#: cinder/utils.py:291 -#, python-format -msgid "Running cmd (SSH): %s" -msgstr "Executando o comando (SSH): %s" +#: cinder/api/contrib/snapshot_actions.py:79 +msgid "progress must be an integer percentage" +msgstr "" -#: cinder/utils.py:293 -msgid "Environment not supported over SSH" +#: cinder/api/contrib/types_extra_specs.py:101 +msgid "Request body empty" msgstr "" -#: cinder/utils.py:297 -msgid "process_input not supported over SSH" +#: cinder/api/contrib/types_extra_specs.py:105 +#: cinder/api/v1/snapshot_metadata.py:75 cinder/api/v1/volume_metadata.py:75 +#: cinder/api/v2/snapshot_metadata.py:75 cinder/api/v2/volume_metadata.py:74 +msgid "Request body and URI mismatch" msgstr "" -#: cinder/utils.py:352 -#, python-format -msgid "debug in callback: %s" -msgstr "depuração em retorno de chamada: %s" +#: cinder/api/contrib/types_extra_specs.py:108 +#: cinder/api/v1/snapshot_metadata.py:79 cinder/api/v1/volume_metadata.py:79 +#: cinder/api/v2/snapshot_metadata.py:79 cinder/api/v2/volume_metadata.py:78 +msgid "Request body contains too many items" +msgstr "" -#: cinder/utils.py:534 -#, python-format -msgid "Link Local address is not found.:%s" -msgstr "Endereço para Link Local não encontrado: %s" +#: cinder/api/contrib/types_extra_specs.py:150 +msgid "" +"Key names can only contain alphanumeric characters, underscores, periods," +" colons and hyphens." +msgstr "" -#: cinder/utils.py:537 +#: cinder/api/contrib/volume_actions.py:99 #, python-format -msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" -msgstr "Não foi possível atribuir um IP para o Link Local de %(interface)s :%(ex)s" +msgid "" +"Invalid request to attach volume to an instance %(instance_uuid)s and a " +"host %(host_name)s simultaneously" +msgstr "" -#: cinder/utils.py:648 -#, python-format -msgid "Invalid backend: %s" -msgstr "Backend inválido: %s" +#: cinder/api/contrib/volume_actions.py:107 +msgid "Invalid request to attach volume to an invalid target" +msgstr "" -#: cinder/utils.py:659 -#, python-format -msgid "backend %s" -msgstr "backend %s" +#: cinder/api/contrib/volume_actions.py:111 +msgid "" +"Invalid request to attach volume with an invalid mode. Attaching mode " +"should be 'rw' or 'ro'" +msgstr "" -#: cinder/utils.py:709 -msgid "in looping call" +#: cinder/api/contrib/volume_actions.py:196 +msgid "Unable to fetch connection information from backend." msgstr "" -#: cinder/utils.py:927 -#, python-format -msgid "Attempting to grab semaphore \"%(lock)s\" for method \"%(method)s\"..." +#: cinder/api/contrib/volume_actions.py:216 +msgid "Unable to terminate volume connection from backend." msgstr "" -#: cinder/utils.py:931 -#, python-format -msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +#: cinder/api/contrib/volume_actions.py:229 +msgid "No image_name was specified in request." msgstr "" -#: cinder/utils.py:935 -#, python-format -msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +#: cinder/api/contrib/volume_actions.py:237 +msgid "Bad value for 'force' parameter." msgstr "" -#: cinder/utils.py:942 -#, python-format -msgid "Got file lock \"%(lock)s\" for method \"%(method)s\"..." +#: cinder/api/contrib/volume_actions.py:240 +msgid "'force' is not string or bool." msgstr "" -#: cinder/utils.py:1001 -#, python-format -msgid "Found sentinel %(filename)s for pid %(pid)s" +#: cinder/api/contrib/volume_actions.py:280 +msgid "New volume size must be specified as an integer." 
msgstr "" -#: cinder/utils.py:1008 -#, python-format -msgid "Cleaned sentinel %(filename)s for pid %(pid)s" +#: cinder/api/contrib/volume_actions.py:299 +msgid "Must specify readonly in request." msgstr "" -#: cinder/utils.py:1023 -#, python-format -msgid "Found lockfile %(file)s with link count %(count)d" +#: cinder/api/contrib/volume_actions.py:307 +msgid "Bad value for 'readonly'" msgstr "" -#: cinder/utils.py:1028 -#, python-format -msgid "Cleaned lockfile %(file)s with link count %(count)d" +#: cinder/api/contrib/volume_actions.py:311 +msgid "'readonly' not string or bool" msgstr "" -#: cinder/utils.py:1138 -#, python-format -msgid "Expected object of type: %s" +#: cinder/api/contrib/volume_actions.py:325 +msgid "New volume type must be specified." msgstr "" -#: cinder/utils.py:1169 -#, python-format -msgid "Invalid server_string: %s" +#: cinder/api/contrib/volume_transfer.py:131 +msgid "Listing volume transfers" msgstr "" -#: cinder/utils.py:1298 +#: cinder/api/contrib/volume_transfer.py:147 +#, fuzzy, python-format +msgid "Creating new volume transfer %s" +msgstr "Criar volume de %s GB" + +#: cinder/api/contrib/volume_transfer.py:162 +#, fuzzy, python-format +msgid "Creating transfer of volume %s" +msgstr "Falha ao obter metadados para o ip: %s" + +#: cinder/api/contrib/volume_transfer.py:183 #, python-format -msgid "timefunc: '%(name)s' took %(total_time).2f secs" +msgid "Accepting volume transfer %s" msgstr "" -#: cinder/utils.py:1330 -msgid "Original exception being dropped" -msgstr "" +#: cinder/api/contrib/volume_transfer.py:196 +#, fuzzy, python-format +msgid "Accepting transfer %s" +msgstr "Criar volume de %s GB" -#: cinder/utils.py:1461 +#: cinder/api/contrib/volume_transfer.py:217 #, python-format -msgid "Class %(fullname)s is deprecated: %(msg)s" +msgid "Delete transfer with id: %s" msgstr "" -#: cinder/utils.py:1463 -#, python-format -msgid "Class %(fullname)s is deprecated" +#: cinder/api/contrib/volume_type_encryption.py:64 +msgid "key_size must be non-negative" msgstr "" -#: cinder/utils.py:1495 -#, python-format -msgid "Function %(name)s in %(location)s is deprecated: %(msg)s" +#: cinder/api/contrib/volume_type_encryption.py:67 +msgid "key_size must be an integer" msgstr "" -#: cinder/utils.py:1497 -#, python-format -msgid "Function %(name)s in %(location)s is deprecated" +#: cinder/api/contrib/volume_type_encryption.py:73 +msgid "provider must be defined" msgstr "" -#: cinder/utils.py:1681 -#, python-format -msgid "Could not remove tmpdir: %s" +#: cinder/api/contrib/volume_type_encryption.py:75 +msgid "control_location must be defined" msgstr "" -#: cinder/wsgi.py:97 +#: cinder/api/contrib/volume_type_encryption.py:83 #, python-format -msgid "Started %(name)s on %(host)s:%(port)s" +msgid "Valid control location are: %s" msgstr "" -#: cinder/wsgi.py:108 -msgid "Stopping WSGI server." +#: cinder/api/contrib/volume_type_encryption.py:111 +msgid "Cannot create encryption specs. Volume type in use." msgstr "" -#: cinder/wsgi.py:111 -msgid "Stopping raw TCP server." +#: cinder/api/contrib/volume_type_encryption.py:115 +msgid "Create body is not valid." msgstr "" -#: cinder/wsgi.py:117 -#, python-format -msgid "Starting TCP server %(arg0)s on %(host)s:%(port)s" +#: cinder/api/contrib/volume_type_encryption.py:157 +msgid "Cannot delete encryption specs. Volume type in use." msgstr "" -#: cinder/wsgi.py:133 -msgid "WSGI server has stopped." +#: cinder/api/middleware/auth.py:108 +msgid "Invalid service catalog json." 
msgstr "" -#: cinder/wsgi.py:211 -msgid "You must implement __call__" -msgstr "" +#: cinder/api/middleware/fault.py:44 +#, python-format +msgid "Caught error: %s" +msgstr "Capturado o erro: %s" -#: cinder/api/direct.py:218 -msgid "not available" +#: cinder/api/middleware/fault.py:53 cinder/api/openstack/wsgi.py:984 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" msgstr "" -#: cinder/api/direct.py:299 +#: cinder/api/middleware/fault.py:69 #, python-format -msgid "Returned non-serializeable type: %s" +msgid "%(exception)s: %(explanation)s" msgstr "" -#: cinder/api/sizelimit.py:51 +#: cinder/api/middleware/sizelimit.py:55 cinder/api/middleware/sizelimit.py:64 +#: cinder/api/middleware/sizelimit.py:78 msgid "Request is too large." msgstr "" -#: cinder/api/validator.py:142 -#, python-format -msgid "%(key)s with value %(value)s failed validator %(validator)s" +#: cinder/api/openstack/__init__.py:69 +msgid "Must specify an ExtensionManager class" msgstr "" -#: cinder/api/ec2/__init__.py:73 +#: cinder/api/openstack/__init__.py:80 #, python-format -msgid "%(code)s: %(message)s" +msgid "Extended resource: %s" msgstr "" -#: cinder/api/ec2/__init__.py:95 +#: cinder/api/openstack/__init__.py:104 #, python-format -msgid "FaultWrapper: %s" +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" msgstr "" -#: cinder/api/ec2/__init__.py:170 -msgid "Too many failed authentications." -msgstr "Muitas falhas de autenticação." - -#: cinder/api/ec2/__init__.py:180 +#: cinder/api/openstack/__init__.py:110 #, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:126 msgid "" -"Access key %(access_key)s has had %(failures)d failed authentications and" -" will be locked out for %(lock_mins)d minutes." +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." msgstr "" -#: cinder/api/ec2/__init__.py:267 -msgid "Signature not provided" +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." 
msgstr "" -#: cinder/api/ec2/__init__.py:271 -msgid "Access key not provided" +#: cinder/api/openstack/wsgi.py:220 cinder/api/openstack/wsgi.py:634 +msgid "cannot understand JSON" msgstr "" -#: cinder/api/ec2/__init__.py:306 cinder/api/ec2/__init__.py:319 -msgid "Failure communicating with keystone" +#: cinder/api/openstack/wsgi.py:639 +msgid "too many body keys" msgstr "" -#: cinder/api/ec2/__init__.py:388 +#: cinder/api/openstack/wsgi.py:677 #, python-format -msgid "Authentication Failure: %s" -msgstr "Falha de Autenticação: %s" +msgid "Exception handling resource: %s" +msgstr "" -#: cinder/api/ec2/__init__.py:404 +#: cinder/api/openstack/wsgi.py:682 #, python-format -msgid "Authenticated Request For %(uname)s:%(pname)s)" +msgid "Fault thrown: %s" msgstr "" -#: cinder/api/ec2/__init__.py:435 +#: cinder/api/openstack/wsgi.py:685 #, python-format -msgid "action: %s" -msgstr "ação: %s" +msgid "HTTP exception thrown: %s" +msgstr "" -#: cinder/api/ec2/__init__.py:437 -#, python-format -msgid "arg: %(key)s\t\tval: %(value)s" +#: cinder/api/openstack/wsgi.py:793 +msgid "Empty body provided in request" msgstr "" -#: cinder/api/ec2/__init__.py:512 -#, python-format -msgid "Unauthorized request for controller=%(controller)s and action=%(action)s" +#: cinder/api/openstack/wsgi.py:799 +msgid "Unrecognized Content-Type provided in request" msgstr "" -#: cinder/api/ec2/__init__.py:584 -#, python-format -msgid "InstanceNotFound raised: %s" +#: cinder/api/openstack/wsgi.py:803 +msgid "No Content-Type provided in request" msgstr "" -#: cinder/api/ec2/__init__.py:590 +#: cinder/api/openstack/wsgi.py:914 #, python-format -msgid "VolumeNotFound raised: %s" +msgid "There is no such action: %s" msgstr "" -#: cinder/api/ec2/__init__.py:596 -#, python-format -msgid "SnapshotNotFound raised: %s" +#: cinder/api/openstack/wsgi.py:917 cinder/api/openstack/wsgi.py:930 +#: cinder/api/v1/snapshot_metadata.py:53 cinder/api/v1/snapshot_metadata.py:71 +#: cinder/api/v1/snapshot_metadata.py:96 cinder/api/v1/snapshot_metadata.py:121 +#: cinder/api/v1/volume_metadata.py:53 cinder/api/v1/volume_metadata.py:71 +#: cinder/api/v1/volume_metadata.py:96 cinder/api/v1/volume_metadata.py:121 +#: cinder/api/v2/snapshot_metadata.py:53 cinder/api/v2/snapshot_metadata.py:71 +#: cinder/api/v2/snapshot_metadata.py:96 cinder/api/v2/snapshot_metadata.py:121 +#: cinder/api/v2/volume_metadata.py:52 cinder/api/v2/volume_metadata.py:70 +#: cinder/api/v2/volume_metadata.py:95 cinder/api/v2/volume_metadata.py:120 +msgid "Malformed request body" msgstr "" -#: cinder/api/ec2/__init__.py:602 -#, python-format -msgid "NotFound raised: %s" -msgstr "NotFound lançado: %s" +#: cinder/api/openstack/wsgi.py:927 +msgid "Unsupported Content-Type" +msgstr "" -#: cinder/api/ec2/__init__.py:605 -#, python-format -msgid "EC2APIError raised: %s" +#: cinder/api/openstack/wsgi.py:939 +msgid "Malformed request url" msgstr "" -#: cinder/api/ec2/__init__.py:613 +#: cinder/api/openstack/wsgi.py:987 #, python-format -msgid "KeyPairExists raised: %s" +msgid "%(url)s returned a fault: %(e)s" msgstr "" -#: cinder/api/ec2/__init__.py:617 -#, python-format -msgid "InvalidParameterValue raised: %s" +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." 
msgstr "" -#: cinder/api/ec2/__init__.py:621 -#, python-format -msgid "InvalidPortRange raised: %s" +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." msgstr "" -#: cinder/api/ec2/__init__.py:625 +#: cinder/api/v1/limits.py:139 cinder/api/v2/limits.py:138 #, python-format -msgid "NotAuthorized raised: %s" +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." msgstr "" -#: cinder/api/ec2/__init__.py:629 -#, python-format -msgid "InvalidRequest raised: %s" +#: cinder/api/v1/limits.py:264 cinder/api/v2/limits.py:261 +msgid "This request was rate-limited." msgstr "" -#: cinder/api/ec2/__init__.py:633 -#, fuzzy, python-format -msgid "QuotaError raised: %s" -msgstr "Erro inexperado lançado: %s" +#: cinder/api/v1/snapshot_metadata.py:37 cinder/api/v1/snapshot_metadata.py:117 +#: cinder/api/v1/snapshot_metadata.py:156 cinder/api/v2/snapshot_metadata.py:37 +#: cinder/api/v2/snapshot_metadata.py:117 +#: cinder/api/v2/snapshot_metadata.py:156 +#, fuzzy +msgid "snapshot does not exist" +msgstr "instância %s: fazendo um snapshot" -#: cinder/api/ec2/__init__.py:637 -#, python-format -msgid "Invalid id: bogus (expecting \"i-...\"): %s" +#: cinder/api/v1/snapshot_metadata.py:139 +#: cinder/api/v1/snapshot_metadata.py:149 cinder/api/v1/volume_metadata.py:139 +#: cinder/api/v1/volume_metadata.py:149 cinder/api/v2/snapshot_metadata.py:139 +#: cinder/api/v2/snapshot_metadata.py:149 cinder/api/v2/volume_metadata.py:138 +#: cinder/api/v2/volume_metadata.py:148 +msgid "Metadata item was not found" msgstr "" -#: cinder/api/ec2/__init__.py:646 -#, python-format -msgid "Unexpected error raised: %s" -msgstr "Erro inexperado lançado: %s" - -#: cinder/api/ec2/__init__.py:647 +#: cinder/api/v1/snapshots.py:119 cinder/api/v2/snapshots.py:120 #, python-format -msgid "Environment: %s" +msgid "Delete snapshot with id: %s" msgstr "" -#: cinder/api/ec2/__init__.py:649 cinder/api/metadata/handler.py:248 -msgid "An unknown error has occurred. Please try your request again." -msgstr "Ocorreu um erro desconhecido. Por favor tente sua requisição cindermente." +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:184 +msgid "'volume_id' must be specified" +msgstr "" -#: cinder/api/ec2/apirequest.py:64 +#: cinder/api/v1/snapshots.py:182 cinder/api/v2/snapshots.py:193 #, python-format -msgid "Unsupported API request: controller = %(controller)s, action = %(action)s" +msgid "Create snapshot from volume %s" msgstr "" -#: cinder/api/ec2/cloud.py:336 +#: cinder/api/v1/snapshots.py:186 cinder/api/v2/snapshots.py:202 #, python-format -msgid "Create snapshot of volume %s" +msgid "Invalid value '%s' for force. " msgstr "" -#: cinder/api/ec2/cloud.py:372 -#, python-format -msgid "" -"Value (%s) for KeyName is invalid. Content limited to Alphanumeric " -"character, spaces, dashes, and underscore." +#: cinder/api/v1/volume_metadata.py:37 cinder/api/v1/volume_metadata.py:117 +#: cinder/api/v1/volume_metadata.py:156 cinder/api/v2/volume_metadata.py:36 +#: cinder/api/v2/volume_metadata.py:116 cinder/api/v2/volume_metadata.py:155 +msgid "volume does not exist" msgstr "" -#: cinder/api/ec2/cloud.py:378 +#: cinder/api/v1/volumes.py:111 #, python-format -msgid "Value (%s) for Keyname is invalid. Length exceeds maximum of 255." 
+msgid "vol=%s" msgstr "" -#: cinder/api/ec2/cloud.py:382 +#: cinder/api/v1/volumes.py:290 cinder/api/v2/volumes.py:228 #, python-format -msgid "Create key pair %s" -msgstr "Criar par de chaves %s" +msgid "Delete volume with id: %s" +msgstr "" -#: cinder/api/ec2/cloud.py:391 -#, python-format -msgid "Import key %s" +#: cinder/api/v1/volumes.py:344 cinder/api/v1/volumes.py:348 +#: cinder/api/v2/volumes.py:298 cinder/api/v2/volumes.py:302 +msgid "Invalid imageRef provided." msgstr "" -#: cinder/api/ec2/cloud.py:409 +#: cinder/api/v1/volumes.py:388 cinder/api/v2/volumes.py:354 #, python-format -msgid "Delete key pair %s" -msgstr "Remover par de chaves %s" - -#: cinder/api/ec2/cloud.py:551 -msgid "Invalid CIDR" +msgid "snapshot id:%s not found" msgstr "" -#: cinder/api/ec2/cloud.py:639 cinder/api/ec2/cloud.py:693 -#: cinder/api/ec2/cloud.py:800 -msgid "Not enough parameters, need group_name or group_id" +#: cinder/api/v1/volumes.py:401 +#, python-format +msgid "source vol id:%s not found" msgstr "" -#: cinder/api/ec2/cloud.py:654 -#: cinder/api/openstack/compute/contrib/security_groups.py:517 +#: cinder/api/v1/volumes.py:412 cinder/api/v2/volumes.py:377 #, python-format -msgid "Revoke security group ingress %s" -msgstr "Revogado entrada do grupo de segurança %s" +msgid "Create volume of %s GB" +msgstr "Criar volume de %s GB" -#: cinder/api/ec2/cloud.py:666 cinder/api/ec2/cloud.py:719 +#: cinder/api/v1/volumes.py:496 #, python-format -msgid "%s Not enough parameters to build a valid rule" +msgid "Removing options '%(bad_options)s' from query" msgstr "" -#: cinder/api/ec2/cloud.py:684 cinder/api/ec2/cloud.py:744 -msgid "No rule for the specified parameters." -msgstr "Não existe regra para os parâmetros especificados" +#: cinder/api/v2/snapshots.py:111 cinder/api/v2/snapshots.py:126 +#: cinder/api/v2/snapshots.py:267 +msgid "Snapshot could not be found" +msgstr "" -#: cinder/api/ec2/cloud.py:708 -#: cinder/api/openstack/compute/contrib/security_groups.py:354 +#: cinder/api/v2/snapshots.py:174 cinder/api/v2/snapshots.py:234 +#: cinder/api/v2/volumes.py:313 cinder/api/v2/volumes.py:419 #, python-format -msgid "Authorize security group ingress %s" -msgstr "Autorizada entrada do grupo de segurança %s" +msgid "Missing required element '%s' in request body" +msgstr "" -#: cinder/api/ec2/cloud.py:725 -#, fuzzy, python-format -msgid "%s - This rule already exists in group" -msgstr "Esta regra já existe no grupo %s" - -#: cinder/api/ec2/cloud.py:769 -#, python-format -msgid "" -"Value (%s) for parameter GroupName is invalid. Content limited to " -"Alphanumeric characters, spaces, dashes, and underscores." +#: cinder/api/v2/snapshots.py:190 cinder/api/v2/volumes.py:217 +#: cinder/api/v2/volumes.py:234 cinder/api/v2/volumes.py:449 +msgid "Volume could not be found" msgstr "" -#: cinder/api/ec2/cloud.py:776 -#, python-format -msgid "" -"Value (%s) for parameter GroupName is invalid. Length exceeds maximum of " -"255." 
+#: cinder/api/v2/snapshots.py:230 cinder/api/v2/volumes.py:415 +msgid "Missing request body" msgstr "" -#: cinder/api/ec2/cloud.py:780 -#: cinder/api/openstack/compute/contrib/security_groups.py:292 -#, python-format -msgid "Create Security Group %s" -msgstr "Criar Grupo de Segurança %s" - -#: cinder/api/ec2/cloud.py:783 -#, python-format -msgid "group %s already exists" -msgstr "group %s já existe" - -#: cinder/api/ec2/cloud.py:815 -#: cinder/api/openstack/compute/contrib/security_groups.py:245 -#, python-format -msgid "Delete security group %s" -msgstr "Excluir grupo de segurança %s" +#: cinder/api/v2/types.py:70 +msgid "Volume type not found" +msgstr "" -#: cinder/api/ec2/cloud.py:823 cinder/compute/manager.py:1630 -#, python-format -msgid "Get console output for instance %s" -msgstr "Obter saída do console para instância %s" +#: cinder/api/v2/volumes.py:237 +msgid "Volume cannot be deleted while in attached state" +msgstr "" -#: cinder/api/ec2/cloud.py:894 -#, python-format -msgid "Create volume from snapshot %s" +#: cinder/api/v2/volumes.py:343 +msgid "Volume type not found." msgstr "" -#: cinder/api/ec2/cloud.py:898 cinder/api/openstack/compute/contrib/volumes.py:186 -#: cinder/api/openstack/volume/volumes.py:222 +#: cinder/api/v2/volumes.py:366 #, python-format -msgid "Create volume of %s GB" -msgstr "Criar volume de %s GB" - -#: cinder/api/ec2/cloud.py:921 -msgid "Delete Failed" +msgid "source volume id:%s not found" msgstr "" -#: cinder/api/ec2/cloud.py:931 +#: cinder/api/v2/volumes.py:472 #, python-format -msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" +msgid "Removing options '%s' from query" msgstr "" -#: cinder/api/ec2/cloud.py:939 -msgid "Attach Failed." -msgstr "" +#: cinder/backup/api.py:66 +#, fuzzy +msgid "Backup status must be available or error" +msgstr "O status do volume parece estar disponível" -#: cinder/api/ec2/cloud.py:952 cinder/api/openstack/compute/contrib/volumes.py:366 -#, python-format -msgid "Detach volume %s" -msgstr "Desanexar volume %s" +#: cinder/backup/api.py:105 +#, fuzzy +msgid "Volume to be backed up must be available" +msgstr "O status do volume parece estar disponível" -#: cinder/api/ec2/cloud.py:959 -#, fuzzy, python-format -msgid "Detach Volume Failed." -msgstr "Desanexar volume %s" +#: cinder/backup/api.py:140 +#, fuzzy +msgid "Backup status must be available" +msgstr "O status do volume parece estar disponível" -#: cinder/api/ec2/cloud.py:984 cinder/api/ec2/cloud.py:1041 -#: cinder/api/ec2/cloud.py:1518 cinder/api/ec2/cloud.py:1533 -#, python-format -msgid "attribute not supported: %s" -msgstr "atributo não suportado: %s" +#: cinder/backup/api.py:145 +msgid "Backup to be restored has invalid size" +msgstr "" -#: cinder/api/ec2/cloud.py:1107 +#: cinder/backup/api.py:154 #, python-format -msgid "vol = %s\n" +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" msgstr "" -#: cinder/api/ec2/cloud.py:1255 -msgid "Allocate address" -msgstr "Alocar endereço" - -#: cinder/api/ec2/cloud.py:1267 -#, python-format -msgid "Release address %s" -msgstr "Liberar endereço %s" +#: cinder/backup/api.py:170 +#, fuzzy +msgid "Volume to be restored to must be available" +msgstr "O status do volume parece estar disponível" -#: cinder/api/ec2/cloud.py:1272 +#: cinder/backup/api.py:176 #, python-format -msgid "Associate address %(public_ip)s to instance %(instance_id)s" +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." 
msgstr "" -#: cinder/api/ec2/cloud.py:1282 +#: cinder/backup/api.py:181 #, python-format -msgid "Disassociate address %s" -msgstr "Desatribuir endereço %s" - -#: cinder/api/ec2/cloud.py:1308 -msgid "Image must be available" +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" msgstr "" -#: cinder/api/ec2/cloud.py:1329 -msgid "Going to start terminating instances" -msgstr "Começando a terminar instâncias" - -#: cinder/api/ec2/cloud.py:1343 -#, python-format -msgid "Reboot instance %r" -msgstr "Reiniciar instância %r" - -#: cinder/api/ec2/cloud.py:1354 -msgid "Going to stop instances" +#: cinder/backup/manager.py:97 +msgid "NULL host not allowed for volume backend lookup." msgstr "" -#: cinder/api/ec2/cloud.py:1365 -msgid "Going to start instances" +#: cinder/backup/manager.py:100 +#, python-format +msgid "Checking hostname '%s' for backend info." msgstr "" -#: cinder/api/ec2/cloud.py:1455 +#: cinder/backup/manager.py:107 #, python-format -msgid "De-registering image %s" -msgstr "Removendo o registro da imagem %s" - -#: cinder/api/ec2/cloud.py:1471 -msgid "imageLocation is required" +msgid "Backend not found in hostname (%s) so using default." msgstr "" -#: cinder/api/ec2/cloud.py:1490 +#: cinder/backup/manager.py:117 #, python-format -msgid "Registered image %(image_location)s with id %(image_id)s" +msgid "Manager requested for volume_backend '%s'." msgstr "" -#: cinder/api/ec2/cloud.py:1536 -msgid "user or group not specified" -msgstr "usuário ou grupo não especificado" - -#: cinder/api/ec2/cloud.py:1538 -msgid "only group \"all\" is supported" -msgstr "apenas o grupo \"all\" é suportado" - -#: cinder/api/ec2/cloud.py:1540 -msgid "operation_type must be add or remove" -msgstr "operation_type deve ser add ou remove" +#: cinder/backup/manager.py:120 cinder/backup/manager.py:132 +msgid "Fetching default backend." +msgstr "" -#: cinder/api/ec2/cloud.py:1542 +#: cinder/backup/manager.py:123 #, python-format -msgid "Updating image %s publicity" -msgstr "Atualizando publicidade da imagem %s" +msgid "Volume manager for backend '%s' does not exist." +msgstr "" -#: cinder/api/ec2/cloud.py:1555 +#: cinder/backup/manager.py:129 #, python-format -msgid "Not allowed to modify attributes for image %s" +msgid "Driver requested for volume_backend '%s'." msgstr "" -#: cinder/api/ec2/cloud.py:1603 +#: cinder/backup/manager.py:147 #, python-format -msgid "Couldn't stop instance with in %d sec" +msgid "" +"Registering backend %(backend)s (host=%(host)s " +"backend_name=%(backend_name)s)." msgstr "" -#: cinder/api/metadata/handler.py:246 cinder/api/metadata/handler.py:253 +#: cinder/backup/manager.py:154 #, python-format -msgid "Failed to get metadata for ip: %s" -msgstr "Falha ao obter metadados para o ip: %s" +msgid "Registering default backend %s." +msgstr "" -#: cinder/api/openstack/__init__.py:43 +#: cinder/backup/manager.py:158 #, python-format -msgid "Caught error: %s" -msgstr "Capturado o erro: %s" +msgid "Starting volume driver %(driver_name)s (%(version)s)." +msgstr "" -#: cinder/api/openstack/__init__.py:45 cinder/api/openstack/wsgi.py:886 +#: cinder/backup/manager.py:165 #, python-format -msgid "%(url)s returned with HTTP %(status)d" +msgid "Error encountered during initialization of driver: %(name)s." msgstr "" -#: cinder/api/openstack/__init__.py:94 -msgid "Must specify an ExtensionManager class" +#: cinder/backup/manager.py:184 +msgid "Cleaning up incomplete backup operations." 
msgstr "" -#: cinder/api/openstack/__init__.py:105 +#: cinder/backup/manager.py:189 #, python-format -msgid "Extended resource: %s" +msgid "Resetting volume %s to available (was backing-up)." msgstr "" -#: cinder/api/openstack/__init__.py:130 +#: cinder/backup/manager.py:194 #, python-format -msgid "" -"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " -"resource" +msgid "Resetting volume %s to error_restoring (was restoring-backup)." msgstr "" -#: cinder/api/openstack/__init__.py:135 +#: cinder/backup/manager.py:206 #, python-format -msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgid "Resetting backup %s to error (was creating)." msgstr "" -#: cinder/api/openstack/auth.py:90 +#: cinder/backup/manager.py:212 #, python-format -msgid "%(user_id)s could not be found with token '%(token)s'" +msgid "Resetting backup %s to available (was restoring)." msgstr "" -#: cinder/api/openstack/auth.py:134 +#: cinder/backup/manager.py:217 #, python-format -msgid "%(user_id)s must be an admin or a member of %(project_id)s" -msgstr "" - -#: cinder/api/openstack/auth.py:152 -msgid "Authentication requests must be made against a version root (e.g. /v2)." +msgid "Resuming delete on backup: %s." msgstr "" -#: cinder/api/openstack/auth.py:167 +#: cinder/backup/manager.py:225 #, python-format -msgid "Could not find %s in request." +msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." msgstr "" -#: cinder/api/openstack/auth.py:191 +#: cinder/backup/manager.py:237 #, python-format -msgid "Successfully authenticated '%s'" +msgid "" +"Create backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s." msgstr "" -#: cinder/api/openstack/auth.py:241 -msgid "User not found with provided API key." +#: cinder/backup/manager.py:249 +#, python-format +msgid "" +"Create backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." msgstr "" -#: cinder/api/openstack/auth.py:258 +#: cinder/backup/manager.py:282 #, python-format -msgid "Provided API key is valid, but not for user '%(username)s'" +msgid "Create backup finished. backup: %s." msgstr "" -#: cinder/api/openstack/common.py:133 cinder/api/openstack/common.py:167 -msgid "limit param must be an integer" +#: cinder/backup/manager.py:286 +#, python-format +msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s." msgstr "" -#: cinder/api/openstack/common.py:136 cinder/api/openstack/common.py:171 -msgid "limit param must be positive" +#: cinder/backup/manager.py:299 +#, python-format +msgid "" +"Restore backup aborted: expected volume status %(expected_status)s but " +"got %(actual_status)s." msgstr "" -#: cinder/api/openstack/common.py:161 -msgid "offset param must be an integer" +#: cinder/backup/manager.py:310 +#, python-format +msgid "" +"Restore backup aborted: expected backup status %(expected_status)s but " +"got %(actual_status)s." msgstr "" -#: cinder/api/openstack/common.py:175 -msgid "offset param must be positive" +#: cinder/backup/manager.py:329 +#, python-format +msgid "" +"Restore backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." msgstr "" -#: cinder/api/openstack/common.py:203 +#: cinder/backup/manager.py:360 #, python-format -msgid "marker [%s] not found" +msgid "" +"Restore backup finished, backup %(backup_id)s restored to volume " +"%(volume_id)s." 
msgstr "" -#: cinder/api/openstack/common.py:243 +#: cinder/backup/manager.py:379 #, python-format -msgid "href %s does not contain version" +msgid "Delete backup started, backup: %s." msgstr "" -#: cinder/api/openstack/common.py:278 -msgid "Image metadata limit exceeded" +#: cinder/backup/manager.py:386 +#, python-format +msgid "" +"Delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." msgstr "" -#: cinder/api/openstack/common.py:295 +#: cinder/backup/manager.py:399 #, python-format -msgid "Converting nw_info: %s" +msgid "" +"Delete backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." msgstr "" -#: cinder/api/openstack/common.py:305 +#: cinder/backup/manager.py:422 #, python-format -msgid "Converted networks: %s" +msgid "Delete backup finished, backup %s deleted." msgstr "" -#: cinder/api/openstack/common.py:338 -#, python-format -msgid "Cannot '%(action)s' while instance is in %(attr)s %(state)s" +#: cinder/backup/drivers/ceph.py:116 +msgid "" +"rbd striping not supported - ignoring configuration settings for rbd " +"striping" msgstr "" -#: cinder/api/openstack/common.py:341 +#: cinder/backup/drivers/ceph.py:147 #, python-format -msgid "Instance is in an invalid state for '%(action)s'" +msgid "invalid user '%s'" msgstr "" -#: cinder/api/openstack/common.py:421 -msgid "Rejecting snapshot request, snapshots currently disabled" +#: cinder/backup/drivers/ceph.py:213 +msgid "backup_id required" msgstr "" -#: cinder/api/openstack/common.py:423 -msgid "Instance snapshots are not permitted at this time." +#: cinder/backup/drivers/ceph.py:224 +#, python-format +msgid "discarding %(length)s bytes from offset %(offset)s" msgstr "" -#: cinder/api/openstack/extensions.py:188 +#: cinder/backup/drivers/ceph.py:232 #, python-format -msgid "Loaded extension: %s" +msgid "writing zeroes chunk %d" msgstr "" -#: cinder/api/openstack/extensions.py:225 +#: cinder/backup/drivers/ceph.py:246 #, python-format -msgid "Ext name: %s" +msgid "transferring data between '%(src)s' and '%(dest)s'" msgstr "" -#: cinder/api/openstack/extensions.py:226 +#: cinder/backup/drivers/ceph.py:250 #, python-format -msgid "Ext alias: %s" +msgid "%(chunks)s chunks of %(bytes)s bytes to be transferred" msgstr "" -#: cinder/api/openstack/extensions.py:227 +#: cinder/backup/drivers/ceph.py:269 #, python-format -msgid "Ext description: %s" +msgid "transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s)" msgstr "" -#: cinder/api/openstack/extensions.py:229 +#: cinder/backup/drivers/ceph.py:279 #, python-format -msgid "Ext namespace: %s" +msgid "transferring remaining %s bytes" msgstr "" -#: cinder/api/openstack/extensions.py:230 +#: cinder/backup/drivers/ceph.py:295 #, python-format -msgid "Ext updated: %s" +msgid "creating base image '%s'" msgstr "" -#: cinder/api/openstack/extensions.py:232 +#: cinder/backup/drivers/ceph.py:322 cinder/backup/drivers/ceph.py:603 #, python-format -msgid "Exception loading extension: %s" +msgid "deleting backup snapshot='%s'" msgstr "" -#: cinder/api/openstack/extensions.py:246 -#, python-format -msgid "Loading extension %s" +#: cinder/backup/drivers/ceph.py:325 +msgid "no backup snapshot to delete" msgstr "" -#: cinder/api/openstack/extensions.py:252 +#: cinder/backup/drivers/ceph.py:361 #, python-format -msgid "Calling extension factory %s" +msgid "trying diff format name format basename='%s'" msgstr "" -#: 
cinder/api/openstack/extensions.py:264 +#: cinder/backup/drivers/ceph.py:369 #, python-format -msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgid "image %s not found" msgstr "" -#: cinder/api/openstack/extensions.py:344 +#: cinder/backup/drivers/ceph.py:377 #, python-format -msgid "Failed to load extension %(classpath)s: %(exc)s" +msgid "base image still has %s snapshots so skipping base image delete" msgstr "" -#: cinder/api/openstack/extensions.py:368 +#: cinder/backup/drivers/ceph.py:382 #, python-format -msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgid "deleting base image='%s'" msgstr "" -#: cinder/api/openstack/wsgi.py:135 cinder/api/openstack/wsgi.py:538 -msgid "cannot understand JSON" +#: cinder/backup/drivers/ceph.py:389 +#, python-format +msgid "image busy, retrying %(retries)s more time(s) in %(delay)ss" msgstr "" -#: cinder/api/openstack/wsgi.py:159 -#: cinder/api/openstack/compute/contrib/hosts.py:86 -msgid "cannot understand XML" +#: cinder/backup/drivers/ceph.py:394 +msgid "max retries reached - raising error" msgstr "" -#: cinder/api/openstack/wsgi.py:543 -msgid "too many body keys" +#: cinder/backup/drivers/ceph.py:397 +#, python-format +msgid "base backup image='%s' deleted)" msgstr "" -#: cinder/api/openstack/wsgi.py:582 +#: cinder/backup/drivers/ceph.py:407 #, python-format -msgid "Exception handling resource: %s" +msgid "deleting source snap '%s'" msgstr "" -#: cinder/api/openstack/wsgi.py:586 +#: cinder/backup/drivers/ceph.py:453 #, python-format -msgid "Fault thrown: %s" +msgid "performing differential transfer from '%(src)s' to '%(dest)s'" msgstr "" -#: cinder/api/openstack/wsgi.py:589 +#: cinder/backup/drivers/ceph.py:478 #, python-format -msgid "HTTP exception thrown: %s" +msgid "rbd diff op failed - (ret=%(ret)s stderr=%(stderr)s)" msgstr "" -#: cinder/api/openstack/wsgi.py:697 -msgid "Unrecognized Content-Type provided in request" +#: cinder/backup/drivers/ceph.py:488 +#, python-format +msgid "image '%s' not found - trying diff format name" msgstr "" -#: cinder/api/openstack/wsgi.py:701 -msgid "No Content-Type provided in request" +#: cinder/backup/drivers/ceph.py:493 +#, python-format +msgid "diff format image '%s' not found" msgstr "" -#: cinder/api/openstack/wsgi.py:705 -msgid "Empty body provided in request" +#: cinder/backup/drivers/ceph.py:528 +#, python-format +msgid "using --from-snap '%s'" msgstr "" -#: cinder/api/openstack/wsgi.py:816 +#: cinder/backup/drivers/ceph.py:543 #, python-format -msgid "There is no such action: %s" +msgid "source snap '%s' is stale so deleting" msgstr "" -#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 -#: cinder/api/openstack/compute/server_metadata.py:58 -#: cinder/api/openstack/compute/server_metadata.py:76 -#: cinder/api/openstack/compute/server_metadata.py:101 -#: cinder/api/openstack/compute/server_metadata.py:126 -#: cinder/api/openstack/compute/contrib/admin_actions.py:211 -#: cinder/api/openstack/compute/contrib/console_output.py:52 -msgid "Malformed request body" +#: cinder/backup/drivers/ceph.py:555 +#, python-format +msgid "" +"snap='%(snap)s' does not exist in base image='%(base)s' - aborting " +"incremental backup" msgstr "" -#: cinder/api/openstack/wsgi.py:829 -msgid "Unsupported Content-Type" +#: cinder/backup/drivers/ceph.py:566 +#, python-format +msgid "creating backup snapshot='%s'" msgstr "" -#: cinder/api/openstack/wsgi.py:841 -msgid "Malformed request url" +#: cinder/backup/drivers/ceph.py:586 +#, python-format +msgid "differential backup transfer completed 
in %.4fs" msgstr "" -#: cinder/api/openstack/wsgi.py:889 -#, python-format -msgid "%(url)s returned a fault: %(e)s" +#: cinder/backup/drivers/ceph.py:595 +msgid "differential backup transfer failed" msgstr "" -#: cinder/api/openstack/xmlutil.py:265 -msgid "element is not a child" +#: cinder/backup/drivers/ceph.py:625 +#, python-format +msgid "creating base image='%s'" msgstr "" -#: cinder/api/openstack/xmlutil.py:414 -msgid "root element selecting a list" +#: cinder/backup/drivers/ceph.py:634 +msgid "copying data" msgstr "" -#: cinder/api/openstack/xmlutil.py:739 +#: cinder/backup/drivers/ceph.py:694 #, python-format -msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgid "looking for snapshot of backup base '%s'" msgstr "" -#: cinder/api/openstack/xmlutil.py:858 -msgid "subclasses must implement construct()!" +#: cinder/backup/drivers/ceph.py:697 +#, python-format +msgid "backup base '%s' has no snapshots" msgstr "" -#: cinder/api/openstack/compute/extensions.py:29 -#: cinder/api/openstack/volume/extensions.py:29 -msgid "Initializing extension manager." +#: cinder/backup/drivers/ceph.py:704 +#, python-format +msgid "backup '%s' has no snapshot" msgstr "" -#: cinder/api/openstack/compute/image_metadata.py:40 -#: cinder/api/openstack/compute/images.py:146 -#: cinder/api/openstack/compute/images.py:161 -msgid "Image not found." +#: cinder/backup/drivers/ceph.py:708 +#, python-format +msgid "backup should only have one snapshot but instead has %s" msgstr "" -#: cinder/api/openstack/compute/image_metadata.py:79 -msgid "Incorrect request body format" +#: cinder/backup/drivers/ceph.py:713 +#, python-format +msgid "found snapshot '%s'" msgstr "" -#: cinder/api/openstack/compute/image_metadata.py:83 -#: cinder/api/openstack/compute/server_metadata.py:80 -#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:79 -#: cinder/api/openstack/compute/contrib/volumetypes.py:188 -msgid "Request body and URI mismatch" +#: cinder/backup/drivers/ceph.py:734 +msgid "need non-zero volume size" msgstr "" -#: cinder/api/openstack/compute/image_metadata.py:86 -#: cinder/api/openstack/compute/server_metadata.py:84 -#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:82 -#: cinder/api/openstack/compute/contrib/volumetypes.py:191 -msgid "Request body contains too many items" +#: cinder/backup/drivers/ceph.py:751 +#, python-format +msgid "Starting backup of volume='%s'" msgstr "" -#: cinder/api/openstack/compute/image_metadata.py:111 -msgid "Invalid metadata key" +#: cinder/backup/drivers/ceph.py:764 +msgid "forcing full backup" msgstr "" -#: cinder/api/openstack/compute/ips.py:74 -msgid "Instance does not exist" +#: cinder/backup/drivers/ceph.py:776 +#, python-format +msgid "backup '%s' finished." msgstr "" -#: cinder/api/openstack/compute/ips.py:97 -msgid "Instance is not a member of specified network" +#: cinder/backup/drivers/ceph.py:834 +msgid "adjusting restore vol size" msgstr "" -#: cinder/api/openstack/compute/limits.py:140 +#: cinder/backup/drivers/ceph.py:846 #, python-format -msgid "" -"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " -"%(unit_string)s." +msgid "trying incremental restore from base='%(base)s' snap='%(snap)s'" msgstr "" -#: cinder/api/openstack/compute/limits.py:266 -msgid "This request was rate-limited." 
+#: cinder/backup/drivers/ceph.py:858 +msgid "differential restore failed, trying full restore" msgstr "" -#: cinder/api/openstack/compute/server_metadata.py:38 -#: cinder/api/openstack/compute/server_metadata.py:122 -#: cinder/api/openstack/compute/server_metadata.py:159 -msgid "Server does not exist" +#: cinder/backup/drivers/ceph.py:869 +#, python-format +msgid "restore transfer completed in %.4fs" msgstr "" -#: cinder/api/openstack/compute/server_metadata.py:141 -#: cinder/api/openstack/compute/server_metadata.py:152 -msgid "Metadata item was not found" +#: cinder/backup/drivers/ceph.py:916 +#, python-format +msgid "rbd has %s extents" msgstr "" -#: cinder/api/openstack/compute/servers.py:425 -#, python-format -msgid "Invalid server status: %(status)s" +#: cinder/backup/drivers/ceph.py:938 +msgid "dest volume is original volume - forcing full copy" msgstr "" -#: cinder/api/openstack/compute/servers.py:433 -msgid "Invalid changes-since value" +#: cinder/backup/drivers/ceph.py:959 +msgid "destination has extents - forcing full copy" msgstr "" -#: cinder/api/openstack/compute/servers.py:483 -msgid "Personality file limit exceeded" +#: cinder/backup/drivers/ceph.py:964 +#, python-format +msgid "no restore point found for backup='%s', forcing full copy" msgstr "" -#: cinder/api/openstack/compute/servers.py:485 -msgid "Personality file path too long" +#: cinder/backup/drivers/ceph.py:995 +msgid "forcing full restore" msgstr "" -#: cinder/api/openstack/compute/servers.py:487 -msgid "Personality file content too long" +#: cinder/backup/drivers/ceph.py:1005 +#, python-format +msgid "starting restore from Ceph backup=%(src)s to volume=%(dest)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:501 -msgid "Server name is not a string or unicode" +#: cinder/backup/drivers/ceph.py:1016 +msgid "volume_file does not support fileno() so skipping fsync()" msgstr "" -#: cinder/api/openstack/compute/servers.py:505 -msgid "Server name is an empty string" +#: cinder/backup/drivers/ceph.py:1021 +msgid "restore finished successfully." msgstr "" -#: cinder/api/openstack/compute/servers.py:509 -msgid "Server name must be less than 256 characters." 
+#: cinder/backup/drivers/ceph.py:1023 +#, python-format +msgid "restore finished with error - %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:527 +#: cinder/backup/drivers/ceph.py:1029 #, python-format -msgid "Bad personality format: missing %s" +msgid "delete started for backup=%s" msgstr "" -#: cinder/api/openstack/compute/servers.py:530 -msgid "Bad personality format" +#: cinder/backup/drivers/ceph.py:1034 +msgid "rbd image not found but continuing anyway so that db entry can be removed" msgstr "" -#: cinder/api/openstack/compute/servers.py:535 +#: cinder/backup/drivers/ceph.py:1037 #, python-format -msgid "Personality content for %s cannot be decoded" +msgid "delete '%s' finished with warning" msgstr "" -#: cinder/api/openstack/compute/servers.py:550 +#: cinder/backup/drivers/ceph.py:1039 #, python-format -msgid "Bad networks format: network uuid is not in proper format (%s)" +msgid "delete '%s' finished" msgstr "" -#: cinder/api/openstack/compute/servers.py:559 +#: cinder/backup/drivers/swift.py:106 #, python-format -msgid "Invalid fixed IP address (%s)" +msgid "unsupported compression algorithm: %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:566 +#: cinder/backup/drivers/swift.py:123 #, python-format -msgid "Duplicate networks (%s) are not allowed" +msgid "single_user auth mode enabled, but %(param)s not set" msgstr "" -#: cinder/api/openstack/compute/servers.py:572 +#: cinder/backup/drivers/swift.py:141 #, python-format -msgid "Bad network format: missing %s" +msgid "_check_container_exists: container: %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:575 -msgid "Bad networks format" +#: cinder/backup/drivers/swift.py:146 +#, python-format +msgid "container %s does not exist" msgstr "" -#: cinder/api/openstack/compute/servers.py:587 -msgid "Userdata content cannot be decoded" +#: cinder/backup/drivers/swift.py:151 +#, python-format +msgid "container %s exists" msgstr "" -#: cinder/api/openstack/compute/servers.py:594 -msgid "accessIPv4 is not proper IPv4 format" +#: cinder/backup/drivers/swift.py:157 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:601 -msgid "accessIPv6 is not proper IPv6 format" +#: cinder/backup/drivers/swift.py:173 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:633 -msgid "Server name is not defined" +#: cinder/backup/drivers/swift.py:182 +#, python-format +msgid "generated object list: %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:675 -#: cinder/api/openstack/compute/servers.py:740 -msgid "Invalid flavorRef provided." +#: cinder/backup/drivers/swift.py:192 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:737 -msgid "Can not find requested image" +#: cinder/backup/drivers/swift.py:209 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" msgstr "" -#: cinder/api/openstack/compute/servers.py:743 -msgid "Invalid key_name provided." +#: cinder/backup/drivers/swift.py:214 +msgid "_write_metadata finished" msgstr "" -#: cinder/api/openstack/compute/servers.py:829 -#: cinder/api/openstack/compute/servers.py:849 -msgid "Instance has not been resized." 
+#: cinder/backup/drivers/swift.py:219 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:835 +#: cinder/backup/drivers/swift.py:224 #, python-format -msgid "Error in confirm-resize %s" +msgid "_read_metadata finished (%s)" msgstr "" -#: cinder/api/openstack/compute/servers.py:855 +#: cinder/backup/drivers/swift.py:234 #, python-format -msgid "Error in revert-resize %s" +msgid "volume size %d is invalid." msgstr "" -#: cinder/api/openstack/compute/servers.py:868 -msgid "Argument 'type' for reboot is not HARD or SOFT" +#: cinder/backup/drivers/swift.py:248 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:872 -msgid "Missing argument 'type' for reboot" +#: cinder/backup/drivers/swift.py:271 +msgid "reading chunk of data from volume" msgstr "" -#: cinder/api/openstack/compute/servers.py:885 +#: cinder/backup/drivers/swift.py:278 #, python-format -msgid "Error in reboot %s" +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:897 -msgid "Unable to locate requested flavor." +#: cinder/backup/drivers/swift.py:287 +msgid "not compressing data" msgstr "" -#: cinder/api/openstack/compute/servers.py:900 -msgid "Resize requires a change in size." +#: cinder/backup/drivers/swift.py:291 +msgid "About to put_object" msgstr "" -#: cinder/api/openstack/compute/servers.py:924 -msgid "Malformed server entity" +#: cinder/backup/drivers/swift.py:297 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:931 -msgid "Missing imageRef attribute" +#: cinder/backup/drivers/swift.py:301 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:940 -msgid "Invalid imageRef provided." +#: cinder/backup/drivers/swift.py:304 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:949 -msgid "Missing flavorRef attribute" +#: cinder/backup/drivers/swift.py:312 +msgid "Calling eventlet.sleep(0)" msgstr "" -#: cinder/api/openstack/compute/servers.py:962 -msgid "No adminPass was specified" +#: cinder/backup/drivers/swift.py:328 cinder/backup/drivers/tsm.py:324 +#, python-format +msgid "backup %s finished." msgstr "" -#: cinder/api/openstack/compute/servers.py:966 -#: cinder/api/openstack/compute/servers.py:1144 -msgid "Invalid adminPass" +#: cinder/backup/drivers/swift.py:345 +#, python-format +msgid "v1 swift volume backup restore of %s started" msgstr "" -#: cinder/api/openstack/compute/servers.py:980 -msgid "Unable to parse metadata key/value pairs." +#: cinder/backup/drivers/swift.py:350 +#, python-format +msgid "metadata_object_names = %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:993 -msgid "Resize request has invalid 'flavorRef' attribute." 
+#: cinder/backup/drivers/swift.py:356 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" msgstr "" -#: cinder/api/openstack/compute/servers.py:996 -msgid "Resize requests require 'flavorRef' attribute." +#: cinder/backup/drivers/swift.py:362 +#, python-format +msgid "" +"restoring object from swift. backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:1014 -#: cinder/api/openstack/compute/contrib/aggregates.py:142 -#: cinder/api/openstack/compute/contrib/networks.py:65 -msgid "Invalid request body" +#: cinder/backup/drivers/swift.py:378 +#, python-format +msgid "decompressing data using %s algorithm" msgstr "" -#: cinder/api/openstack/compute/servers.py:1019 -msgid "Could not parse imageRef from request." +#: cinder/backup/drivers/swift.py:401 +#, python-format +msgid "v1 swift volume backup restore of %s finished" msgstr "" -#: cinder/api/openstack/compute/servers.py:1071 -msgid "Instance could not be found" +#: cinder/backup/drivers/swift.py:409 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:1074 -msgid "Cannot find image for rebuild" +#: cinder/backup/drivers/swift.py:423 +#, python-format +msgid "Restoring swift backup version %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:1103 -msgid "createImage entity requires name attribute" +#: cinder/backup/drivers/swift.py:428 +#, python-format +msgid "No support to restore swift backup version %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:1112 -#: cinder/api/openstack/compute/contrib/admin_actions.py:238 -msgid "Invalid metadata" +#: cinder/backup/drivers/swift.py:432 cinder/backup/drivers/tsm.py:378 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." 
msgstr "" -#: cinder/api/openstack/compute/servers.py:1167 -#, python-format -msgid "Removing options '%(unk_opt_str)s' from query" +#: cinder/backup/drivers/swift.py:446 +msgid "swift error while listing objects, continuing with delete" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:60 +#: cinder/backup/drivers/swift.py:455 #, python-format -msgid "Compute.api::pause %s" -msgstr "Compute.api::pause %s" +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:77 +#: cinder/backup/drivers/swift.py:458 #, python-format -msgid "Compute.api::unpause %s" -msgstr "Compute.api::unpause %s" +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:94 +#: cinder/backup/drivers/swift.py:468 cinder/backup/drivers/tsm.py:440 #, python-format -msgid "compute.api::suspend %s" -msgstr "compute.api::suspend %s" +msgid "delete %s finished" +msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:111 +#: cinder/backup/drivers/tsm.py:85 #, python-format -msgid "compute.api::resume %s" -msgstr "compute.api::resume %s" +msgid "" +"backup: %(vol_id)s Failed to create device hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:127 +#: cinder/backup/drivers/tsm.py:143 #, python-format -msgid "Error in migrate %s" +msgid "" +"backup: %(vol_id)s Failed to obtain backup success notification from " +"server.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:141 +#: cinder/backup/drivers/tsm.py:173 #, python-format -msgid "Compute.api::reset_network %s" -msgstr "Compute.api::reset_network %s" - -#: cinder/api/openstack/compute/contrib/admin_actions.py:154 -#: cinder/api/openstack/compute/contrib/admin_actions.py:170 -#: cinder/api/openstack/compute/contrib/admin_actions.py:186 -#: cinder/api/openstack/compute/contrib/multinic.py:41 -#: cinder/api/openstack/compute/contrib/rescue.py:44 -msgid "Server not found" +msgid "" +"restore: %(vol_id)s Failed.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:157 +#: cinder/backup/drivers/tsm.py:199 #, python-format -msgid "Compute.api::inject_network_info %s" +msgid "backup: %(vol_id)s Failed. %(path)s is not a block device." msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:173 +#: cinder/backup/drivers/tsm.py:206 #, python-format -msgid "Compute.api::lock %s" -msgstr "Compute.api::lock %s" +msgid "backup: %(vol_id)s Failed. Cannot obtain real path to device %(path)s." +msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:189 +#: cinder/backup/drivers/tsm.py:213 #, python-format -msgid "Compute.api::unlock %s" -msgstr "Compute.api::unlock %s" +msgid "backup: %(vol_id)s Failed. %(path)s is not a file." 
+msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:219 +#: cinder/backup/drivers/tsm.py:260 #, python-format -msgid "createBackup entity requires %s attribute" +msgid "" +"backup: %(vol_id)s Failed to remove backup hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:223 -msgid "Malformed createBackup entity" +#: cinder/backup/drivers/tsm.py:286 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to TSM, volume path: " +"%(volume_path)s," msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:229 -msgid "createBackup attribute 'rotation' must be an integer" +#: cinder/backup/drivers/tsm.py:298 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:244 -#: cinder/api/openstack/compute/contrib/console_output.py:47 -#: cinder/api/openstack/compute/contrib/console_output.py:59 -#: cinder/api/openstack/compute/contrib/consoles.py:49 -#: cinder/api/openstack/compute/contrib/consoles.py:60 -#: cinder/api/openstack/compute/contrib/server_action_list.py:49 -#: cinder/api/openstack/compute/contrib/server_diagnostics.py:47 -#: cinder/api/openstack/compute/contrib/server_start_stop.py:38 -msgid "Instance not found" +#: cinder/backup/drivers/tsm.py:308 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:272 -msgid "host and block_migration must be specified." +#: cinder/backup/drivers/tsm.py:338 +#, python-format +msgid "" +"restore: starting restore of backup from TSM to volume %(volume_id)s, " +"backup: %(backup_id)s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:284 +#: cinder/backup/drivers/tsm.py:352 #, python-format -msgid "Live migration of instance %(id)s to host %(host)s failed" +msgid "" +"restore: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:76 +#: cinder/backup/drivers/tsm.py:362 #, python-format msgid "" -"Cannot create aggregate with name %(name)s and availability zone " -"%(avail_zone)s" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:88 +#: cinder/backup/drivers/tsm.py:413 #, python-format -msgid "Cannot show aggregate: %(id)s" +msgid "" +"delete: %(vol_id)s Failed to run dsmc with stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:114 +#: cinder/backup/drivers/tsm.py:421 #, python-format -msgid "Cannot update aggregate: %(id)s" +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments with " +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:126 +#: cinder/backup/drivers/tsm.py:432 #, python-format -msgid "Cannot delete aggregate: %(id)s" +msgid "" +"delete: %(vol_id)s Failed with stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:139 +#: cinder/brick/exception.py:55 #, python-format -msgid "Aggregates does not have %s action" +msgid "Exception in string format operation. 
msg='%s'" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:152 -#: cinder/api/openstack/compute/contrib/aggregates.py:158 -#, python-format -msgid "Cannot add host %(host)s in aggregate %(id)s" +#: cinder/brick/exception.py:85 +msgid "We are unable to locate any Fibre Channel devices." msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:171 -#: cinder/api/openstack/compute/contrib/aggregates.py:175 -#, python-format -msgid "Cannot remove host %(host)s in aggregate %(id)s" +#: cinder/brick/exception.py:89 +msgid "Unable to find a Fibre Channel volume device." msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:195 +#: cinder/brick/exception.py:93 #, python-format -msgid "Cannot set metadata %(metadata)s in aggregate %(id)s" +msgid "Volume device not found at %(device)s." msgstr "" -#: cinder/api/openstack/compute/contrib/certificates.py:75 -msgid "Only root certificate can be retrieved." +#: cinder/brick/exception.py:97 +#, python-format +msgid "Unable to find Volume Group: %(vg_name)s" msgstr "" -#: cinder/api/openstack/compute/contrib/cloudpipe.py:146 -msgid "" -"Unable to claim IP for VPN instances, ensure it isn't running, and try " -"again in a few minutes" +#: cinder/brick/exception.py:101 +#, python-format +msgid "Failed to create Volume Group: %(vg_name)s" msgstr "" -#: cinder/api/openstack/compute/contrib/consoles.py:44 -msgid "Missing type specification" -msgstr "" +#: cinder/brick/exception.py:105 +#, fuzzy, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "Falha ao obter metadados para o ip: %s" -#: cinder/api/openstack/compute/contrib/consoles.py:56 -msgid "Invalid type specification" +#: cinder/brick/exception.py:109 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." msgstr "" -#: cinder/api/openstack/compute/contrib/disk_config.py:44 +#: cinder/brick/exception.py:113 #, python-format -msgid "%s must be either 'MANUAL' or 'AUTO'." +msgid "Failed to attach iSCSI target for volume %(volume_id)s." msgstr "" -#: cinder/api/openstack/compute/contrib/extended_server_attributes.py:77 -#: cinder/api/openstack/compute/contrib/extended_status.py:61 -msgid "Server not found." +#: cinder/brick/exception.py:117 +#, python-format +msgid "Connect to volume via protocol %(protocol)s not supported." msgstr "" -#: cinder/api/openstack/compute/contrib/flavorextradata.py:61 -#: cinder/api/openstack/compute/contrib/flavorextradata.py:91 -msgid "Flavor not found." +#: cinder/brick/initiator/connector.py:127 +#, python-format +msgid "Invalid InitiatorConnector protocol specified %(protocol)s" msgstr "" -#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:49 -#: cinder/api/openstack/compute/contrib/volumetypes.py:158 -msgid "No Request Body" +#: cinder/brick/initiator/connector.py:140 +#, python-format +msgid "Failed to access the device on the path %(path)s: %(error)s %(info)s." msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:159 +#: cinder/brick/initiator/connector.py:229 #, python-format -msgid "No more floating ips in pool %s." +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. Try" +" number: %(tries)s" msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:161 -msgid "No more floating ips available." 
+#: cinder/brick/initiator/connector.py:242 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:201 -#: cinder/api/openstack/compute/contrib/floating_ips.py:230 -#: cinder/api/openstack/compute/contrib/security_groups.py:571 -#: cinder/api/openstack/compute/contrib/security_groups.py:604 -msgid "Missing parameter dict" +#: cinder/brick/initiator/connector.py:317 +#, python-format +msgid "Could not find the iSCSI Initiator File %s" msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:204 -#: cinder/api/openstack/compute/contrib/floating_ips.py:233 -msgid "Address not specified" +#: cinder/brick/initiator/connector.py:609 +msgid "We are unable to locate any Fibre Channel devices" msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:213 -msgid "No fixed ips associated to instance" +#: cinder/brick/initiator/connector.py:619 +#, python-format +msgid "Looking for Fibre Channel dev %(device)s" msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:216 -msgid "Associate floating ip failed" +#: cinder/brick/initiator/connector.py:629 +msgid "Fibre Channel volume device not found." msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:144 +#: cinder/brick/initiator/connector.py:633 #, python-format -msgid "Invalid status: '%s'" +msgid "Fibre volume not yet found. Will rescan & retry. Try number: %(tries)s" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:148 -#, fuzzy, python-format -msgid "Invalid mode: '%s'" -msgstr "Backend inválido: %s" - -#: cinder/api/openstack/compute/contrib/hosts.py:152 +#: cinder/brick/initiator/connector.py:649 #, python-format -msgid "Invalid update setting: '%s'" +msgid "Found Fibre Channel volume %(name)s (after %(tries)s rescans)" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:170 +#: cinder/brick/initiator/connector.py:658 #, python-format -msgid "Putting host %(host)s in maintenance mode %(mode)s." +msgid "Multipath device discovered %(device)s" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:181 +#: cinder/brick/initiator/connector.py:776 #, python-format -msgid "Setting host %(host)s to %(state)s." +msgid "AoE volume not yet found at: %(path)s. Try number: %(tries)s" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:230 -msgid "Describe-resource is admin only functionality" +#: cinder/brick/initiator/connector.py:789 +#, python-format +msgid "Found AoE device %(path)s (after %(tries)s rediscover)" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:238 -msgid "Host not found" +#: cinder/brick/initiator/connector.py:815 +#, python-format +msgid "aoe-discover: stdout=%(out)s stderr%(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/keypairs.py:70 -msgid "Keypair name contains unsafe characters" +#: cinder/brick/initiator/connector.py:825 +#, python-format +msgid "aoe-revalidate %(dev)s: stdout=%(out)s stderr%(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/keypairs.py:95 -msgid "Keypair name must be between 1 and 255 characters long" +#: cinder/brick/initiator/connector.py:834 +#, python-format +msgid "aoe-flush %(dev)s: stdout=%(out)s stderr%(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/keypairs.py:100 -#, python-format -msgid "Key pair '%s' already exists." +#: cinder/brick/initiator/connector.py:858 +msgid "" +"Connection details not present. RemoteFsClient may not initialize " +"properly." 
msgstr "" -#: cinder/api/openstack/compute/contrib/multinic.py:52 -msgid "Missing 'networkId' argument for addFixedIp" +#: cinder/brick/initiator/connector.py:915 +msgid "Invalid connection_properties specified no device_path attribute" msgstr "" -#: cinder/api/openstack/compute/contrib/multinic.py:68 -msgid "Missing 'address' argument for removeFixedIp" +#: cinder/brick/initiator/linuxfc.py:50 cinder/brick/initiator/linuxfc.py:56 +msgid "systool is not installed" msgstr "" -#: cinder/api/openstack/compute/contrib/multinic.py:77 +#: cinder/brick/initiator/linuxscsi.py:99 +#: cinder/brick/initiator/linuxscsi.py:107 +#: cinder/brick/initiator/linuxscsi.py:124 #, python-format -msgid "Unable to find address %r" +msgid "multipath call failed exit (%(code)s)" msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:62 +#: cinder/brick/initiator/linuxscsi.py:145 #, python-format -msgid "Network does not have %s action" +msgid "Couldn't find multipath device %(line)s" msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:70 +#: cinder/brick/initiator/linuxscsi.py:149 #, python-format -msgid "Disassociating network with id %s" +msgid "Found multipath device = %(mdev)s" msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:74 -#: cinder/api/openstack/compute/contrib/networks.py:91 -#: cinder/api/openstack/compute/contrib/networks.py:101 -msgid "Network not found" +#: cinder/brick/iscsi/iscsi.py:140 +msgid "Attempting recreate of backing lun..." msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:87 +#: cinder/brick/iscsi/iscsi.py:158 #, python-format -msgid "Showing network with id %s" +msgid "" +"Failed to recover attempt to create iscsi backing lun for volume " +"id:%(vol_id)s: %(e)s" msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:97 +#: cinder/brick/iscsi/iscsi.py:177 #, python-format -msgid "Deleting network with id %s" +msgid "Creating iscsi_target for: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/scheduler_hints.py:41 -msgid "Malformed scheduler_hints attribute" -msgstr "" - -#: cinder/api/openstack/compute/contrib/security_groups.py:222 -msgid "Security group id should be integer" +#: cinder/brick/iscsi/iscsi.py:184 +#, python-format +msgid "" +"Created volume path %(vp)s,\n" +"content: %(vc)%" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:243 +#: cinder/brick/iscsi/iscsi.py:216 cinder/brick/iscsi/iscsi.py:365 #, fuzzy, python-format -msgid "Security group is still in use" -msgstr "Revogado entrada do grupo de segurança %s" +msgid "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "Falha ao obter metadados para o ip: %s" -#: cinder/api/openstack/compute/contrib/security_groups.py:295 +#: cinder/brick/iscsi/iscsi.py:227 #, python-format -msgid "Security group %s already exists" +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:315 +#: cinder/brick/iscsi/iscsi.py:258 #, python-format -msgid "Security group %s is not a string or unicode" +msgid "Removing iscsi_target for: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:318 +#: cinder/brick/iscsi/iscsi.py:262 #, python-format -msgid "Security group %s cannot be empty." +msgid "Volume path %s does not exist, nothing to remove." 
msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:321 -#, python-format -msgid "Security group %s should not be greater than 255 characters." -msgstr "" +#: cinder/brick/iscsi/iscsi.py:280 +#, fuzzy, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "Falha ao obter metadados para o ip: %s" -#: cinder/api/openstack/compute/contrib/security_groups.py:348 -msgid "Parent group id is not integer" +#: cinder/brick/iscsi/iscsi.py:290 cinder/brick/iscsi/iscsi.py:550 +msgid "valid iqn needed for show_target" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:351 +#: cinder/brick/iscsi/iscsi.py:375 #, python-format -msgid "Security group (%s) not found" +msgid "Removing iscsi_target for volume: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:369 -msgid "Not enough parameters to build a valid rule." +#: cinder/brick/iscsi/iscsi.py:469 +msgid "cinder-rtstool is not installed correctly" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:376 +#: cinder/brick/iscsi/iscsi.py:489 #, python-format -msgid "This rule already exists in group %s" -msgstr "Esta regra já existe no grupo %s" - -#: cinder/api/openstack/compute/contrib/security_groups.py:414 -msgid "Parent or group id is not integer" +msgid "Creating iscsi_target for volume: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:507 -msgid "Rule id is not integer" +#: cinder/brick/iscsi/iscsi.py:513 cinder/brick/iscsi/iscsi.py:522 +#, python-format +msgid "Failed to create iscsi target for volume id:%s." msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:510 +#: cinder/brick/iscsi/iscsi.py:532 +#, fuzzy, python-format +msgid "Removing iscsi_target: %s" +msgstr "Reiniciando a instância %s" + +#: cinder/brick/iscsi/iscsi.py:542 #, python-format -msgid "Rule (%s) not found" +msgid "Failed to remove iscsi target for volume id:%s." 
msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:574 -#: cinder/api/openstack/compute/contrib/security_groups.py:607 -msgid "Security group not specified" +#: cinder/brick/iscsi/iscsi.py:571 +#, python-format +msgid "Failed to add initiator iqn %s to target" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:578 -#: cinder/api/openstack/compute/contrib/security_groups.py:611 -msgid "Security group name cannot be empty" +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" msgstr "" -#: cinder/api/openstack/compute/contrib/server_start_stop.py:45 +#: cinder/brick/local_dev/lvm.py:76 cinder/brick/local_dev/lvm.py:158 +#: cinder/brick/local_dev/lvm.py:474 cinder/brick/local_dev/lvm.py:503 +#: cinder/brick/local_dev/lvm.py:546 cinder/brick/local_dev/lvm.py:609 #, python-format -msgid "start instance %r" +msgid "Cmd :%s" msgstr "" -#: cinder/api/openstack/compute/contrib/server_start_stop.py:54 +#: cinder/brick/local_dev/lvm.py:77 cinder/brick/local_dev/lvm.py:159 +#: cinder/brick/local_dev/lvm.py:475 cinder/brick/local_dev/lvm.py:504 +#: cinder/brick/local_dev/lvm.py:547 cinder/brick/local_dev/lvm.py:610 #, python-format -msgid "stop instance %r" +msgid "StdOut :%s" msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:73 -#: cinder/api/openstack/volume/volumes.py:106 +#: cinder/brick/local_dev/lvm.py:78 cinder/brick/local_dev/lvm.py:160 +#: cinder/brick/local_dev/lvm.py:476 cinder/brick/local_dev/lvm.py:505 +#: cinder/brick/local_dev/lvm.py:548 cinder/brick/local_dev/lvm.py:611 #, python-format -msgid "vol=%s" +msgid "StdErr :%s" msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:146 -#: cinder/api/openstack/volume/volumes.py:184 -#, python-format -msgid "Delete volume with id: %s" -msgstr "" +#: cinder/brick/local_dev/lvm.py:82 +#, fuzzy, python-format +msgid "Unable to locate Volume Group %s" +msgstr "Não é possível localizar o volume %s" -#: cinder/api/openstack/compute/contrib/volumes.py:329 -#, python-format -msgid "Attach volume %(volume_id)s to instance %(server_id)s at %(device)s" +#: cinder/brick/local_dev/lvm.py:157 +msgid "Error querying thin pool about data_percent" msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:481 -#: cinder/api/openstack/volume/snapshots.py:110 -#, python-format -msgid "Delete snapshot with id: %s" +#: cinder/brick/local_dev/lvm.py:370 +#, fuzzy, python-format +msgid "Unable to find VG: %s" +msgstr "Não é possível desconectar o volume %s" + +#: cinder/brick/local_dev/lvm.py:420 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:524 -#: cinder/api/openstack/volume/snapshots.py:150 -#, python-format -msgid "Create snapshot from volume %s" +#: cinder/brick/local_dev/lvm.py:473 +msgid "Error creating Volume" msgstr "" -#: cinder/auth/fakeldap.py:33 -msgid "Attempted to instantiate singleton" -msgstr "Tentativa de instanciar singleton" +#: cinder/brick/local_dev/lvm.py:489 +#, fuzzy, python-format +msgid "Unable to find LV: %s" +msgstr "Não é possível desconectar o volume %s" -#: cinder/auth/ldapdriver.py:650 -#, python-format -msgid "" -"Attempted to remove the last member of a group. Deleting the group at %s " -"instead." +#: cinder/brick/local_dev/lvm.py:502 +msgid "Error creating snapshot" msgstr "" -"Tentatica de remover o último membto de um grupo. Ao invés disso " -"excluindo o grupo %s." 
-#: cinder/auth/manager.py:298 -#, python-format -msgid "Looking up user: %r" -msgstr "Procurando usuário: %r" +#: cinder/brick/local_dev/lvm.py:545 +msgid "Error activating LV" +msgstr "" -#: cinder/auth/manager.py:302 +#: cinder/brick/local_dev/lvm.py:563 #, python-format -msgid "Failed authorization for access key %s" -msgstr "Falha de autorização para chave de acesso %s" +msgid "Error reported running lvremove: CMD: %(command)s, RESPONSE: %(response)s" +msgstr "" -#: cinder/auth/manager.py:308 -#, python-format -msgid "Using project name = user name (%s)" -msgstr "Usando nome do projeto = nome do usuário (%s)" +#: cinder/brick/local_dev/lvm.py:568 +msgid "Attempting udev settle and retry of lvremove..." +msgstr "" -#: cinder/auth/manager.py:315 -#, python-format -msgid "failed authorization: no project named %(pjid)s (user=%(uname)s)" +#: cinder/brick/local_dev/lvm.py:608 +msgid "Error extending Volume" msgstr "" -#: cinder/auth/manager.py:324 -#, python-format -msgid "" -"Failed authorization: user %(uname)s not admin and not member of project " -"%(pjname)s" +#: cinder/brick/remotefs/remotefs.py:39 +msgid "nfs_mount_point_base required" msgstr "" -#: cinder/auth/manager.py:331 cinder/auth/manager.py:343 -#, python-format -msgid "user.secret: %s" +#: cinder/brick/remotefs/remotefs.py:45 +msgid "glusterfs_mount_point_base required" msgstr "" -#: cinder/auth/manager.py:332 cinder/auth/manager.py:344 +#: cinder/brick/remotefs/remotefs.py:86 #, python-format -msgid "expected_signature: %s" +msgid "Already mounted: %s" msgstr "" -#: cinder/auth/manager.py:333 cinder/auth/manager.py:345 -#, python-format -msgid "signature: %s" +#: cinder/common/config.py:125 +msgid "Deploy v1 of the Cinder API." msgstr "" -#: cinder/auth/manager.py:335 cinder/auth/manager.py:357 -#, python-format -msgid "Invalid signature for user %s" -msgstr "Assinatura inválida para usuário %s" +#: cinder/common/config.py:128 +msgid "Deploy v2 of the Cinder API." +msgstr "" -#: cinder/auth/manager.py:353 -#, python-format -msgid "host_only_signature: %s" +#: cinder/common/sqlalchemyutils.py:66 +#: cinder/openstack/common/db/sqlalchemy/utils.py:72 +msgid "Id not in sort_keys; is sort_keys unique?" msgstr "" -#: cinder/auth/manager.py:449 -msgid "Must specify project" -msgstr "Deve especificar projeto" +#: cinder/common/sqlalchemyutils.py:114 +#: cinder/openstack/common/db/sqlalchemy/utils.py:120 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" -#: cinder/auth/manager.py:490 +#: cinder/compute/nova.py:97 #, python-format -msgid "Adding role %(role)s to user %(uid)s in project %(pid)s" +msgid "Novaclient connection created using URL: %s" msgstr "" -#: cinder/auth/manager.py:493 -#, python-format -msgid "Adding sitewide role %(role)s to user %(uid)s" +#: cinder/db/sqlalchemy/api.py:63 +msgid "Use of empty request context is deprecated" msgstr "" -#: cinder/auth/manager.py:519 +#: cinder/db/sqlalchemy/api.py:190 #, python-format -msgid "Removing role %(role)s from user %(uid)s on project %(pid)s" +msgid "Unrecognized read_deleted value '%s'" msgstr "" -#: cinder/auth/manager.py:522 +#: cinder/db/sqlalchemy/api.py:843 #, python-format -msgid "Removing sitewide role %(role)s from user %(uid)s" +msgid "Change will make usage less than 0 for the following resources: %s" msgstr "" -#: cinder/auth/manager.py:595 +#: cinder/db/sqlalchemy/api.py:1842 #, python-format -msgid "Created project %(name)s with manager %(manager_user)s" +msgid "VolumeType %s deletion failed, VolumeType in use." 
msgstr "" -#: cinder/auth/manager.py:613 +#: cinder/db/sqlalchemy/api.py:2530 #, python-format -msgid "modifying project %s" -msgstr "modificando projeto %s" +msgid "No backup with id %s" +msgstr "" -#: cinder/auth/manager.py:625 -#, python-format -msgid "Adding user %(uid)s to project %(pid)s" -msgstr "Adicionando usuário %(uid)s ao projeto %(pid)s" +#: cinder/db/sqlalchemy/api.py:2615 +#, fuzzy +msgid "Volume must be available" +msgstr "O status do volume parece estar disponível" -#: cinder/auth/manager.py:646 +#: cinder/db/sqlalchemy/api.py:2639 #, python-format -msgid "Remove user %(uid)s from project %(pid)s" -msgstr "Remover usuário %(uid)s do projeto %(pid)s" +msgid "Volume in unexpected state %s, expected awaiting-transfer" +msgstr "" -#: cinder/auth/manager.py:676 +#: cinder/db/sqlalchemy/api.py:2662 #, python-format -msgid "Deleting project %s" -msgstr "Excluindo projeto %s" +msgid "" +"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " +"%(status)s, expected awaiting-transfer" +msgstr "" -#: cinder/auth/manager.py:734 -#, python-format -msgid "Created user %(rvname)s (admin: %(rvadmin)r)" +#: cinder/db/sqlalchemy/migration.py:37 +msgid "version should be an integer" msgstr "" -#: cinder/auth/manager.py:743 -#, python-format -msgid "Deleting user %s" -msgstr "Apagando usuário %s" +#: cinder/db/sqlalchemy/migration.py:64 +msgid "Upgrade DB using Essex release first." +msgstr "" -#: cinder/auth/manager.py:753 -#, python-format -msgid "Access Key change for user %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:240 +msgid "Exception while creating table." msgstr "" -#: cinder/auth/manager.py:755 -#, python-format -msgid "Secret Key change for user %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:269 +msgid "Downgrade from initial Cinder install is unsupported." msgstr "" -#: cinder/auth/manager.py:757 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:49 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:74 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:105 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:45 +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:48 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:46 #, python-format -msgid "Admin status set to %(admin)r for user %(uid)s" +msgid "Table |%s| not created!" msgstr "" -#: cinder/auth/manager.py:802 -#, python-format -msgid "No vpn data for project %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:127 +msgid "Dropping foreign key reservations_ibfk_1 failed." 
msgstr "" -#: cinder/cloudpipe/pipelib.py:46 -#, fuzzy, python-format -msgid "Instance type for vpn instances" -msgstr "Obter saída do console para instância %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:133 +msgid "quota_classes table not dropped" +msgstr "" -#: cinder/cloudpipe/pipelib.py:49 -msgid "Template for cloudpipe instance boot script" +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:140 +msgid "quota_usages table not dropped" msgstr "" -#: cinder/cloudpipe/pipelib.py:52 -msgid "Network to push into openvpn config" +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:147 +msgid "reservations table not dropped" msgstr "" -#: cinder/cloudpipe/pipelib.py:55 -msgid "Netmask to push into openvpn config" +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:60 +msgid "Exception while creating table 'volume_glance_metadata'" msgstr "" -#: cinder/cloudpipe/pipelib.py:107 -#, python-format -msgid "Launching VPN for %s" -msgstr "Executando VPN para %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:75 +msgid "volume_glance_metadata table not dropped" +msgstr "" -#: cinder/compute/api.py:141 -msgid "No compute host specified" +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:68 +msgid "backups table not dropped" msgstr "" -#: cinder/compute/api.py:144 -#, python-format -msgid "Unable to find host for Instance %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:58 +msgid "snapshot_metadata table not dropped" msgstr "" -#: cinder/compute/api.py:192 -#, python-format -msgid "" -"Quota exceeded for %(pid)s, tried to set %(num_metadata)s metadata " -"properties" +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:61 +msgid "transfers table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:31 +msgid "migrations table not dropped" msgstr "" -#: cinder/compute/api.py:203 +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:61 #, python-format -msgid "Quota exceeded for %(pid)s, metadata property key or value too long" +msgid "Table |%s| not created" msgstr "" -#: cinder/compute/api.py:257 -msgid "Cannot run any more instances of this type." +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:37 +#, python-format +msgid "Exception while dropping table %s." msgstr "" -#: cinder/compute/api.py:259 +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:100 #, python-format -msgid "Can only run %s more instances of this type." +msgid "Exception while creating table %s." msgstr "" -#: cinder/compute/api.py:261 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:34 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:43 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:49 #, python-format -msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances. " +msgid "Column |%s| not created!" 
msgstr "" -#: cinder/compute/api.py:310 -msgid "Creating a raw instance" +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:92 +msgid "encryption_key_id column not dropped from volumes" msgstr "" -#: cinder/compute/api.py:312 -#, python-format -msgid "Using Kernel=%(kernel_id)s, Ramdisk=%(ramdisk_id)s" +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:100 +msgid "encryption_key_id column not dropped from snapshots" msgstr "" -#: cinder/compute/api.py:383 -#, python-format -msgid "Going to run %s instances..." +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:105 +msgid "volume_type_id column not dropped from snapshots" msgstr "" -#: cinder/compute/api.py:447 -#, python-format -msgid "bdm %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:113 +msgid "encryption table not dropped" msgstr "" -#: cinder/compute/api.py:474 -#, python-format -msgid "block_device_mapping %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:49 +msgid "Table quality_of_service_specs not created!" msgstr "" -#: cinder/compute/api.py:591 -#, python-format -msgid "Sending create to scheduler for %(pid)s/%(uid)s's" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:60 +msgid "Added qos_specs_id column to volume type table failed." msgstr "" -#: cinder/compute/api.py:871 -#, fuzzy -msgid "Going to try to soft delete instance" -msgstr "Começando a terminar instâncias" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:85 +msgid "Dropping foreign key volume_types_ibfk_1 failed" +msgstr "" -#: cinder/compute/api.py:891 -msgid "No host for instance, deleting immediately" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:93 +msgid "Dropping qos_specs_id column failed." msgstr "" -#: cinder/compute/api.py:939 -#, fuzzy -msgid "Going to try to terminate instance" -msgstr "Começando a terminar instâncias" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:100 +msgid "Dropping quality_of_service_specs table failed." +msgstr "" -#: cinder/compute/api.py:977 -msgid "Going to try to stop instance" +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:59 +msgid "volume_admin_metadata table not dropped" msgstr "" -#: cinder/compute/api.py:996 -#, fuzzy -msgid "Going to try to start instance" -msgstr "Começando a terminar instâncias" +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:48 +msgid "" +"Found existing 'default' entries in the quota_classes table. Skipping " +"insertion of default values." +msgstr "" -#: cinder/compute/api.py:1000 -#, python-format -msgid "Instance %(instance_uuid)s is not stopped. (%(vm_state)s" +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:72 +msgid "Added default quota class data into the DB." msgstr "" -#: cinder/compute/api.py:1071 cinder/volume/api.py:173 -#: cinder/volume/volume_types.py:64 -#, python-format -msgid "Searching by: %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:74 +msgid "Default quota class data not inserted into the DB." msgstr "" -#: cinder/compute/api.py:1201 +#: cinder/image/glance.py:161 cinder/image/glance.py:169 #, python-format -msgid "Image type not recognized %s" +msgid "Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s." 
+msgstr "" + +#: cinder/image/image_utils.py:94 cinder/image/image_utils.py:199 +msgid "'qemu-img info' parsing failed." msgstr "" -#: cinder/compute/api.py:1369 -msgid "flavor_id is None. Assuming migration." +#: cinder/image/image_utils.py:101 +#, python-format +msgid "fmt=%(fmt)s backed by: %(backing_file)s" msgstr "" -#: cinder/compute/api.py:1377 +#: cinder/image/image_utils.py:109 cinder/image/image_utils.py:192 #, python-format msgid "" -"Old instance type %(current_instance_type_name)s, new instance type " -"%(new_instance_type_name)s" +"Size is %(image_size)dGB and doesn't fit in a volume of size " +"%(volume_size)dGB." msgstr "" -#: cinder/compute/api.py:1644 +#: cinder/image/image_utils.py:157 #, python-format -msgid "multiple fixedips exist, using the first: %s" +msgid "" +"qemu-img is not installed and image is of type %s. Only RAW images can " +"be used if qemu-img is not installed." msgstr "" -#: cinder/compute/instance_types.py:57 cinder/compute/instance_types.py:65 -msgid "create arguments must be positive integers" +#: cinder/image/image_utils.py:164 +msgid "" +"qemu-img is not installed and the disk format is not specified. Only RAW" +" images can be used if qemu-img is not installed." msgstr "" -#: cinder/compute/instance_types.py:76 cinder/volume/volume_types.py:41 +#: cinder/image/image_utils.py:178 #, python-format -msgid "DB error: %s" +msgid "Copying image from %(tmp)s to volume %(dest)s - size: %(size)s" msgstr "" -#: cinder/compute/instance_types.py:86 +#: cinder/image/image_utils.py:206 #, python-format -msgid "Instance type %s not found for deletion" +msgid "fmt=%(fmt)s backed by:%(backing_file)s" msgstr "" -#: cinder/compute/manager.py:138 +#: cinder/image/image_utils.py:224 #, python-format -msgid "check_instance_lock: decorating: |%s|" -msgstr "check_instance_lock: decorating: |%s|" +msgid "Converted to %(vol_format)s, but format is now %(file_format)s" +msgstr "" -#: cinder/compute/manager.py:140 +#: cinder/image/image_utils.py:260 #, python-format -msgid "" -"check_instance_lock: arguments: |%(self)s| |%(context)s| " -"|%(instance_uuid)s|" +msgid "Converted to %(f1)s, but format is now %(f2)s" msgstr "" -#: cinder/compute/manager.py:144 -#, python-format -msgid "check_instance_lock: locked: |%s|" -msgstr "check_instance_lock: locked: |%s|" +#: cinder/keymgr/conf_key_mgr.py:78 +msgid "" +"config option keymgr.fixed_key has not been defined: some operations may " +"fail unexpectedly" +msgstr "" -#: cinder/compute/manager.py:146 -#, python-format -msgid "check_instance_lock: admin: |%s|" -msgstr "check_instance_lock: admin: |%s|" +#: cinder/keymgr/conf_key_mgr.py:80 +msgid "keymgr.fixed_key not defined" +msgstr "" -#: cinder/compute/manager.py:151 +#: cinder/keymgr/conf_key_mgr.py:134 #, python-format -msgid "check_instance_lock: executing: |%s|" -msgstr "check_instance_lock: executando: |%s|" +msgid "Not deleting key %s" +msgstr "" -#: cinder/compute/manager.py:155 +#: cinder/openstack/common/eventlet_backdoor.py:140 #, python-format -msgid "check_instance_lock: not executing |%s|" -msgstr "check_instance_lock: not executando |%s|" +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" +msgstr "" -#: cinder/compute/manager.py:201 +#: cinder/openstack/common/excutils.py:48 #, python-format -msgid "Unable to load the virtualization driver: %s" +msgid "Original exception being dropped: %s" msgstr "" -#: cinder/compute/manager.py:223 +#: cinder/openstack/common/fileutils.py:64 #, python-format -msgid "" -"Instance %(instance_uuid)s has been 
destroyed from under us while trying " -"to set it to ERROR" +msgid "Reloading cached file %s" msgstr "" -#: cinder/compute/manager.py:240 -#, python-format -msgid "Current state is %(drv_state)s, state in DB is %(db_state)s." +#: cinder/openstack/common/gettextutils.py:252 +msgid "Message objects do not support addition." msgstr "" -#: cinder/compute/manager.py:245 -msgid "Rebooting instance after cinder-compute restart." +#: cinder/openstack/common/gettextutils.py:261 +msgid "" +"Message objects do not support str() because they may contain non-ascii " +"characters. Please use unicode() or translate() instead." msgstr "" -#: cinder/compute/manager.py:255 -msgid "Hypervisor driver does not support firewall rules" +#: cinder/openstack/common/imageutils.py:96 +msgid "Snapshot list encountered but no header found!" msgstr "" -#: cinder/compute/manager.py:260 -msgid "Checking state" +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" msgstr "" -#: cinder/compute/manager.py:329 +#: cinder/openstack/common/lockutils.py:189 #, python-format -msgid "Setting up bdm %s" +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." msgstr "" -#: cinder/compute/manager.py:400 +#: cinder/openstack/common/lockutils.py:200 #, python-format -msgid "Instance %s already deleted from database. Attempting forceful vm deletion" +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." msgstr "" -#: cinder/compute/manager.py:406 +#: cinder/openstack/common/lockutils.py:227 #, python-format -msgid "Exception encountered while terminating the instance %s" +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." msgstr "" -#: cinder/compute/manager.py:444 +#: cinder/openstack/common/lockutils.py:235 #, python-format -msgid "Instance %s not found." +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." msgstr "" -#: cinder/compute/manager.py:480 -msgid "Instance has already been created" -msgstr "A instância já foi criada" +#: cinder/openstack/common/log.py:301 +#, fuzzy, python-format +msgid "Deprecated: %s" +msgstr "recebido %s" -#: cinder/compute/manager.py:523 +#: cinder/openstack/common/log.py:402 #, python-format -msgid "" -"image_id=%(image_id)s, image_size_bytes=%(size_bytes)d, " -"allowed_size_bytes=%(allowed_size_bytes)d" +msgid "Error loading logging config %(log_config)s: %(err_msg)s" msgstr "" -#: cinder/compute/manager.py:528 +#: cinder/openstack/common/log.py:453 #, python-format -msgid "" -"Image '%(image_id)s' size %(size_bytes)d exceeded instance_type allowed " -"size %(allowed_size_bytes)d" +msgid "syslog facility must be one of: %s" msgstr "" -#: cinder/compute/manager.py:538 -msgid "Starting instance..." 
+#: cinder/openstack/common/log.py:623 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" msgstr "" -#: cinder/compute/manager.py:548 -msgid "Skipping network allocation for instance" +#: cinder/openstack/common/loopingcall.py:82 +#, python-format +msgid "task run outlasted interval by %s sec" msgstr "" -#: cinder/compute/manager.py:561 -msgid "Instance failed network setup" +#: cinder/openstack/common/loopingcall.py:89 +#: cinder/tests/brick/test_brick_connector.py:466 +msgid "in fixed duration looping call" msgstr "" -#: cinder/compute/manager.py:565 +#: cinder/openstack/common/loopingcall.py:129 #, python-format -msgid "Instance network_info: |%s|" -msgstr "" - -#: cinder/compute/manager.py:578 -msgid "Instance failed block device setup" +msgid "Dynamic looping call sleeping for %.02f seconds" msgstr "" -#: cinder/compute/manager.py:594 -msgid "Instance failed to spawn" +#: cinder/openstack/common/loopingcall.py:136 +msgid "in dynamic looping call" msgstr "" -#: cinder/compute/manager.py:615 -msgid "Deallocating network for instance" +#: cinder/openstack/common/periodic_task.py:43 +#, python-format +msgid "Unexpected argument for periodic task creation: %(arg)s." msgstr "" -#: cinder/compute/manager.py:672 +#: cinder/openstack/common/periodic_task.py:134 #, python-format -msgid "%(action_str)s instance" +msgid "Skipping periodic task %(task)s because its interval is negative" msgstr "" -#: cinder/compute/manager.py:699 +#: cinder/openstack/common/periodic_task.py:139 #, python-format -msgid "Ignoring DiskNotFound: %s" +msgid "Skipping periodic task %(task)s because it is disabled" msgstr "" -#: cinder/compute/manager.py:708 +#: cinder/openstack/common/periodic_task.py:177 #, python-format -msgid "terminating bdm %s" +msgid "Running periodic task %(full_task_name)s" msgstr "" -#: cinder/compute/manager.py:742 cinder/compute/manager.py:1328 -#: cinder/compute/manager.py:1416 cinder/compute/manager.py:2501 +#: cinder/openstack/common/periodic_task.py:186 #, python-format -msgid "%s. Setting instance vm_state to ERROR" +msgid "Error during %(full_task_name)s: %(e)s" msgstr "" -#: cinder/compute/manager.py:811 +#: cinder/openstack/common/policy.py:149 #, python-format msgid "" -"Cannot rebuild instance [%(instance_uuid)s], because the given image does" -" not exist." +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." 
msgstr "" -#: cinder/compute/manager.py:816 +#: cinder/openstack/common/policy.py:163 #, python-format -msgid "Cannot rebuild instance [%(instance_uuid)s]: %(exc)s" +msgid "Failed to understand rule %(match)r" msgstr "" -#: cinder/compute/manager.py:823 +#: cinder/openstack/common/policy.py:173 #, python-format -msgid "Rebuilding instance %s" +msgid "Inheritance-based rules are deprecated; update _check_%s" msgstr "" -#: cinder/compute/manager.py:876 +#: cinder/openstack/common/policy.py:180 #, python-format -msgid "Rebooting instance %s" -msgstr "Reiniciando a instância %s" +msgid "No handler for matches of kind %s" +msgstr "" -#: cinder/compute/manager.py:891 +#: cinder/openstack/common/processutils.py:127 #, python-format -msgid "" -"trying to reboot a non-running instance: %(instance_uuid)s (state: " -"%(state)s expected: %(running)s)" +msgid "Got unknown keyword args to utils.execute: %r" msgstr "" -#: cinder/compute/manager.py:933 +#: cinder/openstack/common/processutils.py:142 #, python-format -msgid "instance %s: snapshotting" -msgstr "instância %s: fazendo um snapshot" +msgid "Running cmd (subprocess): %s" +msgstr "Executando comando (subprocesso): %s" -#: cinder/compute/manager.py:939 +#: cinder/openstack/common/processutils.py:167 +#: cinder/openstack/common/processutils.py:239 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:345 #, python-format -msgid "" -"trying to snapshot a non-running instance: %(instance_uuid)s (state: " -"%(state)s expected: %(running)s)" -msgstr "" +msgid "Result was %s" +msgstr "Resultado foi %s" -#: cinder/compute/manager.py:995 +#: cinder/openstack/common/processutils.py:179 #, python-format -msgid "Found %(num_images)d images (rotation: %(rotation)d)" +msgid "%r failed. Retrying." msgstr "" -#: cinder/compute/manager.py:1001 +#: cinder/openstack/common/processutils.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:318 #, python-format -msgid "Rotating out %d backups" +msgid "Running cmd (SSH): %s" +msgstr "Executando o comando (SSH): %s" + +#: cinder/openstack/common/processutils.py:220 +msgid "Environment not supported over SSH" msgstr "" -#: cinder/compute/manager.py:1005 -#, python-format -msgid "Deleting image %s" +#: cinder/openstack/common/processutils.py:224 +msgid "process_input not supported over SSH" msgstr "" -#: cinder/compute/manager.py:1035 +#: cinder/openstack/common/service.py:175 +#: cinder/openstack/common/service.py:269 #, python-format -msgid "Failed to set admin password. Instance %s is not running" +msgid "Caught %s, exiting" msgstr "" -#: cinder/compute/manager.py:1041 -#, python-format -msgid "Instance %s: Root password set" +#: cinder/openstack/common/service.py:187 +msgid "Exception during rpc cleanup." msgstr "" -#: cinder/compute/manager.py:1050 -msgid "set_admin_password is not implemented by this driver." 
+#: cinder/openstack/common/service.py:238 +msgid "Parent process has died unexpectedly, exiting" msgstr "" -#: cinder/compute/manager.py:1064 -msgid "Error setting admin password" +#: cinder/openstack/common/service.py:275 +#, fuzzy +msgid "Unhandled exception" +msgstr "Exceção interna: %s" + +#: cinder/openstack/common/service.py:308 +msgid "Forking too fast, sleeping" msgstr "" -#: cinder/compute/manager.py:1079 +#: cinder/openstack/common/service.py:327 #, python-format -msgid "" -"trying to inject a file into a non-running instance: %(instance_uuid)s " -"(state: %(current_power_state)s expected: %(expected_state)s)" +msgid "Started child %d" msgstr "" -#: cinder/compute/manager.py:1084 +#: cinder/openstack/common/service.py:337 #, python-format -msgid "instance %(instance_uuid)s: injecting file to %(path)s" +msgid "Starting %d workers" msgstr "" -#: cinder/compute/manager.py:1098 +#: cinder/openstack/common/service.py:354 #, python-format -msgid "" -"trying to update agent on a non-running instance: %(instance_uuid)s " -"(state: %(current_power_state)s expected: %(expected_state)s)" +msgid "Child %(pid)d killed by signal %(sig)d" msgstr "" -#: cinder/compute/manager.py:1103 +#: cinder/openstack/common/service.py:358 #, python-format -msgid "instance %(instance_uuid)s: updating agent to %(url)s" +msgid "Child %(pid)s exited with status %(code)d" msgstr "" -#: cinder/compute/manager.py:1116 +#: cinder/openstack/common/service.py:362 #, python-format -msgid "instance %s: rescuing" -msgstr "instância %s: resgatando" +msgid "pid %d not in child list" +msgstr "" -#: cinder/compute/manager.py:1141 +#: cinder/openstack/common/service.py:392 #, python-format -msgid "instance %s: unrescuing" -msgstr "instância %s: desfazendo o resgate" +msgid "Caught %s, stopping children" +msgstr "" -#: cinder/compute/manager.py:1270 -msgid "destination same as source!" +#: cinder/openstack/common/service.py:410 +#, python-format +msgid "Waiting on %d children to exit" msgstr "" -#: cinder/compute/manager.py:1287 +#: cinder/openstack/common/sslutils.py:98 #, python-format -msgid "instance %s: migrating" +msgid "Invalid SSL version : %s" msgstr "" -#: cinder/compute/manager.py:1471 +#: cinder/openstack/common/strutils.py:86 #, python-format -msgid "instance %s: pausing" -msgstr "instância %s: pausando" +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" -#: cinder/compute/manager.py:1489 +#: cinder/openstack/common/strutils.py:182 #, python-format -msgid "instance %s: unpausing" -msgstr "instância %s: saindo do pause" +msgid "Invalid string format: %s" +msgstr "" -#: cinder/compute/manager.py:1525 +#: cinder/openstack/common/strutils.py:189 #, python-format -msgid "instance %s: retrieving diagnostics" -msgstr "instância %s: recuperando os diagnósticos" +msgid "Unknown byte multiplier: %s" +msgstr "" -#: cinder/compute/manager.py:1534 +#: cinder/openstack/common/versionutils.py:69 #, python-format -msgid "instance %s: suspending" -msgstr "instância %s: suspendendo" +msgid "" +"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and " +"may be removed in %(remove_in)s." +msgstr "" -#: cinder/compute/manager.py:1556 +#: cinder/openstack/common/versionutils.py:73 #, python-format -msgid "instance %s: resuming" -msgstr "instância %s: resumindo" +msgid "" +"%(what)s is deprecated as of %(as_of)s and may be removed in " +"%(remove_in)s. It will not be superseded." 
+msgstr "" -#: cinder/compute/manager.py:1579 +#: cinder/openstack/common/crypto/utils.py:29 +msgid "An unknown error occurred in crypto utils." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:36 #, python-format -msgid "instance %s: locking" -msgstr "instância %s: bloqueando" +msgid "Block size of %(given)d is too big, max = %(maximum)d" +msgstr "" -#: cinder/compute/manager.py:1588 +#: cinder/openstack/common/crypto/utils.py:45 #, python-format -msgid "instance %s: unlocking" -msgstr "instância %s: desbloqueando" +msgid "Length of %(given)d is too long, max = %(maximum)d" +msgstr "" -#: cinder/compute/manager.py:1596 +#: cinder/openstack/common/db/exception.py:44 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:487 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:538 #, python-format -msgid "instance %s: getting locked state" -msgstr "instância %s: obtendo estado de bloqueio" +msgid "Got mysql server has gone away: %s" +msgstr "" -#: cinder/compute/manager.py:1606 +#: cinder/openstack/common/db/sqlalchemy/session.py:610 #, python-format -msgid "instance %s: reset network" -msgstr "instância %s: reset da rede" +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/utils.py:33 +msgid "Sort key supplied was not valid." +msgstr "" -#: cinder/compute/manager.py:1614 +#: cinder/openstack/common/notifier/api.py:129 #, python-format -msgid "instance %s: inject network info" +msgid "%s not in valid priorities" msgstr "" -#: cinder/compute/manager.py:1618 +#: cinder/openstack/common/notifier/api.py:145 #, python-format -msgid "network_info to inject: |%s|" +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" msgstr "" -#: cinder/compute/manager.py:1655 +#: cinder/openstack/common/notifier/api.py:164 #, python-format -msgid "instance %s: getting vnc console" +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead." msgstr "" -#: cinder/compute/manager.py:1685 +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 #, python-format -msgid "Booting with volume %(volume_id)s at %(mountpoint)s" +msgid "Could not send notification to %(topic)s. Payload=%(message)s" msgstr "" -#: cinder/compute/manager.py:1703 +#: cinder/openstack/common/rpc/__init__.py:105 #, python-format msgid "" -"instance %(instance_uuid)s: attaching volume %(volume_id)s to " -"%(mountpoint)s" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." 
msgstr "" -#: cinder/compute/manager.py:1705 -#, python-format -msgid "Attaching volume %(volume_id)s to %(mountpoint)s" +#: cinder/openstack/common/rpc/amqp.py:83 +msgid "Pool creating new connection" msgstr "" -#: cinder/compute/manager.py:1714 +#: cinder/openstack/common/rpc/amqp.py:208 #, python-format -msgid "instance %(instance_uuid)s: attach failed %(mountpoint)s, removing" +msgid "no calling threads waiting for msg_id : %s, message : %s" msgstr "" -#: cinder/compute/manager.py:1724 +#: cinder/openstack/common/rpc/amqp.py:216 #, python-format -msgid "Attach failed %(mountpoint)s, removing" +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." msgstr "" -#: cinder/compute/manager.py:1752 +#: cinder/openstack/common/rpc/amqp.py:299 +#, python-format +msgid "unpacked context: %s" +msgstr "conteúdo descompactado: %s" + +#: cinder/openstack/common/rpc/amqp.py:345 #, python-format -msgid "Detach volume %(volume_id)s from mountpoint %(mp)s" +msgid "UNIQUE_ID is %s." msgstr "" -#: cinder/compute/manager.py:1756 +#: cinder/openstack/common/rpc/amqp.py:414 #, python-format -msgid "Detaching volume from unknown instance %s" -msgstr "Desconectando volume da instância desconhecida %s" +msgid "received %s" +msgstr "recebido %s" -#: cinder/compute/manager.py:1822 +#: cinder/openstack/common/rpc/amqp.py:422 #, python-format -msgid "" -"Creating tmpfile %s to notify to other compute nodes that they should " -"mount the same storage." +msgid "no method for message: %s" +msgstr "sem método para mensagem: %s" + +#: cinder/openstack/common/rpc/amqp.py:423 +#, python-format +msgid "No method for message: %s" +msgstr "Sem método para mensagem: %s" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:280 +#, python-format +msgid "Expected exception during message handling (%s)" msgstr "" -#: cinder/compute/manager.py:1884 -msgid "Instance has no volume." +#: cinder/openstack/common/rpc/amqp.py:459 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" msgstr "" -#: cinder/compute/manager.py:1916 +#: cinder/openstack/common/rpc/amqp.py:594 #, python-format -msgid "plug_vifs() failed %(cnt)d.Retry up to %(max_retry)d for %(hostname)s." +msgid "Making synchronous call on %s ..." msgstr "" -#: cinder/compute/manager.py:1973 +#: cinder/openstack/common/rpc/amqp.py:597 +#, python-format +msgid "MSG_ID is %s" +msgstr "MSG_ID é %s" + +#: cinder/openstack/common/rpc/amqp.py:631 #, python-format -msgid "Pre live migration failed at %(dest)s" +msgid "Making asynchronous cast on %s..." msgstr "" -#: cinder/compute/manager.py:2000 -msgid "post_live_migration() is started.." +#: cinder/openstack/common/rpc/amqp.py:640 +msgid "Making asynchronous fanout cast..." msgstr "" -#: cinder/compute/manager.py:2030 -msgid "No floating_ip found" +#: cinder/openstack/common/rpc/amqp.py:668 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" msgstr "" -#: cinder/compute/manager.py:2038 -msgid "No floating_ip found." +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." msgstr "" -#: cinder/compute/manager.py:2040 +#: cinder/openstack/common/rpc/common.py:104 #, python-format msgid "" -"Live migration: Unexpected error: cannot inherit floating ip.\n" -"%(e)s" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." 
msgstr "" -#: cinder/compute/manager.py:2073 +#: cinder/openstack/common/rpc/common.py:121 #, python-format -msgid "Migrating instance to %(dest)s finished successfully." -msgstr "" - -#: cinder/compute/manager.py:2075 msgid "" -"You may see the error \"libvirt: QEMU error: Domain not found: no domain " -"with matching name.\" This error can be safely ignored." +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" msgstr "" -#: cinder/compute/manager.py:2090 -msgid "Post operation of migration started" +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" msgstr "" -#: cinder/compute/manager.py:2226 +#: cinder/openstack/common/rpc/common.py:143 #, python-format -msgid "Updated the info_cache for instance %s" +msgid "Found duplicate message(%(msg_id)s). Skipping it." msgstr "" -#: cinder/compute/manager.py:2255 -msgid "Updating bandwidth usage cache" +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." msgstr "" -#: cinder/compute/manager.py:2277 -msgid "Updating host status" +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." msgstr "" -#: cinder/compute/manager.py:2305 +#: cinder/openstack/common/rpc/common.py:156 #, python-format msgid "" -"Found %(num_db_instances)s in the database and %(num_vm_instances)s on " -"the hypervisor." +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." msgstr "" -#: cinder/compute/manager.py:2331 +#: cinder/openstack/common/rpc/common.py:280 #, python-format -msgid "" -"During the sync_power process the instance %(uuid)s has moved from host " -"%(src)s to host %(dst)s" +msgid "Failed to sanitize %(item)s. Key error %(err)s" msgstr "" -#: cinder/compute/manager.py:2344 +#: cinder/openstack/common/rpc/common.py:302 #, python-format -msgid "" -"Instance %s is in the process of migrating to this host. Wait next " -"sync_power cycle before setting power state to NOSTATE" -msgstr "" +msgid "Returning exception %s to caller" +msgstr "Retornando exceção %s ao método de origem" -#: cinder/compute/manager.py:2350 -msgid "" -"Instance found in database but not known by hypervisor. Setting power " -"state to NOSTATE" +#: cinder/openstack/common/rpc/impl_kombu.py:166 +#: cinder/openstack/common/rpc/impl_qpid.py:164 +msgid "Failed to process message... skipping it." msgstr "" -#: cinder/compute/manager.py:2380 -msgid "FLAGS.reclaim_instance_interval <= 0, skipping..." +#: cinder/openstack/common/rpc/impl_kombu.py:477 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" msgstr "" -#: cinder/compute/manager.py:2392 -msgid "Reclaiming deleted instance" +#: cinder/openstack/common/rpc/impl_kombu.py:499 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" msgstr "" -#: cinder/compute/manager.py:2458 +#: cinder/openstack/common/rpc/impl_kombu.py:536 #, python-format msgid "" -"Detected instance with name label '%(name)s' which is marked as DELETED " -"but still present on host." +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" msgstr "" -#: cinder/compute/manager.py:2465 +#: cinder/openstack/common/rpc/impl_kombu.py:552 #, python-format msgid "" -"Destroying instance with name label '%(name)s' which is marked as DELETED" -" but still present on host." 
+"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." msgstr "" -#: cinder/compute/manager.py:2472 +#: cinder/openstack/common/rpc/impl_kombu.py:606 +#: cinder/openstack/common/rpc/impl_qpid.py:507 #, python-format -msgid "Unrecognized value '%(action)s' for FLAGS.running_deleted_instance_action" +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" msgstr "" -#: cinder/compute/manager.py:2542 +#: cinder/openstack/common/rpc/impl_kombu.py:624 +#: cinder/openstack/common/rpc/impl_qpid.py:522 #, python-format -msgid "" -"Aggregate %(aggregate_id)s: unrecoverable state during operation on " -"%(host)s" -msgstr "" - -#: cinder/compute/utils.py:142 -msgid "v4 subnets are required for legacy nw_info" +msgid "Timed out waiting for RPC response: %s" msgstr "" -#: cinder/console/manager.py:77 cinder/console/vmrc_manager.py:70 -msgid "Adding console" -msgstr "Adicionando console" - -#: cinder/console/manager.py:97 +#: cinder/openstack/common/rpc/impl_kombu.py:628 +#: cinder/openstack/common/rpc/impl_qpid.py:526 #, python-format -msgid "Tried to remove non-existent console %(console_id)s." +msgid "Failed to consume message from queue: %s" msgstr "" -#: cinder/console/vmrc_manager.py:122 +#: cinder/openstack/common/rpc/impl_kombu.py:667 +#: cinder/openstack/common/rpc/impl_qpid.py:561 #, python-format -msgid "Tried to remove non-existent console %(console_id)s." +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" msgstr "" -#: cinder/console/vmrc_manager.py:125 +#: cinder/openstack/common/rpc/impl_qpid.py:84 #, python-format -msgid "Removing console %(console_id)s." -msgstr "" - -#: cinder/console/xvp.py:98 -msgid "Rebuilding xvp conf" +msgid "Invalid value for qpid_topology_version: %d" msgstr "" -#: cinder/console/xvp.py:116 +#: cinder/openstack/common/rpc/impl_qpid.py:455 #, python-format -msgid "Re-wrote %s" +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" msgstr "" -#: cinder/console/xvp.py:121 -msgid "Stopping xvp" +#: cinder/openstack/common/rpc/impl_qpid.py:461 +#, python-format +msgid "Connected to AMQP server on %s" msgstr "" -#: cinder/console/xvp.py:134 -msgid "Starting xvp" +#: cinder/openstack/common/rpc/impl_qpid.py:474 +msgid "Re-established AMQP queues" msgstr "" -#: cinder/console/xvp.py:141 -#, python-format -msgid "Error starting xvp: %s" +#: cinder/openstack/common/rpc/impl_qpid.py:534 +msgid "Error processing message. Skipping it." msgstr "" -#: cinder/console/xvp.py:144 -msgid "Restarting xvp" +#: cinder/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." msgstr "" -#: cinder/console/xvp.py:146 -msgid "xvp not running..." 
+#: cinder/openstack/common/rpc/impl_zmq.py:101 +#, python-format +msgid "Deserializing: %s" msgstr "" -#: cinder/consoleauth/manager.py:63 +#: cinder/openstack/common/rpc/impl_zmq.py:136 #, python-format -msgid "Deleting Expired Token: (%s)" +msgid "Connecting to %(addr)s with %(type)s" msgstr "" -#: cinder/consoleauth/manager.py:75 +#: cinder/openstack/common/rpc/impl_zmq.py:137 #, python-format -msgid "Received Token: %(token)s, %(token_dict)s)" +msgid "-> Subscribed to %(subscribe)s" msgstr "" -#: cinder/consoleauth/manager.py:79 +#: cinder/openstack/common/rpc/impl_zmq.py:138 #, python-format -msgid "Checking Token: %(token)s, %(token_valid)s)" +msgid "-> bind: %(bind)s" msgstr "" -#: cinder/db/sqlalchemy/api.py:57 -msgid "Use of empty request context is deprecated" +#: cinder/openstack/common/rpc/impl_zmq.py:146 +msgid "Could not open socket." msgstr "" -#: cinder/db/sqlalchemy/api.py:198 +#: cinder/openstack/common/rpc/impl_zmq.py:158 #, python-format -msgid "Unrecognized read_deleted value '%s'" +msgid "Subscribing to %s" msgstr "" -#: cinder/db/sqlalchemy/api.py:516 cinder/db/sqlalchemy/api.py:551 -#, python-format -msgid "No ComputeNode for %(host)s" +#: cinder/openstack/common/rpc/impl_zmq.py:200 +msgid "You cannot recv on this socket." msgstr "" -#: cinder/db/sqlalchemy/api.py:4019 cinder/db/sqlalchemy/api.py:4045 -#, python-format -msgid "No backend config with id %(sm_backend_id)s" +#: cinder/openstack/common/rpc/impl_zmq.py:205 +msgid "You cannot send on this socket." msgstr "" -#: cinder/db/sqlalchemy/api.py:4103 -#, python-format -msgid "No sm_flavor called %(sm_flavor)s" +#: cinder/openstack/common/rpc/impl_zmq.py:267 +#, fuzzy, python-format +msgid "Running func with context: %s" +msgstr "conteúdo descompactado: %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:305 +msgid "Sending reply" msgstr "" -#: cinder/db/sqlalchemy/api.py:4147 -#, python-format -msgid "No sm_volume with id %(volume_id)s" +#: cinder/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." msgstr "" -#: cinder/db/sqlalchemy/migration.py:66 -msgid "python-migrate is not installed. Exiting." +#: cinder/openstack/common/rpc/impl_zmq.py:371 +#, fuzzy +msgid "Registering reactor" +msgstr "Removendo o registro da imagem %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" msgstr "" -#: cinder/db/sqlalchemy/migration.py:78 -msgid "version should be an integer" +#: cinder/openstack/common/rpc/impl_zmq.py:387 +msgid "Consuming socket" msgstr "" -#: cinder/db/sqlalchemy/session.py:137 +#: cinder/openstack/common/rpc/impl_zmq.py:437 #, python-format -msgid "SQL connection failed. %s attempts left." +msgid "Creating proxy for topic: %s" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:48 -msgid "interface column not added to networks table" +#: cinder/openstack/common/rpc/impl_zmq.py:443 +msgid "Topic contained dangerous characters." msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:80 -#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:54 -#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:61 -#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:48 -#, python-format -msgid "Table |%s| not created!" +#: cinder/openstack/common/rpc/impl_zmq.py:475 +msgid "Topic socket file creation failed." 
msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:87 -msgid "VIF column not added to fixed_ips table" +#: cinder/openstack/common/rpc/impl_zmq.py:481 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:97 +#: cinder/openstack/common/rpc/impl_zmq.py:497 #, python-format -msgid "join list for moving mac_addresses |%s|" +msgid "Required IPC directory does not exist at %s" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:39 -#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:60 -#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:61 -#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:99 -msgid "foreign key constraint couldn't be added" +#: cinder/openstack/common/rpc/impl_zmq.py:506 +#, python-format +msgid "Permission denied to IPC directory at %s" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:58 -msgid "foreign key constraint couldn't be dropped" +#: cinder/openstack/common/rpc/impl_zmq.py:509 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/045_add_network_priority.py:34 -msgid "priority column not added to networks table" +#: cinder/openstack/common/rpc/impl_zmq.py:543 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:41 -#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:42 -#: cinder/db/sqlalchemy/migrate_repo/versions/064_change_instance_id_to_uuid_in_instance_actions.py:56 -#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:68 -msgid "foreign key constraint couldn't be removed" +#: cinder/openstack/common/rpc/impl_zmq.py:562 +msgid "ZMQ Envelope version unsupported or unknown." msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/049_add_instances_progress.py:34 -msgid "progress column not added to instances table" +#: cinder/openstack/common/rpc/impl_zmq.py:590 +msgid "Skipping topic registration. Already registered." msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/055_convert_flavor_id_to_str.py:97 +#: cinder/openstack/common/rpc/impl_zmq.py:597 #, python-format -msgid "" -"Could not cast flavorid to integer: %s. Set flavorid to an integer-like " -"string to downgrade." 
+msgid "Consumer is a zmq.%s" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:69 -msgid "instance_info_caches tables not dropped" +#: cinder/openstack/common/rpc/impl_zmq.py:649 +msgid "Creating payload" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/069_block_migration.py:41 -msgid "progress column not added to compute_nodes table" +#: cinder/openstack/common/rpc/impl_zmq.py:662 +msgid "Creating queue socket for reply waiter" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:76 -msgid "dns_domains table not dropped" +#: cinder/openstack/common/rpc/impl_zmq.py:675 +msgid "Sending cast" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:60 -msgid "quota_classes table not dropped" +#: cinder/openstack/common/rpc/impl_zmq.py:678 +msgid "Cast sent; Waiting reply" msgstr "" -#: cinder/image/glance.py:147 -msgid "Connection error contacting glance server, retrying" +#: cinder/openstack/common/rpc/impl_zmq.py:681 +#, fuzzy, python-format +msgid "Received message: %s" +msgstr "recebido %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:682 +msgid "Unpacking response" msgstr "" -#: cinder/image/glance.py:153 cinder/network/quantum/melange_connection.py:104 -msgid "Maximum attempts reached" +#: cinder/openstack/common/rpc/impl_zmq.py:691 +msgid "Unsupported or unknown ZMQ envelope returned." msgstr "" -#: cinder/image/glance.py:278 -#, python-format -msgid "Creating image in Glance. Metadata passed in %s" +#: cinder/openstack/common/rpc/impl_zmq.py:698 +msgid "RPC Message Invalid." msgstr "" -#: cinder/image/glance.py:281 +#: cinder/openstack/common/rpc/impl_zmq.py:721 #, python-format -msgid "Metadata after formatting for Glance %s" +msgid "%(msg)s" msgstr "" -#: cinder/image/glance.py:289 +#: cinder/openstack/common/rpc/impl_zmq.py:724 #, python-format -msgid "Metadata returned from Glance formatted for Base %s" +msgid "Sending message(s) to: %s" msgstr "" -#: cinder/image/glance.py:331 cinder/image/glance.py:335 -msgid "Not the image owner" +#: cinder/openstack/common/rpc/impl_zmq.py:728 +msgid "No matchmaker results. Not casting." msgstr "" -#: cinder/image/glance.py:410 -#, python-format -msgid "%(timestamp)s does not follow any of the signatures: %(iso_formats)s" +#: cinder/openstack/common/rpc/impl_zmq.py:731 +msgid "No match from matchmaker." msgstr "" -#: cinder/image/s3.py:309 +#: cinder/openstack/common/rpc/impl_zmq.py:771 #, python-format -msgid "Failed to download %(image_location)s to %(image_path)s" +msgid "topic is %s." msgstr "" -#: cinder/image/s3.py:328 +#: cinder/openstack/common/rpc/impl_zmq.py:815 #, python-format -msgid "Failed to decrypt %(image_location)s to %(image_path)s" +msgid "rpc_zmq_matchmaker = %(orig)s is deprecated; use %(new)s instead" msgstr "" -#: cinder/image/s3.py:340 -#, python-format -msgid "Failed to untar %(image_location)s to %(image_path)s" +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." msgstr "" -#: cinder/image/s3.py:353 +#: cinder/openstack/common/rpc/matchmaker.py:89 +#, fuzzy +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "xenapi.fake não tem uma implementação para %s" + +#: cinder/openstack/common/rpc/matchmaker.py:239 #, python-format -msgid "Failed to upload %(image_location)s to %(image_path)s" +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." 
msgstr "" -#: cinder/image/s3.py:379 +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#: cinder/openstack/common/rpc/matchmaker_ring.py:79 +#: cinder/openstack/common/rpc/matchmaker_ring.py:97 #, python-format -msgid "Failed to decrypt private key: %s" +msgid "No key defining hosts for topic '%s', see ringfile" msgstr "" -#: cinder/image/s3.py:387 +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:54 #, python-format -msgid "Failed to decrypt initialization vector: %s" +msgid "extra_spec requirement '%(req)s' does not match '%(cap)s'" msgstr "" -#: cinder/image/s3.py:398 +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:67 #, python-format -msgid "Failed to decrypt image file %(image_file)s: %(err)s" +msgid "%(host_state)s fails resource_type extra_specs requirements" msgstr "" -#: cinder/image/s3.py:410 -msgid "Unsafe filenames in image" +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:43 +msgid "Re-scheduling is disabled." msgstr "" -#: cinder/ipv6/account_identifier.py:38 cinder/ipv6/rfc2462.py:34 +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:52 #, python-format -msgid "Bad mac for to_global_ipv6: %s" +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" msgstr "" -#: cinder/ipv6/account_identifier.py:40 cinder/ipv6/rfc2462.py:36 -#, python-format -msgid "Bad prefix for to_global_ipv6: %s" +#: cinder/scheduler/driver.py:69 +msgid "Must implement host_passes_filters" msgstr "" -#: cinder/ipv6/account_identifier.py:42 -#, python-format -msgid "Bad project_id for to_global_ipv6: %s" +#: cinder/scheduler/driver.py:74 +msgid "Must implement find_retype_host" +msgstr "" + +#: cinder/scheduler/driver.py:78 +msgid "Must implement a fallback schedule" msgstr "" -#: cinder/network/ldapdns.py:321 -msgid "This driver only supports type 'a' entries." 
+#: cinder/scheduler/driver.py:82 +msgid "Must implement schedule_create_volume" msgstr "" -#: cinder/network/linux_net.py:166 +#: cinder/scheduler/filter_scheduler.py:98 #, python-format -msgid "Attempted to remove chain %s which does not exist" +msgid "cannot place volume %(id)s on %(host)s" msgstr "" -#: cinder/network/linux_net.py:192 +#: cinder/scheduler/filter_scheduler.py:114 #, python-format -msgid "Unknown chain: %r" +msgid "No valid hosts for volume %(id)s with type %(type)s" msgstr "" -#: cinder/network/linux_net.py:215 +#: cinder/scheduler/filter_scheduler.py:125 #, python-format msgid "" -"Tried to remove rule that was not there: %(chain)r %(rule)r %(wrap)r " -"%(top)r" +"Current host not valid for volume %(id)s with type %(type)s, migration " +"not allowed" msgstr "" -#: cinder/network/linux_net.py:335 -msgid "IPTablesManager.apply completed with success" +#: cinder/scheduler/filter_scheduler.py:156 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" msgstr "" -#: cinder/network/linux_net.py:694 +#: cinder/scheduler/filter_scheduler.py:174 #, python-format -msgid "Hupping dnsmasq threw %s" +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" msgstr "" -#: cinder/network/linux_net.py:696 +#: cinder/scheduler/filter_scheduler.py:207 #, python-format -msgid "Pid %d is stale, relaunching dnsmasq" -msgstr "Pid %d está ultrapassado, reiniciando dnsmasq" +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" -#: cinder/network/linux_net.py:756 +#: cinder/scheduler/filter_scheduler.py:259 #, python-format -msgid "killing radvd threw %s" +msgid "Filtered %s" msgstr "" -#: cinder/network/linux_net.py:758 +#: cinder/scheduler/filter_scheduler.py:276 #, python-format -msgid "Pid %d is stale, relaunching radvd" -msgstr "Pid %d está ultrapassado, reiniciando radvd" +msgid "Choosing %s" +msgstr "" -#: cinder/network/linux_net.py:967 +#: cinder/scheduler/host_manager.py:264 #, python-format -msgid "Starting VLAN inteface %s" -msgstr "Iniciando a VLAN %s" +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" -#: cinder/network/linux_net.py:999 +#: cinder/scheduler/host_manager.py:269 #, python-format -msgid "Starting Bridge interface for %s" -msgstr "Iniciando a Bridge para %s" - -#: cinder/network/linux_net.py:1142 -#, fuzzy, python-format -msgid "Starting bridge %s " -msgstr "Iniciando a Bridge para %s" - -#: cinder/network/linux_net.py:1149 -#, fuzzy, python-format -msgid "Done starting bridge %s" -msgstr "Removendo o registro da imagem %s" +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" -#: cinder/network/linux_net.py:1167 +#: cinder/scheduler/host_manager.py:294 #, python-format -msgid "Failed unplugging gateway interface '%s'" +msgid "volume service is down or disabled. (host: %s)" msgstr "" -#: cinder/network/linux_net.py:1170 -#, python-format -msgid "Unplugged gateway interface '%s'" +#: cinder/scheduler/manager.py:63 +msgid "" +"ChanceScheduler and SimpleScheduler have been deprecated due to lack of " +"support for advanced features like: volume types, volume encryption, QoS " +"etc. These two schedulers can be fully replaced by FilterScheduler with " +"certain combination of filters and weighers." 
msgstr "" -#: cinder/network/manager.py:291 -#, python-format -msgid "Fixed ip %(fixed_ip_id)s not found" +#: cinder/scheduler/manager.py:98 cinder/scheduler/manager.py:100 +msgid "Failed to create scheduler manager volume flow" msgstr "" -#: cinder/network/manager.py:300 cinder/network/manager.py:496 -#, python-format -msgid "Interface %(interface)s not found" +#: cinder/scheduler/manager.py:159 +msgid "New volume type not specified in request_spec." msgstr "" -#: cinder/network/manager.py:315 +#: cinder/scheduler/manager.py:174 #, python-format -msgid "floating IP allocation for instance |%s|" +msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." msgstr "" -#: cinder/network/manager.py:353 +#: cinder/scheduler/manager.py:192 #, python-format -msgid "floating IP deallocation for instance |%s|" +msgid "Failed to schedule_%(method)s: %(ex)s" msgstr "" -#: cinder/network/manager.py:386 +#: cinder/scheduler/scheduler_options.py:68 #, python-format -msgid "Address |%(address)s| is not allocated" +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" msgstr "" -#: cinder/network/manager.py:390 +#: cinder/scheduler/scheduler_options.py:78 #, python-format -msgid "Address |%(address)s| is not allocated to your project |%(project)s|" +msgid "Could not decode scheduler options: '%s'" msgstr "" -#: cinder/network/manager.py:402 -#, python-format -msgid "Quota exceeded for %s, tried to allocate address" +#: cinder/scheduler/filters/capacity_filter.py:43 +msgid "Free capacity not set: volume node info collection broken." msgstr "" -#: cinder/network/manager.py:614 +#: cinder/scheduler/filters/capacity_filter.py:57 #, python-format msgid "" -"Database inconsistency: DNS domain |%s| is registered in the Cinder db but " -"not visible to either the floating or instance DNS driver. It will be " -"ignored." +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" msgstr "" -#: cinder/network/manager.py:660 -#, python-format -msgid "Domain |%(domain)s| already exists, changing zone to |%(av_zone)s|." +#: cinder/scheduler/flows/create_volume.py:53 +msgid "No volume_id provided to populate a request_spec from" msgstr "" -#: cinder/network/manager.py:670 +#: cinder/scheduler/flows/create_volume.py:116 #, python-format -msgid "Domain |%(domain)s| already exists, changing project to |%(project)s|." 
+msgid "Failed to schedule_create_volume: %(cause)s" msgstr "" -#: cinder/network/manager.py:778 +#: cinder/scheduler/flows/create_volume.py:135 #, python-format -msgid "Disassociated %s stale fixed ip(s)" +msgid "Failed notifying on %(topic)s payload %(payload)s" msgstr "" -#: cinder/network/manager.py:782 -msgid "setting network host" +#: cinder/tests/fake_driver.py:57 cinder/volume/driver.py:784 +#, python-format +msgid "FAKE ISCSI: %s" msgstr "" -#: cinder/network/manager.py:896 +#: cinder/tests/fake_driver.py:76 cinder/volume/driver.py:884 #, python-format -msgid "network allocations for instance |%s|" +msgid "FAKE ISER: %s" msgstr "" -#: cinder/network/manager.py:901 -#, python-format -msgid "networks retrieved for instance |%(instance_id)s|: |%(networks)s|" +#: cinder/tests/fake_driver.py:97 +msgid "local_path not implemented" msgstr "" -#: cinder/network/manager.py:930 +#: cinder/tests/fake_driver.py:124 cinder/tests/fake_driver.py:129 #, python-format -msgid "network deallocation for instance |%s|" +msgid "LoggingVolumeDriver: %s" msgstr "" -#: cinder/network/manager.py:1152 +#: cinder/tests/fake_utils.py:70 #, python-format -msgid "" -"instance-dns-zone is |%(domain)s|, which is in availability zone " -"|%(zone)s|. Instance |%(instance)s| is in zone |%(zone2)s|. No DNS record" -" will be created." +msgid "Faking execution of cmd (subprocess): %s" msgstr "" -#: cinder/network/manager.py:1227 +#: cinder/tests/fake_utils.py:78 #, python-format -msgid "Unable to release %s because vif doesn't exist." +msgid "Faked command matched %s" msgstr "" -#: cinder/network/manager.py:1244 +#: cinder/tests/fake_utils.py:94 #, python-format -msgid "Leased IP |%(address)s|" +msgid "Faked command raised an exception %s" msgstr "" -#: cinder/network/manager.py:1248 +#: cinder/tests/fake_utils.py:97 #, python-format -msgid "IP %s leased that is not associated" +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" msgstr "" -#: cinder/network/manager.py:1256 +#: cinder/tests/test_misc.py:58 #, python-format -msgid "IP |%s| leased that isn't allocated" +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" msgstr "" -#: cinder/network/manager.py:1261 +#: cinder/tests/test_netapp_nfs.py:360 #, python-format -msgid "Released IP |%(address)s|" +msgid "Share %(share)s and file name %(file_name)s" msgstr "" -#: cinder/network/manager.py:1265 -#, python-format -msgid "IP %s released that is not associated" +#: cinder/tests/test_rbd.py:768 cinder/volume/drivers/rbd.py:175 +msgid "flush() not supported in this version of librbd" msgstr "" -#: cinder/network/manager.py:1268 +#: cinder/tests/test_storwize_svc.py:260 #, python-format -msgid "IP %s released that was not leased" +msgid "unrecognized argument %s" msgstr "" -#: cinder/network/manager.py:1331 -msgid "cidr already in use" +#: cinder/tests/test_storwize_svc.py:1507 +#, python-format +msgid "Run CLI command: %s" msgstr "" -#: cinder/network/manager.py:1334 +#: cinder/tests/test_storwize_svc.py:1510 #, python-format -msgid "requested cidr (%(cidr)s) conflicts with existing supernet (%(super)s)" +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" msgstr "" -#: cinder/network/manager.py:1345 +#: cinder/tests/test_storwize_svc.py:1515 #, python-format msgid "" -"requested cidr (%(cidr)s) conflicts with existing smaller cidr " -"(%(smaller)s)" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:60 +#, python-format +msgid "Given 
data: %s" msgstr "" -#: cinder/network/manager.py:1404 -msgid "Network already exists!" +#: cinder/tests/test_volume_types.py:61 +#, python-format +msgid "Result data: %s" msgstr "" -#: cinder/network/manager.py:1423 +#: cinder/tests/test_xiv_ds8k.py:102 #, python-format -msgid "Network must be disassociated from project %s before delete" +msgid "Volume not found for instance %(instance_id)s." msgstr "" -#: cinder/network/manager.py:1832 -msgid "" -"The sum between the number of networks and the vlan start cannot be " -"greater than 4094" +#: cinder/tests/api/contrib/test_backups.py:741 +msgid "Invalid input" msgstr "" -#: cinder/network/manager.py:1839 +#: cinder/tests/integrated/test_login.py:29 +#, fuzzy, python-format +msgid "volume: %s" +msgstr "Desanexar volume %s" + +#: cinder/tests/integrated/api/client.py:32 #, python-format msgid "" -"The network range is not big enough to fit %(num_networks)s. Network size" -" is %(network_size)s" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" msgstr "" -#: cinder/network/minidns.py:65 -msgid "This driver only supports type 'a'" +#: cinder/tests/integrated/api/client.py:42 +msgid "Authentication error" msgstr "" -#: cinder/network/quantum/client.py:154 -msgid "Tenant ID not set" +#: cinder/tests/integrated/api/client.py:50 +msgid "Authorization error" msgstr "" -#: cinder/network/quantum/client.py:180 -#, python-format -msgid "Quantum Client Request: %(method)s %(action)s" +#: cinder/tests/integrated/api/client.py:58 +msgid "Item not found" msgstr "" -#: cinder/network/quantum/client.py:196 +#: cinder/tests/integrated/api/client.py:100 #, python-format -msgid "Quantum entity not found: %s" +msgid "Doing %(method)s on %(relative_url)s" msgstr "" -#: cinder/network/quantum/client.py:206 +#: cinder/tests/integrated/api/client.py:103 #, python-format -msgid "Server %(status_code)s error: %(data)s" +msgid "Body: %s" msgstr "" -#: cinder/network/quantum/client.py:210 +#: cinder/tests/integrated/api/client.py:121 #, python-format -msgid "Unable to connect to server. Got error: %s" +msgid "%(auth_uri)s => code %(http_status)s" msgstr "" -#: cinder/network/quantum/client.py:228 +#: cinder/tests/integrated/api/client.py:148 #, python-format -msgid "unable to deserialize object of type = '%s'" -msgstr "" - -#: cinder/network/quantum/manager.py:162 -msgid "QuantumManager does not use 'multi_host' parameter." +msgid "%(relative_uri)s => code %(http_status)s" msgstr "" -#: cinder/network/quantum/manager.py:166 -msgid "QuantumManager requires that only one network is created per call" +#: cinder/tests/integrated/api/client.py:159 +msgid "Unexpected status code" msgstr "" -#: cinder/network/quantum/manager.py:176 -msgid "QuantumManager does not use 'vlan_start' parameter." +#: cinder/tests/integrated/api/client.py:166 +#, python-format +msgid "Decoding JSON: %s" msgstr "" -#: cinder/network/quantum/manager.py:182 -msgid "QuantumManager does not use 'vpn_start' parameter." +#: cinder/transfer/api.py:68 +msgid "Volume in unexpected state" msgstr "" -#: cinder/network/quantum/manager.py:186 -msgid "QuantumManager does not use 'bridge' parameter." +#: cinder/transfer/api.py:102 cinder/volume/api.py:367 +msgid "status must be available" msgstr "" -#: cinder/network/quantum/manager.py:190 -msgid "QuantumManager does not use 'bridge_interface' parameter." 
-msgstr "" +#: cinder/transfer/api.py:119 +#, fuzzy, python-format +msgid "Failed to create transfer record for %s" +msgstr "Falha ao obter metadados para o ip: %s" -#: cinder/network/quantum/manager.py:195 -msgid "QuantumManager requires a valid (.1) gateway address." +#: cinder/transfer/api.py:136 +#, python-format +msgid "Attempt to transfer %s with invalid auth key." msgstr "" -#: cinder/network/quantum/manager.py:204 +#: cinder/transfer/api.py:156 cinder/volume/flows/api/create_volume.py:508 #, python-format msgid "" -"Unable to find existing quantum network for tenant '%(q_tenant_id)s' with" -" net-id '%(quantum_net_id)s'" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" msgstr "" -#: cinder/network/quantum/manager.py:301 +#: cinder/transfer/api.py:182 #, python-format -msgid "network allocations for instance %s" +msgid "Failed to update quota donating volumetransfer id %s" msgstr "" -#: cinder/network/quantum/manager.py:588 +#: cinder/transfer/api.py:199 #, python-format -msgid "" -"port deallocation failed for instance: |%(instance_id)s|, port_id: " -"|%(port_id)s|" +msgid "Volume %s has been transferred." msgstr "" -#: cinder/network/quantum/manager.py:606 +#: cinder/volume/api.py:143 #, python-format -msgid "" -"ipam deallocation failed for instance: |%(instance_id)s|, vif_uuid: " -"|%(vif_uuid)s|" +msgid "Unable to query if %s is in the availability zone set" msgstr "" -#: cinder/network/quantum/melange_connection.py:96 -#, python-format -msgid "Server returned error: %s" +#: cinder/volume/api.py:171 cinder/volume/api.py:173 +msgid "Failed to create api volume flow" msgstr "" -#: cinder/network/quantum/melange_connection.py:98 -msgid "Connection error contacting melange service, retrying" +#: cinder/volume/api.py:202 +msgid "Failed to update quota for deleting volume" msgstr "" -#: cinder/network/quantum/melange_connection.py:108 +#: cinder/volume/api.py:214 #, python-format -msgid "" -"allocate IP on network |%(network_id)s| belonging to " -"|%(network_tenant_id)s| to this vif |%(vif_id)s| with mac " -"|%(mac_address)s| belonging to |%(project_id)s| " +msgid "Volume status must be available or error, but current status is: %s" msgstr "" -#: cinder/network/quantum/melange_ipam_lib.py:133 -msgid "get_project_and_global_net_ids must be called with a non-null project_id" +#: cinder/volume/api.py:224 +msgid "Volume cannot be deleted while migrating" msgstr "" -#: cinder/network/quantum/cinder_ipam_lib.py:75 -msgid "Error creating network entry" +#: cinder/volume/api.py:229 +#, python-format +msgid "Volume still has %d dependent snapshots" msgstr "" -#: cinder/network/quantum/cinder_ipam_lib.py:90 +#: cinder/volume/api.py:285 cinder/volume/api.py:350 +#: cinder/volume/qos_specs.py:240 cinder/volume/volume_types.py:67 #, python-format -msgid "No network with net_id = %s" +msgid "Searching by: %s" msgstr "" -#: cinder/network/quantum/cinder_ipam_lib.py:221 -#, python-format -msgid "No fixed IPs to deallocate for vif %s" +#: cinder/volume/api.py:370 +msgid "already attached" msgstr "" -#: cinder/network/quantum/quantum_connection.py:99 -#, python-format -msgid "Connecting interface %(interface_id)s to net %(net_id)s for %(tenant_id)s" +#: cinder/volume/api.py:377 +msgid "status must be in-use to detach" msgstr "" -#: cinder/network/quantum/quantum_connection.py:113 -#, python-format -msgid "Deleting port %(port_id)s on net %(net_id)s for %(tenant_id)s" +#: cinder/volume/api.py:388 +#, fuzzy +msgid "Volume status 
must be available to reserve" +msgstr "O status do volume parece estar disponível" + +#: cinder/volume/api.py:464 +msgid "Snapshot cannot be created while volume is migrating" msgstr "" -#: cinder/notifier/api.py:115 -#, python-format -msgid "%s not in valid priorities" +#: cinder/volume/api.py:468 +msgid "must be available" msgstr "" -#: cinder/notifier/api.py:130 +#: cinder/volume/api.py:490 #, python-format msgid "" -"Problem '%(e)s' attempting to send to notification system. " -"Payload=%(payload)s" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" msgstr "" -#: cinder/notifier/list_notifier.py:65 +#: cinder/volume/api.py:502 #, python-format -msgid "Problem '%(e)s' attempting to send to notification driver %(driver)s." +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" msgstr "" -#: cinder/notifier/rabbit_notifier.py:46 -#, python-format -msgid "Could not send notification to %(topic)s. Payload=%(message)s" +#: cinder/volume/api.py:553 +#, fuzzy +msgid "Volume Snapshot status must be available or error" +msgstr "O status do volume parece estar disponível" + +#: cinder/volume/api.py:581 cinder/volume/flows/api/create_volume.py:208 +msgid "Metadata property key blank" msgstr "" -#: cinder/rpc/amqp.py:146 -#, python-format -msgid "Returning exception %s to caller" -msgstr "Retornando exceção %s ao método de origem" +#: cinder/volume/api.py:585 +msgid "Metadata property key greater than 255 characters" +msgstr "" -#: cinder/rpc/amqp.py:188 -#, python-format -msgid "unpacked context: %s" -msgstr "conteúdo descompactado: %s" +#: cinder/volume/api.py:589 +msgid "Metadata property value greater than 255 characters" +msgstr "" -#: cinder/rpc/amqp.py:231 -#, python-format -msgid "received %s" -msgstr "recebido %s" +#: cinder/volume/api.py:720 cinder/volume/api.py:772 +#, fuzzy +msgid "Volume status must be available/in-use." +msgstr "O status do volume parece estar disponível" -#: cinder/rpc/amqp.py:236 -#, python-format -msgid "no method for message: %s" -msgstr "sem método para mensagem: %s" +#: cinder/volume/api.py:723 +msgid "Volume status is in-use." +msgstr "" -#: cinder/rpc/amqp.py:237 -#, python-format -msgid "No method for message: %s" -msgstr "Sem método para mensagem: %s" +#: cinder/volume/api.py:752 +msgid "Volume status must be available to extend." +msgstr "" -#: cinder/rpc/amqp.py:321 +#: cinder/volume/api.py:757 #, python-format -msgid "Making asynchronous call on %s ..." +msgid "" +"New size for extend must be greater than current size. (current: " +"%(size)s, extended: %(new_size)s)" msgstr "" -#: cinder/rpc/amqp.py:324 -#, python-format -msgid "MSG_ID is %s" -msgstr "MSG_ID é %s" +#: cinder/volume/api.py:778 +msgid "Volume is already part of an active migration" +msgstr "" + +#: cinder/volume/api.py:784 +msgid "volume must not have snapshots" +msgstr "" -#: cinder/rpc/amqp.py:346 +#: cinder/volume/api.py:797 #, python-format -msgid "Making asynchronous cast on %s..." +msgid "No available service named %s" msgstr "" -#: cinder/rpc/amqp.py:354 -msgid "Making asynchronous fanout cast..." +#: cinder/volume/api.py:803 +msgid "Destination host must be different than current host" msgstr "" -#: cinder/rpc/amqp.py:379 -#, python-format -msgid "Sending notification on %s..." +#: cinder/volume/api.py:833 +msgid "Source volume not mid-migration." 
msgstr "" -#: cinder/rpc/common.py:54 +#: cinder/volume/api.py:837 +msgid "Destination volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:842 #, python-format -msgid "" -"Remote error: %(exc_type)s %(value)s\n" -"%(traceback)s." +msgid "Destination has migration_status %(stat)s, expected %(exp)s." msgstr "" -#: cinder/rpc/common.py:71 -msgid "Timeout while waiting on RPC response." +#: cinder/volume/api.py:853 +msgid "Volume status must be available to update readonly flag." msgstr "" -#: cinder/rpc/impl_kombu.py:111 -msgid "Failed to process message... skipping it." +#: cinder/volume/api.py:862 +#, python-format +msgid "Unable to update type due to incorrect status on volume: %s" msgstr "" -#: cinder/rpc/impl_kombu.py:407 +#: cinder/volume/api.py:868 #, python-format -msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgid "Volume %s is already part of an active migration." msgstr "" -#: cinder/rpc/impl_kombu.py:430 +#: cinder/volume/api.py:874 #, python-format -msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgid "migration_policy must be 'on-demand' or 'never', passed: %s" msgstr "" -#: cinder/rpc/impl_kombu.py:466 +#: cinder/volume/api.py:887 #, python-format -msgid "" -"Unable to connect to AMQP server on %(hostname)s:%(port)d after " -"%(max_retries)d tries: %(err_str)s" +msgid "Invalid volume_type passed: %s" msgstr "" -#: cinder/rpc/impl_kombu.py:482 +#: cinder/volume/api.py:900 #, python-format -msgid "" -"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " -"again in %(sleep_time)d seconds." +msgid "New volume_type same as original: %s" +msgstr "" + +#: cinder/volume/api.py:915 +msgid "Retype cannot change encryption requirements" +msgstr "" + +#: cinder/volume/api.py:927 +msgid "Retype cannot change front-end qos specs for in-use volumes" msgstr "" -#: cinder/rpc/impl_kombu.py:533 cinder/rpc/impl_qpid.py:385 +#: cinder/volume/driver.py:189 cinder/volume/drivers/netapp/nfs.py:174 #, python-format -msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgid "Recovering from a failed execute. Try number %s" msgstr "" -#: cinder/rpc/impl_kombu.py:551 cinder/rpc/impl_qpid.py:400 +#: cinder/volume/driver.py:282 #, python-format -msgid "Timed out waiting for RPC response: %s" +msgid "copy_data_between_volumes %(src)s -> %(dest)s." msgstr "" -#: cinder/rpc/impl_kombu.py:555 cinder/rpc/impl_qpid.py:404 +#: cinder/volume/driver.py:295 cinder/volume/driver.py:309 #, python-format -msgid "Failed to consume message from queue: %s" +msgid "Failed to attach volume %(vol)s" msgstr "" -#: cinder/rpc/impl_kombu.py:589 cinder/rpc/impl_qpid.py:434 +#: cinder/volume/driver.py:327 #, python-format -msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgid "Failed to copy volume %(src)s to %(dest)d" msgstr "" -#: cinder/rpc/impl_qpid.py:341 +#: cinder/volume/driver.py:340 #, python-format -msgid "Unable to connect to AMQP server: %s " +msgid "copy_image_to_volume %s." msgstr "" -#: cinder/rpc/impl_qpid.py:346 +#: cinder/volume/driver.py:358 #, python-format -msgid "Connected to AMQP server on %s" +msgid "copy_volume_to_image %s." msgstr "" -#: cinder/rpc/impl_qpid.py:354 -msgid "Re-established AMQP queues" +#: cinder/volume/driver.py:394 +#, python-format +msgid "Unable to access the backend storage via the path %(path)s." msgstr "" -#: cinder/rpc/impl_qpid.py:412 -msgid "Error processing message. Skipping it." +#: cinder/volume/driver.py:433 +#, python-format +msgid "Creating a new backup for volume %s." 
msgstr "" -#: cinder/scheduler/chance.py:47 cinder/scheduler/simple.py:91 -#: cinder/scheduler/simple.py:143 -msgid "Is the appropriate service running?" +#: cinder/volume/driver.py:451 +#, python-format +msgid "Restoring backup %(backup)s to volume %(volume)s." msgstr "" -#: cinder/scheduler/chance.py:52 -msgid "Could not find another compute" +#: cinder/volume/driver.py:474 +msgid "Extend volume not implemented" msgstr "" -#: cinder/scheduler/driver.py:63 -#, python-format -msgid "Casted '%(method)s' to volume '%(host)s'" +#: cinder/volume/driver.py:533 cinder/volume/drivers/emc/emc_smis_iscsi.py:113 +msgid "ISCSI provider_location not stored, using discovery" msgstr "" -#: cinder/scheduler/driver.py:80 +#: cinder/volume/driver.py:546 #, python-format -msgid "Casted '%(method)s' to compute '%(host)s'" +msgid "ISCSI discovery attempt failed for:%s" msgstr "" -#: cinder/scheduler/driver.py:89 +#: cinder/volume/driver.py:548 #, python-format -msgid "Casted '%(method)s' to network '%(host)s'" +msgid "Error from iscsiadm -m discovery: %s" msgstr "" -#: cinder/scheduler/driver.py:107 +#: cinder/volume/driver.py:595 +#, fuzzy, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "Não é possível localizar o volume %s" + +#: cinder/volume/driver.py:599 cinder/volume/drivers/emc/emc_smis_iscsi.py:156 #, python-format -msgid "Casted '%(method)s' to %(topic)s '%(host)s'" +msgid "ISCSI Discovery: Found %s" msgstr "" -#: cinder/scheduler/driver.py:181 -msgid "Must implement a fallback schedule" +#: cinder/volume/driver.py:696 +msgid "The volume driver requires the iSCSI initiator name in the connector." msgstr "" -#: cinder/scheduler/driver.py:185 -msgid "Driver must implement schedule_prep_resize" +#: cinder/volume/driver.py:726 cinder/volume/driver.py:845 +#: cinder/volume/drivers/eqlx.py:247 cinder/volume/drivers/lvm.py:359 +#: cinder/volume/drivers/zadara.py:650 +#: cinder/volume/drivers/emc/emc_smis_common.py:859 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:235 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:602 +#: cinder/volume/drivers/netapp/iscsi.py:1032 +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/nexenta/iscsi.py:538 +#: cinder/volume/drivers/windows/windows.py:205 +msgid "Updating volume stats" msgstr "" -#: cinder/scheduler/driver.py:190 -msgid "Driver must implement schedule_run_instance" +#: cinder/volume/driver.py:924 +msgid "Driver must implement initialize_connection" msgstr "" -#: cinder/scheduler/driver.py:325 -msgid "Block migration can not be used with shared storage." +#: cinder/volume/manager.py:203 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." msgstr "" -#: cinder/scheduler/driver.py:330 -msgid "Live migration can not be used without shared storage." +#: cinder/volume/manager.py:209 +msgid "" +"ThinLVMVolumeDriver is deprecated, please configure LVMISCSIDriver and " +"lvm_type=thin. Continuing with those settings." msgstr "" -#: cinder/scheduler/driver.py:367 cinder/scheduler/driver.py:457 +#: cinder/volume/manager.py:228 #, python-format -msgid "host %(dest)s is not compatible with original host %(src)s." 
+msgid "Starting volume driver %(driver_name)s (%(version)s)" msgstr "" -#: cinder/scheduler/driver.py:416 +#: cinder/volume/manager.py:235 #, python-format -msgid "" -"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " -"memory(host:%(avail)s <= instance:%(mem_inst)s)" +msgid "Error encountered during initialization of driver: %(name)s" msgstr "" -#: cinder/scheduler/driver.py:472 +#: cinder/volume/manager.py:244 #, python-format -msgid "" -"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " -"disk(host:%(available)s <= instance:%(necessary)s)" +msgid "Re-exporting %s volumes" +msgstr "Re-exportando %s volumes" + +#: cinder/volume/manager.py:257 +#, python-format +msgid "Failed to re-export volume %s: setting to error state" msgstr "" -#: cinder/scheduler/filter_scheduler.py:51 +#: cinder/volume/manager.py:264 #, python-format -msgid "No host selection for %s defined." +msgid "volume %s stuck in a downloading state" msgstr "" -#: cinder/scheduler/filter_scheduler.py:64 +#: cinder/volume/manager.py:271 +#, python-format +msgid "volume %s: skipping export" +msgstr "volume %s: ignorando export" + +#: cinder/volume/manager.py:273 #, python-format -msgid "Attempting to build %(num_instances)d instance(s)" +msgid "" +"Error encountered during re-exporting phase of driver initialization: " +"%(name)s" msgstr "" -#: cinder/scheduler/filter_scheduler.py:157 -msgid "Scheduler only understands Compute nodes (for now)" +#: cinder/volume/manager.py:283 +msgid "Resuming any in progress delete operations" msgstr "" -#: cinder/scheduler/filter_scheduler.py:199 +#: cinder/volume/manager.py:286 #, python-format -msgid "Filtered %(hosts)s" +msgid "Resuming delete on volume: %s" msgstr "" -#: cinder/scheduler/filter_scheduler.py:209 -#, python-format -msgid "Weighted %(weighted_host)s" +#: cinder/volume/manager.py:328 cinder/volume/manager.py:330 +msgid "Failed to create manager volume flow" msgstr "" -#: cinder/scheduler/host_manager.py:144 +#: cinder/volume/manager.py:374 cinder/volume/manager.py:391 #, python-format -msgid "Host filter fails for ignored host %(host)s" -msgstr "" +msgid "volume %s: deleting" +msgstr "volume %s: removendo" + +#: cinder/volume/manager.py:380 +#, fuzzy +msgid "volume is not local to this node" +msgstr "O volume não pertence à este node" -#: cinder/scheduler/host_manager.py:151 +#: cinder/volume/manager.py:389 #, python-format -msgid "Host filter fails for non-forced host %(host)s" -msgstr "" +msgid "volume %s: removing export" +msgstr "volume %s: removendo export" -#: cinder/scheduler/host_manager.py:157 +#: cinder/volume/manager.py:394 #, python-format -msgid "Host filter function %(func)s failed for %(host)s" +msgid "Cannot delete volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:422 +msgid "Failed to update usages deleting volume" msgstr "" -#: cinder/scheduler/host_manager.py:163 +#: cinder/volume/manager.py:427 #, python-format -msgid "Host filter passes for %(host)s" +msgid "volume %s: glance metadata deleted" msgstr "" -#: cinder/scheduler/host_manager.py:272 +#: cinder/volume/manager.py:430 #, python-format -msgid "Received %(service_name)s service update from %(host)s." 
+msgid "no glance metadata found for volume %s" msgstr "" -#: cinder/scheduler/host_manager.py:313 -msgid "host_manager only implemented for 'compute'" +#: cinder/volume/manager.py:434 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "volume %s: remoção realizada com sucesso" + +#: cinder/volume/manager.py:451 +#, python-format +msgid "snapshot %s: creating" msgstr "" -#: cinder/scheduler/host_manager.py:323 +#: cinder/volume/manager.py:462 #, python-format -msgid "No service for compute ID %s" +msgid "snapshot %(snap_id)s: creating" msgstr "" -#: cinder/scheduler/manager.py:85 +#: cinder/volume/manager.py:490 #, python-format msgid "" -"Driver Method %(driver_method_name)s missing: %(e)s. Reverting to " -"schedule()" +"Failed updating %(snapshot_id)s metadata using the provided volumes " +"%(volume_id)s metadata" msgstr "" -#: cinder/scheduler/manager.py:150 +#: cinder/volume/manager.py:496 #, python-format -msgid "Failed to schedule_%(method)s: %(ex)s" +msgid "snapshot %s: created successfully" msgstr "" -#: cinder/scheduler/manager.py:159 +#: cinder/volume/manager.py:508 cinder/volume/manager.py:518 #, python-format -msgid "Setting instance %(instance_uuid)s to %(state)s state." +msgid "snapshot %s: deleting" msgstr "" -#: cinder/scheduler/scheduler_options.py:66 +#: cinder/volume/manager.py:526 #, python-format -msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgid "Cannot delete snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:556 +msgid "Failed to update usages deleting snapshot" msgstr "" -#: cinder/scheduler/scheduler_options.py:75 +#: cinder/volume/manager.py:559 #, python-format -msgid "Could not decode scheduler options: '%(e)s'" +msgid "snapshot %s: deleted successfully" msgstr "" -#: cinder/scheduler/simple.py:87 -msgid "Not enough allocatable CPU cores remaining" +#: cinder/volume/manager.py:579 +msgid "being attached by another instance" msgstr "" -#: cinder/scheduler/simple.py:137 -msgid "Not enough allocatable volume gigabytes remaining" +#: cinder/volume/manager.py:583 +msgid "being attached by another host" msgstr "" -#: cinder/scheduler/filters/core_filter.py:45 -msgid "VCPUs not set; assuming CPU collection broken" +#: cinder/volume/manager.py:587 +msgid "being attached by different mode" msgstr "" -#: cinder/tests/fake_utils.py:72 -#, python-format -msgid "Faking execution of cmd (subprocess): %s" +#: cinder/volume/manager.py:590 +msgid "status must be available or attaching" msgstr "" -#: cinder/tests/fake_utils.py:80 +#: cinder/volume/manager.py:698 #, python-format -msgid "Faked command matched %s" +msgid "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" msgstr "" -#: cinder/tests/fake_utils.py:96 +#: cinder/volume/manager.py:760 #, python-format -msgid "Faked command raised an exception %s" +msgid "Unable to fetch connection information from backend: %(err)s" msgstr "" -#: cinder/tests/fake_utils.py:101 +#: cinder/volume/manager.py:807 #, python-format -msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgid "Unable to terminate volume connection: %(err)s" msgstr "" -#: cinder/tests/fakelibvirt.py:784 -msgid "Please extend mock libvirt module to support flags" +#: cinder/volume/manager.py:854 +msgid "failed to create new_volume on destination host" msgstr "" -#: cinder/tests/fakelibvirt.py:790 -msgid "Please extend fake libvirt module to support this auth method" +#: cinder/volume/manager.py:857 +msgid "timeout creating new_volume on destination host" msgstr 
"" -#: cinder/tests/test_compute.py:365 cinder/tests/test_compute.py:1419 +#: cinder/volume/manager.py:880 #, python-format -msgid "Running instances: %s" +msgid "Failed to copy volume %(vol1)s to %(vol2)s" msgstr "" -#: cinder/tests/test_compute.py:371 +#: cinder/volume/manager.py:909 #, python-format -msgid "After terminating instances: %s" +msgid "" +"migrate_volume_completion: completing migration for volume %(vol1)s " +"(temporary volume %(vol2)s" msgstr "" -#: cinder/tests/test_compute.py:589 -msgid "Internal error" +#: cinder/volume/manager.py:921 +#, python-format +msgid "" +"migrate_volume_completion is cleaning up an error for volume %(vol1)s " +"(temporary volume %(vol2)s" msgstr "" -#: cinder/tests/test_compute.py:1430 +#: cinder/volume/manager.py:940 #, python-format -msgid "After force-killing instances: %s" +msgid "Failed to delete migration source vol %(vol)s: %(err)s" msgstr "" -#: cinder/tests/test_misc.py:92 +#: cinder/volume/manager.py:976 #, python-format -msgid "" -"The following migrations are missing a downgrade:\n" -"\t%s" +msgid "volume %s: calling driver migrate_volume" msgstr "" -#: cinder/tests/test_cinder_manage.py:169 -msgid "id" +#: cinder/volume/manager.py:1016 +msgid "Updating volume status" msgstr "" -#: cinder/tests/test_cinder_manage.py:170 -msgid "IPv4" +#: cinder/volume/manager.py:1024 +#, python-format +msgid "" +"Unable to update stats, %(driver_name)s -%(driver_version)s " +"%(config_group)s driver is uninitialized." msgstr "" -#: cinder/tests/test_cinder_manage.py:171 -msgid "IPv6" +#: cinder/volume/manager.py:1044 +#, python-format +msgid "Notification {%s} received" msgstr "" -#: cinder/tests/test_cinder_manage.py:172 -msgid "start address" +#: cinder/volume/manager.py:1091 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to extend volume by %(s_size)sG, " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" msgstr "" -#: cinder/tests/test_cinder_manage.py:173 -msgid "DNS1" +#: cinder/volume/manager.py:1103 +#, python-format +msgid "volume %s: extending" msgstr "" -#: cinder/tests/test_cinder_manage.py:174 -msgid "DNS2" +#: cinder/volume/manager.py:1105 +#, python-format +msgid "volume %s: extended successfully" msgstr "" -#: cinder/tests/test_cinder_manage.py:175 -msgid "VlanID" +#: cinder/volume/manager.py:1107 +#, python-format +msgid "volume %s: Error trying to extend volume" msgstr "" -#: cinder/tests/test_cinder_manage.py:176 -msgid "project" +#: cinder/volume/manager.py:1169 +msgid "Failed to update usages while retyping volume." msgstr "" -#: cinder/tests/test_cinder_manage.py:177 -msgid "uuid" +#: cinder/volume/manager.py:1170 +msgid "Failed to get old volume type quota reservations" msgstr "" -#: cinder/tests/test_volume.py:216 +#: cinder/volume/manager.py:1190 #, python-format -msgid "Target %s allocated" +msgid "Volume %s: retyped succesfully" msgstr "" -#: cinder/tests/test_volume.py:468 +#: cinder/volume/manager.py:1193 #, python-format -msgid "Cannot confirm exported volume id:%s." +msgid "" +"Volume %s: driver error when trying to retype, falling back to generic " +"mechanism." msgstr "" -#: cinder/tests/test_volume_types.py:58 -#, python-format -msgid "Given data: %s" +#: cinder/volume/manager.py:1204 +msgid "Retype requires migration but is not allowed." msgstr "" -#: cinder/tests/test_volume_types.py:59 -#, python-format -msgid "Result data: %s" +#: cinder/volume/manager.py:1212 +msgid "Volume must not have snapshots." 
msgstr "" -#: cinder/tests/test_xenapi.py:626 +#: cinder/volume/qos_specs.py:57 #, python-format -msgid "Creating files in %s to simulate guest agent" +msgid "Valid consumer of QoS specs are: %s" msgstr "" -#: cinder/tests/test_xenapi.py:637 +#: cinder/volume/qos_specs.py:84 cinder/volume/qos_specs.py:105 +#: cinder/volume/qos_specs.py:155 cinder/volume/qos_specs.py:197 +#: cinder/volume/qos_specs.py:211 cinder/volume/qos_specs.py:225 +#: cinder/volume/volume_types.py:43 #, python-format -msgid "Removing simulated guest agent files in %s" +msgid "DB error: %s" msgstr "" -#: cinder/tests/api/openstack/compute/test_servers.py:2144 -#, python-format -msgid "Quota exceeded: code=%(code)s" +#: cinder/volume/qos_specs.py:123 cinder/volume/qos_specs.py:140 +#: cinder/volume/qos_specs.py:272 cinder/volume/volume_types.py:52 +#: cinder/volume/volume_types.py:99 +msgid "id cannot be None" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:57 +#: cinder/volume/qos_specs.py:156 #, python-format -msgid "_create: %s" +msgid "Failed to get all associations of qos specs %s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:66 +#: cinder/volume/qos_specs.py:189 #, python-format -msgid "_delete: %s" +msgid "" +"Type %(type_id)s is already associated with another qos specs: " +"%(qos_specs_id)s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:75 +#: cinder/volume/qos_specs.py:198 #, python-format -msgid "_get: %s" +msgid "Failed to associate qos specs %(id)s with type: %(vol_type_id)s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:85 +#: cinder/volume/qos_specs.py:212 #, python-format -msgid "_get_all: %s" +msgid "Failed to disassociate qos specs %(id)s with type: %(vol_type_id)s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:125 +#: cinder/volume/qos_specs.py:226 #, python-format -msgid "test_snapshot_create: param=%s" +msgid "Failed to disassociate qos specs %s." msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:134 -#, python-format -msgid "test_snapshot_create: resp_dict=%s" +#: cinder/volume/qos_specs.py:284 cinder/volume/volume_types.py:111 +msgid "name cannot be None" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:156 +#: cinder/volume/utils.py:144 #, python-format -msgid "test_snapshot_create_force: param=%s" +msgid "" +"Incorrect value error: %(blocksize)s, it may indicate that " +"'volume_dd_blocksize' was configured incorrectly. Fall back to default." msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:165 +#: cinder/volume/volume_types.py:130 #, python-format -msgid "test_snapshot_create_force: resp_dict=%s" +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:205 +#: cinder/volume/drivers/block_device.py:131 +#: cinder/volume/drivers/block_device.py:143 cinder/volume/drivers/lvm.py:654 +#: cinder/volume/drivers/lvm.py:669 #, python-format -msgid "test_snapshot_show: resp=%s" +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:231 +#: cinder/volume/drivers/block_device.py:157 cinder/volume/drivers/lvm.py:687 #, python-format -msgid "test_snapshot_detail: resp_dict=%s" +msgid "" +"Skipping remove_export. 
No iscsi_target is presently exported for volume:" +" %s" msgstr "" -#: cinder/tests/integrated/test_login.py:31 +#: cinder/volume/drivers/block_device.py:183 cinder/volume/drivers/lvm.py:483 #, python-format -msgid "flavor: %s" +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" msgstr "" -#: cinder/tests/integrated/api/client.py:38 +#: cinder/volume/drivers/block_device.py:200 cinder/volume/drivers/lvm.py:504 #, python-format -msgid "" -"%(message)s\n" -"Status Code: %(_status)s\n" -"Body: %(_body)s" +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" msgstr "" -#: cinder/tests/integrated/api/client.py:47 -msgid "Authentication error" +#: cinder/volume/drivers/block_device.py:272 cinder/volume/drivers/lvm.py:227 +#, python-format +msgid "Performing secure delete on volume: %s" msgstr "" -#: cinder/tests/integrated/api/client.py:55 -msgid "Authorization error" +#: cinder/volume/drivers/block_device.py:287 +#, python-format +msgid "Error unrecognized volume_clear option: %s" msgstr "" -#: cinder/tests/integrated/api/client.py:63 -msgid "Item not found" +#: cinder/volume/drivers/block_device.py:311 cinder/volume/drivers/lvm.py:300 +#: cinder/volume/drivers/zadara.py:509 cinder/volume/drivers/nexenta/nfs.py:189 +#, fuzzy, python-format +msgid "Creating clone of volume: %s" +msgstr "Criar volume de %s GB" + +#: cinder/volume/drivers/block_device.py:380 +msgid "No free disk" msgstr "" -#: cinder/tests/integrated/api/client.py:105 -#, python-format -msgid "Doing %(method)s on %(relative_url)s" +#: cinder/volume/drivers/block_device.py:393 +msgid "No big enough free disk" msgstr "" -#: cinder/tests/integrated/api/client.py:107 +#: cinder/volume/drivers/coraid.py:84 #, python-format -msgid "Body: %s" +msgid "Invalid ESM url scheme \"%s\". Supported https only." msgstr "" -#: cinder/tests/integrated/api/client.py:125 -#, python-format -msgid "%(auth_uri)s => code %(http_status)s" +#: cinder/volume/drivers/coraid.py:111 +msgid "Invalid REST handle name. Expected path." msgstr "" -#: cinder/tests/integrated/api/client.py:151 +#: cinder/volume/drivers/coraid.py:134 #, python-format -msgid "%(relative_uri)s => code %(http_status)s" +msgid "Call to json.loads() failed: %(ex)s. Response: %(resp)s" msgstr "" -#: cinder/tests/integrated/api/client.py:161 -msgid "Unexpected status code" +#: cinder/volume/drivers/coraid.py:224 +msgid "Session is expired. Relogin on ESM." msgstr "" -#: cinder/tests/integrated/api/client.py:168 -#, python-format -msgid "Decoding JSON: %s" +#: cinder/volume/drivers/coraid.py:244 +msgid "Reply is empty." msgstr "" -#: cinder/tests/rpc/common.py:133 -#, python-format -msgid "Nested received %(queue)s, %(value)s" +#: cinder/volume/drivers/coraid.py:246 +msgid "Error message is empty." msgstr "" -#: cinder/tests/rpc/common.py:142 +#: cinder/volume/drivers/coraid.py:284 #, python-format -msgid "Nested return %s" +msgid "Coraid Appliance ping failed: %s" msgstr "" -#: cinder/tests/rpc/common.py:160 -msgid "RPC backend does not support timeouts" +#: cinder/volume/drivers/coraid.py:297 +#, python-format +msgid "Volume \"%(name)s\" created with VSX LUN \"%(lun)s\"" msgstr "" -#: cinder/tests/rpc/common.py:227 cinder/tests/rpc/common.py:233 +#: cinder/volume/drivers/coraid.py:311 #, python-format -msgid "Received %s" +msgid "Volume \"%s\" deleted." 
msgstr "" -#: cinder/virt/connection.py:85 -msgid "Failed to open connection to the hypervisor" -msgstr "Falha ao abrir a conexão com o hypervisor" +#: cinder/volume/drivers/coraid.py:315 +#, python-format +msgid "Resize volume \"%(name)s\" to %(size)s GB." +msgstr "" -#: cinder/virt/fake.py:270 cinder/virt/xenapi_conn.py:396 -#: cinder/virt/baremetal/proxy.py:716 cinder/virt/libvirt/connection.py:2045 +#: cinder/volume/drivers/coraid.py:319 #, python-format -msgid "Compute_service record created for %s " +msgid "Repository for volume \"%(name)s\" found: \"%(repo)s\"" msgstr "" -#: cinder/virt/fake.py:273 cinder/virt/xenapi_conn.py:399 -#: cinder/virt/baremetal/proxy.py:720 cinder/virt/libvirt/connection.py:2048 +#: cinder/volume/drivers/coraid.py:333 #, python-format -msgid "Compute_service record updated for %s " +msgid "Volume \"%(name)s\" resized. New size is %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:385 +msgid "Cannot create clone volume in different repository." msgstr "" -#: cinder/virt/firewall.py:130 +#: cinder/volume/drivers/coraid.py:505 #, python-format -msgid "Attempted to unfilter instance %s which is not filtered" +msgid "Initialize connection %(shelf)s/%(lun)s for %(name)s" msgstr "" -#: cinder/virt/firewall.py:137 +#: cinder/volume/drivers/eqlx.py:139 #, python-format -msgid "Filters added to instance %s" +msgid "" +"CLI output\n" +"%s" msgstr "" -#: cinder/virt/firewall.py:139 -msgid "Provider Firewall Rules refreshed" +#: cinder/volume/drivers/eqlx.py:154 +msgid "Reading CLI MOTD" msgstr "" -#: cinder/virt/firewall.py:291 +#: cinder/volume/drivers/eqlx.py:158 #, python-format -msgid "Adding security group rule: %r" +msgid "Setting CLI terminal width: '%s'" msgstr "" -#: cinder/virt/firewall.py:403 cinder/virt/xenapi/firewall.py:87 +#: cinder/volume/drivers/eqlx.py:162 #, python-format -msgid "Adding provider rule: %s" +msgid "Sending CLI command: '%s'" msgstr "" -#: cinder/virt/images.py:86 -msgid "'qemu-img info' parsing failed." +#: cinder/volume/drivers/eqlx.py:169 +msgid "Error executing EQL command" msgstr "" -#: cinder/virt/images.py:92 +#: cinder/volume/drivers/eqlx.py:199 #, python-format -msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgid "EQL-driver: executing \"%s\"" msgstr "" -#: cinder/virt/images.py:104 +#: cinder/volume/drivers/eqlx.py:208 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:383 #, python-format -msgid "Converted to raw, but format is now %s" +msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" msgstr "" -#: cinder/virt/vmwareapi_conn.py:105 -msgid "" -"Must specify vmwareapi_host_ip,vmwareapi_host_username and " -"vmwareapi_host_password to useconnection_type=vmwareapi" +#: cinder/volume/drivers/eqlx.py:215 cinder/volume/drivers/san/san.py:149 +#, fuzzy, python-format +msgid "Error running SSH command: %s" +msgstr "Erro inesperado ao executar o comando." 
+ +#: cinder/volume/drivers/eqlx.py:282 +#, python-format +msgid "Volume %s does not exist, it may have already been deleted" msgstr "" -#: cinder/virt/vmwareapi_conn.py:276 +#: cinder/volume/drivers/eqlx.py:300 #, python-format -msgid "In vmwareapi:_create_session, got this exception: %s" +msgid "EQL-driver: Setup is complete, group IP is %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:304 +msgid "Failed to setup the Dell EqualLogic driver" msgstr "" -#: cinder/virt/vmwareapi_conn.py:359 +#: cinder/volume/drivers/eqlx.py:320 #, python-format -msgid "In vmwareapi:_call_method, got this exception: %s" +msgid "Failed to create volume %s" msgstr "" -#: cinder/virt/vmwareapi_conn.py:398 +#: cinder/volume/drivers/eqlx.py:329 #, python-format -msgid "Task [%(task_name)s] %(task_ref)s status: success" +msgid "Volume %s was not found while trying to delete it" msgstr "" -#: cinder/virt/vmwareapi_conn.py:404 +#: cinder/volume/drivers/eqlx.py:333 #, python-format -msgid "Task [%(task_name)s] %(task_ref)s status: error %(error_info)s" +msgid "Failed to delete volume %s" msgstr "" -#: cinder/virt/vmwareapi_conn.py:409 +#: cinder/volume/drivers/eqlx.py:348 #, python-format -msgid "In vmwareapi:_poll_task, Got this error %s" +msgid "Failed to create snapshot of volume %s" msgstr "" -#: cinder/virt/xenapi_conn.py:140 -msgid "" -"Must specify xenapi_connection_url, xenapi_connection_username " -"(optionally), and xenapi_connection_password to use " -"connection_type=xenapi" +#: cinder/volume/drivers/eqlx.py:361 +#, python-format +msgid "Failed to create volume from snapshot %s" msgstr "" -#: cinder/virt/xenapi_conn.py:329 cinder/virt/libvirt/connection.py:472 -msgid "Could not determine iscsi initiator name" +#: cinder/volume/drivers/eqlx.py:374 +#, python-format +msgid "Failed to create clone of volume %s" msgstr "" -#: cinder/virt/xenapi_conn.py:460 -msgid "Host startup on XenServer is not supported." +#: cinder/volume/drivers/eqlx.py:384 +#, python-format +msgid "Failed to delete snapshot %(snap)s of volume %(vol)s" msgstr "" -#: cinder/virt/xenapi_conn.py:489 -msgid "Unable to log in to XenAPI (is the Dom0 disk full?)" +#: cinder/volume/drivers/eqlx.py:405 +#, python-format +msgid "Failed to initialize connection to volume %s" msgstr "" -#: cinder/virt/xenapi_conn.py:527 -msgid "Host is member of a pool, but DB says otherwise" +#: cinder/volume/drivers/eqlx.py:415 +#, python-format +msgid "Failed to terminate connection to volume %s" msgstr "" -#: cinder/virt/xenapi_conn.py:599 cinder/virt/xenapi_conn.py:612 +#: cinder/volume/drivers/eqlx.py:436 #, python-format -msgid "Got exception: %s" +msgid "Volume %s is not found!, it may have been deleted" msgstr "" -#: cinder/virt/baremetal/dom.py:93 -msgid "No domains exist." 
+#: cinder/volume/drivers/eqlx.py:440 +#, python-format +msgid "Failed to ensure export of volume %s" msgstr "" -#: cinder/virt/baremetal/dom.py:95 +#: cinder/volume/drivers/eqlx.py:459 #, python-format -msgid "============= initial domains =========== : %s" +msgid "Failed to extend_volume %(name)s from %(current_size)sGB to %(new_size)sGB" msgstr "" -#: cinder/virt/baremetal/dom.py:99 -msgid "Building domain: to be removed" +#: cinder/volume/drivers/glusterfs.py:86 +#, python-format +msgid "There's no Gluster config file configured (%s)" msgstr "" -#: cinder/virt/baremetal/dom.py:103 -msgid "Not running domain: remove" +#: cinder/volume/drivers/glusterfs.py:91 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" msgstr "" -#: cinder/virt/baremetal/dom.py:111 -msgid "domain running on an unknown node: discarded" +#: cinder/volume/drivers/glusterfs.py:103 +msgid "mount.glusterfs is not installed" msgstr "" -#: cinder/virt/baremetal/dom.py:127 +#: cinder/volume/drivers/glusterfs.py:161 #, python-format -msgid "No such domain (%s)" +msgid "Cloning volume %(src)s to volume %(dst)s" msgstr "" -#: cinder/virt/baremetal/dom.py:134 -#, python-format -msgid "Failed power down Bare-metal node %s" +#: cinder/volume/drivers/glusterfs.py:166 +msgid "Volume status must be 'available'." msgstr "" -#: cinder/virt/baremetal/dom.py:143 -msgid "deactivate -> activate fails" +#: cinder/volume/drivers/glusterfs.py:202 cinder/volume/drivers/nfs.py:122 +#: cinder/volume/drivers/netapp/nfs.py:753 +#, python-format +msgid "casted to %s" msgstr "" -#: cinder/virt/baremetal/dom.py:153 -msgid "destroy_domain: no such domain" +#: cinder/volume/drivers/glusterfs.py:216 +msgid "Snapshot status must be \"available\" to clone." msgstr "" -#: cinder/virt/baremetal/dom.py:154 +#: cinder/volume/drivers/glusterfs.py:238 #, python-format -msgid "No such domain %s" +msgid "snapshot: %(snap)s, volume: %(vol)s, volume_size: %(size)s" msgstr "" -#: cinder/virt/baremetal/dom.py:161 +#: cinder/volume/drivers/glusterfs.py:257 #, python-format -msgid "Domains: %s" +msgid "will copy from snapshot at %s" msgstr "" -#: cinder/virt/baremetal/dom.py:163 +#: cinder/volume/drivers/glusterfs.py:275 cinder/volume/drivers/nfs.py:172 #, python-format -msgid "Nodes: %s" +msgid "Volume %s does not have provider_location specified, skipping" msgstr "" -#: cinder/virt/baremetal/dom.py:166 +#: cinder/volume/drivers/glusterfs.py:373 #, python-format -msgid "After storing domains: %s" +msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" msgstr "" -#: cinder/virt/baremetal/dom.py:169 -msgid "deactivation/removing domain failed" +#: cinder/volume/drivers/glusterfs.py:403 +#, python-format +msgid "nova call result: %s" msgstr "" -#: cinder/virt/baremetal/dom.py:176 -msgid "===== Domain is being created =====" +#: cinder/volume/drivers/glusterfs.py:405 +msgid "Call to Nova to create snapshot failed" msgstr "" -#: cinder/virt/baremetal/dom.py:179 -msgid "Same domain name already exists" +#: cinder/volume/drivers/glusterfs.py:427 +msgid "Nova returned \"error\" status while creating snapshot." msgstr "" -#: cinder/virt/baremetal/dom.py:181 -msgid "create_domain: before get_idle_node" +#: cinder/volume/drivers/glusterfs.py:431 +#, python-format +msgid "Status of snapshot %(id)s is now %(status)s" msgstr "" -#: cinder/virt/baremetal/dom.py:198 +#: cinder/volume/drivers/glusterfs.py:444 #, python-format -msgid "Created new domain: %s" +msgid "Timed out while waiting for Nova update for creation of snapshot %s." 
msgstr "" -#: cinder/virt/baremetal/dom.py:213 +#: cinder/volume/drivers/glusterfs.py:456 #, python-format -msgid "Failed to boot Bare-metal node %s" +msgid "create snapshot: %s" msgstr "" -#: cinder/virt/baremetal/dom.py:222 -msgid "No such domain exists" +#: cinder/volume/drivers/glusterfs.py:457 +#, python-format +msgid "volume id: %s" msgstr "" -#: cinder/virt/baremetal/dom.py:226 -#, python-format -msgid "change_domain_state: to new state %s" +#: cinder/volume/drivers/glusterfs.py:532 +msgid "'active' must be present when writing snap_info." msgstr "" -#: cinder/virt/baremetal/dom.py:233 +#: cinder/volume/drivers/glusterfs.py:562 #, python-format -msgid "Stored fake domains to the file: %s" +msgid "deleting snapshot %s" msgstr "" -#: cinder/virt/baremetal/dom.py:244 -msgid "domain does not exist" +#: cinder/volume/drivers/glusterfs.py:566 +msgid "Volume status must be \"available\" or \"in-use\"." msgstr "" -#: cinder/virt/baremetal/nodes.py:42 +#: cinder/volume/drivers/glusterfs.py:582 #, python-format -msgid "Unknown baremetal driver %(d)s" +msgid "" +"Snapshot record for %s is not present, allowing snapshot_delete to " +"proceed." msgstr "" -#: cinder/virt/baremetal/proxy.py:148 +#: cinder/volume/drivers/glusterfs.py:587 #, python-format -msgid "Error encountered when destroying instance '%(name)s': %(ex)s" +msgid "snapshot_file for this snap is %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:162 +#: cinder/volume/drivers/glusterfs.py:608 #, python-format -msgid "instance %(instance_name)s: deleting instance files %(target)s" +msgid "No base file found for %s." msgstr "" -#: cinder/virt/baremetal/proxy.py:189 +#: cinder/volume/drivers/glusterfs.py:625 #, python-format -msgid "instance %s: rebooted" -msgstr "" - -#: cinder/virt/baremetal/proxy.py:192 -msgid "_wait_for_reboot failed" +msgid "No %(base_id)s found for %(file)s" msgstr "" -#: cinder/virt/baremetal/proxy.py:222 +#: cinder/volume/drivers/glusterfs.py:680 #, python-format -msgid "instance %s: rescued" +msgid "No file found with %s as backing file." msgstr "" -#: cinder/virt/baremetal/proxy.py:225 -msgid "_wait_for_rescue failed" +#: cinder/volume/drivers/glusterfs.py:690 +#, python-format +msgid "No snap found with %s as backing file." msgstr "" -#: cinder/virt/baremetal/proxy.py:242 -msgid "<============= spawn of baremetal =============>" +#: cinder/volume/drivers/glusterfs.py:701 +#, python-format +msgid "No file depends on %s." msgstr "" -#: cinder/virt/baremetal/proxy.py:255 +#: cinder/volume/drivers/glusterfs.py:727 #, python-format -msgid "instance %s: is building" +msgid "Check condition failed: %s expected to be None." msgstr "" -#: cinder/virt/baremetal/proxy.py:260 -msgid "Key is injected but instance is not running yet" +#: cinder/volume/drivers/glusterfs.py:778 +msgid "Call to Nova delete snapshot failed" msgstr "" -#: cinder/virt/baremetal/proxy.py:265 +#: cinder/volume/drivers/glusterfs.py:796 #, python-format -msgid "instance %s: booted" +msgid "status of snapshot %s is still \"deleting\"... waiting" msgstr "" -#: cinder/virt/baremetal/proxy.py:268 +#: cinder/volume/drivers/glusterfs.py:802 #, python-format -msgid "~~~~~~ current state = %s ~~~~~~" +msgid "Unable to delete snapshot %(id)s, status: %(status)s." msgstr "" -#: cinder/virt/baremetal/proxy.py:269 +#: cinder/volume/drivers/glusterfs.py:815 #, python-format -msgid "instance %s spawned successfully" +msgid "Timed out while waiting for Nova update for deletion of snapshot %(id)s." 
msgstr "" -#: cinder/virt/baremetal/proxy.py:272 +#: cinder/volume/drivers/glusterfs.py:904 #, python-format -msgid "instance %s:not booted" +msgid "%s must be a valid raw or qcow2 image." msgstr "" -#: cinder/virt/baremetal/proxy.py:274 -msgid "Bremetal assignment is overcommitted." +#: cinder/volume/drivers/glusterfs.py:967 +msgid "Extend volume is only supported for this driver when no snapshots exist." msgstr "" -#: cinder/virt/baremetal/proxy.py:354 +#: cinder/volume/drivers/glusterfs.py:975 #, python-format -msgid "instance %s: Creating image" +msgid "Unrecognized backing format: %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:473 +#: cinder/volume/drivers/glusterfs.py:990 #, python-format -msgid "instance %(inst_name)s: injecting %(injection)s into image %(img_id)s" +msgid "creating new volume at %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:484 +#: cinder/volume/drivers/glusterfs.py:993 #, python-format -msgid "" -"instance %(inst_name)s: ignoring error injecting data into image " -"%(img_id)s (%(e)s)" +msgid "file already exists at %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:529 +#: cinder/volume/drivers/glusterfs.py:1019 cinder/volume/drivers/nfs.py:159 #, python-format -msgid "instance %s: starting toXML method" +msgid "Exception during mounting %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:531 +#: cinder/volume/drivers/glusterfs.py:1021 #, python-format -msgid "instance %s: finished toXML method" +msgid "Available shares: %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:574 cinder/virt/libvirt/connection.py:1815 +#: cinder/volume/drivers/glusterfs.py:1038 +#, python-format msgid "" -"Cannot get the number of cpu, because this function is not implemented " -"for this platform. This error can be safely ignored for now." +"GlusterFS share at %(dir)s is not writable by the Cinder volume service. " +"Snapshot operations will not be supported." msgstr "" -#: cinder/virt/baremetal/proxy.py:714 +#: cinder/volume/drivers/gpfs.py:96 #, python-format -msgid "#### RLK: cpu_arch = %s " -msgstr "" - -#: cinder/virt/baremetal/proxy.py:746 -msgid "Updating!" +msgid "GPFS is not active. Detailed output: %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:773 cinder/virt/libvirt/connection.py:2609 -#: cinder/virt/xenapi/host.py:129 -msgid "Updating host stats" +#: cinder/volume/drivers/gpfs.py:97 +#, python-format +msgid "GPFS is not running - state: %s" msgstr "" -#: cinder/virt/baremetal/tilera.py:185 -msgid "free_node...." +#: cinder/volume/drivers/gpfs.py:140 +msgid "Option gpfs_mount_point_base is not set correctly." msgstr "" -#: cinder/virt/baremetal/tilera.py:216 -#, python-format -msgid "deactivate_node is called for node_id = %(id)s node_ip = %(ip)s" +#: cinder/volume/drivers/gpfs.py:147 +msgid "Option gpfs_images_share_mode is not set correctly." msgstr "" -#: cinder/virt/baremetal/tilera.py:221 -msgid "status of node is set to 0" +#: cinder/volume/drivers/gpfs.py:153 +msgid "Option gpfs_images_dir is not set correctly." msgstr "" -#: cinder/virt/baremetal/tilera.py:232 -msgid "rootfs is already removed" +#: cinder/volume/drivers/gpfs.py:160 +#, python-format +msgid "" +"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " +"belong to different file systems" msgstr "" -#: cinder/virt/baremetal/tilera.py:264 -msgid "Before ping to the bare-metal node" +#: cinder/volume/drivers/gpfs.py:169 +#, python-format +msgid "" +"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in " +"cluster daemon level %(cur)s - must be at least at level %(min)s." 
msgstr "" -#: cinder/virt/baremetal/tilera.py:275 +#: cinder/volume/drivers/gpfs.py:183 #, python-format -msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is ready" +msgid "%s must be an absolute path." msgstr "" -#: cinder/virt/baremetal/tilera.py:279 +#: cinder/volume/drivers/gpfs.py:188 #, python-format -msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is not ready, out_msg=%(out_msg)s" +msgid "%s is not a directory." msgstr "" -#: cinder/virt/baremetal/tilera.py:292 -msgid "Noting to do for tilera nodes: vmlinux is in CF" +#: cinder/volume/drivers/gpfs.py:197 +#, python-format +msgid "" +"The GPFS filesystem %(fs)s is not at the required release level. Current" +" level is %(cur)s, must be at least %(min)s." msgstr "" -#: cinder/virt/baremetal/tilera.py:316 -msgid "activate_node" +#: cinder/volume/drivers/gpfs.py:556 +#, python-format +msgid "Failed to resize volume %(volume_id)s, error: %(error)s" msgstr "" -#: cinder/virt/baremetal/tilera.py:330 -msgid "Node is unknown error state." +#: cinder/volume/drivers/gpfs.py:604 +#, python-format +msgid "mkfs failed on volume %(vol)s, error message was: %(err)s" msgstr "" -#: cinder/virt/disk/api.py:165 -msgid "no capable image handler configured" +#: cinder/volume/drivers/gpfs.py:637 +#, python-format +msgid "" +"%s cannot be accessed. Verify that GPFS is active and file system is " +"mounted." msgstr "" -#: cinder/virt/disk/api.py:178 +#: cinder/volume/drivers/lvm.py:189 #, python-format -msgid "unknown disk image handler: %s" +msgid "Unabled to delete due to existing snapshot for volume: %s" msgstr "" -#: cinder/virt/disk/api.py:189 -msgid "image already mounted" +#: cinder/volume/drivers/lvm.py:215 +#, python-format +msgid "Volume device file path %s does not exist." msgstr "" -#: cinder/virt/disk/api.py:276 cinder/virt/disk/guestfs.py:64 -#: cinder/virt/disk/guestfs.py:78 cinder/virt/disk/mount.py:100 +#: cinder/volume/drivers/lvm.py:221 #, python-format -msgid "Failed to mount filesystem: %s" +msgid "Size for volume: %s not found, cannot secure delete." msgstr "" -#: cinder/virt/disk/api.py:291 +#: cinder/volume/drivers/lvm.py:262 #, python-format -msgid "Failed to remove container: %s" +msgid "snapshot: %s not found, skipping delete operations" msgstr "" -#: cinder/virt/disk/api.py:441 +#: cinder/volume/drivers/lvm.py:361 #, python-format -msgid "User %(username)s not found in password file." +msgid "Unable to update stats on non-initialized Volume Group: %s" msgstr "" -#: cinder/virt/disk/api.py:457 +#: cinder/volume/drivers/lvm.py:462 #, python-format -msgid "User %(username)s not found in shadow file." 
+msgid "Error creating iSCSI target, retrying creation for target: %s" msgstr "" -#: cinder/virt/disk/guestfs.py:39 +#: cinder/volume/drivers/lvm.py:482 #, python-format -msgid "unsupported partition: %s" +msgid "volume_info:%s" msgstr "" -#: cinder/virt/disk/guestfs.py:77 -msgid "unknown guestmount error" +#: cinder/volume/drivers/lvm.py:518 +msgid "Detected inconsistency in provider_location id" msgstr "" -#: cinder/virt/disk/loop.py:30 +#: cinder/volume/drivers/lvm.py:519 cinder/volume/drivers/lvm.py:724 +#: cinder/volume/drivers/huawei/rest_common.py:1225 #, python-format -msgid "Could not attach image to loopback: %s" -msgstr "" - -#: cinder/virt/disk/mount.py:76 -msgid "no partitions found" +msgid "%s" msgstr "" -#: cinder/virt/disk/mount.py:77 +#: cinder/volume/drivers/lvm.py:573 #, python-format -msgid "Failed to map partitions: %s" +msgid "Symbolic link %s not found" msgstr "" -#: cinder/virt/disk/nbd.py:58 -msgid "nbd unavailable: module not loaded" +#: cinder/volume/drivers/nfs.py:109 +msgid "Driver specific implementation needs to return mount_point_base." msgstr "" -#: cinder/virt/disk/nbd.py:63 -msgid "No free nbd devices" +#: cinder/volume/drivers/nfs.py:263 +#, python-format +msgid "Expected volume size was %d" msgstr "" -#: cinder/virt/disk/nbd.py:81 +#: cinder/volume/drivers/nfs.py:264 #, python-format -msgid "qemu-nbd error: %s" +msgid " but size is now %d" msgstr "" -#: cinder/virt/disk/nbd.py:93 +#: cinder/volume/drivers/nfs.py:361 +#, fuzzy, python-format +msgid "%s is already mounted" +msgstr "group %s já existe" + +#: cinder/volume/drivers/nfs.py:421 #, python-format -msgid "nbd device %s did not show up" +msgid "There's no NFS config file configured (%s)" msgstr "" -#: cinder/virt/libvirt/connection.py:265 +#: cinder/volume/drivers/nfs.py:426 #, python-format -msgid "Connecting to libvirt: %s" +msgid "NFS config file at %(config)s doesn't exist" msgstr "" -#: cinder/virt/libvirt/connection.py:286 -msgid "Connection to libvirt broke" +#: cinder/volume/drivers/nfs.py:431 +#, python-format +msgid "NFS config 'nfs_oversub_ratio' invalid. Must be > 0: %s" msgstr "" -#: cinder/virt/libvirt/connection.py:388 +#: cinder/volume/drivers/nfs.py:439 #, python-format -msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s" +msgid "NFS config 'nfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" msgstr "" -#: cinder/virt/libvirt/connection.py:400 +#: cinder/volume/drivers/nfs.py:493 #, python-format -msgid "" -"Error from libvirt during saved instance removal. Code=%(errcode)s " -"Error=%(e)s" +msgid "Selected %s as target nfs share." msgstr "" -#: cinder/virt/libvirt/connection.py:411 +#: cinder/volume/drivers/nfs.py:526 #, python-format -msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s" +msgid "%s is above nfs_used_ratio" msgstr "" -#: cinder/virt/libvirt/connection.py:423 -msgid "Instance destroyed successfully." +#: cinder/volume/drivers/nfs.py:529 +#, python-format +msgid "%s is above nfs_oversub_ratio" msgstr "" -#: cinder/virt/libvirt/connection.py:435 +#: cinder/volume/drivers/nfs.py:532 #, python-format -msgid "Error from libvirt during unfilter. 
Code=%(errcode)s Error=%(e)s" +msgid "%s reserved space is above nfs_oversub_ratio" msgstr "" -#: cinder/virt/libvirt/connection.py:461 +#: cinder/volume/drivers/rbd.py:160 #, python-format -msgid "Deleting instance files %(target)s" +msgid "Invalid argument - whence=%s not supported" msgstr "" -#: cinder/virt/libvirt/connection.py:554 -msgid "attaching LXC block device" +#: cinder/volume/drivers/rbd.py:164 +msgid "Invalid argument" msgstr "" -#: cinder/virt/libvirt/connection.py:567 -msgid "detaching LXC block device" +#: cinder/volume/drivers/rbd.py:183 +msgid "fileno() not supported by RBD()" msgstr "" -#: cinder/virt/libvirt/connection.py:692 +#: cinder/volume/drivers/rbd.py:210 #, fuzzy, python-format -msgid "Instance soft rebooted successfully." -msgstr "volume %s: criado com sucesso" - -#: cinder/virt/libvirt/connection.py:696 -msgid "Failed to soft reboot instance." -msgstr "" +msgid "error opening rbd image %s" +msgstr "Desanexar volume %s" -#: cinder/virt/libvirt/connection.py:725 -msgid "Instance shutdown successfully." +#: cinder/volume/drivers/rbd.py:259 +msgid "rados and rbd python libraries not found" msgstr "" -#: cinder/virt/libvirt/connection.py:761 cinder/virt/libvirt/connection.py:905 -msgid "During reboot, instance disappeared." +#: cinder/volume/drivers/rbd.py:265 +msgid "error connecting to ceph cluster" msgstr "" -#: cinder/virt/libvirt/connection.py:766 -msgid "Instance rebooted successfully." +#: cinder/volume/drivers/rbd.py:346 cinder/volume/drivers/sheepdog.py:178 +msgid "error refreshing volume stats" msgstr "" -#: cinder/virt/libvirt/connection.py:867 cinder/virt/xenapi/vmops.py:1358 +#: cinder/volume/drivers/rbd.py:377 #, python-format -msgid "" -"Found %(migration_count)d unconfirmed migrations older than " -"%(confirm_window)d seconds" +msgid "clone depth exceeds limit of %s" msgstr "" -#: cinder/virt/libvirt/connection.py:871 +#: cinder/volume/drivers/rbd.py:411 #, python-format -msgid "Automatically confirming migration %d" +msgid "maximum clone depth (%d) has been reached - flattening source volume" msgstr "" -#: cinder/virt/libvirt/connection.py:896 -msgid "Instance is running" +#: cinder/volume/drivers/rbd.py:423 +#, python-format +msgid "flattening source volume %s" msgstr "" -#: cinder/virt/libvirt/connection.py:910 -msgid "Instance spawned successfully." 
+#: cinder/volume/drivers/rbd.py:435 +#, python-format +msgid "creating snapshot='%s'" msgstr "" -#: cinder/virt/libvirt/connection.py:926 +#: cinder/volume/drivers/rbd.py:445 #, python-format -msgid "data: %(data)r, fpath: %(fpath)r" +msgid "cloning '%(src_vol)s@%(src_snap)s' to '%(dest)s'" msgstr "" -#: cinder/virt/libvirt/connection.py:978 -msgid "Guest does not have a console available" +#: cinder/volume/drivers/rbd.py:459 +msgid "clone created successfully" msgstr "" -#: cinder/virt/libvirt/connection.py:1020 +#: cinder/volume/drivers/rbd.py:468 #, python-format -msgid "Path '%(path)s' supports direct I/O" +msgid "creating volume '%s'" msgstr "" -#: cinder/virt/libvirt/connection.py:1024 +#: cinder/volume/drivers/rbd.py:484 #, python-format -msgid "Path '%(path)s' does not support direct I/O: '%(ex)s'" +msgid "flattening %(pool)s/%(img)s" msgstr "" -#: cinder/virt/libvirt/connection.py:1028 cinder/virt/libvirt/connection.py:1032 +#: cinder/volume/drivers/rbd.py:490 #, python-format -msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'" +msgid "cloning %(pool)s/%(img)s@%(snap)s to %(dst)s" msgstr "" -#: cinder/virt/libvirt/connection.py:1153 -msgid "Creating image" +#: cinder/volume/drivers/rbd.py:527 +msgid "volume has no backup snaps" msgstr "" -#: cinder/virt/libvirt/connection.py:1339 +#: cinder/volume/drivers/rbd.py:550 #, python-format -msgid "Injecting %(injection)s into image %(img_id)s" +msgid "volume %s is not a clone" msgstr "" -#: cinder/virt/libvirt/connection.py:1349 +#: cinder/volume/drivers/rbd.py:568 #, python-format -msgid "Ignoring error injecting data into image %(img_id)s (%(e)s)" +msgid "deleting parent snapshot %s" msgstr "" -#: cinder/virt/libvirt/connection.py:1381 +#: cinder/volume/drivers/rbd.py:579 #, python-format -msgid "block_device_list %s" +msgid "deleting parent %s" msgstr "" -#: cinder/virt/libvirt/connection.py:1658 -msgid "Starting toXML method" +#: cinder/volume/drivers/rbd.py:593 +#, python-format +msgid "volume %s no longer exists in backend" msgstr "" -#: cinder/virt/libvirt/connection.py:1662 -msgid "Finished toXML method" +#: cinder/volume/drivers/rbd.py:609 +msgid "volume has clone snapshot(s)" msgstr "" -#: cinder/virt/libvirt/connection.py:1679 +#: cinder/volume/drivers/rbd.py:625 #, python-format -msgid "" -"Error from libvirt while looking up %(instance_name)s: [Error Code " -"%(error_code)s] %(ex)s" +msgid "deleting rbd volume %s" msgstr "" -#: cinder/virt/libvirt/connection.py:1920 -msgid "libvirt version is too old (does not support getVersion)" +#: cinder/volume/drivers/rbd.py:629 +msgid "" +"ImageBusy error raised while deleting rbd volume. This may have been " +"caused by a connection from a client that has crashed and, if so, may be " +"resolved by retrying the delete after 30 seconds has elapsed." 
msgstr "" -#: cinder/virt/libvirt/connection.py:1942 -#, python-format -msgid "'' must be 1, but %d\n" +#: cinder/volume/drivers/rbd.py:642 +msgid "volume is a clone so cleaning references" msgstr "" -#: cinder/virt/libvirt/connection.py:1969 +#: cinder/volume/drivers/rbd.py:696 #, python-format -msgid "topology (%(topology)s) must have %(ks)s" +msgid "connection data: %s" msgstr "" -#: cinder/virt/libvirt/connection.py:2067 -#, python-format -msgid "" -"Instance launched has CPU info:\n" -"%s" +#: cinder/volume/drivers/rbd.py:705 +msgid "Not stored in rbd" msgstr "" -#: cinder/virt/libvirt/connection.py:2079 -#, python-format -msgid "" -"CPU doesn't have compatibility.\n" -"\n" -"%(ret)s\n" -"\n" -"Refer to %(u)s" +#: cinder/volume/drivers/rbd.py:709 +msgid "Blank components" msgstr "" -#: cinder/virt/libvirt/connection.py:2136 -#, python-format -msgid "Timeout migrating for %s. nwfilter not found." -msgstr "" +#: cinder/volume/drivers/rbd.py:712 +#, fuzzy +msgid "Not an rbd snapshot" +msgstr "instância %s: fazendo um snapshot" -#: cinder/virt/libvirt/connection.py:2352 -#, python-format -msgid "skipping %(path)s since it looks like volume" -msgstr "" +#: cinder/volume/drivers/rbd.py:724 +#, fuzzy, python-format +msgid "not cloneable: %s" +msgstr "resposta %s" -#: cinder/virt/libvirt/connection.py:2407 +#: cinder/volume/drivers/rbd.py:728 #, python-format -msgid "Getting disk size of %(i_name)s: %(e)s" +msgid "%s is in a different ceph cluster" msgstr "" -#: cinder/virt/libvirt/connection.py:2458 -#, python-format -msgid "Instance %s: Starting migrate_disk_and_power_off" +#: cinder/volume/drivers/rbd.py:733 +msgid "rbd image clone requires image format to be 'raw' but image {0} is '{1}'" msgstr "" -#: cinder/virt/libvirt/connection.py:2513 -msgid "During wait running, instance disappeared." +#: cinder/volume/drivers/rbd.py:747 +#, fuzzy, python-format +msgid "Unable to open image %(loc)s: %(err)s" +msgstr "Não é possível localizar o volume %s" + +#: cinder/volume/drivers/rbd.py:817 +msgid "volume backup complete." msgstr "" -#: cinder/virt/libvirt/connection.py:2518 -msgid "Instance running successfully." +#: cinder/volume/drivers/rbd.py:830 +msgid "volume restore complete." msgstr "" -#: cinder/virt/libvirt/connection.py:2525 +#: cinder/volume/drivers/rbd.py:840 cinder/volume/drivers/sheepdog.py:195 #, python-format -msgid "Instance %s: Starting finish_migration" +msgid "Failed to Extend Volume %(volname)s" msgstr "" -#: cinder/virt/libvirt/connection.py:2565 +#: cinder/volume/drivers/rbd.py:845 cinder/volume/drivers/sheepdog.py:200 +#: cinder/volume/drivers/windows/windows.py:223 #, python-format -msgid "Instance %s: Starting finish_revert_migration" +msgid "Extend volume from %(old_size)s GB to %(new_size)s GB." msgstr "" -#: cinder/virt/libvirt/firewall.py:42 -msgid "" -"Libvirt module could not be loaded. NWFilterFirewall will not work " -"correctly." +#: cinder/volume/drivers/scality.py:67 +msgid "Value required for 'scality_sofs_config'" msgstr "" -#: cinder/virt/libvirt/firewall.py:93 -msgid "Called setup_basic_filtering in nwfilter" +#: cinder/volume/drivers/scality.py:78 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" msgstr "" -#: cinder/virt/libvirt/firewall.py:101 -msgid "Ensuring static filters" +#: cinder/volume/drivers/scality.py:84 +msgid "Cannot execute /sbin/mount.sofs" msgstr "" -#: cinder/virt/libvirt/firewall.py:171 -#, python-format -msgid "The nwfilter(%(instance_filter_name)s) is not found." 
+#: cinder/volume/drivers/scality.py:105
+msgid "Cannot mount Scality SOFS, check syslog for errors"
 msgstr ""

-#: cinder/virt/libvirt/firewall.py:217
+#: cinder/volume/drivers/scality.py:139
 #, python-format
-msgid "The nwfilter(%(instance_filter_name)s) for%(name)s is not found."
+msgid "Cannot find volume dir for Scality SOFS at '%s'"
 msgstr ""

-#: cinder/virt/libvirt/firewall.py:233
-msgid "iptables firewall: Setup Basic Filtering"
+#: cinder/volume/drivers/sheepdog.py:59
+#, python-format
+msgid "Sheepdog is not working: %s"
 msgstr ""

-#: cinder/virt/libvirt/firewall.py:252
-msgid "Attempted to unfilter instance which is not filtered"
+#: cinder/volume/drivers/sheepdog.py:64
+msgid "Sheepdog is not working"
 msgstr ""

-#: cinder/virt/libvirt/imagecache.py:170
+#: cinder/volume/drivers/solidfire.py:144
 #, python-format
-msgid "%s is a valid instance name"
+msgid "Payload for SolidFire API call: %s"
 msgstr ""

-#: cinder/virt/libvirt/imagecache.py:173
+#: cinder/volume/drivers/solidfire.py:151
 #, python-format
-msgid "%s has a disk file"
+msgid ""
+"Failed to make httplib connection SolidFire Cluster: %s (verify san_ip "
+"settings)"
 msgstr ""

-#: cinder/virt/libvirt/imagecache.py:175
+#: cinder/volume/drivers/solidfire.py:154
 #, python-format
-msgid "Instance %(instance)s is backed by %(backing)s"
+msgid "Failed to make httplib connection: %s"
 msgstr ""

-#: cinder/virt/libvirt/imagecache.py:186
+#: cinder/volume/drivers/solidfire.py:161
 #, python-format
 msgid ""
-"Instance %(instance)s is using a backing file %(backing)s which does not "
-"appear in the image service"
+"Request to SolidFire cluster returned bad status: %(status)s / %(reason)s"
+" (check san_login/san_password settings)"
 msgstr ""

-#: cinder/virt/libvirt/imagecache.py:237
+#: cinder/volume/drivers/solidfire.py:166
 #, python-format
-msgid "%(id)s (%(base_file)s): image verification failed"
+msgid "HTTP request failed, with status: %(status)s and reason: %(reason)s"
 msgstr ""

-#: cinder/virt/libvirt/imagecache.py:247
+#: cinder/volume/drivers/solidfire.py:177
 #, python-format
-msgid "%(id)s (%(base_file)s): image verification skipped, no hash stored"
+msgid "Call to json.loads() raised an exception: %s"
 msgstr ""

-#: cinder/virt/libvirt/imagecache.py:266
+#: cinder/volume/drivers/solidfire.py:183
 #, python-format
-msgid "Cannot remove %(base_file)s, it does not exist"
+msgid "Results of SolidFire API call: %s"
 msgstr ""

-#: cinder/virt/libvirt/imagecache.py:278
+#: cinder/volume/drivers/solidfire.py:187
 #, python-format
-msgid "Base file too young to remove: %s"
+msgid "Clone operation encountered: %s"
 msgstr ""

-#: cinder/virt/libvirt/imagecache.py:281
+#: cinder/volume/drivers/solidfire.py:189
 #, python-format
-msgid "Removing base file: %s"
+msgid "Waiting for outstanding operation before retrying snapshot: %s"
 msgstr ""

-#: cinder/virt/libvirt/imagecache.py:288
+#: cinder/volume/drivers/solidfire.py:195
 #, python-format
-msgid "Failed to remove %(base_file)s, error was %(error)s"
+msgid "Detected xDBVersionMismatch, retry %s of 5"
 msgstr ""

-#: cinder/virt/libvirt/imagecache.py:299
+#: cinder/volume/drivers/solidfire.py:202
+#: cinder/volume/drivers/solidfire.py:271
+#: cinder/volume/drivers/solidfire.py:366
+#, fuzzy, python-format
+msgid "API response: %s"
+msgstr "resposta %s"
+
+#: cinder/volume/drivers/solidfire.py:222
 #, python-format
-msgid "%(id)s (%(base_file)s): checking"
+msgid "Found solidfire account: %s"
 msgstr ""

-#: cinder/virt/libvirt/imagecache.py:318
+#: cinder/volume/drivers/solidfire.py:253
 #, python-format
-msgid ""
-"%(id)s (%(base_file)s): in use: on this node %(local)d local, %(remote)d "
-"on other nodes"
+msgid "solidfire account: %s does not exist, create it..."
 msgstr ""

-#: cinder/virt/libvirt/imagecache.py:330
+#: cinder/volume/drivers/solidfire.py:315
 #, python-format
-msgid ""
-"%(id)s (%(base_file)s): warning -- an absent base file is in use! "
-"instances: %(instance_list)s"
+msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!"
 msgstr ""

-#: cinder/virt/libvirt/imagecache.py:338
+#: cinder/volume/drivers/solidfire.py:398
+#, fuzzy
+msgid "Failed to get model update from clone"
+msgstr "Falha ao obter metadados para o ip: %s"
+
+#: cinder/volume/drivers/solidfire.py:410
 #, python-format
-msgid "%(id)s (%(base_file)s): in use on (%(remote)d on other nodes)"
+msgid "Failed volume create: %s"
 msgstr ""

-#: cinder/virt/libvirt/imagecache.py:348
+#: cinder/volume/drivers/solidfire.py:425
 #, python-format
-msgid "%(id)s (%(base_file)s): image is not in use"
+msgid "More than one valid preset was detected, using %s"
 msgstr ""

-#: cinder/virt/libvirt/imagecache.py:354
+#: cinder/volume/drivers/solidfire.py:460
 #, python-format
-msgid "%(id)s (%(base_file)s): image is in use"
+msgid "Failed to get SolidFire Volume: %s"
 msgstr ""

-#: cinder/virt/libvirt/imagecache.py:377
+#: cinder/volume/drivers/solidfire.py:469
 #, python-format
-msgid "Skipping verification, no base directory at %s"
+msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s."
 msgstr ""

-#: cinder/virt/libvirt/imagecache.py:381
-msgid "Verify base images"
+#: cinder/volume/drivers/solidfire.py:478
+#, python-format
+msgid "Volume %s, not found on SF Cluster."
 msgstr ""

-#: cinder/virt/libvirt/imagecache.py:388
+#: cinder/volume/drivers/solidfire.py:481
 #, python-format
-msgid "Image id %(id)s yields fingerprint %(fingerprint)s"
+msgid "Found %(count)s volumes mapped to id: %(uuid)s."
 msgstr ""

-#: cinder/virt/libvirt/imagecache.py:406
-#, python-format
-msgid "Unknown base file: %s"
+#: cinder/volume/drivers/solidfire.py:550
+msgid "Enter SolidFire delete_volume..."
 msgstr ""

-#: cinder/virt/libvirt/imagecache.py:411
+#: cinder/volume/drivers/solidfire.py:554
 #, python-format
-msgid "Active base files: %s"
+msgid "Account for Volume ID %s was not found on the SolidFire Cluster!"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:556
+msgid "This usually means the volume was never successfully created."
 msgstr ""

-#: cinder/virt/libvirt/imagecache.py:414
+#: cinder/volume/drivers/solidfire.py:569
 #, python-format
-msgid "Corrupt base files: %s"
+msgid "Failed to delete SolidFire Volume: %s"
 msgstr ""

-#: cinder/virt/libvirt/imagecache.py:418
+#: cinder/volume/drivers/solidfire.py:572
+#: cinder/volume/drivers/solidfire.py:646
+#: cinder/volume/drivers/solidfire.py:709
+#: cinder/volume/drivers/solidfire.py:734
 #, python-format
-msgid "Removable base files: %s"
+msgid "Volume ID %s was not found on the SolidFire Cluster!"
 msgstr ""

-#: cinder/virt/libvirt/imagecache.py:426
-msgid "Verification complete"
+#: cinder/volume/drivers/solidfire.py:575
+msgid "Leaving SolidFire delete_volume"
 msgstr ""

-#: cinder/virt/libvirt/utils.py:264
-msgid "Unable to find an open port"
-msgstr "Impossível localizar uma porta aberta"
+#: cinder/volume/drivers/solidfire.py:579
+msgid "Executing SolidFire ensure_export..."
+msgstr ""

-#: cinder/virt/libvirt/vif.py:90
-#, python-format
-msgid "Ensuring vlan %(vlan)s and bridge %(bridge)s"
+#: cinder/volume/drivers/solidfire.py:587
+msgid "Executing SolidFire create_export..."
 msgstr ""

-#: cinder/virt/libvirt/vif.py:99
-#, python-format
-msgid "Ensuring bridge %s"
+#: cinder/volume/drivers/solidfire.py:638
+msgid "Entering SolidFire extend_volume..."
 msgstr ""

-#: cinder/virt/libvirt/vif.py:165 cinder/virt/libvirt/vif.py:220
-#, python-format
-msgid "Failed while unplugging vif of instance '%s'"
+#: cinder/volume/drivers/solidfire.py:660
+msgid "Leaving SolidFire extend_volume"
 msgstr ""

-#: cinder/virt/libvirt/volume.py:163
-#, python-format
-msgid "iSCSI device not found at %s"
+#: cinder/volume/drivers/solidfire.py:665
+msgid "Updating cluster status info"
 msgstr ""

-#: cinder/virt/libvirt/volume.py:166
-#, python-format
-msgid ""
-"ISCSI volume not yet found at: %(mount_device)s. Will rescan & retry. "
-"Try number: %(tries)s"
+#: cinder/volume/drivers/solidfire.py:673
+#, fuzzy
+msgid "Failed to get updated stats"
+msgstr "Começando a terminar instâncias"
+
+#: cinder/volume/drivers/solidfire.py:703
+#: cinder/volume/drivers/solidfire.py:728
+msgid "Entering SolidFire attach_volume..."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:773
+msgid "Leaving SolidFire transfer volume"
 msgstr ""

-#: cinder/virt/libvirt/volume.py:178
+#: cinder/volume/drivers/zadara.py:236
 #, python-format
-msgid "Found iSCSI node %(mount_device)s (after %(tries)s rescans)"
+msgid "Sending %(method)s to %(url)s. Body \"%(body)s\""
 msgstr ""

-#: cinder/virt/vmwareapi/error_util.py:93
+#: cinder/volume/drivers/zadara.py:260
 #, python-format
-msgid "Error(s) %s occurred in the call to RetrieveProperties"
+msgid "Operation completed. %(data)s"
 msgstr ""

-#: cinder/virt/vmwareapi/fake.py:44 cinder/virt/xenapi/fake.py:77
+#: cinder/volume/drivers/zadara.py:357
 #, python-format
-msgid "%(text)s: _db_content => %(content)s"
-msgstr "%(text)s: _db_content => %(content)s"
+msgid "Pool %(name)s: %(total)sGB total, %(free)sGB free"
+msgstr ""

-#: cinder/virt/vmwareapi/fake.py:131
+#: cinder/volume/drivers/zadara.py:408 cinder/volume/drivers/zadara.py:531
 #, python-format
-msgid "Property %(attr)s not set for the managed object %(objName)s"
+msgid "Volume %(name)s could not be found. It might be already deleted"
 msgstr ""

-#: cinder/virt/vmwareapi/fake.py:437
-msgid "There is no VM registered"
+#: cinder/volume/drivers/zadara.py:438
+#, python-format
+msgid "Create snapshot: %s"
 msgstr ""

-#: cinder/virt/vmwareapi/fake.py:439 cinder/virt/vmwareapi/fake.py:609
+#: cinder/volume/drivers/zadara.py:445 cinder/volume/drivers/zadara.py:490
+#: cinder/volume/drivers/zadara.py:516
 #, python-format
-msgid "Virtual Machine with ref %s is not there"
+msgid "Volume %(name)s not found"
 msgstr ""

-#: cinder/virt/vmwareapi/fake.py:502
+#: cinder/volume/drivers/zadara.py:456
 #, python-format
-msgid "Logging out a session that is invalid or already logged out: %s"
+msgid "Delete snapshot: %s"
 msgstr ""

-#: cinder/virt/vmwareapi/fake.py:517
-msgid "Session is faulty"
+#: cinder/volume/drivers/zadara.py:464
+#, python-format
+msgid "snapshot: original volume %s not found, skipping delete operation"
 msgstr ""

-#: cinder/virt/vmwareapi/fake.py:520
-msgid "Session Invalid"
+#: cinder/volume/drivers/zadara.py:472
+#, python-format
+msgid "snapshot: snapshot %s not found, skipping delete operation"
 msgstr ""

-#: cinder/virt/vmwareapi/fake.py:606
-msgid " No Virtual Machine has been registered yet"
+#: cinder/volume/drivers/zadara.py:483
+#, python-format
+msgid "Creating volume from snapshot: %s"
 msgstr ""

-#: cinder/virt/vmwareapi/io_util.py:99
+#: cinder/volume/drivers/zadara.py:496
 #, python-format
-msgid "Glance image %s is in killed state"
+msgid "Snapshot %(name)s not found"
 msgstr ""

-#: cinder/virt/vmwareapi/io_util.py:107
+#: cinder/volume/drivers/zadara.py:614
 #, python-format
-msgid "Glance image %(image_id)s is in unknown state - %(state)s"
+msgid "Attach properties: %(properties)s"
 msgstr ""

-#: cinder/virt/vmwareapi/network_utils.py:128
+#: cinder/volume/drivers/emc/emc_smis_common.py:40
 msgid ""
-"ESX SOAP server returned an empty port group for the host system in its "
-"response"
+"Module PyWBEM not installed. Install PyWBEM using the python-pywbem "
+"package."
 msgstr ""

-#: cinder/virt/vmwareapi/network_utils.py:155
-#, python-format
-msgid "Creating Port Group with name %s on the ESX host"
+#: cinder/volume/drivers/emc/emc_smis_common.py:79
+msgid "Entering create_volume."
 msgstr ""

-#: cinder/virt/vmwareapi/network_utils.py:169
-#, python-format
-msgid "Created Port Group with name %s on the ESX host"
-msgstr ""
+#: cinder/volume/drivers/emc/emc_smis_common.py:83
+#, fuzzy, python-format
+msgid "Create Volume: %(volume)s Size: %(size)lu"
+msgstr "volume %(vol_name)s: criando lv com tamanho %(vol_size)sG"

-#: cinder/virt/vmwareapi/read_write_util.py:150
+#: cinder/volume/drivers/emc/emc_smis_common.py:91
 #, python-format
-msgid "Exception during HTTP connection close in VMWareHTTpWrite. Exception is %s"
-msgstr ""
-
-#: cinder/virt/vmwareapi/vim.py:84
-msgid "Unable to import suds."
+msgid "Create Volume: %(volume)s Storage type: %(storage_type)s"
 msgstr ""

-#: cinder/virt/vmwareapi/vim.py:90
-msgid "Must specify vmwareapi_wsdl_loc"
+#: cinder/volume/drivers/emc/emc_smis_common.py:98
+#, python-format
+msgid ""
+"Create Volume: %(volume)s Pool: %(pool)s Storage System: "
+"%(storage_system)s"
 msgstr ""

-#: cinder/virt/vmwareapi/vim.py:145
+#: cinder/volume/drivers/emc/emc_smis_common.py:107
 #, python-format
-msgid "No such SOAP method '%s' provided by VI SDK"
+msgid ""
+"Error Create Volume: %(volumename)s. Storage Configuration Service not "
+"found for pool %(storage_type)s."
 msgstr ""

-#: cinder/virt/vmwareapi/vim.py:150
+#: cinder/volume/drivers/emc/emc_smis_common.py:115
 #, python-format
-msgid "httplib error in %s: "
+msgid ""
+"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool "
+"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s "
+"ElementType: 5 Size: %(size)lu"
 msgstr ""

-#: cinder/virt/vmwareapi/vim.py:157
+#: cinder/volume/drivers/emc/emc_smis_common.py:130
 #, python-format
-msgid "Socket error in %s: "
+msgid "Create Volume: %(volumename)s Return code: %(rc)lu"
 msgstr ""

-#: cinder/virt/vmwareapi/vim.py:162
+#: cinder/volume/drivers/emc/emc_smis_common.py:137
 #, python-format
-msgid "Type error in %s: "
+msgid ""
+"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: "
+"%(error)s"
 msgstr ""

-#: cinder/virt/vmwareapi/vim.py:166
+#: cinder/volume/drivers/emc/emc_smis_common.py:144
 #, python-format
-msgid "Exception in %s "
+msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu"
 msgstr ""

-#: cinder/virt/vmwareapi/vmops.py:66
-msgid "Getting list of instances"
+#: cinder/volume/drivers/emc/emc_smis_common.py:152
+msgid "Entering create_volume_from_snapshot."
 msgstr ""

-#: cinder/virt/vmwareapi/vmops.py:82
+#: cinder/volume/drivers/emc/emc_smis_common.py:157
 #, python-format
-msgid "Got total of %s instances"
+msgid ""
+"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: "
+"%(snapshotname)s"
 msgstr ""

-#: cinder/virt/vmwareapi/vmops.py:126
-msgid "Couldn't get a local Datastore reference"
+#: cinder/volume/drivers/emc/emc_smis_common.py:167
+#, python-format
+msgid ""
+"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: "
+"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage "
+"System: %(storage_system)s."
 msgstr ""

-#: cinder/virt/vmwareapi/vmops.py:196
+#: cinder/volume/drivers/emc/emc_smis_common.py:177
 #, python-format
-msgid "Creating VM with the name %s on the ESX host"
+msgid ""
+"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: "
+"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX."
 msgstr ""

-#: cinder/virt/vmwareapi/vmops.py:205
+#: cinder/volume/drivers/emc/emc_smis_common.py:188
 #, python-format
-msgid "Created VM with the name %s on the ESX host"
+msgid ""
+"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: "
+"%(snapshotname)s. Cannot find Replication Service to create volume from "
+"snapshot."
 msgstr ""

-#: cinder/virt/vmwareapi/vmops.py:234
+#: cinder/volume/drivers/emc/emc_smis_common.py:197
 #, python-format
 msgid ""
-"Creating Virtual Disk of size %(vmdk_file_size_in_kb)s KB and adapter "
-"type %(adapter_type)s on the ESX host local store %(data_store_name)s"
+"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: "
+"%(snapshotname)s Method: CreateElementReplica ReplicationService: "
+"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: "
+"%(sourceelement)s"
 msgstr ""

-#: cinder/virt/vmwareapi/vmops.py:251
+#: cinder/volume/drivers/emc/emc_smis_common.py:218
 #, python-format
 msgid ""
-"Created Virtual Disk of size %(vmdk_file_size_in_kb)s KB on the ESX host "
-"local store %(data_store_name)s"
+"Error Create Volume from Snapshot: Volume: %(volumename)s "
+"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s"
 msgstr ""

-#: cinder/virt/vmwareapi/vmops.py:260
+#: cinder/volume/drivers/emc/emc_smis_common.py:230
 #, python-format
 msgid ""
-"Deleting the file %(flat_uploaded_vmdk_path)s on the ESX host localstore "
-"%(data_store_name)s"
+"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: "
+"%(snapshotname)s. Successfully clone volume from snapshot. Finding the "
+"clone relationship."
 msgstr ""

-#: cinder/virt/vmwareapi/vmops.py:272
+#: cinder/volume/drivers/emc/emc_smis_common.py:241
 #, python-format
 msgid ""
-"Deleted the file %(flat_uploaded_vmdk_path)s on the ESX host local store "
-"%(data_store_name)s"
+"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: "
+"%(snapshotname)s. Remove the clone relationship. Method: "
+"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: "
+"8 Synchronization: %(sync_name)s"
 msgstr ""

-#: cinder/virt/vmwareapi/vmops.py:283
+#: cinder/volume/drivers/emc/emc_smis_common.py:257
 #, python-format
 msgid ""
-"Downloading image file data %(image_ref)s to the ESX data store "
-"%(data_store_name)s"
+"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: "
+"%(snapshotname)s Return code: %(rc)lu"
 msgstr ""

-#: cinder/virt/vmwareapi/vmops.py:298
+#: cinder/volume/drivers/emc/emc_smis_common.py:266
 #, python-format
 msgid ""
-"Downloaded image file data %(image_ref)s to the ESX data store "
-"%(data_store_name)s"
+"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: "
+"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s"
 msgstr ""

-#: cinder/virt/vmwareapi/vmops.py:315
+#: cinder/volume/drivers/emc/emc_smis_common.py:278
 #, python-format
-msgid "Reconfiguring VM instance %s to attach the image disk"
+msgid ""
+"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: "
+"%(snapshotname)s Return code: %(rc)lu."
 msgstr ""

-#: cinder/virt/vmwareapi/vmops.py:322
-#, python-format
-msgid "Reconfigured VM instance %s to attach the image disk"
+#: cinder/volume/drivers/emc/emc_smis_common.py:287
+msgid "Entering create_cloned_volume."
 msgstr ""

-#: cinder/virt/vmwareapi/vmops.py:329
+#: cinder/volume/drivers/emc/emc_smis_common.py:292
 #, python-format
-msgid "Powering on the VM instance %s"
+msgid ""
+"Create a Clone from Volume: Volume: %(volumename)s Source Volume: "
+"%(srcname)s"
 msgstr ""

-#: cinder/virt/vmwareapi/vmops.py:335
+#: cinder/volume/drivers/emc/emc_smis_common.py:302
 #, python-format
-msgid "Powered on the VM instance %s"
+msgid ""
+"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s"
+" Source Instance: %(src_instance)s Storage System: %(storage_system)s."
 msgstr ""

-#: cinder/virt/vmwareapi/vmops.py:381
+#: cinder/volume/drivers/emc/emc_smis_common.py:312
 #, python-format
-msgid "Creating Snapshot of the VM instance %s "
+msgid ""
+"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: "
+"%(srcname)s. Cannot find Replication Service to create cloned volume."
 msgstr ""

-#: cinder/virt/vmwareapi/vmops.py:391
+#: cinder/volume/drivers/emc/emc_smis_common.py:321
 #, python-format
-msgid "Created Snapshot of the VM instance %s "
+msgid ""
+"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s"
+" Method: CreateElementReplica ReplicationService: %(service)s "
+"ElementName: %(elementname)s SyncType: 8 SourceElement: "
+"%(sourceelement)s"
 msgstr ""

-#: cinder/virt/vmwareapi/vmops.py:434
+#: cinder/volume/drivers/emc/emc_smis_common.py:342
 #, python-format
-msgid "Copying disk data before snapshot of the VM instance %s"
+msgid ""
+"Error Create Cloned Volume: Volume: %(volumename)s Source "
+"Volume:%(srcname)s. Return code: %(rc)lu.Error: %(error)s"
 msgstr ""

-#: cinder/virt/vmwareapi/vmops.py:447
+#: cinder/volume/drivers/emc/emc_smis_common.py:354
 #, python-format
-msgid "Copied disk data before snapshot of the VM instance %s"
+msgid ""
+"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s."
+" Successfully cloned volume from source volume. Finding the clone "
+"relationship."
 msgstr ""

-#: cinder/virt/vmwareapi/vmops.py:456
+#: cinder/volume/drivers/emc/emc_smis_common.py:365
 #, python-format
-msgid "Uploading image %s"
+msgid ""
+"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s."
+" Remove the clone relationship. Method: ModifyReplicaSynchronization "
+"ReplicationService: %(service)s Operation: 8 Synchronization: "
+"%(sync_name)s"
 msgstr ""

-#: cinder/virt/vmwareapi/vmops.py:469
+#: cinder/volume/drivers/emc/emc_smis_common.py:381
 #, python-format
-msgid "Uploaded image %s"
+msgid ""
+"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s"
+" Return code: %(rc)lu"
 msgstr ""

-#: cinder/virt/vmwareapi/vmops.py:479
+#: cinder/volume/drivers/emc/emc_smis_common.py:390
 #, python-format
-msgid "Deleting temporary vmdk file %s"
+msgid ""
+"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: "
+"%(srcname)s. Return code: %(rc)lu. Error: %(error)s"
 msgstr ""

-#: cinder/virt/vmwareapi/vmops.py:488
+#: cinder/volume/drivers/emc/emc_smis_common.py:402
 #, python-format
-msgid "Deleted temporary vmdk file %s"
+msgid ""
+"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: "
+"%(srcname)s Return code: %(rc)lu."
 msgstr ""

-#: cinder/virt/vmwareapi/vmops.py:520
-msgid "instance is not powered on"
+#: cinder/volume/drivers/emc/emc_smis_common.py:411
+msgid "Entering delete_volume."
 msgstr ""

-#: cinder/virt/vmwareapi/vmops.py:527
+#: cinder/volume/drivers/emc/emc_smis_common.py:413
 #, python-format
-msgid "Rebooting guest OS of VM %s"
+msgid "Delete Volume: %(volume)s"
 msgstr ""

-#: cinder/virt/vmwareapi/vmops.py:530
+#: cinder/volume/drivers/emc/emc_smis_common.py:420
 #, python-format
-msgid "Rebooted guest OS of VM %s"
+msgid "Volume %(name)s not found on the array. No volume to delete."
 msgstr ""

-#: cinder/virt/vmwareapi/vmops.py:532
+#: cinder/volume/drivers/emc/emc_smis_common.py:430
 #, python-format
-msgid "Doing hard reboot of VM %s"
+msgid ""
+"Error Delete Volume: %(volumename)s. Storage Configuration Service not "
+"found."
 msgstr ""

-#: cinder/virt/vmwareapi/vmops.py:536
+#: cinder/volume/drivers/emc/emc_smis_common.py:438
 #, python-format
-msgid "Did hard reboot of VM %s"
+msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s"
 msgstr ""

-#: cinder/virt/vmwareapi/vmops.py:548
+#: cinder/volume/drivers/emc/emc_smis_common.py:442
 #, python-format
-msgid "instance - %s not present"
+msgid ""
+"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: "
+"%(service)s TheElement: %(vol_instance)s"
 msgstr ""

-#: cinder/virt/vmwareapi/vmops.py:567
+#: cinder/volume/drivers/emc/emc_smis_common.py:456
 #, python-format
-msgid "Powering off the VM %s"
+msgid ""
+"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: "
+"%(error)s"
 msgstr ""

-#: cinder/virt/vmwareapi/vmops.py:572
+#: cinder/volume/drivers/emc/emc_smis_common.py:465
 #, python-format
-msgid "Powered off the VM %s"
+msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu"
 msgstr ""

-#: cinder/virt/vmwareapi/vmops.py:576
-#, python-format
-msgid "Unregistering the VM %s"
+#: cinder/volume/drivers/emc/emc_smis_common.py:472
+msgid "Entering create_snapshot."
 msgstr ""

-#: cinder/virt/vmwareapi/vmops.py:579
+#: cinder/volume/drivers/emc/emc_smis_common.py:476
 #, python-format
-msgid "Unregistered the VM %s"
+msgid "Create snapshot: %(snapshot)s: volume: %(volume)s"
 msgstr ""

-#: cinder/virt/vmwareapi/vmops.py:581
+#: cinder/volume/drivers/emc/emc_smis_common.py:488
 #, python-format
-msgid ""
-"In vmwareapi:vmops:destroy, got this exception while un-registering the "
-"VM: %s"
+msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s"
 msgstr ""

-#: cinder/virt/vmwareapi/vmops.py:592
+#: cinder/volume/drivers/emc/emc_smis_common.py:495
+#: cinder/volume/drivers/emc/emc_smis_common.py:497
+#: cinder/volume/drivers/emc/emc_smis_common.py:567
 #, python-format
-msgid "Deleting contents of the VM %(name)s from datastore %(datastore_name)s"
+msgid "Cannot find Replication Service to create snapshot for volume %s."
 msgstr ""

-#: cinder/virt/vmwareapi/vmops.py:602
+#: cinder/volume/drivers/emc/emc_smis_common.py:502
 #, python-format
-msgid "Deleted contents of the VM %(name)s from datastore %(datastore_name)s"
+msgid ""
+"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s "
+"Source: %(volume)s Replication Service: %(service)s ElementName: "
+"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s."
 msgstr ""

-#: cinder/virt/vmwareapi/vmops.py:607
+#: cinder/volume/drivers/emc/emc_smis_common.py:518
 #, python-format
 msgid ""
-"In vmwareapi:vmops:destroy, got this exception while deleting the VM "
-"contents from the disk: %s"
-msgstr ""
-
-#: cinder/virt/vmwareapi/vmops.py:615
-msgid "pause not supported for vmwareapi"
-msgstr ""
-
-#: cinder/virt/vmwareapi/vmops.py:619
-msgid "unpause not supported for vmwareapi"
+"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s "
+"Return code: %(rc)lu"
 msgstr ""

-#: cinder/virt/vmwareapi/vmops.py:633
+#: cinder/volume/drivers/emc/emc_smis_common.py:527
 #, python-format
-msgid "Suspending the VM %s "
+msgid ""
+"Error Create Snapshot: %(snapshot)s Volume: %(volume)s Error: "
+"%(errordesc)s"
 msgstr ""

-#: cinder/virt/vmwareapi/vmops.py:637
+#: cinder/volume/drivers/emc/emc_smis_common.py:535
 #, python-format
-msgid "Suspended the VM %s "
+msgid ""
+"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s "
+"Return code: %(rc)lu."
 msgstr ""

-#: cinder/virt/vmwareapi/vmops.py:640
-msgid "instance is powered off and can not be suspended."
+#: cinder/volume/drivers/emc/emc_smis_common.py:541
+msgid "Entering delete_snapshot."
 msgstr ""

-#: cinder/virt/vmwareapi/vmops.py:643
+#: cinder/volume/drivers/emc/emc_smis_common.py:545
 #, python-format
-msgid "VM %s was already in suspended state. So returning without doing anything"
+msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s"
 msgstr ""

-#: cinder/virt/vmwareapi/vmops.py:656
+#: cinder/volume/drivers/emc/emc_smis_common.py:551
 #, python-format
-msgid "Resuming the VM %s"
+msgid ""
+"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding "
+"StorageSychronization_SV_SV."
 msgstr ""

-#: cinder/virt/vmwareapi/vmops.py:661
+#: cinder/volume/drivers/emc/emc_smis_common.py:559
 #, python-format
-msgid "Resumed the VM %s "
-msgstr ""
-
-#: cinder/virt/vmwareapi/vmops.py:663
-msgid "instance is not in a suspended state"
-msgstr ""
-
-#: cinder/virt/vmwareapi/vmops.py:699
-msgid "get_diagnostics not implemented for vmwareapi"
+msgid ""
+"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No "
+"snapshot to delete."
 msgstr ""

-#: cinder/virt/vmwareapi/vmops.py:757
+#: cinder/volume/drivers/emc/emc_smis_common.py:574
 #, python-format
 msgid ""
-"Reconfiguring VM instance %(name)s to set the machine id with ip - "
-"%(ip_addr)s"
+"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: "
+"ModifyReplicaSynchronization: Replication Service: %(service)s "
+"Operation: 19 Synchronization: %(sync_name)s."
 msgstr ""

-#: cinder/virt/vmwareapi/vmops.py:765
+#: cinder/volume/drivers/emc/emc_smis_common.py:590
 #, python-format
 msgid ""
-"Reconfigured VM instance %(name)s to set the machine id with ip - "
-"%(ip_addr)s"
+"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s "
+"Return code: %(rc)lu"
 msgstr ""

-#: cinder/virt/vmwareapi/vmops.py:802
+#: cinder/volume/drivers/emc/emc_smis_common.py:599
 #, python-format
-msgid "Creating directory with path %s"
+msgid ""
+"Error Delete Snapshot: Volume: %(volumename)s Snapshot: "
+"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s"
 msgstr ""

-#: cinder/virt/vmwareapi/vmops.py:806
+#: cinder/volume/drivers/emc/emc_smis_common.py:611
 #, python-format
-msgid "Created directory with path %s"
+msgid ""
+"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: "
+"%(snapshotname)s Return code: %(rc)lu."
 msgstr ""

-#: cinder/virt/vmwareapi/vmware_images.py:89
-#, python-format
-msgid "Downloading image %s from glance image server"
-msgstr ""
+#: cinder/volume/drivers/emc/emc_smis_common.py:621
+#, fuzzy, python-format
+msgid "Create export: %(volume)s"
+msgstr "Re-exportando %s volumes"

-#: cinder/virt/vmwareapi/vmware_images.py:103
+#: cinder/volume/drivers/emc/emc_smis_common.py:626
 #, python-format
-msgid "Downloaded image %s from glance image server"
+msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s"
 msgstr ""

-#: cinder/virt/vmwareapi/vmware_images.py:108
+#: cinder/volume/drivers/emc/emc_smis_common.py:648
 #, python-format
-msgid "Uploading image %s to the Glance image server"
+msgid ""
+"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s"
+" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2"
 msgstr ""

-#: cinder/virt/vmwareapi/vmware_images.py:129
+#: cinder/volume/drivers/emc/emc_smis_common.py:663
 #, python-format
-msgid "Uploaded image %s to the Glance image server"
+msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s"
 msgstr ""

-#: cinder/virt/vmwareapi/vmware_images.py:139
+#: cinder/volume/drivers/emc/emc_smis_common.py:674
+#, fuzzy, python-format
+msgid "Error mapping volume %s."
+msgstr "Desanexar volume %s"
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:678
+#, fuzzy, python-format
+msgid "ExposePaths for volume %s completed successfully."
+msgstr "volume %s: remoção realizada com sucesso"
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:694
 #, python-format
-msgid "Getting image size for the image %s"
+msgid ""
+"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s "
+"LunMaskingSCSIProtocolController: %(lunmasking)s"
 msgstr ""

-#: cinder/virt/vmwareapi/vmware_images.py:143
+#: cinder/volume/drivers/emc/emc_smis_common.py:707
 #, python-format
-msgid "Got image size of %(size)s for the image %(image)s"
+msgid "Error unmapping volume %s."
 msgstr ""

-#: cinder/virt/xenapi/fake.py:553 cinder/virt/xenapi/fake.py:652
-#: cinder/virt/xenapi/fake.py:670 cinder/virt/xenapi/fake.py:732
-msgid "Raising NotImplemented"
-msgstr "Aumento não implementado"
+#: cinder/volume/drivers/emc/emc_smis_common.py:711
+#, fuzzy, python-format
+msgid "HidePaths for volume %s completed successfully."
+msgstr "volume %s: remoção realizada com sucesso"

-#: cinder/virt/xenapi/fake.py:555
+#: cinder/volume/drivers/emc/emc_smis_common.py:724
 #, python-format
-msgid "xenapi.fake does not have an implementation for %s"
-msgstr "xenapi.fake não tem uma implementação para %s"
+msgid ""
+"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s"
+" Members: %(vol)s"
+msgstr ""

-#: cinder/virt/xenapi/fake.py:589
+#: cinder/volume/drivers/emc/emc_smis_common.py:739
 #, python-format
-msgid "Calling %(localname)s %(impl)s"
-msgstr "Chamando %(localname)s %(impl)s"
+msgid "Error mapping volume %(vol)s. %(error)s"
+msgstr ""

-#: cinder/virt/xenapi/fake.py:594
-#, python-format
-msgid "Calling getter %s"
-msgstr "Chamando o pai %s"
+#: cinder/volume/drivers/emc/emc_smis_common.py:744
+#, fuzzy, python-format
+msgid "AddMembers for volume %s completed successfully."
+msgstr "volume %s: remoção realizada com sucesso"

-#: cinder/virt/xenapi/fake.py:654
+#: cinder/volume/drivers/emc/emc_smis_common.py:757
 #, python-format
 msgid ""
-"xenapi.fake does not have an implementation for %s or it has been called "
-"with the wrong number of arguments"
+"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: "
+"%(masking_group)s Members: %(vol)s"
 msgstr ""
-"xenapi.fake não tem implementação para %s ou foi chamado com um número de"
-" argumentos inválido"

-#: cinder/virt/xenapi/host.py:67
+#: cinder/volume/drivers/emc/emc_smis_common.py:770
 #, python-format
-msgid ""
-"Instance %(name)s running on %(host)s could not be found in the database:"
-" assuming it is a worker VM and skipping migration to a new host"
+msgid "Error unmapping volume %(vol)s. %(error)s"
 msgstr ""

-#: cinder/virt/xenapi/host.py:137
-#, python-format
-msgid "Unable to get SR for this host: %s"
-msgstr ""
+#: cinder/volume/drivers/emc/emc_smis_common.py:775
+#, fuzzy, python-format
+msgid "RemoveMembers for volume %s completed successfully."
+msgstr "volume %s: remoção realizada com sucesso"

-#: cinder/virt/xenapi/host.py:169
-msgid "Unable to get updated status"
+#: cinder/volume/drivers/emc/emc_smis_common.py:781
+#, python-format
+msgid "Map volume: %(volume)s"
 msgstr ""

-#: cinder/virt/xenapi/host.py:172
+#: cinder/volume/drivers/emc/emc_smis_common.py:790
+#: cinder/volume/drivers/emc/emc_smis_common.py:820
 #, python-format
-msgid "The call to %(method)s returned an error: %(e)s."
+msgid "Cannot find Controller Configuration Service for storage system %s"
 msgstr ""

-#: cinder/virt/xenapi/network_utils.py:37
+#: cinder/volume/drivers/emc/emc_smis_common.py:804
 #, python-format
-msgid "Found non-unique network for name_label %s"
+msgid "Unmap volume: %(volume)s"
 msgstr ""

-#: cinder/virt/xenapi/network_utils.py:55
+#: cinder/volume/drivers/emc/emc_smis_common.py:810
 #, python-format
-msgid "Found non-unique network for bridge %s"
-msgstr "Encontrado múltiplas redes para a bridge %s"
+msgid "Volume %s is not mapped. No volume to unmap."
+msgstr ""

-#: cinder/virt/xenapi/network_utils.py:58
+#: cinder/volume/drivers/emc/emc_smis_common.py:834
 #, python-format
-msgid "Found no network for bridge %s"
-msgstr "Não foi encontrada rede para bridge %s"
+msgid "Initialize connection: %(volume)s"
+msgstr ""

-#: cinder/virt/xenapi/pool.py:111
+#: cinder/volume/drivers/emc/emc_smis_common.py:840
 #, python-format
-msgid "Unable to eject %(host)s from the pool; pool not empty"
+msgid "Volume %s is already mapped."
 msgstr ""

-#: cinder/virt/xenapi/pool.py:126
+#: cinder/volume/drivers/emc/emc_smis_common.py:852
 #, python-format
-msgid "Unable to eject %(host)s from the pool; No master found"
+msgid "Terminate connection: %(volume)s"
 msgstr ""

-#: cinder/virt/xenapi/pool.py:143
+#: cinder/volume/drivers/emc/emc_smis_common.py:884
 #, python-format
-msgid "Pool-Join failed: %(e)s"
+msgid "Found Storage Type: %s"
 msgstr ""

-#: cinder/virt/xenapi/pool.py:146
-#, python-format
-msgid "Unable to join %(host)s in the pool"
+#: cinder/volume/drivers/emc/emc_smis_common.py:887
+msgid "Storage type not found."
 msgstr ""

-#: cinder/virt/xenapi/pool.py:162
+#: cinder/volume/drivers/emc/emc_smis_common.py:903
 #, python-format
-msgid "Pool-eject failed: %(e)s"
+msgid "Found Masking View: %s"
 msgstr ""

-#: cinder/virt/xenapi/pool.py:174
-#, fuzzy, python-format
-msgid "Unable to set up pool: %(e)s."
-msgstr "Não é possível desconectar o volume %s"
-
-#: cinder/virt/xenapi/pool.py:185
-#, python-format
-msgid "Pool-set_name_label failed: %(e)s"
+#: cinder/volume/drivers/emc/emc_smis_common.py:906
+msgid "Masking View not found."
 msgstr ""

-#: cinder/virt/xenapi/vif.py:103
-#, python-format
-msgid "Found no PIF for device %s"
+#: cinder/volume/drivers/emc/emc_smis_common.py:928
+msgid "Ecom user not found."
 msgstr ""

-#: cinder/virt/xenapi/vif.py:122
+#: cinder/volume/drivers/emc/emc_smis_common.py:948
 #, python-format
-msgid ""
-"PIF %(pif_rec['uuid'])s for network %(bridge)s has VLAN id %(pif_vlan)d. "
-"Expected %(vlan_num)d"
+msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s"
 msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:218
-msgid "Created VM"
+#: cinder/volume/drivers/emc/emc_smis_common.py:952
+msgid "Ecom server not found."
 msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:245
-#, python-format
-msgid "VBD not found in instance %s"
-msgstr "O VBD não foi encontrado na instância %s"
-
-#: cinder/virt/xenapi/vm_utils.py:262
-#, fuzzy, python-format
-msgid "VBD %s already detached"
-msgstr "group %s já existe"
+#: cinder/volume/drivers/emc/emc_smis_common.py:959
+#, fuzzy
+msgid "Cannot connect to ECOM server"
+msgstr "Reconectado à fila"

-#: cinder/virt/xenapi/vm_utils.py:265
+#: cinder/volume/drivers/emc/emc_smis_common.py:971
 #, python-format
-msgid "VBD %(vbd_ref)s detach rejected, attempt %(num_attempt)d/%(max_attempts)d"
+msgid "Found Replication Service: %s"
 msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:270
+#: cinder/volume/drivers/emc/emc_smis_common.py:984
 #, python-format
-msgid "Unable to unplug VBD %s"
-msgstr "Não é possível desconectar o VBD %s"
-
-#: cinder/virt/xenapi/vm_utils.py:275
-#, python-format
-msgid "Reached maximum number of retries trying to unplug VBD %s"
+msgid "Found Storage Configuration Service: %s"
 msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:286
+#: cinder/volume/drivers/emc/emc_smis_common.py:997
 #, python-format
-msgid "Unable to destroy VBD %s"
-msgstr "Não é possível destruir o VBD %s"
-
-#: cinder/virt/xenapi/vm_utils.py:305
-#, fuzzy, python-format
-msgid "Creating %(vbd_type)s-type VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... "
-msgstr "VBD %(vbd_ref)s criado para VM %(vm_ref)s, VDI %(vdi_ref)s."
+msgid "Found Controller Configuration Service: %s"
+msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:308
+#: cinder/volume/drivers/emc/emc_smis_common.py:1010
 #, python-format
-msgid "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s."
-msgstr "VBD %(vbd_ref)s criado para VM %(vm_ref)s, VDI %(vdi_ref)s."
+msgid "Found Storage Hardware ID Management Service: %s"
+msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:319
+#: cinder/volume/drivers/emc/emc_smis_common.py:1054
 #, python-format
-msgid "Unable to destroy VDI %s"
+msgid "Pool %(storage_type)s is not found."
 msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:336
+#: cinder/volume/drivers/emc/emc_smis_common.py:1060
 #, python-format
-msgid ""
-"Created VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s)"
-" on %(sr_ref)s."
+msgid "Storage system not found for pool %(storage_type)s."
 msgstr ""
-"VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s) no SR "
-"%(sr_ref)s criada com sucesso."

-#: cinder/virt/xenapi/vm_utils.py:345
+#: cinder/volume/drivers/emc/emc_smis_common.py:1066
 #, python-format
-msgid "Copied VDI %(vdi_ref)s from VDI %(vdi_to_copy_ref)s on %(sr_ref)s."
+msgid "Pool: %(pool)s SystemName: %(systemname)s."
 msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:353
+#: cinder/volume/drivers/emc/emc_smis_common.py:1082
 #, python-format
-msgid "Cloned VDI %(vdi_ref)s from VDI %(vdi_to_clone_ref)s"
+msgid "Pool name: %(poolname)s System name: %(systemname)s."
 msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:372
+#: cinder/volume/drivers/emc/emc_smis_common.py:1114
 #, python-format
-msgid "No primary VDI found for %(vm_ref)s"
+msgid "Volume %(volumename)s not found on the array."
 msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:379
-#, fuzzy, python-format
-msgid "Snapshotting with label '%(label)s'"
-msgstr "Fazendo um snapshot da VM %(vm_ref)s com rótulo '%(label)s'..."
-
-#: cinder/virt/xenapi/vm_utils.py:392
-#, fuzzy, python-format
-msgid "Created snapshot %(template_vm_ref)s"
-msgstr "Snapshot %(template_vm_ref)s criado a partir da VM %(vm_ref)s."
-
-#: cinder/virt/xenapi/vm_utils.py:431
+#: cinder/volume/drivers/emc/emc_smis_common.py:1117
 #, python-format
-msgid "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s"
+msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s."
 msgstr ""
-"Solicitando à xapi para realizar upload da imagem %(vdi_uuids)s com ID "
-"%(image_id)s"

-#: cinder/virt/xenapi/vm_utils.py:583
+#: cinder/volume/drivers/emc/emc_smis_common.py:1130
 #, python-format
-msgid "Creating blank HD of size %(req_size)d gigs"
+msgid "Source: %(volumename)s Target: %(snapshotname)s."
 msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:634
+#: cinder/volume/drivers/emc/emc_smis_common.py:1153
 #, python-format
 msgid ""
-"Fast cloning is only supported on default local SR of type ext. SR on "
-"this system was found to be of type %(sr_type)s. Ignoring the cow flag."
+"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized "
+"not found. "
 msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:724
+#: cinder/volume/drivers/emc/emc_smis_common.py:1158
 #, python-format
 msgid ""
-"download_vhd %(image)s attempt %(attempt_num)d/%(max_attempts)d from "
-"%(glance_host)s:%(glance_port)s"
+"Storage system: %(storage_system)s Storage Synchronized instance: "
+"%(sync)s."
 msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:734
+#: cinder/volume/drivers/emc/emc_smis_common.py:1184
 #, python-format
-msgid "download_vhd failed: %r"
+msgid "Error finding %s."
 msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:750
+#: cinder/volume/drivers/emc/emc_smis_common.py:1188
 #, python-format
-msgid "Asking xapi to fetch vhd image %(image)s"
+msgid "Found %(name)s: %(initiator)s."
 msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:760
+#: cinder/volume/drivers/emc/emc_smis_common.py:1248
 #, python-format
 msgid ""
-"xapi 'download_vhd' returned VDI of type '%(vdi_type)s' with UUID "
-"'%(vdi_uuid)s'"
+"LunMaskingSCSIProtocolController for storage system %(storage_system)s "
+"and initiator %(initiator)s is %(ctrl)s."
 msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:789
+#: cinder/volume/drivers/emc/emc_smis_common.py:1289
 #, python-format
-msgid "vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes=%(vdi_size_bytes)d"
+msgid ""
+"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator"
+" %(initiator)s is %(ctrl)s."
 msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:805
+#: cinder/volume/drivers/emc/emc_smis_common.py:1302
 #, python-format
-msgid "image_size_bytes=%(size_bytes)d, allowed_size_bytes=%(allowed_size_bytes)d"
+msgid ""
+"Volume %(name)s not found on the array. Cannot determine if there are "
+"volumes mapped."
 msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:809
+#: cinder/volume/drivers/emc/emc_smis_common.py:1314
 #, python-format
 msgid ""
-"Image size %(size_bytes)d exceeded instance_type allowed size "
-"%(allowed_size_bytes)d"
+"LunMaskingSCSIProtocolController for storage system %(storage)s and "
+"%(connector)s is %(ctrl)s."
 msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:831
+#: cinder/volume/drivers/emc/emc_smis_common.py:1326
 #, python-format
-msgid "Fetching image %(image)s, type %(image_type_str)"
+msgid ""
+"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped "
+"to %(initiator)s."
 msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:844
-#, fuzzy, python-format
-msgid "Size for image %(image)s: %(virtual_size)d"
-msgstr "Tamanho da imagem %(image)s:%(virtual_size)d"
+#: cinder/volume/drivers/emc/emc_smis_common.py:1361
+#, python-format
+msgid "Available device number on %(storage)s: %(device)s."
+msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:853
+#: cinder/volume/drivers/emc/emc_smis_common.py:1404
 #, python-format
-msgid ""
-"Kernel/Ramdisk image is too large: %(vdi_size)d bytes, max %(max_size)d "
-"bytes"
+msgid "Device number not found for volume %(volumename)s %(vol_instance)s."
 msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:870
+#: cinder/volume/drivers/emc/emc_smis_common.py:1409
 #, python-format
-msgid "Copying VDI %s to /boot/guest on dom0"
-msgstr "Copiando o VDI %s de /boot/guest no dom0"
+msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s."
+msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:884
+#: cinder/volume/drivers/emc/emc_smis_common.py:1419
 #, python-format
-msgid "Kernel/Ramdisk VDI %s destroyed"
-msgstr "Kernel/Ramdisk %s destruidos"
+msgid "Device info: %(data)s."
+msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:895
-msgid "Failed to fetch glance image"
+#: cinder/volume/drivers/emc/emc_smis_common.py:1441
+#, python-format
+msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s."
 msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:934
+#: cinder/volume/drivers/emc/emc_smis_common.py:1463
 #, python-format
-msgid "Detected %(image_type_str)s format for image %(image_ref)s"
+msgid "Found Storage Processor System: %s"
 msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:955
+#: cinder/volume/drivers/emc/emc_smis_common.py:1491
 #, python-format
-msgid "Looking up vdi %s for PV kernel"
-msgstr "Verificando o vdi %s para kernel PV"
+msgid ""
+"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s"
+" is %(endpoint)s."
+msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:973
+#: cinder/volume/drivers/emc/emc_smis_common.py:1520
+msgid "Error finding Storage Hardware ID Service."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:1526
 #, python-format
-msgid "Unknown image format %(disk_image_type)s"
+msgid ""
+"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: "
+"%(hardwareids)s."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:1538
+msgid "Error finding Target WWNs."
 msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:1016
+#: cinder/volume/drivers/emc/emc_smis_common.py:1548
 #, python-format
-msgid "VDI %s is still available"
-msgstr "O VDI %s continua disponível"
+msgid "Add target WWN: %s."
+msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:1059
+#: cinder/volume/drivers/emc/emc_smis_common.py:1550
 #, python-format
-msgid "(VM_UTILS) xenserver vm state -> |%s|"
-msgstr "(VM_UTILS) xenserver vm state -> |%s|"
+msgid "Target WWNs: %s."
+msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:1061
+#: cinder/volume/drivers/emc/emc_smis_common.py:1566
 #, python-format
-msgid "(VM_UTILS) xenapi power_state -> |%s|"
-msgstr "(VM_UTILS) xenapi power_state -> |%s|"
+msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s."
+msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:1088
+#: cinder/volume/drivers/emc/emc_smis_iscsi.py:152
 #, python-format
-msgid "Unable to parse rrd of %(vm_uuid)s"
+msgid "Could not find iSCSI export for volume %s"
 msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:1108
+#: cinder/volume/drivers/emc/emc_smis_iscsi.py:161
+#, fuzzy, python-format
+msgid "Cannot find device number for volume %s"
+msgstr "Não é possível localizar o volume %s"
+
+#: cinder/volume/drivers/emc/emc_smis_iscsi.py:191
+#, fuzzy, python-format
+msgid "Found iSCSI endpoint: %s"
+msgstr "NotFound lançado: %s"
+
+#: cinder/volume/drivers/emc/emc_smis_iscsi.py:198
 #, python-format
-msgid "Re-scanning SR %s"
-msgstr "Re-escaneando SR %s"
+msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s."
+msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:1136
+#: cinder/volume/drivers/emc/emc_smis_iscsi.py:215
 #, python-format
-msgid "Flag sr_matching_filter '%s' does not respect formatting convention"
+msgid "ISCSI properties: %s"
 msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:1154
-msgid ""
-"XenAPI is unable to find a Storage Repository to install guest instances "
-"on. Please check your configuration and/or configure the flag "
-"'sr_matching_filter'"
+#: cinder/volume/drivers/hds/hds.py:70
+#, python-format
+msgid "Range: start LU: %(start)s, end LU: %(end)s"
 msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:1167
-msgid "Cannot find SR of content-type ISO"
+#: cinder/volume/drivers/hds/hds.py:84
+#, python-format
+msgid "setting LU upper (end) limit to %s"
 msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:1175
+#: cinder/volume/drivers/hds/hds.py:92
 #, python-format
-msgid "ISO: looking at SR %(sr_rec)s"
+msgid "%(element)s: %(val)s"
 msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:1177
-msgid "ISO: not iso content"
+#: cinder/volume/drivers/hds/hds.py:103 cinder/volume/drivers/hds/hds.py:105
+#, python-format
+msgid "XML exception reading parameter: %s"
 msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:1180
-msgid "ISO: iso content_type, no 'i18n-key' key"
+#: cinder/volume/drivers/hds/hds.py:178
+#, python-format
+msgid "portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s"
 msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:1183
-msgid "ISO: iso content_type, i18n-key value not 'local-storage-iso'"
+#: cinder/volume/drivers/hds/hds.py:197
+#, python-format
+msgid "No configuration found for service: %s"
 msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:1187
-msgid "ISO: SR MATCHing our criteria"
+#: cinder/volume/drivers/hds/hds.py:250
+#, fuzzy, python-format
+msgid "HDP not found: %s"
+msgstr "Endereço para Link Local não encontrado: %s"
+
+#: cinder/volume/drivers/hds/hds.py:289
+#, python-format
+msgid "iSCSI portal not found for service: %s"
 msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:1189
-msgid "ISO: ISO, looking to see if it is host local"
+#: cinder/volume/drivers/hds/hds.py:327
+#, python-format
+msgid "LUN %(lun)s of size %(sz)s MB is created."
 msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:1192
+#: cinder/volume/drivers/hds/hds.py:355
 #, python-format
-msgid "ISO: PBD %(pbd_ref)s disappeared"
+msgid "LUN %(lun)s of size %(size)s MB is cloned."
 msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:1195
+#: cinder/volume/drivers/hds/hds.py:372
 #, python-format
-msgid "ISO: PBD matching, want %(pbd_rec)s, have %(host)s"
+msgid "LUN %(lun)s extended to %(size)s GB."
 msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:1198
-msgid "ISO: SR with local PBD"
+#: cinder/volume/drivers/hds/hds.py:395
+#, python-format
+msgid "delete lun %(lun)s on %(name)s"
 msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:1220
+#: cinder/volume/drivers/hds/hds.py:480
 #, python-format
-msgid ""
-"Unable to obtain RRD XML for VM %(vm_uuid)s with server details: "
-"%(server)s."
+msgid "LUN %(lun)s of size %(sz)s MB is created from snapshot."
 msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:1236
+#: cinder/volume/drivers/hds/hds.py:503
 #, python-format
-msgid "Unable to obtain RRD XML updates with server details: %(server)s."
+msgid "LUN %(lun)s of size %(size)s MB is created as snapshot."
 msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:1290
+#: cinder/volume/drivers/hds/hds.py:522
 #, python-format
-msgid "Invalid statistics data from Xenserver: %s"
+msgid "LUN %s is deleted."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/__init__.py:57
+msgid "_instantiate_driver: configuration not found."
 msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:1343
+#: cinder/volume/drivers/huawei/__init__.py:64
 #, python-format
-msgid "VHD %(vdi_uuid)s has parent %(parent_ref)s"
-msgstr "O VHD %(vdi_uuid)s tem pai %(parent_ref)s"
+msgid ""
+"_instantiate_driver: Loading %(protocol)s driver for Huawei OceanStor "
+"%(product)s series storage arrays."
+msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:1417
+#: cinder/volume/drivers/huawei/__init__.py:84
 #, python-format
 msgid ""
-"Parent %(parent_uuid)s doesn't match original parent "
-"%(original_parent_uuid)s, waiting for coalesce..."
+"\"Product\" or \"Protocol\" is illegal. \"Product\" should be set to "
+"either T, Dorado or HVS. \"Protocol\" should be set to either iSCSI or "
+"FC. Product: %(product)s Protocol: %(protocol)s"
 msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:1427
+#: cinder/volume/drivers/huawei/huawei_dorado.py:74
 #, python-format
-msgid "VHD coalesce attempts exceeded (%(max_attempts)d), giving up..."
+msgid ""
+"initialize_connection: volume name: %(vol)s host: %(host)s initiator: "
+"%(wwn)s"
 msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:1462
+#: cinder/volume/drivers/huawei/huawei_dorado.py:92
+#: cinder/volume/drivers/huawei/huawei_t.py:461
 #, python-format
-msgid "Timeout waiting for device %s to be created"
+msgid "initialize_connection: Target FC ports WWNS: %s"
 msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:1473
+#: cinder/volume/drivers/huawei/huawei_t.py:101
 #, python-format
-msgid "Plugging VBD %s ... "
-msgstr "Conectando VBD %s ... "
+msgid ""
+"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: "
+"%(ini)s"
+msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:1476
+#: cinder/volume/drivers/huawei/huawei_t.py:159
+#: cinder/volume/drivers/huawei/rest_common.py:1278
 #, python-format
-msgid "Plugging VBD %s done."
-msgstr "O VDB %s foi conectado."
+msgid ""
+"_get_iscsi_params: Failed to get target IP for initiator %(ini)s, please "
+"check config file."
+msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:1478
+#: cinder/volume/drivers/huawei/huawei_t.py:206
+#: cinder/volume/drivers/huawei/rest_common.py:1083
 #, python-format
-msgid "VBD %(vbd_ref)s plugged as %(orig_dev)s"
+msgid "_get_tgt_iqn: iSCSI IP is %s."
 msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:1481
+#: cinder/volume/drivers/huawei/huawei_t.py:234
 #, python-format
-msgid "VBD %(vbd_ref)s plugged into wrong dev, remapping to %(dev)s"
+msgid "_get_tgt_iqn: iSCSI target iqn is %s."
 msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:1490
+#: cinder/volume/drivers/huawei/huawei_t.py:248
 #, python-format
-msgid "Destroying VBD for VDI %s ... "
-msgstr "Destruindo VBD para o VDI %s ... "
+msgid ""
+"_get_iscsi_tgt_port_info: Failed to get iSCSI port info. Please make sure"
+" the iSCSI port IP %s is configured in array."
+msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:1498
+#: cinder/volume/drivers/huawei/huawei_t.py:323
+#: cinder/volume/drivers/huawei/huawei_t.py:552
 #, python-format
-msgid "Destroying VBD for VDI %s done."
-msgstr "O VBD para o VDI %s foi destruído."
+msgid ""
+"terminate_connection: volume: %(vol)s, host: %(host)s, connector: "
+"%(initiator)s"
+msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:1511
+#: cinder/volume/drivers/huawei/huawei_t.py:351
 #, python-format
-msgid "Running pygrub against %s"
-msgstr "Rodando pygrub cindermente %s"
+msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_t.py:436
+msgid "validate_connector: The FC driver requires thewwpns in the connector."
+msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:1518
+#: cinder/volume/drivers/huawei/huawei_t.py:443
 #, python-format
-msgid "Found Xen kernel %s"
-msgstr "Kernel Xen encontrado: %s"
+msgid ""
+"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: "
+"%(wwn)s"
+msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:1520
-msgid "No Xen kernel found. Booting HVM."
-msgstr "Kernel Xen não encontrado. Iniciando como HVM."
+#: cinder/volume/drivers/huawei/huawei_t.py:578
+#, python-format
+msgid "_remove_fc_ports: FC port was not found on host %(hostid)s."
+msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:1533
-msgid "Partitions:"
+#: cinder/volume/drivers/huawei/huawei_utils.py:40
+#, python-format
+msgid "parse_xml_file: %s"
 msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:1539
+#: cinder/volume/drivers/huawei/huawei_utils.py:129
 #, python-format
-msgid " %(num)s: %(ptype)s %(size)d sectors"
+msgid "_get_host_os_type: Host %(ip)s OS type is %(os)s."
 msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:1565
+#: cinder/volume/drivers/huawei/rest_common.py:59
 #, python-format
-msgid ""
-"Writing partition table %(primary_first)d %(primary_last)d to "
-"%(dev_path)s..."
+msgid "HVS Request URL: %(url)s"
 msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:1578
+#: cinder/volume/drivers/huawei/rest_common.py:60
 #, python-format
-msgid "Writing partition table %s done."
+msgid "HVS Request Data: %(data)s"
 msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:1632
+#: cinder/volume/drivers/huawei/rest_common.py:73
 #, python-format
-msgid ""
-"Starting sparse_copy src=%(src_path)s dst=%(dst_path)s "
-"virtual_size=%(virtual_size)d block_size=%(block_size)d"
+msgid "HVS Response Data: %(res)s"
 msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:1664
+#: cinder/volume/drivers/huawei/rest_common.py:75
 #, python-format
-msgid ""
-"Finished sparse_copy in %(duration).2f secs, %(compression_pct).2f%% "
-"reduction in size"
+msgid "Bad response from server: %s"
 msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:1714
-msgid ""
-"XenServer tools installed in this image are capable of network injection."
-" Networking files will not bemanipulated"
+#: cinder/volume/drivers/huawei/rest_common.py:82
+msgid "JSON transfer error"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/rest_common.py:102
+#, python-format
+msgid "Login error, reason is %s"
 msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:1722
+#: cinder/volume/drivers/huawei/rest_common.py:166
+#, python-format
 msgid ""
-"XenServer tools are present in this image but are not capable of network "
-"injection"
+"%(err)s\n"
+"result: %(res)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/rest_common.py:173
+#, python-format
+msgid "%s \"data\" was not in result."
 msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:1726
-msgid "XenServer tools are not installed in this image"
+#: cinder/volume/drivers/huawei/rest_common.py:208
+msgid "Can't find the Qos policy in array"
 msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:1742
-msgid "Manipulating interface files directly"
+#: cinder/volume/drivers/huawei/rest_common.py:246
+msgid "Can't find lun or lun group in array"
 msgstr ""

-#: cinder/virt/xenapi/vm_utils.py:1751
+#: cinder/volume/drivers/huawei/rest_common.py:280
 #, python-format
-msgid "Failed to mount filesystem (expected for non-linux instances): %s"
+msgid "Invalid resource pool: %s"
 msgstr ""

-#: cinder/virt/xenapi/vmops.py:131 cinder/virt/xenapi/vmops.py:722
+#: cinder/volume/drivers/huawei/rest_common.py:298
 #, python-format
-msgid "Updating progress to %(progress)d"
+msgid "Get pool info error, pool name is:%s"
 msgstr ""

-#: cinder/virt/xenapi/vmops.py:231
+#: cinder/volume/drivers/huawei/rest_common.py:327
 #, python-format
-msgid "Attempted to power on non-existent instance bad instance id %s"
+msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s."
 msgstr ""

-#: cinder/virt/xenapi/vmops.py:233
-#, fuzzy, python-format
-msgid "Starting instance"
-msgstr "Reiniciando a instância %s"
+#: cinder/volume/drivers/huawei/rest_common.py:354
+#, python-format
+msgid "_stop_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s."
+msgstr ""

-#: cinder/virt/xenapi/vmops.py:303
-msgid "Removing kernel/ramdisk files from dom0"
+#: cinder/volume/drivers/huawei/rest_common.py:474
+#, python-format
+msgid ""
+"_mapping_hostgroup_and_lungroup: lun_group: %(lun_group)sview_id: "
+"%(view_id)s"
 msgstr ""

-#: cinder/virt/xenapi/vmops.py:358
-msgid "Failed to spawn, rolling back"
+#: cinder/volume/drivers/huawei/rest_common.py:511
+#: cinder/volume/drivers/huawei/rest_common.py:543
+#, python-format
+msgid "initiator name:%(initiator_name)s, volume name:%(volume)s."
 msgstr ""

-#: cinder/virt/xenapi/vmops.py:443
-msgid "Detected ISO image type, creating blank VM for install"
+#: cinder/volume/drivers/huawei/rest_common.py:527
+#, python-format
+msgid "host lun id is %s"
 msgstr ""

-#: cinder/virt/xenapi/vmops.py:462
-msgid "Auto configuring disk, attempting to resize partition..."
+#: cinder/volume/drivers/huawei/rest_common.py:553
+#, python-format
+msgid "the free wwns %s"
 msgstr ""

-#: cinder/virt/xenapi/vmops.py:515
+#: cinder/volume/drivers/huawei/rest_common.py:574
 #, python-format
-msgid "Invalid value for injected_files: %r"
+msgid "the fc server properties is:%s"
 msgstr ""

-#: cinder/virt/xenapi/vmops.py:520
+#: cinder/volume/drivers/huawei/rest_common.py:688
 #, python-format
-msgid "Injecting file path: '%s'"
+msgid "JSON transfer data error. %s"
 msgstr ""

-#: cinder/virt/xenapi/vmops.py:527
-msgid "Setting admin password"
+#: cinder/volume/drivers/huawei/rest_common.py:874
+#, python-format
+msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s."
 msgstr ""

-#: cinder/virt/xenapi/vmops.py:531
-msgid "Resetting network"
+#: cinder/volume/drivers/huawei/rest_common.py:937
+#, python-format
+msgid ""
+"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". "
+"LUNType:%(fetchtype)s"
 msgstr ""

-#: cinder/virt/xenapi/vmops.py:538
-msgid "Setting VCPU weight"
+#: cinder/volume/drivers/huawei/rest_common.py:964
+#, python-format
+msgid ""
+"PrefetchType config is wrong. PrefetchType must in 1,2,3,4. fetchtype "
+"is:%(fetchtype)s"
 msgstr ""

-#: cinder/virt/xenapi/vmops.py:544
-msgid "Starting VM"
+#: cinder/volume/drivers/huawei/rest_common.py:970
+msgid "Use default prefetch fetchtype. Prefetch fetchtype:Intelligent."
 msgstr ""

-#: cinder/virt/xenapi/vmops.py:551
+#: cinder/volume/drivers/huawei/rest_common.py:982
 #, python-format
 msgid ""
-"Latest agent build for %(hypervisor)s/%(os)s/%(architecture)s is "
-"%(version)s"
+"_wait_for_luncopy:LUNcopy status is not normal.LUNcopy name: "
+"%(luncopyname)s"
 msgstr ""

-#: cinder/virt/xenapi/vmops.py:554
+#: cinder/volume/drivers/huawei/rest_common.py:1056
 #, python-format
-msgid "No agent build found for %(hypervisor)s/%(os)s/%(architecture)s"
+msgid ""
+"_get_iscsi_port_info: Failed to get iscsi port info through config IP "
+"%(ip)s, please check config file."
 msgstr ""

-#: cinder/virt/xenapi/vmops.py:561
-msgid "Waiting for instance state to become running"
+#: cinder/volume/drivers/huawei/rest_common.py:1101
+#, python-format
+msgid "_get_tgt_iqn: iSCSI target iqn is %s"
 msgstr ""

-#: cinder/virt/xenapi/vmops.py:573
-msgid "Querying agent version"
+#: cinder/volume/drivers/huawei/rest_common.py:1124
+#, python-format
+msgid "_parse_volume_type: type id: %(type_id)s config parameter is: %(params)s"
 msgstr ""

-#: cinder/virt/xenapi/vmops.py:576
+#: cinder/volume/drivers/huawei/rest_common.py:1157
 #, python-format
-msgid "Instance agent version: %s"
+msgid ""
+"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key"
+" in extra_specs and make it consistent with the configuration file "
+"%(conf)s."
 msgstr ""

-#: cinder/virt/xenapi/vmops.py:581
+#: cinder/volume/drivers/huawei/rest_common.py:1162
 #, python-format
-msgid "Updating Agent to %s"
+msgid "The config parameters are: %s"
 msgstr ""

-#: cinder/virt/xenapi/vmops.py:616
+#: cinder/volume/drivers/huawei/rest_common.py:1239
+#: cinder/volume/drivers/huawei/ssh_common.py:118
+#: cinder/volume/drivers/huawei/ssh_common.py:1265
 #, python-format
-msgid "No opaque_ref could be determined for '%s'."
+msgid "_check_conf_file: Config file invalid. %s must be set."
 msgstr ""

-#: cinder/virt/xenapi/vmops.py:670
-msgid "Finished snapshot and upload for VM"
+#: cinder/volume/drivers/huawei/rest_common.py:1246
+#: cinder/volume/drivers/huawei/ssh_common.py:125
+msgid "_check_conf_file: Config file invalid. StoragePool must be set."
 msgstr ""

-#: cinder/virt/xenapi/vmops.py:677
-msgid "Starting snapshot for VM"
+#: cinder/volume/drivers/huawei/rest_common.py:1256
+#, python-format
+msgid ""
+"_check_conf_file: Config file invalid. Host OSType invalid.\n"
+"The valid values are: %(os_list)s"
 msgstr ""

-#: cinder/virt/xenapi/vmops.py:686
-#, fuzzy, python-format
-msgid "Unable to Snapshot instance: %(exc)s"
-msgstr "Não é possível anexar o volume na instância %s"
-
-#: cinder/virt/xenapi/vmops.py:702
-msgid "Failed to transfer vhd to new host"
+#: cinder/volume/drivers/huawei/rest_common.py:1300
+msgid "Can not find lun in array"
 msgstr ""

-#: cinder/virt/xenapi/vmops.py:770
+#: cinder/volume/drivers/huawei/ssh_common.py:54
 #, python-format
-msgid "Resizing down VDI %(cow_uuid)s from %(old_gb)dGB to %(new_gb)dGB"
+msgid "ssh_read: Read SSH timeout. %s"
 msgstr ""

-#: cinder/virt/xenapi/vmops.py:893
-#, python-format
-msgid "Resizing up VDI %(vdi_uuid)s from %(old_gb)dGB to %(new_gb)dGB"
+#: cinder/volume/drivers/huawei/ssh_common.py:70
+msgid "No response message. Please check system status."
 msgstr ""

-#: cinder/virt/xenapi/vmops.py:901
-msgid "Resize complete"
+#: cinder/volume/drivers/huawei/ssh_common.py:101
+#: cinder/volume/drivers/huawei/ssh_common.py:1249
+msgid "do_setup"
 msgstr ""

-#: cinder/virt/xenapi/vmops.py:928
+#: cinder/volume/drivers/huawei/ssh_common.py:135
+#: cinder/volume/drivers/huawei/ssh_common.py:1287
 #, python-format
-msgid "Failed to query agent version: %(resp)r"
+msgid ""
+"_check_conf_file: Config file invalid. Host OSType is invalid.\n"
+"The valid values are: %(os_list)s"
 msgstr ""

-#: cinder/virt/xenapi/vmops.py:949
+#: cinder/volume/drivers/huawei/ssh_common.py:169
 #, python-format
-msgid "domid changed from %(domid)s to %(newdomid)s"
+msgid "_get_login_info: %s"
 msgstr ""

-#: cinder/virt/xenapi/vmops.py:962
+#: cinder/volume/drivers/huawei/ssh_common.py:224
 #, python-format
-msgid "Failed to update agent: %(resp)r"
+msgid "create_volume: volume name: %s"
 msgstr ""

-#: cinder/virt/xenapi/vmops.py:983
+#: cinder/volume/drivers/huawei/ssh_common.py:242
 #, python-format
-msgid "Failed to exchange keys: %(resp)r"
+msgid ""
+"_name_translate: Name in cinder: %(old)s, new name in storage system: "
+"%(new)s"
 msgstr ""

-#: cinder/virt/xenapi/vmops.py:998
+#: cinder/volume/drivers/huawei/ssh_common.py:279
 #, python-format
-msgid "Failed to update password: %(resp)r"
+msgid ""
+"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key"
+" in extra_specs and make it consistent with the element in configuration "
+"file %(conf)s."
 msgstr ""

-#: cinder/virt/xenapi/vmops.py:1023
+#: cinder/volume/drivers/huawei/ssh_common.py:373
+#: cinder/volume/drivers/huawei/ssh_common.py:1451
 #, python-format
-msgid "Failed to inject file: %(resp)r"
+msgid "LUNType must be \"Thin\" or \"Thick\". LUNType:%(type)s"
 msgstr ""

-#: cinder/virt/xenapi/vmops.py:1032
-msgid "VM already halted, skipping shutdown..."
+#: cinder/volume/drivers/huawei/ssh_common.py:395
+msgid ""
+"_parse_conf_lun_params: Use default prefetch type. Prefetch type: "
+"Intelligent"
 msgstr ""

-#: cinder/virt/xenapi/vmops.py:1036
-msgid "Shutting down VM"
+#: cinder/volume/drivers/huawei/ssh_common.py:421
+#, python-format
+msgid ""
+"_get_maximum_capacity_pool_id: Failed to get pool id. Please check config"
+" file and make sure the StoragePool %s is created in storage array."
 msgstr ""

-#: cinder/virt/xenapi/vmops.py:1054
-msgid "Unable to find VBD for VM"
+#: cinder/volume/drivers/huawei/ssh_common.py:436
+#, python-format
+msgid "CLI command: %s"
 msgstr ""

-#: cinder/virt/xenapi/vmops.py:1097
-msgid "Using RAW or VHD, skipping kernel and ramdisk deletion"
+#: cinder/volume/drivers/huawei/ssh_common.py:466
+#, python-format
+msgid ""
+"_execute_cli: Can not connect to IP %(old)s, try to connect to the other "
+"IP %(new)s."
 msgstr ""

-#: cinder/virt/xenapi/vmops.py:1104
-msgid "instance has a kernel or ramdisk but not both"
+#: cinder/volume/drivers/huawei/ssh_common.py:501
+#, python-format
+msgid "_execute_cli: %s"
 msgstr ""

-#: cinder/virt/xenapi/vmops.py:1111
-msgid "kernel/ramdisk files removed"
+#: cinder/volume/drivers/huawei/ssh_common.py:511
+#, python-format
+msgid "delete_volume: volume name: %s"
 msgstr ""

-#: cinder/virt/xenapi/vmops.py:1121
-msgid "VM destroyed"
+#: cinder/volume/drivers/huawei/ssh_common.py:516
+#, python-format
+msgid "delete_volume: Volume %(name)s does not exist."
 msgstr ""

-#: cinder/virt/xenapi/vmops.py:1147
-msgid "Destroying VM"
+#: cinder/volume/drivers/huawei/ssh_common.py:570
+#, python-format
+msgid ""
+"create_volume_from_snapshot: snapshot name: %(snapshot)s, volume name: "
+"%(volume)s"
 msgstr ""

-#: cinder/virt/xenapi/vmops.py:1169
-msgid "VM is not present, skipping destroy..."
+#: cinder/volume/drivers/huawei/ssh_common.py:580
+#, python-format
+msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist."
 msgstr ""

-#: cinder/virt/xenapi/vmops.py:1222
+#: cinder/volume/drivers/huawei/ssh_common.py:650
 #, python-format
-msgid "Instance is already in Rescue Mode: %s"
+msgid "_wait_for_luncopy: LUNcopy %(luncopyname)s status is %(status)s."
 msgstr ""

-#: cinder/virt/xenapi/vmops.py:1296
+#: cinder/volume/drivers/huawei/ssh_common.py:688
 #, python-format
-msgid "Found %(instance_count)d hung reboots older than %(timeout)d seconds"
+msgid "create_cloned_volume: src volume: %(src)s, tgt volume: %(tgt)s"
 msgstr ""

-#: cinder/virt/xenapi/vmops.py:1300
-msgid "Automatically hard rebooting"
+#: cinder/volume/drivers/huawei/ssh_common.py:697
+#, python-format
+msgid "Source volume %(name)s does not exist."
 msgstr ""

-#: cinder/virt/xenapi/vmops.py:1363
+#: cinder/volume/drivers/huawei/ssh_common.py:739
 #, python-format
-msgid "Setting migration %(migration_id)s to error: %(reason)s"
+msgid ""
+"extend_volume: extended volume name: %(extended_name)s new added volume "
+"name: %(added_name)s new added volume size: %(added_size)s"
 msgstr ""

-#: cinder/virt/xenapi/vmops.py:1374
+#: cinder/volume/drivers/huawei/ssh_common.py:747
 #, python-format
-msgid ""
-"Automatically confirming migration %(migration_id)s for instance "
-"%(instance_uuid)s"
+msgid "extend_volume: volume %s does not exist."
 msgstr ""

-#: cinder/virt/xenapi/vmops.py:1379
+#: cinder/volume/drivers/huawei/ssh_common.py:779
 #, python-format
-msgid "Instance %(instance_uuid)s not found"
+msgid "create_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s"
 msgstr ""

-#: cinder/virt/xenapi/vmops.py:1383
-msgid "In ERROR state"
+#: cinder/volume/drivers/huawei/ssh_common.py:785
+msgid "create_snapshot: Resource pool needs 1GB valid size at least."
 msgstr ""

-#: cinder/virt/xenapi/vmops.py:1389
+#: cinder/volume/drivers/huawei/ssh_common.py:792
 #, python-format
-msgid "In %(task_state)s task_state, not RESIZE_VERIFY"
+msgid "create_snapshot: Volume %(name)s does not exist."
 msgstr ""

-#: cinder/virt/xenapi/vmops.py:1396
+#: cinder/volume/drivers/huawei/ssh_common.py:855
 #, python-format
-msgid "Error auto-confirming resize: %(e)s. Will retry later."
+msgid "delete_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s"
 msgstr ""

-#: cinder/virt/xenapi/vmops.py:1418
-msgid "Could not get bandwidth info."
+#: cinder/volume/drivers/huawei/ssh_common.py:865
+#, python-format
+msgid ""
+"delete_snapshot: Can not delete snapshot %s for it is a source LUN of "
+"LUNCopy."
 msgstr ""

-#: cinder/virt/xenapi/vmops.py:1469
-msgid "Injecting network info to xenstore"
+#: cinder/volume/drivers/huawei/ssh_common.py:873
+#, python-format
+msgid "delete_snapshot: Snapshot %(snap)s does not exist."
 msgstr ""

-#: cinder/virt/xenapi/vmops.py:1483
-msgid "Creating vifs"
+#: cinder/volume/drivers/huawei/ssh_common.py:916
+#, python-format
+msgid ""
+"%(func)s: %(msg)s\n"
+"CLI command: %(cmd)s\n"
+"CLI out: %(out)s"
 msgstr ""

-#: cinder/virt/xenapi/vmops.py:1492
-#, fuzzy, python-format
-msgid "Creating VIF for network %(network_ref)s"
-msgstr "Criando a VIF para VM %(vm_ref)s, rede %(network_ref)s."
+#: cinder/volume/drivers/huawei/ssh_common.py:933
+#, python-format
+msgid "map_volume: Volume %s was not found."
+msgstr ""

-#: cinder/virt/xenapi/vmops.py:1495
-#, fuzzy, python-format
-msgid "Created VIF %(vif_ref)s, network %(network_ref)s"
-msgstr "Criando a VIF para VM %(vm_ref)s, rede %(network_ref)s."
+#: cinder/volume/drivers/huawei/ssh_common.py:1079
+#, python-format
+msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s."
+msgstr ""

-#: cinder/virt/xenapi/vmops.py:1520
-msgid "Injecting hostname to xenstore"
+#: cinder/volume/drivers/huawei/ssh_common.py:1102
+#, python-format
+msgid "remove_map: Host %s does not exist."
 msgstr ""

-#: cinder/virt/xenapi/vmops.py:1545
+#: cinder/volume/drivers/huawei/ssh_common.py:1106
 #, python-format
-msgid ""
-"The agent call to %(method)s returned an invalid response: %(ret)r. "
-"path=%(path)s; args=%(args)r"
+msgid "remove_map: Volume %s does not exist."
 msgstr ""

-#: cinder/virt/xenapi/vmops.py:1566
+#: cinder/volume/drivers/huawei/ssh_common.py:1119
 #, python-format
-msgid "TIMEOUT: The call to %(method)s timed out. args=%(args)r"
+msgid "remove_map: No map between host %(host)s and volume %(volume)s."
 msgstr ""

-#: cinder/virt/xenapi/vmops.py:1570
+#: cinder/volume/drivers/huawei/ssh_common.py:1138
 #, python-format
 msgid ""
-"NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. "
-"args=%(args)r"
+"_delete_map: There are IOs accessing the system. Retry to delete host map"
+" %(mapid)s 10s later."
 msgstr ""

-#: cinder/virt/xenapi/vmops.py:1575
+#: cinder/volume/drivers/huawei/ssh_common.py:1146
 #, python-format
-msgid "The call to %(method)s returned an error: %(e)s. args=%(args)r"
+msgid ""
+"_delete_map: Failed to delete host map %(mapid)s.\n"
+"CLI out: %(out)s"
 msgstr ""

-#: cinder/virt/xenapi/vmops.py:1661
-#, python-format
-msgid "OpenSSL error: %s"
-msgstr "Erro de OpenSSL: %s"
+#: cinder/volume/drivers/huawei/ssh_common.py:1185
+msgid "_update_volume_stats: Updating volume stats."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/ssh_common.py:1277
+msgid "_check_conf_file: Config file invalid. StoragePool must be specified."
+msgstr ""

-#: cinder/virt/xenapi/volume_utils.py:52
-msgid "creating sr within volume_utils"
+#: cinder/volume/drivers/huawei/ssh_common.py:1311
+msgid ""
+"_get_device_type: The driver only supports Dorado5100 and Dorado 2100 G2 "
+"now."
msgstr "" -#: cinder/virt/xenapi/volume_utils.py:55 cinder/virt/xenapi/volume_utils.py:83 +#: cinder/volume/drivers/huawei/ssh_common.py:1389 #, python-format -msgid "type is = %s" +msgid "" +"create_volume_from_snapshot: %(device)s does not support create volume " +"from snapshot." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:58 cinder/virt/xenapi/volume_utils.py:86 +#: cinder/volume/drivers/huawei/ssh_common.py:1396 #, python-format -msgid "name = %s" +msgid "create_cloned_volume: %(device)s does not support clone volume." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:71 +#: cinder/volume/drivers/huawei/ssh_common.py:1404 #, python-format -msgid "Created %(label)s as %(sr_ref)s." +msgid "extend_volume: %(device)s does not support extend volume." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:76 cinder/virt/xenapi/volume_utils.py:174 -msgid "Unable to create Storage Repository" +#: cinder/volume/drivers/huawei/ssh_common.py:1413 +#, python-format +msgid "create_snapshot: %(device)s does not support snapshot." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:80 -msgid "introducing sr within volume_utils" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:132 +msgid "enter: do_setup" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:103 cinder/virt/xenapi/volume_utils.py:170 -#: cinder/virt/xenapi/volumeops.py:156 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:142 #, python-format -msgid "Introduced %(label)s as %(sr_ref)s." +msgid "Failed getting details for pool %s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:106 -msgid "Creating pbd for SR" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:179 +msgid "do_setup: No configured nodes." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:108 -msgid "Plugging SR" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:182 +msgid "leave: do_setup" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:116 cinder/virt/xenapi/volumeops.py:160 -msgid "Unable to introduce Storage Repository" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:186 +msgid "enter: check_for_setup_error" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:127 cinder/virt/xenapi/volumeops.py:50 -msgid "Unable to get SR using uuid" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:190 +msgid "Unable to determine system name" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:129 -#, python-format -msgid "Forgetting SR %s..." +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:193 +msgid "Unable to determine system id" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:137 -msgid "Unable to forget Storage Repository" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:196 +msgid "Unable to determine pool extent size" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:157 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:203 +#: cinder/volume/drivers/netapp/iscsi.py:122 +#: cinder/volume/drivers/netapp/nfs.py:639 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:157 #, python-format -msgid "Introducing %s..." -msgstr "Introduzindo %s..." 
+msgid "%s is not set" +msgstr "" -#: cinder/virt/xenapi/volume_utils.py:186 -#, python-format -msgid "Unable to find SR from VBD %s" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:209 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:204 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:217 #, python-format -msgid "Ignoring exception %(exc)s when getting PBDs for %(sr_ref)s" +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:210 -#, python-format -msgid "Ignoring exception %(exc)s when unplugging PBD %(pbd)s" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:225 +msgid "leave: check_for_setup_error" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:234 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:235 #, python-format -msgid "Unable to introduce VDI on SR %s" +msgid "ensure_export: Volume %s not found on storage" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:242 -#, python-format -msgid "Unable to get record of VDI %s on" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:254 +msgid "The connector does not contain the required information." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:264 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:277 #, python-format -msgid "Unable to introduce VDI for SR %s" +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:274 -#, python-format -msgid "Error finding vdis in SR %s" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:297 +msgid "CHAP secret exists for host but CHAP is disabled" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:281 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:302 #, python-format -msgid "Unable to find vbd for vdi %s" +msgid "initialize_connection: Failed to get attributes for volume %s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:315 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:314 #, python-format -msgid "Unable to obtain target information %(data)s, %(mountpoint)s" +msgid "Did not find expected column name in lsvdisk: %s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:341 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:316 #, python-format -msgid "Mountpoint cannot be translated: %s" -msgstr "Ponto de montagem não pode ser traduzido: %s" +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" -#: cinder/virt/xenapi/volumeops.py:64 -msgid "Could not find VDI ref" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:333 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:69 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:342 #, python-format -msgid "Creating SR %s" +msgid "initialize_connection: Did not find a preferred node for volume %s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:73 -msgid "Could not create SR" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:365 +msgid "" +"Could not get FC connection information for the host-volume connection. " +"Is the host configured properly for FC connections?" 
msgstr "" -#: cinder/virt/xenapi/volumeops.py:76 -msgid "Could not retrieve SR record" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:380 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" msgstr "" -#: cinder/virt/xenapi/volumeops.py:81 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:385 #, python-format -msgid "Introducing SR %s" +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:85 -msgid "SR found in xapi database. No need to introduce" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:403 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:90 -msgid "Could not introduce SR" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:411 +msgid "terminate_connection: Failed to get host name from connector." msgstr "" -#: cinder/virt/xenapi/volumeops.py:94 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:421 #, python-format -msgid "Checking for SR %s" +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:106 -#, python-format -msgid "SR %s not found in the xapi database" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:447 +msgid "create_volume_from_snapshot: Source and destination size differ." msgstr "" -#: cinder/virt/xenapi/volumeops.py:112 -msgid "Could not forget SR" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:459 +msgid "create_cloned_volume: Source and destination size differ." msgstr "" -#: cinder/virt/xenapi/volumeops.py:121 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:470 #, python-format -msgid "Attach_volume: %(connection_info)s, %(instance_name)s, %(mountpoint)s" +msgid "enter: extend_volume: volume %s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:178 -#, python-format -msgid "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:474 +msgid "extend_volume: Extending a volume with snapshots is not supported." 
msgstr "" -"Não é possível criar o VDI no SR %(sr_ref)s para a instância " -"%(instance_name)s" -#: cinder/virt/xenapi/volumeops.py:189 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:481 #, python-format -msgid "Unable to use SR %(sr_ref)s for instance %(instance_name)s" -msgstr "Não é possível usar o SR %(sr_ref)s para a instância %(instance_name)s" +msgid "leave: extend_volume: volume %s" +msgstr "" -#: cinder/virt/xenapi/volumeops.py:197 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:497 #, python-format -msgid "Unable to attach volume to instance %s" -msgstr "Não é possível anexar o volume na instância %s" +msgid "enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" -#: cinder/virt/xenapi/volumeops.py:200 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:523 #, python-format -msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s" -msgstr "Ponto de montagem %(mountpoint)s conectada à instância %(instance_name)s" +msgid "leave: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" -#: cinder/virt/xenapi/volumeops.py:210 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:540 #, python-format -msgid "Detach_volume: %(instance_name)s, %(mountpoint)s" -msgstr "Detach_volume: %(instance_name)s, %(mountpoint)s" +msgid "" +"enter: retype: id=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" -#: cinder/virt/xenapi/volumeops.py:219 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:581 #, python-format -msgid "Unable to locate volume %s" -msgstr "Não é possível localizar o volume %s" +msgid "" +"exit: retype: ild=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" -#: cinder/virt/xenapi/volumeops.py:227 -#, python-format -msgid "Unable to detach volume %s" -msgstr "Não é possível desconectar o volume %s" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:622 +msgid "Could not get pool data from the storage" +msgstr "" -#: cinder/virt/xenapi/volumeops.py:232 -#, python-format -msgid "Unable to destroy vbd %s" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:623 +msgid "_update_volume_stats: Could not get storage pool data" msgstr "" -#: cinder/virt/xenapi/volumeops.py:239 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:44 #, python-format -msgid "Error purging SR %s" +msgid "Could not find key in output of command %(cmd)s: %(out)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:241 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:64 #, python-format -msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s" -msgstr "Ponto de montagem %(mountpoint)s desanexada da instância %(instance_name)s" +msgid "Failed to get code level (%s)." 
+msgstr "" -#: cinder/vnc/xvp_proxy.py:98 cinder/vnc/xvp_proxy.py:103 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:86 #, python-format -msgid "Error in handshake: %s" +msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s" msgstr "" -#: cinder/vnc/xvp_proxy.py:119 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:143 #, python-format -msgid "Invalid request: %s" +msgid "WWPN on node %(node)s: %(wwpn)s" msgstr "" -#: cinder/vnc/xvp_proxy.py:139 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:165 #, python-format -msgid "Request: %s" +msgid "Failed to find host %s" msgstr "" -#: cinder/vnc/xvp_proxy.py:142 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:178 #, python-format -msgid "Request made with missing token: %s" +msgid "enter: get_host_from_connector: %s" msgstr "" -#: cinder/vnc/xvp_proxy.py:153 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:207 #, python-format -msgid "Request made with invalid token: %s" +msgid "leave: get_host_from_connector: host %s" msgstr "" -#: cinder/vnc/xvp_proxy.py:160 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:218 #, python-format -msgid "Unexpected error: %s" +msgid "enter: create_host: host %s" msgstr "" -#: cinder/vnc/xvp_proxy.py:180 -#, python-format -msgid "Starting cinder-xvpvncproxy node (version %s)" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:224 +msgid "create_host: Host name is not unicode or string" msgstr "" -#: cinder/volume/api.py:74 cinder/volume/api.py:220 -msgid "status must be available" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:235 +msgid "create_host: No initiators or wwpns supplied." msgstr "" -#: cinder/volume/api.py:85 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:265 #, python-format -msgid "Quota exceeded for %(pid)s, tried to create %(size)sG volume" +msgid "leave: create_host: host %(host)s - %(host_name)s" msgstr "" -#: cinder/volume/api.py:137 -#, fuzzy -msgid "Volume status must be available or error" -msgstr "O status do volume parece estar disponível" - -#: cinder/volume/api.py:142 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:275 #, python-format -msgid "Volume still has %d dependent snapshots" +msgid "enter: map_vol_to_host: volume %(volume_name)s to host %(host_name)s" msgstr "" -#: cinder/volume/api.py:223 -msgid "already attached" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:301 +#, python-format +msgid "" +"leave: map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host " +"%(host_name)s" msgstr "" -#: cinder/volume/api.py:230 -msgid "already detached" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:311 +#, python-format +msgid "enter: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" msgstr "" -#: cinder/volume/api.py:292 -msgid "must be available" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:318 +#, python-format +msgid "unmap_vol_from_host: No mapping of volume %(vol_name)s to any host found." msgstr "" -#: cinder/volume/api.py:325 -#, fuzzy -msgid "Volume Snapshot status must be available or error" -msgstr "O status do volume parece estar disponível" - -#: cinder/volume/driver.py:96 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:324 #, python-format -msgid "Recovering from a failed execute. Try number %s" +msgid "" +"unmap_vol_from_host: Multiple mappings of volume %(vol_name)s found, no " +"host specified." 
msgstr "" -#: cinder/volume/driver.py:106 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:336 #, python-format -msgid "volume group %s doesn't exist" +msgid "" +"unmap_vol_from_host: No mapping of volume %(vol_name)s to host %(host) " +"found." msgstr "" -#: cinder/volume/driver.py:270 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:348 #, python-format -msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %d" +msgid "leave: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" msgstr "" -#: cinder/volume/driver.py:318 -#, python-format -msgid "Skipping remove_export. No iscsi_target provisioned for volume: %d" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:377 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" msgstr "" -#: cinder/volume/driver.py:327 -#, python-format +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:383 msgid "" -"Skipping remove_export. No iscsi_target is presently exported for volume:" -" %d" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" msgstr "" -#: cinder/volume/driver.py:337 -msgid "ISCSI provider_location not stored, using discovery" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:390 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" msgstr "" -#: cinder/volume/driver.py:384 -#, python-format -msgid "Could not find iSCSI export for volume %s" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:397 +msgid "System does not support compression" msgstr "" -#: cinder/volume/driver.py:388 -#, python-format -msgid "ISCSI Discovery: Found %s" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:402 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" msgstr "" -#: cinder/volume/driver.py:466 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:408 #, python-format -msgid "Cannot confirm exported volume id:%(volume_id)s." +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" msgstr "" -#: cinder/volume/driver.py:493 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:417 #, python-format -msgid "FAKE ISCSI: %s" +msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s" msgstr "" -#: cinder/volume/driver.py:505 -#, python-format -msgid "rbd has no pool %s" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:452 +msgid "Protocol must be specified as ' iSCSI' or ' FC'." msgstr "" -#: cinder/volume/driver.py:579 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:495 #, python-format -msgid "Sheepdog is not working: %s" +msgid "enter: create_vdisk: vdisk %s " msgstr "" -#: cinder/volume/driver.py:581 -msgid "Sheepdog is not working" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:498 +#, python-format +msgid "leave: _create_vdisk: volume %s " msgstr "" -#: cinder/volume/driver.py:680 cinder/volume/driver.py:685 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:525 #, python-format -msgid "LoggingVolumeDriver: %s" +msgid "" +"Unexecpted mapping status %(status)s for mapping%(id)s. Attributes: " +"%(attr)s" msgstr "" -#: cinder/volume/manager.py:96 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:535 #, python-format -msgid "Re-exporting %s volumes" -msgstr "Re-exportando %s volumes" +msgid "" +"Mapping %(id)s prepare failed to complete within theallotted %(to)d " +"seconds timeout. Terminating." 
+msgstr "" -#: cinder/volume/manager.py:101 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:544 #, python-format -msgid "volume %s: skipping export" -msgstr "volume %s: ignorando export" +msgid "" +"enter: run_flashcopy: execute FlashCopy from source %(source)s to target " +"%(target)s" +msgstr "" -#: cinder/volume/manager.py:107 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:552 #, python-format -msgid "volume %s: creating" -msgstr "volume %s: criando" +msgid "leave: run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" -#: cinder/volume/manager.py:119 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:572 #, python-format -msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" -msgstr "volume %(vol_name)s: criando lv com tamanho %(vol_size)sG" +msgid "Loopcall: _check_vdisk_fc_mappings(), vdisk %s" +msgstr "" -#: cinder/volume/manager.py:131 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:595 #, python-format -msgid "volume %s: creating export" -msgstr "volume %s: criando o export" +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" -#: cinder/volume/manager.py:144 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:631 #, python-format -msgid "volume %s: created successfully" -msgstr "volume %s: criado com sucesso" - -#: cinder/volume/manager.py:153 -msgid "Volume is still attached" -msgstr "O volume continua atachado" - -#: cinder/volume/manager.py:155 -msgid "Volume is not local to this node" -msgstr "O volume não pertence à este node" +msgid "Calling _ensure_vdisk_no_fc_mappings: vdisk %s" +msgstr "" -#: cinder/volume/manager.py:159 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:639 #, python-format -msgid "volume %s: removing export" -msgstr "volume %s: removendo export" +msgid "enter: delete_vdisk: vdisk %s" +msgstr "" -#: cinder/volume/manager.py:161 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:641 #, python-format -msgid "volume %s: deleting" -msgstr "volume %s: removendo" +msgid "Tried to delete non-existant vdisk %s." +msgstr "" -#: cinder/volume/manager.py:164 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:645 #, python-format -msgid "volume %s: volume is busy" +msgid "leave: delete_vdisk: vdisk %s" msgstr "" -#: cinder/volume/manager.py:176 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:649 #, python-format -msgid "volume %s: deleted successfully" -msgstr "volume %s: remoção realizada com sucesso" +msgid "enter: create_copy: snapshot %(src)s to %(tgt)s" +msgstr "" -#: cinder/volume/manager.py:183 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:654 #, python-format -msgid "snapshot %s: creating" +msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist" msgstr "" -#: cinder/volume/manager.py:187 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:669 #, python-format -msgid "snapshot %(snap_name)s: creating" +msgid "leave: _create_copy: snapshot %(tgt)s from vdisk %(src)s" msgstr "" -#: cinder/volume/manager.py:202 -#, python-format -msgid "snapshot %s: created successfully" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:691 +msgid "migrate_volume started without a vdisk copy in the expected pool." 
msgstr "" -#: cinder/volume/manager.py:211 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:743 #, python-format -msgid "snapshot %s: deleting" +msgid "" +"Ignore change IO group as storage code level is %(code_level)s, below " +"then 6.4.0.0" msgstr "" -#: cinder/volume/manager.py:214 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:35 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:211 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:244 #, fuzzy, python-format -msgid "snapshot %s: snapshot is busy" -msgstr "instância %s: fazendo um snapshot" +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" +"%(description)s\n" +"Comando: %(cmd)s\n" +"Código de saída: %(exit_code)s\n" +"Saída padrão: %(stdout)r\n" +"Erro: %(stderr)r" -#: cinder/volume/manager.py:226 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:53 #, python-format -msgid "snapshot %s: deleted successfully" +msgid "Expected no output from CLI command %(cmd)s, got %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:65 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:256 +#, python-format +msgid "" +"Failed to parse CLI output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/volume/manager.py:310 -msgid "Checking volume capabilities" +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:142 +msgid "Must pass wwpn or host to lsfabric." msgstr "" -#: cinder/volume/manager.py:314 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:158 #, python-format -msgid "New capabilities found: %s" +msgid "Did not find success message nor error for %(fun)s: %(out)s" msgstr "" -#: cinder/volume/manager.py:325 -msgid "Clear capabilities" +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:163 +msgid "" +"storwize_svc_multihostmap_enabled is set to False, not allowing multi " +"host mapping." msgstr "" -#: cinder/volume/manager.py:329 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:347 #, python-format -msgid "Notification {%s} received" +msgid "Did not find expected key %(key)s in %(fun)s: %(raw)s" msgstr "" -#: cinder/volume/netapp.py:79 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:382 #, python-format -msgid "API %(name)sfailed: %(reason)s" +msgid "" +"Unexpected CLI response: header/row mismatch. header: %(header)s, row: " +"%(row)s" msgstr "" -#: cinder/volume/netapp.py:109 +#: cinder/volume/drivers/netapp/api.py:419 #, python-format -msgid "%s is not set" +msgid "No element by given name %s." msgstr "" -#: cinder/volume/netapp.py:128 -#, fuzzy -msgid "Connected to DFM server" -msgstr "Reconectado à fila" +#: cinder/volume/drivers/netapp/api.py:440 +msgid "Not a valid value for NaElement." +msgstr "" -#: cinder/volume/netapp.py:159 -#, fuzzy, python-format -msgid "Job failed: %s" -msgstr "NotFound lançado: %s" +#: cinder/volume/drivers/netapp/api.py:444 +msgid "NaElement name cannot be null." +msgstr "" -#: cinder/volume/netapp.py:240 -msgid "Failed to provision dataset member" +#: cinder/volume/drivers/netapp/api.py:468 +msgid "Type cannot be converted into NaElement." 
msgstr "" -#: cinder/volume/netapp.py:252 -msgid "No LUN was created by the provision job" +#: cinder/volume/drivers/netapp/common.py:75 +msgid "Required configuration not found" msgstr "" -#: cinder/volume/netapp.py:261 cinder/volume/netapp.py:433 -#, fuzzy, python-format -msgid "Failed to find LUN ID for volume %s" -msgstr "Não é possível localizar o volume %s" +#: cinder/volume/drivers/netapp/common.py:103 +#, python-format +msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s" +msgstr "" -#: cinder/volume/netapp.py:280 -msgid "Failed to remove and delete dataset member" +#: cinder/volume/drivers/netapp/common.py:109 +#, python-format +msgid "Storage family %s is not supported" msgstr "" -#: cinder/volume/netapp.py:603 cinder/volume/netapp.py:657 -#, fuzzy, python-format -msgid "No LUN ID for volume %s" -msgstr "Não é possível localizar o volume %s" +#: cinder/volume/drivers/netapp/common.py:116 +#, python-format +msgid "No default storage protocol found for storage family %(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:123 +#, python-format +msgid "" +"Protocol %(storage_protocol)s is not supported for storage family " +"%(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:130 +#, python-format +msgid "" +"NetApp driver of family %(storage_family)s and protocol " +"%(storage_protocol)s loaded" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:139 +msgid "Only loading netapp drivers supported." +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:158 +#, python-format +msgid "" +"The configured NetApp driver is deprecated. Please refer the link to " +"resolve the issue '%s'." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:69 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:105 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:150 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:166 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:175 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:191 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:227 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" -#: cinder/volume/netapp.py:607 cinder/volume/netapp.py:661 +#: cinder/volume/drivers/netapp/iscsi.py:232 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:238 #, fuzzy, python-format -msgid "Failed to get LUN details for LUN ID %s" +msgid "Failed to get LUN target details for the LUN %s" msgstr "Falha ao obter metadados para o ip: %s" -#: cinder/volume/netapp.py:614 +#: cinder/volume/drivers/netapp/iscsi.py:249 #, fuzzy, python-format -msgid "Failed to get host details for host ID %s" +msgid "Failed to get target portal for the LUN %s" msgstr "Falha ao obter metadados para o ip: %s" -#: cinder/volume/netapp.py:620 +#: cinder/volume/drivers/netapp/iscsi.py:252 #, fuzzy, python-format -msgid "Failed to get target portal for filer: %s" +msgid "Failed to get target IQN for the LUN %s" msgstr "Falha ao obter metadados para o ip: %s" -#: cinder/volume/netapp.py:625 +#: cinder/volume/drivers/netapp/iscsi.py:290 +#, fuzzy, python-format +msgid "Snapshot %s deletion successful" +msgstr "volume %s: remoção realizada com sucesso" + +#: cinder/volume/drivers/netapp/iscsi.py:310 +#: cinder/volume/drivers/netapp/iscsi.py:565 +#: cinder/volume/drivers/netapp/nfs.py:99 +#: cinder/volume/drivers/netapp/nfs.py:206 +#, python-format +msgid "Resizing %s failed. Cleaning volume." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:325 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:412 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:431 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:511 +msgid "Object is not a NetApp LUN." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:543 +#, python-format +msgid "Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:545 +#, python-format +msgid "Error getting lun attribute. Exception: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:600 +#, python-format +msgid "No need to extend volume %s as it is already the requested new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:606 +#, python-format +msgid "Resizing lun %s directly to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:633 +#, python-format +msgid "Lun %(path)s geometry failed. Message - %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:662 +#, python-format +msgid "Moving lun %(name)s to %(new_name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:677 +#, python-format +msgid "Resizing lun %s using sub clone to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:684 +#, python-format +msgid "%s cannot be sub clone resized as it is hosted on compressed volume" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:690 +#, python-format +msgid "%s cannot be sub clone resized as it contains no blocks." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:707 +#, python-format +msgid "Post clone resize lun %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:718 +#, python-format +msgid "Failure staging lun %s to tmp." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:723 +#, python-format +msgid "Failure moving new cloned lun to %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:727 +#, python-format +msgid "Failure deleting staged tmp lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:730 +#, python-format +msgid "Unknown exception in post clone resize lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:732 +#, python-format +msgid "Exception details: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:736 +msgid "Getting lun block count." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:741 +#, python-format +msgid "Failure getting lun info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:785 +#, python-format +msgid "Failed to get vol with required size and extra specs for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:796 +#, python-format +msgid "Error provisioning vol %(name)s on %(volume)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:841 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:982 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:986 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1087 +msgid "Cluster ssc is not updated. No volume stats found." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1149 +#: cinder/volume/drivers/netapp/nfs.py:1080 +msgid "Unsupported ONTAP version. ONTAP version 7.3.1 and above is supported." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1153 +#: cinder/volume/drivers/netapp/nfs.py:1084 +#: cinder/volume/drivers/netapp/utils.py:320 +msgid "Api version could not be determined." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1164 #, fuzzy, python-format -msgid "Failed to get target IQN for filer: %s" +msgid "Failed to get vol with required size for volume: %s" msgstr "Falha ao obter metadados para o ip: %s" -#: cinder/volume/san.py:113 cinder/volume/san.py:151 -msgid "Specify san_password or san_private_key" -msgstr "" +#: cinder/volume/drivers/netapp/iscsi.py:1273 +#, python-format +msgid "Error finding luns for volume %s. Verify volume exists." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1390 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1393 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1456 +msgid "Volume refresh job already running. Returning..." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1462 +#, python-format +msgid "Error refreshing vol capacity. Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1470 +#, python-format +msgid "Refreshing capacity info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:104 +#: cinder/volume/drivers/netapp/nfs.py:211 +#, python-format +msgid "NFS file %s not discovered." 
+msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:224 +#, python-format +msgid "Copied image to volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:230 +#, python-format +msgid "Registering image in cache %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:237 +#, python-format +msgid "" +"Exception while registering image %(image_id)s in cache. Exception: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:250 +#, python-format +msgid "Found cache file for image %(image_id)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:263 +#, python-format +msgid "Cloning img from cache for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:271 +msgid "Image cache cleaning in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:282 +msgid "Image cache cleaning in progress." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:293 +#, python-format +msgid "Cleaning cache for share %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:298 +#, python-format +msgid "Files to be queued for deletion %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:305 +#, python-format +msgid "Exception during cache cleaning %(share)s. Message - %(ex)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:311 +msgid "Image cache cleaning done." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:336 +#, python-format +msgid "Bytes to free %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:343 +#, python-format +msgid "Delete file path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:358 +#, python-format +msgid "Deleting file at path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:363 +#, python-format +msgid "Exception during deleting %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:395 +#, python-format +msgid "Unexpected exception in cloning image %(image_id)s. Message: %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:411 +#, python-format +msgid "Cloning image %s from cache" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:415 +#, python-format +msgid "Cache share: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:425 +#, python-format +msgid "Unexpected exception during image cloning in share %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:431 +#, python-format +msgid "Cloning image %s directly in share" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:436 +#, python-format +msgid "Share is cloneable %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:443 +#, python-format +msgid "Image is raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:450 +#, python-format +msgid "Image will locally be converted to raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:457 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:467 +#, python-format +msgid "Performing post clone for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:474 +msgid "NFS file could not be discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:478 +msgid "Checking file for resize" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:482 +#, python-format +msgid "Resizing file to %sG" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:488 +msgid "Resizing image file failed." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:510 +msgid "Discover file retries exhausted." 
+msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:529 +#, python-format +msgid "Image location not in the expected format %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:557 +#, python-format +msgid "Found possible share matches %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:561 +msgid "Unexpected exception while short listing used share." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:599 +#, python-format +msgid "Extending volume %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:710 +#, python-format +msgid "Shares on vserver %s will only be used for provisioning." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:714 +#: cinder/volume/drivers/netapp/nfs.py:892 +msgid "No vserver set in config. SSC will be disabled." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:757 +#, python-format +msgid "Exception creating vol %(name)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:765 +#, python-format +msgid "Volume %s could not be created on shares." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:815 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:856 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:862 +#, python-format +msgid "" +"Cloning with params volume %(volume)s, src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:924 +msgid "No cluster ssc stats found. Wait for next volume stats update." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:941 +msgid "No shares found hence skipping ssc refresh." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:978 +#: cinder/volume/drivers/netapp/nfs.py:1221 +#, python-format +msgid "Shortlisted del elg files %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:983 +#: cinder/volume/drivers/netapp/nfs.py:1226 +#, python-format +msgid "Getting file usage for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:988 +#: cinder/volume/drivers/netapp/nfs.py:1231 +#, python-format +msgid "file-usage for path %(path)s is %(bytes)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1005 +#: cinder/volume/drivers/netapp/nfs.py:1268 +#, python-format +msgid "Share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1007 +#: cinder/volume/drivers/netapp/nfs.py:1270 +#, python-format +msgid "No share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1038 +#, python-format +msgid "Found volume %(vol)s for share %(share)s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1129 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1139 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:241 +#, python-format +msgid "Unexpected error while creating ssc vol list. Message - %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:272 +#, python-format +msgid "Exception querying aggr options. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:313 +#, python-format +msgid "Exception querying sis information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:347 +#, python-format +msgid "Exception querying mirror information. 
%s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:379 +#, python-format +msgid "Exception querying storage disk. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:421 +#, python-format +msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:455 +#, python-format +msgid "Successfully completed stale refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:482 +#, python-format +msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:488 +#, python-format +msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:500 +msgid "Backend not a VolumeDriver." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:502 +msgid "Backend server not NaServer." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:505 +msgid "ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:517 +msgid "refresh stale ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:616 +msgid "Fatal error: User not permitted to query NetApp volumes." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:623 +#, python-format +msgid "" +"The user does not have access or sufficient privileges to use all ssc " +"apis. The ssc features %s may not work as expected." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:122 +msgid "ems executed successfully." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:124 +#, python-format +msgid "Failed to invoke ems. Message : %s" +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:137 +msgid "" +"It is not the recommended way to use drivers by NetApp. Please use " +"NetAppDriver to achieve the functionality." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:160 +msgid "Requires an NaServer instance." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:317 +msgid "Unsupported Clustered Data ONTAP version." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:99 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:150 +#, python-format +msgid "Extending volume: %(id)s New size: %(size)s GB" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:166 +#, python-format +msgid "Volume %s does not exist, it seems it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:179 +#, python-format +msgid "Cannot delete snapshot %(origin): %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:190 +#, python-format +msgid "Creating temp snapshot of the original volume: %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:200 +#: cinder/volume/drivers/nexenta/nfs.py:200 +#, python-format +msgid "Volume creation failed, deleting created snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:205 +#: cinder/volume/drivers/nexenta/nfs.py:205 +#, python-format +msgid "Failed to delete zfs snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:223 +#, python-format +msgid "Enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:250 +#, python-format +msgid "Remote NexentaStor appliance at %s should be SSH-bound." 
+msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:267 +#, python-format +msgid "" +"Cannot send source snapshot %(src)s to destination %(dst)s. Reason: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:275 +#, python-format +msgid "" +"Cannot delete temporary source snapshot %(src)s on NexentaStor Appliance:" +" %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:281 +#, python-format +msgid "Cannot delete source volume %(volume)s on NexentaStor Appliance: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:318 +#, python-format +msgid "Snapshot %s does not exist, it seems it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:439 +#: cinder/volume/drivers/windows/windows_utils.py:230 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:449 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:461 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:471 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:481 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:514 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:522 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:83 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:88 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:89 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:90 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:96 +#, fuzzy, python-format +msgid "Got response: %s" +msgstr "resposta %s" + +#: cinder/volume/drivers/nexenta/nfs.py:85 +#, python-format +msgid "Volume %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:89 +#, python-format +msgid "Folder %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:114 +#, python-format +msgid "Creating folder on Nexenta Store %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:146 +#, python-format +msgid "Cannot destroy created folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:176 +#, python-format +msgid "Cannot destroy cloned folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:227 +#, python-format +msgid "Folder %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:237 +#: cinder/volume/drivers/nexenta/nfs.py:268 +#, python-format +msgid "Snapshot %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:302 +#, python-format +msgid "Creating regular file: %s.This may take some time." 
+msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:313 +#, python-format +msgid "Regular file: %s created." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:365 +#, python-format +msgid "Sharing folder %s on Nexenta Store" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:393 +#, python-format +msgid "Shares loaded: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/utils.py:46 +#, python-format +msgid "Invalid value: \"%s\"" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:93 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:99 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:107 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:137 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:190 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:246 +#, python-format +msgid "Snapshot info: %(name)s => %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:321 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:79 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:166 +#, python-format +msgid "Invalid hp3parclient version. Version %s or greater required." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:179 +#, python-format +msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:193 +#, python-format +msgid "HP3PARCommon %(common_ver)s, hp3parclient %(rest_ver)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:212 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:494 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:228 +#, python-format +msgid "Failed to get domain because CPG (%s) doesn't exist on array." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:247 +#, python-format +msgid "Error extending volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:347 +#, python-format +msgid "command %s failed" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:390 +#, fuzzy, python-format +msgid "Error running ssh command: %s" +msgstr "Erro inesperado ao executar o comando." + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:586 +#, python-format +msgid "VV Set %s does not exist." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:633 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:684 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:752 +#, fuzzy, python-format +msgid "Volume (%s) already exists on array" +msgstr "group %s já existe" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1004 +#, python-format +msgid "Failure in update_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1019 +#, python-format +msgid "Failure in clear_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1031 +#, python-format +msgid "Error attaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1039 +#, python-format +msgid "Error detaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:124 +#, python-format +msgid "Invalid IP address format '%s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:158 +#, python-format +msgid "" +"Found invalid iSCSI IP address(s) in configuration option(s) " +"hp3par_iscsi_ips or iscsi_ip_address '%s.'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:164 +msgid "At least one valid iSCSI IP address must be set." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:266 +msgid "Least busy iSCSI port not found, using first iSCSI port in list." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:75 +#, python-format +msgid "Failure while invoking function: %(func)s. Error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:162 +#, python-format +msgid "Error while terminating session: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:165 +msgid "Successfully established connection to the server." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:172 +#, python-format +msgid "Error while logging out the user: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:218 +#, python-format +msgid "" +"Not authenticated error occurred. Will create session and try API call " +"again: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:258 +#, python-format +msgid "Task: %(task)s progress: %(prog)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:262 +#, python-format +msgid "Task %s status: success." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:266 +#: cinder/volume/drivers/vmware/api.py:271 +#, python-format +msgid "Task: %(task)s failed with error: %(err)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:290 +msgid "Lease is ready." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:294 +msgid "Lease initializing..." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:304 +#, python-format +msgid "Error: unknown lease state %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:51 +#, python-format +msgid "Read %(bytes)s out of %(max)s from ThreadSafePipe." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:56 +#, python-format +msgid "Completed transfer of size %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:102 +#, python-format +msgid "Initiating image service update on image: %(image)s with meta: %(meta)s" +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:117 +#, python-format +msgid "Glance image: %s is now active." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:123 +#, python-format +msgid "Glance image: %s is in killed state." 
+msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:132 +#, python-format +msgid "Glance image %(id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:171 +#, python-format +msgid "" +"Exception during HTTP connection close in VMwareHTTPWrite. Exception is " +"%s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:203 +#: cinder/volume/drivers/vmware/read_write_util.py:292 +msgid "Could not retrieve URL from lease." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:206 +#, python-format +msgid "Opening vmdk url: %s for write." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:231 +#, python-format +msgid "Written %s bytes to vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:242 +#: cinder/volume/drivers/vmware/read_write_util.py:318 +#, python-format +msgid "Updating progress to %s percent." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:258 +#: cinder/volume/drivers/vmware/read_write_util.py:334 +msgid "Lease released." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:260 +#: cinder/volume/drivers/vmware/read_write_util.py:336 +#, python-format +msgid "Lease is already in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:295 +#, python-format +msgid "Opening vmdk url: %s for read." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:307 +#, python-format +msgid "Read %s bytes from vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:150 +#, python-format +msgid "Error(s): %s occurred in the call to RetrievePropertiesEx." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:189 +#, python-format +msgid "No such SOAP method %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:198 +#, python-format +msgid "httplib error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:209 +#, python-format +msgid "Socket error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:218 +#, python-format +msgid "Type error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:225 +#, python-format +msgid "Error in %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:112 +#, python-format +msgid "Returning spec value %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:115 +#, python-format +msgid "Invalid spec value: %s specified." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:118 +#, python-format +msgid "Returning default spec value: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:169 +#, python-format +msgid "%s not set." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:174 +#, python-format +msgid "Successfully setup driver: %(driver)s for server: %(ip)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:219 +msgid "Backing not available, no operation to be performed." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:287 +#, python-format +msgid "" +"Unable to pick datastore to accommodate %(size)s bytes from the " +"datastores: %(dss)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:293 +#, python-format +msgid "" +"Selected datastore: %(datastore)s with %(host_count)d connected host(s) " +"for the volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:375 +#, python-format +msgid "" +"Unable to find suitable datastore for volume of size: %(vol)s GB under " +"host: %(host)s. 
More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:385 +#, python-format +msgid "Unable to find host to accommodate a disk of size: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:412 +#, python-format +msgid "" +"Unable to find suitable datastore for volume: %(vol)s under host: " +"%(host)s. More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:422 +#, python-format +msgid "Unable to create volume: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:441 +#, python-format +msgid "The instance: %s for which initialize connection is called, exists." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:448 +#, python-format +msgid "There is no backing for the volume: %s. Need to create one." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:456 +msgid "The instance for which initialize connection is called, does not exist." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:461 +#, python-format +msgid "Trying to boot from an empty volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:470 +#, python-format +msgid "" +"Returning connection_info: %(info)s for volume: %(volume)s with " +"connector: %(connector)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:518 +#, python-format +msgid "Snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:523 +#, python-format +msgid "There is no backing, so will not create snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:528 +#, python-format +msgid "Successfully created snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:549 +#, python-format +msgid "Delete snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:554 +#, python-format +msgid "There is no backing, and so there is no snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:558 +#, python-format +msgid "Successfully deleted snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:586 +#, python-format +msgid "Successfully cloned new backing: %(back)s from source VMDK file: %(vmdk)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:603 +#, python-format +msgid "" +"There is no backing for the source volume: %(svol)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:633 +#, python-format +msgid "" +"There is no backing for the source snapshot: %(snap)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:642 +#: cinder/volume/drivers/vmware/vmdk.py:982 +#, python-format +msgid "" +"There is no snapshot point for the snapshotted volume: %(snap)s. Not " +"creating any backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:678 +#, python-format +msgid "Cannot create image of disk format: %s. Only vmdk disk format is accepted." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:713 +#: cinder/volume/drivers/vmware/vmdk.py:771 +#, python-format +msgid "Fetching glance image: %(id)s to server: %(host)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:722 +#: cinder/volume/drivers/vmware/vmdk.py:792 +#, python-format +msgid "Done copying image: %(id)s to volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:725 +#, python-format +msgid "" +"Exception in copy_image_to_volume: %(excep)s. Deleting the backing: " +"%(back)s."
+msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:746 +#, python-format +msgid "Exception in _select_ds_for_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:749 +#, python-format +msgid "Selected datastore %(ds)s for new volume of size %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:784 +#, python-format +msgid "Exception in copy_image_to_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:787 +#, python-format +msgid "Deleting the backing: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:808 +#, python-format +msgid "Copy glance image: %s to create new volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:842 +msgid "Upload to glance of attached volume is not supported." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:847 +#, python-format +msgid "Copy Volume: %s to new image." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:853 +#, python-format +msgid "Backing not found, creating for volume: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:872 +#, python-format +msgid "Done copying volume %(vol)s to a new image %(img)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:922 +#, python-format +msgid "Relocating volume: %(backing)s to %(ds)s and %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:961 +#: cinder/volume/drivers/vmware/volumeops.py:630 +#, python-format +msgid "Successfully created clone: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:974 +#, python-format +msgid "" +"There is no backing for the snapshoted volume: %(snap)s. Not creating any" +" backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1010 +#, python-format +msgid "" +"There is no backing for the source volume: %(src)s. Not creating any " +"backing for volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1018 +#, python-format +msgid "Linked clone of source volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:94 +#, python-format +msgid "Downloading image: %s from glance image server as a flat vmdk file." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:107 +#: cinder/volume/drivers/vmware/vmware_images.py:126 +#, python-format +msgid "Downloaded image: %s from glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:113 +#, python-format +msgid "Downloading image: %s from glance image server using HttpNfc import." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:132 +#, python-format +msgid "Uploading image: %s to the Glance image server using HttpNfc export." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:158 +#, python-format +msgid "Uploaded image: %s to the Glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:87 +#, python-format +msgid "Did not find any backing with name: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:94 +#, python-format +msgid "Deleting the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:97 +#, python-format +msgid "Initiated deletion of VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:99 +#, python-format +msgid "Deleted the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:237 +#, python-format +msgid "There are no valid datastores attached to %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:289 +#, python-format +msgid "" +"Creating folder: %(child_folder_name)s under parent folder: " +"%(parent_folder)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:306 +#, python-format +msgid "Child folder already present: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:314 +#, python-format +msgid "Created child folder: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:365 +#, python-format +msgid "Spec for creating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:383 +#, python-format +msgid "" +"Creating volume backing name: %(name)s disk_type: %(disk_type)s size_kb: " +"%(size_kb)s at folder: %(folder)s resourse pool: %(resource_pool)s " +"datastore name: %(ds_name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:395 +#, python-format +msgid "Initiated creation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:398 +#, python-format +msgid "Successfully created volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:438 +#, python-format +msgid "Spec for relocating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:452 +#, python-format +msgid "" +"Relocating backing: %(backing)s to datastore: %(ds)s and resource pool: " +"%(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:462 +#, python-format +msgid "Initiated relocation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:464 +#, python-format +msgid "" +"Successfully relocated volume backing: %(backing)s to datastore: %(ds)s " +"and resource pool: %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:474 +#, python-format +msgid "Moving backing: %(backing)s to folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:479 +#, python-format +msgid "Initiated move of volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:482 +#, python-format +msgid "Successfully moved volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:494 +#, python-format +msgid "Snapshoting backing: %(backing)s with name: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:501 +#, python-format +msgid "Initiated snapshot of volume backing: %(backing)s named: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:505 +#, python-format +msgid "Successfully created snapshot: %(snap)s for volume backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:553 +#, python-format +msgid "Deleting the snapshot: %(name)s from backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:558 +#, python-format +msgid "" +"Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " +"delete anything." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:565 +#, python-format +msgid "Initiated snapshot: %(name)s deletion for backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:569 +#, python-format +msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:597 +#, python-format +msgid "Spec for cloning the backing: %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:613 +#, python-format +msgid "" +"Creating a clone of backing: %(back)s, named: %(name)s, clone type: " +"%(type)s from snapshot: %(snap)s on datastore: %(ds)s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:627 +#, python-format +msgid "Initiated clone of backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:638 +#, python-format +msgid "Deleting file: %(file)s under datacenter: %(dc)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:646 +#, python-format +msgid "Initiated deletion via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:648 +#, python-format +msgid "Successfully deleted file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:701 +msgid "Copying disk data before snapshot of the VM" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:711 +#, python-format +msgid "Initiated copying disk data via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:713 +#, python-format +msgid "Successfully copied disk at: %(src)s to: %(dest)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:722 +#, python-format +msgid "Deleting vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:729 +#, python-format +msgid "Initiated deleting vmdk file via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:731 +#, python-format +msgid "Deleted vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/windows/windows.py:102 +#, fuzzy, python-format +msgid "Creating folder %s " +msgstr "Apagando usuário %s" + +#: cinder/volume/drivers/windows/windows_utils.py:47 +#, python-format +msgid "" +"check_for_setup_error: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:53 +msgid "check_for_setup_error: there is no ISCSI traffic listening." +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:63 +#, python-format +msgid "" +"get_host_information: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:73 +#, python-format +msgid "" +"get_host_information: the ISCSI target information could not be " +"retrieved. WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:105 +#, python-format +msgid "" +"associate_initiator_with_iscsi_target: an association between initiator: " +"%(init)s and target name: %(target)s could not be established. WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:123 +#, python-format +msgid "" +"delete_iscsi_target: error when deleting the iscsi target associated with" +" target name: %(target)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:139 +#, python-format +msgid "" +"create_volume: error when creating the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:157 +#, python-format +msgid "" +"delete_volume: error when deleting the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:177 +#, python-format +msgid "" +"create_snapshot: error when creating the snapshot name: %(vol_name)s . 
" +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:193 +#, python-format +msgid "" +"create_volume_from_snapshot: error when creating the volume name: " +"%(vol_name)s from snapshot name: %(snap_name)s. WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:208 +#, python-format +msgid "" +"delete_snapshot: error when deleting the snapshot name: %(snap_name)s . " +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:223 +#, python-format +msgid "" +"create_iscsi_target: error when creating iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:240 +#, python-format +msgid "" +"remove_iscsi_target: error when deleting iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:255 +#, python-format +msgid "" +"add_disk_to_target: error adding disk associated to volume : %(vol_name)s" +" to the target name: %(tar_name)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:273 +#, python-format +msgid "" +"copy_vhd_disk: error when copying disk from source path : %(src_path)s to" +" destination path: %(dest_path)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:290 +#, python-format +msgid "" +"extend: error when extending the volume: %(vol_name)s .WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/flows/common.py:52 +#, python-format +msgid "Restoring source %(source_volid)s status to %(status)s" +msgstr "" + +#: cinder/volume/flows/common.py:58 +#, python-format +msgid "" +"Failed setting source volume %(source_volid)s back to its initial " +"%(source_status)s status" +msgstr "" + +#: cinder/volume/flows/common.py:83 +#, python-format +msgid "Updating volume: %(volume_id)s with %(update)s due to: %(reason)s" +msgstr "" + +#: cinder/volume/flows/common.py:90 +#: cinder/volume/flows/manager/create_volume.py:676 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(update)s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:81 +#, python-format +msgid "Originating snapshot status must be one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:103 +#, python-format +msgid "" +"Unable to create a volume from an originating source volume when its " +"status is not one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:126 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than the snapshot size " +"%(snap_size)sGB. They must be >= original snapshot size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:135 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than original volume size " +"%(source_size)sGB. They must be >= original volume size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:144 +#, python-format +msgid "Volume size %(size)s must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:186 +#, python-format +msgid "" +"Size of specified image %(image_size)sGB is larger than volume size " +"%(volume_size)sGB." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:194 +#, python-format +msgid "" +"Volume size %(volume_size)sGB cannot be smaller than the image minDisk " +"size %(min_disk)sGB." 
+msgstr "" + +#: cinder/volume/flows/api/create_volume.py:212 +#, python-format +msgid "Metadata property key %s greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:217 +#, python-format +msgid "Metadata property key %s value greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:254 +#, python-format +msgid "Availability zone '%s' is invalid" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:267 +msgid "Volume must be in the same availability zone as the snapshot" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:276 +msgid "Volume must be in the same availability zone as the source volume" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:315 +msgid "Volume type will be changed to be the same as the source volume." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:463 +#, python-format +msgid "Failed destroying volume entry %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:546 +#, python-format +msgid "Failed rolling back quota for %s reservations" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:590 +#, python-format +msgid "Failed to update quota for deleting volume: %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:678 +#: cinder/volume/flows/manager/create_volume.py:192 +#, python-format +msgid "Volume %s: create failed" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:682 +msgid "Unexpected build error:" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:105 +#, python-format +msgid "" +"Volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d due to " +"%(reason)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:124 +#, python-format +msgid "Volume %s: re-scheduled" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:141 +#, python-format +msgid "Updating volume %(volume_id)s with %(update)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:146 +#, python-format +msgid "Volume %s: resetting 'creating' status failed." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:165 +#, python-format +msgid "Volume %s: rescheduling failed" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:308 +#, python-format +msgid "" +"Failed notifying about the volume action %(event)s for volume " +"%(volume_id)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:345 +#, python-format +msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:347 +#, python-format +msgid "" +"Failed updating volume %(vol_id)s metadata using the provided " +"%(src_type)s %(src_id)s metadata" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:405 +#, python-format +msgid "" +"Failed fetching snapshot %(snapshot_id)s bootable flag using the provided" +" glance snapshot %(snapshot_ref_id)s volume reference" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:418 +#, python-format +msgid "Marking volume %s as bootable." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:421 +#, python-format +msgid "Failed updating volume %(volume_id)s bootable flag to true" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:448 +#, python-format +msgid "" +"Attempting download of %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s." 
+msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:455 +#: cinder/volume/flows/manager/create_volume.py:466 +#, python-format +msgid "" +"Failed to copy image %(image_id)s to volume: %(volume_id)s, error: " +"%(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:461 +#, python-format +msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:475 +#, python-format +msgid "" +"Downloaded image %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s successfully." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:512 +#, python-format +msgid "" +"Creating volume glance metadata for volume %(volume_id)s backed by image " +"%(image_id)s with: %(vol_metadata)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:526 +#, python-format +msgid "" +"Cloning %(volume_id)s from image %(image_id)s at location " +"%(image_location)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:552 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(updates)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:574 +#, python-format +msgid "Unable to create volume. Volume driver %s not initialized" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:588 +#, python-format +msgid "" +"Volume %(volume_id)s: being created using %(functor)s with specification:" +" %(volume_spec)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:611 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with creation provided " +"model %(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:619 +#, python-format +msgid "Volume %s: creating export" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:633 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with driver provided model " +"%(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:680 +#, python-format +msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" +msgstr "" + +#~ msgid "Error retrieving volume status: %s" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get system name" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get storage pool data" +#~ msgstr "" + +#~ msgid "Cannot find any Fibre Channel HBAs" +#~ msgstr "" + +#~ msgid "Volume status must be available or error" +#~ msgstr "O status do volume parece estar disponível" + +#~ msgid "No backend config with id %s" +#~ msgstr "" + +#~ msgid "No sm_flavor called %s" +#~ msgstr "" + +#~ msgid "No sm_volume with id %s" +#~ msgstr "" + +#~ msgid "Error: %s" +#~ msgstr "Capturado o erro: %s" + +#~ msgid "Unexpected state while cloning %s" +#~ msgstr "Erro inesperado ao executar o comando." + +#~ msgid "iSCSI device not found at %s" +#~ msgstr "" + +#~ msgid "Fibre Channel device not found." 
+#~ msgstr "" + +#~ msgid "Uncaught exception" +#~ msgstr "" + +#~ msgid "Out reactor registered" +#~ msgstr "" + +#~ msgid "CONSUMER GOT %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT QUEUED %(data)s" +#~ msgstr "" + +#~ msgid "Could not create IPC directory %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT %(data)s" +#~ msgstr "" + +#~ msgid "May specify only one of snapshot, imageRef or source volume" +#~ msgstr "" + +#~ msgid "Volume size cannot be lesser than the Snapshot size" +#~ msgstr "" + +#~ msgid "Unable to clone volumes that are in an error state" +#~ msgstr "" + +#~ msgid "Clones currently must be >= original volume size." +#~ msgstr "" + +#~ msgid "Volume size '%s' must be an integer and greater than 0" +#~ msgstr "" + +#~ msgid "Size of specified image is larger than volume size." +#~ msgstr "" + +#~ msgid "Image minDisk size is larger than the volume size." +#~ msgstr "" + +#~ msgid "" +#~ msgstr "" + +#~ msgid "Availability zone is invalid" +#~ msgstr "" + +#~ msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +#~ msgstr "volume %(vol_name)s: criando lv com tamanho %(vol_size)sG" + +#~ msgid "volume %s: creating from snapshot" +#~ msgstr "volume %s: criando o export" + +#~ msgid "volume %s: creating from existing volume" +#~ msgstr "volume %s: criando o export" + +#~ msgid "volume %s: creating from image" +#~ msgstr "volume %s: criando" + +#~ msgid "volume %s: creating" +#~ msgstr "volume %s: criando" + +#~ msgid "Setting volume: %s status to error after failed image copy." +#~ msgstr "" + +#~ msgid "Unexpected Error: " +#~ msgstr "" + +#~ msgid "volume %s: creating export" +#~ msgstr "volume %s: criando o export" + +#~ msgid "volume %s: create failed" +#~ msgstr "volume %s: criando" + +#~ msgid "volume %s: created successfully" +#~ msgstr "volume %s: criado com sucesso" + +#~ msgid "volume %s: Error trying to reschedule create" +#~ msgstr "" + +#~ msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +#~ msgstr "" + +#~ msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +#~ msgstr "Falha ao obter metadados para o ip: %s" + +#~ msgid "Downloaded image %(image_id)s to %(volume_id)s successfully." +#~ msgstr "" + +#~ msgid "Array Mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "LUN %(lun)s of size %(size)s MB is created." +#~ msgstr "" + +#~ msgid "Array mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "Failed to attach iser target for volume %(volume_id)s." 
+#~ msgstr "" + +#~ msgid "Fetching %s" +#~ msgstr "Buscando %s" + +#~ msgid "Link Local address is not found.:%s" +#~ msgstr "Endereço para Link Local não encontrado: %s" + +#~ msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +#~ msgstr "" + +#~ msgid "Started %(name)s on %(_host)s:%(_port)s" +#~ msgstr "" + +#~ msgid "Unable to find a Fibre Channel volume device" +#~ msgstr "" + +#~ msgid "Volume device not found at %s" +#~ msgstr "" + +#~ msgid "Unable to find Volume Group: %s" +#~ msgstr "Não é possível desconectar o volume %s" + +#~ msgid "Failed to create Volume Group: %s" +#~ msgstr "Não é possível localizar o volume %s" + +#~ msgid "snapshot %(snap_name)s: creating" +#~ msgstr "" + +#~ msgid "Running with CoraidDriver for ESM EtherCLoud" +#~ msgstr "" + +#~ msgid "Update session cookie %(session)s" +#~ msgstr "" + +#~ msgid "Message : %(message)s" +#~ msgstr "" + +#~ msgid "Error while trying to set group: %(message)s" +#~ msgstr "" + +#~ msgid "Unable to find group: %(group)s" +#~ msgstr "Não é possível desconectar o volume %s" + +#~ msgid "ESM urlOpen error" +#~ msgstr "" + +#~ msgid "JSON Error" +#~ msgstr "" + +#~ msgid "Request without URL" +#~ msgstr "" + +#~ msgid "Configure data : %s" +#~ msgstr "" + +#~ msgid "Configure response : %s" +#~ msgstr "resposta %s" + +#~ msgid "Unable to retrive volume infos for volume %(volname)s" +#~ msgstr "" + +#~ msgid "Cannot login on Coraid ESM" +#~ msgstr "" + +#~ msgid "Fail to create volume %(volname)s" +#~ msgstr "Não é possível localizar o volume %s" + +#~ msgid "Failed to delete volume %(volname)s" +#~ msgstr "Não é possível localizar o volume %s" + +#~ msgid "Failed to Create Snapshot %(snapname)s" +#~ msgstr "Começando a terminar instâncias" + +#~ msgid "Failed to Delete Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "Failed to Create Volume from Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "fmt = %(fmt)s backed by: %(backing_file)s" +#~ msgstr "" + +#~ msgid "Expected image to be in raw format, but is %s" +#~ msgstr "" + +#~ msgid "volume group %s doesn't exist" +#~ msgstr "" + +#~ msgid "Error retrieving volume stats: %s" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Could not get system name" +#~ msgstr "" + +#~ msgid "CPG (%s) must be in a domain" +#~ msgstr "" + +#~ msgid "Error populating default encryption types!" +#~ msgstr "" + +#~ msgid "Unexpected error while running command." +#~ msgstr "Erro inesperado ao executar o comando." + +#~ msgid "Nexenta SA returned the error" +#~ msgstr "" + +#~ msgid "Ignored target group creation error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group member addition error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LU creation error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Copying metadata from snapshot %(snap_volume_id)s to %(volume_id)s" +#~ msgstr "" + +#~ msgid "Connection to glance failed" +#~ msgstr "" + +#~ msgid "Invalid snapshot" +#~ msgstr "" + +#~ msgid "Invalid input received" +#~ msgstr "" + +#~ msgid "Invalid volume type" +#~ msgstr "" + +#~ msgid "Invalid volume" +#~ msgstr "" -#: cinder/volume/san.py:156 -msgid "san_ip must be set" -msgstr "" +#~ msgid "Invalid host" +#~ msgstr "" -#: cinder/volume/san.py:320 -#, python-format -msgid "LUID not found for %(zfs_poolname)s. 
Output=%(out)s" -msgstr "" +#~ msgid "Invalid auth key" +#~ msgstr "" -#: cinder/volume/san.py:452 -#, python-format -msgid "CLIQ command returned %s" -msgstr "" +#~ msgid "Invalid metadata" +#~ msgstr "" -#: cinder/volume/san.py:458 -#, python-format -msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" -msgstr "" +#~ msgid "Invalid metadata size" +#~ msgstr "" -#: cinder/volume/san.py:466 -#, python-format -msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" -msgstr "" +#~ msgid "Migration error" +#~ msgstr "" -#: cinder/volume/san.py:496 -#, python-format -msgid "" -"Unexpected number of virtual ips for cluster %(cluster_name)s. " -"Result=%(_xml)s" -msgstr "" +#~ msgid "Quota exceeded" +#~ msgstr "" -#: cinder/volume/san.py:549 -#, python-format -msgid "Volume info: %(volume_name)s => %(volume_attributes)s" -msgstr "" +#~ msgid "Connection to swift failed" +#~ msgstr "" -#: cinder/volume/san.py:594 -msgid "local_path not supported" -msgstr "" +#~ msgid "Volume migration failed" +#~ msgstr "" -#: cinder/volume/san.py:626 -#, python-format -msgid "Could not determine project for volume %s, can't export" -msgstr "" +#~ msgid "SSH command injection detected" +#~ msgstr "" -#: cinder/volume/san.py:696 -#, python-format -msgid "Payload for SolidFire API call: %s" -msgstr "" +#~ msgid "Invalid qos specs" +#~ msgstr "" -#: cinder/volume/san.py:713 -#, python-format -msgid "Call to json.loads() raised an exception: %s" -msgstr "" +#~ msgid "debug in callback: %s" +#~ msgstr "depuração em retorno de chamada: %s" -#: cinder/volume/san.py:718 -#, python-format -msgid "Results of SolidFire API call: %s" -msgstr "" +#~ msgid "Expected object of type: %s" +#~ msgstr "" -#: cinder/volume/san.py:732 -#, python-format -msgid "Found solidfire account: %s" -msgstr "" +#~ msgid "timefunc: '%(name)s' took %(total_time).2f secs" +#~ msgstr "" -#: cinder/volume/san.py:746 -#, python-format -msgid "solidfire account: %s does not exist, create it..." -msgstr "" +#~ msgid "base image still has %s snapshots so not deleting base image" +#~ msgstr "" -#: cinder/volume/san.py:804 -msgid "Enter SolidFire create_volume..." -msgstr "" +#~ msgid "Failed to rename migration destination volume %(vol)s to %(name)s" +#~ msgstr "" -#: cinder/volume/san.py:846 -msgid "Leaving SolidFire create_volume" -msgstr "" +#~ msgid "Resize volume \"%(name)s\" to %(size)s" +#~ msgstr "" -#: cinder/volume/san.py:861 -msgid "Enter SolidFire delete_volume..." -msgstr "" +#~ msgid "Volume \"%(name)s\" resized. New size is %(size)s" +#~ msgstr "" -#: cinder/volume/san.py:880 -#, python-format -msgid "Deleting volumeID: %s " -msgstr "" +#~ msgid "Invalid snapshot backing file format: %s" +#~ msgstr "" -#: cinder/volume/san.py:888 -msgid "Leaving SolidFire delete_volume" -msgstr "" +#~ msgid "Extend volume from %(old_size) to %(new_size)" +#~ msgstr "" -#: cinder/volume/san.py:891 -msgid "Executing SolidFire ensure_export..." -msgstr "" +#~ msgid "pool %s doesn't exist" +#~ msgstr "" -#: cinder/volume/san.py:895 -msgid "Executing SolidFire create_export..." -msgstr "" +#~ msgid "_update_volume_stats: Could not get system name." 
+#~ msgstr "" -#: cinder/volume/volume_types.py:49 cinder/volume/volume_types.py:108 -msgid "name cannot be None" -msgstr "" +#~ msgid "Disk not found: %s" +#~ msgstr "Endereço para Link Local não encontrado: %s" -#: cinder/volume/volume_types.py:96 -msgid "id cannot be None" -msgstr "" +#~ msgid "read timed out" +#~ msgstr "" -#: cinder/volume/xensm.py:55 -#, python-format -msgid "SR name = %s" -msgstr "" +#~ msgid "check_for_setup_error." +#~ msgstr "" -#: cinder/volume/xensm.py:56 -#, python-format -msgid "Params: %s" -msgstr "" +#~ msgid "check_for_setup_error: Can not get device type." +#~ msgstr "" -#: cinder/volume/xensm.py:60 -#, python-format -msgid "Failed to create sr %s...continuing" -msgstr "" +#~ msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +#~ msgstr "" -#: cinder/volume/xensm.py:62 -msgid "Create failed" -msgstr "" +#~ msgid "_get_device_type: Storage Pool must be configured." +#~ msgstr "" -#: cinder/volume/xensm.py:64 -#, python-format -msgid "SR UUID of new SR is: %s" -msgstr "" +#~ msgid "create_volume:volume name: %s." +#~ msgstr "" -#: cinder/volume/xensm.py:71 -msgid "Failed to update db" -msgstr "" +#~ msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +#~ msgstr "" -#: cinder/volume/xensm.py:80 -#, python-format -msgid "Failed to introduce sr %s...continuing" -msgstr "" +#~ msgid "create_export: volume name:%s" +#~ msgstr "" -#: cinder/volume/xensm.py:91 -#, python-format -msgid "Failed to reach backend %d" -msgstr "" +#~ msgid "create_export:Volume %(name)s does not exist." +#~ msgstr "" -#: cinder/volume/xensm.py:100 -msgid "XenSMDriver requires xenapi connection" -msgstr "" +#~ msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +#~ msgstr "" -#: cinder/volume/xensm.py:110 -msgid "Failed to initiate session" -msgstr "" +#~ msgid "terminate_connection:Host does not exist. Host name:%(host)s." +#~ msgstr "" -#: cinder/volume/xensm.py:142 -#, python-format -msgid "Volume will be created in backend - %d" -msgstr "" +#~ msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +#~ msgstr "" -#: cinder/volume/xensm.py:154 -msgid "Failed to update volume in db" -msgstr "" +#~ msgid "create_snapshot:Device does not support snapshot." +#~ msgstr "" -#: cinder/volume/xensm.py:157 -msgid "Unable to create volume" -msgstr "" +#~ msgid "create_snapshot:Resource pool needs 1GB valid size at least." +#~ msgstr "" -#: cinder/volume/xensm.py:171 -msgid "Failed to delete vdi" -msgstr "" +#~ msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +#~ msgstr "" -#: cinder/volume/xensm.py:177 -msgid "Failed to delete volume in db" -msgstr "" +#~ msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s" +#~ msgstr "" -#: cinder/volume/xensm.py:210 -msgid "Failed to find volume in db" -msgstr "" +#~ msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +#~ msgstr "" -#: cinder/volume/xensm.py:221 -msgid "Failed to find backend in db" -msgstr "" +#~ msgid "delete_snapshot:Device does not support snapshot." +#~ msgstr "" -#: cinder/volume/nexenta/__init__.py:27 -msgid "Nexenta SA returned the error" -msgstr "" +#~ msgid "delete_snapshot:Snapshot does not exist. 
snapshot name:%(snap)s" +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:64 -#, python-format -msgid "Sending JSON data: %s" -msgstr "" +#~ msgid "_check_conf_file: %s" +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:69 -#, python-format -msgid "Auto switching to HTTPS connection to %s" -msgstr "" +#~ msgid "Write login information to xml error. %s" +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:75 -msgid "No headers in server response" -msgstr "" +#~ msgid "_get_login_info error. %s" +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:76 -msgid "Bad response from server" -msgstr "" +#~ msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:79 -#, fuzzy, python-format -msgid "Got response: %s" -msgstr "resposta %s" +#~ msgid "_get_lun_set_info:%s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:96 -#, python-format -msgid "Volume %s does not exist in Nexenta SA" -msgstr "" +#~ msgid "_get_iscsi_info:%s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:180 -msgid "" -"Call to local_path should not happen. Verify that use_local_volumes flag " -"is turned off." -msgstr "" +#~ msgid "CLI command:%s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:202 -#, python-format -msgid "Ignored target creation error \"%s\" while ensuring export" -msgstr "" +#~ msgid "_execute_cli:%s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:210 -#, python-format -msgid "Ignored target group creation error \"%s\" while ensuring export" -msgstr "" +#~ msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:219 -#, python-format -msgid "Ignored target group member addition error \"%s\" while ensuring export" -msgstr "" +#~ msgid "_get_tgt_iqn:iSCSI IP is %s." +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:227 -#, python-format -msgid "Ignored LU creation error \"%s\" while ensuring export" -msgstr "" +#~ msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:237 -#, python-format -msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" -msgstr "" +#~ msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:273 -#, python-format -msgid "" -"Got error trying to destroy target group %(target_group)s, assuming it is" -" already gone: %(exc)s" -msgstr "" +#~ msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:280 -#, python-format -msgid "" -"Got error trying to delete target %(target)s, assuming it is already " -"gone: %(exc)s" -msgstr "" +#~ msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." +#~ msgstr "" -#~ msgid "Unable to locate account %(account_name) on Solidfire device" +#~ msgid "Ignored target creation error while ensuring export" #~ msgstr "" -#~ msgid "Zone %(zone_id)s could not be found." +#~ msgid "Ignored target group creation error while ensuring export" #~ msgstr "" -#~ msgid "Cinder access parameters were not specified." +#~ msgid "Ignored target group member addition error while ensuring export" #~ msgstr "" -#~ msgid "Virtual Storage Array %(id)d could not be found." +#~ msgid "Ignored LU creation error while ensuring export" #~ msgstr "" -#~ msgid "Virtual Storage Array %(name)s could not be found." 
+#~ msgid "Ignored LUN mapping entry addition error while ensuring export" #~ msgstr "" -#~ msgid "Detected more than one volume with name %(vol_name)" +#~ msgid "Invalid source volume %(reason)s." #~ msgstr "" -#~ msgid "Detected existing vlan with id %(vlan)" +#~ msgid "The request is invalid." #~ msgstr "" -#~ msgid "" -#~ "Attempting to grab semaphore \"%(lock)s\" " -#~ "for method \"%(method)s\"...lock" +#~ msgid "Volume %(volume_id)s persistence file could not be found." #~ msgstr "" -#~ msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgid "No disk at %(location)s" #~ msgstr "" -#~ msgid "" -#~ "Attempting to grab file lock " -#~ "\"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgid "Class %(class_name)s could not be found: %(exception)s" #~ msgstr "" -#~ msgid "Got file lock \"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgid "Action not allowed." #~ msgstr "" -#~ msgid "Parent group id and group id cannot be same" +#~ msgid "Key pair %(key_name)s already exists." #~ msgstr "" -#~ msgid "No body provided" +#~ msgid "Migration error: %(reason)s" #~ msgstr "" -#~ msgid "Create VSA %(display_name)s of type %(vc_type)s" +#~ msgid "Maximum volume/snapshot size exceeded" #~ msgstr "" -#~ msgid "Delete VSA with id: %s" +#~ msgid "3PAR Host already exists: %(err)s. %(info)s" #~ msgstr "" -#~ msgid "Associate address %(ip)s to VSA %(id)s" +#~ msgid "Backup volume %(volume_id)s type not recognised." #~ msgstr "" -#~ msgid "Disassociate address from VSA %(id)s" +#~ msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s" #~ msgstr "" -#~ msgid "%(obj)s with ID %(id)s not found" +#~ msgid "ssh_read: Read SSH timeout" #~ msgstr "" -#~ msgid "" -#~ "%(obj)s with ID %(id)s belongs to " -#~ "VSA %(own_vsa_id)s and not to VSA " -#~ "%(vsa_id)s." +#~ msgid "do_setup." #~ msgstr "" -#~ msgid "Index. vsa_id=%(vsa_id)s" +#~ msgid "create_volume: volume name: %s." #~ msgstr "" -#~ msgid "Detail. vsa_id=%(vsa_id)s" +#~ msgid "delete_volume: volume name: %s." #~ msgstr "" -#~ msgid "Create. vsa_id=%(vsa_id)s, body=%(body)s" +#~ msgid "create_cloned_volume: src volume: %(src)s tgt volume: %(tgt)s" #~ msgstr "" -#~ msgid "Create volume of %(size)s GB from VSA ID %(vsa_id)s" +#~ msgid "create_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" #~ msgstr "" -#~ msgid "Update %(obj)s with id: %(id)s, changes: %(changes)s" +#~ msgid "delete_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" #~ msgstr "" -#~ msgid "Delete. vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s" #~ msgstr "" -#~ msgid "Show. vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgid "_update_volume_stats: Updating volume stats" #~ msgstr "" -#~ msgid "Index instances for VSA %s" +#~ msgid "restore finished." #~ msgstr "" -#~ msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances" +#~ msgid "Error encountered during initialization of driver: %s" #~ msgstr "" -#~ msgid "" -#~ "Instance quota exceeded. You cannot run" -#~ " any more instances of this type." +#~ msgid "Unabled to update stats, driver is uninitialized" #~ msgstr "" -#~ msgid "" -#~ "Instance quota exceeded. You can only" -#~ " run %s more instances of this " -#~ "type." +#~ msgid "Snapshot file at %s does not exist." 
#~ msgstr "" -#~ msgid "Going to try to soft delete %s" +#~ msgid "_create_copy: Source vdisk %s does not exist" #~ msgstr "" -#~ msgid "No host for instance %s, deleting immediately" +#~ msgid "Login to 3PAR array invalid" #~ msgstr "" -#~ msgid "Going to try to terminate %s" +#~ msgid "There are no datastores present under %s." #~ msgstr "" -#~ msgid "Going to try to stop %s" +#~ msgid "Size for volume: %s not found, skipping secure delete." #~ msgstr "" -#~ msgid "Going to try to start %s" +#~ msgid "Could not find attribute for LUN named %s" #~ msgstr "" -#~ msgid "" -#~ "Going to force the deletion of the" -#~ " vm %(instance_uuid)s, even if it is" -#~ " deleted" +#~ msgid "Cleaning up incomplete backup operations" #~ msgstr "" -#~ msgid "" -#~ "Instance %(instance_uuid)s did not exist " -#~ "in the DB, but I will shut " -#~ "it down anyway using a special " -#~ "context" +#~ msgid "Resetting volume %s to available (was backing-up)" #~ msgstr "" -#~ msgid "exception terminating the instance %(instance_uuid)s" +#~ msgid "Resetting volume %s to error_restoring (was restoring-backup)" #~ msgstr "" -#~ msgid "trying to destroy already destroyed instance: %s" -#~ msgstr "tentando destruir instância já destruida: %s" +#~ msgid "Resetting backup %s to error (was creating)" +#~ msgstr "" -#~ msgid "" -#~ "Instance %(name)s found in database but" -#~ " not known by hypervisor. Setting " -#~ "power state to NOSTATE" +#~ msgid "Resetting backup %s to available (was restoring)" #~ msgstr "" -#~ msgid "" -#~ "Detected instance with name label " -#~ "'%(name_label)s' which is marked as " -#~ "DELETED but still present on host." +#~ msgid "Resuming delete on backup: %s" #~ msgstr "" -#~ msgid "" -#~ "Destroying instance with name label " -#~ "'%(name_label)s' which is marked as " -#~ "DELETED but still present on host." +#~ msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" #~ msgstr "" -#~ msgid "SQL connection failed (%(connstring)s). %(attempts)d attempts left." +#~ msgid "create_backup finished. backup: %s" #~ msgstr "" -#~ msgid "Can't downgrade without losing data" +#~ msgid "delete_backup started, backup: %s" #~ msgstr "" -#~ msgid "Instance %(instance_id)s not found" +#~ msgid "delete_backup finished, backup %s deleted" #~ msgstr "" -#~ msgid "Network %s has active ports, cannot delete" +#~ msgid "JSON transfer Error" #~ msgstr "" -#~ msgid "No fixed IPs to deallocate for vif %sid" +#~ msgid "create volume error: %(err)s" #~ msgstr "" -#~ msgid "" -#~ "AMQP server on %(fl_host)s:%(fl_port)d is " -#~ "unreachable: %(e)s. Trying again in " -#~ "%(fl_intv)d seconds." +#~ msgid "Create snapshot error." #~ msgstr "" -#~ msgid "Unable to connect to AMQP server after %(tries)d tries. Shutting down." +#~ msgid "Create luncopy error." #~ msgstr "" -#~ msgid "Failed to fetch message from queue: %s" +#~ msgid "_find_host_lun_id transfer data error! " #~ msgstr "" -#~ msgid "Initing the Adapter Consumer for %s" -#~ msgstr "Iniciando o Adaptador Consumidor para %s" +#~ msgid "ssh_read: Read SSH timeout." +#~ msgstr "" -#~ msgid "Created \"%(exchange)s\" fanout exchange with \"%(key)s\" routing key" +#~ msgid "There are no hosts in the inventory." #~ msgstr "" -#~ msgid "Exception while processing consumer" +#~ msgid "Unable to create volume: %(vol)s on the hosts: %(hosts)s." #~ msgstr "" -#~ msgid "Creating \"%(exchange)s\" fanout exchange" +#~ msgid "Successfully cloned new backing: %s." 
#~ msgstr "" -#~ msgid "topic is %s" -#~ msgstr "topico é %s" +#~ msgid "Successfully reverted clone: %(clone)s to snapshot: %(snapshot)s." +#~ msgstr "" -#~ msgid "message %s" -#~ msgstr "mensagem %s" +#~ msgid "Copying backing files from %(src)s to %(dest)s." +#~ msgstr "" -#~ msgid "" -#~ "Cannot confirm tmpfile at %(ipath)s is" -#~ " on same shared storage between " -#~ "%(src)s and %(dest)s." +#~ msgid "Initiated copying of backing via task: %s." #~ msgstr "" -#~ msgid "" -#~ "Unable to migrate %(instance_id)s to " -#~ "%(dest)s: Lack of memory(host:%(avail)s <= " -#~ "instance:%(mem_inst)s)" +#~ msgid "Successfully copied backing to %s." #~ msgstr "" -#~ msgid "" -#~ "Unable to migrate %(instance_id)s to " -#~ "%(dest)s: Lack of disk(host:%(available)s <=" -#~ " instance:%(necessary)s)" +#~ msgid "Registering backing at path: %s to inventory." #~ msgstr "" -#~ msgid "Driver Method %(driver_method)s missing: %(e)s.Reverting to schedule()" +#~ msgid "Initiated registring backing, task: %s." #~ msgstr "" -#~ msgid "Setting instance %(instance_uuid)s to ERROR state." +#~ msgid "Successfully registered backing: %s." #~ msgstr "" -#~ msgid "_filter_hosts: %(request_spec)s" +#~ msgid "Reverting backing to snapshot: %s." #~ msgstr "" -#~ msgid "Filter hosts for drive type %s" +#~ msgid "Initiated reverting snapshot via task: %s." #~ msgstr "" -#~ msgid "Host %s has no free capacity. Skip" +#~ msgid "Successfully reverted to snapshot: %s." #~ msgstr "" -#~ msgid "Filter hosts: %s" +#~ msgid "Successfully copied disk data to: %s." #~ msgstr "" -#~ msgid "Must implement host selection mechanism" +#~ msgid "Error(s): %s occurred in the call to RetrieveProperties." #~ msgstr "" -#~ msgid "Maximum number of hosts selected (%d)" +#~ msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" #~ msgstr "" -#~ msgid "Selected excessive host %(host)s" +#~ msgid "Deploy v1 of the Cinder API. " #~ msgstr "" -#~ msgid "Provision volume %(name)s of size %(size)s GB on host %(host)s" +#~ msgid "Deploy v2 of the Cinder API. " #~ msgstr "" -#~ msgid "volume_params %(volume_params)s" +#~ msgid "_read_xml:%s" #~ msgstr "" -#~ msgid "%(i)d: Volume %(name)s" +#~ msgid "request ip info is %s." #~ msgstr "" -#~ msgid "Attempting to spawn %(num_volumes)d volume(s)" +#~ msgid "new str info is %s." #~ msgstr "" -#~ msgid "Error creating volumes" +#~ msgid "Failed to create iser target for volume %(volume_id)s." #~ msgstr "" -#~ msgid "Non-VSA volume %d" +#~ msgid "Failed to remove iser target for volume %(volume_id)s." #~ msgstr "" -#~ msgid "Spawning volume %(volume_id)s with drive type %(drive_type)s" +#~ msgid "rtstool is not installed correctly" #~ msgstr "" -#~ msgid "Error creating volume" +#~ msgid "Creating iser_target for: %s" #~ msgstr "" -#~ msgid "No capability selected for volume of size %(size)s" +#~ msgid "Failed to create iser target for volume id:%(vol_id)s: %(e)s" #~ msgstr "" -#~ msgid "Host %s:" +#~ msgid "Removing iser_target for: %s" #~ msgstr "" -#~ msgid "" -#~ "\tDrive %(qosgrp)-25s: total %(total)2s, " -#~ "used %(used)2s, free %(free)2s. Available " -#~ "capacity %(avail)-5s" +#~ msgid "Failed to remove iser target for volume id:%(vol_id)s: %(e)s" #~ msgstr "" -#~ msgid "" -#~ "\t LeastUsedHost: Best host: %(best_host)s." -#~ " (used capacity %(min_used)s)" +#~ msgid "Volume %s does not exist, it seems it was already deleted" #~ msgstr "" -#~ msgid "" -#~ "\t MostAvailCap: Best host: %(best_host)s. 
" -#~ "(available %(max_avail)s %(type_str)s)" +#~ msgid "Executing zfs send/recv on the appliance" #~ msgstr "" -#~ msgid "(%(nm)s) publish (key: %(routing_key)s) %(message)s" -#~ msgstr "(%(nm)s) publicar (key: %(routing_key)s) %(message)s" +#~ msgid "zfs send/recv done, new volume %s created" +#~ msgstr "" -#~ msgid "Publishing to route %s" -#~ msgstr "Publicando para rota %s" +#~ msgid "Failed to delete temp snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" -#~ msgid "Declaring queue %s" -#~ msgstr "Declarando fila %s" +#~ msgid "Failed to delete zfs recv snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" -#~ msgid "Declaring exchange %s" -#~ msgstr "Declarando troca %s" +#~ msgid "rbd export-diff failed - %s" +#~ msgstr "" -#~ msgid "Binding %(queue)s to %(exchange)s with key %(routing_key)s" -#~ msgstr "Ligação %(queue)s para %(exchange)s com chave %(routing_key)s" +#~ msgid "rbd import-diff failed - %s" +#~ msgstr "" -#~ msgid "Getting from %(queue)s: %(message)s" -#~ msgstr "Recebendo de %(queue)s: %(message)s" +#~ msgid "%s is not on GPFS. Perhaps GPFS not mounted." +#~ msgstr "" -#~ msgid "Test: Emulate wrong VSA name. Raise" +#~ msgid "Folder %s does not exist, it seems it was already deleted." #~ msgstr "" -#~ msgid "Test: Emulate DB error. Raise" +#~ msgid "No 'os-update_readonly_flag' was specified in request." #~ msgstr "" -#~ msgid "Test: user_data = %s" +#~ msgid "Volume 'readonly' flag must be specified in request as a boolean." #~ msgstr "" -#~ msgid "_create: param=%s" +#~ msgid "ISER provider_location not stored, using discovery" #~ msgstr "" -#~ msgid "Host %s" +#~ msgid "Could not find iSER export for volume %s" #~ msgstr "" -#~ msgid "Test: provision vol %(name)s on host %(host)s" +#~ msgid "ISER Discovery: Found %s" #~ msgstr "" -#~ msgid "\t vol=%(vol)s" +#~ msgid "Failed to access the device on the path %(path)s: %(error)s." #~ msgstr "" -#~ msgid "Test: VSA update request: vsa_id=%(vsa_id)s values=%(values)s" +#~ msgid "iSER device not found at %s" #~ msgstr "" -#~ msgid "Test: Volume create: %s" +#~ msgid "Found iSER node %(host_device)s (after %(tries)s rescans)." #~ msgstr "" -#~ msgid "Test: Volume get request: id=%(volume_id)s" +#~ msgid "Skipping ensure_export. No iser_target provisioned for volume: %s" #~ msgstr "" -#~ msgid "Test: Volume update request: id=%(volume_id)s values=%(values)s" +#~ msgid "Skipping remove_export. No iser_target provisioned for volume: %s" #~ msgstr "" -#~ msgid "Test: Volume get: id=%(volume_id)s" +#~ msgid "Downloading image: %s from glance image server." #~ msgstr "" -#~ msgid "Task [%(name)s] %(task)s status: success %(result)s" +#~ msgid "Uploading image: %s to the Glance image server." 
#~ msgstr "" -#~ msgid "Task [%(name)s] %(task)s status: %(status)s %(error_info)s" +#~ msgid "Invalid request body" #~ msgstr "" -#~ msgid "Unable to get updated status: %s" +#~ msgid "enter: _get_host_from_connector: prefix %s" #~ msgstr "" -#~ msgid "" -#~ "deactivate_node is called for " -#~ "node_id = %(id)s node_ip = %(ip)s" +#~ msgid "Schedule volume flow not retrieved" #~ msgstr "" -#~ msgid "virsh said: %r" +#~ msgid "Failed to successfully complete schedule volume using flow: %s" #~ msgstr "" -#~ msgid "cool, it's a device" +#~ msgid "Create volume flow not retrieved" #~ msgstr "" -#~ msgid "Unable to read LXC console" +#~ msgid "Failed to successfully complete create volume workflow" #~ msgstr "" -#~ msgid "" -#~ "to xml...\n" -#~ ":%s " +#~ msgid "Expected volume result not found" #~ msgstr "" -#~ msgid "During wait running, %s disappeared." +#~ msgid "Manager volume flow not retrieved" #~ msgstr "" -#~ msgid "Instance %s running successfully." +#~ msgid "Failed to successfully complete manager volume workflow" #~ msgstr "" -#~ msgid "The nwfilter(%(instance_secgroup_filter_name)s) is not found." +#~ msgid "Unable to update stats, driver is uninitialized" #~ msgstr "" -#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image verification failed" +#~ msgid "Bad reponse from server: %s" #~ msgstr "" -#~ msgid "" -#~ "%(container_format)s-%(id)s (%(base_file)s): image " -#~ "verification skipped, no hash stored" +#~ msgid "%(flow)s has moved into state %(state)s from state %(old_state)s" #~ msgstr "" -#~ msgid "%(container_format)s-%(id)s (%(base_file)s): checking" +#~ msgid "No request spec, will not reschedule" #~ msgstr "" -#~ msgid "" -#~ "%(container_format)s-%(id)s (%(base_file)s): in use:" -#~ " on this node %(local)d local, " -#~ "%(remote)d on other nodes" +#~ msgid "No retry filter property or associated retry info, will not reschedule" #~ msgstr "" -#~ msgid "" -#~ "%(container_format)s-%(id)s (%(base_file)s): warning " -#~ "-- an absent base file is in " -#~ "use! instances: %(instance_list)s" +#~ msgid "Retry info not present, will not reschedule" #~ msgstr "" -#~ msgid "" -#~ "%(container_format)s-%(id)s (%(base_file)s): in: on" -#~ " other nodes (%(remote)d on other " -#~ "nodes)" +#~ msgid "Clear capabilities" #~ msgstr "" -#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image is not in use" +#~ msgid "This usually means the volume was never succesfully created." #~ msgstr "" -#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image is in use" +#~ msgid "setting LU uppper (end) limit to %s" #~ msgstr "" -#~ msgid "Created VM %s..." -#~ msgstr "VM %s criada..." +#~ msgid "Can't find lun or lun goup in array" +#~ msgstr "" -#~ msgid "Created VM %(instance_name)s as %(vm_ref)s." -#~ msgstr "VM %(instance_name)s criada como %(vm_ref)s." +#~ msgid "Volume to be restored to is smaller than the backup to be restored" +#~ msgstr "" -#~ msgid "Creating a CDROM-specific VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +#~ msgid "Volume driver '%(driver)s' not initialized." #~ msgstr "" -#~ msgid "" -#~ "Created a CDROM-specific VBD %(vbd_ref)s" -#~ " for VM %(vm_ref)s, VDI %(vdi_ref)s." +#~ msgid "in looping call" #~ msgstr "" -#~ msgid "No primary VDI found for%(vm_ref)s" +#~ msgid "Is the appropriate service running?" 
#~ msgstr "" -#~ msgid "Fetching image %(image)s" +#~ msgid "Could not find another host" #~ msgstr "" -#~ msgid "Image Type: %s" +#~ msgid "Not enough allocatable volume gigabytes remaining" #~ msgstr "" -#~ msgid "ISO: Found sr possibly containing the ISO image" +#~ msgid "Unable to update stats on non-intialized Volume Group: %s" #~ msgstr "" -#~ msgid "instance %s: Failed to fetch glance image" +#~ msgid "do_setup: Pool %s does not exist" #~ msgstr "" -#~ msgid "Creating VBD for VDI %s ... " -#~ msgstr "Criando VBD para VDI %s ... " +#~ msgid "migrate_volume started with more than one vdisk copy" +#~ msgstr "" -#~ msgid "Creating VBD for VDI %s done." -#~ msgstr "O VBD para VDI %s foi criado." +#~ msgid "migrate_volume: Could not get vdisk copy data" +#~ msgstr "" -#~ msgid "VBD.unplug successful first time." +#~ msgid "Selected datastore: %s for the volume." #~ msgstr "" -#~ msgid "VBD.unplug rejected: retrying..." +#~ msgid "There are no valid datastores present under %s." #~ msgstr "" -#~ msgid "Not sleeping anymore!" +#~ msgid "Unable to create volume, driver not initialized" #~ msgstr "" -#~ msgid "VBD.unplug successful eventually." +#~ msgid "Migration %(migration_id)s could not be found." #~ msgstr "" -#~ msgid "Ignoring XenAPI.Failure in VBD.unplug: %s" -#~ msgstr "Ignorando XenAPI.Failure em VBD.unplug: %s" +#~ msgid "Bad driver response status: %(status)s" +#~ msgstr "" -#~ msgid "Ignoring XenAPI.Failure %s" +#~ msgid "Instance %(instance_id)s could not be found." #~ msgstr "" -#~ msgid "Starting instance %s" +#~ msgid "Volume retype failed: %(reason)s" #~ msgstr "" -#~ msgid "instance %s: Failed to spawn" -#~ msgstr "instância %s: falha na geração" +#~ msgid "SIGTERM received" +#~ msgstr "" -#~ msgid "Instance %s failed to spawn - performing clean-up" +#~ msgid "Child %(pid)d exited with status %(code)d" #~ msgstr "" -#~ msgid "instance %s: Failed to spawn - Unable to create VM" +#~ msgid "_wait_child %d" #~ msgstr "" +#~ msgid "wait wrap.failed %s" +#~ msgstr "NotFound lançado: %s" + #~ msgid "" -#~ "Auto configuring disk for instance " -#~ "%(instance_uuid)s, attempting to resize " -#~ "partition..." +#~ "Report interval must be less than " +#~ "service down time. Current config: " +#~ ". Setting " +#~ "service_down_time to: %(new_service_down_time)s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target for volume %(name)s." #~ msgstr "" -#~ msgid "Invalid value for injected_files: '%s'" +#~ msgid "Updating iscsi target: %s" #~ msgstr "" -#~ msgid "Starting VM %s..." +#~ msgid "Failed to update iscsi target %(name)s: %(e)s" #~ msgstr "" -#~ msgid "Spawning VM %(instance_uuid)s created %(vm_ref)s." +#~ msgid "Caught '%(exception)s' exception." #~ msgstr "" -#~ msgid "Instance %s: waiting for running" +#~ msgid "Get code level failed" #~ msgstr "" -#~ msgid "Instance %s: running" +#~ msgid "do_setup: Could not get system name" #~ msgstr "" -#~ msgid "Resources to remove:%s" +#~ msgid "Failed to get license information." #~ msgstr "" -#~ msgid "Removing VDI %(vdi_ref)s(uuid:%(vdi_to_remove)s)" +#~ msgid "do_setup: No configured nodes" #~ msgstr "" -#~ msgid "Skipping VDI destroy for %s" +#~ msgid "enter: _get_chap_secret_for_host: host name %s" #~ msgstr "" -#~ msgid "Finished snapshot and upload for VM %s" +#~ msgid "" +#~ "leave: _get_chap_secret_for_host: host name " +#~ "%(host_name)s with secret %(chap_secret)s" #~ msgstr "" -#~ msgid "Starting snapshot for VM %s" +#~ msgid "" +#~ "_create_host: Cannot clean host name. 
" +#~ "Host name is not unicode or string" #~ msgstr "" -#~ msgid "Unable to Snapshot instance %(instance_uuid)s: %(exc)s" +#~ msgid "enter: _get_host_from_connector: %s" #~ msgstr "" -#~ msgid "Updating instance '%(instance_uuid)s' progress to %(progress)d" +#~ msgid "leave: _get_host_from_connector: host %s" #~ msgstr "" -#~ msgid "Resize instance %s complete" +#~ msgid "enter: _create_host: host %s" #~ msgstr "" -#~ msgid "domid changed from %(olddomid)s to %(newdomid)s" +#~ msgid "_create_host: No connector ports" #~ msgstr "" -#~ msgid "VM %(instance_uuid)s already halted,skipping shutdown..." +#~ msgid "leave: _create_host: host %(host)s - %(host_name)s" #~ msgstr "" -#~ msgid "Shutting down VM for Instance %(instance_uuid)s" +#~ msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" #~ msgstr "" -#~ msgid "Destroying VDIs for Instance %(instance_uuid)s" +#~ msgid "" +#~ "storwize_svc_multihostmap_enabled is set to " +#~ "False, Not allow multi host mapping" #~ msgstr "" +#~ msgid "volume %s mapping to multi host" +#~ msgstr "volume %s: ignorando export" + #~ msgid "" -#~ "Instance %(instance_uuid)s using RAW or " -#~ "VHD, skipping kernel and ramdisk " -#~ "deletion" +#~ "leave: _map_vol_to_host: LUN %(result_lun)s, " +#~ "volume %(volume_name)s, host %(host_name)s" #~ msgstr "" -#~ msgid "Instance %(instance_uuid)s VM destroyed" +#~ msgid "enter: _delete_host: host %s " #~ msgstr "" -#~ msgid "Destroying VM for Instance %(instance_uuid)s" +#~ msgid "leave: _delete_host: host %s " #~ msgstr "" -#~ msgid "Automatically hard rebooting %d" +#~ msgid "_create_host failed to return the host name." #~ msgstr "" -#~ msgid "Instance for migration %d not found, skipping" +#~ msgid "_get_host_from_connector failed to return the host name for connector" #~ msgstr "" -#~ msgid "injecting network info to xs for vm: |%s|" +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to any host found." +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: Multiple mappings of " +#~ "volume %(vol_name)s found, no host " +#~ "specified." #~ msgstr "" -#~ msgid "creating vif(s) for vm: |%s|" +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to host %(host_name)s found" #~ msgstr "" -#~ msgid "Created VIF %(vif_ref)s for VM %(vm_ref)s, network %(network_ref)s." -#~ msgstr "VIF %(vif_ref)s criada para VM %(vm_ref)s, rede %(network_ref)s." +#~ msgid "protocol must be specified as ' iSCSI' or ' FC'" +#~ msgstr "" -#~ msgid "injecting hostname to xs for vm: |%s|" +#~ msgid "enter: _create_vdisk: vdisk %s " #~ msgstr "" #~ msgid "" -#~ "The agent call to %(method)s returned" -#~ " an invalid response: %(ret)r. VM " -#~ "id=%(instance_uuid)s; path=%(path)s; args=%(addl_args)r" +#~ "_create_vdisk %(name)s - did not find success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" #~ msgid "" -#~ "TIMEOUT: The call to %(method)s timed" -#~ " out. VM id=%(instance_uuid)s; args=%(args)r" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find success " +#~ "message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" #~ msgstr "" #~ msgid "" -#~ "NOT IMPLEMENTED: The call to %(method)s" -#~ " is not supported by the agent. 
" -#~ "VM id=%(instance_uuid)s; args=%(args)r" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find mapping " +#~ "id in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" #~ msgstr "" #~ msgid "" -#~ "The call to %(method)s returned an " -#~ "error: %(e)s. VM id=%(instance_uuid)s; " -#~ "args=%(args)r" +#~ "_prepare_fc_map: Failed to prepare FlashCopy" +#~ " from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" -#~ msgid "Creating VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " -#~ msgstr "Criando VBD para VM %(vm_ref)s, VDI %(vdi_ref)s ... " - -#~ msgid "Error destroying VDI" +#~ msgid "" +#~ "Unexecpted mapping status %(status)s for " +#~ "mapping %(id)s. Attributes: %(attr)s" #~ msgstr "" -#~ msgid "\tVolume %s is NOT VSA volume" +#~ msgid "" +#~ "Mapping %(id)s prepare failed to " +#~ "complete within the allotted %(to)d " +#~ "seconds timeout. Terminating." #~ msgstr "" -#~ msgid "\tFE VSA Volume %s creation - do nothing" +#~ msgid "" +#~ "_prepare_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s with " +#~ "exception %(ex)s" #~ msgstr "" -#~ msgid "VSA BE create_volume for %s failed" +#~ msgid "_prepare_fc_map: %s" #~ msgstr "" -#~ msgid "VSA BE create_volume for %s succeeded" +#~ msgid "" +#~ "_start_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s deletion - do nothing" +#~ msgid "" +#~ "enter: _run_flashcopy: execute FlashCopy from" +#~ " source %(source)s to target %(target)s" #~ msgstr "" -#~ msgid "VSA BE delete_volume for %s failed" +#~ msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" #~ msgstr "" -#~ msgid "VSA BE delete_volume for %s suceeded" +#~ msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s local path call - call discover" +#~ msgid "_create_copy: Source vdisk %(src_vdisk)s (%(src_id)s) does not exist" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s ensure export - do nothing" +#~ msgid "" +#~ "_create_copy: cannot get source vdisk " +#~ "%(src)s capacity from vdisk attributes " +#~ "%(attr)s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s create export - do nothing" +#~ msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s remove export - do nothing" +#~ msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" #~ msgstr "" -#~ msgid "VSA BE remove_export for %s failed" +#~ msgid "" +#~ "leave: _get_flashcopy_mapping_attributes: mapping " +#~ "%(fc_map_id)s, attributes %(attributes)s" #~ msgstr "" -#~ msgid "Failed to retrieve QoS info" +#~ msgid "enter: _is_vdisk_defined: vdisk %s " #~ msgstr "" -#~ msgid "invalid drive data" +#~ msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " #~ msgstr "" -#~ msgid "drive_name not defined" +#~ msgid "enter: _delete_vdisk: vdisk %s" #~ msgstr "" -#~ msgid "invalid drive type name %s" +#~ msgid "warning: Tried to delete vdisk %s but it does not exist." #~ msgstr "" -#~ msgid "*** Experimental VSA code ***" +#~ msgid "leave: _delete_vdisk: vdisk %s" #~ msgstr "" -#~ msgid "Requested number of VCs (%d) is too high. 
Setting to default" +#~ msgid "" +#~ "_add_vdisk_copy %(name)s - did not find" +#~ " success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" -#~ msgid "Creating VSA: %s" +#~ msgid "_get_vdisk_copy_attrs: Could not get vdisk copy data" #~ msgstr "" -#~ msgid "" -#~ "VSA ID %(vsa_id)d %(vsa_name)s: Create " -#~ "volume %(vol_name)s, %(vol_size)d GB, type " -#~ "%(vol_type_id)s" +#~ msgid "_get_pool_attrs: Pool %s does not exist" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)d: Update VSA status to %(status)s" +#~ msgid "enter: _execute_command_and_parse_attributes: command %s" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)d: Update VSA call" +#~ msgid "" +#~ "leave: _execute_command_and_parse_attributes:\n" +#~ "command: %(cmd)s\n" +#~ "attributes: %(attr)s" #~ msgstr "" -#~ msgid "Adding %(add_cnt)s VCs to VSA %(vsa_name)s." +#~ msgid "" +#~ "_get_hdr_dic: attribute headers and values do not match.\n" +#~ " Headers: %(header)s\n" +#~ " Values: %(row)s" #~ msgstr "" -#~ msgid "Deleting %(del_cnt)s VCs from VSA %(vsa_name)s." +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ "stdout: %(out)s\n" +#~ "stderr: %(err)s\n" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Deleting %(direction)s volume %(vol_name)s" +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" -#~ msgid "Unable to delete volume %s" +#~ msgid "Did not find expected column in %(fun)s: %(hdr)s" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Forced delete. %(direction)s volume %(vol_name)s" +#~ msgid "" +#~ "Volume size %(size)s cannot be lesser" +#~ " than the snapshot size %(snap_size)s. " +#~ "They must be >= original snapshot " +#~ "size." #~ msgstr "" -#~ msgid "Going to try to terminate VSA ID %s" +#~ msgid "" +#~ "Clones currently disallowed when %(size)s " +#~ "< %(source_size)s. They must be >= " +#~ "original volume size." #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Delete instance %(name)s" +#~ msgid "" +#~ "Size of specified image %(image_size)s " +#~ "is larger than volume size " +#~ "%(volume_size)s." #~ msgstr "" -#~ msgid "Create call received for VSA %s" +#~ msgid "" +#~ "Image minDisk size %(min_disk)s is " +#~ "larger than the volume size " +#~ "%(volume_size)s." #~ msgstr "" -#~ msgid "Failed to find VSA %(vsa_id)d" +#~ msgid "Updating volume %(volume_id)s with %(update)s" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Drive %(vol_id)s created. Status %(status)s" +#~ msgid "Volume %s: resetting 'creating' status failed" #~ msgstr "" -#~ msgid "Drive %(vol_name)s (%(vol_disp_name)s) still in creating phase - wait" +#~ msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s" #~ msgstr "" -#~ msgid "" -#~ "VSA ID %(vsa_id)d: Not all volumes " -#~ "are created (%(cvol_real)d of %(cvol_exp)d)" +#~ msgid "Marking volume %s as bootable" #~ msgstr "" #~ msgid "" -#~ "VSA ID %(vsa_id)d: Drive %(vol_name)s " -#~ "(%(vol_disp_name)s) is in %(status)s state" +#~ "Attempting download of %(image_id)s " +#~ "(%(image_location)s) to volume %(volume_id)s" #~ msgstr "" -#~ msgid "Failed to update attach status for volume %(vol_name)s. 
%(ex)s" +#~ msgid "" +#~ "Downloaded image %(image_id)s (%(image_location)s)" +#~ " to volume %(volume_id)s successfully" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)d: Delete all BE volumes" +#~ msgid "" +#~ "Creating volume glance metadata for " +#~ "volume %(volume_id)s backed by image " +#~ "%(image_id)s with: %(vol_metadata)s" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)d: Start %(vc_count)d instances" +#~ msgid "" +#~ "Cloning %(volume_id)s from image %(image_id)s" +#~ " at location %(image_location)s" #~ msgstr "" diff --git a/cinder/locale/ro/LC_MESSAGES/cinder.po b/cinder/locale/ro/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..d44a07c5af --- /dev/null +++ b/cinder/locale/ro/LC_MESSAGES/cinder.po @@ -0,0 +1,10737 @@ +# Romanian translations for cinder. +# Copyright (C) 2013 ORGANIZATION +# This file is distributed under the same license as the cinder project. +# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Cinder\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-02-03 06:16+0000\n" +"PO-Revision-Date: 2013-06-12 07:55+0000\n" +"Last-Translator: openstackjenkins \n" +"Language-Team: Romanian " +"(http://www.transifex.com/projects/p/openstack/language/ro/)\n" +"Plural-Forms: nplurals=3; " +"plural=(n==1?0:(((n%100>19)||((n%100==0)&&(n!=0)))?2:1))\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:102 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:66 cinder/brick/exception.py:33 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:88 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:107 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:112 +#, python-format +msgid "Volume driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:116 +#, python-format +msgid "Backup driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:120 +#, python-format +msgid "Connection to glance failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:124 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:129 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:133 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:137 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:141 +msgid "Volume driver not ready." +msgstr "" + +#: cinder/exception.py:145 cinder/brick/exception.py:74 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:150 +#, python-format +msgid "Invalid snapshot: %(reason)s" +msgstr "" + +#: cinder/exception.py:154 +#, python-format +msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:159 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:163 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:167 +msgid "The results are invalid." 
+msgstr "" + +#: cinder/exception.py:171 +#, python-format +msgid "Invalid input received: %(reason)s" +msgstr "" + +#: cinder/exception.py:175 +#, python-format +msgid "Invalid volume type: %(reason)s" +msgstr "" + +#: cinder/exception.py:179 +#, python-format +msgid "Invalid volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:183 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:187 +#, python-format +msgid "Invalid host: %(reason)s" +msgstr "" + +#: cinder/exception.py:193 cinder/brick/exception.py:81 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:197 +#, python-format +msgid "Invalid auth key: %(reason)s" +msgstr "" + +#: cinder/exception.py:201 +#, python-format +msgid "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" +msgstr "" + +#: cinder/exception.py:206 +msgid "Service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:210 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:214 +#, python-format +msgid "The device in the path %(path)s is unavailable: %(reason)s" +msgstr "" + +#: cinder/exception.py:218 +#, python-format +msgid "Expected a uuid but received %(uuid)s." +msgstr "" + +#: cinder/exception.py:222 cinder/brick/exception.py:68 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:228 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:232 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "" +"Volume %(volume_id)s has no administration metadata with key " +"%(metadata_key)s." +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Invalid metadata: %(reason)s" +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Invalid metadata size: %(reason)s" +msgstr "" + +#: cinder/exception.py:250 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:255 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:264 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:269 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s deletion is not allowed with volumes " +"present with the type." +msgstr "" + +#: cinder/exception.py:274 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:278 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:282 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:287 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:291 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:295 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "Service %(service_id)s could not be found." 
+msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:328 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:332 +#, python-format +msgid "Unknown quota resources %(unknown)s." +msgstr "" + +#: cinder/exception.py:336 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:340 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:344 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:348 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:352 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:356 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:365 +#, python-format +msgid "Volume Type %(id)s already exists." +msgstr "" + +#: cinder/exception.py:369 +#, python-format +msgid "Volume type encryption for type %(type_id)s already exists." +msgstr "" + +#: cinder/exception.py:373 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:377 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:381 +#, python-format +msgid "Could not find parameter %(param)s" +msgstr "" + +#: cinder/exception.py:385 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:389 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:398 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:402 +#, python-format +msgid "Quota exceeded: code=%(code)s" +msgstr "" + +#: cinder/exception.py:409 +#, python-format +msgid "" +"Requested volume or snapshot exceeds allowed Gigabytes quota. Requested " +"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." 
+msgstr "" + +#: cinder/exception.py:415 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:419 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:423 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:427 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:432 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:436 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:440 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:444 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:449 +#, python-format +msgid "Glance metadata for volume/snapshot %(id)s cannot be found." +msgstr "" + +#: cinder/exception.py:453 +#, python-format +msgid "Failed to export for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:457 +#, python-format +msgid "Failed to create metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:461 +#, python-format +msgid "Failed to update metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:465 +#, python-format +msgid "Failed to copy metadata to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:469 +#, python-format +msgid "Failed to copy image to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:473 +msgid "Invalid Ceph args provided for backup rbd operation" +msgstr "" + +#: cinder/exception.py:477 +msgid "An error has occurred during backup operation" +msgstr "" + +#: cinder/exception.py:481 +msgid "Backup RBD operation failed" +msgstr "" + +#: cinder/exception.py:485 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:489 +msgid "Failed to identify volume backend." +msgstr "" + +#: cinder/exception.py:493 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "" + +#: cinder/exception.py:497 +#, python-format +msgid "Connection to swift failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:501 +#, python-format +msgid "Transfer %(transfer_id)s could not be found." +msgstr "" + +#: cinder/exception.py:505 +#, python-format +msgid "Volume migration failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:509 +#, python-format +msgid "SSH command injection detected: %(command)s" +msgstr "" + +#: cinder/exception.py:513 +#, python-format +msgid "QoS Specs %(specs_id)s already exists." +msgstr "" + +#: cinder/exception.py:517 +#, python-format +msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:522 +#, python-format +msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "No such QoS spec %(specs_id)s." +msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:536 +#, python-format +msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." 
+msgstr "" + +#: cinder/exception.py:541 +#, python-format +msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." +msgstr "" + +#: cinder/exception.py:546 +#, python-format +msgid "Invalid qos specs: %(reason)s" +msgstr "" + +#: cinder/exception.py:550 +#, python-format +msgid "QoS Specs %(specs_id)s is still associated with entities." +msgstr "" + +#: cinder/exception.py:554 +#, python-format +msgid "key manager error: %(reason)s" +msgstr "" + +#: cinder/exception.py:560 +msgid "Coraid Cinder Driver exception." +msgstr "" + +#: cinder/exception.py:564 +msgid "Failed to encode json data." +msgstr "" + +#: cinder/exception.py:568 +msgid "Login on ESM failed." +msgstr "" + +#: cinder/exception.py:572 +msgid "Relogin on ESM failed." +msgstr "" + +#: cinder/exception.py:576 +#, python-format +msgid "Group with name \"%(group_name)s\" not found." +msgstr "" + +#: cinder/exception.py:580 +#, python-format +msgid "ESM configure request failed: %(message)s." +msgstr "" + +#: cinder/exception.py:584 +#, python-format +msgid "Coraid ESM not available with reason: %(reason)s." +msgstr "" + +#: cinder/exception.py:589 +msgid "Zadara Cinder Driver exception." +msgstr "" + +#: cinder/exception.py:593 +#, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:597 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:601 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:605 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:609 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:613 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:618 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:622 +msgid "SolidFire Cinder Driver exception" +msgstr "" + +#: cinder/exception.py:626 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:630 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:636 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:641 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:645 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:649 cinder/exception.py:662 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:654 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:658 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/manager.py:133 +msgid "Notifying Schedulers of capabilities ..." +msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:105 +#, python-format +msgid "" +"Default quota for resource: %(res)s is set by the default quota flag: " +"quota_%(res)s, it is now deprecated. Please use the the default quota " +"class for default quota." 
+msgstr "" + +#: cinder/quota.py:748 +#, python-format +msgid "Created reservations %s" +msgstr "" + +#: cinder/quota.py:770 +#, python-format +msgid "Failed to commit reservations %s" +msgstr "" + +#: cinder/quota.py:790 +#, python-format +msgid "Failed to roll back reservations %s" +msgstr "" + +#: cinder/quota.py:876 +msgid "Cannot register resource" +msgstr "" + +#: cinder/quota.py:879 +msgid "Cannot register resources" +msgstr "" + +#: cinder/quota_utils.py:46 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume - " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/quota_utils.py:56 cinder/transfer/api.py:168 +#: cinder/volume/flows/api/create_volume.py:520 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/service.py:95 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:108 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:148 +#, python-format +msgid "" +"Report interval must be less than service down time. Current config " +"service_down_time: %(service_down_time)s, report_interval for this: " +"service is: %(report_interval)s. Setting global service_down_time to: " +"%(new_down_time)s" +msgstr "" + +#: cinder/service.py:216 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:255 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:270 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:276 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:298 +#, python-format +msgid "" +"Value of config option %(name)s_workers must be integer greater than 1. " +"Input value ignored." +msgstr "" + +#: cinder/service.py:373 +msgid "serve() can only be called once" +msgstr "" + +#: cinder/service.py:379 cinder/openstack/common/service.py:166 +#: cinder/openstack/common/service.py:384 +msgid "Full set of CONF:" +msgstr "" + +#: cinder/service.py:387 +#, python-format +msgid "%s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Can not translate %s to integer." 
+msgstr "" + +#: cinder/utils.py:127 +#, python-format +msgid "May specify only one of %s" +msgstr "" + +#: cinder/utils.py:212 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:228 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:412 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:423 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:698 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:759 +#, python-format +msgid "Volume driver %s not initialized" +msgstr "" + +#: cinder/wsgi.py:127 cinder/openstack/common/sslutils.py:50 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: cinder/wsgi.py:130 cinder/openstack/common/sslutils.py:53 +#, python-format +msgid "Unable to find ca_file : %s" +msgstr "" + +#: cinder/wsgi.py:133 cinder/openstack/common/sslutils.py:56 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: cinder/wsgi.py:136 cinder/openstack/common/sslutils.py:59 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:169 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:206 +#, python-format +msgid "Started %(name)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:244 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:313 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." +msgstr "" + +#: cinder/api/common.py:92 cinder/api/common.py:126 cinder/volume/api.py:266 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:95 cinder/api/common.py:130 cinder/volume/api.py:263 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:120 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:134 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:162 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:189 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:182 +msgid "Initializing extension manager." 
+msgstr "" + +#: cinder/api/extensions.py:197 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:235 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:236 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:240 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:256 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:262 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:276 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:287 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:356 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:266 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:463 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:786 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:907 +msgid "subclasses must implement construct()!" +msgstr "" + +#: cinder/api/contrib/admin_actions.py:81 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:227 +#: cinder/api/contrib/volume_transfer.py:157 +#: cinder/api/contrib/volume_transfer.py:193 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:224 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:60 +msgid "Snapshot not found." 
+msgstr "" + +#: cinder/api/contrib/hosts.py:86 cinder/api/openstack/wsgi.py:245 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:136 +#, python-format +msgid "Host '%s' could not be found." +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:168 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:180 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:206 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:214 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:111 +msgid "Please specify a name for QoS specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:220 +msgid "Failed to disassociate qos specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:222 +msgid "Qos specs still in use." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:298 +#: cinder/api/contrib/qos_specs_manage.py:351 +msgid "Volume Type id must not be None." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:72 +msgid "Missing required element quota_class_set in request body." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:81 +msgid "Quota class limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:85 +msgid "Quota class limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:60 +msgid "Quota limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quotas.py:65 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:100 +msgid "Missing required element quota_set in request body." +msgstr "" + +#: cinder/api/contrib/quotas.py:111 +#, python-format +msgid "Bad key(s) in quota set: %s" +msgstr "" + +#: cinder/api/contrib/scheduler_hints.py:36 +msgid "Malformed scheduler_hints attribute" +msgstr "" + +#: cinder/api/contrib/services.py:84 +msgid "" +"Query by service parameter is deprecated. Please use binary parameter " +"instead." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:51 +msgid "'status' must be specified." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:61 +#, python-format +msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:67 +#, python-format +msgid "" +"Provided snapshot status %(provided)s not allowed for snapshot with " +"status %(current)s." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:79 +msgid "progress must be an integer percentage" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:101 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:105 +#: cinder/api/v1/snapshot_metadata.py:75 cinder/api/v1/volume_metadata.py:75 +#: cinder/api/v2/snapshot_metadata.py:75 cinder/api/v2/volume_metadata.py:74 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:108 +#: cinder/api/v1/snapshot_metadata.py:79 cinder/api/v1/volume_metadata.py:79 +#: cinder/api/v2/snapshot_metadata.py:79 cinder/api/v2/volume_metadata.py:78 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:150 +msgid "" +"Key names can only contain alphanumeric characters, underscores, periods," +" colons and hyphens." 
+msgstr "" + +#: cinder/api/contrib/volume_actions.py:99 +#, python-format +msgid "" +"Invalid request to attach volume to an instance %(instance_uuid)s and a " +"host %(host_name)s simultaneously" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:107 +msgid "Invalid request to attach volume to an invalid target" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:111 +msgid "" +"Invalid request to attach volume with an invalid mode. Attaching mode " +"should be 'rw' or 'ro'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:196 +msgid "Unable to fetch connection information from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:216 +msgid "Unable to terminate volume connection from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:229 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:237 +msgid "Bad value for 'force' parameter." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:240 +msgid "'force' is not string or bool." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:280 +msgid "New volume size must be specified as an integer." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:299 +msgid "Must specify readonly in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:307 +msgid "Bad value for 'readonly'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:311 +msgid "'readonly' not string or bool" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:325 +msgid "New volume type must be specified." +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:131 +msgid "Listing volume transfers" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:147 +#, python-format +msgid "Creating new volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:162 +#, python-format +msgid "Creating transfer of volume %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:183 +#, python-format +msgid "Accepting volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:196 +#, python-format +msgid "Accepting transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:217 +#, python-format +msgid "Delete transfer with id: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:64 +msgid "key_size must be non-negative" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:67 +msgid "key_size must be an integer" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:73 +msgid "provider must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:75 +msgid "control_location must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:83 +#, python-format +msgid "Valid control location are: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:111 +msgid "Cannot create encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:115 +msgid "Create body is not valid." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:157 +msgid "Cannot delete encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/middleware/auth.py:108 +msgid "Invalid service catalog json." 
+msgstr "" + +#: cinder/api/middleware/fault.py:44 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:53 cinder/api/openstack/wsgi.py:984 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/fault.py:69 +#, python-format +msgid "%(exception)s: %(explanation)s" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:55 cinder/api/middleware/sizelimit.py:64 +#: cinder/api/middleware/sizelimit.py:78 +msgid "Request is too large." +msgstr "" + +#: cinder/api/openstack/__init__.py:69 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:80 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:104 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:126 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." +msgstr "" + +#: cinder/api/openstack/wsgi.py:220 cinder/api/openstack/wsgi.py:634 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:639 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:677 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:682 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:685 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:793 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:799 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:803 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:914 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:917 cinder/api/openstack/wsgi.py:930 +#: cinder/api/v1/snapshot_metadata.py:53 cinder/api/v1/snapshot_metadata.py:71 +#: cinder/api/v1/snapshot_metadata.py:96 cinder/api/v1/snapshot_metadata.py:121 +#: cinder/api/v1/volume_metadata.py:53 cinder/api/v1/volume_metadata.py:71 +#: cinder/api/v1/volume_metadata.py:96 cinder/api/v1/volume_metadata.py:121 +#: cinder/api/v2/snapshot_metadata.py:53 cinder/api/v2/snapshot_metadata.py:71 +#: cinder/api/v2/snapshot_metadata.py:96 cinder/api/v2/snapshot_metadata.py:121 +#: cinder/api/v2/volume_metadata.py:52 cinder/api/v2/volume_metadata.py:70 +#: cinder/api/v2/volume_metadata.py:95 cinder/api/v2/volume_metadata.py:120 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:927 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:939 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:987 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." 
+msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:139 cinder/api/v2/limits.py:138 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:264 cinder/api/v2/limits.py:261 +msgid "This request was rate-limited." +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:37 cinder/api/v1/snapshot_metadata.py:117 +#: cinder/api/v1/snapshot_metadata.py:156 cinder/api/v2/snapshot_metadata.py:37 +#: cinder/api/v2/snapshot_metadata.py:117 +#: cinder/api/v2/snapshot_metadata.py:156 +msgid "snapshot does not exist" +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:139 +#: cinder/api/v1/snapshot_metadata.py:149 cinder/api/v1/volume_metadata.py:139 +#: cinder/api/v1/volume_metadata.py:149 cinder/api/v2/snapshot_metadata.py:139 +#: cinder/api/v2/snapshot_metadata.py:149 cinder/api/v2/volume_metadata.py:138 +#: cinder/api/v2/volume_metadata.py:148 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:119 cinder/api/v2/snapshots.py:120 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:184 +msgid "'volume_id' must be specified" +msgstr "" + +#: cinder/api/v1/snapshots.py:182 cinder/api/v2/snapshots.py:193 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:186 cinder/api/v2/snapshots.py:202 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:37 cinder/api/v1/volume_metadata.py:117 +#: cinder/api/v1/volume_metadata.py:156 cinder/api/v2/volume_metadata.py:36 +#: cinder/api/v2/volume_metadata.py:116 cinder/api/v2/volume_metadata.py:155 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:111 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:290 cinder/api/v2/volumes.py:228 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:344 cinder/api/v1/volumes.py:348 +#: cinder/api/v2/volumes.py:298 cinder/api/v2/volumes.py:302 +msgid "Invalid imageRef provided." 
+msgstr "" + +#: cinder/api/v1/volumes.py:388 cinder/api/v2/volumes.py:354 +#, python-format +msgid "snapshot id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:401 +#, python-format +msgid "source vol id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:412 cinder/api/v2/volumes.py:377 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/v1/volumes.py:496 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/snapshots.py:111 cinder/api/v2/snapshots.py:126 +#: cinder/api/v2/snapshots.py:267 +msgid "Snapshot could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:174 cinder/api/v2/snapshots.py:234 +#: cinder/api/v2/volumes.py:313 cinder/api/v2/volumes.py:419 +#, python-format +msgid "Missing required element '%s' in request body" +msgstr "" + +#: cinder/api/v2/snapshots.py:190 cinder/api/v2/volumes.py:217 +#: cinder/api/v2/volumes.py:234 cinder/api/v2/volumes.py:449 +msgid "Volume could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:230 cinder/api/v2/volumes.py:415 +msgid "Missing request body" +msgstr "" + +#: cinder/api/v2/types.py:70 +msgid "Volume type not found" +msgstr "" + +#: cinder/api/v2/volumes.py:237 +msgid "Volume cannot be deleted while in attached state" +msgstr "" + +#: cinder/api/v2/volumes.py:343 +msgid "Volume type not found." +msgstr "" + +#: cinder/api/v2/volumes.py:366 +#, python-format +msgid "source volume id:%s not found" +msgstr "" + +#: cinder/api/v2/volumes.py:472 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:66 +msgid "Backup status must be available or error" +msgstr "" + +#: cinder/backup/api.py:105 +msgid "Volume to be backed up must be available" +msgstr "" + +#: cinder/backup/api.py:140 +msgid "Backup status must be available" +msgstr "" + +#: cinder/backup/api.py:145 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:154 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:170 +msgid "Volume to be restored to must be available" +msgstr "" + +#: cinder/backup/api.py:176 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." +msgstr "" + +#: cinder/backup/api.py:181 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:97 +msgid "NULL host not allowed for volume backend lookup." +msgstr "" + +#: cinder/backup/manager.py:100 +#, python-format +msgid "Checking hostname '%s' for backend info." +msgstr "" + +#: cinder/backup/manager.py:107 +#, python-format +msgid "Backend not found in hostname (%s) so using default." +msgstr "" + +#: cinder/backup/manager.py:117 +#, python-format +msgid "Manager requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:120 cinder/backup/manager.py:132 +msgid "Fetching default backend." +msgstr "" + +#: cinder/backup/manager.py:123 +#, python-format +msgid "Volume manager for backend '%s' does not exist." +msgstr "" + +#: cinder/backup/manager.py:129 +#, python-format +msgid "Driver requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:147 +#, python-format +msgid "" +"Registering backend %(backend)s (host=%(host)s " +"backend_name=%(backend_name)s)." +msgstr "" + +#: cinder/backup/manager.py:154 +#, python-format +msgid "Registering default backend %s." 
+msgstr "" + +#: cinder/backup/manager.py:158 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)." +msgstr "" + +#: cinder/backup/manager.py:165 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s." +msgstr "" + +#: cinder/backup/manager.py:184 +msgid "Cleaning up incomplete backup operations." +msgstr "" + +#: cinder/backup/manager.py:189 +#, python-format +msgid "Resetting volume %s to available (was backing-up)." +msgstr "" + +#: cinder/backup/manager.py:194 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)." +msgstr "" + +#: cinder/backup/manager.py:206 +#, python-format +msgid "Resetting backup %s to error (was creating)." +msgstr "" + +#: cinder/backup/manager.py:212 +#, python-format +msgid "Resetting backup %s to available (was restoring)." +msgstr "" + +#: cinder/backup/manager.py:217 +#, python-format +msgid "Resuming delete on backup: %s." +msgstr "" + +#: cinder/backup/manager.py:225 +#, python-format +msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:237 +#, python-format +msgid "" +"Create backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:249 +#, python-format +msgid "" +"Create backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:282 +#, python-format +msgid "Create backup finished. backup: %s." +msgstr "" + +#: cinder/backup/manager.py:286 +#, python-format +msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:299 +#, python-format +msgid "" +"Restore backup aborted: expected volume status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:310 +#, python-format +msgid "" +"Restore backup aborted: expected backup status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:329 +#, python-format +msgid "" +"Restore backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:360 +#, python-format +msgid "" +"Restore backup finished, backup %(backup_id)s restored to volume " +"%(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:379 +#, python-format +msgid "Delete backup started, backup: %s." +msgstr "" + +#: cinder/backup/manager.py:386 +#, python-format +msgid "" +"Delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:399 +#, python-format +msgid "" +"Delete backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:422 +#, python-format +msgid "Delete backup finished, backup %s deleted." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:116 +msgid "" +"rbd striping not supported - ignoring configuration settings for rbd " +"striping" +msgstr "" + +#: cinder/backup/drivers/ceph.py:147 +#, python-format +msgid "invalid user '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:213 +msgid "backup_id required" +msgstr "" + +#: cinder/backup/drivers/ceph.py:224 +#, python-format +msgid "discarding %(length)s bytes from offset %(offset)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:232 +#, python-format +msgid "writing zeroes chunk %d" +msgstr "" + +#: cinder/backup/drivers/ceph.py:246 +#, python-format +msgid "transferring data between '%(src)s' and '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:250 +#, python-format +msgid "%(chunks)s chunks of %(bytes)s bytes to be transferred" +msgstr "" + +#: cinder/backup/drivers/ceph.py:269 +#, python-format +msgid "transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:279 +#, python-format +msgid "transferring remaining %s bytes" +msgstr "" + +#: cinder/backup/drivers/ceph.py:295 +#, python-format +msgid "creating base image '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:322 cinder/backup/drivers/ceph.py:603 +#, python-format +msgid "deleting backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:325 +msgid "no backup snapshot to delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:361 +#, python-format +msgid "trying diff format name format basename='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:369 +#, python-format +msgid "image %s not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:377 +#, python-format +msgid "base image still has %s snapshots so skipping base image delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:382 +#, python-format +msgid "deleting base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:389 +#, python-format +msgid "image busy, retrying %(retries)s more time(s) in %(delay)ss" +msgstr "" + +#: cinder/backup/drivers/ceph.py:394 +msgid "max retries reached - raising error" +msgstr "" + +#: cinder/backup/drivers/ceph.py:397 +#, python-format +msgid "base backup image='%s' deleted)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:407 +#, python-format +msgid "deleting source snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:453 +#, python-format +msgid "performing differential transfer from '%(src)s' to '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:478 +#, python-format +msgid "rbd diff op failed - (ret=%(ret)s stderr=%(stderr)s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:488 +#, python-format +msgid "image '%s' not found - trying diff format name" +msgstr "" + +#: cinder/backup/drivers/ceph.py:493 +#, python-format +msgid "diff format image '%s' not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:528 +#, python-format +msgid "using --from-snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:543 +#, python-format +msgid "source snap '%s' is stale so deleting" +msgstr "" + +#: cinder/backup/drivers/ceph.py:555 +#, python-format +msgid "" +"snap='%(snap)s' does not exist in base image='%(base)s' - aborting " +"incremental backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:566 +#, python-format +msgid "creating backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:586 +#, python-format +msgid "differential backup transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:595 +msgid "differential backup transfer failed" +msgstr "" + +#: 
cinder/backup/drivers/ceph.py:625 +#, python-format +msgid "creating base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:634 +msgid "copying data" +msgstr "" + +#: cinder/backup/drivers/ceph.py:694 +#, python-format +msgid "looking for snapshot of backup base '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:697 +#, python-format +msgid "backup base '%s' has no snapshots" +msgstr "" + +#: cinder/backup/drivers/ceph.py:704 +#, python-format +msgid "backup '%s' has no snapshot" +msgstr "" + +#: cinder/backup/drivers/ceph.py:708 +#, python-format +msgid "backup should only have one snapshot but instead has %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:713 +#, python-format +msgid "found snapshot '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:734 +msgid "need non-zero volume size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:751 +#, python-format +msgid "Starting backup of volume='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:764 +msgid "forcing full backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:776 +#, python-format +msgid "backup '%s' finished." +msgstr "" + +#: cinder/backup/drivers/ceph.py:834 +msgid "adjusting restore vol size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:846 +#, python-format +msgid "trying incremental restore from base='%(base)s' snap='%(snap)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:858 +msgid "differential restore failed, trying full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:869 +#, python-format +msgid "restore transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:916 +#, python-format +msgid "rbd has %s extents" +msgstr "" + +#: cinder/backup/drivers/ceph.py:938 +msgid "dest volume is original volume - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:959 +msgid "destination has extents - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:964 +#, python-format +msgid "no restore point found for backup='%s', forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:995 +msgid "forcing full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1005 +#, python-format +msgid "starting restore from Ceph backup=%(src)s to volume=%(dest)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1016 +msgid "volume_file does not support fileno() so skipping fsync()" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1021 +msgid "restore finished successfully." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:1023 +#, python-format +msgid "restore finished with error - %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1029 +#, python-format +msgid "delete started for backup=%s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1034 +msgid "rbd image not found but continuing anyway so that db entry can be removed" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1037 +#, python-format +msgid "delete '%s' finished with warning" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1039 +#, python-format +msgid "delete '%s' finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:106 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:123 +#, python-format +msgid "single_user auth mode enabled, but %(param)s not set" +msgstr "" + +#: cinder/backup/drivers/swift.py:141 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:146 +#, python-format +msgid "container %s does not exist" +msgstr "" + +#: cinder/backup/drivers/swift.py:151 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/drivers/swift.py:157 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:173 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:182 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:192 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:209 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/drivers/swift.py:214 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:219 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:224 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/drivers/swift.py:234 +#, python-format +msgid "volume size %d is invalid." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:248 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:271 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/drivers/swift.py:278 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:287 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/drivers/swift.py:291 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/drivers/swift.py:297 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:301 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:304 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:312 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/drivers/swift.py:328 cinder/backup/drivers/tsm.py:324 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/drivers/swift.py:345 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/drivers/swift.py:350 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:356 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/drivers/swift.py:362 +#, python-format +msgid "" +"restoring object from swift. backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:378 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/drivers/swift.py:401 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:409 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:423 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:428 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:432 cinder/backup/drivers/tsm.py:378 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:446 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:455 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:458 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:468 cinder/backup/drivers/tsm.py:440 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/backup/drivers/tsm.py:85 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to create device hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:143 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to obtain backup success notification from " +"server.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:173 +#, python-format +msgid "" +"restore: %(vol_id)s Failed.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:199 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a block device." +msgstr "" + +#: cinder/backup/drivers/tsm.py:206 +#, python-format +msgid "backup: %(vol_id)s Failed. Cannot obtain real path to device %(path)s." +msgstr "" + +#: cinder/backup/drivers/tsm.py:213 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a file." +msgstr "" + +#: cinder/backup/drivers/tsm.py:260 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to remove backup hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:286 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to TSM, volume path: " +"%(volume_path)s," +msgstr "" + +#: cinder/backup/drivers/tsm.py:298 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:308 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:338 +#, python-format +msgid "" +"restore: starting restore of backup from TSM to volume %(volume_id)s, " +"backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:352 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:362 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:413 +#, python-format +msgid "" +"delete: %(vol_id)s Failed to run dsmc with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:421 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments with " +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:432 +#, python-format +msgid "" +"delete: %(vol_id)s Failed with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/brick/exception.py:55 +#, python-format +msgid "Exception in string format operation. 
msg='%s'" +msgstr "" + +#: cinder/brick/exception.py:85 +msgid "We are unable to locate any Fibre Channel devices." +msgstr "" + +#: cinder/brick/exception.py:89 +msgid "Unable to find a Fibre Channel volume device." +msgstr "" + +#: cinder/brick/exception.py:93 +#, python-format +msgid "Volume device not found at %(device)s." +msgstr "" + +#: cinder/brick/exception.py:97 +#, python-format +msgid "Unable to find Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:101 +#, python-format +msgid "Failed to create Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:105 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:109 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:113 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:117 +#, python-format +msgid "Connect to volume via protocol %(protocol)s not supported." +msgstr "" + +#: cinder/brick/initiator/connector.py:127 +#, python-format +msgid "Invalid InitiatorConnector protocol specified %(protocol)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:140 +#, python-format +msgid "Failed to access the device on the path %(path)s: %(error)s %(info)s." +msgstr "" + +#: cinder/brick/initiator/connector.py:229 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. Try" +" number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:242 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:317 +#, python-format +msgid "Could not find the iSCSI Initiator File %s" +msgstr "" + +#: cinder/brick/initiator/connector.py:609 +msgid "We are unable to locate any Fibre Channel devices" +msgstr "" + +#: cinder/brick/initiator/connector.py:619 +#, python-format +msgid "Looking for Fibre Channel dev %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:629 +msgid "Fibre Channel volume device not found." +msgstr "" + +#: cinder/brick/initiator/connector.py:633 +#, python-format +msgid "Fibre volume not yet found. Will rescan & retry. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:649 +#, python-format +msgid "Found Fibre Channel volume %(name)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:658 +#, python-format +msgid "Multipath device discovered %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:776 +#, python-format +msgid "AoE volume not yet found at: %(path)s. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:789 +#, python-format +msgid "Found AoE device %(path)s (after %(tries)s rediscover)" +msgstr "" + +#: cinder/brick/initiator/connector.py:815 +#, python-format +msgid "aoe-discover: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:825 +#, python-format +msgid "aoe-revalidate %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:834 +#, python-format +msgid "aoe-flush %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:858 +msgid "" +"Connection details not present. RemoteFsClient may not initialize " +"properly." 
+msgstr "" + +#: cinder/brick/initiator/connector.py:915 +msgid "Invalid connection_properties specified no device_path attribute" +msgstr "" + +#: cinder/brick/initiator/linuxfc.py:50 cinder/brick/initiator/linuxfc.py:56 +msgid "systool is not installed" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:99 +#: cinder/brick/initiator/linuxscsi.py:107 +#: cinder/brick/initiator/linuxscsi.py:124 +#, python-format +msgid "multipath call failed exit (%(code)s)" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:145 +#, python-format +msgid "Couldn't find multipath device %(line)s" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:149 +#, python-format +msgid "Found multipath device = %(mdev)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:140 +msgid "Attempting recreate of backing lun..." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:158 +#, python-format +msgid "" +"Failed to recover attempt to create iscsi backing lun for volume " +"id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:177 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:184 +#, python-format +msgid "" +"Created volume path %(vp)s,\n" +"content: %(vc)%" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:216 cinder/brick/iscsi/iscsi.py:365 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:227 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:258 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:262 +#, python-format +msgid "Volume path %s does not exist, nothing to remove." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:280 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:290 cinder/brick/iscsi/iscsi.py:550 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:375 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:469 +msgid "cinder-rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:489 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:513 cinder/brick/iscsi/iscsi.py:522 +#, python-format +msgid "Failed to create iscsi target for volume id:%s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:532 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:542 +#, python-format +msgid "Failed to remove iscsi target for volume id:%s." 
+msgstr "" + +#: cinder/brick/iscsi/iscsi.py:571 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 cinder/brick/local_dev/lvm.py:158 +#: cinder/brick/local_dev/lvm.py:474 cinder/brick/local_dev/lvm.py:503 +#: cinder/brick/local_dev/lvm.py:546 cinder/brick/local_dev/lvm.py:609 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 cinder/brick/local_dev/lvm.py:159 +#: cinder/brick/local_dev/lvm.py:475 cinder/brick/local_dev/lvm.py:504 +#: cinder/brick/local_dev/lvm.py:547 cinder/brick/local_dev/lvm.py:610 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 cinder/brick/local_dev/lvm.py:160 +#: cinder/brick/local_dev/lvm.py:476 cinder/brick/local_dev/lvm.py:505 +#: cinder/brick/local_dev/lvm.py:548 cinder/brick/local_dev/lvm.py:611 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, python-format +msgid "Unable to locate Volume Group %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:157 +msgid "Error querying thin pool about data_percent" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:370 +#, python-format +msgid "Unable to find VG: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:420 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:473 +msgid "Error creating Volume" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:489 +#, python-format +msgid "Unable to find LV: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:502 +msgid "Error creating snapshot" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:545 +msgid "Error activating LV" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:563 +#, python-format +msgid "Error reported running lvremove: CMD: %(command)s, RESPONSE: %(response)s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:568 +msgid "Attempting udev settle and retry of lvremove..." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:608 +msgid "Error extending Volume" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:39 +msgid "nfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:45 +msgid "glusterfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:86 +#, python-format +msgid "Already mounted: %s" +msgstr "" + +#: cinder/common/config.py:125 +msgid "Deploy v1 of the Cinder API." +msgstr "" + +#: cinder/common/config.py:128 +msgid "Deploy v2 of the Cinder API." +msgstr "" + +#: cinder/common/sqlalchemyutils.py:66 +#: cinder/openstack/common/db/sqlalchemy/utils.py:72 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:114 +#: cinder/openstack/common/db/sqlalchemy/utils.py:120 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/compute/nova.py:97 +#, python-format +msgid "Novaclient connection created using URL: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:63 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:190 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:843 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1842 +#, python-format +msgid "VolumeType %s deletion failed, VolumeType in use." 
+msgstr "" + +#: cinder/db/sqlalchemy/api.py:2530 +#, python-format +msgid "No backup with id %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2615 +msgid "Volume must be available" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2639 +#, python-format +msgid "Volume in unexpected state %s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2662 +#, python-format +msgid "" +"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " +"%(status)s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:37 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:64 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:240 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:269 +msgid "Downgrade from initial Cinder install is unsupported." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:49 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:74 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:105 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:45 +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:48 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:46 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:127 +msgid "Dropping foreign key reservations_ibfk_1 failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:133 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:140 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:147 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:60 +msgid "Exception while creating table 'volume_glance_metadata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:75 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:68 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:58 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:61 +msgid "transfers table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:31 +msgid "migrations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:61 +#, python-format +msgid "Table |%s| not created" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:37 +#, python-format +msgid "Exception while dropping table %s." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:100 +#, python-format +msgid "Exception while creating table %s." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:34 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:43 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:49 +#, python-format +msgid "Column |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:92 +msgid "encryption_key_id column not dropped from volumes" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:100 +msgid "encryption_key_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:105 +msgid "volume_type_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:113 +msgid "encryption table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:49 +msgid "Table quality_of_service_specs not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:60 +msgid "Added qos_specs_id column to volume type table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:85 +msgid "Dropping foreign key volume_types_ibfk_1 failed" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:93 +msgid "Dropping qos_specs_id column failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:100 +msgid "Dropping quality_of_service_specs table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:59 +msgid "volume_admin_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:48 +msgid "" +"Found existing 'default' entries in the quota_classes table. Skipping " +"insertion of default values." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:72 +msgid "Added default quota class data into the DB." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:74 +msgid "Default quota class data not inserted into the DB." +msgstr "" + +#: cinder/image/glance.py:161 cinder/image/glance.py:169 +#, python-format +msgid "Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:94 cinder/image/image_utils.py:199 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/image/image_utils.py:101 +#, python-format +msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:109 cinder/image/image_utils.py:192 +#, python-format +msgid "" +"Size is %(image_size)dGB and doesn't fit in a volume of size " +"%(volume_size)dGB." +msgstr "" + +#: cinder/image/image_utils.py:157 +#, python-format +msgid "" +"qemu-img is not installed and image is of type %s. Only RAW images can " +"be used if qemu-img is not installed." +msgstr "" + +#: cinder/image/image_utils.py:164 +msgid "" +"qemu-img is not installed and the disk format is not specified. Only RAW" +" images can be used if qemu-img is not installed." 
+msgstr "" + +#: cinder/image/image_utils.py:178 +#, python-format +msgid "Copying image from %(tmp)s to volume %(dest)s - size: %(size)s" +msgstr "" + +#: cinder/image/image_utils.py:206 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:224 +#, python-format +msgid "Converted to %(vol_format)s, but format is now %(file_format)s" +msgstr "" + +#: cinder/image/image_utils.py:260 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:78 +msgid "" +"config option keymgr.fixed_key has not been defined: some operations may " +"fail unexpectedly" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:80 +msgid "keymgr.fixed_key not defined" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:134 +#, python-format +msgid "Not deleting key %s" +msgstr "" + +#: cinder/openstack/common/eventlet_backdoor.py:140 +#, python-format +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/fileutils.py:64 +#, python-format +msgid "Reloading cached file %s" +msgstr "" + +#: cinder/openstack/common/gettextutils.py:252 +msgid "Message objects do not support addition." +msgstr "" + +#: cinder/openstack/common/gettextutils.py:261 +msgid "" +"Message objects do not support str() because they may contain non-ascii " +"characters. Please use unicode() or translate() instead." +msgstr "" + +#: cinder/openstack/common/imageutils.py:96 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:189 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:200 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:227 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:235 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/log.py:301 +#, python-format +msgid "Deprecated: %s" +msgstr "" + +#: cinder/openstack/common/log.py:402 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:453 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:623 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:82 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:89 +#: cinder/tests/brick/test_brick_connector.py:466 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:129 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:136 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:43 +#, python-format +msgid "Unexpected argument for periodic task creation: %(arg)s." 
+msgstr "" + +#: cinder/openstack/common/periodic_task.py:134 +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:139 +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:177 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:186 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." +msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:127 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/openstack/common/processutils.py:142 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:167 +#: cinder/openstack/common/processutils.py:239 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:345 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:179 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/openstack/common/processutils.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:318 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:220 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/openstack/common/processutils.py:224 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/openstack/common/service.py:175 +#: cinder/openstack/common/service.py:269 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:187 +msgid "Exception during rpc cleanup." 
+msgstr "" + +#: cinder/openstack/common/service.py:238 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:275 +msgid "Unhandled exception" +msgstr "" + +#: cinder/openstack/common/service.py:308 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/openstack/common/service.py:327 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/openstack/common/service.py:337 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/openstack/common/service.py:354 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/openstack/common/service.py:358 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/service.py:362 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/openstack/common/service.py:392 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/openstack/common/service.py:410 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/openstack/common/sslutils.py:98 +#, python-format +msgid "Invalid SSL version : %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:86 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/strutils.py:182 +#, python-format +msgid "Invalid string format: %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:189 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/openstack/common/versionutils.py:69 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and " +"may be removed in %(remove_in)s." +msgstr "" + +#: cinder/openstack/common/versionutils.py:73 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s and may be removed in " +"%(remove_in)s. It will not be superseded." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:29 +msgid "An unknown error occurred in crypto utils." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:36 +#, python-format +msgid "Block size of %(given)d is too big, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:45 +#, python-format +msgid "Length of %(given)d is too long, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/db/exception.py:44 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:487 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:538 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:610 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/utils.py:33 +msgid "Sort key supplied was not valid." +msgstr "" + +#: cinder/openstack/common/notifier/api.py:129 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:145 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:164 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. 
Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:105 +#, python-format +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:83 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:216 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:299 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:345 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "received %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:422 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:423 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:280 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:459 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:594 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:597 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:631 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:640 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:668 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." 
+msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:166 +#: cinder/openstack/common/rpc/impl_qpid.py:164 +msgid "Failed to process message... skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:477 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:499 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:536 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:552 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:606 +#: cinder/openstack/common/rpc/impl_qpid.py:507 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:624 +#: cinder/openstack/common/rpc/impl_qpid.py:522 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:628 +#: cinder/openstack/common/rpc/impl_qpid.py:526 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:667 +#: cinder/openstack/common/rpc/impl_qpid.py:561 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:84 +#, python-format +msgid "Invalid value for qpid_topology_version: %d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:455 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:461 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:474 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:534 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:101 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:136 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:137 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:138 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:146 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:158 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:200 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:205 +msgid "You cannot send on this socket." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:267 +#, python-format +msgid "Running func with context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:305 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:387 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:437 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:443 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:475 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:481 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:497 +#, python-format +msgid "Required IPC directory does not exist at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:506 +#, python-format +msgid "Permission denied to IPC directory at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:509 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:543 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:562 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:590 +msgid "Skipping topic registration. Already registered." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:597 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:649 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:662 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:675 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:678 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:681 +#, python-format +msgid "Received message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:682 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:691 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:698 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:721 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:724 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:728 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:731 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:771 +#, python-format +msgid "topic is %s." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:815 +#, python-format +msgid "rpc_zmq_matchmaker = %(orig)s is deprecated; use %(new)s instead" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#: cinder/openstack/common/rpc/matchmaker_ring.py:79 +#: cinder/openstack/common/rpc/matchmaker_ring.py:97 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:54 +#, python-format +msgid "extra_spec requirement '%(req)s' does not match '%(cap)s'" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:67 +#, python-format +msgid "%(host_state)s fails resource_type extra_specs requirements" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:43 +msgid "Re-scheduling is disabled." +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:52 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/scheduler/driver.py:69 +msgid "Must implement host_passes_filters" +msgstr "" + +#: cinder/scheduler/driver.py:74 +msgid "Must implement find_retype_host" +msgstr "" + +#: cinder/scheduler/driver.py:78 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:82 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:98 +#, python-format +msgid "cannot place volume %(id)s on %(host)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:114 +#, python-format +msgid "No valid hosts for volume %(id)s with type %(type)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:125 +#, python-format +msgid "" +"Current host not valid for volume %(id)s with type %(type)s, migration " +"not allowed" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:156 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:174 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:207 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:259 +#, python-format +msgid "Filtered %s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:276 +#, python-format +msgid "Choosing %s" +msgstr "" + +#: cinder/scheduler/host_manager.py:264 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:269 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:294 +#, python-format +msgid "volume service is down or disabled. (host: %s)" +msgstr "" + +#: cinder/scheduler/manager.py:63 +msgid "" +"ChanceScheduler and SimpleScheduler have been deprecated due to lack of " +"support for advanced features like: volume types, volume encryption, QoS " +"etc. These two schedulers can be fully replaced by FilterScheduler with " +"certain combination of filters and weighers." 
+msgstr "" + +#: cinder/scheduler/manager.py:98 cinder/scheduler/manager.py:100 +msgid "Failed to create scheduler manager volume flow" +msgstr "" + +#: cinder/scheduler/manager.py:159 +msgid "New volume type not specified in request_spec." +msgstr "" + +#: cinder/scheduler/manager.py:174 +#, python-format +msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." +msgstr "" + +#: cinder/scheduler/manager.py:192 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:68 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%s'" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:43 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:57 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:53 +msgid "No volume_id provided to populate a request_spec from" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:116 +#, python-format +msgid "Failed to schedule_create_volume: %(cause)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:135 +#, python-format +msgid "Failed notifying on %(topic)s payload %(payload)s" +msgstr "" + +#: cinder/tests/fake_driver.py:57 cinder/volume/driver.py:784 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:76 cinder/volume/driver.py:884 +#, python-format +msgid "FAKE ISER: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:97 +msgid "local_path not implemented" +msgstr "" + +#: cinder/tests/fake_driver.py:124 cinder/tests/fake_driver.py:129 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:70 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:78 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:94 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:97 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:58 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_netapp_nfs.py:360 +#, python-format +msgid "Share %(share)s and file name %(file_name)s" +msgstr "" + +#: cinder/tests/test_rbd.py:768 cinder/volume/drivers/rbd.py:175 +msgid "flush() not supported in this version of librbd" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:260 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1507 +#, python-format +msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1510 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1515 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:60 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:61 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: 
cinder/tests/test_xiv_ds8k.py:102 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/tests/api/contrib/test_backups.py:741 +msgid "Invalid input" +msgstr "" + +#: cinder/tests/integrated/test_login.py:29 +#, python-format +msgid "volume: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:32 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:42 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:50 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:58 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:100 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:103 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:121 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:148 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:159 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:166 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/transfer/api.py:68 +msgid "Volume in unexpected state" +msgstr "" + +#: cinder/transfer/api.py:102 cinder/volume/api.py:367 +msgid "status must be available" +msgstr "" + +#: cinder/transfer/api.py:119 +#, python-format +msgid "Failed to create transfer record for %s" +msgstr "" + +#: cinder/transfer/api.py:136 +#, python-format +msgid "Attempt to transfer %s with invalid auth key." +msgstr "" + +#: cinder/transfer/api.py:156 cinder/volume/flows/api/create_volume.py:508 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/transfer/api.py:182 +#, python-format +msgid "Failed to update quota donating volumetransfer id %s" +msgstr "" + +#: cinder/transfer/api.py:199 +#, python-format +msgid "Volume %s has been transferred." 
+msgstr "" + +#: cinder/volume/api.py:143 +#, python-format +msgid "Unable to query if %s is in the availability zone set" +msgstr "" + +#: cinder/volume/api.py:171 cinder/volume/api.py:173 +msgid "Failed to create api volume flow" +msgstr "" + +#: cinder/volume/api.py:202 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:214 +#, python-format +msgid "Volume status must be available or error, but current status is: %s" +msgstr "" + +#: cinder/volume/api.py:224 +msgid "Volume cannot be deleted while migrating" +msgstr "" + +#: cinder/volume/api.py:229 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:285 cinder/volume/api.py:350 +#: cinder/volume/qos_specs.py:240 cinder/volume/volume_types.py:67 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:370 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:377 +msgid "status must be in-use to detach" +msgstr "" + +#: cinder/volume/api.py:388 +msgid "Volume status must be available to reserve" +msgstr "" + +#: cinder/volume/api.py:464 +msgid "Snapshot cannot be created while volume is migrating" +msgstr "" + +#: cinder/volume/api.py:468 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:490 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:502 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:553 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/api.py:581 cinder/volume/flows/api/create_volume.py:208 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:585 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:589 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:720 cinder/volume/api.py:772 +msgid "Volume status must be available/in-use." +msgstr "" + +#: cinder/volume/api.py:723 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/api.py:752 +msgid "Volume status must be available to extend." +msgstr "" + +#: cinder/volume/api.py:757 +#, python-format +msgid "" +"New size for extend must be greater than current size. (current: " +"%(size)s, extended: %(new_size)s)" +msgstr "" + +#: cinder/volume/api.py:778 +msgid "Volume is already part of an active migration" +msgstr "" + +#: cinder/volume/api.py:784 +msgid "volume must not have snapshots" +msgstr "" + +#: cinder/volume/api.py:797 +#, python-format +msgid "No available service named %s" +msgstr "" + +#: cinder/volume/api.py:803 +msgid "Destination host must be different than current host" +msgstr "" + +#: cinder/volume/api.py:833 +msgid "Source volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:837 +msgid "Destination volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:842 +#, python-format +msgid "Destination has migration_status %(stat)s, expected %(exp)s." +msgstr "" + +#: cinder/volume/api.py:853 +msgid "Volume status must be available to update readonly flag." 
+msgstr "" + +#: cinder/volume/api.py:862 +#, python-format +msgid "Unable to update type due to incorrect status on volume: %s" +msgstr "" + +#: cinder/volume/api.py:868 +#, python-format +msgid "Volume %s is already part of an active migration." +msgstr "" + +#: cinder/volume/api.py:874 +#, python-format +msgid "migration_policy must be 'on-demand' or 'never', passed: %s" +msgstr "" + +#: cinder/volume/api.py:887 +#, python-format +msgid "Invalid volume_type passed: %s" +msgstr "" + +#: cinder/volume/api.py:900 +#, python-format +msgid "New volume_type same as original: %s" +msgstr "" + +#: cinder/volume/api.py:915 +msgid "Retype cannot change encryption requirements" +msgstr "" + +#: cinder/volume/api.py:927 +msgid "Retype cannot change front-end qos specs for in-use volumes" +msgstr "" + +#: cinder/volume/driver.py:189 cinder/volume/drivers/netapp/nfs.py:174 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:282 +#, python-format +msgid "copy_data_between_volumes %(src)s -> %(dest)s." +msgstr "" + +#: cinder/volume/driver.py:295 cinder/volume/driver.py:309 +#, python-format +msgid "Failed to attach volume %(vol)s" +msgstr "" + +#: cinder/volume/driver.py:327 +#, python-format +msgid "Failed to copy volume %(src)s to %(dest)d" +msgstr "" + +#: cinder/volume/driver.py:340 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:358 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:394 +#, python-format +msgid "Unable to access the backend storage via the path %(path)s." +msgstr "" + +#: cinder/volume/driver.py:433 +#, python-format +msgid "Creating a new backup for volume %s." +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Restoring backup %(backup)s to volume %(volume)s." +msgstr "" + +#: cinder/volume/driver.py:474 +msgid "Extend volume not implemented" +msgstr "" + +#: cinder/volume/driver.py:533 cinder/volume/drivers/emc/emc_smis_iscsi.py:113 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:546 +#, python-format +msgid "ISCSI discovery attempt failed for:%s" +msgstr "" + +#: cinder/volume/driver.py:548 +#, python-format +msgid "Error from iscsiadm -m discovery: %s" +msgstr "" + +#: cinder/volume/driver.py:595 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:599 cinder/volume/drivers/emc/emc_smis_iscsi.py:156 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:696 +msgid "The volume driver requires the iSCSI initiator name in the connector." +msgstr "" + +#: cinder/volume/driver.py:726 cinder/volume/driver.py:845 +#: cinder/volume/drivers/eqlx.py:247 cinder/volume/drivers/lvm.py:359 +#: cinder/volume/drivers/zadara.py:650 +#: cinder/volume/drivers/emc/emc_smis_common.py:859 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:235 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:602 +#: cinder/volume/drivers/netapp/iscsi.py:1032 +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/nexenta/iscsi.py:538 +#: cinder/volume/drivers/windows/windows.py:205 +msgid "Updating volume stats" +msgstr "" + +#: cinder/volume/driver.py:924 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:203 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." 
+msgstr "" + +#: cinder/volume/manager.py:209 +msgid "" +"ThinLVMVolumeDriver is deprecated, please configure LVMISCSIDriver and " +"lvm_type=thin. Continuing with those settings." +msgstr "" + +#: cinder/volume/manager.py:228 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)" +msgstr "" + +#: cinder/volume/manager.py:235 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s" +msgstr "" + +#: cinder/volume/manager.py:244 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:257 +#, python-format +msgid "Failed to re-export volume %s: setting to error state" +msgstr "" + +#: cinder/volume/manager.py:264 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:271 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:273 +#, python-format +msgid "" +"Error encountered during re-exporting phase of driver initialization: " +"%(name)s" +msgstr "" + +#: cinder/volume/manager.py:283 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:328 cinder/volume/manager.py:330 +msgid "Failed to create manager volume flow" +msgstr "" + +#: cinder/volume/manager.py:374 cinder/volume/manager.py:391 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:380 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:389 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:394 +#, python-format +msgid "Cannot delete volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:422 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:427 +#, python-format +msgid "volume %s: glance metadata deleted" +msgstr "" + +#: cinder/volume/manager.py:430 +#, python-format +msgid "no glance metadata found for volume %s" +msgstr "" + +#: cinder/volume/manager.py:434 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:451 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:462 +#, python-format +msgid "snapshot %(snap_id)s: creating" +msgstr "" + +#: cinder/volume/manager.py:490 +#, python-format +msgid "" +"Failed updating %(snapshot_id)s metadata using the provided volumes " +"%(volume_id)s metadata" +msgstr "" + +#: cinder/volume/manager.py:496 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:508 cinder/volume/manager.py:518 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:526 +#, python-format +msgid "Cannot delete snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:556 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:559 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:579 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:583 +msgid "being attached by another host" +msgstr "" + +#: cinder/volume/manager.py:587 +msgid "being attached by different mode" +msgstr "" + +#: cinder/volume/manager.py:590 +msgid "status must be available or attaching" +msgstr "" + +#: cinder/volume/manager.py:698 +#, python-format +msgid 
"Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "" + +#: cinder/volume/manager.py:760 +#, python-format +msgid "Unable to fetch connection information from backend: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:807 +#, python-format +msgid "Unable to terminate volume connection: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:854 +msgid "failed to create new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:857 +msgid "timeout creating new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:880 +#, python-format +msgid "Failed to copy volume %(vol1)s to %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:909 +#, python-format +msgid "" +"migrate_volume_completion: completing migration for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:921 +#, python-format +msgid "" +"migrate_volume_completion is cleaning up an error for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:940 +#, python-format +msgid "Failed to delete migration source vol %(vol)s: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:976 +#, python-format +msgid "volume %s: calling driver migrate_volume" +msgstr "" + +#: cinder/volume/manager.py:1016 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/manager.py:1024 +#, python-format +msgid "" +"Unable to update stats, %(driver_name)s -%(driver_version)s " +"%(config_group)s driver is uninitialized." +msgstr "" + +#: cinder/volume/manager.py:1044 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/manager.py:1091 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to extend volume by %(s_size)sG, " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/manager.py:1103 +#, python-format +msgid "volume %s: extending" +msgstr "" + +#: cinder/volume/manager.py:1105 +#, python-format +msgid "volume %s: extended successfully" +msgstr "" + +#: cinder/volume/manager.py:1107 +#, python-format +msgid "volume %s: Error trying to extend volume" +msgstr "" + +#: cinder/volume/manager.py:1169 +msgid "Failed to update usages while retyping volume." +msgstr "" + +#: cinder/volume/manager.py:1170 +msgid "Failed to get old volume type quota reservations" +msgstr "" + +#: cinder/volume/manager.py:1190 +#, python-format +msgid "Volume %s: retyped succesfully" +msgstr "" + +#: cinder/volume/manager.py:1193 +#, python-format +msgid "" +"Volume %s: driver error when trying to retype, falling back to generic " +"mechanism." +msgstr "" + +#: cinder/volume/manager.py:1204 +msgid "Retype requires migration but is not allowed." +msgstr "" + +#: cinder/volume/manager.py:1212 +msgid "Volume must not have snapshots." 
+msgstr "" + +#: cinder/volume/qos_specs.py:57 +#, python-format +msgid "Valid consumer of QoS specs are: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:84 cinder/volume/qos_specs.py:105 +#: cinder/volume/qos_specs.py:155 cinder/volume/qos_specs.py:197 +#: cinder/volume/qos_specs.py:211 cinder/volume/qos_specs.py:225 +#: cinder/volume/volume_types.py:43 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:123 cinder/volume/qos_specs.py:140 +#: cinder/volume/qos_specs.py:272 cinder/volume/volume_types.py:52 +#: cinder/volume/volume_types.py:99 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/qos_specs.py:156 +#, python-format +msgid "Failed to get all associations of qos specs %s" +msgstr "" + +#: cinder/volume/qos_specs.py:189 +#, python-format +msgid "" +"Type %(type_id)s is already associated with another qos specs: " +"%(qos_specs_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:198 +#, python-format +msgid "Failed to associate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:212 +#, python-format +msgid "Failed to disassociate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:226 +#, python-format +msgid "Failed to disassociate qos specs %s." +msgstr "" + +#: cinder/volume/qos_specs.py:284 cinder/volume/volume_types.py:111 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/utils.py:144 +#, python-format +msgid "" +"Incorrect value error: %(blocksize)s, it may indicate that " +"'volume_dd_blocksize' was configured incorrectly. Fall back to default." +msgstr "" + +#: cinder/volume/volume_types.py:130 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:131 +#: cinder/volume/drivers/block_device.py:143 cinder/volume/drivers/lvm.py:654 +#: cinder/volume/drivers/lvm.py:669 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:157 cinder/volume/drivers/lvm.py:687 +#, python-format +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:183 cinder/volume/drivers/lvm.py:483 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:200 cinder/volume/drivers/lvm.py:504 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:272 cinder/volume/drivers/lvm.py:227 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:287 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:311 cinder/volume/drivers/lvm.py:300 +#: cinder/volume/drivers/zadara.py:509 cinder/volume/drivers/nexenta/nfs.py:189 +#, python-format +msgid "Creating clone of volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:380 +msgid "No free disk" +msgstr "" + +#: cinder/volume/drivers/block_device.py:393 +msgid "No big enough free disk" +msgstr "" + +#: cinder/volume/drivers/coraid.py:84 +#, python-format +msgid "Invalid ESM url scheme \"%s\". Supported https only." +msgstr "" + +#: cinder/volume/drivers/coraid.py:111 +msgid "Invalid REST handle name. Expected path." 
+msgstr "" + +#: cinder/volume/drivers/coraid.py:134 +#, python-format +msgid "Call to json.loads() failed: %(ex)s. Response: %(resp)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:224 +msgid "Session is expired. Relogin on ESM." +msgstr "" + +#: cinder/volume/drivers/coraid.py:244 +msgid "Reply is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:246 +msgid "Error message is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:284 +#, python-format +msgid "Coraid Appliance ping failed: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:297 +#, python-format +msgid "Volume \"%(name)s\" created with VSX LUN \"%(lun)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:311 +#, python-format +msgid "Volume \"%s\" deleted." +msgstr "" + +#: cinder/volume/drivers/coraid.py:315 +#, python-format +msgid "Resize volume \"%(name)s\" to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:319 +#, python-format +msgid "Repository for volume \"%(name)s\" found: \"%(repo)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:333 +#, python-format +msgid "Volume \"%(name)s\" resized. New size is %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:385 +msgid "Cannot create clone volume in different repository." +msgstr "" + +#: cinder/volume/drivers/coraid.py:505 +#, python-format +msgid "Initialize connection %(shelf)s/%(lun)s for %(name)s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:139 +#, python-format +msgid "" +"CLI output\n" +"%s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:154 +msgid "Reading CLI MOTD" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:158 +#, python-format +msgid "Setting CLI terminal width: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:162 +#, python-format +msgid "Sending CLI command: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:169 +msgid "Error executing EQL command" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:199 +#, python-format +msgid "EQL-driver: executing \"%s\"" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:208 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:383 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:215 cinder/volume/drivers/san/san.py:149 +#, python-format +msgid "Error running SSH command: %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:282 +#, python-format +msgid "Volume %s does not exist, it may have already been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:300 +#, python-format +msgid "EQL-driver: Setup is complete, group IP is %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:304 +msgid "Failed to setup the Dell EqualLogic driver" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:320 +#, python-format +msgid "Failed to create volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:329 +#, python-format +msgid "Volume %s was not found while trying to delete it" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:333 +#, python-format +msgid "Failed to delete volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:348 +#, python-format +msgid "Failed to create snapshot of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:361 +#, python-format +msgid "Failed to create volume from snapshot %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:374 +#, python-format +msgid "Failed to create clone of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:384 +#, python-format +msgid "Failed to delete snapshot %(snap)s of volume %(vol)s" +msgstr "" + +#: 
cinder/volume/drivers/eqlx.py:405 +#, python-format +msgid "Failed to initialize connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:415 +#, python-format +msgid "Failed to terminate connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:436 +#, python-format +msgid "Volume %s is not found, it may have been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:440 +#, python-format +msgid "Failed to ensure export of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:459 +#, python-format +msgid "Failed to extend_volume %(name)s from %(current_size)sGB to %(new_size)sGB" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:86 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:91 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:103 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:161 +#, python-format +msgid "Cloning volume %(src)s to volume %(dst)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:166 +msgid "Volume status must be 'available'." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:202 cinder/volume/drivers/nfs.py:122 +#: cinder/volume/drivers/netapp/nfs.py:753 +#, python-format +msgid "casted to %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:216 +msgid "Snapshot status must be \"available\" to clone." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:238 +#, python-format +msgid "snapshot: %(snap)s, volume: %(vol)s, volume_size: %(size)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:257 +#, python-format +msgid "will copy from snapshot at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:275 cinder/volume/drivers/nfs.py:172 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:373 +#, python-format +msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:403 +#, python-format +msgid "nova call result: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:405 +msgid "Call to Nova to create snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:427 +msgid "Nova returned \"error\" status while creating snapshot." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:431 +#, python-format +msgid "Status of snapshot %(id)s is now %(status)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:444 +#, python-format +msgid "Timed out while waiting for Nova update for creation of snapshot %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:456 +#, python-format +msgid "create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:457 +#, python-format +msgid "volume id: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:532 +msgid "'active' must be present when writing snap_info." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:562 +#, python-format +msgid "deleting snapshot %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:566 +msgid "Volume status must be \"available\" or \"in-use\"." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:582 +#, python-format +msgid "" +"Snapshot record for %s is not present, allowing snapshot_delete to " +"proceed." 
+msgstr "" + +#: cinder/volume/drivers/glusterfs.py:587 +#, python-format +msgid "snapshot_file for this snap is %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:608 +#, python-format +msgid "No base file found for %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:625 +#, python-format +msgid "No %(base_id)s found for %(file)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:680 +#, python-format +msgid "No file found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:690 +#, python-format +msgid "No snap found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:701 +#, python-format +msgid "No file depends on %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:727 +#, python-format +msgid "Check condition failed: %s expected to be None." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:778 +msgid "Call to Nova delete snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:796 +#, python-format +msgid "status of snapshot %s is still \"deleting\"... waiting" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:802 +#, python-format +msgid "Unable to delete snapshot %(id)s, status: %(status)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:815 +#, python-format +msgid "Timed out while waiting for Nova update for deletion of snapshot %(id)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:904 +#, python-format +msgid "%s must be a valid raw or qcow2 image." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:967 +msgid "Extend volume is only supported for this driver when no snapshots exist." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:975 +#, python-format +msgid "Unrecognized backing format: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:990 +#, python-format +msgid "creating new volume at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:993 +#, python-format +msgid "file already exists at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1019 cinder/volume/drivers/nfs.py:159 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1021 +#, python-format +msgid "Available shares: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1038 +#, python-format +msgid "" +"GlusterFS share at %(dir)s is not writable by the Cinder volume service. " +"Snapshot operations will not be supported." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:96 +#, python-format +msgid "GPFS is not active. Detailed output: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:97 +#, python-format +msgid "GPFS is not running - state: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:140 +msgid "Option gpfs_mount_point_base is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:147 +msgid "Option gpfs_images_share_mode is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:153 +msgid "Option gpfs_images_dir is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:160 +#, python-format +msgid "" +"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " +"belong to different file systems" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:169 +#, python-format +msgid "" +"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in " +"cluster daemon level %(cur)s - must be at least at level %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:183 +#, python-format +msgid "%s must be an absolute path." 
+msgstr "" + +#: cinder/volume/drivers/gpfs.py:188 +#, python-format +msgid "%s is not a directory." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:197 +#, python-format +msgid "" +"The GPFS filesystem %(fs)s is not at the required release level. Current" +" level is %(cur)s, must be at least %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:556 +#, python-format +msgid "Failed to resize volume %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:604 +#, python-format +msgid "mkfs failed on volume %(vol)s, error message was: %(err)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:637 +#, python-format +msgid "" +"%s cannot be accessed. Verify that GPFS is active and file system is " +"mounted." +msgstr "" + +#: cinder/volume/drivers/lvm.py:189 +#, python-format +msgid "Unabled to delete due to existing snapshot for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:215 +#, python-format +msgid "Volume device file path %s does not exist." +msgstr "" + +#: cinder/volume/drivers/lvm.py:221 +#, python-format +msgid "Size for volume: %s not found, cannot secure delete." +msgstr "" + +#: cinder/volume/drivers/lvm.py:262 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:361 +#, python-format +msgid "Unable to update stats on non-initialized Volume Group: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:462 +#, python-format +msgid "Error creating iSCSI target, retrying creation for target: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:482 +#, python-format +msgid "volume_info:%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:518 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:519 cinder/volume/drivers/lvm.py:724 +#: cinder/volume/drivers/huawei/rest_common.py:1225 +#, python-format +msgid "%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:573 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/nfs.py:109 +msgid "Driver specific implementation needs to return mount_point_base." +msgstr "" + +#: cinder/volume/drivers/nfs.py:263 +#, python-format +msgid "Expected volume size was %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:264 +#, python-format +msgid " but size is now %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:361 +#, python-format +msgid "%s is already mounted" +msgstr "" + +#: cinder/volume/drivers/nfs.py:421 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:426 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/nfs.py:431 +#, python-format +msgid "NFS config 'nfs_oversub_ratio' invalid. Must be > 0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:439 +#, python-format +msgid "NFS config 'nfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:493 +#, python-format +msgid "Selected %s as target nfs share." 
+msgstr "" + +#: cinder/volume/drivers/nfs.py:526 +#, python-format +msgid "%s is above nfs_used_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:529 +#, python-format +msgid "%s is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:532 +#, python-format +msgid "%s reserved space is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/rbd.py:160 +#, python-format +msgid "Invalid argument - whence=%s not supported" +msgstr "" + +#: cinder/volume/drivers/rbd.py:164 +msgid "Invalid argument" +msgstr "" + +#: cinder/volume/drivers/rbd.py:183 +msgid "fileno() not supported by RBD()" +msgstr "" + +#: cinder/volume/drivers/rbd.py:210 +#, python-format +msgid "error opening rbd image %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:259 +msgid "rados and rbd python libraries not found" +msgstr "" + +#: cinder/volume/drivers/rbd.py:265 +msgid "error connecting to ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:346 cinder/volume/drivers/sheepdog.py:178 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:377 +#, python-format +msgid "clone depth exceeds limit of %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:411 +#, python-format +msgid "maximum clone depth (%d) has been reached - flattening source volume" +msgstr "" + +#: cinder/volume/drivers/rbd.py:423 +#, python-format +msgid "flattening source volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:435 +#, python-format +msgid "creating snapshot='%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:445 +#, python-format +msgid "cloning '%(src_vol)s@%(src_snap)s' to '%(dest)s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:459 +msgid "clone created successfully" +msgstr "" + +#: cinder/volume/drivers/rbd.py:468 +#, python-format +msgid "creating volume '%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:484 +#, python-format +msgid "flattening %(pool)s/%(img)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:490 +#, python-format +msgid "cloning %(pool)s/%(img)s@%(snap)s to %(dst)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:527 +msgid "volume has no backup snaps" +msgstr "" + +#: cinder/volume/drivers/rbd.py:550 +#, python-format +msgid "volume %s is not a clone" +msgstr "" + +#: cinder/volume/drivers/rbd.py:568 +#, python-format +msgid "deleting parent snapshot %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:579 +#, python-format +msgid "deleting parent %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:593 +#, python-format +msgid "volume %s no longer exists in backend" +msgstr "" + +#: cinder/volume/drivers/rbd.py:609 +msgid "volume has clone snapshot(s)" +msgstr "" + +#: cinder/volume/drivers/rbd.py:625 +#, python-format +msgid "deleting rbd volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:629 +msgid "" +"ImageBusy error raised while deleting rbd volume. This may have been " +"caused by a connection from a client that has crashed and, if so, may be " +"resolved by retrying the delete after 30 seconds has elapsed." 
+msgstr "" + +#: cinder/volume/drivers/rbd.py:642 +msgid "volume is a clone so cleaning references" +msgstr "" + +#: cinder/volume/drivers/rbd.py:696 +#, python-format +msgid "connection data: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:705 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:709 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:712 +msgid "Not an rbd snapshot" +msgstr "" + +#: cinder/volume/drivers/rbd.py:724 +#, python-format +msgid "not cloneable: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:728 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:733 +msgid "rbd image clone requires image format to be 'raw' but image {0} is '{1}'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:747 +#, python-format +msgid "Unable to open image %(loc)s: %(err)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:817 +msgid "volume backup complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:830 +msgid "volume restore complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:840 cinder/volume/drivers/sheepdog.py:195 +#, python-format +msgid "Failed to Extend Volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:845 cinder/volume/drivers/sheepdog.py:200 +#: cinder/volume/drivers/windows/windows.py:223 +#, python-format +msgid "Extend volume from %(old_size)s GB to %(new_size)s GB." +msgstr "" + +#: cinder/volume/drivers/scality.py:67 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:78 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:84 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:105 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:139 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:59 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:64 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:144 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:151 +#, python-format +msgid "" +"Failed to make httplib connection SolidFire Cluster: %s (verify san_ip " +"settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:154 +#, python-format +msgid "Failed to make httplib connection: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:161 +#, python-format +msgid "" +"Request to SolidFire cluster returned bad status: %(status)s / %(reason)s" +" (check san_login/san_password settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:166 +#, python-format +msgid "HTTP request failed, with status: %(status)s and reason: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:177 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:183 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:187 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:189 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:195 +#, python-format +msgid "Detected 
xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:202 +#: cinder/volume/drivers/solidfire.py:271 +#: cinder/volume/drivers/solidfire.py:366 +#, python-format +msgid "API response: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:222 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:253 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:315 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:398 +msgid "Failed to get model update from clone" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:410 +#, python-format +msgid "Failed volume create: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:425 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:460 +#, python-format +msgid "Failed to get SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:469 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:478 +#, python-format +msgid "Volume %s, not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:481 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:550 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:554 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:556 +msgid "This usually means the volume was never successfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:569 +#, python-format +msgid "Failed to delete SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:572 +#: cinder/volume/drivers/solidfire.py:646 +#: cinder/volume/drivers/solidfire.py:709 +#: cinder/volume/drivers/solidfire.py:734 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:575 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:579 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:587 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:638 +msgid "Entering SolidFire extend_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:660 +msgid "Leaving SolidFire extend_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:665 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:673 +msgid "Failed to get updated stats" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:703 +#: cinder/volume/drivers/solidfire.py:728 +msgid "Entering SolidFire attach_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:773 +msgid "Leaving SolidFire transfer volume" +msgstr "" + +#: cinder/volume/drivers/zadara.py:236 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:260 +#, python-format +msgid "Operation completed. 
%(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:357 +#, python-format +msgid "Pool %(name)s: %(total)sGB total, %(free)sGB free" +msgstr "" + +#: cinder/volume/drivers/zadara.py:408 cinder/volume/drivers/zadara.py:531 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:438 +#, python-format +msgid "Create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:445 cinder/volume/drivers/zadara.py:490 +#: cinder/volume/drivers/zadara.py:516 +#, python-format +msgid "Volume %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:456 +#, python-format +msgid "Delete snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:464 +#, python-format +msgid "snapshot: original volume %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:472 +#, python-format +msgid "snapshot: snapshot %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:483 +#, python-format +msgid "Creating volume from snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:496 +#, python-format +msgid "Snapshot %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:614 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:40 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:79 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:83 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:91 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:98 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:107 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:115 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:130 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:137 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:144 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:152 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:157 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:167 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:177 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:188 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:197 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:218 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:230 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:241 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:257 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:266 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:278 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:287 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:292 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:302 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:312 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:321 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:342 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. 
Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:354 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:365 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:381 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:390 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:402 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:411 +msgid "Entering delete_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:413 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:420 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:430 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:438 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:442 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigService: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:456 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:465 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:472 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:476 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:488 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:497 +#: cinder/volume/drivers/emc/emc_smis_common.py:567 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:502 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:518 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:527 +#, python-format +msgid "" +"Error Create Snapshot: %(snapshot)s Volume: %(volume)s Error: " +"%(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:535 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:541 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:545 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:551 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:559 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:574 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:590 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:599 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:611 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:621 +#, python-format +msgid "Create export: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:626 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:648 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:663 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:674 +#, python-format +msgid "Error mapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:678 +#, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:694 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:707 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:711 +#, python-format +msgid "HidePaths for volume %s completed successfully." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:724 +#, python-format +msgid "" +"AddMembers: ConfigService: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:739 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:744 +#, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:757 +#, python-format +msgid "" +"RemoveMembers: ConfigService: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:770 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:775 +#, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:781 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:790 +#: cinder/volume/drivers/emc/emc_smis_common.py:820 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:804 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:810 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:834 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:840 +#, python-format +msgid "Volume %s is already mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:852 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:884 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:887 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:903 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:906 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:928 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:948 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:952 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:959 +msgid "Cannot connect to ECOM server" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:971 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:984 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:997 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1010 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1054 +#, python-format +msgid "Pool %(storage_type)s is not found."
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1060 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1066 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1082 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1114 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1117 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1130 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1158 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1184 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1188 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1248 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1289 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1302 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1314 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1326 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1361 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1404 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1409 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1419 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1441 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1463 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1491 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1520 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1526 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1538 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1548 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1550 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1566 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:152 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:161 +#, python-format +msgid "Cannot find device number for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:191 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:198 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:215 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:70 +#, python-format +msgid "Range: start LU: %(start)s, end LU: %(end)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:84 +#, python-format +msgid "setting LU upper (end) limit to %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:92 +#, python-format +msgid "%(element)s: %(val)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:103 cinder/volume/drivers/hds/hds.py:105 +#, python-format +msgid "XML exception reading parameter: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:178 +#, python-format +msgid "portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:197 +#, python-format +msgid "No configuration found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:250 +#, python-format +msgid "HDP not found: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:289 +#, python-format +msgid "iSCSI portal not found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:327 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:355 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is cloned." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:372 +#, python-format +msgid "LUN %(lun)s extended to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:395 +#, python-format +msgid "delete lun %(lun)s on %(name)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:480 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created from snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:503 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is created as snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:522 +#, python-format +msgid "LUN %s is deleted." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:57 +msgid "_instantiate_driver: configuration not found." 
+msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:64 +#, python-format +msgid "" +"_instantiate_driver: Loading %(protocol)s driver for Huawei OceanStor " +"%(product)s series storage arrays." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:84 +#, python-format +msgid "" +"\"Product\" or \"Protocol\" is illegal. \"Product\" should be set to " +"either T, Dorado or HVS. \"Protocol\" should be set to either iSCSI or " +"FC. Product: %(product)s Protocol: %(protocol)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:74 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s host: %(host)s initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:92 +#: cinder/volume/drivers/huawei/huawei_t.py:461 +#, python-format +msgid "initialize_connection: Target FC ports WWNS: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:101 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(ini)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:159 +#: cinder/volume/drivers/huawei/rest_common.py:1278 +#, python-format +msgid "" +"_get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " +"check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:206 +#: cinder/volume/drivers/huawei/rest_common.py:1083 +#, python-format +msgid "_get_tgt_iqn: iSCSI IP is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:234 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:248 +#, python-format +msgid "" +"_get_iscsi_tgt_port_info: Failed to get iSCSI port info. Please make sure" +" the iSCSI port IP %s is configured in array." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:323 +#: cinder/volume/drivers/huawei/huawei_t.py:552 +#, python-format +msgid "" +"terminate_connection: volume: %(vol)s, host: %(host)s, connector: " +"%(initiator)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:351 +#, python-format +msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:436 +msgid "validate_connector: The FC driver requires the wwpns in the connector." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:443 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:578 +#, python-format +msgid "_remove_fc_ports: FC port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:40 +#, python-format +msgid "parse_xml_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:129 +#, python-format +msgid "_get_host_os_type: Host %(ip)s OS type is %(os)s."
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:59 +#, python-format +msgid "HVS Request URL: %(url)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:60 +#, python-format +msgid "HVS Request Data: %(data)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:73 +#, python-format +msgid "HVS Response Data: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:75 +#, python-format +msgid "Bad response from server: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:82 +msgid "JSON transfer error" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:102 +#, python-format +msgid "Login error, reason is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:166 +#, python-format +msgid "" +"%(err)s\n" +"result: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:173 +#, python-format +msgid "%s \"data\" was not in result." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:208 +msgid "Can't find the Qos policy in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:246 +msgid "Can't find lun or lun group in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:280 +#, python-format +msgid "Invalid resource pool: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:298 +#, python-format +msgid "Get pool info error, pool name is: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:327 +#, python-format +msgid "create_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:354 +#, python-format +msgid "_stop_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:474 +#, python-format +msgid "" +"_mapping_hostgroup_and_lungroup: lun_group: %(lun_group)s view_id: " +"%(view_id)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:511 +#: cinder/volume/drivers/huawei/rest_common.py:543 +#, python-format +msgid "initiator name: %(initiator_name)s, volume name: %(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:527 +#, python-format +msgid "host lun id is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:553 +#, python-format +msgid "the free wwns %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:574 +#, python-format +msgid "the fc server properties are: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:688 +#, python-format +msgid "JSON transfer data error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:874 +#, python-format +msgid "terminate_connection: volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:937 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType: %(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:964 +#, python-format +msgid "" +"PrefetchType config is wrong. PrefetchType must be in 1,2,3,4. fetchtype " +"is: %(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:970 +msgid "Use default prefetch fetchtype. Prefetch fetchtype: Intelligent."
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:982 +#, python-format +msgid "" +"_wait_for_luncopy: LUNcopy status is not normal. LUNcopy name: " +"%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1056 +#, python-format +msgid "" +"_get_iscsi_port_info: Failed to get iscsi port info through config IP " +"%(ip)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1101 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1124 +#, python-format +msgid "_parse_volume_type: type id: %(type_id)s config parameter is: %(params)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1157 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the configuration file " +"%(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1162 +#, python-format +msgid "The config parameters are: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1239 +#: cinder/volume/drivers/huawei/ssh_common.py:118 +#: cinder/volume/drivers/huawei/ssh_common.py:1265 +#, python-format +msgid "_check_conf_file: Config file invalid. %s must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1246 +#: cinder/volume/drivers/huawei/ssh_common.py:125 +msgid "_check_conf_file: Config file invalid. StoragePool must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1256 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1300 +msgid "Cannot find lun in array" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:54 +#, python-format +msgid "ssh_read: Read SSH timeout. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:70 +msgid "No response message. Please check system status." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:101 +#: cinder/volume/drivers/huawei/ssh_common.py:1249 +msgid "do_setup" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:135 +#: cinder/volume/drivers/huawei/ssh_common.py:1287 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType is invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:169 +#, python-format +msgid "_get_login_info: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:224 +#, python-format +msgid "create_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:242 +#, python-format +msgid "" +"_name_translate: Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:279 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the element in configuration " +"file %(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:373 +#: cinder/volume/drivers/huawei/ssh_common.py:1451 +#, python-format +msgid "LUNType must be \"Thin\" or \"Thick\". LUNType: %(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:395 +msgid "" +"_parse_conf_lun_params: Use default prefetch type. 
Prefetch type: " +"Intelligent" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:421 +#, python-format +msgid "" +"_get_maximum_capacity_pool_id: Failed to get pool id. Please check config" +" file and make sure the StoragePool %s is created in storage array." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:436 +#, python-format +msgid "CLI command: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:466 +#, python-format +msgid "" +"_execute_cli: Cannot connect to IP %(old)s, trying to connect to the other " +"IP %(new)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:501 +#, python-format +msgid "_execute_cli: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:511 +#, python-format +msgid "delete_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:516 +#, python-format +msgid "delete_volume: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:570 +#, python-format +msgid "" +"create_volume_from_snapshot: snapshot name: %(snapshot)s, volume name: " +"%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:580 +#, python-format +msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:650 +#, python-format +msgid "_wait_for_luncopy: LUNcopy %(luncopyname)s status is %(status)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:688 +#, python-format +msgid "create_cloned_volume: src volume: %(src)s, tgt volume: %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:697 +#, python-format +msgid "Source volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:739 +#, python-format +msgid "" +"extend_volume: extended volume name: %(extended_name)s newly added volume " +"name: %(added_name)s newly added volume size: %(added_size)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:747 +#, python-format +msgid "extend_volume: volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:779 +#, python-format +msgid "create_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:785 +msgid "create_snapshot: Resource pool needs at least 1GB of available capacity." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:792 +#, python-format +msgid "create_snapshot: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:855 +#, python-format +msgid "delete_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:865 +#, python-format +msgid "" +"delete_snapshot: Cannot delete snapshot %s because it is a source LUN of " +"LUNCopy." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:873 +#, python-format +msgid "delete_snapshot: Snapshot %(snap)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:916 +#, python-format +msgid "" +"%(func)s: %(msg)s\n" +"CLI command: %(cmd)s\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:933 +#, python-format +msgid "map_volume: Volume %s was not found." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1079 +#, python-format +msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1102 +#, python-format +msgid "remove_map: Host %s does not exist."
+msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1106 +#, python-format +msgid "remove_map: Volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1119 +#, python-format +msgid "remove_map: No map between host %(host)s and volume %(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1138 +#, python-format +msgid "" +"_delete_map: There are IOs accessing the system. Retrying to delete host map" +" %(mapid)s 10s later." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1146 +#, python-format +msgid "" +"_delete_map: Failed to delete host map %(mapid)s.\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1185 +msgid "_update_volume_stats: Updating volume stats." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1277 +msgid "_check_conf_file: Config file invalid. StoragePool must be specified." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1311 +msgid "" +"_get_device_type: The driver only supports Dorado5100 and Dorado 2100 G2 " +"now." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1389 +#, python-format +msgid "" +"create_volume_from_snapshot: %(device)s does not support creating a " +"volume from a snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1396 +#, python-format +msgid "create_cloned_volume: %(device)s does not support cloning a volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1404 +#, python-format +msgid "extend_volume: %(device)s does not support extending a volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1413 +#, python-format +msgid "create_snapshot: %(device)s does not support snapshots." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:132 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:142 +#, python-format +msgid "Failed getting details for pool %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:179 +msgid "do_setup: No configured nodes."
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:182 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:186 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:190 +msgid "Unable to determine system name" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:193 +msgid "Unable to determine system id" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:196 +msgid "Unable to determine pool extent size" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:203 +#: cinder/volume/drivers/netapp/iscsi.py:122 +#: cinder/volume/drivers/netapp/nfs.py:639 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:157 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:209 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:217 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:225 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:235 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:254 +msgid "The connector does not contain the required information." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:277 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:297 +msgid "CHAP secret exists for host but CHAP is disabled" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:302 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:314 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:316 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:333 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:342 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:365 +msgid "" +"Could not get FC connection information for the host-volume connection. " +"Is the host configured properly for FC connections?" 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:380 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:385 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:403 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:411 +msgid "terminate_connection: Failed to get host name from connector." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:421 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:447 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:459 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:470 +#, python-format +msgid "enter: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:474 +msgid "extend_volume: Extending a volume with snapshots is not supported." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:481 +#, python-format +msgid "leave: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:497 +#, python-format +msgid "enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:523 +#, python-format +msgid "leave: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:540 +#, python-format +msgid "" +"enter: retype: id=%(id)s, new_type=%(new_type)s, diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:581 +#, python-format +msgid "" +"exit: retype: id=%(id)s, new_type=%(new_type)s, diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:622 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:623 +msgid "_update_volume_stats: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:44 +#, python-format +msgid "Could not find key in output of command %(cmd)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:64 +#, python-format +msgid "Failed to get code level (%s)."
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:86 +#, python-format +msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:143 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:165 +#, python-format +msgid "Failed to find host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:178 +#, python-format +msgid "enter: get_host_from_connector: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:207 +#, python-format +msgid "leave: get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:218 +#, python-format +msgid "enter: create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:224 +msgid "create_host: Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:235 +msgid "create_host: No initiators or wwpns supplied." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:265 +#, python-format +msgid "leave: create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:275 +#, python-format +msgid "enter: map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:301 +#, python-format +msgid "" +"leave: map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host " +"%(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:311 +#, python-format +msgid "enter: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:318 +#, python-format +msgid "unmap_vol_from_host: No mapping of volume %(vol_name)s to any host found." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:324 +#, python-format +msgid "" +"unmap_vol_from_host: Multiple mappings of volume %(vol_name)s found, no " +"host specified." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:336 +#, python-format +msgid "" +"unmap_vol_from_host: No mapping of volume %(vol_name)s to host %(host)s " +"found."
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:348 +#, python-format +msgid "leave: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:377 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:383 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:390 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:397 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:402 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:408 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:417 +#, python-format +msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:452 +msgid "Protocol must be specified as 'iSCSI' or 'FC'." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:495 +#, python-format +msgid "enter: create_vdisk: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:498 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:525 +#, python-format +msgid "" +"Unexpected mapping status %(status)s for mapping %(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:535 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within the allotted %(to)d " +"seconds timeout. Terminating." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:544 +#, python-format +msgid "" +"enter: run_flashcopy: execute FlashCopy from source %(source)s to target " +"%(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:552 +#, python-format +msgid "leave: run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:572 +#, python-format +msgid "Loopcall: _check_vdisk_fc_mappings(), vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:595 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:631 +#, python-format +msgid "Calling _ensure_vdisk_no_fc_mappings: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:639 +#, python-format +msgid "enter: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:641 +#, python-format +msgid "Tried to delete non-existent vdisk %s."
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:645 +#, python-format +msgid "leave: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:649 +#, python-format +msgid "enter: create_copy: snapshot %(src)s to %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:654 +#, python-format +msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:669 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt)s from vdisk %(src)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:691 +msgid "migrate_volume started without a vdisk copy in the expected pool." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:743 +#, python-format +msgid "" +"Ignoring IO group change as storage code level is %(code_level)s, below " +"6.4.0.0" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:35 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:211 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:244 +#, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:53 +#, python-format +msgid "Expected no output from CLI command %(cmd)s, got %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:65 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:256 +#, python-format +msgid "" +"Failed to parse CLI output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:142 +msgid "Must pass wwpn or host to lsfabric." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:158 +#, python-format +msgid "Did not find success message nor error for %(fun)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:163 +msgid "" +"storwize_svc_multihostmap_enabled is set to False, not allowing multi " +"host mapping." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:347 +#, python-format +msgid "Did not find expected key %(key)s in %(fun)s: %(raw)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:382 +#, python-format +msgid "" +"Unexpected CLI response: header/row mismatch. header: %(header)s, row: " +"%(row)s" +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:419 +#, python-format +msgid "No element by given name %s." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:440 +msgid "Not a valid value for NaElement." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:444 +msgid "NaElement name cannot be null." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:468 +msgid "Type cannot be converted into NaElement."
+msgstr "" + +#: cinder/volume/drivers/netapp/common.py:75 +msgid "Required configuration not found" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:103 +#, python-format +msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:109 +#, python-format +msgid "Storage family %s is not supported" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:116 +#, python-format +msgid "No default storage protocol found for storage family %(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:123 +#, python-format +msgid "" +"Protocol %(storage_protocol)s is not supported for storage family " +"%(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:130 +#, python-format +msgid "" +"NetApp driver of family %(storage_family)s and protocol " +"%(storage_protocol)s loaded" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:139 +msgid "Only loading netapp drivers is supported." +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:158 +#, python-format +msgid "" +"The configured NetApp driver is deprecated. Please refer to the link to " +"resolve the issue '%s'." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:69 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:105 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:150 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:166 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:175 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:191 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:227 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:232 +#, python-format +msgid "" +"Successfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:238 +#, python-format +msgid "Failed to get LUN target details for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:249 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:252 +#, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:290 +#, python-format +msgid "Snapshot %s deletion successful" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:310 +#: cinder/volume/drivers/netapp/iscsi.py:565 +#: cinder/volume/drivers/netapp/nfs.py:99 +#: cinder/volume/drivers/netapp/nfs.py:206 +#, python-format +msgid "Resizing %s failed. Cleaning volume." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:325 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:412 +#, python-format +msgid "Error mapping lun. Code: %(code)s, Message: %(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:431 +#, python-format +msgid "Error unmapping lun. Code: %(code)s, Message: %(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:511 +msgid "Object is not a NetApp LUN."
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:543 +#, python-format +msgid "Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:545 +#, python-format +msgid "Error getting lun attribute. Exception: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:600 +#, python-format +msgid "No need to extend volume %s as it is already the requested new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:606 +#, python-format +msgid "Resizing lun %s directly to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:633 +#, python-format +msgid "Lun %(path)s geometry failed. Message - %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:662 +#, python-format +msgid "Moving lun %(name)s to %(new_name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:677 +#, python-format +msgid "Resizing lun %s using sub clone to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:684 +#, python-format +msgid "%s cannot be sub clone resized as it is hosted on a compressed volume" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:690 +#, python-format +msgid "%s cannot be sub clone resized as it contains no blocks." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:707 +#, python-format +msgid "Post clone resize lun %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:718 +#, python-format +msgid "Failure staging lun %s to tmp." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:723 +#, python-format +msgid "Failure moving new cloned lun to %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:727 +#, python-format +msgid "Failure deleting staged tmp lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:730 +#, python-format +msgid "Unknown exception in post clone resize lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:732 +#, python-format +msgid "Exception details: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:736 +msgid "Getting lun block count." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:741 +#, python-format +msgid "Failure getting lun info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:785 +#, python-format +msgid "Failed to get vol with required size and extra specs for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:796 +#, python-format +msgid "Error provisioning vol %(name)s on %(volume)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:841 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:982 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:986 +#, python-format +msgid "No cloned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1087 +msgid "Cluster ssc is not updated. No volume stats found." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1149 +#: cinder/volume/drivers/netapp/nfs.py:1080 +msgid "Unsupported ONTAP version. ONTAP version 7.3.1 and above is supported." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1153 +#: cinder/volume/drivers/netapp/nfs.py:1084 +#: cinder/volume/drivers/netapp/utils.py:320 +msgid "Api version could not be determined." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1164 +#, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1273 +#, python-format +msgid "Error finding luns for volume %s. Verify volume exists."
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1390 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1393 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1456 +msgid "Volume refresh job already running. Returning..." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1462 +#, python-format +msgid "Error refreshing vol capacity. Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1470 +#, python-format +msgid "Refreshing capacity info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:104 +#: cinder/volume/drivers/netapp/nfs.py:211 +#, python-format +msgid "NFS file %s not discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:224 +#, python-format +msgid "Copied image to volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:230 +#, python-format +msgid "Registering image in cache %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:237 +#, python-format +msgid "" +"Exception while registering image %(image_id)s in cache. Exception: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:250 +#, python-format +msgid "Found cache file for image %(image_id)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:263 +#, python-format +msgid "Cloning img from cache for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:271 +msgid "Image cache cleaning in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:282 +msgid "Image cache cleaning in progress." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:293 +#, python-format +msgid "Cleaning cache for share %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:298 +#, python-format +msgid "Files to be queued for deletion %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:305 +#, python-format +msgid "Exception during cache cleaning %(share)s. Message - %(ex)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:311 +msgid "Image cache cleaning done." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:336 +#, python-format +msgid "Bytes to free %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:343 +#, python-format +msgid "Delete file path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:358 +#, python-format +msgid "Deleting file at path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:363 +#, python-format +msgid "Exception during deleting %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:395 +#, python-format +msgid "Unexpected exception in cloning image %(image_id)s. 
Message: %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:411 +#, python-format +msgid "Cloning image %s from cache" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:415 +#, python-format +msgid "Cache share: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:425 +#, python-format +msgid "Unexpected exception during image cloning in share %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:431 +#, python-format +msgid "Cloning image %s directly in share" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:436 +#, python-format +msgid "Share is cloneable %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:443 +#, python-format +msgid "Image is raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:450 +#, python-format +msgid "Image will be converted locally to raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:457 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:467 +#, python-format +msgid "Performing post clone for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:474 +msgid "NFS file could not be discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:478 +msgid "Checking file for resize" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:482 +#, python-format +msgid "Resizing file to %sG" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:488 +msgid "Resizing image file failed." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:510 +msgid "Discover file retries exhausted." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:529 +#, python-format +msgid "Image location not in the expected format %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:557 +#, python-format +msgid "Found possible share matches %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:561 +msgid "Unexpected exception while short-listing used share." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:599 +#, python-format +msgid "Extending volume %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:710 +#, python-format +msgid "Shares on vserver %s will only be used for provisioning." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:714 +#: cinder/volume/drivers/netapp/nfs.py:892 +msgid "No vserver set in config. SSC will be disabled." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:757 +#, python-format +msgid "Exception creating vol %(name)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:765 +#, python-format +msgid "Volume %s could not be created on shares." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:815 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:856 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:862 +#, python-format +msgid "" +"Cloning with params volume %(volume)s, src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:924 +msgid "No cluster ssc stats found. Wait for next volume stats update." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:941 +msgid "No shares found, hence skipping ssc refresh."
+msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:978 +#: cinder/volume/drivers/netapp/nfs.py:1221 +#, python-format +msgid "Shortlisted deletion-eligible files %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:983 +#: cinder/volume/drivers/netapp/nfs.py:1226 +#, python-format +msgid "Getting file usage for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:988 +#: cinder/volume/drivers/netapp/nfs.py:1231 +#, python-format +msgid "file-usage for path %(path)s is %(bytes)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1005 +#: cinder/volume/drivers/netapp/nfs.py:1268 +#, python-format +msgid "Share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1007 +#: cinder/volume/drivers/netapp/nfs.py:1270 +#, python-format +msgid "No share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1038 +#, python-format +msgid "Found volume %(vol)s for share %(share)s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1129 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1139 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:241 +#, python-format +msgid "Unexpected error while creating ssc vol list. Message - %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:272 +#, python-format +msgid "Exception querying aggr options. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:313 +#, python-format +msgid "Exception querying sis information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:347 +#, python-format +msgid "Exception querying mirror information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:379 +#, python-format +msgid "Exception querying storage disk. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:421 +#, python-format +msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:455 +#, python-format +msgid "Successfully completed stale refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:482 +#, python-format +msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:488 +#, python-format +msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:500 +msgid "Backend not a VolumeDriver." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:502 +msgid "Backend server not NaServer." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:505 +msgid "ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:517 +msgid "refresh stale ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:616 +msgid "Fatal error: User not permitted to query NetApp volumes." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:623 +#, python-format +msgid "" +"The user does not have access or sufficient privileges to use all ssc " +"apis. The ssc features %s may not work as expected." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:122 +msgid "ems executed successfully." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:124 +#, python-format +msgid "Failed to invoke ems. 
Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:137 +msgid "" +"This is not the recommended way to use NetApp drivers. Please use " +"NetAppDriver to achieve the functionality." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:160 +msgid "Requires an NaServer instance." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:317 +msgid "Unsupported Clustered Data ONTAP version." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:99 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:150 +#, python-format +msgid "Extending volume: %(id)s New size: %(size)s GB" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:166 +#, python-format +msgid "Volume %s does not exist, it seems it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:179 +#, python-format +msgid "Cannot delete snapshot %(origin)s: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:190 +#, python-format +msgid "Creating temp snapshot of the original volume: %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:200 +#: cinder/volume/drivers/nexenta/nfs.py:200 +#, python-format +msgid "Volume creation failed, deleting created snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:205 +#: cinder/volume/drivers/nexenta/nfs.py:205 +#, python-format +msgid "Failed to delete zfs snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:223 +#, python-format +msgid "Enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:250 +#, python-format +msgid "Remote NexentaStor appliance at %s should be SSH-bound." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:267 +#, python-format +msgid "" +"Cannot send source snapshot %(src)s to destination %(dst)s. Reason: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:275 +#, python-format +msgid "" +"Cannot delete temporary source snapshot %(src)s on NexentaStor Appliance:" +" %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:281 +#, python-format +msgid "Cannot delete source volume %(volume)s on NexentaStor Appliance: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:318 +#, python-format +msgid "Snapshot %s does not exist, it seems it was already deleted."
+msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:439 +#: cinder/volume/drivers/windows/windows_utils.py:230 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:449 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:461 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:471 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:481 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:514 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:522 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:83 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:88 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:89 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:90 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:96 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:85 +#, python-format +msgid "Volume %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:89 +#, python-format +msgid "Folder %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:114 +#, python-format +msgid "Creating folder on Nexenta Store %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:146 +#, python-format +msgid "Cannot destroy created folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:176 +#, python-format +msgid "Cannot destroy cloned folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:227 +#, python-format +msgid "Folder %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:237 +#: cinder/volume/drivers/nexenta/nfs.py:268 +#, python-format +msgid "Snapshot %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:302 +#, python-format +msgid "Creating regular file: %s.This may take some time." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:313 +#, python-format +msgid "Regular file: %s created." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:365 +#, python-format +msgid "Sharing folder %s on Nexenta Store" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:393 +#, python-format +msgid "Shares loaded: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/utils.py:46 +#, python-format +msgid "Invalid value: \"%s\"" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:93 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:99 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:107 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:137 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:190 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:246 +#, python-format +msgid "Snapshot info: %(name)s => %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:321 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:79 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:166 +#, python-format +msgid "Invalid hp3parclient version. Version %s or greater required." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:179 +#, python-format +msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:193 +#, python-format +msgid "HP3PARCommon %(common_ver)s, hp3parclient %(rest_ver)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:212 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:494 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:228 +#, python-format +msgid "Failed to get domain because CPG (%s) doesn't exist on array." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:247 +#, python-format +msgid "Error extending volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:347 +#, python-format +msgid "command %s failed" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:390 +#, python-format +msgid "Error running ssh command: %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:586 +#, python-format +msgid "VV Set %s does not exist." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:633 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:684 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:752 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1004 +#, python-format +msgid "Failure in update_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1019 +#, python-format +msgid "Failure in clear_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1031 +#, python-format +msgid "Error attaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1039 +#, python-format +msgid "Error detaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:124 +#, python-format +msgid "Invalid IP address format '%s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:158 +#, python-format +msgid "" +"Found invalid iSCSI IP address(s) in configuration option(s) " +"hp3par_iscsi_ips or iscsi_ip_address '%s.'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:164 +msgid "At least one valid iSCSI IP address must be set." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:266 +msgid "Least busy iSCSI port not found, using first iSCSI port in list." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:75 +#, python-format +msgid "Failure while invoking function: %(func)s. Error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:162 +#, python-format +msgid "Error while terminating session: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:165 +msgid "Successfully established connection to the server." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:172 +#, python-format +msgid "Error while logging out the user: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:218 +#, python-format +msgid "" +"Not authenticated error occurred. Will create session and try API call " +"again: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:258 +#, python-format +msgid "Task: %(task)s progress: %(prog)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:262 +#, python-format +msgid "Task %s status: success." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:266 +#: cinder/volume/drivers/vmware/api.py:271 +#, python-format +msgid "Task: %(task)s failed with error: %(err)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:290 +msgid "Lease is ready." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:294 +msgid "Lease initializing..." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:304 +#, python-format +msgid "Error: unknown lease state %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:51 +#, python-format +msgid "Read %(bytes)s out of %(max)s from ThreadSafePipe." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:56 +#, python-format +msgid "Completed transfer of size %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:102 +#, python-format +msgid "Initiating image service update on image: %(image)s with meta: %(meta)s" +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:117 +#, python-format +msgid "Glance image: %s is now active." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:123 +#, python-format +msgid "Glance image: %s is in killed state." 
+msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:132 +#, python-format +msgid "Glance image %(id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:171 +#, python-format +msgid "" +"Exception during HTTP connection close in VMwareHTTPWrite. Exception is " +"%s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:203 +#: cinder/volume/drivers/vmware/read_write_util.py:292 +msgid "Could not retrieve URL from lease." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:206 +#, python-format +msgid "Opening vmdk url: %s for write." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:231 +#, python-format +msgid "Written %s bytes to vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:242 +#: cinder/volume/drivers/vmware/read_write_util.py:318 +#, python-format +msgid "Updating progress to %s percent." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:258 +#: cinder/volume/drivers/vmware/read_write_util.py:334 +msgid "Lease released." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:260 +#: cinder/volume/drivers/vmware/read_write_util.py:336 +#, python-format +msgid "Lease is already in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:295 +#, python-format +msgid "Opening vmdk url: %s for read." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:307 +#, python-format +msgid "Read %s bytes from vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:150 +#, python-format +msgid "Error(s): %s occurred in the call to RetrievePropertiesEx." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:189 +#, python-format +msgid "No such SOAP method %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:198 +#, python-format +msgid "httplib error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:209 +#, python-format +msgid "Socket error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:218 +#, python-format +msgid "Type error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:225 +#, python-format +msgid "Error in %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:112 +#, python-format +msgid "Returning spec value %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:115 +#, python-format +msgid "Invalid spec value: %s specified." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:118 +#, python-format +msgid "Returning default spec value: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:169 +#, python-format +msgid "%s not set." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:174 +#, python-format +msgid "Successfully setup driver: %(driver)s for server: %(ip)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:219 +msgid "Backing not available, no operation to be performed." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:287 +#, python-format +msgid "" +"Unable to pick datastore to accommodate %(size)s bytes from the " +"datastores: %(dss)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:293 +#, python-format +msgid "" +"Selected datastore: %(datastore)s with %(host_count)d connected host(s) " +"for the volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:375 +#, python-format +msgid "" +"Unable to find suitable datastore for volume of size: %(vol)s GB under " +"host: %(host)s. 
More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:385 +#, python-format +msgid "Unable to find host to accommodate a disk of size: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:412 +#, python-format +msgid "" +"Unable to find suitable datastore for volume: %(vol)s under host: " +"%(host)s. More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:422 +#, python-format +msgid "Unable to create volume: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:441 +#, python-format +msgid "The instance: %s for which initialize connection is called, exists." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:448 +#, python-format +msgid "There is no backing for the volume: %s. Need to create one." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:456 +msgid "The instance for which initialize connection is called, does not exist." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:461 +#, python-format +msgid "Trying to boot from an empty volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:470 +#, python-format +msgid "" +"Returning connection_info: %(info)s for volume: %(volume)s with " +"connector: %(connector)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:518 +#, python-format +msgid "Snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:523 +#, python-format +msgid "There is no backing, so will not create snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:528 +#, python-format +msgid "Successfully created snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:549 +#, python-format +msgid "Delete snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:554 +#, python-format +msgid "There is no backing, and so there is no snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:558 +#, python-format +msgid "Successfully deleted snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:586 +#, python-format +msgid "Successfully cloned new backing: %(back)s from source VMDK file: %(vmdk)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:603 +#, python-format +msgid "" +"There is no backing for the source volume: %(svol)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:633 +#, python-format +msgid "" +"There is no backing for the source snapshot: %(snap)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:642 +#: cinder/volume/drivers/vmware/vmdk.py:982 +#, python-format +msgid "" +"There is no snapshot point for the snapshoted volume: %(snap)s. Not " +"creating any backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:678 +#, python-format +msgid "Cannot create image of disk format: %s. Only vmdk disk format is accepted." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:713 +#: cinder/volume/drivers/vmware/vmdk.py:771 +#, python-format +msgid "Fetching glance image: %(id)s to server: %(host)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:722 +#: cinder/volume/drivers/vmware/vmdk.py:792 +#, python-format +msgid "Done copying image: %(id)s to volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:725 +#, python-format +msgid "" +"Exception in copy_image_to_volume: %(excep)s. Deleting the backing: " +"%(back)s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:746 +#, python-format +msgid "Exception in _select_ds_for_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:749 +#, python-format +msgid "Selected datastore %(ds)s for new volume of size %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:784 +#, python-format +msgid "Exception in copy_image_to_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:787 +#, python-format +msgid "Deleting the backing: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:808 +#, python-format +msgid "Copy glance image: %s to create new volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:842 +msgid "Upload to glance of attached volume is not supported." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:847 +#, python-format +msgid "Copy Volume: %s to new image." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:853 +#, python-format +msgid "Backing not found, creating for volume: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:872 +#, python-format +msgid "Done copying volume %(vol)s to a new image %(img)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:922 +#, python-format +msgid "Relocating volume: %(backing)s to %(ds)s and %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:961 +#: cinder/volume/drivers/vmware/volumeops.py:630 +#, python-format +msgid "Successfully created clone: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:974 +#, python-format +msgid "" +"There is no backing for the snapshoted volume: %(snap)s. Not creating any" +" backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1010 +#, python-format +msgid "" +"There is no backing for the source volume: %(src)s. Not creating any " +"backing for volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1018 +#, python-format +msgid "Linked clone of source volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:94 +#, python-format +msgid "Downloading image: %s from glance image server as a flat vmdk file." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:107 +#: cinder/volume/drivers/vmware/vmware_images.py:126 +#, python-format +msgid "Downloaded image: %s from glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:113 +#, python-format +msgid "Downloading image: %s from glance image server using HttpNfc import." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:132 +#, python-format +msgid "Uploading image: %s to the Glance image server using HttpNfc export." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:158 +#, python-format +msgid "Uploaded image: %s to the Glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:87 +#, python-format +msgid "Did not find any backing with name: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:94 +#, python-format +msgid "Deleting the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:97 +#, python-format +msgid "Initiated deletion of VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:99 +#, python-format +msgid "Deleted the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:237 +#, python-format +msgid "There are no valid datastores attached to %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:289 +#, python-format +msgid "" +"Creating folder: %(child_folder_name)s under parent folder: " +"%(parent_folder)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:306 +#, python-format +msgid "Child folder already present: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:314 +#, python-format +msgid "Created child folder: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:365 +#, python-format +msgid "Spec for creating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:383 +#, python-format +msgid "" +"Creating volume backing name: %(name)s disk_type: %(disk_type)s size_kb: " +"%(size_kb)s at folder: %(folder)s resourse pool: %(resource_pool)s " +"datastore name: %(ds_name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:395 +#, python-format +msgid "Initiated creation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:398 +#, python-format +msgid "Successfully created volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:438 +#, python-format +msgid "Spec for relocating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:452 +#, python-format +msgid "" +"Relocating backing: %(backing)s to datastore: %(ds)s and resource pool: " +"%(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:462 +#, python-format +msgid "Initiated relocation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:464 +#, python-format +msgid "" +"Successfully relocated volume backing: %(backing)s to datastore: %(ds)s " +"and resource pool: %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:474 +#, python-format +msgid "Moving backing: %(backing)s to folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:479 +#, python-format +msgid "Initiated move of volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:482 +#, python-format +msgid "Successfully moved volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:494 +#, python-format +msgid "Snapshoting backing: %(backing)s with name: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:501 +#, python-format +msgid "Initiated snapshot of volume backing: %(backing)s named: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:505 +#, python-format +msgid "Successfully created snapshot: %(snap)s for volume backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:553 +#, python-format +msgid "Deleting the snapshot: %(name)s from backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:558 +#, python-format +msgid "" +"Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " +"delete anything." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:565 +#, python-format +msgid "Initiated snapshot: %(name)s deletion for backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:569 +#, python-format +msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:597 +#, python-format +msgid "Spec for cloning the backing: %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:613 +#, python-format +msgid "" +"Creating a clone of backing: %(back)s, named: %(name)s, clone type: " +"%(type)s from snapshot: %(snap)s on datastore: %(ds)s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:627 +#, python-format +msgid "Initiated clone of backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:638 +#, python-format +msgid "Deleting file: %(file)s under datacenter: %(dc)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:646 +#, python-format +msgid "Initiated deletion via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:648 +#, python-format +msgid "Successfully deleted file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:701 +msgid "Copying disk data before snapshot of the VM" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:711 +#, python-format +msgid "Initiated copying disk data via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:713 +#, python-format +msgid "Successfully copied disk at: %(src)s to: %(dest)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:722 +#, python-format +msgid "Deleting vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:729 +#, python-format +msgid "Initiated deleting vmdk file via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:731 +#, python-format +msgid "Deleted vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/windows/windows.py:102 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:47 +#, python-format +msgid "" +"check_for_setup_error: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:53 +msgid "check_for_setup_error: there is no ISCSI traffic listening." +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:63 +#, python-format +msgid "" +"get_host_information: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:73 +#, python-format +msgid "" +"get_host_information: the ISCSI target information could not be " +"retrieved. WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:105 +#, python-format +msgid "" +"associate_initiator_with_iscsi_target: an association between initiator: " +"%(init)s and target name: %(target)s could not be established. WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:123 +#, python-format +msgid "" +"delete_iscsi_target: error when deleting the iscsi target associated with" +" target name: %(target)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:139 +#, python-format +msgid "" +"create_volume: error when creating the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:157 +#, python-format +msgid "" +"delete_volume: error when deleting the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:177 +#, python-format +msgid "" +"create_snapshot: error when creating the snapshot name: %(vol_name)s . 
" +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:193 +#, python-format +msgid "" +"create_volume_from_snapshot: error when creating the volume name: " +"%(vol_name)s from snapshot name: %(snap_name)s. WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:208 +#, python-format +msgid "" +"delete_snapshot: error when deleting the snapshot name: %(snap_name)s . " +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:223 +#, python-format +msgid "" +"create_iscsi_target: error when creating iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:240 +#, python-format +msgid "" +"remove_iscsi_target: error when deleting iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:255 +#, python-format +msgid "" +"add_disk_to_target: error adding disk associated to volume : %(vol_name)s" +" to the target name: %(tar_name)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:273 +#, python-format +msgid "" +"copy_vhd_disk: error when copying disk from source path : %(src_path)s to" +" destination path: %(dest_path)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:290 +#, python-format +msgid "" +"extend: error when extending the volume: %(vol_name)s .WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/flows/common.py:52 +#, python-format +msgid "Restoring source %(source_volid)s status to %(status)s" +msgstr "" + +#: cinder/volume/flows/common.py:58 +#, python-format +msgid "" +"Failed setting source volume %(source_volid)s back to its initial " +"%(source_status)s status" +msgstr "" + +#: cinder/volume/flows/common.py:83 +#, python-format +msgid "Updating volume: %(volume_id)s with %(update)s due to: %(reason)s" +msgstr "" + +#: cinder/volume/flows/common.py:90 +#: cinder/volume/flows/manager/create_volume.py:676 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(update)s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:81 +#, python-format +msgid "Originating snapshot status must be one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:103 +#, python-format +msgid "" +"Unable to create a volume from an originating source volume when its " +"status is not one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:126 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than the snapshot size " +"%(snap_size)sGB. They must be >= original snapshot size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:135 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than original volume size " +"%(source_size)sGB. They must be >= original volume size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:144 +#, python-format +msgid "Volume size %(size)s must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:186 +#, python-format +msgid "" +"Size of specified image %(image_size)sGB is larger than volume size " +"%(volume_size)sGB." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:194 +#, python-format +msgid "" +"Volume size %(volume_size)sGB cannot be smaller than the image minDisk " +"size %(min_disk)sGB." 
+msgstr "" + +#: cinder/volume/flows/api/create_volume.py:212 +#, python-format +msgid "Metadata property key %s greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:217 +#, python-format +msgid "Metadata property key %s value greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:254 +#, python-format +msgid "Availability zone '%s' is invalid" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:267 +msgid "Volume must be in the same availability zone as the snapshot" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:276 +msgid "Volume must be in the same availability zone as the source volume" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:315 +msgid "Volume type will be changed to be the same as the source volume." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:463 +#, python-format +msgid "Failed destroying volume entry %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:546 +#, python-format +msgid "Failed rolling back quota for %s reservations" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:590 +#, python-format +msgid "Failed to update quota for deleting volume: %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:678 +#: cinder/volume/flows/manager/create_volume.py:192 +#, python-format +msgid "Volume %s: create failed" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:682 +msgid "Unexpected build error:" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:105 +#, python-format +msgid "" +"Volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d due to " +"%(reason)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:124 +#, python-format +msgid "Volume %s: re-scheduled" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:141 +#, python-format +msgid "Updating volume %(volume_id)s with %(update)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:146 +#, python-format +msgid "Volume %s: resetting 'creating' status failed." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:165 +#, python-format +msgid "Volume %s: rescheduling failed" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:308 +#, python-format +msgid "" +"Failed notifying about the volume action %(event)s for volume " +"%(volume_id)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:345 +#, python-format +msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:347 +#, python-format +msgid "" +"Failed updating volume %(vol_id)s metadata using the provided " +"%(src_type)s %(src_id)s metadata" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:405 +#, python-format +msgid "" +"Failed fetching snapshot %(snapshot_id)s bootable flag using the provided" +" glance snapshot %(snapshot_ref_id)s volume reference" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:418 +#, python-format +msgid "Marking volume %s as bootable." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:421 +#, python-format +msgid "Failed updating volume %(volume_id)s bootable flag to true" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:448 +#, python-format +msgid "" +"Attempting download of %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s." 
+msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:455 +#: cinder/volume/flows/manager/create_volume.py:466 +#, python-format +msgid "" +"Failed to copy image %(image_id)s to volume: %(volume_id)s, error: " +"%(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:461 +#, python-format +msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:475 +#, python-format +msgid "" +"Downloaded image %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s successfully." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:512 +#, python-format +msgid "" +"Creating volume glance metadata for volume %(volume_id)s backed by image " +"%(image_id)s with: %(vol_metadata)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:526 +#, python-format +msgid "" +"Cloning %(volume_id)s from image %(image_id)s at location " +"%(image_location)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:552 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(updates)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:574 +#, python-format +msgid "Unable to create volume. Volume driver %s not initialized" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:588 +#, python-format +msgid "" +"Volume %(volume_id)s: being created using %(functor)s with specification:" +" %(volume_spec)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:611 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with creation provided " +"model %(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:619 +#, python-format +msgid "Volume %s: creating export" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:633 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with driver provided model " +"%(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:680 +#, python-format +msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" +msgstr "" + +#~ msgid "Error retrieving volume status: %s" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get system name" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get storage pool data" +#~ msgstr "" + +#~ msgid "Cannot find any Fibre Channel HBAs" +#~ msgstr "" + +#~ msgid "Volume status must be available or error" +#~ msgstr "" + +#~ msgid "No backend config with id %s" +#~ msgstr "" + +#~ msgid "No sm_flavor called %s" +#~ msgstr "" + +#~ msgid "No sm_volume with id %s" +#~ msgstr "" + +#~ msgid "Error: %s" +#~ msgstr "" + +#~ msgid "Unexpected state while cloning %s" +#~ msgstr "" + +#~ msgid "iSCSI device not found at %s" +#~ msgstr "" + +#~ msgid "Fibre Channel device not found." +#~ msgstr "" + +#~ msgid "Uncaught exception" +#~ msgstr "" + +#~ msgid "Out reactor registered" +#~ msgstr "" + +#~ msgid "CONSUMER GOT %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT QUEUED %(data)s" +#~ msgstr "" + +#~ msgid "Could not create IPC directory %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT %(data)s" +#~ msgstr "" + +#~ msgid "May specify only one of snapshot, imageRef or source volume" +#~ msgstr "" + +#~ msgid "Volume size cannot be lesser than the Snapshot size" +#~ msgstr "" + +#~ msgid "Unable to clone volumes that are in an error state" +#~ msgstr "" + +#~ msgid "Clones currently must be >= original volume size." 
+#~ msgstr "" + +#~ msgid "Volume size '%s' must be an integer and greater than 0" +#~ msgstr "" + +#~ msgid "Size of specified image is larger than volume size." +#~ msgstr "" + +#~ msgid "Image minDisk size is larger than the volume size." +#~ msgstr "" + +#~ msgid "" +#~ msgstr "" + +#~ msgid "Availability zone is invalid" +#~ msgstr "" + +#~ msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +#~ msgstr "" + +#~ msgid "volume %s: creating from snapshot" +#~ msgstr "" + +#~ msgid "volume %s: creating from existing volume" +#~ msgstr "" + +#~ msgid "volume %s: creating from image" +#~ msgstr "" + +#~ msgid "volume %s: creating" +#~ msgstr "" + +#~ msgid "Setting volume: %s status to error after failed image copy." +#~ msgstr "" + +#~ msgid "Unexpected Error: " +#~ msgstr "" + +#~ msgid "volume %s: creating export" +#~ msgstr "" + +#~ msgid "volume %s: create failed" +#~ msgstr "" + +#~ msgid "volume %s: created successfully" +#~ msgstr "" + +#~ msgid "volume %s: Error trying to reschedule create" +#~ msgstr "" + +#~ msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +#~ msgstr "" + +#~ msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +#~ msgstr "" + +#~ msgid "Downloaded image %(image_id)s to %(volume_id)s successfully." +#~ msgstr "" + +#~ msgid "Array Mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "LUN %(lun)s of size %(size)s MB is created." +#~ msgstr "" + +#~ msgid "Array mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "Failed to attach iser target for volume %(volume_id)s." +#~ msgstr "" + +#~ msgid "Fetching %s" +#~ msgstr "" + +#~ msgid "Link Local address is not found.:%s" +#~ msgstr "" + +#~ msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +#~ msgstr "" + +#~ msgid "Started %(name)s on %(_host)s:%(_port)s" +#~ msgstr "" + +#~ msgid "Unable to find a Fibre Channel volume device" +#~ msgstr "" + +#~ msgid "Volume device not found at %s" +#~ msgstr "" + +#~ msgid "Unable to find Volume Group: %s" +#~ msgstr "" + +#~ msgid "Failed to create Volume Group: %s" +#~ msgstr "" + +#~ msgid "snapshot %(snap_name)s: creating" +#~ msgstr "" + +#~ msgid "Running with CoraidDriver for ESM EtherCLoud" +#~ msgstr "" + +#~ msgid "Update session cookie %(session)s" +#~ msgstr "" + +#~ msgid "Message : %(message)s" +#~ msgstr "" + +#~ msgid "Error while trying to set group: %(message)s" +#~ msgstr "" + +#~ msgid "Unable to find group: %(group)s" +#~ msgstr "" + +#~ msgid "ESM urlOpen error" +#~ msgstr "" + +#~ msgid "JSON Error" +#~ msgstr "" + +#~ msgid "Request without URL" +#~ msgstr "" + +#~ msgid "Configure data : %s" +#~ msgstr "" + +#~ msgid "Configure response : %s" +#~ msgstr "" + +#~ msgid "Unable to retrive volume infos for volume %(volname)s" +#~ msgstr "" + +#~ msgid "Cannot login on Coraid ESM" +#~ msgstr "" + +#~ msgid "Fail to create volume %(volname)s" +#~ msgstr "" + +#~ msgid "Failed to delete volume %(volname)s" +#~ msgstr "" + +#~ msgid "Failed to Create Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "Failed to Delete Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "Failed to Create Volume from Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "fmt = %(fmt)s backed by: %(backing_file)s" +#~ msgstr "" + +#~ msgid "Expected image to be in raw format, but is %s" +#~ msgstr "" + +#~ msgid "volume group %s doesn't exist" +#~ msgstr "" + +#~ msgid "Error retrieving volume stats: %s" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Could not get system name" +#~ msgstr "" + +#~ msgid "CPG (%s) must 
be in a domain" +#~ msgstr "" + +#~ msgid "Error populating default encryption types!" +#~ msgstr "" + +#~ msgid "Unexpected error while running command." +#~ msgstr "" + +#~ msgid "Nexenta SA returned the error" +#~ msgstr "" + +#~ msgid "Ignored target group creation error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group member addition error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LU creation error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Copying metadata from snapshot %(snap_volume_id)s to %(volume_id)s" +#~ msgstr "" + +#~ msgid "Connection to glance failed" +#~ msgstr "" + +#~ msgid "Invalid snapshot" +#~ msgstr "" + +#~ msgid "Invalid input received" +#~ msgstr "" + +#~ msgid "Invalid volume type" +#~ msgstr "" + +#~ msgid "Invalid volume" +#~ msgstr "" + +#~ msgid "Invalid host" +#~ msgstr "" + +#~ msgid "Invalid auth key" +#~ msgstr "" + +#~ msgid "Invalid metadata" +#~ msgstr "" + +#~ msgid "Invalid metadata size" +#~ msgstr "" + +#~ msgid "Migration error" +#~ msgstr "" + +#~ msgid "Quota exceeded" +#~ msgstr "" + +#~ msgid "Connection to swift failed" +#~ msgstr "" + +#~ msgid "Volume migration failed" +#~ msgstr "" + +#~ msgid "SSH command injection detected" +#~ msgstr "" + +#~ msgid "Invalid qos specs" +#~ msgstr "" + +#~ msgid "debug in callback: %s" +#~ msgstr "" + +#~ msgid "Expected object of type: %s" +#~ msgstr "" + +#~ msgid "timefunc: '%(name)s' took %(total_time).2f secs" +#~ msgstr "" + +#~ msgid "base image still has %s snapshots so not deleting base image" +#~ msgstr "" + +#~ msgid "Failed to rename migration destination volume %(vol)s to %(name)s" +#~ msgstr "" + +#~ msgid "Resize volume \"%(name)s\" to %(size)s" +#~ msgstr "" + +#~ msgid "Volume \"%(name)s\" resized. New size is %(size)s" +#~ msgstr "" + +#~ msgid "Invalid snapshot backing file format: %s" +#~ msgstr "" + +#~ msgid "Extend volume from %(old_size) to %(new_size)" +#~ msgstr "" + +#~ msgid "pool %s doesn't exist" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Could not get system name." +#~ msgstr "" + +#~ msgid "Disk not found: %s" +#~ msgstr "" + +#~ msgid "read timed out" +#~ msgstr "" + +#~ msgid "check_for_setup_error." +#~ msgstr "" + +#~ msgid "check_for_setup_error: Can not get device type." +#~ msgstr "" + +#~ msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +#~ msgstr "" + +#~ msgid "_get_device_type: Storage Pool must be configured." +#~ msgstr "" + +#~ msgid "create_volume:volume name: %s." +#~ msgstr "" + +#~ msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +#~ msgstr "" + +#~ msgid "create_export: volume name:%s" +#~ msgstr "" + +#~ msgid "create_export:Volume %(name)s does not exist." +#~ msgstr "" + +#~ msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +#~ msgstr "" + +#~ msgid "terminate_connection:Host does not exist. Host name:%(host)s." +#~ msgstr "" + +#~ msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +#~ msgstr "" + +#~ msgid "create_snapshot:Device does not support snapshot." +#~ msgstr "" + +#~ msgid "create_snapshot:Resource pool needs 1GB valid size at least." +#~ msgstr "" + +#~ msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +#~ msgstr "" + +#~ msgid "create_snapshot:Snapshot does not exist. 
Snapshot name:%(name)s" +#~ msgstr "" + +#~ msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +#~ msgstr "" + +#~ msgid "delete_snapshot:Device does not support snapshot." +#~ msgstr "" + +#~ msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +#~ msgstr "" + +#~ msgid "_check_conf_file: %s" +#~ msgstr "" + +#~ msgid "Write login information to xml error. %s" +#~ msgstr "" + +#~ msgid "_get_login_info error. %s" +#~ msgstr "" + +#~ msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +#~ msgstr "" + +#~ msgid "_get_lun_set_info:%s" +#~ msgstr "" + +#~ msgid "_get_iscsi_info:%s" +#~ msgstr "" + +#~ msgid "CLI command:%s" +#~ msgstr "" + +#~ msgid "_execute_cli:%s" +#~ msgstr "" + +#~ msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +#~ msgstr "" + +#~ msgid "_get_tgt_iqn:iSCSI IP is %s." +#~ msgstr "" + +#~ msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +#~ msgstr "" + +#~ msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +#~ msgstr "" + +#~ msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +#~ msgstr "" + +#~ msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." +#~ msgstr "" + +#~ msgid "Ignored target creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group member addition error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LU creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LUN mapping entry addition error while ensuring export" +#~ msgstr "" + +#~ msgid "Invalid source volume %(reason)s." +#~ msgstr "" + +#~ msgid "The request is invalid." +#~ msgstr "" + +#~ msgid "Volume %(volume_id)s persistence file could not be found." +#~ msgstr "" + +#~ msgid "No disk at %(location)s" +#~ msgstr "" + +#~ msgid "Class %(class_name)s could not be found: %(exception)s" +#~ msgstr "" + +#~ msgid "Action not allowed." +#~ msgstr "" + +#~ msgid "Key pair %(key_name)s already exists." +#~ msgstr "" + +#~ msgid "Migration error: %(reason)s" +#~ msgstr "" + +#~ msgid "Maximum volume/snapshot size exceeded" +#~ msgstr "" + +#~ msgid "3PAR Host already exists: %(err)s. %(info)s" +#~ msgstr "" + +#~ msgid "Backup volume %(volume_id)s type not recognised." +#~ msgstr "" + +#~ msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s" +#~ msgstr "" + +#~ msgid "ssh_read: Read SSH timeout" +#~ msgstr "" + +#~ msgid "do_setup." +#~ msgstr "" + +#~ msgid "create_volume: volume name: %s." +#~ msgstr "" + +#~ msgid "delete_volume: volume name: %s." +#~ msgstr "" + +#~ msgid "create_cloned_volume: src volume: %(src)s tgt volume: %(tgt)s" +#~ msgstr "" + +#~ msgid "create_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" +#~ msgstr "" + +#~ msgid "delete_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" +#~ msgstr "" + +#~ msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Updating volume stats" +#~ msgstr "" + +#~ msgid "restore finished." +#~ msgstr "" + +#~ msgid "Error encountered during initialization of driver: %s" +#~ msgstr "" + +#~ msgid "Unabled to update stats, driver is uninitialized" +#~ msgstr "" + +#~ msgid "Snapshot file at %s does not exist." 
+#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %s does not exist" +#~ msgstr "" + +#~ msgid "Login to 3PAR array invalid" +#~ msgstr "" + +#~ msgid "There are no datastores present under %s." +#~ msgstr "" + +#~ msgid "Size for volume: %s not found, skipping secure delete." +#~ msgstr "" + +#~ msgid "Could not find attribute for LUN named %s" +#~ msgstr "" + +#~ msgid "Cleaning up incomplete backup operations" +#~ msgstr "" + +#~ msgid "Resetting volume %s to available (was backing-up)" +#~ msgstr "" + +#~ msgid "Resetting volume %s to error_restoring (was restoring-backup)" +#~ msgstr "" + +#~ msgid "Resetting backup %s to error (was creating)" +#~ msgstr "" + +#~ msgid "Resetting backup %s to available (was restoring)" +#~ msgstr "" + +#~ msgid "Resuming delete on backup: %s" +#~ msgstr "" + +#~ msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +#~ msgstr "" + +#~ msgid "create_backup finished. backup: %s" +#~ msgstr "" + +#~ msgid "delete_backup started, backup: %s" +#~ msgstr "" + +#~ msgid "delete_backup finished, backup %s deleted" +#~ msgstr "" + +#~ msgid "JSON transfer Error" +#~ msgstr "" + +#~ msgid "create volume error: %(err)s" +#~ msgstr "" + +#~ msgid "Create snapshot error." +#~ msgstr "" + +#~ msgid "Create luncopy error." +#~ msgstr "" + +#~ msgid "_find_host_lun_id transfer data error! " +#~ msgstr "" + +#~ msgid "ssh_read: Read SSH timeout." +#~ msgstr "" + +#~ msgid "There are no hosts in the inventory." +#~ msgstr "" + +#~ msgid "Unable to create volume: %(vol)s on the hosts: %(hosts)s." +#~ msgstr "" + +#~ msgid "Successfully cloned new backing: %s." +#~ msgstr "" + +#~ msgid "Successfully reverted clone: %(clone)s to snapshot: %(snapshot)s." +#~ msgstr "" + +#~ msgid "Copying backing files from %(src)s to %(dest)s." +#~ msgstr "" + +#~ msgid "Initiated copying of backing via task: %s." +#~ msgstr "" + +#~ msgid "Successfully copied backing to %s." +#~ msgstr "" + +#~ msgid "Registering backing at path: %s to inventory." +#~ msgstr "" + +#~ msgid "Initiated registring backing, task: %s." +#~ msgstr "" + +#~ msgid "Successfully registered backing: %s." +#~ msgstr "" + +#~ msgid "Reverting backing to snapshot: %s." +#~ msgstr "" + +#~ msgid "Initiated reverting snapshot via task: %s." +#~ msgstr "" + +#~ msgid "Successfully reverted to snapshot: %s." +#~ msgstr "" + +#~ msgid "Successfully copied disk data to: %s." +#~ msgstr "" + +#~ msgid "Error(s): %s occurred in the call to RetrieveProperties." +#~ msgstr "" + +#~ msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +#~ msgstr "" + +#~ msgid "Deploy v1 of the Cinder API. " +#~ msgstr "" + +#~ msgid "Deploy v2 of the Cinder API. " +#~ msgstr "" + +#~ msgid "_read_xml:%s" +#~ msgstr "" + +#~ msgid "request ip info is %s." +#~ msgstr "" + +#~ msgid "new str info is %s." +#~ msgstr "" + +#~ msgid "Failed to create iser target for volume %(volume_id)s." +#~ msgstr "" + +#~ msgid "Failed to remove iser target for volume %(volume_id)s." 
+#~ msgstr "" + +#~ msgid "rtstool is not installed correctly" +#~ msgstr "" + +#~ msgid "Creating iser_target for: %s" +#~ msgstr "" + +#~ msgid "Failed to create iser target for volume id:%(vol_id)s: %(e)s" +#~ msgstr "" + +#~ msgid "Removing iser_target for: %s" +#~ msgstr "" + +#~ msgid "Failed to remove iser target for volume id:%(vol_id)s: %(e)s" +#~ msgstr "" + +#~ msgid "Volume %s does not exist, it seems it was already deleted" +#~ msgstr "" + +#~ msgid "Executing zfs send/recv on the appliance" +#~ msgstr "" + +#~ msgid "zfs send/recv done, new volume %s created" +#~ msgstr "" + +#~ msgid "Failed to delete temp snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" + +#~ msgid "Failed to delete zfs recv snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" + +#~ msgid "rbd export-diff failed - %s" +#~ msgstr "" + +#~ msgid "rbd import-diff failed - %s" +#~ msgstr "" + +#~ msgid "%s is not on GPFS. Perhaps GPFS not mounted." +#~ msgstr "" + +#~ msgid "Folder %s does not exist, it seems it was already deleted." +#~ msgstr "" + +#~ msgid "No 'os-update_readonly_flag' was specified in request." +#~ msgstr "" + +#~ msgid "Volume 'readonly' flag must be specified in request as a boolean." +#~ msgstr "" + +#~ msgid "ISER provider_location not stored, using discovery" +#~ msgstr "" + +#~ msgid "Could not find iSER export for volume %s" +#~ msgstr "" + +#~ msgid "ISER Discovery: Found %s" +#~ msgstr "" + +#~ msgid "Failed to access the device on the path %(path)s: %(error)s." +#~ msgstr "" + +#~ msgid "iSER device not found at %s" +#~ msgstr "" + +#~ msgid "Found iSER node %(host_device)s (after %(tries)s rescans)." +#~ msgstr "" + +#~ msgid "Skipping ensure_export. No iser_target provisioned for volume: %s" +#~ msgstr "" + +#~ msgid "Skipping remove_export. No iser_target provisioned for volume: %s" +#~ msgstr "" + +#~ msgid "Downloading image: %s from glance image server." +#~ msgstr "" + +#~ msgid "Uploading image: %s to the Glance image server." +#~ msgstr "" + +#~ msgid "Invalid request body" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: prefix %s" +#~ msgstr "" + +#~ msgid "Schedule volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete schedule volume using flow: %s" +#~ msgstr "" + +#~ msgid "Create volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete create volume workflow" +#~ msgstr "" + +#~ msgid "Expected volume result not found" +#~ msgstr "" + +#~ msgid "Manager volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete manager volume workflow" +#~ msgstr "" + +#~ msgid "Unable to update stats, driver is uninitialized" +#~ msgstr "" + +#~ msgid "Bad reponse from server: %s" +#~ msgstr "" + +#~ msgid "%(flow)s has moved into state %(state)s from state %(old_state)s" +#~ msgstr "" + +#~ msgid "No request spec, will not reschedule" +#~ msgstr "" + +#~ msgid "No retry filter property or associated retry info, will not reschedule" +#~ msgstr "" + +#~ msgid "Retry info not present, will not reschedule" +#~ msgstr "" + +#~ msgid "Clear capabilities" +#~ msgstr "" + +#~ msgid "This usually means the volume was never succesfully created." +#~ msgstr "" + +#~ msgid "setting LU uppper (end) limit to %s" +#~ msgstr "" + +#~ msgid "Can't find lun or lun goup in array" +#~ msgstr "" + +#~ msgid "Volume to be restored to is smaller than the backup to be restored" +#~ msgstr "" + +#~ msgid "Volume driver '%(driver)s' not initialized." 
+#~ msgstr "" + +#~ msgid "in looping call" +#~ msgstr "" + +#~ msgid "Is the appropriate service running?" +#~ msgstr "" + +#~ msgid "Could not find another host" +#~ msgstr "" + +#~ msgid "Not enough allocatable volume gigabytes remaining" +#~ msgstr "" + +#~ msgid "Unable to update stats on non-intialized Volume Group: %s" +#~ msgstr "" + +#~ msgid "do_setup: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "migrate_volume started with more than one vdisk copy" +#~ msgstr "" + +#~ msgid "migrate_volume: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "Selected datastore: %s for the volume." +#~ msgstr "" + +#~ msgid "There are no valid datastores present under %s." +#~ msgstr "" + +#~ msgid "Unable to create volume, driver not initialized" +#~ msgstr "" + +#~ msgid "Migration %(migration_id)s could not be found." +#~ msgstr "" + +#~ msgid "Bad driver response status: %(status)s" +#~ msgstr "" + +#~ msgid "Instance %(instance_id)s could not be found." +#~ msgstr "" + +#~ msgid "Volume retype failed: %(reason)s" +#~ msgstr "" + +#~ msgid "SIGTERM received" +#~ msgstr "" + +#~ msgid "Child %(pid)d exited with status %(code)d" +#~ msgstr "" + +#~ msgid "_wait_child %d" +#~ msgstr "" + +#~ msgid "wait wrap.failed %s" +#~ msgstr "" + +#~ msgid "" +#~ "Report interval must be less than " +#~ "service down time. Current config: " +#~ ". Setting " +#~ "service_down_time to: %(new_service_down_time)s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target for volume %(name)s." +#~ msgstr "" + +#~ msgid "Updating iscsi target: %s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target %(name)s: %(e)s" +#~ msgstr "" + +#~ msgid "Caught '%(exception)s' exception." +#~ msgstr "" + +#~ msgid "Get code level failed" +#~ msgstr "" + +#~ msgid "do_setup: Could not get system name" +#~ msgstr "" + +#~ msgid "Failed to get license information." +#~ msgstr "" + +#~ msgid "do_setup: No configured nodes" +#~ msgstr "" + +#~ msgid "enter: _get_chap_secret_for_host: host name %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_chap_secret_for_host: host name " +#~ "%(host_name)s with secret %(chap_secret)s" +#~ msgstr "" + +#~ msgid "" +#~ "_create_host: Cannot clean host name. " +#~ "Host name is not unicode or string" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: %s" +#~ msgstr "" + +#~ msgid "leave: _get_host_from_connector: host %s" +#~ msgstr "" + +#~ msgid "enter: _create_host: host %s" +#~ msgstr "" + +#~ msgid "_create_host: No connector ports" +#~ msgstr "" + +#~ msgid "leave: _create_host: host %(host)s - %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +#~ msgstr "" + +#~ msgid "" +#~ "storwize_svc_multihostmap_enabled is set to " +#~ "False, Not allow multi host mapping" +#~ msgstr "" + +#~ msgid "volume %s mapping to multi host" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _map_vol_to_host: LUN %(result_lun)s, " +#~ "volume %(volume_name)s, host %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "leave: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "_create_host failed to return the host name." +#~ msgstr "" + +#~ msgid "_get_host_from_connector failed to return the host name for connector" +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to any host found." +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: Multiple mappings of " +#~ "volume %(vol_name)s found, no host " +#~ "specified." 
+#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to host %(host_name)s found" +#~ msgstr "" + +#~ msgid "protocol must be specified as ' iSCSI' or ' FC'" +#~ msgstr "" + +#~ msgid "enter: _create_vdisk: vdisk %s " +#~ msgstr "" + +#~ msgid "" +#~ "_create_vdisk %(name)s - did not find success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find success " +#~ "message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find mapping " +#~ "id in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to prepare FlashCopy" +#~ " from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "Unexecpted mapping status %(status)s for " +#~ "mapping %(id)s. Attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Mapping %(id)s prepare failed to " +#~ "complete within the allotted %(to)d " +#~ "seconds timeout. Terminating." +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s with " +#~ "exception %(ex)s" +#~ msgstr "" + +#~ msgid "_prepare_fc_map: %s" +#~ msgstr "" + +#~ msgid "" +#~ "_start_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "enter: _run_flashcopy: execute FlashCopy from" +#~ " source %(source)s to target %(target)s" +#~ msgstr "" + +#~ msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +#~ msgstr "" + +#~ msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %(src_vdisk)s (%(src_id)s) does not exist" +#~ msgstr "" + +#~ msgid "" +#~ "_create_copy: cannot get source vdisk " +#~ "%(src)s capacity from vdisk attributes " +#~ "%(attr)s" +#~ msgstr "" + +#~ msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_flashcopy_mapping_attributes: mapping " +#~ "%(fc_map_id)s, attributes %(attributes)s" +#~ msgstr "" + +#~ msgid "enter: _is_vdisk_defined: vdisk %s " +#~ msgstr "" + +#~ msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +#~ msgstr "" + +#~ msgid "enter: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "warning: Tried to delete vdisk %s but it does not exist." 
+#~ msgstr "" + +#~ msgid "leave: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "" +#~ "_add_vdisk_copy %(name)s - did not find" +#~ " success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "_get_vdisk_copy_attrs: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "_get_pool_attrs: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "enter: _execute_command_and_parse_attributes: command %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _execute_command_and_parse_attributes:\n" +#~ "command: %(cmd)s\n" +#~ "attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "_get_hdr_dic: attribute headers and values do not match.\n" +#~ " Headers: %(header)s\n" +#~ " Values: %(row)s" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ "stdout: %(out)s\n" +#~ "stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "Did not find expected column in %(fun)s: %(hdr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Volume size %(size)s cannot be lesser" +#~ " than the snapshot size %(snap_size)s. " +#~ "They must be >= original snapshot " +#~ "size." +#~ msgstr "" + +#~ msgid "" +#~ "Clones currently disallowed when %(size)s " +#~ "< %(source_size)s. They must be >= " +#~ "original volume size." +#~ msgstr "" + +#~ msgid "" +#~ "Size of specified image %(image_size)s " +#~ "is larger than volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "" +#~ "Image minDisk size %(min_disk)s is " +#~ "larger than the volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "Updating volume %(volume_id)s with %(update)s" +#~ msgstr "" + +#~ msgid "Volume %s: resetting 'creating' status failed" +#~ msgstr "" + +#~ msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s" +#~ msgstr "" + +#~ msgid "Marking volume %s as bootable" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting download of %(image_id)s " +#~ "(%(image_location)s) to volume %(volume_id)s" +#~ msgstr "" + +#~ msgid "" +#~ "Downloaded image %(image_id)s (%(image_location)s)" +#~ " to volume %(volume_id)s successfully" +#~ msgstr "" + +#~ msgid "" +#~ "Creating volume glance metadata for " +#~ "volume %(volume_id)s backed by image " +#~ "%(image_id)s with: %(vol_metadata)s" +#~ msgstr "" + +#~ msgid "" +#~ "Cloning %(volume_id)s from image %(image_id)s" +#~ " at location %(image_location)s" +#~ msgstr "" + diff --git a/cinder/locale/ru/LC_MESSAGES/cinder.po b/cinder/locale/ru/LC_MESSAGES/cinder.po index a934ed76f9..98cd0cf6df 100644 --- a/cinder/locale/ru/LC_MESSAGES/cinder.po +++ b/cinder/locale/ru/LC_MESSAGES/cinder.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: cinder\n" "Report-Msgid-Bugs-To: FULL NAME \n" -"POT-Creation-Date: 2012-04-08 23:04+0000\n" +"POT-Creation-Date: 2014-02-03 06:16+0000\n" "PO-Revision-Date: 2012-03-25 09:34+0000\n" "Last-Translator: Eugene Marshal \n" "Language-Team: Russian \n" @@ -16,403 +16,202 @@ msgstr "" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 0.9.6\n" +"Generated-By: Babel 1.3\n" -#: cinder/context.py:59 +#: cinder/context.py:61 #, python-format msgid "Arguments dropped when creating context: %s" msgstr "" -#: cinder/context.py:90 +#: cinder/context.py:102 #, python-format msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" msgstr "" -#: 
cinder/crypto.py:48 -msgid "Filename of root CA" -msgstr "Имя файла корневого центра сертификации" - -#: cinder/crypto.py:51 -msgid "Filename of private key" -msgstr "Имя файла секретного ключа" - -#: cinder/crypto.py:54 -msgid "Filename of root Certificate Revocation List" -msgstr "Имя файла корневого списка отзыва сертификатов" - -#: cinder/crypto.py:57 -msgid "Where we keep our keys" -msgstr "Путь к ключам" - -#: cinder/crypto.py:60 -msgid "Where we keep our root CA" -msgstr "Место расположения нашего корневого центра сертификации" - -#: cinder/crypto.py:63 -msgid "Should we use a CA for each project?" -msgstr "Должны ли мы использовать центр сертификации для каждого проекта?" +#: cinder/exception.py:66 cinder/brick/exception.py:33 +msgid "An unknown exception occurred." +msgstr "Обнаружено неизвестное исключение." -#: cinder/crypto.py:67 -#, python-format -msgid "Subject for certificate for users, %s for project, user, timestamp" +#: cinder/exception.py:88 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" msgstr "" -"Тема для сертификатов пользователей, %s для проекта, пользователя, " -"временной метки" -#: cinder/crypto.py:72 +#: cinder/exception.py:107 #, python-format -msgid "Subject for certificate for projects, %s for project, timestamp" -msgstr "Тема для сертификатов проектов, %s для проекта, временная метка" - -#: cinder/crypto.py:292 -#, python-format -msgid "Flags path: %s" -msgstr "Расположение флагов: %s" - -#: cinder/exception.py:56 -msgid "Unexpected error while running command." -msgstr "Неожиданная ошибка при выполнении команды." - -#: cinder/exception.py:59 -#, python-format -msgid "" -"%(description)s\n" -"Command: %(cmd)s\n" -"Exit code: %(exit_code)s\n" -"Stdout: %(stdout)r\n" -"Stderr: %(stderr)r" +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" msgstr "" -"%(description)s\n" -"Команда: %(cmd)s\n" -"Код выхода: %(exit_code)s\n" -"Stdout: %(stdout)r\n" -"Stderr: %(stderr)r" -#: cinder/exception.py:94 -msgid "DB exception wrapped." +#: cinder/exception.py:112 +#, python-format +msgid "Volume driver reported an error: %(message)s" msgstr "" -#: cinder/exception.py:155 -msgid "An unknown exception occurred." -msgstr "Обнаружено неизвестное исключение." - -#: cinder/exception.py:178 -msgid "Failed to decrypt text" -msgstr "Ошибка дешифровки текста" - -#: cinder/exception.py:182 -msgid "Failed to paginate through images from image service" +#: cinder/exception.py:116 +#, python-format +msgid "Backup driver reported an error: %(message)s" msgstr "" -#: cinder/exception.py:186 -msgid "Virtual Interface creation failed" -msgstr "Ошибка создания виртуального интерфейса" - -#: cinder/exception.py:190 -msgid "5 attempts to create virtual interfacewith unique mac address failed" +#: cinder/exception.py:120 +#, python-format +msgid "Connection to glance failed: %(reason)s" msgstr "" -"5 безуспешных попыток создания виртуального интерфейса с уникальным " -"mac-адресом" - -#: cinder/exception.py:195 -msgid "Connection to glance failed" -msgstr "Сбой соединения с glance" -#: cinder/exception.py:199 -msgid "Connection to melange failed" -msgstr "Сбой соединения c melange" - -#: cinder/exception.py:203 +#: cinder/exception.py:124 msgid "Not authorized." msgstr "Не авторизировано." 
-#: cinder/exception.py:208 +#: cinder/exception.py:129 msgid "User does not have admin privileges" msgstr "Пользователь не имеет административных привилегий" -#: cinder/exception.py:212 +#: cinder/exception.py:133 #, python-format msgid "Policy doesn't allow %(action)s to be performed." msgstr "Политика не допускает выполнения %(action)s." -#: cinder/exception.py:216 +#: cinder/exception.py:137 #, fuzzy, python-format msgid "Not authorized for image %(image_id)s." msgstr "Ядро не найдено для образа %(image_id)s." -#: cinder/exception.py:220 +#: cinder/exception.py:141 +msgid "Volume driver not ready." +msgstr "" + +#: cinder/exception.py:145 cinder/brick/exception.py:74 msgid "Unacceptable parameters." msgstr "Недопустимые параметры." -#: cinder/exception.py:225 -msgid "Invalid snapshot" -msgstr "Недопустимый снимок" - -#: cinder/exception.py:229 +#: cinder/exception.py:150 #, python-format -msgid "Volume %(volume_id)s is not attached to anything" -msgstr "Том %(volume_id)s никуда не присоединён" +msgid "Invalid snapshot: %(reason)s" +msgstr "" -#: cinder/exception.py:233 cinder/api/openstack/compute/contrib/keypairs.py:113 -msgid "Keypair data is invalid" +#: cinder/exception.py:154 +#, python-format +msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." msgstr "" -#: cinder/exception.py:237 +#: cinder/exception.py:159 +#, fuzzy, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "Том %(volume_id)s никуда не присоединён" + +#: cinder/exception.py:163 msgid "Failed to load data into json format" msgstr "Ошибка загрузки данных в формат json" -#: cinder/exception.py:241 -msgid "The request is invalid." +#: cinder/exception.py:167 +#, fuzzy +msgid "The results are invalid." msgstr "Недопустимый запрос." -#: cinder/exception.py:245 +#: cinder/exception.py:171 #, python-format -msgid "Invalid signature %(signature)s for user %(user)s." -msgstr "Недопустимая подпись %(signature)s для пользователя %(user)s." - -#: cinder/exception.py:249 -msgid "Invalid input received" +msgid "Invalid input received: %(reason)s" msgstr "" -#: cinder/exception.py:253 -#, python-format -msgid "Invalid instance type %(instance_type)s." -msgstr "Недопустимый тип копии %(instance_type)s." - -#: cinder/exception.py:257 -msgid "Invalid volume type" -msgstr "Недопустимый тип тома" - -#: cinder/exception.py:261 -msgid "Invalid volume" -msgstr "Недопустимый том" - -#: cinder/exception.py:265 +#: cinder/exception.py:175 #, python-format -msgid "Invalid port range %(from_port)s:%(to_port)s. %(msg)s" -msgstr "Недопустимый диапазон портов %(from_port)s:%(to_port)s. %(msg)s" +msgid "Invalid volume type: %(reason)s" +msgstr "" -#: cinder/exception.py:269 +#: cinder/exception.py:179 #, python-format -msgid "Invalid IP protocol %(protocol)s." -msgstr "Недопустимый протокол IP %(protocol)s." +msgid "Invalid volume: %(reason)s" +msgstr "" -#: cinder/exception.py:273 +#: cinder/exception.py:183 #, python-format msgid "Invalid content type %(content_type)s." msgstr "Недопустимый тип содержимого %(content_type)s." -#: cinder/exception.py:277 +#: cinder/exception.py:187 #, python-format -msgid "Invalid cidr %(cidr)s." -msgstr "Недопустимый cidr %(cidr)s." - -#: cinder/exception.py:281 -msgid "Invalid reuse of an RPC connection." -msgstr "" - -#: cinder/exception.py:285 -msgid "Invalid Parameter: Unicode is not supported by the current database." 
+msgid "Invalid host: %(reason)s" msgstr "" -#: cinder/exception.py:292 +#: cinder/exception.py:193 cinder/brick/exception.py:81 #, python-format msgid "%(err)s" msgstr "" -#: cinder/exception.py:296 +#: cinder/exception.py:197 #, python-format -msgid "" -"Cannot perform action '%(action)s' on aggregate %(aggregate_id)s. Reason:" -" %(reason)s." +msgid "Invalid auth key: %(reason)s" msgstr "" -#: cinder/exception.py:301 -#, fuzzy, python-format -msgid "Group not valid. Reason: %(reason)s" -msgstr "Допустимый узел не найден. %(reason)s" - -#: cinder/exception.py:305 +#: cinder/exception.py:201 #, python-format -msgid "" -"Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot %(method)s while" -" the instance is in this state." +msgid "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" msgstr "" -"Копия %(instance_uuid)s в %(attr)s %(state)s. Невозможно %(method)s во " -"время нахождения копии в этом состоянии." - -#: cinder/exception.py:310 -#, python-format -msgid "Instance %(instance_id)s is not running." -msgstr "Копия %(instance_id)s не выполняется." - -#: cinder/exception.py:314 -#, python-format -msgid "Instance %(instance_id)s is not suspended." -msgstr "Копия %(instance_id)s не переведена в режим приостановления." - -#: cinder/exception.py:318 -#, python-format -msgid "Instance %(instance_id)s is not in rescue mode" -msgstr "Копия %(instance_id)s не переведена в режим восстановления" - -#: cinder/exception.py:322 -msgid "Failed to suspend instance" -msgstr "Ошибка приостановления копии" - -#: cinder/exception.py:326 -msgid "Failed to resume server" -msgstr "Ошибка возобновления работы сервера" - -#: cinder/exception.py:330 -msgid "Failed to reboot instance" -msgstr "Ошибка перезагрузки копии" -#: cinder/exception.py:334 -#, fuzzy -msgid "Failed to terminate instance" -msgstr "Ошибка перезагрузки копии" - -#: cinder/exception.py:338 +#: cinder/exception.py:206 msgid "Service is unavailable at this time." msgstr "В данный момент служба недоступна." -#: cinder/exception.py:342 -msgid "Volume service is unavailable at this time." -msgstr "Служба томов в данный момент недоступна." - -#: cinder/exception.py:346 -msgid "Compute service is unavailable at this time." -msgstr "Служба Compute недоступна в настоящее время." - -#: cinder/exception.py:350 -#, python-format -msgid "Unable to migrate instance (%(instance_id)s) to current host (%(host)s)." -msgstr "Невозможно переместить копию (%(instance_id)s) на текущий узел (%(host)s)." - -#: cinder/exception.py:355 -msgid "Destination compute host is unavailable at this time." -msgstr "Назначенный узел compute недоступен в настоящее время." - -#: cinder/exception.py:359 -msgid "Original compute host is unavailable at this time." -msgstr "Исходный узел compute недоступен в настоящее время." - -#: cinder/exception.py:363 -msgid "The supplied hypervisor type of is invalid." -msgstr "" - -#: cinder/exception.py:367 -msgid "The instance requires a newer hypervisor version than has been provided." -msgstr "Копии необходима новая версия гипервизора, вместо предоставленной." - -#: cinder/exception.py:372 -#, python-format -msgid "" -"The supplied disk path (%(path)s) already exists, it is expected not to " -"exist." -msgstr "" -"Предоставленный адрес диска (%(path)s) уже существует, но ожидалось, что " -"отсутствует." - -#: cinder/exception.py:377 -#, python-format -msgid "The supplied device path (%(path)s) is invalid." -msgstr "Недопустимое размещение предоставленного устройства (%(path)s)." 
- -#: cinder/exception.py:381 -#, fuzzy, python-format -msgid "The supplied device (%(device)s) is busy." -msgstr "Недопустимое размещение предоставленного устройства (%(path)s)." - -#: cinder/exception.py:385 -msgid "Unacceptable CPU info" -msgstr "Недопустимые сведения ЦПУ" - -#: cinder/exception.py:389 -#, python-format -msgid "%(address)s is not a valid IP v4/6 address." -msgstr "%(address)s не является допустимым IP-адресом в4/6." - -#: cinder/exception.py:393 -#, python-format -msgid "" -"VLAN tag is not appropriate for the port group %(bridge)s. Expected VLAN " -"tag is %(tag)s, but the one associated with the port group is %(pgroup)s." -msgstr "" - -#: cinder/exception.py:399 -#, python-format -msgid "" -"vSwitch which contains the port group %(bridge)s is not associated with " -"the desired physical adapter. Expected vSwitch is %(expected)s, but the " -"one associated is %(actual)s." -msgstr "" - -#: cinder/exception.py:406 -#, python-format -msgid "Disk format %(disk_format)s is not acceptable" -msgstr "Форматирование диска %(disk_format)s недопустимо" - -#: cinder/exception.py:410 +#: cinder/exception.py:210 #, python-format msgid "Image %(image_id)s is unacceptable: %(reason)s" msgstr "Образ %(image_id)s недопустим: %(reason)s" -#: cinder/exception.py:414 +#: cinder/exception.py:214 #, python-format -msgid "Instance %(instance_id)s is unacceptable: %(reason)s" -msgstr "Копия %(instance_id)s недопустима: %(reason)s" +msgid "The device in the path %(path)s is unavailable: %(reason)s" +msgstr "" -#: cinder/exception.py:418 +#: cinder/exception.py:218 #, python-format -msgid "Ec2 id %(ec2_id)s is unacceptable." -msgstr "Ec2 id %(ec2_id)s недопустим." +msgid "Expected a uuid but received %(uuid)s." +msgstr "" -#: cinder/exception.py:422 +#: cinder/exception.py:222 cinder/brick/exception.py:68 msgid "Resource could not be found." msgstr "Ресурс не может быть найден." -#: cinder/exception.py:427 -#, python-format -msgid "Required flag %(flag)s not set." -msgstr "Необходимый флаг %(flag)s не назначен." - -#: cinder/exception.py:431 +#: cinder/exception.py:228 #, python-format msgid "Volume %(volume_id)s could not be found." msgstr "Том %(volume_id)s не найден." -#: cinder/exception.py:435 +#: cinder/exception.py:232 #, python-format -msgid "Unable to locate account %(account_name)s on Solidfire device" -msgstr "" +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "Том %(volume_id)s не имеет метаданных с ключом %(metadata_key)s." -#: cinder/exception.py:440 +#: cinder/exception.py:237 #, python-format -msgid "Volume not found for instance %(instance_id)s." -msgstr "Не найден том для копии %(instance_id)s." +msgid "" +"Volume %(volume_id)s has no administration metadata with key " +"%(metadata_key)s." +msgstr "" -#: cinder/exception.py:444 +#: cinder/exception.py:242 #, python-format -msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." -msgstr "Том %(volume_id)s не имеет метаданных с ключом %(metadata_key)s." +msgid "Invalid metadata: %(reason)s" +msgstr "" -#: cinder/exception.py:449 -msgid "Zero volume types found." +#: cinder/exception.py:246 +#, python-format +msgid "Invalid metadata size: %(reason)s" msgstr "" -#: cinder/exception.py:453 +#: cinder/exception.py:250 +#, fuzzy, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "Копия %(instance_id)s не имеет метаданных с ключом %(metadata_key)s." 
+ +#: cinder/exception.py:255 #, python-format msgid "Volume type %(volume_type_id)s could not be found." msgstr "Тип тома %(volume_type_id)s не может быть найден." -#: cinder/exception.py:457 +#: cinder/exception.py:259 #, python-format msgid "Volume type with name %(volume_type_name)s could not be found." msgstr "Тип тома под названием %(volume_type_name)s не может быть найден." -#: cinder/exception.py:462 +#: cinder/exception.py:264 #, python-format msgid "" "Volume Type %(volume_type_id)s has no extra specs with key " @@ -421,7884 +220,10587 @@ msgstr "" "Тип тома %(volume_type_id)s не имеет дополнительных особенностей с ключом" " %(extra_specs_key)s." -#: cinder/exception.py:467 +#: cinder/exception.py:269 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s deletion is not allowed with volumes " +"present with the type." +msgstr "" + +#: cinder/exception.py:274 #, python-format msgid "Snapshot %(snapshot_id)s could not be found." msgstr "Снимок %(snapshot_id)s не может быть найден." -#: cinder/exception.py:471 +#: cinder/exception.py:278 #, python-format msgid "deleting volume %(volume_name)s that has snapshot" msgstr "удаление тома %(volume_name)s, который имеет снимок" -#: cinder/exception.py:475 +#: cinder/exception.py:282 #, python-format msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" msgstr "" -#: cinder/exception.py:480 +#: cinder/exception.py:287 #, python-format msgid "No target id found for volume %(volume_id)s." msgstr "" -#: cinder/exception.py:484 -#, python-format -msgid "No disk at %(location)s" -msgstr "Отсутствует диск в %(location)s" - -#: cinder/exception.py:488 -#, python-format -msgid "Could not find a handler for %(driver_type)s volume." -msgstr "Невозможно найти обработчик для тома %(driver_type)s." - -#: cinder/exception.py:492 +#: cinder/exception.py:291 #, python-format msgid "Invalid image href %(image_href)s." msgstr "Недопустимый образ href %(image_href)s." -#: cinder/exception.py:496 -msgid "" -"Some images have been stored via hrefs. This version of the api does not " -"support displaying image hrefs." -msgstr "" - -#: cinder/exception.py:501 +#: cinder/exception.py:295 #, python-format msgid "Image %(image_id)s could not be found." msgstr "Образ %(image_id)s не найден." -#: cinder/exception.py:505 +#: cinder/exception.py:299 #, python-format -msgid "Kernel not found for image %(image_id)s." -msgstr "Ядро не найдено для образа %(image_id)s." +msgid "Service %(service_id)s could not be found." +msgstr "Служба %(service_id)s не найдена." -#: cinder/exception.py:509 +#: cinder/exception.py:303 #, python-format -msgid "User %(user_id)s could not be found." -msgstr "Пользователь %(user_id)s не найден." +msgid "Host %(host)s could not be found." +msgstr "Узел %(host)s не найден." -#: cinder/exception.py:513 +#: cinder/exception.py:307 #, python-format -msgid "Project %(project_id)s could not be found." -msgstr "Проект %(project_id)s не найден." +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" -#: cinder/exception.py:517 +#: cinder/exception.py:311 +#, fuzzy, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "Узел сompute %(host)s не найден." + +#: cinder/exception.py:315 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:319 #, python-format -msgid "User %(user_id)s is not a member of project %(project_id)s." -msgstr "Пользователь %(user_id)s не является участником проекта %(project_id)s." 
+msgid "Invalid reservation expiration %(expire)s." +msgstr "" -#: cinder/exception.py:521 +#: cinder/exception.py:323 #, python-format -msgid "Role %(role_id)s could not be found." -msgstr "Полномочия %(role_id)s не могут быть найдены." +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" -#: cinder/exception.py:525 -msgid "Cannot find SR to read/write VDI." -msgstr "Невозможно найти SR для чтения/записи VDI." +#: cinder/exception.py:328 +msgid "Quota could not be found" +msgstr "Квота не найдена" -#: cinder/exception.py:529 +#: cinder/exception.py:332 #, python-format -msgid "%(req)s is required to create a network." -msgstr "%(req)s необходимо для создания сети." +msgid "Unknown quota resources %(unknown)s." +msgstr "" -#: cinder/exception.py:533 +#: cinder/exception.py:336 #, python-format -msgid "Network %(network_id)s could not be found." -msgstr "Сеть %(network_id)s не найдена." +msgid "Quota for project %(project_id)s could not be found." +msgstr "Квота проекта %(project_id)s не найдена." + +#: cinder/exception.py:340 +#, fuzzy, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "Класс %(class_name)s не найден: %(exception)s" + +#: cinder/exception.py:344 +#, fuzzy, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "Квота проекта %(project_id)s не найдена." + +#: cinder/exception.py:348 +#, fuzzy, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "Пользователь %(user_id)s не найден." + +#: cinder/exception.py:352 +#, fuzzy, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "Превышена квота" -#: cinder/exception.py:537 +#: cinder/exception.py:356 #, python-format -msgid "Network could not be found for bridge %(bridge)s" -msgstr "Сеть не может быть найдена для моста %(bridge)s" +msgid "File %(file_path)s could not be found." +msgstr "Файл %(file_path)s не может быть найден." -#: cinder/exception.py:541 +#: cinder/exception.py:365 +#, fuzzy, python-format +msgid "Volume Type %(id)s already exists." +msgstr "Тип тома %(name)s уже существует." + +#: cinder/exception.py:369 #, python-format -msgid "Network could not be found for uuid %(uuid)s" -msgstr "Сеть не может быть найдена для uuid %(uuid)s" +msgid "Volume type encryption for type %(type_id)s already exists." +msgstr "" -#: cinder/exception.py:545 +#: cinder/exception.py:373 #, python-format -msgid "Network could not be found with cidr %(cidr)s." -msgstr "Сеть не найдена с cidr %(cidr)s." +msgid "Malformed message body: %(reason)s" +msgstr "Неправильное тело сообщения: %(reason)s" -#: cinder/exception.py:549 +#: cinder/exception.py:377 #, python-format -msgid "Network could not be found for instance %(instance_id)s." -msgstr "Сеть не найдена для копии %(instance_id)s." +msgid "Could not find config at %(path)s" +msgstr "Невозможно найти конфигурацию по адресу %(path)s" -#: cinder/exception.py:553 -msgid "No networks defined." -msgstr "Сети не определены." +#: cinder/exception.py:381 +#, fuzzy, python-format +msgid "Could not find parameter %(param)s" +msgstr "Невозможно найти конфигурацию по адресу %(path)s" -#: cinder/exception.py:557 +#: cinder/exception.py:385 #, python-format -msgid "" -"Either Network uuid %(network_uuid)s is not present or is not assigned to" -" the project %(project_id)s." +msgid "Could not load paste app '%(name)s' from %(path)s" msgstr "" -"Или uuid %(network_uuid)s сети не предоставлено или не присвоено проекту " -"%(project_id)s." 
-#: cinder/exception.py:562 +#: cinder/exception.py:389 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "Допустимый узел не найден. %(reason)s" + +#: cinder/exception.py:398 #, python-format -msgid "Host is not set to the network (%(network_id)s)." -msgstr "Узел не назначен сети (%(network_id)s)." +msgid "Host %(host)s is not up or doesn't exist." +msgstr "Узел %(host)s не работает или не существует." -#: cinder/exception.py:566 +#: cinder/exception.py:402 #, python-format -msgid "Network %(network)s has active ports, cannot delete." +msgid "Quota exceeded: code=%(code)s" msgstr "" -#: cinder/exception.py:570 -msgid "Could not find the datastore reference(s) which the VM uses." +#: cinder/exception.py:409 +#, python-format +msgid "" +"Requested volume or snapshot exceeds allowed Gigabytes quota. Requested " +"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." msgstr "" -#: cinder/exception.py:574 +#: cinder/exception.py:415 #, python-format -msgid "No fixed IP associated with id %(id)s." +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" msgstr "" -#: cinder/exception.py:578 +#: cinder/exception.py:419 #, python-format -msgid "Fixed ip not found for address %(address)s." +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" msgstr "" -#: cinder/exception.py:582 +#: cinder/exception.py:423 #, python-format -msgid "Instance %(instance_id)s has zero fixed ips." +msgid "Detected more than one volume with name %(vol_name)s" msgstr "" -#: cinder/exception.py:586 +#: cinder/exception.py:427 #, python-format -msgid "Network host %(host)s has zero fixed ips in network %(network_id)s." +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" msgstr "" +"Невозможно создать volume_type с именем %(name)s и спецификациями " +"%(extra_specs)s" -#: cinder/exception.py:591 +#: cinder/exception.py:432 #, python-format -msgid "Instance %(instance_id)s doesn't have fixed ip '%(ip)s'." +msgid "Unknown or unsupported command %(cmd)s" msgstr "" -#: cinder/exception.py:595 +#: cinder/exception.py:436 +#, fuzzy, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "Неправильное тело сообщения: %(reason)s" + +#: cinder/exception.py:440 #, python-format -msgid "Host %(host)s has zero fixed ips." +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" msgstr "" -#: cinder/exception.py:599 +#: cinder/exception.py:444 #, python-format msgid "" -"Fixed IP address (%(address)s) does not exist in network " -"(%(network_uuid)s)." +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" msgstr "" -#: cinder/exception.py:604 +#: cinder/exception.py:449 #, python-format -msgid "Fixed IP address %(address)s is already in use." +msgid "Glance metadata for volume/snapshot %(id)s cannot be found." msgstr "" -#: cinder/exception.py:608 +#: cinder/exception.py:453 #, python-format -msgid "Fixed IP address %(address)s is invalid." -msgstr "Недопустимый фиксированный IP-адрес %(address)s." - -#: cinder/exception.py:612 -msgid "Zero fixed ips available." +msgid "Failed to export for volume: %(reason)s" msgstr "" -#: cinder/exception.py:616 -msgid "Zero fixed ips could be found." +#: cinder/exception.py:457 +#, python-format +msgid "Failed to create metadata for volume: %(reason)s" msgstr "" -#: cinder/exception.py:620 +#: cinder/exception.py:461 #, python-format -msgid "Floating ip not found for id %(id)s." 
+msgid "Failed to update metadata for volume: %(reason)s" msgstr "" -#: cinder/exception.py:624 +#: cinder/exception.py:465 #, python-format -msgid "The DNS entry %(name)s already exists in domain %(domain)s." -msgstr "Запись DNS %(name)s уже существует в домене %(domain)s." +msgid "Failed to copy metadata to volume: %(reason)s" +msgstr "" -#: cinder/exception.py:628 -#, python-format -msgid "Floating ip not found for address %(address)s." +#: cinder/exception.py:469 +#, fuzzy, python-format +msgid "Failed to copy image to volume: %(reason)s" +msgstr "Невозможно найти том %s" + +#: cinder/exception.py:473 +msgid "Invalid Ceph args provided for backup rbd operation" msgstr "" -#: cinder/exception.py:632 -#, python-format -msgid "Floating ip not found for host %(host)s." +#: cinder/exception.py:477 +msgid "An error has occurred during backup operation" msgstr "" -#: cinder/exception.py:636 -msgid "Zero floating ips available." +#: cinder/exception.py:481 +msgid "Backup RBD operation failed" msgstr "" -#: cinder/exception.py:640 -#, python-format -msgid "Floating ip %(address)s is associated." +#: cinder/exception.py:485 +#, fuzzy, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "Группа LDAP %(group_id)s не найдена." + +#: cinder/exception.py:489 +msgid "Failed to identify volume backend." msgstr "" -#: cinder/exception.py:644 +#: cinder/exception.py:493 +#, fuzzy, python-format +msgid "Invalid backup: %(reason)s" +msgstr "Недопустимый внутренний интерфейс: %s" + +#: cinder/exception.py:497 #, python-format -msgid "Floating ip %(address)s is not associated." +msgid "Connection to swift failed: %(reason)s" msgstr "" -#: cinder/exception.py:648 -msgid "Zero floating ips exist." +#: cinder/exception.py:501 +#, fuzzy, python-format +msgid "Transfer %(transfer_id)s could not be found." +msgstr "Образ %(image_id)s не найден." + +#: cinder/exception.py:505 +#, python-format +msgid "Volume migration failed: %(reason)s" msgstr "" -#: cinder/exception.py:652 +#: cinder/exception.py:509 #, python-format -msgid "Interface %(interface)s not found." -msgstr "Интерфейс %(interface)s не найден." +msgid "SSH command injection detected: %(command)s" +msgstr "" -#: cinder/exception.py:656 +#: cinder/exception.py:513 #, python-format -msgid "Keypair %(name)s not found for user %(user_id)s" +msgid "QoS Specs %(specs_id)s already exists." msgstr "" -#: cinder/exception.py:660 +#: cinder/exception.py:517 #, python-format -msgid "Certificate %(certificate_id)s not found." -msgstr "Сертификат %(certificate_id)s не найден." +msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." +msgstr "" -#: cinder/exception.py:664 +#: cinder/exception.py:522 #, python-format -msgid "Service %(service_id)s could not be found." -msgstr "Служба %(service_id)s не найдена." +msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." +msgstr "" -#: cinder/exception.py:668 +#: cinder/exception.py:527 #, python-format -msgid "Host %(host)s could not be found." -msgstr "Узел %(host)s не найден." +msgid "No such QoS spec %(specs_id)s." +msgstr "" -#: cinder/exception.py:672 +#: cinder/exception.py:531 #, python-format -msgid "Compute host %(host)s could not be found." -msgstr "Узел сompute %(host)s не найден." +msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" -#: cinder/exception.py:676 +#: cinder/exception.py:536 #, python-format -msgid "Could not find binary %(binary)s on host %(host)s." 
+msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." msgstr "" -#: cinder/exception.py:680 +#: cinder/exception.py:541 #, python-format -msgid "Auth token %(token)s could not be found." +msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." msgstr "" -#: cinder/exception.py:684 +#: cinder/exception.py:546 #, python-format -msgid "Access Key %(access_key)s could not be found." -msgstr "Ключ доступа %(access_key)s не найден." +msgid "Invalid qos specs: %(reason)s" +msgstr "" -#: cinder/exception.py:688 -msgid "Quota could not be found" -msgstr "Квота не найдена" +#: cinder/exception.py:550 +#, python-format +msgid "QoS Specs %(specs_id)s is still associated with entities." +msgstr "" -#: cinder/exception.py:692 +#: cinder/exception.py:554 #, python-format -msgid "Quota for project %(project_id)s could not be found." -msgstr "Квота проекта %(project_id)s не найдена." +msgid "key manager error: %(reason)s" +msgstr "" -#: cinder/exception.py:696 -#, fuzzy, python-format -msgid "Quota class %(class_name)s could not be found." -msgstr "Класс %(class_name)s не найден: %(exception)s" +#: cinder/exception.py:560 +msgid "Coraid Cinder Driver exception." +msgstr "" -#: cinder/exception.py:700 -#, python-format -msgid "Security group %(security_group_id)s not found." -msgstr "Группа безопасности %(security_group_id)s не найдена." +#: cinder/exception.py:564 +msgid "Failed to encode json data." +msgstr "" -#: cinder/exception.py:704 -#, python-format -msgid "Security group %(security_group_id)s not found for project %(project_id)s." +#: cinder/exception.py:568 +msgid "Login on ESM failed." msgstr "" -"Группа безопасности %(security_group_id)s не найдена для проекта " -"%(project_id)s." -#: cinder/exception.py:709 -#, python-format -msgid "Security group with rule %(rule_id)s not found." -msgstr "Группа безопасности с правилом %(rule_id)s не найдена." +#: cinder/exception.py:572 +msgid "Relogin on ESM failed." +msgstr "" -#: cinder/exception.py:713 +#: cinder/exception.py:576 #, python-format -msgid "" -"Security group %(security_group_id)s is already associated with the " -"instance %(instance_id)s" +msgid "Group with name \"%(group_name)s\" not found." msgstr "" -"Группа безопасности %(security_group_id)s уже ассоциирована с копией " -"%(instance_id)s" -#: cinder/exception.py:718 +#: cinder/exception.py:580 #, python-format -msgid "" -"Security group %(security_group_id)s is not associated with the instance " -"%(instance_id)s" +msgid "ESM configure request failed: %(message)s." msgstr "" -"Группа безопасности %(security_group_id)s не ассоциирована с копией " -"%(instance_id)s" -#: cinder/exception.py:723 +#: cinder/exception.py:584 #, python-format -msgid "Migration %(migration_id)s could not be found." -msgstr "Перемещение %(migration_id)s не найдено." +msgid "Coraid ESM not available with reason: %(reason)s." +msgstr "" -#: cinder/exception.py:727 -#, python-format -msgid "Migration not found for instance %(instance_id)s with status %(status)s." -msgstr "Перемещение не найдено для копии %(instance_id)s в состоянии %(status)s." +#: cinder/exception.py:589 +msgid "Zadara Cinder Driver exception." +msgstr "" -#: cinder/exception.py:732 -#, python-format -msgid "Console pool %(pool_id)s could not be found." -msgstr "Пул консоли %(pool_id)s не найден." 
+#: cinder/exception.py:593 +#, fuzzy, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "Невозможно создать VDI на SR %(sr_ref)s для копии %(instance_name)s" -#: cinder/exception.py:736 -#, python-format -msgid "" -"Console pool of type %(console_type)s for compute host %(compute_host)s " -"on proxy host %(host)s not found." +#: cinder/exception.py:597 +#, fuzzy, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "Невозможно найти узел для копии %s" + +#: cinder/exception.py:601 +msgid "Unable to find any active VPSA controller" msgstr "" -#: cinder/exception.py:742 +#: cinder/exception.py:605 #, python-format -msgid "Console %(console_id)s could not be found." -msgstr "Консоль %(console_id)s не найдена." +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" -#: cinder/exception.py:746 +#: cinder/exception.py:609 #, python-format -msgid "Console for instance %(instance_id)s could not be found." -msgstr "Консоль для копии %(instance_id)s не найдена." +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" -#: cinder/exception.py:750 -#, python-format -msgid "" -"Console for instance %(instance_id)s in pool %(pool_id)s could not be " -"found." -msgstr "Консоль для копии %(instance_id)s в пуле %(pool_id)s не найдена." +#: cinder/exception.py:613 +#, fuzzy, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "Недопустимое состояние сервера: %(status)s" -#: cinder/exception.py:755 -#, python-format -msgid "Invalid console type %(console_type)s " -msgstr "Недопустимый тип консоли %(console_type)s " +#: cinder/exception.py:618 +msgid "Bad response from SolidFire API" +msgstr "" -#: cinder/exception.py:759 -msgid "Zero instance types found." +#: cinder/exception.py:622 +msgid "SolidFire Cinder Driver exception" msgstr "" -#: cinder/exception.py:763 +#: cinder/exception.py:626 #, python-format -msgid "Instance type %(instance_type_id)s could not be found." -msgstr "Тип копии %(instance_type_id)s не найден." +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" -#: cinder/exception.py:767 +#: cinder/exception.py:630 #, python-format -msgid "Instance type with name %(instance_type_name)s could not be found." -msgstr "Тип копии с названием %(instance_type_name)s не найден." +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" -#: cinder/exception.py:772 +#: cinder/exception.py:636 #, python-format -msgid "Flavor %(flavor_id)s could not be found." +msgid "Invalid 3PAR Domain: %(err)s" msgstr "" -#: cinder/exception.py:776 -#, fuzzy, python-format -msgid "Cell %(cell_id)s could not be found." -msgstr "Зона %(zone_id)s не найдена." +#: cinder/exception.py:641 +#, fuzzy +msgid "Unknown NFS exception" +msgstr "Обнаружено неизвестное исключение." -#: cinder/exception.py:780 -#, python-format -msgid "Scheduler Host Filter %(filter_name)s could not be found." +#: cinder/exception.py:645 +msgid "No mounted NFS shares found" msgstr "" -#: cinder/exception.py:784 +#: cinder/exception.py:649 cinder/exception.py:662 #, python-format -msgid "Scheduler cost function %(cost_fn_str)s could not be found." +msgid "There is no share which can host %(volume_size)sG" msgstr "" -#: cinder/exception.py:789 -#, python-format -msgid "Scheduler weight flag not found: %(flag_name)s" +#: cinder/exception.py:654 +#, fuzzy +msgid "Unknown Gluster exception" +msgstr "Обнаружено неизвестное исключение." 
+ +#: cinder/exception.py:658 +msgid "No mounted Gluster shares found" msgstr "" -#: cinder/exception.py:793 -#, python-format -msgid "Instance %(instance_id)s has no metadata with key %(metadata_key)s." -msgstr "Копия %(instance_id)s не имеет метаданных с ключом %(metadata_key)s." +#: cinder/manager.py:133 +msgid "Notifying Schedulers of capabilities ..." +msgstr "" -#: cinder/exception.py:798 -#, python-format -msgid "" -"Instance Type %(instance_type_id)s has no extra specs with key " -"%(extra_specs_key)s." +#: cinder/policy.py:30 +msgid "JSON file representing policy" msgstr "" -#: cinder/exception.py:803 -msgid "LDAP object could not be found" -msgstr "Объект LDAP не найден" +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" -#: cinder/exception.py:807 +#: cinder/quota.py:105 #, python-format -msgid "LDAP user %(user_id)s could not be found." -msgstr "Пользователь LDAP %(user_id)s не найден." +msgid "" +"Default quota for resource: %(res)s is set by the default quota flag: " +"quota_%(res)s, it is now deprecated. Please use the the default quota " +"class for default quota." +msgstr "" -#: cinder/exception.py:811 +#: cinder/quota.py:748 #, python-format -msgid "LDAP group %(group_id)s could not be found." -msgstr "Группа LDAP %(group_id)s не найдена." +msgid "Created reservations %s" +msgstr "" -#: cinder/exception.py:815 +#: cinder/quota.py:770 #, python-format -msgid "LDAP user %(user_id)s is not a member of group %(group_id)s." -msgstr "Пользователь LDAP %(user_id)s не является участником группы %(group_id)s." +msgid "Failed to commit reservations %s" +msgstr "" -#: cinder/exception.py:819 +#: cinder/quota.py:790 #, python-format -msgid "File %(file_path)s could not be found." -msgstr "Файл %(file_path)s не может быть найден." +msgid "Failed to roll back reservations %s" +msgstr "" -#: cinder/exception.py:823 -msgid "Zero files could be found." +#: cinder/quota.py:876 +msgid "Cannot register resource" msgstr "" -#: cinder/exception.py:827 -#, python-format -msgid "Virtual switch associated with the network adapter %(adapter)s not found." +#: cinder/quota.py:879 +msgid "Cannot register resources" msgstr "" -"Не найден виртуальный переключатель ассоциированный с сетевым адаптером " -"%(adapter)s." -#: cinder/exception.py:832 +#: cinder/quota_utils.py:46 #, python-format -msgid "Network adapter %(adapter)s could not be found." -msgstr "Сетевой адаптер %(adapter)s не может быть найден." +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume - " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" -#: cinder/exception.py:836 -#, python-format -msgid "Class %(class_name)s could not be found: %(exception)s" -msgstr "Класс %(class_name)s не найден: %(exception)s" +#: cinder/quota_utils.py:56 cinder/transfer/api.py:168 +#: cinder/volume/flows/api/create_volume.py:520 +#, fuzzy, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "Превышена квота для %(pid)s, попытка создания тома %(size)sG" -#: cinder/exception.py:840 -msgid "Action not allowed." -msgstr "Действие не разрешено." 
+#: cinder/service.py:95 +#, fuzzy, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "Запуск узла сети (версия %(vcs_string)s) %(topic)s" -#: cinder/exception.py:844 +#: cinder/service.py:108 cinder/openstack/common/rpc/service.py:47 #, python-format -msgid "Unable to use global role %(role_id)s" -msgstr "Невозможно использовать глобальные полномочия %(role_id)s" +msgid "Creating Consumer connection for Service %s" +msgstr "" -#: cinder/exception.py:848 -msgid "Rotation is not allowed for snapshots" +#: cinder/service.py:148 +#, python-format +msgid "" +"Report interval must be less than service down time. Current config " +"service_down_time: %(service_down_time)s, report_interval for this: " +"service is: %(report_interval)s. Setting global service_down_time to: " +"%(new_down_time)s" msgstr "" -#: cinder/exception.py:852 -msgid "Rotation param is required for backup image_type" +#: cinder/service.py:216 +msgid "Service killed that has no database entry" +msgstr "Служба завершила работу из-за отсутствия записи базы данных" + +#: cinder/service.py:255 +msgid "The service database object disappeared, Recreating it." +msgstr "Объект сервиса в базе данных отсутствует, Повторное создание." + +#: cinder/service.py:270 +msgid "Recovered model server connection!" msgstr "" -#: cinder/exception.py:861 -#, python-format -msgid "Key pair %(key_name)s already exists." +#: cinder/service.py:276 +msgid "model server went away" msgstr "" -#: cinder/exception.py:865 +#: cinder/service.py:298 #, python-format -msgid "User %(user)s already exists." -msgstr "Пользователь %(user)s уже существует." +msgid "" +"Value of config option %(name)s_workers must be integer greater than 1. " +"Input value ignored." +msgstr "" -#: cinder/exception.py:869 -#, python-format -msgid "LDAP user %(user)s already exists." -msgstr "Пользователь LDAP %(user)s уже существует." +#: cinder/service.py:373 +msgid "serve() can only be called once" +msgstr "" -#: cinder/exception.py:873 -#, python-format -msgid "LDAP group %(group)s already exists." -msgstr "Группа LDAP %(group)s уже существует." +#: cinder/service.py:379 cinder/openstack/common/service.py:166 +#: cinder/openstack/common/service.py:384 +#, fuzzy +msgid "Full set of CONF:" +msgstr "Полный набор ФЛАГОВ:" -#: cinder/exception.py:877 +#: cinder/service.py:387 #, python-format -msgid "User %(uid)s is already a member of the group %(group_dn)s" -msgstr "Пользователь %(uid)s уже является участником группы %(group_dn)s" +msgid "%s : FLAG SET " +msgstr "" -#: cinder/exception.py:882 +#: cinder/utils.py:96 #, python-format -msgid "Project %(project)s already exists." -msgstr "Проект %(project)s уже существует." +msgid "Can not translate %s to integer." +msgstr "" -#: cinder/exception.py:886 +#: cinder/utils.py:127 #, python-format -msgid "Instance %(name)s already exists." -msgstr "Копия %(name)s уже существует." +msgid "May specify only one of %s" +msgstr "" -#: cinder/exception.py:890 -#, python-format -msgid "Instance Type %(name)s already exists." -msgstr "Тип копии %(name)s уже существует." +#: cinder/utils.py:212 +#, fuzzy +msgid "Specify a password or private_key" +msgstr "Задайте san_password или san_private_key" -#: cinder/exception.py:894 -#, python-format -msgid "Volume Type %(name)s already exists." -msgstr "Тип тома %(name)s уже существует." 
+#: cinder/utils.py:228 +#, fuzzy, python-format +msgid "Error connecting via ssh: %s" +msgstr "Подключение к libvirt: %s" -#: cinder/exception.py:898 +#: cinder/utils.py:412 #, python-format -msgid "%(path)s is on shared storage: %(reason)s" -msgstr "%(path)s располагается на общедоступном накопителе: %(reason)s" - -#: cinder/exception.py:902 -msgid "Migration error" -msgstr "Ошибка перемещения" +msgid "Invalid backend: %s" +msgstr "Недопустимый внутренний интерфейс: %s" -#: cinder/exception.py:906 +#: cinder/utils.py:423 #, python-format -msgid "Malformed message body: %(reason)s" -msgstr "Неправильное тело сообщения: %(reason)s" +msgid "backend %s" +msgstr "внутренний интерфейс %s" -#: cinder/exception.py:910 -#, python-format -msgid "Could not find config at %(path)s" -msgstr "Невозможно найти конфигурацию по адресу %(path)s" +#: cinder/utils.py:698 +#, fuzzy, python-format +msgid "Could not remove tmpdir: %s" +msgstr "Ошибка удаления контейнера: %s" -#: cinder/exception.py:914 +#: cinder/utils.py:759 #, python-format -msgid "Could not load paste app '%(name)s' from %(path)s" +msgid "Volume driver %s not initialized" msgstr "" -#: cinder/exception.py:918 -msgid "When resizing, instances must change size!" -msgstr "При изменении размера, копии должны изменить размер!" +#: cinder/wsgi.py:127 cinder/openstack/common/sslutils.py:50 +#, fuzzy, python-format +msgid "Unable to find cert_file : %s" +msgstr "Невозможно найти адрес %r" -#: cinder/exception.py:922 -msgid "Image is larger than instance type allows" -msgstr "Образ больше, чем допустимо для этого типа копии" +#: cinder/wsgi.py:130 cinder/openstack/common/sslutils.py:53 +#, fuzzy, python-format +msgid "Unable to find ca_file : %s" +msgstr "Невозможно найти адрес %r" -#: cinder/exception.py:926 -msgid "1 or more Zones could not complete the request" -msgstr "1-а или несколько зон не могут завершить запрос" +#: cinder/wsgi.py:133 cinder/openstack/common/sslutils.py:56 +#, fuzzy, python-format +msgid "Unable to find key_file : %s" +msgstr "Невозможно найти адрес %r" -#: cinder/exception.py:930 -msgid "Instance type's memory is too small for requested image." +#: cinder/wsgi.py:136 cinder/openstack/common/sslutils.py:59 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" msgstr "" -#: cinder/exception.py:934 -msgid "Instance type's disk is too small for requested image." +#: cinder/wsgi.py:169 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" msgstr "" -#: cinder/exception.py:938 +#: cinder/wsgi.py:206 #, python-format -msgid "Insufficient free memory on compute node to start %(uuid)s." -msgstr "Недостаточно памяти на узле сети compute для запуска %(uuid)s." - -#: cinder/exception.py:942 -msgid "Could not fetch bandwidth/cpu/disk metrics for this host." +msgid "Started %(name)s on %(host)s:%(port)s" msgstr "" -#: cinder/exception.py:946 -#, python-format -msgid "No valid host was found. %(reason)s" -msgstr "Допустимый узел не найден. %(reason)s" +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "Выполняется останов сервера WSGI." -#: cinder/exception.py:950 -#, python-format -msgid "Host %(host)s is not up or doesn't exist." -msgstr "Узел %(host)s не работает или не существует." +#: cinder/wsgi.py:244 +msgid "WSGI server has stopped." +msgstr "Сервер WSGI был остановлен." 
-#: cinder/exception.py:954 -msgid "Quota exceeded" -msgstr "Превышена квота" +#: cinder/wsgi.py:313 +msgid "You must implement __call__" +msgstr "" -#: cinder/exception.py:958 -#, python-format +#: cinder/api/auth.py:26 msgid "" -"Aggregate %(aggregate_id)s: action '%(action)s' caused an error: " -"%(reason)s." +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." msgstr "" -#: cinder/exception.py:963 -#, python-format -msgid "Aggregate %(aggregate_id)s could not be found." +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." msgstr "" -#: cinder/exception.py:967 -#, python-format -msgid "Aggregate %(aggregate_name)s already exists." +#: cinder/api/common.py:92 cinder/api/common.py:126 cinder/volume/api.py:266 +msgid "limit param must be an integer" msgstr "" -#: cinder/exception.py:971 -#, python-format -msgid "Aggregate %(aggregate_id)s has no host %(host)s." +#: cinder/api/common.py:95 cinder/api/common.py:130 cinder/volume/api.py:263 +msgid "limit param must be positive" msgstr "" -#: cinder/exception.py:975 -#, python-format -msgid "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s." +#: cinder/api/common.py:120 +msgid "offset param must be an integer" msgstr "" -#: cinder/exception.py:980 -#, python-format -msgid "Host %(host)s already member of another aggregate." +#: cinder/api/common.py:134 +msgid "offset param must be positive" msgstr "" -#: cinder/exception.py:984 +#: cinder/api/common.py:162 #, python-format -msgid "Aggregate %(aggregate_id)s already has host %(host)s." -msgstr "" +msgid "marker [%s] not found" +msgstr "маркер [%s] не найден" -#: cinder/exception.py:988 +#: cinder/api/common.py:189 #, python-format -msgid "Detected more than one volume with name %(vol_name)s" -msgstr "" +msgid "href %s does not contain version" +msgstr "href %s не содержит версию" + +#: cinder/api/extensions.py:182 +msgid "Initializing extension manager." +msgstr "Инициализация диспетчера расширений." -#: cinder/exception.py:992 +#: cinder/api/extensions.py:197 #, python-format -msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgid "Loaded extension: %s" +msgstr "Загруженное расширение: %s" + +#: cinder/api/extensions.py:235 +#, python-format +msgid "Ext name: %s" msgstr "" -"Невозможно создать volume_type с именем %(name)s и спецификациями " -"%(extra_specs)s" -#: cinder/exception.py:997 -msgid "Unable to create instance type" +#: cinder/api/extensions.py:236 +#, python-format +msgid "Ext alias: %s" msgstr "" -#: cinder/exception.py:1001 -msgid "Bad response from SolidFire API" +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext description: %s" msgstr "" -#: cinder/exception.py:1005 +#: cinder/api/extensions.py:239 #, python-format -msgid "Error in SolidFire API response: status=%(status)s" +msgid "Ext namespace: %s" msgstr "" -#: cinder/exception.py:1009 +#: cinder/api/extensions.py:240 #, python-format -msgid "Error in SolidFire API response: data=%(data)s" +msgid "Ext updated: %s" msgstr "" -#: cinder/exception.py:1013 +#: cinder/api/extensions.py:242 #, python-format -msgid "Detected existing vlan with id %(vlan)d" +msgid "Exception loading extension: %s" msgstr "" -#: cinder/exception.py:1017 +#: cinder/api/extensions.py:256 #, python-format -msgid "Instance %(instance_id)s could not be found." -msgstr "Копия %(instance_id)s не найдена." 
+msgid "Loading extension %s" +msgstr "Загрузка расширения %s" -#: cinder/exception.py:1021 +#: cinder/api/extensions.py:262 #, python-format -msgid "Invalid id: %(val)s (expecting \"i-...\")." +msgid "Calling extension factory %s" msgstr "" -#: cinder/exception.py:1025 -#, fuzzy, python-format -msgid "Could not fetch image %(image)s" -msgstr "Получение образа %(image)s" - -#: cinder/log.py:315 +#: cinder/api/extensions.py:276 #, python-format -msgid "syslog facility must be one of: %s" +msgid "osapi_volume_extension is set to deprecated path: %s" msgstr "" -#: cinder/manager.py:146 +#: cinder/api/extensions.py:278 #, python-format -msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" msgstr "" -"Пропуск %(full_task_name)s, %(ticks_to_skip)s раз осталось, для " -"произведения следующего запуска" -#: cinder/manager.py:152 +#: cinder/api/extensions.py:287 #, python-format -msgid "Running periodic task %(full_task_name)s" -msgstr "Запуск повторяющегося задания %(full_task_name)s" +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "Ошибка загрузки расширения %(ext_factory)s: %(exc)s" -#: cinder/manager.py:159 +#: cinder/api/extensions.py:356 #, python-format -msgid "Error during %(full_task_name)s: %(e)s" -msgstr "Ошибка во время %(full_task_name)s: %(e)s" +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" -#: cinder/manager.py:203 -msgid "Notifying Schedulers of capabilities ..." +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "Ошибка загрузки расширения %(ext_name)s: %(exc)s" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" msgstr "" -#: cinder/policy.py:30 -msgid "JSON file representing policy" +#: cinder/api/xmlutil.py:266 +msgid "element is not a child" +msgstr "элемент не является потомком" + +#: cinder/api/xmlutil.py:463 +msgid "root element selecting a list" msgstr "" -#: cinder/policy.py:33 -msgid "Rule checked when requested rule is not found" +#: cinder/api/xmlutil.py:786 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" msgstr "" -#: cinder/service.py:137 -msgid "SIGTERM received" +#: cinder/api/xmlutil.py:907 +msgid "subclasses must implement construct()!" msgstr "" -#: cinder/service.py:177 +#: cinder/api/contrib/admin_actions.py:81 #, python-format -msgid "Starting %(topic)s node (version %(vcs_string)s)" -msgstr "Запуск узла сети (версия %(vcs_string)s) %(topic)s" +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" -#: cinder/service.py:195 +#: cinder/api/contrib/backups.py:128 #, python-format -msgid "Creating Consumer connection for Service %s" +msgid "show called for member %s" msgstr "" -#: cinder/service.py:282 -msgid "Service killed that has no database entry" -msgstr "Служба завершила работу из-за отсутствия записи базы данных" - -#: cinder/service.py:319 -msgid "The service database object disappeared, Recreating it." -msgstr "Объект сервиса в базе данных отсутствует, Повторное создание." - -#: cinder/service.py:334 -msgid "Recovered model server connection!" 
+#: cinder/api/contrib/backups.py:140
+#, python-format
+msgid "delete called for member %s"
 msgstr ""

-#: cinder/service.py:340
-msgid "model server went away"
-msgstr ""
+#: cinder/api/contrib/backups.py:143
+#, python-format
+msgid "Delete backup with id: %s"
+msgstr "Удалить резервную копию с идентификатором: %s"

-#: cinder/service.py:433
-msgid "Full set of FLAGS:"
-msgstr "Полный набор ФЛАГОВ:"
+#: cinder/api/contrib/backups.py:185
+#, python-format
+msgid "Creating new backup %s"
+msgstr "Создание новой резервной копии %s"
+
+#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:227
+#: cinder/api/contrib/volume_transfer.py:157
+#: cinder/api/contrib/volume_transfer.py:193
+msgid "Incorrect request body format"
+msgstr "Неправильный формат тела запроса"

-#: cinder/service.py:440
+#: cinder/api/contrib/backups.py:201
 #, python-format
-msgid "%(flag)s : FLAG SET "
+msgid "Creating backup of volume %(volume_id)s in container %(container)s"
 msgstr ""

-#: cinder/utils.py:79
+#: cinder/api/contrib/backups.py:224
 #, python-format
-msgid "Inner Exception: %s"
-msgstr "Вложенное исключение: %s"
+msgid "Restoring backup %(backup_id)s (%(body)s)"
+msgstr ""

-#: cinder/utils.py:165
+#: cinder/api/contrib/backups.py:234
 #, python-format
-msgid "Fetching %s"
-msgstr "Получение %s"
+msgid "Restoring backup %(backup_id)s to volume %(volume_id)s"
+msgstr ""

-#: cinder/utils.py:210
-#, python-format
-msgid "Got unknown keyword args to utils.execute: %r"
-msgstr "Приняты неизвестные аргументы ключевого слова для utils.execute: %r"
-
-#: cinder/utils.py:220
-#, python-format
-msgid "Running cmd (subprocess): %s"
-msgstr "Выполнение команды (субпроцесс): %s"
+#: cinder/api/contrib/extended_snapshot_attributes.py:60
+msgid "Snapshot not found."
+msgstr "Снимок не найден."

-#: cinder/utils.py:236 cinder/utils.py:315
-#, python-format
-msgid "Result was %s"
-msgstr "Результат %s"
+#: cinder/api/contrib/hosts.py:86 cinder/api/openstack/wsgi.py:245
+msgid "cannot understand XML"
+msgstr ""

-#: cinder/utils.py:249
-#, python-format
-msgid "%r failed. Retrying."
-msgstr "%r ошибка. Выполняется повтор."
+#: cinder/api/contrib/hosts.py:136
+#, python-format
+msgid "Host '%s' could not be found."
+msgstr "Узел '%s' не найден."

-#: cinder/utils.py:291
+#: cinder/api/contrib/hosts.py:165
 #, python-format
-msgid "Running cmd (SSH): %s"
-msgstr "Выполнение команды (SSH): %s"
-
-#: cinder/utils.py:293
-msgid "Environment not supported over SSH"
-msgstr "Среда не поддерживается с использованием SSH"
-
-#: cinder/utils.py:297
-msgid "process_input not supported over SSH"
-msgstr "process_input не поддерживается с использованием SSH"
+msgid "Invalid status: '%s'"
+msgstr "Недопустимое состояние: '%s'"

-#: cinder/utils.py:352
+#: cinder/api/contrib/hosts.py:168
 #, python-format
-msgid "debug in callback: %s"
-msgstr "отладка в обратном вызове: %s"
+msgid "Invalid update setting: '%s'"
+msgstr "Недопустимый параметр обновления: '%s'"

-#: cinder/utils.py:534
+#: cinder/api/contrib/hosts.py:180
 #, python-format
-msgid "Link Local address is not found.:%s"
-msgstr ""
+msgid "Setting host %(host)s to %(state)s."
+msgstr "Перевод узла %(host)s в %(state)s."
-#: cinder/utils.py:537 -#, python-format -msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +#: cinder/api/contrib/hosts.py:206 +msgid "Describe-resource is admin only functionality" msgstr "" -#: cinder/utils.py:648 -#, python-format -msgid "Invalid backend: %s" -msgstr "Недопустимый внутренний интерфейс: %s" - -#: cinder/utils.py:659 -#, python-format -msgid "backend %s" -msgstr "внутренний интерфейс %s" +#: cinder/api/contrib/hosts.py:214 +msgid "Host not found" +msgstr "Узел не найден" -#: cinder/utils.py:709 -msgid "in looping call" +#: cinder/api/contrib/qos_specs_manage.py:111 +msgid "Please specify a name for QoS specs." msgstr "" -#: cinder/utils.py:927 -#, python-format -msgid "Attempting to grab semaphore \"%(lock)s\" for method \"%(method)s\"..." +#: cinder/api/contrib/qos_specs_manage.py:220 +msgid "Failed to disassociate qos specs." msgstr "" -#: cinder/utils.py:931 -#, python-format -msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +#: cinder/api/contrib/qos_specs_manage.py:222 +msgid "Qos specs still in use." msgstr "" -#: cinder/utils.py:935 -#, python-format -msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +#: cinder/api/contrib/qos_specs_manage.py:298 +#: cinder/api/contrib/qos_specs_manage.py:351 +msgid "Volume Type id must not be None." msgstr "" -#: cinder/utils.py:942 -#, python-format -msgid "Got file lock \"%(lock)s\" for method \"%(method)s\"..." +#: cinder/api/contrib/quota_classes.py:72 +msgid "Missing required element quota_class_set in request body." msgstr "" -#: cinder/utils.py:1001 -#, python-format -msgid "Found sentinel %(filename)s for pid %(pid)s" +#: cinder/api/contrib/quota_classes.py:81 +msgid "Quota class limit must be specified as an integer value." msgstr "" -#: cinder/utils.py:1008 -#, python-format -msgid "Cleaned sentinel %(filename)s for pid %(pid)s" +#: cinder/api/contrib/quota_classes.py:85 +msgid "Quota class limit must be -1 or greater." msgstr "" -#: cinder/utils.py:1023 -#, python-format -msgid "Found lockfile %(file)s with link count %(count)d" +#: cinder/api/contrib/quotas.py:60 +msgid "Quota limit must be specified as an integer value." msgstr "" -#: cinder/utils.py:1028 -#, python-format -msgid "Cleaned lockfile %(file)s with link count %(count)d" +#: cinder/api/contrib/quotas.py:65 +msgid "Quota limit must be -1 or greater." msgstr "" -#: cinder/utils.py:1138 -#, python-format -msgid "Expected object of type: %s" -msgstr "Ожидался объект типа: %s" +#: cinder/api/contrib/quotas.py:100 +msgid "Missing required element quota_set in request body." +msgstr "" -#: cinder/utils.py:1169 +#: cinder/api/contrib/quotas.py:111 #, python-format -msgid "Invalid server_string: %s" -msgstr "Недопустимая server_string: %s" +msgid "Bad key(s) in quota set: %s" +msgstr "" -#: cinder/utils.py:1298 -#, python-format -msgid "timefunc: '%(name)s' took %(total_time).2f secs" -msgstr "timefunc: '%(name)s' заняла %(total_time).2f с." +#: cinder/api/contrib/scheduler_hints.py:36 +msgid "Malformed scheduler_hints attribute" +msgstr "" -#: cinder/utils.py:1330 -msgid "Original exception being dropped" -msgstr "Исходное исключение было сброшено" +#: cinder/api/contrib/services.py:84 +msgid "" +"Query by service parameter is deprecated. Please use binary parameter " +"instead." +msgstr "" -#: cinder/utils.py:1461 -#, python-format -msgid "Class %(fullname)s is deprecated: %(msg)s" -msgstr "Класс %(fullname)s устарел: %(msg)s" +#: cinder/api/contrib/snapshot_actions.py:51 +msgid "'status' must be specified." 
+msgstr "" -#: cinder/utils.py:1463 +#: cinder/api/contrib/snapshot_actions.py:61 #, python-format -msgid "Class %(fullname)s is deprecated" -msgstr "Класс %(fullname)s устарел" +msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" +msgstr "" -#: cinder/utils.py:1495 +#: cinder/api/contrib/snapshot_actions.py:67 #, python-format -msgid "Function %(name)s in %(location)s is deprecated: %(msg)s" -msgstr "Функция %(name)s в %(location)s устарела: %(msg)s" +msgid "" +"Provided snapshot status %(provided)s not allowed for snapshot with " +"status %(current)s." +msgstr "" -#: cinder/utils.py:1497 -#, python-format -msgid "Function %(name)s in %(location)s is deprecated" -msgstr "Функция %(name)s в %(location)s устарела" +#: cinder/api/contrib/snapshot_actions.py:79 +msgid "progress must be an integer percentage" +msgstr "" -#: cinder/utils.py:1681 -#, fuzzy, python-format -msgid "Could not remove tmpdir: %s" -msgstr "Ошибка удаления контейнера: %s" +#: cinder/api/contrib/types_extra_specs.py:101 +#, fuzzy +msgid "Request body empty" +msgstr "Неправильный формат тела запроса" -#: cinder/wsgi.py:97 -#, python-format -msgid "Started %(name)s on %(host)s:%(port)s" -msgstr "Выполняется %(name)s на %(host)s:%(port)s" +#: cinder/api/contrib/types_extra_specs.py:105 +#: cinder/api/v1/snapshot_metadata.py:75 cinder/api/v1/volume_metadata.py:75 +#: cinder/api/v2/snapshot_metadata.py:75 cinder/api/v2/volume_metadata.py:74 +msgid "Request body and URI mismatch" +msgstr "Тело запроса и URI не совпадают" -#: cinder/wsgi.py:108 -msgid "Stopping WSGI server." -msgstr "Выполняется останов сервера WSGI." +#: cinder/api/contrib/types_extra_specs.py:108 +#: cinder/api/v1/snapshot_metadata.py:79 cinder/api/v1/volume_metadata.py:79 +#: cinder/api/v2/snapshot_metadata.py:79 cinder/api/v2/volume_metadata.py:78 +msgid "Request body contains too many items" +msgstr "Тело запроса содержит избыточное количество объектов" -#: cinder/wsgi.py:111 -msgid "Stopping raw TCP server." -msgstr "Выполняется останов сервера TCP." +#: cinder/api/contrib/types_extra_specs.py:150 +msgid "" +"Key names can only contain alphanumeric characters, underscores, periods," +" colons and hyphens." +msgstr "" -#: cinder/wsgi.py:117 +#: cinder/api/contrib/volume_actions.py:99 #, python-format -msgid "Starting TCP server %(arg0)s on %(host)s:%(port)s" -msgstr "Запуск TCP сервера %(arg0)s на %(host)s:%(port)s" - -#: cinder/wsgi.py:133 -msgid "WSGI server has stopped." -msgstr "Сервер WSGI был остановлен." +msgid "" +"Invalid request to attach volume to an instance %(instance_uuid)s and a " +"host %(host_name)s simultaneously" +msgstr "" -#: cinder/wsgi.py:211 -msgid "You must implement __call__" +#: cinder/api/contrib/volume_actions.py:107 +msgid "Invalid request to attach volume to an invalid target" msgstr "" -#: cinder/api/direct.py:218 -msgid "not available" -msgstr "недоступно" +#: cinder/api/contrib/volume_actions.py:111 +msgid "" +"Invalid request to attach volume with an invalid mode. Attaching mode " +"should be 'rw' or 'ro'" +msgstr "" -#: cinder/api/direct.py:299 -#, python-format -msgid "Returned non-serializeable type: %s" +#: cinder/api/contrib/volume_actions.py:196 +msgid "Unable to fetch connection information from backend." msgstr "" -#: cinder/api/sizelimit.py:51 -msgid "Request is too large." +#: cinder/api/contrib/volume_actions.py:216 +msgid "Unable to terminate volume connection from backend." 
 msgstr ""

-#: cinder/api/validator.py:142
-#, python-format
-msgid "%(key)s with value %(value)s failed validator %(validator)s"
+#: cinder/api/contrib/volume_actions.py:229
+msgid "No image_name was specified in request."
 msgstr ""

-#: cinder/api/ec2/__init__.py:73
-#, python-format
-msgid "%(code)s: %(message)s"
+#: cinder/api/contrib/volume_actions.py:237
+msgid "Bad value for 'force' parameter."
 msgstr ""

-#: cinder/api/ec2/__init__.py:95
-#, python-format
-msgid "FaultWrapper: %s"
+#: cinder/api/contrib/volume_actions.py:240
+msgid "'force' is not string or bool."
 msgstr ""

-#: cinder/api/ec2/__init__.py:170
-msgid "Too many failed authentications."
-msgstr "Слишком много неудачных попыток аутентификации."
+#: cinder/api/contrib/volume_actions.py:280
+msgid "New volume size must be specified as an integer."
+msgstr ""

-#: cinder/api/ec2/__init__.py:180
-#, python-format
-msgid ""
-"Access key %(access_key)s has had %(failures)d failed authentications and"
-" will be locked out for %(lock_mins)d minutes."
+#: cinder/api/contrib/volume_actions.py:299
+msgid "Must specify readonly in request."
 msgstr ""

-#: cinder/api/ec2/__init__.py:267
-msgid "Signature not provided"
+#: cinder/api/contrib/volume_actions.py:307
+msgid "Bad value for 'readonly'"
 msgstr ""

-#: cinder/api/ec2/__init__.py:271
-msgid "Access key not provided"
+#: cinder/api/contrib/volume_actions.py:311
+msgid "'readonly' not string or bool"
 msgstr ""

-#: cinder/api/ec2/__init__.py:306 cinder/api/ec2/__init__.py:319
-msgid "Failure communicating with keystone"
+#: cinder/api/contrib/volume_actions.py:325
+msgid "New volume type must be specified."
 msgstr ""

-#: cinder/api/ec2/__init__.py:388
-#, python-format
-msgid "Authentication Failure: %s"
-msgstr "Ошибка аутентификации: %s"
+#: cinder/api/contrib/volume_transfer.py:131
+msgid "Listing volume transfers"
+msgstr "Получение списка передач томов"

-#: cinder/api/ec2/__init__.py:404
-#, python-format
-msgid "Authenticated Request For %(uname)s:%(pname)s)"
-msgstr "Запрос аутентификации для %(uname)s:%(pname)s)"
+#: cinder/api/contrib/volume_transfer.py:147
+#, python-format
+msgid "Creating new volume transfer %s"
+msgstr "Создание новой передачи тома %s"

-#: cinder/api/ec2/__init__.py:435
-#, python-format
-msgid "action: %s"
-msgstr "действие: %s"
+#: cinder/api/contrib/volume_transfer.py:162
+#, python-format
+msgid "Creating transfer of volume %s"
+msgstr "Создание передачи тома %s"
-#: cinder/api/ec2/__init__.py:437
-#, python-format
-msgid "arg: %(key)s\t\tval: %(value)s"
-msgstr ""
+#: cinder/api/contrib/volume_transfer.py:183
+#, python-format
+msgid "Accepting volume transfer %s"
+msgstr "Принятие передачи тома %s"

-#: cinder/api/ec2/__init__.py:512
-#, python-format
-msgid "Unauthorized request for controller=%(controller)s and action=%(action)s"
-msgstr ""
-"Неавторизированный запрос для контроллера=%(controller)s и "
-"действия=%(action)s"
+#: cinder/api/contrib/volume_transfer.py:196
+#, python-format
+msgid "Accepting transfer %s"
+msgstr "Принятие передачи %s"

-#: cinder/api/ec2/__init__.py:584
-#, python-format
-msgid "InstanceNotFound raised: %s"
-msgstr ""
+#: cinder/api/contrib/volume_transfer.py:217
+#, python-format
+msgid "Delete transfer with id: %s"
+msgstr "Удалить передачу с идентификатором: %s"

-#: cinder/api/ec2/__init__.py:590
-#, python-format
-msgid "VolumeNotFound raised: %s"
+#: cinder/api/contrib/volume_type_encryption.py:64
+msgid "key_size must be non-negative"
 msgstr ""

-#: cinder/api/ec2/__init__.py:596
-#, python-format
-msgid "SnapshotNotFound raised: %s"
+#: cinder/api/contrib/volume_type_encryption.py:67
+msgid "key_size must be an integer"
 msgstr ""

-#: cinder/api/ec2/__init__.py:602
-#, python-format
-msgid "NotFound raised: %s"
+#: cinder/api/contrib/volume_type_encryption.py:73
+msgid "provider must be defined"
 msgstr ""

-#: cinder/api/ec2/__init__.py:605
-#, python-format
-msgid "EC2APIError raised: %s"
+#: cinder/api/contrib/volume_type_encryption.py:75
+msgid "control_location must be defined"
 msgstr ""

-#: cinder/api/ec2/__init__.py:613
+#: cinder/api/contrib/volume_type_encryption.py:83
 #, python-format
-msgid "KeyPairExists raised: %s"
+msgid "Valid control location are: %s"
 msgstr ""

-#: cinder/api/ec2/__init__.py:617
-#, python-format
-msgid "InvalidParameterValue raised: %s"
+#: cinder/api/contrib/volume_type_encryption.py:111
+msgid "Cannot create encryption specs. Volume type in use."
 msgstr ""

-#: cinder/api/ec2/__init__.py:621
-#, python-format
-msgid "InvalidPortRange raised: %s"
+#: cinder/api/contrib/volume_type_encryption.py:115
+msgid "Create body is not valid."
 msgstr ""

-#: cinder/api/ec2/__init__.py:625
-#, python-format
-msgid "NotAuthorized raised: %s"
+#: cinder/api/contrib/volume_type_encryption.py:157
+msgid "Cannot delete encryption specs. Volume type in use."
 msgstr ""

-#: cinder/api/ec2/__init__.py:629
-#, python-format
-msgid "InvalidRequest raised: %s"
+#: cinder/api/middleware/auth.py:108
+msgid "Invalid service catalog json."
 msgstr ""

-#: cinder/api/ec2/__init__.py:633
+#: cinder/api/middleware/fault.py:44
 #, python-format
-msgid "QuotaError raised: %s"
-msgstr ""
+msgid "Caught error: %s"
+msgstr "Обнаружена ошибка: %s"

-#: cinder/api/ec2/__init__.py:637
+#: cinder/api/middleware/fault.py:53 cinder/api/openstack/wsgi.py:984
 #, python-format
-msgid "Invalid id: bogus (expecting \"i-...\"): %s"
-msgstr ""
+msgid "%(url)s returned with HTTP %(status)d"
+msgstr "%(url)s возвратил с HTTP %(status)d"

-#: cinder/api/ec2/__init__.py:646
+#: cinder/api/middleware/fault.py:69
 #, python-format
-msgid "Unexpected error raised: %s"
+msgid "%(exception)s: %(explanation)s"
 msgstr ""

-#: cinder/api/ec2/__init__.py:647
-#, python-format
-msgid "Environment: %s"
+#: cinder/api/middleware/sizelimit.py:55 cinder/api/middleware/sizelimit.py:64
+#: cinder/api/middleware/sizelimit.py:78
+msgid "Request is too large."
msgstr "" -#: cinder/api/ec2/__init__.py:649 cinder/api/metadata/handler.py:248 -msgid "An unknown error has occurred. Please try your request again." +#: cinder/api/openstack/__init__.py:69 +msgid "Must specify an ExtensionManager class" msgstr "" -"Произошла неизвестная ошибка. Пожалуйста, попытайтесь повторить ваш " -"запрос." -#: cinder/api/ec2/apirequest.py:64 +#: cinder/api/openstack/__init__.py:80 +#, python-format +msgid "Extended resource: %s" +msgstr "Расширенный ресурс: %s" + +#: cinder/api/openstack/__init__.py:104 #, python-format -msgid "Unsupported API request: controller = %(controller)s, action = %(action)s" +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" msgstr "" -"Неподдерживаемый запрос API: контроллер = %(controller)s, действие = " -"%(action)s" +"Расширение %(ext_name)s: Невозможно расширить ресурс %(collection)s: Нет " +"такого ресурса" -#: cinder/api/ec2/cloud.py:336 +#: cinder/api/openstack/__init__.py:110 #, python-format -msgid "Create snapshot of volume %s" -msgstr "Создать снимок тома %s" +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "Расширение %(ext_name)s расширение ресурса: %(collection)s" -#: cinder/api/ec2/cloud.py:372 -#, fuzzy, python-format +#: cinder/api/openstack/__init__.py:126 msgid "" -"Value (%s) for KeyName is invalid. Content limited to Alphanumeric " -"character, spaces, dashes, and underscore." +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." msgstr "" -"Значение (%s) для параметра GroupName недопустимо. Содержание ограничено " -"буквенно-цифровыми символами, пробелами, тире и подчёркиваниями." -#: cinder/api/ec2/cloud.py:378 -#, fuzzy, python-format -msgid "Value (%s) for Keyname is invalid. Length exceeds maximum of 255." +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." msgstr "" -"Значение (%s) для параметра GroupName недопустимо. Длина превышает " -"максимально допустимое значение 255." - -#: cinder/api/ec2/cloud.py:382 -#, python-format -msgid "Create key pair %s" -msgstr "Создание пары ключей %s" -#: cinder/api/ec2/cloud.py:391 -#, python-format -msgid "Import key %s" -msgstr "Импортировать ключ %s" +#: cinder/api/openstack/wsgi.py:220 cinder/api/openstack/wsgi.py:634 +msgid "cannot understand JSON" +msgstr "" -#: cinder/api/ec2/cloud.py:409 -#, python-format -msgid "Delete key pair %s" -msgstr "Удаление пары ключей %s" +#: cinder/api/openstack/wsgi.py:639 +msgid "too many body keys" +msgstr "" -#: cinder/api/ec2/cloud.py:551 +#: cinder/api/openstack/wsgi.py:677 #, fuzzy, python-format -msgid "Invalid CIDR" -msgstr "Недопустимый cidr %(cidr)s." 
+msgid "Exception handling resource: %s" +msgstr "Расширенный ресурс: %s" -#: cinder/api/ec2/cloud.py:639 cinder/api/ec2/cloud.py:693 -#: cinder/api/ec2/cloud.py:800 -msgid "Not enough parameters, need group_name or group_id" +#: cinder/api/openstack/wsgi.py:682 +#, python-format +msgid "Fault thrown: %s" msgstr "" -#: cinder/api/ec2/cloud.py:654 -#: cinder/api/openstack/compute/contrib/security_groups.py:517 +#: cinder/api/openstack/wsgi.py:685 #, python-format -msgid "Revoke security group ingress %s" +msgid "HTTP exception thrown: %s" msgstr "" -#: cinder/api/ec2/cloud.py:666 cinder/api/ec2/cloud.py:719 -#, fuzzy, python-format -msgid "%s Not enough parameters to build a valid rule" -msgstr "Недостаточно параметров для сбора правильного правила." +#: cinder/api/openstack/wsgi.py:793 +msgid "Empty body provided in request" +msgstr "Пустое тело предоставлено в запросе" + +#: cinder/api/openstack/wsgi.py:799 +msgid "Unrecognized Content-Type provided in request" +msgstr "В запросе предоставлен не распознанный тип-содержимого" -#: cinder/api/ec2/cloud.py:684 cinder/api/ec2/cloud.py:744 -msgid "No rule for the specified parameters." -msgstr "Отсутствует правило для заданных параметров." +#: cinder/api/openstack/wsgi.py:803 +msgid "No Content-Type provided in request" +msgstr "Тип содержимого не предоставлен в запросе" -#: cinder/api/ec2/cloud.py:708 -#: cinder/api/openstack/compute/contrib/security_groups.py:354 +#: cinder/api/openstack/wsgi.py:914 #, python-format -msgid "Authorize security group ingress %s" +msgid "There is no such action: %s" msgstr "" -#: cinder/api/ec2/cloud.py:725 -#, fuzzy, python-format -msgid "%s - This rule already exists in group" -msgstr "Это правило уже существует в группе %s" +#: cinder/api/openstack/wsgi.py:917 cinder/api/openstack/wsgi.py:930 +#: cinder/api/v1/snapshot_metadata.py:53 cinder/api/v1/snapshot_metadata.py:71 +#: cinder/api/v1/snapshot_metadata.py:96 cinder/api/v1/snapshot_metadata.py:121 +#: cinder/api/v1/volume_metadata.py:53 cinder/api/v1/volume_metadata.py:71 +#: cinder/api/v1/volume_metadata.py:96 cinder/api/v1/volume_metadata.py:121 +#: cinder/api/v2/snapshot_metadata.py:53 cinder/api/v2/snapshot_metadata.py:71 +#: cinder/api/v2/snapshot_metadata.py:96 cinder/api/v2/snapshot_metadata.py:121 +#: cinder/api/v2/volume_metadata.py:52 cinder/api/v2/volume_metadata.py:70 +#: cinder/api/v2/volume_metadata.py:95 cinder/api/v2/volume_metadata.py:120 +msgid "Malformed request body" +msgstr "Неправильное тело запроса" + +#: cinder/api/openstack/wsgi.py:927 +msgid "Unsupported Content-Type" +msgstr "Не поддерживаемый тип содержимого" + +#: cinder/api/openstack/wsgi.py:939 +msgid "Malformed request url" +msgstr "Неправильный запрос url" -#: cinder/api/ec2/cloud.py:769 +#: cinder/api/openstack/wsgi.py:987 #, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "%(url)s возвратил ошибку: %(e)s" + +#: cinder/api/openstack/volume/__init__.py:25 msgid "" -"Value (%s) for parameter GroupName is invalid. Content limited to " -"Alphanumeric characters, spaces, dashes, and underscores." +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." msgstr "" -"Значение (%s) для параметра GroupName недопустимо. Содержание ограничено " -"буквенно-цифровыми символами, пробелами, тире и подчёркиваниями." -#: cinder/api/ec2/cloud.py:776 -#, python-format +#: cinder/api/openstack/volume/versions.py:26 msgid "" -"Value (%s) for parameter GroupName is invalid. Length exceeds maximum of " -"255." 
+"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." msgstr "" -"Значение (%s) для параметра GroupName недопустимо. Длина превышает " -"максимально допустимое значение 255." -#: cinder/api/ec2/cloud.py:780 -#: cinder/api/openstack/compute/contrib/security_groups.py:292 +#: cinder/api/v1/limits.py:139 cinder/api/v2/limits.py:138 #, python-format -msgid "Create Security Group %s" -msgstr "Создать группу безопасности %s" +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" +"Только %(value)s %(verb)s запрос(ов) могут быть сделаны для %(uri)s, " +"каждые %(unit_string)s." -#: cinder/api/ec2/cloud.py:783 -#, python-format -msgid "group %s already exists" -msgstr "группа %s уже существует" +#: cinder/api/v1/limits.py:264 cinder/api/v2/limits.py:261 +msgid "This request was rate-limited." +msgstr "" -#: cinder/api/ec2/cloud.py:815 -#: cinder/api/openstack/compute/contrib/security_groups.py:245 -#, python-format -msgid "Delete security group %s" -msgstr "Удалить группу безопасности %s" +#: cinder/api/v1/snapshot_metadata.py:37 cinder/api/v1/snapshot_metadata.py:117 +#: cinder/api/v1/snapshot_metadata.py:156 cinder/api/v2/snapshot_metadata.py:37 +#: cinder/api/v2/snapshot_metadata.py:117 +#: cinder/api/v2/snapshot_metadata.py:156 +#, fuzzy +msgid "snapshot does not exist" +msgstr "Копия не существует" -#: cinder/api/ec2/cloud.py:823 cinder/compute/manager.py:1630 -#, python-format -msgid "Get console output for instance %s" -msgstr "Получить консольный вывод для копии %s" +#: cinder/api/v1/snapshot_metadata.py:139 +#: cinder/api/v1/snapshot_metadata.py:149 cinder/api/v1/volume_metadata.py:139 +#: cinder/api/v1/volume_metadata.py:149 cinder/api/v2/snapshot_metadata.py:139 +#: cinder/api/v2/snapshot_metadata.py:149 cinder/api/v2/volume_metadata.py:138 +#: cinder/api/v2/volume_metadata.py:148 +msgid "Metadata item was not found" +msgstr "Элемент метаданных не найден" -#: cinder/api/ec2/cloud.py:894 +#: cinder/api/v1/snapshots.py:119 cinder/api/v2/snapshots.py:120 #, python-format -msgid "Create volume from snapshot %s" -msgstr "Создать том из снимка %s" +msgid "Delete snapshot with id: %s" +msgstr "Удалить снимок с идентификатором: %s" -#: cinder/api/ec2/cloud.py:898 cinder/api/openstack/compute/contrib/volumes.py:186 -#: cinder/api/openstack/volume/volumes.py:222 -#, python-format -msgid "Create volume of %s GB" -msgstr "Создание раздела %s ГБ" +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:184 +msgid "'volume_id' must be specified" +msgstr "" -#: cinder/api/ec2/cloud.py:921 -#, fuzzy -msgid "Delete Failed" -msgstr "Ошибка создания" +#: cinder/api/v1/snapshots.py:182 cinder/api/v2/snapshots.py:193 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "Создать снимок из тома %s" -#: cinder/api/ec2/cloud.py:931 +#: cinder/api/v1/snapshots.py:186 cinder/api/v2/snapshots.py:202 #, python-format -msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" -msgstr "Подключить том %(volume_id)s для копии %(instance_id)s на %(device)s" +msgid "Invalid value '%s' for force. " +msgstr "" -#: cinder/api/ec2/cloud.py:939 +#: cinder/api/v1/volume_metadata.py:37 cinder/api/v1/volume_metadata.py:117 +#: cinder/api/v1/volume_metadata.py:156 cinder/api/v2/volume_metadata.py:36 +#: cinder/api/v2/volume_metadata.py:116 cinder/api/v2/volume_metadata.py:155 #, fuzzy -msgid "Attach Failed." 
-msgstr "Ошибка создания" +msgid "volume does not exist" +msgstr "Сервер не существует" -#: cinder/api/ec2/cloud.py:952 cinder/api/openstack/compute/contrib/volumes.py:366 +#: cinder/api/v1/volumes.py:111 #, python-format -msgid "Detach volume %s" -msgstr "Отсоединить том %s" - -#: cinder/api/ec2/cloud.py:959 -#, fuzzy, python-format -msgid "Detach Volume Failed." -msgstr "Отсоединить том %s" +msgid "vol=%s" +msgstr "vol=%s" -#: cinder/api/ec2/cloud.py:984 cinder/api/ec2/cloud.py:1041 -#: cinder/api/ec2/cloud.py:1518 cinder/api/ec2/cloud.py:1533 +#: cinder/api/v1/volumes.py:290 cinder/api/v2/volumes.py:228 #, python-format -msgid "attribute not supported: %s" -msgstr "аттрибут не поддерживается: %s" +msgid "Delete volume with id: %s" +msgstr "Удалить том с идентификатором: %s" -#: cinder/api/ec2/cloud.py:1107 -#, python-format -msgid "vol = %s\n" +#: cinder/api/v1/volumes.py:344 cinder/api/v1/volumes.py:348 +#: cinder/api/v2/volumes.py:298 cinder/api/v2/volumes.py:302 +msgid "Invalid imageRef provided." msgstr "" -#: cinder/api/ec2/cloud.py:1255 -msgid "Allocate address" -msgstr "Выделить адрес" - -#: cinder/api/ec2/cloud.py:1267 -#, python-format -msgid "Release address %s" -msgstr "Присвоить адрес %s" - -#: cinder/api/ec2/cloud.py:1272 +#: cinder/api/v1/volumes.py:388 cinder/api/v2/volumes.py:354 #, python-format -msgid "Associate address %(public_ip)s to instance %(instance_id)s" -msgstr "Присвоить адрес %(public_ip)s копии %(instance_id)s" +msgid "snapshot id:%s not found" +msgstr "" -#: cinder/api/ec2/cloud.py:1282 +#: cinder/api/v1/volumes.py:401 #, python-format -msgid "Disassociate address %s" -msgstr "Исключить адрес %s" - -#: cinder/api/ec2/cloud.py:1308 -msgid "Image must be available" -msgstr "Образ должен быть доступен" - -#: cinder/api/ec2/cloud.py:1329 -msgid "Going to start terminating instances" -msgstr "Выполнение завершения работы копий" +msgid "source vol id:%s not found" +msgstr "" -#: cinder/api/ec2/cloud.py:1343 +#: cinder/api/v1/volumes.py:412 cinder/api/v2/volumes.py:377 #, python-format -msgid "Reboot instance %r" -msgstr "Перезагрузить копию %r" +msgid "Create volume of %s GB" +msgstr "Создание раздела %s ГБ" -#: cinder/api/ec2/cloud.py:1354 -msgid "Going to stop instances" -msgstr "Выполнение остановки копий" +#: cinder/api/v1/volumes.py:496 +#, fuzzy, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "Удаление параметров '%(unk_opt_str)s' из запроса" -#: cinder/api/ec2/cloud.py:1365 -msgid "Going to start instances" -msgstr "Выполнение запуска копий" +#: cinder/api/v2/snapshots.py:111 cinder/api/v2/snapshots.py:126 +#: cinder/api/v2/snapshots.py:267 +msgid "Snapshot could not be found" +msgstr "" -#: cinder/api/ec2/cloud.py:1455 +#: cinder/api/v2/snapshots.py:174 cinder/api/v2/snapshots.py:234 +#: cinder/api/v2/volumes.py:313 cinder/api/v2/volumes.py:419 #, python-format -msgid "De-registering image %s" -msgstr "Исключение регистрации образа %s" - -#: cinder/api/ec2/cloud.py:1471 -msgid "imageLocation is required" +msgid "Missing required element '%s' in request body" msgstr "" -#: cinder/api/ec2/cloud.py:1490 -#, python-format -msgid "Registered image %(image_location)s with id %(image_id)s" -msgstr "Зарегистрированный образ %(image_location)s с идентификатором %(image_id)s" +#: cinder/api/v2/snapshots.py:190 cinder/api/v2/volumes.py:217 +#: cinder/api/v2/volumes.py:234 cinder/api/v2/volumes.py:449 +msgid "Volume could not be found" +msgstr "" -#: cinder/api/ec2/cloud.py:1536 -msgid "user or group not specified" -msgstr "не указан 
пользователь или группа" +#: cinder/api/v2/snapshots.py:230 cinder/api/v2/volumes.py:415 +msgid "Missing request body" +msgstr "" -#: cinder/api/ec2/cloud.py:1538 -msgid "only group \"all\" is supported" -msgstr "поддерживается только группа \"все(all)\"" +#: cinder/api/v2/types.py:70 +msgid "Volume type not found" +msgstr "" -#: cinder/api/ec2/cloud.py:1540 -msgid "operation_type must be add or remove" +#: cinder/api/v2/volumes.py:237 +msgid "Volume cannot be deleted while in attached state" msgstr "" -#: cinder/api/ec2/cloud.py:1542 -#, python-format -msgid "Updating image %s publicity" -msgstr "Обновление осведомлённости об образе %s" +#: cinder/api/v2/volumes.py:343 +msgid "Volume type not found." +msgstr "" -#: cinder/api/ec2/cloud.py:1555 +#: cinder/api/v2/volumes.py:366 #, python-format -msgid "Not allowed to modify attributes for image %s" +msgid "source volume id:%s not found" msgstr "" -#: cinder/api/ec2/cloud.py:1603 -#, python-format -msgid "Couldn't stop instance with in %d sec" -msgstr "Невозможно остановить копию в течении %d с." +#: cinder/api/v2/volumes.py:472 +#, fuzzy, python-format +msgid "Removing options '%s' from query" +msgstr "Удаление параметров '%(unk_opt_str)s' из запроса" -#: cinder/api/metadata/handler.py:246 cinder/api/metadata/handler.py:253 -#, python-format -msgid "Failed to get metadata for ip: %s" -msgstr "Ошибка получения метаданных для ip: %s" +#: cinder/backup/api.py:66 +#, fuzzy +msgid "Backup status must be available or error" +msgstr "Состояние тома должно быть доступно" -#: cinder/api/openstack/__init__.py:43 -#, python-format -msgid "Caught error: %s" -msgstr "Обнаружена ошибка: %s" +#: cinder/backup/api.py:105 +#, fuzzy +msgid "Volume to be backed up must be available" +msgstr "Состояние тома должно быть доступно" -#: cinder/api/openstack/__init__.py:45 cinder/api/openstack/wsgi.py:886 -#, python-format -msgid "%(url)s returned with HTTP %(status)d" -msgstr "%(url)s возвратил с HTTP %(status)d" +#: cinder/backup/api.py:140 +#, fuzzy +msgid "Backup status must be available" +msgstr "Состояние тома должно быть доступно" -#: cinder/api/openstack/__init__.py:94 -msgid "Must specify an ExtensionManager class" +#: cinder/backup/api.py:145 +msgid "Backup to be restored has invalid size" msgstr "" -#: cinder/api/openstack/__init__.py:105 +#: cinder/backup/api.py:154 #, python-format -msgid "Extended resource: %s" -msgstr "Расширенный ресурс: %s" +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" -#: cinder/api/openstack/__init__.py:130 +#: cinder/backup/api.py:170 +#, fuzzy +msgid "Volume to be restored to must be available" +msgstr "Состояние тома должно быть доступно" + +#: cinder/backup/api.py:176 #, python-format msgid "" -"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " -"resource" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." 
msgstr "" -"Расширение %(ext_name)s: Невозможно расширить ресурс %(collection)s: Нет " -"такого ресурса" -#: cinder/api/openstack/__init__.py:135 +#: cinder/backup/api.py:181 #, python-format -msgid "Extension %(ext_name)s extending resource: %(collection)s" -msgstr "Расширение %(ext_name)s расширение ресурса: %(collection)s" +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" -#: cinder/api/openstack/auth.py:90 -#, python-format -msgid "%(user_id)s could not be found with token '%(token)s'" -msgstr "%(user_id)s не может быть найден с токеном '%(token)s'" +#: cinder/backup/manager.py:97 +msgid "NULL host not allowed for volume backend lookup." +msgstr "" -#: cinder/api/openstack/auth.py:134 +#: cinder/backup/manager.py:100 #, python-format -msgid "%(user_id)s must be an admin or a member of %(project_id)s" -msgstr "%(user_id)s должен быть администратором или участником %(project_id)s" - -#: cinder/api/openstack/auth.py:152 -msgid "Authentication requests must be made against a version root (e.g. /v2)." +msgid "Checking hostname '%s' for backend info." msgstr "" -#: cinder/api/openstack/auth.py:167 +#: cinder/backup/manager.py:107 #, python-format -msgid "Could not find %s in request." -msgstr "В запросе невозможно найти %s." +msgid "Backend not found in hostname (%s) so using default." +msgstr "" -#: cinder/api/openstack/auth.py:191 +#: cinder/backup/manager.py:117 #, python-format -msgid "Successfully authenticated '%s'" -msgstr "Аутентификация '%s' выполнена" +msgid "Manager requested for volume_backend '%s'." +msgstr "" -#: cinder/api/openstack/auth.py:241 -msgid "User not found with provided API key." -msgstr "Не найден пользователь с предоставленным API ключом." +#: cinder/backup/manager.py:120 cinder/backup/manager.py:132 +msgid "Fetching default backend." +msgstr "" -#: cinder/api/openstack/auth.py:258 +#: cinder/backup/manager.py:123 #, python-format -msgid "Provided API key is valid, but not for user '%(username)s'" -msgstr "Предоставленный ключ API допустим, но не для пользователя '%(username)s'" - -#: cinder/api/openstack/common.py:133 cinder/api/openstack/common.py:167 -msgid "limit param must be an integer" +msgid "Volume manager for backend '%s' does not exist." msgstr "" -#: cinder/api/openstack/common.py:136 cinder/api/openstack/common.py:171 -msgid "limit param must be positive" +#: cinder/backup/manager.py:129 +#, python-format +msgid "Driver requested for volume_backend '%s'." msgstr "" -#: cinder/api/openstack/common.py:161 -msgid "offset param must be an integer" +#: cinder/backup/manager.py:147 +#, python-format +msgid "" +"Registering backend %(backend)s (host=%(host)s " +"backend_name=%(backend_name)s)." msgstr "" -#: cinder/api/openstack/common.py:175 -msgid "offset param must be positive" +#: cinder/backup/manager.py:154 +#, python-format +msgid "Registering default backend %s." msgstr "" -#: cinder/api/openstack/common.py:203 +#: cinder/backup/manager.py:158 #, python-format -msgid "marker [%s] not found" -msgstr "маркер [%s] не найден" +msgid "Starting volume driver %(driver_name)s (%(version)s)." +msgstr "" -#: cinder/api/openstack/common.py:243 +#: cinder/backup/manager.py:165 #, python-format -msgid "href %s does not contain version" -msgstr "href %s не содержит версию" +msgid "Error encountered during initialization of driver: %(name)s." +msgstr "" -#: cinder/api/openstack/common.py:278 -msgid "Image metadata limit exceeded" +#: cinder/backup/manager.py:184 +msgid "Cleaning up incomplete backup operations." 
msgstr "" -#: cinder/api/openstack/common.py:295 +#: cinder/backup/manager.py:189 #, python-format -msgid "Converting nw_info: %s" +msgid "Resetting volume %s to available (was backing-up)." msgstr "" -#: cinder/api/openstack/common.py:305 -#, fuzzy, python-format -msgid "Converted networks: %s" -msgstr "Непредвиденная ошибка: %s" +#: cinder/backup/manager.py:194 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)." +msgstr "" -#: cinder/api/openstack/common.py:338 +#: cinder/backup/manager.py:206 #, python-format -msgid "Cannot '%(action)s' while instance is in %(attr)s %(state)s" -msgstr "Невозможно '%(action)s', когда копия в %(attr)s %(state)s" +msgid "Resetting backup %s to error (was creating)." +msgstr "" -#: cinder/api/openstack/common.py:341 +#: cinder/backup/manager.py:212 #, python-format -msgid "Instance is in an invalid state for '%(action)s'" -msgstr "Копия в недопустимом состоянии для '%(action)s'" +msgid "Resetting backup %s to available (was restoring)." +msgstr "" -#: cinder/api/openstack/common.py:421 -msgid "Rejecting snapshot request, snapshots currently disabled" -msgstr "Отклонение запроса снимка, снимки в данный момент отключены" +#: cinder/backup/manager.py:217 +#, python-format +msgid "Resuming delete on backup: %s." +msgstr "" -#: cinder/api/openstack/common.py:423 -msgid "Instance snapshots are not permitted at this time." -msgstr "Снимки копии в настоящий момент недопустимы." +#: cinder/backup/manager.py:225 +#, python-format +msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" -#: cinder/api/openstack/extensions.py:188 +#: cinder/backup/manager.py:237 #, python-format -msgid "Loaded extension: %s" -msgstr "Загруженное расширение: %s" +msgid "" +"Create backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" -#: cinder/api/openstack/extensions.py:225 +#: cinder/backup/manager.py:249 #, python-format -msgid "Ext name: %s" +msgid "" +"Create backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." msgstr "" -#: cinder/api/openstack/extensions.py:226 +#: cinder/backup/manager.py:282 #, python-format -msgid "Ext alias: %s" +msgid "Create backup finished. backup: %s." msgstr "" -#: cinder/api/openstack/extensions.py:227 +#: cinder/backup/manager.py:286 #, python-format -msgid "Ext description: %s" +msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s." msgstr "" -#: cinder/api/openstack/extensions.py:229 +#: cinder/backup/manager.py:299 #, python-format -msgid "Ext namespace: %s" +msgid "" +"Restore backup aborted: expected volume status %(expected_status)s but " +"got %(actual_status)s." msgstr "" -#: cinder/api/openstack/extensions.py:230 +#: cinder/backup/manager.py:310 #, python-format -msgid "Ext updated: %s" +msgid "" +"Restore backup aborted: expected backup status %(expected_status)s but " +"got %(actual_status)s." msgstr "" -#: cinder/api/openstack/extensions.py:232 +#: cinder/backup/manager.py:329 #, python-format -msgid "Exception loading extension: %s" +msgid "" +"Restore backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." 
msgstr "" -#: cinder/api/openstack/extensions.py:246 +#: cinder/backup/manager.py:360 #, python-format -msgid "Loading extension %s" -msgstr "Загрузка расширения %s" +msgid "" +"Restore backup finished, backup %(backup_id)s restored to volume " +"%(volume_id)s." +msgstr "" -#: cinder/api/openstack/extensions.py:252 +#: cinder/backup/manager.py:379 #, python-format -msgid "Calling extension factory %s" +msgid "Delete backup started, backup: %s." msgstr "" -#: cinder/api/openstack/extensions.py:264 +#: cinder/backup/manager.py:386 #, python-format -msgid "Failed to load extension %(ext_factory)s: %(exc)s" -msgstr "Ошибка загрузки расширения %(ext_factory)s: %(exc)s" +msgid "" +"Delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" -#: cinder/api/openstack/extensions.py:344 +#: cinder/backup/manager.py:399 #, python-format -msgid "Failed to load extension %(classpath)s: %(exc)s" +msgid "" +"Delete backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." msgstr "" -#: cinder/api/openstack/extensions.py:368 +#: cinder/backup/manager.py:422 #, python-format -msgid "Failed to load extension %(ext_name)s: %(exc)s" -msgstr "Ошибка загрузки расширения %(ext_name)s: %(exc)s" +msgid "Delete backup finished, backup %s deleted." +msgstr "" -#: cinder/api/openstack/wsgi.py:135 cinder/api/openstack/wsgi.py:538 -msgid "cannot understand JSON" +#: cinder/backup/drivers/ceph.py:116 +msgid "" +"rbd striping not supported - ignoring configuration settings for rbd " +"striping" msgstr "" -#: cinder/api/openstack/wsgi.py:159 -#: cinder/api/openstack/compute/contrib/hosts.py:86 -msgid "cannot understand XML" +#: cinder/backup/drivers/ceph.py:147 +#, python-format +msgid "invalid user '%s'" msgstr "" -#: cinder/api/openstack/wsgi.py:543 -msgid "too many body keys" +#: cinder/backup/drivers/ceph.py:213 +msgid "backup_id required" msgstr "" -#: cinder/api/openstack/wsgi.py:582 -#, fuzzy, python-format -msgid "Exception handling resource: %s" -msgstr "Расширенный ресурс: %s" +#: cinder/backup/drivers/ceph.py:224 +#, python-format +msgid "discarding %(length)s bytes from offset %(offset)s" +msgstr "" -#: cinder/api/openstack/wsgi.py:586 +#: cinder/backup/drivers/ceph.py:232 #, python-format -msgid "Fault thrown: %s" +msgid "writing zeroes chunk %d" msgstr "" -#: cinder/api/openstack/wsgi.py:589 +#: cinder/backup/drivers/ceph.py:246 #, python-format -msgid "HTTP exception thrown: %s" +msgid "transferring data between '%(src)s' and '%(dest)s'" msgstr "" -#: cinder/api/openstack/wsgi.py:697 -msgid "Unrecognized Content-Type provided in request" -msgstr "В запросе предоставлен не распознанный тип-содержимого" +#: cinder/backup/drivers/ceph.py:250 +#, python-format +msgid "%(chunks)s chunks of %(bytes)s bytes to be transferred" +msgstr "" -#: cinder/api/openstack/wsgi.py:701 -msgid "No Content-Type provided in request" -msgstr "Тип содержимого не предоставлен в запросе" +#: cinder/backup/drivers/ceph.py:269 +#, python-format +msgid "transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s)" +msgstr "" -#: cinder/api/openstack/wsgi.py:705 -msgid "Empty body provided in request" -msgstr "Пустое тело предоставлено в запросе" +#: cinder/backup/drivers/ceph.py:279 +#, python-format +msgid "transferring remaining %s bytes" +msgstr "" -#: cinder/api/openstack/wsgi.py:816 +#: cinder/backup/drivers/ceph.py:295 #, python-format -msgid "There is no such action: %s" 
+msgid "creating base image '%s'" msgstr "" -#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 -#: cinder/api/openstack/compute/server_metadata.py:58 -#: cinder/api/openstack/compute/server_metadata.py:76 -#: cinder/api/openstack/compute/server_metadata.py:101 -#: cinder/api/openstack/compute/server_metadata.py:126 -#: cinder/api/openstack/compute/contrib/admin_actions.py:211 -#: cinder/api/openstack/compute/contrib/console_output.py:52 -msgid "Malformed request body" -msgstr "Неправильное тело запроса" +#: cinder/backup/drivers/ceph.py:322 cinder/backup/drivers/ceph.py:603 +#, python-format +msgid "deleting backup snapshot='%s'" +msgstr "" -#: cinder/api/openstack/wsgi.py:829 -msgid "Unsupported Content-Type" -msgstr "Не поддерживаемый тип содержимого" +#: cinder/backup/drivers/ceph.py:325 +msgid "no backup snapshot to delete" +msgstr "" -#: cinder/api/openstack/wsgi.py:841 -msgid "Malformed request url" -msgstr "Неправильный запрос url" +#: cinder/backup/drivers/ceph.py:361 +#, python-format +msgid "trying diff format name format basename='%s'" +msgstr "" -#: cinder/api/openstack/wsgi.py:889 +#: cinder/backup/drivers/ceph.py:369 #, python-format -msgid "%(url)s returned a fault: %(e)s" -msgstr "%(url)s возвратил ошибку: %(e)s" +msgid "image %s not found" +msgstr "" -#: cinder/api/openstack/xmlutil.py:265 -msgid "element is not a child" -msgstr "элемент не является потомком" +#: cinder/backup/drivers/ceph.py:377 +#, python-format +msgid "base image still has %s snapshots so skipping base image delete" +msgstr "" -#: cinder/api/openstack/xmlutil.py:414 -msgid "root element selecting a list" +#: cinder/backup/drivers/ceph.py:382 +#, python-format +msgid "deleting base image='%s'" msgstr "" -#: cinder/api/openstack/xmlutil.py:739 +#: cinder/backup/drivers/ceph.py:389 #, python-format -msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgid "image busy, retrying %(retries)s more time(s) in %(delay)ss" msgstr "" -#: cinder/api/openstack/xmlutil.py:858 -msgid "subclasses must implement construct()!" +#: cinder/backup/drivers/ceph.py:394 +msgid "max retries reached - raising error" msgstr "" -#: cinder/api/openstack/compute/extensions.py:29 -#: cinder/api/openstack/volume/extensions.py:29 -msgid "Initializing extension manager." -msgstr "Инициализация диспетчера расширений." +#: cinder/backup/drivers/ceph.py:397 +#, python-format +msgid "base backup image='%s' deleted)" +msgstr "" -#: cinder/api/openstack/compute/image_metadata.py:40 -#: cinder/api/openstack/compute/images.py:146 -#: cinder/api/openstack/compute/images.py:161 -msgid "Image not found." -msgstr "образ не найден." 
+#: cinder/backup/drivers/ceph.py:407 +#, python-format +msgid "deleting source snap '%s'" +msgstr "" -#: cinder/api/openstack/compute/image_metadata.py:79 -msgid "Incorrect request body format" -msgstr "Неправильный формат тела запроса" +#: cinder/backup/drivers/ceph.py:453 +#, python-format +msgid "performing differential transfer from '%(src)s' to '%(dest)s'" +msgstr "" -#: cinder/api/openstack/compute/image_metadata.py:83 -#: cinder/api/openstack/compute/server_metadata.py:80 -#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:79 -#: cinder/api/openstack/compute/contrib/volumetypes.py:188 -msgid "Request body and URI mismatch" -msgstr "Тело запроса и URI не совпадают" +#: cinder/backup/drivers/ceph.py:478 +#, python-format +msgid "rbd diff op failed - (ret=%(ret)s stderr=%(stderr)s)" +msgstr "" -#: cinder/api/openstack/compute/image_metadata.py:86 -#: cinder/api/openstack/compute/server_metadata.py:84 -#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:82 -#: cinder/api/openstack/compute/contrib/volumetypes.py:191 -msgid "Request body contains too many items" -msgstr "Тело запроса содержит избыточное количество объектов" +#: cinder/backup/drivers/ceph.py:488 +#, python-format +msgid "image '%s' not found - trying diff format name" +msgstr "" -#: cinder/api/openstack/compute/image_metadata.py:111 -msgid "Invalid metadata key" -msgstr "Неправильный ключ метаданных" +#: cinder/backup/drivers/ceph.py:493 +#, python-format +msgid "diff format image '%s' not found" +msgstr "" -#: cinder/api/openstack/compute/ips.py:74 -msgid "Instance does not exist" -msgstr "Копия не существует" +#: cinder/backup/drivers/ceph.py:528 +#, python-format +msgid "using --from-snap '%s'" +msgstr "" -#: cinder/api/openstack/compute/ips.py:97 -msgid "Instance is not a member of specified network" -msgstr "Копия не является участником заданной сети" +#: cinder/backup/drivers/ceph.py:543 +#, python-format +msgid "source snap '%s' is stale so deleting" +msgstr "" -#: cinder/api/openstack/compute/limits.py:140 +#: cinder/backup/drivers/ceph.py:555 #, python-format msgid "" -"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " -"%(unit_string)s." +"snap='%(snap)s' does not exist in base image='%(base)s' - aborting " +"incremental backup" msgstr "" -"Только %(value)s %(verb)s запрос(ов) могут быть сделаны для %(uri)s, " -"каждые %(unit_string)s." -#: cinder/api/openstack/compute/limits.py:266 -msgid "This request was rate-limited." 
+#: cinder/backup/drivers/ceph.py:566 +#, python-format +msgid "creating backup snapshot='%s'" msgstr "" -#: cinder/api/openstack/compute/server_metadata.py:38 -#: cinder/api/openstack/compute/server_metadata.py:122 -#: cinder/api/openstack/compute/server_metadata.py:159 -msgid "Server does not exist" -msgstr "Сервер не существует" +#: cinder/backup/drivers/ceph.py:586 +#, python-format +msgid "differential backup transfer completed in %.4fs" +msgstr "" -#: cinder/api/openstack/compute/server_metadata.py:141 -#: cinder/api/openstack/compute/server_metadata.py:152 -msgid "Metadata item was not found" -msgstr "Элемент метаданных не найден" +#: cinder/backup/drivers/ceph.py:595 +msgid "differential backup transfer failed" +msgstr "" -#: cinder/api/openstack/compute/servers.py:425 +#: cinder/backup/drivers/ceph.py:625 #, python-format -msgid "Invalid server status: %(status)s" -msgstr "Недопустимое состояние сервера: %(status)s" +msgid "creating base image='%s'" +msgstr "" -#: cinder/api/openstack/compute/servers.py:433 -msgid "Invalid changes-since value" +#: cinder/backup/drivers/ceph.py:634 +msgid "copying data" msgstr "" -#: cinder/api/openstack/compute/servers.py:483 -msgid "Personality file limit exceeded" +#: cinder/backup/drivers/ceph.py:694 +#, python-format +msgid "looking for snapshot of backup base '%s'" msgstr "" -#: cinder/api/openstack/compute/servers.py:485 -msgid "Personality file path too long" +#: cinder/backup/drivers/ceph.py:697 +#, python-format +msgid "backup base '%s' has no snapshots" msgstr "" -#: cinder/api/openstack/compute/servers.py:487 -msgid "Personality file content too long" +#: cinder/backup/drivers/ceph.py:704 +#, python-format +msgid "backup '%s' has no snapshot" msgstr "" -#: cinder/api/openstack/compute/servers.py:501 -msgid "Server name is not a string or unicode" +#: cinder/backup/drivers/ceph.py:708 +#, python-format +msgid "backup should only have one snapshot but instead has %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:505 -msgid "Server name is an empty string" -msgstr "Имя сервера является пустой строкой" +#: cinder/backup/drivers/ceph.py:713 +#, python-format +msgid "found snapshot '%s'" +msgstr "" -#: cinder/api/openstack/compute/servers.py:509 -msgid "Server name must be less than 256 characters." +#: cinder/backup/drivers/ceph.py:734 +msgid "need non-zero volume size" msgstr "" -#: cinder/api/openstack/compute/servers.py:527 +#: cinder/backup/drivers/ceph.py:751 #, python-format -msgid "Bad personality format: missing %s" +msgid "Starting backup of volume='%s'" msgstr "" -#: cinder/api/openstack/compute/servers.py:530 -msgid "Bad personality format" +#: cinder/backup/drivers/ceph.py:764 +msgid "forcing full backup" msgstr "" -#: cinder/api/openstack/compute/servers.py:535 +#: cinder/backup/drivers/ceph.py:776 #, python-format -msgid "Personality content for %s cannot be decoded" +msgid "backup '%s' finished." 
msgstr "" -#: cinder/api/openstack/compute/servers.py:550 -#, python-format -msgid "Bad networks format: network uuid is not in proper format (%s)" -msgstr "Недопустимый сетевой формат: сетевой uuid имеет неправильный формат (%s)" +#: cinder/backup/drivers/ceph.py:834 +msgid "adjusting restore vol size" +msgstr "" -#: cinder/api/openstack/compute/servers.py:559 +#: cinder/backup/drivers/ceph.py:846 #, python-format -msgid "Invalid fixed IP address (%s)" -msgstr "Недопустимый фиксированный IP-адрес (%s)" +msgid "trying incremental restore from base='%(base)s' snap='%(snap)s'" +msgstr "" -#: cinder/api/openstack/compute/servers.py:566 -#, python-format -msgid "Duplicate networks (%s) are not allowed" -msgstr "Дубликаты сетей (%s) не разрешены" +#: cinder/backup/drivers/ceph.py:858 +msgid "differential restore failed, trying full restore" +msgstr "" -#: cinder/api/openstack/compute/servers.py:572 +#: cinder/backup/drivers/ceph.py:869 #, python-format -msgid "Bad network format: missing %s" -msgstr "Недопустимый сетевой формат: отсутствует %s" +msgid "restore transfer completed in %.4fs" +msgstr "" -#: cinder/api/openstack/compute/servers.py:575 -msgid "Bad networks format" -msgstr "Недопустимый сетевой формат" +#: cinder/backup/drivers/ceph.py:916 +#, python-format +msgid "rbd has %s extents" +msgstr "" -#: cinder/api/openstack/compute/servers.py:587 -msgid "Userdata content cannot be decoded" -msgstr "Содержимое данных пользователя не может быть дешифровано" +#: cinder/backup/drivers/ceph.py:938 +msgid "dest volume is original volume - forcing full copy" +msgstr "" -#: cinder/api/openstack/compute/servers.py:594 -msgid "accessIPv4 is not proper IPv4 format" +#: cinder/backup/drivers/ceph.py:959 +msgid "destination has extents - forcing full copy" msgstr "" -#: cinder/api/openstack/compute/servers.py:601 -msgid "accessIPv6 is not proper IPv6 format" +#: cinder/backup/drivers/ceph.py:964 +#, python-format +msgid "no restore point found for backup='%s', forcing full copy" msgstr "" -#: cinder/api/openstack/compute/servers.py:633 -msgid "Server name is not defined" -msgstr "Имя сервера не задано" +#: cinder/backup/drivers/ceph.py:995 +msgid "forcing full restore" +msgstr "" -#: cinder/api/openstack/compute/servers.py:675 -#: cinder/api/openstack/compute/servers.py:740 -msgid "Invalid flavorRef provided." -msgstr "Предоставлен недопустимый flavorRef." +#: cinder/backup/drivers/ceph.py:1005 +#, python-format +msgid "starting restore from Ceph backup=%(src)s to volume=%(dest)s" +msgstr "" -#: cinder/api/openstack/compute/servers.py:737 -msgid "Can not find requested image" -msgstr "Невозможно найти запрошенный образ" +#: cinder/backup/drivers/ceph.py:1016 +msgid "volume_file does not support fileno() so skipping fsync()" +msgstr "" -#: cinder/api/openstack/compute/servers.py:743 -msgid "Invalid key_name provided." -msgstr "Предоставлен недопустимый key_name." +#: cinder/backup/drivers/ceph.py:1021 +msgid "restore finished successfully." +msgstr "" -#: cinder/api/openstack/compute/servers.py:829 -#: cinder/api/openstack/compute/servers.py:849 -msgid "Instance has not been resized." -msgstr "С копией не производилось изменение размера." 
+#: cinder/backup/drivers/ceph.py:1023 +#, python-format +msgid "restore finished with error - %s" +msgstr "" -#: cinder/api/openstack/compute/servers.py:835 +#: cinder/backup/drivers/ceph.py:1029 #, python-format -msgid "Error in confirm-resize %s" +msgid "delete started for backup=%s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1034 +msgid "rbd image not found but continuing anyway so that db entry can be removed" msgstr "" -#: cinder/api/openstack/compute/servers.py:855 +#: cinder/backup/drivers/ceph.py:1037 #, python-format -msgid "Error in revert-resize %s" +msgid "delete '%s' finished with warning" msgstr "" -#: cinder/api/openstack/compute/servers.py:868 -msgid "Argument 'type' for reboot is not HARD or SOFT" +#: cinder/backup/drivers/ceph.py:1039 +#, python-format +msgid "delete '%s' finished" msgstr "" -#: cinder/api/openstack/compute/servers.py:872 -msgid "Missing argument 'type' for reboot" -msgstr "Отсутствует аргумент типа 'type' для перезагрузки" +#: cinder/backup/drivers/swift.py:106 +#, fuzzy, python-format +msgid "unsupported compression algorithm: %s" +msgstr "неподдерживаемый раздел: %s" -#: cinder/api/openstack/compute/servers.py:885 +#: cinder/backup/drivers/swift.py:123 #, python-format -msgid "Error in reboot %s" -msgstr "Ошибка при перезагрузке %s" +msgid "single_user auth mode enabled, but %(param)s not set" +msgstr "" -#: cinder/api/openstack/compute/servers.py:897 -msgid "Unable to locate requested flavor." +#: cinder/backup/drivers/swift.py:141 +#, python-format +msgid "_check_container_exists: container: %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:900 -msgid "Resize requires a change in size." -msgstr "Изменение размера требует изменения объёма." +#: cinder/backup/drivers/swift.py:146 +#, fuzzy, python-format +msgid "container %s does not exist" +msgstr "Копия не существует" -#: cinder/api/openstack/compute/servers.py:924 -msgid "Malformed server entity" -msgstr "Неправильный объект сервера" +#: cinder/backup/drivers/swift.py:151 +#, fuzzy, python-format +msgid "container %s exists" +msgstr "Копия не существует" -#: cinder/api/openstack/compute/servers.py:931 -msgid "Missing imageRef attribute" -msgstr "Отсутствует атрибут imageRef" +#: cinder/backup/drivers/swift.py:157 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" -#: cinder/api/openstack/compute/servers.py:940 -msgid "Invalid imageRef provided." 
+#: cinder/backup/drivers/swift.py:173 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:949 -msgid "Missing flavorRef attribute" -msgstr "Отсутствует атрибут flavorRef" +#: cinder/backup/drivers/swift.py:182 +#, fuzzy, python-format +msgid "generated object list: %s" +msgstr "Ожидался объект типа: %s" -#: cinder/api/openstack/compute/servers.py:962 -msgid "No adminPass was specified" -msgstr "adminPass не был задан" +#: cinder/backup/drivers/swift.py:192 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" -#: cinder/api/openstack/compute/servers.py:966 -#: cinder/api/openstack/compute/servers.py:1144 -msgid "Invalid adminPass" -msgstr "Недопустимый adminPass" +#: cinder/backup/drivers/swift.py:209 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" -#: cinder/api/openstack/compute/servers.py:980 -msgid "Unable to parse metadata key/value pairs." +#: cinder/backup/drivers/swift.py:214 +msgid "_write_metadata finished" msgstr "" -#: cinder/api/openstack/compute/servers.py:993 -msgid "Resize request has invalid 'flavorRef' attribute." -msgstr "Запрос изменения размера имеет недопустимый атрибут 'flavorRef'." +#: cinder/backup/drivers/swift.py:219 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" -#: cinder/api/openstack/compute/servers.py:996 -msgid "Resize requests require 'flavorRef' attribute." -msgstr "Запросы изменение размера требуют атрибут 'flavorRef'." +#: cinder/backup/drivers/swift.py:224 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" -#: cinder/api/openstack/compute/servers.py:1014 -#: cinder/api/openstack/compute/contrib/aggregates.py:142 -#: cinder/api/openstack/compute/contrib/networks.py:65 -msgid "Invalid request body" -msgstr "Недопустимый запрос тела" +#: cinder/backup/drivers/swift.py:234 +#, fuzzy, python-format +msgid "volume size %d is invalid." +msgstr "Недопустимый запрос." -#: cinder/api/openstack/compute/servers.py:1019 -msgid "Could not parse imageRef from request." 
+#: cinder/backup/drivers/swift.py:248 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:1071 -msgid "Instance could not be found" -msgstr "Копия не найдена" +#: cinder/backup/drivers/swift.py:271 +msgid "reading chunk of data from volume" +msgstr "" -#: cinder/api/openstack/compute/servers.py:1074 -msgid "Cannot find image for rebuild" +#: cinder/backup/drivers/swift.py:278 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:1103 -msgid "createImage entity requires name attribute" +#: cinder/backup/drivers/swift.py:287 +msgid "not compressing data" msgstr "" -#: cinder/api/openstack/compute/servers.py:1112 -#: cinder/api/openstack/compute/contrib/admin_actions.py:238 -msgid "Invalid metadata" -msgstr "Недопустимые метаданные" +#: cinder/backup/drivers/swift.py:291 +msgid "About to put_object" +msgstr "" -#: cinder/api/openstack/compute/servers.py:1167 +#: cinder/backup/drivers/swift.py:297 #, python-format -msgid "Removing options '%(unk_opt_str)s' from query" -msgstr "Удаление параметров '%(unk_opt_str)s' из запроса" +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:60 +#: cinder/backup/drivers/swift.py:301 #, python-format -msgid "Compute.api::pause %s" +msgid "backup MD5 for %(object_name)s: %(md5)s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:77 +#: cinder/backup/drivers/swift.py:304 #, python-format -msgid "Compute.api::unpause %s" +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:94 -#, python-format -msgid "compute.api::suspend %s" +#: cinder/backup/drivers/swift.py:312 +msgid "Calling eventlet.sleep(0)" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:111 +#: cinder/backup/drivers/swift.py:328 cinder/backup/drivers/tsm.py:324 #, python-format -msgid "compute.api::resume %s" +msgid "backup %s finished." msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:127 +#: cinder/backup/drivers/swift.py:345 #, python-format -msgid "Error in migrate %s" -msgstr "Ошибка перемещения %s" +msgid "v1 swift volume backup restore of %s started" +msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:141 +#: cinder/backup/drivers/swift.py:350 #, python-format -msgid "Compute.api::reset_network %s" +msgid "metadata_object_names = %s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:154 -#: cinder/api/openstack/compute/contrib/admin_actions.py:170 -#: cinder/api/openstack/compute/contrib/admin_actions.py:186 -#: cinder/api/openstack/compute/contrib/multinic.py:41 -#: cinder/api/openstack/compute/contrib/rescue.py:44 -msgid "Server not found" -msgstr "Сервер не найден" +#: cinder/backup/drivers/swift.py:356 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:157 +#: cinder/backup/drivers/swift.py:362 #, python-format -msgid "Compute.api::inject_network_info %s" +msgid "" +"restoring object from swift. 
backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:173 +#: cinder/backup/drivers/swift.py:378 #, python-format -msgid "Compute.api::lock %s" +msgid "decompressing data using %s algorithm" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:189 +#: cinder/backup/drivers/swift.py:401 #, python-format -msgid "Compute.api::unlock %s" +msgid "v1 swift volume backup restore of %s finished" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:219 +#: cinder/backup/drivers/swift.py:409 #, python-format -msgid "createBackup entity requires %s attribute" +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:223 -msgid "Malformed createBackup entity" +#: cinder/backup/drivers/swift.py:423 +#, python-format +msgid "Restoring swift backup version %s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:229 -msgid "createBackup attribute 'rotation' must be an integer" +#: cinder/backup/drivers/swift.py:428 +#, python-format +msgid "No support to restore swift backup version %s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:244 -#: cinder/api/openstack/compute/contrib/console_output.py:47 -#: cinder/api/openstack/compute/contrib/console_output.py:59 -#: cinder/api/openstack/compute/contrib/consoles.py:49 -#: cinder/api/openstack/compute/contrib/consoles.py:60 -#: cinder/api/openstack/compute/contrib/server_action_list.py:49 -#: cinder/api/openstack/compute/contrib/server_diagnostics.py:47 -#: cinder/api/openstack/compute/contrib/server_start_stop.py:38 -msgid "Instance not found" -msgstr "Копия не найдена" - -#: cinder/api/openstack/compute/contrib/admin_actions.py:272 -msgid "host and block_migration must be specified." +#: cinder/backup/drivers/swift.py:432 cinder/backup/drivers/tsm.py:378 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." 
msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:284 -#, python-format -msgid "Live migration of instance %(id)s to host %(host)s failed" +#: cinder/backup/drivers/swift.py:446 +msgid "swift error while listing objects, continuing with delete" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:76 -#, fuzzy, python-format -msgid "" -"Cannot create aggregate with name %(name)s and availability zone " -"%(avail_zone)s" +#: cinder/backup/drivers/swift.py:455 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" msgstr "" -"Невозможно создать volume_type с именем %(name)s и спецификациями " -"%(extra_specs)s" -#: cinder/api/openstack/compute/contrib/aggregates.py:88 +#: cinder/backup/drivers/swift.py:458 #, python-format -msgid "Cannot show aggregate: %(id)s" +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:114 +#: cinder/backup/drivers/swift.py:468 cinder/backup/drivers/tsm.py:440 #, fuzzy, python-format -msgid "Cannot update aggregate: %(id)s" -msgstr "Ошибка обновления агента: %(resp)r" +msgid "delete %s finished" +msgstr "_удалить: %s" -#: cinder/api/openstack/compute/contrib/aggregates.py:126 +#: cinder/backup/drivers/tsm.py:85 #, python-format -msgid "Cannot delete aggregate: %(id)s" +msgid "" +"backup: %(vol_id)s Failed to create device hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:139 +#: cinder/backup/drivers/tsm.py:143 #, python-format -msgid "Aggregates does not have %s action" +msgid "" +"backup: %(vol_id)s Failed to obtain backup success notification from " +"server.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:152 -#: cinder/api/openstack/compute/contrib/aggregates.py:158 +#: cinder/backup/drivers/tsm.py:173 #, python-format -msgid "Cannot add host %(host)s in aggregate %(id)s" +msgid "" +"restore: %(vol_id)s Failed.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:171 -#: cinder/api/openstack/compute/contrib/aggregates.py:175 +#: cinder/backup/drivers/tsm.py:199 #, python-format -msgid "Cannot remove host %(host)s in aggregate %(id)s" +msgid "backup: %(vol_id)s Failed. %(path)s is not a block device." msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:195 +#: cinder/backup/drivers/tsm.py:206 #, python-format -msgid "Cannot set metadata %(metadata)s in aggregate %(id)s" +msgid "backup: %(vol_id)s Failed. Cannot obtain real path to device %(path)s." msgstr "" -#: cinder/api/openstack/compute/contrib/certificates.py:75 -msgid "Only root certificate can be retrieved." +#: cinder/backup/drivers/tsm.py:213 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a file." 
msgstr "" -#: cinder/api/openstack/compute/contrib/cloudpipe.py:146 +#: cinder/backup/drivers/tsm.py:260 +#, python-format msgid "" -"Unable to claim IP for VPN instances, ensure it isn't running, and try " -"again in a few minutes" +"backup: %(vol_id)s Failed to remove backup hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/consoles.py:44 -msgid "Missing type specification" -msgstr "" - -#: cinder/api/openstack/compute/contrib/consoles.py:56 -msgid "Invalid type specification" +#: cinder/backup/drivers/tsm.py:286 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to TSM, volume path: " +"%(volume_path)s," msgstr "" -#: cinder/api/openstack/compute/contrib/disk_config.py:44 +#: cinder/backup/drivers/tsm.py:298 #, python-format -msgid "%s must be either 'MANUAL' or 'AUTO'." +msgid "" +"backup: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/extended_server_attributes.py:77 -#: cinder/api/openstack/compute/contrib/extended_status.py:61 -msgid "Server not found." -msgstr "Сервер не найден." - -#: cinder/api/openstack/compute/contrib/flavorextradata.py:61 -#: cinder/api/openstack/compute/contrib/flavorextradata.py:91 -#, fuzzy -msgid "Flavor not found." -msgstr "Сервер не найден." - -#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:49 -#: cinder/api/openstack/compute/contrib/volumetypes.py:158 -msgid "No Request Body" -msgstr "Нет тела запроса" - -#: cinder/api/openstack/compute/contrib/floating_ips.py:159 +#: cinder/backup/drivers/tsm.py:308 #, python-format -msgid "No more floating ips in pool %s." +msgid "" +"backup: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:161 -msgid "No more floating ips available." 
+#: cinder/backup/drivers/tsm.py:338 +#, python-format +msgid "" +"restore: starting restore of backup from TSM to volume %(volume_id)s, " +"backup: %(backup_id)s" msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:201 -#: cinder/api/openstack/compute/contrib/floating_ips.py:230 -#: cinder/api/openstack/compute/contrib/security_groups.py:571 -#: cinder/api/openstack/compute/contrib/security_groups.py:604 -msgid "Missing parameter dict" +#: cinder/backup/drivers/tsm.py:352 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:204 -#: cinder/api/openstack/compute/contrib/floating_ips.py:233 -msgid "Address not specified" -msgstr "Адрес не задан" - -#: cinder/api/openstack/compute/contrib/floating_ips.py:213 -msgid "No fixed ips associated to instance" +#: cinder/backup/drivers/tsm.py:362 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:216 -msgid "Associate floating ip failed" +#: cinder/backup/drivers/tsm.py:413 +#, python-format +msgid "" +"delete: %(vol_id)s Failed to run dsmc with stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:144 +#: cinder/backup/drivers/tsm.py:421 #, python-format -msgid "Invalid status: '%s'" -msgstr "Недопустимое состояние: '%s'" - -#: cinder/api/openstack/compute/contrib/hosts.py:148 -#, fuzzy, python-format -msgid "Invalid mode: '%s'" -msgstr "Недопустимое состояние: '%s'" +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments with " +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:152 +#: cinder/backup/drivers/tsm.py:432 #, python-format -msgid "Invalid update setting: '%s'" -msgstr "Недопустимый параметр обновления: '%s'" - -#: cinder/api/openstack/compute/contrib/hosts.py:170 -#, fuzzy, python-format -msgid "Putting host %(host)s in maintenance mode %(mode)s." -msgstr "Перевод узла %(host)s в %(state)s." +msgid "" +"delete: %(vol_id)s Failed with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:181 +#: cinder/brick/exception.py:55 #, python-format -msgid "Setting host %(host)s to %(state)s." -msgstr "Перевод узла %(host)s в %(state)s." - -#: cinder/api/openstack/compute/contrib/hosts.py:230 -msgid "Describe-resource is admin only functionality" +msgid "Exception in string format operation. msg='%s'" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:238 -msgid "Host not found" -msgstr "Узел не найден" - -#: cinder/api/openstack/compute/contrib/keypairs.py:70 -msgid "Keypair name contains unsafe characters" +#: cinder/brick/exception.py:85 +msgid "We are unable to locate any Fibre Channel devices." msgstr "" -#: cinder/api/openstack/compute/contrib/keypairs.py:95 -msgid "Keypair name must be between 1 and 255 characters long" +#: cinder/brick/exception.py:89 +msgid "Unable to find a Fibre Channel volume device." msgstr "" -#: cinder/api/openstack/compute/contrib/keypairs.py:100 +#: cinder/brick/exception.py:93 #, python-format -msgid "Key pair '%s' already exists." +msgid "Volume device not found at %(device)s." 
msgstr "" -#: cinder/api/openstack/compute/contrib/multinic.py:52 -msgid "Missing 'networkId' argument for addFixedIp" -msgstr "Отсутствует аргумент 'networkId' для addFixedIp" - -#: cinder/api/openstack/compute/contrib/multinic.py:68 -msgid "Missing 'address' argument for removeFixedIp" -msgstr "Отсутствует аргумент 'address' для removeFixedIp" - -#: cinder/api/openstack/compute/contrib/multinic.py:77 +#: cinder/brick/exception.py:97 #, python-format -msgid "Unable to find address %r" -msgstr "Невозможно найти адрес %r" +msgid "Unable to find Volume Group: %(vg_name)s" +msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:62 +#: cinder/brick/exception.py:101 #, python-format -msgid "Network does not have %s action" +msgid "Failed to create Volume Group: %(vg_name)s" msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:70 -#, python-format -msgid "Disassociating network with id %s" -msgstr "Исключение сети с идентификатором %s" +#: cinder/brick/exception.py:105 +#, fuzzy, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "Выполнена попытка удаления несуществующей консоли %(console_id)s." -#: cinder/api/openstack/compute/contrib/networks.py:74 -#: cinder/api/openstack/compute/contrib/networks.py:91 -#: cinder/api/openstack/compute/contrib/networks.py:101 -msgid "Network not found" -msgstr "Сеть не найдена" +#: cinder/brick/exception.py:109 +#, fuzzy, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "Выполнена попытка удаления несуществующей консоли %(console_id)s." -#: cinder/api/openstack/compute/contrib/networks.py:87 +#: cinder/brick/exception.py:113 #, python-format -msgid "Showing network with id %s" -msgstr "Отображение сети с идентификатором %s" +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:97 +#: cinder/brick/exception.py:117 #, python-format -msgid "Deleting network with id %s" -msgstr "Удаление сети с идентификатором %s" - -#: cinder/api/openstack/compute/contrib/scheduler_hints.py:41 -msgid "Malformed scheduler_hints attribute" +msgid "Connect to volume via protocol %(protocol)s not supported." msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:222 -msgid "Security group id should be integer" +#: cinder/brick/initiator/connector.py:127 +#, python-format +msgid "Invalid InitiatorConnector protocol specified %(protocol)s" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:243 -#, fuzzy, python-format -msgid "Security group is still in use" -msgstr "Группа безопасности (%s) не найдена" - -#: cinder/api/openstack/compute/contrib/security_groups.py:295 +#: cinder/brick/initiator/connector.py:140 #, python-format -msgid "Security group %s already exists" -msgstr "Группа безопасности %s уже существует" +msgid "Failed to access the device on the path %(path)s: %(error)s %(info)s." +msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:315 +#: cinder/brick/initiator/connector.py:229 #, python-format -msgid "Security group %s is not a string or unicode" +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. Try" +" number: %(tries)s" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:318 +#: cinder/brick/initiator/connector.py:242 #, python-format -msgid "Security group %s cannot be empty." -msgstr "Группа безопасности %s не может быть пустой." 
+msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:321 +#: cinder/brick/initiator/connector.py:317 #, python-format -msgid "Security group %s should not be greater than 255 characters." +msgid "Could not find the iSCSI Initiator File %s" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:348 -msgid "Parent group id is not integer" +#: cinder/brick/initiator/connector.py:609 +msgid "We are unable to locate any Fibre Channel devices" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:351 +#: cinder/brick/initiator/connector.py:619 #, python-format -msgid "Security group (%s) not found" -msgstr "Группа безопасности (%s) не найдена" - -#: cinder/api/openstack/compute/contrib/security_groups.py:369 -msgid "Not enough parameters to build a valid rule." -msgstr "Недостаточно параметров для сбора правильного правила." - -#: cinder/api/openstack/compute/contrib/security_groups.py:376 -#, python-format -msgid "This rule already exists in group %s" -msgstr "Это правило уже существует в группе %s" - -#: cinder/api/openstack/compute/contrib/security_groups.py:414 -msgid "Parent or group id is not integer" +msgid "Looking for Fibre Channel dev %(device)s" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:507 -msgid "Rule id is not integer" +#: cinder/brick/initiator/connector.py:629 +msgid "Fibre Channel volume device not found." msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:510 +#: cinder/brick/initiator/connector.py:633 #, python-format -msgid "Rule (%s) not found" -msgstr "Правило (%s) не найдено" - -#: cinder/api/openstack/compute/contrib/security_groups.py:574 -#: cinder/api/openstack/compute/contrib/security_groups.py:607 -msgid "Security group not specified" -msgstr "Группа безопасности не задана" - -#: cinder/api/openstack/compute/contrib/security_groups.py:578 -#: cinder/api/openstack/compute/contrib/security_groups.py:611 -msgid "Security group name cannot be empty" -msgstr "Наименование группы безопасности не может отсутствовать" +msgid "Fibre volume not yet found. Will rescan & retry. Try number: %(tries)s" +msgstr "" -#: cinder/api/openstack/compute/contrib/server_start_stop.py:45 +#: cinder/brick/initiator/connector.py:649 #, python-format -msgid "start instance %r" +msgid "Found Fibre Channel volume %(name)s (after %(tries)s rescans)" msgstr "" -#: cinder/api/openstack/compute/contrib/server_start_stop.py:54 +#: cinder/brick/initiator/connector.py:658 #, python-format -msgid "stop instance %r" +msgid "Multipath device discovered %(device)s" msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:73 -#: cinder/api/openstack/volume/volumes.py:106 +#: cinder/brick/initiator/connector.py:776 #, python-format -msgid "vol=%s" -msgstr "vol=%s" +msgid "AoE volume not yet found at: %(path)s. 
Try number: %(tries)s" +msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:146 -#: cinder/api/openstack/volume/volumes.py:184 +#: cinder/brick/initiator/connector.py:789 #, python-format -msgid "Delete volume with id: %s" -msgstr "Удалить том с идентификатором: %s" +msgid "Found AoE device %(path)s (after %(tries)s rediscover)" +msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:329 +#: cinder/brick/initiator/connector.py:815 #, python-format -msgid "Attach volume %(volume_id)s to instance %(server_id)s at %(device)s" -msgstr "Присоединить том %(volume_id)s к копии %(server_id)s на %(device)s" +msgid "aoe-discover: stdout=%(out)s stderr%(err)s" +msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:481 -#: cinder/api/openstack/volume/snapshots.py:110 +#: cinder/brick/initiator/connector.py:825 #, python-format -msgid "Delete snapshot with id: %s" -msgstr "Удалить снимок с идентификатором: %s" +msgid "aoe-revalidate %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:524 -#: cinder/api/openstack/volume/snapshots.py:150 +#: cinder/brick/initiator/connector.py:834 #, python-format -msgid "Create snapshot from volume %s" -msgstr "Создать снимок из тома %s" - -#: cinder/auth/fakeldap.py:33 -msgid "Attempted to instantiate singleton" +msgid "aoe-flush %(dev)s: stdout=%(out)s stderr%(err)s" msgstr "" -#: cinder/auth/ldapdriver.py:650 -#, python-format +#: cinder/brick/initiator/connector.py:858 msgid "" -"Attempted to remove the last member of a group. Deleting the group at %s " -"instead." +"Connection details not present. RemoteFsClient may not initialize " +"properly." msgstr "" -"Попытка удаление последнего участника группы. Будет выполнено удаление " -"группы в %s." -#: cinder/auth/manager.py:298 -#, python-format -msgid "Looking up user: %r" -msgstr "Поиск пользователя: %r" +#: cinder/brick/initiator/connector.py:915 +msgid "Invalid connection_properties specified no device_path attribute" +msgstr "" + +#: cinder/brick/initiator/linuxfc.py:50 cinder/brick/initiator/linuxfc.py:56 +msgid "systool is not installed" +msgstr "" -#: cinder/auth/manager.py:302 +#: cinder/brick/initiator/linuxscsi.py:99 +#: cinder/brick/initiator/linuxscsi.py:107 +#: cinder/brick/initiator/linuxscsi.py:124 #, python-format -msgid "Failed authorization for access key %s" -msgstr "Ошибка авторизации для ключа доступа %s" +msgid "multipath call failed exit (%(code)s)" +msgstr "" -#: cinder/auth/manager.py:308 +#: cinder/brick/initiator/linuxscsi.py:145 #, python-format -msgid "Using project name = user name (%s)" +msgid "Couldn't find multipath device %(line)s" msgstr "" -#: cinder/auth/manager.py:315 +#: cinder/brick/initiator/linuxscsi.py:149 #, python-format -msgid "failed authorization: no project named %(pjid)s (user=%(uname)s)" -msgstr "ошибка авторизации: нет проекта под названием %(pjid)s (user=%(uname)s)" +msgid "Found multipath device = %(mdev)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:140 +msgid "Attempting recreate of backing lun..." 
+msgstr "" -#: cinder/auth/manager.py:324 +#: cinder/brick/iscsi/iscsi.py:158 #, python-format msgid "" -"Failed authorization: user %(uname)s not admin and not member of project " -"%(pjname)s" +"Failed to recover attempt to create iscsi backing lun for volume " +"id:%(vol_id)s: %(e)s" msgstr "" -"Ошибка авторизации: пользователь %(uname)s не является администратором и " -"не является участником проекта %(pjname)s" -#: cinder/auth/manager.py:331 cinder/auth/manager.py:343 +#: cinder/brick/iscsi/iscsi.py:177 #, python-format -msgid "user.secret: %s" +msgid "Creating iscsi_target for: %s" msgstr "" -#: cinder/auth/manager.py:332 cinder/auth/manager.py:344 +#: cinder/brick/iscsi/iscsi.py:184 #, python-format -msgid "expected_signature: %s" +msgid "" +"Created volume path %(vp)s,\n" +"content: %(vc)%" msgstr "" -#: cinder/auth/manager.py:333 cinder/auth/manager.py:345 -#, python-format -msgid "signature: %s" -msgstr "подпись: %s" +#: cinder/brick/iscsi/iscsi.py:216 cinder/brick/iscsi/iscsi.py:365 +#, fuzzy, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "Выполнена попытка удаления несуществующей консоли %(console_id)s." -#: cinder/auth/manager.py:335 cinder/auth/manager.py:357 +#: cinder/brick/iscsi/iscsi.py:227 #, python-format -msgid "Invalid signature for user %s" -msgstr "Не допустимая подпись для пользователя %s" +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" -#: cinder/auth/manager.py:353 +#: cinder/brick/iscsi/iscsi.py:258 #, python-format -msgid "host_only_signature: %s" +msgid "Removing iscsi_target for: %s" msgstr "" -#: cinder/auth/manager.py:449 -msgid "Must specify project" -msgstr "Необходимо указать проект" - -#: cinder/auth/manager.py:490 +#: cinder/brick/iscsi/iscsi.py:262 #, python-format -msgid "Adding role %(role)s to user %(uid)s in project %(pid)s" -msgstr "Добавление полномочий %(role)s для пользователя %(uid)s в проекте %(pid)s" +msgid "Volume path %s does not exist, nothing to remove." +msgstr "" -#: cinder/auth/manager.py:493 -#, python-format -msgid "Adding sitewide role %(role)s to user %(uid)s" +#: cinder/brick/iscsi/iscsi.py:280 +#, fuzzy, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "Выполнена попытка удаления несуществующей консоли %(console_id)s." + +#: cinder/brick/iscsi/iscsi.py:290 cinder/brick/iscsi/iscsi.py:550 +msgid "valid iqn needed for show_target" msgstr "" -#: cinder/auth/manager.py:519 +#: cinder/brick/iscsi/iscsi.py:375 #, python-format -msgid "Removing role %(role)s from user %(uid)s on project %(pid)s" -msgstr "Удаление полномочий %(role)s для пользователя %(uid)s в проекте %(pid)s" +msgid "Removing iscsi_target for volume: %s" +msgstr "" -#: cinder/auth/manager.py:522 -#, python-format -msgid "Removing sitewide role %(role)s from user %(uid)s" +#: cinder/brick/iscsi/iscsi.py:469 +msgid "cinder-rtstool is not installed correctly" msgstr "" -#: cinder/auth/manager.py:595 +#: cinder/brick/iscsi/iscsi.py:489 #, python-format -msgid "Created project %(name)s with manager %(manager_user)s" -msgstr "Созданный проект %(name)s с диспетчером %(manager_user)s" +msgid "Creating iscsi_target for volume: %s" +msgstr "" -#: cinder/auth/manager.py:613 +#: cinder/brick/iscsi/iscsi.py:513 cinder/brick/iscsi/iscsi.py:522 #, python-format -msgid "modifying project %s" -msgstr "изменение проекта %s" +msgid "Failed to create iscsi target for volume id:%s." 
+msgstr "" -#: cinder/auth/manager.py:625 -#, python-format -msgid "Adding user %(uid)s to project %(pid)s" -msgstr "Добавление пользователя %(uid)s в проект %(pid)s" +#: cinder/brick/iscsi/iscsi.py:532 +#, fuzzy, python-format +msgid "Removing iscsi_target: %s" +msgstr "Перезагрузка копии %s" -#: cinder/auth/manager.py:646 +#: cinder/brick/iscsi/iscsi.py:542 #, python-format -msgid "Remove user %(uid)s from project %(pid)s" -msgstr "Исключить пользователя %(uid)s из проекта %(pid)s" +msgid "Failed to remove iscsi target for volume id:%s." +msgstr "" -#: cinder/auth/manager.py:676 +#: cinder/brick/iscsi/iscsi.py:571 #, python-format -msgid "Deleting project %s" -msgstr "Удаление проекта %s" +msgid "Failed to add initiator iqn %s to target" +msgstr "" -#: cinder/auth/manager.py:734 -#, python-format -msgid "Created user %(rvname)s (admin: %(rvadmin)r)" -msgstr "Созданный пользователь %(rvname)s (admin: %(rvadmin)r)" +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" -#: cinder/auth/manager.py:743 +#: cinder/brick/local_dev/lvm.py:76 cinder/brick/local_dev/lvm.py:158 +#: cinder/brick/local_dev/lvm.py:474 cinder/brick/local_dev/lvm.py:503 +#: cinder/brick/local_dev/lvm.py:546 cinder/brick/local_dev/lvm.py:609 #, python-format -msgid "Deleting user %s" -msgstr "Удаление пользователя %s" +msgid "Cmd :%s" +msgstr "" -#: cinder/auth/manager.py:753 +#: cinder/brick/local_dev/lvm.py:77 cinder/brick/local_dev/lvm.py:159 +#: cinder/brick/local_dev/lvm.py:475 cinder/brick/local_dev/lvm.py:504 +#: cinder/brick/local_dev/lvm.py:547 cinder/brick/local_dev/lvm.py:610 #, python-format -msgid "Access Key change for user %s" +msgid "StdOut :%s" msgstr "" -#: cinder/auth/manager.py:755 +#: cinder/brick/local_dev/lvm.py:78 cinder/brick/local_dev/lvm.py:160 +#: cinder/brick/local_dev/lvm.py:476 cinder/brick/local_dev/lvm.py:505 +#: cinder/brick/local_dev/lvm.py:548 cinder/brick/local_dev/lvm.py:611 #, python-format -msgid "Secret Key change for user %s" +msgid "StdErr :%s" msgstr "" -#: cinder/auth/manager.py:757 -#, python-format -msgid "Admin status set to %(admin)r for user %(uid)s" -msgstr "Режим администратора назначен %(admin)r для пользователя %(uid)s" +#: cinder/brick/local_dev/lvm.py:82 +#, fuzzy, python-format +msgid "Unable to locate Volume Group %s" +msgstr "Невозможно найти том %s" -#: cinder/auth/manager.py:802 -#, python-format -msgid "No vpn data for project %s" -msgstr "Нет vpn данных для проекта %s" +#: cinder/brick/local_dev/lvm.py:157 +msgid "Error querying thin pool about data_percent" +msgstr "" -#: cinder/cloudpipe/pipelib.py:46 +#: cinder/brick/local_dev/lvm.py:370 #, fuzzy, python-format -msgid "Instance type for vpn instances" -msgstr "Недопустимый тип копии %(instance_type)s." +msgid "Unable to find VG: %s" +msgstr "Ошибка поиска vbd для vdi %s" + +#: cinder/brick/local_dev/lvm.py:420 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." 
+msgstr "" -#: cinder/cloudpipe/pipelib.py:49 -msgid "Template for cloudpipe instance boot script" +#: cinder/brick/local_dev/lvm.py:473 +msgid "Error creating Volume" msgstr "" -#: cinder/cloudpipe/pipelib.py:52 -msgid "Network to push into openvpn config" +#: cinder/brick/local_dev/lvm.py:489 +#, fuzzy, python-format +msgid "Unable to find LV: %s" +msgstr "Ошибка поиска vbd для vdi %s" + +#: cinder/brick/local_dev/lvm.py:502 +msgid "Error creating snapshot" msgstr "" -#: cinder/cloudpipe/pipelib.py:55 -msgid "Netmask to push into openvpn config" +#: cinder/brick/local_dev/lvm.py:545 +msgid "Error activating LV" msgstr "" -#: cinder/cloudpipe/pipelib.py:107 +#: cinder/brick/local_dev/lvm.py:563 #, python-format -msgid "Launching VPN for %s" -msgstr "Запуск VPN для %s" +msgid "Error reported running lvremove: CMD: %(command)s, RESPONSE: %(response)s" +msgstr "" -#: cinder/compute/api.py:141 -msgid "No compute host specified" +#: cinder/brick/local_dev/lvm.py:568 +msgid "Attempting udev settle and retry of lvremove..." msgstr "" -#: cinder/compute/api.py:144 -#, python-format -msgid "Unable to find host for Instance %s" -msgstr "Невозможно найти узел для копии %s" +#: cinder/brick/local_dev/lvm.py:608 +msgid "Error extending Volume" +msgstr "" -#: cinder/compute/api.py:192 -#, python-format -msgid "" -"Quota exceeded for %(pid)s, tried to set %(num_metadata)s metadata " -"properties" +#: cinder/brick/remotefs/remotefs.py:39 +msgid "nfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:45 +msgid "glusterfs_mount_point_base required" msgstr "" -"Превышена квота для %(pid)s, попытка назначить %(num_metadata)s свойств " -"метаданных" -#: cinder/compute/api.py:203 +#: cinder/brick/remotefs/remotefs.py:86 #, python-format -msgid "Quota exceeded for %(pid)s, metadata property key or value too long" +msgid "Already mounted: %s" msgstr "" -"Превышена квота для %(pid)s, превышено ключевое свойство метаданных или " -"значение" -#: cinder/compute/api.py:257 -#, fuzzy -msgid "Cannot run any more instances of this type." +#: cinder/common/config.py:125 +msgid "Deploy v1 of the Cinder API." msgstr "" -"Превышена квота копий. Вы не можете запустить дополнительные копии этого " -"типа." -#: cinder/compute/api.py:259 -#, fuzzy, python-format -msgid "Can only run %s more instances of this type." -msgstr "Превышена квота копий. Вы можете запустить только %s копий этого типа." +#: cinder/common/config.py:128 +msgid "Deploy v2 of the Cinder API." +msgstr "" -#: cinder/compute/api.py:261 -#, fuzzy, python-format -msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances. " -msgstr "Превышена квота для %(pid)s, попытка выполнить %(min_count)s копий" +#: cinder/common/sqlalchemyutils.py:66 +#: cinder/openstack/common/db/sqlalchemy/utils.py:72 +msgid "Id not in sort_keys; is sort_keys unique?" 
+msgstr "" -#: cinder/compute/api.py:310 -msgid "Creating a raw instance" -msgstr "Создание необработанной копии" +#: cinder/common/sqlalchemyutils.py:114 +#: cinder/openstack/common/db/sqlalchemy/utils.py:120 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" -#: cinder/compute/api.py:312 +#: cinder/compute/nova.py:97 #, python-format -msgid "Using Kernel=%(kernel_id)s, Ramdisk=%(ramdisk_id)s" +msgid "Novaclient connection created using URL: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:63 +msgid "Use of empty request context is deprecated" msgstr "" -#: cinder/compute/api.py:383 +#: cinder/db/sqlalchemy/api.py:190 #, python-format -msgid "Going to run %s instances..." -msgstr "Выполняет запуск %s копий..." +msgid "Unrecognized read_deleted value '%s'" +msgstr "Нераспознанное значение read_deleted '%s'" -#: cinder/compute/api.py:447 +#: cinder/db/sqlalchemy/api.py:843 #, python-format -msgid "bdm %s" -msgstr "bdm %s" +msgid "Change will make usage less than 0 for the following resources: %s" +msgstr "" -#: cinder/compute/api.py:474 +#: cinder/db/sqlalchemy/api.py:1842 #, python-format -msgid "block_device_mapping %s" -msgstr "block_device_mapping %s" +msgid "VolumeType %s deletion failed, VolumeType in use." +msgstr "" -#: cinder/compute/api.py:591 +#: cinder/db/sqlalchemy/api.py:2530 #, python-format -msgid "Sending create to scheduler for %(pid)s/%(uid)s's" -msgstr "Отправка операции создания в расписание для %(pid)s/%(uid)s's" +msgid "No backup with id %s" +msgstr "" -#: cinder/compute/api.py:871 +#: cinder/db/sqlalchemy/api.py:2615 #, fuzzy -msgid "Going to try to soft delete instance" -msgstr "Выполнение запуска копий" +msgid "Volume must be available" +msgstr "Состояние тома должно быть доступно" -#: cinder/compute/api.py:891 -#, fuzzy, python-format -msgid "No host for instance, deleting immediately" -msgstr "Отсутствует узел для копии %s, немедленное удаление" +#: cinder/db/sqlalchemy/api.py:2639 +#, python-format +msgid "Volume in unexpected state %s, expected awaiting-transfer" +msgstr "" -#: cinder/compute/api.py:939 -#, fuzzy, python-format -msgid "Going to try to terminate instance" -msgstr "Будет выполнения попытка завершить работу %s" +#: cinder/db/sqlalchemy/api.py:2662 +#, python-format +msgid "" +"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " +"%(status)s, expected awaiting-transfer" +msgstr "" -#: cinder/compute/api.py:977 -#, fuzzy, python-format -msgid "Going to try to stop instance" -msgstr "Попытка остановить %s" +#: cinder/db/sqlalchemy/migration.py:37 +msgid "version should be an integer" +msgstr "" -#: cinder/compute/api.py:996 -#, fuzzy, python-format -msgid "Going to try to start instance" -msgstr "Попытка запустить %s" +#: cinder/db/sqlalchemy/migration.py:64 +msgid "Upgrade DB using Essex release first." +msgstr "" -#: cinder/compute/api.py:1000 -#, python-format -msgid "Instance %(instance_uuid)s is not stopped. (%(vm_state)s" -msgstr "Копия %(instance_uuid)s не остановлена. (%(vm_state)s" +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:240 +msgid "Exception while creating table." +msgstr "" -#: cinder/compute/api.py:1071 cinder/volume/api.py:173 -#: cinder/volume/volume_types.py:64 -#, python-format -msgid "Searching by: %s" -msgstr "Поиск по: %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:269 +msgid "Downgrade from initial Cinder install is unsupported." 
+msgstr "" -#: cinder/compute/api.py:1201 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:49 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:74 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:105 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:45 +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:48 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:46 #, python-format -msgid "Image type not recognized %s" -msgstr "Тип образа не распознан %s" +msgid "Table |%s| not created!" +msgstr "Таблица |%s| не создана!" -#: cinder/compute/api.py:1369 -msgid "flavor_id is None. Assuming migration." +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:127 +msgid "Dropping foreign key reservations_ibfk_1 failed." msgstr "" -#: cinder/compute/api.py:1377 -#, python-format -msgid "" -"Old instance type %(current_instance_type_name)s, new instance type " -"%(new_instance_type_name)s" +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:133 +msgid "quota_classes table not dropped" msgstr "" -"Устаревший тип копии %(current_instance_type_name)s, новый тип копии " -"%(new_instance_type_name)s" -#: cinder/compute/api.py:1644 -#, python-format -msgid "multiple fixedips exist, using the first: %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:140 +msgid "quota_usages table not dropped" msgstr "" -#: cinder/compute/instance_types.py:57 cinder/compute/instance_types.py:65 -msgid "create arguments must be positive integers" +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:147 +msgid "reservations table not dropped" msgstr "" -#: cinder/compute/instance_types.py:76 cinder/volume/volume_types.py:41 -#, python-format -msgid "DB error: %s" -msgstr "Ошибка БД: %s" - -#: cinder/compute/instance_types.py:86 -#, python-format -msgid "Instance type %s not found for deletion" -msgstr "Копия типа %s не найдена для выполнения удаления" +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:60 +msgid "Exception while creating table 'volume_glance_metadata'" +msgstr "" -#: cinder/compute/manager.py:138 -#, python-format -msgid "check_instance_lock: decorating: |%s|" -msgstr "check_instance_lock: оформление: |%s|" +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:75 +msgid "volume_glance_metadata table not dropped" +msgstr "" -#: cinder/compute/manager.py:140 -#, python-format -msgid "" -"check_instance_lock: arguments: |%(self)s| |%(context)s| " -"|%(instance_uuid)s|" +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:68 +msgid "backups table not dropped" msgstr "" -"check_instance_lock: аргументы: |%(self)s| |%(context)s| " -"|%(instance_uuid)s|" -#: cinder/compute/manager.py:144 -#, python-format -msgid "check_instance_lock: locked: |%s|" -msgstr "check_instance_lock: заблокирован: |%s|" +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:58 +msgid "snapshot_metadata table not dropped" +msgstr "" -#: cinder/compute/manager.py:146 -#, python-format -msgid "check_instance_lock: admin: |%s|" -msgstr "check_instance_lock: администратор: |%s|" +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:61 +msgid "transfers table not dropped" +msgstr "" -#: cinder/compute/manager.py:151 
-#, python-format -msgid "check_instance_lock: executing: |%s|" -msgstr "check_instance_lock: исполнение: |%s|" +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:31 +msgid "migrations table not dropped" +msgstr "" -#: cinder/compute/manager.py:155 +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:61 #, python-format -msgid "check_instance_lock: not executing |%s|" +msgid "Table |%s| not created" msgstr "" -#: cinder/compute/manager.py:201 +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:37 #, python-format -msgid "Unable to load the virtualization driver: %s" -msgstr "Невозможно загрузить драйвер виртуализации: %s" +msgid "Exception while dropping table %s." +msgstr "" -#: cinder/compute/manager.py:223 +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:100 #, python-format -msgid "" -"Instance %(instance_uuid)s has been destroyed from under us while trying " -"to set it to ERROR" +msgid "Exception while creating table %s." msgstr "" -#: cinder/compute/manager.py:240 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:34 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:43 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:49 #, python-format -msgid "Current state is %(drv_state)s, state in DB is %(db_state)s." +msgid "Column |%s| not created!" msgstr "" -#: cinder/compute/manager.py:245 -msgid "Rebooting instance after cinder-compute restart." +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:92 +msgid "encryption_key_id column not dropped from volumes" msgstr "" -#: cinder/compute/manager.py:255 -msgid "Hypervisor driver does not support firewall rules" -msgstr "Драйвер гипервизора не поддерживает правила брандмауэра" - -#: cinder/compute/manager.py:260 -msgid "Checking state" +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:100 +msgid "encryption_key_id column not dropped from snapshots" msgstr "" -#: cinder/compute/manager.py:329 -#, python-format -msgid "Setting up bdm %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:105 +msgid "volume_type_id column not dropped from snapshots" msgstr "" -#: cinder/compute/manager.py:400 -#, python-format -msgid "Instance %s already deleted from database. Attempting forceful vm deletion" +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:113 +msgid "encryption table not dropped" msgstr "" -#: cinder/compute/manager.py:406 -#, fuzzy, python-format -msgid "Exception encountered while terminating the instance %s" -msgstr "исключение завершает работу копии %(instance_uuid)s" - -#: cinder/compute/manager.py:444 -#, python-format -msgid "Instance %s not found." -msgstr "Копия %s не найдена." - -#: cinder/compute/manager.py:480 -msgid "Instance has already been created" -msgstr "Копия уже была создана" - -#: cinder/compute/manager.py:523 -#, python-format -msgid "" -"image_id=%(image_id)s, image_size_bytes=%(size_bytes)d, " -"allowed_size_bytes=%(allowed_size_bytes)d" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:49 +msgid "Table quality_of_service_specs not created!" 
msgstr "" -"image_id=%(image_id)s, image_size_bytes=%(size_bytes)d, " -"allowed_size_bytes=%(allowed_size_bytes)d" -#: cinder/compute/manager.py:528 -#, python-format -msgid "" -"Image '%(image_id)s' size %(size_bytes)d exceeded instance_type allowed " -"size %(allowed_size_bytes)d" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:60 +msgid "Added qos_specs_id column to volume type table failed." msgstr "" -"Образ '%(image_id)s' размером %(size_bytes)d, превышает exceeded " -"instance_type допустимый размер %(allowed_size_bytes)d" -#: cinder/compute/manager.py:538 -msgid "Starting instance..." +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:85 +msgid "Dropping foreign key volume_types_ibfk_1 failed" msgstr "" -#: cinder/compute/manager.py:548 -msgid "Skipping network allocation for instance" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:93 +msgid "Dropping qos_specs_id column failed." msgstr "" -#: cinder/compute/manager.py:561 -msgid "Instance failed network setup" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:100 +msgid "Dropping quality_of_service_specs table failed." msgstr "" -#: cinder/compute/manager.py:565 -#, python-format -msgid "Instance network_info: |%s|" +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:59 +msgid "volume_admin_metadata table not dropped" msgstr "" -#: cinder/compute/manager.py:578 -msgid "Instance failed block device setup" +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:48 +msgid "" +"Found existing 'default' entries in the quota_classes table. Skipping " +"insertion of default values." msgstr "" -#: cinder/compute/manager.py:594 -msgid "Instance failed to spawn" +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:72 +msgid "Added default quota class data into the DB." msgstr "" -#: cinder/compute/manager.py:615 -msgid "Deallocating network for instance" +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:74 +msgid "Default quota class data not inserted into the DB." msgstr "" -#: cinder/compute/manager.py:672 +#: cinder/image/glance.py:161 cinder/image/glance.py:169 #, python-format -msgid "%(action_str)s instance" +msgid "Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s." msgstr "" -#: cinder/compute/manager.py:699 -#, python-format -msgid "Ignoring DiskNotFound: %s" -msgstr "Пропуск DiskNotFound: %s" +#: cinder/image/image_utils.py:94 cinder/image/image_utils.py:199 +msgid "'qemu-img info' parsing failed." +msgstr "Ошибка анализа 'qemu-img info'." -#: cinder/compute/manager.py:708 +#: cinder/image/image_utils.py:101 #, python-format -msgid "terminating bdm %s" -msgstr "завершение работы bdm %s" +msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgstr "" -#: cinder/compute/manager.py:742 cinder/compute/manager.py:1328 -#: cinder/compute/manager.py:1416 cinder/compute/manager.py:2501 +#: cinder/image/image_utils.py:109 cinder/image/image_utils.py:192 #, python-format -msgid "%s. Setting instance vm_state to ERROR" -msgstr "%s. Установка состояния копии vm_state на ERROR" +msgid "" +"Size is %(image_size)dGB and doesn't fit in a volume of size " +"%(volume_size)dGB." +msgstr "" -#: cinder/compute/manager.py:811 +#: cinder/image/image_utils.py:157 #, python-format msgid "" -"Cannot rebuild instance [%(instance_uuid)s], because the given image does" -" not exist." +"qemu-img is not installed and image is of type %s. 
Only RAW images can " +"be used if qemu-img is not installed." msgstr "" -#: cinder/compute/manager.py:816 -#, python-format -msgid "Cannot rebuild instance [%(instance_uuid)s]: %(exc)s" +#: cinder/image/image_utils.py:164 +msgid "" +"qemu-img is not installed and the disk format is not specified. Only RAW" +" images can be used if qemu-img is not installed." msgstr "" -#: cinder/compute/manager.py:823 +#: cinder/image/image_utils.py:178 #, python-format -msgid "Rebuilding instance %s" -msgstr "Обновление сборки %s" +msgid "Copying image from %(tmp)s to volume %(dest)s - size: %(size)s" +msgstr "" -#: cinder/compute/manager.py:876 +#: cinder/image/image_utils.py:206 #, python-format -msgid "Rebooting instance %s" -msgstr "Перезагрузка копии %s" +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" -#: cinder/compute/manager.py:891 +#: cinder/image/image_utils.py:224 #, python-format -msgid "" -"trying to reboot a non-running instance: %(instance_uuid)s (state: " -"%(state)s expected: %(running)s)" +msgid "Converted to %(vol_format)s, but format is now %(file_format)s" msgstr "" -"попытка перезагрузки не выполняемой копии: %(instance_uuid)s (состояние: " -"%(state)s ожидалось: %(running)s)" -#: cinder/compute/manager.py:933 -#, python-format -msgid "instance %s: snapshotting" -msgstr "копия %s: выполнение снимка" +#: cinder/image/image_utils.py:260 +#, fuzzy, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "Преобразование в необработанный, но текущий формат %s" -#: cinder/compute/manager.py:939 -#, python-format +#: cinder/keymgr/conf_key_mgr.py:78 msgid "" -"trying to snapshot a non-running instance: %(instance_uuid)s (state: " -"%(state)s expected: %(running)s)" +"config option keymgr.fixed_key has not been defined: some operations may " +"fail unexpectedly" msgstr "" -"попытка создания снимка не выполняемой копии: %(instance_uuid)s " -"(состояние: %(state)s ожидалось: %(running)s)" -#: cinder/compute/manager.py:995 -#, python-format -msgid "Found %(num_images)d images (rotation: %(rotation)d)" -msgstr "Найдено %(num_images)d образов (ротация: %(rotation)d)" +#: cinder/keymgr/conf_key_mgr.py:80 +msgid "keymgr.fixed_key not defined" +msgstr "" -#: cinder/compute/manager.py:1001 +#: cinder/keymgr/conf_key_mgr.py:134 #, python-format -msgid "Rotating out %d backups" +msgid "Not deleting key %s" msgstr "" -#: cinder/compute/manager.py:1005 +#: cinder/openstack/common/eventlet_backdoor.py:140 #, python-format -msgid "Deleting image %s" -msgstr "Удаление образа %s" +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" +msgstr "" -#: cinder/compute/manager.py:1035 -#, python-format -msgid "Failed to set admin password. Instance %s is not running" -msgstr "Невозможно назначить пароль администратора. Копия %s не выполняется" +#: cinder/openstack/common/excutils.py:48 +#, fuzzy, python-format +msgid "Original exception being dropped: %s" +msgstr "Исходное исключение было сброшено" -#: cinder/compute/manager.py:1041 +#: cinder/openstack/common/fileutils.py:64 #, python-format -msgid "Instance %s: Root password set" -msgstr "Копия %s: Назначение административного пароля" - -#: cinder/compute/manager.py:1050 -msgid "set_admin_password is not implemented by this driver." -msgstr "set_admin_password не реализован в этой драйвере." 
+msgid "Reloading cached file %s" +msgstr "" -#: cinder/compute/manager.py:1064 -msgid "Error setting admin password" -msgstr "Ошибка назначения пароля администратора" +#: cinder/openstack/common/gettextutils.py:252 +msgid "Message objects do not support addition." +msgstr "" -#: cinder/compute/manager.py:1079 -#, python-format +#: cinder/openstack/common/gettextutils.py:261 msgid "" -"trying to inject a file into a non-running instance: %(instance_uuid)s " -"(state: %(current_power_state)s expected: %(expected_state)s)" +"Message objects do not support str() because they may contain non-ascii " +"characters. Please use unicode() or translate() instead." +msgstr "" + +#: cinder/openstack/common/imageutils.py:96 +msgid "Snapshot list encountered but no header found!" msgstr "" -#: cinder/compute/manager.py:1084 +#: cinder/openstack/common/lockutils.py:102 #, python-format -msgid "instance %(instance_uuid)s: injecting file to %(path)s" +msgid "Could not release the acquired lock `%s`" msgstr "" -#: cinder/compute/manager.py:1098 +#: cinder/openstack/common/lockutils.py:189 #, python-format -msgid "" -"trying to update agent on a non-running instance: %(instance_uuid)s " -"(state: %(current_power_state)s expected: %(expected_state)s)" +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." msgstr "" -#: cinder/compute/manager.py:1103 +#: cinder/openstack/common/lockutils.py:200 #, python-format -msgid "instance %(instance_uuid)s: updating agent to %(url)s" -msgstr "копия %(instance_uuid)s: обновление агента до %(url)s" +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" -#: cinder/compute/manager.py:1116 +#: cinder/openstack/common/lockutils.py:227 #, python-format -msgid "instance %s: rescuing" +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." msgstr "" -#: cinder/compute/manager.py:1141 +#: cinder/openstack/common/lockutils.py:235 #, python-format -msgid "instance %s: unrescuing" +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." msgstr "" -#: cinder/compute/manager.py:1270 -msgid "destination same as source!" -msgstr "назначение совпадает с источником!" 
+#: cinder/openstack/common/log.py:301 +#, fuzzy, python-format +msgid "Deprecated: %s" +msgstr "_удалить: %s" -#: cinder/compute/manager.py:1287 +#: cinder/openstack/common/log.py:402 #, python-format -msgid "instance %s: migrating" -msgstr "копия %s: перемещение" +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" -#: cinder/compute/manager.py:1471 +#: cinder/openstack/common/log.py:453 #, python-format -msgid "instance %s: pausing" -msgstr "копия %s: приостановление" +msgid "syslog facility must be one of: %s" +msgstr "" -#: cinder/compute/manager.py:1489 -#, python-format -msgid "instance %s: unpausing" -msgstr "копия %s: снятие с приостановления" +#: cinder/openstack/common/log.py:623 +#, fuzzy, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "Класс %(fullname)s устарел: %(msg)s" -#: cinder/compute/manager.py:1525 +#: cinder/openstack/common/loopingcall.py:82 #, python-format -msgid "instance %s: retrieving diagnostics" -msgstr "копия %s: принятие диагностики" +msgid "task run outlasted interval by %s sec" +msgstr "" -#: cinder/compute/manager.py:1534 -#, python-format -msgid "instance %s: suspending" -msgstr "копия %s: приостановление" +#: cinder/openstack/common/loopingcall.py:89 +#: cinder/tests/brick/test_brick_connector.py:466 +msgid "in fixed duration looping call" +msgstr "" -#: cinder/compute/manager.py:1556 +#: cinder/openstack/common/loopingcall.py:129 #, python-format -msgid "instance %s: resuming" -msgstr "копия %s: возобновление" +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" -#: cinder/compute/manager.py:1579 -#, python-format -msgid "instance %s: locking" -msgstr "копия %s: блокирование" +#: cinder/openstack/common/loopingcall.py:136 +msgid "in dynamic looping call" +msgstr "" -#: cinder/compute/manager.py:1588 +#: cinder/openstack/common/periodic_task.py:43 #, python-format -msgid "instance %s: unlocking" -msgstr "копия %s: разблокирование" +msgid "Unexpected argument for periodic task creation: %(arg)s." 
+msgstr "" -#: cinder/compute/manager.py:1596 +#: cinder/openstack/common/periodic_task.py:134 #, python-format -msgid "instance %s: getting locked state" -msgstr "копия %s: получение заблокированного состояния" +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" -#: cinder/compute/manager.py:1606 +#: cinder/openstack/common/periodic_task.py:139 #, python-format -msgid "instance %s: reset network" -msgstr "копия %s: сброс сети" - -#: cinder/compute/manager.py:1614 -#, python-format -msgid "instance %s: inject network info" +msgid "Skipping periodic task %(task)s because it is disabled" msgstr "" -#: cinder/compute/manager.py:1618 +#: cinder/openstack/common/periodic_task.py:177 #, python-format -msgid "network_info to inject: |%s|" -msgstr "" +msgid "Running periodic task %(full_task_name)s" +msgstr "Запуск повторяющегося задания %(full_task_name)s" -#: cinder/compute/manager.py:1655 +#: cinder/openstack/common/periodic_task.py:186 #, python-format -msgid "instance %s: getting vnc console" -msgstr "копия %s: получение консоли vnc" +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "Ошибка во время %(full_task_name)s: %(e)s" -#: cinder/compute/manager.py:1685 +#: cinder/openstack/common/policy.py:149 #, python-format -msgid "Booting with volume %(volume_id)s at %(mountpoint)s" -msgstr "" - -#: cinder/compute/manager.py:1703 -#, fuzzy, python-format msgid "" -"instance %(instance_uuid)s: attaching volume %(volume_id)s to " -"%(mountpoint)s" -msgstr "копия %(instance_uuid)s: обновление агента до %(url)s" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." +msgstr "" -#: cinder/compute/manager.py:1705 +#: cinder/openstack/common/policy.py:163 #, python-format -msgid "Attaching volume %(volume_id)s to %(mountpoint)s" +msgid "Failed to understand rule %(match)r" msgstr "" -#: cinder/compute/manager.py:1714 -#, fuzzy, python-format -msgid "instance %(instance_uuid)s: attach failed %(mountpoint)s, removing" -msgstr "Копия %(instance_id)s не переведена в режим восстановления" - -#: cinder/compute/manager.py:1724 +#: cinder/openstack/common/policy.py:173 #, python-format -msgid "Attach failed %(mountpoint)s, removing" +msgid "Inheritance-based rules are deprecated; update _check_%s" msgstr "" -#: cinder/compute/manager.py:1752 +#: cinder/openstack/common/policy.py:180 #, python-format -msgid "Detach volume %(volume_id)s from mountpoint %(mp)s" +msgid "No handler for matches of kind %s" msgstr "" -#: cinder/compute/manager.py:1756 +#: cinder/openstack/common/processutils.py:127 #, python-format -msgid "Detaching volume from unknown instance %s" -msgstr "Отсоединение тома от неизвестной копии %s" +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "Приняты неизвестные аргументы ключевого слова для utils.execute: %r" -#: cinder/compute/manager.py:1822 +#: cinder/openstack/common/processutils.py:142 #, python-format -msgid "" -"Creating tmpfile %s to notify to other compute nodes that they should " -"mount the same storage." -msgstr "" -"Создание временного файла %s для оповещения других узлов сети compute о " -"необходимости присоединения того же хранилища." +msgid "Running cmd (subprocess): %s" +msgstr "Выполнение команды (субпроцесс): %s" -#: cinder/compute/manager.py:1884 -msgid "Instance has no volume." 
-msgstr "" +#: cinder/openstack/common/processutils.py:167 +#: cinder/openstack/common/processutils.py:239 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:345 +#, python-format +msgid "Result was %s" +msgstr "Результат %s" -#: cinder/compute/manager.py:1916 +#: cinder/openstack/common/processutils.py:179 #, python-format -msgid "plug_vifs() failed %(cnt)d.Retry up to %(max_retry)d for %(hostname)s." -msgstr "plug_vifs() ошибка %(cnt)d.До %(max_retry)d попыток для %(hostname)s." +msgid "%r failed. Retrying." +msgstr "%r ошибка. Выполняется повтор." -#: cinder/compute/manager.py:1973 +#: cinder/openstack/common/processutils.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:318 #, python-format -msgid "Pre live migration failed at %(dest)s" -msgstr "" +msgid "Running cmd (SSH): %s" +msgstr "Выполнение команды (SSH): %s" -#: cinder/compute/manager.py:2000 -msgid "post_live_migration() is started.." -msgstr "Запущено post_live_migration().." +#: cinder/openstack/common/processutils.py:220 +msgid "Environment not supported over SSH" +msgstr "Среда не поддерживается с использованием SSH" -#: cinder/compute/manager.py:2030 -msgid "No floating_ip found" -msgstr "" +#: cinder/openstack/common/processutils.py:224 +msgid "process_input not supported over SSH" +msgstr "process_input не поддерживается с использованием SSH" -#: cinder/compute/manager.py:2038 -msgid "No floating_ip found." -msgstr "" +#: cinder/openstack/common/service.py:175 +#: cinder/openstack/common/service.py:269 +#, fuzzy, python-format +msgid "Caught %s, exiting" +msgstr "снимок %s: удаление" -#: cinder/compute/manager.py:2040 -#, python-format -msgid "" -"Live migration: Unexpected error: cannot inherit floating ip.\n" -"%(e)s" +#: cinder/openstack/common/service.py:187 +msgid "Exception during rpc cleanup." msgstr "" -#: cinder/compute/manager.py:2073 -#, python-format -msgid "Migrating instance to %(dest)s finished successfully." +#: cinder/openstack/common/service.py:238 +msgid "Parent process has died unexpectedly, exiting" msgstr "" -#: cinder/compute/manager.py:2075 -msgid "" -"You may see the error \"libvirt: QEMU error: Domain not found: no domain " -"with matching name.\" This error can be safely ignored." -msgstr "" -"Вы можете увидеть ошибку \"libvirt: ошибка QEMU: Домен не найден: " -"отсутствует домен с соответствующим именем.\" Эта ошибка может быть " -"безопасно пропущена." +#: cinder/openstack/common/service.py:275 +#, fuzzy +msgid "Unhandled exception" +msgstr "Вложенное исключение: %s" -#: cinder/compute/manager.py:2090 -msgid "Post operation of migration started" +#: cinder/openstack/common/service.py:308 +msgid "Forking too fast, sleeping" msgstr "" -#: cinder/compute/manager.py:2226 +#: cinder/openstack/common/service.py:327 #, python-format -msgid "Updated the info_cache for instance %s" +msgid "Started child %d" msgstr "" -#: cinder/compute/manager.py:2255 -msgid "Updating bandwidth usage cache" -msgstr "Обновление временных данных использования полосы пропускания" - -#: cinder/compute/manager.py:2277 -msgid "Updating host status" -msgstr "Обновление состояния узла" +#: cinder/openstack/common/service.py:337 +#, fuzzy, python-format +msgid "Starting %d workers" +msgstr "установка сетевого узла" -#: cinder/compute/manager.py:2305 +#: cinder/openstack/common/service.py:354 #, python-format -msgid "" -"Found %(num_db_instances)s in the database and %(num_vm_instances)s on " -"the hypervisor." 
+msgid "Child %(pid)d killed by signal %(sig)d" msgstr "" -"Найдено %(num_db_instances)s в базе данных и %(num_vm_instances)s в " -"гипервизоре." -#: cinder/compute/manager.py:2331 +#: cinder/openstack/common/service.py:358 #, python-format -msgid "" -"During the sync_power process the instance %(uuid)s has moved from host " -"%(src)s to host %(dst)s" +msgid "Child %(pid)s exited with status %(code)d" msgstr "" -#: cinder/compute/manager.py:2344 +#: cinder/openstack/common/service.py:362 #, python-format -msgid "" -"Instance %s is in the process of migrating to this host. Wait next " -"sync_power cycle before setting power state to NOSTATE" +msgid "pid %d not in child list" msgstr "" -#: cinder/compute/manager.py:2350 -msgid "" -"Instance found in database but not known by hypervisor. Setting power " -"state to NOSTATE" +#: cinder/openstack/common/service.py:392 +#, python-format +msgid "Caught %s, stopping children" msgstr "" -#: cinder/compute/manager.py:2380 -msgid "FLAGS.reclaim_instance_interval <= 0, skipping..." -msgstr "FLAGS.reclaim_instance_interval <= 0, пропуск..." - -#: cinder/compute/manager.py:2392 -msgid "Reclaiming deleted instance" +#: cinder/openstack/common/service.py:410 +#, python-format +msgid "Waiting on %d children to exit" msgstr "" -#: cinder/compute/manager.py:2458 +#: cinder/openstack/common/sslutils.py:98 #, python-format -msgid "" -"Detected instance with name label '%(name)s' which is marked as DELETED " -"but still present on host." +msgid "Invalid SSL version : %s" msgstr "" -#: cinder/compute/manager.py:2465 +#: cinder/openstack/common/strutils.py:86 #, python-format -msgid "" -"Destroying instance with name label '%(name)s' which is marked as DELETED" -" but still present on host." +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" msgstr "" -#: cinder/compute/manager.py:2472 +#: cinder/openstack/common/strutils.py:182 #, python-format -msgid "Unrecognized value '%(action)s' for FLAGS.running_deleted_instance_action" +msgid "Invalid string format: %s" msgstr "" -"Не распознанное значение '%(action)s' для " -"FLAGS.running_deleted_instance_action" -#: cinder/compute/manager.py:2542 +#: cinder/openstack/common/strutils.py:189 #, python-format -msgid "" -"Aggregate %(aggregate_id)s: unrecoverable state during operation on " -"%(host)s" +msgid "Unknown byte multiplier: %s" msgstr "" -#: cinder/compute/utils.py:142 -msgid "v4 subnets are required for legacy nw_info" +#: cinder/openstack/common/versionutils.py:69 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and " +"may be removed in %(remove_in)s." msgstr "" -#: cinder/console/manager.py:77 cinder/console/vmrc_manager.py:70 -msgid "Adding console" -msgstr "Добавление консоли" - -#: cinder/console/manager.py:97 +#: cinder/openstack/common/versionutils.py:73 #, python-format -msgid "Tried to remove non-existent console %(console_id)s." -msgstr "Выполнена попытка удаления несуществующей консоли %(console_id)s." +msgid "" +"%(what)s is deprecated as of %(as_of)s and may be removed in " +"%(remove_in)s. It will not be superseded." +msgstr "" -#: cinder/console/vmrc_manager.py:122 -#, python-format -msgid "Tried to remove non-existent console %(console_id)s." -msgstr "Попытка удаления несуществующей консоли %(console_id)s." +#: cinder/openstack/common/crypto/utils.py:29 +msgid "An unknown error occurred in crypto utils." 
+msgstr "" -#: cinder/console/vmrc_manager.py:125 +#: cinder/openstack/common/crypto/utils.py:36 #, python-format -msgid "Removing console %(console_id)s." -msgstr "Удаление консоли %(console_id)s." - -#: cinder/console/xvp.py:98 -msgid "Rebuilding xvp conf" -msgstr "Обновление сборки xvp conf" +msgid "Block size of %(given)d is too big, max = %(maximum)d" +msgstr "" -#: cinder/console/xvp.py:116 +#: cinder/openstack/common/crypto/utils.py:45 #, python-format -msgid "Re-wrote %s" +msgid "Length of %(given)d is too long, max = %(maximum)d" msgstr "" -#: cinder/console/xvp.py:121 -msgid "Stopping xvp" -msgstr "Выполнение останова xvp" +#: cinder/openstack/common/db/exception.py:44 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" -#: cinder/console/xvp.py:134 -msgid "Starting xvp" -msgstr "Запуск xvp" +#: cinder/openstack/common/db/sqlalchemy/session.py:487 +msgid "DB exception wrapped." +msgstr "" -#: cinder/console/xvp.py:141 +#: cinder/openstack/common/db/sqlalchemy/session.py:538 #, python-format -msgid "Error starting xvp: %s" -msgstr "Ошибка запуска xvp: %s" +msgid "Got mysql server has gone away: %s" +msgstr "" -#: cinder/console/xvp.py:144 -msgid "Restarting xvp" -msgstr "Выполнение перезагрузки xvp" +#: cinder/openstack/common/db/sqlalchemy/session.py:610 +#, fuzzy, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "Ошибка соединения с SQL (%(connstring)s). %(attempts)d попыток осталось." -#: cinder/console/xvp.py:146 -msgid "xvp not running..." -msgstr "xvp не выполняется..." +#: cinder/openstack/common/db/sqlalchemy/utils.py:33 +msgid "Sort key supplied was not valid." +msgstr "" -#: cinder/consoleauth/manager.py:63 +#: cinder/openstack/common/notifier/api.py:129 #, python-format -msgid "Deleting Expired Token: (%s)" -msgstr "Удаление токена с истёкшим сроком: (%s)" +msgid "%s not in valid priorities" +msgstr "%s не в допустимых приоритетах" -#: cinder/consoleauth/manager.py:75 +#: cinder/openstack/common/notifier/api.py:145 #, python-format -msgid "Received Token: %(token)s, %(token_dict)s)" -msgstr "Полученный токен: %(token)s, %(token_dict)s)" +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" +"Неполадка '%(e)s', попытка отправить в систему уведомлений. " +"Нагрузка=%(payload)s" -#: cinder/consoleauth/manager.py:79 +#: cinder/openstack/common/notifier/api.py:164 #, python-format -msgid "Checking Token: %(token)s, %(token_valid)s)" -msgstr "Проверка токена: %(token)s, %(token_valid)s)" +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" -#: cinder/db/sqlalchemy/api.py:57 -msgid "Use of empty request context is deprecated" +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead." msgstr "" -#: cinder/db/sqlalchemy/api.py:198 +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 #, python-format -msgid "Unrecognized read_deleted value '%s'" -msgstr "Нераспознанное значение read_deleted '%s'" +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" -#: cinder/db/sqlalchemy/api.py:516 cinder/db/sqlalchemy/api.py:551 +#: cinder/openstack/common/rpc/__init__.py:105 #, python-format -msgid "No ComputeNode for %(host)s" +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. 
Include the "
+"following: [%(stack)s]."
 msgstr ""

-#: cinder/db/sqlalchemy/api.py:4019 cinder/db/sqlalchemy/api.py:4045
-#, python-format
-msgid "No backend config with id %(sm_backend_id)s"
+#: cinder/openstack/common/rpc/amqp.py:83
+msgid "Pool creating new connection"
 msgstr ""

-#: cinder/db/sqlalchemy/api.py:4103
+#: cinder/openstack/common/rpc/amqp.py:208
 #, python-format
-msgid "No sm_flavor called %(sm_flavor)s"
+msgid "no calling threads waiting for msg_id : %s, message : %s"
 msgstr ""

-#: cinder/db/sqlalchemy/api.py:4147
+#: cinder/openstack/common/rpc/amqp.py:216
 #, python-format
-msgid "No sm_volume with id %(volume_id)s"
+msgid ""
+"Number of call waiters is greater than warning threshhold: %d. There "
+"could be a MulticallProxyWaiter leak."
 msgstr ""

-#: cinder/db/sqlalchemy/migration.py:66
-msgid "python-migrate is not installed. Exiting."
-msgstr "python-migrate не установлен. Выход."
+#: cinder/openstack/common/rpc/amqp.py:299
+#, python-format
+msgid "unpacked context: %s"
+msgstr "распакованный контекст: %s"

-#: cinder/db/sqlalchemy/migration.py:78
-msgid "version should be an integer"
+#: cinder/openstack/common/rpc/amqp.py:345
+#, python-format
+msgid "UNIQUE_ID is %s."
 msgstr ""

-#: cinder/db/sqlalchemy/session.py:137
-#, fuzzy, python-format
-msgid "SQL connection failed. %s attempts left."
-msgstr "Ошибка соединения с SQL (%(connstring)s). %(attempts)d попыток осталось."
-
-#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:48
-msgid "interface column not added to networks table"
-msgstr "столбец интерфейса не добавлен в таблицу сетей"
-
-#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:80
-#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:54
-#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:61
-#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:48
+#: cinder/openstack/common/rpc/amqp.py:414
 #, python-format
-msgid "Table |%s| not created!"
-msgstr "Таблица |%s| не создана!"
+msgid "received %s" +msgstr "получено %s" -#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:87 -msgid "VIF column not added to fixed_ips table" -msgstr "Столбец VIF не добавлен в таблицу fixed_ips" +#: cinder/openstack/common/rpc/amqp.py:422 +#, python-format +msgid "no method for message: %s" +msgstr "не определен метод для сообщения: %s" -#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:97 +#: cinder/openstack/common/rpc/amqp.py:423 #, python-format -msgid "join list for moving mac_addresses |%s|" -msgstr "" +msgid "No method for message: %s" +msgstr "Не определен метод для сообщения: %s" -#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:39 -#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:60 -#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:61 -#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:99 -msgid "foreign key constraint couldn't be added" +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:280 +#, python-format +msgid "Expected exception during message handling (%s)" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:58 -msgid "foreign key constraint couldn't be dropped" +#: cinder/openstack/common/rpc/amqp.py:459 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/045_add_network_priority.py:34 -msgid "priority column not added to networks table" -msgstr "столбец приоритета не добавлен в таблицу сетей" - -#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:41 -#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:42 -#: cinder/db/sqlalchemy/migrate_repo/versions/064_change_instance_id_to_uuid_in_instance_actions.py:56 -#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:68 -msgid "foreign key constraint couldn't be removed" -msgstr "ограничения внешнего ключа не могут быть удалены" +#: cinder/openstack/common/rpc/amqp.py:594 +#, fuzzy, python-format +msgid "Making synchronous call on %s ..." +msgstr "Выполнение асинхронного вызова %s ..." -#: cinder/db/sqlalchemy/migrate_repo/versions/049_add_instances_progress.py:34 -msgid "progress column not added to instances table" -msgstr "столбец выполнения не добавлен в таблицу копий" +#: cinder/openstack/common/rpc/amqp.py:597 +#, python-format +msgid "MSG_ID is %s" +msgstr "MSG_ID is %s" -#: cinder/db/sqlalchemy/migrate_repo/versions/055_convert_flavor_id_to_str.py:97 +#: cinder/openstack/common/rpc/amqp.py:631 #, python-format -msgid "" -"Could not cast flavorid to integer: %s. Set flavorid to an integer-like " -"string to downgrade." +msgid "Making asynchronous cast on %s..." msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:69 -msgid "instance_info_caches tables not dropped" +#: cinder/openstack/common/rpc/amqp.py:640 +msgid "Making asynchronous fanout cast..." 
msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/069_block_migration.py:41 -msgid "progress column not added to compute_nodes table" -msgstr "столбец выполнения не добавлен в таблицу compute_nodes" - -#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:76 -msgid "dns_domains table not dropped" +#: cinder/openstack/common/rpc/amqp.py:668 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:60 -msgid "quota_classes table not dropped" +#: cinder/openstack/common/rpc/common.py:74 +#, fuzzy +msgid "An unknown RPC related exception occurred." +msgstr "Обнаружено неизвестное исключение." + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." msgstr "" +"Удалённая ошибка: %(exc_type)s %(value)s\n" +"%(traceback)s." -#: cinder/image/glance.py:147 -msgid "Connection error contacting glance server, retrying" +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" msgstr "" -#: cinder/image/glance.py:153 cinder/network/quantum/melange_connection.py:104 -msgid "Maximum attempts reached" +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" msgstr "" -#: cinder/image/glance.py:278 +#: cinder/openstack/common/rpc/common.py:143 #, python-format -msgid "Creating image in Glance. Metadata passed in %s" +msgid "Found duplicate message(%(msg_id)s). Skipping it." msgstr "" -#: cinder/image/glance.py:281 -#, python-format -msgid "Metadata after formatting for Glance %s" +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." msgstr "" -#: cinder/image/glance.py:289 +#: cinder/openstack/common/rpc/common.py:151 #, python-format -msgid "Metadata returned from Glance formatted for Base %s" +msgid "Specified RPC version, %(version)s, not supported by this endpoint." msgstr "" -#: cinder/image/glance.py:331 cinder/image/glance.py:335 -msgid "Not the image owner" -msgstr "Не владелец образа" - -#: cinder/image/glance.py:410 +#: cinder/openstack/common/rpc/common.py:156 #, python-format -msgid "%(timestamp)s does not follow any of the signatures: %(iso_formats)s" +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." msgstr "" -#: cinder/image/s3.py:309 +#: cinder/openstack/common/rpc/common.py:280 #, python-format -msgid "Failed to download %(image_location)s to %(image_path)s" -msgstr "Ошибка загрузки %(image_location)s в %(image_path)s" +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" -#: cinder/image/s3.py:328 +#: cinder/openstack/common/rpc/common.py:302 #, python-format -msgid "Failed to decrypt %(image_location)s to %(image_path)s" -msgstr "Ошибка расшифрования %(image_location)s в %(image_path)s" +msgid "Returning exception %s to caller" +msgstr "Возврат исключения %s вызывающему" + +#: cinder/openstack/common/rpc/impl_kombu.py:166 +#: cinder/openstack/common/rpc/impl_qpid.py:164 +msgid "Failed to process message... skipping it." 
+msgstr "" -#: cinder/image/s3.py:340 +#: cinder/openstack/common/rpc/impl_kombu.py:477 #, python-format -msgid "Failed to untar %(image_location)s to %(image_path)s" -msgstr "Ошибка извлечения %(image_location)s в %(image_path)s" +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "Повторное подключение к серверу AMQP на %(hostname)s:%(port)d" -#: cinder/image/s3.py:353 +#: cinder/openstack/common/rpc/impl_kombu.py:499 #, python-format -msgid "Failed to upload %(image_location)s to %(image_path)s" -msgstr "Ошибка выгрузки %(image_location)s в %(image_path)s" +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "Подключение к серверу AMQP на %(hostname)s:%(port)d" -#: cinder/image/s3.py:379 +#: cinder/openstack/common/rpc/impl_kombu.py:536 #, python-format -msgid "Failed to decrypt private key: %s" -msgstr "Ошибка дешифрирования личного ключа: %s" +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" +"Невозможно подключиться к серверу AMQP на %(hostname)s:%(port)d после " +"%(max_retries)d попыток: %(err_str)s" -#: cinder/image/s3.py:387 +#: cinder/openstack/common/rpc/impl_kombu.py:552 #, python-format -msgid "Failed to decrypt initialization vector: %s" -msgstr "Ошибка дешифрирования вектора инициализации: %s" +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" +"AMQP сервер на %(hostname)s:%(port)d недоступен: %(err_str)s. Повторная " +"попытка через %(sleep_time)d секунд." -#: cinder/image/s3.py:398 +#: cinder/openstack/common/rpc/impl_kombu.py:606 +#: cinder/openstack/common/rpc/impl_qpid.py:507 #, python-format -msgid "Failed to decrypt image file %(image_file)s: %(err)s" -msgstr "Ошибка дешифрирования файла образа %(image_file)s: %(err)s" +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "Ошибка объявления потребителю темы '%(topic)s': %(err_str)s" -#: cinder/image/s3.py:410 -msgid "Unsafe filenames in image" -msgstr "В образе небезопасные имена файлов" +#: cinder/openstack/common/rpc/impl_kombu.py:624 +#: cinder/openstack/common/rpc/impl_qpid.py:522 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" -#: cinder/ipv6/account_identifier.py:38 cinder/ipv6/rfc2462.py:34 +#: cinder/openstack/common/rpc/impl_kombu.py:628 +#: cinder/openstack/common/rpc/impl_qpid.py:526 #, python-format -msgid "Bad mac for to_global_ipv6: %s" -msgstr "Недопустимый mac для to_global_ipv6: %s" +msgid "Failed to consume message from queue: %s" +msgstr "Ошибка принятия сообщения из очереди: %s" -#: cinder/ipv6/account_identifier.py:40 cinder/ipv6/rfc2462.py:36 +#: cinder/openstack/common/rpc/impl_kombu.py:667 +#: cinder/openstack/common/rpc/impl_qpid.py:561 #, python-format -msgid "Bad prefix for to_global_ipv6: %s" -msgstr "Недопустимый префикс для to_global_ipv6: %s" +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "Ошибка публикации сообщения в тему '%(topic)s': %(err_str)s" -#: cinder/ipv6/account_identifier.py:42 +#: cinder/openstack/common/rpc/impl_qpid.py:84 #, python-format -msgid "Bad project_id for to_global_ipv6: %s" -msgstr "Недопустимый project_id для to_global_ipv6: %s" +msgid "Invalid value for qpid_topology_version: %d" +msgstr "" -#: cinder/network/ldapdns.py:321 -msgid "This driver only supports type 'a' entries." -msgstr "Этот драйвер поддерживает только записи типа 'a'." 
+#: cinder/openstack/common/rpc/impl_qpid.py:455 +#, fuzzy, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "Невозможно подключиться к серверу AMQP: %s " -#: cinder/network/linux_net.py:166 +#: cinder/openstack/common/rpc/impl_qpid.py:461 #, python-format -msgid "Attempted to remove chain %s which does not exist" -msgstr "Предпринята удалить цепочку %s, которая не существует" +msgid "Connected to AMQP server on %s" +msgstr "" -#: cinder/network/linux_net.py:192 -#, python-format -msgid "Unknown chain: %r" -msgstr "Неизвестная цепочка: %r" +#: cinder/openstack/common/rpc/impl_qpid.py:474 +msgid "Re-established AMQP queues" +msgstr "" -#: cinder/network/linux_net.py:215 -#, python-format -msgid "" -"Tried to remove rule that was not there: %(chain)r %(rule)r %(wrap)r " -"%(top)r" +#: cinder/openstack/common/rpc/impl_qpid.py:534 +msgid "Error processing message. Skipping it." msgstr "" -"Предпринята попытка удалить правило, которого там нет: %(chain)r %(rule)r" -" %(wrap)r %(top)r" -#: cinder/network/linux_net.py:335 -msgid "IPTablesManager.apply completed with success" +#: cinder/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." msgstr "" -#: cinder/network/linux_net.py:694 +#: cinder/openstack/common/rpc/impl_zmq.py:101 #, python-format -msgid "Hupping dnsmasq threw %s" +msgid "Deserializing: %s" msgstr "" -#: cinder/network/linux_net.py:696 +#: cinder/openstack/common/rpc/impl_zmq.py:136 #, python-format -msgid "Pid %d is stale, relaunching dnsmasq" +msgid "Connecting to %(addr)s with %(type)s" msgstr "" -#: cinder/network/linux_net.py:756 +#: cinder/openstack/common/rpc/impl_zmq.py:137 #, python-format -msgid "killing radvd threw %s" +msgid "-> Subscribed to %(subscribe)s" msgstr "" -#: cinder/network/linux_net.py:758 +#: cinder/openstack/common/rpc/impl_zmq.py:138 #, python-format -msgid "Pid %d is stale, relaunching radvd" +msgid "-> bind: %(bind)s" msgstr "" -#: cinder/network/linux_net.py:967 -#, python-format -msgid "Starting VLAN inteface %s" -msgstr "Запуск интерфейса VLAN %s" +#: cinder/openstack/common/rpc/impl_zmq.py:146 +msgid "Could not open socket." +msgstr "" -#: cinder/network/linux_net.py:999 +#: cinder/openstack/common/rpc/impl_zmq.py:158 #, python-format -msgid "Starting Bridge interface for %s" -msgstr "Запуск моста интерфейса для %s" +msgid "Subscribing to %s" +msgstr "" -#: cinder/network/linux_net.py:1142 -#, fuzzy, python-format -msgid "Starting bridge %s " -msgstr "Запуск копии %s" +#: cinder/openstack/common/rpc/impl_zmq.py:200 +msgid "You cannot recv on this socket." +msgstr "" -#: cinder/network/linux_net.py:1149 -#, fuzzy, python-format -msgid "Done starting bridge %s" -msgstr "Ошибка запуска xvp: %s" +#: cinder/openstack/common/rpc/impl_zmq.py:205 +msgid "You cannot send on this socket." +msgstr "" -#: cinder/network/linux_net.py:1167 +#: cinder/openstack/common/rpc/impl_zmq.py:267 #, fuzzy, python-format -msgid "Failed unplugging gateway interface '%s'" -msgstr "Ошибка отсоединения vif копии '%s'" +msgid "Running func with context: %s" +msgstr "неизвлечённый контекст: %s" -#: cinder/network/linux_net.py:1170 -#, python-format -msgid "Unplugged gateway interface '%s'" +#: cinder/openstack/common/rpc/impl_zmq.py:305 +msgid "Sending reply" msgstr "" -#: cinder/network/manager.py:291 -#, fuzzy, python-format -msgid "Fixed ip %(fixed_ip_id)s not found" -msgstr "Сертификат %(certificate_id)s не найден." 
+#: cinder/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." +msgstr "" -#: cinder/network/manager.py:300 cinder/network/manager.py:496 -#, python-format -msgid "Interface %(interface)s not found" -msgstr "Интерфейс %(interface)s не найден" +#: cinder/openstack/common/rpc/impl_zmq.py:371 +#, fuzzy +msgid "Registering reactor" +msgstr "Исключение регистрации ВМ %s" -#: cinder/network/manager.py:315 -#, python-format -msgid "floating IP allocation for instance |%s|" -msgstr "" +#: cinder/openstack/common/rpc/impl_zmq.py:383 +#, fuzzy +msgid "In reactor registered" +msgstr "Отсутствуют зарегистрированные ВМ" -#: cinder/network/manager.py:353 -#, python-format -msgid "floating IP deallocation for instance |%s|" +#: cinder/openstack/common/rpc/impl_zmq.py:387 +msgid "Consuming socket" msgstr "" -#: cinder/network/manager.py:386 +#: cinder/openstack/common/rpc/impl_zmq.py:437 #, python-format -msgid "Address |%(address)s| is not allocated" -msgstr "Адрес |%(address)s| не выделен" +msgid "Creating proxy for topic: %s" +msgstr "" -#: cinder/network/manager.py:390 -#, python-format -msgid "Address |%(address)s| is not allocated to your project |%(project)s|" -msgstr "Адрес |%(address)s| не выделен вашему проекту |%(project)s|" +#: cinder/openstack/common/rpc/impl_zmq.py:443 +msgid "Topic contained dangerous characters." +msgstr "" -#: cinder/network/manager.py:402 -#, python-format -msgid "Quota exceeded for %s, tried to allocate address" -msgstr "Превышена квота для %s, попытка выделения адреса" +#: cinder/openstack/common/rpc/impl_zmq.py:475 +msgid "Topic socket file creation failed." +msgstr "" -#: cinder/network/manager.py:614 +#: cinder/openstack/common/rpc/impl_zmq.py:481 #, python-format -msgid "" -"Database inconsistency: DNS domain |%s| is registered in the Cinder db but " -"not visible to either the floating or instance DNS driver. It will be " -"ignored." +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." msgstr "" -#: cinder/network/manager.py:660 +#: cinder/openstack/common/rpc/impl_zmq.py:497 #, python-format -msgid "Domain |%(domain)s| already exists, changing zone to |%(av_zone)s|." -msgstr "Домен |%(domain)s| уже существует, изменение зоны на |%(av_zone)s|." +msgid "Required IPC directory does not exist at %s" +msgstr "" -#: cinder/network/manager.py:670 +#: cinder/openstack/common/rpc/impl_zmq.py:506 #, python-format -msgid "Domain |%(domain)s| already exists, changing project to |%(project)s|." -msgstr "Домен |%(domain)s| уже существует, изменение проекта на |%(project)s|." +msgid "Permission denied to IPC directory at %s" +msgstr "" -#: cinder/network/manager.py:778 -#, python-format -msgid "Disassociated %s stale fixed ip(s)" +#: cinder/openstack/common/rpc/impl_zmq.py:509 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." msgstr "" -#: cinder/network/manager.py:782 -msgid "setting network host" -msgstr "установка сетевого узла" +#: cinder/openstack/common/rpc/impl_zmq.py:543 +#, fuzzy, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "Заданные данные: %s" -#: cinder/network/manager.py:896 -#, python-format -msgid "network allocations for instance |%s|" +#: cinder/openstack/common/rpc/impl_zmq.py:562 +msgid "ZMQ Envelope version unsupported or unknown." msgstr "" -#: cinder/network/manager.py:901 -#, python-format -msgid "networks retrieved for instance |%(instance_id)s|: |%(networks)s|" +#: cinder/openstack/common/rpc/impl_zmq.py:590 +msgid "Skipping topic registration. 
Already registered." msgstr "" -#: cinder/network/manager.py:930 +#: cinder/openstack/common/rpc/impl_zmq.py:597 #, python-format -msgid "network deallocation for instance |%s|" -msgstr "исключение сетевых распределений для копии |%s|" +msgid "Consumer is a zmq.%s" +msgstr "" -#: cinder/network/manager.py:1152 -#, python-format -msgid "" -"instance-dns-zone is |%(domain)s|, which is in availability zone " -"|%(zone)s|. Instance |%(instance)s| is in zone |%(zone2)s|. No DNS record" -" will be created." +#: cinder/openstack/common/rpc/impl_zmq.py:649 +#, fuzzy +msgid "Creating payload" +msgstr "Создание изображения" + +#: cinder/openstack/common/rpc/impl_zmq.py:662 +msgid "Creating queue socket for reply waiter" msgstr "" -"instance-dns-zone является |%(domain)s|, который располагается в " -"доступной зоне |%(zone)s|. Копия |%(instance)s| расположена в зоне " -"|%(zone2)s|. Запись DNS не будет создана." -#: cinder/network/manager.py:1227 -#, python-format -msgid "Unable to release %s because vif doesn't exist." +#: cinder/openstack/common/rpc/impl_zmq.py:675 +msgid "Sending cast" msgstr "" -#: cinder/network/manager.py:1244 -#, python-format -msgid "Leased IP |%(address)s|" -msgstr "Арендованный IP |%(address)s|" +#: cinder/openstack/common/rpc/impl_zmq.py:678 +msgid "Cast sent; Waiting reply" +msgstr "" -#: cinder/network/manager.py:1248 -#, python-format -msgid "IP %s leased that is not associated" +#: cinder/openstack/common/rpc/impl_zmq.py:681 +#, fuzzy, python-format +msgid "Received message: %s" +msgstr "получено %s" + +#: cinder/openstack/common/rpc/impl_zmq.py:682 +msgid "Unpacking response" msgstr "" -#: cinder/network/manager.py:1256 -#, python-format -msgid "IP |%s| leased that isn't allocated" +#: cinder/openstack/common/rpc/impl_zmq.py:691 +msgid "Unsupported or unknown ZMQ envelope returned." msgstr "" -#: cinder/network/manager.py:1261 -#, python-format -msgid "Released IP |%(address)s|" -msgstr "Присвоенный IP |%(address)s|" +#: cinder/openstack/common/rpc/impl_zmq.py:698 +#, fuzzy +msgid "RPC Message Invalid." +msgstr "Недопустимый запрос." -#: cinder/network/manager.py:1265 +#: cinder/openstack/common/rpc/impl_zmq.py:721 #, python-format -msgid "IP %s released that is not associated" +msgid "%(msg)s" msgstr "" -#: cinder/network/manager.py:1268 +#: cinder/openstack/common/rpc/impl_zmq.py:724 #, python-format -msgid "IP %s released that was not leased" +msgid "Sending message(s) to: %s" msgstr "" -#: cinder/network/manager.py:1331 -msgid "cidr already in use" -msgstr "cidr уже используется" +#: cinder/openstack/common/rpc/impl_zmq.py:728 +msgid "No matchmaker results. Not casting." +msgstr "" -#: cinder/network/manager.py:1334 -#, python-format -msgid "requested cidr (%(cidr)s) conflicts with existing supernet (%(super)s)" +#: cinder/openstack/common/rpc/impl_zmq.py:731 +msgid "No match from matchmaker." msgstr "" -"запрошенная cidr (%(cidr)s) конфликтует с существующей супер-сетью " -"(%(super)s)" -#: cinder/network/manager.py:1345 +#: cinder/openstack/common/rpc/impl_zmq.py:771 #, python-format -msgid "" -"requested cidr (%(cidr)s) conflicts with existing smaller cidr " -"(%(smaller)s)" +msgid "topic is %s." msgstr "" -"запрошенная cidr (%(cidr)s) конфликтует с существующей, меньшей cidr " -"(%(smaller)s)" -#: cinder/network/manager.py:1404 -msgid "Network already exists!" -msgstr "Сеть уже существует!" 
- -#: cinder/network/manager.py:1423 +#: cinder/openstack/common/rpc/impl_zmq.py:815 #, python-format -msgid "Network must be disassociated from project %s before delete" -msgstr "Сеть должна быть исключена из проекта %s перед удалением" - -#: cinder/network/manager.py:1832 -msgid "" -"The sum between the number of networks and the vlan start cannot be " -"greater than 4094" +msgid "rpc_zmq_matchmaker = %(orig)s is deprecated; use %(new)s instead" msgstr "" -#: cinder/network/manager.py:1839 -#, python-format -msgid "" -"The network range is not big enough to fit %(num_networks)s. Network size" -" is %(network_size)s" +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." msgstr "" -"Сетевой диапазон недостаточен для соответствия %(num_networks)s. Размер " -"сети %(network_size)s" -#: cinder/network/minidns.py:65 -msgid "This driver only supports type 'a'" -msgstr "Этот драйвер поддерживает только тип 'a'" +#: cinder/openstack/common/rpc/matchmaker.py:89 +#, fuzzy +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "xenapi.fake не имеет реализации для %s" -#: cinder/network/quantum/client.py:154 -msgid "Tenant ID not set" +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" msgstr "" -#: cinder/network/quantum/client.py:180 -#, python-format -msgid "Quantum Client Request: %(method)s %(action)s" +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." msgstr "" -#: cinder/network/quantum/client.py:196 +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#: cinder/openstack/common/rpc/matchmaker_ring.py:79 +#: cinder/openstack/common/rpc/matchmaker_ring.py:97 #, python-format -msgid "Quantum entity not found: %s" -msgstr "Объект Quantum не найден: %s" +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" -#: cinder/network/quantum/client.py:206 +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:54 #, python-format -msgid "Server %(status_code)s error: %(data)s" -msgstr "Серверная %(status_code)s ошибка: %(data)s" +msgid "extra_spec requirement '%(req)s' does not match '%(cap)s'" +msgstr "" -#: cinder/network/quantum/client.py:210 +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:67 #, python-format -msgid "Unable to connect to server. Got error: %s" -msgstr "Невозможно подключиться к серверу. Принята ошибка: %s" +msgid "%(host_state)s fails resource_type extra_specs requirements" +msgstr "" -#: cinder/network/quantum/client.py:228 -#, python-format -msgid "unable to deserialize object of type = '%s'" +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:43 +msgid "Re-scheduling is disabled." msgstr "" -#: cinder/network/quantum/manager.py:162 -msgid "QuantumManager does not use 'multi_host' parameter." +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:52 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" msgstr "" -#: cinder/network/quantum/manager.py:166 -msgid "QuantumManager requires that only one network is created per call" +#: cinder/scheduler/driver.py:69 +msgid "Must implement host_passes_filters" msgstr "" -#: cinder/network/quantum/manager.py:176 -msgid "QuantumManager does not use 'vlan_start' parameter." 
+#: cinder/scheduler/driver.py:74 +msgid "Must implement find_retype_host" msgstr "" -#: cinder/network/quantum/manager.py:182 -msgid "QuantumManager does not use 'vpn_start' parameter." +#: cinder/scheduler/driver.py:78 +msgid "Must implement a fallback schedule" msgstr "" -#: cinder/network/quantum/manager.py:186 -msgid "QuantumManager does not use 'bridge' parameter." +#: cinder/scheduler/driver.py:82 +msgid "Must implement schedule_create_volume" msgstr "" -#: cinder/network/quantum/manager.py:190 -msgid "QuantumManager does not use 'bridge_interface' parameter." +#: cinder/scheduler/filter_scheduler.py:98 +#, python-format +msgid "cannot place volume %(id)s on %(host)s" msgstr "" -#: cinder/network/quantum/manager.py:195 -msgid "QuantumManager requires a valid (.1) gateway address." +#: cinder/scheduler/filter_scheduler.py:114 +#, python-format +msgid "No valid hosts for volume %(id)s with type %(type)s" msgstr "" -#: cinder/network/quantum/manager.py:204 +#: cinder/scheduler/filter_scheduler.py:125 #, python-format msgid "" -"Unable to find existing quantum network for tenant '%(q_tenant_id)s' with" -" net-id '%(quantum_net_id)s'" +"Current host not valid for volume %(id)s with type %(type)s, migration " +"not allowed" msgstr "" -#: cinder/network/quantum/manager.py:301 -#, python-format -msgid "network allocations for instance %s" -msgstr "сетевые распределения для копии %s" +#: cinder/scheduler/filter_scheduler.py:156 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" -#: cinder/network/quantum/manager.py:588 +#: cinder/scheduler/filter_scheduler.py:174 #, python-format msgid "" -"port deallocation failed for instance: |%(instance_id)s|, port_id: " -"|%(port_id)s|" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" msgstr "" -#: cinder/network/quantum/manager.py:606 +#: cinder/scheduler/filter_scheduler.py:207 #, python-format -msgid "" -"ipam deallocation failed for instance: |%(instance_id)s|, vif_uuid: " -"|%(vif_uuid)s|" +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" msgstr "" -#: cinder/network/quantum/melange_connection.py:96 +#: cinder/scheduler/filter_scheduler.py:259 #, python-format -msgid "Server returned error: %s" -msgstr "Сервер возвратил ошибку: %s" +msgid "Filtered %s" +msgstr "" -#: cinder/network/quantum/melange_connection.py:98 -msgid "Connection error contacting melange service, retrying" +#: cinder/scheduler/filter_scheduler.py:276 +#, python-format +msgid "Choosing %s" msgstr "" -#: cinder/network/quantum/melange_connection.py:108 +#: cinder/scheduler/host_manager.py:264 +#, fuzzy, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "Принято служебное обновление для %(service_name)s от %(host)s." + +#: cinder/scheduler/host_manager.py:269 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "Принято служебное обновление для %(service_name)s от %(host)s." + +#: cinder/scheduler/host_manager.py:294 #, python-format +msgid "volume service is down or disabled. (host: %s)" +msgstr "" + +#: cinder/scheduler/manager.py:63 msgid "" -"allocate IP on network |%(network_id)s| belonging to " -"|%(network_tenant_id)s| to this vif |%(vif_id)s| with mac " -"|%(mac_address)s| belonging to |%(project_id)s| " +"ChanceScheduler and SimpleScheduler have been deprecated due to lack of " +"support for advanced features like: volume types, volume encryption, QoS " +"etc. 
These two schedulers can be fully replaced by FilterScheduler with " +"certain combination of filters and weighers." msgstr "" -#: cinder/network/quantum/melange_ipam_lib.py:133 -msgid "get_project_and_global_net_ids must be called with a non-null project_id" +#: cinder/scheduler/manager.py:98 cinder/scheduler/manager.py:100 +msgid "Failed to create scheduler manager volume flow" msgstr "" -#: cinder/network/quantum/cinder_ipam_lib.py:75 -msgid "Error creating network entry" -msgstr "Ошибка создания сетевой записи" +#: cinder/scheduler/manager.py:159 +msgid "New volume type not specified in request_spec." +msgstr "" -#: cinder/network/quantum/cinder_ipam_lib.py:90 +#: cinder/scheduler/manager.py:174 #, python-format -msgid "No network with net_id = %s" -msgstr "Нет сети с net_id = %s" +msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." +msgstr "" -#: cinder/network/quantum/cinder_ipam_lib.py:221 -#, fuzzy, python-format -msgid "No fixed IPs to deallocate for vif %s" -msgstr "исключение сетевых распределений для копии |%s|" +#: cinder/scheduler/manager.py:192 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "Ошибка schedule_%(method)s: %(ex)s" -#: cinder/network/quantum/quantum_connection.py:99 +#: cinder/scheduler/scheduler_options.py:68 #, python-format -msgid "Connecting interface %(interface_id)s to net %(net_id)s for %(tenant_id)s" +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" msgstr "" -"Подключение интерфейса %(interface_id)s к сети %(net_id)s для " -"%(tenant_id)s" -#: cinder/network/quantum/quantum_connection.py:113 +#: cinder/scheduler/scheduler_options.py:78 #, python-format -msgid "Deleting port %(port_id)s on net %(net_id)s for %(tenant_id)s" -msgstr "Удаление порта %(port_id)s в сети %(net_id)s для %(tenant_id)s" +msgid "Could not decode scheduler options: '%s'" +msgstr "" -#: cinder/notifier/api.py:115 -#, python-format -msgid "%s not in valid priorities" -msgstr "%s не в допустимых приоритетах" +#: cinder/scheduler/filters/capacity_filter.py:43 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" -#: cinder/notifier/api.py:130 +#: cinder/scheduler/filters/capacity_filter.py:57 #, python-format msgid "" -"Problem '%(e)s' attempting to send to notification system. " -"Payload=%(payload)s" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:53 +msgid "No volume_id provided to populate a request_spec from" msgstr "" -"Неполадка '%(e)s', попытка отправить в систему уведомлений. " -"Нагрузка=%(payload)s" -#: cinder/notifier/list_notifier.py:65 +#: cinder/scheduler/flows/create_volume.py:116 #, python-format -msgid "Problem '%(e)s' attempting to send to notification driver %(driver)s." +msgid "Failed to schedule_create_volume: %(cause)s" msgstr "" -#: cinder/notifier/rabbit_notifier.py:46 +#: cinder/scheduler/flows/create_volume.py:135 #, python-format -msgid "Could not send notification to %(topic)s. 
Payload=%(message)s" +msgid "Failed notifying on %(topic)s payload %(payload)s" msgstr "" -#: cinder/rpc/amqp.py:146 +#: cinder/tests/fake_driver.py:57 cinder/volume/driver.py:784 #, python-format -msgid "Returning exception %s to caller" -msgstr "Возврат исключения %s вызывающему" +msgid "FAKE ISCSI: %s" +msgstr "" -#: cinder/rpc/amqp.py:188 +#: cinder/tests/fake_driver.py:76 cinder/volume/driver.py:884 #, python-format -msgid "unpacked context: %s" -msgstr "неизвлечённый контекст: %s" +msgid "FAKE ISER: %s" +msgstr "" -#: cinder/rpc/amqp.py:231 +#: cinder/tests/fake_driver.py:97 +msgid "local_path not implemented" +msgstr "" + +#: cinder/tests/fake_driver.py:124 cinder/tests/fake_driver.py:129 #, python-format -msgid "received %s" -msgstr "получено %s" +msgid "LoggingVolumeDriver: %s" +msgstr "LoggingVolumeDriver: %s" -#: cinder/rpc/amqp.py:236 +#: cinder/tests/fake_utils.py:70 #, python-format -msgid "no method for message: %s" -msgstr "не определен метод для сообщения: %s" +msgid "Faking execution of cmd (subprocess): %s" +msgstr "Имитация выполнения команды (субпроцесс): %s" -#: cinder/rpc/amqp.py:237 +#: cinder/tests/fake_utils.py:78 #, python-format -msgid "No method for message: %s" -msgstr "Не определен метод для сообщения: %s" +msgid "Faked command matched %s" +msgstr "" -#: cinder/rpc/amqp.py:321 +#: cinder/tests/fake_utils.py:94 #, python-format -msgid "Making asynchronous call on %s ..." -msgstr "Выполнение асинхронного вызова %s ..." +msgid "Faked command raised an exception %s" +msgstr "Имитация команды привела к исключению %s" -#: cinder/rpc/amqp.py:324 +#: cinder/tests/fake_utils.py:97 #, python-format -msgid "MSG_ID is %s" -msgstr "MSG_ID is %s" +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "Ответ на имитацию команды в stdout='%(stdout)s' stderr='%(stderr)s'" -#: cinder/rpc/amqp.py:346 +#: cinder/tests/test_misc.py:58 #, python-format -msgid "Making asynchronous cast on %s..." +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" msgstr "" -#: cinder/rpc/amqp.py:354 -msgid "Making asynchronous fanout cast..." +#: cinder/tests/test_netapp_nfs.py:360 +#, python-format +msgid "Share %(share)s and file name %(file_name)s" msgstr "" -#: cinder/rpc/amqp.py:379 -#, python-format -msgid "Sending notification on %s..." -msgstr "Отправка оповещения на %s..." +#: cinder/tests/test_rbd.py:768 cinder/volume/drivers/rbd.py:175 +msgid "flush() not supported in this version of librbd" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:260 +#, fuzzy, python-format +msgid "unrecognized argument %s" +msgstr "Нераспознанное значение read_deleted '%s'" -#: cinder/rpc/common.py:54 +#: cinder/tests/test_storwize_svc.py:1507 #, python-format -msgid "" -"Remote error: %(exc_type)s %(value)s\n" -"%(traceback)s." +msgid "Run CLI command: %s" msgstr "" -"Удалённая ошибка: %(exc_type)s %(value)s\n" -"%(traceback)s." -#: cinder/rpc/common.py:71 -msgid "Timeout while waiting on RPC response." -msgstr "" +#: cinder/tests/test_storwize_svc.py:1510 +#, fuzzy, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "Ответ на имитацию команды в stdout='%(stdout)s' stderr='%(stderr)s'" -#: cinder/rpc/impl_kombu.py:111 -msgid "Failed to process message... skipping it." 
+#: cinder/tests/test_storwize_svc.py:1515 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/rpc/impl_kombu.py:407 +#: cinder/tests/test_volume_types.py:60 #, python-format -msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" -msgstr "Повторное подключение к серверу AMQP на %(hostname)s:%(port)d" +msgid "Given data: %s" +msgstr "Заданные данные: %s" -#: cinder/rpc/impl_kombu.py:430 +#: cinder/tests/test_volume_types.py:61 #, python-format -msgid "Connected to AMQP server on %(hostname)s:%(port)d" -msgstr "Подключение к серверу AMQP на %(hostname)s:%(port)d" +msgid "Result data: %s" +msgstr "Итоговые данные: %s" -#: cinder/rpc/impl_kombu.py:466 +#: cinder/tests/test_xiv_ds8k.py:102 #, python-format -msgid "" -"Unable to connect to AMQP server on %(hostname)s:%(port)d after " -"%(max_retries)d tries: %(err_str)s" -msgstr "" -"Невозможно подключиться к серверу AMQP на %(hostname)s:%(port)d после " -"%(max_retries)d попыток: %(err_str)s" +msgid "Volume not found for instance %(instance_id)s." +msgstr "Не найден том для копии %(instance_id)s." + +#: cinder/tests/api/contrib/test_backups.py:741 +#, fuzzy +msgid "Invalid input" +msgstr "Недопустимый снимок" + +#: cinder/tests/integrated/test_login.py:29 +#, fuzzy, python-format +msgid "volume: %s" +msgstr "Отсоединить том %s" -#: cinder/rpc/impl_kombu.py:482 +#: cinder/tests/integrated/api/client.py:32 #, python-format msgid "" -"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " -"again in %(sleep_time)d seconds." +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" msgstr "" -"AMQP сервер на %(hostname)s:%(port)d недоступен: %(err_str)s. Повторная " -"попытка через %(sleep_time)d секунд." +"%(message)s\n" +"Код состояния: %(_status)s\n" +"Тело: %(_body)s" -#: cinder/rpc/impl_kombu.py:533 cinder/rpc/impl_qpid.py:385 -#, python-format -msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" -msgstr "Ошибка объявления потребителю темы '%(topic)s': %(err_str)s" +#: cinder/tests/integrated/api/client.py:42 +msgid "Authentication error" +msgstr "Ошибка аутентификации" + +#: cinder/tests/integrated/api/client.py:50 +msgid "Authorization error" +msgstr "Ошибка авторизации" -#: cinder/rpc/impl_kombu.py:551 cinder/rpc/impl_qpid.py:400 +#: cinder/tests/integrated/api/client.py:58 +msgid "Item not found" +msgstr "объект не найден" + +#: cinder/tests/integrated/api/client.py:100 #, python-format -msgid "Timed out waiting for RPC response: %s" -msgstr "" +msgid "Doing %(method)s on %(relative_url)s" +msgstr "Выполнение %(method)s на %(relative_url)s" -#: cinder/rpc/impl_kombu.py:555 cinder/rpc/impl_qpid.py:404 +#: cinder/tests/integrated/api/client.py:103 #, python-format -msgid "Failed to consume message from queue: %s" -msgstr "Ошибка принятия сообщения из очереди: %s" +msgid "Body: %s" +msgstr "Тело: %s" -#: cinder/rpc/impl_kombu.py:589 cinder/rpc/impl_qpid.py:434 +#: cinder/tests/integrated/api/client.py:121 #, python-format -msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" -msgstr "Ошибка публикации сообщения в тему '%(topic)s': %(err_str)s" +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "%(auth_uri)s => код %(http_status)s" -#: cinder/rpc/impl_qpid.py:341 +#: cinder/tests/integrated/api/client.py:148 #, python-format -msgid "Unable to connect to AMQP server: %s " -msgstr "Невозможно подключиться к серверу AMQP: %s " +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "%(relative_uri)s => код 
%(http_status)s" + +#: cinder/tests/integrated/api/client.py:159 +msgid "Unexpected status code" +msgstr "Непредвиденный код состояния" -#: cinder/rpc/impl_qpid.py:346 +#: cinder/tests/integrated/api/client.py:166 #, python-format -msgid "Connected to AMQP server on %s" -msgstr "" +msgid "Decoding JSON: %s" +msgstr "Декодирование JSON: %s" -#: cinder/rpc/impl_qpid.py:354 -msgid "Re-established AMQP queues" +#: cinder/transfer/api.py:68 +#, fuzzy +msgid "Volume in unexpected state" +msgstr "Непредвиденный код состояния" + +#: cinder/transfer/api.py:102 cinder/volume/api.py:367 +msgid "status must be available" msgstr "" -#: cinder/rpc/impl_qpid.py:412 -msgid "Error processing message. Skipping it." +#: cinder/transfer/api.py:119 +#, fuzzy, python-format +msgid "Failed to create transfer record for %s" +msgstr "Ошибка получения метаданных для ip: %s" + +#: cinder/transfer/api.py:136 +#, python-format +msgid "Attempt to transfer %s with invalid auth key." msgstr "" -#: cinder/scheduler/chance.py:47 cinder/scheduler/simple.py:91 -#: cinder/scheduler/simple.py:143 -msgid "Is the appropriate service running?" -msgstr "Выполняется ли соответствующая служба?" +#: cinder/transfer/api.py:156 cinder/volume/flows/api/create_volume.py:508 +#, fuzzy, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "Превышена квота для %(pid)s, попытка создания тома %(size)sG" -#: cinder/scheduler/chance.py:52 -msgid "Could not find another compute" -msgstr "Невозможно найти другой compute" +#: cinder/transfer/api.py:182 +#, fuzzy, python-format +msgid "Failed to update quota donating volumetransfer id %s" +msgstr "Ошибка обновления агента: %(resp)r" -#: cinder/scheduler/driver.py:63 +#: cinder/transfer/api.py:199 #, python-format -msgid "Casted '%(method)s' to volume '%(host)s'" +msgid "Volume %s has been transferred." msgstr "" -#: cinder/scheduler/driver.py:80 +#: cinder/volume/api.py:143 #, python-format -msgid "Casted '%(method)s' to compute '%(host)s'" +msgid "Unable to query if %s is in the availability zone set" msgstr "" -#: cinder/scheduler/driver.py:89 -#, python-format -msgid "Casted '%(method)s' to network '%(host)s'" +#: cinder/volume/api.py:171 cinder/volume/api.py:173 +msgid "Failed to create api volume flow" +msgstr "" + +#: cinder/volume/api.py:202 +msgid "Failed to update quota for deleting volume" msgstr "" -#: cinder/scheduler/driver.py:107 +#: cinder/volume/api.py:214 #, python-format -msgid "Casted '%(method)s' to %(topic)s '%(host)s'" +msgid "Volume status must be available or error, but current status is: %s" msgstr "" -#: cinder/scheduler/driver.py:181 -msgid "Must implement a fallback schedule" +#: cinder/volume/api.py:224 +msgid "Volume cannot be deleted while migrating" msgstr "" -#: cinder/scheduler/driver.py:185 -msgid "Driver must implement schedule_prep_resize" +#: cinder/volume/api.py:229 +#, python-format +msgid "Volume still has %d dependent snapshots" msgstr "" -#: cinder/scheduler/driver.py:190 -msgid "Driver must implement schedule_run_instance" +#: cinder/volume/api.py:285 cinder/volume/api.py:350 +#: cinder/volume/qos_specs.py:240 cinder/volume/volume_types.py:67 +#, python-format +msgid "Searching by: %s" +msgstr "Поиск по: %s" + +#: cinder/volume/api.py:370 +msgid "already attached" msgstr "" -#: cinder/scheduler/driver.py:325 -msgid "Block migration can not be used with shared storage." 
+#: cinder/volume/api.py:377 +msgid "status must be in-use to detach" msgstr "" -#: cinder/scheduler/driver.py:330 -msgid "Live migration can not be used without shared storage." +#: cinder/volume/api.py:388 +#, fuzzy +msgid "Volume status must be available to reserve" +msgstr "Состояние тома должно быть доступно" + +#: cinder/volume/api.py:464 +msgid "Snapshot cannot be created while volume is migrating" msgstr "" -#: cinder/scheduler/driver.py:367 cinder/scheduler/driver.py:457 -#, python-format -msgid "host %(dest)s is not compatible with original host %(src)s." -msgstr "узел %(dest)s несовместим с исходным узлом %(src)s." +#: cinder/volume/api.py:468 +msgid "must be available" +msgstr "" -#: cinder/scheduler/driver.py:416 -#, python-format +#: cinder/volume/api.py:490 +#, fuzzy, python-format msgid "" -"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " -"memory(host:%(avail)s <= instance:%(mem_inst)s)" -msgstr "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "Превышена квота для %(pid)s, попытка создания тома %(size)sG" -#: cinder/scheduler/driver.py:472 -#, python-format +#: cinder/volume/api.py:502 +#, fuzzy, python-format msgid "" -"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " -"disk(host:%(available)s <= instance:%(necessary)s)" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "Превышена квота для %(pid)s, попытка выполнить %(min_count)s копий" + +#: cinder/volume/api.py:553 +#, fuzzy +msgid "Volume Snapshot status must be available or error" +msgstr "Состояние тома должно быть доступно" + +#: cinder/volume/api.py:581 cinder/volume/flows/api/create_volume.py:208 +msgid "Metadata property key blank" msgstr "" -#: cinder/scheduler/filter_scheduler.py:51 -#, python-format -msgid "No host selection for %s defined." +#: cinder/volume/api.py:585 +msgid "Metadata property key greater than 255 characters" msgstr "" -#: cinder/scheduler/filter_scheduler.py:64 -#, python-format -msgid "Attempting to build %(num_instances)d instance(s)" -msgstr "Попытка собрать %(num_instances)d копию(й)" +#: cinder/volume/api.py:589 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:720 cinder/volume/api.py:772 +#, fuzzy +msgid "Volume status must be available/in-use." +msgstr "Состояние тома должно быть доступно" + +#: cinder/volume/api.py:723 +#, fuzzy +msgid "Volume status is in-use." +msgstr "том %s: том занят" -#: cinder/scheduler/filter_scheduler.py:157 -msgid "Scheduler only understands Compute nodes (for now)" +#: cinder/volume/api.py:752 +msgid "Volume status must be available to extend." msgstr "" -#: cinder/scheduler/filter_scheduler.py:199 +#: cinder/volume/api.py:757 #, python-format -msgid "Filtered %(hosts)s" -msgstr "Отфильтрованы %(hosts)s" +msgid "" +"New size for extend must be greater than current size. 
(current: " +"%(size)s, extended: %(new_size)s)" +msgstr "" -#: cinder/scheduler/filter_scheduler.py:209 -#, python-format -msgid "Weighted %(weighted_host)s" +#: cinder/volume/api.py:778 +msgid "Volume is already part of an active migration" msgstr "" -#: cinder/scheduler/host_manager.py:144 -#, python-format -msgid "Host filter fails for ignored host %(host)s" +#: cinder/volume/api.py:784 +msgid "volume must not have snapshots" msgstr "" -#: cinder/scheduler/host_manager.py:151 +#: cinder/volume/api.py:797 #, python-format -msgid "Host filter fails for non-forced host %(host)s" +msgid "No available service named %s" msgstr "" -#: cinder/scheduler/host_manager.py:157 -#, python-format -msgid "Host filter function %(func)s failed for %(host)s" +#: cinder/volume/api.py:803 +msgid "Destination host must be different than current host" msgstr "" -#: cinder/scheduler/host_manager.py:163 -#, fuzzy, python-format -msgid "Host filter passes for %(host)s" -msgstr "Отфильтрованы %(hosts)s" +#: cinder/volume/api.py:833 +msgid "Source volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:837 +msgid "Destination volume not mid-migration." +msgstr "" -#: cinder/scheduler/host_manager.py:272 +#: cinder/volume/api.py:842 #, python-format -msgid "Received %(service_name)s service update from %(host)s." -msgstr "Принято служебное обновление для %(service_name)s от %(host)s." +msgid "Destination has migration_status %(stat)s, expected %(exp)s." +msgstr "" -#: cinder/scheduler/host_manager.py:313 -msgid "host_manager only implemented for 'compute'" -msgstr "host_manager реализован только для 'compute'" +#: cinder/volume/api.py:853 +msgid "Volume status must be available to update readonly flag." +msgstr "" -#: cinder/scheduler/host_manager.py:323 +#: cinder/volume/api.py:862 #, python-format -msgid "No service for compute ID %s" -msgstr "Нет службы для compute ID %s" +msgid "Unable to update type due to incorrect status on volume: %s" +msgstr "" -#: cinder/scheduler/manager.py:85 +#: cinder/volume/api.py:868 #, python-format -msgid "" -"Driver Method %(driver_method_name)s missing: %(e)s. Reverting to " -"schedule()" +msgid "Volume %s is already part of an active migration." msgstr "" -#: cinder/scheduler/manager.py:150 +#: cinder/volume/api.py:874 #, python-format -msgid "Failed to schedule_%(method)s: %(ex)s" -msgstr "Ошибка schedule_%(method)s: %(ex)s" - -#: cinder/scheduler/manager.py:159 -#, fuzzy, python-format -msgid "Setting instance %(instance_uuid)s to %(state)s state." -msgstr "Копия %(instance_uuid)s не остановлена. 
(%(vm_state)s" +msgid "migration_policy must be 'on-demand' or 'never', passed: %s" +msgstr "" -#: cinder/scheduler/scheduler_options.py:66 +#: cinder/volume/api.py:887 #, python-format -msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgid "Invalid volume_type passed: %s" msgstr "" -#: cinder/scheduler/scheduler_options.py:75 +#: cinder/volume/api.py:900 #, python-format -msgid "Could not decode scheduler options: '%(e)s'" -msgstr "Невозможно декодировать параметры расписания: '%(e)s'" - -#: cinder/scheduler/simple.py:87 -msgid "Not enough allocatable CPU cores remaining" +msgid "New volume_type same as original: %s" msgstr "" -#: cinder/scheduler/simple.py:137 -msgid "Not enough allocatable volume gigabytes remaining" +#: cinder/volume/api.py:915 +msgid "Retype cannot change encryption requirements" msgstr "" -#: cinder/scheduler/filters/core_filter.py:45 -msgid "VCPUs not set; assuming CPU collection broken" +#: cinder/volume/api.py:927 +msgid "Retype cannot change front-end qos specs for in-use volumes" msgstr "" -#: cinder/tests/fake_utils.py:72 +#: cinder/volume/driver.py:189 cinder/volume/drivers/netapp/nfs.py:174 #, python-format -msgid "Faking execution of cmd (subprocess): %s" -msgstr "Имитация выполнения команды (субпроцесс): %s" +msgid "Recovering from a failed execute. Try number %s" +msgstr "Восстановление после недопустимого выполнения. Попытка номер %s" -#: cinder/tests/fake_utils.py:80 +#: cinder/volume/driver.py:282 #, python-format -msgid "Faked command matched %s" +msgid "copy_data_between_volumes %(src)s -> %(dest)s." msgstr "" -#: cinder/tests/fake_utils.py:96 +#: cinder/volume/driver.py:295 cinder/volume/driver.py:309 #, python-format -msgid "Faked command raised an exception %s" -msgstr "Имитация команды привела к исключению %s" +msgid "Failed to attach volume %(vol)s" +msgstr "" -#: cinder/tests/fake_utils.py:101 +#: cinder/volume/driver.py:327 #, python-format -msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" -msgstr "Ответ на имитацию команды в stdout='%(stdout)s' stderr='%(stderr)s'" - -#: cinder/tests/fakelibvirt.py:784 -msgid "Please extend mock libvirt module to support flags" +msgid "Failed to copy volume %(src)s to %(dest)d" msgstr "" -#: cinder/tests/fakelibvirt.py:790 -msgid "Please extend fake libvirt module to support this auth method" +#: cinder/volume/driver.py:340 +#, python-format +msgid "copy_image_to_volume %s." msgstr "" -#: cinder/tests/test_compute.py:365 cinder/tests/test_compute.py:1419 +#: cinder/volume/driver.py:358 #, python-format -msgid "Running instances: %s" -msgstr "Выполняемые копии: %s" +msgid "copy_volume_to_image %s." +msgstr "" -#: cinder/tests/test_compute.py:371 +#: cinder/volume/driver.py:394 #, python-format -msgid "After terminating instances: %s" -msgstr "После завершения работы копий: %s" - -#: cinder/tests/test_compute.py:589 -msgid "Internal error" -msgstr "Внутренняя ошибка" +msgid "Unable to access the backend storage via the path %(path)s." +msgstr "" -#: cinder/tests/test_compute.py:1430 +#: cinder/volume/driver.py:433 #, python-format -msgid "After force-killing instances: %s" -msgstr "После принудительного завершения работы копий: %s" +msgid "Creating a new backup for volume %s." +msgstr "" -#: cinder/tests/test_misc.py:92 +#: cinder/volume/driver.py:451 #, python-format -msgid "" -"The following migrations are missing a downgrade:\n" -"\t%s" +msgid "Restoring backup %(backup)s to volume %(volume)s." 
msgstr "" -#: cinder/tests/test_cinder_manage.py:169 -msgid "id" +#: cinder/volume/driver.py:474 +msgid "Extend volume not implemented" msgstr "" -#: cinder/tests/test_cinder_manage.py:170 -msgid "IPv4" -msgstr "IPv4" +#: cinder/volume/driver.py:533 cinder/volume/drivers/emc/emc_smis_iscsi.py:113 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" -#: cinder/tests/test_cinder_manage.py:171 -msgid "IPv6" -msgstr "IPv6" +#: cinder/volume/driver.py:546 +#, python-format +msgid "ISCSI discovery attempt failed for:%s" +msgstr "" -#: cinder/tests/test_cinder_manage.py:172 -msgid "start address" +#: cinder/volume/driver.py:548 +#, python-format +msgid "Error from iscsiadm -m discovery: %s" msgstr "" -#: cinder/tests/test_cinder_manage.py:173 -msgid "DNS1" +#: cinder/volume/driver.py:595 +#, fuzzy, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "Невозможно найти экспортирование iSCSI для тома %s" + +#: cinder/volume/driver.py:599 cinder/volume/drivers/emc/emc_smis_iscsi.py:156 +#, python-format +msgid "ISCSI Discovery: Found %s" msgstr "" -#: cinder/tests/test_cinder_manage.py:174 -msgid "DNS2" +#: cinder/volume/driver.py:696 +msgid "The volume driver requires the iSCSI initiator name in the connector." msgstr "" -#: cinder/tests/test_cinder_manage.py:175 -msgid "VlanID" +#: cinder/volume/driver.py:726 cinder/volume/driver.py:845 +#: cinder/volume/drivers/eqlx.py:247 cinder/volume/drivers/lvm.py:359 +#: cinder/volume/drivers/zadara.py:650 +#: cinder/volume/drivers/emc/emc_smis_common.py:859 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:235 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:602 +#: cinder/volume/drivers/netapp/iscsi.py:1032 +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/nexenta/iscsi.py:538 +#: cinder/volume/drivers/windows/windows.py:205 +msgid "Updating volume stats" msgstr "" -#: cinder/tests/test_cinder_manage.py:176 -msgid "project" -msgstr "проект" - -#: cinder/tests/test_cinder_manage.py:177 -msgid "uuid" +#: cinder/volume/driver.py:924 +msgid "Driver must implement initialize_connection" msgstr "" -#: cinder/tests/test_volume.py:216 +#: cinder/volume/manager.py:203 #, python-format -msgid "Target %s allocated" +msgid "Driver path %s is deprecated, update your configuration to the new path." msgstr "" -#: cinder/tests/test_volume.py:468 -#, python-format -msgid "Cannot confirm exported volume id:%s." -msgstr "Невозможно подтвердить идентификатор экспортированного тома:%s." +#: cinder/volume/manager.py:209 +msgid "" +"ThinLVMVolumeDriver is deprecated, please configure LVMISCSIDriver and " +"lvm_type=thin. Continuing with those settings." 
+msgstr "" -#: cinder/tests/test_volume_types.py:58 +#: cinder/volume/manager.py:228 #, python-format -msgid "Given data: %s" -msgstr "Заданные данные: %s" +msgid "Starting volume driver %(driver_name)s (%(version)s)" +msgstr "" -#: cinder/tests/test_volume_types.py:59 +#: cinder/volume/manager.py:235 #, python-format -msgid "Result data: %s" -msgstr "Итоговые данные: %s" +msgid "Error encountered during initialization of driver: %(name)s" +msgstr "" -#: cinder/tests/test_xenapi.py:626 +#: cinder/volume/manager.py:244 #, python-format -msgid "Creating files in %s to simulate guest agent" -msgstr "Создание файлов в %s для имитации гостевого агента" +msgid "Re-exporting %s volumes" +msgstr "Повторное экспортирование %s томов" -#: cinder/tests/test_xenapi.py:637 +#: cinder/volume/manager.py:257 #, python-format -msgid "Removing simulated guest agent files in %s" -msgstr "Удаление файлов имитации гостевого агента в %s" - -#: cinder/tests/api/openstack/compute/test_servers.py:2144 -#, fuzzy, python-format -msgid "Quota exceeded: code=%(code)s" -msgstr "Превышена квота" +msgid "Failed to re-export volume %s: setting to error state" +msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:57 +#: cinder/volume/manager.py:264 #, python-format -msgid "_create: %s" -msgstr "_создать: %s" +msgid "volume %s stuck in a downloading state" +msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:66 +#: cinder/volume/manager.py:271 #, python-format -msgid "_delete: %s" -msgstr "_удалить: %s" +msgid "volume %s: skipping export" +msgstr "том %s: пропуск экспортирования" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:75 +#: cinder/volume/manager.py:273 #, python-format -msgid "_get: %s" -msgstr "_get: %s" +msgid "" +"Error encountered during re-exporting phase of driver initialization: " +"%(name)s" +msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:85 -#, python-format -msgid "_get_all: %s" -msgstr "_get_all: %s" +#: cinder/volume/manager.py:283 +msgid "Resuming any in progress delete operations" +msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:125 +#: cinder/volume/manager.py:286 #, python-format -msgid "test_snapshot_create: param=%s" -msgstr "test_snapshot_create: param=%s" +msgid "Resuming delete on volume: %s" +msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:134 -#, python-format -msgid "test_snapshot_create: resp_dict=%s" -msgstr "test_snapshot_create: resp_dict=%s" +#: cinder/volume/manager.py:328 cinder/volume/manager.py:330 +msgid "Failed to create manager volume flow" +msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:156 +#: cinder/volume/manager.py:374 cinder/volume/manager.py:391 #, python-format -msgid "test_snapshot_create_force: param=%s" -msgstr "test_snapshot_create_force: param=%s" +msgid "volume %s: deleting" +msgstr "том %s: удаление" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:165 -#, python-format -msgid "test_snapshot_create_force: resp_dict=%s" -msgstr "test_snapshot_create_force: resp_dict=%s" +#: cinder/volume/manager.py:380 +#, fuzzy +msgid "volume is not local to this node" +msgstr "Том до сих пор присоединён" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:205 +#: cinder/volume/manager.py:389 #, python-format -msgid "test_snapshot_show: resp=%s" -msgstr "test_snapshot_show: resp=%s" +msgid "volume %s: removing export" +msgstr "том %s: удаление экспортирования" -#: 
cinder/tests/api/openstack/compute/contrib/test_snapshots.py:231 +#: cinder/volume/manager.py:394 #, python-format -msgid "test_snapshot_detail: resp_dict=%s" -msgstr "test_snapshot_detail: resp_dict=%s" +msgid "Cannot delete volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:422 +#, fuzzy +msgid "Failed to update usages deleting volume" +msgstr "Ошибка обновления агента: %(resp)r" -#: cinder/tests/integrated/test_login.py:31 +#: cinder/volume/manager.py:427 #, python-format -msgid "flavor: %s" +msgid "volume %s: glance metadata deleted" msgstr "" -#: cinder/tests/integrated/api/client.py:38 +#: cinder/volume/manager.py:430 #, python-format -msgid "" -"%(message)s\n" -"Status Code: %(_status)s\n" -"Body: %(_body)s" +msgid "no glance metadata found for volume %s" msgstr "" -"%(message)s\n" -"Код состояния: %(_status)s\n" -"Тело: %(_body)s" - -#: cinder/tests/integrated/api/client.py:47 -msgid "Authentication error" -msgstr "Ошибка аутентификации" - -#: cinder/tests/integrated/api/client.py:55 -msgid "Authorization error" -msgstr "Ошибка авторизации" - -#: cinder/tests/integrated/api/client.py:63 -msgid "Item not found" -msgstr "объект не найден" -#: cinder/tests/integrated/api/client.py:105 +#: cinder/volume/manager.py:434 #, python-format -msgid "Doing %(method)s on %(relative_url)s" -msgstr "Выполнение %(method)s на %(relative_url)s" +msgid "volume %s: deleted successfully" +msgstr "том %s: удаление завершено" -#: cinder/tests/integrated/api/client.py:107 +#: cinder/volume/manager.py:451 #, python-format -msgid "Body: %s" -msgstr "Тело: %s" +msgid "snapshot %s: creating" +msgstr "снимок %s: создание" -#: cinder/tests/integrated/api/client.py:125 +#: cinder/volume/manager.py:462 #, python-format -msgid "%(auth_uri)s => code %(http_status)s" -msgstr "%(auth_uri)s => код %(http_status)s" +msgid "snapshot %(snap_id)s: creating" +msgstr "" -#: cinder/tests/integrated/api/client.py:151 +#: cinder/volume/manager.py:490 #, python-format -msgid "%(relative_uri)s => code %(http_status)s" -msgstr "%(relative_uri)s => код %(http_status)s" - -#: cinder/tests/integrated/api/client.py:161 -msgid "Unexpected status code" -msgstr "Непредвиденный код состояния" +msgid "" +"Failed updating %(snapshot_id)s metadata using the provided volumes " +"%(volume_id)s metadata" +msgstr "" -#: cinder/tests/integrated/api/client.py:168 +#: cinder/volume/manager.py:496 #, python-format -msgid "Decoding JSON: %s" -msgstr "Декодирование JSON: %s" +msgid "snapshot %s: created successfully" +msgstr "снимок %s: создание завершено" -#: cinder/tests/rpc/common.py:133 +#: cinder/volume/manager.py:508 cinder/volume/manager.py:518 #, python-format -msgid "Nested received %(queue)s, %(value)s" -msgstr "" +msgid "snapshot %s: deleting" +msgstr "снимок %s: удаление" -#: cinder/tests/rpc/common.py:142 +#: cinder/volume/manager.py:526 #, python-format -msgid "Nested return %s" +msgid "Cannot delete snapshot %s: snapshot is busy" msgstr "" -#: cinder/tests/rpc/common.py:160 -msgid "RPC backend does not support timeouts" +#: cinder/volume/manager.py:556 +msgid "Failed to update usages deleting snapshot" msgstr "" -#: cinder/tests/rpc/common.py:227 cinder/tests/rpc/common.py:233 +#: cinder/volume/manager.py:559 #, python-format -msgid "Received %s" -msgstr "Получено %s" - -#: cinder/virt/connection.py:85 -msgid "Failed to open connection to the hypervisor" -msgstr "Ошибка открытия соединения c гипервизором" +msgid "snapshot %s: deleted successfully" +msgstr "снимок %s: удаление выполнено" -#: cinder/virt/fake.py:270 
cinder/virt/xenapi_conn.py:396 -#: cinder/virt/baremetal/proxy.py:716 cinder/virt/libvirt/connection.py:2045 -#, python-format -msgid "Compute_service record created for %s " -msgstr "Запись Compute_service создана для %s " +#: cinder/volume/manager.py:579 +msgid "being attached by another instance" +msgstr "" -#: cinder/virt/fake.py:273 cinder/virt/xenapi_conn.py:399 -#: cinder/virt/baremetal/proxy.py:720 cinder/virt/libvirt/connection.py:2048 -#, python-format -msgid "Compute_service record updated for %s " -msgstr "Запись Compute_service обновлена для %s " +#: cinder/volume/manager.py:583 +msgid "being attached by another host" +msgstr "" -#: cinder/virt/firewall.py:130 -#, python-format -msgid "Attempted to unfilter instance %s which is not filtered" +#: cinder/volume/manager.py:587 +msgid "being attached by different mode" msgstr "" -#: cinder/virt/firewall.py:137 -#, python-format -msgid "Filters added to instance %s" +#: cinder/volume/manager.py:590 +msgid "status must be available or attaching" msgstr "" -#: cinder/virt/firewall.py:139 -msgid "Provider Firewall Rules refreshed" -msgstr "Выполненое обновление правил брандмауэра поставщика" +#: cinder/volume/manager.py:698 +#, fuzzy, python-format +msgid "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "Подключить том %(volume_id)s для копии %(instance_id)s на %(device)s" -#: cinder/virt/firewall.py:291 +#: cinder/volume/manager.py:760 #, python-format -msgid "Adding security group rule: %r" -msgstr "Добавление правила группы безопасности: %r" +msgid "Unable to fetch connection information from backend: %(err)s" +msgstr "" -#: cinder/virt/firewall.py:403 cinder/virt/xenapi/firewall.py:87 +#: cinder/volume/manager.py:807 #, python-format -msgid "Adding provider rule: %s" -msgstr "Добавление правила поставщика: %s" +msgid "Unable to terminate volume connection: %(err)s" +msgstr "" -#: cinder/virt/images.py:86 -msgid "'qemu-img info' parsing failed." -msgstr "Ошибка анализа 'qemu-img info'." 
+#: cinder/volume/manager.py:854 +msgid "failed to create new_volume on destination host" +msgstr "" -#: cinder/virt/images.py:92 -#, python-format -msgid "fmt=%(fmt)s backed by: %(backing_file)s" +#: cinder/volume/manager.py:857 +msgid "timeout creating new_volume on destination host" msgstr "" -#: cinder/virt/images.py:104 +#: cinder/volume/manager.py:880 #, python-format -msgid "Converted to raw, but format is now %s" -msgstr "Преобразование в необработанный, но текущий формат %s" +msgid "Failed to copy volume %(vol1)s to %(vol2)s" +msgstr "" -#: cinder/virt/vmwareapi_conn.py:105 +#: cinder/volume/manager.py:909 +#, python-format msgid "" -"Must specify vmwareapi_host_ip,vmwareapi_host_username and " -"vmwareapi_host_password to useconnection_type=vmwareapi" +"migrate_volume_completion: completing migration for volume %(vol1)s " +"(temporary volume %(vol2)s" msgstr "" -"Необходимо указать vmwareapi_host_ip,vmwareapi_host_username и " -"vmwareapi_host_password useconnection_type=vmwareapi" -#: cinder/virt/vmwareapi_conn.py:276 +#: cinder/volume/manager.py:921 #, python-format -msgid "In vmwareapi:_create_session, got this exception: %s" -msgstr "В vmwareapi:_create_session, принято это исключение: %s" +msgid "" +"migrate_volume_completion is cleaning up an error for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" -#: cinder/virt/vmwareapi_conn.py:359 +#: cinder/volume/manager.py:940 #, python-format -msgid "In vmwareapi:_call_method, got this exception: %s" -msgstr "В vmwareapi:_call_method, принято это исключение: %s" +msgid "Failed to delete migration source vol %(vol)s: %(err)s" +msgstr "" -#: cinder/virt/vmwareapi_conn.py:398 +#: cinder/volume/manager.py:976 #, python-format -msgid "Task [%(task_name)s] %(task_ref)s status: success" -msgstr "Состояние задачи [%(task_name)s] %(task_ref)s: готово" +msgid "volume %s: calling driver migrate_volume" +msgstr "" -#: cinder/virt/vmwareapi_conn.py:404 -#, python-format -msgid "Task [%(task_name)s] %(task_ref)s status: error %(error_info)s" -msgstr "Состояние задачи [%(task_name)s] %(task_ref)s: ошибка %(error_info)s" +#: cinder/volume/manager.py:1016 +#, fuzzy +msgid "Updating volume status" +msgstr "Обновление состояния узла" -#: cinder/virt/vmwareapi_conn.py:409 +#: cinder/volume/manager.py:1024 #, python-format -msgid "In vmwareapi:_poll_task, Got this error %s" -msgstr "В vmwareapi:_poll_task, принята эта ошибка %s" - -#: cinder/virt/xenapi_conn.py:140 msgid "" -"Must specify xenapi_connection_url, xenapi_connection_username " -"(optionally), and xenapi_connection_password to use " -"connection_type=xenapi" -msgstr "" -"Необходимо указать xenapi_connection_url, xenapi_connection_username " -"(необязательно) и xenapi_connection_password для использования " -"connection_type=xenapi" - -#: cinder/virt/xenapi_conn.py:329 cinder/virt/libvirt/connection.py:472 -msgid "Could not determine iscsi initiator name" +"Unable to update stats, %(driver_name)s -%(driver_version)s " +"%(config_group)s driver is uninitialized." msgstr "" -#: cinder/virt/xenapi_conn.py:460 -msgid "Host startup on XenServer is not supported." -msgstr "Запуск узла на XenServer не поддерживается." 
- -#: cinder/virt/xenapi_conn.py:489 -msgid "Unable to log in to XenAPI (is the Dom0 disk full?)" -msgstr "" -"Невозможно выполнить вход в XenAPI (недостаточно свободного места на " -"Dom0?)" +#: cinder/volume/manager.py:1044 +#, python-format +msgid "Notification {%s} received" +msgstr "Принято уведомление {%s}" -#: cinder/virt/xenapi_conn.py:527 -msgid "Host is member of a pool, but DB says otherwise" +#: cinder/volume/manager.py:1091 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to extend volume by %(s_size)sG, " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" msgstr "" -#: cinder/virt/xenapi_conn.py:599 cinder/virt/xenapi_conn.py:612 +#: cinder/volume/manager.py:1103 #, python-format -msgid "Got exception: %s" -msgstr "Исключение: %s" - -#: cinder/virt/baremetal/dom.py:93 -msgid "No domains exist." +msgid "volume %s: extending" msgstr "" -#: cinder/virt/baremetal/dom.py:95 +#: cinder/volume/manager.py:1105 #, python-format -msgid "============= initial domains =========== : %s" +msgid "volume %s: extended successfully" msgstr "" -#: cinder/virt/baremetal/dom.py:99 -msgid "Building domain: to be removed" +#: cinder/volume/manager.py:1107 +#, python-format +msgid "volume %s: Error trying to extend volume" msgstr "" -#: cinder/virt/baremetal/dom.py:103 -msgid "Not running domain: remove" +#: cinder/volume/manager.py:1169 +msgid "Failed to update usages while retyping volume." msgstr "" -#: cinder/virt/baremetal/dom.py:111 -msgid "domain running on an unknown node: discarded" +#: cinder/volume/manager.py:1170 +msgid "Failed to get old volume type quota reservations" msgstr "" -#: cinder/virt/baremetal/dom.py:127 +#: cinder/volume/manager.py:1190 #, python-format -msgid "No such domain (%s)" +msgid "Volume %s: retyped succesfully" msgstr "" -#: cinder/virt/baremetal/dom.py:134 +#: cinder/volume/manager.py:1193 #, python-format -msgid "Failed power down Bare-metal node %s" +msgid "" +"Volume %s: driver error when trying to retype, falling back to generic " +"mechanism." msgstr "" -#: cinder/virt/baremetal/dom.py:143 -msgid "deactivate -> activate fails" +#: cinder/volume/manager.py:1204 +msgid "Retype requires migration but is not allowed." msgstr "" -#: cinder/virt/baremetal/dom.py:153 -msgid "destroy_domain: no such domain" +#: cinder/volume/manager.py:1212 +msgid "Volume must not have snapshots." 
msgstr "" -#: cinder/virt/baremetal/dom.py:154 +#: cinder/volume/qos_specs.py:57 #, python-format -msgid "No such domain %s" -msgstr "Домен %s не существует" +msgid "Valid consumer of QoS specs are: %s" +msgstr "" -#: cinder/virt/baremetal/dom.py:161 +#: cinder/volume/qos_specs.py:84 cinder/volume/qos_specs.py:105 +#: cinder/volume/qos_specs.py:155 cinder/volume/qos_specs.py:197 +#: cinder/volume/qos_specs.py:211 cinder/volume/qos_specs.py:225 +#: cinder/volume/volume_types.py:43 #, python-format -msgid "Domains: %s" +msgid "DB error: %s" +msgstr "Ошибка БД: %s" + +#: cinder/volume/qos_specs.py:123 cinder/volume/qos_specs.py:140 +#: cinder/volume/qos_specs.py:272 cinder/volume/volume_types.py:52 +#: cinder/volume/volume_types.py:99 +msgid "id cannot be None" msgstr "" -#: cinder/virt/baremetal/dom.py:163 +#: cinder/volume/qos_specs.py:156 #, python-format -msgid "Nodes: %s" +msgid "Failed to get all associations of qos specs %s" msgstr "" -#: cinder/virt/baremetal/dom.py:166 +#: cinder/volume/qos_specs.py:189 #, python-format -msgid "After storing domains: %s" +msgid "" +"Type %(type_id)s is already associated with another qos specs: " +"%(qos_specs_id)s" msgstr "" -#: cinder/virt/baremetal/dom.py:169 -msgid "deactivation/removing domain failed" +#: cinder/volume/qos_specs.py:198 +#, python-format +msgid "Failed to associate qos specs %(id)s with type: %(vol_type_id)s" msgstr "" -#: cinder/virt/baremetal/dom.py:176 -msgid "===== Domain is being created =====" +#: cinder/volume/qos_specs.py:212 +#, python-format +msgid "Failed to disassociate qos specs %(id)s with type: %(vol_type_id)s" msgstr "" -#: cinder/virt/baremetal/dom.py:179 -msgid "Same domain name already exists" +#: cinder/volume/qos_specs.py:226 +#, python-format +msgid "Failed to disassociate qos specs %s." msgstr "" -#: cinder/virt/baremetal/dom.py:181 -msgid "create_domain: before get_idle_node" +#: cinder/volume/qos_specs.py:284 cinder/volume/volume_types.py:111 +msgid "name cannot be None" msgstr "" -#: cinder/virt/baremetal/dom.py:198 +#: cinder/volume/utils.py:144 #, python-format -msgid "Created new domain: %s" +msgid "" +"Incorrect value error: %(blocksize)s, it may indicate that " +"'volume_dd_blocksize' was configured incorrectly. Fall back to default." msgstr "" -#: cinder/virt/baremetal/dom.py:213 +#: cinder/volume/volume_types.py:130 #, python-format -msgid "Failed to boot Bare-metal node %s" +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" msgstr "" -#: cinder/virt/baremetal/dom.py:222 -msgid "No such domain exists" +#: cinder/volume/drivers/block_device.py:131 +#: cinder/volume/drivers/block_device.py:143 cinder/volume/drivers/lvm.py:654 +#: cinder/volume/drivers/lvm.py:669 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" msgstr "" -#: cinder/virt/baremetal/dom.py:226 +#: cinder/volume/drivers/block_device.py:157 cinder/volume/drivers/lvm.py:687 #, python-format -msgid "change_domain_state: to new state %s" +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %s" msgstr "" -#: cinder/virt/baremetal/dom.py:233 +#: cinder/volume/drivers/block_device.py:183 cinder/volume/drivers/lvm.py:483 #, python-format -msgid "Stored fake domains to the file: %s" +msgid "Skipping ensure_export. 
No iscsi_target provision for volume: %s" msgstr "" -#: cinder/virt/baremetal/dom.py:244 -msgid "domain does not exist" +#: cinder/volume/drivers/block_device.py:200 cinder/volume/drivers/lvm.py:504 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" msgstr "" -#: cinder/virt/baremetal/nodes.py:42 +#: cinder/volume/drivers/block_device.py:272 cinder/volume/drivers/lvm.py:227 #, python-format -msgid "Unknown baremetal driver %(d)s" +msgid "Performing secure delete on volume: %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:148 +#: cinder/volume/drivers/block_device.py:287 #, python-format -msgid "Error encountered when destroying instance '%(name)s': %(ex)s" +msgid "Error unrecognized volume_clear option: %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:162 -#, python-format -msgid "instance %(instance_name)s: deleting instance files %(target)s" -msgstr "копия %(instance_name)s: удаление файлов копии %(target)s" +#: cinder/volume/drivers/block_device.py:311 cinder/volume/drivers/lvm.py:300 +#: cinder/volume/drivers/zadara.py:509 cinder/volume/drivers/nexenta/nfs.py:189 +#, fuzzy, python-format +msgid "Creating clone of volume: %s" +msgstr "Создать снимок тома %s" -#: cinder/virt/baremetal/proxy.py:189 -#, python-format -msgid "instance %s: rebooted" +#: cinder/volume/drivers/block_device.py:380 +msgid "No free disk" msgstr "" -#: cinder/virt/baremetal/proxy.py:192 -msgid "_wait_for_reboot failed" +#: cinder/volume/drivers/block_device.py:393 +msgid "No big enough free disk" msgstr "" -#: cinder/virt/baremetal/proxy.py:222 +#: cinder/volume/drivers/coraid.py:84 #, python-format -msgid "instance %s: rescued" +msgid "Invalid ESM url scheme \"%s\". Supported https only." msgstr "" -#: cinder/virt/baremetal/proxy.py:225 -msgid "_wait_for_rescue failed" +#: cinder/volume/drivers/coraid.py:111 +msgid "Invalid REST handle name. Expected path." msgstr "" -#: cinder/virt/baremetal/proxy.py:242 -msgid "<============= spawn of baremetal =============>" +#: cinder/volume/drivers/coraid.py:134 +#, python-format +msgid "Call to json.loads() failed: %(ex)s. Response: %(resp)s" msgstr "" -#: cinder/virt/baremetal/proxy.py:255 -#, python-format -msgid "instance %s: is building" +#: cinder/volume/drivers/coraid.py:224 +msgid "Session is expired. Relogin on ESM." msgstr "" -#: cinder/virt/baremetal/proxy.py:260 -msgid "Key is injected but instance is not running yet" +#: cinder/volume/drivers/coraid.py:244 +msgid "Reply is empty." msgstr "" -#: cinder/virt/baremetal/proxy.py:265 -#, python-format -msgid "instance %s: booted" +#: cinder/volume/drivers/coraid.py:246 +msgid "Error message is empty." msgstr "" -#: cinder/virt/baremetal/proxy.py:268 +#: cinder/volume/drivers/coraid.py:284 #, python-format -msgid "~~~~~~ current state = %s ~~~~~~" +msgid "Coraid Appliance ping failed: %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:269 +#: cinder/volume/drivers/coraid.py:297 #, python-format -msgid "instance %s spawned successfully" +msgid "Volume \"%(name)s\" created with VSX LUN \"%(lun)s\"" msgstr "" -#: cinder/virt/baremetal/proxy.py:272 +#: cinder/volume/drivers/coraid.py:311 #, python-format -msgid "instance %s:not booted" +msgid "Volume \"%s\" deleted." msgstr "" -#: cinder/virt/baremetal/proxy.py:274 -msgid "Bremetal assignment is overcommitted." +#: cinder/volume/drivers/coraid.py:315 +#, python-format +msgid "Resize volume \"%(name)s\" to %(size)s GB." 
msgstr "" -#: cinder/virt/baremetal/proxy.py:354 +#: cinder/volume/drivers/coraid.py:319 #, python-format -msgid "instance %s: Creating image" -msgstr "копия %s: Создание образа" +msgid "Repository for volume \"%(name)s\" found: \"%(repo)s\"" +msgstr "" -#: cinder/virt/baremetal/proxy.py:473 +#: cinder/volume/drivers/coraid.py:333 #, python-format -msgid "instance %(inst_name)s: injecting %(injection)s into image %(img_id)s" +msgid "Volume \"%(name)s\" resized. New size is %(size)s GB." msgstr "" -#: cinder/virt/baremetal/proxy.py:484 -#, python-format -msgid "" -"instance %(inst_name)s: ignoring error injecting data into image " -"%(img_id)s (%(e)s)" +#: cinder/volume/drivers/coraid.py:385 +msgid "Cannot create clone volume in different repository." msgstr "" -#: cinder/virt/baremetal/proxy.py:529 +#: cinder/volume/drivers/coraid.py:505 #, python-format -msgid "instance %s: starting toXML method" +msgid "Initialize connection %(shelf)s/%(lun)s for %(name)s" msgstr "" -#: cinder/virt/baremetal/proxy.py:531 +#: cinder/volume/drivers/eqlx.py:139 #, python-format -msgid "instance %s: finished toXML method" +msgid "" +"CLI output\n" +"%s" msgstr "" -#: cinder/virt/baremetal/proxy.py:574 cinder/virt/libvirt/connection.py:1815 -msgid "" -"Cannot get the number of cpu, because this function is not implemented " -"for this platform. This error can be safely ignored for now." +#: cinder/volume/drivers/eqlx.py:154 +msgid "Reading CLI MOTD" msgstr "" -"Невозможно получить количество ЦПУ, так как эта возможность не " -"реализована для этой платформы. Эту ошибку можно безопасно пропустить." -#: cinder/virt/baremetal/proxy.py:714 +#: cinder/volume/drivers/eqlx.py:158 #, python-format -msgid "#### RLK: cpu_arch = %s " +msgid "Setting CLI terminal width: '%s'" msgstr "" -#: cinder/virt/baremetal/proxy.py:746 -msgid "Updating!" +#: cinder/volume/drivers/eqlx.py:162 +#, python-format +msgid "Sending CLI command: '%s'" msgstr "" -#: cinder/virt/baremetal/proxy.py:773 cinder/virt/libvirt/connection.py:2609 -#: cinder/virt/xenapi/host.py:129 -msgid "Updating host stats" +#: cinder/volume/drivers/eqlx.py:169 +msgid "Error executing EQL command" msgstr "" -#: cinder/virt/baremetal/tilera.py:185 -msgid "free_node...." 
+#: cinder/volume/drivers/eqlx.py:199 +#, python-format +msgid "EQL-driver: executing \"%s\"" msgstr "" -#: cinder/virt/baremetal/tilera.py:216 +#: cinder/volume/drivers/eqlx.py:208 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:383 #, python-format -msgid "deactivate_node is called for node_id = %(id)s node_ip = %(ip)s" +msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" msgstr "" -#: cinder/virt/baremetal/tilera.py:221 -msgid "status of node is set to 0" +#: cinder/volume/drivers/eqlx.py:215 cinder/volume/drivers/san/san.py:149 +#, fuzzy, python-format +msgid "Error running SSH command: %s" +msgstr "Ошибка в соглашении: %s" + +#: cinder/volume/drivers/eqlx.py:282 +#, python-format +msgid "Volume %s does not exist, it may have already been deleted" msgstr "" -#: cinder/virt/baremetal/tilera.py:232 -msgid "rootfs is already removed" +#: cinder/volume/drivers/eqlx.py:300 +#, python-format +msgid "EQL-driver: Setup is complete, group IP is %s" msgstr "" -#: cinder/virt/baremetal/tilera.py:264 -msgid "Before ping to the bare-metal node" +#: cinder/volume/drivers/eqlx.py:304 +msgid "Failed to setup the Dell EqualLogic driver" msgstr "" -#: cinder/virt/baremetal/tilera.py:275 +#: cinder/volume/drivers/eqlx.py:320 #, python-format -msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is ready" +msgid "Failed to create volume %s" msgstr "" -#: cinder/virt/baremetal/tilera.py:279 +#: cinder/volume/drivers/eqlx.py:329 #, python-format -msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is not ready, out_msg=%(out_msg)s" +msgid "Volume %s was not found while trying to delete it" msgstr "" -#: cinder/virt/baremetal/tilera.py:292 -msgid "Noting to do for tilera nodes: vmlinux is in CF" +#: cinder/volume/drivers/eqlx.py:333 +#, python-format +msgid "Failed to delete volume %s" msgstr "" -#: cinder/virt/baremetal/tilera.py:316 -msgid "activate_node" +#: cinder/volume/drivers/eqlx.py:348 +#, python-format +msgid "Failed to create snapshot of volume %s" msgstr "" -#: cinder/virt/baremetal/tilera.py:330 -msgid "Node is unknown error state." +#: cinder/volume/drivers/eqlx.py:361 +#, python-format +msgid "Failed to create volume from snapshot %s" msgstr "" -#: cinder/virt/disk/api.py:165 -msgid "no capable image handler configured" -msgstr "нет настроенного совместимого обработчика образа" - -#: cinder/virt/disk/api.py:178 +#: cinder/volume/drivers/eqlx.py:374 #, python-format -msgid "unknown disk image handler: %s" -msgstr "неизвестный обработчик образа диска: %s" +msgid "Failed to create clone of volume %s" +msgstr "" -#: cinder/virt/disk/api.py:189 -msgid "image already mounted" -msgstr "образ уже присоединён" +#: cinder/volume/drivers/eqlx.py:384 +#, python-format +msgid "Failed to delete snapshot %(snap)s of volume %(vol)s" +msgstr "" -#: cinder/virt/disk/api.py:276 cinder/virt/disk/guestfs.py:64 -#: cinder/virt/disk/guestfs.py:78 cinder/virt/disk/mount.py:100 +#: cinder/volume/drivers/eqlx.py:405 #, python-format -msgid "Failed to mount filesystem: %s" -msgstr "Ошибка монтирования файловой системы: %s" +msgid "Failed to initialize connection to volume %s" +msgstr "" -#: cinder/virt/disk/api.py:291 +#: cinder/volume/drivers/eqlx.py:415 #, python-format -msgid "Failed to remove container: %s" -msgstr "Ошибка удаления контейнера: %s" +msgid "Failed to terminate connection to volume %s" +msgstr "" -#: cinder/virt/disk/api.py:441 +#: cinder/volume/drivers/eqlx.py:436 #, python-format -msgid "User %(username)s not found in password file." 
+msgid "Volume %s is not found!, it may have been deleted" msgstr "" -#: cinder/virt/disk/api.py:457 +#: cinder/volume/drivers/eqlx.py:440 #, python-format -msgid "User %(username)s not found in shadow file." +msgid "Failed to ensure export of volume %s" msgstr "" -#: cinder/virt/disk/guestfs.py:39 +#: cinder/volume/drivers/eqlx.py:459 #, python-format -msgid "unsupported partition: %s" -msgstr "неподдерживаемый раздел: %s" +msgid "Failed to extend_volume %(name)s from %(current_size)sGB to %(new_size)sGB" +msgstr "" -#: cinder/virt/disk/guestfs.py:77 -msgid "unknown guestmount error" +#: cinder/volume/drivers/glusterfs.py:86 +#, python-format +msgid "There's no Gluster config file configured (%s)" msgstr "" -#: cinder/virt/disk/loop.py:30 +#: cinder/volume/drivers/glusterfs.py:91 #, python-format -msgid "Could not attach image to loopback: %s" -msgstr "Невозможно прикрепить образ для замыкания: %s" +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" -#: cinder/virt/disk/mount.py:76 -msgid "no partitions found" -msgstr "разделы не найдена" +#: cinder/volume/drivers/glusterfs.py:103 +msgid "mount.glusterfs is not installed" +msgstr "" -#: cinder/virt/disk/mount.py:77 +#: cinder/volume/drivers/glusterfs.py:161 #, python-format -msgid "Failed to map partitions: %s" +msgid "Cloning volume %(src)s to volume %(dst)s" msgstr "" -#: cinder/virt/disk/nbd.py:58 -msgid "nbd unavailable: module not loaded" +#: cinder/volume/drivers/glusterfs.py:166 +msgid "Volume status must be 'available'." msgstr "" -#: cinder/virt/disk/nbd.py:63 -msgid "No free nbd devices" -msgstr "Нет свободных устройств nbd" +#: cinder/volume/drivers/glusterfs.py:202 cinder/volume/drivers/nfs.py:122 +#: cinder/volume/drivers/netapp/nfs.py:753 +#, fuzzy, python-format +msgid "casted to %s" +msgstr "_создать: %s" -#: cinder/virt/disk/nbd.py:81 -#, python-format -msgid "qemu-nbd error: %s" -msgstr "ошибка qemu-nbd: %s" +#: cinder/volume/drivers/glusterfs.py:216 +msgid "Snapshot status must be \"available\" to clone." +msgstr "" -#: cinder/virt/disk/nbd.py:93 +#: cinder/volume/drivers/glusterfs.py:238 #, python-format -msgid "nbd device %s did not show up" +msgid "snapshot: %(snap)s, volume: %(vol)s, volume_size: %(size)s" msgstr "" -#: cinder/virt/libvirt/connection.py:265 +#: cinder/volume/drivers/glusterfs.py:257 #, python-format -msgid "Connecting to libvirt: %s" -msgstr "Подключение к libvirt: %s" - -#: cinder/virt/libvirt/connection.py:286 -msgid "Connection to libvirt broke" -msgstr "Неполадка с подключением к libvirt" +msgid "will copy from snapshot at %s" +msgstr "" -#: cinder/virt/libvirt/connection.py:388 +#: cinder/volume/drivers/glusterfs.py:275 cinder/volume/drivers/nfs.py:172 #, python-format -msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s" +msgid "Volume %s does not have provider_location specified, skipping" msgstr "" -#: cinder/virt/libvirt/connection.py:400 +#: cinder/volume/drivers/glusterfs.py:373 #, python-format -msgid "" -"Error from libvirt during saved instance removal. Code=%(errcode)s " -"Error=%(e)s" +msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" msgstr "" -#: cinder/virt/libvirt/connection.py:411 +#: cinder/volume/drivers/glusterfs.py:403 #, python-format -msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s" +msgid "nova call result: %s" msgstr "" -#: cinder/virt/libvirt/connection.py:423 -msgid "Instance destroyed successfully." 
+#: cinder/volume/drivers/glusterfs.py:405 +msgid "Call to Nova to create snapshot failed" msgstr "" -#: cinder/virt/libvirt/connection.py:435 -#, python-format -msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s" +#: cinder/volume/drivers/glusterfs.py:427 +msgid "Nova returned \"error\" status while creating snapshot." msgstr "" -#: cinder/virt/libvirt/connection.py:461 +#: cinder/volume/drivers/glusterfs.py:431 #, python-format -msgid "Deleting instance files %(target)s" +msgid "Status of snapshot %(id)s is now %(status)s" msgstr "" -#: cinder/virt/libvirt/connection.py:554 -msgid "attaching LXC block device" +#: cinder/volume/drivers/glusterfs.py:444 +#, python-format +msgid "Timed out while waiting for Nova update for creation of snapshot %s." msgstr "" -#: cinder/virt/libvirt/connection.py:567 -msgid "detaching LXC block device" +#: cinder/volume/drivers/glusterfs.py:456 +#, python-format +msgid "create snapshot: %s" msgstr "" -#: cinder/virt/libvirt/connection.py:692 -#, fuzzy, python-format -msgid "Instance soft rebooted successfully." -msgstr "снимок %s: создание завершено" - -#: cinder/virt/libvirt/connection.py:696 -#, fuzzy -msgid "Failed to soft reboot instance." -msgstr "Ошибка перезагрузки копии" +#: cinder/volume/drivers/glusterfs.py:457 +#, python-format +msgid "volume id: %s" +msgstr "" -#: cinder/virt/libvirt/connection.py:725 -msgid "Instance shutdown successfully." +#: cinder/volume/drivers/glusterfs.py:532 +msgid "'active' must be present when writing snap_info." msgstr "" -#: cinder/virt/libvirt/connection.py:761 cinder/virt/libvirt/connection.py:905 -msgid "During reboot, instance disappeared." +#: cinder/volume/drivers/glusterfs.py:562 +#, python-format +msgid "deleting snapshot %s" msgstr "" -#: cinder/virt/libvirt/connection.py:766 -msgid "Instance rebooted successfully." +#: cinder/volume/drivers/glusterfs.py:566 +msgid "Volume status must be \"available\" or \"in-use\"." msgstr "" -#: cinder/virt/libvirt/connection.py:867 cinder/virt/xenapi/vmops.py:1358 +#: cinder/volume/drivers/glusterfs.py:582 #, python-format msgid "" -"Found %(migration_count)d unconfirmed migrations older than " -"%(confirm_window)d seconds" +"Snapshot record for %s is not present, allowing snapshot_delete to " +"proceed." msgstr "" -"Найдены %(migration_count)d неподтверждённых перемещений, старше " -"%(confirm_window)d секунд" -#: cinder/virt/libvirt/connection.py:871 +#: cinder/volume/drivers/glusterfs.py:587 #, python-format -msgid "Automatically confirming migration %d" -msgstr "Автоматические подтверждение перемещения %d" - -#: cinder/virt/libvirt/connection.py:896 -msgid "Instance is running" +msgid "snapshot_file for this snap is %s" msgstr "" -#: cinder/virt/libvirt/connection.py:910 -msgid "Instance spawned successfully." +#: cinder/volume/drivers/glusterfs.py:608 +#, python-format +msgid "No base file found for %s." msgstr "" -#: cinder/virt/libvirt/connection.py:926 +#: cinder/volume/drivers/glusterfs.py:625 #, python-format -msgid "data: %(data)r, fpath: %(fpath)r" +msgid "No %(base_id)s found for %(file)s" msgstr "" -#: cinder/virt/libvirt/connection.py:978 -#, fuzzy -msgid "Guest does not have a console available" -msgstr "Пользователь не имеет административных привилегий" +#: cinder/volume/drivers/glusterfs.py:680 +#, python-format +msgid "No file found with %s as backing file." 
+msgstr "" -#: cinder/virt/libvirt/connection.py:1020 +#: cinder/volume/drivers/glusterfs.py:690 #, python-format -msgid "Path '%(path)s' supports direct I/O" +msgid "No snap found with %s as backing file." msgstr "" -#: cinder/virt/libvirt/connection.py:1024 +#: cinder/volume/drivers/glusterfs.py:701 #, python-format -msgid "Path '%(path)s' does not support direct I/O: '%(ex)s'" +msgid "No file depends on %s." msgstr "" -#: cinder/virt/libvirt/connection.py:1028 cinder/virt/libvirt/connection.py:1032 +#: cinder/volume/drivers/glusterfs.py:727 #, python-format -msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'" +msgid "Check condition failed: %s expected to be None." msgstr "" -#: cinder/virt/libvirt/connection.py:1153 -msgid "Creating image" -msgstr "Создание изображения" +#: cinder/volume/drivers/glusterfs.py:778 +msgid "Call to Nova delete snapshot failed" +msgstr "" -#: cinder/virt/libvirt/connection.py:1339 +#: cinder/volume/drivers/glusterfs.py:796 #, python-format -msgid "Injecting %(injection)s into image %(img_id)s" +msgid "status of snapshot %s is still \"deleting\"... waiting" msgstr "" -#: cinder/virt/libvirt/connection.py:1349 +#: cinder/volume/drivers/glusterfs.py:802 #, python-format -msgid "Ignoring error injecting data into image %(img_id)s (%(e)s)" +msgid "Unable to delete snapshot %(id)s, status: %(status)s." msgstr "" -#: cinder/virt/libvirt/connection.py:1381 +#: cinder/volume/drivers/glusterfs.py:815 #, python-format -msgid "block_device_list %s" -msgstr "block_device_list %s" +msgid "Timed out while waiting for Nova update for deletion of snapshot %(id)s." +msgstr "" -#: cinder/virt/libvirt/connection.py:1658 -msgid "Starting toXML method" +#: cinder/volume/drivers/glusterfs.py:904 +#, python-format +msgid "%s must be a valid raw or qcow2 image." msgstr "" -#: cinder/virt/libvirt/connection.py:1662 -msgid "Finished toXML method" +#: cinder/volume/drivers/glusterfs.py:967 +msgid "Extend volume is only supported for this driver when no snapshots exist." 
msgstr "" -#: cinder/virt/libvirt/connection.py:1679 +#: cinder/volume/drivers/glusterfs.py:975 #, python-format -msgid "" -"Error from libvirt while looking up %(instance_name)s: [Error Code " -"%(error_code)s] %(ex)s" +msgid "Unrecognized backing format: %s" msgstr "" -"Ошибка libvirt во время поиска %(instance_name)s: [Код ошибки " -"%(error_code)s] %(ex)s" - -#: cinder/virt/libvirt/connection.py:1920 -msgid "libvirt version is too old (does not support getVersion)" -msgstr "устаревшая версия libvirt (не поддерживается getVersion)" -#: cinder/virt/libvirt/connection.py:1942 +#: cinder/volume/drivers/glusterfs.py:990 #, python-format -msgid "'' must be 1, but %d\n" -msgstr "'' должно быть 1, но %d\n" +msgid "creating new volume at %s" +msgstr "" -#: cinder/virt/libvirt/connection.py:1969 +#: cinder/volume/drivers/glusterfs.py:993 #, python-format -msgid "topology (%(topology)s) must have %(ks)s" +msgid "file already exists at %s" msgstr "" -#: cinder/virt/libvirt/connection.py:2067 +#: cinder/volume/drivers/glusterfs.py:1019 cinder/volume/drivers/nfs.py:159 +#, fuzzy, python-format +msgid "Exception during mounting %s" +msgstr "Расширенный ресурс: %s" + +#: cinder/volume/drivers/glusterfs.py:1021 #, python-format -msgid "" -"Instance launched has CPU info:\n" -"%s" +msgid "Available shares: %s" msgstr "" -#: cinder/virt/libvirt/connection.py:2079 +#: cinder/volume/drivers/glusterfs.py:1038 #, python-format msgid "" -"CPU doesn't have compatibility.\n" -"\n" -"%(ret)s\n" -"\n" -"Refer to %(u)s" +"GlusterFS share at %(dir)s is not writable by the Cinder volume service. " +"Snapshot operations will not be supported." msgstr "" -#: cinder/virt/libvirt/connection.py:2136 +#: cinder/volume/drivers/gpfs.py:96 #, python-format -msgid "Timeout migrating for %s. nwfilter not found." +msgid "GPFS is not active. Detailed output: %s" msgstr "" -#: cinder/virt/libvirt/connection.py:2352 +#: cinder/volume/drivers/gpfs.py:97 #, python-format -msgid "skipping %(path)s since it looks like volume" +msgid "GPFS is not running - state: %s" msgstr "" -#: cinder/virt/libvirt/connection.py:2407 -#, python-format -msgid "Getting disk size of %(i_name)s: %(e)s" +#: cinder/volume/drivers/gpfs.py:140 +msgid "Option gpfs_mount_point_base is not set correctly." msgstr "" -#: cinder/virt/libvirt/connection.py:2458 -#, python-format -msgid "Instance %s: Starting migrate_disk_and_power_off" +#: cinder/volume/drivers/gpfs.py:147 +msgid "Option gpfs_images_share_mode is not set correctly." msgstr "" -#: cinder/virt/libvirt/connection.py:2513 -msgid "During wait running, instance disappeared." +#: cinder/volume/drivers/gpfs.py:153 +msgid "Option gpfs_images_dir is not set correctly." msgstr "" -#: cinder/virt/libvirt/connection.py:2518 -#, fuzzy, python-format -msgid "Instance running successfully." -msgstr "Копия %s: выполнение" - -#: cinder/virt/libvirt/connection.py:2525 +#: cinder/volume/drivers/gpfs.py:160 #, python-format -msgid "Instance %s: Starting finish_migration" +msgid "" +"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " +"belong to different file systems" msgstr "" -#: cinder/virt/libvirt/connection.py:2565 +#: cinder/volume/drivers/gpfs.py:169 #, python-format -msgid "Instance %s: Starting finish_revert_migration" -msgstr "" - -#: cinder/virt/libvirt/firewall.py:42 msgid "" -"Libvirt module could not be loaded. NWFilterFirewall will not work " -"correctly." +"Downlevel GPFS Cluster Detected. 
GPFS Clone feature not enabled in " +"cluster daemon level %(cur)s - must be at least at level %(min)s." msgstr "" -"Модуль Libvirt не может быть загружен. NWFilterFirewall не будет работать" -" надлежащим образом." -#: cinder/virt/libvirt/firewall.py:93 -msgid "Called setup_basic_filtering in nwfilter" +#: cinder/volume/drivers/gpfs.py:183 +#, python-format +msgid "%s must be an absolute path." msgstr "" -#: cinder/virt/libvirt/firewall.py:101 -msgid "Ensuring static filters" +#: cinder/volume/drivers/gpfs.py:188 +#, python-format +msgid "%s is not a directory." msgstr "" -#: cinder/virt/libvirt/firewall.py:171 +#: cinder/volume/drivers/gpfs.py:197 #, python-format -msgid "The nwfilter(%(instance_filter_name)s) is not found." +msgid "" +"The GPFS filesystem %(fs)s is not at the required release level. Current" +" level is %(cur)s, must be at least %(min)s." msgstr "" -#: cinder/virt/libvirt/firewall.py:217 +#: cinder/volume/drivers/gpfs.py:556 #, python-format -msgid "The nwfilter(%(instance_filter_name)s) for%(name)s is not found." -msgstr "nwfilter(%(instance_filter_name)s) для%(name)s не найден." - -#: cinder/virt/libvirt/firewall.py:233 -msgid "iptables firewall: Setup Basic Filtering" -msgstr "Брандмауэр iptables: Настройка базовой фильтрации" - -#: cinder/virt/libvirt/firewall.py:252 -msgid "Attempted to unfilter instance which is not filtered" +msgid "Failed to resize volume %(volume_id)s, error: %(error)s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:170 +#: cinder/volume/drivers/gpfs.py:604 #, python-format -msgid "%s is a valid instance name" +msgid "mkfs failed on volume %(vol)s, error message was: %(err)s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:173 +#: cinder/volume/drivers/gpfs.py:637 #, python-format -msgid "%s has a disk file" +msgid "" +"%s cannot be accessed. Verify that GPFS is active and file system is " +"mounted." msgstr "" -#: cinder/virt/libvirt/imagecache.py:175 +#: cinder/volume/drivers/lvm.py:189 #, python-format -msgid "Instance %(instance)s is backed by %(backing)s" +msgid "Unabled to delete due to existing snapshot for volume: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:186 +#: cinder/volume/drivers/lvm.py:215 #, python-format -msgid "" -"Instance %(instance)s is using a backing file %(backing)s which does not " -"appear in the image service" +msgid "Volume device file path %s does not exist." msgstr "" -#: cinder/virt/libvirt/imagecache.py:237 +#: cinder/volume/drivers/lvm.py:221 #, python-format -msgid "%(id)s (%(base_file)s): image verification failed" +msgid "Size for volume: %s not found, cannot secure delete." 
msgstr "" -#: cinder/virt/libvirt/imagecache.py:247 +#: cinder/volume/drivers/lvm.py:262 #, python-format -msgid "%(id)s (%(base_file)s): image verification skipped, no hash stored" +msgid "snapshot: %s not found, skipping delete operations" msgstr "" -#: cinder/virt/libvirt/imagecache.py:266 +#: cinder/volume/drivers/lvm.py:361 #, python-format -msgid "Cannot remove %(base_file)s, it does not exist" +msgid "Unable to update stats on non-initialized Volume Group: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:278 +#: cinder/volume/drivers/lvm.py:462 #, python-format -msgid "Base file too young to remove: %s" +msgid "Error creating iSCSI target, retrying creation for target: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:281 +#: cinder/volume/drivers/lvm.py:482 #, python-format -msgid "Removing base file: %s" +msgid "volume_info:%s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:288 -#, python-format -msgid "Failed to remove %(base_file)s, error was %(error)s" +#: cinder/volume/drivers/lvm.py:518 +msgid "Detected inconsistency in provider_location id" msgstr "" -#: cinder/virt/libvirt/imagecache.py:299 +#: cinder/volume/drivers/lvm.py:519 cinder/volume/drivers/lvm.py:724 +#: cinder/volume/drivers/huawei/rest_common.py:1225 #, python-format -msgid "%(id)s (%(base_file)s): checking" +msgid "%s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:318 -#, python-format -msgid "" -"%(id)s (%(base_file)s): in use: on this node %(local)d local, %(remote)d " -"on other nodes" +#: cinder/volume/drivers/lvm.py:573 +#, fuzzy, python-format +msgid "Symbolic link %s not found" +msgstr "маркер [%s] не найден" + +#: cinder/volume/drivers/nfs.py:109 +msgid "Driver specific implementation needs to return mount_point_base." msgstr "" -#: cinder/virt/libvirt/imagecache.py:330 +#: cinder/volume/drivers/nfs.py:263 #, python-format -msgid "" -"%(id)s (%(base_file)s): warning -- an absent base file is in use! " -"instances: %(instance_list)s" +msgid "Expected volume size was %d" msgstr "" -#: cinder/virt/libvirt/imagecache.py:338 +#: cinder/volume/drivers/nfs.py:264 #, python-format -msgid "%(id)s (%(base_file)s): in use on (%(remote)d on other nodes)" +msgid " but size is now %d" msgstr "" -#: cinder/virt/libvirt/imagecache.py:348 +#: cinder/volume/drivers/nfs.py:361 +#, fuzzy, python-format +msgid "%s is already mounted" +msgstr "образ уже присоединён" + +#: cinder/volume/drivers/nfs.py:421 #, python-format -msgid "%(id)s (%(base_file)s): image is not in use" +msgid "There's no NFS config file configured (%s)" msgstr "" -#: cinder/virt/libvirt/imagecache.py:354 +#: cinder/volume/drivers/nfs.py:426 #, python-format -msgid "%(id)s (%(base_file)s): image is in use" +msgid "NFS config file at %(config)s doesn't exist" msgstr "" -#: cinder/virt/libvirt/imagecache.py:377 +#: cinder/volume/drivers/nfs.py:431 #, python-format -msgid "Skipping verification, no base directory at %s" +msgid "NFS config 'nfs_oversub_ratio' invalid. Must be > 0: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:381 -msgid "Verify base images" +#: cinder/volume/drivers/nfs.py:439 +#, python-format +msgid "NFS config 'nfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:388 +#: cinder/volume/drivers/nfs.py:493 #, python-format -msgid "Image id %(id)s yields fingerprint %(fingerprint)s" +msgid "Selected %s as target nfs share." 
msgstr "" -#: cinder/virt/libvirt/imagecache.py:406 +#: cinder/volume/drivers/nfs.py:526 #, python-format -msgid "Unknown base file: %s" +msgid "%s is above nfs_used_ratio" msgstr "" -#: cinder/virt/libvirt/imagecache.py:411 +#: cinder/volume/drivers/nfs.py:529 #, python-format -msgid "Active base files: %s" +msgid "%s is above nfs_oversub_ratio" msgstr "" -#: cinder/virt/libvirt/imagecache.py:414 +#: cinder/volume/drivers/nfs.py:532 #, python-format -msgid "Corrupt base files: %s" +msgid "%s reserved space is above nfs_oversub_ratio" msgstr "" -#: cinder/virt/libvirt/imagecache.py:418 +#: cinder/volume/drivers/rbd.py:160 #, python-format -msgid "Removable base files: %s" +msgid "Invalid argument - whence=%s not supported" msgstr "" -#: cinder/virt/libvirt/imagecache.py:426 -msgid "Verification complete" +#: cinder/volume/drivers/rbd.py:164 +msgid "Invalid argument" msgstr "" -#: cinder/virt/libvirt/utils.py:264 -msgid "Unable to find an open port" -msgstr "Невозможно найти открытый порт" +#: cinder/volume/drivers/rbd.py:183 +msgid "fileno() not supported by RBD()" +msgstr "" -#: cinder/virt/libvirt/vif.py:90 -#, python-format -msgid "Ensuring vlan %(vlan)s and bridge %(bridge)s" +#: cinder/volume/drivers/rbd.py:210 +#, fuzzy, python-format +msgid "error opening rbd image %s" +msgstr "Ошибка запуска xvp: %s" + +#: cinder/volume/drivers/rbd.py:259 +msgid "rados and rbd python libraries not found" msgstr "" -#: cinder/virt/libvirt/vif.py:99 -#, python-format -msgid "Ensuring bridge %s" +#: cinder/volume/drivers/rbd.py:265 +#, fuzzy +msgid "error connecting to ceph cluster" +msgstr "Подключение к libvirt: %s" + +#: cinder/volume/drivers/rbd.py:346 cinder/volume/drivers/sheepdog.py:178 +msgid "error refreshing volume stats" msgstr "" -#: cinder/virt/libvirt/vif.py:165 cinder/virt/libvirt/vif.py:220 +#: cinder/volume/drivers/rbd.py:377 #, python-format -msgid "Failed while unplugging vif of instance '%s'" -msgstr "Ошибка отсоединения vif копии '%s'" +msgid "clone depth exceeds limit of %s" +msgstr "" -#: cinder/virt/libvirt/volume.py:163 +#: cinder/volume/drivers/rbd.py:411 #, python-format -msgid "iSCSI device not found at %s" -msgstr "iSCSI-устройство не найдено в %s" +msgid "maximum clone depth (%d) has been reached - flattening source volume" +msgstr "" -#: cinder/virt/libvirt/volume.py:166 +#: cinder/volume/drivers/rbd.py:423 #, python-format -msgid "" -"ISCSI volume not yet found at: %(mount_device)s. Will rescan & retry. " -"Try number: %(tries)s" +msgid "flattening source volume %s" msgstr "" -"Том ISCSI не найден в: %(mount_device)s. Будет выполнена повторная " -"проверка и попытка. 
Повторение: %(tries)s" -#: cinder/virt/libvirt/volume.py:178 +#: cinder/volume/drivers/rbd.py:435 #, python-format -msgid "Found iSCSI node %(mount_device)s (after %(tries)s rescans)" -msgstr "Найден узел iSCSI %(mount_device)s (после %(tries)s повторных проверок)" +msgid "creating snapshot='%s'" +msgstr "" -#: cinder/virt/vmwareapi/error_util.py:93 +#: cinder/volume/drivers/rbd.py:445 #, python-format -msgid "Error(s) %s occurred in the call to RetrieveProperties" +msgid "cloning '%(src_vol)s@%(src_snap)s' to '%(dest)s'" msgstr "" -#: cinder/virt/vmwareapi/fake.py:44 cinder/virt/xenapi/fake.py:77 -#, python-format -msgid "%(text)s: _db_content => %(content)s" -msgstr "%(text)s: _db_content => %(content)s" +#: cinder/volume/drivers/rbd.py:459 +msgid "clone created successfully" +msgstr "" -#: cinder/virt/vmwareapi/fake.py:131 +#: cinder/volume/drivers/rbd.py:468 #, python-format -msgid "Property %(attr)s not set for the managed object %(objName)s" -msgstr "Свойство %(attr)s не назначено для управляемого объекта %(objName)s" - -#: cinder/virt/vmwareapi/fake.py:437 -msgid "There is no VM registered" -msgstr "Отсутствуют зарегистрированные ВМ" +msgid "creating volume '%s'" +msgstr "" -#: cinder/virt/vmwareapi/fake.py:439 cinder/virt/vmwareapi/fake.py:609 +#: cinder/volume/drivers/rbd.py:484 #, python-format -msgid "Virtual Machine with ref %s is not there" +msgid "flattening %(pool)s/%(img)s" msgstr "" -#: cinder/virt/vmwareapi/fake.py:502 +#: cinder/volume/drivers/rbd.py:490 #, python-format -msgid "Logging out a session that is invalid or already logged out: %s" -msgstr "Завершение сеанса с неполадками или уже завершённого сеанса: %s" +msgid "cloning %(pool)s/%(img)s@%(snap)s to %(dst)s" +msgstr "" -#: cinder/virt/vmwareapi/fake.py:517 -msgid "Session is faulty" -msgstr "Сеанс имеет неполадки" - -#: cinder/virt/vmwareapi/fake.py:520 -msgid "Session Invalid" -msgstr "Недопустимый сеанс" - -#: cinder/virt/vmwareapi/fake.py:606 -msgid " No Virtual Machine has been registered yet" -msgstr " Отсутствуют зарегистриованные виртуальные машины" +#: cinder/volume/drivers/rbd.py:527 +msgid "volume has no backup snaps" +msgstr "" -#: cinder/virt/vmwareapi/io_util.py:99 +#: cinder/volume/drivers/rbd.py:550 #, python-format -msgid "Glance image %s is in killed state" +msgid "volume %s is not a clone" msgstr "" -#: cinder/virt/vmwareapi/io_util.py:107 +#: cinder/volume/drivers/rbd.py:568 #, python-format -msgid "Glance image %(image_id)s is in unknown state - %(state)s" +msgid "deleting parent snapshot %s" msgstr "" -#: cinder/virt/vmwareapi/network_utils.py:128 -msgid "" -"ESX SOAP server returned an empty port group for the host system in its " -"response" +#: cinder/volume/drivers/rbd.py:579 +#, python-format +msgid "deleting parent %s" msgstr "" -#: cinder/virt/vmwareapi/network_utils.py:155 +#: cinder/volume/drivers/rbd.py:593 #, python-format -msgid "Creating Port Group with name %s on the ESX host" +msgid "volume %s no longer exists in backend" msgstr "" -#: cinder/virt/vmwareapi/network_utils.py:169 -#, python-format -msgid "Created Port Group with name %s on the ESX host" +#: cinder/volume/drivers/rbd.py:609 +msgid "volume has clone snapshot(s)" msgstr "" -#: cinder/virt/vmwareapi/read_write_util.py:150 +#: cinder/volume/drivers/rbd.py:625 #, python-format -msgid "Exception during HTTP connection close in VMWareHTTpWrite. Exception is %s" +msgid "deleting rbd volume %s" msgstr "" -#: cinder/virt/vmwareapi/vim.py:84 -msgid "Unable to import suds." -msgstr "Невозможно импортировать suds." 
- -#: cinder/virt/vmwareapi/vim.py:90 -msgid "Must specify vmwareapi_wsdl_loc" -msgstr "Необходимо задать vmwareapi_wsdl_loc" - -#: cinder/virt/vmwareapi/vim.py:145 -#, python-format -msgid "No such SOAP method '%s' provided by VI SDK" +#: cinder/volume/drivers/rbd.py:629 +msgid "" +"ImageBusy error raised while deleting rbd volume. This may have been " +"caused by a connection from a client that has crashed and, if so, may be " +"resolved by retrying the delete after 30 seconds has elapsed." msgstr "" -#: cinder/virt/vmwareapi/vim.py:150 -#, python-format -msgid "httplib error in %s: " +#: cinder/volume/drivers/rbd.py:642 +msgid "volume is a clone so cleaning references" msgstr "" -#: cinder/virt/vmwareapi/vim.py:157 -#, python-format -msgid "Socket error in %s: " +#: cinder/volume/drivers/rbd.py:696 +#, fuzzy, python-format +msgid "connection data: %s" +msgstr "Заданные данные: %s" + +#: cinder/volume/drivers/rbd.py:705 +msgid "Not stored in rbd" msgstr "" -#: cinder/virt/vmwareapi/vim.py:162 -#, python-format -msgid "Type error in %s: " +#: cinder/volume/drivers/rbd.py:709 +msgid "Blank components" msgstr "" -#: cinder/virt/vmwareapi/vim.py:166 -#, python-format -msgid "Exception in %s " -msgstr "Исключение в %s " +#: cinder/volume/drivers/rbd.py:712 +#, fuzzy +msgid "Not an rbd snapshot" +msgstr "Недопустимый снимок" -#: cinder/virt/vmwareapi/vmops.py:66 -msgid "Getting list of instances" -msgstr "Перечень копий" +#: cinder/volume/drivers/rbd.py:724 +#, fuzzy, python-format +msgid "not cloneable: %s" +msgstr "ответ %s" -#: cinder/virt/vmwareapi/vmops.py:82 +#: cinder/volume/drivers/rbd.py:728 #, python-format -msgid "Got total of %s instances" -msgstr "Всего %s копий" +msgid "%s is in a different ceph cluster" +msgstr "" -#: cinder/virt/vmwareapi/vmops.py:126 -msgid "Couldn't get a local Datastore reference" +#: cinder/volume/drivers/rbd.py:733 +msgid "rbd image clone requires image format to be 'raw' but image {0} is '{1}'" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:196 -#, python-format -msgid "Creating VM with the name %s on the ESX host" -msgstr "Создание ВМ с именем %s на узле ESX" +#: cinder/volume/drivers/rbd.py:747 +#, fuzzy, python-format +msgid "Unable to open image %(loc)s: %(err)s" +msgstr "Невозможно найти том %s" -#: cinder/virt/vmwareapi/vmops.py:205 -#, python-format -msgid "Created VM with the name %s on the ESX host" -msgstr "Создание ВМ с именем %s на узле ESX" +#: cinder/volume/drivers/rbd.py:817 +msgid "volume backup complete." +msgstr "" -#: cinder/virt/vmwareapi/vmops.py:234 -#, python-format -msgid "" -"Creating Virtual Disk of size %(vmdk_file_size_in_kb)s KB and adapter " -"type %(adapter_type)s on the ESX host local store %(data_store_name)s" +#: cinder/volume/drivers/rbd.py:830 +msgid "volume restore complete." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:251 +#: cinder/volume/drivers/rbd.py:840 cinder/volume/drivers/sheepdog.py:195 #, python-format -msgid "" -"Created Virtual Disk of size %(vmdk_file_size_in_kb)s KB on the ESX host " -"local store %(data_store_name)s" +msgid "Failed to Extend Volume %(volname)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:260 +#: cinder/volume/drivers/rbd.py:845 cinder/volume/drivers/sheepdog.py:200 +#: cinder/volume/drivers/windows/windows.py:223 #, python-format -msgid "" -"Deleting the file %(flat_uploaded_vmdk_path)s on the ESX host localstore " -"%(data_store_name)s" +msgid "Extend volume from %(old_size)s GB to %(new_size)s GB." 
msgstr "" -#: cinder/virt/vmwareapi/vmops.py:272 -#, python-format -msgid "" -"Deleted the file %(flat_uploaded_vmdk_path)s on the ESX host local store " -"%(data_store_name)s" +#: cinder/volume/drivers/scality.py:67 +msgid "Value required for 'scality_sofs_config'" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:283 +#: cinder/volume/drivers/scality.py:78 #, python-format -msgid "" -"Downloading image file data %(image_ref)s to the ESX data store " -"%(data_store_name)s" +msgid "Cannot access 'scality_sofs_config': %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:298 -#, python-format -msgid "" -"Downloaded image file data %(image_ref)s to the ESX data store " -"%(data_store_name)s" +#: cinder/volume/drivers/scality.py:84 +msgid "Cannot execute /sbin/mount.sofs" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:315 -#, python-format -msgid "Reconfiguring VM instance %s to attach the image disk" +#: cinder/volume/drivers/scality.py:105 +msgid "Cannot mount Scality SOFS, check syslog for errors" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:322 +#: cinder/volume/drivers/scality.py:139 #, python-format -msgid "Reconfigured VM instance %s to attach the image disk" +msgid "Cannot find volume dir for Scality SOFS at '%s'" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:329 +#: cinder/volume/drivers/sheepdog.py:59 #, python-format -msgid "Powering on the VM instance %s" -msgstr "Включение питания на копии ВМ %s" +msgid "Sheepdog is not working: %s" +msgstr "Sheepdog не выполняется: %s" -#: cinder/virt/vmwareapi/vmops.py:335 -#, python-format -msgid "Powered on the VM instance %s" -msgstr "Питание включено на копии ВМ %s" +#: cinder/volume/drivers/sheepdog.py:64 +msgid "Sheepdog is not working" +msgstr "Sheepdog не выполняется" -#: cinder/virt/vmwareapi/vmops.py:381 +#: cinder/volume/drivers/solidfire.py:144 #, python-format -msgid "Creating Snapshot of the VM instance %s " -msgstr "Создание снимка копии ВМ %s " +msgid "Payload for SolidFire API call: %s" +msgstr "" -#: cinder/virt/vmwareapi/vmops.py:391 +#: cinder/volume/drivers/solidfire.py:151 #, python-format -msgid "Created Snapshot of the VM instance %s " -msgstr "Создание снимка копии ВМ %s " +msgid "" +"Failed to make httplib connection SolidFire Cluster: %s (verify san_ip " +"settings)" +msgstr "" -#: cinder/virt/vmwareapi/vmops.py:434 +#: cinder/volume/drivers/solidfire.py:154 #, python-format -msgid "Copying disk data before snapshot of the VM instance %s" +msgid "Failed to make httplib connection: %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:447 +#: cinder/volume/drivers/solidfire.py:161 #, python-format -msgid "Copied disk data before snapshot of the VM instance %s" +msgid "" +"Request to SolidFire cluster returned bad status: %(status)s / %(reason)s" +" (check san_login/san_password settings)" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:456 +#: cinder/volume/drivers/solidfire.py:166 #, python-format -msgid "Uploading image %s" -msgstr "Выгрузка образа %s" +msgid "HTTP request failed, with status: %(status)s and reason: %(reason)s" +msgstr "" -#: cinder/virt/vmwareapi/vmops.py:469 +#: cinder/volume/drivers/solidfire.py:177 #, python-format -msgid "Uploaded image %s" -msgstr "Выгруженный образ %s" +msgid "Call to json.loads() raised an exception: %s" +msgstr "" -#: cinder/virt/vmwareapi/vmops.py:479 +#: cinder/volume/drivers/solidfire.py:183 #, python-format -msgid "Deleting temporary vmdk file %s" -msgstr "Удаление временного файла vmdk %s" +msgid "Results of SolidFire API call: %s" +msgstr "" -#: cinder/virt/vmwareapi/vmops.py:488 +#: 
cinder/volume/drivers/solidfire.py:187 #, python-format -msgid "Deleted temporary vmdk file %s" -msgstr "Удалённый временный файл vmdk %s" - -#: cinder/virt/vmwareapi/vmops.py:520 -msgid "instance is not powered on" -msgstr "копия не включена" +msgid "Clone operation encountered: %s" +msgstr "" -#: cinder/virt/vmwareapi/vmops.py:527 +#: cinder/volume/drivers/solidfire.py:189 #, python-format -msgid "Rebooting guest OS of VM %s" -msgstr "Перезагрузка гостевой ОС ВМ %s" +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" -#: cinder/virt/vmwareapi/vmops.py:530 +#: cinder/volume/drivers/solidfire.py:195 #, python-format -msgid "Rebooted guest OS of VM %s" -msgstr "Выполнена перезагрузка гостевой ОС ВМ %s" +msgid "Detected xDBVersionMismatch, retry %s of 5" +msgstr "" -#: cinder/virt/vmwareapi/vmops.py:532 -#, python-format -msgid "Doing hard reboot of VM %s" -msgstr "Выполнение безотказной перезагрузки ВМ %s" +#: cinder/volume/drivers/solidfire.py:202 +#: cinder/volume/drivers/solidfire.py:271 +#: cinder/volume/drivers/solidfire.py:366 +#, fuzzy, python-format +msgid "API response: %s" +msgstr "ответ %s" -#: cinder/virt/vmwareapi/vmops.py:536 +#: cinder/volume/drivers/solidfire.py:222 #, python-format -msgid "Did hard reboot of VM %s" +msgid "Found solidfire account: %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:548 +#: cinder/volume/drivers/solidfire.py:253 #, python-format -msgid "instance - %s not present" -msgstr "копия - %s не представлена" +msgid "solidfire account: %s does not exist, create it..." +msgstr "" -#: cinder/virt/vmwareapi/vmops.py:567 +#: cinder/volume/drivers/solidfire.py:315 #, python-format -msgid "Powering off the VM %s" -msgstr "Выключение питания ВМ %s" +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" -#: cinder/virt/vmwareapi/vmops.py:572 -#, python-format -msgid "Powered off the VM %s" -msgstr "Питание ВМ %s выключено" +#: cinder/volume/drivers/solidfire.py:398 +#, fuzzy +msgid "Failed to get model update from clone" +msgstr "Ошибка получения метаданных для ip: %s" -#: cinder/virt/vmwareapi/vmops.py:576 +#: cinder/volume/drivers/solidfire.py:410 #, python-format -msgid "Unregistering the VM %s" -msgstr "Исключение регистрации ВМ %s" +msgid "Failed volume create: %s" +msgstr "" -#: cinder/virt/vmwareapi/vmops.py:579 +#: cinder/volume/drivers/solidfire.py:425 #, python-format -msgid "Unregistered the VM %s" -msgstr "Незарегистрированная ВМ %s" +msgid "More than one valid preset was detected, using %s" +msgstr "" -#: cinder/virt/vmwareapi/vmops.py:581 +#: cinder/volume/drivers/solidfire.py:460 #, python-format -msgid "" -"In vmwareapi:vmops:destroy, got this exception while un-registering the " -"VM: %s" +msgid "Failed to get SolidFire Volume: %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:592 +#: cinder/volume/drivers/solidfire.py:469 #, python-format -msgid "Deleting contents of the VM %(name)s from datastore %(datastore_name)s" +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:602 +#: cinder/volume/drivers/solidfire.py:478 #, python-format -msgid "Deleted contents of the VM %(name)s from datastore %(datastore_name)s" +msgid "Volume %s, not found on SF Cluster." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:607 +#: cinder/volume/drivers/solidfire.py:481 #, python-format -msgid "" -"In vmwareapi:vmops:destroy, got this exception while deleting the VM " -"contents from the disk: %s" +msgid "Found %(count)s volumes mapped to id: %(uuid)s." 
msgstr "" -#: cinder/virt/vmwareapi/vmops.py:615 -msgid "pause not supported for vmwareapi" +#: cinder/volume/drivers/solidfire.py:550 +msgid "Enter SolidFire delete_volume..." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:619 -msgid "unpause not supported for vmwareapi" +#: cinder/volume/drivers/solidfire.py:554 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:633 -#, python-format -msgid "Suspending the VM %s " -msgstr "Приостановка ВМ %s " +#: cinder/volume/drivers/solidfire.py:556 +msgid "This usually means the volume was never successfully created." +msgstr "" -#: cinder/virt/vmwareapi/vmops.py:637 +#: cinder/volume/drivers/solidfire.py:569 #, python-format -msgid "Suspended the VM %s " -msgstr "Приостановленная ВМ %s " - -#: cinder/virt/vmwareapi/vmops.py:640 -msgid "instance is powered off and can not be suspended." -msgstr "Питание копии выключено и следовательно она не может быть приостановлена." +msgid "Failed to delete SolidFire Volume: %s" +msgstr "" -#: cinder/virt/vmwareapi/vmops.py:643 +#: cinder/volume/drivers/solidfire.py:572 +#: cinder/volume/drivers/solidfire.py:646 +#: cinder/volume/drivers/solidfire.py:709 +#: cinder/volume/drivers/solidfire.py:734 #, python-format -msgid "VM %s was already in suspended state. So returning without doing anything" +msgid "Volume ID %s was not found on the SolidFire Cluster!" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:656 -#, python-format -msgid "Resuming the VM %s" -msgstr "Возобновление работы ВМ %s" +#: cinder/volume/drivers/solidfire.py:575 +msgid "Leaving SolidFire delete_volume" +msgstr "" -#: cinder/virt/vmwareapi/vmops.py:661 -#, python-format -msgid "Resumed the VM %s " -msgstr "Работа ВМ %s возобновлена " +#: cinder/volume/drivers/solidfire.py:579 +msgid "Executing SolidFire ensure_export..." +msgstr "" -#: cinder/virt/vmwareapi/vmops.py:663 -msgid "instance is not in a suspended state" -msgstr "копия не в приостановленном состоянии" +#: cinder/volume/drivers/solidfire.py:587 +msgid "Executing SolidFire create_export..." +msgstr "" -#: cinder/virt/vmwareapi/vmops.py:699 -msgid "get_diagnostics not implemented for vmwareapi" +#: cinder/volume/drivers/solidfire.py:638 +msgid "Entering SolidFire extend_volume..." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:757 -#, python-format -msgid "" -"Reconfiguring VM instance %(name)s to set the machine id with ip - " -"%(ip_addr)s" +#: cinder/volume/drivers/solidfire.py:660 +msgid "Leaving SolidFire extend_volume" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:765 -#, python-format -msgid "" -"Reconfigured VM instance %(name)s to set the machine id with ip - " -"%(ip_addr)s" +#: cinder/volume/drivers/solidfire.py:665 +#, fuzzy +msgid "Updating cluster status info" +msgstr "Обновление состояния узла" + +#: cinder/volume/drivers/solidfire.py:673 +#, fuzzy +msgid "Failed to get updated stats" +msgstr "Невозможно получить обновлённое состояние: %s" + +#: cinder/volume/drivers/solidfire.py:703 +#: cinder/volume/drivers/solidfire.py:728 +msgid "Entering SolidFire attach_volume..." 
msgstr "" -#: cinder/virt/vmwareapi/vmops.py:802 -#, python-format -msgid "Creating directory with path %s" -msgstr "Создание папки с адресом %s" +#: cinder/volume/drivers/solidfire.py:773 +msgid "Leaving SolidFire transfer volume" +msgstr "" -#: cinder/virt/vmwareapi/vmops.py:806 +#: cinder/volume/drivers/zadara.py:236 #, python-format -msgid "Created directory with path %s" -msgstr "Создана папка с адресом %s" +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:89 +#: cinder/volume/drivers/zadara.py:260 #, python-format -msgid "Downloading image %s from glance image server" +msgid "Operation completed. %(data)s" msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:103 +#: cinder/volume/drivers/zadara.py:357 #, python-format -msgid "Downloaded image %s from glance image server" +msgid "Pool %(name)s: %(total)sGB total, %(free)sGB free" msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:108 +#: cinder/volume/drivers/zadara.py:408 cinder/volume/drivers/zadara.py:531 +#, fuzzy, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "Том %(volume_id)s не найден." + +#: cinder/volume/drivers/zadara.py:438 #, python-format -msgid "Uploading image %s to the Glance image server" +msgid "Create snapshot: %s" msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:129 +#: cinder/volume/drivers/zadara.py:445 cinder/volume/drivers/zadara.py:490 +#: cinder/volume/drivers/zadara.py:516 #, python-format -msgid "Uploaded image %s to the Glance image server" +msgid "Volume %(name)s not found" msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:139 +#: cinder/volume/drivers/zadara.py:456 #, python-format -msgid "Getting image size for the image %s" -msgstr "Получение размера образа для образа %s" +msgid "Delete snapshot: %s" +msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:143 +#: cinder/volume/drivers/zadara.py:464 #, python-format -msgid "Got image size of %(size)s for the image %(image)s" -msgstr "Принят размер образа %(size)s для образа %(image)s" - -#: cinder/virt/xenapi/fake.py:553 cinder/virt/xenapi/fake.py:652 -#: cinder/virt/xenapi/fake.py:670 cinder/virt/xenapi/fake.py:732 -msgid "Raising NotImplemented" +msgid "snapshot: original volume %s not found, skipping delete operation" msgstr "" -#: cinder/virt/xenapi/fake.py:555 +#: cinder/volume/drivers/zadara.py:472 #, python-format -msgid "xenapi.fake does not have an implementation for %s" -msgstr "xenapi.fake не имеет реализации для %s" +msgid "snapshot: snapshot %s not found, skipping delete operation" +msgstr "" -#: cinder/virt/xenapi/fake.py:589 +#: cinder/volume/drivers/zadara.py:483 #, python-format -msgid "Calling %(localname)s %(impl)s" -msgstr "Выполняется вызов %(localname)s %(impl)s" +msgid "Creating volume from snapshot: %s" +msgstr "" -#: cinder/virt/xenapi/fake.py:594 +#: cinder/volume/drivers/zadara.py:496 #, python-format -msgid "Calling getter %s" -msgstr "Выполняется вызов getter %s" +msgid "Snapshot %(name)s not found" +msgstr "" -#: cinder/virt/xenapi/fake.py:654 +#: cinder/volume/drivers/zadara.py:614 #, python-format -msgid "" -"xenapi.fake does not have an implementation for %s or it has been called " -"with the wrong number of arguments" +msgid "Attach properties: %(properties)s" msgstr "" -"xenapi.fake не имеет реализации для %s или был вызван с использованием " -"неправильным числом аргументов" -#: cinder/virt/xenapi/host.py:67 -#, python-format +#: cinder/volume/drivers/emc/emc_smis_common.py:40 msgid "" -"Instance 
%(name)s running on %(host)s could not be found in the database:" -" assuming it is a worker VM and skipping migration to a new host" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." msgstr "" -#: cinder/virt/xenapi/host.py:137 -#, python-format -msgid "Unable to get SR for this host: %s" -msgstr "Невозможно принять SR для этого узла: %s" +#: cinder/volume/drivers/emc/emc_smis_common.py:79 +msgid "Entering create_volume." +msgstr "" -#: cinder/virt/xenapi/host.py:169 +#: cinder/volume/drivers/emc/emc_smis_common.py:83 #, fuzzy, python-format -msgid "Unable to get updated status" -msgstr "Невозможно получить обновлённое состояние: %s" - -#: cinder/virt/xenapi/host.py:172 -#, python-format -msgid "The call to %(method)s returned an error: %(e)s." -msgstr "Вызов %(method)s возвратил ошибку: %(e)s." +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "том %(vol_name)s: создание lv объёмом %(vol_size)sG" -#: cinder/virt/xenapi/network_utils.py:37 +#: cinder/volume/drivers/emc/emc_smis_common.py:91 #, python-format -msgid "Found non-unique network for name_label %s" -msgstr "Найдена не уникальная сеть для name_label %s" +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" -#: cinder/virt/xenapi/network_utils.py:55 +#: cinder/volume/drivers/emc/emc_smis_common.py:98 #, python-format -msgid "Found non-unique network for bridge %s" -msgstr "Найдена не уникальная сеть для моста %s" +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" -#: cinder/virt/xenapi/network_utils.py:58 +#: cinder/volume/drivers/emc/emc_smis_common.py:107 #, python-format -msgid "Found no network for bridge %s" -msgstr "Не найдена сеть для моста %s" +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." +msgstr "" -#: cinder/virt/xenapi/pool.py:111 +#: cinder/volume/drivers/emc/emc_smis_common.py:115 #, python-format -msgid "Unable to eject %(host)s from the pool; pool not empty" +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" msgstr "" -#: cinder/virt/xenapi/pool.py:126 +#: cinder/volume/drivers/emc/emc_smis_common.py:130 #, python-format -msgid "Unable to eject %(host)s from the pool; No master found" +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" msgstr "" -#: cinder/virt/xenapi/pool.py:143 +#: cinder/volume/drivers/emc/emc_smis_common.py:137 #, python-format -msgid "Pool-Join failed: %(e)s" +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" msgstr "" -#: cinder/virt/xenapi/pool.py:146 -#, fuzzy, python-format -msgid "Unable to join %(host)s in the pool" -msgstr "Невозможно найти узел для копии %s" - -#: cinder/virt/xenapi/pool.py:162 +#: cinder/volume/drivers/emc/emc_smis_common.py:144 #, python-format -msgid "Pool-eject failed: %(e)s" +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" msgstr "" -#: cinder/virt/xenapi/pool.py:174 -#, fuzzy, python-format -msgid "Unable to set up pool: %(e)s." -msgstr "Невозможно использовать глобальные полномочия %(role_id)s" +#: cinder/volume/drivers/emc/emc_smis_common.py:152 +#, fuzzy +msgid "Entering create_volume_from_snapshot." 
+msgstr "Создать том из снимка %s" -#: cinder/virt/xenapi/pool.py:185 +#: cinder/volume/drivers/emc/emc_smis_common.py:157 #, python-format -msgid "Pool-set_name_label failed: %(e)s" +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" msgstr "" -#: cinder/virt/xenapi/vif.py:103 +#: cinder/volume/drivers/emc/emc_smis_common.py:167 #, python-format -msgid "Found no PIF for device %s" +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." msgstr "" -#: cinder/virt/xenapi/vif.py:122 +#: cinder/volume/drivers/emc/emc_smis_common.py:177 #, python-format msgid "" -"PIF %(pif_rec['uuid'])s for network %(bridge)s has VLAN id %(pif_vlan)d. " -"Expected %(vlan_num)d" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:218 -#, fuzzy, python-format -msgid "Created VM" -msgstr "_создать: %s" - -#: cinder/virt/xenapi/vm_utils.py:245 -#, python-format -msgid "VBD not found in instance %s" -msgstr "VBD не найдено для копии %s" - -#: cinder/virt/xenapi/vm_utils.py:262 -#, fuzzy, python-format -msgid "VBD %s already detached" -msgstr "группа %s уже существует" - -#: cinder/virt/xenapi/vm_utils.py:265 +#: cinder/volume/drivers/emc/emc_smis_common.py:188 #, python-format -msgid "VBD %(vbd_ref)s detach rejected, attempt %(num_attempt)d/%(max_attempts)d" +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:270 +#: cinder/volume/drivers/emc/emc_smis_common.py:197 #, python-format -msgid "Unable to unplug VBD %s" -msgstr "Невозможно отсоединить VBD %s" - -#: cinder/virt/xenapi/vm_utils.py:275 -#, python-format -msgid "Reached maximum number of retries trying to unplug VBD %s" +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:286 +#: cinder/volume/drivers/emc/emc_smis_common.py:218 #, python-format -msgid "Unable to destroy VBD %s" -msgstr "Невозможно ликвидировать VBD %s" - -#: cinder/virt/xenapi/vm_utils.py:305 -#, fuzzy, python-format -msgid "Creating %(vbd_type)s-type VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " -msgstr "Создана VBD %(vbd_ref)s для ВМ %(vm_ref)s, VDI %(vdi_ref)s." +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:308 +#: cinder/volume/drivers/emc/emc_smis_common.py:230 #, python-format -msgid "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s." -msgstr "Создана VBD %(vbd_ref)s для ВМ %(vm_ref)s, VDI %(vdi_ref)s." +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." 
+msgstr "" -#: cinder/virt/xenapi/vm_utils.py:319 +#: cinder/volume/drivers/emc/emc_smis_common.py:241 #, python-format -msgid "Unable to destroy VDI %s" -msgstr "Невозможно ликвидировать VDI %s" +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:336 +#: cinder/volume/drivers/emc/emc_smis_common.py:257 #, python-format msgid "" -"Created VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s)" -" on %(sr_ref)s." +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" msgstr "" -"Создан VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s) " -"на %(sr_ref)s." -#: cinder/virt/xenapi/vm_utils.py:345 +#: cinder/volume/drivers/emc/emc_smis_common.py:266 #, python-format -msgid "Copied VDI %(vdi_ref)s from VDI %(vdi_to_copy_ref)s on %(sr_ref)s." +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:353 +#: cinder/volume/drivers/emc/emc_smis_common.py:278 #, python-format -msgid "Cloned VDI %(vdi_ref)s from VDI %(vdi_to_clone_ref)s" +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:372 -#, fuzzy, python-format -msgid "No primary VDI found for %(vm_ref)s" -msgstr "Отсутствует первичный VDI для%(vm_ref)s" - -#: cinder/virt/xenapi/vm_utils.py:379 -#, fuzzy, python-format -msgid "Snapshotting with label '%(label)s'" -msgstr "Снимок ВМ %(vm_ref)s с меткой '%(label)s'..." - -#: cinder/virt/xenapi/vm_utils.py:392 -#, fuzzy, python-format -msgid "Created snapshot %(template_vm_ref)s" -msgstr "Создан снимок %(template_vm_ref)s из ВМ %(vm_ref)s." +#: cinder/volume/drivers/emc/emc_smis_common.py:287 +msgid "Entering create_cloned_volume." +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:431 +#: cinder/volume/drivers/emc/emc_smis_common.py:292 #, python-format -msgid "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s" -msgstr "Запрос xapi для выгрузки %(vdi_uuids)s в качестве ID %(image_id)s" +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:583 +#: cinder/volume/drivers/emc/emc_smis_common.py:302 #, python-format -msgid "Creating blank HD of size %(req_size)d gigs" +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:634 +#: cinder/volume/drivers/emc/emc_smis_common.py:312 #, python-format msgid "" -"Fast cloning is only supported on default local SR of type ext. SR on " -"this system was found to be of type %(sr_type)s. Ignoring the cow flag." +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." 
msgstr "" -#: cinder/virt/xenapi/vm_utils.py:724 +#: cinder/volume/drivers/emc/emc_smis_common.py:321 #, python-format msgid "" -"download_vhd %(image)s attempt %(attempt_num)d/%(max_attempts)d from " -"%(glance_host)s:%(glance_port)s" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:734 +#: cinder/volume/drivers/emc/emc_smis_common.py:342 #, python-format -msgid "download_vhd failed: %r" +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. Return code: %(rc)lu.Error: %(error)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:750 +#: cinder/volume/drivers/emc/emc_smis_common.py:354 #, python-format -msgid "Asking xapi to fetch vhd image %(image)s" -msgstr "Запрос xapi на приём образа vhd %(image)s" +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:760 +#: cinder/volume/drivers/emc/emc_smis_common.py:365 #, python-format msgid "" -"xapi 'download_vhd' returned VDI of type '%(vdi_type)s' with UUID " -"'%(vdi_uuid)s'" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" msgstr "" -"xapi 'download_vhd' возвратил VDI типа '%(vdi_type)s' с UUID " -"'%(vdi_uuid)s'" -#: cinder/virt/xenapi/vm_utils.py:789 +#: cinder/volume/drivers/emc/emc_smis_common.py:381 #, python-format -msgid "vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes=%(vdi_size_bytes)d" -msgstr "vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes=%(vdi_size_bytes)d" +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:805 +#: cinder/volume/drivers/emc/emc_smis_common.py:390 #, python-format -msgid "image_size_bytes=%(size_bytes)d, allowed_size_bytes=%(allowed_size_bytes)d" -msgstr "image_size_bytes=%(size_bytes)d, allowed_size_bytes=%(allowed_size_bytes)d" +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:809 +#: cinder/volume/drivers/emc/emc_smis_common.py:402 #, python-format msgid "" -"Image size %(size_bytes)d exceeded instance_type allowed size " -"%(allowed_size_bytes)d" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:411 +msgid "Entering delete_volume." msgstr "" -"Размер образа %(size_bytes)d превышает допустимый instance_type размер " -"%(allowed_size_bytes)d" -#: cinder/virt/xenapi/vm_utils.py:831 +#: cinder/volume/drivers/emc/emc_smis_common.py:413 #, fuzzy, python-format -msgid "Fetching image %(image)s, type %(image_type_str)" -msgstr "Получение образа %(image)s" +msgid "Delete Volume: %(volume)s" +msgstr "Удалить том с идентификатором: %s" -#: cinder/virt/xenapi/vm_utils.py:844 +#: cinder/volume/drivers/emc/emc_smis_common.py:420 #, python-format -msgid "Size for image %(image)s: %(virtual_size)d" +msgid "Volume %(name)s not found on the array. No volume to delete." 
msgstr "" -#: cinder/virt/xenapi/vm_utils.py:853 +#: cinder/volume/drivers/emc/emc_smis_common.py:430 #, python-format msgid "" -"Kernel/Ramdisk image is too large: %(vdi_size)d bytes, max %(max_size)d " -"bytes" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." msgstr "" -"Превышен размер ядра/Ramdisk образа: %(vdi_size)d байт, макс. " -"%(max_size)d байт" -#: cinder/virt/xenapi/vm_utils.py:870 +#: cinder/volume/drivers/emc/emc_smis_common.py:438 #, python-format -msgid "Copying VDI %s to /boot/guest on dom0" -msgstr "Копирование VDI %s в /boot/guest на dom0" +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:884 +#: cinder/volume/drivers/emc/emc_smis_common.py:442 #, python-format -msgid "Kernel/Ramdisk VDI %s destroyed" -msgstr "Ядро/Ramdisk VDI %s ликвидирован" - -#: cinder/virt/xenapi/vm_utils.py:895 -#, fuzzy -msgid "Failed to fetch glance image" -msgstr "Ошибка перезагрузки копии" +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:934 +#: cinder/volume/drivers/emc/emc_smis_common.py:456 #, python-format -msgid "Detected %(image_type_str)s format for image %(image_ref)s" -msgstr "Обнаружен %(image_type_str)s формат для образа %(image_ref)s" +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:955 +#: cinder/volume/drivers/emc/emc_smis_common.py:465 #, python-format -msgid "Looking up vdi %s for PV kernel" -msgstr "Поиск vdi %s для ядра PV" +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:973 -#, python-format -msgid "Unknown image format %(disk_image_type)s" -msgstr "Неизвестный формат образа %(disk_image_type)s" +#: cinder/volume/drivers/emc/emc_smis_common.py:472 +msgid "Entering create_snapshot." +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1016 -#, python-format -msgid "VDI %s is still available" -msgstr "VDI %s до сих пор доступен" +#: cinder/volume/drivers/emc/emc_smis_common.py:476 +#, fuzzy, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "Создать снимок тома %s" -#: cinder/virt/xenapi/vm_utils.py:1059 +#: cinder/volume/drivers/emc/emc_smis_common.py:488 #, python-format -msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1061 +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:497 +#: cinder/volume/drivers/emc/emc_smis_common.py:567 #, python-format -msgid "(VM_UTILS) xenapi power_state -> |%s|" +msgid "Cannot find Replication Service to create snapshot for volume %s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1088 +#: cinder/volume/drivers/emc/emc_smis_common.py:502 #, python-format -msgid "Unable to parse rrd of %(vm_uuid)s" +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." 
msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1108 -#, python-format -msgid "Re-scanning SR %s" -msgstr "Повторная проверка SR %s" - -#: cinder/virt/xenapi/vm_utils.py:1136 +#: cinder/volume/drivers/emc/emc_smis_common.py:518 #, python-format -msgid "Flag sr_matching_filter '%s' does not respect formatting convention" -msgstr "" - -#: cinder/virt/xenapi/vm_utils.py:1154 msgid "" -"XenAPI is unable to find a Storage Repository to install guest instances " -"on. Please check your configuration and/or configure the flag " -"'sr_matching_filter'" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1167 -msgid "Cannot find SR of content-type ISO" -msgstr "Невозможно найти SR типа содержимого ISO" +#: cinder/volume/drivers/emc/emc_smis_common.py:527 +#, fuzzy, python-format +msgid "" +"Error Create Snapshot: %(snapshot)s Volume: %(volume)s Error: " +"%(errordesc)s" +msgstr "Создать снимок тома %s" -#: cinder/virt/xenapi/vm_utils.py:1175 +#: cinder/volume/drivers/emc/emc_smis_common.py:535 #, python-format -msgid "ISO: looking at SR %(sr_rec)s" -msgstr "ISO: поиск SR %(sr_rec)s" - -#: cinder/virt/xenapi/vm_utils.py:1177 -msgid "ISO: not iso content" -msgstr "ISO: не содержимое iso типа" - -#: cinder/virt/xenapi/vm_utils.py:1180 -msgid "ISO: iso content_type, no 'i18n-key' key" -msgstr "" - -#: cinder/virt/xenapi/vm_utils.py:1183 -msgid "ISO: iso content_type, i18n-key value not 'local-storage-iso'" +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1187 -msgid "ISO: SR MATCHing our criteria" +#: cinder/volume/drivers/emc/emc_smis_common.py:541 +msgid "Entering delete_snapshot." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1189 -msgid "ISO: ISO, looking to see if it is host local" -msgstr "" +#: cinder/volume/drivers/emc/emc_smis_common.py:545 +#, fuzzy, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "Создать снимок тома %s" -#: cinder/virt/xenapi/vm_utils.py:1192 +#: cinder/volume/drivers/emc/emc_smis_common.py:551 #, python-format -msgid "ISO: PBD %(pbd_ref)s disappeared" +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1195 +#: cinder/volume/drivers/emc/emc_smis_common.py:559 #, python-format -msgid "ISO: PBD matching, want %(pbd_rec)s, have %(host)s" +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1198 -msgid "ISO: SR with local PBD" -msgstr "ISO: SR с локальной PBD" - -#: cinder/virt/xenapi/vm_utils.py:1220 +#: cinder/volume/drivers/emc/emc_smis_common.py:574 #, python-format msgid "" -"Unable to obtain RRD XML for VM %(vm_uuid)s with server details: " -"%(server)s." +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1236 +#: cinder/volume/drivers/emc/emc_smis_common.py:590 #, python-format -msgid "Unable to obtain RRD XML updates with server details: %(server)s." 
+msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1290 +#: cinder/volume/drivers/emc/emc_smis_common.py:599 #, python-format -msgid "Invalid statistics data from Xenserver: %s" +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1343 +#: cinder/volume/drivers/emc/emc_smis_common.py:611 #, python-format -msgid "VHD %(vdi_uuid)s has parent %(parent_ref)s" +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1417 +#: cinder/volume/drivers/emc/emc_smis_common.py:621 +#, fuzzy, python-format +msgid "Create export: %(volume)s" +msgstr "Повторное экспортирование %s томов" + +#: cinder/volume/drivers/emc/emc_smis_common.py:626 +#, fuzzy, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "Присоединить том %(volume_id)s к копии %(server_id)s на %(device)s" + +#: cinder/volume/drivers/emc/emc_smis_common.py:648 #, python-format msgid "" -"Parent %(parent_uuid)s doesn't match original parent " -"%(original_parent_uuid)s, waiting for coalesce..." +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1427 +#: cinder/volume/drivers/emc/emc_smis_common.py:663 #, python-format -msgid "VHD coalesce attempts exceeded (%(max_attempts)d), giving up..." +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1462 -#, python-format -msgid "Timeout waiting for device %s to be created" -msgstr "Время ожидания при создании устройства %s" +#: cinder/volume/drivers/emc/emc_smis_common.py:674 +#, fuzzy, python-format +msgid "Error mapping volume %s." +msgstr "Ошибка запуска xvp: %s" -#: cinder/virt/xenapi/vm_utils.py:1473 -#, python-format -msgid "Plugging VBD %s ... " -msgstr "Подсоединение VBD %s ... " +#: cinder/volume/drivers/emc/emc_smis_common.py:678 +#, fuzzy, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "том %s: удаление завершено" -#: cinder/virt/xenapi/vm_utils.py:1476 +#: cinder/volume/drivers/emc/emc_smis_common.py:694 #, python-format -msgid "Plugging VBD %s done." -msgstr "Подсоединение VBD %s выполнено." +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1478 +#: cinder/volume/drivers/emc/emc_smis_common.py:707 #, python-format -msgid "VBD %(vbd_ref)s plugged as %(orig_dev)s" -msgstr "VBD %(vbd_ref)s подсоединено как %(orig_dev)s" +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:711 +#, fuzzy, python-format +msgid "HidePaths for volume %s completed successfully." 
+msgstr "том %s: удаление завершено" -#: cinder/virt/xenapi/vm_utils.py:1481 +#: cinder/volume/drivers/emc/emc_smis_common.py:724 #, python-format -msgid "VBD %(vbd_ref)s plugged into wrong dev, remapping to %(dev)s" +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" msgstr "" -"VBD %(vbd_ref)s подсоединено в неправильный dev, изменение назначения на " -"%(dev)s" -#: cinder/virt/xenapi/vm_utils.py:1490 +#: cinder/volume/drivers/emc/emc_smis_common.py:739 #, python-format -msgid "Destroying VBD for VDI %s ... " -msgstr "Ликвидирование VBD для VDI %s ... " +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1498 -#, python-format -msgid "Destroying VBD for VDI %s done." -msgstr "Ликвидирование VBD для VDI %s завершено." +#: cinder/volume/drivers/emc/emc_smis_common.py:744 +#, fuzzy, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "том %s: удаление завершено" -#: cinder/virt/xenapi/vm_utils.py:1511 +#: cinder/volume/drivers/emc/emc_smis_common.py:757 #, python-format -msgid "Running pygrub against %s" +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1518 +#: cinder/volume/drivers/emc/emc_smis_common.py:770 #, python-format -msgid "Found Xen kernel %s" -msgstr "Найдено ядро Xen %s" - -#: cinder/virt/xenapi/vm_utils.py:1520 -msgid "No Xen kernel found. Booting HVM." -msgstr "Ядро Xen не найдено. Загрузка HVM." - -#: cinder/virt/xenapi/vm_utils.py:1533 -msgid "Partitions:" -msgstr "Разделы:" +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1539 -#, python-format -msgid " %(num)s: %(ptype)s %(size)d sectors" -msgstr " %(num)s: %(ptype)s %(size)d секторов" +#: cinder/volume/drivers/emc/emc_smis_common.py:775 +#, fuzzy, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "том %s: удаление завершено" -#: cinder/virt/xenapi/vm_utils.py:1565 +#: cinder/volume/drivers/emc/emc_smis_common.py:781 #, python-format -msgid "" -"Writing partition table %(primary_first)d %(primary_last)d to " -"%(dev_path)s..." +msgid "Map volume: %(volume)s" msgstr "" -"Запись таблицы разделов %(primary_first)d %(primary_last)d в " -"%(dev_path)s..." -#: cinder/virt/xenapi/vm_utils.py:1578 +#: cinder/volume/drivers/emc/emc_smis_common.py:790 +#: cinder/volume/drivers/emc/emc_smis_common.py:820 #, python-format -msgid "Writing partition table %s done." -msgstr "Запись таблицы разделов %s выполнена." +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1632 +#: cinder/volume/drivers/emc/emc_smis_common.py:804 #, python-format -msgid "" -"Starting sparse_copy src=%(src_path)s dst=%(dst_path)s " -"virtual_size=%(virtual_size)d block_size=%(block_size)d" +msgid "Unmap volume: %(volume)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1664 +#: cinder/volume/drivers/emc/emc_smis_common.py:810 #, python-format -msgid "" -"Finished sparse_copy in %(duration).2f secs, %(compression_pct).2f%% " -"reduction in size" +msgid "Volume %s is not mapped. No volume to unmap." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1714 -msgid "" -"XenServer tools installed in this image are capable of network injection." 
-" Networking files will not bemanipulated" +#: cinder/volume/drivers/emc/emc_smis_common.py:834 +#, python-format +msgid "Initialize connection: %(volume)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1722 -msgid "" -"XenServer tools are present in this image but are not capable of network " -"injection" +#: cinder/volume/drivers/emc/emc_smis_common.py:840 +#, python-format +msgid "Volume %s is already mapped." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1726 -msgid "XenServer tools are not installed in this image" -msgstr "Средства XenServer не установлены в этот образ" - -#: cinder/virt/xenapi/vm_utils.py:1742 -msgid "Manipulating interface files directly" +#: cinder/volume/drivers/emc/emc_smis_common.py:852 +#, python-format +msgid "Terminate connection: %(volume)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1751 +#: cinder/volume/drivers/emc/emc_smis_common.py:884 #, python-format -msgid "Failed to mount filesystem (expected for non-linux instances): %s" +msgid "Found Storage Type: %s" msgstr "" -"Ошибка присоединения файловой системы (ожидаемо для копий не на базе " -"linux): %s" -#: cinder/virt/xenapi/vmops.py:131 cinder/virt/xenapi/vmops.py:722 -#, fuzzy, python-format -msgid "Updating progress to %(progress)d" -msgstr "Обновление выполнения копии '%(instance_uuid)s' до %(progress)d" +#: cinder/volume/drivers/emc/emc_smis_common.py:887 +#, fuzzy +msgid "Storage type not found." +msgstr "образ не найден." -#: cinder/virt/xenapi/vmops.py:231 +#: cinder/volume/drivers/emc/emc_smis_common.py:903 #, python-format -msgid "Attempted to power on non-existent instance bad instance id %s" +msgid "Found Masking View: %s" msgstr "" -"Предпринята попытка включения несуществующей копии, копии с неполадками с" -" идентификатором %s" - -#: cinder/virt/xenapi/vmops.py:233 -#, fuzzy, python-format -msgid "Starting instance" -msgstr "Запуск копии %s" -#: cinder/virt/xenapi/vmops.py:303 -msgid "Removing kernel/ramdisk files from dom0" -msgstr "Удаление ядра/ramdisk файлов из dom0" +#: cinder/volume/drivers/emc/emc_smis_common.py:906 +#, fuzzy +msgid "Masking View not found." +msgstr "образ не найден." -#: cinder/virt/xenapi/vmops.py:358 +#: cinder/volume/drivers/emc/emc_smis_common.py:928 #, fuzzy -msgid "Failed to spawn, rolling back" -msgstr "Ошибка обновления тома в базе данных" +msgid "Ecom user not found." +msgstr "Сервер не найден." -#: cinder/virt/xenapi/vmops.py:443 -msgid "Detected ISO image type, creating blank VM for install" -msgstr "" - -#: cinder/virt/xenapi/vmops.py:462 -#, fuzzy, python-format -msgid "Auto configuring disk, attempting to resize partition..." -msgstr "" -"Автоматическая настройка диска для копии %(instance_uuid)s, выполняется " -"попытка изменения размера раздела..." - -#: cinder/virt/xenapi/vmops.py:515 -#, fuzzy, python-format -msgid "Invalid value for injected_files: %r" -msgstr "Недопустимое значение для injected_files: '%s'" - -#: cinder/virt/xenapi/vmops.py:520 +#: cinder/volume/drivers/emc/emc_smis_common.py:948 #, python-format -msgid "Injecting file path: '%s'" +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:527 -msgid "Setting admin password" -msgstr "Настройка пароля администратора" +#: cinder/volume/drivers/emc/emc_smis_common.py:952 +#, fuzzy +msgid "Ecom server not found." +msgstr "Сервер не найден." 
-#: cinder/virt/xenapi/vmops.py:531 -msgid "Resetting network" -msgstr "Восстановление сети" +#: cinder/volume/drivers/emc/emc_smis_common.py:959 +#, fuzzy +msgid "Cannot connect to ECOM server" +msgstr "Переподлючено к очереди" -#: cinder/virt/xenapi/vmops.py:538 -msgid "Setting VCPU weight" +#: cinder/volume/drivers/emc/emc_smis_common.py:971 +#, python-format +msgid "Found Replication Service: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:544 -#, fuzzy -msgid "Starting VM" -msgstr "Выполнение перезагрузки xvp" - -#: cinder/virt/xenapi/vmops.py:551 +#: cinder/volume/drivers/emc/emc_smis_common.py:984 #, python-format -msgid "" -"Latest agent build for %(hypervisor)s/%(os)s/%(architecture)s is " -"%(version)s" +msgid "Found Storage Configuration Service: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:554 +#: cinder/volume/drivers/emc/emc_smis_common.py:997 #, python-format -msgid "No agent build found for %(hypervisor)s/%(os)s/%(architecture)s" +msgid "Found Controller Configuration Service: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:561 -msgid "Waiting for instance state to become running" +#: cinder/volume/drivers/emc/emc_smis_common.py:1010 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:573 -msgid "Querying agent version" -msgstr "Запрос версии агента" +#: cinder/volume/drivers/emc/emc_smis_common.py:1054 +#, fuzzy, python-format +msgid "Pool %(storage_type)s is not found." +msgstr "Полномочия %(role_id)s не могут быть найдены." -#: cinder/virt/xenapi/vmops.py:576 +#: cinder/volume/drivers/emc/emc_smis_common.py:1060 #, python-format -msgid "Instance agent version: %s" -msgstr "Версия агента копии: %s" +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" -#: cinder/virt/xenapi/vmops.py:581 +#: cinder/volume/drivers/emc/emc_smis_common.py:1066 #, python-format -msgid "Updating Agent to %s" -msgstr "Обновление агента до %s" +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" -#: cinder/virt/xenapi/vmops.py:616 +#: cinder/volume/drivers/emc/emc_smis_common.py:1082 #, python-format -msgid "No opaque_ref could be determined for '%s'." -msgstr "opaque_ref не может быть определён для '%s'." - -#: cinder/virt/xenapi/vmops.py:670 -#, fuzzy, python-format -msgid "Finished snapshot and upload for VM" -msgstr "Готовый снимок и выгрузка для ВМ %s" +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" -#: cinder/virt/xenapi/vmops.py:677 +#: cinder/volume/drivers/emc/emc_smis_common.py:1114 #, fuzzy, python-format -msgid "Starting snapshot for VM" -msgstr "Запуск снимка для ВМ %s" +msgid "Volume %(volumename)s not found on the array." +msgstr "Том %(volume_id)s не найден." -#: cinder/virt/xenapi/vmops.py:686 +#: cinder/volume/drivers/emc/emc_smis_common.py:1117 #, fuzzy, python-format -msgid "Unable to Snapshot instance: %(exc)s" -msgstr "Невозможно найти узел для копии %s" +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "Не найден том для копии %(instance_id)s." -#: cinder/virt/xenapi/vmops.py:702 -msgid "Failed to transfer vhd to new host" -msgstr "Ошибка перемещения vhd на новый узел" +#: cinder/volume/drivers/emc/emc_smis_common.py:1130 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." 
+msgstr "" -#: cinder/virt/xenapi/vmops.py:770 +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 #, python-format -msgid "Resizing down VDI %(cow_uuid)s from %(old_gb)dGB to %(new_gb)dGB" -msgstr "Изменение размера VDI %(cow_uuid)s с %(old_gb)dГБ до %(new_gb)dГБ" +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" -#: cinder/virt/xenapi/vmops.py:893 +#: cinder/volume/drivers/emc/emc_smis_common.py:1158 #, python-format -msgid "Resizing up VDI %(vdi_uuid)s from %(old_gb)dGB to %(new_gb)dGB" -msgstr "Изменение размера VDI %(vdi_uuid)s с %(old_gb)dГБ до %(new_gb)dГБ" +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" -#: cinder/virt/xenapi/vmops.py:901 +#: cinder/volume/drivers/emc/emc_smis_common.py:1184 #, fuzzy, python-format -msgid "Resize complete" -msgstr "Изменение размера копии %s завершено" +msgid "Error finding %s." +msgstr "Ошибка поиска vdis в SR %s" -#: cinder/virt/xenapi/vmops.py:928 +#: cinder/volume/drivers/emc/emc_smis_common.py:1188 #, python-format -msgid "Failed to query agent version: %(resp)r" -msgstr "Ошибка запроса версии агента: %(resp)r" +msgid "Found %(name)s: %(initiator)s." +msgstr "" -#: cinder/virt/xenapi/vmops.py:949 +#: cinder/volume/drivers/emc/emc_smis_common.py:1248 #, python-format -msgid "domid changed from %(domid)s to %(newdomid)s" +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:962 +#: cinder/volume/drivers/emc/emc_smis_common.py:1289 #, python-format -msgid "Failed to update agent: %(resp)r" -msgstr "Ошибка обновления агента: %(resp)r" +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" -#: cinder/virt/xenapi/vmops.py:983 +#: cinder/volume/drivers/emc/emc_smis_common.py:1302 #, python-format -msgid "Failed to exchange keys: %(resp)r" -msgstr "Ошибка обмена ключей: %(resp)r" +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" -#: cinder/virt/xenapi/vmops.py:998 +#: cinder/volume/drivers/emc/emc_smis_common.py:1314 #, python-format -msgid "Failed to update password: %(resp)r" -msgstr "Ошибка обновления пароля: %(resp)r" +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" -#: cinder/virt/xenapi/vmops.py:1023 +#: cinder/volume/drivers/emc/emc_smis_common.py:1326 #, python-format -msgid "Failed to inject file: %(resp)r" +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1032 -msgid "VM already halted, skipping shutdown..." +#: cinder/volume/drivers/emc/emc_smis_common.py:1361 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1036 -msgid "Shutting down VM" +#: cinder/volume/drivers/emc/emc_smis_common.py:1404 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." 
msgstr "" -#: cinder/virt/xenapi/vmops.py:1054 -msgid "Unable to find VBD for VM" -msgstr "Невозможно найти VBD для ВМ" - -#: cinder/virt/xenapi/vmops.py:1097 -#, fuzzy, python-format -msgid "Using RAW or VHD, skipping kernel and ramdisk deletion" +#: cinder/volume/drivers/emc/emc_smis_common.py:1409 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." msgstr "" -"Копия %(instance_uuid)s использует RAW или VHD, пропуск ядра и удаление " -"ramdisk" -#: cinder/virt/xenapi/vmops.py:1104 -msgid "instance has a kernel or ramdisk but not both" -msgstr "копия содержит ядро или ramdisk, но не оба" +#: cinder/volume/drivers/emc/emc_smis_common.py:1419 +#, python-format +msgid "Device info: %(data)s." +msgstr "" -#: cinder/virt/xenapi/vmops.py:1111 -msgid "kernel/ramdisk files removed" -msgstr "файлы ядра/ramdisk удалены" +#: cinder/volume/drivers/emc/emc_smis_common.py:1441 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" -#: cinder/virt/xenapi/vmops.py:1121 -msgid "VM destroyed" +#: cinder/volume/drivers/emc/emc_smis_common.py:1463 +#, python-format +msgid "Found Storage Processor System: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1147 -#, fuzzy -msgid "Destroying VM" -msgstr "Выполнение перезагрузки xvp" +#: cinder/volume/drivers/emc/emc_smis_common.py:1491 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." +msgstr "" -#: cinder/virt/xenapi/vmops.py:1169 -msgid "VM is not present, skipping destroy..." -msgstr "ВМ не предоставлена, пропуск выполнения ликвидации..." +#: cinder/volume/drivers/emc/emc_smis_common.py:1520 +msgid "Error finding Storage Hardware ID Service." +msgstr "" -#: cinder/virt/xenapi/vmops.py:1222 +#: cinder/volume/drivers/emc/emc_smis_common.py:1526 #, python-format -msgid "Instance is already in Rescue Mode: %s" -msgstr "Копия в состоянии режима восстановления: %s" +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" -#: cinder/virt/xenapi/vmops.py:1296 -#, python-format -msgid "Found %(instance_count)d hung reboots older than %(timeout)d seconds" +#: cinder/volume/drivers/emc/emc_smis_common.py:1538 +msgid "Error finding Target WWNs." msgstr "" -#: cinder/virt/xenapi/vmops.py:1300 -#, fuzzy, python-format -msgid "Automatically hard rebooting" -msgstr "Автоматическая безотказная перезагрузка %d" +#: cinder/volume/drivers/emc/emc_smis_common.py:1548 +#, python-format +msgid "Add target WWN: %s." +msgstr "" -#: cinder/virt/xenapi/vmops.py:1363 +#: cinder/volume/drivers/emc/emc_smis_common.py:1550 #, python-format -msgid "Setting migration %(migration_id)s to error: %(reason)s" +msgid "Target WWNs: %s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1374 -#, fuzzy, python-format -msgid "" -"Automatically confirming migration %(migration_id)s for instance " -"%(instance_uuid)s" -msgstr "Завершение работы ВМ для копии %(instance_uuid)s" +#: cinder/volume/drivers/emc/emc_smis_common.py:1566 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." 
+msgstr "" -#: cinder/virt/xenapi/vmops.py:1379 -#, fuzzy, python-format -msgid "Instance %(instance_uuid)s not found" -msgstr "Копия %(instance_id)s не найдена" +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:152 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "Невозможно найти экспортирование iSCSI для тома %s" -#: cinder/virt/xenapi/vmops.py:1383 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:161 #, fuzzy, python-format -msgid "In ERROR state" -msgstr "Ошибка БД: %s" +msgid "Cannot find device number for volume %s" +msgstr "Невозможно найти экспортирование iSCSI для тома %s" -#: cinder/virt/xenapi/vmops.py:1389 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:191 #, python-format -msgid "In %(task_state)s task_state, not RESIZE_VERIFY" +msgid "Found iSCSI endpoint: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1396 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:198 #, python-format -msgid "Error auto-confirming resize: %(e)s. Will retry later." +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1418 -msgid "Could not get bandwidth info." -msgstr "Невозможно получить сведения о пропускной способности." +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:215 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" -#: cinder/virt/xenapi/vmops.py:1469 -#, fuzzy -msgid "Injecting network info to xenstore" -msgstr "установка сетевого узла" +#: cinder/volume/drivers/hds/hds.py:70 +#, python-format +msgid "Range: start LU: %(start)s, end LU: %(end)s" +msgstr "" -#: cinder/virt/xenapi/vmops.py:1483 -#, fuzzy -msgid "Creating vifs" -msgstr "Создание изображения" +#: cinder/volume/drivers/hds/hds.py:84 +#, python-format +msgid "setting LU upper (end) limit to %s" +msgstr "" -#: cinder/virt/xenapi/vmops.py:1492 -#, fuzzy, python-format -msgid "Creating VIF for network %(network_ref)s" -msgstr "Создание VIF для ВМ %(vm_ref)s, сеть %(network_ref)s." +#: cinder/volume/drivers/hds/hds.py:92 +#, python-format +msgid "%(element)s: %(val)s" +msgstr "" -#: cinder/virt/xenapi/vmops.py:1495 +#: cinder/volume/drivers/hds/hds.py:103 cinder/volume/drivers/hds/hds.py:105 #, fuzzy, python-format -msgid "Created VIF %(vif_ref)s, network %(network_ref)s" -msgstr "Создание VIF для ВМ %(vm_ref)s, сеть %(network_ref)s." +msgid "XML exception reading parameter: %s" +msgstr "Расширенный ресурс: %s" -#: cinder/virt/xenapi/vmops.py:1520 -msgid "Injecting hostname to xenstore" +#: cinder/volume/drivers/hds/hds.py:178 +#, python-format +msgid "portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1545 -#, fuzzy, python-format -msgid "" -"The agent call to %(method)s returned an invalid response: %(ret)r. " -"path=%(path)s; args=%(args)r" +#: cinder/volume/drivers/hds/hds.py:197 +#, python-format +msgid "No configuration found for service: %s" msgstr "" -"Вызов агентом %(method)s возвратил недопустимый ответ: %(ret)r. VM " -"id=%(instance_uuid)s; path=%(path)s; args=%(addl_args)r" -#: cinder/virt/xenapi/vmops.py:1566 +#: cinder/volume/drivers/hds/hds.py:250 #, fuzzy, python-format -msgid "TIMEOUT: The call to %(method)s timed out. args=%(args)r" +msgid "HDP not found: %s" +msgstr "Узел не найден" + +#: cinder/volume/drivers/hds/hds.py:289 +#, python-format +msgid "iSCSI portal not found for service: %s" msgstr "" -"ВРЕМЯ ПРОСТОЯ: Срок вызова %(method)s истёк. 
VM id=%(instance_uuid)s; " -"args=%(args)r" -#: cinder/virt/xenapi/vmops.py:1570 -#, fuzzy, python-format -msgid "" -"NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. " -"args=%(args)r" +#: cinder/volume/drivers/hds/hds.py:327 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created." msgstr "" -"НЕ РЕАЛИЗОВАНО: Вызов %(method)s не поддерживается агентом. VM " -"id=%(instance_uuid)s; args=%(args)r" -#: cinder/virt/xenapi/vmops.py:1575 -#, fuzzy, python-format -msgid "The call to %(method)s returned an error: %(e)s. args=%(args)r" -msgstr "Вызов %(method)s возвратил ошибку: %(e)s." +#: cinder/volume/drivers/hds/hds.py:355 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is cloned." +msgstr "" -#: cinder/virt/xenapi/vmops.py:1661 +#: cinder/volume/drivers/hds/hds.py:372 #, python-format -msgid "OpenSSL error: %s" -msgstr "Ошибка OpenSSL: %s" +msgid "LUN %(lun)s extended to %(size)s GB." +msgstr "" -#: cinder/virt/xenapi/volume_utils.py:52 -msgid "creating sr within volume_utils" -msgstr "создание sr в volume_utils" +#: cinder/volume/drivers/hds/hds.py:395 +#, fuzzy, python-format +msgid "delete lun %(lun)s on %(name)s" +msgstr "Удалить том с идентификатором: %s" -#: cinder/virt/xenapi/volume_utils.py:55 cinder/virt/xenapi/volume_utils.py:83 +#: cinder/volume/drivers/hds/hds.py:480 #, python-format -msgid "type is = %s" -msgstr "тип = %s" +msgid "LUN %(lun)s of size %(sz)s MB is created from snapshot." +msgstr "" -#: cinder/virt/xenapi/volume_utils.py:58 cinder/virt/xenapi/volume_utils.py:86 +#: cinder/volume/drivers/hds/hds.py:503 #, python-format -msgid "name = %s" -msgstr "наименование = %s" +msgid "LUN %(lun)s of size %(size)s MB is created as snapshot." +msgstr "" -#: cinder/virt/xenapi/volume_utils.py:71 +#: cinder/volume/drivers/hds/hds.py:522 #, python-format -msgid "Created %(label)s as %(sr_ref)s." -msgstr "Создано %(label)s как %(sr_ref)s." +msgid "LUN %s is deleted." +msgstr "" -#: cinder/virt/xenapi/volume_utils.py:76 cinder/virt/xenapi/volume_utils.py:174 -msgid "Unable to create Storage Repository" -msgstr "Невозможно создать репозиторий хранилища" +#: cinder/volume/drivers/huawei/__init__.py:57 +msgid "_instantiate_driver: configuration not found." +msgstr "" -#: cinder/virt/xenapi/volume_utils.py:80 -msgid "introducing sr within volume_utils" -msgstr "внедрение sr в volume_utils" +#: cinder/volume/drivers/huawei/__init__.py:64 +#, python-format +msgid "" +"_instantiate_driver: Loading %(protocol)s driver for Huawei OceanStor " +"%(product)s series storage arrays." +msgstr "" -#: cinder/virt/xenapi/volume_utils.py:103 cinder/virt/xenapi/volume_utils.py:170 -#: cinder/virt/xenapi/volumeops.py:156 +#: cinder/volume/drivers/huawei/__init__.py:84 #, python-format -msgid "Introduced %(label)s as %(sr_ref)s." -msgstr "внедрён %(label)s в качестве %(sr_ref)s." +msgid "" +"\"Product\" or \"Protocol\" is illegal. \"Product\" should be set to " +"either T, Dorado or HVS. \"Protocol\" should be set to either iSCSI or " +"FC. 
Product: %(product)s Protocol: %(protocol)s" +msgstr "" -#: cinder/virt/xenapi/volume_utils.py:106 -msgid "Creating pbd for SR" -msgstr "Создание pbd для SR" +#: cinder/volume/drivers/huawei/huawei_dorado.py:74 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s host: %(host)s initiator: " +"%(wwn)s" +msgstr "" -#: cinder/virt/xenapi/volume_utils.py:108 -msgid "Plugging SR" -msgstr "Подключение SR" +#: cinder/volume/drivers/huawei/huawei_dorado.py:92 +#: cinder/volume/drivers/huawei/huawei_t.py:461 +#, python-format +msgid "initialize_connection: Target FC ports WWNS: %s" +msgstr "" -#: cinder/virt/xenapi/volume_utils.py:116 cinder/virt/xenapi/volumeops.py:160 -msgid "Unable to introduce Storage Repository" -msgstr "Невозможно внедрить Репозиторий хранения" +#: cinder/volume/drivers/huawei/huawei_t.py:101 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(ini)s" +msgstr "" -#: cinder/virt/xenapi/volume_utils.py:127 cinder/virt/xenapi/volumeops.py:50 -msgid "Unable to get SR using uuid" -msgstr "Невозможно получить SR с использованием uuid" +#: cinder/volume/drivers/huawei/huawei_t.py:159 +#: cinder/volume/drivers/huawei/rest_common.py:1278 +#, python-format +msgid "" +"_get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " +"check config file." +msgstr "" -#: cinder/virt/xenapi/volume_utils.py:129 +#: cinder/volume/drivers/huawei/huawei_t.py:206 +#: cinder/volume/drivers/huawei/rest_common.py:1083 #, python-format -msgid "Forgetting SR %s..." -msgstr "Забывание SR %s..." +msgid "_get_tgt_iqn: iSCSI IP is %s." +msgstr "" -#: cinder/virt/xenapi/volume_utils.py:137 -msgid "Unable to forget Storage Repository" -msgstr "Невозможно забыть Репозиторий хранения" +#: cinder/volume/drivers/huawei/huawei_t.py:234 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s." +msgstr "" -#: cinder/virt/xenapi/volume_utils.py:157 +#: cinder/volume/drivers/huawei/huawei_t.py:248 #, python-format -msgid "Introducing %s..." -msgstr "Внедрение %s..." +msgid "" +"_get_iscsi_tgt_port_info: Failed to get iSCSI port info. Please make sure" +" the iSCSI port IP %s is configured in array." +msgstr "" -#: cinder/virt/xenapi/volume_utils.py:186 +#: cinder/volume/drivers/huawei/huawei_t.py:323 +#: cinder/volume/drivers/huawei/huawei_t.py:552 #, python-format -msgid "Unable to find SR from VBD %s" -msgstr "Невозможно найти SR из VBD %s" +msgid "" +"terminate_connection: volume: %(vol)s, host: %(host)s, connector: " +"%(initiator)s" +msgstr "" -#: cinder/virt/xenapi/volume_utils.py:204 +#: cinder/volume/drivers/huawei/huawei_t.py:351 #, python-format -msgid "Ignoring exception %(exc)s when getting PBDs for %(sr_ref)s" -msgstr "Пропуск исключения %(exc)s при получении PBD для %(sr_ref)s" +msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s." +msgstr "" -#: cinder/virt/xenapi/volume_utils.py:210 +#: cinder/volume/drivers/huawei/huawei_t.py:436 +msgid "validate_connector: The FC driver requires thewwpns in the connector." 
+msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:443 #, python-format -msgid "Ignoring exception %(exc)s when unplugging PBD %(pbd)s" -msgstr "Пропуск исключения %(exc)s при отсоединении PBD %(pbd)s" +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(wwn)s" +msgstr "" -#: cinder/virt/xenapi/volume_utils.py:234 +#: cinder/volume/drivers/huawei/huawei_t.py:578 #, python-format -msgid "Unable to introduce VDI on SR %s" -msgstr "Невозможно внедрить VDI на SR %s" +msgid "_remove_fc_ports: FC port was not found on host %(hostid)s." +msgstr "" -#: cinder/virt/xenapi/volume_utils.py:242 +#: cinder/volume/drivers/huawei/huawei_utils.py:40 #, python-format -msgid "Unable to get record of VDI %s on" -msgstr "Невозможно получить запись VDI %s на" +msgid "parse_xml_file: %s" +msgstr "" -#: cinder/virt/xenapi/volume_utils.py:264 +#: cinder/volume/drivers/huawei/huawei_utils.py:129 #, python-format -msgid "Unable to introduce VDI for SR %s" -msgstr "Невозможно внедрить VDI для SR %s" +msgid "_get_host_os_type: Host %(ip)s OS type is %(os)s." +msgstr "" -#: cinder/virt/xenapi/volume_utils.py:274 +#: cinder/volume/drivers/huawei/rest_common.py:59 #, python-format -msgid "Error finding vdis in SR %s" -msgstr "Ошибка поиска vdis в SR %s" +msgid "HVS Request URL: %(url)s" +msgstr "" -#: cinder/virt/xenapi/volume_utils.py:281 +#: cinder/volume/drivers/huawei/rest_common.py:60 #, python-format -msgid "Unable to find vbd for vdi %s" -msgstr "Ошибка поиска vbd для vdi %s" +msgid "HVS Request Data: %(data)s" +msgstr "" -#: cinder/virt/xenapi/volume_utils.py:315 +#: cinder/volume/drivers/huawei/rest_common.py:73 #, python-format -msgid "Unable to obtain target information %(data)s, %(mountpoint)s" -msgstr "Невозможно получить сведения назначения %(data)s, %(mountpoint)s" +msgid "HVS Response Data: %(res)s" +msgstr "" -#: cinder/virt/xenapi/volume_utils.py:341 +#: cinder/volume/drivers/huawei/rest_common.py:75 #, python-format -msgid "Mountpoint cannot be translated: %s" -msgstr "Точка подключения не может быть переведена: %s" +msgid "Bad response from server: %s" +msgstr "" -#: cinder/virt/xenapi/volumeops.py:64 -msgid "Could not find VDI ref" +#: cinder/volume/drivers/huawei/rest_common.py:82 +msgid "JSON transfer error" msgstr "" -#: cinder/virt/xenapi/volumeops.py:69 +#: cinder/volume/drivers/huawei/rest_common.py:102 #, python-format -msgid "Creating SR %s" -msgstr "Создание SR %s" - -#: cinder/virt/xenapi/volumeops.py:73 -msgid "Could not create SR" -msgstr "Невозможно создать SR" +msgid "Login error, reason is %s" +msgstr "" -#: cinder/virt/xenapi/volumeops.py:76 -msgid "Could not retrieve SR record" -msgstr "Невозможно получить запись SR" +#: cinder/volume/drivers/huawei/rest_common.py:166 +#, python-format +msgid "" +"%(err)s\n" +"result: %(res)s" +msgstr "" -#: cinder/virt/xenapi/volumeops.py:81 +#: cinder/volume/drivers/huawei/rest_common.py:173 #, python-format -msgid "Introducing SR %s" -msgstr "Внедрение SR %s" +msgid "%s \"data\" was not in result." +msgstr "" -#: cinder/virt/xenapi/volumeops.py:85 -msgid "SR found in xapi database. No need to introduce" -msgstr "SR найдено в базе данных xapi. 
Нет необходимости во внедрении" +#: cinder/volume/drivers/huawei/rest_common.py:208 +msgid "Can't find the Qos policy in array" +msgstr "" -#: cinder/virt/xenapi/volumeops.py:90 -msgid "Could not introduce SR" -msgstr "Невозможно внедрить SR" +#: cinder/volume/drivers/huawei/rest_common.py:246 +msgid "Can't find lun or lun group in array" +msgstr "" -#: cinder/virt/xenapi/volumeops.py:94 +#: cinder/volume/drivers/huawei/rest_common.py:280 #, python-format -msgid "Checking for SR %s" -msgstr "Проверка SR %s" +msgid "Invalid resource pool: %s" +msgstr "" -#: cinder/virt/xenapi/volumeops.py:106 +#: cinder/volume/drivers/huawei/rest_common.py:298 #, python-format -msgid "SR %s not found in the xapi database" -msgstr "SR %s не найден в базе данных xapi" - -#: cinder/virt/xenapi/volumeops.py:112 -msgid "Could not forget SR" -msgstr "Невозможно забыть SR" +msgid "Get pool info error, pool name is:%s" +msgstr "" -#: cinder/virt/xenapi/volumeops.py:121 +#: cinder/volume/drivers/huawei/rest_common.py:327 #, python-format -msgid "Attach_volume: %(connection_info)s, %(instance_name)s, %(mountpoint)s" +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." msgstr "" -#: cinder/virt/xenapi/volumeops.py:178 +#: cinder/volume/drivers/huawei/rest_common.py:354 #, python-format -msgid "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s" -msgstr "Невозможно создать VDI на SR %(sr_ref)s для копии %(instance_name)s" +msgid "_stop_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" -#: cinder/virt/xenapi/volumeops.py:189 +#: cinder/volume/drivers/huawei/rest_common.py:474 #, python-format -msgid "Unable to use SR %(sr_ref)s for instance %(instance_name)s" -msgstr "Невозможно использовать SR %(sr_ref)s для копии %(instance_name)s" +msgid "" +"_mapping_hostgroup_and_lungroup: lun_group: %(lun_group)sview_id: " +"%(view_id)s" +msgstr "" -#: cinder/virt/xenapi/volumeops.py:197 +#: cinder/volume/drivers/huawei/rest_common.py:511 +#: cinder/volume/drivers/huawei/rest_common.py:543 #, python-format -msgid "Unable to attach volume to instance %s" -msgstr "Невозможно присоединить том для копии %s" +msgid "initiator name:%(initiator_name)s, volume name:%(volume)s." +msgstr "" -#: cinder/virt/xenapi/volumeops.py:200 +#: cinder/volume/drivers/huawei/rest_common.py:527 #, python-format -msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s" -msgstr "Точка подключения %(mountpoint)s присоединена к копии %(instance_name)s" +msgid "host lun id is %s" +msgstr "" -#: cinder/virt/xenapi/volumeops.py:210 +#: cinder/volume/drivers/huawei/rest_common.py:553 #, python-format -msgid "Detach_volume: %(instance_name)s, %(mountpoint)s" +msgid "the free wwns %s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:219 +#: cinder/volume/drivers/huawei/rest_common.py:574 #, python-format -msgid "Unable to locate volume %s" -msgstr "Невозможно найти том %s" +msgid "the fc server properties is:%s" +msgstr "" -#: cinder/virt/xenapi/volumeops.py:227 +#: cinder/volume/drivers/huawei/rest_common.py:688 #, python-format -msgid "Unable to detach volume %s" -msgstr "Невозможно отсоединить том %s" +msgid "JSON transfer data error. %s" +msgstr "" -#: cinder/virt/xenapi/volumeops.py:232 +#: cinder/volume/drivers/huawei/rest_common.py:874 #, python-format -msgid "Unable to destroy vbd %s" -msgstr "Невозможно ликвидировать vbd %s" +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." 
+msgstr "" -#: cinder/virt/xenapi/volumeops.py:239 +#: cinder/volume/drivers/huawei/rest_common.py:937 #, python-format -msgid "Error purging SR %s" -msgstr "Ошибка очистки SR %s" +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(fetchtype)s" +msgstr "" -#: cinder/virt/xenapi/volumeops.py:241 +#: cinder/volume/drivers/huawei/rest_common.py:964 #, python-format -msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s" -msgstr "Точка подключения %(mountpoint)s отсоединена от копии %(instance_name)s" +msgid "" +"PrefetchType config is wrong. PrefetchType must in 1,2,3,4. fetchtype " +"is:%(fetchtype)s" +msgstr "" -#: cinder/vnc/xvp_proxy.py:98 cinder/vnc/xvp_proxy.py:103 -#, python-format -msgid "Error in handshake: %s" -msgstr "Ошибка в соглашении: %s" +#: cinder/volume/drivers/huawei/rest_common.py:970 +msgid "Use default prefetch fetchtype. Prefetch fetchtype:Intelligent." +msgstr "" -#: cinder/vnc/xvp_proxy.py:119 +#: cinder/volume/drivers/huawei/rest_common.py:982 #, python-format -msgid "Invalid request: %s" -msgstr "Недопустимый запрос: %s" +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal.LUNcopy name: " +"%(luncopyname)s" +msgstr "" -#: cinder/vnc/xvp_proxy.py:139 +#: cinder/volume/drivers/huawei/rest_common.py:1056 #, python-format -msgid "Request: %s" -msgstr "Запрос: %s" +msgid "" +"_get_iscsi_port_info: Failed to get iscsi port info through config IP " +"%(ip)s, please check config file." +msgstr "" -#: cinder/vnc/xvp_proxy.py:142 +#: cinder/volume/drivers/huawei/rest_common.py:1101 #, python-format -msgid "Request made with missing token: %s" -msgstr "Запрос сделан с отсутствующим токеном: %s" +msgid "_get_tgt_iqn: iSCSI target iqn is %s" +msgstr "" -#: cinder/vnc/xvp_proxy.py:153 +#: cinder/volume/drivers/huawei/rest_common.py:1124 #, python-format -msgid "Request made with invalid token: %s" -msgstr "Запрос сделан с недопустимым токеном: %s" +msgid "_parse_volume_type: type id: %(type_id)s config parameter is: %(params)s" +msgstr "" -#: cinder/vnc/xvp_proxy.py:160 +#: cinder/volume/drivers/huawei/rest_common.py:1157 #, python-format -msgid "Unexpected error: %s" -msgstr "Непредвиденная ошибка: %s" +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the configuration file " +"%(conf)s." +msgstr "" -#: cinder/vnc/xvp_proxy.py:180 +#: cinder/volume/drivers/huawei/rest_common.py:1162 #, python-format -msgid "Starting cinder-xvpvncproxy node (version %s)" -msgstr "Запуск узла сети cinder-xvpvncproxy (версия %s)" - -#: cinder/volume/api.py:74 cinder/volume/api.py:220 -msgid "status must be available" +msgid "The config parameters are: %s" msgstr "" -#: cinder/volume/api.py:85 +#: cinder/volume/drivers/huawei/rest_common.py:1239 +#: cinder/volume/drivers/huawei/ssh_common.py:118 +#: cinder/volume/drivers/huawei/ssh_common.py:1265 #, python-format -msgid "Quota exceeded for %(pid)s, tried to create %(size)sG volume" -msgstr "Превышена квота для %(pid)s, попытка создания тома %(size)sG" +msgid "_check_conf_file: Config file invalid. %s must be set." +msgstr "" -#: cinder/volume/api.py:137 -#, fuzzy -msgid "Volume status must be available or error" -msgstr "Состояние тома должно быть доступно" +#: cinder/volume/drivers/huawei/rest_common.py:1246 +#: cinder/volume/drivers/huawei/ssh_common.py:125 +msgid "_check_conf_file: Config file invalid. StoragePool must be set." 
+msgstr "" -#: cinder/volume/api.py:142 +#: cinder/volume/drivers/huawei/rest_common.py:1256 #, python-format -msgid "Volume still has %d dependent snapshots" +msgid "" +"_check_conf_file: Config file invalid. Host OSType invalid.\n" +"The valid values are: %(os_list)s" msgstr "" -#: cinder/volume/api.py:223 -msgid "already attached" +#: cinder/volume/drivers/huawei/rest_common.py:1300 +msgid "Can not find lun in array" msgstr "" -#: cinder/volume/api.py:230 -msgid "already detached" +#: cinder/volume/drivers/huawei/ssh_common.py:54 +#, python-format +msgid "ssh_read: Read SSH timeout. %s" msgstr "" -#: cinder/volume/api.py:292 -msgid "must be available" +#: cinder/volume/drivers/huawei/ssh_common.py:70 +msgid "No response message. Please check system status." msgstr "" -#: cinder/volume/api.py:325 -#, fuzzy -msgid "Volume Snapshot status must be available or error" -msgstr "Состояние тома должно быть доступно" +#: cinder/volume/drivers/huawei/ssh_common.py:101 +#: cinder/volume/drivers/huawei/ssh_common.py:1249 +msgid "do_setup" +msgstr "" -#: cinder/volume/driver.py:96 +#: cinder/volume/drivers/huawei/ssh_common.py:135 +#: cinder/volume/drivers/huawei/ssh_common.py:1287 #, python-format -msgid "Recovering from a failed execute. Try number %s" -msgstr "Восстановление после недопустимого выполнения. Попытка номер %s" +msgid "" +"_check_conf_file: Config file invalid. Host OSType is invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" -#: cinder/volume/driver.py:106 +#: cinder/volume/drivers/huawei/ssh_common.py:169 #, python-format -msgid "volume group %s doesn't exist" -msgstr "том группы %s не существует" +msgid "_get_login_info: %s" +msgstr "" -#: cinder/volume/driver.py:270 +#: cinder/volume/drivers/huawei/ssh_common.py:224 #, python-format -msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %d" +msgid "create_volume: volume name: %s" msgstr "" -#: cinder/volume/driver.py:318 +#: cinder/volume/drivers/huawei/ssh_common.py:242 #, python-format -msgid "Skipping remove_export. No iscsi_target provisioned for volume: %d" +msgid "" +"_name_translate: Name in cinder: %(old)s, new name in storage system: " +"%(new)s" msgstr "" -#: cinder/volume/driver.py:327 +#: cinder/volume/drivers/huawei/ssh_common.py:279 #, python-format msgid "" -"Skipping remove_export. No iscsi_target is presently exported for volume:" -" %d" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the element in configuration " +"file %(conf)s." msgstr "" -#: cinder/volume/driver.py:337 -msgid "ISCSI provider_location not stored, using discovery" +#: cinder/volume/drivers/huawei/ssh_common.py:373 +#: cinder/volume/drivers/huawei/ssh_common.py:1451 +#, python-format +msgid "LUNType must be \"Thin\" or \"Thick\". LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:395 +msgid "" +"_parse_conf_lun_params: Use default prefetch type. Prefetch type: " +"Intelligent" msgstr "" -#: cinder/volume/driver.py:384 +#: cinder/volume/drivers/huawei/ssh_common.py:421 #, python-format -msgid "Could not find iSCSI export for volume %s" -msgstr "Невозможно найти экспортирование iSCSI для тома %s" +msgid "" +"_get_maximum_capacity_pool_id: Failed to get pool id. Please check config" +" file and make sure the StoragePool %s is created in storage array." 
+msgstr "" -#: cinder/volume/driver.py:388 +#: cinder/volume/drivers/huawei/ssh_common.py:436 #, python-format -msgid "ISCSI Discovery: Found %s" +msgid "CLI command: %s" msgstr "" -#: cinder/volume/driver.py:466 +#: cinder/volume/drivers/huawei/ssh_common.py:466 #, python-format -msgid "Cannot confirm exported volume id:%(volume_id)s." +msgid "" +"_execute_cli: Can not connect to IP %(old)s, try to connect to the other " +"IP %(new)s." msgstr "" -"Невозможно подтвердить идентификатора экспортированного " -"тома:%(volume_id)s." -#: cinder/volume/driver.py:493 +#: cinder/volume/drivers/huawei/ssh_common.py:501 #, python-format -msgid "FAKE ISCSI: %s" +msgid "_execute_cli: %s" msgstr "" -#: cinder/volume/driver.py:505 +#: cinder/volume/drivers/huawei/ssh_common.py:511 #, python-format -msgid "rbd has no pool %s" -msgstr "rbd не имеет пула %s" +msgid "delete_volume: volume name: %s" +msgstr "" -#: cinder/volume/driver.py:579 +#: cinder/volume/drivers/huawei/ssh_common.py:516 #, python-format -msgid "Sheepdog is not working: %s" -msgstr "Sheepdog не выполняется: %s" +msgid "delete_volume: Volume %(name)s does not exist." +msgstr "" -#: cinder/volume/driver.py:581 -msgid "Sheepdog is not working" -msgstr "Sheepdog не выполняется" +#: cinder/volume/drivers/huawei/ssh_common.py:570 +#, python-format +msgid "" +"create_volume_from_snapshot: snapshot name: %(snapshot)s, volume name: " +"%(volume)s" +msgstr "" -#: cinder/volume/driver.py:680 cinder/volume/driver.py:685 +#: cinder/volume/drivers/huawei/ssh_common.py:580 #, python-format -msgid "LoggingVolumeDriver: %s" -msgstr "LoggingVolumeDriver: %s" +msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." +msgstr "" -#: cinder/volume/manager.py:96 +#: cinder/volume/drivers/huawei/ssh_common.py:650 #, python-format -msgid "Re-exporting %s volumes" -msgstr "Повторное экспортирование %s томов" +msgid "_wait_for_luncopy: LUNcopy %(luncopyname)s status is %(status)s." +msgstr "" -#: cinder/volume/manager.py:101 +#: cinder/volume/drivers/huawei/ssh_common.py:688 #, python-format -msgid "volume %s: skipping export" -msgstr "том %s: пропуск экспортирования" +msgid "create_cloned_volume: src volume: %(src)s, tgt volume: %(tgt)s" +msgstr "" -#: cinder/volume/manager.py:107 +#: cinder/volume/drivers/huawei/ssh_common.py:697 #, python-format -msgid "volume %s: creating" -msgstr "том %s: создание" +msgid "Source volume %(name)s does not exist." +msgstr "" -#: cinder/volume/manager.py:119 +#: cinder/volume/drivers/huawei/ssh_common.py:739 #, python-format -msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" -msgstr "том %(vol_name)s: создание lv объёмом %(vol_size)sG" +msgid "" +"extend_volume: extended volume name: %(extended_name)s new added volume " +"name: %(added_name)s new added volume size: %(added_size)s" +msgstr "" -#: cinder/volume/manager.py:131 +#: cinder/volume/drivers/huawei/ssh_common.py:747 #, python-format -msgid "volume %s: creating export" -msgstr "том %s: создание экспортирования" +msgid "extend_volume: volume %s does not exist." 
+msgstr "" -#: cinder/volume/manager.py:144 +#: cinder/volume/drivers/huawei/ssh_common.py:779 #, python-format -msgid "volume %s: created successfully" -msgstr "том %s: создание завершено" +msgid "create_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" -#: cinder/volume/manager.py:153 -msgid "Volume is still attached" -msgstr "Том до сих пор присоединён" +#: cinder/volume/drivers/huawei/ssh_common.py:785 +msgid "create_snapshot: Resource pool needs 1GB valid size at least." +msgstr "" -#: cinder/volume/manager.py:155 -msgid "Volume is not local to this node" +#: cinder/volume/drivers/huawei/ssh_common.py:792 +#, python-format +msgid "create_snapshot: Volume %(name)s does not exist." msgstr "" -#: cinder/volume/manager.py:159 +#: cinder/volume/drivers/huawei/ssh_common.py:855 #, python-format -msgid "volume %s: removing export" -msgstr "том %s: удаление экспортирования" +msgid "delete_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" -#: cinder/volume/manager.py:161 +#: cinder/volume/drivers/huawei/ssh_common.py:865 #, python-format -msgid "volume %s: deleting" -msgstr "том %s: удаление" +msgid "" +"delete_snapshot: Can not delete snapshot %s for it is a source LUN of " +"LUNCopy." +msgstr "" -#: cinder/volume/manager.py:164 +#: cinder/volume/drivers/huawei/ssh_common.py:873 #, python-format -msgid "volume %s: volume is busy" -msgstr "том %s: том занят" +msgid "delete_snapshot: Snapshot %(snap)s does not exist." +msgstr "" -#: cinder/volume/manager.py:176 +#: cinder/volume/drivers/huawei/ssh_common.py:916 #, python-format -msgid "volume %s: deleted successfully" -msgstr "том %s: удаление завершено" +msgid "" +"%(func)s: %(msg)s\n" +"CLI command: %(cmd)s\n" +"CLI out: %(out)s" +msgstr "" -#: cinder/volume/manager.py:183 +#: cinder/volume/drivers/huawei/ssh_common.py:933 #, python-format -msgid "snapshot %s: creating" -msgstr "снимок %s: создание" +msgid "map_volume: Volume %s was not found." +msgstr "" -#: cinder/volume/manager.py:187 +#: cinder/volume/drivers/huawei/ssh_common.py:1079 #, python-format -msgid "snapshot %(snap_name)s: creating" -msgstr "снимок %(snap_name)s: создание" +msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s." +msgstr "" -#: cinder/volume/manager.py:202 +#: cinder/volume/drivers/huawei/ssh_common.py:1102 #, python-format -msgid "snapshot %s: created successfully" -msgstr "снимок %s: создание завершено" +msgid "remove_map: Host %s does not exist." +msgstr "" -#: cinder/volume/manager.py:211 +#: cinder/volume/drivers/huawei/ssh_common.py:1106 #, python-format -msgid "snapshot %s: deleting" -msgstr "снимок %s: удаление" +msgid "remove_map: Volume %s does not exist." +msgstr "" -#: cinder/volume/manager.py:214 -#, fuzzy, python-format -msgid "snapshot %s: snapshot is busy" -msgstr "снимок %s: создание завершено" +#: cinder/volume/drivers/huawei/ssh_common.py:1119 +#, python-format +msgid "remove_map: No map between host %(host)s and volume %(volume)s." +msgstr "" -#: cinder/volume/manager.py:226 +#: cinder/volume/drivers/huawei/ssh_common.py:1138 #, python-format -msgid "snapshot %s: deleted successfully" -msgstr "снимок %s: удаление выполнено" +msgid "" +"_delete_map: There are IOs accessing the system. Retry to delete host map" +" %(mapid)s 10s later." 
+msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1146 +#, python-format +msgid "" +"_delete_map: Failed to delete host map %(mapid)s.\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1185 +msgid "_update_volume_stats: Updating volume stats." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1277 +msgid "_check_conf_file: Config file invalid. StoragePool must be specified." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1311 +msgid "" +"_get_device_type: The driver only supports Dorado5100 and Dorado 2100 G2 " +"now." +msgstr "" -#: cinder/volume/manager.py:310 -msgid "Checking volume capabilities" -msgstr "Проверка возможностей тома" +#: cinder/volume/drivers/huawei/ssh_common.py:1389 +#, python-format +msgid "" +"create_volume_from_snapshot: %(device)s does not support create volume " +"from snapshot." +msgstr "" -#: cinder/volume/manager.py:314 +#: cinder/volume/drivers/huawei/ssh_common.py:1396 #, python-format -msgid "New capabilities found: %s" -msgstr "Обнаружены новые возможности: %s" +msgid "create_cloned_volume: %(device)s does not support clone volume." +msgstr "" -#: cinder/volume/manager.py:325 -msgid "Clear capabilities" -msgstr "Очистить возможности" +#: cinder/volume/drivers/huawei/ssh_common.py:1404 +#, python-format +msgid "extend_volume: %(device)s does not support extend volume." +msgstr "" -#: cinder/volume/manager.py:329 +#: cinder/volume/drivers/huawei/ssh_common.py:1413 #, python-format -msgid "Notification {%s} received" -msgstr "Принято уведомление {%s}" +msgid "create_snapshot: %(device)s does not support snapshot." +msgstr "" -#: cinder/volume/netapp.py:79 -#, fuzzy, python-format -msgid "API %(name)sfailed: %(reason)s" -msgstr "Образ %(image_id)s недопустим: %(reason)s" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:132 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:142 +#, python-format +msgid "Failed getting details for pool %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:179 +msgid "do_setup: No configured nodes." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:182 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:186 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:190 +msgid "Unable to determine system name" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:193 +msgid "Unable to determine system id" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:196 +msgid "Unable to determine pool extent size" +msgstr "" -#: cinder/volume/netapp.py:109 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:203 +#: cinder/volume/drivers/netapp/iscsi.py:122 +#: cinder/volume/drivers/netapp/nfs.py:639 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:157 #, python-format msgid "%s is not set" msgstr "" -#: cinder/volume/netapp.py:128 -#, fuzzy -msgid "Connected to DFM server" -msgstr "Переподлючено к очереди" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:209 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:217 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:225 +msgid "leave: check_for_setup_error" +msgstr "" -#: cinder/volume/netapp.py:159 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:235 #, python-format -msgid "Job failed: %s" +msgid "ensure_export: Volume %s not found on storage" msgstr "" -#: cinder/volume/netapp.py:240 -#, fuzzy -msgid "Failed to provision dataset member" -msgstr "Ошибка обновления базы данных" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:254 +msgid "The connector does not contain the required information." 
+msgstr "" -#: cinder/volume/netapp.py:252 -msgid "No LUN was created by the provision job" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:277 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" msgstr "" -#: cinder/volume/netapp.py:261 cinder/volume/netapp.py:433 -#, fuzzy, python-format -msgid "Failed to find LUN ID for volume %s" -msgstr "Ошибка поиска vbd для vdi %s" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:297 +msgid "CHAP secret exists for host but CHAP is disabled" +msgstr "" -#: cinder/volume/netapp.py:280 -msgid "Failed to remove and delete dataset member" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:302 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" msgstr "" -#: cinder/volume/netapp.py:603 cinder/volume/netapp.py:657 -#, fuzzy, python-format -msgid "No LUN ID for volume %s" -msgstr "Невозможно найти экспортирование iSCSI для тома %s" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:314 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" -#: cinder/volume/netapp.py:607 cinder/volume/netapp.py:661 -#, fuzzy, python-format -msgid "Failed to get LUN details for LUN ID %s" -msgstr "Ошибка получения метаданных для ip: %s" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:316 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" -#: cinder/volume/netapp.py:614 -#, fuzzy, python-format -msgid "Failed to get host details for host ID %s" -msgstr "Ошибка получения метаданных для ip: %s" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:333 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" -#: cinder/volume/netapp.py:620 -#, fuzzy, python-format -msgid "Failed to get target portal for filer: %s" -msgstr "Ошибка получения метаданных для ip: %s" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:342 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" -#: cinder/volume/netapp.py:625 -#, fuzzy, python-format -msgid "Failed to get target IQN for filer: %s" -msgstr "Ошибка получения метаданных для ip: %s" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:365 +msgid "" +"Could not get FC connection information for the host-volume connection. " +"Is the host configured properly for FC connections?" +msgstr "" -#: cinder/volume/san.py:113 cinder/volume/san.py:151 -msgid "Specify san_password or san_private_key" -msgstr "Задайте san_password или san_private_key" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:380 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" -#: cinder/volume/san.py:156 -msgid "san_ip must be set" -msgstr "san_ip должен быть назначен" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:385 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" -#: cinder/volume/san.py:320 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:403 #, python-format -msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" -msgstr "LUID не найден для %(zfs_poolname)s. 
Вывод=%(out)s" +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:411 +msgid "terminate_connection: Failed to get host name from connector." +msgstr "" -#: cinder/volume/san.py:452 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:421 #, python-format -msgid "CLIQ command returned %s" -msgstr "Возврат команды CLIQ %s" +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:447 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:459 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" -#: cinder/volume/san.py:458 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:470 #, python-format -msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgid "enter: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:474 +msgid "extend_volume: Extending a volume with snapshots is not supported." msgstr "" -"Неправильный ответ на команду CLIQ %(verb)s %(cliq_args)s. " -"Результат=%(out)s" -#: cinder/volume/san.py:466 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:481 #, python-format -msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" -msgstr "Ошибка выполнения команды CLIQ %(verb)s %(cliq_args)s. Результат=%(out)s" +msgid "leave: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:497 +#, python-format +msgid "enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:523 +#, python-format +msgid "leave: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" -#: cinder/volume/san.py:496 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:540 #, python-format msgid "" -"Unexpected number of virtual ips for cluster %(cluster_name)s. " -"Result=%(_xml)s" +"enter: retype: id=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" msgstr "" -"Непредвиденное количество виртуальных ip для кластера %(cluster_name)s. " -"Результат=%(_xml)s" -#: cinder/volume/san.py:549 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:581 #, python-format -msgid "Volume info: %(volume_name)s => %(volume_attributes)s" -msgstr "Сведения о томе: %(volume_name)s => %(volume_attributes)s" +msgid "" +"exit: retype: ild=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" -#: cinder/volume/san.py:594 -msgid "local_path not supported" -msgstr "local_path не поддерживается" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:622 +msgid "Could not get pool data from the storage" +msgstr "" -#: cinder/volume/san.py:626 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:623 +msgid "_update_volume_stats: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:44 #, python-format -msgid "Could not determine project for volume %s, can't export" -msgstr "Невозможно определить проект для тома %s, невозможно экспортировать" +msgid "Could not find key in output of command %(cmd)s: %(out)s" +msgstr "" -#: cinder/volume/san.py:696 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:64 #, python-format -msgid "Payload for SolidFire API call: %s" +msgid "Failed to get code level (%s)." 
msgstr "" -#: cinder/volume/san.py:713 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:86 #, python-format -msgid "Call to json.loads() raised an exception: %s" +msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s" msgstr "" -#: cinder/volume/san.py:718 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:143 #, python-format -msgid "Results of SolidFire API call: %s" +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:165 +#, python-format +msgid "Failed to find host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:178 +#, python-format +msgid "enter: get_host_from_connector: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:207 +#, python-format +msgid "leave: get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:218 +#, python-format +msgid "enter: create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:224 +msgid "create_host: Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:235 +msgid "create_host: No initiators or wwpns supplied." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:265 +#, python-format +msgid "leave: create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:275 +#, python-format +msgid "enter: map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:301 +#, python-format +msgid "" +"leave: map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host " +"%(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:311 +#, python-format +msgid "enter: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:318 +#, python-format +msgid "unmap_vol_from_host: No mapping of volume %(vol_name)s to any host found." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:324 +#, python-format +msgid "" +"unmap_vol_from_host: Multiple mappings of volume %(vol_name)s found, no " +"host specified." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:336 +#, python-format +msgid "" +"unmap_vol_from_host: No mapping of volume %(vol_name)s to host %(host) " +"found." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:348 +#, python-format +msgid "leave: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:377 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:383 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:390 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:397 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:402 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:408 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:417 +#, python-format +msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:452 +msgid "Protocol must be specified as ' iSCSI' or ' FC'." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:495 +#, python-format +msgid "enter: create_vdisk: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:498 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:525 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping%(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:535 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within theallotted %(to)d " +"seconds timeout. Terminating." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:544 +#, python-format +msgid "" +"enter: run_flashcopy: execute FlashCopy from source %(source)s to target " +"%(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:552 +#, python-format +msgid "leave: run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:572 +#, python-format +msgid "Loopcall: _check_vdisk_fc_mappings(), vdisk %s" msgstr "" -#: cinder/volume/san.py:732 -#, python-format -msgid "Found solidfire account: %s" -msgstr "" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:595 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:631 +#, python-format +msgid "Calling _ensure_vdisk_no_fc_mappings: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:639 +#, python-format +msgid "enter: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:641 +#, python-format +msgid "Tried to delete non-existant vdisk %s." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:645 +#, python-format +msgid "leave: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:649 +#, python-format +msgid "enter: create_copy: snapshot %(src)s to %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:654 +#, python-format +msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:669 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt)s from vdisk %(src)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:691 +msgid "migrate_volume started without a vdisk copy in the expected pool." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:743 +#, python-format +msgid "" +"Ignore change IO group as storage code level is %(code_level)s, below " +"then 6.4.0.0" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:35 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:211 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:244 +#, fuzzy, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" +"%(description)s\n" +"Команда: %(cmd)s\n" +"Код выхода: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:53 +#, python-format +msgid "Expected no output from CLI command %(cmd)s, got %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:65 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:256 +#, python-format +msgid "" +"Failed to parse CLI output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:142 +msgid "Must pass wwpn or host to lsfabric." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:158 +#, python-format +msgid "Did not find success message nor error for %(fun)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:163 +msgid "" +"storwize_svc_multihostmap_enabled is set to False, not allowing multi " +"host mapping." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:347 +#, python-format +msgid "Did not find expected key %(key)s in %(fun)s: %(raw)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:382 +#, python-format +msgid "" +"Unexpected CLI response: header/row mismatch. header: %(header)s, row: " +"%(row)s" +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:419 +#, python-format +msgid "No element by given name %s." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:440 +msgid "Not a valid value for NaElement." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:444 +msgid "NaElement name cannot be null." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:468 +msgid "Type cannot be converted into NaElement." 
+msgstr "" + +#: cinder/volume/drivers/netapp/common.py:75 +msgid "Required configuration not found" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:103 +#, python-format +msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:109 +#, python-format +msgid "Storage family %s is not supported" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:116 +#, python-format +msgid "No default storage protocol found for storage family %(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:123 +#, python-format +msgid "" +"Protocol %(storage_protocol)s is not supported for storage family " +"%(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:130 +#, python-format +msgid "" +"NetApp driver of family %(storage_family)s and protocol " +"%(storage_protocol)s loaded" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:139 +msgid "Only loading netapp drivers supported." +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:158 +#, python-format +msgid "" +"The configured NetApp driver is deprecated. Please refer the link to " +"resolve the issue '%s'." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:69 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:105 +#, fuzzy, python-format +msgid "Using NetApp filer: %s" +msgstr "Выполняемые копии: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:150 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:166 +#, fuzzy, python-format +msgid "Created LUN with name %s" +msgstr "Создана папка с адресом %s" + +#: cinder/volume/drivers/netapp/iscsi.py:175 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:191 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:227 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:232 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:238 +#, fuzzy, python-format +msgid "Failed to get LUN target details for the LUN %s" +msgstr "Ошибка получения метаданных для ip: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:249 +#, fuzzy, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "Ошибка получения метаданных для ip: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:252 +#, fuzzy, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "Ошибка получения метаданных для ip: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:290 +#, fuzzy, python-format +msgid "Snapshot %s deletion successful" +msgstr "снимок %s: удаление выполнено" + +#: cinder/volume/drivers/netapp/iscsi.py:310 +#: cinder/volume/drivers/netapp/iscsi.py:565 +#: cinder/volume/drivers/netapp/nfs.py:99 +#: cinder/volume/drivers/netapp/nfs.py:206 +#, python-format +msgid "Resizing %s failed. Cleaning volume." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:325 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:412 +#, python-format +msgid "Error mapping lun. 
Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:431 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:511 +msgid "Object is not a NetApp LUN." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:543 +#, python-format +msgid "Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:545 +#, python-format +msgid "Error getting lun attribute. Exception: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:600 +#, python-format +msgid "No need to extend volume %s as it is already the requested new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:606 +#, python-format +msgid "Resizing lun %s directly to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:633 +#, python-format +msgid "Lun %(path)s geometry failed. Message - %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:662 +#, python-format +msgid "Moving lun %(name)s to %(new_name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:677 +#, python-format +msgid "Resizing lun %s using sub clone to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:684 +#, python-format +msgid "%s cannot be sub clone resized as it is hosted on compressed volume" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:690 +#, python-format +msgid "%s cannot be sub clone resized as it contains no blocks." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:707 +#, python-format +msgid "Post clone resize lun %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:718 +#, python-format +msgid "Failure staging lun %s to tmp." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:723 +#, python-format +msgid "Failure moving new cloned lun to %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:727 +#, python-format +msgid "Failure deleting staged tmp lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:730 +#, python-format +msgid "Unknown exception in post clone resize lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:732 +#, python-format +msgid "Exception details: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:736 +msgid "Getting lun block count." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:741 +#, python-format +msgid "Failure getting lun info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:785 +#, python-format +msgid "Failed to get vol with required size and extra specs for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:796 +#, python-format +msgid "Error provisioning vol %(name)s on %(volume)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:841 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:982 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:986 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1087 +msgid "Cluster ssc is not updated. No volume stats found." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1149 +#: cinder/volume/drivers/netapp/nfs.py:1080 +msgid "Unsupported ONTAP version. ONTAP version 7.3.1 and above is supported." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1153 +#: cinder/volume/drivers/netapp/nfs.py:1084 +#: cinder/volume/drivers/netapp/utils.py:320 +msgid "Api version could not be determined." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1164 +#, fuzzy, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "Ошибка получения метаданных для ip: %s" + +#: cinder/volume/drivers/netapp/iscsi.py:1273 +#, python-format +msgid "Error finding luns for volume %s. Verify volume exists." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1390 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1393 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1456 +msgid "Volume refresh job already running. Returning..." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1462 +#, python-format +msgid "Error refreshing vol capacity. Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1470 +#, python-format +msgid "Refreshing capacity info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:104 +#: cinder/volume/drivers/netapp/nfs.py:211 +#, python-format +msgid "NFS file %s not discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:224 +#, python-format +msgid "Copied image to volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:230 +#, python-format +msgid "Registering image in cache %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:237 +#, python-format +msgid "" +"Exception while registering image %(image_id)s in cache. Exception: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:250 +#, python-format +msgid "Found cache file for image %(image_id)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:263 +#, python-format +msgid "Cloning img from cache for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:271 +msgid "Image cache cleaning in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:282 +msgid "Image cache cleaning in progress." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:293 +#, python-format +msgid "Cleaning cache for share %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:298 +#, python-format +msgid "Files to be queued for deletion %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:305 +#, python-format +msgid "Exception during cache cleaning %(share)s. Message - %(ex)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:311 +msgid "Image cache cleaning done." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:336 +#, python-format +msgid "Bytes to free %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:343 +#, python-format +msgid "Delete file path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:358 +#, python-format +msgid "Deleting file at path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:363 +#, python-format +msgid "Exception during deleting %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:395 +#, python-format +msgid "Unexpected exception in cloning image %(image_id)s. 
Message: %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:411 +#, python-format +msgid "Cloning image %s from cache" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:415 +#, python-format +msgid "Cache share: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:425 +#, python-format +msgid "Unexpected exception during image cloning in share %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:431 +#, python-format +msgid "Cloning image %s directly in share" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:436 +#, python-format +msgid "Share is cloneable %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:443 +#, python-format +msgid "Image is raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:450 +#, python-format +msgid "Image will locally be converted to raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:457 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "Преобразование в необработанный, но текущий формат %s" + +#: cinder/volume/drivers/netapp/nfs.py:467 +#, python-format +msgid "Performing post clone for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:474 +msgid "NFS file could not be discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:478 +msgid "Checking file for resize" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:482 +#, python-format +msgid "Resizing file to %sG" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:488 +msgid "Resizing image file failed." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:510 +msgid "Discover file retries exhausted." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:529 +#, python-format +msgid "Image location not in the expected format %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:557 +#, python-format +msgid "Found possible share matches %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:561 +msgid "Unexpected exception while short listing used share." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:599 +#, python-format +msgid "Extending volume %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:710 +#, python-format +msgid "Shares on vserver %s will only be used for provisioning." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:714 +#: cinder/volume/drivers/netapp/nfs.py:892 +msgid "No vserver set in config. SSC will be disabled." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:757 +#, python-format +msgid "Exception creating vol %(name)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:765 +#, python-format +msgid "Volume %s could not be created on shares." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:815 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:856 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:862 +#, python-format +msgid "" +"Cloning with params volume %(volume)s, src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:924 +msgid "No cluster ssc stats found. Wait for next volume stats update." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:941 +msgid "No shares found hence skipping ssc refresh." 
+msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:978 +#: cinder/volume/drivers/netapp/nfs.py:1221 +#, python-format +msgid "Shortlisted del elg files %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:983 +#: cinder/volume/drivers/netapp/nfs.py:1226 +#, python-format +msgid "Getting file usage for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:988 +#: cinder/volume/drivers/netapp/nfs.py:1231 +#, python-format +msgid "file-usage for path %(path)s is %(bytes)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1005 +#: cinder/volume/drivers/netapp/nfs.py:1268 +#, python-format +msgid "Share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1007 +#: cinder/volume/drivers/netapp/nfs.py:1270 +#, python-format +msgid "No share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1038 +#, python-format +msgid "Found volume %(vol)s for share %(share)s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1129 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1139 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:241 +#, python-format +msgid "Unexpected error while creating ssc vol list. Message - %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:272 +#, python-format +msgid "Exception querying aggr options. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:313 +#, python-format +msgid "Exception querying sis information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:347 +#, python-format +msgid "Exception querying mirror information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:379 +#, python-format +msgid "Exception querying storage disk. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:421 +#, python-format +msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:455 +#, python-format +msgid "Successfully completed stale refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:482 +#, python-format +msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:488 +#, python-format +msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:500 +msgid "Backend not a VolumeDriver." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:502 +msgid "Backend server not NaServer." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:505 +msgid "ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:517 +msgid "refresh stale ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:616 +msgid "Fatal error: User not permitted to query NetApp volumes." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:623 +#, python-format +msgid "" +"The user does not have access or sufficient privileges to use all ssc " +"apis. The ssc features %s may not work as expected." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:122 +msgid "ems executed successfully." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:124 +#, python-format +msgid "Failed to invoke ems. 
Message : %s" +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:137 +msgid "" +"It is not the recommended way to use drivers by NetApp. Please use " +"NetAppDriver to achieve the functionality." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:160 +msgid "Requires an NaServer instance." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:317 +msgid "Unsupported Clustered Data ONTAP version." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:99 +#, fuzzy, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "том группы %s не существует" + +#: cinder/volume/drivers/nexenta/iscsi.py:150 +#, python-format +msgid "Extending volume: %(id)s New size: %(size)s GB" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:166 +#, python-format +msgid "Volume %s does not exist, it seems it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:179 +#, python-format +msgid "Cannot delete snapshot %(origin): %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:190 +#, python-format +msgid "Creating temp snapshot of the original volume: %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:200 +#: cinder/volume/drivers/nexenta/nfs.py:200 +#, python-format +msgid "Volume creation failed, deleting created snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:205 +#: cinder/volume/drivers/nexenta/nfs.py:205 +#, python-format +msgid "Failed to delete zfs snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:223 +#, python-format +msgid "Enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:250 +#, python-format +msgid "Remote NexentaStor appliance at %s should be SSH-bound." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:267 +#, python-format +msgid "" +"Cannot send source snapshot %(src)s to destination %(dst)s. Reason: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:275 +#, python-format +msgid "" +"Cannot delete temporary source snapshot %(src)s on NexentaStor Appliance:" +" %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:281 +#, python-format +msgid "Cannot delete source volume %(volume)s on NexentaStor Appliance: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:318 +#, python-format +msgid "Snapshot %s does not exist, it seems it was already deleted." 
+msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:439 +#: cinder/volume/drivers/windows/windows_utils.py:230 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:449 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:461 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:471 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:481 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:514 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:522 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:83 +#, fuzzy, python-format +msgid "Sending JSON data: %s" +msgstr "Заданные данные: %s" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:88 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:89 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:90 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:96 +#, fuzzy, python-format +msgid "Got response: %s" +msgstr "ответ %s" + +#: cinder/volume/drivers/nexenta/nfs.py:85 +#, python-format +msgid "Volume %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:89 +#, python-format +msgid "Folder %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:114 +#, python-format +msgid "Creating folder on Nexenta Store %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:146 +#, python-format +msgid "Cannot destroy created folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:176 +#, python-format +msgid "Cannot destroy cloned folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:227 +#, python-format +msgid "Folder %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:237 +#: cinder/volume/drivers/nexenta/nfs.py:268 +#, python-format +msgid "Snapshot %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:302 +#, python-format +msgid "Creating regular file: %s.This may take some time." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:313 +#, python-format +msgid "Regular file: %s created." 
+msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:365 +#, python-format +msgid "Sharing folder %s on Nexenta Store" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:393 +#, python-format +msgid "Shares loaded: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/utils.py:46 +#, python-format +msgid "Invalid value: \"%s\"" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:93 +#, python-format +msgid "CLIQ command returned %s" +msgstr "Возврат команды CLIQ %s" + +#: cinder/volume/drivers/san/hp_lefthand.py:99 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" +"Неправильный ответ на команду CLIQ %(verb)s %(cliq_args)s. " +"Результат=%(out)s" + +#: cinder/volume/drivers/san/hp_lefthand.py:107 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "Ошибка выполнения команды CLIQ %(verb)s %(cliq_args)s. Результат=%(out)s" + +#: cinder/volume/drivers/san/hp_lefthand.py:137 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" +"Непредвиденное количество виртуальных ip для кластера %(cluster_name)s. " +"Результат=%(_xml)s" + +#: cinder/volume/drivers/san/hp_lefthand.py:190 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "Сведения о томе: %(volume_name)s => %(volume_attributes)s" + +#: cinder/volume/drivers/san/hp_lefthand.py:246 +#, python-format +msgid "Snapshot info: %(name)s => %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:321 +msgid "local_path not supported" +msgstr "local_path не поддерживается" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "Задайте san_password или san_private_key" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "san_ip должен быть назначен" + +#: cinder/volume/drivers/san/solaris.py:79 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "LUID не найден для %(zfs_poolname)s. Вывод=%(out)s" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:166 +#, python-format +msgid "Invalid hp3parclient version. Version %s or greater required." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:179 +#, python-format +msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:193 +#, python-format +msgid "HP3PARCommon %(common_ver)s, hp3parclient %(rest_ver)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:212 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:494 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:228 +#, python-format +msgid "Failed to get domain because CPG (%s) doesn't exist on array." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:247 +#, python-format +msgid "Error extending volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:347 +#, python-format +msgid "command %s failed" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:390 +#, fuzzy, python-format +msgid "Error running ssh command: %s" +msgstr "Ошибка в соглашении: %s" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:586 +#, python-format +msgid "VV Set %s does not exist." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:633 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:684 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:752 +#, fuzzy, python-format +msgid "Volume (%s) already exists on array" +msgstr "группа %s уже существует" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1004 +#, python-format +msgid "Failure in update_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1019 +#, python-format +msgid "Failure in clear_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1031 +#, python-format +msgid "Error attaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1039 +#, python-format +msgid "Error detaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:124 +#, python-format +msgid "Invalid IP address format '%s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:158 +#, python-format +msgid "" +"Found invalid iSCSI IP address(s) in configuration option(s) " +"hp3par_iscsi_ips or iscsi_ip_address '%s.'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:164 +msgid "At least one valid iSCSI IP address must be set." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:266 +msgid "Least busy iSCSI port not found, using first iSCSI port in list." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:75 +#, python-format +msgid "Failure while invoking function: %(func)s. Error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:162 +#, python-format +msgid "Error while terminating session: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:165 +msgid "Successfully established connection to the server." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:172 +#, python-format +msgid "Error while logging out the user: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:218 +#, python-format +msgid "" +"Not authenticated error occurred. Will create session and try API call " +"again: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:258 +#, python-format +msgid "Task: %(task)s progress: %(prog)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:262 +#, python-format +msgid "Task %s status: success." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:266 +#: cinder/volume/drivers/vmware/api.py:271 +#, python-format +msgid "Task: %(task)s failed with error: %(err)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:290 +msgid "Lease is ready." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:294 +msgid "Lease initializing..." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:304 +#, python-format +msgid "Error: unknown lease state %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:51 +#, python-format +msgid "Read %(bytes)s out of %(max)s from ThreadSafePipe." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:56 +#, python-format +msgid "Completed transfer of size %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:102 +#, python-format +msgid "Initiating image service update on image: %(image)s with meta: %(meta)s" +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:117 +#, python-format +msgid "Glance image: %s is now active." 
+msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:123 +#, python-format +msgid "Glance image: %s is in killed state." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:132 +#, python-format +msgid "Glance image %(id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:171 +#, python-format +msgid "" +"Exception during HTTP connection close in VMwareHTTPWrite. Exception is " +"%s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:203 +#: cinder/volume/drivers/vmware/read_write_util.py:292 +msgid "Could not retrieve URL from lease." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:206 +#, python-format +msgid "Opening vmdk url: %s for write." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:231 +#, python-format +msgid "Written %s bytes to vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:242 +#: cinder/volume/drivers/vmware/read_write_util.py:318 +#, python-format +msgid "Updating progress to %s percent." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:258 +#: cinder/volume/drivers/vmware/read_write_util.py:334 +msgid "Lease released." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:260 +#: cinder/volume/drivers/vmware/read_write_util.py:336 +#, python-format +msgid "Lease is already in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:295 +#, python-format +msgid "Opening vmdk url: %s for read." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:307 +#, python-format +msgid "Read %s bytes from vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:150 +#, python-format +msgid "Error(s): %s occurred in the call to RetrievePropertiesEx." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:189 +#, python-format +msgid "No such SOAP method %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:198 +#, python-format +msgid "httplib error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:209 +#, python-format +msgid "Socket error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:218 +#, python-format +msgid "Type error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:225 +#, python-format +msgid "Error in %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:112 +#, python-format +msgid "Returning spec value %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:115 +#, python-format +msgid "Invalid spec value: %s specified." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:118 +#, python-format +msgid "Returning default spec value: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:169 +#, python-format +msgid "%s not set." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:174 +#, python-format +msgid "Successfully setup driver: %(driver)s for server: %(ip)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:219 +msgid "Backing not available, no operation to be performed." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:287 +#, python-format +msgid "" +"Unable to pick datastore to accommodate %(size)s bytes from the " +"datastores: %(dss)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:293 +#, python-format +msgid "" +"Selected datastore: %(datastore)s with %(host_count)d connected host(s) " +"for the volume." 
+msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:375 +#, python-format +msgid "" +"Unable to find suitable datastore for volume of size: %(vol)s GB under " +"host: %(host)s. More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:385 +#, python-format +msgid "Unable to find host to accommodate a disk of size: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:412 +#, python-format +msgid "" +"Unable to find suitable datastore for volume: %(vol)s under host: " +"%(host)s. More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:422 +#, python-format +msgid "Unable to create volume: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:441 +#, python-format +msgid "The instance: %s for which initialize connection is called, exists." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:448 +#, python-format +msgid "There is no backing for the volume: %s. Need to create one." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:456 +msgid "The instance for which initialize connection is called, does not exist." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:461 +#, python-format +msgid "Trying to boot from an empty volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:470 +#, python-format +msgid "" +"Returning connection_info: %(info)s for volume: %(volume)s with " +"connector: %(connector)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:518 +#, python-format +msgid "Snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:523 +#, python-format +msgid "There is no backing, so will not create snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:528 +#, python-format +msgid "Successfully created snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:549 +#, python-format +msgid "Delete snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:554 +#, python-format +msgid "There is no backing, and so there is no snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:558 +#, python-format +msgid "Successfully deleted snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:586 +#, python-format +msgid "Successfully cloned new backing: %(back)s from source VMDK file: %(vmdk)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:603 +#, python-format +msgid "" +"There is no backing for the source volume: %(svol)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:633 +#, python-format +msgid "" +"There is no backing for the source snapshot: %(snap)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:642 +#: cinder/volume/drivers/vmware/vmdk.py:982 +#, python-format +msgid "" +"There is no snapshot point for the snapshoted volume: %(snap)s. Not " +"creating any backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:678 +#, python-format +msgid "Cannot create image of disk format: %s. Only vmdk disk format is accepted." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:713 +#: cinder/volume/drivers/vmware/vmdk.py:771 +#, python-format +msgid "Fetching glance image: %(id)s to server: %(host)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:722 +#: cinder/volume/drivers/vmware/vmdk.py:792 +#, python-format +msgid "Done copying image: %(id)s to volume: %(vol)s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:725 +#, python-format +msgid "" +"Exception in copy_image_to_volume: %(excep)s. Deleting the backing: " +"%(back)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:746 +#, python-format +msgid "Exception in _select_ds_for_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:749 +#, python-format +msgid "Selected datastore %(ds)s for new volume of size %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:784 +#, python-format +msgid "Exception in copy_image_to_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:787 +#, python-format +msgid "Deleting the backing: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:808 +#, python-format +msgid "Copy glance image: %s to create new volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:842 +msgid "Upload to glance of attached volume is not supported." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:847 +#, python-format +msgid "Copy Volume: %s to new image." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:853 +#, python-format +msgid "Backing not found, creating for volume: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:872 +#, python-format +msgid "Done copying volume %(vol)s to a new image %(img)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:922 +#, python-format +msgid "Relocating volume: %(backing)s to %(ds)s and %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:961 +#: cinder/volume/drivers/vmware/volumeops.py:630 +#, python-format +msgid "Successfully created clone: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:974 +#, python-format +msgid "" +"There is no backing for the snapshoted volume: %(snap)s. Not creating any" +" backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1010 +#, python-format +msgid "" +"There is no backing for the source volume: %(src)s. Not creating any " +"backing for volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1018 +#, python-format +msgid "Linked clone of source volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:94 +#, python-format +msgid "Downloading image: %s from glance image server as a flat vmdk file." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:107 +#: cinder/volume/drivers/vmware/vmware_images.py:126 +#, python-format +msgid "Downloaded image: %s from glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:113 +#, python-format +msgid "Downloading image: %s from glance image server using HttpNfc import." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:132 +#, python-format +msgid "Uploading image: %s to the Glance image server using HttpNfc export." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:158 +#, python-format +msgid "Uploaded image: %s to the Glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:87 +#, python-format +msgid "Did not find any backing with name: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:94 +#, python-format +msgid "Deleting the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:97 +#, python-format +msgid "Initiated deletion of VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:99 +#, python-format +msgid "Deleted the VM backing: %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:237 +#, python-format +msgid "There are no valid datastores attached to %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:289 +#, python-format +msgid "" +"Creating folder: %(child_folder_name)s under parent folder: " +"%(parent_folder)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:306 +#, python-format +msgid "Child folder already present: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:314 +#, python-format +msgid "Created child folder: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:365 +#, python-format +msgid "Spec for creating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:383 +#, python-format +msgid "" +"Creating volume backing name: %(name)s disk_type: %(disk_type)s size_kb: " +"%(size_kb)s at folder: %(folder)s resourse pool: %(resource_pool)s " +"datastore name: %(ds_name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:395 +#, python-format +msgid "Initiated creation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:398 +#, python-format +msgid "Successfully created volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:438 +#, python-format +msgid "Spec for relocating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:452 +#, python-format +msgid "" +"Relocating backing: %(backing)s to datastore: %(ds)s and resource pool: " +"%(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:462 +#, python-format +msgid "Initiated relocation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:464 +#, python-format +msgid "" +"Successfully relocated volume backing: %(backing)s to datastore: %(ds)s " +"and resource pool: %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:474 +#, python-format +msgid "Moving backing: %(backing)s to folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:479 +#, python-format +msgid "Initiated move of volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:482 +#, python-format +msgid "Successfully moved volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:494 +#, python-format +msgid "Snapshoting backing: %(backing)s with name: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:501 +#, python-format +msgid "Initiated snapshot of volume backing: %(backing)s named: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:505 +#, python-format +msgid "Successfully created snapshot: %(snap)s for volume backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:553 +#, python-format +msgid "Deleting the snapshot: %(name)s from backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:558 +#, python-format +msgid "" +"Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " +"delete anything." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:565 +#, python-format +msgid "Initiated snapshot: %(name)s deletion for backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:569 +#, python-format +msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:597 +#, python-format +msgid "Spec for cloning the backing: %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:613 +#, python-format +msgid "" +"Creating a clone of backing: %(back)s, named: %(name)s, clone type: " +"%(type)s from snapshot: %(snap)s on datastore: %(ds)s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:627 +#, python-format +msgid "Initiated clone of backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:638 +#, python-format +msgid "Deleting file: %(file)s under datacenter: %(dc)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:646 +#, python-format +msgid "Initiated deletion via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:648 +#, python-format +msgid "Successfully deleted file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:701 +msgid "Copying disk data before snapshot of the VM" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:711 +#, python-format +msgid "Initiated copying disk data via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:713 +#, python-format +msgid "Successfully copied disk at: %(src)s to: %(dest)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:722 +#, python-format +msgid "Deleting vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:729 +#, python-format +msgid "Initiated deleting vmdk file via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:731 +#, python-format +msgid "Deleted vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/windows/windows.py:102 +#, fuzzy, python-format +msgid "Creating folder %s " +msgstr "Создание SR %s" + +#: cinder/volume/drivers/windows/windows_utils.py:47 +#, python-format +msgid "" +"check_for_setup_error: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:53 +msgid "check_for_setup_error: there is no ISCSI traffic listening." +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:63 +#, python-format +msgid "" +"get_host_information: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:73 +#, python-format +msgid "" +"get_host_information: the ISCSI target information could not be " +"retrieved. WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:105 +#, python-format +msgid "" +"associate_initiator_with_iscsi_target: an association between initiator: " +"%(init)s and target name: %(target)s could not be established. WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:123 +#, python-format +msgid "" +"delete_iscsi_target: error when deleting the iscsi target associated with" +" target name: %(target)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:139 +#, python-format +msgid "" +"create_volume: error when creating the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:157 +#, python-format +msgid "" +"delete_volume: error when deleting the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:177 +#, python-format +msgid "" +"create_snapshot: error when creating the snapshot name: %(vol_name)s . 
" +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:193 +#, python-format +msgid "" +"create_volume_from_snapshot: error when creating the volume name: " +"%(vol_name)s from snapshot name: %(snap_name)s. WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:208 +#, python-format +msgid "" +"delete_snapshot: error when deleting the snapshot name: %(snap_name)s . " +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:223 +#, python-format +msgid "" +"create_iscsi_target: error when creating iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:240 +#, python-format +msgid "" +"remove_iscsi_target: error when deleting iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:255 +#, python-format +msgid "" +"add_disk_to_target: error adding disk associated to volume : %(vol_name)s" +" to the target name: %(tar_name)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:273 +#, python-format +msgid "" +"copy_vhd_disk: error when copying disk from source path : %(src_path)s to" +" destination path: %(dest_path)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:290 +#, python-format +msgid "" +"extend: error when extending the volume: %(vol_name)s .WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/flows/common.py:52 +#, python-format +msgid "Restoring source %(source_volid)s status to %(status)s" +msgstr "" + +#: cinder/volume/flows/common.py:58 +#, python-format +msgid "" +"Failed setting source volume %(source_volid)s back to its initial " +"%(source_status)s status" +msgstr "" + +#: cinder/volume/flows/common.py:83 +#, python-format +msgid "Updating volume: %(volume_id)s with %(update)s due to: %(reason)s" +msgstr "" + +#: cinder/volume/flows/common.py:90 +#: cinder/volume/flows/manager/create_volume.py:676 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(update)s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:81 +#, python-format +msgid "Originating snapshot status must be one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:103 +#, python-format +msgid "" +"Unable to create a volume from an originating source volume when its " +"status is not one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:126 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than the snapshot size " +"%(snap_size)sGB. They must be >= original snapshot size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:135 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than original volume size " +"%(source_size)sGB. They must be >= original volume size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:144 +#, python-format +msgid "Volume size %(size)s must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:186 +#, python-format +msgid "" +"Size of specified image %(image_size)sGB is larger than volume size " +"%(volume_size)sGB." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:194 +#, python-format +msgid "" +"Volume size %(volume_size)sGB cannot be smaller than the image minDisk " +"size %(min_disk)sGB." 
+msgstr "" + +#: cinder/volume/flows/api/create_volume.py:212 +#, python-format +msgid "Metadata property key %s greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:217 +#, python-format +msgid "Metadata property key %s value greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:254 +#, python-format +msgid "Availability zone '%s' is invalid" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:267 +msgid "Volume must be in the same availability zone as the snapshot" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:276 +msgid "Volume must be in the same availability zone as the source volume" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:315 +msgid "Volume type will be changed to be the same as the source volume." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:463 +#, python-format +msgid "Failed destroying volume entry %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:546 +#, python-format +msgid "Failed rolling back quota for %s reservations" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:590 +#, python-format +msgid "Failed to update quota for deleting volume: %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:678 +#: cinder/volume/flows/manager/create_volume.py:192 +#, python-format +msgid "Volume %s: create failed" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:682 +msgid "Unexpected build error:" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:105 +#, python-format +msgid "" +"Volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d due to " +"%(reason)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:124 +#, python-format +msgid "Volume %s: re-scheduled" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:141 +#, python-format +msgid "Updating volume %(volume_id)s with %(update)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:146 +#, python-format +msgid "Volume %s: resetting 'creating' status failed." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:165 +#, python-format +msgid "Volume %s: rescheduling failed" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:308 +#, python-format +msgid "" +"Failed notifying about the volume action %(event)s for volume " +"%(volume_id)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:345 +#, python-format +msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:347 +#, python-format +msgid "" +"Failed updating volume %(vol_id)s metadata using the provided " +"%(src_type)s %(src_id)s metadata" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:405 +#, python-format +msgid "" +"Failed fetching snapshot %(snapshot_id)s bootable flag using the provided" +" glance snapshot %(snapshot_ref_id)s volume reference" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:418 +#, python-format +msgid "Marking volume %s as bootable." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:421 +#, python-format +msgid "Failed updating volume %(volume_id)s bootable flag to true" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:448 +#, python-format +msgid "" +"Attempting download of %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s." 
+msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:455 +#: cinder/volume/flows/manager/create_volume.py:466 +#, python-format +msgid "" +"Failed to copy image %(image_id)s to volume: %(volume_id)s, error: " +"%(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:461 +#, python-format +msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:475 +#, python-format +msgid "" +"Downloaded image %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s successfully." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:512 +#, python-format +msgid "" +"Creating volume glance metadata for volume %(volume_id)s backed by image " +"%(image_id)s with: %(vol_metadata)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:526 +#, python-format +msgid "" +"Cloning %(volume_id)s from image %(image_id)s at location " +"%(image_location)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:552 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(updates)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:574 +#, python-format +msgid "Unable to create volume. Volume driver %s not initialized" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:588 +#, python-format +msgid "" +"Volume %(volume_id)s: being created using %(functor)s with specification:" +" %(volume_spec)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:611 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with creation provided " +"model %(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:619 +#, python-format +msgid "Volume %s: creating export" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:633 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with driver provided model " +"%(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:680 +#, python-format +msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" +msgstr "" + +#~ msgid "Error retrieving volume status: %s" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get system name" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get storage pool data" +#~ msgstr "" + +#~ msgid "Cannot find any Fibre Channel HBAs" +#~ msgstr "" + +#~ msgid "Volume status must be available or error" +#~ msgstr "Состояние тома должно быть доступно" + +#~ msgid "No backend config with id %s" +#~ msgstr "" + +#~ msgid "No sm_flavor called %s" +#~ msgstr "" + +#~ msgid "No sm_volume with id %s" +#~ msgstr "" + +#~ msgid "Error: %s" +#~ msgstr "Ошибка БД: %s" + +#~ msgid "Unexpected state while cloning %s" +#~ msgstr "Неожиданная ошибка при выполнении команды." + +#~ msgid "iSCSI device not found at %s" +#~ msgstr "iSCSI-устройство не найдено в %s" + +#~ msgid "Fibre Channel device not found." 
+#~ msgstr "" + +#~ msgid "Uncaught exception" +#~ msgstr "Исключение: %s" + +#~ msgid "Out reactor registered" +#~ msgstr "" + +#~ msgid "CONSUMER GOT %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT QUEUED %(data)s" +#~ msgstr "" + +#~ msgid "Could not create IPC directory %s" +#~ msgstr "Ошибка удаления контейнера: %s" + +#~ msgid "ROUTER RELAY-OUT %(data)s" +#~ msgstr "" + +#~ msgid "May specify only one of snapshot, imageRef or source volume" +#~ msgstr "" + +#~ msgid "Volume size cannot be lesser than the Snapshot size" +#~ msgstr "" + +#~ msgid "Unable to clone volumes that are in an error state" +#~ msgstr "" + +#~ msgid "Clones currently must be >= original volume size." +#~ msgstr "" + +#~ msgid "Volume size '%s' must be an integer and greater than 0" +#~ msgstr "" + +#~ msgid "Size of specified image is larger than volume size." +#~ msgstr "" + +#~ msgid "Image minDisk size is larger than the volume size." +#~ msgstr "" + +#~ msgid "" +#~ msgstr "" + +#~ msgid "Availability zone is invalid" +#~ msgstr "" + +#~ msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +#~ msgstr "том %(vol_name)s: создание lv объёмом %(vol_size)sG" + +#~ msgid "volume %s: creating from snapshot" +#~ msgstr "том %s: создание экспортирования" + +#~ msgid "volume %s: creating from existing volume" +#~ msgstr "том %s: создание экспортирования" + +#~ msgid "volume %s: creating from image" +#~ msgstr "том %s: создание" + +#~ msgid "volume %s: creating" +#~ msgstr "том %s: создание" + +#~ msgid "Setting volume: %s status to error after failed image copy." +#~ msgstr "" + +#~ msgid "Unexpected Error: " +#~ msgstr "" + +#~ msgid "volume %s: creating export" +#~ msgstr "том %s: создание экспортирования" + +#~ msgid "volume %s: create failed" +#~ msgstr "том %s: создание" + +#~ msgid "volume %s: created successfully" +#~ msgstr "том %s: создание завершено" + +#~ msgid "volume %s: Error trying to reschedule create" +#~ msgstr "" + +#~ msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +#~ msgstr "" + +#~ msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +#~ msgstr "Выполнена попытка удаления несуществующей консоли %(console_id)s." + +#~ msgid "Downloaded image %(image_id)s to %(volume_id)s successfully." +#~ msgstr "" + +#~ msgid "Array Mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "LUN %(lun)s of size %(size)s MB is created." +#~ msgstr "" + +#~ msgid "Array mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "Failed to attach iser target for volume %(volume_id)s." 
+#~ msgstr "" + +#~ msgid "Fetching %s" +#~ msgstr "Получение %s" + +#~ msgid "Link Local address is not found.:%s" +#~ msgstr "" + +#~ msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +#~ msgstr "" + +#~ msgid "Started %(name)s on %(_host)s:%(_port)s" +#~ msgstr "Выполняется %(name)s на %(host)s:%(port)s" + +#~ msgid "Unable to find a Fibre Channel volume device" +#~ msgstr "" + +#~ msgid "Volume device not found at %s" +#~ msgstr "" + +#~ msgid "Unable to find Volume Group: %s" +#~ msgstr "Невозможно отсоединить том %s" + +#~ msgid "Failed to create Volume Group: %s" +#~ msgstr "Невозможно найти том %s" + +#~ msgid "snapshot %(snap_name)s: creating" +#~ msgstr "снимок %(snap_name)s: создание" + +#~ msgid "Running with CoraidDriver for ESM EtherCLoud" +#~ msgstr "" + +#~ msgid "Update session cookie %(session)s" +#~ msgstr "" + +#~ msgid "Message : %(message)s" +#~ msgstr "" + +#~ msgid "Error while trying to set group: %(message)s" +#~ msgstr "" + +#~ msgid "Unable to find group: %(group)s" +#~ msgstr "Невозможно найти адрес %r" + +#~ msgid "ESM urlOpen error" +#~ msgstr "" + +#~ msgid "JSON Error" +#~ msgstr "Ошибка перемещения" + +#~ msgid "Request without URL" +#~ msgstr "" + +#~ msgid "Configure data : %s" +#~ msgstr "Заданные данные: %s" + +#~ msgid "Configure response : %s" +#~ msgstr "ответ %s" + +#~ msgid "Unable to retrive volume infos for volume %(volname)s" +#~ msgstr "" + +#~ msgid "Cannot login on Coraid ESM" +#~ msgstr "" + +#~ msgid "Fail to create volume %(volname)s" +#~ msgstr "Невозможно найти том %s" + +#~ msgid "Failed to delete volume %(volname)s" +#~ msgstr "Невозможно найти том %s" + +#~ msgid "Failed to Create Snapshot %(snapname)s" +#~ msgstr "Ошибка перезагрузки копии" + +#~ msgid "Failed to Delete Snapshot %(snapname)s" +#~ msgstr "снимок %(snap_name)s: создание" + +#~ msgid "Failed to Create Volume from Snapshot %(snapname)s" +#~ msgstr "Создать том из снимка %s" + +#~ msgid "fmt = %(fmt)s backed by: %(backing_file)s" +#~ msgstr "" + +#~ msgid "Expected image to be in raw format, but is %s" +#~ msgstr "" + +#~ msgid "volume group %s doesn't exist" +#~ msgstr "том группы %s не существует" + +#~ msgid "Error retrieving volume stats: %s" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Could not get system name" +#~ msgstr "" + +#~ msgid "CPG (%s) must be in a domain" +#~ msgstr "" + +#~ msgid "Error populating default encryption types!" +#~ msgstr "" + +#~ msgid "Unexpected error while running command." +#~ msgstr "Неожиданная ошибка при выполнении команды." 
+ +#~ msgid "Nexenta SA returned the error" +#~ msgstr "Сервер возвратил ошибку: %s" + +#~ msgid "Ignored target group creation error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group member addition error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LU creation error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Copying metadata from snapshot %(snap_volume_id)s to %(volume_id)s" +#~ msgstr "" + +#~ msgid "Connection to glance failed" +#~ msgstr "Сбой соединения с glance" + +#~ msgid "Invalid snapshot" +#~ msgstr "Недопустимый снимок" + +#~ msgid "Invalid input received" +#~ msgstr "" + +#~ msgid "Invalid volume type" +#~ msgstr "Недопустимый тип тома" + +#~ msgid "Invalid volume" +#~ msgstr "Недопустимый том" + +#~ msgid "Invalid host" +#~ msgstr "" + +#~ msgid "Invalid auth key" +#~ msgstr "Недопустимый снимок" + +#~ msgid "Invalid metadata" +#~ msgstr "Недопустимые метаданные" + +#~ msgid "Invalid metadata size" +#~ msgstr "Неправильный ключ метаданных" + +#~ msgid "Migration error" +#~ msgstr "Ошибка перемещения" + +#~ msgid "Quota exceeded" +#~ msgstr "Превышена квота" + +#~ msgid "Connection to swift failed" +#~ msgstr "Сбой соединения с glance" + +#~ msgid "Volume migration failed" +#~ msgstr "" + +#~ msgid "SSH command injection detected" +#~ msgstr "" + +#~ msgid "Invalid qos specs" +#~ msgstr "" + +#~ msgid "debug in callback: %s" +#~ msgstr "отладка в обратном вызове: %s" + +#~ msgid "Expected object of type: %s" +#~ msgstr "Ожидался объект типа: %s" + +#~ msgid "timefunc: '%(name)s' took %(total_time).2f secs" +#~ msgstr "timefunc: '%(name)s' заняла %(total_time).2f с." + +#~ msgid "base image still has %s snapshots so not deleting base image" +#~ msgstr "" + +#~ msgid "Failed to rename migration destination volume %(vol)s to %(name)s" +#~ msgstr "" + +#~ msgid "Resize volume \"%(name)s\" to %(size)s" +#~ msgstr "" + +#~ msgid "Volume \"%(name)s\" resized. New size is %(size)s" +#~ msgstr "" + +#~ msgid "Invalid snapshot backing file format: %s" +#~ msgstr "" + +#~ msgid "Extend volume from %(old_size) to %(new_size)" +#~ msgstr "" + +#~ msgid "pool %s doesn't exist" +#~ msgstr "том группы %s не существует" + +#~ msgid "_update_volume_stats: Could not get system name." +#~ msgstr "" + +#~ msgid "Disk not found: %s" +#~ msgstr "Узел не найден" + +#~ msgid "read timed out" +#~ msgstr "" + +#~ msgid "check_for_setup_error." +#~ msgstr "" + +#~ msgid "check_for_setup_error: Can not get device type." +#~ msgstr "" + +#~ msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +#~ msgstr "" + +#~ msgid "_get_device_type: Storage Pool must be configured." +#~ msgstr "" + +#~ msgid "create_volume:volume name: %s." +#~ msgstr "" + +#~ msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +#~ msgstr "" + +#~ msgid "create_export: volume name:%s" +#~ msgstr "Создать снимок тома %s" + +#~ msgid "create_export:Volume %(name)s does not exist." +#~ msgstr "" + +#~ msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +#~ msgstr "" + +#~ msgid "terminate_connection:Host does not exist. Host name:%(host)s." +#~ msgstr "" + +#~ msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +#~ msgstr "" + +#~ msgid "create_snapshot:Device does not support snapshot." +#~ msgstr "" + +#~ msgid "create_snapshot:Resource pool needs 1GB valid size at least." 
+#~ msgstr "" + +#~ msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +#~ msgstr "" + +#~ msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s" +#~ msgstr "" -#: cinder/volume/san.py:746 -#, python-format -msgid "solidfire account: %s does not exist, create it..." -msgstr "" +#~ msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +#~ msgstr "" -#: cinder/volume/san.py:804 -msgid "Enter SolidFire create_volume..." -msgstr "" +#~ msgid "delete_snapshot:Device does not support snapshot." +#~ msgstr "" -#: cinder/volume/san.py:846 -msgid "Leaving SolidFire create_volume" -msgstr "" +#~ msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +#~ msgstr "" -#: cinder/volume/san.py:861 -msgid "Enter SolidFire delete_volume..." -msgstr "" +#~ msgid "_check_conf_file: %s" +#~ msgstr "" -#: cinder/volume/san.py:880 -#, python-format -msgid "Deleting volumeID: %s " -msgstr "" +#~ msgid "Write login information to xml error. %s" +#~ msgstr "" -#: cinder/volume/san.py:888 -msgid "Leaving SolidFire delete_volume" -msgstr "" +#~ msgid "_get_login_info error. %s" +#~ msgstr "" -#: cinder/volume/san.py:891 -msgid "Executing SolidFire ensure_export..." -msgstr "" +#~ msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +#~ msgstr "" -#: cinder/volume/san.py:895 -msgid "Executing SolidFire create_export..." -msgstr "" +#~ msgid "_get_lun_set_info:%s" +#~ msgstr "" -#: cinder/volume/volume_types.py:49 cinder/volume/volume_types.py:108 -msgid "name cannot be None" -msgstr "" +#~ msgid "_get_iscsi_info:%s" +#~ msgstr "" -#: cinder/volume/volume_types.py:96 -msgid "id cannot be None" -msgstr "" +#~ msgid "CLI command:%s" +#~ msgstr "" -#: cinder/volume/xensm.py:55 -#, python-format -msgid "SR name = %s" -msgstr "" +#~ msgid "_execute_cli:%s" +#~ msgstr "_удалить: %s" -#: cinder/volume/xensm.py:56 -#, python-format -msgid "Params: %s" -msgstr "" +#~ msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +#~ msgstr "" -#: cinder/volume/xensm.py:60 -#, python-format -msgid "Failed to create sr %s...continuing" -msgstr "Ошибка создания sr %s...продолжение" +#~ msgid "_get_tgt_iqn:iSCSI IP is %s." +#~ msgstr "" -#: cinder/volume/xensm.py:62 -msgid "Create failed" -msgstr "Ошибка создания" +#~ msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +#~ msgstr "" -#: cinder/volume/xensm.py:64 -#, python-format -msgid "SR UUID of new SR is: %s" -msgstr "" +#~ msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +#~ msgstr "" -#: cinder/volume/xensm.py:71 -msgid "Failed to update db" -msgstr "Ошибка обновления базы данных" +#~ msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +#~ msgstr "" -#: cinder/volume/xensm.py:80 -#, python-format -msgid "Failed to introduce sr %s...continuing" -msgstr "Ошибка внедрения sr %s...продолжение" +#~ msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." 
+#~ msgstr "" -#: cinder/volume/xensm.py:91 -#, python-format -msgid "Failed to reach backend %d" -msgstr "" +#~ msgid "Ignored target creation error while ensuring export" +#~ msgstr "" -#: cinder/volume/xensm.py:100 -msgid "XenSMDriver requires xenapi connection" -msgstr "XenSMDriver необходимо xenapi соединение" +#~ msgid "Ignored target group creation error while ensuring export" +#~ msgstr "" -#: cinder/volume/xensm.py:110 -msgid "Failed to initiate session" -msgstr "Ошибка начала сеанса" +#~ msgid "Ignored target group member addition error while ensuring export" +#~ msgstr "" -#: cinder/volume/xensm.py:142 -#, python-format -msgid "Volume will be created in backend - %d" -msgstr "Том будет создан во внутреннем интерфейсе - %d" +#~ msgid "Ignored LU creation error while ensuring export" +#~ msgstr "" -#: cinder/volume/xensm.py:154 -msgid "Failed to update volume in db" -msgstr "Ошибка обновления тома в базе данных" +#~ msgid "Ignored LUN mapping entry addition error while ensuring export" +#~ msgstr "" -#: cinder/volume/xensm.py:157 -msgid "Unable to create volume" -msgstr "Невозможно создать том" +#~ msgid "Invalid source volume %(reason)s." +#~ msgstr "" -#: cinder/volume/xensm.py:171 -msgid "Failed to delete vdi" -msgstr "Ошибка удаления vdi" +#~ msgid "The request is invalid." +#~ msgstr "Недопустимый запрос." -#: cinder/volume/xensm.py:177 -msgid "Failed to delete volume in db" -msgstr "Ошибка удаления тома в базе данных" +#~ msgid "Volume %(volume_id)s persistence file could not be found." +#~ msgstr "Том %(volume_id)s не найден." -#: cinder/volume/xensm.py:210 -msgid "Failed to find volume in db" -msgstr "Ошибка поиска тома в базе данных" +#~ msgid "No disk at %(location)s" +#~ msgstr "Отсутствует диск в %(location)s" -#: cinder/volume/xensm.py:221 -msgid "Failed to find backend in db" -msgstr "Ошибка поиска внутреннего интерфейса в базе данных" +#~ msgid "Class %(class_name)s could not be found: %(exception)s" +#~ msgstr "Класс %(class_name)s не найден: %(exception)s" -#: cinder/volume/nexenta/__init__.py:27 -#, fuzzy, python-format -msgid "Nexenta SA returned the error" -msgstr "Сервер возвратил ошибку: %s" +#~ msgid "Action not allowed." +#~ msgstr "Действие не разрешено." -#: cinder/volume/nexenta/jsonrpc.py:64 -#, fuzzy, python-format -msgid "Sending JSON data: %s" -msgstr "Заданные данные: %s" +#~ msgid "Key pair %(key_name)s already exists." +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:69 -#, python-format -msgid "Auto switching to HTTPS connection to %s" -msgstr "" +#~ msgid "Migration error: %(reason)s" +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:75 -msgid "No headers in server response" -msgstr "" +#~ msgid "Maximum volume/snapshot size exceeded" +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:76 -msgid "Bad response from server" -msgstr "" +#~ msgid "3PAR Host already exists: %(err)s. %(info)s" +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:79 -#, fuzzy, python-format -msgid "Got response: %s" -msgstr "ответ %s" +#~ msgid "Backup volume %(volume_id)s type not recognised." +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:96 -#, fuzzy, python-format -msgid "Volume %s does not exist in Nexenta SA" -msgstr "том группы %s не существует" +#~ msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:180 -msgid "" -"Call to local_path should not happen. Verify that use_local_volumes flag " -"is turned off." 
-msgstr "" +#~ msgid "ssh_read: Read SSH timeout" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:202 -#, python-format -msgid "Ignored target creation error \"%s\" while ensuring export" -msgstr "" +#~ msgid "do_setup." +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:210 -#, python-format -msgid "Ignored target group creation error \"%s\" while ensuring export" -msgstr "" +#~ msgid "create_volume: volume name: %s." +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:219 -#, python-format -msgid "Ignored target group member addition error \"%s\" while ensuring export" -msgstr "" +#~ msgid "delete_volume: volume name: %s." +#~ msgstr "Удалить том с идентификатором: %s" -#: cinder/volume/nexenta/volume.py:227 -#, python-format -msgid "Ignored LU creation error \"%s\" while ensuring export" -msgstr "" +#~ msgid "create_cloned_volume: src volume: %(src)s tgt volume: %(tgt)s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:237 -#, python-format -msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" -msgstr "" +#~ msgid "create_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:273 -#, python-format -msgid "" -"Got error trying to destroy target group %(target_group)s, assuming it is" -" already gone: %(exc)s" -msgstr "" +#~ msgid "delete_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:280 -#, python-format -msgid "" -"Got error trying to delete target %(target)s, assuming it is already " -"gone: %(exc)s" -msgstr "" +#~ msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s" +#~ msgstr "" -#~ msgid "Unable to locate account %(account_name) on Solidfire device" +#~ msgid "_update_volume_stats: Updating volume stats" #~ msgstr "" -#~ msgid "Cinder access parameters were not specified." -#~ msgstr "Параметры доступы Cinder не заданы." +#~ msgid "restore finished." +#~ msgstr "" -#~ msgid "Virtual Storage Array %(id)d could not be found." -#~ msgstr "Виртуальный массив для хранения данных %(id)d не найден." +#~ msgid "Error encountered during initialization of driver: %s" +#~ msgstr "" -#~ msgid "Virtual Storage Array %(name)s could not be found." -#~ msgstr "Виртуальный массив для хранения данных %(name)s не найден." +#~ msgid "Unabled to update stats, driver is uninitialized" +#~ msgstr "" -#~ msgid "Detected more than one volume with name %(vol_name)" +#~ msgid "Snapshot file at %s does not exist." #~ msgstr "" -#~ msgid "Detected existing vlan with id %(vlan)" +#~ msgid "_create_copy: Source vdisk %s does not exist" #~ msgstr "" -#~ msgid "" -#~ "Attempting to grab semaphore \"%(lock)s\" " -#~ "for method \"%(method)s\"...lock" +#~ msgid "Login to 3PAR array invalid" #~ msgstr "" -#~ msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgid "There are no datastores present under %s." #~ msgstr "" -#~ msgid "" -#~ "Attempting to grab file lock " -#~ "\"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgid "Size for volume: %s not found, skipping secure delete." 
#~ msgstr "" -#~ msgid "Got file lock \"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgid "Could not find attribute for LUN named %s" +#~ msgstr "Невозможно найти экспортирование iSCSI для тома %s" + +#~ msgid "Cleaning up incomplete backup operations" #~ msgstr "" -#~ msgid "Parent group id and group id cannot be same" +#~ msgid "Resetting volume %s to available (was backing-up)" #~ msgstr "" -#~ msgid "No body provided" -#~ msgstr "Тело не предоставлено" +#~ msgid "Resetting volume %s to error_restoring (was restoring-backup)" +#~ msgstr "" -#~ msgid "Create VSA %(display_name)s of type %(vc_type)s" -#~ msgstr "Создать VSA %(display_name)s типа %(vc_type)s" +#~ msgid "Resetting backup %s to error (was creating)" +#~ msgstr "" -#~ msgid "Delete VSA with id: %s" -#~ msgstr "Удалить VSA с идентификатором: %s" +#~ msgid "Resetting backup %s to available (was restoring)" +#~ msgstr "" -#~ msgid "Associate address %(ip)s to VSA %(id)s" -#~ msgstr "Ассоциировать адрес %(ip)s с VSA %(id)s" +#~ msgid "Resuming delete on backup: %s" +#~ msgstr "" -#~ msgid "Disassociate address from VSA %(id)s" -#~ msgstr "Исключить адрес для VSA %(id)s" +#~ msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +#~ msgstr "" -#~ msgid "%(obj)s with ID %(id)s not found" -#~ msgstr "%(obj)s с идентификатором %(id)s не найден" +#~ msgid "create_backup finished. backup: %s" +#~ msgstr "" -#~ msgid "" -#~ "%(obj)s with ID %(id)s belongs to " -#~ "VSA %(own_vsa_id)s and not to VSA " -#~ "%(vsa_id)s." +#~ msgid "delete_backup started, backup: %s" #~ msgstr "" -#~ "%(obj)s с идентификатором %(id)s относится " -#~ "к VSA %(own_vsa_id)s и не VSA " -#~ "%(vsa_id)s." -#~ msgid "Index. vsa_id=%(vsa_id)s" -#~ msgstr "Index. vsa_id=%(vsa_id)s" +#~ msgid "delete_backup finished, backup %s deleted" +#~ msgstr "" -#~ msgid "Detail. vsa_id=%(vsa_id)s" +#~ msgid "JSON transfer Error" #~ msgstr "" -#~ msgid "Create. vsa_id=%(vsa_id)s, body=%(body)s" +#~ msgid "create volume error: %(err)s" #~ msgstr "" -#~ msgid "Create volume of %(size)s GB from VSA ID %(vsa_id)s" -#~ msgstr "Создать том %(size)s ГБ из VSA ID %(vsa_id)s" +#~ msgid "Create snapshot error." +#~ msgstr "" -#~ msgid "Update %(obj)s with id: %(id)s, changes: %(changes)s" -#~ msgstr "Обновить %(obj)s с идентификатором: %(id)s, изменения: %(changes)s" +#~ msgid "Create luncopy error." +#~ msgstr "" -#~ msgid "Delete. vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgid "_find_host_lun_id transfer data error! " #~ msgstr "" -#~ msgid "Show. vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgid "ssh_read: Read SSH timeout." #~ msgstr "" -#~ msgid "Index instances for VSA %s" +#~ msgid "There are no hosts in the inventory." #~ msgstr "" -#~ msgid "Going to try to soft delete %s" +#~ msgid "Unable to create volume: %(vol)s on the hosts: %(hosts)s." #~ msgstr "" -#~ msgid "" -#~ "Going to force the deletion of the" -#~ " vm %(instance_uuid)s, even if it is" -#~ " deleted" -#~ msgstr "Принудительное удаление вм %(instance_uuid)s, даже если она удалена" +#~ msgid "Successfully cloned new backing: %s." +#~ msgstr "" -#~ msgid "" -#~ "Instance %(instance_uuid)s did not exist " -#~ "in the DB, but I will shut " -#~ "it down anyway using a special " -#~ "context" +#~ msgid "Successfully reverted clone: %(clone)s to snapshot: %(snapshot)s." 
#~ msgstr "" -#~ "Копия %(instance_uuid)s не существует в " -#~ "БД, но будет выполнено выключение " -#~ "используя особый контекст" -#~ msgid "trying to destroy already destroyed instance: %s" -#~ msgstr "попытка ликвидации уже ликвидированной копии: %s" +#~ msgid "Copying backing files from %(src)s to %(dest)s." +#~ msgstr "" -#~ msgid "" -#~ "Instance %(name)s found in database but" -#~ " not known by hypervisor. Setting " -#~ "power state to NOSTATE" +#~ msgid "Initiated copying of backing via task: %s." #~ msgstr "" -#~ msgid "" -#~ "Detected instance with name label " -#~ "'%(name_label)s' which is marked as " -#~ "DELETED but still present on host." +#~ msgid "Successfully copied backing to %s." #~ msgstr "" -#~ msgid "" -#~ "Destroying instance with name label " -#~ "'%(name_label)s' which is marked as " -#~ "DELETED but still present on host." +#~ msgid "Registering backing at path: %s to inventory." #~ msgstr "" -#~ msgid "Can't downgrade without losing data" -#~ msgstr "Невозможно перейти на предыдущую версию без потери данных" +#~ msgid "Initiated registring backing, task: %s." +#~ msgstr "" -#~ msgid "Network %s has active ports, cannot delete" +#~ msgid "Successfully registered backing: %s." #~ msgstr "" -#~ msgid "No fixed IPs to deallocate for vif %sid" +#~ msgid "Reverting backing to snapshot: %s." #~ msgstr "" -#~ msgid "" -#~ "AMQP server on %(fl_host)s:%(fl_port)d is " -#~ "unreachable: %(e)s. Trying again in " -#~ "%(fl_intv)d seconds." +#~ msgid "Initiated reverting snapshot via task: %s." #~ msgstr "" -#~ "Сервер AMQP на %(fl_host)s:%(fl_port)d " -#~ "недоступен: %(e)s. Очередная попытка через " -#~ "%(fl_intv)d секунд." -#~ msgid "Unable to connect to AMQP server after %(tries)d tries. Shutting down." +#~ msgid "Successfully reverted to snapshot: %s." #~ msgstr "" -#~ "Невозможно подключиться к серверу AMQP " -#~ "после выполнения %(tries)d попыток. Завершение" -#~ " работы." -#~ msgid "Failed to fetch message from queue: %s" -#~ msgstr "Ошибка получения сообщения из очереди: %s" +#~ msgid "Successfully copied disk data to: %s." +#~ msgstr "" -#~ msgid "Initing the Adapter Consumer for %s" +#~ msgid "Error(s): %s occurred in the call to RetrieveProperties." #~ msgstr "" -#~ msgid "Created \"%(exchange)s\" fanout exchange with \"%(key)s\" routing key" +#~ msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" #~ msgstr "" -#~ msgid "Exception while processing consumer" +#~ msgid "Deploy v1 of the Cinder API. " #~ msgstr "" -#~ msgid "Creating \"%(exchange)s\" fanout exchange" +#~ msgid "Deploy v2 of the Cinder API. " #~ msgstr "" -#~ msgid "topic is %s" -#~ msgstr "тема %s" +#~ msgid "_read_xml:%s" +#~ msgstr "_создать: %s" -#~ msgid "message %s" -#~ msgstr "сообщение %s" +#~ msgid "request ip info is %s." +#~ msgstr "" -#~ msgid "" -#~ "Cannot confirm tmpfile at %(ipath)s is" -#~ " on same shared storage between " -#~ "%(src)s and %(dest)s." +#~ msgid "new str info is %s." #~ msgstr "" -#~ msgid "" -#~ "Unable to migrate %(instance_id)s to " -#~ "%(dest)s: Lack of memory(host:%(avail)s <= " -#~ "instance:%(mem_inst)s)" +#~ msgid "Failed to create iser target for volume %(volume_id)s." #~ msgstr "" -#~ msgid "" -#~ "Unable to migrate %(instance_id)s to " -#~ "%(dest)s: Lack of disk(host:%(available)s <=" -#~ " instance:%(necessary)s)" +#~ msgid "Failed to remove iser target for volume %(volume_id)s." 
+#~ msgstr "" + +#~ msgid "rtstool is not installed correctly" #~ msgstr "" -#~ msgid "Driver Method %(driver_method)s missing: %(e)s.Reverting to schedule()" +#~ msgid "Creating iser_target for: %s" #~ msgstr "" -#~ msgid "Setting instance %(instance_uuid)s to ERROR state." +#~ msgid "Failed to create iser target for volume id:%(vol_id)s: %(e)s" #~ msgstr "" -#~ msgid "_filter_hosts: %(request_spec)s" -#~ msgstr "_filter_hosts: %(request_spec)s" +#~ msgid "Removing iser_target for: %s" +#~ msgstr "" -#~ msgid "Filter hosts for drive type %s" +#~ msgid "Failed to remove iser target for volume id:%(vol_id)s: %(e)s" #~ msgstr "" -#~ msgid "Host %s has no free capacity. Skip" -#~ msgstr "На узле %s недостаточно свободного места. Пропуск" +#~ msgid "Volume %s does not exist, it seems it was already deleted" +#~ msgstr "" -#~ msgid "Filter hosts: %s" -#~ msgstr "Фильтр узлов: %s" +#~ msgid "Executing zfs send/recv on the appliance" +#~ msgstr "" -#~ msgid "Must implement host selection mechanism" +#~ msgid "zfs send/recv done, new volume %s created" #~ msgstr "" -#~ msgid "Maximum number of hosts selected (%d)" -#~ msgstr "Выбрано макс. количество узлов (%d)" +#~ msgid "Failed to delete temp snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" -#~ msgid "Selected excessive host %(host)s" +#~ msgid "Failed to delete zfs recv snapshot %(volume)s@%(snapshot)s" #~ msgstr "" -#~ msgid "Provision volume %(name)s of size %(size)s GB on host %(host)s" -#~ msgstr "Том обеспечения %(name)s размером %(size)s ГБ на узле %(host)s" +#~ msgid "rbd export-diff failed - %s" +#~ msgstr "" -#~ msgid "volume_params %(volume_params)s" +#~ msgid "rbd import-diff failed - %s" #~ msgstr "" -#~ msgid "%(i)d: Volume %(name)s" -#~ msgstr "%(i)d: Том %(name)s" +#~ msgid "%s is not on GPFS. Perhaps GPFS not mounted." +#~ msgstr "" -#~ msgid "Attempting to spawn %(num_volumes)d volume(s)" +#~ msgid "Folder %s does not exist, it seems it was already deleted." #~ msgstr "" -#~ msgid "Error creating volumes" -#~ msgstr "Ошибка создания томов" +#~ msgid "No 'os-update_readonly_flag' was specified in request." +#~ msgstr "" -#~ msgid "Non-VSA volume %d" -#~ msgstr "Том не-VSA %d" +#~ msgid "Volume 'readonly' flag must be specified in request as a boolean." +#~ msgstr "" -#~ msgid "Spawning volume %(volume_id)s with drive type %(drive_type)s" +#~ msgid "ISER provider_location not stored, using discovery" #~ msgstr "" -#~ msgid "Error creating volume" -#~ msgstr "Ошибка создания тома" +#~ msgid "Could not find iSER export for volume %s" +#~ msgstr "" -#~ msgid "No capability selected for volume of size %(size)s" -#~ msgstr "Возможность для тома, объёмом %(size)s, не выбрана" +#~ msgid "ISER Discovery: Found %s" +#~ msgstr "" -#~ msgid "Host %s:" -#~ msgstr "Узел %s:" +#~ msgid "Failed to access the device on the path %(path)s: %(error)s." +#~ msgstr "" -#~ msgid "" -#~ "\tDrive %(qosgrp)-25s: total %(total)2s, " -#~ "used %(used)2s, free %(free)2s. Available " -#~ "capacity %(avail)-5s" +#~ msgid "iSER device not found at %s" #~ msgstr "" -#~ "\tДиск %(qosgrp)-25s: всего %(total)2s, " -#~ "занято %(used)2s, свободно %(free)2s. " -#~ "Доступный объём %(avail)-5s" -#~ msgid "" -#~ "\t LeastUsedHost: Best host: %(best_host)s." -#~ " (used capacity %(min_used)s)" -#~ msgstr "\t LeastUsedHost: Наилучший узел: %(best_host)s. (занято %(min_used)s)" +#~ msgid "Found iSER node %(host_device)s (after %(tries)s rescans)." +#~ msgstr "" -#~ msgid "" -#~ "\t MostAvailCap: Best host: %(best_host)s. 
" -#~ "(available %(max_avail)s %(type_str)s)" +#~ msgid "Skipping ensure_export. No iser_target provisioned for volume: %s" #~ msgstr "" -#~ "\t MostAvailCap: Наилучший узел: " -#~ "%(best_host)s. (доступно %(max_avail)s %(type_str)s)" -#~ msgid "(%(nm)s) publish (key: %(routing_key)s) %(message)s" +#~ msgid "Skipping remove_export. No iser_target provisioned for volume: %s" #~ msgstr "" -#~ msgid "Publishing to route %s" +#~ msgid "Downloading image: %s from glance image server." #~ msgstr "" -#~ msgid "Declaring queue %s" -#~ msgstr "Объявление очереди %s" +#~ msgid "Uploading image: %s to the Glance image server." +#~ msgstr "" -#~ msgid "Declaring exchange %s" -#~ msgstr "Объявление точки обмена %s" +#~ msgid "Invalid request body" +#~ msgstr "Недопустимый запрос тела" -#~ msgid "Binding %(queue)s to %(exchange)s with key %(routing_key)s" +#~ msgid "enter: _get_host_from_connector: prefix %s" #~ msgstr "" -#~ msgid "Getting from %(queue)s: %(message)s" -#~ msgstr "Получение из %(queue)s: %(message)s" +#~ msgid "Schedule volume flow not retrieved" +#~ msgstr "" -#~ msgid "Test: Emulate wrong VSA name. Raise" +#~ msgid "Failed to successfully complete schedule volume using flow: %s" #~ msgstr "" -#~ msgid "Test: Emulate DB error. Raise" +#~ msgid "Create volume flow not retrieved" #~ msgstr "" -#~ msgid "Test: user_data = %s" -#~ msgstr "Тест: user_data = %s" +#~ msgid "Failed to successfully complete create volume workflow" +#~ msgstr "" -#~ msgid "_create: param=%s" -#~ msgstr "_create: param=%s" +#~ msgid "Expected volume result not found" +#~ msgstr "" -#~ msgid "Host %s" -#~ msgstr "Узел %s" +#~ msgid "Manager volume flow not retrieved" +#~ msgstr "" -#~ msgid "Test: provision vol %(name)s on host %(host)s" -#~ msgstr "Проверка: обеспечение vol %(name)s на узле %(host)s" +#~ msgid "Failed to successfully complete manager volume workflow" +#~ msgstr "" -#~ msgid "\t vol=%(vol)s" -#~ msgstr "\t vol=%(vol)s" +#~ msgid "Unable to update stats, driver is uninitialized" +#~ msgstr "" -#~ msgid "Test: VSA update request: vsa_id=%(vsa_id)s values=%(values)s" -#~ msgstr "Тест: запрос обновления VSA: vsa_id=%(vsa_id)s values=%(values)s" +#~ msgid "Bad reponse from server: %s" +#~ msgstr "" -#~ msgid "Test: Volume create: %s" -#~ msgstr "Тест: Создание тома: %s" +#~ msgid "%(flow)s has moved into state %(state)s from state %(old_state)s" +#~ msgstr "" -#~ msgid "Test: Volume get request: id=%(volume_id)s" -#~ msgstr "Тест: Том получает запрос: id=%(volume_id)s" +#~ msgid "No request spec, will not reschedule" +#~ msgstr "" -#~ msgid "Test: Volume update request: id=%(volume_id)s values=%(values)s" -#~ msgstr "Тест: Запрос обновления тома: id=%(volume_id)s значения=%(values)s" +#~ msgid "No retry filter property or associated retry info, will not reschedule" +#~ msgstr "" -#~ msgid "Test: Volume get: id=%(volume_id)s" -#~ msgstr "Тест: Том получает: id=%(volume_id)s" +#~ msgid "Retry info not present, will not reschedule" +#~ msgstr "" -#~ msgid "Task [%(name)s] %(task)s status: success %(result)s" -#~ msgstr "Состояние задачи [%(name)s] %(task)s: готово %(result)s" +#~ msgid "Clear capabilities" +#~ msgstr "Очистить возможности" -#~ msgid "Task [%(name)s] %(task)s status: %(status)s %(error_info)s" -#~ msgstr "Состояние задачи [%(name)s] %(task)s: %(status)s %(error_info)s" +#~ msgid "This usually means the volume was never succesfully created." 
+#~ msgstr "" -#~ msgid "" -#~ "deactivate_node is called for " -#~ "node_id = %(id)s node_ip = %(ip)s" +#~ msgid "setting LU uppper (end) limit to %s" #~ msgstr "" -#~ msgid "virsh said: %r" +#~ msgid "Can't find lun or lun goup in array" #~ msgstr "" -#~ msgid "cool, it's a device" +#~ msgid "Volume to be restored to is smaller than the backup to be restored" #~ msgstr "" -#~ msgid "Unable to read LXC console" -#~ msgstr "Невозможно прочитать консоль LXC" +#~ msgid "Volume driver '%(driver)s' not initialized." +#~ msgstr "" -#~ msgid "" -#~ "to xml...\n" -#~ ":%s " +#~ msgid "in looping call" #~ msgstr "" -#~ "в xml...\n" -#~ ":%s " -#~ msgid "During wait running, %s disappeared." +#~ msgid "Is the appropriate service running?" +#~ msgstr "Выполняется ли соответствующая служба?" + +#~ msgid "Could not find another host" +#~ msgstr "Невозможно найти другой compute" + +#~ msgid "Not enough allocatable volume gigabytes remaining" #~ msgstr "" -#~ msgid "Instance %s running successfully." +#~ msgid "Unable to update stats on non-intialized Volume Group: %s" #~ msgstr "" -#~ msgid "The nwfilter(%(instance_secgroup_filter_name)s) is not found." +#~ msgid "do_setup: Pool %s does not exist" #~ msgstr "" -#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image verification failed" +#~ msgid "migrate_volume started with more than one vdisk copy" #~ msgstr "" -#~ msgid "" -#~ "%(container_format)s-%(id)s (%(base_file)s): image " -#~ "verification skipped, no hash stored" +#~ msgid "migrate_volume: Could not get vdisk copy data" #~ msgstr "" -#~ msgid "%(container_format)s-%(id)s (%(base_file)s): checking" +#~ msgid "Selected datastore: %s for the volume." #~ msgstr "" -#~ msgid "" -#~ "%(container_format)s-%(id)s (%(base_file)s): in use:" -#~ " on this node %(local)d local, " -#~ "%(remote)d on other nodes" +#~ msgid "There are no valid datastores present under %s." #~ msgstr "" -#~ msgid "" -#~ "%(container_format)s-%(id)s (%(base_file)s): warning " -#~ "-- an absent base file is in " -#~ "use! instances: %(instance_list)s" +#~ msgid "Unable to create volume, driver not initialized" #~ msgstr "" -#~ msgid "" -#~ "%(container_format)s-%(id)s (%(base_file)s): in: on" -#~ " other nodes (%(remote)d on other " -#~ "nodes)" +#~ msgid "Migration %(migration_id)s could not be found." +#~ msgstr "Перемещение %(migration_id)s не найдено." + +#~ msgid "Bad driver response status: %(status)s" #~ msgstr "" -#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image is not in use" +#~ msgid "Instance %(instance_id)s could not be found." +#~ msgstr "Копия %(instance_id)s не найдена." + +#~ msgid "Volume retype failed: %(reason)s" #~ msgstr "" -#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image is in use" +#~ msgid "SIGTERM received" #~ msgstr "" -#~ msgid "Created VM %s..." -#~ msgstr "Созданная ВМ %s..." +#~ msgid "Child %(pid)d exited with status %(code)d" +#~ msgstr "" -#~ msgid "Created VM %(instance_name)s as %(vm_ref)s." -#~ msgstr "Созданная ВМ %(instance_name)s как %(vm_ref)s." +#~ msgid "_wait_child %d" +#~ msgstr "" -#~ msgid "Creating a CDROM-specific VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " -#~ msgstr "Создание особого CDROM VBD для ВМ %(vm_ref)s, VDI %(vdi_ref)s ... " +#~ msgid "wait wrap.failed %s" +#~ msgstr "" #~ msgid "" -#~ "Created a CDROM-specific VBD %(vbd_ref)s" -#~ " for VM %(vm_ref)s, VDI %(vdi_ref)s." +#~ "Report interval must be less than " +#~ "service down time. Current config: " +#~ ". 
Setting " +#~ "service_down_time to: %(new_service_down_time)s" #~ msgstr "" -#~ "Создана VBD на основе CDROM %(vbd_ref)s" -#~ " для ВМ %(vm_ref)s, VDI %(vdi_ref)s." -#~ msgid "Image Type: %s" -#~ msgstr "Тип образа: %s" +#~ msgid "Failed to update iscsi target for volume %(name)s." +#~ msgstr "" -#~ msgid "ISO: Found sr possibly containing the ISO image" -#~ msgstr "ISO: Найден sr, возможно содержащий образ ISO" +#~ msgid "Updating iscsi target: %s" +#~ msgstr "" -#~ msgid "Size for image %(image)s:%(virtual_size)d" +#~ msgid "Failed to update iscsi target %(name)s: %(e)s" #~ msgstr "" -#~ msgid "instance %s: Failed to fetch glance image" +#~ msgid "Caught '%(exception)s' exception." #~ msgstr "" -#~ msgid "Creating VBD for VDI %s ... " -#~ msgstr "Создание VBD для VDI %s ... " +#~ msgid "Get code level failed" +#~ msgstr "" -#~ msgid "Creating VBD for VDI %s done." -#~ msgstr "Создание VBD для VDI %s выполнено." +#~ msgid "do_setup: Could not get system name" +#~ msgstr "" -#~ msgid "VBD.unplug successful first time." +#~ msgid "Failed to get license information." #~ msgstr "" -#~ msgid "VBD.unplug rejected: retrying..." +#~ msgid "do_setup: No configured nodes" #~ msgstr "" -#~ msgid "Not sleeping anymore!" +#~ msgid "enter: _get_chap_secret_for_host: host name %s" #~ msgstr "" -#~ msgid "VBD.unplug successful eventually." +#~ msgid "" +#~ "leave: _get_chap_secret_for_host: host name " +#~ "%(host_name)s with secret %(chap_secret)s" #~ msgstr "" -#~ msgid "Ignoring XenAPI.Failure in VBD.unplug: %s" -#~ msgstr "Пропуск XenAPI.Failure в VBD.unplug: %s" +#~ msgid "" +#~ "_create_host: Cannot clean host name. " +#~ "Host name is not unicode or string" +#~ msgstr "" -#~ msgid "Ignoring XenAPI.Failure %s" -#~ msgstr "Пропуск XenAPI.Failure %s" +#~ msgid "enter: _get_host_from_connector: %s" +#~ msgstr "" -#~ msgid "instance %s: Failed to spawn" +#~ msgid "leave: _get_host_from_connector: host %s" #~ msgstr "" -#~ msgid "Instance %s failed to spawn - performing clean-up" +#~ msgid "enter: _create_host: host %s" #~ msgstr "" -#~ msgid "instance %s: Failed to spawn - Unable to create VM" +#~ msgid "_create_host: No connector ports" #~ msgstr "" -#~ msgid "Starting VM %s..." -#~ msgstr "Запуск ВМ %s..." +#~ msgid "leave: _create_host: host %(host)s - %(host_name)s" +#~ msgstr "Перевод узла %(host)s в %(state)s." + +#~ msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +#~ msgstr "удаление тома %(volume_name)s, который имеет снимок" -#~ msgid "Spawning VM %(instance_uuid)s created %(vm_ref)s." +#~ msgid "" +#~ "storwize_svc_multihostmap_enabled is set to " +#~ "False, Not allow multi host mapping" #~ msgstr "" -#~ msgid "Instance %s: waiting for running" -#~ msgstr "Копия %s: ожидание запуска" +#~ msgid "volume %s mapping to multi host" +#~ msgstr "том %s: пропуск экспортирования" -#~ msgid "Resources to remove:%s" -#~ msgstr "Ресурсы для удаления:%s" +#~ msgid "" +#~ "leave: _map_vol_to_host: LUN %(result_lun)s, " +#~ "volume %(volume_name)s, host %(host_name)s" +#~ msgstr "" -#~ msgid "Removing VDI %(vdi_ref)s(uuid:%(vdi_to_remove)s)" +#~ msgid "enter: _delete_host: host %s " #~ msgstr "" -#~ msgid "Skipping VDI destroy for %s" -#~ msgstr "Пропуск ликвидации VDI для %s" +#~ msgid "leave: _delete_host: host %s " +#~ msgstr "" -#~ msgid "Unable to Snapshot instance %(instance_uuid)s: %(exc)s" +#~ msgid "_create_host failed to return the host name." 
#~ msgstr "" -#~ msgid "domid changed from %(olddomid)s to %(newdomid)s" +#~ msgid "_get_host_from_connector failed to return the host name for connector" #~ msgstr "" -#~ msgid "VM %(instance_uuid)s already halted,skipping shutdown..." +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to any host found." #~ msgstr "" -#~ msgid "Destroying VDIs for Instance %(instance_uuid)s" -#~ msgstr "Ликвидирование VDI для копии %(instance_uuid)s" +#~ msgid "" +#~ "terminate_connection: Multiple mappings of " +#~ "volume %(vol_name)s found, no host " +#~ "specified." +#~ msgstr "" -#~ msgid "Instance %(instance_uuid)s VM destroyed" -#~ msgstr "Копия %(instance_uuid)s ВМ ликвидирована" +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to host %(host_name)s found" +#~ msgstr "" -#~ msgid "Destroying VM for Instance %(instance_uuid)s" -#~ msgstr "Ликвидирование ВМ для копии %(instance_uuid)s" +#~ msgid "protocol must be specified as ' iSCSI' or ' FC'" +#~ msgstr "" -#~ msgid "Instance for migration %d not found, skipping" +#~ msgid "enter: _create_vdisk: vdisk %s " #~ msgstr "" -#~ msgid "injecting network info to xs for vm: |%s|" +#~ msgid "" +#~ "_create_vdisk %(name)s - did not find success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" -#~ msgid "creating vif(s) for vm: |%s|" -#~ msgstr "создание vif(s) для вм: |%s|" +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find success " +#~ "message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" -#~ msgid "Created VIF %(vif_ref)s for VM %(vm_ref)s, network %(network_ref)s." -#~ msgstr "Создан VIF %(vif_ref)s для ВМ %(vm_ref)s, сеть %(network_ref)s." +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find mapping " +#~ "id in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" -#~ msgid "injecting hostname to xs for vm: |%s|" +#~ msgid "" +#~ "_prepare_fc_map: Failed to prepare FlashCopy" +#~ " from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" #~ msgid "" -#~ "The call to %(method)s returned an " -#~ "error: %(e)s. VM id=%(instance_uuid)s; " -#~ "args=%(args)r" +#~ "Unexecpted mapping status %(status)s for " +#~ "mapping %(id)s. Attributes: %(attr)s" #~ msgstr "" -#~ "Вызов %(method)s возвратил ошибку: %(e)s. " -#~ "VM id=%(instance_uuid)s; args=%(args)r" -#~ msgid "Creating VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " -#~ msgstr "Создание VBD для ВМ %(vm_ref)s, VDI %(vdi_ref)s ... " +#~ msgid "" +#~ "Mapping %(id)s prepare failed to " +#~ "complete within the allotted %(to)d " +#~ "seconds timeout. Terminating." 
+#~ msgstr "" -#~ msgid "Error destroying VDI" -#~ msgstr "Ошибка ликвидации VDI" +#~ msgid "" +#~ "_prepare_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s with " +#~ "exception %(ex)s" +#~ msgstr "" -#~ msgid "\tVolume %s is NOT VSA volume" -#~ msgstr "\tТом %s не является VSA томом" +#~ msgid "_prepare_fc_map: %s" +#~ msgstr "_создать: %s" -#~ msgid "\tFE VSA Volume %s creation - do nothing" -#~ msgstr "\tСоздание тома FE VSA %s - ничего не выполнять" +#~ msgid "" +#~ "_start_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" -#~ msgid "VSA BE create_volume for %s failed" -#~ msgstr "Ошибка VSA BE create_volume для %s" +#~ msgid "" +#~ "enter: _run_flashcopy: execute FlashCopy from" +#~ " source %(source)s to target %(target)s" +#~ msgstr "" -#~ msgid "VSA BE create_volume for %s succeeded" -#~ msgstr "VSA BE create_volume для %s выполнено" +#~ msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +#~ msgstr "" -#~ msgid "\tFE VSA Volume %s deletion - do nothing" -#~ msgstr "\tУдаление FE VSA тома %s - ничего не выполнять" +#~ msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" -#~ msgid "VSA BE delete_volume for %s failed" -#~ msgstr "Ошибка VSA BE delete_volume для %s" +#~ msgid "_create_copy: Source vdisk %(src_vdisk)s (%(src_id)s) does not exist" +#~ msgstr "" -#~ msgid "VSA BE delete_volume for %s suceeded" -#~ msgstr "VSA BE delete_volume для %s выполнено" +#~ msgid "" +#~ "_create_copy: cannot get source vdisk " +#~ "%(src)s capacity from vdisk attributes " +#~ "%(attr)s" +#~ msgstr "" -#~ msgid "\tFE VSA Volume %s local path call - call discover" +#~ msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s ensure export - do nothing" +#~ msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s create export - do nothing" -#~ msgstr "\tТом FE VSA %s создать экспортирование - ничего не выполнять" +#~ msgid "" +#~ "leave: _get_flashcopy_mapping_attributes: mapping " +#~ "%(fc_map_id)s, attributes %(attributes)s" +#~ msgstr "" -#~ msgid "\tFE VSA Volume %s remove export - do nothing" -#~ msgstr "\tТом FE VSA %s удалить экспортирование - ничего не выполнять" +#~ msgid "enter: _is_vdisk_defined: vdisk %s " +#~ msgstr "" -#~ msgid "VSA BE remove_export for %s failed" +#~ msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " #~ msgstr "" -#~ msgid "Failed to retrieve QoS info" -#~ msgstr "Ошибка приёма сведений QoS" +#~ msgid "enter: _delete_vdisk: vdisk %s" +#~ msgstr "" -#~ msgid "invalid drive data" +#~ msgid "warning: Tried to delete vdisk %s but it does not exist." #~ msgstr "" -#~ msgid "drive_name not defined" +#~ msgid "leave: _delete_vdisk: vdisk %s" #~ msgstr "" -#~ msgid "invalid drive type name %s" +#~ msgid "" +#~ "_add_vdisk_copy %(name)s - did not find" +#~ " success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" -#~ msgid "*** Experimental VSA code ***" -#~ msgstr "*** Экспериментальный код VSA ***" +#~ msgid "_get_vdisk_copy_attrs: Could not get vdisk copy data" +#~ msgstr "" -#~ msgid "Requested number of VCs (%d) is too high. Setting to default" -#~ msgstr "Запрошенное количество VC (%d) избыточно. 
Назначение по умолчанию" +#~ msgid "_get_pool_attrs: Pool %s does not exist" +#~ msgstr "" -#~ msgid "Creating VSA: %s" -#~ msgstr "Создание VSA: %s" +#~ msgid "enter: _execute_command_and_parse_attributes: command %s" +#~ msgstr "" #~ msgid "" -#~ "VSA ID %(vsa_id)d %(vsa_name)s: Create " -#~ "volume %(vol_name)s, %(vol_size)d GB, type " -#~ "%(vol_type_id)s" +#~ "leave: _execute_command_and_parse_attributes:\n" +#~ "command: %(cmd)s\n" +#~ "attributes: %(attr)s" #~ msgstr "" -#~ "VSA ID %(vsa_id)d %(vsa_name)s: Создать " -#~ "том %(vol_name)s, %(vol_size)d ГБ, тип " -#~ "%(vol_type_id)s" - -#~ msgid "VSA ID %(vsa_id)d: Update VSA status to %(status)s" -#~ msgstr "VSA ID %(vsa_id)d: Обновить состояние VSA на %(status)s" -#~ msgid "VSA ID %(vsa_id)d: Update VSA call" -#~ msgstr "VSA ID %(vsa_id)d: Обновить вызов VSA" - -#~ msgid "Adding %(add_cnt)s VCs to VSA %(vsa_name)s." -#~ msgstr "Добавление %(add_cnt)s VC в VSA %(vsa_name)s." +#~ msgid "" +#~ "_get_hdr_dic: attribute headers and values do not match.\n" +#~ " Headers: %(header)s\n" +#~ " Values: %(row)s" +#~ msgstr "" -#~ msgid "Deleting %(del_cnt)s VCs from VSA %(vsa_name)s." -#~ msgstr "Удаление %(del_cnt)s VC из VSA %(vsa_name)s." +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ "stdout: %(out)s\n" +#~ "stderr: %(err)s\n" +#~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Deleting %(direction)s volume %(vol_name)s" -#~ msgstr "VSA ID %(vsa_id)s: Удаление %(direction)s тома %(vol_name)s" +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" -#~ msgid "Unable to delete volume %s" -#~ msgstr "Невозможно удалить том %s" +#~ msgid "Did not find expected column in %(fun)s: %(hdr)s" +#~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Forced delete. %(direction)s volume %(vol_name)s" +#~ msgid "" +#~ "Volume size %(size)s cannot be lesser" +#~ " than the snapshot size %(snap_size)s. " +#~ "They must be >= original snapshot " +#~ "size." #~ msgstr "" -#~ "VSA ID %(vsa_id)s: Принудительное удаление." -#~ " %(direction)s том %(vol_name)s" -#~ msgid "Going to try to terminate VSA ID %s" -#~ msgstr "Выполнение завершения работы VSA ID %s" +#~ msgid "" +#~ "Clones currently disallowed when %(size)s " +#~ "< %(source_size)s. They must be >= " +#~ "original volume size." +#~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Delete instance %(name)s" -#~ msgstr "VSA ID %(vsa_id)s: Удалить копию %(name)s" +#~ msgid "" +#~ "Size of specified image %(image_size)s " +#~ "is larger than volume size " +#~ "%(volume_size)s." +#~ msgstr "" -#~ msgid "Create call received for VSA %s" -#~ msgstr "Создать вызов полученный для VSA %s" +#~ msgid "" +#~ "Image minDisk size %(min_disk)s is " +#~ "larger than the volume size " +#~ "%(volume_size)s." +#~ msgstr "" -#~ msgid "Failed to find VSA %(vsa_id)d" -#~ msgstr "Ошибка поиска VSA %(vsa_id)d" +#~ msgid "Updating volume %(volume_id)s with %(update)s" +#~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Drive %(vol_id)s created. Status %(status)s" -#~ msgstr "VSA ID %(vsa_id)s: Создан накопитель %(vol_id)s. 
Состояние %(status)s" +#~ msgid "Volume %s: resetting 'creating' status failed" +#~ msgstr "" -#~ msgid "Drive %(vol_name)s (%(vol_disp_name)s) still in creating phase - wait" -#~ msgstr "Накопитель %(vol_name)s (%(vol_disp_name)s) в фазе создания - подождите" +#~ msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s" +#~ msgstr "" -#~ msgid "" -#~ "VSA ID %(vsa_id)d: Not all volumes " -#~ "are created (%(cvol_real)d of %(cvol_exp)d)" +#~ msgid "Marking volume %s as bootable" #~ msgstr "" #~ msgid "" -#~ "VSA ID %(vsa_id)d: Drive %(vol_name)s " -#~ "(%(vol_disp_name)s) is in %(status)s state" +#~ "Attempting download of %(image_id)s " +#~ "(%(image_location)s) to volume %(volume_id)s" #~ msgstr "" -#~ msgid "Failed to update attach status for volume %(vol_name)s. %(ex)s" +#~ msgid "" +#~ "Downloaded image %(image_id)s (%(image_location)s)" +#~ " to volume %(volume_id)s successfully" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)d: Delete all BE volumes" +#~ msgid "" +#~ "Creating volume glance metadata for " +#~ "volume %(volume_id)s backed by image " +#~ "%(image_id)s with: %(vol_metadata)s" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)d: Start %(vc_count)d instances" +#~ msgid "" +#~ "Cloning %(volume_id)s from image %(image_id)s" +#~ " at location %(image_location)s" #~ msgstr "" diff --git a/cinder/locale/ru_RU/LC_MESSAGES/cinder.po b/cinder/locale/ru_RU/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..1ad9e9a0fe --- /dev/null +++ b/cinder/locale/ru_RU/LC_MESSAGES/cinder.po @@ -0,0 +1,10737 @@ +# Russian (Russia) translations for cinder. +# Copyright (C) 2013 ORGANIZATION +# This file is distributed under the same license as the cinder project. +# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Cinder\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-02-03 06:16+0000\n" +"PO-Revision-Date: 2013-05-29 08:13+0000\n" +"Last-Translator: openstackjenkins \n" +"Language-Team: Russian (Russia) " +"(http://www.transifex.com/projects/p/openstack/language/ru_RU/)\n" +"Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && " +"n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:102 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:66 cinder/brick/exception.py:33 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:88 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:107 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:112 +#, python-format +msgid "Volume driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:116 +#, python-format +msgid "Backup driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:120 +#, python-format +msgid "Connection to glance failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:124 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:129 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:133 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." 
+msgstr "" + +#: cinder/exception.py:137 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:141 +msgid "Volume driver not ready." +msgstr "" + +#: cinder/exception.py:145 cinder/brick/exception.py:74 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:150 +#, python-format +msgid "Invalid snapshot: %(reason)s" +msgstr "" + +#: cinder/exception.py:154 +#, python-format +msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:159 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:163 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:167 +msgid "The results are invalid." +msgstr "" + +#: cinder/exception.py:171 +#, python-format +msgid "Invalid input received: %(reason)s" +msgstr "" + +#: cinder/exception.py:175 +#, python-format +msgid "Invalid volume type: %(reason)s" +msgstr "" + +#: cinder/exception.py:179 +#, python-format +msgid "Invalid volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:183 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:187 +#, python-format +msgid "Invalid host: %(reason)s" +msgstr "" + +#: cinder/exception.py:193 cinder/brick/exception.py:81 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:197 +#, python-format +msgid "Invalid auth key: %(reason)s" +msgstr "" + +#: cinder/exception.py:201 +#, python-format +msgid "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" +msgstr "" + +#: cinder/exception.py:206 +msgid "Service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:210 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:214 +#, python-format +msgid "The device in the path %(path)s is unavailable: %(reason)s" +msgstr "" + +#: cinder/exception.py:218 +#, python-format +msgid "Expected a uuid but received %(uuid)s." +msgstr "" + +#: cinder/exception.py:222 cinder/brick/exception.py:68 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:228 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:232 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "" +"Volume %(volume_id)s has no administration metadata with key " +"%(metadata_key)s." +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Invalid metadata: %(reason)s" +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Invalid metadata size: %(reason)s" +msgstr "" + +#: cinder/exception.py:250 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:255 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:264 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:269 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s deletion is not allowed with volumes " +"present with the type." 
+msgstr "" + +#: cinder/exception.py:274 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:278 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:282 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:287 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:291 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:295 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:328 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:332 +#, python-format +msgid "Unknown quota resources %(unknown)s." +msgstr "" + +#: cinder/exception.py:336 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:340 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:344 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:348 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:352 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:356 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:365 +#, python-format +msgid "Volume Type %(id)s already exists." +msgstr "" + +#: cinder/exception.py:369 +#, python-format +msgid "Volume type encryption for type %(type_id)s already exists." +msgstr "" + +#: cinder/exception.py:373 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:377 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:381 +#, python-format +msgid "Could not find parameter %(param)s" +msgstr "" + +#: cinder/exception.py:385 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:389 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:398 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:402 +#, python-format +msgid "Quota exceeded: code=%(code)s" +msgstr "" + +#: cinder/exception.py:409 +#, python-format +msgid "" +"Requested volume or snapshot exceeds allowed Gigabytes quota. 
Requested " +"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." +msgstr "" + +#: cinder/exception.py:415 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:419 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:423 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:427 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:432 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:436 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:440 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:444 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:449 +#, python-format +msgid "Glance metadata for volume/snapshot %(id)s cannot be found." +msgstr "" + +#: cinder/exception.py:453 +#, python-format +msgid "Failed to export for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:457 +#, python-format +msgid "Failed to create metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:461 +#, python-format +msgid "Failed to update metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:465 +#, python-format +msgid "Failed to copy metadata to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:469 +#, python-format +msgid "Failed to copy image to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:473 +msgid "Invalid Ceph args provided for backup rbd operation" +msgstr "" + +#: cinder/exception.py:477 +msgid "An error has occurred during backup operation" +msgstr "" + +#: cinder/exception.py:481 +msgid "Backup RBD operation failed" +msgstr "" + +#: cinder/exception.py:485 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:489 +msgid "Failed to identify volume backend." +msgstr "" + +#: cinder/exception.py:493 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "" + +#: cinder/exception.py:497 +#, python-format +msgid "Connection to swift failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:501 +#, python-format +msgid "Transfer %(transfer_id)s could not be found." +msgstr "" + +#: cinder/exception.py:505 +#, python-format +msgid "Volume migration failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:509 +#, python-format +msgid "SSH command injection detected: %(command)s" +msgstr "" + +#: cinder/exception.py:513 +#, python-format +msgid "QoS Specs %(specs_id)s already exists." +msgstr "" + +#: cinder/exception.py:517 +#, python-format +msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:522 +#, python-format +msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "No such QoS spec %(specs_id)s." +msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:536 +#, python-format +msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." 
+msgstr "" + +#: cinder/exception.py:541 +#, python-format +msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." +msgstr "" + +#: cinder/exception.py:546 +#, python-format +msgid "Invalid qos specs: %(reason)s" +msgstr "" + +#: cinder/exception.py:550 +#, python-format +msgid "QoS Specs %(specs_id)s is still associated with entities." +msgstr "" + +#: cinder/exception.py:554 +#, python-format +msgid "key manager error: %(reason)s" +msgstr "" + +#: cinder/exception.py:560 +msgid "Coraid Cinder Driver exception." +msgstr "" + +#: cinder/exception.py:564 +msgid "Failed to encode json data." +msgstr "" + +#: cinder/exception.py:568 +msgid "Login on ESM failed." +msgstr "" + +#: cinder/exception.py:572 +msgid "Relogin on ESM failed." +msgstr "" + +#: cinder/exception.py:576 +#, python-format +msgid "Group with name \"%(group_name)s\" not found." +msgstr "" + +#: cinder/exception.py:580 +#, python-format +msgid "ESM configure request failed: %(message)s." +msgstr "" + +#: cinder/exception.py:584 +#, python-format +msgid "Coraid ESM not available with reason: %(reason)s." +msgstr "" + +#: cinder/exception.py:589 +msgid "Zadara Cinder Driver exception." +msgstr "" + +#: cinder/exception.py:593 +#, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:597 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:601 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:605 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:609 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:613 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:618 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:622 +msgid "SolidFire Cinder Driver exception" +msgstr "" + +#: cinder/exception.py:626 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:630 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:636 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:641 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:645 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:649 cinder/exception.py:662 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:654 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:658 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/manager.py:133 +msgid "Notifying Schedulers of capabilities ..." +msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:105 +#, python-format +msgid "" +"Default quota for resource: %(res)s is set by the default quota flag: " +"quota_%(res)s, it is now deprecated. Please use the the default quota " +"class for default quota." 
+msgstr "" + +#: cinder/quota.py:748 +#, python-format +msgid "Created reservations %s" +msgstr "" + +#: cinder/quota.py:770 +#, python-format +msgid "Failed to commit reservations %s" +msgstr "" + +#: cinder/quota.py:790 +#, python-format +msgid "Failed to roll back reservations %s" +msgstr "" + +#: cinder/quota.py:876 +msgid "Cannot register resource" +msgstr "" + +#: cinder/quota.py:879 +msgid "Cannot register resources" +msgstr "" + +#: cinder/quota_utils.py:46 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume - " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/quota_utils.py:56 cinder/transfer/api.py:168 +#: cinder/volume/flows/api/create_volume.py:520 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/service.py:95 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:108 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:148 +#, python-format +msgid "" +"Report interval must be less than service down time. Current config " +"service_down_time: %(service_down_time)s, report_interval for this: " +"service is: %(report_interval)s. Setting global service_down_time to: " +"%(new_down_time)s" +msgstr "" + +#: cinder/service.py:216 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:255 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:270 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:276 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:298 +#, python-format +msgid "" +"Value of config option %(name)s_workers must be integer greater than 1. " +"Input value ignored." +msgstr "" + +#: cinder/service.py:373 +msgid "serve() can only be called once" +msgstr "" + +#: cinder/service.py:379 cinder/openstack/common/service.py:166 +#: cinder/openstack/common/service.py:384 +msgid "Full set of CONF:" +msgstr "" + +#: cinder/service.py:387 +#, python-format +msgid "%s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Can not translate %s to integer." 
+msgstr "" + +#: cinder/utils.py:127 +#, python-format +msgid "May specify only one of %s" +msgstr "" + +#: cinder/utils.py:212 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:228 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:412 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:423 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:698 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:759 +#, python-format +msgid "Volume driver %s not initialized" +msgstr "" + +#: cinder/wsgi.py:127 cinder/openstack/common/sslutils.py:50 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: cinder/wsgi.py:130 cinder/openstack/common/sslutils.py:53 +#, python-format +msgid "Unable to find ca_file : %s" +msgstr "" + +#: cinder/wsgi.py:133 cinder/openstack/common/sslutils.py:56 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: cinder/wsgi.py:136 cinder/openstack/common/sslutils.py:59 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:169 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:206 +#, python-format +msgid "Started %(name)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:244 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:313 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." +msgstr "" + +#: cinder/api/common.py:92 cinder/api/common.py:126 cinder/volume/api.py:266 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:95 cinder/api/common.py:130 cinder/volume/api.py:263 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:120 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:134 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:162 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:189 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:182 +msgid "Initializing extension manager." 
+msgstr "" + +#: cinder/api/extensions.py:197 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:235 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:236 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:240 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:256 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:262 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:276 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:287 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:356 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:266 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:463 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:786 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:907 +msgid "subclasses must implement construct()!" +msgstr "" + +#: cinder/api/contrib/admin_actions.py:81 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:227 +#: cinder/api/contrib/volume_transfer.py:157 +#: cinder/api/contrib/volume_transfer.py:193 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:224 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:60 +msgid "Snapshot not found." 
+msgstr "" + +#: cinder/api/contrib/hosts.py:86 cinder/api/openstack/wsgi.py:245 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:136 +#, python-format +msgid "Host '%s' could not be found." +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:168 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:180 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:206 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:214 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:111 +msgid "Please specify a name for QoS specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:220 +msgid "Failed to disassociate qos specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:222 +msgid "Qos specs still in use." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:298 +#: cinder/api/contrib/qos_specs_manage.py:351 +msgid "Volume Type id must not be None." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:72 +msgid "Missing required element quota_class_set in request body." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:81 +msgid "Quota class limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:85 +msgid "Quota class limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:60 +msgid "Quota limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quotas.py:65 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:100 +msgid "Missing required element quota_set in request body." +msgstr "" + +#: cinder/api/contrib/quotas.py:111 +#, python-format +msgid "Bad key(s) in quota set: %s" +msgstr "" + +#: cinder/api/contrib/scheduler_hints.py:36 +msgid "Malformed scheduler_hints attribute" +msgstr "" + +#: cinder/api/contrib/services.py:84 +msgid "" +"Query by service parameter is deprecated. Please use binary parameter " +"instead." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:51 +msgid "'status' must be specified." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:61 +#, python-format +msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:67 +#, python-format +msgid "" +"Provided snapshot status %(provided)s not allowed for snapshot with " +"status %(current)s." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:79 +msgid "progress must be an integer percentage" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:101 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:105 +#: cinder/api/v1/snapshot_metadata.py:75 cinder/api/v1/volume_metadata.py:75 +#: cinder/api/v2/snapshot_metadata.py:75 cinder/api/v2/volume_metadata.py:74 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:108 +#: cinder/api/v1/snapshot_metadata.py:79 cinder/api/v1/volume_metadata.py:79 +#: cinder/api/v2/snapshot_metadata.py:79 cinder/api/v2/volume_metadata.py:78 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:150 +msgid "" +"Key names can only contain alphanumeric characters, underscores, periods," +" colons and hyphens." 
+msgstr "" + +#: cinder/api/contrib/volume_actions.py:99 +#, python-format +msgid "" +"Invalid request to attach volume to an instance %(instance_uuid)s and a " +"host %(host_name)s simultaneously" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:107 +msgid "Invalid request to attach volume to an invalid target" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:111 +msgid "" +"Invalid request to attach volume with an invalid mode. Attaching mode " +"should be 'rw' or 'ro'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:196 +msgid "Unable to fetch connection information from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:216 +msgid "Unable to terminate volume connection from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:229 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:237 +msgid "Bad value for 'force' parameter." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:240 +msgid "'force' is not string or bool." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:280 +msgid "New volume size must be specified as an integer." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:299 +msgid "Must specify readonly in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:307 +msgid "Bad value for 'readonly'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:311 +msgid "'readonly' not string or bool" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:325 +msgid "New volume type must be specified." +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:131 +msgid "Listing volume transfers" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:147 +#, python-format +msgid "Creating new volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:162 +#, python-format +msgid "Creating transfer of volume %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:183 +#, python-format +msgid "Accepting volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:196 +#, python-format +msgid "Accepting transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:217 +#, python-format +msgid "Delete transfer with id: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:64 +msgid "key_size must be non-negative" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:67 +msgid "key_size must be an integer" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:73 +msgid "provider must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:75 +msgid "control_location must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:83 +#, python-format +msgid "Valid control location are: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:111 +msgid "Cannot create encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:115 +msgid "Create body is not valid." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:157 +msgid "Cannot delete encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/middleware/auth.py:108 +msgid "Invalid service catalog json." 
+msgstr "" + +#: cinder/api/middleware/fault.py:44 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:53 cinder/api/openstack/wsgi.py:984 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/fault.py:69 +#, python-format +msgid "%(exception)s: %(explanation)s" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:55 cinder/api/middleware/sizelimit.py:64 +#: cinder/api/middleware/sizelimit.py:78 +msgid "Request is too large." +msgstr "" + +#: cinder/api/openstack/__init__.py:69 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:80 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:104 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:126 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." +msgstr "" + +#: cinder/api/openstack/wsgi.py:220 cinder/api/openstack/wsgi.py:634 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:639 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:677 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:682 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:685 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:793 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:799 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:803 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:914 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:917 cinder/api/openstack/wsgi.py:930 +#: cinder/api/v1/snapshot_metadata.py:53 cinder/api/v1/snapshot_metadata.py:71 +#: cinder/api/v1/snapshot_metadata.py:96 cinder/api/v1/snapshot_metadata.py:121 +#: cinder/api/v1/volume_metadata.py:53 cinder/api/v1/volume_metadata.py:71 +#: cinder/api/v1/volume_metadata.py:96 cinder/api/v1/volume_metadata.py:121 +#: cinder/api/v2/snapshot_metadata.py:53 cinder/api/v2/snapshot_metadata.py:71 +#: cinder/api/v2/snapshot_metadata.py:96 cinder/api/v2/snapshot_metadata.py:121 +#: cinder/api/v2/volume_metadata.py:52 cinder/api/v2/volume_metadata.py:70 +#: cinder/api/v2/volume_metadata.py:95 cinder/api/v2/volume_metadata.py:120 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:927 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:939 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:987 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." 
+msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:139 cinder/api/v2/limits.py:138 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:264 cinder/api/v2/limits.py:261 +msgid "This request was rate-limited." +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:37 cinder/api/v1/snapshot_metadata.py:117 +#: cinder/api/v1/snapshot_metadata.py:156 cinder/api/v2/snapshot_metadata.py:37 +#: cinder/api/v2/snapshot_metadata.py:117 +#: cinder/api/v2/snapshot_metadata.py:156 +msgid "snapshot does not exist" +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:139 +#: cinder/api/v1/snapshot_metadata.py:149 cinder/api/v1/volume_metadata.py:139 +#: cinder/api/v1/volume_metadata.py:149 cinder/api/v2/snapshot_metadata.py:139 +#: cinder/api/v2/snapshot_metadata.py:149 cinder/api/v2/volume_metadata.py:138 +#: cinder/api/v2/volume_metadata.py:148 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:119 cinder/api/v2/snapshots.py:120 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:184 +msgid "'volume_id' must be specified" +msgstr "" + +#: cinder/api/v1/snapshots.py:182 cinder/api/v2/snapshots.py:193 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:186 cinder/api/v2/snapshots.py:202 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:37 cinder/api/v1/volume_metadata.py:117 +#: cinder/api/v1/volume_metadata.py:156 cinder/api/v2/volume_metadata.py:36 +#: cinder/api/v2/volume_metadata.py:116 cinder/api/v2/volume_metadata.py:155 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:111 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:290 cinder/api/v2/volumes.py:228 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:344 cinder/api/v1/volumes.py:348 +#: cinder/api/v2/volumes.py:298 cinder/api/v2/volumes.py:302 +msgid "Invalid imageRef provided." 
+msgstr "" + +#: cinder/api/v1/volumes.py:388 cinder/api/v2/volumes.py:354 +#, python-format +msgid "snapshot id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:401 +#, python-format +msgid "source vol id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:412 cinder/api/v2/volumes.py:377 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/v1/volumes.py:496 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/snapshots.py:111 cinder/api/v2/snapshots.py:126 +#: cinder/api/v2/snapshots.py:267 +msgid "Snapshot could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:174 cinder/api/v2/snapshots.py:234 +#: cinder/api/v2/volumes.py:313 cinder/api/v2/volumes.py:419 +#, python-format +msgid "Missing required element '%s' in request body" +msgstr "" + +#: cinder/api/v2/snapshots.py:190 cinder/api/v2/volumes.py:217 +#: cinder/api/v2/volumes.py:234 cinder/api/v2/volumes.py:449 +msgid "Volume could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:230 cinder/api/v2/volumes.py:415 +msgid "Missing request body" +msgstr "" + +#: cinder/api/v2/types.py:70 +msgid "Volume type not found" +msgstr "" + +#: cinder/api/v2/volumes.py:237 +msgid "Volume cannot be deleted while in attached state" +msgstr "" + +#: cinder/api/v2/volumes.py:343 +msgid "Volume type not found." +msgstr "" + +#: cinder/api/v2/volumes.py:366 +#, python-format +msgid "source volume id:%s not found" +msgstr "" + +#: cinder/api/v2/volumes.py:472 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:66 +msgid "Backup status must be available or error" +msgstr "" + +#: cinder/backup/api.py:105 +msgid "Volume to be backed up must be available" +msgstr "" + +#: cinder/backup/api.py:140 +msgid "Backup status must be available" +msgstr "" + +#: cinder/backup/api.py:145 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:154 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:170 +msgid "Volume to be restored to must be available" +msgstr "" + +#: cinder/backup/api.py:176 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." +msgstr "" + +#: cinder/backup/api.py:181 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:97 +msgid "NULL host not allowed for volume backend lookup." +msgstr "" + +#: cinder/backup/manager.py:100 +#, python-format +msgid "Checking hostname '%s' for backend info." +msgstr "" + +#: cinder/backup/manager.py:107 +#, python-format +msgid "Backend not found in hostname (%s) so using default." +msgstr "" + +#: cinder/backup/manager.py:117 +#, python-format +msgid "Manager requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:120 cinder/backup/manager.py:132 +msgid "Fetching default backend." +msgstr "" + +#: cinder/backup/manager.py:123 +#, python-format +msgid "Volume manager for backend '%s' does not exist." +msgstr "" + +#: cinder/backup/manager.py:129 +#, python-format +msgid "Driver requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:147 +#, python-format +msgid "" +"Registering backend %(backend)s (host=%(host)s " +"backend_name=%(backend_name)s)." +msgstr "" + +#: cinder/backup/manager.py:154 +#, python-format +msgid "Registering default backend %s." 
+msgstr "" + +#: cinder/backup/manager.py:158 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)." +msgstr "" + +#: cinder/backup/manager.py:165 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s." +msgstr "" + +#: cinder/backup/manager.py:184 +msgid "Cleaning up incomplete backup operations." +msgstr "" + +#: cinder/backup/manager.py:189 +#, python-format +msgid "Resetting volume %s to available (was backing-up)." +msgstr "" + +#: cinder/backup/manager.py:194 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)." +msgstr "" + +#: cinder/backup/manager.py:206 +#, python-format +msgid "Resetting backup %s to error (was creating)." +msgstr "" + +#: cinder/backup/manager.py:212 +#, python-format +msgid "Resetting backup %s to available (was restoring)." +msgstr "" + +#: cinder/backup/manager.py:217 +#, python-format +msgid "Resuming delete on backup: %s." +msgstr "" + +#: cinder/backup/manager.py:225 +#, python-format +msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:237 +#, python-format +msgid "" +"Create backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:249 +#, python-format +msgid "" +"Create backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:282 +#, python-format +msgid "Create backup finished. backup: %s." +msgstr "" + +#: cinder/backup/manager.py:286 +#, python-format +msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:299 +#, python-format +msgid "" +"Restore backup aborted: expected volume status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:310 +#, python-format +msgid "" +"Restore backup aborted: expected backup status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:329 +#, python-format +msgid "" +"Restore backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:360 +#, python-format +msgid "" +"Restore backup finished, backup %(backup_id)s restored to volume " +"%(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:379 +#, python-format +msgid "Delete backup started, backup: %s." +msgstr "" + +#: cinder/backup/manager.py:386 +#, python-format +msgid "" +"Delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:399 +#, python-format +msgid "" +"Delete backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:422 +#, python-format +msgid "Delete backup finished, backup %s deleted." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:116 +msgid "" +"rbd striping not supported - ignoring configuration settings for rbd " +"striping" +msgstr "" + +#: cinder/backup/drivers/ceph.py:147 +#, python-format +msgid "invalid user '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:213 +msgid "backup_id required" +msgstr "" + +#: cinder/backup/drivers/ceph.py:224 +#, python-format +msgid "discarding %(length)s bytes from offset %(offset)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:232 +#, python-format +msgid "writing zeroes chunk %d" +msgstr "" + +#: cinder/backup/drivers/ceph.py:246 +#, python-format +msgid "transferring data between '%(src)s' and '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:250 +#, python-format +msgid "%(chunks)s chunks of %(bytes)s bytes to be transferred" +msgstr "" + +#: cinder/backup/drivers/ceph.py:269 +#, python-format +msgid "transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:279 +#, python-format +msgid "transferring remaining %s bytes" +msgstr "" + +#: cinder/backup/drivers/ceph.py:295 +#, python-format +msgid "creating base image '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:322 cinder/backup/drivers/ceph.py:603 +#, python-format +msgid "deleting backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:325 +msgid "no backup snapshot to delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:361 +#, python-format +msgid "trying diff format name format basename='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:369 +#, python-format +msgid "image %s not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:377 +#, python-format +msgid "base image still has %s snapshots so skipping base image delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:382 +#, python-format +msgid "deleting base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:389 +#, python-format +msgid "image busy, retrying %(retries)s more time(s) in %(delay)ss" +msgstr "" + +#: cinder/backup/drivers/ceph.py:394 +msgid "max retries reached - raising error" +msgstr "" + +#: cinder/backup/drivers/ceph.py:397 +#, python-format +msgid "base backup image='%s' deleted)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:407 +#, python-format +msgid "deleting source snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:453 +#, python-format +msgid "performing differential transfer from '%(src)s' to '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:478 +#, python-format +msgid "rbd diff op failed - (ret=%(ret)s stderr=%(stderr)s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:488 +#, python-format +msgid "image '%s' not found - trying diff format name" +msgstr "" + +#: cinder/backup/drivers/ceph.py:493 +#, python-format +msgid "diff format image '%s' not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:528 +#, python-format +msgid "using --from-snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:543 +#, python-format +msgid "source snap '%s' is stale so deleting" +msgstr "" + +#: cinder/backup/drivers/ceph.py:555 +#, python-format +msgid "" +"snap='%(snap)s' does not exist in base image='%(base)s' - aborting " +"incremental backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:566 +#, python-format +msgid "creating backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:586 +#, python-format +msgid "differential backup transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:595 +msgid "differential backup transfer failed" +msgstr "" + +#: 
cinder/backup/drivers/ceph.py:625 +#, python-format +msgid "creating base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:634 +msgid "copying data" +msgstr "" + +#: cinder/backup/drivers/ceph.py:694 +#, python-format +msgid "looking for snapshot of backup base '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:697 +#, python-format +msgid "backup base '%s' has no snapshots" +msgstr "" + +#: cinder/backup/drivers/ceph.py:704 +#, python-format +msgid "backup '%s' has no snapshot" +msgstr "" + +#: cinder/backup/drivers/ceph.py:708 +#, python-format +msgid "backup should only have one snapshot but instead has %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:713 +#, python-format +msgid "found snapshot '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:734 +msgid "need non-zero volume size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:751 +#, python-format +msgid "Starting backup of volume='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:764 +msgid "forcing full backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:776 +#, python-format +msgid "backup '%s' finished." +msgstr "" + +#: cinder/backup/drivers/ceph.py:834 +msgid "adjusting restore vol size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:846 +#, python-format +msgid "trying incremental restore from base='%(base)s' snap='%(snap)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:858 +msgid "differential restore failed, trying full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:869 +#, python-format +msgid "restore transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:916 +#, python-format +msgid "rbd has %s extents" +msgstr "" + +#: cinder/backup/drivers/ceph.py:938 +msgid "dest volume is original volume - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:959 +msgid "destination has extents - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:964 +#, python-format +msgid "no restore point found for backup='%s', forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:995 +msgid "forcing full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1005 +#, python-format +msgid "starting restore from Ceph backup=%(src)s to volume=%(dest)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1016 +msgid "volume_file does not support fileno() so skipping fsync()" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1021 +msgid "restore finished successfully." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:1023 +#, python-format +msgid "restore finished with error - %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1029 +#, python-format +msgid "delete started for backup=%s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1034 +msgid "rbd image not found but continuing anyway so that db entry can be removed" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1037 +#, python-format +msgid "delete '%s' finished with warning" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1039 +#, python-format +msgid "delete '%s' finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:106 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:123 +#, python-format +msgid "single_user auth mode enabled, but %(param)s not set" +msgstr "" + +#: cinder/backup/drivers/swift.py:141 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:146 +#, python-format +msgid "container %s does not exist" +msgstr "" + +#: cinder/backup/drivers/swift.py:151 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/drivers/swift.py:157 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:173 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:182 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:192 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:209 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/drivers/swift.py:214 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:219 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:224 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/drivers/swift.py:234 +#, python-format +msgid "volume size %d is invalid." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:248 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:271 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/drivers/swift.py:278 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:287 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/drivers/swift.py:291 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/drivers/swift.py:297 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:301 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:304 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:312 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/drivers/swift.py:328 cinder/backup/drivers/tsm.py:324 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/drivers/swift.py:345 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/drivers/swift.py:350 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:356 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/drivers/swift.py:362 +#, python-format +msgid "" +"restoring object from swift. backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:378 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/drivers/swift.py:401 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:409 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:423 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:428 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:432 cinder/backup/drivers/tsm.py:378 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:446 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:455 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:458 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:468 cinder/backup/drivers/tsm.py:440 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/backup/drivers/tsm.py:85 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to create device hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:143 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to obtain backup success notification from " +"server.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:173 +#, python-format +msgid "" +"restore: %(vol_id)s Failed.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:199 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a block device." +msgstr "" + +#: cinder/backup/drivers/tsm.py:206 +#, python-format +msgid "backup: %(vol_id)s Failed. Cannot obtain real path to device %(path)s." +msgstr "" + +#: cinder/backup/drivers/tsm.py:213 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a file." +msgstr "" + +#: cinder/backup/drivers/tsm.py:260 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to remove backup hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:286 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to TSM, volume path: " +"%(volume_path)s," +msgstr "" + +#: cinder/backup/drivers/tsm.py:298 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:308 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:338 +#, python-format +msgid "" +"restore: starting restore of backup from TSM to volume %(volume_id)s, " +"backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:352 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:362 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:413 +#, python-format +msgid "" +"delete: %(vol_id)s Failed to run dsmc with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:421 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments with " +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:432 +#, python-format +msgid "" +"delete: %(vol_id)s Failed with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/brick/exception.py:55 +#, python-format +msgid "Exception in string format operation. 
msg='%s'" +msgstr "" + +#: cinder/brick/exception.py:85 +msgid "We are unable to locate any Fibre Channel devices." +msgstr "" + +#: cinder/brick/exception.py:89 +msgid "Unable to find a Fibre Channel volume device." +msgstr "" + +#: cinder/brick/exception.py:93 +#, python-format +msgid "Volume device not found at %(device)s." +msgstr "" + +#: cinder/brick/exception.py:97 +#, python-format +msgid "Unable to find Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:101 +#, python-format +msgid "Failed to create Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:105 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:109 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:113 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:117 +#, python-format +msgid "Connect to volume via protocol %(protocol)s not supported." +msgstr "" + +#: cinder/brick/initiator/connector.py:127 +#, python-format +msgid "Invalid InitiatorConnector protocol specified %(protocol)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:140 +#, python-format +msgid "Failed to access the device on the path %(path)s: %(error)s %(info)s." +msgstr "" + +#: cinder/brick/initiator/connector.py:229 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. Try" +" number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:242 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:317 +#, python-format +msgid "Could not find the iSCSI Initiator File %s" +msgstr "" + +#: cinder/brick/initiator/connector.py:609 +msgid "We are unable to locate any Fibre Channel devices" +msgstr "" + +#: cinder/brick/initiator/connector.py:619 +#, python-format +msgid "Looking for Fibre Channel dev %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:629 +msgid "Fibre Channel volume device not found." +msgstr "" + +#: cinder/brick/initiator/connector.py:633 +#, python-format +msgid "Fibre volume not yet found. Will rescan & retry. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:649 +#, python-format +msgid "Found Fibre Channel volume %(name)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:658 +#, python-format +msgid "Multipath device discovered %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:776 +#, python-format +msgid "AoE volume not yet found at: %(path)s. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:789 +#, python-format +msgid "Found AoE device %(path)s (after %(tries)s rediscover)" +msgstr "" + +#: cinder/brick/initiator/connector.py:815 +#, python-format +msgid "aoe-discover: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:825 +#, python-format +msgid "aoe-revalidate %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:834 +#, python-format +msgid "aoe-flush %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:858 +msgid "" +"Connection details not present. RemoteFsClient may not initialize " +"properly." 
+msgstr "" + +#: cinder/brick/initiator/connector.py:915 +msgid "Invalid connection_properties specified no device_path attribute" +msgstr "" + +#: cinder/brick/initiator/linuxfc.py:50 cinder/brick/initiator/linuxfc.py:56 +msgid "systool is not installed" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:99 +#: cinder/brick/initiator/linuxscsi.py:107 +#: cinder/brick/initiator/linuxscsi.py:124 +#, python-format +msgid "multipath call failed exit (%(code)s)" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:145 +#, python-format +msgid "Couldn't find multipath device %(line)s" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:149 +#, python-format +msgid "Found multipath device = %(mdev)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:140 +msgid "Attempting recreate of backing lun..." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:158 +#, python-format +msgid "" +"Failed to recover attempt to create iscsi backing lun for volume " +"id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:177 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:184 +#, python-format +msgid "" +"Created volume path %(vp)s,\n" +"content: %(vc)%" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:216 cinder/brick/iscsi/iscsi.py:365 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:227 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:258 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:262 +#, python-format +msgid "Volume path %s does not exist, nothing to remove." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:280 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:290 cinder/brick/iscsi/iscsi.py:550 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:375 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:469 +msgid "cinder-rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:489 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:513 cinder/brick/iscsi/iscsi.py:522 +#, python-format +msgid "Failed to create iscsi target for volume id:%s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:532 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:542 +#, python-format +msgid "Failed to remove iscsi target for volume id:%s." 
+msgstr "" + +#: cinder/brick/iscsi/iscsi.py:571 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 cinder/brick/local_dev/lvm.py:158 +#: cinder/brick/local_dev/lvm.py:474 cinder/brick/local_dev/lvm.py:503 +#: cinder/brick/local_dev/lvm.py:546 cinder/brick/local_dev/lvm.py:609 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 cinder/brick/local_dev/lvm.py:159 +#: cinder/brick/local_dev/lvm.py:475 cinder/brick/local_dev/lvm.py:504 +#: cinder/brick/local_dev/lvm.py:547 cinder/brick/local_dev/lvm.py:610 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 cinder/brick/local_dev/lvm.py:160 +#: cinder/brick/local_dev/lvm.py:476 cinder/brick/local_dev/lvm.py:505 +#: cinder/brick/local_dev/lvm.py:548 cinder/brick/local_dev/lvm.py:611 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, python-format +msgid "Unable to locate Volume Group %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:157 +msgid "Error querying thin pool about data_percent" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:370 +#, python-format +msgid "Unable to find VG: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:420 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:473 +msgid "Error creating Volume" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:489 +#, python-format +msgid "Unable to find LV: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:502 +msgid "Error creating snapshot" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:545 +msgid "Error activating LV" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:563 +#, python-format +msgid "Error reported running lvremove: CMD: %(command)s, RESPONSE: %(response)s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:568 +msgid "Attempting udev settle and retry of lvremove..." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:608 +msgid "Error extending Volume" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:39 +msgid "nfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:45 +msgid "glusterfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:86 +#, python-format +msgid "Already mounted: %s" +msgstr "" + +#: cinder/common/config.py:125 +msgid "Deploy v1 of the Cinder API." +msgstr "" + +#: cinder/common/config.py:128 +msgid "Deploy v2 of the Cinder API." +msgstr "" + +#: cinder/common/sqlalchemyutils.py:66 +#: cinder/openstack/common/db/sqlalchemy/utils.py:72 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:114 +#: cinder/openstack/common/db/sqlalchemy/utils.py:120 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/compute/nova.py:97 +#, python-format +msgid "Novaclient connection created using URL: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:63 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:190 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:843 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1842 +#, python-format +msgid "VolumeType %s deletion failed, VolumeType in use." 
+msgstr "" + +#: cinder/db/sqlalchemy/api.py:2530 +#, python-format +msgid "No backup with id %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2615 +msgid "Volume must be available" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2639 +#, python-format +msgid "Volume in unexpected state %s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2662 +#, python-format +msgid "" +"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " +"%(status)s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:37 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:64 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:240 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:269 +msgid "Downgrade from initial Cinder install is unsupported." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:49 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:74 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:105 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:45 +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:48 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:46 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:127 +msgid "Dropping foreign key reservations_ibfk_1 failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:133 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:140 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:147 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:60 +msgid "Exception while creating table 'volume_glance_metadata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:75 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:68 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:58 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:61 +msgid "transfers table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:31 +msgid "migrations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:61 +#, python-format +msgid "Table |%s| not created" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:37 +#, python-format +msgid "Exception while dropping table %s." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:100 +#, python-format +msgid "Exception while creating table %s." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:34 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:43 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:49 +#, python-format +msgid "Column |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:92 +msgid "encryption_key_id column not dropped from volumes" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:100 +msgid "encryption_key_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:105 +msgid "volume_type_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:113 +msgid "encryption table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:49 +msgid "Table quality_of_service_specs not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:60 +msgid "Added qos_specs_id column to volume type table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:85 +msgid "Dropping foreign key volume_types_ibfk_1 failed" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:93 +msgid "Dropping qos_specs_id column failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:100 +msgid "Dropping quality_of_service_specs table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:59 +msgid "volume_admin_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:48 +msgid "" +"Found existing 'default' entries in the quota_classes table. Skipping " +"insertion of default values." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:72 +msgid "Added default quota class data into the DB." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:74 +msgid "Default quota class data not inserted into the DB." +msgstr "" + +#: cinder/image/glance.py:161 cinder/image/glance.py:169 +#, python-format +msgid "Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:94 cinder/image/image_utils.py:199 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/image/image_utils.py:101 +#, python-format +msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:109 cinder/image/image_utils.py:192 +#, python-format +msgid "" +"Size is %(image_size)dGB and doesn't fit in a volume of size " +"%(volume_size)dGB." +msgstr "" + +#: cinder/image/image_utils.py:157 +#, python-format +msgid "" +"qemu-img is not installed and image is of type %s. Only RAW images can " +"be used if qemu-img is not installed." +msgstr "" + +#: cinder/image/image_utils.py:164 +msgid "" +"qemu-img is not installed and the disk format is not specified. Only RAW" +" images can be used if qemu-img is not installed." 
+msgstr "" + +#: cinder/image/image_utils.py:178 +#, python-format +msgid "Copying image from %(tmp)s to volume %(dest)s - size: %(size)s" +msgstr "" + +#: cinder/image/image_utils.py:206 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:224 +#, python-format +msgid "Converted to %(vol_format)s, but format is now %(file_format)s" +msgstr "" + +#: cinder/image/image_utils.py:260 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:78 +msgid "" +"config option keymgr.fixed_key has not been defined: some operations may " +"fail unexpectedly" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:80 +msgid "keymgr.fixed_key not defined" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:134 +#, python-format +msgid "Not deleting key %s" +msgstr "" + +#: cinder/openstack/common/eventlet_backdoor.py:140 +#, python-format +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/fileutils.py:64 +#, python-format +msgid "Reloading cached file %s" +msgstr "" + +#: cinder/openstack/common/gettextutils.py:252 +msgid "Message objects do not support addition." +msgstr "" + +#: cinder/openstack/common/gettextutils.py:261 +msgid "" +"Message objects do not support str() because they may contain non-ascii " +"characters. Please use unicode() or translate() instead." +msgstr "" + +#: cinder/openstack/common/imageutils.py:96 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:189 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:200 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:227 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:235 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/log.py:301 +#, python-format +msgid "Deprecated: %s" +msgstr "" + +#: cinder/openstack/common/log.py:402 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:453 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:623 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:82 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:89 +#: cinder/tests/brick/test_brick_connector.py:466 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:129 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:136 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:43 +#, python-format +msgid "Unexpected argument for periodic task creation: %(arg)s." 
+msgstr "" + +#: cinder/openstack/common/periodic_task.py:134 +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:139 +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:177 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:186 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." +msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:127 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/openstack/common/processutils.py:142 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:167 +#: cinder/openstack/common/processutils.py:239 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:345 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:179 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/openstack/common/processutils.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:318 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:220 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/openstack/common/processutils.py:224 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/openstack/common/service.py:175 +#: cinder/openstack/common/service.py:269 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:187 +msgid "Exception during rpc cleanup." 
+msgstr "" + +#: cinder/openstack/common/service.py:238 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:275 +msgid "Unhandled exception" +msgstr "" + +#: cinder/openstack/common/service.py:308 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/openstack/common/service.py:327 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/openstack/common/service.py:337 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/openstack/common/service.py:354 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/openstack/common/service.py:358 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/service.py:362 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/openstack/common/service.py:392 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/openstack/common/service.py:410 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/openstack/common/sslutils.py:98 +#, python-format +msgid "Invalid SSL version : %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:86 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/strutils.py:182 +#, python-format +msgid "Invalid string format: %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:189 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/openstack/common/versionutils.py:69 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and " +"may be removed in %(remove_in)s." +msgstr "" + +#: cinder/openstack/common/versionutils.py:73 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s and may be removed in " +"%(remove_in)s. It will not be superseded." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:29 +msgid "An unknown error occurred in crypto utils." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:36 +#, python-format +msgid "Block size of %(given)d is too big, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:45 +#, python-format +msgid "Length of %(given)d is too long, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/db/exception.py:44 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:487 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:538 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:610 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/utils.py:33 +msgid "Sort key supplied was not valid." +msgstr "" + +#: cinder/openstack/common/notifier/api.py:129 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:145 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:164 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. 
Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:105 +#, python-format +msgid "" +"An RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:83 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:216 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshold: %d. There " +"could be a MulticallProxyWaiter leak." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:299 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:345 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "received %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:422 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:423 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:280 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:459 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:594 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:597 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:631 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:640 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:668 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint."
+msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:166 +#: cinder/openstack/common/rpc/impl_qpid.py:164 +msgid "Failed to process message... skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:477 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:499 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:536 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:552 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:606 +#: cinder/openstack/common/rpc/impl_qpid.py:507 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:624 +#: cinder/openstack/common/rpc/impl_qpid.py:522 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:628 +#: cinder/openstack/common/rpc/impl_qpid.py:526 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:667 +#: cinder/openstack/common/rpc/impl_qpid.py:561 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:84 +#, python-format +msgid "Invalid value for qpid_topology_version: %d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:455 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:461 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:474 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:534 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:101 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:136 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:137 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:138 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:146 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:158 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:200 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:205 +msgid "You cannot send on this socket." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:267 +#, python-format +msgid "Running func with context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:305 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:387 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:437 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:443 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:475 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:481 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:497 +#, python-format +msgid "Required IPC directory does not exist at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:506 +#, python-format +msgid "Permission denied to IPC directory at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:509 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:543 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:562 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:590 +msgid "Skipping topic registration. Already registered." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:597 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:649 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:662 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:675 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:678 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:681 +#, python-format +msgid "Received message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:682 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:691 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:698 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:721 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:724 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:728 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:731 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:771 +#, python-format +msgid "topic is %s." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:815 +#, python-format +msgid "rpc_zmq_matchmaker = %(orig)s is deprecated; use %(new)s instead" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#: cinder/openstack/common/rpc/matchmaker_ring.py:79 +#: cinder/openstack/common/rpc/matchmaker_ring.py:97 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:54 +#, python-format +msgid "extra_spec requirement '%(req)s' does not match '%(cap)s'" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:67 +#, python-format +msgid "%(host_state)s fails resource_type extra_specs requirements" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:43 +msgid "Re-scheduling is disabled." +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:52 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/scheduler/driver.py:69 +msgid "Must implement host_passes_filters" +msgstr "" + +#: cinder/scheduler/driver.py:74 +msgid "Must implement find_retype_host" +msgstr "" + +#: cinder/scheduler/driver.py:78 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:82 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:98 +#, python-format +msgid "cannot place volume %(id)s on %(host)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:114 +#, python-format +msgid "No valid hosts for volume %(id)s with type %(type)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:125 +#, python-format +msgid "" +"Current host not valid for volume %(id)s with type %(type)s, migration " +"not allowed" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:156 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:174 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:207 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:259 +#, python-format +msgid "Filtered %s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:276 +#, python-format +msgid "Choosing %s" +msgstr "" + +#: cinder/scheduler/host_manager.py:264 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:269 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:294 +#, python-format +msgid "volume service is down or disabled. (host: %s)" +msgstr "" + +#: cinder/scheduler/manager.py:63 +msgid "" +"ChanceScheduler and SimpleScheduler have been deprecated due to lack of " +"support for advanced features like: volume types, volume encryption, QoS " +"etc. These two schedulers can be fully replaced by FilterScheduler with " +"certain combination of filters and weighers." 
+msgstr "" + +#: cinder/scheduler/manager.py:98 cinder/scheduler/manager.py:100 +msgid "Failed to create scheduler manager volume flow" +msgstr "" + +#: cinder/scheduler/manager.py:159 +msgid "New volume type not specified in request_spec." +msgstr "" + +#: cinder/scheduler/manager.py:174 +#, python-format +msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." +msgstr "" + +#: cinder/scheduler/manager.py:192 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:68 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%s'" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:43 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:57 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:53 +msgid "No volume_id provided to populate a request_spec from" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:116 +#, python-format +msgid "Failed to schedule_create_volume: %(cause)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:135 +#, python-format +msgid "Failed notifying on %(topic)s payload %(payload)s" +msgstr "" + +#: cinder/tests/fake_driver.py:57 cinder/volume/driver.py:784 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:76 cinder/volume/driver.py:884 +#, python-format +msgid "FAKE ISER: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:97 +msgid "local_path not implemented" +msgstr "" + +#: cinder/tests/fake_driver.py:124 cinder/tests/fake_driver.py:129 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:70 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:78 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:94 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:97 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:58 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_netapp_nfs.py:360 +#, python-format +msgid "Share %(share)s and file name %(file_name)s" +msgstr "" + +#: cinder/tests/test_rbd.py:768 cinder/volume/drivers/rbd.py:175 +msgid "flush() not supported in this version of librbd" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:260 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1507 +#, python-format +msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1510 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1515 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:60 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:61 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: 
cinder/tests/test_xiv_ds8k.py:102 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/tests/api/contrib/test_backups.py:741 +msgid "Invalid input" +msgstr "" + +#: cinder/tests/integrated/test_login.py:29 +#, python-format +msgid "volume: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:32 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:42 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:50 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:58 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:100 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:103 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:121 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:148 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:159 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:166 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/transfer/api.py:68 +msgid "Volume in unexpected state" +msgstr "" + +#: cinder/transfer/api.py:102 cinder/volume/api.py:367 +msgid "status must be available" +msgstr "" + +#: cinder/transfer/api.py:119 +#, python-format +msgid "Failed to create transfer record for %s" +msgstr "" + +#: cinder/transfer/api.py:136 +#, python-format +msgid "Attempt to transfer %s with invalid auth key." +msgstr "" + +#: cinder/transfer/api.py:156 cinder/volume/flows/api/create_volume.py:508 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/transfer/api.py:182 +#, python-format +msgid "Failed to update quota donating volume transfer id %s" +msgstr "" + +#: cinder/transfer/api.py:199 +#, python-format +msgid "Volume %s has been transferred."
+msgstr "" + +#: cinder/volume/api.py:143 +#, python-format +msgid "Unable to query if %s is in the availability zone set" +msgstr "" + +#: cinder/volume/api.py:171 cinder/volume/api.py:173 +msgid "Failed to create api volume flow" +msgstr "" + +#: cinder/volume/api.py:202 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:214 +#, python-format +msgid "Volume status must be available or error, but current status is: %s" +msgstr "" + +#: cinder/volume/api.py:224 +msgid "Volume cannot be deleted while migrating" +msgstr "" + +#: cinder/volume/api.py:229 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:285 cinder/volume/api.py:350 +#: cinder/volume/qos_specs.py:240 cinder/volume/volume_types.py:67 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:370 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:377 +msgid "status must be in-use to detach" +msgstr "" + +#: cinder/volume/api.py:388 +msgid "Volume status must be available to reserve" +msgstr "" + +#: cinder/volume/api.py:464 +msgid "Snapshot cannot be created while volume is migrating" +msgstr "" + +#: cinder/volume/api.py:468 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:490 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:502 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:553 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/api.py:581 cinder/volume/flows/api/create_volume.py:208 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:585 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:589 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:720 cinder/volume/api.py:772 +msgid "Volume status must be available/in-use." +msgstr "" + +#: cinder/volume/api.py:723 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/api.py:752 +msgid "Volume status must be available to extend." +msgstr "" + +#: cinder/volume/api.py:757 +#, python-format +msgid "" +"New size for extend must be greater than current size. (current: " +"%(size)s, extended: %(new_size)s)" +msgstr "" + +#: cinder/volume/api.py:778 +msgid "Volume is already part of an active migration" +msgstr "" + +#: cinder/volume/api.py:784 +msgid "volume must not have snapshots" +msgstr "" + +#: cinder/volume/api.py:797 +#, python-format +msgid "No available service named %s" +msgstr "" + +#: cinder/volume/api.py:803 +msgid "Destination host must be different than current host" +msgstr "" + +#: cinder/volume/api.py:833 +msgid "Source volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:837 +msgid "Destination volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:842 +#, python-format +msgid "Destination has migration_status %(stat)s, expected %(exp)s." +msgstr "" + +#: cinder/volume/api.py:853 +msgid "Volume status must be available to update readonly flag." 
+msgstr "" + +#: cinder/volume/api.py:862 +#, python-format +msgid "Unable to update type due to incorrect status on volume: %s" +msgstr "" + +#: cinder/volume/api.py:868 +#, python-format +msgid "Volume %s is already part of an active migration." +msgstr "" + +#: cinder/volume/api.py:874 +#, python-format +msgid "migration_policy must be 'on-demand' or 'never', passed: %s" +msgstr "" + +#: cinder/volume/api.py:887 +#, python-format +msgid "Invalid volume_type passed: %s" +msgstr "" + +#: cinder/volume/api.py:900 +#, python-format +msgid "New volume_type same as original: %s" +msgstr "" + +#: cinder/volume/api.py:915 +msgid "Retype cannot change encryption requirements" +msgstr "" + +#: cinder/volume/api.py:927 +msgid "Retype cannot change front-end qos specs for in-use volumes" +msgstr "" + +#: cinder/volume/driver.py:189 cinder/volume/drivers/netapp/nfs.py:174 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:282 +#, python-format +msgid "copy_data_between_volumes %(src)s -> %(dest)s." +msgstr "" + +#: cinder/volume/driver.py:295 cinder/volume/driver.py:309 +#, python-format +msgid "Failed to attach volume %(vol)s" +msgstr "" + +#: cinder/volume/driver.py:327 +#, python-format +msgid "Failed to copy volume %(src)s to %(dest)d" +msgstr "" + +#: cinder/volume/driver.py:340 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:358 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:394 +#, python-format +msgid "Unable to access the backend storage via the path %(path)s." +msgstr "" + +#: cinder/volume/driver.py:433 +#, python-format +msgid "Creating a new backup for volume %s." +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Restoring backup %(backup)s to volume %(volume)s." +msgstr "" + +#: cinder/volume/driver.py:474 +msgid "Extend volume not implemented" +msgstr "" + +#: cinder/volume/driver.py:533 cinder/volume/drivers/emc/emc_smis_iscsi.py:113 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:546 +#, python-format +msgid "ISCSI discovery attempt failed for:%s" +msgstr "" + +#: cinder/volume/driver.py:548 +#, python-format +msgid "Error from iscsiadm -m discovery: %s" +msgstr "" + +#: cinder/volume/driver.py:595 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:599 cinder/volume/drivers/emc/emc_smis_iscsi.py:156 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:696 +msgid "The volume driver requires the iSCSI initiator name in the connector." +msgstr "" + +#: cinder/volume/driver.py:726 cinder/volume/driver.py:845 +#: cinder/volume/drivers/eqlx.py:247 cinder/volume/drivers/lvm.py:359 +#: cinder/volume/drivers/zadara.py:650 +#: cinder/volume/drivers/emc/emc_smis_common.py:859 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:235 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:602 +#: cinder/volume/drivers/netapp/iscsi.py:1032 +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/nexenta/iscsi.py:538 +#: cinder/volume/drivers/windows/windows.py:205 +msgid "Updating volume stats" +msgstr "" + +#: cinder/volume/driver.py:924 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:203 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." 
+msgstr "" + +#: cinder/volume/manager.py:209 +msgid "" +"ThinLVMVolumeDriver is deprecated, please configure LVMISCSIDriver and " +"lvm_type=thin. Continuing with those settings." +msgstr "" + +#: cinder/volume/manager.py:228 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)" +msgstr "" + +#: cinder/volume/manager.py:235 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s" +msgstr "" + +#: cinder/volume/manager.py:244 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:257 +#, python-format +msgid "Failed to re-export volume %s: setting to error state" +msgstr "" + +#: cinder/volume/manager.py:264 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:271 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:273 +#, python-format +msgid "" +"Error encountered during re-exporting phase of driver initialization: " +"%(name)s" +msgstr "" + +#: cinder/volume/manager.py:283 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:328 cinder/volume/manager.py:330 +msgid "Failed to create manager volume flow" +msgstr "" + +#: cinder/volume/manager.py:374 cinder/volume/manager.py:391 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:380 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:389 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:394 +#, python-format +msgid "Cannot delete volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:422 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:427 +#, python-format +msgid "volume %s: glance metadata deleted" +msgstr "" + +#: cinder/volume/manager.py:430 +#, python-format +msgid "no glance metadata found for volume %s" +msgstr "" + +#: cinder/volume/manager.py:434 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:451 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:462 +#, python-format +msgid "snapshot %(snap_id)s: creating" +msgstr "" + +#: cinder/volume/manager.py:490 +#, python-format +msgid "" +"Failed updating %(snapshot_id)s metadata using the provided volumes " +"%(volume_id)s metadata" +msgstr "" + +#: cinder/volume/manager.py:496 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:508 cinder/volume/manager.py:518 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:526 +#, python-format +msgid "Cannot delete snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:556 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:559 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:579 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:583 +msgid "being attached by another host" +msgstr "" + +#: cinder/volume/manager.py:587 +msgid "being attached by different mode" +msgstr "" + +#: cinder/volume/manager.py:590 +msgid "status must be available or attaching" +msgstr "" + +#: cinder/volume/manager.py:698 +#, python-format +msgid 
"Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "" + +#: cinder/volume/manager.py:760 +#, python-format +msgid "Unable to fetch connection information from backend: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:807 +#, python-format +msgid "Unable to terminate volume connection: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:854 +msgid "failed to create new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:857 +msgid "timeout creating new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:880 +#, python-format +msgid "Failed to copy volume %(vol1)s to %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:909 +#, python-format +msgid "" +"migrate_volume_completion: completing migration for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:921 +#, python-format +msgid "" +"migrate_volume_completion is cleaning up an error for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:940 +#, python-format +msgid "Failed to delete migration source vol %(vol)s: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:976 +#, python-format +msgid "volume %s: calling driver migrate_volume" +msgstr "" + +#: cinder/volume/manager.py:1016 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/manager.py:1024 +#, python-format +msgid "" +"Unable to update stats, %(driver_name)s -%(driver_version)s " +"%(config_group)s driver is uninitialized." +msgstr "" + +#: cinder/volume/manager.py:1044 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/manager.py:1091 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to extend volume by %(s_size)sG, " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/manager.py:1103 +#, python-format +msgid "volume %s: extending" +msgstr "" + +#: cinder/volume/manager.py:1105 +#, python-format +msgid "volume %s: extended successfully" +msgstr "" + +#: cinder/volume/manager.py:1107 +#, python-format +msgid "volume %s: Error trying to extend volume" +msgstr "" + +#: cinder/volume/manager.py:1169 +msgid "Failed to update usages while retyping volume." +msgstr "" + +#: cinder/volume/manager.py:1170 +msgid "Failed to get old volume type quota reservations" +msgstr "" + +#: cinder/volume/manager.py:1190 +#, python-format +msgid "Volume %s: retyped succesfully" +msgstr "" + +#: cinder/volume/manager.py:1193 +#, python-format +msgid "" +"Volume %s: driver error when trying to retype, falling back to generic " +"mechanism." +msgstr "" + +#: cinder/volume/manager.py:1204 +msgid "Retype requires migration but is not allowed." +msgstr "" + +#: cinder/volume/manager.py:1212 +msgid "Volume must not have snapshots." 
+msgstr "" + +#: cinder/volume/qos_specs.py:57 +#, python-format +msgid "Valid consumer of QoS specs are: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:84 cinder/volume/qos_specs.py:105 +#: cinder/volume/qos_specs.py:155 cinder/volume/qos_specs.py:197 +#: cinder/volume/qos_specs.py:211 cinder/volume/qos_specs.py:225 +#: cinder/volume/volume_types.py:43 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:123 cinder/volume/qos_specs.py:140 +#: cinder/volume/qos_specs.py:272 cinder/volume/volume_types.py:52 +#: cinder/volume/volume_types.py:99 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/qos_specs.py:156 +#, python-format +msgid "Failed to get all associations of qos specs %s" +msgstr "" + +#: cinder/volume/qos_specs.py:189 +#, python-format +msgid "" +"Type %(type_id)s is already associated with another qos specs: " +"%(qos_specs_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:198 +#, python-format +msgid "Failed to associate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:212 +#, python-format +msgid "Failed to disassociate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:226 +#, python-format +msgid "Failed to disassociate qos specs %s." +msgstr "" + +#: cinder/volume/qos_specs.py:284 cinder/volume/volume_types.py:111 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/utils.py:144 +#, python-format +msgid "" +"Incorrect value error: %(blocksize)s, it may indicate that " +"'volume_dd_blocksize' was configured incorrectly. Fall back to default." +msgstr "" + +#: cinder/volume/volume_types.py:130 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:131 +#: cinder/volume/drivers/block_device.py:143 cinder/volume/drivers/lvm.py:654 +#: cinder/volume/drivers/lvm.py:669 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:157 cinder/volume/drivers/lvm.py:687 +#, python-format +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:183 cinder/volume/drivers/lvm.py:483 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:200 cinder/volume/drivers/lvm.py:504 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:272 cinder/volume/drivers/lvm.py:227 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:287 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:311 cinder/volume/drivers/lvm.py:300 +#: cinder/volume/drivers/zadara.py:509 cinder/volume/drivers/nexenta/nfs.py:189 +#, python-format +msgid "Creating clone of volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:380 +msgid "No free disk" +msgstr "" + +#: cinder/volume/drivers/block_device.py:393 +msgid "No big enough free disk" +msgstr "" + +#: cinder/volume/drivers/coraid.py:84 +#, python-format +msgid "Invalid ESM url scheme \"%s\". Supported https only." +msgstr "" + +#: cinder/volume/drivers/coraid.py:111 +msgid "Invalid REST handle name. Expected path." 
+msgstr "" + +#: cinder/volume/drivers/coraid.py:134 +#, python-format +msgid "Call to json.loads() failed: %(ex)s. Response: %(resp)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:224 +msgid "Session is expired. Relogin on ESM." +msgstr "" + +#: cinder/volume/drivers/coraid.py:244 +msgid "Reply is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:246 +msgid "Error message is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:284 +#, python-format +msgid "Coraid Appliance ping failed: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:297 +#, python-format +msgid "Volume \"%(name)s\" created with VSX LUN \"%(lun)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:311 +#, python-format +msgid "Volume \"%s\" deleted." +msgstr "" + +#: cinder/volume/drivers/coraid.py:315 +#, python-format +msgid "Resize volume \"%(name)s\" to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:319 +#, python-format +msgid "Repository for volume \"%(name)s\" found: \"%(repo)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:333 +#, python-format +msgid "Volume \"%(name)s\" resized. New size is %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:385 +msgid "Cannot create clone volume in different repository." +msgstr "" + +#: cinder/volume/drivers/coraid.py:505 +#, python-format +msgid "Initialize connection %(shelf)s/%(lun)s for %(name)s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:139 +#, python-format +msgid "" +"CLI output\n" +"%s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:154 +msgid "Reading CLI MOTD" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:158 +#, python-format +msgid "Setting CLI terminal width: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:162 +#, python-format +msgid "Sending CLI command: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:169 +msgid "Error executing EQL command" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:199 +#, python-format +msgid "EQL-driver: executing \"%s\"" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:208 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:383 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:215 cinder/volume/drivers/san/san.py:149 +#, python-format +msgid "Error running SSH command: %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:282 +#, python-format +msgid "Volume %s does not exist, it may have already been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:300 +#, python-format +msgid "EQL-driver: Setup is complete, group IP is %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:304 +msgid "Failed to setup the Dell EqualLogic driver" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:320 +#, python-format +msgid "Failed to create volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:329 +#, python-format +msgid "Volume %s was not found while trying to delete it" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:333 +#, python-format +msgid "Failed to delete volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:348 +#, python-format +msgid "Failed to create snapshot of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:361 +#, python-format +msgid "Failed to create volume from snapshot %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:374 +#, python-format +msgid "Failed to create clone of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:384 +#, python-format +msgid "Failed to delete snapshot %(snap)s of volume %(vol)s" +msgstr "" + +#: 
cinder/volume/drivers/eqlx.py:405 +#, python-format +msgid "Failed to initialize connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:415 +#, python-format +msgid "Failed to terminate connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:436 +#, python-format +msgid "Volume %s is not found, it may have been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:440 +#, python-format +msgid "Failed to ensure export of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:459 +#, python-format +msgid "Failed to extend_volume %(name)s from %(current_size)sGB to %(new_size)sGB" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:86 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:91 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:103 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:161 +#, python-format +msgid "Cloning volume %(src)s to volume %(dst)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:166 +msgid "Volume status must be 'available'." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:202 cinder/volume/drivers/nfs.py:122 +#: cinder/volume/drivers/netapp/nfs.py:753 +#, python-format +msgid "casted to %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:216 +msgid "Snapshot status must be \"available\" to clone." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:238 +#, python-format +msgid "snapshot: %(snap)s, volume: %(vol)s, volume_size: %(size)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:257 +#, python-format +msgid "will copy from snapshot at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:275 cinder/volume/drivers/nfs.py:172 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:373 +#, python-format +msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:403 +#, python-format +msgid "nova call result: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:405 +msgid "Call to Nova to create snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:427 +msgid "Nova returned \"error\" status while creating snapshot." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:431 +#, python-format +msgid "Status of snapshot %(id)s is now %(status)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:444 +#, python-format +msgid "Timed out while waiting for Nova update for creation of snapshot %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:456 +#, python-format +msgid "create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:457 +#, python-format +msgid "volume id: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:532 +msgid "'active' must be present when writing snap_info." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:562 +#, python-format +msgid "deleting snapshot %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:566 +msgid "Volume status must be \"available\" or \"in-use\"." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:582 +#, python-format +msgid "" +"Snapshot record for %s is not present, allowing snapshot_delete to " +"proceed."
+msgstr "" + +#: cinder/volume/drivers/glusterfs.py:587 +#, python-format +msgid "snapshot_file for this snap is %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:608 +#, python-format +msgid "No base file found for %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:625 +#, python-format +msgid "No %(base_id)s found for %(file)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:680 +#, python-format +msgid "No file found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:690 +#, python-format +msgid "No snap found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:701 +#, python-format +msgid "No file depends on %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:727 +#, python-format +msgid "Check condition failed: %s expected to be None." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:778 +msgid "Call to Nova delete snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:796 +#, python-format +msgid "status of snapshot %s is still \"deleting\"... waiting" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:802 +#, python-format +msgid "Unable to delete snapshot %(id)s, status: %(status)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:815 +#, python-format +msgid "Timed out while waiting for Nova update for deletion of snapshot %(id)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:904 +#, python-format +msgid "%s must be a valid raw or qcow2 image." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:967 +msgid "Extend volume is only supported for this driver when no snapshots exist." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:975 +#, python-format +msgid "Unrecognized backing format: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:990 +#, python-format +msgid "creating new volume at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:993 +#, python-format +msgid "file already exists at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1019 cinder/volume/drivers/nfs.py:159 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1021 +#, python-format +msgid "Available shares: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1038 +#, python-format +msgid "" +"GlusterFS share at %(dir)s is not writable by the Cinder volume service. " +"Snapshot operations will not be supported." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:96 +#, python-format +msgid "GPFS is not active. Detailed output: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:97 +#, python-format +msgid "GPFS is not running - state: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:140 +msgid "Option gpfs_mount_point_base is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:147 +msgid "Option gpfs_images_share_mode is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:153 +msgid "Option gpfs_images_dir is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:160 +#, python-format +msgid "" +"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " +"belong to different file systems" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:169 +#, python-format +msgid "" +"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in " +"cluster daemon level %(cur)s - must be at least at level %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:183 +#, python-format +msgid "%s must be an absolute path." 
+msgstr "" + +#: cinder/volume/drivers/gpfs.py:188 +#, python-format +msgid "%s is not a directory." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:197 +#, python-format +msgid "" +"The GPFS filesystem %(fs)s is not at the required release level. Current" +" level is %(cur)s, must be at least %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:556 +#, python-format +msgid "Failed to resize volume %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:604 +#, python-format +msgid "mkfs failed on volume %(vol)s, error message was: %(err)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:637 +#, python-format +msgid "" +"%s cannot be accessed. Verify that GPFS is active and file system is " +"mounted." +msgstr "" + +#: cinder/volume/drivers/lvm.py:189 +#, python-format +msgid "Unable to delete due to existing snapshot for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:215 +#, python-format +msgid "Volume device file path %s does not exist." +msgstr "" + +#: cinder/volume/drivers/lvm.py:221 +#, python-format +msgid "Size for volume: %s not found, cannot secure delete." +msgstr "" + +#: cinder/volume/drivers/lvm.py:262 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:361 +#, python-format +msgid "Unable to update stats on non-initialized Volume Group: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:462 +#, python-format +msgid "Error creating iSCSI target, retrying creation for target: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:482 +#, python-format +msgid "volume_info:%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:518 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:519 cinder/volume/drivers/lvm.py:724 +#: cinder/volume/drivers/huawei/rest_common.py:1225 +#, python-format +msgid "%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:573 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/nfs.py:109 +msgid "Driver specific implementation needs to return mount_point_base." +msgstr "" + +#: cinder/volume/drivers/nfs.py:263 +#, python-format +msgid "Expected volume size was %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:264 +#, python-format +msgid " but size is now %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:361 +#, python-format +msgid "%s is already mounted" +msgstr "" + +#: cinder/volume/drivers/nfs.py:421 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:426 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/nfs.py:431 +#, python-format +msgid "NFS config 'nfs_oversub_ratio' invalid. Must be > 0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:439 +#, python-format +msgid "NFS config 'nfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:493 +#, python-format +msgid "Selected %s as target nfs share."
+msgstr "" + +#: cinder/volume/drivers/nfs.py:526 +#, python-format +msgid "%s is above nfs_used_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:529 +#, python-format +msgid "%s is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:532 +#, python-format +msgid "%s reserved space is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/rbd.py:160 +#, python-format +msgid "Invalid argument - whence=%s not supported" +msgstr "" + +#: cinder/volume/drivers/rbd.py:164 +msgid "Invalid argument" +msgstr "" + +#: cinder/volume/drivers/rbd.py:183 +msgid "fileno() not supported by RBD()" +msgstr "" + +#: cinder/volume/drivers/rbd.py:210 +#, python-format +msgid "error opening rbd image %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:259 +msgid "rados and rbd python libraries not found" +msgstr "" + +#: cinder/volume/drivers/rbd.py:265 +msgid "error connecting to ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:346 cinder/volume/drivers/sheepdog.py:178 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:377 +#, python-format +msgid "clone depth exceeds limit of %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:411 +#, python-format +msgid "maximum clone depth (%d) has been reached - flattening source volume" +msgstr "" + +#: cinder/volume/drivers/rbd.py:423 +#, python-format +msgid "flattening source volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:435 +#, python-format +msgid "creating snapshot='%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:445 +#, python-format +msgid "cloning '%(src_vol)s@%(src_snap)s' to '%(dest)s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:459 +msgid "clone created successfully" +msgstr "" + +#: cinder/volume/drivers/rbd.py:468 +#, python-format +msgid "creating volume '%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:484 +#, python-format +msgid "flattening %(pool)s/%(img)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:490 +#, python-format +msgid "cloning %(pool)s/%(img)s@%(snap)s to %(dst)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:527 +msgid "volume has no backup snaps" +msgstr "" + +#: cinder/volume/drivers/rbd.py:550 +#, python-format +msgid "volume %s is not a clone" +msgstr "" + +#: cinder/volume/drivers/rbd.py:568 +#, python-format +msgid "deleting parent snapshot %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:579 +#, python-format +msgid "deleting parent %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:593 +#, python-format +msgid "volume %s no longer exists in backend" +msgstr "" + +#: cinder/volume/drivers/rbd.py:609 +msgid "volume has clone snapshot(s)" +msgstr "" + +#: cinder/volume/drivers/rbd.py:625 +#, python-format +msgid "deleting rbd volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:629 +msgid "" +"ImageBusy error raised while deleting rbd volume. This may have been " +"caused by a connection from a client that has crashed and, if so, may be " +"resolved by retrying the delete after 30 seconds has elapsed." 
+msgstr "" + +#: cinder/volume/drivers/rbd.py:642 +msgid "volume is a clone so cleaning references" +msgstr "" + +#: cinder/volume/drivers/rbd.py:696 +#, python-format +msgid "connection data: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:705 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:709 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:712 +msgid "Not an rbd snapshot" +msgstr "" + +#: cinder/volume/drivers/rbd.py:724 +#, python-format +msgid "not cloneable: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:728 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:733 +msgid "rbd image clone requires image format to be 'raw' but image {0} is '{1}'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:747 +#, python-format +msgid "Unable to open image %(loc)s: %(err)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:817 +msgid "volume backup complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:830 +msgid "volume restore complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:840 cinder/volume/drivers/sheepdog.py:195 +#, python-format +msgid "Failed to Extend Volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:845 cinder/volume/drivers/sheepdog.py:200 +#: cinder/volume/drivers/windows/windows.py:223 +#, python-format +msgid "Extend volume from %(old_size)s GB to %(new_size)s GB." +msgstr "" + +#: cinder/volume/drivers/scality.py:67 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:78 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:84 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:105 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:139 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:59 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:64 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:144 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:151 +#, python-format +msgid "" +"Failed to make httplib connection to SolidFire Cluster: %s (verify san_ip " +"settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:154 +#, python-format +msgid "Failed to make httplib connection: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:161 +#, python-format +msgid "" +"Request to SolidFire cluster returned bad status: %(status)s / %(reason)s" +" (check san_login/san_password settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:166 +#, python-format +msgid "HTTP request failed, with status: %(status)s and reason: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:177 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:183 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:187 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:189 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:195 +#, python-format +msgid "Detected 
xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:202 +#: cinder/volume/drivers/solidfire.py:271 +#: cinder/volume/drivers/solidfire.py:366 +#, python-format +msgid "API response: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:222 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:253 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:315 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:398 +msgid "Failed to get model update from clone" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:410 +#, python-format +msgid "Failed volume create: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:425 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:460 +#, python-format +msgid "Failed to get SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:469 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:478 +#, python-format +msgid "Volume %s, not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:481 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:550 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:554 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:556 +msgid "This usually means the volume was never successfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:569 +#, python-format +msgid "Failed to delete SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:572 +#: cinder/volume/drivers/solidfire.py:646 +#: cinder/volume/drivers/solidfire.py:709 +#: cinder/volume/drivers/solidfire.py:734 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:575 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:579 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:587 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:638 +msgid "Entering SolidFire extend_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:660 +msgid "Leaving SolidFire extend_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:665 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:673 +msgid "Failed to get updated stats" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:703 +#: cinder/volume/drivers/solidfire.py:728 +msgid "Entering SolidFire attach_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:773 +msgid "Leaving SolidFire transfer volume" +msgstr "" + +#: cinder/volume/drivers/zadara.py:236 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:260 +#, python-format +msgid "Operation completed. 
%(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:357 +#, python-format +msgid "Pool %(name)s: %(total)sGB total, %(free)sGB free" +msgstr "" + +#: cinder/volume/drivers/zadara.py:408 cinder/volume/drivers/zadara.py:531 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:438 +#, python-format +msgid "Create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:445 cinder/volume/drivers/zadara.py:490 +#: cinder/volume/drivers/zadara.py:516 +#, python-format +msgid "Volume %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:456 +#, python-format +msgid "Delete snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:464 +#, python-format +msgid "snapshot: original volume %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:472 +#, python-format +msgid "snapshot: snapshot %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:483 +#, python-format +msgid "Creating volume from snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:496 +#, python-format +msgid "Snapshot %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:614 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:40 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:79 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:83 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:91 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:98 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:107 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:115 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:130 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:137 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:144 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:152 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:157 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:167 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:177 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:188 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:197 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:218 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:230 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:241 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:257 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:266 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:278 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:287 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:292 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:302 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:312 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:321 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:342 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. 
Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:354 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:365 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:381 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:390 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:402 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:411 +msgid "Entering delete_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:413 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:420 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:430 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:438 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:442 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigService: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:456 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:465 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:472 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:476 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:488 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:497 +#: cinder/volume/drivers/emc/emc_smis_common.py:567 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:502 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:518 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:527 +#, python-format +msgid "" +"Error Create Snapshot: %(snapshot)s Volume: %(volume)s Error: " +"%(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:535 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:541 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:545 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:551 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:559 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:574 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:590 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:599 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:611 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:621 +#, python-format +msgid "Create export: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:626 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:648 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:663 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:674 +#, python-format +msgid "Error mapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:678 +#, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:694 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:707 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:711 +#, python-format +msgid "HidePaths for volume %s completed successfully." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:724 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:739 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:744 +#, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:757 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:770 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:775 +#, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:781 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:790 +#: cinder/volume/drivers/emc/emc_smis_common.py:820 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:804 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:810 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:834 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:840 +#, python-format +msgid "Volume %s is already mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:852 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:884 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:887 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:903 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:906 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:928 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:948 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:952 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:959 +msgid "Cannot connect to ECOM server" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:971 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:984 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:997 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1010 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1054 +#, python-format +msgid "Pool %(storage_type)s is not found." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1060 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1066 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1082 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1114 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1117 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1130 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1158 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1184 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1188 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1248 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1289 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1302 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1314 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1326 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1361 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1404 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1409 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1419 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1441 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1463 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1491 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1520 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1526 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1538 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1548 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1550 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1566 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:152 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:161 +#, python-format +msgid "Cannot find device number for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:191 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:198 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:215 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:70 +#, python-format +msgid "Range: start LU: %(start)s, end LU: %(end)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:84 +#, python-format +msgid "setting LU upper (end) limit to %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:92 +#, python-format +msgid "%(element)s: %(val)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:103 cinder/volume/drivers/hds/hds.py:105 +#, python-format +msgid "XML exception reading parameter: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:178 +#, python-format +msgid "portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:197 +#, python-format +msgid "No configuration found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:250 +#, python-format +msgid "HDP not found: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:289 +#, python-format +msgid "iSCSI portal not found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:327 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:355 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is cloned." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:372 +#, python-format +msgid "LUN %(lun)s extended to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:395 +#, python-format +msgid "delete lun %(lun)s on %(name)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:480 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created from snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:503 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is created as snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:522 +#, python-format +msgid "LUN %s is deleted." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:57 +msgid "_instantiate_driver: configuration not found." 
+msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:64 +#, python-format +msgid "" +"_instantiate_driver: Loading %(protocol)s driver for Huawei OceanStor " +"%(product)s series storage arrays." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:84 +#, python-format +msgid "" +"\"Product\" or \"Protocol\" is illegal. \"Product\" should be set to " +"either T, Dorado or HVS. \"Protocol\" should be set to either iSCSI or " +"FC. Product: %(product)s Protocol: %(protocol)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:74 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s host: %(host)s initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:92 +#: cinder/volume/drivers/huawei/huawei_t.py:461 +#, python-format +msgid "initialize_connection: Target FC ports WWNS: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:101 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(ini)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:159 +#: cinder/volume/drivers/huawei/rest_common.py:1278 +#, python-format +msgid "" +"_get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " +"check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:206 +#: cinder/volume/drivers/huawei/rest_common.py:1083 +#, python-format +msgid "_get_tgt_iqn: iSCSI IP is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:234 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:248 +#, python-format +msgid "" +"_get_iscsi_tgt_port_info: Failed to get iSCSI port info. Please make sure" +" the iSCSI port IP %s is configured in array." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:323 +#: cinder/volume/drivers/huawei/huawei_t.py:552 +#, python-format +msgid "" +"terminate_connection: volume: %(vol)s, host: %(host)s, connector: " +"%(initiator)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:351 +#, python-format +msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:436 +msgid "validate_connector: The FC driver requires thewwpns in the connector." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:443 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:578 +#, python-format +msgid "_remove_fc_ports: FC port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:40 +#, python-format +msgid "parse_xml_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:129 +#, python-format +msgid "_get_host_os_type: Host %(ip)s OS type is %(os)s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:59 +#, python-format +msgid "HVS Request URL: %(url)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:60 +#, python-format +msgid "HVS Request Data: %(data)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:73 +#, python-format +msgid "HVS Response Data: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:75 +#, python-format +msgid "Bad response from server: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:82 +msgid "JSON transfer error" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:102 +#, python-format +msgid "Login error, reason is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:166 +#, python-format +msgid "" +"%(err)s\n" +"result: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:173 +#, python-format +msgid "%s \"data\" was not in result." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:208 +msgid "Can't find the Qos policy in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:246 +msgid "Can't find lun or lun group in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:280 +#, python-format +msgid "Invalid resource pool: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:298 +#, python-format +msgid "Get pool info error, pool name is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:327 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:354 +#, python-format +msgid "_stop_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:474 +#, python-format +msgid "" +"_mapping_hostgroup_and_lungroup: lun_group: %(lun_group)sview_id: " +"%(view_id)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:511 +#: cinder/volume/drivers/huawei/rest_common.py:543 +#, python-format +msgid "initiator name:%(initiator_name)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:527 +#, python-format +msgid "host lun id is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:553 +#, python-format +msgid "the free wwns %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:574 +#, python-format +msgid "the fc server properties is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:688 +#, python-format +msgid "JSON transfer data error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:874 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:937 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:964 +#, python-format +msgid "" +"PrefetchType config is wrong. PrefetchType must in 1,2,3,4. fetchtype " +"is:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:970 +msgid "Use default prefetch fetchtype. Prefetch fetchtype:Intelligent." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:982 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal.LUNcopy name: " +"%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1056 +#, python-format +msgid "" +"_get_iscsi_port_info: Failed to get iscsi port info through config IP " +"%(ip)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1101 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1124 +#, python-format +msgid "_parse_volume_type: type id: %(type_id)s config parameter is: %(params)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1157 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the configuration file " +"%(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1162 +#, python-format +msgid "The config parameters are: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1239 +#: cinder/volume/drivers/huawei/ssh_common.py:118 +#: cinder/volume/drivers/huawei/ssh_common.py:1265 +#, python-format +msgid "_check_conf_file: Config file invalid. %s must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1246 +#: cinder/volume/drivers/huawei/ssh_common.py:125 +msgid "_check_conf_file: Config file invalid. StoragePool must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1256 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1300 +msgid "Can not find lun in array" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:54 +#, python-format +msgid "ssh_read: Read SSH timeout. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:70 +msgid "No response message. Please check system status." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:101 +#: cinder/volume/drivers/huawei/ssh_common.py:1249 +msgid "do_setup" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:135 +#: cinder/volume/drivers/huawei/ssh_common.py:1287 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType is invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:169 +#, python-format +msgid "_get_login_info: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:224 +#, python-format +msgid "create_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:242 +#, python-format +msgid "" +"_name_translate: Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:279 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the element in configuration " +"file %(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:373 +#: cinder/volume/drivers/huawei/ssh_common.py:1451 +#, python-format +msgid "LUNType must be \"Thin\" or \"Thick\". LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:395 +msgid "" +"_parse_conf_lun_params: Use default prefetch type. 
Prefetch type: " +"Intelligent" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:421 +#, python-format +msgid "" +"_get_maximum_capacity_pool_id: Failed to get pool id. Please check config" +" file and make sure the StoragePool %s is created in storage array." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:436 +#, python-format +msgid "CLI command: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:466 +#, python-format +msgid "" +"_execute_cli: Can not connect to IP %(old)s, try to connect to the other " +"IP %(new)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:501 +#, python-format +msgid "_execute_cli: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:511 +#, python-format +msgid "delete_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:516 +#, python-format +msgid "delete_volume: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:570 +#, python-format +msgid "" +"create_volume_from_snapshot: snapshot name: %(snapshot)s, volume name: " +"%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:580 +#, python-format +msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:650 +#, python-format +msgid "_wait_for_luncopy: LUNcopy %(luncopyname)s status is %(status)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:688 +#, python-format +msgid "create_cloned_volume: src volume: %(src)s, tgt volume: %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:697 +#, python-format +msgid "Source volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:739 +#, python-format +msgid "" +"extend_volume: extended volume name: %(extended_name)s new added volume " +"name: %(added_name)s new added volume size: %(added_size)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:747 +#, python-format +msgid "extend_volume: volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:779 +#, python-format +msgid "create_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:785 +msgid "create_snapshot: Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:792 +#, python-format +msgid "create_snapshot: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:855 +#, python-format +msgid "delete_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:865 +#, python-format +msgid "" +"delete_snapshot: Can not delete snapshot %s for it is a source LUN of " +"LUNCopy." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:873 +#, python-format +msgid "delete_snapshot: Snapshot %(snap)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:916 +#, python-format +msgid "" +"%(func)s: %(msg)s\n" +"CLI command: %(cmd)s\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:933 +#, python-format +msgid "map_volume: Volume %s was not found." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1079 +#, python-format +msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1102 +#, python-format +msgid "remove_map: Host %s does not exist." 
+msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1106 +#, python-format +msgid "remove_map: Volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1119 +#, python-format +msgid "remove_map: No map between host %(host)s and volume %(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1138 +#, python-format +msgid "" +"_delete_map: There are IOs accessing the system. Retry to delete host map" +" %(mapid)s 10s later." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1146 +#, python-format +msgid "" +"_delete_map: Failed to delete host map %(mapid)s.\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1185 +msgid "_update_volume_stats: Updating volume stats." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1277 +msgid "_check_conf_file: Config file invalid. StoragePool must be specified." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1311 +msgid "" +"_get_device_type: The driver only supports Dorado5100 and Dorado 2100 G2 " +"now." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1389 +#, python-format +msgid "" +"create_volume_from_snapshot: %(device)s does not support create volume " +"from snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1396 +#, python-format +msgid "create_cloned_volume: %(device)s does not support clone volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1404 +#, python-format +msgid "extend_volume: %(device)s does not support extend volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1413 +#, python-format +msgid "create_snapshot: %(device)s does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:132 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:142 +#, python-format +msgid "Failed getting details for pool %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:179 +msgid "do_setup: No configured nodes." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:182 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:186 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:190 +msgid "Unable to determine system name" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:193 +msgid "Unable to determine system id" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:196 +msgid "Unable to determine pool extent size" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:203 +#: cinder/volume/drivers/netapp/iscsi.py:122 +#: cinder/volume/drivers/netapp/nfs.py:639 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:157 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:209 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:217 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:225 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:235 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:254 +msgid "The connector does not contain the required information." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:277 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:297 +msgid "CHAP secret exists for host but CHAP is disabled" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:302 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:314 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:316 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:333 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:342 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:365 +msgid "" +"Could not get FC connection information for the host-volume connection. " +"Is the host configured properly for FC connections?" 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:380 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:385 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:403 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:411 +msgid "terminate_connection: Failed to get host name from connector." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:421 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:447 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:459 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:470 +#, python-format +msgid "enter: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:474 +msgid "extend_volume: Extending a volume with snapshots is not supported." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:481 +#, python-format +msgid "leave: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:497 +#, python-format +msgid "enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:523 +#, python-format +msgid "leave: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:540 +#, python-format +msgid "" +"enter: retype: id=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:581 +#, python-format +msgid "" +"exit: retype: ild=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:622 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:623 +msgid "_update_volume_stats: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:44 +#, python-format +msgid "Could not find key in output of command %(cmd)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:64 +#, python-format +msgid "Failed to get code level (%s)." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:86 +#, python-format +msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:143 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:165 +#, python-format +msgid "Failed to find host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:178 +#, python-format +msgid "enter: get_host_from_connector: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:207 +#, python-format +msgid "leave: get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:218 +#, python-format +msgid "enter: create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:224 +msgid "create_host: Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:235 +msgid "create_host: No initiators or wwpns supplied." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:265 +#, python-format +msgid "leave: create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:275 +#, python-format +msgid "enter: map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:301 +#, python-format +msgid "" +"leave: map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host " +"%(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:311 +#, python-format +msgid "enter: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:318 +#, python-format +msgid "unmap_vol_from_host: No mapping of volume %(vol_name)s to any host found." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:324 +#, python-format +msgid "" +"unmap_vol_from_host: Multiple mappings of volume %(vol_name)s found, no " +"host specified." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:336 +#, python-format +msgid "" +"unmap_vol_from_host: No mapping of volume %(vol_name)s to host %(host) " +"found." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:348 +#, python-format +msgid "leave: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:377 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:383 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:390 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:397 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:402 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:408 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:417 +#, python-format +msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:452 +msgid "Protocol must be specified as ' iSCSI' or ' FC'." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:495 +#, python-format +msgid "enter: create_vdisk: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:498 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:525 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping%(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:535 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within theallotted %(to)d " +"seconds timeout. Terminating." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:544 +#, python-format +msgid "" +"enter: run_flashcopy: execute FlashCopy from source %(source)s to target " +"%(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:552 +#, python-format +msgid "leave: run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:572 +#, python-format +msgid "Loopcall: _check_vdisk_fc_mappings(), vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:595 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:631 +#, python-format +msgid "Calling _ensure_vdisk_no_fc_mappings: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:639 +#, python-format +msgid "enter: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:641 +#, python-format +msgid "Tried to delete non-existant vdisk %s." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:645 +#, python-format +msgid "leave: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:649 +#, python-format +msgid "enter: create_copy: snapshot %(src)s to %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:654 +#, python-format +msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:669 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt)s from vdisk %(src)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:691 +msgid "migrate_volume started without a vdisk copy in the expected pool." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:743 +#, python-format +msgid "" +"Ignore change IO group as storage code level is %(code_level)s, below " +"then 6.4.0.0" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:35 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:211 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:244 +#, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:53 +#, python-format +msgid "Expected no output from CLI command %(cmd)s, got %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:65 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:256 +#, python-format +msgid "" +"Failed to parse CLI output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:142 +msgid "Must pass wwpn or host to lsfabric." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:158 +#, python-format +msgid "Did not find success message nor error for %(fun)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:163 +msgid "" +"storwize_svc_multihostmap_enabled is set to False, not allowing multi " +"host mapping." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:347 +#, python-format +msgid "Did not find expected key %(key)s in %(fun)s: %(raw)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:382 +#, python-format +msgid "" +"Unexpected CLI response: header/row mismatch. header: %(header)s, row: " +"%(row)s" +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:419 +#, python-format +msgid "No element by given name %s." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:440 +msgid "Not a valid value for NaElement." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:444 +msgid "NaElement name cannot be null." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:468 +msgid "Type cannot be converted into NaElement." 
+msgstr "" + +#: cinder/volume/drivers/netapp/common.py:75 +msgid "Required configuration not found" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:103 +#, python-format +msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:109 +#, python-format +msgid "Storage family %s is not supported" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:116 +#, python-format +msgid "No default storage protocol found for storage family %(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:123 +#, python-format +msgid "" +"Protocol %(storage_protocol)s is not supported for storage family " +"%(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:130 +#, python-format +msgid "" +"NetApp driver of family %(storage_family)s and protocol " +"%(storage_protocol)s loaded" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:139 +msgid "Only loading netapp drivers supported." +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:158 +#, python-format +msgid "" +"The configured NetApp driver is deprecated. Please refer the link to " +"resolve the issue '%s'." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:69 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:105 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:150 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:166 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:175 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:191 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:227 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:232 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:238 +#, python-format +msgid "Failed to get LUN target details for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:249 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:252 +#, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:290 +#, python-format +msgid "Snapshot %s deletion successful" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:310 +#: cinder/volume/drivers/netapp/iscsi.py:565 +#: cinder/volume/drivers/netapp/nfs.py:99 +#: cinder/volume/drivers/netapp/nfs.py:206 +#, python-format +msgid "Resizing %s failed. Cleaning volume." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:325 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:412 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:431 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:511 +msgid "Object is not a NetApp LUN." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:543 +#, python-format +msgid "Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:545 +#, python-format +msgid "Error getting lun attribute. Exception: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:600 +#, python-format +msgid "No need to extend volume %s as it is already the requested new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:606 +#, python-format +msgid "Resizing lun %s directly to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:633 +#, python-format +msgid "Lun %(path)s geometry failed. Message - %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:662 +#, python-format +msgid "Moving lun %(name)s to %(new_name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:677 +#, python-format +msgid "Resizing lun %s using sub clone to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:684 +#, python-format +msgid "%s cannot be sub clone resized as it is hosted on compressed volume" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:690 +#, python-format +msgid "%s cannot be sub clone resized as it contains no blocks." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:707 +#, python-format +msgid "Post clone resize lun %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:718 +#, python-format +msgid "Failure staging lun %s to tmp." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:723 +#, python-format +msgid "Failure moving new cloned lun to %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:727 +#, python-format +msgid "Failure deleting staged tmp lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:730 +#, python-format +msgid "Unknown exception in post clone resize lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:732 +#, python-format +msgid "Exception details: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:736 +msgid "Getting lun block count." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:741 +#, python-format +msgid "Failure getting lun info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:785 +#, python-format +msgid "Failed to get vol with required size and extra specs for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:796 +#, python-format +msgid "Error provisioning vol %(name)s on %(volume)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:841 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:982 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:986 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1087 +msgid "Cluster ssc is not updated. No volume stats found." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1149 +#: cinder/volume/drivers/netapp/nfs.py:1080 +msgid "Unsupported ONTAP version. ONTAP version 7.3.1 and above is supported." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1153 +#: cinder/volume/drivers/netapp/nfs.py:1084 +#: cinder/volume/drivers/netapp/utils.py:320 +msgid "Api version could not be determined." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1164 +#, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1273 +#, python-format +msgid "Error finding luns for volume %s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1390 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1393 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1456 +msgid "Volume refresh job already running. Returning..." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1462 +#, python-format +msgid "Error refreshing vol capacity. Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1470 +#, python-format +msgid "Refreshing capacity info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:104 +#: cinder/volume/drivers/netapp/nfs.py:211 +#, python-format +msgid "NFS file %s not discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:224 +#, python-format +msgid "Copied image to volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:230 +#, python-format +msgid "Registering image in cache %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:237 +#, python-format +msgid "" +"Exception while registering image %(image_id)s in cache. Exception: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:250 +#, python-format +msgid "Found cache file for image %(image_id)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:263 +#, python-format +msgid "Cloning img from cache for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:271 +msgid "Image cache cleaning in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:282 +msgid "Image cache cleaning in progress." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:293 +#, python-format +msgid "Cleaning cache for share %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:298 +#, python-format +msgid "Files to be queued for deletion %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:305 +#, python-format +msgid "Exception during cache cleaning %(share)s. Message - %(ex)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:311 +msgid "Image cache cleaning done." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:336 +#, python-format +msgid "Bytes to free %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:343 +#, python-format +msgid "Delete file path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:358 +#, python-format +msgid "Deleting file at path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:363 +#, python-format +msgid "Exception during deleting %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:395 +#, python-format +msgid "Unexpected exception in cloning image %(image_id)s. 
Message: %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:411 +#, python-format +msgid "Cloning image %s from cache" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:415 +#, python-format +msgid "Cache share: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:425 +#, python-format +msgid "Unexpected exception during image cloning in share %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:431 +#, python-format +msgid "Cloning image %s directly in share" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:436 +#, python-format +msgid "Share is cloneable %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:443 +#, python-format +msgid "Image is raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:450 +#, python-format +msgid "Image will locally be converted to raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:457 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:467 +#, python-format +msgid "Performing post clone for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:474 +msgid "NFS file could not be discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:478 +msgid "Checking file for resize" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:482 +#, python-format +msgid "Resizing file to %sG" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:488 +msgid "Resizing image file failed." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:510 +msgid "Discover file retries exhausted." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:529 +#, python-format +msgid "Image location not in the expected format %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:557 +#, python-format +msgid "Found possible share matches %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:561 +msgid "Unexpected exception while short listing used share." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:599 +#, python-format +msgid "Extending volume %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:710 +#, python-format +msgid "Shares on vserver %s will only be used for provisioning." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:714 +#: cinder/volume/drivers/netapp/nfs.py:892 +msgid "No vserver set in config. SSC will be disabled." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:757 +#, python-format +msgid "Exception creating vol %(name)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:765 +#, python-format +msgid "Volume %s could not be created on shares." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:815 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:856 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:862 +#, python-format +msgid "" +"Cloning with params volume %(volume)s, src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:924 +msgid "No cluster ssc stats found. Wait for next volume stats update." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:941 +msgid "No shares found hence skipping ssc refresh." 
+msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:978 +#: cinder/volume/drivers/netapp/nfs.py:1221 +#, python-format +msgid "Shortlisted del elg files %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:983 +#: cinder/volume/drivers/netapp/nfs.py:1226 +#, python-format +msgid "Getting file usage for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:988 +#: cinder/volume/drivers/netapp/nfs.py:1231 +#, python-format +msgid "file-usage for path %(path)s is %(bytes)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1005 +#: cinder/volume/drivers/netapp/nfs.py:1268 +#, python-format +msgid "Share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1007 +#: cinder/volume/drivers/netapp/nfs.py:1270 +#, python-format +msgid "No share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1038 +#, python-format +msgid "Found volume %(vol)s for share %(share)s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1129 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1139 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:241 +#, python-format +msgid "Unexpected error while creating ssc vol list. Message - %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:272 +#, python-format +msgid "Exception querying aggr options. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:313 +#, python-format +msgid "Exception querying sis information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:347 +#, python-format +msgid "Exception querying mirror information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:379 +#, python-format +msgid "Exception querying storage disk. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:421 +#, python-format +msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:455 +#, python-format +msgid "Successfully completed stale refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:482 +#, python-format +msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:488 +#, python-format +msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:500 +msgid "Backend not a VolumeDriver." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:502 +msgid "Backend server not NaServer." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:505 +msgid "ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:517 +msgid "refresh stale ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:616 +msgid "Fatal error: User not permitted to query NetApp volumes." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:623 +#, python-format +msgid "" +"The user does not have access or sufficient privileges to use all ssc " +"apis. The ssc features %s may not work as expected." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:122 +msgid "ems executed successfully." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:124 +#, python-format +msgid "Failed to invoke ems. 
Message : %s" +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:137 +msgid "" +"It is not the recommended way to use drivers by NetApp. Please use " +"NetAppDriver to achieve the functionality." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:160 +msgid "Requires an NaServer instance." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:317 +msgid "Unsupported Clustered Data ONTAP version." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:99 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:150 +#, python-format +msgid "Extending volume: %(id)s New size: %(size)s GB" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:166 +#, python-format +msgid "Volume %s does not exist, it seems it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:179 +#, python-format +msgid "Cannot delete snapshot %(origin): %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:190 +#, python-format +msgid "Creating temp snapshot of the original volume: %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:200 +#: cinder/volume/drivers/nexenta/nfs.py:200 +#, python-format +msgid "Volume creation failed, deleting created snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:205 +#: cinder/volume/drivers/nexenta/nfs.py:205 +#, python-format +msgid "Failed to delete zfs snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:223 +#, python-format +msgid "Enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:250 +#, python-format +msgid "Remote NexentaStor appliance at %s should be SSH-bound." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:267 +#, python-format +msgid "" +"Cannot send source snapshot %(src)s to destination %(dst)s. Reason: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:275 +#, python-format +msgid "" +"Cannot delete temporary source snapshot %(src)s on NexentaStor Appliance:" +" %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:281 +#, python-format +msgid "Cannot delete source volume %(volume)s on NexentaStor Appliance: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:318 +#, python-format +msgid "Snapshot %s does not exist, it seems it was already deleted." 
+msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:439 +#: cinder/volume/drivers/windows/windows_utils.py:230 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:449 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:461 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:471 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:481 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:514 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:522 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:83 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:88 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:89 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:90 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:96 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:85 +#, python-format +msgid "Volume %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:89 +#, python-format +msgid "Folder %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:114 +#, python-format +msgid "Creating folder on Nexenta Store %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:146 +#, python-format +msgid "Cannot destroy created folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:176 +#, python-format +msgid "Cannot destroy cloned folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:227 +#, python-format +msgid "Folder %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:237 +#: cinder/volume/drivers/nexenta/nfs.py:268 +#, python-format +msgid "Snapshot %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:302 +#, python-format +msgid "Creating regular file: %s.This may take some time." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:313 +#, python-format +msgid "Regular file: %s created." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:365 +#, python-format +msgid "Sharing folder %s on Nexenta Store" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:393 +#, python-format +msgid "Shares loaded: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/utils.py:46 +#, python-format +msgid "Invalid value: \"%s\"" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:93 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:99 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:107 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:137 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:190 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:246 +#, python-format +msgid "Snapshot info: %(name)s => %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:321 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:79 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:166 +#, python-format +msgid "Invalid hp3parclient version. Version %s or greater required." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:179 +#, python-format +msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:193 +#, python-format +msgid "HP3PARCommon %(common_ver)s, hp3parclient %(rest_ver)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:212 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:494 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:228 +#, python-format +msgid "Failed to get domain because CPG (%s) doesn't exist on array." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:247 +#, python-format +msgid "Error extending volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:347 +#, python-format +msgid "command %s failed" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:390 +#, python-format +msgid "Error running ssh command: %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:586 +#, python-format +msgid "VV Set %s does not exist." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:633 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:684 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:752 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1004 +#, python-format +msgid "Failure in update_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1019 +#, python-format +msgid "Failure in clear_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1031 +#, python-format +msgid "Error attaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1039 +#, python-format +msgid "Error detaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:124 +#, python-format +msgid "Invalid IP address format '%s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:158 +#, python-format +msgid "" +"Found invalid iSCSI IP address(s) in configuration option(s) " +"hp3par_iscsi_ips or iscsi_ip_address '%s.'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:164 +msgid "At least one valid iSCSI IP address must be set." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:266 +msgid "Least busy iSCSI port not found, using first iSCSI port in list." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:75 +#, python-format +msgid "Failure while invoking function: %(func)s. Error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:162 +#, python-format +msgid "Error while terminating session: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:165 +msgid "Successfully established connection to the server." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:172 +#, python-format +msgid "Error while logging out the user: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:218 +#, python-format +msgid "" +"Not authenticated error occurred. Will create session and try API call " +"again: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:258 +#, python-format +msgid "Task: %(task)s progress: %(prog)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:262 +#, python-format +msgid "Task %s status: success." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:266 +#: cinder/volume/drivers/vmware/api.py:271 +#, python-format +msgid "Task: %(task)s failed with error: %(err)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:290 +msgid "Lease is ready." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:294 +msgid "Lease initializing..." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:304 +#, python-format +msgid "Error: unknown lease state %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:51 +#, python-format +msgid "Read %(bytes)s out of %(max)s from ThreadSafePipe." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:56 +#, python-format +msgid "Completed transfer of size %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:102 +#, python-format +msgid "Initiating image service update on image: %(image)s with meta: %(meta)s" +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:117 +#, python-format +msgid "Glance image: %s is now active." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:123 +#, python-format +msgid "Glance image: %s is in killed state." 
+msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:132 +#, python-format +msgid "Glance image %(id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:171 +#, python-format +msgid "" +"Exception during HTTP connection close in VMwareHTTPWrite. Exception is " +"%s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:203 +#: cinder/volume/drivers/vmware/read_write_util.py:292 +msgid "Could not retrieve URL from lease." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:206 +#, python-format +msgid "Opening vmdk url: %s for write." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:231 +#, python-format +msgid "Written %s bytes to vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:242 +#: cinder/volume/drivers/vmware/read_write_util.py:318 +#, python-format +msgid "Updating progress to %s percent." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:258 +#: cinder/volume/drivers/vmware/read_write_util.py:334 +msgid "Lease released." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:260 +#: cinder/volume/drivers/vmware/read_write_util.py:336 +#, python-format +msgid "Lease is already in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:295 +#, python-format +msgid "Opening vmdk url: %s for read." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:307 +#, python-format +msgid "Read %s bytes from vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:150 +#, python-format +msgid "Error(s): %s occurred in the call to RetrievePropertiesEx." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:189 +#, python-format +msgid "No such SOAP method %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:198 +#, python-format +msgid "httplib error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:209 +#, python-format +msgid "Socket error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:218 +#, python-format +msgid "Type error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:225 +#, python-format +msgid "Error in %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:112 +#, python-format +msgid "Returning spec value %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:115 +#, python-format +msgid "Invalid spec value: %s specified." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:118 +#, python-format +msgid "Returning default spec value: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:169 +#, python-format +msgid "%s not set." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:174 +#, python-format +msgid "Successfully setup driver: %(driver)s for server: %(ip)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:219 +msgid "Backing not available, no operation to be performed." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:287 +#, python-format +msgid "" +"Unable to pick datastore to accommodate %(size)s bytes from the " +"datastores: %(dss)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:293 +#, python-format +msgid "" +"Selected datastore: %(datastore)s with %(host_count)d connected host(s) " +"for the volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:375 +#, python-format +msgid "" +"Unable to find suitable datastore for volume of size: %(vol)s GB under " +"host: %(host)s. 
More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:385 +#, python-format +msgid "Unable to find host to accommodate a disk of size: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:412 +#, python-format +msgid "" +"Unable to find suitable datastore for volume: %(vol)s under host: " +"%(host)s. More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:422 +#, python-format +msgid "Unable to create volume: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:441 +#, python-format +msgid "The instance: %s for which initialize connection is called, exists." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:448 +#, python-format +msgid "There is no backing for the volume: %s. Need to create one." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:456 +msgid "The instance for which initialize connection is called, does not exist." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:461 +#, python-format +msgid "Trying to boot from an empty volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:470 +#, python-format +msgid "" +"Returning connection_info: %(info)s for volume: %(volume)s with " +"connector: %(connector)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:518 +#, python-format +msgid "Snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:523 +#, python-format +msgid "There is no backing, so will not create snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:528 +#, python-format +msgid "Successfully created snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:549 +#, python-format +msgid "Delete snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:554 +#, python-format +msgid "There is no backing, and so there is no snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:558 +#, python-format +msgid "Successfully deleted snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:586 +#, python-format +msgid "Successfully cloned new backing: %(back)s from source VMDK file: %(vmdk)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:603 +#, python-format +msgid "" +"There is no backing for the source volume: %(svol)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:633 +#, python-format +msgid "" +"There is no backing for the source snapshot: %(snap)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:642 +#: cinder/volume/drivers/vmware/vmdk.py:982 +#, python-format +msgid "" +"There is no snapshot point for the snapshoted volume: %(snap)s. Not " +"creating any backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:678 +#, python-format +msgid "Cannot create image of disk format: %s. Only vmdk disk format is accepted." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:713 +#: cinder/volume/drivers/vmware/vmdk.py:771 +#, python-format +msgid "Fetching glance image: %(id)s to server: %(host)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:722 +#: cinder/volume/drivers/vmware/vmdk.py:792 +#, python-format +msgid "Done copying image: %(id)s to volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:725 +#, python-format +msgid "" +"Exception in copy_image_to_volume: %(excep)s. Deleting the backing: " +"%(back)s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:746 +#, python-format +msgid "Exception in _select_ds_for_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:749 +#, python-format +msgid "Selected datastore %(ds)s for new volume of size %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:784 +#, python-format +msgid "Exception in copy_image_to_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:787 +#, python-format +msgid "Deleting the backing: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:808 +#, python-format +msgid "Copy glance image: %s to create new volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:842 +msgid "Upload to glance of attached volume is not supported." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:847 +#, python-format +msgid "Copy Volume: %s to new image." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:853 +#, python-format +msgid "Backing not found, creating for volume: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:872 +#, python-format +msgid "Done copying volume %(vol)s to a new image %(img)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:922 +#, python-format +msgid "Relocating volume: %(backing)s to %(ds)s and %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:961 +#: cinder/volume/drivers/vmware/volumeops.py:630 +#, python-format +msgid "Successfully created clone: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:974 +#, python-format +msgid "" +"There is no backing for the snapshoted volume: %(snap)s. Not creating any" +" backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1010 +#, python-format +msgid "" +"There is no backing for the source volume: %(src)s. Not creating any " +"backing for volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1018 +#, python-format +msgid "Linked clone of source volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:94 +#, python-format +msgid "Downloading image: %s from glance image server as a flat vmdk file." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:107 +#: cinder/volume/drivers/vmware/vmware_images.py:126 +#, python-format +msgid "Downloaded image: %s from glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:113 +#, python-format +msgid "Downloading image: %s from glance image server using HttpNfc import." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:132 +#, python-format +msgid "Uploading image: %s to the Glance image server using HttpNfc export." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:158 +#, python-format +msgid "Uploaded image: %s to the Glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:87 +#, python-format +msgid "Did not find any backing with name: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:94 +#, python-format +msgid "Deleting the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:97 +#, python-format +msgid "Initiated deletion of VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:99 +#, python-format +msgid "Deleted the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:237 +#, python-format +msgid "There are no valid datastores attached to %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:289 +#, python-format +msgid "" +"Creating folder: %(child_folder_name)s under parent folder: " +"%(parent_folder)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:306 +#, python-format +msgid "Child folder already present: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:314 +#, python-format +msgid "Created child folder: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:365 +#, python-format +msgid "Spec for creating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:383 +#, python-format +msgid "" +"Creating volume backing name: %(name)s disk_type: %(disk_type)s size_kb: " +"%(size_kb)s at folder: %(folder)s resourse pool: %(resource_pool)s " +"datastore name: %(ds_name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:395 +#, python-format +msgid "Initiated creation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:398 +#, python-format +msgid "Successfully created volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:438 +#, python-format +msgid "Spec for relocating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:452 +#, python-format +msgid "" +"Relocating backing: %(backing)s to datastore: %(ds)s and resource pool: " +"%(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:462 +#, python-format +msgid "Initiated relocation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:464 +#, python-format +msgid "" +"Successfully relocated volume backing: %(backing)s to datastore: %(ds)s " +"and resource pool: %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:474 +#, python-format +msgid "Moving backing: %(backing)s to folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:479 +#, python-format +msgid "Initiated move of volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:482 +#, python-format +msgid "Successfully moved volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:494 +#, python-format +msgid "Snapshoting backing: %(backing)s with name: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:501 +#, python-format +msgid "Initiated snapshot of volume backing: %(backing)s named: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:505 +#, python-format +msgid "Successfully created snapshot: %(snap)s for volume backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:553 +#, python-format +msgid "Deleting the snapshot: %(name)s from backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:558 +#, python-format +msgid "" +"Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " +"delete anything." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:565 +#, python-format +msgid "Initiated snapshot: %(name)s deletion for backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:569 +#, python-format +msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:597 +#, python-format +msgid "Spec for cloning the backing: %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:613 +#, python-format +msgid "" +"Creating a clone of backing: %(back)s, named: %(name)s, clone type: " +"%(type)s from snapshot: %(snap)s on datastore: %(ds)s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:627 +#, python-format +msgid "Initiated clone of backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:638 +#, python-format +msgid "Deleting file: %(file)s under datacenter: %(dc)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:646 +#, python-format +msgid "Initiated deletion via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:648 +#, python-format +msgid "Successfully deleted file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:701 +msgid "Copying disk data before snapshot of the VM" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:711 +#, python-format +msgid "Initiated copying disk data via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:713 +#, python-format +msgid "Successfully copied disk at: %(src)s to: %(dest)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:722 +#, python-format +msgid "Deleting vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:729 +#, python-format +msgid "Initiated deleting vmdk file via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:731 +#, python-format +msgid "Deleted vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/windows/windows.py:102 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:47 +#, python-format +msgid "" +"check_for_setup_error: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:53 +msgid "check_for_setup_error: there is no ISCSI traffic listening." +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:63 +#, python-format +msgid "" +"get_host_information: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:73 +#, python-format +msgid "" +"get_host_information: the ISCSI target information could not be " +"retrieved. WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:105 +#, python-format +msgid "" +"associate_initiator_with_iscsi_target: an association between initiator: " +"%(init)s and target name: %(target)s could not be established. WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:123 +#, python-format +msgid "" +"delete_iscsi_target: error when deleting the iscsi target associated with" +" target name: %(target)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:139 +#, python-format +msgid "" +"create_volume: error when creating the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:157 +#, python-format +msgid "" +"delete_volume: error when deleting the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:177 +#, python-format +msgid "" +"create_snapshot: error when creating the snapshot name: %(vol_name)s . 
" +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:193 +#, python-format +msgid "" +"create_volume_from_snapshot: error when creating the volume name: " +"%(vol_name)s from snapshot name: %(snap_name)s. WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:208 +#, python-format +msgid "" +"delete_snapshot: error when deleting the snapshot name: %(snap_name)s . " +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:223 +#, python-format +msgid "" +"create_iscsi_target: error when creating iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:240 +#, python-format +msgid "" +"remove_iscsi_target: error when deleting iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:255 +#, python-format +msgid "" +"add_disk_to_target: error adding disk associated to volume : %(vol_name)s" +" to the target name: %(tar_name)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:273 +#, python-format +msgid "" +"copy_vhd_disk: error when copying disk from source path : %(src_path)s to" +" destination path: %(dest_path)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:290 +#, python-format +msgid "" +"extend: error when extending the volume: %(vol_name)s .WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/flows/common.py:52 +#, python-format +msgid "Restoring source %(source_volid)s status to %(status)s" +msgstr "" + +#: cinder/volume/flows/common.py:58 +#, python-format +msgid "" +"Failed setting source volume %(source_volid)s back to its initial " +"%(source_status)s status" +msgstr "" + +#: cinder/volume/flows/common.py:83 +#, python-format +msgid "Updating volume: %(volume_id)s with %(update)s due to: %(reason)s" +msgstr "" + +#: cinder/volume/flows/common.py:90 +#: cinder/volume/flows/manager/create_volume.py:676 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(update)s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:81 +#, python-format +msgid "Originating snapshot status must be one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:103 +#, python-format +msgid "" +"Unable to create a volume from an originating source volume when its " +"status is not one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:126 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than the snapshot size " +"%(snap_size)sGB. They must be >= original snapshot size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:135 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than original volume size " +"%(source_size)sGB. They must be >= original volume size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:144 +#, python-format +msgid "Volume size %(size)s must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:186 +#, python-format +msgid "" +"Size of specified image %(image_size)sGB is larger than volume size " +"%(volume_size)sGB." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:194 +#, python-format +msgid "" +"Volume size %(volume_size)sGB cannot be smaller than the image minDisk " +"size %(min_disk)sGB." 
+msgstr "" + +#: cinder/volume/flows/api/create_volume.py:212 +#, python-format +msgid "Metadata property key %s greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:217 +#, python-format +msgid "Metadata property key %s value greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:254 +#, python-format +msgid "Availability zone '%s' is invalid" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:267 +msgid "Volume must be in the same availability zone as the snapshot" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:276 +msgid "Volume must be in the same availability zone as the source volume" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:315 +msgid "Volume type will be changed to be the same as the source volume." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:463 +#, python-format +msgid "Failed destroying volume entry %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:546 +#, python-format +msgid "Failed rolling back quota for %s reservations" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:590 +#, python-format +msgid "Failed to update quota for deleting volume: %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:678 +#: cinder/volume/flows/manager/create_volume.py:192 +#, python-format +msgid "Volume %s: create failed" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:682 +msgid "Unexpected build error:" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:105 +#, python-format +msgid "" +"Volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d due to " +"%(reason)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:124 +#, python-format +msgid "Volume %s: re-scheduled" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:141 +#, python-format +msgid "Updating volume %(volume_id)s with %(update)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:146 +#, python-format +msgid "Volume %s: resetting 'creating' status failed." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:165 +#, python-format +msgid "Volume %s: rescheduling failed" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:308 +#, python-format +msgid "" +"Failed notifying about the volume action %(event)s for volume " +"%(volume_id)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:345 +#, python-format +msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:347 +#, python-format +msgid "" +"Failed updating volume %(vol_id)s metadata using the provided " +"%(src_type)s %(src_id)s metadata" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:405 +#, python-format +msgid "" +"Failed fetching snapshot %(snapshot_id)s bootable flag using the provided" +" glance snapshot %(snapshot_ref_id)s volume reference" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:418 +#, python-format +msgid "Marking volume %s as bootable." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:421 +#, python-format +msgid "Failed updating volume %(volume_id)s bootable flag to true" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:448 +#, python-format +msgid "" +"Attempting download of %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s." 
+msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:455 +#: cinder/volume/flows/manager/create_volume.py:466 +#, python-format +msgid "" +"Failed to copy image %(image_id)s to volume: %(volume_id)s, error: " +"%(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:461 +#, python-format +msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:475 +#, python-format +msgid "" +"Downloaded image %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s successfully." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:512 +#, python-format +msgid "" +"Creating volume glance metadata for volume %(volume_id)s backed by image " +"%(image_id)s with: %(vol_metadata)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:526 +#, python-format +msgid "" +"Cloning %(volume_id)s from image %(image_id)s at location " +"%(image_location)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:552 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(updates)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:574 +#, python-format +msgid "Unable to create volume. Volume driver %s not initialized" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:588 +#, python-format +msgid "" +"Volume %(volume_id)s: being created using %(functor)s with specification:" +" %(volume_spec)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:611 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with creation provided " +"model %(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:619 +#, python-format +msgid "Volume %s: creating export" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:633 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with driver provided model " +"%(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:680 +#, python-format +msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" +msgstr "" + +#~ msgid "Error retrieving volume status: %s" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get system name" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get storage pool data" +#~ msgstr "" + +#~ msgid "Cannot find any Fibre Channel HBAs" +#~ msgstr "" + +#~ msgid "Volume status must be available or error" +#~ msgstr "" + +#~ msgid "No backend config with id %s" +#~ msgstr "" + +#~ msgid "No sm_flavor called %s" +#~ msgstr "" + +#~ msgid "No sm_volume with id %s" +#~ msgstr "" + +#~ msgid "Error: %s" +#~ msgstr "" + +#~ msgid "Unexpected state while cloning %s" +#~ msgstr "" + +#~ msgid "iSCSI device not found at %s" +#~ msgstr "" + +#~ msgid "Fibre Channel device not found." +#~ msgstr "" + +#~ msgid "Uncaught exception" +#~ msgstr "" + +#~ msgid "Out reactor registered" +#~ msgstr "" + +#~ msgid "CONSUMER GOT %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT QUEUED %(data)s" +#~ msgstr "" + +#~ msgid "Could not create IPC directory %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT %(data)s" +#~ msgstr "" + +#~ msgid "May specify only one of snapshot, imageRef or source volume" +#~ msgstr "" + +#~ msgid "Volume size cannot be lesser than the Snapshot size" +#~ msgstr "" + +#~ msgid "Unable to clone volumes that are in an error state" +#~ msgstr "" + +#~ msgid "Clones currently must be >= original volume size." 
+#~ msgstr "" + +#~ msgid "Volume size '%s' must be an integer and greater than 0" +#~ msgstr "" + +#~ msgid "Size of specified image is larger than volume size." +#~ msgstr "" + +#~ msgid "Image minDisk size is larger than the volume size." +#~ msgstr "" + +#~ msgid "" +#~ msgstr "" + +#~ msgid "Availability zone is invalid" +#~ msgstr "" + +#~ msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +#~ msgstr "" + +#~ msgid "volume %s: creating from snapshot" +#~ msgstr "" + +#~ msgid "volume %s: creating from existing volume" +#~ msgstr "" + +#~ msgid "volume %s: creating from image" +#~ msgstr "" + +#~ msgid "volume %s: creating" +#~ msgstr "" + +#~ msgid "Setting volume: %s status to error after failed image copy." +#~ msgstr "" + +#~ msgid "Unexpected Error: " +#~ msgstr "" + +#~ msgid "volume %s: creating export" +#~ msgstr "" + +#~ msgid "volume %s: create failed" +#~ msgstr "" + +#~ msgid "volume %s: created successfully" +#~ msgstr "" + +#~ msgid "volume %s: Error trying to reschedule create" +#~ msgstr "" + +#~ msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +#~ msgstr "" + +#~ msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +#~ msgstr "" + +#~ msgid "Downloaded image %(image_id)s to %(volume_id)s successfully." +#~ msgstr "" + +#~ msgid "Array Mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "LUN %(lun)s of size %(size)s MB is created." +#~ msgstr "" + +#~ msgid "Array mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "Failed to attach iser target for volume %(volume_id)s." +#~ msgstr "" + +#~ msgid "Fetching %s" +#~ msgstr "" + +#~ msgid "Link Local address is not found.:%s" +#~ msgstr "" + +#~ msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +#~ msgstr "" + +#~ msgid "Started %(name)s on %(_host)s:%(_port)s" +#~ msgstr "" + +#~ msgid "Unable to find a Fibre Channel volume device" +#~ msgstr "" + +#~ msgid "Volume device not found at %s" +#~ msgstr "" + +#~ msgid "Unable to find Volume Group: %s" +#~ msgstr "" + +#~ msgid "Failed to create Volume Group: %s" +#~ msgstr "" + +#~ msgid "snapshot %(snap_name)s: creating" +#~ msgstr "" + +#~ msgid "Running with CoraidDriver for ESM EtherCLoud" +#~ msgstr "" + +#~ msgid "Update session cookie %(session)s" +#~ msgstr "" + +#~ msgid "Message : %(message)s" +#~ msgstr "" + +#~ msgid "Error while trying to set group: %(message)s" +#~ msgstr "" + +#~ msgid "Unable to find group: %(group)s" +#~ msgstr "" + +#~ msgid "ESM urlOpen error" +#~ msgstr "" + +#~ msgid "JSON Error" +#~ msgstr "" + +#~ msgid "Request without URL" +#~ msgstr "" + +#~ msgid "Configure data : %s" +#~ msgstr "" + +#~ msgid "Configure response : %s" +#~ msgstr "" + +#~ msgid "Unable to retrive volume infos for volume %(volname)s" +#~ msgstr "" + +#~ msgid "Cannot login on Coraid ESM" +#~ msgstr "" + +#~ msgid "Fail to create volume %(volname)s" +#~ msgstr "" + +#~ msgid "Failed to delete volume %(volname)s" +#~ msgstr "" + +#~ msgid "Failed to Create Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "Failed to Delete Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "Failed to Create Volume from Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "fmt = %(fmt)s backed by: %(backing_file)s" +#~ msgstr "" + +#~ msgid "Expected image to be in raw format, but is %s" +#~ msgstr "" + +#~ msgid "volume group %s doesn't exist" +#~ msgstr "" + +#~ msgid "Error retrieving volume stats: %s" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Could not get system name" +#~ msgstr "" + +#~ msgid "CPG (%s) must 
be in a domain" +#~ msgstr "" + +#~ msgid "Error populating default encryption types!" +#~ msgstr "" + +#~ msgid "Unexpected error while running command." +#~ msgstr "" + +#~ msgid "Nexenta SA returned the error" +#~ msgstr "" + +#~ msgid "Ignored target group creation error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group member addition error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LU creation error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Copying metadata from snapshot %(snap_volume_id)s to %(volume_id)s" +#~ msgstr "" + +#~ msgid "Connection to glance failed" +#~ msgstr "" + +#~ msgid "Invalid snapshot" +#~ msgstr "" + +#~ msgid "Invalid input received" +#~ msgstr "" + +#~ msgid "Invalid volume type" +#~ msgstr "" + +#~ msgid "Invalid volume" +#~ msgstr "" + +#~ msgid "Invalid host" +#~ msgstr "" + +#~ msgid "Invalid auth key" +#~ msgstr "" + +#~ msgid "Invalid metadata" +#~ msgstr "" + +#~ msgid "Invalid metadata size" +#~ msgstr "" + +#~ msgid "Migration error" +#~ msgstr "" + +#~ msgid "Quota exceeded" +#~ msgstr "" + +#~ msgid "Connection to swift failed" +#~ msgstr "" + +#~ msgid "Volume migration failed" +#~ msgstr "" + +#~ msgid "SSH command injection detected" +#~ msgstr "" + +#~ msgid "Invalid qos specs" +#~ msgstr "" + +#~ msgid "debug in callback: %s" +#~ msgstr "" + +#~ msgid "Expected object of type: %s" +#~ msgstr "" + +#~ msgid "timefunc: '%(name)s' took %(total_time).2f secs" +#~ msgstr "" + +#~ msgid "base image still has %s snapshots so not deleting base image" +#~ msgstr "" + +#~ msgid "Failed to rename migration destination volume %(vol)s to %(name)s" +#~ msgstr "" + +#~ msgid "Resize volume \"%(name)s\" to %(size)s" +#~ msgstr "" + +#~ msgid "Volume \"%(name)s\" resized. New size is %(size)s" +#~ msgstr "" + +#~ msgid "Invalid snapshot backing file format: %s" +#~ msgstr "" + +#~ msgid "Extend volume from %(old_size) to %(new_size)" +#~ msgstr "" + +#~ msgid "pool %s doesn't exist" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Could not get system name." +#~ msgstr "" + +#~ msgid "Disk not found: %s" +#~ msgstr "" + +#~ msgid "read timed out" +#~ msgstr "" + +#~ msgid "check_for_setup_error." +#~ msgstr "" + +#~ msgid "check_for_setup_error: Can not get device type." +#~ msgstr "" + +#~ msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +#~ msgstr "" + +#~ msgid "_get_device_type: Storage Pool must be configured." +#~ msgstr "" + +#~ msgid "create_volume:volume name: %s." +#~ msgstr "" + +#~ msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +#~ msgstr "" + +#~ msgid "create_export: volume name:%s" +#~ msgstr "" + +#~ msgid "create_export:Volume %(name)s does not exist." +#~ msgstr "" + +#~ msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +#~ msgstr "" + +#~ msgid "terminate_connection:Host does not exist. Host name:%(host)s." +#~ msgstr "" + +#~ msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +#~ msgstr "" + +#~ msgid "create_snapshot:Device does not support snapshot." +#~ msgstr "" + +#~ msgid "create_snapshot:Resource pool needs 1GB valid size at least." +#~ msgstr "" + +#~ msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +#~ msgstr "" + +#~ msgid "create_snapshot:Snapshot does not exist. 
Snapshot name:%(name)s" +#~ msgstr "" + +#~ msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +#~ msgstr "" + +#~ msgid "delete_snapshot:Device does not support snapshot." +#~ msgstr "" + +#~ msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +#~ msgstr "" + +#~ msgid "_check_conf_file: %s" +#~ msgstr "" + +#~ msgid "Write login information to xml error. %s" +#~ msgstr "" + +#~ msgid "_get_login_info error. %s" +#~ msgstr "" + +#~ msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +#~ msgstr "" + +#~ msgid "_get_lun_set_info:%s" +#~ msgstr "" + +#~ msgid "_get_iscsi_info:%s" +#~ msgstr "" + +#~ msgid "CLI command:%s" +#~ msgstr "" + +#~ msgid "_execute_cli:%s" +#~ msgstr "" + +#~ msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +#~ msgstr "" + +#~ msgid "_get_tgt_iqn:iSCSI IP is %s." +#~ msgstr "" + +#~ msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +#~ msgstr "" + +#~ msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +#~ msgstr "" + +#~ msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +#~ msgstr "" + +#~ msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." +#~ msgstr "" + +#~ msgid "Ignored target creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group member addition error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LU creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LUN mapping entry addition error while ensuring export" +#~ msgstr "" + +#~ msgid "Invalid source volume %(reason)s." +#~ msgstr "" + +#~ msgid "The request is invalid." +#~ msgstr "" + +#~ msgid "Volume %(volume_id)s persistence file could not be found." +#~ msgstr "" + +#~ msgid "No disk at %(location)s" +#~ msgstr "" + +#~ msgid "Class %(class_name)s could not be found: %(exception)s" +#~ msgstr "" + +#~ msgid "Action not allowed." +#~ msgstr "" + +#~ msgid "Key pair %(key_name)s already exists." +#~ msgstr "" + +#~ msgid "Migration error: %(reason)s" +#~ msgstr "" + +#~ msgid "Maximum volume/snapshot size exceeded" +#~ msgstr "" + +#~ msgid "3PAR Host already exists: %(err)s. %(info)s" +#~ msgstr "" + +#~ msgid "Backup volume %(volume_id)s type not recognised." +#~ msgstr "" + +#~ msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s" +#~ msgstr "" + +#~ msgid "ssh_read: Read SSH timeout" +#~ msgstr "" + +#~ msgid "do_setup." +#~ msgstr "" + +#~ msgid "create_volume: volume name: %s." +#~ msgstr "" + +#~ msgid "delete_volume: volume name: %s." +#~ msgstr "" + +#~ msgid "create_cloned_volume: src volume: %(src)s tgt volume: %(tgt)s" +#~ msgstr "" + +#~ msgid "create_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" +#~ msgstr "" + +#~ msgid "delete_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" +#~ msgstr "" + +#~ msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Updating volume stats" +#~ msgstr "" + +#~ msgid "restore finished." +#~ msgstr "" + +#~ msgid "Error encountered during initialization of driver: %s" +#~ msgstr "" + +#~ msgid "Unabled to update stats, driver is uninitialized" +#~ msgstr "" + +#~ msgid "Snapshot file at %s does not exist." 
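The `#~ msgid` / `#~ msgstr` pairs above are obsolete entries: when the source strings they were extracted from disappear from the tree, the catalog-update step (gettext's `msgmerge`, or `pybabel update` in Babel-based projects such as this one) comments them out instead of deleting them, so an old translation can be recycled if the string ever returns. A minimal sketch for spot-checking how many such entries a catalog carries; the file path is illustrative, not part of this change:

```python
# Count obsolete ("#~ msgid ...") entries in a gettext catalog.
# Continuation lines of multi-line entries start with '#~ "', so counting
# only the '#~ msgid' lines counts entries, not lines.
from pathlib import Path

po_text = Path("cinder/locale/sk/LC_MESSAGES/cinder.po").read_text(encoding="utf-8")
obsolete = sum(1 for line in po_text.splitlines() if line.startswith("#~ msgid"))
print(f"{obsolete} obsolete entries")
```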
+#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %s does not exist" +#~ msgstr "" + +#~ msgid "Login to 3PAR array invalid" +#~ msgstr "" + +#~ msgid "There are no datastores present under %s." +#~ msgstr "" + +#~ msgid "Size for volume: %s not found, skipping secure delete." +#~ msgstr "" + +#~ msgid "Could not find attribute for LUN named %s" +#~ msgstr "" + +#~ msgid "Cleaning up incomplete backup operations" +#~ msgstr "" + +#~ msgid "Resetting volume %s to available (was backing-up)" +#~ msgstr "" + +#~ msgid "Resetting volume %s to error_restoring (was restoring-backup)" +#~ msgstr "" + +#~ msgid "Resetting backup %s to error (was creating)" +#~ msgstr "" + +#~ msgid "Resetting backup %s to available (was restoring)" +#~ msgstr "" + +#~ msgid "Resuming delete on backup: %s" +#~ msgstr "" + +#~ msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +#~ msgstr "" + +#~ msgid "create_backup finished. backup: %s" +#~ msgstr "" + +#~ msgid "delete_backup started, backup: %s" +#~ msgstr "" + +#~ msgid "delete_backup finished, backup %s deleted" +#~ msgstr "" + +#~ msgid "JSON transfer Error" +#~ msgstr "" + +#~ msgid "create volume error: %(err)s" +#~ msgstr "" + +#~ msgid "Create snapshot error." +#~ msgstr "" + +#~ msgid "Create luncopy error." +#~ msgstr "" + +#~ msgid "_find_host_lun_id transfer data error! " +#~ msgstr "" + +#~ msgid "ssh_read: Read SSH timeout." +#~ msgstr "" + +#~ msgid "There are no hosts in the inventory." +#~ msgstr "" + +#~ msgid "Unable to create volume: %(vol)s on the hosts: %(hosts)s." +#~ msgstr "" + +#~ msgid "Successfully cloned new backing: %s." +#~ msgstr "" + +#~ msgid "Successfully reverted clone: %(clone)s to snapshot: %(snapshot)s." +#~ msgstr "" + +#~ msgid "Copying backing files from %(src)s to %(dest)s." +#~ msgstr "" + +#~ msgid "Initiated copying of backing via task: %s." +#~ msgstr "" + +#~ msgid "Successfully copied backing to %s." +#~ msgstr "" + +#~ msgid "Registering backing at path: %s to inventory." +#~ msgstr "" + +#~ msgid "Initiated registring backing, task: %s." +#~ msgstr "" + +#~ msgid "Successfully registered backing: %s." +#~ msgstr "" + +#~ msgid "Reverting backing to snapshot: %s." +#~ msgstr "" + +#~ msgid "Initiated reverting snapshot via task: %s." +#~ msgstr "" + +#~ msgid "Successfully reverted to snapshot: %s." +#~ msgstr "" + +#~ msgid "Successfully copied disk data to: %s." +#~ msgstr "" + +#~ msgid "Error(s): %s occurred in the call to RetrieveProperties." +#~ msgstr "" + +#~ msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +#~ msgstr "" + +#~ msgid "Deploy v1 of the Cinder API. " +#~ msgstr "" + +#~ msgid "Deploy v2 of the Cinder API. " +#~ msgstr "" + +#~ msgid "_read_xml:%s" +#~ msgstr "" + +#~ msgid "request ip info is %s." +#~ msgstr "" + +#~ msgid "new str info is %s." +#~ msgstr "" + +#~ msgid "Failed to create iser target for volume %(volume_id)s." +#~ msgstr "" + +#~ msgid "Failed to remove iser target for volume %(volume_id)s." 
+#~ msgstr "" + +#~ msgid "rtstool is not installed correctly" +#~ msgstr "" + +#~ msgid "Creating iser_target for: %s" +#~ msgstr "" + +#~ msgid "Failed to create iser target for volume id:%(vol_id)s: %(e)s" +#~ msgstr "" + +#~ msgid "Removing iser_target for: %s" +#~ msgstr "" + +#~ msgid "Failed to remove iser target for volume id:%(vol_id)s: %(e)s" +#~ msgstr "" + +#~ msgid "Volume %s does not exist, it seems it was already deleted" +#~ msgstr "" + +#~ msgid "Executing zfs send/recv on the appliance" +#~ msgstr "" + +#~ msgid "zfs send/recv done, new volume %s created" +#~ msgstr "" + +#~ msgid "Failed to delete temp snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" + +#~ msgid "Failed to delete zfs recv snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" + +#~ msgid "rbd export-diff failed - %s" +#~ msgstr "" + +#~ msgid "rbd import-diff failed - %s" +#~ msgstr "" + +#~ msgid "%s is not on GPFS. Perhaps GPFS not mounted." +#~ msgstr "" + +#~ msgid "Folder %s does not exist, it seems it was already deleted." +#~ msgstr "" + +#~ msgid "No 'os-update_readonly_flag' was specified in request." +#~ msgstr "" + +#~ msgid "Volume 'readonly' flag must be specified in request as a boolean." +#~ msgstr "" + +#~ msgid "ISER provider_location not stored, using discovery" +#~ msgstr "" + +#~ msgid "Could not find iSER export for volume %s" +#~ msgstr "" + +#~ msgid "ISER Discovery: Found %s" +#~ msgstr "" + +#~ msgid "Failed to access the device on the path %(path)s: %(error)s." +#~ msgstr "" + +#~ msgid "iSER device not found at %s" +#~ msgstr "" + +#~ msgid "Found iSER node %(host_device)s (after %(tries)s rescans)." +#~ msgstr "" + +#~ msgid "Skipping ensure_export. No iser_target provisioned for volume: %s" +#~ msgstr "" + +#~ msgid "Skipping remove_export. No iser_target provisioned for volume: %s" +#~ msgstr "" + +#~ msgid "Downloading image: %s from glance image server." +#~ msgstr "" + +#~ msgid "Uploading image: %s to the Glance image server." +#~ msgstr "" + +#~ msgid "Invalid request body" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: prefix %s" +#~ msgstr "" + +#~ msgid "Schedule volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete schedule volume using flow: %s" +#~ msgstr "" + +#~ msgid "Create volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete create volume workflow" +#~ msgstr "" + +#~ msgid "Expected volume result not found" +#~ msgstr "" + +#~ msgid "Manager volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete manager volume workflow" +#~ msgstr "" + +#~ msgid "Unable to update stats, driver is uninitialized" +#~ msgstr "" + +#~ msgid "Bad reponse from server: %s" +#~ msgstr "" + +#~ msgid "%(flow)s has moved into state %(state)s from state %(old_state)s" +#~ msgstr "" + +#~ msgid "No request spec, will not reschedule" +#~ msgstr "" + +#~ msgid "No retry filter property or associated retry info, will not reschedule" +#~ msgstr "" + +#~ msgid "Retry info not present, will not reschedule" +#~ msgstr "" + +#~ msgid "Clear capabilities" +#~ msgstr "" + +#~ msgid "This usually means the volume was never succesfully created." +#~ msgstr "" + +#~ msgid "setting LU uppper (end) limit to %s" +#~ msgstr "" + +#~ msgid "Can't find lun or lun goup in array" +#~ msgstr "" + +#~ msgid "Volume to be restored to is smaller than the backup to be restored" +#~ msgstr "" + +#~ msgid "Volume driver '%(driver)s' not initialized." 
+#~ msgstr "" + +#~ msgid "in looping call" +#~ msgstr "" + +#~ msgid "Is the appropriate service running?" +#~ msgstr "" + +#~ msgid "Could not find another host" +#~ msgstr "" + +#~ msgid "Not enough allocatable volume gigabytes remaining" +#~ msgstr "" + +#~ msgid "Unable to update stats on non-intialized Volume Group: %s" +#~ msgstr "" + +#~ msgid "do_setup: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "migrate_volume started with more than one vdisk copy" +#~ msgstr "" + +#~ msgid "migrate_volume: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "Selected datastore: %s for the volume." +#~ msgstr "" + +#~ msgid "There are no valid datastores present under %s." +#~ msgstr "" + +#~ msgid "Unable to create volume, driver not initialized" +#~ msgstr "" + +#~ msgid "Migration %(migration_id)s could not be found." +#~ msgstr "" + +#~ msgid "Bad driver response status: %(status)s" +#~ msgstr "" + +#~ msgid "Instance %(instance_id)s could not be found." +#~ msgstr "" + +#~ msgid "Volume retype failed: %(reason)s" +#~ msgstr "" + +#~ msgid "SIGTERM received" +#~ msgstr "" + +#~ msgid "Child %(pid)d exited with status %(code)d" +#~ msgstr "" + +#~ msgid "_wait_child %d" +#~ msgstr "" + +#~ msgid "wait wrap.failed %s" +#~ msgstr "" + +#~ msgid "" +#~ "Report interval must be less than " +#~ "service down time. Current config: " +#~ ". Setting " +#~ "service_down_time to: %(new_service_down_time)s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target for volume %(name)s." +#~ msgstr "" + +#~ msgid "Updating iscsi target: %s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target %(name)s: %(e)s" +#~ msgstr "" + +#~ msgid "Caught '%(exception)s' exception." +#~ msgstr "" + +#~ msgid "Get code level failed" +#~ msgstr "" + +#~ msgid "do_setup: Could not get system name" +#~ msgstr "" + +#~ msgid "Failed to get license information." +#~ msgstr "" + +#~ msgid "do_setup: No configured nodes" +#~ msgstr "" + +#~ msgid "enter: _get_chap_secret_for_host: host name %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_chap_secret_for_host: host name " +#~ "%(host_name)s with secret %(chap_secret)s" +#~ msgstr "" + +#~ msgid "" +#~ "_create_host: Cannot clean host name. " +#~ "Host name is not unicode or string" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: %s" +#~ msgstr "" + +#~ msgid "leave: _get_host_from_connector: host %s" +#~ msgstr "" + +#~ msgid "enter: _create_host: host %s" +#~ msgstr "" + +#~ msgid "_create_host: No connector ports" +#~ msgstr "" + +#~ msgid "leave: _create_host: host %(host)s - %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +#~ msgstr "" + +#~ msgid "" +#~ "storwize_svc_multihostmap_enabled is set to " +#~ "False, Not allow multi host mapping" +#~ msgstr "" + +#~ msgid "volume %s mapping to multi host" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _map_vol_to_host: LUN %(result_lun)s, " +#~ "volume %(volume_name)s, host %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "leave: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "_create_host failed to return the host name." +#~ msgstr "" + +#~ msgid "_get_host_from_connector failed to return the host name for connector" +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to any host found." +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: Multiple mappings of " +#~ "volume %(vol_name)s found, no host " +#~ "specified." 
+#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to host %(host_name)s found" +#~ msgstr "" + +#~ msgid "protocol must be specified as ' iSCSI' or ' FC'" +#~ msgstr "" + +#~ msgid "enter: _create_vdisk: vdisk %s " +#~ msgstr "" + +#~ msgid "" +#~ "_create_vdisk %(name)s - did not find success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find success " +#~ "message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find mapping " +#~ "id in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to prepare FlashCopy" +#~ " from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "Unexecpted mapping status %(status)s for " +#~ "mapping %(id)s. Attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Mapping %(id)s prepare failed to " +#~ "complete within the allotted %(to)d " +#~ "seconds timeout. Terminating." +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s with " +#~ "exception %(ex)s" +#~ msgstr "" + +#~ msgid "_prepare_fc_map: %s" +#~ msgstr "" + +#~ msgid "" +#~ "_start_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "enter: _run_flashcopy: execute FlashCopy from" +#~ " source %(source)s to target %(target)s" +#~ msgstr "" + +#~ msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +#~ msgstr "" + +#~ msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %(src_vdisk)s (%(src_id)s) does not exist" +#~ msgstr "" + +#~ msgid "" +#~ "_create_copy: cannot get source vdisk " +#~ "%(src)s capacity from vdisk attributes " +#~ "%(attr)s" +#~ msgstr "" + +#~ msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_flashcopy_mapping_attributes: mapping " +#~ "%(fc_map_id)s, attributes %(attributes)s" +#~ msgstr "" + +#~ msgid "enter: _is_vdisk_defined: vdisk %s " +#~ msgstr "" + +#~ msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +#~ msgstr "" + +#~ msgid "enter: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "warning: Tried to delete vdisk %s but it does not exist." 
+#~ msgstr "" + +#~ msgid "leave: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "" +#~ "_add_vdisk_copy %(name)s - did not find" +#~ " success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "_get_vdisk_copy_attrs: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "_get_pool_attrs: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "enter: _execute_command_and_parse_attributes: command %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _execute_command_and_parse_attributes:\n" +#~ "command: %(cmd)s\n" +#~ "attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "_get_hdr_dic: attribute headers and values do not match.\n" +#~ " Headers: %(header)s\n" +#~ " Values: %(row)s" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ "stdout: %(out)s\n" +#~ "stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "Did not find expected column in %(fun)s: %(hdr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Volume size %(size)s cannot be lesser" +#~ " than the snapshot size %(snap_size)s. " +#~ "They must be >= original snapshot " +#~ "size." +#~ msgstr "" + +#~ msgid "" +#~ "Clones currently disallowed when %(size)s " +#~ "< %(source_size)s. They must be >= " +#~ "original volume size." +#~ msgstr "" + +#~ msgid "" +#~ "Size of specified image %(image_size)s " +#~ "is larger than volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "" +#~ "Image minDisk size %(min_disk)s is " +#~ "larger than the volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "Updating volume %(volume_id)s with %(update)s" +#~ msgstr "" + +#~ msgid "Volume %s: resetting 'creating' status failed" +#~ msgstr "" + +#~ msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s" +#~ msgstr "" + +#~ msgid "Marking volume %s as bootable" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting download of %(image_id)s " +#~ "(%(image_location)s) to volume %(volume_id)s" +#~ msgstr "" + +#~ msgid "" +#~ "Downloaded image %(image_id)s (%(image_location)s)" +#~ " to volume %(volume_id)s successfully" +#~ msgstr "" + +#~ msgid "" +#~ "Creating volume glance metadata for " +#~ "volume %(volume_id)s backed by image " +#~ "%(image_id)s with: %(vol_metadata)s" +#~ msgstr "" + +#~ msgid "" +#~ "Cloning %(volume_id)s from image %(image_id)s" +#~ " at location %(image_location)s" +#~ msgstr "" + diff --git a/cinder/locale/sk/LC_MESSAGES/cinder.po b/cinder/locale/sk/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..0eac4886a2 --- /dev/null +++ b/cinder/locale/sk/LC_MESSAGES/cinder.po @@ -0,0 +1,10481 @@ +# Slovak translations for cinder. +# Copyright (C) 2013 ORGANIZATION +# This file is distributed under the same license as the cinder project. +# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Cinder\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-02-03 06:16+0000\n" +"PO-Revision-Date: 2013-08-27 04:58+0000\n" +"Last-Translator: daisy.ycguo \n" +"Language-Team: Slovak " +"(http://www.transifex.com/projects/p/openstack/language/sk/)\n" +"Plural-Forms: nplurals=3; plural=(n==1) ? 0 : (n>=2 && n<=4) ? 
1 : 2\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:102 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:66 cinder/brick/exception.py:33 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:88 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:107 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:112 +#, python-format +msgid "Volume driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:116 +#, python-format +msgid "Backup driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:120 +#, python-format +msgid "Connection to glance failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:124 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:129 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:133 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:137 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:141 +msgid "Volume driver not ready." +msgstr "" + +#: cinder/exception.py:145 cinder/brick/exception.py:74 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:150 +#, python-format +msgid "Invalid snapshot: %(reason)s" +msgstr "" + +#: cinder/exception.py:154 +#, python-format +msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:159 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:163 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:167 +msgid "The results are invalid." +msgstr "" + +#: cinder/exception.py:171 +#, python-format +msgid "Invalid input received: %(reason)s" +msgstr "" + +#: cinder/exception.py:175 +#, python-format +msgid "Invalid volume type: %(reason)s" +msgstr "" + +#: cinder/exception.py:179 +#, python-format +msgid "Invalid volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:183 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:187 +#, python-format +msgid "Invalid host: %(reason)s" +msgstr "" + +#: cinder/exception.py:193 cinder/brick/exception.py:81 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:197 +#, python-format +msgid "Invalid auth key: %(reason)s" +msgstr "" + +#: cinder/exception.py:201 +#, python-format +msgid "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" +msgstr "" + +#: cinder/exception.py:206 +msgid "Service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:210 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:214 +#, python-format +msgid "The device in the path %(path)s is unavailable: %(reason)s" +msgstr "" + +#: cinder/exception.py:218 +#, python-format +msgid "Expected a uuid but received %(uuid)s." +msgstr "" + +#: cinder/exception.py:222 cinder/brick/exception.py:68 +msgid "Resource could not be found." 
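The header of the new Slovak catalog declares `Plural-Forms: nplurals=3; plural=(n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2`, the C-style rule gettext evaluates to pick one of three plural translations for a given count. A plain-Python sketch of the same rule (the function name is ours, not part of the catalog):

```python
def sk_plural_index(n: int) -> int:
    """Mirror the header rule: nplurals=3; plural=(n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2."""
    if n == 1:
        return 0          # singular form
    if 2 <= n <= 4:       # Slovak uses a "few" form for 2-4
        return 1
    return 2              # everything else (0, 5, 6, ...)

# 1 volume -> form 0, 3 volumes -> form 1, 7 volumes -> form 2
assert [sk_plural_index(n) for n in (1, 3, 7)] == [0, 1, 2]
```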
+msgstr "" + +#: cinder/exception.py:228 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:232 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "" +"Volume %(volume_id)s has no administration metadata with key " +"%(metadata_key)s." +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Invalid metadata: %(reason)s" +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Invalid metadata size: %(reason)s" +msgstr "" + +#: cinder/exception.py:250 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:255 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:264 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:269 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s deletion is not allowed with volumes " +"present with the type." +msgstr "" + +#: cinder/exception.py:274 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:278 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:282 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:287 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:291 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:295 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:328 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:332 +#, python-format +msgid "Unknown quota resources %(unknown)s." +msgstr "" + +#: cinder/exception.py:336 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:340 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:344 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:348 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." 
+msgstr "" + +#: cinder/exception.py:352 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:356 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:365 +#, python-format +msgid "Volume Type %(id)s already exists." +msgstr "" + +#: cinder/exception.py:369 +#, python-format +msgid "Volume type encryption for type %(type_id)s already exists." +msgstr "" + +#: cinder/exception.py:373 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:377 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:381 +#, python-format +msgid "Could not find parameter %(param)s" +msgstr "" + +#: cinder/exception.py:385 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:389 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:398 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:402 +#, python-format +msgid "Quota exceeded: code=%(code)s" +msgstr "" + +#: cinder/exception.py:409 +#, python-format +msgid "" +"Requested volume or snapshot exceeds allowed Gigabytes quota. Requested " +"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." +msgstr "" + +#: cinder/exception.py:415 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:419 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:423 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:427 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:432 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:436 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:440 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:444 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:449 +#, python-format +msgid "Glance metadata for volume/snapshot %(id)s cannot be found." +msgstr "" + +#: cinder/exception.py:453 +#, python-format +msgid "Failed to export for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:457 +#, python-format +msgid "Failed to create metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:461 +#, python-format +msgid "Failed to update metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:465 +#, python-format +msgid "Failed to copy metadata to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:469 +#, python-format +msgid "Failed to copy image to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:473 +msgid "Invalid Ceph args provided for backup rbd operation" +msgstr "" + +#: cinder/exception.py:477 +msgid "An error has occurred during backup operation" +msgstr "" + +#: cinder/exception.py:481 +msgid "Backup RBD operation failed" +msgstr "" + +#: cinder/exception.py:485 +#, python-format +msgid "Backup %(backup_id)s could not be found." 
+msgstr "" + +#: cinder/exception.py:489 +msgid "Failed to identify volume backend." +msgstr "" + +#: cinder/exception.py:493 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "" + +#: cinder/exception.py:497 +#, python-format +msgid "Connection to swift failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:501 +#, python-format +msgid "Transfer %(transfer_id)s could not be found." +msgstr "" + +#: cinder/exception.py:505 +#, python-format +msgid "Volume migration failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:509 +#, python-format +msgid "SSH command injection detected: %(command)s" +msgstr "" + +#: cinder/exception.py:513 +#, python-format +msgid "QoS Specs %(specs_id)s already exists." +msgstr "" + +#: cinder/exception.py:517 +#, python-format +msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:522 +#, python-format +msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "No such QoS spec %(specs_id)s." +msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:536 +#, python-format +msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:541 +#, python-format +msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." +msgstr "" + +#: cinder/exception.py:546 +#, python-format +msgid "Invalid qos specs: %(reason)s" +msgstr "" + +#: cinder/exception.py:550 +#, python-format +msgid "QoS Specs %(specs_id)s is still associated with entities." +msgstr "" + +#: cinder/exception.py:554 +#, python-format +msgid "key manager error: %(reason)s" +msgstr "" + +#: cinder/exception.py:560 +msgid "Coraid Cinder Driver exception." +msgstr "" + +#: cinder/exception.py:564 +msgid "Failed to encode json data." +msgstr "" + +#: cinder/exception.py:568 +msgid "Login on ESM failed." +msgstr "" + +#: cinder/exception.py:572 +msgid "Relogin on ESM failed." +msgstr "" + +#: cinder/exception.py:576 +#, python-format +msgid "Group with name \"%(group_name)s\" not found." +msgstr "" + +#: cinder/exception.py:580 +#, python-format +msgid "ESM configure request failed: %(message)s." +msgstr "" + +#: cinder/exception.py:584 +#, python-format +msgid "Coraid ESM not available with reason: %(reason)s." +msgstr "" + +#: cinder/exception.py:589 +msgid "Zadara Cinder Driver exception." 
+msgstr "" + +#: cinder/exception.py:593 +#, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:597 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:601 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:605 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:609 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:613 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:618 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:622 +msgid "SolidFire Cinder Driver exception" +msgstr "" + +#: cinder/exception.py:626 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:630 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:636 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:641 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:645 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:649 cinder/exception.py:662 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:654 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:658 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/manager.py:133 +msgid "Notifying Schedulers of capabilities ..." +msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:105 +#, python-format +msgid "" +"Default quota for resource: %(res)s is set by the default quota flag: " +"quota_%(res)s, it is now deprecated. Please use the the default quota " +"class for default quota." +msgstr "" + +#: cinder/quota.py:748 +#, python-format +msgid "Created reservations %s" +msgstr "" + +#: cinder/quota.py:770 +#, python-format +msgid "Failed to commit reservations %s" +msgstr "" + +#: cinder/quota.py:790 +#, python-format +msgid "Failed to roll back reservations %s" +msgstr "" + +#: cinder/quota.py:876 +msgid "Cannot register resource" +msgstr "" + +#: cinder/quota.py:879 +msgid "Cannot register resources" +msgstr "" + +#: cinder/quota_utils.py:46 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume - " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/quota_utils.py:56 cinder/transfer/api.py:168 +#: cinder/volume/flows/api/create_volume.py:520 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/service.py:95 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:108 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:148 +#, python-format +msgid "" +"Report interval must be less than service down time. Current config " +"service_down_time: %(service_down_time)s, report_interval for this: " +"service is: %(report_interval)s. 
Setting global service_down_time to: " +"%(new_down_time)s" +msgstr "" + +#: cinder/service.py:216 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:255 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:270 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:276 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:298 +#, python-format +msgid "" +"Value of config option %(name)s_workers must be integer greater than 1. " +"Input value ignored." +msgstr "" + +#: cinder/service.py:373 +msgid "serve() can only be called once" +msgstr "" + +#: cinder/service.py:379 cinder/openstack/common/service.py:166 +#: cinder/openstack/common/service.py:384 +msgid "Full set of CONF:" +msgstr "" + +#: cinder/service.py:387 +#, python-format +msgid "%s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Can not translate %s to integer." +msgstr "" + +#: cinder/utils.py:127 +#, python-format +msgid "May specify only one of %s" +msgstr "" + +#: cinder/utils.py:212 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:228 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:412 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:423 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:698 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:759 +#, python-format +msgid "Volume driver %s not initialized" +msgstr "" + +#: cinder/wsgi.py:127 cinder/openstack/common/sslutils.py:50 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: cinder/wsgi.py:130 cinder/openstack/common/sslutils.py:53 +#, python-format +msgid "Unable to find ca_file : %s" +msgstr "" + +#: cinder/wsgi.py:133 cinder/openstack/common/sslutils.py:56 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: cinder/wsgi.py:136 cinder/openstack/common/sslutils.py:59 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:169 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:206 +#, python-format +msgid "Started %(name)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:244 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:313 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." 
+msgstr "" + +#: cinder/api/common.py:92 cinder/api/common.py:126 cinder/volume/api.py:266 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:95 cinder/api/common.py:130 cinder/volume/api.py:263 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:120 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:134 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:162 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:189 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:182 +msgid "Initializing extension manager." +msgstr "" + +#: cinder/api/extensions.py:197 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:235 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:236 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:240 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:256 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:262 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:276 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:287 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:356 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:266 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:463 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:786 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:907 +msgid "subclasses must implement construct()!" 
+msgstr "" + +#: cinder/api/contrib/admin_actions.py:81 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:227 +#: cinder/api/contrib/volume_transfer.py:157 +#: cinder/api/contrib/volume_transfer.py:193 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:224 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:60 +msgid "Snapshot not found." +msgstr "" + +#: cinder/api/contrib/hosts.py:86 cinder/api/openstack/wsgi.py:245 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:136 +#, python-format +msgid "Host '%s' could not be found." +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:168 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:180 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:206 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:214 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:111 +msgid "Please specify a name for QoS specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:220 +msgid "Failed to disassociate qos specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:222 +msgid "Qos specs still in use." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:298 +#: cinder/api/contrib/qos_specs_manage.py:351 +msgid "Volume Type id must not be None." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:72 +msgid "Missing required element quota_class_set in request body." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:81 +msgid "Quota class limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:85 +msgid "Quota class limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:60 +msgid "Quota limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quotas.py:65 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:100 +msgid "Missing required element quota_set in request body." +msgstr "" + +#: cinder/api/contrib/quotas.py:111 +#, python-format +msgid "Bad key(s) in quota set: %s" +msgstr "" + +#: cinder/api/contrib/scheduler_hints.py:36 +msgid "Malformed scheduler_hints attribute" +msgstr "" + +#: cinder/api/contrib/services.py:84 +msgid "" +"Query by service parameter is deprecated. Please use binary parameter " +"instead." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:51 +msgid "'status' must be specified." 
+msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:61 +#, python-format +msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:67 +#, python-format +msgid "" +"Provided snapshot status %(provided)s not allowed for snapshot with " +"status %(current)s." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:79 +msgid "progress must be an integer percentage" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:101 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:105 +#: cinder/api/v1/snapshot_metadata.py:75 cinder/api/v1/volume_metadata.py:75 +#: cinder/api/v2/snapshot_metadata.py:75 cinder/api/v2/volume_metadata.py:74 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:108 +#: cinder/api/v1/snapshot_metadata.py:79 cinder/api/v1/volume_metadata.py:79 +#: cinder/api/v2/snapshot_metadata.py:79 cinder/api/v2/volume_metadata.py:78 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:150 +msgid "" +"Key names can only contain alphanumeric characters, underscores, periods," +" colons and hyphens." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:99 +#, python-format +msgid "" +"Invalid request to attach volume to an instance %(instance_uuid)s and a " +"host %(host_name)s simultaneously" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:107 +msgid "Invalid request to attach volume to an invalid target" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:111 +msgid "" +"Invalid request to attach volume with an invalid mode. Attaching mode " +"should be 'rw' or 'ro'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:196 +msgid "Unable to fetch connection information from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:216 +msgid "Unable to terminate volume connection from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:229 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:237 +msgid "Bad value for 'force' parameter." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:240 +msgid "'force' is not string or bool." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:280 +msgid "New volume size must be specified as an integer." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:299 +msgid "Must specify readonly in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:307 +msgid "Bad value for 'readonly'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:311 +msgid "'readonly' not string or bool" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:325 +msgid "New volume type must be specified." 
+msgstr "" + +#: cinder/api/contrib/volume_transfer.py:131 +msgid "Listing volume transfers" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:147 +#, python-format +msgid "Creating new volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:162 +#, python-format +msgid "Creating transfer of volume %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:183 +#, python-format +msgid "Accepting volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:196 +#, python-format +msgid "Accepting transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:217 +#, python-format +msgid "Delete transfer with id: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:64 +msgid "key_size must be non-negative" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:67 +msgid "key_size must be an integer" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:73 +msgid "provider must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:75 +msgid "control_location must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:83 +#, python-format +msgid "Valid control location are: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:111 +msgid "Cannot create encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:115 +msgid "Create body is not valid." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:157 +msgid "Cannot delete encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/middleware/auth.py:108 +msgid "Invalid service catalog json." +msgstr "" + +#: cinder/api/middleware/fault.py:44 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:53 cinder/api/openstack/wsgi.py:984 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/fault.py:69 +#, python-format +msgid "%(exception)s: %(explanation)s" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:55 cinder/api/middleware/sizelimit.py:64 +#: cinder/api/middleware/sizelimit.py:78 +msgid "Request is too large." +msgstr "" + +#: cinder/api/openstack/__init__.py:69 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:80 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:104 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:126 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." 
+msgstr "" + +#: cinder/api/openstack/wsgi.py:220 cinder/api/openstack/wsgi.py:634 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:639 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:677 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:682 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:685 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:793 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:799 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:803 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:914 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:917 cinder/api/openstack/wsgi.py:930 +#: cinder/api/v1/snapshot_metadata.py:53 cinder/api/v1/snapshot_metadata.py:71 +#: cinder/api/v1/snapshot_metadata.py:96 cinder/api/v1/snapshot_metadata.py:121 +#: cinder/api/v1/volume_metadata.py:53 cinder/api/v1/volume_metadata.py:71 +#: cinder/api/v1/volume_metadata.py:96 cinder/api/v1/volume_metadata.py:121 +#: cinder/api/v2/snapshot_metadata.py:53 cinder/api/v2/snapshot_metadata.py:71 +#: cinder/api/v2/snapshot_metadata.py:96 cinder/api/v2/snapshot_metadata.py:121 +#: cinder/api/v2/volume_metadata.py:52 cinder/api/v2/volume_metadata.py:70 +#: cinder/api/v2/volume_metadata.py:95 cinder/api/v2/volume_metadata.py:120 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:927 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:939 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:987 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:139 cinder/api/v2/limits.py:138 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:264 cinder/api/v2/limits.py:261 +msgid "This request was rate-limited." 
+msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:37 cinder/api/v1/snapshot_metadata.py:117 +#: cinder/api/v1/snapshot_metadata.py:156 cinder/api/v2/snapshot_metadata.py:37 +#: cinder/api/v2/snapshot_metadata.py:117 +#: cinder/api/v2/snapshot_metadata.py:156 +msgid "snapshot does not exist" +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:139 +#: cinder/api/v1/snapshot_metadata.py:149 cinder/api/v1/volume_metadata.py:139 +#: cinder/api/v1/volume_metadata.py:149 cinder/api/v2/snapshot_metadata.py:139 +#: cinder/api/v2/snapshot_metadata.py:149 cinder/api/v2/volume_metadata.py:138 +#: cinder/api/v2/volume_metadata.py:148 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:119 cinder/api/v2/snapshots.py:120 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:184 +msgid "'volume_id' must be specified" +msgstr "" + +#: cinder/api/v1/snapshots.py:182 cinder/api/v2/snapshots.py:193 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:186 cinder/api/v2/snapshots.py:202 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:37 cinder/api/v1/volume_metadata.py:117 +#: cinder/api/v1/volume_metadata.py:156 cinder/api/v2/volume_metadata.py:36 +#: cinder/api/v2/volume_metadata.py:116 cinder/api/v2/volume_metadata.py:155 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:111 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:290 cinder/api/v2/volumes.py:228 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:344 cinder/api/v1/volumes.py:348 +#: cinder/api/v2/volumes.py:298 cinder/api/v2/volumes.py:302 +msgid "Invalid imageRef provided." +msgstr "" + +#: cinder/api/v1/volumes.py:388 cinder/api/v2/volumes.py:354 +#, python-format +msgid "snapshot id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:401 +#, python-format +msgid "source vol id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:412 cinder/api/v2/volumes.py:377 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/v1/volumes.py:496 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/snapshots.py:111 cinder/api/v2/snapshots.py:126 +#: cinder/api/v2/snapshots.py:267 +msgid "Snapshot could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:174 cinder/api/v2/snapshots.py:234 +#: cinder/api/v2/volumes.py:313 cinder/api/v2/volumes.py:419 +#, python-format +msgid "Missing required element '%s' in request body" +msgstr "" + +#: cinder/api/v2/snapshots.py:190 cinder/api/v2/volumes.py:217 +#: cinder/api/v2/volumes.py:234 cinder/api/v2/volumes.py:449 +msgid "Volume could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:230 cinder/api/v2/volumes.py:415 +msgid "Missing request body" +msgstr "" + +#: cinder/api/v2/types.py:70 +msgid "Volume type not found" +msgstr "" + +#: cinder/api/v2/volumes.py:237 +msgid "Volume cannot be deleted while in attached state" +msgstr "" + +#: cinder/api/v2/volumes.py:343 +msgid "Volume type not found." 
+msgstr "" + +#: cinder/api/v2/volumes.py:366 +#, python-format +msgid "source volume id:%s not found" +msgstr "" + +#: cinder/api/v2/volumes.py:472 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:66 +msgid "Backup status must be available or error" +msgstr "" + +#: cinder/backup/api.py:105 +msgid "Volume to be backed up must be available" +msgstr "" + +#: cinder/backup/api.py:140 +msgid "Backup status must be available" +msgstr "" + +#: cinder/backup/api.py:145 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:154 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:170 +msgid "Volume to be restored to must be available" +msgstr "" + +#: cinder/backup/api.py:176 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." +msgstr "" + +#: cinder/backup/api.py:181 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:97 +msgid "NULL host not allowed for volume backend lookup." +msgstr "" + +#: cinder/backup/manager.py:100 +#, python-format +msgid "Checking hostname '%s' for backend info." +msgstr "" + +#: cinder/backup/manager.py:107 +#, python-format +msgid "Backend not found in hostname (%s) so using default." +msgstr "" + +#: cinder/backup/manager.py:117 +#, python-format +msgid "Manager requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:120 cinder/backup/manager.py:132 +msgid "Fetching default backend." +msgstr "" + +#: cinder/backup/manager.py:123 +#, python-format +msgid "Volume manager for backend '%s' does not exist." +msgstr "" + +#: cinder/backup/manager.py:129 +#, python-format +msgid "Driver requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:147 +#, python-format +msgid "" +"Registering backend %(backend)s (host=%(host)s " +"backend_name=%(backend_name)s)." +msgstr "" + +#: cinder/backup/manager.py:154 +#, python-format +msgid "Registering default backend %s." +msgstr "" + +#: cinder/backup/manager.py:158 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)." +msgstr "" + +#: cinder/backup/manager.py:165 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s." +msgstr "" + +#: cinder/backup/manager.py:184 +msgid "Cleaning up incomplete backup operations." +msgstr "" + +#: cinder/backup/manager.py:189 +#, python-format +msgid "Resetting volume %s to available (was backing-up)." +msgstr "" + +#: cinder/backup/manager.py:194 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)." +msgstr "" + +#: cinder/backup/manager.py:206 +#, python-format +msgid "Resetting backup %s to error (was creating)." +msgstr "" + +#: cinder/backup/manager.py:212 +#, python-format +msgid "Resetting backup %s to available (was restoring)." +msgstr "" + +#: cinder/backup/manager.py:217 +#, python-format +msgid "Resuming delete on backup: %s." +msgstr "" + +#: cinder/backup/manager.py:225 +#, python-format +msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:237 +#, python-format +msgid "" +"Create backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s." 
+msgstr "" + +#: cinder/backup/manager.py:249 +#, python-format +msgid "" +"Create backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:282 +#, python-format +msgid "Create backup finished. backup: %s." +msgstr "" + +#: cinder/backup/manager.py:286 +#, python-format +msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:299 +#, python-format +msgid "" +"Restore backup aborted: expected volume status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:310 +#, python-format +msgid "" +"Restore backup aborted: expected backup status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:329 +#, python-format +msgid "" +"Restore backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:360 +#, python-format +msgid "" +"Restore backup finished, backup %(backup_id)s restored to volume " +"%(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:379 +#, python-format +msgid "Delete backup started, backup: %s." +msgstr "" + +#: cinder/backup/manager.py:386 +#, python-format +msgid "" +"Delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:399 +#, python-format +msgid "" +"Delete backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:422 +#, python-format +msgid "Delete backup finished, backup %s deleted." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:116 +msgid "" +"rbd striping not supported - ignoring configuration settings for rbd " +"striping" +msgstr "" + +#: cinder/backup/drivers/ceph.py:147 +#, python-format +msgid "invalid user '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:213 +msgid "backup_id required" +msgstr "" + +#: cinder/backup/drivers/ceph.py:224 +#, python-format +msgid "discarding %(length)s bytes from offset %(offset)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:232 +#, python-format +msgid "writing zeroes chunk %d" +msgstr "" + +#: cinder/backup/drivers/ceph.py:246 +#, python-format +msgid "transferring data between '%(src)s' and '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:250 +#, python-format +msgid "%(chunks)s chunks of %(bytes)s bytes to be transferred" +msgstr "" + +#: cinder/backup/drivers/ceph.py:269 +#, python-format +msgid "transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:279 +#, python-format +msgid "transferring remaining %s bytes" +msgstr "" + +#: cinder/backup/drivers/ceph.py:295 +#, python-format +msgid "creating base image '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:322 cinder/backup/drivers/ceph.py:603 +#, python-format +msgid "deleting backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:325 +msgid "no backup snapshot to delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:361 +#, python-format +msgid "trying diff format name format basename='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:369 +#, python-format +msgid "image %s not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:377 +#, python-format +msgid "base image still has %s snapshots so skipping base image delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:382 +#, python-format +msgid "deleting base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:389 +#, python-format +msgid "image busy, retrying %(retries)s more time(s) in %(delay)ss" +msgstr "" + +#: cinder/backup/drivers/ceph.py:394 +msgid "max retries reached - raising error" +msgstr "" + +#: cinder/backup/drivers/ceph.py:397 +#, python-format +msgid "base backup image='%s' deleted)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:407 +#, python-format +msgid "deleting source snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:453 +#, python-format +msgid "performing differential transfer from '%(src)s' to '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:478 +#, python-format +msgid "rbd diff op failed - (ret=%(ret)s stderr=%(stderr)s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:488 +#, python-format +msgid "image '%s' not found - trying diff format name" +msgstr "" + +#: cinder/backup/drivers/ceph.py:493 +#, python-format +msgid "diff format image '%s' not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:528 +#, python-format +msgid "using --from-snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:543 +#, python-format +msgid "source snap '%s' is stale so deleting" +msgstr "" + +#: cinder/backup/drivers/ceph.py:555 +#, python-format +msgid "" +"snap='%(snap)s' does not exist in base image='%(base)s' - aborting " +"incremental backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:566 +#, python-format +msgid "creating backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:586 +#, python-format +msgid "differential backup transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:595 +msgid "differential backup transfer failed" +msgstr "" + +#: 
cinder/backup/drivers/ceph.py:625 +#, python-format +msgid "creating base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:634 +msgid "copying data" +msgstr "" + +#: cinder/backup/drivers/ceph.py:694 +#, python-format +msgid "looking for snapshot of backup base '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:697 +#, python-format +msgid "backup base '%s' has no snapshots" +msgstr "" + +#: cinder/backup/drivers/ceph.py:704 +#, python-format +msgid "backup '%s' has no snapshot" +msgstr "" + +#: cinder/backup/drivers/ceph.py:708 +#, python-format +msgid "backup should only have one snapshot but instead has %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:713 +#, python-format +msgid "found snapshot '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:734 +msgid "need non-zero volume size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:751 +#, python-format +msgid "Starting backup of volume='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:764 +msgid "forcing full backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:776 +#, python-format +msgid "backup '%s' finished." +msgstr "" + +#: cinder/backup/drivers/ceph.py:834 +msgid "adjusting restore vol size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:846 +#, python-format +msgid "trying incremental restore from base='%(base)s' snap='%(snap)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:858 +msgid "differential restore failed, trying full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:869 +#, python-format +msgid "restore transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:916 +#, python-format +msgid "rbd has %s extents" +msgstr "" + +#: cinder/backup/drivers/ceph.py:938 +msgid "dest volume is original volume - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:959 +msgid "destination has extents - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:964 +#, python-format +msgid "no restore point found for backup='%s', forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:995 +msgid "forcing full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1005 +#, python-format +msgid "starting restore from Ceph backup=%(src)s to volume=%(dest)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1016 +msgid "volume_file does not support fileno() so skipping fsync()" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1021 +msgid "restore finished successfully." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:1023 +#, python-format +msgid "restore finished with error - %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1029 +#, python-format +msgid "delete started for backup=%s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1034 +msgid "rbd image not found but continuing anyway so that db entry can be removed" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1037 +#, python-format +msgid "delete '%s' finished with warning" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1039 +#, python-format +msgid "delete '%s' finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:106 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:123 +#, python-format +msgid "single_user auth mode enabled, but %(param)s not set" +msgstr "" + +#: cinder/backup/drivers/swift.py:141 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:146 +#, python-format +msgid "container %s does not exist" +msgstr "" + +#: cinder/backup/drivers/swift.py:151 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/drivers/swift.py:157 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:173 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:182 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:192 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:209 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/drivers/swift.py:214 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:219 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:224 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/drivers/swift.py:234 +#, python-format +msgid "volume size %d is invalid." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:248 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:271 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/drivers/swift.py:278 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:287 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/drivers/swift.py:291 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/drivers/swift.py:297 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:301 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:304 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:312 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/drivers/swift.py:328 cinder/backup/drivers/tsm.py:324 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/drivers/swift.py:345 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/drivers/swift.py:350 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:356 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/drivers/swift.py:362 +#, python-format +msgid "" +"restoring object from swift. backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:378 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/drivers/swift.py:401 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:409 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:423 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:428 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:432 cinder/backup/drivers/tsm.py:378 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:446 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:455 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:458 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:468 cinder/backup/drivers/tsm.py:440 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/backup/drivers/tsm.py:85 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to create device hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:143 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to obtain backup success notification from " +"server.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:173 +#, python-format +msgid "" +"restore: %(vol_id)s Failed.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:199 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a block device." +msgstr "" + +#: cinder/backup/drivers/tsm.py:206 +#, python-format +msgid "backup: %(vol_id)s Failed. Cannot obtain real path to device %(path)s." +msgstr "" + +#: cinder/backup/drivers/tsm.py:213 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a file." +msgstr "" + +#: cinder/backup/drivers/tsm.py:260 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to remove backup hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:286 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to TSM, volume path: " +"%(volume_path)s," +msgstr "" + +#: cinder/backup/drivers/tsm.py:298 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:308 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:338 +#, python-format +msgid "" +"restore: starting restore of backup from TSM to volume %(volume_id)s, " +"backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:352 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:362 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:413 +#, python-format +msgid "" +"delete: %(vol_id)s Failed to run dsmc with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:421 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments with " +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:432 +#, python-format +msgid "" +"delete: %(vol_id)s Failed with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/brick/exception.py:55 +#, python-format +msgid "Exception in string format operation. 
msg='%s'" +msgstr "" + +#: cinder/brick/exception.py:85 +msgid "We are unable to locate any Fibre Channel devices." +msgstr "" + +#: cinder/brick/exception.py:89 +msgid "Unable to find a Fibre Channel volume device." +msgstr "" + +#: cinder/brick/exception.py:93 +#, python-format +msgid "Volume device not found at %(device)s." +msgstr "" + +#: cinder/brick/exception.py:97 +#, python-format +msgid "Unable to find Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:101 +#, python-format +msgid "Failed to create Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:105 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:109 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:113 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:117 +#, python-format +msgid "Connect to volume via protocol %(protocol)s not supported." +msgstr "" + +#: cinder/brick/initiator/connector.py:127 +#, python-format +msgid "Invalid InitiatorConnector protocol specified %(protocol)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:140 +#, python-format +msgid "Failed to access the device on the path %(path)s: %(error)s %(info)s." +msgstr "" + +#: cinder/brick/initiator/connector.py:229 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. Try" +" number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:242 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:317 +#, python-format +msgid "Could not find the iSCSI Initiator File %s" +msgstr "" + +#: cinder/brick/initiator/connector.py:609 +msgid "We are unable to locate any Fibre Channel devices" +msgstr "" + +#: cinder/brick/initiator/connector.py:619 +#, python-format +msgid "Looking for Fibre Channel dev %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:629 +msgid "Fibre Channel volume device not found." +msgstr "" + +#: cinder/brick/initiator/connector.py:633 +#, python-format +msgid "Fibre volume not yet found. Will rescan & retry. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:649 +#, python-format +msgid "Found Fibre Channel volume %(name)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:658 +#, python-format +msgid "Multipath device discovered %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:776 +#, python-format +msgid "AoE volume not yet found at: %(path)s. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:789 +#, python-format +msgid "Found AoE device %(path)s (after %(tries)s rediscover)" +msgstr "" + +#: cinder/brick/initiator/connector.py:815 +#, python-format +msgid "aoe-discover: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:825 +#, python-format +msgid "aoe-revalidate %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:834 +#, python-format +msgid "aoe-flush %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:858 +msgid "" +"Connection details not present. RemoteFsClient may not initialize " +"properly." 
+msgstr "" + +#: cinder/brick/initiator/connector.py:915 +msgid "Invalid connection_properties specified no device_path attribute" +msgstr "" + +#: cinder/brick/initiator/linuxfc.py:50 cinder/brick/initiator/linuxfc.py:56 +msgid "systool is not installed" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:99 +#: cinder/brick/initiator/linuxscsi.py:107 +#: cinder/brick/initiator/linuxscsi.py:124 +#, python-format +msgid "multipath call failed exit (%(code)s)" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:145 +#, python-format +msgid "Couldn't find multipath device %(line)s" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:149 +#, python-format +msgid "Found multipath device = %(mdev)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:140 +msgid "Attempting recreate of backing lun..." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:158 +#, python-format +msgid "" +"Failed to recover attempt to create iscsi backing lun for volume " +"id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:177 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:184 +#, python-format +msgid "" +"Created volume path %(vp)s,\n" +"content: %(vc)%" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:216 cinder/brick/iscsi/iscsi.py:365 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:227 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:258 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:262 +#, python-format +msgid "Volume path %s does not exist, nothing to remove." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:280 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:290 cinder/brick/iscsi/iscsi.py:550 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:375 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:469 +msgid "cinder-rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:489 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:513 cinder/brick/iscsi/iscsi.py:522 +#, python-format +msgid "Failed to create iscsi target for volume id:%s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:532 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:542 +#, python-format +msgid "Failed to remove iscsi target for volume id:%s." 
+msgstr "" + +#: cinder/brick/iscsi/iscsi.py:571 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 cinder/brick/local_dev/lvm.py:158 +#: cinder/brick/local_dev/lvm.py:474 cinder/brick/local_dev/lvm.py:503 +#: cinder/brick/local_dev/lvm.py:546 cinder/brick/local_dev/lvm.py:609 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 cinder/brick/local_dev/lvm.py:159 +#: cinder/brick/local_dev/lvm.py:475 cinder/brick/local_dev/lvm.py:504 +#: cinder/brick/local_dev/lvm.py:547 cinder/brick/local_dev/lvm.py:610 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 cinder/brick/local_dev/lvm.py:160 +#: cinder/brick/local_dev/lvm.py:476 cinder/brick/local_dev/lvm.py:505 +#: cinder/brick/local_dev/lvm.py:548 cinder/brick/local_dev/lvm.py:611 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, python-format +msgid "Unable to locate Volume Group %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:157 +msgid "Error querying thin pool about data_percent" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:370 +#, python-format +msgid "Unable to find VG: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:420 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:473 +msgid "Error creating Volume" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:489 +#, python-format +msgid "Unable to find LV: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:502 +msgid "Error creating snapshot" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:545 +msgid "Error activating LV" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:563 +#, python-format +msgid "Error reported running lvremove: CMD: %(command)s, RESPONSE: %(response)s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:568 +msgid "Attempting udev settle and retry of lvremove..." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:608 +msgid "Error extending Volume" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:39 +msgid "nfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:45 +msgid "glusterfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:86 +#, python-format +msgid "Already mounted: %s" +msgstr "" + +#: cinder/common/config.py:125 +msgid "Deploy v1 of the Cinder API." +msgstr "" + +#: cinder/common/config.py:128 +msgid "Deploy v2 of the Cinder API." +msgstr "" + +#: cinder/common/sqlalchemyutils.py:66 +#: cinder/openstack/common/db/sqlalchemy/utils.py:72 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:114 +#: cinder/openstack/common/db/sqlalchemy/utils.py:120 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/compute/nova.py:97 +#, python-format +msgid "Novaclient connection created using URL: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:63 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:190 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:843 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1842 +#, python-format +msgid "VolumeType %s deletion failed, VolumeType in use." 
+msgstr "" + +#: cinder/db/sqlalchemy/api.py:2530 +#, python-format +msgid "No backup with id %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2615 +msgid "Volume must be available" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2639 +#, python-format +msgid "Volume in unexpected state %s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2662 +#, python-format +msgid "" +"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " +"%(status)s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:37 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:64 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:240 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:269 +msgid "Downgrade from initial Cinder install is unsupported." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:49 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:74 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:105 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:45 +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:48 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:46 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:127 +msgid "Dropping foreign key reservations_ibfk_1 failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:133 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:140 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:147 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:60 +msgid "Exception while creating table 'volume_glance_metadata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:75 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:68 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:58 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:61 +msgid "transfers table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:31 +msgid "migrations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:61 +#, python-format +msgid "Table |%s| not created" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:37 +#, python-format +msgid "Exception while dropping table %s." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:100 +#, python-format +msgid "Exception while creating table %s." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:34 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:43 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:49 +#, python-format +msgid "Column |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:92 +msgid "encryption_key_id column not dropped from volumes" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:100 +msgid "encryption_key_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:105 +msgid "volume_type_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:113 +msgid "encryption table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:49 +msgid "Table quality_of_service_specs not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:60 +msgid "Added qos_specs_id column to volume type table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:85 +msgid "Dropping foreign key volume_types_ibfk_1 failed" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:93 +msgid "Dropping qos_specs_id column failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:100 +msgid "Dropping quality_of_service_specs table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:59 +msgid "volume_admin_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:48 +msgid "" +"Found existing 'default' entries in the quota_classes table. Skipping " +"insertion of default values." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:72 +msgid "Added default quota class data into the DB." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:74 +msgid "Default quota class data not inserted into the DB." +msgstr "" + +#: cinder/image/glance.py:161 cinder/image/glance.py:169 +#, python-format +msgid "Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:94 cinder/image/image_utils.py:199 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/image/image_utils.py:101 +#, python-format +msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:109 cinder/image/image_utils.py:192 +#, python-format +msgid "" +"Size is %(image_size)dGB and doesn't fit in a volume of size " +"%(volume_size)dGB." +msgstr "" + +#: cinder/image/image_utils.py:157 +#, python-format +msgid "" +"qemu-img is not installed and image is of type %s. Only RAW images can " +"be used if qemu-img is not installed." +msgstr "" + +#: cinder/image/image_utils.py:164 +msgid "" +"qemu-img is not installed and the disk format is not specified. Only RAW" +" images can be used if qemu-img is not installed." 
+msgstr "" + +#: cinder/image/image_utils.py:178 +#, python-format +msgid "Copying image from %(tmp)s to volume %(dest)s - size: %(size)s" +msgstr "" + +#: cinder/image/image_utils.py:206 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:224 +#, python-format +msgid "Converted to %(vol_format)s, but format is now %(file_format)s" +msgstr "" + +#: cinder/image/image_utils.py:260 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:78 +msgid "" +"config option keymgr.fixed_key has not been defined: some operations may " +"fail unexpectedly" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:80 +msgid "keymgr.fixed_key not defined" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:134 +#, python-format +msgid "Not deleting key %s" +msgstr "" + +#: cinder/openstack/common/eventlet_backdoor.py:140 +#, python-format +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/fileutils.py:64 +#, python-format +msgid "Reloading cached file %s" +msgstr "" + +#: cinder/openstack/common/gettextutils.py:252 +msgid "Message objects do not support addition." +msgstr "" + +#: cinder/openstack/common/gettextutils.py:261 +msgid "" +"Message objects do not support str() because they may contain non-ascii " +"characters. Please use unicode() or translate() instead." +msgstr "" + +#: cinder/openstack/common/imageutils.py:96 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:189 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:200 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:227 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:235 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/log.py:301 +#, python-format +msgid "Deprecated: %s" +msgstr "" + +#: cinder/openstack/common/log.py:402 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:453 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:623 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:82 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:89 +#: cinder/tests/brick/test_brick_connector.py:466 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:129 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:136 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:43 +#, python-format +msgid "Unexpected argument for periodic task creation: %(arg)s." 
+msgstr "" + +#: cinder/openstack/common/periodic_task.py:134 +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:139 +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:177 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:186 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." +msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:127 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/openstack/common/processutils.py:142 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:167 +#: cinder/openstack/common/processutils.py:239 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:345 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:179 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/openstack/common/processutils.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:318 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:220 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/openstack/common/processutils.py:224 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/openstack/common/service.py:175 +#: cinder/openstack/common/service.py:269 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:187 +msgid "Exception during rpc cleanup." 
+msgstr "" + +#: cinder/openstack/common/service.py:238 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:275 +msgid "Unhandled exception" +msgstr "" + +#: cinder/openstack/common/service.py:308 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/openstack/common/service.py:327 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/openstack/common/service.py:337 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/openstack/common/service.py:354 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/openstack/common/service.py:358 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/service.py:362 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/openstack/common/service.py:392 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/openstack/common/service.py:410 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/openstack/common/sslutils.py:98 +#, python-format +msgid "Invalid SSL version : %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:86 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/strutils.py:182 +#, python-format +msgid "Invalid string format: %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:189 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/openstack/common/versionutils.py:69 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and " +"may be removed in %(remove_in)s." +msgstr "" + +#: cinder/openstack/common/versionutils.py:73 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s and may be removed in " +"%(remove_in)s. It will not be superseded." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:29 +msgid "An unknown error occurred in crypto utils." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:36 +#, python-format +msgid "Block size of %(given)d is too big, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:45 +#, python-format +msgid "Length of %(given)d is too long, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/db/exception.py:44 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:487 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:538 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:610 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/utils.py:33 +msgid "Sort key supplied was not valid." +msgstr "" + +#: cinder/openstack/common/notifier/api.py:129 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:145 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:164 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. 
Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:105 +#, python-format +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:83 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:216 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshold: %d. There " +"could be a MulticallProxyWaiter leak." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:299 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:345 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "received %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:422 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:423 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:280 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:459 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:594 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:597 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:631 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:640 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:668 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "<unknown>" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." 
+msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:166 +#: cinder/openstack/common/rpc/impl_qpid.py:164 +msgid "Failed to process message... skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:477 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:499 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:536 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:552 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:606 +#: cinder/openstack/common/rpc/impl_qpid.py:507 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:624 +#: cinder/openstack/common/rpc/impl_qpid.py:522 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:628 +#: cinder/openstack/common/rpc/impl_qpid.py:526 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:667 +#: cinder/openstack/common/rpc/impl_qpid.py:561 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:84 +#, python-format +msgid "Invalid value for qpid_topology_version: %d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:455 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:461 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:474 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:534 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:101 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:136 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:137 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:138 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:146 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:158 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:200 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:205 +msgid "You cannot send on this socket." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:267 +#, python-format +msgid "Running func with context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:305 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:387 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:437 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:443 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:475 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:481 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:497 +#, python-format +msgid "Required IPC directory does not exist at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:506 +#, python-format +msgid "Permission denied to IPC directory at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:509 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:543 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:562 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:590 +msgid "Skipping topic registration. Already registered." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:597 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:649 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:662 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:675 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:678 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:681 +#, python-format +msgid "Received message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:682 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:691 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:698 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:721 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:724 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:728 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:731 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:771 +#, python-format +msgid "topic is %s." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:815 +#, python-format +msgid "rpc_zmq_matchmaker = %(orig)s is deprecated; use %(new)s instead" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#: cinder/openstack/common/rpc/matchmaker_ring.py:79 +#: cinder/openstack/common/rpc/matchmaker_ring.py:97 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:54 +#, python-format +msgid "extra_spec requirement '%(req)s' does not match '%(cap)s'" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:67 +#, python-format +msgid "%(host_state)s fails resource_type extra_specs requirements" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:43 +msgid "Re-scheduling is disabled." +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:52 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/scheduler/driver.py:69 +msgid "Must implement host_passes_filters" +msgstr "" + +#: cinder/scheduler/driver.py:74 +msgid "Must implement find_retype_host" +msgstr "" + +#: cinder/scheduler/driver.py:78 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:82 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:98 +#, python-format +msgid "cannot place volume %(id)s on %(host)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:114 +#, python-format +msgid "No valid hosts for volume %(id)s with type %(type)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:125 +#, python-format +msgid "" +"Current host not valid for volume %(id)s with type %(type)s, migration " +"not allowed" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:156 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:174 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:207 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:259 +#, python-format +msgid "Filtered %s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:276 +#, python-format +msgid "Choosing %s" +msgstr "" + +#: cinder/scheduler/host_manager.py:264 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:269 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:294 +#, python-format +msgid "volume service is down or disabled. (host: %s)" +msgstr "" + +#: cinder/scheduler/manager.py:63 +msgid "" +"ChanceScheduler and SimpleScheduler have been deprecated due to lack of " +"support for advanced features like: volume types, volume encryption, QoS " +"etc. These two schedulers can be fully replaced by FilterScheduler with " +"certain combination of filters and weighers." 
+msgstr "" + +#: cinder/scheduler/manager.py:98 cinder/scheduler/manager.py:100 +msgid "Failed to create scheduler manager volume flow" +msgstr "" + +#: cinder/scheduler/manager.py:159 +msgid "New volume type not specified in request_spec." +msgstr "" + +#: cinder/scheduler/manager.py:174 +#, python-format +msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." +msgstr "" + +#: cinder/scheduler/manager.py:192 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:68 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%s'" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:43 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:57 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:53 +msgid "No volume_id provided to populate a request_spec from" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:116 +#, python-format +msgid "Failed to schedule_create_volume: %(cause)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:135 +#, python-format +msgid "Failed notifying on %(topic)s payload %(payload)s" +msgstr "" + +#: cinder/tests/fake_driver.py:57 cinder/volume/driver.py:784 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:76 cinder/volume/driver.py:884 +#, python-format +msgid "FAKE ISER: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:97 +msgid "local_path not implemented" +msgstr "" + +#: cinder/tests/fake_driver.py:124 cinder/tests/fake_driver.py:129 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:70 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:78 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:94 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:97 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:58 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_netapp_nfs.py:360 +#, python-format +msgid "Share %(share)s and file name %(file_name)s" +msgstr "" + +#: cinder/tests/test_rbd.py:768 cinder/volume/drivers/rbd.py:175 +msgid "flush() not supported in this version of librbd" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:260 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1507 +#, python-format +msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1510 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1515 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:60 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:61 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: 
cinder/tests/test_xiv_ds8k.py:102 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/tests/api/contrib/test_backups.py:741 +msgid "Invalid input" +msgstr "" + +#: cinder/tests/integrated/test_login.py:29 +#, python-format +msgid "volume: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:32 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:42 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:50 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:58 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:100 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:103 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:121 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:148 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:159 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:166 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/transfer/api.py:68 +msgid "Volume in unexpected state" +msgstr "" + +#: cinder/transfer/api.py:102 cinder/volume/api.py:367 +msgid "status must be available" +msgstr "" + +#: cinder/transfer/api.py:119 +#, python-format +msgid "Failed to create transfer record for %s" +msgstr "" + +#: cinder/transfer/api.py:136 +#, python-format +msgid "Attempt to transfer %s with invalid auth key." +msgstr "" + +#: cinder/transfer/api.py:156 cinder/volume/flows/api/create_volume.py:508 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/transfer/api.py:182 +#, python-format +msgid "Failed to update quota donating volume transfer id %s" +msgstr "" + +#: cinder/transfer/api.py:199 +#, python-format +msgid "Volume %s has been transferred."
+msgstr "" + +#: cinder/volume/api.py:143 +#, python-format +msgid "Unable to query if %s is in the availability zone set" +msgstr "" + +#: cinder/volume/api.py:171 cinder/volume/api.py:173 +msgid "Failed to create api volume flow" +msgstr "" + +#: cinder/volume/api.py:202 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:214 +#, python-format +msgid "Volume status must be available or error, but current status is: %s" +msgstr "" + +#: cinder/volume/api.py:224 +msgid "Volume cannot be deleted while migrating" +msgstr "" + +#: cinder/volume/api.py:229 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:285 cinder/volume/api.py:350 +#: cinder/volume/qos_specs.py:240 cinder/volume/volume_types.py:67 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:370 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:377 +msgid "status must be in-use to detach" +msgstr "" + +#: cinder/volume/api.py:388 +msgid "Volume status must be available to reserve" +msgstr "" + +#: cinder/volume/api.py:464 +msgid "Snapshot cannot be created while volume is migrating" +msgstr "" + +#: cinder/volume/api.py:468 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:490 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:502 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:553 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/api.py:581 cinder/volume/flows/api/create_volume.py:208 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:585 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:589 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:720 cinder/volume/api.py:772 +msgid "Volume status must be available/in-use." +msgstr "" + +#: cinder/volume/api.py:723 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/api.py:752 +msgid "Volume status must be available to extend." +msgstr "" + +#: cinder/volume/api.py:757 +#, python-format +msgid "" +"New size for extend must be greater than current size. (current: " +"%(size)s, extended: %(new_size)s)" +msgstr "" + +#: cinder/volume/api.py:778 +msgid "Volume is already part of an active migration" +msgstr "" + +#: cinder/volume/api.py:784 +msgid "volume must not have snapshots" +msgstr "" + +#: cinder/volume/api.py:797 +#, python-format +msgid "No available service named %s" +msgstr "" + +#: cinder/volume/api.py:803 +msgid "Destination host must be different than current host" +msgstr "" + +#: cinder/volume/api.py:833 +msgid "Source volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:837 +msgid "Destination volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:842 +#, python-format +msgid "Destination has migration_status %(stat)s, expected %(exp)s." +msgstr "" + +#: cinder/volume/api.py:853 +msgid "Volume status must be available to update readonly flag." 
+msgstr "" + +#: cinder/volume/api.py:862 +#, python-format +msgid "Unable to update type due to incorrect status on volume: %s" +msgstr "" + +#: cinder/volume/api.py:868 +#, python-format +msgid "Volume %s is already part of an active migration." +msgstr "" + +#: cinder/volume/api.py:874 +#, python-format +msgid "migration_policy must be 'on-demand' or 'never', passed: %s" +msgstr "" + +#: cinder/volume/api.py:887 +#, python-format +msgid "Invalid volume_type passed: %s" +msgstr "" + +#: cinder/volume/api.py:900 +#, python-format +msgid "New volume_type same as original: %s" +msgstr "" + +#: cinder/volume/api.py:915 +msgid "Retype cannot change encryption requirements" +msgstr "" + +#: cinder/volume/api.py:927 +msgid "Retype cannot change front-end qos specs for in-use volumes" +msgstr "" + +#: cinder/volume/driver.py:189 cinder/volume/drivers/netapp/nfs.py:174 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:282 +#, python-format +msgid "copy_data_between_volumes %(src)s -> %(dest)s." +msgstr "" + +#: cinder/volume/driver.py:295 cinder/volume/driver.py:309 +#, python-format +msgid "Failed to attach volume %(vol)s" +msgstr "" + +#: cinder/volume/driver.py:327 +#, python-format +msgid "Failed to copy volume %(src)s to %(dest)d" +msgstr "" + +#: cinder/volume/driver.py:340 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:358 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:394 +#, python-format +msgid "Unable to access the backend storage via the path %(path)s." +msgstr "" + +#: cinder/volume/driver.py:433 +#, python-format +msgid "Creating a new backup for volume %s." +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Restoring backup %(backup)s to volume %(volume)s." +msgstr "" + +#: cinder/volume/driver.py:474 +msgid "Extend volume not implemented" +msgstr "" + +#: cinder/volume/driver.py:533 cinder/volume/drivers/emc/emc_smis_iscsi.py:113 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:546 +#, python-format +msgid "ISCSI discovery attempt failed for:%s" +msgstr "" + +#: cinder/volume/driver.py:548 +#, python-format +msgid "Error from iscsiadm -m discovery: %s" +msgstr "" + +#: cinder/volume/driver.py:595 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:599 cinder/volume/drivers/emc/emc_smis_iscsi.py:156 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:696 +msgid "The volume driver requires the iSCSI initiator name in the connector." +msgstr "" + +#: cinder/volume/driver.py:726 cinder/volume/driver.py:845 +#: cinder/volume/drivers/eqlx.py:247 cinder/volume/drivers/lvm.py:359 +#: cinder/volume/drivers/zadara.py:650 +#: cinder/volume/drivers/emc/emc_smis_common.py:859 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:235 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:602 +#: cinder/volume/drivers/netapp/iscsi.py:1032 +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/nexenta/iscsi.py:538 +#: cinder/volume/drivers/windows/windows.py:205 +msgid "Updating volume stats" +msgstr "" + +#: cinder/volume/driver.py:924 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:203 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." 
+msgstr "" + +#: cinder/volume/manager.py:209 +msgid "" +"ThinLVMVolumeDriver is deprecated, please configure LVMISCSIDriver and " +"lvm_type=thin. Continuing with those settings." +msgstr "" + +#: cinder/volume/manager.py:228 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)" +msgstr "" + +#: cinder/volume/manager.py:235 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s" +msgstr "" + +#: cinder/volume/manager.py:244 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:257 +#, python-format +msgid "Failed to re-export volume %s: setting to error state" +msgstr "" + +#: cinder/volume/manager.py:264 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:271 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:273 +#, python-format +msgid "" +"Error encountered during re-exporting phase of driver initialization: " +"%(name)s" +msgstr "" + +#: cinder/volume/manager.py:283 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:328 cinder/volume/manager.py:330 +msgid "Failed to create manager volume flow" +msgstr "" + +#: cinder/volume/manager.py:374 cinder/volume/manager.py:391 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:380 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:389 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:394 +#, python-format +msgid "Cannot delete volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:422 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:427 +#, python-format +msgid "volume %s: glance metadata deleted" +msgstr "" + +#: cinder/volume/manager.py:430 +#, python-format +msgid "no glance metadata found for volume %s" +msgstr "" + +#: cinder/volume/manager.py:434 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:451 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:462 +#, python-format +msgid "snapshot %(snap_id)s: creating" +msgstr "" + +#: cinder/volume/manager.py:490 +#, python-format +msgid "" +"Failed updating %(snapshot_id)s metadata using the provided volumes " +"%(volume_id)s metadata" +msgstr "" + +#: cinder/volume/manager.py:496 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:508 cinder/volume/manager.py:518 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:526 +#, python-format +msgid "Cannot delete snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:556 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:559 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:579 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:583 +msgid "being attached by another host" +msgstr "" + +#: cinder/volume/manager.py:587 +msgid "being attached by different mode" +msgstr "" + +#: cinder/volume/manager.py:590 +msgid "status must be available or attaching" +msgstr "" + +#: cinder/volume/manager.py:698 +#, python-format +msgid 
"Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "" + +#: cinder/volume/manager.py:760 +#, python-format +msgid "Unable to fetch connection information from backend: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:807 +#, python-format +msgid "Unable to terminate volume connection: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:854 +msgid "failed to create new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:857 +msgid "timeout creating new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:880 +#, python-format +msgid "Failed to copy volume %(vol1)s to %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:909 +#, python-format +msgid "" +"migrate_volume_completion: completing migration for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:921 +#, python-format +msgid "" +"migrate_volume_completion is cleaning up an error for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:940 +#, python-format +msgid "Failed to delete migration source vol %(vol)s: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:976 +#, python-format +msgid "volume %s: calling driver migrate_volume" +msgstr "" + +#: cinder/volume/manager.py:1016 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/manager.py:1024 +#, python-format +msgid "" +"Unable to update stats, %(driver_name)s -%(driver_version)s " +"%(config_group)s driver is uninitialized." +msgstr "" + +#: cinder/volume/manager.py:1044 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/manager.py:1091 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to extend volume by %(s_size)sG, " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/manager.py:1103 +#, python-format +msgid "volume %s: extending" +msgstr "" + +#: cinder/volume/manager.py:1105 +#, python-format +msgid "volume %s: extended successfully" +msgstr "" + +#: cinder/volume/manager.py:1107 +#, python-format +msgid "volume %s: Error trying to extend volume" +msgstr "" + +#: cinder/volume/manager.py:1169 +msgid "Failed to update usages while retyping volume." +msgstr "" + +#: cinder/volume/manager.py:1170 +msgid "Failed to get old volume type quota reservations" +msgstr "" + +#: cinder/volume/manager.py:1190 +#, python-format +msgid "Volume %s: retyped succesfully" +msgstr "" + +#: cinder/volume/manager.py:1193 +#, python-format +msgid "" +"Volume %s: driver error when trying to retype, falling back to generic " +"mechanism." +msgstr "" + +#: cinder/volume/manager.py:1204 +msgid "Retype requires migration but is not allowed." +msgstr "" + +#: cinder/volume/manager.py:1212 +msgid "Volume must not have snapshots." 
+msgstr "" + +#: cinder/volume/qos_specs.py:57 +#, python-format +msgid "Valid consumer of QoS specs are: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:84 cinder/volume/qos_specs.py:105 +#: cinder/volume/qos_specs.py:155 cinder/volume/qos_specs.py:197 +#: cinder/volume/qos_specs.py:211 cinder/volume/qos_specs.py:225 +#: cinder/volume/volume_types.py:43 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:123 cinder/volume/qos_specs.py:140 +#: cinder/volume/qos_specs.py:272 cinder/volume/volume_types.py:52 +#: cinder/volume/volume_types.py:99 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/qos_specs.py:156 +#, python-format +msgid "Failed to get all associations of qos specs %s" +msgstr "" + +#: cinder/volume/qos_specs.py:189 +#, python-format +msgid "" +"Type %(type_id)s is already associated with another qos specs: " +"%(qos_specs_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:198 +#, python-format +msgid "Failed to associate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:212 +#, python-format +msgid "Failed to disassociate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:226 +#, python-format +msgid "Failed to disassociate qos specs %s." +msgstr "" + +#: cinder/volume/qos_specs.py:284 cinder/volume/volume_types.py:111 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/utils.py:144 +#, python-format +msgid "" +"Incorrect value error: %(blocksize)s, it may indicate that " +"'volume_dd_blocksize' was configured incorrectly. Fall back to default." +msgstr "" + +#: cinder/volume/volume_types.py:130 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:131 +#: cinder/volume/drivers/block_device.py:143 cinder/volume/drivers/lvm.py:654 +#: cinder/volume/drivers/lvm.py:669 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:157 cinder/volume/drivers/lvm.py:687 +#, python-format +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:183 cinder/volume/drivers/lvm.py:483 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:200 cinder/volume/drivers/lvm.py:504 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:272 cinder/volume/drivers/lvm.py:227 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:287 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:311 cinder/volume/drivers/lvm.py:300 +#: cinder/volume/drivers/zadara.py:509 cinder/volume/drivers/nexenta/nfs.py:189 +#, python-format +msgid "Creating clone of volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:380 +msgid "No free disk" +msgstr "" + +#: cinder/volume/drivers/block_device.py:393 +msgid "No big enough free disk" +msgstr "" + +#: cinder/volume/drivers/coraid.py:84 +#, python-format +msgid "Invalid ESM url scheme \"%s\". Supported https only." +msgstr "" + +#: cinder/volume/drivers/coraid.py:111 +msgid "Invalid REST handle name. Expected path." 
+msgstr "" + +#: cinder/volume/drivers/coraid.py:134 +#, python-format +msgid "Call to json.loads() failed: %(ex)s. Response: %(resp)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:224 +msgid "Session is expired. Relogin on ESM." +msgstr "" + +#: cinder/volume/drivers/coraid.py:244 +msgid "Reply is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:246 +msgid "Error message is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:284 +#, python-format +msgid "Coraid Appliance ping failed: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:297 +#, python-format +msgid "Volume \"%(name)s\" created with VSX LUN \"%(lun)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:311 +#, python-format +msgid "Volume \"%s\" deleted." +msgstr "" + +#: cinder/volume/drivers/coraid.py:315 +#, python-format +msgid "Resize volume \"%(name)s\" to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:319 +#, python-format +msgid "Repository for volume \"%(name)s\" found: \"%(repo)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:333 +#, python-format +msgid "Volume \"%(name)s\" resized. New size is %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:385 +msgid "Cannot create clone volume in different repository." +msgstr "" + +#: cinder/volume/drivers/coraid.py:505 +#, python-format +msgid "Initialize connection %(shelf)s/%(lun)s for %(name)s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:139 +#, python-format +msgid "" +"CLI output\n" +"%s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:154 +msgid "Reading CLI MOTD" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:158 +#, python-format +msgid "Setting CLI terminal width: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:162 +#, python-format +msgid "Sending CLI command: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:169 +msgid "Error executing EQL command" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:199 +#, python-format +msgid "EQL-driver: executing \"%s\"" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:208 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:383 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:215 cinder/volume/drivers/san/san.py:149 +#, python-format +msgid "Error running SSH command: %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:282 +#, python-format +msgid "Volume %s does not exist, it may have already been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:300 +#, python-format +msgid "EQL-driver: Setup is complete, group IP is %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:304 +msgid "Failed to setup the Dell EqualLogic driver" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:320 +#, python-format +msgid "Failed to create volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:329 +#, python-format +msgid "Volume %s was not found while trying to delete it" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:333 +#, python-format +msgid "Failed to delete volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:348 +#, python-format +msgid "Failed to create snapshot of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:361 +#, python-format +msgid "Failed to create volume from snapshot %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:374 +#, python-format +msgid "Failed to create clone of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:384 +#, python-format +msgid "Failed to delete snapshot %(snap)s of volume %(vol)s" +msgstr "" + +#: 
cinder/volume/drivers/eqlx.py:405 +#, python-format +msgid "Failed to initialize connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:415 +#, python-format +msgid "Failed to terminate connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:436 +#, python-format +msgid "Volume %s was not found! It may have been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:440 +#, python-format +msgid "Failed to ensure export of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:459 +#, python-format +msgid "Failed to extend_volume %(name)s from %(current_size)sGB to %(new_size)sGB" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:86 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:91 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:103 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:161 +#, python-format +msgid "Cloning volume %(src)s to volume %(dst)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:166 +msgid "Volume status must be 'available'." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:202 cinder/volume/drivers/nfs.py:122 +#: cinder/volume/drivers/netapp/nfs.py:753 +#, python-format +msgid "casted to %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:216 +msgid "Snapshot status must be \"available\" to clone." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:238 +#, python-format +msgid "snapshot: %(snap)s, volume: %(vol)s, volume_size: %(size)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:257 +#, python-format +msgid "will copy from snapshot at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:275 cinder/volume/drivers/nfs.py:172 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:373 +#, python-format +msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:403 +#, python-format +msgid "nova call result: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:405 +msgid "Call to Nova to create snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:427 +msgid "Nova returned \"error\" status while creating snapshot." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:431 +#, python-format +msgid "Status of snapshot %(id)s is now %(status)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:444 +#, python-format +msgid "Timed out while waiting for Nova update for creation of snapshot %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:456 +#, python-format +msgid "create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:457 +#, python-format +msgid "volume id: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:532 +msgid "'active' must be present when writing snap_info." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:562 +#, python-format +msgid "deleting snapshot %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:566 +msgid "Volume status must be \"available\" or \"in-use\"." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:582 +#, python-format +msgid "" +"Snapshot record for %s is not present, allowing snapshot_delete to " +"proceed."
+msgstr "" + +#: cinder/volume/drivers/glusterfs.py:587 +#, python-format +msgid "snapshot_file for this snap is %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:608 +#, python-format +msgid "No base file found for %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:625 +#, python-format +msgid "No %(base_id)s found for %(file)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:680 +#, python-format +msgid "No file found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:690 +#, python-format +msgid "No snap found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:701 +#, python-format +msgid "No file depends on %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:727 +#, python-format +msgid "Check condition failed: %s expected to be None." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:778 +msgid "Call to Nova delete snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:796 +#, python-format +msgid "status of snapshot %s is still \"deleting\"... waiting" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:802 +#, python-format +msgid "Unable to delete snapshot %(id)s, status: %(status)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:815 +#, python-format +msgid "Timed out while waiting for Nova update for deletion of snapshot %(id)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:904 +#, python-format +msgid "%s must be a valid raw or qcow2 image." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:967 +msgid "Extend volume is only supported for this driver when no snapshots exist." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:975 +#, python-format +msgid "Unrecognized backing format: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:990 +#, python-format +msgid "creating new volume at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:993 +#, python-format +msgid "file already exists at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1019 cinder/volume/drivers/nfs.py:159 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1021 +#, python-format +msgid "Available shares: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1038 +#, python-format +msgid "" +"GlusterFS share at %(dir)s is not writable by the Cinder volume service. " +"Snapshot operations will not be supported." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:96 +#, python-format +msgid "GPFS is not active. Detailed output: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:97 +#, python-format +msgid "GPFS is not running - state: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:140 +msgid "Option gpfs_mount_point_base is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:147 +msgid "Option gpfs_images_share_mode is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:153 +msgid "Option gpfs_images_dir is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:160 +#, python-format +msgid "" +"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " +"belong to different file systems" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:169 +#, python-format +msgid "" +"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in " +"cluster daemon level %(cur)s - must be at least at level %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:183 +#, python-format +msgid "%s must be an absolute path." 
+msgstr "" + +#: cinder/volume/drivers/gpfs.py:188 +#, python-format +msgid "%s is not a directory." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:197 +#, python-format +msgid "" +"The GPFS filesystem %(fs)s is not at the required release level. Current" +" level is %(cur)s, must be at least %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:556 +#, python-format +msgid "Failed to resize volume %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:604 +#, python-format +msgid "mkfs failed on volume %(vol)s, error message was: %(err)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:637 +#, python-format +msgid "" +"%s cannot be accessed. Verify that GPFS is active and file system is " +"mounted." +msgstr "" + +#: cinder/volume/drivers/lvm.py:189 +#, python-format +msgid "Unabled to delete due to existing snapshot for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:215 +#, python-format +msgid "Volume device file path %s does not exist." +msgstr "" + +#: cinder/volume/drivers/lvm.py:221 +#, python-format +msgid "Size for volume: %s not found, cannot secure delete." +msgstr "" + +#: cinder/volume/drivers/lvm.py:262 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:361 +#, python-format +msgid "Unable to update stats on non-initialized Volume Group: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:462 +#, python-format +msgid "Error creating iSCSI target, retrying creation for target: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:482 +#, python-format +msgid "volume_info:%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:518 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:519 cinder/volume/drivers/lvm.py:724 +#: cinder/volume/drivers/huawei/rest_common.py:1225 +#, python-format +msgid "%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:573 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/nfs.py:109 +msgid "Driver specific implementation needs to return mount_point_base." +msgstr "" + +#: cinder/volume/drivers/nfs.py:263 +#, python-format +msgid "Expected volume size was %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:264 +#, python-format +msgid " but size is now %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:361 +#, python-format +msgid "%s is already mounted" +msgstr "" + +#: cinder/volume/drivers/nfs.py:421 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:426 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/nfs.py:431 +#, python-format +msgid "NFS config 'nfs_oversub_ratio' invalid. Must be > 0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:439 +#, python-format +msgid "NFS config 'nfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:493 +#, python-format +msgid "Selected %s as target nfs share." 
+msgstr "" + +#: cinder/volume/drivers/nfs.py:526 +#, python-format +msgid "%s is above nfs_used_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:529 +#, python-format +msgid "%s is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:532 +#, python-format +msgid "%s reserved space is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/rbd.py:160 +#, python-format +msgid "Invalid argument - whence=%s not supported" +msgstr "" + +#: cinder/volume/drivers/rbd.py:164 +msgid "Invalid argument" +msgstr "" + +#: cinder/volume/drivers/rbd.py:183 +msgid "fileno() not supported by RBD()" +msgstr "" + +#: cinder/volume/drivers/rbd.py:210 +#, python-format +msgid "error opening rbd image %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:259 +msgid "rados and rbd python libraries not found" +msgstr "" + +#: cinder/volume/drivers/rbd.py:265 +msgid "error connecting to ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:346 cinder/volume/drivers/sheepdog.py:178 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:377 +#, python-format +msgid "clone depth exceeds limit of %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:411 +#, python-format +msgid "maximum clone depth (%d) has been reached - flattening source volume" +msgstr "" + +#: cinder/volume/drivers/rbd.py:423 +#, python-format +msgid "flattening source volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:435 +#, python-format +msgid "creating snapshot='%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:445 +#, python-format +msgid "cloning '%(src_vol)s@%(src_snap)s' to '%(dest)s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:459 +msgid "clone created successfully" +msgstr "" + +#: cinder/volume/drivers/rbd.py:468 +#, python-format +msgid "creating volume '%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:484 +#, python-format +msgid "flattening %(pool)s/%(img)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:490 +#, python-format +msgid "cloning %(pool)s/%(img)s@%(snap)s to %(dst)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:527 +msgid "volume has no backup snaps" +msgstr "" + +#: cinder/volume/drivers/rbd.py:550 +#, python-format +msgid "volume %s is not a clone" +msgstr "" + +#: cinder/volume/drivers/rbd.py:568 +#, python-format +msgid "deleting parent snapshot %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:579 +#, python-format +msgid "deleting parent %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:593 +#, python-format +msgid "volume %s no longer exists in backend" +msgstr "" + +#: cinder/volume/drivers/rbd.py:609 +msgid "volume has clone snapshot(s)" +msgstr "" + +#: cinder/volume/drivers/rbd.py:625 +#, python-format +msgid "deleting rbd volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:629 +msgid "" +"ImageBusy error raised while deleting rbd volume. This may have been " +"caused by a connection from a client that has crashed and, if so, may be " +"resolved by retrying the delete after 30 seconds has elapsed." 
+msgstr "" + +#: cinder/volume/drivers/rbd.py:642 +msgid "volume is a clone so cleaning references" +msgstr "" + +#: cinder/volume/drivers/rbd.py:696 +#, python-format +msgid "connection data: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:705 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:709 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:712 +msgid "Not an rbd snapshot" +msgstr "" + +#: cinder/volume/drivers/rbd.py:724 +#, python-format +msgid "not cloneable: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:728 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:733 +msgid "rbd image clone requires image format to be 'raw' but image {0} is '{1}'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:747 +#, python-format +msgid "Unable to open image %(loc)s: %(err)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:817 +msgid "volume backup complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:830 +msgid "volume restore complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:840 cinder/volume/drivers/sheepdog.py:195 +#, python-format +msgid "Failed to Extend Volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:845 cinder/volume/drivers/sheepdog.py:200 +#: cinder/volume/drivers/windows/windows.py:223 +#, python-format +msgid "Extend volume from %(old_size)s GB to %(new_size)s GB." +msgstr "" + +#: cinder/volume/drivers/scality.py:67 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:78 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:84 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:105 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:139 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:59 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:64 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:144 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:151 +#, python-format +msgid "" +"Failed to make httplib connection SolidFire Cluster: %s (verify san_ip " +"settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:154 +#, python-format +msgid "Failed to make httplib connection: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:161 +#, python-format +msgid "" +"Request to SolidFire cluster returned bad status: %(status)s / %(reason)s" +" (check san_login/san_password settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:166 +#, python-format +msgid "HTTP request failed, with status: %(status)s and reason: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:177 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:183 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:187 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:189 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:195 +#, python-format +msgid "Detected 
xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:202 +#: cinder/volume/drivers/solidfire.py:271 +#: cinder/volume/drivers/solidfire.py:366 +#, python-format +msgid "API response: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:222 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:253 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:315 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:398 +msgid "Failed to get model update from clone" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:410 +#, python-format +msgid "Failed volume create: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:425 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:460 +#, python-format +msgid "Failed to get SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:469 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:478 +#, python-format +msgid "Volume %s, not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:481 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:550 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:554 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:556 +msgid "This usually means the volume was never successfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:569 +#, python-format +msgid "Failed to delete SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:572 +#: cinder/volume/drivers/solidfire.py:646 +#: cinder/volume/drivers/solidfire.py:709 +#: cinder/volume/drivers/solidfire.py:734 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:575 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:579 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:587 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:638 +msgid "Entering SolidFire extend_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:660 +msgid "Leaving SolidFire extend_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:665 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:673 +msgid "Failed to get updated stats" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:703 +#: cinder/volume/drivers/solidfire.py:728 +msgid "Entering SolidFire attach_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:773 +msgid "Leaving SolidFire transfer volume" +msgstr "" + +#: cinder/volume/drivers/zadara.py:236 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:260 +#, python-format +msgid "Operation completed. 
%(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:357 +#, python-format +msgid "Pool %(name)s: %(total)sGB total, %(free)sGB free" +msgstr "" + +#: cinder/volume/drivers/zadara.py:408 cinder/volume/drivers/zadara.py:531 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:438 +#, python-format +msgid "Create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:445 cinder/volume/drivers/zadara.py:490 +#: cinder/volume/drivers/zadara.py:516 +#, python-format +msgid "Volume %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:456 +#, python-format +msgid "Delete snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:464 +#, python-format +msgid "snapshot: original volume %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:472 +#, python-format +msgid "snapshot: snapshot %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:483 +#, python-format +msgid "Creating volume from snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:496 +#, python-format +msgid "Snapshot %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:614 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:40 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:79 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:83 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:91 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:98 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:107 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:115 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:130 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:137 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:144 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:152 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:157 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:167 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:177 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:188 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:197 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:218 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:230 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:241 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:257 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:266 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:278 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:287 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:292 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:302 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:312 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:321 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:342 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. 
Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:354 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:365 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:381 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:390 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:402 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:411 +msgid "Entering delete_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:413 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:420 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:430 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:438 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:442 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigService: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:456 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:465 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:472 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:476 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:488 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:497 +#: cinder/volume/drivers/emc/emc_smis_common.py:567 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:502 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s."
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:518 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:527 +#, python-format +msgid "" +"Error Create Snapshot: %(snapshot)s Volume: %(volume)s Error: " +"%(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:535 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:541 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:545 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:551 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:559 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:574 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:590 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:599 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:611 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:621 +#, python-format +msgid "Create export: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:626 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:648 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:663 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:674 +#, python-format +msgid "Error mapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:678 +#, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:694 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:707 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:711 +#, python-format +msgid "HidePaths for volume %s completed successfully." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:724 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:739 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:744 +#, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:757 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:770 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:775 +#, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:781 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:790 +#: cinder/volume/drivers/emc/emc_smis_common.py:820 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:804 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:810 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:834 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:840 +#, python-format +msgid "Volume %s is already mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:852 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:884 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:887 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:903 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:906 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:928 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:948 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:952 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:959 +msgid "Cannot connect to ECOM server" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:971 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:984 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:997 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1010 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1054 +#, python-format +msgid "Pool %(storage_type)s is not found." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1060 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1066 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1082 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1114 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1117 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1130 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1158 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1184 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1188 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1248 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1289 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1302 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1314 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1326 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1361 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1404 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1409 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1419 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1441 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1463 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1491 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1520 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1526 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1538 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1548 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1550 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1566 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:152 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:161 +#, python-format +msgid "Cannot find device number for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:191 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:198 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:215 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:70 +#, python-format +msgid "Range: start LU: %(start)s, end LU: %(end)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:84 +#, python-format +msgid "setting LU upper (end) limit to %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:92 +#, python-format +msgid "%(element)s: %(val)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:103 cinder/volume/drivers/hds/hds.py:105 +#, python-format +msgid "XML exception reading parameter: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:178 +#, python-format +msgid "portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:197 +#, python-format +msgid "No configuration found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:250 +#, python-format +msgid "HDP not found: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:289 +#, python-format +msgid "iSCSI portal not found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:327 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:355 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is cloned." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:372 +#, python-format +msgid "LUN %(lun)s extended to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:395 +#, python-format +msgid "delete lun %(lun)s on %(name)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:480 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created from snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:503 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is created as snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:522 +#, python-format +msgid "LUN %s is deleted." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:57 +msgid "_instantiate_driver: configuration not found." 
+msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:64 +#, python-format +msgid "" +"_instantiate_driver: Loading %(protocol)s driver for Huawei OceanStor " +"%(product)s series storage arrays." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:84 +#, python-format +msgid "" +"\"Product\" or \"Protocol\" is illegal. \"Product\" should be set to " +"either T, Dorado or HVS. \"Protocol\" should be set to either iSCSI or " +"FC. Product: %(product)s Protocol: %(protocol)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:74 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s host: %(host)s initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:92 +#: cinder/volume/drivers/huawei/huawei_t.py:461 +#, python-format +msgid "initialize_connection: Target FC ports WWNS: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:101 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(ini)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:159 +#: cinder/volume/drivers/huawei/rest_common.py:1278 +#, python-format +msgid "" +"_get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " +"check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:206 +#: cinder/volume/drivers/huawei/rest_common.py:1083 +#, python-format +msgid "_get_tgt_iqn: iSCSI IP is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:234 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:248 +#, python-format +msgid "" +"_get_iscsi_tgt_port_info: Failed to get iSCSI port info. Please make sure" +" the iSCSI port IP %s is configured in array." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:323 +#: cinder/volume/drivers/huawei/huawei_t.py:552 +#, python-format +msgid "" +"terminate_connection: volume: %(vol)s, host: %(host)s, connector: " +"%(initiator)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:351 +#, python-format +msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:436 +msgid "validate_connector: The FC driver requires thewwpns in the connector." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:443 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:578 +#, python-format +msgid "_remove_fc_ports: FC port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:40 +#, python-format +msgid "parse_xml_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:129 +#, python-format +msgid "_get_host_os_type: Host %(ip)s OS type is %(os)s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:59 +#, python-format +msgid "HVS Request URL: %(url)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:60 +#, python-format +msgid "HVS Request Data: %(data)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:73 +#, python-format +msgid "HVS Response Data: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:75 +#, python-format +msgid "Bad response from server: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:82 +msgid "JSON transfer error" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:102 +#, python-format +msgid "Login error, reason is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:166 +#, python-format +msgid "" +"%(err)s\n" +"result: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:173 +#, python-format +msgid "%s \"data\" was not in result." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:208 +msgid "Can't find the Qos policy in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:246 +msgid "Can't find lun or lun group in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:280 +#, python-format +msgid "Invalid resource pool: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:298 +#, python-format +msgid "Get pool info error, pool name is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:327 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:354 +#, python-format +msgid "_stop_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:474 +#, python-format +msgid "" +"_mapping_hostgroup_and_lungroup: lun_group: %(lun_group)sview_id: " +"%(view_id)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:511 +#: cinder/volume/drivers/huawei/rest_common.py:543 +#, python-format +msgid "initiator name:%(initiator_name)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:527 +#, python-format +msgid "host lun id is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:553 +#, python-format +msgid "the free wwns %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:574 +#, python-format +msgid "the fc server properties is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:688 +#, python-format +msgid "JSON transfer data error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:874 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:937 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:964 +#, python-format +msgid "" +"PrefetchType config is wrong. PrefetchType must in 1,2,3,4. fetchtype " +"is:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:970 +msgid "Use default prefetch fetchtype. Prefetch fetchtype:Intelligent." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:982 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal.LUNcopy name: " +"%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1056 +#, python-format +msgid "" +"_get_iscsi_port_info: Failed to get iscsi port info through config IP " +"%(ip)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1101 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1124 +#, python-format +msgid "_parse_volume_type: type id: %(type_id)s config parameter is: %(params)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1157 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the configuration file " +"%(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1162 +#, python-format +msgid "The config parameters are: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1239 +#: cinder/volume/drivers/huawei/ssh_common.py:118 +#: cinder/volume/drivers/huawei/ssh_common.py:1265 +#, python-format +msgid "_check_conf_file: Config file invalid. %s must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1246 +#: cinder/volume/drivers/huawei/ssh_common.py:125 +msgid "_check_conf_file: Config file invalid. StoragePool must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1256 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1300 +msgid "Can not find lun in array" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:54 +#, python-format +msgid "ssh_read: Read SSH timeout. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:70 +msgid "No response message. Please check system status." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:101 +#: cinder/volume/drivers/huawei/ssh_common.py:1249 +msgid "do_setup" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:135 +#: cinder/volume/drivers/huawei/ssh_common.py:1287 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType is invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:169 +#, python-format +msgid "_get_login_info: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:224 +#, python-format +msgid "create_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:242 +#, python-format +msgid "" +"_name_translate: Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:279 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the element in configuration " +"file %(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:373 +#: cinder/volume/drivers/huawei/ssh_common.py:1451 +#, python-format +msgid "LUNType must be \"Thin\" or \"Thick\". LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:395 +msgid "" +"_parse_conf_lun_params: Use default prefetch type. 
Prefetch type: " +"Intelligent" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:421 +#, python-format +msgid "" +"_get_maximum_capacity_pool_id: Failed to get pool id. Please check config" +" file and make sure the StoragePool %s is created in storage array." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:436 +#, python-format +msgid "CLI command: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:466 +#, python-format +msgid "" +"_execute_cli: Can not connect to IP %(old)s, try to connect to the other " +"IP %(new)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:501 +#, python-format +msgid "_execute_cli: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:511 +#, python-format +msgid "delete_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:516 +#, python-format +msgid "delete_volume: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:570 +#, python-format +msgid "" +"create_volume_from_snapshot: snapshot name: %(snapshot)s, volume name: " +"%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:580 +#, python-format +msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:650 +#, python-format +msgid "_wait_for_luncopy: LUNcopy %(luncopyname)s status is %(status)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:688 +#, python-format +msgid "create_cloned_volume: src volume: %(src)s, tgt volume: %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:697 +#, python-format +msgid "Source volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:739 +#, python-format +msgid "" +"extend_volume: extended volume name: %(extended_name)s new added volume " +"name: %(added_name)s new added volume size: %(added_size)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:747 +#, python-format +msgid "extend_volume: volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:779 +#, python-format +msgid "create_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:785 +msgid "create_snapshot: Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:792 +#, python-format +msgid "create_snapshot: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:855 +#, python-format +msgid "delete_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:865 +#, python-format +msgid "" +"delete_snapshot: Can not delete snapshot %s for it is a source LUN of " +"LUNCopy." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:873 +#, python-format +msgid "delete_snapshot: Snapshot %(snap)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:916 +#, python-format +msgid "" +"%(func)s: %(msg)s\n" +"CLI command: %(cmd)s\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:933 +#, python-format +msgid "map_volume: Volume %s was not found." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1079 +#, python-format +msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1102 +#, python-format +msgid "remove_map: Host %s does not exist." 
+msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1106 +#, python-format +msgid "remove_map: Volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1119 +#, python-format +msgid "remove_map: No map between host %(host)s and volume %(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1138 +#, python-format +msgid "" +"_delete_map: There are IOs accessing the system. Retry to delete host map" +" %(mapid)s 10s later." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1146 +#, python-format +msgid "" +"_delete_map: Failed to delete host map %(mapid)s.\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1185 +msgid "_update_volume_stats: Updating volume stats." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1277 +msgid "_check_conf_file: Config file invalid. StoragePool must be specified." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1311 +msgid "" +"_get_device_type: The driver only supports Dorado5100 and Dorado 2100 G2 " +"now." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1389 +#, python-format +msgid "" +"create_volume_from_snapshot: %(device)s does not support create volume " +"from snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1396 +#, python-format +msgid "create_cloned_volume: %(device)s does not support clone volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1404 +#, python-format +msgid "extend_volume: %(device)s does not support extend volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1413 +#, python-format +msgid "create_snapshot: %(device)s does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:132 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:142 +#, python-format +msgid "Failed getting details for pool %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:179 +msgid "do_setup: No configured nodes." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:182 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:186 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:190 +msgid "Unable to determine system name" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:193 +msgid "Unable to determine system id" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:196 +msgid "Unable to determine pool extent size" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:203 +#: cinder/volume/drivers/netapp/iscsi.py:122 +#: cinder/volume/drivers/netapp/nfs.py:639 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:157 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:209 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:217 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:225 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:235 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:254 +msgid "The connector does not contain the required information." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:277 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:297 +msgid "CHAP secret exists for host but CHAP is disabled" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:302 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:314 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:316 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:333 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:342 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:365 +msgid "" +"Could not get FC connection information for the host-volume connection. " +"Is the host configured properly for FC connections?" 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:380 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:385 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:403 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:411 +msgid "terminate_connection: Failed to get host name from connector." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:421 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:447 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:459 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:470 +#, python-format +msgid "enter: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:474 +msgid "extend_volume: Extending a volume with snapshots is not supported." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:481 +#, python-format +msgid "leave: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:497 +#, python-format +msgid "enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:523 +#, python-format +msgid "leave: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:540 +#, python-format +msgid "" +"enter: retype: id=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:581 +#, python-format +msgid "" +"exit: retype: ild=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:622 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:623 +msgid "_update_volume_stats: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:44 +#, python-format +msgid "Could not find key in output of command %(cmd)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:64 +#, python-format +msgid "Failed to get code level (%s)." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:86 +#, python-format +msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:143 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:165 +#, python-format +msgid "Failed to find host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:178 +#, python-format +msgid "enter: get_host_from_connector: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:207 +#, python-format +msgid "leave: get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:218 +#, python-format +msgid "enter: create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:224 +msgid "create_host: Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:235 +msgid "create_host: No initiators or wwpns supplied." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:265 +#, python-format +msgid "leave: create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:275 +#, python-format +msgid "enter: map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:301 +#, python-format +msgid "" +"leave: map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host " +"%(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:311 +#, python-format +msgid "enter: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:318 +#, python-format +msgid "unmap_vol_from_host: No mapping of volume %(vol_name)s to any host found." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:324 +#, python-format +msgid "" +"unmap_vol_from_host: Multiple mappings of volume %(vol_name)s found, no " +"host specified." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:336 +#, python-format +msgid "" +"unmap_vol_from_host: No mapping of volume %(vol_name)s to host %(host) " +"found." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:348 +#, python-format +msgid "leave: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:377 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:383 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:390 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:397 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:402 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:408 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:417 +#, python-format +msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:452 +msgid "Protocol must be specified as ' iSCSI' or ' FC'." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:495 +#, python-format +msgid "enter: create_vdisk: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:498 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:525 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping%(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:535 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within theallotted %(to)d " +"seconds timeout. Terminating." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:544 +#, python-format +msgid "" +"enter: run_flashcopy: execute FlashCopy from source %(source)s to target " +"%(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:552 +#, python-format +msgid "leave: run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:572 +#, python-format +msgid "Loopcall: _check_vdisk_fc_mappings(), vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:595 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:631 +#, python-format +msgid "Calling _ensure_vdisk_no_fc_mappings: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:639 +#, python-format +msgid "enter: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:641 +#, python-format +msgid "Tried to delete non-existant vdisk %s." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:645 +#, python-format +msgid "leave: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:649 +#, python-format +msgid "enter: create_copy: snapshot %(src)s to %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:654 +#, python-format +msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:669 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt)s from vdisk %(src)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:691 +msgid "migrate_volume started without a vdisk copy in the expected pool." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:743 +#, python-format +msgid "" +"Ignore change IO group as storage code level is %(code_level)s, below " +"then 6.4.0.0" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:35 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:211 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:244 +#, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:53 +#, python-format +msgid "Expected no output from CLI command %(cmd)s, got %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:65 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:256 +#, python-format +msgid "" +"Failed to parse CLI output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:142 +msgid "Must pass wwpn or host to lsfabric." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:158 +#, python-format +msgid "Did not find success message nor error for %(fun)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:163 +msgid "" +"storwize_svc_multihostmap_enabled is set to False, not allowing multi " +"host mapping." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:347 +#, python-format +msgid "Did not find expected key %(key)s in %(fun)s: %(raw)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:382 +#, python-format +msgid "" +"Unexpected CLI response: header/row mismatch. header: %(header)s, row: " +"%(row)s" +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:419 +#, python-format +msgid "No element by given name %s." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:440 +msgid "Not a valid value for NaElement." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:444 +msgid "NaElement name cannot be null." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:468 +msgid "Type cannot be converted into NaElement." 
+msgstr "" + +#: cinder/volume/drivers/netapp/common.py:75 +msgid "Required configuration not found" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:103 +#, python-format +msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:109 +#, python-format +msgid "Storage family %s is not supported" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:116 +#, python-format +msgid "No default storage protocol found for storage family %(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:123 +#, python-format +msgid "" +"Protocol %(storage_protocol)s is not supported for storage family " +"%(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:130 +#, python-format +msgid "" +"NetApp driver of family %(storage_family)s and protocol " +"%(storage_protocol)s loaded" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:139 +msgid "Only loading netapp drivers supported." +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:158 +#, python-format +msgid "" +"The configured NetApp driver is deprecated. Please refer the link to " +"resolve the issue '%s'." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:69 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:105 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:150 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:166 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:175 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:191 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:227 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:232 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:238 +#, python-format +msgid "Failed to get LUN target details for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:249 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:252 +#, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:290 +#, python-format +msgid "Snapshot %s deletion successful" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:310 +#: cinder/volume/drivers/netapp/iscsi.py:565 +#: cinder/volume/drivers/netapp/nfs.py:99 +#: cinder/volume/drivers/netapp/nfs.py:206 +#, python-format +msgid "Resizing %s failed. Cleaning volume." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:325 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:412 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:431 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:511 +msgid "Object is not a NetApp LUN." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:543 +#, python-format +msgid "Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:545 +#, python-format +msgid "Error getting lun attribute. Exception: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:600 +#, python-format +msgid "No need to extend volume %s as it is already the requested new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:606 +#, python-format +msgid "Resizing lun %s directly to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:633 +#, python-format +msgid "Lun %(path)s geometry failed. Message - %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:662 +#, python-format +msgid "Moving lun %(name)s to %(new_name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:677 +#, python-format +msgid "Resizing lun %s using sub clone to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:684 +#, python-format +msgid "%s cannot be sub clone resized as it is hosted on compressed volume" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:690 +#, python-format +msgid "%s cannot be sub clone resized as it contains no blocks." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:707 +#, python-format +msgid "Post clone resize lun %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:718 +#, python-format +msgid "Failure staging lun %s to tmp." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:723 +#, python-format +msgid "Failure moving new cloned lun to %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:727 +#, python-format +msgid "Failure deleting staged tmp lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:730 +#, python-format +msgid "Unknown exception in post clone resize lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:732 +#, python-format +msgid "Exception details: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:736 +msgid "Getting lun block count." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:741 +#, python-format +msgid "Failure getting lun info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:785 +#, python-format +msgid "Failed to get vol with required size and extra specs for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:796 +#, python-format +msgid "Error provisioning vol %(name)s on %(volume)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:841 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:982 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:986 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1087 +msgid "Cluster ssc is not updated. No volume stats found." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1149 +#: cinder/volume/drivers/netapp/nfs.py:1080 +msgid "Unsupported ONTAP version. ONTAP version 7.3.1 and above is supported." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1153 +#: cinder/volume/drivers/netapp/nfs.py:1084 +#: cinder/volume/drivers/netapp/utils.py:320 +msgid "Api version could not be determined." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1164 +#, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1273 +#, python-format +msgid "Error finding luns for volume %s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1390 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1393 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1456 +msgid "Volume refresh job already running. Returning..." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1462 +#, python-format +msgid "Error refreshing vol capacity. Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1470 +#, python-format +msgid "Refreshing capacity info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:104 +#: cinder/volume/drivers/netapp/nfs.py:211 +#, python-format +msgid "NFS file %s not discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:224 +#, python-format +msgid "Copied image to volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:230 +#, python-format +msgid "Registering image in cache %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:237 +#, python-format +msgid "" +"Exception while registering image %(image_id)s in cache. Exception: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:250 +#, python-format +msgid "Found cache file for image %(image_id)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:263 +#, python-format +msgid "Cloning img from cache for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:271 +msgid "Image cache cleaning in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:282 +msgid "Image cache cleaning in progress." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:293 +#, python-format +msgid "Cleaning cache for share %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:298 +#, python-format +msgid "Files to be queued for deletion %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:305 +#, python-format +msgid "Exception during cache cleaning %(share)s. Message - %(ex)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:311 +msgid "Image cache cleaning done." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:336 +#, python-format +msgid "Bytes to free %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:343 +#, python-format +msgid "Delete file path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:358 +#, python-format +msgid "Deleting file at path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:363 +#, python-format +msgid "Exception during deleting %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:395 +#, python-format +msgid "Unexpected exception in cloning image %(image_id)s. 
Message: %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:411 +#, python-format +msgid "Cloning image %s from cache" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:415 +#, python-format +msgid "Cache share: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:425 +#, python-format +msgid "Unexpected exception during image cloning in share %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:431 +#, python-format +msgid "Cloning image %s directly in share" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:436 +#, python-format +msgid "Share is cloneable %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:443 +#, python-format +msgid "Image is raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:450 +#, python-format +msgid "Image will locally be converted to raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:457 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:467 +#, python-format +msgid "Performing post clone for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:474 +msgid "NFS file could not be discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:478 +msgid "Checking file for resize" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:482 +#, python-format +msgid "Resizing file to %sG" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:488 +msgid "Resizing image file failed." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:510 +msgid "Discover file retries exhausted." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:529 +#, python-format +msgid "Image location not in the expected format %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:557 +#, python-format +msgid "Found possible share matches %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:561 +msgid "Unexpected exception while short listing used share." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:599 +#, python-format +msgid "Extending volume %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:710 +#, python-format +msgid "Shares on vserver %s will only be used for provisioning." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:714 +#: cinder/volume/drivers/netapp/nfs.py:892 +msgid "No vserver set in config. SSC will be disabled." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:757 +#, python-format +msgid "Exception creating vol %(name)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:765 +#, python-format +msgid "Volume %s could not be created on shares." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:815 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:856 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:862 +#, python-format +msgid "" +"Cloning with params volume %(volume)s, src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:924 +msgid "No cluster ssc stats found. Wait for next volume stats update." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:941 +msgid "No shares found hence skipping ssc refresh." 
+msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:978 +#: cinder/volume/drivers/netapp/nfs.py:1221 +#, python-format +msgid "Shortlisted del elg files %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:983 +#: cinder/volume/drivers/netapp/nfs.py:1226 +#, python-format +msgid "Getting file usage for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:988 +#: cinder/volume/drivers/netapp/nfs.py:1231 +#, python-format +msgid "file-usage for path %(path)s is %(bytes)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1005 +#: cinder/volume/drivers/netapp/nfs.py:1268 +#, python-format +msgid "Share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1007 +#: cinder/volume/drivers/netapp/nfs.py:1270 +#, python-format +msgid "No share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1038 +#, python-format +msgid "Found volume %(vol)s for share %(share)s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1129 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1139 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:241 +#, python-format +msgid "Unexpected error while creating ssc vol list. Message - %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:272 +#, python-format +msgid "Exception querying aggr options. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:313 +#, python-format +msgid "Exception querying sis information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:347 +#, python-format +msgid "Exception querying mirror information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:379 +#, python-format +msgid "Exception querying storage disk. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:421 +#, python-format +msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:455 +#, python-format +msgid "Successfully completed stale refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:482 +#, python-format +msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:488 +#, python-format +msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:500 +msgid "Backend not a VolumeDriver." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:502 +msgid "Backend server not NaServer." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:505 +msgid "ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:517 +msgid "refresh stale ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:616 +msgid "Fatal error: User not permitted to query NetApp volumes." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:623 +#, python-format +msgid "" +"The user does not have access or sufficient privileges to use all ssc " +"apis. The ssc features %s may not work as expected." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:122 +msgid "ems executed successfully." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:124 +#, python-format +msgid "Failed to invoke ems. 
Message: %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/utils.py:137
+msgid ""
+"Using the NetApp drivers directly is not recommended. Please use "
+"NetAppDriver to achieve the functionality."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/utils.py:160
+msgid "Requires an NaServer instance."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/utils.py:317
+msgid "Unsupported Clustered Data ONTAP version."
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:99
+#, python-format
+msgid "Volume %s does not exist in Nexenta SA"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:150
+#, python-format
+msgid "Extending volume: %(id)s New size: %(size)s GB"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:166
+#, python-format
+msgid "Volume %s does not exist, it seems it was already deleted."
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:179
+#, python-format
+msgid "Cannot delete snapshot %(origin)s: %(exc)s"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:190
+#, python-format
+msgid "Creating temp snapshot of the original volume: %(volume_name)s@%(name)s"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:200
+#: cinder/volume/drivers/nexenta/nfs.py:200
+#, python-format
+msgid "Volume creation failed, deleting created snapshot %(volume_name)s@%(name)s"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:205
+#: cinder/volume/drivers/nexenta/nfs.py:205
+#, python-format
+msgid "Failed to delete zfs snapshot %(volume_name)s@%(name)s"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:223
+#, python-format
+msgid "Enter: migrate_volume: id=%(id)s, host=%(host)s"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:250
+#, python-format
+msgid "Remote NexentaStor appliance at %s should be SSH-bound."
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:267
+#, python-format
+msgid ""
+"Cannot send source snapshot %(src)s to destination %(dst)s. Reason: "
+"%(exc)s"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:275
+#, python-format
+msgid ""
+"Cannot delete temporary source snapshot %(src)s on NexentaStor Appliance:"
+" %(exc)s"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:281
+#, python-format
+msgid "Cannot delete source volume %(volume)s on NexentaStor Appliance: %(exc)s"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:318
+#, python-format
+msgid "Snapshot %s does not exist, it seems it was already deleted."
+msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:439 +#: cinder/volume/drivers/windows/windows_utils.py:230 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:449 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:461 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:471 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:481 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:514 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:522 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:83 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:88 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:89 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:90 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:96 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:85 +#, python-format +msgid "Volume %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:89 +#, python-format +msgid "Folder %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:114 +#, python-format +msgid "Creating folder on Nexenta Store %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:146 +#, python-format +msgid "Cannot destroy created folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:176 +#, python-format +msgid "Cannot destroy cloned folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:227 +#, python-format +msgid "Folder %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:237 +#: cinder/volume/drivers/nexenta/nfs.py:268 +#, python-format +msgid "Snapshot %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:302 +#, python-format +msgid "Creating regular file: %s.This may take some time." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:313 +#, python-format +msgid "Regular file: %s created." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:365 +#, python-format +msgid "Sharing folder %s on Nexenta Store" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:393 +#, python-format +msgid "Shares loaded: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/utils.py:46 +#, python-format +msgid "Invalid value: \"%s\"" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:93 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:99 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:107 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:137 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:190 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:246 +#, python-format +msgid "Snapshot info: %(name)s => %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:321 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:79 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:166 +#, python-format +msgid "Invalid hp3parclient version. Version %s or greater required." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:179 +#, python-format +msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:193 +#, python-format +msgid "HP3PARCommon %(common_ver)s, hp3parclient %(rest_ver)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:212 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:494 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:228 +#, python-format +msgid "Failed to get domain because CPG (%s) doesn't exist on array." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:247 +#, python-format +msgid "Error extending volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:347 +#, python-format +msgid "command %s failed" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:390 +#, python-format +msgid "Error running ssh command: %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:586 +#, python-format +msgid "VV Set %s does not exist." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:633 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:684 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:752 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1004 +#, python-format +msgid "Failure in update_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1019 +#, python-format +msgid "Failure in clear_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1031 +#, python-format +msgid "Error attaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1039 +#, python-format +msgid "Error detaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:124 +#, python-format +msgid "Invalid IP address format '%s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:158 +#, python-format +msgid "" +"Found invalid iSCSI IP address(s) in configuration option(s) " +"hp3par_iscsi_ips or iscsi_ip_address '%s.'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:164 +msgid "At least one valid iSCSI IP address must be set." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:266 +msgid "Least busy iSCSI port not found, using first iSCSI port in list." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:75 +#, python-format +msgid "Failure while invoking function: %(func)s. Error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:162 +#, python-format +msgid "Error while terminating session: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:165 +msgid "Successfully established connection to the server." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:172 +#, python-format +msgid "Error while logging out the user: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:218 +#, python-format +msgid "" +"Not authenticated error occurred. Will create session and try API call " +"again: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:258 +#, python-format +msgid "Task: %(task)s progress: %(prog)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:262 +#, python-format +msgid "Task %s status: success." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:266 +#: cinder/volume/drivers/vmware/api.py:271 +#, python-format +msgid "Task: %(task)s failed with error: %(err)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:290 +msgid "Lease is ready." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:294 +msgid "Lease initializing..." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:304 +#, python-format +msgid "Error: unknown lease state %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:51 +#, python-format +msgid "Read %(bytes)s out of %(max)s from ThreadSafePipe." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:56 +#, python-format +msgid "Completed transfer of size %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:102 +#, python-format +msgid "Initiating image service update on image: %(image)s with meta: %(meta)s" +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:117 +#, python-format +msgid "Glance image: %s is now active." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:123 +#, python-format +msgid "Glance image: %s is in killed state." 
+msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:132 +#, python-format +msgid "Glance image %(id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:171 +#, python-format +msgid "" +"Exception during HTTP connection close in VMwareHTTPWrite. Exception is " +"%s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:203 +#: cinder/volume/drivers/vmware/read_write_util.py:292 +msgid "Could not retrieve URL from lease." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:206 +#, python-format +msgid "Opening vmdk url: %s for write." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:231 +#, python-format +msgid "Written %s bytes to vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:242 +#: cinder/volume/drivers/vmware/read_write_util.py:318 +#, python-format +msgid "Updating progress to %s percent." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:258 +#: cinder/volume/drivers/vmware/read_write_util.py:334 +msgid "Lease released." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:260 +#: cinder/volume/drivers/vmware/read_write_util.py:336 +#, python-format +msgid "Lease is already in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:295 +#, python-format +msgid "Opening vmdk url: %s for read." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:307 +#, python-format +msgid "Read %s bytes from vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:150 +#, python-format +msgid "Error(s): %s occurred in the call to RetrievePropertiesEx." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:189 +#, python-format +msgid "No such SOAP method %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:198 +#, python-format +msgid "httplib error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:209 +#, python-format +msgid "Socket error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:218 +#, python-format +msgid "Type error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:225 +#, python-format +msgid "Error in %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:112 +#, python-format +msgid "Returning spec value %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:115 +#, python-format +msgid "Invalid spec value: %s specified." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:118 +#, python-format +msgid "Returning default spec value: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:169 +#, python-format +msgid "%s not set." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:174 +#, python-format +msgid "Successfully setup driver: %(driver)s for server: %(ip)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:219 +msgid "Backing not available, no operation to be performed." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:287 +#, python-format +msgid "" +"Unable to pick datastore to accommodate %(size)s bytes from the " +"datastores: %(dss)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:293 +#, python-format +msgid "" +"Selected datastore: %(datastore)s with %(host_count)d connected host(s) " +"for the volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:375 +#, python-format +msgid "" +"Unable to find suitable datastore for volume of size: %(vol)s GB under " +"host: %(host)s. 
More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:385 +#, python-format +msgid "Unable to find host to accommodate a disk of size: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:412 +#, python-format +msgid "" +"Unable to find suitable datastore for volume: %(vol)s under host: " +"%(host)s. More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:422 +#, python-format +msgid "Unable to create volume: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:441 +#, python-format +msgid "The instance: %s for which initialize connection is called, exists." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:448 +#, python-format +msgid "There is no backing for the volume: %s. Need to create one." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:456 +msgid "The instance for which initialize connection is called, does not exist." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:461 +#, python-format +msgid "Trying to boot from an empty volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:470 +#, python-format +msgid "" +"Returning connection_info: %(info)s for volume: %(volume)s with " +"connector: %(connector)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:518 +#, python-format +msgid "Snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:523 +#, python-format +msgid "There is no backing, so will not create snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:528 +#, python-format +msgid "Successfully created snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:549 +#, python-format +msgid "Delete snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:554 +#, python-format +msgid "There is no backing, and so there is no snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:558 +#, python-format +msgid "Successfully deleted snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:586 +#, python-format +msgid "Successfully cloned new backing: %(back)s from source VMDK file: %(vmdk)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:603 +#, python-format +msgid "" +"There is no backing for the source volume: %(svol)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:633 +#, python-format +msgid "" +"There is no backing for the source snapshot: %(snap)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:642 +#: cinder/volume/drivers/vmware/vmdk.py:982 +#, python-format +msgid "" +"There is no snapshot point for the snapshoted volume: %(snap)s. Not " +"creating any backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:678 +#, python-format +msgid "Cannot create image of disk format: %s. Only vmdk disk format is accepted." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:713 +#: cinder/volume/drivers/vmware/vmdk.py:771 +#, python-format +msgid "Fetching glance image: %(id)s to server: %(host)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:722 +#: cinder/volume/drivers/vmware/vmdk.py:792 +#, python-format +msgid "Done copying image: %(id)s to volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:725 +#, python-format +msgid "" +"Exception in copy_image_to_volume: %(excep)s. Deleting the backing: " +"%(back)s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:746 +#, python-format +msgid "Exception in _select_ds_for_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:749 +#, python-format +msgid "Selected datastore %(ds)s for new volume of size %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:784 +#, python-format +msgid "Exception in copy_image_to_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:787 +#, python-format +msgid "Deleting the backing: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:808 +#, python-format +msgid "Copy glance image: %s to create new volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:842 +msgid "Upload to glance of attached volume is not supported." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:847 +#, python-format +msgid "Copy Volume: %s to new image." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:853 +#, python-format +msgid "Backing not found, creating for volume: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:872 +#, python-format +msgid "Done copying volume %(vol)s to a new image %(img)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:922 +#, python-format +msgid "Relocating volume: %(backing)s to %(ds)s and %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:961 +#: cinder/volume/drivers/vmware/volumeops.py:630 +#, python-format +msgid "Successfully created clone: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:974 +#, python-format +msgid "" +"There is no backing for the snapshoted volume: %(snap)s. Not creating any" +" backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1010 +#, python-format +msgid "" +"There is no backing for the source volume: %(src)s. Not creating any " +"backing for volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1018 +#, python-format +msgid "Linked clone of source volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:94 +#, python-format +msgid "Downloading image: %s from glance image server as a flat vmdk file." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:107 +#: cinder/volume/drivers/vmware/vmware_images.py:126 +#, python-format +msgid "Downloaded image: %s from glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:113 +#, python-format +msgid "Downloading image: %s from glance image server using HttpNfc import." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:132 +#, python-format +msgid "Uploading image: %s to the Glance image server using HttpNfc export." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:158 +#, python-format +msgid "Uploaded image: %s to the Glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:87 +#, python-format +msgid "Did not find any backing with name: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:94 +#, python-format +msgid "Deleting the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:97 +#, python-format +msgid "Initiated deletion of VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:99 +#, python-format +msgid "Deleted the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:237 +#, python-format +msgid "There are no valid datastores attached to %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:289 +#, python-format +msgid "" +"Creating folder: %(child_folder_name)s under parent folder: " +"%(parent_folder)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:306 +#, python-format +msgid "Child folder already present: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:314 +#, python-format +msgid "Created child folder: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:365 +#, python-format +msgid "Spec for creating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:383 +#, python-format +msgid "" +"Creating volume backing name: %(name)s disk_type: %(disk_type)s size_kb: " +"%(size_kb)s at folder: %(folder)s resourse pool: %(resource_pool)s " +"datastore name: %(ds_name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:395 +#, python-format +msgid "Initiated creation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:398 +#, python-format +msgid "Successfully created volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:438 +#, python-format +msgid "Spec for relocating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:452 +#, python-format +msgid "" +"Relocating backing: %(backing)s to datastore: %(ds)s and resource pool: " +"%(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:462 +#, python-format +msgid "Initiated relocation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:464 +#, python-format +msgid "" +"Successfully relocated volume backing: %(backing)s to datastore: %(ds)s " +"and resource pool: %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:474 +#, python-format +msgid "Moving backing: %(backing)s to folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:479 +#, python-format +msgid "Initiated move of volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:482 +#, python-format +msgid "Successfully moved volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:494 +#, python-format +msgid "Snapshoting backing: %(backing)s with name: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:501 +#, python-format +msgid "Initiated snapshot of volume backing: %(backing)s named: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:505 +#, python-format +msgid "Successfully created snapshot: %(snap)s for volume backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:553 +#, python-format +msgid "Deleting the snapshot: %(name)s from backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:558 +#, python-format +msgid "" +"Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " +"delete anything." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:565 +#, python-format +msgid "Initiated snapshot: %(name)s deletion for backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:569 +#, python-format +msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:597 +#, python-format +msgid "Spec for cloning the backing: %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:613 +#, python-format +msgid "" +"Creating a clone of backing: %(back)s, named: %(name)s, clone type: " +"%(type)s from snapshot: %(snap)s on datastore: %(ds)s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:627 +#, python-format +msgid "Initiated clone of backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:638 +#, python-format +msgid "Deleting file: %(file)s under datacenter: %(dc)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:646 +#, python-format +msgid "Initiated deletion via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:648 +#, python-format +msgid "Successfully deleted file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:701 +msgid "Copying disk data before snapshot of the VM" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:711 +#, python-format +msgid "Initiated copying disk data via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:713 +#, python-format +msgid "Successfully copied disk at: %(src)s to: %(dest)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:722 +#, python-format +msgid "Deleting vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:729 +#, python-format +msgid "Initiated deleting vmdk file via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:731 +#, python-format +msgid "Deleted vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/windows/windows.py:102 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:47 +#, python-format +msgid "" +"check_for_setup_error: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:53 +msgid "check_for_setup_error: there is no ISCSI traffic listening." +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:63 +#, python-format +msgid "" +"get_host_information: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:73 +#, python-format +msgid "" +"get_host_information: the ISCSI target information could not be " +"retrieved. WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:105 +#, python-format +msgid "" +"associate_initiator_with_iscsi_target: an association between initiator: " +"%(init)s and target name: %(target)s could not be established. WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:123 +#, python-format +msgid "" +"delete_iscsi_target: error when deleting the iscsi target associated with" +" target name: %(target)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:139 +#, python-format +msgid "" +"create_volume: error when creating the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:157 +#, python-format +msgid "" +"delete_volume: error when deleting the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:177 +#, python-format +msgid "" +"create_snapshot: error when creating the snapshot name: %(vol_name)s . 
" +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:193 +#, python-format +msgid "" +"create_volume_from_snapshot: error when creating the volume name: " +"%(vol_name)s from snapshot name: %(snap_name)s. WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:208 +#, python-format +msgid "" +"delete_snapshot: error when deleting the snapshot name: %(snap_name)s . " +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:223 +#, python-format +msgid "" +"create_iscsi_target: error when creating iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:240 +#, python-format +msgid "" +"remove_iscsi_target: error when deleting iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:255 +#, python-format +msgid "" +"add_disk_to_target: error adding disk associated to volume : %(vol_name)s" +" to the target name: %(tar_name)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:273 +#, python-format +msgid "" +"copy_vhd_disk: error when copying disk from source path : %(src_path)s to" +" destination path: %(dest_path)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:290 +#, python-format +msgid "" +"extend: error when extending the volume: %(vol_name)s .WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/flows/common.py:52 +#, python-format +msgid "Restoring source %(source_volid)s status to %(status)s" +msgstr "" + +#: cinder/volume/flows/common.py:58 +#, python-format +msgid "" +"Failed setting source volume %(source_volid)s back to its initial " +"%(source_status)s status" +msgstr "" + +#: cinder/volume/flows/common.py:83 +#, python-format +msgid "Updating volume: %(volume_id)s with %(update)s due to: %(reason)s" +msgstr "" + +#: cinder/volume/flows/common.py:90 +#: cinder/volume/flows/manager/create_volume.py:676 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(update)s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:81 +#, python-format +msgid "Originating snapshot status must be one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:103 +#, python-format +msgid "" +"Unable to create a volume from an originating source volume when its " +"status is not one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:126 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than the snapshot size " +"%(snap_size)sGB. They must be >= original snapshot size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:135 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than original volume size " +"%(source_size)sGB. They must be >= original volume size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:144 +#, python-format +msgid "Volume size %(size)s must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:186 +#, python-format +msgid "" +"Size of specified image %(image_size)sGB is larger than volume size " +"%(volume_size)sGB." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:194 +#, python-format +msgid "" +"Volume size %(volume_size)sGB cannot be smaller than the image minDisk " +"size %(min_disk)sGB." 
+msgstr "" + +#: cinder/volume/flows/api/create_volume.py:212 +#, python-format +msgid "Metadata property key %s greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:217 +#, python-format +msgid "Metadata property key %s value greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:254 +#, python-format +msgid "Availability zone '%s' is invalid" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:267 +msgid "Volume must be in the same availability zone as the snapshot" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:276 +msgid "Volume must be in the same availability zone as the source volume" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:315 +msgid "Volume type will be changed to be the same as the source volume." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:463 +#, python-format +msgid "Failed destroying volume entry %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:546 +#, python-format +msgid "Failed rolling back quota for %s reservations" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:590 +#, python-format +msgid "Failed to update quota for deleting volume: %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:678 +#: cinder/volume/flows/manager/create_volume.py:192 +#, python-format +msgid "Volume %s: create failed" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:682 +msgid "Unexpected build error:" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:105 +#, python-format +msgid "" +"Volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d due to " +"%(reason)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:124 +#, python-format +msgid "Volume %s: re-scheduled" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:141 +#, python-format +msgid "Updating volume %(volume_id)s with %(update)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:146 +#, python-format +msgid "Volume %s: resetting 'creating' status failed." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:165 +#, python-format +msgid "Volume %s: rescheduling failed" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:308 +#, python-format +msgid "" +"Failed notifying about the volume action %(event)s for volume " +"%(volume_id)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:345 +#, python-format +msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:347 +#, python-format +msgid "" +"Failed updating volume %(vol_id)s metadata using the provided " +"%(src_type)s %(src_id)s metadata" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:405 +#, python-format +msgid "" +"Failed fetching snapshot %(snapshot_id)s bootable flag using the provided" +" glance snapshot %(snapshot_ref_id)s volume reference" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:418 +#, python-format +msgid "Marking volume %s as bootable." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:421 +#, python-format +msgid "Failed updating volume %(volume_id)s bootable flag to true" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:448 +#, python-format +msgid "" +"Attempting download of %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s." 
+msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:455 +#: cinder/volume/flows/manager/create_volume.py:466 +#, python-format +msgid "" +"Failed to copy image %(image_id)s to volume: %(volume_id)s, error: " +"%(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:461 +#, python-format +msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:475 +#, python-format +msgid "" +"Downloaded image %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s successfully." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:512 +#, python-format +msgid "" +"Creating volume glance metadata for volume %(volume_id)s backed by image " +"%(image_id)s with: %(vol_metadata)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:526 +#, python-format +msgid "" +"Cloning %(volume_id)s from image %(image_id)s at location " +"%(image_location)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:552 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(updates)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:574 +#, python-format +msgid "Unable to create volume. Volume driver %s not initialized" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:588 +#, python-format +msgid "" +"Volume %(volume_id)s: being created using %(functor)s with specification:" +" %(volume_spec)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:611 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with creation provided " +"model %(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:619 +#, python-format +msgid "Volume %s: creating export" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:633 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with driver provided model " +"%(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:680 +#, python-format +msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" +msgstr "" + +#~ msgid "Connection to glance failed" +#~ msgstr "" + +#~ msgid "Invalid snapshot" +#~ msgstr "" + +#~ msgid "Invalid input received" +#~ msgstr "" + +#~ msgid "Invalid volume type" +#~ msgstr "" + +#~ msgid "Invalid volume" +#~ msgstr "" + +#~ msgid "Invalid host" +#~ msgstr "" + +#~ msgid "Invalid auth key" +#~ msgstr "" + +#~ msgid "Invalid metadata" +#~ msgstr "" + +#~ msgid "Invalid metadata size" +#~ msgstr "" + +#~ msgid "Migration error" +#~ msgstr "" + +#~ msgid "Quota exceeded" +#~ msgstr "" + +#~ msgid "Connection to swift failed" +#~ msgstr "" + +#~ msgid "Volume migration failed" +#~ msgstr "" + +#~ msgid "SSH command injection detected" +#~ msgstr "" + +#~ msgid "Invalid qos specs" +#~ msgstr "" + +#~ msgid "debug in callback: %s" +#~ msgstr "" + +#~ msgid "Expected object of type: %s" +#~ msgstr "" + +#~ msgid "timefunc: '%(name)s' took %(total_time).2f secs" +#~ msgstr "" + +#~ msgid "base image still has %s snapshots so not deleting base image" +#~ msgstr "" + +#~ msgid "Failed to rename migration destination volume %(vol)s to %(name)s" +#~ msgstr "" + +#~ msgid "Resize volume \"%(name)s\" to %(size)s" +#~ msgstr "" + +#~ msgid "Volume \"%(name)s\" resized. 
New size is %(size)s" +#~ msgstr "" + +#~ msgid "Invalid snapshot backing file format: %s" +#~ msgstr "" + +#~ msgid "Extend volume from %(old_size) to %(new_size)" +#~ msgstr "" + +#~ msgid "pool %s doesn't exist" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Could not get system name." +#~ msgstr "" + +#~ msgid "Disk not found: %s" +#~ msgstr "" + +#~ msgid "read timed out" +#~ msgstr "" + +#~ msgid "check_for_setup_error." +#~ msgstr "" + +#~ msgid "check_for_setup_error: Can not get device type." +#~ msgstr "" + +#~ msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +#~ msgstr "" + +#~ msgid "" +#~ msgstr "" + +#~ msgid "_get_device_type: Storage Pool must be configured." +#~ msgstr "" + +#~ msgid "create_volume:volume name: %s." +#~ msgstr "" + +#~ msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +#~ msgstr "" + +#~ msgid "create_export: volume name:%s" +#~ msgstr "" + +#~ msgid "create_export:Volume %(name)s does not exist." +#~ msgstr "" + +#~ msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +#~ msgstr "" + +#~ msgid "terminate_connection:Host does not exist. Host name:%(host)s." +#~ msgstr "" + +#~ msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +#~ msgstr "" + +#~ msgid "create_snapshot:Device does not support snapshot." +#~ msgstr "" + +#~ msgid "create_snapshot:Resource pool needs 1GB valid size at least." +#~ msgstr "" + +#~ msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +#~ msgstr "" + +#~ msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s" +#~ msgstr "" + +#~ msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +#~ msgstr "" + +#~ msgid "delete_snapshot:Device does not support snapshot." +#~ msgstr "" + +#~ msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +#~ msgstr "" + +#~ msgid "_check_conf_file: %s" +#~ msgstr "" + +#~ msgid "Write login information to xml error. %s" +#~ msgstr "" + +#~ msgid "_get_login_info error. %s" +#~ msgstr "" + +#~ msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +#~ msgstr "" + +#~ msgid "_get_lun_set_info:%s" +#~ msgstr "" + +#~ msgid "_get_iscsi_info:%s" +#~ msgstr "" + +#~ msgid "CLI command:%s" +#~ msgstr "" + +#~ msgid "_execute_cli:%s" +#~ msgstr "" + +#~ msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +#~ msgstr "" + +#~ msgid "_get_tgt_iqn:iSCSI IP is %s." +#~ msgstr "" + +#~ msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +#~ msgstr "" + +#~ msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +#~ msgstr "" + +#~ msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +#~ msgstr "" + +#~ msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." +#~ msgstr "" + +#~ msgid "Ignored target creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group member addition error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LU creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LUN mapping entry addition error while ensuring export" +#~ msgstr "" + +#~ msgid "Invalid source volume %(reason)s." +#~ msgstr "" + +#~ msgid "The request is invalid." +#~ msgstr "" + +#~ msgid "Volume %(volume_id)s persistence file could not be found." 
+#~ msgstr "" + +#~ msgid "No disk at %(location)s" +#~ msgstr "" + +#~ msgid "Class %(class_name)s could not be found: %(exception)s" +#~ msgstr "" + +#~ msgid "Action not allowed." +#~ msgstr "" + +#~ msgid "Key pair %(key_name)s already exists." +#~ msgstr "" + +#~ msgid "Migration error: %(reason)s" +#~ msgstr "" + +#~ msgid "Maximum volume/snapshot size exceeded" +#~ msgstr "" + +#~ msgid "3PAR Host already exists: %(err)s. %(info)s" +#~ msgstr "" + +#~ msgid "Backup volume %(volume_id)s type not recognised." +#~ msgstr "" + +#~ msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s" +#~ msgstr "" + +#~ msgid "ssh_read: Read SSH timeout" +#~ msgstr "" + +#~ msgid "do_setup." +#~ msgstr "" + +#~ msgid "create_volume: volume name: %s." +#~ msgstr "" + +#~ msgid "delete_volume: volume name: %s." +#~ msgstr "" + +#~ msgid "create_cloned_volume: src volume: %(src)s tgt volume: %(tgt)s" +#~ msgstr "" + +#~ msgid "create_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" +#~ msgstr "" + +#~ msgid "delete_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" +#~ msgstr "" + +#~ msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Updating volume stats" +#~ msgstr "" + +#~ msgid "restore finished." +#~ msgstr "" + +#~ msgid "Error encountered during initialization of driver: %s" +#~ msgstr "" + +#~ msgid "Unabled to update stats, driver is uninitialized" +#~ msgstr "" + +#~ msgid "Snapshot file at %s does not exist." +#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %s does not exist" +#~ msgstr "" + +#~ msgid "Login to 3PAR array invalid" +#~ msgstr "" + +#~ msgid "There are no datastores present under %s." +#~ msgstr "" + +#~ msgid "Size for volume: %s not found, skipping secure delete." +#~ msgstr "" + +#~ msgid "Could not find attribute for LUN named %s" +#~ msgstr "" + +#~ msgid "Cleaning up incomplete backup operations" +#~ msgstr "" + +#~ msgid "Resetting volume %s to available (was backing-up)" +#~ msgstr "" + +#~ msgid "Resetting volume %s to error_restoring (was restoring-backup)" +#~ msgstr "" + +#~ msgid "Resetting backup %s to error (was creating)" +#~ msgstr "" + +#~ msgid "Resetting backup %s to available (was restoring)" +#~ msgstr "" + +#~ msgid "Resuming delete on backup: %s" +#~ msgstr "" + +#~ msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +#~ msgstr "" + +#~ msgid "create_backup finished. backup: %s" +#~ msgstr "" + +#~ msgid "delete_backup started, backup: %s" +#~ msgstr "" + +#~ msgid "delete_backup finished, backup %s deleted" +#~ msgstr "" + +#~ msgid "JSON transfer Error" +#~ msgstr "" + +#~ msgid "create volume error: %(err)s" +#~ msgstr "" + +#~ msgid "Create snapshot error." +#~ msgstr "" + +#~ msgid "Create luncopy error." +#~ msgstr "" + +#~ msgid "_find_host_lun_id transfer data error! " +#~ msgstr "" + +#~ msgid "ssh_read: Read SSH timeout." +#~ msgstr "" + +#~ msgid "There are no hosts in the inventory." +#~ msgstr "" + +#~ msgid "Unable to create volume: %(vol)s on the hosts: %(hosts)s." +#~ msgstr "" + +#~ msgid "Successfully cloned new backing: %s." +#~ msgstr "" + +#~ msgid "Successfully reverted clone: %(clone)s to snapshot: %(snapshot)s." +#~ msgstr "" + +#~ msgid "Copying backing files from %(src)s to %(dest)s." +#~ msgstr "" + +#~ msgid "Initiated copying of backing via task: %s." +#~ msgstr "" + +#~ msgid "Successfully copied backing to %s." +#~ msgstr "" + +#~ msgid "Registering backing at path: %s to inventory." 
+#~ msgstr "" + +#~ msgid "Initiated registring backing, task: %s." +#~ msgstr "" + +#~ msgid "Successfully registered backing: %s." +#~ msgstr "" + +#~ msgid "Reverting backing to snapshot: %s." +#~ msgstr "" + +#~ msgid "Initiated reverting snapshot via task: %s." +#~ msgstr "" + +#~ msgid "Successfully reverted to snapshot: %s." +#~ msgstr "" + +#~ msgid "Successfully copied disk data to: %s." +#~ msgstr "" + +#~ msgid "Error(s): %s occurred in the call to RetrieveProperties." +#~ msgstr "" + +#~ msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +#~ msgstr "" + +#~ msgid "Deploy v1 of the Cinder API. " +#~ msgstr "" + +#~ msgid "Deploy v2 of the Cinder API. " +#~ msgstr "" + +#~ msgid "_read_xml:%s" +#~ msgstr "" + +#~ msgid "request ip info is %s." +#~ msgstr "" + +#~ msgid "new str info is %s." +#~ msgstr "" + +#~ msgid "Failed to create iser target for volume %(volume_id)s." +#~ msgstr "" + +#~ msgid "Failed to remove iser target for volume %(volume_id)s." +#~ msgstr "" + +#~ msgid "rtstool is not installed correctly" +#~ msgstr "" + +#~ msgid "Creating iser_target for: %s" +#~ msgstr "" + +#~ msgid "Failed to create iser target for volume id:%(vol_id)s: %(e)s" +#~ msgstr "" + +#~ msgid "Removing iser_target for: %s" +#~ msgstr "" + +#~ msgid "Failed to remove iser target for volume id:%(vol_id)s: %(e)s" +#~ msgstr "" + +#~ msgid "Volume %s does not exist, it seems it was already deleted" +#~ msgstr "" + +#~ msgid "Executing zfs send/recv on the appliance" +#~ msgstr "" + +#~ msgid "zfs send/recv done, new volume %s created" +#~ msgstr "" + +#~ msgid "Failed to delete temp snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" + +#~ msgid "Failed to delete zfs recv snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" + +#~ msgid "rbd export-diff failed - %s" +#~ msgstr "" + +#~ msgid "rbd import-diff failed - %s" +#~ msgstr "" + +#~ msgid "%s is not on GPFS. Perhaps GPFS not mounted." +#~ msgstr "" + +#~ msgid "Folder %s does not exist, it seems it was already deleted." +#~ msgstr "" + +#~ msgid "No 'os-update_readonly_flag' was specified in request." +#~ msgstr "" + +#~ msgid "Volume 'readonly' flag must be specified in request as a boolean." +#~ msgstr "" + +#~ msgid "ISER provider_location not stored, using discovery" +#~ msgstr "" + +#~ msgid "Could not find iSER export for volume %s" +#~ msgstr "" + +#~ msgid "ISER Discovery: Found %s" +#~ msgstr "" + +#~ msgid "Failed to access the device on the path %(path)s: %(error)s." +#~ msgstr "" + +#~ msgid "iSER device not found at %s" +#~ msgstr "" + +#~ msgid "Found iSER node %(host_device)s (after %(tries)s rescans)." +#~ msgstr "" + +#~ msgid "Skipping ensure_export. No iser_target provisioned for volume: %s" +#~ msgstr "" + +#~ msgid "Skipping remove_export. No iser_target provisioned for volume: %s" +#~ msgstr "" + +#~ msgid "Downloading image: %s from glance image server." +#~ msgstr "" + +#~ msgid "Uploading image: %s to the Glance image server." 
+#~ msgstr "" + +#~ msgid "Invalid request body" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: prefix %s" +#~ msgstr "" + +#~ msgid "Schedule volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete schedule volume using flow: %s" +#~ msgstr "" + +#~ msgid "Create volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete create volume workflow" +#~ msgstr "" + +#~ msgid "Expected volume result not found" +#~ msgstr "" + +#~ msgid "Manager volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete manager volume workflow" +#~ msgstr "" + +#~ msgid "Unable to update stats, driver is uninitialized" +#~ msgstr "" + +#~ msgid "Bad reponse from server: %s" +#~ msgstr "" + +#~ msgid "%(flow)s has moved into state %(state)s from state %(old_state)s" +#~ msgstr "" + +#~ msgid "No request spec, will not reschedule" +#~ msgstr "" + +#~ msgid "No retry filter property or associated retry info, will not reschedule" +#~ msgstr "" + +#~ msgid "Retry info not present, will not reschedule" +#~ msgstr "" + +#~ msgid "Clear capabilities" +#~ msgstr "" + +#~ msgid "This usually means the volume was never succesfully created." +#~ msgstr "" + +#~ msgid "setting LU uppper (end) limit to %s" +#~ msgstr "" + +#~ msgid "Can't find lun or lun goup in array" +#~ msgstr "" + +#~ msgid "Volume to be restored to is smaller than the backup to be restored" +#~ msgstr "" + +#~ msgid "Volume driver '%(driver)s' not initialized." +#~ msgstr "" + +#~ msgid "in looping call" +#~ msgstr "" + +#~ msgid "Is the appropriate service running?" +#~ msgstr "" + +#~ msgid "Could not find another host" +#~ msgstr "" + +#~ msgid "Not enough allocatable volume gigabytes remaining" +#~ msgstr "" + +#~ msgid "Unable to update stats on non-intialized Volume Group: %s" +#~ msgstr "" + +#~ msgid "do_setup: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "migrate_volume started with more than one vdisk copy" +#~ msgstr "" + +#~ msgid "migrate_volume: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "Selected datastore: %s for the volume." +#~ msgstr "" + +#~ msgid "There are no valid datastores present under %s." +#~ msgstr "" + +#~ msgid "Unable to create volume, driver not initialized" +#~ msgstr "" + +#~ msgid "Migration %(migration_id)s could not be found." +#~ msgstr "" + +#~ msgid "Bad driver response status: %(status)s" +#~ msgstr "" + +#~ msgid "Instance %(instance_id)s could not be found." +#~ msgstr "" + +#~ msgid "Volume retype failed: %(reason)s" +#~ msgstr "" + +#~ msgid "SIGTERM received" +#~ msgstr "" + +#~ msgid "Child %(pid)d exited with status %(code)d" +#~ msgstr "" + +#~ msgid "_wait_child %d" +#~ msgstr "" + +#~ msgid "wait wrap.failed %s" +#~ msgstr "" + +#~ msgid "" +#~ "Report interval must be less than " +#~ "service down time. Current config: " +#~ ". Setting " +#~ "service_down_time to: %(new_service_down_time)s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target for volume %(name)s." +#~ msgstr "" + +#~ msgid "Updating iscsi target: %s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target %(name)s: %(e)s" +#~ msgstr "" + +#~ msgid "Caught '%(exception)s' exception." +#~ msgstr "" + +#~ msgid "Get code level failed" +#~ msgstr "" + +#~ msgid "do_setup: Could not get system name" +#~ msgstr "" + +#~ msgid "Failed to get license information." 
+#~ msgstr "" + +#~ msgid "do_setup: No configured nodes" +#~ msgstr "" + +#~ msgid "enter: _get_chap_secret_for_host: host name %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_chap_secret_for_host: host name " +#~ "%(host_name)s with secret %(chap_secret)s" +#~ msgstr "" + +#~ msgid "" +#~ "_create_host: Cannot clean host name. " +#~ "Host name is not unicode or string" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: %s" +#~ msgstr "" + +#~ msgid "leave: _get_host_from_connector: host %s" +#~ msgstr "" + +#~ msgid "enter: _create_host: host %s" +#~ msgstr "" + +#~ msgid "_create_host: No connector ports" +#~ msgstr "" + +#~ msgid "leave: _create_host: host %(host)s - %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +#~ msgstr "" + +#~ msgid "" +#~ "storwize_svc_multihostmap_enabled is set to " +#~ "False, Not allow multi host mapping" +#~ msgstr "" + +#~ msgid "volume %s mapping to multi host" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _map_vol_to_host: LUN %(result_lun)s, " +#~ "volume %(volume_name)s, host %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "leave: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "_create_host failed to return the host name." +#~ msgstr "" + +#~ msgid "_get_host_from_connector failed to return the host name for connector" +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to any host found." +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: Multiple mappings of " +#~ "volume %(vol_name)s found, no host " +#~ "specified." +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to host %(host_name)s found" +#~ msgstr "" + +#~ msgid "protocol must be specified as ' iSCSI' or ' FC'" +#~ msgstr "" + +#~ msgid "enter: _create_vdisk: vdisk %s " +#~ msgstr "" + +#~ msgid "" +#~ "_create_vdisk %(name)s - did not find success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find success " +#~ "message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find mapping " +#~ "id in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to prepare FlashCopy" +#~ " from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "Unexecpted mapping status %(status)s for " +#~ "mapping %(id)s. Attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Mapping %(id)s prepare failed to " +#~ "complete within the allotted %(to)d " +#~ "seconds timeout. Terminating." 
+#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s with " +#~ "exception %(ex)s" +#~ msgstr "" + +#~ msgid "_prepare_fc_map: %s" +#~ msgstr "" + +#~ msgid "" +#~ "_start_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "enter: _run_flashcopy: execute FlashCopy from" +#~ " source %(source)s to target %(target)s" +#~ msgstr "" + +#~ msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +#~ msgstr "" + +#~ msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %(src_vdisk)s (%(src_id)s) does not exist" +#~ msgstr "" + +#~ msgid "" +#~ "_create_copy: cannot get source vdisk " +#~ "%(src)s capacity from vdisk attributes " +#~ "%(attr)s" +#~ msgstr "" + +#~ msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_flashcopy_mapping_attributes: mapping " +#~ "%(fc_map_id)s, attributes %(attributes)s" +#~ msgstr "" + +#~ msgid "enter: _is_vdisk_defined: vdisk %s " +#~ msgstr "" + +#~ msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +#~ msgstr "" + +#~ msgid "enter: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "warning: Tried to delete vdisk %s but it does not exist." +#~ msgstr "" + +#~ msgid "leave: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "" +#~ "_add_vdisk_copy %(name)s - did not find" +#~ " success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "_get_vdisk_copy_attrs: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "_get_pool_attrs: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "enter: _execute_command_and_parse_attributes: command %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _execute_command_and_parse_attributes:\n" +#~ "command: %(cmd)s\n" +#~ "attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "_get_hdr_dic: attribute headers and values do not match.\n" +#~ " Headers: %(header)s\n" +#~ " Values: %(row)s" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ "stdout: %(out)s\n" +#~ "stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "Did not find expected column in %(fun)s: %(hdr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Volume size %(size)s cannot be lesser" +#~ " than the snapshot size %(snap_size)s. " +#~ "They must be >= original snapshot " +#~ "size." +#~ msgstr "" + +#~ msgid "" +#~ "Clones currently disallowed when %(size)s " +#~ "< %(source_size)s. They must be >= " +#~ "original volume size." +#~ msgstr "" + +#~ msgid "" +#~ "Size of specified image %(image_size)s " +#~ "is larger than volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "" +#~ "Image minDisk size %(min_disk)s is " +#~ "larger than the volume size " +#~ "%(volume_size)s." 
+#~ msgstr "" + +#~ msgid "Updating volume %(volume_id)s with %(update)s" +#~ msgstr "" + +#~ msgid "Volume %s: resetting 'creating' status failed" +#~ msgstr "" + +#~ msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s" +#~ msgstr "" + +#~ msgid "Marking volume %s as bootable" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting download of %(image_id)s " +#~ "(%(image_location)s) to volume %(volume_id)s" +#~ msgstr "" + +#~ msgid "" +#~ "Downloaded image %(image_id)s (%(image_location)s)" +#~ " to volume %(volume_id)s successfully" +#~ msgstr "" + +#~ msgid "" +#~ "Creating volume glance metadata for " +#~ "volume %(volume_id)s backed by image " +#~ "%(image_id)s with: %(vol_metadata)s" +#~ msgstr "" + +#~ msgid "" +#~ "Cloning %(volume_id)s from image %(image_id)s" +#~ " at location %(image_location)s" +#~ msgstr "" + diff --git a/cinder/locale/sl_SI/LC_MESSAGES/cinder.po b/cinder/locale/sl_SI/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..430fb9f2e5 --- /dev/null +++ b/cinder/locale/sl_SI/LC_MESSAGES/cinder.po @@ -0,0 +1,10737 @@ +# Slovenian (Slovenia) translations for cinder. +# Copyright (C) 2013 ORGANIZATION +# This file is distributed under the same license as the cinder project. +# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Cinder\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-02-03 06:16+0000\n" +"PO-Revision-Date: 2013-07-01 16:14+0000\n" +"Last-Translator: openstackjenkins \n" +"Language-Team: Slovenian (Slovenia) " +"(http://www.transifex.com/projects/p/openstack/language/sl_SI/)\n" +"Plural-Forms: nplurals=4; plural=(n%100==1 ? 0 : n%100==2 ? 1 : n%100==3 " +"|| n%100==4 ? 2 : 3)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:102 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:66 cinder/brick/exception.py:33 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:88 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:107 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:112 +#, python-format +msgid "Volume driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:116 +#, python-format +msgid "Backup driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:120 +#, python-format +msgid "Connection to glance failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:124 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:129 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:133 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:137 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:141 +msgid "Volume driver not ready." +msgstr "" + +#: cinder/exception.py:145 cinder/brick/exception.py:74 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:150 +#, python-format +msgid "Invalid snapshot: %(reason)s" +msgstr "" + +#: cinder/exception.py:154 +#, python-format +msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." 
+msgstr "" + +#: cinder/exception.py:159 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:163 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:167 +msgid "The results are invalid." +msgstr "" + +#: cinder/exception.py:171 +#, python-format +msgid "Invalid input received: %(reason)s" +msgstr "" + +#: cinder/exception.py:175 +#, python-format +msgid "Invalid volume type: %(reason)s" +msgstr "" + +#: cinder/exception.py:179 +#, python-format +msgid "Invalid volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:183 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:187 +#, python-format +msgid "Invalid host: %(reason)s" +msgstr "" + +#: cinder/exception.py:193 cinder/brick/exception.py:81 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:197 +#, python-format +msgid "Invalid auth key: %(reason)s" +msgstr "" + +#: cinder/exception.py:201 +#, python-format +msgid "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" +msgstr "" + +#: cinder/exception.py:206 +msgid "Service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:210 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:214 +#, python-format +msgid "The device in the path %(path)s is unavailable: %(reason)s" +msgstr "" + +#: cinder/exception.py:218 +#, python-format +msgid "Expected a uuid but received %(uuid)s." +msgstr "" + +#: cinder/exception.py:222 cinder/brick/exception.py:68 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:228 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:232 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "" +"Volume %(volume_id)s has no administration metadata with key " +"%(metadata_key)s." +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Invalid metadata: %(reason)s" +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Invalid metadata size: %(reason)s" +msgstr "" + +#: cinder/exception.py:250 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:255 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:264 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:269 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s deletion is not allowed with volumes " +"present with the type." +msgstr "" + +#: cinder/exception.py:274 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:278 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:282 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:287 +#, python-format +msgid "No target id found for volume %(volume_id)s." 
+msgstr "" + +#: cinder/exception.py:291 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:295 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:328 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:332 +#, python-format +msgid "Unknown quota resources %(unknown)s." +msgstr "" + +#: cinder/exception.py:336 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:340 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:344 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:348 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:352 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:356 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:365 +#, python-format +msgid "Volume Type %(id)s already exists." +msgstr "" + +#: cinder/exception.py:369 +#, python-format +msgid "Volume type encryption for type %(type_id)s already exists." +msgstr "" + +#: cinder/exception.py:373 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:377 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:381 +#, python-format +msgid "Could not find parameter %(param)s" +msgstr "" + +#: cinder/exception.py:385 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:389 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:398 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:402 +#, python-format +msgid "Quota exceeded: code=%(code)s" +msgstr "" + +#: cinder/exception.py:409 +#, python-format +msgid "" +"Requested volume or snapshot exceeds allowed Gigabytes quota. Requested " +"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." 
+msgstr "" + +#: cinder/exception.py:415 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:419 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:423 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:427 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:432 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:436 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:440 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:444 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:449 +#, python-format +msgid "Glance metadata for volume/snapshot %(id)s cannot be found." +msgstr "" + +#: cinder/exception.py:453 +#, python-format +msgid "Failed to export for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:457 +#, python-format +msgid "Failed to create metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:461 +#, python-format +msgid "Failed to update metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:465 +#, python-format +msgid "Failed to copy metadata to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:469 +#, python-format +msgid "Failed to copy image to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:473 +msgid "Invalid Ceph args provided for backup rbd operation" +msgstr "" + +#: cinder/exception.py:477 +msgid "An error has occurred during backup operation" +msgstr "" + +#: cinder/exception.py:481 +msgid "Backup RBD operation failed" +msgstr "" + +#: cinder/exception.py:485 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:489 +msgid "Failed to identify volume backend." +msgstr "" + +#: cinder/exception.py:493 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "" + +#: cinder/exception.py:497 +#, python-format +msgid "Connection to swift failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:501 +#, python-format +msgid "Transfer %(transfer_id)s could not be found." +msgstr "" + +#: cinder/exception.py:505 +#, python-format +msgid "Volume migration failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:509 +#, python-format +msgid "SSH command injection detected: %(command)s" +msgstr "" + +#: cinder/exception.py:513 +#, python-format +msgid "QoS Specs %(specs_id)s already exists." +msgstr "" + +#: cinder/exception.py:517 +#, python-format +msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:522 +#, python-format +msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "No such QoS spec %(specs_id)s." +msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:536 +#, python-format +msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." 
+msgstr "" + +#: cinder/exception.py:541 +#, python-format +msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." +msgstr "" + +#: cinder/exception.py:546 +#, python-format +msgid "Invalid qos specs: %(reason)s" +msgstr "" + +#: cinder/exception.py:550 +#, python-format +msgid "QoS Specs %(specs_id)s is still associated with entities." +msgstr "" + +#: cinder/exception.py:554 +#, python-format +msgid "key manager error: %(reason)s" +msgstr "" + +#: cinder/exception.py:560 +msgid "Coraid Cinder Driver exception." +msgstr "" + +#: cinder/exception.py:564 +msgid "Failed to encode json data." +msgstr "" + +#: cinder/exception.py:568 +msgid "Login on ESM failed." +msgstr "" + +#: cinder/exception.py:572 +msgid "Relogin on ESM failed." +msgstr "" + +#: cinder/exception.py:576 +#, python-format +msgid "Group with name \"%(group_name)s\" not found." +msgstr "" + +#: cinder/exception.py:580 +#, python-format +msgid "ESM configure request failed: %(message)s." +msgstr "" + +#: cinder/exception.py:584 +#, python-format +msgid "Coraid ESM not available with reason: %(reason)s." +msgstr "" + +#: cinder/exception.py:589 +msgid "Zadara Cinder Driver exception." +msgstr "" + +#: cinder/exception.py:593 +#, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:597 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:601 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:605 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:609 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:613 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:618 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:622 +msgid "SolidFire Cinder Driver exception" +msgstr "" + +#: cinder/exception.py:626 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:630 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:636 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:641 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:645 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:649 cinder/exception.py:662 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:654 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:658 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/manager.py:133 +msgid "Notifying Schedulers of capabilities ..." +msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:105 +#, python-format +msgid "" +"Default quota for resource: %(res)s is set by the default quota flag: " +"quota_%(res)s, it is now deprecated. Please use the the default quota " +"class for default quota." 
+msgstr "" + +#: cinder/quota.py:748 +#, python-format +msgid "Created reservations %s" +msgstr "" + +#: cinder/quota.py:770 +#, python-format +msgid "Failed to commit reservations %s" +msgstr "" + +#: cinder/quota.py:790 +#, python-format +msgid "Failed to roll back reservations %s" +msgstr "" + +#: cinder/quota.py:876 +msgid "Cannot register resource" +msgstr "" + +#: cinder/quota.py:879 +msgid "Cannot register resources" +msgstr "" + +#: cinder/quota_utils.py:46 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume - " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/quota_utils.py:56 cinder/transfer/api.py:168 +#: cinder/volume/flows/api/create_volume.py:520 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/service.py:95 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:108 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:148 +#, python-format +msgid "" +"Report interval must be less than service down time. Current config " +"service_down_time: %(service_down_time)s, report_interval for this: " +"service is: %(report_interval)s. Setting global service_down_time to: " +"%(new_down_time)s" +msgstr "" + +#: cinder/service.py:216 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:255 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:270 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:276 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:298 +#, python-format +msgid "" +"Value of config option %(name)s_workers must be integer greater than 1. " +"Input value ignored." +msgstr "" + +#: cinder/service.py:373 +msgid "serve() can only be called once" +msgstr "" + +#: cinder/service.py:379 cinder/openstack/common/service.py:166 +#: cinder/openstack/common/service.py:384 +msgid "Full set of CONF:" +msgstr "" + +#: cinder/service.py:387 +#, python-format +msgid "%s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Can not translate %s to integer." 
+msgstr "" + +#: cinder/utils.py:127 +#, python-format +msgid "May specify only one of %s" +msgstr "" + +#: cinder/utils.py:212 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:228 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:412 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:423 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:698 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:759 +#, python-format +msgid "Volume driver %s not initialized" +msgstr "" + +#: cinder/wsgi.py:127 cinder/openstack/common/sslutils.py:50 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: cinder/wsgi.py:130 cinder/openstack/common/sslutils.py:53 +#, python-format +msgid "Unable to find ca_file : %s" +msgstr "" + +#: cinder/wsgi.py:133 cinder/openstack/common/sslutils.py:56 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: cinder/wsgi.py:136 cinder/openstack/common/sslutils.py:59 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:169 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:206 +#, python-format +msgid "Started %(name)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:244 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:313 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." +msgstr "" + +#: cinder/api/common.py:92 cinder/api/common.py:126 cinder/volume/api.py:266 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:95 cinder/api/common.py:130 cinder/volume/api.py:263 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:120 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:134 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:162 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:189 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:182 +msgid "Initializing extension manager." 
+msgstr "" + +#: cinder/api/extensions.py:197 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:235 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:236 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:240 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:256 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:262 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:276 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:287 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:356 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:266 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:463 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:786 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:907 +msgid "subclasses must implement construct()!" +msgstr "" + +#: cinder/api/contrib/admin_actions.py:81 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:227 +#: cinder/api/contrib/volume_transfer.py:157 +#: cinder/api/contrib/volume_transfer.py:193 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:224 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:60 +msgid "Snapshot not found." 
+msgstr "" + +#: cinder/api/contrib/hosts.py:86 cinder/api/openstack/wsgi.py:245 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:136 +#, python-format +msgid "Host '%s' could not be found." +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:168 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:180 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:206 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:214 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:111 +msgid "Please specify a name for QoS specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:220 +msgid "Failed to disassociate qos specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:222 +msgid "Qos specs still in use." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:298 +#: cinder/api/contrib/qos_specs_manage.py:351 +msgid "Volume Type id must not be None." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:72 +msgid "Missing required element quota_class_set in request body." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:81 +msgid "Quota class limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:85 +msgid "Quota class limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:60 +msgid "Quota limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quotas.py:65 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:100 +msgid "Missing required element quota_set in request body." +msgstr "" + +#: cinder/api/contrib/quotas.py:111 +#, python-format +msgid "Bad key(s) in quota set: %s" +msgstr "" + +#: cinder/api/contrib/scheduler_hints.py:36 +msgid "Malformed scheduler_hints attribute" +msgstr "" + +#: cinder/api/contrib/services.py:84 +msgid "" +"Query by service parameter is deprecated. Please use binary parameter " +"instead." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:51 +msgid "'status' must be specified." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:61 +#, python-format +msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:67 +#, python-format +msgid "" +"Provided snapshot status %(provided)s not allowed for snapshot with " +"status %(current)s." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:79 +msgid "progress must be an integer percentage" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:101 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:105 +#: cinder/api/v1/snapshot_metadata.py:75 cinder/api/v1/volume_metadata.py:75 +#: cinder/api/v2/snapshot_metadata.py:75 cinder/api/v2/volume_metadata.py:74 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:108 +#: cinder/api/v1/snapshot_metadata.py:79 cinder/api/v1/volume_metadata.py:79 +#: cinder/api/v2/snapshot_metadata.py:79 cinder/api/v2/volume_metadata.py:78 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:150 +msgid "" +"Key names can only contain alphanumeric characters, underscores, periods," +" colons and hyphens." 
+msgstr "" + +#: cinder/api/contrib/volume_actions.py:99 +#, python-format +msgid "" +"Invalid request to attach volume to an instance %(instance_uuid)s and a " +"host %(host_name)s simultaneously" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:107 +msgid "Invalid request to attach volume to an invalid target" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:111 +msgid "" +"Invalid request to attach volume with an invalid mode. Attaching mode " +"should be 'rw' or 'ro'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:196 +msgid "Unable to fetch connection information from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:216 +msgid "Unable to terminate volume connection from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:229 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:237 +msgid "Bad value for 'force' parameter." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:240 +msgid "'force' is not string or bool." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:280 +msgid "New volume size must be specified as an integer." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:299 +msgid "Must specify readonly in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:307 +msgid "Bad value for 'readonly'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:311 +msgid "'readonly' not string or bool" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:325 +msgid "New volume type must be specified." +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:131 +msgid "Listing volume transfers" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:147 +#, python-format +msgid "Creating new volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:162 +#, python-format +msgid "Creating transfer of volume %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:183 +#, python-format +msgid "Accepting volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:196 +#, python-format +msgid "Accepting transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:217 +#, python-format +msgid "Delete transfer with id: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:64 +msgid "key_size must be non-negative" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:67 +msgid "key_size must be an integer" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:73 +msgid "provider must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:75 +msgid "control_location must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:83 +#, python-format +msgid "Valid control location are: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:111 +msgid "Cannot create encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:115 +msgid "Create body is not valid." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:157 +msgid "Cannot delete encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/middleware/auth.py:108 +msgid "Invalid service catalog json." 
+msgstr "" + +#: cinder/api/middleware/fault.py:44 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:53 cinder/api/openstack/wsgi.py:984 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/fault.py:69 +#, python-format +msgid "%(exception)s: %(explanation)s" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:55 cinder/api/middleware/sizelimit.py:64 +#: cinder/api/middleware/sizelimit.py:78 +msgid "Request is too large." +msgstr "" + +#: cinder/api/openstack/__init__.py:69 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:80 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:104 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:126 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." +msgstr "" + +#: cinder/api/openstack/wsgi.py:220 cinder/api/openstack/wsgi.py:634 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:639 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:677 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:682 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:685 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:793 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:799 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:803 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:914 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:917 cinder/api/openstack/wsgi.py:930 +#: cinder/api/v1/snapshot_metadata.py:53 cinder/api/v1/snapshot_metadata.py:71 +#: cinder/api/v1/snapshot_metadata.py:96 cinder/api/v1/snapshot_metadata.py:121 +#: cinder/api/v1/volume_metadata.py:53 cinder/api/v1/volume_metadata.py:71 +#: cinder/api/v1/volume_metadata.py:96 cinder/api/v1/volume_metadata.py:121 +#: cinder/api/v2/snapshot_metadata.py:53 cinder/api/v2/snapshot_metadata.py:71 +#: cinder/api/v2/snapshot_metadata.py:96 cinder/api/v2/snapshot_metadata.py:121 +#: cinder/api/v2/volume_metadata.py:52 cinder/api/v2/volume_metadata.py:70 +#: cinder/api/v2/volume_metadata.py:95 cinder/api/v2/volume_metadata.py:120 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:927 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:939 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:987 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." 
+msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:139 cinder/api/v2/limits.py:138 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:264 cinder/api/v2/limits.py:261 +msgid "This request was rate-limited." +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:37 cinder/api/v1/snapshot_metadata.py:117 +#: cinder/api/v1/snapshot_metadata.py:156 cinder/api/v2/snapshot_metadata.py:37 +#: cinder/api/v2/snapshot_metadata.py:117 +#: cinder/api/v2/snapshot_metadata.py:156 +msgid "snapshot does not exist" +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:139 +#: cinder/api/v1/snapshot_metadata.py:149 cinder/api/v1/volume_metadata.py:139 +#: cinder/api/v1/volume_metadata.py:149 cinder/api/v2/snapshot_metadata.py:139 +#: cinder/api/v2/snapshot_metadata.py:149 cinder/api/v2/volume_metadata.py:138 +#: cinder/api/v2/volume_metadata.py:148 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:119 cinder/api/v2/snapshots.py:120 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:184 +msgid "'volume_id' must be specified" +msgstr "" + +#: cinder/api/v1/snapshots.py:182 cinder/api/v2/snapshots.py:193 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:186 cinder/api/v2/snapshots.py:202 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:37 cinder/api/v1/volume_metadata.py:117 +#: cinder/api/v1/volume_metadata.py:156 cinder/api/v2/volume_metadata.py:36 +#: cinder/api/v2/volume_metadata.py:116 cinder/api/v2/volume_metadata.py:155 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:111 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:290 cinder/api/v2/volumes.py:228 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:344 cinder/api/v1/volumes.py:348 +#: cinder/api/v2/volumes.py:298 cinder/api/v2/volumes.py:302 +msgid "Invalid imageRef provided." 
+msgstr "" + +#: cinder/api/v1/volumes.py:388 cinder/api/v2/volumes.py:354 +#, python-format +msgid "snapshot id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:401 +#, python-format +msgid "source vol id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:412 cinder/api/v2/volumes.py:377 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/v1/volumes.py:496 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/snapshots.py:111 cinder/api/v2/snapshots.py:126 +#: cinder/api/v2/snapshots.py:267 +msgid "Snapshot could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:174 cinder/api/v2/snapshots.py:234 +#: cinder/api/v2/volumes.py:313 cinder/api/v2/volumes.py:419 +#, python-format +msgid "Missing required element '%s' in request body" +msgstr "" + +#: cinder/api/v2/snapshots.py:190 cinder/api/v2/volumes.py:217 +#: cinder/api/v2/volumes.py:234 cinder/api/v2/volumes.py:449 +msgid "Volume could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:230 cinder/api/v2/volumes.py:415 +msgid "Missing request body" +msgstr "" + +#: cinder/api/v2/types.py:70 +msgid "Volume type not found" +msgstr "" + +#: cinder/api/v2/volumes.py:237 +msgid "Volume cannot be deleted while in attached state" +msgstr "" + +#: cinder/api/v2/volumes.py:343 +msgid "Volume type not found." +msgstr "" + +#: cinder/api/v2/volumes.py:366 +#, python-format +msgid "source volume id:%s not found" +msgstr "" + +#: cinder/api/v2/volumes.py:472 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:66 +msgid "Backup status must be available or error" +msgstr "" + +#: cinder/backup/api.py:105 +msgid "Volume to be backed up must be available" +msgstr "" + +#: cinder/backup/api.py:140 +msgid "Backup status must be available" +msgstr "" + +#: cinder/backup/api.py:145 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:154 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:170 +msgid "Volume to be restored to must be available" +msgstr "" + +#: cinder/backup/api.py:176 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." +msgstr "" + +#: cinder/backup/api.py:181 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:97 +msgid "NULL host not allowed for volume backend lookup." +msgstr "" + +#: cinder/backup/manager.py:100 +#, python-format +msgid "Checking hostname '%s' for backend info." +msgstr "" + +#: cinder/backup/manager.py:107 +#, python-format +msgid "Backend not found in hostname (%s) so using default." +msgstr "" + +#: cinder/backup/manager.py:117 +#, python-format +msgid "Manager requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:120 cinder/backup/manager.py:132 +msgid "Fetching default backend." +msgstr "" + +#: cinder/backup/manager.py:123 +#, python-format +msgid "Volume manager for backend '%s' does not exist." +msgstr "" + +#: cinder/backup/manager.py:129 +#, python-format +msgid "Driver requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:147 +#, python-format +msgid "" +"Registering backend %(backend)s (host=%(host)s " +"backend_name=%(backend_name)s)." +msgstr "" + +#: cinder/backup/manager.py:154 +#, python-format +msgid "Registering default backend %s." 
+msgstr "" + +#: cinder/backup/manager.py:158 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)." +msgstr "" + +#: cinder/backup/manager.py:165 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s." +msgstr "" + +#: cinder/backup/manager.py:184 +msgid "Cleaning up incomplete backup operations." +msgstr "" + +#: cinder/backup/manager.py:189 +#, python-format +msgid "Resetting volume %s to available (was backing-up)." +msgstr "" + +#: cinder/backup/manager.py:194 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)." +msgstr "" + +#: cinder/backup/manager.py:206 +#, python-format +msgid "Resetting backup %s to error (was creating)." +msgstr "" + +#: cinder/backup/manager.py:212 +#, python-format +msgid "Resetting backup %s to available (was restoring)." +msgstr "" + +#: cinder/backup/manager.py:217 +#, python-format +msgid "Resuming delete on backup: %s." +msgstr "" + +#: cinder/backup/manager.py:225 +#, python-format +msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:237 +#, python-format +msgid "" +"Create backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:249 +#, python-format +msgid "" +"Create backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:282 +#, python-format +msgid "Create backup finished. backup: %s." +msgstr "" + +#: cinder/backup/manager.py:286 +#, python-format +msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:299 +#, python-format +msgid "" +"Restore backup aborted: expected volume status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:310 +#, python-format +msgid "" +"Restore backup aborted: expected backup status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:329 +#, python-format +msgid "" +"Restore backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:360 +#, python-format +msgid "" +"Restore backup finished, backup %(backup_id)s restored to volume " +"%(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:379 +#, python-format +msgid "Delete backup started, backup: %s." +msgstr "" + +#: cinder/backup/manager.py:386 +#, python-format +msgid "" +"Delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:399 +#, python-format +msgid "" +"Delete backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:422 +#, python-format +msgid "Delete backup finished, backup %s deleted." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:116 +msgid "" +"rbd striping not supported - ignoring configuration settings for rbd " +"striping" +msgstr "" + +#: cinder/backup/drivers/ceph.py:147 +#, python-format +msgid "invalid user '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:213 +msgid "backup_id required" +msgstr "" + +#: cinder/backup/drivers/ceph.py:224 +#, python-format +msgid "discarding %(length)s bytes from offset %(offset)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:232 +#, python-format +msgid "writing zeroes chunk %d" +msgstr "" + +#: cinder/backup/drivers/ceph.py:246 +#, python-format +msgid "transferring data between '%(src)s' and '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:250 +#, python-format +msgid "%(chunks)s chunks of %(bytes)s bytes to be transferred" +msgstr "" + +#: cinder/backup/drivers/ceph.py:269 +#, python-format +msgid "transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:279 +#, python-format +msgid "transferring remaining %s bytes" +msgstr "" + +#: cinder/backup/drivers/ceph.py:295 +#, python-format +msgid "creating base image '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:322 cinder/backup/drivers/ceph.py:603 +#, python-format +msgid "deleting backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:325 +msgid "no backup snapshot to delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:361 +#, python-format +msgid "trying diff format name format basename='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:369 +#, python-format +msgid "image %s not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:377 +#, python-format +msgid "base image still has %s snapshots so skipping base image delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:382 +#, python-format +msgid "deleting base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:389 +#, python-format +msgid "image busy, retrying %(retries)s more time(s) in %(delay)ss" +msgstr "" + +#: cinder/backup/drivers/ceph.py:394 +msgid "max retries reached - raising error" +msgstr "" + +#: cinder/backup/drivers/ceph.py:397 +#, python-format +msgid "base backup image='%s' deleted)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:407 +#, python-format +msgid "deleting source snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:453 +#, python-format +msgid "performing differential transfer from '%(src)s' to '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:478 +#, python-format +msgid "rbd diff op failed - (ret=%(ret)s stderr=%(stderr)s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:488 +#, python-format +msgid "image '%s' not found - trying diff format name" +msgstr "" + +#: cinder/backup/drivers/ceph.py:493 +#, python-format +msgid "diff format image '%s' not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:528 +#, python-format +msgid "using --from-snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:543 +#, python-format +msgid "source snap '%s' is stale so deleting" +msgstr "" + +#: cinder/backup/drivers/ceph.py:555 +#, python-format +msgid "" +"snap='%(snap)s' does not exist in base image='%(base)s' - aborting " +"incremental backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:566 +#, python-format +msgid "creating backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:586 +#, python-format +msgid "differential backup transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:595 +msgid "differential backup transfer failed" +msgstr "" + +#: 
cinder/backup/drivers/ceph.py:625 +#, python-format +msgid "creating base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:634 +msgid "copying data" +msgstr "" + +#: cinder/backup/drivers/ceph.py:694 +#, python-format +msgid "looking for snapshot of backup base '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:697 +#, python-format +msgid "backup base '%s' has no snapshots" +msgstr "" + +#: cinder/backup/drivers/ceph.py:704 +#, python-format +msgid "backup '%s' has no snapshot" +msgstr "" + +#: cinder/backup/drivers/ceph.py:708 +#, python-format +msgid "backup should only have one snapshot but instead has %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:713 +#, python-format +msgid "found snapshot '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:734 +msgid "need non-zero volume size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:751 +#, python-format +msgid "Starting backup of volume='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:764 +msgid "forcing full backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:776 +#, python-format +msgid "backup '%s' finished." +msgstr "" + +#: cinder/backup/drivers/ceph.py:834 +msgid "adjusting restore vol size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:846 +#, python-format +msgid "trying incremental restore from base='%(base)s' snap='%(snap)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:858 +msgid "differential restore failed, trying full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:869 +#, python-format +msgid "restore transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:916 +#, python-format +msgid "rbd has %s extents" +msgstr "" + +#: cinder/backup/drivers/ceph.py:938 +msgid "dest volume is original volume - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:959 +msgid "destination has extents - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:964 +#, python-format +msgid "no restore point found for backup='%s', forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:995 +msgid "forcing full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1005 +#, python-format +msgid "starting restore from Ceph backup=%(src)s to volume=%(dest)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1016 +msgid "volume_file does not support fileno() so skipping fsync()" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1021 +msgid "restore finished successfully." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:1023 +#, python-format +msgid "restore finished with error - %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1029 +#, python-format +msgid "delete started for backup=%s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1034 +msgid "rbd image not found but continuing anyway so that db entry can be removed" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1037 +#, python-format +msgid "delete '%s' finished with warning" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1039 +#, python-format +msgid "delete '%s' finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:106 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:123 +#, python-format +msgid "single_user auth mode enabled, but %(param)s not set" +msgstr "" + +#: cinder/backup/drivers/swift.py:141 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:146 +#, python-format +msgid "container %s does not exist" +msgstr "" + +#: cinder/backup/drivers/swift.py:151 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/drivers/swift.py:157 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:173 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:182 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:192 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:209 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/drivers/swift.py:214 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:219 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:224 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/drivers/swift.py:234 +#, python-format +msgid "volume size %d is invalid." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:248 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:271 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/drivers/swift.py:278 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:287 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/drivers/swift.py:291 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/drivers/swift.py:297 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:301 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:304 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:312 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/drivers/swift.py:328 cinder/backup/drivers/tsm.py:324 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/drivers/swift.py:345 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/drivers/swift.py:350 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:356 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/drivers/swift.py:362 +#, python-format +msgid "" +"restoring object from swift. backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:378 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/drivers/swift.py:401 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:409 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:423 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:428 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:432 cinder/backup/drivers/tsm.py:378 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:446 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:455 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:458 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:468 cinder/backup/drivers/tsm.py:440 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/backup/drivers/tsm.py:85 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to create device hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:143 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to obtain backup success notification from " +"server.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:173 +#, python-format +msgid "" +"restore: %(vol_id)s Failed.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:199 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a block device." +msgstr "" + +#: cinder/backup/drivers/tsm.py:206 +#, python-format +msgid "backup: %(vol_id)s Failed. Cannot obtain real path to device %(path)s." +msgstr "" + +#: cinder/backup/drivers/tsm.py:213 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a file." +msgstr "" + +#: cinder/backup/drivers/tsm.py:260 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to remove backup hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:286 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to TSM, volume path: " +"%(volume_path)s," +msgstr "" + +#: cinder/backup/drivers/tsm.py:298 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:308 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:338 +#, python-format +msgid "" +"restore: starting restore of backup from TSM to volume %(volume_id)s, " +"backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:352 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:362 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:413 +#, python-format +msgid "" +"delete: %(vol_id)s Failed to run dsmc with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:421 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments with " +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:432 +#, python-format +msgid "" +"delete: %(vol_id)s Failed with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/brick/exception.py:55 +#, python-format +msgid "Exception in string format operation. 
msg='%s'" +msgstr "" + +#: cinder/brick/exception.py:85 +msgid "We are unable to locate any Fibre Channel devices." +msgstr "" + +#: cinder/brick/exception.py:89 +msgid "Unable to find a Fibre Channel volume device." +msgstr "" + +#: cinder/brick/exception.py:93 +#, python-format +msgid "Volume device not found at %(device)s." +msgstr "" + +#: cinder/brick/exception.py:97 +#, python-format +msgid "Unable to find Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:101 +#, python-format +msgid "Failed to create Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:105 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:109 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:113 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:117 +#, python-format +msgid "Connect to volume via protocol %(protocol)s not supported." +msgstr "" + +#: cinder/brick/initiator/connector.py:127 +#, python-format +msgid "Invalid InitiatorConnector protocol specified %(protocol)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:140 +#, python-format +msgid "Failed to access the device on the path %(path)s: %(error)s %(info)s." +msgstr "" + +#: cinder/brick/initiator/connector.py:229 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. Try" +" number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:242 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:317 +#, python-format +msgid "Could not find the iSCSI Initiator File %s" +msgstr "" + +#: cinder/brick/initiator/connector.py:609 +msgid "We are unable to locate any Fibre Channel devices" +msgstr "" + +#: cinder/brick/initiator/connector.py:619 +#, python-format +msgid "Looking for Fibre Channel dev %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:629 +msgid "Fibre Channel volume device not found." +msgstr "" + +#: cinder/brick/initiator/connector.py:633 +#, python-format +msgid "Fibre volume not yet found. Will rescan & retry. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:649 +#, python-format +msgid "Found Fibre Channel volume %(name)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:658 +#, python-format +msgid "Multipath device discovered %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:776 +#, python-format +msgid "AoE volume not yet found at: %(path)s. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:789 +#, python-format +msgid "Found AoE device %(path)s (after %(tries)s rediscover)" +msgstr "" + +#: cinder/brick/initiator/connector.py:815 +#, python-format +msgid "aoe-discover: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:825 +#, python-format +msgid "aoe-revalidate %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:834 +#, python-format +msgid "aoe-flush %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:858 +msgid "" +"Connection details not present. RemoteFsClient may not initialize " +"properly." 
+msgstr "" + +#: cinder/brick/initiator/connector.py:915 +msgid "Invalid connection_properties specified no device_path attribute" +msgstr "" + +#: cinder/brick/initiator/linuxfc.py:50 cinder/brick/initiator/linuxfc.py:56 +msgid "systool is not installed" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:99 +#: cinder/brick/initiator/linuxscsi.py:107 +#: cinder/brick/initiator/linuxscsi.py:124 +#, python-format +msgid "multipath call failed exit (%(code)s)" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:145 +#, python-format +msgid "Couldn't find multipath device %(line)s" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:149 +#, python-format +msgid "Found multipath device = %(mdev)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:140 +msgid "Attempting recreate of backing lun..." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:158 +#, python-format +msgid "" +"Failed to recover attempt to create iscsi backing lun for volume " +"id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:177 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:184 +#, python-format +msgid "" +"Created volume path %(vp)s,\n" +"content: %(vc)%" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:216 cinder/brick/iscsi/iscsi.py:365 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:227 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:258 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:262 +#, python-format +msgid "Volume path %s does not exist, nothing to remove." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:280 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:290 cinder/brick/iscsi/iscsi.py:550 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:375 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:469 +msgid "cinder-rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:489 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:513 cinder/brick/iscsi/iscsi.py:522 +#, python-format +msgid "Failed to create iscsi target for volume id:%s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:532 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:542 +#, python-format +msgid "Failed to remove iscsi target for volume id:%s." 
+msgstr "" + +#: cinder/brick/iscsi/iscsi.py:571 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 cinder/brick/local_dev/lvm.py:158 +#: cinder/brick/local_dev/lvm.py:474 cinder/brick/local_dev/lvm.py:503 +#: cinder/brick/local_dev/lvm.py:546 cinder/brick/local_dev/lvm.py:609 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 cinder/brick/local_dev/lvm.py:159 +#: cinder/brick/local_dev/lvm.py:475 cinder/brick/local_dev/lvm.py:504 +#: cinder/brick/local_dev/lvm.py:547 cinder/brick/local_dev/lvm.py:610 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 cinder/brick/local_dev/lvm.py:160 +#: cinder/brick/local_dev/lvm.py:476 cinder/brick/local_dev/lvm.py:505 +#: cinder/brick/local_dev/lvm.py:548 cinder/brick/local_dev/lvm.py:611 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, python-format +msgid "Unable to locate Volume Group %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:157 +msgid "Error querying thin pool about data_percent" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:370 +#, python-format +msgid "Unable to find VG: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:420 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:473 +msgid "Error creating Volume" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:489 +#, python-format +msgid "Unable to find LV: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:502 +msgid "Error creating snapshot" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:545 +msgid "Error activating LV" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:563 +#, python-format +msgid "Error reported running lvremove: CMD: %(command)s, RESPONSE: %(response)s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:568 +msgid "Attempting udev settle and retry of lvremove..." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:608 +msgid "Error extending Volume" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:39 +msgid "nfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:45 +msgid "glusterfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:86 +#, python-format +msgid "Already mounted: %s" +msgstr "" + +#: cinder/common/config.py:125 +msgid "Deploy v1 of the Cinder API." +msgstr "" + +#: cinder/common/config.py:128 +msgid "Deploy v2 of the Cinder API." +msgstr "" + +#: cinder/common/sqlalchemyutils.py:66 +#: cinder/openstack/common/db/sqlalchemy/utils.py:72 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:114 +#: cinder/openstack/common/db/sqlalchemy/utils.py:120 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/compute/nova.py:97 +#, python-format +msgid "Novaclient connection created using URL: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:63 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:190 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:843 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1842 +#, python-format +msgid "VolumeType %s deletion failed, VolumeType in use." 
+msgstr "" + +#: cinder/db/sqlalchemy/api.py:2530 +#, python-format +msgid "No backup with id %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2615 +msgid "Volume must be available" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2639 +#, python-format +msgid "Volume in unexpected state %s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2662 +#, python-format +msgid "" +"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " +"%(status)s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:37 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:64 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:240 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:269 +msgid "Downgrade from initial Cinder install is unsupported." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:49 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:74 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:105 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:45 +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:48 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:46 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:127 +msgid "Dropping foreign key reservations_ibfk_1 failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:133 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:140 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:147 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:60 +msgid "Exception while creating table 'volume_glance_metadata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:75 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:68 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:58 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:61 +msgid "transfers table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:31 +msgid "migrations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:61 +#, python-format +msgid "Table |%s| not created" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:37 +#, python-format +msgid "Exception while dropping table %s." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:100 +#, python-format +msgid "Exception while creating table %s." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:34 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:43 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:49 +#, python-format +msgid "Column |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:92 +msgid "encryption_key_id column not dropped from volumes" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:100 +msgid "encryption_key_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:105 +msgid "volume_type_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:113 +msgid "encryption table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:49 +msgid "Table quality_of_service_specs not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:60 +msgid "Added qos_specs_id column to volume type table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:85 +msgid "Dropping foreign key volume_types_ibfk_1 failed" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:93 +msgid "Dropping qos_specs_id column failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:100 +msgid "Dropping quality_of_service_specs table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:59 +msgid "volume_admin_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:48 +msgid "" +"Found existing 'default' entries in the quota_classes table. Skipping " +"insertion of default values." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:72 +msgid "Added default quota class data into the DB." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:74 +msgid "Default quota class data not inserted into the DB." +msgstr "" + +#: cinder/image/glance.py:161 cinder/image/glance.py:169 +#, python-format +msgid "Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:94 cinder/image/image_utils.py:199 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/image/image_utils.py:101 +#, python-format +msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:109 cinder/image/image_utils.py:192 +#, python-format +msgid "" +"Size is %(image_size)dGB and doesn't fit in a volume of size " +"%(volume_size)dGB." +msgstr "" + +#: cinder/image/image_utils.py:157 +#, python-format +msgid "" +"qemu-img is not installed and image is of type %s. Only RAW images can " +"be used if qemu-img is not installed." +msgstr "" + +#: cinder/image/image_utils.py:164 +msgid "" +"qemu-img is not installed and the disk format is not specified. Only RAW" +" images can be used if qemu-img is not installed." 
+msgstr "" + +#: cinder/image/image_utils.py:178 +#, python-format +msgid "Copying image from %(tmp)s to volume %(dest)s - size: %(size)s" +msgstr "" + +#: cinder/image/image_utils.py:206 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:224 +#, python-format +msgid "Converted to %(vol_format)s, but format is now %(file_format)s" +msgstr "" + +#: cinder/image/image_utils.py:260 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:78 +msgid "" +"config option keymgr.fixed_key has not been defined: some operations may " +"fail unexpectedly" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:80 +msgid "keymgr.fixed_key not defined" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:134 +#, python-format +msgid "Not deleting key %s" +msgstr "" + +#: cinder/openstack/common/eventlet_backdoor.py:140 +#, python-format +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/fileutils.py:64 +#, python-format +msgid "Reloading cached file %s" +msgstr "" + +#: cinder/openstack/common/gettextutils.py:252 +msgid "Message objects do not support addition." +msgstr "" + +#: cinder/openstack/common/gettextutils.py:261 +msgid "" +"Message objects do not support str() because they may contain non-ascii " +"characters. Please use unicode() or translate() instead." +msgstr "" + +#: cinder/openstack/common/imageutils.py:96 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:189 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:200 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:227 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:235 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/log.py:301 +#, python-format +msgid "Deprecated: %s" +msgstr "" + +#: cinder/openstack/common/log.py:402 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:453 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:623 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:82 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:89 +#: cinder/tests/brick/test_brick_connector.py:466 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:129 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:136 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:43 +#, python-format +msgid "Unexpected argument for periodic task creation: %(arg)s." 
+msgstr "" + +#: cinder/openstack/common/periodic_task.py:134 +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:139 +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:177 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:186 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." +msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:127 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/openstack/common/processutils.py:142 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:167 +#: cinder/openstack/common/processutils.py:239 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:345 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:179 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/openstack/common/processutils.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:318 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:220 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/openstack/common/processutils.py:224 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/openstack/common/service.py:175 +#: cinder/openstack/common/service.py:269 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:187 +msgid "Exception during rpc cleanup." 
+msgstr "" + +#: cinder/openstack/common/service.py:238 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:275 +msgid "Unhandled exception" +msgstr "" + +#: cinder/openstack/common/service.py:308 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/openstack/common/service.py:327 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/openstack/common/service.py:337 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/openstack/common/service.py:354 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/openstack/common/service.py:358 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/service.py:362 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/openstack/common/service.py:392 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/openstack/common/service.py:410 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/openstack/common/sslutils.py:98 +#, python-format +msgid "Invalid SSL version : %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:86 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/strutils.py:182 +#, python-format +msgid "Invalid string format: %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:189 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/openstack/common/versionutils.py:69 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and " +"may be removed in %(remove_in)s." +msgstr "" + +#: cinder/openstack/common/versionutils.py:73 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s and may be removed in " +"%(remove_in)s. It will not be superseded." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:29 +msgid "An unknown error occurred in crypto utils." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:36 +#, python-format +msgid "Block size of %(given)d is too big, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:45 +#, python-format +msgid "Length of %(given)d is too long, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/db/exception.py:44 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:487 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:538 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:610 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/utils.py:33 +msgid "Sort key supplied was not valid." +msgstr "" + +#: cinder/openstack/common/notifier/api.py:129 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:145 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:164 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. 
Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:105 +#, python-format +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:83 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:216 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:299 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:345 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "received %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:422 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:423 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:280 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:459 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:594 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:597 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:631 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:640 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:668 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." 
+msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:166 +#: cinder/openstack/common/rpc/impl_qpid.py:164 +msgid "Failed to process message... skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:477 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:499 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:536 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:552 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:606 +#: cinder/openstack/common/rpc/impl_qpid.py:507 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:624 +#: cinder/openstack/common/rpc/impl_qpid.py:522 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:628 +#: cinder/openstack/common/rpc/impl_qpid.py:526 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:667 +#: cinder/openstack/common/rpc/impl_qpid.py:561 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:84 +#, python-format +msgid "Invalid value for qpid_topology_version: %d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:455 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:461 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:474 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:534 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:101 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:136 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:137 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:138 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:146 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:158 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:200 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:205 +msgid "You cannot send on this socket." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:267 +#, python-format +msgid "Running func with context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:305 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:387 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:437 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:443 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:475 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:481 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:497 +#, python-format +msgid "Required IPC directory does not exist at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:506 +#, python-format +msgid "Permission denied to IPC directory at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:509 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:543 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:562 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:590 +msgid "Skipping topic registration. Already registered." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:597 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:649 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:662 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:675 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:678 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:681 +#, python-format +msgid "Received message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:682 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:691 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:698 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:721 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:724 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:728 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:731 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:771 +#, python-format +msgid "topic is %s." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:815 +#, python-format +msgid "rpc_zmq_matchmaker = %(orig)s is deprecated; use %(new)s instead" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#: cinder/openstack/common/rpc/matchmaker_ring.py:79 +#: cinder/openstack/common/rpc/matchmaker_ring.py:97 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:54 +#, python-format +msgid "extra_spec requirement '%(req)s' does not match '%(cap)s'" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:67 +#, python-format +msgid "%(host_state)s fails resource_type extra_specs requirements" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:43 +msgid "Re-scheduling is disabled." +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:52 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/scheduler/driver.py:69 +msgid "Must implement host_passes_filters" +msgstr "" + +#: cinder/scheduler/driver.py:74 +msgid "Must implement find_retype_host" +msgstr "" + +#: cinder/scheduler/driver.py:78 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:82 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:98 +#, python-format +msgid "cannot place volume %(id)s on %(host)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:114 +#, python-format +msgid "No valid hosts for volume %(id)s with type %(type)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:125 +#, python-format +msgid "" +"Current host not valid for volume %(id)s with type %(type)s, migration " +"not allowed" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:156 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:174 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:207 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:259 +#, python-format +msgid "Filtered %s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:276 +#, python-format +msgid "Choosing %s" +msgstr "" + +#: cinder/scheduler/host_manager.py:264 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:269 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:294 +#, python-format +msgid "volume service is down or disabled. (host: %s)" +msgstr "" + +#: cinder/scheduler/manager.py:63 +msgid "" +"ChanceScheduler and SimpleScheduler have been deprecated due to lack of " +"support for advanced features like: volume types, volume encryption, QoS " +"etc. These two schedulers can be fully replaced by FilterScheduler with " +"certain combination of filters and weighers." 
+msgstr "" + +#: cinder/scheduler/manager.py:98 cinder/scheduler/manager.py:100 +msgid "Failed to create scheduler manager volume flow" +msgstr "" + +#: cinder/scheduler/manager.py:159 +msgid "New volume type not specified in request_spec." +msgstr "" + +#: cinder/scheduler/manager.py:174 +#, python-format +msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." +msgstr "" + +#: cinder/scheduler/manager.py:192 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:68 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%s'" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:43 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:57 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:53 +msgid "No volume_id provided to populate a request_spec from" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:116 +#, python-format +msgid "Failed to schedule_create_volume: %(cause)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:135 +#, python-format +msgid "Failed notifying on %(topic)s payload %(payload)s" +msgstr "" + +#: cinder/tests/fake_driver.py:57 cinder/volume/driver.py:784 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:76 cinder/volume/driver.py:884 +#, python-format +msgid "FAKE ISER: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:97 +msgid "local_path not implemented" +msgstr "" + +#: cinder/tests/fake_driver.py:124 cinder/tests/fake_driver.py:129 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:70 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:78 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:94 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:97 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:58 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_netapp_nfs.py:360 +#, python-format +msgid "Share %(share)s and file name %(file_name)s" +msgstr "" + +#: cinder/tests/test_rbd.py:768 cinder/volume/drivers/rbd.py:175 +msgid "flush() not supported in this version of librbd" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:260 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1507 +#, python-format +msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1510 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1515 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:60 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:61 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: 
cinder/tests/test_xiv_ds8k.py:102 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/tests/api/contrib/test_backups.py:741 +msgid "Invalid input" +msgstr "" + +#: cinder/tests/integrated/test_login.py:29 +#, python-format +msgid "volume: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:32 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:42 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:50 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:58 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:100 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:103 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:121 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:148 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:159 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:166 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/transfer/api.py:68 +msgid "Volume in unexpected state" +msgstr "" + +#: cinder/transfer/api.py:102 cinder/volume/api.py:367 +msgid "status must be available" +msgstr "" + +#: cinder/transfer/api.py:119 +#, python-format +msgid "Failed to create transfer record for %s" +msgstr "" + +#: cinder/transfer/api.py:136 +#, python-format +msgid "Attempt to transfer %s with invalid auth key." +msgstr "" + +#: cinder/transfer/api.py:156 cinder/volume/flows/api/create_volume.py:508 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/transfer/api.py:182 +#, python-format +msgid "Failed to update quota donating volumetransfer id %s" +msgstr "" + +#: cinder/transfer/api.py:199 +#, python-format +msgid "Volume %s has been transferred." 
+msgstr "" + +#: cinder/volume/api.py:143 +#, python-format +msgid "Unable to query if %s is in the availability zone set" +msgstr "" + +#: cinder/volume/api.py:171 cinder/volume/api.py:173 +msgid "Failed to create api volume flow" +msgstr "" + +#: cinder/volume/api.py:202 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:214 +#, python-format +msgid "Volume status must be available or error, but current status is: %s" +msgstr "" + +#: cinder/volume/api.py:224 +msgid "Volume cannot be deleted while migrating" +msgstr "" + +#: cinder/volume/api.py:229 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:285 cinder/volume/api.py:350 +#: cinder/volume/qos_specs.py:240 cinder/volume/volume_types.py:67 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:370 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:377 +msgid "status must be in-use to detach" +msgstr "" + +#: cinder/volume/api.py:388 +msgid "Volume status must be available to reserve" +msgstr "" + +#: cinder/volume/api.py:464 +msgid "Snapshot cannot be created while volume is migrating" +msgstr "" + +#: cinder/volume/api.py:468 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:490 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:502 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:553 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/api.py:581 cinder/volume/flows/api/create_volume.py:208 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:585 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:589 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:720 cinder/volume/api.py:772 +msgid "Volume status must be available/in-use." +msgstr "" + +#: cinder/volume/api.py:723 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/api.py:752 +msgid "Volume status must be available to extend." +msgstr "" + +#: cinder/volume/api.py:757 +#, python-format +msgid "" +"New size for extend must be greater than current size. (current: " +"%(size)s, extended: %(new_size)s)" +msgstr "" + +#: cinder/volume/api.py:778 +msgid "Volume is already part of an active migration" +msgstr "" + +#: cinder/volume/api.py:784 +msgid "volume must not have snapshots" +msgstr "" + +#: cinder/volume/api.py:797 +#, python-format +msgid "No available service named %s" +msgstr "" + +#: cinder/volume/api.py:803 +msgid "Destination host must be different than current host" +msgstr "" + +#: cinder/volume/api.py:833 +msgid "Source volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:837 +msgid "Destination volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:842 +#, python-format +msgid "Destination has migration_status %(stat)s, expected %(exp)s." +msgstr "" + +#: cinder/volume/api.py:853 +msgid "Volume status must be available to update readonly flag." 
+msgstr "" + +#: cinder/volume/api.py:862 +#, python-format +msgid "Unable to update type due to incorrect status on volume: %s" +msgstr "" + +#: cinder/volume/api.py:868 +#, python-format +msgid "Volume %s is already part of an active migration." +msgstr "" + +#: cinder/volume/api.py:874 +#, python-format +msgid "migration_policy must be 'on-demand' or 'never', passed: %s" +msgstr "" + +#: cinder/volume/api.py:887 +#, python-format +msgid "Invalid volume_type passed: %s" +msgstr "" + +#: cinder/volume/api.py:900 +#, python-format +msgid "New volume_type same as original: %s" +msgstr "" + +#: cinder/volume/api.py:915 +msgid "Retype cannot change encryption requirements" +msgstr "" + +#: cinder/volume/api.py:927 +msgid "Retype cannot change front-end qos specs for in-use volumes" +msgstr "" + +#: cinder/volume/driver.py:189 cinder/volume/drivers/netapp/nfs.py:174 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:282 +#, python-format +msgid "copy_data_between_volumes %(src)s -> %(dest)s." +msgstr "" + +#: cinder/volume/driver.py:295 cinder/volume/driver.py:309 +#, python-format +msgid "Failed to attach volume %(vol)s" +msgstr "" + +#: cinder/volume/driver.py:327 +#, python-format +msgid "Failed to copy volume %(src)s to %(dest)d" +msgstr "" + +#: cinder/volume/driver.py:340 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:358 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:394 +#, python-format +msgid "Unable to access the backend storage via the path %(path)s." +msgstr "" + +#: cinder/volume/driver.py:433 +#, python-format +msgid "Creating a new backup for volume %s." +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Restoring backup %(backup)s to volume %(volume)s." +msgstr "" + +#: cinder/volume/driver.py:474 +msgid "Extend volume not implemented" +msgstr "" + +#: cinder/volume/driver.py:533 cinder/volume/drivers/emc/emc_smis_iscsi.py:113 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:546 +#, python-format +msgid "ISCSI discovery attempt failed for:%s" +msgstr "" + +#: cinder/volume/driver.py:548 +#, python-format +msgid "Error from iscsiadm -m discovery: %s" +msgstr "" + +#: cinder/volume/driver.py:595 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:599 cinder/volume/drivers/emc/emc_smis_iscsi.py:156 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:696 +msgid "The volume driver requires the iSCSI initiator name in the connector." +msgstr "" + +#: cinder/volume/driver.py:726 cinder/volume/driver.py:845 +#: cinder/volume/drivers/eqlx.py:247 cinder/volume/drivers/lvm.py:359 +#: cinder/volume/drivers/zadara.py:650 +#: cinder/volume/drivers/emc/emc_smis_common.py:859 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:235 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:602 +#: cinder/volume/drivers/netapp/iscsi.py:1032 +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/nexenta/iscsi.py:538 +#: cinder/volume/drivers/windows/windows.py:205 +msgid "Updating volume stats" +msgstr "" + +#: cinder/volume/driver.py:924 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:203 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." 
+msgstr "" + +#: cinder/volume/manager.py:209 +msgid "" +"ThinLVMVolumeDriver is deprecated, please configure LVMISCSIDriver and " +"lvm_type=thin. Continuing with those settings." +msgstr "" + +#: cinder/volume/manager.py:228 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)" +msgstr "" + +#: cinder/volume/manager.py:235 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s" +msgstr "" + +#: cinder/volume/manager.py:244 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:257 +#, python-format +msgid "Failed to re-export volume %s: setting to error state" +msgstr "" + +#: cinder/volume/manager.py:264 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:271 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:273 +#, python-format +msgid "" +"Error encountered during re-exporting phase of driver initialization: " +"%(name)s" +msgstr "" + +#: cinder/volume/manager.py:283 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:328 cinder/volume/manager.py:330 +msgid "Failed to create manager volume flow" +msgstr "" + +#: cinder/volume/manager.py:374 cinder/volume/manager.py:391 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:380 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:389 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:394 +#, python-format +msgid "Cannot delete volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:422 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:427 +#, python-format +msgid "volume %s: glance metadata deleted" +msgstr "" + +#: cinder/volume/manager.py:430 +#, python-format +msgid "no glance metadata found for volume %s" +msgstr "" + +#: cinder/volume/manager.py:434 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:451 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:462 +#, python-format +msgid "snapshot %(snap_id)s: creating" +msgstr "" + +#: cinder/volume/manager.py:490 +#, python-format +msgid "" +"Failed updating %(snapshot_id)s metadata using the provided volumes " +"%(volume_id)s metadata" +msgstr "" + +#: cinder/volume/manager.py:496 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:508 cinder/volume/manager.py:518 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:526 +#, python-format +msgid "Cannot delete snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:556 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:559 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:579 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:583 +msgid "being attached by another host" +msgstr "" + +#: cinder/volume/manager.py:587 +msgid "being attached by different mode" +msgstr "" + +#: cinder/volume/manager.py:590 +msgid "status must be available or attaching" +msgstr "" + +#: cinder/volume/manager.py:698 +#, python-format +msgid 
"Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "" + +#: cinder/volume/manager.py:760 +#, python-format +msgid "Unable to fetch connection information from backend: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:807 +#, python-format +msgid "Unable to terminate volume connection: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:854 +msgid "failed to create new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:857 +msgid "timeout creating new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:880 +#, python-format +msgid "Failed to copy volume %(vol1)s to %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:909 +#, python-format +msgid "" +"migrate_volume_completion: completing migration for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:921 +#, python-format +msgid "" +"migrate_volume_completion is cleaning up an error for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:940 +#, python-format +msgid "Failed to delete migration source vol %(vol)s: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:976 +#, python-format +msgid "volume %s: calling driver migrate_volume" +msgstr "" + +#: cinder/volume/manager.py:1016 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/manager.py:1024 +#, python-format +msgid "" +"Unable to update stats, %(driver_name)s -%(driver_version)s " +"%(config_group)s driver is uninitialized." +msgstr "" + +#: cinder/volume/manager.py:1044 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/manager.py:1091 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to extend volume by %(s_size)sG, " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/manager.py:1103 +#, python-format +msgid "volume %s: extending" +msgstr "" + +#: cinder/volume/manager.py:1105 +#, python-format +msgid "volume %s: extended successfully" +msgstr "" + +#: cinder/volume/manager.py:1107 +#, python-format +msgid "volume %s: Error trying to extend volume" +msgstr "" + +#: cinder/volume/manager.py:1169 +msgid "Failed to update usages while retyping volume." +msgstr "" + +#: cinder/volume/manager.py:1170 +msgid "Failed to get old volume type quota reservations" +msgstr "" + +#: cinder/volume/manager.py:1190 +#, python-format +msgid "Volume %s: retyped succesfully" +msgstr "" + +#: cinder/volume/manager.py:1193 +#, python-format +msgid "" +"Volume %s: driver error when trying to retype, falling back to generic " +"mechanism." +msgstr "" + +#: cinder/volume/manager.py:1204 +msgid "Retype requires migration but is not allowed." +msgstr "" + +#: cinder/volume/manager.py:1212 +msgid "Volume must not have snapshots." 
+msgstr "" + +#: cinder/volume/qos_specs.py:57 +#, python-format +msgid "Valid consumer of QoS specs are: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:84 cinder/volume/qos_specs.py:105 +#: cinder/volume/qos_specs.py:155 cinder/volume/qos_specs.py:197 +#: cinder/volume/qos_specs.py:211 cinder/volume/qos_specs.py:225 +#: cinder/volume/volume_types.py:43 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:123 cinder/volume/qos_specs.py:140 +#: cinder/volume/qos_specs.py:272 cinder/volume/volume_types.py:52 +#: cinder/volume/volume_types.py:99 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/qos_specs.py:156 +#, python-format +msgid "Failed to get all associations of qos specs %s" +msgstr "" + +#: cinder/volume/qos_specs.py:189 +#, python-format +msgid "" +"Type %(type_id)s is already associated with another qos specs: " +"%(qos_specs_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:198 +#, python-format +msgid "Failed to associate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:212 +#, python-format +msgid "Failed to disassociate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:226 +#, python-format +msgid "Failed to disassociate qos specs %s." +msgstr "" + +#: cinder/volume/qos_specs.py:284 cinder/volume/volume_types.py:111 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/utils.py:144 +#, python-format +msgid "" +"Incorrect value error: %(blocksize)s, it may indicate that " +"'volume_dd_blocksize' was configured incorrectly. Fall back to default." +msgstr "" + +#: cinder/volume/volume_types.py:130 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:131 +#: cinder/volume/drivers/block_device.py:143 cinder/volume/drivers/lvm.py:654 +#: cinder/volume/drivers/lvm.py:669 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:157 cinder/volume/drivers/lvm.py:687 +#, python-format +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:183 cinder/volume/drivers/lvm.py:483 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:200 cinder/volume/drivers/lvm.py:504 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:272 cinder/volume/drivers/lvm.py:227 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:287 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:311 cinder/volume/drivers/lvm.py:300 +#: cinder/volume/drivers/zadara.py:509 cinder/volume/drivers/nexenta/nfs.py:189 +#, python-format +msgid "Creating clone of volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:380 +msgid "No free disk" +msgstr "" + +#: cinder/volume/drivers/block_device.py:393 +msgid "No big enough free disk" +msgstr "" + +#: cinder/volume/drivers/coraid.py:84 +#, python-format +msgid "Invalid ESM url scheme \"%s\". Supported https only." +msgstr "" + +#: cinder/volume/drivers/coraid.py:111 +msgid "Invalid REST handle name. Expected path." 
+msgstr "" + +#: cinder/volume/drivers/coraid.py:134 +#, python-format +msgid "Call to json.loads() failed: %(ex)s. Response: %(resp)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:224 +msgid "Session is expired. Relogin on ESM." +msgstr "" + +#: cinder/volume/drivers/coraid.py:244 +msgid "Reply is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:246 +msgid "Error message is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:284 +#, python-format +msgid "Coraid Appliance ping failed: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:297 +#, python-format +msgid "Volume \"%(name)s\" created with VSX LUN \"%(lun)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:311 +#, python-format +msgid "Volume \"%s\" deleted." +msgstr "" + +#: cinder/volume/drivers/coraid.py:315 +#, python-format +msgid "Resize volume \"%(name)s\" to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:319 +#, python-format +msgid "Repository for volume \"%(name)s\" found: \"%(repo)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:333 +#, python-format +msgid "Volume \"%(name)s\" resized. New size is %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:385 +msgid "Cannot create clone volume in different repository." +msgstr "" + +#: cinder/volume/drivers/coraid.py:505 +#, python-format +msgid "Initialize connection %(shelf)s/%(lun)s for %(name)s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:139 +#, python-format +msgid "" +"CLI output\n" +"%s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:154 +msgid "Reading CLI MOTD" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:158 +#, python-format +msgid "Setting CLI terminal width: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:162 +#, python-format +msgid "Sending CLI command: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:169 +msgid "Error executing EQL command" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:199 +#, python-format +msgid "EQL-driver: executing \"%s\"" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:208 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:383 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:215 cinder/volume/drivers/san/san.py:149 +#, python-format +msgid "Error running SSH command: %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:282 +#, python-format +msgid "Volume %s does not exist, it may have already been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:300 +#, python-format +msgid "EQL-driver: Setup is complete, group IP is %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:304 +msgid "Failed to setup the Dell EqualLogic driver" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:320 +#, python-format +msgid "Failed to create volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:329 +#, python-format +msgid "Volume %s was not found while trying to delete it" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:333 +#, python-format +msgid "Failed to delete volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:348 +#, python-format +msgid "Failed to create snapshot of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:361 +#, python-format +msgid "Failed to create volume from snapshot %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:374 +#, python-format +msgid "Failed to create clone of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:384 +#, python-format +msgid "Failed to delete snapshot %(snap)s of volume %(vol)s" +msgstr "" + +#: 
cinder/volume/drivers/eqlx.py:405 +#, python-format +msgid "Failed to initialize connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:415 +#, python-format +msgid "Failed to terminate connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:436 +#, python-format +msgid "Volume %s was not found! It may have been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:440 +#, python-format +msgid "Failed to ensure export of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:459 +#, python-format +msgid "Failed to extend_volume %(name)s from %(current_size)sGB to %(new_size)sGB" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:86 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:91 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:103 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:161 +#, python-format +msgid "Cloning volume %(src)s to volume %(dst)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:166 +msgid "Volume status must be 'available'." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:202 cinder/volume/drivers/nfs.py:122 +#: cinder/volume/drivers/netapp/nfs.py:753 +#, python-format +msgid "casted to %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:216 +msgid "Snapshot status must be \"available\" to clone." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:238 +#, python-format +msgid "snapshot: %(snap)s, volume: %(vol)s, volume_size: %(size)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:257 +#, python-format +msgid "will copy from snapshot at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:275 cinder/volume/drivers/nfs.py:172 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:373 +#, python-format +msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:403 +#, python-format +msgid "nova call result: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:405 +msgid "Call to Nova to create snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:427 +msgid "Nova returned \"error\" status while creating snapshot." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:431 +#, python-format +msgid "Status of snapshot %(id)s is now %(status)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:444 +#, python-format +msgid "Timed out while waiting for Nova update for creation of snapshot %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:456 +#, python-format +msgid "create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:457 +#, python-format +msgid "volume id: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:532 +msgid "'active' must be present when writing snap_info." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:562 +#, python-format +msgid "deleting snapshot %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:566 +msgid "Volume status must be \"available\" or \"in-use\"." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:582 +#, python-format +msgid "" +"Snapshot record for %s is not present, allowing snapshot_delete to " +"proceed."
+msgstr "" + +#: cinder/volume/drivers/glusterfs.py:587 +#, python-format +msgid "snapshot_file for this snap is %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:608 +#, python-format +msgid "No base file found for %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:625 +#, python-format +msgid "No %(base_id)s found for %(file)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:680 +#, python-format +msgid "No file found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:690 +#, python-format +msgid "No snap found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:701 +#, python-format +msgid "No file depends on %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:727 +#, python-format +msgid "Check condition failed: %s expected to be None." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:778 +msgid "Call to Nova delete snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:796 +#, python-format +msgid "status of snapshot %s is still \"deleting\"... waiting" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:802 +#, python-format +msgid "Unable to delete snapshot %(id)s, status: %(status)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:815 +#, python-format +msgid "Timed out while waiting for Nova update for deletion of snapshot %(id)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:904 +#, python-format +msgid "%s must be a valid raw or qcow2 image." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:967 +msgid "Extend volume is only supported for this driver when no snapshots exist." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:975 +#, python-format +msgid "Unrecognized backing format: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:990 +#, python-format +msgid "creating new volume at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:993 +#, python-format +msgid "file already exists at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1019 cinder/volume/drivers/nfs.py:159 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1021 +#, python-format +msgid "Available shares: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1038 +#, python-format +msgid "" +"GlusterFS share at %(dir)s is not writable by the Cinder volume service. " +"Snapshot operations will not be supported." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:96 +#, python-format +msgid "GPFS is not active. Detailed output: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:97 +#, python-format +msgid "GPFS is not running - state: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:140 +msgid "Option gpfs_mount_point_base is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:147 +msgid "Option gpfs_images_share_mode is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:153 +msgid "Option gpfs_images_dir is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:160 +#, python-format +msgid "" +"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " +"belong to different file systems" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:169 +#, python-format +msgid "" +"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in " +"cluster daemon level %(cur)s - must be at least at level %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:183 +#, python-format +msgid "%s must be an absolute path." 
+msgstr "" + +#: cinder/volume/drivers/gpfs.py:188 +#, python-format +msgid "%s is not a directory." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:197 +#, python-format +msgid "" +"The GPFS filesystem %(fs)s is not at the required release level. Current" +" level is %(cur)s, must be at least %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:556 +#, python-format +msgid "Failed to resize volume %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:604 +#, python-format +msgid "mkfs failed on volume %(vol)s, error message was: %(err)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:637 +#, python-format +msgid "" +"%s cannot be accessed. Verify that GPFS is active and file system is " +"mounted." +msgstr "" + +#: cinder/volume/drivers/lvm.py:189 +#, python-format +msgid "Unabled to delete due to existing snapshot for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:215 +#, python-format +msgid "Volume device file path %s does not exist." +msgstr "" + +#: cinder/volume/drivers/lvm.py:221 +#, python-format +msgid "Size for volume: %s not found, cannot secure delete." +msgstr "" + +#: cinder/volume/drivers/lvm.py:262 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:361 +#, python-format +msgid "Unable to update stats on non-initialized Volume Group: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:462 +#, python-format +msgid "Error creating iSCSI target, retrying creation for target: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:482 +#, python-format +msgid "volume_info:%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:518 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:519 cinder/volume/drivers/lvm.py:724 +#: cinder/volume/drivers/huawei/rest_common.py:1225 +#, python-format +msgid "%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:573 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/nfs.py:109 +msgid "Driver specific implementation needs to return mount_point_base." +msgstr "" + +#: cinder/volume/drivers/nfs.py:263 +#, python-format +msgid "Expected volume size was %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:264 +#, python-format +msgid " but size is now %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:361 +#, python-format +msgid "%s is already mounted" +msgstr "" + +#: cinder/volume/drivers/nfs.py:421 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:426 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/nfs.py:431 +#, python-format +msgid "NFS config 'nfs_oversub_ratio' invalid. Must be > 0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:439 +#, python-format +msgid "NFS config 'nfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:493 +#, python-format +msgid "Selected %s as target nfs share." 
+msgstr "" + +#: cinder/volume/drivers/nfs.py:526 +#, python-format +msgid "%s is above nfs_used_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:529 +#, python-format +msgid "%s is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:532 +#, python-format +msgid "%s reserved space is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/rbd.py:160 +#, python-format +msgid "Invalid argument - whence=%s not supported" +msgstr "" + +#: cinder/volume/drivers/rbd.py:164 +msgid "Invalid argument" +msgstr "" + +#: cinder/volume/drivers/rbd.py:183 +msgid "fileno() not supported by RBD()" +msgstr "" + +#: cinder/volume/drivers/rbd.py:210 +#, python-format +msgid "error opening rbd image %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:259 +msgid "rados and rbd python libraries not found" +msgstr "" + +#: cinder/volume/drivers/rbd.py:265 +msgid "error connecting to ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:346 cinder/volume/drivers/sheepdog.py:178 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:377 +#, python-format +msgid "clone depth exceeds limit of %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:411 +#, python-format +msgid "maximum clone depth (%d) has been reached - flattening source volume" +msgstr "" + +#: cinder/volume/drivers/rbd.py:423 +#, python-format +msgid "flattening source volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:435 +#, python-format +msgid "creating snapshot='%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:445 +#, python-format +msgid "cloning '%(src_vol)s@%(src_snap)s' to '%(dest)s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:459 +msgid "clone created successfully" +msgstr "" + +#: cinder/volume/drivers/rbd.py:468 +#, python-format +msgid "creating volume '%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:484 +#, python-format +msgid "flattening %(pool)s/%(img)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:490 +#, python-format +msgid "cloning %(pool)s/%(img)s@%(snap)s to %(dst)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:527 +msgid "volume has no backup snaps" +msgstr "" + +#: cinder/volume/drivers/rbd.py:550 +#, python-format +msgid "volume %s is not a clone" +msgstr "" + +#: cinder/volume/drivers/rbd.py:568 +#, python-format +msgid "deleting parent snapshot %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:579 +#, python-format +msgid "deleting parent %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:593 +#, python-format +msgid "volume %s no longer exists in backend" +msgstr "" + +#: cinder/volume/drivers/rbd.py:609 +msgid "volume has clone snapshot(s)" +msgstr "" + +#: cinder/volume/drivers/rbd.py:625 +#, python-format +msgid "deleting rbd volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:629 +msgid "" +"ImageBusy error raised while deleting rbd volume. This may have been " +"caused by a connection from a client that has crashed and, if so, may be " +"resolved by retrying the delete after 30 seconds has elapsed." 
+msgstr "" + +#: cinder/volume/drivers/rbd.py:642 +msgid "volume is a clone so cleaning references" +msgstr "" + +#: cinder/volume/drivers/rbd.py:696 +#, python-format +msgid "connection data: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:705 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:709 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:712 +msgid "Not an rbd snapshot" +msgstr "" + +#: cinder/volume/drivers/rbd.py:724 +#, python-format +msgid "not cloneable: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:728 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:733 +msgid "rbd image clone requires image format to be 'raw' but image {0} is '{1}'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:747 +#, python-format +msgid "Unable to open image %(loc)s: %(err)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:817 +msgid "volume backup complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:830 +msgid "volume restore complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:840 cinder/volume/drivers/sheepdog.py:195 +#, python-format +msgid "Failed to Extend Volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:845 cinder/volume/drivers/sheepdog.py:200 +#: cinder/volume/drivers/windows/windows.py:223 +#, python-format +msgid "Extend volume from %(old_size)s GB to %(new_size)s GB." +msgstr "" + +#: cinder/volume/drivers/scality.py:67 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:78 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:84 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:105 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:139 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:59 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:64 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:144 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:151 +#, python-format +msgid "" +"Failed to make httplib connection SolidFire Cluster: %s (verify san_ip " +"settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:154 +#, python-format +msgid "Failed to make httplib connection: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:161 +#, python-format +msgid "" +"Request to SolidFire cluster returned bad status: %(status)s / %(reason)s" +" (check san_login/san_password settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:166 +#, python-format +msgid "HTTP request failed, with status: %(status)s and reason: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:177 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:183 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:187 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:189 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:195 +#, python-format +msgid "Detected 
xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:202 +#: cinder/volume/drivers/solidfire.py:271 +#: cinder/volume/drivers/solidfire.py:366 +#, python-format +msgid "API response: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:222 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:253 +#, python-format +msgid "solidfire account: %s does not exist, creating it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:315 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:398 +msgid "Failed to get model update from clone" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:410 +#, python-format +msgid "Failed volume create: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:425 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:460 +#, python-format +msgid "Failed to get SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:469 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:478 +#, python-format +msgid "Volume %s not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:481 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:550 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:554 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:556 +msgid "This usually means the volume was never successfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:569 +#, python-format +msgid "Failed to delete SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:572 +#: cinder/volume/drivers/solidfire.py:646 +#: cinder/volume/drivers/solidfire.py:709 +#: cinder/volume/drivers/solidfire.py:734 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:575 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:579 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:587 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:638 +msgid "Entering SolidFire extend_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:660 +msgid "Leaving SolidFire extend_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:665 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:673 +msgid "Failed to get updated stats" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:703 +#: cinder/volume/drivers/solidfire.py:728 +msgid "Entering SolidFire attach_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:773 +msgid "Leaving SolidFire transfer volume" +msgstr "" + +#: cinder/volume/drivers/zadara.py:236 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:260 +#, python-format +msgid "Operation completed. 
%(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:357 +#, python-format +msgid "Pool %(name)s: %(total)sGB total, %(free)sGB free" +msgstr "" + +#: cinder/volume/drivers/zadara.py:408 cinder/volume/drivers/zadara.py:531 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:438 +#, python-format +msgid "Create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:445 cinder/volume/drivers/zadara.py:490 +#: cinder/volume/drivers/zadara.py:516 +#, python-format +msgid "Volume %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:456 +#, python-format +msgid "Delete snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:464 +#, python-format +msgid "snapshot: original volume %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:472 +#, python-format +msgid "snapshot: snapshot %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:483 +#, python-format +msgid "Creating volume from snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:496 +#, python-format +msgid "Snapshot %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:614 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:40 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:79 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:83 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:91 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:98 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:107 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:115 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:130 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:137 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:144 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:152 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:157 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:167 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:177 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:188 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:197 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:218 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:230 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:241 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:257 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:266 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:278 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:287 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:292 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:302 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:312 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:321 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:342 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. 
Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:354 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:365 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:381 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:390 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:402 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:411 +msgid "Entering delete_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:413 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:420 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:430 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:438 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:442 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigService: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:456 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:465 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:472 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:476 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:488 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:497 +#: cinder/volume/drivers/emc/emc_smis_common.py:567 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:502 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s."
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:518 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:527 +#, python-format +msgid "" +"Error Create Snapshot: %(snapshot)s Volume: %(volume)s Error: " +"%(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:535 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:541 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:545 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:551 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:559 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:574 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:590 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:599 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:611 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:621 +#, python-format +msgid "Create export: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:626 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:648 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:663 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:674 +#, python-format +msgid "Error mapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:678 +#, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:694 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:707 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:711 +#, python-format +msgid "HidePaths for volume %s completed successfully." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:724 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:739 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:744 +#, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:757 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:770 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:775 +#, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:781 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:790 +#: cinder/volume/drivers/emc/emc_smis_common.py:820 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:804 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:810 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:834 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:840 +#, python-format +msgid "Volume %s is already mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:852 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:884 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:887 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:903 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:906 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:928 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:948 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:952 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:959 +msgid "Cannot connect to ECOM server" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:971 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:984 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:997 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1010 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1054 +#, python-format +msgid "Pool %(storage_type)s is not found." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1060 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1066 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1082 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1114 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1117 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1130 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1158 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1184 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1188 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1248 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1289 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1302 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1314 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1326 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1361 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1404 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1409 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1419 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1441 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1463 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1491 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1520 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1526 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1538 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1548 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1550 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1566 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:152 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:161 +#, python-format +msgid "Cannot find device number for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:191 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:198 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:215 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:70 +#, python-format +msgid "Range: start LU: %(start)s, end LU: %(end)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:84 +#, python-format +msgid "setting LU upper (end) limit to %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:92 +#, python-format +msgid "%(element)s: %(val)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:103 cinder/volume/drivers/hds/hds.py:105 +#, python-format +msgid "XML exception reading parameter: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:178 +#, python-format +msgid "portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:197 +#, python-format +msgid "No configuration found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:250 +#, python-format +msgid "HDP not found: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:289 +#, python-format +msgid "iSCSI portal not found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:327 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:355 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is cloned." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:372 +#, python-format +msgid "LUN %(lun)s extended to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:395 +#, python-format +msgid "delete lun %(lun)s on %(name)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:480 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created from snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:503 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is created as snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:522 +#, python-format +msgid "LUN %s is deleted." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:57 +msgid "_instantiate_driver: configuration not found." 
+msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:64 +#, python-format +msgid "" +"_instantiate_driver: Loading %(protocol)s driver for Huawei OceanStor " +"%(product)s series storage arrays." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:84 +#, python-format +msgid "" +"\"Product\" or \"Protocol\" is illegal. \"Product\" should be set to " +"either T, Dorado or HVS. \"Protocol\" should be set to either iSCSI or " +"FC. Product: %(product)s Protocol: %(protocol)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:74 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s host: %(host)s initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:92 +#: cinder/volume/drivers/huawei/huawei_t.py:461 +#, python-format +msgid "initialize_connection: Target FC ports WWNS: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:101 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(ini)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:159 +#: cinder/volume/drivers/huawei/rest_common.py:1278 +#, python-format +msgid "" +"_get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " +"check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:206 +#: cinder/volume/drivers/huawei/rest_common.py:1083 +#, python-format +msgid "_get_tgt_iqn: iSCSI IP is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:234 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:248 +#, python-format +msgid "" +"_get_iscsi_tgt_port_info: Failed to get iSCSI port info. Please make sure" +" the iSCSI port IP %s is configured in array." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:323 +#: cinder/volume/drivers/huawei/huawei_t.py:552 +#, python-format +msgid "" +"terminate_connection: volume: %(vol)s, host: %(host)s, connector: " +"%(initiator)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:351 +#, python-format +msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:436 +msgid "validate_connector: The FC driver requires thewwpns in the connector." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:443 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:578 +#, python-format +msgid "_remove_fc_ports: FC port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:40 +#, python-format +msgid "parse_xml_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:129 +#, python-format +msgid "_get_host_os_type: Host %(ip)s OS type is %(os)s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:59 +#, python-format +msgid "HVS Request URL: %(url)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:60 +#, python-format +msgid "HVS Request Data: %(data)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:73 +#, python-format +msgid "HVS Response Data: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:75 +#, python-format +msgid "Bad response from server: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:82 +msgid "JSON transfer error" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:102 +#, python-format +msgid "Login error, reason is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:166 +#, python-format +msgid "" +"%(err)s\n" +"result: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:173 +#, python-format +msgid "%s \"data\" was not in result." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:208 +msgid "Can't find the Qos policy in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:246 +msgid "Can't find lun or lun group in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:280 +#, python-format +msgid "Invalid resource pool: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:298 +#, python-format +msgid "Get pool info error, pool name is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:327 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:354 +#, python-format +msgid "_stop_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:474 +#, python-format +msgid "" +"_mapping_hostgroup_and_lungroup: lun_group: %(lun_group)sview_id: " +"%(view_id)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:511 +#: cinder/volume/drivers/huawei/rest_common.py:543 +#, python-format +msgid "initiator name:%(initiator_name)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:527 +#, python-format +msgid "host lun id is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:553 +#, python-format +msgid "the free wwns %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:574 +#, python-format +msgid "the fc server properties is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:688 +#, python-format +msgid "JSON transfer data error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:874 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:937 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:964 +#, python-format +msgid "" +"PrefetchType config is wrong. PrefetchType must in 1,2,3,4. fetchtype " +"is:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:970 +msgid "Use default prefetch fetchtype. Prefetch fetchtype:Intelligent." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:982 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal.LUNcopy name: " +"%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1056 +#, python-format +msgid "" +"_get_iscsi_port_info: Failed to get iscsi port info through config IP " +"%(ip)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1101 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1124 +#, python-format +msgid "_parse_volume_type: type id: %(type_id)s config parameter is: %(params)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1157 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the configuration file " +"%(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1162 +#, python-format +msgid "The config parameters are: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1239 +#: cinder/volume/drivers/huawei/ssh_common.py:118 +#: cinder/volume/drivers/huawei/ssh_common.py:1265 +#, python-format +msgid "_check_conf_file: Config file invalid. %s must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1246 +#: cinder/volume/drivers/huawei/ssh_common.py:125 +msgid "_check_conf_file: Config file invalid. StoragePool must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1256 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1300 +msgid "Can not find lun in array" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:54 +#, python-format +msgid "ssh_read: Read SSH timeout. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:70 +msgid "No response message. Please check system status." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:101 +#: cinder/volume/drivers/huawei/ssh_common.py:1249 +msgid "do_setup" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:135 +#: cinder/volume/drivers/huawei/ssh_common.py:1287 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType is invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:169 +#, python-format +msgid "_get_login_info: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:224 +#, python-format +msgid "create_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:242 +#, python-format +msgid "" +"_name_translate: Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:279 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the element in configuration " +"file %(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:373 +#: cinder/volume/drivers/huawei/ssh_common.py:1451 +#, python-format +msgid "LUNType must be \"Thin\" or \"Thick\". LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:395 +msgid "" +"_parse_conf_lun_params: Use default prefetch type. 
Prefetch type: " +"Intelligent" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:421 +#, python-format +msgid "" +"_get_maximum_capacity_pool_id: Failed to get pool id. Please check config" +" file and make sure the StoragePool %s is created in storage array." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:436 +#, python-format +msgid "CLI command: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:466 +#, python-format +msgid "" +"_execute_cli: Can not connect to IP %(old)s, try to connect to the other " +"IP %(new)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:501 +#, python-format +msgid "_execute_cli: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:511 +#, python-format +msgid "delete_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:516 +#, python-format +msgid "delete_volume: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:570 +#, python-format +msgid "" +"create_volume_from_snapshot: snapshot name: %(snapshot)s, volume name: " +"%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:580 +#, python-format +msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:650 +#, python-format +msgid "_wait_for_luncopy: LUNcopy %(luncopyname)s status is %(status)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:688 +#, python-format +msgid "create_cloned_volume: src volume: %(src)s, tgt volume: %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:697 +#, python-format +msgid "Source volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:739 +#, python-format +msgid "" +"extend_volume: extended volume name: %(extended_name)s new added volume " +"name: %(added_name)s new added volume size: %(added_size)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:747 +#, python-format +msgid "extend_volume: volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:779 +#, python-format +msgid "create_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:785 +msgid "create_snapshot: Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:792 +#, python-format +msgid "create_snapshot: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:855 +#, python-format +msgid "delete_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:865 +#, python-format +msgid "" +"delete_snapshot: Can not delete snapshot %s for it is a source LUN of " +"LUNCopy." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:873 +#, python-format +msgid "delete_snapshot: Snapshot %(snap)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:916 +#, python-format +msgid "" +"%(func)s: %(msg)s\n" +"CLI command: %(cmd)s\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:933 +#, python-format +msgid "map_volume: Volume %s was not found." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1079 +#, python-format +msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1102 +#, python-format +msgid "remove_map: Host %s does not exist." 
+msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1106 +#, python-format +msgid "remove_map: Volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1119 +#, python-format +msgid "remove_map: No map between host %(host)s and volume %(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1138 +#, python-format +msgid "" +"_delete_map: There are IOs accessing the system. Retry to delete host map" +" %(mapid)s 10s later." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1146 +#, python-format +msgid "" +"_delete_map: Failed to delete host map %(mapid)s.\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1185 +msgid "_update_volume_stats: Updating volume stats." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1277 +msgid "_check_conf_file: Config file invalid. StoragePool must be specified." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1311 +msgid "" +"_get_device_type: The driver only supports Dorado5100 and Dorado 2100 G2 " +"now." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1389 +#, python-format +msgid "" +"create_volume_from_snapshot: %(device)s does not support create volume " +"from snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1396 +#, python-format +msgid "create_cloned_volume: %(device)s does not support clone volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1404 +#, python-format +msgid "extend_volume: %(device)s does not support extend volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1413 +#, python-format +msgid "create_snapshot: %(device)s does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:132 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:142 +#, python-format +msgid "Failed getting details for pool %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:179 +msgid "do_setup: No configured nodes." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:182 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:186 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:190 +msgid "Unable to determine system name" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:193 +msgid "Unable to determine system id" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:196 +msgid "Unable to determine pool extent size" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:203 +#: cinder/volume/drivers/netapp/iscsi.py:122 +#: cinder/volume/drivers/netapp/nfs.py:639 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:157 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:209 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:217 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:225 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:235 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:254 +msgid "The connector does not contain the required information." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:277 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:297 +msgid "CHAP secret exists for host but CHAP is disabled" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:302 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:314 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:316 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:333 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:342 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:365 +msgid "" +"Could not get FC connection information for the host-volume connection. " +"Is the host configured properly for FC connections?" 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:380 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:385 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:403 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:411 +msgid "terminate_connection: Failed to get host name from connector." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:421 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:447 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:459 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:470 +#, python-format +msgid "enter: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:474 +msgid "extend_volume: Extending a volume with snapshots is not supported." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:481 +#, python-format +msgid "leave: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:497 +#, python-format +msgid "enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:523 +#, python-format +msgid "leave: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:540 +#, python-format +msgid "" +"enter: retype: id=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:581 +#, python-format +msgid "" +"exit: retype: ild=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:622 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:623 +msgid "_update_volume_stats: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:44 +#, python-format +msgid "Could not find key in output of command %(cmd)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:64 +#, python-format +msgid "Failed to get code level (%s)." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:86 +#, python-format +msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:143 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:165 +#, python-format +msgid "Failed to find host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:178 +#, python-format +msgid "enter: get_host_from_connector: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:207 +#, python-format +msgid "leave: get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:218 +#, python-format +msgid "enter: create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:224 +msgid "create_host: Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:235 +msgid "create_host: No initiators or wwpns supplied." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:265 +#, python-format +msgid "leave: create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:275 +#, python-format +msgid "enter: map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:301 +#, python-format +msgid "" +"leave: map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host " +"%(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:311 +#, python-format +msgid "enter: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:318 +#, python-format +msgid "unmap_vol_from_host: No mapping of volume %(vol_name)s to any host found." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:324 +#, python-format +msgid "" +"unmap_vol_from_host: Multiple mappings of volume %(vol_name)s found, no " +"host specified." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:336 +#, python-format +msgid "" +"unmap_vol_from_host: No mapping of volume %(vol_name)s to host %(host) " +"found." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:348 +#, python-format +msgid "leave: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:377 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:383 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:390 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:397 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:402 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:408 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:417 +#, python-format +msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:452 +msgid "Protocol must be specified as ' iSCSI' or ' FC'." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:495 +#, python-format +msgid "enter: create_vdisk: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:498 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:525 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping%(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:535 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within theallotted %(to)d " +"seconds timeout. Terminating." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:544 +#, python-format +msgid "" +"enter: run_flashcopy: execute FlashCopy from source %(source)s to target " +"%(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:552 +#, python-format +msgid "leave: run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:572 +#, python-format +msgid "Loopcall: _check_vdisk_fc_mappings(), vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:595 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:631 +#, python-format +msgid "Calling _ensure_vdisk_no_fc_mappings: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:639 +#, python-format +msgid "enter: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:641 +#, python-format +msgid "Tried to delete non-existant vdisk %s." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:645 +#, python-format +msgid "leave: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:649 +#, python-format +msgid "enter: create_copy: snapshot %(src)s to %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:654 +#, python-format +msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:669 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt)s from vdisk %(src)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:691 +msgid "migrate_volume started without a vdisk copy in the expected pool." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:743 +#, python-format +msgid "" +"Ignore change IO group as storage code level is %(code_level)s, below " +"then 6.4.0.0" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:35 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:211 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:244 +#, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:53 +#, python-format +msgid "Expected no output from CLI command %(cmd)s, got %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:65 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:256 +#, python-format +msgid "" +"Failed to parse CLI output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:142 +msgid "Must pass wwpn or host to lsfabric." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:158 +#, python-format +msgid "Did not find success message nor error for %(fun)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:163 +msgid "" +"storwize_svc_multihostmap_enabled is set to False, not allowing multi " +"host mapping." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:347 +#, python-format +msgid "Did not find expected key %(key)s in %(fun)s: %(raw)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:382 +#, python-format +msgid "" +"Unexpected CLI response: header/row mismatch. header: %(header)s, row: " +"%(row)s" +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:419 +#, python-format +msgid "No element by given name %s." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:440 +msgid "Not a valid value for NaElement." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:444 +msgid "NaElement name cannot be null." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:468 +msgid "Type cannot be converted into NaElement." 
+msgstr "" + +#: cinder/volume/drivers/netapp/common.py:75 +msgid "Required configuration not found" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:103 +#, python-format +msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:109 +#, python-format +msgid "Storage family %s is not supported" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:116 +#, python-format +msgid "No default storage protocol found for storage family %(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:123 +#, python-format +msgid "" +"Protocol %(storage_protocol)s is not supported for storage family " +"%(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:130 +#, python-format +msgid "" +"NetApp driver of family %(storage_family)s and protocol " +"%(storage_protocol)s loaded" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:139 +msgid "Only loading netapp drivers supported." +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:158 +#, python-format +msgid "" +"The configured NetApp driver is deprecated. Please refer the link to " +"resolve the issue '%s'." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:69 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:105 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:150 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:166 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:175 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:191 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:227 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:232 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:238 +#, python-format +msgid "Failed to get LUN target details for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:249 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:252 +#, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:290 +#, python-format +msgid "Snapshot %s deletion successful" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:310 +#: cinder/volume/drivers/netapp/iscsi.py:565 +#: cinder/volume/drivers/netapp/nfs.py:99 +#: cinder/volume/drivers/netapp/nfs.py:206 +#, python-format +msgid "Resizing %s failed. Cleaning volume." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:325 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:412 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:431 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:511 +msgid "Object is not a NetApp LUN." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:543 +#, python-format +msgid "Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:545 +#, python-format +msgid "Error getting lun attribute. Exception: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:600 +#, python-format +msgid "No need to extend volume %s as it is already the requested new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:606 +#, python-format +msgid "Resizing lun %s directly to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:633 +#, python-format +msgid "Lun %(path)s geometry failed. Message - %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:662 +#, python-format +msgid "Moving lun %(name)s to %(new_name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:677 +#, python-format +msgid "Resizing lun %s using sub clone to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:684 +#, python-format +msgid "%s cannot be sub clone resized as it is hosted on compressed volume" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:690 +#, python-format +msgid "%s cannot be sub clone resized as it contains no blocks." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:707 +#, python-format +msgid "Post clone resize lun %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:718 +#, python-format +msgid "Failure staging lun %s to tmp." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:723 +#, python-format +msgid "Failure moving new cloned lun to %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:727 +#, python-format +msgid "Failure deleting staged tmp lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:730 +#, python-format +msgid "Unknown exception in post clone resize lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:732 +#, python-format +msgid "Exception details: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:736 +msgid "Getting lun block count." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:741 +#, python-format +msgid "Failure getting lun info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:785 +#, python-format +msgid "Failed to get vol with required size and extra specs for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:796 +#, python-format +msgid "Error provisioning vol %(name)s on %(volume)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:841 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:982 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:986 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1087 +msgid "Cluster ssc is not updated. No volume stats found." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1149 +#: cinder/volume/drivers/netapp/nfs.py:1080 +msgid "Unsupported ONTAP version. ONTAP version 7.3.1 and above is supported." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1153 +#: cinder/volume/drivers/netapp/nfs.py:1084 +#: cinder/volume/drivers/netapp/utils.py:320 +msgid "Api version could not be determined." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1164 +#, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1273 +#, python-format +msgid "Error finding luns for volume %s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1390 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1393 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1456 +msgid "Volume refresh job already running. Returning..." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1462 +#, python-format +msgid "Error refreshing vol capacity. Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1470 +#, python-format +msgid "Refreshing capacity info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:104 +#: cinder/volume/drivers/netapp/nfs.py:211 +#, python-format +msgid "NFS file %s not discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:224 +#, python-format +msgid "Copied image to volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:230 +#, python-format +msgid "Registering image in cache %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:237 +#, python-format +msgid "" +"Exception while registering image %(image_id)s in cache. Exception: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:250 +#, python-format +msgid "Found cache file for image %(image_id)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:263 +#, python-format +msgid "Cloning img from cache for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:271 +msgid "Image cache cleaning in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:282 +msgid "Image cache cleaning in progress." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:293 +#, python-format +msgid "Cleaning cache for share %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:298 +#, python-format +msgid "Files to be queued for deletion %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:305 +#, python-format +msgid "Exception during cache cleaning %(share)s. Message - %(ex)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:311 +msgid "Image cache cleaning done." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:336 +#, python-format +msgid "Bytes to free %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:343 +#, python-format +msgid "Delete file path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:358 +#, python-format +msgid "Deleting file at path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:363 +#, python-format +msgid "Exception during deleting %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:395 +#, python-format +msgid "Unexpected exception in cloning image %(image_id)s. 
Message: %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:411 +#, python-format +msgid "Cloning image %s from cache" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:415 +#, python-format +msgid "Cache share: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:425 +#, python-format +msgid "Unexpected exception during image cloning in share %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:431 +#, python-format +msgid "Cloning image %s directly in share" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:436 +#, python-format +msgid "Share is cloneable %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:443 +#, python-format +msgid "Image is raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:450 +#, python-format +msgid "Image will locally be converted to raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:457 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:467 +#, python-format +msgid "Performing post clone for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:474 +msgid "NFS file could not be discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:478 +msgid "Checking file for resize" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:482 +#, python-format +msgid "Resizing file to %sG" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:488 +msgid "Resizing image file failed." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:510 +msgid "Discover file retries exhausted." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:529 +#, python-format +msgid "Image location not in the expected format %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:557 +#, python-format +msgid "Found possible share matches %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:561 +msgid "Unexpected exception while short listing used share." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:599 +#, python-format +msgid "Extending volume %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:710 +#, python-format +msgid "Shares on vserver %s will only be used for provisioning." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:714 +#: cinder/volume/drivers/netapp/nfs.py:892 +msgid "No vserver set in config. SSC will be disabled." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:757 +#, python-format +msgid "Exception creating vol %(name)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:765 +#, python-format +msgid "Volume %s could not be created on shares." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:815 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:856 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:862 +#, python-format +msgid "" +"Cloning with params volume %(volume)s, src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:924 +msgid "No cluster ssc stats found. Wait for next volume stats update." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:941 +msgid "No shares found hence skipping ssc refresh." 
+msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:978 +#: cinder/volume/drivers/netapp/nfs.py:1221 +#, python-format +msgid "Shortlisted del elg files %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:983 +#: cinder/volume/drivers/netapp/nfs.py:1226 +#, python-format +msgid "Getting file usage for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:988 +#: cinder/volume/drivers/netapp/nfs.py:1231 +#, python-format +msgid "file-usage for path %(path)s is %(bytes)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1005 +#: cinder/volume/drivers/netapp/nfs.py:1268 +#, python-format +msgid "Share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1007 +#: cinder/volume/drivers/netapp/nfs.py:1270 +#, python-format +msgid "No share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1038 +#, python-format +msgid "Found volume %(vol)s for share %(share)s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1129 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1139 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:241 +#, python-format +msgid "Unexpected error while creating ssc vol list. Message - %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:272 +#, python-format +msgid "Exception querying aggr options. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:313 +#, python-format +msgid "Exception querying sis information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:347 +#, python-format +msgid "Exception querying mirror information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:379 +#, python-format +msgid "Exception querying storage disk. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:421 +#, python-format +msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:455 +#, python-format +msgid "Successfully completed stale refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:482 +#, python-format +msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:488 +#, python-format +msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:500 +msgid "Backend not a VolumeDriver." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:502 +msgid "Backend server not NaServer." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:505 +msgid "ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:517 +msgid "refresh stale ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:616 +msgid "Fatal error: User not permitted to query NetApp volumes." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:623 +#, python-format +msgid "" +"The user does not have access or sufficient privileges to use all ssc " +"apis. The ssc features %s may not work as expected." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:122 +msgid "ems executed successfully." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:124 +#, python-format +msgid "Failed to invoke ems. 
Message : %s" +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:137 +msgid "" +"It is not the recommended way to use drivers by NetApp. Please use " +"NetAppDriver to achieve the functionality." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:160 +msgid "Requires an NaServer instance." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:317 +msgid "Unsupported Clustered Data ONTAP version." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:99 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:150 +#, python-format +msgid "Extending volume: %(id)s New size: %(size)s GB" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:166 +#, python-format +msgid "Volume %s does not exist, it seems it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:179 +#, python-format +msgid "Cannot delete snapshot %(origin): %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:190 +#, python-format +msgid "Creating temp snapshot of the original volume: %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:200 +#: cinder/volume/drivers/nexenta/nfs.py:200 +#, python-format +msgid "Volume creation failed, deleting created snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:205 +#: cinder/volume/drivers/nexenta/nfs.py:205 +#, python-format +msgid "Failed to delete zfs snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:223 +#, python-format +msgid "Enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:250 +#, python-format +msgid "Remote NexentaStor appliance at %s should be SSH-bound." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:267 +#, python-format +msgid "" +"Cannot send source snapshot %(src)s to destination %(dst)s. Reason: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:275 +#, python-format +msgid "" +"Cannot delete temporary source snapshot %(src)s on NexentaStor Appliance:" +" %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:281 +#, python-format +msgid "Cannot delete source volume %(volume)s on NexentaStor Appliance: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:318 +#, python-format +msgid "Snapshot %s does not exist, it seems it was already deleted." 
+msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:439 +#: cinder/volume/drivers/windows/windows_utils.py:230 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:449 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:461 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:471 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:481 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:514 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:522 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:83 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:88 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:89 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:90 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:96 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:85 +#, python-format +msgid "Volume %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:89 +#, python-format +msgid "Folder %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:114 +#, python-format +msgid "Creating folder on Nexenta Store %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:146 +#, python-format +msgid "Cannot destroy created folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:176 +#, python-format +msgid "Cannot destroy cloned folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:227 +#, python-format +msgid "Folder %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:237 +#: cinder/volume/drivers/nexenta/nfs.py:268 +#, python-format +msgid "Snapshot %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:302 +#, python-format +msgid "Creating regular file: %s.This may take some time." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:313 +#, python-format +msgid "Regular file: %s created." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:365 +#, python-format +msgid "Sharing folder %s on Nexenta Store" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:393 +#, python-format +msgid "Shares loaded: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/utils.py:46 +#, python-format +msgid "Invalid value: \"%s\"" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:93 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:99 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:107 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:137 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:190 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:246 +#, python-format +msgid "Snapshot info: %(name)s => %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:321 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:79 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:166 +#, python-format +msgid "Invalid hp3parclient version. Version %s or greater required." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:179 +#, python-format +msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:193 +#, python-format +msgid "HP3PARCommon %(common_ver)s, hp3parclient %(rest_ver)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:212 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:494 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:228 +#, python-format +msgid "Failed to get domain because CPG (%s) doesn't exist on array." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:247 +#, python-format +msgid "Error extending volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:347 +#, python-format +msgid "command %s failed" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:390 +#, python-format +msgid "Error running ssh command: %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:586 +#, python-format +msgid "VV Set %s does not exist." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:633 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:684 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:752 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1004 +#, python-format +msgid "Failure in update_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1019 +#, python-format +msgid "Failure in clear_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1031 +#, python-format +msgid "Error attaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1039 +#, python-format +msgid "Error detaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:124 +#, python-format +msgid "Invalid IP address format '%s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:158 +#, python-format +msgid "" +"Found invalid iSCSI IP address(s) in configuration option(s) " +"hp3par_iscsi_ips or iscsi_ip_address '%s.'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:164 +msgid "At least one valid iSCSI IP address must be set." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:266 +msgid "Least busy iSCSI port not found, using first iSCSI port in list." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:75 +#, python-format +msgid "Failure while invoking function: %(func)s. Error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:162 +#, python-format +msgid "Error while terminating session: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:165 +msgid "Successfully established connection to the server." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:172 +#, python-format +msgid "Error while logging out the user: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:218 +#, python-format +msgid "" +"Not authenticated error occurred. Will create session and try API call " +"again: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:258 +#, python-format +msgid "Task: %(task)s progress: %(prog)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:262 +#, python-format +msgid "Task %s status: success." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:266 +#: cinder/volume/drivers/vmware/api.py:271 +#, python-format +msgid "Task: %(task)s failed with error: %(err)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:290 +msgid "Lease is ready." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:294 +msgid "Lease initializing..." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:304 +#, python-format +msgid "Error: unknown lease state %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:51 +#, python-format +msgid "Read %(bytes)s out of %(max)s from ThreadSafePipe." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:56 +#, python-format +msgid "Completed transfer of size %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:102 +#, python-format +msgid "Initiating image service update on image: %(image)s with meta: %(meta)s" +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:117 +#, python-format +msgid "Glance image: %s is now active." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:123 +#, python-format +msgid "Glance image: %s is in killed state." 
+msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:132 +#, python-format +msgid "Glance image %(id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:171 +#, python-format +msgid "" +"Exception during HTTP connection close in VMwareHTTPWrite. Exception is " +"%s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:203 +#: cinder/volume/drivers/vmware/read_write_util.py:292 +msgid "Could not retrieve URL from lease." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:206 +#, python-format +msgid "Opening vmdk url: %s for write." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:231 +#, python-format +msgid "Written %s bytes to vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:242 +#: cinder/volume/drivers/vmware/read_write_util.py:318 +#, python-format +msgid "Updating progress to %s percent." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:258 +#: cinder/volume/drivers/vmware/read_write_util.py:334 +msgid "Lease released." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:260 +#: cinder/volume/drivers/vmware/read_write_util.py:336 +#, python-format +msgid "Lease is already in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:295 +#, python-format +msgid "Opening vmdk url: %s for read." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:307 +#, python-format +msgid "Read %s bytes from vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:150 +#, python-format +msgid "Error(s): %s occurred in the call to RetrievePropertiesEx." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:189 +#, python-format +msgid "No such SOAP method %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:198 +#, python-format +msgid "httplib error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:209 +#, python-format +msgid "Socket error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:218 +#, python-format +msgid "Type error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:225 +#, python-format +msgid "Error in %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:112 +#, python-format +msgid "Returning spec value %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:115 +#, python-format +msgid "Invalid spec value: %s specified." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:118 +#, python-format +msgid "Returning default spec value: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:169 +#, python-format +msgid "%s not set." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:174 +#, python-format +msgid "Successfully setup driver: %(driver)s for server: %(ip)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:219 +msgid "Backing not available, no operation to be performed." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:287 +#, python-format +msgid "" +"Unable to pick datastore to accommodate %(size)s bytes from the " +"datastores: %(dss)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:293 +#, python-format +msgid "" +"Selected datastore: %(datastore)s with %(host_count)d connected host(s) " +"for the volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:375 +#, python-format +msgid "" +"Unable to find suitable datastore for volume of size: %(vol)s GB under " +"host: %(host)s. 
More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:385 +#, python-format +msgid "Unable to find host to accommodate a disk of size: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:412 +#, python-format +msgid "" +"Unable to find suitable datastore for volume: %(vol)s under host: " +"%(host)s. More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:422 +#, python-format +msgid "Unable to create volume: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:441 +#, python-format +msgid "The instance: %s for which initialize connection is called, exists." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:448 +#, python-format +msgid "There is no backing for the volume: %s. Need to create one." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:456 +msgid "The instance for which initialize connection is called, does not exist." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:461 +#, python-format +msgid "Trying to boot from an empty volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:470 +#, python-format +msgid "" +"Returning connection_info: %(info)s for volume: %(volume)s with " +"connector: %(connector)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:518 +#, python-format +msgid "Snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:523 +#, python-format +msgid "There is no backing, so will not create snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:528 +#, python-format +msgid "Successfully created snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:549 +#, python-format +msgid "Delete snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:554 +#, python-format +msgid "There is no backing, and so there is no snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:558 +#, python-format +msgid "Successfully deleted snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:586 +#, python-format +msgid "Successfully cloned new backing: %(back)s from source VMDK file: %(vmdk)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:603 +#, python-format +msgid "" +"There is no backing for the source volume: %(svol)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:633 +#, python-format +msgid "" +"There is no backing for the source snapshot: %(snap)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:642 +#: cinder/volume/drivers/vmware/vmdk.py:982 +#, python-format +msgid "" +"There is no snapshot point for the snapshoted volume: %(snap)s. Not " +"creating any backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:678 +#, python-format +msgid "Cannot create image of disk format: %s. Only vmdk disk format is accepted." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:713 +#: cinder/volume/drivers/vmware/vmdk.py:771 +#, python-format +msgid "Fetching glance image: %(id)s to server: %(host)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:722 +#: cinder/volume/drivers/vmware/vmdk.py:792 +#, python-format +msgid "Done copying image: %(id)s to volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:725 +#, python-format +msgid "" +"Exception in copy_image_to_volume: %(excep)s. Deleting the backing: " +"%(back)s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:746 +#, python-format +msgid "Exception in _select_ds_for_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:749 +#, python-format +msgid "Selected datastore %(ds)s for new volume of size %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:784 +#, python-format +msgid "Exception in copy_image_to_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:787 +#, python-format +msgid "Deleting the backing: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:808 +#, python-format +msgid "Copy glance image: %s to create new volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:842 +msgid "Upload to glance of attached volume is not supported." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:847 +#, python-format +msgid "Copy Volume: %s to new image." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:853 +#, python-format +msgid "Backing not found, creating for volume: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:872 +#, python-format +msgid "Done copying volume %(vol)s to a new image %(img)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:922 +#, python-format +msgid "Relocating volume: %(backing)s to %(ds)s and %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:961 +#: cinder/volume/drivers/vmware/volumeops.py:630 +#, python-format +msgid "Successfully created clone: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:974 +#, python-format +msgid "" +"There is no backing for the snapshoted volume: %(snap)s. Not creating any" +" backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1010 +#, python-format +msgid "" +"There is no backing for the source volume: %(src)s. Not creating any " +"backing for volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1018 +#, python-format +msgid "Linked clone of source volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:94 +#, python-format +msgid "Downloading image: %s from glance image server as a flat vmdk file." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:107 +#: cinder/volume/drivers/vmware/vmware_images.py:126 +#, python-format +msgid "Downloaded image: %s from glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:113 +#, python-format +msgid "Downloading image: %s from glance image server using HttpNfc import." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:132 +#, python-format +msgid "Uploading image: %s to the Glance image server using HttpNfc export." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:158 +#, python-format +msgid "Uploaded image: %s to the Glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:87 +#, python-format +msgid "Did not find any backing with name: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:94 +#, python-format +msgid "Deleting the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:97 +#, python-format +msgid "Initiated deletion of VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:99 +#, python-format +msgid "Deleted the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:237 +#, python-format +msgid "There are no valid datastores attached to %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:289 +#, python-format +msgid "" +"Creating folder: %(child_folder_name)s under parent folder: " +"%(parent_folder)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:306 +#, python-format +msgid "Child folder already present: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:314 +#, python-format +msgid "Created child folder: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:365 +#, python-format +msgid "Spec for creating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:383 +#, python-format +msgid "" +"Creating volume backing name: %(name)s disk_type: %(disk_type)s size_kb: " +"%(size_kb)s at folder: %(folder)s resourse pool: %(resource_pool)s " +"datastore name: %(ds_name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:395 +#, python-format +msgid "Initiated creation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:398 +#, python-format +msgid "Successfully created volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:438 +#, python-format +msgid "Spec for relocating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:452 +#, python-format +msgid "" +"Relocating backing: %(backing)s to datastore: %(ds)s and resource pool: " +"%(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:462 +#, python-format +msgid "Initiated relocation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:464 +#, python-format +msgid "" +"Successfully relocated volume backing: %(backing)s to datastore: %(ds)s " +"and resource pool: %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:474 +#, python-format +msgid "Moving backing: %(backing)s to folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:479 +#, python-format +msgid "Initiated move of volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:482 +#, python-format +msgid "Successfully moved volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:494 +#, python-format +msgid "Snapshoting backing: %(backing)s with name: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:501 +#, python-format +msgid "Initiated snapshot of volume backing: %(backing)s named: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:505 +#, python-format +msgid "Successfully created snapshot: %(snap)s for volume backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:553 +#, python-format +msgid "Deleting the snapshot: %(name)s from backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:558 +#, python-format +msgid "" +"Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " +"delete anything." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:565 +#, python-format +msgid "Initiated snapshot: %(name)s deletion for backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:569 +#, python-format +msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:597 +#, python-format +msgid "Spec for cloning the backing: %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:613 +#, python-format +msgid "" +"Creating a clone of backing: %(back)s, named: %(name)s, clone type: " +"%(type)s from snapshot: %(snap)s on datastore: %(ds)s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:627 +#, python-format +msgid "Initiated clone of backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:638 +#, python-format +msgid "Deleting file: %(file)s under datacenter: %(dc)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:646 +#, python-format +msgid "Initiated deletion via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:648 +#, python-format +msgid "Successfully deleted file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:701 +msgid "Copying disk data before snapshot of the VM" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:711 +#, python-format +msgid "Initiated copying disk data via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:713 +#, python-format +msgid "Successfully copied disk at: %(src)s to: %(dest)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:722 +#, python-format +msgid "Deleting vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:729 +#, python-format +msgid "Initiated deleting vmdk file via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:731 +#, python-format +msgid "Deleted vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/windows/windows.py:102 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:47 +#, python-format +msgid "" +"check_for_setup_error: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:53 +msgid "check_for_setup_error: there is no ISCSI traffic listening." +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:63 +#, python-format +msgid "" +"get_host_information: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:73 +#, python-format +msgid "" +"get_host_information: the ISCSI target information could not be " +"retrieved. WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:105 +#, python-format +msgid "" +"associate_initiator_with_iscsi_target: an association between initiator: " +"%(init)s and target name: %(target)s could not be established. WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:123 +#, python-format +msgid "" +"delete_iscsi_target: error when deleting the iscsi target associated with" +" target name: %(target)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:139 +#, python-format +msgid "" +"create_volume: error when creating the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:157 +#, python-format +msgid "" +"delete_volume: error when deleting the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:177 +#, python-format +msgid "" +"create_snapshot: error when creating the snapshot name: %(vol_name)s . 
" +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:193 +#, python-format +msgid "" +"create_volume_from_snapshot: error when creating the volume name: " +"%(vol_name)s from snapshot name: %(snap_name)s. WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:208 +#, python-format +msgid "" +"delete_snapshot: error when deleting the snapshot name: %(snap_name)s . " +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:223 +#, python-format +msgid "" +"create_iscsi_target: error when creating iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:240 +#, python-format +msgid "" +"remove_iscsi_target: error when deleting iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:255 +#, python-format +msgid "" +"add_disk_to_target: error adding disk associated to volume : %(vol_name)s" +" to the target name: %(tar_name)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:273 +#, python-format +msgid "" +"copy_vhd_disk: error when copying disk from source path : %(src_path)s to" +" destination path: %(dest_path)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:290 +#, python-format +msgid "" +"extend: error when extending the volume: %(vol_name)s .WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/flows/common.py:52 +#, python-format +msgid "Restoring source %(source_volid)s status to %(status)s" +msgstr "" + +#: cinder/volume/flows/common.py:58 +#, python-format +msgid "" +"Failed setting source volume %(source_volid)s back to its initial " +"%(source_status)s status" +msgstr "" + +#: cinder/volume/flows/common.py:83 +#, python-format +msgid "Updating volume: %(volume_id)s with %(update)s due to: %(reason)s" +msgstr "" + +#: cinder/volume/flows/common.py:90 +#: cinder/volume/flows/manager/create_volume.py:676 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(update)s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:81 +#, python-format +msgid "Originating snapshot status must be one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:103 +#, python-format +msgid "" +"Unable to create a volume from an originating source volume when its " +"status is not one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:126 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than the snapshot size " +"%(snap_size)sGB. They must be >= original snapshot size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:135 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than original volume size " +"%(source_size)sGB. They must be >= original volume size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:144 +#, python-format +msgid "Volume size %(size)s must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:186 +#, python-format +msgid "" +"Size of specified image %(image_size)sGB is larger than volume size " +"%(volume_size)sGB." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:194 +#, python-format +msgid "" +"Volume size %(volume_size)sGB cannot be smaller than the image minDisk " +"size %(min_disk)sGB." 
+msgstr "" + +#: cinder/volume/flows/api/create_volume.py:212 +#, python-format +msgid "Metadata property key %s greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:217 +#, python-format +msgid "Metadata property key %s value greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:254 +#, python-format +msgid "Availability zone '%s' is invalid" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:267 +msgid "Volume must be in the same availability zone as the snapshot" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:276 +msgid "Volume must be in the same availability zone as the source volume" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:315 +msgid "Volume type will be changed to be the same as the source volume." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:463 +#, python-format +msgid "Failed destroying volume entry %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:546 +#, python-format +msgid "Failed rolling back quota for %s reservations" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:590 +#, python-format +msgid "Failed to update quota for deleting volume: %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:678 +#: cinder/volume/flows/manager/create_volume.py:192 +#, python-format +msgid "Volume %s: create failed" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:682 +msgid "Unexpected build error:" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:105 +#, python-format +msgid "" +"Volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d due to " +"%(reason)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:124 +#, python-format +msgid "Volume %s: re-scheduled" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:141 +#, python-format +msgid "Updating volume %(volume_id)s with %(update)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:146 +#, python-format +msgid "Volume %s: resetting 'creating' status failed." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:165 +#, python-format +msgid "Volume %s: rescheduling failed" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:308 +#, python-format +msgid "" +"Failed notifying about the volume action %(event)s for volume " +"%(volume_id)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:345 +#, python-format +msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:347 +#, python-format +msgid "" +"Failed updating volume %(vol_id)s metadata using the provided " +"%(src_type)s %(src_id)s metadata" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:405 +#, python-format +msgid "" +"Failed fetching snapshot %(snapshot_id)s bootable flag using the provided" +" glance snapshot %(snapshot_ref_id)s volume reference" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:418 +#, python-format +msgid "Marking volume %s as bootable." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:421 +#, python-format +msgid "Failed updating volume %(volume_id)s bootable flag to true" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:448 +#, python-format +msgid "" +"Attempting download of %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s." 
+msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:455 +#: cinder/volume/flows/manager/create_volume.py:466 +#, python-format +msgid "" +"Failed to copy image %(image_id)s to volume: %(volume_id)s, error: " +"%(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:461 +#, python-format +msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:475 +#, python-format +msgid "" +"Downloaded image %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s successfully." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:512 +#, python-format +msgid "" +"Creating volume glance metadata for volume %(volume_id)s backed by image " +"%(image_id)s with: %(vol_metadata)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:526 +#, python-format +msgid "" +"Cloning %(volume_id)s from image %(image_id)s at location " +"%(image_location)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:552 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(updates)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:574 +#, python-format +msgid "Unable to create volume. Volume driver %s not initialized" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:588 +#, python-format +msgid "" +"Volume %(volume_id)s: being created using %(functor)s with specification:" +" %(volume_spec)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:611 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with creation provided " +"model %(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:619 +#, python-format +msgid "Volume %s: creating export" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:633 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with driver provided model " +"%(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:680 +#, python-format +msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" +msgstr "" + +#~ msgid "Error retrieving volume status: %s" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get system name" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get storage pool data" +#~ msgstr "" + +#~ msgid "Cannot find any Fibre Channel HBAs" +#~ msgstr "" + +#~ msgid "Volume status must be available or error" +#~ msgstr "" + +#~ msgid "No backend config with id %s" +#~ msgstr "" + +#~ msgid "No sm_flavor called %s" +#~ msgstr "" + +#~ msgid "No sm_volume with id %s" +#~ msgstr "" + +#~ msgid "Error: %s" +#~ msgstr "" + +#~ msgid "Unexpected state while cloning %s" +#~ msgstr "" + +#~ msgid "iSCSI device not found at %s" +#~ msgstr "" + +#~ msgid "Fibre Channel device not found." +#~ msgstr "" + +#~ msgid "Uncaught exception" +#~ msgstr "" + +#~ msgid "Out reactor registered" +#~ msgstr "" + +#~ msgid "CONSUMER GOT %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT QUEUED %(data)s" +#~ msgstr "" + +#~ msgid "Could not create IPC directory %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT %(data)s" +#~ msgstr "" + +#~ msgid "May specify only one of snapshot, imageRef or source volume" +#~ msgstr "" + +#~ msgid "Volume size cannot be lesser than the Snapshot size" +#~ msgstr "" + +#~ msgid "Unable to clone volumes that are in an error state" +#~ msgstr "" + +#~ msgid "Clones currently must be >= original volume size." 
+#~ msgstr "" + +#~ msgid "Volume size '%s' must be an integer and greater than 0" +#~ msgstr "" + +#~ msgid "Size of specified image is larger than volume size." +#~ msgstr "" + +#~ msgid "Image minDisk size is larger than the volume size." +#~ msgstr "" + +#~ msgid "" +#~ msgstr "" + +#~ msgid "Availability zone is invalid" +#~ msgstr "" + +#~ msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +#~ msgstr "" + +#~ msgid "volume %s: creating from snapshot" +#~ msgstr "" + +#~ msgid "volume %s: creating from existing volume" +#~ msgstr "" + +#~ msgid "volume %s: creating from image" +#~ msgstr "" + +#~ msgid "volume %s: creating" +#~ msgstr "" + +#~ msgid "Setting volume: %s status to error after failed image copy." +#~ msgstr "" + +#~ msgid "Unexpected Error: " +#~ msgstr "" + +#~ msgid "volume %s: creating export" +#~ msgstr "" + +#~ msgid "volume %s: create failed" +#~ msgstr "" + +#~ msgid "volume %s: created successfully" +#~ msgstr "" + +#~ msgid "volume %s: Error trying to reschedule create" +#~ msgstr "" + +#~ msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +#~ msgstr "" + +#~ msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +#~ msgstr "" + +#~ msgid "Downloaded image %(image_id)s to %(volume_id)s successfully." +#~ msgstr "" + +#~ msgid "Array Mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "LUN %(lun)s of size %(size)s MB is created." +#~ msgstr "" + +#~ msgid "Array mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "Failed to attach iser target for volume %(volume_id)s." +#~ msgstr "" + +#~ msgid "Fetching %s" +#~ msgstr "" + +#~ msgid "Link Local address is not found.:%s" +#~ msgstr "" + +#~ msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +#~ msgstr "" + +#~ msgid "Started %(name)s on %(_host)s:%(_port)s" +#~ msgstr "" + +#~ msgid "Unable to find a Fibre Channel volume device" +#~ msgstr "" + +#~ msgid "Volume device not found at %s" +#~ msgstr "" + +#~ msgid "Unable to find Volume Group: %s" +#~ msgstr "" + +#~ msgid "Failed to create Volume Group: %s" +#~ msgstr "" + +#~ msgid "snapshot %(snap_name)s: creating" +#~ msgstr "" + +#~ msgid "Running with CoraidDriver for ESM EtherCLoud" +#~ msgstr "" + +#~ msgid "Update session cookie %(session)s" +#~ msgstr "" + +#~ msgid "Message : %(message)s" +#~ msgstr "" + +#~ msgid "Error while trying to set group: %(message)s" +#~ msgstr "" + +#~ msgid "Unable to find group: %(group)s" +#~ msgstr "" + +#~ msgid "ESM urlOpen error" +#~ msgstr "" + +#~ msgid "JSON Error" +#~ msgstr "" + +#~ msgid "Request without URL" +#~ msgstr "" + +#~ msgid "Configure data : %s" +#~ msgstr "" + +#~ msgid "Configure response : %s" +#~ msgstr "" + +#~ msgid "Unable to retrive volume infos for volume %(volname)s" +#~ msgstr "" + +#~ msgid "Cannot login on Coraid ESM" +#~ msgstr "" + +#~ msgid "Fail to create volume %(volname)s" +#~ msgstr "" + +#~ msgid "Failed to delete volume %(volname)s" +#~ msgstr "" + +#~ msgid "Failed to Create Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "Failed to Delete Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "Failed to Create Volume from Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "fmt = %(fmt)s backed by: %(backing_file)s" +#~ msgstr "" + +#~ msgid "Expected image to be in raw format, but is %s" +#~ msgstr "" + +#~ msgid "volume group %s doesn't exist" +#~ msgstr "" + +#~ msgid "Error retrieving volume stats: %s" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Could not get system name" +#~ msgstr "" + +#~ msgid "CPG (%s) must 
be in a domain" +#~ msgstr "" + +#~ msgid "Error populating default encryption types!" +#~ msgstr "" + +#~ msgid "Unexpected error while running command." +#~ msgstr "" + +#~ msgid "Nexenta SA returned the error" +#~ msgstr "" + +#~ msgid "Ignored target group creation error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group member addition error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LU creation error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Copying metadata from snapshot %(snap_volume_id)s to %(volume_id)s" +#~ msgstr "" + +#~ msgid "Connection to glance failed" +#~ msgstr "" + +#~ msgid "Invalid snapshot" +#~ msgstr "" + +#~ msgid "Invalid input received" +#~ msgstr "" + +#~ msgid "Invalid volume type" +#~ msgstr "" + +#~ msgid "Invalid volume" +#~ msgstr "" + +#~ msgid "Invalid host" +#~ msgstr "" + +#~ msgid "Invalid auth key" +#~ msgstr "" + +#~ msgid "Invalid metadata" +#~ msgstr "" + +#~ msgid "Invalid metadata size" +#~ msgstr "" + +#~ msgid "Migration error" +#~ msgstr "" + +#~ msgid "Quota exceeded" +#~ msgstr "" + +#~ msgid "Connection to swift failed" +#~ msgstr "" + +#~ msgid "Volume migration failed" +#~ msgstr "" + +#~ msgid "SSH command injection detected" +#~ msgstr "" + +#~ msgid "Invalid qos specs" +#~ msgstr "" + +#~ msgid "debug in callback: %s" +#~ msgstr "" + +#~ msgid "Expected object of type: %s" +#~ msgstr "" + +#~ msgid "timefunc: '%(name)s' took %(total_time).2f secs" +#~ msgstr "" + +#~ msgid "base image still has %s snapshots so not deleting base image" +#~ msgstr "" + +#~ msgid "Failed to rename migration destination volume %(vol)s to %(name)s" +#~ msgstr "" + +#~ msgid "Resize volume \"%(name)s\" to %(size)s" +#~ msgstr "" + +#~ msgid "Volume \"%(name)s\" resized. New size is %(size)s" +#~ msgstr "" + +#~ msgid "Invalid snapshot backing file format: %s" +#~ msgstr "" + +#~ msgid "Extend volume from %(old_size) to %(new_size)" +#~ msgstr "" + +#~ msgid "pool %s doesn't exist" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Could not get system name." +#~ msgstr "" + +#~ msgid "Disk not found: %s" +#~ msgstr "" + +#~ msgid "read timed out" +#~ msgstr "" + +#~ msgid "check_for_setup_error." +#~ msgstr "" + +#~ msgid "check_for_setup_error: Can not get device type." +#~ msgstr "" + +#~ msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +#~ msgstr "" + +#~ msgid "_get_device_type: Storage Pool must be configured." +#~ msgstr "" + +#~ msgid "create_volume:volume name: %s." +#~ msgstr "" + +#~ msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +#~ msgstr "" + +#~ msgid "create_export: volume name:%s" +#~ msgstr "" + +#~ msgid "create_export:Volume %(name)s does not exist." +#~ msgstr "" + +#~ msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +#~ msgstr "" + +#~ msgid "terminate_connection:Host does not exist. Host name:%(host)s." +#~ msgstr "" + +#~ msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +#~ msgstr "" + +#~ msgid "create_snapshot:Device does not support snapshot." +#~ msgstr "" + +#~ msgid "create_snapshot:Resource pool needs 1GB valid size at least." +#~ msgstr "" + +#~ msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +#~ msgstr "" + +#~ msgid "create_snapshot:Snapshot does not exist. 
Snapshot name:%(name)s" +#~ msgstr "" + +#~ msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +#~ msgstr "" + +#~ msgid "delete_snapshot:Device does not support snapshot." +#~ msgstr "" + +#~ msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +#~ msgstr "" + +#~ msgid "_check_conf_file: %s" +#~ msgstr "" + +#~ msgid "Write login information to xml error. %s" +#~ msgstr "" + +#~ msgid "_get_login_info error. %s" +#~ msgstr "" + +#~ msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +#~ msgstr "" + +#~ msgid "_get_lun_set_info:%s" +#~ msgstr "" + +#~ msgid "_get_iscsi_info:%s" +#~ msgstr "" + +#~ msgid "CLI command:%s" +#~ msgstr "" + +#~ msgid "_execute_cli:%s" +#~ msgstr "" + +#~ msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +#~ msgstr "" + +#~ msgid "_get_tgt_iqn:iSCSI IP is %s." +#~ msgstr "" + +#~ msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +#~ msgstr "" + +#~ msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +#~ msgstr "" + +#~ msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +#~ msgstr "" + +#~ msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." +#~ msgstr "" + +#~ msgid "Ignored target creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group member addition error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LU creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LUN mapping entry addition error while ensuring export" +#~ msgstr "" + +#~ msgid "Invalid source volume %(reason)s." +#~ msgstr "" + +#~ msgid "The request is invalid." +#~ msgstr "" + +#~ msgid "Volume %(volume_id)s persistence file could not be found." +#~ msgstr "" + +#~ msgid "No disk at %(location)s" +#~ msgstr "" + +#~ msgid "Class %(class_name)s could not be found: %(exception)s" +#~ msgstr "" + +#~ msgid "Action not allowed." +#~ msgstr "" + +#~ msgid "Key pair %(key_name)s already exists." +#~ msgstr "" + +#~ msgid "Migration error: %(reason)s" +#~ msgstr "" + +#~ msgid "Maximum volume/snapshot size exceeded" +#~ msgstr "" + +#~ msgid "3PAR Host already exists: %(err)s. %(info)s" +#~ msgstr "" + +#~ msgid "Backup volume %(volume_id)s type not recognised." +#~ msgstr "" + +#~ msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s" +#~ msgstr "" + +#~ msgid "ssh_read: Read SSH timeout" +#~ msgstr "" + +#~ msgid "do_setup." +#~ msgstr "" + +#~ msgid "create_volume: volume name: %s." +#~ msgstr "" + +#~ msgid "delete_volume: volume name: %s." +#~ msgstr "" + +#~ msgid "create_cloned_volume: src volume: %(src)s tgt volume: %(tgt)s" +#~ msgstr "" + +#~ msgid "create_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" +#~ msgstr "" + +#~ msgid "delete_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" +#~ msgstr "" + +#~ msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Updating volume stats" +#~ msgstr "" + +#~ msgid "restore finished." +#~ msgstr "" + +#~ msgid "Error encountered during initialization of driver: %s" +#~ msgstr "" + +#~ msgid "Unabled to update stats, driver is uninitialized" +#~ msgstr "" + +#~ msgid "Snapshot file at %s does not exist." 
+#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %s does not exist" +#~ msgstr "" + +#~ msgid "Login to 3PAR array invalid" +#~ msgstr "" + +#~ msgid "There are no datastores present under %s." +#~ msgstr "" + +#~ msgid "Size for volume: %s not found, skipping secure delete." +#~ msgstr "" + +#~ msgid "Could not find attribute for LUN named %s" +#~ msgstr "" + +#~ msgid "Cleaning up incomplete backup operations" +#~ msgstr "" + +#~ msgid "Resetting volume %s to available (was backing-up)" +#~ msgstr "" + +#~ msgid "Resetting volume %s to error_restoring (was restoring-backup)" +#~ msgstr "" + +#~ msgid "Resetting backup %s to error (was creating)" +#~ msgstr "" + +#~ msgid "Resetting backup %s to available (was restoring)" +#~ msgstr "" + +#~ msgid "Resuming delete on backup: %s" +#~ msgstr "" + +#~ msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +#~ msgstr "" + +#~ msgid "create_backup finished. backup: %s" +#~ msgstr "" + +#~ msgid "delete_backup started, backup: %s" +#~ msgstr "" + +#~ msgid "delete_backup finished, backup %s deleted" +#~ msgstr "" + +#~ msgid "JSON transfer Error" +#~ msgstr "" + +#~ msgid "create volume error: %(err)s" +#~ msgstr "" + +#~ msgid "Create snapshot error." +#~ msgstr "" + +#~ msgid "Create luncopy error." +#~ msgstr "" + +#~ msgid "_find_host_lun_id transfer data error! " +#~ msgstr "" + +#~ msgid "ssh_read: Read SSH timeout." +#~ msgstr "" + +#~ msgid "There are no hosts in the inventory." +#~ msgstr "" + +#~ msgid "Unable to create volume: %(vol)s on the hosts: %(hosts)s." +#~ msgstr "" + +#~ msgid "Successfully cloned new backing: %s." +#~ msgstr "" + +#~ msgid "Successfully reverted clone: %(clone)s to snapshot: %(snapshot)s." +#~ msgstr "" + +#~ msgid "Copying backing files from %(src)s to %(dest)s." +#~ msgstr "" + +#~ msgid "Initiated copying of backing via task: %s." +#~ msgstr "" + +#~ msgid "Successfully copied backing to %s." +#~ msgstr "" + +#~ msgid "Registering backing at path: %s to inventory." +#~ msgstr "" + +#~ msgid "Initiated registring backing, task: %s." +#~ msgstr "" + +#~ msgid "Successfully registered backing: %s." +#~ msgstr "" + +#~ msgid "Reverting backing to snapshot: %s." +#~ msgstr "" + +#~ msgid "Initiated reverting snapshot via task: %s." +#~ msgstr "" + +#~ msgid "Successfully reverted to snapshot: %s." +#~ msgstr "" + +#~ msgid "Successfully copied disk data to: %s." +#~ msgstr "" + +#~ msgid "Error(s): %s occurred in the call to RetrieveProperties." +#~ msgstr "" + +#~ msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +#~ msgstr "" + +#~ msgid "Deploy v1 of the Cinder API. " +#~ msgstr "" + +#~ msgid "Deploy v2 of the Cinder API. " +#~ msgstr "" + +#~ msgid "_read_xml:%s" +#~ msgstr "" + +#~ msgid "request ip info is %s." +#~ msgstr "" + +#~ msgid "new str info is %s." +#~ msgstr "" + +#~ msgid "Failed to create iser target for volume %(volume_id)s." +#~ msgstr "" + +#~ msgid "Failed to remove iser target for volume %(volume_id)s." 
+#~ msgstr "" + +#~ msgid "rtstool is not installed correctly" +#~ msgstr "" + +#~ msgid "Creating iser_target for: %s" +#~ msgstr "" + +#~ msgid "Failed to create iser target for volume id:%(vol_id)s: %(e)s" +#~ msgstr "" + +#~ msgid "Removing iser_target for: %s" +#~ msgstr "" + +#~ msgid "Failed to remove iser target for volume id:%(vol_id)s: %(e)s" +#~ msgstr "" + +#~ msgid "Volume %s does not exist, it seems it was already deleted" +#~ msgstr "" + +#~ msgid "Executing zfs send/recv on the appliance" +#~ msgstr "" + +#~ msgid "zfs send/recv done, new volume %s created" +#~ msgstr "" + +#~ msgid "Failed to delete temp snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" + +#~ msgid "Failed to delete zfs recv snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" + +#~ msgid "rbd export-diff failed - %s" +#~ msgstr "" + +#~ msgid "rbd import-diff failed - %s" +#~ msgstr "" + +#~ msgid "%s is not on GPFS. Perhaps GPFS not mounted." +#~ msgstr "" + +#~ msgid "Folder %s does not exist, it seems it was already deleted." +#~ msgstr "" + +#~ msgid "No 'os-update_readonly_flag' was specified in request." +#~ msgstr "" + +#~ msgid "Volume 'readonly' flag must be specified in request as a boolean." +#~ msgstr "" + +#~ msgid "ISER provider_location not stored, using discovery" +#~ msgstr "" + +#~ msgid "Could not find iSER export for volume %s" +#~ msgstr "" + +#~ msgid "ISER Discovery: Found %s" +#~ msgstr "" + +#~ msgid "Failed to access the device on the path %(path)s: %(error)s." +#~ msgstr "" + +#~ msgid "iSER device not found at %s" +#~ msgstr "" + +#~ msgid "Found iSER node %(host_device)s (after %(tries)s rescans)." +#~ msgstr "" + +#~ msgid "Skipping ensure_export. No iser_target provisioned for volume: %s" +#~ msgstr "" + +#~ msgid "Skipping remove_export. No iser_target provisioned for volume: %s" +#~ msgstr "" + +#~ msgid "Downloading image: %s from glance image server." +#~ msgstr "" + +#~ msgid "Uploading image: %s to the Glance image server." +#~ msgstr "" + +#~ msgid "Invalid request body" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: prefix %s" +#~ msgstr "" + +#~ msgid "Schedule volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete schedule volume using flow: %s" +#~ msgstr "" + +#~ msgid "Create volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete create volume workflow" +#~ msgstr "" + +#~ msgid "Expected volume result not found" +#~ msgstr "" + +#~ msgid "Manager volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete manager volume workflow" +#~ msgstr "" + +#~ msgid "Unable to update stats, driver is uninitialized" +#~ msgstr "" + +#~ msgid "Bad reponse from server: %s" +#~ msgstr "" + +#~ msgid "%(flow)s has moved into state %(state)s from state %(old_state)s" +#~ msgstr "" + +#~ msgid "No request spec, will not reschedule" +#~ msgstr "" + +#~ msgid "No retry filter property or associated retry info, will not reschedule" +#~ msgstr "" + +#~ msgid "Retry info not present, will not reschedule" +#~ msgstr "" + +#~ msgid "Clear capabilities" +#~ msgstr "" + +#~ msgid "This usually means the volume was never succesfully created." +#~ msgstr "" + +#~ msgid "setting LU uppper (end) limit to %s" +#~ msgstr "" + +#~ msgid "Can't find lun or lun goup in array" +#~ msgstr "" + +#~ msgid "Volume to be restored to is smaller than the backup to be restored" +#~ msgstr "" + +#~ msgid "Volume driver '%(driver)s' not initialized." 
+#~ msgstr "" + +#~ msgid "in looping call" +#~ msgstr "" + +#~ msgid "Is the appropriate service running?" +#~ msgstr "" + +#~ msgid "Could not find another host" +#~ msgstr "" + +#~ msgid "Not enough allocatable volume gigabytes remaining" +#~ msgstr "" + +#~ msgid "Unable to update stats on non-intialized Volume Group: %s" +#~ msgstr "" + +#~ msgid "do_setup: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "migrate_volume started with more than one vdisk copy" +#~ msgstr "" + +#~ msgid "migrate_volume: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "Selected datastore: %s for the volume." +#~ msgstr "" + +#~ msgid "There are no valid datastores present under %s." +#~ msgstr "" + +#~ msgid "Unable to create volume, driver not initialized" +#~ msgstr "" + +#~ msgid "Migration %(migration_id)s could not be found." +#~ msgstr "" + +#~ msgid "Bad driver response status: %(status)s" +#~ msgstr "" + +#~ msgid "Instance %(instance_id)s could not be found." +#~ msgstr "" + +#~ msgid "Volume retype failed: %(reason)s" +#~ msgstr "" + +#~ msgid "SIGTERM received" +#~ msgstr "" + +#~ msgid "Child %(pid)d exited with status %(code)d" +#~ msgstr "" + +#~ msgid "_wait_child %d" +#~ msgstr "" + +#~ msgid "wait wrap.failed %s" +#~ msgstr "" + +#~ msgid "" +#~ "Report interval must be less than " +#~ "service down time. Current config: " +#~ ". Setting " +#~ "service_down_time to: %(new_service_down_time)s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target for volume %(name)s." +#~ msgstr "" + +#~ msgid "Updating iscsi target: %s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target %(name)s: %(e)s" +#~ msgstr "" + +#~ msgid "Caught '%(exception)s' exception." +#~ msgstr "" + +#~ msgid "Get code level failed" +#~ msgstr "" + +#~ msgid "do_setup: Could not get system name" +#~ msgstr "" + +#~ msgid "Failed to get license information." +#~ msgstr "" + +#~ msgid "do_setup: No configured nodes" +#~ msgstr "" + +#~ msgid "enter: _get_chap_secret_for_host: host name %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_chap_secret_for_host: host name " +#~ "%(host_name)s with secret %(chap_secret)s" +#~ msgstr "" + +#~ msgid "" +#~ "_create_host: Cannot clean host name. " +#~ "Host name is not unicode or string" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: %s" +#~ msgstr "" + +#~ msgid "leave: _get_host_from_connector: host %s" +#~ msgstr "" + +#~ msgid "enter: _create_host: host %s" +#~ msgstr "" + +#~ msgid "_create_host: No connector ports" +#~ msgstr "" + +#~ msgid "leave: _create_host: host %(host)s - %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +#~ msgstr "" + +#~ msgid "" +#~ "storwize_svc_multihostmap_enabled is set to " +#~ "False, Not allow multi host mapping" +#~ msgstr "" + +#~ msgid "volume %s mapping to multi host" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _map_vol_to_host: LUN %(result_lun)s, " +#~ "volume %(volume_name)s, host %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "leave: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "_create_host failed to return the host name." +#~ msgstr "" + +#~ msgid "_get_host_from_connector failed to return the host name for connector" +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to any host found." +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: Multiple mappings of " +#~ "volume %(vol_name)s found, no host " +#~ "specified." 
+#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to host %(host_name)s found" +#~ msgstr "" + +#~ msgid "protocol must be specified as ' iSCSI' or ' FC'" +#~ msgstr "" + +#~ msgid "enter: _create_vdisk: vdisk %s " +#~ msgstr "" + +#~ msgid "" +#~ "_create_vdisk %(name)s - did not find success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find success " +#~ "message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find mapping " +#~ "id in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to prepare FlashCopy" +#~ " from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "Unexecpted mapping status %(status)s for " +#~ "mapping %(id)s. Attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Mapping %(id)s prepare failed to " +#~ "complete within the allotted %(to)d " +#~ "seconds timeout. Terminating." +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s with " +#~ "exception %(ex)s" +#~ msgstr "" + +#~ msgid "_prepare_fc_map: %s" +#~ msgstr "" + +#~ msgid "" +#~ "_start_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "enter: _run_flashcopy: execute FlashCopy from" +#~ " source %(source)s to target %(target)s" +#~ msgstr "" + +#~ msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +#~ msgstr "" + +#~ msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %(src_vdisk)s (%(src_id)s) does not exist" +#~ msgstr "" + +#~ msgid "" +#~ "_create_copy: cannot get source vdisk " +#~ "%(src)s capacity from vdisk attributes " +#~ "%(attr)s" +#~ msgstr "" + +#~ msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_flashcopy_mapping_attributes: mapping " +#~ "%(fc_map_id)s, attributes %(attributes)s" +#~ msgstr "" + +#~ msgid "enter: _is_vdisk_defined: vdisk %s " +#~ msgstr "" + +#~ msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +#~ msgstr "" + +#~ msgid "enter: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "warning: Tried to delete vdisk %s but it does not exist." 
+#~ msgstr "" + +#~ msgid "leave: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "" +#~ "_add_vdisk_copy %(name)s - did not find" +#~ " success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "_get_vdisk_copy_attrs: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "_get_pool_attrs: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "enter: _execute_command_and_parse_attributes: command %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _execute_command_and_parse_attributes:\n" +#~ "command: %(cmd)s\n" +#~ "attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "_get_hdr_dic: attribute headers and values do not match.\n" +#~ " Headers: %(header)s\n" +#~ " Values: %(row)s" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ "stdout: %(out)s\n" +#~ "stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "Did not find expected column in %(fun)s: %(hdr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Volume size %(size)s cannot be lesser" +#~ " than the snapshot size %(snap_size)s. " +#~ "They must be >= original snapshot " +#~ "size." +#~ msgstr "" + +#~ msgid "" +#~ "Clones currently disallowed when %(size)s " +#~ "< %(source_size)s. They must be >= " +#~ "original volume size." +#~ msgstr "" + +#~ msgid "" +#~ "Size of specified image %(image_size)s " +#~ "is larger than volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "" +#~ "Image minDisk size %(min_disk)s is " +#~ "larger than the volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "Updating volume %(volume_id)s with %(update)s" +#~ msgstr "" + +#~ msgid "Volume %s: resetting 'creating' status failed" +#~ msgstr "" + +#~ msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s" +#~ msgstr "" + +#~ msgid "Marking volume %s as bootable" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting download of %(image_id)s " +#~ "(%(image_location)s) to volume %(volume_id)s" +#~ msgstr "" + +#~ msgid "" +#~ "Downloaded image %(image_id)s (%(image_location)s)" +#~ " to volume %(volume_id)s successfully" +#~ msgstr "" + +#~ msgid "" +#~ "Creating volume glance metadata for " +#~ "volume %(volume_id)s backed by image " +#~ "%(image_id)s with: %(vol_metadata)s" +#~ msgstr "" + +#~ msgid "" +#~ "Cloning %(volume_id)s from image %(image_id)s" +#~ " at location %(image_location)s" +#~ msgstr "" + diff --git a/cinder/locale/sv/LC_MESSAGES/cinder.po b/cinder/locale/sv/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..76677a9b8a --- /dev/null +++ b/cinder/locale/sv/LC_MESSAGES/cinder.po @@ -0,0 +1,10001 @@ +# Swedish translations for cinder. +# Copyright (C) 2013 ORGANIZATION +# This file is distributed under the same license as the cinder project. 
+# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Cinder\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-02-03 06:16+0000\n" +"PO-Revision-Date: 2013-11-26 20:45+0000\n" +"Last-Translator: openstackjenkins \n" +"Language-Team: Swedish " +"(http://www.transifex.com/projects/p/openstack/language/sv/)\n" +"Plural-Forms: nplurals=2; plural=(n != 1)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:102 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:66 cinder/brick/exception.py:33 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:88 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:107 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:112 +#, python-format +msgid "Volume driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:116 +#, python-format +msgid "Backup driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:120 +#, python-format +msgid "Connection to glance failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:124 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:129 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:133 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:137 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:141 +msgid "Volume driver not ready." +msgstr "" + +#: cinder/exception.py:145 cinder/brick/exception.py:74 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:150 +#, python-format +msgid "Invalid snapshot: %(reason)s" +msgstr "" + +#: cinder/exception.py:154 +#, python-format +msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:159 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:163 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:167 +msgid "The results are invalid." +msgstr "" + +#: cinder/exception.py:171 +#, python-format +msgid "Invalid input received: %(reason)s" +msgstr "" + +#: cinder/exception.py:175 +#, python-format +msgid "Invalid volume type: %(reason)s" +msgstr "" + +#: cinder/exception.py:179 +#, python-format +msgid "Invalid volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:183 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:187 +#, python-format +msgid "Invalid host: %(reason)s" +msgstr "" + +#: cinder/exception.py:193 cinder/brick/exception.py:81 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:197 +#, python-format +msgid "Invalid auth key: %(reason)s" +msgstr "" + +#: cinder/exception.py:201 +#, python-format +msgid "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" +msgstr "" + +#: cinder/exception.py:206 +msgid "Service is unavailable at this time." 
+msgstr "" + +#: cinder/exception.py:210 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:214 +#, python-format +msgid "The device in the path %(path)s is unavailable: %(reason)s" +msgstr "" + +#: cinder/exception.py:218 +#, python-format +msgid "Expected a uuid but received %(uuid)s." +msgstr "" + +#: cinder/exception.py:222 cinder/brick/exception.py:68 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:228 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:232 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "" +"Volume %(volume_id)s has no administration metadata with key " +"%(metadata_key)s." +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Invalid metadata: %(reason)s" +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Invalid metadata size: %(reason)s" +msgstr "" + +#: cinder/exception.py:250 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:255 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:264 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:269 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s deletion is not allowed with volumes " +"present with the type." +msgstr "" + +#: cinder/exception.py:274 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:278 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:282 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:287 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:291 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:295 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:328 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:332 +#, python-format +msgid "Unknown quota resources %(unknown)s." 
+msgstr "" + +#: cinder/exception.py:336 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:340 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:344 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:348 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:352 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:356 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:365 +#, python-format +msgid "Volume Type %(id)s already exists." +msgstr "" + +#: cinder/exception.py:369 +#, python-format +msgid "Volume type encryption for type %(type_id)s already exists." +msgstr "" + +#: cinder/exception.py:373 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:377 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:381 +#, python-format +msgid "Could not find parameter %(param)s" +msgstr "" + +#: cinder/exception.py:385 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:389 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:398 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:402 +#, python-format +msgid "Quota exceeded: code=%(code)s" +msgstr "" + +#: cinder/exception.py:409 +#, python-format +msgid "" +"Requested volume or snapshot exceeds allowed Gigabytes quota. Requested " +"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." +msgstr "" + +#: cinder/exception.py:415 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:419 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:423 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:427 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:432 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:436 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:440 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:444 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:449 +#, python-format +msgid "Glance metadata for volume/snapshot %(id)s cannot be found." 
+msgstr "" + +#: cinder/exception.py:453 +#, python-format +msgid "Failed to export for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:457 +#, python-format +msgid "Failed to create metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:461 +#, python-format +msgid "Failed to update metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:465 +#, python-format +msgid "Failed to copy metadata to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:469 +#, python-format +msgid "Failed to copy image to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:473 +msgid "Invalid Ceph args provided for backup rbd operation" +msgstr "" + +#: cinder/exception.py:477 +msgid "An error has occurred during backup operation" +msgstr "" + +#: cinder/exception.py:481 +msgid "Backup RBD operation failed" +msgstr "" + +#: cinder/exception.py:485 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:489 +msgid "Failed to identify volume backend." +msgstr "" + +#: cinder/exception.py:493 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "" + +#: cinder/exception.py:497 +#, python-format +msgid "Connection to swift failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:501 +#, python-format +msgid "Transfer %(transfer_id)s could not be found." +msgstr "" + +#: cinder/exception.py:505 +#, python-format +msgid "Volume migration failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:509 +#, python-format +msgid "SSH command injection detected: %(command)s" +msgstr "" + +#: cinder/exception.py:513 +#, python-format +msgid "QoS Specs %(specs_id)s already exists." +msgstr "" + +#: cinder/exception.py:517 +#, python-format +msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:522 +#, python-format +msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "No such QoS spec %(specs_id)s." +msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:536 +#, python-format +msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:541 +#, python-format +msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." +msgstr "" + +#: cinder/exception.py:546 +#, python-format +msgid "Invalid qos specs: %(reason)s" +msgstr "" + +#: cinder/exception.py:550 +#, python-format +msgid "QoS Specs %(specs_id)s is still associated with entities." +msgstr "" + +#: cinder/exception.py:554 +#, python-format +msgid "key manager error: %(reason)s" +msgstr "" + +#: cinder/exception.py:560 +msgid "Coraid Cinder Driver exception." +msgstr "" + +#: cinder/exception.py:564 +msgid "Failed to encode json data." +msgstr "" + +#: cinder/exception.py:568 +msgid "Login on ESM failed." +msgstr "" + +#: cinder/exception.py:572 +msgid "Relogin on ESM failed." +msgstr "" + +#: cinder/exception.py:576 +#, python-format +msgid "Group with name \"%(group_name)s\" not found." +msgstr "" + +#: cinder/exception.py:580 +#, python-format +msgid "ESM configure request failed: %(message)s." +msgstr "" + +#: cinder/exception.py:584 +#, python-format +msgid "Coraid ESM not available with reason: %(reason)s." +msgstr "" + +#: cinder/exception.py:589 +msgid "Zadara Cinder Driver exception." 
+msgstr "" + +#: cinder/exception.py:593 +#, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:597 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:601 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:605 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:609 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:613 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:618 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:622 +msgid "SolidFire Cinder Driver exception" +msgstr "" + +#: cinder/exception.py:626 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:630 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:636 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:641 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:645 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:649 cinder/exception.py:662 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:654 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:658 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/manager.py:133 +msgid "Notifying Schedulers of capabilities ..." +msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:105 +#, python-format +msgid "" +"Default quota for resource: %(res)s is set by the default quota flag: " +"quota_%(res)s, it is now deprecated. Please use the the default quota " +"class for default quota." +msgstr "" + +#: cinder/quota.py:748 +#, python-format +msgid "Created reservations %s" +msgstr "" + +#: cinder/quota.py:770 +#, python-format +msgid "Failed to commit reservations %s" +msgstr "" + +#: cinder/quota.py:790 +#, python-format +msgid "Failed to roll back reservations %s" +msgstr "" + +#: cinder/quota.py:876 +msgid "Cannot register resource" +msgstr "" + +#: cinder/quota.py:879 +msgid "Cannot register resources" +msgstr "" + +#: cinder/quota_utils.py:46 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume - " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/quota_utils.py:56 cinder/transfer/api.py:168 +#: cinder/volume/flows/api/create_volume.py:520 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/service.py:95 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:108 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:148 +#, python-format +msgid "" +"Report interval must be less than service down time. Current config " +"service_down_time: %(service_down_time)s, report_interval for this: " +"service is: %(report_interval)s. 
Setting global service_down_time to: " +"%(new_down_time)s" +msgstr "" + +#: cinder/service.py:216 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:255 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:270 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:276 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:298 +#, python-format +msgid "" +"Value of config option %(name)s_workers must be integer greater than 1. " +"Input value ignored." +msgstr "" + +#: cinder/service.py:373 +msgid "serve() can only be called once" +msgstr "" + +#: cinder/service.py:379 cinder/openstack/common/service.py:166 +#: cinder/openstack/common/service.py:384 +msgid "Full set of CONF:" +msgstr "" + +#: cinder/service.py:387 +#, python-format +msgid "%s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Can not translate %s to integer." +msgstr "" + +#: cinder/utils.py:127 +#, python-format +msgid "May specify only one of %s" +msgstr "" + +#: cinder/utils.py:212 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:228 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:412 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:423 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:698 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:759 +#, python-format +msgid "Volume driver %s not initialized" +msgstr "" + +#: cinder/wsgi.py:127 cinder/openstack/common/sslutils.py:50 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: cinder/wsgi.py:130 cinder/openstack/common/sslutils.py:53 +#, python-format +msgid "Unable to find ca_file : %s" +msgstr "" + +#: cinder/wsgi.py:133 cinder/openstack/common/sslutils.py:56 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: cinder/wsgi.py:136 cinder/openstack/common/sslutils.py:59 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:169 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:206 +#, python-format +msgid "Started %(name)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:244 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:313 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." 
+msgstr "" + +#: cinder/api/common.py:92 cinder/api/common.py:126 cinder/volume/api.py:266 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:95 cinder/api/common.py:130 cinder/volume/api.py:263 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:120 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:134 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:162 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:189 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:182 +msgid "Initializing extension manager." +msgstr "" + +#: cinder/api/extensions.py:197 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:235 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:236 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:240 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:256 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:262 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:276 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:287 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:356 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:266 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:463 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:786 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:907 +msgid "subclasses must implement construct()!" 
+msgstr "" + +#: cinder/api/contrib/admin_actions.py:81 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:227 +#: cinder/api/contrib/volume_transfer.py:157 +#: cinder/api/contrib/volume_transfer.py:193 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:224 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:60 +msgid "Snapshot not found." +msgstr "" + +#: cinder/api/contrib/hosts.py:86 cinder/api/openstack/wsgi.py:245 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:136 +#, python-format +msgid "Host '%s' could not be found." +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:168 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:180 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:206 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:214 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:111 +msgid "Please specify a name for QoS specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:220 +msgid "Failed to disassociate qos specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:222 +msgid "Qos specs still in use." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:298 +#: cinder/api/contrib/qos_specs_manage.py:351 +msgid "Volume Type id must not be None." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:72 +msgid "Missing required element quota_class_set in request body." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:81 +msgid "Quota class limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:85 +msgid "Quota class limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:60 +msgid "Quota limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quotas.py:65 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:100 +msgid "Missing required element quota_set in request body." +msgstr "" + +#: cinder/api/contrib/quotas.py:111 +#, python-format +msgid "Bad key(s) in quota set: %s" +msgstr "" + +#: cinder/api/contrib/scheduler_hints.py:36 +msgid "Malformed scheduler_hints attribute" +msgstr "" + +#: cinder/api/contrib/services.py:84 +msgid "" +"Query by service parameter is deprecated. Please use binary parameter " +"instead." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:51 +msgid "'status' must be specified." 
+msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:61 +#, python-format +msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:67 +#, python-format +msgid "" +"Provided snapshot status %(provided)s not allowed for snapshot with " +"status %(current)s." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:79 +msgid "progress must be an integer percentage" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:101 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:105 +#: cinder/api/v1/snapshot_metadata.py:75 cinder/api/v1/volume_metadata.py:75 +#: cinder/api/v2/snapshot_metadata.py:75 cinder/api/v2/volume_metadata.py:74 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:108 +#: cinder/api/v1/snapshot_metadata.py:79 cinder/api/v1/volume_metadata.py:79 +#: cinder/api/v2/snapshot_metadata.py:79 cinder/api/v2/volume_metadata.py:78 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:150 +msgid "" +"Key names can only contain alphanumeric characters, underscores, periods," +" colons and hyphens." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:99 +#, python-format +msgid "" +"Invalid request to attach volume to an instance %(instance_uuid)s and a " +"host %(host_name)s simultaneously" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:107 +msgid "Invalid request to attach volume to an invalid target" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:111 +msgid "" +"Invalid request to attach volume with an invalid mode. Attaching mode " +"should be 'rw' or 'ro'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:196 +msgid "Unable to fetch connection information from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:216 +msgid "Unable to terminate volume connection from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:229 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:237 +msgid "Bad value for 'force' parameter." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:240 +msgid "'force' is not string or bool." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:280 +msgid "New volume size must be specified as an integer." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:299 +msgid "Must specify readonly in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:307 +msgid "Bad value for 'readonly'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:311 +msgid "'readonly' not string or bool" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:325 +msgid "New volume type must be specified." 
+msgstr "" + +#: cinder/api/contrib/volume_transfer.py:131 +msgid "Listing volume transfers" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:147 +#, python-format +msgid "Creating new volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:162 +#, python-format +msgid "Creating transfer of volume %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:183 +#, python-format +msgid "Accepting volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:196 +#, python-format +msgid "Accepting transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:217 +#, python-format +msgid "Delete transfer with id: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:64 +msgid "key_size must be non-negative" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:67 +msgid "key_size must be an integer" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:73 +msgid "provider must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:75 +msgid "control_location must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:83 +#, python-format +msgid "Valid control location are: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:111 +msgid "Cannot create encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:115 +msgid "Create body is not valid." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:157 +msgid "Cannot delete encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/middleware/auth.py:108 +msgid "Invalid service catalog json." +msgstr "" + +#: cinder/api/middleware/fault.py:44 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:53 cinder/api/openstack/wsgi.py:984 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/fault.py:69 +#, python-format +msgid "%(exception)s: %(explanation)s" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:55 cinder/api/middleware/sizelimit.py:64 +#: cinder/api/middleware/sizelimit.py:78 +msgid "Request is too large." +msgstr "" + +#: cinder/api/openstack/__init__.py:69 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:80 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:104 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:126 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." 
+msgstr "" + +#: cinder/api/openstack/wsgi.py:220 cinder/api/openstack/wsgi.py:634 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:639 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:677 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:682 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:685 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:793 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:799 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:803 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:914 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:917 cinder/api/openstack/wsgi.py:930 +#: cinder/api/v1/snapshot_metadata.py:53 cinder/api/v1/snapshot_metadata.py:71 +#: cinder/api/v1/snapshot_metadata.py:96 cinder/api/v1/snapshot_metadata.py:121 +#: cinder/api/v1/volume_metadata.py:53 cinder/api/v1/volume_metadata.py:71 +#: cinder/api/v1/volume_metadata.py:96 cinder/api/v1/volume_metadata.py:121 +#: cinder/api/v2/snapshot_metadata.py:53 cinder/api/v2/snapshot_metadata.py:71 +#: cinder/api/v2/snapshot_metadata.py:96 cinder/api/v2/snapshot_metadata.py:121 +#: cinder/api/v2/volume_metadata.py:52 cinder/api/v2/volume_metadata.py:70 +#: cinder/api/v2/volume_metadata.py:95 cinder/api/v2/volume_metadata.py:120 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:927 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:939 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:987 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:139 cinder/api/v2/limits.py:138 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:264 cinder/api/v2/limits.py:261 +msgid "This request was rate-limited." 
+msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:37 cinder/api/v1/snapshot_metadata.py:117 +#: cinder/api/v1/snapshot_metadata.py:156 cinder/api/v2/snapshot_metadata.py:37 +#: cinder/api/v2/snapshot_metadata.py:117 +#: cinder/api/v2/snapshot_metadata.py:156 +msgid "snapshot does not exist" +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:139 +#: cinder/api/v1/snapshot_metadata.py:149 cinder/api/v1/volume_metadata.py:139 +#: cinder/api/v1/volume_metadata.py:149 cinder/api/v2/snapshot_metadata.py:139 +#: cinder/api/v2/snapshot_metadata.py:149 cinder/api/v2/volume_metadata.py:138 +#: cinder/api/v2/volume_metadata.py:148 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:119 cinder/api/v2/snapshots.py:120 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:184 +msgid "'volume_id' must be specified" +msgstr "" + +#: cinder/api/v1/snapshots.py:182 cinder/api/v2/snapshots.py:193 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:186 cinder/api/v2/snapshots.py:202 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:37 cinder/api/v1/volume_metadata.py:117 +#: cinder/api/v1/volume_metadata.py:156 cinder/api/v2/volume_metadata.py:36 +#: cinder/api/v2/volume_metadata.py:116 cinder/api/v2/volume_metadata.py:155 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:111 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:290 cinder/api/v2/volumes.py:228 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:344 cinder/api/v1/volumes.py:348 +#: cinder/api/v2/volumes.py:298 cinder/api/v2/volumes.py:302 +msgid "Invalid imageRef provided." +msgstr "" + +#: cinder/api/v1/volumes.py:388 cinder/api/v2/volumes.py:354 +#, python-format +msgid "snapshot id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:401 +#, python-format +msgid "source vol id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:412 cinder/api/v2/volumes.py:377 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/v1/volumes.py:496 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/snapshots.py:111 cinder/api/v2/snapshots.py:126 +#: cinder/api/v2/snapshots.py:267 +msgid "Snapshot could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:174 cinder/api/v2/snapshots.py:234 +#: cinder/api/v2/volumes.py:313 cinder/api/v2/volumes.py:419 +#, python-format +msgid "Missing required element '%s' in request body" +msgstr "" + +#: cinder/api/v2/snapshots.py:190 cinder/api/v2/volumes.py:217 +#: cinder/api/v2/volumes.py:234 cinder/api/v2/volumes.py:449 +msgid "Volume could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:230 cinder/api/v2/volumes.py:415 +msgid "Missing request body" +msgstr "" + +#: cinder/api/v2/types.py:70 +msgid "Volume type not found" +msgstr "" + +#: cinder/api/v2/volumes.py:237 +msgid "Volume cannot be deleted while in attached state" +msgstr "" + +#: cinder/api/v2/volumes.py:343 +msgid "Volume type not found." 
+msgstr "" + +#: cinder/api/v2/volumes.py:366 +#, python-format +msgid "source volume id:%s not found" +msgstr "" + +#: cinder/api/v2/volumes.py:472 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:66 +msgid "Backup status must be available or error" +msgstr "" + +#: cinder/backup/api.py:105 +msgid "Volume to be backed up must be available" +msgstr "" + +#: cinder/backup/api.py:140 +msgid "Backup status must be available" +msgstr "" + +#: cinder/backup/api.py:145 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:154 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:170 +msgid "Volume to be restored to must be available" +msgstr "" + +#: cinder/backup/api.py:176 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." +msgstr "" + +#: cinder/backup/api.py:181 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:97 +msgid "NULL host not allowed for volume backend lookup." +msgstr "" + +#: cinder/backup/manager.py:100 +#, python-format +msgid "Checking hostname '%s' for backend info." +msgstr "" + +#: cinder/backup/manager.py:107 +#, python-format +msgid "Backend not found in hostname (%s) so using default." +msgstr "" + +#: cinder/backup/manager.py:117 +#, python-format +msgid "Manager requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:120 cinder/backup/manager.py:132 +msgid "Fetching default backend." +msgstr "" + +#: cinder/backup/manager.py:123 +#, python-format +msgid "Volume manager for backend '%s' does not exist." +msgstr "" + +#: cinder/backup/manager.py:129 +#, python-format +msgid "Driver requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:147 +#, python-format +msgid "" +"Registering backend %(backend)s (host=%(host)s " +"backend_name=%(backend_name)s)." +msgstr "" + +#: cinder/backup/manager.py:154 +#, python-format +msgid "Registering default backend %s." +msgstr "" + +#: cinder/backup/manager.py:158 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)." +msgstr "" + +#: cinder/backup/manager.py:165 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s." +msgstr "" + +#: cinder/backup/manager.py:184 +msgid "Cleaning up incomplete backup operations." +msgstr "" + +#: cinder/backup/manager.py:189 +#, python-format +msgid "Resetting volume %s to available (was backing-up)." +msgstr "" + +#: cinder/backup/manager.py:194 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)." +msgstr "" + +#: cinder/backup/manager.py:206 +#, python-format +msgid "Resetting backup %s to error (was creating)." +msgstr "" + +#: cinder/backup/manager.py:212 +#, python-format +msgid "Resetting backup %s to available (was restoring)." +msgstr "" + +#: cinder/backup/manager.py:217 +#, python-format +msgid "Resuming delete on backup: %s." +msgstr "" + +#: cinder/backup/manager.py:225 +#, python-format +msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:237 +#, python-format +msgid "" +"Create backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s." 
+msgstr "" + +#: cinder/backup/manager.py:249 +#, python-format +msgid "" +"Create backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:282 +#, python-format +msgid "Create backup finished. backup: %s." +msgstr "" + +#: cinder/backup/manager.py:286 +#, python-format +msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:299 +#, python-format +msgid "" +"Restore backup aborted: expected volume status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:310 +#, python-format +msgid "" +"Restore backup aborted: expected backup status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:329 +#, python-format +msgid "" +"Restore backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:360 +#, python-format +msgid "" +"Restore backup finished, backup %(backup_id)s restored to volume " +"%(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:379 +#, python-format +msgid "Delete backup started, backup: %s." +msgstr "" + +#: cinder/backup/manager.py:386 +#, python-format +msgid "" +"Delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:399 +#, python-format +msgid "" +"Delete backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:422 +#, python-format +msgid "Delete backup finished, backup %s deleted." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:116 +msgid "" +"rbd striping not supported - ignoring configuration settings for rbd " +"striping" +msgstr "" + +#: cinder/backup/drivers/ceph.py:147 +#, python-format +msgid "invalid user '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:213 +msgid "backup_id required" +msgstr "" + +#: cinder/backup/drivers/ceph.py:224 +#, python-format +msgid "discarding %(length)s bytes from offset %(offset)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:232 +#, python-format +msgid "writing zeroes chunk %d" +msgstr "" + +#: cinder/backup/drivers/ceph.py:246 +#, python-format +msgid "transferring data between '%(src)s' and '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:250 +#, python-format +msgid "%(chunks)s chunks of %(bytes)s bytes to be transferred" +msgstr "" + +#: cinder/backup/drivers/ceph.py:269 +#, python-format +msgid "transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:279 +#, python-format +msgid "transferring remaining %s bytes" +msgstr "" + +#: cinder/backup/drivers/ceph.py:295 +#, python-format +msgid "creating base image '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:322 cinder/backup/drivers/ceph.py:603 +#, python-format +msgid "deleting backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:325 +msgid "no backup snapshot to delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:361 +#, python-format +msgid "trying diff format name format basename='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:369 +#, python-format +msgid "image %s not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:377 +#, python-format +msgid "base image still has %s snapshots so skipping base image delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:382 +#, python-format +msgid "deleting base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:389 +#, python-format +msgid "image busy, retrying %(retries)s more time(s) in %(delay)ss" +msgstr "" + +#: cinder/backup/drivers/ceph.py:394 +msgid "max retries reached - raising error" +msgstr "" + +#: cinder/backup/drivers/ceph.py:397 +#, python-format +msgid "base backup image='%s' deleted)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:407 +#, python-format +msgid "deleting source snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:453 +#, python-format +msgid "performing differential transfer from '%(src)s' to '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:478 +#, python-format +msgid "rbd diff op failed - (ret=%(ret)s stderr=%(stderr)s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:488 +#, python-format +msgid "image '%s' not found - trying diff format name" +msgstr "" + +#: cinder/backup/drivers/ceph.py:493 +#, python-format +msgid "diff format image '%s' not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:528 +#, python-format +msgid "using --from-snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:543 +#, python-format +msgid "source snap '%s' is stale so deleting" +msgstr "" + +#: cinder/backup/drivers/ceph.py:555 +#, python-format +msgid "" +"snap='%(snap)s' does not exist in base image='%(base)s' - aborting " +"incremental backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:566 +#, python-format +msgid "creating backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:586 +#, python-format +msgid "differential backup transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:595 +msgid "differential backup transfer failed" +msgstr "" + +#: 
cinder/backup/drivers/ceph.py:625 +#, python-format +msgid "creating base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:634 +msgid "copying data" +msgstr "" + +#: cinder/backup/drivers/ceph.py:694 +#, python-format +msgid "looking for snapshot of backup base '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:697 +#, python-format +msgid "backup base '%s' has no snapshots" +msgstr "" + +#: cinder/backup/drivers/ceph.py:704 +#, python-format +msgid "backup '%s' has no snapshot" +msgstr "" + +#: cinder/backup/drivers/ceph.py:708 +#, python-format +msgid "backup should only have one snapshot but instead has %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:713 +#, python-format +msgid "found snapshot '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:734 +msgid "need non-zero volume size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:751 +#, python-format +msgid "Starting backup of volume='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:764 +msgid "forcing full backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:776 +#, python-format +msgid "backup '%s' finished." +msgstr "" + +#: cinder/backup/drivers/ceph.py:834 +msgid "adjusting restore vol size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:846 +#, python-format +msgid "trying incremental restore from base='%(base)s' snap='%(snap)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:858 +msgid "differential restore failed, trying full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:869 +#, python-format +msgid "restore transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:916 +#, python-format +msgid "rbd has %s extents" +msgstr "" + +#: cinder/backup/drivers/ceph.py:938 +msgid "dest volume is original volume - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:959 +msgid "destination has extents - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:964 +#, python-format +msgid "no restore point found for backup='%s', forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:995 +msgid "forcing full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1005 +#, python-format +msgid "starting restore from Ceph backup=%(src)s to volume=%(dest)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1016 +msgid "volume_file does not support fileno() so skipping fsync()" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1021 +msgid "restore finished successfully." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:1023 +#, python-format +msgid "restore finished with error - %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1029 +#, python-format +msgid "delete started for backup=%s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1034 +msgid "rbd image not found but continuing anyway so that db entry can be removed" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1037 +#, python-format +msgid "delete '%s' finished with warning" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1039 +#, python-format +msgid "delete '%s' finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:106 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:123 +#, python-format +msgid "single_user auth mode enabled, but %(param)s not set" +msgstr "" + +#: cinder/backup/drivers/swift.py:141 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:146 +#, python-format +msgid "container %s does not exist" +msgstr "" + +#: cinder/backup/drivers/swift.py:151 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/drivers/swift.py:157 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:173 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:182 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:192 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:209 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/drivers/swift.py:214 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:219 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:224 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/drivers/swift.py:234 +#, python-format +msgid "volume size %d is invalid." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:248 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:271 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/drivers/swift.py:278 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:287 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/drivers/swift.py:291 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/drivers/swift.py:297 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:301 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:304 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:312 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/drivers/swift.py:328 cinder/backup/drivers/tsm.py:324 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/drivers/swift.py:345 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/drivers/swift.py:350 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:356 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/drivers/swift.py:362 +#, python-format +msgid "" +"restoring object from swift. backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:378 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/drivers/swift.py:401 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:409 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:423 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:428 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:432 cinder/backup/drivers/tsm.py:378 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:446 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:455 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:458 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:468 cinder/backup/drivers/tsm.py:440 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/backup/drivers/tsm.py:85 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to create device hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:143 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to obtain backup success notification from " +"server.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:173 +#, python-format +msgid "" +"restore: %(vol_id)s Failed.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:199 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a block device." +msgstr "" + +#: cinder/backup/drivers/tsm.py:206 +#, python-format +msgid "backup: %(vol_id)s Failed. Cannot obtain real path to device %(path)s." +msgstr "" + +#: cinder/backup/drivers/tsm.py:213 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a file." +msgstr "" + +#: cinder/backup/drivers/tsm.py:260 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to remove backup hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:286 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to TSM, volume path: " +"%(volume_path)s," +msgstr "" + +#: cinder/backup/drivers/tsm.py:298 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:308 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:338 +#, python-format +msgid "" +"restore: starting restore of backup from TSM to volume %(volume_id)s, " +"backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:352 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:362 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:413 +#, python-format +msgid "" +"delete: %(vol_id)s Failed to run dsmc with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:421 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments with " +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:432 +#, python-format +msgid "" +"delete: %(vol_id)s Failed with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/brick/exception.py:55 +#, python-format +msgid "Exception in string format operation. 
msg='%s'" +msgstr "" + +#: cinder/brick/exception.py:85 +msgid "We are unable to locate any Fibre Channel devices." +msgstr "" + +#: cinder/brick/exception.py:89 +msgid "Unable to find a Fibre Channel volume device." +msgstr "" + +#: cinder/brick/exception.py:93 +#, python-format +msgid "Volume device not found at %(device)s." +msgstr "" + +#: cinder/brick/exception.py:97 +#, python-format +msgid "Unable to find Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:101 +#, python-format +msgid "Failed to create Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:105 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:109 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:113 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:117 +#, python-format +msgid "Connect to volume via protocol %(protocol)s not supported." +msgstr "" + +#: cinder/brick/initiator/connector.py:127 +#, python-format +msgid "Invalid InitiatorConnector protocol specified %(protocol)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:140 +#, python-format +msgid "Failed to access the device on the path %(path)s: %(error)s %(info)s." +msgstr "" + +#: cinder/brick/initiator/connector.py:229 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. Try" +" number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:242 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:317 +#, python-format +msgid "Could not find the iSCSI Initiator File %s" +msgstr "" + +#: cinder/brick/initiator/connector.py:609 +msgid "We are unable to locate any Fibre Channel devices" +msgstr "" + +#: cinder/brick/initiator/connector.py:619 +#, python-format +msgid "Looking for Fibre Channel dev %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:629 +msgid "Fibre Channel volume device not found." +msgstr "" + +#: cinder/brick/initiator/connector.py:633 +#, python-format +msgid "Fibre volume not yet found. Will rescan & retry. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:649 +#, python-format +msgid "Found Fibre Channel volume %(name)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:658 +#, python-format +msgid "Multipath device discovered %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:776 +#, python-format +msgid "AoE volume not yet found at: %(path)s. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:789 +#, python-format +msgid "Found AoE device %(path)s (after %(tries)s rediscover)" +msgstr "" + +#: cinder/brick/initiator/connector.py:815 +#, python-format +msgid "aoe-discover: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:825 +#, python-format +msgid "aoe-revalidate %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:834 +#, python-format +msgid "aoe-flush %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:858 +msgid "" +"Connection details not present. RemoteFsClient may not initialize " +"properly." 
+msgstr "" + +#: cinder/brick/initiator/connector.py:915 +msgid "Invalid connection_properties specified no device_path attribute" +msgstr "" + +#: cinder/brick/initiator/linuxfc.py:50 cinder/brick/initiator/linuxfc.py:56 +msgid "systool is not installed" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:99 +#: cinder/brick/initiator/linuxscsi.py:107 +#: cinder/brick/initiator/linuxscsi.py:124 +#, python-format +msgid "multipath call failed exit (%(code)s)" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:145 +#, python-format +msgid "Couldn't find multipath device %(line)s" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:149 +#, python-format +msgid "Found multipath device = %(mdev)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:140 +msgid "Attempting recreate of backing lun..." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:158 +#, python-format +msgid "" +"Failed to recover attempt to create iscsi backing lun for volume " +"id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:177 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:184 +#, python-format +msgid "" +"Created volume path %(vp)s,\n" +"content: %(vc)%" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:216 cinder/brick/iscsi/iscsi.py:365 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:227 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:258 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:262 +#, python-format +msgid "Volume path %s does not exist, nothing to remove." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:280 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:290 cinder/brick/iscsi/iscsi.py:550 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:375 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:469 +msgid "cinder-rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:489 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:513 cinder/brick/iscsi/iscsi.py:522 +#, python-format +msgid "Failed to create iscsi target for volume id:%s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:532 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:542 +#, python-format +msgid "Failed to remove iscsi target for volume id:%s." 
+msgstr "" + +#: cinder/brick/iscsi/iscsi.py:571 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 cinder/brick/local_dev/lvm.py:158 +#: cinder/brick/local_dev/lvm.py:474 cinder/brick/local_dev/lvm.py:503 +#: cinder/brick/local_dev/lvm.py:546 cinder/brick/local_dev/lvm.py:609 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 cinder/brick/local_dev/lvm.py:159 +#: cinder/brick/local_dev/lvm.py:475 cinder/brick/local_dev/lvm.py:504 +#: cinder/brick/local_dev/lvm.py:547 cinder/brick/local_dev/lvm.py:610 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 cinder/brick/local_dev/lvm.py:160 +#: cinder/brick/local_dev/lvm.py:476 cinder/brick/local_dev/lvm.py:505 +#: cinder/brick/local_dev/lvm.py:548 cinder/brick/local_dev/lvm.py:611 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, python-format +msgid "Unable to locate Volume Group %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:157 +msgid "Error querying thin pool about data_percent" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:370 +#, python-format +msgid "Unable to find VG: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:420 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:473 +msgid "Error creating Volume" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:489 +#, python-format +msgid "Unable to find LV: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:502 +msgid "Error creating snapshot" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:545 +msgid "Error activating LV" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:563 +#, python-format +msgid "Error reported running lvremove: CMD: %(command)s, RESPONSE: %(response)s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:568 +msgid "Attempting udev settle and retry of lvremove..." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:608 +msgid "Error extending Volume" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:39 +msgid "nfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:45 +msgid "glusterfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:86 +#, python-format +msgid "Already mounted: %s" +msgstr "" + +#: cinder/common/config.py:125 +msgid "Deploy v1 of the Cinder API." +msgstr "" + +#: cinder/common/config.py:128 +msgid "Deploy v2 of the Cinder API." +msgstr "" + +#: cinder/common/sqlalchemyutils.py:66 +#: cinder/openstack/common/db/sqlalchemy/utils.py:72 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:114 +#: cinder/openstack/common/db/sqlalchemy/utils.py:120 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/compute/nova.py:97 +#, python-format +msgid "Novaclient connection created using URL: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:63 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:190 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:843 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1842 +#, python-format +msgid "VolumeType %s deletion failed, VolumeType in use." 
+msgstr "" + +#: cinder/db/sqlalchemy/api.py:2530 +#, python-format +msgid "No backup with id %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2615 +msgid "Volume must be available" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2639 +#, python-format +msgid "Volume in unexpected state %s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2662 +#, python-format +msgid "" +"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " +"%(status)s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:37 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:64 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:240 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:269 +msgid "Downgrade from initial Cinder install is unsupported." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:49 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:74 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:105 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:45 +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:48 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:46 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:127 +msgid "Dropping foreign key reservations_ibfk_1 failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:133 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:140 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:147 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:60 +msgid "Exception while creating table 'volume_glance_metadata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:75 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:68 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:58 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:61 +msgid "transfers table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:31 +msgid "migrations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:61 +#, python-format +msgid "Table |%s| not created" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:37 +#, python-format +msgid "Exception while dropping table %s." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:100 +#, python-format +msgid "Exception while creating table %s." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:34 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:43 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:49 +#, python-format +msgid "Column |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:92 +msgid "encryption_key_id column not dropped from volumes" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:100 +msgid "encryption_key_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:105 +msgid "volume_type_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:113 +msgid "encryption table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:49 +msgid "Table quality_of_service_specs not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:60 +msgid "Added qos_specs_id column to volume type table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:85 +msgid "Dropping foreign key volume_types_ibfk_1 failed" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:93 +msgid "Dropping qos_specs_id column failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:100 +msgid "Dropping quality_of_service_specs table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:59 +msgid "volume_admin_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:48 +msgid "" +"Found existing 'default' entries in the quota_classes table. Skipping " +"insertion of default values." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:72 +msgid "Added default quota class data into the DB." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:74 +msgid "Default quota class data not inserted into the DB." +msgstr "" + +#: cinder/image/glance.py:161 cinder/image/glance.py:169 +#, python-format +msgid "Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:94 cinder/image/image_utils.py:199 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/image/image_utils.py:101 +#, python-format +msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:109 cinder/image/image_utils.py:192 +#, python-format +msgid "" +"Size is %(image_size)dGB and doesn't fit in a volume of size " +"%(volume_size)dGB." +msgstr "" + +#: cinder/image/image_utils.py:157 +#, python-format +msgid "" +"qemu-img is not installed and image is of type %s. Only RAW images can " +"be used if qemu-img is not installed." +msgstr "" + +#: cinder/image/image_utils.py:164 +msgid "" +"qemu-img is not installed and the disk format is not specified. Only RAW" +" images can be used if qemu-img is not installed." 
+msgstr "" + +#: cinder/image/image_utils.py:178 +#, python-format +msgid "Copying image from %(tmp)s to volume %(dest)s - size: %(size)s" +msgstr "" + +#: cinder/image/image_utils.py:206 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:224 +#, python-format +msgid "Converted to %(vol_format)s, but format is now %(file_format)s" +msgstr "" + +#: cinder/image/image_utils.py:260 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:78 +msgid "" +"config option keymgr.fixed_key has not been defined: some operations may " +"fail unexpectedly" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:80 +msgid "keymgr.fixed_key not defined" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:134 +#, python-format +msgid "Not deleting key %s" +msgstr "" + +#: cinder/openstack/common/eventlet_backdoor.py:140 +#, python-format +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/fileutils.py:64 +#, python-format +msgid "Reloading cached file %s" +msgstr "" + +#: cinder/openstack/common/gettextutils.py:252 +msgid "Message objects do not support addition." +msgstr "" + +#: cinder/openstack/common/gettextutils.py:261 +msgid "" +"Message objects do not support str() because they may contain non-ascii " +"characters. Please use unicode() or translate() instead." +msgstr "" + +#: cinder/openstack/common/imageutils.py:96 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:189 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:200 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:227 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:235 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/log.py:301 +#, python-format +msgid "Deprecated: %s" +msgstr "" + +#: cinder/openstack/common/log.py:402 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:453 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:623 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:82 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:89 +#: cinder/tests/brick/test_brick_connector.py:466 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:129 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:136 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:43 +#, python-format +msgid "Unexpected argument for periodic task creation: %(arg)s." 
+msgstr "" + +#: cinder/openstack/common/periodic_task.py:134 +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:139 +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:177 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:186 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." +msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:127 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/openstack/common/processutils.py:142 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:167 +#: cinder/openstack/common/processutils.py:239 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:345 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:179 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/openstack/common/processutils.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:318 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:220 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/openstack/common/processutils.py:224 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/openstack/common/service.py:175 +#: cinder/openstack/common/service.py:269 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:187 +msgid "Exception during rpc cleanup." 
+msgstr "" + +#: cinder/openstack/common/service.py:238 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:275 +msgid "Unhandled exception" +msgstr "" + +#: cinder/openstack/common/service.py:308 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/openstack/common/service.py:327 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/openstack/common/service.py:337 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/openstack/common/service.py:354 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/openstack/common/service.py:358 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/service.py:362 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/openstack/common/service.py:392 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/openstack/common/service.py:410 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/openstack/common/sslutils.py:98 +#, python-format +msgid "Invalid SSL version : %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:86 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/strutils.py:182 +#, python-format +msgid "Invalid string format: %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:189 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/openstack/common/versionutils.py:69 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and " +"may be removed in %(remove_in)s." +msgstr "" + +#: cinder/openstack/common/versionutils.py:73 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s and may be removed in " +"%(remove_in)s. It will not be superseded." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:29 +msgid "An unknown error occurred in crypto utils." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:36 +#, python-format +msgid "Block size of %(given)d is too big, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:45 +#, python-format +msgid "Length of %(given)d is too long, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/db/exception.py:44 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:487 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:538 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:610 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/utils.py:33 +msgid "Sort key supplied was not valid." +msgstr "" + +#: cinder/openstack/common/notifier/api.py:129 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:145 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:164 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. 
Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:105 +#, python-format +msgid "" +"An RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:83 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:216 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshold: %d. There " +"could be a MulticallProxyWaiter leak." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:299 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:345 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "received %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:422 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:423 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:280 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:459 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:594 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:597 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:631 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:640 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:668 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint."
+msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:166 +#: cinder/openstack/common/rpc/impl_qpid.py:164 +msgid "Failed to process message... skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:477 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:499 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:536 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:552 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:606 +#: cinder/openstack/common/rpc/impl_qpid.py:507 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:624 +#: cinder/openstack/common/rpc/impl_qpid.py:522 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:628 +#: cinder/openstack/common/rpc/impl_qpid.py:526 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:667 +#: cinder/openstack/common/rpc/impl_qpid.py:561 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:84 +#, python-format +msgid "Invalid value for qpid_topology_version: %d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:455 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:461 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:474 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:534 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:101 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:136 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:137 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:138 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:146 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:158 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:200 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:205 +msgid "You cannot send on this socket." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:267 +#, python-format +msgid "Running func with context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:305 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:387 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:437 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:443 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:475 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:481 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:497 +#, python-format +msgid "Required IPC directory does not exist at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:506 +#, python-format +msgid "Permission denied to IPC directory at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:509 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:543 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:562 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:590 +msgid "Skipping topic registration. Already registered." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:597 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:649 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:662 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:675 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:678 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:681 +#, python-format +msgid "Received message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:682 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:691 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:698 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:721 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:724 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:728 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:731 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:771 +#, python-format +msgid "topic is %s." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:815 +#, python-format +msgid "rpc_zmq_matchmaker = %(orig)s is deprecated; use %(new)s instead" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#: cinder/openstack/common/rpc/matchmaker_ring.py:79 +#: cinder/openstack/common/rpc/matchmaker_ring.py:97 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:54 +#, python-format +msgid "extra_spec requirement '%(req)s' does not match '%(cap)s'" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:67 +#, python-format +msgid "%(host_state)s fails resource_type extra_specs requirements" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:43 +msgid "Re-scheduling is disabled." +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:52 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/scheduler/driver.py:69 +msgid "Must implement host_passes_filters" +msgstr "" + +#: cinder/scheduler/driver.py:74 +msgid "Must implement find_retype_host" +msgstr "" + +#: cinder/scheduler/driver.py:78 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:82 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:98 +#, python-format +msgid "cannot place volume %(id)s on %(host)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:114 +#, python-format +msgid "No valid hosts for volume %(id)s with type %(type)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:125 +#, python-format +msgid "" +"Current host not valid for volume %(id)s with type %(type)s, migration " +"not allowed" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:156 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:174 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:207 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:259 +#, python-format +msgid "Filtered %s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:276 +#, python-format +msgid "Choosing %s" +msgstr "" + +#: cinder/scheduler/host_manager.py:264 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:269 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:294 +#, python-format +msgid "volume service is down or disabled. (host: %s)" +msgstr "" + +#: cinder/scheduler/manager.py:63 +msgid "" +"ChanceScheduler and SimpleScheduler have been deprecated due to lack of " +"support for advanced features like: volume types, volume encryption, QoS " +"etc. These two schedulers can be fully replaced by FilterScheduler with " +"certain combination of filters and weighers." 
+msgstr "" + +#: cinder/scheduler/manager.py:98 cinder/scheduler/manager.py:100 +msgid "Failed to create scheduler manager volume flow" +msgstr "" + +#: cinder/scheduler/manager.py:159 +msgid "New volume type not specified in request_spec." +msgstr "" + +#: cinder/scheduler/manager.py:174 +#, python-format +msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." +msgstr "" + +#: cinder/scheduler/manager.py:192 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:68 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%s'" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:43 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:57 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:53 +msgid "No volume_id provided to populate a request_spec from" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:116 +#, python-format +msgid "Failed to schedule_create_volume: %(cause)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:135 +#, python-format +msgid "Failed notifying on %(topic)s payload %(payload)s" +msgstr "" + +#: cinder/tests/fake_driver.py:57 cinder/volume/driver.py:784 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:76 cinder/volume/driver.py:884 +#, python-format +msgid "FAKE ISER: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:97 +msgid "local_path not implemented" +msgstr "" + +#: cinder/tests/fake_driver.py:124 cinder/tests/fake_driver.py:129 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:70 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:78 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:94 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:97 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:58 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_netapp_nfs.py:360 +#, python-format +msgid "Share %(share)s and file name %(file_name)s" +msgstr "" + +#: cinder/tests/test_rbd.py:768 cinder/volume/drivers/rbd.py:175 +msgid "flush() not supported in this version of librbd" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:260 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1507 +#, python-format +msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1510 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1515 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:60 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:61 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: 
cinder/tests/test_xiv_ds8k.py:102 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/tests/api/contrib/test_backups.py:741 +msgid "Invalid input" +msgstr "" + +#: cinder/tests/integrated/test_login.py:29 +#, python-format +msgid "volume: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:32 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:42 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:50 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:58 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:100 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:103 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:121 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:148 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:159 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:166 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/transfer/api.py:68 +msgid "Volume in unexpected state" +msgstr "" + +#: cinder/transfer/api.py:102 cinder/volume/api.py:367 +msgid "status must be available" +msgstr "" + +#: cinder/transfer/api.py:119 +#, python-format +msgid "Failed to create transfer record for %s" +msgstr "" + +#: cinder/transfer/api.py:136 +#, python-format +msgid "Attempt to transfer %s with invalid auth key." +msgstr "" + +#: cinder/transfer/api.py:156 cinder/volume/flows/api/create_volume.py:508 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/transfer/api.py:182 +#, python-format +msgid "Failed to update quota donating volume transfer id %s" +msgstr "" + +#: cinder/transfer/api.py:199 +#, python-format +msgid "Volume %s has been transferred."
+msgstr "" + +#: cinder/volume/api.py:143 +#, python-format +msgid "Unable to query if %s is in the availability zone set" +msgstr "" + +#: cinder/volume/api.py:171 cinder/volume/api.py:173 +msgid "Failed to create api volume flow" +msgstr "" + +#: cinder/volume/api.py:202 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:214 +#, python-format +msgid "Volume status must be available or error, but current status is: %s" +msgstr "" + +#: cinder/volume/api.py:224 +msgid "Volume cannot be deleted while migrating" +msgstr "" + +#: cinder/volume/api.py:229 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:285 cinder/volume/api.py:350 +#: cinder/volume/qos_specs.py:240 cinder/volume/volume_types.py:67 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:370 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:377 +msgid "status must be in-use to detach" +msgstr "" + +#: cinder/volume/api.py:388 +msgid "Volume status must be available to reserve" +msgstr "" + +#: cinder/volume/api.py:464 +msgid "Snapshot cannot be created while volume is migrating" +msgstr "" + +#: cinder/volume/api.py:468 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:490 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:502 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:553 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/api.py:581 cinder/volume/flows/api/create_volume.py:208 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:585 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:589 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:720 cinder/volume/api.py:772 +msgid "Volume status must be available/in-use." +msgstr "" + +#: cinder/volume/api.py:723 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/api.py:752 +msgid "Volume status must be available to extend." +msgstr "" + +#: cinder/volume/api.py:757 +#, python-format +msgid "" +"New size for extend must be greater than current size. (current: " +"%(size)s, extended: %(new_size)s)" +msgstr "" + +#: cinder/volume/api.py:778 +msgid "Volume is already part of an active migration" +msgstr "" + +#: cinder/volume/api.py:784 +msgid "volume must not have snapshots" +msgstr "" + +#: cinder/volume/api.py:797 +#, python-format +msgid "No available service named %s" +msgstr "" + +#: cinder/volume/api.py:803 +msgid "Destination host must be different than current host" +msgstr "" + +#: cinder/volume/api.py:833 +msgid "Source volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:837 +msgid "Destination volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:842 +#, python-format +msgid "Destination has migration_status %(stat)s, expected %(exp)s." +msgstr "" + +#: cinder/volume/api.py:853 +msgid "Volume status must be available to update readonly flag." 
+msgstr "" + +#: cinder/volume/api.py:862 +#, python-format +msgid "Unable to update type due to incorrect status on volume: %s" +msgstr "" + +#: cinder/volume/api.py:868 +#, python-format +msgid "Volume %s is already part of an active migration." +msgstr "" + +#: cinder/volume/api.py:874 +#, python-format +msgid "migration_policy must be 'on-demand' or 'never', passed: %s" +msgstr "" + +#: cinder/volume/api.py:887 +#, python-format +msgid "Invalid volume_type passed: %s" +msgstr "" + +#: cinder/volume/api.py:900 +#, python-format +msgid "New volume_type same as original: %s" +msgstr "" + +#: cinder/volume/api.py:915 +msgid "Retype cannot change encryption requirements" +msgstr "" + +#: cinder/volume/api.py:927 +msgid "Retype cannot change front-end qos specs for in-use volumes" +msgstr "" + +#: cinder/volume/driver.py:189 cinder/volume/drivers/netapp/nfs.py:174 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:282 +#, python-format +msgid "copy_data_between_volumes %(src)s -> %(dest)s." +msgstr "" + +#: cinder/volume/driver.py:295 cinder/volume/driver.py:309 +#, python-format +msgid "Failed to attach volume %(vol)s" +msgstr "" + +#: cinder/volume/driver.py:327 +#, python-format +msgid "Failed to copy volume %(src)s to %(dest)d" +msgstr "" + +#: cinder/volume/driver.py:340 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:358 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:394 +#, python-format +msgid "Unable to access the backend storage via the path %(path)s." +msgstr "" + +#: cinder/volume/driver.py:433 +#, python-format +msgid "Creating a new backup for volume %s." +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Restoring backup %(backup)s to volume %(volume)s." +msgstr "" + +#: cinder/volume/driver.py:474 +msgid "Extend volume not implemented" +msgstr "" + +#: cinder/volume/driver.py:533 cinder/volume/drivers/emc/emc_smis_iscsi.py:113 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:546 +#, python-format +msgid "ISCSI discovery attempt failed for:%s" +msgstr "" + +#: cinder/volume/driver.py:548 +#, python-format +msgid "Error from iscsiadm -m discovery: %s" +msgstr "" + +#: cinder/volume/driver.py:595 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:599 cinder/volume/drivers/emc/emc_smis_iscsi.py:156 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:696 +msgid "The volume driver requires the iSCSI initiator name in the connector." +msgstr "" + +#: cinder/volume/driver.py:726 cinder/volume/driver.py:845 +#: cinder/volume/drivers/eqlx.py:247 cinder/volume/drivers/lvm.py:359 +#: cinder/volume/drivers/zadara.py:650 +#: cinder/volume/drivers/emc/emc_smis_common.py:859 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:235 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:602 +#: cinder/volume/drivers/netapp/iscsi.py:1032 +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/nexenta/iscsi.py:538 +#: cinder/volume/drivers/windows/windows.py:205 +msgid "Updating volume stats" +msgstr "" + +#: cinder/volume/driver.py:924 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:203 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." 
+msgstr "" + +#: cinder/volume/manager.py:209 +msgid "" +"ThinLVMVolumeDriver is deprecated, please configure LVMISCSIDriver and " +"lvm_type=thin. Continuing with those settings." +msgstr "" + +#: cinder/volume/manager.py:228 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)" +msgstr "" + +#: cinder/volume/manager.py:235 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s" +msgstr "" + +#: cinder/volume/manager.py:244 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:257 +#, python-format +msgid "Failed to re-export volume %s: setting to error state" +msgstr "" + +#: cinder/volume/manager.py:264 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:271 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:273 +#, python-format +msgid "" +"Error encountered during re-exporting phase of driver initialization: " +"%(name)s" +msgstr "" + +#: cinder/volume/manager.py:283 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:328 cinder/volume/manager.py:330 +msgid "Failed to create manager volume flow" +msgstr "" + +#: cinder/volume/manager.py:374 cinder/volume/manager.py:391 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:380 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:389 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:394 +#, python-format +msgid "Cannot delete volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:422 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:427 +#, python-format +msgid "volume %s: glance metadata deleted" +msgstr "" + +#: cinder/volume/manager.py:430 +#, python-format +msgid "no glance metadata found for volume %s" +msgstr "" + +#: cinder/volume/manager.py:434 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:451 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:462 +#, python-format +msgid "snapshot %(snap_id)s: creating" +msgstr "" + +#: cinder/volume/manager.py:490 +#, python-format +msgid "" +"Failed updating %(snapshot_id)s metadata using the provided volumes " +"%(volume_id)s metadata" +msgstr "" + +#: cinder/volume/manager.py:496 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:508 cinder/volume/manager.py:518 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:526 +#, python-format +msgid "Cannot delete snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:556 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:559 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:579 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:583 +msgid "being attached by another host" +msgstr "" + +#: cinder/volume/manager.py:587 +msgid "being attached by different mode" +msgstr "" + +#: cinder/volume/manager.py:590 +msgid "status must be available or attaching" +msgstr "" + +#: cinder/volume/manager.py:698 +#, python-format +msgid 
"Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "" + +#: cinder/volume/manager.py:760 +#, python-format +msgid "Unable to fetch connection information from backend: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:807 +#, python-format +msgid "Unable to terminate volume connection: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:854 +msgid "failed to create new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:857 +msgid "timeout creating new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:880 +#, python-format +msgid "Failed to copy volume %(vol1)s to %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:909 +#, python-format +msgid "" +"migrate_volume_completion: completing migration for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:921 +#, python-format +msgid "" +"migrate_volume_completion is cleaning up an error for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:940 +#, python-format +msgid "Failed to delete migration source vol %(vol)s: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:976 +#, python-format +msgid "volume %s: calling driver migrate_volume" +msgstr "" + +#: cinder/volume/manager.py:1016 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/manager.py:1024 +#, python-format +msgid "" +"Unable to update stats, %(driver_name)s -%(driver_version)s " +"%(config_group)s driver is uninitialized." +msgstr "" + +#: cinder/volume/manager.py:1044 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/manager.py:1091 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to extend volume by %(s_size)sG, " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/manager.py:1103 +#, python-format +msgid "volume %s: extending" +msgstr "" + +#: cinder/volume/manager.py:1105 +#, python-format +msgid "volume %s: extended successfully" +msgstr "" + +#: cinder/volume/manager.py:1107 +#, python-format +msgid "volume %s: Error trying to extend volume" +msgstr "" + +#: cinder/volume/manager.py:1169 +msgid "Failed to update usages while retyping volume." +msgstr "" + +#: cinder/volume/manager.py:1170 +msgid "Failed to get old volume type quota reservations" +msgstr "" + +#: cinder/volume/manager.py:1190 +#, python-format +msgid "Volume %s: retyped succesfully" +msgstr "" + +#: cinder/volume/manager.py:1193 +#, python-format +msgid "" +"Volume %s: driver error when trying to retype, falling back to generic " +"mechanism." +msgstr "" + +#: cinder/volume/manager.py:1204 +msgid "Retype requires migration but is not allowed." +msgstr "" + +#: cinder/volume/manager.py:1212 +msgid "Volume must not have snapshots." 
+msgstr "" + +#: cinder/volume/qos_specs.py:57 +#, python-format +msgid "Valid consumer of QoS specs are: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:84 cinder/volume/qos_specs.py:105 +#: cinder/volume/qos_specs.py:155 cinder/volume/qos_specs.py:197 +#: cinder/volume/qos_specs.py:211 cinder/volume/qos_specs.py:225 +#: cinder/volume/volume_types.py:43 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:123 cinder/volume/qos_specs.py:140 +#: cinder/volume/qos_specs.py:272 cinder/volume/volume_types.py:52 +#: cinder/volume/volume_types.py:99 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/qos_specs.py:156 +#, python-format +msgid "Failed to get all associations of qos specs %s" +msgstr "" + +#: cinder/volume/qos_specs.py:189 +#, python-format +msgid "" +"Type %(type_id)s is already associated with another qos specs: " +"%(qos_specs_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:198 +#, python-format +msgid "Failed to associate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:212 +#, python-format +msgid "Failed to disassociate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:226 +#, python-format +msgid "Failed to disassociate qos specs %s." +msgstr "" + +#: cinder/volume/qos_specs.py:284 cinder/volume/volume_types.py:111 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/utils.py:144 +#, python-format +msgid "" +"Incorrect value error: %(blocksize)s, it may indicate that " +"'volume_dd_blocksize' was configured incorrectly. Fall back to default." +msgstr "" + +#: cinder/volume/volume_types.py:130 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:131 +#: cinder/volume/drivers/block_device.py:143 cinder/volume/drivers/lvm.py:654 +#: cinder/volume/drivers/lvm.py:669 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:157 cinder/volume/drivers/lvm.py:687 +#, python-format +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:183 cinder/volume/drivers/lvm.py:483 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:200 cinder/volume/drivers/lvm.py:504 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:272 cinder/volume/drivers/lvm.py:227 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:287 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:311 cinder/volume/drivers/lvm.py:300 +#: cinder/volume/drivers/zadara.py:509 cinder/volume/drivers/nexenta/nfs.py:189 +#, python-format +msgid "Creating clone of volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:380 +msgid "No free disk" +msgstr "" + +#: cinder/volume/drivers/block_device.py:393 +msgid "No big enough free disk" +msgstr "" + +#: cinder/volume/drivers/coraid.py:84 +#, python-format +msgid "Invalid ESM url scheme \"%s\". Supported https only." +msgstr "" + +#: cinder/volume/drivers/coraid.py:111 +msgid "Invalid REST handle name. Expected path." 
+msgstr "" + +#: cinder/volume/drivers/coraid.py:134 +#, python-format +msgid "Call to json.loads() failed: %(ex)s. Response: %(resp)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:224 +msgid "Session is expired. Relogin on ESM." +msgstr "" + +#: cinder/volume/drivers/coraid.py:244 +msgid "Reply is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:246 +msgid "Error message is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:284 +#, python-format +msgid "Coraid Appliance ping failed: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:297 +#, python-format +msgid "Volume \"%(name)s\" created with VSX LUN \"%(lun)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:311 +#, python-format +msgid "Volume \"%s\" deleted." +msgstr "" + +#: cinder/volume/drivers/coraid.py:315 +#, python-format +msgid "Resize volume \"%(name)s\" to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:319 +#, python-format +msgid "Repository for volume \"%(name)s\" found: \"%(repo)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:333 +#, python-format +msgid "Volume \"%(name)s\" resized. New size is %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:385 +msgid "Cannot create clone volume in different repository." +msgstr "" + +#: cinder/volume/drivers/coraid.py:505 +#, python-format +msgid "Initialize connection %(shelf)s/%(lun)s for %(name)s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:139 +#, python-format +msgid "" +"CLI output\n" +"%s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:154 +msgid "Reading CLI MOTD" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:158 +#, python-format +msgid "Setting CLI terminal width: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:162 +#, python-format +msgid "Sending CLI command: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:169 +msgid "Error executing EQL command" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:199 +#, python-format +msgid "EQL-driver: executing \"%s\"" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:208 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:383 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:215 cinder/volume/drivers/san/san.py:149 +#, python-format +msgid "Error running SSH command: %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:282 +#, python-format +msgid "Volume %s does not exist, it may have already been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:300 +#, python-format +msgid "EQL-driver: Setup is complete, group IP is %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:304 +msgid "Failed to setup the Dell EqualLogic driver" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:320 +#, python-format +msgid "Failed to create volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:329 +#, python-format +msgid "Volume %s was not found while trying to delete it" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:333 +#, python-format +msgid "Failed to delete volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:348 +#, python-format +msgid "Failed to create snapshot of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:361 +#, python-format +msgid "Failed to create volume from snapshot %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:374 +#, python-format +msgid "Failed to create clone of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:384 +#, python-format +msgid "Failed to delete snapshot %(snap)s of volume %(vol)s" +msgstr "" + +#: 
cinder/volume/drivers/eqlx.py:405 +#, python-format +msgid "Failed to initialize connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:415 +#, python-format +msgid "Failed to terminate connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:436 +#, python-format +msgid "Volume %s is not found, it may have been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:440 +#, python-format +msgid "Failed to ensure export of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:459 +#, python-format +msgid "Failed to extend_volume %(name)s from %(current_size)sGB to %(new_size)sGB" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:86 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:91 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:103 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:161 +#, python-format +msgid "Cloning volume %(src)s to volume %(dst)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:166 +msgid "Volume status must be 'available'." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:202 cinder/volume/drivers/nfs.py:122 +#: cinder/volume/drivers/netapp/nfs.py:753 +#, python-format +msgid "casted to %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:216 +msgid "Snapshot status must be \"available\" to clone." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:238 +#, python-format +msgid "snapshot: %(snap)s, volume: %(vol)s, volume_size: %(size)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:257 +#, python-format +msgid "will copy from snapshot at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:275 cinder/volume/drivers/nfs.py:172 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:373 +#, python-format +msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:403 +#, python-format +msgid "nova call result: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:405 +msgid "Call to Nova to create snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:427 +msgid "Nova returned \"error\" status while creating snapshot." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:431 +#, python-format +msgid "Status of snapshot %(id)s is now %(status)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:444 +#, python-format +msgid "Timed out while waiting for Nova update for creation of snapshot %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:456 +#, python-format +msgid "create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:457 +#, python-format +msgid "volume id: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:532 +msgid "'active' must be present when writing snap_info." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:562 +#, python-format +msgid "deleting snapshot %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:566 +msgid "Volume status must be \"available\" or \"in-use\"." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:582 +#, python-format +msgid "" +"Snapshot record for %s is not present, allowing snapshot_delete to " +"proceed."
+msgstr "" + +#: cinder/volume/drivers/glusterfs.py:587 +#, python-format +msgid "snapshot_file for this snap is %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:608 +#, python-format +msgid "No base file found for %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:625 +#, python-format +msgid "No %(base_id)s found for %(file)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:680 +#, python-format +msgid "No file found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:690 +#, python-format +msgid "No snap found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:701 +#, python-format +msgid "No file depends on %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:727 +#, python-format +msgid "Check condition failed: %s expected to be None." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:778 +msgid "Call to Nova delete snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:796 +#, python-format +msgid "status of snapshot %s is still \"deleting\"... waiting" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:802 +#, python-format +msgid "Unable to delete snapshot %(id)s, status: %(status)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:815 +#, python-format +msgid "Timed out while waiting for Nova update for deletion of snapshot %(id)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:904 +#, python-format +msgid "%s must be a valid raw or qcow2 image." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:967 +msgid "Extend volume is only supported for this driver when no snapshots exist." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:975 +#, python-format +msgid "Unrecognized backing format: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:990 +#, python-format +msgid "creating new volume at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:993 +#, python-format +msgid "file already exists at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1019 cinder/volume/drivers/nfs.py:159 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1021 +#, python-format +msgid "Available shares: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1038 +#, python-format +msgid "" +"GlusterFS share at %(dir)s is not writable by the Cinder volume service. " +"Snapshot operations will not be supported." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:96 +#, python-format +msgid "GPFS is not active. Detailed output: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:97 +#, python-format +msgid "GPFS is not running - state: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:140 +msgid "Option gpfs_mount_point_base is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:147 +msgid "Option gpfs_images_share_mode is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:153 +msgid "Option gpfs_images_dir is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:160 +#, python-format +msgid "" +"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " +"belong to different file systems" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:169 +#, python-format +msgid "" +"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in " +"cluster daemon level %(cur)s - must be at least at level %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:183 +#, python-format +msgid "%s must be an absolute path." 
+msgstr "" + +#: cinder/volume/drivers/gpfs.py:188 +#, python-format +msgid "%s is not a directory." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:197 +#, python-format +msgid "" +"The GPFS filesystem %(fs)s is not at the required release level. Current" +" level is %(cur)s, must be at least %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:556 +#, python-format +msgid "Failed to resize volume %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:604 +#, python-format +msgid "mkfs failed on volume %(vol)s, error message was: %(err)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:637 +#, python-format +msgid "" +"%s cannot be accessed. Verify that GPFS is active and file system is " +"mounted." +msgstr "" + +#: cinder/volume/drivers/lvm.py:189 +#, python-format +msgid "Unabled to delete due to existing snapshot for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:215 +#, python-format +msgid "Volume device file path %s does not exist." +msgstr "" + +#: cinder/volume/drivers/lvm.py:221 +#, python-format +msgid "Size for volume: %s not found, cannot secure delete." +msgstr "" + +#: cinder/volume/drivers/lvm.py:262 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:361 +#, python-format +msgid "Unable to update stats on non-initialized Volume Group: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:462 +#, python-format +msgid "Error creating iSCSI target, retrying creation for target: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:482 +#, python-format +msgid "volume_info:%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:518 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:519 cinder/volume/drivers/lvm.py:724 +#: cinder/volume/drivers/huawei/rest_common.py:1225 +#, python-format +msgid "%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:573 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/nfs.py:109 +msgid "Driver specific implementation needs to return mount_point_base." +msgstr "" + +#: cinder/volume/drivers/nfs.py:263 +#, python-format +msgid "Expected volume size was %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:264 +#, python-format +msgid " but size is now %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:361 +#, python-format +msgid "%s is already mounted" +msgstr "" + +#: cinder/volume/drivers/nfs.py:421 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:426 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/nfs.py:431 +#, python-format +msgid "NFS config 'nfs_oversub_ratio' invalid. Must be > 0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:439 +#, python-format +msgid "NFS config 'nfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:493 +#, python-format +msgid "Selected %s as target nfs share." 
+msgstr "" + +#: cinder/volume/drivers/nfs.py:526 +#, python-format +msgid "%s is above nfs_used_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:529 +#, python-format +msgid "%s is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:532 +#, python-format +msgid "%s reserved space is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/rbd.py:160 +#, python-format +msgid "Invalid argument - whence=%s not supported" +msgstr "" + +#: cinder/volume/drivers/rbd.py:164 +msgid "Invalid argument" +msgstr "" + +#: cinder/volume/drivers/rbd.py:183 +msgid "fileno() not supported by RBD()" +msgstr "" + +#: cinder/volume/drivers/rbd.py:210 +#, python-format +msgid "error opening rbd image %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:259 +msgid "rados and rbd python libraries not found" +msgstr "" + +#: cinder/volume/drivers/rbd.py:265 +msgid "error connecting to ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:346 cinder/volume/drivers/sheepdog.py:178 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:377 +#, python-format +msgid "clone depth exceeds limit of %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:411 +#, python-format +msgid "maximum clone depth (%d) has been reached - flattening source volume" +msgstr "" + +#: cinder/volume/drivers/rbd.py:423 +#, python-format +msgid "flattening source volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:435 +#, python-format +msgid "creating snapshot='%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:445 +#, python-format +msgid "cloning '%(src_vol)s@%(src_snap)s' to '%(dest)s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:459 +msgid "clone created successfully" +msgstr "" + +#: cinder/volume/drivers/rbd.py:468 +#, python-format +msgid "creating volume '%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:484 +#, python-format +msgid "flattening %(pool)s/%(img)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:490 +#, python-format +msgid "cloning %(pool)s/%(img)s@%(snap)s to %(dst)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:527 +msgid "volume has no backup snaps" +msgstr "" + +#: cinder/volume/drivers/rbd.py:550 +#, python-format +msgid "volume %s is not a clone" +msgstr "" + +#: cinder/volume/drivers/rbd.py:568 +#, python-format +msgid "deleting parent snapshot %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:579 +#, python-format +msgid "deleting parent %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:593 +#, python-format +msgid "volume %s no longer exists in backend" +msgstr "" + +#: cinder/volume/drivers/rbd.py:609 +msgid "volume has clone snapshot(s)" +msgstr "" + +#: cinder/volume/drivers/rbd.py:625 +#, python-format +msgid "deleting rbd volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:629 +msgid "" +"ImageBusy error raised while deleting rbd volume. This may have been " +"caused by a connection from a client that has crashed and, if so, may be " +"resolved by retrying the delete after 30 seconds has elapsed." 
+msgstr "" + +#: cinder/volume/drivers/rbd.py:642 +msgid "volume is a clone so cleaning references" +msgstr "" + +#: cinder/volume/drivers/rbd.py:696 +#, python-format +msgid "connection data: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:705 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:709 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:712 +msgid "Not an rbd snapshot" +msgstr "" + +#: cinder/volume/drivers/rbd.py:724 +#, python-format +msgid "not cloneable: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:728 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:733 +msgid "rbd image clone requires image format to be 'raw' but image {0} is '{1}'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:747 +#, python-format +msgid "Unable to open image %(loc)s: %(err)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:817 +msgid "volume backup complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:830 +msgid "volume restore complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:840 cinder/volume/drivers/sheepdog.py:195 +#, python-format +msgid "Failed to Extend Volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:845 cinder/volume/drivers/sheepdog.py:200 +#: cinder/volume/drivers/windows/windows.py:223 +#, python-format +msgid "Extend volume from %(old_size)s GB to %(new_size)s GB." +msgstr "" + +#: cinder/volume/drivers/scality.py:67 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:78 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:84 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:105 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:139 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:59 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:64 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:144 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:151 +#, python-format +msgid "" +"Failed to make httplib connection SolidFire Cluster: %s (verify san_ip " +"settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:154 +#, python-format +msgid "Failed to make httplib connection: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:161 +#, python-format +msgid "" +"Request to SolidFire cluster returned bad status: %(status)s / %(reason)s" +" (check san_login/san_password settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:166 +#, python-format +msgid "HTTP request failed, with status: %(status)s and reason: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:177 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:183 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:187 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:189 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:195 +#, python-format +msgid "Detected 
xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:202 +#: cinder/volume/drivers/solidfire.py:271 +#: cinder/volume/drivers/solidfire.py:366 +#, python-format +msgid "API response: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:222 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:253 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:315 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:398 +msgid "Failed to get model update from clone" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:410 +#, python-format +msgid "Failed volume create: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:425 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:460 +#, python-format +msgid "Failed to get SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:469 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:478 +#, python-format +msgid "Volume %s, not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:481 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:550 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:554 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:556 +msgid "This usually means the volume was never successfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:569 +#, python-format +msgid "Failed to delete SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:572 +#: cinder/volume/drivers/solidfire.py:646 +#: cinder/volume/drivers/solidfire.py:709 +#: cinder/volume/drivers/solidfire.py:734 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:575 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:579 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:587 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:638 +msgid "Entering SolidFire extend_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:660 +msgid "Leaving SolidFire extend_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:665 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:673 +msgid "Failed to get updated stats" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:703 +#: cinder/volume/drivers/solidfire.py:728 +msgid "Entering SolidFire attach_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:773 +msgid "Leaving SolidFire transfer volume" +msgstr "" + +#: cinder/volume/drivers/zadara.py:236 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:260 +#, python-format +msgid "Operation completed. 
%(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:357 +#, python-format +msgid "Pool %(name)s: %(total)sGB total, %(free)sGB free" +msgstr "" + +#: cinder/volume/drivers/zadara.py:408 cinder/volume/drivers/zadara.py:531 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:438 +#, python-format +msgid "Create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:445 cinder/volume/drivers/zadara.py:490 +#: cinder/volume/drivers/zadara.py:516 +#, python-format +msgid "Volume %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:456 +#, python-format +msgid "Delete snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:464 +#, python-format +msgid "snapshot: original volume %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:472 +#, python-format +msgid "snapshot: snapshot %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:483 +#, python-format +msgid "Creating volume from snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:496 +#, python-format +msgid "Snapshot %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:614 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:40 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:79 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:83 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:91 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:98 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:107 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:115 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:130 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:137 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:144 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:152 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:157 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:167 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:177 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:188 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:197 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:218 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:230 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:241 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:257 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:266 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:278 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:287 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:292 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:302 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:312 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:321 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:342 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. 
Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:354 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:365 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:381 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:390 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:402 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:411 +msgid "Entering delete_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:413 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:420 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:430 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:438 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:442 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:456 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:465 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:472 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:476 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:488 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:497 +#: cinder/volume/drivers/emc/emc_smis_common.py:567 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:502 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:518 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:527 +#, python-format +msgid "" +"Error Create Snapshot: %(snapshot)s Volume: %(volume)s Error: " +"%(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:535 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:541 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:545 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:551 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:559 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:574 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:590 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:599 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:611 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:621 +#, python-format +msgid "Create export: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:626 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:648 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:663 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:674 +#, python-format +msgid "Error mapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:678 +#, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:694 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:707 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:711 +#, python-format +msgid "HidePaths for volume %s completed successfully." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:724 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:739 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:744 +#, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:757 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:770 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:775 +#, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:781 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:790 +#: cinder/volume/drivers/emc/emc_smis_common.py:820 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:804 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:810 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:834 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:840 +#, python-format +msgid "Volume %s is already mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:852 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:884 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:887 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:903 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:906 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:928 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:948 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:952 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:959 +msgid "Cannot connect to ECOM server" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:971 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:984 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:997 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1010 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1054 +#, python-format +msgid "Pool %(storage_type)s is not found." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1060 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1066 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1082 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1114 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1117 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1130 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1158 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1184 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1188 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1248 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1289 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1302 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1314 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1326 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1361 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1404 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1409 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1419 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1441 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1463 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1491 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1520 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1526 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1538 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1548 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1550 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1566 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:152 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:161 +#, python-format +msgid "Cannot find device number for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:191 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:198 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:215 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:70 +#, python-format +msgid "Range: start LU: %(start)s, end LU: %(end)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:84 +#, python-format +msgid "setting LU upper (end) limit to %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:92 +#, python-format +msgid "%(element)s: %(val)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:103 cinder/volume/drivers/hds/hds.py:105 +#, python-format +msgid "XML exception reading parameter: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:178 +#, python-format +msgid "portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:197 +#, python-format +msgid "No configuration found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:250 +#, python-format +msgid "HDP not found: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:289 +#, python-format +msgid "iSCSI portal not found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:327 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:355 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is cloned." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:372 +#, python-format +msgid "LUN %(lun)s extended to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:395 +#, python-format +msgid "delete lun %(lun)s on %(name)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:480 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created from snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:503 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is created as snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:522 +#, python-format +msgid "LUN %s is deleted." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:57 +msgid "_instantiate_driver: configuration not found." 
+msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:64 +#, python-format +msgid "" +"_instantiate_driver: Loading %(protocol)s driver for Huawei OceanStor " +"%(product)s series storage arrays." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:84 +#, python-format +msgid "" +"\"Product\" or \"Protocol\" is illegal. \"Product\" should be set to " +"either T, Dorado or HVS. \"Protocol\" should be set to either iSCSI or " +"FC. Product: %(product)s Protocol: %(protocol)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:74 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s host: %(host)s initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:92 +#: cinder/volume/drivers/huawei/huawei_t.py:461 +#, python-format +msgid "initialize_connection: Target FC ports WWNS: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:101 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(ini)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:159 +#: cinder/volume/drivers/huawei/rest_common.py:1278 +#, python-format +msgid "" +"_get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " +"check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:206 +#: cinder/volume/drivers/huawei/rest_common.py:1083 +#, python-format +msgid "_get_tgt_iqn: iSCSI IP is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:234 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:248 +#, python-format +msgid "" +"_get_iscsi_tgt_port_info: Failed to get iSCSI port info. Please make sure" +" the iSCSI port IP %s is configured in array." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:323 +#: cinder/volume/drivers/huawei/huawei_t.py:552 +#, python-format +msgid "" +"terminate_connection: volume: %(vol)s, host: %(host)s, connector: " +"%(initiator)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:351 +#, python-format +msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:436 +msgid "validate_connector: The FC driver requires thewwpns in the connector." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:443 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:578 +#, python-format +msgid "_remove_fc_ports: FC port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:40 +#, python-format +msgid "parse_xml_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:129 +#, python-format +msgid "_get_host_os_type: Host %(ip)s OS type is %(os)s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:59 +#, python-format +msgid "HVS Request URL: %(url)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:60 +#, python-format +msgid "HVS Request Data: %(data)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:73 +#, python-format +msgid "HVS Response Data: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:75 +#, python-format +msgid "Bad response from server: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:82 +msgid "JSON transfer error" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:102 +#, python-format +msgid "Login error, reason is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:166 +#, python-format +msgid "" +"%(err)s\n" +"result: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:173 +#, python-format +msgid "%s \"data\" was not in result." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:208 +msgid "Can't find the Qos policy in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:246 +msgid "Can't find lun or lun group in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:280 +#, python-format +msgid "Invalid resource pool: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:298 +#, python-format +msgid "Get pool info error, pool name is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:327 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:354 +#, python-format +msgid "_stop_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:474 +#, python-format +msgid "" +"_mapping_hostgroup_and_lungroup: lun_group: %(lun_group)sview_id: " +"%(view_id)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:511 +#: cinder/volume/drivers/huawei/rest_common.py:543 +#, python-format +msgid "initiator name:%(initiator_name)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:527 +#, python-format +msgid "host lun id is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:553 +#, python-format +msgid "the free wwns %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:574 +#, python-format +msgid "the fc server properties is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:688 +#, python-format +msgid "JSON transfer data error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:874 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:937 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:964 +#, python-format +msgid "" +"PrefetchType config is wrong. PrefetchType must in 1,2,3,4. fetchtype " +"is:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:970 +msgid "Use default prefetch fetchtype. Prefetch fetchtype:Intelligent." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:982 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal.LUNcopy name: " +"%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1056 +#, python-format +msgid "" +"_get_iscsi_port_info: Failed to get iscsi port info through config IP " +"%(ip)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1101 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1124 +#, python-format +msgid "_parse_volume_type: type id: %(type_id)s config parameter is: %(params)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1157 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the configuration file " +"%(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1162 +#, python-format +msgid "The config parameters are: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1239 +#: cinder/volume/drivers/huawei/ssh_common.py:118 +#: cinder/volume/drivers/huawei/ssh_common.py:1265 +#, python-format +msgid "_check_conf_file: Config file invalid. %s must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1246 +#: cinder/volume/drivers/huawei/ssh_common.py:125 +msgid "_check_conf_file: Config file invalid. StoragePool must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1256 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1300 +msgid "Can not find lun in array" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:54 +#, python-format +msgid "ssh_read: Read SSH timeout. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:70 +msgid "No response message. Please check system status." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:101 +#: cinder/volume/drivers/huawei/ssh_common.py:1249 +msgid "do_setup" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:135 +#: cinder/volume/drivers/huawei/ssh_common.py:1287 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType is invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:169 +#, python-format +msgid "_get_login_info: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:224 +#, python-format +msgid "create_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:242 +#, python-format +msgid "" +"_name_translate: Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:279 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the element in configuration " +"file %(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:373 +#: cinder/volume/drivers/huawei/ssh_common.py:1451 +#, python-format +msgid "LUNType must be \"Thin\" or \"Thick\". LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:395 +msgid "" +"_parse_conf_lun_params: Use default prefetch type. 
Prefetch type: " +"Intelligent" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:421 +#, python-format +msgid "" +"_get_maximum_capacity_pool_id: Failed to get pool id. Please check config" +" file and make sure the StoragePool %s is created in storage array." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:436 +#, python-format +msgid "CLI command: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:466 +#, python-format +msgid "" +"_execute_cli: Can not connect to IP %(old)s, try to connect to the other " +"IP %(new)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:501 +#, python-format +msgid "_execute_cli: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:511 +#, python-format +msgid "delete_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:516 +#, python-format +msgid "delete_volume: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:570 +#, python-format +msgid "" +"create_volume_from_snapshot: snapshot name: %(snapshot)s, volume name: " +"%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:580 +#, python-format +msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:650 +#, python-format +msgid "_wait_for_luncopy: LUNcopy %(luncopyname)s status is %(status)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:688 +#, python-format +msgid "create_cloned_volume: src volume: %(src)s, tgt volume: %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:697 +#, python-format +msgid "Source volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:739 +#, python-format +msgid "" +"extend_volume: extended volume name: %(extended_name)s new added volume " +"name: %(added_name)s new added volume size: %(added_size)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:747 +#, python-format +msgid "extend_volume: volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:779 +#, python-format +msgid "create_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:785 +msgid "create_snapshot: Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:792 +#, python-format +msgid "create_snapshot: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:855 +#, python-format +msgid "delete_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:865 +#, python-format +msgid "" +"delete_snapshot: Can not delete snapshot %s for it is a source LUN of " +"LUNCopy." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:873 +#, python-format +msgid "delete_snapshot: Snapshot %(snap)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:916 +#, python-format +msgid "" +"%(func)s: %(msg)s\n" +"CLI command: %(cmd)s\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:933 +#, python-format +msgid "map_volume: Volume %s was not found." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1079 +#, python-format +msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1102 +#, python-format +msgid "remove_map: Host %s does not exist." 
+msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1106 +#, python-format +msgid "remove_map: Volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1119 +#, python-format +msgid "remove_map: No map between host %(host)s and volume %(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1138 +#, python-format +msgid "" +"_delete_map: There are IOs accessing the system. Retry to delete host map" +" %(mapid)s 10s later." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1146 +#, python-format +msgid "" +"_delete_map: Failed to delete host map %(mapid)s.\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1185 +msgid "_update_volume_stats: Updating volume stats." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1277 +msgid "_check_conf_file: Config file invalid. StoragePool must be specified." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1311 +msgid "" +"_get_device_type: The driver only supports Dorado5100 and Dorado 2100 G2 " +"now." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1389 +#, python-format +msgid "" +"create_volume_from_snapshot: %(device)s does not support create volume " +"from snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1396 +#, python-format +msgid "create_cloned_volume: %(device)s does not support clone volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1404 +#, python-format +msgid "extend_volume: %(device)s does not support extend volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1413 +#, python-format +msgid "create_snapshot: %(device)s does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:132 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:142 +#, python-format +msgid "Failed getting details for pool %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:179 +msgid "do_setup: No configured nodes." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:182 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:186 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:190 +msgid "Unable to determine system name" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:193 +msgid "Unable to determine system id" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:196 +msgid "Unable to determine pool extent size" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:203 +#: cinder/volume/drivers/netapp/iscsi.py:122 +#: cinder/volume/drivers/netapp/nfs.py:639 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:157 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:209 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:217 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:225 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:235 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:254 +msgid "The connector does not contain the required information." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:277 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:297 +msgid "CHAP secret exists for host but CHAP is disabled" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:302 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:314 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:316 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:333 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:342 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:365 +msgid "" +"Could not get FC connection information for the host-volume connection. " +"Is the host configured properly for FC connections?" 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:380 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:385 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:403 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:411 +msgid "terminate_connection: Failed to get host name from connector." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:421 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:447 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:459 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:470 +#, python-format +msgid "enter: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:474 +msgid "extend_volume: Extending a volume with snapshots is not supported." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:481 +#, python-format +msgid "leave: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:497 +#, python-format +msgid "enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:523 +#, python-format +msgid "leave: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:540 +#, python-format +msgid "" +"enter: retype: id=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:581 +#, python-format +msgid "" +"exit: retype: ild=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:622 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:623 +msgid "_update_volume_stats: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:44 +#, python-format +msgid "Could not find key in output of command %(cmd)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:64 +#, python-format +msgid "Failed to get code level (%s)." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:86 +#, python-format +msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:143 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:165 +#, python-format +msgid "Failed to find host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:178 +#, python-format +msgid "enter: get_host_from_connector: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:207 +#, python-format +msgid "leave: get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:218 +#, python-format +msgid "enter: create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:224 +msgid "create_host: Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:235 +msgid "create_host: No initiators or wwpns supplied." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:265 +#, python-format +msgid "leave: create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:275 +#, python-format +msgid "enter: map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:301 +#, python-format +msgid "" +"leave: map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host " +"%(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:311 +#, python-format +msgid "enter: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:318 +#, python-format +msgid "unmap_vol_from_host: No mapping of volume %(vol_name)s to any host found." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:324 +#, python-format +msgid "" +"unmap_vol_from_host: Multiple mappings of volume %(vol_name)s found, no " +"host specified." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:336 +#, python-format +msgid "" +"unmap_vol_from_host: No mapping of volume %(vol_name)s to host %(host) " +"found." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:348 +#, python-format +msgid "leave: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:377 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:383 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:390 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:397 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:402 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:408 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:417 +#, python-format +msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:452 +msgid "Protocol must be specified as ' iSCSI' or ' FC'." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:495 +#, python-format +msgid "enter: create_vdisk: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:498 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:525 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping%(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:535 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within theallotted %(to)d " +"seconds timeout. Terminating." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:544 +#, python-format +msgid "" +"enter: run_flashcopy: execute FlashCopy from source %(source)s to target " +"%(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:552 +#, python-format +msgid "leave: run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:572 +#, python-format +msgid "Loopcall: _check_vdisk_fc_mappings(), vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:595 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:631 +#, python-format +msgid "Calling _ensure_vdisk_no_fc_mappings: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:639 +#, python-format +msgid "enter: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:641 +#, python-format +msgid "Tried to delete non-existant vdisk %s." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:645 +#, python-format +msgid "leave: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:649 +#, python-format +msgid "enter: create_copy: snapshot %(src)s to %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:654 +#, python-format +msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:669 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt)s from vdisk %(src)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:691 +msgid "migrate_volume started without a vdisk copy in the expected pool." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:743 +#, python-format +msgid "" +"Ignore change IO group as storage code level is %(code_level)s, below " +"then 6.4.0.0" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:35 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:211 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:244 +#, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:53 +#, python-format +msgid "Expected no output from CLI command %(cmd)s, got %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:65 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:256 +#, python-format +msgid "" +"Failed to parse CLI output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:142 +msgid "Must pass wwpn or host to lsfabric." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:158 +#, python-format +msgid "Did not find success message nor error for %(fun)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:163 +msgid "" +"storwize_svc_multihostmap_enabled is set to False, not allowing multi " +"host mapping." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:347 +#, python-format +msgid "Did not find expected key %(key)s in %(fun)s: %(raw)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:382 +#, python-format +msgid "" +"Unexpected CLI response: header/row mismatch. header: %(header)s, row: " +"%(row)s" +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:419 +#, python-format +msgid "No element by given name %s." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:440 +msgid "Not a valid value for NaElement." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:444 +msgid "NaElement name cannot be null." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:468 +msgid "Type cannot be converted into NaElement." 
+msgstr "" + +#: cinder/volume/drivers/netapp/common.py:75 +msgid "Required configuration not found" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:103 +#, python-format +msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:109 +#, python-format +msgid "Storage family %s is not supported" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:116 +#, python-format +msgid "No default storage protocol found for storage family %(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:123 +#, python-format +msgid "" +"Protocol %(storage_protocol)s is not supported for storage family " +"%(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:130 +#, python-format +msgid "" +"NetApp driver of family %(storage_family)s and protocol " +"%(storage_protocol)s loaded" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:139 +msgid "Only loading netapp drivers supported." +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:158 +#, python-format +msgid "" +"The configured NetApp driver is deprecated. Please refer the link to " +"resolve the issue '%s'." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:69 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:105 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:150 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:166 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:175 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:191 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:227 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:232 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:238 +#, python-format +msgid "Failed to get LUN target details for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:249 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:252 +#, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:290 +#, python-format +msgid "Snapshot %s deletion successful" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:310 +#: cinder/volume/drivers/netapp/iscsi.py:565 +#: cinder/volume/drivers/netapp/nfs.py:99 +#: cinder/volume/drivers/netapp/nfs.py:206 +#, python-format +msgid "Resizing %s failed. Cleaning volume." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:325 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:412 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:431 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:511 +msgid "Object is not a NetApp LUN." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:543 +#, python-format +msgid "Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:545 +#, python-format +msgid "Error getting lun attribute. Exception: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:600 +#, python-format +msgid "No need to extend volume %s as it is already the requested new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:606 +#, python-format +msgid "Resizing lun %s directly to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:633 +#, python-format +msgid "Lun %(path)s geometry failed. Message - %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:662 +#, python-format +msgid "Moving lun %(name)s to %(new_name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:677 +#, python-format +msgid "Resizing lun %s using sub clone to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:684 +#, python-format +msgid "%s cannot be sub clone resized as it is hosted on compressed volume" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:690 +#, python-format +msgid "%s cannot be sub clone resized as it contains no blocks." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:707 +#, python-format +msgid "Post clone resize lun %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:718 +#, python-format +msgid "Failure staging lun %s to tmp." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:723 +#, python-format +msgid "Failure moving new cloned lun to %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:727 +#, python-format +msgid "Failure deleting staged tmp lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:730 +#, python-format +msgid "Unknown exception in post clone resize lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:732 +#, python-format +msgid "Exception details: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:736 +msgid "Getting lun block count." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:741 +#, python-format +msgid "Failure getting lun info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:785 +#, python-format +msgid "Failed to get vol with required size and extra specs for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:796 +#, python-format +msgid "Error provisioning vol %(name)s on %(volume)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:841 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:982 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:986 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1087 +msgid "Cluster ssc is not updated. No volume stats found." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1149 +#: cinder/volume/drivers/netapp/nfs.py:1080 +msgid "Unsupported ONTAP version. ONTAP version 7.3.1 and above is supported." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1153 +#: cinder/volume/drivers/netapp/nfs.py:1084 +#: cinder/volume/drivers/netapp/utils.py:320 +msgid "Api version could not be determined." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1164 +#, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1273 +#, python-format +msgid "Error finding luns for volume %s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1390 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1393 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1456 +msgid "Volume refresh job already running. Returning..." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1462 +#, python-format +msgid "Error refreshing vol capacity. Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1470 +#, python-format +msgid "Refreshing capacity info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:104 +#: cinder/volume/drivers/netapp/nfs.py:211 +#, python-format +msgid "NFS file %s not discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:224 +#, python-format +msgid "Copied image to volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:230 +#, python-format +msgid "Registering image in cache %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:237 +#, python-format +msgid "" +"Exception while registering image %(image_id)s in cache. Exception: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:250 +#, python-format +msgid "Found cache file for image %(image_id)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:263 +#, python-format +msgid "Cloning img from cache for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:271 +msgid "Image cache cleaning in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:282 +msgid "Image cache cleaning in progress." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:293 +#, python-format +msgid "Cleaning cache for share %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:298 +#, python-format +msgid "Files to be queued for deletion %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:305 +#, python-format +msgid "Exception during cache cleaning %(share)s. Message - %(ex)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:311 +msgid "Image cache cleaning done." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:336 +#, python-format +msgid "Bytes to free %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:343 +#, python-format +msgid "Delete file path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:358 +#, python-format +msgid "Deleting file at path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:363 +#, python-format +msgid "Exception during deleting %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:395 +#, python-format +msgid "Unexpected exception in cloning image %(image_id)s. 
Message: %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:411 +#, python-format +msgid "Cloning image %s from cache" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:415 +#, python-format +msgid "Cache share: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:425 +#, python-format +msgid "Unexpected exception during image cloning in share %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:431 +#, python-format +msgid "Cloning image %s directly in share" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:436 +#, python-format +msgid "Share is cloneable %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:443 +#, python-format +msgid "Image is raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:450 +#, python-format +msgid "Image will locally be converted to raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:457 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:467 +#, python-format +msgid "Performing post clone for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:474 +msgid "NFS file could not be discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:478 +msgid "Checking file for resize" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:482 +#, python-format +msgid "Resizing file to %sG" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:488 +msgid "Resizing image file failed." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:510 +msgid "Discover file retries exhausted." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:529 +#, python-format +msgid "Image location not in the expected format %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:557 +#, python-format +msgid "Found possible share matches %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:561 +msgid "Unexpected exception while short listing used share." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:599 +#, python-format +msgid "Extending volume %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:710 +#, python-format +msgid "Shares on vserver %s will only be used for provisioning." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:714 +#: cinder/volume/drivers/netapp/nfs.py:892 +msgid "No vserver set in config. SSC will be disabled." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:757 +#, python-format +msgid "Exception creating vol %(name)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:765 +#, python-format +msgid "Volume %s could not be created on shares." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:815 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:856 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:862 +#, python-format +msgid "" +"Cloning with params volume %(volume)s, src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:924 +msgid "No cluster ssc stats found. Wait for next volume stats update." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:941 +msgid "No shares found hence skipping ssc refresh." 
+msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:978 +#: cinder/volume/drivers/netapp/nfs.py:1221 +#, python-format +msgid "Shortlisted del elg files %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:983 +#: cinder/volume/drivers/netapp/nfs.py:1226 +#, python-format +msgid "Getting file usage for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:988 +#: cinder/volume/drivers/netapp/nfs.py:1231 +#, python-format +msgid "file-usage for path %(path)s is %(bytes)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1005 +#: cinder/volume/drivers/netapp/nfs.py:1268 +#, python-format +msgid "Share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1007 +#: cinder/volume/drivers/netapp/nfs.py:1270 +#, python-format +msgid "No share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1038 +#, python-format +msgid "Found volume %(vol)s for share %(share)s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1129 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1139 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:241 +#, python-format +msgid "Unexpected error while creating ssc vol list. Message - %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:272 +#, python-format +msgid "Exception querying aggr options. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:313 +#, python-format +msgid "Exception querying sis information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:347 +#, python-format +msgid "Exception querying mirror information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:379 +#, python-format +msgid "Exception querying storage disk. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:421 +#, python-format +msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:455 +#, python-format +msgid "Successfully completed stale refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:482 +#, python-format +msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:488 +#, python-format +msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:500 +msgid "Backend not a VolumeDriver." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:502 +msgid "Backend server not NaServer." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:505 +msgid "ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:517 +msgid "refresh stale ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:616 +msgid "Fatal error: User not permitted to query NetApp volumes." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:623 +#, python-format +msgid "" +"The user does not have access or sufficient privileges to use all ssc " +"apis. The ssc features %s may not work as expected." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:122 +msgid "ems executed successfully." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:124 +#, python-format +msgid "Failed to invoke ems. 
Message : %s" +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:137 +msgid "" +"It is not the recommended way to use drivers by NetApp. Please use " +"NetAppDriver to achieve the functionality." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:160 +msgid "Requires an NaServer instance." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:317 +msgid "Unsupported Clustered Data ONTAP version." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:99 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:150 +#, python-format +msgid "Extending volume: %(id)s New size: %(size)s GB" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:166 +#, python-format +msgid "Volume %s does not exist, it seems it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:179 +#, python-format +msgid "Cannot delete snapshot %(origin): %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:190 +#, python-format +msgid "Creating temp snapshot of the original volume: %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:200 +#: cinder/volume/drivers/nexenta/nfs.py:200 +#, python-format +msgid "Volume creation failed, deleting created snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:205 +#: cinder/volume/drivers/nexenta/nfs.py:205 +#, python-format +msgid "Failed to delete zfs snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:223 +#, python-format +msgid "Enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:250 +#, python-format +msgid "Remote NexentaStor appliance at %s should be SSH-bound." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:267 +#, python-format +msgid "" +"Cannot send source snapshot %(src)s to destination %(dst)s. Reason: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:275 +#, python-format +msgid "" +"Cannot delete temporary source snapshot %(src)s on NexentaStor Appliance:" +" %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:281 +#, python-format +msgid "Cannot delete source volume %(volume)s on NexentaStor Appliance: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:318 +#, python-format +msgid "Snapshot %s does not exist, it seems it was already deleted." 
+msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:439 +#: cinder/volume/drivers/windows/windows_utils.py:230 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:449 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:461 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:471 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:481 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:514 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:522 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:83 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:88 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:89 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:90 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:96 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:85 +#, python-format +msgid "Volume %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:89 +#, python-format +msgid "Folder %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:114 +#, python-format +msgid "Creating folder on Nexenta Store %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:146 +#, python-format +msgid "Cannot destroy created folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:176 +#, python-format +msgid "Cannot destroy cloned folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:227 +#, python-format +msgid "Folder %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:237 +#: cinder/volume/drivers/nexenta/nfs.py:268 +#, python-format +msgid "Snapshot %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:302 +#, python-format +msgid "Creating regular file: %s.This may take some time." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:313 +#, python-format +msgid "Regular file: %s created." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:365 +#, python-format +msgid "Sharing folder %s on Nexenta Store" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:393 +#, python-format +msgid "Shares loaded: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/utils.py:46 +#, python-format +msgid "Invalid value: \"%s\"" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:93 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:99 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:107 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:137 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:190 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:246 +#, python-format +msgid "Snapshot info: %(name)s => %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:321 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:79 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:166 +#, python-format +msgid "Invalid hp3parclient version. Version %s or greater required." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:179 +#, python-format +msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:193 +#, python-format +msgid "HP3PARCommon %(common_ver)s, hp3parclient %(rest_ver)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:212 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:494 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:228 +#, python-format +msgid "Failed to get domain because CPG (%s) doesn't exist on array." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:247 +#, python-format +msgid "Error extending volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:347 +#, python-format +msgid "command %s failed" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:390 +#, python-format +msgid "Error running ssh command: %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:586 +#, python-format +msgid "VV Set %s does not exist." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:633 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:684 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:752 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1004 +#, python-format +msgid "Failure in update_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1019 +#, python-format +msgid "Failure in clear_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1031 +#, python-format +msgid "Error attaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1039 +#, python-format +msgid "Error detaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:124 +#, python-format +msgid "Invalid IP address format '%s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:158 +#, python-format +msgid "" +"Found invalid iSCSI IP address(s) in configuration option(s) " +"hp3par_iscsi_ips or iscsi_ip_address '%s.'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:164 +msgid "At least one valid iSCSI IP address must be set." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:266 +msgid "Least busy iSCSI port not found, using first iSCSI port in list." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:75 +#, python-format +msgid "Failure while invoking function: %(func)s. Error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:162 +#, python-format +msgid "Error while terminating session: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:165 +msgid "Successfully established connection to the server." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:172 +#, python-format +msgid "Error while logging out the user: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:218 +#, python-format +msgid "" +"Not authenticated error occurred. Will create session and try API call " +"again: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:258 +#, python-format +msgid "Task: %(task)s progress: %(prog)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:262 +#, python-format +msgid "Task %s status: success." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:266 +#: cinder/volume/drivers/vmware/api.py:271 +#, python-format +msgid "Task: %(task)s failed with error: %(err)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:290 +msgid "Lease is ready." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:294 +msgid "Lease initializing..." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:304 +#, python-format +msgid "Error: unknown lease state %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:51 +#, python-format +msgid "Read %(bytes)s out of %(max)s from ThreadSafePipe." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:56 +#, python-format +msgid "Completed transfer of size %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:102 +#, python-format +msgid "Initiating image service update on image: %(image)s with meta: %(meta)s" +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:117 +#, python-format +msgid "Glance image: %s is now active." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:123 +#, python-format +msgid "Glance image: %s is in killed state." 
+msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:132 +#, python-format +msgid "Glance image %(id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:171 +#, python-format +msgid "" +"Exception during HTTP connection close in VMwareHTTPWrite. Exception is " +"%s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:203 +#: cinder/volume/drivers/vmware/read_write_util.py:292 +msgid "Could not retrieve URL from lease." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:206 +#, python-format +msgid "Opening vmdk url: %s for write." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:231 +#, python-format +msgid "Written %s bytes to vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:242 +#: cinder/volume/drivers/vmware/read_write_util.py:318 +#, python-format +msgid "Updating progress to %s percent." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:258 +#: cinder/volume/drivers/vmware/read_write_util.py:334 +msgid "Lease released." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:260 +#: cinder/volume/drivers/vmware/read_write_util.py:336 +#, python-format +msgid "Lease is already in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:295 +#, python-format +msgid "Opening vmdk url: %s for read." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:307 +#, python-format +msgid "Read %s bytes from vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:150 +#, python-format +msgid "Error(s): %s occurred in the call to RetrievePropertiesEx." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:189 +#, python-format +msgid "No such SOAP method %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:198 +#, python-format +msgid "httplib error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:209 +#, python-format +msgid "Socket error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:218 +#, python-format +msgid "Type error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:225 +#, python-format +msgid "Error in %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:112 +#, python-format +msgid "Returning spec value %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:115 +#, python-format +msgid "Invalid spec value: %s specified." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:118 +#, python-format +msgid "Returning default spec value: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:169 +#, python-format +msgid "%s not set." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:174 +#, python-format +msgid "Successfully setup driver: %(driver)s for server: %(ip)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:219 +msgid "Backing not available, no operation to be performed." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:287 +#, python-format +msgid "" +"Unable to pick datastore to accommodate %(size)s bytes from the " +"datastores: %(dss)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:293 +#, python-format +msgid "" +"Selected datastore: %(datastore)s with %(host_count)d connected host(s) " +"for the volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:375 +#, python-format +msgid "" +"Unable to find suitable datastore for volume of size: %(vol)s GB under " +"host: %(host)s. 
More details: %(excep)s"
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:385
+#, python-format
+msgid "Unable to find host to accommodate a disk of size: %s in the inventory."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:412
+#, python-format
+msgid ""
+"Unable to find suitable datastore for volume: %(vol)s under host: "
+"%(host)s. More details: %(excep)s"
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:422
+#, python-format
+msgid "Unable to create volume: %s in the inventory."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:441
+#, python-format
+msgid "The instance: %s, for which initialize connection is called, exists."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:448
+#, python-format
+msgid "There is no backing for the volume: %s. Need to create one."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:456
+msgid "The instance for which initialize connection is called does not exist."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:461
+#, python-format
+msgid "Trying to boot from an empty volume: %s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:470
+#, python-format
+msgid ""
+"Returning connection_info: %(info)s for volume: %(volume)s with "
+"connector: %(connector)s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:518
+#, python-format
+msgid "Snapshot of volume not supported in state: %s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:523
+#, python-format
+msgid "There is no backing, so will not create snapshot: %s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:528
+#, python-format
+msgid "Successfully created snapshot: %s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:549
+#, python-format
+msgid "Delete snapshot of volume not supported in state: %s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:554
+#, python-format
+msgid "There is no backing, and so there is no snapshot: %s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:558
+#, python-format
+msgid "Successfully deleted snapshot: %s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:586
+#, python-format
+msgid "Successfully cloned new backing: %(back)s from source VMDK file: %(vmdk)s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:603
+#, python-format
+msgid ""
+"There is no backing for the source volume: %(svol)s. Not creating any "
+"backing for the volume: %(vol)s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:633
+#, python-format
+msgid ""
+"There is no backing for the source snapshot: %(snap)s. Not creating any "
+"backing for the volume: %(vol)s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:642
+#: cinder/volume/drivers/vmware/vmdk.py:982
+#, python-format
+msgid ""
+"There is no snapshot point for the snapshotted volume: %(snap)s. Not "
+"creating any backing for the volume: %(vol)s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:678
+#, python-format
+msgid "Cannot create image of disk format: %s. Only vmdk disk format is accepted."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:713
+#: cinder/volume/drivers/vmware/vmdk.py:771
+#, python-format
+msgid "Fetching glance image: %(id)s to server: %(host)s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:722
+#: cinder/volume/drivers/vmware/vmdk.py:792
+#, python-format
+msgid "Done copying image: %(id)s to volume: %(vol)s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:725
+#, python-format
+msgid ""
+"Exception in copy_image_to_volume: %(excep)s. Deleting the backing: "
+"%(back)s."
+msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:746 +#, python-format +msgid "Exception in _select_ds_for_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:749 +#, python-format +msgid "Selected datastore %(ds)s for new volume of size %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:784 +#, python-format +msgid "Exception in copy_image_to_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:787 +#, python-format +msgid "Deleting the backing: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:808 +#, python-format +msgid "Copy glance image: %s to create new volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:842 +msgid "Upload to glance of attached volume is not supported." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:847 +#, python-format +msgid "Copy Volume: %s to new image." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:853 +#, python-format +msgid "Backing not found, creating for volume: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:872 +#, python-format +msgid "Done copying volume %(vol)s to a new image %(img)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:922 +#, python-format +msgid "Relocating volume: %(backing)s to %(ds)s and %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:961 +#: cinder/volume/drivers/vmware/volumeops.py:630 +#, python-format +msgid "Successfully created clone: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:974 +#, python-format +msgid "" +"There is no backing for the snapshoted volume: %(snap)s. Not creating any" +" backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1010 +#, python-format +msgid "" +"There is no backing for the source volume: %(src)s. Not creating any " +"backing for volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1018 +#, python-format +msgid "Linked clone of source volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:94 +#, python-format +msgid "Downloading image: %s from glance image server as a flat vmdk file." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:107 +#: cinder/volume/drivers/vmware/vmware_images.py:126 +#, python-format +msgid "Downloaded image: %s from glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:113 +#, python-format +msgid "Downloading image: %s from glance image server using HttpNfc import." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:132 +#, python-format +msgid "Uploading image: %s to the Glance image server using HttpNfc export." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:158 +#, python-format +msgid "Uploaded image: %s to the Glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:87 +#, python-format +msgid "Did not find any backing with name: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:94 +#, python-format +msgid "Deleting the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:97 +#, python-format +msgid "Initiated deletion of VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:99 +#, python-format +msgid "Deleted the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:237 +#, python-format +msgid "There are no valid datastores attached to %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:289 +#, python-format +msgid "" +"Creating folder: %(child_folder_name)s under parent folder: " +"%(parent_folder)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:306 +#, python-format +msgid "Child folder already present: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:314 +#, python-format +msgid "Created child folder: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:365 +#, python-format +msgid "Spec for creating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:383 +#, python-format +msgid "" +"Creating volume backing name: %(name)s disk_type: %(disk_type)s size_kb: " +"%(size_kb)s at folder: %(folder)s resourse pool: %(resource_pool)s " +"datastore name: %(ds_name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:395 +#, python-format +msgid "Initiated creation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:398 +#, python-format +msgid "Successfully created volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:438 +#, python-format +msgid "Spec for relocating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:452 +#, python-format +msgid "" +"Relocating backing: %(backing)s to datastore: %(ds)s and resource pool: " +"%(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:462 +#, python-format +msgid "Initiated relocation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:464 +#, python-format +msgid "" +"Successfully relocated volume backing: %(backing)s to datastore: %(ds)s " +"and resource pool: %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:474 +#, python-format +msgid "Moving backing: %(backing)s to folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:479 +#, python-format +msgid "Initiated move of volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:482 +#, python-format +msgid "Successfully moved volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:494 +#, python-format +msgid "Snapshoting backing: %(backing)s with name: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:501 +#, python-format +msgid "Initiated snapshot of volume backing: %(backing)s named: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:505 +#, python-format +msgid "Successfully created snapshot: %(snap)s for volume backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:553 +#, python-format +msgid "Deleting the snapshot: %(name)s from backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:558 +#, python-format +msgid "" +"Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " +"delete anything." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:565 +#, python-format +msgid "Initiated snapshot: %(name)s deletion for backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:569 +#, python-format +msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:597 +#, python-format +msgid "Spec for cloning the backing: %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:613 +#, python-format +msgid "" +"Creating a clone of backing: %(back)s, named: %(name)s, clone type: " +"%(type)s from snapshot: %(snap)s on datastore: %(ds)s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:627 +#, python-format +msgid "Initiated clone of backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:638 +#, python-format +msgid "Deleting file: %(file)s under datacenter: %(dc)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:646 +#, python-format +msgid "Initiated deletion via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:648 +#, python-format +msgid "Successfully deleted file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:701 +msgid "Copying disk data before snapshot of the VM" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:711 +#, python-format +msgid "Initiated copying disk data via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:713 +#, python-format +msgid "Successfully copied disk at: %(src)s to: %(dest)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:722 +#, python-format +msgid "Deleting vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:729 +#, python-format +msgid "Initiated deleting vmdk file via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:731 +#, python-format +msgid "Deleted vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/windows/windows.py:102 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:47 +#, python-format +msgid "" +"check_for_setup_error: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:53 +msgid "check_for_setup_error: there is no ISCSI traffic listening." +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:63 +#, python-format +msgid "" +"get_host_information: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:73 +#, python-format +msgid "" +"get_host_information: the ISCSI target information could not be " +"retrieved. WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:105 +#, python-format +msgid "" +"associate_initiator_with_iscsi_target: an association between initiator: " +"%(init)s and target name: %(target)s could not be established. WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:123 +#, python-format +msgid "" +"delete_iscsi_target: error when deleting the iscsi target associated with" +" target name: %(target)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:139 +#, python-format +msgid "" +"create_volume: error when creating the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:157 +#, python-format +msgid "" +"delete_volume: error when deleting the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:177 +#, python-format +msgid "" +"create_snapshot: error when creating the snapshot name: %(vol_name)s . 
" +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:193 +#, python-format +msgid "" +"create_volume_from_snapshot: error when creating the volume name: " +"%(vol_name)s from snapshot name: %(snap_name)s. WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:208 +#, python-format +msgid "" +"delete_snapshot: error when deleting the snapshot name: %(snap_name)s . " +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:223 +#, python-format +msgid "" +"create_iscsi_target: error when creating iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:240 +#, python-format +msgid "" +"remove_iscsi_target: error when deleting iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:255 +#, python-format +msgid "" +"add_disk_to_target: error adding disk associated to volume : %(vol_name)s" +" to the target name: %(tar_name)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:273 +#, python-format +msgid "" +"copy_vhd_disk: error when copying disk from source path : %(src_path)s to" +" destination path: %(dest_path)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:290 +#, python-format +msgid "" +"extend: error when extending the volume: %(vol_name)s .WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/flows/common.py:52 +#, python-format +msgid "Restoring source %(source_volid)s status to %(status)s" +msgstr "" + +#: cinder/volume/flows/common.py:58 +#, python-format +msgid "" +"Failed setting source volume %(source_volid)s back to its initial " +"%(source_status)s status" +msgstr "" + +#: cinder/volume/flows/common.py:83 +#, python-format +msgid "Updating volume: %(volume_id)s with %(update)s due to: %(reason)s" +msgstr "" + +#: cinder/volume/flows/common.py:90 +#: cinder/volume/flows/manager/create_volume.py:676 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(update)s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:81 +#, python-format +msgid "Originating snapshot status must be one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:103 +#, python-format +msgid "" +"Unable to create a volume from an originating source volume when its " +"status is not one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:126 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than the snapshot size " +"%(snap_size)sGB. They must be >= original snapshot size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:135 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than original volume size " +"%(source_size)sGB. They must be >= original volume size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:144 +#, python-format +msgid "Volume size %(size)s must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:186 +#, python-format +msgid "" +"Size of specified image %(image_size)sGB is larger than volume size " +"%(volume_size)sGB." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:194 +#, python-format +msgid "" +"Volume size %(volume_size)sGB cannot be smaller than the image minDisk " +"size %(min_disk)sGB." 
+msgstr "" + +#: cinder/volume/flows/api/create_volume.py:212 +#, python-format +msgid "Metadata property key %s greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:217 +#, python-format +msgid "Metadata property key %s value greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:254 +#, python-format +msgid "Availability zone '%s' is invalid" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:267 +msgid "Volume must be in the same availability zone as the snapshot" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:276 +msgid "Volume must be in the same availability zone as the source volume" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:315 +msgid "Volume type will be changed to be the same as the source volume." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:463 +#, python-format +msgid "Failed destroying volume entry %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:546 +#, python-format +msgid "Failed rolling back quota for %s reservations" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:590 +#, python-format +msgid "Failed to update quota for deleting volume: %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:678 +#: cinder/volume/flows/manager/create_volume.py:192 +#, python-format +msgid "Volume %s: create failed" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:682 +msgid "Unexpected build error:" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:105 +#, python-format +msgid "" +"Volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d due to " +"%(reason)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:124 +#, python-format +msgid "Volume %s: re-scheduled" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:141 +#, python-format +msgid "Updating volume %(volume_id)s with %(update)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:146 +#, python-format +msgid "Volume %s: resetting 'creating' status failed." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:165 +#, python-format +msgid "Volume %s: rescheduling failed" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:308 +#, python-format +msgid "" +"Failed notifying about the volume action %(event)s for volume " +"%(volume_id)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:345 +#, python-format +msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:347 +#, python-format +msgid "" +"Failed updating volume %(vol_id)s metadata using the provided " +"%(src_type)s %(src_id)s metadata" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:405 +#, python-format +msgid "" +"Failed fetching snapshot %(snapshot_id)s bootable flag using the provided" +" glance snapshot %(snapshot_ref_id)s volume reference" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:418 +#, python-format +msgid "Marking volume %s as bootable." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:421 +#, python-format +msgid "Failed updating volume %(volume_id)s bootable flag to true" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:448 +#, python-format +msgid "" +"Attempting download of %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s." 
+msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:455 +#: cinder/volume/flows/manager/create_volume.py:466 +#, python-format +msgid "" +"Failed to copy image %(image_id)s to volume: %(volume_id)s, error: " +"%(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:461 +#, python-format +msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:475 +#, python-format +msgid "" +"Downloaded image %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s successfully." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:512 +#, python-format +msgid "" +"Creating volume glance metadata for volume %(volume_id)s backed by image " +"%(image_id)s with: %(vol_metadata)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:526 +#, python-format +msgid "" +"Cloning %(volume_id)s from image %(image_id)s at location " +"%(image_location)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:552 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(updates)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:574 +#, python-format +msgid "Unable to create volume. Volume driver %s not initialized" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:588 +#, python-format +msgid "" +"Volume %(volume_id)s: being created using %(functor)s with specification:" +" %(volume_spec)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:611 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with creation provided " +"model %(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:619 +#, python-format +msgid "Volume %s: creating export" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:633 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with driver provided model " +"%(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:680 +#, python-format +msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" +msgstr "" + +#~ msgid "Invalid request body" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: prefix %s" +#~ msgstr "" + +#~ msgid "Schedule volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete schedule volume using flow: %s" +#~ msgstr "" + +#~ msgid "Create volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete create volume workflow" +#~ msgstr "" + +#~ msgid "Expected volume result not found" +#~ msgstr "" + +#~ msgid "Manager volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete manager volume workflow" +#~ msgstr "" + +#~ msgid "Unable to update stats, driver is uninitialized" +#~ msgstr "" + +#~ msgid "Bad reponse from server: %s" +#~ msgstr "" + +#~ msgid "" +#~ msgstr "" + +#~ msgid "%(flow)s has moved into state %(state)s from state %(old_state)s" +#~ msgstr "" + +#~ msgid "No request spec, will not reschedule" +#~ msgstr "" + +#~ msgid "No retry filter property or associated retry info, will not reschedule" +#~ msgstr "" + +#~ msgid "Retry info not present, will not reschedule" +#~ msgstr "" + +#~ msgid "Clear capabilities" +#~ msgstr "" + +#~ msgid "This usually means the volume was never succesfully created." 
+#~ msgstr "" + +#~ msgid "setting LU uppper (end) limit to %s" +#~ msgstr "" + +#~ msgid "Can't find lun or lun goup in array" +#~ msgstr "" + +#~ msgid "Volume to be restored to is smaller than the backup to be restored" +#~ msgstr "" + +#~ msgid "Volume driver '%(driver)s' not initialized." +#~ msgstr "" + +#~ msgid "in looping call" +#~ msgstr "" + +#~ msgid "Is the appropriate service running?" +#~ msgstr "" + +#~ msgid "Could not find another host" +#~ msgstr "" + +#~ msgid "Not enough allocatable volume gigabytes remaining" +#~ msgstr "" + +#~ msgid "Unable to update stats on non-intialized Volume Group: %s" +#~ msgstr "" + +#~ msgid "do_setup: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "migrate_volume started with more than one vdisk copy" +#~ msgstr "" + +#~ msgid "migrate_volume: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "Selected datastore: %s for the volume." +#~ msgstr "" + +#~ msgid "There are no valid datastores present under %s." +#~ msgstr "" + +#~ msgid "Unable to create volume, driver not initialized" +#~ msgstr "" + +#~ msgid "Migration %(migration_id)s could not be found." +#~ msgstr "" + +#~ msgid "Bad driver response status: %(status)s" +#~ msgstr "" + +#~ msgid "Instance %(instance_id)s could not be found." +#~ msgstr "" + +#~ msgid "Volume retype failed: %(reason)s" +#~ msgstr "" + +#~ msgid "SIGTERM received" +#~ msgstr "" + +#~ msgid "Child %(pid)d exited with status %(code)d" +#~ msgstr "" + +#~ msgid "_wait_child %d" +#~ msgstr "" + +#~ msgid "wait wrap.failed %s" +#~ msgstr "" + +#~ msgid "" +#~ "Report interval must be less than " +#~ "service down time. Current config: " +#~ ". Setting " +#~ "service_down_time to: %(new_service_down_time)s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target for volume %(name)s." +#~ msgstr "" + +#~ msgid "Updating iscsi target: %s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target %(name)s: %(e)s" +#~ msgstr "" + +#~ msgid "Caught '%(exception)s' exception." +#~ msgstr "" + +#~ msgid "Get code level failed" +#~ msgstr "" + +#~ msgid "do_setup: Could not get system name" +#~ msgstr "" + +#~ msgid "Failed to get license information." +#~ msgstr "" + +#~ msgid "do_setup: No configured nodes" +#~ msgstr "" + +#~ msgid "enter: _get_chap_secret_for_host: host name %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_chap_secret_for_host: host name " +#~ "%(host_name)s with secret %(chap_secret)s" +#~ msgstr "" + +#~ msgid "" +#~ "_create_host: Cannot clean host name. " +#~ "Host name is not unicode or string" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: %s" +#~ msgstr "" + +#~ msgid "leave: _get_host_from_connector: host %s" +#~ msgstr "" + +#~ msgid "enter: _create_host: host %s" +#~ msgstr "" + +#~ msgid "_create_host: No connector ports" +#~ msgstr "" + +#~ msgid "leave: _create_host: host %(host)s - %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +#~ msgstr "" + +#~ msgid "" +#~ "storwize_svc_multihostmap_enabled is set to " +#~ "False, Not allow multi host mapping" +#~ msgstr "" + +#~ msgid "volume %s mapping to multi host" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _map_vol_to_host: LUN %(result_lun)s, " +#~ "volume %(volume_name)s, host %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "leave: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "_create_host failed to return the host name." 
+#~ msgstr "" + +#~ msgid "_get_host_from_connector failed to return the host name for connector" +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to any host found." +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: Multiple mappings of " +#~ "volume %(vol_name)s found, no host " +#~ "specified." +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to host %(host_name)s found" +#~ msgstr "" + +#~ msgid "protocol must be specified as ' iSCSI' or ' FC'" +#~ msgstr "" + +#~ msgid "enter: _create_vdisk: vdisk %s " +#~ msgstr "" + +#~ msgid "" +#~ "_create_vdisk %(name)s - did not find success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find success " +#~ "message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find mapping " +#~ "id in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to prepare FlashCopy" +#~ " from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "Unexecpted mapping status %(status)s for " +#~ "mapping %(id)s. Attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Mapping %(id)s prepare failed to " +#~ "complete within the allotted %(to)d " +#~ "seconds timeout. Terminating." +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s with " +#~ "exception %(ex)s" +#~ msgstr "" + +#~ msgid "_prepare_fc_map: %s" +#~ msgstr "" + +#~ msgid "" +#~ "_start_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "enter: _run_flashcopy: execute FlashCopy from" +#~ " source %(source)s to target %(target)s" +#~ msgstr "" + +#~ msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +#~ msgstr "" + +#~ msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %(src_vdisk)s (%(src_id)s) does not exist" +#~ msgstr "" + +#~ msgid "" +#~ "_create_copy: cannot get source vdisk " +#~ "%(src)s capacity from vdisk attributes " +#~ "%(attr)s" +#~ msgstr "" + +#~ msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_flashcopy_mapping_attributes: mapping " +#~ "%(fc_map_id)s, attributes %(attributes)s" +#~ msgstr "" + +#~ msgid "enter: _is_vdisk_defined: vdisk %s " +#~ msgstr "" + +#~ msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +#~ msgstr "" + +#~ msgid "enter: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "warning: Tried to delete vdisk %s but it does not exist." 
+#~ msgstr "" + +#~ msgid "leave: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "" +#~ "_add_vdisk_copy %(name)s - did not find" +#~ " success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "_get_vdisk_copy_attrs: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "_get_pool_attrs: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "enter: _execute_command_and_parse_attributes: command %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _execute_command_and_parse_attributes:\n" +#~ "command: %(cmd)s\n" +#~ "attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "_get_hdr_dic: attribute headers and values do not match.\n" +#~ " Headers: %(header)s\n" +#~ " Values: %(row)s" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ "stdout: %(out)s\n" +#~ "stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "Did not find expected column in %(fun)s: %(hdr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Volume size %(size)s cannot be lesser" +#~ " than the snapshot size %(snap_size)s. " +#~ "They must be >= original snapshot " +#~ "size." +#~ msgstr "" + +#~ msgid "" +#~ "Clones currently disallowed when %(size)s " +#~ "< %(source_size)s. They must be >= " +#~ "original volume size." +#~ msgstr "" + +#~ msgid "" +#~ "Size of specified image %(image_size)s " +#~ "is larger than volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "" +#~ "Image minDisk size %(min_disk)s is " +#~ "larger than the volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "Updating volume %(volume_id)s with %(update)s" +#~ msgstr "" + +#~ msgid "Volume %s: resetting 'creating' status failed" +#~ msgstr "" + +#~ msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s" +#~ msgstr "" + +#~ msgid "Marking volume %s as bootable" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting download of %(image_id)s " +#~ "(%(image_location)s) to volume %(volume_id)s" +#~ msgstr "" + +#~ msgid "" +#~ "Downloaded image %(image_id)s (%(image_location)s)" +#~ " to volume %(volume_id)s successfully" +#~ msgstr "" + +#~ msgid "" +#~ "Creating volume glance metadata for " +#~ "volume %(volume_id)s backed by image " +#~ "%(image_id)s with: %(vol_metadata)s" +#~ msgstr "" + +#~ msgid "" +#~ "Cloning %(volume_id)s from image %(image_id)s" +#~ " at location %(image_location)s" +#~ msgstr "" + diff --git a/cinder/locale/sw_KE/LC_MESSAGES/cinder.po b/cinder/locale/sw_KE/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..f341f3a97d --- /dev/null +++ b/cinder/locale/sw_KE/LC_MESSAGES/cinder.po @@ -0,0 +1,10736 @@ +# Swahili (Kenya) translations for cinder. +# Copyright (C) 2013 ORGANIZATION +# This file is distributed under the same license as the cinder project. 
+# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Cinder\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-02-03 06:16+0000\n" +"PO-Revision-Date: 2013-05-29 08:13+0000\n" +"Last-Translator: openstackjenkins \n" +"Language-Team: Swahili (Kenya) " +"(http://www.transifex.com/projects/p/openstack/language/sw_KE/)\n" +"Plural-Forms: nplurals=2; plural=(n != 1)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:102 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:66 cinder/brick/exception.py:33 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:88 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:107 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:112 +#, python-format +msgid "Volume driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:116 +#, python-format +msgid "Backup driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:120 +#, python-format +msgid "Connection to glance failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:124 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:129 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:133 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:137 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:141 +msgid "Volume driver not ready." +msgstr "" + +#: cinder/exception.py:145 cinder/brick/exception.py:74 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:150 +#, python-format +msgid "Invalid snapshot: %(reason)s" +msgstr "" + +#: cinder/exception.py:154 +#, python-format +msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:159 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:163 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:167 +msgid "The results are invalid." +msgstr "" + +#: cinder/exception.py:171 +#, python-format +msgid "Invalid input received: %(reason)s" +msgstr "" + +#: cinder/exception.py:175 +#, python-format +msgid "Invalid volume type: %(reason)s" +msgstr "" + +#: cinder/exception.py:179 +#, python-format +msgid "Invalid volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:183 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:187 +#, python-format +msgid "Invalid host: %(reason)s" +msgstr "" + +#: cinder/exception.py:193 cinder/brick/exception.py:81 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:197 +#, python-format +msgid "Invalid auth key: %(reason)s" +msgstr "" + +#: cinder/exception.py:201 +#, python-format +msgid "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" +msgstr "" + +#: cinder/exception.py:206 +msgid "Service is unavailable at this time." 
+msgstr "" + +#: cinder/exception.py:210 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:214 +#, python-format +msgid "The device in the path %(path)s is unavailable: %(reason)s" +msgstr "" + +#: cinder/exception.py:218 +#, python-format +msgid "Expected a uuid but received %(uuid)s." +msgstr "" + +#: cinder/exception.py:222 cinder/brick/exception.py:68 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:228 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:232 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "" +"Volume %(volume_id)s has no administration metadata with key " +"%(metadata_key)s." +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Invalid metadata: %(reason)s" +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Invalid metadata size: %(reason)s" +msgstr "" + +#: cinder/exception.py:250 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:255 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:264 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:269 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s deletion is not allowed with volumes " +"present with the type." +msgstr "" + +#: cinder/exception.py:274 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:278 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:282 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:287 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:291 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:295 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:328 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:332 +#, python-format +msgid "Unknown quota resources %(unknown)s." 
+msgstr "" + +#: cinder/exception.py:336 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:340 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:344 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:348 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:352 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:356 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:365 +#, python-format +msgid "Volume Type %(id)s already exists." +msgstr "" + +#: cinder/exception.py:369 +#, python-format +msgid "Volume type encryption for type %(type_id)s already exists." +msgstr "" + +#: cinder/exception.py:373 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:377 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:381 +#, python-format +msgid "Could not find parameter %(param)s" +msgstr "" + +#: cinder/exception.py:385 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:389 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:398 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:402 +#, python-format +msgid "Quota exceeded: code=%(code)s" +msgstr "" + +#: cinder/exception.py:409 +#, python-format +msgid "" +"Requested volume or snapshot exceeds allowed Gigabytes quota. Requested " +"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." +msgstr "" + +#: cinder/exception.py:415 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:419 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:423 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:427 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:432 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:436 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:440 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:444 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:449 +#, python-format +msgid "Glance metadata for volume/snapshot %(id)s cannot be found." 
+msgstr "" + +#: cinder/exception.py:453 +#, python-format +msgid "Failed to export for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:457 +#, python-format +msgid "Failed to create metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:461 +#, python-format +msgid "Failed to update metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:465 +#, python-format +msgid "Failed to copy metadata to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:469 +#, python-format +msgid "Failed to copy image to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:473 +msgid "Invalid Ceph args provided for backup rbd operation" +msgstr "" + +#: cinder/exception.py:477 +msgid "An error has occurred during backup operation" +msgstr "" + +#: cinder/exception.py:481 +msgid "Backup RBD operation failed" +msgstr "" + +#: cinder/exception.py:485 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:489 +msgid "Failed to identify volume backend." +msgstr "" + +#: cinder/exception.py:493 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "" + +#: cinder/exception.py:497 +#, python-format +msgid "Connection to swift failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:501 +#, python-format +msgid "Transfer %(transfer_id)s could not be found." +msgstr "" + +#: cinder/exception.py:505 +#, python-format +msgid "Volume migration failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:509 +#, python-format +msgid "SSH command injection detected: %(command)s" +msgstr "" + +#: cinder/exception.py:513 +#, python-format +msgid "QoS Specs %(specs_id)s already exists." +msgstr "" + +#: cinder/exception.py:517 +#, python-format +msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:522 +#, python-format +msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "No such QoS spec %(specs_id)s." +msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:536 +#, python-format +msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:541 +#, python-format +msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." +msgstr "" + +#: cinder/exception.py:546 +#, python-format +msgid "Invalid qos specs: %(reason)s" +msgstr "" + +#: cinder/exception.py:550 +#, python-format +msgid "QoS Specs %(specs_id)s is still associated with entities." +msgstr "" + +#: cinder/exception.py:554 +#, python-format +msgid "key manager error: %(reason)s" +msgstr "" + +#: cinder/exception.py:560 +msgid "Coraid Cinder Driver exception." +msgstr "" + +#: cinder/exception.py:564 +msgid "Failed to encode json data." +msgstr "" + +#: cinder/exception.py:568 +msgid "Login on ESM failed." +msgstr "" + +#: cinder/exception.py:572 +msgid "Relogin on ESM failed." +msgstr "" + +#: cinder/exception.py:576 +#, python-format +msgid "Group with name \"%(group_name)s\" not found." +msgstr "" + +#: cinder/exception.py:580 +#, python-format +msgid "ESM configure request failed: %(message)s." +msgstr "" + +#: cinder/exception.py:584 +#, python-format +msgid "Coraid ESM not available with reason: %(reason)s." +msgstr "" + +#: cinder/exception.py:589 +msgid "Zadara Cinder Driver exception." 
+msgstr "" + +#: cinder/exception.py:593 +#, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:597 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:601 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:605 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:609 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:613 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:618 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:622 +msgid "SolidFire Cinder Driver exception" +msgstr "" + +#: cinder/exception.py:626 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:630 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:636 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:641 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:645 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:649 cinder/exception.py:662 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:654 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:658 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/manager.py:133 +msgid "Notifying Schedulers of capabilities ..." +msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:105 +#, python-format +msgid "" +"Default quota for resource: %(res)s is set by the default quota flag: " +"quota_%(res)s, it is now deprecated. Please use the the default quota " +"class for default quota." +msgstr "" + +#: cinder/quota.py:748 +#, python-format +msgid "Created reservations %s" +msgstr "" + +#: cinder/quota.py:770 +#, python-format +msgid "Failed to commit reservations %s" +msgstr "" + +#: cinder/quota.py:790 +#, python-format +msgid "Failed to roll back reservations %s" +msgstr "" + +#: cinder/quota.py:876 +msgid "Cannot register resource" +msgstr "" + +#: cinder/quota.py:879 +msgid "Cannot register resources" +msgstr "" + +#: cinder/quota_utils.py:46 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume - " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/quota_utils.py:56 cinder/transfer/api.py:168 +#: cinder/volume/flows/api/create_volume.py:520 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/service.py:95 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:108 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:148 +#, python-format +msgid "" +"Report interval must be less than service down time. Current config " +"service_down_time: %(service_down_time)s, report_interval for this: " +"service is: %(report_interval)s. 
Setting global service_down_time to: " +"%(new_down_time)s" +msgstr "" + +#: cinder/service.py:216 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:255 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:270 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:276 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:298 +#, python-format +msgid "" +"Value of config option %(name)s_workers must be integer greater than 1. " +"Input value ignored." +msgstr "" + +#: cinder/service.py:373 +msgid "serve() can only be called once" +msgstr "" + +#: cinder/service.py:379 cinder/openstack/common/service.py:166 +#: cinder/openstack/common/service.py:384 +msgid "Full set of CONF:" +msgstr "" + +#: cinder/service.py:387 +#, python-format +msgid "%s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Can not translate %s to integer." +msgstr "" + +#: cinder/utils.py:127 +#, python-format +msgid "May specify only one of %s" +msgstr "" + +#: cinder/utils.py:212 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:228 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:412 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:423 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:698 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:759 +#, python-format +msgid "Volume driver %s not initialized" +msgstr "" + +#: cinder/wsgi.py:127 cinder/openstack/common/sslutils.py:50 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: cinder/wsgi.py:130 cinder/openstack/common/sslutils.py:53 +#, python-format +msgid "Unable to find ca_file : %s" +msgstr "" + +#: cinder/wsgi.py:133 cinder/openstack/common/sslutils.py:56 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: cinder/wsgi.py:136 cinder/openstack/common/sslutils.py:59 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:169 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:206 +#, python-format +msgid "Started %(name)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:244 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:313 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." 
+msgstr "" + +#: cinder/api/common.py:92 cinder/api/common.py:126 cinder/volume/api.py:266 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:95 cinder/api/common.py:130 cinder/volume/api.py:263 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:120 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:134 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:162 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:189 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:182 +msgid "Initializing extension manager." +msgstr "" + +#: cinder/api/extensions.py:197 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:235 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:236 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:240 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:256 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:262 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:276 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:287 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:356 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:266 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:463 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:786 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:907 +msgid "subclasses must implement construct()!" 
+msgstr "" + +#: cinder/api/contrib/admin_actions.py:81 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:227 +#: cinder/api/contrib/volume_transfer.py:157 +#: cinder/api/contrib/volume_transfer.py:193 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:224 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:60 +msgid "Snapshot not found." +msgstr "" + +#: cinder/api/contrib/hosts.py:86 cinder/api/openstack/wsgi.py:245 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:136 +#, python-format +msgid "Host '%s' could not be found." +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:168 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:180 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:206 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:214 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:111 +msgid "Please specify a name for QoS specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:220 +msgid "Failed to disassociate qos specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:222 +msgid "Qos specs still in use." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:298 +#: cinder/api/contrib/qos_specs_manage.py:351 +msgid "Volume Type id must not be None." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:72 +msgid "Missing required element quota_class_set in request body." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:81 +msgid "Quota class limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:85 +msgid "Quota class limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:60 +msgid "Quota limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quotas.py:65 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:100 +msgid "Missing required element quota_set in request body." +msgstr "" + +#: cinder/api/contrib/quotas.py:111 +#, python-format +msgid "Bad key(s) in quota set: %s" +msgstr "" + +#: cinder/api/contrib/scheduler_hints.py:36 +msgid "Malformed scheduler_hints attribute" +msgstr "" + +#: cinder/api/contrib/services.py:84 +msgid "" +"Query by service parameter is deprecated. Please use binary parameter " +"instead." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:51 +msgid "'status' must be specified." 
+msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:61 +#, python-format +msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:67 +#, python-format +msgid "" +"Provided snapshot status %(provided)s not allowed for snapshot with " +"status %(current)s." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:79 +msgid "progress must be an integer percentage" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:101 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:105 +#: cinder/api/v1/snapshot_metadata.py:75 cinder/api/v1/volume_metadata.py:75 +#: cinder/api/v2/snapshot_metadata.py:75 cinder/api/v2/volume_metadata.py:74 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:108 +#: cinder/api/v1/snapshot_metadata.py:79 cinder/api/v1/volume_metadata.py:79 +#: cinder/api/v2/snapshot_metadata.py:79 cinder/api/v2/volume_metadata.py:78 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:150 +msgid "" +"Key names can only contain alphanumeric characters, underscores, periods," +" colons and hyphens." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:99 +#, python-format +msgid "" +"Invalid request to attach volume to an instance %(instance_uuid)s and a " +"host %(host_name)s simultaneously" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:107 +msgid "Invalid request to attach volume to an invalid target" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:111 +msgid "" +"Invalid request to attach volume with an invalid mode. Attaching mode " +"should be 'rw' or 'ro'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:196 +msgid "Unable to fetch connection information from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:216 +msgid "Unable to terminate volume connection from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:229 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:237 +msgid "Bad value for 'force' parameter." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:240 +msgid "'force' is not string or bool." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:280 +msgid "New volume size must be specified as an integer." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:299 +msgid "Must specify readonly in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:307 +msgid "Bad value for 'readonly'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:311 +msgid "'readonly' not string or bool" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:325 +msgid "New volume type must be specified." 
+msgstr "" + +#: cinder/api/contrib/volume_transfer.py:131 +msgid "Listing volume transfers" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:147 +#, python-format +msgid "Creating new volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:162 +#, python-format +msgid "Creating transfer of volume %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:183 +#, python-format +msgid "Accepting volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:196 +#, python-format +msgid "Accepting transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:217 +#, python-format +msgid "Delete transfer with id: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:64 +msgid "key_size must be non-negative" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:67 +msgid "key_size must be an integer" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:73 +msgid "provider must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:75 +msgid "control_location must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:83 +#, python-format +msgid "Valid control location are: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:111 +msgid "Cannot create encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:115 +msgid "Create body is not valid." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:157 +msgid "Cannot delete encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/middleware/auth.py:108 +msgid "Invalid service catalog json." +msgstr "" + +#: cinder/api/middleware/fault.py:44 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:53 cinder/api/openstack/wsgi.py:984 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/fault.py:69 +#, python-format +msgid "%(exception)s: %(explanation)s" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:55 cinder/api/middleware/sizelimit.py:64 +#: cinder/api/middleware/sizelimit.py:78 +msgid "Request is too large." +msgstr "" + +#: cinder/api/openstack/__init__.py:69 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:80 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:104 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:126 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." 
+msgstr "" + +#: cinder/api/openstack/wsgi.py:220 cinder/api/openstack/wsgi.py:634 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:639 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:677 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:682 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:685 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:793 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:799 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:803 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:914 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:917 cinder/api/openstack/wsgi.py:930 +#: cinder/api/v1/snapshot_metadata.py:53 cinder/api/v1/snapshot_metadata.py:71 +#: cinder/api/v1/snapshot_metadata.py:96 cinder/api/v1/snapshot_metadata.py:121 +#: cinder/api/v1/volume_metadata.py:53 cinder/api/v1/volume_metadata.py:71 +#: cinder/api/v1/volume_metadata.py:96 cinder/api/v1/volume_metadata.py:121 +#: cinder/api/v2/snapshot_metadata.py:53 cinder/api/v2/snapshot_metadata.py:71 +#: cinder/api/v2/snapshot_metadata.py:96 cinder/api/v2/snapshot_metadata.py:121 +#: cinder/api/v2/volume_metadata.py:52 cinder/api/v2/volume_metadata.py:70 +#: cinder/api/v2/volume_metadata.py:95 cinder/api/v2/volume_metadata.py:120 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:927 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:939 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:987 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:139 cinder/api/v2/limits.py:138 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:264 cinder/api/v2/limits.py:261 +msgid "This request was rate-limited." 
+msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:37 cinder/api/v1/snapshot_metadata.py:117 +#: cinder/api/v1/snapshot_metadata.py:156 cinder/api/v2/snapshot_metadata.py:37 +#: cinder/api/v2/snapshot_metadata.py:117 +#: cinder/api/v2/snapshot_metadata.py:156 +msgid "snapshot does not exist" +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:139 +#: cinder/api/v1/snapshot_metadata.py:149 cinder/api/v1/volume_metadata.py:139 +#: cinder/api/v1/volume_metadata.py:149 cinder/api/v2/snapshot_metadata.py:139 +#: cinder/api/v2/snapshot_metadata.py:149 cinder/api/v2/volume_metadata.py:138 +#: cinder/api/v2/volume_metadata.py:148 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:119 cinder/api/v2/snapshots.py:120 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:184 +msgid "'volume_id' must be specified" +msgstr "" + +#: cinder/api/v1/snapshots.py:182 cinder/api/v2/snapshots.py:193 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:186 cinder/api/v2/snapshots.py:202 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:37 cinder/api/v1/volume_metadata.py:117 +#: cinder/api/v1/volume_metadata.py:156 cinder/api/v2/volume_metadata.py:36 +#: cinder/api/v2/volume_metadata.py:116 cinder/api/v2/volume_metadata.py:155 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:111 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:290 cinder/api/v2/volumes.py:228 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:344 cinder/api/v1/volumes.py:348 +#: cinder/api/v2/volumes.py:298 cinder/api/v2/volumes.py:302 +msgid "Invalid imageRef provided." +msgstr "" + +#: cinder/api/v1/volumes.py:388 cinder/api/v2/volumes.py:354 +#, python-format +msgid "snapshot id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:401 +#, python-format +msgid "source vol id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:412 cinder/api/v2/volumes.py:377 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/v1/volumes.py:496 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/snapshots.py:111 cinder/api/v2/snapshots.py:126 +#: cinder/api/v2/snapshots.py:267 +msgid "Snapshot could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:174 cinder/api/v2/snapshots.py:234 +#: cinder/api/v2/volumes.py:313 cinder/api/v2/volumes.py:419 +#, python-format +msgid "Missing required element '%s' in request body" +msgstr "" + +#: cinder/api/v2/snapshots.py:190 cinder/api/v2/volumes.py:217 +#: cinder/api/v2/volumes.py:234 cinder/api/v2/volumes.py:449 +msgid "Volume could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:230 cinder/api/v2/volumes.py:415 +msgid "Missing request body" +msgstr "" + +#: cinder/api/v2/types.py:70 +msgid "Volume type not found" +msgstr "" + +#: cinder/api/v2/volumes.py:237 +msgid "Volume cannot be deleted while in attached state" +msgstr "" + +#: cinder/api/v2/volumes.py:343 +msgid "Volume type not found." 
+msgstr "" + +#: cinder/api/v2/volumes.py:366 +#, python-format +msgid "source volume id:%s not found" +msgstr "" + +#: cinder/api/v2/volumes.py:472 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:66 +msgid "Backup status must be available or error" +msgstr "" + +#: cinder/backup/api.py:105 +msgid "Volume to be backed up must be available" +msgstr "" + +#: cinder/backup/api.py:140 +msgid "Backup status must be available" +msgstr "" + +#: cinder/backup/api.py:145 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:154 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:170 +msgid "Volume to be restored to must be available" +msgstr "" + +#: cinder/backup/api.py:176 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." +msgstr "" + +#: cinder/backup/api.py:181 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:97 +msgid "NULL host not allowed for volume backend lookup." +msgstr "" + +#: cinder/backup/manager.py:100 +#, python-format +msgid "Checking hostname '%s' for backend info." +msgstr "" + +#: cinder/backup/manager.py:107 +#, python-format +msgid "Backend not found in hostname (%s) so using default." +msgstr "" + +#: cinder/backup/manager.py:117 +#, python-format +msgid "Manager requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:120 cinder/backup/manager.py:132 +msgid "Fetching default backend." +msgstr "" + +#: cinder/backup/manager.py:123 +#, python-format +msgid "Volume manager for backend '%s' does not exist." +msgstr "" + +#: cinder/backup/manager.py:129 +#, python-format +msgid "Driver requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:147 +#, python-format +msgid "" +"Registering backend %(backend)s (host=%(host)s " +"backend_name=%(backend_name)s)." +msgstr "" + +#: cinder/backup/manager.py:154 +#, python-format +msgid "Registering default backend %s." +msgstr "" + +#: cinder/backup/manager.py:158 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)." +msgstr "" + +#: cinder/backup/manager.py:165 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s." +msgstr "" + +#: cinder/backup/manager.py:184 +msgid "Cleaning up incomplete backup operations." +msgstr "" + +#: cinder/backup/manager.py:189 +#, python-format +msgid "Resetting volume %s to available (was backing-up)." +msgstr "" + +#: cinder/backup/manager.py:194 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)." +msgstr "" + +#: cinder/backup/manager.py:206 +#, python-format +msgid "Resetting backup %s to error (was creating)." +msgstr "" + +#: cinder/backup/manager.py:212 +#, python-format +msgid "Resetting backup %s to available (was restoring)." +msgstr "" + +#: cinder/backup/manager.py:217 +#, python-format +msgid "Resuming delete on backup: %s." +msgstr "" + +#: cinder/backup/manager.py:225 +#, python-format +msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:237 +#, python-format +msgid "" +"Create backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s." 
+msgstr "" + +#: cinder/backup/manager.py:249 +#, python-format +msgid "" +"Create backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:282 +#, python-format +msgid "Create backup finished. backup: %s." +msgstr "" + +#: cinder/backup/manager.py:286 +#, python-format +msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:299 +#, python-format +msgid "" +"Restore backup aborted: expected volume status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:310 +#, python-format +msgid "" +"Restore backup aborted: expected backup status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:329 +#, python-format +msgid "" +"Restore backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:360 +#, python-format +msgid "" +"Restore backup finished, backup %(backup_id)s restored to volume " +"%(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:379 +#, python-format +msgid "Delete backup started, backup: %s." +msgstr "" + +#: cinder/backup/manager.py:386 +#, python-format +msgid "" +"Delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:399 +#, python-format +msgid "" +"Delete backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:422 +#, python-format +msgid "Delete backup finished, backup %s deleted." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:116 +msgid "" +"rbd striping not supported - ignoring configuration settings for rbd " +"striping" +msgstr "" + +#: cinder/backup/drivers/ceph.py:147 +#, python-format +msgid "invalid user '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:213 +msgid "backup_id required" +msgstr "" + +#: cinder/backup/drivers/ceph.py:224 +#, python-format +msgid "discarding %(length)s bytes from offset %(offset)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:232 +#, python-format +msgid "writing zeroes chunk %d" +msgstr "" + +#: cinder/backup/drivers/ceph.py:246 +#, python-format +msgid "transferring data between '%(src)s' and '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:250 +#, python-format +msgid "%(chunks)s chunks of %(bytes)s bytes to be transferred" +msgstr "" + +#: cinder/backup/drivers/ceph.py:269 +#, python-format +msgid "transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:279 +#, python-format +msgid "transferring remaining %s bytes" +msgstr "" + +#: cinder/backup/drivers/ceph.py:295 +#, python-format +msgid "creating base image '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:322 cinder/backup/drivers/ceph.py:603 +#, python-format +msgid "deleting backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:325 +msgid "no backup snapshot to delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:361 +#, python-format +msgid "trying diff format name format basename='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:369 +#, python-format +msgid "image %s not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:377 +#, python-format +msgid "base image still has %s snapshots so skipping base image delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:382 +#, python-format +msgid "deleting base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:389 +#, python-format +msgid "image busy, retrying %(retries)s more time(s) in %(delay)ss" +msgstr "" + +#: cinder/backup/drivers/ceph.py:394 +msgid "max retries reached - raising error" +msgstr "" + +#: cinder/backup/drivers/ceph.py:397 +#, python-format +msgid "base backup image='%s' deleted)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:407 +#, python-format +msgid "deleting source snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:453 +#, python-format +msgid "performing differential transfer from '%(src)s' to '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:478 +#, python-format +msgid "rbd diff op failed - (ret=%(ret)s stderr=%(stderr)s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:488 +#, python-format +msgid "image '%s' not found - trying diff format name" +msgstr "" + +#: cinder/backup/drivers/ceph.py:493 +#, python-format +msgid "diff format image '%s' not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:528 +#, python-format +msgid "using --from-snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:543 +#, python-format +msgid "source snap '%s' is stale so deleting" +msgstr "" + +#: cinder/backup/drivers/ceph.py:555 +#, python-format +msgid "" +"snap='%(snap)s' does not exist in base image='%(base)s' - aborting " +"incremental backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:566 +#, python-format +msgid "creating backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:586 +#, python-format +msgid "differential backup transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:595 +msgid "differential backup transfer failed" +msgstr "" + +#: 
cinder/backup/drivers/ceph.py:625 +#, python-format +msgid "creating base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:634 +msgid "copying data" +msgstr "" + +#: cinder/backup/drivers/ceph.py:694 +#, python-format +msgid "looking for snapshot of backup base '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:697 +#, python-format +msgid "backup base '%s' has no snapshots" +msgstr "" + +#: cinder/backup/drivers/ceph.py:704 +#, python-format +msgid "backup '%s' has no snapshot" +msgstr "" + +#: cinder/backup/drivers/ceph.py:708 +#, python-format +msgid "backup should only have one snapshot but instead has %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:713 +#, python-format +msgid "found snapshot '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:734 +msgid "need non-zero volume size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:751 +#, python-format +msgid "Starting backup of volume='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:764 +msgid "forcing full backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:776 +#, python-format +msgid "backup '%s' finished." +msgstr "" + +#: cinder/backup/drivers/ceph.py:834 +msgid "adjusting restore vol size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:846 +#, python-format +msgid "trying incremental restore from base='%(base)s' snap='%(snap)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:858 +msgid "differential restore failed, trying full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:869 +#, python-format +msgid "restore transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:916 +#, python-format +msgid "rbd has %s extents" +msgstr "" + +#: cinder/backup/drivers/ceph.py:938 +msgid "dest volume is original volume - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:959 +msgid "destination has extents - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:964 +#, python-format +msgid "no restore point found for backup='%s', forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:995 +msgid "forcing full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1005 +#, python-format +msgid "starting restore from Ceph backup=%(src)s to volume=%(dest)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1016 +msgid "volume_file does not support fileno() so skipping fsync()" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1021 +msgid "restore finished successfully." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:1023 +#, python-format +msgid "restore finished with error - %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1029 +#, python-format +msgid "delete started for backup=%s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1034 +msgid "rbd image not found but continuing anyway so that db entry can be removed" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1037 +#, python-format +msgid "delete '%s' finished with warning" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1039 +#, python-format +msgid "delete '%s' finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:106 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:123 +#, python-format +msgid "single_user auth mode enabled, but %(param)s not set" +msgstr "" + +#: cinder/backup/drivers/swift.py:141 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:146 +#, python-format +msgid "container %s does not exist" +msgstr "" + +#: cinder/backup/drivers/swift.py:151 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/drivers/swift.py:157 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:173 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:182 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:192 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:209 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/drivers/swift.py:214 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:219 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:224 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/drivers/swift.py:234 +#, python-format +msgid "volume size %d is invalid." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:248 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:271 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/drivers/swift.py:278 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:287 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/drivers/swift.py:291 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/drivers/swift.py:297 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:301 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:304 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:312 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/drivers/swift.py:328 cinder/backup/drivers/tsm.py:324 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/drivers/swift.py:345 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/drivers/swift.py:350 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:356 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/drivers/swift.py:362 +#, python-format +msgid "" +"restoring object from swift. backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:378 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/drivers/swift.py:401 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:409 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:423 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:428 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:432 cinder/backup/drivers/tsm.py:378 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:446 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:455 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:458 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:468 cinder/backup/drivers/tsm.py:440 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/backup/drivers/tsm.py:85 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to create device hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:143 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to obtain backup success notification from " +"server.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:173 +#, python-format +msgid "" +"restore: %(vol_id)s Failed.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:199 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a block device." +msgstr "" + +#: cinder/backup/drivers/tsm.py:206 +#, python-format +msgid "backup: %(vol_id)s Failed. Cannot obtain real path to device %(path)s." +msgstr "" + +#: cinder/backup/drivers/tsm.py:213 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a file." +msgstr "" + +#: cinder/backup/drivers/tsm.py:260 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to remove backup hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:286 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to TSM, volume path: " +"%(volume_path)s," +msgstr "" + +#: cinder/backup/drivers/tsm.py:298 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:308 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:338 +#, python-format +msgid "" +"restore: starting restore of backup from TSM to volume %(volume_id)s, " +"backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:352 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:362 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:413 +#, python-format +msgid "" +"delete: %(vol_id)s Failed to run dsmc with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:421 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments with " +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:432 +#, python-format +msgid "" +"delete: %(vol_id)s Failed with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/brick/exception.py:55 +#, python-format +msgid "Exception in string format operation. 
msg='%s'" +msgstr "" + +#: cinder/brick/exception.py:85 +msgid "We are unable to locate any Fibre Channel devices." +msgstr "" + +#: cinder/brick/exception.py:89 +msgid "Unable to find a Fibre Channel volume device." +msgstr "" + +#: cinder/brick/exception.py:93 +#, python-format +msgid "Volume device not found at %(device)s." +msgstr "" + +#: cinder/brick/exception.py:97 +#, python-format +msgid "Unable to find Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:101 +#, python-format +msgid "Failed to create Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:105 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:109 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:113 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:117 +#, python-format +msgid "Connect to volume via protocol %(protocol)s not supported." +msgstr "" + +#: cinder/brick/initiator/connector.py:127 +#, python-format +msgid "Invalid InitiatorConnector protocol specified %(protocol)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:140 +#, python-format +msgid "Failed to access the device on the path %(path)s: %(error)s %(info)s." +msgstr "" + +#: cinder/brick/initiator/connector.py:229 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. Try" +" number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:242 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:317 +#, python-format +msgid "Could not find the iSCSI Initiator File %s" +msgstr "" + +#: cinder/brick/initiator/connector.py:609 +msgid "We are unable to locate any Fibre Channel devices" +msgstr "" + +#: cinder/brick/initiator/connector.py:619 +#, python-format +msgid "Looking for Fibre Channel dev %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:629 +msgid "Fibre Channel volume device not found." +msgstr "" + +#: cinder/brick/initiator/connector.py:633 +#, python-format +msgid "Fibre volume not yet found. Will rescan & retry. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:649 +#, python-format +msgid "Found Fibre Channel volume %(name)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:658 +#, python-format +msgid "Multipath device discovered %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:776 +#, python-format +msgid "AoE volume not yet found at: %(path)s. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:789 +#, python-format +msgid "Found AoE device %(path)s (after %(tries)s rediscover)" +msgstr "" + +#: cinder/brick/initiator/connector.py:815 +#, python-format +msgid "aoe-discover: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:825 +#, python-format +msgid "aoe-revalidate %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:834 +#, python-format +msgid "aoe-flush %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:858 +msgid "" +"Connection details not present. RemoteFsClient may not initialize " +"properly." 
+msgstr "" + +#: cinder/brick/initiator/connector.py:915 +msgid "Invalid connection_properties specified no device_path attribute" +msgstr "" + +#: cinder/brick/initiator/linuxfc.py:50 cinder/brick/initiator/linuxfc.py:56 +msgid "systool is not installed" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:99 +#: cinder/brick/initiator/linuxscsi.py:107 +#: cinder/brick/initiator/linuxscsi.py:124 +#, python-format +msgid "multipath call failed exit (%(code)s)" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:145 +#, python-format +msgid "Couldn't find multipath device %(line)s" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:149 +#, python-format +msgid "Found multipath device = %(mdev)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:140 +msgid "Attempting recreate of backing lun..." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:158 +#, python-format +msgid "" +"Failed to recover attempt to create iscsi backing lun for volume " +"id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:177 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:184 +#, python-format +msgid "" +"Created volume path %(vp)s,\n" +"content: %(vc)%" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:216 cinder/brick/iscsi/iscsi.py:365 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:227 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:258 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:262 +#, python-format +msgid "Volume path %s does not exist, nothing to remove." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:280 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:290 cinder/brick/iscsi/iscsi.py:550 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:375 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:469 +msgid "cinder-rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:489 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:513 cinder/brick/iscsi/iscsi.py:522 +#, python-format +msgid "Failed to create iscsi target for volume id:%s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:532 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:542 +#, python-format +msgid "Failed to remove iscsi target for volume id:%s." 
+msgstr "" + +#: cinder/brick/iscsi/iscsi.py:571 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 cinder/brick/local_dev/lvm.py:158 +#: cinder/brick/local_dev/lvm.py:474 cinder/brick/local_dev/lvm.py:503 +#: cinder/brick/local_dev/lvm.py:546 cinder/brick/local_dev/lvm.py:609 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 cinder/brick/local_dev/lvm.py:159 +#: cinder/brick/local_dev/lvm.py:475 cinder/brick/local_dev/lvm.py:504 +#: cinder/brick/local_dev/lvm.py:547 cinder/brick/local_dev/lvm.py:610 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 cinder/brick/local_dev/lvm.py:160 +#: cinder/brick/local_dev/lvm.py:476 cinder/brick/local_dev/lvm.py:505 +#: cinder/brick/local_dev/lvm.py:548 cinder/brick/local_dev/lvm.py:611 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, python-format +msgid "Unable to locate Volume Group %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:157 +msgid "Error querying thin pool about data_percent" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:370 +#, python-format +msgid "Unable to find VG: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:420 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:473 +msgid "Error creating Volume" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:489 +#, python-format +msgid "Unable to find LV: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:502 +msgid "Error creating snapshot" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:545 +msgid "Error activating LV" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:563 +#, python-format +msgid "Error reported running lvremove: CMD: %(command)s, RESPONSE: %(response)s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:568 +msgid "Attempting udev settle and retry of lvremove..." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:608 +msgid "Error extending Volume" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:39 +msgid "nfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:45 +msgid "glusterfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:86 +#, python-format +msgid "Already mounted: %s" +msgstr "" + +#: cinder/common/config.py:125 +msgid "Deploy v1 of the Cinder API." +msgstr "" + +#: cinder/common/config.py:128 +msgid "Deploy v2 of the Cinder API." +msgstr "" + +#: cinder/common/sqlalchemyutils.py:66 +#: cinder/openstack/common/db/sqlalchemy/utils.py:72 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:114 +#: cinder/openstack/common/db/sqlalchemy/utils.py:120 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/compute/nova.py:97 +#, python-format +msgid "Novaclient connection created using URL: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:63 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:190 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:843 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1842 +#, python-format +msgid "VolumeType %s deletion failed, VolumeType in use." 
+msgstr "" + +#: cinder/db/sqlalchemy/api.py:2530 +#, python-format +msgid "No backup with id %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2615 +msgid "Volume must be available" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2639 +#, python-format +msgid "Volume in unexpected state %s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2662 +#, python-format +msgid "" +"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " +"%(status)s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:37 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:64 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:240 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:269 +msgid "Downgrade from initial Cinder install is unsupported." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:49 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:74 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:105 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:45 +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:48 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:46 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:127 +msgid "Dropping foreign key reservations_ibfk_1 failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:133 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:140 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:147 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:60 +msgid "Exception while creating table 'volume_glance_metadata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:75 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:68 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:58 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:61 +msgid "transfers table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:31 +msgid "migrations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:61 +#, python-format +msgid "Table |%s| not created" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:37 +#, python-format +msgid "Exception while dropping table %s." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:100 +#, python-format +msgid "Exception while creating table %s." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:34 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:43 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:49 +#, python-format +msgid "Column |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:92 +msgid "encryption_key_id column not dropped from volumes" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:100 +msgid "encryption_key_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:105 +msgid "volume_type_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:113 +msgid "encryption table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:49 +msgid "Table quality_of_service_specs not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:60 +msgid "Added qos_specs_id column to volume type table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:85 +msgid "Dropping foreign key volume_types_ibfk_1 failed" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:93 +msgid "Dropping qos_specs_id column failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:100 +msgid "Dropping quality_of_service_specs table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:59 +msgid "volume_admin_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:48 +msgid "" +"Found existing 'default' entries in the quota_classes table. Skipping " +"insertion of default values." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:72 +msgid "Added default quota class data into the DB." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:74 +msgid "Default quota class data not inserted into the DB." +msgstr "" + +#: cinder/image/glance.py:161 cinder/image/glance.py:169 +#, python-format +msgid "Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:94 cinder/image/image_utils.py:199 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/image/image_utils.py:101 +#, python-format +msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:109 cinder/image/image_utils.py:192 +#, python-format +msgid "" +"Size is %(image_size)dGB and doesn't fit in a volume of size " +"%(volume_size)dGB." +msgstr "" + +#: cinder/image/image_utils.py:157 +#, python-format +msgid "" +"qemu-img is not installed and image is of type %s. Only RAW images can " +"be used if qemu-img is not installed." +msgstr "" + +#: cinder/image/image_utils.py:164 +msgid "" +"qemu-img is not installed and the disk format is not specified. Only RAW" +" images can be used if qemu-img is not installed." 
+msgstr "" + +#: cinder/image/image_utils.py:178 +#, python-format +msgid "Copying image from %(tmp)s to volume %(dest)s - size: %(size)s" +msgstr "" + +#: cinder/image/image_utils.py:206 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:224 +#, python-format +msgid "Converted to %(vol_format)s, but format is now %(file_format)s" +msgstr "" + +#: cinder/image/image_utils.py:260 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:78 +msgid "" +"config option keymgr.fixed_key has not been defined: some operations may " +"fail unexpectedly" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:80 +msgid "keymgr.fixed_key not defined" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:134 +#, python-format +msgid "Not deleting key %s" +msgstr "" + +#: cinder/openstack/common/eventlet_backdoor.py:140 +#, python-format +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/fileutils.py:64 +#, python-format +msgid "Reloading cached file %s" +msgstr "" + +#: cinder/openstack/common/gettextutils.py:252 +msgid "Message objects do not support addition." +msgstr "" + +#: cinder/openstack/common/gettextutils.py:261 +msgid "" +"Message objects do not support str() because they may contain non-ascii " +"characters. Please use unicode() or translate() instead." +msgstr "" + +#: cinder/openstack/common/imageutils.py:96 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:189 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:200 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:227 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:235 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/log.py:301 +#, python-format +msgid "Deprecated: %s" +msgstr "" + +#: cinder/openstack/common/log.py:402 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:453 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:623 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:82 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:89 +#: cinder/tests/brick/test_brick_connector.py:466 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:129 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:136 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:43 +#, python-format +msgid "Unexpected argument for periodic task creation: %(arg)s." 
+msgstr "" + +#: cinder/openstack/common/periodic_task.py:134 +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:139 +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:177 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:186 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." +msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:127 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/openstack/common/processutils.py:142 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:167 +#: cinder/openstack/common/processutils.py:239 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:345 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:179 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/openstack/common/processutils.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:318 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:220 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/openstack/common/processutils.py:224 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/openstack/common/service.py:175 +#: cinder/openstack/common/service.py:269 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:187 +msgid "Exception during rpc cleanup." 
+msgstr "" + +#: cinder/openstack/common/service.py:238 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:275 +msgid "Unhandled exception" +msgstr "" + +#: cinder/openstack/common/service.py:308 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/openstack/common/service.py:327 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/openstack/common/service.py:337 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/openstack/common/service.py:354 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/openstack/common/service.py:358 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/service.py:362 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/openstack/common/service.py:392 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/openstack/common/service.py:410 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/openstack/common/sslutils.py:98 +#, python-format +msgid "Invalid SSL version : %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:86 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/strutils.py:182 +#, python-format +msgid "Invalid string format: %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:189 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/openstack/common/versionutils.py:69 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and " +"may be removed in %(remove_in)s." +msgstr "" + +#: cinder/openstack/common/versionutils.py:73 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s and may be removed in " +"%(remove_in)s. It will not be superseded." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:29 +msgid "An unknown error occurred in crypto utils." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:36 +#, python-format +msgid "Block size of %(given)d is too big, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:45 +#, python-format +msgid "Length of %(given)d is too long, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/db/exception.py:44 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:487 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:538 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:610 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/utils.py:33 +msgid "Sort key supplied was not valid." +msgstr "" + +#: cinder/openstack/common/notifier/api.py:129 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:145 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:164 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. 
Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:105 +#, python-format +msgid "" +"An RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:83 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "no calling threads waiting for msg_id: %s, message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:216 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshold: %d. There " +"could be a MulticallProxyWaiter leak." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:299 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:345 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "received %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:422 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:423 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:280 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:459 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:594 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:597 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:631 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:640 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:668 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message (%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." 
+msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:166 +#: cinder/openstack/common/rpc/impl_qpid.py:164 +msgid "Failed to process message... skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:477 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:499 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:536 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:552 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:606 +#: cinder/openstack/common/rpc/impl_qpid.py:507 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:624 +#: cinder/openstack/common/rpc/impl_qpid.py:522 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:628 +#: cinder/openstack/common/rpc/impl_qpid.py:526 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:667 +#: cinder/openstack/common/rpc/impl_qpid.py:561 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:84 +#, python-format +msgid "Invalid value for qpid_topology_version: %d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:455 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:461 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:474 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:534 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:101 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:136 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:137 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:138 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:146 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:158 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:200 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:205 +msgid "You cannot send on this socket." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:267 +#, python-format +msgid "Running func with context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:305 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:387 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:437 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:443 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:475 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:481 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:497 +#, python-format +msgid "Required IPC directory does not exist at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:506 +#, python-format +msgid "Permission denied to IPC directory at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:509 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:543 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:562 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:590 +msgid "Skipping topic registration. Already registered." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:597 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:649 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:662 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:675 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:678 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:681 +#, python-format +msgid "Received message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:682 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:691 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:698 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:721 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:724 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:728 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:731 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:771 +#, python-format +msgid "topic is %s." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:815 +#, python-format +msgid "rpc_zmq_matchmaker = %(orig)s is deprecated; use %(new)s instead" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#: cinder/openstack/common/rpc/matchmaker_ring.py:79 +#: cinder/openstack/common/rpc/matchmaker_ring.py:97 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:54 +#, python-format +msgid "extra_spec requirement '%(req)s' does not match '%(cap)s'" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:67 +#, python-format +msgid "%(host_state)s fails resource_type extra_specs requirements" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:43 +msgid "Re-scheduling is disabled." +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:52 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/scheduler/driver.py:69 +msgid "Must implement host_passes_filters" +msgstr "" + +#: cinder/scheduler/driver.py:74 +msgid "Must implement find_retype_host" +msgstr "" + +#: cinder/scheduler/driver.py:78 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:82 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:98 +#, python-format +msgid "cannot place volume %(id)s on %(host)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:114 +#, python-format +msgid "No valid hosts for volume %(id)s with type %(type)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:125 +#, python-format +msgid "" +"Current host not valid for volume %(id)s with type %(type)s, migration " +"not allowed" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:156 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:174 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:207 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:259 +#, python-format +msgid "Filtered %s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:276 +#, python-format +msgid "Choosing %s" +msgstr "" + +#: cinder/scheduler/host_manager.py:264 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:269 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:294 +#, python-format +msgid "volume service is down or disabled. (host: %s)" +msgstr "" + +#: cinder/scheduler/manager.py:63 +msgid "" +"ChanceScheduler and SimpleScheduler have been deprecated due to lack of " +"support for advanced features like: volume types, volume encryption, QoS " +"etc. These two schedulers can be fully replaced by FilterScheduler with " +"certain combination of filters and weighers." 
+msgstr "" + +#: cinder/scheduler/manager.py:98 cinder/scheduler/manager.py:100 +msgid "Failed to create scheduler manager volume flow" +msgstr "" + +#: cinder/scheduler/manager.py:159 +msgid "New volume type not specified in request_spec." +msgstr "" + +#: cinder/scheduler/manager.py:174 +#, python-format +msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." +msgstr "" + +#: cinder/scheduler/manager.py:192 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:68 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%s'" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:43 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:57 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:53 +msgid "No volume_id provided to populate a request_spec from" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:116 +#, python-format +msgid "Failed to schedule_create_volume: %(cause)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:135 +#, python-format +msgid "Failed notifying on %(topic)s payload %(payload)s" +msgstr "" + +#: cinder/tests/fake_driver.py:57 cinder/volume/driver.py:784 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:76 cinder/volume/driver.py:884 +#, python-format +msgid "FAKE ISER: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:97 +msgid "local_path not implemented" +msgstr "" + +#: cinder/tests/fake_driver.py:124 cinder/tests/fake_driver.py:129 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:70 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:78 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:94 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:97 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:58 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_netapp_nfs.py:360 +#, python-format +msgid "Share %(share)s and file name %(file_name)s" +msgstr "" + +#: cinder/tests/test_rbd.py:768 cinder/volume/drivers/rbd.py:175 +msgid "flush() not supported in this version of librbd" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:260 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1507 +#, python-format +msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1510 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1515 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:60 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:61 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: 
cinder/tests/test_xiv_ds8k.py:102 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/tests/api/contrib/test_backups.py:741 +msgid "Invalid input" +msgstr "" + +#: cinder/tests/integrated/test_login.py:29 +#, python-format +msgid "volume: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:32 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:42 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:50 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:58 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:100 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:103 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:121 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:148 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:159 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:166 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/transfer/api.py:68 +msgid "Volume in unexpected state" +msgstr "" + +#: cinder/transfer/api.py:102 cinder/volume/api.py:367 +msgid "status must be available" +msgstr "" + +#: cinder/transfer/api.py:119 +#, python-format +msgid "Failed to create transfer record for %s" +msgstr "" + +#: cinder/transfer/api.py:136 +#, python-format +msgid "Attempt to transfer %s with invalid auth key." +msgstr "" + +#: cinder/transfer/api.py:156 cinder/volume/flows/api/create_volume.py:508 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/transfer/api.py:182 +#, python-format +msgid "Failed to update quota donating volume transfer id %s" +msgstr "" + +#: cinder/transfer/api.py:199 +#, python-format +msgid "Volume %s has been transferred." 
+msgstr "" + +#: cinder/volume/api.py:143 +#, python-format +msgid "Unable to query if %s is in the availability zone set" +msgstr "" + +#: cinder/volume/api.py:171 cinder/volume/api.py:173 +msgid "Failed to create api volume flow" +msgstr "" + +#: cinder/volume/api.py:202 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:214 +#, python-format +msgid "Volume status must be available or error, but current status is: %s" +msgstr "" + +#: cinder/volume/api.py:224 +msgid "Volume cannot be deleted while migrating" +msgstr "" + +#: cinder/volume/api.py:229 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:285 cinder/volume/api.py:350 +#: cinder/volume/qos_specs.py:240 cinder/volume/volume_types.py:67 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:370 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:377 +msgid "status must be in-use to detach" +msgstr "" + +#: cinder/volume/api.py:388 +msgid "Volume status must be available to reserve" +msgstr "" + +#: cinder/volume/api.py:464 +msgid "Snapshot cannot be created while volume is migrating" +msgstr "" + +#: cinder/volume/api.py:468 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:490 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:502 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:553 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/api.py:581 cinder/volume/flows/api/create_volume.py:208 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:585 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:589 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:720 cinder/volume/api.py:772 +msgid "Volume status must be available/in-use." +msgstr "" + +#: cinder/volume/api.py:723 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/api.py:752 +msgid "Volume status must be available to extend." +msgstr "" + +#: cinder/volume/api.py:757 +#, python-format +msgid "" +"New size for extend must be greater than current size. (current: " +"%(size)s, extended: %(new_size)s)" +msgstr "" + +#: cinder/volume/api.py:778 +msgid "Volume is already part of an active migration" +msgstr "" + +#: cinder/volume/api.py:784 +msgid "volume must not have snapshots" +msgstr "" + +#: cinder/volume/api.py:797 +#, python-format +msgid "No available service named %s" +msgstr "" + +#: cinder/volume/api.py:803 +msgid "Destination host must be different than current host" +msgstr "" + +#: cinder/volume/api.py:833 +msgid "Source volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:837 +msgid "Destination volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:842 +#, python-format +msgid "Destination has migration_status %(stat)s, expected %(exp)s." +msgstr "" + +#: cinder/volume/api.py:853 +msgid "Volume status must be available to update readonly flag." 
+msgstr "" + +#: cinder/volume/api.py:862 +#, python-format +msgid "Unable to update type due to incorrect status on volume: %s" +msgstr "" + +#: cinder/volume/api.py:868 +#, python-format +msgid "Volume %s is already part of an active migration." +msgstr "" + +#: cinder/volume/api.py:874 +#, python-format +msgid "migration_policy must be 'on-demand' or 'never', passed: %s" +msgstr "" + +#: cinder/volume/api.py:887 +#, python-format +msgid "Invalid volume_type passed: %s" +msgstr "" + +#: cinder/volume/api.py:900 +#, python-format +msgid "New volume_type same as original: %s" +msgstr "" + +#: cinder/volume/api.py:915 +msgid "Retype cannot change encryption requirements" +msgstr "" + +#: cinder/volume/api.py:927 +msgid "Retype cannot change front-end qos specs for in-use volumes" +msgstr "" + +#: cinder/volume/driver.py:189 cinder/volume/drivers/netapp/nfs.py:174 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:282 +#, python-format +msgid "copy_data_between_volumes %(src)s -> %(dest)s." +msgstr "" + +#: cinder/volume/driver.py:295 cinder/volume/driver.py:309 +#, python-format +msgid "Failed to attach volume %(vol)s" +msgstr "" + +#: cinder/volume/driver.py:327 +#, python-format +msgid "Failed to copy volume %(src)s to %(dest)d" +msgstr "" + +#: cinder/volume/driver.py:340 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:358 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:394 +#, python-format +msgid "Unable to access the backend storage via the path %(path)s." +msgstr "" + +#: cinder/volume/driver.py:433 +#, python-format +msgid "Creating a new backup for volume %s." +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Restoring backup %(backup)s to volume %(volume)s." +msgstr "" + +#: cinder/volume/driver.py:474 +msgid "Extend volume not implemented" +msgstr "" + +#: cinder/volume/driver.py:533 cinder/volume/drivers/emc/emc_smis_iscsi.py:113 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:546 +#, python-format +msgid "ISCSI discovery attempt failed for:%s" +msgstr "" + +#: cinder/volume/driver.py:548 +#, python-format +msgid "Error from iscsiadm -m discovery: %s" +msgstr "" + +#: cinder/volume/driver.py:595 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:599 cinder/volume/drivers/emc/emc_smis_iscsi.py:156 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:696 +msgid "The volume driver requires the iSCSI initiator name in the connector." +msgstr "" + +#: cinder/volume/driver.py:726 cinder/volume/driver.py:845 +#: cinder/volume/drivers/eqlx.py:247 cinder/volume/drivers/lvm.py:359 +#: cinder/volume/drivers/zadara.py:650 +#: cinder/volume/drivers/emc/emc_smis_common.py:859 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:235 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:602 +#: cinder/volume/drivers/netapp/iscsi.py:1032 +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/nexenta/iscsi.py:538 +#: cinder/volume/drivers/windows/windows.py:205 +msgid "Updating volume stats" +msgstr "" + +#: cinder/volume/driver.py:924 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:203 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." 
+msgstr "" + +#: cinder/volume/manager.py:209 +msgid "" +"ThinLVMVolumeDriver is deprecated, please configure LVMISCSIDriver and " +"lvm_type=thin. Continuing with those settings." +msgstr "" + +#: cinder/volume/manager.py:228 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)" +msgstr "" + +#: cinder/volume/manager.py:235 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s" +msgstr "" + +#: cinder/volume/manager.py:244 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:257 +#, python-format +msgid "Failed to re-export volume %s: setting to error state" +msgstr "" + +#: cinder/volume/manager.py:264 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:271 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:273 +#, python-format +msgid "" +"Error encountered during re-exporting phase of driver initialization: " +"%(name)s" +msgstr "" + +#: cinder/volume/manager.py:283 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:328 cinder/volume/manager.py:330 +msgid "Failed to create manager volume flow" +msgstr "" + +#: cinder/volume/manager.py:374 cinder/volume/manager.py:391 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:380 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:389 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:394 +#, python-format +msgid "Cannot delete volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:422 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:427 +#, python-format +msgid "volume %s: glance metadata deleted" +msgstr "" + +#: cinder/volume/manager.py:430 +#, python-format +msgid "no glance metadata found for volume %s" +msgstr "" + +#: cinder/volume/manager.py:434 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:451 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:462 +#, python-format +msgid "snapshot %(snap_id)s: creating" +msgstr "" + +#: cinder/volume/manager.py:490 +#, python-format +msgid "" +"Failed updating %(snapshot_id)s metadata using the provided volumes " +"%(volume_id)s metadata" +msgstr "" + +#: cinder/volume/manager.py:496 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:508 cinder/volume/manager.py:518 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:526 +#, python-format +msgid "Cannot delete snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:556 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:559 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:579 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:583 +msgid "being attached by another host" +msgstr "" + +#: cinder/volume/manager.py:587 +msgid "being attached by different mode" +msgstr "" + +#: cinder/volume/manager.py:590 +msgid "status must be available or attaching" +msgstr "" + +#: cinder/volume/manager.py:698 +#, python-format +msgid 
"Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "" + +#: cinder/volume/manager.py:760 +#, python-format +msgid "Unable to fetch connection information from backend: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:807 +#, python-format +msgid "Unable to terminate volume connection: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:854 +msgid "failed to create new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:857 +msgid "timeout creating new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:880 +#, python-format +msgid "Failed to copy volume %(vol1)s to %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:909 +#, python-format +msgid "" +"migrate_volume_completion: completing migration for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:921 +#, python-format +msgid "" +"migrate_volume_completion is cleaning up an error for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:940 +#, python-format +msgid "Failed to delete migration source vol %(vol)s: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:976 +#, python-format +msgid "volume %s: calling driver migrate_volume" +msgstr "" + +#: cinder/volume/manager.py:1016 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/manager.py:1024 +#, python-format +msgid "" +"Unable to update stats, %(driver_name)s -%(driver_version)s " +"%(config_group)s driver is uninitialized." +msgstr "" + +#: cinder/volume/manager.py:1044 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/manager.py:1091 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to extend volume by %(s_size)sG, " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/manager.py:1103 +#, python-format +msgid "volume %s: extending" +msgstr "" + +#: cinder/volume/manager.py:1105 +#, python-format +msgid "volume %s: extended successfully" +msgstr "" + +#: cinder/volume/manager.py:1107 +#, python-format +msgid "volume %s: Error trying to extend volume" +msgstr "" + +#: cinder/volume/manager.py:1169 +msgid "Failed to update usages while retyping volume." +msgstr "" + +#: cinder/volume/manager.py:1170 +msgid "Failed to get old volume type quota reservations" +msgstr "" + +#: cinder/volume/manager.py:1190 +#, python-format +msgid "Volume %s: retyped succesfully" +msgstr "" + +#: cinder/volume/manager.py:1193 +#, python-format +msgid "" +"Volume %s: driver error when trying to retype, falling back to generic " +"mechanism." +msgstr "" + +#: cinder/volume/manager.py:1204 +msgid "Retype requires migration but is not allowed." +msgstr "" + +#: cinder/volume/manager.py:1212 +msgid "Volume must not have snapshots." 
+msgstr "" + +#: cinder/volume/qos_specs.py:57 +#, python-format +msgid "Valid consumer of QoS specs are: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:84 cinder/volume/qos_specs.py:105 +#: cinder/volume/qos_specs.py:155 cinder/volume/qos_specs.py:197 +#: cinder/volume/qos_specs.py:211 cinder/volume/qos_specs.py:225 +#: cinder/volume/volume_types.py:43 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:123 cinder/volume/qos_specs.py:140 +#: cinder/volume/qos_specs.py:272 cinder/volume/volume_types.py:52 +#: cinder/volume/volume_types.py:99 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/qos_specs.py:156 +#, python-format +msgid "Failed to get all associations of qos specs %s" +msgstr "" + +#: cinder/volume/qos_specs.py:189 +#, python-format +msgid "" +"Type %(type_id)s is already associated with another qos specs: " +"%(qos_specs_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:198 +#, python-format +msgid "Failed to associate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:212 +#, python-format +msgid "Failed to disassociate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:226 +#, python-format +msgid "Failed to disassociate qos specs %s." +msgstr "" + +#: cinder/volume/qos_specs.py:284 cinder/volume/volume_types.py:111 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/utils.py:144 +#, python-format +msgid "" +"Incorrect value error: %(blocksize)s, it may indicate that " +"'volume_dd_blocksize' was configured incorrectly. Fall back to default." +msgstr "" + +#: cinder/volume/volume_types.py:130 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:131 +#: cinder/volume/drivers/block_device.py:143 cinder/volume/drivers/lvm.py:654 +#: cinder/volume/drivers/lvm.py:669 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:157 cinder/volume/drivers/lvm.py:687 +#, python-format +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:183 cinder/volume/drivers/lvm.py:483 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:200 cinder/volume/drivers/lvm.py:504 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:272 cinder/volume/drivers/lvm.py:227 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:287 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:311 cinder/volume/drivers/lvm.py:300 +#: cinder/volume/drivers/zadara.py:509 cinder/volume/drivers/nexenta/nfs.py:189 +#, python-format +msgid "Creating clone of volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:380 +msgid "No free disk" +msgstr "" + +#: cinder/volume/drivers/block_device.py:393 +msgid "No big enough free disk" +msgstr "" + +#: cinder/volume/drivers/coraid.py:84 +#, python-format +msgid "Invalid ESM url scheme \"%s\". Supported https only." +msgstr "" + +#: cinder/volume/drivers/coraid.py:111 +msgid "Invalid REST handle name. Expected path." 
+msgstr "" + +#: cinder/volume/drivers/coraid.py:134 +#, python-format +msgid "Call to json.loads() failed: %(ex)s. Response: %(resp)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:224 +msgid "Session is expired. Relogin on ESM." +msgstr "" + +#: cinder/volume/drivers/coraid.py:244 +msgid "Reply is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:246 +msgid "Error message is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:284 +#, python-format +msgid "Coraid Appliance ping failed: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:297 +#, python-format +msgid "Volume \"%(name)s\" created with VSX LUN \"%(lun)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:311 +#, python-format +msgid "Volume \"%s\" deleted." +msgstr "" + +#: cinder/volume/drivers/coraid.py:315 +#, python-format +msgid "Resize volume \"%(name)s\" to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:319 +#, python-format +msgid "Repository for volume \"%(name)s\" found: \"%(repo)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:333 +#, python-format +msgid "Volume \"%(name)s\" resized. New size is %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:385 +msgid "Cannot create clone volume in different repository." +msgstr "" + +#: cinder/volume/drivers/coraid.py:505 +#, python-format +msgid "Initialize connection %(shelf)s/%(lun)s for %(name)s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:139 +#, python-format +msgid "" +"CLI output\n" +"%s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:154 +msgid "Reading CLI MOTD" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:158 +#, python-format +msgid "Setting CLI terminal width: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:162 +#, python-format +msgid "Sending CLI command: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:169 +msgid "Error executing EQL command" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:199 +#, python-format +msgid "EQL-driver: executing \"%s\"" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:208 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:383 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:215 cinder/volume/drivers/san/san.py:149 +#, python-format +msgid "Error running SSH command: %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:282 +#, python-format +msgid "Volume %s does not exist, it may have already been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:300 +#, python-format +msgid "EQL-driver: Setup is complete, group IP is %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:304 +msgid "Failed to setup the Dell EqualLogic driver" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:320 +#, python-format +msgid "Failed to create volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:329 +#, python-format +msgid "Volume %s was not found while trying to delete it" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:333 +#, python-format +msgid "Failed to delete volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:348 +#, python-format +msgid "Failed to create snapshot of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:361 +#, python-format +msgid "Failed to create volume from snapshot %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:374 +#, python-format +msgid "Failed to create clone of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:384 +#, python-format +msgid "Failed to delete snapshot %(snap)s of volume %(vol)s" +msgstr "" + +#: 
+#, python-format
+msgid "Failed to initialize connection to volume %s"
+msgstr ""
+
+#: cinder/volume/drivers/eqlx.py:415
+#, python-format
+msgid "Failed to terminate connection to volume %s"
+msgstr ""
+
+#: cinder/volume/drivers/eqlx.py:436
+#, python-format
+msgid "Volume %s was not found; it may have been deleted"
+msgstr ""
+
+#: cinder/volume/drivers/eqlx.py:440
+#, python-format
+msgid "Failed to ensure export of volume %s"
+msgstr ""
+
+#: cinder/volume/drivers/eqlx.py:459
+#, python-format
+msgid "Failed to extend_volume %(name)s from %(current_size)sGB to %(new_size)sGB"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:86
+#, python-format
+msgid "There's no Gluster config file configured (%s)"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:91
+#, python-format
+msgid "Gluster config file at %(config)s doesn't exist"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:103
+msgid "mount.glusterfs is not installed"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:161
+#, python-format
+msgid "Cloning volume %(src)s to volume %(dst)s"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:166
+msgid "Volume status must be 'available'."
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:202 cinder/volume/drivers/nfs.py:122
+#: cinder/volume/drivers/netapp/nfs.py:753
+#, python-format
+msgid "casted to %s"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:216
+msgid "Snapshot status must be \"available\" to clone."
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:238
+#, python-format
+msgid "snapshot: %(snap)s, volume: %(vol)s, volume_size: %(size)s"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:257
+#, python-format
+msgid "will copy from snapshot at %s"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:275 cinder/volume/drivers/nfs.py:172
+#, python-format
+msgid "Volume %s does not have provider_location specified, skipping"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:373
+#, python-format
+msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:403
+#, python-format
+msgid "nova call result: %s"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:405
+msgid "Call to Nova to create snapshot failed"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:427
+msgid "Nova returned \"error\" status while creating snapshot."
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:431
+#, python-format
+msgid "Status of snapshot %(id)s is now %(status)s"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:444
+#, python-format
+msgid "Timed out while waiting for Nova update for creation of snapshot %s."
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:456
+#, python-format
+msgid "create snapshot: %s"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:457
+#, python-format
+msgid "volume id: %s"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:532
+msgid "'active' must be present when writing snap_info."
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:562
+#, python-format
+msgid "deleting snapshot %s"
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:566
+msgid "Volume status must be \"available\" or \"in-use\"."
+msgstr ""
+
+#: cinder/volume/drivers/glusterfs.py:582
+#, python-format
+msgid ""
+"Snapshot record for %s is not present, allowing snapshot_delete to "
+"proceed."
+msgstr "" + +#: cinder/volume/drivers/glusterfs.py:587 +#, python-format +msgid "snapshot_file for this snap is %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:608 +#, python-format +msgid "No base file found for %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:625 +#, python-format +msgid "No %(base_id)s found for %(file)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:680 +#, python-format +msgid "No file found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:690 +#, python-format +msgid "No snap found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:701 +#, python-format +msgid "No file depends on %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:727 +#, python-format +msgid "Check condition failed: %s expected to be None." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:778 +msgid "Call to Nova delete snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:796 +#, python-format +msgid "status of snapshot %s is still \"deleting\"... waiting" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:802 +#, python-format +msgid "Unable to delete snapshot %(id)s, status: %(status)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:815 +#, python-format +msgid "Timed out while waiting for Nova update for deletion of snapshot %(id)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:904 +#, python-format +msgid "%s must be a valid raw or qcow2 image." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:967 +msgid "Extend volume is only supported for this driver when no snapshots exist." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:975 +#, python-format +msgid "Unrecognized backing format: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:990 +#, python-format +msgid "creating new volume at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:993 +#, python-format +msgid "file already exists at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1019 cinder/volume/drivers/nfs.py:159 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1021 +#, python-format +msgid "Available shares: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1038 +#, python-format +msgid "" +"GlusterFS share at %(dir)s is not writable by the Cinder volume service. " +"Snapshot operations will not be supported." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:96 +#, python-format +msgid "GPFS is not active. Detailed output: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:97 +#, python-format +msgid "GPFS is not running - state: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:140 +msgid "Option gpfs_mount_point_base is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:147 +msgid "Option gpfs_images_share_mode is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:153 +msgid "Option gpfs_images_dir is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:160 +#, python-format +msgid "" +"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " +"belong to different file systems" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:169 +#, python-format +msgid "" +"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in " +"cluster daemon level %(cur)s - must be at least at level %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:183 +#, python-format +msgid "%s must be an absolute path." 
+msgstr "" + +#: cinder/volume/drivers/gpfs.py:188 +#, python-format +msgid "%s is not a directory." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:197 +#, python-format +msgid "" +"The GPFS filesystem %(fs)s is not at the required release level. Current" +" level is %(cur)s, must be at least %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:556 +#, python-format +msgid "Failed to resize volume %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:604 +#, python-format +msgid "mkfs failed on volume %(vol)s, error message was: %(err)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:637 +#, python-format +msgid "" +"%s cannot be accessed. Verify that GPFS is active and file system is " +"mounted." +msgstr "" + +#: cinder/volume/drivers/lvm.py:189 +#, python-format +msgid "Unabled to delete due to existing snapshot for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:215 +#, python-format +msgid "Volume device file path %s does not exist." +msgstr "" + +#: cinder/volume/drivers/lvm.py:221 +#, python-format +msgid "Size for volume: %s not found, cannot secure delete." +msgstr "" + +#: cinder/volume/drivers/lvm.py:262 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:361 +#, python-format +msgid "Unable to update stats on non-initialized Volume Group: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:462 +#, python-format +msgid "Error creating iSCSI target, retrying creation for target: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:482 +#, python-format +msgid "volume_info:%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:518 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:519 cinder/volume/drivers/lvm.py:724 +#: cinder/volume/drivers/huawei/rest_common.py:1225 +#, python-format +msgid "%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:573 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/nfs.py:109 +msgid "Driver specific implementation needs to return mount_point_base." +msgstr "" + +#: cinder/volume/drivers/nfs.py:263 +#, python-format +msgid "Expected volume size was %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:264 +#, python-format +msgid " but size is now %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:361 +#, python-format +msgid "%s is already mounted" +msgstr "" + +#: cinder/volume/drivers/nfs.py:421 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:426 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/nfs.py:431 +#, python-format +msgid "NFS config 'nfs_oversub_ratio' invalid. Must be > 0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:439 +#, python-format +msgid "NFS config 'nfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:493 +#, python-format +msgid "Selected %s as target nfs share." 
+msgstr "" + +#: cinder/volume/drivers/nfs.py:526 +#, python-format +msgid "%s is above nfs_used_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:529 +#, python-format +msgid "%s is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:532 +#, python-format +msgid "%s reserved space is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/rbd.py:160 +#, python-format +msgid "Invalid argument - whence=%s not supported" +msgstr "" + +#: cinder/volume/drivers/rbd.py:164 +msgid "Invalid argument" +msgstr "" + +#: cinder/volume/drivers/rbd.py:183 +msgid "fileno() not supported by RBD()" +msgstr "" + +#: cinder/volume/drivers/rbd.py:210 +#, python-format +msgid "error opening rbd image %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:259 +msgid "rados and rbd python libraries not found" +msgstr "" + +#: cinder/volume/drivers/rbd.py:265 +msgid "error connecting to ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:346 cinder/volume/drivers/sheepdog.py:178 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:377 +#, python-format +msgid "clone depth exceeds limit of %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:411 +#, python-format +msgid "maximum clone depth (%d) has been reached - flattening source volume" +msgstr "" + +#: cinder/volume/drivers/rbd.py:423 +#, python-format +msgid "flattening source volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:435 +#, python-format +msgid "creating snapshot='%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:445 +#, python-format +msgid "cloning '%(src_vol)s@%(src_snap)s' to '%(dest)s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:459 +msgid "clone created successfully" +msgstr "" + +#: cinder/volume/drivers/rbd.py:468 +#, python-format +msgid "creating volume '%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:484 +#, python-format +msgid "flattening %(pool)s/%(img)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:490 +#, python-format +msgid "cloning %(pool)s/%(img)s@%(snap)s to %(dst)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:527 +msgid "volume has no backup snaps" +msgstr "" + +#: cinder/volume/drivers/rbd.py:550 +#, python-format +msgid "volume %s is not a clone" +msgstr "" + +#: cinder/volume/drivers/rbd.py:568 +#, python-format +msgid "deleting parent snapshot %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:579 +#, python-format +msgid "deleting parent %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:593 +#, python-format +msgid "volume %s no longer exists in backend" +msgstr "" + +#: cinder/volume/drivers/rbd.py:609 +msgid "volume has clone snapshot(s)" +msgstr "" + +#: cinder/volume/drivers/rbd.py:625 +#, python-format +msgid "deleting rbd volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:629 +msgid "" +"ImageBusy error raised while deleting rbd volume. This may have been " +"caused by a connection from a client that has crashed and, if so, may be " +"resolved by retrying the delete after 30 seconds has elapsed." 
+msgstr "" + +#: cinder/volume/drivers/rbd.py:642 +msgid "volume is a clone so cleaning references" +msgstr "" + +#: cinder/volume/drivers/rbd.py:696 +#, python-format +msgid "connection data: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:705 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:709 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:712 +msgid "Not an rbd snapshot" +msgstr "" + +#: cinder/volume/drivers/rbd.py:724 +#, python-format +msgid "not cloneable: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:728 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:733 +msgid "rbd image clone requires image format to be 'raw' but image {0} is '{1}'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:747 +#, python-format +msgid "Unable to open image %(loc)s: %(err)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:817 +msgid "volume backup complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:830 +msgid "volume restore complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:840 cinder/volume/drivers/sheepdog.py:195 +#, python-format +msgid "Failed to Extend Volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:845 cinder/volume/drivers/sheepdog.py:200 +#: cinder/volume/drivers/windows/windows.py:223 +#, python-format +msgid "Extend volume from %(old_size)s GB to %(new_size)s GB." +msgstr "" + +#: cinder/volume/drivers/scality.py:67 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:78 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:84 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:105 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:139 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:59 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:64 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:144 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:151 +#, python-format +msgid "" +"Failed to make httplib connection SolidFire Cluster: %s (verify san_ip " +"settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:154 +#, python-format +msgid "Failed to make httplib connection: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:161 +#, python-format +msgid "" +"Request to SolidFire cluster returned bad status: %(status)s / %(reason)s" +" (check san_login/san_password settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:166 +#, python-format +msgid "HTTP request failed, with status: %(status)s and reason: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:177 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:183 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:187 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:189 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:195 +#, python-format +msgid "Detected 
+msgid "Detected xDBVersionMismatch, retry %s of 5"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:202
+#: cinder/volume/drivers/solidfire.py:271
+#: cinder/volume/drivers/solidfire.py:366
+#, python-format
+msgid "API response: %s"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:222
+#, python-format
+msgid "Found solidfire account: %s"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:253
+#, python-format
+msgid "solidfire account: %s does not exist, creating it..."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:315
+#, python-format
+msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:398
+msgid "Failed to get model update from clone"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:410
+#, python-format
+msgid "Failed volume create: %s"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:425
+#, python-format
+msgid "More than one valid preset was detected, using %s"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:460
+#, python-format
+msgid "Failed to get SolidFire Volume: %s"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:469
+#, python-format
+msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:478
+#, python-format
+msgid "Volume %s not found on SF Cluster."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:481
+#, python-format
+msgid "Found %(count)s volumes mapped to id: %(uuid)s."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:550
+msgid "Entering SolidFire delete_volume..."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:554
+#, python-format
+msgid "Account for Volume ID %s was not found on the SolidFire Cluster!"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:556
+msgid "This usually means the volume was never successfully created."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:569
+#, python-format
+msgid "Failed to delete SolidFire Volume: %s"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:572
+#: cinder/volume/drivers/solidfire.py:646
+#: cinder/volume/drivers/solidfire.py:709
+#: cinder/volume/drivers/solidfire.py:734
+#, python-format
+msgid "Volume ID %s was not found on the SolidFire Cluster!"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:575
+msgid "Leaving SolidFire delete_volume"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:579
+msgid "Executing SolidFire ensure_export..."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:587
+msgid "Executing SolidFire create_export..."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:638
+msgid "Entering SolidFire extend_volume..."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:660
+msgid "Leaving SolidFire extend_volume"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:665
+msgid "Updating cluster status info"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:673
+msgid "Failed to get updated stats"
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:703
+#: cinder/volume/drivers/solidfire.py:728
+msgid "Entering SolidFire attach_volume..."
+msgstr ""
+
+#: cinder/volume/drivers/solidfire.py:773
+msgid "Leaving SolidFire transfer volume"
+msgstr ""
+
+#: cinder/volume/drivers/zadara.py:236
+#, python-format
+msgid "Sending %(method)s to %(url)s. Body \"%(body)s\""
+msgstr ""
+
+#: cinder/volume/drivers/zadara.py:260
+#, python-format
%(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:357 +#, python-format +msgid "Pool %(name)s: %(total)sGB total, %(free)sGB free" +msgstr "" + +#: cinder/volume/drivers/zadara.py:408 cinder/volume/drivers/zadara.py:531 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:438 +#, python-format +msgid "Create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:445 cinder/volume/drivers/zadara.py:490 +#: cinder/volume/drivers/zadara.py:516 +#, python-format +msgid "Volume %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:456 +#, python-format +msgid "Delete snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:464 +#, python-format +msgid "snapshot: original volume %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:472 +#, python-format +msgid "snapshot: snapshot %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:483 +#, python-format +msgid "Creating volume from snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:496 +#, python-format +msgid "Snapshot %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:614 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:40 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:79 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:83 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:91 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:98 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:107 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:115 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:130 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:137 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:144 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:152 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:157 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:167 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:177 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:188 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:197 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:218 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:230 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:241 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:257 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:266 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:278 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:287 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:292 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:302 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:312 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:321 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:342 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. 
+"Volume: %(srcname)s. Return code: %(rc)lu. Error: %(error)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:354
+#, python-format
+msgid ""
+"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s."
+" Successfully cloned volume from source volume. Finding the clone "
+"relationship."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:365
+#, python-format
+msgid ""
+"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s."
+" Remove the clone relationship. Method: ModifyReplicaSynchronization "
+"ReplicationService: %(service)s Operation: 8 Synchronization: "
+"%(sync_name)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:381
+#, python-format
+msgid ""
+"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s"
+" Return code: %(rc)lu"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:390
+#, python-format
+msgid ""
+"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: "
+"%(srcname)s. Return code: %(rc)lu. Error: %(error)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:402
+#, python-format
+msgid ""
+"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: "
+"%(srcname)s Return code: %(rc)lu."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:411
+msgid "Entering delete_volume."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:413
+#, python-format
+msgid "Delete Volume: %(volume)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:420
+#, python-format
+msgid "Volume %(name)s not found on the array. No volume to delete."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:430
+#, python-format
+msgid ""
+"Error Delete Volume: %(volumename)s. Storage Configuration Service not "
+"found."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:438
+#, python-format
+msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:442
+#, python-format
+msgid ""
+"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigService: "
+"%(service)s TheElement: %(vol_instance)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:456
+#, python-format
+msgid ""
+"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: "
+"%(error)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:465
+#, python-format
+msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:472
+msgid "Entering create_snapshot."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:476
+#, python-format
+msgid "Create snapshot: %(snapshot)s: volume: %(volume)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:488
+#, python-format
+msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:495
+#: cinder/volume/drivers/emc/emc_smis_common.py:497
+#: cinder/volume/drivers/emc/emc_smis_common.py:567
+#, python-format
+msgid "Cannot find Replication Service to create snapshot for volume %s."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:502
+#, python-format
+msgid ""
+"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s "
+"Source: %(volume)s Replication Service: %(service)s ElementName: "
+"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s."
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:518 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:527 +#, python-format +msgid "" +"Error Create Snapshot: %(snapshot)s Volume: %(volume)s Error: " +"%(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:535 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:541 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:545 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:551 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:559 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:574 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:590 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:599 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:611 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:621 +#, python-format +msgid "Create export: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:626 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:648 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:663 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:674 +#, python-format +msgid "Error mapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:678 +#, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:694 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:707 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:711 +#, python-format +msgid "HidePaths for volume %s completed successfully." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:724 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:739 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:744 +#, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:757 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:770 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:775 +#, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:781 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:790 +#: cinder/volume/drivers/emc/emc_smis_common.py:820 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:804 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:810 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:834 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:840 +#, python-format +msgid "Volume %s is already mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:852 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:884 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:887 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:903 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:906 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:928 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:948 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:952 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:959 +msgid "Cannot connect to ECOM server" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:971 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:984 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:997 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1010 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1054 +#, python-format +msgid "Pool %(storage_type)s is not found." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1060 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1066 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1082 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1114 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1117 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1130 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1158 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1184 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1188 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1248 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1289 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1302 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1314 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1326 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1361 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1404 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1409 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1419 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1441 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1463 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1491 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1520 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1526 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1538 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1548 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1550 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1566 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:152 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:161 +#, python-format +msgid "Cannot find device number for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:191 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:198 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:215 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:70 +#, python-format +msgid "Range: start LU: %(start)s, end LU: %(end)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:84 +#, python-format +msgid "setting LU upper (end) limit to %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:92 +#, python-format +msgid "%(element)s: %(val)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:103 cinder/volume/drivers/hds/hds.py:105 +#, python-format +msgid "XML exception reading parameter: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:178 +#, python-format +msgid "portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:197 +#, python-format +msgid "No configuration found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:250 +#, python-format +msgid "HDP not found: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:289 +#, python-format +msgid "iSCSI portal not found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:327 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:355 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is cloned." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:372 +#, python-format +msgid "LUN %(lun)s extended to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:395 +#, python-format +msgid "delete lun %(lun)s on %(name)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:480 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created from snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:503 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is created as snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:522 +#, python-format +msgid "LUN %s is deleted." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:57 +msgid "_instantiate_driver: configuration not found." 
+msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:64 +#, python-format +msgid "" +"_instantiate_driver: Loading %(protocol)s driver for Huawei OceanStor " +"%(product)s series storage arrays." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:84 +#, python-format +msgid "" +"\"Product\" or \"Protocol\" is illegal. \"Product\" should be set to " +"either T, Dorado or HVS. \"Protocol\" should be set to either iSCSI or " +"FC. Product: %(product)s Protocol: %(protocol)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:74 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s host: %(host)s initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:92 +#: cinder/volume/drivers/huawei/huawei_t.py:461 +#, python-format +msgid "initialize_connection: Target FC ports WWNS: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:101 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(ini)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:159 +#: cinder/volume/drivers/huawei/rest_common.py:1278 +#, python-format +msgid "" +"_get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " +"check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:206 +#: cinder/volume/drivers/huawei/rest_common.py:1083 +#, python-format +msgid "_get_tgt_iqn: iSCSI IP is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:234 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:248 +#, python-format +msgid "" +"_get_iscsi_tgt_port_info: Failed to get iSCSI port info. Please make sure" +" the iSCSI port IP %s is configured in array." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:323 +#: cinder/volume/drivers/huawei/huawei_t.py:552 +#, python-format +msgid "" +"terminate_connection: volume: %(vol)s, host: %(host)s, connector: " +"%(initiator)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:351 +#, python-format +msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:436 +msgid "validate_connector: The FC driver requires thewwpns in the connector." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:443 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:578 +#, python-format +msgid "_remove_fc_ports: FC port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:40 +#, python-format +msgid "parse_xml_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:129 +#, python-format +msgid "_get_host_os_type: Host %(ip)s OS type is %(os)s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:59 +#, python-format +msgid "HVS Request URL: %(url)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:60 +#, python-format +msgid "HVS Request Data: %(data)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:73 +#, python-format +msgid "HVS Response Data: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:75 +#, python-format +msgid "Bad response from server: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:82 +msgid "JSON transfer error" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:102 +#, python-format +msgid "Login error, reason is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:166 +#, python-format +msgid "" +"%(err)s\n" +"result: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:173 +#, python-format +msgid "%s \"data\" was not in result." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:208 +msgid "Can't find the Qos policy in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:246 +msgid "Can't find lun or lun group in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:280 +#, python-format +msgid "Invalid resource pool: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:298 +#, python-format +msgid "Get pool info error, pool name is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:327 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:354 +#, python-format +msgid "_stop_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:474 +#, python-format +msgid "" +"_mapping_hostgroup_and_lungroup: lun_group: %(lun_group)sview_id: " +"%(view_id)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:511 +#: cinder/volume/drivers/huawei/rest_common.py:543 +#, python-format +msgid "initiator name:%(initiator_name)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:527 +#, python-format +msgid "host lun id is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:553 +#, python-format +msgid "the free wwns %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:574 +#, python-format +msgid "the fc server properties is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:688 +#, python-format +msgid "JSON transfer data error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:874 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:937 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:964 +#, python-format +msgid "" +"PrefetchType config is wrong. PrefetchType must in 1,2,3,4. fetchtype " +"is:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:970 +msgid "Use default prefetch fetchtype. Prefetch fetchtype:Intelligent." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:982 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal.LUNcopy name: " +"%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1056 +#, python-format +msgid "" +"_get_iscsi_port_info: Failed to get iscsi port info through config IP " +"%(ip)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1101 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1124 +#, python-format +msgid "_parse_volume_type: type id: %(type_id)s config parameter is: %(params)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1157 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the configuration file " +"%(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1162 +#, python-format +msgid "The config parameters are: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1239 +#: cinder/volume/drivers/huawei/ssh_common.py:118 +#: cinder/volume/drivers/huawei/ssh_common.py:1265 +#, python-format +msgid "_check_conf_file: Config file invalid. %s must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1246 +#: cinder/volume/drivers/huawei/ssh_common.py:125 +msgid "_check_conf_file: Config file invalid. StoragePool must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1256 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1300 +msgid "Can not find lun in array" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:54 +#, python-format +msgid "ssh_read: Read SSH timeout. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:70 +msgid "No response message. Please check system status." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:101 +#: cinder/volume/drivers/huawei/ssh_common.py:1249 +msgid "do_setup" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:135 +#: cinder/volume/drivers/huawei/ssh_common.py:1287 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType is invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:169 +#, python-format +msgid "_get_login_info: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:224 +#, python-format +msgid "create_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:242 +#, python-format +msgid "" +"_name_translate: Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:279 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the element in configuration " +"file %(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:373 +#: cinder/volume/drivers/huawei/ssh_common.py:1451 +#, python-format +msgid "LUNType must be \"Thin\" or \"Thick\". LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:395 +msgid "" +"_parse_conf_lun_params: Use default prefetch type. 
Prefetch type: " +"Intelligent" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:421 +#, python-format +msgid "" +"_get_maximum_capacity_pool_id: Failed to get pool id. Please check config" +" file and make sure the StoragePool %s is created in storage array." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:436 +#, python-format +msgid "CLI command: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:466 +#, python-format +msgid "" +"_execute_cli: Can not connect to IP %(old)s, try to connect to the other " +"IP %(new)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:501 +#, python-format +msgid "_execute_cli: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:511 +#, python-format +msgid "delete_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:516 +#, python-format +msgid "delete_volume: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:570 +#, python-format +msgid "" +"create_volume_from_snapshot: snapshot name: %(snapshot)s, volume name: " +"%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:580 +#, python-format +msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:650 +#, python-format +msgid "_wait_for_luncopy: LUNcopy %(luncopyname)s status is %(status)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:688 +#, python-format +msgid "create_cloned_volume: src volume: %(src)s, tgt volume: %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:697 +#, python-format +msgid "Source volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:739 +#, python-format +msgid "" +"extend_volume: extended volume name: %(extended_name)s new added volume " +"name: %(added_name)s new added volume size: %(added_size)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:747 +#, python-format +msgid "extend_volume: volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:779 +#, python-format +msgid "create_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:785 +msgid "create_snapshot: Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:792 +#, python-format +msgid "create_snapshot: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:855 +#, python-format +msgid "delete_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:865 +#, python-format +msgid "" +"delete_snapshot: Can not delete snapshot %s for it is a source LUN of " +"LUNCopy." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:873 +#, python-format +msgid "delete_snapshot: Snapshot %(snap)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:916 +#, python-format +msgid "" +"%(func)s: %(msg)s\n" +"CLI command: %(cmd)s\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:933 +#, python-format +msgid "map_volume: Volume %s was not found." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1079 +#, python-format +msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1102 +#, python-format +msgid "remove_map: Host %s does not exist." 
+msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1106 +#, python-format +msgid "remove_map: Volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1119 +#, python-format +msgid "remove_map: No map between host %(host)s and volume %(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1138 +#, python-format +msgid "" +"_delete_map: There are IOs accessing the system. Retry to delete host map" +" %(mapid)s 10s later." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1146 +#, python-format +msgid "" +"_delete_map: Failed to delete host map %(mapid)s.\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1185 +msgid "_update_volume_stats: Updating volume stats." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1277 +msgid "_check_conf_file: Config file invalid. StoragePool must be specified." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1311 +msgid "" +"_get_device_type: The driver only supports Dorado5100 and Dorado 2100 G2 " +"now." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1389 +#, python-format +msgid "" +"create_volume_from_snapshot: %(device)s does not support create volume " +"from snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1396 +#, python-format +msgid "create_cloned_volume: %(device)s does not support clone volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1404 +#, python-format +msgid "extend_volume: %(device)s does not support extend volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1413 +#, python-format +msgid "create_snapshot: %(device)s does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:132 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:142 +#, python-format +msgid "Failed getting details for pool %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:179 +msgid "do_setup: No configured nodes." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:182 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:186 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:190 +msgid "Unable to determine system name" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:193 +msgid "Unable to determine system id" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:196 +msgid "Unable to determine pool extent size" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:203 +#: cinder/volume/drivers/netapp/iscsi.py:122 +#: cinder/volume/drivers/netapp/nfs.py:639 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:157 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:209 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:217 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:225 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:235 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:254 +msgid "The connector does not contain the required information." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:277 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:297 +msgid "CHAP secret exists for host but CHAP is disabled" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:302 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:314 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:316 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:333 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:342 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:365 +msgid "" +"Could not get FC connection information for the host-volume connection. " +"Is the host configured properly for FC connections?" 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:380 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:385 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:403 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:411 +msgid "terminate_connection: Failed to get host name from connector." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:421 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:447 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:459 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:470 +#, python-format +msgid "enter: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:474 +msgid "extend_volume: Extending a volume with snapshots is not supported." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:481 +#, python-format +msgid "leave: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:497 +#, python-format +msgid "enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:523 +#, python-format +msgid "leave: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:540 +#, python-format +msgid "" +"enter: retype: id=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:581 +#, python-format +msgid "" +"exit: retype: ild=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:622 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:623 +msgid "_update_volume_stats: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:44 +#, python-format +msgid "Could not find key in output of command %(cmd)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:64 +#, python-format +msgid "Failed to get code level (%s)." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:86 +#, python-format +msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:143 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:165 +#, python-format +msgid "Failed to find host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:178 +#, python-format +msgid "enter: get_host_from_connector: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:207 +#, python-format +msgid "leave: get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:218 +#, python-format +msgid "enter: create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:224 +msgid "create_host: Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:235 +msgid "create_host: No initiators or wwpns supplied." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:265 +#, python-format +msgid "leave: create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:275 +#, python-format +msgid "enter: map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:301 +#, python-format +msgid "" +"leave: map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host " +"%(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:311 +#, python-format +msgid "enter: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:318 +#, python-format +msgid "unmap_vol_from_host: No mapping of volume %(vol_name)s to any host found." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:324 +#, python-format +msgid "" +"unmap_vol_from_host: Multiple mappings of volume %(vol_name)s found, no " +"host specified." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:336 +#, python-format +msgid "" +"unmap_vol_from_host: No mapping of volume %(vol_name)s to host %(host) " +"found." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:348 +#, python-format +msgid "leave: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:377 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:383 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:390 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:397 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:402 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:408 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:417 +#, python-format +msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:452 +msgid "Protocol must be specified as ' iSCSI' or ' FC'." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:495 +#, python-format +msgid "enter: create_vdisk: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:498 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:525 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping%(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:535 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within theallotted %(to)d " +"seconds timeout. Terminating." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:544 +#, python-format +msgid "" +"enter: run_flashcopy: execute FlashCopy from source %(source)s to target " +"%(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:552 +#, python-format +msgid "leave: run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:572 +#, python-format +msgid "Loopcall: _check_vdisk_fc_mappings(), vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:595 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:631 +#, python-format +msgid "Calling _ensure_vdisk_no_fc_mappings: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:639 +#, python-format +msgid "enter: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:641 +#, python-format +msgid "Tried to delete non-existant vdisk %s." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:645 +#, python-format +msgid "leave: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:649 +#, python-format +msgid "enter: create_copy: snapshot %(src)s to %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:654 +#, python-format +msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:669 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt)s from vdisk %(src)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:691 +msgid "migrate_volume started without a vdisk copy in the expected pool." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:743 +#, python-format +msgid "" +"Ignore change IO group as storage code level is %(code_level)s, below " +"then 6.4.0.0" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:35 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:211 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:244 +#, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:53 +#, python-format +msgid "Expected no output from CLI command %(cmd)s, got %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:65 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:256 +#, python-format +msgid "" +"Failed to parse CLI output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:142 +msgid "Must pass wwpn or host to lsfabric." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:158 +#, python-format +msgid "Did not find success message nor error for %(fun)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:163 +msgid "" +"storwize_svc_multihostmap_enabled is set to False, not allowing multi " +"host mapping." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:347 +#, python-format +msgid "Did not find expected key %(key)s in %(fun)s: %(raw)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:382 +#, python-format +msgid "" +"Unexpected CLI response: header/row mismatch. header: %(header)s, row: " +"%(row)s" +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:419 +#, python-format +msgid "No element by given name %s." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:440 +msgid "Not a valid value for NaElement." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:444 +msgid "NaElement name cannot be null." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:468 +msgid "Type cannot be converted into NaElement." 
+msgstr "" + +#: cinder/volume/drivers/netapp/common.py:75 +msgid "Required configuration not found" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:103 +#, python-format +msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:109 +#, python-format +msgid "Storage family %s is not supported" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:116 +#, python-format +msgid "No default storage protocol found for storage family %(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:123 +#, python-format +msgid "" +"Protocol %(storage_protocol)s is not supported for storage family " +"%(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:130 +#, python-format +msgid "" +"NetApp driver of family %(storage_family)s and protocol " +"%(storage_protocol)s loaded" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:139 +msgid "Only loading netapp drivers supported." +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:158 +#, python-format +msgid "" +"The configured NetApp driver is deprecated. Please refer the link to " +"resolve the issue '%s'." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:69 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:105 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:150 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:166 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:175 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:191 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:227 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:232 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:238 +#, python-format +msgid "Failed to get LUN target details for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:249 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:252 +#, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:290 +#, python-format +msgid "Snapshot %s deletion successful" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:310 +#: cinder/volume/drivers/netapp/iscsi.py:565 +#: cinder/volume/drivers/netapp/nfs.py:99 +#: cinder/volume/drivers/netapp/nfs.py:206 +#, python-format +msgid "Resizing %s failed. Cleaning volume." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:325 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:412 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:431 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:511 +msgid "Object is not a NetApp LUN." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:543 +#, python-format +msgid "Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:545 +#, python-format +msgid "Error getting lun attribute. Exception: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:600 +#, python-format +msgid "No need to extend volume %s as it is already the requested new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:606 +#, python-format +msgid "Resizing lun %s directly to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:633 +#, python-format +msgid "Lun %(path)s geometry failed. Message - %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:662 +#, python-format +msgid "Moving lun %(name)s to %(new_name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:677 +#, python-format +msgid "Resizing lun %s using sub clone to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:684 +#, python-format +msgid "%s cannot be sub clone resized as it is hosted on compressed volume" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:690 +#, python-format +msgid "%s cannot be sub clone resized as it contains no blocks." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:707 +#, python-format +msgid "Post clone resize lun %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:718 +#, python-format +msgid "Failure staging lun %s to tmp." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:723 +#, python-format +msgid "Failure moving new cloned lun to %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:727 +#, python-format +msgid "Failure deleting staged tmp lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:730 +#, python-format +msgid "Unknown exception in post clone resize lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:732 +#, python-format +msgid "Exception details: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:736 +msgid "Getting lun block count." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:741 +#, python-format +msgid "Failure getting lun info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:785 +#, python-format +msgid "Failed to get vol with required size and extra specs for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:796 +#, python-format +msgid "Error provisioning vol %(name)s on %(volume)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:841 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:982 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:986 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1087 +msgid "Cluster ssc is not updated. No volume stats found." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1149 +#: cinder/volume/drivers/netapp/nfs.py:1080 +msgid "Unsupported ONTAP version. ONTAP version 7.3.1 and above is supported." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1153 +#: cinder/volume/drivers/netapp/nfs.py:1084 +#: cinder/volume/drivers/netapp/utils.py:320 +msgid "Api version could not be determined." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1164 +#, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1273 +#, python-format +msgid "Error finding luns for volume %s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1390 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1393 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1456 +msgid "Volume refresh job already running. Returning..." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1462 +#, python-format +msgid "Error refreshing vol capacity. Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1470 +#, python-format +msgid "Refreshing capacity info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:104 +#: cinder/volume/drivers/netapp/nfs.py:211 +#, python-format +msgid "NFS file %s not discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:224 +#, python-format +msgid "Copied image to volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:230 +#, python-format +msgid "Registering image in cache %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:237 +#, python-format +msgid "" +"Exception while registering image %(image_id)s in cache. Exception: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:250 +#, python-format +msgid "Found cache file for image %(image_id)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:263 +#, python-format +msgid "Cloning img from cache for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:271 +msgid "Image cache cleaning in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:282 +msgid "Image cache cleaning in progress." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:293 +#, python-format +msgid "Cleaning cache for share %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:298 +#, python-format +msgid "Files to be queued for deletion %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:305 +#, python-format +msgid "Exception during cache cleaning %(share)s. Message - %(ex)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:311 +msgid "Image cache cleaning done." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:336 +#, python-format +msgid "Bytes to free %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:343 +#, python-format +msgid "Delete file path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:358 +#, python-format +msgid "Deleting file at path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:363 +#, python-format +msgid "Exception during deleting %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:395 +#, python-format +msgid "Unexpected exception in cloning image %(image_id)s. 
Message: %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:411 +#, python-format +msgid "Cloning image %s from cache" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:415 +#, python-format +msgid "Cache share: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:425 +#, python-format +msgid "Unexpected exception during image cloning in share %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:431 +#, python-format +msgid "Cloning image %s directly in share" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:436 +#, python-format +msgid "Share is cloneable %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:443 +#, python-format +msgid "Image is raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:450 +#, python-format +msgid "Image will locally be converted to raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:457 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:467 +#, python-format +msgid "Performing post clone for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:474 +msgid "NFS file could not be discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:478 +msgid "Checking file for resize" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:482 +#, python-format +msgid "Resizing file to %sG" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:488 +msgid "Resizing image file failed." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:510 +msgid "Discover file retries exhausted." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:529 +#, python-format +msgid "Image location not in the expected format %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:557 +#, python-format +msgid "Found possible share matches %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:561 +msgid "Unexpected exception while short listing used share." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:599 +#, python-format +msgid "Extending volume %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:710 +#, python-format +msgid "Shares on vserver %s will only be used for provisioning." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:714 +#: cinder/volume/drivers/netapp/nfs.py:892 +msgid "No vserver set in config. SSC will be disabled." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:757 +#, python-format +msgid "Exception creating vol %(name)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:765 +#, python-format +msgid "Volume %s could not be created on shares." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:815 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:856 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:862 +#, python-format +msgid "" +"Cloning with params volume %(volume)s, src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:924 +msgid "No cluster ssc stats found. Wait for next volume stats update." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:941 +msgid "No shares found hence skipping ssc refresh." 
+msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:978 +#: cinder/volume/drivers/netapp/nfs.py:1221 +#, python-format +msgid "Shortlisted del elg files %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:983 +#: cinder/volume/drivers/netapp/nfs.py:1226 +#, python-format +msgid "Getting file usage for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:988 +#: cinder/volume/drivers/netapp/nfs.py:1231 +#, python-format +msgid "file-usage for path %(path)s is %(bytes)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1005 +#: cinder/volume/drivers/netapp/nfs.py:1268 +#, python-format +msgid "Share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1007 +#: cinder/volume/drivers/netapp/nfs.py:1270 +#, python-format +msgid "No share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1038 +#, python-format +msgid "Found volume %(vol)s for share %(share)s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1129 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1139 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:241 +#, python-format +msgid "Unexpected error while creating ssc vol list. Message - %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:272 +#, python-format +msgid "Exception querying aggr options. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:313 +#, python-format +msgid "Exception querying sis information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:347 +#, python-format +msgid "Exception querying mirror information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:379 +#, python-format +msgid "Exception querying storage disk. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:421 +#, python-format +msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:455 +#, python-format +msgid "Successfully completed stale refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:482 +#, python-format +msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:488 +#, python-format +msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:500 +msgid "Backend not a VolumeDriver." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:502 +msgid "Backend server not NaServer." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:505 +msgid "ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:517 +msgid "refresh stale ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:616 +msgid "Fatal error: User not permitted to query NetApp volumes." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:623 +#, python-format +msgid "" +"The user does not have access or sufficient privileges to use all ssc " +"apis. The ssc features %s may not work as expected." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:122 +msgid "ems executed successfully." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:124 +#, python-format +msgid "Failed to invoke ems. 
Message : %s" +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:137 +msgid "" +"It is not the recommended way to use drivers by NetApp. Please use " +"NetAppDriver to achieve the functionality." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:160 +msgid "Requires an NaServer instance." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:317 +msgid "Unsupported Clustered Data ONTAP version." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:99 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:150 +#, python-format +msgid "Extending volume: %(id)s New size: %(size)s GB" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:166 +#, python-format +msgid "Volume %s does not exist, it seems it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:179 +#, python-format +msgid "Cannot delete snapshot %(origin): %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:190 +#, python-format +msgid "Creating temp snapshot of the original volume: %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:200 +#: cinder/volume/drivers/nexenta/nfs.py:200 +#, python-format +msgid "Volume creation failed, deleting created snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:205 +#: cinder/volume/drivers/nexenta/nfs.py:205 +#, python-format +msgid "Failed to delete zfs snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:223 +#, python-format +msgid "Enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:250 +#, python-format +msgid "Remote NexentaStor appliance at %s should be SSH-bound." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:267 +#, python-format +msgid "" +"Cannot send source snapshot %(src)s to destination %(dst)s. Reason: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:275 +#, python-format +msgid "" +"Cannot delete temporary source snapshot %(src)s on NexentaStor Appliance:" +" %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:281 +#, python-format +msgid "Cannot delete source volume %(volume)s on NexentaStor Appliance: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:318 +#, python-format +msgid "Snapshot %s does not exist, it seems it was already deleted." 
+msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:439 +#: cinder/volume/drivers/windows/windows_utils.py:230 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:449 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:461 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:471 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:481 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:514 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:522 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:83 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:88 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:89 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:90 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:96 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:85 +#, python-format +msgid "Volume %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:89 +#, python-format +msgid "Folder %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:114 +#, python-format +msgid "Creating folder on Nexenta Store %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:146 +#, python-format +msgid "Cannot destroy created folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:176 +#, python-format +msgid "Cannot destroy cloned folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:227 +#, python-format +msgid "Folder %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:237 +#: cinder/volume/drivers/nexenta/nfs.py:268 +#, python-format +msgid "Snapshot %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:302 +#, python-format +msgid "Creating regular file: %s.This may take some time." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:313 +#, python-format +msgid "Regular file: %s created." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:365 +#, python-format +msgid "Sharing folder %s on Nexenta Store" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:393 +#, python-format +msgid "Shares loaded: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/utils.py:46 +#, python-format +msgid "Invalid value: \"%s\"" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:93 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:99 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:107 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:137 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:190 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:246 +#, python-format +msgid "Snapshot info: %(name)s => %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:321 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:79 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:166 +#, python-format +msgid "Invalid hp3parclient version. Version %s or greater required." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:179 +#, python-format +msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:193 +#, python-format +msgid "HP3PARCommon %(common_ver)s, hp3parclient %(rest_ver)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:212 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:494 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:228 +#, python-format +msgid "Failed to get domain because CPG (%s) doesn't exist on array." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:247 +#, python-format +msgid "Error extending volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:347 +#, python-format +msgid "command %s failed" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:390 +#, python-format +msgid "Error running ssh command: %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:586 +#, python-format +msgid "VV Set %s does not exist." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:633 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:684 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:752 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1004 +#, python-format +msgid "Failure in update_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1019 +#, python-format +msgid "Failure in clear_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1031 +#, python-format +msgid "Error attaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1039 +#, python-format +msgid "Error detaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:124 +#, python-format +msgid "Invalid IP address format '%s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:158 +#, python-format +msgid "" +"Found invalid iSCSI IP address(s) in configuration option(s) " +"hp3par_iscsi_ips or iscsi_ip_address '%s.'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:164 +msgid "At least one valid iSCSI IP address must be set." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:266 +msgid "Least busy iSCSI port not found, using first iSCSI port in list." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:75 +#, python-format +msgid "Failure while invoking function: %(func)s. Error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:162 +#, python-format +msgid "Error while terminating session: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:165 +msgid "Successfully established connection to the server." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:172 +#, python-format +msgid "Error while logging out the user: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:218 +#, python-format +msgid "" +"Not authenticated error occurred. Will create session and try API call " +"again: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:258 +#, python-format +msgid "Task: %(task)s progress: %(prog)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:262 +#, python-format +msgid "Task %s status: success." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:266 +#: cinder/volume/drivers/vmware/api.py:271 +#, python-format +msgid "Task: %(task)s failed with error: %(err)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:290 +msgid "Lease is ready." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:294 +msgid "Lease initializing..." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:304 +#, python-format +msgid "Error: unknown lease state %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:51 +#, python-format +msgid "Read %(bytes)s out of %(max)s from ThreadSafePipe." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:56 +#, python-format +msgid "Completed transfer of size %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:102 +#, python-format +msgid "Initiating image service update on image: %(image)s with meta: %(meta)s" +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:117 +#, python-format +msgid "Glance image: %s is now active." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:123 +#, python-format +msgid "Glance image: %s is in killed state." 
+msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:132 +#, python-format +msgid "Glance image %(id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:171 +#, python-format +msgid "" +"Exception during HTTP connection close in VMwareHTTPWrite. Exception is " +"%s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:203 +#: cinder/volume/drivers/vmware/read_write_util.py:292 +msgid "Could not retrieve URL from lease." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:206 +#, python-format +msgid "Opening vmdk url: %s for write." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:231 +#, python-format +msgid "Written %s bytes to vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:242 +#: cinder/volume/drivers/vmware/read_write_util.py:318 +#, python-format +msgid "Updating progress to %s percent." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:258 +#: cinder/volume/drivers/vmware/read_write_util.py:334 +msgid "Lease released." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:260 +#: cinder/volume/drivers/vmware/read_write_util.py:336 +#, python-format +msgid "Lease is already in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:295 +#, python-format +msgid "Opening vmdk url: %s for read." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:307 +#, python-format +msgid "Read %s bytes from vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:150 +#, python-format +msgid "Error(s): %s occurred in the call to RetrievePropertiesEx." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:189 +#, python-format +msgid "No such SOAP method %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:198 +#, python-format +msgid "httplib error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:209 +#, python-format +msgid "Socket error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:218 +#, python-format +msgid "Type error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:225 +#, python-format +msgid "Error in %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:112 +#, python-format +msgid "Returning spec value %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:115 +#, python-format +msgid "Invalid spec value: %s specified." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:118 +#, python-format +msgid "Returning default spec value: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:169 +#, python-format +msgid "%s not set." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:174 +#, python-format +msgid "Successfully setup driver: %(driver)s for server: %(ip)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:219 +msgid "Backing not available, no operation to be performed." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:287 +#, python-format +msgid "" +"Unable to pick datastore to accommodate %(size)s bytes from the " +"datastores: %(dss)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:293 +#, python-format +msgid "" +"Selected datastore: %(datastore)s with %(host_count)d connected host(s) " +"for the volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:375 +#, python-format +msgid "" +"Unable to find suitable datastore for volume of size: %(vol)s GB under " +"host: %(host)s. 
More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:385 +#, python-format +msgid "Unable to find host to accommodate a disk of size: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:412 +#, python-format +msgid "" +"Unable to find suitable datastore for volume: %(vol)s under host: " +"%(host)s. More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:422 +#, python-format +msgid "Unable to create volume: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:441 +#, python-format +msgid "The instance: %s for which initialize connection is called, exists." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:448 +#, python-format +msgid "There is no backing for the volume: %s. Need to create one." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:456 +msgid "The instance for which initialize connection is called, does not exist." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:461 +#, python-format +msgid "Trying to boot from an empty volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:470 +#, python-format +msgid "" +"Returning connection_info: %(info)s for volume: %(volume)s with " +"connector: %(connector)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:518 +#, python-format +msgid "Snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:523 +#, python-format +msgid "There is no backing, so will not create snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:528 +#, python-format +msgid "Successfully created snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:549 +#, python-format +msgid "Delete snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:554 +#, python-format +msgid "There is no backing, and so there is no snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:558 +#, python-format +msgid "Successfully deleted snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:586 +#, python-format +msgid "Successfully cloned new backing: %(back)s from source VMDK file: %(vmdk)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:603 +#, python-format +msgid "" +"There is no backing for the source volume: %(svol)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:633 +#, python-format +msgid "" +"There is no backing for the source snapshot: %(snap)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:642 +#: cinder/volume/drivers/vmware/vmdk.py:982 +#, python-format +msgid "" +"There is no snapshot point for the snapshoted volume: %(snap)s. Not " +"creating any backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:678 +#, python-format +msgid "Cannot create image of disk format: %s. Only vmdk disk format is accepted." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:713 +#: cinder/volume/drivers/vmware/vmdk.py:771 +#, python-format +msgid "Fetching glance image: %(id)s to server: %(host)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:722 +#: cinder/volume/drivers/vmware/vmdk.py:792 +#, python-format +msgid "Done copying image: %(id)s to volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:725 +#, python-format +msgid "" +"Exception in copy_image_to_volume: %(excep)s. Deleting the backing: " +"%(back)s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:746 +#, python-format +msgid "Exception in _select_ds_for_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:749 +#, python-format +msgid "Selected datastore %(ds)s for new volume of size %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:784 +#, python-format +msgid "Exception in copy_image_to_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:787 +#, python-format +msgid "Deleting the backing: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:808 +#, python-format +msgid "Copy glance image: %s to create new volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:842 +msgid "Upload to glance of attached volume is not supported." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:847 +#, python-format +msgid "Copy Volume: %s to new image." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:853 +#, python-format +msgid "Backing not found, creating for volume: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:872 +#, python-format +msgid "Done copying volume %(vol)s to a new image %(img)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:922 +#, python-format +msgid "Relocating volume: %(backing)s to %(ds)s and %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:961 +#: cinder/volume/drivers/vmware/volumeops.py:630 +#, python-format +msgid "Successfully created clone: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:974 +#, python-format +msgid "" +"There is no backing for the snapshoted volume: %(snap)s. Not creating any" +" backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1010 +#, python-format +msgid "" +"There is no backing for the source volume: %(src)s. Not creating any " +"backing for volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1018 +#, python-format +msgid "Linked clone of source volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:94 +#, python-format +msgid "Downloading image: %s from glance image server as a flat vmdk file." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:107 +#: cinder/volume/drivers/vmware/vmware_images.py:126 +#, python-format +msgid "Downloaded image: %s from glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:113 +#, python-format +msgid "Downloading image: %s from glance image server using HttpNfc import." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:132 +#, python-format +msgid "Uploading image: %s to the Glance image server using HttpNfc export." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:158 +#, python-format +msgid "Uploaded image: %s to the Glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:87 +#, python-format +msgid "Did not find any backing with name: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:94 +#, python-format +msgid "Deleting the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:97 +#, python-format +msgid "Initiated deletion of VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:99 +#, python-format +msgid "Deleted the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:237 +#, python-format +msgid "There are no valid datastores attached to %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:289 +#, python-format +msgid "" +"Creating folder: %(child_folder_name)s under parent folder: " +"%(parent_folder)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:306 +#, python-format +msgid "Child folder already present: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:314 +#, python-format +msgid "Created child folder: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:365 +#, python-format +msgid "Spec for creating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:383 +#, python-format +msgid "" +"Creating volume backing name: %(name)s disk_type: %(disk_type)s size_kb: " +"%(size_kb)s at folder: %(folder)s resourse pool: %(resource_pool)s " +"datastore name: %(ds_name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:395 +#, python-format +msgid "Initiated creation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:398 +#, python-format +msgid "Successfully created volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:438 +#, python-format +msgid "Spec for relocating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:452 +#, python-format +msgid "" +"Relocating backing: %(backing)s to datastore: %(ds)s and resource pool: " +"%(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:462 +#, python-format +msgid "Initiated relocation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:464 +#, python-format +msgid "" +"Successfully relocated volume backing: %(backing)s to datastore: %(ds)s " +"and resource pool: %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:474 +#, python-format +msgid "Moving backing: %(backing)s to folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:479 +#, python-format +msgid "Initiated move of volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:482 +#, python-format +msgid "Successfully moved volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:494 +#, python-format +msgid "Snapshoting backing: %(backing)s with name: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:501 +#, python-format +msgid "Initiated snapshot of volume backing: %(backing)s named: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:505 +#, python-format +msgid "Successfully created snapshot: %(snap)s for volume backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:553 +#, python-format +msgid "Deleting the snapshot: %(name)s from backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:558 +#, python-format +msgid "" +"Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " +"delete anything." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:565 +#, python-format +msgid "Initiated snapshot: %(name)s deletion for backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:569 +#, python-format +msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:597 +#, python-format +msgid "Spec for cloning the backing: %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:613 +#, python-format +msgid "" +"Creating a clone of backing: %(back)s, named: %(name)s, clone type: " +"%(type)s from snapshot: %(snap)s on datastore: %(ds)s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:627 +#, python-format +msgid "Initiated clone of backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:638 +#, python-format +msgid "Deleting file: %(file)s under datacenter: %(dc)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:646 +#, python-format +msgid "Initiated deletion via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:648 +#, python-format +msgid "Successfully deleted file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:701 +msgid "Copying disk data before snapshot of the VM" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:711 +#, python-format +msgid "Initiated copying disk data via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:713 +#, python-format +msgid "Successfully copied disk at: %(src)s to: %(dest)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:722 +#, python-format +msgid "Deleting vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:729 +#, python-format +msgid "Initiated deleting vmdk file via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:731 +#, python-format +msgid "Deleted vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/windows/windows.py:102 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:47 +#, python-format +msgid "" +"check_for_setup_error: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:53 +msgid "check_for_setup_error: there is no ISCSI traffic listening." +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:63 +#, python-format +msgid "" +"get_host_information: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:73 +#, python-format +msgid "" +"get_host_information: the ISCSI target information could not be " +"retrieved. WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:105 +#, python-format +msgid "" +"associate_initiator_with_iscsi_target: an association between initiator: " +"%(init)s and target name: %(target)s could not be established. WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:123 +#, python-format +msgid "" +"delete_iscsi_target: error when deleting the iscsi target associated with" +" target name: %(target)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:139 +#, python-format +msgid "" +"create_volume: error when creating the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:157 +#, python-format +msgid "" +"delete_volume: error when deleting the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:177 +#, python-format +msgid "" +"create_snapshot: error when creating the snapshot name: %(vol_name)s . 
" +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:193 +#, python-format +msgid "" +"create_volume_from_snapshot: error when creating the volume name: " +"%(vol_name)s from snapshot name: %(snap_name)s. WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:208 +#, python-format +msgid "" +"delete_snapshot: error when deleting the snapshot name: %(snap_name)s . " +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:223 +#, python-format +msgid "" +"create_iscsi_target: error when creating iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:240 +#, python-format +msgid "" +"remove_iscsi_target: error when deleting iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:255 +#, python-format +msgid "" +"add_disk_to_target: error adding disk associated to volume : %(vol_name)s" +" to the target name: %(tar_name)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:273 +#, python-format +msgid "" +"copy_vhd_disk: error when copying disk from source path : %(src_path)s to" +" destination path: %(dest_path)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:290 +#, python-format +msgid "" +"extend: error when extending the volume: %(vol_name)s .WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/flows/common.py:52 +#, python-format +msgid "Restoring source %(source_volid)s status to %(status)s" +msgstr "" + +#: cinder/volume/flows/common.py:58 +#, python-format +msgid "" +"Failed setting source volume %(source_volid)s back to its initial " +"%(source_status)s status" +msgstr "" + +#: cinder/volume/flows/common.py:83 +#, python-format +msgid "Updating volume: %(volume_id)s with %(update)s due to: %(reason)s" +msgstr "" + +#: cinder/volume/flows/common.py:90 +#: cinder/volume/flows/manager/create_volume.py:676 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(update)s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:81 +#, python-format +msgid "Originating snapshot status must be one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:103 +#, python-format +msgid "" +"Unable to create a volume from an originating source volume when its " +"status is not one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:126 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than the snapshot size " +"%(snap_size)sGB. They must be >= original snapshot size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:135 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than original volume size " +"%(source_size)sGB. They must be >= original volume size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:144 +#, python-format +msgid "Volume size %(size)s must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:186 +#, python-format +msgid "" +"Size of specified image %(image_size)sGB is larger than volume size " +"%(volume_size)sGB." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:194 +#, python-format +msgid "" +"Volume size %(volume_size)sGB cannot be smaller than the image minDisk " +"size %(min_disk)sGB." 
+msgstr "" + +#: cinder/volume/flows/api/create_volume.py:212 +#, python-format +msgid "Metadata property key %s greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:217 +#, python-format +msgid "Metadata property key %s value greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:254 +#, python-format +msgid "Availability zone '%s' is invalid" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:267 +msgid "Volume must be in the same availability zone as the snapshot" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:276 +msgid "Volume must be in the same availability zone as the source volume" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:315 +msgid "Volume type will be changed to be the same as the source volume." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:463 +#, python-format +msgid "Failed destroying volume entry %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:546 +#, python-format +msgid "Failed rolling back quota for %s reservations" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:590 +#, python-format +msgid "Failed to update quota for deleting volume: %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:678 +#: cinder/volume/flows/manager/create_volume.py:192 +#, python-format +msgid "Volume %s: create failed" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:682 +msgid "Unexpected build error:" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:105 +#, python-format +msgid "" +"Volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d due to " +"%(reason)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:124 +#, python-format +msgid "Volume %s: re-scheduled" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:141 +#, python-format +msgid "Updating volume %(volume_id)s with %(update)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:146 +#, python-format +msgid "Volume %s: resetting 'creating' status failed." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:165 +#, python-format +msgid "Volume %s: rescheduling failed" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:308 +#, python-format +msgid "" +"Failed notifying about the volume action %(event)s for volume " +"%(volume_id)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:345 +#, python-format +msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:347 +#, python-format +msgid "" +"Failed updating volume %(vol_id)s metadata using the provided " +"%(src_type)s %(src_id)s metadata" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:405 +#, python-format +msgid "" +"Failed fetching snapshot %(snapshot_id)s bootable flag using the provided" +" glance snapshot %(snapshot_ref_id)s volume reference" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:418 +#, python-format +msgid "Marking volume %s as bootable." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:421 +#, python-format +msgid "Failed updating volume %(volume_id)s bootable flag to true" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:448 +#, python-format +msgid "" +"Attempting download of %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s." 
+msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:455 +#: cinder/volume/flows/manager/create_volume.py:466 +#, python-format +msgid "" +"Failed to copy image %(image_id)s to volume: %(volume_id)s, error: " +"%(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:461 +#, python-format +msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:475 +#, python-format +msgid "" +"Downloaded image %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s successfully." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:512 +#, python-format +msgid "" +"Creating volume glance metadata for volume %(volume_id)s backed by image " +"%(image_id)s with: %(vol_metadata)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:526 +#, python-format +msgid "" +"Cloning %(volume_id)s from image %(image_id)s at location " +"%(image_location)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:552 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(updates)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:574 +#, python-format +msgid "Unable to create volume. Volume driver %s not initialized" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:588 +#, python-format +msgid "" +"Volume %(volume_id)s: being created using %(functor)s with specification:" +" %(volume_spec)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:611 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with creation provided " +"model %(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:619 +#, python-format +msgid "Volume %s: creating export" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:633 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with driver provided model " +"%(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:680 +#, python-format +msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" +msgstr "" + +#~ msgid "Error retrieving volume status: %s" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get system name" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get storage pool data" +#~ msgstr "" + +#~ msgid "Cannot find any Fibre Channel HBAs" +#~ msgstr "" + +#~ msgid "Volume status must be available or error" +#~ msgstr "" + +#~ msgid "No backend config with id %s" +#~ msgstr "" + +#~ msgid "No sm_flavor called %s" +#~ msgstr "" + +#~ msgid "No sm_volume with id %s" +#~ msgstr "" + +#~ msgid "Error: %s" +#~ msgstr "" + +#~ msgid "Unexpected state while cloning %s" +#~ msgstr "" + +#~ msgid "iSCSI device not found at %s" +#~ msgstr "" + +#~ msgid "Fibre Channel device not found." +#~ msgstr "" + +#~ msgid "Uncaught exception" +#~ msgstr "" + +#~ msgid "Out reactor registered" +#~ msgstr "" + +#~ msgid "CONSUMER GOT %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT QUEUED %(data)s" +#~ msgstr "" + +#~ msgid "Could not create IPC directory %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT %(data)s" +#~ msgstr "" + +#~ msgid "May specify only one of snapshot, imageRef or source volume" +#~ msgstr "" + +#~ msgid "Volume size cannot be lesser than the Snapshot size" +#~ msgstr "" + +#~ msgid "Unable to clone volumes that are in an error state" +#~ msgstr "" + +#~ msgid "Clones currently must be >= original volume size." 
+#~ msgstr "" + +#~ msgid "Volume size '%s' must be an integer and greater than 0" +#~ msgstr "" + +#~ msgid "Size of specified image is larger than volume size." +#~ msgstr "" + +#~ msgid "Image minDisk size is larger than the volume size." +#~ msgstr "" + +#~ msgid "" +#~ msgstr "" + +#~ msgid "Availability zone is invalid" +#~ msgstr "" + +#~ msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +#~ msgstr "" + +#~ msgid "volume %s: creating from snapshot" +#~ msgstr "" + +#~ msgid "volume %s: creating from existing volume" +#~ msgstr "" + +#~ msgid "volume %s: creating from image" +#~ msgstr "" + +#~ msgid "volume %s: creating" +#~ msgstr "" + +#~ msgid "Setting volume: %s status to error after failed image copy." +#~ msgstr "" + +#~ msgid "Unexpected Error: " +#~ msgstr "" + +#~ msgid "volume %s: creating export" +#~ msgstr "" + +#~ msgid "volume %s: create failed" +#~ msgstr "" + +#~ msgid "volume %s: created successfully" +#~ msgstr "" + +#~ msgid "volume %s: Error trying to reschedule create" +#~ msgstr "" + +#~ msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +#~ msgstr "" + +#~ msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +#~ msgstr "" + +#~ msgid "Downloaded image %(image_id)s to %(volume_id)s successfully." +#~ msgstr "" + +#~ msgid "Array Mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "LUN %(lun)s of size %(size)s MB is created." +#~ msgstr "" + +#~ msgid "Array mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "Failed to attach iser target for volume %(volume_id)s." +#~ msgstr "" + +#~ msgid "Fetching %s" +#~ msgstr "" + +#~ msgid "Link Local address is not found.:%s" +#~ msgstr "" + +#~ msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +#~ msgstr "" + +#~ msgid "Started %(name)s on %(_host)s:%(_port)s" +#~ msgstr "" + +#~ msgid "Unable to find a Fibre Channel volume device" +#~ msgstr "" + +#~ msgid "Volume device not found at %s" +#~ msgstr "" + +#~ msgid "Unable to find Volume Group: %s" +#~ msgstr "" + +#~ msgid "Failed to create Volume Group: %s" +#~ msgstr "" + +#~ msgid "snapshot %(snap_name)s: creating" +#~ msgstr "" + +#~ msgid "Running with CoraidDriver for ESM EtherCLoud" +#~ msgstr "" + +#~ msgid "Update session cookie %(session)s" +#~ msgstr "" + +#~ msgid "Message : %(message)s" +#~ msgstr "" + +#~ msgid "Error while trying to set group: %(message)s" +#~ msgstr "" + +#~ msgid "Unable to find group: %(group)s" +#~ msgstr "" + +#~ msgid "ESM urlOpen error" +#~ msgstr "" + +#~ msgid "JSON Error" +#~ msgstr "" + +#~ msgid "Request without URL" +#~ msgstr "" + +#~ msgid "Configure data : %s" +#~ msgstr "" + +#~ msgid "Configure response : %s" +#~ msgstr "" + +#~ msgid "Unable to retrive volume infos for volume %(volname)s" +#~ msgstr "" + +#~ msgid "Cannot login on Coraid ESM" +#~ msgstr "" + +#~ msgid "Fail to create volume %(volname)s" +#~ msgstr "" + +#~ msgid "Failed to delete volume %(volname)s" +#~ msgstr "" + +#~ msgid "Failed to Create Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "Failed to Delete Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "Failed to Create Volume from Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "fmt = %(fmt)s backed by: %(backing_file)s" +#~ msgstr "" + +#~ msgid "Expected image to be in raw format, but is %s" +#~ msgstr "" + +#~ msgid "volume group %s doesn't exist" +#~ msgstr "" + +#~ msgid "Error retrieving volume stats: %s" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Could not get system name" +#~ msgstr "" + +#~ msgid "CPG (%s) must 
be in a domain" +#~ msgstr "" + +#~ msgid "Error populating default encryption types!" +#~ msgstr "" + +#~ msgid "Unexpected error while running command." +#~ msgstr "" + +#~ msgid "Nexenta SA returned the error" +#~ msgstr "" + +#~ msgid "Ignored target group creation error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group member addition error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LU creation error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Copying metadata from snapshot %(snap_volume_id)s to %(volume_id)s" +#~ msgstr "" + +#~ msgid "Connection to glance failed" +#~ msgstr "" + +#~ msgid "Invalid snapshot" +#~ msgstr "" + +#~ msgid "Invalid input received" +#~ msgstr "" + +#~ msgid "Invalid volume type" +#~ msgstr "" + +#~ msgid "Invalid volume" +#~ msgstr "" + +#~ msgid "Invalid host" +#~ msgstr "" + +#~ msgid "Invalid auth key" +#~ msgstr "" + +#~ msgid "Invalid metadata" +#~ msgstr "" + +#~ msgid "Invalid metadata size" +#~ msgstr "" + +#~ msgid "Migration error" +#~ msgstr "" + +#~ msgid "Quota exceeded" +#~ msgstr "" + +#~ msgid "Connection to swift failed" +#~ msgstr "" + +#~ msgid "Volume migration failed" +#~ msgstr "" + +#~ msgid "SSH command injection detected" +#~ msgstr "" + +#~ msgid "Invalid qos specs" +#~ msgstr "" + +#~ msgid "debug in callback: %s" +#~ msgstr "" + +#~ msgid "Expected object of type: %s" +#~ msgstr "" + +#~ msgid "timefunc: '%(name)s' took %(total_time).2f secs" +#~ msgstr "" + +#~ msgid "base image still has %s snapshots so not deleting base image" +#~ msgstr "" + +#~ msgid "Failed to rename migration destination volume %(vol)s to %(name)s" +#~ msgstr "" + +#~ msgid "Resize volume \"%(name)s\" to %(size)s" +#~ msgstr "" + +#~ msgid "Volume \"%(name)s\" resized. New size is %(size)s" +#~ msgstr "" + +#~ msgid "Invalid snapshot backing file format: %s" +#~ msgstr "" + +#~ msgid "Extend volume from %(old_size) to %(new_size)" +#~ msgstr "" + +#~ msgid "pool %s doesn't exist" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Could not get system name." +#~ msgstr "" + +#~ msgid "Disk not found: %s" +#~ msgstr "" + +#~ msgid "read timed out" +#~ msgstr "" + +#~ msgid "check_for_setup_error." +#~ msgstr "" + +#~ msgid "check_for_setup_error: Can not get device type." +#~ msgstr "" + +#~ msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +#~ msgstr "" + +#~ msgid "_get_device_type: Storage Pool must be configured." +#~ msgstr "" + +#~ msgid "create_volume:volume name: %s." +#~ msgstr "" + +#~ msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +#~ msgstr "" + +#~ msgid "create_export: volume name:%s" +#~ msgstr "" + +#~ msgid "create_export:Volume %(name)s does not exist." +#~ msgstr "" + +#~ msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +#~ msgstr "" + +#~ msgid "terminate_connection:Host does not exist. Host name:%(host)s." +#~ msgstr "" + +#~ msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +#~ msgstr "" + +#~ msgid "create_snapshot:Device does not support snapshot." +#~ msgstr "" + +#~ msgid "create_snapshot:Resource pool needs 1GB valid size at least." +#~ msgstr "" + +#~ msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +#~ msgstr "" + +#~ msgid "create_snapshot:Snapshot does not exist. 
Snapshot name:%(name)s" +#~ msgstr "" + +#~ msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +#~ msgstr "" + +#~ msgid "delete_snapshot:Device does not support snapshot." +#~ msgstr "" + +#~ msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +#~ msgstr "" + +#~ msgid "_check_conf_file: %s" +#~ msgstr "" + +#~ msgid "Write login information to xml error. %s" +#~ msgstr "" + +#~ msgid "_get_login_info error. %s" +#~ msgstr "" + +#~ msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +#~ msgstr "" + +#~ msgid "_get_lun_set_info:%s" +#~ msgstr "" + +#~ msgid "_get_iscsi_info:%s" +#~ msgstr "" + +#~ msgid "CLI command:%s" +#~ msgstr "" + +#~ msgid "_execute_cli:%s" +#~ msgstr "" + +#~ msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +#~ msgstr "" + +#~ msgid "_get_tgt_iqn:iSCSI IP is %s." +#~ msgstr "" + +#~ msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +#~ msgstr "" + +#~ msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +#~ msgstr "" + +#~ msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +#~ msgstr "" + +#~ msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." +#~ msgstr "" + +#~ msgid "Ignored target creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group member addition error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LU creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LUN mapping entry addition error while ensuring export" +#~ msgstr "" + +#~ msgid "Invalid source volume %(reason)s." +#~ msgstr "" + +#~ msgid "The request is invalid." +#~ msgstr "" + +#~ msgid "Volume %(volume_id)s persistence file could not be found." +#~ msgstr "" + +#~ msgid "No disk at %(location)s" +#~ msgstr "" + +#~ msgid "Class %(class_name)s could not be found: %(exception)s" +#~ msgstr "" + +#~ msgid "Action not allowed." +#~ msgstr "" + +#~ msgid "Key pair %(key_name)s already exists." +#~ msgstr "" + +#~ msgid "Migration error: %(reason)s" +#~ msgstr "" + +#~ msgid "Maximum volume/snapshot size exceeded" +#~ msgstr "" + +#~ msgid "3PAR Host already exists: %(err)s. %(info)s" +#~ msgstr "" + +#~ msgid "Backup volume %(volume_id)s type not recognised." +#~ msgstr "" + +#~ msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s" +#~ msgstr "" + +#~ msgid "ssh_read: Read SSH timeout" +#~ msgstr "" + +#~ msgid "do_setup." +#~ msgstr "" + +#~ msgid "create_volume: volume name: %s." +#~ msgstr "" + +#~ msgid "delete_volume: volume name: %s." +#~ msgstr "" + +#~ msgid "create_cloned_volume: src volume: %(src)s tgt volume: %(tgt)s" +#~ msgstr "" + +#~ msgid "create_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" +#~ msgstr "" + +#~ msgid "delete_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" +#~ msgstr "" + +#~ msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Updating volume stats" +#~ msgstr "" + +#~ msgid "restore finished." +#~ msgstr "" + +#~ msgid "Error encountered during initialization of driver: %s" +#~ msgstr "" + +#~ msgid "Unabled to update stats, driver is uninitialized" +#~ msgstr "" + +#~ msgid "Snapshot file at %s does not exist." 
+#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %s does not exist" +#~ msgstr "" + +#~ msgid "Login to 3PAR array invalid" +#~ msgstr "" + +#~ msgid "There are no datastores present under %s." +#~ msgstr "" + +#~ msgid "Size for volume: %s not found, skipping secure delete." +#~ msgstr "" + +#~ msgid "Could not find attribute for LUN named %s" +#~ msgstr "" + +#~ msgid "Cleaning up incomplete backup operations" +#~ msgstr "" + +#~ msgid "Resetting volume %s to available (was backing-up)" +#~ msgstr "" + +#~ msgid "Resetting volume %s to error_restoring (was restoring-backup)" +#~ msgstr "" + +#~ msgid "Resetting backup %s to error (was creating)" +#~ msgstr "" + +#~ msgid "Resetting backup %s to available (was restoring)" +#~ msgstr "" + +#~ msgid "Resuming delete on backup: %s" +#~ msgstr "" + +#~ msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +#~ msgstr "" + +#~ msgid "create_backup finished. backup: %s" +#~ msgstr "" + +#~ msgid "delete_backup started, backup: %s" +#~ msgstr "" + +#~ msgid "delete_backup finished, backup %s deleted" +#~ msgstr "" + +#~ msgid "JSON transfer Error" +#~ msgstr "" + +#~ msgid "create volume error: %(err)s" +#~ msgstr "" + +#~ msgid "Create snapshot error." +#~ msgstr "" + +#~ msgid "Create luncopy error." +#~ msgstr "" + +#~ msgid "_find_host_lun_id transfer data error! " +#~ msgstr "" + +#~ msgid "ssh_read: Read SSH timeout." +#~ msgstr "" + +#~ msgid "There are no hosts in the inventory." +#~ msgstr "" + +#~ msgid "Unable to create volume: %(vol)s on the hosts: %(hosts)s." +#~ msgstr "" + +#~ msgid "Successfully cloned new backing: %s." +#~ msgstr "" + +#~ msgid "Successfully reverted clone: %(clone)s to snapshot: %(snapshot)s." +#~ msgstr "" + +#~ msgid "Copying backing files from %(src)s to %(dest)s." +#~ msgstr "" + +#~ msgid "Initiated copying of backing via task: %s." +#~ msgstr "" + +#~ msgid "Successfully copied backing to %s." +#~ msgstr "" + +#~ msgid "Registering backing at path: %s to inventory." +#~ msgstr "" + +#~ msgid "Initiated registring backing, task: %s." +#~ msgstr "" + +#~ msgid "Successfully registered backing: %s." +#~ msgstr "" + +#~ msgid "Reverting backing to snapshot: %s." +#~ msgstr "" + +#~ msgid "Initiated reverting snapshot via task: %s." +#~ msgstr "" + +#~ msgid "Successfully reverted to snapshot: %s." +#~ msgstr "" + +#~ msgid "Successfully copied disk data to: %s." +#~ msgstr "" + +#~ msgid "Error(s): %s occurred in the call to RetrieveProperties." +#~ msgstr "" + +#~ msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +#~ msgstr "" + +#~ msgid "Deploy v1 of the Cinder API. " +#~ msgstr "" + +#~ msgid "Deploy v2 of the Cinder API. " +#~ msgstr "" + +#~ msgid "_read_xml:%s" +#~ msgstr "" + +#~ msgid "request ip info is %s." +#~ msgstr "" + +#~ msgid "new str info is %s." +#~ msgstr "" + +#~ msgid "Failed to create iser target for volume %(volume_id)s." +#~ msgstr "" + +#~ msgid "Failed to remove iser target for volume %(volume_id)s." 
+#~ msgstr "" + +#~ msgid "rtstool is not installed correctly" +#~ msgstr "" + +#~ msgid "Creating iser_target for: %s" +#~ msgstr "" + +#~ msgid "Failed to create iser target for volume id:%(vol_id)s: %(e)s" +#~ msgstr "" + +#~ msgid "Removing iser_target for: %s" +#~ msgstr "" + +#~ msgid "Failed to remove iser target for volume id:%(vol_id)s: %(e)s" +#~ msgstr "" + +#~ msgid "Volume %s does not exist, it seems it was already deleted" +#~ msgstr "" + +#~ msgid "Executing zfs send/recv on the appliance" +#~ msgstr "" + +#~ msgid "zfs send/recv done, new volume %s created" +#~ msgstr "" + +#~ msgid "Failed to delete temp snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" + +#~ msgid "Failed to delete zfs recv snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" + +#~ msgid "rbd export-diff failed - %s" +#~ msgstr "" + +#~ msgid "rbd import-diff failed - %s" +#~ msgstr "" + +#~ msgid "%s is not on GPFS. Perhaps GPFS not mounted." +#~ msgstr "" + +#~ msgid "Folder %s does not exist, it seems it was already deleted." +#~ msgstr "" + +#~ msgid "No 'os-update_readonly_flag' was specified in request." +#~ msgstr "" + +#~ msgid "Volume 'readonly' flag must be specified in request as a boolean." +#~ msgstr "" + +#~ msgid "ISER provider_location not stored, using discovery" +#~ msgstr "" + +#~ msgid "Could not find iSER export for volume %s" +#~ msgstr "" + +#~ msgid "ISER Discovery: Found %s" +#~ msgstr "" + +#~ msgid "Failed to access the device on the path %(path)s: %(error)s." +#~ msgstr "" + +#~ msgid "iSER device not found at %s" +#~ msgstr "" + +#~ msgid "Found iSER node %(host_device)s (after %(tries)s rescans)." +#~ msgstr "" + +#~ msgid "Skipping ensure_export. No iser_target provisioned for volume: %s" +#~ msgstr "" + +#~ msgid "Skipping remove_export. No iser_target provisioned for volume: %s" +#~ msgstr "" + +#~ msgid "Downloading image: %s from glance image server." +#~ msgstr "" + +#~ msgid "Uploading image: %s to the Glance image server." +#~ msgstr "" + +#~ msgid "Invalid request body" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: prefix %s" +#~ msgstr "" + +#~ msgid "Schedule volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete schedule volume using flow: %s" +#~ msgstr "" + +#~ msgid "Create volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete create volume workflow" +#~ msgstr "" + +#~ msgid "Expected volume result not found" +#~ msgstr "" + +#~ msgid "Manager volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete manager volume workflow" +#~ msgstr "" + +#~ msgid "Unable to update stats, driver is uninitialized" +#~ msgstr "" + +#~ msgid "Bad reponse from server: %s" +#~ msgstr "" + +#~ msgid "%(flow)s has moved into state %(state)s from state %(old_state)s" +#~ msgstr "" + +#~ msgid "No request spec, will not reschedule" +#~ msgstr "" + +#~ msgid "No retry filter property or associated retry info, will not reschedule" +#~ msgstr "" + +#~ msgid "Retry info not present, will not reschedule" +#~ msgstr "" + +#~ msgid "Clear capabilities" +#~ msgstr "" + +#~ msgid "This usually means the volume was never succesfully created." +#~ msgstr "" + +#~ msgid "setting LU uppper (end) limit to %s" +#~ msgstr "" + +#~ msgid "Can't find lun or lun goup in array" +#~ msgstr "" + +#~ msgid "Volume to be restored to is smaller than the backup to be restored" +#~ msgstr "" + +#~ msgid "Volume driver '%(driver)s' not initialized." 
+#~ msgstr "" + +#~ msgid "in looping call" +#~ msgstr "" + +#~ msgid "Is the appropriate service running?" +#~ msgstr "" + +#~ msgid "Could not find another host" +#~ msgstr "" + +#~ msgid "Not enough allocatable volume gigabytes remaining" +#~ msgstr "" + +#~ msgid "Unable to update stats on non-intialized Volume Group: %s" +#~ msgstr "" + +#~ msgid "do_setup: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "migrate_volume started with more than one vdisk copy" +#~ msgstr "" + +#~ msgid "migrate_volume: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "Selected datastore: %s for the volume." +#~ msgstr "" + +#~ msgid "There are no valid datastores present under %s." +#~ msgstr "" + +#~ msgid "Unable to create volume, driver not initialized" +#~ msgstr "" + +#~ msgid "Migration %(migration_id)s could not be found." +#~ msgstr "" + +#~ msgid "Bad driver response status: %(status)s" +#~ msgstr "" + +#~ msgid "Instance %(instance_id)s could not be found." +#~ msgstr "" + +#~ msgid "Volume retype failed: %(reason)s" +#~ msgstr "" + +#~ msgid "SIGTERM received" +#~ msgstr "" + +#~ msgid "Child %(pid)d exited with status %(code)d" +#~ msgstr "" + +#~ msgid "_wait_child %d" +#~ msgstr "" + +#~ msgid "wait wrap.failed %s" +#~ msgstr "" + +#~ msgid "" +#~ "Report interval must be less than " +#~ "service down time. Current config: " +#~ ". Setting " +#~ "service_down_time to: %(new_service_down_time)s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target for volume %(name)s." +#~ msgstr "" + +#~ msgid "Updating iscsi target: %s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target %(name)s: %(e)s" +#~ msgstr "" + +#~ msgid "Caught '%(exception)s' exception." +#~ msgstr "" + +#~ msgid "Get code level failed" +#~ msgstr "" + +#~ msgid "do_setup: Could not get system name" +#~ msgstr "" + +#~ msgid "Failed to get license information." +#~ msgstr "" + +#~ msgid "do_setup: No configured nodes" +#~ msgstr "" + +#~ msgid "enter: _get_chap_secret_for_host: host name %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_chap_secret_for_host: host name " +#~ "%(host_name)s with secret %(chap_secret)s" +#~ msgstr "" + +#~ msgid "" +#~ "_create_host: Cannot clean host name. " +#~ "Host name is not unicode or string" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: %s" +#~ msgstr "" + +#~ msgid "leave: _get_host_from_connector: host %s" +#~ msgstr "" + +#~ msgid "enter: _create_host: host %s" +#~ msgstr "" + +#~ msgid "_create_host: No connector ports" +#~ msgstr "" + +#~ msgid "leave: _create_host: host %(host)s - %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +#~ msgstr "" + +#~ msgid "" +#~ "storwize_svc_multihostmap_enabled is set to " +#~ "False, Not allow multi host mapping" +#~ msgstr "" + +#~ msgid "volume %s mapping to multi host" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _map_vol_to_host: LUN %(result_lun)s, " +#~ "volume %(volume_name)s, host %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "leave: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "_create_host failed to return the host name." +#~ msgstr "" + +#~ msgid "_get_host_from_connector failed to return the host name for connector" +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to any host found." +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: Multiple mappings of " +#~ "volume %(vol_name)s found, no host " +#~ "specified." 
+#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to host %(host_name)s found" +#~ msgstr "" + +#~ msgid "protocol must be specified as ' iSCSI' or ' FC'" +#~ msgstr "" + +#~ msgid "enter: _create_vdisk: vdisk %s " +#~ msgstr "" + +#~ msgid "" +#~ "_create_vdisk %(name)s - did not find success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find success " +#~ "message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find mapping " +#~ "id in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to prepare FlashCopy" +#~ " from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "Unexecpted mapping status %(status)s for " +#~ "mapping %(id)s. Attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Mapping %(id)s prepare failed to " +#~ "complete within the allotted %(to)d " +#~ "seconds timeout. Terminating." +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s with " +#~ "exception %(ex)s" +#~ msgstr "" + +#~ msgid "_prepare_fc_map: %s" +#~ msgstr "" + +#~ msgid "" +#~ "_start_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "enter: _run_flashcopy: execute FlashCopy from" +#~ " source %(source)s to target %(target)s" +#~ msgstr "" + +#~ msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +#~ msgstr "" + +#~ msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %(src_vdisk)s (%(src_id)s) does not exist" +#~ msgstr "" + +#~ msgid "" +#~ "_create_copy: cannot get source vdisk " +#~ "%(src)s capacity from vdisk attributes " +#~ "%(attr)s" +#~ msgstr "" + +#~ msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_flashcopy_mapping_attributes: mapping " +#~ "%(fc_map_id)s, attributes %(attributes)s" +#~ msgstr "" + +#~ msgid "enter: _is_vdisk_defined: vdisk %s " +#~ msgstr "" + +#~ msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +#~ msgstr "" + +#~ msgid "enter: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "warning: Tried to delete vdisk %s but it does not exist." 
+#~ msgstr "" + +#~ msgid "leave: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "" +#~ "_add_vdisk_copy %(name)s - did not find" +#~ " success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "_get_vdisk_copy_attrs: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "_get_pool_attrs: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "enter: _execute_command_and_parse_attributes: command %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _execute_command_and_parse_attributes:\n" +#~ "command: %(cmd)s\n" +#~ "attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "_get_hdr_dic: attribute headers and values do not match.\n" +#~ " Headers: %(header)s\n" +#~ " Values: %(row)s" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ "stdout: %(out)s\n" +#~ "stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "Did not find expected column in %(fun)s: %(hdr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Volume size %(size)s cannot be lesser" +#~ " than the snapshot size %(snap_size)s. " +#~ "They must be >= original snapshot " +#~ "size." +#~ msgstr "" + +#~ msgid "" +#~ "Clones currently disallowed when %(size)s " +#~ "< %(source_size)s. They must be >= " +#~ "original volume size." +#~ msgstr "" + +#~ msgid "" +#~ "Size of specified image %(image_size)s " +#~ "is larger than volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "" +#~ "Image minDisk size %(min_disk)s is " +#~ "larger than the volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "Updating volume %(volume_id)s with %(update)s" +#~ msgstr "" + +#~ msgid "Volume %s: resetting 'creating' status failed" +#~ msgstr "" + +#~ msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s" +#~ msgstr "" + +#~ msgid "Marking volume %s as bootable" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting download of %(image_id)s " +#~ "(%(image_location)s) to volume %(volume_id)s" +#~ msgstr "" + +#~ msgid "" +#~ "Downloaded image %(image_id)s (%(image_location)s)" +#~ " to volume %(volume_id)s successfully" +#~ msgstr "" + +#~ msgid "" +#~ "Creating volume glance metadata for " +#~ "volume %(volume_id)s backed by image " +#~ "%(image_id)s with: %(vol_metadata)s" +#~ msgstr "" + +#~ msgid "" +#~ "Cloning %(volume_id)s from image %(image_id)s" +#~ " at location %(image_location)s" +#~ msgstr "" + diff --git a/cinder/locale/tl/LC_MESSAGES/cinder.po b/cinder/locale/tl/LC_MESSAGES/cinder.po index 6161a8c26d..5053a7e7b9 100644 --- a/cinder/locale/tl/LC_MESSAGES/cinder.po +++ b/cinder/locale/tl/LC_MESSAGES/cinder.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: cinder\n" "Report-Msgid-Bugs-To: FULL NAME \n" -"POT-Creation-Date: 2012-04-08 23:04+0000\n" +"POT-Creation-Date: 2014-02-03 06:16+0000\n" "PO-Revision-Date: 2011-08-23 11:21+0000\n" "Last-Translator: Thierry Carrez \n" "Language-Team: Tagalog \n" @@ -15,8186 +15,10722 @@ msgstr "" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 0.9.6\n" +"Generated-By: Babel 1.3\n" -#: cinder/context.py:59 +#: cinder/context.py:61 #, python-format msgid "Arguments dropped when creating context: %s" msgstr "" -#: cinder/context.py:90 +#: cinder/context.py:102 #, python-format msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" msgstr "" -#: 
cinder/crypto.py:48 -msgid "Filename of root CA" +#: cinder/exception.py:66 cinder/brick/exception.py:33 +msgid "An unknown exception occurred." msgstr "" -#: cinder/crypto.py:51 -msgid "Filename of private key" +#: cinder/exception.py:88 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" msgstr "" -#: cinder/crypto.py:54 -msgid "Filename of root Certificate Revocation List" +#: cinder/exception.py:107 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" msgstr "" -#: cinder/crypto.py:57 -msgid "Where we keep our keys" +#: cinder/exception.py:112 +#, python-format +msgid "Volume driver reported an error: %(message)s" msgstr "" -#: cinder/crypto.py:60 -msgid "Where we keep our root CA" +#: cinder/exception.py:116 +#, python-format +msgid "Backup driver reported an error: %(message)s" msgstr "" -#: cinder/crypto.py:63 -msgid "Should we use a CA for each project?" -msgstr "Kailangan bang gumamit ng CA bawat proyekto?" - -#: cinder/crypto.py:67 +#: cinder/exception.py:120 #, python-format -msgid "Subject for certificate for users, %s for project, user, timestamp" +msgid "Connection to glance failed: %(reason)s" msgstr "" -#: cinder/crypto.py:72 -#, python-format -msgid "Subject for certificate for projects, %s for project, timestamp" +#: cinder/exception.py:124 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:129 +msgid "User does not have admin privileges" msgstr "" -#: cinder/crypto.py:292 +#: cinder/exception.py:133 #, python-format -msgid "Flags path: %s" +msgid "Policy doesn't allow %(action)s to be performed." msgstr "" -#: cinder/exception.py:56 -msgid "Unexpected error while running command." -msgstr "Merong hindi-inaasahang pagkakamali habang tumatakbo ang command." +#: cinder/exception.py:137 +#, fuzzy, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "walang paraan para sa mensahe: %s" + +#: cinder/exception.py:141 +msgid "Volume driver not ready." +msgstr "" + +#: cinder/exception.py:145 cinder/brick/exception.py:74 +msgid "Unacceptable parameters." +msgstr "" -#: cinder/exception.py:59 +#: cinder/exception.py:150 #, python-format -msgid "" -"%(description)s\n" -"Command: %(cmd)s\n" -"Exit code: %(exit_code)s\n" -"Stdout: %(stdout)r\n" -"Stderr: %(stderr)r" +msgid "Invalid snapshot: %(reason)s" msgstr "" -#: cinder/exception.py:94 -msgid "DB exception wrapped." +#: cinder/exception.py:154 +#, python-format +msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." msgstr "" -#: cinder/exception.py:155 -msgid "An unknown exception occurred." +#: cinder/exception.py:159 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." msgstr "" -#: cinder/exception.py:178 -msgid "Failed to decrypt text" +#: cinder/exception.py:163 +msgid "Failed to load data into json format" msgstr "" -#: cinder/exception.py:182 -msgid "Failed to paginate through images from image service" +#: cinder/exception.py:167 +msgid "The results are invalid." 
msgstr "" -#: cinder/exception.py:186 -msgid "Virtual Interface creation failed" +#: cinder/exception.py:171 +#, python-format +msgid "Invalid input received: %(reason)s" msgstr "" -#: cinder/exception.py:190 -msgid "5 attempts to create virtual interfacewith unique mac address failed" +#: cinder/exception.py:175 +#, python-format +msgid "Invalid volume type: %(reason)s" msgstr "" -#: cinder/exception.py:195 -msgid "Connection to glance failed" +#: cinder/exception.py:179 +#, python-format +msgid "Invalid volume: %(reason)s" msgstr "" -#: cinder/exception.py:199 -msgid "Connection to melange failed" +#: cinder/exception.py:183 +#, python-format +msgid "Invalid content type %(content_type)s." msgstr "" -#: cinder/exception.py:203 -msgid "Not authorized." +#: cinder/exception.py:187 +#, python-format +msgid "Invalid host: %(reason)s" msgstr "" -#: cinder/exception.py:208 -msgid "User does not have admin privileges" +#: cinder/exception.py:193 cinder/brick/exception.py:81 +#, python-format +msgid "%(err)s" msgstr "" -#: cinder/exception.py:212 +#: cinder/exception.py:197 #, python-format -msgid "Policy doesn't allow %(action)s to be performed." +msgid "Invalid auth key: %(reason)s" msgstr "" -#: cinder/exception.py:216 -#, fuzzy, python-format -msgid "Not authorized for image %(image_id)s." -msgstr "walang paraan para sa mensahe: %s" +#: cinder/exception.py:201 +#, python-format +msgid "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" +msgstr "" -#: cinder/exception.py:220 -msgid "Unacceptable parameters." +#: cinder/exception.py:206 +msgid "Service is unavailable at this time." msgstr "" -#: cinder/exception.py:225 -msgid "Invalid snapshot" +#: cinder/exception.py:210 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" msgstr "" -#: cinder/exception.py:229 +#: cinder/exception.py:214 #, python-format -msgid "Volume %(volume_id)s is not attached to anything" +msgid "The device in the path %(path)s is unavailable: %(reason)s" msgstr "" -#: cinder/exception.py:233 cinder/api/openstack/compute/contrib/keypairs.py:113 -msgid "Keypair data is invalid" +#: cinder/exception.py:218 +#, python-format +msgid "Expected a uuid but received %(uuid)s." msgstr "" -#: cinder/exception.py:237 -msgid "Failed to load data into json format" +#: cinder/exception.py:222 cinder/brick/exception.py:68 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:228 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:232 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." msgstr "" -#: cinder/exception.py:241 -msgid "The request is invalid." +#: cinder/exception.py:237 +#, python-format +msgid "" +"Volume %(volume_id)s has no administration metadata with key " +"%(metadata_key)s." msgstr "" -#: cinder/exception.py:245 +#: cinder/exception.py:242 #, python-format -msgid "Invalid signature %(signature)s for user %(user)s." +msgid "Invalid metadata: %(reason)s" msgstr "" -#: cinder/exception.py:249 -msgid "Invalid input received" +#: cinder/exception.py:246 +#, python-format +msgid "Invalid metadata size: %(reason)s" msgstr "" -#: cinder/exception.py:253 +#: cinder/exception.py:250 #, python-format -msgid "Invalid instance type %(instance_type)s." +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." 
msgstr "" -#: cinder/exception.py:257 -msgid "Invalid volume type" +#: cinder/exception.py:255 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." msgstr "" -#: cinder/exception.py:261 -msgid "Invalid volume" +#: cinder/exception.py:259 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." msgstr "" -#: cinder/exception.py:265 +#: cinder/exception.py:264 #, python-format -msgid "Invalid port range %(from_port)s:%(to_port)s. %(msg)s" +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." msgstr "" #: cinder/exception.py:269 #, python-format -msgid "Invalid IP protocol %(protocol)s." +msgid "" +"Volume Type %(volume_type_id)s deletion is not allowed with volumes " +"present with the type." msgstr "" -#: cinder/exception.py:273 +#: cinder/exception.py:274 #, python-format -msgid "Invalid content type %(content_type)s." +msgid "Snapshot %(snapshot_id)s could not be found." msgstr "" -#: cinder/exception.py:277 +#: cinder/exception.py:278 #, python-format -msgid "Invalid cidr %(cidr)s." +msgid "deleting volume %(volume_name)s that has snapshot" msgstr "" -#: cinder/exception.py:281 -msgid "Invalid reuse of an RPC connection." +#: cinder/exception.py:282 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" msgstr "" -#: cinder/exception.py:285 -msgid "Invalid Parameter: Unicode is not supported by the current database." +#: cinder/exception.py:287 +#, python-format +msgid "No target id found for volume %(volume_id)s." msgstr "" -#: cinder/exception.py:292 +#: cinder/exception.py:291 #, python-format -msgid "%(err)s" +msgid "Invalid image href %(image_href)s." msgstr "" -#: cinder/exception.py:296 +#: cinder/exception.py:295 #, python-format -msgid "" -"Cannot perform action '%(action)s' on aggregate %(aggregate_id)s. Reason:" -" %(reason)s." +msgid "Image %(image_id)s could not be found." msgstr "" -#: cinder/exception.py:301 +#: cinder/exception.py:299 #, python-format -msgid "Group not valid. Reason: %(reason)s" +msgid "Service %(service_id)s could not be found." msgstr "" -#: cinder/exception.py:305 +#: cinder/exception.py:303 #, python-format -msgid "" -"Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot %(method)s while" -" the instance is in this state." +msgid "Host %(host)s could not be found." msgstr "" -#: cinder/exception.py:310 +#: cinder/exception.py:307 #, python-format -msgid "Instance %(instance_id)s is not running." +msgid "Scheduler Host Filter %(filter_name)s could not be found." msgstr "" -#: cinder/exception.py:314 +#: cinder/exception.py:311 #, python-format -msgid "Instance %(instance_id)s is not suspended." +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." msgstr "" -#: cinder/exception.py:318 +#: cinder/exception.py:315 #, python-format -msgid "Instance %(instance_id)s is not in rescue mode" +msgid "Could not find binary %(binary)s on host %(host)s." msgstr "" -#: cinder/exception.py:322 -msgid "Failed to suspend instance" +#: cinder/exception.py:319 +#, python-format +msgid "Invalid reservation expiration %(expire)s." 
msgstr "" -#: cinder/exception.py:326 -msgid "Failed to resume server" +#: cinder/exception.py:323 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" msgstr "" -#: cinder/exception.py:330 -msgid "Failed to reboot instance" +#: cinder/exception.py:328 +msgid "Quota could not be found" msgstr "" -#: cinder/exception.py:334 -msgid "Failed to terminate instance" +#: cinder/exception.py:332 +#, python-format +msgid "Unknown quota resources %(unknown)s." msgstr "" -#: cinder/exception.py:338 -msgid "Service is unavailable at this time." +#: cinder/exception.py:336 +#, python-format +msgid "Quota for project %(project_id)s could not be found." msgstr "" -#: cinder/exception.py:342 -msgid "Volume service is unavailable at this time." +#: cinder/exception.py:340 +#, python-format +msgid "Quota class %(class_name)s could not be found." msgstr "" -#: cinder/exception.py:346 -msgid "Compute service is unavailable at this time." +#: cinder/exception.py:344 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." msgstr "" -#: cinder/exception.py:350 +#: cinder/exception.py:348 #, python-format -msgid "Unable to migrate instance (%(instance_id)s) to current host (%(host)s)." +msgid "Quota reservation %(uuid)s could not be found." msgstr "" -#: cinder/exception.py:355 -msgid "Destination compute host is unavailable at this time." +#: cinder/exception.py:352 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" msgstr "" -#: cinder/exception.py:359 -msgid "Original compute host is unavailable at this time." +#: cinder/exception.py:356 +#, python-format +msgid "File %(file_path)s could not be found." msgstr "" -#: cinder/exception.py:363 -msgid "The supplied hypervisor type of is invalid." +#: cinder/exception.py:365 +#, python-format +msgid "Volume Type %(id)s already exists." msgstr "" -#: cinder/exception.py:367 -msgid "The instance requires a newer hypervisor version than has been provided." +#: cinder/exception.py:369 +#, python-format +msgid "Volume type encryption for type %(type_id)s already exists." msgstr "" -#: cinder/exception.py:372 +#: cinder/exception.py:373 #, python-format -msgid "" -"The supplied disk path (%(path)s) already exists, it is expected not to " -"exist." +msgid "Malformed message body: %(reason)s" msgstr "" #: cinder/exception.py:377 #, python-format -msgid "The supplied device path (%(path)s) is invalid." +msgid "Could not find config at %(path)s" msgstr "" #: cinder/exception.py:381 #, python-format -msgid "The supplied device (%(device)s) is busy." +msgid "Could not find parameter %(param)s" msgstr "" #: cinder/exception.py:385 -msgid "Unacceptable CPU info" +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" msgstr "" #: cinder/exception.py:389 #, python-format -msgid "%(address)s is not a valid IP v4/6 address." +msgid "No valid host was found. %(reason)s" msgstr "" -#: cinder/exception.py:393 +#: cinder/exception.py:398 #, python-format -msgid "" -"VLAN tag is not appropriate for the port group %(bridge)s. Expected VLAN " -"tag is %(tag)s, but the one associated with the port group is %(pgroup)s." +msgid "Host %(host)s is not up or doesn't exist." msgstr "" -#: cinder/exception.py:399 +#: cinder/exception.py:402 #, python-format -msgid "" -"vSwitch which contains the port group %(bridge)s is not associated with " -"the desired physical adapter. Expected vSwitch is %(expected)s, but the " -"one associated is %(actual)s." 
+msgid "Quota exceeded: code=%(code)s" msgstr "" -#: cinder/exception.py:406 +#: cinder/exception.py:409 #, python-format -msgid "Disk format %(disk_format)s is not acceptable" +msgid "" +"Requested volume or snapshot exceeds allowed Gigabytes quota. Requested " +"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." msgstr "" -#: cinder/exception.py:410 +#: cinder/exception.py:415 #, python-format -msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" msgstr "" -#: cinder/exception.py:414 +#: cinder/exception.py:419 #, python-format -msgid "Instance %(instance_id)s is unacceptable: %(reason)s" +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" msgstr "" -#: cinder/exception.py:418 +#: cinder/exception.py:423 #, python-format -msgid "Ec2 id %(ec2_id)s is unacceptable." -msgstr "" - -#: cinder/exception.py:422 -msgid "Resource could not be found." +msgid "Detected more than one volume with name %(vol_name)s" msgstr "" #: cinder/exception.py:427 #, python-format -msgid "Required flag %(flag)s not set." +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" msgstr "" -#: cinder/exception.py:431 +#: cinder/exception.py:432 #, python-format -msgid "Volume %(volume_id)s could not be found." +msgid "Unknown or unsupported command %(cmd)s" msgstr "" -#: cinder/exception.py:435 +#: cinder/exception.py:436 #, python-format -msgid "Unable to locate account %(account_name)s on Solidfire device" +msgid "Malformed response to command %(cmd)s: %(reason)s" msgstr "" #: cinder/exception.py:440 #, python-format -msgid "Volume not found for instance %(instance_id)s." +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" msgstr "" #: cinder/exception.py:444 #, python-format -msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" msgstr "" #: cinder/exception.py:449 -msgid "Zero volume types found." +#, python-format +msgid "Glance metadata for volume/snapshot %(id)s cannot be found." msgstr "" #: cinder/exception.py:453 #, python-format -msgid "Volume type %(volume_type_id)s could not be found." +msgid "Failed to export for volume: %(reason)s" msgstr "" #: cinder/exception.py:457 #, python-format -msgid "Volume type with name %(volume_type_name)s could not be found." +msgid "Failed to create metadata for volume: %(reason)s" msgstr "" -#: cinder/exception.py:462 +#: cinder/exception.py:461 #, python-format -msgid "" -"Volume Type %(volume_type_id)s has no extra specs with key " -"%(extra_specs_key)s." +msgid "Failed to update metadata for volume: %(reason)s" msgstr "" -#: cinder/exception.py:467 +#: cinder/exception.py:465 #, python-format -msgid "Snapshot %(snapshot_id)s could not be found." +msgid "Failed to copy metadata to volume: %(reason)s" msgstr "" -#: cinder/exception.py:471 +#: cinder/exception.py:469 #, python-format -msgid "deleting volume %(volume_name)s that has snapshot" +msgid "Failed to copy image to volume: %(reason)s" msgstr "" -#: cinder/exception.py:475 -#, python-format -msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +#: cinder/exception.py:473 +msgid "Invalid Ceph args provided for backup rbd operation" msgstr "" -#: cinder/exception.py:480 -#, python-format -msgid "No target id found for volume %(volume_id)s." 
+#: cinder/exception.py:477 +msgid "An error has occurred during backup operation" msgstr "" -#: cinder/exception.py:484 -#, python-format -msgid "No disk at %(location)s" +#: cinder/exception.py:481 +msgid "Backup RBD operation failed" msgstr "" -#: cinder/exception.py:488 +#: cinder/exception.py:485 #, python-format -msgid "Could not find a handler for %(driver_type)s volume." +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:489 +msgid "Failed to identify volume backend." msgstr "" -#: cinder/exception.py:492 +#: cinder/exception.py:493 #, python-format -msgid "Invalid image href %(image_href)s." +msgid "Invalid backup: %(reason)s" msgstr "" -#: cinder/exception.py:496 -msgid "" -"Some images have been stored via hrefs. This version of the api does not " -"support displaying image hrefs." +#: cinder/exception.py:497 +#, python-format +msgid "Connection to swift failed: %(reason)s" msgstr "" #: cinder/exception.py:501 #, python-format -msgid "Image %(image_id)s could not be found." +msgid "Transfer %(transfer_id)s could not be found." msgstr "" #: cinder/exception.py:505 #, python-format -msgid "Kernel not found for image %(image_id)s." +msgid "Volume migration failed: %(reason)s" msgstr "" #: cinder/exception.py:509 #, python-format -msgid "User %(user_id)s could not be found." +msgid "SSH command injection detected: %(command)s" msgstr "" #: cinder/exception.py:513 #, python-format -msgid "Project %(project_id)s could not be found." +msgid "QoS Specs %(specs_id)s already exists." msgstr "" #: cinder/exception.py:517 #, python-format -msgid "User %(user_id)s is not a member of project %(project_id)s." +msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." msgstr "" -#: cinder/exception.py:521 +#: cinder/exception.py:522 #, python-format -msgid "Role %(role_id)s could not be found." -msgstr "" - -#: cinder/exception.py:525 -msgid "Cannot find SR to read/write VDI." +msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." msgstr "" -#: cinder/exception.py:529 +#: cinder/exception.py:527 #, python-format -msgid "%(req)s is required to create a network." +msgid "No such QoS spec %(specs_id)s." msgstr "" -#: cinder/exception.py:533 +#: cinder/exception.py:531 #, python-format -msgid "Network %(network_id)s could not be found." +msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." msgstr "" -#: cinder/exception.py:537 +#: cinder/exception.py:536 #, python-format -msgid "Network could not be found for bridge %(bridge)s" +msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." msgstr "" #: cinder/exception.py:541 #, python-format -msgid "Network could not be found for uuid %(uuid)s" +msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." msgstr "" -#: cinder/exception.py:545 +#: cinder/exception.py:546 #, python-format -msgid "Network could not be found with cidr %(cidr)s." +msgid "Invalid qos specs: %(reason)s" msgstr "" -#: cinder/exception.py:549 +#: cinder/exception.py:550 #, python-format -msgid "Network could not be found for instance %(instance_id)s." +msgid "QoS Specs %(specs_id)s is still associated with entities." msgstr "" -#: cinder/exception.py:553 -msgid "No networks defined." +#: cinder/exception.py:554 +#, python-format +msgid "key manager error: %(reason)s" msgstr "" -#: cinder/exception.py:557 -#, python-format -msgid "" -"Either Network uuid %(network_uuid)s is not present or is not assigned to" -" the project %(project_id)s." 
+#: cinder/exception.py:560 +msgid "Coraid Cinder Driver exception." msgstr "" -#: cinder/exception.py:562 -#, python-format -msgid "Host is not set to the network (%(network_id)s)." +#: cinder/exception.py:564 +msgid "Failed to encode json data." msgstr "" -#: cinder/exception.py:566 -#, python-format -msgid "Network %(network)s has active ports, cannot delete." +#: cinder/exception.py:568 +msgid "Login on ESM failed." msgstr "" -#: cinder/exception.py:570 -msgid "Could not find the datastore reference(s) which the VM uses." +#: cinder/exception.py:572 +msgid "Relogin on ESM failed." msgstr "" -#: cinder/exception.py:574 +#: cinder/exception.py:576 #, python-format -msgid "No fixed IP associated with id %(id)s." +msgid "Group with name \"%(group_name)s\" not found." msgstr "" -#: cinder/exception.py:578 +#: cinder/exception.py:580 #, python-format -msgid "Fixed ip not found for address %(address)s." +msgid "ESM configure request failed: %(message)s." msgstr "" -#: cinder/exception.py:582 +#: cinder/exception.py:584 #, python-format -msgid "Instance %(instance_id)s has zero fixed ips." +msgid "Coraid ESM not available with reason: %(reason)s." msgstr "" -#: cinder/exception.py:586 -#, python-format -msgid "Network host %(host)s has zero fixed ips in network %(network_id)s." +#: cinder/exception.py:589 +msgid "Zadara Cinder Driver exception." msgstr "" -#: cinder/exception.py:591 +#: cinder/exception.py:593 #, python-format -msgid "Instance %(instance_id)s doesn't have fixed ip '%(ip)s'." +msgid "Unable to create server object for initiator %(name)s" msgstr "" -#: cinder/exception.py:595 +#: cinder/exception.py:597 #, python-format -msgid "Host %(host)s has zero fixed ips." +msgid "Unable to find server object for initiator %(name)s" msgstr "" -#: cinder/exception.py:599 -#, python-format -msgid "" -"Fixed IP address (%(address)s) does not exist in network " -"(%(network_uuid)s)." +#: cinder/exception.py:601 +msgid "Unable to find any active VPSA controller" msgstr "" -#: cinder/exception.py:604 +#: cinder/exception.py:605 #, python-format -msgid "Fixed IP address %(address)s is already in use." +msgid "Failed to retrieve attachments for volume %(name)s" msgstr "" -#: cinder/exception.py:608 +#: cinder/exception.py:609 #, python-format -msgid "Fixed IP address %(address)s is invalid." +msgid "Invalid attachment info for volume %(name)s: %(reason)s" msgstr "" -#: cinder/exception.py:612 -msgid "Zero fixed ips available." +#: cinder/exception.py:613 +#, python-format +msgid "Bad HTTP response status %(status)s" msgstr "" -#: cinder/exception.py:616 -msgid "Zero fixed ips could be found." +#: cinder/exception.py:618 +msgid "Bad response from SolidFire API" msgstr "" -#: cinder/exception.py:620 -#, python-format -msgid "Floating ip not found for id %(id)s." +#: cinder/exception.py:622 +msgid "SolidFire Cinder Driver exception" msgstr "" -#: cinder/exception.py:624 +#: cinder/exception.py:626 #, python-format -msgid "The DNS entry %(name)s already exists in domain %(domain)s." +msgid "Error in SolidFire API response: data=%(data)s" msgstr "" -#: cinder/exception.py:628 +#: cinder/exception.py:630 #, python-format -msgid "Floating ip not found for address %(address)s." +msgid "Unable to locate account %(account_name)s on Solidfire device" msgstr "" -#: cinder/exception.py:632 +#: cinder/exception.py:636 #, python-format -msgid "Floating ip not found for host %(host)s." +msgid "Invalid 3PAR Domain: %(err)s" msgstr "" -#: cinder/exception.py:636 -msgid "Zero floating ips available." 
+#: cinder/exception.py:641 +msgid "Unknown NFS exception" msgstr "" -#: cinder/exception.py:640 -#, python-format -msgid "Floating ip %(address)s is associated." +#: cinder/exception.py:645 +msgid "No mounted NFS shares found" msgstr "" -#: cinder/exception.py:644 +#: cinder/exception.py:649 cinder/exception.py:662 #, python-format -msgid "Floating ip %(address)s is not associated." -msgstr "" - -#: cinder/exception.py:648 -msgid "Zero floating ips exist." +msgid "There is no share which can host %(volume_size)sG" msgstr "" -#: cinder/exception.py:652 -#, python-format -msgid "Interface %(interface)s not found." +#: cinder/exception.py:654 +msgid "Unknown Gluster exception" msgstr "" -#: cinder/exception.py:656 -#, python-format -msgid "Keypair %(name)s not found for user %(user_id)s" +#: cinder/exception.py:658 +msgid "No mounted Gluster shares found" msgstr "" -#: cinder/exception.py:660 -#, python-format -msgid "Certificate %(certificate_id)s not found." +#: cinder/manager.py:133 +msgid "Notifying Schedulers of capabilities ..." msgstr "" -#: cinder/exception.py:664 -#, python-format -msgid "Service %(service_id)s could not be found." +#: cinder/policy.py:30 +msgid "JSON file representing policy" msgstr "" -#: cinder/exception.py:668 -#, python-format -msgid "Host %(host)s could not be found." +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" msgstr "" -#: cinder/exception.py:672 +#: cinder/quota.py:105 #, python-format -msgid "Compute host %(host)s could not be found." +msgid "" +"Default quota for resource: %(res)s is set by the default quota flag: " +"quota_%(res)s, it is now deprecated. Please use the the default quota " +"class for default quota." msgstr "" -#: cinder/exception.py:676 +#: cinder/quota.py:748 #, python-format -msgid "Could not find binary %(binary)s on host %(host)s." +msgid "Created reservations %s" msgstr "" -#: cinder/exception.py:680 +#: cinder/quota.py:770 #, python-format -msgid "Auth token %(token)s could not be found." +msgid "Failed to commit reservations %s" msgstr "" -#: cinder/exception.py:684 +#: cinder/quota.py:790 #, python-format -msgid "Access Key %(access_key)s could not be found." +msgid "Failed to roll back reservations %s" msgstr "" -#: cinder/exception.py:688 -msgid "Quota could not be found" +#: cinder/quota.py:876 +msgid "Cannot register resource" msgstr "" -#: cinder/exception.py:692 -#, python-format -msgid "Quota for project %(project_id)s could not be found." +#: cinder/quota.py:879 +msgid "Cannot register resources" msgstr "" -#: cinder/exception.py:696 +#: cinder/quota_utils.py:46 #, python-format -msgid "Quota class %(class_name)s could not be found." +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume - " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" msgstr "" -#: cinder/exception.py:700 +#: cinder/quota_utils.py:56 cinder/transfer/api.py:168 +#: cinder/volume/flows/api/create_volume.py:520 #, python-format -msgid "Security group %(security_group_id)s not found." +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" msgstr "" -#: cinder/exception.py:704 +#: cinder/service.py:95 #, python-format -msgid "Security group %(security_group_id)s not found for project %(project_id)s." +msgid "Starting %(topic)s node (version %(version_string)s)" msgstr "" -#: cinder/exception.py:709 +#: cinder/service.py:108 cinder/openstack/common/rpc/service.py:47 #, python-format -msgid "Security group with rule %(rule_id)s not found." 
+msgid "Creating Consumer connection for Service %s" msgstr "" -#: cinder/exception.py:713 +#: cinder/service.py:148 #, python-format msgid "" -"Security group %(security_group_id)s is already associated with the " -"instance %(instance_id)s" +"Report interval must be less than service down time. Current config " +"service_down_time: %(service_down_time)s, report_interval for this: " +"service is: %(report_interval)s. Setting global service_down_time to: " +"%(new_down_time)s" msgstr "" -#: cinder/exception.py:718 -#, python-format -msgid "" -"Security group %(security_group_id)s is not associated with the instance " -"%(instance_id)s" +#: cinder/service.py:216 +msgid "Service killed that has no database entry" msgstr "" -#: cinder/exception.py:723 -#, python-format -msgid "Migration %(migration_id)s could not be found." +#: cinder/service.py:255 +msgid "The service database object disappeared, Recreating it." msgstr "" -#: cinder/exception.py:727 -#, python-format -msgid "Migration not found for instance %(instance_id)s with status %(status)s." +#: cinder/service.py:270 +msgid "Recovered model server connection!" msgstr "" -#: cinder/exception.py:732 -#, python-format -msgid "Console pool %(pool_id)s could not be found." +#: cinder/service.py:276 +msgid "model server went away" msgstr "" -#: cinder/exception.py:736 +#: cinder/service.py:298 #, python-format msgid "" -"Console pool of type %(console_type)s for compute host %(compute_host)s " -"on proxy host %(host)s not found." +"Value of config option %(name)s_workers must be integer greater than 1. " +"Input value ignored." msgstr "" -#: cinder/exception.py:742 -#, python-format -msgid "Console %(console_id)s could not be found." +#: cinder/service.py:373 +msgid "serve() can only be called once" msgstr "" -#: cinder/exception.py:746 +#: cinder/service.py:379 cinder/openstack/common/service.py:166 +#: cinder/openstack/common/service.py:384 +msgid "Full set of CONF:" +msgstr "" + +#: cinder/service.py:387 #, python-format -msgid "Console for instance %(instance_id)s could not be found." +msgid "%s : FLAG SET " msgstr "" -#: cinder/exception.py:750 +#: cinder/utils.py:96 #, python-format -msgid "" -"Console for instance %(instance_id)s in pool %(pool_id)s could not be " -"found." +msgid "Can not translate %s to integer." msgstr "" -#: cinder/exception.py:755 +#: cinder/utils.py:127 #, python-format -msgid "Invalid console type %(console_type)s " +msgid "May specify only one of %s" msgstr "" -#: cinder/exception.py:759 -msgid "Zero instance types found." +#: cinder/utils.py:212 +msgid "Specify a password or private_key" msgstr "" -#: cinder/exception.py:763 +#: cinder/utils.py:228 #, python-format -msgid "Instance type %(instance_type_id)s could not be found." +msgid "Error connecting via ssh: %s" msgstr "" -#: cinder/exception.py:767 +#: cinder/utils.py:412 #, python-format -msgid "Instance type with name %(instance_type_name)s could not be found." +msgid "Invalid backend: %s" msgstr "" -#: cinder/exception.py:772 +#: cinder/utils.py:423 #, python-format -msgid "Flavor %(flavor_id)s could not be found." +msgid "backend %s" msgstr "" -#: cinder/exception.py:776 +#: cinder/utils.py:698 #, python-format -msgid "Cell %(cell_id)s could not be found." +msgid "Could not remove tmpdir: %s" msgstr "" -#: cinder/exception.py:780 +#: cinder/utils.py:759 #, python-format -msgid "Scheduler Host Filter %(filter_name)s could not be found." 
+msgid "Volume driver %s not initialized" msgstr "" -#: cinder/exception.py:784 +#: cinder/wsgi.py:127 cinder/openstack/common/sslutils.py:50 #, python-format -msgid "Scheduler cost function %(cost_fn_str)s could not be found." +msgid "Unable to find cert_file : %s" msgstr "" -#: cinder/exception.py:789 +#: cinder/wsgi.py:130 cinder/openstack/common/sslutils.py:53 #, python-format -msgid "Scheduler weight flag not found: %(flag_name)s" +msgid "Unable to find ca_file : %s" msgstr "" -#: cinder/exception.py:793 +#: cinder/wsgi.py:133 cinder/openstack/common/sslutils.py:56 #, python-format -msgid "Instance %(instance_id)s has no metadata with key %(metadata_key)s." +msgid "Unable to find key_file : %s" msgstr "" -#: cinder/exception.py:798 -#, python-format +#: cinder/wsgi.py:136 cinder/openstack/common/sslutils.py:59 msgid "" -"Instance Type %(instance_type_id)s has no extra specs with key " -"%(extra_specs_key)s." +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" msgstr "" -#: cinder/exception.py:803 -msgid "LDAP object could not be found" +#: cinder/wsgi.py:169 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" msgstr "" -#: cinder/exception.py:807 +#: cinder/wsgi.py:206 #, python-format -msgid "LDAP user %(user_id)s could not be found." +msgid "Started %(name)s on %(host)s:%(port)s" msgstr "" -#: cinder/exception.py:811 -#, python-format -msgid "LDAP group %(group_id)s could not be found." +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." msgstr "" -#: cinder/exception.py:815 -#, python-format -msgid "LDAP user %(user_id)s is not a member of group %(group_id)s." +#: cinder/wsgi.py:244 +msgid "WSGI server has stopped." msgstr "" -#: cinder/exception.py:819 -#, python-format -msgid "File %(file_path)s could not be found." +#: cinder/wsgi.py:313 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." msgstr "" -#: cinder/exception.py:823 -msgid "Zero files could be found." +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." msgstr "" -#: cinder/exception.py:827 -#, python-format -msgid "Virtual switch associated with the network adapter %(adapter)s not found." +#: cinder/api/common.py:92 cinder/api/common.py:126 cinder/volume/api.py:266 +msgid "limit param must be an integer" msgstr "" -#: cinder/exception.py:832 -#, python-format -msgid "Network adapter %(adapter)s could not be found." +#: cinder/api/common.py:95 cinder/api/common.py:130 cinder/volume/api.py:263 +msgid "limit param must be positive" msgstr "" -#: cinder/exception.py:836 -#, python-format -msgid "Class %(class_name)s could not be found: %(exception)s" +#: cinder/api/common.py:120 +msgid "offset param must be an integer" msgstr "" -#: cinder/exception.py:840 -msgid "Action not allowed." 
+#: cinder/api/common.py:134 +msgid "offset param must be positive" msgstr "" -#: cinder/exception.py:844 +#: cinder/api/common.py:162 #, python-format -msgid "Unable to use global role %(role_id)s" +msgid "marker [%s] not found" msgstr "" -#: cinder/exception.py:848 -msgid "Rotation is not allowed for snapshots" +#: cinder/api/common.py:189 +#, python-format +msgid "href %s does not contain version" msgstr "" -#: cinder/exception.py:852 -msgid "Rotation param is required for backup image_type" +#: cinder/api/extensions.py:182 +msgid "Initializing extension manager." msgstr "" -#: cinder/exception.py:861 +#: cinder/api/extensions.py:197 #, python-format -msgid "Key pair %(key_name)s already exists." +msgid "Loaded extension: %s" msgstr "" -#: cinder/exception.py:865 +#: cinder/api/extensions.py:235 #, python-format -msgid "User %(user)s already exists." +msgid "Ext name: %s" msgstr "" -#: cinder/exception.py:869 +#: cinder/api/extensions.py:236 #, python-format -msgid "LDAP user %(user)s already exists." +msgid "Ext alias: %s" msgstr "" -#: cinder/exception.py:873 +#: cinder/api/extensions.py:237 #, python-format -msgid "LDAP group %(group)s already exists." +msgid "Ext description: %s" msgstr "" -#: cinder/exception.py:877 +#: cinder/api/extensions.py:239 #, python-format -msgid "User %(uid)s is already a member of the group %(group_dn)s" +msgid "Ext namespace: %s" msgstr "" -#: cinder/exception.py:882 +#: cinder/api/extensions.py:240 #, python-format -msgid "Project %(project)s already exists." +msgid "Ext updated: %s" msgstr "" -#: cinder/exception.py:886 +#: cinder/api/extensions.py:242 #, python-format -msgid "Instance %(name)s already exists." +msgid "Exception loading extension: %s" msgstr "" -#: cinder/exception.py:890 +#: cinder/api/extensions.py:256 #, python-format -msgid "Instance Type %(name)s already exists." +msgid "Loading extension %s" msgstr "" -#: cinder/exception.py:894 +#: cinder/api/extensions.py:262 #, python-format -msgid "Volume Type %(name)s already exists." +msgid "Calling extension factory %s" msgstr "" -#: cinder/exception.py:898 +#: cinder/api/extensions.py:276 #, python-format -msgid "%(path)s is on shared storage: %(reason)s" +msgid "osapi_volume_extension is set to deprecated path: %s" msgstr "" -#: cinder/exception.py:902 -msgid "Migration error" +#: cinder/api/extensions.py:278 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" msgstr "" -#: cinder/exception.py:906 +#: cinder/api/extensions.py:287 #, python-format -msgid "Malformed message body: %(reason)s" +msgid "Failed to load extension %(ext_factory)s: %(exc)s" msgstr "" -#: cinder/exception.py:910 +#: cinder/api/extensions.py:356 #, python-format -msgid "Could not find config at %(path)s" +msgid "Failed to load extension %(classpath)s: %(exc)s" msgstr "" -#: cinder/exception.py:914 +#: cinder/api/extensions.py:381 #, python-format -msgid "Could not load paste app '%(name)s' from %(path)s" +msgid "Failed to load extension %(ext_name)s: %(exc)s" msgstr "" -#: cinder/exception.py:918 -msgid "When resizing, instances must change size!" +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. 
Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" msgstr "" -#: cinder/exception.py:922 -msgid "Image is larger than instance type allows" +#: cinder/api/xmlutil.py:266 +msgid "element is not a child" msgstr "" -#: cinder/exception.py:926 -msgid "1 or more Zones could not complete the request" +#: cinder/api/xmlutil.py:463 +msgid "root element selecting a list" msgstr "" -#: cinder/exception.py:930 -msgid "Instance type's memory is too small for requested image." +#: cinder/api/xmlutil.py:786 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" msgstr "" -#: cinder/exception.py:934 -msgid "Instance type's disk is too small for requested image." +#: cinder/api/xmlutil.py:907 +msgid "subclasses must implement construct()!" msgstr "" -#: cinder/exception.py:938 +#: cinder/api/contrib/admin_actions.py:81 #, python-format -msgid "Insufficient free memory on compute node to start %(uuid)s." +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" msgstr "" -#: cinder/exception.py:942 -msgid "Could not fetch bandwidth/cpu/disk metrics for this host." +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" msgstr "" -#: cinder/exception.py:946 +#: cinder/api/contrib/backups.py:140 #, python-format -msgid "No valid host was found. %(reason)s" +msgid "delete called for member %s" msgstr "" -#: cinder/exception.py:950 +#: cinder/api/contrib/backups.py:143 #, python-format -msgid "Host %(host)s is not up or doesn't exist." +msgid "Delete backup with id: %s" msgstr "" -#: cinder/exception.py:954 -msgid "Quota exceeded" +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" msgstr "" -#: cinder/exception.py:958 -#, python-format -msgid "" -"Aggregate %(aggregate_id)s: action '%(action)s' caused an error: " -"%(reason)s." +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:227 +#: cinder/api/contrib/volume_transfer.py:157 +#: cinder/api/contrib/volume_transfer.py:193 +msgid "Incorrect request body format" msgstr "" -#: cinder/exception.py:963 +#: cinder/api/contrib/backups.py:201 #, python-format -msgid "Aggregate %(aggregate_id)s could not be found." +msgid "Creating backup of volume %(volume_id)s in container %(container)s" msgstr "" -#: cinder/exception.py:967 +#: cinder/api/contrib/backups.py:224 #, python-format -msgid "Aggregate %(aggregate_name)s already exists." +msgid "Restoring backup %(backup_id)s (%(body)s)" msgstr "" -#: cinder/exception.py:971 +#: cinder/api/contrib/backups.py:234 #, python-format -msgid "Aggregate %(aggregate_id)s has no host %(host)s." +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" msgstr "" -#: cinder/exception.py:975 -#, python-format -msgid "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s." +#: cinder/api/contrib/extended_snapshot_attributes.py:60 +msgid "Snapshot not found." +msgstr "" + +#: cinder/api/contrib/hosts.py:86 cinder/api/openstack/wsgi.py:245 +msgid "cannot understand XML" msgstr "" -#: cinder/exception.py:980 +#: cinder/api/contrib/hosts.py:136 #, python-format -msgid "Host %(host)s already member of another aggregate." +msgid "Host '%s' could not be found." msgstr "" -#: cinder/exception.py:984 +#: cinder/api/contrib/hosts.py:165 #, python-format -msgid "Aggregate %(aggregate_id)s already has host %(host)s." 
+msgid "Invalid status: '%s'" msgstr "" -#: cinder/exception.py:988 +#: cinder/api/contrib/hosts.py:168 #, python-format -msgid "Detected more than one volume with name %(vol_name)s" +msgid "Invalid update setting: '%s'" msgstr "" -#: cinder/exception.py:992 +#: cinder/api/contrib/hosts.py:180 #, python-format -msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgid "Setting host %(host)s to %(state)s." msgstr "" -#: cinder/exception.py:997 -msgid "Unable to create instance type" +#: cinder/api/contrib/hosts.py:206 +msgid "Describe-resource is admin only functionality" msgstr "" -#: cinder/exception.py:1001 -msgid "Bad response from SolidFire API" +#: cinder/api/contrib/hosts.py:214 +msgid "Host not found" msgstr "" -#: cinder/exception.py:1005 -#, python-format -msgid "Error in SolidFire API response: status=%(status)s" +#: cinder/api/contrib/qos_specs_manage.py:111 +msgid "Please specify a name for QoS specs." msgstr "" -#: cinder/exception.py:1009 -#, python-format -msgid "Error in SolidFire API response: data=%(data)s" +#: cinder/api/contrib/qos_specs_manage.py:220 +msgid "Failed to disassociate qos specs." msgstr "" -#: cinder/exception.py:1013 -#, python-format -msgid "Detected existing vlan with id %(vlan)d" +#: cinder/api/contrib/qos_specs_manage.py:222 +msgid "Qos specs still in use." msgstr "" -#: cinder/exception.py:1017 -#, python-format -msgid "Instance %(instance_id)s could not be found." +#: cinder/api/contrib/qos_specs_manage.py:298 +#: cinder/api/contrib/qos_specs_manage.py:351 +msgid "Volume Type id must not be None." msgstr "" -#: cinder/exception.py:1021 -#, python-format -msgid "Invalid id: %(val)s (expecting \"i-...\")." +#: cinder/api/contrib/quota_classes.py:72 +msgid "Missing required element quota_class_set in request body." msgstr "" -#: cinder/exception.py:1025 -#, python-format -msgid "Could not fetch image %(image)s" +#: cinder/api/contrib/quota_classes.py:81 +msgid "Quota class limit must be specified as an integer value." msgstr "" -#: cinder/log.py:315 -#, python-format -msgid "syslog facility must be one of: %s" +#: cinder/api/contrib/quota_classes.py:85 +msgid "Quota class limit must be -1 or greater." msgstr "" -#: cinder/manager.py:146 -#, python-format -msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +#: cinder/api/contrib/quotas.py:60 +msgid "Quota limit must be specified as an integer value." msgstr "" -#: cinder/manager.py:152 -#, python-format -msgid "Running periodic task %(full_task_name)s" +#: cinder/api/contrib/quotas.py:65 +msgid "Quota limit must be -1 or greater." msgstr "" -#: cinder/manager.py:159 -#, python-format -msgid "Error during %(full_task_name)s: %(e)s" +#: cinder/api/contrib/quotas.py:100 +msgid "Missing required element quota_set in request body." msgstr "" -#: cinder/manager.py:203 -msgid "Notifying Schedulers of capabilities ..." +#: cinder/api/contrib/quotas.py:111 +#, python-format +msgid "Bad key(s) in quota set: %s" msgstr "" -#: cinder/policy.py:30 -msgid "JSON file representing policy" +#: cinder/api/contrib/scheduler_hints.py:36 +msgid "Malformed scheduler_hints attribute" msgstr "" -#: cinder/policy.py:33 -msgid "Rule checked when requested rule is not found" +#: cinder/api/contrib/services.py:84 +msgid "" +"Query by service parameter is deprecated. Please use binary parameter " +"instead." msgstr "" -#: cinder/service.py:137 -msgid "SIGTERM received" +#: cinder/api/contrib/snapshot_actions.py:51 +msgid "'status' must be specified." 
msgstr "" -#: cinder/service.py:177 +#: cinder/api/contrib/snapshot_actions.py:61 #, python-format -msgid "Starting %(topic)s node (version %(vcs_string)s)" +msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" msgstr "" -#: cinder/service.py:195 +#: cinder/api/contrib/snapshot_actions.py:67 #, python-format -msgid "Creating Consumer connection for Service %s" +msgid "" +"Provided snapshot status %(provided)s not allowed for snapshot with " +"status %(current)s." msgstr "" -#: cinder/service.py:282 -msgid "Service killed that has no database entry" +#: cinder/api/contrib/snapshot_actions.py:79 +msgid "progress must be an integer percentage" msgstr "" -#: cinder/service.py:319 -msgid "The service database object disappeared, Recreating it." +#: cinder/api/contrib/types_extra_specs.py:101 +msgid "Request body empty" msgstr "" -#: cinder/service.py:334 -msgid "Recovered model server connection!" +#: cinder/api/contrib/types_extra_specs.py:105 +#: cinder/api/v1/snapshot_metadata.py:75 cinder/api/v1/volume_metadata.py:75 +#: cinder/api/v2/snapshot_metadata.py:75 cinder/api/v2/volume_metadata.py:74 +msgid "Request body and URI mismatch" msgstr "" -#: cinder/service.py:340 -msgid "model server went away" +#: cinder/api/contrib/types_extra_specs.py:108 +#: cinder/api/v1/snapshot_metadata.py:79 cinder/api/v1/volume_metadata.py:79 +#: cinder/api/v2/snapshot_metadata.py:79 cinder/api/v2/volume_metadata.py:78 +msgid "Request body contains too many items" msgstr "" -#: cinder/service.py:433 -msgid "Full set of FLAGS:" +#: cinder/api/contrib/types_extra_specs.py:150 +msgid "" +"Key names can only contain alphanumeric characters, underscores, periods," +" colons and hyphens." msgstr "" -#: cinder/service.py:440 +#: cinder/api/contrib/volume_actions.py:99 #, python-format -msgid "%(flag)s : FLAG SET " +msgid "" +"Invalid request to attach volume to an instance %(instance_uuid)s and a " +"host %(host_name)s simultaneously" msgstr "" -#: cinder/utils.py:79 -#, python-format -msgid "Inner Exception: %s" +#: cinder/api/contrib/volume_actions.py:107 +msgid "Invalid request to attach volume to an invalid target" msgstr "" -#: cinder/utils.py:165 -#, python-format -msgid "Fetching %s" +#: cinder/api/contrib/volume_actions.py:111 +msgid "" +"Invalid request to attach volume with an invalid mode. Attaching mode " +"should be 'rw' or 'ro'" msgstr "" -#: cinder/utils.py:210 -#, python-format -msgid "Got unknown keyword args to utils.execute: %r" +#: cinder/api/contrib/volume_actions.py:196 +msgid "Unable to fetch connection information from backend." msgstr "" -#: cinder/utils.py:220 -#, python-format -msgid "Running cmd (subprocess): %s" +#: cinder/api/contrib/volume_actions.py:216 +msgid "Unable to terminate volume connection from backend." msgstr "" -#: cinder/utils.py:236 cinder/utils.py:315 -#, python-format -msgid "Result was %s" +#: cinder/api/contrib/volume_actions.py:229 +msgid "No image_name was specified in request." msgstr "" -#: cinder/utils.py:249 -#, python-format -msgid "%r failed. Retrying." +#: cinder/api/contrib/volume_actions.py:237 +msgid "Bad value for 'force' parameter." msgstr "" -#: cinder/utils.py:291 -#, python-format -msgid "Running cmd (SSH): %s" +#: cinder/api/contrib/volume_actions.py:240 +msgid "'force' is not string or bool." msgstr "" -#: cinder/utils.py:293 -msgid "Environment not supported over SSH" +#: cinder/api/contrib/volume_actions.py:280 +msgid "New volume size must be specified as an integer." 
msgstr "" -#: cinder/utils.py:297 -msgid "process_input not supported over SSH" +#: cinder/api/contrib/volume_actions.py:299 +msgid "Must specify readonly in request." msgstr "" -#: cinder/utils.py:352 -#, python-format -msgid "debug in callback: %s" +#: cinder/api/contrib/volume_actions.py:307 +msgid "Bad value for 'readonly'" msgstr "" -#: cinder/utils.py:534 -#, python-format -msgid "Link Local address is not found.:%s" +#: cinder/api/contrib/volume_actions.py:311 +msgid "'readonly' not string or bool" msgstr "" -#: cinder/utils.py:537 -#, python-format -msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +#: cinder/api/contrib/volume_actions.py:325 +msgid "New volume type must be specified." msgstr "" -#: cinder/utils.py:648 -#, python-format -msgid "Invalid backend: %s" +#: cinder/api/contrib/volume_transfer.py:131 +msgid "Listing volume transfers" msgstr "" -#: cinder/utils.py:659 +#: cinder/api/contrib/volume_transfer.py:147 #, python-format -msgid "backend %s" +msgid "Creating new volume transfer %s" msgstr "" -#: cinder/utils.py:709 -msgid "in looping call" +#: cinder/api/contrib/volume_transfer.py:162 +#, python-format +msgid "Creating transfer of volume %s" msgstr "" -#: cinder/utils.py:927 +#: cinder/api/contrib/volume_transfer.py:183 #, python-format -msgid "Attempting to grab semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgid "Accepting volume transfer %s" msgstr "" -#: cinder/utils.py:931 +#: cinder/api/contrib/volume_transfer.py:196 #, python-format -msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgid "Accepting transfer %s" msgstr "" -#: cinder/utils.py:935 +#: cinder/api/contrib/volume_transfer.py:217 #, python-format -msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgid "Delete transfer with id: %s" msgstr "" -#: cinder/utils.py:942 -#, python-format -msgid "Got file lock \"%(lock)s\" for method \"%(method)s\"..." +#: cinder/api/contrib/volume_type_encryption.py:64 +msgid "key_size must be non-negative" msgstr "" -#: cinder/utils.py:1001 -#, python-format -msgid "Found sentinel %(filename)s for pid %(pid)s" +#: cinder/api/contrib/volume_type_encryption.py:67 +msgid "key_size must be an integer" msgstr "" -#: cinder/utils.py:1008 -#, python-format -msgid "Cleaned sentinel %(filename)s for pid %(pid)s" +#: cinder/api/contrib/volume_type_encryption.py:73 +msgid "provider must be defined" msgstr "" -#: cinder/utils.py:1023 -#, python-format -msgid "Found lockfile %(file)s with link count %(count)d" +#: cinder/api/contrib/volume_type_encryption.py:75 +msgid "control_location must be defined" msgstr "" -#: cinder/utils.py:1028 +#: cinder/api/contrib/volume_type_encryption.py:83 #, python-format -msgid "Cleaned lockfile %(file)s with link count %(count)d" +msgid "Valid control location are: %s" msgstr "" -#: cinder/utils.py:1138 -#, python-format -msgid "Expected object of type: %s" +#: cinder/api/contrib/volume_type_encryption.py:111 +msgid "Cannot create encryption specs. Volume type in use." msgstr "" -#: cinder/utils.py:1169 -#, python-format -msgid "Invalid server_string: %s" +#: cinder/api/contrib/volume_type_encryption.py:115 +msgid "Create body is not valid." msgstr "" -#: cinder/utils.py:1298 -#, python-format -msgid "timefunc: '%(name)s' took %(total_time).2f secs" +#: cinder/api/contrib/volume_type_encryption.py:157 +msgid "Cannot delete encryption specs. Volume type in use." 
msgstr "" -#: cinder/utils.py:1330 -msgid "Original exception being dropped" +#: cinder/api/middleware/auth.py:108 +msgid "Invalid service catalog json." msgstr "" -#: cinder/utils.py:1461 +#: cinder/api/middleware/fault.py:44 #, python-format -msgid "Class %(fullname)s is deprecated: %(msg)s" +msgid "Caught error: %s" msgstr "" -#: cinder/utils.py:1463 +#: cinder/api/middleware/fault.py:53 cinder/api/openstack/wsgi.py:984 #, python-format -msgid "Class %(fullname)s is deprecated" +msgid "%(url)s returned with HTTP %(status)d" msgstr "" -#: cinder/utils.py:1495 +#: cinder/api/middleware/fault.py:69 #, python-format -msgid "Function %(name)s in %(location)s is deprecated: %(msg)s" +msgid "%(exception)s: %(explanation)s" msgstr "" -#: cinder/utils.py:1497 -#, python-format -msgid "Function %(name)s in %(location)s is deprecated" +#: cinder/api/middleware/sizelimit.py:55 cinder/api/middleware/sizelimit.py:64 +#: cinder/api/middleware/sizelimit.py:78 +msgid "Request is too large." msgstr "" -#: cinder/utils.py:1681 -#, python-format -msgid "Could not remove tmpdir: %s" +#: cinder/api/openstack/__init__.py:69 +msgid "Must specify an ExtensionManager class" msgstr "" -#: cinder/wsgi.py:97 +#: cinder/api/openstack/__init__.py:80 #, python-format -msgid "Started %(name)s on %(host)s:%(port)s" +msgid "Extended resource: %s" msgstr "" -#: cinder/wsgi.py:108 -msgid "Stopping WSGI server." +#: cinder/api/openstack/__init__.py:104 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" msgstr "" -#: cinder/wsgi.py:111 -msgid "Stopping raw TCP server." +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" msgstr "" -#: cinder/wsgi.py:117 -#, python-format -msgid "Starting TCP server %(arg0)s on %(host)s:%(port)s" +#: cinder/api/openstack/__init__.py:126 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." msgstr "" -#: cinder/wsgi.py:133 -msgid "WSGI server has stopped." +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." msgstr "" -#: cinder/wsgi.py:211 -msgid "You must implement __call__" +#: cinder/api/openstack/wsgi.py:220 cinder/api/openstack/wsgi.py:634 +msgid "cannot understand JSON" msgstr "" -#: cinder/api/direct.py:218 -msgid "not available" +#: cinder/api/openstack/wsgi.py:639 +msgid "too many body keys" msgstr "" -#: cinder/api/direct.py:299 +#: cinder/api/openstack/wsgi.py:677 #, python-format -msgid "Returned non-serializeable type: %s" +msgid "Exception handling resource: %s" msgstr "" -#: cinder/api/sizelimit.py:51 -msgid "Request is too large." +#: cinder/api/openstack/wsgi.py:682 +#, python-format +msgid "Fault thrown: %s" msgstr "" -#: cinder/api/validator.py:142 +#: cinder/api/openstack/wsgi.py:685 #, python-format -msgid "%(key)s with value %(value)s failed validator %(validator)s" +msgid "HTTP exception thrown: %s" msgstr "" -#: cinder/api/ec2/__init__.py:73 -#, python-format -msgid "%(code)s: %(message)s" +#: cinder/api/openstack/wsgi.py:793 +msgid "Empty body provided in request" msgstr "" -#: cinder/api/ec2/__init__.py:95 -#, python-format -msgid "FaultWrapper: %s" +#: cinder/api/openstack/wsgi.py:799 +msgid "Unrecognized Content-Type provided in request" msgstr "" -#: cinder/api/ec2/__init__.py:170 -msgid "Too many failed authentications." 
+#: cinder/api/openstack/wsgi.py:803 +msgid "No Content-Type provided in request" msgstr "" -#: cinder/api/ec2/__init__.py:180 +#: cinder/api/openstack/wsgi.py:914 #, python-format -msgid "" -"Access key %(access_key)s has had %(failures)d failed authentications and" -" will be locked out for %(lock_mins)d minutes." +msgid "There is no such action: %s" msgstr "" -#: cinder/api/ec2/__init__.py:267 -msgid "Signature not provided" +#: cinder/api/openstack/wsgi.py:917 cinder/api/openstack/wsgi.py:930 +#: cinder/api/v1/snapshot_metadata.py:53 cinder/api/v1/snapshot_metadata.py:71 +#: cinder/api/v1/snapshot_metadata.py:96 cinder/api/v1/snapshot_metadata.py:121 +#: cinder/api/v1/volume_metadata.py:53 cinder/api/v1/volume_metadata.py:71 +#: cinder/api/v1/volume_metadata.py:96 cinder/api/v1/volume_metadata.py:121 +#: cinder/api/v2/snapshot_metadata.py:53 cinder/api/v2/snapshot_metadata.py:71 +#: cinder/api/v2/snapshot_metadata.py:96 cinder/api/v2/snapshot_metadata.py:121 +#: cinder/api/v2/volume_metadata.py:52 cinder/api/v2/volume_metadata.py:70 +#: cinder/api/v2/volume_metadata.py:95 cinder/api/v2/volume_metadata.py:120 +msgid "Malformed request body" msgstr "" -#: cinder/api/ec2/__init__.py:271 -msgid "Access key not provided" +#: cinder/api/openstack/wsgi.py:927 +msgid "Unsupported Content-Type" msgstr "" -#: cinder/api/ec2/__init__.py:306 cinder/api/ec2/__init__.py:319 -msgid "Failure communicating with keystone" +#: cinder/api/openstack/wsgi.py:939 +msgid "Malformed request url" msgstr "" -#: cinder/api/ec2/__init__.py:388 +#: cinder/api/openstack/wsgi.py:987 #, python-format -msgid "Authentication Failure: %s" +msgid "%(url)s returned a fault: %(e)s" msgstr "" -#: cinder/api/ec2/__init__.py:404 -#, python-format -msgid "Authenticated Request For %(uname)s:%(pname)s)" +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." msgstr "" -#: cinder/api/ec2/__init__.py:435 -#, python-format -msgid "action: %s" +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." msgstr "" -#: cinder/api/ec2/__init__.py:437 +#: cinder/api/v1/limits.py:139 cinder/api/v2/limits.py:138 #, python-format -msgid "arg: %(key)s\t\tval: %(value)s" +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." msgstr "" -#: cinder/api/ec2/__init__.py:512 -#, python-format -msgid "Unauthorized request for controller=%(controller)s and action=%(action)s" +#: cinder/api/v1/limits.py:264 cinder/api/v2/limits.py:261 +msgid "This request was rate-limited." 
msgstr "" -#: cinder/api/ec2/__init__.py:584 -#, python-format -msgid "InstanceNotFound raised: %s" +#: cinder/api/v1/snapshot_metadata.py:37 cinder/api/v1/snapshot_metadata.py:117 +#: cinder/api/v1/snapshot_metadata.py:156 cinder/api/v2/snapshot_metadata.py:37 +#: cinder/api/v2/snapshot_metadata.py:117 +#: cinder/api/v2/snapshot_metadata.py:156 +msgid "snapshot does not exist" msgstr "" -#: cinder/api/ec2/__init__.py:590 -#, python-format -msgid "VolumeNotFound raised: %s" +#: cinder/api/v1/snapshot_metadata.py:139 +#: cinder/api/v1/snapshot_metadata.py:149 cinder/api/v1/volume_metadata.py:139 +#: cinder/api/v1/volume_metadata.py:149 cinder/api/v2/snapshot_metadata.py:139 +#: cinder/api/v2/snapshot_metadata.py:149 cinder/api/v2/volume_metadata.py:138 +#: cinder/api/v2/volume_metadata.py:148 +msgid "Metadata item was not found" msgstr "" -#: cinder/api/ec2/__init__.py:596 +#: cinder/api/v1/snapshots.py:119 cinder/api/v2/snapshots.py:120 #, python-format -msgid "SnapshotNotFound raised: %s" +msgid "Delete snapshot with id: %s" msgstr "" -#: cinder/api/ec2/__init__.py:602 -#, python-format -msgid "NotFound raised: %s" +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:184 +msgid "'volume_id' must be specified" msgstr "" -#: cinder/api/ec2/__init__.py:605 +#: cinder/api/v1/snapshots.py:182 cinder/api/v2/snapshots.py:193 #, python-format -msgid "EC2APIError raised: %s" +msgid "Create snapshot from volume %s" msgstr "" -#: cinder/api/ec2/__init__.py:613 +#: cinder/api/v1/snapshots.py:186 cinder/api/v2/snapshots.py:202 #, python-format -msgid "KeyPairExists raised: %s" +msgid "Invalid value '%s' for force. " msgstr "" -#: cinder/api/ec2/__init__.py:617 -#, python-format -msgid "InvalidParameterValue raised: %s" +#: cinder/api/v1/volume_metadata.py:37 cinder/api/v1/volume_metadata.py:117 +#: cinder/api/v1/volume_metadata.py:156 cinder/api/v2/volume_metadata.py:36 +#: cinder/api/v2/volume_metadata.py:116 cinder/api/v2/volume_metadata.py:155 +msgid "volume does not exist" msgstr "" -#: cinder/api/ec2/__init__.py:621 +#: cinder/api/v1/volumes.py:111 #, python-format -msgid "InvalidPortRange raised: %s" +msgid "vol=%s" msgstr "" -#: cinder/api/ec2/__init__.py:625 +#: cinder/api/v1/volumes.py:290 cinder/api/v2/volumes.py:228 #, python-format -msgid "NotAuthorized raised: %s" +msgid "Delete volume with id: %s" msgstr "" -#: cinder/api/ec2/__init__.py:629 -#, python-format -msgid "InvalidRequest raised: %s" +#: cinder/api/v1/volumes.py:344 cinder/api/v1/volumes.py:348 +#: cinder/api/v2/volumes.py:298 cinder/api/v2/volumes.py:302 +msgid "Invalid imageRef provided." msgstr "" -#: cinder/api/ec2/__init__.py:633 +#: cinder/api/v1/volumes.py:388 cinder/api/v2/volumes.py:354 #, python-format -msgid "QuotaError raised: %s" +msgid "snapshot id:%s not found" msgstr "" -#: cinder/api/ec2/__init__.py:637 +#: cinder/api/v1/volumes.py:401 #, python-format -msgid "Invalid id: bogus (expecting \"i-...\"): %s" +msgid "source vol id:%s not found" msgstr "" -#: cinder/api/ec2/__init__.py:646 +#: cinder/api/v1/volumes.py:412 cinder/api/v2/volumes.py:377 #, python-format -msgid "Unexpected error raised: %s" +msgid "Create volume of %s GB" msgstr "" -#: cinder/api/ec2/__init__.py:647 +#: cinder/api/v1/volumes.py:496 #, python-format -msgid "Environment: %s" +msgid "Removing options '%(bad_options)s' from query" msgstr "" -#: cinder/api/ec2/__init__.py:649 cinder/api/metadata/handler.py:248 -msgid "An unknown error has occurred. Please try your request again." 
+#: cinder/api/v2/snapshots.py:111 cinder/api/v2/snapshots.py:126 +#: cinder/api/v2/snapshots.py:267 +msgid "Snapshot could not be found" msgstr "" -#: cinder/api/ec2/apirequest.py:64 +#: cinder/api/v2/snapshots.py:174 cinder/api/v2/snapshots.py:234 +#: cinder/api/v2/volumes.py:313 cinder/api/v2/volumes.py:419 #, python-format -msgid "Unsupported API request: controller = %(controller)s, action = %(action)s" +msgid "Missing required element '%s' in request body" msgstr "" -#: cinder/api/ec2/cloud.py:336 -#, python-format -msgid "Create snapshot of volume %s" +#: cinder/api/v2/snapshots.py:190 cinder/api/v2/volumes.py:217 +#: cinder/api/v2/volumes.py:234 cinder/api/v2/volumes.py:449 +msgid "Volume could not be found" msgstr "" -#: cinder/api/ec2/cloud.py:372 -#, python-format -msgid "" -"Value (%s) for KeyName is invalid. Content limited to Alphanumeric " -"character, spaces, dashes, and underscore." +#: cinder/api/v2/snapshots.py:230 cinder/api/v2/volumes.py:415 +msgid "Missing request body" msgstr "" -#: cinder/api/ec2/cloud.py:378 -#, python-format -msgid "Value (%s) for Keyname is invalid. Length exceeds maximum of 255." +#: cinder/api/v2/types.py:70 +msgid "Volume type not found" msgstr "" -#: cinder/api/ec2/cloud.py:382 -#, python-format -msgid "Create key pair %s" +#: cinder/api/v2/volumes.py:237 +msgid "Volume cannot be deleted while in attached state" msgstr "" -#: cinder/api/ec2/cloud.py:391 -#, python-format -msgid "Import key %s" +#: cinder/api/v2/volumes.py:343 +msgid "Volume type not found." msgstr "" -#: cinder/api/ec2/cloud.py:409 +#: cinder/api/v2/volumes.py:366 #, python-format -msgid "Delete key pair %s" +msgid "source volume id:%s not found" msgstr "" -#: cinder/api/ec2/cloud.py:551 -msgid "Invalid CIDR" +#: cinder/api/v2/volumes.py:472 +#, python-format +msgid "Removing options '%s' from query" msgstr "" -#: cinder/api/ec2/cloud.py:639 cinder/api/ec2/cloud.py:693 -#: cinder/api/ec2/cloud.py:800 -msgid "Not enough parameters, need group_name or group_id" +#: cinder/backup/api.py:66 +msgid "Backup status must be available or error" msgstr "" -#: cinder/api/ec2/cloud.py:654 -#: cinder/api/openstack/compute/contrib/security_groups.py:517 -#, python-format -msgid "Revoke security group ingress %s" +#: cinder/backup/api.py:105 +msgid "Volume to be backed up must be available" msgstr "" -#: cinder/api/ec2/cloud.py:666 cinder/api/ec2/cloud.py:719 -#, python-format -msgid "%s Not enough parameters to build a valid rule" +#: cinder/backup/api.py:140 +msgid "Backup status must be available" msgstr "" -#: cinder/api/ec2/cloud.py:684 cinder/api/ec2/cloud.py:744 -msgid "No rule for the specified parameters." +#: cinder/backup/api.py:145 +msgid "Backup to be restored has invalid size" msgstr "" -#: cinder/api/ec2/cloud.py:708 -#: cinder/api/openstack/compute/contrib/security_groups.py:354 +#: cinder/backup/api.py:154 #, python-format -msgid "Authorize security group ingress %s" +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" msgstr "" -#: cinder/api/ec2/cloud.py:725 -#, python-format -msgid "%s - This rule already exists in group" +#: cinder/backup/api.py:170 +msgid "Volume to be restored to must be available" msgstr "" -#: cinder/api/ec2/cloud.py:769 +#: cinder/backup/api.py:176 #, python-format msgid "" -"Value (%s) for parameter GroupName is invalid. Content limited to " -"Alphanumeric characters, spaces, dashes, and underscores." +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." 
msgstr "" -#: cinder/api/ec2/cloud.py:776 +#: cinder/backup/api.py:181 #, python-format -msgid "" -"Value (%s) for parameter GroupName is invalid. Length exceeds maximum of " -"255." +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" msgstr "" -#: cinder/api/ec2/cloud.py:780 -#: cinder/api/openstack/compute/contrib/security_groups.py:292 -#, python-format -msgid "Create Security Group %s" +#: cinder/backup/manager.py:97 +msgid "NULL host not allowed for volume backend lookup." msgstr "" -#: cinder/api/ec2/cloud.py:783 +#: cinder/backup/manager.py:100 #, python-format -msgid "group %s already exists" +msgid "Checking hostname '%s' for backend info." msgstr "" -#: cinder/api/ec2/cloud.py:815 -#: cinder/api/openstack/compute/contrib/security_groups.py:245 +#: cinder/backup/manager.py:107 #, python-format -msgid "Delete security group %s" +msgid "Backend not found in hostname (%s) so using default." msgstr "" -#: cinder/api/ec2/cloud.py:823 cinder/compute/manager.py:1630 +#: cinder/backup/manager.py:117 #, python-format -msgid "Get console output for instance %s" +msgid "Manager requested for volume_backend '%s'." msgstr "" -#: cinder/api/ec2/cloud.py:894 +#: cinder/backup/manager.py:120 cinder/backup/manager.py:132 +msgid "Fetching default backend." +msgstr "" + +#: cinder/backup/manager.py:123 #, python-format -msgid "Create volume from snapshot %s" +msgid "Volume manager for backend '%s' does not exist." msgstr "" -#: cinder/api/ec2/cloud.py:898 cinder/api/openstack/compute/contrib/volumes.py:186 -#: cinder/api/openstack/volume/volumes.py:222 +#: cinder/backup/manager.py:129 #, python-format -msgid "Create volume of %s GB" +msgid "Driver requested for volume_backend '%s'." msgstr "" -#: cinder/api/ec2/cloud.py:921 -msgid "Delete Failed" +#: cinder/backup/manager.py:147 +#, python-format +msgid "" +"Registering backend %(backend)s (host=%(host)s " +"backend_name=%(backend_name)s)." msgstr "" -#: cinder/api/ec2/cloud.py:931 +#: cinder/backup/manager.py:154 #, python-format -msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" +msgid "Registering default backend %s." msgstr "" -#: cinder/api/ec2/cloud.py:939 -msgid "Attach Failed." +#: cinder/backup/manager.py:158 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)." msgstr "" -#: cinder/api/ec2/cloud.py:952 cinder/api/openstack/compute/contrib/volumes.py:366 +#: cinder/backup/manager.py:165 #, python-format -msgid "Detach volume %s" +msgid "Error encountered during initialization of driver: %(name)s." msgstr "" -#: cinder/api/ec2/cloud.py:959 -msgid "Detach Volume Failed." +#: cinder/backup/manager.py:184 +msgid "Cleaning up incomplete backup operations." msgstr "" -#: cinder/api/ec2/cloud.py:984 cinder/api/ec2/cloud.py:1041 -#: cinder/api/ec2/cloud.py:1518 cinder/api/ec2/cloud.py:1533 +#: cinder/backup/manager.py:189 #, python-format -msgid "attribute not supported: %s" +msgid "Resetting volume %s to available (was backing-up)." msgstr "" -#: cinder/api/ec2/cloud.py:1107 +#: cinder/backup/manager.py:194 #, python-format -msgid "vol = %s\n" +msgid "Resetting volume %s to error_restoring (was restoring-backup)." msgstr "" -#: cinder/api/ec2/cloud.py:1255 -msgid "Allocate address" +#: cinder/backup/manager.py:206 +#, python-format +msgid "Resetting backup %s to error (was creating)." msgstr "" -#: cinder/api/ec2/cloud.py:1267 +#: cinder/backup/manager.py:212 #, python-format -msgid "Release address %s" +msgid "Resetting backup %s to available (was restoring)." 
msgstr "" -#: cinder/api/ec2/cloud.py:1272 +#: cinder/backup/manager.py:217 #, python-format -msgid "Associate address %(public_ip)s to instance %(instance_id)s" +msgid "Resuming delete on backup: %s." msgstr "" -#: cinder/api/ec2/cloud.py:1282 +#: cinder/backup/manager.py:225 #, python-format -msgid "Disassociate address %s" +msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." msgstr "" -#: cinder/api/ec2/cloud.py:1308 -msgid "Image must be available" +#: cinder/backup/manager.py:237 +#, python-format +msgid "" +"Create backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s." msgstr "" -#: cinder/api/ec2/cloud.py:1329 -msgid "Going to start terminating instances" +#: cinder/backup/manager.py:249 +#, python-format +msgid "" +"Create backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." msgstr "" -#: cinder/api/ec2/cloud.py:1343 +#: cinder/backup/manager.py:282 #, python-format -msgid "Reboot instance %r" +msgid "Create backup finished. backup: %s." msgstr "" -#: cinder/api/ec2/cloud.py:1354 -msgid "Going to stop instances" +#: cinder/backup/manager.py:286 +#, python-format +msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s." msgstr "" -#: cinder/api/ec2/cloud.py:1365 -msgid "Going to start instances" +#: cinder/backup/manager.py:299 +#, python-format +msgid "" +"Restore backup aborted: expected volume status %(expected_status)s but " +"got %(actual_status)s." msgstr "" -#: cinder/api/ec2/cloud.py:1455 +#: cinder/backup/manager.py:310 #, python-format -msgid "De-registering image %s" +msgid "" +"Restore backup aborted: expected backup status %(expected_status)s but " +"got %(actual_status)s." msgstr "" -#: cinder/api/ec2/cloud.py:1471 -msgid "imageLocation is required" +#: cinder/backup/manager.py:329 +#, python-format +msgid "" +"Restore backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." msgstr "" -#: cinder/api/ec2/cloud.py:1490 +#: cinder/backup/manager.py:360 #, python-format -msgid "Registered image %(image_location)s with id %(image_id)s" +msgid "" +"Restore backup finished, backup %(backup_id)s restored to volume " +"%(volume_id)s." msgstr "" -#: cinder/api/ec2/cloud.py:1536 -msgid "user or group not specified" +#: cinder/backup/manager.py:379 +#, python-format +msgid "Delete backup started, backup: %s." msgstr "" -#: cinder/api/ec2/cloud.py:1538 -msgid "only group \"all\" is supported" +#: cinder/backup/manager.py:386 +#, python-format +msgid "" +"Delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." msgstr "" -#: cinder/api/ec2/cloud.py:1540 -msgid "operation_type must be add or remove" +#: cinder/backup/manager.py:399 +#, python-format +msgid "" +"Delete backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." msgstr "" -#: cinder/api/ec2/cloud.py:1542 +#: cinder/backup/manager.py:422 #, python-format -msgid "Updating image %s publicity" +msgid "Delete backup finished, backup %s deleted." 
msgstr "" -#: cinder/api/ec2/cloud.py:1555 -#, python-format -msgid "Not allowed to modify attributes for image %s" +#: cinder/backup/drivers/ceph.py:116 +msgid "" +"rbd striping not supported - ignoring configuration settings for rbd " +"striping" msgstr "" -#: cinder/api/ec2/cloud.py:1603 +#: cinder/backup/drivers/ceph.py:147 #, python-format -msgid "Couldn't stop instance with in %d sec" +msgid "invalid user '%s'" msgstr "" -#: cinder/api/metadata/handler.py:246 cinder/api/metadata/handler.py:253 -#, python-format -msgid "Failed to get metadata for ip: %s" +#: cinder/backup/drivers/ceph.py:213 +msgid "backup_id required" msgstr "" -#: cinder/api/openstack/__init__.py:43 +#: cinder/backup/drivers/ceph.py:224 #, python-format -msgid "Caught error: %s" +msgid "discarding %(length)s bytes from offset %(offset)s" msgstr "" -#: cinder/api/openstack/__init__.py:45 cinder/api/openstack/wsgi.py:886 +#: cinder/backup/drivers/ceph.py:232 #, python-format -msgid "%(url)s returned with HTTP %(status)d" +msgid "writing zeroes chunk %d" msgstr "" -#: cinder/api/openstack/__init__.py:94 -msgid "Must specify an ExtensionManager class" +#: cinder/backup/drivers/ceph.py:246 +#, python-format +msgid "transferring data between '%(src)s' and '%(dest)s'" msgstr "" -#: cinder/api/openstack/__init__.py:105 +#: cinder/backup/drivers/ceph.py:250 #, python-format -msgid "Extended resource: %s" +msgid "%(chunks)s chunks of %(bytes)s bytes to be transferred" msgstr "" -#: cinder/api/openstack/__init__.py:130 +#: cinder/backup/drivers/ceph.py:269 #, python-format -msgid "" -"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " -"resource" +msgid "transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s)" msgstr "" -#: cinder/api/openstack/__init__.py:135 +#: cinder/backup/drivers/ceph.py:279 #, python-format -msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgid "transferring remaining %s bytes" msgstr "" -#: cinder/api/openstack/auth.py:90 +#: cinder/backup/drivers/ceph.py:295 #, python-format -msgid "%(user_id)s could not be found with token '%(token)s'" +msgid "creating base image '%s'" msgstr "" -#: cinder/api/openstack/auth.py:134 +#: cinder/backup/drivers/ceph.py:322 cinder/backup/drivers/ceph.py:603 #, python-format -msgid "%(user_id)s must be an admin or a member of %(project_id)s" +msgid "deleting backup snapshot='%s'" msgstr "" -#: cinder/api/openstack/auth.py:152 -msgid "Authentication requests must be made against a version root (e.g. /v2)." +#: cinder/backup/drivers/ceph.py:325 +msgid "no backup snapshot to delete" msgstr "" -#: cinder/api/openstack/auth.py:167 +#: cinder/backup/drivers/ceph.py:361 #, python-format -msgid "Could not find %s in request." +msgid "trying diff format name format basename='%s'" msgstr "" -#: cinder/api/openstack/auth.py:191 +#: cinder/backup/drivers/ceph.py:369 #, python-format -msgid "Successfully authenticated '%s'" +msgid "image %s not found" msgstr "" -#: cinder/api/openstack/auth.py:241 -msgid "User not found with provided API key." 
+#: cinder/backup/drivers/ceph.py:377 +#, python-format +msgid "base image still has %s snapshots so skipping base image delete" msgstr "" -#: cinder/api/openstack/auth.py:258 +#: cinder/backup/drivers/ceph.py:382 #, python-format -msgid "Provided API key is valid, but not for user '%(username)s'" +msgid "deleting base image='%s'" msgstr "" -#: cinder/api/openstack/common.py:133 cinder/api/openstack/common.py:167 -msgid "limit param must be an integer" +#: cinder/backup/drivers/ceph.py:389 +#, python-format +msgid "image busy, retrying %(retries)s more time(s) in %(delay)ss" msgstr "" -#: cinder/api/openstack/common.py:136 cinder/api/openstack/common.py:171 -msgid "limit param must be positive" +#: cinder/backup/drivers/ceph.py:394 +msgid "max retries reached - raising error" msgstr "" -#: cinder/api/openstack/common.py:161 -msgid "offset param must be an integer" +#: cinder/backup/drivers/ceph.py:397 +#, python-format +msgid "base backup image='%s' deleted)" msgstr "" -#: cinder/api/openstack/common.py:175 -msgid "offset param must be positive" +#: cinder/backup/drivers/ceph.py:407 +#, python-format +msgid "deleting source snap '%s'" msgstr "" -#: cinder/api/openstack/common.py:203 +#: cinder/backup/drivers/ceph.py:453 #, python-format -msgid "marker [%s] not found" +msgid "performing differential transfer from '%(src)s' to '%(dest)s'" msgstr "" -#: cinder/api/openstack/common.py:243 +#: cinder/backup/drivers/ceph.py:478 #, python-format -msgid "href %s does not contain version" +msgid "rbd diff op failed - (ret=%(ret)s stderr=%(stderr)s)" msgstr "" -#: cinder/api/openstack/common.py:278 -msgid "Image metadata limit exceeded" +#: cinder/backup/drivers/ceph.py:488 +#, python-format +msgid "image '%s' not found - trying diff format name" msgstr "" -#: cinder/api/openstack/common.py:295 +#: cinder/backup/drivers/ceph.py:493 #, python-format -msgid "Converting nw_info: %s" +msgid "diff format image '%s' not found" msgstr "" -#: cinder/api/openstack/common.py:305 +#: cinder/backup/drivers/ceph.py:528 #, python-format -msgid "Converted networks: %s" +msgid "using --from-snap '%s'" msgstr "" -#: cinder/api/openstack/common.py:338 +#: cinder/backup/drivers/ceph.py:543 #, python-format -msgid "Cannot '%(action)s' while instance is in %(attr)s %(state)s" +msgid "source snap '%s' is stale so deleting" msgstr "" -#: cinder/api/openstack/common.py:341 +#: cinder/backup/drivers/ceph.py:555 #, python-format -msgid "Instance is in an invalid state for '%(action)s'" +msgid "" +"snap='%(snap)s' does not exist in base image='%(base)s' - aborting " +"incremental backup" msgstr "" -#: cinder/api/openstack/common.py:421 -msgid "Rejecting snapshot request, snapshots currently disabled" +#: cinder/backup/drivers/ceph.py:566 +#, python-format +msgid "creating backup snapshot='%s'" msgstr "" -#: cinder/api/openstack/common.py:423 -msgid "Instance snapshots are not permitted at this time." 
+#: cinder/backup/drivers/ceph.py:586 +#, python-format +msgid "differential backup transfer completed in %.4fs" msgstr "" -#: cinder/api/openstack/extensions.py:188 -#, python-format -msgid "Loaded extension: %s" +#: cinder/backup/drivers/ceph.py:595 +msgid "differential backup transfer failed" msgstr "" -#: cinder/api/openstack/extensions.py:225 +#: cinder/backup/drivers/ceph.py:625 #, python-format -msgid "Ext name: %s" +msgid "creating base image='%s'" msgstr "" -#: cinder/api/openstack/extensions.py:226 -#, python-format -msgid "Ext alias: %s" +#: cinder/backup/drivers/ceph.py:634 +msgid "copying data" msgstr "" -#: cinder/api/openstack/extensions.py:227 +#: cinder/backup/drivers/ceph.py:694 #, python-format -msgid "Ext description: %s" +msgid "looking for snapshot of backup base '%s'" msgstr "" -#: cinder/api/openstack/extensions.py:229 +#: cinder/backup/drivers/ceph.py:697 #, python-format -msgid "Ext namespace: %s" +msgid "backup base '%s' has no snapshots" msgstr "" -#: cinder/api/openstack/extensions.py:230 +#: cinder/backup/drivers/ceph.py:704 #, python-format -msgid "Ext updated: %s" +msgid "backup '%s' has no snapshot" msgstr "" -#: cinder/api/openstack/extensions.py:232 +#: cinder/backup/drivers/ceph.py:708 #, python-format -msgid "Exception loading extension: %s" +msgid "backup should only have one snapshot but instead has %s" msgstr "" -#: cinder/api/openstack/extensions.py:246 +#: cinder/backup/drivers/ceph.py:713 #, python-format -msgid "Loading extension %s" +msgid "found snapshot '%s'" msgstr "" -#: cinder/api/openstack/extensions.py:252 -#, python-format -msgid "Calling extension factory %s" +#: cinder/backup/drivers/ceph.py:734 +msgid "need non-zero volume size" msgstr "" -#: cinder/api/openstack/extensions.py:264 +#: cinder/backup/drivers/ceph.py:751 #, python-format -msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgid "Starting backup of volume='%s'" msgstr "" -#: cinder/api/openstack/extensions.py:344 -#, python-format -msgid "Failed to load extension %(classpath)s: %(exc)s" +#: cinder/backup/drivers/ceph.py:764 +msgid "forcing full backup" msgstr "" -#: cinder/api/openstack/extensions.py:368 +#: cinder/backup/drivers/ceph.py:776 #, python-format -msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgid "backup '%s' finished." 
msgstr "" -#: cinder/api/openstack/wsgi.py:135 cinder/api/openstack/wsgi.py:538 -msgid "cannot understand JSON" +#: cinder/backup/drivers/ceph.py:834 +msgid "adjusting restore vol size" msgstr "" -#: cinder/api/openstack/wsgi.py:159 -#: cinder/api/openstack/compute/contrib/hosts.py:86 -msgid "cannot understand XML" +#: cinder/backup/drivers/ceph.py:846 +#, python-format +msgid "trying incremental restore from base='%(base)s' snap='%(snap)s'" msgstr "" -#: cinder/api/openstack/wsgi.py:543 -msgid "too many body keys" +#: cinder/backup/drivers/ceph.py:858 +msgid "differential restore failed, trying full restore" msgstr "" -#: cinder/api/openstack/wsgi.py:582 +#: cinder/backup/drivers/ceph.py:869 #, python-format -msgid "Exception handling resource: %s" +msgid "restore transfer completed in %.4fs" msgstr "" -#: cinder/api/openstack/wsgi.py:586 +#: cinder/backup/drivers/ceph.py:916 #, python-format -msgid "Fault thrown: %s" +msgid "rbd has %s extents" msgstr "" -#: cinder/api/openstack/wsgi.py:589 -#, python-format -msgid "HTTP exception thrown: %s" +#: cinder/backup/drivers/ceph.py:938 +msgid "dest volume is original volume - forcing full copy" msgstr "" -#: cinder/api/openstack/wsgi.py:697 -msgid "Unrecognized Content-Type provided in request" +#: cinder/backup/drivers/ceph.py:959 +msgid "destination has extents - forcing full copy" msgstr "" -#: cinder/api/openstack/wsgi.py:701 -msgid "No Content-Type provided in request" +#: cinder/backup/drivers/ceph.py:964 +#, python-format +msgid "no restore point found for backup='%s', forcing full copy" msgstr "" -#: cinder/api/openstack/wsgi.py:705 -msgid "Empty body provided in request" +#: cinder/backup/drivers/ceph.py:995 +msgid "forcing full restore" msgstr "" -#: cinder/api/openstack/wsgi.py:816 +#: cinder/backup/drivers/ceph.py:1005 #, python-format -msgid "There is no such action: %s" +msgid "starting restore from Ceph backup=%(src)s to volume=%(dest)s" msgstr "" -#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 -#: cinder/api/openstack/compute/server_metadata.py:58 -#: cinder/api/openstack/compute/server_metadata.py:76 -#: cinder/api/openstack/compute/server_metadata.py:101 -#: cinder/api/openstack/compute/server_metadata.py:126 -#: cinder/api/openstack/compute/contrib/admin_actions.py:211 -#: cinder/api/openstack/compute/contrib/console_output.py:52 -msgid "Malformed request body" +#: cinder/backup/drivers/ceph.py:1016 +msgid "volume_file does not support fileno() so skipping fsync()" msgstr "" -#: cinder/api/openstack/wsgi.py:829 -msgid "Unsupported Content-Type" +#: cinder/backup/drivers/ceph.py:1021 +msgid "restore finished successfully." 
msgstr "" -#: cinder/api/openstack/wsgi.py:841 -msgid "Malformed request url" +#: cinder/backup/drivers/ceph.py:1023 +#, python-format +msgid "restore finished with error - %s" msgstr "" -#: cinder/api/openstack/wsgi.py:889 +#: cinder/backup/drivers/ceph.py:1029 #, python-format -msgid "%(url)s returned a fault: %(e)s" +msgid "delete started for backup=%s" msgstr "" -#: cinder/api/openstack/xmlutil.py:265 -msgid "element is not a child" +#: cinder/backup/drivers/ceph.py:1034 +msgid "rbd image not found but continuing anyway so that db entry can be removed" msgstr "" -#: cinder/api/openstack/xmlutil.py:414 -msgid "root element selecting a list" +#: cinder/backup/drivers/ceph.py:1037 +#, python-format +msgid "delete '%s' finished with warning" msgstr "" -#: cinder/api/openstack/xmlutil.py:739 +#: cinder/backup/drivers/ceph.py:1039 #, python-format -msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgid "delete '%s' finished" msgstr "" -#: cinder/api/openstack/xmlutil.py:858 -msgid "subclasses must implement construct()!" +#: cinder/backup/drivers/swift.py:106 +#, python-format +msgid "unsupported compression algorithm: %s" msgstr "" -#: cinder/api/openstack/compute/extensions.py:29 -#: cinder/api/openstack/volume/extensions.py:29 -msgid "Initializing extension manager." +#: cinder/backup/drivers/swift.py:123 +#, python-format +msgid "single_user auth mode enabled, but %(param)s not set" msgstr "" -#: cinder/api/openstack/compute/image_metadata.py:40 -#: cinder/api/openstack/compute/images.py:146 -#: cinder/api/openstack/compute/images.py:161 -msgid "Image not found." +#: cinder/backup/drivers/swift.py:141 +#, python-format +msgid "_check_container_exists: container: %s" msgstr "" -#: cinder/api/openstack/compute/image_metadata.py:79 -msgid "Incorrect request body format" +#: cinder/backup/drivers/swift.py:146 +#, python-format +msgid "container %s does not exist" msgstr "" -#: cinder/api/openstack/compute/image_metadata.py:83 -#: cinder/api/openstack/compute/server_metadata.py:80 -#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:79 -#: cinder/api/openstack/compute/contrib/volumetypes.py:188 -msgid "Request body and URI mismatch" +#: cinder/backup/drivers/swift.py:151 +#, python-format +msgid "container %s exists" msgstr "" -#: cinder/api/openstack/compute/image_metadata.py:86 -#: cinder/api/openstack/compute/server_metadata.py:84 -#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:82 -#: cinder/api/openstack/compute/contrib/volumetypes.py:191 -msgid "Request body contains too many items" +#: cinder/backup/drivers/swift.py:157 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" msgstr "" -#: cinder/api/openstack/compute/image_metadata.py:111 -msgid "Invalid metadata key" +#: cinder/backup/drivers/swift.py:173 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" msgstr "" -#: cinder/api/openstack/compute/ips.py:74 -msgid "Instance does not exist" +#: cinder/backup/drivers/swift.py:182 +#, python-format +msgid "generated object list: %s" msgstr "" -#: cinder/api/openstack/compute/ips.py:97 -msgid "Instance is not a member of specified network" +#: cinder/backup/drivers/swift.py:192 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" msgstr "" -#: cinder/api/openstack/compute/limits.py:140 +#: cinder/backup/drivers/swift.py:209 #, python-format msgid "" -"Only %(value)s %(verb)s request(s) can be made to %(uri)s 
every " -"%(unit_string)s." +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" msgstr "" -#: cinder/api/openstack/compute/limits.py:266 -msgid "This request was rate-limited." -msgstr "" - -#: cinder/api/openstack/compute/server_metadata.py:38 -#: cinder/api/openstack/compute/server_metadata.py:122 -#: cinder/api/openstack/compute/server_metadata.py:159 -msgid "Server does not exist" -msgstr "" - -#: cinder/api/openstack/compute/server_metadata.py:141 -#: cinder/api/openstack/compute/server_metadata.py:152 -msgid "Metadata item was not found" +#: cinder/backup/drivers/swift.py:214 +msgid "_write_metadata finished" msgstr "" -#: cinder/api/openstack/compute/servers.py:425 +#: cinder/backup/drivers/swift.py:219 #, python-format -msgid "Invalid server status: %(status)s" +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:433 -msgid "Invalid changes-since value" +#: cinder/backup/drivers/swift.py:224 +#, python-format +msgid "_read_metadata finished (%s)" msgstr "" -#: cinder/api/openstack/compute/servers.py:483 -msgid "Personality file limit exceeded" +#: cinder/backup/drivers/swift.py:234 +#, python-format +msgid "volume size %d is invalid." msgstr "" -#: cinder/api/openstack/compute/servers.py:485 -msgid "Personality file path too long" +#: cinder/backup/drivers/swift.py:248 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:487 -msgid "Personality file content too long" +#: cinder/backup/drivers/swift.py:271 +msgid "reading chunk of data from volume" msgstr "" -#: cinder/api/openstack/compute/servers.py:501 -msgid "Server name is not a string or unicode" +#: cinder/backup/drivers/swift.py:278 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:505 -msgid "Server name is an empty string" +#: cinder/backup/drivers/swift.py:287 +msgid "not compressing data" msgstr "" -#: cinder/api/openstack/compute/servers.py:509 -msgid "Server name must be less than 256 characters." 
+#: cinder/backup/drivers/swift.py:291 +msgid "About to put_object" msgstr "" -#: cinder/api/openstack/compute/servers.py:527 +#: cinder/backup/drivers/swift.py:297 #, python-format -msgid "Bad personality format: missing %s" +msgid "swift MD5 for %(object_name)s: %(etag)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:530 -msgid "Bad personality format" +#: cinder/backup/drivers/swift.py:301 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:535 +#: cinder/backup/drivers/swift.py:304 #, python-format -msgid "Personality content for %s cannot be decoded" +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:550 -#, python-format -msgid "Bad networks format: network uuid is not in proper format (%s)" +#: cinder/backup/drivers/swift.py:312 +msgid "Calling eventlet.sleep(0)" msgstr "" -#: cinder/api/openstack/compute/servers.py:559 +#: cinder/backup/drivers/swift.py:328 cinder/backup/drivers/tsm.py:324 #, python-format -msgid "Invalid fixed IP address (%s)" +msgid "backup %s finished." msgstr "" -#: cinder/api/openstack/compute/servers.py:566 +#: cinder/backup/drivers/swift.py:345 #, python-format -msgid "Duplicate networks (%s) are not allowed" +msgid "v1 swift volume backup restore of %s started" msgstr "" -#: cinder/api/openstack/compute/servers.py:572 +#: cinder/backup/drivers/swift.py:350 #, python-format -msgid "Bad network format: missing %s" +msgid "metadata_object_names = %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:575 -msgid "Bad networks format" +#: cinder/backup/drivers/swift.py:356 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" msgstr "" -#: cinder/api/openstack/compute/servers.py:587 -msgid "Userdata content cannot be decoded" +#: cinder/backup/drivers/swift.py:362 +#, python-format +msgid "" +"restoring object from swift. backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:594 -msgid "accessIPv4 is not proper IPv4 format" +#: cinder/backup/drivers/swift.py:378 +#, python-format +msgid "decompressing data using %s algorithm" msgstr "" -#: cinder/api/openstack/compute/servers.py:601 -msgid "accessIPv6 is not proper IPv6 format" +#: cinder/backup/drivers/swift.py:401 +#, python-format +msgid "v1 swift volume backup restore of %s finished" msgstr "" -#: cinder/api/openstack/compute/servers.py:633 -msgid "Server name is not defined" +#: cinder/backup/drivers/swift.py:409 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:675 -#: cinder/api/openstack/compute/servers.py:740 -msgid "Invalid flavorRef provided." +#: cinder/backup/drivers/swift.py:423 +#, python-format +msgid "Restoring swift backup version %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:737 -msgid "Can not find requested image" +#: cinder/backup/drivers/swift.py:428 +#, python-format +msgid "No support to restore swift backup version %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:743 -msgid "Invalid key_name provided." 
+#: cinder/backup/drivers/swift.py:432 cinder/backup/drivers/tsm.py:378 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." msgstr "" -#: cinder/api/openstack/compute/servers.py:829 -#: cinder/api/openstack/compute/servers.py:849 -msgid "Instance has not been resized." +#: cinder/backup/drivers/swift.py:446 +msgid "swift error while listing objects, continuing with delete" msgstr "" -#: cinder/api/openstack/compute/servers.py:835 +#: cinder/backup/drivers/swift.py:455 #, python-format -msgid "Error in confirm-resize %s" +msgid "swift error while deleting object %s, continuing with delete" msgstr "" -#: cinder/api/openstack/compute/servers.py:855 +#: cinder/backup/drivers/swift.py:458 #, python-format -msgid "Error in revert-resize %s" +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:868 -msgid "Argument 'type' for reboot is not HARD or SOFT" +#: cinder/backup/drivers/swift.py:468 cinder/backup/drivers/tsm.py:440 +#, python-format +msgid "delete %s finished" msgstr "" -#: cinder/api/openstack/compute/servers.py:872 -msgid "Missing argument 'type' for reboot" +#: cinder/backup/drivers/tsm.py:85 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to create device hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:885 +#: cinder/backup/drivers/tsm.py:143 #, python-format -msgid "Error in reboot %s" +msgid "" +"backup: %(vol_id)s Failed to obtain backup success notification from " +"server.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:897 -msgid "Unable to locate requested flavor." +#: cinder/backup/drivers/tsm.py:173 +#, python-format +msgid "" +"restore: %(vol_id)s Failed.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:900 -msgid "Resize requires a change in size." +#: cinder/backup/drivers/tsm.py:199 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a block device." msgstr "" -#: cinder/api/openstack/compute/servers.py:924 -msgid "Malformed server entity" +#: cinder/backup/drivers/tsm.py:206 +#, python-format +msgid "backup: %(vol_id)s Failed. Cannot obtain real path to device %(path)s." msgstr "" -#: cinder/api/openstack/compute/servers.py:931 -msgid "Missing imageRef attribute" +#: cinder/backup/drivers/tsm.py:213 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a file." msgstr "" -#: cinder/api/openstack/compute/servers.py:940 -msgid "Invalid imageRef provided." 
+#: cinder/backup/drivers/tsm.py:260 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to remove backup hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:949 -msgid "Missing flavorRef attribute" +#: cinder/backup/drivers/tsm.py:286 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to TSM, volume path: " +"%(volume_path)s," msgstr "" -#: cinder/api/openstack/compute/servers.py:962 -msgid "No adminPass was specified" +#: cinder/backup/drivers/tsm.py:298 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:966 -#: cinder/api/openstack/compute/servers.py:1144 -msgid "Invalid adminPass" +#: cinder/backup/drivers/tsm.py:308 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:980 -msgid "Unable to parse metadata key/value pairs." +#: cinder/backup/drivers/tsm.py:338 +#, python-format +msgid "" +"restore: starting restore of backup from TSM to volume %(volume_id)s, " +"backup: %(backup_id)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:993 -msgid "Resize request has invalid 'flavorRef' attribute." +#: cinder/backup/drivers/tsm.py:352 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:996 -msgid "Resize requests require 'flavorRef' attribute." +#: cinder/backup/drivers/tsm.py:362 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:1014 -#: cinder/api/openstack/compute/contrib/aggregates.py:142 -#: cinder/api/openstack/compute/contrib/networks.py:65 -msgid "Invalid request body" +#: cinder/backup/drivers/tsm.py:413 +#, python-format +msgid "" +"delete: %(vol_id)s Failed to run dsmc with stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:1019 -msgid "Could not parse imageRef from request." +#: cinder/backup/drivers/tsm.py:421 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments with " +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:1071 -msgid "Instance could not be found" +#: cinder/backup/drivers/tsm.py:432 +#, python-format +msgid "" +"delete: %(vol_id)s Failed with stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:1074 -msgid "Cannot find image for rebuild" +#: cinder/brick/exception.py:55 +#, python-format +msgid "Exception in string format operation. msg='%s'" msgstr "" -#: cinder/api/openstack/compute/servers.py:1103 -msgid "createImage entity requires name attribute" +#: cinder/brick/exception.py:85 +msgid "We are unable to locate any Fibre Channel devices." msgstr "" -#: cinder/api/openstack/compute/servers.py:1112 -#: cinder/api/openstack/compute/contrib/admin_actions.py:238 -msgid "Invalid metadata" +#: cinder/brick/exception.py:89 +msgid "Unable to find a Fibre Channel volume device." 
msgstr "" -#: cinder/api/openstack/compute/servers.py:1167 +#: cinder/brick/exception.py:93 #, python-format -msgid "Removing options '%(unk_opt_str)s' from query" +msgid "Volume device not found at %(device)s." msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:60 +#: cinder/brick/exception.py:97 #, python-format -msgid "Compute.api::pause %s" +msgid "Unable to find Volume Group: %(vg_name)s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:77 +#: cinder/brick/exception.py:101 #, python-format -msgid "Compute.api::unpause %s" +msgid "Failed to create Volume Group: %(vg_name)s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:94 +#: cinder/brick/exception.py:105 #, python-format -msgid "compute.api::suspend %s" +msgid "Failed to create iscsi target for volume %(volume_id)s." msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:111 +#: cinder/brick/exception.py:109 #, python-format -msgid "compute.api::resume %s" +msgid "Failed to remove iscsi target for volume %(volume_id)s." msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:127 +#: cinder/brick/exception.py:113 #, python-format -msgid "Error in migrate %s" +msgid "Failed to attach iSCSI target for volume %(volume_id)s." msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:141 +#: cinder/brick/exception.py:117 #, python-format -msgid "Compute.api::reset_network %s" -msgstr "" - -#: cinder/api/openstack/compute/contrib/admin_actions.py:154 -#: cinder/api/openstack/compute/contrib/admin_actions.py:170 -#: cinder/api/openstack/compute/contrib/admin_actions.py:186 -#: cinder/api/openstack/compute/contrib/multinic.py:41 -#: cinder/api/openstack/compute/contrib/rescue.py:44 -msgid "Server not found" +msgid "Connect to volume via protocol %(protocol)s not supported." msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:157 +#: cinder/brick/initiator/connector.py:127 #, python-format -msgid "Compute.api::inject_network_info %s" +msgid "Invalid InitiatorConnector protocol specified %(protocol)s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:173 +#: cinder/brick/initiator/connector.py:140 #, python-format -msgid "Compute.api::lock %s" +msgid "Failed to access the device on the path %(path)s: %(error)s %(info)s." msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:189 +#: cinder/brick/initiator/connector.py:229 #, python-format -msgid "Compute.api::unlock %s" +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. 
Try" +" number: %(tries)s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:219 +#: cinder/brick/initiator/connector.py:242 #, python-format -msgid "createBackup entity requires %s attribute" -msgstr "" - -#: cinder/api/openstack/compute/contrib/admin_actions.py:223 -msgid "Malformed createBackup entity" -msgstr "" - -#: cinder/api/openstack/compute/contrib/admin_actions.py:229 -msgid "createBackup attribute 'rotation' must be an integer" +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:244 -#: cinder/api/openstack/compute/contrib/console_output.py:47 -#: cinder/api/openstack/compute/contrib/console_output.py:59 -#: cinder/api/openstack/compute/contrib/consoles.py:49 -#: cinder/api/openstack/compute/contrib/consoles.py:60 -#: cinder/api/openstack/compute/contrib/server_action_list.py:49 -#: cinder/api/openstack/compute/contrib/server_diagnostics.py:47 -#: cinder/api/openstack/compute/contrib/server_start_stop.py:38 -msgid "Instance not found" +#: cinder/brick/initiator/connector.py:317 +#, python-format +msgid "Could not find the iSCSI Initiator File %s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:272 -msgid "host and block_migration must be specified." +#: cinder/brick/initiator/connector.py:609 +msgid "We are unable to locate any Fibre Channel devices" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:284 +#: cinder/brick/initiator/connector.py:619 #, python-format -msgid "Live migration of instance %(id)s to host %(host)s failed" +msgid "Looking for Fibre Channel dev %(device)s" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:76 -#, python-format -msgid "" -"Cannot create aggregate with name %(name)s and availability zone " -"%(avail_zone)s" +#: cinder/brick/initiator/connector.py:629 +msgid "Fibre Channel volume device not found." msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:88 +#: cinder/brick/initiator/connector.py:633 #, python-format -msgid "Cannot show aggregate: %(id)s" +msgid "Fibre volume not yet found. Will rescan & retry. Try number: %(tries)s" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:114 +#: cinder/brick/initiator/connector.py:649 #, python-format -msgid "Cannot update aggregate: %(id)s" +msgid "Found Fibre Channel volume %(name)s (after %(tries)s rescans)" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:126 +#: cinder/brick/initiator/connector.py:658 #, python-format -msgid "Cannot delete aggregate: %(id)s" +msgid "Multipath device discovered %(device)s" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:139 +#: cinder/brick/initiator/connector.py:776 #, python-format -msgid "Aggregates does not have %s action" +msgid "AoE volume not yet found at: %(path)s. 
Try number: %(tries)s" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:152 -#: cinder/api/openstack/compute/contrib/aggregates.py:158 +#: cinder/brick/initiator/connector.py:789 #, python-format -msgid "Cannot add host %(host)s in aggregate %(id)s" +msgid "Found AoE device %(path)s (after %(tries)s rediscover)" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:171 -#: cinder/api/openstack/compute/contrib/aggregates.py:175 +#: cinder/brick/initiator/connector.py:815 #, python-format -msgid "Cannot remove host %(host)s in aggregate %(id)s" +msgid "aoe-discover: stdout=%(out)s stderr%(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:195 +#: cinder/brick/initiator/connector.py:825 #, python-format -msgid "Cannot set metadata %(metadata)s in aggregate %(id)s" +msgid "aoe-revalidate %(dev)s: stdout=%(out)s stderr%(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/certificates.py:75 -msgid "Only root certificate can be retrieved." +#: cinder/brick/initiator/connector.py:834 +#, python-format +msgid "aoe-flush %(dev)s: stdout=%(out)s stderr%(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/cloudpipe.py:146 +#: cinder/brick/initiator/connector.py:858 msgid "" -"Unable to claim IP for VPN instances, ensure it isn't running, and try " -"again in a few minutes" +"Connection details not present. RemoteFsClient may not initialize " +"properly." msgstr "" -#: cinder/api/openstack/compute/contrib/consoles.py:44 -msgid "Missing type specification" +#: cinder/brick/initiator/connector.py:915 +msgid "Invalid connection_properties specified no device_path attribute" msgstr "" -#: cinder/api/openstack/compute/contrib/consoles.py:56 -msgid "Invalid type specification" +#: cinder/brick/initiator/linuxfc.py:50 cinder/brick/initiator/linuxfc.py:56 +msgid "systool is not installed" msgstr "" -#: cinder/api/openstack/compute/contrib/disk_config.py:44 +#: cinder/brick/initiator/linuxscsi.py:99 +#: cinder/brick/initiator/linuxscsi.py:107 +#: cinder/brick/initiator/linuxscsi.py:124 #, python-format -msgid "%s must be either 'MANUAL' or 'AUTO'." +msgid "multipath call failed exit (%(code)s)" msgstr "" -#: cinder/api/openstack/compute/contrib/extended_server_attributes.py:77 -#: cinder/api/openstack/compute/contrib/extended_status.py:61 -msgid "Server not found." +#: cinder/brick/initiator/linuxscsi.py:145 +#, python-format +msgid "Couldn't find multipath device %(line)s" msgstr "" -#: cinder/api/openstack/compute/contrib/flavorextradata.py:61 -#: cinder/api/openstack/compute/contrib/flavorextradata.py:91 -msgid "Flavor not found." +#: cinder/brick/initiator/linuxscsi.py:149 +#, python-format +msgid "Found multipath device = %(mdev)s" msgstr "" -#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:49 -#: cinder/api/openstack/compute/contrib/volumetypes.py:158 -msgid "No Request Body" +#: cinder/brick/iscsi/iscsi.py:140 +msgid "Attempting recreate of backing lun..." msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:159 +#: cinder/brick/iscsi/iscsi.py:158 #, python-format -msgid "No more floating ips in pool %s." -msgstr "" - -#: cinder/api/openstack/compute/contrib/floating_ips.py:161 -msgid "No more floating ips available." 
+msgid "" +"Failed to recover attempt to create iscsi backing lun for volume " +"id:%(vol_id)s: %(e)s" msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:201 -#: cinder/api/openstack/compute/contrib/floating_ips.py:230 -#: cinder/api/openstack/compute/contrib/security_groups.py:571 -#: cinder/api/openstack/compute/contrib/security_groups.py:604 -msgid "Missing parameter dict" +#: cinder/brick/iscsi/iscsi.py:177 +#, python-format +msgid "Creating iscsi_target for: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:204 -#: cinder/api/openstack/compute/contrib/floating_ips.py:233 -msgid "Address not specified" +#: cinder/brick/iscsi/iscsi.py:184 +#, python-format +msgid "" +"Created volume path %(vp)s,\n" +"content: %(vc)%" msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:213 -msgid "No fixed ips associated to instance" +#: cinder/brick/iscsi/iscsi.py:216 cinder/brick/iscsi/iscsi.py:365 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s" msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:216 -msgid "Associate floating ip failed" +#: cinder/brick/iscsi/iscsi.py:227 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:144 +#: cinder/brick/iscsi/iscsi.py:258 #, python-format -msgid "Invalid status: '%s'" +msgid "Removing iscsi_target for: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:148 +#: cinder/brick/iscsi/iscsi.py:262 #, python-format -msgid "Invalid mode: '%s'" +msgid "Volume path %s does not exist, nothing to remove." msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:152 +#: cinder/brick/iscsi/iscsi.py:280 #, python-format -msgid "Invalid update setting: '%s'" +msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:170 -#, python-format -msgid "Putting host %(host)s in maintenance mode %(mode)s." +#: cinder/brick/iscsi/iscsi.py:290 cinder/brick/iscsi/iscsi.py:550 +msgid "valid iqn needed for show_target" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:181 +#: cinder/brick/iscsi/iscsi.py:375 #, python-format -msgid "Setting host %(host)s to %(state)s." +msgid "Removing iscsi_target for volume: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:230 -msgid "Describe-resource is admin only functionality" +#: cinder/brick/iscsi/iscsi.py:469 +msgid "cinder-rtstool is not installed correctly" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:238 -msgid "Host not found" +#: cinder/brick/iscsi/iscsi.py:489 +#, python-format +msgid "Creating iscsi_target for volume: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/keypairs.py:70 -msgid "Keypair name contains unsafe characters" +#: cinder/brick/iscsi/iscsi.py:513 cinder/brick/iscsi/iscsi.py:522 +#, python-format +msgid "Failed to create iscsi target for volume id:%s." msgstr "" -#: cinder/api/openstack/compute/contrib/keypairs.py:95 -msgid "Keypair name must be between 1 and 255 characters long" +#: cinder/brick/iscsi/iscsi.py:532 +#, python-format +msgid "Removing iscsi_target: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/keypairs.py:100 +#: cinder/brick/iscsi/iscsi.py:542 #, python-format -msgid "Key pair '%s' already exists." +msgid "Failed to remove iscsi target for volume id:%s." 
msgstr "" -#: cinder/api/openstack/compute/contrib/multinic.py:52 -msgid "Missing 'networkId' argument for addFixedIp" +#: cinder/brick/iscsi/iscsi.py:571 +#, python-format +msgid "Failed to add initiator iqn %s to target" msgstr "" -#: cinder/api/openstack/compute/contrib/multinic.py:68 -msgid "Missing 'address' argument for removeFixedIp" +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" msgstr "" -#: cinder/api/openstack/compute/contrib/multinic.py:77 +#: cinder/brick/local_dev/lvm.py:76 cinder/brick/local_dev/lvm.py:158 +#: cinder/brick/local_dev/lvm.py:474 cinder/brick/local_dev/lvm.py:503 +#: cinder/brick/local_dev/lvm.py:546 cinder/brick/local_dev/lvm.py:609 #, python-format -msgid "Unable to find address %r" +msgid "Cmd :%s" msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:62 +#: cinder/brick/local_dev/lvm.py:77 cinder/brick/local_dev/lvm.py:159 +#: cinder/brick/local_dev/lvm.py:475 cinder/brick/local_dev/lvm.py:504 +#: cinder/brick/local_dev/lvm.py:547 cinder/brick/local_dev/lvm.py:610 #, python-format -msgid "Network does not have %s action" +msgid "StdOut :%s" msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:70 +#: cinder/brick/local_dev/lvm.py:78 cinder/brick/local_dev/lvm.py:160 +#: cinder/brick/local_dev/lvm.py:476 cinder/brick/local_dev/lvm.py:505 +#: cinder/brick/local_dev/lvm.py:548 cinder/brick/local_dev/lvm.py:611 #, python-format -msgid "Disassociating network with id %s" +msgid "StdErr :%s" msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:74 -#: cinder/api/openstack/compute/contrib/networks.py:91 -#: cinder/api/openstack/compute/contrib/networks.py:101 -msgid "Network not found" +#: cinder/brick/local_dev/lvm.py:82 +#, python-format +msgid "Unable to locate Volume Group %s" msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:87 -#, python-format -msgid "Showing network with id %s" +#: cinder/brick/local_dev/lvm.py:157 +msgid "Error querying thin pool about data_percent" msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:97 +#: cinder/brick/local_dev/lvm.py:370 #, python-format -msgid "Deleting network with id %s" +msgid "Unable to find VG: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/scheduler_hints.py:41 -msgid "Malformed scheduler_hints attribute" +#: cinder/brick/local_dev/lvm.py:420 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:222 -msgid "Security group id should be integer" +#: cinder/brick/local_dev/lvm.py:473 +msgid "Error creating Volume" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:243 -msgid "Security group is still in use" +#: cinder/brick/local_dev/lvm.py:489 +#, python-format +msgid "Unable to find LV: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:295 -#, python-format -msgid "Security group %s already exists" +#: cinder/brick/local_dev/lvm.py:502 +msgid "Error creating snapshot" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:315 -#, python-format -msgid "Security group %s is not a string or unicode" +#: cinder/brick/local_dev/lvm.py:545 +msgid "Error activating LV" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:318 +#: cinder/brick/local_dev/lvm.py:563 #, python-format -msgid "Security group %s cannot be empty." 
+msgid "Error reported running lvremove: CMD: %(command)s, RESPONSE: %(response)s" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:321 -#, python-format -msgid "Security group %s should not be greater than 255 characters." +#: cinder/brick/local_dev/lvm.py:568 +msgid "Attempting udev settle and retry of lvremove..." msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:348 -msgid "Parent group id is not integer" +#: cinder/brick/local_dev/lvm.py:608 +msgid "Error extending Volume" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:351 -#, python-format -msgid "Security group (%s) not found" +#: cinder/brick/remotefs/remotefs.py:39 +msgid "nfs_mount_point_base required" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:369 -msgid "Not enough parameters to build a valid rule." +#: cinder/brick/remotefs/remotefs.py:45 +msgid "glusterfs_mount_point_base required" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:376 +#: cinder/brick/remotefs/remotefs.py:86 #, python-format -msgid "This rule already exists in group %s" +msgid "Already mounted: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:414 -msgid "Parent or group id is not integer" +#: cinder/common/config.py:125 +msgid "Deploy v1 of the Cinder API." msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:507 -msgid "Rule id is not integer" +#: cinder/common/config.py:128 +msgid "Deploy v2 of the Cinder API." msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:510 -#, python-format -msgid "Rule (%s) not found" +#: cinder/common/sqlalchemyutils.py:66 +#: cinder/openstack/common/db/sqlalchemy/utils.py:72 +msgid "Id not in sort_keys; is sort_keys unique?" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:574 -#: cinder/api/openstack/compute/contrib/security_groups.py:607 -msgid "Security group not specified" +#: cinder/common/sqlalchemyutils.py:114 +#: cinder/openstack/common/db/sqlalchemy/utils.py:120 +msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:578 -#: cinder/api/openstack/compute/contrib/security_groups.py:611 -msgid "Security group name cannot be empty" +#: cinder/compute/nova.py:97 +#, python-format +msgid "Novaclient connection created using URL: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/server_start_stop.py:45 -#, python-format -msgid "start instance %r" +#: cinder/db/sqlalchemy/api.py:63 +msgid "Use of empty request context is deprecated" msgstr "" -#: cinder/api/openstack/compute/contrib/server_start_stop.py:54 +#: cinder/db/sqlalchemy/api.py:190 #, python-format -msgid "stop instance %r" +msgid "Unrecognized read_deleted value '%s'" msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:73 -#: cinder/api/openstack/volume/volumes.py:106 +#: cinder/db/sqlalchemy/api.py:843 #, python-format -msgid "vol=%s" +msgid "Change will make usage less than 0 for the following resources: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:146 -#: cinder/api/openstack/volume/volumes.py:184 +#: cinder/db/sqlalchemy/api.py:1842 #, python-format -msgid "Delete volume with id: %s" +msgid "VolumeType %s deletion failed, VolumeType in use." 
msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:329 +#: cinder/db/sqlalchemy/api.py:2530 #, python-format -msgid "Attach volume %(volume_id)s to instance %(server_id)s at %(device)s" +msgid "No backup with id %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2615 +msgid "Volume must be available" msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:481 -#: cinder/api/openstack/volume/snapshots.py:110 +#: cinder/db/sqlalchemy/api.py:2639 #, python-format -msgid "Delete snapshot with id: %s" +msgid "Volume in unexpected state %s, expected awaiting-transfer" msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:524 -#: cinder/api/openstack/volume/snapshots.py:150 +#: cinder/db/sqlalchemy/api.py:2662 #, python-format -msgid "Create snapshot from volume %s" +msgid "" +"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " +"%(status)s, expected awaiting-transfer" msgstr "" -#: cinder/auth/fakeldap.py:33 -msgid "Attempted to instantiate singleton" +#: cinder/db/sqlalchemy/migration.py:37 +msgid "version should be an integer" msgstr "" -#: cinder/auth/ldapdriver.py:650 -#, python-format -msgid "" -"Attempted to remove the last member of a group. Deleting the group at %s " -"instead." +#: cinder/db/sqlalchemy/migration.py:64 +msgid "Upgrade DB using Essex release first." msgstr "" -#: cinder/auth/manager.py:298 -#, python-format -msgid "Looking up user: %r" +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:240 +msgid "Exception while creating table." msgstr "" -#: cinder/auth/manager.py:302 -#, python-format -msgid "Failed authorization for access key %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:269 +msgid "Downgrade from initial Cinder install is unsupported." msgstr "" -#: cinder/auth/manager.py:308 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:49 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:74 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:105 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:45 +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:48 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:46 #, python-format -msgid "Using project name = user name (%s)" +msgid "Table |%s| not created!" msgstr "" -#: cinder/auth/manager.py:315 -#, python-format -msgid "failed authorization: no project named %(pjid)s (user=%(uname)s)" +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:127 +msgid "Dropping foreign key reservations_ibfk_1 failed." 
msgstr "" -#: cinder/auth/manager.py:324 -#, python-format -msgid "" -"Failed authorization: user %(uname)s not admin and not member of project " -"%(pjname)s" +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:133 +msgid "quota_classes table not dropped" msgstr "" -#: cinder/auth/manager.py:331 cinder/auth/manager.py:343 -#, python-format -msgid "user.secret: %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:140 +msgid "quota_usages table not dropped" msgstr "" -#: cinder/auth/manager.py:332 cinder/auth/manager.py:344 -#, python-format -msgid "expected_signature: %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:147 +msgid "reservations table not dropped" msgstr "" -#: cinder/auth/manager.py:333 cinder/auth/manager.py:345 -#, python-format -msgid "signature: %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:60 +msgid "Exception while creating table 'volume_glance_metadata'" msgstr "" -#: cinder/auth/manager.py:335 cinder/auth/manager.py:357 -#, python-format -msgid "Invalid signature for user %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:75 +msgid "volume_glance_metadata table not dropped" msgstr "" -#: cinder/auth/manager.py:353 -#, python-format -msgid "host_only_signature: %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:68 +msgid "backups table not dropped" msgstr "" -#: cinder/auth/manager.py:449 -msgid "Must specify project" +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:58 +msgid "snapshot_metadata table not dropped" msgstr "" -#: cinder/auth/manager.py:490 -#, python-format -msgid "Adding role %(role)s to user %(uid)s in project %(pid)s" +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:61 +msgid "transfers table not dropped" msgstr "" -#: cinder/auth/manager.py:493 -#, python-format -msgid "Adding sitewide role %(role)s to user %(uid)s" +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:31 +msgid "migrations table not dropped" msgstr "" -#: cinder/auth/manager.py:519 +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:61 #, python-format -msgid "Removing role %(role)s from user %(uid)s on project %(pid)s" +msgid "Table |%s| not created" msgstr "" -#: cinder/auth/manager.py:522 +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:37 #, python-format -msgid "Removing sitewide role %(role)s from user %(uid)s" +msgid "Exception while dropping table %s." msgstr "" -#: cinder/auth/manager.py:595 +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:100 #, python-format -msgid "Created project %(name)s with manager %(manager_user)s" +msgid "Exception while creating table %s." msgstr "" -#: cinder/auth/manager.py:613 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:34 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:43 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:49 #, python-format -msgid "modifying project %s" +msgid "Column |%s| not created!" 
msgstr "" -#: cinder/auth/manager.py:625 -#, python-format -msgid "Adding user %(uid)s to project %(pid)s" +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:92 +msgid "encryption_key_id column not dropped from volumes" msgstr "" -#: cinder/auth/manager.py:646 -#, python-format -msgid "Remove user %(uid)s from project %(pid)s" +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:100 +msgid "encryption_key_id column not dropped from snapshots" msgstr "" -#: cinder/auth/manager.py:676 -#, python-format -msgid "Deleting project %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:105 +msgid "volume_type_id column not dropped from snapshots" msgstr "" -#: cinder/auth/manager.py:734 -#, python-format -msgid "Created user %(rvname)s (admin: %(rvadmin)r)" +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:113 +msgid "encryption table not dropped" msgstr "" -#: cinder/auth/manager.py:743 -#, python-format -msgid "Deleting user %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:49 +msgid "Table quality_of_service_specs not created!" msgstr "" -#: cinder/auth/manager.py:753 -#, python-format -msgid "Access Key change for user %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:60 +msgid "Added qos_specs_id column to volume type table failed." msgstr "" -#: cinder/auth/manager.py:755 -#, python-format -msgid "Secret Key change for user %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:85 +msgid "Dropping foreign key volume_types_ibfk_1 failed" msgstr "" -#: cinder/auth/manager.py:757 -#, python-format -msgid "Admin status set to %(admin)r for user %(uid)s" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:93 +msgid "Dropping qos_specs_id column failed." msgstr "" -#: cinder/auth/manager.py:802 -#, python-format -msgid "No vpn data for project %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:100 +msgid "Dropping quality_of_service_specs table failed." msgstr "" -#: cinder/cloudpipe/pipelib.py:46 -msgid "Instance type for vpn instances" +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:59 +msgid "volume_admin_metadata table not dropped" msgstr "" -#: cinder/cloudpipe/pipelib.py:49 -msgid "Template for cloudpipe instance boot script" +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:48 +msgid "" +"Found existing 'default' entries in the quota_classes table. Skipping " +"insertion of default values." msgstr "" -#: cinder/cloudpipe/pipelib.py:52 -msgid "Network to push into openvpn config" +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:72 +msgid "Added default quota class data into the DB." msgstr "" -#: cinder/cloudpipe/pipelib.py:55 -msgid "Netmask to push into openvpn config" +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:74 +msgid "Default quota class data not inserted into the DB." msgstr "" -#: cinder/cloudpipe/pipelib.py:107 +#: cinder/image/glance.py:161 cinder/image/glance.py:169 #, python-format -msgid "Launching VPN for %s" +msgid "Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s." msgstr "" -#: cinder/compute/api.py:141 -msgid "No compute host specified" +#: cinder/image/image_utils.py:94 cinder/image/image_utils.py:199 +msgid "'qemu-img info' parsing failed." 
msgstr "" -#: cinder/compute/api.py:144 +#: cinder/image/image_utils.py:101 #, python-format -msgid "Unable to find host for Instance %s" +msgid "fmt=%(fmt)s backed by: %(backing_file)s" msgstr "" -#: cinder/compute/api.py:192 +#: cinder/image/image_utils.py:109 cinder/image/image_utils.py:192 #, python-format msgid "" -"Quota exceeded for %(pid)s, tried to set %(num_metadata)s metadata " -"properties" +"Size is %(image_size)dGB and doesn't fit in a volume of size " +"%(volume_size)dGB." msgstr "" -#: cinder/compute/api.py:203 +#: cinder/image/image_utils.py:157 #, python-format -msgid "Quota exceeded for %(pid)s, metadata property key or value too long" +msgid "" +"qemu-img is not installed and image is of type %s. Only RAW images can " +"be used if qemu-img is not installed." msgstr "" -#: cinder/compute/api.py:257 -msgid "Cannot run any more instances of this type." +#: cinder/image/image_utils.py:164 +msgid "" +"qemu-img is not installed and the disk format is not specified. Only RAW" +" images can be used if qemu-img is not installed." msgstr "" -#: cinder/compute/api.py:259 +#: cinder/image/image_utils.py:178 #, python-format -msgid "Can only run %s more instances of this type." +msgid "Copying image from %(tmp)s to volume %(dest)s - size: %(size)s" msgstr "" -#: cinder/compute/api.py:261 +#: cinder/image/image_utils.py:206 #, python-format -msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances. " +msgid "fmt=%(fmt)s backed by:%(backing_file)s" msgstr "" -#: cinder/compute/api.py:310 -msgid "Creating a raw instance" +#: cinder/image/image_utils.py:224 +#, python-format +msgid "Converted to %(vol_format)s, but format is now %(file_format)s" msgstr "" -#: cinder/compute/api.py:312 +#: cinder/image/image_utils.py:260 #, python-format -msgid "Using Kernel=%(kernel_id)s, Ramdisk=%(ramdisk_id)s" +msgid "Converted to %(f1)s, but format is now %(f2)s" msgstr "" -#: cinder/compute/api.py:383 -#, python-format -msgid "Going to run %s instances..." +#: cinder/keymgr/conf_key_mgr.py:78 +msgid "" +"config option keymgr.fixed_key has not been defined: some operations may " +"fail unexpectedly" msgstr "" -#: cinder/compute/api.py:447 -#, python-format -msgid "bdm %s" +#: cinder/keymgr/conf_key_mgr.py:80 +msgid "keymgr.fixed_key not defined" msgstr "" -#: cinder/compute/api.py:474 +#: cinder/keymgr/conf_key_mgr.py:134 #, python-format -msgid "block_device_mapping %s" +msgid "Not deleting key %s" msgstr "" -#: cinder/compute/api.py:591 +#: cinder/openstack/common/eventlet_backdoor.py:140 #, python-format -msgid "Sending create to scheduler for %(pid)s/%(uid)s's" +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" msgstr "" -#: cinder/compute/api.py:871 -msgid "Going to try to soft delete instance" +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" msgstr "" -#: cinder/compute/api.py:891 -msgid "No host for instance, deleting immediately" +#: cinder/openstack/common/fileutils.py:64 +#, python-format +msgid "Reloading cached file %s" msgstr "" -#: cinder/compute/api.py:939 -msgid "Going to try to terminate instance" +#: cinder/openstack/common/gettextutils.py:252 +msgid "Message objects do not support addition." msgstr "" -#: cinder/compute/api.py:977 -msgid "Going to try to stop instance" +#: cinder/openstack/common/gettextutils.py:261 +msgid "" +"Message objects do not support str() because they may contain non-ascii " +"characters. Please use unicode() or translate() instead." 
msgstr "" -#: cinder/compute/api.py:996 -msgid "Going to try to start instance" +#: cinder/openstack/common/imageutils.py:96 +msgid "Snapshot list encountered but no header found!" msgstr "" -#: cinder/compute/api.py:1000 +#: cinder/openstack/common/lockutils.py:102 #, python-format -msgid "Instance %(instance_uuid)s is not stopped. (%(vm_state)s" +msgid "Could not release the acquired lock `%s`" msgstr "" -#: cinder/compute/api.py:1071 cinder/volume/api.py:173 -#: cinder/volume/volume_types.py:64 +#: cinder/openstack/common/lockutils.py:189 #, python-format -msgid "Searching by: %s" +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." msgstr "" -#: cinder/compute/api.py:1201 +#: cinder/openstack/common/lockutils.py:200 #, python-format -msgid "Image type not recognized %s" +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." msgstr "" -#: cinder/compute/api.py:1369 -msgid "flavor_id is None. Assuming migration." +#: cinder/openstack/common/lockutils.py:227 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." msgstr "" -#: cinder/compute/api.py:1377 +#: cinder/openstack/common/lockutils.py:235 #, python-format -msgid "" -"Old instance type %(current_instance_type_name)s, new instance type " -"%(new_instance_type_name)s" +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." msgstr "" -#: cinder/compute/api.py:1644 +#: cinder/openstack/common/log.py:301 +#, fuzzy, python-format +msgid "Deprecated: %s" +msgstr "natanggap %s" + +#: cinder/openstack/common/log.py:402 #, python-format -msgid "multiple fixedips exist, using the first: %s" +msgid "Error loading logging config %(log_config)s: %(err_msg)s" msgstr "" -#: cinder/compute/instance_types.py:57 cinder/compute/instance_types.py:65 -msgid "create arguments must be positive integers" +#: cinder/openstack/common/log.py:453 +#, python-format +msgid "syslog facility must be one of: %s" msgstr "" -#: cinder/compute/instance_types.py:76 cinder/volume/volume_types.py:41 +#: cinder/openstack/common/log.py:623 #, python-format -msgid "DB error: %s" +msgid "Fatal call to deprecated config: %(msg)s" msgstr "" -#: cinder/compute/instance_types.py:86 +#: cinder/openstack/common/loopingcall.py:82 #, python-format -msgid "Instance type %s not found for deletion" +msgid "task run outlasted interval by %s sec" msgstr "" -#: cinder/compute/manager.py:138 -#, python-format -msgid "check_instance_lock: decorating: |%s|" +#: cinder/openstack/common/loopingcall.py:89 +#: cinder/tests/brick/test_brick_connector.py:466 +msgid "in fixed duration looping call" msgstr "" -#: cinder/compute/manager.py:140 +#: cinder/openstack/common/loopingcall.py:129 #, python-format -msgid "" -"check_instance_lock: arguments: |%(self)s| |%(context)s| " -"|%(instance_uuid)s|" +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:136 +msgid "in dynamic looping call" msgstr "" -#: cinder/compute/manager.py:144 +#: cinder/openstack/common/periodic_task.py:43 #, python-format -msgid "check_instance_lock: locked: |%s|" +msgid "Unexpected argument for periodic task creation: %(arg)s." 
msgstr "" -#: cinder/compute/manager.py:146 +#: cinder/openstack/common/periodic_task.py:134 #, python-format -msgid "check_instance_lock: admin: |%s|" +msgid "Skipping periodic task %(task)s because its interval is negative" msgstr "" -#: cinder/compute/manager.py:151 +#: cinder/openstack/common/periodic_task.py:139 #, python-format -msgid "check_instance_lock: executing: |%s|" +msgid "Skipping periodic task %(task)s because it is disabled" msgstr "" -#: cinder/compute/manager.py:155 +#: cinder/openstack/common/periodic_task.py:177 #, python-format -msgid "check_instance_lock: not executing |%s|" +msgid "Running periodic task %(full_task_name)s" msgstr "" -#: cinder/compute/manager.py:201 +#: cinder/openstack/common/periodic_task.py:186 #, python-format -msgid "Unable to load the virtualization driver: %s" +msgid "Error during %(full_task_name)s: %(e)s" msgstr "" -#: cinder/compute/manager.py:223 +#: cinder/openstack/common/policy.py:149 #, python-format msgid "" -"Instance %(instance_uuid)s has been destroyed from under us while trying " -"to set it to ERROR" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." msgstr "" -#: cinder/compute/manager.py:240 +#: cinder/openstack/common/policy.py:163 #, python-format -msgid "Current state is %(drv_state)s, state in DB is %(db_state)s." +msgid "Failed to understand rule %(match)r" msgstr "" -#: cinder/compute/manager.py:245 -msgid "Rebooting instance after cinder-compute restart." +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" msgstr "" -#: cinder/compute/manager.py:255 -msgid "Hypervisor driver does not support firewall rules" +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" msgstr "" -#: cinder/compute/manager.py:260 -msgid "Checking state" +#: cinder/openstack/common/processutils.py:127 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" msgstr "" -#: cinder/compute/manager.py:329 +#: cinder/openstack/common/processutils.py:142 #, python-format -msgid "Setting up bdm %s" +msgid "Running cmd (subprocess): %s" msgstr "" -#: cinder/compute/manager.py:400 +#: cinder/openstack/common/processutils.py:167 +#: cinder/openstack/common/processutils.py:239 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:345 #, python-format -msgid "Instance %s already deleted from database. Attempting forceful vm deletion" +msgid "Result was %s" msgstr "" -#: cinder/compute/manager.py:406 +#: cinder/openstack/common/processutils.py:179 #, python-format -msgid "Exception encountered while terminating the instance %s" +msgid "%r failed. Retrying." msgstr "" -#: cinder/compute/manager.py:444 +#: cinder/openstack/common/processutils.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:318 #, python-format -msgid "Instance %s not found." 
+msgid "Running cmd (SSH): %s" msgstr "" -#: cinder/compute/manager.py:480 -msgid "Instance has already been created" +#: cinder/openstack/common/processutils.py:220 +msgid "Environment not supported over SSH" msgstr "" -#: cinder/compute/manager.py:523 -#, python-format -msgid "" -"image_id=%(image_id)s, image_size_bytes=%(size_bytes)d, " -"allowed_size_bytes=%(allowed_size_bytes)d" +#: cinder/openstack/common/processutils.py:224 +msgid "process_input not supported over SSH" msgstr "" -#: cinder/compute/manager.py:528 +#: cinder/openstack/common/service.py:175 +#: cinder/openstack/common/service.py:269 #, python-format -msgid "" -"Image '%(image_id)s' size %(size_bytes)d exceeded instance_type allowed " -"size %(allowed_size_bytes)d" +msgid "Caught %s, exiting" msgstr "" -#: cinder/compute/manager.py:538 -msgid "Starting instance..." +#: cinder/openstack/common/service.py:187 +msgid "Exception during rpc cleanup." msgstr "" -#: cinder/compute/manager.py:548 -msgid "Skipping network allocation for instance" +#: cinder/openstack/common/service.py:238 +msgid "Parent process has died unexpectedly, exiting" msgstr "" -#: cinder/compute/manager.py:561 -msgid "Instance failed network setup" +#: cinder/openstack/common/service.py:275 +msgid "Unhandled exception" msgstr "" -#: cinder/compute/manager.py:565 -#, python-format -msgid "Instance network_info: |%s|" +#: cinder/openstack/common/service.py:308 +msgid "Forking too fast, sleeping" msgstr "" -#: cinder/compute/manager.py:578 -msgid "Instance failed block device setup" +#: cinder/openstack/common/service.py:327 +#, python-format +msgid "Started child %d" msgstr "" -#: cinder/compute/manager.py:594 -msgid "Instance failed to spawn" +#: cinder/openstack/common/service.py:337 +#, python-format +msgid "Starting %d workers" msgstr "" -#: cinder/compute/manager.py:615 -msgid "Deallocating network for instance" +#: cinder/openstack/common/service.py:354 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" msgstr "" -#: cinder/compute/manager.py:672 +#: cinder/openstack/common/service.py:358 #, python-format -msgid "%(action_str)s instance" +msgid "Child %(pid)s exited with status %(code)d" msgstr "" -#: cinder/compute/manager.py:699 +#: cinder/openstack/common/service.py:362 #, python-format -msgid "Ignoring DiskNotFound: %s" +msgid "pid %d not in child list" msgstr "" -#: cinder/compute/manager.py:708 +#: cinder/openstack/common/service.py:392 #, python-format -msgid "terminating bdm %s" +msgid "Caught %s, stopping children" msgstr "" -#: cinder/compute/manager.py:742 cinder/compute/manager.py:1328 -#: cinder/compute/manager.py:1416 cinder/compute/manager.py:2501 +#: cinder/openstack/common/service.py:410 #, python-format -msgid "%s. Setting instance vm_state to ERROR" +msgid "Waiting on %d children to exit" msgstr "" -#: cinder/compute/manager.py:811 +#: cinder/openstack/common/sslutils.py:98 #, python-format -msgid "" -"Cannot rebuild instance [%(instance_uuid)s], because the given image does" -" not exist." 
+msgid "Invalid SSL version : %s" msgstr "" -#: cinder/compute/manager.py:816 +#: cinder/openstack/common/strutils.py:86 #, python-format -msgid "Cannot rebuild instance [%(instance_uuid)s]: %(exc)s" +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" msgstr "" -#: cinder/compute/manager.py:823 +#: cinder/openstack/common/strutils.py:182 #, python-format -msgid "Rebuilding instance %s" +msgid "Invalid string format: %s" msgstr "" -#: cinder/compute/manager.py:876 +#: cinder/openstack/common/strutils.py:189 #, python-format -msgid "Rebooting instance %s" +msgid "Unknown byte multiplier: %s" msgstr "" -#: cinder/compute/manager.py:891 +#: cinder/openstack/common/versionutils.py:69 #, python-format msgid "" -"trying to reboot a non-running instance: %(instance_uuid)s (state: " -"%(state)s expected: %(running)s)" +"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and " +"may be removed in %(remove_in)s." msgstr "" -#: cinder/compute/manager.py:933 +#: cinder/openstack/common/versionutils.py:73 #, python-format -msgid "instance %s: snapshotting" +msgid "" +"%(what)s is deprecated as of %(as_of)s and may be removed in " +"%(remove_in)s. It will not be superseded." msgstr "" -#: cinder/compute/manager.py:939 -#, python-format -msgid "" -"trying to snapshot a non-running instance: %(instance_uuid)s (state: " -"%(state)s expected: %(running)s)" +#: cinder/openstack/common/crypto/utils.py:29 +msgid "An unknown error occurred in crypto utils." msgstr "" -#: cinder/compute/manager.py:995 +#: cinder/openstack/common/crypto/utils.py:36 #, python-format -msgid "Found %(num_images)d images (rotation: %(rotation)d)" +msgid "Block size of %(given)d is too big, max = %(maximum)d" msgstr "" -#: cinder/compute/manager.py:1001 +#: cinder/openstack/common/crypto/utils.py:45 #, python-format -msgid "Rotating out %d backups" +msgid "Length of %(given)d is too long, max = %(maximum)d" msgstr "" -#: cinder/compute/manager.py:1005 -#, python-format -msgid "Deleting image %s" +#: cinder/openstack/common/db/exception.py:44 +msgid "Invalid Parameter: Unicode is not supported by the current database." msgstr "" -#: cinder/compute/manager.py:1035 -#, python-format -msgid "Failed to set admin password. Instance %s is not running" +#: cinder/openstack/common/db/sqlalchemy/session.py:487 +msgid "DB exception wrapped." msgstr "" -#: cinder/compute/manager.py:1041 +#: cinder/openstack/common/db/sqlalchemy/session.py:538 #, python-format -msgid "Instance %s: Root password set" +msgid "Got mysql server has gone away: %s" msgstr "" -#: cinder/compute/manager.py:1050 -msgid "set_admin_password is not implemented by this driver." +#: cinder/openstack/common/db/sqlalchemy/session.py:610 +#, python-format +msgid "SQL connection failed. %s attempts left." msgstr "" -#: cinder/compute/manager.py:1064 -msgid "Error setting admin password" +#: cinder/openstack/common/db/sqlalchemy/utils.py:33 +msgid "Sort key supplied was not valid." msgstr "" -#: cinder/compute/manager.py:1079 +#: cinder/openstack/common/notifier/api.py:129 #, python-format -msgid "" -"trying to inject a file into a non-running instance: %(instance_uuid)s " -"(state: %(current_power_state)s expected: %(expected_state)s)" +msgid "%s not in valid priorities" msgstr "" -#: cinder/compute/manager.py:1084 +#: cinder/openstack/common/notifier/api.py:145 #, python-format -msgid "instance %(instance_uuid)s: injecting file to %(path)s" +msgid "" +"Problem '%(e)s' attempting to send to notification system. 
" +"Payload=%(payload)s" msgstr "" -#: cinder/compute/manager.py:1098 +#: cinder/openstack/common/notifier/api.py:164 #, python-format -msgid "" -"trying to update agent on a non-running instance: %(instance_uuid)s " -"(state: %(current_power_state)s expected: %(expected_state)s)" +msgid "Failed to load notifier %s. These notifications will not be sent." msgstr "" -#: cinder/compute/manager.py:1103 -#, python-format -msgid "instance %(instance_uuid)s: updating agent to %(url)s" +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead." msgstr "" -#: cinder/compute/manager.py:1116 +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 #, python-format -msgid "instance %s: rescuing" +msgid "Could not send notification to %(topic)s. Payload=%(message)s" msgstr "" -#: cinder/compute/manager.py:1141 +#: cinder/openstack/common/rpc/__init__.py:105 #, python-format -msgid "instance %s: unrescuing" +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." msgstr "" -#: cinder/compute/manager.py:1270 -msgid "destination same as source!" +#: cinder/openstack/common/rpc/amqp.py:83 +msgid "Pool creating new connection" msgstr "" -#: cinder/compute/manager.py:1287 +#: cinder/openstack/common/rpc/amqp.py:208 #, python-format -msgid "instance %s: migrating" +msgid "no calling threads waiting for msg_id : %s, message : %s" msgstr "" -#: cinder/compute/manager.py:1471 +#: cinder/openstack/common/rpc/amqp.py:216 #, python-format -msgid "instance %s: pausing" +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." msgstr "" -#: cinder/compute/manager.py:1489 +#: cinder/openstack/common/rpc/amqp.py:299 #, python-format -msgid "instance %s: unpausing" +msgid "unpacked context: %s" msgstr "" -#: cinder/compute/manager.py:1525 +#: cinder/openstack/common/rpc/amqp.py:345 #, python-format -msgid "instance %s: retrieving diagnostics" +msgid "UNIQUE_ID is %s." msgstr "" -#: cinder/compute/manager.py:1534 +#: cinder/openstack/common/rpc/amqp.py:414 #, python-format -msgid "instance %s: suspending" -msgstr "" +msgid "received %s" +msgstr "natanggap %s" -#: cinder/compute/manager.py:1556 +#: cinder/openstack/common/rpc/amqp.py:422 #, python-format -msgid "instance %s: resuming" -msgstr "" +msgid "no method for message: %s" +msgstr "walang paraan para sa mensahe: %s" -#: cinder/compute/manager.py:1579 +#: cinder/openstack/common/rpc/amqp.py:423 #, python-format -msgid "instance %s: locking" -msgstr "" +msgid "No method for message: %s" +msgstr "Walang paraan para sa mensahe: %s" -#: cinder/compute/manager.py:1588 +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:280 #, python-format -msgid "instance %s: unlocking" +msgid "Expected exception during message handling (%s)" msgstr "" -#: cinder/compute/manager.py:1596 -#, python-format -msgid "instance %s: getting locked state" +#: cinder/openstack/common/rpc/amqp.py:459 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" msgstr "" -#: cinder/compute/manager.py:1606 +#: cinder/openstack/common/rpc/amqp.py:594 #, python-format -msgid "instance %s: reset network" +msgid "Making synchronous call on %s ..." 
msgstr "" -#: cinder/compute/manager.py:1614 +#: cinder/openstack/common/rpc/amqp.py:597 #, python-format -msgid "instance %s: inject network info" +msgid "MSG_ID is %s" msgstr "" -#: cinder/compute/manager.py:1618 +#: cinder/openstack/common/rpc/amqp.py:631 #, python-format -msgid "network_info to inject: |%s|" +msgid "Making asynchronous cast on %s..." msgstr "" -#: cinder/compute/manager.py:1655 -#, python-format -msgid "instance %s: getting vnc console" +#: cinder/openstack/common/rpc/amqp.py:640 +msgid "Making asynchronous fanout cast..." msgstr "" -#: cinder/compute/manager.py:1685 +#: cinder/openstack/common/rpc/amqp.py:668 #, python-format -msgid "Booting with volume %(volume_id)s at %(mountpoint)s" +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." msgstr "" -#: cinder/compute/manager.py:1703 +#: cinder/openstack/common/rpc/common.py:104 #, python-format msgid "" -"instance %(instance_uuid)s: attaching volume %(volume_id)s to " -"%(mountpoint)s" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." msgstr "" -#: cinder/compute/manager.py:1705 +#: cinder/openstack/common/rpc/common.py:121 #, python-format -msgid "Attaching volume %(volume_id)s to %(mountpoint)s" +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" msgstr "" -#: cinder/compute/manager.py:1714 -#, python-format -msgid "instance %(instance_uuid)s: attach failed %(mountpoint)s, removing" +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" msgstr "" -#: cinder/compute/manager.py:1724 +#: cinder/openstack/common/rpc/common.py:143 #, python-format -msgid "Attach failed %(mountpoint)s, removing" +msgid "Found duplicate message(%(msg_id)s). Skipping it." msgstr "" -#: cinder/compute/manager.py:1752 -#, python-format -msgid "Detach volume %(volume_id)s from mountpoint %(mp)s" +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." msgstr "" -#: cinder/compute/manager.py:1756 +#: cinder/openstack/common/rpc/common.py:151 #, python-format -msgid "Detaching volume from unknown instance %s" +msgid "Specified RPC version, %(version)s, not supported by this endpoint." msgstr "" -#: cinder/compute/manager.py:1822 +#: cinder/openstack/common/rpc/common.py:156 #, python-format msgid "" -"Creating tmpfile %s to notify to other compute nodes that they should " -"mount the same storage." -msgstr "" - -#: cinder/compute/manager.py:1884 -msgid "Instance has no volume." +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." msgstr "" -#: cinder/compute/manager.py:1916 +#: cinder/openstack/common/rpc/common.py:280 #, python-format -msgid "plug_vifs() failed %(cnt)d.Retry up to %(max_retry)d for %(hostname)s." +msgid "Failed to sanitize %(item)s. Key error %(err)s" msgstr "" -#: cinder/compute/manager.py:1973 +#: cinder/openstack/common/rpc/common.py:302 #, python-format -msgid "Pre live migration failed at %(dest)s" +msgid "Returning exception %s to caller" msgstr "" -#: cinder/compute/manager.py:2000 -msgid "post_live_migration() is started.." +#: cinder/openstack/common/rpc/impl_kombu.py:166 +#: cinder/openstack/common/rpc/impl_qpid.py:164 +msgid "Failed to process message... skipping it." 
msgstr "" -#: cinder/compute/manager.py:2030 -msgid "No floating_ip found" +#: cinder/openstack/common/rpc/impl_kombu.py:477 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" msgstr "" -#: cinder/compute/manager.py:2038 -msgid "No floating_ip found." +#: cinder/openstack/common/rpc/impl_kombu.py:499 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" msgstr "" -#: cinder/compute/manager.py:2040 +#: cinder/openstack/common/rpc/impl_kombu.py:536 #, python-format msgid "" -"Live migration: Unexpected error: cannot inherit floating ip.\n" -"%(e)s" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" msgstr "" -#: cinder/compute/manager.py:2073 +#: cinder/openstack/common/rpc/impl_kombu.py:552 #, python-format -msgid "Migrating instance to %(dest)s finished successfully." -msgstr "" - -#: cinder/compute/manager.py:2075 msgid "" -"You may see the error \"libvirt: QEMU error: Domain not found: no domain " -"with matching name.\" This error can be safely ignored." +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." msgstr "" -#: cinder/compute/manager.py:2090 -msgid "Post operation of migration started" +#: cinder/openstack/common/rpc/impl_kombu.py:606 +#: cinder/openstack/common/rpc/impl_qpid.py:507 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" msgstr "" -#: cinder/compute/manager.py:2226 +#: cinder/openstack/common/rpc/impl_kombu.py:624 +#: cinder/openstack/common/rpc/impl_qpid.py:522 #, python-format -msgid "Updated the info_cache for instance %s" +msgid "Timed out waiting for RPC response: %s" msgstr "" -#: cinder/compute/manager.py:2255 -msgid "Updating bandwidth usage cache" +#: cinder/openstack/common/rpc/impl_kombu.py:628 +#: cinder/openstack/common/rpc/impl_qpid.py:526 +#, python-format +msgid "Failed to consume message from queue: %s" msgstr "" -#: cinder/compute/manager.py:2277 -msgid "Updating host status" +#: cinder/openstack/common/rpc/impl_kombu.py:667 +#: cinder/openstack/common/rpc/impl_qpid.py:561 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" msgstr "" -#: cinder/compute/manager.py:2305 +#: cinder/openstack/common/rpc/impl_qpid.py:84 #, python-format -msgid "" -"Found %(num_db_instances)s in the database and %(num_vm_instances)s on " -"the hypervisor." +msgid "Invalid value for qpid_topology_version: %d" msgstr "" -#: cinder/compute/manager.py:2331 +#: cinder/openstack/common/rpc/impl_qpid.py:455 #, python-format -msgid "" -"During the sync_power process the instance %(uuid)s has moved from host " -"%(src)s to host %(dst)s" +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" msgstr "" -#: cinder/compute/manager.py:2344 +#: cinder/openstack/common/rpc/impl_qpid.py:461 #, python-format -msgid "" -"Instance %s is in the process of migrating to this host. Wait next " -"sync_power cycle before setting power state to NOSTATE" +msgid "Connected to AMQP server on %s" msgstr "" -#: cinder/compute/manager.py:2350 -msgid "" -"Instance found in database but not known by hypervisor. Setting power " -"state to NOSTATE" +#: cinder/openstack/common/rpc/impl_qpid.py:474 +msgid "Re-established AMQP queues" msgstr "" -#: cinder/compute/manager.py:2380 -msgid "FLAGS.reclaim_instance_interval <= 0, skipping..." +#: cinder/openstack/common/rpc/impl_qpid.py:534 +msgid "Error processing message. Skipping it." 
msgstr "" -#: cinder/compute/manager.py:2392 -msgid "Reclaiming deleted instance" +#: cinder/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." msgstr "" -#: cinder/compute/manager.py:2458 +#: cinder/openstack/common/rpc/impl_zmq.py:101 #, python-format -msgid "" -"Detected instance with name label '%(name)s' which is marked as DELETED " -"but still present on host." +msgid "Deserializing: %s" msgstr "" -#: cinder/compute/manager.py:2465 +#: cinder/openstack/common/rpc/impl_zmq.py:136 #, python-format -msgid "" -"Destroying instance with name label '%(name)s' which is marked as DELETED" -" but still present on host." +msgid "Connecting to %(addr)s with %(type)s" msgstr "" -#: cinder/compute/manager.py:2472 +#: cinder/openstack/common/rpc/impl_zmq.py:137 #, python-format -msgid "Unrecognized value '%(action)s' for FLAGS.running_deleted_instance_action" +msgid "-> Subscribed to %(subscribe)s" msgstr "" -#: cinder/compute/manager.py:2542 +#: cinder/openstack/common/rpc/impl_zmq.py:138 #, python-format -msgid "" -"Aggregate %(aggregate_id)s: unrecoverable state during operation on " -"%(host)s" +msgid "-> bind: %(bind)s" msgstr "" -#: cinder/compute/utils.py:142 -msgid "v4 subnets are required for legacy nw_info" +#: cinder/openstack/common/rpc/impl_zmq.py:146 +msgid "Could not open socket." msgstr "" -#: cinder/console/manager.py:77 cinder/console/vmrc_manager.py:70 -msgid "Adding console" +#: cinder/openstack/common/rpc/impl_zmq.py:158 +#, python-format +msgid "Subscribing to %s" msgstr "" -#: cinder/console/manager.py:97 -#, python-format -msgid "Tried to remove non-existent console %(console_id)s." +#: cinder/openstack/common/rpc/impl_zmq.py:200 +msgid "You cannot recv on this socket." msgstr "" -#: cinder/console/vmrc_manager.py:122 -#, python-format -msgid "Tried to remove non-existent console %(console_id)s." +#: cinder/openstack/common/rpc/impl_zmq.py:205 +msgid "You cannot send on this socket." msgstr "" -#: cinder/console/vmrc_manager.py:125 +#: cinder/openstack/common/rpc/impl_zmq.py:267 #, python-format -msgid "Removing console %(console_id)s." +msgid "Running func with context: %s" msgstr "" -#: cinder/console/xvp.py:98 -msgid "Rebuilding xvp conf" +#: cinder/openstack/common/rpc/impl_zmq.py:305 +msgid "Sending reply" msgstr "" -#: cinder/console/xvp.py:116 -#, python-format -msgid "Re-wrote %s" +#: cinder/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." msgstr "" -#: cinder/console/xvp.py:121 -msgid "Stopping xvp" +#: cinder/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" msgstr "" -#: cinder/console/xvp.py:134 -msgid "Starting xvp" +#: cinder/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" msgstr "" -#: cinder/console/xvp.py:141 -#, python-format -msgid "Error starting xvp: %s" +#: cinder/openstack/common/rpc/impl_zmq.py:387 +msgid "Consuming socket" msgstr "" -#: cinder/console/xvp.py:144 -msgid "Restarting xvp" +#: cinder/openstack/common/rpc/impl_zmq.py:437 +#, python-format +msgid "Creating proxy for topic: %s" msgstr "" -#: cinder/console/xvp.py:146 -msgid "xvp not running..." +#: cinder/openstack/common/rpc/impl_zmq.py:443 +msgid "Topic contained dangerous characters." msgstr "" -#: cinder/consoleauth/manager.py:63 -#, python-format -msgid "Deleting Expired Token: (%s)" +#: cinder/openstack/common/rpc/impl_zmq.py:475 +msgid "Topic socket file creation failed." 
msgstr "" -#: cinder/consoleauth/manager.py:75 +#: cinder/openstack/common/rpc/impl_zmq.py:481 #, python-format -msgid "Received Token: %(token)s, %(token_dict)s)" +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." msgstr "" -#: cinder/consoleauth/manager.py:79 +#: cinder/openstack/common/rpc/impl_zmq.py:497 #, python-format -msgid "Checking Token: %(token)s, %(token_valid)s)" +msgid "Required IPC directory does not exist at %s" msgstr "" -#: cinder/db/sqlalchemy/api.py:57 -msgid "Use of empty request context is deprecated" +#: cinder/openstack/common/rpc/impl_zmq.py:506 +#, python-format +msgid "Permission denied to IPC directory at %s" msgstr "" -#: cinder/db/sqlalchemy/api.py:198 -#, python-format -msgid "Unrecognized read_deleted value '%s'" +#: cinder/openstack/common/rpc/impl_zmq.py:509 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." msgstr "" -#: cinder/db/sqlalchemy/api.py:516 cinder/db/sqlalchemy/api.py:551 +#: cinder/openstack/common/rpc/impl_zmq.py:543 #, python-format -msgid "No ComputeNode for %(host)s" +msgid "CONSUMER RECEIVED DATA: %s" msgstr "" -#: cinder/db/sqlalchemy/api.py:4019 cinder/db/sqlalchemy/api.py:4045 -#, python-format -msgid "No backend config with id %(sm_backend_id)s" +#: cinder/openstack/common/rpc/impl_zmq.py:562 +msgid "ZMQ Envelope version unsupported or unknown." msgstr "" -#: cinder/db/sqlalchemy/api.py:4103 -#, python-format -msgid "No sm_flavor called %(sm_flavor)s" +#: cinder/openstack/common/rpc/impl_zmq.py:590 +msgid "Skipping topic registration. Already registered." msgstr "" -#: cinder/db/sqlalchemy/api.py:4147 +#: cinder/openstack/common/rpc/impl_zmq.py:597 #, python-format -msgid "No sm_volume with id %(volume_id)s" +msgid "Consumer is a zmq.%s" msgstr "" -#: cinder/db/sqlalchemy/migration.py:66 -msgid "python-migrate is not installed. Exiting." +#: cinder/openstack/common/rpc/impl_zmq.py:649 +msgid "Creating payload" msgstr "" -#: cinder/db/sqlalchemy/migration.py:78 -msgid "version should be an integer" +#: cinder/openstack/common/rpc/impl_zmq.py:662 +msgid "Creating queue socket for reply waiter" msgstr "" -#: cinder/db/sqlalchemy/session.py:137 -#, python-format -msgid "SQL connection failed. %s attempts left." +#: cinder/openstack/common/rpc/impl_zmq.py:675 +msgid "Sending cast" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:48 -msgid "interface column not added to networks table" +#: cinder/openstack/common/rpc/impl_zmq.py:678 +msgid "Cast sent; Waiting reply" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:80 -#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:54 -#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:61 -#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:48 -#, python-format -msgid "Table |%s| not created!" -msgstr "" +#: cinder/openstack/common/rpc/impl_zmq.py:681 +#, fuzzy, python-format +msgid "Received message: %s" +msgstr "natanggap %s" -#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:87 -msgid "VIF column not added to fixed_ips table" +#: cinder/openstack/common/rpc/impl_zmq.py:682 +msgid "Unpacking response" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:97 -#, python-format -msgid "join list for moving mac_addresses |%s|" +#: cinder/openstack/common/rpc/impl_zmq.py:691 +msgid "Unsupported or unknown ZMQ envelope returned." 
msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:39 -#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:60 -#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:61 -#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:99 -msgid "foreign key constraint couldn't be added" +#: cinder/openstack/common/rpc/impl_zmq.py:698 +msgid "RPC Message Invalid." msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:58 -msgid "foreign key constraint couldn't be dropped" +#: cinder/openstack/common/rpc/impl_zmq.py:721 +#, python-format +msgid "%(msg)s" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/045_add_network_priority.py:34 -msgid "priority column not added to networks table" +#: cinder/openstack/common/rpc/impl_zmq.py:724 +#, python-format +msgid "Sending message(s) to: %s" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:41 -#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:42 -#: cinder/db/sqlalchemy/migrate_repo/versions/064_change_instance_id_to_uuid_in_instance_actions.py:56 -#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:68 -msgid "foreign key constraint couldn't be removed" +#: cinder/openstack/common/rpc/impl_zmq.py:728 +msgid "No matchmaker results. Not casting." msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/049_add_instances_progress.py:34 -msgid "progress column not added to instances table" +#: cinder/openstack/common/rpc/impl_zmq.py:731 +msgid "No match from matchmaker." msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/055_convert_flavor_id_to_str.py:97 +#: cinder/openstack/common/rpc/impl_zmq.py:771 #, python-format -msgid "" -"Could not cast flavorid to integer: %s. Set flavorid to an integer-like " -"string to downgrade." -msgstr "" - -#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:69 -msgid "instance_info_caches tables not dropped" +msgid "topic is %s." msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/069_block_migration.py:41 -msgid "progress column not added to compute_nodes table" +#: cinder/openstack/common/rpc/impl_zmq.py:815 +#, python-format +msgid "rpc_zmq_matchmaker = %(orig)s is deprecated; use %(new)s instead" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:76 -msgid "dns_domains table not dropped" +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:60 -msgid "quota_classes table not dropped" +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." msgstr "" -#: cinder/image/glance.py:147 -msgid "Connection error contacting glance server, retrying" +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" msgstr "" -#: cinder/image/glance.py:153 cinder/network/quantum/melange_connection.py:104 -msgid "Maximum attempts reached" +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." 
msgstr "" -#: cinder/image/glance.py:278 +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#: cinder/openstack/common/rpc/matchmaker_ring.py:79 +#: cinder/openstack/common/rpc/matchmaker_ring.py:97 #, python-format -msgid "Creating image in Glance. Metadata passed in %s" +msgid "No key defining hosts for topic '%s', see ringfile" msgstr "" -#: cinder/image/glance.py:281 +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:54 #, python-format -msgid "Metadata after formatting for Glance %s" +msgid "extra_spec requirement '%(req)s' does not match '%(cap)s'" msgstr "" -#: cinder/image/glance.py:289 +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:67 #, python-format -msgid "Metadata returned from Glance formatted for Base %s" +msgid "%(host_state)s fails resource_type extra_specs requirements" msgstr "" -#: cinder/image/glance.py:331 cinder/image/glance.py:335 -msgid "Not the image owner" +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:43 +msgid "Re-scheduling is disabled." msgstr "" -#: cinder/image/glance.py:410 +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:52 #, python-format -msgid "%(timestamp)s does not follow any of the signatures: %(iso_formats)s" +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" msgstr "" -#: cinder/image/s3.py:309 -#, python-format -msgid "Failed to download %(image_location)s to %(image_path)s" +#: cinder/scheduler/driver.py:69 +msgid "Must implement host_passes_filters" msgstr "" -#: cinder/image/s3.py:328 -#, python-format -msgid "Failed to decrypt %(image_location)s to %(image_path)s" +#: cinder/scheduler/driver.py:74 +msgid "Must implement find_retype_host" msgstr "" -#: cinder/image/s3.py:340 -#, python-format -msgid "Failed to untar %(image_location)s to %(image_path)s" +#: cinder/scheduler/driver.py:78 +msgid "Must implement a fallback schedule" msgstr "" -#: cinder/image/s3.py:353 -#, python-format -msgid "Failed to upload %(image_location)s to %(image_path)s" +#: cinder/scheduler/driver.py:82 +msgid "Must implement schedule_create_volume" msgstr "" -#: cinder/image/s3.py:379 +#: cinder/scheduler/filter_scheduler.py:98 #, python-format -msgid "Failed to decrypt private key: %s" +msgid "cannot place volume %(id)s on %(host)s" msgstr "" -#: cinder/image/s3.py:387 +#: cinder/scheduler/filter_scheduler.py:114 #, python-format -msgid "Failed to decrypt initialization vector: %s" +msgid "No valid hosts for volume %(id)s with type %(type)s" msgstr "" -#: cinder/image/s3.py:398 +#: cinder/scheduler/filter_scheduler.py:125 #, python-format -msgid "Failed to decrypt image file %(image_file)s: %(err)s" +msgid "" +"Current host not valid for volume %(id)s with type %(type)s, migration " +"not allowed" msgstr "" -#: cinder/image/s3.py:410 -msgid "Unsafe filenames in image" +#: cinder/scheduler/filter_scheduler.py:156 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" msgstr "" -#: cinder/ipv6/account_identifier.py:38 cinder/ipv6/rfc2462.py:34 +#: cinder/scheduler/filter_scheduler.py:174 #, python-format -msgid "Bad mac for to_global_ipv6: %s" +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" msgstr "" -#: cinder/ipv6/account_identifier.py:40 cinder/ipv6/rfc2462.py:36 +#: cinder/scheduler/filter_scheduler.py:207 #, python-format -msgid "Bad prefix for to_global_ipv6: %s" +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume 
%(volume_id)s" msgstr "" -#: cinder/ipv6/account_identifier.py:42 +#: cinder/scheduler/filter_scheduler.py:259 #, python-format -msgid "Bad project_id for to_global_ipv6: %s" +msgid "Filtered %s" msgstr "" -#: cinder/network/ldapdns.py:321 -msgid "This driver only supports type 'a' entries." +#: cinder/scheduler/filter_scheduler.py:276 +#, python-format +msgid "Choosing %s" msgstr "" -#: cinder/network/linux_net.py:166 +#: cinder/scheduler/host_manager.py:264 #, python-format -msgid "Attempted to remove chain %s which does not exist" +msgid "Ignoring %(service_name)s service update from %(host)s" msgstr "" -#: cinder/network/linux_net.py:192 +#: cinder/scheduler/host_manager.py:269 #, python-format -msgid "Unknown chain: %r" +msgid "Received %(service_name)s service update from %(host)s." msgstr "" -#: cinder/network/linux_net.py:215 +#: cinder/scheduler/host_manager.py:294 #, python-format -msgid "" -"Tried to remove rule that was not there: %(chain)r %(rule)r %(wrap)r " -"%(top)r" +msgid "volume service is down or disabled. (host: %s)" msgstr "" -#: cinder/network/linux_net.py:335 -msgid "IPTablesManager.apply completed with success" +#: cinder/scheduler/manager.py:63 +msgid "" +"ChanceScheduler and SimpleScheduler have been deprecated due to lack of " +"support for advanced features like: volume types, volume encryption, QoS " +"etc. These two schedulers can be fully replaced by FilterScheduler with " +"certain combination of filters and weighers." msgstr "" -#: cinder/network/linux_net.py:694 -#, python-format -msgid "Hupping dnsmasq threw %s" +#: cinder/scheduler/manager.py:98 cinder/scheduler/manager.py:100 +msgid "Failed to create scheduler manager volume flow" msgstr "" -#: cinder/network/linux_net.py:696 -#, python-format -msgid "Pid %d is stale, relaunching dnsmasq" +#: cinder/scheduler/manager.py:159 +msgid "New volume type not specified in request_spec." msgstr "" -#: cinder/network/linux_net.py:756 +#: cinder/scheduler/manager.py:174 #, python-format -msgid "killing radvd threw %s" +msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." msgstr "" -#: cinder/network/linux_net.py:758 +#: cinder/scheduler/manager.py:192 #, python-format -msgid "Pid %d is stale, relaunching radvd" +msgid "Failed to schedule_%(method)s: %(ex)s" msgstr "" -#: cinder/network/linux_net.py:967 +#: cinder/scheduler/scheduler_options.py:68 #, python-format -msgid "Starting VLAN inteface %s" +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" msgstr "" -#: cinder/network/linux_net.py:999 +#: cinder/scheduler/scheduler_options.py:78 #, python-format -msgid "Starting Bridge interface for %s" +msgid "Could not decode scheduler options: '%s'" msgstr "" -#: cinder/network/linux_net.py:1142 -#, python-format -msgid "Starting bridge %s " +#: cinder/scheduler/filters/capacity_filter.py:43 +msgid "Free capacity not set: volume node info collection broken." 
msgstr "" -#: cinder/network/linux_net.py:1149 +#: cinder/scheduler/filters/capacity_filter.py:57 #, python-format -msgid "Done starting bridge %s" +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" msgstr "" -#: cinder/network/linux_net.py:1167 -#, python-format -msgid "Failed unplugging gateway interface '%s'" +#: cinder/scheduler/flows/create_volume.py:53 +msgid "No volume_id provided to populate a request_spec from" msgstr "" -#: cinder/network/linux_net.py:1170 +#: cinder/scheduler/flows/create_volume.py:116 #, python-format -msgid "Unplugged gateway interface '%s'" +msgid "Failed to schedule_create_volume: %(cause)s" msgstr "" -#: cinder/network/manager.py:291 +#: cinder/scheduler/flows/create_volume.py:135 #, python-format -msgid "Fixed ip %(fixed_ip_id)s not found" +msgid "Failed notifying on %(topic)s payload %(payload)s" msgstr "" -#: cinder/network/manager.py:300 cinder/network/manager.py:496 +#: cinder/tests/fake_driver.py:57 cinder/volume/driver.py:784 #, python-format -msgid "Interface %(interface)s not found" +msgid "FAKE ISCSI: %s" msgstr "" -#: cinder/network/manager.py:315 +#: cinder/tests/fake_driver.py:76 cinder/volume/driver.py:884 #, python-format -msgid "floating IP allocation for instance |%s|" +msgid "FAKE ISER: %s" msgstr "" -#: cinder/network/manager.py:353 -#, python-format -msgid "floating IP deallocation for instance |%s|" +#: cinder/tests/fake_driver.py:97 +msgid "local_path not implemented" msgstr "" -#: cinder/network/manager.py:386 +#: cinder/tests/fake_driver.py:124 cinder/tests/fake_driver.py:129 #, python-format -msgid "Address |%(address)s| is not allocated" +msgid "LoggingVolumeDriver: %s" msgstr "" -#: cinder/network/manager.py:390 +#: cinder/tests/fake_utils.py:70 #, python-format -msgid "Address |%(address)s| is not allocated to your project |%(project)s|" +msgid "Faking execution of cmd (subprocess): %s" msgstr "" -#: cinder/network/manager.py:402 +#: cinder/tests/fake_utils.py:78 #, python-format -msgid "Quota exceeded for %s, tried to allocate address" +msgid "Faked command matched %s" msgstr "" -#: cinder/network/manager.py:614 +#: cinder/tests/fake_utils.py:94 #, python-format -msgid "" -"Database inconsistency: DNS domain |%s| is registered in the Cinder db but " -"not visible to either the floating or instance DNS driver. It will be " -"ignored." +msgid "Faked command raised an exception %s" msgstr "" -#: cinder/network/manager.py:660 +#: cinder/tests/fake_utils.py:97 #, python-format -msgid "Domain |%(domain)s| already exists, changing zone to |%(av_zone)s|." +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" msgstr "" -#: cinder/network/manager.py:670 +#: cinder/tests/test_misc.py:58 #, python-format -msgid "Domain |%(domain)s| already exists, changing project to |%(project)s|." 
+msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" msgstr "" -#: cinder/network/manager.py:778 +#: cinder/tests/test_netapp_nfs.py:360 #, python-format -msgid "Disassociated %s stale fixed ip(s)" +msgid "Share %(share)s and file name %(file_name)s" msgstr "" -#: cinder/network/manager.py:782 -msgid "setting network host" +#: cinder/tests/test_rbd.py:768 cinder/volume/drivers/rbd.py:175 +msgid "flush() not supported in this version of librbd" msgstr "" -#: cinder/network/manager.py:896 +#: cinder/tests/test_storwize_svc.py:260 #, python-format -msgid "network allocations for instance |%s|" +msgid "unrecognized argument %s" msgstr "" -#: cinder/network/manager.py:901 +#: cinder/tests/test_storwize_svc.py:1507 #, python-format -msgid "networks retrieved for instance |%(instance_id)s|: |%(networks)s|" +msgid "Run CLI command: %s" msgstr "" -#: cinder/network/manager.py:930 +#: cinder/tests/test_storwize_svc.py:1510 #, python-format -msgid "network deallocation for instance |%s|" +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" msgstr "" -#: cinder/network/manager.py:1152 +#: cinder/tests/test_storwize_svc.py:1515 #, python-format msgid "" -"instance-dns-zone is |%(domain)s|, which is in availability zone " -"|%(zone)s|. Instance |%(instance)s| is in zone |%(zone2)s|. No DNS record" -" will be created." +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/network/manager.py:1227 +#: cinder/tests/test_volume_types.py:60 #, python-format -msgid "Unable to release %s because vif doesn't exist." +msgid "Given data: %s" msgstr "" -#: cinder/network/manager.py:1244 +#: cinder/tests/test_volume_types.py:61 #, python-format -msgid "Leased IP |%(address)s|" +msgid "Result data: %s" msgstr "" -#: cinder/network/manager.py:1248 +#: cinder/tests/test_xiv_ds8k.py:102 #, python-format -msgid "IP %s leased that is not associated" +msgid "Volume not found for instance %(instance_id)s." msgstr "" -#: cinder/network/manager.py:1256 -#, python-format -msgid "IP |%s| leased that isn't allocated" +#: cinder/tests/api/contrib/test_backups.py:741 +msgid "Invalid input" msgstr "" -#: cinder/network/manager.py:1261 +#: cinder/tests/integrated/test_login.py:29 #, python-format -msgid "Released IP |%(address)s|" +msgid "volume: %s" msgstr "" -#: cinder/network/manager.py:1265 +#: cinder/tests/integrated/api/client.py:32 #, python-format -msgid "IP %s released that is not associated" +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" msgstr "" -#: cinder/network/manager.py:1268 -#, python-format -msgid "IP %s released that was not leased" +#: cinder/tests/integrated/api/client.py:42 +msgid "Authentication error" msgstr "" -#: cinder/network/manager.py:1331 -msgid "cidr already in use" +#: cinder/tests/integrated/api/client.py:50 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:58 +msgid "Item not found" msgstr "" -#: cinder/network/manager.py:1334 +#: cinder/tests/integrated/api/client.py:100 #, python-format -msgid "requested cidr (%(cidr)s) conflicts with existing supernet (%(super)s)" +msgid "Doing %(method)s on %(relative_url)s" msgstr "" -#: cinder/network/manager.py:1345 +#: cinder/tests/integrated/api/client.py:103 #, python-format -msgid "" -"requested cidr (%(cidr)s) conflicts with existing smaller cidr " -"(%(smaller)s)" +msgid "Body: %s" msgstr "" -#: cinder/network/manager.py:1404 -msgid "Network already exists!" 
+#: cinder/tests/integrated/api/client.py:121 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" msgstr "" -#: cinder/network/manager.py:1423 +#: cinder/tests/integrated/api/client.py:148 #, python-format -msgid "Network must be disassociated from project %s before delete" +msgid "%(relative_uri)s => code %(http_status)s" msgstr "" -#: cinder/network/manager.py:1832 -msgid "" -"The sum between the number of networks and the vlan start cannot be " -"greater than 4094" +#: cinder/tests/integrated/api/client.py:159 +msgid "Unexpected status code" msgstr "" -#: cinder/network/manager.py:1839 +#: cinder/tests/integrated/api/client.py:166 #, python-format -msgid "" -"The network range is not big enough to fit %(num_networks)s. Network size" -" is %(network_size)s" +msgid "Decoding JSON: %s" msgstr "" -#: cinder/network/minidns.py:65 -msgid "This driver only supports type 'a'" +#: cinder/transfer/api.py:68 +msgid "Volume in unexpected state" msgstr "" -#: cinder/network/quantum/client.py:154 -msgid "Tenant ID not set" +#: cinder/transfer/api.py:102 cinder/volume/api.py:367 +msgid "status must be available" msgstr "" -#: cinder/network/quantum/client.py:180 +#: cinder/transfer/api.py:119 #, python-format -msgid "Quantum Client Request: %(method)s %(action)s" +msgid "Failed to create transfer record for %s" msgstr "" -#: cinder/network/quantum/client.py:196 +#: cinder/transfer/api.py:136 #, python-format -msgid "Quantum entity not found: %s" +msgid "Attempt to transfer %s with invalid auth key." msgstr "" -#: cinder/network/quantum/client.py:206 +#: cinder/transfer/api.py:156 cinder/volume/flows/api/create_volume.py:508 #, python-format -msgid "Server %(status_code)s error: %(data)s" +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" msgstr "" -#: cinder/network/quantum/client.py:210 +#: cinder/transfer/api.py:182 #, python-format -msgid "Unable to connect to server. Got error: %s" +msgid "Failed to update quota donating volumetransfer id %s" msgstr "" -#: cinder/network/quantum/client.py:228 +#: cinder/transfer/api.py:199 #, python-format -msgid "unable to deserialize object of type = '%s'" +msgid "Volume %s has been transferred." msgstr "" -#: cinder/network/quantum/manager.py:162 -msgid "QuantumManager does not use 'multi_host' parameter." +#: cinder/volume/api.py:143 +#, python-format +msgid "Unable to query if %s is in the availability zone set" msgstr "" -#: cinder/network/quantum/manager.py:166 -msgid "QuantumManager requires that only one network is created per call" +#: cinder/volume/api.py:171 cinder/volume/api.py:173 +msgid "Failed to create api volume flow" msgstr "" -#: cinder/network/quantum/manager.py:176 -msgid "QuantumManager does not use 'vlan_start' parameter." +#: cinder/volume/api.py:202 +msgid "Failed to update quota for deleting volume" msgstr "" -#: cinder/network/quantum/manager.py:182 -msgid "QuantumManager does not use 'vpn_start' parameter." +#: cinder/volume/api.py:214 +#, python-format +msgid "Volume status must be available or error, but current status is: %s" msgstr "" -#: cinder/network/quantum/manager.py:186 -msgid "QuantumManager does not use 'bridge' parameter." +#: cinder/volume/api.py:224 +msgid "Volume cannot be deleted while migrating" msgstr "" -#: cinder/network/quantum/manager.py:190 -msgid "QuantumManager does not use 'bridge_interface' parameter." 
+#: cinder/volume/api.py:229 +#, python-format +msgid "Volume still has %d dependent snapshots" msgstr "" -#: cinder/network/quantum/manager.py:195 -msgid "QuantumManager requires a valid (.1) gateway address." +#: cinder/volume/api.py:285 cinder/volume/api.py:350 +#: cinder/volume/qos_specs.py:240 cinder/volume/volume_types.py:67 +#, python-format +msgid "Searching by: %s" msgstr "" -#: cinder/network/quantum/manager.py:204 -#, python-format -msgid "" -"Unable to find existing quantum network for tenant '%(q_tenant_id)s' with" -" net-id '%(quantum_net_id)s'" +#: cinder/volume/api.py:370 +msgid "already attached" msgstr "" -#: cinder/network/quantum/manager.py:301 -#, python-format -msgid "network allocations for instance %s" +#: cinder/volume/api.py:377 +msgid "status must be in-use to detach" msgstr "" -#: cinder/network/quantum/manager.py:588 -#, python-format -msgid "" -"port deallocation failed for instance: |%(instance_id)s|, port_id: " -"|%(port_id)s|" +#: cinder/volume/api.py:388 +msgid "Volume status must be available to reserve" msgstr "" -#: cinder/network/quantum/manager.py:606 -#, python-format -msgid "" -"ipam deallocation failed for instance: |%(instance_id)s|, vif_uuid: " -"|%(vif_uuid)s|" +#: cinder/volume/api.py:464 +msgid "Snapshot cannot be created while volume is migrating" msgstr "" -#: cinder/network/quantum/melange_connection.py:96 -#, python-format -msgid "Server returned error: %s" +#: cinder/volume/api.py:468 +msgid "must be available" msgstr "" -#: cinder/network/quantum/melange_connection.py:98 -msgid "Connection error contacting melange service, retrying" +#: cinder/volume/api.py:490 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" msgstr "" -#: cinder/network/quantum/melange_connection.py:108 +#: cinder/volume/api.py:502 #, python-format msgid "" -"allocate IP on network |%(network_id)s| belonging to " -"|%(network_tenant_id)s| to this vif |%(vif_id)s| with mac " -"|%(mac_address)s| belonging to |%(project_id)s| " +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" msgstr "" -#: cinder/network/quantum/melange_ipam_lib.py:133 -msgid "get_project_and_global_net_ids must be called with a non-null project_id" +#: cinder/volume/api.py:553 +msgid "Volume Snapshot status must be available or error" msgstr "" -#: cinder/network/quantum/cinder_ipam_lib.py:75 -msgid "Error creating network entry" +#: cinder/volume/api.py:581 cinder/volume/flows/api/create_volume.py:208 +msgid "Metadata property key blank" msgstr "" -#: cinder/network/quantum/cinder_ipam_lib.py:90 -#, python-format -msgid "No network with net_id = %s" +#: cinder/volume/api.py:585 +msgid "Metadata property key greater than 255 characters" msgstr "" -#: cinder/network/quantum/cinder_ipam_lib.py:221 -#, python-format -msgid "No fixed IPs to deallocate for vif %s" +#: cinder/volume/api.py:589 +msgid "Metadata property value greater than 255 characters" msgstr "" -#: cinder/network/quantum/quantum_connection.py:99 -#, python-format -msgid "Connecting interface %(interface_id)s to net %(net_id)s for %(tenant_id)s" +#: cinder/volume/api.py:720 cinder/volume/api.py:772 +msgid "Volume status must be available/in-use." msgstr "" -#: cinder/network/quantum/quantum_connection.py:113 -#, python-format -msgid "Deleting port %(port_id)s on net %(net_id)s for %(tenant_id)s" +#: cinder/volume/api.py:723 +msgid "Volume status is in-use." 
msgstr "" -#: cinder/notifier/api.py:115 -#, python-format -msgid "%s not in valid priorities" +#: cinder/volume/api.py:752 +msgid "Volume status must be available to extend." msgstr "" -#: cinder/notifier/api.py:130 +#: cinder/volume/api.py:757 #, python-format msgid "" -"Problem '%(e)s' attempting to send to notification system. " -"Payload=%(payload)s" +"New size for extend must be greater than current size. (current: " +"%(size)s, extended: %(new_size)s)" msgstr "" -#: cinder/notifier/list_notifier.py:65 -#, python-format -msgid "Problem '%(e)s' attempting to send to notification driver %(driver)s." +#: cinder/volume/api.py:778 +msgid "Volume is already part of an active migration" msgstr "" -#: cinder/notifier/rabbit_notifier.py:46 -#, python-format -msgid "Could not send notification to %(topic)s. Payload=%(message)s" +#: cinder/volume/api.py:784 +msgid "volume must not have snapshots" msgstr "" -#: cinder/rpc/amqp.py:146 +#: cinder/volume/api.py:797 #, python-format -msgid "Returning exception %s to caller" +msgid "No available service named %s" msgstr "" -#: cinder/rpc/amqp.py:188 -#, python-format -msgid "unpacked context: %s" +#: cinder/volume/api.py:803 +msgid "Destination host must be different than current host" msgstr "" -#: cinder/rpc/amqp.py:231 -#, python-format -msgid "received %s" -msgstr "natanggap %s" +#: cinder/volume/api.py:833 +msgid "Source volume not mid-migration." +msgstr "" -#: cinder/rpc/amqp.py:236 -#, python-format -msgid "no method for message: %s" -msgstr "walang paraan para sa mensahe: %s" +#: cinder/volume/api.py:837 +msgid "Destination volume not mid-migration." +msgstr "" -#: cinder/rpc/amqp.py:237 +#: cinder/volume/api.py:842 #, python-format -msgid "No method for message: %s" -msgstr "Walang paraan para sa mensahe: %s" +msgid "Destination has migration_status %(stat)s, expected %(exp)s." +msgstr "" -#: cinder/rpc/amqp.py:321 -#, python-format -msgid "Making asynchronous call on %s ..." +#: cinder/volume/api.py:853 +msgid "Volume status must be available to update readonly flag." msgstr "" -#: cinder/rpc/amqp.py:324 +#: cinder/volume/api.py:862 #, python-format -msgid "MSG_ID is %s" +msgid "Unable to update type due to incorrect status on volume: %s" msgstr "" -#: cinder/rpc/amqp.py:346 +#: cinder/volume/api.py:868 #, python-format -msgid "Making asynchronous cast on %s..." +msgid "Volume %s is already part of an active migration." msgstr "" -#: cinder/rpc/amqp.py:354 -msgid "Making asynchronous fanout cast..." +#: cinder/volume/api.py:874 +#, python-format +msgid "migration_policy must be 'on-demand' or 'never', passed: %s" msgstr "" -#: cinder/rpc/amqp.py:379 +#: cinder/volume/api.py:887 #, python-format -msgid "Sending notification on %s..." +msgid "Invalid volume_type passed: %s" msgstr "" -#: cinder/rpc/common.py:54 +#: cinder/volume/api.py:900 #, python-format -msgid "" -"Remote error: %(exc_type)s %(value)s\n" -"%(traceback)s." +msgid "New volume_type same as original: %s" msgstr "" -#: cinder/rpc/common.py:71 -msgid "Timeout while waiting on RPC response." +#: cinder/volume/api.py:915 +msgid "Retype cannot change encryption requirements" msgstr "" -#: cinder/rpc/impl_kombu.py:111 -msgid "Failed to process message... skipping it." 
+#: cinder/volume/api.py:927 +msgid "Retype cannot change front-end qos specs for in-use volumes" msgstr "" -#: cinder/rpc/impl_kombu.py:407 +#: cinder/volume/driver.py:189 cinder/volume/drivers/netapp/nfs.py:174 #, python-format -msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgid "Recovering from a failed execute. Try number %s" msgstr "" -#: cinder/rpc/impl_kombu.py:430 +#: cinder/volume/driver.py:282 #, python-format -msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgid "copy_data_between_volumes %(src)s -> %(dest)s." msgstr "" -#: cinder/rpc/impl_kombu.py:466 +#: cinder/volume/driver.py:295 cinder/volume/driver.py:309 #, python-format -msgid "" -"Unable to connect to AMQP server on %(hostname)s:%(port)d after " -"%(max_retries)d tries: %(err_str)s" +msgid "Failed to attach volume %(vol)s" msgstr "" -#: cinder/rpc/impl_kombu.py:482 +#: cinder/volume/driver.py:327 #, python-format -msgid "" -"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " -"again in %(sleep_time)d seconds." +msgid "Failed to copy volume %(src)s to %(dest)d" msgstr "" -#: cinder/rpc/impl_kombu.py:533 cinder/rpc/impl_qpid.py:385 +#: cinder/volume/driver.py:340 #, python-format -msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgid "copy_image_to_volume %s." msgstr "" -#: cinder/rpc/impl_kombu.py:551 cinder/rpc/impl_qpid.py:400 +#: cinder/volume/driver.py:358 #, python-format -msgid "Timed out waiting for RPC response: %s" +msgid "copy_volume_to_image %s." msgstr "" -#: cinder/rpc/impl_kombu.py:555 cinder/rpc/impl_qpid.py:404 +#: cinder/volume/driver.py:394 #, python-format -msgid "Failed to consume message from queue: %s" +msgid "Unable to access the backend storage via the path %(path)s." msgstr "" -#: cinder/rpc/impl_kombu.py:589 cinder/rpc/impl_qpid.py:434 +#: cinder/volume/driver.py:433 #, python-format -msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgid "Creating a new backup for volume %s." msgstr "" -#: cinder/rpc/impl_qpid.py:341 +#: cinder/volume/driver.py:451 #, python-format -msgid "Unable to connect to AMQP server: %s " +msgid "Restoring backup %(backup)s to volume %(volume)s." msgstr "" -#: cinder/rpc/impl_qpid.py:346 -#, python-format -msgid "Connected to AMQP server on %s" +#: cinder/volume/driver.py:474 +msgid "Extend volume not implemented" msgstr "" -#: cinder/rpc/impl_qpid.py:354 -msgid "Re-established AMQP queues" +#: cinder/volume/driver.py:533 cinder/volume/drivers/emc/emc_smis_iscsi.py:113 +msgid "ISCSI provider_location not stored, using discovery" msgstr "" -#: cinder/rpc/impl_qpid.py:412 -msgid "Error processing message. Skipping it." +#: cinder/volume/driver.py:546 +#, python-format +msgid "ISCSI discovery attempt failed for:%s" msgstr "" -#: cinder/scheduler/chance.py:47 cinder/scheduler/simple.py:91 -#: cinder/scheduler/simple.py:143 -msgid "Is the appropriate service running?" 
+#: cinder/volume/driver.py:548 +#, python-format +msgid "Error from iscsiadm -m discovery: %s" msgstr "" -#: cinder/scheduler/chance.py:52 -msgid "Could not find another compute" +#: cinder/volume/driver.py:595 +#, python-format +msgid "Could not find iSCSI export for volume %s" msgstr "" -#: cinder/scheduler/driver.py:63 +#: cinder/volume/driver.py:599 cinder/volume/drivers/emc/emc_smis_iscsi.py:156 #, python-format -msgid "Casted '%(method)s' to volume '%(host)s'" +msgid "ISCSI Discovery: Found %s" msgstr "" -#: cinder/scheduler/driver.py:80 -#, python-format -msgid "Casted '%(method)s' to compute '%(host)s'" +#: cinder/volume/driver.py:696 +msgid "The volume driver requires the iSCSI initiator name in the connector." msgstr "" -#: cinder/scheduler/driver.py:89 -#, python-format -msgid "Casted '%(method)s' to network '%(host)s'" +#: cinder/volume/driver.py:726 cinder/volume/driver.py:845 +#: cinder/volume/drivers/eqlx.py:247 cinder/volume/drivers/lvm.py:359 +#: cinder/volume/drivers/zadara.py:650 +#: cinder/volume/drivers/emc/emc_smis_common.py:859 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:235 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:602 +#: cinder/volume/drivers/netapp/iscsi.py:1032 +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/nexenta/iscsi.py:538 +#: cinder/volume/drivers/windows/windows.py:205 +msgid "Updating volume stats" +msgstr "" + +#: cinder/volume/driver.py:924 +msgid "Driver must implement initialize_connection" msgstr "" -#: cinder/scheduler/driver.py:107 +#: cinder/volume/manager.py:203 #, python-format -msgid "Casted '%(method)s' to %(topic)s '%(host)s'" +msgid "Driver path %s is deprecated, update your configuration to the new path." msgstr "" -#: cinder/scheduler/driver.py:181 -msgid "Must implement a fallback schedule" +#: cinder/volume/manager.py:209 +msgid "" +"ThinLVMVolumeDriver is deprecated, please configure LVMISCSIDriver and " +"lvm_type=thin. Continuing with those settings." msgstr "" -#: cinder/scheduler/driver.py:185 -msgid "Driver must implement schedule_prep_resize" +#: cinder/volume/manager.py:228 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)" msgstr "" -#: cinder/scheduler/driver.py:190 -msgid "Driver must implement schedule_run_instance" +#: cinder/volume/manager.py:235 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s" msgstr "" -#: cinder/scheduler/driver.py:325 -msgid "Block migration can not be used with shared storage." +#: cinder/volume/manager.py:244 +#, python-format +msgid "Re-exporting %s volumes" msgstr "" -#: cinder/scheduler/driver.py:330 -msgid "Live migration can not be used without shared storage." +#: cinder/volume/manager.py:257 +#, python-format +msgid "Failed to re-export volume %s: setting to error state" msgstr "" -#: cinder/scheduler/driver.py:367 cinder/scheduler/driver.py:457 +#: cinder/volume/manager.py:264 #, python-format -msgid "host %(dest)s is not compatible with original host %(src)s." 
+msgid "volume %s stuck in a downloading state" msgstr "" -#: cinder/scheduler/driver.py:416 +#: cinder/volume/manager.py:271 #, python-format -msgid "" -"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " -"memory(host:%(avail)s <= instance:%(mem_inst)s)" +msgid "volume %s: skipping export" msgstr "" -#: cinder/scheduler/driver.py:472 +#: cinder/volume/manager.py:273 #, python-format msgid "" -"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " -"disk(host:%(available)s <= instance:%(necessary)s)" +"Error encountered during re-exporting phase of driver initialization: " +"%(name)s" msgstr "" -#: cinder/scheduler/filter_scheduler.py:51 -#, python-format -msgid "No host selection for %s defined." +#: cinder/volume/manager.py:283 +msgid "Resuming any in progress delete operations" msgstr "" -#: cinder/scheduler/filter_scheduler.py:64 +#: cinder/volume/manager.py:286 #, python-format -msgid "Attempting to build %(num_instances)d instance(s)" +msgid "Resuming delete on volume: %s" msgstr "" -#: cinder/scheduler/filter_scheduler.py:157 -msgid "Scheduler only understands Compute nodes (for now)" +#: cinder/volume/manager.py:328 cinder/volume/manager.py:330 +msgid "Failed to create manager volume flow" msgstr "" -#: cinder/scheduler/filter_scheduler.py:199 +#: cinder/volume/manager.py:374 cinder/volume/manager.py:391 #, python-format -msgid "Filtered %(hosts)s" +msgid "volume %s: deleting" msgstr "" -#: cinder/scheduler/filter_scheduler.py:209 -#, python-format -msgid "Weighted %(weighted_host)s" +#: cinder/volume/manager.py:380 +msgid "volume is not local to this node" msgstr "" -#: cinder/scheduler/host_manager.py:144 +#: cinder/volume/manager.py:389 #, python-format -msgid "Host filter fails for ignored host %(host)s" +msgid "volume %s: removing export" msgstr "" -#: cinder/scheduler/host_manager.py:151 +#: cinder/volume/manager.py:394 #, python-format -msgid "Host filter fails for non-forced host %(host)s" +msgid "Cannot delete volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:422 +msgid "Failed to update usages deleting volume" msgstr "" -#: cinder/scheduler/host_manager.py:157 +#: cinder/volume/manager.py:427 #, python-format -msgid "Host filter function %(func)s failed for %(host)s" +msgid "volume %s: glance metadata deleted" msgstr "" -#: cinder/scheduler/host_manager.py:163 +#: cinder/volume/manager.py:430 #, python-format -msgid "Host filter passes for %(host)s" +msgid "no glance metadata found for volume %s" msgstr "" -#: cinder/scheduler/host_manager.py:272 +#: cinder/volume/manager.py:434 #, python-format -msgid "Received %(service_name)s service update from %(host)s." +msgid "volume %s: deleted successfully" msgstr "" -#: cinder/scheduler/host_manager.py:313 -msgid "host_manager only implemented for 'compute'" +#: cinder/volume/manager.py:451 +#, python-format +msgid "snapshot %s: creating" msgstr "" -#: cinder/scheduler/host_manager.py:323 +#: cinder/volume/manager.py:462 #, python-format -msgid "No service for compute ID %s" +msgid "snapshot %(snap_id)s: creating" msgstr "" -#: cinder/scheduler/manager.py:85 +#: cinder/volume/manager.py:490 #, python-format msgid "" -"Driver Method %(driver_method_name)s missing: %(e)s. 
Reverting to " -"schedule()" +"Failed updating %(snapshot_id)s metadata using the provided volumes " +"%(volume_id)s metadata" msgstr "" -#: cinder/scheduler/manager.py:150 +#: cinder/volume/manager.py:496 #, python-format -msgid "Failed to schedule_%(method)s: %(ex)s" +msgid "snapshot %s: created successfully" msgstr "" -#: cinder/scheduler/manager.py:159 +#: cinder/volume/manager.py:508 cinder/volume/manager.py:518 #, python-format -msgid "Setting instance %(instance_uuid)s to %(state)s state." +msgid "snapshot %s: deleting" msgstr "" -#: cinder/scheduler/scheduler_options.py:66 +#: cinder/volume/manager.py:526 #, python-format -msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgid "Cannot delete snapshot %s: snapshot is busy" msgstr "" -#: cinder/scheduler/scheduler_options.py:75 +#: cinder/volume/manager.py:556 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:559 #, python-format -msgid "Could not decode scheduler options: '%(e)s'" +msgid "snapshot %s: deleted successfully" msgstr "" -#: cinder/scheduler/simple.py:87 -msgid "Not enough allocatable CPU cores remaining" +#: cinder/volume/manager.py:579 +msgid "being attached by another instance" msgstr "" -#: cinder/scheduler/simple.py:137 -msgid "Not enough allocatable volume gigabytes remaining" +#: cinder/volume/manager.py:583 +msgid "being attached by another host" msgstr "" -#: cinder/scheduler/filters/core_filter.py:45 -msgid "VCPUs not set; assuming CPU collection broken" +#: cinder/volume/manager.py:587 +msgid "being attached by different mode" msgstr "" -#: cinder/tests/fake_utils.py:72 -#, python-format -msgid "Faking execution of cmd (subprocess): %s" +#: cinder/volume/manager.py:590 +msgid "status must be available or attaching" msgstr "" -#: cinder/tests/fake_utils.py:80 +#: cinder/volume/manager.py:698 #, python-format -msgid "Faked command matched %s" +msgid "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" msgstr "" -#: cinder/tests/fake_utils.py:96 +#: cinder/volume/manager.py:760 #, python-format -msgid "Faked command raised an exception %s" +msgid "Unable to fetch connection information from backend: %(err)s" msgstr "" -#: cinder/tests/fake_utils.py:101 +#: cinder/volume/manager.py:807 #, python-format -msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgid "Unable to terminate volume connection: %(err)s" msgstr "" -#: cinder/tests/fakelibvirt.py:784 -msgid "Please extend mock libvirt module to support flags" +#: cinder/volume/manager.py:854 +msgid "failed to create new_volume on destination host" msgstr "" -#: cinder/tests/fakelibvirt.py:790 -msgid "Please extend fake libvirt module to support this auth method" +#: cinder/volume/manager.py:857 +msgid "timeout creating new_volume on destination host" msgstr "" -#: cinder/tests/test_compute.py:365 cinder/tests/test_compute.py:1419 +#: cinder/volume/manager.py:880 #, python-format -msgid "Running instances: %s" +msgid "Failed to copy volume %(vol1)s to %(vol2)s" msgstr "" -#: cinder/tests/test_compute.py:371 +#: cinder/volume/manager.py:909 #, python-format -msgid "After terminating instances: %s" +msgid "" +"migrate_volume_completion: completing migration for volume %(vol1)s " +"(temporary volume %(vol2)s" msgstr "" -#: cinder/tests/test_compute.py:589 -msgid "Internal error" +#: cinder/volume/manager.py:921 +#, python-format +msgid "" +"migrate_volume_completion is cleaning up an error for volume %(vol1)s " +"(temporary volume %(vol2)s" msgstr "" -#: 
cinder/tests/test_compute.py:1430 +#: cinder/volume/manager.py:940 #, python-format -msgid "After force-killing instances: %s" +msgid "Failed to delete migration source vol %(vol)s: %(err)s" msgstr "" -#: cinder/tests/test_misc.py:92 +#: cinder/volume/manager.py:976 #, python-format -msgid "" -"The following migrations are missing a downgrade:\n" -"\t%s" +msgid "volume %s: calling driver migrate_volume" msgstr "" -#: cinder/tests/test_cinder_manage.py:169 -msgid "id" +#: cinder/volume/manager.py:1016 +msgid "Updating volume status" msgstr "" -#: cinder/tests/test_cinder_manage.py:170 -msgid "IPv4" +#: cinder/volume/manager.py:1024 +#, python-format +msgid "" +"Unable to update stats, %(driver_name)s -%(driver_version)s " +"%(config_group)s driver is uninitialized." msgstr "" -#: cinder/tests/test_cinder_manage.py:171 -msgid "IPv6" +#: cinder/volume/manager.py:1044 +#, python-format +msgid "Notification {%s} received" msgstr "" -#: cinder/tests/test_cinder_manage.py:172 -msgid "start address" +#: cinder/volume/manager.py:1091 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to extend volume by %(s_size)sG, " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" msgstr "" -#: cinder/tests/test_cinder_manage.py:173 -msgid "DNS1" +#: cinder/volume/manager.py:1103 +#, python-format +msgid "volume %s: extending" msgstr "" -#: cinder/tests/test_cinder_manage.py:174 -msgid "DNS2" +#: cinder/volume/manager.py:1105 +#, python-format +msgid "volume %s: extended successfully" msgstr "" -#: cinder/tests/test_cinder_manage.py:175 -msgid "VlanID" +#: cinder/volume/manager.py:1107 +#, python-format +msgid "volume %s: Error trying to extend volume" msgstr "" -#: cinder/tests/test_cinder_manage.py:176 -msgid "project" +#: cinder/volume/manager.py:1169 +msgid "Failed to update usages while retyping volume." msgstr "" -#: cinder/tests/test_cinder_manage.py:177 -msgid "uuid" +#: cinder/volume/manager.py:1170 +msgid "Failed to get old volume type quota reservations" msgstr "" -#: cinder/tests/test_volume.py:216 +#: cinder/volume/manager.py:1190 #, python-format -msgid "Target %s allocated" +msgid "Volume %s: retyped succesfully" msgstr "" -#: cinder/tests/test_volume.py:468 +#: cinder/volume/manager.py:1193 #, python-format -msgid "Cannot confirm exported volume id:%s." +msgid "" +"Volume %s: driver error when trying to retype, falling back to generic " +"mechanism." msgstr "" -#: cinder/tests/test_volume_types.py:58 -#, python-format -msgid "Given data: %s" +#: cinder/volume/manager.py:1204 +msgid "Retype requires migration but is not allowed." msgstr "" -#: cinder/tests/test_volume_types.py:59 -#, python-format -msgid "Result data: %s" +#: cinder/volume/manager.py:1212 +msgid "Volume must not have snapshots." 
msgstr "" -#: cinder/tests/test_xenapi.py:626 +#: cinder/volume/qos_specs.py:57 #, python-format -msgid "Creating files in %s to simulate guest agent" +msgid "Valid consumer of QoS specs are: %s" msgstr "" -#: cinder/tests/test_xenapi.py:637 +#: cinder/volume/qos_specs.py:84 cinder/volume/qos_specs.py:105 +#: cinder/volume/qos_specs.py:155 cinder/volume/qos_specs.py:197 +#: cinder/volume/qos_specs.py:211 cinder/volume/qos_specs.py:225 +#: cinder/volume/volume_types.py:43 #, python-format -msgid "Removing simulated guest agent files in %s" +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:123 cinder/volume/qos_specs.py:140 +#: cinder/volume/qos_specs.py:272 cinder/volume/volume_types.py:52 +#: cinder/volume/volume_types.py:99 +msgid "id cannot be None" msgstr "" -#: cinder/tests/api/openstack/compute/test_servers.py:2144 +#: cinder/volume/qos_specs.py:156 #, python-format -msgid "Quota exceeded: code=%(code)s" +msgid "Failed to get all associations of qos specs %s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:57 +#: cinder/volume/qos_specs.py:189 #, python-format -msgid "_create: %s" +msgid "" +"Type %(type_id)s is already associated with another qos specs: " +"%(qos_specs_id)s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:66 +#: cinder/volume/qos_specs.py:198 #, python-format -msgid "_delete: %s" +msgid "Failed to associate qos specs %(id)s with type: %(vol_type_id)s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:75 +#: cinder/volume/qos_specs.py:212 #, python-format -msgid "_get: %s" +msgid "Failed to disassociate qos specs %(id)s with type: %(vol_type_id)s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:85 +#: cinder/volume/qos_specs.py:226 #, python-format -msgid "_get_all: %s" +msgid "Failed to disassociate qos specs %s." +msgstr "" + +#: cinder/volume/qos_specs.py:284 cinder/volume/volume_types.py:111 +msgid "name cannot be None" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:125 +#: cinder/volume/utils.py:144 #, python-format -msgid "test_snapshot_create: param=%s" +msgid "" +"Incorrect value error: %(blocksize)s, it may indicate that " +"'volume_dd_blocksize' was configured incorrectly. Fall back to default." msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:134 +#: cinder/volume/volume_types.py:130 #, python-format -msgid "test_snapshot_create: resp_dict=%s" +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:156 +#: cinder/volume/drivers/block_device.py:131 +#: cinder/volume/drivers/block_device.py:143 cinder/volume/drivers/lvm.py:654 +#: cinder/volume/drivers/lvm.py:669 #, python-format -msgid "test_snapshot_create_force: param=%s" +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:165 +#: cinder/volume/drivers/block_device.py:157 cinder/volume/drivers/lvm.py:687 #, python-format -msgid "test_snapshot_create_force: resp_dict=%s" +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:205 +#: cinder/volume/drivers/block_device.py:183 cinder/volume/drivers/lvm.py:483 #, python-format -msgid "test_snapshot_show: resp=%s" +msgid "Skipping ensure_export. 
No iscsi_target provision for volume: %s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:231 +#: cinder/volume/drivers/block_device.py:200 cinder/volume/drivers/lvm.py:504 #, python-format -msgid "test_snapshot_detail: resp_dict=%s" +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" msgstr "" -#: cinder/tests/integrated/test_login.py:31 +#: cinder/volume/drivers/block_device.py:272 cinder/volume/drivers/lvm.py:227 #, python-format -msgid "flavor: %s" +msgid "Performing secure delete on volume: %s" msgstr "" -#: cinder/tests/integrated/api/client.py:38 +#: cinder/volume/drivers/block_device.py:287 #, python-format -msgid "" -"%(message)s\n" -"Status Code: %(_status)s\n" -"Body: %(_body)s" +msgid "Error unrecognized volume_clear option: %s" msgstr "" -#: cinder/tests/integrated/api/client.py:47 -msgid "Authentication error" +#: cinder/volume/drivers/block_device.py:311 cinder/volume/drivers/lvm.py:300 +#: cinder/volume/drivers/zadara.py:509 cinder/volume/drivers/nexenta/nfs.py:189 +#, python-format +msgid "Creating clone of volume: %s" msgstr "" -#: cinder/tests/integrated/api/client.py:55 -msgid "Authorization error" +#: cinder/volume/drivers/block_device.py:380 +msgid "No free disk" msgstr "" -#: cinder/tests/integrated/api/client.py:63 -msgid "Item not found" +#: cinder/volume/drivers/block_device.py:393 +msgid "No big enough free disk" msgstr "" -#: cinder/tests/integrated/api/client.py:105 +#: cinder/volume/drivers/coraid.py:84 #, python-format -msgid "Doing %(method)s on %(relative_url)s" +msgid "Invalid ESM url scheme \"%s\". Supported https only." msgstr "" -#: cinder/tests/integrated/api/client.py:107 -#, python-format -msgid "Body: %s" +#: cinder/volume/drivers/coraid.py:111 +msgid "Invalid REST handle name. Expected path." msgstr "" -#: cinder/tests/integrated/api/client.py:125 +#: cinder/volume/drivers/coraid.py:134 #, python-format -msgid "%(auth_uri)s => code %(http_status)s" +msgid "Call to json.loads() failed: %(ex)s. Response: %(resp)s" msgstr "" -#: cinder/tests/integrated/api/client.py:151 -#, python-format -msgid "%(relative_uri)s => code %(http_status)s" +#: cinder/volume/drivers/coraid.py:224 +msgid "Session is expired. Relogin on ESM." msgstr "" -#: cinder/tests/integrated/api/client.py:161 -msgid "Unexpected status code" +#: cinder/volume/drivers/coraid.py:244 +msgid "Reply is empty." msgstr "" -#: cinder/tests/integrated/api/client.py:168 -#, python-format -msgid "Decoding JSON: %s" +#: cinder/volume/drivers/coraid.py:246 +msgid "Error message is empty." msgstr "" -#: cinder/tests/rpc/common.py:133 +#: cinder/volume/drivers/coraid.py:284 #, python-format -msgid "Nested received %(queue)s, %(value)s" +msgid "Coraid Appliance ping failed: %s" msgstr "" -#: cinder/tests/rpc/common.py:142 +#: cinder/volume/drivers/coraid.py:297 #, python-format -msgid "Nested return %s" +msgid "Volume \"%(name)s\" created with VSX LUN \"%(lun)s\"" msgstr "" -#: cinder/tests/rpc/common.py:160 -msgid "RPC backend does not support timeouts" +#: cinder/volume/drivers/coraid.py:311 +#, python-format +msgid "Volume \"%s\" deleted." msgstr "" -#: cinder/tests/rpc/common.py:227 cinder/tests/rpc/common.py:233 +#: cinder/volume/drivers/coraid.py:315 #, python-format -msgid "Received %s" +msgid "Resize volume \"%(name)s\" to %(size)s GB." 
msgstr "" -#: cinder/virt/connection.py:85 -msgid "Failed to open connection to the hypervisor" +#: cinder/volume/drivers/coraid.py:319 +#, python-format +msgid "Repository for volume \"%(name)s\" found: \"%(repo)s\"" msgstr "" -#: cinder/virt/fake.py:270 cinder/virt/xenapi_conn.py:396 -#: cinder/virt/baremetal/proxy.py:716 cinder/virt/libvirt/connection.py:2045 +#: cinder/volume/drivers/coraid.py:333 #, python-format -msgid "Compute_service record created for %s " +msgid "Volume \"%(name)s\" resized. New size is %(size)s GB." msgstr "" -#: cinder/virt/fake.py:273 cinder/virt/xenapi_conn.py:399 -#: cinder/virt/baremetal/proxy.py:720 cinder/virt/libvirt/connection.py:2048 -#, python-format -msgid "Compute_service record updated for %s " +#: cinder/volume/drivers/coraid.py:385 +msgid "Cannot create clone volume in different repository." msgstr "" -#: cinder/virt/firewall.py:130 +#: cinder/volume/drivers/coraid.py:505 #, python-format -msgid "Attempted to unfilter instance %s which is not filtered" +msgid "Initialize connection %(shelf)s/%(lun)s for %(name)s" msgstr "" -#: cinder/virt/firewall.py:137 +#: cinder/volume/drivers/eqlx.py:139 #, python-format -msgid "Filters added to instance %s" +msgid "" +"CLI output\n" +"%s" msgstr "" -#: cinder/virt/firewall.py:139 -msgid "Provider Firewall Rules refreshed" +#: cinder/volume/drivers/eqlx.py:154 +msgid "Reading CLI MOTD" msgstr "" -#: cinder/virt/firewall.py:291 +#: cinder/volume/drivers/eqlx.py:158 #, python-format -msgid "Adding security group rule: %r" +msgid "Setting CLI terminal width: '%s'" msgstr "" -#: cinder/virt/firewall.py:403 cinder/virt/xenapi/firewall.py:87 +#: cinder/volume/drivers/eqlx.py:162 #, python-format -msgid "Adding provider rule: %s" +msgid "Sending CLI command: '%s'" msgstr "" -#: cinder/virt/images.py:86 -msgid "'qemu-img info' parsing failed." +#: cinder/volume/drivers/eqlx.py:169 +msgid "Error executing EQL command" msgstr "" -#: cinder/virt/images.py:92 +#: cinder/volume/drivers/eqlx.py:199 #, python-format -msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgid "EQL-driver: executing \"%s\"" msgstr "" -#: cinder/virt/images.py:104 +#: cinder/volume/drivers/eqlx.py:208 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:383 #, python-format -msgid "Converted to raw, but format is now %s" +msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" msgstr "" -#: cinder/virt/vmwareapi_conn.py:105 -msgid "" -"Must specify vmwareapi_host_ip,vmwareapi_host_username and " -"vmwareapi_host_password to useconnection_type=vmwareapi" +#: cinder/volume/drivers/eqlx.py:215 cinder/volume/drivers/san/san.py:149 +#, fuzzy, python-format +msgid "Error running SSH command: %s" +msgstr "Merong hindi-inaasahang pagkakamali habang tumatakbo ang command." 
+ +#: cinder/volume/drivers/eqlx.py:282 +#, python-format +msgid "Volume %s does not exist, it may have already been deleted" msgstr "" -#: cinder/virt/vmwareapi_conn.py:276 +#: cinder/volume/drivers/eqlx.py:300 #, python-format -msgid "In vmwareapi:_create_session, got this exception: %s" +msgid "EQL-driver: Setup is complete, group IP is %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:304 +msgid "Failed to setup the Dell EqualLogic driver" msgstr "" -#: cinder/virt/vmwareapi_conn.py:359 +#: cinder/volume/drivers/eqlx.py:320 #, python-format -msgid "In vmwareapi:_call_method, got this exception: %s" +msgid "Failed to create volume %s" msgstr "" -#: cinder/virt/vmwareapi_conn.py:398 +#: cinder/volume/drivers/eqlx.py:329 #, python-format -msgid "Task [%(task_name)s] %(task_ref)s status: success" +msgid "Volume %s was not found while trying to delete it" msgstr "" -#: cinder/virt/vmwareapi_conn.py:404 +#: cinder/volume/drivers/eqlx.py:333 #, python-format -msgid "Task [%(task_name)s] %(task_ref)s status: error %(error_info)s" +msgid "Failed to delete volume %s" msgstr "" -#: cinder/virt/vmwareapi_conn.py:409 +#: cinder/volume/drivers/eqlx.py:348 #, python-format -msgid "In vmwareapi:_poll_task, Got this error %s" +msgid "Failed to create snapshot of volume %s" msgstr "" -#: cinder/virt/xenapi_conn.py:140 -msgid "" -"Must specify xenapi_connection_url, xenapi_connection_username " -"(optionally), and xenapi_connection_password to use " -"connection_type=xenapi" +#: cinder/volume/drivers/eqlx.py:361 +#, python-format +msgid "Failed to create volume from snapshot %s" msgstr "" -#: cinder/virt/xenapi_conn.py:329 cinder/virt/libvirt/connection.py:472 -msgid "Could not determine iscsi initiator name" +#: cinder/volume/drivers/eqlx.py:374 +#, python-format +msgid "Failed to create clone of volume %s" msgstr "" -#: cinder/virt/xenapi_conn.py:460 -msgid "Host startup on XenServer is not supported." +#: cinder/volume/drivers/eqlx.py:384 +#, python-format +msgid "Failed to delete snapshot %(snap)s of volume %(vol)s" msgstr "" -#: cinder/virt/xenapi_conn.py:489 -msgid "Unable to log in to XenAPI (is the Dom0 disk full?)" +#: cinder/volume/drivers/eqlx.py:405 +#, python-format +msgid "Failed to initialize connection to volume %s" msgstr "" -#: cinder/virt/xenapi_conn.py:527 -msgid "Host is member of a pool, but DB says otherwise" +#: cinder/volume/drivers/eqlx.py:415 +#, python-format +msgid "Failed to terminate connection to volume %s" msgstr "" -#: cinder/virt/xenapi_conn.py:599 cinder/virt/xenapi_conn.py:612 +#: cinder/volume/drivers/eqlx.py:436 #, python-format -msgid "Got exception: %s" +msgid "Volume %s is not found!, it may have been deleted" msgstr "" -#: cinder/virt/baremetal/dom.py:93 -msgid "No domains exist." 
+#: cinder/volume/drivers/eqlx.py:440 +#, python-format +msgid "Failed to ensure export of volume %s" msgstr "" -#: cinder/virt/baremetal/dom.py:95 +#: cinder/volume/drivers/eqlx.py:459 #, python-format -msgid "============= initial domains =========== : %s" +msgid "Failed to extend_volume %(name)s from %(current_size)sGB to %(new_size)sGB" msgstr "" -#: cinder/virt/baremetal/dom.py:99 -msgid "Building domain: to be removed" +#: cinder/volume/drivers/glusterfs.py:86 +#, python-format +msgid "There's no Gluster config file configured (%s)" msgstr "" -#: cinder/virt/baremetal/dom.py:103 -msgid "Not running domain: remove" +#: cinder/volume/drivers/glusterfs.py:91 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" msgstr "" -#: cinder/virt/baremetal/dom.py:111 -msgid "domain running on an unknown node: discarded" +#: cinder/volume/drivers/glusterfs.py:103 +msgid "mount.glusterfs is not installed" msgstr "" -#: cinder/virt/baremetal/dom.py:127 +#: cinder/volume/drivers/glusterfs.py:161 #, python-format -msgid "No such domain (%s)" +msgid "Cloning volume %(src)s to volume %(dst)s" msgstr "" -#: cinder/virt/baremetal/dom.py:134 -#, python-format -msgid "Failed power down Bare-metal node %s" +#: cinder/volume/drivers/glusterfs.py:166 +msgid "Volume status must be 'available'." msgstr "" -#: cinder/virt/baremetal/dom.py:143 -msgid "deactivate -> activate fails" +#: cinder/volume/drivers/glusterfs.py:202 cinder/volume/drivers/nfs.py:122 +#: cinder/volume/drivers/netapp/nfs.py:753 +#, python-format +msgid "casted to %s" msgstr "" -#: cinder/virt/baremetal/dom.py:153 -msgid "destroy_domain: no such domain" +#: cinder/volume/drivers/glusterfs.py:216 +msgid "Snapshot status must be \"available\" to clone." msgstr "" -#: cinder/virt/baremetal/dom.py:154 +#: cinder/volume/drivers/glusterfs.py:238 #, python-format -msgid "No such domain %s" +msgid "snapshot: %(snap)s, volume: %(vol)s, volume_size: %(size)s" msgstr "" -#: cinder/virt/baremetal/dom.py:161 +#: cinder/volume/drivers/glusterfs.py:257 #, python-format -msgid "Domains: %s" +msgid "will copy from snapshot at %s" msgstr "" -#: cinder/virt/baremetal/dom.py:163 +#: cinder/volume/drivers/glusterfs.py:275 cinder/volume/drivers/nfs.py:172 #, python-format -msgid "Nodes: %s" +msgid "Volume %s does not have provider_location specified, skipping" msgstr "" -#: cinder/virt/baremetal/dom.py:166 +#: cinder/volume/drivers/glusterfs.py:373 #, python-format -msgid "After storing domains: %s" +msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" msgstr "" -#: cinder/virt/baremetal/dom.py:169 -msgid "deactivation/removing domain failed" +#: cinder/volume/drivers/glusterfs.py:403 +#, python-format +msgid "nova call result: %s" msgstr "" -#: cinder/virt/baremetal/dom.py:176 -msgid "===== Domain is being created =====" +#: cinder/volume/drivers/glusterfs.py:405 +msgid "Call to Nova to create snapshot failed" msgstr "" -#: cinder/virt/baremetal/dom.py:179 -msgid "Same domain name already exists" +#: cinder/volume/drivers/glusterfs.py:427 +msgid "Nova returned \"error\" status while creating snapshot." msgstr "" -#: cinder/virt/baremetal/dom.py:181 -msgid "create_domain: before get_idle_node" +#: cinder/volume/drivers/glusterfs.py:431 +#, python-format +msgid "Status of snapshot %(id)s is now %(status)s" msgstr "" -#: cinder/virt/baremetal/dom.py:198 +#: cinder/volume/drivers/glusterfs.py:444 #, python-format -msgid "Created new domain: %s" +msgid "Timed out while waiting for Nova update for creation of snapshot %s." 
msgstr "" -#: cinder/virt/baremetal/dom.py:213 +#: cinder/volume/drivers/glusterfs.py:456 #, python-format -msgid "Failed to boot Bare-metal node %s" +msgid "create snapshot: %s" msgstr "" -#: cinder/virt/baremetal/dom.py:222 -msgid "No such domain exists" +#: cinder/volume/drivers/glusterfs.py:457 +#, python-format +msgid "volume id: %s" msgstr "" -#: cinder/virt/baremetal/dom.py:226 -#, python-format -msgid "change_domain_state: to new state %s" +#: cinder/volume/drivers/glusterfs.py:532 +msgid "'active' must be present when writing snap_info." msgstr "" -#: cinder/virt/baremetal/dom.py:233 +#: cinder/volume/drivers/glusterfs.py:562 #, python-format -msgid "Stored fake domains to the file: %s" +msgid "deleting snapshot %s" msgstr "" -#: cinder/virt/baremetal/dom.py:244 -msgid "domain does not exist" +#: cinder/volume/drivers/glusterfs.py:566 +msgid "Volume status must be \"available\" or \"in-use\"." msgstr "" -#: cinder/virt/baremetal/nodes.py:42 +#: cinder/volume/drivers/glusterfs.py:582 #, python-format -msgid "Unknown baremetal driver %(d)s" +msgid "" +"Snapshot record for %s is not present, allowing snapshot_delete to " +"proceed." msgstr "" -#: cinder/virt/baremetal/proxy.py:148 +#: cinder/volume/drivers/glusterfs.py:587 #, python-format -msgid "Error encountered when destroying instance '%(name)s': %(ex)s" +msgid "snapshot_file for this snap is %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:162 +#: cinder/volume/drivers/glusterfs.py:608 #, python-format -msgid "instance %(instance_name)s: deleting instance files %(target)s" +msgid "No base file found for %s." msgstr "" -#: cinder/virt/baremetal/proxy.py:189 +#: cinder/volume/drivers/glusterfs.py:625 #, python-format -msgid "instance %s: rebooted" -msgstr "" - -#: cinder/virt/baremetal/proxy.py:192 -msgid "_wait_for_reboot failed" +msgid "No %(base_id)s found for %(file)s" msgstr "" -#: cinder/virt/baremetal/proxy.py:222 +#: cinder/volume/drivers/glusterfs.py:680 #, python-format -msgid "instance %s: rescued" +msgid "No file found with %s as backing file." msgstr "" -#: cinder/virt/baremetal/proxy.py:225 -msgid "_wait_for_rescue failed" +#: cinder/volume/drivers/glusterfs.py:690 +#, python-format +msgid "No snap found with %s as backing file." msgstr "" -#: cinder/virt/baremetal/proxy.py:242 -msgid "<============= spawn of baremetal =============>" +#: cinder/volume/drivers/glusterfs.py:701 +#, python-format +msgid "No file depends on %s." msgstr "" -#: cinder/virt/baremetal/proxy.py:255 +#: cinder/volume/drivers/glusterfs.py:727 #, python-format -msgid "instance %s: is building" +msgid "Check condition failed: %s expected to be None." msgstr "" -#: cinder/virt/baremetal/proxy.py:260 -msgid "Key is injected but instance is not running yet" +#: cinder/volume/drivers/glusterfs.py:778 +msgid "Call to Nova delete snapshot failed" msgstr "" -#: cinder/virt/baremetal/proxy.py:265 +#: cinder/volume/drivers/glusterfs.py:796 #, python-format -msgid "instance %s: booted" +msgid "status of snapshot %s is still \"deleting\"... waiting" msgstr "" -#: cinder/virt/baremetal/proxy.py:268 +#: cinder/volume/drivers/glusterfs.py:802 #, python-format -msgid "~~~~~~ current state = %s ~~~~~~" +msgid "Unable to delete snapshot %(id)s, status: %(status)s." msgstr "" -#: cinder/virt/baremetal/proxy.py:269 +#: cinder/volume/drivers/glusterfs.py:815 #, python-format -msgid "instance %s spawned successfully" +msgid "Timed out while waiting for Nova update for deletion of snapshot %(id)s." 
msgstr "" -#: cinder/virt/baremetal/proxy.py:272 +#: cinder/volume/drivers/glusterfs.py:904 #, python-format -msgid "instance %s:not booted" +msgid "%s must be a valid raw or qcow2 image." msgstr "" -#: cinder/virt/baremetal/proxy.py:274 -msgid "Bremetal assignment is overcommitted." +#: cinder/volume/drivers/glusterfs.py:967 +msgid "Extend volume is only supported for this driver when no snapshots exist." msgstr "" -#: cinder/virt/baremetal/proxy.py:354 +#: cinder/volume/drivers/glusterfs.py:975 #, python-format -msgid "instance %s: Creating image" +msgid "Unrecognized backing format: %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:473 +#: cinder/volume/drivers/glusterfs.py:990 #, python-format -msgid "instance %(inst_name)s: injecting %(injection)s into image %(img_id)s" +msgid "creating new volume at %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:484 +#: cinder/volume/drivers/glusterfs.py:993 #, python-format -msgid "" -"instance %(inst_name)s: ignoring error injecting data into image " -"%(img_id)s (%(e)s)" +msgid "file already exists at %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:529 +#: cinder/volume/drivers/glusterfs.py:1019 cinder/volume/drivers/nfs.py:159 #, python-format -msgid "instance %s: starting toXML method" +msgid "Exception during mounting %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:531 +#: cinder/volume/drivers/glusterfs.py:1021 #, python-format -msgid "instance %s: finished toXML method" +msgid "Available shares: %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:574 cinder/virt/libvirt/connection.py:1815 +#: cinder/volume/drivers/glusterfs.py:1038 +#, python-format msgid "" -"Cannot get the number of cpu, because this function is not implemented " -"for this platform. This error can be safely ignored for now." +"GlusterFS share at %(dir)s is not writable by the Cinder volume service. " +"Snapshot operations will not be supported." msgstr "" -#: cinder/virt/baremetal/proxy.py:714 +#: cinder/volume/drivers/gpfs.py:96 #, python-format -msgid "#### RLK: cpu_arch = %s " -msgstr "" - -#: cinder/virt/baremetal/proxy.py:746 -msgid "Updating!" +msgid "GPFS is not active. Detailed output: %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:773 cinder/virt/libvirt/connection.py:2609 -#: cinder/virt/xenapi/host.py:129 -msgid "Updating host stats" +#: cinder/volume/drivers/gpfs.py:97 +#, python-format +msgid "GPFS is not running - state: %s" msgstr "" -#: cinder/virt/baremetal/tilera.py:185 -msgid "free_node...." +#: cinder/volume/drivers/gpfs.py:140 +msgid "Option gpfs_mount_point_base is not set correctly." msgstr "" -#: cinder/virt/baremetal/tilera.py:216 -#, python-format -msgid "deactivate_node is called for node_id = %(id)s node_ip = %(ip)s" +#: cinder/volume/drivers/gpfs.py:147 +msgid "Option gpfs_images_share_mode is not set correctly." msgstr "" -#: cinder/virt/baremetal/tilera.py:221 -msgid "status of node is set to 0" +#: cinder/volume/drivers/gpfs.py:153 +msgid "Option gpfs_images_dir is not set correctly." msgstr "" -#: cinder/virt/baremetal/tilera.py:232 -msgid "rootfs is already removed" +#: cinder/volume/drivers/gpfs.py:160 +#, python-format +msgid "" +"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " +"belong to different file systems" msgstr "" -#: cinder/virt/baremetal/tilera.py:264 -msgid "Before ping to the bare-metal node" +#: cinder/volume/drivers/gpfs.py:169 +#, python-format +msgid "" +"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in " +"cluster daemon level %(cur)s - must be at least at level %(min)s." 
msgstr "" -#: cinder/virt/baremetal/tilera.py:275 +#: cinder/volume/drivers/gpfs.py:183 #, python-format -msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is ready" +msgid "%s must be an absolute path." msgstr "" -#: cinder/virt/baremetal/tilera.py:279 +#: cinder/volume/drivers/gpfs.py:188 #, python-format -msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is not ready, out_msg=%(out_msg)s" +msgid "%s is not a directory." msgstr "" -#: cinder/virt/baremetal/tilera.py:292 -msgid "Noting to do for tilera nodes: vmlinux is in CF" +#: cinder/volume/drivers/gpfs.py:197 +#, python-format +msgid "" +"The GPFS filesystem %(fs)s is not at the required release level. Current" +" level is %(cur)s, must be at least %(min)s." msgstr "" -#: cinder/virt/baremetal/tilera.py:316 -msgid "activate_node" +#: cinder/volume/drivers/gpfs.py:556 +#, python-format +msgid "Failed to resize volume %(volume_id)s, error: %(error)s" msgstr "" -#: cinder/virt/baremetal/tilera.py:330 -msgid "Node is unknown error state." +#: cinder/volume/drivers/gpfs.py:604 +#, python-format +msgid "mkfs failed on volume %(vol)s, error message was: %(err)s" msgstr "" -#: cinder/virt/disk/api.py:165 -msgid "no capable image handler configured" +#: cinder/volume/drivers/gpfs.py:637 +#, python-format +msgid "" +"%s cannot be accessed. Verify that GPFS is active and file system is " +"mounted." msgstr "" -#: cinder/virt/disk/api.py:178 +#: cinder/volume/drivers/lvm.py:189 #, python-format -msgid "unknown disk image handler: %s" +msgid "Unabled to delete due to existing snapshot for volume: %s" msgstr "" -#: cinder/virt/disk/api.py:189 -msgid "image already mounted" +#: cinder/volume/drivers/lvm.py:215 +#, python-format +msgid "Volume device file path %s does not exist." msgstr "" -#: cinder/virt/disk/api.py:276 cinder/virt/disk/guestfs.py:64 -#: cinder/virt/disk/guestfs.py:78 cinder/virt/disk/mount.py:100 +#: cinder/volume/drivers/lvm.py:221 #, python-format -msgid "Failed to mount filesystem: %s" +msgid "Size for volume: %s not found, cannot secure delete." msgstr "" -#: cinder/virt/disk/api.py:291 +#: cinder/volume/drivers/lvm.py:262 #, python-format -msgid "Failed to remove container: %s" +msgid "snapshot: %s not found, skipping delete operations" msgstr "" -#: cinder/virt/disk/api.py:441 +#: cinder/volume/drivers/lvm.py:361 #, python-format -msgid "User %(username)s not found in password file." +msgid "Unable to update stats on non-initialized Volume Group: %s" msgstr "" -#: cinder/virt/disk/api.py:457 +#: cinder/volume/drivers/lvm.py:462 #, python-format -msgid "User %(username)s not found in shadow file." 
+msgid "Error creating iSCSI target, retrying creation for target: %s" msgstr "" -#: cinder/virt/disk/guestfs.py:39 +#: cinder/volume/drivers/lvm.py:482 #, python-format -msgid "unsupported partition: %s" +msgid "volume_info:%s" msgstr "" -#: cinder/virt/disk/guestfs.py:77 -msgid "unknown guestmount error" +#: cinder/volume/drivers/lvm.py:518 +msgid "Detected inconsistency in provider_location id" msgstr "" -#: cinder/virt/disk/loop.py:30 +#: cinder/volume/drivers/lvm.py:519 cinder/volume/drivers/lvm.py:724 +#: cinder/volume/drivers/huawei/rest_common.py:1225 #, python-format -msgid "Could not attach image to loopback: %s" -msgstr "" - -#: cinder/virt/disk/mount.py:76 -msgid "no partitions found" +msgid "%s" msgstr "" -#: cinder/virt/disk/mount.py:77 +#: cinder/volume/drivers/lvm.py:573 #, python-format -msgid "Failed to map partitions: %s" +msgid "Symbolic link %s not found" msgstr "" -#: cinder/virt/disk/nbd.py:58 -msgid "nbd unavailable: module not loaded" +#: cinder/volume/drivers/nfs.py:109 +msgid "Driver specific implementation needs to return mount_point_base." msgstr "" -#: cinder/virt/disk/nbd.py:63 -msgid "No free nbd devices" +#: cinder/volume/drivers/nfs.py:263 +#, python-format +msgid "Expected volume size was %d" msgstr "" -#: cinder/virt/disk/nbd.py:81 +#: cinder/volume/drivers/nfs.py:264 #, python-format -msgid "qemu-nbd error: %s" +msgid " but size is now %d" msgstr "" -#: cinder/virt/disk/nbd.py:93 +#: cinder/volume/drivers/nfs.py:361 #, python-format -msgid "nbd device %s did not show up" +msgid "%s is already mounted" msgstr "" -#: cinder/virt/libvirt/connection.py:265 +#: cinder/volume/drivers/nfs.py:421 #, python-format -msgid "Connecting to libvirt: %s" +msgid "There's no NFS config file configured (%s)" msgstr "" -#: cinder/virt/libvirt/connection.py:286 -msgid "Connection to libvirt broke" +#: cinder/volume/drivers/nfs.py:426 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" msgstr "" -#: cinder/virt/libvirt/connection.py:388 +#: cinder/volume/drivers/nfs.py:431 #, python-format -msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s" +msgid "NFS config 'nfs_oversub_ratio' invalid. Must be > 0: %s" msgstr "" -#: cinder/virt/libvirt/connection.py:400 +#: cinder/volume/drivers/nfs.py:439 #, python-format -msgid "" -"Error from libvirt during saved instance removal. Code=%(errcode)s " -"Error=%(e)s" +msgid "NFS config 'nfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" msgstr "" -#: cinder/virt/libvirt/connection.py:411 +#: cinder/volume/drivers/nfs.py:493 #, python-format -msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s" +msgid "Selected %s as target nfs share." msgstr "" -#: cinder/virt/libvirt/connection.py:423 -msgid "Instance destroyed successfully." +#: cinder/volume/drivers/nfs.py:526 +#, python-format +msgid "%s is above nfs_used_ratio" msgstr "" -#: cinder/virt/libvirt/connection.py:435 +#: cinder/volume/drivers/nfs.py:529 #, python-format -msgid "Error from libvirt during unfilter. 
Code=%(errcode)s Error=%(e)s" +msgid "%s is above nfs_oversub_ratio" msgstr "" -#: cinder/virt/libvirt/connection.py:461 +#: cinder/volume/drivers/nfs.py:532 #, python-format -msgid "Deleting instance files %(target)s" +msgid "%s reserved space is above nfs_oversub_ratio" msgstr "" -#: cinder/virt/libvirt/connection.py:554 -msgid "attaching LXC block device" +#: cinder/volume/drivers/rbd.py:160 +#, python-format +msgid "Invalid argument - whence=%s not supported" msgstr "" -#: cinder/virt/libvirt/connection.py:567 -msgid "detaching LXC block device" +#: cinder/volume/drivers/rbd.py:164 +msgid "Invalid argument" msgstr "" -#: cinder/virt/libvirt/connection.py:692 -msgid "Instance soft rebooted successfully." +#: cinder/volume/drivers/rbd.py:183 +msgid "fileno() not supported by RBD()" msgstr "" -#: cinder/virt/libvirt/connection.py:696 -msgid "Failed to soft reboot instance." +#: cinder/volume/drivers/rbd.py:210 +#, python-format +msgid "error opening rbd image %s" msgstr "" -#: cinder/virt/libvirt/connection.py:725 -msgid "Instance shutdown successfully." +#: cinder/volume/drivers/rbd.py:259 +msgid "rados and rbd python libraries not found" msgstr "" -#: cinder/virt/libvirt/connection.py:761 cinder/virt/libvirt/connection.py:905 -msgid "During reboot, instance disappeared." +#: cinder/volume/drivers/rbd.py:265 +msgid "error connecting to ceph cluster" msgstr "" -#: cinder/virt/libvirt/connection.py:766 -msgid "Instance rebooted successfully." +#: cinder/volume/drivers/rbd.py:346 cinder/volume/drivers/sheepdog.py:178 +msgid "error refreshing volume stats" msgstr "" -#: cinder/virt/libvirt/connection.py:867 cinder/virt/xenapi/vmops.py:1358 +#: cinder/volume/drivers/rbd.py:377 #, python-format -msgid "" -"Found %(migration_count)d unconfirmed migrations older than " -"%(confirm_window)d seconds" +msgid "clone depth exceeds limit of %s" msgstr "" -#: cinder/virt/libvirt/connection.py:871 +#: cinder/volume/drivers/rbd.py:411 #, python-format -msgid "Automatically confirming migration %d" +msgid "maximum clone depth (%d) has been reached - flattening source volume" msgstr "" -#: cinder/virt/libvirt/connection.py:896 -msgid "Instance is running" +#: cinder/volume/drivers/rbd.py:423 +#, python-format +msgid "flattening source volume %s" msgstr "" -#: cinder/virt/libvirt/connection.py:910 -msgid "Instance spawned successfully." 
+#: cinder/volume/drivers/rbd.py:435 +#, python-format +msgid "creating snapshot='%s'" msgstr "" -#: cinder/virt/libvirt/connection.py:926 +#: cinder/volume/drivers/rbd.py:445 #, python-format -msgid "data: %(data)r, fpath: %(fpath)r" +msgid "cloning '%(src_vol)s@%(src_snap)s' to '%(dest)s'" msgstr "" -#: cinder/virt/libvirt/connection.py:978 -msgid "Guest does not have a console available" +#: cinder/volume/drivers/rbd.py:459 +msgid "clone created successfully" msgstr "" -#: cinder/virt/libvirt/connection.py:1020 +#: cinder/volume/drivers/rbd.py:468 #, python-format -msgid "Path '%(path)s' supports direct I/O" +msgid "creating volume '%s'" msgstr "" -#: cinder/virt/libvirt/connection.py:1024 +#: cinder/volume/drivers/rbd.py:484 #, python-format -msgid "Path '%(path)s' does not support direct I/O: '%(ex)s'" +msgid "flattening %(pool)s/%(img)s" msgstr "" -#: cinder/virt/libvirt/connection.py:1028 cinder/virt/libvirt/connection.py:1032 +#: cinder/volume/drivers/rbd.py:490 #, python-format -msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'" +msgid "cloning %(pool)s/%(img)s@%(snap)s to %(dst)s" msgstr "" -#: cinder/virt/libvirt/connection.py:1153 -msgid "Creating image" +#: cinder/volume/drivers/rbd.py:527 +msgid "volume has no backup snaps" msgstr "" -#: cinder/virt/libvirt/connection.py:1339 +#: cinder/volume/drivers/rbd.py:550 #, python-format -msgid "Injecting %(injection)s into image %(img_id)s" +msgid "volume %s is not a clone" msgstr "" -#: cinder/virt/libvirt/connection.py:1349 +#: cinder/volume/drivers/rbd.py:568 #, python-format -msgid "Ignoring error injecting data into image %(img_id)s (%(e)s)" +msgid "deleting parent snapshot %s" msgstr "" -#: cinder/virt/libvirt/connection.py:1381 +#: cinder/volume/drivers/rbd.py:579 #, python-format -msgid "block_device_list %s" +msgid "deleting parent %s" msgstr "" -#: cinder/virt/libvirt/connection.py:1658 -msgid "Starting toXML method" +#: cinder/volume/drivers/rbd.py:593 +#, python-format +msgid "volume %s no longer exists in backend" msgstr "" -#: cinder/virt/libvirt/connection.py:1662 -msgid "Finished toXML method" +#: cinder/volume/drivers/rbd.py:609 +msgid "volume has clone snapshot(s)" msgstr "" -#: cinder/virt/libvirt/connection.py:1679 +#: cinder/volume/drivers/rbd.py:625 #, python-format +msgid "deleting rbd volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:629 msgid "" -"Error from libvirt while looking up %(instance_name)s: [Error Code " -"%(error_code)s] %(ex)s" +"ImageBusy error raised while deleting rbd volume. This may have been " +"caused by a connection from a client that has crashed and, if so, may be " +"resolved by retrying the delete after 30 seconds has elapsed." 
msgstr "" -#: cinder/virt/libvirt/connection.py:1920 -msgid "libvirt version is too old (does not support getVersion)" +#: cinder/volume/drivers/rbd.py:642 +msgid "volume is a clone so cleaning references" msgstr "" -#: cinder/virt/libvirt/connection.py:1942 +#: cinder/volume/drivers/rbd.py:696 #, python-format -msgid "'' must be 1, but %d\n" +msgid "connection data: %s" msgstr "" -#: cinder/virt/libvirt/connection.py:1969 -#, python-format -msgid "topology (%(topology)s) must have %(ks)s" +#: cinder/volume/drivers/rbd.py:705 +msgid "Not stored in rbd" msgstr "" -#: cinder/virt/libvirt/connection.py:2067 -#, python-format -msgid "" -"Instance launched has CPU info:\n" -"%s" +#: cinder/volume/drivers/rbd.py:709 +msgid "Blank components" msgstr "" -#: cinder/virt/libvirt/connection.py:2079 -#, python-format -msgid "" -"CPU doesn't have compatibility.\n" -"\n" -"%(ret)s\n" -"\n" -"Refer to %(u)s" +#: cinder/volume/drivers/rbd.py:712 +msgid "Not an rbd snapshot" msgstr "" -#: cinder/virt/libvirt/connection.py:2136 +#: cinder/volume/drivers/rbd.py:724 #, python-format -msgid "Timeout migrating for %s. nwfilter not found." +msgid "not cloneable: %s" msgstr "" -#: cinder/virt/libvirt/connection.py:2352 +#: cinder/volume/drivers/rbd.py:728 #, python-format -msgid "skipping %(path)s since it looks like volume" +msgid "%s is in a different ceph cluster" msgstr "" -#: cinder/virt/libvirt/connection.py:2407 -#, python-format -msgid "Getting disk size of %(i_name)s: %(e)s" +#: cinder/volume/drivers/rbd.py:733 +msgid "rbd image clone requires image format to be 'raw' but image {0} is '{1}'" msgstr "" -#: cinder/virt/libvirt/connection.py:2458 +#: cinder/volume/drivers/rbd.py:747 #, python-format -msgid "Instance %s: Starting migrate_disk_and_power_off" +msgid "Unable to open image %(loc)s: %(err)s" msgstr "" -#: cinder/virt/libvirt/connection.py:2513 -msgid "During wait running, instance disappeared." +#: cinder/volume/drivers/rbd.py:817 +msgid "volume backup complete." msgstr "" -#: cinder/virt/libvirt/connection.py:2518 -msgid "Instance running successfully." +#: cinder/volume/drivers/rbd.py:830 +msgid "volume restore complete." msgstr "" -#: cinder/virt/libvirt/connection.py:2525 +#: cinder/volume/drivers/rbd.py:840 cinder/volume/drivers/sheepdog.py:195 #, python-format -msgid "Instance %s: Starting finish_migration" +msgid "Failed to Extend Volume %(volname)s" msgstr "" -#: cinder/virt/libvirt/connection.py:2565 +#: cinder/volume/drivers/rbd.py:845 cinder/volume/drivers/sheepdog.py:200 +#: cinder/volume/drivers/windows/windows.py:223 #, python-format -msgid "Instance %s: Starting finish_revert_migration" +msgid "Extend volume from %(old_size)s GB to %(new_size)s GB." msgstr "" -#: cinder/virt/libvirt/firewall.py:42 -msgid "" -"Libvirt module could not be loaded. NWFilterFirewall will not work " -"correctly." +#: cinder/volume/drivers/scality.py:67 +msgid "Value required for 'scality_sofs_config'" msgstr "" -#: cinder/virt/libvirt/firewall.py:93 -msgid "Called setup_basic_filtering in nwfilter" +#: cinder/volume/drivers/scality.py:78 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" msgstr "" -#: cinder/virt/libvirt/firewall.py:101 -msgid "Ensuring static filters" +#: cinder/volume/drivers/scality.py:84 +msgid "Cannot execute /sbin/mount.sofs" msgstr "" -#: cinder/virt/libvirt/firewall.py:171 -#, python-format -msgid "The nwfilter(%(instance_filter_name)s) is not found." 
+#: cinder/volume/drivers/scality.py:105 +msgid "Cannot mount Scality SOFS, check syslog for errors" msgstr "" -#: cinder/virt/libvirt/firewall.py:217 +#: cinder/volume/drivers/scality.py:139 #, python-format -msgid "The nwfilter(%(instance_filter_name)s) for%(name)s is not found." +msgid "Cannot find volume dir for Scality SOFS at '%s'" msgstr "" -#: cinder/virt/libvirt/firewall.py:233 -msgid "iptables firewall: Setup Basic Filtering" +#: cinder/volume/drivers/sheepdog.py:59 +#, python-format +msgid "Sheepdog is not working: %s" msgstr "" -#: cinder/virt/libvirt/firewall.py:252 -msgid "Attempted to unfilter instance which is not filtered" +#: cinder/volume/drivers/sheepdog.py:64 +msgid "Sheepdog is not working" msgstr "" -#: cinder/virt/libvirt/imagecache.py:170 +#: cinder/volume/drivers/solidfire.py:144 #, python-format -msgid "%s is a valid instance name" +msgid "Payload for SolidFire API call: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:173 +#: cinder/volume/drivers/solidfire.py:151 #, python-format -msgid "%s has a disk file" +msgid "" +"Failed to make httplib connection SolidFire Cluster: %s (verify san_ip " +"settings)" msgstr "" -#: cinder/virt/libvirt/imagecache.py:175 +#: cinder/volume/drivers/solidfire.py:154 #, python-format -msgid "Instance %(instance)s is backed by %(backing)s" +msgid "Failed to make httplib connection: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:186 +#: cinder/volume/drivers/solidfire.py:161 #, python-format msgid "" -"Instance %(instance)s is using a backing file %(backing)s which does not " -"appear in the image service" +"Request to SolidFire cluster returned bad status: %(status)s / %(reason)s" +" (check san_login/san_password settings)" msgstr "" -#: cinder/virt/libvirt/imagecache.py:237 +#: cinder/volume/drivers/solidfire.py:166 #, python-format -msgid "%(id)s (%(base_file)s): image verification failed" +msgid "HTTP request failed, with status: %(status)s and reason: %(reason)s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:247 +#: cinder/volume/drivers/solidfire.py:177 #, python-format -msgid "%(id)s (%(base_file)s): image verification skipped, no hash stored" +msgid "Call to json.loads() raised an exception: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:266 +#: cinder/volume/drivers/solidfire.py:183 #, python-format -msgid "Cannot remove %(base_file)s, it does not exist" +msgid "Results of SolidFire API call: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:278 +#: cinder/volume/drivers/solidfire.py:187 #, python-format -msgid "Base file too young to remove: %s" +msgid "Clone operation encountered: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:281 +#: cinder/volume/drivers/solidfire.py:189 #, python-format -msgid "Removing base file: %s" +msgid "Waiting for outstanding operation before retrying snapshot: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:288 +#: cinder/volume/drivers/solidfire.py:195 #, python-format -msgid "Failed to remove %(base_file)s, error was %(error)s" +msgid "Detected xDBVersionMismatch, retry %s of 5" msgstr "" -#: cinder/virt/libvirt/imagecache.py:299 +#: cinder/volume/drivers/solidfire.py:202 +#: cinder/volume/drivers/solidfire.py:271 +#: cinder/volume/drivers/solidfire.py:366 #, python-format -msgid "%(id)s (%(base_file)s): checking" +msgid "API response: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:318 +#: cinder/volume/drivers/solidfire.py:222 #, python-format -msgid "" -"%(id)s (%(base_file)s): in use: on this node %(local)d local, %(remote)d " -"on other nodes" +msgid "Found 
solidfire account: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:330 +#: cinder/volume/drivers/solidfire.py:253 #, python-format -msgid "" -"%(id)s (%(base_file)s): warning -- an absent base file is in use! " -"instances: %(instance_list)s" +msgid "solidfire account: %s does not exist, create it..." msgstr "" -#: cinder/virt/libvirt/imagecache.py:338 +#: cinder/volume/drivers/solidfire.py:315 #, python-format -msgid "%(id)s (%(base_file)s): in use on (%(remote)d on other nodes)" +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:398 +msgid "Failed to get model update from clone" msgstr "" -#: cinder/virt/libvirt/imagecache.py:348 +#: cinder/volume/drivers/solidfire.py:410 #, python-format -msgid "%(id)s (%(base_file)s): image is not in use" +msgid "Failed volume create: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:354 +#: cinder/volume/drivers/solidfire.py:425 #, python-format -msgid "%(id)s (%(base_file)s): image is in use" +msgid "More than one valid preset was detected, using %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:377 +#: cinder/volume/drivers/solidfire.py:460 #, python-format -msgid "Skipping verification, no base directory at %s" +msgid "Failed to get SolidFire Volume: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:381 -msgid "Verify base images" +#: cinder/volume/drivers/solidfire.py:469 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." msgstr "" -#: cinder/virt/libvirt/imagecache.py:388 +#: cinder/volume/drivers/solidfire.py:478 #, python-format -msgid "Image id %(id)s yields fingerprint %(fingerprint)s" +msgid "Volume %s, not found on SF Cluster." msgstr "" -#: cinder/virt/libvirt/imagecache.py:406 +#: cinder/volume/drivers/solidfire.py:481 #, python-format -msgid "Unknown base file: %s" +msgid "Found %(count)s volumes mapped to id: %(uuid)s." msgstr "" -#: cinder/virt/libvirt/imagecache.py:411 +#: cinder/volume/drivers/solidfire.py:550 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:554 #, python-format -msgid "Active base files: %s" +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:556 +msgid "This usually means the volume was never successfully created." msgstr "" -#: cinder/virt/libvirt/imagecache.py:414 +#: cinder/volume/drivers/solidfire.py:569 #, python-format -msgid "Corrupt base files: %s" +msgid "Failed to delete SolidFire Volume: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:418 +#: cinder/volume/drivers/solidfire.py:572 +#: cinder/volume/drivers/solidfire.py:646 +#: cinder/volume/drivers/solidfire.py:709 +#: cinder/volume/drivers/solidfire.py:734 #, python-format -msgid "Removable base files: %s" +msgid "Volume ID %s was not found on the SolidFire Cluster!" msgstr "" -#: cinder/virt/libvirt/imagecache.py:426 -msgid "Verification complete" +#: cinder/volume/drivers/solidfire.py:575 +msgid "Leaving SolidFire delete_volume" msgstr "" -#: cinder/virt/libvirt/utils.py:264 -msgid "Unable to find an open port" +#: cinder/volume/drivers/solidfire.py:579 +msgid "Executing SolidFire ensure_export..." msgstr "" -#: cinder/virt/libvirt/vif.py:90 -#, python-format -msgid "Ensuring vlan %(vlan)s and bridge %(bridge)s" +#: cinder/volume/drivers/solidfire.py:587 +msgid "Executing SolidFire create_export..." 
msgstr "" -#: cinder/virt/libvirt/vif.py:99 -#, python-format -msgid "Ensuring bridge %s" +#: cinder/volume/drivers/solidfire.py:638 +msgid "Entering SolidFire extend_volume..." msgstr "" -#: cinder/virt/libvirt/vif.py:165 cinder/virt/libvirt/vif.py:220 -#, python-format -msgid "Failed while unplugging vif of instance '%s'" +#: cinder/volume/drivers/solidfire.py:660 +msgid "Leaving SolidFire extend_volume" msgstr "" -#: cinder/virt/libvirt/volume.py:163 -#, python-format -msgid "iSCSI device not found at %s" +#: cinder/volume/drivers/solidfire.py:665 +msgid "Updating cluster status info" msgstr "" -#: cinder/virt/libvirt/volume.py:166 -#, python-format -msgid "" -"ISCSI volume not yet found at: %(mount_device)s. Will rescan & retry. " -"Try number: %(tries)s" +#: cinder/volume/drivers/solidfire.py:673 +msgid "Failed to get updated stats" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:703 +#: cinder/volume/drivers/solidfire.py:728 +msgid "Entering SolidFire attach_volume..." msgstr "" -#: cinder/virt/libvirt/volume.py:178 +#: cinder/volume/drivers/solidfire.py:773 +msgid "Leaving SolidFire transfer volume" +msgstr "" + +#: cinder/volume/drivers/zadara.py:236 #, python-format -msgid "Found iSCSI node %(mount_device)s (after %(tries)s rescans)" +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" msgstr "" -#: cinder/virt/vmwareapi/error_util.py:93 +#: cinder/volume/drivers/zadara.py:260 #, python-format -msgid "Error(s) %s occurred in the call to RetrieveProperties" +msgid "Operation completed. %(data)s" msgstr "" -#: cinder/virt/vmwareapi/fake.py:44 cinder/virt/xenapi/fake.py:77 +#: cinder/volume/drivers/zadara.py:357 #, python-format -msgid "%(text)s: _db_content => %(content)s" +msgid "Pool %(name)s: %(total)sGB total, %(free)sGB free" msgstr "" -#: cinder/virt/vmwareapi/fake.py:131 +#: cinder/volume/drivers/zadara.py:408 cinder/volume/drivers/zadara.py:531 #, python-format -msgid "Property %(attr)s not set for the managed object %(objName)s" +msgid "Volume %(name)s could not be found. 
It might be already deleted" msgstr "" -#: cinder/virt/vmwareapi/fake.py:437 -msgid "There is no VM registered" +#: cinder/volume/drivers/zadara.py:438 +#, python-format +msgid "Create snapshot: %s" msgstr "" -#: cinder/virt/vmwareapi/fake.py:439 cinder/virt/vmwareapi/fake.py:609 +#: cinder/volume/drivers/zadara.py:445 cinder/volume/drivers/zadara.py:490 +#: cinder/volume/drivers/zadara.py:516 #, python-format -msgid "Virtual Machine with ref %s is not there" +msgid "Volume %(name)s not found" msgstr "" -#: cinder/virt/vmwareapi/fake.py:502 +#: cinder/volume/drivers/zadara.py:456 #, python-format -msgid "Logging out a session that is invalid or already logged out: %s" +msgid "Delete snapshot: %s" msgstr "" -#: cinder/virt/vmwareapi/fake.py:517 -msgid "Session is faulty" +#: cinder/volume/drivers/zadara.py:464 +#, python-format +msgid "snapshot: original volume %s not found, skipping delete operation" msgstr "" -#: cinder/virt/vmwareapi/fake.py:520 -msgid "Session Invalid" +#: cinder/volume/drivers/zadara.py:472 +#, python-format +msgid "snapshot: snapshot %s not found, skipping delete operation" msgstr "" -#: cinder/virt/vmwareapi/fake.py:606 -msgid " No Virtual Machine has been registered yet" +#: cinder/volume/drivers/zadara.py:483 +#, python-format +msgid "Creating volume from snapshot: %s" msgstr "" -#: cinder/virt/vmwareapi/io_util.py:99 +#: cinder/volume/drivers/zadara.py:496 #, python-format -msgid "Glance image %s is in killed state" +msgid "Snapshot %(name)s not found" msgstr "" -#: cinder/virt/vmwareapi/io_util.py:107 +#: cinder/volume/drivers/zadara.py:614 #, python-format -msgid "Glance image %(image_id)s is in unknown state - %(state)s" +msgid "Attach properties: %(properties)s" msgstr "" -#: cinder/virt/vmwareapi/network_utils.py:128 +#: cinder/volume/drivers/emc/emc_smis_common.py:40 msgid "" -"ESX SOAP server returned an empty port group for the host system in its " -"response" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." msgstr "" -#: cinder/virt/vmwareapi/network_utils.py:155 -#, python-format -msgid "Creating Port Group with name %s on the ESX host" +#: cinder/volume/drivers/emc/emc_smis_common.py:79 +msgid "Entering create_volume." msgstr "" -#: cinder/virt/vmwareapi/network_utils.py:169 +#: cinder/volume/drivers/emc/emc_smis_common.py:83 #, python-format -msgid "Created Port Group with name %s on the ESX host" +msgid "Create Volume: %(volume)s Size: %(size)lu" msgstr "" -#: cinder/virt/vmwareapi/read_write_util.py:150 +#: cinder/volume/drivers/emc/emc_smis_common.py:91 #, python-format -msgid "Exception during HTTP connection close in VMWareHTTpWrite. Exception is %s" -msgstr "" - -#: cinder/virt/vmwareapi/vim.py:84 -msgid "Unable to import suds." +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" msgstr "" -#: cinder/virt/vmwareapi/vim.py:90 -msgid "Must specify vmwareapi_wsdl_loc" +#: cinder/volume/drivers/emc/emc_smis_common.py:98 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" msgstr "" -#: cinder/virt/vmwareapi/vim.py:145 +#: cinder/volume/drivers/emc/emc_smis_common.py:107 #, python-format -msgid "No such SOAP method '%s' provided by VI SDK" +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." 
msgstr "" -#: cinder/virt/vmwareapi/vim.py:150 +#: cinder/volume/drivers/emc/emc_smis_common.py:115 #, python-format -msgid "httplib error in %s: " +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" msgstr "" -#: cinder/virt/vmwareapi/vim.py:157 +#: cinder/volume/drivers/emc/emc_smis_common.py:130 #, python-format -msgid "Socket error in %s: " +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vim.py:162 +#: cinder/volume/drivers/emc/emc_smis_common.py:137 #, python-format -msgid "Type error in %s: " +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" msgstr "" -#: cinder/virt/vmwareapi/vim.py:166 +#: cinder/volume/drivers/emc/emc_smis_common.py:144 #, python-format -msgid "Exception in %s " +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:66 -msgid "Getting list of instances" +#: cinder/volume/drivers/emc/emc_smis_common.py:152 +msgid "Entering create_volume_from_snapshot." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:82 +#: cinder/volume/drivers/emc/emc_smis_common.py:157 #, python-format -msgid "Got total of %s instances" +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:126 -msgid "Couldn't get a local Datastore reference" +#: cinder/volume/drivers/emc/emc_smis_common.py:167 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:196 +#: cinder/volume/drivers/emc/emc_smis_common.py:177 #, python-format -msgid "Creating VM with the name %s on the ESX host" +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:205 +#: cinder/volume/drivers/emc/emc_smis_common.py:188 #, python-format -msgid "Created VM with the name %s on the ESX host" +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:234 +#: cinder/volume/drivers/emc/emc_smis_common.py:197 #, python-format msgid "" -"Creating Virtual Disk of size %(vmdk_file_size_in_kb)s KB and adapter " -"type %(adapter_type)s on the ESX host local store %(data_store_name)s" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:251 +#: cinder/volume/drivers/emc/emc_smis_common.py:218 #, python-format msgid "" -"Created Virtual Disk of size %(vmdk_file_size_in_kb)s KB on the ESX host " -"local store %(data_store_name)s" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. 
Return code: %(rc)lu.Error: %(error)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:260 +#: cinder/volume/drivers/emc/emc_smis_common.py:230 #, python-format msgid "" -"Deleting the file %(flat_uploaded_vmdk_path)s on the ESX host localstore " -"%(data_store_name)s" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:272 +#: cinder/volume/drivers/emc/emc_smis_common.py:241 #, python-format msgid "" -"Deleted the file %(flat_uploaded_vmdk_path)s on the ESX host local store " -"%(data_store_name)s" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:283 +#: cinder/volume/drivers/emc/emc_smis_common.py:257 #, python-format msgid "" -"Downloading image file data %(image_ref)s to the ESX data store " -"%(data_store_name)s" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:298 +#: cinder/volume/drivers/emc/emc_smis_common.py:266 #, python-format msgid "" -"Downloaded image file data %(image_ref)s to the ESX data store " -"%(data_store_name)s" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:315 +#: cinder/volume/drivers/emc/emc_smis_common.py:278 #, python-format -msgid "Reconfiguring VM instance %s to attach the image disk" +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:322 -#, python-format -msgid "Reconfigured VM instance %s to attach the image disk" +#: cinder/volume/drivers/emc/emc_smis_common.py:287 +msgid "Entering create_cloned_volume." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:329 +#: cinder/volume/drivers/emc/emc_smis_common.py:292 #, python-format -msgid "Powering on the VM instance %s" +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:335 +#: cinder/volume/drivers/emc/emc_smis_common.py:302 #, python-format -msgid "Powered on the VM instance %s" +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:381 +#: cinder/volume/drivers/emc/emc_smis_common.py:312 #, python-format -msgid "Creating Snapshot of the VM instance %s " +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." 
msgstr "" -#: cinder/virt/vmwareapi/vmops.py:391 +#: cinder/volume/drivers/emc/emc_smis_common.py:321 #, python-format -msgid "Created Snapshot of the VM instance %s " +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:434 +#: cinder/volume/drivers/emc/emc_smis_common.py:342 #, python-format -msgid "Copying disk data before snapshot of the VM instance %s" +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. Return code: %(rc)lu.Error: %(error)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:447 +#: cinder/volume/drivers/emc/emc_smis_common.py:354 #, python-format -msgid "Copied disk data before snapshot of the VM instance %s" +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:456 +#: cinder/volume/drivers/emc/emc_smis_common.py:365 #, python-format -msgid "Uploading image %s" +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:469 +#: cinder/volume/drivers/emc/emc_smis_common.py:381 #, python-format -msgid "Uploaded image %s" +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:479 +#: cinder/volume/drivers/emc/emc_smis_common.py:390 #, python-format -msgid "Deleting temporary vmdk file %s" +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:488 +#: cinder/volume/drivers/emc/emc_smis_common.py:402 #, python-format -msgid "Deleted temporary vmdk file %s" +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:520 -msgid "instance is not powered on" +#: cinder/volume/drivers/emc/emc_smis_common.py:411 +msgid "Entering delete_volume." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:527 +#: cinder/volume/drivers/emc/emc_smis_common.py:413 #, python-format -msgid "Rebooting guest OS of VM %s" +msgid "Delete Volume: %(volume)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:530 +#: cinder/volume/drivers/emc/emc_smis_common.py:420 #, python-format -msgid "Rebooted guest OS of VM %s" +msgid "Volume %(name)s not found on the array. No volume to delete." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:532 +#: cinder/volume/drivers/emc/emc_smis_common.py:430 #, python-format -msgid "Doing hard reboot of VM %s" +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." 
msgstr "" -#: cinder/virt/vmwareapi/vmops.py:536 +#: cinder/volume/drivers/emc/emc_smis_common.py:438 #, python-format -msgid "Did hard reboot of VM %s" +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:548 +#: cinder/volume/drivers/emc/emc_smis_common.py:442 #, python-format -msgid "instance - %s not present" +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:567 +#: cinder/volume/drivers/emc/emc_smis_common.py:456 #, python-format -msgid "Powering off the VM %s" +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:572 +#: cinder/volume/drivers/emc/emc_smis_common.py:465 #, python-format -msgid "Powered off the VM %s" +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:576 -#, python-format -msgid "Unregistering the VM %s" +#: cinder/volume/drivers/emc/emc_smis_common.py:472 +msgid "Entering create_snapshot." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:579 +#: cinder/volume/drivers/emc/emc_smis_common.py:476 #, python-format -msgid "Unregistered the VM %s" +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:581 +#: cinder/volume/drivers/emc/emc_smis_common.py:488 #, python-format -msgid "" -"In vmwareapi:vmops:destroy, got this exception while un-registering the " -"VM: %s" +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:592 +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:497 +#: cinder/volume/drivers/emc/emc_smis_common.py:567 #, python-format -msgid "Deleting contents of the VM %(name)s from datastore %(datastore_name)s" +msgid "Cannot find Replication Service to create snapshot for volume %s." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:602 +#: cinder/volume/drivers/emc/emc_smis_common.py:502 #, python-format -msgid "Deleted contents of the VM %(name)s from datastore %(datastore_name)s" +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:607 +#: cinder/volume/drivers/emc/emc_smis_common.py:518 #, python-format msgid "" -"In vmwareapi:vmops:destroy, got this exception while deleting the VM " -"contents from the disk: %s" -msgstr "" - -#: cinder/virt/vmwareapi/vmops.py:615 -msgid "pause not supported for vmwareapi" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:619 -msgid "unpause not supported for vmwareapi" -msgstr "" - -#: cinder/virt/vmwareapi/vmops.py:633 +#: cinder/volume/drivers/emc/emc_smis_common.py:527 #, python-format -msgid "Suspending the VM %s " +msgid "" +"Error Create Snapshot: %(snapshot)s Volume: %(volume)s Error: " +"%(errordesc)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:637 +#: cinder/volume/drivers/emc/emc_smis_common.py:535 #, python-format -msgid "Suspended the VM %s " +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:640 -msgid "instance is powered off and can not be suspended." 
+#: cinder/volume/drivers/emc/emc_smis_common.py:541 +msgid "Entering delete_snapshot." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:643 +#: cinder/volume/drivers/emc/emc_smis_common.py:545 #, python-format -msgid "VM %s was already in suspended state. So returning without doing anything" +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:656 +#: cinder/volume/drivers/emc/emc_smis_common.py:551 #, python-format -msgid "Resuming the VM %s" +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:661 +#: cinder/volume/drivers/emc/emc_smis_common.py:559 #, python-format -msgid "Resumed the VM %s " +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:663 -msgid "instance is not in a suspended state" +#: cinder/volume/drivers/emc/emc_smis_common.py:574 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:699 -msgid "get_diagnostics not implemented for vmwareapi" +#: cinder/volume/drivers/emc/emc_smis_common.py:590 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:757 +#: cinder/volume/drivers/emc/emc_smis_common.py:599 #, python-format msgid "" -"Reconfiguring VM instance %(name)s to set the machine id with ip - " -"%(ip_addr)s" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:765 +#: cinder/volume/drivers/emc/emc_smis_common.py:611 #, python-format msgid "" -"Reconfigured VM instance %(name)s to set the machine id with ip - " -"%(ip_addr)s" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:802 +#: cinder/volume/drivers/emc/emc_smis_common.py:621 #, python-format -msgid "Creating directory with path %s" +msgid "Create export: %(volume)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:806 +#: cinder/volume/drivers/emc/emc_smis_common.py:626 #, python-format -msgid "Created directory with path %s" +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:89 +#: cinder/volume/drivers/emc/emc_smis_common.py:648 #, python-format -msgid "Downloading image %s from glance image server" +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:103 +#: cinder/volume/drivers/emc/emc_smis_common.py:663 #, python-format -msgid "Downloaded image %s from glance image server" +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:108 +#: cinder/volume/drivers/emc/emc_smis_common.py:674 #, python-format -msgid "Uploading image %s to the Glance image server" +msgid "Error mapping volume %s." 
msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:129 +#: cinder/volume/drivers/emc/emc_smis_common.py:678 #, python-format -msgid "Uploaded image %s to the Glance image server" +msgid "ExposePaths for volume %s completed successfully." msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:139 +#: cinder/volume/drivers/emc/emc_smis_common.py:694 #, python-format -msgid "Getting image size for the image %s" +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:143 +#: cinder/volume/drivers/emc/emc_smis_common.py:707 #, python-format -msgid "Got image size of %(size)s for the image %(image)s" +msgid "Error unmapping volume %s." msgstr "" -#: cinder/virt/xenapi/fake.py:553 cinder/virt/xenapi/fake.py:652 -#: cinder/virt/xenapi/fake.py:670 cinder/virt/xenapi/fake.py:732 -msgid "Raising NotImplemented" +#: cinder/volume/drivers/emc/emc_smis_common.py:711 +#, python-format +msgid "HidePaths for volume %s completed successfully." msgstr "" -#: cinder/virt/xenapi/fake.py:555 +#: cinder/volume/drivers/emc/emc_smis_common.py:724 #, python-format -msgid "xenapi.fake does not have an implementation for %s" +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" msgstr "" -#: cinder/virt/xenapi/fake.py:589 +#: cinder/volume/drivers/emc/emc_smis_common.py:739 #, python-format -msgid "Calling %(localname)s %(impl)s" +msgid "Error mapping volume %(vol)s. %(error)s" msgstr "" -#: cinder/virt/xenapi/fake.py:594 +#: cinder/volume/drivers/emc/emc_smis_common.py:744 #, python-format -msgid "Calling getter %s" +msgid "AddMembers for volume %s completed successfully." msgstr "" -#: cinder/virt/xenapi/fake.py:654 +#: cinder/volume/drivers/emc/emc_smis_common.py:757 #, python-format msgid "" -"xenapi.fake does not have an implementation for %s or it has been called " -"with the wrong number of arguments" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" msgstr "" -#: cinder/virt/xenapi/host.py:67 +#: cinder/volume/drivers/emc/emc_smis_common.py:770 #, python-format -msgid "" -"Instance %(name)s running on %(host)s could not be found in the database:" -" assuming it is a worker VM and skipping migration to a new host" +msgid "Error unmapping volume %(vol)s. %(error)s" msgstr "" -#: cinder/virt/xenapi/host.py:137 +#: cinder/volume/drivers/emc/emc_smis_common.py:775 #, python-format -msgid "Unable to get SR for this host: %s" +msgid "RemoveMembers for volume %s completed successfully." msgstr "" -#: cinder/virt/xenapi/host.py:169 -msgid "Unable to get updated status" +#: cinder/volume/drivers/emc/emc_smis_common.py:781 +#, python-format +msgid "Map volume: %(volume)s" msgstr "" -#: cinder/virt/xenapi/host.py:172 +#: cinder/volume/drivers/emc/emc_smis_common.py:790 +#: cinder/volume/drivers/emc/emc_smis_common.py:820 #, python-format -msgid "The call to %(method)s returned an error: %(e)s." +msgid "Cannot find Controller Configuration Service for storage system %s" msgstr "" -#: cinder/virt/xenapi/network_utils.py:37 +#: cinder/volume/drivers/emc/emc_smis_common.py:804 #, python-format -msgid "Found non-unique network for name_label %s" +msgid "Unmap volume: %(volume)s" msgstr "" -#: cinder/virt/xenapi/network_utils.py:55 +#: cinder/volume/drivers/emc/emc_smis_common.py:810 #, python-format -msgid "Found non-unique network for bridge %s" +msgid "Volume %s is not mapped. 
No volume to unmap." msgstr "" -#: cinder/virt/xenapi/network_utils.py:58 +#: cinder/volume/drivers/emc/emc_smis_common.py:834 #, python-format -msgid "Found no network for bridge %s" +msgid "Initialize connection: %(volume)s" msgstr "" -#: cinder/virt/xenapi/pool.py:111 +#: cinder/volume/drivers/emc/emc_smis_common.py:840 #, python-format -msgid "Unable to eject %(host)s from the pool; pool not empty" +msgid "Volume %s is already mapped." msgstr "" -#: cinder/virt/xenapi/pool.py:126 +#: cinder/volume/drivers/emc/emc_smis_common.py:852 #, python-format -msgid "Unable to eject %(host)s from the pool; No master found" +msgid "Terminate connection: %(volume)s" msgstr "" -#: cinder/virt/xenapi/pool.py:143 +#: cinder/volume/drivers/emc/emc_smis_common.py:884 #, python-format -msgid "Pool-Join failed: %(e)s" +msgid "Found Storage Type: %s" msgstr "" -#: cinder/virt/xenapi/pool.py:146 -#, python-format -msgid "Unable to join %(host)s in the pool" +#: cinder/volume/drivers/emc/emc_smis_common.py:887 +msgid "Storage type not found." msgstr "" -#: cinder/virt/xenapi/pool.py:162 +#: cinder/volume/drivers/emc/emc_smis_common.py:903 #, python-format -msgid "Pool-eject failed: %(e)s" +msgid "Found Masking View: %s" msgstr "" -#: cinder/virt/xenapi/pool.py:174 -#, python-format -msgid "Unable to set up pool: %(e)s." +#: cinder/volume/drivers/emc/emc_smis_common.py:906 +msgid "Masking View not found." msgstr "" -#: cinder/virt/xenapi/pool.py:185 -#, python-format -msgid "Pool-set_name_label failed: %(e)s" +#: cinder/volume/drivers/emc/emc_smis_common.py:928 +msgid "Ecom user not found." msgstr "" -#: cinder/virt/xenapi/vif.py:103 +#: cinder/volume/drivers/emc/emc_smis_common.py:948 #, python-format -msgid "Found no PIF for device %s" +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:952 +msgid "Ecom server not found." msgstr "" -#: cinder/virt/xenapi/vif.py:122 +#: cinder/volume/drivers/emc/emc_smis_common.py:959 +#, fuzzy +msgid "Cannot connect to ECOM server" +msgstr "Muling kumonekta sa queue" + +#: cinder/volume/drivers/emc/emc_smis_common.py:971 #, python-format -msgid "" -"PIF %(pif_rec['uuid'])s for network %(bridge)s has VLAN id %(pif_vlan)d. " -"Expected %(vlan_num)d" +msgid "Found Replication Service: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:218 -msgid "Created VM" +#: cinder/volume/drivers/emc/emc_smis_common.py:984 +#, python-format +msgid "Found Storage Configuration Service: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:245 +#: cinder/volume/drivers/emc/emc_smis_common.py:997 #, python-format -msgid "VBD not found in instance %s" +msgid "Found Controller Configuration Service: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:262 +#: cinder/volume/drivers/emc/emc_smis_common.py:1010 #, python-format -msgid "VBD %s already detached" +msgid "Found Storage Hardware ID Management Service: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:265 +#: cinder/volume/drivers/emc/emc_smis_common.py:1054 #, python-format -msgid "VBD %(vbd_ref)s detach rejected, attempt %(num_attempt)d/%(max_attempts)d" +msgid "Pool %(storage_type)s is not found." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:270 +#: cinder/volume/drivers/emc/emc_smis_common.py:1060 #, python-format -msgid "Unable to unplug VBD %s" +msgid "Storage system not found for pool %(storage_type)s." 
msgstr "" -#: cinder/virt/xenapi/vm_utils.py:275 +#: cinder/volume/drivers/emc/emc_smis_common.py:1066 #, python-format -msgid "Reached maximum number of retries trying to unplug VBD %s" +msgid "Pool: %(pool)s SystemName: %(systemname)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:286 +#: cinder/volume/drivers/emc/emc_smis_common.py:1082 #, python-format -msgid "Unable to destroy VBD %s" +msgid "Pool name: %(poolname)s System name: %(systemname)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:305 +#: cinder/volume/drivers/emc/emc_smis_common.py:1114 #, python-format -msgid "Creating %(vbd_type)s-type VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +msgid "Volume %(volumename)s not found on the array." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:308 +#: cinder/volume/drivers/emc/emc_smis_common.py:1117 #, python-format -msgid "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s." +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:319 +#: cinder/volume/drivers/emc/emc_smis_common.py:1130 #, python-format -msgid "Unable to destroy VDI %s" +msgid "Source: %(volumename)s Target: %(snapshotname)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:336 +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 #, python-format msgid "" -"Created VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s)" -" on %(sr_ref)s." +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " msgstr "" -#: cinder/virt/xenapi/vm_utils.py:345 +#: cinder/volume/drivers/emc/emc_smis_common.py:1158 #, python-format -msgid "Copied VDI %(vdi_ref)s from VDI %(vdi_to_copy_ref)s on %(sr_ref)s." +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:353 +#: cinder/volume/drivers/emc/emc_smis_common.py:1184 #, python-format -msgid "Cloned VDI %(vdi_ref)s from VDI %(vdi_to_clone_ref)s" +msgid "Error finding %s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:372 +#: cinder/volume/drivers/emc/emc_smis_common.py:1188 #, python-format -msgid "No primary VDI found for %(vm_ref)s" +msgid "Found %(name)s: %(initiator)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:379 +#: cinder/volume/drivers/emc/emc_smis_common.py:1248 #, python-format -msgid "Snapshotting with label '%(label)s'" +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:392 +#: cinder/volume/drivers/emc/emc_smis_common.py:1289 #, python-format -msgid "Created snapshot %(template_vm_ref)s" +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:431 +#: cinder/volume/drivers/emc/emc_smis_common.py:1302 #, python-format -msgid "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s" +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:583 +#: cinder/volume/drivers/emc/emc_smis_common.py:1314 #, python-format -msgid "Creating blank HD of size %(req_size)d gigs" +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:634 +#: cinder/volume/drivers/emc/emc_smis_common.py:1326 #, python-format msgid "" -"Fast cloning is only supported on default local SR of type ext. 
SR on " -"this system was found to be of type %(sr_type)s. Ignoring the cow flag." +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:724 +#: cinder/volume/drivers/emc/emc_smis_common.py:1361 #, python-format -msgid "" -"download_vhd %(image)s attempt %(attempt_num)d/%(max_attempts)d from " -"%(glance_host)s:%(glance_port)s" +msgid "Available device number on %(storage)s: %(device)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:734 +#: cinder/volume/drivers/emc/emc_smis_common.py:1404 #, python-format -msgid "download_vhd failed: %r" +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:750 +#: cinder/volume/drivers/emc/emc_smis_common.py:1409 #, python-format -msgid "Asking xapi to fetch vhd image %(image)s" +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:760 +#: cinder/volume/drivers/emc/emc_smis_common.py:1419 #, python-format -msgid "" -"xapi 'download_vhd' returned VDI of type '%(vdi_type)s' with UUID " -"'%(vdi_uuid)s'" +msgid "Device info: %(data)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:789 +#: cinder/volume/drivers/emc/emc_smis_common.py:1441 #, python-format -msgid "vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes=%(vdi_size_bytes)d" +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:805 +#: cinder/volume/drivers/emc/emc_smis_common.py:1463 #, python-format -msgid "image_size_bytes=%(size_bytes)d, allowed_size_bytes=%(allowed_size_bytes)d" +msgid "Found Storage Processor System: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:809 +#: cinder/volume/drivers/emc/emc_smis_common.py:1491 #, python-format msgid "" -"Image size %(size_bytes)d exceeded instance_type allowed size " -"%(allowed_size_bytes)d" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:831 -#, python-format -msgid "Fetching image %(image)s, type %(image_type_str)" +#: cinder/volume/drivers/emc/emc_smis_common.py:1520 +msgid "Error finding Storage Hardware ID Service." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:844 +#: cinder/volume/drivers/emc/emc_smis_common.py:1526 #, python-format -msgid "Size for image %(image)s: %(virtual_size)d" +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1538 +msgid "Error finding Target WWNs." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:853 +#: cinder/volume/drivers/emc/emc_smis_common.py:1548 #, python-format -msgid "" -"Kernel/Ramdisk image is too large: %(vdi_size)d bytes, max %(max_size)d " -"bytes" +msgid "Add target WWN: %s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:870 +#: cinder/volume/drivers/emc/emc_smis_common.py:1550 #, python-format -msgid "Copying VDI %s to /boot/guest on dom0" +msgid "Target WWNs: %s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:884 +#: cinder/volume/drivers/emc/emc_smis_common.py:1566 #, python-format -msgid "Kernel/Ramdisk VDI %s destroyed" +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." 
msgstr "" -#: cinder/virt/xenapi/vm_utils.py:895 -msgid "Failed to fetch glance image" +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:152 +#, python-format +msgid "Could not find iSCSI export for volume %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:934 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:161 #, python-format -msgid "Detected %(image_type_str)s format for image %(image_ref)s" +msgid "Cannot find device number for volume %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:955 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:191 #, python-format -msgid "Looking up vdi %s for PV kernel" +msgid "Found iSCSI endpoint: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:973 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:198 #, python-format -msgid "Unknown image format %(disk_image_type)s" +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1016 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:215 #, python-format -msgid "VDI %s is still available" +msgid "ISCSI properties: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1059 +#: cinder/volume/drivers/hds/hds.py:70 #, python-format -msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgid "Range: start LU: %(start)s, end LU: %(end)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1061 +#: cinder/volume/drivers/hds/hds.py:84 #, python-format -msgid "(VM_UTILS) xenapi power_state -> |%s|" +msgid "setting LU upper (end) limit to %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1088 +#: cinder/volume/drivers/hds/hds.py:92 #, python-format -msgid "Unable to parse rrd of %(vm_uuid)s" +msgid "%(element)s: %(val)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1108 +#: cinder/volume/drivers/hds/hds.py:103 cinder/volume/drivers/hds/hds.py:105 #, python-format -msgid "Re-scanning SR %s" +msgid "XML exception reading parameter: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1136 +#: cinder/volume/drivers/hds/hds.py:178 #, python-format -msgid "Flag sr_matching_filter '%s' does not respect formatting convention" +msgid "portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1154 -msgid "" -"XenAPI is unable to find a Storage Repository to install guest instances " -"on. Please check your configuration and/or configure the flag " -"'sr_matching_filter'" +#: cinder/volume/drivers/hds/hds.py:197 +#, python-format +msgid "No configuration found for service: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1167 -msgid "Cannot find SR of content-type ISO" +#: cinder/volume/drivers/hds/hds.py:250 +#, python-format +msgid "HDP not found: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1175 +#: cinder/volume/drivers/hds/hds.py:289 #, python-format -msgid "ISO: looking at SR %(sr_rec)s" +msgid "iSCSI portal not found for service: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1177 -msgid "ISO: not iso content" +#: cinder/volume/drivers/hds/hds.py:327 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1180 -msgid "ISO: iso content_type, no 'i18n-key' key" +#: cinder/volume/drivers/hds/hds.py:355 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is cloned." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1183 -msgid "ISO: iso content_type, i18n-key value not 'local-storage-iso'" +#: cinder/volume/drivers/hds/hds.py:372 +#, python-format +msgid "LUN %(lun)s extended to %(size)s GB." 
msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1187 -msgid "ISO: SR MATCHing our criteria" +#: cinder/volume/drivers/hds/hds.py:395 +#, python-format +msgid "delete lun %(lun)s on %(name)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1189 -msgid "ISO: ISO, looking to see if it is host local" +#: cinder/volume/drivers/hds/hds.py:480 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created from snapshot." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1192 +#: cinder/volume/drivers/hds/hds.py:503 #, python-format -msgid "ISO: PBD %(pbd_ref)s disappeared" +msgid "LUN %(lun)s of size %(size)s MB is created as snapshot." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1195 +#: cinder/volume/drivers/hds/hds.py:522 #, python-format -msgid "ISO: PBD matching, want %(pbd_rec)s, have %(host)s" +msgid "LUN %s is deleted." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1198 -msgid "ISO: SR with local PBD" +#: cinder/volume/drivers/huawei/__init__.py:57 +msgid "_instantiate_driver: configuration not found." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1220 +#: cinder/volume/drivers/huawei/__init__.py:64 #, python-format msgid "" -"Unable to obtain RRD XML for VM %(vm_uuid)s with server details: " -"%(server)s." +"_instantiate_driver: Loading %(protocol)s driver for Huawei OceanStor " +"%(product)s series storage arrays." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1236 +#: cinder/volume/drivers/huawei/__init__.py:84 #, python-format -msgid "Unable to obtain RRD XML updates with server details: %(server)s." +msgid "" +"\"Product\" or \"Protocol\" is illegal. \"Product\" should be set to " +"either T, Dorado or HVS. \"Protocol\" should be set to either iSCSI or " +"FC. Product: %(product)s Protocol: %(protocol)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1290 +#: cinder/volume/drivers/huawei/huawei_dorado.py:74 #, python-format -msgid "Invalid statistics data from Xenserver: %s" +msgid "" +"initialize_connection: volume name: %(vol)s host: %(host)s initiator: " +"%(wwn)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1343 +#: cinder/volume/drivers/huawei/huawei_dorado.py:92 +#: cinder/volume/drivers/huawei/huawei_t.py:461 #, python-format -msgid "VHD %(vdi_uuid)s has parent %(parent_ref)s" +msgid "initialize_connection: Target FC ports WWNS: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1417 +#: cinder/volume/drivers/huawei/huawei_t.py:101 #, python-format msgid "" -"Parent %(parent_uuid)s doesn't match original parent " -"%(original_parent_uuid)s, waiting for coalesce..." +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(ini)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1427 +#: cinder/volume/drivers/huawei/huawei_t.py:159 +#: cinder/volume/drivers/huawei/rest_common.py:1278 #, python-format -msgid "VHD coalesce attempts exceeded (%(max_attempts)d), giving up..." +msgid "" +"_get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " +"check config file." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1462 +#: cinder/volume/drivers/huawei/huawei_t.py:206 +#: cinder/volume/drivers/huawei/rest_common.py:1083 #, python-format -msgid "Timeout waiting for device %s to be created" +msgid "_get_tgt_iqn: iSCSI IP is %s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1473 +#: cinder/volume/drivers/huawei/huawei_t.py:234 #, python-format -msgid "Plugging VBD %s ... " +msgid "_get_tgt_iqn: iSCSI target iqn is %s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1476 +#: cinder/volume/drivers/huawei/huawei_t.py:248 #, python-format -msgid "Plugging VBD %s done." 
+msgid "" +"_get_iscsi_tgt_port_info: Failed to get iSCSI port info. Please make sure" +" the iSCSI port IP %s is configured in array." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1478 +#: cinder/volume/drivers/huawei/huawei_t.py:323 +#: cinder/volume/drivers/huawei/huawei_t.py:552 #, python-format -msgid "VBD %(vbd_ref)s plugged as %(orig_dev)s" +msgid "" +"terminate_connection: volume: %(vol)s, host: %(host)s, connector: " +"%(initiator)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1481 +#: cinder/volume/drivers/huawei/huawei_t.py:351 #, python-format -msgid "VBD %(vbd_ref)s plugged into wrong dev, remapping to %(dev)s" +msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1490 -#, python-format -msgid "Destroying VBD for VDI %s ... " +#: cinder/volume/drivers/huawei/huawei_t.py:436 +msgid "validate_connector: The FC driver requires thewwpns in the connector." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1498 +#: cinder/volume/drivers/huawei/huawei_t.py:443 #, python-format -msgid "Destroying VBD for VDI %s done." +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(wwn)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1511 +#: cinder/volume/drivers/huawei/huawei_t.py:578 #, python-format -msgid "Running pygrub against %s" +msgid "_remove_fc_ports: FC port was not found on host %(hostid)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1518 +#: cinder/volume/drivers/huawei/huawei_utils.py:40 #, python-format -msgid "Found Xen kernel %s" +msgid "parse_xml_file: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1520 -msgid "No Xen kernel found. Booting HVM." +#: cinder/volume/drivers/huawei/huawei_utils.py:129 +#, python-format +msgid "_get_host_os_type: Host %(ip)s OS type is %(os)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1533 -msgid "Partitions:" +#: cinder/volume/drivers/huawei/rest_common.py:59 +#, python-format +msgid "HVS Request URL: %(url)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1539 +#: cinder/volume/drivers/huawei/rest_common.py:60 #, python-format -msgid " %(num)s: %(ptype)s %(size)d sectors" +msgid "HVS Request Data: %(data)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1565 +#: cinder/volume/drivers/huawei/rest_common.py:73 #, python-format -msgid "" -"Writing partition table %(primary_first)d %(primary_last)d to " -"%(dev_path)s..." +msgid "HVS Response Data: %(res)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1578 +#: cinder/volume/drivers/huawei/rest_common.py:75 #, python-format -msgid "Writing partition table %s done." +msgid "Bad response from server: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1632 -#, python-format -msgid "" -"Starting sparse_copy src=%(src_path)s dst=%(dst_path)s " -"virtual_size=%(virtual_size)d block_size=%(block_size)d" +#: cinder/volume/drivers/huawei/rest_common.py:82 +msgid "JSON transfer error" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1664 +#: cinder/volume/drivers/huawei/rest_common.py:102 #, python-format -msgid "" -"Finished sparse_copy in %(duration).2f secs, %(compression_pct).2f%% " -"reduction in size" +msgid "Login error, reason is %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1714 +#: cinder/volume/drivers/huawei/rest_common.py:166 +#, python-format msgid "" -"XenServer tools installed in this image are capable of network injection." 
-" Networking files will not bemanipulated" +"%(err)s\n" +"result: %(res)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1722 -msgid "" -"XenServer tools are present in this image but are not capable of network " -"injection" +#: cinder/volume/drivers/huawei/rest_common.py:173 +#, python-format +msgid "%s \"data\" was not in result." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1726 -msgid "XenServer tools are not installed in this image" +#: cinder/volume/drivers/huawei/rest_common.py:208 +msgid "Can't find the Qos policy in array" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1742 -msgid "Manipulating interface files directly" +#: cinder/volume/drivers/huawei/rest_common.py:246 +msgid "Can't find lun or lun group in array" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1751 +#: cinder/volume/drivers/huawei/rest_common.py:280 #, python-format -msgid "Failed to mount filesystem (expected for non-linux instances): %s" +msgid "Invalid resource pool: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:131 cinder/virt/xenapi/vmops.py:722 +#: cinder/volume/drivers/huawei/rest_common.py:298 #, python-format -msgid "Updating progress to %(progress)d" +msgid "Get pool info error, pool name is:%s" msgstr "" -#: cinder/virt/xenapi/vmops.py:231 +#: cinder/volume/drivers/huawei/rest_common.py:327 #, python-format -msgid "Attempted to power on non-existent instance bad instance id %s" +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:233 -msgid "Starting instance" +#: cinder/volume/drivers/huawei/rest_common.py:354 +#, python-format +msgid "_stop_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:303 -msgid "Removing kernel/ramdisk files from dom0" +#: cinder/volume/drivers/huawei/rest_common.py:474 +#, python-format +msgid "" +"_mapping_hostgroup_and_lungroup: lun_group: %(lun_group)sview_id: " +"%(view_id)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:358 -msgid "Failed to spawn, rolling back" +#: cinder/volume/drivers/huawei/rest_common.py:511 +#: cinder/volume/drivers/huawei/rest_common.py:543 +#, python-format +msgid "initiator name:%(initiator_name)s, volume name:%(volume)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:443 -msgid "Detected ISO image type, creating blank VM for install" +#: cinder/volume/drivers/huawei/rest_common.py:527 +#, python-format +msgid "host lun id is %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:462 -msgid "Auto configuring disk, attempting to resize partition..." +#: cinder/volume/drivers/huawei/rest_common.py:553 +#, python-format +msgid "the free wwns %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:515 +#: cinder/volume/drivers/huawei/rest_common.py:574 #, python-format -msgid "Invalid value for injected_files: %r" +msgid "the fc server properties is:%s" msgstr "" -#: cinder/virt/xenapi/vmops.py:520 +#: cinder/volume/drivers/huawei/rest_common.py:688 #, python-format -msgid "Injecting file path: '%s'" +msgid "JSON transfer data error. %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:527 -msgid "Setting admin password" +#: cinder/volume/drivers/huawei/rest_common.py:874 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:531 -msgid "Resetting network" +#: cinder/volume/drivers/huawei/rest_common.py:937 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". 
" +"LUNType:%(fetchtype)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:538 -msgid "Setting VCPU weight" +#: cinder/volume/drivers/huawei/rest_common.py:964 +#, python-format +msgid "" +"PrefetchType config is wrong. PrefetchType must in 1,2,3,4. fetchtype " +"is:%(fetchtype)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:544 -msgid "Starting VM" +#: cinder/volume/drivers/huawei/rest_common.py:970 +msgid "Use default prefetch fetchtype. Prefetch fetchtype:Intelligent." msgstr "" -#: cinder/virt/xenapi/vmops.py:551 +#: cinder/volume/drivers/huawei/rest_common.py:982 #, python-format msgid "" -"Latest agent build for %(hypervisor)s/%(os)s/%(architecture)s is " -"%(version)s" +"_wait_for_luncopy:LUNcopy status is not normal.LUNcopy name: " +"%(luncopyname)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:554 +#: cinder/volume/drivers/huawei/rest_common.py:1056 #, python-format -msgid "No agent build found for %(hypervisor)s/%(os)s/%(architecture)s" -msgstr "" - -#: cinder/virt/xenapi/vmops.py:561 -msgid "Waiting for instance state to become running" +msgid "" +"_get_iscsi_port_info: Failed to get iscsi port info through config IP " +"%(ip)s, please check config file." msgstr "" -#: cinder/virt/xenapi/vmops.py:573 -msgid "Querying agent version" +#: cinder/volume/drivers/huawei/rest_common.py:1101 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:576 +#: cinder/volume/drivers/huawei/rest_common.py:1124 #, python-format -msgid "Instance agent version: %s" +msgid "_parse_volume_type: type id: %(type_id)s config parameter is: %(params)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:581 +#: cinder/volume/drivers/huawei/rest_common.py:1157 #, python-format -msgid "Updating Agent to %s" +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the configuration file " +"%(conf)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:616 +#: cinder/volume/drivers/huawei/rest_common.py:1162 #, python-format -msgid "No opaque_ref could be determined for '%s'." +msgid "The config parameters are: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:670 -msgid "Finished snapshot and upload for VM" +#: cinder/volume/drivers/huawei/rest_common.py:1239 +#: cinder/volume/drivers/huawei/ssh_common.py:118 +#: cinder/volume/drivers/huawei/ssh_common.py:1265 +#, python-format +msgid "_check_conf_file: Config file invalid. %s must be set." msgstr "" -#: cinder/virt/xenapi/vmops.py:677 -msgid "Starting snapshot for VM" +#: cinder/volume/drivers/huawei/rest_common.py:1246 +#: cinder/volume/drivers/huawei/ssh_common.py:125 +msgid "_check_conf_file: Config file invalid. StoragePool must be set." msgstr "" -#: cinder/virt/xenapi/vmops.py:686 +#: cinder/volume/drivers/huawei/rest_common.py:1256 #, python-format -msgid "Unable to Snapshot instance: %(exc)s" +msgid "" +"_check_conf_file: Config file invalid. Host OSType invalid.\n" +"The valid values are: %(os_list)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:702 -msgid "Failed to transfer vhd to new host" +#: cinder/volume/drivers/huawei/rest_common.py:1300 +msgid "Can not find lun in array" msgstr "" -#: cinder/virt/xenapi/vmops.py:770 +#: cinder/volume/drivers/huawei/ssh_common.py:54 #, python-format -msgid "Resizing down VDI %(cow_uuid)s from %(old_gb)dGB to %(new_gb)dGB" +msgid "ssh_read: Read SSH timeout. 
%s" msgstr "" -#: cinder/virt/xenapi/vmops.py:893 -#, python-format -msgid "Resizing up VDI %(vdi_uuid)s from %(old_gb)dGB to %(new_gb)dGB" +#: cinder/volume/drivers/huawei/ssh_common.py:70 +msgid "No response message. Please check system status." msgstr "" -#: cinder/virt/xenapi/vmops.py:901 -msgid "Resize complete" +#: cinder/volume/drivers/huawei/ssh_common.py:101 +#: cinder/volume/drivers/huawei/ssh_common.py:1249 +msgid "do_setup" msgstr "" -#: cinder/virt/xenapi/vmops.py:928 +#: cinder/volume/drivers/huawei/ssh_common.py:135 +#: cinder/volume/drivers/huawei/ssh_common.py:1287 #, python-format -msgid "Failed to query agent version: %(resp)r" +msgid "" +"_check_conf_file: Config file invalid. Host OSType is invalid.\n" +"The valid values are: %(os_list)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:949 +#: cinder/volume/drivers/huawei/ssh_common.py:169 #, python-format -msgid "domid changed from %(domid)s to %(newdomid)s" +msgid "_get_login_info: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:962 +#: cinder/volume/drivers/huawei/ssh_common.py:224 #, python-format -msgid "Failed to update agent: %(resp)r" +msgid "create_volume: volume name: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:983 +#: cinder/volume/drivers/huawei/ssh_common.py:242 #, python-format -msgid "Failed to exchange keys: %(resp)r" +msgid "" +"_name_translate: Name in cinder: %(old)s, new name in storage system: " +"%(new)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:998 +#: cinder/volume/drivers/huawei/ssh_common.py:279 #, python-format -msgid "Failed to update password: %(resp)r" +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the element in configuration " +"file %(conf)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1023 +#: cinder/volume/drivers/huawei/ssh_common.py:373 +#: cinder/volume/drivers/huawei/ssh_common.py:1451 #, python-format -msgid "Failed to inject file: %(resp)r" +msgid "LUNType must be \"Thin\" or \"Thick\". LUNType:%(type)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1032 -msgid "VM already halted, skipping shutdown..." +#: cinder/volume/drivers/huawei/ssh_common.py:395 +msgid "" +"_parse_conf_lun_params: Use default prefetch type. Prefetch type: " +"Intelligent" msgstr "" -#: cinder/virt/xenapi/vmops.py:1036 -msgid "Shutting down VM" +#: cinder/volume/drivers/huawei/ssh_common.py:421 +#, python-format +msgid "" +"_get_maximum_capacity_pool_id: Failed to get pool id. Please check config" +" file and make sure the StoragePool %s is created in storage array." msgstr "" -#: cinder/virt/xenapi/vmops.py:1054 -msgid "Unable to find VBD for VM" +#: cinder/volume/drivers/huawei/ssh_common.py:436 +#, python-format +msgid "CLI command: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1097 -msgid "Using RAW or VHD, skipping kernel and ramdisk deletion" +#: cinder/volume/drivers/huawei/ssh_common.py:466 +#, python-format +msgid "" +"_execute_cli: Can not connect to IP %(old)s, try to connect to the other " +"IP %(new)s." 
msgstr "" -#: cinder/virt/xenapi/vmops.py:1104 -msgid "instance has a kernel or ramdisk but not both" +#: cinder/volume/drivers/huawei/ssh_common.py:501 +#, python-format +msgid "_execute_cli: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1111 -msgid "kernel/ramdisk files removed" +#: cinder/volume/drivers/huawei/ssh_common.py:511 +#, python-format +msgid "delete_volume: volume name: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1121 -msgid "VM destroyed" +#: cinder/volume/drivers/huawei/ssh_common.py:516 +#, python-format +msgid "delete_volume: Volume %(name)s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:1147 -msgid "Destroying VM" +#: cinder/volume/drivers/huawei/ssh_common.py:570 +#, python-format +msgid "" +"create_volume_from_snapshot: snapshot name: %(snapshot)s, volume name: " +"%(volume)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1169 -msgid "VM is not present, skipping destroy..." +#: cinder/volume/drivers/huawei/ssh_common.py:580 +#, python-format +msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:1222 +#: cinder/volume/drivers/huawei/ssh_common.py:650 #, python-format -msgid "Instance is already in Rescue Mode: %s" +msgid "_wait_for_luncopy: LUNcopy %(luncopyname)s status is %(status)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1296 +#: cinder/volume/drivers/huawei/ssh_common.py:688 #, python-format -msgid "Found %(instance_count)d hung reboots older than %(timeout)d seconds" +msgid "create_cloned_volume: src volume: %(src)s, tgt volume: %(tgt)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1300 -msgid "Automatically hard rebooting" +#: cinder/volume/drivers/huawei/ssh_common.py:697 +#, python-format +msgid "Source volume %(name)s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:1363 +#: cinder/volume/drivers/huawei/ssh_common.py:739 #, python-format -msgid "Setting migration %(migration_id)s to error: %(reason)s" +msgid "" +"extend_volume: extended volume name: %(extended_name)s new added volume " +"name: %(added_name)s new added volume size: %(added_size)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1374 +#: cinder/volume/drivers/huawei/ssh_common.py:747 #, python-format -msgid "" -"Automatically confirming migration %(migration_id)s for instance " -"%(instance_uuid)s" +msgid "extend_volume: volume %s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:1379 +#: cinder/volume/drivers/huawei/ssh_common.py:779 #, python-format -msgid "Instance %(instance_uuid)s not found" +msgid "create_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1383 -msgid "In ERROR state" +#: cinder/volume/drivers/huawei/ssh_common.py:785 +msgid "create_snapshot: Resource pool needs 1GB valid size at least." msgstr "" -#: cinder/virt/xenapi/vmops.py:1389 +#: cinder/volume/drivers/huawei/ssh_common.py:792 #, python-format -msgid "In %(task_state)s task_state, not RESIZE_VERIFY" +msgid "create_snapshot: Volume %(name)s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:1396 +#: cinder/volume/drivers/huawei/ssh_common.py:855 #, python-format -msgid "Error auto-confirming resize: %(e)s. Will retry later." +msgid "delete_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1418 -msgid "Could not get bandwidth info." +#: cinder/volume/drivers/huawei/ssh_common.py:865 +#, python-format +msgid "" +"delete_snapshot: Can not delete snapshot %s for it is a source LUN of " +"LUNCopy." 
msgstr "" -#: cinder/virt/xenapi/vmops.py:1469 -msgid "Injecting network info to xenstore" +#: cinder/volume/drivers/huawei/ssh_common.py:873 +#, python-format +msgid "delete_snapshot: Snapshot %(snap)s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:1483 -msgid "Creating vifs" +#: cinder/volume/drivers/huawei/ssh_common.py:916 +#, python-format +msgid "" +"%(func)s: %(msg)s\n" +"CLI command: %(cmd)s\n" +"CLI out: %(out)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1492 +#: cinder/volume/drivers/huawei/ssh_common.py:933 #, python-format -msgid "Creating VIF for network %(network_ref)s" +msgid "map_volume: Volume %s was not found." msgstr "" -#: cinder/virt/xenapi/vmops.py:1495 +#: cinder/volume/drivers/huawei/ssh_common.py:1079 #, python-format -msgid "Created VIF %(vif_ref)s, network %(network_ref)s" +msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1520 -msgid "Injecting hostname to xenstore" +#: cinder/volume/drivers/huawei/ssh_common.py:1102 +#, python-format +msgid "remove_map: Host %s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:1545 +#: cinder/volume/drivers/huawei/ssh_common.py:1106 #, python-format -msgid "" -"The agent call to %(method)s returned an invalid response: %(ret)r. " -"path=%(path)s; args=%(args)r" +msgid "remove_map: Volume %s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:1566 +#: cinder/volume/drivers/huawei/ssh_common.py:1119 #, python-format -msgid "TIMEOUT: The call to %(method)s timed out. args=%(args)r" +msgid "remove_map: No map between host %(host)s and volume %(volume)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1570 +#: cinder/volume/drivers/huawei/ssh_common.py:1138 #, python-format msgid "" -"NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. " -"args=%(args)r" +"_delete_map: There are IOs accessing the system. Retry to delete host map" +" %(mapid)s 10s later." msgstr "" -#: cinder/virt/xenapi/vmops.py:1575 +#: cinder/volume/drivers/huawei/ssh_common.py:1146 #, python-format -msgid "The call to %(method)s returned an error: %(e)s. args=%(args)r" +msgid "" +"_delete_map: Failed to delete host map %(mapid)s.\n" +"CLI out: %(out)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1661 -#, python-format -msgid "OpenSSL error: %s" +#: cinder/volume/drivers/huawei/ssh_common.py:1185 +msgid "_update_volume_stats: Updating volume stats." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1277 +msgid "_check_conf_file: Config file invalid. StoragePool must be specified." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:52 -msgid "creating sr within volume_utils" +#: cinder/volume/drivers/huawei/ssh_common.py:1311 +msgid "" +"_get_device_type: The driver only supports Dorado5100 and Dorado 2100 G2 " +"now." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:55 cinder/virt/xenapi/volume_utils.py:83 +#: cinder/volume/drivers/huawei/ssh_common.py:1389 #, python-format -msgid "type is = %s" +msgid "" +"create_volume_from_snapshot: %(device)s does not support create volume " +"from snapshot." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:58 cinder/virt/xenapi/volume_utils.py:86 +#: cinder/volume/drivers/huawei/ssh_common.py:1396 #, python-format -msgid "name = %s" +msgid "create_cloned_volume: %(device)s does not support clone volume." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:71 +#: cinder/volume/drivers/huawei/ssh_common.py:1404 #, python-format -msgid "Created %(label)s as %(sr_ref)s." 
+msgid "extend_volume: %(device)s does not support extend volume." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:76 cinder/virt/xenapi/volume_utils.py:174 -msgid "Unable to create Storage Repository" +#: cinder/volume/drivers/huawei/ssh_common.py:1413 +#, python-format +msgid "create_snapshot: %(device)s does not support snapshot." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:80 -msgid "introducing sr within volume_utils" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:132 +msgid "enter: do_setup" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:103 cinder/virt/xenapi/volume_utils.py:170 -#: cinder/virt/xenapi/volumeops.py:156 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:142 #, python-format -msgid "Introduced %(label)s as %(sr_ref)s." +msgid "Failed getting details for pool %s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:106 -msgid "Creating pbd for SR" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:179 +msgid "do_setup: No configured nodes." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:108 -msgid "Plugging SR" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:182 +msgid "leave: do_setup" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:116 cinder/virt/xenapi/volumeops.py:160 -msgid "Unable to introduce Storage Repository" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:186 +msgid "enter: check_for_setup_error" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:127 cinder/virt/xenapi/volumeops.py:50 -msgid "Unable to get SR using uuid" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:190 +msgid "Unable to determine system name" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:129 -#, python-format -msgid "Forgetting SR %s..." +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:193 +msgid "Unable to determine system id" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:137 -msgid "Unable to forget Storage Repository" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:196 +msgid "Unable to determine pool extent size" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:157 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:203 +#: cinder/volume/drivers/netapp/iscsi.py:122 +#: cinder/volume/drivers/netapp/nfs.py:639 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:157 #, python-format -msgid "Introducing %s..." +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:209 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:217 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:186 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:225 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:235 #, python-format -msgid "Unable to find SR from VBD %s" +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:254 +msgid "The connector does not contain the required information." 
msgstr "" -#: cinder/virt/xenapi/volume_utils.py:204 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:277 #, python-format -msgid "Ignoring exception %(exc)s when getting PBDs for %(sr_ref)s" +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:210 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:297 +msgid "CHAP secret exists for host but CHAP is disabled" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:302 #, python-format -msgid "Ignoring exception %(exc)s when unplugging PBD %(pbd)s" +msgid "initialize_connection: Failed to get attributes for volume %s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:234 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:314 #, python-format -msgid "Unable to introduce VDI on SR %s" +msgid "Did not find expected column name in lsvdisk: %s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:242 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:316 #, python-format -msgid "Unable to get record of VDI %s on" +msgid "initialize_connection: Missing volume attribute for volume %s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:264 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:333 #, python-format -msgid "Unable to introduce VDI for SR %s" +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:274 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:342 #, python-format -msgid "Error finding vdis in SR %s" +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:365 +msgid "" +"Could not get FC connection information for the host-volume connection. " +"Is the host configured properly for FC connections?" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:281 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:380 #, python-format -msgid "Unable to find vbd for vdi %s" +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:315 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:385 #, python-format -msgid "Unable to obtain target information %(data)s, %(mountpoint)s" +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:341 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:403 #, python-format -msgid "Mountpoint cannot be translated: %s" +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:64 -msgid "Could not find VDI ref" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:411 +msgid "terminate_connection: Failed to get host name from connector." msgstr "" -#: cinder/virt/xenapi/volumeops.py:69 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:421 #, python-format -msgid "Creating SR %s" +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:447 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:459 +msgid "create_cloned_volume: Source and destination size differ." 
msgstr "" -#: cinder/virt/xenapi/volumeops.py:73 -msgid "Could not create SR" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:470 +#, python-format +msgid "enter: extend_volume: volume %s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:76 -msgid "Could not retrieve SR record" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:474 +msgid "extend_volume: Extending a volume with snapshots is not supported." msgstr "" -#: cinder/virt/xenapi/volumeops.py:81 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:481 #, python-format -msgid "Introducing SR %s" +msgid "leave: extend_volume: volume %s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:85 -msgid "SR found in xapi database. No need to introduce" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:497 +#, python-format +msgid "enter: migrate_volume: id=%(id)s, host=%(host)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:90 -msgid "Could not introduce SR" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:523 +#, python-format +msgid "leave: migrate_volume: id=%(id)s, host=%(host)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:94 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:540 #, python-format -msgid "Checking for SR %s" +msgid "" +"enter: retype: id=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:106 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:581 #, python-format -msgid "SR %s not found in the xapi database" +msgid "" +"exit: retype: ild=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:622 +msgid "Could not get pool data from the storage" msgstr "" -#: cinder/virt/xenapi/volumeops.py:112 -msgid "Could not forget SR" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:623 +msgid "_update_volume_stats: Could not get storage pool data" msgstr "" -#: cinder/virt/xenapi/volumeops.py:121 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:44 #, python-format -msgid "Attach_volume: %(connection_info)s, %(instance_name)s, %(mountpoint)s" +msgid "Could not find key in output of command %(cmd)s: %(out)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:178 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:64 #, python-format -msgid "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s" +msgid "Failed to get code level (%s)." 
msgstr "" -#: cinder/virt/xenapi/volumeops.py:189 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:86 #, python-format -msgid "Unable to use SR %(sr_ref)s for instance %(instance_name)s" +msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:197 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:143 #, python-format -msgid "Unable to attach volume to instance %s" +msgid "WWPN on node %(node)s: %(wwpn)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:200 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:165 #, python-format -msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s" +msgid "Failed to find host %s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:210 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:178 #, python-format -msgid "Detach_volume: %(instance_name)s, %(mountpoint)s" +msgid "enter: get_host_from_connector: %s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:219 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:207 #, python-format -msgid "Unable to locate volume %s" +msgid "leave: get_host_from_connector: host %s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:227 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:218 #, python-format -msgid "Unable to detach volume %s" +msgid "enter: create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:224 +msgid "create_host: Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:235 +msgid "create_host: No initiators or wwpns supplied." msgstr "" -#: cinder/virt/xenapi/volumeops.py:232 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:265 #, python-format -msgid "Unable to destroy vbd %s" +msgid "leave: create_host: host %(host)s - %(host_name)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:239 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:275 #, python-format -msgid "Error purging SR %s" +msgid "enter: map_vol_to_host: volume %(volume_name)s to host %(host_name)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:241 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:301 #, python-format -msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s" +msgid "" +"leave: map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host " +"%(host_name)s" msgstr "" -#: cinder/vnc/xvp_proxy.py:98 cinder/vnc/xvp_proxy.py:103 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:311 #, python-format -msgid "Error in handshake: %s" +msgid "enter: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" msgstr "" -#: cinder/vnc/xvp_proxy.py:119 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:318 #, python-format -msgid "Invalid request: %s" +msgid "unmap_vol_from_host: No mapping of volume %(vol_name)s to any host found." msgstr "" -#: cinder/vnc/xvp_proxy.py:139 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:324 #, python-format -msgid "Request: %s" +msgid "" +"unmap_vol_from_host: Multiple mappings of volume %(vol_name)s found, no " +"host specified." msgstr "" -#: cinder/vnc/xvp_proxy.py:142 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:336 #, python-format -msgid "Request made with missing token: %s" +msgid "" +"unmap_vol_from_host: No mapping of volume %(vol_name)s to host %(host) " +"found." 
msgstr "" -#: cinder/vnc/xvp_proxy.py:153 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:348 #, python-format -msgid "Request made with invalid token: %s" +msgid "leave: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:377 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:383 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:390 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" msgstr "" -#: cinder/vnc/xvp_proxy.py:160 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:397 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:402 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:408 #, python-format -msgid "Unexpected error: %s" +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" msgstr "" -#: cinder/vnc/xvp_proxy.py:180 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:417 #, python-format -msgid "Starting cinder-xvpvncproxy node (version %s)" +msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s" msgstr "" -#: cinder/volume/api.py:74 cinder/volume/api.py:220 -msgid "status must be available" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:452 +msgid "Protocol must be specified as ' iSCSI' or ' FC'." msgstr "" -#: cinder/volume/api.py:85 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:495 #, python-format -msgid "Quota exceeded for %(pid)s, tried to create %(size)sG volume" +msgid "enter: create_vdisk: vdisk %s " msgstr "" -#: cinder/volume/api.py:137 -msgid "Volume status must be available or error" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:498 +#, python-format +msgid "leave: _create_vdisk: volume %s " msgstr "" -#: cinder/volume/api.py:142 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:525 #, python-format -msgid "Volume still has %d dependent snapshots" +msgid "" +"Unexecpted mapping status %(status)s for mapping%(id)s. Attributes: " +"%(attr)s" msgstr "" -#: cinder/volume/api.py:223 -msgid "already attached" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:535 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within theallotted %(to)d " +"seconds timeout. Terminating." 
msgstr "" -#: cinder/volume/api.py:230 -msgid "already detached" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:544 +#, python-format +msgid "" +"enter: run_flashcopy: execute FlashCopy from source %(source)s to target " +"%(target)s" msgstr "" -#: cinder/volume/api.py:292 -msgid "must be available" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:552 +#, python-format +msgid "leave: run_flashcopy: FlashCopy started from %(source)s to %(target)s" msgstr "" -#: cinder/volume/api.py:325 -msgid "Volume Snapshot status must be available or error" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:572 +#, python-format +msgid "Loopcall: _check_vdisk_fc_mappings(), vdisk %s" msgstr "" -#: cinder/volume/driver.py:96 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:595 #, python-format -msgid "Recovering from a failed execute. Try number %s" +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" msgstr "" -#: cinder/volume/driver.py:106 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:631 #, python-format -msgid "volume group %s doesn't exist" +msgid "Calling _ensure_vdisk_no_fc_mappings: vdisk %s" msgstr "" -#: cinder/volume/driver.py:270 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:639 #, python-format -msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %d" +msgid "enter: delete_vdisk: vdisk %s" msgstr "" -#: cinder/volume/driver.py:318 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:641 #, python-format -msgid "Skipping remove_export. No iscsi_target provisioned for volume: %d" +msgid "Tried to delete non-existant vdisk %s." msgstr "" -#: cinder/volume/driver.py:327 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:645 +#, python-format +msgid "leave: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:649 +#, python-format +msgid "enter: create_copy: snapshot %(src)s to %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:654 +#, python-format +msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:669 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt)s from vdisk %(src)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:691 +msgid "migrate_volume started without a vdisk copy in the expected pool." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:743 #, python-format msgid "" -"Skipping remove_export. 
No iscsi_target is presently exported for volume:" -" %d" +"Ignore change IO group as storage code level is %(code_level)s, below " +"then 6.4.0.0" msgstr "" -#: cinder/volume/driver.py:337 -msgid "ISCSI provider_location not stored, using discovery" +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:35 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:211 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:244 +#, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/volume/driver.py:384 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:53 #, python-format -msgid "Could not find iSCSI export for volume %s" +msgid "Expected no output from CLI command %(cmd)s, got %(out)s" msgstr "" -#: cinder/volume/driver.py:388 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:65 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:256 #, python-format -msgid "ISCSI Discovery: Found %s" +msgid "" +"Failed to parse CLI output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:142 +msgid "Must pass wwpn or host to lsfabric." msgstr "" -#: cinder/volume/driver.py:466 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:158 #, python-format -msgid "Cannot confirm exported volume id:%(volume_id)s." +msgid "Did not find success message nor error for %(fun)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:163 +msgid "" +"storwize_svc_multihostmap_enabled is set to False, not allowing multi " +"host mapping." msgstr "" -#: cinder/volume/driver.py:493 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:347 #, python-format -msgid "FAKE ISCSI: %s" +msgid "Did not find expected key %(key)s in %(fun)s: %(raw)s" msgstr "" -#: cinder/volume/driver.py:505 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:382 #, python-format -msgid "rbd has no pool %s" +msgid "" +"Unexpected CLI response: header/row mismatch. header: %(header)s, row: " +"%(row)s" msgstr "" -#: cinder/volume/driver.py:579 +#: cinder/volume/drivers/netapp/api.py:419 #, python-format -msgid "Sheepdog is not working: %s" +msgid "No element by given name %s." msgstr "" -#: cinder/volume/driver.py:581 -msgid "Sheepdog is not working" +#: cinder/volume/drivers/netapp/api.py:440 +msgid "Not a valid value for NaElement." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:444 +msgid "NaElement name cannot be null." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:468 +msgid "Type cannot be converted into NaElement." 
+msgstr "" + +#: cinder/volume/drivers/netapp/common.py:75 +msgid "Required configuration not found" msgstr "" -#: cinder/volume/driver.py:680 cinder/volume/driver.py:685 +#: cinder/volume/drivers/netapp/common.py:103 #, python-format -msgid "LoggingVolumeDriver: %s" +msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s" msgstr "" -#: cinder/volume/manager.py:96 +#: cinder/volume/drivers/netapp/common.py:109 #, python-format -msgid "Re-exporting %s volumes" +msgid "Storage family %s is not supported" msgstr "" -#: cinder/volume/manager.py:101 +#: cinder/volume/drivers/netapp/common.py:116 #, python-format -msgid "volume %s: skipping export" +msgid "No default storage protocol found for storage family %(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:123 +#, python-format +msgid "" +"Protocol %(storage_protocol)s is not supported for storage family " +"%(storage_family)s" msgstr "" -#: cinder/volume/manager.py:107 +#: cinder/volume/drivers/netapp/common.py:130 #, python-format -msgid "volume %s: creating" +msgid "" +"NetApp driver of family %(storage_family)s and protocol " +"%(storage_protocol)s loaded" msgstr "" -#: cinder/volume/manager.py:119 +#: cinder/volume/drivers/netapp/common.py:139 +msgid "Only loading netapp drivers supported." +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:158 #, python-format -msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgid "" +"The configured NetApp driver is deprecated. Please refer the link to " +"resolve the issue '%s'." msgstr "" -#: cinder/volume/manager.py:131 +#: cinder/volume/drivers/netapp/iscsi.py:69 #, python-format -msgid "volume %s: creating export" +msgid "No metadata property %(prop)s defined for the LUN %(name)s" msgstr "" -#: cinder/volume/manager.py:144 +#: cinder/volume/drivers/netapp/iscsi.py:105 #, python-format -msgid "volume %s: created successfully" +msgid "Using NetApp filer: %s" msgstr "" -#: cinder/volume/manager.py:153 -msgid "Volume is still attached" +#: cinder/volume/drivers/netapp/iscsi.py:150 +msgid "Success getting LUN list from server" msgstr "" -#: cinder/volume/manager.py:155 -msgid "Volume is not local to this node" +#: cinder/volume/drivers/netapp/iscsi.py:166 +#, python-format +msgid "Created LUN with name %s" msgstr "" -#: cinder/volume/manager.py:159 +#: cinder/volume/drivers/netapp/iscsi.py:175 #, python-format -msgid "volume %s: removing export" +msgid "No entry in LUN table for volume/snapshot %(name)s." 
msgstr "" -#: cinder/volume/manager.py:161 +#: cinder/volume/drivers/netapp/iscsi.py:191 #, python-format -msgid "volume %s: deleting" +msgid "Destroyed LUN %s" msgstr "" -#: cinder/volume/manager.py:164 +#: cinder/volume/drivers/netapp/iscsi.py:227 #, python-format -msgid "volume %s: volume is busy" +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" msgstr "" -#: cinder/volume/manager.py:176 +#: cinder/volume/drivers/netapp/iscsi.py:232 #, python-format -msgid "volume %s: deleted successfully" +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" msgstr "" -#: cinder/volume/manager.py:183 +#: cinder/volume/drivers/netapp/iscsi.py:238 #, python-format -msgid "snapshot %s: creating" +msgid "Failed to get LUN target details for the LUN %s" msgstr "" -#: cinder/volume/manager.py:187 +#: cinder/volume/drivers/netapp/iscsi.py:249 #, python-format -msgid "snapshot %(snap_name)s: creating" +msgid "Failed to get target portal for the LUN %s" msgstr "" -#: cinder/volume/manager.py:202 +#: cinder/volume/drivers/netapp/iscsi.py:252 #, python-format -msgid "snapshot %s: created successfully" +msgid "Failed to get target IQN for the LUN %s" msgstr "" -#: cinder/volume/manager.py:211 +#: cinder/volume/drivers/netapp/iscsi.py:290 #, python-format -msgid "snapshot %s: deleting" +msgid "Snapshot %s deletion successful" msgstr "" -#: cinder/volume/manager.py:214 +#: cinder/volume/drivers/netapp/iscsi.py:310 +#: cinder/volume/drivers/netapp/iscsi.py:565 +#: cinder/volume/drivers/netapp/nfs.py:99 +#: cinder/volume/drivers/netapp/nfs.py:206 #, python-format -msgid "snapshot %s: snapshot is busy" +msgid "Resizing %s failed. Cleaning volume." msgstr "" -#: cinder/volume/manager.py:226 +#: cinder/volume/drivers/netapp/iscsi.py:325 #, python-format -msgid "snapshot %s: deleted successfully" +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" msgstr "" -#: cinder/volume/manager.py:310 -msgid "Checking volume capabilities" +#: cinder/volume/drivers/netapp/iscsi.py:412 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" msgstr "" -#: cinder/volume/manager.py:314 +#: cinder/volume/drivers/netapp/iscsi.py:431 #, python-format -msgid "New capabilities found: %s" +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" msgstr "" -#: cinder/volume/manager.py:325 -msgid "Clear capabilities" +#: cinder/volume/drivers/netapp/iscsi.py:511 +msgid "Object is not a NetApp LUN." msgstr "" -#: cinder/volume/manager.py:329 +#: cinder/volume/drivers/netapp/iscsi.py:543 #, python-format -msgid "Notification {%s} received" +msgid "Message: %s" msgstr "" -#: cinder/volume/netapp.py:79 +#: cinder/volume/drivers/netapp/iscsi.py:545 #, python-format -msgid "API %(name)sfailed: %(reason)s" +msgid "Error getting lun attribute. Exception: %s" msgstr "" -#: cinder/volume/netapp.py:109 +#: cinder/volume/drivers/netapp/iscsi.py:600 #, python-format -msgid "%s is not set" +msgid "No need to extend volume %s as it is already the requested new size." msgstr "" -#: cinder/volume/netapp.py:128 -#, fuzzy -msgid "Connected to DFM server" -msgstr "Muling kumonekta sa queue" +#: cinder/volume/drivers/netapp/iscsi.py:606 +#, python-format +msgid "Resizing lun %s directly to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:633 +#, python-format +msgid "Lun %(path)s geometry failed. Message - %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:662 +#, python-format +msgid "Moving lun %(name)s to %(new_name)s." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:677 +#, python-format +msgid "Resizing lun %s using sub clone to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:684 +#, python-format +msgid "%s cannot be sub clone resized as it is hosted on compressed volume" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:690 +#, python-format +msgid "%s cannot be sub clone resized as it contains no blocks." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:707 +#, python-format +msgid "Post clone resize lun %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:718 +#, python-format +msgid "Failure staging lun %s to tmp." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:723 +#, python-format +msgid "Failure moving new cloned lun to %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:727 +#, python-format +msgid "Failure deleting staged tmp lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:730 +#, python-format +msgid "Unknown exception in post clone resize lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:732 +#, python-format +msgid "Exception details: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:736 +msgid "Getting lun block count." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:741 +#, python-format +msgid "Failure getting lun info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:785 +#, python-format +msgid "Failed to get vol with required size and extra specs for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:796 +#, python-format +msgid "Error provisioning vol %(name)s on %(volume)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:841 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:982 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:986 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1087 +msgid "Cluster ssc is not updated. No volume stats found." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1149 +#: cinder/volume/drivers/netapp/nfs.py:1080 +msgid "Unsupported ONTAP version. ONTAP version 7.3.1 and above is supported." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1153 +#: cinder/volume/drivers/netapp/nfs.py:1084 +#: cinder/volume/drivers/netapp/utils.py:320 +msgid "Api version could not be determined." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1164 +#, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1273 +#, python-format +msgid "Error finding luns for volume %s. Verify volume exists." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1390 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1393 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1456 +msgid "Volume refresh job already running. Returning..." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1462 +#, python-format +msgid "Error refreshing vol capacity. Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1470 +#, python-format +msgid "Refreshing capacity info for %s." 
+msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:104 +#: cinder/volume/drivers/netapp/nfs.py:211 +#, python-format +msgid "NFS file %s not discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:224 +#, python-format +msgid "Copied image to volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:230 +#, python-format +msgid "Registering image in cache %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:237 +#, python-format +msgid "" +"Exception while registering image %(image_id)s in cache. Exception: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:250 +#, python-format +msgid "Found cache file for image %(image_id)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:263 +#, python-format +msgid "Cloning img from cache for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:271 +msgid "Image cache cleaning in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:282 +msgid "Image cache cleaning in progress." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:293 +#, python-format +msgid "Cleaning cache for share %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:298 +#, python-format +msgid "Files to be queued for deletion %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:305 +#, python-format +msgid "Exception during cache cleaning %(share)s. Message - %(ex)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:311 +msgid "Image cache cleaning done." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:336 +#, python-format +msgid "Bytes to free %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:343 +#, python-format +msgid "Delete file path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:358 +#, python-format +msgid "Deleting file at path %s" +msgstr "" -#: cinder/volume/netapp.py:159 +#: cinder/volume/drivers/netapp/nfs.py:363 #, python-format -msgid "Job failed: %s" +msgid "Exception during deleting %s" msgstr "" -#: cinder/volume/netapp.py:240 -msgid "Failed to provision dataset member" +#: cinder/volume/drivers/netapp/nfs.py:395 +#, python-format +msgid "Unexpected exception in cloning image %(image_id)s. 
Message: %(msg)s" msgstr "" -#: cinder/volume/netapp.py:252 -msgid "No LUN was created by the provision job" +#: cinder/volume/drivers/netapp/nfs.py:411 +#, python-format +msgid "Cloning image %s from cache" msgstr "" -#: cinder/volume/netapp.py:261 cinder/volume/netapp.py:433 +#: cinder/volume/drivers/netapp/nfs.py:415 #, python-format -msgid "Failed to find LUN ID for volume %s" +msgid "Cache share: %s" msgstr "" -#: cinder/volume/netapp.py:280 -msgid "Failed to remove and delete dataset member" -msgstr "" +#: cinder/volume/drivers/netapp/nfs.py:425 +#, python-format +msgid "Unexpected exception during image cloning in share %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:431 +#, python-format +msgid "Cloning image %s directly in share" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:436 +#, python-format +msgid "Share is cloneable %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:443 +#, python-format +msgid "Image is raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:450 +#, python-format +msgid "Image will locally be converted to raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:457 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:467 +#, python-format +msgid "Performing post clone for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:474 +msgid "NFS file could not be discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:478 +msgid "Checking file for resize" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:482 +#, python-format +msgid "Resizing file to %sG" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:488 +msgid "Resizing image file failed." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:510 +msgid "Discover file retries exhausted." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:529 +#, python-format +msgid "Image location not in the expected format %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:557 +#, python-format +msgid "Found possible share matches %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:561 +msgid "Unexpected exception while short listing used share." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:599 +#, python-format +msgid "Extending volume %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:710 +#, python-format +msgid "Shares on vserver %s will only be used for provisioning." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:714 +#: cinder/volume/drivers/netapp/nfs.py:892 +msgid "No vserver set in config. SSC will be disabled." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:757 +#, python-format +msgid "Exception creating vol %(name)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:765 +#, python-format +msgid "Volume %s could not be created on shares." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:815 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:856 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:862 +#, python-format +msgid "" +"Cloning with params volume %(volume)s, src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:924 +msgid "No cluster ssc stats found. Wait for next volume stats update." 
+msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:941 +msgid "No shares found hence skipping ssc refresh." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:978 +#: cinder/volume/drivers/netapp/nfs.py:1221 +#, python-format +msgid "Shortlisted del elg files %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:983 +#: cinder/volume/drivers/netapp/nfs.py:1226 +#, python-format +msgid "Getting file usage for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:988 +#: cinder/volume/drivers/netapp/nfs.py:1231 +#, python-format +msgid "file-usage for path %(path)s is %(bytes)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1005 +#: cinder/volume/drivers/netapp/nfs.py:1268 +#, python-format +msgid "Share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1007 +#: cinder/volume/drivers/netapp/nfs.py:1270 +#, python-format +msgid "No share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1038 +#, python-format +msgid "Found volume %(vol)s for share %(share)s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1129 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1139 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:241 +#, python-format +msgid "Unexpected error while creating ssc vol list. Message - %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:272 +#, python-format +msgid "Exception querying aggr options. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:313 +#, python-format +msgid "Exception querying sis information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:347 +#, python-format +msgid "Exception querying mirror information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:379 +#, python-format +msgid "Exception querying storage disk. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:421 +#, python-format +msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:455 +#, python-format +msgid "Successfully completed stale refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:482 +#, python-format +msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:488 +#, python-format +msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:500 +msgid "Backend not a VolumeDriver." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:502 +msgid "Backend server not NaServer." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:505 +msgid "ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:517 +msgid "refresh stale ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:616 +msgid "Fatal error: User not permitted to query NetApp volumes." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:623 +#, python-format +msgid "" +"The user does not have access or sufficient privileges to use all ssc " +"apis. The ssc features %s may not work as expected." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:122 +msgid "ems executed successfully." 
+msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:124 +#, python-format +msgid "Failed to invoke ems. Message : %s" +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:137 +msgid "" +"It is not the recommended way to use drivers by NetApp. Please use " +"NetAppDriver to achieve the functionality." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:160 +msgid "Requires an NaServer instance." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:317 +msgid "Unsupported Clustered Data ONTAP version." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:99 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:150 +#, python-format +msgid "Extending volume: %(id)s New size: %(size)s GB" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:166 +#, python-format +msgid "Volume %s does not exist, it seems it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:179 +#, python-format +msgid "Cannot delete snapshot %(origin): %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:190 +#, python-format +msgid "Creating temp snapshot of the original volume: %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:200 +#: cinder/volume/drivers/nexenta/nfs.py:200 +#, python-format +msgid "Volume creation failed, deleting created snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:205 +#: cinder/volume/drivers/nexenta/nfs.py:205 +#, python-format +msgid "Failed to delete zfs snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:223 +#, python-format +msgid "Enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:250 +#, python-format +msgid "Remote NexentaStor appliance at %s should be SSH-bound." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:267 +#, python-format +msgid "" +"Cannot send source snapshot %(src)s to destination %(dst)s. Reason: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:275 +#, python-format +msgid "" +"Cannot delete temporary source snapshot %(src)s on NexentaStor Appliance:" +" %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:281 +#, python-format +msgid "Cannot delete source volume %(volume)s on NexentaStor Appliance: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:318 +#, python-format +msgid "Snapshot %s does not exist, it seems it was already deleted." 
+msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:439 +#: cinder/volume/drivers/windows/windows_utils.py:230 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:449 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:461 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:471 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:481 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:514 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:522 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:83 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:88 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:89 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:90 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:96 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:85 +#, python-format +msgid "Volume %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:89 +#, python-format +msgid "Folder %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:114 +#, python-format +msgid "Creating folder on Nexenta Store %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:146 +#, python-format +msgid "Cannot destroy created folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:176 +#, python-format +msgid "Cannot destroy cloned folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:227 +#, python-format +msgid "Folder %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:237 +#: cinder/volume/drivers/nexenta/nfs.py:268 +#, python-format +msgid "Snapshot %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:302 +#, python-format +msgid "Creating regular file: %s.This may take some time." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:313 +#, python-format +msgid "Regular file: %s created." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:365 +#, python-format +msgid "Sharing folder %s on Nexenta Store" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:393 +#, python-format +msgid "Shares loaded: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/utils.py:46 +#, python-format +msgid "Invalid value: \"%s\"" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:93 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:99 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:107 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:137 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:190 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:246 +#, python-format +msgid "Snapshot info: %(name)s => %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:321 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:79 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:166 +#, python-format +msgid "Invalid hp3parclient version. Version %s or greater required." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:179 +#, python-format +msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:193 +#, python-format +msgid "HP3PARCommon %(common_ver)s, hp3parclient %(rest_ver)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:212 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:494 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:228 +#, python-format +msgid "Failed to get domain because CPG (%s) doesn't exist on array." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:247 +#, python-format +msgid "Error extending volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:347 +#, python-format +msgid "command %s failed" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:390 +#, fuzzy, python-format +msgid "Error running ssh command: %s" +msgstr "Merong hindi-inaasahang pagkakamali habang tumatakbo ang command." + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:586 +#, python-format +msgid "VV Set %s does not exist." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:633 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:684 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:752 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1004 +#, python-format +msgid "Failure in update_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1019 +#, python-format +msgid "Failure in clear_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1031 +#, python-format +msgid "Error attaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1039 +#, python-format +msgid "Error detaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:124 +#, python-format +msgid "Invalid IP address format '%s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:158 +#, python-format +msgid "" +"Found invalid iSCSI IP address(s) in configuration option(s) " +"hp3par_iscsi_ips or iscsi_ip_address '%s.'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:164 +msgid "At least one valid iSCSI IP address must be set." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:266 +msgid "Least busy iSCSI port not found, using first iSCSI port in list." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:75 +#, python-format +msgid "Failure while invoking function: %(func)s. Error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:162 +#, python-format +msgid "Error while terminating session: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:165 +msgid "Successfully established connection to the server." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:172 +#, python-format +msgid "Error while logging out the user: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:218 +#, python-format +msgid "" +"Not authenticated error occurred. Will create session and try API call " +"again: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:258 +#, python-format +msgid "Task: %(task)s progress: %(prog)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:262 +#, python-format +msgid "Task %s status: success." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:266 +#: cinder/volume/drivers/vmware/api.py:271 +#, python-format +msgid "Task: %(task)s failed with error: %(err)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:290 +msgid "Lease is ready." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:294 +msgid "Lease initializing..." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:304 +#, python-format +msgid "Error: unknown lease state %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:51 +#, python-format +msgid "Read %(bytes)s out of %(max)s from ThreadSafePipe." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:56 +#, python-format +msgid "Completed transfer of size %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:102 +#, python-format +msgid "Initiating image service update on image: %(image)s with meta: %(meta)s" +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:117 +#, python-format +msgid "Glance image: %s is now active." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:123 +#, python-format +msgid "Glance image: %s is in killed state." 
+msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:132 +#, python-format +msgid "Glance image %(id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:171 +#, python-format +msgid "" +"Exception during HTTP connection close in VMwareHTTPWrite. Exception is " +"%s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:203 +#: cinder/volume/drivers/vmware/read_write_util.py:292 +msgid "Could not retrieve URL from lease." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:206 +#, python-format +msgid "Opening vmdk url: %s for write." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:231 +#, python-format +msgid "Written %s bytes to vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:242 +#: cinder/volume/drivers/vmware/read_write_util.py:318 +#, python-format +msgid "Updating progress to %s percent." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:258 +#: cinder/volume/drivers/vmware/read_write_util.py:334 +msgid "Lease released." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:260 +#: cinder/volume/drivers/vmware/read_write_util.py:336 +#, python-format +msgid "Lease is already in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:295 +#, python-format +msgid "Opening vmdk url: %s for read." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:307 +#, python-format +msgid "Read %s bytes from vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:150 +#, python-format +msgid "Error(s): %s occurred in the call to RetrievePropertiesEx." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:189 +#, python-format +msgid "No such SOAP method %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:198 +#, python-format +msgid "httplib error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:209 +#, python-format +msgid "Socket error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:218 +#, python-format +msgid "Type error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:225 +#, python-format +msgid "Error in %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:112 +#, python-format +msgid "Returning spec value %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:115 +#, python-format +msgid "Invalid spec value: %s specified." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:118 +#, python-format +msgid "Returning default spec value: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:169 +#, python-format +msgid "%s not set." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:174 +#, python-format +msgid "Successfully setup driver: %(driver)s for server: %(ip)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:219 +msgid "Backing not available, no operation to be performed." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:287 +#, python-format +msgid "" +"Unable to pick datastore to accommodate %(size)s bytes from the " +"datastores: %(dss)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:293 +#, python-format +msgid "" +"Selected datastore: %(datastore)s with %(host_count)d connected host(s) " +"for the volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:375 +#, python-format +msgid "" +"Unable to find suitable datastore for volume of size: %(vol)s GB under " +"host: %(host)s. 
More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:385 +#, python-format +msgid "Unable to find host to accommodate a disk of size: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:412 +#, python-format +msgid "" +"Unable to find suitable datastore for volume: %(vol)s under host: " +"%(host)s. More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:422 +#, python-format +msgid "Unable to create volume: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:441 +#, python-format +msgid "The instance: %s for which initialize connection is called, exists." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:448 +#, python-format +msgid "There is no backing for the volume: %s. Need to create one." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:456 +msgid "The instance for which initialize connection is called, does not exist." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:461 +#, python-format +msgid "Trying to boot from an empty volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:470 +#, python-format +msgid "" +"Returning connection_info: %(info)s for volume: %(volume)s with " +"connector: %(connector)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:518 +#, python-format +msgid "Snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:523 +#, python-format +msgid "There is no backing, so will not create snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:528 +#, python-format +msgid "Successfully created snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:549 +#, python-format +msgid "Delete snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:554 +#, python-format +msgid "There is no backing, and so there is no snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:558 +#, python-format +msgid "Successfully deleted snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:586 +#, python-format +msgid "Successfully cloned new backing: %(back)s from source VMDK file: %(vmdk)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:603 +#, python-format +msgid "" +"There is no backing for the source volume: %(svol)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:633 +#, python-format +msgid "" +"There is no backing for the source snapshot: %(snap)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:642 +#: cinder/volume/drivers/vmware/vmdk.py:982 +#, python-format +msgid "" +"There is no snapshot point for the snapshoted volume: %(snap)s. Not " +"creating any backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:678 +#, python-format +msgid "Cannot create image of disk format: %s. Only vmdk disk format is accepted." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:713 +#: cinder/volume/drivers/vmware/vmdk.py:771 +#, python-format +msgid "Fetching glance image: %(id)s to server: %(host)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:722 +#: cinder/volume/drivers/vmware/vmdk.py:792 +#, python-format +msgid "Done copying image: %(id)s to volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:725 +#, python-format +msgid "" +"Exception in copy_image_to_volume: %(excep)s. Deleting the backing: " +"%(back)s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:746 +#, python-format +msgid "Exception in _select_ds_for_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:749 +#, python-format +msgid "Selected datastore %(ds)s for new volume of size %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:784 +#, python-format +msgid "Exception in copy_image_to_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:787 +#, python-format +msgid "Deleting the backing: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:808 +#, python-format +msgid "Copy glance image: %s to create new volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:842 +msgid "Upload to glance of attached volume is not supported." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:847 +#, python-format +msgid "Copy Volume: %s to new image." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:853 +#, python-format +msgid "Backing not found, creating for volume: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:872 +#, python-format +msgid "Done copying volume %(vol)s to a new image %(img)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:922 +#, python-format +msgid "Relocating volume: %(backing)s to %(ds)s and %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:961 +#: cinder/volume/drivers/vmware/volumeops.py:630 +#, python-format +msgid "Successfully created clone: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:974 +#, python-format +msgid "" +"There is no backing for the snapshoted volume: %(snap)s. Not creating any" +" backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1010 +#, python-format +msgid "" +"There is no backing for the source volume: %(src)s. Not creating any " +"backing for volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1018 +#, python-format +msgid "Linked clone of source volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:94 +#, python-format +msgid "Downloading image: %s from glance image server as a flat vmdk file." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:107 +#: cinder/volume/drivers/vmware/vmware_images.py:126 +#, python-format +msgid "Downloaded image: %s from glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:113 +#, python-format +msgid "Downloading image: %s from glance image server using HttpNfc import." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:132 +#, python-format +msgid "Uploading image: %s to the Glance image server using HttpNfc export." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:158 +#, python-format +msgid "Uploaded image: %s to the Glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:87 +#, python-format +msgid "Did not find any backing with name: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:94 +#, python-format +msgid "Deleting the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:97 +#, python-format +msgid "Initiated deletion of VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:99 +#, python-format +msgid "Deleted the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:237 +#, python-format +msgid "There are no valid datastores attached to %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:289 +#, python-format +msgid "" +"Creating folder: %(child_folder_name)s under parent folder: " +"%(parent_folder)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:306 +#, python-format +msgid "Child folder already present: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:314 +#, python-format +msgid "Created child folder: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:365 +#, python-format +msgid "Spec for creating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:383 +#, python-format +msgid "" +"Creating volume backing name: %(name)s disk_type: %(disk_type)s size_kb: " +"%(size_kb)s at folder: %(folder)s resourse pool: %(resource_pool)s " +"datastore name: %(ds_name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:395 +#, python-format +msgid "Initiated creation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:398 +#, python-format +msgid "Successfully created volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:438 +#, python-format +msgid "Spec for relocating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:452 +#, python-format +msgid "" +"Relocating backing: %(backing)s to datastore: %(ds)s and resource pool: " +"%(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:462 +#, python-format +msgid "Initiated relocation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:464 +#, python-format +msgid "" +"Successfully relocated volume backing: %(backing)s to datastore: %(ds)s " +"and resource pool: %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:474 +#, python-format +msgid "Moving backing: %(backing)s to folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:479 +#, python-format +msgid "Initiated move of volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:482 +#, python-format +msgid "Successfully moved volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:494 +#, python-format +msgid "Snapshoting backing: %(backing)s with name: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:501 +#, python-format +msgid "Initiated snapshot of volume backing: %(backing)s named: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:505 +#, python-format +msgid "Successfully created snapshot: %(snap)s for volume backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:553 +#, python-format +msgid "Deleting the snapshot: %(name)s from backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:558 +#, python-format +msgid "" +"Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " +"delete anything." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:565 +#, python-format +msgid "Initiated snapshot: %(name)s deletion for backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:569 +#, python-format +msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:597 +#, python-format +msgid "Spec for cloning the backing: %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:613 +#, python-format +msgid "" +"Creating a clone of backing: %(back)s, named: %(name)s, clone type: " +"%(type)s from snapshot: %(snap)s on datastore: %(ds)s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:627 +#, python-format +msgid "Initiated clone of backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:638 +#, python-format +msgid "Deleting file: %(file)s under datacenter: %(dc)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:646 +#, python-format +msgid "Initiated deletion via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:648 +#, python-format +msgid "Successfully deleted file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:701 +msgid "Copying disk data before snapshot of the VM" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:711 +#, python-format +msgid "Initiated copying disk data via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:713 +#, python-format +msgid "Successfully copied disk at: %(src)s to: %(dest)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:722 +#, python-format +msgid "Deleting vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:729 +#, python-format +msgid "Initiated deleting vmdk file via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:731 +#, python-format +msgid "Deleted vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/windows/windows.py:102 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:47 +#, python-format +msgid "" +"check_for_setup_error: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:53 +msgid "check_for_setup_error: there is no ISCSI traffic listening." +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:63 +#, python-format +msgid "" +"get_host_information: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:73 +#, python-format +msgid "" +"get_host_information: the ISCSI target information could not be " +"retrieved. WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:105 +#, python-format +msgid "" +"associate_initiator_with_iscsi_target: an association between initiator: " +"%(init)s and target name: %(target)s could not be established. WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:123 +#, python-format +msgid "" +"delete_iscsi_target: error when deleting the iscsi target associated with" +" target name: %(target)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:139 +#, python-format +msgid "" +"create_volume: error when creating the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:157 +#, python-format +msgid "" +"delete_volume: error when deleting the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:177 +#, python-format +msgid "" +"create_snapshot: error when creating the snapshot name: %(vol_name)s . 
" +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:193 +#, python-format +msgid "" +"create_volume_from_snapshot: error when creating the volume name: " +"%(vol_name)s from snapshot name: %(snap_name)s. WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:208 +#, python-format +msgid "" +"delete_snapshot: error when deleting the snapshot name: %(snap_name)s . " +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:223 +#, python-format +msgid "" +"create_iscsi_target: error when creating iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:240 +#, python-format +msgid "" +"remove_iscsi_target: error when deleting iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:255 +#, python-format +msgid "" +"add_disk_to_target: error adding disk associated to volume : %(vol_name)s" +" to the target name: %(tar_name)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:273 +#, python-format +msgid "" +"copy_vhd_disk: error when copying disk from source path : %(src_path)s to" +" destination path: %(dest_path)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:290 +#, python-format +msgid "" +"extend: error when extending the volume: %(vol_name)s .WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/flows/common.py:52 +#, python-format +msgid "Restoring source %(source_volid)s status to %(status)s" +msgstr "" + +#: cinder/volume/flows/common.py:58 +#, python-format +msgid "" +"Failed setting source volume %(source_volid)s back to its initial " +"%(source_status)s status" +msgstr "" + +#: cinder/volume/flows/common.py:83 +#, python-format +msgid "Updating volume: %(volume_id)s with %(update)s due to: %(reason)s" +msgstr "" + +#: cinder/volume/flows/common.py:90 +#: cinder/volume/flows/manager/create_volume.py:676 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(update)s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:81 +#, python-format +msgid "Originating snapshot status must be one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:103 +#, python-format +msgid "" +"Unable to create a volume from an originating source volume when its " +"status is not one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:126 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than the snapshot size " +"%(snap_size)sGB. They must be >= original snapshot size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:135 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than original volume size " +"%(source_size)sGB. They must be >= original volume size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:144 +#, python-format +msgid "Volume size %(size)s must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:186 +#, python-format +msgid "" +"Size of specified image %(image_size)sGB is larger than volume size " +"%(volume_size)sGB." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:194 +#, python-format +msgid "" +"Volume size %(volume_size)sGB cannot be smaller than the image minDisk " +"size %(min_disk)sGB." 
+msgstr "" + +#: cinder/volume/flows/api/create_volume.py:212 +#, python-format +msgid "Metadata property key %s greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:217 +#, python-format +msgid "Metadata property key %s value greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:254 +#, python-format +msgid "Availability zone '%s' is invalid" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:267 +msgid "Volume must be in the same availability zone as the snapshot" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:276 +msgid "Volume must be in the same availability zone as the source volume" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:315 +msgid "Volume type will be changed to be the same as the source volume." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:463 +#, python-format +msgid "Failed destroying volume entry %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:546 +#, python-format +msgid "Failed rolling back quota for %s reservations" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:590 +#, python-format +msgid "Failed to update quota for deleting volume: %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:678 +#: cinder/volume/flows/manager/create_volume.py:192 +#, python-format +msgid "Volume %s: create failed" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:682 +msgid "Unexpected build error:" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:105 +#, python-format +msgid "" +"Volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d due to " +"%(reason)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:124 +#, python-format +msgid "Volume %s: re-scheduled" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:141 +#, python-format +msgid "Updating volume %(volume_id)s with %(update)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:146 +#, python-format +msgid "Volume %s: resetting 'creating' status failed." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:165 +#, python-format +msgid "Volume %s: rescheduling failed" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:308 +#, python-format +msgid "" +"Failed notifying about the volume action %(event)s for volume " +"%(volume_id)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:345 +#, python-format +msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:347 +#, python-format +msgid "" +"Failed updating volume %(vol_id)s metadata using the provided " +"%(src_type)s %(src_id)s metadata" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:405 +#, python-format +msgid "" +"Failed fetching snapshot %(snapshot_id)s bootable flag using the provided" +" glance snapshot %(snapshot_ref_id)s volume reference" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:418 +#, python-format +msgid "Marking volume %s as bootable." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:421 +#, python-format +msgid "Failed updating volume %(volume_id)s bootable flag to true" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:448 +#, python-format +msgid "" +"Attempting download of %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s." 
+msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:455 +#: cinder/volume/flows/manager/create_volume.py:466 +#, python-format +msgid "" +"Failed to copy image %(image_id)s to volume: %(volume_id)s, error: " +"%(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:461 +#, python-format +msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:475 +#, python-format +msgid "" +"Downloaded image %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s successfully." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:512 +#, python-format +msgid "" +"Creating volume glance metadata for volume %(volume_id)s backed by image " +"%(image_id)s with: %(vol_metadata)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:526 +#, python-format +msgid "" +"Cloning %(volume_id)s from image %(image_id)s at location " +"%(image_location)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:552 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(updates)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:574 +#, python-format +msgid "Unable to create volume. Volume driver %s not initialized" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:588 +#, python-format +msgid "" +"Volume %(volume_id)s: being created using %(functor)s with specification:" +" %(volume_spec)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:611 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with creation provided " +"model %(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:619 +#, python-format +msgid "Volume %s: creating export" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:633 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with driver provided model " +"%(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:680 +#, python-format +msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" +msgstr "" + +#~ msgid "Error retrieving volume status: %s" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get system name" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get storage pool data" +#~ msgstr "" + +#~ msgid "Cannot find any Fibre Channel HBAs" +#~ msgstr "" + +#~ msgid "Volume status must be available or error" +#~ msgstr "" + +#~ msgid "No backend config with id %s" +#~ msgstr "" + +#~ msgid "No sm_flavor called %s" +#~ msgstr "" + +#~ msgid "No sm_volume with id %s" +#~ msgstr "" + +#~ msgid "Error: %s" +#~ msgstr "" + +#~ msgid "Unexpected state while cloning %s" +#~ msgstr "Merong hindi-inaasahang pagkakamali habang tumatakbo ang command." + +#~ msgid "iSCSI device not found at %s" +#~ msgstr "" + +#~ msgid "Fibre Channel device not found." 
+#~ msgstr "" + +#~ msgid "Uncaught exception" +#~ msgstr "" + +#~ msgid "Out reactor registered" +#~ msgstr "" + +#~ msgid "CONSUMER GOT %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT QUEUED %(data)s" +#~ msgstr "" + +#~ msgid "Could not create IPC directory %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT %(data)s" +#~ msgstr "" + +#~ msgid "May specify only one of snapshot, imageRef or source volume" +#~ msgstr "" + +#~ msgid "Volume size cannot be lesser than the Snapshot size" +#~ msgstr "" + +#~ msgid "Unable to clone volumes that are in an error state" +#~ msgstr "" + +#~ msgid "Clones currently must be >= original volume size." +#~ msgstr "" + +#~ msgid "Volume size '%s' must be an integer and greater than 0" +#~ msgstr "" + +#~ msgid "Size of specified image is larger than volume size." +#~ msgstr "" + +#~ msgid "Image minDisk size is larger than the volume size." +#~ msgstr "" + +#~ msgid "" +#~ msgstr "" + +#~ msgid "Availability zone is invalid" +#~ msgstr "" + +#~ msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +#~ msgstr "" + +#~ msgid "volume %s: creating from snapshot" +#~ msgstr "" + +#~ msgid "volume %s: creating from existing volume" +#~ msgstr "" + +#~ msgid "volume %s: creating from image" +#~ msgstr "" + +#~ msgid "volume %s: creating" +#~ msgstr "" + +#~ msgid "Setting volume: %s status to error after failed image copy." +#~ msgstr "" + +#~ msgid "Unexpected Error: " +#~ msgstr "" + +#~ msgid "volume %s: creating export" +#~ msgstr "" + +#~ msgid "volume %s: create failed" +#~ msgstr "" + +#~ msgid "volume %s: created successfully" +#~ msgstr "" + +#~ msgid "volume %s: Error trying to reschedule create" +#~ msgstr "" + +#~ msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +#~ msgstr "" + +#~ msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +#~ msgstr "" + +#~ msgid "Downloaded image %(image_id)s to %(volume_id)s successfully." +#~ msgstr "" + +#~ msgid "Array Mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "LUN %(lun)s of size %(size)s MB is created." +#~ msgstr "" + +#~ msgid "Array mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "Failed to attach iser target for volume %(volume_id)s." 
+#~ msgstr "" + +#~ msgid "Fetching %s" +#~ msgstr "" + +#~ msgid "Link Local address is not found.:%s" +#~ msgstr "" + +#~ msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +#~ msgstr "" + +#~ msgid "Started %(name)s on %(_host)s:%(_port)s" +#~ msgstr "" + +#~ msgid "Unable to find a Fibre Channel volume device" +#~ msgstr "" + +#~ msgid "Volume device not found at %s" +#~ msgstr "" + +#~ msgid "Unable to find Volume Group: %s" +#~ msgstr "" + +#~ msgid "Failed to create Volume Group: %s" +#~ msgstr "" + +#~ msgid "snapshot %(snap_name)s: creating" +#~ msgstr "" + +#~ msgid "Running with CoraidDriver for ESM EtherCLoud" +#~ msgstr "" + +#~ msgid "Update session cookie %(session)s" +#~ msgstr "" + +#~ msgid "Message : %(message)s" +#~ msgstr "" + +#~ msgid "Error while trying to set group: %(message)s" +#~ msgstr "" + +#~ msgid "Unable to find group: %(group)s" +#~ msgstr "" + +#~ msgid "ESM urlOpen error" +#~ msgstr "" + +#~ msgid "JSON Error" +#~ msgstr "" + +#~ msgid "Request without URL" +#~ msgstr "" + +#~ msgid "Configure data : %s" +#~ msgstr "" + +#~ msgid "Configure response : %s" +#~ msgstr "" + +#~ msgid "Unable to retrive volume infos for volume %(volname)s" +#~ msgstr "" + +#~ msgid "Cannot login on Coraid ESM" +#~ msgstr "" + +#~ msgid "Fail to create volume %(volname)s" +#~ msgstr "" + +#~ msgid "Failed to delete volume %(volname)s" +#~ msgstr "" + +#~ msgid "Failed to Create Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "Failed to Delete Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "Failed to Create Volume from Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "fmt = %(fmt)s backed by: %(backing_file)s" +#~ msgstr "" + +#~ msgid "Expected image to be in raw format, but is %s" +#~ msgstr "" + +#~ msgid "volume group %s doesn't exist" +#~ msgstr "" + +#~ msgid "Error retrieving volume stats: %s" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Could not get system name" +#~ msgstr "" + +#~ msgid "CPG (%s) must be in a domain" +#~ msgstr "" + +#~ msgid "Error populating default encryption types!" +#~ msgstr "" + +#~ msgid "Unexpected error while running command." +#~ msgstr "Merong hindi-inaasahang pagkakamali habang tumatakbo ang command." 
+ +#~ msgid "Nexenta SA returned the error" +#~ msgstr "" + +#~ msgid "Ignored target group creation error \"%s\" while ensuring export" +#~ msgstr "" -#: cinder/volume/netapp.py:603 cinder/volume/netapp.py:657 -#, python-format -msgid "No LUN ID for volume %s" -msgstr "" +#~ msgid "Ignored target group member addition error \"%s\" while ensuring export" +#~ msgstr "" -#: cinder/volume/netapp.py:607 cinder/volume/netapp.py:661 -#, python-format -msgid "Failed to get LUN details for LUN ID %s" -msgstr "" +#~ msgid "Ignored LU creation error \"%s\" while ensuring export" +#~ msgstr "" -#: cinder/volume/netapp.py:614 -#, python-format -msgid "Failed to get host details for host ID %s" -msgstr "" +#~ msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +#~ msgstr "" -#: cinder/volume/netapp.py:620 -#, python-format -msgid "Failed to get target portal for filer: %s" -msgstr "" +#~ msgid "Copying metadata from snapshot %(snap_volume_id)s to %(volume_id)s" +#~ msgstr "" -#: cinder/volume/netapp.py:625 -#, python-format -msgid "Failed to get target IQN for filer: %s" -msgstr "" +#~ msgid "Connection to glance failed" +#~ msgstr "" -#: cinder/volume/san.py:113 cinder/volume/san.py:151 -msgid "Specify san_password or san_private_key" -msgstr "" +#~ msgid "Invalid snapshot" +#~ msgstr "" -#: cinder/volume/san.py:156 -msgid "san_ip must be set" -msgstr "" +#~ msgid "Invalid input received" +#~ msgstr "" -#: cinder/volume/san.py:320 -#, python-format -msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" -msgstr "" +#~ msgid "Invalid volume type" +#~ msgstr "" -#: cinder/volume/san.py:452 -#, python-format -msgid "CLIQ command returned %s" -msgstr "" +#~ msgid "Invalid volume" +#~ msgstr "" -#: cinder/volume/san.py:458 -#, python-format -msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" -msgstr "" +#~ msgid "Invalid host" +#~ msgstr "" -#: cinder/volume/san.py:466 -#, python-format -msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" -msgstr "" +#~ msgid "Invalid auth key" +#~ msgstr "" -#: cinder/volume/san.py:496 -#, python-format -msgid "" -"Unexpected number of virtual ips for cluster %(cluster_name)s. " -"Result=%(_xml)s" -msgstr "" +#~ msgid "Invalid metadata" +#~ msgstr "" -#: cinder/volume/san.py:549 -#, python-format -msgid "Volume info: %(volume_name)s => %(volume_attributes)s" -msgstr "" +#~ msgid "Invalid metadata size" +#~ msgstr "" -#: cinder/volume/san.py:594 -msgid "local_path not supported" -msgstr "" +#~ msgid "Migration error" +#~ msgstr "" -#: cinder/volume/san.py:626 -#, python-format -msgid "Could not determine project for volume %s, can't export" -msgstr "" +#~ msgid "Quota exceeded" +#~ msgstr "" -#: cinder/volume/san.py:696 -#, python-format -msgid "Payload for SolidFire API call: %s" -msgstr "" +#~ msgid "Connection to swift failed" +#~ msgstr "" -#: cinder/volume/san.py:713 -#, python-format -msgid "Call to json.loads() raised an exception: %s" -msgstr "" +#~ msgid "Volume migration failed" +#~ msgstr "" -#: cinder/volume/san.py:718 -#, python-format -msgid "Results of SolidFire API call: %s" -msgstr "" +#~ msgid "SSH command injection detected" +#~ msgstr "" -#: cinder/volume/san.py:732 -#, python-format -msgid "Found solidfire account: %s" -msgstr "" +#~ msgid "Invalid qos specs" +#~ msgstr "" -#: cinder/volume/san.py:746 -#, python-format -msgid "solidfire account: %s does not exist, create it..." 
-msgstr "" +#~ msgid "debug in callback: %s" +#~ msgstr "" -#: cinder/volume/san.py:804 -msgid "Enter SolidFire create_volume..." -msgstr "" +#~ msgid "Expected object of type: %s" +#~ msgstr "" -#: cinder/volume/san.py:846 -msgid "Leaving SolidFire create_volume" -msgstr "" +#~ msgid "timefunc: '%(name)s' took %(total_time).2f secs" +#~ msgstr "" -#: cinder/volume/san.py:861 -msgid "Enter SolidFire delete_volume..." -msgstr "" +#~ msgid "base image still has %s snapshots so not deleting base image" +#~ msgstr "" -#: cinder/volume/san.py:880 -#, python-format -msgid "Deleting volumeID: %s " -msgstr "" +#~ msgid "Failed to rename migration destination volume %(vol)s to %(name)s" +#~ msgstr "" -#: cinder/volume/san.py:888 -msgid "Leaving SolidFire delete_volume" -msgstr "" +#~ msgid "Resize volume \"%(name)s\" to %(size)s" +#~ msgstr "" -#: cinder/volume/san.py:891 -msgid "Executing SolidFire ensure_export..." -msgstr "" +#~ msgid "Volume \"%(name)s\" resized. New size is %(size)s" +#~ msgstr "" -#: cinder/volume/san.py:895 -msgid "Executing SolidFire create_export..." -msgstr "" +#~ msgid "Invalid snapshot backing file format: %s" +#~ msgstr "" -#: cinder/volume/volume_types.py:49 cinder/volume/volume_types.py:108 -msgid "name cannot be None" -msgstr "" +#~ msgid "Extend volume from %(old_size) to %(new_size)" +#~ msgstr "" -#: cinder/volume/volume_types.py:96 -msgid "id cannot be None" -msgstr "" +#~ msgid "pool %s doesn't exist" +#~ msgstr "" -#: cinder/volume/xensm.py:55 -#, python-format -msgid "SR name = %s" -msgstr "" +#~ msgid "_update_volume_stats: Could not get system name." +#~ msgstr "" -#: cinder/volume/xensm.py:56 -#, python-format -msgid "Params: %s" -msgstr "" +#~ msgid "Disk not found: %s" +#~ msgstr "" -#: cinder/volume/xensm.py:60 -#, python-format -msgid "Failed to create sr %s...continuing" -msgstr "" +#~ msgid "read timed out" +#~ msgstr "" -#: cinder/volume/xensm.py:62 -msgid "Create failed" -msgstr "" +#~ msgid "check_for_setup_error." +#~ msgstr "" -#: cinder/volume/xensm.py:64 -#, python-format -msgid "SR UUID of new SR is: %s" -msgstr "" +#~ msgid "check_for_setup_error: Can not get device type." +#~ msgstr "" -#: cinder/volume/xensm.py:71 -msgid "Failed to update db" -msgstr "" +#~ msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +#~ msgstr "" -#: cinder/volume/xensm.py:80 -#, python-format -msgid "Failed to introduce sr %s...continuing" -msgstr "" +#~ msgid "_get_device_type: Storage Pool must be configured." +#~ msgstr "" -#: cinder/volume/xensm.py:91 -#, python-format -msgid "Failed to reach backend %d" -msgstr "" +#~ msgid "create_volume:volume name: %s." +#~ msgstr "" -#: cinder/volume/xensm.py:100 -msgid "XenSMDriver requires xenapi connection" -msgstr "" +#~ msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +#~ msgstr "" -#: cinder/volume/xensm.py:110 -msgid "Failed to initiate session" -msgstr "" +#~ msgid "create_export: volume name:%s" +#~ msgstr "" -#: cinder/volume/xensm.py:142 -#, python-format -msgid "Volume will be created in backend - %d" -msgstr "" +#~ msgid "create_export:Volume %(name)s does not exist." +#~ msgstr "" -#: cinder/volume/xensm.py:154 -msgid "Failed to update volume in db" -msgstr "" +#~ msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +#~ msgstr "" -#: cinder/volume/xensm.py:157 -msgid "Unable to create volume" -msgstr "" +#~ msgid "terminate_connection:Host does not exist. Host name:%(host)s." 
+#~ msgstr "" -#: cinder/volume/xensm.py:171 -msgid "Failed to delete vdi" -msgstr "" +#~ msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +#~ msgstr "" -#: cinder/volume/xensm.py:177 -msgid "Failed to delete volume in db" -msgstr "" +#~ msgid "create_snapshot:Device does not support snapshot." +#~ msgstr "" -#: cinder/volume/xensm.py:210 -msgid "Failed to find volume in db" -msgstr "" +#~ msgid "create_snapshot:Resource pool needs 1GB valid size at least." +#~ msgstr "" -#: cinder/volume/xensm.py:221 -msgid "Failed to find backend in db" -msgstr "" +#~ msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +#~ msgstr "" -#: cinder/volume/nexenta/__init__.py:27 -msgid "Nexenta SA returned the error" -msgstr "" +#~ msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s" +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:64 -#, python-format -msgid "Sending JSON data: %s" -msgstr "" +#~ msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:69 -#, python-format -msgid "Auto switching to HTTPS connection to %s" -msgstr "" +#~ msgid "delete_snapshot:Device does not support snapshot." +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:75 -msgid "No headers in server response" -msgstr "" +#~ msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:76 -msgid "Bad response from server" -msgstr "" +#~ msgid "_check_conf_file: %s" +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:79 -#, python-format -msgid "Got response: %s" -msgstr "" +#~ msgid "Write login information to xml error. %s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:96 -#, python-format -msgid "Volume %s does not exist in Nexenta SA" -msgstr "" +#~ msgid "_get_login_info error. %s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:180 -msgid "" -"Call to local_path should not happen. Verify that use_local_volumes flag " -"is turned off." -msgstr "" +#~ msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:202 -#, python-format -msgid "Ignored target creation error \"%s\" while ensuring export" -msgstr "" +#~ msgid "_get_lun_set_info:%s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:210 -#, python-format -msgid "Ignored target group creation error \"%s\" while ensuring export" -msgstr "" +#~ msgid "_get_iscsi_info:%s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:219 -#, python-format -msgid "Ignored target group member addition error \"%s\" while ensuring export" -msgstr "" +#~ msgid "CLI command:%s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:227 -#, python-format -msgid "Ignored LU creation error \"%s\" while ensuring export" -msgstr "" +#~ msgid "_execute_cli:%s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:237 -#, python-format -msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" -msgstr "" +#~ msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:273 -#, python-format -msgid "" -"Got error trying to destroy target group %(target_group)s, assuming it is" -" already gone: %(exc)s" -msgstr "" +#~ msgid "_get_tgt_iqn:iSCSI IP is %s." 
+#~ msgstr "" -#: cinder/volume/nexenta/volume.py:280 -#, python-format -msgid "" -"Got error trying to delete target %(target)s, assuming it is already " -"gone: %(exc)s" -msgstr "" +#~ msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +#~ msgstr "" -#~ msgid "Unable to locate account %(account_name) on Solidfire device" +#~ msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" #~ msgstr "" -#~ msgid "Zone %(zone_id)s could not be found." +#~ msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" #~ msgstr "" -#~ msgid "Cinder access parameters were not specified." +#~ msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." #~ msgstr "" -#~ msgid "Virtual Storage Array %(id)d could not be found." +#~ msgid "Ignored target creation error while ensuring export" #~ msgstr "" -#~ msgid "Virtual Storage Array %(name)s could not be found." +#~ msgid "Ignored target group creation error while ensuring export" #~ msgstr "" -#~ msgid "Detected more than one volume with name %(vol_name)" +#~ msgid "Ignored target group member addition error while ensuring export" #~ msgstr "" -#~ msgid "Detected existing vlan with id %(vlan)" +#~ msgid "Ignored LU creation error while ensuring export" #~ msgstr "" -#~ msgid "" -#~ "Attempting to grab semaphore \"%(lock)s\" " -#~ "for method \"%(method)s\"...lock" +#~ msgid "Ignored LUN mapping entry addition error while ensuring export" #~ msgstr "" -#~ msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgid "Invalid source volume %(reason)s." #~ msgstr "" -#~ msgid "" -#~ "Attempting to grab file lock " -#~ "\"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgid "The request is invalid." #~ msgstr "" -#~ msgid "Got file lock \"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgid "Volume %(volume_id)s persistence file could not be found." #~ msgstr "" -#~ msgid "Parent group id and group id cannot be same" +#~ msgid "No disk at %(location)s" #~ msgstr "" -#~ msgid "No body provided" +#~ msgid "Class %(class_name)s could not be found: %(exception)s" #~ msgstr "" -#~ msgid "Create VSA %(display_name)s of type %(vc_type)s" +#~ msgid "Action not allowed." #~ msgstr "" -#~ msgid "Delete VSA with id: %s" +#~ msgid "Key pair %(key_name)s already exists." #~ msgstr "" -#~ msgid "Associate address %(ip)s to VSA %(id)s" +#~ msgid "Migration error: %(reason)s" #~ msgstr "" -#~ msgid "Disassociate address from VSA %(id)s" +#~ msgid "Maximum volume/snapshot size exceeded" #~ msgstr "" -#~ msgid "%(obj)s with ID %(id)s not found" +#~ msgid "3PAR Host already exists: %(err)s. %(info)s" #~ msgstr "" -#~ msgid "" -#~ "%(obj)s with ID %(id)s belongs to " -#~ "VSA %(own_vsa_id)s and not to VSA " -#~ "%(vsa_id)s." +#~ msgid "Backup volume %(volume_id)s type not recognised." #~ msgstr "" -#~ msgid "Index. vsa_id=%(vsa_id)s" +#~ msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s" #~ msgstr "" -#~ msgid "Detail. vsa_id=%(vsa_id)s" +#~ msgid "ssh_read: Read SSH timeout" #~ msgstr "" -#~ msgid "Create. vsa_id=%(vsa_id)s, body=%(body)s" +#~ msgid "do_setup." #~ msgstr "" -#~ msgid "Create volume of %(size)s GB from VSA ID %(vsa_id)s" +#~ msgid "create_volume: volume name: %s." #~ msgstr "" -#~ msgid "Update %(obj)s with id: %(id)s, changes: %(changes)s" +#~ msgid "delete_volume: volume name: %s." #~ msgstr "" -#~ msgid "Delete. vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgid "create_cloned_volume: src volume: %(src)s tgt volume: %(tgt)s" #~ msgstr "" -#~ msgid "Show. 
vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgid "create_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" #~ msgstr "" -#~ msgid "Index instances for VSA %s" +#~ msgid "delete_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" #~ msgstr "" -#~ msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances" +#~ msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s" #~ msgstr "" -#~ msgid "" -#~ "Instance quota exceeded. You cannot run" -#~ " any more instances of this type." +#~ msgid "_update_volume_stats: Updating volume stats" #~ msgstr "" -#~ msgid "" -#~ "Instance quota exceeded. You can only" -#~ " run %s more instances of this " -#~ "type." +#~ msgid "restore finished." #~ msgstr "" -#~ msgid "Going to try to soft delete %s" +#~ msgid "Error encountered during initialization of driver: %s" #~ msgstr "" -#~ msgid "No host for instance %s, deleting immediately" +#~ msgid "Unabled to update stats, driver is uninitialized" #~ msgstr "" -#~ msgid "Going to try to terminate %s" +#~ msgid "Snapshot file at %s does not exist." #~ msgstr "" -#~ msgid "Going to try to stop %s" +#~ msgid "_create_copy: Source vdisk %s does not exist" #~ msgstr "" -#~ msgid "Going to try to start %s" +#~ msgid "Login to 3PAR array invalid" #~ msgstr "" -#~ msgid "" -#~ "Going to force the deletion of the" -#~ " vm %(instance_uuid)s, even if it is" -#~ " deleted" +#~ msgid "There are no datastores present under %s." #~ msgstr "" -#~ msgid "" -#~ "Instance %(instance_uuid)s did not exist " -#~ "in the DB, but I will shut " -#~ "it down anyway using a special " -#~ "context" +#~ msgid "Size for volume: %s not found, skipping secure delete." #~ msgstr "" -#~ msgid "exception terminating the instance %(instance_uuid)s" +#~ msgid "Could not find attribute for LUN named %s" #~ msgstr "" -#~ msgid "trying to destroy already destroyed instance: %s" +#~ msgid "Cleaning up incomplete backup operations" #~ msgstr "" -#~ msgid "" -#~ "Instance %(name)s found in database but" -#~ " not known by hypervisor. Setting " -#~ "power state to NOSTATE" +#~ msgid "Resetting volume %s to available (was backing-up)" #~ msgstr "" -#~ msgid "" -#~ "Detected instance with name label " -#~ "'%(name_label)s' which is marked as " -#~ "DELETED but still present on host." +#~ msgid "Resetting volume %s to error_restoring (was restoring-backup)" #~ msgstr "" -#~ msgid "" -#~ "Destroying instance with name label " -#~ "'%(name_label)s' which is marked as " -#~ "DELETED but still present on host." +#~ msgid "Resetting backup %s to error (was creating)" #~ msgstr "" -#~ msgid "SQL connection failed (%(connstring)s). %(attempts)d attempts left." +#~ msgid "Resetting backup %s to available (was restoring)" #~ msgstr "" -#~ msgid "Can't downgrade without losing data" +#~ msgid "Resuming delete on backup: %s" #~ msgstr "" -#~ msgid "Instance %(instance_id)s not found" +#~ msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" #~ msgstr "" -#~ msgid "Network %s has active ports, cannot delete" +#~ msgid "create_backup finished. backup: %s" #~ msgstr "" -#~ msgid "No fixed IPs to deallocate for vif %sid" +#~ msgid "delete_backup started, backup: %s" #~ msgstr "" -#~ msgid "" -#~ "AMQP server on %(fl_host)s:%(fl_port)d is " -#~ "unreachable: %(e)s. Trying again in " -#~ "%(fl_intv)d seconds." +#~ msgid "delete_backup finished, backup %s deleted" #~ msgstr "" -#~ msgid "Unable to connect to AMQP server after %(tries)d tries. Shutting down." 
+#~ msgid "JSON transfer Error" #~ msgstr "" -#~ msgid "Failed to fetch message from queue: %s" +#~ msgid "create volume error: %(err)s" #~ msgstr "" -#~ msgid "Initing the Adapter Consumer for %s" +#~ msgid "Create snapshot error." #~ msgstr "" -#~ msgid "Created \"%(exchange)s\" fanout exchange with \"%(key)s\" routing key" +#~ msgid "Create luncopy error." #~ msgstr "" -#~ msgid "Exception while processing consumer" +#~ msgid "_find_host_lun_id transfer data error! " #~ msgstr "" -#~ msgid "Creating \"%(exchange)s\" fanout exchange" +#~ msgid "ssh_read: Read SSH timeout." #~ msgstr "" -#~ msgid "response %s" +#~ msgid "There are no hosts in the inventory." #~ msgstr "" -#~ msgid "topic is %s" +#~ msgid "Unable to create volume: %(vol)s on the hosts: %(hosts)s." #~ msgstr "" -#~ msgid "message %s" +#~ msgid "Successfully cloned new backing: %s." #~ msgstr "" -#~ msgid "" -#~ "Cannot confirm tmpfile at %(ipath)s is" -#~ " on same shared storage between " -#~ "%(src)s and %(dest)s." +#~ msgid "Successfully reverted clone: %(clone)s to snapshot: %(snapshot)s." #~ msgstr "" -#~ msgid "" -#~ "Unable to migrate %(instance_id)s to " -#~ "%(dest)s: Lack of memory(host:%(avail)s <= " -#~ "instance:%(mem_inst)s)" +#~ msgid "Copying backing files from %(src)s to %(dest)s." #~ msgstr "" -#~ msgid "" -#~ "Unable to migrate %(instance_id)s to " -#~ "%(dest)s: Lack of disk(host:%(available)s <=" -#~ " instance:%(necessary)s)" +#~ msgid "Initiated copying of backing via task: %s." #~ msgstr "" -#~ msgid "Driver Method %(driver_method)s missing: %(e)s.Reverting to schedule()" +#~ msgid "Successfully copied backing to %s." #~ msgstr "" -#~ msgid "Setting instance %(instance_uuid)s to ERROR state." +#~ msgid "Registering backing at path: %s to inventory." #~ msgstr "" -#~ msgid "_filter_hosts: %(request_spec)s" +#~ msgid "Initiated registring backing, task: %s." #~ msgstr "" -#~ msgid "Filter hosts for drive type %s" +#~ msgid "Successfully registered backing: %s." #~ msgstr "" -#~ msgid "Host %s has no free capacity. Skip" +#~ msgid "Reverting backing to snapshot: %s." #~ msgstr "" -#~ msgid "Filter hosts: %s" +#~ msgid "Initiated reverting snapshot via task: %s." #~ msgstr "" -#~ msgid "Must implement host selection mechanism" +#~ msgid "Successfully reverted to snapshot: %s." #~ msgstr "" -#~ msgid "Maximum number of hosts selected (%d)" +#~ msgid "Successfully copied disk data to: %s." #~ msgstr "" -#~ msgid "Selected excessive host %(host)s" +#~ msgid "Error(s): %s occurred in the call to RetrieveProperties." #~ msgstr "" -#~ msgid "Provision volume %(name)s of size %(size)s GB on host %(host)s" +#~ msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" #~ msgstr "" -#~ msgid "volume_params %(volume_params)s" +#~ msgid "Deploy v1 of the Cinder API. " #~ msgstr "" -#~ msgid "%(i)d: Volume %(name)s" +#~ msgid "Deploy v2 of the Cinder API. " #~ msgstr "" -#~ msgid "Attempting to spawn %(num_volumes)d volume(s)" +#~ msgid "_read_xml:%s" #~ msgstr "" -#~ msgid "Error creating volumes" +#~ msgid "request ip info is %s." #~ msgstr "" -#~ msgid "Non-VSA volume %d" +#~ msgid "new str info is %s." #~ msgstr "" -#~ msgid "Spawning volume %(volume_id)s with drive type %(drive_type)s" +#~ msgid "Failed to create iser target for volume %(volume_id)s." #~ msgstr "" -#~ msgid "Error creating volume" +#~ msgid "Failed to remove iser target for volume %(volume_id)s." 
#~ msgstr "" -#~ msgid "No capability selected for volume of size %(size)s" +#~ msgid "rtstool is not installed correctly" #~ msgstr "" -#~ msgid "Host %s:" +#~ msgid "Creating iser_target for: %s" #~ msgstr "" -#~ msgid "" -#~ "\tDrive %(qosgrp)-25s: total %(total)2s, " -#~ "used %(used)2s, free %(free)2s. Available " -#~ "capacity %(avail)-5s" +#~ msgid "Failed to create iser target for volume id:%(vol_id)s: %(e)s" #~ msgstr "" -#~ msgid "" -#~ "\t LeastUsedHost: Best host: %(best_host)s." -#~ " (used capacity %(min_used)s)" +#~ msgid "Removing iser_target for: %s" #~ msgstr "" -#~ msgid "" -#~ "\t MostAvailCap: Best host: %(best_host)s. " -#~ "(available %(max_avail)s %(type_str)s)" +#~ msgid "Failed to remove iser target for volume id:%(vol_id)s: %(e)s" #~ msgstr "" -#~ msgid "(%(nm)s) publish (key: %(routing_key)s) %(message)s" +#~ msgid "Volume %s does not exist, it seems it was already deleted" #~ msgstr "" -#~ msgid "Publishing to route %s" +#~ msgid "Executing zfs send/recv on the appliance" #~ msgstr "" -#~ msgid "Declaring queue %s" +#~ msgid "zfs send/recv done, new volume %s created" #~ msgstr "" -#~ msgid "Declaring exchange %s" +#~ msgid "Failed to delete temp snapshot %(volume)s@%(snapshot)s" #~ msgstr "" -#~ msgid "Binding %(queue)s to %(exchange)s with key %(routing_key)s" +#~ msgid "Failed to delete zfs recv snapshot %(volume)s@%(snapshot)s" #~ msgstr "" -#~ msgid "Getting from %(queue)s: %(message)s" +#~ msgid "rbd export-diff failed - %s" #~ msgstr "" -#~ msgid "Test: Emulate wrong VSA name. Raise" +#~ msgid "rbd import-diff failed - %s" #~ msgstr "" -#~ msgid "Test: Emulate DB error. Raise" +#~ msgid "%s is not on GPFS. Perhaps GPFS not mounted." #~ msgstr "" -#~ msgid "Test: user_data = %s" +#~ msgid "Folder %s does not exist, it seems it was already deleted." #~ msgstr "" -#~ msgid "_create: param=%s" +#~ msgid "No 'os-update_readonly_flag' was specified in request." #~ msgstr "" -#~ msgid "Host %s" +#~ msgid "Volume 'readonly' flag must be specified in request as a boolean." #~ msgstr "" -#~ msgid "Test: provision vol %(name)s on host %(host)s" +#~ msgid "ISER provider_location not stored, using discovery" #~ msgstr "" -#~ msgid "\t vol=%(vol)s" +#~ msgid "Could not find iSER export for volume %s" #~ msgstr "" -#~ msgid "Test: VSA update request: vsa_id=%(vsa_id)s values=%(values)s" +#~ msgid "ISER Discovery: Found %s" #~ msgstr "" -#~ msgid "Test: Volume create: %s" +#~ msgid "Failed to access the device on the path %(path)s: %(error)s." #~ msgstr "" -#~ msgid "Test: Volume get request: id=%(volume_id)s" +#~ msgid "iSER device not found at %s" #~ msgstr "" -#~ msgid "Test: Volume update request: id=%(volume_id)s values=%(values)s" +#~ msgid "Found iSER node %(host_device)s (after %(tries)s rescans)." #~ msgstr "" -#~ msgid "Test: Volume get: id=%(volume_id)s" +#~ msgid "Skipping ensure_export. No iser_target provisioned for volume: %s" #~ msgstr "" -#~ msgid "Task [%(name)s] %(task)s status: success %(result)s" +#~ msgid "Skipping remove_export. No iser_target provisioned for volume: %s" #~ msgstr "" -#~ msgid "Task [%(name)s] %(task)s status: %(status)s %(error_info)s" +#~ msgid "Downloading image: %s from glance image server." #~ msgstr "" -#~ msgid "Unable to get updated status: %s" +#~ msgid "Uploading image: %s to the Glance image server." 
#~ msgstr "" -#~ msgid "" -#~ "deactivate_node is called for " -#~ "node_id = %(id)s node_ip = %(ip)s" +#~ msgid "Invalid request body" #~ msgstr "" -#~ msgid "virsh said: %r" +#~ msgid "enter: _get_host_from_connector: prefix %s" #~ msgstr "" -#~ msgid "cool, it's a device" +#~ msgid "Schedule volume flow not retrieved" #~ msgstr "" -#~ msgid "Unable to read LXC console" +#~ msgid "Failed to successfully complete schedule volume using flow: %s" #~ msgstr "" -#~ msgid "" -#~ "to xml...\n" -#~ ":%s " +#~ msgid "Create volume flow not retrieved" #~ msgstr "" -#~ msgid "During wait running, %s disappeared." +#~ msgid "Failed to successfully complete create volume workflow" #~ msgstr "" -#~ msgid "Instance %s running successfully." +#~ msgid "Expected volume result not found" #~ msgstr "" -#~ msgid "The nwfilter(%(instance_secgroup_filter_name)s) is not found." +#~ msgid "Manager volume flow not retrieved" #~ msgstr "" -#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image verification failed" +#~ msgid "Failed to successfully complete manager volume workflow" #~ msgstr "" -#~ msgid "" -#~ "%(container_format)s-%(id)s (%(base_file)s): image " -#~ "verification skipped, no hash stored" +#~ msgid "Unable to update stats, driver is uninitialized" #~ msgstr "" -#~ msgid "%(container_format)s-%(id)s (%(base_file)s): checking" +#~ msgid "Bad reponse from server: %s" #~ msgstr "" -#~ msgid "" -#~ "%(container_format)s-%(id)s (%(base_file)s): in use:" -#~ " on this node %(local)d local, " -#~ "%(remote)d on other nodes" +#~ msgid "%(flow)s has moved into state %(state)s from state %(old_state)s" #~ msgstr "" -#~ msgid "" -#~ "%(container_format)s-%(id)s (%(base_file)s): warning " -#~ "-- an absent base file is in " -#~ "use! instances: %(instance_list)s" +#~ msgid "No request spec, will not reschedule" #~ msgstr "" -#~ msgid "" -#~ "%(container_format)s-%(id)s (%(base_file)s): in: on" -#~ " other nodes (%(remote)d on other " -#~ "nodes)" +#~ msgid "No retry filter property or associated retry info, will not reschedule" #~ msgstr "" -#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image is not in use" +#~ msgid "Retry info not present, will not reschedule" #~ msgstr "" -#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image is in use" +#~ msgid "Clear capabilities" #~ msgstr "" -#~ msgid "Created VM %s..." +#~ msgid "This usually means the volume was never succesfully created." #~ msgstr "" -#~ msgid "Created VM %(instance_name)s as %(vm_ref)s." +#~ msgid "setting LU uppper (end) limit to %s" #~ msgstr "" -#~ msgid "Creating a CDROM-specific VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +#~ msgid "Can't find lun or lun goup in array" #~ msgstr "" -#~ msgid "" -#~ "Created a CDROM-specific VBD %(vbd_ref)s" -#~ " for VM %(vm_ref)s, VDI %(vdi_ref)s." +#~ msgid "Volume to be restored to is smaller than the backup to be restored" #~ msgstr "" -#~ msgid "No primary VDI found for%(vm_ref)s" +#~ msgid "Volume driver '%(driver)s' not initialized." #~ msgstr "" -#~ msgid "Snapshotting VM %(vm_ref)s with label '%(label)s'..." +#~ msgid "in looping call" #~ msgstr "" -#~ msgid "Created snapshot %(template_vm_ref)s from VM %(vm_ref)s." +#~ msgid "Is the appropriate service running?" 
#~ msgstr "" -#~ msgid "Fetching image %(image)s" +#~ msgid "Could not find another host" #~ msgstr "" -#~ msgid "Image Type: %s" +#~ msgid "Not enough allocatable volume gigabytes remaining" #~ msgstr "" -#~ msgid "ISO: Found sr possibly containing the ISO image" +#~ msgid "Unable to update stats on non-intialized Volume Group: %s" #~ msgstr "" -#~ msgid "Size for image %(image)s:%(virtual_size)d" +#~ msgid "do_setup: Pool %s does not exist" #~ msgstr "" -#~ msgid "instance %s: Failed to fetch glance image" +#~ msgid "migrate_volume started with more than one vdisk copy" #~ msgstr "" -#~ msgid "Creating VBD for VDI %s ... " +#~ msgid "migrate_volume: Could not get vdisk copy data" #~ msgstr "" -#~ msgid "Creating VBD for VDI %s done." +#~ msgid "Selected datastore: %s for the volume." #~ msgstr "" -#~ msgid "VBD.unplug successful first time." +#~ msgid "There are no valid datastores present under %s." #~ msgstr "" -#~ msgid "VBD.unplug rejected: retrying..." +#~ msgid "Unable to create volume, driver not initialized" #~ msgstr "" -#~ msgid "Not sleeping anymore!" +#~ msgid "Migration %(migration_id)s could not be found." #~ msgstr "" -#~ msgid "VBD.unplug successful eventually." +#~ msgid "Bad driver response status: %(status)s" #~ msgstr "" -#~ msgid "Ignoring XenAPI.Failure in VBD.unplug: %s" +#~ msgid "Instance %(instance_id)s could not be found." #~ msgstr "" -#~ msgid "Ignoring XenAPI.Failure %s" +#~ msgid "Volume retype failed: %(reason)s" #~ msgstr "" -#~ msgid "Starting instance %s" +#~ msgid "SIGTERM received" #~ msgstr "" -#~ msgid "instance %s: Failed to spawn" +#~ msgid "Child %(pid)d exited with status %(code)d" #~ msgstr "" -#~ msgid "Instance %s failed to spawn - performing clean-up" +#~ msgid "_wait_child %d" #~ msgstr "" -#~ msgid "instance %s: Failed to spawn - Unable to create VM" +#~ msgid "wait wrap.failed %s" #~ msgstr "" #~ msgid "" -#~ "Auto configuring disk for instance " -#~ "%(instance_uuid)s, attempting to resize " -#~ "partition..." +#~ "Report interval must be less than " +#~ "service down time. Current config: " +#~ ". Setting " +#~ "service_down_time to: %(new_service_down_time)s" #~ msgstr "" -#~ msgid "Invalid value for injected_files: '%s'" +#~ msgid "Failed to update iscsi target for volume %(name)s." #~ msgstr "" -#~ msgid "Starting VM %s..." +#~ msgid "Updating iscsi target: %s" #~ msgstr "" -#~ msgid "Spawning VM %(instance_uuid)s created %(vm_ref)s." +#~ msgid "Failed to update iscsi target %(name)s: %(e)s" #~ msgstr "" -#~ msgid "Instance %s: waiting for running" +#~ msgid "Caught '%(exception)s' exception." #~ msgstr "" -#~ msgid "Instance %s: running" +#~ msgid "Get code level failed" #~ msgstr "" -#~ msgid "Resources to remove:%s" +#~ msgid "do_setup: Could not get system name" #~ msgstr "" -#~ msgid "Removing VDI %(vdi_ref)s(uuid:%(vdi_to_remove)s)" +#~ msgid "Failed to get license information." #~ msgstr "" -#~ msgid "Skipping VDI destroy for %s" +#~ msgid "do_setup: No configured nodes" #~ msgstr "" -#~ msgid "Finished snapshot and upload for VM %s" +#~ msgid "enter: _get_chap_secret_for_host: host name %s" #~ msgstr "" -#~ msgid "Starting snapshot for VM %s" +#~ msgid "" +#~ "leave: _get_chap_secret_for_host: host name " +#~ "%(host_name)s with secret %(chap_secret)s" #~ msgstr "" -#~ msgid "Unable to Snapshot instance %(instance_uuid)s: %(exc)s" +#~ msgid "" +#~ "_create_host: Cannot clean host name. 
" +#~ "Host name is not unicode or string" #~ msgstr "" -#~ msgid "Updating instance '%(instance_uuid)s' progress to %(progress)d" +#~ msgid "enter: _get_host_from_connector: %s" #~ msgstr "" -#~ msgid "Resize instance %s complete" +#~ msgid "leave: _get_host_from_connector: host %s" #~ msgstr "" -#~ msgid "domid changed from %(olddomid)s to %(newdomid)s" +#~ msgid "enter: _create_host: host %s" #~ msgstr "" -#~ msgid "VM %(instance_uuid)s already halted,skipping shutdown..." +#~ msgid "_create_host: No connector ports" #~ msgstr "" -#~ msgid "Shutting down VM for Instance %(instance_uuid)s" +#~ msgid "leave: _create_host: host %(host)s - %(host_name)s" #~ msgstr "" -#~ msgid "Destroying VDIs for Instance %(instance_uuid)s" +#~ msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" #~ msgstr "" #~ msgid "" -#~ "Instance %(instance_uuid)s using RAW or " -#~ "VHD, skipping kernel and ramdisk " -#~ "deletion" +#~ "storwize_svc_multihostmap_enabled is set to " +#~ "False, Not allow multi host mapping" #~ msgstr "" -#~ msgid "Instance %(instance_uuid)s VM destroyed" +#~ msgid "volume %s mapping to multi host" #~ msgstr "" -#~ msgid "Destroying VM for Instance %(instance_uuid)s" +#~ msgid "" +#~ "leave: _map_vol_to_host: LUN %(result_lun)s, " +#~ "volume %(volume_name)s, host %(host_name)s" #~ msgstr "" -#~ msgid "Automatically hard rebooting %d" +#~ msgid "enter: _delete_host: host %s " #~ msgstr "" -#~ msgid "Instance for migration %d not found, skipping" +#~ msgid "leave: _delete_host: host %s " #~ msgstr "" -#~ msgid "injecting network info to xs for vm: |%s|" +#~ msgid "_create_host failed to return the host name." #~ msgstr "" -#~ msgid "creating vif(s) for vm: |%s|" +#~ msgid "_get_host_from_connector failed to return the host name for connector" #~ msgstr "" -#~ msgid "Creating VIF for VM %(vm_ref)s, network %(network_ref)s." +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to any host found." #~ msgstr "" -#~ msgid "Created VIF %(vif_ref)s for VM %(vm_ref)s, network %(network_ref)s." +#~ msgid "" +#~ "terminate_connection: Multiple mappings of " +#~ "volume %(vol_name)s found, no host " +#~ "specified." #~ msgstr "" -#~ msgid "injecting hostname to xs for vm: |%s|" +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to host %(host_name)s found" #~ msgstr "" -#~ msgid "" -#~ "The agent call to %(method)s returned" -#~ " an invalid response: %(ret)r. VM " -#~ "id=%(instance_uuid)s; path=%(path)s; args=%(addl_args)r" +#~ msgid "protocol must be specified as ' iSCSI' or ' FC'" #~ msgstr "" -#~ msgid "" -#~ "TIMEOUT: The call to %(method)s timed" -#~ " out. VM id=%(instance_uuid)s; args=%(args)r" +#~ msgid "enter: _create_vdisk: vdisk %s " #~ msgstr "" #~ msgid "" -#~ "NOT IMPLEMENTED: The call to %(method)s" -#~ " is not supported by the agent. " -#~ "VM id=%(instance_uuid)s; args=%(args)r" +#~ "_create_vdisk %(name)s - did not find success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" #~ msgid "" -#~ "The call to %(method)s returned an " -#~ "error: %(e)s. VM id=%(instance_uuid)s; " -#~ "args=%(args)r" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find success " +#~ "message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" #~ msgstr "" -#~ msgid "Creating VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... 
" +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find mapping " +#~ "id in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" #~ msgstr "" -#~ msgid "Error destroying VDI" +#~ msgid "" +#~ "_prepare_fc_map: Failed to prepare FlashCopy" +#~ " from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" -#~ msgid "Volume status must be available" +#~ msgid "" +#~ "Unexecpted mapping status %(status)s for " +#~ "mapping %(id)s. Attributes: %(attr)s" #~ msgstr "" -#~ msgid "\tVolume %s is NOT VSA volume" +#~ msgid "" +#~ "Mapping %(id)s prepare failed to " +#~ "complete within the allotted %(to)d " +#~ "seconds timeout. Terminating." #~ msgstr "" -#~ msgid "\tFE VSA Volume %s creation - do nothing" +#~ msgid "" +#~ "_prepare_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s with " +#~ "exception %(ex)s" #~ msgstr "" -#~ msgid "VSA BE create_volume for %s failed" +#~ msgid "_prepare_fc_map: %s" #~ msgstr "" -#~ msgid "VSA BE create_volume for %s succeeded" +#~ msgid "" +#~ "_start_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s deletion - do nothing" +#~ msgid "" +#~ "enter: _run_flashcopy: execute FlashCopy from" +#~ " source %(source)s to target %(target)s" #~ msgstr "" -#~ msgid "VSA BE delete_volume for %s failed" +#~ msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" #~ msgstr "" -#~ msgid "VSA BE delete_volume for %s suceeded" +#~ msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s local path call - call discover" +#~ msgid "_create_copy: Source vdisk %(src_vdisk)s (%(src_id)s) does not exist" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s ensure export - do nothing" +#~ msgid "" +#~ "_create_copy: cannot get source vdisk " +#~ "%(src)s capacity from vdisk attributes " +#~ "%(attr)s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s create export - do nothing" +#~ msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s remove export - do nothing" +#~ msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" #~ msgstr "" -#~ msgid "VSA BE remove_export for %s failed" +#~ msgid "" +#~ "leave: _get_flashcopy_mapping_attributes: mapping " +#~ "%(fc_map_id)s, attributes %(attributes)s" #~ msgstr "" -#~ msgid "Failed to retrieve QoS info" +#~ msgid "enter: _is_vdisk_defined: vdisk %s " #~ msgstr "" -#~ msgid "invalid drive data" +#~ msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " #~ msgstr "" -#~ msgid "drive_name not defined" +#~ msgid "enter: _delete_vdisk: vdisk %s" #~ msgstr "" -#~ msgid "invalid drive type name %s" +#~ msgid "warning: Tried to delete vdisk %s but it does not exist." #~ msgstr "" -#~ msgid "*** Experimental VSA code ***" +#~ msgid "leave: _delete_vdisk: vdisk %s" #~ msgstr "" -#~ msgid "Requested number of VCs (%d) is too high. 
Setting to default" +#~ msgid "" +#~ "_add_vdisk_copy %(name)s - did not find" +#~ " success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" -#~ msgid "Creating VSA: %s" +#~ msgid "_get_vdisk_copy_attrs: Could not get vdisk copy data" #~ msgstr "" -#~ msgid "" -#~ "VSA ID %(vsa_id)d %(vsa_name)s: Create " -#~ "volume %(vol_name)s, %(vol_size)d GB, type " -#~ "%(vol_type_id)s" +#~ msgid "_get_pool_attrs: Pool %s does not exist" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)d: Update VSA status to %(status)s" +#~ msgid "enter: _execute_command_and_parse_attributes: command %s" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)d: Update VSA call" +#~ msgid "" +#~ "leave: _execute_command_and_parse_attributes:\n" +#~ "command: %(cmd)s\n" +#~ "attributes: %(attr)s" #~ msgstr "" -#~ msgid "Adding %(add_cnt)s VCs to VSA %(vsa_name)s." +#~ msgid "" +#~ "_get_hdr_dic: attribute headers and values do not match.\n" +#~ " Headers: %(header)s\n" +#~ " Values: %(row)s" #~ msgstr "" -#~ msgid "Deleting %(del_cnt)s VCs from VSA %(vsa_name)s." +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ "stdout: %(out)s\n" +#~ "stderr: %(err)s\n" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Deleting %(direction)s volume %(vol_name)s" +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" -#~ msgid "Unable to delete volume %s" +#~ msgid "Did not find expected column in %(fun)s: %(hdr)s" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Forced delete. %(direction)s volume %(vol_name)s" +#~ msgid "" +#~ "Volume size %(size)s cannot be lesser" +#~ " than the snapshot size %(snap_size)s. " +#~ "They must be >= original snapshot " +#~ "size." #~ msgstr "" -#~ msgid "Going to try to terminate VSA ID %s" +#~ msgid "" +#~ "Clones currently disallowed when %(size)s " +#~ "< %(source_size)s. They must be >= " +#~ "original volume size." #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Delete instance %(name)s" +#~ msgid "" +#~ "Size of specified image %(image_size)s " +#~ "is larger than volume size " +#~ "%(volume_size)s." #~ msgstr "" -#~ msgid "Create call received for VSA %s" +#~ msgid "" +#~ "Image minDisk size %(min_disk)s is " +#~ "larger than the volume size " +#~ "%(volume_size)s." #~ msgstr "" -#~ msgid "Failed to find VSA %(vsa_id)d" +#~ msgid "Updating volume %(volume_id)s with %(update)s" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Drive %(vol_id)s created. Status %(status)s" +#~ msgid "Volume %s: resetting 'creating' status failed" #~ msgstr "" -#~ msgid "Drive %(vol_name)s (%(vol_disp_name)s) still in creating phase - wait" +#~ msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s" #~ msgstr "" -#~ msgid "" -#~ "VSA ID %(vsa_id)d: Not all volumes " -#~ "are created (%(cvol_real)d of %(cvol_exp)d)" +#~ msgid "Marking volume %s as bootable" #~ msgstr "" #~ msgid "" -#~ "VSA ID %(vsa_id)d: Drive %(vol_name)s " -#~ "(%(vol_disp_name)s) is in %(status)s state" +#~ "Attempting download of %(image_id)s " +#~ "(%(image_location)s) to volume %(volume_id)s" #~ msgstr "" -#~ msgid "Failed to update attach status for volume %(vol_name)s. 
%(ex)s" +#~ msgid "" +#~ "Downloaded image %(image_id)s (%(image_location)s)" +#~ " to volume %(volume_id)s successfully" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)d: Delete all BE volumes" +#~ msgid "" +#~ "Creating volume glance metadata for " +#~ "volume %(volume_id)s backed by image " +#~ "%(image_id)s with: %(vol_metadata)s" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)d: Start %(vc_count)d instances" +#~ msgid "" +#~ "Cloning %(volume_id)s from image %(image_id)s" +#~ " at location %(image_location)s" #~ msgstr "" diff --git a/cinder/locale/tl_PH/LC_MESSAGES/cinder.po b/cinder/locale/tl_PH/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..a926433c09 --- /dev/null +++ b/cinder/locale/tl_PH/LC_MESSAGES/cinder.po @@ -0,0 +1,10412 @@ +# Filipino (Philippines) translations for cinder. +# Copyright (C) 2013 ORGANIZATION +# This file is distributed under the same license as the cinder project. +# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Cinder\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-02-03 06:16+0000\n" +"PO-Revision-Date: 2013-09-26 22:10+0000\n" +"Last-Translator: Tom Fifield \n" +"Language-Team: Tagalog (Philippines) " +"(http://www.transifex.com/projects/p/openstack/language/tl_PH/)\n" +"Plural-Forms: nplurals=2; plural=(n > 1)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:102 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:66 cinder/brick/exception.py:33 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:88 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:107 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:112 +#, python-format +msgid "Volume driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:116 +#, python-format +msgid "Backup driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:120 +#, python-format +msgid "Connection to glance failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:124 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:129 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:133 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:137 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:141 +msgid "Volume driver not ready." +msgstr "" + +#: cinder/exception.py:145 cinder/brick/exception.py:74 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:150 +#, python-format +msgid "Invalid snapshot: %(reason)s" +msgstr "" + +#: cinder/exception.py:154 +#, python-format +msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:159 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:163 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:167 +msgid "The results are invalid." 
+msgstr "" + +#: cinder/exception.py:171 +#, python-format +msgid "Invalid input received: %(reason)s" +msgstr "" + +#: cinder/exception.py:175 +#, python-format +msgid "Invalid volume type: %(reason)s" +msgstr "" + +#: cinder/exception.py:179 +#, python-format +msgid "Invalid volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:183 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:187 +#, python-format +msgid "Invalid host: %(reason)s" +msgstr "" + +#: cinder/exception.py:193 cinder/brick/exception.py:81 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:197 +#, python-format +msgid "Invalid auth key: %(reason)s" +msgstr "" + +#: cinder/exception.py:201 +#, python-format +msgid "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" +msgstr "" + +#: cinder/exception.py:206 +msgid "Service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:210 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:214 +#, python-format +msgid "The device in the path %(path)s is unavailable: %(reason)s" +msgstr "" + +#: cinder/exception.py:218 +#, python-format +msgid "Expected a uuid but received %(uuid)s." +msgstr "" + +#: cinder/exception.py:222 cinder/brick/exception.py:68 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:228 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:232 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "" +"Volume %(volume_id)s has no administration metadata with key " +"%(metadata_key)s." +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Invalid metadata: %(reason)s" +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Invalid metadata size: %(reason)s" +msgstr "" + +#: cinder/exception.py:250 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:255 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:264 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:269 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s deletion is not allowed with volumes " +"present with the type." +msgstr "" + +#: cinder/exception.py:274 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:278 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:282 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:287 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:291 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:295 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "Service %(service_id)s could not be found." 
+msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:328 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:332 +#, python-format +msgid "Unknown quota resources %(unknown)s." +msgstr "" + +#: cinder/exception.py:336 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:340 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:344 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:348 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:352 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:356 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:365 +#, python-format +msgid "Volume Type %(id)s already exists." +msgstr "" + +#: cinder/exception.py:369 +#, python-format +msgid "Volume type encryption for type %(type_id)s already exists." +msgstr "" + +#: cinder/exception.py:373 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:377 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:381 +#, python-format +msgid "Could not find parameter %(param)s" +msgstr "" + +#: cinder/exception.py:385 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:389 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:398 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:402 +#, python-format +msgid "Quota exceeded: code=%(code)s" +msgstr "" + +#: cinder/exception.py:409 +#, python-format +msgid "" +"Requested volume or snapshot exceeds allowed Gigabytes quota. Requested " +"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." 
+msgstr "" + +#: cinder/exception.py:415 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:419 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:423 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:427 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:432 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:436 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:440 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:444 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:449 +#, python-format +msgid "Glance metadata for volume/snapshot %(id)s cannot be found." +msgstr "" + +#: cinder/exception.py:453 +#, python-format +msgid "Failed to export for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:457 +#, python-format +msgid "Failed to create metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:461 +#, python-format +msgid "Failed to update metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:465 +#, python-format +msgid "Failed to copy metadata to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:469 +#, python-format +msgid "Failed to copy image to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:473 +msgid "Invalid Ceph args provided for backup rbd operation" +msgstr "" + +#: cinder/exception.py:477 +msgid "An error has occurred during backup operation" +msgstr "" + +#: cinder/exception.py:481 +msgid "Backup RBD operation failed" +msgstr "" + +#: cinder/exception.py:485 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:489 +msgid "Failed to identify volume backend." +msgstr "" + +#: cinder/exception.py:493 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "" + +#: cinder/exception.py:497 +#, python-format +msgid "Connection to swift failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:501 +#, python-format +msgid "Transfer %(transfer_id)s could not be found." +msgstr "" + +#: cinder/exception.py:505 +#, python-format +msgid "Volume migration failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:509 +#, python-format +msgid "SSH command injection detected: %(command)s" +msgstr "" + +#: cinder/exception.py:513 +#, python-format +msgid "QoS Specs %(specs_id)s already exists." +msgstr "" + +#: cinder/exception.py:517 +#, python-format +msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:522 +#, python-format +msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "No such QoS spec %(specs_id)s." +msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:536 +#, python-format +msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." 
+msgstr "" + +#: cinder/exception.py:541 +#, python-format +msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." +msgstr "" + +#: cinder/exception.py:546 +#, python-format +msgid "Invalid qos specs: %(reason)s" +msgstr "" + +#: cinder/exception.py:550 +#, python-format +msgid "QoS Specs %(specs_id)s is still associated with entities." +msgstr "" + +#: cinder/exception.py:554 +#, python-format +msgid "key manager error: %(reason)s" +msgstr "" + +#: cinder/exception.py:560 +msgid "Coraid Cinder Driver exception." +msgstr "" + +#: cinder/exception.py:564 +msgid "Failed to encode json data." +msgstr "" + +#: cinder/exception.py:568 +msgid "Login on ESM failed." +msgstr "" + +#: cinder/exception.py:572 +msgid "Relogin on ESM failed." +msgstr "" + +#: cinder/exception.py:576 +#, python-format +msgid "Group with name \"%(group_name)s\" not found." +msgstr "" + +#: cinder/exception.py:580 +#, python-format +msgid "ESM configure request failed: %(message)s." +msgstr "" + +#: cinder/exception.py:584 +#, python-format +msgid "Coraid ESM not available with reason: %(reason)s." +msgstr "" + +#: cinder/exception.py:589 +msgid "Zadara Cinder Driver exception." +msgstr "" + +#: cinder/exception.py:593 +#, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:597 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:601 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:605 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:609 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:613 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:618 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:622 +msgid "SolidFire Cinder Driver exception" +msgstr "" + +#: cinder/exception.py:626 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:630 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:636 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:641 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:645 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:649 cinder/exception.py:662 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:654 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:658 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/manager.py:133 +msgid "Notifying Schedulers of capabilities ..." +msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:105 +#, python-format +msgid "" +"Default quota for resource: %(res)s is set by the default quota flag: " +"quota_%(res)s, it is now deprecated. Please use the the default quota " +"class for default quota." 
+msgstr "" + +#: cinder/quota.py:748 +#, python-format +msgid "Created reservations %s" +msgstr "" + +#: cinder/quota.py:770 +#, python-format +msgid "Failed to commit reservations %s" +msgstr "" + +#: cinder/quota.py:790 +#, python-format +msgid "Failed to roll back reservations %s" +msgstr "" + +#: cinder/quota.py:876 +msgid "Cannot register resource" +msgstr "" + +#: cinder/quota.py:879 +msgid "Cannot register resources" +msgstr "" + +#: cinder/quota_utils.py:46 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume - " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/quota_utils.py:56 cinder/transfer/api.py:168 +#: cinder/volume/flows/api/create_volume.py:520 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/service.py:95 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:108 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:148 +#, python-format +msgid "" +"Report interval must be less than service down time. Current config " +"service_down_time: %(service_down_time)s, report_interval for this: " +"service is: %(report_interval)s. Setting global service_down_time to: " +"%(new_down_time)s" +msgstr "" + +#: cinder/service.py:216 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:255 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:270 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:276 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:298 +#, python-format +msgid "" +"Value of config option %(name)s_workers must be integer greater than 1. " +"Input value ignored." +msgstr "" + +#: cinder/service.py:373 +msgid "serve() can only be called once" +msgstr "" + +#: cinder/service.py:379 cinder/openstack/common/service.py:166 +#: cinder/openstack/common/service.py:384 +msgid "Full set of CONF:" +msgstr "" + +#: cinder/service.py:387 +#, python-format +msgid "%s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Can not translate %s to integer." 
+msgstr "" + +#: cinder/utils.py:127 +#, python-format +msgid "May specify only one of %s" +msgstr "" + +#: cinder/utils.py:212 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:228 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:412 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:423 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:698 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:759 +#, python-format +msgid "Volume driver %s not initialized" +msgstr "" + +#: cinder/wsgi.py:127 cinder/openstack/common/sslutils.py:50 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: cinder/wsgi.py:130 cinder/openstack/common/sslutils.py:53 +#, python-format +msgid "Unable to find ca_file : %s" +msgstr "" + +#: cinder/wsgi.py:133 cinder/openstack/common/sslutils.py:56 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: cinder/wsgi.py:136 cinder/openstack/common/sslutils.py:59 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:169 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:206 +#, python-format +msgid "Started %(name)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:244 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:313 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." +msgstr "" + +#: cinder/api/common.py:92 cinder/api/common.py:126 cinder/volume/api.py:266 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:95 cinder/api/common.py:130 cinder/volume/api.py:263 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:120 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:134 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:162 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:189 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:182 +msgid "Initializing extension manager." 
+msgstr "" + +#: cinder/api/extensions.py:197 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:235 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:236 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:240 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:256 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:262 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:276 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:287 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:356 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:266 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:463 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:786 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:907 +msgid "subclasses must implement construct()!" +msgstr "" + +#: cinder/api/contrib/admin_actions.py:81 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:227 +#: cinder/api/contrib/volume_transfer.py:157 +#: cinder/api/contrib/volume_transfer.py:193 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:224 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:60 +msgid "Snapshot not found." 
+msgstr "" + +#: cinder/api/contrib/hosts.py:86 cinder/api/openstack/wsgi.py:245 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:136 +#, python-format +msgid "Host '%s' could not be found." +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:168 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:180 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:206 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:214 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:111 +msgid "Please specify a name for QoS specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:220 +msgid "Failed to disassociate qos specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:222 +msgid "Qos specs still in use." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:298 +#: cinder/api/contrib/qos_specs_manage.py:351 +msgid "Volume Type id must not be None." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:72 +msgid "Missing required element quota_class_set in request body." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:81 +msgid "Quota class limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:85 +msgid "Quota class limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:60 +msgid "Quota limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quotas.py:65 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:100 +msgid "Missing required element quota_set in request body." +msgstr "" + +#: cinder/api/contrib/quotas.py:111 +#, python-format +msgid "Bad key(s) in quota set: %s" +msgstr "" + +#: cinder/api/contrib/scheduler_hints.py:36 +msgid "Malformed scheduler_hints attribute" +msgstr "" + +#: cinder/api/contrib/services.py:84 +msgid "" +"Query by service parameter is deprecated. Please use binary parameter " +"instead." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:51 +msgid "'status' must be specified." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:61 +#, python-format +msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:67 +#, python-format +msgid "" +"Provided snapshot status %(provided)s not allowed for snapshot with " +"status %(current)s." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:79 +msgid "progress must be an integer percentage" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:101 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:105 +#: cinder/api/v1/snapshot_metadata.py:75 cinder/api/v1/volume_metadata.py:75 +#: cinder/api/v2/snapshot_metadata.py:75 cinder/api/v2/volume_metadata.py:74 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:108 +#: cinder/api/v1/snapshot_metadata.py:79 cinder/api/v1/volume_metadata.py:79 +#: cinder/api/v2/snapshot_metadata.py:79 cinder/api/v2/volume_metadata.py:78 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:150 +msgid "" +"Key names can only contain alphanumeric characters, underscores, periods," +" colons and hyphens." 
+msgstr "" + +#: cinder/api/contrib/volume_actions.py:99 +#, python-format +msgid "" +"Invalid request to attach volume to an instance %(instance_uuid)s and a " +"host %(host_name)s simultaneously" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:107 +msgid "Invalid request to attach volume to an invalid target" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:111 +msgid "" +"Invalid request to attach volume with an invalid mode. Attaching mode " +"should be 'rw' or 'ro'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:196 +msgid "Unable to fetch connection information from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:216 +msgid "Unable to terminate volume connection from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:229 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:237 +msgid "Bad value for 'force' parameter." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:240 +msgid "'force' is not string or bool." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:280 +msgid "New volume size must be specified as an integer." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:299 +msgid "Must specify readonly in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:307 +msgid "Bad value for 'readonly'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:311 +msgid "'readonly' not string or bool" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:325 +msgid "New volume type must be specified." +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:131 +msgid "Listing volume transfers" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:147 +#, python-format +msgid "Creating new volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:162 +#, python-format +msgid "Creating transfer of volume %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:183 +#, python-format +msgid "Accepting volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:196 +#, python-format +msgid "Accepting transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:217 +#, python-format +msgid "Delete transfer with id: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:64 +msgid "key_size must be non-negative" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:67 +msgid "key_size must be an integer" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:73 +msgid "provider must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:75 +msgid "control_location must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:83 +#, python-format +msgid "Valid control location are: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:111 +msgid "Cannot create encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:115 +msgid "Create body is not valid." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:157 +msgid "Cannot delete encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/middleware/auth.py:108 +msgid "Invalid service catalog json." 
+msgstr "" + +#: cinder/api/middleware/fault.py:44 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:53 cinder/api/openstack/wsgi.py:984 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/fault.py:69 +#, python-format +msgid "%(exception)s: %(explanation)s" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:55 cinder/api/middleware/sizelimit.py:64 +#: cinder/api/middleware/sizelimit.py:78 +msgid "Request is too large." +msgstr "" + +#: cinder/api/openstack/__init__.py:69 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:80 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:104 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:126 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." +msgstr "" + +#: cinder/api/openstack/wsgi.py:220 cinder/api/openstack/wsgi.py:634 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:639 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:677 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:682 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:685 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:793 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:799 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:803 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:914 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:917 cinder/api/openstack/wsgi.py:930 +#: cinder/api/v1/snapshot_metadata.py:53 cinder/api/v1/snapshot_metadata.py:71 +#: cinder/api/v1/snapshot_metadata.py:96 cinder/api/v1/snapshot_metadata.py:121 +#: cinder/api/v1/volume_metadata.py:53 cinder/api/v1/volume_metadata.py:71 +#: cinder/api/v1/volume_metadata.py:96 cinder/api/v1/volume_metadata.py:121 +#: cinder/api/v2/snapshot_metadata.py:53 cinder/api/v2/snapshot_metadata.py:71 +#: cinder/api/v2/snapshot_metadata.py:96 cinder/api/v2/snapshot_metadata.py:121 +#: cinder/api/v2/volume_metadata.py:52 cinder/api/v2/volume_metadata.py:70 +#: cinder/api/v2/volume_metadata.py:95 cinder/api/v2/volume_metadata.py:120 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:927 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:939 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:987 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." 
+msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:139 cinder/api/v2/limits.py:138 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:264 cinder/api/v2/limits.py:261 +msgid "This request was rate-limited." +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:37 cinder/api/v1/snapshot_metadata.py:117 +#: cinder/api/v1/snapshot_metadata.py:156 cinder/api/v2/snapshot_metadata.py:37 +#: cinder/api/v2/snapshot_metadata.py:117 +#: cinder/api/v2/snapshot_metadata.py:156 +msgid "snapshot does not exist" +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:139 +#: cinder/api/v1/snapshot_metadata.py:149 cinder/api/v1/volume_metadata.py:139 +#: cinder/api/v1/volume_metadata.py:149 cinder/api/v2/snapshot_metadata.py:139 +#: cinder/api/v2/snapshot_metadata.py:149 cinder/api/v2/volume_metadata.py:138 +#: cinder/api/v2/volume_metadata.py:148 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:119 cinder/api/v2/snapshots.py:120 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:184 +msgid "'volume_id' must be specified" +msgstr "" + +#: cinder/api/v1/snapshots.py:182 cinder/api/v2/snapshots.py:193 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:186 cinder/api/v2/snapshots.py:202 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:37 cinder/api/v1/volume_metadata.py:117 +#: cinder/api/v1/volume_metadata.py:156 cinder/api/v2/volume_metadata.py:36 +#: cinder/api/v2/volume_metadata.py:116 cinder/api/v2/volume_metadata.py:155 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:111 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:290 cinder/api/v2/volumes.py:228 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:344 cinder/api/v1/volumes.py:348 +#: cinder/api/v2/volumes.py:298 cinder/api/v2/volumes.py:302 +msgid "Invalid imageRef provided." 
+msgstr "" + +#: cinder/api/v1/volumes.py:388 cinder/api/v2/volumes.py:354 +#, python-format +msgid "snapshot id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:401 +#, python-format +msgid "source vol id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:412 cinder/api/v2/volumes.py:377 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/v1/volumes.py:496 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/snapshots.py:111 cinder/api/v2/snapshots.py:126 +#: cinder/api/v2/snapshots.py:267 +msgid "Snapshot could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:174 cinder/api/v2/snapshots.py:234 +#: cinder/api/v2/volumes.py:313 cinder/api/v2/volumes.py:419 +#, python-format +msgid "Missing required element '%s' in request body" +msgstr "" + +#: cinder/api/v2/snapshots.py:190 cinder/api/v2/volumes.py:217 +#: cinder/api/v2/volumes.py:234 cinder/api/v2/volumes.py:449 +msgid "Volume could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:230 cinder/api/v2/volumes.py:415 +msgid "Missing request body" +msgstr "" + +#: cinder/api/v2/types.py:70 +msgid "Volume type not found" +msgstr "" + +#: cinder/api/v2/volumes.py:237 +msgid "Volume cannot be deleted while in attached state" +msgstr "" + +#: cinder/api/v2/volumes.py:343 +msgid "Volume type not found." +msgstr "" + +#: cinder/api/v2/volumes.py:366 +#, python-format +msgid "source volume id:%s not found" +msgstr "" + +#: cinder/api/v2/volumes.py:472 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:66 +msgid "Backup status must be available or error" +msgstr "" + +#: cinder/backup/api.py:105 +msgid "Volume to be backed up must be available" +msgstr "" + +#: cinder/backup/api.py:140 +msgid "Backup status must be available" +msgstr "" + +#: cinder/backup/api.py:145 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:154 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:170 +msgid "Volume to be restored to must be available" +msgstr "" + +#: cinder/backup/api.py:176 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." +msgstr "" + +#: cinder/backup/api.py:181 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:97 +msgid "NULL host not allowed for volume backend lookup." +msgstr "" + +#: cinder/backup/manager.py:100 +#, python-format +msgid "Checking hostname '%s' for backend info." +msgstr "" + +#: cinder/backup/manager.py:107 +#, python-format +msgid "Backend not found in hostname (%s) so using default." +msgstr "" + +#: cinder/backup/manager.py:117 +#, python-format +msgid "Manager requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:120 cinder/backup/manager.py:132 +msgid "Fetching default backend." +msgstr "" + +#: cinder/backup/manager.py:123 +#, python-format +msgid "Volume manager for backend '%s' does not exist." +msgstr "" + +#: cinder/backup/manager.py:129 +#, python-format +msgid "Driver requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:147 +#, python-format +msgid "" +"Registering backend %(backend)s (host=%(host)s " +"backend_name=%(backend_name)s)." +msgstr "" + +#: cinder/backup/manager.py:154 +#, python-format +msgid "Registering default backend %s." 
+msgstr "" + +#: cinder/backup/manager.py:158 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)." +msgstr "" + +#: cinder/backup/manager.py:165 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s." +msgstr "" + +#: cinder/backup/manager.py:184 +msgid "Cleaning up incomplete backup operations." +msgstr "" + +#: cinder/backup/manager.py:189 +#, python-format +msgid "Resetting volume %s to available (was backing-up)." +msgstr "" + +#: cinder/backup/manager.py:194 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)." +msgstr "" + +#: cinder/backup/manager.py:206 +#, python-format +msgid "Resetting backup %s to error (was creating)." +msgstr "" + +#: cinder/backup/manager.py:212 +#, python-format +msgid "Resetting backup %s to available (was restoring)." +msgstr "" + +#: cinder/backup/manager.py:217 +#, python-format +msgid "Resuming delete on backup: %s." +msgstr "" + +#: cinder/backup/manager.py:225 +#, python-format +msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:237 +#, python-format +msgid "" +"Create backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:249 +#, python-format +msgid "" +"Create backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:282 +#, python-format +msgid "Create backup finished. backup: %s." +msgstr "" + +#: cinder/backup/manager.py:286 +#, python-format +msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:299 +#, python-format +msgid "" +"Restore backup aborted: expected volume status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:310 +#, python-format +msgid "" +"Restore backup aborted: expected backup status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:329 +#, python-format +msgid "" +"Restore backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:360 +#, python-format +msgid "" +"Restore backup finished, backup %(backup_id)s restored to volume " +"%(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:379 +#, python-format +msgid "Delete backup started, backup: %s." +msgstr "" + +#: cinder/backup/manager.py:386 +#, python-format +msgid "" +"Delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:399 +#, python-format +msgid "" +"Delete backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:422 +#, python-format +msgid "Delete backup finished, backup %s deleted." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:116 +msgid "" +"rbd striping not supported - ignoring configuration settings for rbd " +"striping" +msgstr "" + +#: cinder/backup/drivers/ceph.py:147 +#, python-format +msgid "invalid user '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:213 +msgid "backup_id required" +msgstr "" + +#: cinder/backup/drivers/ceph.py:224 +#, python-format +msgid "discarding %(length)s bytes from offset %(offset)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:232 +#, python-format +msgid "writing zeroes chunk %d" +msgstr "" + +#: cinder/backup/drivers/ceph.py:246 +#, python-format +msgid "transferring data between '%(src)s' and '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:250 +#, python-format +msgid "%(chunks)s chunks of %(bytes)s bytes to be transferred" +msgstr "" + +#: cinder/backup/drivers/ceph.py:269 +#, python-format +msgid "transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:279 +#, python-format +msgid "transferring remaining %s bytes" +msgstr "" + +#: cinder/backup/drivers/ceph.py:295 +#, python-format +msgid "creating base image '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:322 cinder/backup/drivers/ceph.py:603 +#, python-format +msgid "deleting backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:325 +msgid "no backup snapshot to delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:361 +#, python-format +msgid "trying diff format name format basename='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:369 +#, python-format +msgid "image %s not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:377 +#, python-format +msgid "base image still has %s snapshots so skipping base image delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:382 +#, python-format +msgid "deleting base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:389 +#, python-format +msgid "image busy, retrying %(retries)s more time(s) in %(delay)ss" +msgstr "" + +#: cinder/backup/drivers/ceph.py:394 +msgid "max retries reached - raising error" +msgstr "" + +#: cinder/backup/drivers/ceph.py:397 +#, python-format +msgid "base backup image='%s' deleted)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:407 +#, python-format +msgid "deleting source snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:453 +#, python-format +msgid "performing differential transfer from '%(src)s' to '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:478 +#, python-format +msgid "rbd diff op failed - (ret=%(ret)s stderr=%(stderr)s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:488 +#, python-format +msgid "image '%s' not found - trying diff format name" +msgstr "" + +#: cinder/backup/drivers/ceph.py:493 +#, python-format +msgid "diff format image '%s' not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:528 +#, python-format +msgid "using --from-snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:543 +#, python-format +msgid "source snap '%s' is stale so deleting" +msgstr "" + +#: cinder/backup/drivers/ceph.py:555 +#, python-format +msgid "" +"snap='%(snap)s' does not exist in base image='%(base)s' - aborting " +"incremental backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:566 +#, python-format +msgid "creating backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:586 +#, python-format +msgid "differential backup transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:595 +msgid "differential backup transfer failed" +msgstr "" + +#: 
cinder/backup/drivers/ceph.py:625 +#, python-format +msgid "creating base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:634 +msgid "copying data" +msgstr "" + +#: cinder/backup/drivers/ceph.py:694 +#, python-format +msgid "looking for snapshot of backup base '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:697 +#, python-format +msgid "backup base '%s' has no snapshots" +msgstr "" + +#: cinder/backup/drivers/ceph.py:704 +#, python-format +msgid "backup '%s' has no snapshot" +msgstr "" + +#: cinder/backup/drivers/ceph.py:708 +#, python-format +msgid "backup should only have one snapshot but instead has %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:713 +#, python-format +msgid "found snapshot '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:734 +msgid "need non-zero volume size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:751 +#, python-format +msgid "Starting backup of volume='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:764 +msgid "forcing full backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:776 +#, python-format +msgid "backup '%s' finished." +msgstr "" + +#: cinder/backup/drivers/ceph.py:834 +msgid "adjusting restore vol size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:846 +#, python-format +msgid "trying incremental restore from base='%(base)s' snap='%(snap)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:858 +msgid "differential restore failed, trying full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:869 +#, python-format +msgid "restore transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:916 +#, python-format +msgid "rbd has %s extents" +msgstr "" + +#: cinder/backup/drivers/ceph.py:938 +msgid "dest volume is original volume - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:959 +msgid "destination has extents - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:964 +#, python-format +msgid "no restore point found for backup='%s', forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:995 +msgid "forcing full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1005 +#, python-format +msgid "starting restore from Ceph backup=%(src)s to volume=%(dest)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1016 +msgid "volume_file does not support fileno() so skipping fsync()" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1021 +msgid "restore finished successfully." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:1023 +#, python-format +msgid "restore finished with error - %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1029 +#, python-format +msgid "delete started for backup=%s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1034 +msgid "rbd image not found but continuing anyway so that db entry can be removed" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1037 +#, python-format +msgid "delete '%s' finished with warning" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1039 +#, python-format +msgid "delete '%s' finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:106 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:123 +#, python-format +msgid "single_user auth mode enabled, but %(param)s not set" +msgstr "" + +#: cinder/backup/drivers/swift.py:141 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:146 +#, python-format +msgid "container %s does not exist" +msgstr "" + +#: cinder/backup/drivers/swift.py:151 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/drivers/swift.py:157 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:173 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:182 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:192 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:209 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/drivers/swift.py:214 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:219 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:224 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/drivers/swift.py:234 +#, python-format +msgid "volume size %d is invalid." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:248 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:271 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/drivers/swift.py:278 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:287 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/drivers/swift.py:291 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/drivers/swift.py:297 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:301 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:304 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:312 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/drivers/swift.py:328 cinder/backup/drivers/tsm.py:324 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/drivers/swift.py:345 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/drivers/swift.py:350 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:356 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/drivers/swift.py:362 +#, python-format +msgid "" +"restoring object from swift. backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:378 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/drivers/swift.py:401 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:409 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:423 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:428 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:432 cinder/backup/drivers/tsm.py:378 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:446 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:455 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:458 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:468 cinder/backup/drivers/tsm.py:440 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/backup/drivers/tsm.py:85 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to create device hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:143 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to obtain backup success notification from " +"server.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:173 +#, python-format +msgid "" +"restore: %(vol_id)s Failed.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:199 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a block device." +msgstr "" + +#: cinder/backup/drivers/tsm.py:206 +#, python-format +msgid "backup: %(vol_id)s Failed. Cannot obtain real path to device %(path)s." +msgstr "" + +#: cinder/backup/drivers/tsm.py:213 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a file." +msgstr "" + +#: cinder/backup/drivers/tsm.py:260 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to remove backup hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:286 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to TSM, volume path: " +"%(volume_path)s," +msgstr "" + +#: cinder/backup/drivers/tsm.py:298 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:308 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:338 +#, python-format +msgid "" +"restore: starting restore of backup from TSM to volume %(volume_id)s, " +"backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:352 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:362 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:413 +#, python-format +msgid "" +"delete: %(vol_id)s Failed to run dsmc with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:421 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments with " +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:432 +#, python-format +msgid "" +"delete: %(vol_id)s Failed with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/brick/exception.py:55 +#, python-format +msgid "Exception in string format operation. 
msg='%s'" +msgstr "" + +#: cinder/brick/exception.py:85 +msgid "We are unable to locate any Fibre Channel devices." +msgstr "" + +#: cinder/brick/exception.py:89 +msgid "Unable to find a Fibre Channel volume device." +msgstr "" + +#: cinder/brick/exception.py:93 +#, python-format +msgid "Volume device not found at %(device)s." +msgstr "" + +#: cinder/brick/exception.py:97 +#, python-format +msgid "Unable to find Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:101 +#, python-format +msgid "Failed to create Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:105 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:109 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:113 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:117 +#, python-format +msgid "Connect to volume via protocol %(protocol)s not supported." +msgstr "" + +#: cinder/brick/initiator/connector.py:127 +#, python-format +msgid "Invalid InitiatorConnector protocol specified %(protocol)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:140 +#, python-format +msgid "Failed to access the device on the path %(path)s: %(error)s %(info)s." +msgstr "" + +#: cinder/brick/initiator/connector.py:229 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. Try" +" number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:242 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:317 +#, python-format +msgid "Could not find the iSCSI Initiator File %s" +msgstr "" + +#: cinder/brick/initiator/connector.py:609 +msgid "We are unable to locate any Fibre Channel devices" +msgstr "" + +#: cinder/brick/initiator/connector.py:619 +#, python-format +msgid "Looking for Fibre Channel dev %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:629 +msgid "Fibre Channel volume device not found." +msgstr "" + +#: cinder/brick/initiator/connector.py:633 +#, python-format +msgid "Fibre volume not yet found. Will rescan & retry. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:649 +#, python-format +msgid "Found Fibre Channel volume %(name)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:658 +#, python-format +msgid "Multipath device discovered %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:776 +#, python-format +msgid "AoE volume not yet found at: %(path)s. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:789 +#, python-format +msgid "Found AoE device %(path)s (after %(tries)s rediscover)" +msgstr "" + +#: cinder/brick/initiator/connector.py:815 +#, python-format +msgid "aoe-discover: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:825 +#, python-format +msgid "aoe-revalidate %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:834 +#, python-format +msgid "aoe-flush %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:858 +msgid "" +"Connection details not present. RemoteFsClient may not initialize " +"properly." 
+msgstr "" + +#: cinder/brick/initiator/connector.py:915 +msgid "Invalid connection_properties specified no device_path attribute" +msgstr "" + +#: cinder/brick/initiator/linuxfc.py:50 cinder/brick/initiator/linuxfc.py:56 +msgid "systool is not installed" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:99 +#: cinder/brick/initiator/linuxscsi.py:107 +#: cinder/brick/initiator/linuxscsi.py:124 +#, python-format +msgid "multipath call failed exit (%(code)s)" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:145 +#, python-format +msgid "Couldn't find multipath device %(line)s" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:149 +#, python-format +msgid "Found multipath device = %(mdev)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:140 +msgid "Attempting recreate of backing lun..." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:158 +#, python-format +msgid "" +"Failed to recover attempt to create iscsi backing lun for volume " +"id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:177 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:184 +#, python-format +msgid "" +"Created volume path %(vp)s,\n" +"content: %(vc)%" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:216 cinder/brick/iscsi/iscsi.py:365 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:227 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:258 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:262 +#, python-format +msgid "Volume path %s does not exist, nothing to remove." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:280 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:290 cinder/brick/iscsi/iscsi.py:550 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:375 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:469 +msgid "cinder-rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:489 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:513 cinder/brick/iscsi/iscsi.py:522 +#, python-format +msgid "Failed to create iscsi target for volume id:%s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:532 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:542 +#, python-format +msgid "Failed to remove iscsi target for volume id:%s." 
+msgstr "" + +#: cinder/brick/iscsi/iscsi.py:571 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 cinder/brick/local_dev/lvm.py:158 +#: cinder/brick/local_dev/lvm.py:474 cinder/brick/local_dev/lvm.py:503 +#: cinder/brick/local_dev/lvm.py:546 cinder/brick/local_dev/lvm.py:609 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 cinder/brick/local_dev/lvm.py:159 +#: cinder/brick/local_dev/lvm.py:475 cinder/brick/local_dev/lvm.py:504 +#: cinder/brick/local_dev/lvm.py:547 cinder/brick/local_dev/lvm.py:610 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 cinder/brick/local_dev/lvm.py:160 +#: cinder/brick/local_dev/lvm.py:476 cinder/brick/local_dev/lvm.py:505 +#: cinder/brick/local_dev/lvm.py:548 cinder/brick/local_dev/lvm.py:611 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, python-format +msgid "Unable to locate Volume Group %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:157 +msgid "Error querying thin pool about data_percent" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:370 +#, python-format +msgid "Unable to find VG: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:420 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:473 +msgid "Error creating Volume" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:489 +#, python-format +msgid "Unable to find LV: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:502 +msgid "Error creating snapshot" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:545 +msgid "Error activating LV" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:563 +#, python-format +msgid "Error reported running lvremove: CMD: %(command)s, RESPONSE: %(response)s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:568 +msgid "Attempting udev settle and retry of lvremove..." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:608 +msgid "Error extending Volume" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:39 +msgid "nfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:45 +msgid "glusterfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:86 +#, python-format +msgid "Already mounted: %s" +msgstr "" + +#: cinder/common/config.py:125 +msgid "Deploy v1 of the Cinder API." +msgstr "" + +#: cinder/common/config.py:128 +msgid "Deploy v2 of the Cinder API." +msgstr "" + +#: cinder/common/sqlalchemyutils.py:66 +#: cinder/openstack/common/db/sqlalchemy/utils.py:72 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:114 +#: cinder/openstack/common/db/sqlalchemy/utils.py:120 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/compute/nova.py:97 +#, python-format +msgid "Novaclient connection created using URL: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:63 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:190 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:843 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1842 +#, python-format +msgid "VolumeType %s deletion failed, VolumeType in use." 
+msgstr "" + +#: cinder/db/sqlalchemy/api.py:2530 +#, python-format +msgid "No backup with id %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2615 +msgid "Volume must be available" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2639 +#, python-format +msgid "Volume in unexpected state %s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2662 +#, python-format +msgid "" +"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " +"%(status)s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:37 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:64 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:240 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:269 +msgid "Downgrade from initial Cinder install is unsupported." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:49 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:74 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:105 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:45 +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:48 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:46 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:127 +msgid "Dropping foreign key reservations_ibfk_1 failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:133 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:140 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:147 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:60 +msgid "Exception while creating table 'volume_glance_metadata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:75 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:68 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:58 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:61 +msgid "transfers table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:31 +msgid "migrations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:61 +#, python-format +msgid "Table |%s| not created" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:37 +#, python-format +msgid "Exception while dropping table %s." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:100 +#, python-format +msgid "Exception while creating table %s." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:34 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:43 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:49 +#, python-format +msgid "Column |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:92 +msgid "encryption_key_id column not dropped from volumes" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:100 +msgid "encryption_key_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:105 +msgid "volume_type_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:113 +msgid "encryption table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:49 +msgid "Table quality_of_service_specs not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:60 +msgid "Added qos_specs_id column to volume type table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:85 +msgid "Dropping foreign key volume_types_ibfk_1 failed" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:93 +msgid "Dropping qos_specs_id column failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:100 +msgid "Dropping quality_of_service_specs table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:59 +msgid "volume_admin_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:48 +msgid "" +"Found existing 'default' entries in the quota_classes table. Skipping " +"insertion of default values." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:72 +msgid "Added default quota class data into the DB." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:74 +msgid "Default quota class data not inserted into the DB." +msgstr "" + +#: cinder/image/glance.py:161 cinder/image/glance.py:169 +#, python-format +msgid "Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:94 cinder/image/image_utils.py:199 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/image/image_utils.py:101 +#, python-format +msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:109 cinder/image/image_utils.py:192 +#, python-format +msgid "" +"Size is %(image_size)dGB and doesn't fit in a volume of size " +"%(volume_size)dGB." +msgstr "" + +#: cinder/image/image_utils.py:157 +#, python-format +msgid "" +"qemu-img is not installed and image is of type %s. Only RAW images can " +"be used if qemu-img is not installed." +msgstr "" + +#: cinder/image/image_utils.py:164 +msgid "" +"qemu-img is not installed and the disk format is not specified. Only RAW" +" images can be used if qemu-img is not installed." 
+msgstr "" + +#: cinder/image/image_utils.py:178 +#, python-format +msgid "Copying image from %(tmp)s to volume %(dest)s - size: %(size)s" +msgstr "" + +#: cinder/image/image_utils.py:206 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:224 +#, python-format +msgid "Converted to %(vol_format)s, but format is now %(file_format)s" +msgstr "" + +#: cinder/image/image_utils.py:260 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:78 +msgid "" +"config option keymgr.fixed_key has not been defined: some operations may " +"fail unexpectedly" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:80 +msgid "keymgr.fixed_key not defined" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:134 +#, python-format +msgid "Not deleting key %s" +msgstr "" + +#: cinder/openstack/common/eventlet_backdoor.py:140 +#, python-format +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/fileutils.py:64 +#, python-format +msgid "Reloading cached file %s" +msgstr "" + +#: cinder/openstack/common/gettextutils.py:252 +msgid "Message objects do not support addition." +msgstr "" + +#: cinder/openstack/common/gettextutils.py:261 +msgid "" +"Message objects do not support str() because they may contain non-ascii " +"characters. Please use unicode() or translate() instead." +msgstr "" + +#: cinder/openstack/common/imageutils.py:96 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:189 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:200 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:227 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:235 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/log.py:301 +#, python-format +msgid "Deprecated: %s" +msgstr "" + +#: cinder/openstack/common/log.py:402 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:453 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:623 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:82 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:89 +#: cinder/tests/brick/test_brick_connector.py:466 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:129 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:136 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:43 +#, python-format +msgid "Unexpected argument for periodic task creation: %(arg)s." 
+msgstr "" + +#: cinder/openstack/common/periodic_task.py:134 +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:139 +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:177 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:186 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." +msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:127 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/openstack/common/processutils.py:142 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:167 +#: cinder/openstack/common/processutils.py:239 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:345 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:179 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/openstack/common/processutils.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:318 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:220 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/openstack/common/processutils.py:224 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/openstack/common/service.py:175 +#: cinder/openstack/common/service.py:269 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:187 +msgid "Exception during rpc cleanup." 
+msgstr "" + +#: cinder/openstack/common/service.py:238 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:275 +msgid "Unhandled exception" +msgstr "" + +#: cinder/openstack/common/service.py:308 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/openstack/common/service.py:327 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/openstack/common/service.py:337 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/openstack/common/service.py:354 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/openstack/common/service.py:358 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/service.py:362 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/openstack/common/service.py:392 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/openstack/common/service.py:410 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/openstack/common/sslutils.py:98 +#, python-format +msgid "Invalid SSL version : %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:86 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/strutils.py:182 +#, python-format +msgid "Invalid string format: %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:189 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/openstack/common/versionutils.py:69 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and " +"may be removed in %(remove_in)s." +msgstr "" + +#: cinder/openstack/common/versionutils.py:73 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s and may be removed in " +"%(remove_in)s. It will not be superseded." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:29 +msgid "An unknown error occurred in crypto utils." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:36 +#, python-format +msgid "Block size of %(given)d is too big, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:45 +#, python-format +msgid "Length of %(given)d is too long, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/db/exception.py:44 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:487 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:538 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:610 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/utils.py:33 +msgid "Sort key supplied was not valid." +msgstr "" + +#: cinder/openstack/common/notifier/api.py:129 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:145 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:164 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. 
Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:105 +#, python-format +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:83 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:216 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:299 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:345 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "received %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:422 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:423 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:280 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:459 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:594 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:597 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:631 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:640 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:668 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." 
+msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:166 +#: cinder/openstack/common/rpc/impl_qpid.py:164 +msgid "Failed to process message... skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:477 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:499 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:536 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:552 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:606 +#: cinder/openstack/common/rpc/impl_qpid.py:507 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:624 +#: cinder/openstack/common/rpc/impl_qpid.py:522 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:628 +#: cinder/openstack/common/rpc/impl_qpid.py:526 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:667 +#: cinder/openstack/common/rpc/impl_qpid.py:561 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:84 +#, python-format +msgid "Invalid value for qpid_topology_version: %d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:455 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:461 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:474 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:534 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:101 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:136 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:137 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:138 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:146 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:158 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:200 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:205 +msgid "You cannot send on this socket." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:267 +#, python-format +msgid "Running func with context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:305 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:387 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:437 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:443 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:475 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:481 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:497 +#, python-format +msgid "Required IPC directory does not exist at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:506 +#, python-format +msgid "Permission denied to IPC directory at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:509 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:543 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:562 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:590 +msgid "Skipping topic registration. Already registered." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:597 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:649 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:662 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:675 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:678 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:681 +#, python-format +msgid "Received message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:682 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:691 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:698 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:721 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:724 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:728 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:731 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:771 +#, python-format +msgid "topic is %s." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:815 +#, python-format +msgid "rpc_zmq_matchmaker = %(orig)s is deprecated; use %(new)s instead" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#: cinder/openstack/common/rpc/matchmaker_ring.py:79 +#: cinder/openstack/common/rpc/matchmaker_ring.py:97 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:54 +#, python-format +msgid "extra_spec requirement '%(req)s' does not match '%(cap)s'" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:67 +#, python-format +msgid "%(host_state)s fails resource_type extra_specs requirements" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:43 +msgid "Re-scheduling is disabled." +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:52 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/scheduler/driver.py:69 +msgid "Must implement host_passes_filters" +msgstr "" + +#: cinder/scheduler/driver.py:74 +msgid "Must implement find_retype_host" +msgstr "" + +#: cinder/scheduler/driver.py:78 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:82 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:98 +#, python-format +msgid "cannot place volume %(id)s on %(host)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:114 +#, python-format +msgid "No valid hosts for volume %(id)s with type %(type)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:125 +#, python-format +msgid "" +"Current host not valid for volume %(id)s with type %(type)s, migration " +"not allowed" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:156 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:174 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:207 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:259 +#, python-format +msgid "Filtered %s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:276 +#, python-format +msgid "Choosing %s" +msgstr "" + +#: cinder/scheduler/host_manager.py:264 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:269 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:294 +#, python-format +msgid "volume service is down or disabled. (host: %s)" +msgstr "" + +#: cinder/scheduler/manager.py:63 +msgid "" +"ChanceScheduler and SimpleScheduler have been deprecated due to lack of " +"support for advanced features like: volume types, volume encryption, QoS " +"etc. These two schedulers can be fully replaced by FilterScheduler with " +"certain combination of filters and weighers." 
+msgstr "" + +#: cinder/scheduler/manager.py:98 cinder/scheduler/manager.py:100 +msgid "Failed to create scheduler manager volume flow" +msgstr "" + +#: cinder/scheduler/manager.py:159 +msgid "New volume type not specified in request_spec." +msgstr "" + +#: cinder/scheduler/manager.py:174 +#, python-format +msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." +msgstr "" + +#: cinder/scheduler/manager.py:192 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:68 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%s'" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:43 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:57 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:53 +msgid "No volume_id provided to populate a request_spec from" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:116 +#, python-format +msgid "Failed to schedule_create_volume: %(cause)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:135 +#, python-format +msgid "Failed notifying on %(topic)s payload %(payload)s" +msgstr "" + +#: cinder/tests/fake_driver.py:57 cinder/volume/driver.py:784 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:76 cinder/volume/driver.py:884 +#, python-format +msgid "FAKE ISER: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:97 +msgid "local_path not implemented" +msgstr "" + +#: cinder/tests/fake_driver.py:124 cinder/tests/fake_driver.py:129 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:70 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:78 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:94 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:97 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:58 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_netapp_nfs.py:360 +#, python-format +msgid "Share %(share)s and file name %(file_name)s" +msgstr "" + +#: cinder/tests/test_rbd.py:768 cinder/volume/drivers/rbd.py:175 +msgid "flush() not supported in this version of librbd" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:260 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1507 +#, python-format +msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1510 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1515 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:60 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:61 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: 
cinder/tests/test_xiv_ds8k.py:102 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/tests/api/contrib/test_backups.py:741 +msgid "Invalid input" +msgstr "" + +#: cinder/tests/integrated/test_login.py:29 +#, python-format +msgid "volume: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:32 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:42 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:50 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:58 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:100 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:103 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:121 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:148 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:159 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:166 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/transfer/api.py:68 +msgid "Volume in unexpected state" +msgstr "" + +#: cinder/transfer/api.py:102 cinder/volume/api.py:367 +msgid "status must be available" +msgstr "" + +#: cinder/transfer/api.py:119 +#, python-format +msgid "Failed to create transfer record for %s" +msgstr "" + +#: cinder/transfer/api.py:136 +#, python-format +msgid "Attempt to transfer %s with invalid auth key." +msgstr "" + +#: cinder/transfer/api.py:156 cinder/volume/flows/api/create_volume.py:508 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/transfer/api.py:182 +#, python-format +msgid "Failed to update quota donating volume transfer id %s" +msgstr "" + +#: cinder/transfer/api.py:199 +#, python-format +msgid "Volume %s has been transferred." 
+msgstr "" + +#: cinder/volume/api.py:143 +#, python-format +msgid "Unable to query if %s is in the availability zone set" +msgstr "" + +#: cinder/volume/api.py:171 cinder/volume/api.py:173 +msgid "Failed to create api volume flow" +msgstr "" + +#: cinder/volume/api.py:202 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:214 +#, python-format +msgid "Volume status must be available or error, but current status is: %s" +msgstr "" + +#: cinder/volume/api.py:224 +msgid "Volume cannot be deleted while migrating" +msgstr "" + +#: cinder/volume/api.py:229 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:285 cinder/volume/api.py:350 +#: cinder/volume/qos_specs.py:240 cinder/volume/volume_types.py:67 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:370 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:377 +msgid "status must be in-use to detach" +msgstr "" + +#: cinder/volume/api.py:388 +msgid "Volume status must be available to reserve" +msgstr "" + +#: cinder/volume/api.py:464 +msgid "Snapshot cannot be created while volume is migrating" +msgstr "" + +#: cinder/volume/api.py:468 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:490 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:502 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:553 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/api.py:581 cinder/volume/flows/api/create_volume.py:208 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:585 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:589 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:720 cinder/volume/api.py:772 +msgid "Volume status must be available/in-use." +msgstr "" + +#: cinder/volume/api.py:723 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/api.py:752 +msgid "Volume status must be available to extend." +msgstr "" + +#: cinder/volume/api.py:757 +#, python-format +msgid "" +"New size for extend must be greater than current size. (current: " +"%(size)s, extended: %(new_size)s)" +msgstr "" + +#: cinder/volume/api.py:778 +msgid "Volume is already part of an active migration" +msgstr "" + +#: cinder/volume/api.py:784 +msgid "volume must not have snapshots" +msgstr "" + +#: cinder/volume/api.py:797 +#, python-format +msgid "No available service named %s" +msgstr "" + +#: cinder/volume/api.py:803 +msgid "Destination host must be different than current host" +msgstr "" + +#: cinder/volume/api.py:833 +msgid "Source volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:837 +msgid "Destination volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:842 +#, python-format +msgid "Destination has migration_status %(stat)s, expected %(exp)s." +msgstr "" + +#: cinder/volume/api.py:853 +msgid "Volume status must be available to update readonly flag." 
+msgstr "" + +#: cinder/volume/api.py:862 +#, python-format +msgid "Unable to update type due to incorrect status on volume: %s" +msgstr "" + +#: cinder/volume/api.py:868 +#, python-format +msgid "Volume %s is already part of an active migration." +msgstr "" + +#: cinder/volume/api.py:874 +#, python-format +msgid "migration_policy must be 'on-demand' or 'never', passed: %s" +msgstr "" + +#: cinder/volume/api.py:887 +#, python-format +msgid "Invalid volume_type passed: %s" +msgstr "" + +#: cinder/volume/api.py:900 +#, python-format +msgid "New volume_type same as original: %s" +msgstr "" + +#: cinder/volume/api.py:915 +msgid "Retype cannot change encryption requirements" +msgstr "" + +#: cinder/volume/api.py:927 +msgid "Retype cannot change front-end qos specs for in-use volumes" +msgstr "" + +#: cinder/volume/driver.py:189 cinder/volume/drivers/netapp/nfs.py:174 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:282 +#, python-format +msgid "copy_data_between_volumes %(src)s -> %(dest)s." +msgstr "" + +#: cinder/volume/driver.py:295 cinder/volume/driver.py:309 +#, python-format +msgid "Failed to attach volume %(vol)s" +msgstr "" + +#: cinder/volume/driver.py:327 +#, python-format +msgid "Failed to copy volume %(src)s to %(dest)d" +msgstr "" + +#: cinder/volume/driver.py:340 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:358 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:394 +#, python-format +msgid "Unable to access the backend storage via the path %(path)s." +msgstr "" + +#: cinder/volume/driver.py:433 +#, python-format +msgid "Creating a new backup for volume %s." +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Restoring backup %(backup)s to volume %(volume)s." +msgstr "" + +#: cinder/volume/driver.py:474 +msgid "Extend volume not implemented" +msgstr "" + +#: cinder/volume/driver.py:533 cinder/volume/drivers/emc/emc_smis_iscsi.py:113 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:546 +#, python-format +msgid "ISCSI discovery attempt failed for:%s" +msgstr "" + +#: cinder/volume/driver.py:548 +#, python-format +msgid "Error from iscsiadm -m discovery: %s" +msgstr "" + +#: cinder/volume/driver.py:595 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:599 cinder/volume/drivers/emc/emc_smis_iscsi.py:156 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:696 +msgid "The volume driver requires the iSCSI initiator name in the connector." +msgstr "" + +#: cinder/volume/driver.py:726 cinder/volume/driver.py:845 +#: cinder/volume/drivers/eqlx.py:247 cinder/volume/drivers/lvm.py:359 +#: cinder/volume/drivers/zadara.py:650 +#: cinder/volume/drivers/emc/emc_smis_common.py:859 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:235 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:602 +#: cinder/volume/drivers/netapp/iscsi.py:1032 +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/nexenta/iscsi.py:538 +#: cinder/volume/drivers/windows/windows.py:205 +msgid "Updating volume stats" +msgstr "" + +#: cinder/volume/driver.py:924 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:203 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." 
+msgstr "" + +#: cinder/volume/manager.py:209 +msgid "" +"ThinLVMVolumeDriver is deprecated, please configure LVMISCSIDriver and " +"lvm_type=thin. Continuing with those settings." +msgstr "" + +#: cinder/volume/manager.py:228 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)" +msgstr "" + +#: cinder/volume/manager.py:235 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s" +msgstr "" + +#: cinder/volume/manager.py:244 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:257 +#, python-format +msgid "Failed to re-export volume %s: setting to error state" +msgstr "" + +#: cinder/volume/manager.py:264 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:271 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:273 +#, python-format +msgid "" +"Error encountered during re-exporting phase of driver initialization: " +"%(name)s" +msgstr "" + +#: cinder/volume/manager.py:283 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:328 cinder/volume/manager.py:330 +msgid "Failed to create manager volume flow" +msgstr "" + +#: cinder/volume/manager.py:374 cinder/volume/manager.py:391 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:380 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:389 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:394 +#, python-format +msgid "Cannot delete volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:422 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:427 +#, python-format +msgid "volume %s: glance metadata deleted" +msgstr "" + +#: cinder/volume/manager.py:430 +#, python-format +msgid "no glance metadata found for volume %s" +msgstr "" + +#: cinder/volume/manager.py:434 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:451 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:462 +#, python-format +msgid "snapshot %(snap_id)s: creating" +msgstr "" + +#: cinder/volume/manager.py:490 +#, python-format +msgid "" +"Failed updating %(snapshot_id)s metadata using the provided volumes " +"%(volume_id)s metadata" +msgstr "" + +#: cinder/volume/manager.py:496 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:508 cinder/volume/manager.py:518 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:526 +#, python-format +msgid "Cannot delete snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:556 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:559 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:579 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:583 +msgid "being attached by another host" +msgstr "" + +#: cinder/volume/manager.py:587 +msgid "being attached by different mode" +msgstr "" + +#: cinder/volume/manager.py:590 +msgid "status must be available or attaching" +msgstr "" + +#: cinder/volume/manager.py:698 +#, python-format +msgid 
"Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "" + +#: cinder/volume/manager.py:760 +#, python-format +msgid "Unable to fetch connection information from backend: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:807 +#, python-format +msgid "Unable to terminate volume connection: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:854 +msgid "failed to create new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:857 +msgid "timeout creating new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:880 +#, python-format +msgid "Failed to copy volume %(vol1)s to %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:909 +#, python-format +msgid "" +"migrate_volume_completion: completing migration for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:921 +#, python-format +msgid "" +"migrate_volume_completion is cleaning up an error for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:940 +#, python-format +msgid "Failed to delete migration source vol %(vol)s: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:976 +#, python-format +msgid "volume %s: calling driver migrate_volume" +msgstr "" + +#: cinder/volume/manager.py:1016 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/manager.py:1024 +#, python-format +msgid "" +"Unable to update stats, %(driver_name)s -%(driver_version)s " +"%(config_group)s driver is uninitialized." +msgstr "" + +#: cinder/volume/manager.py:1044 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/manager.py:1091 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to extend volume by %(s_size)sG, " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/manager.py:1103 +#, python-format +msgid "volume %s: extending" +msgstr "" + +#: cinder/volume/manager.py:1105 +#, python-format +msgid "volume %s: extended successfully" +msgstr "" + +#: cinder/volume/manager.py:1107 +#, python-format +msgid "volume %s: Error trying to extend volume" +msgstr "" + +#: cinder/volume/manager.py:1169 +msgid "Failed to update usages while retyping volume." +msgstr "" + +#: cinder/volume/manager.py:1170 +msgid "Failed to get old volume type quota reservations" +msgstr "" + +#: cinder/volume/manager.py:1190 +#, python-format +msgid "Volume %s: retyped succesfully" +msgstr "" + +#: cinder/volume/manager.py:1193 +#, python-format +msgid "" +"Volume %s: driver error when trying to retype, falling back to generic " +"mechanism." +msgstr "" + +#: cinder/volume/manager.py:1204 +msgid "Retype requires migration but is not allowed." +msgstr "" + +#: cinder/volume/manager.py:1212 +msgid "Volume must not have snapshots." 
+msgstr "" + +#: cinder/volume/qos_specs.py:57 +#, python-format +msgid "Valid consumer of QoS specs are: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:84 cinder/volume/qos_specs.py:105 +#: cinder/volume/qos_specs.py:155 cinder/volume/qos_specs.py:197 +#: cinder/volume/qos_specs.py:211 cinder/volume/qos_specs.py:225 +#: cinder/volume/volume_types.py:43 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:123 cinder/volume/qos_specs.py:140 +#: cinder/volume/qos_specs.py:272 cinder/volume/volume_types.py:52 +#: cinder/volume/volume_types.py:99 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/qos_specs.py:156 +#, python-format +msgid "Failed to get all associations of qos specs %s" +msgstr "" + +#: cinder/volume/qos_specs.py:189 +#, python-format +msgid "" +"Type %(type_id)s is already associated with another qos specs: " +"%(qos_specs_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:198 +#, python-format +msgid "Failed to associate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:212 +#, python-format +msgid "Failed to disassociate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:226 +#, python-format +msgid "Failed to disassociate qos specs %s." +msgstr "" + +#: cinder/volume/qos_specs.py:284 cinder/volume/volume_types.py:111 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/utils.py:144 +#, python-format +msgid "" +"Incorrect value error: %(blocksize)s, it may indicate that " +"'volume_dd_blocksize' was configured incorrectly. Fall back to default." +msgstr "" + +#: cinder/volume/volume_types.py:130 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:131 +#: cinder/volume/drivers/block_device.py:143 cinder/volume/drivers/lvm.py:654 +#: cinder/volume/drivers/lvm.py:669 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:157 cinder/volume/drivers/lvm.py:687 +#, python-format +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:183 cinder/volume/drivers/lvm.py:483 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:200 cinder/volume/drivers/lvm.py:504 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:272 cinder/volume/drivers/lvm.py:227 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:287 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:311 cinder/volume/drivers/lvm.py:300 +#: cinder/volume/drivers/zadara.py:509 cinder/volume/drivers/nexenta/nfs.py:189 +#, python-format +msgid "Creating clone of volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:380 +msgid "No free disk" +msgstr "" + +#: cinder/volume/drivers/block_device.py:393 +msgid "No big enough free disk" +msgstr "" + +#: cinder/volume/drivers/coraid.py:84 +#, python-format +msgid "Invalid ESM url scheme \"%s\". Supported https only." +msgstr "" + +#: cinder/volume/drivers/coraid.py:111 +msgid "Invalid REST handle name. Expected path." 
+msgstr "" + +#: cinder/volume/drivers/coraid.py:134 +#, python-format +msgid "Call to json.loads() failed: %(ex)s. Response: %(resp)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:224 +msgid "Session is expired. Relogin on ESM." +msgstr "" + +#: cinder/volume/drivers/coraid.py:244 +msgid "Reply is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:246 +msgid "Error message is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:284 +#, python-format +msgid "Coraid Appliance ping failed: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:297 +#, python-format +msgid "Volume \"%(name)s\" created with VSX LUN \"%(lun)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:311 +#, python-format +msgid "Volume \"%s\" deleted." +msgstr "" + +#: cinder/volume/drivers/coraid.py:315 +#, python-format +msgid "Resize volume \"%(name)s\" to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:319 +#, python-format +msgid "Repository for volume \"%(name)s\" found: \"%(repo)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:333 +#, python-format +msgid "Volume \"%(name)s\" resized. New size is %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:385 +msgid "Cannot create clone volume in different repository." +msgstr "" + +#: cinder/volume/drivers/coraid.py:505 +#, python-format +msgid "Initialize connection %(shelf)s/%(lun)s for %(name)s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:139 +#, python-format +msgid "" +"CLI output\n" +"%s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:154 +msgid "Reading CLI MOTD" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:158 +#, python-format +msgid "Setting CLI terminal width: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:162 +#, python-format +msgid "Sending CLI command: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:169 +msgid "Error executing EQL command" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:199 +#, python-format +msgid "EQL-driver: executing \"%s\"" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:208 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:383 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:215 cinder/volume/drivers/san/san.py:149 +#, python-format +msgid "Error running SSH command: %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:282 +#, python-format +msgid "Volume %s does not exist, it may have already been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:300 +#, python-format +msgid "EQL-driver: Setup is complete, group IP is %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:304 +msgid "Failed to setup the Dell EqualLogic driver" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:320 +#, python-format +msgid "Failed to create volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:329 +#, python-format +msgid "Volume %s was not found while trying to delete it" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:333 +#, python-format +msgid "Failed to delete volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:348 +#, python-format +msgid "Failed to create snapshot of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:361 +#, python-format +msgid "Failed to create volume from snapshot %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:374 +#, python-format +msgid "Failed to create clone of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:384 +#, python-format +msgid "Failed to delete snapshot %(snap)s of volume %(vol)s" +msgstr "" + +#: 
cinder/volume/drivers/eqlx.py:405 +#, python-format +msgid "Failed to initialize connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:415 +#, python-format +msgid "Failed to terminate connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:436 +#, python-format +msgid "Volume %s is not found; it may have been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:440 +#, python-format +msgid "Failed to ensure export of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:459 +#, python-format +msgid "Failed to extend_volume %(name)s from %(current_size)sGB to %(new_size)sGB" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:86 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:91 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:103 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:161 +#, python-format +msgid "Cloning volume %(src)s to volume %(dst)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:166 +msgid "Volume status must be 'available'." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:202 cinder/volume/drivers/nfs.py:122 +#: cinder/volume/drivers/netapp/nfs.py:753 +#, python-format +msgid "casted to %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:216 +msgid "Snapshot status must be \"available\" to clone." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:238 +#, python-format +msgid "snapshot: %(snap)s, volume: %(vol)s, volume_size: %(size)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:257 +#, python-format +msgid "will copy from snapshot at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:275 cinder/volume/drivers/nfs.py:172 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:373 +#, python-format +msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:403 +#, python-format +msgid "nova call result: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:405 +msgid "Call to Nova to create snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:427 +msgid "Nova returned \"error\" status while creating snapshot." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:431 +#, python-format +msgid "Status of snapshot %(id)s is now %(status)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:444 +#, python-format +msgid "Timed out while waiting for Nova update for creation of snapshot %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:456 +#, python-format +msgid "create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:457 +#, python-format +msgid "volume id: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:532 +msgid "'active' must be present when writing snap_info." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:562 +#, python-format +msgid "deleting snapshot %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:566 +msgid "Volume status must be \"available\" or \"in-use\"." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:582 +#, python-format +msgid "" +"Snapshot record for %s is not present, allowing snapshot_delete to " +"proceed." 
+msgstr "" + +#: cinder/volume/drivers/glusterfs.py:587 +#, python-format +msgid "snapshot_file for this snap is %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:608 +#, python-format +msgid "No base file found for %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:625 +#, python-format +msgid "No %(base_id)s found for %(file)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:680 +#, python-format +msgid "No file found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:690 +#, python-format +msgid "No snap found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:701 +#, python-format +msgid "No file depends on %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:727 +#, python-format +msgid "Check condition failed: %s expected to be None." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:778 +msgid "Call to Nova delete snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:796 +#, python-format +msgid "status of snapshot %s is still \"deleting\"... waiting" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:802 +#, python-format +msgid "Unable to delete snapshot %(id)s, status: %(status)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:815 +#, python-format +msgid "Timed out while waiting for Nova update for deletion of snapshot %(id)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:904 +#, python-format +msgid "%s must be a valid raw or qcow2 image." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:967 +msgid "Extend volume is only supported for this driver when no snapshots exist." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:975 +#, python-format +msgid "Unrecognized backing format: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:990 +#, python-format +msgid "creating new volume at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:993 +#, python-format +msgid "file already exists at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1019 cinder/volume/drivers/nfs.py:159 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1021 +#, python-format +msgid "Available shares: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1038 +#, python-format +msgid "" +"GlusterFS share at %(dir)s is not writable by the Cinder volume service. " +"Snapshot operations will not be supported." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:96 +#, python-format +msgid "GPFS is not active. Detailed output: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:97 +#, python-format +msgid "GPFS is not running - state: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:140 +msgid "Option gpfs_mount_point_base is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:147 +msgid "Option gpfs_images_share_mode is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:153 +msgid "Option gpfs_images_dir is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:160 +#, python-format +msgid "" +"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " +"belong to different file systems" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:169 +#, python-format +msgid "" +"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in " +"cluster daemon level %(cur)s - must be at least at level %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:183 +#, python-format +msgid "%s must be an absolute path." 
+msgstr "" + +#: cinder/volume/drivers/gpfs.py:188 +#, python-format +msgid "%s is not a directory." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:197 +#, python-format +msgid "" +"The GPFS filesystem %(fs)s is not at the required release level. Current" +" level is %(cur)s, must be at least %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:556 +#, python-format +msgid "Failed to resize volume %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:604 +#, python-format +msgid "mkfs failed on volume %(vol)s, error message was: %(err)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:637 +#, python-format +msgid "" +"%s cannot be accessed. Verify that GPFS is active and file system is " +"mounted." +msgstr "" + +#: cinder/volume/drivers/lvm.py:189 +#, python-format +msgid "Unabled to delete due to existing snapshot for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:215 +#, python-format +msgid "Volume device file path %s does not exist." +msgstr "" + +#: cinder/volume/drivers/lvm.py:221 +#, python-format +msgid "Size for volume: %s not found, cannot secure delete." +msgstr "" + +#: cinder/volume/drivers/lvm.py:262 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:361 +#, python-format +msgid "Unable to update stats on non-initialized Volume Group: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:462 +#, python-format +msgid "Error creating iSCSI target, retrying creation for target: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:482 +#, python-format +msgid "volume_info:%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:518 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:519 cinder/volume/drivers/lvm.py:724 +#: cinder/volume/drivers/huawei/rest_common.py:1225 +#, python-format +msgid "%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:573 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/nfs.py:109 +msgid "Driver specific implementation needs to return mount_point_base." +msgstr "" + +#: cinder/volume/drivers/nfs.py:263 +#, python-format +msgid "Expected volume size was %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:264 +#, python-format +msgid " but size is now %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:361 +#, python-format +msgid "%s is already mounted" +msgstr "" + +#: cinder/volume/drivers/nfs.py:421 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:426 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/nfs.py:431 +#, python-format +msgid "NFS config 'nfs_oversub_ratio' invalid. Must be > 0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:439 +#, python-format +msgid "NFS config 'nfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:493 +#, python-format +msgid "Selected %s as target nfs share." 
+msgstr "" + +#: cinder/volume/drivers/nfs.py:526 +#, python-format +msgid "%s is above nfs_used_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:529 +#, python-format +msgid "%s is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:532 +#, python-format +msgid "%s reserved space is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/rbd.py:160 +#, python-format +msgid "Invalid argument - whence=%s not supported" +msgstr "" + +#: cinder/volume/drivers/rbd.py:164 +msgid "Invalid argument" +msgstr "" + +#: cinder/volume/drivers/rbd.py:183 +msgid "fileno() not supported by RBD()" +msgstr "" + +#: cinder/volume/drivers/rbd.py:210 +#, python-format +msgid "error opening rbd image %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:259 +msgid "rados and rbd python libraries not found" +msgstr "" + +#: cinder/volume/drivers/rbd.py:265 +msgid "error connecting to ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:346 cinder/volume/drivers/sheepdog.py:178 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:377 +#, python-format +msgid "clone depth exceeds limit of %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:411 +#, python-format +msgid "maximum clone depth (%d) has been reached - flattening source volume" +msgstr "" + +#: cinder/volume/drivers/rbd.py:423 +#, python-format +msgid "flattening source volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:435 +#, python-format +msgid "creating snapshot='%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:445 +#, python-format +msgid "cloning '%(src_vol)s@%(src_snap)s' to '%(dest)s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:459 +msgid "clone created successfully" +msgstr "" + +#: cinder/volume/drivers/rbd.py:468 +#, python-format +msgid "creating volume '%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:484 +#, python-format +msgid "flattening %(pool)s/%(img)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:490 +#, python-format +msgid "cloning %(pool)s/%(img)s@%(snap)s to %(dst)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:527 +msgid "volume has no backup snaps" +msgstr "" + +#: cinder/volume/drivers/rbd.py:550 +#, python-format +msgid "volume %s is not a clone" +msgstr "" + +#: cinder/volume/drivers/rbd.py:568 +#, python-format +msgid "deleting parent snapshot %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:579 +#, python-format +msgid "deleting parent %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:593 +#, python-format +msgid "volume %s no longer exists in backend" +msgstr "" + +#: cinder/volume/drivers/rbd.py:609 +msgid "volume has clone snapshot(s)" +msgstr "" + +#: cinder/volume/drivers/rbd.py:625 +#, python-format +msgid "deleting rbd volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:629 +msgid "" +"ImageBusy error raised while deleting rbd volume. This may have been " +"caused by a connection from a client that has crashed and, if so, may be " +"resolved by retrying the delete after 30 seconds has elapsed." 
+msgstr "" + +#: cinder/volume/drivers/rbd.py:642 +msgid "volume is a clone so cleaning references" +msgstr "" + +#: cinder/volume/drivers/rbd.py:696 +#, python-format +msgid "connection data: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:705 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:709 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:712 +msgid "Not an rbd snapshot" +msgstr "" + +#: cinder/volume/drivers/rbd.py:724 +#, python-format +msgid "not cloneable: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:728 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:733 +msgid "rbd image clone requires image format to be 'raw' but image {0} is '{1}'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:747 +#, python-format +msgid "Unable to open image %(loc)s: %(err)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:817 +msgid "volume backup complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:830 +msgid "volume restore complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:840 cinder/volume/drivers/sheepdog.py:195 +#, python-format +msgid "Failed to Extend Volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:845 cinder/volume/drivers/sheepdog.py:200 +#: cinder/volume/drivers/windows/windows.py:223 +#, python-format +msgid "Extend volume from %(old_size)s GB to %(new_size)s GB." +msgstr "" + +#: cinder/volume/drivers/scality.py:67 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:78 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:84 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:105 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:139 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:59 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:64 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:144 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:151 +#, python-format +msgid "" +"Failed to make httplib connection SolidFire Cluster: %s (verify san_ip " +"settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:154 +#, python-format +msgid "Failed to make httplib connection: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:161 +#, python-format +msgid "" +"Request to SolidFire cluster returned bad status: %(status)s / %(reason)s" +" (check san_login/san_password settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:166 +#, python-format +msgid "HTTP request failed, with status: %(status)s and reason: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:177 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:183 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:187 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:189 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:195 +#, python-format +msgid "Detected 
xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:202 +#: cinder/volume/drivers/solidfire.py:271 +#: cinder/volume/drivers/solidfire.py:366 +#, python-format +msgid "API response: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:222 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:253 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:315 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:398 +msgid "Failed to get model update from clone" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:410 +#, python-format +msgid "Failed volume create: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:425 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:460 +#, python-format +msgid "Failed to get SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:469 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:478 +#, python-format +msgid "Volume %s, not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:481 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:550 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:554 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:556 +msgid "This usually means the volume was never successfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:569 +#, python-format +msgid "Failed to delete SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:572 +#: cinder/volume/drivers/solidfire.py:646 +#: cinder/volume/drivers/solidfire.py:709 +#: cinder/volume/drivers/solidfire.py:734 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:575 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:579 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:587 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:638 +msgid "Entering SolidFire extend_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:660 +msgid "Leaving SolidFire extend_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:665 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:673 +msgid "Failed to get updated stats" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:703 +#: cinder/volume/drivers/solidfire.py:728 +msgid "Entering SolidFire attach_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:773 +msgid "Leaving SolidFire transfer volume" +msgstr "" + +#: cinder/volume/drivers/zadara.py:236 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:260 +#, python-format +msgid "Operation completed. 
%(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:357 +#, python-format +msgid "Pool %(name)s: %(total)sGB total, %(free)sGB free" +msgstr "" + +#: cinder/volume/drivers/zadara.py:408 cinder/volume/drivers/zadara.py:531 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:438 +#, python-format +msgid "Create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:445 cinder/volume/drivers/zadara.py:490 +#: cinder/volume/drivers/zadara.py:516 +#, python-format +msgid "Volume %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:456 +#, python-format +msgid "Delete snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:464 +#, python-format +msgid "snapshot: original volume %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:472 +#, python-format +msgid "snapshot: snapshot %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:483 +#, python-format +msgid "Creating volume from snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:496 +#, python-format +msgid "Snapshot %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:614 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:40 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:79 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:83 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:91 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:98 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:107 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:115 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:130 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:137 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:144 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:152 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:157 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:167 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:177 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:188 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:197 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:218 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:230 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:241 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:257 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:266 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:278 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:287 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:292 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:302 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:312 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:321 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:342 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. 
Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:354 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:365 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:381 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:390 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:402 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:411 +msgid "Entering delete_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:413 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:420 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:430 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:438 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:442 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:456 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:465 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:472 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:476 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:488 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:497 +#: cinder/volume/drivers/emc/emc_smis_common.py:567 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:502 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:518 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:527 +#, python-format +msgid "" +"Error Create Snapshot: %(snapshot)s Volume: %(volume)s Error: " +"%(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:535 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:541 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:545 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:551 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:559 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:574 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:590 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:599 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:611 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:621 +#, python-format +msgid "Create export: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:626 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:648 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:663 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:674 +#, python-format +msgid "Error mapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:678 +#, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:694 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:707 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:711 +#, python-format +msgid "HidePaths for volume %s completed successfully." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:724 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:739 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:744 +#, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:757 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:770 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:775 +#, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:781 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:790 +#: cinder/volume/drivers/emc/emc_smis_common.py:820 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:804 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:810 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:834 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:840 +#, python-format +msgid "Volume %s is already mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:852 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:884 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:887 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:903 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:906 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:928 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:948 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:952 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:959 +msgid "Cannot connect to ECOM server" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:971 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:984 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:997 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1010 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1054 +#, python-format +msgid "Pool %(storage_type)s is not found." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1060 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1066 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1082 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1114 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1117 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1130 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1158 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1184 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1188 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1248 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1289 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1302 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1314 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1326 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1361 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1404 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1409 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1419 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1441 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1463 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1491 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1520 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1526 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1538 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1548 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1550 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1566 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:152 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:161 +#, python-format +msgid "Cannot find device number for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:191 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:198 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:215 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:70 +#, python-format +msgid "Range: start LU: %(start)s, end LU: %(end)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:84 +#, python-format +msgid "setting LU upper (end) limit to %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:92 +#, python-format +msgid "%(element)s: %(val)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:103 cinder/volume/drivers/hds/hds.py:105 +#, python-format +msgid "XML exception reading parameter: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:178 +#, python-format +msgid "portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:197 +#, python-format +msgid "No configuration found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:250 +#, python-format +msgid "HDP not found: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:289 +#, python-format +msgid "iSCSI portal not found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:327 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:355 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is cloned." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:372 +#, python-format +msgid "LUN %(lun)s extended to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:395 +#, python-format +msgid "delete lun %(lun)s on %(name)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:480 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created from snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:503 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is created as snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:522 +#, python-format +msgid "LUN %s is deleted." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:57 +msgid "_instantiate_driver: configuration not found." 
+msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:64 +#, python-format +msgid "" +"_instantiate_driver: Loading %(protocol)s driver for Huawei OceanStor " +"%(product)s series storage arrays." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:84 +#, python-format +msgid "" +"\"Product\" or \"Protocol\" is illegal. \"Product\" should be set to " +"either T, Dorado or HVS. \"Protocol\" should be set to either iSCSI or " +"FC. Product: %(product)s Protocol: %(protocol)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:74 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s host: %(host)s initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:92 +#: cinder/volume/drivers/huawei/huawei_t.py:461 +#, python-format +msgid "initialize_connection: Target FC ports WWNS: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:101 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(ini)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:159 +#: cinder/volume/drivers/huawei/rest_common.py:1278 +#, python-format +msgid "" +"_get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " +"check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:206 +#: cinder/volume/drivers/huawei/rest_common.py:1083 +#, python-format +msgid "_get_tgt_iqn: iSCSI IP is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:234 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:248 +#, python-format +msgid "" +"_get_iscsi_tgt_port_info: Failed to get iSCSI port info. Please make sure" +" the iSCSI port IP %s is configured in array." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:323 +#: cinder/volume/drivers/huawei/huawei_t.py:552 +#, python-format +msgid "" +"terminate_connection: volume: %(vol)s, host: %(host)s, connector: " +"%(initiator)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:351 +#, python-format +msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:436 +msgid "validate_connector: The FC driver requires thewwpns in the connector." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:443 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:578 +#, python-format +msgid "_remove_fc_ports: FC port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:40 +#, python-format +msgid "parse_xml_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:129 +#, python-format +msgid "_get_host_os_type: Host %(ip)s OS type is %(os)s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:59 +#, python-format +msgid "HVS Request URL: %(url)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:60 +#, python-format +msgid "HVS Request Data: %(data)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:73 +#, python-format +msgid "HVS Response Data: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:75 +#, python-format +msgid "Bad response from server: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:82 +msgid "JSON transfer error" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:102 +#, python-format +msgid "Login error, reason is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:166 +#, python-format +msgid "" +"%(err)s\n" +"result: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:173 +#, python-format +msgid "%s \"data\" was not in result." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:208 +msgid "Can't find the Qos policy in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:246 +msgid "Can't find lun or lun group in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:280 +#, python-format +msgid "Invalid resource pool: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:298 +#, python-format +msgid "Get pool info error, pool name is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:327 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:354 +#, python-format +msgid "_stop_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:474 +#, python-format +msgid "" +"_mapping_hostgroup_and_lungroup: lun_group: %(lun_group)sview_id: " +"%(view_id)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:511 +#: cinder/volume/drivers/huawei/rest_common.py:543 +#, python-format +msgid "initiator name:%(initiator_name)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:527 +#, python-format +msgid "host lun id is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:553 +#, python-format +msgid "the free wwns %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:574 +#, python-format +msgid "the fc server properties is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:688 +#, python-format +msgid "JSON transfer data error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:874 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:937 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:964 +#, python-format +msgid "" +"PrefetchType config is wrong. PrefetchType must in 1,2,3,4. fetchtype " +"is:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:970 +msgid "Use default prefetch fetchtype. Prefetch fetchtype:Intelligent." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:982 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal.LUNcopy name: " +"%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1056 +#, python-format +msgid "" +"_get_iscsi_port_info: Failed to get iscsi port info through config IP " +"%(ip)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1101 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1124 +#, python-format +msgid "_parse_volume_type: type id: %(type_id)s config parameter is: %(params)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1157 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the configuration file " +"%(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1162 +#, python-format +msgid "The config parameters are: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1239 +#: cinder/volume/drivers/huawei/ssh_common.py:118 +#: cinder/volume/drivers/huawei/ssh_common.py:1265 +#, python-format +msgid "_check_conf_file: Config file invalid. %s must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1246 +#: cinder/volume/drivers/huawei/ssh_common.py:125 +msgid "_check_conf_file: Config file invalid. StoragePool must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1256 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1300 +msgid "Can not find lun in array" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:54 +#, python-format +msgid "ssh_read: Read SSH timeout. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:70 +msgid "No response message. Please check system status." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:101 +#: cinder/volume/drivers/huawei/ssh_common.py:1249 +msgid "do_setup" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:135 +#: cinder/volume/drivers/huawei/ssh_common.py:1287 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType is invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:169 +#, python-format +msgid "_get_login_info: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:224 +#, python-format +msgid "create_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:242 +#, python-format +msgid "" +"_name_translate: Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:279 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the element in configuration " +"file %(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:373 +#: cinder/volume/drivers/huawei/ssh_common.py:1451 +#, python-format +msgid "LUNType must be \"Thin\" or \"Thick\". LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:395 +msgid "" +"_parse_conf_lun_params: Use default prefetch type. 
Prefetch type: " +"Intelligent" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:421 +#, python-format +msgid "" +"_get_maximum_capacity_pool_id: Failed to get pool id. Please check config" +" file and make sure the StoragePool %s is created in storage array." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:436 +#, python-format +msgid "CLI command: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:466 +#, python-format +msgid "" +"_execute_cli: Can not connect to IP %(old)s, try to connect to the other " +"IP %(new)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:501 +#, python-format +msgid "_execute_cli: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:511 +#, python-format +msgid "delete_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:516 +#, python-format +msgid "delete_volume: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:570 +#, python-format +msgid "" +"create_volume_from_snapshot: snapshot name: %(snapshot)s, volume name: " +"%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:580 +#, python-format +msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:650 +#, python-format +msgid "_wait_for_luncopy: LUNcopy %(luncopyname)s status is %(status)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:688 +#, python-format +msgid "create_cloned_volume: src volume: %(src)s, tgt volume: %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:697 +#, python-format +msgid "Source volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:739 +#, python-format +msgid "" +"extend_volume: extended volume name: %(extended_name)s new added volume " +"name: %(added_name)s new added volume size: %(added_size)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:747 +#, python-format +msgid "extend_volume: volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:779 +#, python-format +msgid "create_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:785 +msgid "create_snapshot: Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:792 +#, python-format +msgid "create_snapshot: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:855 +#, python-format +msgid "delete_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:865 +#, python-format +msgid "" +"delete_snapshot: Can not delete snapshot %s for it is a source LUN of " +"LUNCopy." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:873 +#, python-format +msgid "delete_snapshot: Snapshot %(snap)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:916 +#, python-format +msgid "" +"%(func)s: %(msg)s\n" +"CLI command: %(cmd)s\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:933 +#, python-format +msgid "map_volume: Volume %s was not found." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1079 +#, python-format +msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1102 +#, python-format +msgid "remove_map: Host %s does not exist." 
+msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1106 +#, python-format +msgid "remove_map: Volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1119 +#, python-format +msgid "remove_map: No map between host %(host)s and volume %(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1138 +#, python-format +msgid "" +"_delete_map: There are IOs accessing the system. Retry to delete host map" +" %(mapid)s 10s later." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1146 +#, python-format +msgid "" +"_delete_map: Failed to delete host map %(mapid)s.\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1185 +msgid "_update_volume_stats: Updating volume stats." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1277 +msgid "_check_conf_file: Config file invalid. StoragePool must be specified." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1311 +msgid "" +"_get_device_type: The driver only supports Dorado5100 and Dorado 2100 G2 " +"now." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1389 +#, python-format +msgid "" +"create_volume_from_snapshot: %(device)s does not support create volume " +"from snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1396 +#, python-format +msgid "create_cloned_volume: %(device)s does not support clone volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1404 +#, python-format +msgid "extend_volume: %(device)s does not support extend volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1413 +#, python-format +msgid "create_snapshot: %(device)s does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:132 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:142 +#, python-format +msgid "Failed getting details for pool %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:179 +msgid "do_setup: No configured nodes." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:182 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:186 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:190 +msgid "Unable to determine system name" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:193 +msgid "Unable to determine system id" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:196 +msgid "Unable to determine pool extent size" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:203 +#: cinder/volume/drivers/netapp/iscsi.py:122 +#: cinder/volume/drivers/netapp/nfs.py:639 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:157 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:209 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:217 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:225 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:235 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:254 +msgid "The connector does not contain the required information." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:277 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:297 +msgid "CHAP secret exists for host but CHAP is disabled" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:302 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:314 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:316 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:333 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:342 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:365 +msgid "" +"Could not get FC connection information for the host-volume connection. " +"Is the host configured properly for FC connections?" 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:380 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:385 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:403 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:411 +msgid "terminate_connection: Failed to get host name from connector." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:421 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:447 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:459 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:470 +#, python-format +msgid "enter: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:474 +msgid "extend_volume: Extending a volume with snapshots is not supported." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:481 +#, python-format +msgid "leave: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:497 +#, python-format +msgid "enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:523 +#, python-format +msgid "leave: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:540 +#, python-format +msgid "" +"enter: retype: id=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:581 +#, python-format +msgid "" +"exit: retype: ild=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:622 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:623 +msgid "_update_volume_stats: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:44 +#, python-format +msgid "Could not find key in output of command %(cmd)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:64 +#, python-format +msgid "Failed to get code level (%s)." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:86 +#, python-format +msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:143 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:165 +#, python-format +msgid "Failed to find host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:178 +#, python-format +msgid "enter: get_host_from_connector: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:207 +#, python-format +msgid "leave: get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:218 +#, python-format +msgid "enter: create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:224 +msgid "create_host: Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:235 +msgid "create_host: No initiators or wwpns supplied." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:265 +#, python-format +msgid "leave: create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:275 +#, python-format +msgid "enter: map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:301 +#, python-format +msgid "" +"leave: map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host " +"%(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:311 +#, python-format +msgid "enter: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:318 +#, python-format +msgid "unmap_vol_from_host: No mapping of volume %(vol_name)s to any host found." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:324 +#, python-format +msgid "" +"unmap_vol_from_host: Multiple mappings of volume %(vol_name)s found, no " +"host specified." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:336 +#, python-format +msgid "" +"unmap_vol_from_host: No mapping of volume %(vol_name)s to host %(host) " +"found." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:348 +#, python-format +msgid "leave: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:377 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:383 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:390 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:397 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:402 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:408 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:417 +#, python-format +msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:452 +msgid "Protocol must be specified as ' iSCSI' or ' FC'." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:495 +#, python-format +msgid "enter: create_vdisk: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:498 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:525 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping%(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:535 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within theallotted %(to)d " +"seconds timeout. Terminating." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:544 +#, python-format +msgid "" +"enter: run_flashcopy: execute FlashCopy from source %(source)s to target " +"%(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:552 +#, python-format +msgid "leave: run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:572 +#, python-format +msgid "Loopcall: _check_vdisk_fc_mappings(), vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:595 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:631 +#, python-format +msgid "Calling _ensure_vdisk_no_fc_mappings: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:639 +#, python-format +msgid "enter: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:641 +#, python-format +msgid "Tried to delete non-existant vdisk %s." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:645 +#, python-format +msgid "leave: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:649 +#, python-format +msgid "enter: create_copy: snapshot %(src)s to %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:654 +#, python-format +msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:669 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt)s from vdisk %(src)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:691 +msgid "migrate_volume started without a vdisk copy in the expected pool." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:743 +#, python-format +msgid "" +"Ignore change IO group as storage code level is %(code_level)s, below " +"then 6.4.0.0" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:35 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:211 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:244 +#, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:53 +#, python-format +msgid "Expected no output from CLI command %(cmd)s, got %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:65 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:256 +#, python-format +msgid "" +"Failed to parse CLI output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:142 +msgid "Must pass wwpn or host to lsfabric." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:158 +#, python-format +msgid "Did not find success message nor error for %(fun)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:163 +msgid "" +"storwize_svc_multihostmap_enabled is set to False, not allowing multi " +"host mapping." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:347 +#, python-format +msgid "Did not find expected key %(key)s in %(fun)s: %(raw)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:382 +#, python-format +msgid "" +"Unexpected CLI response: header/row mismatch. header: %(header)s, row: " +"%(row)s" +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:419 +#, python-format +msgid "No element by given name %s." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:440 +msgid "Not a valid value for NaElement." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:444 +msgid "NaElement name cannot be null." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:468 +msgid "Type cannot be converted into NaElement." 
+msgstr "" + +#: cinder/volume/drivers/netapp/common.py:75 +msgid "Required configuration not found" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:103 +#, python-format +msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:109 +#, python-format +msgid "Storage family %s is not supported" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:116 +#, python-format +msgid "No default storage protocol found for storage family %(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:123 +#, python-format +msgid "" +"Protocol %(storage_protocol)s is not supported for storage family " +"%(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:130 +#, python-format +msgid "" +"NetApp driver of family %(storage_family)s and protocol " +"%(storage_protocol)s loaded" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:139 +msgid "Only loading netapp drivers supported." +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:158 +#, python-format +msgid "" +"The configured NetApp driver is deprecated. Please refer the link to " +"resolve the issue '%s'." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:69 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:105 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:150 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:166 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:175 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:191 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:227 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:232 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:238 +#, python-format +msgid "Failed to get LUN target details for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:249 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:252 +#, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:290 +#, python-format +msgid "Snapshot %s deletion successful" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:310 +#: cinder/volume/drivers/netapp/iscsi.py:565 +#: cinder/volume/drivers/netapp/nfs.py:99 +#: cinder/volume/drivers/netapp/nfs.py:206 +#, python-format +msgid "Resizing %s failed. Cleaning volume." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:325 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:412 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:431 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:511 +msgid "Object is not a NetApp LUN." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:543 +#, python-format +msgid "Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:545 +#, python-format +msgid "Error getting lun attribute. Exception: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:600 +#, python-format +msgid "No need to extend volume %s as it is already the requested new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:606 +#, python-format +msgid "Resizing lun %s directly to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:633 +#, python-format +msgid "Lun %(path)s geometry failed. Message - %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:662 +#, python-format +msgid "Moving lun %(name)s to %(new_name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:677 +#, python-format +msgid "Resizing lun %s using sub clone to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:684 +#, python-format +msgid "%s cannot be sub clone resized as it is hosted on compressed volume" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:690 +#, python-format +msgid "%s cannot be sub clone resized as it contains no blocks." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:707 +#, python-format +msgid "Post clone resize lun %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:718 +#, python-format +msgid "Failure staging lun %s to tmp." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:723 +#, python-format +msgid "Failure moving new cloned lun to %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:727 +#, python-format +msgid "Failure deleting staged tmp lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:730 +#, python-format +msgid "Unknown exception in post clone resize lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:732 +#, python-format +msgid "Exception details: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:736 +msgid "Getting lun block count." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:741 +#, python-format +msgid "Failure getting lun info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:785 +#, python-format +msgid "Failed to get vol with required size and extra specs for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:796 +#, python-format +msgid "Error provisioning vol %(name)s on %(volume)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:841 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:982 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:986 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1087 +msgid "Cluster ssc is not updated. No volume stats found." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1149 +#: cinder/volume/drivers/netapp/nfs.py:1080 +msgid "Unsupported ONTAP version. ONTAP version 7.3.1 and above is supported." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1153 +#: cinder/volume/drivers/netapp/nfs.py:1084 +#: cinder/volume/drivers/netapp/utils.py:320 +msgid "Api version could not be determined." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1164 +#, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1273 +#, python-format +msgid "Error finding luns for volume %s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1390 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1393 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1456 +msgid "Volume refresh job already running. Returning..." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1462 +#, python-format +msgid "Error refreshing vol capacity. Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1470 +#, python-format +msgid "Refreshing capacity info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:104 +#: cinder/volume/drivers/netapp/nfs.py:211 +#, python-format +msgid "NFS file %s not discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:224 +#, python-format +msgid "Copied image to volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:230 +#, python-format +msgid "Registering image in cache %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:237 +#, python-format +msgid "" +"Exception while registering image %(image_id)s in cache. Exception: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:250 +#, python-format +msgid "Found cache file for image %(image_id)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:263 +#, python-format +msgid "Cloning img from cache for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:271 +msgid "Image cache cleaning in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:282 +msgid "Image cache cleaning in progress." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:293 +#, python-format +msgid "Cleaning cache for share %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:298 +#, python-format +msgid "Files to be queued for deletion %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:305 +#, python-format +msgid "Exception during cache cleaning %(share)s. Message - %(ex)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:311 +msgid "Image cache cleaning done." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:336 +#, python-format +msgid "Bytes to free %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:343 +#, python-format +msgid "Delete file path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:358 +#, python-format +msgid "Deleting file at path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:363 +#, python-format +msgid "Exception during deleting %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:395 +#, python-format +msgid "Unexpected exception in cloning image %(image_id)s. 
Message: %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:411 +#, python-format +msgid "Cloning image %s from cache" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:415 +#, python-format +msgid "Cache share: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:425 +#, python-format +msgid "Unexpected exception during image cloning in share %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:431 +#, python-format +msgid "Cloning image %s directly in share" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:436 +#, python-format +msgid "Share is cloneable %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:443 +#, python-format +msgid "Image is raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:450 +#, python-format +msgid "Image will locally be converted to raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:457 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:467 +#, python-format +msgid "Performing post clone for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:474 +msgid "NFS file could not be discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:478 +msgid "Checking file for resize" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:482 +#, python-format +msgid "Resizing file to %sG" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:488 +msgid "Resizing image file failed." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:510 +msgid "Discover file retries exhausted." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:529 +#, python-format +msgid "Image location not in the expected format %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:557 +#, python-format +msgid "Found possible share matches %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:561 +msgid "Unexpected exception while short listing used share." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:599 +#, python-format +msgid "Extending volume %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:710 +#, python-format +msgid "Shares on vserver %s will only be used for provisioning." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:714 +#: cinder/volume/drivers/netapp/nfs.py:892 +msgid "No vserver set in config. SSC will be disabled." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:757 +#, python-format +msgid "Exception creating vol %(name)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:765 +#, python-format +msgid "Volume %s could not be created on shares." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:815 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:856 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:862 +#, python-format +msgid "" +"Cloning with params volume %(volume)s, src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:924 +msgid "No cluster ssc stats found. Wait for next volume stats update." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:941 +msgid "No shares found hence skipping ssc refresh." 
+msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:978 +#: cinder/volume/drivers/netapp/nfs.py:1221 +#, python-format +msgid "Shortlisted del elg files %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:983 +#: cinder/volume/drivers/netapp/nfs.py:1226 +#, python-format +msgid "Getting file usage for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:988 +#: cinder/volume/drivers/netapp/nfs.py:1231 +#, python-format +msgid "file-usage for path %(path)s is %(bytes)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1005 +#: cinder/volume/drivers/netapp/nfs.py:1268 +#, python-format +msgid "Share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1007 +#: cinder/volume/drivers/netapp/nfs.py:1270 +#, python-format +msgid "No share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1038 +#, python-format +msgid "Found volume %(vol)s for share %(share)s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1129 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1139 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:241 +#, python-format +msgid "Unexpected error while creating ssc vol list. Message - %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:272 +#, python-format +msgid "Exception querying aggr options. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:313 +#, python-format +msgid "Exception querying sis information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:347 +#, python-format +msgid "Exception querying mirror information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:379 +#, python-format +msgid "Exception querying storage disk. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:421 +#, python-format +msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:455 +#, python-format +msgid "Successfully completed stale refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:482 +#, python-format +msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:488 +#, python-format +msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:500 +msgid "Backend not a VolumeDriver." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:502 +msgid "Backend server not NaServer." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:505 +msgid "ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:517 +msgid "refresh stale ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:616 +msgid "Fatal error: User not permitted to query NetApp volumes." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:623 +#, python-format +msgid "" +"The user does not have access or sufficient privileges to use all ssc " +"apis. The ssc features %s may not work as expected." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:122 +msgid "ems executed successfully." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:124 +#, python-format +msgid "Failed to invoke ems. 
Message : %s" +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:137 +msgid "" +"It is not the recommended way to use drivers by NetApp. Please use " +"NetAppDriver to achieve the functionality." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:160 +msgid "Requires an NaServer instance." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:317 +msgid "Unsupported Clustered Data ONTAP version." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:99 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:150 +#, python-format +msgid "Extending volume: %(id)s New size: %(size)s GB" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:166 +#, python-format +msgid "Volume %s does not exist, it seems it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:179 +#, python-format +msgid "Cannot delete snapshot %(origin): %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:190 +#, python-format +msgid "Creating temp snapshot of the original volume: %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:200 +#: cinder/volume/drivers/nexenta/nfs.py:200 +#, python-format +msgid "Volume creation failed, deleting created snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:205 +#: cinder/volume/drivers/nexenta/nfs.py:205 +#, python-format +msgid "Failed to delete zfs snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:223 +#, python-format +msgid "Enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:250 +#, python-format +msgid "Remote NexentaStor appliance at %s should be SSH-bound." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:267 +#, python-format +msgid "" +"Cannot send source snapshot %(src)s to destination %(dst)s. Reason: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:275 +#, python-format +msgid "" +"Cannot delete temporary source snapshot %(src)s on NexentaStor Appliance:" +" %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:281 +#, python-format +msgid "Cannot delete source volume %(volume)s on NexentaStor Appliance: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:318 +#, python-format +msgid "Snapshot %s does not exist, it seems it was already deleted." 
+msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:439 +#: cinder/volume/drivers/windows/windows_utils.py:230 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:449 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:461 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:471 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:481 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:514 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:522 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:83 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:88 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:89 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:90 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:96 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:85 +#, python-format +msgid "Volume %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:89 +#, python-format +msgid "Folder %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:114 +#, python-format +msgid "Creating folder on Nexenta Store %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:146 +#, python-format +msgid "Cannot destroy created folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:176 +#, python-format +msgid "Cannot destroy cloned folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:227 +#, python-format +msgid "Folder %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:237 +#: cinder/volume/drivers/nexenta/nfs.py:268 +#, python-format +msgid "Snapshot %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:302 +#, python-format +msgid "Creating regular file: %s.This may take some time." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:313 +#, python-format +msgid "Regular file: %s created." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:365 +#, python-format +msgid "Sharing folder %s on Nexenta Store" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:393 +#, python-format +msgid "Shares loaded: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/utils.py:46 +#, python-format +msgid "Invalid value: \"%s\"" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:93 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:99 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:107 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:137 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:190 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:246 +#, python-format +msgid "Snapshot info: %(name)s => %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:321 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:79 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:166 +#, python-format +msgid "Invalid hp3parclient version. Version %s or greater required." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:179 +#, python-format +msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:193 +#, python-format +msgid "HP3PARCommon %(common_ver)s, hp3parclient %(rest_ver)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:212 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:494 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:228 +#, python-format +msgid "Failed to get domain because CPG (%s) doesn't exist on array." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:247 +#, python-format +msgid "Error extending volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:347 +#, python-format +msgid "command %s failed" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:390 +#, python-format +msgid "Error running ssh command: %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:586 +#, python-format +msgid "VV Set %s does not exist." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:633 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:684 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:752 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1004 +#, python-format +msgid "Failure in update_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1019 +#, python-format +msgid "Failure in clear_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1031 +#, python-format +msgid "Error attaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1039 +#, python-format +msgid "Error detaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:124 +#, python-format +msgid "Invalid IP address format '%s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:158 +#, python-format +msgid "" +"Found invalid iSCSI IP address(s) in configuration option(s) " +"hp3par_iscsi_ips or iscsi_ip_address '%s.'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:164 +msgid "At least one valid iSCSI IP address must be set." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:266 +msgid "Least busy iSCSI port not found, using first iSCSI port in list." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:75 +#, python-format +msgid "Failure while invoking function: %(func)s. Error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:162 +#, python-format +msgid "Error while terminating session: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:165 +msgid "Successfully established connection to the server." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:172 +#, python-format +msgid "Error while logging out the user: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:218 +#, python-format +msgid "" +"Not authenticated error occurred. Will create session and try API call " +"again: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:258 +#, python-format +msgid "Task: %(task)s progress: %(prog)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:262 +#, python-format +msgid "Task %s status: success." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:266 +#: cinder/volume/drivers/vmware/api.py:271 +#, python-format +msgid "Task: %(task)s failed with error: %(err)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:290 +msgid "Lease is ready." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:294 +msgid "Lease initializing..." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:304 +#, python-format +msgid "Error: unknown lease state %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:51 +#, python-format +msgid "Read %(bytes)s out of %(max)s from ThreadSafePipe." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:56 +#, python-format +msgid "Completed transfer of size %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:102 +#, python-format +msgid "Initiating image service update on image: %(image)s with meta: %(meta)s" +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:117 +#, python-format +msgid "Glance image: %s is now active." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:123 +#, python-format +msgid "Glance image: %s is in killed state." 
+msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:132 +#, python-format +msgid "Glance image %(id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:171 +#, python-format +msgid "" +"Exception during HTTP connection close in VMwareHTTPWrite. Exception is " +"%s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:203 +#: cinder/volume/drivers/vmware/read_write_util.py:292 +msgid "Could not retrieve URL from lease." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:206 +#, python-format +msgid "Opening vmdk url: %s for write." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:231 +#, python-format +msgid "Written %s bytes to vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:242 +#: cinder/volume/drivers/vmware/read_write_util.py:318 +#, python-format +msgid "Updating progress to %s percent." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:258 +#: cinder/volume/drivers/vmware/read_write_util.py:334 +msgid "Lease released." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:260 +#: cinder/volume/drivers/vmware/read_write_util.py:336 +#, python-format +msgid "Lease is already in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:295 +#, python-format +msgid "Opening vmdk url: %s for read." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:307 +#, python-format +msgid "Read %s bytes from vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:150 +#, python-format +msgid "Error(s): %s occurred in the call to RetrievePropertiesEx." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:189 +#, python-format +msgid "No such SOAP method %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:198 +#, python-format +msgid "httplib error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:209 +#, python-format +msgid "Socket error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:218 +#, python-format +msgid "Type error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:225 +#, python-format +msgid "Error in %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:112 +#, python-format +msgid "Returning spec value %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:115 +#, python-format +msgid "Invalid spec value: %s specified." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:118 +#, python-format +msgid "Returning default spec value: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:169 +#, python-format +msgid "%s not set." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:174 +#, python-format +msgid "Successfully setup driver: %(driver)s for server: %(ip)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:219 +msgid "Backing not available, no operation to be performed." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:287 +#, python-format +msgid "" +"Unable to pick datastore to accommodate %(size)s bytes from the " +"datastores: %(dss)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:293 +#, python-format +msgid "" +"Selected datastore: %(datastore)s with %(host_count)d connected host(s) " +"for the volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:375 +#, python-format +msgid "" +"Unable to find suitable datastore for volume of size: %(vol)s GB under " +"host: %(host)s. 
More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:385 +#, python-format +msgid "Unable to find host to accommodate a disk of size: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:412 +#, python-format +msgid "" +"Unable to find suitable datastore for volume: %(vol)s under host: " +"%(host)s. More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:422 +#, python-format +msgid "Unable to create volume: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:441 +#, python-format +msgid "The instance: %s for which initialize connection is called, exists." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:448 +#, python-format +msgid "There is no backing for the volume: %s. Need to create one." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:456 +msgid "The instance for which initialize connection is called, does not exist." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:461 +#, python-format +msgid "Trying to boot from an empty volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:470 +#, python-format +msgid "" +"Returning connection_info: %(info)s for volume: %(volume)s with " +"connector: %(connector)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:518 +#, python-format +msgid "Snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:523 +#, python-format +msgid "There is no backing, so will not create snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:528 +#, python-format +msgid "Successfully created snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:549 +#, python-format +msgid "Delete snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:554 +#, python-format +msgid "There is no backing, and so there is no snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:558 +#, python-format +msgid "Successfully deleted snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:586 +#, python-format +msgid "Successfully cloned new backing: %(back)s from source VMDK file: %(vmdk)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:603 +#, python-format +msgid "" +"There is no backing for the source volume: %(svol)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:633 +#, python-format +msgid "" +"There is no backing for the source snapshot: %(snap)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:642 +#: cinder/volume/drivers/vmware/vmdk.py:982 +#, python-format +msgid "" +"There is no snapshot point for the snapshoted volume: %(snap)s. Not " +"creating any backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:678 +#, python-format +msgid "Cannot create image of disk format: %s. Only vmdk disk format is accepted." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:713 +#: cinder/volume/drivers/vmware/vmdk.py:771 +#, python-format +msgid "Fetching glance image: %(id)s to server: %(host)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:722 +#: cinder/volume/drivers/vmware/vmdk.py:792 +#, python-format +msgid "Done copying image: %(id)s to volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:725 +#, python-format +msgid "" +"Exception in copy_image_to_volume: %(excep)s. Deleting the backing: " +"%(back)s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:746 +#, python-format +msgid "Exception in _select_ds_for_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:749 +#, python-format +msgid "Selected datastore %(ds)s for new volume of size %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:784 +#, python-format +msgid "Exception in copy_image_to_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:787 +#, python-format +msgid "Deleting the backing: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:808 +#, python-format +msgid "Copy glance image: %s to create new volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:842 +msgid "Upload to glance of attached volume is not supported." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:847 +#, python-format +msgid "Copy Volume: %s to new image." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:853 +#, python-format +msgid "Backing not found, creating for volume: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:872 +#, python-format +msgid "Done copying volume %(vol)s to a new image %(img)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:922 +#, python-format +msgid "Relocating volume: %(backing)s to %(ds)s and %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:961 +#: cinder/volume/drivers/vmware/volumeops.py:630 +#, python-format +msgid "Successfully created clone: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:974 +#, python-format +msgid "" +"There is no backing for the snapshoted volume: %(snap)s. Not creating any" +" backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1010 +#, python-format +msgid "" +"There is no backing for the source volume: %(src)s. Not creating any " +"backing for volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1018 +#, python-format +msgid "Linked clone of source volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:94 +#, python-format +msgid "Downloading image: %s from glance image server as a flat vmdk file." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:107 +#: cinder/volume/drivers/vmware/vmware_images.py:126 +#, python-format +msgid "Downloaded image: %s from glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:113 +#, python-format +msgid "Downloading image: %s from glance image server using HttpNfc import." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:132 +#, python-format +msgid "Uploading image: %s to the Glance image server using HttpNfc export." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:158 +#, python-format +msgid "Uploaded image: %s to the Glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:87 +#, python-format +msgid "Did not find any backing with name: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:94 +#, python-format +msgid "Deleting the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:97 +#, python-format +msgid "Initiated deletion of VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:99 +#, python-format +msgid "Deleted the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:237 +#, python-format +msgid "There are no valid datastores attached to %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:289 +#, python-format +msgid "" +"Creating folder: %(child_folder_name)s under parent folder: " +"%(parent_folder)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:306 +#, python-format +msgid "Child folder already present: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:314 +#, python-format +msgid "Created child folder: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:365 +#, python-format +msgid "Spec for creating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:383 +#, python-format +msgid "" +"Creating volume backing name: %(name)s disk_type: %(disk_type)s size_kb: " +"%(size_kb)s at folder: %(folder)s resourse pool: %(resource_pool)s " +"datastore name: %(ds_name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:395 +#, python-format +msgid "Initiated creation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:398 +#, python-format +msgid "Successfully created volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:438 +#, python-format +msgid "Spec for relocating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:452 +#, python-format +msgid "" +"Relocating backing: %(backing)s to datastore: %(ds)s and resource pool: " +"%(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:462 +#, python-format +msgid "Initiated relocation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:464 +#, python-format +msgid "" +"Successfully relocated volume backing: %(backing)s to datastore: %(ds)s " +"and resource pool: %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:474 +#, python-format +msgid "Moving backing: %(backing)s to folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:479 +#, python-format +msgid "Initiated move of volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:482 +#, python-format +msgid "Successfully moved volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:494 +#, python-format +msgid "Snapshoting backing: %(backing)s with name: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:501 +#, python-format +msgid "Initiated snapshot of volume backing: %(backing)s named: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:505 +#, python-format +msgid "Successfully created snapshot: %(snap)s for volume backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:553 +#, python-format +msgid "Deleting the snapshot: %(name)s from backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:558 +#, python-format +msgid "" +"Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " +"delete anything." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:565 +#, python-format +msgid "Initiated snapshot: %(name)s deletion for backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:569 +#, python-format +msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:597 +#, python-format +msgid "Spec for cloning the backing: %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:613 +#, python-format +msgid "" +"Creating a clone of backing: %(back)s, named: %(name)s, clone type: " +"%(type)s from snapshot: %(snap)s on datastore: %(ds)s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:627 +#, python-format +msgid "Initiated clone of backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:638 +#, python-format +msgid "Deleting file: %(file)s under datacenter: %(dc)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:646 +#, python-format +msgid "Initiated deletion via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:648 +#, python-format +msgid "Successfully deleted file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:701 +msgid "Copying disk data before snapshot of the VM" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:711 +#, python-format +msgid "Initiated copying disk data via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:713 +#, python-format +msgid "Successfully copied disk at: %(src)s to: %(dest)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:722 +#, python-format +msgid "Deleting vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:729 +#, python-format +msgid "Initiated deleting vmdk file via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:731 +#, python-format +msgid "Deleted vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/windows/windows.py:102 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:47 +#, python-format +msgid "" +"check_for_setup_error: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:53 +msgid "check_for_setup_error: there is no ISCSI traffic listening." +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:63 +#, python-format +msgid "" +"get_host_information: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:73 +#, python-format +msgid "" +"get_host_information: the ISCSI target information could not be " +"retrieved. WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:105 +#, python-format +msgid "" +"associate_initiator_with_iscsi_target: an association between initiator: " +"%(init)s and target name: %(target)s could not be established. WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:123 +#, python-format +msgid "" +"delete_iscsi_target: error when deleting the iscsi target associated with" +" target name: %(target)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:139 +#, python-format +msgid "" +"create_volume: error when creating the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:157 +#, python-format +msgid "" +"delete_volume: error when deleting the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:177 +#, python-format +msgid "" +"create_snapshot: error when creating the snapshot name: %(vol_name)s . 
" +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:193 +#, python-format +msgid "" +"create_volume_from_snapshot: error when creating the volume name: " +"%(vol_name)s from snapshot name: %(snap_name)s. WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:208 +#, python-format +msgid "" +"delete_snapshot: error when deleting the snapshot name: %(snap_name)s . " +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:223 +#, python-format +msgid "" +"create_iscsi_target: error when creating iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:240 +#, python-format +msgid "" +"remove_iscsi_target: error when deleting iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:255 +#, python-format +msgid "" +"add_disk_to_target: error adding disk associated to volume : %(vol_name)s" +" to the target name: %(tar_name)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:273 +#, python-format +msgid "" +"copy_vhd_disk: error when copying disk from source path : %(src_path)s to" +" destination path: %(dest_path)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:290 +#, python-format +msgid "" +"extend: error when extending the volume: %(vol_name)s .WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/flows/common.py:52 +#, python-format +msgid "Restoring source %(source_volid)s status to %(status)s" +msgstr "" + +#: cinder/volume/flows/common.py:58 +#, python-format +msgid "" +"Failed setting source volume %(source_volid)s back to its initial " +"%(source_status)s status" +msgstr "" + +#: cinder/volume/flows/common.py:83 +#, python-format +msgid "Updating volume: %(volume_id)s with %(update)s due to: %(reason)s" +msgstr "" + +#: cinder/volume/flows/common.py:90 +#: cinder/volume/flows/manager/create_volume.py:676 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(update)s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:81 +#, python-format +msgid "Originating snapshot status must be one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:103 +#, python-format +msgid "" +"Unable to create a volume from an originating source volume when its " +"status is not one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:126 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than the snapshot size " +"%(snap_size)sGB. They must be >= original snapshot size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:135 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than original volume size " +"%(source_size)sGB. They must be >= original volume size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:144 +#, python-format +msgid "Volume size %(size)s must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:186 +#, python-format +msgid "" +"Size of specified image %(image_size)sGB is larger than volume size " +"%(volume_size)sGB." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:194 +#, python-format +msgid "" +"Volume size %(volume_size)sGB cannot be smaller than the image minDisk " +"size %(min_disk)sGB." 
+msgstr "" + +#: cinder/volume/flows/api/create_volume.py:212 +#, python-format +msgid "Metadata property key %s greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:217 +#, python-format +msgid "Metadata property key %s value greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:254 +#, python-format +msgid "Availability zone '%s' is invalid" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:267 +msgid "Volume must be in the same availability zone as the snapshot" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:276 +msgid "Volume must be in the same availability zone as the source volume" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:315 +msgid "Volume type will be changed to be the same as the source volume." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:463 +#, python-format +msgid "Failed destroying volume entry %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:546 +#, python-format +msgid "Failed rolling back quota for %s reservations" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:590 +#, python-format +msgid "Failed to update quota for deleting volume: %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:678 +#: cinder/volume/flows/manager/create_volume.py:192 +#, python-format +msgid "Volume %s: create failed" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:682 +msgid "Unexpected build error:" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:105 +#, python-format +msgid "" +"Volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d due to " +"%(reason)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:124 +#, python-format +msgid "Volume %s: re-scheduled" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:141 +#, python-format +msgid "Updating volume %(volume_id)s with %(update)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:146 +#, python-format +msgid "Volume %s: resetting 'creating' status failed." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:165 +#, python-format +msgid "Volume %s: rescheduling failed" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:308 +#, python-format +msgid "" +"Failed notifying about the volume action %(event)s for volume " +"%(volume_id)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:345 +#, python-format +msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:347 +#, python-format +msgid "" +"Failed updating volume %(vol_id)s metadata using the provided " +"%(src_type)s %(src_id)s metadata" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:405 +#, python-format +msgid "" +"Failed fetching snapshot %(snapshot_id)s bootable flag using the provided" +" glance snapshot %(snapshot_ref_id)s volume reference" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:418 +#, python-format +msgid "Marking volume %s as bootable." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:421 +#, python-format +msgid "Failed updating volume %(volume_id)s bootable flag to true" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:448 +#, python-format +msgid "" +"Attempting download of %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s." 
+msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:455 +#: cinder/volume/flows/manager/create_volume.py:466 +#, python-format +msgid "" +"Failed to copy image %(image_id)s to volume: %(volume_id)s, error: " +"%(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:461 +#, python-format +msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:475 +#, python-format +msgid "" +"Downloaded image %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s successfully." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:512 +#, python-format +msgid "" +"Creating volume glance metadata for volume %(volume_id)s backed by image " +"%(image_id)s with: %(vol_metadata)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:526 +#, python-format +msgid "" +"Cloning %(volume_id)s from image %(image_id)s at location " +"%(image_location)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:552 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(updates)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:574 +#, python-format +msgid "Unable to create volume. Volume driver %s not initialized" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:588 +#, python-format +msgid "" +"Volume %(volume_id)s: being created using %(functor)s with specification:" +" %(volume_spec)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:611 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with creation provided " +"model %(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:619 +#, python-format +msgid "Volume %s: creating export" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:633 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with driver provided model " +"%(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:680 +#, python-format +msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" +msgstr "" + +#~ msgid "Connection to glance failed" +#~ msgstr "" + +#~ msgid "Invalid snapshot" +#~ msgstr "" + +#~ msgid "Invalid source volume %(reason)s." +#~ msgstr "" + +#~ msgid "The request is invalid." +#~ msgstr "" + +#~ msgid "Invalid input received" +#~ msgstr "" + +#~ msgid "Invalid volume type" +#~ msgstr "" + +#~ msgid "Invalid volume" +#~ msgstr "" + +#~ msgid "Invalid host" +#~ msgstr "" + +#~ msgid "Invalid auth key" +#~ msgstr "" + +#~ msgid "Volume %(volume_id)s persistence file could not be found." +#~ msgstr "" + +#~ msgid "Invalid metadata" +#~ msgstr "" + +#~ msgid "Invalid metadata size" +#~ msgstr "" + +#~ msgid "No disk at %(location)s" +#~ msgstr "" + +#~ msgid "" +#~ msgstr "" + +#~ msgid "Class %(class_name)s could not be found: %(exception)s" +#~ msgstr "" + +#~ msgid "Action not allowed." +#~ msgstr "" + +#~ msgid "Key pair %(key_name)s already exists." +#~ msgstr "" + +#~ msgid "Migration error" +#~ msgstr "" + +#~ msgid "Quota exceeded" +#~ msgstr "" + +#~ msgid "Maximum volume/snapshot size exceeded" +#~ msgstr "" + +#~ msgid "3PAR Host already exists: %(err)s. %(info)s" +#~ msgstr "" + +#~ msgid "Backup volume %(volume_id)s type not recognised." 
+#~ msgstr "" + +#~ msgid "Connection to swift failed" +#~ msgstr "" + +#~ msgid "Volume migration failed" +#~ msgstr "" + +#~ msgid "SSH command injection detected" +#~ msgstr "" + +#~ msgid "Invalid qos specs" +#~ msgstr "" + +#~ msgid "base image still has %s snapshots so not deleting base image" +#~ msgstr "" + +#~ msgid "restore finished." +#~ msgstr "" + +#~ msgid "Resize volume \"%(name)s\" to %(size)s" +#~ msgstr "" + +#~ msgid "Volume \"%(name)s\" resized. New size is %(size)s" +#~ msgstr "" + +#~ msgid "Snapshot file at %s does not exist." +#~ msgstr "" + +#~ msgid "Invalid snapshot backing file format: %s" +#~ msgstr "" + +#~ msgid "Size for volume: %s not found, skipping secure delete." +#~ msgstr "" + +#~ msgid "Extend volume from %(old_size) to %(new_size)" +#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %s does not exist" +#~ msgstr "" + +#~ msgid "Disk not found: %s" +#~ msgstr "" + +#~ msgid "read timed out" +#~ msgstr "" + +#~ msgid "do_setup." +#~ msgstr "" + +#~ msgid "check_for_setup_error." +#~ msgstr "" + +#~ msgid "check_for_setup_error: Can not get device type." +#~ msgstr "" + +#~ msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +#~ msgstr "" + +#~ msgid "_get_device_type: Storage Pool must be configured." +#~ msgstr "" + +#~ msgid "create_volume:volume name: %s." +#~ msgstr "" + +#~ msgid "delete_volume: volume name: %s." +#~ msgstr "" + +#~ msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +#~ msgstr "" + +#~ msgid "create_export: volume name:%s" +#~ msgstr "" + +#~ msgid "create_export:Volume %(name)s does not exist." +#~ msgstr "" + +#~ msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +#~ msgstr "" + +#~ msgid "terminate_connection:Host does not exist. Host name:%(host)s." +#~ msgstr "" + +#~ msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +#~ msgstr "" + +#~ msgid "create_snapshot:Device does not support snapshot." +#~ msgstr "" + +#~ msgid "create_snapshot:Resource pool needs 1GB valid size at least." +#~ msgstr "" + +#~ msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +#~ msgstr "" + +#~ msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s" +#~ msgstr "" + +#~ msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +#~ msgstr "" + +#~ msgid "delete_snapshot:Device does not support snapshot." +#~ msgstr "" + +#~ msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +#~ msgstr "" + +#~ msgid "_check_conf_file: %s" +#~ msgstr "" + +#~ msgid "Write login information to xml error. %s" +#~ msgstr "" + +#~ msgid "_get_login_info error. %s" +#~ msgstr "" + +#~ msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +#~ msgstr "" + +#~ msgid "_get_lun_set_info:%s" +#~ msgstr "" + +#~ msgid "_get_iscsi_info:%s" +#~ msgstr "" + +#~ msgid "CLI command:%s" +#~ msgstr "" + +#~ msgid "_execute_cli:%s" +#~ msgstr "" + +#~ msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +#~ msgstr "" + +#~ msgid "_get_tgt_iqn:iSCSI IP is %s." +#~ msgstr "" + +#~ msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +#~ msgstr "" + +#~ msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +#~ msgstr "" + +#~ msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +#~ msgstr "" + +#~ msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." 
+#~ msgstr "" + +#~ msgid "Could not find attribute for LUN named %s" +#~ msgstr "" + +#~ msgid "Login to 3PAR array invalid" +#~ msgstr "" + +#~ msgid "Cleaning up incomplete backup operations" +#~ msgstr "" + +#~ msgid "Resetting volume %s to available (was backing-up)" +#~ msgstr "" + +#~ msgid "Resetting volume %s to error_restoring (was restoring-backup)" +#~ msgstr "" + +#~ msgid "Resetting backup %s to error (was creating)" +#~ msgstr "" + +#~ msgid "Resetting backup %s to available (was restoring)" +#~ msgstr "" + +#~ msgid "Resuming delete on backup: %s" +#~ msgstr "" + +#~ msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +#~ msgstr "" + +#~ msgid "create_backup finished. backup: %s" +#~ msgstr "" + +#~ msgid "delete_backup started, backup: %s" +#~ msgstr "" + +#~ msgid "delete_backup finished, backup %s deleted" +#~ msgstr "" + +#~ msgid "JSON transfer Error" +#~ msgstr "" + +#~ msgid "create volume error: %(err)s" +#~ msgstr "" + +#~ msgid "Create snapshot error." +#~ msgstr "" + +#~ msgid "Create luncopy error." +#~ msgstr "" + +#~ msgid "_find_host_lun_id transfer data error! " +#~ msgstr "" + +#~ msgid "ssh_read: Read SSH timeout." +#~ msgstr "" + +#~ msgid "There are no hosts in the inventory." +#~ msgstr "" + +#~ msgid "Unable to create volume: %(vol)s on the hosts: %(hosts)s." +#~ msgstr "" + +#~ msgid "Successfully cloned new backing: %s." +#~ msgstr "" + +#~ msgid "Successfully reverted clone: %(clone)s to snapshot: %(snapshot)s." +#~ msgstr "" + +#~ msgid "Copying backing files from %(src)s to %(dest)s." +#~ msgstr "" + +#~ msgid "Initiated copying of backing via task: %s." +#~ msgstr "" + +#~ msgid "Successfully copied backing to %s." +#~ msgstr "" + +#~ msgid "Registering backing at path: %s to inventory." +#~ msgstr "" + +#~ msgid "Initiated registring backing, task: %s." +#~ msgstr "" + +#~ msgid "Successfully registered backing: %s." +#~ msgstr "" + +#~ msgid "Reverting backing to snapshot: %s." +#~ msgstr "" + +#~ msgid "Initiated reverting snapshot via task: %s." +#~ msgstr "" + +#~ msgid "Successfully reverted to snapshot: %s." +#~ msgstr "" + +#~ msgid "Successfully copied disk data to: %s." +#~ msgstr "" + +#~ msgid "Error(s): %s occurred in the call to RetrieveProperties." +#~ msgstr "" + +#~ msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +#~ msgstr "" + +#~ msgid "Deploy v1 of the Cinder API. " +#~ msgstr "" + +#~ msgid "Deploy v2 of the Cinder API. " +#~ msgstr "" + +#~ msgid "_read_xml:%s" +#~ msgstr "" + +#~ msgid "request ip info is %s." +#~ msgstr "" + +#~ msgid "new str info is %s." +#~ msgstr "" + +#~ msgid "Failed to create iser target for volume %(volume_id)s." +#~ msgstr "" + +#~ msgid "Failed to remove iser target for volume %(volume_id)s." 
+#~ msgstr "" + +#~ msgid "rtstool is not installed correctly" +#~ msgstr "" + +#~ msgid "Creating iser_target for: %s" +#~ msgstr "" + +#~ msgid "Failed to create iser target for volume id:%(vol_id)s: %(e)s" +#~ msgstr "" + +#~ msgid "Removing iser_target for: %s" +#~ msgstr "" + +#~ msgid "Failed to remove iser target for volume id:%(vol_id)s: %(e)s" +#~ msgstr "" + +#~ msgid "Volume %s does not exist, it seems it was already deleted" +#~ msgstr "" + +#~ msgid "Executing zfs send/recv on the appliance" +#~ msgstr "" + +#~ msgid "zfs send/recv done, new volume %s created" +#~ msgstr "" + +#~ msgid "Failed to delete temp snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" + +#~ msgid "Failed to delete zfs recv snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" + +#~ msgid "rbd export-diff failed - %s" +#~ msgstr "" + +#~ msgid "rbd import-diff failed - %s" +#~ msgstr "" + +#~ msgid "%s is not on GPFS. Perhaps GPFS not mounted." +#~ msgstr "" + +#~ msgid "Folder %s does not exist, it seems it was already deleted." +#~ msgstr "" + +#~ msgid "No 'os-update_readonly_flag' was specified in request." +#~ msgstr "" + +#~ msgid "Volume 'readonly' flag must be specified in request as a boolean." +#~ msgstr "" + +#~ msgid "ISER provider_location not stored, using discovery" +#~ msgstr "" + +#~ msgid "Could not find iSER export for volume %s" +#~ msgstr "" + +#~ msgid "ISER Discovery: Found %s" +#~ msgstr "" + +#~ msgid "Failed to access the device on the path %(path)s: %(error)s." +#~ msgstr "" + +#~ msgid "iSER device not found at %s" +#~ msgstr "" + +#~ msgid "Found iSER node %(host_device)s (after %(tries)s rescans)." +#~ msgstr "" + +#~ msgid "Skipping ensure_export. No iser_target provisioned for volume: %s" +#~ msgstr "" + +#~ msgid "Skipping remove_export. No iser_target provisioned for volume: %s" +#~ msgstr "" + +#~ msgid "Downloading image: %s from glance image server." +#~ msgstr "" + +#~ msgid "Uploading image: %s to the Glance image server." +#~ msgstr "" + +#~ msgid "Invalid request body" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: prefix %s" +#~ msgstr "" + +#~ msgid "Schedule volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete schedule volume using flow: %s" +#~ msgstr "" + +#~ msgid "Create volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete create volume workflow" +#~ msgstr "" + +#~ msgid "Expected volume result not found" +#~ msgstr "" + +#~ msgid "Manager volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete manager volume workflow" +#~ msgstr "" + +#~ msgid "Unable to update stats, driver is uninitialized" +#~ msgstr "" + +#~ msgid "Bad reponse from server: %s" +#~ msgstr "" + +#~ msgid "%(flow)s has moved into state %(state)s from state %(old_state)s" +#~ msgstr "" + +#~ msgid "No request spec, will not reschedule" +#~ msgstr "" + +#~ msgid "No retry filter property or associated retry info, will not reschedule" +#~ msgstr "" + +#~ msgid "Retry info not present, will not reschedule" +#~ msgstr "" + +#~ msgid "Clear capabilities" +#~ msgstr "" + +#~ msgid "This usually means the volume was never succesfully created." +#~ msgstr "" + +#~ msgid "setting LU uppper (end) limit to %s" +#~ msgstr "" + +#~ msgid "Can't find lun or lun goup in array" +#~ msgstr "" + +#~ msgid "Volume to be restored to is smaller than the backup to be restored" +#~ msgstr "" + +#~ msgid "Volume driver '%(driver)s' not initialized." 
+#~ msgstr "" + +#~ msgid "in looping call" +#~ msgstr "" + +#~ msgid "Is the appropriate service running?" +#~ msgstr "" + +#~ msgid "Could not find another host" +#~ msgstr "" + +#~ msgid "Not enough allocatable volume gigabytes remaining" +#~ msgstr "" + +#~ msgid "Unable to update stats on non-intialized Volume Group: %s" +#~ msgstr "" + +#~ msgid "do_setup: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "migrate_volume started with more than one vdisk copy" +#~ msgstr "" + +#~ msgid "migrate_volume: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "Selected datastore: %s for the volume." +#~ msgstr "" + +#~ msgid "There are no valid datastores present under %s." +#~ msgstr "" + +#~ msgid "Unable to create volume, driver not initialized" +#~ msgstr "" + +#~ msgid "Migration %(migration_id)s could not be found." +#~ msgstr "" + +#~ msgid "Bad driver response status: %(status)s" +#~ msgstr "" + +#~ msgid "Instance %(instance_id)s could not be found." +#~ msgstr "" + +#~ msgid "Volume retype failed: %(reason)s" +#~ msgstr "" + +#~ msgid "SIGTERM received" +#~ msgstr "" + +#~ msgid "Child %(pid)d exited with status %(code)d" +#~ msgstr "" + +#~ msgid "_wait_child %d" +#~ msgstr "" + +#~ msgid "wait wrap.failed %s" +#~ msgstr "" + +#~ msgid "" +#~ "Report interval must be less than " +#~ "service down time. Current config: " +#~ ". Setting " +#~ "service_down_time to: %(new_service_down_time)s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target for volume %(name)s." +#~ msgstr "" + +#~ msgid "Updating iscsi target: %s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target %(name)s: %(e)s" +#~ msgstr "" + +#~ msgid "Caught '%(exception)s' exception." +#~ msgstr "" + +#~ msgid "Get code level failed" +#~ msgstr "" + +#~ msgid "do_setup: Could not get system name" +#~ msgstr "" + +#~ msgid "Failed to get license information." +#~ msgstr "" + +#~ msgid "do_setup: No configured nodes" +#~ msgstr "" + +#~ msgid "enter: _get_chap_secret_for_host: host name %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_chap_secret_for_host: host name " +#~ "%(host_name)s with secret %(chap_secret)s" +#~ msgstr "" + +#~ msgid "" +#~ "_create_host: Cannot clean host name. " +#~ "Host name is not unicode or string" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: %s" +#~ msgstr "" + +#~ msgid "leave: _get_host_from_connector: host %s" +#~ msgstr "" + +#~ msgid "enter: _create_host: host %s" +#~ msgstr "" + +#~ msgid "_create_host: No connector ports" +#~ msgstr "" + +#~ msgid "leave: _create_host: host %(host)s - %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +#~ msgstr "" + +#~ msgid "" +#~ "storwize_svc_multihostmap_enabled is set to " +#~ "False, Not allow multi host mapping" +#~ msgstr "" + +#~ msgid "volume %s mapping to multi host" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _map_vol_to_host: LUN %(result_lun)s, " +#~ "volume %(volume_name)s, host %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "leave: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "_create_host failed to return the host name." +#~ msgstr "" + +#~ msgid "_get_host_from_connector failed to return the host name for connector" +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to any host found." +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: Multiple mappings of " +#~ "volume %(vol_name)s found, no host " +#~ "specified." 
+#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to host %(host_name)s found" +#~ msgstr "" + +#~ msgid "protocol must be specified as ' iSCSI' or ' FC'" +#~ msgstr "" + +#~ msgid "enter: _create_vdisk: vdisk %s " +#~ msgstr "" + +#~ msgid "" +#~ "_create_vdisk %(name)s - did not find success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find success " +#~ "message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find mapping " +#~ "id in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to prepare FlashCopy" +#~ " from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "Unexecpted mapping status %(status)s for " +#~ "mapping %(id)s. Attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Mapping %(id)s prepare failed to " +#~ "complete within the allotted %(to)d " +#~ "seconds timeout. Terminating." +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s with " +#~ "exception %(ex)s" +#~ msgstr "" + +#~ msgid "_prepare_fc_map: %s" +#~ msgstr "" + +#~ msgid "" +#~ "_start_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "enter: _run_flashcopy: execute FlashCopy from" +#~ " source %(source)s to target %(target)s" +#~ msgstr "" + +#~ msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +#~ msgstr "" + +#~ msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %(src_vdisk)s (%(src_id)s) does not exist" +#~ msgstr "" + +#~ msgid "" +#~ "_create_copy: cannot get source vdisk " +#~ "%(src)s capacity from vdisk attributes " +#~ "%(attr)s" +#~ msgstr "" + +#~ msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_flashcopy_mapping_attributes: mapping " +#~ "%(fc_map_id)s, attributes %(attributes)s" +#~ msgstr "" + +#~ msgid "enter: _is_vdisk_defined: vdisk %s " +#~ msgstr "" + +#~ msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +#~ msgstr "" + +#~ msgid "enter: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "warning: Tried to delete vdisk %s but it does not exist." 
+#~ msgstr "" + +#~ msgid "leave: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "" +#~ "_add_vdisk_copy %(name)s - did not find" +#~ " success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "_get_vdisk_copy_attrs: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "_get_pool_attrs: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "enter: _execute_command_and_parse_attributes: command %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _execute_command_and_parse_attributes:\n" +#~ "command: %(cmd)s\n" +#~ "attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "_get_hdr_dic: attribute headers and values do not match.\n" +#~ " Headers: %(header)s\n" +#~ " Values: %(row)s" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ "stdout: %(out)s\n" +#~ "stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "Did not find expected column in %(fun)s: %(hdr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Volume size %(size)s cannot be lesser" +#~ " than the snapshot size %(snap_size)s. " +#~ "They must be >= original snapshot " +#~ "size." +#~ msgstr "" + +#~ msgid "" +#~ "Clones currently disallowed when %(size)s " +#~ "< %(source_size)s. They must be >= " +#~ "original volume size." +#~ msgstr "" + +#~ msgid "" +#~ "Size of specified image %(image_size)s " +#~ "is larger than volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "" +#~ "Image minDisk size %(min_disk)s is " +#~ "larger than the volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "Updating volume %(volume_id)s with %(update)s" +#~ msgstr "" + +#~ msgid "Volume %s: resetting 'creating' status failed" +#~ msgstr "" + +#~ msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s" +#~ msgstr "" + +#~ msgid "Marking volume %s as bootable" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting download of %(image_id)s " +#~ "(%(image_location)s) to volume %(volume_id)s" +#~ msgstr "" + +#~ msgid "" +#~ "Downloaded image %(image_id)s (%(image_location)s)" +#~ " to volume %(volume_id)s successfully" +#~ msgstr "" + +#~ msgid "" +#~ "Creating volume glance metadata for " +#~ "volume %(volume_id)s backed by image " +#~ "%(image_id)s with: %(vol_metadata)s" +#~ msgstr "" + +#~ msgid "" +#~ "Cloning %(volume_id)s from image %(image_id)s" +#~ " at location %(image_location)s" +#~ msgstr "" + diff --git a/cinder/locale/tr/LC_MESSAGES/cinder.po b/cinder/locale/tr/LC_MESSAGES/cinder.po index cfd3db0d85..643f0891e7 100644 --- a/cinder/locale/tr/LC_MESSAGES/cinder.po +++ b/cinder/locale/tr/LC_MESSAGES/cinder.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: cinder\n" "Report-Msgid-Bugs-To: FULL NAME \n" -"POT-Creation-Date: 2012-04-08 23:04+0000\n" +"POT-Creation-Date: 2014-02-03 06:16+0000\n" "PO-Revision-Date: 2011-12-14 18:10+0000\n" "Last-Translator: FULL NAME \n" "Language-Team: Turkish \n" @@ -15,8188 +15,10721 @@ msgstr "" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 0.9.6\n" +"Generated-By: Babel 1.3\n" -#: cinder/context.py:59 +#: cinder/context.py:61 #, python-format msgid "Arguments dropped when creating context: %s" msgstr "" -#: cinder/context.py:90 +#: cinder/context.py:102 #, python-format msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" msgstr "" -#: 
cinder/crypto.py:48 -msgid "Filename of root CA" +#: cinder/exception.py:66 cinder/brick/exception.py:33 +msgid "An unknown exception occurred." msgstr "" -#: cinder/crypto.py:51 -msgid "Filename of private key" +#: cinder/exception.py:88 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" msgstr "" -#: cinder/crypto.py:54 -msgid "Filename of root Certificate Revocation List" +#: cinder/exception.py:107 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" msgstr "" -#: cinder/crypto.py:57 -msgid "Where we keep our keys" +#: cinder/exception.py:112 +#, python-format +msgid "Volume driver reported an error: %(message)s" msgstr "" -#: cinder/crypto.py:60 -msgid "Where we keep our root CA" +#: cinder/exception.py:116 +#, python-format +msgid "Backup driver reported an error: %(message)s" msgstr "" -#: cinder/crypto.py:63 -msgid "Should we use a CA for each project?" +#: cinder/exception.py:120 +#, python-format +msgid "Connection to glance failed: %(reason)s" msgstr "" -#: cinder/crypto.py:67 -#, python-format -msgid "Subject for certificate for users, %s for project, user, timestamp" +#: cinder/exception.py:124 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:129 +msgid "User does not have admin privileges" msgstr "" -#: cinder/crypto.py:72 +#: cinder/exception.py:133 #, python-format -msgid "Subject for certificate for projects, %s for project, timestamp" +msgid "Policy doesn't allow %(action)s to be performed." msgstr "" -#: cinder/crypto.py:292 +#: cinder/exception.py:137 #, python-format -msgid "Flags path: %s" +msgid "Not authorized for image %(image_id)s." msgstr "" -#: cinder/exception.py:56 -msgid "Unexpected error while running command." +#: cinder/exception.py:141 +msgid "Volume driver not ready." msgstr "" -#: cinder/exception.py:59 +#: cinder/exception.py:145 cinder/brick/exception.py:74 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:150 #, python-format -msgid "" -"%(description)s\n" -"Command: %(cmd)s\n" -"Exit code: %(exit_code)s\n" -"Stdout: %(stdout)r\n" -"Stderr: %(stderr)r" +msgid "Invalid snapshot: %(reason)s" msgstr "" -#: cinder/exception.py:94 -msgid "DB exception wrapped." +#: cinder/exception.py:154 +#, python-format +msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." msgstr "" -#: cinder/exception.py:155 -msgid "An unknown exception occurred." +#: cinder/exception.py:159 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." msgstr "" -#: cinder/exception.py:178 -msgid "Failed to decrypt text" +#: cinder/exception.py:163 +msgid "Failed to load data into json format" msgstr "" -#: cinder/exception.py:182 -msgid "Failed to paginate through images from image service" +#: cinder/exception.py:167 +msgid "The results are invalid." 
msgstr "" -#: cinder/exception.py:186 -msgid "Virtual Interface creation failed" +#: cinder/exception.py:171 +#, python-format +msgid "Invalid input received: %(reason)s" msgstr "" -#: cinder/exception.py:190 -msgid "5 attempts to create virtual interfacewith unique mac address failed" +#: cinder/exception.py:175 +#, python-format +msgid "Invalid volume type: %(reason)s" msgstr "" -#: cinder/exception.py:195 -msgid "Connection to glance failed" +#: cinder/exception.py:179 +#, python-format +msgid "Invalid volume: %(reason)s" msgstr "" -#: cinder/exception.py:199 -msgid "Connection to melange failed" +#: cinder/exception.py:183 +#, python-format +msgid "Invalid content type %(content_type)s." msgstr "" -#: cinder/exception.py:203 -msgid "Not authorized." +#: cinder/exception.py:187 +#, python-format +msgid "Invalid host: %(reason)s" msgstr "" -#: cinder/exception.py:208 -msgid "User does not have admin privileges" +#: cinder/exception.py:193 cinder/brick/exception.py:81 +#, python-format +msgid "%(err)s" msgstr "" -#: cinder/exception.py:212 +#: cinder/exception.py:197 #, python-format -msgid "Policy doesn't allow %(action)s to be performed." +msgid "Invalid auth key: %(reason)s" msgstr "" -#: cinder/exception.py:216 +#: cinder/exception.py:201 #, python-format -msgid "Not authorized for image %(image_id)s." +msgid "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" msgstr "" -#: cinder/exception.py:220 -msgid "Unacceptable parameters." +#: cinder/exception.py:206 +msgid "Service is unavailable at this time." msgstr "" -#: cinder/exception.py:225 -msgid "Invalid snapshot" +#: cinder/exception.py:210 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" msgstr "" -#: cinder/exception.py:229 +#: cinder/exception.py:214 #, python-format -msgid "Volume %(volume_id)s is not attached to anything" +msgid "The device in the path %(path)s is unavailable: %(reason)s" msgstr "" -#: cinder/exception.py:233 cinder/api/openstack/compute/contrib/keypairs.py:113 -msgid "Keypair data is invalid" +#: cinder/exception.py:218 +#, python-format +msgid "Expected a uuid but received %(uuid)s." msgstr "" -#: cinder/exception.py:237 -msgid "Failed to load data into json format" +#: cinder/exception.py:222 cinder/brick/exception.py:68 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:228 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:232 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." msgstr "" -#: cinder/exception.py:241 -msgid "The request is invalid." +#: cinder/exception.py:237 +#, python-format +msgid "" +"Volume %(volume_id)s has no administration metadata with key " +"%(metadata_key)s." msgstr "" -#: cinder/exception.py:245 +#: cinder/exception.py:242 #, python-format -msgid "Invalid signature %(signature)s for user %(user)s." +msgid "Invalid metadata: %(reason)s" msgstr "" -#: cinder/exception.py:249 -msgid "Invalid input received" +#: cinder/exception.py:246 +#, python-format +msgid "Invalid metadata size: %(reason)s" msgstr "" -#: cinder/exception.py:253 +#: cinder/exception.py:250 #, python-format -msgid "Invalid instance type %(instance_type)s." +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." msgstr "" -#: cinder/exception.py:257 -msgid "Invalid volume type" +#: cinder/exception.py:255 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." 
msgstr "" -#: cinder/exception.py:261 -msgid "Invalid volume" +#: cinder/exception.py:259 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." msgstr "" -#: cinder/exception.py:265 +#: cinder/exception.py:264 #, python-format -msgid "Invalid port range %(from_port)s:%(to_port)s. %(msg)s" +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." msgstr "" #: cinder/exception.py:269 #, python-format -msgid "Invalid IP protocol %(protocol)s." +msgid "" +"Volume Type %(volume_type_id)s deletion is not allowed with volumes " +"present with the type." msgstr "" -#: cinder/exception.py:273 +#: cinder/exception.py:274 #, python-format -msgid "Invalid content type %(content_type)s." +msgid "Snapshot %(snapshot_id)s could not be found." msgstr "" -#: cinder/exception.py:277 +#: cinder/exception.py:278 #, python-format -msgid "Invalid cidr %(cidr)s." +msgid "deleting volume %(volume_name)s that has snapshot" msgstr "" -#: cinder/exception.py:281 -msgid "Invalid reuse of an RPC connection." +#: cinder/exception.py:282 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" msgstr "" -#: cinder/exception.py:285 -msgid "Invalid Parameter: Unicode is not supported by the current database." +#: cinder/exception.py:287 +#, python-format +msgid "No target id found for volume %(volume_id)s." msgstr "" -#: cinder/exception.py:292 +#: cinder/exception.py:291 #, python-format -msgid "%(err)s" +msgid "Invalid image href %(image_href)s." msgstr "" -#: cinder/exception.py:296 +#: cinder/exception.py:295 #, python-format -msgid "" -"Cannot perform action '%(action)s' on aggregate %(aggregate_id)s. Reason:" -" %(reason)s." +msgid "Image %(image_id)s could not be found." msgstr "" -#: cinder/exception.py:301 +#: cinder/exception.py:299 #, python-format -msgid "Group not valid. Reason: %(reason)s" +msgid "Service %(service_id)s could not be found." msgstr "" -#: cinder/exception.py:305 +#: cinder/exception.py:303 #, python-format -msgid "" -"Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot %(method)s while" -" the instance is in this state." +msgid "Host %(host)s could not be found." msgstr "" -#: cinder/exception.py:310 +#: cinder/exception.py:307 #, python-format -msgid "Instance %(instance_id)s is not running." +msgid "Scheduler Host Filter %(filter_name)s could not be found." msgstr "" -#: cinder/exception.py:314 +#: cinder/exception.py:311 #, python-format -msgid "Instance %(instance_id)s is not suspended." +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." msgstr "" -#: cinder/exception.py:318 +#: cinder/exception.py:315 #, python-format -msgid "Instance %(instance_id)s is not in rescue mode" +msgid "Could not find binary %(binary)s on host %(host)s." msgstr "" -#: cinder/exception.py:322 -msgid "Failed to suspend instance" +#: cinder/exception.py:319 +#, python-format +msgid "Invalid reservation expiration %(expire)s." msgstr "" -#: cinder/exception.py:326 -msgid "Failed to resume server" +#: cinder/exception.py:323 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" msgstr "" -#: cinder/exception.py:330 -msgid "Failed to reboot instance" +#: cinder/exception.py:328 +msgid "Quota could not be found" msgstr "" -#: cinder/exception.py:334 -msgid "Failed to terminate instance" +#: cinder/exception.py:332 +#, python-format +msgid "Unknown quota resources %(unknown)s." 
msgstr "" -#: cinder/exception.py:338 -msgid "Service is unavailable at this time." +#: cinder/exception.py:336 +#, python-format +msgid "Quota for project %(project_id)s could not be found." msgstr "" -#: cinder/exception.py:342 -msgid "Volume service is unavailable at this time." +#: cinder/exception.py:340 +#, python-format +msgid "Quota class %(class_name)s could not be found." msgstr "" -#: cinder/exception.py:346 -msgid "Compute service is unavailable at this time." +#: cinder/exception.py:344 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." msgstr "" -#: cinder/exception.py:350 +#: cinder/exception.py:348 #, python-format -msgid "Unable to migrate instance (%(instance_id)s) to current host (%(host)s)." +msgid "Quota reservation %(uuid)s could not be found." msgstr "" -#: cinder/exception.py:355 -msgid "Destination compute host is unavailable at this time." +#: cinder/exception.py:352 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" msgstr "" -#: cinder/exception.py:359 -msgid "Original compute host is unavailable at this time." +#: cinder/exception.py:356 +#, python-format +msgid "File %(file_path)s could not be found." msgstr "" -#: cinder/exception.py:363 -msgid "The supplied hypervisor type of is invalid." +#: cinder/exception.py:365 +#, python-format +msgid "Volume Type %(id)s already exists." msgstr "" -#: cinder/exception.py:367 -msgid "The instance requires a newer hypervisor version than has been provided." +#: cinder/exception.py:369 +#, python-format +msgid "Volume type encryption for type %(type_id)s already exists." msgstr "" -#: cinder/exception.py:372 +#: cinder/exception.py:373 #, python-format -msgid "" -"The supplied disk path (%(path)s) already exists, it is expected not to " -"exist." +msgid "Malformed message body: %(reason)s" msgstr "" #: cinder/exception.py:377 #, python-format -msgid "The supplied device path (%(path)s) is invalid." +msgid "Could not find config at %(path)s" msgstr "" #: cinder/exception.py:381 #, python-format -msgid "The supplied device (%(device)s) is busy." +msgid "Could not find parameter %(param)s" msgstr "" #: cinder/exception.py:385 -msgid "Unacceptable CPU info" +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" msgstr "" #: cinder/exception.py:389 #, python-format -msgid "%(address)s is not a valid IP v4/6 address." +msgid "No valid host was found. %(reason)s" msgstr "" -#: cinder/exception.py:393 +#: cinder/exception.py:398 #, python-format -msgid "" -"VLAN tag is not appropriate for the port group %(bridge)s. Expected VLAN " -"tag is %(tag)s, but the one associated with the port group is %(pgroup)s." +msgid "Host %(host)s is not up or doesn't exist." msgstr "" -#: cinder/exception.py:399 +#: cinder/exception.py:402 #, python-format -msgid "" -"vSwitch which contains the port group %(bridge)s is not associated with " -"the desired physical adapter. Expected vSwitch is %(expected)s, but the " -"one associated is %(actual)s." +msgid "Quota exceeded: code=%(code)s" msgstr "" -#: cinder/exception.py:406 +#: cinder/exception.py:409 #, python-format -msgid "Disk format %(disk_format)s is not acceptable" +msgid "" +"Requested volume or snapshot exceeds allowed Gigabytes quota. Requested " +"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." 
msgstr "" -#: cinder/exception.py:410 +#: cinder/exception.py:415 #, python-format -msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" msgstr "" -#: cinder/exception.py:414 +#: cinder/exception.py:419 #, python-format -msgid "Instance %(instance_id)s is unacceptable: %(reason)s" +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" msgstr "" -#: cinder/exception.py:418 +#: cinder/exception.py:423 #, python-format -msgid "Ec2 id %(ec2_id)s is unacceptable." -msgstr "" - -#: cinder/exception.py:422 -msgid "Resource could not be found." +msgid "Detected more than one volume with name %(vol_name)s" msgstr "" #: cinder/exception.py:427 #, python-format -msgid "Required flag %(flag)s not set." +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" msgstr "" -#: cinder/exception.py:431 +#: cinder/exception.py:432 #, python-format -msgid "Volume %(volume_id)s could not be found." +msgid "Unknown or unsupported command %(cmd)s" msgstr "" -#: cinder/exception.py:435 +#: cinder/exception.py:436 #, python-format -msgid "Unable to locate account %(account_name)s on Solidfire device" +msgid "Malformed response to command %(cmd)s: %(reason)s" msgstr "" #: cinder/exception.py:440 #, python-format -msgid "Volume not found for instance %(instance_id)s." +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" msgstr "" #: cinder/exception.py:444 #, python-format -msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" msgstr "" #: cinder/exception.py:449 -msgid "Zero volume types found." +#, python-format +msgid "Glance metadata for volume/snapshot %(id)s cannot be found." msgstr "" #: cinder/exception.py:453 #, python-format -msgid "Volume type %(volume_type_id)s could not be found." +msgid "Failed to export for volume: %(reason)s" msgstr "" #: cinder/exception.py:457 #, python-format -msgid "Volume type with name %(volume_type_name)s could not be found." +msgid "Failed to create metadata for volume: %(reason)s" msgstr "" -#: cinder/exception.py:462 +#: cinder/exception.py:461 #, python-format -msgid "" -"Volume Type %(volume_type_id)s has no extra specs with key " -"%(extra_specs_key)s." +msgid "Failed to update metadata for volume: %(reason)s" msgstr "" -#: cinder/exception.py:467 +#: cinder/exception.py:465 #, python-format -msgid "Snapshot %(snapshot_id)s could not be found." +msgid "Failed to copy metadata to volume: %(reason)s" msgstr "" -#: cinder/exception.py:471 +#: cinder/exception.py:469 #, python-format -msgid "deleting volume %(volume_name)s that has snapshot" +msgid "Failed to copy image to volume: %(reason)s" msgstr "" -#: cinder/exception.py:475 -#, python-format -msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +#: cinder/exception.py:473 +msgid "Invalid Ceph args provided for backup rbd operation" msgstr "" -#: cinder/exception.py:480 -#, python-format -msgid "No target id found for volume %(volume_id)s." +#: cinder/exception.py:477 +msgid "An error has occurred during backup operation" msgstr "" -#: cinder/exception.py:484 -#, python-format -msgid "No disk at %(location)s" +#: cinder/exception.py:481 +msgid "Backup RBD operation failed" msgstr "" -#: cinder/exception.py:488 +#: cinder/exception.py:485 #, python-format -msgid "Could not find a handler for %(driver_type)s volume." +msgid "Backup %(backup_id)s could not be found." 
+msgstr "" + +#: cinder/exception.py:489 +msgid "Failed to identify volume backend." msgstr "" -#: cinder/exception.py:492 +#: cinder/exception.py:493 #, python-format -msgid "Invalid image href %(image_href)s." +msgid "Invalid backup: %(reason)s" msgstr "" -#: cinder/exception.py:496 -msgid "" -"Some images have been stored via hrefs. This version of the api does not " -"support displaying image hrefs." +#: cinder/exception.py:497 +#, python-format +msgid "Connection to swift failed: %(reason)s" msgstr "" #: cinder/exception.py:501 #, python-format -msgid "Image %(image_id)s could not be found." +msgid "Transfer %(transfer_id)s could not be found." msgstr "" #: cinder/exception.py:505 #, python-format -msgid "Kernel not found for image %(image_id)s." +msgid "Volume migration failed: %(reason)s" msgstr "" #: cinder/exception.py:509 #, python-format -msgid "User %(user_id)s could not be found." +msgid "SSH command injection detected: %(command)s" msgstr "" #: cinder/exception.py:513 #, python-format -msgid "Project %(project_id)s could not be found." +msgid "QoS Specs %(specs_id)s already exists." msgstr "" #: cinder/exception.py:517 #, python-format -msgid "User %(user_id)s is not a member of project %(project_id)s." +msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." msgstr "" -#: cinder/exception.py:521 +#: cinder/exception.py:522 #, python-format -msgid "Role %(role_id)s could not be found." +msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." msgstr "" -#: cinder/exception.py:525 -msgid "Cannot find SR to read/write VDI." -msgstr "" - -#: cinder/exception.py:529 +#: cinder/exception.py:527 #, python-format -msgid "%(req)s is required to create a network." +msgid "No such QoS spec %(specs_id)s." msgstr "" -#: cinder/exception.py:533 +#: cinder/exception.py:531 #, python-format -msgid "Network %(network_id)s could not be found." +msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." msgstr "" -#: cinder/exception.py:537 +#: cinder/exception.py:536 #, python-format -msgid "Network could not be found for bridge %(bridge)s" +msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." msgstr "" #: cinder/exception.py:541 #, python-format -msgid "Network could not be found for uuid %(uuid)s" +msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." msgstr "" -#: cinder/exception.py:545 +#: cinder/exception.py:546 #, python-format -msgid "Network could not be found with cidr %(cidr)s." +msgid "Invalid qos specs: %(reason)s" msgstr "" -#: cinder/exception.py:549 +#: cinder/exception.py:550 #, python-format -msgid "Network could not be found for instance %(instance_id)s." +msgid "QoS Specs %(specs_id)s is still associated with entities." msgstr "" -#: cinder/exception.py:553 -msgid "No networks defined." +#: cinder/exception.py:554 +#, python-format +msgid "key manager error: %(reason)s" msgstr "" -#: cinder/exception.py:557 -#, python-format -msgid "" -"Either Network uuid %(network_uuid)s is not present or is not assigned to" -" the project %(project_id)s." +#: cinder/exception.py:560 +msgid "Coraid Cinder Driver exception." msgstr "" -#: cinder/exception.py:562 -#, python-format -msgid "Host is not set to the network (%(network_id)s)." +#: cinder/exception.py:564 +msgid "Failed to encode json data." msgstr "" -#: cinder/exception.py:566 -#, python-format -msgid "Network %(network)s has active ports, cannot delete." +#: cinder/exception.py:568 +msgid "Login on ESM failed." 
msgstr "" -#: cinder/exception.py:570 -msgid "Could not find the datastore reference(s) which the VM uses." +#: cinder/exception.py:572 +msgid "Relogin on ESM failed." msgstr "" -#: cinder/exception.py:574 +#: cinder/exception.py:576 #, python-format -msgid "No fixed IP associated with id %(id)s." +msgid "Group with name \"%(group_name)s\" not found." msgstr "" -#: cinder/exception.py:578 +#: cinder/exception.py:580 #, python-format -msgid "Fixed ip not found for address %(address)s." +msgid "ESM configure request failed: %(message)s." msgstr "" -#: cinder/exception.py:582 +#: cinder/exception.py:584 #, python-format -msgid "Instance %(instance_id)s has zero fixed ips." +msgid "Coraid ESM not available with reason: %(reason)s." msgstr "" -#: cinder/exception.py:586 -#, python-format -msgid "Network host %(host)s has zero fixed ips in network %(network_id)s." +#: cinder/exception.py:589 +msgid "Zadara Cinder Driver exception." msgstr "" -#: cinder/exception.py:591 +#: cinder/exception.py:593 #, python-format -msgid "Instance %(instance_id)s doesn't have fixed ip '%(ip)s'." +msgid "Unable to create server object for initiator %(name)s" msgstr "" -#: cinder/exception.py:595 +#: cinder/exception.py:597 #, python-format -msgid "Host %(host)s has zero fixed ips." +msgid "Unable to find server object for initiator %(name)s" msgstr "" -#: cinder/exception.py:599 -#, python-format -msgid "" -"Fixed IP address (%(address)s) does not exist in network " -"(%(network_uuid)s)." +#: cinder/exception.py:601 +msgid "Unable to find any active VPSA controller" msgstr "" -#: cinder/exception.py:604 +#: cinder/exception.py:605 #, python-format -msgid "Fixed IP address %(address)s is already in use." +msgid "Failed to retrieve attachments for volume %(name)s" msgstr "" -#: cinder/exception.py:608 +#: cinder/exception.py:609 #, python-format -msgid "Fixed IP address %(address)s is invalid." +msgid "Invalid attachment info for volume %(name)s: %(reason)s" msgstr "" -#: cinder/exception.py:612 -msgid "Zero fixed ips available." +#: cinder/exception.py:613 +#, python-format +msgid "Bad HTTP response status %(status)s" msgstr "" -#: cinder/exception.py:616 -msgid "Zero fixed ips could be found." +#: cinder/exception.py:618 +msgid "Bad response from SolidFire API" msgstr "" -#: cinder/exception.py:620 -#, python-format -msgid "Floating ip not found for id %(id)s." +#: cinder/exception.py:622 +msgid "SolidFire Cinder Driver exception" msgstr "" -#: cinder/exception.py:624 +#: cinder/exception.py:626 #, python-format -msgid "The DNS entry %(name)s already exists in domain %(domain)s." +msgid "Error in SolidFire API response: data=%(data)s" msgstr "" -#: cinder/exception.py:628 +#: cinder/exception.py:630 #, python-format -msgid "Floating ip not found for address %(address)s." +msgid "Unable to locate account %(account_name)s on Solidfire device" msgstr "" -#: cinder/exception.py:632 +#: cinder/exception.py:636 #, python-format -msgid "Floating ip not found for host %(host)s." +msgid "Invalid 3PAR Domain: %(err)s" msgstr "" -#: cinder/exception.py:636 -msgid "Zero floating ips available." +#: cinder/exception.py:641 +msgid "Unknown NFS exception" msgstr "" -#: cinder/exception.py:640 -#, python-format -msgid "Floating ip %(address)s is associated." +#: cinder/exception.py:645 +msgid "No mounted NFS shares found" msgstr "" -#: cinder/exception.py:644 +#: cinder/exception.py:649 cinder/exception.py:662 #, python-format -msgid "Floating ip %(address)s is not associated." 
-msgstr "" - -#: cinder/exception.py:648 -msgid "Zero floating ips exist." +msgid "There is no share which can host %(volume_size)sG" msgstr "" -#: cinder/exception.py:652 -#, python-format -msgid "Interface %(interface)s not found." +#: cinder/exception.py:654 +msgid "Unknown Gluster exception" msgstr "" -#: cinder/exception.py:656 -#, python-format -msgid "Keypair %(name)s not found for user %(user_id)s" +#: cinder/exception.py:658 +msgid "No mounted Gluster shares found" msgstr "" -#: cinder/exception.py:660 -#, python-format -msgid "Certificate %(certificate_id)s not found." +#: cinder/manager.py:133 +msgid "Notifying Schedulers of capabilities ..." msgstr "" -#: cinder/exception.py:664 -#, python-format -msgid "Service %(service_id)s could not be found." +#: cinder/policy.py:30 +msgid "JSON file representing policy" msgstr "" -#: cinder/exception.py:668 -#, python-format -msgid "Host %(host)s could not be found." +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" msgstr "" -#: cinder/exception.py:672 +#: cinder/quota.py:105 #, python-format -msgid "Compute host %(host)s could not be found." +msgid "" +"Default quota for resource: %(res)s is set by the default quota flag: " +"quota_%(res)s, it is now deprecated. Please use the the default quota " +"class for default quota." msgstr "" -#: cinder/exception.py:676 +#: cinder/quota.py:748 #, python-format -msgid "Could not find binary %(binary)s on host %(host)s." +msgid "Created reservations %s" msgstr "" -#: cinder/exception.py:680 +#: cinder/quota.py:770 #, python-format -msgid "Auth token %(token)s could not be found." +msgid "Failed to commit reservations %s" msgstr "" -#: cinder/exception.py:684 +#: cinder/quota.py:790 #, python-format -msgid "Access Key %(access_key)s could not be found." +msgid "Failed to roll back reservations %s" msgstr "" -#: cinder/exception.py:688 -msgid "Quota could not be found" +#: cinder/quota.py:876 +msgid "Cannot register resource" msgstr "" -#: cinder/exception.py:692 -#, python-format -msgid "Quota for project %(project_id)s could not be found." +#: cinder/quota.py:879 +msgid "Cannot register resources" msgstr "" -#: cinder/exception.py:696 +#: cinder/quota_utils.py:46 #, python-format -msgid "Quota class %(class_name)s could not be found." +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume - " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" msgstr "" -#: cinder/exception.py:700 +#: cinder/quota_utils.py:56 cinder/transfer/api.py:168 +#: cinder/volume/flows/api/create_volume.py:520 #, python-format -msgid "Security group %(security_group_id)s not found." +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" msgstr "" -#: cinder/exception.py:704 +#: cinder/service.py:95 #, python-format -msgid "Security group %(security_group_id)s not found for project %(project_id)s." +msgid "Starting %(topic)s node (version %(version_string)s)" msgstr "" -#: cinder/exception.py:709 +#: cinder/service.py:108 cinder/openstack/common/rpc/service.py:47 #, python-format -msgid "Security group with rule %(rule_id)s not found." +msgid "Creating Consumer connection for Service %s" msgstr "" -#: cinder/exception.py:713 +#: cinder/service.py:148 #, python-format msgid "" -"Security group %(security_group_id)s is already associated with the " -"instance %(instance_id)s" +"Report interval must be less than service down time. 
Current config " +"service_down_time: %(service_down_time)s, report_interval for this: " +"service is: %(report_interval)s. Setting global service_down_time to: " +"%(new_down_time)s" msgstr "" -#: cinder/exception.py:718 -#, python-format -msgid "" -"Security group %(security_group_id)s is not associated with the instance " -"%(instance_id)s" +#: cinder/service.py:216 +msgid "Service killed that has no database entry" msgstr "" -#: cinder/exception.py:723 -#, python-format -msgid "Migration %(migration_id)s could not be found." +#: cinder/service.py:255 +msgid "The service database object disappeared, Recreating it." msgstr "" -#: cinder/exception.py:727 -#, python-format -msgid "Migration not found for instance %(instance_id)s with status %(status)s." +#: cinder/service.py:270 +msgid "Recovered model server connection!" msgstr "" -#: cinder/exception.py:732 -#, python-format -msgid "Console pool %(pool_id)s could not be found." +#: cinder/service.py:276 +msgid "model server went away" msgstr "" -#: cinder/exception.py:736 +#: cinder/service.py:298 #, python-format msgid "" -"Console pool of type %(console_type)s for compute host %(compute_host)s " -"on proxy host %(host)s not found." +"Value of config option %(name)s_workers must be integer greater than 1. " +"Input value ignored." msgstr "" -#: cinder/exception.py:742 -#, python-format -msgid "Console %(console_id)s could not be found." +#: cinder/service.py:373 +msgid "serve() can only be called once" +msgstr "" + +#: cinder/service.py:379 cinder/openstack/common/service.py:166 +#: cinder/openstack/common/service.py:384 +msgid "Full set of CONF:" msgstr "" -#: cinder/exception.py:746 +#: cinder/service.py:387 #, python-format -msgid "Console for instance %(instance_id)s could not be found." +msgid "%s : FLAG SET " msgstr "" -#: cinder/exception.py:750 +#: cinder/utils.py:96 #, python-format -msgid "" -"Console for instance %(instance_id)s in pool %(pool_id)s could not be " -"found." +msgid "Can not translate %s to integer." msgstr "" -#: cinder/exception.py:755 +#: cinder/utils.py:127 #, python-format -msgid "Invalid console type %(console_type)s " +msgid "May specify only one of %s" msgstr "" -#: cinder/exception.py:759 -msgid "Zero instance types found." +#: cinder/utils.py:212 +msgid "Specify a password or private_key" msgstr "" -#: cinder/exception.py:763 +#: cinder/utils.py:228 #, python-format -msgid "Instance type %(instance_type_id)s could not be found." +msgid "Error connecting via ssh: %s" msgstr "" -#: cinder/exception.py:767 +#: cinder/utils.py:412 #, python-format -msgid "Instance type with name %(instance_type_name)s could not be found." +msgid "Invalid backend: %s" msgstr "" -#: cinder/exception.py:772 +#: cinder/utils.py:423 #, python-format -msgid "Flavor %(flavor_id)s could not be found." +msgid "backend %s" msgstr "" -#: cinder/exception.py:776 +#: cinder/utils.py:698 #, python-format -msgid "Cell %(cell_id)s could not be found." +msgid "Could not remove tmpdir: %s" msgstr "" -#: cinder/exception.py:780 +#: cinder/utils.py:759 #, python-format -msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgid "Volume driver %s not initialized" msgstr "" -#: cinder/exception.py:784 +#: cinder/wsgi.py:127 cinder/openstack/common/sslutils.py:50 #, python-format -msgid "Scheduler cost function %(cost_fn_str)s could not be found." 
+msgid "Unable to find cert_file : %s" msgstr "" -#: cinder/exception.py:789 +#: cinder/wsgi.py:130 cinder/openstack/common/sslutils.py:53 #, python-format -msgid "Scheduler weight flag not found: %(flag_name)s" +msgid "Unable to find ca_file : %s" msgstr "" -#: cinder/exception.py:793 +#: cinder/wsgi.py:133 cinder/openstack/common/sslutils.py:56 #, python-format -msgid "Instance %(instance_id)s has no metadata with key %(metadata_key)s." +msgid "Unable to find key_file : %s" msgstr "" -#: cinder/exception.py:798 -#, python-format +#: cinder/wsgi.py:136 cinder/openstack/common/sslutils.py:59 msgid "" -"Instance Type %(instance_type_id)s has no extra specs with key " -"%(extra_specs_key)s." +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" msgstr "" -#: cinder/exception.py:803 -msgid "LDAP object could not be found" +#: cinder/wsgi.py:169 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" msgstr "" -#: cinder/exception.py:807 +#: cinder/wsgi.py:206 #, python-format -msgid "LDAP user %(user_id)s could not be found." +msgid "Started %(name)s on %(host)s:%(port)s" msgstr "" -#: cinder/exception.py:811 -#, python-format -msgid "LDAP group %(group_id)s could not be found." +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." msgstr "" -#: cinder/exception.py:815 -#, python-format -msgid "LDAP user %(user_id)s is not a member of group %(group_id)s." +#: cinder/wsgi.py:244 +msgid "WSGI server has stopped." msgstr "" -#: cinder/exception.py:819 -#, python-format -msgid "File %(file_path)s could not be found." +#: cinder/wsgi.py:313 +msgid "You must implement __call__" msgstr "" -#: cinder/exception.py:823 -msgid "Zero files could be found." +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." msgstr "" -#: cinder/exception.py:827 -#, python-format -msgid "Virtual switch associated with the network adapter %(adapter)s not found." +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." msgstr "" -#: cinder/exception.py:832 -#, python-format -msgid "Network adapter %(adapter)s could not be found." +#: cinder/api/common.py:92 cinder/api/common.py:126 cinder/volume/api.py:266 +msgid "limit param must be an integer" msgstr "" -#: cinder/exception.py:836 -#, python-format -msgid "Class %(class_name)s could not be found: %(exception)s" +#: cinder/api/common.py:95 cinder/api/common.py:130 cinder/volume/api.py:263 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:120 +msgid "offset param must be an integer" msgstr "" -#: cinder/exception.py:840 -msgid "Action not allowed." +#: cinder/api/common.py:134 +msgid "offset param must be positive" msgstr "" -#: cinder/exception.py:844 +#: cinder/api/common.py:162 #, python-format -msgid "Unable to use global role %(role_id)s" +msgid "marker [%s] not found" msgstr "" -#: cinder/exception.py:848 -msgid "Rotation is not allowed for snapshots" +#: cinder/api/common.py:189 +#, python-format +msgid "href %s does not contain version" msgstr "" -#: cinder/exception.py:852 -msgid "Rotation param is required for backup image_type" +#: cinder/api/extensions.py:182 +msgid "Initializing extension manager." 
msgstr "" -#: cinder/exception.py:861 +#: cinder/api/extensions.py:197 #, python-format -msgid "Key pair %(key_name)s already exists." +msgid "Loaded extension: %s" msgstr "" -#: cinder/exception.py:865 +#: cinder/api/extensions.py:235 #, python-format -msgid "User %(user)s already exists." +msgid "Ext name: %s" msgstr "" -#: cinder/exception.py:869 +#: cinder/api/extensions.py:236 #, python-format -msgid "LDAP user %(user)s already exists." +msgid "Ext alias: %s" msgstr "" -#: cinder/exception.py:873 +#: cinder/api/extensions.py:237 #, python-format -msgid "LDAP group %(group)s already exists." +msgid "Ext description: %s" msgstr "" -#: cinder/exception.py:877 +#: cinder/api/extensions.py:239 #, python-format -msgid "User %(uid)s is already a member of the group %(group_dn)s" +msgid "Ext namespace: %s" msgstr "" -#: cinder/exception.py:882 +#: cinder/api/extensions.py:240 #, python-format -msgid "Project %(project)s already exists." +msgid "Ext updated: %s" msgstr "" -#: cinder/exception.py:886 +#: cinder/api/extensions.py:242 #, python-format -msgid "Instance %(name)s already exists." +msgid "Exception loading extension: %s" msgstr "" -#: cinder/exception.py:890 +#: cinder/api/extensions.py:256 #, python-format -msgid "Instance Type %(name)s already exists." +msgid "Loading extension %s" msgstr "" -#: cinder/exception.py:894 +#: cinder/api/extensions.py:262 #, python-format -msgid "Volume Type %(name)s already exists." +msgid "Calling extension factory %s" msgstr "" -#: cinder/exception.py:898 +#: cinder/api/extensions.py:276 #, python-format -msgid "%(path)s is on shared storage: %(reason)s" +msgid "osapi_volume_extension is set to deprecated path: %s" msgstr "" -#: cinder/exception.py:902 -msgid "Migration error" +#: cinder/api/extensions.py:278 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" msgstr "" -#: cinder/exception.py:906 +#: cinder/api/extensions.py:287 #, python-format -msgid "Malformed message body: %(reason)s" +msgid "Failed to load extension %(ext_factory)s: %(exc)s" msgstr "" -#: cinder/exception.py:910 +#: cinder/api/extensions.py:356 #, python-format -msgid "Could not find config at %(path)s" +msgid "Failed to load extension %(classpath)s: %(exc)s" msgstr "" -#: cinder/exception.py:914 +#: cinder/api/extensions.py:381 #, python-format -msgid "Could not load paste app '%(name)s' from %(path)s" +msgid "Failed to load extension %(ext_name)s: %(exc)s" msgstr "" -#: cinder/exception.py:918 -msgid "When resizing, instances must change size!" +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" msgstr "" -#: cinder/exception.py:922 -msgid "Image is larger than instance type allows" +#: cinder/api/xmlutil.py:266 +msgid "element is not a child" msgstr "" -#: cinder/exception.py:926 -msgid "1 or more Zones could not complete the request" +#: cinder/api/xmlutil.py:463 +msgid "root element selecting a list" msgstr "" -#: cinder/exception.py:930 -msgid "Instance type's memory is too small for requested image." +#: cinder/api/xmlutil.py:786 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" msgstr "" -#: cinder/exception.py:934 -msgid "Instance type's disk is too small for requested image." +#: cinder/api/xmlutil.py:907 +msgid "subclasses must implement construct()!" 
msgstr "" -#: cinder/exception.py:938 +#: cinder/api/contrib/admin_actions.py:81 #, python-format -msgid "Insufficient free memory on compute node to start %(uuid)s." +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" msgstr "" -#: cinder/exception.py:942 -msgid "Could not fetch bandwidth/cpu/disk metrics for this host." +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" msgstr "" -#: cinder/exception.py:946 +#: cinder/api/contrib/backups.py:140 #, python-format -msgid "No valid host was found. %(reason)s" +msgid "delete called for member %s" msgstr "" -#: cinder/exception.py:950 +#: cinder/api/contrib/backups.py:143 #, python-format -msgid "Host %(host)s is not up or doesn't exist." +msgid "Delete backup with id: %s" msgstr "" -#: cinder/exception.py:954 -msgid "Quota exceeded" +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" msgstr "" -#: cinder/exception.py:958 -#, python-format -msgid "" -"Aggregate %(aggregate_id)s: action '%(action)s' caused an error: " -"%(reason)s." +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:227 +#: cinder/api/contrib/volume_transfer.py:157 +#: cinder/api/contrib/volume_transfer.py:193 +msgid "Incorrect request body format" msgstr "" -#: cinder/exception.py:963 +#: cinder/api/contrib/backups.py:201 #, python-format -msgid "Aggregate %(aggregate_id)s could not be found." +msgid "Creating backup of volume %(volume_id)s in container %(container)s" msgstr "" -#: cinder/exception.py:967 +#: cinder/api/contrib/backups.py:224 #, python-format -msgid "Aggregate %(aggregate_name)s already exists." +msgid "Restoring backup %(backup_id)s (%(body)s)" msgstr "" -#: cinder/exception.py:971 +#: cinder/api/contrib/backups.py:234 #, python-format -msgid "Aggregate %(aggregate_id)s has no host %(host)s." +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" msgstr "" -#: cinder/exception.py:975 -#, python-format -msgid "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s." +#: cinder/api/contrib/extended_snapshot_attributes.py:60 +msgid "Snapshot not found." msgstr "" -#: cinder/exception.py:980 -#, python-format -msgid "Host %(host)s already member of another aggregate." +#: cinder/api/contrib/hosts.py:86 cinder/api/openstack/wsgi.py:245 +msgid "cannot understand XML" msgstr "" -#: cinder/exception.py:984 +#: cinder/api/contrib/hosts.py:136 #, python-format -msgid "Aggregate %(aggregate_id)s already has host %(host)s." +msgid "Host '%s' could not be found." msgstr "" -#: cinder/exception.py:988 +#: cinder/api/contrib/hosts.py:165 #, python-format -msgid "Detected more than one volume with name %(vol_name)s" +msgid "Invalid status: '%s'" msgstr "" -#: cinder/exception.py:992 +#: cinder/api/contrib/hosts.py:168 #, python-format -msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgid "Invalid update setting: '%s'" msgstr "" -#: cinder/exception.py:997 -msgid "Unable to create instance type" +#: cinder/api/contrib/hosts.py:180 +#, python-format +msgid "Setting host %(host)s to %(state)s." 
msgstr "" -#: cinder/exception.py:1001 -msgid "Bad response from SolidFire API" +#: cinder/api/contrib/hosts.py:206 +msgid "Describe-resource is admin only functionality" msgstr "" -#: cinder/exception.py:1005 -#, python-format -msgid "Error in SolidFire API response: status=%(status)s" +#: cinder/api/contrib/hosts.py:214 +msgid "Host not found" msgstr "" -#: cinder/exception.py:1009 -#, python-format -msgid "Error in SolidFire API response: data=%(data)s" +#: cinder/api/contrib/qos_specs_manage.py:111 +msgid "Please specify a name for QoS specs." msgstr "" -#: cinder/exception.py:1013 -#, python-format -msgid "Detected existing vlan with id %(vlan)d" +#: cinder/api/contrib/qos_specs_manage.py:220 +msgid "Failed to disassociate qos specs." msgstr "" -#: cinder/exception.py:1017 -#, python-format -msgid "Instance %(instance_id)s could not be found." +#: cinder/api/contrib/qos_specs_manage.py:222 +msgid "Qos specs still in use." msgstr "" -#: cinder/exception.py:1021 -#, python-format -msgid "Invalid id: %(val)s (expecting \"i-...\")." +#: cinder/api/contrib/qos_specs_manage.py:298 +#: cinder/api/contrib/qos_specs_manage.py:351 +msgid "Volume Type id must not be None." msgstr "" -#: cinder/exception.py:1025 -#, python-format -msgid "Could not fetch image %(image)s" +#: cinder/api/contrib/quota_classes.py:72 +msgid "Missing required element quota_class_set in request body." msgstr "" -#: cinder/log.py:315 -#, python-format -msgid "syslog facility must be one of: %s" +#: cinder/api/contrib/quota_classes.py:81 +msgid "Quota class limit must be specified as an integer value." msgstr "" -#: cinder/manager.py:146 -#, python-format -msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +#: cinder/api/contrib/quota_classes.py:85 +msgid "Quota class limit must be -1 or greater." msgstr "" -#: cinder/manager.py:152 -#, python-format -msgid "Running periodic task %(full_task_name)s" +#: cinder/api/contrib/quotas.py:60 +msgid "Quota limit must be specified as an integer value." msgstr "" -#: cinder/manager.py:159 -#, python-format -msgid "Error during %(full_task_name)s: %(e)s" +#: cinder/api/contrib/quotas.py:65 +msgid "Quota limit must be -1 or greater." msgstr "" -#: cinder/manager.py:203 -msgid "Notifying Schedulers of capabilities ..." +#: cinder/api/contrib/quotas.py:100 +msgid "Missing required element quota_set in request body." msgstr "" -#: cinder/policy.py:30 -msgid "JSON file representing policy" +#: cinder/api/contrib/quotas.py:111 +#, python-format +msgid "Bad key(s) in quota set: %s" msgstr "" -#: cinder/policy.py:33 -msgid "Rule checked when requested rule is not found" +#: cinder/api/contrib/scheduler_hints.py:36 +msgid "Malformed scheduler_hints attribute" +msgstr "" + +#: cinder/api/contrib/services.py:84 +msgid "" +"Query by service parameter is deprecated. Please use binary parameter " +"instead." msgstr "" -#: cinder/service.py:137 -msgid "SIGTERM received" +#: cinder/api/contrib/snapshot_actions.py:51 +msgid "'status' must be specified." msgstr "" -#: cinder/service.py:177 +#: cinder/api/contrib/snapshot_actions.py:61 #, python-format -msgid "Starting %(topic)s node (version %(vcs_string)s)" +msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" msgstr "" -#: cinder/service.py:195 +#: cinder/api/contrib/snapshot_actions.py:67 #, python-format -msgid "Creating Consumer connection for Service %s" +msgid "" +"Provided snapshot status %(provided)s not allowed for snapshot with " +"status %(current)s." 
msgstr "" -#: cinder/service.py:282 -msgid "Service killed that has no database entry" +#: cinder/api/contrib/snapshot_actions.py:79 +msgid "progress must be an integer percentage" msgstr "" -#: cinder/service.py:319 -msgid "The service database object disappeared, Recreating it." +#: cinder/api/contrib/types_extra_specs.py:101 +msgid "Request body empty" msgstr "" -#: cinder/service.py:334 -msgid "Recovered model server connection!" +#: cinder/api/contrib/types_extra_specs.py:105 +#: cinder/api/v1/snapshot_metadata.py:75 cinder/api/v1/volume_metadata.py:75 +#: cinder/api/v2/snapshot_metadata.py:75 cinder/api/v2/volume_metadata.py:74 +msgid "Request body and URI mismatch" msgstr "" -#: cinder/service.py:340 -msgid "model server went away" +#: cinder/api/contrib/types_extra_specs.py:108 +#: cinder/api/v1/snapshot_metadata.py:79 cinder/api/v1/volume_metadata.py:79 +#: cinder/api/v2/snapshot_metadata.py:79 cinder/api/v2/volume_metadata.py:78 +msgid "Request body contains too many items" msgstr "" -#: cinder/service.py:433 -msgid "Full set of FLAGS:" +#: cinder/api/contrib/types_extra_specs.py:150 +msgid "" +"Key names can only contain alphanumeric characters, underscores, periods," +" colons and hyphens." msgstr "" -#: cinder/service.py:440 +#: cinder/api/contrib/volume_actions.py:99 #, python-format -msgid "%(flag)s : FLAG SET " +msgid "" +"Invalid request to attach volume to an instance %(instance_uuid)s and a " +"host %(host_name)s simultaneously" msgstr "" -#: cinder/utils.py:79 -#, python-format -msgid "Inner Exception: %s" +#: cinder/api/contrib/volume_actions.py:107 +msgid "Invalid request to attach volume to an invalid target" msgstr "" -#: cinder/utils.py:165 -#, python-format -msgid "Fetching %s" +#: cinder/api/contrib/volume_actions.py:111 +msgid "" +"Invalid request to attach volume with an invalid mode. Attaching mode " +"should be 'rw' or 'ro'" msgstr "" -#: cinder/utils.py:210 -#, python-format -msgid "Got unknown keyword args to utils.execute: %r" +#: cinder/api/contrib/volume_actions.py:196 +msgid "Unable to fetch connection information from backend." msgstr "" -#: cinder/utils.py:220 -#, python-format -msgid "Running cmd (subprocess): %s" +#: cinder/api/contrib/volume_actions.py:216 +msgid "Unable to terminate volume connection from backend." msgstr "" -#: cinder/utils.py:236 cinder/utils.py:315 -#, python-format -msgid "Result was %s" +#: cinder/api/contrib/volume_actions.py:229 +msgid "No image_name was specified in request." msgstr "" -#: cinder/utils.py:249 -#, python-format -msgid "%r failed. Retrying." +#: cinder/api/contrib/volume_actions.py:237 +msgid "Bad value for 'force' parameter." msgstr "" -#: cinder/utils.py:291 -#, python-format -msgid "Running cmd (SSH): %s" +#: cinder/api/contrib/volume_actions.py:240 +msgid "'force' is not string or bool." msgstr "" -#: cinder/utils.py:293 -msgid "Environment not supported over SSH" +#: cinder/api/contrib/volume_actions.py:280 +msgid "New volume size must be specified as an integer." msgstr "" -#: cinder/utils.py:297 -msgid "process_input not supported over SSH" +#: cinder/api/contrib/volume_actions.py:299 +msgid "Must specify readonly in request." 
msgstr "" -#: cinder/utils.py:352 -#, python-format -msgid "debug in callback: %s" +#: cinder/api/contrib/volume_actions.py:307 +msgid "Bad value for 'readonly'" msgstr "" -#: cinder/utils.py:534 -#, python-format -msgid "Link Local address is not found.:%s" +#: cinder/api/contrib/volume_actions.py:311 +msgid "'readonly' not string or bool" msgstr "" -#: cinder/utils.py:537 -#, python-format -msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +#: cinder/api/contrib/volume_actions.py:325 +msgid "New volume type must be specified." msgstr "" -#: cinder/utils.py:648 -#, python-format -msgid "Invalid backend: %s" +#: cinder/api/contrib/volume_transfer.py:131 +msgid "Listing volume transfers" msgstr "" -#: cinder/utils.py:659 +#: cinder/api/contrib/volume_transfer.py:147 #, python-format -msgid "backend %s" +msgid "Creating new volume transfer %s" msgstr "" -#: cinder/utils.py:709 -msgid "in looping call" +#: cinder/api/contrib/volume_transfer.py:162 +#, python-format +msgid "Creating transfer of volume %s" msgstr "" -#: cinder/utils.py:927 +#: cinder/api/contrib/volume_transfer.py:183 #, python-format -msgid "Attempting to grab semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgid "Accepting volume transfer %s" msgstr "" -#: cinder/utils.py:931 +#: cinder/api/contrib/volume_transfer.py:196 #, python-format -msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgid "Accepting transfer %s" msgstr "" -#: cinder/utils.py:935 +#: cinder/api/contrib/volume_transfer.py:217 #, python-format -msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgid "Delete transfer with id: %s" msgstr "" -#: cinder/utils.py:942 -#, python-format -msgid "Got file lock \"%(lock)s\" for method \"%(method)s\"..." +#: cinder/api/contrib/volume_type_encryption.py:64 +msgid "key_size must be non-negative" msgstr "" -#: cinder/utils.py:1001 -#, python-format -msgid "Found sentinel %(filename)s for pid %(pid)s" +#: cinder/api/contrib/volume_type_encryption.py:67 +msgid "key_size must be an integer" msgstr "" -#: cinder/utils.py:1008 -#, python-format -msgid "Cleaned sentinel %(filename)s for pid %(pid)s" +#: cinder/api/contrib/volume_type_encryption.py:73 +msgid "provider must be defined" msgstr "" -#: cinder/utils.py:1023 -#, python-format -msgid "Found lockfile %(file)s with link count %(count)d" +#: cinder/api/contrib/volume_type_encryption.py:75 +msgid "control_location must be defined" msgstr "" -#: cinder/utils.py:1028 +#: cinder/api/contrib/volume_type_encryption.py:83 #, python-format -msgid "Cleaned lockfile %(file)s with link count %(count)d" +msgid "Valid control location are: %s" msgstr "" -#: cinder/utils.py:1138 -#, python-format -msgid "Expected object of type: %s" +#: cinder/api/contrib/volume_type_encryption.py:111 +msgid "Cannot create encryption specs. Volume type in use." msgstr "" -#: cinder/utils.py:1169 -#, python-format -msgid "Invalid server_string: %s" +#: cinder/api/contrib/volume_type_encryption.py:115 +msgid "Create body is not valid." msgstr "" -#: cinder/utils.py:1298 -#, python-format -msgid "timefunc: '%(name)s' took %(total_time).2f secs" +#: cinder/api/contrib/volume_type_encryption.py:157 +msgid "Cannot delete encryption specs. Volume type in use." msgstr "" -#: cinder/utils.py:1330 -msgid "Original exception being dropped" +#: cinder/api/middleware/auth.py:108 +msgid "Invalid service catalog json." 
msgstr "" -#: cinder/utils.py:1461 +#: cinder/api/middleware/fault.py:44 #, python-format -msgid "Class %(fullname)s is deprecated: %(msg)s" +msgid "Caught error: %s" msgstr "" -#: cinder/utils.py:1463 +#: cinder/api/middleware/fault.py:53 cinder/api/openstack/wsgi.py:984 #, python-format -msgid "Class %(fullname)s is deprecated" +msgid "%(url)s returned with HTTP %(status)d" msgstr "" -#: cinder/utils.py:1495 +#: cinder/api/middleware/fault.py:69 #, python-format -msgid "Function %(name)s in %(location)s is deprecated: %(msg)s" +msgid "%(exception)s: %(explanation)s" msgstr "" -#: cinder/utils.py:1497 -#, python-format -msgid "Function %(name)s in %(location)s is deprecated" +#: cinder/api/middleware/sizelimit.py:55 cinder/api/middleware/sizelimit.py:64 +#: cinder/api/middleware/sizelimit.py:78 +msgid "Request is too large." msgstr "" -#: cinder/utils.py:1681 -#, python-format -msgid "Could not remove tmpdir: %s" +#: cinder/api/openstack/__init__.py:69 +msgid "Must specify an ExtensionManager class" msgstr "" -#: cinder/wsgi.py:97 +#: cinder/api/openstack/__init__.py:80 #, python-format -msgid "Started %(name)s on %(host)s:%(port)s" +msgid "Extended resource: %s" msgstr "" -#: cinder/wsgi.py:108 -msgid "Stopping WSGI server." +#: cinder/api/openstack/__init__.py:104 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" msgstr "" -#: cinder/wsgi.py:111 -msgid "Stopping raw TCP server." +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" msgstr "" -#: cinder/wsgi.py:117 -#, python-format -msgid "Starting TCP server %(arg0)s on %(host)s:%(port)s" +#: cinder/api/openstack/__init__.py:126 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." msgstr "" -#: cinder/wsgi.py:133 -msgid "WSGI server has stopped." +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." msgstr "" -#: cinder/wsgi.py:211 -msgid "You must implement __call__" +#: cinder/api/openstack/wsgi.py:220 cinder/api/openstack/wsgi.py:634 +msgid "cannot understand JSON" msgstr "" -#: cinder/api/direct.py:218 -msgid "not available" +#: cinder/api/openstack/wsgi.py:639 +msgid "too many body keys" msgstr "" -#: cinder/api/direct.py:299 +#: cinder/api/openstack/wsgi.py:677 #, python-format -msgid "Returned non-serializeable type: %s" +msgid "Exception handling resource: %s" msgstr "" -#: cinder/api/sizelimit.py:51 -msgid "Request is too large." +#: cinder/api/openstack/wsgi.py:682 +#, python-format +msgid "Fault thrown: %s" msgstr "" -#: cinder/api/validator.py:142 +#: cinder/api/openstack/wsgi.py:685 #, python-format -msgid "%(key)s with value %(value)s failed validator %(validator)s" +msgid "HTTP exception thrown: %s" msgstr "" -#: cinder/api/ec2/__init__.py:73 -#, python-format -msgid "%(code)s: %(message)s" +#: cinder/api/openstack/wsgi.py:793 +msgid "Empty body provided in request" msgstr "" -#: cinder/api/ec2/__init__.py:95 -#, python-format -msgid "FaultWrapper: %s" +#: cinder/api/openstack/wsgi.py:799 +msgid "Unrecognized Content-Type provided in request" msgstr "" -#: cinder/api/ec2/__init__.py:170 -msgid "Too many failed authentications." 
+#: cinder/api/openstack/wsgi.py:803 +msgid "No Content-Type provided in request" msgstr "" -#: cinder/api/ec2/__init__.py:180 +#: cinder/api/openstack/wsgi.py:914 #, python-format -msgid "" -"Access key %(access_key)s has had %(failures)d failed authentications and" -" will be locked out for %(lock_mins)d minutes." +msgid "There is no such action: %s" msgstr "" -#: cinder/api/ec2/__init__.py:267 -msgid "Signature not provided" +#: cinder/api/openstack/wsgi.py:917 cinder/api/openstack/wsgi.py:930 +#: cinder/api/v1/snapshot_metadata.py:53 cinder/api/v1/snapshot_metadata.py:71 +#: cinder/api/v1/snapshot_metadata.py:96 cinder/api/v1/snapshot_metadata.py:121 +#: cinder/api/v1/volume_metadata.py:53 cinder/api/v1/volume_metadata.py:71 +#: cinder/api/v1/volume_metadata.py:96 cinder/api/v1/volume_metadata.py:121 +#: cinder/api/v2/snapshot_metadata.py:53 cinder/api/v2/snapshot_metadata.py:71 +#: cinder/api/v2/snapshot_metadata.py:96 cinder/api/v2/snapshot_metadata.py:121 +#: cinder/api/v2/volume_metadata.py:52 cinder/api/v2/volume_metadata.py:70 +#: cinder/api/v2/volume_metadata.py:95 cinder/api/v2/volume_metadata.py:120 +msgid "Malformed request body" msgstr "" -#: cinder/api/ec2/__init__.py:271 -msgid "Access key not provided" +#: cinder/api/openstack/wsgi.py:927 +msgid "Unsupported Content-Type" msgstr "" -#: cinder/api/ec2/__init__.py:306 cinder/api/ec2/__init__.py:319 -msgid "Failure communicating with keystone" +#: cinder/api/openstack/wsgi.py:939 +msgid "Malformed request url" msgstr "" -#: cinder/api/ec2/__init__.py:388 +#: cinder/api/openstack/wsgi.py:987 #, python-format -msgid "Authentication Failure: %s" +msgid "%(url)s returned a fault: %(e)s" msgstr "" -#: cinder/api/ec2/__init__.py:404 -#, python-format -msgid "Authenticated Request For %(uname)s:%(pname)s)" +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." msgstr "" -#: cinder/api/ec2/__init__.py:435 -#, python-format -msgid "action: %s" +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." msgstr "" -#: cinder/api/ec2/__init__.py:437 +#: cinder/api/v1/limits.py:139 cinder/api/v2/limits.py:138 #, python-format -msgid "arg: %(key)s\t\tval: %(value)s" +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." msgstr "" -#: cinder/api/ec2/__init__.py:512 -#, python-format -msgid "Unauthorized request for controller=%(controller)s and action=%(action)s" +#: cinder/api/v1/limits.py:264 cinder/api/v2/limits.py:261 +msgid "This request was rate-limited." 
msgstr "" -#: cinder/api/ec2/__init__.py:584 -#, python-format -msgid "InstanceNotFound raised: %s" +#: cinder/api/v1/snapshot_metadata.py:37 cinder/api/v1/snapshot_metadata.py:117 +#: cinder/api/v1/snapshot_metadata.py:156 cinder/api/v2/snapshot_metadata.py:37 +#: cinder/api/v2/snapshot_metadata.py:117 +#: cinder/api/v2/snapshot_metadata.py:156 +msgid "snapshot does not exist" msgstr "" -#: cinder/api/ec2/__init__.py:590 -#, python-format -msgid "VolumeNotFound raised: %s" +#: cinder/api/v1/snapshot_metadata.py:139 +#: cinder/api/v1/snapshot_metadata.py:149 cinder/api/v1/volume_metadata.py:139 +#: cinder/api/v1/volume_metadata.py:149 cinder/api/v2/snapshot_metadata.py:139 +#: cinder/api/v2/snapshot_metadata.py:149 cinder/api/v2/volume_metadata.py:138 +#: cinder/api/v2/volume_metadata.py:148 +msgid "Metadata item was not found" msgstr "" -#: cinder/api/ec2/__init__.py:596 +#: cinder/api/v1/snapshots.py:119 cinder/api/v2/snapshots.py:120 #, python-format -msgid "SnapshotNotFound raised: %s" +msgid "Delete snapshot with id: %s" msgstr "" -#: cinder/api/ec2/__init__.py:602 -#, python-format -msgid "NotFound raised: %s" +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:184 +msgid "'volume_id' must be specified" msgstr "" -#: cinder/api/ec2/__init__.py:605 +#: cinder/api/v1/snapshots.py:182 cinder/api/v2/snapshots.py:193 #, python-format -msgid "EC2APIError raised: %s" +msgid "Create snapshot from volume %s" msgstr "" -#: cinder/api/ec2/__init__.py:613 +#: cinder/api/v1/snapshots.py:186 cinder/api/v2/snapshots.py:202 #, python-format -msgid "KeyPairExists raised: %s" +msgid "Invalid value '%s' for force. " msgstr "" -#: cinder/api/ec2/__init__.py:617 -#, python-format -msgid "InvalidParameterValue raised: %s" +#: cinder/api/v1/volume_metadata.py:37 cinder/api/v1/volume_metadata.py:117 +#: cinder/api/v1/volume_metadata.py:156 cinder/api/v2/volume_metadata.py:36 +#: cinder/api/v2/volume_metadata.py:116 cinder/api/v2/volume_metadata.py:155 +msgid "volume does not exist" msgstr "" -#: cinder/api/ec2/__init__.py:621 +#: cinder/api/v1/volumes.py:111 #, python-format -msgid "InvalidPortRange raised: %s" +msgid "vol=%s" msgstr "" -#: cinder/api/ec2/__init__.py:625 +#: cinder/api/v1/volumes.py:290 cinder/api/v2/volumes.py:228 #, python-format -msgid "NotAuthorized raised: %s" +msgid "Delete volume with id: %s" msgstr "" -#: cinder/api/ec2/__init__.py:629 -#, python-format -msgid "InvalidRequest raised: %s" +#: cinder/api/v1/volumes.py:344 cinder/api/v1/volumes.py:348 +#: cinder/api/v2/volumes.py:298 cinder/api/v2/volumes.py:302 +msgid "Invalid imageRef provided." msgstr "" -#: cinder/api/ec2/__init__.py:633 +#: cinder/api/v1/volumes.py:388 cinder/api/v2/volumes.py:354 #, python-format -msgid "QuotaError raised: %s" +msgid "snapshot id:%s not found" msgstr "" -#: cinder/api/ec2/__init__.py:637 +#: cinder/api/v1/volumes.py:401 #, python-format -msgid "Invalid id: bogus (expecting \"i-...\"): %s" +msgid "source vol id:%s not found" msgstr "" -#: cinder/api/ec2/__init__.py:646 +#: cinder/api/v1/volumes.py:412 cinder/api/v2/volumes.py:377 #, python-format -msgid "Unexpected error raised: %s" +msgid "Create volume of %s GB" msgstr "" -#: cinder/api/ec2/__init__.py:647 +#: cinder/api/v1/volumes.py:496 #, python-format -msgid "Environment: %s" +msgid "Removing options '%(bad_options)s' from query" msgstr "" -#: cinder/api/ec2/__init__.py:649 cinder/api/metadata/handler.py:248 -msgid "An unknown error has occurred. Please try your request again." 
+#: cinder/api/v2/snapshots.py:111 cinder/api/v2/snapshots.py:126 +#: cinder/api/v2/snapshots.py:267 +msgid "Snapshot could not be found" msgstr "" -#: cinder/api/ec2/apirequest.py:64 +#: cinder/api/v2/snapshots.py:174 cinder/api/v2/snapshots.py:234 +#: cinder/api/v2/volumes.py:313 cinder/api/v2/volumes.py:419 #, python-format -msgid "Unsupported API request: controller = %(controller)s, action = %(action)s" +msgid "Missing required element '%s' in request body" msgstr "" -#: cinder/api/ec2/cloud.py:336 -#, python-format -msgid "Create snapshot of volume %s" +#: cinder/api/v2/snapshots.py:190 cinder/api/v2/volumes.py:217 +#: cinder/api/v2/volumes.py:234 cinder/api/v2/volumes.py:449 +msgid "Volume could not be found" msgstr "" -#: cinder/api/ec2/cloud.py:372 -#, python-format -msgid "" -"Value (%s) for KeyName is invalid. Content limited to Alphanumeric " -"character, spaces, dashes, and underscore." +#: cinder/api/v2/snapshots.py:230 cinder/api/v2/volumes.py:415 +msgid "Missing request body" msgstr "" -#: cinder/api/ec2/cloud.py:378 -#, python-format -msgid "Value (%s) for Keyname is invalid. Length exceeds maximum of 255." +#: cinder/api/v2/types.py:70 +msgid "Volume type not found" msgstr "" -#: cinder/api/ec2/cloud.py:382 -#, python-format -msgid "Create key pair %s" +#: cinder/api/v2/volumes.py:237 +msgid "Volume cannot be deleted while in attached state" msgstr "" -#: cinder/api/ec2/cloud.py:391 -#, python-format -msgid "Import key %s" +#: cinder/api/v2/volumes.py:343 +msgid "Volume type not found." msgstr "" -#: cinder/api/ec2/cloud.py:409 +#: cinder/api/v2/volumes.py:366 #, python-format -msgid "Delete key pair %s" +msgid "source volume id:%s not found" msgstr "" -#: cinder/api/ec2/cloud.py:551 -msgid "Invalid CIDR" +#: cinder/api/v2/volumes.py:472 +#, python-format +msgid "Removing options '%s' from query" msgstr "" -#: cinder/api/ec2/cloud.py:639 cinder/api/ec2/cloud.py:693 -#: cinder/api/ec2/cloud.py:800 -msgid "Not enough parameters, need group_name or group_id" +#: cinder/backup/api.py:66 +msgid "Backup status must be available or error" msgstr "" -#: cinder/api/ec2/cloud.py:654 -#: cinder/api/openstack/compute/contrib/security_groups.py:517 -#, python-format -msgid "Revoke security group ingress %s" +#: cinder/backup/api.py:105 +msgid "Volume to be backed up must be available" msgstr "" -#: cinder/api/ec2/cloud.py:666 cinder/api/ec2/cloud.py:719 -#, python-format -msgid "%s Not enough parameters to build a valid rule" +#: cinder/backup/api.py:140 +msgid "Backup status must be available" msgstr "" -#: cinder/api/ec2/cloud.py:684 cinder/api/ec2/cloud.py:744 -msgid "No rule for the specified parameters." +#: cinder/backup/api.py:145 +msgid "Backup to be restored has invalid size" msgstr "" -#: cinder/api/ec2/cloud.py:708 -#: cinder/api/openstack/compute/contrib/security_groups.py:354 +#: cinder/backup/api.py:154 #, python-format -msgid "Authorize security group ingress %s" +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" msgstr "" -#: cinder/api/ec2/cloud.py:725 -#, python-format -msgid "%s - This rule already exists in group" +#: cinder/backup/api.py:170 +msgid "Volume to be restored to must be available" msgstr "" -#: cinder/api/ec2/cloud.py:769 +#: cinder/backup/api.py:176 #, python-format msgid "" -"Value (%s) for parameter GroupName is invalid. Content limited to " -"Alphanumeric characters, spaces, dashes, and underscores." +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." 
msgstr "" -#: cinder/api/ec2/cloud.py:776 +#: cinder/backup/api.py:181 #, python-format -msgid "" -"Value (%s) for parameter GroupName is invalid. Length exceeds maximum of " -"255." +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" msgstr "" -#: cinder/api/ec2/cloud.py:780 -#: cinder/api/openstack/compute/contrib/security_groups.py:292 -#, python-format -msgid "Create Security Group %s" +#: cinder/backup/manager.py:97 +msgid "NULL host not allowed for volume backend lookup." msgstr "" -#: cinder/api/ec2/cloud.py:783 +#: cinder/backup/manager.py:100 #, python-format -msgid "group %s already exists" +msgid "Checking hostname '%s' for backend info." msgstr "" -#: cinder/api/ec2/cloud.py:815 -#: cinder/api/openstack/compute/contrib/security_groups.py:245 +#: cinder/backup/manager.py:107 #, python-format -msgid "Delete security group %s" +msgid "Backend not found in hostname (%s) so using default." msgstr "" -#: cinder/api/ec2/cloud.py:823 cinder/compute/manager.py:1630 +#: cinder/backup/manager.py:117 #, python-format -msgid "Get console output for instance %s" +msgid "Manager requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:120 cinder/backup/manager.py:132 +msgid "Fetching default backend." msgstr "" -#: cinder/api/ec2/cloud.py:894 +#: cinder/backup/manager.py:123 #, python-format -msgid "Create volume from snapshot %s" +msgid "Volume manager for backend '%s' does not exist." msgstr "" -#: cinder/api/ec2/cloud.py:898 cinder/api/openstack/compute/contrib/volumes.py:186 -#: cinder/api/openstack/volume/volumes.py:222 +#: cinder/backup/manager.py:129 #, python-format -msgid "Create volume of %s GB" +msgid "Driver requested for volume_backend '%s'." msgstr "" -#: cinder/api/ec2/cloud.py:921 -msgid "Delete Failed" +#: cinder/backup/manager.py:147 +#, python-format +msgid "" +"Registering backend %(backend)s (host=%(host)s " +"backend_name=%(backend_name)s)." msgstr "" -#: cinder/api/ec2/cloud.py:931 +#: cinder/backup/manager.py:154 #, python-format -msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" +msgid "Registering default backend %s." msgstr "" -#: cinder/api/ec2/cloud.py:939 -msgid "Attach Failed." +#: cinder/backup/manager.py:158 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)." msgstr "" -#: cinder/api/ec2/cloud.py:952 cinder/api/openstack/compute/contrib/volumes.py:366 +#: cinder/backup/manager.py:165 #, python-format -msgid "Detach volume %s" +msgid "Error encountered during initialization of driver: %(name)s." msgstr "" -#: cinder/api/ec2/cloud.py:959 -msgid "Detach Volume Failed." +#: cinder/backup/manager.py:184 +msgid "Cleaning up incomplete backup operations." msgstr "" -#: cinder/api/ec2/cloud.py:984 cinder/api/ec2/cloud.py:1041 -#: cinder/api/ec2/cloud.py:1518 cinder/api/ec2/cloud.py:1533 +#: cinder/backup/manager.py:189 #, python-format -msgid "attribute not supported: %s" +msgid "Resetting volume %s to available (was backing-up)." msgstr "" -#: cinder/api/ec2/cloud.py:1107 +#: cinder/backup/manager.py:194 #, python-format -msgid "vol = %s\n" +msgid "Resetting volume %s to error_restoring (was restoring-backup)." msgstr "" -#: cinder/api/ec2/cloud.py:1255 -msgid "Allocate address" +#: cinder/backup/manager.py:206 +#, python-format +msgid "Resetting backup %s to error (was creating)." msgstr "" -#: cinder/api/ec2/cloud.py:1267 +#: cinder/backup/manager.py:212 #, python-format -msgid "Release address %s" +msgid "Resetting backup %s to available (was restoring)." 
msgstr "" -#: cinder/api/ec2/cloud.py:1272 +#: cinder/backup/manager.py:217 #, python-format -msgid "Associate address %(public_ip)s to instance %(instance_id)s" +msgid "Resuming delete on backup: %s." msgstr "" -#: cinder/api/ec2/cloud.py:1282 +#: cinder/backup/manager.py:225 #, python-format -msgid "Disassociate address %s" +msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." msgstr "" -#: cinder/api/ec2/cloud.py:1308 -msgid "Image must be available" +#: cinder/backup/manager.py:237 +#, python-format +msgid "" +"Create backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s." msgstr "" -#: cinder/api/ec2/cloud.py:1329 -msgid "Going to start terminating instances" +#: cinder/backup/manager.py:249 +#, python-format +msgid "" +"Create backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." msgstr "" -#: cinder/api/ec2/cloud.py:1343 +#: cinder/backup/manager.py:282 #, python-format -msgid "Reboot instance %r" +msgid "Create backup finished. backup: %s." msgstr "" -#: cinder/api/ec2/cloud.py:1354 -msgid "Going to stop instances" +#: cinder/backup/manager.py:286 +#, python-format +msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s." msgstr "" -#: cinder/api/ec2/cloud.py:1365 -msgid "Going to start instances" +#: cinder/backup/manager.py:299 +#, python-format +msgid "" +"Restore backup aborted: expected volume status %(expected_status)s but " +"got %(actual_status)s." msgstr "" -#: cinder/api/ec2/cloud.py:1455 +#: cinder/backup/manager.py:310 #, python-format -msgid "De-registering image %s" +msgid "" +"Restore backup aborted: expected backup status %(expected_status)s but " +"got %(actual_status)s." msgstr "" -#: cinder/api/ec2/cloud.py:1471 -msgid "imageLocation is required" +#: cinder/backup/manager.py:329 +#, python-format +msgid "" +"Restore backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." msgstr "" -#: cinder/api/ec2/cloud.py:1490 +#: cinder/backup/manager.py:360 #, python-format -msgid "Registered image %(image_location)s with id %(image_id)s" +msgid "" +"Restore backup finished, backup %(backup_id)s restored to volume " +"%(volume_id)s." msgstr "" -#: cinder/api/ec2/cloud.py:1536 -msgid "user or group not specified" +#: cinder/backup/manager.py:379 +#, python-format +msgid "Delete backup started, backup: %s." msgstr "" -#: cinder/api/ec2/cloud.py:1538 -msgid "only group \"all\" is supported" +#: cinder/backup/manager.py:386 +#, python-format +msgid "" +"Delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." msgstr "" -#: cinder/api/ec2/cloud.py:1540 -msgid "operation_type must be add or remove" +#: cinder/backup/manager.py:399 +#, python-format +msgid "" +"Delete backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." msgstr "" -#: cinder/api/ec2/cloud.py:1542 +#: cinder/backup/manager.py:422 #, python-format -msgid "Updating image %s publicity" +msgid "Delete backup finished, backup %s deleted." 
msgstr "" -#: cinder/api/ec2/cloud.py:1555 -#, python-format -msgid "Not allowed to modify attributes for image %s" +#: cinder/backup/drivers/ceph.py:116 +msgid "" +"rbd striping not supported - ignoring configuration settings for rbd " +"striping" msgstr "" -#: cinder/api/ec2/cloud.py:1603 +#: cinder/backup/drivers/ceph.py:147 #, python-format -msgid "Couldn't stop instance with in %d sec" +msgid "invalid user '%s'" msgstr "" -#: cinder/api/metadata/handler.py:246 cinder/api/metadata/handler.py:253 -#, python-format -msgid "Failed to get metadata for ip: %s" +#: cinder/backup/drivers/ceph.py:213 +msgid "backup_id required" msgstr "" -#: cinder/api/openstack/__init__.py:43 +#: cinder/backup/drivers/ceph.py:224 #, python-format -msgid "Caught error: %s" +msgid "discarding %(length)s bytes from offset %(offset)s" msgstr "" -#: cinder/api/openstack/__init__.py:45 cinder/api/openstack/wsgi.py:886 +#: cinder/backup/drivers/ceph.py:232 #, python-format -msgid "%(url)s returned with HTTP %(status)d" +msgid "writing zeroes chunk %d" msgstr "" -#: cinder/api/openstack/__init__.py:94 -msgid "Must specify an ExtensionManager class" +#: cinder/backup/drivers/ceph.py:246 +#, python-format +msgid "transferring data between '%(src)s' and '%(dest)s'" msgstr "" -#: cinder/api/openstack/__init__.py:105 +#: cinder/backup/drivers/ceph.py:250 #, python-format -msgid "Extended resource: %s" +msgid "%(chunks)s chunks of %(bytes)s bytes to be transferred" msgstr "" -#: cinder/api/openstack/__init__.py:130 +#: cinder/backup/drivers/ceph.py:269 #, python-format -msgid "" -"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " -"resource" +msgid "transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s)" msgstr "" -#: cinder/api/openstack/__init__.py:135 +#: cinder/backup/drivers/ceph.py:279 #, python-format -msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgid "transferring remaining %s bytes" msgstr "" -#: cinder/api/openstack/auth.py:90 +#: cinder/backup/drivers/ceph.py:295 #, python-format -msgid "%(user_id)s could not be found with token '%(token)s'" +msgid "creating base image '%s'" msgstr "" -#: cinder/api/openstack/auth.py:134 +#: cinder/backup/drivers/ceph.py:322 cinder/backup/drivers/ceph.py:603 #, python-format -msgid "%(user_id)s must be an admin or a member of %(project_id)s" +msgid "deleting backup snapshot='%s'" msgstr "" -#: cinder/api/openstack/auth.py:152 -msgid "Authentication requests must be made against a version root (e.g. /v2)." +#: cinder/backup/drivers/ceph.py:325 +msgid "no backup snapshot to delete" msgstr "" -#: cinder/api/openstack/auth.py:167 +#: cinder/backup/drivers/ceph.py:361 #, python-format -msgid "Could not find %s in request." +msgid "trying diff format name format basename='%s'" msgstr "" -#: cinder/api/openstack/auth.py:191 +#: cinder/backup/drivers/ceph.py:369 #, python-format -msgid "Successfully authenticated '%s'" +msgid "image %s not found" msgstr "" -#: cinder/api/openstack/auth.py:241 -msgid "User not found with provided API key." 
+#: cinder/backup/drivers/ceph.py:377
+#, python-format
+msgid "base image still has %s snapshots so skipping base image delete"
msgstr ""

-#: cinder/api/openstack/auth.py:258
+#: cinder/backup/drivers/ceph.py:382
 #, python-format
-msgid "Provided API key is valid, but not for user '%(username)s'"
+msgid "deleting base image='%s'"
msgstr ""

-#: cinder/api/openstack/common.py:133 cinder/api/openstack/common.py:167
-msgid "limit param must be an integer"
+#: cinder/backup/drivers/ceph.py:389
+#, python-format
+msgid "image busy, retrying %(retries)s more time(s) in %(delay)ss"
msgstr ""

-#: cinder/api/openstack/common.py:136 cinder/api/openstack/common.py:171
-msgid "limit param must be positive"
+#: cinder/backup/drivers/ceph.py:394
+msgid "max retries reached - raising error"
msgstr ""

-#: cinder/api/openstack/common.py:161
-msgid "offset param must be an integer"
+#: cinder/backup/drivers/ceph.py:397
+#, python-format
+msgid "base backup image='%s' deleted"
msgstr ""

-#: cinder/api/openstack/common.py:175
-msgid "offset param must be positive"
+#: cinder/backup/drivers/ceph.py:407
+#, python-format
+msgid "deleting source snap '%s'"
msgstr ""

-#: cinder/api/openstack/common.py:203
+#: cinder/backup/drivers/ceph.py:453
 #, python-format
-msgid "marker [%s] not found"
+msgid "performing differential transfer from '%(src)s' to '%(dest)s'"
msgstr ""

-#: cinder/api/openstack/common.py:243
+#: cinder/backup/drivers/ceph.py:478
 #, python-format
-msgid "href %s does not contain version"
+msgid "rbd diff op failed - (ret=%(ret)s stderr=%(stderr)s)"
msgstr ""

-#: cinder/api/openstack/common.py:278
-msgid "Image metadata limit exceeded"
+#: cinder/backup/drivers/ceph.py:488
+#, python-format
+msgid "image '%s' not found - trying diff format name"
msgstr ""

-#: cinder/api/openstack/common.py:295
+#: cinder/backup/drivers/ceph.py:493
 #, python-format
-msgid "Converting nw_info: %s"
+msgid "diff format image '%s' not found"
msgstr ""

-#: cinder/api/openstack/common.py:305
+#: cinder/backup/drivers/ceph.py:528
 #, python-format
-msgid "Converted networks: %s"
+msgid "using --from-snap '%s'"
msgstr ""

-#: cinder/api/openstack/common.py:338
+#: cinder/backup/drivers/ceph.py:543
 #, python-format
-msgid "Cannot '%(action)s' while instance is in %(attr)s %(state)s"
+msgid "source snap '%s' is stale so deleting"
msgstr ""

-#: cinder/api/openstack/common.py:341
+#: cinder/backup/drivers/ceph.py:555
 #, python-format
-msgid "Instance is in an invalid state for '%(action)s'"
+msgid ""
+"snap='%(snap)s' does not exist in base image='%(base)s' - aborting "
+"incremental backup"
msgstr ""

-#: cinder/api/openstack/common.py:421
-msgid "Rejecting snapshot request, snapshots currently disabled"
+#: cinder/backup/drivers/ceph.py:566
+#, python-format
+msgid "creating backup snapshot='%s'"
msgstr ""

-#: cinder/api/openstack/common.py:423
-msgid "Instance snapshots are not permitted at this time."
+#: cinder/backup/drivers/ceph.py:586 +#, python-format +msgid "differential backup transfer completed in %.4fs" msgstr "" -#: cinder/api/openstack/extensions.py:188 -#, python-format -msgid "Loaded extension: %s" +#: cinder/backup/drivers/ceph.py:595 +msgid "differential backup transfer failed" msgstr "" -#: cinder/api/openstack/extensions.py:225 +#: cinder/backup/drivers/ceph.py:625 #, python-format -msgid "Ext name: %s" +msgid "creating base image='%s'" msgstr "" -#: cinder/api/openstack/extensions.py:226 -#, python-format -msgid "Ext alias: %s" +#: cinder/backup/drivers/ceph.py:634 +msgid "copying data" msgstr "" -#: cinder/api/openstack/extensions.py:227 +#: cinder/backup/drivers/ceph.py:694 #, python-format -msgid "Ext description: %s" +msgid "looking for snapshot of backup base '%s'" msgstr "" -#: cinder/api/openstack/extensions.py:229 +#: cinder/backup/drivers/ceph.py:697 #, python-format -msgid "Ext namespace: %s" +msgid "backup base '%s' has no snapshots" msgstr "" -#: cinder/api/openstack/extensions.py:230 +#: cinder/backup/drivers/ceph.py:704 #, python-format -msgid "Ext updated: %s" +msgid "backup '%s' has no snapshot" msgstr "" -#: cinder/api/openstack/extensions.py:232 +#: cinder/backup/drivers/ceph.py:708 #, python-format -msgid "Exception loading extension: %s" +msgid "backup should only have one snapshot but instead has %s" msgstr "" -#: cinder/api/openstack/extensions.py:246 +#: cinder/backup/drivers/ceph.py:713 #, python-format -msgid "Loading extension %s" +msgid "found snapshot '%s'" msgstr "" -#: cinder/api/openstack/extensions.py:252 -#, python-format -msgid "Calling extension factory %s" +#: cinder/backup/drivers/ceph.py:734 +msgid "need non-zero volume size" msgstr "" -#: cinder/api/openstack/extensions.py:264 +#: cinder/backup/drivers/ceph.py:751 #, python-format -msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgid "Starting backup of volume='%s'" msgstr "" -#: cinder/api/openstack/extensions.py:344 -#, python-format -msgid "Failed to load extension %(classpath)s: %(exc)s" +#: cinder/backup/drivers/ceph.py:764 +msgid "forcing full backup" msgstr "" -#: cinder/api/openstack/extensions.py:368 +#: cinder/backup/drivers/ceph.py:776 #, python-format -msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgid "backup '%s' finished." 
msgstr "" -#: cinder/api/openstack/wsgi.py:135 cinder/api/openstack/wsgi.py:538 -msgid "cannot understand JSON" +#: cinder/backup/drivers/ceph.py:834 +msgid "adjusting restore vol size" msgstr "" -#: cinder/api/openstack/wsgi.py:159 -#: cinder/api/openstack/compute/contrib/hosts.py:86 -msgid "cannot understand XML" +#: cinder/backup/drivers/ceph.py:846 +#, python-format +msgid "trying incremental restore from base='%(base)s' snap='%(snap)s'" msgstr "" -#: cinder/api/openstack/wsgi.py:543 -msgid "too many body keys" +#: cinder/backup/drivers/ceph.py:858 +msgid "differential restore failed, trying full restore" msgstr "" -#: cinder/api/openstack/wsgi.py:582 +#: cinder/backup/drivers/ceph.py:869 #, python-format -msgid "Exception handling resource: %s" +msgid "restore transfer completed in %.4fs" msgstr "" -#: cinder/api/openstack/wsgi.py:586 +#: cinder/backup/drivers/ceph.py:916 #, python-format -msgid "Fault thrown: %s" +msgid "rbd has %s extents" msgstr "" -#: cinder/api/openstack/wsgi.py:589 -#, python-format -msgid "HTTP exception thrown: %s" +#: cinder/backup/drivers/ceph.py:938 +msgid "dest volume is original volume - forcing full copy" msgstr "" -#: cinder/api/openstack/wsgi.py:697 -msgid "Unrecognized Content-Type provided in request" +#: cinder/backup/drivers/ceph.py:959 +msgid "destination has extents - forcing full copy" msgstr "" -#: cinder/api/openstack/wsgi.py:701 -msgid "No Content-Type provided in request" +#: cinder/backup/drivers/ceph.py:964 +#, python-format +msgid "no restore point found for backup='%s', forcing full copy" msgstr "" -#: cinder/api/openstack/wsgi.py:705 -msgid "Empty body provided in request" +#: cinder/backup/drivers/ceph.py:995 +msgid "forcing full restore" msgstr "" -#: cinder/api/openstack/wsgi.py:816 +#: cinder/backup/drivers/ceph.py:1005 #, python-format -msgid "There is no such action: %s" +msgid "starting restore from Ceph backup=%(src)s to volume=%(dest)s" msgstr "" -#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 -#: cinder/api/openstack/compute/server_metadata.py:58 -#: cinder/api/openstack/compute/server_metadata.py:76 -#: cinder/api/openstack/compute/server_metadata.py:101 -#: cinder/api/openstack/compute/server_metadata.py:126 -#: cinder/api/openstack/compute/contrib/admin_actions.py:211 -#: cinder/api/openstack/compute/contrib/console_output.py:52 -msgid "Malformed request body" +#: cinder/backup/drivers/ceph.py:1016 +msgid "volume_file does not support fileno() so skipping fsync()" msgstr "" -#: cinder/api/openstack/wsgi.py:829 -msgid "Unsupported Content-Type" +#: cinder/backup/drivers/ceph.py:1021 +msgid "restore finished successfully." 
msgstr "" -#: cinder/api/openstack/wsgi.py:841 -msgid "Malformed request url" +#: cinder/backup/drivers/ceph.py:1023 +#, python-format +msgid "restore finished with error - %s" msgstr "" -#: cinder/api/openstack/wsgi.py:889 +#: cinder/backup/drivers/ceph.py:1029 #, python-format -msgid "%(url)s returned a fault: %(e)s" +msgid "delete started for backup=%s" msgstr "" -#: cinder/api/openstack/xmlutil.py:265 -msgid "element is not a child" +#: cinder/backup/drivers/ceph.py:1034 +msgid "rbd image not found but continuing anyway so that db entry can be removed" msgstr "" -#: cinder/api/openstack/xmlutil.py:414 -msgid "root element selecting a list" +#: cinder/backup/drivers/ceph.py:1037 +#, python-format +msgid "delete '%s' finished with warning" msgstr "" -#: cinder/api/openstack/xmlutil.py:739 +#: cinder/backup/drivers/ceph.py:1039 #, python-format -msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgid "delete '%s' finished" msgstr "" -#: cinder/api/openstack/xmlutil.py:858 -msgid "subclasses must implement construct()!" +#: cinder/backup/drivers/swift.py:106 +#, python-format +msgid "unsupported compression algorithm: %s" msgstr "" -#: cinder/api/openstack/compute/extensions.py:29 -#: cinder/api/openstack/volume/extensions.py:29 -msgid "Initializing extension manager." +#: cinder/backup/drivers/swift.py:123 +#, python-format +msgid "single_user auth mode enabled, but %(param)s not set" msgstr "" -#: cinder/api/openstack/compute/image_metadata.py:40 -#: cinder/api/openstack/compute/images.py:146 -#: cinder/api/openstack/compute/images.py:161 -msgid "Image not found." +#: cinder/backup/drivers/swift.py:141 +#, python-format +msgid "_check_container_exists: container: %s" msgstr "" -#: cinder/api/openstack/compute/image_metadata.py:79 -msgid "Incorrect request body format" +#: cinder/backup/drivers/swift.py:146 +#, python-format +msgid "container %s does not exist" msgstr "" -#: cinder/api/openstack/compute/image_metadata.py:83 -#: cinder/api/openstack/compute/server_metadata.py:80 -#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:79 -#: cinder/api/openstack/compute/contrib/volumetypes.py:188 -msgid "Request body and URI mismatch" +#: cinder/backup/drivers/swift.py:151 +#, python-format +msgid "container %s exists" msgstr "" -#: cinder/api/openstack/compute/image_metadata.py:86 -#: cinder/api/openstack/compute/server_metadata.py:84 -#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:82 -#: cinder/api/openstack/compute/contrib/volumetypes.py:191 -msgid "Request body contains too many items" +#: cinder/backup/drivers/swift.py:157 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" msgstr "" -#: cinder/api/openstack/compute/image_metadata.py:111 -msgid "Invalid metadata key" +#: cinder/backup/drivers/swift.py:173 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" msgstr "" -#: cinder/api/openstack/compute/ips.py:74 -msgid "Instance does not exist" +#: cinder/backup/drivers/swift.py:182 +#, python-format +msgid "generated object list: %s" msgstr "" -#: cinder/api/openstack/compute/ips.py:97 -msgid "Instance is not a member of specified network" +#: cinder/backup/drivers/swift.py:192 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" msgstr "" -#: cinder/api/openstack/compute/limits.py:140 +#: cinder/backup/drivers/swift.py:209 #, python-format msgid "" -"Only %(value)s %(verb)s request(s) can be made to %(uri)s 
every " -"%(unit_string)s." +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" msgstr "" -#: cinder/api/openstack/compute/limits.py:266 -msgid "This request was rate-limited." +#: cinder/backup/drivers/swift.py:214 +msgid "_write_metadata finished" msgstr "" -#: cinder/api/openstack/compute/server_metadata.py:38 -#: cinder/api/openstack/compute/server_metadata.py:122 -#: cinder/api/openstack/compute/server_metadata.py:159 -msgid "Server does not exist" +#: cinder/backup/drivers/swift.py:219 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" msgstr "" -#: cinder/api/openstack/compute/server_metadata.py:141 -#: cinder/api/openstack/compute/server_metadata.py:152 -msgid "Metadata item was not found" +#: cinder/backup/drivers/swift.py:224 +#, python-format +msgid "_read_metadata finished (%s)" msgstr "" -#: cinder/api/openstack/compute/servers.py:425 +#: cinder/backup/drivers/swift.py:234 #, python-format -msgid "Invalid server status: %(status)s" +msgid "volume size %d is invalid." msgstr "" -#: cinder/api/openstack/compute/servers.py:433 -msgid "Invalid changes-since value" +#: cinder/backup/drivers/swift.py:248 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:483 -msgid "Personality file limit exceeded" +#: cinder/backup/drivers/swift.py:271 +msgid "reading chunk of data from volume" msgstr "" -#: cinder/api/openstack/compute/servers.py:485 -msgid "Personality file path too long" -msgstr "" - -#: cinder/api/openstack/compute/servers.py:487 -msgid "Personality file content too long" -msgstr "" - -#: cinder/api/openstack/compute/servers.py:501 -msgid "Server name is not a string or unicode" +#: cinder/backup/drivers/swift.py:278 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:505 -msgid "Server name is an empty string" +#: cinder/backup/drivers/swift.py:287 +msgid "not compressing data" msgstr "" -#: cinder/api/openstack/compute/servers.py:509 -msgid "Server name must be less than 256 characters." 
+#: cinder/backup/drivers/swift.py:291 +msgid "About to put_object" msgstr "" -#: cinder/api/openstack/compute/servers.py:527 +#: cinder/backup/drivers/swift.py:297 #, python-format -msgid "Bad personality format: missing %s" +msgid "swift MD5 for %(object_name)s: %(etag)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:530 -msgid "Bad personality format" +#: cinder/backup/drivers/swift.py:301 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:535 +#: cinder/backup/drivers/swift.py:304 #, python-format -msgid "Personality content for %s cannot be decoded" +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:550 -#, python-format -msgid "Bad networks format: network uuid is not in proper format (%s)" +#: cinder/backup/drivers/swift.py:312 +msgid "Calling eventlet.sleep(0)" msgstr "" -#: cinder/api/openstack/compute/servers.py:559 +#: cinder/backup/drivers/swift.py:328 cinder/backup/drivers/tsm.py:324 #, python-format -msgid "Invalid fixed IP address (%s)" +msgid "backup %s finished." msgstr "" -#: cinder/api/openstack/compute/servers.py:566 +#: cinder/backup/drivers/swift.py:345 #, python-format -msgid "Duplicate networks (%s) are not allowed" +msgid "v1 swift volume backup restore of %s started" msgstr "" -#: cinder/api/openstack/compute/servers.py:572 +#: cinder/backup/drivers/swift.py:350 #, python-format -msgid "Bad network format: missing %s" +msgid "metadata_object_names = %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:575 -msgid "Bad networks format" +#: cinder/backup/drivers/swift.py:356 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" msgstr "" -#: cinder/api/openstack/compute/servers.py:587 -msgid "Userdata content cannot be decoded" +#: cinder/backup/drivers/swift.py:362 +#, python-format +msgid "" +"restoring object from swift. backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:594 -msgid "accessIPv4 is not proper IPv4 format" +#: cinder/backup/drivers/swift.py:378 +#, python-format +msgid "decompressing data using %s algorithm" msgstr "" -#: cinder/api/openstack/compute/servers.py:601 -msgid "accessIPv6 is not proper IPv6 format" +#: cinder/backup/drivers/swift.py:401 +#, python-format +msgid "v1 swift volume backup restore of %s finished" msgstr "" -#: cinder/api/openstack/compute/servers.py:633 -msgid "Server name is not defined" +#: cinder/backup/drivers/swift.py:409 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:675 -#: cinder/api/openstack/compute/servers.py:740 -msgid "Invalid flavorRef provided." +#: cinder/backup/drivers/swift.py:423 +#, python-format +msgid "Restoring swift backup version %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:737 -msgid "Can not find requested image" +#: cinder/backup/drivers/swift.py:428 +#, python-format +msgid "No support to restore swift backup version %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:743 -msgid "Invalid key_name provided." 
+#: cinder/backup/drivers/swift.py:432 cinder/backup/drivers/tsm.py:378 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." msgstr "" -#: cinder/api/openstack/compute/servers.py:829 -#: cinder/api/openstack/compute/servers.py:849 -msgid "Instance has not been resized." +#: cinder/backup/drivers/swift.py:446 +msgid "swift error while listing objects, continuing with delete" msgstr "" -#: cinder/api/openstack/compute/servers.py:835 +#: cinder/backup/drivers/swift.py:455 #, python-format -msgid "Error in confirm-resize %s" +msgid "swift error while deleting object %s, continuing with delete" msgstr "" -#: cinder/api/openstack/compute/servers.py:855 +#: cinder/backup/drivers/swift.py:458 #, python-format -msgid "Error in revert-resize %s" +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:868 -msgid "Argument 'type' for reboot is not HARD or SOFT" +#: cinder/backup/drivers/swift.py:468 cinder/backup/drivers/tsm.py:440 +#, python-format +msgid "delete %s finished" msgstr "" -#: cinder/api/openstack/compute/servers.py:872 -msgid "Missing argument 'type' for reboot" +#: cinder/backup/drivers/tsm.py:85 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to create device hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:885 +#: cinder/backup/drivers/tsm.py:143 #, python-format -msgid "Error in reboot %s" +msgid "" +"backup: %(vol_id)s Failed to obtain backup success notification from " +"server.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:897 -msgid "Unable to locate requested flavor." +#: cinder/backup/drivers/tsm.py:173 +#, python-format +msgid "" +"restore: %(vol_id)s Failed.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:900 -msgid "Resize requires a change in size." +#: cinder/backup/drivers/tsm.py:199 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a block device." msgstr "" -#: cinder/api/openstack/compute/servers.py:924 -msgid "Malformed server entity" +#: cinder/backup/drivers/tsm.py:206 +#, python-format +msgid "backup: %(vol_id)s Failed. Cannot obtain real path to device %(path)s." msgstr "" -#: cinder/api/openstack/compute/servers.py:931 -msgid "Missing imageRef attribute" +#: cinder/backup/drivers/tsm.py:213 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a file." msgstr "" -#: cinder/api/openstack/compute/servers.py:940 -msgid "Invalid imageRef provided." 
+#: cinder/backup/drivers/tsm.py:260
+#, python-format
+msgid ""
+"backup: %(vol_id)s Failed to remove backup hardlink from %(vpath)s to "
+"%(bpath)s.\n"
+"stdout: %(out)s\n"
+" stderr: %(err)s"
msgstr ""

-#: cinder/api/openstack/compute/servers.py:949
-msgid "Missing flavorRef attribute"
+#: cinder/backup/drivers/tsm.py:286
+#, python-format
+msgid ""
+"starting backup of volume: %(volume_id)s to TSM, volume path: "
+"%(volume_path)s,"
msgstr ""

-#: cinder/api/openstack/compute/servers.py:962
-msgid "No adminPass was specified"
+#: cinder/backup/drivers/tsm.py:298
+#, python-format
+msgid ""
+"backup: %(vol_id)s Failed to run dsmc on %(bpath)s.\n"
+"stdout: %(out)s\n"
+" stderr: %(err)s"
msgstr ""

-#: cinder/api/openstack/compute/servers.py:966
-#: cinder/api/openstack/compute/servers.py:1144
-msgid "Invalid adminPass"
+#: cinder/backup/drivers/tsm.py:308
+#, python-format
+msgid ""
+"backup: %(vol_id)s Failed to run dsmc due to invalid arguments on "
+"%(bpath)s.\n"
+"stdout: %(out)s\n"
+" stderr: %(err)s"
msgstr ""

-#: cinder/api/openstack/compute/servers.py:980
-msgid "Unable to parse metadata key/value pairs."
+#: cinder/backup/drivers/tsm.py:338
+#, python-format
+msgid ""
+"restore: starting restore of backup from TSM to volume %(volume_id)s, "
+"backup: %(backup_id)s"
msgstr ""

-#: cinder/api/openstack/compute/servers.py:993
-msgid "Resize request has invalid 'flavorRef' attribute."
+#: cinder/backup/drivers/tsm.py:352
+#, python-format
+msgid ""
+"restore: %(vol_id)s Failed to run dsmc on %(bpath)s.\n"
+"stdout: %(out)s\n"
+" stderr: %(err)s"
msgstr ""

-#: cinder/api/openstack/compute/servers.py:996
-msgid "Resize requests require 'flavorRef' attribute."
+#: cinder/backup/drivers/tsm.py:362
+#, python-format
+msgid ""
+"restore: %(vol_id)s Failed to run dsmc due to invalid arguments on "
+"%(bpath)s.\n"
+"stdout: %(out)s\n"
+" stderr: %(err)s"
msgstr ""

-#: cinder/api/openstack/compute/servers.py:1014
-#: cinder/api/openstack/compute/contrib/aggregates.py:142
-#: cinder/api/openstack/compute/contrib/networks.py:65
-msgid "Invalid request body"
+#: cinder/backup/drivers/tsm.py:413
+#, python-format
+msgid ""
+"delete: %(vol_id)s Failed to run dsmc with stdout: %(out)s\n"
+" stderr: %(err)s"
msgstr ""

-#: cinder/api/openstack/compute/servers.py:1019
-msgid "Could not parse imageRef from request."
+#: cinder/backup/drivers/tsm.py:421
+#, python-format
+msgid ""
+"delete: %(vol_id)s Failed to run dsmc due to invalid arguments with "
+"stdout: %(out)s\n"
+" stderr: %(err)s"
msgstr ""

-#: cinder/api/openstack/compute/servers.py:1071
-msgid "Instance could not be found"
+#: cinder/backup/drivers/tsm.py:432
+#, python-format
+msgid ""
+"delete: %(vol_id)s Failed with stdout: %(out)s\n"
+" stderr: %(err)s"
msgstr ""

-#: cinder/api/openstack/compute/servers.py:1074
-msgid "Cannot find image for rebuild"
+#: cinder/brick/exception.py:55
+#, python-format
+msgid "Exception in string format operation. msg='%s'"
msgstr ""

-#: cinder/api/openstack/compute/servers.py:1103
-msgid "createImage entity requires name attribute"
+#: cinder/brick/exception.py:85
+msgid "We are unable to locate any Fibre Channel devices."
msgstr ""

-#: cinder/api/openstack/compute/servers.py:1112
-#: cinder/api/openstack/compute/contrib/admin_actions.py:238
-msgid "Invalid metadata"
+#: cinder/brick/exception.py:89
+msgid "Unable to find a Fibre Channel volume device."
msgstr "" -#: cinder/api/openstack/compute/servers.py:1167 +#: cinder/brick/exception.py:93 #, python-format -msgid "Removing options '%(unk_opt_str)s' from query" +msgid "Volume device not found at %(device)s." msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:60 +#: cinder/brick/exception.py:97 #, python-format -msgid "Compute.api::pause %s" +msgid "Unable to find Volume Group: %(vg_name)s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:77 +#: cinder/brick/exception.py:101 #, python-format -msgid "Compute.api::unpause %s" +msgid "Failed to create Volume Group: %(vg_name)s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:94 +#: cinder/brick/exception.py:105 #, python-format -msgid "compute.api::suspend %s" +msgid "Failed to create iscsi target for volume %(volume_id)s." msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:111 +#: cinder/brick/exception.py:109 #, python-format -msgid "compute.api::resume %s" +msgid "Failed to remove iscsi target for volume %(volume_id)s." msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:127 +#: cinder/brick/exception.py:113 #, python-format -msgid "Error in migrate %s" +msgid "Failed to attach iSCSI target for volume %(volume_id)s." msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:141 +#: cinder/brick/exception.py:117 #, python-format -msgid "Compute.api::reset_network %s" -msgstr "" - -#: cinder/api/openstack/compute/contrib/admin_actions.py:154 -#: cinder/api/openstack/compute/contrib/admin_actions.py:170 -#: cinder/api/openstack/compute/contrib/admin_actions.py:186 -#: cinder/api/openstack/compute/contrib/multinic.py:41 -#: cinder/api/openstack/compute/contrib/rescue.py:44 -msgid "Server not found" +msgid "Connect to volume via protocol %(protocol)s not supported." msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:157 +#: cinder/brick/initiator/connector.py:127 #, python-format -msgid "Compute.api::inject_network_info %s" +msgid "Invalid InitiatorConnector protocol specified %(protocol)s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:173 +#: cinder/brick/initiator/connector.py:140 #, python-format -msgid "Compute.api::lock %s" +msgid "Failed to access the device on the path %(path)s: %(error)s %(info)s." msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:189 +#: cinder/brick/initiator/connector.py:229 #, python-format -msgid "Compute.api::unlock %s" +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. 
Try" +" number: %(tries)s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:219 +#: cinder/brick/initiator/connector.py:242 #, python-format -msgid "createBackup entity requires %s attribute" -msgstr "" - -#: cinder/api/openstack/compute/contrib/admin_actions.py:223 -msgid "Malformed createBackup entity" -msgstr "" - -#: cinder/api/openstack/compute/contrib/admin_actions.py:229 -msgid "createBackup attribute 'rotation' must be an integer" +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:244 -#: cinder/api/openstack/compute/contrib/console_output.py:47 -#: cinder/api/openstack/compute/contrib/console_output.py:59 -#: cinder/api/openstack/compute/contrib/consoles.py:49 -#: cinder/api/openstack/compute/contrib/consoles.py:60 -#: cinder/api/openstack/compute/contrib/server_action_list.py:49 -#: cinder/api/openstack/compute/contrib/server_diagnostics.py:47 -#: cinder/api/openstack/compute/contrib/server_start_stop.py:38 -msgid "Instance not found" +#: cinder/brick/initiator/connector.py:317 +#, python-format +msgid "Could not find the iSCSI Initiator File %s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:272 -msgid "host and block_migration must be specified." +#: cinder/brick/initiator/connector.py:609 +msgid "We are unable to locate any Fibre Channel devices" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:284 +#: cinder/brick/initiator/connector.py:619 #, python-format -msgid "Live migration of instance %(id)s to host %(host)s failed" +msgid "Looking for Fibre Channel dev %(device)s" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:76 -#, python-format -msgid "" -"Cannot create aggregate with name %(name)s and availability zone " -"%(avail_zone)s" +#: cinder/brick/initiator/connector.py:629 +msgid "Fibre Channel volume device not found." msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:88 +#: cinder/brick/initiator/connector.py:633 #, python-format -msgid "Cannot show aggregate: %(id)s" +msgid "Fibre volume not yet found. Will rescan & retry. Try number: %(tries)s" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:114 +#: cinder/brick/initiator/connector.py:649 #, python-format -msgid "Cannot update aggregate: %(id)s" +msgid "Found Fibre Channel volume %(name)s (after %(tries)s rescans)" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:126 +#: cinder/brick/initiator/connector.py:658 #, python-format -msgid "Cannot delete aggregate: %(id)s" +msgid "Multipath device discovered %(device)s" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:139 +#: cinder/brick/initiator/connector.py:776 #, python-format -msgid "Aggregates does not have %s action" +msgid "AoE volume not yet found at: %(path)s. 
Try number: %(tries)s" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:152 -#: cinder/api/openstack/compute/contrib/aggregates.py:158 +#: cinder/brick/initiator/connector.py:789 #, python-format -msgid "Cannot add host %(host)s in aggregate %(id)s" +msgid "Found AoE device %(path)s (after %(tries)s rediscover)" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:171 -#: cinder/api/openstack/compute/contrib/aggregates.py:175 +#: cinder/brick/initiator/connector.py:815 #, python-format -msgid "Cannot remove host %(host)s in aggregate %(id)s" +msgid "aoe-discover: stdout=%(out)s stderr%(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:195 +#: cinder/brick/initiator/connector.py:825 #, python-format -msgid "Cannot set metadata %(metadata)s in aggregate %(id)s" +msgid "aoe-revalidate %(dev)s: stdout=%(out)s stderr%(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/certificates.py:75 -msgid "Only root certificate can be retrieved." +#: cinder/brick/initiator/connector.py:834 +#, python-format +msgid "aoe-flush %(dev)s: stdout=%(out)s stderr%(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/cloudpipe.py:146 +#: cinder/brick/initiator/connector.py:858 msgid "" -"Unable to claim IP for VPN instances, ensure it isn't running, and try " -"again in a few minutes" +"Connection details not present. RemoteFsClient may not initialize " +"properly." msgstr "" -#: cinder/api/openstack/compute/contrib/consoles.py:44 -msgid "Missing type specification" +#: cinder/brick/initiator/connector.py:915 +msgid "Invalid connection_properties specified no device_path attribute" msgstr "" -#: cinder/api/openstack/compute/contrib/consoles.py:56 -msgid "Invalid type specification" +#: cinder/brick/initiator/linuxfc.py:50 cinder/brick/initiator/linuxfc.py:56 +msgid "systool is not installed" msgstr "" -#: cinder/api/openstack/compute/contrib/disk_config.py:44 +#: cinder/brick/initiator/linuxscsi.py:99 +#: cinder/brick/initiator/linuxscsi.py:107 +#: cinder/brick/initiator/linuxscsi.py:124 #, python-format -msgid "%s must be either 'MANUAL' or 'AUTO'." +msgid "multipath call failed exit (%(code)s)" msgstr "" -#: cinder/api/openstack/compute/contrib/extended_server_attributes.py:77 -#: cinder/api/openstack/compute/contrib/extended_status.py:61 -msgid "Server not found." +#: cinder/brick/initiator/linuxscsi.py:145 +#, python-format +msgid "Couldn't find multipath device %(line)s" msgstr "" -#: cinder/api/openstack/compute/contrib/flavorextradata.py:61 -#: cinder/api/openstack/compute/contrib/flavorextradata.py:91 -msgid "Flavor not found." +#: cinder/brick/initiator/linuxscsi.py:149 +#, python-format +msgid "Found multipath device = %(mdev)s" msgstr "" -#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:49 -#: cinder/api/openstack/compute/contrib/volumetypes.py:158 -msgid "No Request Body" +#: cinder/brick/iscsi/iscsi.py:140 +msgid "Attempting recreate of backing lun..." msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:159 +#: cinder/brick/iscsi/iscsi.py:158 #, python-format -msgid "No more floating ips in pool %s." -msgstr "" - -#: cinder/api/openstack/compute/contrib/floating_ips.py:161 -msgid "No more floating ips available." 
+msgid "" +"Failed to recover attempt to create iscsi backing lun for volume " +"id:%(vol_id)s: %(e)s" msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:201 -#: cinder/api/openstack/compute/contrib/floating_ips.py:230 -#: cinder/api/openstack/compute/contrib/security_groups.py:571 -#: cinder/api/openstack/compute/contrib/security_groups.py:604 -msgid "Missing parameter dict" +#: cinder/brick/iscsi/iscsi.py:177 +#, python-format +msgid "Creating iscsi_target for: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:204 -#: cinder/api/openstack/compute/contrib/floating_ips.py:233 -msgid "Address not specified" +#: cinder/brick/iscsi/iscsi.py:184 +#, python-format +msgid "" +"Created volume path %(vp)s,\n" +"content: %(vc)%" msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:213 -msgid "No fixed ips associated to instance" +#: cinder/brick/iscsi/iscsi.py:216 cinder/brick/iscsi/iscsi.py:365 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s" msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:216 -msgid "Associate floating ip failed" +#: cinder/brick/iscsi/iscsi.py:227 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:144 +#: cinder/brick/iscsi/iscsi.py:258 #, python-format -msgid "Invalid status: '%s'" +msgid "Removing iscsi_target for: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:148 +#: cinder/brick/iscsi/iscsi.py:262 #, python-format -msgid "Invalid mode: '%s'" +msgid "Volume path %s does not exist, nothing to remove." msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:152 +#: cinder/brick/iscsi/iscsi.py:280 #, python-format -msgid "Invalid update setting: '%s'" +msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:170 -#, python-format -msgid "Putting host %(host)s in maintenance mode %(mode)s." +#: cinder/brick/iscsi/iscsi.py:290 cinder/brick/iscsi/iscsi.py:550 +msgid "valid iqn needed for show_target" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:181 +#: cinder/brick/iscsi/iscsi.py:375 #, python-format -msgid "Setting host %(host)s to %(state)s." +msgid "Removing iscsi_target for volume: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:230 -msgid "Describe-resource is admin only functionality" +#: cinder/brick/iscsi/iscsi.py:469 +msgid "cinder-rtstool is not installed correctly" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:238 -msgid "Host not found" +#: cinder/brick/iscsi/iscsi.py:489 +#, python-format +msgid "Creating iscsi_target for volume: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/keypairs.py:70 -msgid "Keypair name contains unsafe characters" +#: cinder/brick/iscsi/iscsi.py:513 cinder/brick/iscsi/iscsi.py:522 +#, python-format +msgid "Failed to create iscsi target for volume id:%s." msgstr "" -#: cinder/api/openstack/compute/contrib/keypairs.py:95 -msgid "Keypair name must be between 1 and 255 characters long" +#: cinder/brick/iscsi/iscsi.py:532 +#, python-format +msgid "Removing iscsi_target: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/keypairs.py:100 +#: cinder/brick/iscsi/iscsi.py:542 #, python-format -msgid "Key pair '%s' already exists." +msgid "Failed to remove iscsi target for volume id:%s." 
msgstr "" -#: cinder/api/openstack/compute/contrib/multinic.py:52 -msgid "Missing 'networkId' argument for addFixedIp" +#: cinder/brick/iscsi/iscsi.py:571 +#, python-format +msgid "Failed to add initiator iqn %s to target" msgstr "" -#: cinder/api/openstack/compute/contrib/multinic.py:68 -msgid "Missing 'address' argument for removeFixedIp" +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" msgstr "" -#: cinder/api/openstack/compute/contrib/multinic.py:77 +#: cinder/brick/local_dev/lvm.py:76 cinder/brick/local_dev/lvm.py:158 +#: cinder/brick/local_dev/lvm.py:474 cinder/brick/local_dev/lvm.py:503 +#: cinder/brick/local_dev/lvm.py:546 cinder/brick/local_dev/lvm.py:609 #, python-format -msgid "Unable to find address %r" +msgid "Cmd :%s" msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:62 +#: cinder/brick/local_dev/lvm.py:77 cinder/brick/local_dev/lvm.py:159 +#: cinder/brick/local_dev/lvm.py:475 cinder/brick/local_dev/lvm.py:504 +#: cinder/brick/local_dev/lvm.py:547 cinder/brick/local_dev/lvm.py:610 #, python-format -msgid "Network does not have %s action" +msgid "StdOut :%s" msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:70 +#: cinder/brick/local_dev/lvm.py:78 cinder/brick/local_dev/lvm.py:160 +#: cinder/brick/local_dev/lvm.py:476 cinder/brick/local_dev/lvm.py:505 +#: cinder/brick/local_dev/lvm.py:548 cinder/brick/local_dev/lvm.py:611 #, python-format -msgid "Disassociating network with id %s" +msgid "StdErr :%s" msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:74 -#: cinder/api/openstack/compute/contrib/networks.py:91 -#: cinder/api/openstack/compute/contrib/networks.py:101 -msgid "Network not found" +#: cinder/brick/local_dev/lvm.py:82 +#, python-format +msgid "Unable to locate Volume Group %s" msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:87 -#, python-format -msgid "Showing network with id %s" +#: cinder/brick/local_dev/lvm.py:157 +msgid "Error querying thin pool about data_percent" msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:97 +#: cinder/brick/local_dev/lvm.py:370 #, python-format -msgid "Deleting network with id %s" +msgid "Unable to find VG: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/scheduler_hints.py:41 -msgid "Malformed scheduler_hints attribute" +#: cinder/brick/local_dev/lvm.py:420 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:222 -msgid "Security group id should be integer" +#: cinder/brick/local_dev/lvm.py:473 +msgid "Error creating Volume" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:243 -msgid "Security group is still in use" +#: cinder/brick/local_dev/lvm.py:489 +#, python-format +msgid "Unable to find LV: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:295 -#, python-format -msgid "Security group %s already exists" +#: cinder/brick/local_dev/lvm.py:502 +msgid "Error creating snapshot" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:315 -#, python-format -msgid "Security group %s is not a string or unicode" +#: cinder/brick/local_dev/lvm.py:545 +msgid "Error activating LV" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:318 +#: cinder/brick/local_dev/lvm.py:563 #, python-format -msgid "Security group %s cannot be empty." 
+msgid "Error reported running lvremove: CMD: %(command)s, RESPONSE: %(response)s" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:321 -#, python-format -msgid "Security group %s should not be greater than 255 characters." +#: cinder/brick/local_dev/lvm.py:568 +msgid "Attempting udev settle and retry of lvremove..." msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:348 -msgid "Parent group id is not integer" +#: cinder/brick/local_dev/lvm.py:608 +msgid "Error extending Volume" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:351 -#, python-format -msgid "Security group (%s) not found" +#: cinder/brick/remotefs/remotefs.py:39 +msgid "nfs_mount_point_base required" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:369 -msgid "Not enough parameters to build a valid rule." +#: cinder/brick/remotefs/remotefs.py:45 +msgid "glusterfs_mount_point_base required" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:376 +#: cinder/brick/remotefs/remotefs.py:86 #, python-format -msgid "This rule already exists in group %s" +msgid "Already mounted: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:414 -msgid "Parent or group id is not integer" +#: cinder/common/config.py:125 +msgid "Deploy v1 of the Cinder API." msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:507 -msgid "Rule id is not integer" +#: cinder/common/config.py:128 +msgid "Deploy v2 of the Cinder API." msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:510 -#, python-format -msgid "Rule (%s) not found" +#: cinder/common/sqlalchemyutils.py:66 +#: cinder/openstack/common/db/sqlalchemy/utils.py:72 +msgid "Id not in sort_keys; is sort_keys unique?" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:574 -#: cinder/api/openstack/compute/contrib/security_groups.py:607 -msgid "Security group not specified" +#: cinder/common/sqlalchemyutils.py:114 +#: cinder/openstack/common/db/sqlalchemy/utils.py:120 +msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:578 -#: cinder/api/openstack/compute/contrib/security_groups.py:611 -msgid "Security group name cannot be empty" +#: cinder/compute/nova.py:97 +#, python-format +msgid "Novaclient connection created using URL: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/server_start_stop.py:45 -#, python-format -msgid "start instance %r" +#: cinder/db/sqlalchemy/api.py:63 +msgid "Use of empty request context is deprecated" msgstr "" -#: cinder/api/openstack/compute/contrib/server_start_stop.py:54 +#: cinder/db/sqlalchemy/api.py:190 #, python-format -msgid "stop instance %r" +msgid "Unrecognized read_deleted value '%s'" msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:73 -#: cinder/api/openstack/volume/volumes.py:106 +#: cinder/db/sqlalchemy/api.py:843 #, python-format -msgid "vol=%s" +msgid "Change will make usage less than 0 for the following resources: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:146 -#: cinder/api/openstack/volume/volumes.py:184 +#: cinder/db/sqlalchemy/api.py:1842 #, python-format -msgid "Delete volume with id: %s" +msgid "VolumeType %s deletion failed, VolumeType in use." 
msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:329 +#: cinder/db/sqlalchemy/api.py:2530 #, python-format -msgid "Attach volume %(volume_id)s to instance %(server_id)s at %(device)s" +msgid "No backup with id %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2615 +msgid "Volume must be available" msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:481 -#: cinder/api/openstack/volume/snapshots.py:110 +#: cinder/db/sqlalchemy/api.py:2639 #, python-format -msgid "Delete snapshot with id: %s" +msgid "Volume in unexpected state %s, expected awaiting-transfer" msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:524 -#: cinder/api/openstack/volume/snapshots.py:150 +#: cinder/db/sqlalchemy/api.py:2662 #, python-format -msgid "Create snapshot from volume %s" +msgid "" +"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " +"%(status)s, expected awaiting-transfer" msgstr "" -#: cinder/auth/fakeldap.py:33 -msgid "Attempted to instantiate singleton" +#: cinder/db/sqlalchemy/migration.py:37 +msgid "version should be an integer" msgstr "" -#: cinder/auth/ldapdriver.py:650 -#, python-format -msgid "" -"Attempted to remove the last member of a group. Deleting the group at %s " -"instead." +#: cinder/db/sqlalchemy/migration.py:64 +msgid "Upgrade DB using Essex release first." msgstr "" -#: cinder/auth/manager.py:298 -#, python-format -msgid "Looking up user: %r" +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:240 +msgid "Exception while creating table." msgstr "" -#: cinder/auth/manager.py:302 -#, python-format -msgid "Failed authorization for access key %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:269 +msgid "Downgrade from initial Cinder install is unsupported." msgstr "" -#: cinder/auth/manager.py:308 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:49 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:74 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:105 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:45 +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:48 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:46 #, python-format -msgid "Using project name = user name (%s)" +msgid "Table |%s| not created!" msgstr "" -#: cinder/auth/manager.py:315 -#, python-format -msgid "failed authorization: no project named %(pjid)s (user=%(uname)s)" +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:127 +msgid "Dropping foreign key reservations_ibfk_1 failed." 
msgstr "" -#: cinder/auth/manager.py:324 -#, python-format -msgid "" -"Failed authorization: user %(uname)s not admin and not member of project " -"%(pjname)s" +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:133 +msgid "quota_classes table not dropped" msgstr "" -#: cinder/auth/manager.py:331 cinder/auth/manager.py:343 -#, python-format -msgid "user.secret: %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:140 +msgid "quota_usages table not dropped" msgstr "" -#: cinder/auth/manager.py:332 cinder/auth/manager.py:344 -#, python-format -msgid "expected_signature: %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:147 +msgid "reservations table not dropped" msgstr "" -#: cinder/auth/manager.py:333 cinder/auth/manager.py:345 -#, python-format -msgid "signature: %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:60 +msgid "Exception while creating table 'volume_glance_metadata'" msgstr "" -#: cinder/auth/manager.py:335 cinder/auth/manager.py:357 -#, python-format -msgid "Invalid signature for user %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:75 +msgid "volume_glance_metadata table not dropped" msgstr "" -#: cinder/auth/manager.py:353 -#, python-format -msgid "host_only_signature: %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:68 +msgid "backups table not dropped" msgstr "" -#: cinder/auth/manager.py:449 -msgid "Must specify project" +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:58 +msgid "snapshot_metadata table not dropped" msgstr "" -#: cinder/auth/manager.py:490 -#, python-format -msgid "Adding role %(role)s to user %(uid)s in project %(pid)s" +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:61 +msgid "transfers table not dropped" msgstr "" -#: cinder/auth/manager.py:493 -#, python-format -msgid "Adding sitewide role %(role)s to user %(uid)s" +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:31 +msgid "migrations table not dropped" msgstr "" -#: cinder/auth/manager.py:519 +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:61 #, python-format -msgid "Removing role %(role)s from user %(uid)s on project %(pid)s" +msgid "Table |%s| not created" msgstr "" -#: cinder/auth/manager.py:522 +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:37 #, python-format -msgid "Removing sitewide role %(role)s from user %(uid)s" +msgid "Exception while dropping table %s." msgstr "" -#: cinder/auth/manager.py:595 +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:100 #, python-format -msgid "Created project %(name)s with manager %(manager_user)s" +msgid "Exception while creating table %s." msgstr "" -#: cinder/auth/manager.py:613 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:34 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:43 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:49 #, python-format -msgid "modifying project %s" +msgid "Column |%s| not created!" 
msgstr "" -#: cinder/auth/manager.py:625 -#, python-format -msgid "Adding user %(uid)s to project %(pid)s" +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:92 +msgid "encryption_key_id column not dropped from volumes" msgstr "" -#: cinder/auth/manager.py:646 -#, python-format -msgid "Remove user %(uid)s from project %(pid)s" +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:100 +msgid "encryption_key_id column not dropped from snapshots" msgstr "" -#: cinder/auth/manager.py:676 -#, python-format -msgid "Deleting project %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:105 +msgid "volume_type_id column not dropped from snapshots" msgstr "" -#: cinder/auth/manager.py:734 -#, python-format -msgid "Created user %(rvname)s (admin: %(rvadmin)r)" +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:113 +msgid "encryption table not dropped" msgstr "" -#: cinder/auth/manager.py:743 -#, python-format -msgid "Deleting user %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:49 +msgid "Table quality_of_service_specs not created!" msgstr "" -#: cinder/auth/manager.py:753 -#, python-format -msgid "Access Key change for user %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:60 +msgid "Added qos_specs_id column to volume type table failed." msgstr "" -#: cinder/auth/manager.py:755 -#, python-format -msgid "Secret Key change for user %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:85 +msgid "Dropping foreign key volume_types_ibfk_1 failed" msgstr "" -#: cinder/auth/manager.py:757 -#, python-format -msgid "Admin status set to %(admin)r for user %(uid)s" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:93 +msgid "Dropping qos_specs_id column failed." msgstr "" -#: cinder/auth/manager.py:802 -#, python-format -msgid "No vpn data for project %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:100 +msgid "Dropping quality_of_service_specs table failed." msgstr "" -#: cinder/cloudpipe/pipelib.py:46 -msgid "Instance type for vpn instances" +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:59 +msgid "volume_admin_metadata table not dropped" msgstr "" -#: cinder/cloudpipe/pipelib.py:49 -msgid "Template for cloudpipe instance boot script" +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:48 +msgid "" +"Found existing 'default' entries in the quota_classes table. Skipping " +"insertion of default values." msgstr "" -#: cinder/cloudpipe/pipelib.py:52 -msgid "Network to push into openvpn config" +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:72 +msgid "Added default quota class data into the DB." msgstr "" -#: cinder/cloudpipe/pipelib.py:55 -msgid "Netmask to push into openvpn config" +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:74 +msgid "Default quota class data not inserted into the DB." msgstr "" -#: cinder/cloudpipe/pipelib.py:107 +#: cinder/image/glance.py:161 cinder/image/glance.py:169 #, python-format -msgid "Launching VPN for %s" +msgid "Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s." msgstr "" -#: cinder/compute/api.py:141 -msgid "No compute host specified" +#: cinder/image/image_utils.py:94 cinder/image/image_utils.py:199 +msgid "'qemu-img info' parsing failed." 
msgstr "" -#: cinder/compute/api.py:144 +#: cinder/image/image_utils.py:101 #, python-format -msgid "Unable to find host for Instance %s" +msgid "fmt=%(fmt)s backed by: %(backing_file)s" msgstr "" -#: cinder/compute/api.py:192 +#: cinder/image/image_utils.py:109 cinder/image/image_utils.py:192 #, python-format msgid "" -"Quota exceeded for %(pid)s, tried to set %(num_metadata)s metadata " -"properties" +"Size is %(image_size)dGB and doesn't fit in a volume of size " +"%(volume_size)dGB." msgstr "" -#: cinder/compute/api.py:203 +#: cinder/image/image_utils.py:157 #, python-format -msgid "Quota exceeded for %(pid)s, metadata property key or value too long" +msgid "" +"qemu-img is not installed and image is of type %s. Only RAW images can " +"be used if qemu-img is not installed." msgstr "" -#: cinder/compute/api.py:257 -msgid "Cannot run any more instances of this type." +#: cinder/image/image_utils.py:164 +msgid "" +"qemu-img is not installed and the disk format is not specified. Only RAW" +" images can be used if qemu-img is not installed." msgstr "" -#: cinder/compute/api.py:259 +#: cinder/image/image_utils.py:178 #, python-format -msgid "Can only run %s more instances of this type." +msgid "Copying image from %(tmp)s to volume %(dest)s - size: %(size)s" msgstr "" -#: cinder/compute/api.py:261 +#: cinder/image/image_utils.py:206 #, python-format -msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances. " +msgid "fmt=%(fmt)s backed by:%(backing_file)s" msgstr "" -#: cinder/compute/api.py:310 -msgid "Creating a raw instance" +#: cinder/image/image_utils.py:224 +#, python-format +msgid "Converted to %(vol_format)s, but format is now %(file_format)s" msgstr "" -#: cinder/compute/api.py:312 +#: cinder/image/image_utils.py:260 #, python-format -msgid "Using Kernel=%(kernel_id)s, Ramdisk=%(ramdisk_id)s" +msgid "Converted to %(f1)s, but format is now %(f2)s" msgstr "" -#: cinder/compute/api.py:383 -#, python-format -msgid "Going to run %s instances..." +#: cinder/keymgr/conf_key_mgr.py:78 +msgid "" +"config option keymgr.fixed_key has not been defined: some operations may " +"fail unexpectedly" msgstr "" -#: cinder/compute/api.py:447 -#, python-format -msgid "bdm %s" +#: cinder/keymgr/conf_key_mgr.py:80 +msgid "keymgr.fixed_key not defined" msgstr "" -#: cinder/compute/api.py:474 +#: cinder/keymgr/conf_key_mgr.py:134 #, python-format -msgid "block_device_mapping %s" +msgid "Not deleting key %s" msgstr "" -#: cinder/compute/api.py:591 +#: cinder/openstack/common/eventlet_backdoor.py:140 #, python-format -msgid "Sending create to scheduler for %(pid)s/%(uid)s's" +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" msgstr "" -#: cinder/compute/api.py:871 -msgid "Going to try to soft delete instance" +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" msgstr "" -#: cinder/compute/api.py:891 -msgid "No host for instance, deleting immediately" +#: cinder/openstack/common/fileutils.py:64 +#, python-format +msgid "Reloading cached file %s" msgstr "" -#: cinder/compute/api.py:939 -msgid "Going to try to terminate instance" +#: cinder/openstack/common/gettextutils.py:252 +msgid "Message objects do not support addition." msgstr "" -#: cinder/compute/api.py:977 -msgid "Going to try to stop instance" +#: cinder/openstack/common/gettextutils.py:261 +msgid "" +"Message objects do not support str() because they may contain non-ascii " +"characters. Please use unicode() or translate() instead." 
msgstr "" -#: cinder/compute/api.py:996 -msgid "Going to try to start instance" +#: cinder/openstack/common/imageutils.py:96 +msgid "Snapshot list encountered but no header found!" msgstr "" -#: cinder/compute/api.py:1000 +#: cinder/openstack/common/lockutils.py:102 #, python-format -msgid "Instance %(instance_uuid)s is not stopped. (%(vm_state)s" +msgid "Could not release the acquired lock `%s`" msgstr "" -#: cinder/compute/api.py:1071 cinder/volume/api.py:173 -#: cinder/volume/volume_types.py:64 +#: cinder/openstack/common/lockutils.py:189 #, python-format -msgid "Searching by: %s" +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." msgstr "" -#: cinder/compute/api.py:1201 +#: cinder/openstack/common/lockutils.py:200 #, python-format -msgid "Image type not recognized %s" +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." msgstr "" -#: cinder/compute/api.py:1369 -msgid "flavor_id is None. Assuming migration." +#: cinder/openstack/common/lockutils.py:227 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." msgstr "" -#: cinder/compute/api.py:1377 +#: cinder/openstack/common/lockutils.py:235 #, python-format -msgid "" -"Old instance type %(current_instance_type_name)s, new instance type " -"%(new_instance_type_name)s" +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." msgstr "" -#: cinder/compute/api.py:1644 +#: cinder/openstack/common/log.py:301 #, python-format -msgid "multiple fixedips exist, using the first: %s" +msgid "Deprecated: %s" msgstr "" -#: cinder/compute/instance_types.py:57 cinder/compute/instance_types.py:65 -msgid "create arguments must be positive integers" +#: cinder/openstack/common/log.py:402 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" msgstr "" -#: cinder/compute/instance_types.py:76 cinder/volume/volume_types.py:41 +#: cinder/openstack/common/log.py:453 #, python-format -msgid "DB error: %s" +msgid "syslog facility must be one of: %s" msgstr "" -#: cinder/compute/instance_types.py:86 +#: cinder/openstack/common/log.py:623 #, python-format -msgid "Instance type %s not found for deletion" +msgid "Fatal call to deprecated config: %(msg)s" msgstr "" -#: cinder/compute/manager.py:138 +#: cinder/openstack/common/loopingcall.py:82 #, python-format -msgid "check_instance_lock: decorating: |%s|" +msgid "task run outlasted interval by %s sec" msgstr "" -#: cinder/compute/manager.py:140 -#, python-format -msgid "" -"check_instance_lock: arguments: |%(self)s| |%(context)s| " -"|%(instance_uuid)s|" +#: cinder/openstack/common/loopingcall.py:89 +#: cinder/tests/brick/test_brick_connector.py:466 +msgid "in fixed duration looping call" msgstr "" -#: cinder/compute/manager.py:144 +#: cinder/openstack/common/loopingcall.py:129 #, python-format -msgid "check_instance_lock: locked: |%s|" +msgid "Dynamic looping call sleeping for %.02f seconds" msgstr "" -#: cinder/compute/manager.py:146 -#, python-format -msgid "check_instance_lock: admin: |%s|" +#: cinder/openstack/common/loopingcall.py:136 +msgid "in dynamic looping call" msgstr "" -#: cinder/compute/manager.py:151 +#: cinder/openstack/common/periodic_task.py:43 #, python-format -msgid "check_instance_lock: executing: |%s|" +msgid "Unexpected argument for periodic task creation: %(arg)s." 
msgstr "" -#: cinder/compute/manager.py:155 +#: cinder/openstack/common/periodic_task.py:134 #, python-format -msgid "check_instance_lock: not executing |%s|" +msgid "Skipping periodic task %(task)s because its interval is negative" msgstr "" -#: cinder/compute/manager.py:201 +#: cinder/openstack/common/periodic_task.py:139 #, python-format -msgid "Unable to load the virtualization driver: %s" +msgid "Skipping periodic task %(task)s because it is disabled" msgstr "" -#: cinder/compute/manager.py:223 +#: cinder/openstack/common/periodic_task.py:177 #, python-format -msgid "" -"Instance %(instance_uuid)s has been destroyed from under us while trying " -"to set it to ERROR" +msgid "Running periodic task %(full_task_name)s" msgstr "" -#: cinder/compute/manager.py:240 +#: cinder/openstack/common/periodic_task.py:186 #, python-format -msgid "Current state is %(drv_state)s, state in DB is %(db_state)s." +msgid "Error during %(full_task_name)s: %(e)s" msgstr "" -#: cinder/compute/manager.py:245 -msgid "Rebooting instance after cinder-compute restart." +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." msgstr "" -#: cinder/compute/manager.py:255 -msgid "Hypervisor driver does not support firewall rules" +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" msgstr "" -#: cinder/compute/manager.py:260 -msgid "Checking state" +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" msgstr "" -#: cinder/compute/manager.py:329 +#: cinder/openstack/common/policy.py:180 #, python-format -msgid "Setting up bdm %s" +msgid "No handler for matches of kind %s" msgstr "" -#: cinder/compute/manager.py:400 +#: cinder/openstack/common/processutils.py:127 #, python-format -msgid "Instance %s already deleted from database. Attempting forceful vm deletion" +msgid "Got unknown keyword args to utils.execute: %r" msgstr "" -#: cinder/compute/manager.py:406 +#: cinder/openstack/common/processutils.py:142 #, python-format -msgid "Exception encountered while terminating the instance %s" +msgid "Running cmd (subprocess): %s" msgstr "" -#: cinder/compute/manager.py:444 +#: cinder/openstack/common/processutils.py:167 +#: cinder/openstack/common/processutils.py:239 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:345 #, python-format -msgid "Instance %s not found." +msgid "Result was %s" msgstr "" -#: cinder/compute/manager.py:480 -msgid "Instance has already been created" +#: cinder/openstack/common/processutils.py:179 +#, python-format +msgid "%r failed. Retrying." msgstr "" -#: cinder/compute/manager.py:523 +#: cinder/openstack/common/processutils.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:318 #, python-format -msgid "" -"image_id=%(image_id)s, image_size_bytes=%(size_bytes)d, " -"allowed_size_bytes=%(allowed_size_bytes)d" +msgid "Running cmd (SSH): %s" msgstr "" -#: cinder/compute/manager.py:528 -#, python-format -msgid "" -"Image '%(image_id)s' size %(size_bytes)d exceeded instance_type allowed " -"size %(allowed_size_bytes)d" +#: cinder/openstack/common/processutils.py:220 +msgid "Environment not supported over SSH" msgstr "" -#: cinder/compute/manager.py:538 -msgid "Starting instance..." 
+#: cinder/openstack/common/processutils.py:224 +msgid "process_input not supported over SSH" msgstr "" -#: cinder/compute/manager.py:548 -msgid "Skipping network allocation for instance" +#: cinder/openstack/common/service.py:175 +#: cinder/openstack/common/service.py:269 +#, python-format +msgid "Caught %s, exiting" msgstr "" -#: cinder/compute/manager.py:561 -msgid "Instance failed network setup" +#: cinder/openstack/common/service.py:187 +msgid "Exception during rpc cleanup." msgstr "" -#: cinder/compute/manager.py:565 -#, python-format -msgid "Instance network_info: |%s|" +#: cinder/openstack/common/service.py:238 +msgid "Parent process has died unexpectedly, exiting" msgstr "" -#: cinder/compute/manager.py:578 -msgid "Instance failed block device setup" +#: cinder/openstack/common/service.py:275 +msgid "Unhandled exception" msgstr "" -#: cinder/compute/manager.py:594 -msgid "Instance failed to spawn" +#: cinder/openstack/common/service.py:308 +msgid "Forking too fast, sleeping" msgstr "" -#: cinder/compute/manager.py:615 -msgid "Deallocating network for instance" +#: cinder/openstack/common/service.py:327 +#, python-format +msgid "Started child %d" msgstr "" -#: cinder/compute/manager.py:672 +#: cinder/openstack/common/service.py:337 #, python-format -msgid "%(action_str)s instance" +msgid "Starting %d workers" msgstr "" -#: cinder/compute/manager.py:699 +#: cinder/openstack/common/service.py:354 #, python-format -msgid "Ignoring DiskNotFound: %s" +msgid "Child %(pid)d killed by signal %(sig)d" msgstr "" -#: cinder/compute/manager.py:708 +#: cinder/openstack/common/service.py:358 #, python-format -msgid "terminating bdm %s" +msgid "Child %(pid)s exited with status %(code)d" msgstr "" -#: cinder/compute/manager.py:742 cinder/compute/manager.py:1328 -#: cinder/compute/manager.py:1416 cinder/compute/manager.py:2501 +#: cinder/openstack/common/service.py:362 #, python-format -msgid "%s. Setting instance vm_state to ERROR" +msgid "pid %d not in child list" msgstr "" -#: cinder/compute/manager.py:811 +#: cinder/openstack/common/service.py:392 #, python-format -msgid "" -"Cannot rebuild instance [%(instance_uuid)s], because the given image does" -" not exist." 
+msgid "Caught %s, stopping children" msgstr "" -#: cinder/compute/manager.py:816 +#: cinder/openstack/common/service.py:410 #, python-format -msgid "Cannot rebuild instance [%(instance_uuid)s]: %(exc)s" +msgid "Waiting on %d children to exit" msgstr "" -#: cinder/compute/manager.py:823 +#: cinder/openstack/common/sslutils.py:98 #, python-format -msgid "Rebuilding instance %s" +msgid "Invalid SSL version : %s" msgstr "" -#: cinder/compute/manager.py:876 +#: cinder/openstack/common/strutils.py:86 #, python-format -msgid "Rebooting instance %s" +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" msgstr "" -#: cinder/compute/manager.py:891 +#: cinder/openstack/common/strutils.py:182 #, python-format -msgid "" -"trying to reboot a non-running instance: %(instance_uuid)s (state: " -"%(state)s expected: %(running)s)" +msgid "Invalid string format: %s" msgstr "" -#: cinder/compute/manager.py:933 +#: cinder/openstack/common/strutils.py:189 #, python-format -msgid "instance %s: snapshotting" +msgid "Unknown byte multiplier: %s" msgstr "" -#: cinder/compute/manager.py:939 +#: cinder/openstack/common/versionutils.py:69 #, python-format msgid "" -"trying to snapshot a non-running instance: %(instance_uuid)s (state: " -"%(state)s expected: %(running)s)" +"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and " +"may be removed in %(remove_in)s." msgstr "" -#: cinder/compute/manager.py:995 +#: cinder/openstack/common/versionutils.py:73 #, python-format -msgid "Found %(num_images)d images (rotation: %(rotation)d)" +msgid "" +"%(what)s is deprecated as of %(as_of)s and may be removed in " +"%(remove_in)s. It will not be superseded." msgstr "" -#: cinder/compute/manager.py:1001 -#, python-format -msgid "Rotating out %d backups" +#: cinder/openstack/common/crypto/utils.py:29 +msgid "An unknown error occurred in crypto utils." msgstr "" -#: cinder/compute/manager.py:1005 +#: cinder/openstack/common/crypto/utils.py:36 #, python-format -msgid "Deleting image %s" +msgid "Block size of %(given)d is too big, max = %(maximum)d" msgstr "" -#: cinder/compute/manager.py:1035 +#: cinder/openstack/common/crypto/utils.py:45 #, python-format -msgid "Failed to set admin password. Instance %s is not running" +msgid "Length of %(given)d is too long, max = %(maximum)d" msgstr "" -#: cinder/compute/manager.py:1041 -#, python-format -msgid "Instance %s: Root password set" +#: cinder/openstack/common/db/exception.py:44 +msgid "Invalid Parameter: Unicode is not supported by the current database." msgstr "" -#: cinder/compute/manager.py:1050 -msgid "set_admin_password is not implemented by this driver." +#: cinder/openstack/common/db/sqlalchemy/session.py:487 +msgid "DB exception wrapped." msgstr "" -#: cinder/compute/manager.py:1064 -msgid "Error setting admin password" +#: cinder/openstack/common/db/sqlalchemy/session.py:538 +#, python-format +msgid "Got mysql server has gone away: %s" msgstr "" -#: cinder/compute/manager.py:1079 +#: cinder/openstack/common/db/sqlalchemy/session.py:610 #, python-format -msgid "" -"trying to inject a file into a non-running instance: %(instance_uuid)s " -"(state: %(current_power_state)s expected: %(expected_state)s)" +msgid "SQL connection failed. %s attempts left." msgstr "" -#: cinder/compute/manager.py:1084 +#: cinder/openstack/common/db/sqlalchemy/utils.py:33 +msgid "Sort key supplied was not valid." 
+msgstr "" + +#: cinder/openstack/common/notifier/api.py:129 #, python-format -msgid "instance %(instance_uuid)s: injecting file to %(path)s" +msgid "%s not in valid priorities" msgstr "" -#: cinder/compute/manager.py:1098 +#: cinder/openstack/common/notifier/api.py:145 #, python-format msgid "" -"trying to update agent on a non-running instance: %(instance_uuid)s " -"(state: %(current_power_state)s expected: %(expected_state)s)" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" msgstr "" -#: cinder/compute/manager.py:1103 +#: cinder/openstack/common/notifier/api.py:164 #, python-format -msgid "instance %(instance_uuid)s: updating agent to %(url)s" +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead." msgstr "" -#: cinder/compute/manager.py:1116 +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 #, python-format -msgid "instance %s: rescuing" +msgid "Could not send notification to %(topic)s. Payload=%(message)s" msgstr "" -#: cinder/compute/manager.py:1141 +#: cinder/openstack/common/rpc/__init__.py:105 #, python-format -msgid "instance %s: unrescuing" +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." msgstr "" -#: cinder/compute/manager.py:1270 -msgid "destination same as source!" +#: cinder/openstack/common/rpc/amqp.py:83 +msgid "Pool creating new connection" msgstr "" -#: cinder/compute/manager.py:1287 +#: cinder/openstack/common/rpc/amqp.py:208 #, python-format -msgid "instance %s: migrating" +msgid "no calling threads waiting for msg_id : %s, message : %s" msgstr "" -#: cinder/compute/manager.py:1471 +#: cinder/openstack/common/rpc/amqp.py:216 #, python-format -msgid "instance %s: pausing" +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." msgstr "" -#: cinder/compute/manager.py:1489 +#: cinder/openstack/common/rpc/amqp.py:299 #, python-format -msgid "instance %s: unpausing" +msgid "unpacked context: %s" msgstr "" -#: cinder/compute/manager.py:1525 +#: cinder/openstack/common/rpc/amqp.py:345 #, python-format -msgid "instance %s: retrieving diagnostics" +msgid "UNIQUE_ID is %s." 
msgstr "" -#: cinder/compute/manager.py:1534 +#: cinder/openstack/common/rpc/amqp.py:414 #, python-format -msgid "instance %s: suspending" +msgid "received %s" msgstr "" -#: cinder/compute/manager.py:1556 +#: cinder/openstack/common/rpc/amqp.py:422 #, python-format -msgid "instance %s: resuming" +msgid "no method for message: %s" msgstr "" -#: cinder/compute/manager.py:1579 +#: cinder/openstack/common/rpc/amqp.py:423 #, python-format -msgid "instance %s: locking" +msgid "No method for message: %s" msgstr "" -#: cinder/compute/manager.py:1588 +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:280 #, python-format -msgid "instance %s: unlocking" +msgid "Expected exception during message handling (%s)" msgstr "" -#: cinder/compute/manager.py:1596 -#, python-format -msgid "instance %s: getting locked state" +#: cinder/openstack/common/rpc/amqp.py:459 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" msgstr "" -#: cinder/compute/manager.py:1606 +#: cinder/openstack/common/rpc/amqp.py:594 #, python-format -msgid "instance %s: reset network" +msgid "Making synchronous call on %s ..." msgstr "" -#: cinder/compute/manager.py:1614 +#: cinder/openstack/common/rpc/amqp.py:597 #, python-format -msgid "instance %s: inject network info" +msgid "MSG_ID is %s" msgstr "" -#: cinder/compute/manager.py:1618 +#: cinder/openstack/common/rpc/amqp.py:631 #, python-format -msgid "network_info to inject: |%s|" +msgid "Making asynchronous cast on %s..." msgstr "" -#: cinder/compute/manager.py:1655 -#, python-format -msgid "instance %s: getting vnc console" +#: cinder/openstack/common/rpc/amqp.py:640 +msgid "Making asynchronous fanout cast..." msgstr "" -#: cinder/compute/manager.py:1685 +#: cinder/openstack/common/rpc/amqp.py:668 #, python-format -msgid "Booting with volume %(volume_id)s at %(mountpoint)s" +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." msgstr "" -#: cinder/compute/manager.py:1703 +#: cinder/openstack/common/rpc/common.py:104 #, python-format msgid "" -"instance %(instance_uuid)s: attaching volume %(volume_id)s to " -"%(mountpoint)s" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." msgstr "" -#: cinder/compute/manager.py:1705 +#: cinder/openstack/common/rpc/common.py:121 #, python-format -msgid "Attaching volume %(volume_id)s to %(mountpoint)s" +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" msgstr "" -#: cinder/compute/manager.py:1714 -#, python-format -msgid "instance %(instance_uuid)s: attach failed %(mountpoint)s, removing" +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" msgstr "" -#: cinder/compute/manager.py:1724 +#: cinder/openstack/common/rpc/common.py:143 #, python-format -msgid "Attach failed %(mountpoint)s, removing" +msgid "Found duplicate message(%(msg_id)s). Skipping it." msgstr "" -#: cinder/compute/manager.py:1752 -#, python-format -msgid "Detach volume %(volume_id)s from mountpoint %(mp)s" +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." msgstr "" -#: cinder/compute/manager.py:1756 +#: cinder/openstack/common/rpc/common.py:151 #, python-format -msgid "Detaching volume from unknown instance %s" +msgid "Specified RPC version, %(version)s, not supported by this endpoint." 
msgstr "" -#: cinder/compute/manager.py:1822 +#: cinder/openstack/common/rpc/common.py:156 #, python-format msgid "" -"Creating tmpfile %s to notify to other compute nodes that they should " -"mount the same storage." -msgstr "" - -#: cinder/compute/manager.py:1884 -msgid "Instance has no volume." +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." msgstr "" -#: cinder/compute/manager.py:1916 +#: cinder/openstack/common/rpc/common.py:280 #, python-format -msgid "plug_vifs() failed %(cnt)d.Retry up to %(max_retry)d for %(hostname)s." +msgid "Failed to sanitize %(item)s. Key error %(err)s" msgstr "" -#: cinder/compute/manager.py:1973 +#: cinder/openstack/common/rpc/common.py:302 #, python-format -msgid "Pre live migration failed at %(dest)s" +msgid "Returning exception %s to caller" msgstr "" -#: cinder/compute/manager.py:2000 -msgid "post_live_migration() is started.." +#: cinder/openstack/common/rpc/impl_kombu.py:166 +#: cinder/openstack/common/rpc/impl_qpid.py:164 +msgid "Failed to process message... skipping it." msgstr "" -#: cinder/compute/manager.py:2030 -msgid "No floating_ip found" +#: cinder/openstack/common/rpc/impl_kombu.py:477 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" msgstr "" -#: cinder/compute/manager.py:2038 -msgid "No floating_ip found." +#: cinder/openstack/common/rpc/impl_kombu.py:499 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" msgstr "" -#: cinder/compute/manager.py:2040 +#: cinder/openstack/common/rpc/impl_kombu.py:536 #, python-format msgid "" -"Live migration: Unexpected error: cannot inherit floating ip.\n" -"%(e)s" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" msgstr "" -#: cinder/compute/manager.py:2073 +#: cinder/openstack/common/rpc/impl_kombu.py:552 #, python-format -msgid "Migrating instance to %(dest)s finished successfully." -msgstr "" - -#: cinder/compute/manager.py:2075 msgid "" -"You may see the error \"libvirt: QEMU error: Domain not found: no domain " -"with matching name.\" This error can be safely ignored." +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." msgstr "" -#: cinder/compute/manager.py:2090 -msgid "Post operation of migration started" +#: cinder/openstack/common/rpc/impl_kombu.py:606 +#: cinder/openstack/common/rpc/impl_qpid.py:507 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" msgstr "" -#: cinder/compute/manager.py:2226 +#: cinder/openstack/common/rpc/impl_kombu.py:624 +#: cinder/openstack/common/rpc/impl_qpid.py:522 #, python-format -msgid "Updated the info_cache for instance %s" +msgid "Timed out waiting for RPC response: %s" msgstr "" -#: cinder/compute/manager.py:2255 -msgid "Updating bandwidth usage cache" +#: cinder/openstack/common/rpc/impl_kombu.py:628 +#: cinder/openstack/common/rpc/impl_qpid.py:526 +#, python-format +msgid "Failed to consume message from queue: %s" msgstr "" -#: cinder/compute/manager.py:2277 -msgid "Updating host status" +#: cinder/openstack/common/rpc/impl_kombu.py:667 +#: cinder/openstack/common/rpc/impl_qpid.py:561 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" msgstr "" -#: cinder/compute/manager.py:2305 +#: cinder/openstack/common/rpc/impl_qpid.py:84 #, python-format -msgid "" -"Found %(num_db_instances)s in the database and %(num_vm_instances)s on " -"the hypervisor." 
+msgid "Invalid value for qpid_topology_version: %d" msgstr "" -#: cinder/compute/manager.py:2331 +#: cinder/openstack/common/rpc/impl_qpid.py:455 #, python-format -msgid "" -"During the sync_power process the instance %(uuid)s has moved from host " -"%(src)s to host %(dst)s" +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" msgstr "" -#: cinder/compute/manager.py:2344 +#: cinder/openstack/common/rpc/impl_qpid.py:461 #, python-format -msgid "" -"Instance %s is in the process of migrating to this host. Wait next " -"sync_power cycle before setting power state to NOSTATE" +msgid "Connected to AMQP server on %s" msgstr "" -#: cinder/compute/manager.py:2350 -msgid "" -"Instance found in database but not known by hypervisor. Setting power " -"state to NOSTATE" +#: cinder/openstack/common/rpc/impl_qpid.py:474 +msgid "Re-established AMQP queues" msgstr "" -#: cinder/compute/manager.py:2380 -msgid "FLAGS.reclaim_instance_interval <= 0, skipping..." +#: cinder/openstack/common/rpc/impl_qpid.py:534 +msgid "Error processing message. Skipping it." msgstr "" -#: cinder/compute/manager.py:2392 -msgid "Reclaiming deleted instance" +#: cinder/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." msgstr "" -#: cinder/compute/manager.py:2458 +#: cinder/openstack/common/rpc/impl_zmq.py:101 #, python-format -msgid "" -"Detected instance with name label '%(name)s' which is marked as DELETED " -"but still present on host." +msgid "Deserializing: %s" msgstr "" -#: cinder/compute/manager.py:2465 +#: cinder/openstack/common/rpc/impl_zmq.py:136 #, python-format -msgid "" -"Destroying instance with name label '%(name)s' which is marked as DELETED" -" but still present on host." +msgid "Connecting to %(addr)s with %(type)s" msgstr "" -#: cinder/compute/manager.py:2472 +#: cinder/openstack/common/rpc/impl_zmq.py:137 #, python-format -msgid "Unrecognized value '%(action)s' for FLAGS.running_deleted_instance_action" +msgid "-> Subscribed to %(subscribe)s" msgstr "" -#: cinder/compute/manager.py:2542 +#: cinder/openstack/common/rpc/impl_zmq.py:138 #, python-format -msgid "" -"Aggregate %(aggregate_id)s: unrecoverable state during operation on " -"%(host)s" +msgid "-> bind: %(bind)s" msgstr "" -#: cinder/compute/utils.py:142 -msgid "v4 subnets are required for legacy nw_info" +#: cinder/openstack/common/rpc/impl_zmq.py:146 +msgid "Could not open socket." msgstr "" -#: cinder/console/manager.py:77 cinder/console/vmrc_manager.py:70 -msgid "Adding console" +#: cinder/openstack/common/rpc/impl_zmq.py:158 +#, python-format +msgid "Subscribing to %s" msgstr "" -#: cinder/console/manager.py:97 -#, python-format -msgid "Tried to remove non-existent console %(console_id)s." +#: cinder/openstack/common/rpc/impl_zmq.py:200 +msgid "You cannot recv on this socket." msgstr "" -#: cinder/console/vmrc_manager.py:122 -#, python-format -msgid "Tried to remove non-existent console %(console_id)s." +#: cinder/openstack/common/rpc/impl_zmq.py:205 +msgid "You cannot send on this socket." msgstr "" -#: cinder/console/vmrc_manager.py:125 +#: cinder/openstack/common/rpc/impl_zmq.py:267 #, python-format -msgid "Removing console %(console_id)s." +msgid "Running func with context: %s" msgstr "" -#: cinder/console/xvp.py:98 -msgid "Rebuilding xvp conf" +#: cinder/openstack/common/rpc/impl_zmq.py:305 +msgid "Sending reply" msgstr "" -#: cinder/console/xvp.py:116 -#, python-format -msgid "Re-wrote %s" +#: cinder/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" msgstr "" -#: cinder/console/xvp.py:121 -msgid "Stopping xvp" +#: cinder/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" msgstr "" -#: cinder/console/xvp.py:134 -msgid "Starting xvp" +#: cinder/openstack/common/rpc/impl_zmq.py:387 +msgid "Consuming socket" msgstr "" -#: cinder/console/xvp.py:141 +#: cinder/openstack/common/rpc/impl_zmq.py:437 #, python-format -msgid "Error starting xvp: %s" +msgid "Creating proxy for topic: %s" msgstr "" -#: cinder/console/xvp.py:144 -msgid "Restarting xvp" +#: cinder/openstack/common/rpc/impl_zmq.py:443 +msgid "Topic contained dangerous characters." msgstr "" -#: cinder/console/xvp.py:146 -msgid "xvp not running..." +#: cinder/openstack/common/rpc/impl_zmq.py:475 +msgid "Topic socket file creation failed." msgstr "" -#: cinder/consoleauth/manager.py:63 +#: cinder/openstack/common/rpc/impl_zmq.py:481 #, python-format -msgid "Deleting Expired Token: (%s)" +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." msgstr "" -#: cinder/consoleauth/manager.py:75 +#: cinder/openstack/common/rpc/impl_zmq.py:497 #, python-format -msgid "Received Token: %(token)s, %(token_dict)s)" +msgid "Required IPC directory does not exist at %s" msgstr "" -#: cinder/consoleauth/manager.py:79 +#: cinder/openstack/common/rpc/impl_zmq.py:506 #, python-format -msgid "Checking Token: %(token)s, %(token_valid)s)" +msgid "Permission denied to IPC directory at %s" msgstr "" -#: cinder/db/sqlalchemy/api.py:57 -msgid "Use of empty request context is deprecated" +#: cinder/openstack/common/rpc/impl_zmq.py:509 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." msgstr "" -#: cinder/db/sqlalchemy/api.py:198 +#: cinder/openstack/common/rpc/impl_zmq.py:543 #, python-format -msgid "Unrecognized read_deleted value '%s'" +msgid "CONSUMER RECEIVED DATA: %s" msgstr "" -#: cinder/db/sqlalchemy/api.py:516 cinder/db/sqlalchemy/api.py:551 -#, python-format -msgid "No ComputeNode for %(host)s" +#: cinder/openstack/common/rpc/impl_zmq.py:562 +msgid "ZMQ Envelope version unsupported or unknown." msgstr "" -#: cinder/db/sqlalchemy/api.py:4019 cinder/db/sqlalchemy/api.py:4045 -#, python-format -msgid "No backend config with id %(sm_backend_id)s" +#: cinder/openstack/common/rpc/impl_zmq.py:590 +msgid "Skipping topic registration. Already registered." msgstr "" -#: cinder/db/sqlalchemy/api.py:4103 +#: cinder/openstack/common/rpc/impl_zmq.py:597 #, python-format -msgid "No sm_flavor called %(sm_flavor)s" +msgid "Consumer is a zmq.%s" msgstr "" -#: cinder/db/sqlalchemy/api.py:4147 -#, python-format -msgid "No sm_volume with id %(volume_id)s" +#: cinder/openstack/common/rpc/impl_zmq.py:649 +msgid "Creating payload" msgstr "" -#: cinder/db/sqlalchemy/migration.py:66 -msgid "python-migrate is not installed. Exiting." +#: cinder/openstack/common/rpc/impl_zmq.py:662 +msgid "Creating queue socket for reply waiter" msgstr "" -#: cinder/db/sqlalchemy/migration.py:78 -msgid "version should be an integer" +#: cinder/openstack/common/rpc/impl_zmq.py:675 +msgid "Sending cast" msgstr "" -#: cinder/db/sqlalchemy/session.py:137 -#, python-format -msgid "SQL connection failed. %s attempts left." 
+#: cinder/openstack/common/rpc/impl_zmq.py:678 +msgid "Cast sent; Waiting reply" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:48 -msgid "interface column not added to networks table" -msgstr "" - -#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:80 -#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:54 -#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:61 -#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:48 +#: cinder/openstack/common/rpc/impl_zmq.py:681 #, python-format -msgid "Table |%s| not created!" +msgid "Received message: %s" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:87 -msgid "VIF column not added to fixed_ips table" +#: cinder/openstack/common/rpc/impl_zmq.py:682 +msgid "Unpacking response" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:97 -#, python-format -msgid "join list for moving mac_addresses |%s|" +#: cinder/openstack/common/rpc/impl_zmq.py:691 +msgid "Unsupported or unknown ZMQ envelope returned." msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:39 -#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:60 -#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:61 -#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:99 -msgid "foreign key constraint couldn't be added" +#: cinder/openstack/common/rpc/impl_zmq.py:698 +msgid "RPC Message Invalid." msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:58 -msgid "foreign key constraint couldn't be dropped" +#: cinder/openstack/common/rpc/impl_zmq.py:721 +#, python-format +msgid "%(msg)s" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/045_add_network_priority.py:34 -msgid "priority column not added to networks table" +#: cinder/openstack/common/rpc/impl_zmq.py:724 +#, python-format +msgid "Sending message(s) to: %s" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:41 -#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:42 -#: cinder/db/sqlalchemy/migrate_repo/versions/064_change_instance_id_to_uuid_in_instance_actions.py:56 -#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:68 -msgid "foreign key constraint couldn't be removed" +#: cinder/openstack/common/rpc/impl_zmq.py:728 +msgid "No matchmaker results. Not casting." msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/049_add_instances_progress.py:34 -msgid "progress column not added to instances table" +#: cinder/openstack/common/rpc/impl_zmq.py:731 +msgid "No match from matchmaker." msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/055_convert_flavor_id_to_str.py:97 +#: cinder/openstack/common/rpc/impl_zmq.py:771 #, python-format -msgid "" -"Could not cast flavorid to integer: %s. Set flavorid to an integer-like " -"string to downgrade." -msgstr "" - -#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:69 -msgid "instance_info_caches tables not dropped" +msgid "topic is %s." 
msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/069_block_migration.py:41 -msgid "progress column not added to compute_nodes table" +#: cinder/openstack/common/rpc/impl_zmq.py:815 +#, python-format +msgid "rpc_zmq_matchmaker = %(orig)s is deprecated; use %(new)s instead" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:76 -msgid "dns_domains table not dropped" +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:60 -msgid "quota_classes table not dropped" +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." msgstr "" -#: cinder/image/glance.py:147 -msgid "Connection error contacting glance server, retrying" +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" msgstr "" -#: cinder/image/glance.py:153 cinder/network/quantum/melange_connection.py:104 -msgid "Maximum attempts reached" +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." msgstr "" -#: cinder/image/glance.py:278 +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#: cinder/openstack/common/rpc/matchmaker_ring.py:79 +#: cinder/openstack/common/rpc/matchmaker_ring.py:97 #, python-format -msgid "Creating image in Glance. Metadata passed in %s" +msgid "No key defining hosts for topic '%s', see ringfile" msgstr "" -#: cinder/image/glance.py:281 +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:54 #, python-format -msgid "Metadata after formatting for Glance %s" +msgid "extra_spec requirement '%(req)s' does not match '%(cap)s'" msgstr "" -#: cinder/image/glance.py:289 +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:67 #, python-format -msgid "Metadata returned from Glance formatted for Base %s" +msgid "%(host_state)s fails resource_type extra_specs requirements" msgstr "" -#: cinder/image/glance.py:331 cinder/image/glance.py:335 -msgid "Not the image owner" +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:43 +msgid "Re-scheduling is disabled." msgstr "" -#: cinder/image/glance.py:410 +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:52 #, python-format -msgid "%(timestamp)s does not follow any of the signatures: %(iso_formats)s" +msgid "Host %(host)s %(pass_msg)s. 
Previously tried hosts: %(hosts)s" msgstr "" -#: cinder/image/s3.py:309 -#, python-format -msgid "Failed to download %(image_location)s to %(image_path)s" +#: cinder/scheduler/driver.py:69 +msgid "Must implement host_passes_filters" msgstr "" -#: cinder/image/s3.py:328 -#, python-format -msgid "Failed to decrypt %(image_location)s to %(image_path)s" +#: cinder/scheduler/driver.py:74 +msgid "Must implement find_retype_host" msgstr "" -#: cinder/image/s3.py:340 -#, python-format -msgid "Failed to untar %(image_location)s to %(image_path)s" +#: cinder/scheduler/driver.py:78 +msgid "Must implement a fallback schedule" msgstr "" -#: cinder/image/s3.py:353 -#, python-format -msgid "Failed to upload %(image_location)s to %(image_path)s" +#: cinder/scheduler/driver.py:82 +msgid "Must implement schedule_create_volume" msgstr "" -#: cinder/image/s3.py:379 +#: cinder/scheduler/filter_scheduler.py:98 #, python-format -msgid "Failed to decrypt private key: %s" +msgid "cannot place volume %(id)s on %(host)s" msgstr "" -#: cinder/image/s3.py:387 +#: cinder/scheduler/filter_scheduler.py:114 #, python-format -msgid "Failed to decrypt initialization vector: %s" +msgid "No valid hosts for volume %(id)s with type %(type)s" msgstr "" -#: cinder/image/s3.py:398 +#: cinder/scheduler/filter_scheduler.py:125 #, python-format -msgid "Failed to decrypt image file %(image_file)s: %(err)s" +msgid "" +"Current host not valid for volume %(id)s with type %(type)s, migration " +"not allowed" msgstr "" -#: cinder/image/s3.py:410 -msgid "Unsafe filenames in image" +#: cinder/scheduler/filter_scheduler.py:156 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" msgstr "" -#: cinder/ipv6/account_identifier.py:38 cinder/ipv6/rfc2462.py:34 +#: cinder/scheduler/filter_scheduler.py:174 #, python-format -msgid "Bad mac for to_global_ipv6: %s" +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" msgstr "" -#: cinder/ipv6/account_identifier.py:40 cinder/ipv6/rfc2462.py:36 +#: cinder/scheduler/filter_scheduler.py:207 #, python-format -msgid "Bad prefix for to_global_ipv6: %s" +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" msgstr "" -#: cinder/ipv6/account_identifier.py:42 +#: cinder/scheduler/filter_scheduler.py:259 #, python-format -msgid "Bad project_id for to_global_ipv6: %s" +msgid "Filtered %s" msgstr "" -#: cinder/network/ldapdns.py:321 -msgid "This driver only supports type 'a' entries." +#: cinder/scheduler/filter_scheduler.py:276 +#, python-format +msgid "Choosing %s" msgstr "" -#: cinder/network/linux_net.py:166 +#: cinder/scheduler/host_manager.py:264 #, python-format -msgid "Attempted to remove chain %s which does not exist" +msgid "Ignoring %(service_name)s service update from %(host)s" msgstr "" -#: cinder/network/linux_net.py:192 +#: cinder/scheduler/host_manager.py:269 #, python-format -msgid "Unknown chain: %r" +msgid "Received %(service_name)s service update from %(host)s." msgstr "" -#: cinder/network/linux_net.py:215 +#: cinder/scheduler/host_manager.py:294 #, python-format -msgid "" -"Tried to remove rule that was not there: %(chain)r %(rule)r %(wrap)r " -"%(top)r" +msgid "volume service is down or disabled. (host: %s)" msgstr "" -#: cinder/network/linux_net.py:335 -msgid "IPTablesManager.apply completed with success" +#: cinder/scheduler/manager.py:63 +msgid "" +"ChanceScheduler and SimpleScheduler have been deprecated due to lack of " +"support for advanced features like: volume types, volume encryption, QoS " +"etc. 
These two schedulers can be fully replaced by FilterScheduler with " +"certain combination of filters and weighers." msgstr "" -#: cinder/network/linux_net.py:694 -#, python-format -msgid "Hupping dnsmasq threw %s" +#: cinder/scheduler/manager.py:98 cinder/scheduler/manager.py:100 +msgid "Failed to create scheduler manager volume flow" msgstr "" -#: cinder/network/linux_net.py:696 -#, python-format -msgid "Pid %d is stale, relaunching dnsmasq" +#: cinder/scheduler/manager.py:159 +msgid "New volume type not specified in request_spec." msgstr "" -#: cinder/network/linux_net.py:756 +#: cinder/scheduler/manager.py:174 #, python-format -msgid "killing radvd threw %s" +msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." msgstr "" -#: cinder/network/linux_net.py:758 +#: cinder/scheduler/manager.py:192 #, python-format -msgid "Pid %d is stale, relaunching radvd" +msgid "Failed to schedule_%(method)s: %(ex)s" msgstr "" -#: cinder/network/linux_net.py:967 +#: cinder/scheduler/scheduler_options.py:68 #, python-format -msgid "Starting VLAN inteface %s" +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" msgstr "" -#: cinder/network/linux_net.py:999 +#: cinder/scheduler/scheduler_options.py:78 #, python-format -msgid "Starting Bridge interface for %s" +msgid "Could not decode scheduler options: '%s'" msgstr "" -#: cinder/network/linux_net.py:1142 -#, python-format -msgid "Starting bridge %s " +#: cinder/scheduler/filters/capacity_filter.py:43 +msgid "Free capacity not set: volume node info collection broken." msgstr "" -#: cinder/network/linux_net.py:1149 +#: cinder/scheduler/filters/capacity_filter.py:57 #, python-format -msgid "Done starting bridge %s" +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" msgstr "" -#: cinder/network/linux_net.py:1167 -#, python-format -msgid "Failed unplugging gateway interface '%s'" +#: cinder/scheduler/flows/create_volume.py:53 +msgid "No volume_id provided to populate a request_spec from" msgstr "" -#: cinder/network/linux_net.py:1170 +#: cinder/scheduler/flows/create_volume.py:116 #, python-format -msgid "Unplugged gateway interface '%s'" +msgid "Failed to schedule_create_volume: %(cause)s" msgstr "" -#: cinder/network/manager.py:291 +#: cinder/scheduler/flows/create_volume.py:135 #, python-format -msgid "Fixed ip %(fixed_ip_id)s not found" +msgid "Failed notifying on %(topic)s payload %(payload)s" msgstr "" -#: cinder/network/manager.py:300 cinder/network/manager.py:496 +#: cinder/tests/fake_driver.py:57 cinder/volume/driver.py:784 #, python-format -msgid "Interface %(interface)s not found" +msgid "FAKE ISCSI: %s" msgstr "" -#: cinder/network/manager.py:315 +#: cinder/tests/fake_driver.py:76 cinder/volume/driver.py:884 #, python-format -msgid "floating IP allocation for instance |%s|" +msgid "FAKE ISER: %s" msgstr "" -#: cinder/network/manager.py:353 -#, python-format -msgid "floating IP deallocation for instance |%s|" +#: cinder/tests/fake_driver.py:97 +msgid "local_path not implemented" msgstr "" -#: cinder/network/manager.py:386 +#: cinder/tests/fake_driver.py:124 cinder/tests/fake_driver.py:129 #, python-format -msgid "Address |%(address)s| is not allocated" +msgid "LoggingVolumeDriver: %s" msgstr "" -#: cinder/network/manager.py:390 +#: cinder/tests/fake_utils.py:70 #, python-format -msgid "Address |%(address)s| is not allocated to your project |%(project)s|" +msgid "Faking execution of cmd (subprocess): %s" msgstr "" -#: cinder/network/manager.py:402 +#: 
cinder/tests/fake_utils.py:78 #, python-format -msgid "Quota exceeded for %s, tried to allocate address" +msgid "Faked command matched %s" msgstr "" -#: cinder/network/manager.py:614 +#: cinder/tests/fake_utils.py:94 #, python-format -msgid "" -"Database inconsistency: DNS domain |%s| is registered in the Cinder db but " -"not visible to either the floating or instance DNS driver. It will be " -"ignored." +msgid "Faked command raised an exception %s" msgstr "" -#: cinder/network/manager.py:660 +#: cinder/tests/fake_utils.py:97 #, python-format -msgid "Domain |%(domain)s| already exists, changing zone to |%(av_zone)s|." +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" msgstr "" -#: cinder/network/manager.py:670 +#: cinder/tests/test_misc.py:58 #, python-format -msgid "Domain |%(domain)s| already exists, changing project to |%(project)s|." +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" msgstr "" -#: cinder/network/manager.py:778 +#: cinder/tests/test_netapp_nfs.py:360 #, python-format -msgid "Disassociated %s stale fixed ip(s)" +msgid "Share %(share)s and file name %(file_name)s" msgstr "" -#: cinder/network/manager.py:782 -msgid "setting network host" +#: cinder/tests/test_rbd.py:768 cinder/volume/drivers/rbd.py:175 +msgid "flush() not supported in this version of librbd" msgstr "" -#: cinder/network/manager.py:896 +#: cinder/tests/test_storwize_svc.py:260 #, python-format -msgid "network allocations for instance |%s|" +msgid "unrecognized argument %s" msgstr "" -#: cinder/network/manager.py:901 +#: cinder/tests/test_storwize_svc.py:1507 #, python-format -msgid "networks retrieved for instance |%(instance_id)s|: |%(networks)s|" +msgid "Run CLI command: %s" msgstr "" -#: cinder/network/manager.py:930 +#: cinder/tests/test_storwize_svc.py:1510 #, python-format -msgid "network deallocation for instance |%s|" +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" msgstr "" -#: cinder/network/manager.py:1152 +#: cinder/tests/test_storwize_svc.py:1515 #, python-format msgid "" -"instance-dns-zone is |%(domain)s|, which is in availability zone " -"|%(zone)s|. Instance |%(instance)s| is in zone |%(zone2)s|. No DNS record" -" will be created." +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/network/manager.py:1227 +#: cinder/tests/test_volume_types.py:60 #, python-format -msgid "Unable to release %s because vif doesn't exist." +msgid "Given data: %s" msgstr "" -#: cinder/network/manager.py:1244 +#: cinder/tests/test_volume_types.py:61 #, python-format -msgid "Leased IP |%(address)s|" +msgid "Result data: %s" msgstr "" -#: cinder/network/manager.py:1248 +#: cinder/tests/test_xiv_ds8k.py:102 #, python-format -msgid "IP %s leased that is not associated" +msgid "Volume not found for instance %(instance_id)s." 
msgstr "" -#: cinder/network/manager.py:1256 -#, python-format -msgid "IP |%s| leased that isn't allocated" +#: cinder/tests/api/contrib/test_backups.py:741 +msgid "Invalid input" msgstr "" -#: cinder/network/manager.py:1261 +#: cinder/tests/integrated/test_login.py:29 #, python-format -msgid "Released IP |%(address)s|" +msgid "volume: %s" msgstr "" -#: cinder/network/manager.py:1265 +#: cinder/tests/integrated/api/client.py:32 #, python-format -msgid "IP %s released that is not associated" +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" msgstr "" -#: cinder/network/manager.py:1268 -#, python-format -msgid "IP %s released that was not leased" +#: cinder/tests/integrated/api/client.py:42 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:50 +msgid "Authorization error" msgstr "" -#: cinder/network/manager.py:1331 -msgid "cidr already in use" +#: cinder/tests/integrated/api/client.py:58 +msgid "Item not found" msgstr "" -#: cinder/network/manager.py:1334 +#: cinder/tests/integrated/api/client.py:100 #, python-format -msgid "requested cidr (%(cidr)s) conflicts with existing supernet (%(super)s)" +msgid "Doing %(method)s on %(relative_url)s" msgstr "" -#: cinder/network/manager.py:1345 +#: cinder/tests/integrated/api/client.py:103 #, python-format -msgid "" -"requested cidr (%(cidr)s) conflicts with existing smaller cidr " -"(%(smaller)s)" +msgid "Body: %s" msgstr "" -#: cinder/network/manager.py:1404 -msgid "Network already exists!" +#: cinder/tests/integrated/api/client.py:121 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" msgstr "" -#: cinder/network/manager.py:1423 +#: cinder/tests/integrated/api/client.py:148 #, python-format -msgid "Network must be disassociated from project %s before delete" +msgid "%(relative_uri)s => code %(http_status)s" msgstr "" -#: cinder/network/manager.py:1832 -msgid "" -"The sum between the number of networks and the vlan start cannot be " -"greater than 4094" +#: cinder/tests/integrated/api/client.py:159 +msgid "Unexpected status code" msgstr "" -#: cinder/network/manager.py:1839 +#: cinder/tests/integrated/api/client.py:166 #, python-format -msgid "" -"The network range is not big enough to fit %(num_networks)s. Network size" -" is %(network_size)s" +msgid "Decoding JSON: %s" msgstr "" -#: cinder/network/minidns.py:65 -msgid "This driver only supports type 'a'" +#: cinder/transfer/api.py:68 +msgid "Volume in unexpected state" msgstr "" -#: cinder/network/quantum/client.py:154 -msgid "Tenant ID not set" +#: cinder/transfer/api.py:102 cinder/volume/api.py:367 +msgid "status must be available" msgstr "" -#: cinder/network/quantum/client.py:180 +#: cinder/transfer/api.py:119 #, python-format -msgid "Quantum Client Request: %(method)s %(action)s" +msgid "Failed to create transfer record for %s" msgstr "" -#: cinder/network/quantum/client.py:196 +#: cinder/transfer/api.py:136 #, python-format -msgid "Quantum entity not found: %s" +msgid "Attempt to transfer %s with invalid auth key." msgstr "" -#: cinder/network/quantum/client.py:206 +#: cinder/transfer/api.py:156 cinder/volume/flows/api/create_volume.py:508 #, python-format -msgid "Server %(status_code)s error: %(data)s" +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" msgstr "" -#: cinder/network/quantum/client.py:210 +#: cinder/transfer/api.py:182 #, python-format -msgid "Unable to connect to server. 
Got error: %s" +msgid "Failed to update quota donating volumetransfer id %s" msgstr "" -#: cinder/network/quantum/client.py:228 +#: cinder/transfer/api.py:199 #, python-format -msgid "unable to deserialize object of type = '%s'" +msgid "Volume %s has been transferred." msgstr "" -#: cinder/network/quantum/manager.py:162 -msgid "QuantumManager does not use 'multi_host' parameter." +#: cinder/volume/api.py:143 +#, python-format +msgid "Unable to query if %s is in the availability zone set" msgstr "" -#: cinder/network/quantum/manager.py:166 -msgid "QuantumManager requires that only one network is created per call" +#: cinder/volume/api.py:171 cinder/volume/api.py:173 +msgid "Failed to create api volume flow" msgstr "" -#: cinder/network/quantum/manager.py:176 -msgid "QuantumManager does not use 'vlan_start' parameter." +#: cinder/volume/api.py:202 +msgid "Failed to update quota for deleting volume" msgstr "" -#: cinder/network/quantum/manager.py:182 -msgid "QuantumManager does not use 'vpn_start' parameter." +#: cinder/volume/api.py:214 +#, python-format +msgid "Volume status must be available or error, but current status is: %s" msgstr "" -#: cinder/network/quantum/manager.py:186 -msgid "QuantumManager does not use 'bridge' parameter." +#: cinder/volume/api.py:224 +msgid "Volume cannot be deleted while migrating" msgstr "" -#: cinder/network/quantum/manager.py:190 -msgid "QuantumManager does not use 'bridge_interface' parameter." +#: cinder/volume/api.py:229 +#, python-format +msgid "Volume still has %d dependent snapshots" msgstr "" -#: cinder/network/quantum/manager.py:195 -msgid "QuantumManager requires a valid (.1) gateway address." +#: cinder/volume/api.py:285 cinder/volume/api.py:350 +#: cinder/volume/qos_specs.py:240 cinder/volume/volume_types.py:67 +#, python-format +msgid "Searching by: %s" msgstr "" -#: cinder/network/quantum/manager.py:204 -#, python-format -msgid "" -"Unable to find existing quantum network for tenant '%(q_tenant_id)s' with" -" net-id '%(quantum_net_id)s'" +#: cinder/volume/api.py:370 +msgid "already attached" msgstr "" -#: cinder/network/quantum/manager.py:301 -#, python-format -msgid "network allocations for instance %s" +#: cinder/volume/api.py:377 +msgid "status must be in-use to detach" msgstr "" -#: cinder/network/quantum/manager.py:588 -#, python-format -msgid "" -"port deallocation failed for instance: |%(instance_id)s|, port_id: " -"|%(port_id)s|" +#: cinder/volume/api.py:388 +msgid "Volume status must be available to reserve" msgstr "" -#: cinder/network/quantum/manager.py:606 -#, python-format -msgid "" -"ipam deallocation failed for instance: |%(instance_id)s|, vif_uuid: " -"|%(vif_uuid)s|" +#: cinder/volume/api.py:464 +msgid "Snapshot cannot be created while volume is migrating" msgstr "" -#: cinder/network/quantum/melange_connection.py:96 -#, python-format -msgid "Server returned error: %s" +#: cinder/volume/api.py:468 +msgid "must be available" msgstr "" -#: cinder/network/quantum/melange_connection.py:98 -msgid "Connection error contacting melange service, retrying" +#: cinder/volume/api.py:490 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" msgstr "" -#: cinder/network/quantum/melange_connection.py:108 +#: cinder/volume/api.py:502 #, python-format msgid "" -"allocate IP on network |%(network_id)s| belonging to " -"|%(network_tenant_id)s| to this vif |%(vif_id)s| with mac " -"|%(mac_address)s| belonging to |%(project_id)s| " +"Quota exceeded 
for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" msgstr "" -#: cinder/network/quantum/melange_ipam_lib.py:133 -msgid "get_project_and_global_net_ids must be called with a non-null project_id" +#: cinder/volume/api.py:553 +msgid "Volume Snapshot status must be available or error" msgstr "" -#: cinder/network/quantum/cinder_ipam_lib.py:75 -msgid "Error creating network entry" +#: cinder/volume/api.py:581 cinder/volume/flows/api/create_volume.py:208 +msgid "Metadata property key blank" msgstr "" -#: cinder/network/quantum/cinder_ipam_lib.py:90 -#, python-format -msgid "No network with net_id = %s" +#: cinder/volume/api.py:585 +msgid "Metadata property key greater than 255 characters" msgstr "" -#: cinder/network/quantum/cinder_ipam_lib.py:221 -#, python-format -msgid "No fixed IPs to deallocate for vif %s" +#: cinder/volume/api.py:589 +msgid "Metadata property value greater than 255 characters" msgstr "" -#: cinder/network/quantum/quantum_connection.py:99 -#, python-format -msgid "Connecting interface %(interface_id)s to net %(net_id)s for %(tenant_id)s" +#: cinder/volume/api.py:720 cinder/volume/api.py:772 +msgid "Volume status must be available/in-use." msgstr "" -#: cinder/network/quantum/quantum_connection.py:113 -#, python-format -msgid "Deleting port %(port_id)s on net %(net_id)s for %(tenant_id)s" +#: cinder/volume/api.py:723 +msgid "Volume status is in-use." msgstr "" -#: cinder/notifier/api.py:115 -#, python-format -msgid "%s not in valid priorities" +#: cinder/volume/api.py:752 +msgid "Volume status must be available to extend." msgstr "" -#: cinder/notifier/api.py:130 +#: cinder/volume/api.py:757 #, python-format msgid "" -"Problem '%(e)s' attempting to send to notification system. " -"Payload=%(payload)s" +"New size for extend must be greater than current size. (current: " +"%(size)s, extended: %(new_size)s)" msgstr "" -#: cinder/notifier/list_notifier.py:65 -#, python-format -msgid "Problem '%(e)s' attempting to send to notification driver %(driver)s." +#: cinder/volume/api.py:778 +msgid "Volume is already part of an active migration" msgstr "" -#: cinder/notifier/rabbit_notifier.py:46 -#, python-format -msgid "Could not send notification to %(topic)s. Payload=%(message)s" +#: cinder/volume/api.py:784 +msgid "volume must not have snapshots" msgstr "" -#: cinder/rpc/amqp.py:146 +#: cinder/volume/api.py:797 #, python-format -msgid "Returning exception %s to caller" +msgid "No available service named %s" msgstr "" -#: cinder/rpc/amqp.py:188 -#, python-format -msgid "unpacked context: %s" +#: cinder/volume/api.py:803 +msgid "Destination host must be different than current host" msgstr "" -#: cinder/rpc/amqp.py:231 -#, python-format -msgid "received %s" +#: cinder/volume/api.py:833 +msgid "Source volume not mid-migration." msgstr "" -#: cinder/rpc/amqp.py:236 -#, python-format -msgid "no method for message: %s" +#: cinder/volume/api.py:837 +msgid "Destination volume not mid-migration." msgstr "" -#: cinder/rpc/amqp.py:237 +#: cinder/volume/api.py:842 #, python-format -msgid "No method for message: %s" +msgid "Destination has migration_status %(stat)s, expected %(exp)s." msgstr "" -#: cinder/rpc/amqp.py:321 -#, python-format -msgid "Making asynchronous call on %s ..." +#: cinder/volume/api.py:853 +msgid "Volume status must be available to update readonly flag." 
msgstr "" -#: cinder/rpc/amqp.py:324 +#: cinder/volume/api.py:862 #, python-format -msgid "MSG_ID is %s" +msgid "Unable to update type due to incorrect status on volume: %s" msgstr "" -#: cinder/rpc/amqp.py:346 +#: cinder/volume/api.py:868 #, python-format -msgid "Making asynchronous cast on %s..." +msgid "Volume %s is already part of an active migration." msgstr "" -#: cinder/rpc/amqp.py:354 -msgid "Making asynchronous fanout cast..." +#: cinder/volume/api.py:874 +#, python-format +msgid "migration_policy must be 'on-demand' or 'never', passed: %s" msgstr "" -#: cinder/rpc/amqp.py:379 +#: cinder/volume/api.py:887 #, python-format -msgid "Sending notification on %s..." +msgid "Invalid volume_type passed: %s" msgstr "" -#: cinder/rpc/common.py:54 +#: cinder/volume/api.py:900 #, python-format -msgid "" -"Remote error: %(exc_type)s %(value)s\n" -"%(traceback)s." +msgid "New volume_type same as original: %s" msgstr "" -#: cinder/rpc/common.py:71 -msgid "Timeout while waiting on RPC response." +#: cinder/volume/api.py:915 +msgid "Retype cannot change encryption requirements" msgstr "" -#: cinder/rpc/impl_kombu.py:111 -msgid "Failed to process message... skipping it." +#: cinder/volume/api.py:927 +msgid "Retype cannot change front-end qos specs for in-use volumes" msgstr "" -#: cinder/rpc/impl_kombu.py:407 +#: cinder/volume/driver.py:189 cinder/volume/drivers/netapp/nfs.py:174 #, python-format -msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgid "Recovering from a failed execute. Try number %s" msgstr "" -#: cinder/rpc/impl_kombu.py:430 +#: cinder/volume/driver.py:282 #, python-format -msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgid "copy_data_between_volumes %(src)s -> %(dest)s." msgstr "" -#: cinder/rpc/impl_kombu.py:466 +#: cinder/volume/driver.py:295 cinder/volume/driver.py:309 #, python-format -msgid "" -"Unable to connect to AMQP server on %(hostname)s:%(port)d after " -"%(max_retries)d tries: %(err_str)s" +msgid "Failed to attach volume %(vol)s" msgstr "" -#: cinder/rpc/impl_kombu.py:482 +#: cinder/volume/driver.py:327 #, python-format -msgid "" -"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " -"again in %(sleep_time)d seconds." +msgid "Failed to copy volume %(src)s to %(dest)d" msgstr "" -#: cinder/rpc/impl_kombu.py:533 cinder/rpc/impl_qpid.py:385 +#: cinder/volume/driver.py:340 #, python-format -msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgid "copy_image_to_volume %s." msgstr "" -#: cinder/rpc/impl_kombu.py:551 cinder/rpc/impl_qpid.py:400 +#: cinder/volume/driver.py:358 #, python-format -msgid "Timed out waiting for RPC response: %s" +msgid "copy_volume_to_image %s." msgstr "" -#: cinder/rpc/impl_kombu.py:555 cinder/rpc/impl_qpid.py:404 +#: cinder/volume/driver.py:394 #, python-format -msgid "Failed to consume message from queue: %s" +msgid "Unable to access the backend storage via the path %(path)s." msgstr "" -#: cinder/rpc/impl_kombu.py:589 cinder/rpc/impl_qpid.py:434 +#: cinder/volume/driver.py:433 #, python-format -msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgid "Creating a new backup for volume %s." msgstr "" -#: cinder/rpc/impl_qpid.py:341 +#: cinder/volume/driver.py:451 #, python-format -msgid "Unable to connect to AMQP server: %s " +msgid "Restoring backup %(backup)s to volume %(volume)s." 
msgstr "" -#: cinder/rpc/impl_qpid.py:346 -#, python-format -msgid "Connected to AMQP server on %s" +#: cinder/volume/driver.py:474 +msgid "Extend volume not implemented" msgstr "" -#: cinder/rpc/impl_qpid.py:354 -msgid "Re-established AMQP queues" +#: cinder/volume/driver.py:533 cinder/volume/drivers/emc/emc_smis_iscsi.py:113 +msgid "ISCSI provider_location not stored, using discovery" msgstr "" -#: cinder/rpc/impl_qpid.py:412 -msgid "Error processing message. Skipping it." +#: cinder/volume/driver.py:546 +#, python-format +msgid "ISCSI discovery attempt failed for:%s" msgstr "" -#: cinder/scheduler/chance.py:47 cinder/scheduler/simple.py:91 -#: cinder/scheduler/simple.py:143 -msgid "Is the appropriate service running?" +#: cinder/volume/driver.py:548 +#, python-format +msgid "Error from iscsiadm -m discovery: %s" msgstr "" -#: cinder/scheduler/chance.py:52 -msgid "Could not find another compute" +#: cinder/volume/driver.py:595 +#, python-format +msgid "Could not find iSCSI export for volume %s" msgstr "" -#: cinder/scheduler/driver.py:63 +#: cinder/volume/driver.py:599 cinder/volume/drivers/emc/emc_smis_iscsi.py:156 #, python-format -msgid "Casted '%(method)s' to volume '%(host)s'" +msgid "ISCSI Discovery: Found %s" msgstr "" -#: cinder/scheduler/driver.py:80 -#, python-format -msgid "Casted '%(method)s' to compute '%(host)s'" +#: cinder/volume/driver.py:696 +msgid "The volume driver requires the iSCSI initiator name in the connector." msgstr "" -#: cinder/scheduler/driver.py:89 -#, python-format -msgid "Casted '%(method)s' to network '%(host)s'" +#: cinder/volume/driver.py:726 cinder/volume/driver.py:845 +#: cinder/volume/drivers/eqlx.py:247 cinder/volume/drivers/lvm.py:359 +#: cinder/volume/drivers/zadara.py:650 +#: cinder/volume/drivers/emc/emc_smis_common.py:859 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:235 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:602 +#: cinder/volume/drivers/netapp/iscsi.py:1032 +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/nexenta/iscsi.py:538 +#: cinder/volume/drivers/windows/windows.py:205 +msgid "Updating volume stats" +msgstr "" + +#: cinder/volume/driver.py:924 +msgid "Driver must implement initialize_connection" msgstr "" -#: cinder/scheduler/driver.py:107 +#: cinder/volume/manager.py:203 #, python-format -msgid "Casted '%(method)s' to %(topic)s '%(host)s'" +msgid "Driver path %s is deprecated, update your configuration to the new path." msgstr "" -#: cinder/scheduler/driver.py:181 -msgid "Must implement a fallback schedule" +#: cinder/volume/manager.py:209 +msgid "" +"ThinLVMVolumeDriver is deprecated, please configure LVMISCSIDriver and " +"lvm_type=thin. Continuing with those settings." msgstr "" -#: cinder/scheduler/driver.py:185 -msgid "Driver must implement schedule_prep_resize" +#: cinder/volume/manager.py:228 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)" msgstr "" -#: cinder/scheduler/driver.py:190 -msgid "Driver must implement schedule_run_instance" +#: cinder/volume/manager.py:235 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s" msgstr "" -#: cinder/scheduler/driver.py:325 -msgid "Block migration can not be used with shared storage." +#: cinder/volume/manager.py:244 +#, python-format +msgid "Re-exporting %s volumes" msgstr "" -#: cinder/scheduler/driver.py:330 -msgid "Live migration can not be used without shared storage." 
+#: cinder/volume/manager.py:257 +#, python-format +msgid "Failed to re-export volume %s: setting to error state" msgstr "" -#: cinder/scheduler/driver.py:367 cinder/scheduler/driver.py:457 +#: cinder/volume/manager.py:264 #, python-format -msgid "host %(dest)s is not compatible with original host %(src)s." +msgid "volume %s stuck in a downloading state" msgstr "" -#: cinder/scheduler/driver.py:416 +#: cinder/volume/manager.py:271 #, python-format -msgid "" -"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " -"memory(host:%(avail)s <= instance:%(mem_inst)s)" +msgid "volume %s: skipping export" msgstr "" -#: cinder/scheduler/driver.py:472 +#: cinder/volume/manager.py:273 #, python-format msgid "" -"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " -"disk(host:%(available)s <= instance:%(necessary)s)" +"Error encountered during re-exporting phase of driver initialization: " +"%(name)s" msgstr "" -#: cinder/scheduler/filter_scheduler.py:51 -#, python-format -msgid "No host selection for %s defined." +#: cinder/volume/manager.py:283 +msgid "Resuming any in progress delete operations" msgstr "" -#: cinder/scheduler/filter_scheduler.py:64 +#: cinder/volume/manager.py:286 #, python-format -msgid "Attempting to build %(num_instances)d instance(s)" +msgid "Resuming delete on volume: %s" msgstr "" -#: cinder/scheduler/filter_scheduler.py:157 -msgid "Scheduler only understands Compute nodes (for now)" +#: cinder/volume/manager.py:328 cinder/volume/manager.py:330 +msgid "Failed to create manager volume flow" msgstr "" -#: cinder/scheduler/filter_scheduler.py:199 +#: cinder/volume/manager.py:374 cinder/volume/manager.py:391 #, python-format -msgid "Filtered %(hosts)s" +msgid "volume %s: deleting" msgstr "" -#: cinder/scheduler/filter_scheduler.py:209 -#, python-format -msgid "Weighted %(weighted_host)s" +#: cinder/volume/manager.py:380 +msgid "volume is not local to this node" msgstr "" -#: cinder/scheduler/host_manager.py:144 +#: cinder/volume/manager.py:389 #, python-format -msgid "Host filter fails for ignored host %(host)s" +msgid "volume %s: removing export" msgstr "" -#: cinder/scheduler/host_manager.py:151 +#: cinder/volume/manager.py:394 #, python-format -msgid "Host filter fails for non-forced host %(host)s" +msgid "Cannot delete volume %s: volume is busy" msgstr "" -#: cinder/scheduler/host_manager.py:157 +#: cinder/volume/manager.py:422 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:427 #, python-format -msgid "Host filter function %(func)s failed for %(host)s" +msgid "volume %s: glance metadata deleted" msgstr "" -#: cinder/scheduler/host_manager.py:163 +#: cinder/volume/manager.py:430 #, python-format -msgid "Host filter passes for %(host)s" +msgid "no glance metadata found for volume %s" msgstr "" -#: cinder/scheduler/host_manager.py:272 +#: cinder/volume/manager.py:434 #, python-format -msgid "Received %(service_name)s service update from %(host)s." +msgid "volume %s: deleted successfully" msgstr "" -#: cinder/scheduler/host_manager.py:313 -msgid "host_manager only implemented for 'compute'" +#: cinder/volume/manager.py:451 +#, python-format +msgid "snapshot %s: creating" msgstr "" -#: cinder/scheduler/host_manager.py:323 +#: cinder/volume/manager.py:462 #, python-format -msgid "No service for compute ID %s" +msgid "snapshot %(snap_id)s: creating" msgstr "" -#: cinder/scheduler/manager.py:85 +#: cinder/volume/manager.py:490 #, python-format msgid "" -"Driver Method %(driver_method_name)s missing: %(e)s. 
Reverting to " -"schedule()" +"Failed updating %(snapshot_id)s metadata using the provided volumes " +"%(volume_id)s metadata" msgstr "" -#: cinder/scheduler/manager.py:150 +#: cinder/volume/manager.py:496 #, python-format -msgid "Failed to schedule_%(method)s: %(ex)s" +msgid "snapshot %s: created successfully" msgstr "" -#: cinder/scheduler/manager.py:159 +#: cinder/volume/manager.py:508 cinder/volume/manager.py:518 #, python-format -msgid "Setting instance %(instance_uuid)s to %(state)s state." +msgid "snapshot %s: deleting" msgstr "" -#: cinder/scheduler/scheduler_options.py:66 +#: cinder/volume/manager.py:526 #, python-format -msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgid "Cannot delete snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:556 +msgid "Failed to update usages deleting snapshot" msgstr "" -#: cinder/scheduler/scheduler_options.py:75 +#: cinder/volume/manager.py:559 #, python-format -msgid "Could not decode scheduler options: '%(e)s'" +msgid "snapshot %s: deleted successfully" msgstr "" -#: cinder/scheduler/simple.py:87 -msgid "Not enough allocatable CPU cores remaining" +#: cinder/volume/manager.py:579 +msgid "being attached by another instance" msgstr "" -#: cinder/scheduler/simple.py:137 -msgid "Not enough allocatable volume gigabytes remaining" +#: cinder/volume/manager.py:583 +msgid "being attached by another host" msgstr "" -#: cinder/scheduler/filters/core_filter.py:45 -msgid "VCPUs not set; assuming CPU collection broken" +#: cinder/volume/manager.py:587 +msgid "being attached by different mode" msgstr "" -#: cinder/tests/fake_utils.py:72 -#, python-format -msgid "Faking execution of cmd (subprocess): %s" +#: cinder/volume/manager.py:590 +msgid "status must be available or attaching" msgstr "" -#: cinder/tests/fake_utils.py:80 +#: cinder/volume/manager.py:698 #, python-format -msgid "Faked command matched %s" +msgid "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" msgstr "" -#: cinder/tests/fake_utils.py:96 +#: cinder/volume/manager.py:760 #, python-format -msgid "Faked command raised an exception %s" +msgid "Unable to fetch connection information from backend: %(err)s" msgstr "" -#: cinder/tests/fake_utils.py:101 +#: cinder/volume/manager.py:807 #, python-format -msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgid "Unable to terminate volume connection: %(err)s" msgstr "" -#: cinder/tests/fakelibvirt.py:784 -msgid "Please extend mock libvirt module to support flags" +#: cinder/volume/manager.py:854 +msgid "failed to create new_volume on destination host" msgstr "" -#: cinder/tests/fakelibvirt.py:790 -msgid "Please extend fake libvirt module to support this auth method" +#: cinder/volume/manager.py:857 +msgid "timeout creating new_volume on destination host" msgstr "" -#: cinder/tests/test_compute.py:365 cinder/tests/test_compute.py:1419 +#: cinder/volume/manager.py:880 #, python-format -msgid "Running instances: %s" +msgid "Failed to copy volume %(vol1)s to %(vol2)s" msgstr "" -#: cinder/tests/test_compute.py:371 +#: cinder/volume/manager.py:909 #, python-format -msgid "After terminating instances: %s" +msgid "" +"migrate_volume_completion: completing migration for volume %(vol1)s " +"(temporary volume %(vol2)s" msgstr "" -#: cinder/tests/test_compute.py:589 -msgid "Internal error" +#: cinder/volume/manager.py:921 +#, python-format +msgid "" +"migrate_volume_completion is cleaning up an error for volume %(vol1)s " +"(temporary volume %(vol2)s" msgstr "" -#: 
cinder/tests/test_compute.py:1430
+#: cinder/volume/manager.py:940
 #, python-format
-msgid "After force-killing instances: %s"
+msgid "Failed to delete migration source vol %(vol)s: %(err)s"
 msgstr ""

-#: cinder/tests/test_misc.py:92
+#: cinder/volume/manager.py:976
 #, python-format
-msgid ""
-"The following migrations are missing a downgrade:\n"
-"\t%s"
+msgid "volume %s: calling driver migrate_volume"
 msgstr ""

-#: cinder/tests/test_cinder_manage.py:169
-msgid "id"
+#: cinder/volume/manager.py:1016
+msgid "Updating volume status"
 msgstr ""

-#: cinder/tests/test_cinder_manage.py:170
-msgid "IPv4"
+#: cinder/volume/manager.py:1024
+#, python-format
+msgid ""
+"Unable to update stats, %(driver_name)s -%(driver_version)s "
+"%(config_group)s driver is uninitialized."
 msgstr ""

-#: cinder/tests/test_cinder_manage.py:171
-msgid "IPv6"
+#: cinder/volume/manager.py:1044
+#, python-format
+msgid "Notification {%s} received"
 msgstr ""

-#: cinder/tests/test_cinder_manage.py:172
-msgid "start address"
+#: cinder/volume/manager.py:1091
+#, python-format
+msgid ""
+"Quota exceeded for %(s_pid)s, tried to extend volume by %(s_size)sG, "
+"(%(d_consumed)dG of %(d_quota)dG already consumed)"
 msgstr ""

-#: cinder/tests/test_cinder_manage.py:173
-msgid "DNS1"
+#: cinder/volume/manager.py:1103
+#, python-format
+msgid "volume %s: extending"
 msgstr ""

-#: cinder/tests/test_cinder_manage.py:174
-msgid "DNS2"
+#: cinder/volume/manager.py:1105
+#, python-format
+msgid "volume %s: extended successfully"
 msgstr ""

-#: cinder/tests/test_cinder_manage.py:175
-msgid "VlanID"
+#: cinder/volume/manager.py:1107
+#, python-format
+msgid "volume %s: Error trying to extend volume"
 msgstr ""

-#: cinder/tests/test_cinder_manage.py:176
-msgid "project"
+#: cinder/volume/manager.py:1169
+msgid "Failed to update usages while retyping volume."
 msgstr ""

-#: cinder/tests/test_cinder_manage.py:177
-msgid "uuid"
+#: cinder/volume/manager.py:1170
+msgid "Failed to get old volume type quota reservations"
 msgstr ""

-#: cinder/tests/test_volume.py:216
+#: cinder/volume/manager.py:1190
 #, python-format
-msgid "Target %s allocated"
+msgid "Volume %s: retyped successfully"
 msgstr ""

-#: cinder/tests/test_volume.py:468
+#: cinder/volume/manager.py:1193
 #, python-format
-msgid "Cannot confirm exported volume id:%s."
+msgid ""
+"Volume %s: driver error when trying to retype, falling back to generic "
+"mechanism."
 msgstr ""

-#: cinder/tests/test_volume_types.py:58
-#, python-format
-msgid "Given data: %s"
+#: cinder/volume/manager.py:1204
+msgid "Retype requires migration but is not allowed."
 msgstr ""

-#: cinder/tests/test_volume_types.py:59
-#, python-format
-msgid "Result data: %s"
+#: cinder/volume/manager.py:1212
+msgid "Volume must not have snapshots."
msgstr "" -#: cinder/tests/test_xenapi.py:626 +#: cinder/volume/qos_specs.py:57 #, python-format -msgid "Creating files in %s to simulate guest agent" +msgid "Valid consumer of QoS specs are: %s" msgstr "" -#: cinder/tests/test_xenapi.py:637 +#: cinder/volume/qos_specs.py:84 cinder/volume/qos_specs.py:105 +#: cinder/volume/qos_specs.py:155 cinder/volume/qos_specs.py:197 +#: cinder/volume/qos_specs.py:211 cinder/volume/qos_specs.py:225 +#: cinder/volume/volume_types.py:43 #, python-format -msgid "Removing simulated guest agent files in %s" +msgid "DB error: %s" msgstr "" -#: cinder/tests/api/openstack/compute/test_servers.py:2144 -#, python-format -msgid "Quota exceeded: code=%(code)s" +#: cinder/volume/qos_specs.py:123 cinder/volume/qos_specs.py:140 +#: cinder/volume/qos_specs.py:272 cinder/volume/volume_types.py:52 +#: cinder/volume/volume_types.py:99 +msgid "id cannot be None" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:57 +#: cinder/volume/qos_specs.py:156 #, python-format -msgid "_create: %s" +msgid "Failed to get all associations of qos specs %s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:66 +#: cinder/volume/qos_specs.py:189 #, python-format -msgid "_delete: %s" +msgid "" +"Type %(type_id)s is already associated with another qos specs: " +"%(qos_specs_id)s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:75 +#: cinder/volume/qos_specs.py:198 #, python-format -msgid "_get: %s" +msgid "Failed to associate qos specs %(id)s with type: %(vol_type_id)s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:85 +#: cinder/volume/qos_specs.py:212 #, python-format -msgid "_get_all: %s" +msgid "Failed to disassociate qos specs %(id)s with type: %(vol_type_id)s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:125 +#: cinder/volume/qos_specs.py:226 #, python-format -msgid "test_snapshot_create: param=%s" +msgid "Failed to disassociate qos specs %s." msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:134 -#, python-format -msgid "test_snapshot_create: resp_dict=%s" +#: cinder/volume/qos_specs.py:284 cinder/volume/volume_types.py:111 +msgid "name cannot be None" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:156 +#: cinder/volume/utils.py:144 #, python-format -msgid "test_snapshot_create_force: param=%s" +msgid "" +"Incorrect value error: %(blocksize)s, it may indicate that " +"'volume_dd_blocksize' was configured incorrectly. Fall back to default." msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:165 +#: cinder/volume/volume_types.py:130 #, python-format -msgid "test_snapshot_create_force: resp_dict=%s" +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:205 +#: cinder/volume/drivers/block_device.py:131 +#: cinder/volume/drivers/block_device.py:143 cinder/volume/drivers/lvm.py:654 +#: cinder/volume/drivers/lvm.py:669 #, python-format -msgid "test_snapshot_show: resp=%s" +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:231 +#: cinder/volume/drivers/block_device.py:157 cinder/volume/drivers/lvm.py:687 #, python-format -msgid "test_snapshot_detail: resp_dict=%s" +msgid "" +"Skipping remove_export. 
No iscsi_target is presently exported for volume:" +" %s" msgstr "" -#: cinder/tests/integrated/test_login.py:31 +#: cinder/volume/drivers/block_device.py:183 cinder/volume/drivers/lvm.py:483 #, python-format -msgid "flavor: %s" +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" msgstr "" -#: cinder/tests/integrated/api/client.py:38 +#: cinder/volume/drivers/block_device.py:200 cinder/volume/drivers/lvm.py:504 #, python-format -msgid "" -"%(message)s\n" -"Status Code: %(_status)s\n" -"Body: %(_body)s" -msgstr "" - -#: cinder/tests/integrated/api/client.py:47 -msgid "Authentication error" +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" msgstr "" -#: cinder/tests/integrated/api/client.py:55 -msgid "Authorization error" +#: cinder/volume/drivers/block_device.py:272 cinder/volume/drivers/lvm.py:227 +#, python-format +msgid "Performing secure delete on volume: %s" msgstr "" -#: cinder/tests/integrated/api/client.py:63 -msgid "Item not found" +#: cinder/volume/drivers/block_device.py:287 +#, python-format +msgid "Error unrecognized volume_clear option: %s" msgstr "" -#: cinder/tests/integrated/api/client.py:105 +#: cinder/volume/drivers/block_device.py:311 cinder/volume/drivers/lvm.py:300 +#: cinder/volume/drivers/zadara.py:509 cinder/volume/drivers/nexenta/nfs.py:189 #, python-format -msgid "Doing %(method)s on %(relative_url)s" +msgid "Creating clone of volume: %s" msgstr "" -#: cinder/tests/integrated/api/client.py:107 -#, python-format -msgid "Body: %s" +#: cinder/volume/drivers/block_device.py:380 +msgid "No free disk" msgstr "" -#: cinder/tests/integrated/api/client.py:125 -#, python-format -msgid "%(auth_uri)s => code %(http_status)s" +#: cinder/volume/drivers/block_device.py:393 +msgid "No big enough free disk" msgstr "" -#: cinder/tests/integrated/api/client.py:151 +#: cinder/volume/drivers/coraid.py:84 #, python-format -msgid "%(relative_uri)s => code %(http_status)s" +msgid "Invalid ESM url scheme \"%s\". Supported https only." msgstr "" -#: cinder/tests/integrated/api/client.py:161 -msgid "Unexpected status code" +#: cinder/volume/drivers/coraid.py:111 +msgid "Invalid REST handle name. Expected path." msgstr "" -#: cinder/tests/integrated/api/client.py:168 +#: cinder/volume/drivers/coraid.py:134 #, python-format -msgid "Decoding JSON: %s" +msgid "Call to json.loads() failed: %(ex)s. Response: %(resp)s" msgstr "" -#: cinder/tests/rpc/common.py:133 -#, python-format -msgid "Nested received %(queue)s, %(value)s" +#: cinder/volume/drivers/coraid.py:224 +msgid "Session is expired. Relogin on ESM." msgstr "" -#: cinder/tests/rpc/common.py:142 -#, python-format -msgid "Nested return %s" +#: cinder/volume/drivers/coraid.py:244 +msgid "Reply is empty." msgstr "" -#: cinder/tests/rpc/common.py:160 -msgid "RPC backend does not support timeouts" +#: cinder/volume/drivers/coraid.py:246 +msgid "Error message is empty." 
msgstr "" -#: cinder/tests/rpc/common.py:227 cinder/tests/rpc/common.py:233 +#: cinder/volume/drivers/coraid.py:284 #, python-format -msgid "Received %s" +msgid "Coraid Appliance ping failed: %s" msgstr "" -#: cinder/virt/connection.py:85 -msgid "Failed to open connection to the hypervisor" +#: cinder/volume/drivers/coraid.py:297 +#, python-format +msgid "Volume \"%(name)s\" created with VSX LUN \"%(lun)s\"" msgstr "" -#: cinder/virt/fake.py:270 cinder/virt/xenapi_conn.py:396 -#: cinder/virt/baremetal/proxy.py:716 cinder/virt/libvirt/connection.py:2045 +#: cinder/volume/drivers/coraid.py:311 #, python-format -msgid "Compute_service record created for %s " +msgid "Volume \"%s\" deleted." msgstr "" -#: cinder/virt/fake.py:273 cinder/virt/xenapi_conn.py:399 -#: cinder/virt/baremetal/proxy.py:720 cinder/virt/libvirt/connection.py:2048 +#: cinder/volume/drivers/coraid.py:315 #, python-format -msgid "Compute_service record updated for %s " +msgid "Resize volume \"%(name)s\" to %(size)s GB." msgstr "" -#: cinder/virt/firewall.py:130 +#: cinder/volume/drivers/coraid.py:319 #, python-format -msgid "Attempted to unfilter instance %s which is not filtered" +msgid "Repository for volume \"%(name)s\" found: \"%(repo)s\"" msgstr "" -#: cinder/virt/firewall.py:137 +#: cinder/volume/drivers/coraid.py:333 #, python-format -msgid "Filters added to instance %s" +msgid "Volume \"%(name)s\" resized. New size is %(size)s GB." msgstr "" -#: cinder/virt/firewall.py:139 -msgid "Provider Firewall Rules refreshed" +#: cinder/volume/drivers/coraid.py:385 +msgid "Cannot create clone volume in different repository." msgstr "" -#: cinder/virt/firewall.py:291 +#: cinder/volume/drivers/coraid.py:505 #, python-format -msgid "Adding security group rule: %r" +msgid "Initialize connection %(shelf)s/%(lun)s for %(name)s" msgstr "" -#: cinder/virt/firewall.py:403 cinder/virt/xenapi/firewall.py:87 +#: cinder/volume/drivers/eqlx.py:139 #, python-format -msgid "Adding provider rule: %s" +msgid "" +"CLI output\n" +"%s" msgstr "" -#: cinder/virt/images.py:86 -msgid "'qemu-img info' parsing failed." 
+#: cinder/volume/drivers/eqlx.py:154 +msgid "Reading CLI MOTD" msgstr "" -#: cinder/virt/images.py:92 +#: cinder/volume/drivers/eqlx.py:158 #, python-format -msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgid "Setting CLI terminal width: '%s'" msgstr "" -#: cinder/virt/images.py:104 +#: cinder/volume/drivers/eqlx.py:162 #, python-format -msgid "Converted to raw, but format is now %s" +msgid "Sending CLI command: '%s'" msgstr "" -#: cinder/virt/vmwareapi_conn.py:105 -msgid "" -"Must specify vmwareapi_host_ip,vmwareapi_host_username and " -"vmwareapi_host_password to useconnection_type=vmwareapi" +#: cinder/volume/drivers/eqlx.py:169 +msgid "Error executing EQL command" msgstr "" -#: cinder/virt/vmwareapi_conn.py:276 +#: cinder/volume/drivers/eqlx.py:199 #, python-format -msgid "In vmwareapi:_create_session, got this exception: %s" +msgid "EQL-driver: executing \"%s\"" msgstr "" -#: cinder/virt/vmwareapi_conn.py:359 +#: cinder/volume/drivers/eqlx.py:208 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:383 #, python-format -msgid "In vmwareapi:_call_method, got this exception: %s" +msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" msgstr "" -#: cinder/virt/vmwareapi_conn.py:398 +#: cinder/volume/drivers/eqlx.py:215 cinder/volume/drivers/san/san.py:149 #, python-format -msgid "Task [%(task_name)s] %(task_ref)s status: success" +msgid "Error running SSH command: %s" msgstr "" -#: cinder/virt/vmwareapi_conn.py:404 +#: cinder/volume/drivers/eqlx.py:282 #, python-format -msgid "Task [%(task_name)s] %(task_ref)s status: error %(error_info)s" +msgid "Volume %s does not exist, it may have already been deleted" msgstr "" -#: cinder/virt/vmwareapi_conn.py:409 +#: cinder/volume/drivers/eqlx.py:300 #, python-format -msgid "In vmwareapi:_poll_task, Got this error %s" -msgstr "" - -#: cinder/virt/xenapi_conn.py:140 -msgid "" -"Must specify xenapi_connection_url, xenapi_connection_username " -"(optionally), and xenapi_connection_password to use " -"connection_type=xenapi" -msgstr "" - -#: cinder/virt/xenapi_conn.py:329 cinder/virt/libvirt/connection.py:472 -msgid "Could not determine iscsi initiator name" +msgid "EQL-driver: Setup is complete, group IP is %s" msgstr "" -#: cinder/virt/xenapi_conn.py:460 -msgid "Host startup on XenServer is not supported." +#: cinder/volume/drivers/eqlx.py:304 +msgid "Failed to setup the Dell EqualLogic driver" msgstr "" -#: cinder/virt/xenapi_conn.py:489 -msgid "Unable to log in to XenAPI (is the Dom0 disk full?)" +#: cinder/volume/drivers/eqlx.py:320 +#, python-format +msgid "Failed to create volume %s" msgstr "" -#: cinder/virt/xenapi_conn.py:527 -msgid "Host is member of a pool, but DB says otherwise" +#: cinder/volume/drivers/eqlx.py:329 +#, python-format +msgid "Volume %s was not found while trying to delete it" msgstr "" -#: cinder/virt/xenapi_conn.py:599 cinder/virt/xenapi_conn.py:612 +#: cinder/volume/drivers/eqlx.py:333 #, python-format -msgid "Got exception: %s" +msgid "Failed to delete volume %s" msgstr "" -#: cinder/virt/baremetal/dom.py:93 -msgid "No domains exist." 
+#: cinder/volume/drivers/eqlx.py:348 +#, python-format +msgid "Failed to create snapshot of volume %s" msgstr "" -#: cinder/virt/baremetal/dom.py:95 +#: cinder/volume/drivers/eqlx.py:361 #, python-format -msgid "============= initial domains =========== : %s" +msgid "Failed to create volume from snapshot %s" msgstr "" -#: cinder/virt/baremetal/dom.py:99 -msgid "Building domain: to be removed" +#: cinder/volume/drivers/eqlx.py:374 +#, python-format +msgid "Failed to create clone of volume %s" msgstr "" -#: cinder/virt/baremetal/dom.py:103 -msgid "Not running domain: remove" +#: cinder/volume/drivers/eqlx.py:384 +#, python-format +msgid "Failed to delete snapshot %(snap)s of volume %(vol)s" msgstr "" -#: cinder/virt/baremetal/dom.py:111 -msgid "domain running on an unknown node: discarded" +#: cinder/volume/drivers/eqlx.py:405 +#, python-format +msgid "Failed to initialize connection to volume %s" msgstr "" -#: cinder/virt/baremetal/dom.py:127 +#: cinder/volume/drivers/eqlx.py:415 #, python-format -msgid "No such domain (%s)" +msgid "Failed to terminate connection to volume %s" msgstr "" -#: cinder/virt/baremetal/dom.py:134 +#: cinder/volume/drivers/eqlx.py:436 #, python-format -msgid "Failed power down Bare-metal node %s" +msgid "Volume %s is not found!, it may have been deleted" msgstr "" -#: cinder/virt/baremetal/dom.py:143 -msgid "deactivate -> activate fails" +#: cinder/volume/drivers/eqlx.py:440 +#, python-format +msgid "Failed to ensure export of volume %s" msgstr "" -#: cinder/virt/baremetal/dom.py:153 -msgid "destroy_domain: no such domain" +#: cinder/volume/drivers/eqlx.py:459 +#, python-format +msgid "Failed to extend_volume %(name)s from %(current_size)sGB to %(new_size)sGB" msgstr "" -#: cinder/virt/baremetal/dom.py:154 +#: cinder/volume/drivers/glusterfs.py:86 #, python-format -msgid "No such domain %s" +msgid "There's no Gluster config file configured (%s)" msgstr "" -#: cinder/virt/baremetal/dom.py:161 +#: cinder/volume/drivers/glusterfs.py:91 #, python-format -msgid "Domains: %s" +msgid "Gluster config file at %(config)s doesn't exist" msgstr "" -#: cinder/virt/baremetal/dom.py:163 -#, python-format -msgid "Nodes: %s" +#: cinder/volume/drivers/glusterfs.py:103 +msgid "mount.glusterfs is not installed" msgstr "" -#: cinder/virt/baremetal/dom.py:166 +#: cinder/volume/drivers/glusterfs.py:161 #, python-format -msgid "After storing domains: %s" +msgid "Cloning volume %(src)s to volume %(dst)s" msgstr "" -#: cinder/virt/baremetal/dom.py:169 -msgid "deactivation/removing domain failed" +#: cinder/volume/drivers/glusterfs.py:166 +msgid "Volume status must be 'available'." msgstr "" -#: cinder/virt/baremetal/dom.py:176 -msgid "===== Domain is being created =====" +#: cinder/volume/drivers/glusterfs.py:202 cinder/volume/drivers/nfs.py:122 +#: cinder/volume/drivers/netapp/nfs.py:753 +#, python-format +msgid "casted to %s" msgstr "" -#: cinder/virt/baremetal/dom.py:179 -msgid "Same domain name already exists" +#: cinder/volume/drivers/glusterfs.py:216 +msgid "Snapshot status must be \"available\" to clone." 
msgstr "" -#: cinder/virt/baremetal/dom.py:181 -msgid "create_domain: before get_idle_node" +#: cinder/volume/drivers/glusterfs.py:238 +#, python-format +msgid "snapshot: %(snap)s, volume: %(vol)s, volume_size: %(size)s" msgstr "" -#: cinder/virt/baremetal/dom.py:198 +#: cinder/volume/drivers/glusterfs.py:257 #, python-format -msgid "Created new domain: %s" +msgid "will copy from snapshot at %s" msgstr "" -#: cinder/virt/baremetal/dom.py:213 +#: cinder/volume/drivers/glusterfs.py:275 cinder/volume/drivers/nfs.py:172 #, python-format -msgid "Failed to boot Bare-metal node %s" +msgid "Volume %s does not have provider_location specified, skipping" msgstr "" -#: cinder/virt/baremetal/dom.py:222 -msgid "No such domain exists" +#: cinder/volume/drivers/glusterfs.py:373 +#, python-format +msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" msgstr "" -#: cinder/virt/baremetal/dom.py:226 +#: cinder/volume/drivers/glusterfs.py:403 #, python-format -msgid "change_domain_state: to new state %s" +msgid "nova call result: %s" msgstr "" -#: cinder/virt/baremetal/dom.py:233 -#, python-format -msgid "Stored fake domains to the file: %s" +#: cinder/volume/drivers/glusterfs.py:405 +msgid "Call to Nova to create snapshot failed" msgstr "" -#: cinder/virt/baremetal/dom.py:244 -msgid "domain does not exist" +#: cinder/volume/drivers/glusterfs.py:427 +msgid "Nova returned \"error\" status while creating snapshot." msgstr "" -#: cinder/virt/baremetal/nodes.py:42 +#: cinder/volume/drivers/glusterfs.py:431 #, python-format -msgid "Unknown baremetal driver %(d)s" +msgid "Status of snapshot %(id)s is now %(status)s" msgstr "" -#: cinder/virt/baremetal/proxy.py:148 +#: cinder/volume/drivers/glusterfs.py:444 #, python-format -msgid "Error encountered when destroying instance '%(name)s': %(ex)s" +msgid "Timed out while waiting for Nova update for creation of snapshot %s." msgstr "" -#: cinder/virt/baremetal/proxy.py:162 +#: cinder/volume/drivers/glusterfs.py:456 #, python-format -msgid "instance %(instance_name)s: deleting instance files %(target)s" +msgid "create snapshot: %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:189 +#: cinder/volume/drivers/glusterfs.py:457 #, python-format -msgid "instance %s: rebooted" +msgid "volume id: %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:192 -msgid "_wait_for_reboot failed" +#: cinder/volume/drivers/glusterfs.py:532 +msgid "'active' must be present when writing snap_info." msgstr "" -#: cinder/virt/baremetal/proxy.py:222 +#: cinder/volume/drivers/glusterfs.py:562 #, python-format -msgid "instance %s: rescued" +msgid "deleting snapshot %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:225 -msgid "_wait_for_rescue failed" +#: cinder/volume/drivers/glusterfs.py:566 +msgid "Volume status must be \"available\" or \"in-use\"." msgstr "" -#: cinder/virt/baremetal/proxy.py:242 -msgid "<============= spawn of baremetal =============>" +#: cinder/volume/drivers/glusterfs.py:582 +#, python-format +msgid "" +"Snapshot record for %s is not present, allowing snapshot_delete to " +"proceed." msgstr "" -#: cinder/virt/baremetal/proxy.py:255 +#: cinder/volume/drivers/glusterfs.py:587 #, python-format -msgid "instance %s: is building" +msgid "snapshot_file for this snap is %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:260 -msgid "Key is injected but instance is not running yet" +#: cinder/volume/drivers/glusterfs.py:608 +#, python-format +msgid "No base file found for %s." 
msgstr "" -#: cinder/virt/baremetal/proxy.py:265 +#: cinder/volume/drivers/glusterfs.py:625 #, python-format -msgid "instance %s: booted" +msgid "No %(base_id)s found for %(file)s" msgstr "" -#: cinder/virt/baremetal/proxy.py:268 +#: cinder/volume/drivers/glusterfs.py:680 #, python-format -msgid "~~~~~~ current state = %s ~~~~~~" +msgid "No file found with %s as backing file." msgstr "" -#: cinder/virt/baremetal/proxy.py:269 +#: cinder/volume/drivers/glusterfs.py:690 #, python-format -msgid "instance %s spawned successfully" +msgid "No snap found with %s as backing file." msgstr "" -#: cinder/virt/baremetal/proxy.py:272 +#: cinder/volume/drivers/glusterfs.py:701 #, python-format -msgid "instance %s:not booted" +msgid "No file depends on %s." msgstr "" -#: cinder/virt/baremetal/proxy.py:274 -msgid "Bremetal assignment is overcommitted." +#: cinder/volume/drivers/glusterfs.py:727 +#, python-format +msgid "Check condition failed: %s expected to be None." msgstr "" -#: cinder/virt/baremetal/proxy.py:354 +#: cinder/volume/drivers/glusterfs.py:778 +msgid "Call to Nova delete snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:796 #, python-format -msgid "instance %s: Creating image" +msgid "status of snapshot %s is still \"deleting\"... waiting" msgstr "" -#: cinder/virt/baremetal/proxy.py:473 +#: cinder/volume/drivers/glusterfs.py:802 #, python-format -msgid "instance %(inst_name)s: injecting %(injection)s into image %(img_id)s" +msgid "Unable to delete snapshot %(id)s, status: %(status)s." msgstr "" -#: cinder/virt/baremetal/proxy.py:484 +#: cinder/volume/drivers/glusterfs.py:815 #, python-format -msgid "" -"instance %(inst_name)s: ignoring error injecting data into image " -"%(img_id)s (%(e)s)" +msgid "Timed out while waiting for Nova update for deletion of snapshot %(id)s." msgstr "" -#: cinder/virt/baremetal/proxy.py:529 +#: cinder/volume/drivers/glusterfs.py:904 #, python-format -msgid "instance %s: starting toXML method" +msgid "%s must be a valid raw or qcow2 image." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:967 +msgid "Extend volume is only supported for this driver when no snapshots exist." msgstr "" -#: cinder/virt/baremetal/proxy.py:531 +#: cinder/volume/drivers/glusterfs.py:975 #, python-format -msgid "instance %s: finished toXML method" +msgid "Unrecognized backing format: %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:574 cinder/virt/libvirt/connection.py:1815 -msgid "" -"Cannot get the number of cpu, because this function is not implemented " -"for this platform. This error can be safely ignored for now." +#: cinder/volume/drivers/glusterfs.py:990 +#, python-format +msgid "creating new volume at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:993 +#, python-format +msgid "file already exists at %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:714 +#: cinder/volume/drivers/glusterfs.py:1019 cinder/volume/drivers/nfs.py:159 #, python-format -msgid "#### RLK: cpu_arch = %s " +msgid "Exception during mounting %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:746 -msgid "Updating!" +#: cinder/volume/drivers/glusterfs.py:1021 +#, python-format +msgid "Available shares: %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:773 cinder/virt/libvirt/connection.py:2609 -#: cinder/virt/xenapi/host.py:129 -msgid "Updating host stats" +#: cinder/volume/drivers/glusterfs.py:1038 +#, python-format +msgid "" +"GlusterFS share at %(dir)s is not writable by the Cinder volume service. " +"Snapshot operations will not be supported." 
msgstr "" -#: cinder/virt/baremetal/tilera.py:185 -msgid "free_node...." +#: cinder/volume/drivers/gpfs.py:96 +#, python-format +msgid "GPFS is not active. Detailed output: %s" msgstr "" -#: cinder/virt/baremetal/tilera.py:216 +#: cinder/volume/drivers/gpfs.py:97 #, python-format -msgid "deactivate_node is called for node_id = %(id)s node_ip = %(ip)s" +msgid "GPFS is not running - state: %s" msgstr "" -#: cinder/virt/baremetal/tilera.py:221 -msgid "status of node is set to 0" +#: cinder/volume/drivers/gpfs.py:140 +msgid "Option gpfs_mount_point_base is not set correctly." msgstr "" -#: cinder/virt/baremetal/tilera.py:232 -msgid "rootfs is already removed" +#: cinder/volume/drivers/gpfs.py:147 +msgid "Option gpfs_images_share_mode is not set correctly." msgstr "" -#: cinder/virt/baremetal/tilera.py:264 -msgid "Before ping to the bare-metal node" +#: cinder/volume/drivers/gpfs.py:153 +msgid "Option gpfs_images_dir is not set correctly." msgstr "" -#: cinder/virt/baremetal/tilera.py:275 +#: cinder/volume/drivers/gpfs.py:160 #, python-format -msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is ready" +msgid "" +"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " +"belong to different file systems" msgstr "" -#: cinder/virt/baremetal/tilera.py:279 +#: cinder/volume/drivers/gpfs.py:169 #, python-format -msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is not ready, out_msg=%(out_msg)s" +msgid "" +"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in " +"cluster daemon level %(cur)s - must be at least at level %(min)s." msgstr "" -#: cinder/virt/baremetal/tilera.py:292 -msgid "Noting to do for tilera nodes: vmlinux is in CF" +#: cinder/volume/drivers/gpfs.py:183 +#, python-format +msgid "%s must be an absolute path." msgstr "" -#: cinder/virt/baremetal/tilera.py:316 -msgid "activate_node" +#: cinder/volume/drivers/gpfs.py:188 +#, python-format +msgid "%s is not a directory." msgstr "" -#: cinder/virt/baremetal/tilera.py:330 -msgid "Node is unknown error state." +#: cinder/volume/drivers/gpfs.py:197 +#, python-format +msgid "" +"The GPFS filesystem %(fs)s is not at the required release level. Current" +" level is %(cur)s, must be at least %(min)s." msgstr "" -#: cinder/virt/disk/api.py:165 -msgid "no capable image handler configured" +#: cinder/volume/drivers/gpfs.py:556 +#, python-format +msgid "Failed to resize volume %(volume_id)s, error: %(error)s" msgstr "" -#: cinder/virt/disk/api.py:178 +#: cinder/volume/drivers/gpfs.py:604 #, python-format -msgid "unknown disk image handler: %s" +msgid "mkfs failed on volume %(vol)s, error message was: %(err)s" msgstr "" -#: cinder/virt/disk/api.py:189 -msgid "image already mounted" +#: cinder/volume/drivers/gpfs.py:637 +#, python-format +msgid "" +"%s cannot be accessed. Verify that GPFS is active and file system is " +"mounted." msgstr "" -#: cinder/virt/disk/api.py:276 cinder/virt/disk/guestfs.py:64 -#: cinder/virt/disk/guestfs.py:78 cinder/virt/disk/mount.py:100 +#: cinder/volume/drivers/lvm.py:189 #, python-format -msgid "Failed to mount filesystem: %s" +msgid "Unabled to delete due to existing snapshot for volume: %s" msgstr "" -#: cinder/virt/disk/api.py:291 +#: cinder/volume/drivers/lvm.py:215 #, python-format -msgid "Failed to remove container: %s" +msgid "Volume device file path %s does not exist." msgstr "" -#: cinder/virt/disk/api.py:441 +#: cinder/volume/drivers/lvm.py:221 #, python-format -msgid "User %(username)s not found in password file." +msgid "Size for volume: %s not found, cannot secure delete." 
msgstr "" -#: cinder/virt/disk/api.py:457 +#: cinder/volume/drivers/lvm.py:262 #, python-format -msgid "User %(username)s not found in shadow file." +msgid "snapshot: %s not found, skipping delete operations" msgstr "" -#: cinder/virt/disk/guestfs.py:39 +#: cinder/volume/drivers/lvm.py:361 #, python-format -msgid "unsupported partition: %s" +msgid "Unable to update stats on non-initialized Volume Group: %s" msgstr "" -#: cinder/virt/disk/guestfs.py:77 -msgid "unknown guestmount error" +#: cinder/volume/drivers/lvm.py:462 +#, python-format +msgid "Error creating iSCSI target, retrying creation for target: %s" msgstr "" -#: cinder/virt/disk/loop.py:30 +#: cinder/volume/drivers/lvm.py:482 #, python-format -msgid "Could not attach image to loopback: %s" +msgid "volume_info:%s" msgstr "" -#: cinder/virt/disk/mount.py:76 -msgid "no partitions found" +#: cinder/volume/drivers/lvm.py:518 +msgid "Detected inconsistency in provider_location id" msgstr "" -#: cinder/virt/disk/mount.py:77 +#: cinder/volume/drivers/lvm.py:519 cinder/volume/drivers/lvm.py:724 +#: cinder/volume/drivers/huawei/rest_common.py:1225 #, python-format -msgid "Failed to map partitions: %s" +msgid "%s" msgstr "" -#: cinder/virt/disk/nbd.py:58 -msgid "nbd unavailable: module not loaded" +#: cinder/volume/drivers/lvm.py:573 +#, python-format +msgid "Symbolic link %s not found" msgstr "" -#: cinder/virt/disk/nbd.py:63 -msgid "No free nbd devices" +#: cinder/volume/drivers/nfs.py:109 +msgid "Driver specific implementation needs to return mount_point_base." msgstr "" -#: cinder/virt/disk/nbd.py:81 +#: cinder/volume/drivers/nfs.py:263 #, python-format -msgid "qemu-nbd error: %s" +msgid "Expected volume size was %d" msgstr "" -#: cinder/virt/disk/nbd.py:93 +#: cinder/volume/drivers/nfs.py:264 #, python-format -msgid "nbd device %s did not show up" +msgid " but size is now %d" msgstr "" -#: cinder/virt/libvirt/connection.py:265 +#: cinder/volume/drivers/nfs.py:361 #, python-format -msgid "Connecting to libvirt: %s" +msgid "%s is already mounted" msgstr "" -#: cinder/virt/libvirt/connection.py:286 -msgid "Connection to libvirt broke" +#: cinder/volume/drivers/nfs.py:421 +#, python-format +msgid "There's no NFS config file configured (%s)" msgstr "" -#: cinder/virt/libvirt/connection.py:388 +#: cinder/volume/drivers/nfs.py:426 #, python-format -msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s" +msgid "NFS config file at %(config)s doesn't exist" msgstr "" -#: cinder/virt/libvirt/connection.py:400 +#: cinder/volume/drivers/nfs.py:431 #, python-format -msgid "" -"Error from libvirt during saved instance removal. Code=%(errcode)s " -"Error=%(e)s" +msgid "NFS config 'nfs_oversub_ratio' invalid. Must be > 0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:439 +#, python-format +msgid "NFS config 'nfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" msgstr "" -#: cinder/virt/libvirt/connection.py:411 +#: cinder/volume/drivers/nfs.py:493 #, python-format -msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s" +msgid "Selected %s as target nfs share." msgstr "" -#: cinder/virt/libvirt/connection.py:423 -msgid "Instance destroyed successfully." +#: cinder/volume/drivers/nfs.py:526 +#, python-format +msgid "%s is above nfs_used_ratio" msgstr "" -#: cinder/virt/libvirt/connection.py:435 +#: cinder/volume/drivers/nfs.py:529 #, python-format -msgid "Error from libvirt during unfilter. 
Code=%(errcode)s Error=%(e)s" +msgid "%s is above nfs_oversub_ratio" msgstr "" -#: cinder/virt/libvirt/connection.py:461 +#: cinder/volume/drivers/nfs.py:532 #, python-format -msgid "Deleting instance files %(target)s" +msgid "%s reserved space is above nfs_oversub_ratio" msgstr "" -#: cinder/virt/libvirt/connection.py:554 -msgid "attaching LXC block device" +#: cinder/volume/drivers/rbd.py:160 +#, python-format +msgid "Invalid argument - whence=%s not supported" msgstr "" -#: cinder/virt/libvirt/connection.py:567 -msgid "detaching LXC block device" +#: cinder/volume/drivers/rbd.py:164 +msgid "Invalid argument" msgstr "" -#: cinder/virt/libvirt/connection.py:692 -msgid "Instance soft rebooted successfully." +#: cinder/volume/drivers/rbd.py:183 +msgid "fileno() not supported by RBD()" msgstr "" -#: cinder/virt/libvirt/connection.py:696 -msgid "Failed to soft reboot instance." +#: cinder/volume/drivers/rbd.py:210 +#, python-format +msgid "error opening rbd image %s" msgstr "" -#: cinder/virt/libvirt/connection.py:725 -msgid "Instance shutdown successfully." +#: cinder/volume/drivers/rbd.py:259 +msgid "rados and rbd python libraries not found" msgstr "" -#: cinder/virt/libvirt/connection.py:761 cinder/virt/libvirt/connection.py:905 -msgid "During reboot, instance disappeared." +#: cinder/volume/drivers/rbd.py:265 +msgid "error connecting to ceph cluster" msgstr "" -#: cinder/virt/libvirt/connection.py:766 -msgid "Instance rebooted successfully." +#: cinder/volume/drivers/rbd.py:346 cinder/volume/drivers/sheepdog.py:178 +msgid "error refreshing volume stats" msgstr "" -#: cinder/virt/libvirt/connection.py:867 cinder/virt/xenapi/vmops.py:1358 +#: cinder/volume/drivers/rbd.py:377 #, python-format -msgid "" -"Found %(migration_count)d unconfirmed migrations older than " -"%(confirm_window)d seconds" +msgid "clone depth exceeds limit of %s" msgstr "" -#: cinder/virt/libvirt/connection.py:871 +#: cinder/volume/drivers/rbd.py:411 #, python-format -msgid "Automatically confirming migration %d" +msgid "maximum clone depth (%d) has been reached - flattening source volume" msgstr "" -#: cinder/virt/libvirt/connection.py:896 -msgid "Instance is running" +#: cinder/volume/drivers/rbd.py:423 +#, python-format +msgid "flattening source volume %s" msgstr "" -#: cinder/virt/libvirt/connection.py:910 -msgid "Instance spawned successfully." 
+#: cinder/volume/drivers/rbd.py:435 +#, python-format +msgid "creating snapshot='%s'" msgstr "" -#: cinder/virt/libvirt/connection.py:926 +#: cinder/volume/drivers/rbd.py:445 #, python-format -msgid "data: %(data)r, fpath: %(fpath)r" +msgid "cloning '%(src_vol)s@%(src_snap)s' to '%(dest)s'" msgstr "" -#: cinder/virt/libvirt/connection.py:978 -msgid "Guest does not have a console available" +#: cinder/volume/drivers/rbd.py:459 +msgid "clone created successfully" msgstr "" -#: cinder/virt/libvirt/connection.py:1020 +#: cinder/volume/drivers/rbd.py:468 #, python-format -msgid "Path '%(path)s' supports direct I/O" +msgid "creating volume '%s'" msgstr "" -#: cinder/virt/libvirt/connection.py:1024 +#: cinder/volume/drivers/rbd.py:484 #, python-format -msgid "Path '%(path)s' does not support direct I/O: '%(ex)s'" +msgid "flattening %(pool)s/%(img)s" msgstr "" -#: cinder/virt/libvirt/connection.py:1028 cinder/virt/libvirt/connection.py:1032 +#: cinder/volume/drivers/rbd.py:490 #, python-format -msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'" +msgid "cloning %(pool)s/%(img)s@%(snap)s to %(dst)s" msgstr "" -#: cinder/virt/libvirt/connection.py:1153 -msgid "Creating image" +#: cinder/volume/drivers/rbd.py:527 +msgid "volume has no backup snaps" msgstr "" -#: cinder/virt/libvirt/connection.py:1339 +#: cinder/volume/drivers/rbd.py:550 #, python-format -msgid "Injecting %(injection)s into image %(img_id)s" +msgid "volume %s is not a clone" msgstr "" -#: cinder/virt/libvirt/connection.py:1349 +#: cinder/volume/drivers/rbd.py:568 #, python-format -msgid "Ignoring error injecting data into image %(img_id)s (%(e)s)" +msgid "deleting parent snapshot %s" msgstr "" -#: cinder/virt/libvirt/connection.py:1381 +#: cinder/volume/drivers/rbd.py:579 #, python-format -msgid "block_device_list %s" +msgid "deleting parent %s" msgstr "" -#: cinder/virt/libvirt/connection.py:1658 -msgid "Starting toXML method" +#: cinder/volume/drivers/rbd.py:593 +#, python-format +msgid "volume %s no longer exists in backend" msgstr "" -#: cinder/virt/libvirt/connection.py:1662 -msgid "Finished toXML method" +#: cinder/volume/drivers/rbd.py:609 +msgid "volume has clone snapshot(s)" msgstr "" -#: cinder/virt/libvirt/connection.py:1679 +#: cinder/volume/drivers/rbd.py:625 #, python-format +msgid "deleting rbd volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:629 msgid "" -"Error from libvirt while looking up %(instance_name)s: [Error Code " -"%(error_code)s] %(ex)s" +"ImageBusy error raised while deleting rbd volume. This may have been " +"caused by a connection from a client that has crashed and, if so, may be " +"resolved by retrying the delete after 30 seconds has elapsed." 
msgstr "" -#: cinder/virt/libvirt/connection.py:1920 -msgid "libvirt version is too old (does not support getVersion)" +#: cinder/volume/drivers/rbd.py:642 +msgid "volume is a clone so cleaning references" msgstr "" -#: cinder/virt/libvirt/connection.py:1942 +#: cinder/volume/drivers/rbd.py:696 #, python-format -msgid "'' must be 1, but %d\n" +msgid "connection data: %s" msgstr "" -#: cinder/virt/libvirt/connection.py:1969 -#, python-format -msgid "topology (%(topology)s) must have %(ks)s" +#: cinder/volume/drivers/rbd.py:705 +msgid "Not stored in rbd" msgstr "" -#: cinder/virt/libvirt/connection.py:2067 -#, python-format -msgid "" -"Instance launched has CPU info:\n" -"%s" +#: cinder/volume/drivers/rbd.py:709 +msgid "Blank components" msgstr "" -#: cinder/virt/libvirt/connection.py:2079 -#, python-format -msgid "" -"CPU doesn't have compatibility.\n" -"\n" -"%(ret)s\n" -"\n" -"Refer to %(u)s" +#: cinder/volume/drivers/rbd.py:712 +msgid "Not an rbd snapshot" msgstr "" -#: cinder/virt/libvirt/connection.py:2136 +#: cinder/volume/drivers/rbd.py:724 #, python-format -msgid "Timeout migrating for %s. nwfilter not found." +msgid "not cloneable: %s" msgstr "" -#: cinder/virt/libvirt/connection.py:2352 +#: cinder/volume/drivers/rbd.py:728 #, python-format -msgid "skipping %(path)s since it looks like volume" +msgid "%s is in a different ceph cluster" msgstr "" -#: cinder/virt/libvirt/connection.py:2407 -#, python-format -msgid "Getting disk size of %(i_name)s: %(e)s" +#: cinder/volume/drivers/rbd.py:733 +msgid "rbd image clone requires image format to be 'raw' but image {0} is '{1}'" msgstr "" -#: cinder/virt/libvirt/connection.py:2458 +#: cinder/volume/drivers/rbd.py:747 #, python-format -msgid "Instance %s: Starting migrate_disk_and_power_off" +msgid "Unable to open image %(loc)s: %(err)s" msgstr "" -#: cinder/virt/libvirt/connection.py:2513 -msgid "During wait running, instance disappeared." +#: cinder/volume/drivers/rbd.py:817 +msgid "volume backup complete." msgstr "" -#: cinder/virt/libvirt/connection.py:2518 -msgid "Instance running successfully." +#: cinder/volume/drivers/rbd.py:830 +msgid "volume restore complete." msgstr "" -#: cinder/virt/libvirt/connection.py:2525 +#: cinder/volume/drivers/rbd.py:840 cinder/volume/drivers/sheepdog.py:195 #, python-format -msgid "Instance %s: Starting finish_migration" +msgid "Failed to Extend Volume %(volname)s" msgstr "" -#: cinder/virt/libvirt/connection.py:2565 +#: cinder/volume/drivers/rbd.py:845 cinder/volume/drivers/sheepdog.py:200 +#: cinder/volume/drivers/windows/windows.py:223 #, python-format -msgid "Instance %s: Starting finish_revert_migration" +msgid "Extend volume from %(old_size)s GB to %(new_size)s GB." msgstr "" -#: cinder/virt/libvirt/firewall.py:42 -msgid "" -"Libvirt module could not be loaded. NWFilterFirewall will not work " -"correctly." +#: cinder/volume/drivers/scality.py:67 +msgid "Value required for 'scality_sofs_config'" msgstr "" -#: cinder/virt/libvirt/firewall.py:93 -msgid "Called setup_basic_filtering in nwfilter" +#: cinder/volume/drivers/scality.py:78 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" msgstr "" -#: cinder/virt/libvirt/firewall.py:101 -msgid "Ensuring static filters" +#: cinder/volume/drivers/scality.py:84 +msgid "Cannot execute /sbin/mount.sofs" msgstr "" -#: cinder/virt/libvirt/firewall.py:171 -#, python-format -msgid "The nwfilter(%(instance_filter_name)s) is not found." 
+#: cinder/volume/drivers/scality.py:105 +msgid "Cannot mount Scality SOFS, check syslog for errors" msgstr "" -#: cinder/virt/libvirt/firewall.py:217 +#: cinder/volume/drivers/scality.py:139 #, python-format -msgid "The nwfilter(%(instance_filter_name)s) for%(name)s is not found." +msgid "Cannot find volume dir for Scality SOFS at '%s'" msgstr "" -#: cinder/virt/libvirt/firewall.py:233 -msgid "iptables firewall: Setup Basic Filtering" +#: cinder/volume/drivers/sheepdog.py:59 +#, python-format +msgid "Sheepdog is not working: %s" msgstr "" -#: cinder/virt/libvirt/firewall.py:252 -msgid "Attempted to unfilter instance which is not filtered" +#: cinder/volume/drivers/sheepdog.py:64 +msgid "Sheepdog is not working" msgstr "" -#: cinder/virt/libvirt/imagecache.py:170 +#: cinder/volume/drivers/solidfire.py:144 #, python-format -msgid "%s is a valid instance name" +msgid "Payload for SolidFire API call: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:173 +#: cinder/volume/drivers/solidfire.py:151 #, python-format -msgid "%s has a disk file" +msgid "" +"Failed to make httplib connection SolidFire Cluster: %s (verify san_ip " +"settings)" msgstr "" -#: cinder/virt/libvirt/imagecache.py:175 +#: cinder/volume/drivers/solidfire.py:154 #, python-format -msgid "Instance %(instance)s is backed by %(backing)s" +msgid "Failed to make httplib connection: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:186 +#: cinder/volume/drivers/solidfire.py:161 #, python-format msgid "" -"Instance %(instance)s is using a backing file %(backing)s which does not " -"appear in the image service" +"Request to SolidFire cluster returned bad status: %(status)s / %(reason)s" +" (check san_login/san_password settings)" msgstr "" -#: cinder/virt/libvirt/imagecache.py:237 +#: cinder/volume/drivers/solidfire.py:166 #, python-format -msgid "%(id)s (%(base_file)s): image verification failed" +msgid "HTTP request failed, with status: %(status)s and reason: %(reason)s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:247 +#: cinder/volume/drivers/solidfire.py:177 #, python-format -msgid "%(id)s (%(base_file)s): image verification skipped, no hash stored" +msgid "Call to json.loads() raised an exception: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:266 +#: cinder/volume/drivers/solidfire.py:183 #, python-format -msgid "Cannot remove %(base_file)s, it does not exist" +msgid "Results of SolidFire API call: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:278 +#: cinder/volume/drivers/solidfire.py:187 #, python-format -msgid "Base file too young to remove: %s" +msgid "Clone operation encountered: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:281 +#: cinder/volume/drivers/solidfire.py:189 #, python-format -msgid "Removing base file: %s" +msgid "Waiting for outstanding operation before retrying snapshot: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:288 +#: cinder/volume/drivers/solidfire.py:195 #, python-format -msgid "Failed to remove %(base_file)s, error was %(error)s" +msgid "Detected xDBVersionMismatch, retry %s of 5" msgstr "" -#: cinder/virt/libvirt/imagecache.py:299 +#: cinder/volume/drivers/solidfire.py:202 +#: cinder/volume/drivers/solidfire.py:271 +#: cinder/volume/drivers/solidfire.py:366 #, python-format -msgid "%(id)s (%(base_file)s): checking" +msgid "API response: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:318 +#: cinder/volume/drivers/solidfire.py:222 #, python-format -msgid "" -"%(id)s (%(base_file)s): in use: on this node %(local)d local, %(remote)d " -"on other nodes" +msgid "Found 
solidfire account: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:330 +#: cinder/volume/drivers/solidfire.py:253 #, python-format -msgid "" -"%(id)s (%(base_file)s): warning -- an absent base file is in use! " -"instances: %(instance_list)s" +msgid "solidfire account: %s does not exist, create it..." msgstr "" -#: cinder/virt/libvirt/imagecache.py:338 +#: cinder/volume/drivers/solidfire.py:315 #, python-format -msgid "%(id)s (%(base_file)s): in use on (%(remote)d on other nodes)" +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" msgstr "" -#: cinder/virt/libvirt/imagecache.py:348 +#: cinder/volume/drivers/solidfire.py:398 +msgid "Failed to get model update from clone" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:410 #, python-format -msgid "%(id)s (%(base_file)s): image is not in use" +msgid "Failed volume create: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:354 +#: cinder/volume/drivers/solidfire.py:425 #, python-format -msgid "%(id)s (%(base_file)s): image is in use" +msgid "More than one valid preset was detected, using %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:377 +#: cinder/volume/drivers/solidfire.py:460 #, python-format -msgid "Skipping verification, no base directory at %s" +msgid "Failed to get SolidFire Volume: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:381 -msgid "Verify base images" +#: cinder/volume/drivers/solidfire.py:469 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." msgstr "" -#: cinder/virt/libvirt/imagecache.py:388 +#: cinder/volume/drivers/solidfire.py:478 #, python-format -msgid "Image id %(id)s yields fingerprint %(fingerprint)s" +msgid "Volume %s, not found on SF Cluster." msgstr "" -#: cinder/virt/libvirt/imagecache.py:406 +#: cinder/volume/drivers/solidfire.py:481 #, python-format -msgid "Unknown base file: %s" +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:550 +msgid "Enter SolidFire delete_volume..." msgstr "" -#: cinder/virt/libvirt/imagecache.py:411 +#: cinder/volume/drivers/solidfire.py:554 #, python-format -msgid "Active base files: %s" +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:556 +msgid "This usually means the volume was never successfully created." msgstr "" -#: cinder/virt/libvirt/imagecache.py:414 +#: cinder/volume/drivers/solidfire.py:569 #, python-format -msgid "Corrupt base files: %s" +msgid "Failed to delete SolidFire Volume: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:418 +#: cinder/volume/drivers/solidfire.py:572 +#: cinder/volume/drivers/solidfire.py:646 +#: cinder/volume/drivers/solidfire.py:709 +#: cinder/volume/drivers/solidfire.py:734 #, python-format -msgid "Removable base files: %s" +msgid "Volume ID %s was not found on the SolidFire Cluster!" msgstr "" -#: cinder/virt/libvirt/imagecache.py:426 -msgid "Verification complete" +#: cinder/volume/drivers/solidfire.py:575 +msgid "Leaving SolidFire delete_volume" msgstr "" -#: cinder/virt/libvirt/utils.py:264 -msgid "Unable to find an open port" +#: cinder/volume/drivers/solidfire.py:579 +msgid "Executing SolidFire ensure_export..." msgstr "" -#: cinder/virt/libvirt/vif.py:90 -#, python-format -msgid "Ensuring vlan %(vlan)s and bridge %(bridge)s" +#: cinder/volume/drivers/solidfire.py:587 +msgid "Executing SolidFire create_export..." 
msgstr "" -#: cinder/virt/libvirt/vif.py:99 -#, python-format -msgid "Ensuring bridge %s" +#: cinder/volume/drivers/solidfire.py:638 +msgid "Entering SolidFire extend_volume..." msgstr "" -#: cinder/virt/libvirt/vif.py:165 cinder/virt/libvirt/vif.py:220 -#, python-format -msgid "Failed while unplugging vif of instance '%s'" +#: cinder/volume/drivers/solidfire.py:660 +msgid "Leaving SolidFire extend_volume" msgstr "" -#: cinder/virt/libvirt/volume.py:163 -#, python-format -msgid "iSCSI device not found at %s" +#: cinder/volume/drivers/solidfire.py:665 +msgid "Updating cluster status info" msgstr "" -#: cinder/virt/libvirt/volume.py:166 -#, python-format -msgid "" -"ISCSI volume not yet found at: %(mount_device)s. Will rescan & retry. " -"Try number: %(tries)s" +#: cinder/volume/drivers/solidfire.py:673 +msgid "Failed to get updated stats" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:703 +#: cinder/volume/drivers/solidfire.py:728 +msgid "Entering SolidFire attach_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:773 +msgid "Leaving SolidFire transfer volume" msgstr "" -#: cinder/virt/libvirt/volume.py:178 +#: cinder/volume/drivers/zadara.py:236 #, python-format -msgid "Found iSCSI node %(mount_device)s (after %(tries)s rescans)" +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" msgstr "" -#: cinder/virt/vmwareapi/error_util.py:93 +#: cinder/volume/drivers/zadara.py:260 #, python-format -msgid "Error(s) %s occurred in the call to RetrieveProperties" +msgid "Operation completed. %(data)s" msgstr "" -#: cinder/virt/vmwareapi/fake.py:44 cinder/virt/xenapi/fake.py:77 +#: cinder/volume/drivers/zadara.py:357 #, python-format -msgid "%(text)s: _db_content => %(content)s" +msgid "Pool %(name)s: %(total)sGB total, %(free)sGB free" msgstr "" -#: cinder/virt/vmwareapi/fake.py:131 +#: cinder/volume/drivers/zadara.py:408 cinder/volume/drivers/zadara.py:531 #, python-format -msgid "Property %(attr)s not set for the managed object %(objName)s" +msgid "Volume %(name)s could not be found. 
It might be already deleted" msgstr "" -#: cinder/virt/vmwareapi/fake.py:437 -msgid "There is no VM registered" +#: cinder/volume/drivers/zadara.py:438 +#, python-format +msgid "Create snapshot: %s" msgstr "" -#: cinder/virt/vmwareapi/fake.py:439 cinder/virt/vmwareapi/fake.py:609 +#: cinder/volume/drivers/zadara.py:445 cinder/volume/drivers/zadara.py:490 +#: cinder/volume/drivers/zadara.py:516 #, python-format -msgid "Virtual Machine with ref %s is not there" +msgid "Volume %(name)s not found" msgstr "" -#: cinder/virt/vmwareapi/fake.py:502 +#: cinder/volume/drivers/zadara.py:456 #, python-format -msgid "Logging out a session that is invalid or already logged out: %s" +msgid "Delete snapshot: %s" msgstr "" -#: cinder/virt/vmwareapi/fake.py:517 -msgid "Session is faulty" +#: cinder/volume/drivers/zadara.py:464 +#, python-format +msgid "snapshot: original volume %s not found, skipping delete operation" msgstr "" -#: cinder/virt/vmwareapi/fake.py:520 -msgid "Session Invalid" +#: cinder/volume/drivers/zadara.py:472 +#, python-format +msgid "snapshot: snapshot %s not found, skipping delete operation" msgstr "" -#: cinder/virt/vmwareapi/fake.py:606 -msgid " No Virtual Machine has been registered yet" +#: cinder/volume/drivers/zadara.py:483 +#, python-format +msgid "Creating volume from snapshot: %s" msgstr "" -#: cinder/virt/vmwareapi/io_util.py:99 +#: cinder/volume/drivers/zadara.py:496 #, python-format -msgid "Glance image %s is in killed state" +msgid "Snapshot %(name)s not found" msgstr "" -#: cinder/virt/vmwareapi/io_util.py:107 +#: cinder/volume/drivers/zadara.py:614 #, python-format -msgid "Glance image %(image_id)s is in unknown state - %(state)s" +msgid "Attach properties: %(properties)s" msgstr "" -#: cinder/virt/vmwareapi/network_utils.py:128 +#: cinder/volume/drivers/emc/emc_smis_common.py:40 msgid "" -"ESX SOAP server returned an empty port group for the host system in its " -"response" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." msgstr "" -#: cinder/virt/vmwareapi/network_utils.py:155 -#, python-format -msgid "Creating Port Group with name %s on the ESX host" +#: cinder/volume/drivers/emc/emc_smis_common.py:79 +msgid "Entering create_volume." msgstr "" -#: cinder/virt/vmwareapi/network_utils.py:169 +#: cinder/volume/drivers/emc/emc_smis_common.py:83 #, python-format -msgid "Created Port Group with name %s on the ESX host" +msgid "Create Volume: %(volume)s Size: %(size)lu" msgstr "" -#: cinder/virt/vmwareapi/read_write_util.py:150 +#: cinder/volume/drivers/emc/emc_smis_common.py:91 #, python-format -msgid "Exception during HTTP connection close in VMWareHTTpWrite. Exception is %s" -msgstr "" - -#: cinder/virt/vmwareapi/vim.py:84 -msgid "Unable to import suds." +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" msgstr "" -#: cinder/virt/vmwareapi/vim.py:90 -msgid "Must specify vmwareapi_wsdl_loc" +#: cinder/volume/drivers/emc/emc_smis_common.py:98 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" msgstr "" -#: cinder/virt/vmwareapi/vim.py:145 +#: cinder/volume/drivers/emc/emc_smis_common.py:107 #, python-format -msgid "No such SOAP method '%s' provided by VI SDK" +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." 
msgstr "" -#: cinder/virt/vmwareapi/vim.py:150 +#: cinder/volume/drivers/emc/emc_smis_common.py:115 #, python-format -msgid "httplib error in %s: " +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" msgstr "" -#: cinder/virt/vmwareapi/vim.py:157 +#: cinder/volume/drivers/emc/emc_smis_common.py:130 #, python-format -msgid "Socket error in %s: " +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vim.py:162 +#: cinder/volume/drivers/emc/emc_smis_common.py:137 #, python-format -msgid "Type error in %s: " +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" msgstr "" -#: cinder/virt/vmwareapi/vim.py:166 +#: cinder/volume/drivers/emc/emc_smis_common.py:144 #, python-format -msgid "Exception in %s " +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:66 -msgid "Getting list of instances" +#: cinder/volume/drivers/emc/emc_smis_common.py:152 +msgid "Entering create_volume_from_snapshot." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:82 +#: cinder/volume/drivers/emc/emc_smis_common.py:157 #, python-format -msgid "Got total of %s instances" +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:126 -msgid "Couldn't get a local Datastore reference" +#: cinder/volume/drivers/emc/emc_smis_common.py:167 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:196 +#: cinder/volume/drivers/emc/emc_smis_common.py:177 #, python-format -msgid "Creating VM with the name %s on the ESX host" +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:205 +#: cinder/volume/drivers/emc/emc_smis_common.py:188 #, python-format -msgid "Created VM with the name %s on the ESX host" +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:234 +#: cinder/volume/drivers/emc/emc_smis_common.py:197 #, python-format msgid "" -"Creating Virtual Disk of size %(vmdk_file_size_in_kb)s KB and adapter " -"type %(adapter_type)s on the ESX host local store %(data_store_name)s" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:251 +#: cinder/volume/drivers/emc/emc_smis_common.py:218 #, python-format msgid "" -"Created Virtual Disk of size %(vmdk_file_size_in_kb)s KB on the ESX host " -"local store %(data_store_name)s" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. 
Return code: %(rc)lu.Error: %(error)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:260 +#: cinder/volume/drivers/emc/emc_smis_common.py:230 #, python-format msgid "" -"Deleting the file %(flat_uploaded_vmdk_path)s on the ESX host localstore " -"%(data_store_name)s" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:272 +#: cinder/volume/drivers/emc/emc_smis_common.py:241 #, python-format msgid "" -"Deleted the file %(flat_uploaded_vmdk_path)s on the ESX host local store " -"%(data_store_name)s" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:283 +#: cinder/volume/drivers/emc/emc_smis_common.py:257 #, python-format msgid "" -"Downloading image file data %(image_ref)s to the ESX data store " -"%(data_store_name)s" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:298 +#: cinder/volume/drivers/emc/emc_smis_common.py:266 #, python-format msgid "" -"Downloaded image file data %(image_ref)s to the ESX data store " -"%(data_store_name)s" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:315 +#: cinder/volume/drivers/emc/emc_smis_common.py:278 #, python-format -msgid "Reconfiguring VM instance %s to attach the image disk" +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:322 -#, python-format -msgid "Reconfigured VM instance %s to attach the image disk" +#: cinder/volume/drivers/emc/emc_smis_common.py:287 +msgid "Entering create_cloned_volume." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:329 +#: cinder/volume/drivers/emc/emc_smis_common.py:292 #, python-format -msgid "Powering on the VM instance %s" +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:335 +#: cinder/volume/drivers/emc/emc_smis_common.py:302 #, python-format -msgid "Powered on the VM instance %s" +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:381 +#: cinder/volume/drivers/emc/emc_smis_common.py:312 #, python-format -msgid "Creating Snapshot of the VM instance %s " +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." 
msgstr "" -#: cinder/virt/vmwareapi/vmops.py:391 +#: cinder/volume/drivers/emc/emc_smis_common.py:321 #, python-format -msgid "Created Snapshot of the VM instance %s " +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:434 +#: cinder/volume/drivers/emc/emc_smis_common.py:342 #, python-format -msgid "Copying disk data before snapshot of the VM instance %s" +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. Return code: %(rc)lu.Error: %(error)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:447 +#: cinder/volume/drivers/emc/emc_smis_common.py:354 #, python-format -msgid "Copied disk data before snapshot of the VM instance %s" +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:456 +#: cinder/volume/drivers/emc/emc_smis_common.py:365 #, python-format -msgid "Uploading image %s" +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:469 +#: cinder/volume/drivers/emc/emc_smis_common.py:381 #, python-format -msgid "Uploaded image %s" +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:479 +#: cinder/volume/drivers/emc/emc_smis_common.py:390 #, python-format -msgid "Deleting temporary vmdk file %s" +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:488 +#: cinder/volume/drivers/emc/emc_smis_common.py:402 #, python-format -msgid "Deleted temporary vmdk file %s" +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:520 -msgid "instance is not powered on" +#: cinder/volume/drivers/emc/emc_smis_common.py:411 +msgid "Entering delete_volume." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:527 +#: cinder/volume/drivers/emc/emc_smis_common.py:413 #, python-format -msgid "Rebooting guest OS of VM %s" +msgid "Delete Volume: %(volume)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:530 +#: cinder/volume/drivers/emc/emc_smis_common.py:420 #, python-format -msgid "Rebooted guest OS of VM %s" +msgid "Volume %(name)s not found on the array. No volume to delete." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:532 +#: cinder/volume/drivers/emc/emc_smis_common.py:430 #, python-format -msgid "Doing hard reboot of VM %s" +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." 
msgstr "" -#: cinder/virt/vmwareapi/vmops.py:536 +#: cinder/volume/drivers/emc/emc_smis_common.py:438 #, python-format -msgid "Did hard reboot of VM %s" +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:548 +#: cinder/volume/drivers/emc/emc_smis_common.py:442 #, python-format -msgid "instance - %s not present" +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:567 +#: cinder/volume/drivers/emc/emc_smis_common.py:456 #, python-format -msgid "Powering off the VM %s" +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:572 +#: cinder/volume/drivers/emc/emc_smis_common.py:465 #, python-format -msgid "Powered off the VM %s" +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:576 -#, python-format -msgid "Unregistering the VM %s" +#: cinder/volume/drivers/emc/emc_smis_common.py:472 +msgid "Entering create_snapshot." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:579 +#: cinder/volume/drivers/emc/emc_smis_common.py:476 #, python-format -msgid "Unregistered the VM %s" +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:581 +#: cinder/volume/drivers/emc/emc_smis_common.py:488 #, python-format -msgid "" -"In vmwareapi:vmops:destroy, got this exception while un-registering the " -"VM: %s" +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:592 +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:497 +#: cinder/volume/drivers/emc/emc_smis_common.py:567 #, python-format -msgid "Deleting contents of the VM %(name)s from datastore %(datastore_name)s" +msgid "Cannot find Replication Service to create snapshot for volume %s." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:602 +#: cinder/volume/drivers/emc/emc_smis_common.py:502 #, python-format -msgid "Deleted contents of the VM %(name)s from datastore %(datastore_name)s" +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:607 +#: cinder/volume/drivers/emc/emc_smis_common.py:518 #, python-format msgid "" -"In vmwareapi:vmops:destroy, got this exception while deleting the VM " -"contents from the disk: %s" -msgstr "" - -#: cinder/virt/vmwareapi/vmops.py:615 -msgid "pause not supported for vmwareapi" -msgstr "" - -#: cinder/virt/vmwareapi/vmops.py:619 -msgid "unpause not supported for vmwareapi" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:633 +#: cinder/volume/drivers/emc/emc_smis_common.py:527 #, python-format -msgid "Suspending the VM %s " +msgid "" +"Error Create Snapshot: %(snapshot)s Volume: %(volume)s Error: " +"%(errordesc)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:637 +#: cinder/volume/drivers/emc/emc_smis_common.py:535 #, python-format -msgid "Suspended the VM %s " +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:640 -msgid "instance is powered off and can not be suspended." 
+#: cinder/volume/drivers/emc/emc_smis_common.py:541 +msgid "Entering delete_snapshot." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:643 +#: cinder/volume/drivers/emc/emc_smis_common.py:545 #, python-format -msgid "VM %s was already in suspended state. So returning without doing anything" +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:656 +#: cinder/volume/drivers/emc/emc_smis_common.py:551 #, python-format -msgid "Resuming the VM %s" +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:661 +#: cinder/volume/drivers/emc/emc_smis_common.py:559 #, python-format -msgid "Resumed the VM %s " +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:663 -msgid "instance is not in a suspended state" +#: cinder/volume/drivers/emc/emc_smis_common.py:574 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:699 -msgid "get_diagnostics not implemented for vmwareapi" +#: cinder/volume/drivers/emc/emc_smis_common.py:590 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:757 +#: cinder/volume/drivers/emc/emc_smis_common.py:599 #, python-format msgid "" -"Reconfiguring VM instance %(name)s to set the machine id with ip - " -"%(ip_addr)s" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:765 +#: cinder/volume/drivers/emc/emc_smis_common.py:611 #, python-format msgid "" -"Reconfigured VM instance %(name)s to set the machine id with ip - " -"%(ip_addr)s" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:802 +#: cinder/volume/drivers/emc/emc_smis_common.py:621 #, python-format -msgid "Creating directory with path %s" +msgid "Create export: %(volume)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:806 +#: cinder/volume/drivers/emc/emc_smis_common.py:626 #, python-format -msgid "Created directory with path %s" +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:89 +#: cinder/volume/drivers/emc/emc_smis_common.py:648 #, python-format -msgid "Downloading image %s from glance image server" +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:103 +#: cinder/volume/drivers/emc/emc_smis_common.py:663 #, python-format -msgid "Downloaded image %s from glance image server" +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:108 +#: cinder/volume/drivers/emc/emc_smis_common.py:674 #, python-format -msgid "Uploading image %s to the Glance image server" +msgid "Error mapping volume %s." 
msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:129 +#: cinder/volume/drivers/emc/emc_smis_common.py:678 #, python-format -msgid "Uploaded image %s to the Glance image server" +msgid "ExposePaths for volume %s completed successfully." msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:139 +#: cinder/volume/drivers/emc/emc_smis_common.py:694 #, python-format -msgid "Getting image size for the image %s" +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:143 +#: cinder/volume/drivers/emc/emc_smis_common.py:707 #, python-format -msgid "Got image size of %(size)s for the image %(image)s" +msgid "Error unmapping volume %s." msgstr "" -#: cinder/virt/xenapi/fake.py:553 cinder/virt/xenapi/fake.py:652 -#: cinder/virt/xenapi/fake.py:670 cinder/virt/xenapi/fake.py:732 -msgid "Raising NotImplemented" +#: cinder/volume/drivers/emc/emc_smis_common.py:711 +#, python-format +msgid "HidePaths for volume %s completed successfully." msgstr "" -#: cinder/virt/xenapi/fake.py:555 +#: cinder/volume/drivers/emc/emc_smis_common.py:724 #, python-format -msgid "xenapi.fake does not have an implementation for %s" +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" msgstr "" -#: cinder/virt/xenapi/fake.py:589 +#: cinder/volume/drivers/emc/emc_smis_common.py:739 #, python-format -msgid "Calling %(localname)s %(impl)s" +msgid "Error mapping volume %(vol)s. %(error)s" msgstr "" -#: cinder/virt/xenapi/fake.py:594 +#: cinder/volume/drivers/emc/emc_smis_common.py:744 #, python-format -msgid "Calling getter %s" +msgid "AddMembers for volume %s completed successfully." msgstr "" -#: cinder/virt/xenapi/fake.py:654 +#: cinder/volume/drivers/emc/emc_smis_common.py:757 #, python-format msgid "" -"xenapi.fake does not have an implementation for %s or it has been called " -"with the wrong number of arguments" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" msgstr "" -#: cinder/virt/xenapi/host.py:67 +#: cinder/volume/drivers/emc/emc_smis_common.py:770 #, python-format -msgid "" -"Instance %(name)s running on %(host)s could not be found in the database:" -" assuming it is a worker VM and skipping migration to a new host" +msgid "Error unmapping volume %(vol)s. %(error)s" msgstr "" -#: cinder/virt/xenapi/host.py:137 +#: cinder/volume/drivers/emc/emc_smis_common.py:775 #, python-format -msgid "Unable to get SR for this host: %s" +msgid "RemoveMembers for volume %s completed successfully." msgstr "" -#: cinder/virt/xenapi/host.py:169 -msgid "Unable to get updated status" +#: cinder/volume/drivers/emc/emc_smis_common.py:781 +#, python-format +msgid "Map volume: %(volume)s" msgstr "" -#: cinder/virt/xenapi/host.py:172 +#: cinder/volume/drivers/emc/emc_smis_common.py:790 +#: cinder/volume/drivers/emc/emc_smis_common.py:820 #, python-format -msgid "The call to %(method)s returned an error: %(e)s." +msgid "Cannot find Controller Configuration Service for storage system %s" msgstr "" -#: cinder/virt/xenapi/network_utils.py:37 +#: cinder/volume/drivers/emc/emc_smis_common.py:804 #, python-format -msgid "Found non-unique network for name_label %s" +msgid "Unmap volume: %(volume)s" msgstr "" -#: cinder/virt/xenapi/network_utils.py:55 +#: cinder/volume/drivers/emc/emc_smis_common.py:810 #, python-format -msgid "Found non-unique network for bridge %s" +msgid "Volume %s is not mapped. 
No volume to unmap." msgstr "" -#: cinder/virt/xenapi/network_utils.py:58 +#: cinder/volume/drivers/emc/emc_smis_common.py:834 #, python-format -msgid "Found no network for bridge %s" +msgid "Initialize connection: %(volume)s" msgstr "" -#: cinder/virt/xenapi/pool.py:111 +#: cinder/volume/drivers/emc/emc_smis_common.py:840 #, python-format -msgid "Unable to eject %(host)s from the pool; pool not empty" +msgid "Volume %s is already mapped." msgstr "" -#: cinder/virt/xenapi/pool.py:126 +#: cinder/volume/drivers/emc/emc_smis_common.py:852 #, python-format -msgid "Unable to eject %(host)s from the pool; No master found" +msgid "Terminate connection: %(volume)s" msgstr "" -#: cinder/virt/xenapi/pool.py:143 +#: cinder/volume/drivers/emc/emc_smis_common.py:884 #, python-format -msgid "Pool-Join failed: %(e)s" +msgid "Found Storage Type: %s" msgstr "" -#: cinder/virt/xenapi/pool.py:146 -#, python-format -msgid "Unable to join %(host)s in the pool" +#: cinder/volume/drivers/emc/emc_smis_common.py:887 +msgid "Storage type not found." msgstr "" -#: cinder/virt/xenapi/pool.py:162 +#: cinder/volume/drivers/emc/emc_smis_common.py:903 #, python-format -msgid "Pool-eject failed: %(e)s" +msgid "Found Masking View: %s" msgstr "" -#: cinder/virt/xenapi/pool.py:174 -#, python-format -msgid "Unable to set up pool: %(e)s." +#: cinder/volume/drivers/emc/emc_smis_common.py:906 +msgid "Masking View not found." msgstr "" -#: cinder/virt/xenapi/pool.py:185 -#, python-format -msgid "Pool-set_name_label failed: %(e)s" +#: cinder/volume/drivers/emc/emc_smis_common.py:928 +msgid "Ecom user not found." msgstr "" -#: cinder/virt/xenapi/vif.py:103 +#: cinder/volume/drivers/emc/emc_smis_common.py:948 #, python-format -msgid "Found no PIF for device %s" +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" msgstr "" -#: cinder/virt/xenapi/vif.py:122 -#, python-format -msgid "" -"PIF %(pif_rec['uuid'])s for network %(bridge)s has VLAN id %(pif_vlan)d. " -"Expected %(vlan_num)d" +#: cinder/volume/drivers/emc/emc_smis_common.py:952 +msgid "Ecom server not found." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:218 -msgid "Created VM" +#: cinder/volume/drivers/emc/emc_smis_common.py:959 +msgid "Cannot connect to ECOM server" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:245 +#: cinder/volume/drivers/emc/emc_smis_common.py:971 #, python-format -msgid "VBD not found in instance %s" +msgid "Found Replication Service: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:262 +#: cinder/volume/drivers/emc/emc_smis_common.py:984 #, python-format -msgid "VBD %s already detached" +msgid "Found Storage Configuration Service: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:265 +#: cinder/volume/drivers/emc/emc_smis_common.py:997 #, python-format -msgid "VBD %(vbd_ref)s detach rejected, attempt %(num_attempt)d/%(max_attempts)d" +msgid "Found Controller Configuration Service: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:270 +#: cinder/volume/drivers/emc/emc_smis_common.py:1010 #, python-format -msgid "Unable to unplug VBD %s" +msgid "Found Storage Hardware ID Management Service: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:275 +#: cinder/volume/drivers/emc/emc_smis_common.py:1054 #, python-format -msgid "Reached maximum number of retries trying to unplug VBD %s" +msgid "Pool %(storage_type)s is not found." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:286 +#: cinder/volume/drivers/emc/emc_smis_common.py:1060 #, python-format -msgid "Unable to destroy VBD %s" +msgid "Storage system not found for pool %(storage_type)s." 
msgstr "" -#: cinder/virt/xenapi/vm_utils.py:305 +#: cinder/volume/drivers/emc/emc_smis_common.py:1066 #, python-format -msgid "Creating %(vbd_type)s-type VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +msgid "Pool: %(pool)s SystemName: %(systemname)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:308 +#: cinder/volume/drivers/emc/emc_smis_common.py:1082 #, python-format -msgid "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s." +msgid "Pool name: %(poolname)s System name: %(systemname)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:319 +#: cinder/volume/drivers/emc/emc_smis_common.py:1114 #, python-format -msgid "Unable to destroy VDI %s" +msgid "Volume %(volumename)s not found on the array." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:336 +#: cinder/volume/drivers/emc/emc_smis_common.py:1117 #, python-format -msgid "" -"Created VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s)" -" on %(sr_ref)s." +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:345 +#: cinder/volume/drivers/emc/emc_smis_common.py:1130 #, python-format -msgid "Copied VDI %(vdi_ref)s from VDI %(vdi_to_copy_ref)s on %(sr_ref)s." +msgid "Source: %(volumename)s Target: %(snapshotname)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:353 +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 #, python-format -msgid "Cloned VDI %(vdi_ref)s from VDI %(vdi_to_clone_ref)s" +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " msgstr "" -#: cinder/virt/xenapi/vm_utils.py:372 +#: cinder/volume/drivers/emc/emc_smis_common.py:1158 #, python-format -msgid "No primary VDI found for %(vm_ref)s" +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:379 +#: cinder/volume/drivers/emc/emc_smis_common.py:1184 #, python-format -msgid "Snapshotting with label '%(label)s'" +msgid "Error finding %s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:392 +#: cinder/volume/drivers/emc/emc_smis_common.py:1188 #, python-format -msgid "Created snapshot %(template_vm_ref)s" +msgid "Found %(name)s: %(initiator)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:431 +#: cinder/volume/drivers/emc/emc_smis_common.py:1248 #, python-format -msgid "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s" +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:583 +#: cinder/volume/drivers/emc/emc_smis_common.py:1289 #, python-format -msgid "Creating blank HD of size %(req_size)d gigs" +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:634 +#: cinder/volume/drivers/emc/emc_smis_common.py:1302 #, python-format msgid "" -"Fast cloning is only supported on default local SR of type ext. SR on " -"this system was found to be of type %(sr_type)s. Ignoring the cow flag." +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:724 +#: cinder/volume/drivers/emc/emc_smis_common.py:1314 #, python-format msgid "" -"download_vhd %(image)s attempt %(attempt_num)d/%(max_attempts)d from " -"%(glance_host)s:%(glance_port)s" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." 
msgstr "" -#: cinder/virt/xenapi/vm_utils.py:734 +#: cinder/volume/drivers/emc/emc_smis_common.py:1326 #, python-format -msgid "download_vhd failed: %r" +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:750 +#: cinder/volume/drivers/emc/emc_smis_common.py:1361 #, python-format -msgid "Asking xapi to fetch vhd image %(image)s" +msgid "Available device number on %(storage)s: %(device)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:760 +#: cinder/volume/drivers/emc/emc_smis_common.py:1404 #, python-format -msgid "" -"xapi 'download_vhd' returned VDI of type '%(vdi_type)s' with UUID " -"'%(vdi_uuid)s'" +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:789 +#: cinder/volume/drivers/emc/emc_smis_common.py:1409 #, python-format -msgid "vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes=%(vdi_size_bytes)d" +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:805 +#: cinder/volume/drivers/emc/emc_smis_common.py:1419 #, python-format -msgid "image_size_bytes=%(size_bytes)d, allowed_size_bytes=%(allowed_size_bytes)d" +msgid "Device info: %(data)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:809 +#: cinder/volume/drivers/emc/emc_smis_common.py:1441 #, python-format -msgid "" -"Image size %(size_bytes)d exceeded instance_type allowed size " -"%(allowed_size_bytes)d" +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:831 +#: cinder/volume/drivers/emc/emc_smis_common.py:1463 #, python-format -msgid "Fetching image %(image)s, type %(image_type_str)" +msgid "Found Storage Processor System: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:844 +#: cinder/volume/drivers/emc/emc_smis_common.py:1491 #, python-format -msgid "Size for image %(image)s: %(virtual_size)d" +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1520 +msgid "Error finding Storage Hardware ID Service." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:853 +#: cinder/volume/drivers/emc/emc_smis_common.py:1526 #, python-format msgid "" -"Kernel/Ramdisk image is too large: %(vdi_size)d bytes, max %(max_size)d " -"bytes" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1538 +msgid "Error finding Target WWNs." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:870 +#: cinder/volume/drivers/emc/emc_smis_common.py:1548 #, python-format -msgid "Copying VDI %s to /boot/guest on dom0" +msgid "Add target WWN: %s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:884 +#: cinder/volume/drivers/emc/emc_smis_common.py:1550 #, python-format -msgid "Kernel/Ramdisk VDI %s destroyed" +msgid "Target WWNs: %s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:895 -msgid "Failed to fetch glance image" +#: cinder/volume/drivers/emc/emc_smis_common.py:1566 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." 
msgstr "" -#: cinder/virt/xenapi/vm_utils.py:934 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:152 #, python-format -msgid "Detected %(image_type_str)s format for image %(image_ref)s" +msgid "Could not find iSCSI export for volume %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:955 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:161 #, python-format -msgid "Looking up vdi %s for PV kernel" +msgid "Cannot find device number for volume %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:973 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:191 #, python-format -msgid "Unknown image format %(disk_image_type)s" +msgid "Found iSCSI endpoint: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1016 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:198 #, python-format -msgid "VDI %s is still available" +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1059 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:215 #, python-format -msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgid "ISCSI properties: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1061 +#: cinder/volume/drivers/hds/hds.py:70 #, python-format -msgid "(VM_UTILS) xenapi power_state -> |%s|" +msgid "Range: start LU: %(start)s, end LU: %(end)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1088 +#: cinder/volume/drivers/hds/hds.py:84 #, python-format -msgid "Unable to parse rrd of %(vm_uuid)s" +msgid "setting LU upper (end) limit to %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1108 +#: cinder/volume/drivers/hds/hds.py:92 #, python-format -msgid "Re-scanning SR %s" +msgid "%(element)s: %(val)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1136 +#: cinder/volume/drivers/hds/hds.py:103 cinder/volume/drivers/hds/hds.py:105 #, python-format -msgid "Flag sr_matching_filter '%s' does not respect formatting convention" +msgid "XML exception reading parameter: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1154 -msgid "" -"XenAPI is unable to find a Storage Repository to install guest instances " -"on. Please check your configuration and/or configure the flag " -"'sr_matching_filter'" +#: cinder/volume/drivers/hds/hds.py:178 +#, python-format +msgid "portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:197 +#, python-format +msgid "No configuration found for service: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1167 -msgid "Cannot find SR of content-type ISO" +#: cinder/volume/drivers/hds/hds.py:250 +#, python-format +msgid "HDP not found: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1175 +#: cinder/volume/drivers/hds/hds.py:289 #, python-format -msgid "ISO: looking at SR %(sr_rec)s" +msgid "iSCSI portal not found for service: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1177 -msgid "ISO: not iso content" +#: cinder/volume/drivers/hds/hds.py:327 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1180 -msgid "ISO: iso content_type, no 'i18n-key' key" +#: cinder/volume/drivers/hds/hds.py:355 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is cloned." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1183 -msgid "ISO: iso content_type, i18n-key value not 'local-storage-iso'" +#: cinder/volume/drivers/hds/hds.py:372 +#, python-format +msgid "LUN %(lun)s extended to %(size)s GB." 
msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1187 -msgid "ISO: SR MATCHing our criteria" +#: cinder/volume/drivers/hds/hds.py:395 +#, python-format +msgid "delete lun %(lun)s on %(name)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1189 -msgid "ISO: ISO, looking to see if it is host local" +#: cinder/volume/drivers/hds/hds.py:480 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created from snapshot." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1192 +#: cinder/volume/drivers/hds/hds.py:503 #, python-format -msgid "ISO: PBD %(pbd_ref)s disappeared" +msgid "LUN %(lun)s of size %(size)s MB is created as snapshot." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1195 +#: cinder/volume/drivers/hds/hds.py:522 #, python-format -msgid "ISO: PBD matching, want %(pbd_rec)s, have %(host)s" +msgid "LUN %s is deleted." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1198 -msgid "ISO: SR with local PBD" +#: cinder/volume/drivers/huawei/__init__.py:57 +msgid "_instantiate_driver: configuration not found." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1220 +#: cinder/volume/drivers/huawei/__init__.py:64 #, python-format msgid "" -"Unable to obtain RRD XML for VM %(vm_uuid)s with server details: " -"%(server)s." +"_instantiate_driver: Loading %(protocol)s driver for Huawei OceanStor " +"%(product)s series storage arrays." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1236 +#: cinder/volume/drivers/huawei/__init__.py:84 #, python-format -msgid "Unable to obtain RRD XML updates with server details: %(server)s." +msgid "" +"\"Product\" or \"Protocol\" is illegal. \"Product\" should be set to " +"either T, Dorado or HVS. \"Protocol\" should be set to either iSCSI or " +"FC. Product: %(product)s Protocol: %(protocol)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1290 +#: cinder/volume/drivers/huawei/huawei_dorado.py:74 #, python-format -msgid "Invalid statistics data from Xenserver: %s" +msgid "" +"initialize_connection: volume name: %(vol)s host: %(host)s initiator: " +"%(wwn)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1343 +#: cinder/volume/drivers/huawei/huawei_dorado.py:92 +#: cinder/volume/drivers/huawei/huawei_t.py:461 #, python-format -msgid "VHD %(vdi_uuid)s has parent %(parent_ref)s" +msgid "initialize_connection: Target FC ports WWNS: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1417 +#: cinder/volume/drivers/huawei/huawei_t.py:101 #, python-format msgid "" -"Parent %(parent_uuid)s doesn't match original parent " -"%(original_parent_uuid)s, waiting for coalesce..." +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(ini)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1427 +#: cinder/volume/drivers/huawei/huawei_t.py:159 +#: cinder/volume/drivers/huawei/rest_common.py:1278 #, python-format -msgid "VHD coalesce attempts exceeded (%(max_attempts)d), giving up..." +msgid "" +"_get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " +"check config file." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1462 +#: cinder/volume/drivers/huawei/huawei_t.py:206 +#: cinder/volume/drivers/huawei/rest_common.py:1083 #, python-format -msgid "Timeout waiting for device %s to be created" +msgid "_get_tgt_iqn: iSCSI IP is %s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1473 +#: cinder/volume/drivers/huawei/huawei_t.py:234 #, python-format -msgid "Plugging VBD %s ... " +msgid "_get_tgt_iqn: iSCSI target iqn is %s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1476 +#: cinder/volume/drivers/huawei/huawei_t.py:248 #, python-format -msgid "Plugging VBD %s done." 
+msgid "" +"_get_iscsi_tgt_port_info: Failed to get iSCSI port info. Please make sure" +" the iSCSI port IP %s is configured in array." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1478 +#: cinder/volume/drivers/huawei/huawei_t.py:323 +#: cinder/volume/drivers/huawei/huawei_t.py:552 #, python-format -msgid "VBD %(vbd_ref)s plugged as %(orig_dev)s" +msgid "" +"terminate_connection: volume: %(vol)s, host: %(host)s, connector: " +"%(initiator)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1481 +#: cinder/volume/drivers/huawei/huawei_t.py:351 #, python-format -msgid "VBD %(vbd_ref)s plugged into wrong dev, remapping to %(dev)s" +msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1490 -#, python-format -msgid "Destroying VBD for VDI %s ... " +#: cinder/volume/drivers/huawei/huawei_t.py:436 +msgid "validate_connector: The FC driver requires thewwpns in the connector." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1498 +#: cinder/volume/drivers/huawei/huawei_t.py:443 #, python-format -msgid "Destroying VBD for VDI %s done." +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(wwn)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1511 +#: cinder/volume/drivers/huawei/huawei_t.py:578 #, python-format -msgid "Running pygrub against %s" +msgid "_remove_fc_ports: FC port was not found on host %(hostid)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1518 +#: cinder/volume/drivers/huawei/huawei_utils.py:40 #, python-format -msgid "Found Xen kernel %s" +msgid "parse_xml_file: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1520 -msgid "No Xen kernel found. Booting HVM." +#: cinder/volume/drivers/huawei/huawei_utils.py:129 +#, python-format +msgid "_get_host_os_type: Host %(ip)s OS type is %(os)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1533 -msgid "Partitions:" +#: cinder/volume/drivers/huawei/rest_common.py:59 +#, python-format +msgid "HVS Request URL: %(url)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1539 +#: cinder/volume/drivers/huawei/rest_common.py:60 #, python-format -msgid " %(num)s: %(ptype)s %(size)d sectors" +msgid "HVS Request Data: %(data)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1565 +#: cinder/volume/drivers/huawei/rest_common.py:73 #, python-format -msgid "" -"Writing partition table %(primary_first)d %(primary_last)d to " -"%(dev_path)s..." +msgid "HVS Response Data: %(res)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1578 +#: cinder/volume/drivers/huawei/rest_common.py:75 #, python-format -msgid "Writing partition table %s done." +msgid "Bad response from server: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1632 -#, python-format -msgid "" -"Starting sparse_copy src=%(src_path)s dst=%(dst_path)s " -"virtual_size=%(virtual_size)d block_size=%(block_size)d" +#: cinder/volume/drivers/huawei/rest_common.py:82 +msgid "JSON transfer error" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1664 +#: cinder/volume/drivers/huawei/rest_common.py:102 #, python-format -msgid "" -"Finished sparse_copy in %(duration).2f secs, %(compression_pct).2f%% " -"reduction in size" +msgid "Login error, reason is %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1714 +#: cinder/volume/drivers/huawei/rest_common.py:166 +#, python-format msgid "" -"XenServer tools installed in this image are capable of network injection." 
-" Networking files will not bemanipulated" +"%(err)s\n" +"result: %(res)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1722 -msgid "" -"XenServer tools are present in this image but are not capable of network " -"injection" +#: cinder/volume/drivers/huawei/rest_common.py:173 +#, python-format +msgid "%s \"data\" was not in result." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1726 -msgid "XenServer tools are not installed in this image" +#: cinder/volume/drivers/huawei/rest_common.py:208 +msgid "Can't find the Qos policy in array" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1742 -msgid "Manipulating interface files directly" +#: cinder/volume/drivers/huawei/rest_common.py:246 +msgid "Can't find lun or lun group in array" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1751 +#: cinder/volume/drivers/huawei/rest_common.py:280 #, python-format -msgid "Failed to mount filesystem (expected for non-linux instances): %s" +msgid "Invalid resource pool: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:131 cinder/virt/xenapi/vmops.py:722 +#: cinder/volume/drivers/huawei/rest_common.py:298 #, python-format -msgid "Updating progress to %(progress)d" +msgid "Get pool info error, pool name is:%s" msgstr "" -#: cinder/virt/xenapi/vmops.py:231 +#: cinder/volume/drivers/huawei/rest_common.py:327 #, python-format -msgid "Attempted to power on non-existent instance bad instance id %s" +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:233 -msgid "Starting instance" +#: cinder/volume/drivers/huawei/rest_common.py:354 +#, python-format +msgid "_stop_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:303 -msgid "Removing kernel/ramdisk files from dom0" +#: cinder/volume/drivers/huawei/rest_common.py:474 +#, python-format +msgid "" +"_mapping_hostgroup_and_lungroup: lun_group: %(lun_group)sview_id: " +"%(view_id)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:358 -msgid "Failed to spawn, rolling back" +#: cinder/volume/drivers/huawei/rest_common.py:511 +#: cinder/volume/drivers/huawei/rest_common.py:543 +#, python-format +msgid "initiator name:%(initiator_name)s, volume name:%(volume)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:443 -msgid "Detected ISO image type, creating blank VM for install" +#: cinder/volume/drivers/huawei/rest_common.py:527 +#, python-format +msgid "host lun id is %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:462 -msgid "Auto configuring disk, attempting to resize partition..." +#: cinder/volume/drivers/huawei/rest_common.py:553 +#, python-format +msgid "the free wwns %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:515 +#: cinder/volume/drivers/huawei/rest_common.py:574 #, python-format -msgid "Invalid value for injected_files: %r" +msgid "the fc server properties is:%s" msgstr "" -#: cinder/virt/xenapi/vmops.py:520 +#: cinder/volume/drivers/huawei/rest_common.py:688 #, python-format -msgid "Injecting file path: '%s'" +msgid "JSON transfer data error. %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:527 -msgid "Setting admin password" +#: cinder/volume/drivers/huawei/rest_common.py:874 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:531 -msgid "Resetting network" +#: cinder/volume/drivers/huawei/rest_common.py:937 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". 
" +"LUNType:%(fetchtype)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:538 -msgid "Setting VCPU weight" +#: cinder/volume/drivers/huawei/rest_common.py:964 +#, python-format +msgid "" +"PrefetchType config is wrong. PrefetchType must in 1,2,3,4. fetchtype " +"is:%(fetchtype)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:544 -msgid "Starting VM" +#: cinder/volume/drivers/huawei/rest_common.py:970 +msgid "Use default prefetch fetchtype. Prefetch fetchtype:Intelligent." msgstr "" -#: cinder/virt/xenapi/vmops.py:551 +#: cinder/volume/drivers/huawei/rest_common.py:982 #, python-format msgid "" -"Latest agent build for %(hypervisor)s/%(os)s/%(architecture)s is " -"%(version)s" +"_wait_for_luncopy:LUNcopy status is not normal.LUNcopy name: " +"%(luncopyname)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:554 +#: cinder/volume/drivers/huawei/rest_common.py:1056 #, python-format -msgid "No agent build found for %(hypervisor)s/%(os)s/%(architecture)s" -msgstr "" - -#: cinder/virt/xenapi/vmops.py:561 -msgid "Waiting for instance state to become running" +msgid "" +"_get_iscsi_port_info: Failed to get iscsi port info through config IP " +"%(ip)s, please check config file." msgstr "" -#: cinder/virt/xenapi/vmops.py:573 -msgid "Querying agent version" +#: cinder/volume/drivers/huawei/rest_common.py:1101 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:576 +#: cinder/volume/drivers/huawei/rest_common.py:1124 #, python-format -msgid "Instance agent version: %s" +msgid "_parse_volume_type: type id: %(type_id)s config parameter is: %(params)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:581 +#: cinder/volume/drivers/huawei/rest_common.py:1157 #, python-format -msgid "Updating Agent to %s" +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the configuration file " +"%(conf)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:616 +#: cinder/volume/drivers/huawei/rest_common.py:1162 #, python-format -msgid "No opaque_ref could be determined for '%s'." +msgid "The config parameters are: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:670 -msgid "Finished snapshot and upload for VM" +#: cinder/volume/drivers/huawei/rest_common.py:1239 +#: cinder/volume/drivers/huawei/ssh_common.py:118 +#: cinder/volume/drivers/huawei/ssh_common.py:1265 +#, python-format +msgid "_check_conf_file: Config file invalid. %s must be set." msgstr "" -#: cinder/virt/xenapi/vmops.py:677 -msgid "Starting snapshot for VM" +#: cinder/volume/drivers/huawei/rest_common.py:1246 +#: cinder/volume/drivers/huawei/ssh_common.py:125 +msgid "_check_conf_file: Config file invalid. StoragePool must be set." msgstr "" -#: cinder/virt/xenapi/vmops.py:686 +#: cinder/volume/drivers/huawei/rest_common.py:1256 #, python-format -msgid "Unable to Snapshot instance: %(exc)s" +msgid "" +"_check_conf_file: Config file invalid. Host OSType invalid.\n" +"The valid values are: %(os_list)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:702 -msgid "Failed to transfer vhd to new host" +#: cinder/volume/drivers/huawei/rest_common.py:1300 +msgid "Can not find lun in array" msgstr "" -#: cinder/virt/xenapi/vmops.py:770 +#: cinder/volume/drivers/huawei/ssh_common.py:54 #, python-format -msgid "Resizing down VDI %(cow_uuid)s from %(old_gb)dGB to %(new_gb)dGB" +msgid "ssh_read: Read SSH timeout. 
%s" msgstr "" -#: cinder/virt/xenapi/vmops.py:893 -#, python-format -msgid "Resizing up VDI %(vdi_uuid)s from %(old_gb)dGB to %(new_gb)dGB" +#: cinder/volume/drivers/huawei/ssh_common.py:70 +msgid "No response message. Please check system status." msgstr "" -#: cinder/virt/xenapi/vmops.py:901 -msgid "Resize complete" +#: cinder/volume/drivers/huawei/ssh_common.py:101 +#: cinder/volume/drivers/huawei/ssh_common.py:1249 +msgid "do_setup" msgstr "" -#: cinder/virt/xenapi/vmops.py:928 +#: cinder/volume/drivers/huawei/ssh_common.py:135 +#: cinder/volume/drivers/huawei/ssh_common.py:1287 #, python-format -msgid "Failed to query agent version: %(resp)r" +msgid "" +"_check_conf_file: Config file invalid. Host OSType is invalid.\n" +"The valid values are: %(os_list)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:949 +#: cinder/volume/drivers/huawei/ssh_common.py:169 #, python-format -msgid "domid changed from %(domid)s to %(newdomid)s" +msgid "_get_login_info: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:962 +#: cinder/volume/drivers/huawei/ssh_common.py:224 #, python-format -msgid "Failed to update agent: %(resp)r" +msgid "create_volume: volume name: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:983 +#: cinder/volume/drivers/huawei/ssh_common.py:242 #, python-format -msgid "Failed to exchange keys: %(resp)r" +msgid "" +"_name_translate: Name in cinder: %(old)s, new name in storage system: " +"%(new)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:998 +#: cinder/volume/drivers/huawei/ssh_common.py:279 #, python-format -msgid "Failed to update password: %(resp)r" +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the element in configuration " +"file %(conf)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1023 +#: cinder/volume/drivers/huawei/ssh_common.py:373 +#: cinder/volume/drivers/huawei/ssh_common.py:1451 #, python-format -msgid "Failed to inject file: %(resp)r" +msgid "LUNType must be \"Thin\" or \"Thick\". LUNType:%(type)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1032 -msgid "VM already halted, skipping shutdown..." +#: cinder/volume/drivers/huawei/ssh_common.py:395 +msgid "" +"_parse_conf_lun_params: Use default prefetch type. Prefetch type: " +"Intelligent" msgstr "" -#: cinder/virt/xenapi/vmops.py:1036 -msgid "Shutting down VM" +#: cinder/volume/drivers/huawei/ssh_common.py:421 +#, python-format +msgid "" +"_get_maximum_capacity_pool_id: Failed to get pool id. Please check config" +" file and make sure the StoragePool %s is created in storage array." msgstr "" -#: cinder/virt/xenapi/vmops.py:1054 -msgid "Unable to find VBD for VM" +#: cinder/volume/drivers/huawei/ssh_common.py:436 +#, python-format +msgid "CLI command: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1097 -msgid "Using RAW or VHD, skipping kernel and ramdisk deletion" +#: cinder/volume/drivers/huawei/ssh_common.py:466 +#, python-format +msgid "" +"_execute_cli: Can not connect to IP %(old)s, try to connect to the other " +"IP %(new)s." 
msgstr "" -#: cinder/virt/xenapi/vmops.py:1104 -msgid "instance has a kernel or ramdisk but not both" +#: cinder/volume/drivers/huawei/ssh_common.py:501 +#, python-format +msgid "_execute_cli: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1111 -msgid "kernel/ramdisk files removed" +#: cinder/volume/drivers/huawei/ssh_common.py:511 +#, python-format +msgid "delete_volume: volume name: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1121 -msgid "VM destroyed" +#: cinder/volume/drivers/huawei/ssh_common.py:516 +#, python-format +msgid "delete_volume: Volume %(name)s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:1147 -msgid "Destroying VM" +#: cinder/volume/drivers/huawei/ssh_common.py:570 +#, python-format +msgid "" +"create_volume_from_snapshot: snapshot name: %(snapshot)s, volume name: " +"%(volume)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1169 -msgid "VM is not present, skipping destroy..." +#: cinder/volume/drivers/huawei/ssh_common.py:580 +#, python-format +msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:1222 +#: cinder/volume/drivers/huawei/ssh_common.py:650 #, python-format -msgid "Instance is already in Rescue Mode: %s" +msgid "_wait_for_luncopy: LUNcopy %(luncopyname)s status is %(status)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1296 +#: cinder/volume/drivers/huawei/ssh_common.py:688 #, python-format -msgid "Found %(instance_count)d hung reboots older than %(timeout)d seconds" +msgid "create_cloned_volume: src volume: %(src)s, tgt volume: %(tgt)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1300 -msgid "Automatically hard rebooting" +#: cinder/volume/drivers/huawei/ssh_common.py:697 +#, python-format +msgid "Source volume %(name)s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:1363 +#: cinder/volume/drivers/huawei/ssh_common.py:739 #, python-format -msgid "Setting migration %(migration_id)s to error: %(reason)s" +msgid "" +"extend_volume: extended volume name: %(extended_name)s new added volume " +"name: %(added_name)s new added volume size: %(added_size)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1374 +#: cinder/volume/drivers/huawei/ssh_common.py:747 #, python-format -msgid "" -"Automatically confirming migration %(migration_id)s for instance " -"%(instance_uuid)s" +msgid "extend_volume: volume %s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:1379 +#: cinder/volume/drivers/huawei/ssh_common.py:779 #, python-format -msgid "Instance %(instance_uuid)s not found" +msgid "create_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1383 -msgid "In ERROR state" +#: cinder/volume/drivers/huawei/ssh_common.py:785 +msgid "create_snapshot: Resource pool needs 1GB valid size at least." msgstr "" -#: cinder/virt/xenapi/vmops.py:1389 +#: cinder/volume/drivers/huawei/ssh_common.py:792 #, python-format -msgid "In %(task_state)s task_state, not RESIZE_VERIFY" +msgid "create_snapshot: Volume %(name)s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:1396 +#: cinder/volume/drivers/huawei/ssh_common.py:855 #, python-format -msgid "Error auto-confirming resize: %(e)s. Will retry later." +msgid "delete_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1418 -msgid "Could not get bandwidth info." +#: cinder/volume/drivers/huawei/ssh_common.py:865 +#, python-format +msgid "" +"delete_snapshot: Can not delete snapshot %s for it is a source LUN of " +"LUNCopy." 
msgstr "" -#: cinder/virt/xenapi/vmops.py:1469 -msgid "Injecting network info to xenstore" +#: cinder/volume/drivers/huawei/ssh_common.py:873 +#, python-format +msgid "delete_snapshot: Snapshot %(snap)s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:1483 -msgid "Creating vifs" +#: cinder/volume/drivers/huawei/ssh_common.py:916 +#, python-format +msgid "" +"%(func)s: %(msg)s\n" +"CLI command: %(cmd)s\n" +"CLI out: %(out)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1492 +#: cinder/volume/drivers/huawei/ssh_common.py:933 #, python-format -msgid "Creating VIF for network %(network_ref)s" +msgid "map_volume: Volume %s was not found." msgstr "" -#: cinder/virt/xenapi/vmops.py:1495 +#: cinder/volume/drivers/huawei/ssh_common.py:1079 #, python-format -msgid "Created VIF %(vif_ref)s, network %(network_ref)s" +msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1520 -msgid "Injecting hostname to xenstore" +#: cinder/volume/drivers/huawei/ssh_common.py:1102 +#, python-format +msgid "remove_map: Host %s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:1545 +#: cinder/volume/drivers/huawei/ssh_common.py:1106 #, python-format -msgid "" -"The agent call to %(method)s returned an invalid response: %(ret)r. " -"path=%(path)s; args=%(args)r" +msgid "remove_map: Volume %s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:1566 +#: cinder/volume/drivers/huawei/ssh_common.py:1119 #, python-format -msgid "TIMEOUT: The call to %(method)s timed out. args=%(args)r" +msgid "remove_map: No map between host %(host)s and volume %(volume)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1570 +#: cinder/volume/drivers/huawei/ssh_common.py:1138 #, python-format msgid "" -"NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. " -"args=%(args)r" +"_delete_map: There are IOs accessing the system. Retry to delete host map" +" %(mapid)s 10s later." msgstr "" -#: cinder/virt/xenapi/vmops.py:1575 +#: cinder/volume/drivers/huawei/ssh_common.py:1146 #, python-format -msgid "The call to %(method)s returned an error: %(e)s. args=%(args)r" +msgid "" +"_delete_map: Failed to delete host map %(mapid)s.\n" +"CLI out: %(out)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1661 -#, python-format -msgid "OpenSSL error: %s" +#: cinder/volume/drivers/huawei/ssh_common.py:1185 +msgid "_update_volume_stats: Updating volume stats." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:52 -msgid "creating sr within volume_utils" +#: cinder/volume/drivers/huawei/ssh_common.py:1277 +msgid "_check_conf_file: Config file invalid. StoragePool must be specified." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:55 cinder/virt/xenapi/volume_utils.py:83 +#: cinder/volume/drivers/huawei/ssh_common.py:1311 +msgid "" +"_get_device_type: The driver only supports Dorado5100 and Dorado 2100 G2 " +"now." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1389 #, python-format -msgid "type is = %s" +msgid "" +"create_volume_from_snapshot: %(device)s does not support create volume " +"from snapshot." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:58 cinder/virt/xenapi/volume_utils.py:86 +#: cinder/volume/drivers/huawei/ssh_common.py:1396 #, python-format -msgid "name = %s" +msgid "create_cloned_volume: %(device)s does not support clone volume." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:71 +#: cinder/volume/drivers/huawei/ssh_common.py:1404 #, python-format -msgid "Created %(label)s as %(sr_ref)s." 
+msgid "extend_volume: %(device)s does not support extend volume." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:76 cinder/virt/xenapi/volume_utils.py:174 -msgid "Unable to create Storage Repository" +#: cinder/volume/drivers/huawei/ssh_common.py:1413 +#, python-format +msgid "create_snapshot: %(device)s does not support snapshot." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:80 -msgid "introducing sr within volume_utils" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:132 +msgid "enter: do_setup" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:103 cinder/virt/xenapi/volume_utils.py:170 -#: cinder/virt/xenapi/volumeops.py:156 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:142 #, python-format -msgid "Introduced %(label)s as %(sr_ref)s." +msgid "Failed getting details for pool %s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:106 -msgid "Creating pbd for SR" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:179 +msgid "do_setup: No configured nodes." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:108 -msgid "Plugging SR" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:182 +msgid "leave: do_setup" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:116 cinder/virt/xenapi/volumeops.py:160 -msgid "Unable to introduce Storage Repository" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:186 +msgid "enter: check_for_setup_error" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:127 cinder/virt/xenapi/volumeops.py:50 -msgid "Unable to get SR using uuid" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:190 +msgid "Unable to determine system name" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:129 -#, python-format -msgid "Forgetting SR %s..." +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:193 +msgid "Unable to determine system id" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:137 -msgid "Unable to forget Storage Repository" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:196 +msgid "Unable to determine pool extent size" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:157 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:203 +#: cinder/volume/drivers/netapp/iscsi.py:122 +#: cinder/volume/drivers/netapp/nfs.py:639 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:157 #, python-format -msgid "Introducing %s..." 
+msgid "%s is not set" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:186 -#, python-format -msgid "Unable to find SR from VBD %s" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:209 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:204 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:217 #, python-format -msgid "Ignoring exception %(exc)s when getting PBDs for %(sr_ref)s" +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:210 -#, python-format -msgid "Ignoring exception %(exc)s when unplugging PBD %(pbd)s" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:225 +msgid "leave: check_for_setup_error" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:234 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:235 #, python-format -msgid "Unable to introduce VDI on SR %s" +msgid "ensure_export: Volume %s not found on storage" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:242 -#, python-format -msgid "Unable to get record of VDI %s on" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:254 +msgid "The connector does not contain the required information." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:264 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:277 #, python-format -msgid "Unable to introduce VDI for SR %s" +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:274 -#, python-format -msgid "Error finding vdis in SR %s" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:297 +msgid "CHAP secret exists for host but CHAP is disabled" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:281 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:302 #, python-format -msgid "Unable to find vbd for vdi %s" +msgid "initialize_connection: Failed to get attributes for volume %s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:315 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:314 #, python-format -msgid "Unable to obtain target information %(data)s, %(mountpoint)s" +msgid "Did not find expected column name in lsvdisk: %s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:341 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:316 #, python-format -msgid "Mountpoint cannot be translated: %s" +msgid "initialize_connection: Missing volume attribute for volume %s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:64 -msgid "Could not find VDI ref" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:333 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:69 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:342 #, python-format -msgid "Creating SR %s" +msgid "initialize_connection: Did not find a preferred node for volume %s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:73 -msgid "Could not create SR" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:365 +msgid "" +"Could not get FC connection information for the host-volume connection. " +"Is the host configured properly for FC connections?" 
msgstr "" -#: cinder/virt/xenapi/volumeops.py:76 -msgid "Could not retrieve SR record" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:380 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" msgstr "" -#: cinder/virt/xenapi/volumeops.py:81 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:385 #, python-format -msgid "Introducing SR %s" +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:85 -msgid "SR found in xapi database. No need to introduce" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:403 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:90 -msgid "Could not introduce SR" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:411 +msgid "terminate_connection: Failed to get host name from connector." msgstr "" -#: cinder/virt/xenapi/volumeops.py:94 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:421 #, python-format -msgid "Checking for SR %s" +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:106 -#, python-format -msgid "SR %s not found in the xapi database" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:447 +msgid "create_volume_from_snapshot: Source and destination size differ." msgstr "" -#: cinder/virt/xenapi/volumeops.py:112 -msgid "Could not forget SR" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:459 +msgid "create_cloned_volume: Source and destination size differ." msgstr "" -#: cinder/virt/xenapi/volumeops.py:121 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:470 #, python-format -msgid "Attach_volume: %(connection_info)s, %(instance_name)s, %(mountpoint)s" +msgid "enter: extend_volume: volume %s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:178 -#, python-format -msgid "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:474 +msgid "extend_volume: Extending a volume with snapshots is not supported." 
msgstr "" -#: cinder/virt/xenapi/volumeops.py:189 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:481 #, python-format -msgid "Unable to use SR %(sr_ref)s for instance %(instance_name)s" +msgid "leave: extend_volume: volume %s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:197 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:497 #, python-format -msgid "Unable to attach volume to instance %s" +msgid "enter: migrate_volume: id=%(id)s, host=%(host)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:200 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:523 #, python-format -msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s" +msgid "leave: migrate_volume: id=%(id)s, host=%(host)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:210 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:540 #, python-format -msgid "Detach_volume: %(instance_name)s, %(mountpoint)s" +msgid "" +"enter: retype: id=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:219 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:581 #, python-format -msgid "Unable to locate volume %s" +msgid "" +"exit: retype: ild=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:227 -#, python-format -msgid "Unable to detach volume %s" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:622 +msgid "Could not get pool data from the storage" msgstr "" -#: cinder/virt/xenapi/volumeops.py:232 -#, python-format -msgid "Unable to destroy vbd %s" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:623 +msgid "_update_volume_stats: Could not get storage pool data" msgstr "" -#: cinder/virt/xenapi/volumeops.py:239 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:44 #, python-format -msgid "Error purging SR %s" +msgid "Could not find key in output of command %(cmd)s: %(out)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:241 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:64 #, python-format -msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s" +msgid "Failed to get code level (%s)." 
msgstr "" -#: cinder/vnc/xvp_proxy.py:98 cinder/vnc/xvp_proxy.py:103 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:86 #, python-format -msgid "Error in handshake: %s" +msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s" msgstr "" -#: cinder/vnc/xvp_proxy.py:119 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:143 #, python-format -msgid "Invalid request: %s" +msgid "WWPN on node %(node)s: %(wwpn)s" msgstr "" -#: cinder/vnc/xvp_proxy.py:139 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:165 #, python-format -msgid "Request: %s" +msgid "Failed to find host %s" msgstr "" -#: cinder/vnc/xvp_proxy.py:142 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:178 #, python-format -msgid "Request made with missing token: %s" +msgid "enter: get_host_from_connector: %s" msgstr "" -#: cinder/vnc/xvp_proxy.py:153 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:207 #, python-format -msgid "Request made with invalid token: %s" +msgid "leave: get_host_from_connector: host %s" msgstr "" -#: cinder/vnc/xvp_proxy.py:160 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:218 #, python-format -msgid "Unexpected error: %s" +msgid "enter: create_host: host %s" msgstr "" -#: cinder/vnc/xvp_proxy.py:180 -#, python-format -msgid "Starting cinder-xvpvncproxy node (version %s)" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:224 +msgid "create_host: Host name is not unicode or string" msgstr "" -#: cinder/volume/api.py:74 cinder/volume/api.py:220 -msgid "status must be available" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:235 +msgid "create_host: No initiators or wwpns supplied." msgstr "" -#: cinder/volume/api.py:85 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:265 #, python-format -msgid "Quota exceeded for %(pid)s, tried to create %(size)sG volume" +msgid "leave: create_host: host %(host)s - %(host_name)s" msgstr "" -#: cinder/volume/api.py:137 -msgid "Volume status must be available or error" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:275 +#, python-format +msgid "enter: map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:301 +#, python-format +msgid "" +"leave: map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host " +"%(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:311 +#, python-format +msgid "enter: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:318 +#, python-format +msgid "unmap_vol_from_host: No mapping of volume %(vol_name)s to any host found." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:324 +#, python-format +msgid "" +"unmap_vol_from_host: Multiple mappings of volume %(vol_name)s found, no " +"host specified." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:336 +#, python-format +msgid "" +"unmap_vol_from_host: No mapping of volume %(vol_name)s to host %(host) " +"found." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:348 +#, python-format +msgid "leave: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:377 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:383 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:390 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:397 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:402 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:408 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:417 +#, python-format +msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:452 +msgid "Protocol must be specified as ' iSCSI' or ' FC'." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:495 +#, python-format +msgid "enter: create_vdisk: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:498 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:525 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping%(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:535 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within theallotted %(to)d " +"seconds timeout. Terminating." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:544 +#, python-format +msgid "" +"enter: run_flashcopy: execute FlashCopy from source %(source)s to target " +"%(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:552 +#, python-format +msgid "leave: run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:572 +#, python-format +msgid "Loopcall: _check_vdisk_fc_mappings(), vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:595 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:631 +#, python-format +msgid "Calling _ensure_vdisk_no_fc_mappings: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:639 +#, python-format +msgid "enter: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:641 +#, python-format +msgid "Tried to delete non-existant vdisk %s." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:645 +#, python-format +msgid "leave: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:649 +#, python-format +msgid "enter: create_copy: snapshot %(src)s to %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:654 +#, python-format +msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:669 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt)s from vdisk %(src)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:691 +msgid "migrate_volume started without a vdisk copy in the expected pool." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:743 +#, python-format +msgid "" +"Ignore change IO group as storage code level is %(code_level)s, below " +"then 6.4.0.0" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:35 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:211 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:244 +#, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:53 +#, python-format +msgid "Expected no output from CLI command %(cmd)s, got %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:65 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:256 +#, python-format +msgid "" +"Failed to parse CLI output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:142 +msgid "Must pass wwpn or host to lsfabric." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:158 +#, python-format +msgid "Did not find success message nor error for %(fun)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:163 +msgid "" +"storwize_svc_multihostmap_enabled is set to False, not allowing multi " +"host mapping." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:347 +#, python-format +msgid "Did not find expected key %(key)s in %(fun)s: %(raw)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:382 +#, python-format +msgid "" +"Unexpected CLI response: header/row mismatch. header: %(header)s, row: " +"%(row)s" +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:419 +#, python-format +msgid "No element by given name %s." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:440 +msgid "Not a valid value for NaElement." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:444 +msgid "NaElement name cannot be null." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:468 +msgid "Type cannot be converted into NaElement." 
+msgstr "" + +#: cinder/volume/drivers/netapp/common.py:75 +msgid "Required configuration not found" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:103 +#, python-format +msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:109 +#, python-format +msgid "Storage family %s is not supported" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:116 +#, python-format +msgid "No default storage protocol found for storage family %(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:123 +#, python-format +msgid "" +"Protocol %(storage_protocol)s is not supported for storage family " +"%(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:130 +#, python-format +msgid "" +"NetApp driver of family %(storage_family)s and protocol " +"%(storage_protocol)s loaded" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:139 +msgid "Only loading netapp drivers supported." +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:158 +#, python-format +msgid "" +"The configured NetApp driver is deprecated. Please refer the link to " +"resolve the issue '%s'." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:69 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:105 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:150 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:166 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:175 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:191 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:227 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:232 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:238 +#, python-format +msgid "Failed to get LUN target details for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:249 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:252 +#, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:290 +#, python-format +msgid "Snapshot %s deletion successful" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:310 +#: cinder/volume/drivers/netapp/iscsi.py:565 +#: cinder/volume/drivers/netapp/nfs.py:99 +#: cinder/volume/drivers/netapp/nfs.py:206 +#, python-format +msgid "Resizing %s failed. Cleaning volume." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:325 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:412 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:431 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:511 +msgid "Object is not a NetApp LUN." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:543 +#, python-format +msgid "Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:545 +#, python-format +msgid "Error getting lun attribute. Exception: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:600 +#, python-format +msgid "No need to extend volume %s as it is already the requested new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:606 +#, python-format +msgid "Resizing lun %s directly to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:633 +#, python-format +msgid "Lun %(path)s geometry failed. Message - %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:662 +#, python-format +msgid "Moving lun %(name)s to %(new_name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:677 +#, python-format +msgid "Resizing lun %s using sub clone to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:684 +#, python-format +msgid "%s cannot be sub clone resized as it is hosted on compressed volume" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:690 +#, python-format +msgid "%s cannot be sub clone resized as it contains no blocks." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:707 +#, python-format +msgid "Post clone resize lun %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:718 +#, python-format +msgid "Failure staging lun %s to tmp." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:723 +#, python-format +msgid "Failure moving new cloned lun to %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:727 +#, python-format +msgid "Failure deleting staged tmp lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:730 +#, python-format +msgid "Unknown exception in post clone resize lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:732 +#, python-format +msgid "Exception details: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:736 +msgid "Getting lun block count." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:741 +#, python-format +msgid "Failure getting lun info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:785 +#, python-format +msgid "Failed to get vol with required size and extra specs for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:796 +#, python-format +msgid "Error provisioning vol %(name)s on %(volume)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:841 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:982 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:986 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1087 +msgid "Cluster ssc is not updated. No volume stats found." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1149 +#: cinder/volume/drivers/netapp/nfs.py:1080 +msgid "Unsupported ONTAP version. ONTAP version 7.3.1 and above is supported." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1153 +#: cinder/volume/drivers/netapp/nfs.py:1084 +#: cinder/volume/drivers/netapp/utils.py:320 +msgid "Api version could not be determined." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1164 +#, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1273 +#, python-format +msgid "Error finding luns for volume %s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1390 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1393 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1456 +msgid "Volume refresh job already running. Returning..." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1462 +#, python-format +msgid "Error refreshing vol capacity. Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1470 +#, python-format +msgid "Refreshing capacity info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:104 +#: cinder/volume/drivers/netapp/nfs.py:211 +#, python-format +msgid "NFS file %s not discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:224 +#, python-format +msgid "Copied image to volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:230 +#, python-format +msgid "Registering image in cache %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:237 +#, python-format +msgid "" +"Exception while registering image %(image_id)s in cache. Exception: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:250 +#, python-format +msgid "Found cache file for image %(image_id)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:263 +#, python-format +msgid "Cloning img from cache for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:271 +msgid "Image cache cleaning in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:282 +msgid "Image cache cleaning in progress." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:293 +#, python-format +msgid "Cleaning cache for share %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:298 +#, python-format +msgid "Files to be queued for deletion %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:305 +#, python-format +msgid "Exception during cache cleaning %(share)s. Message - %(ex)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:311 +msgid "Image cache cleaning done." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:336 +#, python-format +msgid "Bytes to free %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:343 +#, python-format +msgid "Delete file path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:358 +#, python-format +msgid "Deleting file at path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:363 +#, python-format +msgid "Exception during deleting %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:395 +#, python-format +msgid "Unexpected exception in cloning image %(image_id)s. 
Message: %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:411 +#, python-format +msgid "Cloning image %s from cache" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:415 +#, python-format +msgid "Cache share: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:425 +#, python-format +msgid "Unexpected exception during image cloning in share %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:431 +#, python-format +msgid "Cloning image %s directly in share" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:436 +#, python-format +msgid "Share is cloneable %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:443 +#, python-format +msgid "Image is raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:450 +#, python-format +msgid "Image will locally be converted to raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:457 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:467 +#, python-format +msgid "Performing post clone for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:474 +msgid "NFS file could not be discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:478 +msgid "Checking file for resize" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:482 +#, python-format +msgid "Resizing file to %sG" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:488 +msgid "Resizing image file failed." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:510 +msgid "Discover file retries exhausted." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:529 +#, python-format +msgid "Image location not in the expected format %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:557 +#, python-format +msgid "Found possible share matches %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:561 +msgid "Unexpected exception while short listing used share." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:599 +#, python-format +msgid "Extending volume %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:710 +#, python-format +msgid "Shares on vserver %s will only be used for provisioning." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:714 +#: cinder/volume/drivers/netapp/nfs.py:892 +msgid "No vserver set in config. SSC will be disabled." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:757 +#, python-format +msgid "Exception creating vol %(name)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:765 +#, python-format +msgid "Volume %s could not be created on shares." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:815 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:856 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:862 +#, python-format +msgid "" +"Cloning with params volume %(volume)s, src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:924 +msgid "No cluster ssc stats found. Wait for next volume stats update." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:941 +msgid "No shares found hence skipping ssc refresh." 
+msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:978 +#: cinder/volume/drivers/netapp/nfs.py:1221 +#, python-format +msgid "Shortlisted del elg files %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:983 +#: cinder/volume/drivers/netapp/nfs.py:1226 +#, python-format +msgid "Getting file usage for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:988 +#: cinder/volume/drivers/netapp/nfs.py:1231 +#, python-format +msgid "file-usage for path %(path)s is %(bytes)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1005 +#: cinder/volume/drivers/netapp/nfs.py:1268 +#, python-format +msgid "Share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1007 +#: cinder/volume/drivers/netapp/nfs.py:1270 +#, python-format +msgid "No share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1038 +#, python-format +msgid "Found volume %(vol)s for share %(share)s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1129 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1139 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:241 +#, python-format +msgid "Unexpected error while creating ssc vol list. Message - %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:272 +#, python-format +msgid "Exception querying aggr options. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:313 +#, python-format +msgid "Exception querying sis information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:347 +#, python-format +msgid "Exception querying mirror information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:379 +#, python-format +msgid "Exception querying storage disk. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:421 +#, python-format +msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:455 +#, python-format +msgid "Successfully completed stale refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:482 +#, python-format +msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:488 +#, python-format +msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:500 +msgid "Backend not a VolumeDriver." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:502 +msgid "Backend server not NaServer." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:505 +msgid "ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:517 +msgid "refresh stale ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:616 +msgid "Fatal error: User not permitted to query NetApp volumes." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:623 +#, python-format +msgid "" +"The user does not have access or sufficient privileges to use all ssc " +"apis. The ssc features %s may not work as expected." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:122 +msgid "ems executed successfully." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:124 +#, python-format +msgid "Failed to invoke ems. 
Message : %s" +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:137 +msgid "" +"It is not the recommended way to use drivers by NetApp. Please use " +"NetAppDriver to achieve the functionality." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:160 +msgid "Requires an NaServer instance." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:317 +msgid "Unsupported Clustered Data ONTAP version." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:99 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:150 +#, python-format +msgid "Extending volume: %(id)s New size: %(size)s GB" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:166 +#, python-format +msgid "Volume %s does not exist, it seems it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:179 +#, python-format +msgid "Cannot delete snapshot %(origin): %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:190 +#, python-format +msgid "Creating temp snapshot of the original volume: %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:200 +#: cinder/volume/drivers/nexenta/nfs.py:200 +#, python-format +msgid "Volume creation failed, deleting created snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:205 +#: cinder/volume/drivers/nexenta/nfs.py:205 +#, python-format +msgid "Failed to delete zfs snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:223 +#, python-format +msgid "Enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:250 +#, python-format +msgid "Remote NexentaStor appliance at %s should be SSH-bound." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:267 +#, python-format +msgid "" +"Cannot send source snapshot %(src)s to destination %(dst)s. Reason: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:275 +#, python-format +msgid "" +"Cannot delete temporary source snapshot %(src)s on NexentaStor Appliance:" +" %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:281 +#, python-format +msgid "Cannot delete source volume %(volume)s on NexentaStor Appliance: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:318 +#, python-format +msgid "Snapshot %s does not exist, it seems it was already deleted." 
+msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:439 +#: cinder/volume/drivers/windows/windows_utils.py:230 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:449 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:461 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:471 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:481 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:514 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:522 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:83 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:88 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:89 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:90 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:96 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:85 +#, python-format +msgid "Volume %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:89 +#, python-format +msgid "Folder %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:114 +#, python-format +msgid "Creating folder on Nexenta Store %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:146 +#, python-format +msgid "Cannot destroy created folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:176 +#, python-format +msgid "Cannot destroy cloned folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:227 +#, python-format +msgid "Folder %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:237 +#: cinder/volume/drivers/nexenta/nfs.py:268 +#, python-format +msgid "Snapshot %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:302 +#, python-format +msgid "Creating regular file: %s.This may take some time." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:313 +#, python-format +msgid "Regular file: %s created." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:365 +#, python-format +msgid "Sharing folder %s on Nexenta Store" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:393 +#, python-format +msgid "Shares loaded: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/utils.py:46 +#, python-format +msgid "Invalid value: \"%s\"" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:93 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:99 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:107 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:137 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:190 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:246 +#, python-format +msgid "Snapshot info: %(name)s => %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:321 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:79 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:166 +#, python-format +msgid "Invalid hp3parclient version. Version %s or greater required." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:179 +#, python-format +msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:193 +#, python-format +msgid "HP3PARCommon %(common_ver)s, hp3parclient %(rest_ver)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:212 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:494 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:228 +#, python-format +msgid "Failed to get domain because CPG (%s) doesn't exist on array." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:247 +#, python-format +msgid "Error extending volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:347 +#, python-format +msgid "command %s failed" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:390 +#, python-format +msgid "Error running ssh command: %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:586 +#, python-format +msgid "VV Set %s does not exist." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:633 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:684 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:752 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1004 +#, python-format +msgid "Failure in update_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1019 +#, python-format +msgid "Failure in clear_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1031 +#, python-format +msgid "Error attaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1039 +#, python-format +msgid "Error detaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:124 +#, python-format +msgid "Invalid IP address format '%s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:158 +#, python-format +msgid "" +"Found invalid iSCSI IP address(s) in configuration option(s) " +"hp3par_iscsi_ips or iscsi_ip_address '%s.'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:164 +msgid "At least one valid iSCSI IP address must be set." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:266 +msgid "Least busy iSCSI port not found, using first iSCSI port in list." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:75 +#, python-format +msgid "Failure while invoking function: %(func)s. Error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:162 +#, python-format +msgid "Error while terminating session: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:165 +msgid "Successfully established connection to the server." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:172 +#, python-format +msgid "Error while logging out the user: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:218 +#, python-format +msgid "" +"Not authenticated error occurred. Will create session and try API call " +"again: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:258 +#, python-format +msgid "Task: %(task)s progress: %(prog)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:262 +#, python-format +msgid "Task %s status: success." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:266 +#: cinder/volume/drivers/vmware/api.py:271 +#, python-format +msgid "Task: %(task)s failed with error: %(err)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:290 +msgid "Lease is ready." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:294 +msgid "Lease initializing..." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:304 +#, python-format +msgid "Error: unknown lease state %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:51 +#, python-format +msgid "Read %(bytes)s out of %(max)s from ThreadSafePipe." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:56 +#, python-format +msgid "Completed transfer of size %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:102 +#, python-format +msgid "Initiating image service update on image: %(image)s with meta: %(meta)s" +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:117 +#, python-format +msgid "Glance image: %s is now active." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:123 +#, python-format +msgid "Glance image: %s is in killed state." 
+msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:132 +#, python-format +msgid "Glance image %(id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:171 +#, python-format +msgid "" +"Exception during HTTP connection close in VMwareHTTPWrite. Exception is " +"%s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:203 +#: cinder/volume/drivers/vmware/read_write_util.py:292 +msgid "Could not retrieve URL from lease." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:206 +#, python-format +msgid "Opening vmdk url: %s for write." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:231 +#, python-format +msgid "Written %s bytes to vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:242 +#: cinder/volume/drivers/vmware/read_write_util.py:318 +#, python-format +msgid "Updating progress to %s percent." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:258 +#: cinder/volume/drivers/vmware/read_write_util.py:334 +msgid "Lease released." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:260 +#: cinder/volume/drivers/vmware/read_write_util.py:336 +#, python-format +msgid "Lease is already in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:295 +#, python-format +msgid "Opening vmdk url: %s for read." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:307 +#, python-format +msgid "Read %s bytes from vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:150 +#, python-format +msgid "Error(s): %s occurred in the call to RetrievePropertiesEx." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:189 +#, python-format +msgid "No such SOAP method %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:198 +#, python-format +msgid "httplib error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:209 +#, python-format +msgid "Socket error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:218 +#, python-format +msgid "Type error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:225 +#, python-format +msgid "Error in %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:112 +#, python-format +msgid "Returning spec value %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:115 +#, python-format +msgid "Invalid spec value: %s specified." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:118 +#, python-format +msgid "Returning default spec value: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:169 +#, python-format +msgid "%s not set." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:174 +#, python-format +msgid "Successfully setup driver: %(driver)s for server: %(ip)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:219 +msgid "Backing not available, no operation to be performed." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:287 +#, python-format +msgid "" +"Unable to pick datastore to accommodate %(size)s bytes from the " +"datastores: %(dss)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:293 +#, python-format +msgid "" +"Selected datastore: %(datastore)s with %(host_count)d connected host(s) " +"for the volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:375 +#, python-format +msgid "" +"Unable to find suitable datastore for volume of size: %(vol)s GB under " +"host: %(host)s. 
More details: %(excep)s"
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:385
+#, python-format
+msgid "Unable to find host to accommodate a disk of size: %s in the inventory."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:412
+#, python-format
+msgid ""
+"Unable to find suitable datastore for volume: %(vol)s under host: "
+"%(host)s. More details: %(excep)s"
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:422
+#, python-format
+msgid "Unable to create volume: %s in the inventory."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:441
+#, python-format
+msgid "The instance: %s for which initialize connection is called, exists."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:448
+#, python-format
+msgid "There is no backing for the volume: %s. Need to create one."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:456
+msgid "The instance for which initialize connection is called, does not exist."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:461
+#, python-format
+msgid "Trying to boot from an empty volume: %s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:470
+#, python-format
+msgid ""
+"Returning connection_info: %(info)s for volume: %(volume)s with "
+"connector: %(connector)s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:518
+#, python-format
+msgid "Snapshot of volume not supported in state: %s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:523
+#, python-format
+msgid "There is no backing, so will not create snapshot: %s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:528
+#, python-format
+msgid "Successfully created snapshot: %s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:549
+#, python-format
+msgid "Delete snapshot of volume not supported in state: %s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:554
+#, python-format
+msgid "There is no backing, and so there is no snapshot: %s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:558
+#, python-format
+msgid "Successfully deleted snapshot: %s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:586
+#, python-format
+msgid "Successfully cloned new backing: %(back)s from source VMDK file: %(vmdk)s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:603
+#, python-format
+msgid ""
+"There is no backing for the source volume: %(svol)s. Not creating any "
+"backing for the volume: %(vol)s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:633
+#, python-format
+msgid ""
+"There is no backing for the source snapshot: %(snap)s. Not creating any "
+"backing for the volume: %(vol)s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:642
+#: cinder/volume/drivers/vmware/vmdk.py:982
+#, python-format
+msgid ""
+"There is no snapshot point for the snapshotted volume: %(snap)s. Not "
+"creating any backing for the volume: %(vol)s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:678
+#, python-format
+msgid "Cannot create image of disk format: %s. Only vmdk disk format is accepted."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:713
+#: cinder/volume/drivers/vmware/vmdk.py:771
+#, python-format
+msgid "Fetching glance image: %(id)s to server: %(host)s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:722
+#: cinder/volume/drivers/vmware/vmdk.py:792
+#, python-format
+msgid "Done copying image: %(id)s to volume: %(vol)s."
+msgstr ""
+
+#: cinder/volume/drivers/vmware/vmdk.py:725
+#, python-format
+msgid ""
+"Exception in copy_image_to_volume: %(excep)s. Deleting the backing: "
+"%(back)s."
+msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:746 +#, python-format +msgid "Exception in _select_ds_for_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:749 +#, python-format +msgid "Selected datastore %(ds)s for new volume of size %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:784 +#, python-format +msgid "Exception in copy_image_to_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:787 +#, python-format +msgid "Deleting the backing: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:808 +#, python-format +msgid "Copy glance image: %s to create new volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:842 +msgid "Upload to glance of attached volume is not supported." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:847 +#, python-format +msgid "Copy Volume: %s to new image." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:853 +#, python-format +msgid "Backing not found, creating for volume: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:872 +#, python-format +msgid "Done copying volume %(vol)s to a new image %(img)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:922 +#, python-format +msgid "Relocating volume: %(backing)s to %(ds)s and %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:961 +#: cinder/volume/drivers/vmware/volumeops.py:630 +#, python-format +msgid "Successfully created clone: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:974 +#, python-format +msgid "" +"There is no backing for the snapshoted volume: %(snap)s. Not creating any" +" backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1010 +#, python-format +msgid "" +"There is no backing for the source volume: %(src)s. Not creating any " +"backing for volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1018 +#, python-format +msgid "Linked clone of source volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:94 +#, python-format +msgid "Downloading image: %s from glance image server as a flat vmdk file." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:107 +#: cinder/volume/drivers/vmware/vmware_images.py:126 +#, python-format +msgid "Downloaded image: %s from glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:113 +#, python-format +msgid "Downloading image: %s from glance image server using HttpNfc import." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:132 +#, python-format +msgid "Uploading image: %s to the Glance image server using HttpNfc export." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:158 +#, python-format +msgid "Uploaded image: %s to the Glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:87 +#, python-format +msgid "Did not find any backing with name: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:94 +#, python-format +msgid "Deleting the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:97 +#, python-format +msgid "Initiated deletion of VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:99 +#, python-format +msgid "Deleted the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:237 +#, python-format +msgid "There are no valid datastores attached to %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:289 +#, python-format +msgid "" +"Creating folder: %(child_folder_name)s under parent folder: " +"%(parent_folder)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:306 +#, python-format +msgid "Child folder already present: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:314 +#, python-format +msgid "Created child folder: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:365 +#, python-format +msgid "Spec for creating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:383 +#, python-format +msgid "" +"Creating volume backing name: %(name)s disk_type: %(disk_type)s size_kb: " +"%(size_kb)s at folder: %(folder)s resourse pool: %(resource_pool)s " +"datastore name: %(ds_name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:395 +#, python-format +msgid "Initiated creation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:398 +#, python-format +msgid "Successfully created volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:438 +#, python-format +msgid "Spec for relocating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:452 +#, python-format +msgid "" +"Relocating backing: %(backing)s to datastore: %(ds)s and resource pool: " +"%(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:462 +#, python-format +msgid "Initiated relocation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:464 +#, python-format +msgid "" +"Successfully relocated volume backing: %(backing)s to datastore: %(ds)s " +"and resource pool: %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:474 +#, python-format +msgid "Moving backing: %(backing)s to folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:479 +#, python-format +msgid "Initiated move of volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:482 +#, python-format +msgid "Successfully moved volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:494 +#, python-format +msgid "Snapshoting backing: %(backing)s with name: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:501 +#, python-format +msgid "Initiated snapshot of volume backing: %(backing)s named: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:505 +#, python-format +msgid "Successfully created snapshot: %(snap)s for volume backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:553 +#, python-format +msgid "Deleting the snapshot: %(name)s from backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:558 +#, python-format +msgid "" +"Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " +"delete anything." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:565 +#, python-format +msgid "Initiated snapshot: %(name)s deletion for backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:569 +#, python-format +msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:597 +#, python-format +msgid "Spec for cloning the backing: %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:613 +#, python-format +msgid "" +"Creating a clone of backing: %(back)s, named: %(name)s, clone type: " +"%(type)s from snapshot: %(snap)s on datastore: %(ds)s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:627 +#, python-format +msgid "Initiated clone of backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:638 +#, python-format +msgid "Deleting file: %(file)s under datacenter: %(dc)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:646 +#, python-format +msgid "Initiated deletion via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:648 +#, python-format +msgid "Successfully deleted file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:701 +msgid "Copying disk data before snapshot of the VM" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:711 +#, python-format +msgid "Initiated copying disk data via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:713 +#, python-format +msgid "Successfully copied disk at: %(src)s to: %(dest)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:722 +#, python-format +msgid "Deleting vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:729 +#, python-format +msgid "Initiated deleting vmdk file via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:731 +#, python-format +msgid "Deleted vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/windows/windows.py:102 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:47 +#, python-format +msgid "" +"check_for_setup_error: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:53 +msgid "check_for_setup_error: there is no ISCSI traffic listening." +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:63 +#, python-format +msgid "" +"get_host_information: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:73 +#, python-format +msgid "" +"get_host_information: the ISCSI target information could not be " +"retrieved. WMI exception: %s" msgstr "" -#: cinder/volume/api.py:142 +#: cinder/volume/drivers/windows/windows_utils.py:105 #, python-format -msgid "Volume still has %d dependent snapshots" +msgid "" +"associate_initiator_with_iscsi_target: an association between initiator: " +"%(init)s and target name: %(target)s could not be established. WMI " +"exception: %(wmi_exc)s" msgstr "" -#: cinder/volume/api.py:223 -msgid "already attached" +#: cinder/volume/drivers/windows/windows_utils.py:123 +#, python-format +msgid "" +"delete_iscsi_target: error when deleting the iscsi target associated with" +" target name: %(target)s . WMI exception: %(wmi_exc)s" msgstr "" -#: cinder/volume/api.py:230 -msgid "already detached" +#: cinder/volume/drivers/windows/windows_utils.py:139 +#, python-format +msgid "" +"create_volume: error when creating the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" msgstr "" -#: cinder/volume/api.py:292 -msgid "must be available" +#: cinder/volume/drivers/windows/windows_utils.py:157 +#, python-format +msgid "" +"delete_volume: error when deleting the volume name: %(vol_name)s . 
WMI " +"exception: %(wmi_exc)s" msgstr "" -#: cinder/volume/api.py:325 -msgid "Volume Snapshot status must be available or error" +#: cinder/volume/drivers/windows/windows_utils.py:177 +#, python-format +msgid "" +"create_snapshot: error when creating the snapshot name: %(vol_name)s . " +"WMI exception: %(wmi_exc)s" msgstr "" -#: cinder/volume/driver.py:96 +#: cinder/volume/drivers/windows/windows_utils.py:193 #, python-format -msgid "Recovering from a failed execute. Try number %s" +msgid "" +"create_volume_from_snapshot: error when creating the volume name: " +"%(vol_name)s from snapshot name: %(snap_name)s. WMI exception: " +"%(wmi_exc)s" msgstr "" -#: cinder/volume/driver.py:106 +#: cinder/volume/drivers/windows/windows_utils.py:208 #, python-format -msgid "volume group %s doesn't exist" +msgid "" +"delete_snapshot: error when deleting the snapshot name: %(snap_name)s . " +"WMI exception: %(wmi_exc)s" msgstr "" -#: cinder/volume/driver.py:270 +#: cinder/volume/drivers/windows/windows_utils.py:223 #, python-format -msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %d" +msgid "" +"create_iscsi_target: error when creating iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" msgstr "" -#: cinder/volume/driver.py:318 +#: cinder/volume/drivers/windows/windows_utils.py:240 #, python-format -msgid "Skipping remove_export. No iscsi_target provisioned for volume: %d" +msgid "" +"remove_iscsi_target: error when deleting iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" msgstr "" -#: cinder/volume/driver.py:327 +#: cinder/volume/drivers/windows/windows_utils.py:255 #, python-format msgid "" -"Skipping remove_export. No iscsi_target is presently exported for volume:" -" %d" +"add_disk_to_target: error adding disk associated to volume : %(vol_name)s" +" to the target name: %(tar_name)s . WMI exception: %(wmi_exc)s" msgstr "" -#: cinder/volume/driver.py:337 -msgid "ISCSI provider_location not stored, using discovery" +#: cinder/volume/drivers/windows/windows_utils.py:273 +#, python-format +msgid "" +"copy_vhd_disk: error when copying disk from source path : %(src_path)s to" +" destination path: %(dest_path)s . WMI exception: %(wmi_exc)s" msgstr "" -#: cinder/volume/driver.py:384 +#: cinder/volume/drivers/windows/windows_utils.py:290 #, python-format -msgid "Could not find iSCSI export for volume %s" +msgid "" +"extend: error when extending the volume: %(vol_name)s .WMI exception: " +"%(wmi_exc)s" msgstr "" -#: cinder/volume/driver.py:388 +#: cinder/volume/flows/common.py:52 #, python-format -msgid "ISCSI Discovery: Found %s" +msgid "Restoring source %(source_volid)s status to %(status)s" msgstr "" -#: cinder/volume/driver.py:466 +#: cinder/volume/flows/common.py:58 #, python-format -msgid "Cannot confirm exported volume id:%(volume_id)s." 
+msgid "" +"Failed setting source volume %(source_volid)s back to its initial " +"%(source_status)s status" msgstr "" -#: cinder/volume/driver.py:493 +#: cinder/volume/flows/common.py:83 #, python-format -msgid "FAKE ISCSI: %s" +msgid "Updating volume: %(volume_id)s with %(update)s due to: %(reason)s" msgstr "" -#: cinder/volume/driver.py:505 +#: cinder/volume/flows/common.py:90 +#: cinder/volume/flows/manager/create_volume.py:676 #, python-format -msgid "rbd has no pool %s" +msgid "Failed updating volume %(volume_id)s with %(update)s" msgstr "" -#: cinder/volume/driver.py:579 +#: cinder/volume/flows/api/create_volume.py:81 #, python-format -msgid "Sheepdog is not working: %s" +msgid "Originating snapshot status must be one of %s values" msgstr "" -#: cinder/volume/driver.py:581 -msgid "Sheepdog is not working" +#: cinder/volume/flows/api/create_volume.py:103 +#, python-format +msgid "" +"Unable to create a volume from an originating source volume when its " +"status is not one of %s values" msgstr "" -#: cinder/volume/driver.py:680 cinder/volume/driver.py:685 +#: cinder/volume/flows/api/create_volume.py:126 #, python-format -msgid "LoggingVolumeDriver: %s" +msgid "" +"Volume size %(size)sGB cannot be smaller than the snapshot size " +"%(snap_size)sGB. They must be >= original snapshot size." msgstr "" -#: cinder/volume/manager.py:96 +#: cinder/volume/flows/api/create_volume.py:135 #, python-format -msgid "Re-exporting %s volumes" +msgid "" +"Volume size %(size)sGB cannot be smaller than original volume size " +"%(source_size)sGB. They must be >= original volume size." msgstr "" -#: cinder/volume/manager.py:101 +#: cinder/volume/flows/api/create_volume.py:144 #, python-format -msgid "volume %s: skipping export" +msgid "Volume size %(size)s must be an integer and greater than 0" msgstr "" -#: cinder/volume/manager.py:107 +#: cinder/volume/flows/api/create_volume.py:186 #, python-format -msgid "volume %s: creating" +msgid "" +"Size of specified image %(image_size)sGB is larger than volume size " +"%(volume_size)sGB." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:194 +#, python-format +msgid "" +"Volume size %(volume_size)sGB cannot be smaller than the image minDisk " +"size %(min_disk)sGB." msgstr "" -#: cinder/volume/manager.py:119 +#: cinder/volume/flows/api/create_volume.py:212 #, python-format -msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgid "Metadata property key %s greater than 255 characters" msgstr "" -#: cinder/volume/manager.py:131 +#: cinder/volume/flows/api/create_volume.py:217 #, python-format -msgid "volume %s: creating export" +msgid "Metadata property key %s value greater than 255 characters" msgstr "" -#: cinder/volume/manager.py:144 +#: cinder/volume/flows/api/create_volume.py:254 #, python-format -msgid "volume %s: created successfully" +msgid "Availability zone '%s' is invalid" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:267 +msgid "Volume must be in the same availability zone as the snapshot" msgstr "" -#: cinder/volume/manager.py:153 -msgid "Volume is still attached" +#: cinder/volume/flows/api/create_volume.py:276 +msgid "Volume must be in the same availability zone as the source volume" msgstr "" -#: cinder/volume/manager.py:155 -msgid "Volume is not local to this node" +#: cinder/volume/flows/api/create_volume.py:315 +msgid "Volume type will be changed to be the same as the source volume." 
msgstr "" -#: cinder/volume/manager.py:159 +#: cinder/volume/flows/api/create_volume.py:463 #, python-format -msgid "volume %s: removing export" +msgid "Failed destroying volume entry %s" msgstr "" -#: cinder/volume/manager.py:161 +#: cinder/volume/flows/api/create_volume.py:546 #, python-format -msgid "volume %s: deleting" +msgid "Failed rolling back quota for %s reservations" msgstr "" -#: cinder/volume/manager.py:164 +#: cinder/volume/flows/api/create_volume.py:590 #, python-format -msgid "volume %s: volume is busy" +msgid "Failed to update quota for deleting volume: %s" msgstr "" -#: cinder/volume/manager.py:176 +#: cinder/volume/flows/api/create_volume.py:678 +#: cinder/volume/flows/manager/create_volume.py:192 #, python-format -msgid "volume %s: deleted successfully" +msgid "Volume %s: create failed" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:682 +msgid "Unexpected build error:" msgstr "" -#: cinder/volume/manager.py:183 +#: cinder/volume/flows/manager/create_volume.py:105 #, python-format -msgid "snapshot %s: creating" +msgid "" +"Volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d due to " +"%(reason)s" msgstr "" -#: cinder/volume/manager.py:187 +#: cinder/volume/flows/manager/create_volume.py:124 #, python-format -msgid "snapshot %(snap_name)s: creating" +msgid "Volume %s: re-scheduled" msgstr "" -#: cinder/volume/manager.py:202 +#: cinder/volume/flows/manager/create_volume.py:141 #, python-format -msgid "snapshot %s: created successfully" +msgid "Updating volume %(volume_id)s with %(update)s." msgstr "" -#: cinder/volume/manager.py:211 +#: cinder/volume/flows/manager/create_volume.py:146 #, python-format -msgid "snapshot %s: deleting" +msgid "Volume %s: resetting 'creating' status failed." msgstr "" -#: cinder/volume/manager.py:214 +#: cinder/volume/flows/manager/create_volume.py:165 #, python-format -msgid "snapshot %s: snapshot is busy" +msgid "Volume %s: rescheduling failed" msgstr "" -#: cinder/volume/manager.py:226 +#: cinder/volume/flows/manager/create_volume.py:308 #, python-format -msgid "snapshot %s: deleted successfully" +msgid "" +"Failed notifying about the volume action %(event)s for volume " +"%(volume_id)s" msgstr "" -#: cinder/volume/manager.py:310 -msgid "Checking volume capabilities" +#: cinder/volume/flows/manager/create_volume.py:345 +#, python-format +msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." msgstr "" -#: cinder/volume/manager.py:314 +#: cinder/volume/flows/manager/create_volume.py:347 #, python-format -msgid "New capabilities found: %s" +msgid "" +"Failed updating volume %(vol_id)s metadata using the provided " +"%(src_type)s %(src_id)s metadata" msgstr "" -#: cinder/volume/manager.py:325 -msgid "Clear capabilities" +#: cinder/volume/flows/manager/create_volume.py:405 +#, python-format +msgid "" +"Failed fetching snapshot %(snapshot_id)s bootable flag using the provided" +" glance snapshot %(snapshot_ref_id)s volume reference" msgstr "" -#: cinder/volume/manager.py:329 +#: cinder/volume/flows/manager/create_volume.py:418 #, python-format -msgid "Notification {%s} received" +msgid "Marking volume %s as bootable." 
msgstr "" -#: cinder/volume/netapp.py:79 +#: cinder/volume/flows/manager/create_volume.py:421 #, python-format -msgid "API %(name)sfailed: %(reason)s" +msgid "Failed updating volume %(volume_id)s bootable flag to true" msgstr "" -#: cinder/volume/netapp.py:109 +#: cinder/volume/flows/manager/create_volume.py:448 #, python-format -msgid "%s is not set" +msgid "" +"Attempting download of %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:455 +#: cinder/volume/flows/manager/create_volume.py:466 +#, python-format +msgid "" +"Failed to copy image %(image_id)s to volume: %(volume_id)s, error: " +"%(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:461 +#, python-format +msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:475 +#, python-format +msgid "" +"Downloaded image %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s successfully." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:512 +#, python-format +msgid "" +"Creating volume glance metadata for volume %(volume_id)s backed by image " +"%(image_id)s with: %(vol_metadata)s." msgstr "" -#: cinder/volume/netapp.py:128 -msgid "Connected to DFM server" +#: cinder/volume/flows/manager/create_volume.py:526 +#, python-format +msgid "" +"Cloning %(volume_id)s from image %(image_id)s at location " +"%(image_location)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:552 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(updates)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:574 +#, python-format +msgid "Unable to create volume. Volume driver %s not initialized" msgstr "" -#: cinder/volume/netapp.py:159 +#: cinder/volume/flows/manager/create_volume.py:588 #, python-format -msgid "Job failed: %s" +msgid "" +"Volume %(volume_id)s: being created using %(functor)s with specification:" +" %(volume_spec)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:611 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with creation provided " +"model %(model)s" msgstr "" -#: cinder/volume/netapp.py:240 -msgid "Failed to provision dataset member" +#: cinder/volume/flows/manager/create_volume.py:619 +#, python-format +msgid "Volume %s: creating export" msgstr "" -#: cinder/volume/netapp.py:252 -msgid "No LUN was created by the provision job" +#: cinder/volume/flows/manager/create_volume.py:633 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with driver provided model " +"%(model)s" msgstr "" -#: cinder/volume/netapp.py:261 cinder/volume/netapp.py:433 +#: cinder/volume/flows/manager/create_volume.py:680 #, python-format -msgid "Failed to find LUN ID for volume %s" +msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" msgstr "" -#: cinder/volume/netapp.py:280 -msgid "Failed to remove and delete dataset member" -msgstr "" +#~ msgid "Error retrieving volume status: %s" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get system name" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get storage pool data" +#~ msgstr "" + +#~ msgid "Cannot find any Fibre Channel HBAs" +#~ msgstr "" + +#~ msgid "Volume status must be available or error" +#~ msgstr "" + +#~ msgid "No backend config with id %s" +#~ msgstr "" + +#~ msgid "No sm_flavor called %s" +#~ msgstr "" + +#~ msgid "No sm_volume with id %s" +#~ msgstr "" + +#~ msgid 
"Error: %s" +#~ msgstr "" + +#~ msgid "Unexpected state while cloning %s" +#~ msgstr "" + +#~ msgid "iSCSI device not found at %s" +#~ msgstr "" + +#~ msgid "Fibre Channel device not found." +#~ msgstr "" + +#~ msgid "Uncaught exception" +#~ msgstr "" + +#~ msgid "Out reactor registered" +#~ msgstr "" + +#~ msgid "CONSUMER GOT %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT QUEUED %(data)s" +#~ msgstr "" + +#~ msgid "Could not create IPC directory %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT %(data)s" +#~ msgstr "" + +#~ msgid "May specify only one of snapshot, imageRef or source volume" +#~ msgstr "" + +#~ msgid "Volume size cannot be lesser than the Snapshot size" +#~ msgstr "" + +#~ msgid "Unable to clone volumes that are in an error state" +#~ msgstr "" + +#~ msgid "Clones currently must be >= original volume size." +#~ msgstr "" + +#~ msgid "Volume size '%s' must be an integer and greater than 0" +#~ msgstr "" + +#~ msgid "Size of specified image is larger than volume size." +#~ msgstr "" + +#~ msgid "Image minDisk size is larger than the volume size." +#~ msgstr "" + +#~ msgid "" +#~ msgstr "" + +#~ msgid "Availability zone is invalid" +#~ msgstr "" + +#~ msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +#~ msgstr "" + +#~ msgid "volume %s: creating from snapshot" +#~ msgstr "" + +#~ msgid "volume %s: creating from existing volume" +#~ msgstr "" + +#~ msgid "volume %s: creating from image" +#~ msgstr "" + +#~ msgid "volume %s: creating" +#~ msgstr "" + +#~ msgid "Setting volume: %s status to error after failed image copy." +#~ msgstr "" + +#~ msgid "Unexpected Error: " +#~ msgstr "" + +#~ msgid "volume %s: creating export" +#~ msgstr "" + +#~ msgid "volume %s: create failed" +#~ msgstr "" + +#~ msgid "volume %s: created successfully" +#~ msgstr "" + +#~ msgid "volume %s: Error trying to reschedule create" +#~ msgstr "" + +#~ msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +#~ msgstr "" + +#~ msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +#~ msgstr "" + +#~ msgid "Downloaded image %(image_id)s to %(volume_id)s successfully." +#~ msgstr "" + +#~ msgid "Array Mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "LUN %(lun)s of size %(size)s MB is created." +#~ msgstr "" + +#~ msgid "Array mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "Failed to attach iser target for volume %(volume_id)s." 
+#~ msgstr "" + +#~ msgid "Fetching %s" +#~ msgstr "" + +#~ msgid "Link Local address is not found.:%s" +#~ msgstr "" + +#~ msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +#~ msgstr "" + +#~ msgid "Started %(name)s on %(_host)s:%(_port)s" +#~ msgstr "" + +#~ msgid "Unable to find a Fibre Channel volume device" +#~ msgstr "" + +#~ msgid "Volume device not found at %s" +#~ msgstr "" + +#~ msgid "Unable to find Volume Group: %s" +#~ msgstr "" + +#~ msgid "Failed to create Volume Group: %s" +#~ msgstr "" + +#~ msgid "snapshot %(snap_name)s: creating" +#~ msgstr "" + +#~ msgid "Running with CoraidDriver for ESM EtherCLoud" +#~ msgstr "" + +#~ msgid "Update session cookie %(session)s" +#~ msgstr "" + +#~ msgid "Message : %(message)s" +#~ msgstr "" + +#~ msgid "Error while trying to set group: %(message)s" +#~ msgstr "" + +#~ msgid "Unable to find group: %(group)s" +#~ msgstr "" + +#~ msgid "ESM urlOpen error" +#~ msgstr "" + +#~ msgid "JSON Error" +#~ msgstr "" + +#~ msgid "Request without URL" +#~ msgstr "" + +#~ msgid "Configure data : %s" +#~ msgstr "" + +#~ msgid "Configure response : %s" +#~ msgstr "" + +#~ msgid "Unable to retrive volume infos for volume %(volname)s" +#~ msgstr "" + +#~ msgid "Cannot login on Coraid ESM" +#~ msgstr "" + +#~ msgid "Fail to create volume %(volname)s" +#~ msgstr "" + +#~ msgid "Failed to delete volume %(volname)s" +#~ msgstr "" + +#~ msgid "Failed to Create Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "Failed to Delete Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "Failed to Create Volume from Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "fmt = %(fmt)s backed by: %(backing_file)s" +#~ msgstr "" + +#~ msgid "Expected image to be in raw format, but is %s" +#~ msgstr "" + +#~ msgid "volume group %s doesn't exist" +#~ msgstr "" + +#~ msgid "Error retrieving volume stats: %s" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Could not get system name" +#~ msgstr "" + +#~ msgid "CPG (%s) must be in a domain" +#~ msgstr "" + +#~ msgid "Error populating default encryption types!" +#~ msgstr "" + +#~ msgid "Unexpected error while running command." 
+#~ msgstr "" + +#~ msgid "Nexenta SA returned the error" +#~ msgstr "" -#: cinder/volume/netapp.py:603 cinder/volume/netapp.py:657 -#, python-format -msgid "No LUN ID for volume %s" -msgstr "" +#~ msgid "Ignored target group creation error \"%s\" while ensuring export" +#~ msgstr "" -#: cinder/volume/netapp.py:607 cinder/volume/netapp.py:661 -#, python-format -msgid "Failed to get LUN details for LUN ID %s" -msgstr "" +#~ msgid "Ignored target group member addition error \"%s\" while ensuring export" +#~ msgstr "" -#: cinder/volume/netapp.py:614 -#, python-format -msgid "Failed to get host details for host ID %s" -msgstr "" +#~ msgid "Ignored LU creation error \"%s\" while ensuring export" +#~ msgstr "" -#: cinder/volume/netapp.py:620 -#, python-format -msgid "Failed to get target portal for filer: %s" -msgstr "" +#~ msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +#~ msgstr "" -#: cinder/volume/netapp.py:625 -#, python-format -msgid "Failed to get target IQN for filer: %s" -msgstr "" +#~ msgid "Copying metadata from snapshot %(snap_volume_id)s to %(volume_id)s" +#~ msgstr "" -#: cinder/volume/san.py:113 cinder/volume/san.py:151 -msgid "Specify san_password or san_private_key" -msgstr "" +#~ msgid "Connection to glance failed" +#~ msgstr "" -#: cinder/volume/san.py:156 -msgid "san_ip must be set" -msgstr "" +#~ msgid "Invalid snapshot" +#~ msgstr "" -#: cinder/volume/san.py:320 -#, python-format -msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" -msgstr "" +#~ msgid "Invalid input received" +#~ msgstr "" -#: cinder/volume/san.py:452 -#, python-format -msgid "CLIQ command returned %s" -msgstr "" +#~ msgid "Invalid volume type" +#~ msgstr "" -#: cinder/volume/san.py:458 -#, python-format -msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" -msgstr "" +#~ msgid "Invalid volume" +#~ msgstr "" -#: cinder/volume/san.py:466 -#, python-format -msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" -msgstr "" +#~ msgid "Invalid host" +#~ msgstr "" -#: cinder/volume/san.py:496 -#, python-format -msgid "" -"Unexpected number of virtual ips for cluster %(cluster_name)s. " -"Result=%(_xml)s" -msgstr "" +#~ msgid "Invalid auth key" +#~ msgstr "" -#: cinder/volume/san.py:549 -#, python-format -msgid "Volume info: %(volume_name)s => %(volume_attributes)s" -msgstr "" +#~ msgid "Invalid metadata" +#~ msgstr "" -#: cinder/volume/san.py:594 -msgid "local_path not supported" -msgstr "" +#~ msgid "Invalid metadata size" +#~ msgstr "" -#: cinder/volume/san.py:626 -#, python-format -msgid "Could not determine project for volume %s, can't export" -msgstr "" +#~ msgid "Migration error" +#~ msgstr "" -#: cinder/volume/san.py:696 -#, python-format -msgid "Payload for SolidFire API call: %s" -msgstr "" +#~ msgid "Quota exceeded" +#~ msgstr "" -#: cinder/volume/san.py:713 -#, python-format -msgid "Call to json.loads() raised an exception: %s" -msgstr "" +#~ msgid "Connection to swift failed" +#~ msgstr "" -#: cinder/volume/san.py:718 -#, python-format -msgid "Results of SolidFire API call: %s" -msgstr "" +#~ msgid "Volume migration failed" +#~ msgstr "" -#: cinder/volume/san.py:732 -#, python-format -msgid "Found solidfire account: %s" -msgstr "" +#~ msgid "SSH command injection detected" +#~ msgstr "" -#: cinder/volume/san.py:746 -#, python-format -msgid "solidfire account: %s does not exist, create it..." -msgstr "" +#~ msgid "Invalid qos specs" +#~ msgstr "" -#: cinder/volume/san.py:804 -msgid "Enter SolidFire create_volume..." 
-msgstr "" +#~ msgid "debug in callback: %s" +#~ msgstr "" -#: cinder/volume/san.py:846 -msgid "Leaving SolidFire create_volume" -msgstr "" +#~ msgid "Expected object of type: %s" +#~ msgstr "" -#: cinder/volume/san.py:861 -msgid "Enter SolidFire delete_volume..." -msgstr "" +#~ msgid "timefunc: '%(name)s' took %(total_time).2f secs" +#~ msgstr "" -#: cinder/volume/san.py:880 -#, python-format -msgid "Deleting volumeID: %s " -msgstr "" +#~ msgid "base image still has %s snapshots so not deleting base image" +#~ msgstr "" -#: cinder/volume/san.py:888 -msgid "Leaving SolidFire delete_volume" -msgstr "" +#~ msgid "Failed to rename migration destination volume %(vol)s to %(name)s" +#~ msgstr "" -#: cinder/volume/san.py:891 -msgid "Executing SolidFire ensure_export..." -msgstr "" +#~ msgid "Resize volume \"%(name)s\" to %(size)s" +#~ msgstr "" -#: cinder/volume/san.py:895 -msgid "Executing SolidFire create_export..." -msgstr "" +#~ msgid "Volume \"%(name)s\" resized. New size is %(size)s" +#~ msgstr "" -#: cinder/volume/volume_types.py:49 cinder/volume/volume_types.py:108 -msgid "name cannot be None" -msgstr "" +#~ msgid "Invalid snapshot backing file format: %s" +#~ msgstr "" -#: cinder/volume/volume_types.py:96 -msgid "id cannot be None" -msgstr "" +#~ msgid "Extend volume from %(old_size) to %(new_size)" +#~ msgstr "" -#: cinder/volume/xensm.py:55 -#, python-format -msgid "SR name = %s" -msgstr "" +#~ msgid "pool %s doesn't exist" +#~ msgstr "" -#: cinder/volume/xensm.py:56 -#, python-format -msgid "Params: %s" -msgstr "" +#~ msgid "_update_volume_stats: Could not get system name." +#~ msgstr "" -#: cinder/volume/xensm.py:60 -#, python-format -msgid "Failed to create sr %s...continuing" -msgstr "" +#~ msgid "Disk not found: %s" +#~ msgstr "" -#: cinder/volume/xensm.py:62 -msgid "Create failed" -msgstr "" +#~ msgid "read timed out" +#~ msgstr "" -#: cinder/volume/xensm.py:64 -#, python-format -msgid "SR UUID of new SR is: %s" -msgstr "" +#~ msgid "check_for_setup_error." +#~ msgstr "" -#: cinder/volume/xensm.py:71 -msgid "Failed to update db" -msgstr "" +#~ msgid "check_for_setup_error: Can not get device type." +#~ msgstr "" -#: cinder/volume/xensm.py:80 -#, python-format -msgid "Failed to introduce sr %s...continuing" -msgstr "" +#~ msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +#~ msgstr "" -#: cinder/volume/xensm.py:91 -#, python-format -msgid "Failed to reach backend %d" -msgstr "" +#~ msgid "_get_device_type: Storage Pool must be configured." +#~ msgstr "" -#: cinder/volume/xensm.py:100 -msgid "XenSMDriver requires xenapi connection" -msgstr "" +#~ msgid "create_volume:volume name: %s." +#~ msgstr "" -#: cinder/volume/xensm.py:110 -msgid "Failed to initiate session" -msgstr "" +#~ msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +#~ msgstr "" -#: cinder/volume/xensm.py:142 -#, python-format -msgid "Volume will be created in backend - %d" -msgstr "" +#~ msgid "create_export: volume name:%s" +#~ msgstr "" -#: cinder/volume/xensm.py:154 -msgid "Failed to update volume in db" -msgstr "" +#~ msgid "create_export:Volume %(name)s does not exist." +#~ msgstr "" -#: cinder/volume/xensm.py:157 -msgid "Unable to create volume" -msgstr "" +#~ msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +#~ msgstr "" -#: cinder/volume/xensm.py:171 -msgid "Failed to delete vdi" -msgstr "" +#~ msgid "terminate_connection:Host does not exist. Host name:%(host)s." 
+#~ msgstr "" -#: cinder/volume/xensm.py:177 -msgid "Failed to delete volume in db" -msgstr "" +#~ msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +#~ msgstr "" -#: cinder/volume/xensm.py:210 -msgid "Failed to find volume in db" -msgstr "" +#~ msgid "create_snapshot:Device does not support snapshot." +#~ msgstr "" -#: cinder/volume/xensm.py:221 -msgid "Failed to find backend in db" -msgstr "" +#~ msgid "create_snapshot:Resource pool needs 1GB valid size at least." +#~ msgstr "" -#: cinder/volume/nexenta/__init__.py:27 -msgid "Nexenta SA returned the error" -msgstr "" +#~ msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:64 -#, python-format -msgid "Sending JSON data: %s" -msgstr "" +#~ msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s" +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:69 -#, python-format -msgid "Auto switching to HTTPS connection to %s" -msgstr "" +#~ msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:75 -msgid "No headers in server response" -msgstr "" +#~ msgid "delete_snapshot:Device does not support snapshot." +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:76 -msgid "Bad response from server" -msgstr "" +#~ msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:79 -#, python-format -msgid "Got response: %s" -msgstr "" +#~ msgid "_check_conf_file: %s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:96 -#, python-format -msgid "Volume %s does not exist in Nexenta SA" -msgstr "" +#~ msgid "Write login information to xml error. %s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:180 -msgid "" -"Call to local_path should not happen. Verify that use_local_volumes flag " -"is turned off." -msgstr "" +#~ msgid "_get_login_info error. %s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:202 -#, python-format -msgid "Ignored target creation error \"%s\" while ensuring export" -msgstr "" +#~ msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:210 -#, python-format -msgid "Ignored target group creation error \"%s\" while ensuring export" -msgstr "" +#~ msgid "_get_lun_set_info:%s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:219 -#, python-format -msgid "Ignored target group member addition error \"%s\" while ensuring export" -msgstr "" +#~ msgid "_get_iscsi_info:%s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:227 -#, python-format -msgid "Ignored LU creation error \"%s\" while ensuring export" -msgstr "" +#~ msgid "CLI command:%s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:237 -#, python-format -msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" -msgstr "" +#~ msgid "_execute_cli:%s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:273 -#, python-format -msgid "" -"Got error trying to destroy target group %(target_group)s, assuming it is" -" already gone: %(exc)s" -msgstr "" +#~ msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:280 -#, python-format -msgid "" -"Got error trying to delete target %(target)s, assuming it is already " -"gone: %(exc)s" -msgstr "" +#~ msgid "_get_tgt_iqn:iSCSI IP is %s." 
+#~ msgstr "" -#~ msgid "Unable to locate account %(account_name) on Solidfire device" +#~ msgid "_get_tgt_iqn:iSCSI target iqn is:%s" #~ msgstr "" -#~ msgid "Zone %(zone_id)s could not be found." +#~ msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" #~ msgstr "" -#~ msgid "Cinder access parameters were not specified." +#~ msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" #~ msgstr "" -#~ msgid "Virtual Storage Array %(id)d could not be found." +#~ msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." #~ msgstr "" -#~ msgid "Virtual Storage Array %(name)s could not be found." +#~ msgid "Ignored target creation error while ensuring export" #~ msgstr "" -#~ msgid "Detected more than one volume with name %(vol_name)" +#~ msgid "Ignored target group creation error while ensuring export" #~ msgstr "" -#~ msgid "Detected existing vlan with id %(vlan)" +#~ msgid "Ignored target group member addition error while ensuring export" #~ msgstr "" -#~ msgid "" -#~ "Attempting to grab semaphore \"%(lock)s\" " -#~ "for method \"%(method)s\"...lock" +#~ msgid "Ignored LU creation error while ensuring export" #~ msgstr "" -#~ msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgid "Ignored LUN mapping entry addition error while ensuring export" #~ msgstr "" -#~ msgid "" -#~ "Attempting to grab file lock " -#~ "\"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgid "Invalid source volume %(reason)s." #~ msgstr "" -#~ msgid "Got file lock \"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgid "The request is invalid." #~ msgstr "" -#~ msgid "Parent group id and group id cannot be same" +#~ msgid "Volume %(volume_id)s persistence file could not be found." #~ msgstr "" -#~ msgid "No body provided" +#~ msgid "No disk at %(location)s" #~ msgstr "" -#~ msgid "Create VSA %(display_name)s of type %(vc_type)s" +#~ msgid "Class %(class_name)s could not be found: %(exception)s" #~ msgstr "" -#~ msgid "Delete VSA with id: %s" +#~ msgid "Action not allowed." #~ msgstr "" -#~ msgid "Associate address %(ip)s to VSA %(id)s" +#~ msgid "Key pair %(key_name)s already exists." #~ msgstr "" -#~ msgid "Disassociate address from VSA %(id)s" +#~ msgid "Migration error: %(reason)s" #~ msgstr "" -#~ msgid "%(obj)s with ID %(id)s not found" +#~ msgid "Maximum volume/snapshot size exceeded" #~ msgstr "" -#~ msgid "" -#~ "%(obj)s with ID %(id)s belongs to " -#~ "VSA %(own_vsa_id)s and not to VSA " -#~ "%(vsa_id)s." +#~ msgid "3PAR Host already exists: %(err)s. %(info)s" #~ msgstr "" -#~ msgid "Index. vsa_id=%(vsa_id)s" +#~ msgid "Backup volume %(volume_id)s type not recognised." #~ msgstr "" -#~ msgid "Detail. vsa_id=%(vsa_id)s" +#~ msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s" #~ msgstr "" -#~ msgid "Create. vsa_id=%(vsa_id)s, body=%(body)s" +#~ msgid "ssh_read: Read SSH timeout" #~ msgstr "" -#~ msgid "Create volume of %(size)s GB from VSA ID %(vsa_id)s" +#~ msgid "do_setup." #~ msgstr "" -#~ msgid "Update %(obj)s with id: %(id)s, changes: %(changes)s" +#~ msgid "create_volume: volume name: %s." #~ msgstr "" -#~ msgid "Delete. vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgid "delete_volume: volume name: %s." #~ msgstr "" -#~ msgid "Show. 
vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgid "create_cloned_volume: src volume: %(src)s tgt volume: %(tgt)s" #~ msgstr "" -#~ msgid "Index instances for VSA %s" +#~ msgid "create_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" #~ msgstr "" -#~ msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances" +#~ msgid "delete_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" #~ msgstr "" -#~ msgid "" -#~ "Instance quota exceeded. You cannot run" -#~ " any more instances of this type." +#~ msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s" #~ msgstr "" -#~ msgid "" -#~ "Instance quota exceeded. You can only" -#~ " run %s more instances of this " -#~ "type." +#~ msgid "_update_volume_stats: Updating volume stats" #~ msgstr "" -#~ msgid "Going to try to soft delete %s" +#~ msgid "restore finished." #~ msgstr "" -#~ msgid "No host for instance %s, deleting immediately" +#~ msgid "Error encountered during initialization of driver: %s" #~ msgstr "" -#~ msgid "Going to try to terminate %s" +#~ msgid "Unabled to update stats, driver is uninitialized" #~ msgstr "" -#~ msgid "Going to try to stop %s" +#~ msgid "Snapshot file at %s does not exist." #~ msgstr "" -#~ msgid "Going to try to start %s" +#~ msgid "_create_copy: Source vdisk %s does not exist" #~ msgstr "" -#~ msgid "" -#~ "Going to force the deletion of the" -#~ " vm %(instance_uuid)s, even if it is" -#~ " deleted" +#~ msgid "Login to 3PAR array invalid" #~ msgstr "" -#~ msgid "" -#~ "Instance %(instance_uuid)s did not exist " -#~ "in the DB, but I will shut " -#~ "it down anyway using a special " -#~ "context" +#~ msgid "There are no datastores present under %s." #~ msgstr "" -#~ msgid "exception terminating the instance %(instance_uuid)s" +#~ msgid "Size for volume: %s not found, skipping secure delete." #~ msgstr "" -#~ msgid "trying to destroy already destroyed instance: %s" +#~ msgid "Could not find attribute for LUN named %s" #~ msgstr "" -#~ msgid "" -#~ "Instance %(name)s found in database but" -#~ " not known by hypervisor. Setting " -#~ "power state to NOSTATE" +#~ msgid "Cleaning up incomplete backup operations" #~ msgstr "" -#~ msgid "" -#~ "Detected instance with name label " -#~ "'%(name_label)s' which is marked as " -#~ "DELETED but still present on host." +#~ msgid "Resetting volume %s to available (was backing-up)" #~ msgstr "" -#~ msgid "" -#~ "Destroying instance with name label " -#~ "'%(name_label)s' which is marked as " -#~ "DELETED but still present on host." +#~ msgid "Resetting volume %s to error_restoring (was restoring-backup)" #~ msgstr "" -#~ msgid "SQL connection failed (%(connstring)s). %(attempts)d attempts left." +#~ msgid "Resetting backup %s to error (was creating)" #~ msgstr "" -#~ msgid "Can't downgrade without losing data" +#~ msgid "Resetting backup %s to available (was restoring)" #~ msgstr "" -#~ msgid "Instance %(instance_id)s not found" +#~ msgid "Resuming delete on backup: %s" #~ msgstr "" -#~ msgid "Network %s has active ports, cannot delete" +#~ msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" #~ msgstr "" -#~ msgid "No fixed IPs to deallocate for vif %sid" +#~ msgid "create_backup finished. backup: %s" #~ msgstr "" -#~ msgid "" -#~ "AMQP server on %(fl_host)s:%(fl_port)d is " -#~ "unreachable: %(e)s. Trying again in " -#~ "%(fl_intv)d seconds." +#~ msgid "delete_backup started, backup: %s" #~ msgstr "" -#~ msgid "Unable to connect to AMQP server after %(tries)d tries. Shutting down." 
+#~ msgid "delete_backup finished, backup %s deleted" #~ msgstr "" -#~ msgid "Reconnected to queue" +#~ msgid "JSON transfer Error" #~ msgstr "" -#~ msgid "Failed to fetch message from queue: %s" +#~ msgid "create volume error: %(err)s" #~ msgstr "" -#~ msgid "Initing the Adapter Consumer for %s" +#~ msgid "Create snapshot error." #~ msgstr "" -#~ msgid "Created \"%(exchange)s\" fanout exchange with \"%(key)s\" routing key" +#~ msgid "Create luncopy error." #~ msgstr "" -#~ msgid "Exception while processing consumer" +#~ msgid "_find_host_lun_id transfer data error! " #~ msgstr "" -#~ msgid "Creating \"%(exchange)s\" fanout exchange" +#~ msgid "ssh_read: Read SSH timeout." #~ msgstr "" -#~ msgid "response %s" +#~ msgid "There are no hosts in the inventory." #~ msgstr "" -#~ msgid "topic is %s" +#~ msgid "Unable to create volume: %(vol)s on the hosts: %(hosts)s." #~ msgstr "" -#~ msgid "message %s" +#~ msgid "Successfully cloned new backing: %s." #~ msgstr "" -#~ msgid "" -#~ "Cannot confirm tmpfile at %(ipath)s is" -#~ " on same shared storage between " -#~ "%(src)s and %(dest)s." +#~ msgid "Successfully reverted clone: %(clone)s to snapshot: %(snapshot)s." #~ msgstr "" -#~ msgid "" -#~ "Unable to migrate %(instance_id)s to " -#~ "%(dest)s: Lack of memory(host:%(avail)s <= " -#~ "instance:%(mem_inst)s)" +#~ msgid "Copying backing files from %(src)s to %(dest)s." #~ msgstr "" -#~ msgid "" -#~ "Unable to migrate %(instance_id)s to " -#~ "%(dest)s: Lack of disk(host:%(available)s <=" -#~ " instance:%(necessary)s)" +#~ msgid "Initiated copying of backing via task: %s." #~ msgstr "" -#~ msgid "Driver Method %(driver_method)s missing: %(e)s.Reverting to schedule()" +#~ msgid "Successfully copied backing to %s." #~ msgstr "" -#~ msgid "Setting instance %(instance_uuid)s to ERROR state." +#~ msgid "Registering backing at path: %s to inventory." #~ msgstr "" -#~ msgid "_filter_hosts: %(request_spec)s" +#~ msgid "Initiated registring backing, task: %s." #~ msgstr "" -#~ msgid "Filter hosts for drive type %s" +#~ msgid "Successfully registered backing: %s." #~ msgstr "" -#~ msgid "Host %s has no free capacity. Skip" +#~ msgid "Reverting backing to snapshot: %s." #~ msgstr "" -#~ msgid "Filter hosts: %s" +#~ msgid "Initiated reverting snapshot via task: %s." #~ msgstr "" -#~ msgid "Must implement host selection mechanism" +#~ msgid "Successfully reverted to snapshot: %s." #~ msgstr "" -#~ msgid "Maximum number of hosts selected (%d)" +#~ msgid "Successfully copied disk data to: %s." #~ msgstr "" -#~ msgid "Selected excessive host %(host)s" +#~ msgid "Error(s): %s occurred in the call to RetrieveProperties." #~ msgstr "" -#~ msgid "Provision volume %(name)s of size %(size)s GB on host %(host)s" +#~ msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" #~ msgstr "" -#~ msgid "volume_params %(volume_params)s" +#~ msgid "Deploy v1 of the Cinder API. " #~ msgstr "" -#~ msgid "%(i)d: Volume %(name)s" +#~ msgid "Deploy v2 of the Cinder API. " #~ msgstr "" -#~ msgid "Attempting to spawn %(num_volumes)d volume(s)" +#~ msgid "_read_xml:%s" #~ msgstr "" -#~ msgid "Error creating volumes" +#~ msgid "request ip info is %s." #~ msgstr "" -#~ msgid "Non-VSA volume %d" +#~ msgid "new str info is %s." #~ msgstr "" -#~ msgid "Spawning volume %(volume_id)s with drive type %(drive_type)s" +#~ msgid "Failed to create iser target for volume %(volume_id)s." #~ msgstr "" -#~ msgid "Error creating volume" +#~ msgid "Failed to remove iser target for volume %(volume_id)s." 
#~ msgstr "" -#~ msgid "No capability selected for volume of size %(size)s" +#~ msgid "rtstool is not installed correctly" #~ msgstr "" -#~ msgid "Host %s:" +#~ msgid "Creating iser_target for: %s" #~ msgstr "" -#~ msgid "" -#~ "\tDrive %(qosgrp)-25s: total %(total)2s, " -#~ "used %(used)2s, free %(free)2s. Available " -#~ "capacity %(avail)-5s" +#~ msgid "Failed to create iser target for volume id:%(vol_id)s: %(e)s" #~ msgstr "" -#~ msgid "" -#~ "\t LeastUsedHost: Best host: %(best_host)s." -#~ " (used capacity %(min_used)s)" +#~ msgid "Removing iser_target for: %s" #~ msgstr "" -#~ msgid "" -#~ "\t MostAvailCap: Best host: %(best_host)s. " -#~ "(available %(max_avail)s %(type_str)s)" +#~ msgid "Failed to remove iser target for volume id:%(vol_id)s: %(e)s" #~ msgstr "" -#~ msgid "(%(nm)s) publish (key: %(routing_key)s) %(message)s" +#~ msgid "Volume %s does not exist, it seems it was already deleted" #~ msgstr "" -#~ msgid "Publishing to route %s" +#~ msgid "Executing zfs send/recv on the appliance" #~ msgstr "" -#~ msgid "Declaring queue %s" +#~ msgid "zfs send/recv done, new volume %s created" #~ msgstr "" -#~ msgid "Declaring exchange %s" +#~ msgid "Failed to delete temp snapshot %(volume)s@%(snapshot)s" #~ msgstr "" -#~ msgid "Binding %(queue)s to %(exchange)s with key %(routing_key)s" +#~ msgid "Failed to delete zfs recv snapshot %(volume)s@%(snapshot)s" #~ msgstr "" -#~ msgid "Getting from %(queue)s: %(message)s" +#~ msgid "rbd export-diff failed - %s" #~ msgstr "" -#~ msgid "Test: Emulate wrong VSA name. Raise" +#~ msgid "rbd import-diff failed - %s" #~ msgstr "" -#~ msgid "Test: Emulate DB error. Raise" +#~ msgid "%s is not on GPFS. Perhaps GPFS not mounted." #~ msgstr "" -#~ msgid "Test: user_data = %s" +#~ msgid "Folder %s does not exist, it seems it was already deleted." #~ msgstr "" -#~ msgid "_create: param=%s" +#~ msgid "No 'os-update_readonly_flag' was specified in request." #~ msgstr "" -#~ msgid "Host %s" +#~ msgid "Volume 'readonly' flag must be specified in request as a boolean." #~ msgstr "" -#~ msgid "Test: provision vol %(name)s on host %(host)s" +#~ msgid "ISER provider_location not stored, using discovery" #~ msgstr "" -#~ msgid "\t vol=%(vol)s" +#~ msgid "Could not find iSER export for volume %s" #~ msgstr "" -#~ msgid "Test: VSA update request: vsa_id=%(vsa_id)s values=%(values)s" +#~ msgid "ISER Discovery: Found %s" #~ msgstr "" -#~ msgid "Test: Volume create: %s" +#~ msgid "Failed to access the device on the path %(path)s: %(error)s." #~ msgstr "" -#~ msgid "Test: Volume get request: id=%(volume_id)s" +#~ msgid "iSER device not found at %s" #~ msgstr "" -#~ msgid "Test: Volume update request: id=%(volume_id)s values=%(values)s" +#~ msgid "Found iSER node %(host_device)s (after %(tries)s rescans)." #~ msgstr "" -#~ msgid "Test: Volume get: id=%(volume_id)s" +#~ msgid "Skipping ensure_export. No iser_target provisioned for volume: %s" #~ msgstr "" -#~ msgid "Task [%(name)s] %(task)s status: success %(result)s" +#~ msgid "Skipping remove_export. No iser_target provisioned for volume: %s" #~ msgstr "" -#~ msgid "Task [%(name)s] %(task)s status: %(status)s %(error_info)s" +#~ msgid "Downloading image: %s from glance image server." #~ msgstr "" -#~ msgid "Unable to get updated status: %s" +#~ msgid "Uploading image: %s to the Glance image server." 
#~ msgstr "" -#~ msgid "" -#~ "deactivate_node is called for " -#~ "node_id = %(id)s node_ip = %(ip)s" +#~ msgid "Invalid request body" #~ msgstr "" -#~ msgid "virsh said: %r" +#~ msgid "enter: _get_host_from_connector: prefix %s" #~ msgstr "" -#~ msgid "cool, it's a device" +#~ msgid "Schedule volume flow not retrieved" #~ msgstr "" -#~ msgid "Unable to read LXC console" +#~ msgid "Failed to successfully complete schedule volume using flow: %s" #~ msgstr "" -#~ msgid "" -#~ "to xml...\n" -#~ ":%s " +#~ msgid "Create volume flow not retrieved" #~ msgstr "" -#~ msgid "During wait running, %s disappeared." +#~ msgid "Failed to successfully complete create volume workflow" #~ msgstr "" -#~ msgid "Instance %s running successfully." +#~ msgid "Expected volume result not found" #~ msgstr "" -#~ msgid "The nwfilter(%(instance_secgroup_filter_name)s) is not found." +#~ msgid "Manager volume flow not retrieved" #~ msgstr "" -#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image verification failed" +#~ msgid "Failed to successfully complete manager volume workflow" #~ msgstr "" -#~ msgid "" -#~ "%(container_format)s-%(id)s (%(base_file)s): image " -#~ "verification skipped, no hash stored" +#~ msgid "Unable to update stats, driver is uninitialized" #~ msgstr "" -#~ msgid "%(container_format)s-%(id)s (%(base_file)s): checking" +#~ msgid "Bad reponse from server: %s" #~ msgstr "" -#~ msgid "" -#~ "%(container_format)s-%(id)s (%(base_file)s): in use:" -#~ " on this node %(local)d local, " -#~ "%(remote)d on other nodes" +#~ msgid "%(flow)s has moved into state %(state)s from state %(old_state)s" #~ msgstr "" -#~ msgid "" -#~ "%(container_format)s-%(id)s (%(base_file)s): warning " -#~ "-- an absent base file is in " -#~ "use! instances: %(instance_list)s" +#~ msgid "No request spec, will not reschedule" #~ msgstr "" -#~ msgid "" -#~ "%(container_format)s-%(id)s (%(base_file)s): in: on" -#~ " other nodes (%(remote)d on other " -#~ "nodes)" +#~ msgid "No retry filter property or associated retry info, will not reschedule" #~ msgstr "" -#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image is not in use" +#~ msgid "Retry info not present, will not reschedule" #~ msgstr "" -#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image is in use" +#~ msgid "Clear capabilities" #~ msgstr "" -#~ msgid "Created VM %s..." +#~ msgid "This usually means the volume was never succesfully created." #~ msgstr "" -#~ msgid "Created VM %(instance_name)s as %(vm_ref)s." +#~ msgid "setting LU uppper (end) limit to %s" #~ msgstr "" -#~ msgid "Creating a CDROM-specific VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +#~ msgid "Can't find lun or lun goup in array" #~ msgstr "" -#~ msgid "" -#~ "Created a CDROM-specific VBD %(vbd_ref)s" -#~ " for VM %(vm_ref)s, VDI %(vdi_ref)s." +#~ msgid "Volume to be restored to is smaller than the backup to be restored" #~ msgstr "" -#~ msgid "No primary VDI found for%(vm_ref)s" +#~ msgid "Volume driver '%(driver)s' not initialized." #~ msgstr "" -#~ msgid "Snapshotting VM %(vm_ref)s with label '%(label)s'..." +#~ msgid "in looping call" #~ msgstr "" -#~ msgid "Created snapshot %(template_vm_ref)s from VM %(vm_ref)s." +#~ msgid "Is the appropriate service running?" 
#~ msgstr "" -#~ msgid "Fetching image %(image)s" +#~ msgid "Could not find another host" #~ msgstr "" -#~ msgid "Image Type: %s" +#~ msgid "Not enough allocatable volume gigabytes remaining" #~ msgstr "" -#~ msgid "ISO: Found sr possibly containing the ISO image" +#~ msgid "Unable to update stats on non-intialized Volume Group: %s" #~ msgstr "" -#~ msgid "Size for image %(image)s:%(virtual_size)d" +#~ msgid "do_setup: Pool %s does not exist" #~ msgstr "" -#~ msgid "instance %s: Failed to fetch glance image" +#~ msgid "migrate_volume started with more than one vdisk copy" #~ msgstr "" -#~ msgid "Creating VBD for VDI %s ... " +#~ msgid "migrate_volume: Could not get vdisk copy data" #~ msgstr "" -#~ msgid "Creating VBD for VDI %s done." +#~ msgid "Selected datastore: %s for the volume." #~ msgstr "" -#~ msgid "VBD.unplug successful first time." +#~ msgid "There are no valid datastores present under %s." #~ msgstr "" -#~ msgid "VBD.unplug rejected: retrying..." +#~ msgid "Unable to create volume, driver not initialized" #~ msgstr "" -#~ msgid "Not sleeping anymore!" +#~ msgid "Migration %(migration_id)s could not be found." #~ msgstr "" -#~ msgid "VBD.unplug successful eventually." +#~ msgid "Bad driver response status: %(status)s" #~ msgstr "" -#~ msgid "Ignoring XenAPI.Failure in VBD.unplug: %s" +#~ msgid "Instance %(instance_id)s could not be found." #~ msgstr "" -#~ msgid "Ignoring XenAPI.Failure %s" +#~ msgid "Volume retype failed: %(reason)s" #~ msgstr "" -#~ msgid "Starting instance %s" +#~ msgid "SIGTERM received" #~ msgstr "" -#~ msgid "instance %s: Failed to spawn" +#~ msgid "Child %(pid)d exited with status %(code)d" #~ msgstr "" -#~ msgid "Instance %s failed to spawn - performing clean-up" +#~ msgid "_wait_child %d" #~ msgstr "" -#~ msgid "instance %s: Failed to spawn - Unable to create VM" +#~ msgid "wait wrap.failed %s" #~ msgstr "" #~ msgid "" -#~ "Auto configuring disk for instance " -#~ "%(instance_uuid)s, attempting to resize " -#~ "partition..." +#~ "Report interval must be less than " +#~ "service down time. Current config: " +#~ ". Setting " +#~ "service_down_time to: %(new_service_down_time)s" #~ msgstr "" -#~ msgid "Invalid value for injected_files: '%s'" +#~ msgid "Failed to update iscsi target for volume %(name)s." #~ msgstr "" -#~ msgid "Starting VM %s..." +#~ msgid "Updating iscsi target: %s" #~ msgstr "" -#~ msgid "Spawning VM %(instance_uuid)s created %(vm_ref)s." +#~ msgid "Failed to update iscsi target %(name)s: %(e)s" #~ msgstr "" -#~ msgid "Instance %s: waiting for running" +#~ msgid "Caught '%(exception)s' exception." #~ msgstr "" -#~ msgid "Instance %s: running" +#~ msgid "Get code level failed" #~ msgstr "" -#~ msgid "Resources to remove:%s" +#~ msgid "do_setup: Could not get system name" #~ msgstr "" -#~ msgid "Removing VDI %(vdi_ref)s(uuid:%(vdi_to_remove)s)" +#~ msgid "Failed to get license information." #~ msgstr "" -#~ msgid "Skipping VDI destroy for %s" +#~ msgid "do_setup: No configured nodes" #~ msgstr "" -#~ msgid "Finished snapshot and upload for VM %s" +#~ msgid "enter: _get_chap_secret_for_host: host name %s" #~ msgstr "" -#~ msgid "Starting snapshot for VM %s" +#~ msgid "" +#~ "leave: _get_chap_secret_for_host: host name " +#~ "%(host_name)s with secret %(chap_secret)s" #~ msgstr "" -#~ msgid "Unable to Snapshot instance %(instance_uuid)s: %(exc)s" +#~ msgid "" +#~ "_create_host: Cannot clean host name. 
" +#~ "Host name is not unicode or string" #~ msgstr "" -#~ msgid "Updating instance '%(instance_uuid)s' progress to %(progress)d" +#~ msgid "enter: _get_host_from_connector: %s" #~ msgstr "" -#~ msgid "Resize instance %s complete" +#~ msgid "leave: _get_host_from_connector: host %s" #~ msgstr "" -#~ msgid "domid changed from %(olddomid)s to %(newdomid)s" +#~ msgid "enter: _create_host: host %s" #~ msgstr "" -#~ msgid "VM %(instance_uuid)s already halted,skipping shutdown..." +#~ msgid "_create_host: No connector ports" #~ msgstr "" -#~ msgid "Shutting down VM for Instance %(instance_uuid)s" +#~ msgid "leave: _create_host: host %(host)s - %(host_name)s" #~ msgstr "" -#~ msgid "Destroying VDIs for Instance %(instance_uuid)s" +#~ msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" #~ msgstr "" #~ msgid "" -#~ "Instance %(instance_uuid)s using RAW or " -#~ "VHD, skipping kernel and ramdisk " -#~ "deletion" +#~ "storwize_svc_multihostmap_enabled is set to " +#~ "False, Not allow multi host mapping" #~ msgstr "" -#~ msgid "Instance %(instance_uuid)s VM destroyed" +#~ msgid "volume %s mapping to multi host" #~ msgstr "" -#~ msgid "Destroying VM for Instance %(instance_uuid)s" +#~ msgid "" +#~ "leave: _map_vol_to_host: LUN %(result_lun)s, " +#~ "volume %(volume_name)s, host %(host_name)s" #~ msgstr "" -#~ msgid "Automatically hard rebooting %d" +#~ msgid "enter: _delete_host: host %s " #~ msgstr "" -#~ msgid "Instance for migration %d not found, skipping" +#~ msgid "leave: _delete_host: host %s " #~ msgstr "" -#~ msgid "injecting network info to xs for vm: |%s|" +#~ msgid "_create_host failed to return the host name." #~ msgstr "" -#~ msgid "creating vif(s) for vm: |%s|" +#~ msgid "_get_host_from_connector failed to return the host name for connector" #~ msgstr "" -#~ msgid "Creating VIF for VM %(vm_ref)s, network %(network_ref)s." +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to any host found." #~ msgstr "" -#~ msgid "Created VIF %(vif_ref)s for VM %(vm_ref)s, network %(network_ref)s." +#~ msgid "" +#~ "terminate_connection: Multiple mappings of " +#~ "volume %(vol_name)s found, no host " +#~ "specified." #~ msgstr "" -#~ msgid "injecting hostname to xs for vm: |%s|" +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to host %(host_name)s found" #~ msgstr "" -#~ msgid "" -#~ "The agent call to %(method)s returned" -#~ " an invalid response: %(ret)r. VM " -#~ "id=%(instance_uuid)s; path=%(path)s; args=%(addl_args)r" +#~ msgid "protocol must be specified as ' iSCSI' or ' FC'" #~ msgstr "" -#~ msgid "" -#~ "TIMEOUT: The call to %(method)s timed" -#~ " out. VM id=%(instance_uuid)s; args=%(args)r" +#~ msgid "enter: _create_vdisk: vdisk %s " #~ msgstr "" #~ msgid "" -#~ "NOT IMPLEMENTED: The call to %(method)s" -#~ " is not supported by the agent. " -#~ "VM id=%(instance_uuid)s; args=%(args)r" +#~ "_create_vdisk %(name)s - did not find success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" #~ msgid "" -#~ "The call to %(method)s returned an " -#~ "error: %(e)s. VM id=%(instance_uuid)s; " -#~ "args=%(args)r" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find success " +#~ "message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" #~ msgstr "" -#~ msgid "Creating VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... 
" +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find mapping " +#~ "id in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" #~ msgstr "" -#~ msgid "Error destroying VDI" +#~ msgid "" +#~ "_prepare_fc_map: Failed to prepare FlashCopy" +#~ " from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" -#~ msgid "Volume status must be available" +#~ msgid "" +#~ "Unexecpted mapping status %(status)s for " +#~ "mapping %(id)s. Attributes: %(attr)s" #~ msgstr "" -#~ msgid "\tVolume %s is NOT VSA volume" +#~ msgid "" +#~ "Mapping %(id)s prepare failed to " +#~ "complete within the allotted %(to)d " +#~ "seconds timeout. Terminating." #~ msgstr "" -#~ msgid "\tFE VSA Volume %s creation - do nothing" +#~ msgid "" +#~ "_prepare_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s with " +#~ "exception %(ex)s" #~ msgstr "" -#~ msgid "VSA BE create_volume for %s failed" +#~ msgid "_prepare_fc_map: %s" #~ msgstr "" -#~ msgid "VSA BE create_volume for %s succeeded" +#~ msgid "" +#~ "_start_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s deletion - do nothing" +#~ msgid "" +#~ "enter: _run_flashcopy: execute FlashCopy from" +#~ " source %(source)s to target %(target)s" #~ msgstr "" -#~ msgid "VSA BE delete_volume for %s failed" +#~ msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" #~ msgstr "" -#~ msgid "VSA BE delete_volume for %s suceeded" +#~ msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s local path call - call discover" +#~ msgid "_create_copy: Source vdisk %(src_vdisk)s (%(src_id)s) does not exist" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s ensure export - do nothing" +#~ msgid "" +#~ "_create_copy: cannot get source vdisk " +#~ "%(src)s capacity from vdisk attributes " +#~ "%(attr)s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s create export - do nothing" +#~ msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s remove export - do nothing" +#~ msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" #~ msgstr "" -#~ msgid "VSA BE remove_export for %s failed" +#~ msgid "" +#~ "leave: _get_flashcopy_mapping_attributes: mapping " +#~ "%(fc_map_id)s, attributes %(attributes)s" #~ msgstr "" -#~ msgid "Failed to retrieve QoS info" +#~ msgid "enter: _is_vdisk_defined: vdisk %s " #~ msgstr "" -#~ msgid "invalid drive data" +#~ msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " #~ msgstr "" -#~ msgid "drive_name not defined" +#~ msgid "enter: _delete_vdisk: vdisk %s" #~ msgstr "" -#~ msgid "invalid drive type name %s" +#~ msgid "warning: Tried to delete vdisk %s but it does not exist." #~ msgstr "" -#~ msgid "*** Experimental VSA code ***" +#~ msgid "leave: _delete_vdisk: vdisk %s" #~ msgstr "" -#~ msgid "Requested number of VCs (%d) is too high. 
Setting to default" +#~ msgid "" +#~ "_add_vdisk_copy %(name)s - did not find" +#~ " success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" -#~ msgid "Creating VSA: %s" +#~ msgid "_get_vdisk_copy_attrs: Could not get vdisk copy data" #~ msgstr "" -#~ msgid "" -#~ "VSA ID %(vsa_id)d %(vsa_name)s: Create " -#~ "volume %(vol_name)s, %(vol_size)d GB, type " -#~ "%(vol_type_id)s" +#~ msgid "_get_pool_attrs: Pool %s does not exist" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)d: Update VSA status to %(status)s" +#~ msgid "enter: _execute_command_and_parse_attributes: command %s" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)d: Update VSA call" +#~ msgid "" +#~ "leave: _execute_command_and_parse_attributes:\n" +#~ "command: %(cmd)s\n" +#~ "attributes: %(attr)s" #~ msgstr "" -#~ msgid "Adding %(add_cnt)s VCs to VSA %(vsa_name)s." +#~ msgid "" +#~ "_get_hdr_dic: attribute headers and values do not match.\n" +#~ " Headers: %(header)s\n" +#~ " Values: %(row)s" #~ msgstr "" -#~ msgid "Deleting %(del_cnt)s VCs from VSA %(vsa_name)s." +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ "stdout: %(out)s\n" +#~ "stderr: %(err)s\n" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Deleting %(direction)s volume %(vol_name)s" +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" -#~ msgid "Unable to delete volume %s" +#~ msgid "Did not find expected column in %(fun)s: %(hdr)s" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Forced delete. %(direction)s volume %(vol_name)s" +#~ msgid "" +#~ "Volume size %(size)s cannot be lesser" +#~ " than the snapshot size %(snap_size)s. " +#~ "They must be >= original snapshot " +#~ "size." #~ msgstr "" -#~ msgid "Going to try to terminate VSA ID %s" +#~ msgid "" +#~ "Clones currently disallowed when %(size)s " +#~ "< %(source_size)s. They must be >= " +#~ "original volume size." #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Delete instance %(name)s" +#~ msgid "" +#~ "Size of specified image %(image_size)s " +#~ "is larger than volume size " +#~ "%(volume_size)s." #~ msgstr "" -#~ msgid "Create call received for VSA %s" +#~ msgid "" +#~ "Image minDisk size %(min_disk)s is " +#~ "larger than the volume size " +#~ "%(volume_size)s." #~ msgstr "" -#~ msgid "Failed to find VSA %(vsa_id)d" +#~ msgid "Updating volume %(volume_id)s with %(update)s" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Drive %(vol_id)s created. Status %(status)s" +#~ msgid "Volume %s: resetting 'creating' status failed" #~ msgstr "" -#~ msgid "Drive %(vol_name)s (%(vol_disp_name)s) still in creating phase - wait" +#~ msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s" #~ msgstr "" -#~ msgid "" -#~ "VSA ID %(vsa_id)d: Not all volumes " -#~ "are created (%(cvol_real)d of %(cvol_exp)d)" +#~ msgid "Marking volume %s as bootable" #~ msgstr "" #~ msgid "" -#~ "VSA ID %(vsa_id)d: Drive %(vol_name)s " -#~ "(%(vol_disp_name)s) is in %(status)s state" +#~ "Attempting download of %(image_id)s " +#~ "(%(image_location)s) to volume %(volume_id)s" #~ msgstr "" -#~ msgid "Failed to update attach status for volume %(vol_name)s. 
%(ex)s" +#~ msgid "" +#~ "Downloaded image %(image_id)s (%(image_location)s)" +#~ " to volume %(volume_id)s successfully" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)d: Delete all BE volumes" +#~ msgid "" +#~ "Creating volume glance metadata for " +#~ "volume %(volume_id)s backed by image " +#~ "%(image_id)s with: %(vol_metadata)s" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)d: Start %(vc_count)d instances" +#~ msgid "" +#~ "Cloning %(volume_id)s from image %(image_id)s" +#~ " at location %(image_location)s" #~ msgstr "" diff --git a/cinder/locale/tr_TR/LC_MESSAGES/cinder.po b/cinder/locale/tr_TR/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..a6cb0a7a1a --- /dev/null +++ b/cinder/locale/tr_TR/LC_MESSAGES/cinder.po @@ -0,0 +1,10736 @@ +# Turkish (Turkey) translations for cinder. +# Copyright (C) 2013 ORGANIZATION +# This file is distributed under the same license as the cinder project. +# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Cinder\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-02-03 06:16+0000\n" +"PO-Revision-Date: 2013-05-29 08:13+0000\n" +"Last-Translator: openstackjenkins \n" +"Language-Team: Turkish (Turkey) " +"(http://www.transifex.com/projects/p/openstack/language/tr_TR/)\n" +"Plural-Forms: nplurals=1; plural=0\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:102 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:66 cinder/brick/exception.py:33 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:88 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:107 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:112 +#, python-format +msgid "Volume driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:116 +#, python-format +msgid "Backup driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:120 +#, python-format +msgid "Connection to glance failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:124 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:129 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:133 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:137 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:141 +msgid "Volume driver not ready." +msgstr "" + +#: cinder/exception.py:145 cinder/brick/exception.py:74 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:150 +#, python-format +msgid "Invalid snapshot: %(reason)s" +msgstr "" + +#: cinder/exception.py:154 +#, python-format +msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:159 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:163 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:167 +msgid "The results are invalid." 
+msgstr "" + +#: cinder/exception.py:171 +#, python-format +msgid "Invalid input received: %(reason)s" +msgstr "" + +#: cinder/exception.py:175 +#, python-format +msgid "Invalid volume type: %(reason)s" +msgstr "" + +#: cinder/exception.py:179 +#, python-format +msgid "Invalid volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:183 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:187 +#, python-format +msgid "Invalid host: %(reason)s" +msgstr "" + +#: cinder/exception.py:193 cinder/brick/exception.py:81 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:197 +#, python-format +msgid "Invalid auth key: %(reason)s" +msgstr "" + +#: cinder/exception.py:201 +#, python-format +msgid "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" +msgstr "" + +#: cinder/exception.py:206 +msgid "Service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:210 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:214 +#, python-format +msgid "The device in the path %(path)s is unavailable: %(reason)s" +msgstr "" + +#: cinder/exception.py:218 +#, python-format +msgid "Expected a uuid but received %(uuid)s." +msgstr "" + +#: cinder/exception.py:222 cinder/brick/exception.py:68 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:228 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:232 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "" +"Volume %(volume_id)s has no administration metadata with key " +"%(metadata_key)s." +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Invalid metadata: %(reason)s" +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Invalid metadata size: %(reason)s" +msgstr "" + +#: cinder/exception.py:250 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:255 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:264 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:269 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s deletion is not allowed with volumes " +"present with the type." +msgstr "" + +#: cinder/exception.py:274 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:278 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:282 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:287 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:291 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:295 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "Service %(service_id)s could not be found." 
+msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:328 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:332 +#, python-format +msgid "Unknown quota resources %(unknown)s." +msgstr "" + +#: cinder/exception.py:336 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:340 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:344 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:348 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:352 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:356 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:365 +#, python-format +msgid "Volume Type %(id)s already exists." +msgstr "" + +#: cinder/exception.py:369 +#, python-format +msgid "Volume type encryption for type %(type_id)s already exists." +msgstr "" + +#: cinder/exception.py:373 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:377 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:381 +#, python-format +msgid "Could not find parameter %(param)s" +msgstr "" + +#: cinder/exception.py:385 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:389 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:398 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:402 +#, python-format +msgid "Quota exceeded: code=%(code)s" +msgstr "" + +#: cinder/exception.py:409 +#, python-format +msgid "" +"Requested volume or snapshot exceeds allowed Gigabytes quota. Requested " +"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." 
+msgstr "" + +#: cinder/exception.py:415 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:419 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:423 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:427 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:432 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:436 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:440 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:444 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:449 +#, python-format +msgid "Glance metadata for volume/snapshot %(id)s cannot be found." +msgstr "" + +#: cinder/exception.py:453 +#, python-format +msgid "Failed to export for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:457 +#, python-format +msgid "Failed to create metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:461 +#, python-format +msgid "Failed to update metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:465 +#, python-format +msgid "Failed to copy metadata to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:469 +#, python-format +msgid "Failed to copy image to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:473 +msgid "Invalid Ceph args provided for backup rbd operation" +msgstr "" + +#: cinder/exception.py:477 +msgid "An error has occurred during backup operation" +msgstr "" + +#: cinder/exception.py:481 +msgid "Backup RBD operation failed" +msgstr "" + +#: cinder/exception.py:485 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:489 +msgid "Failed to identify volume backend." +msgstr "" + +#: cinder/exception.py:493 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "" + +#: cinder/exception.py:497 +#, python-format +msgid "Connection to swift failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:501 +#, python-format +msgid "Transfer %(transfer_id)s could not be found." +msgstr "" + +#: cinder/exception.py:505 +#, python-format +msgid "Volume migration failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:509 +#, python-format +msgid "SSH command injection detected: %(command)s" +msgstr "" + +#: cinder/exception.py:513 +#, python-format +msgid "QoS Specs %(specs_id)s already exists." +msgstr "" + +#: cinder/exception.py:517 +#, python-format +msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:522 +#, python-format +msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "No such QoS spec %(specs_id)s." +msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:536 +#, python-format +msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." 
+msgstr "" + +#: cinder/exception.py:541 +#, python-format +msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." +msgstr "" + +#: cinder/exception.py:546 +#, python-format +msgid "Invalid qos specs: %(reason)s" +msgstr "" + +#: cinder/exception.py:550 +#, python-format +msgid "QoS Specs %(specs_id)s is still associated with entities." +msgstr "" + +#: cinder/exception.py:554 +#, python-format +msgid "key manager error: %(reason)s" +msgstr "" + +#: cinder/exception.py:560 +msgid "Coraid Cinder Driver exception." +msgstr "" + +#: cinder/exception.py:564 +msgid "Failed to encode json data." +msgstr "" + +#: cinder/exception.py:568 +msgid "Login on ESM failed." +msgstr "" + +#: cinder/exception.py:572 +msgid "Relogin on ESM failed." +msgstr "" + +#: cinder/exception.py:576 +#, python-format +msgid "Group with name \"%(group_name)s\" not found." +msgstr "" + +#: cinder/exception.py:580 +#, python-format +msgid "ESM configure request failed: %(message)s." +msgstr "" + +#: cinder/exception.py:584 +#, python-format +msgid "Coraid ESM not available with reason: %(reason)s." +msgstr "" + +#: cinder/exception.py:589 +msgid "Zadara Cinder Driver exception." +msgstr "" + +#: cinder/exception.py:593 +#, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:597 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:601 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:605 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:609 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:613 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:618 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:622 +msgid "SolidFire Cinder Driver exception" +msgstr "" + +#: cinder/exception.py:626 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:630 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:636 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:641 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:645 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:649 cinder/exception.py:662 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:654 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:658 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/manager.py:133 +msgid "Notifying Schedulers of capabilities ..." +msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:105 +#, python-format +msgid "" +"Default quota for resource: %(res)s is set by the default quota flag: " +"quota_%(res)s, it is now deprecated. Please use the the default quota " +"class for default quota." 
+msgstr "" + +#: cinder/quota.py:748 +#, python-format +msgid "Created reservations %s" +msgstr "" + +#: cinder/quota.py:770 +#, python-format +msgid "Failed to commit reservations %s" +msgstr "" + +#: cinder/quota.py:790 +#, python-format +msgid "Failed to roll back reservations %s" +msgstr "" + +#: cinder/quota.py:876 +msgid "Cannot register resource" +msgstr "" + +#: cinder/quota.py:879 +msgid "Cannot register resources" +msgstr "" + +#: cinder/quota_utils.py:46 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume - " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/quota_utils.py:56 cinder/transfer/api.py:168 +#: cinder/volume/flows/api/create_volume.py:520 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/service.py:95 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:108 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:148 +#, python-format +msgid "" +"Report interval must be less than service down time. Current config " +"service_down_time: %(service_down_time)s, report_interval for this: " +"service is: %(report_interval)s. Setting global service_down_time to: " +"%(new_down_time)s" +msgstr "" + +#: cinder/service.py:216 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:255 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:270 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:276 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:298 +#, python-format +msgid "" +"Value of config option %(name)s_workers must be integer greater than 1. " +"Input value ignored." +msgstr "" + +#: cinder/service.py:373 +msgid "serve() can only be called once" +msgstr "" + +#: cinder/service.py:379 cinder/openstack/common/service.py:166 +#: cinder/openstack/common/service.py:384 +msgid "Full set of CONF:" +msgstr "" + +#: cinder/service.py:387 +#, python-format +msgid "%s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Can not translate %s to integer." 
+msgstr "" + +#: cinder/utils.py:127 +#, python-format +msgid "May specify only one of %s" +msgstr "" + +#: cinder/utils.py:212 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:228 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:412 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:423 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:698 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:759 +#, python-format +msgid "Volume driver %s not initialized" +msgstr "" + +#: cinder/wsgi.py:127 cinder/openstack/common/sslutils.py:50 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: cinder/wsgi.py:130 cinder/openstack/common/sslutils.py:53 +#, python-format +msgid "Unable to find ca_file : %s" +msgstr "" + +#: cinder/wsgi.py:133 cinder/openstack/common/sslutils.py:56 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: cinder/wsgi.py:136 cinder/openstack/common/sslutils.py:59 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:169 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:206 +#, python-format +msgid "Started %(name)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:244 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:313 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." +msgstr "" + +#: cinder/api/common.py:92 cinder/api/common.py:126 cinder/volume/api.py:266 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:95 cinder/api/common.py:130 cinder/volume/api.py:263 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:120 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:134 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:162 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:189 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:182 +msgid "Initializing extension manager." 
+msgstr "" + +#: cinder/api/extensions.py:197 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:235 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:236 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:240 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:256 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:262 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:276 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:287 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:356 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:266 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:463 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:786 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:907 +msgid "subclasses must implement construct()!" +msgstr "" + +#: cinder/api/contrib/admin_actions.py:81 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:227 +#: cinder/api/contrib/volume_transfer.py:157 +#: cinder/api/contrib/volume_transfer.py:193 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:224 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:60 +msgid "Snapshot not found." 
+msgstr "" + +#: cinder/api/contrib/hosts.py:86 cinder/api/openstack/wsgi.py:245 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:136 +#, python-format +msgid "Host '%s' could not be found." +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:168 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:180 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:206 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:214 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:111 +msgid "Please specify a name for QoS specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:220 +msgid "Failed to disassociate qos specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:222 +msgid "Qos specs still in use." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:298 +#: cinder/api/contrib/qos_specs_manage.py:351 +msgid "Volume Type id must not be None." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:72 +msgid "Missing required element quota_class_set in request body." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:81 +msgid "Quota class limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:85 +msgid "Quota class limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:60 +msgid "Quota limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quotas.py:65 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:100 +msgid "Missing required element quota_set in request body." +msgstr "" + +#: cinder/api/contrib/quotas.py:111 +#, python-format +msgid "Bad key(s) in quota set: %s" +msgstr "" + +#: cinder/api/contrib/scheduler_hints.py:36 +msgid "Malformed scheduler_hints attribute" +msgstr "" + +#: cinder/api/contrib/services.py:84 +msgid "" +"Query by service parameter is deprecated. Please use binary parameter " +"instead." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:51 +msgid "'status' must be specified." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:61 +#, python-format +msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:67 +#, python-format +msgid "" +"Provided snapshot status %(provided)s not allowed for snapshot with " +"status %(current)s." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:79 +msgid "progress must be an integer percentage" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:101 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:105 +#: cinder/api/v1/snapshot_metadata.py:75 cinder/api/v1/volume_metadata.py:75 +#: cinder/api/v2/snapshot_metadata.py:75 cinder/api/v2/volume_metadata.py:74 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:108 +#: cinder/api/v1/snapshot_metadata.py:79 cinder/api/v1/volume_metadata.py:79 +#: cinder/api/v2/snapshot_metadata.py:79 cinder/api/v2/volume_metadata.py:78 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:150 +msgid "" +"Key names can only contain alphanumeric characters, underscores, periods," +" colons and hyphens." 
+msgstr "" + +#: cinder/api/contrib/volume_actions.py:99 +#, python-format +msgid "" +"Invalid request to attach volume to an instance %(instance_uuid)s and a " +"host %(host_name)s simultaneously" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:107 +msgid "Invalid request to attach volume to an invalid target" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:111 +msgid "" +"Invalid request to attach volume with an invalid mode. Attaching mode " +"should be 'rw' or 'ro'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:196 +msgid "Unable to fetch connection information from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:216 +msgid "Unable to terminate volume connection from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:229 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:237 +msgid "Bad value for 'force' parameter." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:240 +msgid "'force' is not string or bool." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:280 +msgid "New volume size must be specified as an integer." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:299 +msgid "Must specify readonly in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:307 +msgid "Bad value for 'readonly'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:311 +msgid "'readonly' not string or bool" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:325 +msgid "New volume type must be specified." +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:131 +msgid "Listing volume transfers" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:147 +#, python-format +msgid "Creating new volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:162 +#, python-format +msgid "Creating transfer of volume %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:183 +#, python-format +msgid "Accepting volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:196 +#, python-format +msgid "Accepting transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:217 +#, python-format +msgid "Delete transfer with id: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:64 +msgid "key_size must be non-negative" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:67 +msgid "key_size must be an integer" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:73 +msgid "provider must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:75 +msgid "control_location must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:83 +#, python-format +msgid "Valid control location are: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:111 +msgid "Cannot create encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:115 +msgid "Create body is not valid." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:157 +msgid "Cannot delete encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/middleware/auth.py:108 +msgid "Invalid service catalog json." 
+msgstr "" + +#: cinder/api/middleware/fault.py:44 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:53 cinder/api/openstack/wsgi.py:984 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/fault.py:69 +#, python-format +msgid "%(exception)s: %(explanation)s" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:55 cinder/api/middleware/sizelimit.py:64 +#: cinder/api/middleware/sizelimit.py:78 +msgid "Request is too large." +msgstr "" + +#: cinder/api/openstack/__init__.py:69 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:80 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:104 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:126 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." +msgstr "" + +#: cinder/api/openstack/wsgi.py:220 cinder/api/openstack/wsgi.py:634 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:639 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:677 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:682 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:685 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:793 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:799 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:803 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:914 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:917 cinder/api/openstack/wsgi.py:930 +#: cinder/api/v1/snapshot_metadata.py:53 cinder/api/v1/snapshot_metadata.py:71 +#: cinder/api/v1/snapshot_metadata.py:96 cinder/api/v1/snapshot_metadata.py:121 +#: cinder/api/v1/volume_metadata.py:53 cinder/api/v1/volume_metadata.py:71 +#: cinder/api/v1/volume_metadata.py:96 cinder/api/v1/volume_metadata.py:121 +#: cinder/api/v2/snapshot_metadata.py:53 cinder/api/v2/snapshot_metadata.py:71 +#: cinder/api/v2/snapshot_metadata.py:96 cinder/api/v2/snapshot_metadata.py:121 +#: cinder/api/v2/volume_metadata.py:52 cinder/api/v2/volume_metadata.py:70 +#: cinder/api/v2/volume_metadata.py:95 cinder/api/v2/volume_metadata.py:120 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:927 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:939 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:987 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." 
+msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:139 cinder/api/v2/limits.py:138 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:264 cinder/api/v2/limits.py:261 +msgid "This request was rate-limited." +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:37 cinder/api/v1/snapshot_metadata.py:117 +#: cinder/api/v1/snapshot_metadata.py:156 cinder/api/v2/snapshot_metadata.py:37 +#: cinder/api/v2/snapshot_metadata.py:117 +#: cinder/api/v2/snapshot_metadata.py:156 +msgid "snapshot does not exist" +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:139 +#: cinder/api/v1/snapshot_metadata.py:149 cinder/api/v1/volume_metadata.py:139 +#: cinder/api/v1/volume_metadata.py:149 cinder/api/v2/snapshot_metadata.py:139 +#: cinder/api/v2/snapshot_metadata.py:149 cinder/api/v2/volume_metadata.py:138 +#: cinder/api/v2/volume_metadata.py:148 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:119 cinder/api/v2/snapshots.py:120 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:184 +msgid "'volume_id' must be specified" +msgstr "" + +#: cinder/api/v1/snapshots.py:182 cinder/api/v2/snapshots.py:193 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:186 cinder/api/v2/snapshots.py:202 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:37 cinder/api/v1/volume_metadata.py:117 +#: cinder/api/v1/volume_metadata.py:156 cinder/api/v2/volume_metadata.py:36 +#: cinder/api/v2/volume_metadata.py:116 cinder/api/v2/volume_metadata.py:155 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:111 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:290 cinder/api/v2/volumes.py:228 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:344 cinder/api/v1/volumes.py:348 +#: cinder/api/v2/volumes.py:298 cinder/api/v2/volumes.py:302 +msgid "Invalid imageRef provided." 
+msgstr "" + +#: cinder/api/v1/volumes.py:388 cinder/api/v2/volumes.py:354 +#, python-format +msgid "snapshot id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:401 +#, python-format +msgid "source vol id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:412 cinder/api/v2/volumes.py:377 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/v1/volumes.py:496 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/snapshots.py:111 cinder/api/v2/snapshots.py:126 +#: cinder/api/v2/snapshots.py:267 +msgid "Snapshot could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:174 cinder/api/v2/snapshots.py:234 +#: cinder/api/v2/volumes.py:313 cinder/api/v2/volumes.py:419 +#, python-format +msgid "Missing required element '%s' in request body" +msgstr "" + +#: cinder/api/v2/snapshots.py:190 cinder/api/v2/volumes.py:217 +#: cinder/api/v2/volumes.py:234 cinder/api/v2/volumes.py:449 +msgid "Volume could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:230 cinder/api/v2/volumes.py:415 +msgid "Missing request body" +msgstr "" + +#: cinder/api/v2/types.py:70 +msgid "Volume type not found" +msgstr "" + +#: cinder/api/v2/volumes.py:237 +msgid "Volume cannot be deleted while in attached state" +msgstr "" + +#: cinder/api/v2/volumes.py:343 +msgid "Volume type not found." +msgstr "" + +#: cinder/api/v2/volumes.py:366 +#, python-format +msgid "source volume id:%s not found" +msgstr "" + +#: cinder/api/v2/volumes.py:472 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:66 +msgid "Backup status must be available or error" +msgstr "" + +#: cinder/backup/api.py:105 +msgid "Volume to be backed up must be available" +msgstr "" + +#: cinder/backup/api.py:140 +msgid "Backup status must be available" +msgstr "" + +#: cinder/backup/api.py:145 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:154 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:170 +msgid "Volume to be restored to must be available" +msgstr "" + +#: cinder/backup/api.py:176 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." +msgstr "" + +#: cinder/backup/api.py:181 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:97 +msgid "NULL host not allowed for volume backend lookup." +msgstr "" + +#: cinder/backup/manager.py:100 +#, python-format +msgid "Checking hostname '%s' for backend info." +msgstr "" + +#: cinder/backup/manager.py:107 +#, python-format +msgid "Backend not found in hostname (%s) so using default." +msgstr "" + +#: cinder/backup/manager.py:117 +#, python-format +msgid "Manager requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:120 cinder/backup/manager.py:132 +msgid "Fetching default backend." +msgstr "" + +#: cinder/backup/manager.py:123 +#, python-format +msgid "Volume manager for backend '%s' does not exist." +msgstr "" + +#: cinder/backup/manager.py:129 +#, python-format +msgid "Driver requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:147 +#, python-format +msgid "" +"Registering backend %(backend)s (host=%(host)s " +"backend_name=%(backend_name)s)." +msgstr "" + +#: cinder/backup/manager.py:154 +#, python-format +msgid "Registering default backend %s." 
+msgstr "" + +#: cinder/backup/manager.py:158 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)." +msgstr "" + +#: cinder/backup/manager.py:165 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s." +msgstr "" + +#: cinder/backup/manager.py:184 +msgid "Cleaning up incomplete backup operations." +msgstr "" + +#: cinder/backup/manager.py:189 +#, python-format +msgid "Resetting volume %s to available (was backing-up)." +msgstr "" + +#: cinder/backup/manager.py:194 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)." +msgstr "" + +#: cinder/backup/manager.py:206 +#, python-format +msgid "Resetting backup %s to error (was creating)." +msgstr "" + +#: cinder/backup/manager.py:212 +#, python-format +msgid "Resetting backup %s to available (was restoring)." +msgstr "" + +#: cinder/backup/manager.py:217 +#, python-format +msgid "Resuming delete on backup: %s." +msgstr "" + +#: cinder/backup/manager.py:225 +#, python-format +msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:237 +#, python-format +msgid "" +"Create backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:249 +#, python-format +msgid "" +"Create backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:282 +#, python-format +msgid "Create backup finished. backup: %s." +msgstr "" + +#: cinder/backup/manager.py:286 +#, python-format +msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:299 +#, python-format +msgid "" +"Restore backup aborted: expected volume status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:310 +#, python-format +msgid "" +"Restore backup aborted: expected backup status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:329 +#, python-format +msgid "" +"Restore backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:360 +#, python-format +msgid "" +"Restore backup finished, backup %(backup_id)s restored to volume " +"%(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:379 +#, python-format +msgid "Delete backup started, backup: %s." +msgstr "" + +#: cinder/backup/manager.py:386 +#, python-format +msgid "" +"Delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:399 +#, python-format +msgid "" +"Delete backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:422 +#, python-format +msgid "Delete backup finished, backup %s deleted." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:116 +msgid "" +"rbd striping not supported - ignoring configuration settings for rbd " +"striping" +msgstr "" + +#: cinder/backup/drivers/ceph.py:147 +#, python-format +msgid "invalid user '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:213 +msgid "backup_id required" +msgstr "" + +#: cinder/backup/drivers/ceph.py:224 +#, python-format +msgid "discarding %(length)s bytes from offset %(offset)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:232 +#, python-format +msgid "writing zeroes chunk %d" +msgstr "" + +#: cinder/backup/drivers/ceph.py:246 +#, python-format +msgid "transferring data between '%(src)s' and '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:250 +#, python-format +msgid "%(chunks)s chunks of %(bytes)s bytes to be transferred" +msgstr "" + +#: cinder/backup/drivers/ceph.py:269 +#, python-format +msgid "transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:279 +#, python-format +msgid "transferring remaining %s bytes" +msgstr "" + +#: cinder/backup/drivers/ceph.py:295 +#, python-format +msgid "creating base image '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:322 cinder/backup/drivers/ceph.py:603 +#, python-format +msgid "deleting backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:325 +msgid "no backup snapshot to delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:361 +#, python-format +msgid "trying diff format name format basename='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:369 +#, python-format +msgid "image %s not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:377 +#, python-format +msgid "base image still has %s snapshots so skipping base image delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:382 +#, python-format +msgid "deleting base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:389 +#, python-format +msgid "image busy, retrying %(retries)s more time(s) in %(delay)ss" +msgstr "" + +#: cinder/backup/drivers/ceph.py:394 +msgid "max retries reached - raising error" +msgstr "" + +#: cinder/backup/drivers/ceph.py:397 +#, python-format +msgid "base backup image='%s' deleted)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:407 +#, python-format +msgid "deleting source snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:453 +#, python-format +msgid "performing differential transfer from '%(src)s' to '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:478 +#, python-format +msgid "rbd diff op failed - (ret=%(ret)s stderr=%(stderr)s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:488 +#, python-format +msgid "image '%s' not found - trying diff format name" +msgstr "" + +#: cinder/backup/drivers/ceph.py:493 +#, python-format +msgid "diff format image '%s' not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:528 +#, python-format +msgid "using --from-snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:543 +#, python-format +msgid "source snap '%s' is stale so deleting" +msgstr "" + +#: cinder/backup/drivers/ceph.py:555 +#, python-format +msgid "" +"snap='%(snap)s' does not exist in base image='%(base)s' - aborting " +"incremental backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:566 +#, python-format +msgid "creating backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:586 +#, python-format +msgid "differential backup transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:595 +msgid "differential backup transfer failed" +msgstr "" + +#: 
cinder/backup/drivers/ceph.py:625 +#, python-format +msgid "creating base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:634 +msgid "copying data" +msgstr "" + +#: cinder/backup/drivers/ceph.py:694 +#, python-format +msgid "looking for snapshot of backup base '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:697 +#, python-format +msgid "backup base '%s' has no snapshots" +msgstr "" + +#: cinder/backup/drivers/ceph.py:704 +#, python-format +msgid "backup '%s' has no snapshot" +msgstr "" + +#: cinder/backup/drivers/ceph.py:708 +#, python-format +msgid "backup should only have one snapshot but instead has %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:713 +#, python-format +msgid "found snapshot '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:734 +msgid "need non-zero volume size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:751 +#, python-format +msgid "Starting backup of volume='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:764 +msgid "forcing full backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:776 +#, python-format +msgid "backup '%s' finished." +msgstr "" + +#: cinder/backup/drivers/ceph.py:834 +msgid "adjusting restore vol size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:846 +#, python-format +msgid "trying incremental restore from base='%(base)s' snap='%(snap)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:858 +msgid "differential restore failed, trying full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:869 +#, python-format +msgid "restore transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:916 +#, python-format +msgid "rbd has %s extents" +msgstr "" + +#: cinder/backup/drivers/ceph.py:938 +msgid "dest volume is original volume - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:959 +msgid "destination has extents - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:964 +#, python-format +msgid "no restore point found for backup='%s', forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:995 +msgid "forcing full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1005 +#, python-format +msgid "starting restore from Ceph backup=%(src)s to volume=%(dest)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1016 +msgid "volume_file does not support fileno() so skipping fsync()" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1021 +msgid "restore finished successfully." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:1023 +#, python-format +msgid "restore finished with error - %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1029 +#, python-format +msgid "delete started for backup=%s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1034 +msgid "rbd image not found but continuing anyway so that db entry can be removed" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1037 +#, python-format +msgid "delete '%s' finished with warning" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1039 +#, python-format +msgid "delete '%s' finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:106 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:123 +#, python-format +msgid "single_user auth mode enabled, but %(param)s not set" +msgstr "" + +#: cinder/backup/drivers/swift.py:141 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:146 +#, python-format +msgid "container %s does not exist" +msgstr "" + +#: cinder/backup/drivers/swift.py:151 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/drivers/swift.py:157 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:173 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:182 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:192 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:209 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/drivers/swift.py:214 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:219 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:224 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/drivers/swift.py:234 +#, python-format +msgid "volume size %d is invalid." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:248 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:271 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/drivers/swift.py:278 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:287 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/drivers/swift.py:291 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/drivers/swift.py:297 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:301 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:304 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:312 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/drivers/swift.py:328 cinder/backup/drivers/tsm.py:324 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/drivers/swift.py:345 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/drivers/swift.py:350 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:356 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/drivers/swift.py:362 +#, python-format +msgid "" +"restoring object from swift. backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:378 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/drivers/swift.py:401 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:409 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:423 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:428 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:432 cinder/backup/drivers/tsm.py:378 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:446 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:455 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:458 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:468 cinder/backup/drivers/tsm.py:440 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/backup/drivers/tsm.py:85 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to create device hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:143 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to obtain backup success notification from " +"server.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:173 +#, python-format +msgid "" +"restore: %(vol_id)s Failed.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:199 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a block device." +msgstr "" + +#: cinder/backup/drivers/tsm.py:206 +#, python-format +msgid "backup: %(vol_id)s Failed. Cannot obtain real path to device %(path)s." +msgstr "" + +#: cinder/backup/drivers/tsm.py:213 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a file." +msgstr "" + +#: cinder/backup/drivers/tsm.py:260 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to remove backup hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:286 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to TSM, volume path: " +"%(volume_path)s," +msgstr "" + +#: cinder/backup/drivers/tsm.py:298 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:308 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:338 +#, python-format +msgid "" +"restore: starting restore of backup from TSM to volume %(volume_id)s, " +"backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:352 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:362 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:413 +#, python-format +msgid "" +"delete: %(vol_id)s Failed to run dsmc with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:421 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments with " +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:432 +#, python-format +msgid "" +"delete: %(vol_id)s Failed with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/brick/exception.py:55 +#, python-format +msgid "Exception in string format operation. 
msg='%s'"
+msgstr ""
+
+#: cinder/brick/exception.py:85
+msgid "We are unable to locate any Fibre Channel devices."
+msgstr ""
+
+#: cinder/brick/exception.py:89
+msgid "Unable to find a Fibre Channel volume device."
+msgstr ""
+
+#: cinder/brick/exception.py:93
+#, python-format
+msgid "Volume device not found at %(device)s."
+msgstr ""
+
+#: cinder/brick/exception.py:97
+#, python-format
+msgid "Unable to find Volume Group: %(vg_name)s"
+msgstr ""
+
+#: cinder/brick/exception.py:101
+#, python-format
+msgid "Failed to create Volume Group: %(vg_name)s"
+msgstr ""
+
+#: cinder/brick/exception.py:105
+#, python-format
+msgid "Failed to create iscsi target for volume %(volume_id)s."
+msgstr ""
+
+#: cinder/brick/exception.py:109
+#, python-format
+msgid "Failed to remove iscsi target for volume %(volume_id)s."
+msgstr ""
+
+#: cinder/brick/exception.py:113
+#, python-format
+msgid "Failed to attach iSCSI target for volume %(volume_id)s."
+msgstr ""
+
+#: cinder/brick/exception.py:117
+#, python-format
+msgid "Connect to volume via protocol %(protocol)s not supported."
+msgstr ""
+
+#: cinder/brick/initiator/connector.py:127
+#, python-format
+msgid "Invalid InitiatorConnector protocol specified %(protocol)s"
+msgstr ""
+
+#: cinder/brick/initiator/connector.py:140
+#, python-format
+msgid "Failed to access the device on the path %(path)s: %(error)s %(info)s."
+msgstr ""
+
+#: cinder/brick/initiator/connector.py:229
+#, python-format
+msgid ""
+"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. Try"
+" number: %(tries)s"
+msgstr ""
+
+#: cinder/brick/initiator/connector.py:242
+#, python-format
+msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)"
+msgstr ""
+
+#: cinder/brick/initiator/connector.py:317
+#, python-format
+msgid "Could not find the iSCSI Initiator File %s"
+msgstr ""
+
+#: cinder/brick/initiator/connector.py:609
+msgid "We are unable to locate any Fibre Channel devices"
+msgstr ""
+
+#: cinder/brick/initiator/connector.py:619
+#, python-format
+msgid "Looking for Fibre Channel dev %(device)s"
+msgstr ""
+
+#: cinder/brick/initiator/connector.py:629
+msgid "Fibre Channel volume device not found."
+msgstr ""
+
+#: cinder/brick/initiator/connector.py:633
+#, python-format
+msgid "Fibre volume not yet found. Will rescan & retry. Try number: %(tries)s"
+msgstr ""
+
+#: cinder/brick/initiator/connector.py:649
+#, python-format
+msgid "Found Fibre Channel volume %(name)s (after %(tries)s rescans)"
+msgstr ""
+
+#: cinder/brick/initiator/connector.py:658
+#, python-format
+msgid "Multipath device discovered %(device)s"
+msgstr ""
+
+#: cinder/brick/initiator/connector.py:776
+#, python-format
+msgid "AoE volume not yet found at: %(path)s. Try number: %(tries)s"
+msgstr ""
+
+#: cinder/brick/initiator/connector.py:789
+#, python-format
+msgid "Found AoE device %(path)s (after %(tries)s rediscover)"
+msgstr ""
+
+#: cinder/brick/initiator/connector.py:815
+#, python-format
+msgid "aoe-discover: stdout=%(out)s stderr=%(err)s"
+msgstr ""
+
+#: cinder/brick/initiator/connector.py:825
+#, python-format
+msgid "aoe-revalidate %(dev)s: stdout=%(out)s stderr=%(err)s"
+msgstr ""
+
+#: cinder/brick/initiator/connector.py:834
+#, python-format
+msgid "aoe-flush %(dev)s: stdout=%(out)s stderr=%(err)s"
+msgstr ""
+
+#: cinder/brick/initiator/connector.py:858
+msgid ""
+"Connection details not present. RemoteFsClient may not initialize "
+"properly."
+msgstr "" + +#: cinder/brick/initiator/connector.py:915 +msgid "Invalid connection_properties specified no device_path attribute" +msgstr "" + +#: cinder/brick/initiator/linuxfc.py:50 cinder/brick/initiator/linuxfc.py:56 +msgid "systool is not installed" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:99 +#: cinder/brick/initiator/linuxscsi.py:107 +#: cinder/brick/initiator/linuxscsi.py:124 +#, python-format +msgid "multipath call failed exit (%(code)s)" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:145 +#, python-format +msgid "Couldn't find multipath device %(line)s" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:149 +#, python-format +msgid "Found multipath device = %(mdev)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:140 +msgid "Attempting recreate of backing lun..." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:158 +#, python-format +msgid "" +"Failed to recover attempt to create iscsi backing lun for volume " +"id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:177 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:184 +#, python-format +msgid "" +"Created volume path %(vp)s,\n" +"content: %(vc)%" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:216 cinder/brick/iscsi/iscsi.py:365 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:227 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:258 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:262 +#, python-format +msgid "Volume path %s does not exist, nothing to remove." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:280 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:290 cinder/brick/iscsi/iscsi.py:550 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:375 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:469 +msgid "cinder-rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:489 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:513 cinder/brick/iscsi/iscsi.py:522 +#, python-format +msgid "Failed to create iscsi target for volume id:%s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:532 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:542 +#, python-format +msgid "Failed to remove iscsi target for volume id:%s." 
+msgstr "" + +#: cinder/brick/iscsi/iscsi.py:571 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 cinder/brick/local_dev/lvm.py:158 +#: cinder/brick/local_dev/lvm.py:474 cinder/brick/local_dev/lvm.py:503 +#: cinder/brick/local_dev/lvm.py:546 cinder/brick/local_dev/lvm.py:609 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 cinder/brick/local_dev/lvm.py:159 +#: cinder/brick/local_dev/lvm.py:475 cinder/brick/local_dev/lvm.py:504 +#: cinder/brick/local_dev/lvm.py:547 cinder/brick/local_dev/lvm.py:610 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 cinder/brick/local_dev/lvm.py:160 +#: cinder/brick/local_dev/lvm.py:476 cinder/brick/local_dev/lvm.py:505 +#: cinder/brick/local_dev/lvm.py:548 cinder/brick/local_dev/lvm.py:611 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, python-format +msgid "Unable to locate Volume Group %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:157 +msgid "Error querying thin pool about data_percent" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:370 +#, python-format +msgid "Unable to find VG: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:420 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:473 +msgid "Error creating Volume" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:489 +#, python-format +msgid "Unable to find LV: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:502 +msgid "Error creating snapshot" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:545 +msgid "Error activating LV" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:563 +#, python-format +msgid "Error reported running lvremove: CMD: %(command)s, RESPONSE: %(response)s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:568 +msgid "Attempting udev settle and retry of lvremove..." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:608 +msgid "Error extending Volume" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:39 +msgid "nfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:45 +msgid "glusterfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:86 +#, python-format +msgid "Already mounted: %s" +msgstr "" + +#: cinder/common/config.py:125 +msgid "Deploy v1 of the Cinder API." +msgstr "" + +#: cinder/common/config.py:128 +msgid "Deploy v2 of the Cinder API." +msgstr "" + +#: cinder/common/sqlalchemyutils.py:66 +#: cinder/openstack/common/db/sqlalchemy/utils.py:72 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:114 +#: cinder/openstack/common/db/sqlalchemy/utils.py:120 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/compute/nova.py:97 +#, python-format +msgid "Novaclient connection created using URL: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:63 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:190 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:843 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1842 +#, python-format +msgid "VolumeType %s deletion failed, VolumeType in use." 
+msgstr "" + +#: cinder/db/sqlalchemy/api.py:2530 +#, python-format +msgid "No backup with id %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2615 +msgid "Volume must be available" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2639 +#, python-format +msgid "Volume in unexpected state %s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2662 +#, python-format +msgid "" +"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " +"%(status)s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:37 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:64 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:240 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:269 +msgid "Downgrade from initial Cinder install is unsupported." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:49 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:74 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:105 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:45 +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:48 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:46 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:127 +msgid "Dropping foreign key reservations_ibfk_1 failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:133 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:140 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:147 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:60 +msgid "Exception while creating table 'volume_glance_metadata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:75 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:68 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:58 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:61 +msgid "transfers table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:31 +msgid "migrations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:61 +#, python-format +msgid "Table |%s| not created" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:37 +#, python-format +msgid "Exception while dropping table %s." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:100 +#, python-format +msgid "Exception while creating table %s." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:34 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:43 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:49 +#, python-format +msgid "Column |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:92 +msgid "encryption_key_id column not dropped from volumes" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:100 +msgid "encryption_key_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:105 +msgid "volume_type_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:113 +msgid "encryption table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:49 +msgid "Table quality_of_service_specs not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:60 +msgid "Added qos_specs_id column to volume type table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:85 +msgid "Dropping foreign key volume_types_ibfk_1 failed" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:93 +msgid "Dropping qos_specs_id column failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:100 +msgid "Dropping quality_of_service_specs table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:59 +msgid "volume_admin_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:48 +msgid "" +"Found existing 'default' entries in the quota_classes table. Skipping " +"insertion of default values." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:72 +msgid "Added default quota class data into the DB." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:74 +msgid "Default quota class data not inserted into the DB." +msgstr "" + +#: cinder/image/glance.py:161 cinder/image/glance.py:169 +#, python-format +msgid "Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:94 cinder/image/image_utils.py:199 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/image/image_utils.py:101 +#, python-format +msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:109 cinder/image/image_utils.py:192 +#, python-format +msgid "" +"Size is %(image_size)dGB and doesn't fit in a volume of size " +"%(volume_size)dGB." +msgstr "" + +#: cinder/image/image_utils.py:157 +#, python-format +msgid "" +"qemu-img is not installed and image is of type %s. Only RAW images can " +"be used if qemu-img is not installed." +msgstr "" + +#: cinder/image/image_utils.py:164 +msgid "" +"qemu-img is not installed and the disk format is not specified. Only RAW" +" images can be used if qemu-img is not installed." 
+msgstr "" + +#: cinder/image/image_utils.py:178 +#, python-format +msgid "Copying image from %(tmp)s to volume %(dest)s - size: %(size)s" +msgstr "" + +#: cinder/image/image_utils.py:206 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:224 +#, python-format +msgid "Converted to %(vol_format)s, but format is now %(file_format)s" +msgstr "" + +#: cinder/image/image_utils.py:260 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:78 +msgid "" +"config option keymgr.fixed_key has not been defined: some operations may " +"fail unexpectedly" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:80 +msgid "keymgr.fixed_key not defined" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:134 +#, python-format +msgid "Not deleting key %s" +msgstr "" + +#: cinder/openstack/common/eventlet_backdoor.py:140 +#, python-format +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/fileutils.py:64 +#, python-format +msgid "Reloading cached file %s" +msgstr "" + +#: cinder/openstack/common/gettextutils.py:252 +msgid "Message objects do not support addition." +msgstr "" + +#: cinder/openstack/common/gettextutils.py:261 +msgid "" +"Message objects do not support str() because they may contain non-ascii " +"characters. Please use unicode() or translate() instead." +msgstr "" + +#: cinder/openstack/common/imageutils.py:96 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:189 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:200 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:227 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:235 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/log.py:301 +#, python-format +msgid "Deprecated: %s" +msgstr "" + +#: cinder/openstack/common/log.py:402 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:453 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:623 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:82 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:89 +#: cinder/tests/brick/test_brick_connector.py:466 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:129 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:136 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:43 +#, python-format +msgid "Unexpected argument for periodic task creation: %(arg)s." 
+msgstr "" + +#: cinder/openstack/common/periodic_task.py:134 +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:139 +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:177 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:186 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." +msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:127 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/openstack/common/processutils.py:142 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:167 +#: cinder/openstack/common/processutils.py:239 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:345 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:179 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/openstack/common/processutils.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:318 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:220 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/openstack/common/processutils.py:224 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/openstack/common/service.py:175 +#: cinder/openstack/common/service.py:269 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:187 +msgid "Exception during rpc cleanup." 
+msgstr "" + +#: cinder/openstack/common/service.py:238 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:275 +msgid "Unhandled exception" +msgstr "" + +#: cinder/openstack/common/service.py:308 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/openstack/common/service.py:327 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/openstack/common/service.py:337 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/openstack/common/service.py:354 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/openstack/common/service.py:358 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/service.py:362 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/openstack/common/service.py:392 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/openstack/common/service.py:410 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/openstack/common/sslutils.py:98 +#, python-format +msgid "Invalid SSL version : %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:86 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/strutils.py:182 +#, python-format +msgid "Invalid string format: %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:189 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/openstack/common/versionutils.py:69 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and " +"may be removed in %(remove_in)s." +msgstr "" + +#: cinder/openstack/common/versionutils.py:73 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s and may be removed in " +"%(remove_in)s. It will not be superseded." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:29 +msgid "An unknown error occurred in crypto utils." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:36 +#, python-format +msgid "Block size of %(given)d is too big, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:45 +#, python-format +msgid "Length of %(given)d is too long, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/db/exception.py:44 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:487 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:538 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:610 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/utils.py:33 +msgid "Sort key supplied was not valid." +msgstr "" + +#: cinder/openstack/common/notifier/api.py:129 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:145 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:164 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. 
Please use rpc_notifier instead."
+msgstr ""
+
+#: cinder/openstack/common/notifier/rpc_notifier.py:45
+#: cinder/openstack/common/notifier/rpc_notifier2.py:51
+#, python-format
+msgid "Could not send notification to %(topic)s. Payload=%(message)s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/__init__.py:105
+#, python-format
+msgid ""
+"An RPC is being made while holding a lock. The locks currently held are "
+"%(locks)s. This is probably a bug. Please report it. Include the "
+"following: [%(stack)s]."
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:83
+msgid "Pool creating new connection"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:208
+#, python-format
+msgid "no calling threads waiting for msg_id : %s, message : %s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:216
+#, python-format
+msgid ""
+"Number of call waiters is greater than warning threshold: %d. There "
+"could be a MulticallProxyWaiter leak."
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:299
+#, python-format
+msgid "unpacked context: %s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:345
+#, python-format
+msgid "UNIQUE_ID is %s."
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:414
+#, python-format
+msgid "received %s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:422
+#, python-format
+msgid "no method for message: %s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:423
+#, python-format
+msgid "No method for message: %s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:451
+#: cinder/openstack/common/rpc/impl_zmq.py:280
+#, python-format
+msgid "Expected exception during message handling (%s)"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:459
+#: cinder/openstack/common/rpc/impl_zmq.py:286
+msgid "Exception during message handling"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:594
+#, python-format
+msgid "Making synchronous call on %s ..."
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:597
+#, python-format
+msgid "MSG_ID is %s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:631
+#, python-format
+msgid "Making asynchronous cast on %s..."
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:640
+msgid "Making asynchronous fanout cast..."
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:668
+#, python-format
+msgid "Sending %(event_type)s on %(topic)s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:74
+msgid "An unknown RPC related exception occurred."
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:104
+#, python-format
+msgid ""
+"Remote error: %(exc_type)s %(value)s\n"
+"%(traceback)s."
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:121
+#, python-format
+msgid ""
+"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:"
+" \"%(method)s\" info: \"%(info)s\""
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:137
+#: cinder/openstack/common/rpc/common.py:138
+#: cinder/openstack/common/rpc/common.py:139
+msgid ""
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:143
+#, python-format
+msgid "Found duplicate message(%(msg_id)s). Skipping it."
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:147
+msgid "Invalid reuse of an RPC connection."
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:151
+#, python-format
+msgid "Specified RPC version, %(version)s, not supported by this endpoint."
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:156
+#, python-format
+msgid ""
+"Specified RPC envelope version, %(version)s, not supported by this "
+"endpoint."
+msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:166 +#: cinder/openstack/common/rpc/impl_qpid.py:164 +msgid "Failed to process message... skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:477 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:499 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:536 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:552 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:606 +#: cinder/openstack/common/rpc/impl_qpid.py:507 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:624 +#: cinder/openstack/common/rpc/impl_qpid.py:522 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:628 +#: cinder/openstack/common/rpc/impl_qpid.py:526 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:667 +#: cinder/openstack/common/rpc/impl_qpid.py:561 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:84 +#, python-format +msgid "Invalid value for qpid_topology_version: %d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:455 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:461 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:474 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:534 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:101 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:136 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:137 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:138 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:146 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:158 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:200 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:205 +msgid "You cannot send on this socket." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:267 +#, python-format +msgid "Running func with context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:305 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:387 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:437 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:443 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:475 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:481 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:497 +#, python-format +msgid "Required IPC directory does not exist at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:506 +#, python-format +msgid "Permission denied to IPC directory at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:509 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:543 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:562 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:590 +msgid "Skipping topic registration. Already registered." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:597 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:649 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:662 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:675 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:678 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:681 +#, python-format +msgid "Received message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:682 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:691 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:698 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:721 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:724 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:728 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:731 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:771 +#, python-format +msgid "topic is %s." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:815 +#, python-format +msgid "rpc_zmq_matchmaker = %(orig)s is deprecated; use %(new)s instead" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#: cinder/openstack/common/rpc/matchmaker_ring.py:79 +#: cinder/openstack/common/rpc/matchmaker_ring.py:97 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:54 +#, python-format +msgid "extra_spec requirement '%(req)s' does not match '%(cap)s'" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:67 +#, python-format +msgid "%(host_state)s fails resource_type extra_specs requirements" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:43 +msgid "Re-scheduling is disabled." +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:52 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/scheduler/driver.py:69 +msgid "Must implement host_passes_filters" +msgstr "" + +#: cinder/scheduler/driver.py:74 +msgid "Must implement find_retype_host" +msgstr "" + +#: cinder/scheduler/driver.py:78 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:82 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:98 +#, python-format +msgid "cannot place volume %(id)s on %(host)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:114 +#, python-format +msgid "No valid hosts for volume %(id)s with type %(type)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:125 +#, python-format +msgid "" +"Current host not valid for volume %(id)s with type %(type)s, migration " +"not allowed" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:156 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:174 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:207 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:259 +#, python-format +msgid "Filtered %s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:276 +#, python-format +msgid "Choosing %s" +msgstr "" + +#: cinder/scheduler/host_manager.py:264 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:269 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:294 +#, python-format +msgid "volume service is down or disabled. (host: %s)" +msgstr "" + +#: cinder/scheduler/manager.py:63 +msgid "" +"ChanceScheduler and SimpleScheduler have been deprecated due to lack of " +"support for advanced features like: volume types, volume encryption, QoS " +"etc. These two schedulers can be fully replaced by FilterScheduler with " +"certain combination of filters and weighers." 
+msgstr "" + +#: cinder/scheduler/manager.py:98 cinder/scheduler/manager.py:100 +msgid "Failed to create scheduler manager volume flow" +msgstr "" + +#: cinder/scheduler/manager.py:159 +msgid "New volume type not specified in request_spec." +msgstr "" + +#: cinder/scheduler/manager.py:174 +#, python-format +msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." +msgstr "" + +#: cinder/scheduler/manager.py:192 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:68 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%s'" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:43 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:57 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:53 +msgid "No volume_id provided to populate a request_spec from" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:116 +#, python-format +msgid "Failed to schedule_create_volume: %(cause)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:135 +#, python-format +msgid "Failed notifying on %(topic)s payload %(payload)s" +msgstr "" + +#: cinder/tests/fake_driver.py:57 cinder/volume/driver.py:784 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:76 cinder/volume/driver.py:884 +#, python-format +msgid "FAKE ISER: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:97 +msgid "local_path not implemented" +msgstr "" + +#: cinder/tests/fake_driver.py:124 cinder/tests/fake_driver.py:129 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:70 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:78 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:94 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:97 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:58 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_netapp_nfs.py:360 +#, python-format +msgid "Share %(share)s and file name %(file_name)s" +msgstr "" + +#: cinder/tests/test_rbd.py:768 cinder/volume/drivers/rbd.py:175 +msgid "flush() not supported in this version of librbd" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:260 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1507 +#, python-format +msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1510 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1515 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:60 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:61 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: 
cinder/tests/test_xiv_ds8k.py:102 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/tests/api/contrib/test_backups.py:741 +msgid "Invalid input" +msgstr "" + +#: cinder/tests/integrated/test_login.py:29 +#, python-format +msgid "volume: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:32 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:42 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:50 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:58 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:100 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:103 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:121 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:148 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:159 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:166 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/transfer/api.py:68 +msgid "Volume in unexpected state" +msgstr "" + +#: cinder/transfer/api.py:102 cinder/volume/api.py:367 +msgid "status must be available" +msgstr "" + +#: cinder/transfer/api.py:119 +#, python-format +msgid "Failed to create transfer record for %s" +msgstr "" + +#: cinder/transfer/api.py:136 +#, python-format +msgid "Attempt to transfer %s with invalid auth key." +msgstr "" + +#: cinder/transfer/api.py:156 cinder/volume/flows/api/create_volume.py:508 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/transfer/api.py:182 +#, python-format +msgid "Failed to update quota donating volume transfer id %s" +msgstr "" + +#: cinder/transfer/api.py:199 +#, python-format +msgid "Volume %s has been transferred."
+msgstr "" + +#: cinder/volume/api.py:143 +#, python-format +msgid "Unable to query if %s is in the availability zone set" +msgstr "" + +#: cinder/volume/api.py:171 cinder/volume/api.py:173 +msgid "Failed to create api volume flow" +msgstr "" + +#: cinder/volume/api.py:202 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:214 +#, python-format +msgid "Volume status must be available or error, but current status is: %s" +msgstr "" + +#: cinder/volume/api.py:224 +msgid "Volume cannot be deleted while migrating" +msgstr "" + +#: cinder/volume/api.py:229 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:285 cinder/volume/api.py:350 +#: cinder/volume/qos_specs.py:240 cinder/volume/volume_types.py:67 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:370 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:377 +msgid "status must be in-use to detach" +msgstr "" + +#: cinder/volume/api.py:388 +msgid "Volume status must be available to reserve" +msgstr "" + +#: cinder/volume/api.py:464 +msgid "Snapshot cannot be created while volume is migrating" +msgstr "" + +#: cinder/volume/api.py:468 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:490 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:502 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:553 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/api.py:581 cinder/volume/flows/api/create_volume.py:208 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:585 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:589 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:720 cinder/volume/api.py:772 +msgid "Volume status must be available/in-use." +msgstr "" + +#: cinder/volume/api.py:723 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/api.py:752 +msgid "Volume status must be available to extend." +msgstr "" + +#: cinder/volume/api.py:757 +#, python-format +msgid "" +"New size for extend must be greater than current size. (current: " +"%(size)s, extended: %(new_size)s)" +msgstr "" + +#: cinder/volume/api.py:778 +msgid "Volume is already part of an active migration" +msgstr "" + +#: cinder/volume/api.py:784 +msgid "volume must not have snapshots" +msgstr "" + +#: cinder/volume/api.py:797 +#, python-format +msgid "No available service named %s" +msgstr "" + +#: cinder/volume/api.py:803 +msgid "Destination host must be different than current host" +msgstr "" + +#: cinder/volume/api.py:833 +msgid "Source volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:837 +msgid "Destination volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:842 +#, python-format +msgid "Destination has migration_status %(stat)s, expected %(exp)s." +msgstr "" + +#: cinder/volume/api.py:853 +msgid "Volume status must be available to update readonly flag." 
+msgstr "" + +#: cinder/volume/api.py:862 +#, python-format +msgid "Unable to update type due to incorrect status on volume: %s" +msgstr "" + +#: cinder/volume/api.py:868 +#, python-format +msgid "Volume %s is already part of an active migration." +msgstr "" + +#: cinder/volume/api.py:874 +#, python-format +msgid "migration_policy must be 'on-demand' or 'never', passed: %s" +msgstr "" + +#: cinder/volume/api.py:887 +#, python-format +msgid "Invalid volume_type passed: %s" +msgstr "" + +#: cinder/volume/api.py:900 +#, python-format +msgid "New volume_type same as original: %s" +msgstr "" + +#: cinder/volume/api.py:915 +msgid "Retype cannot change encryption requirements" +msgstr "" + +#: cinder/volume/api.py:927 +msgid "Retype cannot change front-end qos specs for in-use volumes" +msgstr "" + +#: cinder/volume/driver.py:189 cinder/volume/drivers/netapp/nfs.py:174 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:282 +#, python-format +msgid "copy_data_between_volumes %(src)s -> %(dest)s." +msgstr "" + +#: cinder/volume/driver.py:295 cinder/volume/driver.py:309 +#, python-format +msgid "Failed to attach volume %(vol)s" +msgstr "" + +#: cinder/volume/driver.py:327 +#, python-format +msgid "Failed to copy volume %(src)s to %(dest)d" +msgstr "" + +#: cinder/volume/driver.py:340 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:358 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:394 +#, python-format +msgid "Unable to access the backend storage via the path %(path)s." +msgstr "" + +#: cinder/volume/driver.py:433 +#, python-format +msgid "Creating a new backup for volume %s." +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Restoring backup %(backup)s to volume %(volume)s." +msgstr "" + +#: cinder/volume/driver.py:474 +msgid "Extend volume not implemented" +msgstr "" + +#: cinder/volume/driver.py:533 cinder/volume/drivers/emc/emc_smis_iscsi.py:113 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:546 +#, python-format +msgid "ISCSI discovery attempt failed for:%s" +msgstr "" + +#: cinder/volume/driver.py:548 +#, python-format +msgid "Error from iscsiadm -m discovery: %s" +msgstr "" + +#: cinder/volume/driver.py:595 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:599 cinder/volume/drivers/emc/emc_smis_iscsi.py:156 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:696 +msgid "The volume driver requires the iSCSI initiator name in the connector." +msgstr "" + +#: cinder/volume/driver.py:726 cinder/volume/driver.py:845 +#: cinder/volume/drivers/eqlx.py:247 cinder/volume/drivers/lvm.py:359 +#: cinder/volume/drivers/zadara.py:650 +#: cinder/volume/drivers/emc/emc_smis_common.py:859 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:235 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:602 +#: cinder/volume/drivers/netapp/iscsi.py:1032 +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/nexenta/iscsi.py:538 +#: cinder/volume/drivers/windows/windows.py:205 +msgid "Updating volume stats" +msgstr "" + +#: cinder/volume/driver.py:924 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:203 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." 
+msgstr "" + +#: cinder/volume/manager.py:209 +msgid "" +"ThinLVMVolumeDriver is deprecated, please configure LVMISCSIDriver and " +"lvm_type=thin. Continuing with those settings." +msgstr "" + +#: cinder/volume/manager.py:228 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)" +msgstr "" + +#: cinder/volume/manager.py:235 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s" +msgstr "" + +#: cinder/volume/manager.py:244 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:257 +#, python-format +msgid "Failed to re-export volume %s: setting to error state" +msgstr "" + +#: cinder/volume/manager.py:264 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:271 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:273 +#, python-format +msgid "" +"Error encountered during re-exporting phase of driver initialization: " +"%(name)s" +msgstr "" + +#: cinder/volume/manager.py:283 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:328 cinder/volume/manager.py:330 +msgid "Failed to create manager volume flow" +msgstr "" + +#: cinder/volume/manager.py:374 cinder/volume/manager.py:391 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:380 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:389 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:394 +#, python-format +msgid "Cannot delete volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:422 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:427 +#, python-format +msgid "volume %s: glance metadata deleted" +msgstr "" + +#: cinder/volume/manager.py:430 +#, python-format +msgid "no glance metadata found for volume %s" +msgstr "" + +#: cinder/volume/manager.py:434 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:451 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:462 +#, python-format +msgid "snapshot %(snap_id)s: creating" +msgstr "" + +#: cinder/volume/manager.py:490 +#, python-format +msgid "" +"Failed updating %(snapshot_id)s metadata using the provided volumes " +"%(volume_id)s metadata" +msgstr "" + +#: cinder/volume/manager.py:496 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:508 cinder/volume/manager.py:518 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:526 +#, python-format +msgid "Cannot delete snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:556 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:559 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:579 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:583 +msgid "being attached by another host" +msgstr "" + +#: cinder/volume/manager.py:587 +msgid "being attached by different mode" +msgstr "" + +#: cinder/volume/manager.py:590 +msgid "status must be available or attaching" +msgstr "" + +#: cinder/volume/manager.py:698 +#, python-format +msgid 
"Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "" + +#: cinder/volume/manager.py:760 +#, python-format +msgid "Unable to fetch connection information from backend: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:807 +#, python-format +msgid "Unable to terminate volume connection: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:854 +msgid "failed to create new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:857 +msgid "timeout creating new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:880 +#, python-format +msgid "Failed to copy volume %(vol1)s to %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:909 +#, python-format +msgid "" +"migrate_volume_completion: completing migration for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:921 +#, python-format +msgid "" +"migrate_volume_completion is cleaning up an error for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:940 +#, python-format +msgid "Failed to delete migration source vol %(vol)s: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:976 +#, python-format +msgid "volume %s: calling driver migrate_volume" +msgstr "" + +#: cinder/volume/manager.py:1016 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/manager.py:1024 +#, python-format +msgid "" +"Unable to update stats, %(driver_name)s -%(driver_version)s " +"%(config_group)s driver is uninitialized." +msgstr "" + +#: cinder/volume/manager.py:1044 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/manager.py:1091 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to extend volume by %(s_size)sG, " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/manager.py:1103 +#, python-format +msgid "volume %s: extending" +msgstr "" + +#: cinder/volume/manager.py:1105 +#, python-format +msgid "volume %s: extended successfully" +msgstr "" + +#: cinder/volume/manager.py:1107 +#, python-format +msgid "volume %s: Error trying to extend volume" +msgstr "" + +#: cinder/volume/manager.py:1169 +msgid "Failed to update usages while retyping volume." +msgstr "" + +#: cinder/volume/manager.py:1170 +msgid "Failed to get old volume type quota reservations" +msgstr "" + +#: cinder/volume/manager.py:1190 +#, python-format +msgid "Volume %s: retyped succesfully" +msgstr "" + +#: cinder/volume/manager.py:1193 +#, python-format +msgid "" +"Volume %s: driver error when trying to retype, falling back to generic " +"mechanism." +msgstr "" + +#: cinder/volume/manager.py:1204 +msgid "Retype requires migration but is not allowed." +msgstr "" + +#: cinder/volume/manager.py:1212 +msgid "Volume must not have snapshots." 
+msgstr "" + +#: cinder/volume/qos_specs.py:57 +#, python-format +msgid "Valid consumer of QoS specs are: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:84 cinder/volume/qos_specs.py:105 +#: cinder/volume/qos_specs.py:155 cinder/volume/qos_specs.py:197 +#: cinder/volume/qos_specs.py:211 cinder/volume/qos_specs.py:225 +#: cinder/volume/volume_types.py:43 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:123 cinder/volume/qos_specs.py:140 +#: cinder/volume/qos_specs.py:272 cinder/volume/volume_types.py:52 +#: cinder/volume/volume_types.py:99 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/qos_specs.py:156 +#, python-format +msgid "Failed to get all associations of qos specs %s" +msgstr "" + +#: cinder/volume/qos_specs.py:189 +#, python-format +msgid "" +"Type %(type_id)s is already associated with another qos specs: " +"%(qos_specs_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:198 +#, python-format +msgid "Failed to associate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:212 +#, python-format +msgid "Failed to disassociate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:226 +#, python-format +msgid "Failed to disassociate qos specs %s." +msgstr "" + +#: cinder/volume/qos_specs.py:284 cinder/volume/volume_types.py:111 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/utils.py:144 +#, python-format +msgid "" +"Incorrect value error: %(blocksize)s, it may indicate that " +"'volume_dd_blocksize' was configured incorrectly. Fall back to default." +msgstr "" + +#: cinder/volume/volume_types.py:130 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:131 +#: cinder/volume/drivers/block_device.py:143 cinder/volume/drivers/lvm.py:654 +#: cinder/volume/drivers/lvm.py:669 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:157 cinder/volume/drivers/lvm.py:687 +#, python-format +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:183 cinder/volume/drivers/lvm.py:483 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:200 cinder/volume/drivers/lvm.py:504 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:272 cinder/volume/drivers/lvm.py:227 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:287 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:311 cinder/volume/drivers/lvm.py:300 +#: cinder/volume/drivers/zadara.py:509 cinder/volume/drivers/nexenta/nfs.py:189 +#, python-format +msgid "Creating clone of volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:380 +msgid "No free disk" +msgstr "" + +#: cinder/volume/drivers/block_device.py:393 +msgid "No big enough free disk" +msgstr "" + +#: cinder/volume/drivers/coraid.py:84 +#, python-format +msgid "Invalid ESM url scheme \"%s\". Supported https only." +msgstr "" + +#: cinder/volume/drivers/coraid.py:111 +msgid "Invalid REST handle name. Expected path." 
+msgstr "" + +#: cinder/volume/drivers/coraid.py:134 +#, python-format +msgid "Call to json.loads() failed: %(ex)s. Response: %(resp)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:224 +msgid "Session is expired. Relogin on ESM." +msgstr "" + +#: cinder/volume/drivers/coraid.py:244 +msgid "Reply is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:246 +msgid "Error message is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:284 +#, python-format +msgid "Coraid Appliance ping failed: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:297 +#, python-format +msgid "Volume \"%(name)s\" created with VSX LUN \"%(lun)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:311 +#, python-format +msgid "Volume \"%s\" deleted." +msgstr "" + +#: cinder/volume/drivers/coraid.py:315 +#, python-format +msgid "Resize volume \"%(name)s\" to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:319 +#, python-format +msgid "Repository for volume \"%(name)s\" found: \"%(repo)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:333 +#, python-format +msgid "Volume \"%(name)s\" resized. New size is %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:385 +msgid "Cannot create clone volume in different repository." +msgstr "" + +#: cinder/volume/drivers/coraid.py:505 +#, python-format +msgid "Initialize connection %(shelf)s/%(lun)s for %(name)s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:139 +#, python-format +msgid "" +"CLI output\n" +"%s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:154 +msgid "Reading CLI MOTD" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:158 +#, python-format +msgid "Setting CLI terminal width: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:162 +#, python-format +msgid "Sending CLI command: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:169 +msgid "Error executing EQL command" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:199 +#, python-format +msgid "EQL-driver: executing \"%s\"" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:208 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:383 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:215 cinder/volume/drivers/san/san.py:149 +#, python-format +msgid "Error running SSH command: %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:282 +#, python-format +msgid "Volume %s does not exist, it may have already been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:300 +#, python-format +msgid "EQL-driver: Setup is complete, group IP is %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:304 +msgid "Failed to setup the Dell EqualLogic driver" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:320 +#, python-format +msgid "Failed to create volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:329 +#, python-format +msgid "Volume %s was not found while trying to delete it" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:333 +#, python-format +msgid "Failed to delete volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:348 +#, python-format +msgid "Failed to create snapshot of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:361 +#, python-format +msgid "Failed to create volume from snapshot %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:374 +#, python-format +msgid "Failed to create clone of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:384 +#, python-format +msgid "Failed to delete snapshot %(snap)s of volume %(vol)s" +msgstr "" + +#: 
cinder/volume/drivers/eqlx.py:405 +#, python-format +msgid "Failed to initialize connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:415 +#, python-format +msgid "Failed to terminate connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:436 +#, python-format +msgid "Volume %s was not found; it may have been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:440 +#, python-format +msgid "Failed to ensure export of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:459 +#, python-format +msgid "Failed to extend_volume %(name)s from %(current_size)sGB to %(new_size)sGB" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:86 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:91 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:103 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:161 +#, python-format +msgid "Cloning volume %(src)s to volume %(dst)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:166 +msgid "Volume status must be 'available'." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:202 cinder/volume/drivers/nfs.py:122 +#: cinder/volume/drivers/netapp/nfs.py:753 +#, python-format +msgid "cast to %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:216 +msgid "Snapshot status must be \"available\" to clone." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:238 +#, python-format +msgid "snapshot: %(snap)s, volume: %(vol)s, volume_size: %(size)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:257 +#, python-format +msgid "will copy from snapshot at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:275 cinder/volume/drivers/nfs.py:172 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:373 +#, python-format +msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:403 +#, python-format +msgid "nova call result: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:405 +msgid "Call to Nova to create snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:427 +msgid "Nova returned \"error\" status while creating snapshot." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:431 +#, python-format +msgid "Status of snapshot %(id)s is now %(status)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:444 +#, python-format +msgid "Timed out while waiting for Nova update for creation of snapshot %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:456 +#, python-format +msgid "create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:457 +#, python-format +msgid "volume id: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:532 +msgid "'active' must be present when writing snap_info." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:562 +#, python-format +msgid "deleting snapshot %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:566 +msgid "Volume status must be \"available\" or \"in-use\"." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:582 +#, python-format +msgid "" +"Snapshot record for %s is not present, allowing snapshot_delete to " +"proceed."
+msgstr "" + +#: cinder/volume/drivers/glusterfs.py:587 +#, python-format +msgid "snapshot_file for this snap is %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:608 +#, python-format +msgid "No base file found for %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:625 +#, python-format +msgid "No %(base_id)s found for %(file)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:680 +#, python-format +msgid "No file found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:690 +#, python-format +msgid "No snap found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:701 +#, python-format +msgid "No file depends on %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:727 +#, python-format +msgid "Check condition failed: %s expected to be None." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:778 +msgid "Call to Nova delete snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:796 +#, python-format +msgid "status of snapshot %s is still \"deleting\"... waiting" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:802 +#, python-format +msgid "Unable to delete snapshot %(id)s, status: %(status)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:815 +#, python-format +msgid "Timed out while waiting for Nova update for deletion of snapshot %(id)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:904 +#, python-format +msgid "%s must be a valid raw or qcow2 image." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:967 +msgid "Extend volume is only supported for this driver when no snapshots exist." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:975 +#, python-format +msgid "Unrecognized backing format: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:990 +#, python-format +msgid "creating new volume at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:993 +#, python-format +msgid "file already exists at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1019 cinder/volume/drivers/nfs.py:159 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1021 +#, python-format +msgid "Available shares: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1038 +#, python-format +msgid "" +"GlusterFS share at %(dir)s is not writable by the Cinder volume service. " +"Snapshot operations will not be supported." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:96 +#, python-format +msgid "GPFS is not active. Detailed output: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:97 +#, python-format +msgid "GPFS is not running - state: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:140 +msgid "Option gpfs_mount_point_base is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:147 +msgid "Option gpfs_images_share_mode is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:153 +msgid "Option gpfs_images_dir is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:160 +#, python-format +msgid "" +"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " +"belong to different file systems" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:169 +#, python-format +msgid "" +"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in " +"cluster daemon level %(cur)s - must be at least at level %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:183 +#, python-format +msgid "%s must be an absolute path." 
+msgstr "" + +#: cinder/volume/drivers/gpfs.py:188 +#, python-format +msgid "%s is not a directory." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:197 +#, python-format +msgid "" +"The GPFS filesystem %(fs)s is not at the required release level. Current" +" level is %(cur)s, must be at least %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:556 +#, python-format +msgid "Failed to resize volume %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:604 +#, python-format +msgid "mkfs failed on volume %(vol)s, error message was: %(err)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:637 +#, python-format +msgid "" +"%s cannot be accessed. Verify that GPFS is active and file system is " +"mounted." +msgstr "" + +#: cinder/volume/drivers/lvm.py:189 +#, python-format +msgid "Unabled to delete due to existing snapshot for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:215 +#, python-format +msgid "Volume device file path %s does not exist." +msgstr "" + +#: cinder/volume/drivers/lvm.py:221 +#, python-format +msgid "Size for volume: %s not found, cannot secure delete." +msgstr "" + +#: cinder/volume/drivers/lvm.py:262 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:361 +#, python-format +msgid "Unable to update stats on non-initialized Volume Group: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:462 +#, python-format +msgid "Error creating iSCSI target, retrying creation for target: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:482 +#, python-format +msgid "volume_info:%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:518 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:519 cinder/volume/drivers/lvm.py:724 +#: cinder/volume/drivers/huawei/rest_common.py:1225 +#, python-format +msgid "%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:573 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/nfs.py:109 +msgid "Driver specific implementation needs to return mount_point_base." +msgstr "" + +#: cinder/volume/drivers/nfs.py:263 +#, python-format +msgid "Expected volume size was %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:264 +#, python-format +msgid " but size is now %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:361 +#, python-format +msgid "%s is already mounted" +msgstr "" + +#: cinder/volume/drivers/nfs.py:421 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:426 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/nfs.py:431 +#, python-format +msgid "NFS config 'nfs_oversub_ratio' invalid. Must be > 0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:439 +#, python-format +msgid "NFS config 'nfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:493 +#, python-format +msgid "Selected %s as target nfs share." 
+msgstr "" + +#: cinder/volume/drivers/nfs.py:526 +#, python-format +msgid "%s is above nfs_used_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:529 +#, python-format +msgid "%s is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:532 +#, python-format +msgid "%s reserved space is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/rbd.py:160 +#, python-format +msgid "Invalid argument - whence=%s not supported" +msgstr "" + +#: cinder/volume/drivers/rbd.py:164 +msgid "Invalid argument" +msgstr "" + +#: cinder/volume/drivers/rbd.py:183 +msgid "fileno() not supported by RBD()" +msgstr "" + +#: cinder/volume/drivers/rbd.py:210 +#, python-format +msgid "error opening rbd image %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:259 +msgid "rados and rbd python libraries not found" +msgstr "" + +#: cinder/volume/drivers/rbd.py:265 +msgid "error connecting to ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:346 cinder/volume/drivers/sheepdog.py:178 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:377 +#, python-format +msgid "clone depth exceeds limit of %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:411 +#, python-format +msgid "maximum clone depth (%d) has been reached - flattening source volume" +msgstr "" + +#: cinder/volume/drivers/rbd.py:423 +#, python-format +msgid "flattening source volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:435 +#, python-format +msgid "creating snapshot='%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:445 +#, python-format +msgid "cloning '%(src_vol)s@%(src_snap)s' to '%(dest)s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:459 +msgid "clone created successfully" +msgstr "" + +#: cinder/volume/drivers/rbd.py:468 +#, python-format +msgid "creating volume '%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:484 +#, python-format +msgid "flattening %(pool)s/%(img)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:490 +#, python-format +msgid "cloning %(pool)s/%(img)s@%(snap)s to %(dst)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:527 +msgid "volume has no backup snaps" +msgstr "" + +#: cinder/volume/drivers/rbd.py:550 +#, python-format +msgid "volume %s is not a clone" +msgstr "" + +#: cinder/volume/drivers/rbd.py:568 +#, python-format +msgid "deleting parent snapshot %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:579 +#, python-format +msgid "deleting parent %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:593 +#, python-format +msgid "volume %s no longer exists in backend" +msgstr "" + +#: cinder/volume/drivers/rbd.py:609 +msgid "volume has clone snapshot(s)" +msgstr "" + +#: cinder/volume/drivers/rbd.py:625 +#, python-format +msgid "deleting rbd volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:629 +msgid "" +"ImageBusy error raised while deleting rbd volume. This may have been " +"caused by a connection from a client that has crashed and, if so, may be " +"resolved by retrying the delete after 30 seconds has elapsed." 
+msgstr "" + +#: cinder/volume/drivers/rbd.py:642 +msgid "volume is a clone so cleaning references" +msgstr "" + +#: cinder/volume/drivers/rbd.py:696 +#, python-format +msgid "connection data: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:705 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:709 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:712 +msgid "Not an rbd snapshot" +msgstr "" + +#: cinder/volume/drivers/rbd.py:724 +#, python-format +msgid "not cloneable: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:728 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:733 +msgid "rbd image clone requires image format to be 'raw' but image {0} is '{1}'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:747 +#, python-format +msgid "Unable to open image %(loc)s: %(err)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:817 +msgid "volume backup complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:830 +msgid "volume restore complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:840 cinder/volume/drivers/sheepdog.py:195 +#, python-format +msgid "Failed to Extend Volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:845 cinder/volume/drivers/sheepdog.py:200 +#: cinder/volume/drivers/windows/windows.py:223 +#, python-format +msgid "Extend volume from %(old_size)s GB to %(new_size)s GB." +msgstr "" + +#: cinder/volume/drivers/scality.py:67 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:78 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:84 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:105 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:139 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:59 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:64 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:144 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:151 +#, python-format +msgid "" +"Failed to make httplib connection SolidFire Cluster: %s (verify san_ip " +"settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:154 +#, python-format +msgid "Failed to make httplib connection: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:161 +#, python-format +msgid "" +"Request to SolidFire cluster returned bad status: %(status)s / %(reason)s" +" (check san_login/san_password settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:166 +#, python-format +msgid "HTTP request failed, with status: %(status)s and reason: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:177 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:183 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:187 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:189 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:195 +#, python-format +msgid "Detected 
xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:202 +#: cinder/volume/drivers/solidfire.py:271 +#: cinder/volume/drivers/solidfire.py:366 +#, python-format +msgid "API response: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:222 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:253 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:315 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:398 +msgid "Failed to get model update from clone" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:410 +#, python-format +msgid "Failed volume create: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:425 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:460 +#, python-format +msgid "Failed to get SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:469 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:478 +#, python-format +msgid "Volume %s, not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:481 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:550 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:554 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:556 +msgid "This usually means the volume was never successfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:569 +#, python-format +msgid "Failed to delete SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:572 +#: cinder/volume/drivers/solidfire.py:646 +#: cinder/volume/drivers/solidfire.py:709 +#: cinder/volume/drivers/solidfire.py:734 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:575 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:579 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:587 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:638 +msgid "Entering SolidFire extend_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:660 +msgid "Leaving SolidFire extend_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:665 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:673 +msgid "Failed to get updated stats" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:703 +#: cinder/volume/drivers/solidfire.py:728 +msgid "Entering SolidFire attach_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:773 +msgid "Leaving SolidFire transfer volume" +msgstr "" + +#: cinder/volume/drivers/zadara.py:236 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:260 +#, python-format +msgid "Operation completed. 
%(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:357 +#, python-format +msgid "Pool %(name)s: %(total)sGB total, %(free)sGB free" +msgstr "" + +#: cinder/volume/drivers/zadara.py:408 cinder/volume/drivers/zadara.py:531 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:438 +#, python-format +msgid "Create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:445 cinder/volume/drivers/zadara.py:490 +#: cinder/volume/drivers/zadara.py:516 +#, python-format +msgid "Volume %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:456 +#, python-format +msgid "Delete snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:464 +#, python-format +msgid "snapshot: original volume %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:472 +#, python-format +msgid "snapshot: snapshot %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:483 +#, python-format +msgid "Creating volume from snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:496 +#, python-format +msgid "Snapshot %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:614 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:40 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:79 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:83 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:91 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:98 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:107 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:115 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:130 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:137 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:144 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:152 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:157 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:167 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:177 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:188 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:197 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:218 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:230 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:241 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:257 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:266 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:278 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:287 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:292 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:302 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:312 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:321 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:342 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. 
Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:354 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:365 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:381 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:390 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:402 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:411 +msgid "Entering delete_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:413 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:420 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:430 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:438 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:442 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigService: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:456 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:465 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:472 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:476 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:488 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:497 +#: cinder/volume/drivers/emc/emc_smis_common.py:567 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:502 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:518
+#, python-format
+msgid ""
+"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s "
+"Return code: %(rc)lu"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:527
+#, python-format
+msgid ""
+"Error Create Snapshot: %(snapshot)s Volume: %(volume)s Error: "
+"%(errordesc)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:535
+#, python-format
+msgid ""
+"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s "
+"Return code: %(rc)lu."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:541
+msgid "Entering delete_snapshot."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:545
+#, python-format
+msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:551
+#, python-format
+msgid ""
+"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding "
+"StorageSynchronization_SV_SV."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:559
+#, python-format
+msgid ""
+"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No "
+"snapshot to delete."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:574
+#, python-format
+msgid ""
+"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: "
+"ModifyReplicaSynchronization: Replication Service: %(service)s "
+"Operation: 19 Synchronization: %(sync_name)s."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:590
+#, python-format
+msgid ""
+"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s "
+"Return code: %(rc)lu"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:599
+#, python-format
+msgid ""
+"Error Delete Snapshot: Volume: %(volumename)s Snapshot: "
+"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:611
+#, python-format
+msgid ""
+"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: "
+"%(snapshotname)s Return code: %(rc)lu."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:621
+#, python-format
+msgid "Create export: %(volume)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:626
+#, python-format
+msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:648
+#, python-format
+msgid ""
+"ExposePaths: %(vol)s ConfigService: %(service)s LUNames: %(lun_name)s"
+" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:663
+#, python-format
+msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:674
+#, python-format
+msgid "Error mapping volume %s."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:678
+#, python-format
+msgid "ExposePaths for volume %s completed successfully."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:694
+#, python-format
+msgid ""
+"HidePaths: %(vol)s ConfigService: %(service)s LUNames: %(device_id)s "
+"LunMaskingSCSIProtocolController: %(lunmasking)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:707
+#, python-format
+msgid "Error unmapping volume %s."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:711
+#, python-format
+msgid "HidePaths for volume %s completed successfully."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:724
+#, python-format
+msgid ""
+"AddMembers: ConfigService: %(service)s MaskingGroup: %(masking_group)s"
+" Members: %(vol)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:739
+#, python-format
+msgid "Error mapping volume %(vol)s. %(error)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:744
+#, python-format
+msgid "AddMembers for volume %s completed successfully."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:757
+#, python-format
+msgid ""
+"RemoveMembers: ConfigService: %(service)s MaskingGroup: "
+"%(masking_group)s Members: %(vol)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:770
+#, python-format
+msgid "Error unmapping volume %(vol)s. %(error)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:775
+#, python-format
+msgid "RemoveMembers for volume %s completed successfully."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:781
+#, python-format
+msgid "Map volume: %(volume)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:790
+#: cinder/volume/drivers/emc/emc_smis_common.py:820
+#, python-format
+msgid "Cannot find Controller Configuration Service for storage system %s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:804
+#, python-format
+msgid "Unmap volume: %(volume)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:810
+#, python-format
+msgid "Volume %s is not mapped. No volume to unmap."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:834
+#, python-format
+msgid "Initialize connection: %(volume)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:840
+#, python-format
+msgid "Volume %s is already mapped."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:852
+#, python-format
+msgid "Terminate connection: %(volume)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:884
+#, python-format
+msgid "Found Storage Type: %s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:887
+msgid "Storage type not found."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:903
+#, python-format
+msgid "Found Masking View: %s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:906
+msgid "Masking View not found."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:928
+msgid "Ecom user not found."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:948
+#, python-format
+msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:952
+msgid "Ecom server not found."
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:959
+msgid "Cannot connect to ECOM server"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:971
+#, python-format
+msgid "Found Replication Service: %s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:984
+#, python-format
+msgid "Found Storage Configuration Service: %s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:997
+#, python-format
+msgid "Found Controller Configuration Service: %s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:1010
+#, python-format
+msgid "Found Storage Hardware ID Management Service: %s"
+msgstr ""
+
+#: cinder/volume/drivers/emc/emc_smis_common.py:1054
+#, python-format
+msgid "Pool %(storage_type)s is not found."
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1060 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1066 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1082 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1114 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1117 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1130 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1158 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1184 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1188 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1248 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1289 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1302 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1314 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1326 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1361 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1404 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1409 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1419 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1441 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1463 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1491 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1520 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1526 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1538 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1548 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1550 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1566 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:152 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:161 +#, python-format +msgid "Cannot find device number for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:191 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:198 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:215 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:70 +#, python-format +msgid "Range: start LU: %(start)s, end LU: %(end)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:84 +#, python-format +msgid "setting LU upper (end) limit to %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:92 +#, python-format +msgid "%(element)s: %(val)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:103 cinder/volume/drivers/hds/hds.py:105 +#, python-format +msgid "XML exception reading parameter: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:178 +#, python-format +msgid "portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:197 +#, python-format +msgid "No configuration found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:250 +#, python-format +msgid "HDP not found: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:289 +#, python-format +msgid "iSCSI portal not found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:327 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:355 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is cloned." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:372 +#, python-format +msgid "LUN %(lun)s extended to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:395 +#, python-format +msgid "delete lun %(lun)s on %(name)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:480 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created from snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:503 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is created as snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:522 +#, python-format +msgid "LUN %s is deleted." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:57 +msgid "_instantiate_driver: configuration not found." 
+msgstr ""
+
+#: cinder/volume/drivers/huawei/__init__.py:64
+#, python-format
+msgid ""
+"_instantiate_driver: Loading %(protocol)s driver for Huawei OceanStor "
+"%(product)s series storage arrays."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/__init__.py:84
+#, python-format
+msgid ""
+"\"Product\" or \"Protocol\" is illegal. \"Product\" should be set to "
+"either T, Dorado or HVS. \"Protocol\" should be set to either iSCSI or "
+"FC. Product: %(product)s Protocol: %(protocol)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_dorado.py:74
+#, python-format
+msgid ""
+"initialize_connection: volume name: %(vol)s host: %(host)s initiator: "
+"%(wwn)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_dorado.py:92
+#: cinder/volume/drivers/huawei/huawei_t.py:461
+#, python-format
+msgid "initialize_connection: Target FC ports WWNS: %s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_t.py:101
+#, python-format
+msgid ""
+"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: "
+"%(ini)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_t.py:159
+#: cinder/volume/drivers/huawei/rest_common.py:1278
+#, python-format
+msgid ""
+"_get_iscsi_params: Failed to get target IP for initiator %(ini)s, please "
+"check config file."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_t.py:206
+#: cinder/volume/drivers/huawei/rest_common.py:1083
+#, python-format
+msgid "_get_tgt_iqn: iSCSI IP is %s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_t.py:234
+#, python-format
+msgid "_get_tgt_iqn: iSCSI target iqn is %s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_t.py:248
+#, python-format
+msgid ""
+"_get_iscsi_tgt_port_info: Failed to get iSCSI port info. Please make sure"
+" the iSCSI port IP %s is configured in array."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_t.py:323
+#: cinder/volume/drivers/huawei/huawei_t.py:552
+#, python-format
+msgid ""
+"terminate_connection: volume: %(vol)s, host: %(host)s, connector: "
+"%(initiator)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_t.py:351
+#, python-format
+msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_t.py:436
+msgid "validate_connector: The FC driver requires the wwpns in the connector."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_t.py:443
+#, python-format
+msgid ""
+"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: "
+"%(wwn)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_t.py:578
+#, python-format
+msgid "_remove_fc_ports: FC port was not found on host %(hostid)s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_utils.py:40
+#, python-format
+msgid "parse_xml_file: %s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/huawei_utils.py:129
+#, python-format
+msgid "_get_host_os_type: Host %(ip)s OS type is %(os)s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/rest_common.py:59
+#, python-format
+msgid "HVS Request URL: %(url)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/rest_common.py:60
+#, python-format
+msgid "HVS Request Data: %(data)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/rest_common.py:73
+#, python-format
+msgid "HVS Response Data: %(res)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/rest_common.py:75
+#, python-format
+msgid "Bad response from server: %s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/rest_common.py:82
+msgid "JSON transfer error"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/rest_common.py:102
+#, python-format
+msgid "Login error, reason is %s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/rest_common.py:166
+#, python-format
+msgid ""
+"%(err)s\n"
+"result: %(res)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/rest_common.py:173
+#, python-format
+msgid "%s \"data\" was not in result."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/rest_common.py:208
+msgid "Can't find the Qos policy in array"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/rest_common.py:246
+msgid "Can't find lun or lun group in array"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/rest_common.py:280
+#, python-format
+msgid "Invalid resource pool: %s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/rest_common.py:298
+#, python-format
+msgid "Get pool info error, pool name is: %s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/rest_common.py:327
+#, python-format
+msgid "create_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/rest_common.py:354
+#, python-format
+msgid "_stop_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/rest_common.py:474
+#, python-format
+msgid ""
+"_mapping_hostgroup_and_lungroup: lun_group: %(lun_group)s view_id: "
+"%(view_id)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/rest_common.py:511
+#: cinder/volume/drivers/huawei/rest_common.py:543
+#, python-format
+msgid "initiator name: %(initiator_name)s, volume name: %(volume)s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/rest_common.py:527
+#, python-format
+msgid "host lun id is %s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/rest_common.py:553
+#, python-format
+msgid "the free wwns %s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/rest_common.py:574
+#, python-format
+msgid "the fc server properties is: %s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/rest_common.py:688
+#, python-format
+msgid "JSON transfer data error. %s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/rest_common.py:874
+#, python-format
+msgid "terminate_connection: volume name: %(volume)s, initiator name: %(ini)s."
+msgstr ""
+
+#: cinder/volume/drivers/huawei/rest_common.py:937
+#, python-format
+msgid ""
+"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". "
+"LUNType: %(fetchtype)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/rest_common.py:964
+#, python-format
+msgid ""
+"PrefetchType config is wrong. PrefetchType must be in 1,2,3,4. fetchtype "
+"is: %(fetchtype)s"
+msgstr ""
+
+#: cinder/volume/drivers/huawei/rest_common.py:970
+msgid "Use default prefetch fetchtype. Prefetch fetchtype: Intelligent."
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:982 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal.LUNcopy name: " +"%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1056 +#, python-format +msgid "" +"_get_iscsi_port_info: Failed to get iscsi port info through config IP " +"%(ip)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1101 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1124 +#, python-format +msgid "_parse_volume_type: type id: %(type_id)s config parameter is: %(params)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1157 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the configuration file " +"%(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1162 +#, python-format +msgid "The config parameters are: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1239 +#: cinder/volume/drivers/huawei/ssh_common.py:118 +#: cinder/volume/drivers/huawei/ssh_common.py:1265 +#, python-format +msgid "_check_conf_file: Config file invalid. %s must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1246 +#: cinder/volume/drivers/huawei/ssh_common.py:125 +msgid "_check_conf_file: Config file invalid. StoragePool must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1256 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1300 +msgid "Can not find lun in array" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:54 +#, python-format +msgid "ssh_read: Read SSH timeout. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:70 +msgid "No response message. Please check system status." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:101 +#: cinder/volume/drivers/huawei/ssh_common.py:1249 +msgid "do_setup" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:135 +#: cinder/volume/drivers/huawei/ssh_common.py:1287 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType is invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:169 +#, python-format +msgid "_get_login_info: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:224 +#, python-format +msgid "create_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:242 +#, python-format +msgid "" +"_name_translate: Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:279 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the element in configuration " +"file %(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:373 +#: cinder/volume/drivers/huawei/ssh_common.py:1451 +#, python-format +msgid "LUNType must be \"Thin\" or \"Thick\". LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:395 +msgid "" +"_parse_conf_lun_params: Use default prefetch type. 
Prefetch type: " +"Intelligent" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:421 +#, python-format +msgid "" +"_get_maximum_capacity_pool_id: Failed to get pool id. Please check config" +" file and make sure the StoragePool %s is created in storage array." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:436 +#, python-format +msgid "CLI command: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:466 +#, python-format +msgid "" +"_execute_cli: Can not connect to IP %(old)s, try to connect to the other " +"IP %(new)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:501 +#, python-format +msgid "_execute_cli: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:511 +#, python-format +msgid "delete_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:516 +#, python-format +msgid "delete_volume: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:570 +#, python-format +msgid "" +"create_volume_from_snapshot: snapshot name: %(snapshot)s, volume name: " +"%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:580 +#, python-format +msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:650 +#, python-format +msgid "_wait_for_luncopy: LUNcopy %(luncopyname)s status is %(status)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:688 +#, python-format +msgid "create_cloned_volume: src volume: %(src)s, tgt volume: %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:697 +#, python-format +msgid "Source volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:739 +#, python-format +msgid "" +"extend_volume: extended volume name: %(extended_name)s new added volume " +"name: %(added_name)s new added volume size: %(added_size)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:747 +#, python-format +msgid "extend_volume: volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:779 +#, python-format +msgid "create_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:785 +msgid "create_snapshot: Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:792 +#, python-format +msgid "create_snapshot: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:855 +#, python-format +msgid "delete_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:865 +#, python-format +msgid "" +"delete_snapshot: Can not delete snapshot %s for it is a source LUN of " +"LUNCopy." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:873 +#, python-format +msgid "delete_snapshot: Snapshot %(snap)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:916 +#, python-format +msgid "" +"%(func)s: %(msg)s\n" +"CLI command: %(cmd)s\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:933 +#, python-format +msgid "map_volume: Volume %s was not found." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1079 +#, python-format +msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1102 +#, python-format +msgid "remove_map: Host %s does not exist." 
+msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1106 +#, python-format +msgid "remove_map: Volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1119 +#, python-format +msgid "remove_map: No map between host %(host)s and volume %(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1138 +#, python-format +msgid "" +"_delete_map: There are IOs accessing the system. Retry to delete host map" +" %(mapid)s 10s later." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1146 +#, python-format +msgid "" +"_delete_map: Failed to delete host map %(mapid)s.\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1185 +msgid "_update_volume_stats: Updating volume stats." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1277 +msgid "_check_conf_file: Config file invalid. StoragePool must be specified." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1311 +msgid "" +"_get_device_type: The driver only supports Dorado5100 and Dorado 2100 G2 " +"now." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1389 +#, python-format +msgid "" +"create_volume_from_snapshot: %(device)s does not support create volume " +"from snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1396 +#, python-format +msgid "create_cloned_volume: %(device)s does not support clone volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1404 +#, python-format +msgid "extend_volume: %(device)s does not support extend volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1413 +#, python-format +msgid "create_snapshot: %(device)s does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:132 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:142 +#, python-format +msgid "Failed getting details for pool %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:179 +msgid "do_setup: No configured nodes." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:182 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:186 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:190 +msgid "Unable to determine system name" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:193 +msgid "Unable to determine system id" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:196 +msgid "Unable to determine pool extent size" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:203 +#: cinder/volume/drivers/netapp/iscsi.py:122 +#: cinder/volume/drivers/netapp/nfs.py:639 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:157 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:209 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:217 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:225 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:235 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:254 +msgid "The connector does not contain the required information." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:277 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:297 +msgid "CHAP secret exists for host but CHAP is disabled" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:302 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:314 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:316 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:333 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:342 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:365 +msgid "" +"Could not get FC connection information for the host-volume connection. " +"Is the host configured properly for FC connections?" 
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:380
+#, python-format
+msgid ""
+"initialize_connection: Failed to collect return properties for volume "
+"%(vol)s and connector %(conn)s.\n"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:385
+#, python-format
+msgid ""
+"leave: initialize_connection:\n"
+" volume: %(vol)s\n"
+" connector %(conn)s\n"
+" properties: %(prop)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:403
+#, python-format
+msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:411
+msgid "terminate_connection: Failed to get host name from connector."
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:421
+#, python-format
+msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:447
+msgid "create_volume_from_snapshot: Source and destination size differ."
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:459
+msgid "create_cloned_volume: Source and destination size differ."
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:470
+#, python-format
+msgid "enter: extend_volume: volume %s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:474
+msgid "extend_volume: Extending a volume with snapshots is not supported."
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:481
+#, python-format
+msgid "leave: extend_volume: volume %s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:497
+#, python-format
+msgid "enter: migrate_volume: id=%(id)s, host=%(host)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:523
+#, python-format
+msgid "leave: migrate_volume: id=%(id)s, host=%(host)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:540
+#, python-format
+msgid ""
+"enter: retype: id=%(id)s, new_type=%(new_type)s, diff=%(diff)s, "
+"host=%(host)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:581
+#, python-format
+msgid ""
+"exit: retype: id=%(id)s, new_type=%(new_type)s, diff=%(diff)s, "
+"host=%(host)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:622
+msgid "Could not get pool data from the storage"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:623
+msgid "_update_volume_stats: Could not get storage pool data"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:44
+#, python-format
+msgid "Could not find key in output of command %(cmd)s: %(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:64
+#, python-format
+msgid "Failed to get code level (%s)."
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:86
+#, python-format
+msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:143
+#, python-format
+msgid "WWPN on node %(node)s: %(wwpn)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:165
+#, python-format
+msgid "Failed to find host %s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:178
+#, python-format
+msgid "enter: get_host_from_connector: %s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:207
+#, python-format
+msgid "leave: get_host_from_connector: host %s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:218
+#, python-format
+msgid "enter: create_host: host %s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:224
+msgid "create_host: Host name is not unicode or string"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:235
+msgid "create_host: No initiators or wwpns supplied."
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:265
+#, python-format
+msgid "leave: create_host: host %(host)s - %(host_name)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:275
+#, python-format
+msgid "enter: map_vol_to_host: volume %(volume_name)s to host %(host_name)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:301
+#, python-format
+msgid ""
+"leave: map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host "
+"%(host_name)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:311
+#, python-format
+msgid "enter: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:318
+#, python-format
+msgid "unmap_vol_from_host: No mapping of volume %(vol_name)s to any host found."
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:324
+#, python-format
+msgid ""
+"unmap_vol_from_host: Multiple mappings of volume %(vol_name)s found, no "
+"host specified."
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:336
+#, python-format
+msgid ""
+"unmap_vol_from_host: No mapping of volume %(vol_name)s to host %(host)s "
+"found."
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:348
+#, python-format
+msgid "leave: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:377
+msgid ""
+"Illegal value specified for storwize_svc_vol_rsize: set to either a "
+"percentage (0-100) or -1"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:383
+msgid ""
+"Illegal value specified for storwize_svc_vol_warning: set to a percentage"
+" (0-100)"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:390
+msgid ""
+"Illegal value specified for storwize_svc_vol_grainsize: set to either 32,"
+" 64, 128, or 256"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:397
+msgid "System does not support compression"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:402
+msgid "If compression is set to True, rsize must also be set (not equal to -1)"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:408
+#, python-format
+msgid ""
+"Illegal value %(prot)s specified for storwize_svc_connection_protocol: "
+"valid values are %(enabled)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:417
+#, python-format
+msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:452
+msgid "Protocol must be specified as 'iSCSI' or 'FC'."
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:495
+#, python-format
+msgid "enter: create_vdisk: vdisk %s "
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:498
+#, python-format
+msgid "leave: _create_vdisk: volume %s "
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:525
+#, python-format
+msgid ""
+"Unexpected mapping status %(status)s for mapping %(id)s. Attributes: "
+"%(attr)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:535
+#, python-format
+msgid ""
+"Mapping %(id)s prepare failed to complete within the allotted %(to)d "
+"seconds timeout. Terminating."
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:544
+#, python-format
+msgid ""
+"enter: run_flashcopy: execute FlashCopy from source %(source)s to target "
+"%(target)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:552
+#, python-format
+msgid "leave: run_flashcopy: FlashCopy started from %(source)s to %(target)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:572
+#, python-format
+msgid "Loopcall: _check_vdisk_fc_mappings(), vdisk %s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:595
+#, python-format
+msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:631
+#, python-format
+msgid "Calling _ensure_vdisk_no_fc_mappings: vdisk %s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:639
+#, python-format
+msgid "enter: delete_vdisk: vdisk %s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:641
+#, python-format
+msgid "Tried to delete non-existent vdisk %s."
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:645
+#, python-format
+msgid "leave: delete_vdisk: vdisk %s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:649
+#, python-format
+msgid "enter: create_copy: snapshot %(src)s to %(tgt)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:654
+#, python-format
+msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:669
+#, python-format
+msgid "leave: _create_copy: snapshot %(tgt)s from vdisk %(src)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:691
+msgid "migrate_volume started without a vdisk copy in the expected pool."
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:743
+#, python-format
+msgid ""
+"Ignore change IO group as storage code level is %(code_level)s, below "
+"6.4.0.0"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:35
+#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:211
+#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:244
+#, python-format
+msgid ""
+"CLI Exception output:\n"
+" command: %(cmd)s\n"
+" stdout: %(out)s\n"
+" stderr: %(err)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:53
+#, python-format
+msgid "Expected no output from CLI command %(cmd)s, got %(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:65
+#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:256
+#, python-format
+msgid ""
+"Failed to parse CLI output:\n"
+" command: %(cmd)s\n"
+" stdout: %(out)s\n"
+" stderr: %(err)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:142
+msgid "Must pass wwpn or host to lsfabric."
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:158
+#, python-format
+msgid "Did not find success message nor error for %(fun)s: %(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:163
+msgid ""
+"storwize_svc_multihostmap_enabled is set to False, not allowing multi "
+"host mapping."
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:347
+#, python-format
+msgid "Did not find expected key %(key)s in %(fun)s: %(raw)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:382
+#, python-format
+msgid ""
+"Unexpected CLI response: header/row mismatch. header: %(header)s, row: "
+"%(row)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/api.py:419
+#, python-format
+msgid "No element by given name %s."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/api.py:440
+msgid "Not a valid value for NaElement."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/api.py:444
+msgid "NaElement name cannot be null."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/api.py:468
+msgid "Type cannot be converted into NaElement."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/common.py:75
+msgid "Required configuration not found"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/common.py:103
+#, python-format
+msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/common.py:109
+#, python-format
+msgid "Storage family %s is not supported"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/common.py:116
+#, python-format
+msgid "No default storage protocol found for storage family %(storage_family)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/common.py:123
+#, python-format
+msgid ""
+"Protocol %(storage_protocol)s is not supported for storage family "
+"%(storage_family)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/common.py:130
+#, python-format
+msgid ""
+"NetApp driver of family %(storage_family)s and protocol "
+"%(storage_protocol)s loaded"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/common.py:139
+msgid "Only loading netapp drivers supported."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/common.py:158
+#, python-format
+msgid ""
+"The configured NetApp driver is deprecated. Please refer to the link to "
+"resolve the issue '%s'."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:69
+#, python-format
+msgid "No metadata property %(prop)s defined for the LUN %(name)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:105
+#, python-format
+msgid "Using NetApp filer: %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:150
+msgid "Success getting LUN list from server"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:166
+#, python-format
+msgid "Created LUN with name %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:175
+#, python-format
+msgid "No entry in LUN table for volume/snapshot %(name)s."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:191
+#, python-format
+msgid "Destroyed LUN %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:227
+#, python-format
+msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:232
+#, python-format
+msgid ""
+"Successfully fetched target details for LUN %(name)s and initiator "
+"%(initiator_name)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:238
+#, python-format
+msgid "Failed to get LUN target details for the LUN %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:249
+#, python-format
+msgid "Failed to get target portal for the LUN %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:252
+#, python-format
+msgid "Failed to get target IQN for the LUN %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:290
+#, python-format
+msgid "Snapshot %s deletion successful"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:310
+#: cinder/volume/drivers/netapp/iscsi.py:565
+#: cinder/volume/drivers/netapp/nfs.py:99
+#: cinder/volume/drivers/netapp/nfs.py:206
+#, python-format
+msgid "Resizing %s failed. Cleaning volume."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:325
+#, python-format
+msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:412
+#, python-format
+msgid "Error mapping lun. Code: %(code)s, Message: %(message)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:431
+#, python-format
+msgid "Error unmapping lun. Code: %(code)s, Message: %(message)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:511
+msgid "Object is not a NetApp LUN."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:543
+#, python-format
+msgid "Message: %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:545
+#, python-format
+msgid "Error getting lun attribute. Exception: %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:600
+#, python-format
+msgid "No need to extend volume %s as it is already the requested new size."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:606
+#, python-format
+msgid "Resizing lun %s directly to new size."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:633
+#, python-format
+msgid "Lun %(path)s geometry failed. Message - %(msg)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:662
+#, python-format
+msgid "Moving lun %(name)s to %(new_name)s."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:677
+#, python-format
+msgid "Resizing lun %s using sub clone to new size."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:684
+#, python-format
+msgid "%s cannot be sub clone resized as it is hosted on compressed volume"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:690
+#, python-format
+msgid "%s cannot be sub clone resized as it contains no blocks."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:707
+#, python-format
+msgid "Post clone resize lun %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:718
+#, python-format
+msgid "Failure staging lun %s to tmp."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:723
+#, python-format
+msgid "Failure moving new cloned lun to %s."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:727
+#, python-format
+msgid "Failure deleting staged tmp lun %s."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:730
+#, python-format
+msgid "Unknown exception in post clone resize lun %s."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:732
+#, python-format
+msgid "Exception details: %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:736
+msgid "Getting lun block count."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:741
+#, python-format
+msgid "Failure getting lun info for %s."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:785
+#, python-format
+msgid "Failed to get vol with required size and extra specs for volume: %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:796
+#, python-format
+msgid "Error provisioning vol %(name)s on %(volume)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:841
+#, python-format
+msgid "No iscsi service found for vserver %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:982
+#, python-format
+msgid "Cloned LUN with new name %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:986
+#, python-format
+msgid "No cloned lun named %s found on the filer"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1087
+msgid "Cluster ssc is not updated. No volume stats found."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1149
+#: cinder/volume/drivers/netapp/nfs.py:1080
+msgid "Unsupported ONTAP version. ONTAP version 7.3.1 and above is supported."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1153
+#: cinder/volume/drivers/netapp/nfs.py:1084
+#: cinder/volume/drivers/netapp/utils.py:320
+msgid "Api version could not be determined."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1164
+#, python-format
+msgid "Failed to get vol with required size for volume: %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1273
+#, python-format
+msgid "Error finding luns for volume %s. Verify volume exists."
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1390 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1393 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1456 +msgid "Volume refresh job already running. Returning..." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1462 +#, python-format +msgid "Error refreshing vol capacity. Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1470 +#, python-format +msgid "Refreshing capacity info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:104 +#: cinder/volume/drivers/netapp/nfs.py:211 +#, python-format +msgid "NFS file %s not discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:224 +#, python-format +msgid "Copied image to volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:230 +#, python-format +msgid "Registering image in cache %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:237 +#, python-format +msgid "" +"Exception while registering image %(image_id)s in cache. Exception: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:250 +#, python-format +msgid "Found cache file for image %(image_id)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:263 +#, python-format +msgid "Cloning img from cache for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:271 +msgid "Image cache cleaning in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:282 +msgid "Image cache cleaning in progress." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:293 +#, python-format +msgid "Cleaning cache for share %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:298 +#, python-format +msgid "Files to be queued for deletion %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:305 +#, python-format +msgid "Exception during cache cleaning %(share)s. Message - %(ex)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:311 +msgid "Image cache cleaning done." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:336 +#, python-format +msgid "Bytes to free %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:343 +#, python-format +msgid "Delete file path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:358 +#, python-format +msgid "Deleting file at path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:363 +#, python-format +msgid "Exception during deleting %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:395 +#, python-format +msgid "Unexpected exception in cloning image %(image_id)s. 
Message: %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:411 +#, python-format +msgid "Cloning image %s from cache" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:415 +#, python-format +msgid "Cache share: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:425 +#, python-format +msgid "Unexpected exception during image cloning in share %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:431 +#, python-format +msgid "Cloning image %s directly in share" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:436 +#, python-format +msgid "Share is cloneable %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:443 +#, python-format +msgid "Image is raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:450 +#, python-format +msgid "Image will locally be converted to raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:457 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:467 +#, python-format +msgid "Performing post clone for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:474 +msgid "NFS file could not be discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:478 +msgid "Checking file for resize" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:482 +#, python-format +msgid "Resizing file to %sG" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:488 +msgid "Resizing image file failed." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:510 +msgid "Discover file retries exhausted." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:529 +#, python-format +msgid "Image location not in the expected format %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:557 +#, python-format +msgid "Found possible share matches %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:561 +msgid "Unexpected exception while short listing used share." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:599 +#, python-format +msgid "Extending volume %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:710 +#, python-format +msgid "Shares on vserver %s will only be used for provisioning." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:714 +#: cinder/volume/drivers/netapp/nfs.py:892 +msgid "No vserver set in config. SSC will be disabled." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:757 +#, python-format +msgid "Exception creating vol %(name)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:765 +#, python-format +msgid "Volume %s could not be created on shares." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:815 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:856 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:862 +#, python-format +msgid "" +"Cloning with params volume %(volume)s, src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:924 +msgid "No cluster ssc stats found. Wait for next volume stats update." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:941 +msgid "No shares found hence skipping ssc refresh." 
+msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:978 +#: cinder/volume/drivers/netapp/nfs.py:1221 +#, python-format +msgid "Shortlisted del elg files %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:983 +#: cinder/volume/drivers/netapp/nfs.py:1226 +#, python-format +msgid "Getting file usage for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:988 +#: cinder/volume/drivers/netapp/nfs.py:1231 +#, python-format +msgid "file-usage for path %(path)s is %(bytes)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1005 +#: cinder/volume/drivers/netapp/nfs.py:1268 +#, python-format +msgid "Share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1007 +#: cinder/volume/drivers/netapp/nfs.py:1270 +#, python-format +msgid "No share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1038 +#, python-format +msgid "Found volume %(vol)s for share %(share)s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1129 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1139 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:241 +#, python-format +msgid "Unexpected error while creating ssc vol list. Message - %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:272 +#, python-format +msgid "Exception querying aggr options. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:313 +#, python-format +msgid "Exception querying sis information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:347 +#, python-format +msgid "Exception querying mirror information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:379 +#, python-format +msgid "Exception querying storage disk. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:421 +#, python-format +msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:455 +#, python-format +msgid "Successfully completed stale refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:482 +#, python-format +msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:488 +#, python-format +msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:500 +msgid "Backend not a VolumeDriver." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:502 +msgid "Backend server not NaServer." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:505 +msgid "ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:517 +msgid "refresh stale ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:616 +msgid "Fatal error: User not permitted to query NetApp volumes." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:623 +#, python-format +msgid "" +"The user does not have access or sufficient privileges to use all ssc " +"apis. The ssc features %s may not work as expected." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:122 +msgid "ems executed successfully." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:124 +#, python-format +msgid "Failed to invoke ems. 
Message : %s" +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:137 +msgid "" +"It is not the recommended way to use drivers by NetApp. Please use " +"NetAppDriver to achieve the functionality." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:160 +msgid "Requires an NaServer instance." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:317 +msgid "Unsupported Clustered Data ONTAP version." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:99 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:150 +#, python-format +msgid "Extending volume: %(id)s New size: %(size)s GB" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:166 +#, python-format +msgid "Volume %s does not exist, it seems it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:179 +#, python-format +msgid "Cannot delete snapshot %(origin): %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:190 +#, python-format +msgid "Creating temp snapshot of the original volume: %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:200 +#: cinder/volume/drivers/nexenta/nfs.py:200 +#, python-format +msgid "Volume creation failed, deleting created snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:205 +#: cinder/volume/drivers/nexenta/nfs.py:205 +#, python-format +msgid "Failed to delete zfs snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:223 +#, python-format +msgid "Enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:250 +#, python-format +msgid "Remote NexentaStor appliance at %s should be SSH-bound." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:267 +#, python-format +msgid "" +"Cannot send source snapshot %(src)s to destination %(dst)s. Reason: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:275 +#, python-format +msgid "" +"Cannot delete temporary source snapshot %(src)s on NexentaStor Appliance:" +" %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:281 +#, python-format +msgid "Cannot delete source volume %(volume)s on NexentaStor Appliance: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:318 +#, python-format +msgid "Snapshot %s does not exist, it seems it was already deleted." 
+msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:439 +#: cinder/volume/drivers/windows/windows_utils.py:230 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:449 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:461 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:471 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:481 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:514 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:522 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:83 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:88 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:89 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:90 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:96 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:85 +#, python-format +msgid "Volume %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:89 +#, python-format +msgid "Folder %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:114 +#, python-format +msgid "Creating folder on Nexenta Store %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:146 +#, python-format +msgid "Cannot destroy created folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:176 +#, python-format +msgid "Cannot destroy cloned folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:227 +#, python-format +msgid "Folder %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:237 +#: cinder/volume/drivers/nexenta/nfs.py:268 +#, python-format +msgid "Snapshot %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:302 +#, python-format +msgid "Creating regular file: %s.This may take some time." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:313 +#, python-format +msgid "Regular file: %s created." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:365 +#, python-format +msgid "Sharing folder %s on Nexenta Store" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:393 +#, python-format +msgid "Shares loaded: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/utils.py:46 +#, python-format +msgid "Invalid value: \"%s\"" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:93 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:99 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:107 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:137 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:190 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:246 +#, python-format +msgid "Snapshot info: %(name)s => %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:321 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:79 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:166 +#, python-format +msgid "Invalid hp3parclient version. Version %s or greater required." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:179 +#, python-format +msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:193 +#, python-format +msgid "HP3PARCommon %(common_ver)s, hp3parclient %(rest_ver)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:212 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:494 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:228 +#, python-format +msgid "Failed to get domain because CPG (%s) doesn't exist on array." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:247 +#, python-format +msgid "Error extending volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:347 +#, python-format +msgid "command %s failed" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:390 +#, python-format +msgid "Error running ssh command: %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:586 +#, python-format +msgid "VV Set %s does not exist." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:633 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:684 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:752 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1004 +#, python-format +msgid "Failure in update_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1019 +#, python-format +msgid "Failure in clear_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1031 +#, python-format +msgid "Error attaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1039 +#, python-format +msgid "Error detaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:124 +#, python-format +msgid "Invalid IP address format '%s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:158 +#, python-format +msgid "" +"Found invalid iSCSI IP address(s) in configuration option(s) " +"hp3par_iscsi_ips or iscsi_ip_address '%s.'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:164 +msgid "At least one valid iSCSI IP address must be set." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:266 +msgid "Least busy iSCSI port not found, using first iSCSI port in list." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:75 +#, python-format +msgid "Failure while invoking function: %(func)s. Error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:162 +#, python-format +msgid "Error while terminating session: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:165 +msgid "Successfully established connection to the server." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:172 +#, python-format +msgid "Error while logging out the user: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:218 +#, python-format +msgid "" +"Not authenticated error occurred. Will create session and try API call " +"again: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:258 +#, python-format +msgid "Task: %(task)s progress: %(prog)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:262 +#, python-format +msgid "Task %s status: success." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:266 +#: cinder/volume/drivers/vmware/api.py:271 +#, python-format +msgid "Task: %(task)s failed with error: %(err)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:290 +msgid "Lease is ready." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:294 +msgid "Lease initializing..." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:304 +#, python-format +msgid "Error: unknown lease state %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:51 +#, python-format +msgid "Read %(bytes)s out of %(max)s from ThreadSafePipe." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:56 +#, python-format +msgid "Completed transfer of size %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:102 +#, python-format +msgid "Initiating image service update on image: %(image)s with meta: %(meta)s" +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:117 +#, python-format +msgid "Glance image: %s is now active." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:123 +#, python-format +msgid "Glance image: %s is in killed state." 
+msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:132 +#, python-format +msgid "Glance image %(id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:171 +#, python-format +msgid "" +"Exception during HTTP connection close in VMwareHTTPWrite. Exception is " +"%s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:203 +#: cinder/volume/drivers/vmware/read_write_util.py:292 +msgid "Could not retrieve URL from lease." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:206 +#, python-format +msgid "Opening vmdk url: %s for write." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:231 +#, python-format +msgid "Written %s bytes to vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:242 +#: cinder/volume/drivers/vmware/read_write_util.py:318 +#, python-format +msgid "Updating progress to %s percent." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:258 +#: cinder/volume/drivers/vmware/read_write_util.py:334 +msgid "Lease released." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:260 +#: cinder/volume/drivers/vmware/read_write_util.py:336 +#, python-format +msgid "Lease is already in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:295 +#, python-format +msgid "Opening vmdk url: %s for read." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:307 +#, python-format +msgid "Read %s bytes from vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:150 +#, python-format +msgid "Error(s): %s occurred in the call to RetrievePropertiesEx." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:189 +#, python-format +msgid "No such SOAP method %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:198 +#, python-format +msgid "httplib error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:209 +#, python-format +msgid "Socket error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:218 +#, python-format +msgid "Type error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:225 +#, python-format +msgid "Error in %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:112 +#, python-format +msgid "Returning spec value %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:115 +#, python-format +msgid "Invalid spec value: %s specified." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:118 +#, python-format +msgid "Returning default spec value: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:169 +#, python-format +msgid "%s not set." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:174 +#, python-format +msgid "Successfully setup driver: %(driver)s for server: %(ip)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:219 +msgid "Backing not available, no operation to be performed." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:287 +#, python-format +msgid "" +"Unable to pick datastore to accommodate %(size)s bytes from the " +"datastores: %(dss)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:293 +#, python-format +msgid "" +"Selected datastore: %(datastore)s with %(host_count)d connected host(s) " +"for the volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:375 +#, python-format +msgid "" +"Unable to find suitable datastore for volume of size: %(vol)s GB under " +"host: %(host)s. 
More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:385 +#, python-format +msgid "Unable to find host to accommodate a disk of size: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:412 +#, python-format +msgid "" +"Unable to find suitable datastore for volume: %(vol)s under host: " +"%(host)s. More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:422 +#, python-format +msgid "Unable to create volume: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:441 +#, python-format +msgid "The instance: %s for which initialize connection is called, exists." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:448 +#, python-format +msgid "There is no backing for the volume: %s. Need to create one." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:456 +msgid "The instance for which initialize connection is called, does not exist." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:461 +#, python-format +msgid "Trying to boot from an empty volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:470 +#, python-format +msgid "" +"Returning connection_info: %(info)s for volume: %(volume)s with " +"connector: %(connector)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:518 +#, python-format +msgid "Snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:523 +#, python-format +msgid "There is no backing, so will not create snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:528 +#, python-format +msgid "Successfully created snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:549 +#, python-format +msgid "Delete snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:554 +#, python-format +msgid "There is no backing, and so there is no snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:558 +#, python-format +msgid "Successfully deleted snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:586 +#, python-format +msgid "Successfully cloned new backing: %(back)s from source VMDK file: %(vmdk)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:603 +#, python-format +msgid "" +"There is no backing for the source volume: %(svol)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:633 +#, python-format +msgid "" +"There is no backing for the source snapshot: %(snap)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:642 +#: cinder/volume/drivers/vmware/vmdk.py:982 +#, python-format +msgid "" +"There is no snapshot point for the snapshoted volume: %(snap)s. Not " +"creating any backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:678 +#, python-format +msgid "Cannot create image of disk format: %s. Only vmdk disk format is accepted." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:713 +#: cinder/volume/drivers/vmware/vmdk.py:771 +#, python-format +msgid "Fetching glance image: %(id)s to server: %(host)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:722 +#: cinder/volume/drivers/vmware/vmdk.py:792 +#, python-format +msgid "Done copying image: %(id)s to volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:725 +#, python-format +msgid "" +"Exception in copy_image_to_volume: %(excep)s. Deleting the backing: " +"%(back)s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:746 +#, python-format +msgid "Exception in _select_ds_for_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:749 +#, python-format +msgid "Selected datastore %(ds)s for new volume of size %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:784 +#, python-format +msgid "Exception in copy_image_to_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:787 +#, python-format +msgid "Deleting the backing: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:808 +#, python-format +msgid "Copy glance image: %s to create new volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:842 +msgid "Upload to glance of attached volume is not supported." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:847 +#, python-format +msgid "Copy Volume: %s to new image." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:853 +#, python-format +msgid "Backing not found, creating for volume: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:872 +#, python-format +msgid "Done copying volume %(vol)s to a new image %(img)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:922 +#, python-format +msgid "Relocating volume: %(backing)s to %(ds)s and %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:961 +#: cinder/volume/drivers/vmware/volumeops.py:630 +#, python-format +msgid "Successfully created clone: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:974 +#, python-format +msgid "" +"There is no backing for the snapshoted volume: %(snap)s. Not creating any" +" backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1010 +#, python-format +msgid "" +"There is no backing for the source volume: %(src)s. Not creating any " +"backing for volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1018 +#, python-format +msgid "Linked clone of source volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:94 +#, python-format +msgid "Downloading image: %s from glance image server as a flat vmdk file." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:107 +#: cinder/volume/drivers/vmware/vmware_images.py:126 +#, python-format +msgid "Downloaded image: %s from glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:113 +#, python-format +msgid "Downloading image: %s from glance image server using HttpNfc import." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:132 +#, python-format +msgid "Uploading image: %s to the Glance image server using HttpNfc export." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:158 +#, python-format +msgid "Uploaded image: %s to the Glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:87 +#, python-format +msgid "Did not find any backing with name: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:94 +#, python-format +msgid "Deleting the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:97 +#, python-format +msgid "Initiated deletion of VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:99 +#, python-format +msgid "Deleted the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:237 +#, python-format +msgid "There are no valid datastores attached to %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:289 +#, python-format +msgid "" +"Creating folder: %(child_folder_name)s under parent folder: " +"%(parent_folder)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:306 +#, python-format +msgid "Child folder already present: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:314 +#, python-format +msgid "Created child folder: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:365 +#, python-format +msgid "Spec for creating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:383 +#, python-format +msgid "" +"Creating volume backing name: %(name)s disk_type: %(disk_type)s size_kb: " +"%(size_kb)s at folder: %(folder)s resourse pool: %(resource_pool)s " +"datastore name: %(ds_name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:395 +#, python-format +msgid "Initiated creation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:398 +#, python-format +msgid "Successfully created volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:438 +#, python-format +msgid "Spec for relocating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:452 +#, python-format +msgid "" +"Relocating backing: %(backing)s to datastore: %(ds)s and resource pool: " +"%(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:462 +#, python-format +msgid "Initiated relocation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:464 +#, python-format +msgid "" +"Successfully relocated volume backing: %(backing)s to datastore: %(ds)s " +"and resource pool: %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:474 +#, python-format +msgid "Moving backing: %(backing)s to folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:479 +#, python-format +msgid "Initiated move of volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:482 +#, python-format +msgid "Successfully moved volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:494 +#, python-format +msgid "Snapshoting backing: %(backing)s with name: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:501 +#, python-format +msgid "Initiated snapshot of volume backing: %(backing)s named: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:505 +#, python-format +msgid "Successfully created snapshot: %(snap)s for volume backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:553 +#, python-format +msgid "Deleting the snapshot: %(name)s from backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:558 +#, python-format +msgid "" +"Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " +"delete anything." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:565 +#, python-format +msgid "Initiated snapshot: %(name)s deletion for backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:569 +#, python-format +msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:597 +#, python-format +msgid "Spec for cloning the backing: %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:613 +#, python-format +msgid "" +"Creating a clone of backing: %(back)s, named: %(name)s, clone type: " +"%(type)s from snapshot: %(snap)s on datastore: %(ds)s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:627 +#, python-format +msgid "Initiated clone of backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:638 +#, python-format +msgid "Deleting file: %(file)s under datacenter: %(dc)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:646 +#, python-format +msgid "Initiated deletion via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:648 +#, python-format +msgid "Successfully deleted file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:701 +msgid "Copying disk data before snapshot of the VM" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:711 +#, python-format +msgid "Initiated copying disk data via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:713 +#, python-format +msgid "Successfully copied disk at: %(src)s to: %(dest)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:722 +#, python-format +msgid "Deleting vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:729 +#, python-format +msgid "Initiated deleting vmdk file via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:731 +#, python-format +msgid "Deleted vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/windows/windows.py:102 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:47 +#, python-format +msgid "" +"check_for_setup_error: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:53 +msgid "check_for_setup_error: there is no ISCSI traffic listening." +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:63 +#, python-format +msgid "" +"get_host_information: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:73 +#, python-format +msgid "" +"get_host_information: the ISCSI target information could not be " +"retrieved. WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:105 +#, python-format +msgid "" +"associate_initiator_with_iscsi_target: an association between initiator: " +"%(init)s and target name: %(target)s could not be established. WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:123 +#, python-format +msgid "" +"delete_iscsi_target: error when deleting the iscsi target associated with" +" target name: %(target)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:139 +#, python-format +msgid "" +"create_volume: error when creating the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:157 +#, python-format +msgid "" +"delete_volume: error when deleting the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:177 +#, python-format +msgid "" +"create_snapshot: error when creating the snapshot name: %(vol_name)s . 
" +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:193 +#, python-format +msgid "" +"create_volume_from_snapshot: error when creating the volume name: " +"%(vol_name)s from snapshot name: %(snap_name)s. WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:208 +#, python-format +msgid "" +"delete_snapshot: error when deleting the snapshot name: %(snap_name)s . " +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:223 +#, python-format +msgid "" +"create_iscsi_target: error when creating iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:240 +#, python-format +msgid "" +"remove_iscsi_target: error when deleting iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:255 +#, python-format +msgid "" +"add_disk_to_target: error adding disk associated to volume : %(vol_name)s" +" to the target name: %(tar_name)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:273 +#, python-format +msgid "" +"copy_vhd_disk: error when copying disk from source path : %(src_path)s to" +" destination path: %(dest_path)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:290 +#, python-format +msgid "" +"extend: error when extending the volume: %(vol_name)s .WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/flows/common.py:52 +#, python-format +msgid "Restoring source %(source_volid)s status to %(status)s" +msgstr "" + +#: cinder/volume/flows/common.py:58 +#, python-format +msgid "" +"Failed setting source volume %(source_volid)s back to its initial " +"%(source_status)s status" +msgstr "" + +#: cinder/volume/flows/common.py:83 +#, python-format +msgid "Updating volume: %(volume_id)s with %(update)s due to: %(reason)s" +msgstr "" + +#: cinder/volume/flows/common.py:90 +#: cinder/volume/flows/manager/create_volume.py:676 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(update)s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:81 +#, python-format +msgid "Originating snapshot status must be one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:103 +#, python-format +msgid "" +"Unable to create a volume from an originating source volume when its " +"status is not one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:126 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than the snapshot size " +"%(snap_size)sGB. They must be >= original snapshot size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:135 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than original volume size " +"%(source_size)sGB. They must be >= original volume size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:144 +#, python-format +msgid "Volume size %(size)s must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:186 +#, python-format +msgid "" +"Size of specified image %(image_size)sGB is larger than volume size " +"%(volume_size)sGB." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:194 +#, python-format +msgid "" +"Volume size %(volume_size)sGB cannot be smaller than the image minDisk " +"size %(min_disk)sGB." 
+msgstr "" + +#: cinder/volume/flows/api/create_volume.py:212 +#, python-format +msgid "Metadata property key %s greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:217 +#, python-format +msgid "Metadata property key %s value greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:254 +#, python-format +msgid "Availability zone '%s' is invalid" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:267 +msgid "Volume must be in the same availability zone as the snapshot" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:276 +msgid "Volume must be in the same availability zone as the source volume" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:315 +msgid "Volume type will be changed to be the same as the source volume." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:463 +#, python-format +msgid "Failed destroying volume entry %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:546 +#, python-format +msgid "Failed rolling back quota for %s reservations" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:590 +#, python-format +msgid "Failed to update quota for deleting volume: %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:678 +#: cinder/volume/flows/manager/create_volume.py:192 +#, python-format +msgid "Volume %s: create failed" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:682 +msgid "Unexpected build error:" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:105 +#, python-format +msgid "" +"Volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d due to " +"%(reason)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:124 +#, python-format +msgid "Volume %s: re-scheduled" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:141 +#, python-format +msgid "Updating volume %(volume_id)s with %(update)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:146 +#, python-format +msgid "Volume %s: resetting 'creating' status failed." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:165 +#, python-format +msgid "Volume %s: rescheduling failed" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:308 +#, python-format +msgid "" +"Failed notifying about the volume action %(event)s for volume " +"%(volume_id)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:345 +#, python-format +msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:347 +#, python-format +msgid "" +"Failed updating volume %(vol_id)s metadata using the provided " +"%(src_type)s %(src_id)s metadata" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:405 +#, python-format +msgid "" +"Failed fetching snapshot %(snapshot_id)s bootable flag using the provided" +" glance snapshot %(snapshot_ref_id)s volume reference" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:418 +#, python-format +msgid "Marking volume %s as bootable." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:421 +#, python-format +msgid "Failed updating volume %(volume_id)s bootable flag to true" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:448 +#, python-format +msgid "" +"Attempting download of %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s." 
+msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:455 +#: cinder/volume/flows/manager/create_volume.py:466 +#, python-format +msgid "" +"Failed to copy image %(image_id)s to volume: %(volume_id)s, error: " +"%(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:461 +#, python-format +msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:475 +#, python-format +msgid "" +"Downloaded image %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s successfully." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:512 +#, python-format +msgid "" +"Creating volume glance metadata for volume %(volume_id)s backed by image " +"%(image_id)s with: %(vol_metadata)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:526 +#, python-format +msgid "" +"Cloning %(volume_id)s from image %(image_id)s at location " +"%(image_location)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:552 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(updates)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:574 +#, python-format +msgid "Unable to create volume. Volume driver %s not initialized" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:588 +#, python-format +msgid "" +"Volume %(volume_id)s: being created using %(functor)s with specification:" +" %(volume_spec)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:611 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with creation provided " +"model %(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:619 +#, python-format +msgid "Volume %s: creating export" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:633 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with driver provided model " +"%(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:680 +#, python-format +msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" +msgstr "" + +#~ msgid "Error retrieving volume status: %s" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get system name" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get storage pool data" +#~ msgstr "" + +#~ msgid "Cannot find any Fibre Channel HBAs" +#~ msgstr "" + +#~ msgid "Volume status must be available or error" +#~ msgstr "" + +#~ msgid "No backend config with id %s" +#~ msgstr "" + +#~ msgid "No sm_flavor called %s" +#~ msgstr "" + +#~ msgid "No sm_volume with id %s" +#~ msgstr "" + +#~ msgid "Error: %s" +#~ msgstr "" + +#~ msgid "Unexpected state while cloning %s" +#~ msgstr "" + +#~ msgid "iSCSI device not found at %s" +#~ msgstr "" + +#~ msgid "Fibre Channel device not found." +#~ msgstr "" + +#~ msgid "Uncaught exception" +#~ msgstr "" + +#~ msgid "Out reactor registered" +#~ msgstr "" + +#~ msgid "CONSUMER GOT %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT QUEUED %(data)s" +#~ msgstr "" + +#~ msgid "Could not create IPC directory %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT %(data)s" +#~ msgstr "" + +#~ msgid "May specify only one of snapshot, imageRef or source volume" +#~ msgstr "" + +#~ msgid "Volume size cannot be lesser than the Snapshot size" +#~ msgstr "" + +#~ msgid "Unable to clone volumes that are in an error state" +#~ msgstr "" + +#~ msgid "Clones currently must be >= original volume size." 
+#~ msgstr "" + +#~ msgid "Volume size '%s' must be an integer and greater than 0" +#~ msgstr "" + +#~ msgid "Size of specified image is larger than volume size." +#~ msgstr "" + +#~ msgid "Image minDisk size is larger than the volume size." +#~ msgstr "" + +#~ msgid "" +#~ msgstr "" + +#~ msgid "Availability zone is invalid" +#~ msgstr "" + +#~ msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +#~ msgstr "" + +#~ msgid "volume %s: creating from snapshot" +#~ msgstr "" + +#~ msgid "volume %s: creating from existing volume" +#~ msgstr "" + +#~ msgid "volume %s: creating from image" +#~ msgstr "" + +#~ msgid "volume %s: creating" +#~ msgstr "" + +#~ msgid "Setting volume: %s status to error after failed image copy." +#~ msgstr "" + +#~ msgid "Unexpected Error: " +#~ msgstr "" + +#~ msgid "volume %s: creating export" +#~ msgstr "" + +#~ msgid "volume %s: create failed" +#~ msgstr "" + +#~ msgid "volume %s: created successfully" +#~ msgstr "" + +#~ msgid "volume %s: Error trying to reschedule create" +#~ msgstr "" + +#~ msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +#~ msgstr "" + +#~ msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +#~ msgstr "" + +#~ msgid "Downloaded image %(image_id)s to %(volume_id)s successfully." +#~ msgstr "" + +#~ msgid "Array Mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "LUN %(lun)s of size %(size)s MB is created." +#~ msgstr "" + +#~ msgid "Array mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "Failed to attach iser target for volume %(volume_id)s." +#~ msgstr "" + +#~ msgid "Fetching %s" +#~ msgstr "" + +#~ msgid "Link Local address is not found.:%s" +#~ msgstr "" + +#~ msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +#~ msgstr "" + +#~ msgid "Started %(name)s on %(_host)s:%(_port)s" +#~ msgstr "" + +#~ msgid "Unable to find a Fibre Channel volume device" +#~ msgstr "" + +#~ msgid "Volume device not found at %s" +#~ msgstr "" + +#~ msgid "Unable to find Volume Group: %s" +#~ msgstr "" + +#~ msgid "Failed to create Volume Group: %s" +#~ msgstr "" + +#~ msgid "snapshot %(snap_name)s: creating" +#~ msgstr "" + +#~ msgid "Running with CoraidDriver for ESM EtherCLoud" +#~ msgstr "" + +#~ msgid "Update session cookie %(session)s" +#~ msgstr "" + +#~ msgid "Message : %(message)s" +#~ msgstr "" + +#~ msgid "Error while trying to set group: %(message)s" +#~ msgstr "" + +#~ msgid "Unable to find group: %(group)s" +#~ msgstr "" + +#~ msgid "ESM urlOpen error" +#~ msgstr "" + +#~ msgid "JSON Error" +#~ msgstr "" + +#~ msgid "Request without URL" +#~ msgstr "" + +#~ msgid "Configure data : %s" +#~ msgstr "" + +#~ msgid "Configure response : %s" +#~ msgstr "" + +#~ msgid "Unable to retrive volume infos for volume %(volname)s" +#~ msgstr "" + +#~ msgid "Cannot login on Coraid ESM" +#~ msgstr "" + +#~ msgid "Fail to create volume %(volname)s" +#~ msgstr "" + +#~ msgid "Failed to delete volume %(volname)s" +#~ msgstr "" + +#~ msgid "Failed to Create Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "Failed to Delete Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "Failed to Create Volume from Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "fmt = %(fmt)s backed by: %(backing_file)s" +#~ msgstr "" + +#~ msgid "Expected image to be in raw format, but is %s" +#~ msgstr "" + +#~ msgid "volume group %s doesn't exist" +#~ msgstr "" + +#~ msgid "Error retrieving volume stats: %s" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Could not get system name" +#~ msgstr "" + +#~ msgid "CPG (%s) must 
be in a domain" +#~ msgstr "" + +#~ msgid "Error populating default encryption types!" +#~ msgstr "" + +#~ msgid "Unexpected error while running command." +#~ msgstr "" + +#~ msgid "Nexenta SA returned the error" +#~ msgstr "" + +#~ msgid "Ignored target group creation error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group member addition error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LU creation error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Copying metadata from snapshot %(snap_volume_id)s to %(volume_id)s" +#~ msgstr "" + +#~ msgid "Connection to glance failed" +#~ msgstr "" + +#~ msgid "Invalid snapshot" +#~ msgstr "" + +#~ msgid "Invalid input received" +#~ msgstr "" + +#~ msgid "Invalid volume type" +#~ msgstr "" + +#~ msgid "Invalid volume" +#~ msgstr "" + +#~ msgid "Invalid host" +#~ msgstr "" + +#~ msgid "Invalid auth key" +#~ msgstr "" + +#~ msgid "Invalid metadata" +#~ msgstr "" + +#~ msgid "Invalid metadata size" +#~ msgstr "" + +#~ msgid "Migration error" +#~ msgstr "" + +#~ msgid "Quota exceeded" +#~ msgstr "" + +#~ msgid "Connection to swift failed" +#~ msgstr "" + +#~ msgid "Volume migration failed" +#~ msgstr "" + +#~ msgid "SSH command injection detected" +#~ msgstr "" + +#~ msgid "Invalid qos specs" +#~ msgstr "" + +#~ msgid "debug in callback: %s" +#~ msgstr "" + +#~ msgid "Expected object of type: %s" +#~ msgstr "" + +#~ msgid "timefunc: '%(name)s' took %(total_time).2f secs" +#~ msgstr "" + +#~ msgid "base image still has %s snapshots so not deleting base image" +#~ msgstr "" + +#~ msgid "Failed to rename migration destination volume %(vol)s to %(name)s" +#~ msgstr "" + +#~ msgid "Resize volume \"%(name)s\" to %(size)s" +#~ msgstr "" + +#~ msgid "Volume \"%(name)s\" resized. New size is %(size)s" +#~ msgstr "" + +#~ msgid "Invalid snapshot backing file format: %s" +#~ msgstr "" + +#~ msgid "Extend volume from %(old_size) to %(new_size)" +#~ msgstr "" + +#~ msgid "pool %s doesn't exist" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Could not get system name." +#~ msgstr "" + +#~ msgid "Disk not found: %s" +#~ msgstr "" + +#~ msgid "read timed out" +#~ msgstr "" + +#~ msgid "check_for_setup_error." +#~ msgstr "" + +#~ msgid "check_for_setup_error: Can not get device type." +#~ msgstr "" + +#~ msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +#~ msgstr "" + +#~ msgid "_get_device_type: Storage Pool must be configured." +#~ msgstr "" + +#~ msgid "create_volume:volume name: %s." +#~ msgstr "" + +#~ msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +#~ msgstr "" + +#~ msgid "create_export: volume name:%s" +#~ msgstr "" + +#~ msgid "create_export:Volume %(name)s does not exist." +#~ msgstr "" + +#~ msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +#~ msgstr "" + +#~ msgid "terminate_connection:Host does not exist. Host name:%(host)s." +#~ msgstr "" + +#~ msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +#~ msgstr "" + +#~ msgid "create_snapshot:Device does not support snapshot." +#~ msgstr "" + +#~ msgid "create_snapshot:Resource pool needs 1GB valid size at least." +#~ msgstr "" + +#~ msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +#~ msgstr "" + +#~ msgid "create_snapshot:Snapshot does not exist. 
Snapshot name:%(name)s" +#~ msgstr "" + +#~ msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +#~ msgstr "" + +#~ msgid "delete_snapshot:Device does not support snapshot." +#~ msgstr "" + +#~ msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +#~ msgstr "" + +#~ msgid "_check_conf_file: %s" +#~ msgstr "" + +#~ msgid "Write login information to xml error. %s" +#~ msgstr "" + +#~ msgid "_get_login_info error. %s" +#~ msgstr "" + +#~ msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +#~ msgstr "" + +#~ msgid "_get_lun_set_info:%s" +#~ msgstr "" + +#~ msgid "_get_iscsi_info:%s" +#~ msgstr "" + +#~ msgid "CLI command:%s" +#~ msgstr "" + +#~ msgid "_execute_cli:%s" +#~ msgstr "" + +#~ msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +#~ msgstr "" + +#~ msgid "_get_tgt_iqn:iSCSI IP is %s." +#~ msgstr "" + +#~ msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +#~ msgstr "" + +#~ msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +#~ msgstr "" + +#~ msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +#~ msgstr "" + +#~ msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." +#~ msgstr "" + +#~ msgid "Ignored target creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group member addition error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LU creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LUN mapping entry addition error while ensuring export" +#~ msgstr "" + +#~ msgid "Invalid source volume %(reason)s." +#~ msgstr "" + +#~ msgid "The request is invalid." +#~ msgstr "" + +#~ msgid "Volume %(volume_id)s persistence file could not be found." +#~ msgstr "" + +#~ msgid "No disk at %(location)s" +#~ msgstr "" + +#~ msgid "Class %(class_name)s could not be found: %(exception)s" +#~ msgstr "" + +#~ msgid "Action not allowed." +#~ msgstr "" + +#~ msgid "Key pair %(key_name)s already exists." +#~ msgstr "" + +#~ msgid "Migration error: %(reason)s" +#~ msgstr "" + +#~ msgid "Maximum volume/snapshot size exceeded" +#~ msgstr "" + +#~ msgid "3PAR Host already exists: %(err)s. %(info)s" +#~ msgstr "" + +#~ msgid "Backup volume %(volume_id)s type not recognised." +#~ msgstr "" + +#~ msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s" +#~ msgstr "" + +#~ msgid "ssh_read: Read SSH timeout" +#~ msgstr "" + +#~ msgid "do_setup." +#~ msgstr "" + +#~ msgid "create_volume: volume name: %s." +#~ msgstr "" + +#~ msgid "delete_volume: volume name: %s." +#~ msgstr "" + +#~ msgid "create_cloned_volume: src volume: %(src)s tgt volume: %(tgt)s" +#~ msgstr "" + +#~ msgid "create_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" +#~ msgstr "" + +#~ msgid "delete_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" +#~ msgstr "" + +#~ msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Updating volume stats" +#~ msgstr "" + +#~ msgid "restore finished." +#~ msgstr "" + +#~ msgid "Error encountered during initialization of driver: %s" +#~ msgstr "" + +#~ msgid "Unabled to update stats, driver is uninitialized" +#~ msgstr "" + +#~ msgid "Snapshot file at %s does not exist." 
+#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %s does not exist" +#~ msgstr "" + +#~ msgid "Login to 3PAR array invalid" +#~ msgstr "" + +#~ msgid "There are no datastores present under %s." +#~ msgstr "" + +#~ msgid "Size for volume: %s not found, skipping secure delete." +#~ msgstr "" + +#~ msgid "Could not find attribute for LUN named %s" +#~ msgstr "" + +#~ msgid "Cleaning up incomplete backup operations" +#~ msgstr "" + +#~ msgid "Resetting volume %s to available (was backing-up)" +#~ msgstr "" + +#~ msgid "Resetting volume %s to error_restoring (was restoring-backup)" +#~ msgstr "" + +#~ msgid "Resetting backup %s to error (was creating)" +#~ msgstr "" + +#~ msgid "Resetting backup %s to available (was restoring)" +#~ msgstr "" + +#~ msgid "Resuming delete on backup: %s" +#~ msgstr "" + +#~ msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +#~ msgstr "" + +#~ msgid "create_backup finished. backup: %s" +#~ msgstr "" + +#~ msgid "delete_backup started, backup: %s" +#~ msgstr "" + +#~ msgid "delete_backup finished, backup %s deleted" +#~ msgstr "" + +#~ msgid "JSON transfer Error" +#~ msgstr "" + +#~ msgid "create volume error: %(err)s" +#~ msgstr "" + +#~ msgid "Create snapshot error." +#~ msgstr "" + +#~ msgid "Create luncopy error." +#~ msgstr "" + +#~ msgid "_find_host_lun_id transfer data error! " +#~ msgstr "" + +#~ msgid "ssh_read: Read SSH timeout." +#~ msgstr "" + +#~ msgid "There are no hosts in the inventory." +#~ msgstr "" + +#~ msgid "Unable to create volume: %(vol)s on the hosts: %(hosts)s." +#~ msgstr "" + +#~ msgid "Successfully cloned new backing: %s." +#~ msgstr "" + +#~ msgid "Successfully reverted clone: %(clone)s to snapshot: %(snapshot)s." +#~ msgstr "" + +#~ msgid "Copying backing files from %(src)s to %(dest)s." +#~ msgstr "" + +#~ msgid "Initiated copying of backing via task: %s." +#~ msgstr "" + +#~ msgid "Successfully copied backing to %s." +#~ msgstr "" + +#~ msgid "Registering backing at path: %s to inventory." +#~ msgstr "" + +#~ msgid "Initiated registring backing, task: %s." +#~ msgstr "" + +#~ msgid "Successfully registered backing: %s." +#~ msgstr "" + +#~ msgid "Reverting backing to snapshot: %s." +#~ msgstr "" + +#~ msgid "Initiated reverting snapshot via task: %s." +#~ msgstr "" + +#~ msgid "Successfully reverted to snapshot: %s." +#~ msgstr "" + +#~ msgid "Successfully copied disk data to: %s." +#~ msgstr "" + +#~ msgid "Error(s): %s occurred in the call to RetrieveProperties." +#~ msgstr "" + +#~ msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +#~ msgstr "" + +#~ msgid "Deploy v1 of the Cinder API. " +#~ msgstr "" + +#~ msgid "Deploy v2 of the Cinder API. " +#~ msgstr "" + +#~ msgid "_read_xml:%s" +#~ msgstr "" + +#~ msgid "request ip info is %s." +#~ msgstr "" + +#~ msgid "new str info is %s." +#~ msgstr "" + +#~ msgid "Failed to create iser target for volume %(volume_id)s." +#~ msgstr "" + +#~ msgid "Failed to remove iser target for volume %(volume_id)s." 
+#~ msgstr "" + +#~ msgid "rtstool is not installed correctly" +#~ msgstr "" + +#~ msgid "Creating iser_target for: %s" +#~ msgstr "" + +#~ msgid "Failed to create iser target for volume id:%(vol_id)s: %(e)s" +#~ msgstr "" + +#~ msgid "Removing iser_target for: %s" +#~ msgstr "" + +#~ msgid "Failed to remove iser target for volume id:%(vol_id)s: %(e)s" +#~ msgstr "" + +#~ msgid "Volume %s does not exist, it seems it was already deleted" +#~ msgstr "" + +#~ msgid "Executing zfs send/recv on the appliance" +#~ msgstr "" + +#~ msgid "zfs send/recv done, new volume %s created" +#~ msgstr "" + +#~ msgid "Failed to delete temp snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" + +#~ msgid "Failed to delete zfs recv snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" + +#~ msgid "rbd export-diff failed - %s" +#~ msgstr "" + +#~ msgid "rbd import-diff failed - %s" +#~ msgstr "" + +#~ msgid "%s is not on GPFS. Perhaps GPFS not mounted." +#~ msgstr "" + +#~ msgid "Folder %s does not exist, it seems it was already deleted." +#~ msgstr "" + +#~ msgid "No 'os-update_readonly_flag' was specified in request." +#~ msgstr "" + +#~ msgid "Volume 'readonly' flag must be specified in request as a boolean." +#~ msgstr "" + +#~ msgid "ISER provider_location not stored, using discovery" +#~ msgstr "" + +#~ msgid "Could not find iSER export for volume %s" +#~ msgstr "" + +#~ msgid "ISER Discovery: Found %s" +#~ msgstr "" + +#~ msgid "Failed to access the device on the path %(path)s: %(error)s." +#~ msgstr "" + +#~ msgid "iSER device not found at %s" +#~ msgstr "" + +#~ msgid "Found iSER node %(host_device)s (after %(tries)s rescans)." +#~ msgstr "" + +#~ msgid "Skipping ensure_export. No iser_target provisioned for volume: %s" +#~ msgstr "" + +#~ msgid "Skipping remove_export. No iser_target provisioned for volume: %s" +#~ msgstr "" + +#~ msgid "Downloading image: %s from glance image server." +#~ msgstr "" + +#~ msgid "Uploading image: %s to the Glance image server." +#~ msgstr "" + +#~ msgid "Invalid request body" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: prefix %s" +#~ msgstr "" + +#~ msgid "Schedule volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete schedule volume using flow: %s" +#~ msgstr "" + +#~ msgid "Create volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete create volume workflow" +#~ msgstr "" + +#~ msgid "Expected volume result not found" +#~ msgstr "" + +#~ msgid "Manager volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete manager volume workflow" +#~ msgstr "" + +#~ msgid "Unable to update stats, driver is uninitialized" +#~ msgstr "" + +#~ msgid "Bad reponse from server: %s" +#~ msgstr "" + +#~ msgid "%(flow)s has moved into state %(state)s from state %(old_state)s" +#~ msgstr "" + +#~ msgid "No request spec, will not reschedule" +#~ msgstr "" + +#~ msgid "No retry filter property or associated retry info, will not reschedule" +#~ msgstr "" + +#~ msgid "Retry info not present, will not reschedule" +#~ msgstr "" + +#~ msgid "Clear capabilities" +#~ msgstr "" + +#~ msgid "This usually means the volume was never succesfully created." +#~ msgstr "" + +#~ msgid "setting LU uppper (end) limit to %s" +#~ msgstr "" + +#~ msgid "Can't find lun or lun goup in array" +#~ msgstr "" + +#~ msgid "Volume to be restored to is smaller than the backup to be restored" +#~ msgstr "" + +#~ msgid "Volume driver '%(driver)s' not initialized." 
+#~ msgstr "" + +#~ msgid "in looping call" +#~ msgstr "" + +#~ msgid "Is the appropriate service running?" +#~ msgstr "" + +#~ msgid "Could not find another host" +#~ msgstr "" + +#~ msgid "Not enough allocatable volume gigabytes remaining" +#~ msgstr "" + +#~ msgid "Unable to update stats on non-intialized Volume Group: %s" +#~ msgstr "" + +#~ msgid "do_setup: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "migrate_volume started with more than one vdisk copy" +#~ msgstr "" + +#~ msgid "migrate_volume: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "Selected datastore: %s for the volume." +#~ msgstr "" + +#~ msgid "There are no valid datastores present under %s." +#~ msgstr "" + +#~ msgid "Unable to create volume, driver not initialized" +#~ msgstr "" + +#~ msgid "Migration %(migration_id)s could not be found." +#~ msgstr "" + +#~ msgid "Bad driver response status: %(status)s" +#~ msgstr "" + +#~ msgid "Instance %(instance_id)s could not be found." +#~ msgstr "" + +#~ msgid "Volume retype failed: %(reason)s" +#~ msgstr "" + +#~ msgid "SIGTERM received" +#~ msgstr "" + +#~ msgid "Child %(pid)d exited with status %(code)d" +#~ msgstr "" + +#~ msgid "_wait_child %d" +#~ msgstr "" + +#~ msgid "wait wrap.failed %s" +#~ msgstr "" + +#~ msgid "" +#~ "Report interval must be less than " +#~ "service down time. Current config: " +#~ ". Setting " +#~ "service_down_time to: %(new_service_down_time)s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target for volume %(name)s." +#~ msgstr "" + +#~ msgid "Updating iscsi target: %s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target %(name)s: %(e)s" +#~ msgstr "" + +#~ msgid "Caught '%(exception)s' exception." +#~ msgstr "" + +#~ msgid "Get code level failed" +#~ msgstr "" + +#~ msgid "do_setup: Could not get system name" +#~ msgstr "" + +#~ msgid "Failed to get license information." +#~ msgstr "" + +#~ msgid "do_setup: No configured nodes" +#~ msgstr "" + +#~ msgid "enter: _get_chap_secret_for_host: host name %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_chap_secret_for_host: host name " +#~ "%(host_name)s with secret %(chap_secret)s" +#~ msgstr "" + +#~ msgid "" +#~ "_create_host: Cannot clean host name. " +#~ "Host name is not unicode or string" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: %s" +#~ msgstr "" + +#~ msgid "leave: _get_host_from_connector: host %s" +#~ msgstr "" + +#~ msgid "enter: _create_host: host %s" +#~ msgstr "" + +#~ msgid "_create_host: No connector ports" +#~ msgstr "" + +#~ msgid "leave: _create_host: host %(host)s - %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +#~ msgstr "" + +#~ msgid "" +#~ "storwize_svc_multihostmap_enabled is set to " +#~ "False, Not allow multi host mapping" +#~ msgstr "" + +#~ msgid "volume %s mapping to multi host" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _map_vol_to_host: LUN %(result_lun)s, " +#~ "volume %(volume_name)s, host %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "leave: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "_create_host failed to return the host name." +#~ msgstr "" + +#~ msgid "_get_host_from_connector failed to return the host name for connector" +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to any host found." +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: Multiple mappings of " +#~ "volume %(vol_name)s found, no host " +#~ "specified." 
+#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to host %(host_name)s found" +#~ msgstr "" + +#~ msgid "protocol must be specified as ' iSCSI' or ' FC'" +#~ msgstr "" + +#~ msgid "enter: _create_vdisk: vdisk %s " +#~ msgstr "" + +#~ msgid "" +#~ "_create_vdisk %(name)s - did not find success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find success " +#~ "message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find mapping " +#~ "id in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to prepare FlashCopy" +#~ " from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "Unexpected mapping status %(status)s for " +#~ "mapping %(id)s. Attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Mapping %(id)s prepare failed to " +#~ "complete within the allotted %(to)d " +#~ "seconds timeout. Terminating." +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s with " +#~ "exception %(ex)s" +#~ msgstr "" + +#~ msgid "_prepare_fc_map: %s" +#~ msgstr "" + +#~ msgid "" +#~ "_start_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "enter: _run_flashcopy: execute FlashCopy from" +#~ " source %(source)s to target %(target)s" +#~ msgstr "" + +#~ msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +#~ msgstr "" + +#~ msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %(src_vdisk)s (%(src_id)s) does not exist" +#~ msgstr "" + +#~ msgid "" +#~ "_create_copy: cannot get source vdisk " +#~ "%(src)s capacity from vdisk attributes " +#~ "%(attr)s" +#~ msgstr "" + +#~ msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_flashcopy_mapping_attributes: mapping " +#~ "%(fc_map_id)s, attributes %(attributes)s" +#~ msgstr "" + +#~ msgid "enter: _is_vdisk_defined: vdisk %s " +#~ msgstr "" + +#~ msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +#~ msgstr "" + +#~ msgid "enter: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "warning: Tried to delete vdisk %s but it does not exist."
+#~ msgstr "" + +#~ msgid "leave: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "" +#~ "_add_vdisk_copy %(name)s - did not find" +#~ " success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "_get_vdisk_copy_attrs: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "_get_pool_attrs: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "enter: _execute_command_and_parse_attributes: command %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _execute_command_and_parse_attributes:\n" +#~ "command: %(cmd)s\n" +#~ "attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "_get_hdr_dic: attribute headers and values do not match.\n" +#~ " Headers: %(header)s\n" +#~ " Values: %(row)s" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ "stdout: %(out)s\n" +#~ "stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "Did not find expected column in %(fun)s: %(hdr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Volume size %(size)s cannot be less" +#~ " than the snapshot size %(snap_size)s. " +#~ "They must be >= original snapshot " +#~ "size." +#~ msgstr "" + +#~ msgid "" +#~ "Clones currently disallowed when %(size)s " +#~ "< %(source_size)s. They must be >= " +#~ "original volume size." +#~ msgstr "" + +#~ msgid "" +#~ "Size of specified image %(image_size)s " +#~ "is larger than volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "" +#~ "Image minDisk size %(min_disk)s is " +#~ "larger than the volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "Updating volume %(volume_id)s with %(update)s" +#~ msgstr "" + +#~ msgid "Volume %s: resetting 'creating' status failed" +#~ msgstr "" + +#~ msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s" +#~ msgstr "" + +#~ msgid "Marking volume %s as bootable" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting download of %(image_id)s " +#~ "(%(image_location)s) to volume %(volume_id)s" +#~ msgstr "" + +#~ msgid "" +#~ "Downloaded image %(image_id)s (%(image_location)s)" +#~ " to volume %(volume_id)s successfully" +#~ msgstr "" + +#~ msgid "" +#~ "Creating volume glance metadata for " +#~ "volume %(volume_id)s backed by image " +#~ "%(image_id)s with: %(vol_metadata)s" +#~ msgstr "" + +#~ msgid "" +#~ "Cloning %(volume_id)s from image %(image_id)s" +#~ " at location %(image_location)s" +#~ msgstr "" + diff --git a/cinder/locale/uk/LC_MESSAGES/cinder.po b/cinder/locale/uk/LC_MESSAGES/cinder.po index ab3fffddb6..73f214758d 100644 --- a/cinder/locale/uk/LC_MESSAGES/cinder.po +++ b/cinder/locale/uk/LC_MESSAGES/cinder.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: cinder\n" "Report-Msgid-Bugs-To: FULL NAME \n" -"POT-Creation-Date: 2012-04-08 23:04+0000\n" +"POT-Creation-Date: 2014-02-03 06:16+0000\n" "PO-Revision-Date: 2011-08-23 11:21+0000\n" "Last-Translator: Thierry Carrez \n" "Language-Team: Ukrainian \n" @@ -16,8184 +16,10722 @@ msgstr "" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 0.9.6\n" +"Generated-By: Babel 1.3\n" -#: cinder/context.py:59 +#: cinder/context.py:61 #, python-format msgid "Arguments dropped when creating context: %s" msgstr "" -#: cinder/context.py:90 +#: cinder/context.py:102 #, python-format msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" msgstr "" -#:
cinder/crypto.py:48 -msgid "Filename of root CA" +#: cinder/exception.py:66 cinder/brick/exception.py:33 +msgid "An unknown exception occurred." msgstr "" -#: cinder/crypto.py:51 -msgid "Filename of private key" -msgstr "Ім'я файлу секретного ключа" - -#: cinder/crypto.py:54 -msgid "Filename of root Certificate Revocation List" +#: cinder/exception.py:88 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" msgstr "" -#: cinder/crypto.py:57 -msgid "Where we keep our keys" -msgstr "Шлях до збережених ключів" - -#: cinder/crypto.py:60 -msgid "Where we keep our root CA" +#: cinder/exception.py:107 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" msgstr "" -#: cinder/crypto.py:63 -msgid "Should we use a CA for each project?" +#: cinder/exception.py:112 +#, python-format +msgid "Volume driver reported an error: %(message)s" msgstr "" -#: cinder/crypto.py:67 +#: cinder/exception.py:116 #, python-format -msgid "Subject for certificate for users, %s for project, user, timestamp" +msgid "Backup driver reported an error: %(message)s" msgstr "" -#: cinder/crypto.py:72 +#: cinder/exception.py:120 #, python-format -msgid "Subject for certificate for projects, %s for project, timestamp" +msgid "Connection to glance failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:124 +msgid "Not authorized." msgstr "" -#: cinder/crypto.py:292 +#: cinder/exception.py:129 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:133 #, python-format -msgid "Flags path: %s" +msgid "Policy doesn't allow %(action)s to be performed." msgstr "" -#: cinder/exception.py:56 -msgid "Unexpected error while running command." -msgstr "Неочікувана помилка при виконанні команди." +#: cinder/exception.py:137 +#, fuzzy, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "без порядку для повідомлень: %s" -#: cinder/exception.py:59 +#: cinder/exception.py:141 +msgid "Volume driver not ready." +msgstr "" + +#: cinder/exception.py:145 cinder/brick/exception.py:74 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:150 #, python-format -msgid "" -"%(description)s\n" -"Command: %(cmd)s\n" -"Exit code: %(exit_code)s\n" -"Stdout: %(stdout)r\n" -"Stderr: %(stderr)r" +msgid "Invalid snapshot: %(reason)s" msgstr "" -#: cinder/exception.py:94 -msgid "DB exception wrapped." +#: cinder/exception.py:154 +#, python-format +msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." msgstr "" -#: cinder/exception.py:155 -msgid "An unknown exception occurred." +#: cinder/exception.py:159 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:163 +msgid "Failed to load data into json format" msgstr "" -#: cinder/exception.py:178 -msgid "Failed to decrypt text" +#: cinder/exception.py:167 +msgid "The results are invalid." 
msgstr "" -#: cinder/exception.py:182 -msgid "Failed to paginate through images from image service" +#: cinder/exception.py:171 +#, python-format +msgid "Invalid input received: %(reason)s" msgstr "" -#: cinder/exception.py:186 -msgid "Virtual Interface creation failed" +#: cinder/exception.py:175 +#, python-format +msgid "Invalid volume type: %(reason)s" msgstr "" -#: cinder/exception.py:190 -msgid "5 attempts to create virtual interfacewith unique mac address failed" +#: cinder/exception.py:179 +#, python-format +msgid "Invalid volume: %(reason)s" msgstr "" -#: cinder/exception.py:195 -msgid "Connection to glance failed" +#: cinder/exception.py:183 +#, python-format +msgid "Invalid content type %(content_type)s." msgstr "" -#: cinder/exception.py:199 -msgid "Connection to melange failed" +#: cinder/exception.py:187 +#, python-format +msgid "Invalid host: %(reason)s" msgstr "" -#: cinder/exception.py:203 -msgid "Not authorized." +#: cinder/exception.py:193 cinder/brick/exception.py:81 +#, python-format +msgid "%(err)s" msgstr "" -#: cinder/exception.py:208 -msgid "User does not have admin privileges" +#: cinder/exception.py:197 +#, python-format +msgid "Invalid auth key: %(reason)s" msgstr "" -#: cinder/exception.py:212 +#: cinder/exception.py:201 #, python-format -msgid "Policy doesn't allow %(action)s to be performed." +msgid "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" msgstr "" -#: cinder/exception.py:216 -#, fuzzy, python-format -msgid "Not authorized for image %(image_id)s." -msgstr "без порядку для повідомлень: %s" +#: cinder/exception.py:206 +msgid "Service is unavailable at this time." +msgstr "" -#: cinder/exception.py:220 -msgid "Unacceptable parameters." +#: cinder/exception.py:210 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" msgstr "" -#: cinder/exception.py:225 -msgid "Invalid snapshot" +#: cinder/exception.py:214 +#, python-format +msgid "The device in the path %(path)s is unavailable: %(reason)s" msgstr "" -#: cinder/exception.py:229 +#: cinder/exception.py:218 #, python-format -msgid "Volume %(volume_id)s is not attached to anything" +msgid "Expected a uuid but received %(uuid)s." msgstr "" -#: cinder/exception.py:233 cinder/api/openstack/compute/contrib/keypairs.py:113 -msgid "Keypair data is invalid" +#: cinder/exception.py:222 cinder/brick/exception.py:68 +msgid "Resource could not be found." msgstr "" -#: cinder/exception.py:237 -msgid "Failed to load data into json format" +#: cinder/exception.py:228 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:232 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." msgstr "" -#: cinder/exception.py:241 -msgid "The request is invalid." +#: cinder/exception.py:237 +#, python-format +msgid "" +"Volume %(volume_id)s has no administration metadata with key " +"%(metadata_key)s." msgstr "" -#: cinder/exception.py:245 +#: cinder/exception.py:242 #, python-format -msgid "Invalid signature %(signature)s for user %(user)s." +msgid "Invalid metadata: %(reason)s" msgstr "" -#: cinder/exception.py:249 -msgid "Invalid input received" +#: cinder/exception.py:246 +#, python-format +msgid "Invalid metadata size: %(reason)s" msgstr "" -#: cinder/exception.py:253 +#: cinder/exception.py:250 #, python-format -msgid "Invalid instance type %(instance_type)s." +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." 
msgstr "" -#: cinder/exception.py:257 -msgid "Invalid volume type" +#: cinder/exception.py:255 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." msgstr "" -#: cinder/exception.py:261 -msgid "Invalid volume" +#: cinder/exception.py:259 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." msgstr "" -#: cinder/exception.py:265 +#: cinder/exception.py:264 #, python-format -msgid "Invalid port range %(from_port)s:%(to_port)s. %(msg)s" +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." msgstr "" #: cinder/exception.py:269 #, python-format -msgid "Invalid IP protocol %(protocol)s." +msgid "" +"Volume Type %(volume_type_id)s deletion is not allowed with volumes " +"present with the type." msgstr "" -#: cinder/exception.py:273 +#: cinder/exception.py:274 #, python-format -msgid "Invalid content type %(content_type)s." +msgid "Snapshot %(snapshot_id)s could not be found." msgstr "" -#: cinder/exception.py:277 +#: cinder/exception.py:278 #, python-format -msgid "Invalid cidr %(cidr)s." +msgid "deleting volume %(volume_name)s that has snapshot" msgstr "" -#: cinder/exception.py:281 -msgid "Invalid reuse of an RPC connection." +#: cinder/exception.py:282 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" msgstr "" -#: cinder/exception.py:285 -msgid "Invalid Parameter: Unicode is not supported by the current database." +#: cinder/exception.py:287 +#, python-format +msgid "No target id found for volume %(volume_id)s." msgstr "" -#: cinder/exception.py:292 +#: cinder/exception.py:291 #, python-format -msgid "%(err)s" +msgid "Invalid image href %(image_href)s." msgstr "" -#: cinder/exception.py:296 +#: cinder/exception.py:295 #, python-format -msgid "" -"Cannot perform action '%(action)s' on aggregate %(aggregate_id)s. Reason:" -" %(reason)s." +msgid "Image %(image_id)s could not be found." msgstr "" -#: cinder/exception.py:301 +#: cinder/exception.py:299 #, python-format -msgid "Group not valid. Reason: %(reason)s" +msgid "Service %(service_id)s could not be found." msgstr "" -#: cinder/exception.py:305 +#: cinder/exception.py:303 #, python-format -msgid "" -"Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot %(method)s while" -" the instance is in this state." +msgid "Host %(host)s could not be found." msgstr "" -#: cinder/exception.py:310 +#: cinder/exception.py:307 #, python-format -msgid "Instance %(instance_id)s is not running." +msgid "Scheduler Host Filter %(filter_name)s could not be found." msgstr "" -#: cinder/exception.py:314 +#: cinder/exception.py:311 #, python-format -msgid "Instance %(instance_id)s is not suspended." +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." msgstr "" -#: cinder/exception.py:318 +#: cinder/exception.py:315 #, python-format -msgid "Instance %(instance_id)s is not in rescue mode" +msgid "Could not find binary %(binary)s on host %(host)s." msgstr "" -#: cinder/exception.py:322 -msgid "Failed to suspend instance" +#: cinder/exception.py:319 +#, python-format +msgid "Invalid reservation expiration %(expire)s." 
msgstr "" -#: cinder/exception.py:326 -msgid "Failed to resume server" +#: cinder/exception.py:323 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" msgstr "" -#: cinder/exception.py:330 -msgid "Failed to reboot instance" +#: cinder/exception.py:328 +msgid "Quota could not be found" msgstr "" -#: cinder/exception.py:334 -msgid "Failed to terminate instance" +#: cinder/exception.py:332 +#, python-format +msgid "Unknown quota resources %(unknown)s." msgstr "" -#: cinder/exception.py:338 -msgid "Service is unavailable at this time." +#: cinder/exception.py:336 +#, python-format +msgid "Quota for project %(project_id)s could not be found." msgstr "" -#: cinder/exception.py:342 -msgid "Volume service is unavailable at this time." +#: cinder/exception.py:340 +#, python-format +msgid "Quota class %(class_name)s could not be found." msgstr "" -#: cinder/exception.py:346 -msgid "Compute service is unavailable at this time." +#: cinder/exception.py:344 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." msgstr "" -#: cinder/exception.py:350 +#: cinder/exception.py:348 #, python-format -msgid "Unable to migrate instance (%(instance_id)s) to current host (%(host)s)." +msgid "Quota reservation %(uuid)s could not be found." msgstr "" -#: cinder/exception.py:355 -msgid "Destination compute host is unavailable at this time." +#: cinder/exception.py:352 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" msgstr "" -#: cinder/exception.py:359 -msgid "Original compute host is unavailable at this time." +#: cinder/exception.py:356 +#, python-format +msgid "File %(file_path)s could not be found." msgstr "" -#: cinder/exception.py:363 -msgid "The supplied hypervisor type of is invalid." +#: cinder/exception.py:365 +#, python-format +msgid "Volume Type %(id)s already exists." msgstr "" -#: cinder/exception.py:367 -msgid "The instance requires a newer hypervisor version than has been provided." +#: cinder/exception.py:369 +#, python-format +msgid "Volume type encryption for type %(type_id)s already exists." msgstr "" -#: cinder/exception.py:372 +#: cinder/exception.py:373 #, python-format -msgid "" -"The supplied disk path (%(path)s) already exists, it is expected not to " -"exist." +msgid "Malformed message body: %(reason)s" msgstr "" #: cinder/exception.py:377 #, python-format -msgid "The supplied device path (%(path)s) is invalid." +msgid "Could not find config at %(path)s" msgstr "" #: cinder/exception.py:381 #, python-format -msgid "The supplied device (%(device)s) is busy." +msgid "Could not find parameter %(param)s" msgstr "" #: cinder/exception.py:385 -msgid "Unacceptable CPU info" +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" msgstr "" #: cinder/exception.py:389 #, python-format -msgid "%(address)s is not a valid IP v4/6 address." +msgid "No valid host was found. %(reason)s" msgstr "" -#: cinder/exception.py:393 +#: cinder/exception.py:398 #, python-format -msgid "" -"VLAN tag is not appropriate for the port group %(bridge)s. Expected VLAN " -"tag is %(tag)s, but the one associated with the port group is %(pgroup)s." +msgid "Host %(host)s is not up or doesn't exist." msgstr "" -#: cinder/exception.py:399 +#: cinder/exception.py:402 #, python-format -msgid "" -"vSwitch which contains the port group %(bridge)s is not associated with " -"the desired physical adapter. Expected vSwitch is %(expected)s, but the " -"one associated is %(actual)s." 
+msgid "Quota exceeded: code=%(code)s" msgstr "" -#: cinder/exception.py:406 +#: cinder/exception.py:409 #, python-format -msgid "Disk format %(disk_format)s is not acceptable" +msgid "" +"Requested volume or snapshot exceeds allowed Gigabytes quota. Requested " +"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." msgstr "" -#: cinder/exception.py:410 +#: cinder/exception.py:415 #, python-format -msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" msgstr "" -#: cinder/exception.py:414 +#: cinder/exception.py:419 #, python-format -msgid "Instance %(instance_id)s is unacceptable: %(reason)s" +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" msgstr "" -#: cinder/exception.py:418 +#: cinder/exception.py:423 #, python-format -msgid "Ec2 id %(ec2_id)s is unacceptable." -msgstr "" - -#: cinder/exception.py:422 -msgid "Resource could not be found." +msgid "Detected more than one volume with name %(vol_name)s" msgstr "" #: cinder/exception.py:427 #, python-format -msgid "Required flag %(flag)s not set." +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" msgstr "" -#: cinder/exception.py:431 +#: cinder/exception.py:432 #, python-format -msgid "Volume %(volume_id)s could not be found." +msgid "Unknown or unsupported command %(cmd)s" msgstr "" -#: cinder/exception.py:435 +#: cinder/exception.py:436 #, python-format -msgid "Unable to locate account %(account_name)s on Solidfire device" +msgid "Malformed response to command %(cmd)s: %(reason)s" msgstr "" #: cinder/exception.py:440 #, python-format -msgid "Volume not found for instance %(instance_id)s." +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" msgstr "" #: cinder/exception.py:444 #, python-format -msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" msgstr "" #: cinder/exception.py:449 -msgid "Zero volume types found." +#, python-format +msgid "Glance metadata for volume/snapshot %(id)s cannot be found." msgstr "" #: cinder/exception.py:453 #, python-format -msgid "Volume type %(volume_type_id)s could not be found." +msgid "Failed to export for volume: %(reason)s" msgstr "" #: cinder/exception.py:457 #, python-format -msgid "Volume type with name %(volume_type_name)s could not be found." +msgid "Failed to create metadata for volume: %(reason)s" msgstr "" -#: cinder/exception.py:462 +#: cinder/exception.py:461 #, python-format -msgid "" -"Volume Type %(volume_type_id)s has no extra specs with key " -"%(extra_specs_key)s." +msgid "Failed to update metadata for volume: %(reason)s" msgstr "" -#: cinder/exception.py:467 +#: cinder/exception.py:465 #, python-format -msgid "Snapshot %(snapshot_id)s could not be found." +msgid "Failed to copy metadata to volume: %(reason)s" msgstr "" -#: cinder/exception.py:471 +#: cinder/exception.py:469 #, python-format -msgid "deleting volume %(volume_name)s that has snapshot" +msgid "Failed to copy image to volume: %(reason)s" msgstr "" -#: cinder/exception.py:475 -#, python-format -msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +#: cinder/exception.py:473 +msgid "Invalid Ceph args provided for backup rbd operation" msgstr "" -#: cinder/exception.py:480 -#, python-format -msgid "No target id found for volume %(volume_id)s." 
+#: cinder/exception.py:477 +msgid "An error has occurred during backup operation" msgstr "" -#: cinder/exception.py:484 -#, python-format -msgid "No disk at %(location)s" +#: cinder/exception.py:481 +msgid "Backup RBD operation failed" msgstr "" -#: cinder/exception.py:488 +#: cinder/exception.py:485 #, python-format -msgid "Could not find a handler for %(driver_type)s volume." +msgid "Backup %(backup_id)s could not be found." msgstr "" -#: cinder/exception.py:492 +#: cinder/exception.py:489 +msgid "Failed to identify volume backend." +msgstr "" + +#: cinder/exception.py:493 #, python-format -msgid "Invalid image href %(image_href)s." +msgid "Invalid backup: %(reason)s" msgstr "" -#: cinder/exception.py:496 -msgid "" -"Some images have been stored via hrefs. This version of the api does not " -"support displaying image hrefs." +#: cinder/exception.py:497 +#, python-format +msgid "Connection to swift failed: %(reason)s" msgstr "" #: cinder/exception.py:501 #, python-format -msgid "Image %(image_id)s could not be found." +msgid "Transfer %(transfer_id)s could not be found." msgstr "" #: cinder/exception.py:505 #, python-format -msgid "Kernel not found for image %(image_id)s." +msgid "Volume migration failed: %(reason)s" msgstr "" #: cinder/exception.py:509 #, python-format -msgid "User %(user_id)s could not be found." +msgid "SSH command injection detected: %(command)s" msgstr "" #: cinder/exception.py:513 #, python-format -msgid "Project %(project_id)s could not be found." +msgid "QoS Specs %(specs_id)s already exists." msgstr "" #: cinder/exception.py:517 #, python-format -msgid "User %(user_id)s is not a member of project %(project_id)s." +msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." msgstr "" -#: cinder/exception.py:521 +#: cinder/exception.py:522 #, python-format -msgid "Role %(role_id)s could not be found." -msgstr "" - -#: cinder/exception.py:525 -msgid "Cannot find SR to read/write VDI." +msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." msgstr "" -#: cinder/exception.py:529 +#: cinder/exception.py:527 #, python-format -msgid "%(req)s is required to create a network." +msgid "No such QoS spec %(specs_id)s." msgstr "" -#: cinder/exception.py:533 +#: cinder/exception.py:531 #, python-format -msgid "Network %(network_id)s could not be found." +msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." msgstr "" -#: cinder/exception.py:537 +#: cinder/exception.py:536 #, python-format -msgid "Network could not be found for bridge %(bridge)s" +msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." msgstr "" #: cinder/exception.py:541 #, python-format -msgid "Network could not be found for uuid %(uuid)s" +msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." msgstr "" -#: cinder/exception.py:545 +#: cinder/exception.py:546 #, python-format -msgid "Network could not be found with cidr %(cidr)s." +msgid "Invalid qos specs: %(reason)s" msgstr "" -#: cinder/exception.py:549 +#: cinder/exception.py:550 #, python-format -msgid "Network could not be found for instance %(instance_id)s." +msgid "QoS Specs %(specs_id)s is still associated with entities." msgstr "" -#: cinder/exception.py:553 -msgid "No networks defined." +#: cinder/exception.py:554 +#, python-format +msgid "key manager error: %(reason)s" msgstr "" -#: cinder/exception.py:557 -#, python-format -msgid "" -"Either Network uuid %(network_uuid)s is not present or is not assigned to" -" the project %(project_id)s." 
+#: cinder/exception.py:560 +msgid "Coraid Cinder Driver exception." msgstr "" -#: cinder/exception.py:562 -#, python-format -msgid "Host is not set to the network (%(network_id)s)." +#: cinder/exception.py:564 +msgid "Failed to encode json data." msgstr "" -#: cinder/exception.py:566 -#, python-format -msgid "Network %(network)s has active ports, cannot delete." +#: cinder/exception.py:568 +msgid "Login on ESM failed." msgstr "" -#: cinder/exception.py:570 -msgid "Could not find the datastore reference(s) which the VM uses." +#: cinder/exception.py:572 +msgid "Relogin on ESM failed." msgstr "" -#: cinder/exception.py:574 +#: cinder/exception.py:576 #, python-format -msgid "No fixed IP associated with id %(id)s." +msgid "Group with name \"%(group_name)s\" not found." msgstr "" -#: cinder/exception.py:578 +#: cinder/exception.py:580 #, python-format -msgid "Fixed ip not found for address %(address)s." +msgid "ESM configure request failed: %(message)s." msgstr "" -#: cinder/exception.py:582 +#: cinder/exception.py:584 #, python-format -msgid "Instance %(instance_id)s has zero fixed ips." +msgid "Coraid ESM not available with reason: %(reason)s." msgstr "" -#: cinder/exception.py:586 -#, python-format -msgid "Network host %(host)s has zero fixed ips in network %(network_id)s." +#: cinder/exception.py:589 +msgid "Zadara Cinder Driver exception." msgstr "" -#: cinder/exception.py:591 +#: cinder/exception.py:593 #, python-format -msgid "Instance %(instance_id)s doesn't have fixed ip '%(ip)s'." +msgid "Unable to create server object for initiator %(name)s" msgstr "" -#: cinder/exception.py:595 +#: cinder/exception.py:597 #, python-format -msgid "Host %(host)s has zero fixed ips." +msgid "Unable to find server object for initiator %(name)s" msgstr "" -#: cinder/exception.py:599 -#, python-format -msgid "" -"Fixed IP address (%(address)s) does not exist in network " -"(%(network_uuid)s)." +#: cinder/exception.py:601 +msgid "Unable to find any active VPSA controller" msgstr "" -#: cinder/exception.py:604 +#: cinder/exception.py:605 #, python-format -msgid "Fixed IP address %(address)s is already in use." +msgid "Failed to retrieve attachments for volume %(name)s" msgstr "" -#: cinder/exception.py:608 +#: cinder/exception.py:609 #, python-format -msgid "Fixed IP address %(address)s is invalid." +msgid "Invalid attachment info for volume %(name)s: %(reason)s" msgstr "" -#: cinder/exception.py:612 -msgid "Zero fixed ips available." +#: cinder/exception.py:613 +#, python-format +msgid "Bad HTTP response status %(status)s" msgstr "" -#: cinder/exception.py:616 -msgid "Zero fixed ips could be found." +#: cinder/exception.py:618 +msgid "Bad response from SolidFire API" msgstr "" -#: cinder/exception.py:620 -#, python-format -msgid "Floating ip not found for id %(id)s." +#: cinder/exception.py:622 +msgid "SolidFire Cinder Driver exception" msgstr "" -#: cinder/exception.py:624 +#: cinder/exception.py:626 #, python-format -msgid "The DNS entry %(name)s already exists in domain %(domain)s." +msgid "Error in SolidFire API response: data=%(data)s" msgstr "" -#: cinder/exception.py:628 +#: cinder/exception.py:630 #, python-format -msgid "Floating ip not found for address %(address)s." +msgid "Unable to locate account %(account_name)s on Solidfire device" msgstr "" -#: cinder/exception.py:632 +#: cinder/exception.py:636 #, python-format -msgid "Floating ip not found for host %(host)s." +msgid "Invalid 3PAR Domain: %(err)s" msgstr "" -#: cinder/exception.py:636 -msgid "Zero floating ips available." 
+#: cinder/exception.py:641 +msgid "Unknown NFS exception" msgstr "" -#: cinder/exception.py:640 -#, python-format -msgid "Floating ip %(address)s is associated." +#: cinder/exception.py:645 +msgid "No mounted NFS shares found" msgstr "" -#: cinder/exception.py:644 +#: cinder/exception.py:649 cinder/exception.py:662 #, python-format -msgid "Floating ip %(address)s is not associated." +msgid "There is no share which can host %(volume_size)sG" msgstr "" -#: cinder/exception.py:648 -msgid "Zero floating ips exist." +#: cinder/exception.py:654 +msgid "Unknown Gluster exception" msgstr "" -#: cinder/exception.py:652 -#, python-format -msgid "Interface %(interface)s not found." -msgstr "" - -#: cinder/exception.py:656 -#, python-format -msgid "Keypair %(name)s not found for user %(user_id)s" +#: cinder/exception.py:658 +msgid "No mounted Gluster shares found" msgstr "" -#: cinder/exception.py:660 -#, python-format -msgid "Certificate %(certificate_id)s not found." +#: cinder/manager.py:133 +msgid "Notifying Schedulers of capabilities ..." msgstr "" -#: cinder/exception.py:664 -#, python-format -msgid "Service %(service_id)s could not be found." +#: cinder/policy.py:30 +msgid "JSON file representing policy" msgstr "" -#: cinder/exception.py:668 -#, python-format -msgid "Host %(host)s could not be found." +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" msgstr "" -#: cinder/exception.py:672 +#: cinder/quota.py:105 #, python-format -msgid "Compute host %(host)s could not be found." +msgid "" +"Default quota for resource: %(res)s is set by the default quota flag: " +"quota_%(res)s, it is now deprecated. Please use the the default quota " +"class for default quota." msgstr "" -#: cinder/exception.py:676 +#: cinder/quota.py:748 #, python-format -msgid "Could not find binary %(binary)s on host %(host)s." +msgid "Created reservations %s" msgstr "" -#: cinder/exception.py:680 +#: cinder/quota.py:770 #, python-format -msgid "Auth token %(token)s could not be found." +msgid "Failed to commit reservations %s" msgstr "" -#: cinder/exception.py:684 +#: cinder/quota.py:790 #, python-format -msgid "Access Key %(access_key)s could not be found." +msgid "Failed to roll back reservations %s" msgstr "" -#: cinder/exception.py:688 -msgid "Quota could not be found" +#: cinder/quota.py:876 +msgid "Cannot register resource" msgstr "" -#: cinder/exception.py:692 -#, python-format -msgid "Quota for project %(project_id)s could not be found." +#: cinder/quota.py:879 +msgid "Cannot register resources" msgstr "" -#: cinder/exception.py:696 +#: cinder/quota_utils.py:46 #, python-format -msgid "Quota class %(class_name)s could not be found." +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume - " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" msgstr "" -#: cinder/exception.py:700 +#: cinder/quota_utils.py:56 cinder/transfer/api.py:168 +#: cinder/volume/flows/api/create_volume.py:520 #, python-format -msgid "Security group %(security_group_id)s not found." +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" msgstr "" -#: cinder/exception.py:704 +#: cinder/service.py:95 #, python-format -msgid "Security group %(security_group_id)s not found for project %(project_id)s." +msgid "Starting %(topic)s node (version %(version_string)s)" msgstr "" -#: cinder/exception.py:709 +#: cinder/service.py:108 cinder/openstack/common/rpc/service.py:47 #, python-format -msgid "Security group with rule %(rule_id)s not found." 
+msgid "Creating Consumer connection for Service %s" msgstr "" -#: cinder/exception.py:713 +#: cinder/service.py:148 #, python-format msgid "" -"Security group %(security_group_id)s is already associated with the " -"instance %(instance_id)s" +"Report interval must be less than service down time. Current config " +"service_down_time: %(service_down_time)s, report_interval for this: " +"service is: %(report_interval)s. Setting global service_down_time to: " +"%(new_down_time)s" msgstr "" -#: cinder/exception.py:718 -#, python-format -msgid "" -"Security group %(security_group_id)s is not associated with the instance " -"%(instance_id)s" +#: cinder/service.py:216 +msgid "Service killed that has no database entry" msgstr "" -#: cinder/exception.py:723 -#, python-format -msgid "Migration %(migration_id)s could not be found." +#: cinder/service.py:255 +msgid "The service database object disappeared, Recreating it." msgstr "" -#: cinder/exception.py:727 -#, python-format -msgid "Migration not found for instance %(instance_id)s with status %(status)s." +#: cinder/service.py:270 +msgid "Recovered model server connection!" msgstr "" -#: cinder/exception.py:732 -#, python-format -msgid "Console pool %(pool_id)s could not be found." +#: cinder/service.py:276 +msgid "model server went away" msgstr "" -#: cinder/exception.py:736 +#: cinder/service.py:298 #, python-format msgid "" -"Console pool of type %(console_type)s for compute host %(compute_host)s " -"on proxy host %(host)s not found." +"Value of config option %(name)s_workers must be integer greater than 1. " +"Input value ignored." msgstr "" -#: cinder/exception.py:742 -#, python-format -msgid "Console %(console_id)s could not be found." +#: cinder/service.py:373 +msgid "serve() can only be called once" +msgstr "" + +#: cinder/service.py:379 cinder/openstack/common/service.py:166 +#: cinder/openstack/common/service.py:384 +msgid "Full set of CONF:" msgstr "" -#: cinder/exception.py:746 +#: cinder/service.py:387 #, python-format -msgid "Console for instance %(instance_id)s could not be found." +msgid "%s : FLAG SET " msgstr "" -#: cinder/exception.py:750 +#: cinder/utils.py:96 #, python-format -msgid "" -"Console for instance %(instance_id)s in pool %(pool_id)s could not be " -"found." +msgid "Can not translate %s to integer." msgstr "" -#: cinder/exception.py:755 +#: cinder/utils.py:127 #, python-format -msgid "Invalid console type %(console_type)s " +msgid "May specify only one of %s" msgstr "" -#: cinder/exception.py:759 -msgid "Zero instance types found." +#: cinder/utils.py:212 +msgid "Specify a password or private_key" msgstr "" -#: cinder/exception.py:763 +#: cinder/utils.py:228 #, python-format -msgid "Instance type %(instance_type_id)s could not be found." +msgid "Error connecting via ssh: %s" msgstr "" -#: cinder/exception.py:767 +#: cinder/utils.py:412 #, python-format -msgid "Instance type with name %(instance_type_name)s could not be found." +msgid "Invalid backend: %s" msgstr "" -#: cinder/exception.py:772 +#: cinder/utils.py:423 #, python-format -msgid "Flavor %(flavor_id)s could not be found." +msgid "backend %s" msgstr "" -#: cinder/exception.py:776 +#: cinder/utils.py:698 #, python-format -msgid "Cell %(cell_id)s could not be found." +msgid "Could not remove tmpdir: %s" msgstr "" -#: cinder/exception.py:780 +#: cinder/utils.py:759 #, python-format -msgid "Scheduler Host Filter %(filter_name)s could not be found." 
+msgid "Volume driver %s not initialized" msgstr "" -#: cinder/exception.py:784 +#: cinder/wsgi.py:127 cinder/openstack/common/sslutils.py:50 #, python-format -msgid "Scheduler cost function %(cost_fn_str)s could not be found." +msgid "Unable to find cert_file : %s" msgstr "" -#: cinder/exception.py:789 +#: cinder/wsgi.py:130 cinder/openstack/common/sslutils.py:53 #, python-format -msgid "Scheduler weight flag not found: %(flag_name)s" +msgid "Unable to find ca_file : %s" msgstr "" -#: cinder/exception.py:793 +#: cinder/wsgi.py:133 cinder/openstack/common/sslutils.py:56 #, python-format -msgid "Instance %(instance_id)s has no metadata with key %(metadata_key)s." +msgid "Unable to find key_file : %s" msgstr "" -#: cinder/exception.py:798 -#, python-format +#: cinder/wsgi.py:136 cinder/openstack/common/sslutils.py:59 msgid "" -"Instance Type %(instance_type_id)s has no extra specs with key " -"%(extra_specs_key)s." +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" msgstr "" -#: cinder/exception.py:803 -msgid "LDAP object could not be found" +#: cinder/wsgi.py:169 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" msgstr "" -#: cinder/exception.py:807 +#: cinder/wsgi.py:206 #, python-format -msgid "LDAP user %(user_id)s could not be found." +msgid "Started %(name)s on %(host)s:%(port)s" msgstr "" -#: cinder/exception.py:811 -#, python-format -msgid "LDAP group %(group_id)s could not be found." +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." msgstr "" -#: cinder/exception.py:815 -#, python-format -msgid "LDAP user %(user_id)s is not a member of group %(group_id)s." +#: cinder/wsgi.py:244 +msgid "WSGI server has stopped." msgstr "" -#: cinder/exception.py:819 -#, python-format -msgid "File %(file_path)s could not be found." +#: cinder/wsgi.py:313 +msgid "You must implement __call__" msgstr "" -#: cinder/exception.py:823 -msgid "Zero files could be found." +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." msgstr "" -#: cinder/exception.py:827 -#, python-format -msgid "Virtual switch associated with the network adapter %(adapter)s not found." +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." msgstr "" -#: cinder/exception.py:832 -#, python-format -msgid "Network adapter %(adapter)s could not be found." +#: cinder/api/common.py:92 cinder/api/common.py:126 cinder/volume/api.py:266 +msgid "limit param must be an integer" msgstr "" -#: cinder/exception.py:836 -#, python-format -msgid "Class %(class_name)s could not be found: %(exception)s" +#: cinder/api/common.py:95 cinder/api/common.py:130 cinder/volume/api.py:263 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:120 +msgid "offset param must be an integer" msgstr "" -#: cinder/exception.py:840 -msgid "Action not allowed." 
+#: cinder/api/common.py:134 +msgid "offset param must be positive" msgstr "" -#: cinder/exception.py:844 +#: cinder/api/common.py:162 #, python-format -msgid "Unable to use global role %(role_id)s" +msgid "marker [%s] not found" msgstr "" -#: cinder/exception.py:848 -msgid "Rotation is not allowed for snapshots" +#: cinder/api/common.py:189 +#, python-format +msgid "href %s does not contain version" msgstr "" -#: cinder/exception.py:852 -msgid "Rotation param is required for backup image_type" +#: cinder/api/extensions.py:182 +msgid "Initializing extension manager." msgstr "" -#: cinder/exception.py:861 +#: cinder/api/extensions.py:197 #, python-format -msgid "Key pair %(key_name)s already exists." +msgid "Loaded extension: %s" msgstr "" -#: cinder/exception.py:865 +#: cinder/api/extensions.py:235 #, python-format -msgid "User %(user)s already exists." +msgid "Ext name: %s" msgstr "" -#: cinder/exception.py:869 +#: cinder/api/extensions.py:236 #, python-format -msgid "LDAP user %(user)s already exists." +msgid "Ext alias: %s" msgstr "" -#: cinder/exception.py:873 +#: cinder/api/extensions.py:237 #, python-format -msgid "LDAP group %(group)s already exists." +msgid "Ext description: %s" msgstr "" -#: cinder/exception.py:877 +#: cinder/api/extensions.py:239 #, python-format -msgid "User %(uid)s is already a member of the group %(group_dn)s" +msgid "Ext namespace: %s" msgstr "" -#: cinder/exception.py:882 +#: cinder/api/extensions.py:240 #, python-format -msgid "Project %(project)s already exists." +msgid "Ext updated: %s" msgstr "" -#: cinder/exception.py:886 +#: cinder/api/extensions.py:242 #, python-format -msgid "Instance %(name)s already exists." +msgid "Exception loading extension: %s" msgstr "" -#: cinder/exception.py:890 +#: cinder/api/extensions.py:256 #, python-format -msgid "Instance Type %(name)s already exists." +msgid "Loading extension %s" msgstr "" -#: cinder/exception.py:894 +#: cinder/api/extensions.py:262 #, python-format -msgid "Volume Type %(name)s already exists." +msgid "Calling extension factory %s" msgstr "" -#: cinder/exception.py:898 +#: cinder/api/extensions.py:276 #, python-format -msgid "%(path)s is on shared storage: %(reason)s" +msgid "osapi_volume_extension is set to deprecated path: %s" msgstr "" -#: cinder/exception.py:902 -msgid "Migration error" +#: cinder/api/extensions.py:278 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" msgstr "" -#: cinder/exception.py:906 +#: cinder/api/extensions.py:287 #, python-format -msgid "Malformed message body: %(reason)s" +msgid "Failed to load extension %(ext_factory)s: %(exc)s" msgstr "" -#: cinder/exception.py:910 +#: cinder/api/extensions.py:356 #, python-format -msgid "Could not find config at %(path)s" +msgid "Failed to load extension %(classpath)s: %(exc)s" msgstr "" -#: cinder/exception.py:914 +#: cinder/api/extensions.py:381 #, python-format -msgid "Could not load paste app '%(name)s' from %(path)s" +msgid "Failed to load extension %(ext_name)s: %(exc)s" msgstr "" -#: cinder/exception.py:918 -msgid "When resizing, instances must change size!" +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. 
Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" msgstr "" -#: cinder/exception.py:922 -msgid "Image is larger than instance type allows" +#: cinder/api/xmlutil.py:266 +msgid "element is not a child" msgstr "" -#: cinder/exception.py:926 -msgid "1 or more Zones could not complete the request" +#: cinder/api/xmlutil.py:463 +msgid "root element selecting a list" msgstr "" -#: cinder/exception.py:930 -msgid "Instance type's memory is too small for requested image." +#: cinder/api/xmlutil.py:786 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" msgstr "" -#: cinder/exception.py:934 -msgid "Instance type's disk is too small for requested image." +#: cinder/api/xmlutil.py:907 +msgid "subclasses must implement construct()!" msgstr "" -#: cinder/exception.py:938 +#: cinder/api/contrib/admin_actions.py:81 #, python-format -msgid "Insufficient free memory on compute node to start %(uuid)s." +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" msgstr "" -#: cinder/exception.py:942 -msgid "Could not fetch bandwidth/cpu/disk metrics for this host." +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" msgstr "" -#: cinder/exception.py:946 +#: cinder/api/contrib/backups.py:140 #, python-format -msgid "No valid host was found. %(reason)s" +msgid "delete called for member %s" msgstr "" -#: cinder/exception.py:950 +#: cinder/api/contrib/backups.py:143 #, python-format -msgid "Host %(host)s is not up or doesn't exist." +msgid "Delete backup with id: %s" msgstr "" -#: cinder/exception.py:954 -msgid "Quota exceeded" +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" msgstr "" -#: cinder/exception.py:958 -#, python-format -msgid "" -"Aggregate %(aggregate_id)s: action '%(action)s' caused an error: " -"%(reason)s." +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:227 +#: cinder/api/contrib/volume_transfer.py:157 +#: cinder/api/contrib/volume_transfer.py:193 +msgid "Incorrect request body format" msgstr "" -#: cinder/exception.py:963 +#: cinder/api/contrib/backups.py:201 #, python-format -msgid "Aggregate %(aggregate_id)s could not be found." +msgid "Creating backup of volume %(volume_id)s in container %(container)s" msgstr "" -#: cinder/exception.py:967 +#: cinder/api/contrib/backups.py:224 #, python-format -msgid "Aggregate %(aggregate_name)s already exists." +msgid "Restoring backup %(backup_id)s (%(body)s)" msgstr "" -#: cinder/exception.py:971 +#: cinder/api/contrib/backups.py:234 #, python-format -msgid "Aggregate %(aggregate_id)s has no host %(host)s." +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" msgstr "" -#: cinder/exception.py:975 -#, python-format -msgid "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s." +#: cinder/api/contrib/extended_snapshot_attributes.py:60 +msgid "Snapshot not found." +msgstr "" + +#: cinder/api/contrib/hosts.py:86 cinder/api/openstack/wsgi.py:245 +msgid "cannot understand XML" msgstr "" -#: cinder/exception.py:980 +#: cinder/api/contrib/hosts.py:136 #, python-format -msgid "Host %(host)s already member of another aggregate." +msgid "Host '%s' could not be found." msgstr "" -#: cinder/exception.py:984 +#: cinder/api/contrib/hosts.py:165 #, python-format -msgid "Aggregate %(aggregate_id)s already has host %(host)s." 
+msgid "Invalid status: '%s'" msgstr "" -#: cinder/exception.py:988 +#: cinder/api/contrib/hosts.py:168 #, python-format -msgid "Detected more than one volume with name %(vol_name)s" +msgid "Invalid update setting: '%s'" msgstr "" -#: cinder/exception.py:992 +#: cinder/api/contrib/hosts.py:180 #, python-format -msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgid "Setting host %(host)s to %(state)s." msgstr "" -#: cinder/exception.py:997 -msgid "Unable to create instance type" +#: cinder/api/contrib/hosts.py:206 +msgid "Describe-resource is admin only functionality" msgstr "" -#: cinder/exception.py:1001 -msgid "Bad response from SolidFire API" +#: cinder/api/contrib/hosts.py:214 +msgid "Host not found" msgstr "" -#: cinder/exception.py:1005 -#, python-format -msgid "Error in SolidFire API response: status=%(status)s" +#: cinder/api/contrib/qos_specs_manage.py:111 +msgid "Please specify a name for QoS specs." msgstr "" -#: cinder/exception.py:1009 -#, python-format -msgid "Error in SolidFire API response: data=%(data)s" +#: cinder/api/contrib/qos_specs_manage.py:220 +msgid "Failed to disassociate qos specs." msgstr "" -#: cinder/exception.py:1013 -#, python-format -msgid "Detected existing vlan with id %(vlan)d" +#: cinder/api/contrib/qos_specs_manage.py:222 +msgid "Qos specs still in use." msgstr "" -#: cinder/exception.py:1017 -#, python-format -msgid "Instance %(instance_id)s could not be found." +#: cinder/api/contrib/qos_specs_manage.py:298 +#: cinder/api/contrib/qos_specs_manage.py:351 +msgid "Volume Type id must not be None." msgstr "" -#: cinder/exception.py:1021 -#, python-format -msgid "Invalid id: %(val)s (expecting \"i-...\")." +#: cinder/api/contrib/quota_classes.py:72 +msgid "Missing required element quota_class_set in request body." msgstr "" -#: cinder/exception.py:1025 -#, python-format -msgid "Could not fetch image %(image)s" +#: cinder/api/contrib/quota_classes.py:81 +msgid "Quota class limit must be specified as an integer value." msgstr "" -#: cinder/log.py:315 -#, python-format -msgid "syslog facility must be one of: %s" +#: cinder/api/contrib/quota_classes.py:85 +msgid "Quota class limit must be -1 or greater." msgstr "" -#: cinder/manager.py:146 -#, python-format -msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +#: cinder/api/contrib/quotas.py:60 +msgid "Quota limit must be specified as an integer value." msgstr "" -#: cinder/manager.py:152 -#, python-format -msgid "Running periodic task %(full_task_name)s" +#: cinder/api/contrib/quotas.py:65 +msgid "Quota limit must be -1 or greater." msgstr "" -#: cinder/manager.py:159 -#, python-format -msgid "Error during %(full_task_name)s: %(e)s" +#: cinder/api/contrib/quotas.py:100 +msgid "Missing required element quota_set in request body." msgstr "" -#: cinder/manager.py:203 -msgid "Notifying Schedulers of capabilities ..." +#: cinder/api/contrib/quotas.py:111 +#, python-format +msgid "Bad key(s) in quota set: %s" msgstr "" -#: cinder/policy.py:30 -msgid "JSON file representing policy" +#: cinder/api/contrib/scheduler_hints.py:36 +msgid "Malformed scheduler_hints attribute" msgstr "" -#: cinder/policy.py:33 -msgid "Rule checked when requested rule is not found" +#: cinder/api/contrib/services.py:84 +msgid "" +"Query by service parameter is deprecated. Please use binary parameter " +"instead." msgstr "" -#: cinder/service.py:137 -msgid "SIGTERM received" +#: cinder/api/contrib/snapshot_actions.py:51 +msgid "'status' must be specified." 
msgstr "" -#: cinder/service.py:177 +#: cinder/api/contrib/snapshot_actions.py:61 #, python-format -msgid "Starting %(topic)s node (version %(vcs_string)s)" +msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" msgstr "" -#: cinder/service.py:195 +#: cinder/api/contrib/snapshot_actions.py:67 #, python-format -msgid "Creating Consumer connection for Service %s" +msgid "" +"Provided snapshot status %(provided)s not allowed for snapshot with " +"status %(current)s." msgstr "" -#: cinder/service.py:282 -msgid "Service killed that has no database entry" +#: cinder/api/contrib/snapshot_actions.py:79 +msgid "progress must be an integer percentage" msgstr "" -#: cinder/service.py:319 -msgid "The service database object disappeared, Recreating it." +#: cinder/api/contrib/types_extra_specs.py:101 +msgid "Request body empty" msgstr "" -#: cinder/service.py:334 -msgid "Recovered model server connection!" +#: cinder/api/contrib/types_extra_specs.py:105 +#: cinder/api/v1/snapshot_metadata.py:75 cinder/api/v1/volume_metadata.py:75 +#: cinder/api/v2/snapshot_metadata.py:75 cinder/api/v2/volume_metadata.py:74 +msgid "Request body and URI mismatch" msgstr "" -#: cinder/service.py:340 -msgid "model server went away" +#: cinder/api/contrib/types_extra_specs.py:108 +#: cinder/api/v1/snapshot_metadata.py:79 cinder/api/v1/volume_metadata.py:79 +#: cinder/api/v2/snapshot_metadata.py:79 cinder/api/v2/volume_metadata.py:78 +msgid "Request body contains too many items" msgstr "" -#: cinder/service.py:433 -msgid "Full set of FLAGS:" +#: cinder/api/contrib/types_extra_specs.py:150 +msgid "" +"Key names can only contain alphanumeric characters, underscores, periods," +" colons and hyphens." msgstr "" -#: cinder/service.py:440 +#: cinder/api/contrib/volume_actions.py:99 #, python-format -msgid "%(flag)s : FLAG SET " +msgid "" +"Invalid request to attach volume to an instance %(instance_uuid)s and a " +"host %(host_name)s simultaneously" msgstr "" -#: cinder/utils.py:79 -#, python-format -msgid "Inner Exception: %s" +#: cinder/api/contrib/volume_actions.py:107 +msgid "Invalid request to attach volume to an invalid target" msgstr "" -#: cinder/utils.py:165 -#, python-format -msgid "Fetching %s" +#: cinder/api/contrib/volume_actions.py:111 +msgid "" +"Invalid request to attach volume with an invalid mode. Attaching mode " +"should be 'rw' or 'ro'" msgstr "" -#: cinder/utils.py:210 -#, python-format -msgid "Got unknown keyword args to utils.execute: %r" +#: cinder/api/contrib/volume_actions.py:196 +msgid "Unable to fetch connection information from backend." msgstr "" -#: cinder/utils.py:220 -#, python-format -msgid "Running cmd (subprocess): %s" +#: cinder/api/contrib/volume_actions.py:216 +msgid "Unable to terminate volume connection from backend." msgstr "" -#: cinder/utils.py:236 cinder/utils.py:315 -#, python-format -msgid "Result was %s" +#: cinder/api/contrib/volume_actions.py:229 +msgid "No image_name was specified in request." msgstr "" -#: cinder/utils.py:249 -#, python-format -msgid "%r failed. Retrying." +#: cinder/api/contrib/volume_actions.py:237 +msgid "Bad value for 'force' parameter." msgstr "" -#: cinder/utils.py:291 -#, python-format -msgid "Running cmd (SSH): %s" +#: cinder/api/contrib/volume_actions.py:240 +msgid "'force' is not string or bool." msgstr "" -#: cinder/utils.py:293 -msgid "Environment not supported over SSH" +#: cinder/api/contrib/volume_actions.py:280 +msgid "New volume size must be specified as an integer." 
msgstr "" -#: cinder/utils.py:297 -msgid "process_input not supported over SSH" +#: cinder/api/contrib/volume_actions.py:299 +msgid "Must specify readonly in request." msgstr "" -#: cinder/utils.py:352 -#, python-format -msgid "debug in callback: %s" +#: cinder/api/contrib/volume_actions.py:307 +msgid "Bad value for 'readonly'" msgstr "" -#: cinder/utils.py:534 -#, python-format -msgid "Link Local address is not found.:%s" +#: cinder/api/contrib/volume_actions.py:311 +msgid "'readonly' not string or bool" msgstr "" -#: cinder/utils.py:537 -#, python-format -msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +#: cinder/api/contrib/volume_actions.py:325 +msgid "New volume type must be specified." msgstr "" -#: cinder/utils.py:648 +#: cinder/api/contrib/volume_transfer.py:131 +msgid "Listing volume transfers" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:147 +#, fuzzy, python-format +msgid "Creating new volume transfer %s" +msgstr "Створити розділ на %s ГБ" + +#: cinder/api/contrib/volume_transfer.py:162 +#, fuzzy, python-format +msgid "Creating transfer of volume %s" +msgstr "Створити розділ на %s ГБ" + +#: cinder/api/contrib/volume_transfer.py:183 #, python-format -msgid "Invalid backend: %s" +msgid "Accepting volume transfer %s" msgstr "" -#: cinder/utils.py:659 +#: cinder/api/contrib/volume_transfer.py:196 +#, fuzzy, python-format +msgid "Accepting transfer %s" +msgstr "Створити розділ на %s ГБ" + +#: cinder/api/contrib/volume_transfer.py:217 #, python-format -msgid "backend %s" +msgid "Delete transfer with id: %s" msgstr "" -#: cinder/utils.py:709 -msgid "in looping call" +#: cinder/api/contrib/volume_type_encryption.py:64 +msgid "key_size must be non-negative" msgstr "" -#: cinder/utils.py:927 -#, python-format -msgid "Attempting to grab semaphore \"%(lock)s\" for method \"%(method)s\"..." +#: cinder/api/contrib/volume_type_encryption.py:67 +msgid "key_size must be an integer" msgstr "" -#: cinder/utils.py:931 -#, python-format -msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +#: cinder/api/contrib/volume_type_encryption.py:73 +msgid "provider must be defined" msgstr "" -#: cinder/utils.py:935 -#, python-format -msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +#: cinder/api/contrib/volume_type_encryption.py:75 +msgid "control_location must be defined" msgstr "" -#: cinder/utils.py:942 +#: cinder/api/contrib/volume_type_encryption.py:83 #, python-format -msgid "Got file lock \"%(lock)s\" for method \"%(method)s\"..." +msgid "Valid control location are: %s" msgstr "" -#: cinder/utils.py:1001 -#, python-format -msgid "Found sentinel %(filename)s for pid %(pid)s" +#: cinder/api/contrib/volume_type_encryption.py:111 +msgid "Cannot create encryption specs. Volume type in use." msgstr "" -#: cinder/utils.py:1008 -#, python-format -msgid "Cleaned sentinel %(filename)s for pid %(pid)s" +#: cinder/api/contrib/volume_type_encryption.py:115 +msgid "Create body is not valid." msgstr "" -#: cinder/utils.py:1023 -#, python-format -msgid "Found lockfile %(file)s with link count %(count)d" +#: cinder/api/contrib/volume_type_encryption.py:157 +msgid "Cannot delete encryption specs. Volume type in use." msgstr "" -#: cinder/utils.py:1028 -#, python-format -msgid "Cleaned lockfile %(file)s with link count %(count)d" +#: cinder/api/middleware/auth.py:108 +msgid "Invalid service catalog json." 
msgstr "" -#: cinder/utils.py:1138 +#: cinder/api/middleware/fault.py:44 #, python-format -msgid "Expected object of type: %s" +msgid "Caught error: %s" msgstr "" -#: cinder/utils.py:1169 +#: cinder/api/middleware/fault.py:53 cinder/api/openstack/wsgi.py:984 #, python-format -msgid "Invalid server_string: %s" +msgid "%(url)s returned with HTTP %(status)d" msgstr "" -#: cinder/utils.py:1298 +#: cinder/api/middleware/fault.py:69 #, python-format -msgid "timefunc: '%(name)s' took %(total_time).2f secs" +msgid "%(exception)s: %(explanation)s" msgstr "" -#: cinder/utils.py:1330 -msgid "Original exception being dropped" +#: cinder/api/middleware/sizelimit.py:55 cinder/api/middleware/sizelimit.py:64 +#: cinder/api/middleware/sizelimit.py:78 +msgid "Request is too large." msgstr "" -#: cinder/utils.py:1461 -#, python-format -msgid "Class %(fullname)s is deprecated: %(msg)s" +#: cinder/api/openstack/__init__.py:69 +msgid "Must specify an ExtensionManager class" msgstr "" -#: cinder/utils.py:1463 +#: cinder/api/openstack/__init__.py:80 #, python-format -msgid "Class %(fullname)s is deprecated" +msgid "Extended resource: %s" msgstr "" -#: cinder/utils.py:1495 +#: cinder/api/openstack/__init__.py:104 #, python-format -msgid "Function %(name)s in %(location)s is deprecated: %(msg)s" +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" msgstr "" -#: cinder/utils.py:1497 +#: cinder/api/openstack/__init__.py:110 #, python-format -msgid "Function %(name)s in %(location)s is deprecated" +msgid "Extension %(ext_name)s extending resource: %(collection)s" msgstr "" -#: cinder/utils.py:1681 -#, python-format -msgid "Could not remove tmpdir: %s" +#: cinder/api/openstack/__init__.py:126 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." msgstr "" -#: cinder/wsgi.py:97 -#, python-format -msgid "Started %(name)s on %(host)s:%(port)s" +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." msgstr "" -#: cinder/wsgi.py:108 -msgid "Stopping WSGI server." +#: cinder/api/openstack/wsgi.py:220 cinder/api/openstack/wsgi.py:634 +msgid "cannot understand JSON" msgstr "" -#: cinder/wsgi.py:111 -msgid "Stopping raw TCP server." +#: cinder/api/openstack/wsgi.py:639 +msgid "too many body keys" msgstr "" -#: cinder/wsgi.py:117 +#: cinder/api/openstack/wsgi.py:677 #, python-format -msgid "Starting TCP server %(arg0)s on %(host)s:%(port)s" +msgid "Exception handling resource: %s" msgstr "" -#: cinder/wsgi.py:133 -msgid "WSGI server has stopped." +#: cinder/api/openstack/wsgi.py:682 +#, python-format +msgid "Fault thrown: %s" msgstr "" -#: cinder/wsgi.py:211 -msgid "You must implement __call__" +#: cinder/api/openstack/wsgi.py:685 +#, python-format +msgid "HTTP exception thrown: %s" msgstr "" -#: cinder/api/direct.py:218 -msgid "not available" +#: cinder/api/openstack/wsgi.py:793 +msgid "Empty body provided in request" msgstr "" -#: cinder/api/direct.py:299 -#, python-format -msgid "Returned non-serializeable type: %s" +#: cinder/api/openstack/wsgi.py:799 +msgid "Unrecognized Content-Type provided in request" msgstr "" -#: cinder/api/sizelimit.py:51 -msgid "Request is too large." 
+#: cinder/api/openstack/wsgi.py:803 +msgid "No Content-Type provided in request" msgstr "" -#: cinder/api/validator.py:142 +#: cinder/api/openstack/wsgi.py:914 #, python-format -msgid "%(key)s with value %(value)s failed validator %(validator)s" +msgid "There is no such action: %s" msgstr "" -#: cinder/api/ec2/__init__.py:73 -#, python-format -msgid "%(code)s: %(message)s" +#: cinder/api/openstack/wsgi.py:917 cinder/api/openstack/wsgi.py:930 +#: cinder/api/v1/snapshot_metadata.py:53 cinder/api/v1/snapshot_metadata.py:71 +#: cinder/api/v1/snapshot_metadata.py:96 cinder/api/v1/snapshot_metadata.py:121 +#: cinder/api/v1/volume_metadata.py:53 cinder/api/v1/volume_metadata.py:71 +#: cinder/api/v1/volume_metadata.py:96 cinder/api/v1/volume_metadata.py:121 +#: cinder/api/v2/snapshot_metadata.py:53 cinder/api/v2/snapshot_metadata.py:71 +#: cinder/api/v2/snapshot_metadata.py:96 cinder/api/v2/snapshot_metadata.py:121 +#: cinder/api/v2/volume_metadata.py:52 cinder/api/v2/volume_metadata.py:70 +#: cinder/api/v2/volume_metadata.py:95 cinder/api/v2/volume_metadata.py:120 +msgid "Malformed request body" msgstr "" -#: cinder/api/ec2/__init__.py:95 -#, python-format -msgid "FaultWrapper: %s" +#: cinder/api/openstack/wsgi.py:927 +msgid "Unsupported Content-Type" msgstr "" -#: cinder/api/ec2/__init__.py:170 -msgid "Too many failed authentications." -msgstr "Занадто багато невдалих аутентифікацій." +#: cinder/api/openstack/wsgi.py:939 +msgid "Malformed request url" +msgstr "" -#: cinder/api/ec2/__init__.py:180 +#: cinder/api/openstack/wsgi.py:987 #, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 msgid "" -"Access key %(access_key)s has had %(failures)d failed authentications and" -" will be locked out for %(lock_mins)d minutes." +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." msgstr "" -#: cinder/api/ec2/__init__.py:267 -msgid "Signature not provided" +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." msgstr "" -#: cinder/api/ec2/__init__.py:271 -msgid "Access key not provided" +#: cinder/api/v1/limits.py:139 cinder/api/v2/limits.py:138 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." msgstr "" -#: cinder/api/ec2/__init__.py:306 cinder/api/ec2/__init__.py:319 -msgid "Failure communicating with keystone" +#: cinder/api/v1/limits.py:264 cinder/api/v2/limits.py:261 +msgid "This request was rate-limited." 
msgstr "" -#: cinder/api/ec2/__init__.py:388 -#, python-format -msgid "Authentication Failure: %s" +#: cinder/api/v1/snapshot_metadata.py:37 cinder/api/v1/snapshot_metadata.py:117 +#: cinder/api/v1/snapshot_metadata.py:156 cinder/api/v2/snapshot_metadata.py:37 +#: cinder/api/v2/snapshot_metadata.py:117 +#: cinder/api/v2/snapshot_metadata.py:156 +msgid "snapshot does not exist" msgstr "" -#: cinder/api/ec2/__init__.py:404 -#, python-format -msgid "Authenticated Request For %(uname)s:%(pname)s)" +#: cinder/api/v1/snapshot_metadata.py:139 +#: cinder/api/v1/snapshot_metadata.py:149 cinder/api/v1/volume_metadata.py:139 +#: cinder/api/v1/volume_metadata.py:149 cinder/api/v2/snapshot_metadata.py:139 +#: cinder/api/v2/snapshot_metadata.py:149 cinder/api/v2/volume_metadata.py:138 +#: cinder/api/v2/volume_metadata.py:148 +msgid "Metadata item was not found" msgstr "" -#: cinder/api/ec2/__init__.py:435 +#: cinder/api/v1/snapshots.py:119 cinder/api/v2/snapshots.py:120 #, python-format -msgid "action: %s" +msgid "Delete snapshot with id: %s" msgstr "" -#: cinder/api/ec2/__init__.py:437 -#, python-format -msgid "arg: %(key)s\t\tval: %(value)s" +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:184 +msgid "'volume_id' must be specified" msgstr "" -#: cinder/api/ec2/__init__.py:512 +#: cinder/api/v1/snapshots.py:182 cinder/api/v2/snapshots.py:193 #, python-format -msgid "Unauthorized request for controller=%(controller)s and action=%(action)s" +msgid "Create snapshot from volume %s" msgstr "" -#: cinder/api/ec2/__init__.py:584 +#: cinder/api/v1/snapshots.py:186 cinder/api/v2/snapshots.py:202 #, python-format -msgid "InstanceNotFound raised: %s" +msgid "Invalid value '%s' for force. " msgstr "" -#: cinder/api/ec2/__init__.py:590 -#, python-format -msgid "VolumeNotFound raised: %s" +#: cinder/api/v1/volume_metadata.py:37 cinder/api/v1/volume_metadata.py:117 +#: cinder/api/v1/volume_metadata.py:156 cinder/api/v2/volume_metadata.py:36 +#: cinder/api/v2/volume_metadata.py:116 cinder/api/v2/volume_metadata.py:155 +msgid "volume does not exist" msgstr "" -#: cinder/api/ec2/__init__.py:596 +#: cinder/api/v1/volumes.py:111 #, python-format -msgid "SnapshotNotFound raised: %s" +msgid "vol=%s" msgstr "" -#: cinder/api/ec2/__init__.py:602 +#: cinder/api/v1/volumes.py:290 cinder/api/v2/volumes.py:228 #, python-format -msgid "NotFound raised: %s" +msgid "Delete volume with id: %s" msgstr "" -#: cinder/api/ec2/__init__.py:605 -#, python-format -msgid "EC2APIError raised: %s" +#: cinder/api/v1/volumes.py:344 cinder/api/v1/volumes.py:348 +#: cinder/api/v2/volumes.py:298 cinder/api/v2/volumes.py:302 +msgid "Invalid imageRef provided." 
msgstr "" -#: cinder/api/ec2/__init__.py:613 +#: cinder/api/v1/volumes.py:388 cinder/api/v2/volumes.py:354 #, python-format -msgid "KeyPairExists raised: %s" +msgid "snapshot id:%s not found" msgstr "" -#: cinder/api/ec2/__init__.py:617 +#: cinder/api/v1/volumes.py:401 #, python-format -msgid "InvalidParameterValue raised: %s" +msgid "source vol id:%s not found" msgstr "" -#: cinder/api/ec2/__init__.py:621 +#: cinder/api/v1/volumes.py:412 cinder/api/v2/volumes.py:377 #, python-format -msgid "InvalidPortRange raised: %s" -msgstr "" +msgid "Create volume of %s GB" +msgstr "Створити розділ на %s ГБ" -#: cinder/api/ec2/__init__.py:625 +#: cinder/api/v1/volumes.py:496 #, python-format -msgid "NotAuthorized raised: %s" +msgid "Removing options '%(bad_options)s' from query" msgstr "" -#: cinder/api/ec2/__init__.py:629 -#, python-format -msgid "InvalidRequest raised: %s" +#: cinder/api/v2/snapshots.py:111 cinder/api/v2/snapshots.py:126 +#: cinder/api/v2/snapshots.py:267 +msgid "Snapshot could not be found" msgstr "" -#: cinder/api/ec2/__init__.py:633 +#: cinder/api/v2/snapshots.py:174 cinder/api/v2/snapshots.py:234 +#: cinder/api/v2/volumes.py:313 cinder/api/v2/volumes.py:419 #, python-format -msgid "QuotaError raised: %s" +msgid "Missing required element '%s' in request body" msgstr "" -#: cinder/api/ec2/__init__.py:637 -#, python-format -msgid "Invalid id: bogus (expecting \"i-...\"): %s" +#: cinder/api/v2/snapshots.py:190 cinder/api/v2/volumes.py:217 +#: cinder/api/v2/volumes.py:234 cinder/api/v2/volumes.py:449 +msgid "Volume could not be found" msgstr "" -#: cinder/api/ec2/__init__.py:646 -#, python-format -msgid "Unexpected error raised: %s" +#: cinder/api/v2/snapshots.py:230 cinder/api/v2/volumes.py:415 +msgid "Missing request body" msgstr "" -#: cinder/api/ec2/__init__.py:647 -#, python-format -msgid "Environment: %s" +#: cinder/api/v2/types.py:70 +msgid "Volume type not found" msgstr "" -#: cinder/api/ec2/__init__.py:649 cinder/api/metadata/handler.py:248 -msgid "An unknown error has occurred. Please try your request again." +#: cinder/api/v2/volumes.py:237 +msgid "Volume cannot be deleted while in attached state" msgstr "" -#: cinder/api/ec2/apirequest.py:64 -#, python-format -msgid "Unsupported API request: controller = %(controller)s, action = %(action)s" +#: cinder/api/v2/volumes.py:343 +msgid "Volume type not found." msgstr "" -#: cinder/api/ec2/cloud.py:336 +#: cinder/api/v2/volumes.py:366 #, python-format -msgid "Create snapshot of volume %s" +msgid "source volume id:%s not found" msgstr "" -#: cinder/api/ec2/cloud.py:372 +#: cinder/api/v2/volumes.py:472 #, python-format -msgid "" -"Value (%s) for KeyName is invalid. Content limited to Alphanumeric " -"character, spaces, dashes, and underscore." +msgid "Removing options '%s' from query" msgstr "" -#: cinder/api/ec2/cloud.py:378 -#, python-format -msgid "Value (%s) for Keyname is invalid. Length exceeds maximum of 255." 
+#: cinder/backup/api.py:66 +msgid "Backup status must be available or error" msgstr "" -#: cinder/api/ec2/cloud.py:382 -#, python-format -msgid "Create key pair %s" +#: cinder/backup/api.py:105 +msgid "Volume to be backed up must be available" msgstr "" -#: cinder/api/ec2/cloud.py:391 -#, python-format -msgid "Import key %s" +#: cinder/backup/api.py:140 +msgid "Backup status must be available" msgstr "" -#: cinder/api/ec2/cloud.py:409 -#, python-format -msgid "Delete key pair %s" +#: cinder/backup/api.py:145 +msgid "Backup to be restored has invalid size" msgstr "" -#: cinder/api/ec2/cloud.py:551 -msgid "Invalid CIDR" +#: cinder/backup/api.py:154 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" msgstr "" -#: cinder/api/ec2/cloud.py:639 cinder/api/ec2/cloud.py:693 -#: cinder/api/ec2/cloud.py:800 -msgid "Not enough parameters, need group_name or group_id" +#: cinder/backup/api.py:170 +msgid "Volume to be restored to must be available" msgstr "" -#: cinder/api/ec2/cloud.py:654 -#: cinder/api/openstack/compute/contrib/security_groups.py:517 +#: cinder/backup/api.py:176 #, python-format -msgid "Revoke security group ingress %s" +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." msgstr "" -#: cinder/api/ec2/cloud.py:666 cinder/api/ec2/cloud.py:719 +#: cinder/backup/api.py:181 #, python-format -msgid "%s Not enough parameters to build a valid rule" +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" msgstr "" -#: cinder/api/ec2/cloud.py:684 cinder/api/ec2/cloud.py:744 -msgid "No rule for the specified parameters." +#: cinder/backup/manager.py:97 +msgid "NULL host not allowed for volume backend lookup." msgstr "" -#: cinder/api/ec2/cloud.py:708 -#: cinder/api/openstack/compute/contrib/security_groups.py:354 +#: cinder/backup/manager.py:100 #, python-format -msgid "Authorize security group ingress %s" +msgid "Checking hostname '%s' for backend info." msgstr "" -#: cinder/api/ec2/cloud.py:725 -#, fuzzy, python-format -msgid "%s - This rule already exists in group" -msgstr "Це правило вже існує в групі %s" - -#: cinder/api/ec2/cloud.py:769 +#: cinder/backup/manager.py:107 #, python-format -msgid "" -"Value (%s) for parameter GroupName is invalid. Content limited to " -"Alphanumeric characters, spaces, dashes, and underscores." +msgid "Backend not found in hostname (%s) so using default." msgstr "" -#: cinder/api/ec2/cloud.py:776 +#: cinder/backup/manager.py:117 #, python-format -msgid "" -"Value (%s) for parameter GroupName is invalid. Length exceeds maximum of " -"255." +msgid "Manager requested for volume_backend '%s'." msgstr "" -#: cinder/api/ec2/cloud.py:780 -#: cinder/api/openstack/compute/contrib/security_groups.py:292 -#, python-format -msgid "Create Security Group %s" +#: cinder/backup/manager.py:120 cinder/backup/manager.py:132 +msgid "Fetching default backend." msgstr "" -#: cinder/api/ec2/cloud.py:783 +#: cinder/backup/manager.py:123 #, python-format -msgid "group %s already exists" +msgid "Volume manager for backend '%s' does not exist." msgstr "" -#: cinder/api/ec2/cloud.py:815 -#: cinder/api/openstack/compute/contrib/security_groups.py:245 +#: cinder/backup/manager.py:129 #, python-format -msgid "Delete security group %s" -msgstr "Вилучити групу безпеки %s" +msgid "Driver requested for volume_backend '%s'." 
+msgstr "" -#: cinder/api/ec2/cloud.py:823 cinder/compute/manager.py:1630 +#: cinder/backup/manager.py:147 #, python-format -msgid "Get console output for instance %s" +msgid "" +"Registering backend %(backend)s (host=%(host)s " +"backend_name=%(backend_name)s)." msgstr "" -#: cinder/api/ec2/cloud.py:894 +#: cinder/backup/manager.py:154 #, python-format -msgid "Create volume from snapshot %s" +msgid "Registering default backend %s." msgstr "" -#: cinder/api/ec2/cloud.py:898 cinder/api/openstack/compute/contrib/volumes.py:186 -#: cinder/api/openstack/volume/volumes.py:222 +#: cinder/backup/manager.py:158 #, python-format -msgid "Create volume of %s GB" -msgstr "Створити розділ на %s ГБ" - -#: cinder/api/ec2/cloud.py:921 -msgid "Delete Failed" +msgid "Starting volume driver %(driver_name)s (%(version)s)." msgstr "" -#: cinder/api/ec2/cloud.py:931 +#: cinder/backup/manager.py:165 #, python-format -msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" +msgid "Error encountered during initialization of driver: %(name)s." msgstr "" -#: cinder/api/ec2/cloud.py:939 -msgid "Attach Failed." +#: cinder/backup/manager.py:184 +msgid "Cleaning up incomplete backup operations." msgstr "" -#: cinder/api/ec2/cloud.py:952 cinder/api/openstack/compute/contrib/volumes.py:366 +#: cinder/backup/manager.py:189 #, python-format -msgid "Detach volume %s" -msgstr "Від'єднати том %s" - -#: cinder/api/ec2/cloud.py:959 -#, fuzzy, python-format -msgid "Detach Volume Failed." -msgstr "Від'єднати том %s" +msgid "Resetting volume %s to available (was backing-up)." +msgstr "" -#: cinder/api/ec2/cloud.py:984 cinder/api/ec2/cloud.py:1041 -#: cinder/api/ec2/cloud.py:1518 cinder/api/ec2/cloud.py:1533 +#: cinder/backup/manager.py:194 #, python-format -msgid "attribute not supported: %s" +msgid "Resetting volume %s to error_restoring (was restoring-backup)." msgstr "" -#: cinder/api/ec2/cloud.py:1107 +#: cinder/backup/manager.py:206 #, python-format -msgid "vol = %s\n" +msgid "Resetting backup %s to error (was creating)." msgstr "" -#: cinder/api/ec2/cloud.py:1255 -msgid "Allocate address" +#: cinder/backup/manager.py:212 +#, python-format +msgid "Resetting backup %s to available (was restoring)." msgstr "" -#: cinder/api/ec2/cloud.py:1267 +#: cinder/backup/manager.py:217 #, python-format -msgid "Release address %s" +msgid "Resuming delete on backup: %s." msgstr "" -#: cinder/api/ec2/cloud.py:1272 +#: cinder/backup/manager.py:225 #, python-format -msgid "Associate address %(public_ip)s to instance %(instance_id)s" +msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." msgstr "" -#: cinder/api/ec2/cloud.py:1282 +#: cinder/backup/manager.py:237 #, python-format -msgid "Disassociate address %s" +msgid "" +"Create backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s." msgstr "" -#: cinder/api/ec2/cloud.py:1308 -msgid "Image must be available" +#: cinder/backup/manager.py:249 +#, python-format +msgid "" +"Create backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." msgstr "" -#: cinder/api/ec2/cloud.py:1329 -msgid "Going to start terminating instances" +#: cinder/backup/manager.py:282 +#, python-format +msgid "Create backup finished. backup: %s." msgstr "" -#: cinder/api/ec2/cloud.py:1343 +#: cinder/backup/manager.py:286 #, python-format -msgid "Reboot instance %r" +msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s." 
msgstr "" -#: cinder/api/ec2/cloud.py:1354 -msgid "Going to stop instances" +#: cinder/backup/manager.py:299 +#, python-format +msgid "" +"Restore backup aborted: expected volume status %(expected_status)s but " +"got %(actual_status)s." msgstr "" -#: cinder/api/ec2/cloud.py:1365 -msgid "Going to start instances" +#: cinder/backup/manager.py:310 +#, python-format +msgid "" +"Restore backup aborted: expected backup status %(expected_status)s but " +"got %(actual_status)s." msgstr "" -#: cinder/api/ec2/cloud.py:1455 +#: cinder/backup/manager.py:329 #, python-format -msgid "De-registering image %s" +msgid "" +"Restore backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." msgstr "" -#: cinder/api/ec2/cloud.py:1471 -msgid "imageLocation is required" +#: cinder/backup/manager.py:360 +#, python-format +msgid "" +"Restore backup finished, backup %(backup_id)s restored to volume " +"%(volume_id)s." msgstr "" -#: cinder/api/ec2/cloud.py:1490 +#: cinder/backup/manager.py:379 #, python-format -msgid "Registered image %(image_location)s with id %(image_id)s" +msgid "Delete backup started, backup: %s." msgstr "" -#: cinder/api/ec2/cloud.py:1536 -msgid "user or group not specified" +#: cinder/backup/manager.py:386 +#, python-format +msgid "" +"Delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." msgstr "" -#: cinder/api/ec2/cloud.py:1538 -msgid "only group \"all\" is supported" -msgstr "лише група \"всі\" підтримується" - -#: cinder/api/ec2/cloud.py:1540 -msgid "operation_type must be add or remove" +#: cinder/backup/manager.py:399 +#, python-format +msgid "" +"Delete backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." msgstr "" -#: cinder/api/ec2/cloud.py:1542 +#: cinder/backup/manager.py:422 #, python-format -msgid "Updating image %s publicity" +msgid "Delete backup finished, backup %s deleted." 
msgstr "" -#: cinder/api/ec2/cloud.py:1555 -#, python-format -msgid "Not allowed to modify attributes for image %s" +#: cinder/backup/drivers/ceph.py:116 +msgid "" +"rbd striping not supported - ignoring configuration settings for rbd " +"striping" msgstr "" -#: cinder/api/ec2/cloud.py:1603 +#: cinder/backup/drivers/ceph.py:147 #, python-format -msgid "Couldn't stop instance with in %d sec" +msgid "invalid user '%s'" msgstr "" -#: cinder/api/metadata/handler.py:246 cinder/api/metadata/handler.py:253 -#, python-format -msgid "Failed to get metadata for ip: %s" +#: cinder/backup/drivers/ceph.py:213 +msgid "backup_id required" msgstr "" -#: cinder/api/openstack/__init__.py:43 +#: cinder/backup/drivers/ceph.py:224 #, python-format -msgid "Caught error: %s" +msgid "discarding %(length)s bytes from offset %(offset)s" msgstr "" -#: cinder/api/openstack/__init__.py:45 cinder/api/openstack/wsgi.py:886 +#: cinder/backup/drivers/ceph.py:232 #, python-format -msgid "%(url)s returned with HTTP %(status)d" +msgid "writing zeroes chunk %d" msgstr "" -#: cinder/api/openstack/__init__.py:94 -msgid "Must specify an ExtensionManager class" +#: cinder/backup/drivers/ceph.py:246 +#, python-format +msgid "transferring data between '%(src)s' and '%(dest)s'" msgstr "" -#: cinder/api/openstack/__init__.py:105 +#: cinder/backup/drivers/ceph.py:250 #, python-format -msgid "Extended resource: %s" +msgid "%(chunks)s chunks of %(bytes)s bytes to be transferred" msgstr "" -#: cinder/api/openstack/__init__.py:130 +#: cinder/backup/drivers/ceph.py:269 #, python-format -msgid "" -"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " -"resource" +msgid "transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s)" msgstr "" -#: cinder/api/openstack/__init__.py:135 +#: cinder/backup/drivers/ceph.py:279 #, python-format -msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgid "transferring remaining %s bytes" msgstr "" -#: cinder/api/openstack/auth.py:90 +#: cinder/backup/drivers/ceph.py:295 #, python-format -msgid "%(user_id)s could not be found with token '%(token)s'" +msgid "creating base image '%s'" msgstr "" -#: cinder/api/openstack/auth.py:134 +#: cinder/backup/drivers/ceph.py:322 cinder/backup/drivers/ceph.py:603 #, python-format -msgid "%(user_id)s must be an admin or a member of %(project_id)s" +msgid "deleting backup snapshot='%s'" msgstr "" -#: cinder/api/openstack/auth.py:152 -msgid "Authentication requests must be made against a version root (e.g. /v2)." +#: cinder/backup/drivers/ceph.py:325 +msgid "no backup snapshot to delete" msgstr "" -#: cinder/api/openstack/auth.py:167 +#: cinder/backup/drivers/ceph.py:361 #, python-format -msgid "Could not find %s in request." +msgid "trying diff format name format basename='%s'" msgstr "" -#: cinder/api/openstack/auth.py:191 +#: cinder/backup/drivers/ceph.py:369 #, python-format -msgid "Successfully authenticated '%s'" +msgid "image %s not found" msgstr "" -#: cinder/api/openstack/auth.py:241 -msgid "User not found with provided API key." 
+#: cinder/backup/drivers/ceph.py:377 +#, python-format +msgid "base image still has %s snapshots so skipping base image delete" msgstr "" -#: cinder/api/openstack/auth.py:258 +#: cinder/backup/drivers/ceph.py:382 #, python-format -msgid "Provided API key is valid, but not for user '%(username)s'" +msgid "deleting base image='%s'" msgstr "" -#: cinder/api/openstack/common.py:133 cinder/api/openstack/common.py:167 -msgid "limit param must be an integer" +#: cinder/backup/drivers/ceph.py:389 +#, python-format +msgid "image busy, retrying %(retries)s more time(s) in %(delay)ss" msgstr "" -#: cinder/api/openstack/common.py:136 cinder/api/openstack/common.py:171 -msgid "limit param must be positive" +#: cinder/backup/drivers/ceph.py:394 +msgid "max retries reached - raising error" msgstr "" -#: cinder/api/openstack/common.py:161 -msgid "offset param must be an integer" +#: cinder/backup/drivers/ceph.py:397 +#, python-format +msgid "base backup image='%s' deleted)" msgstr "" -#: cinder/api/openstack/common.py:175 -msgid "offset param must be positive" +#: cinder/backup/drivers/ceph.py:407 +#, python-format +msgid "deleting source snap '%s'" msgstr "" -#: cinder/api/openstack/common.py:203 +#: cinder/backup/drivers/ceph.py:453 #, python-format -msgid "marker [%s] not found" +msgid "performing differential transfer from '%(src)s' to '%(dest)s'" msgstr "" -#: cinder/api/openstack/common.py:243 +#: cinder/backup/drivers/ceph.py:478 #, python-format -msgid "href %s does not contain version" +msgid "rbd diff op failed - (ret=%(ret)s stderr=%(stderr)s)" msgstr "" -#: cinder/api/openstack/common.py:278 -msgid "Image metadata limit exceeded" +#: cinder/backup/drivers/ceph.py:488 +#, python-format +msgid "image '%s' not found - trying diff format name" msgstr "" -#: cinder/api/openstack/common.py:295 +#: cinder/backup/drivers/ceph.py:493 #, python-format -msgid "Converting nw_info: %s" +msgid "diff format image '%s' not found" msgstr "" -#: cinder/api/openstack/common.py:305 +#: cinder/backup/drivers/ceph.py:528 #, python-format -msgid "Converted networks: %s" +msgid "using --from-snap '%s'" msgstr "" -#: cinder/api/openstack/common.py:338 +#: cinder/backup/drivers/ceph.py:543 #, python-format -msgid "Cannot '%(action)s' while instance is in %(attr)s %(state)s" +msgid "source snap '%s' is stale so deleting" msgstr "" -#: cinder/api/openstack/common.py:341 +#: cinder/backup/drivers/ceph.py:555 #, python-format -msgid "Instance is in an invalid state for '%(action)s'" +msgid "" +"snap='%(snap)s' does not exist in base image='%(base)s' - aborting " +"incremental backup" msgstr "" -#: cinder/api/openstack/common.py:421 -msgid "Rejecting snapshot request, snapshots currently disabled" +#: cinder/backup/drivers/ceph.py:566 +#, python-format +msgid "creating backup snapshot='%s'" msgstr "" -#: cinder/api/openstack/common.py:423 -msgid "Instance snapshots are not permitted at this time." 
+#: cinder/backup/drivers/ceph.py:586 +#, python-format +msgid "differential backup transfer completed in %.4fs" msgstr "" -#: cinder/api/openstack/extensions.py:188 -#, python-format -msgid "Loaded extension: %s" +#: cinder/backup/drivers/ceph.py:595 +msgid "differential backup transfer failed" msgstr "" -#: cinder/api/openstack/extensions.py:225 +#: cinder/backup/drivers/ceph.py:625 #, python-format -msgid "Ext name: %s" +msgid "creating base image='%s'" msgstr "" -#: cinder/api/openstack/extensions.py:226 -#, python-format -msgid "Ext alias: %s" +#: cinder/backup/drivers/ceph.py:634 +msgid "copying data" msgstr "" -#: cinder/api/openstack/extensions.py:227 +#: cinder/backup/drivers/ceph.py:694 #, python-format -msgid "Ext description: %s" +msgid "looking for snapshot of backup base '%s'" msgstr "" -#: cinder/api/openstack/extensions.py:229 +#: cinder/backup/drivers/ceph.py:697 #, python-format -msgid "Ext namespace: %s" +msgid "backup base '%s' has no snapshots" msgstr "" -#: cinder/api/openstack/extensions.py:230 +#: cinder/backup/drivers/ceph.py:704 #, python-format -msgid "Ext updated: %s" +msgid "backup '%s' has no snapshot" msgstr "" -#: cinder/api/openstack/extensions.py:232 +#: cinder/backup/drivers/ceph.py:708 #, python-format -msgid "Exception loading extension: %s" +msgid "backup should only have one snapshot but instead has %s" msgstr "" -#: cinder/api/openstack/extensions.py:246 +#: cinder/backup/drivers/ceph.py:713 #, python-format -msgid "Loading extension %s" +msgid "found snapshot '%s'" msgstr "" -#: cinder/api/openstack/extensions.py:252 -#, python-format -msgid "Calling extension factory %s" +#: cinder/backup/drivers/ceph.py:734 +msgid "need non-zero volume size" msgstr "" -#: cinder/api/openstack/extensions.py:264 +#: cinder/backup/drivers/ceph.py:751 #, python-format -msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgid "Starting backup of volume='%s'" msgstr "" -#: cinder/api/openstack/extensions.py:344 -#, python-format -msgid "Failed to load extension %(classpath)s: %(exc)s" +#: cinder/backup/drivers/ceph.py:764 +msgid "forcing full backup" msgstr "" -#: cinder/api/openstack/extensions.py:368 +#: cinder/backup/drivers/ceph.py:776 #, python-format -msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgid "backup '%s' finished." 
msgstr "" -#: cinder/api/openstack/wsgi.py:135 cinder/api/openstack/wsgi.py:538 -msgid "cannot understand JSON" +#: cinder/backup/drivers/ceph.py:834 +msgid "adjusting restore vol size" msgstr "" -#: cinder/api/openstack/wsgi.py:159 -#: cinder/api/openstack/compute/contrib/hosts.py:86 -msgid "cannot understand XML" +#: cinder/backup/drivers/ceph.py:846 +#, python-format +msgid "trying incremental restore from base='%(base)s' snap='%(snap)s'" msgstr "" -#: cinder/api/openstack/wsgi.py:543 -msgid "too many body keys" +#: cinder/backup/drivers/ceph.py:858 +msgid "differential restore failed, trying full restore" msgstr "" -#: cinder/api/openstack/wsgi.py:582 +#: cinder/backup/drivers/ceph.py:869 #, python-format -msgid "Exception handling resource: %s" +msgid "restore transfer completed in %.4fs" msgstr "" -#: cinder/api/openstack/wsgi.py:586 +#: cinder/backup/drivers/ceph.py:916 #, python-format -msgid "Fault thrown: %s" +msgid "rbd has %s extents" msgstr "" -#: cinder/api/openstack/wsgi.py:589 -#, python-format -msgid "HTTP exception thrown: %s" +#: cinder/backup/drivers/ceph.py:938 +msgid "dest volume is original volume - forcing full copy" msgstr "" -#: cinder/api/openstack/wsgi.py:697 -msgid "Unrecognized Content-Type provided in request" +#: cinder/backup/drivers/ceph.py:959 +msgid "destination has extents - forcing full copy" msgstr "" -#: cinder/api/openstack/wsgi.py:701 -msgid "No Content-Type provided in request" +#: cinder/backup/drivers/ceph.py:964 +#, python-format +msgid "no restore point found for backup='%s', forcing full copy" msgstr "" -#: cinder/api/openstack/wsgi.py:705 -msgid "Empty body provided in request" +#: cinder/backup/drivers/ceph.py:995 +msgid "forcing full restore" msgstr "" -#: cinder/api/openstack/wsgi.py:816 +#: cinder/backup/drivers/ceph.py:1005 #, python-format -msgid "There is no such action: %s" +msgid "starting restore from Ceph backup=%(src)s to volume=%(dest)s" msgstr "" -#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 -#: cinder/api/openstack/compute/server_metadata.py:58 -#: cinder/api/openstack/compute/server_metadata.py:76 -#: cinder/api/openstack/compute/server_metadata.py:101 -#: cinder/api/openstack/compute/server_metadata.py:126 -#: cinder/api/openstack/compute/contrib/admin_actions.py:211 -#: cinder/api/openstack/compute/contrib/console_output.py:52 -msgid "Malformed request body" +#: cinder/backup/drivers/ceph.py:1016 +msgid "volume_file does not support fileno() so skipping fsync()" msgstr "" -#: cinder/api/openstack/wsgi.py:829 -msgid "Unsupported Content-Type" +#: cinder/backup/drivers/ceph.py:1021 +msgid "restore finished successfully." 
msgstr "" -#: cinder/api/openstack/wsgi.py:841 -msgid "Malformed request url" +#: cinder/backup/drivers/ceph.py:1023 +#, python-format +msgid "restore finished with error - %s" msgstr "" -#: cinder/api/openstack/wsgi.py:889 +#: cinder/backup/drivers/ceph.py:1029 #, python-format -msgid "%(url)s returned a fault: %(e)s" +msgid "delete started for backup=%s" msgstr "" -#: cinder/api/openstack/xmlutil.py:265 -msgid "element is not a child" +#: cinder/backup/drivers/ceph.py:1034 +msgid "rbd image not found but continuing anyway so that db entry can be removed" msgstr "" -#: cinder/api/openstack/xmlutil.py:414 -msgid "root element selecting a list" -msgstr "" - -#: cinder/api/openstack/xmlutil.py:739 +#: cinder/backup/drivers/ceph.py:1037 #, python-format -msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgid "delete '%s' finished with warning" msgstr "" -#: cinder/api/openstack/xmlutil.py:858 -msgid "subclasses must implement construct()!" +#: cinder/backup/drivers/ceph.py:1039 +#, python-format +msgid "delete '%s' finished" msgstr "" -#: cinder/api/openstack/compute/extensions.py:29 -#: cinder/api/openstack/volume/extensions.py:29 -msgid "Initializing extension manager." +#: cinder/backup/drivers/swift.py:106 +#, python-format +msgid "unsupported compression algorithm: %s" msgstr "" -#: cinder/api/openstack/compute/image_metadata.py:40 -#: cinder/api/openstack/compute/images.py:146 -#: cinder/api/openstack/compute/images.py:161 -msgid "Image not found." +#: cinder/backup/drivers/swift.py:123 +#, python-format +msgid "single_user auth mode enabled, but %(param)s not set" msgstr "" -#: cinder/api/openstack/compute/image_metadata.py:79 -msgid "Incorrect request body format" +#: cinder/backup/drivers/swift.py:141 +#, python-format +msgid "_check_container_exists: container: %s" msgstr "" -#: cinder/api/openstack/compute/image_metadata.py:83 -#: cinder/api/openstack/compute/server_metadata.py:80 -#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:79 -#: cinder/api/openstack/compute/contrib/volumetypes.py:188 -msgid "Request body and URI mismatch" +#: cinder/backup/drivers/swift.py:146 +#, python-format +msgid "container %s does not exist" msgstr "" -#: cinder/api/openstack/compute/image_metadata.py:86 -#: cinder/api/openstack/compute/server_metadata.py:84 -#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:82 -#: cinder/api/openstack/compute/contrib/volumetypes.py:191 -msgid "Request body contains too many items" +#: cinder/backup/drivers/swift.py:151 +#, python-format +msgid "container %s exists" msgstr "" -#: cinder/api/openstack/compute/image_metadata.py:111 -msgid "Invalid metadata key" +#: cinder/backup/drivers/swift.py:157 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" msgstr "" -#: cinder/api/openstack/compute/ips.py:74 -msgid "Instance does not exist" +#: cinder/backup/drivers/swift.py:173 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" msgstr "" -#: cinder/api/openstack/compute/ips.py:97 -msgid "Instance is not a member of specified network" +#: cinder/backup/drivers/swift.py:182 +#, python-format +msgid "generated object list: %s" msgstr "" -#: cinder/api/openstack/compute/limits.py:140 +#: cinder/backup/drivers/swift.py:192 #, python-format msgid "" -"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " -"%(unit_string)s." 
+"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" msgstr "" -#: cinder/api/openstack/compute/limits.py:266 -msgid "This request was rate-limited." -msgstr "" - -#: cinder/api/openstack/compute/server_metadata.py:38 -#: cinder/api/openstack/compute/server_metadata.py:122 -#: cinder/api/openstack/compute/server_metadata.py:159 -msgid "Server does not exist" +#: cinder/backup/drivers/swift.py:209 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" msgstr "" -#: cinder/api/openstack/compute/server_metadata.py:141 -#: cinder/api/openstack/compute/server_metadata.py:152 -msgid "Metadata item was not found" +#: cinder/backup/drivers/swift.py:214 +msgid "_write_metadata finished" msgstr "" -#: cinder/api/openstack/compute/servers.py:425 +#: cinder/backup/drivers/swift.py:219 #, python-format -msgid "Invalid server status: %(status)s" +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:433 -msgid "Invalid changes-since value" +#: cinder/backup/drivers/swift.py:224 +#, python-format +msgid "_read_metadata finished (%s)" msgstr "" -#: cinder/api/openstack/compute/servers.py:483 -msgid "Personality file limit exceeded" +#: cinder/backup/drivers/swift.py:234 +#, python-format +msgid "volume size %d is invalid." msgstr "" -#: cinder/api/openstack/compute/servers.py:485 -msgid "Personality file path too long" +#: cinder/backup/drivers/swift.py:248 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:487 -msgid "Personality file content too long" +#: cinder/backup/drivers/swift.py:271 +msgid "reading chunk of data from volume" msgstr "" -#: cinder/api/openstack/compute/servers.py:501 -msgid "Server name is not a string or unicode" +#: cinder/backup/drivers/swift.py:278 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:505 -msgid "Server name is an empty string" +#: cinder/backup/drivers/swift.py:287 +msgid "not compressing data" msgstr "" -#: cinder/api/openstack/compute/servers.py:509 -msgid "Server name must be less than 256 characters." 
+#: cinder/backup/drivers/swift.py:291 +msgid "About to put_object" msgstr "" -#: cinder/api/openstack/compute/servers.py:527 +#: cinder/backup/drivers/swift.py:297 #, python-format -msgid "Bad personality format: missing %s" +msgid "swift MD5 for %(object_name)s: %(etag)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:530 -msgid "Bad personality format" +#: cinder/backup/drivers/swift.py:301 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:535 +#: cinder/backup/drivers/swift.py:304 #, python-format -msgid "Personality content for %s cannot be decoded" +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:550 -#, python-format -msgid "Bad networks format: network uuid is not in proper format (%s)" +#: cinder/backup/drivers/swift.py:312 +msgid "Calling eventlet.sleep(0)" msgstr "" -#: cinder/api/openstack/compute/servers.py:559 +#: cinder/backup/drivers/swift.py:328 cinder/backup/drivers/tsm.py:324 #, python-format -msgid "Invalid fixed IP address (%s)" +msgid "backup %s finished." msgstr "" -#: cinder/api/openstack/compute/servers.py:566 +#: cinder/backup/drivers/swift.py:345 #, python-format -msgid "Duplicate networks (%s) are not allowed" +msgid "v1 swift volume backup restore of %s started" msgstr "" -#: cinder/api/openstack/compute/servers.py:572 +#: cinder/backup/drivers/swift.py:350 #, python-format -msgid "Bad network format: missing %s" +msgid "metadata_object_names = %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:575 -msgid "Bad networks format" +#: cinder/backup/drivers/swift.py:356 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" msgstr "" -#: cinder/api/openstack/compute/servers.py:587 -msgid "Userdata content cannot be decoded" +#: cinder/backup/drivers/swift.py:362 +#, python-format +msgid "" +"restoring object from swift. backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:594 -msgid "accessIPv4 is not proper IPv4 format" +#: cinder/backup/drivers/swift.py:378 +#, python-format +msgid "decompressing data using %s algorithm" msgstr "" -#: cinder/api/openstack/compute/servers.py:601 -msgid "accessIPv6 is not proper IPv6 format" +#: cinder/backup/drivers/swift.py:401 +#, python-format +msgid "v1 swift volume backup restore of %s finished" msgstr "" -#: cinder/api/openstack/compute/servers.py:633 -msgid "Server name is not defined" +#: cinder/backup/drivers/swift.py:409 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:675 -#: cinder/api/openstack/compute/servers.py:740 -msgid "Invalid flavorRef provided." +#: cinder/backup/drivers/swift.py:423 +#, python-format +msgid "Restoring swift backup version %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:737 -msgid "Can not find requested image" +#: cinder/backup/drivers/swift.py:428 +#, python-format +msgid "No support to restore swift backup version %s" msgstr "" -#: cinder/api/openstack/compute/servers.py:743 -msgid "Invalid key_name provided." 
+#: cinder/backup/drivers/swift.py:432 cinder/backup/drivers/tsm.py:378 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." msgstr "" -#: cinder/api/openstack/compute/servers.py:829 -#: cinder/api/openstack/compute/servers.py:849 -msgid "Instance has not been resized." +#: cinder/backup/drivers/swift.py:446 +msgid "swift error while listing objects, continuing with delete" msgstr "" -#: cinder/api/openstack/compute/servers.py:835 +#: cinder/backup/drivers/swift.py:455 #, python-format -msgid "Error in confirm-resize %s" +msgid "swift error while deleting object %s, continuing with delete" msgstr "" -#: cinder/api/openstack/compute/servers.py:855 +#: cinder/backup/drivers/swift.py:458 #, python-format -msgid "Error in revert-resize %s" +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:868 -msgid "Argument 'type' for reboot is not HARD or SOFT" +#: cinder/backup/drivers/swift.py:468 cinder/backup/drivers/tsm.py:440 +#, python-format +msgid "delete %s finished" msgstr "" -#: cinder/api/openstack/compute/servers.py:872 -msgid "Missing argument 'type' for reboot" +#: cinder/backup/drivers/tsm.py:85 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to create device hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:885 +#: cinder/backup/drivers/tsm.py:143 #, python-format -msgid "Error in reboot %s" +msgid "" +"backup: %(vol_id)s Failed to obtain backup success notification from " +"server.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:897 -msgid "Unable to locate requested flavor." +#: cinder/backup/drivers/tsm.py:173 +#, python-format +msgid "" +"restore: %(vol_id)s Failed.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:900 -msgid "Resize requires a change in size." +#: cinder/backup/drivers/tsm.py:199 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a block device." msgstr "" -#: cinder/api/openstack/compute/servers.py:924 -msgid "Malformed server entity" +#: cinder/backup/drivers/tsm.py:206 +#, python-format +msgid "backup: %(vol_id)s Failed. Cannot obtain real path to device %(path)s." msgstr "" -#: cinder/api/openstack/compute/servers.py:931 -msgid "Missing imageRef attribute" +#: cinder/backup/drivers/tsm.py:213 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a file." msgstr "" -#: cinder/api/openstack/compute/servers.py:940 -msgid "Invalid imageRef provided." 
+#: cinder/backup/drivers/tsm.py:260 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to remove backup hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:949 -msgid "Missing flavorRef attribute" +#: cinder/backup/drivers/tsm.py:286 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to TSM, volume path: " +"%(volume_path)s," msgstr "" -#: cinder/api/openstack/compute/servers.py:962 -msgid "No adminPass was specified" +#: cinder/backup/drivers/tsm.py:298 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:966 -#: cinder/api/openstack/compute/servers.py:1144 -msgid "Invalid adminPass" +#: cinder/backup/drivers/tsm.py:308 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:980 -msgid "Unable to parse metadata key/value pairs." +#: cinder/backup/drivers/tsm.py:338 +#, python-format +msgid "" +"restore: starting restore of backup from TSM to volume %(volume_id)s, " +"backup: %(backup_id)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:993 -msgid "Resize request has invalid 'flavorRef' attribute." +#: cinder/backup/drivers/tsm.py:352 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:996 -msgid "Resize requests require 'flavorRef' attribute." +#: cinder/backup/drivers/tsm.py:362 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:1014 -#: cinder/api/openstack/compute/contrib/aggregates.py:142 -#: cinder/api/openstack/compute/contrib/networks.py:65 -msgid "Invalid request body" +#: cinder/backup/drivers/tsm.py:413 +#, python-format +msgid "" +"delete: %(vol_id)s Failed to run dsmc with stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:1019 -msgid "Could not parse imageRef from request." +#: cinder/backup/drivers/tsm.py:421 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments with " +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:1071 -msgid "Instance could not be found" +#: cinder/backup/drivers/tsm.py:432 +#, python-format +msgid "" +"delete: %(vol_id)s Failed with stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/api/openstack/compute/servers.py:1074 -msgid "Cannot find image for rebuild" +#: cinder/brick/exception.py:55 +#, python-format +msgid "Exception in string format operation. msg='%s'" msgstr "" -#: cinder/api/openstack/compute/servers.py:1103 -msgid "createImage entity requires name attribute" +#: cinder/brick/exception.py:85 +msgid "We are unable to locate any Fibre Channel devices." msgstr "" -#: cinder/api/openstack/compute/servers.py:1112 -#: cinder/api/openstack/compute/contrib/admin_actions.py:238 -msgid "Invalid metadata" +#: cinder/brick/exception.py:89 +msgid "Unable to find a Fibre Channel volume device." 
msgstr "" -#: cinder/api/openstack/compute/servers.py:1167 +#: cinder/brick/exception.py:93 #, python-format -msgid "Removing options '%(unk_opt_str)s' from query" +msgid "Volume device not found at %(device)s." msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:60 +#: cinder/brick/exception.py:97 #, python-format -msgid "Compute.api::pause %s" +msgid "Unable to find Volume Group: %(vg_name)s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:77 +#: cinder/brick/exception.py:101 #, python-format -msgid "Compute.api::unpause %s" +msgid "Failed to create Volume Group: %(vg_name)s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:94 +#: cinder/brick/exception.py:105 #, python-format -msgid "compute.api::suspend %s" +msgid "Failed to create iscsi target for volume %(volume_id)s." msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:111 +#: cinder/brick/exception.py:109 #, python-format -msgid "compute.api::resume %s" +msgid "Failed to remove iscsi target for volume %(volume_id)s." msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:127 +#: cinder/brick/exception.py:113 #, python-format -msgid "Error in migrate %s" +msgid "Failed to attach iSCSI target for volume %(volume_id)s." msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:141 +#: cinder/brick/exception.py:117 #, python-format -msgid "Compute.api::reset_network %s" -msgstr "" - -#: cinder/api/openstack/compute/contrib/admin_actions.py:154 -#: cinder/api/openstack/compute/contrib/admin_actions.py:170 -#: cinder/api/openstack/compute/contrib/admin_actions.py:186 -#: cinder/api/openstack/compute/contrib/multinic.py:41 -#: cinder/api/openstack/compute/contrib/rescue.py:44 -msgid "Server not found" +msgid "Connect to volume via protocol %(protocol)s not supported." msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:157 +#: cinder/brick/initiator/connector.py:127 #, python-format -msgid "Compute.api::inject_network_info %s" +msgid "Invalid InitiatorConnector protocol specified %(protocol)s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:173 +#: cinder/brick/initiator/connector.py:140 #, python-format -msgid "Compute.api::lock %s" +msgid "Failed to access the device on the path %(path)s: %(error)s %(info)s." msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:189 +#: cinder/brick/initiator/connector.py:229 #, python-format -msgid "Compute.api::unlock %s" +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. 
Try" +" number: %(tries)s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:219 +#: cinder/brick/initiator/connector.py:242 #, python-format -msgid "createBackup entity requires %s attribute" +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:223 -msgid "Malformed createBackup entity" -msgstr "" - -#: cinder/api/openstack/compute/contrib/admin_actions.py:229 -msgid "createBackup attribute 'rotation' must be an integer" -msgstr "" - -#: cinder/api/openstack/compute/contrib/admin_actions.py:244 -#: cinder/api/openstack/compute/contrib/console_output.py:47 -#: cinder/api/openstack/compute/contrib/console_output.py:59 -#: cinder/api/openstack/compute/contrib/consoles.py:49 -#: cinder/api/openstack/compute/contrib/consoles.py:60 -#: cinder/api/openstack/compute/contrib/server_action_list.py:49 -#: cinder/api/openstack/compute/contrib/server_diagnostics.py:47 -#: cinder/api/openstack/compute/contrib/server_start_stop.py:38 -msgid "Instance not found" +#: cinder/brick/initiator/connector.py:317 +#, python-format +msgid "Could not find the iSCSI Initiator File %s" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:272 -msgid "host and block_migration must be specified." +#: cinder/brick/initiator/connector.py:609 +msgid "We are unable to locate any Fibre Channel devices" msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:284 +#: cinder/brick/initiator/connector.py:619 #, python-format -msgid "Live migration of instance %(id)s to host %(host)s failed" +msgid "Looking for Fibre Channel dev %(device)s" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:76 -#, python-format -msgid "" -"Cannot create aggregate with name %(name)s and availability zone " -"%(avail_zone)s" +#: cinder/brick/initiator/connector.py:629 +msgid "Fibre Channel volume device not found." msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:88 +#: cinder/brick/initiator/connector.py:633 #, python-format -msgid "Cannot show aggregate: %(id)s" +msgid "Fibre volume not yet found. Will rescan & retry. Try number: %(tries)s" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:114 +#: cinder/brick/initiator/connector.py:649 #, python-format -msgid "Cannot update aggregate: %(id)s" +msgid "Found Fibre Channel volume %(name)s (after %(tries)s rescans)" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:126 +#: cinder/brick/initiator/connector.py:658 #, python-format -msgid "Cannot delete aggregate: %(id)s" +msgid "Multipath device discovered %(device)s" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:139 +#: cinder/brick/initiator/connector.py:776 #, python-format -msgid "Aggregates does not have %s action" +msgid "AoE volume not yet found at: %(path)s. 
Try number: %(tries)s" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:152 -#: cinder/api/openstack/compute/contrib/aggregates.py:158 +#: cinder/brick/initiator/connector.py:789 #, python-format -msgid "Cannot add host %(host)s in aggregate %(id)s" +msgid "Found AoE device %(path)s (after %(tries)s rediscover)" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:171 -#: cinder/api/openstack/compute/contrib/aggregates.py:175 +#: cinder/brick/initiator/connector.py:815 #, python-format -msgid "Cannot remove host %(host)s in aggregate %(id)s" +msgid "aoe-discover: stdout=%(out)s stderr%(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:195 +#: cinder/brick/initiator/connector.py:825 #, python-format -msgid "Cannot set metadata %(metadata)s in aggregate %(id)s" +msgid "aoe-revalidate %(dev)s: stdout=%(out)s stderr%(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/certificates.py:75 -msgid "Only root certificate can be retrieved." +#: cinder/brick/initiator/connector.py:834 +#, python-format +msgid "aoe-flush %(dev)s: stdout=%(out)s stderr%(err)s" msgstr "" -#: cinder/api/openstack/compute/contrib/cloudpipe.py:146 +#: cinder/brick/initiator/connector.py:858 msgid "" -"Unable to claim IP for VPN instances, ensure it isn't running, and try " -"again in a few minutes" +"Connection details not present. RemoteFsClient may not initialize " +"properly." msgstr "" -#: cinder/api/openstack/compute/contrib/consoles.py:44 -msgid "Missing type specification" +#: cinder/brick/initiator/connector.py:915 +msgid "Invalid connection_properties specified no device_path attribute" msgstr "" -#: cinder/api/openstack/compute/contrib/consoles.py:56 -msgid "Invalid type specification" +#: cinder/brick/initiator/linuxfc.py:50 cinder/brick/initiator/linuxfc.py:56 +msgid "systool is not installed" msgstr "" -#: cinder/api/openstack/compute/contrib/disk_config.py:44 +#: cinder/brick/initiator/linuxscsi.py:99 +#: cinder/brick/initiator/linuxscsi.py:107 +#: cinder/brick/initiator/linuxscsi.py:124 #, python-format -msgid "%s must be either 'MANUAL' or 'AUTO'." +msgid "multipath call failed exit (%(code)s)" msgstr "" -#: cinder/api/openstack/compute/contrib/extended_server_attributes.py:77 -#: cinder/api/openstack/compute/contrib/extended_status.py:61 -msgid "Server not found." +#: cinder/brick/initiator/linuxscsi.py:145 +#, python-format +msgid "Couldn't find multipath device %(line)s" msgstr "" -#: cinder/api/openstack/compute/contrib/flavorextradata.py:61 -#: cinder/api/openstack/compute/contrib/flavorextradata.py:91 -msgid "Flavor not found." +#: cinder/brick/initiator/linuxscsi.py:149 +#, python-format +msgid "Found multipath device = %(mdev)s" msgstr "" -#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:49 -#: cinder/api/openstack/compute/contrib/volumetypes.py:158 -msgid "No Request Body" +#: cinder/brick/iscsi/iscsi.py:140 +msgid "Attempting recreate of backing lun..." msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:159 +#: cinder/brick/iscsi/iscsi.py:158 #, python-format -msgid "No more floating ips in pool %s." -msgstr "" - -#: cinder/api/openstack/compute/contrib/floating_ips.py:161 -msgid "No more floating ips available." 
+msgid "" +"Failed to recover attempt to create iscsi backing lun for volume " +"id:%(vol_id)s: %(e)s" msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:201 -#: cinder/api/openstack/compute/contrib/floating_ips.py:230 -#: cinder/api/openstack/compute/contrib/security_groups.py:571 -#: cinder/api/openstack/compute/contrib/security_groups.py:604 -msgid "Missing parameter dict" +#: cinder/brick/iscsi/iscsi.py:177 +#, python-format +msgid "Creating iscsi_target for: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:204 -#: cinder/api/openstack/compute/contrib/floating_ips.py:233 -msgid "Address not specified" +#: cinder/brick/iscsi/iscsi.py:184 +#, python-format +msgid "" +"Created volume path %(vp)s,\n" +"content: %(vc)%" msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:213 -msgid "No fixed ips associated to instance" +#: cinder/brick/iscsi/iscsi.py:216 cinder/brick/iscsi/iscsi.py:365 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s" msgstr "" -#: cinder/api/openstack/compute/contrib/floating_ips.py:216 -msgid "Associate floating ip failed" +#: cinder/brick/iscsi/iscsi.py:227 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:144 +#: cinder/brick/iscsi/iscsi.py:258 #, python-format -msgid "Invalid status: '%s'" +msgid "Removing iscsi_target for: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:148 +#: cinder/brick/iscsi/iscsi.py:262 #, python-format -msgid "Invalid mode: '%s'" +msgid "Volume path %s does not exist, nothing to remove." msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:152 +#: cinder/brick/iscsi/iscsi.py:280 #, python-format -msgid "Invalid update setting: '%s'" +msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:170 -#, python-format -msgid "Putting host %(host)s in maintenance mode %(mode)s." +#: cinder/brick/iscsi/iscsi.py:290 cinder/brick/iscsi/iscsi.py:550 +msgid "valid iqn needed for show_target" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:181 +#: cinder/brick/iscsi/iscsi.py:375 #, python-format -msgid "Setting host %(host)s to %(state)s." +msgid "Removing iscsi_target for volume: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:230 -msgid "Describe-resource is admin only functionality" +#: cinder/brick/iscsi/iscsi.py:469 +msgid "cinder-rtstool is not installed correctly" msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:238 -msgid "Host not found" +#: cinder/brick/iscsi/iscsi.py:489 +#, python-format +msgid "Creating iscsi_target for volume: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/keypairs.py:70 -msgid "Keypair name contains unsafe characters" +#: cinder/brick/iscsi/iscsi.py:513 cinder/brick/iscsi/iscsi.py:522 +#, python-format +msgid "Failed to create iscsi target for volume id:%s." msgstr "" -#: cinder/api/openstack/compute/contrib/keypairs.py:95 -msgid "Keypair name must be between 1 and 255 characters long" +#: cinder/brick/iscsi/iscsi.py:532 +#, python-format +msgid "Removing iscsi_target: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/keypairs.py:100 +#: cinder/brick/iscsi/iscsi.py:542 #, python-format -msgid "Key pair '%s' already exists." +msgid "Failed to remove iscsi target for volume id:%s." 
msgstr "" -#: cinder/api/openstack/compute/contrib/multinic.py:52 -msgid "Missing 'networkId' argument for addFixedIp" +#: cinder/brick/iscsi/iscsi.py:571 +#, python-format +msgid "Failed to add initiator iqn %s to target" msgstr "" -#: cinder/api/openstack/compute/contrib/multinic.py:68 -msgid "Missing 'address' argument for removeFixedIp" +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" msgstr "" -#: cinder/api/openstack/compute/contrib/multinic.py:77 +#: cinder/brick/local_dev/lvm.py:76 cinder/brick/local_dev/lvm.py:158 +#: cinder/brick/local_dev/lvm.py:474 cinder/brick/local_dev/lvm.py:503 +#: cinder/brick/local_dev/lvm.py:546 cinder/brick/local_dev/lvm.py:609 #, python-format -msgid "Unable to find address %r" +msgid "Cmd :%s" msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:62 +#: cinder/brick/local_dev/lvm.py:77 cinder/brick/local_dev/lvm.py:159 +#: cinder/brick/local_dev/lvm.py:475 cinder/brick/local_dev/lvm.py:504 +#: cinder/brick/local_dev/lvm.py:547 cinder/brick/local_dev/lvm.py:610 #, python-format -msgid "Network does not have %s action" +msgid "StdOut :%s" msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:70 +#: cinder/brick/local_dev/lvm.py:78 cinder/brick/local_dev/lvm.py:160 +#: cinder/brick/local_dev/lvm.py:476 cinder/brick/local_dev/lvm.py:505 +#: cinder/brick/local_dev/lvm.py:548 cinder/brick/local_dev/lvm.py:611 #, python-format -msgid "Disassociating network with id %s" +msgid "StdErr :%s" msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:74 -#: cinder/api/openstack/compute/contrib/networks.py:91 -#: cinder/api/openstack/compute/contrib/networks.py:101 -msgid "Network not found" +#: cinder/brick/local_dev/lvm.py:82 +#, python-format +msgid "Unable to locate Volume Group %s" msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:87 -#, python-format -msgid "Showing network with id %s" +#: cinder/brick/local_dev/lvm.py:157 +msgid "Error querying thin pool about data_percent" msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:97 +#: cinder/brick/local_dev/lvm.py:370 #, python-format -msgid "Deleting network with id %s" +msgid "Unable to find VG: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/scheduler_hints.py:41 -msgid "Malformed scheduler_hints attribute" +#: cinder/brick/local_dev/lvm.py:420 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:222 -msgid "Security group id should be integer" +#: cinder/brick/local_dev/lvm.py:473 +msgid "Error creating Volume" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:243 -msgid "Security group is still in use" +#: cinder/brick/local_dev/lvm.py:489 +#, python-format +msgid "Unable to find LV: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:295 -#, python-format -msgid "Security group %s already exists" +#: cinder/brick/local_dev/lvm.py:502 +msgid "Error creating snapshot" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:315 -#, python-format -msgid "Security group %s is not a string or unicode" +#: cinder/brick/local_dev/lvm.py:545 +msgid "Error activating LV" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:318 +#: cinder/brick/local_dev/lvm.py:563 #, python-format -msgid "Security group %s cannot be empty." 
+msgid "Error reported running lvremove: CMD: %(command)s, RESPONSE: %(response)s" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:321 -#, python-format -msgid "Security group %s should not be greater than 255 characters." +#: cinder/brick/local_dev/lvm.py:568 +msgid "Attempting udev settle and retry of lvremove..." msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:348 -msgid "Parent group id is not integer" +#: cinder/brick/local_dev/lvm.py:608 +msgid "Error extending Volume" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:351 -#, python-format -msgid "Security group (%s) not found" +#: cinder/brick/remotefs/remotefs.py:39 +msgid "nfs_mount_point_base required" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:369 -msgid "Not enough parameters to build a valid rule." +#: cinder/brick/remotefs/remotefs.py:45 +msgid "glusterfs_mount_point_base required" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:376 +#: cinder/brick/remotefs/remotefs.py:86 #, python-format -msgid "This rule already exists in group %s" -msgstr "Це правило вже існує в групі %s" - -#: cinder/api/openstack/compute/contrib/security_groups.py:414 -msgid "Parent or group id is not integer" +msgid "Already mounted: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:507 -msgid "Rule id is not integer" +#: cinder/common/config.py:125 +msgid "Deploy v1 of the Cinder API." msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:510 -#, python-format -msgid "Rule (%s) not found" +#: cinder/common/config.py:128 +msgid "Deploy v2 of the Cinder API." msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:574 -#: cinder/api/openstack/compute/contrib/security_groups.py:607 -msgid "Security group not specified" +#: cinder/common/sqlalchemyutils.py:66 +#: cinder/openstack/common/db/sqlalchemy/utils.py:72 +msgid "Id not in sort_keys; is sort_keys unique?" msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:578 -#: cinder/api/openstack/compute/contrib/security_groups.py:611 -msgid "Security group name cannot be empty" +#: cinder/common/sqlalchemyutils.py:114 +#: cinder/openstack/common/db/sqlalchemy/utils.py:120 +msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "" -#: cinder/api/openstack/compute/contrib/server_start_stop.py:45 +#: cinder/compute/nova.py:97 #, python-format -msgid "start instance %r" +msgid "Novaclient connection created using URL: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/server_start_stop.py:54 -#, python-format -msgid "stop instance %r" +#: cinder/db/sqlalchemy/api.py:63 +msgid "Use of empty request context is deprecated" msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:73 -#: cinder/api/openstack/volume/volumes.py:106 +#: cinder/db/sqlalchemy/api.py:190 #, python-format -msgid "vol=%s" +msgid "Unrecognized read_deleted value '%s'" msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:146 -#: cinder/api/openstack/volume/volumes.py:184 +#: cinder/db/sqlalchemy/api.py:843 #, python-format -msgid "Delete volume with id: %s" +msgid "Change will make usage less than 0 for the following resources: %s" msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:329 +#: cinder/db/sqlalchemy/api.py:1842 #, python-format -msgid "Attach volume %(volume_id)s to instance %(server_id)s at %(device)s" +msgid "VolumeType %s deletion failed, VolumeType in use." 
msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:481 -#: cinder/api/openstack/volume/snapshots.py:110 +#: cinder/db/sqlalchemy/api.py:2530 #, python-format -msgid "Delete snapshot with id: %s" +msgid "No backup with id %s" msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:524 -#: cinder/api/openstack/volume/snapshots.py:150 -#, python-format -msgid "Create snapshot from volume %s" +#: cinder/db/sqlalchemy/api.py:2615 +msgid "Volume must be available" msgstr "" -#: cinder/auth/fakeldap.py:33 -msgid "Attempted to instantiate singleton" +#: cinder/db/sqlalchemy/api.py:2639 +#, python-format +msgid "Volume in unexpected state %s, expected awaiting-transfer" msgstr "" -#: cinder/auth/ldapdriver.py:650 +#: cinder/db/sqlalchemy/api.py:2662 #, python-format msgid "" -"Attempted to remove the last member of a group. Deleting the group at %s " -"instead." +"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " +"%(status)s, expected awaiting-transfer" msgstr "" -#: cinder/auth/manager.py:298 -#, python-format -msgid "Looking up user: %r" +#: cinder/db/sqlalchemy/migration.py:37 +msgid "version should be an integer" msgstr "" -#: cinder/auth/manager.py:302 -#, python-format -msgid "Failed authorization for access key %s" +#: cinder/db/sqlalchemy/migration.py:64 +msgid "Upgrade DB using Essex release first." msgstr "" -#: cinder/auth/manager.py:308 -#, python-format -msgid "Using project name = user name (%s)" +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:240 +msgid "Exception while creating table." msgstr "" -#: cinder/auth/manager.py:315 -#, python-format -msgid "failed authorization: no project named %(pjid)s (user=%(uname)s)" +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:269 +msgid "Downgrade from initial Cinder install is unsupported." msgstr "" -#: cinder/auth/manager.py:324 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:49 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:74 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:105 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:45 +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:48 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:46 #, python-format -msgid "" -"Failed authorization: user %(uname)s not admin and not member of project " -"%(pjname)s" +msgid "Table |%s| not created!" msgstr "" -#: cinder/auth/manager.py:331 cinder/auth/manager.py:343 -#, python-format -msgid "user.secret: %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:127 +msgid "Dropping foreign key reservations_ibfk_1 failed." 
msgstr "" -#: cinder/auth/manager.py:332 cinder/auth/manager.py:344 -#, python-format -msgid "expected_signature: %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:133 +msgid "quota_classes table not dropped" msgstr "" -#: cinder/auth/manager.py:333 cinder/auth/manager.py:345 -#, python-format -msgid "signature: %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:140 +msgid "quota_usages table not dropped" msgstr "" -#: cinder/auth/manager.py:335 cinder/auth/manager.py:357 -#, python-format -msgid "Invalid signature for user %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:147 +msgid "reservations table not dropped" msgstr "" -#: cinder/auth/manager.py:353 -#, python-format -msgid "host_only_signature: %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:60 +msgid "Exception while creating table 'volume_glance_metadata'" msgstr "" -#: cinder/auth/manager.py:449 -msgid "Must specify project" +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:75 +msgid "volume_glance_metadata table not dropped" msgstr "" -#: cinder/auth/manager.py:490 -#, python-format -msgid "Adding role %(role)s to user %(uid)s in project %(pid)s" +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:68 +msgid "backups table not dropped" msgstr "" -#: cinder/auth/manager.py:493 -#, python-format -msgid "Adding sitewide role %(role)s to user %(uid)s" +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:58 +msgid "snapshot_metadata table not dropped" msgstr "" -#: cinder/auth/manager.py:519 -#, python-format -msgid "Removing role %(role)s from user %(uid)s on project %(pid)s" +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:61 +msgid "transfers table not dropped" msgstr "" -#: cinder/auth/manager.py:522 -#, python-format -msgid "Removing sitewide role %(role)s from user %(uid)s" +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:31 +msgid "migrations table not dropped" msgstr "" -#: cinder/auth/manager.py:595 +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:61 #, python-format -msgid "Created project %(name)s with manager %(manager_user)s" +msgid "Table |%s| not created" msgstr "" -#: cinder/auth/manager.py:613 +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:37 #, python-format -msgid "modifying project %s" +msgid "Exception while dropping table %s." msgstr "" -#: cinder/auth/manager.py:625 +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:100 #, python-format -msgid "Adding user %(uid)s to project %(pid)s" +msgid "Exception while creating table %s." msgstr "" -#: cinder/auth/manager.py:646 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:34 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:43 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:49 #, python-format -msgid "Remove user %(uid)s from project %(pid)s" +msgid "Column |%s| not created!" 
msgstr "" -#: cinder/auth/manager.py:676 -#, python-format -msgid "Deleting project %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:92 +msgid "encryption_key_id column not dropped from volumes" msgstr "" -#: cinder/auth/manager.py:734 -#, python-format -msgid "Created user %(rvname)s (admin: %(rvadmin)r)" +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:100 +msgid "encryption_key_id column not dropped from snapshots" msgstr "" -#: cinder/auth/manager.py:743 -#, python-format -msgid "Deleting user %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:105 +msgid "volume_type_id column not dropped from snapshots" msgstr "" -#: cinder/auth/manager.py:753 -#, python-format -msgid "Access Key change for user %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:113 +msgid "encryption table not dropped" msgstr "" -#: cinder/auth/manager.py:755 -#, python-format -msgid "Secret Key change for user %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:49 +msgid "Table quality_of_service_specs not created!" msgstr "" -#: cinder/auth/manager.py:757 -#, python-format -msgid "Admin status set to %(admin)r for user %(uid)s" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:60 +msgid "Added qos_specs_id column to volume type table failed." msgstr "" -#: cinder/auth/manager.py:802 -#, python-format -msgid "No vpn data for project %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:85 +msgid "Dropping foreign key volume_types_ibfk_1 failed" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:93 +msgid "Dropping qos_specs_id column failed." msgstr "" -#: cinder/cloudpipe/pipelib.py:46 -msgid "Instance type for vpn instances" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:100 +msgid "Dropping quality_of_service_specs table failed." msgstr "" -#: cinder/cloudpipe/pipelib.py:49 -msgid "Template for cloudpipe instance boot script" +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:59 +msgid "volume_admin_metadata table not dropped" msgstr "" -#: cinder/cloudpipe/pipelib.py:52 -msgid "Network to push into openvpn config" +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:48 +msgid "" +"Found existing 'default' entries in the quota_classes table. Skipping " +"insertion of default values." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:72 +msgid "Added default quota class data into the DB." msgstr "" -#: cinder/cloudpipe/pipelib.py:55 -msgid "Netmask to push into openvpn config" +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:74 +msgid "Default quota class data not inserted into the DB." msgstr "" -#: cinder/cloudpipe/pipelib.py:107 +#: cinder/image/glance.py:161 cinder/image/glance.py:169 #, python-format -msgid "Launching VPN for %s" +msgid "Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s." msgstr "" -#: cinder/compute/api.py:141 -msgid "No compute host specified" +#: cinder/image/image_utils.py:94 cinder/image/image_utils.py:199 +msgid "'qemu-img info' parsing failed." 
msgstr "" -#: cinder/compute/api.py:144 +#: cinder/image/image_utils.py:101 #, python-format -msgid "Unable to find host for Instance %s" +msgid "fmt=%(fmt)s backed by: %(backing_file)s" msgstr "" -#: cinder/compute/api.py:192 +#: cinder/image/image_utils.py:109 cinder/image/image_utils.py:192 #, python-format msgid "" -"Quota exceeded for %(pid)s, tried to set %(num_metadata)s metadata " -"properties" +"Size is %(image_size)dGB and doesn't fit in a volume of size " +"%(volume_size)dGB." msgstr "" -#: cinder/compute/api.py:203 +#: cinder/image/image_utils.py:157 #, python-format -msgid "Quota exceeded for %(pid)s, metadata property key or value too long" +msgid "" +"qemu-img is not installed and image is of type %s. Only RAW images can " +"be used if qemu-img is not installed." msgstr "" -#: cinder/compute/api.py:257 -msgid "Cannot run any more instances of this type." +#: cinder/image/image_utils.py:164 +msgid "" +"qemu-img is not installed and the disk format is not specified. Only RAW" +" images can be used if qemu-img is not installed." msgstr "" -#: cinder/compute/api.py:259 +#: cinder/image/image_utils.py:178 #, python-format -msgid "Can only run %s more instances of this type." +msgid "Copying image from %(tmp)s to volume %(dest)s - size: %(size)s" msgstr "" -#: cinder/compute/api.py:261 +#: cinder/image/image_utils.py:206 #, python-format -msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances. " +msgid "fmt=%(fmt)s backed by:%(backing_file)s" msgstr "" -#: cinder/compute/api.py:310 -msgid "Creating a raw instance" +#: cinder/image/image_utils.py:224 +#, python-format +msgid "Converted to %(vol_format)s, but format is now %(file_format)s" msgstr "" -#: cinder/compute/api.py:312 +#: cinder/image/image_utils.py:260 #, python-format -msgid "Using Kernel=%(kernel_id)s, Ramdisk=%(ramdisk_id)s" +msgid "Converted to %(f1)s, but format is now %(f2)s" msgstr "" -#: cinder/compute/api.py:383 -#, python-format -msgid "Going to run %s instances..." +#: cinder/keymgr/conf_key_mgr.py:78 +msgid "" +"config option keymgr.fixed_key has not been defined: some operations may " +"fail unexpectedly" msgstr "" -#: cinder/compute/api.py:447 -#, python-format -msgid "bdm %s" +#: cinder/keymgr/conf_key_mgr.py:80 +msgid "keymgr.fixed_key not defined" msgstr "" -#: cinder/compute/api.py:474 +#: cinder/keymgr/conf_key_mgr.py:134 #, python-format -msgid "block_device_mapping %s" +msgid "Not deleting key %s" msgstr "" -#: cinder/compute/api.py:591 +#: cinder/openstack/common/eventlet_backdoor.py:140 #, python-format -msgid "Sending create to scheduler for %(pid)s/%(uid)s's" +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" msgstr "" -#: cinder/compute/api.py:871 -msgid "Going to try to soft delete instance" +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" msgstr "" -#: cinder/compute/api.py:891 -msgid "No host for instance, deleting immediately" +#: cinder/openstack/common/fileutils.py:64 +#, python-format +msgid "Reloading cached file %s" msgstr "" -#: cinder/compute/api.py:939 -msgid "Going to try to terminate instance" +#: cinder/openstack/common/gettextutils.py:252 +msgid "Message objects do not support addition." msgstr "" -#: cinder/compute/api.py:977 -msgid "Going to try to stop instance" +#: cinder/openstack/common/gettextutils.py:261 +msgid "" +"Message objects do not support str() because they may contain non-ascii " +"characters. Please use unicode() or translate() instead." 
msgstr "" -#: cinder/compute/api.py:996 -msgid "Going to try to start instance" +#: cinder/openstack/common/imageutils.py:96 +msgid "Snapshot list encountered but no header found!" msgstr "" -#: cinder/compute/api.py:1000 +#: cinder/openstack/common/lockutils.py:102 #, python-format -msgid "Instance %(instance_uuid)s is not stopped. (%(vm_state)s" +msgid "Could not release the acquired lock `%s`" msgstr "" -#: cinder/compute/api.py:1071 cinder/volume/api.py:173 -#: cinder/volume/volume_types.py:64 +#: cinder/openstack/common/lockutils.py:189 #, python-format -msgid "Searching by: %s" +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." msgstr "" -#: cinder/compute/api.py:1201 +#: cinder/openstack/common/lockutils.py:200 #, python-format -msgid "Image type not recognized %s" -msgstr "" - -#: cinder/compute/api.py:1369 -msgid "flavor_id is None. Assuming migration." +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." msgstr "" -#: cinder/compute/api.py:1377 +#: cinder/openstack/common/lockutils.py:227 #, python-format -msgid "" -"Old instance type %(current_instance_type_name)s, new instance type " -"%(new_instance_type_name)s" +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." msgstr "" -#: cinder/compute/api.py:1644 +#: cinder/openstack/common/lockutils.py:235 #, python-format -msgid "multiple fixedips exist, using the first: %s" +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." msgstr "" -#: cinder/compute/instance_types.py:57 cinder/compute/instance_types.py:65 -msgid "create arguments must be positive integers" -msgstr "" +#: cinder/openstack/common/log.py:301 +#, fuzzy, python-format +msgid "Deprecated: %s" +msgstr "отримано %s" -#: cinder/compute/instance_types.py:76 cinder/volume/volume_types.py:41 +#: cinder/openstack/common/log.py:402 #, python-format -msgid "DB error: %s" +msgid "Error loading logging config %(log_config)s: %(err_msg)s" msgstr "" -#: cinder/compute/instance_types.py:86 +#: cinder/openstack/common/log.py:453 #, python-format -msgid "Instance type %s not found for deletion" +msgid "syslog facility must be one of: %s" msgstr "" -#: cinder/compute/manager.py:138 +#: cinder/openstack/common/log.py:623 #, python-format -msgid "check_instance_lock: decorating: |%s|" +msgid "Fatal call to deprecated config: %(msg)s" msgstr "" -#: cinder/compute/manager.py:140 +#: cinder/openstack/common/loopingcall.py:82 #, python-format -msgid "" -"check_instance_lock: arguments: |%(self)s| |%(context)s| " -"|%(instance_uuid)s|" +msgid "task run outlasted interval by %s sec" msgstr "" -#: cinder/compute/manager.py:144 -#, python-format -msgid "check_instance_lock: locked: |%s|" +#: cinder/openstack/common/loopingcall.py:89 +#: cinder/tests/brick/test_brick_connector.py:466 +msgid "in fixed duration looping call" msgstr "" -#: cinder/compute/manager.py:146 +#: cinder/openstack/common/loopingcall.py:129 #, python-format -msgid "check_instance_lock: admin: |%s|" +msgid "Dynamic looping call sleeping for %.02f seconds" msgstr "" -#: cinder/compute/manager.py:151 -#, python-format -msgid "check_instance_lock: executing: |%s|" +#: cinder/openstack/common/loopingcall.py:136 +msgid "in dynamic looping call" msgstr "" -#: cinder/compute/manager.py:155 +#: cinder/openstack/common/periodic_task.py:43 #, python-format -msgid "check_instance_lock: not executing |%s|" +msgid "Unexpected argument for periodic task creation: %(arg)s." 
msgstr "" -#: cinder/compute/manager.py:201 +#: cinder/openstack/common/periodic_task.py:134 #, python-format -msgid "Unable to load the virtualization driver: %s" +msgid "Skipping periodic task %(task)s because its interval is negative" msgstr "" -#: cinder/compute/manager.py:223 +#: cinder/openstack/common/periodic_task.py:139 #, python-format -msgid "" -"Instance %(instance_uuid)s has been destroyed from under us while trying " -"to set it to ERROR" +msgid "Skipping periodic task %(task)s because it is disabled" msgstr "" -#: cinder/compute/manager.py:240 +#: cinder/openstack/common/periodic_task.py:177 #, python-format -msgid "Current state is %(drv_state)s, state in DB is %(db_state)s." +msgid "Running periodic task %(full_task_name)s" msgstr "" -#: cinder/compute/manager.py:245 -msgid "Rebooting instance after cinder-compute restart." +#: cinder/openstack/common/periodic_task.py:186 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" msgstr "" -#: cinder/compute/manager.py:255 -msgid "Hypervisor driver does not support firewall rules" +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." msgstr "" -#: cinder/compute/manager.py:260 -msgid "Checking state" +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" msgstr "" -#: cinder/compute/manager.py:329 +#: cinder/openstack/common/policy.py:173 #, python-format -msgid "Setting up bdm %s" +msgid "Inheritance-based rules are deprecated; update _check_%s" msgstr "" -#: cinder/compute/manager.py:400 +#: cinder/openstack/common/policy.py:180 #, python-format -msgid "Instance %s already deleted from database. Attempting forceful vm deletion" +msgid "No handler for matches of kind %s" msgstr "" -#: cinder/compute/manager.py:406 +#: cinder/openstack/common/processutils.py:127 #, python-format -msgid "Exception encountered while terminating the instance %s" +msgid "Got unknown keyword args to utils.execute: %r" msgstr "" -#: cinder/compute/manager.py:444 +#: cinder/openstack/common/processutils.py:142 #, python-format -msgid "Instance %s not found." +msgid "Running cmd (subprocess): %s" msgstr "" -#: cinder/compute/manager.py:480 -msgid "Instance has already been created" +#: cinder/openstack/common/processutils.py:167 +#: cinder/openstack/common/processutils.py:239 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:345 +#, python-format +msgid "Result was %s" msgstr "" -#: cinder/compute/manager.py:523 +#: cinder/openstack/common/processutils.py:179 #, python-format -msgid "" -"image_id=%(image_id)s, image_size_bytes=%(size_bytes)d, " -"allowed_size_bytes=%(allowed_size_bytes)d" +msgid "%r failed. Retrying." msgstr "" -#: cinder/compute/manager.py:528 +#: cinder/openstack/common/processutils.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:318 #, python-format -msgid "" -"Image '%(image_id)s' size %(size_bytes)d exceeded instance_type allowed " -"size %(allowed_size_bytes)d" +msgid "Running cmd (SSH): %s" msgstr "" -#: cinder/compute/manager.py:538 -msgid "Starting instance..." 
+#: cinder/openstack/common/processutils.py:220 +msgid "Environment not supported over SSH" msgstr "" -#: cinder/compute/manager.py:548 -msgid "Skipping network allocation for instance" +#: cinder/openstack/common/processutils.py:224 +msgid "process_input not supported over SSH" msgstr "" -#: cinder/compute/manager.py:561 -msgid "Instance failed network setup" +#: cinder/openstack/common/service.py:175 +#: cinder/openstack/common/service.py:269 +#, python-format +msgid "Caught %s, exiting" msgstr "" -#: cinder/compute/manager.py:565 -#, python-format -msgid "Instance network_info: |%s|" +#: cinder/openstack/common/service.py:187 +msgid "Exception during rpc cleanup." msgstr "" -#: cinder/compute/manager.py:578 -msgid "Instance failed block device setup" +#: cinder/openstack/common/service.py:238 +msgid "Parent process has died unexpectedly, exiting" msgstr "" -#: cinder/compute/manager.py:594 -msgid "Instance failed to spawn" +#: cinder/openstack/common/service.py:275 +msgid "Unhandled exception" msgstr "" -#: cinder/compute/manager.py:615 -msgid "Deallocating network for instance" +#: cinder/openstack/common/service.py:308 +msgid "Forking too fast, sleeping" msgstr "" -#: cinder/compute/manager.py:672 +#: cinder/openstack/common/service.py:327 #, python-format -msgid "%(action_str)s instance" +msgid "Started child %d" msgstr "" -#: cinder/compute/manager.py:699 +#: cinder/openstack/common/service.py:337 #, python-format -msgid "Ignoring DiskNotFound: %s" +msgid "Starting %d workers" msgstr "" -#: cinder/compute/manager.py:708 +#: cinder/openstack/common/service.py:354 #, python-format -msgid "terminating bdm %s" +msgid "Child %(pid)d killed by signal %(sig)d" msgstr "" -#: cinder/compute/manager.py:742 cinder/compute/manager.py:1328 -#: cinder/compute/manager.py:1416 cinder/compute/manager.py:2501 +#: cinder/openstack/common/service.py:358 #, python-format -msgid "%s. Setting instance vm_state to ERROR" +msgid "Child %(pid)s exited with status %(code)d" msgstr "" -#: cinder/compute/manager.py:811 +#: cinder/openstack/common/service.py:362 #, python-format -msgid "" -"Cannot rebuild instance [%(instance_uuid)s], because the given image does" -" not exist." 
+msgid "pid %d not in child list" msgstr "" -#: cinder/compute/manager.py:816 +#: cinder/openstack/common/service.py:392 #, python-format -msgid "Cannot rebuild instance [%(instance_uuid)s]: %(exc)s" +msgid "Caught %s, stopping children" msgstr "" -#: cinder/compute/manager.py:823 +#: cinder/openstack/common/service.py:410 #, python-format -msgid "Rebuilding instance %s" +msgid "Waiting on %d children to exit" msgstr "" -#: cinder/compute/manager.py:876 +#: cinder/openstack/common/sslutils.py:98 #, python-format -msgid "Rebooting instance %s" +msgid "Invalid SSL version : %s" msgstr "" -#: cinder/compute/manager.py:891 +#: cinder/openstack/common/strutils.py:86 #, python-format -msgid "" -"trying to reboot a non-running instance: %(instance_uuid)s (state: " -"%(state)s expected: %(running)s)" +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" msgstr "" -#: cinder/compute/manager.py:933 +#: cinder/openstack/common/strutils.py:182 #, python-format -msgid "instance %s: snapshotting" +msgid "Invalid string format: %s" msgstr "" -#: cinder/compute/manager.py:939 +#: cinder/openstack/common/strutils.py:189 #, python-format -msgid "" -"trying to snapshot a non-running instance: %(instance_uuid)s (state: " -"%(state)s expected: %(running)s)" +msgid "Unknown byte multiplier: %s" msgstr "" -#: cinder/compute/manager.py:995 +#: cinder/openstack/common/versionutils.py:69 #, python-format -msgid "Found %(num_images)d images (rotation: %(rotation)d)" +msgid "" +"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and " +"may be removed in %(remove_in)s." msgstr "" -#: cinder/compute/manager.py:1001 +#: cinder/openstack/common/versionutils.py:73 #, python-format -msgid "Rotating out %d backups" +msgid "" +"%(what)s is deprecated as of %(as_of)s and may be removed in " +"%(remove_in)s. It will not be superseded." msgstr "" -#: cinder/compute/manager.py:1005 -#, python-format -msgid "Deleting image %s" +#: cinder/openstack/common/crypto/utils.py:29 +msgid "An unknown error occurred in crypto utils." msgstr "" -#: cinder/compute/manager.py:1035 +#: cinder/openstack/common/crypto/utils.py:36 #, python-format -msgid "Failed to set admin password. Instance %s is not running" +msgid "Block size of %(given)d is too big, max = %(maximum)d" msgstr "" -#: cinder/compute/manager.py:1041 +#: cinder/openstack/common/crypto/utils.py:45 #, python-format -msgid "Instance %s: Root password set" +msgid "Length of %(given)d is too long, max = %(maximum)d" msgstr "" -#: cinder/compute/manager.py:1050 -msgid "set_admin_password is not implemented by this driver." +#: cinder/openstack/common/db/exception.py:44 +msgid "Invalid Parameter: Unicode is not supported by the current database." msgstr "" -#: cinder/compute/manager.py:1064 -msgid "Error setting admin password" +#: cinder/openstack/common/db/sqlalchemy/session.py:487 +msgid "DB exception wrapped." msgstr "" -#: cinder/compute/manager.py:1079 +#: cinder/openstack/common/db/sqlalchemy/session.py:538 #, python-format -msgid "" -"trying to inject a file into a non-running instance: %(instance_uuid)s " -"(state: %(current_power_state)s expected: %(expected_state)s)" +msgid "Got mysql server has gone away: %s" msgstr "" -#: cinder/compute/manager.py:1084 +#: cinder/openstack/common/db/sqlalchemy/session.py:610 #, python-format -msgid "instance %(instance_uuid)s: injecting file to %(path)s" +msgid "SQL connection failed. %s attempts left." 
msgstr "" -#: cinder/compute/manager.py:1098 -#, python-format -msgid "" -"trying to update agent on a non-running instance: %(instance_uuid)s " -"(state: %(current_power_state)s expected: %(expected_state)s)" +#: cinder/openstack/common/db/sqlalchemy/utils.py:33 +msgid "Sort key supplied was not valid." msgstr "" -#: cinder/compute/manager.py:1103 +#: cinder/openstack/common/notifier/api.py:129 #, python-format -msgid "instance %(instance_uuid)s: updating agent to %(url)s" +msgid "%s not in valid priorities" msgstr "" -#: cinder/compute/manager.py:1116 +#: cinder/openstack/common/notifier/api.py:145 #, python-format -msgid "instance %s: rescuing" +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" msgstr "" -#: cinder/compute/manager.py:1141 +#: cinder/openstack/common/notifier/api.py:164 #, python-format -msgid "instance %s: unrescuing" +msgid "Failed to load notifier %s. These notifications will not be sent." msgstr "" -#: cinder/compute/manager.py:1270 -msgid "destination same as source!" +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead." msgstr "" -#: cinder/compute/manager.py:1287 +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 #, python-format -msgid "instance %s: migrating" +msgid "Could not send notification to %(topic)s. Payload=%(message)s" msgstr "" -#: cinder/compute/manager.py:1471 +#: cinder/openstack/common/rpc/__init__.py:105 #, python-format -msgid "instance %s: pausing" +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." msgstr "" -#: cinder/compute/manager.py:1489 -#, python-format -msgid "instance %s: unpausing" +#: cinder/openstack/common/rpc/amqp.py:83 +msgid "Pool creating new connection" msgstr "" -#: cinder/compute/manager.py:1525 +#: cinder/openstack/common/rpc/amqp.py:208 #, python-format -msgid "instance %s: retrieving diagnostics" +msgid "no calling threads waiting for msg_id : %s, message : %s" msgstr "" -#: cinder/compute/manager.py:1534 +#: cinder/openstack/common/rpc/amqp.py:216 #, python-format -msgid "instance %s: suspending" +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." msgstr "" -#: cinder/compute/manager.py:1556 +#: cinder/openstack/common/rpc/amqp.py:299 #, python-format -msgid "instance %s: resuming" +msgid "unpacked context: %s" msgstr "" -#: cinder/compute/manager.py:1579 +#: cinder/openstack/common/rpc/amqp.py:345 #, python-format -msgid "instance %s: locking" +msgid "UNIQUE_ID is %s." 
msgstr "" -#: cinder/compute/manager.py:1588 +#: cinder/openstack/common/rpc/amqp.py:414 #, python-format -msgid "instance %s: unlocking" -msgstr "" +msgid "received %s" +msgstr "отримано %s" -#: cinder/compute/manager.py:1596 +#: cinder/openstack/common/rpc/amqp.py:422 #, python-format -msgid "instance %s: getting locked state" -msgstr "" +msgid "no method for message: %s" +msgstr "без порядку для повідомлень: %s" -#: cinder/compute/manager.py:1606 +#: cinder/openstack/common/rpc/amqp.py:423 #, python-format -msgid "instance %s: reset network" -msgstr "" +msgid "No method for message: %s" +msgstr "Без порядку для повідомлень: %s" -#: cinder/compute/manager.py:1614 +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:280 #, python-format -msgid "instance %s: inject network info" +msgid "Expected exception during message handling (%s)" msgstr "" -#: cinder/compute/manager.py:1618 -#, python-format -msgid "network_info to inject: |%s|" +#: cinder/openstack/common/rpc/amqp.py:459 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" msgstr "" -#: cinder/compute/manager.py:1655 +#: cinder/openstack/common/rpc/amqp.py:594 #, python-format -msgid "instance %s: getting vnc console" +msgid "Making synchronous call on %s ..." msgstr "" -#: cinder/compute/manager.py:1685 +#: cinder/openstack/common/rpc/amqp.py:597 #, python-format -msgid "Booting with volume %(volume_id)s at %(mountpoint)s" -msgstr "" +msgid "MSG_ID is %s" +msgstr "MSG_ID %s" -#: cinder/compute/manager.py:1703 +#: cinder/openstack/common/rpc/amqp.py:631 #, python-format -msgid "" -"instance %(instance_uuid)s: attaching volume %(volume_id)s to " -"%(mountpoint)s" +msgid "Making asynchronous cast on %s..." msgstr "" -#: cinder/compute/manager.py:1705 -#, python-format -msgid "Attaching volume %(volume_id)s to %(mountpoint)s" +#: cinder/openstack/common/rpc/amqp.py:640 +msgid "Making asynchronous fanout cast..." msgstr "" -#: cinder/compute/manager.py:1714 +#: cinder/openstack/common/rpc/amqp.py:668 #, python-format -msgid "instance %(instance_uuid)s: attach failed %(mountpoint)s, removing" +msgid "Sending %(event_type)s on %(topic)s" msgstr "" -#: cinder/compute/manager.py:1724 -#, python-format -msgid "Attach failed %(mountpoint)s, removing" +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." msgstr "" -#: cinder/compute/manager.py:1752 +#: cinder/openstack/common/rpc/common.py:104 #, python-format -msgid "Detach volume %(volume_id)s from mountpoint %(mp)s" +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." msgstr "" -#: cinder/compute/manager.py:1756 +#: cinder/openstack/common/rpc/common.py:121 #, python-format -msgid "Detaching volume from unknown instance %s" +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" msgstr "" -#: cinder/compute/manager.py:1822 +#: cinder/openstack/common/rpc/common.py:143 #, python-format -msgid "" -"Creating tmpfile %s to notify to other compute nodes that they should " -"mount the same storage." +msgid "Found duplicate message(%(msg_id)s). Skipping it." msgstr "" -#: cinder/compute/manager.py:1884 -msgid "Instance has no volume." +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." 
msgstr "" -#: cinder/compute/manager.py:1916 +#: cinder/openstack/common/rpc/common.py:151 #, python-format -msgid "plug_vifs() failed %(cnt)d.Retry up to %(max_retry)d for %(hostname)s." +msgid "Specified RPC version, %(version)s, not supported by this endpoint." msgstr "" -#: cinder/compute/manager.py:1973 +#: cinder/openstack/common/rpc/common.py:156 #, python-format -msgid "Pre live migration failed at %(dest)s" +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." msgstr "" -#: cinder/compute/manager.py:2000 -msgid "post_live_migration() is started.." +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" msgstr "" -#: cinder/compute/manager.py:2030 -msgid "No floating_ip found" +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" msgstr "" -#: cinder/compute/manager.py:2038 -msgid "No floating_ip found." +#: cinder/openstack/common/rpc/impl_kombu.py:166 +#: cinder/openstack/common/rpc/impl_qpid.py:164 +msgid "Failed to process message... skipping it." msgstr "" -#: cinder/compute/manager.py:2040 +#: cinder/openstack/common/rpc/impl_kombu.py:477 #, python-format -msgid "" -"Live migration: Unexpected error: cannot inherit floating ip.\n" -"%(e)s" +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" msgstr "" -#: cinder/compute/manager.py:2073 +#: cinder/openstack/common/rpc/impl_kombu.py:499 #, python-format -msgid "Migrating instance to %(dest)s finished successfully." +msgid "Connected to AMQP server on %(hostname)s:%(port)d" msgstr "" -#: cinder/compute/manager.py:2075 +#: cinder/openstack/common/rpc/impl_kombu.py:536 +#, python-format msgid "" -"You may see the error \"libvirt: QEMU error: Domain not found: no domain " -"with matching name.\" This error can be safely ignored." +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" msgstr "" -#: cinder/compute/manager.py:2090 -msgid "Post operation of migration started" +#: cinder/openstack/common/rpc/impl_kombu.py:552 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." msgstr "" -#: cinder/compute/manager.py:2226 +#: cinder/openstack/common/rpc/impl_kombu.py:606 +#: cinder/openstack/common/rpc/impl_qpid.py:507 #, python-format -msgid "Updated the info_cache for instance %s" +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" msgstr "" -#: cinder/compute/manager.py:2255 -msgid "Updating bandwidth usage cache" +#: cinder/openstack/common/rpc/impl_kombu.py:624 +#: cinder/openstack/common/rpc/impl_qpid.py:522 +#, python-format +msgid "Timed out waiting for RPC response: %s" msgstr "" -#: cinder/compute/manager.py:2277 -msgid "Updating host status" +#: cinder/openstack/common/rpc/impl_kombu.py:628 +#: cinder/openstack/common/rpc/impl_qpid.py:526 +#, python-format +msgid "Failed to consume message from queue: %s" msgstr "" -#: cinder/compute/manager.py:2305 +#: cinder/openstack/common/rpc/impl_kombu.py:667 +#: cinder/openstack/common/rpc/impl_qpid.py:561 #, python-format -msgid "" -"Found %(num_db_instances)s in the database and %(num_vm_instances)s on " -"the hypervisor." 
+msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" msgstr "" -#: cinder/compute/manager.py:2331 +#: cinder/openstack/common/rpc/impl_qpid.py:84 #, python-format -msgid "" -"During the sync_power process the instance %(uuid)s has moved from host " -"%(src)s to host %(dst)s" +msgid "Invalid value for qpid_topology_version: %d" msgstr "" -#: cinder/compute/manager.py:2344 +#: cinder/openstack/common/rpc/impl_qpid.py:455 #, python-format -msgid "" -"Instance %s is in the process of migrating to this host. Wait next " -"sync_power cycle before setting power state to NOSTATE" +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" msgstr "" -#: cinder/compute/manager.py:2350 -msgid "" -"Instance found in database but not known by hypervisor. Setting power " -"state to NOSTATE" +#: cinder/openstack/common/rpc/impl_qpid.py:461 +#, python-format +msgid "Connected to AMQP server on %s" msgstr "" -#: cinder/compute/manager.py:2380 -msgid "FLAGS.reclaim_instance_interval <= 0, skipping..." +#: cinder/openstack/common/rpc/impl_qpid.py:474 +msgid "Re-established AMQP queues" msgstr "" -#: cinder/compute/manager.py:2392 -msgid "Reclaiming deleted instance" +#: cinder/openstack/common/rpc/impl_qpid.py:534 +msgid "Error processing message. Skipping it." msgstr "" -#: cinder/compute/manager.py:2458 -#, python-format -msgid "" -"Detected instance with name label '%(name)s' which is marked as DELETED " -"but still present on host." +#: cinder/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." msgstr "" -#: cinder/compute/manager.py:2465 +#: cinder/openstack/common/rpc/impl_zmq.py:101 #, python-format -msgid "" -"Destroying instance with name label '%(name)s' which is marked as DELETED" -" but still present on host." +msgid "Deserializing: %s" msgstr "" -#: cinder/compute/manager.py:2472 +#: cinder/openstack/common/rpc/impl_zmq.py:136 #, python-format -msgid "Unrecognized value '%(action)s' for FLAGS.running_deleted_instance_action" +msgid "Connecting to %(addr)s with %(type)s" msgstr "" -#: cinder/compute/manager.py:2542 +#: cinder/openstack/common/rpc/impl_zmq.py:137 #, python-format -msgid "" -"Aggregate %(aggregate_id)s: unrecoverable state during operation on " -"%(host)s" -msgstr "" - -#: cinder/compute/utils.py:142 -msgid "v4 subnets are required for legacy nw_info" -msgstr "" - -#: cinder/console/manager.py:77 cinder/console/vmrc_manager.py:70 -msgid "Adding console" +msgid "-> Subscribed to %(subscribe)s" msgstr "" -#: cinder/console/manager.py:97 +#: cinder/openstack/common/rpc/impl_zmq.py:138 #, python-format -msgid "Tried to remove non-existent console %(console_id)s." +msgid "-> bind: %(bind)s" msgstr "" -#: cinder/console/vmrc_manager.py:122 -#, python-format -msgid "Tried to remove non-existent console %(console_id)s." +#: cinder/openstack/common/rpc/impl_zmq.py:146 +msgid "Could not open socket." msgstr "" -#: cinder/console/vmrc_manager.py:125 +#: cinder/openstack/common/rpc/impl_zmq.py:158 #, python-format -msgid "Removing console %(console_id)s." +msgid "Subscribing to %s" msgstr "" -#: cinder/console/xvp.py:98 -msgid "Rebuilding xvp conf" +#: cinder/openstack/common/rpc/impl_zmq.py:200 +msgid "You cannot recv on this socket." msgstr "" -#: cinder/console/xvp.py:116 -#, python-format -msgid "Re-wrote %s" +#: cinder/openstack/common/rpc/impl_zmq.py:205 +msgid "You cannot send on this socket." 
msgstr "" -#: cinder/console/xvp.py:121 -msgid "Stopping xvp" +#: cinder/openstack/common/rpc/impl_zmq.py:267 +#, python-format +msgid "Running func with context: %s" msgstr "" -#: cinder/console/xvp.py:134 -msgid "Starting xvp" +#: cinder/openstack/common/rpc/impl_zmq.py:305 +msgid "Sending reply" msgstr "" -#: cinder/console/xvp.py:141 -#, python-format -msgid "Error starting xvp: %s" +#: cinder/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." msgstr "" -#: cinder/console/xvp.py:144 -msgid "Restarting xvp" +#: cinder/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" msgstr "" -#: cinder/console/xvp.py:146 -msgid "xvp not running..." +#: cinder/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" msgstr "" -#: cinder/consoleauth/manager.py:63 -#, python-format -msgid "Deleting Expired Token: (%s)" +#: cinder/openstack/common/rpc/impl_zmq.py:387 +msgid "Consuming socket" msgstr "" -#: cinder/consoleauth/manager.py:75 +#: cinder/openstack/common/rpc/impl_zmq.py:437 #, python-format -msgid "Received Token: %(token)s, %(token_dict)s)" +msgid "Creating proxy for topic: %s" msgstr "" -#: cinder/consoleauth/manager.py:79 -#, python-format -msgid "Checking Token: %(token)s, %(token_valid)s)" +#: cinder/openstack/common/rpc/impl_zmq.py:443 +msgid "Topic contained dangerous characters." msgstr "" -#: cinder/db/sqlalchemy/api.py:57 -msgid "Use of empty request context is deprecated" +#: cinder/openstack/common/rpc/impl_zmq.py:475 +msgid "Topic socket file creation failed." msgstr "" -#: cinder/db/sqlalchemy/api.py:198 +#: cinder/openstack/common/rpc/impl_zmq.py:481 #, python-format -msgid "Unrecognized read_deleted value '%s'" +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." msgstr "" -#: cinder/db/sqlalchemy/api.py:516 cinder/db/sqlalchemy/api.py:551 +#: cinder/openstack/common/rpc/impl_zmq.py:497 #, python-format -msgid "No ComputeNode for %(host)s" +msgid "Required IPC directory does not exist at %s" msgstr "" -#: cinder/db/sqlalchemy/api.py:4019 cinder/db/sqlalchemy/api.py:4045 +#: cinder/openstack/common/rpc/impl_zmq.py:506 #, python-format -msgid "No backend config with id %(sm_backend_id)s" +msgid "Permission denied to IPC directory at %s" msgstr "" -#: cinder/db/sqlalchemy/api.py:4103 -#, python-format -msgid "No sm_flavor called %(sm_flavor)s" +#: cinder/openstack/common/rpc/impl_zmq.py:509 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." msgstr "" -#: cinder/db/sqlalchemy/api.py:4147 +#: cinder/openstack/common/rpc/impl_zmq.py:543 #, python-format -msgid "No sm_volume with id %(volume_id)s" +msgid "CONSUMER RECEIVED DATA: %s" msgstr "" -#: cinder/db/sqlalchemy/migration.py:66 -msgid "python-migrate is not installed. Exiting." +#: cinder/openstack/common/rpc/impl_zmq.py:562 +msgid "ZMQ Envelope version unsupported or unknown." msgstr "" -#: cinder/db/sqlalchemy/migration.py:78 -msgid "version should be an integer" +#: cinder/openstack/common/rpc/impl_zmq.py:590 +msgid "Skipping topic registration. Already registered." msgstr "" -#: cinder/db/sqlalchemy/session.py:137 +#: cinder/openstack/common/rpc/impl_zmq.py:597 #, python-format -msgid "SQL connection failed. %s attempts left." 
+msgid "Consumer is a zmq.%s" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:48 -msgid "interface column not added to networks table" +#: cinder/openstack/common/rpc/impl_zmq.py:649 +msgid "Creating payload" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:80 -#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:54 -#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:61 -#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:48 -#, python-format -msgid "Table |%s| not created!" +#: cinder/openstack/common/rpc/impl_zmq.py:662 +msgid "Creating queue socket for reply waiter" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:87 -msgid "VIF column not added to fixed_ips table" +#: cinder/openstack/common/rpc/impl_zmq.py:675 +msgid "Sending cast" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:97 -#, python-format -msgid "join list for moving mac_addresses |%s|" +#: cinder/openstack/common/rpc/impl_zmq.py:678 +msgid "Cast sent; Waiting reply" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:39 -#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:60 -#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:61 -#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:99 -msgid "foreign key constraint couldn't be added" -msgstr "" +#: cinder/openstack/common/rpc/impl_zmq.py:681 +#, fuzzy, python-format +msgid "Received message: %s" +msgstr "отримано %s" -#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:58 -msgid "foreign key constraint couldn't be dropped" +#: cinder/openstack/common/rpc/impl_zmq.py:682 +msgid "Unpacking response" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/045_add_network_priority.py:34 -msgid "priority column not added to networks table" +#: cinder/openstack/common/rpc/impl_zmq.py:691 +msgid "Unsupported or unknown ZMQ envelope returned." msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:41 -#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:42 -#: cinder/db/sqlalchemy/migrate_repo/versions/064_change_instance_id_to_uuid_in_instance_actions.py:56 -#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:68 -msgid "foreign key constraint couldn't be removed" +#: cinder/openstack/common/rpc/impl_zmq.py:698 +msgid "RPC Message Invalid." msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/049_add_instances_progress.py:34 -msgid "progress column not added to instances table" +#: cinder/openstack/common/rpc/impl_zmq.py:721 +#, python-format +msgid "%(msg)s" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/055_convert_flavor_id_to_str.py:97 +#: cinder/openstack/common/rpc/impl_zmq.py:724 #, python-format -msgid "" -"Could not cast flavorid to integer: %s. Set flavorid to an integer-like " -"string to downgrade." +msgid "Sending message(s) to: %s" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:69 -msgid "instance_info_caches tables not dropped" +#: cinder/openstack/common/rpc/impl_zmq.py:728 +msgid "No matchmaker results. Not casting." 
msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/069_block_migration.py:41 -msgid "progress column not added to compute_nodes table" +#: cinder/openstack/common/rpc/impl_zmq.py:731 +msgid "No match from matchmaker." msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:76 -msgid "dns_domains table not dropped" +#: cinder/openstack/common/rpc/impl_zmq.py:771 +#, python-format +msgid "topic is %s." msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:60 -msgid "quota_classes table not dropped" +#: cinder/openstack/common/rpc/impl_zmq.py:815 +#, python-format +msgid "rpc_zmq_matchmaker = %(orig)s is deprecated; use %(new)s instead" msgstr "" -#: cinder/image/glance.py:147 -msgid "Connection error contacting glance server, retrying" +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." msgstr "" -#: cinder/image/glance.py:153 cinder/network/quantum/melange_connection.py:104 -msgid "Maximum attempts reached" +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." msgstr "" -#: cinder/image/glance.py:278 +#: cinder/openstack/common/rpc/matchmaker.py:239 #, python-format -msgid "Creating image in Glance. Metadata passed in %s" +msgid "Matchmaker unregistered: %s, %s" msgstr "" -#: cinder/image/glance.py:281 -#, python-format -msgid "Metadata after formatting for Glance %s" +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." msgstr "" -#: cinder/image/glance.py:289 +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#: cinder/openstack/common/rpc/matchmaker_ring.py:79 +#: cinder/openstack/common/rpc/matchmaker_ring.py:97 #, python-format -msgid "Metadata returned from Glance formatted for Base %s" +msgid "No key defining hosts for topic '%s', see ringfile" msgstr "" -#: cinder/image/glance.py:331 cinder/image/glance.py:335 -msgid "Not the image owner" +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:54 +#, python-format +msgid "extra_spec requirement '%(req)s' does not match '%(cap)s'" msgstr "" -#: cinder/image/glance.py:410 +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:67 #, python-format -msgid "%(timestamp)s does not follow any of the signatures: %(iso_formats)s" +msgid "%(host_state)s fails resource_type extra_specs requirements" msgstr "" -#: cinder/image/s3.py:309 -#, python-format -msgid "Failed to download %(image_location)s to %(image_path)s" +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:43 +msgid "Re-scheduling is disabled." msgstr "" -#: cinder/image/s3.py:328 +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:52 #, python-format -msgid "Failed to decrypt %(image_location)s to %(image_path)s" +msgid "Host %(host)s %(pass_msg)s. 
Previously tried hosts: %(hosts)s" msgstr "" -#: cinder/image/s3.py:340 -#, python-format -msgid "Failed to untar %(image_location)s to %(image_path)s" +#: cinder/scheduler/driver.py:69 +msgid "Must implement host_passes_filters" msgstr "" -#: cinder/image/s3.py:353 -#, python-format -msgid "Failed to upload %(image_location)s to %(image_path)s" +#: cinder/scheduler/driver.py:74 +msgid "Must implement find_retype_host" +msgstr "" + +#: cinder/scheduler/driver.py:78 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:82 +msgid "Must implement schedule_create_volume" msgstr "" -#: cinder/image/s3.py:379 +#: cinder/scheduler/filter_scheduler.py:98 #, python-format -msgid "Failed to decrypt private key: %s" +msgid "cannot place volume %(id)s on %(host)s" msgstr "" -#: cinder/image/s3.py:387 +#: cinder/scheduler/filter_scheduler.py:114 #, python-format -msgid "Failed to decrypt initialization vector: %s" +msgid "No valid hosts for volume %(id)s with type %(type)s" msgstr "" -#: cinder/image/s3.py:398 +#: cinder/scheduler/filter_scheduler.py:125 #, python-format -msgid "Failed to decrypt image file %(image_file)s: %(err)s" +msgid "" +"Current host not valid for volume %(id)s with type %(type)s, migration " +"not allowed" msgstr "" -#: cinder/image/s3.py:410 -msgid "Unsafe filenames in image" +#: cinder/scheduler/filter_scheduler.py:156 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" msgstr "" -#: cinder/ipv6/account_identifier.py:38 cinder/ipv6/rfc2462.py:34 +#: cinder/scheduler/filter_scheduler.py:174 #, python-format -msgid "Bad mac for to_global_ipv6: %s" +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" msgstr "" -#: cinder/ipv6/account_identifier.py:40 cinder/ipv6/rfc2462.py:36 +#: cinder/scheduler/filter_scheduler.py:207 #, python-format -msgid "Bad prefix for to_global_ipv6: %s" +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" msgstr "" -#: cinder/ipv6/account_identifier.py:42 +#: cinder/scheduler/filter_scheduler.py:259 #, python-format -msgid "Bad project_id for to_global_ipv6: %s" +msgid "Filtered %s" msgstr "" -#: cinder/network/ldapdns.py:321 -msgid "This driver only supports type 'a' entries." +#: cinder/scheduler/filter_scheduler.py:276 +#, python-format +msgid "Choosing %s" msgstr "" -#: cinder/network/linux_net.py:166 +#: cinder/scheduler/host_manager.py:264 #, python-format -msgid "Attempted to remove chain %s which does not exist" +msgid "Ignoring %(service_name)s service update from %(host)s" msgstr "" -#: cinder/network/linux_net.py:192 +#: cinder/scheduler/host_manager.py:269 #, python-format -msgid "Unknown chain: %r" +msgid "Received %(service_name)s service update from %(host)s." msgstr "" -#: cinder/network/linux_net.py:215 +#: cinder/scheduler/host_manager.py:294 #, python-format -msgid "" -"Tried to remove rule that was not there: %(chain)r %(rule)r %(wrap)r " -"%(top)r" +msgid "volume service is down or disabled. (host: %s)" msgstr "" -#: cinder/network/linux_net.py:335 -msgid "IPTablesManager.apply completed with success" +#: cinder/scheduler/manager.py:63 +msgid "" +"ChanceScheduler and SimpleScheduler have been deprecated due to lack of " +"support for advanced features like: volume types, volume encryption, QoS " +"etc. These two schedulers can be fully replaced by FilterScheduler with " +"certain combination of filters and weighers." 
msgstr "" -#: cinder/network/linux_net.py:694 -#, python-format -msgid "Hupping dnsmasq threw %s" +#: cinder/scheduler/manager.py:98 cinder/scheduler/manager.py:100 +msgid "Failed to create scheduler manager volume flow" msgstr "" -#: cinder/network/linux_net.py:696 -#, python-format -msgid "Pid %d is stale, relaunching dnsmasq" +#: cinder/scheduler/manager.py:159 +msgid "New volume type not specified in request_spec." msgstr "" -#: cinder/network/linux_net.py:756 +#: cinder/scheduler/manager.py:174 #, python-format -msgid "killing radvd threw %s" +msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." msgstr "" -#: cinder/network/linux_net.py:758 +#: cinder/scheduler/manager.py:192 #, python-format -msgid "Pid %d is stale, relaunching radvd" +msgid "Failed to schedule_%(method)s: %(ex)s" msgstr "" -#: cinder/network/linux_net.py:967 +#: cinder/scheduler/scheduler_options.py:68 #, python-format -msgid "Starting VLAN inteface %s" +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" msgstr "" -#: cinder/network/linux_net.py:999 +#: cinder/scheduler/scheduler_options.py:78 #, python-format -msgid "Starting Bridge interface for %s" +msgid "Could not decode scheduler options: '%s'" msgstr "" -#: cinder/network/linux_net.py:1142 -#, python-format -msgid "Starting bridge %s " +#: cinder/scheduler/filters/capacity_filter.py:43 +msgid "Free capacity not set: volume node info collection broken." msgstr "" -#: cinder/network/linux_net.py:1149 +#: cinder/scheduler/filters/capacity_filter.py:57 #, python-format -msgid "Done starting bridge %s" +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" msgstr "" -#: cinder/network/linux_net.py:1167 -#, python-format -msgid "Failed unplugging gateway interface '%s'" +#: cinder/scheduler/flows/create_volume.py:53 +msgid "No volume_id provided to populate a request_spec from" msgstr "" -#: cinder/network/linux_net.py:1170 +#: cinder/scheduler/flows/create_volume.py:116 #, python-format -msgid "Unplugged gateway interface '%s'" +msgid "Failed to schedule_create_volume: %(cause)s" msgstr "" -#: cinder/network/manager.py:291 +#: cinder/scheduler/flows/create_volume.py:135 #, python-format -msgid "Fixed ip %(fixed_ip_id)s not found" +msgid "Failed notifying on %(topic)s payload %(payload)s" msgstr "" -#: cinder/network/manager.py:300 cinder/network/manager.py:496 +#: cinder/tests/fake_driver.py:57 cinder/volume/driver.py:784 #, python-format -msgid "Interface %(interface)s not found" +msgid "FAKE ISCSI: %s" msgstr "" -#: cinder/network/manager.py:315 +#: cinder/tests/fake_driver.py:76 cinder/volume/driver.py:884 #, python-format -msgid "floating IP allocation for instance |%s|" +msgid "FAKE ISER: %s" msgstr "" -#: cinder/network/manager.py:353 -#, python-format -msgid "floating IP deallocation for instance |%s|" +#: cinder/tests/fake_driver.py:97 +msgid "local_path not implemented" msgstr "" -#: cinder/network/manager.py:386 +#: cinder/tests/fake_driver.py:124 cinder/tests/fake_driver.py:129 #, python-format -msgid "Address |%(address)s| is not allocated" +msgid "LoggingVolumeDriver: %s" msgstr "" -#: cinder/network/manager.py:390 +#: cinder/tests/fake_utils.py:70 #, python-format -msgid "Address |%(address)s| is not allocated to your project |%(project)s|" +msgid "Faking execution of cmd (subprocess): %s" msgstr "" -#: cinder/network/manager.py:402 +#: cinder/tests/fake_utils.py:78 #, python-format -msgid "Quota exceeded for %s, tried to allocate address" +msgid "Faked 
command matched %s" msgstr "" -#: cinder/network/manager.py:614 +#: cinder/tests/fake_utils.py:94 #, python-format -msgid "" -"Database inconsistency: DNS domain |%s| is registered in the Cinder db but " -"not visible to either the floating or instance DNS driver. It will be " -"ignored." +msgid "Faked command raised an exception %s" msgstr "" -#: cinder/network/manager.py:660 +#: cinder/tests/fake_utils.py:97 #, python-format -msgid "Domain |%(domain)s| already exists, changing zone to |%(av_zone)s|." +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" msgstr "" -#: cinder/network/manager.py:670 +#: cinder/tests/test_misc.py:58 #, python-format -msgid "Domain |%(domain)s| already exists, changing project to |%(project)s|." +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" msgstr "" -#: cinder/network/manager.py:778 +#: cinder/tests/test_netapp_nfs.py:360 #, python-format -msgid "Disassociated %s stale fixed ip(s)" +msgid "Share %(share)s and file name %(file_name)s" msgstr "" -#: cinder/network/manager.py:782 -msgid "setting network host" +#: cinder/tests/test_rbd.py:768 cinder/volume/drivers/rbd.py:175 +msgid "flush() not supported in this version of librbd" msgstr "" -#: cinder/network/manager.py:896 +#: cinder/tests/test_storwize_svc.py:260 #, python-format -msgid "network allocations for instance |%s|" +msgid "unrecognized argument %s" msgstr "" -#: cinder/network/manager.py:901 +#: cinder/tests/test_storwize_svc.py:1507 #, python-format -msgid "networks retrieved for instance |%(instance_id)s|: |%(networks)s|" +msgid "Run CLI command: %s" msgstr "" -#: cinder/network/manager.py:930 +#: cinder/tests/test_storwize_svc.py:1510 #, python-format -msgid "network deallocation for instance |%s|" +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" msgstr "" -#: cinder/network/manager.py:1152 +#: cinder/tests/test_storwize_svc.py:1515 #, python-format msgid "" -"instance-dns-zone is |%(domain)s|, which is in availability zone " -"|%(zone)s|. Instance |%(instance)s| is in zone |%(zone2)s|. No DNS record" -" will be created." +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/network/manager.py:1227 +#: cinder/tests/test_volume_types.py:60 #, python-format -msgid "Unable to release %s because vif doesn't exist." +msgid "Given data: %s" msgstr "" -#: cinder/network/manager.py:1244 +#: cinder/tests/test_volume_types.py:61 #, python-format -msgid "Leased IP |%(address)s|" +msgid "Result data: %s" msgstr "" -#: cinder/network/manager.py:1248 +#: cinder/tests/test_xiv_ds8k.py:102 #, python-format -msgid "IP %s leased that is not associated" +msgid "Volume not found for instance %(instance_id)s." 
msgstr "" -#: cinder/network/manager.py:1256 -#, python-format -msgid "IP |%s| leased that isn't allocated" +#: cinder/tests/api/contrib/test_backups.py:741 +msgid "Invalid input" msgstr "" -#: cinder/network/manager.py:1261 +#: cinder/tests/integrated/test_login.py:29 +#, fuzzy, python-format +msgid "volume: %s" +msgstr "Від'єднати том %s" + +#: cinder/tests/integrated/api/client.py:32 #, python-format -msgid "Released IP |%(address)s|" +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" msgstr "" -#: cinder/network/manager.py:1265 -#, python-format -msgid "IP %s released that is not associated" +#: cinder/tests/integrated/api/client.py:42 +msgid "Authentication error" msgstr "" -#: cinder/network/manager.py:1268 -#, python-format -msgid "IP %s released that was not leased" +#: cinder/tests/integrated/api/client.py:50 +msgid "Authorization error" msgstr "" -#: cinder/network/manager.py:1331 -msgid "cidr already in use" +#: cinder/tests/integrated/api/client.py:58 +msgid "Item not found" msgstr "" -#: cinder/network/manager.py:1334 +#: cinder/tests/integrated/api/client.py:100 #, python-format -msgid "requested cidr (%(cidr)s) conflicts with existing supernet (%(super)s)" +msgid "Doing %(method)s on %(relative_url)s" msgstr "" -#: cinder/network/manager.py:1345 +#: cinder/tests/integrated/api/client.py:103 #, python-format -msgid "" -"requested cidr (%(cidr)s) conflicts with existing smaller cidr " -"(%(smaller)s)" +msgid "Body: %s" msgstr "" -#: cinder/network/manager.py:1404 -msgid "Network already exists!" +#: cinder/tests/integrated/api/client.py:121 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" msgstr "" -#: cinder/network/manager.py:1423 +#: cinder/tests/integrated/api/client.py:148 #, python-format -msgid "Network must be disassociated from project %s before delete" +msgid "%(relative_uri)s => code %(http_status)s" msgstr "" -#: cinder/network/manager.py:1832 -msgid "" -"The sum between the number of networks and the vlan start cannot be " -"greater than 4094" +#: cinder/tests/integrated/api/client.py:159 +msgid "Unexpected status code" msgstr "" -#: cinder/network/manager.py:1839 +#: cinder/tests/integrated/api/client.py:166 #, python-format -msgid "" -"The network range is not big enough to fit %(num_networks)s. Network size" -" is %(network_size)s" +msgid "Decoding JSON: %s" msgstr "" -#: cinder/network/minidns.py:65 -msgid "This driver only supports type 'a'" +#: cinder/transfer/api.py:68 +msgid "Volume in unexpected state" msgstr "" -#: cinder/network/quantum/client.py:154 -msgid "Tenant ID not set" +#: cinder/transfer/api.py:102 cinder/volume/api.py:367 +msgid "status must be available" msgstr "" -#: cinder/network/quantum/client.py:180 +#: cinder/transfer/api.py:119 +#, fuzzy, python-format +msgid "Failed to create transfer record for %s" +msgstr "Створити розділ на %s ГБ" + +#: cinder/transfer/api.py:136 #, python-format -msgid "Quantum Client Request: %(method)s %(action)s" +msgid "Attempt to transfer %s with invalid auth key." 
msgstr "" -#: cinder/network/quantum/client.py:196 +#: cinder/transfer/api.py:156 cinder/volume/flows/api/create_volume.py:508 #, python-format -msgid "Quantum entity not found: %s" +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" msgstr "" -#: cinder/network/quantum/client.py:206 +#: cinder/transfer/api.py:182 #, python-format -msgid "Server %(status_code)s error: %(data)s" +msgid "Failed to update quota donating volumetransfer id %s" msgstr "" -#: cinder/network/quantum/client.py:210 +#: cinder/transfer/api.py:199 #, python-format -msgid "Unable to connect to server. Got error: %s" +msgid "Volume %s has been transferred." msgstr "" -#: cinder/network/quantum/client.py:228 +#: cinder/volume/api.py:143 #, python-format -msgid "unable to deserialize object of type = '%s'" +msgid "Unable to query if %s is in the availability zone set" msgstr "" -#: cinder/network/quantum/manager.py:162 -msgid "QuantumManager does not use 'multi_host' parameter." +#: cinder/volume/api.py:171 cinder/volume/api.py:173 +msgid "Failed to create api volume flow" msgstr "" -#: cinder/network/quantum/manager.py:166 -msgid "QuantumManager requires that only one network is created per call" +#: cinder/volume/api.py:202 +msgid "Failed to update quota for deleting volume" msgstr "" -#: cinder/network/quantum/manager.py:176 -msgid "QuantumManager does not use 'vlan_start' parameter." +#: cinder/volume/api.py:214 +#, python-format +msgid "Volume status must be available or error, but current status is: %s" msgstr "" -#: cinder/network/quantum/manager.py:182 -msgid "QuantumManager does not use 'vpn_start' parameter." +#: cinder/volume/api.py:224 +msgid "Volume cannot be deleted while migrating" msgstr "" -#: cinder/network/quantum/manager.py:186 -msgid "QuantumManager does not use 'bridge' parameter." +#: cinder/volume/api.py:229 +#, python-format +msgid "Volume still has %d dependent snapshots" msgstr "" -#: cinder/network/quantum/manager.py:190 -msgid "QuantumManager does not use 'bridge_interface' parameter." +#: cinder/volume/api.py:285 cinder/volume/api.py:350 +#: cinder/volume/qos_specs.py:240 cinder/volume/volume_types.py:67 +#, python-format +msgid "Searching by: %s" msgstr "" -#: cinder/network/quantum/manager.py:195 -msgid "QuantumManager requires a valid (.1) gateway address." 
+#: cinder/volume/api.py:370 +msgid "already attached" msgstr "" -#: cinder/network/quantum/manager.py:204 -#, python-format -msgid "" -"Unable to find existing quantum network for tenant '%(q_tenant_id)s' with" -" net-id '%(quantum_net_id)s'" +#: cinder/volume/api.py:377 +msgid "status must be in-use to detach" msgstr "" -#: cinder/network/quantum/manager.py:301 -#, python-format -msgid "network allocations for instance %s" +#: cinder/volume/api.py:388 +msgid "Volume status must be available to reserve" msgstr "" -#: cinder/network/quantum/manager.py:588 -#, python-format -msgid "" -"port deallocation failed for instance: |%(instance_id)s|, port_id: " -"|%(port_id)s|" +#: cinder/volume/api.py:464 +msgid "Snapshot cannot be created while volume is migrating" msgstr "" -#: cinder/network/quantum/manager.py:606 -#, python-format -msgid "" -"ipam deallocation failed for instance: |%(instance_id)s|, vif_uuid: " -"|%(vif_uuid)s|" +#: cinder/volume/api.py:468 +msgid "must be available" msgstr "" -#: cinder/network/quantum/melange_connection.py:96 +#: cinder/volume/api.py:490 #, python-format -msgid "Server returned error: %s" -msgstr "" - -#: cinder/network/quantum/melange_connection.py:98 -msgid "Connection error contacting melange service, retrying" +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" msgstr "" -#: cinder/network/quantum/melange_connection.py:108 +#: cinder/volume/api.py:502 #, python-format msgid "" -"allocate IP on network |%(network_id)s| belonging to " -"|%(network_tenant_id)s| to this vif |%(vif_id)s| with mac " -"|%(mac_address)s| belonging to |%(project_id)s| " +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" msgstr "" -#: cinder/network/quantum/melange_ipam_lib.py:133 -msgid "get_project_and_global_net_ids must be called with a non-null project_id" +#: cinder/volume/api.py:553 +msgid "Volume Snapshot status must be available or error" msgstr "" -#: cinder/network/quantum/cinder_ipam_lib.py:75 -msgid "Error creating network entry" +#: cinder/volume/api.py:581 cinder/volume/flows/api/create_volume.py:208 +msgid "Metadata property key blank" msgstr "" -#: cinder/network/quantum/cinder_ipam_lib.py:90 -#, python-format -msgid "No network with net_id = %s" +#: cinder/volume/api.py:585 +msgid "Metadata property key greater than 255 characters" msgstr "" -#: cinder/network/quantum/cinder_ipam_lib.py:221 -#, python-format -msgid "No fixed IPs to deallocate for vif %s" +#: cinder/volume/api.py:589 +msgid "Metadata property value greater than 255 characters" msgstr "" -#: cinder/network/quantum/quantum_connection.py:99 -#, python-format -msgid "Connecting interface %(interface_id)s to net %(net_id)s for %(tenant_id)s" +#: cinder/volume/api.py:720 cinder/volume/api.py:772 +msgid "Volume status must be available/in-use." msgstr "" -#: cinder/network/quantum/quantum_connection.py:113 -#, python-format -msgid "Deleting port %(port_id)s on net %(net_id)s for %(tenant_id)s" +#: cinder/volume/api.py:723 +msgid "Volume status is in-use." msgstr "" -#: cinder/notifier/api.py:115 -#, python-format -msgid "%s not in valid priorities" +#: cinder/volume/api.py:752 +msgid "Volume status must be available to extend." msgstr "" -#: cinder/notifier/api.py:130 +#: cinder/volume/api.py:757 #, python-format msgid "" -"Problem '%(e)s' attempting to send to notification system. " -"Payload=%(payload)s" +"New size for extend must be greater than current size. 
(current: " +"%(size)s, extended: %(new_size)s)" msgstr "" -#: cinder/notifier/list_notifier.py:65 -#, python-format -msgid "Problem '%(e)s' attempting to send to notification driver %(driver)s." +#: cinder/volume/api.py:778 +msgid "Volume is already part of an active migration" msgstr "" -#: cinder/notifier/rabbit_notifier.py:46 -#, python-format -msgid "Could not send notification to %(topic)s. Payload=%(message)s" +#: cinder/volume/api.py:784 +msgid "volume must not have snapshots" msgstr "" -#: cinder/rpc/amqp.py:146 +#: cinder/volume/api.py:797 #, python-format -msgid "Returning exception %s to caller" +msgid "No available service named %s" msgstr "" -#: cinder/rpc/amqp.py:188 -#, python-format -msgid "unpacked context: %s" +#: cinder/volume/api.py:803 +msgid "Destination host must be different than current host" msgstr "" -#: cinder/rpc/amqp.py:231 -#, python-format -msgid "received %s" -msgstr "отримано %s" +#: cinder/volume/api.py:833 +msgid "Source volume not mid-migration." +msgstr "" -#: cinder/rpc/amqp.py:236 -#, python-format -msgid "no method for message: %s" -msgstr "без порядку для повідомлень: %s" +#: cinder/volume/api.py:837 +msgid "Destination volume not mid-migration." +msgstr "" -#: cinder/rpc/amqp.py:237 +#: cinder/volume/api.py:842 #, python-format -msgid "No method for message: %s" -msgstr "Без порядку для повідомлень: %s" +msgid "Destination has migration_status %(stat)s, expected %(exp)s." +msgstr "" -#: cinder/rpc/amqp.py:321 -#, python-format -msgid "Making asynchronous call on %s ..." +#: cinder/volume/api.py:853 +msgid "Volume status must be available to update readonly flag." msgstr "" -#: cinder/rpc/amqp.py:324 +#: cinder/volume/api.py:862 #, python-format -msgid "MSG_ID is %s" -msgstr "MSG_ID %s" +msgid "Unable to update type due to incorrect status on volume: %s" +msgstr "" -#: cinder/rpc/amqp.py:346 +#: cinder/volume/api.py:868 #, python-format -msgid "Making asynchronous cast on %s..." +msgid "Volume %s is already part of an active migration." msgstr "" -#: cinder/rpc/amqp.py:354 -msgid "Making asynchronous fanout cast..." +#: cinder/volume/api.py:874 +#, python-format +msgid "migration_policy must be 'on-demand' or 'never', passed: %s" msgstr "" -#: cinder/rpc/amqp.py:379 +#: cinder/volume/api.py:887 #, python-format -msgid "Sending notification on %s..." +msgid "Invalid volume_type passed: %s" msgstr "" -#: cinder/rpc/common.py:54 +#: cinder/volume/api.py:900 #, python-format -msgid "" -"Remote error: %(exc_type)s %(value)s\n" -"%(traceback)s." +msgid "New volume_type same as original: %s" msgstr "" -#: cinder/rpc/common.py:71 -msgid "Timeout while waiting on RPC response." +#: cinder/volume/api.py:915 +msgid "Retype cannot change encryption requirements" msgstr "" -#: cinder/rpc/impl_kombu.py:111 -msgid "Failed to process message... skipping it." +#: cinder/volume/api.py:927 +msgid "Retype cannot change front-end qos specs for in-use volumes" msgstr "" -#: cinder/rpc/impl_kombu.py:407 +#: cinder/volume/driver.py:189 cinder/volume/drivers/netapp/nfs.py:174 #, python-format -msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgid "Recovering from a failed execute. Try number %s" msgstr "" -#: cinder/rpc/impl_kombu.py:430 +#: cinder/volume/driver.py:282 #, python-format -msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgid "copy_data_between_volumes %(src)s -> %(dest)s." 
msgstr "" -#: cinder/rpc/impl_kombu.py:466 +#: cinder/volume/driver.py:295 cinder/volume/driver.py:309 #, python-format -msgid "" -"Unable to connect to AMQP server on %(hostname)s:%(port)d after " -"%(max_retries)d tries: %(err_str)s" +msgid "Failed to attach volume %(vol)s" msgstr "" -#: cinder/rpc/impl_kombu.py:482 +#: cinder/volume/driver.py:327 #, python-format -msgid "" -"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " -"again in %(sleep_time)d seconds." +msgid "Failed to copy volume %(src)s to %(dest)d" msgstr "" -#: cinder/rpc/impl_kombu.py:533 cinder/rpc/impl_qpid.py:385 +#: cinder/volume/driver.py:340 #, python-format -msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgid "copy_image_to_volume %s." msgstr "" -#: cinder/rpc/impl_kombu.py:551 cinder/rpc/impl_qpid.py:400 +#: cinder/volume/driver.py:358 #, python-format -msgid "Timed out waiting for RPC response: %s" +msgid "copy_volume_to_image %s." msgstr "" -#: cinder/rpc/impl_kombu.py:555 cinder/rpc/impl_qpid.py:404 +#: cinder/volume/driver.py:394 #, python-format -msgid "Failed to consume message from queue: %s" +msgid "Unable to access the backend storage via the path %(path)s." msgstr "" -#: cinder/rpc/impl_kombu.py:589 cinder/rpc/impl_qpid.py:434 +#: cinder/volume/driver.py:433 #, python-format -msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgid "Creating a new backup for volume %s." msgstr "" -#: cinder/rpc/impl_qpid.py:341 +#: cinder/volume/driver.py:451 #, python-format -msgid "Unable to connect to AMQP server: %s " +msgid "Restoring backup %(backup)s to volume %(volume)s." msgstr "" -#: cinder/rpc/impl_qpid.py:346 -#, python-format -msgid "Connected to AMQP server on %s" +#: cinder/volume/driver.py:474 +msgid "Extend volume not implemented" msgstr "" -#: cinder/rpc/impl_qpid.py:354 -msgid "Re-established AMQP queues" +#: cinder/volume/driver.py:533 cinder/volume/drivers/emc/emc_smis_iscsi.py:113 +msgid "ISCSI provider_location not stored, using discovery" msgstr "" -#: cinder/rpc/impl_qpid.py:412 -msgid "Error processing message. Skipping it." +#: cinder/volume/driver.py:546 +#, python-format +msgid "ISCSI discovery attempt failed for:%s" msgstr "" -#: cinder/scheduler/chance.py:47 cinder/scheduler/simple.py:91 -#: cinder/scheduler/simple.py:143 -msgid "Is the appropriate service running?" +#: cinder/volume/driver.py:548 +#, python-format +msgid "Error from iscsiadm -m discovery: %s" msgstr "" -#: cinder/scheduler/chance.py:52 -msgid "Could not find another compute" +#: cinder/volume/driver.py:595 +#, python-format +msgid "Could not find iSCSI export for volume %s" msgstr "" -#: cinder/scheduler/driver.py:63 +#: cinder/volume/driver.py:599 cinder/volume/drivers/emc/emc_smis_iscsi.py:156 #, python-format -msgid "Casted '%(method)s' to volume '%(host)s'" +msgid "ISCSI Discovery: Found %s" msgstr "" -#: cinder/scheduler/driver.py:80 -#, python-format -msgid "Casted '%(method)s' to compute '%(host)s'" +#: cinder/volume/driver.py:696 +msgid "The volume driver requires the iSCSI initiator name in the connector." 
msgstr "" -#: cinder/scheduler/driver.py:89 -#, python-format -msgid "Casted '%(method)s' to network '%(host)s'" +#: cinder/volume/driver.py:726 cinder/volume/driver.py:845 +#: cinder/volume/drivers/eqlx.py:247 cinder/volume/drivers/lvm.py:359 +#: cinder/volume/drivers/zadara.py:650 +#: cinder/volume/drivers/emc/emc_smis_common.py:859 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:235 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:602 +#: cinder/volume/drivers/netapp/iscsi.py:1032 +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/nexenta/iscsi.py:538 +#: cinder/volume/drivers/windows/windows.py:205 +msgid "Updating volume stats" +msgstr "" + +#: cinder/volume/driver.py:924 +msgid "Driver must implement initialize_connection" msgstr "" -#: cinder/scheduler/driver.py:107 +#: cinder/volume/manager.py:203 #, python-format -msgid "Casted '%(method)s' to %(topic)s '%(host)s'" +msgid "Driver path %s is deprecated, update your configuration to the new path." msgstr "" -#: cinder/scheduler/driver.py:181 -msgid "Must implement a fallback schedule" +#: cinder/volume/manager.py:209 +msgid "" +"ThinLVMVolumeDriver is deprecated, please configure LVMISCSIDriver and " +"lvm_type=thin. Continuing with those settings." msgstr "" -#: cinder/scheduler/driver.py:185 -msgid "Driver must implement schedule_prep_resize" +#: cinder/volume/manager.py:228 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)" msgstr "" -#: cinder/scheduler/driver.py:190 -msgid "Driver must implement schedule_run_instance" +#: cinder/volume/manager.py:235 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s" msgstr "" -#: cinder/scheduler/driver.py:325 -msgid "Block migration can not be used with shared storage." +#: cinder/volume/manager.py:244 +#, python-format +msgid "Re-exporting %s volumes" msgstr "" -#: cinder/scheduler/driver.py:330 -msgid "Live migration can not be used without shared storage." +#: cinder/volume/manager.py:257 +#, python-format +msgid "Failed to re-export volume %s: setting to error state" msgstr "" -#: cinder/scheduler/driver.py:367 cinder/scheduler/driver.py:457 +#: cinder/volume/manager.py:264 #, python-format -msgid "host %(dest)s is not compatible with original host %(src)s." +msgid "volume %s stuck in a downloading state" msgstr "" -#: cinder/scheduler/driver.py:416 +#: cinder/volume/manager.py:271 #, python-format -msgid "" -"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " -"memory(host:%(avail)s <= instance:%(mem_inst)s)" +msgid "volume %s: skipping export" msgstr "" -#: cinder/scheduler/driver.py:472 +#: cinder/volume/manager.py:273 #, python-format msgid "" -"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " -"disk(host:%(available)s <= instance:%(necessary)s)" +"Error encountered during re-exporting phase of driver initialization: " +"%(name)s" msgstr "" -#: cinder/scheduler/filter_scheduler.py:51 -#, python-format -msgid "No host selection for %s defined." 
+#: cinder/volume/manager.py:283 +msgid "Resuming any in progress delete operations" msgstr "" -#: cinder/scheduler/filter_scheduler.py:64 +#: cinder/volume/manager.py:286 #, python-format -msgid "Attempting to build %(num_instances)d instance(s)" +msgid "Resuming delete on volume: %s" msgstr "" -#: cinder/scheduler/filter_scheduler.py:157 -msgid "Scheduler only understands Compute nodes (for now)" +#: cinder/volume/manager.py:328 cinder/volume/manager.py:330 +msgid "Failed to create manager volume flow" msgstr "" -#: cinder/scheduler/filter_scheduler.py:199 +#: cinder/volume/manager.py:374 cinder/volume/manager.py:391 #, python-format -msgid "Filtered %(hosts)s" +msgid "volume %s: deleting" msgstr "" -#: cinder/scheduler/filter_scheduler.py:209 -#, python-format -msgid "Weighted %(weighted_host)s" +#: cinder/volume/manager.py:380 +msgid "volume is not local to this node" msgstr "" -#: cinder/scheduler/host_manager.py:144 +#: cinder/volume/manager.py:389 #, python-format -msgid "Host filter fails for ignored host %(host)s" +msgid "volume %s: removing export" msgstr "" -#: cinder/scheduler/host_manager.py:151 +#: cinder/volume/manager.py:394 #, python-format -msgid "Host filter fails for non-forced host %(host)s" +msgid "Cannot delete volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:422 +msgid "Failed to update usages deleting volume" msgstr "" -#: cinder/scheduler/host_manager.py:157 +#: cinder/volume/manager.py:427 #, python-format -msgid "Host filter function %(func)s failed for %(host)s" +msgid "volume %s: glance metadata deleted" msgstr "" -#: cinder/scheduler/host_manager.py:163 +#: cinder/volume/manager.py:430 #, python-format -msgid "Host filter passes for %(host)s" +msgid "no glance metadata found for volume %s" msgstr "" -#: cinder/scheduler/host_manager.py:272 +#: cinder/volume/manager.py:434 #, python-format -msgid "Received %(service_name)s service update from %(host)s." +msgid "volume %s: deleted successfully" msgstr "" -#: cinder/scheduler/host_manager.py:313 -msgid "host_manager only implemented for 'compute'" +#: cinder/volume/manager.py:451 +#, python-format +msgid "snapshot %s: creating" msgstr "" -#: cinder/scheduler/host_manager.py:323 +#: cinder/volume/manager.py:462 #, python-format -msgid "No service for compute ID %s" +msgid "snapshot %(snap_id)s: creating" msgstr "" -#: cinder/scheduler/manager.py:85 +#: cinder/volume/manager.py:490 #, python-format msgid "" -"Driver Method %(driver_method_name)s missing: %(e)s. Reverting to " -"schedule()" +"Failed updating %(snapshot_id)s metadata using the provided volumes " +"%(volume_id)s metadata" msgstr "" -#: cinder/scheduler/manager.py:150 +#: cinder/volume/manager.py:496 #, python-format -msgid "Failed to schedule_%(method)s: %(ex)s" +msgid "snapshot %s: created successfully" msgstr "" -#: cinder/scheduler/manager.py:159 +#: cinder/volume/manager.py:508 cinder/volume/manager.py:518 #, python-format -msgid "Setting instance %(instance_uuid)s to %(state)s state." 
+msgid "snapshot %s: deleting" msgstr "" -#: cinder/scheduler/scheduler_options.py:66 +#: cinder/volume/manager.py:526 #, python-format -msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgid "Cannot delete snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:556 +msgid "Failed to update usages deleting snapshot" msgstr "" -#: cinder/scheduler/scheduler_options.py:75 +#: cinder/volume/manager.py:559 #, python-format -msgid "Could not decode scheduler options: '%(e)s'" +msgid "snapshot %s: deleted successfully" msgstr "" -#: cinder/scheduler/simple.py:87 -msgid "Not enough allocatable CPU cores remaining" +#: cinder/volume/manager.py:579 +msgid "being attached by another instance" msgstr "" -#: cinder/scheduler/simple.py:137 -msgid "Not enough allocatable volume gigabytes remaining" +#: cinder/volume/manager.py:583 +msgid "being attached by another host" msgstr "" -#: cinder/scheduler/filters/core_filter.py:45 -msgid "VCPUs not set; assuming CPU collection broken" +#: cinder/volume/manager.py:587 +msgid "being attached by different mode" msgstr "" -#: cinder/tests/fake_utils.py:72 -#, python-format -msgid "Faking execution of cmd (subprocess): %s" +#: cinder/volume/manager.py:590 +msgid "status must be available or attaching" msgstr "" -#: cinder/tests/fake_utils.py:80 +#: cinder/volume/manager.py:698 #, python-format -msgid "Faked command matched %s" +msgid "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" msgstr "" -#: cinder/tests/fake_utils.py:96 +#: cinder/volume/manager.py:760 #, python-format -msgid "Faked command raised an exception %s" +msgid "Unable to fetch connection information from backend: %(err)s" msgstr "" -#: cinder/tests/fake_utils.py:101 +#: cinder/volume/manager.py:807 #, python-format -msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgid "Unable to terminate volume connection: %(err)s" msgstr "" -#: cinder/tests/fakelibvirt.py:784 -msgid "Please extend mock libvirt module to support flags" +#: cinder/volume/manager.py:854 +msgid "failed to create new_volume on destination host" msgstr "" -#: cinder/tests/fakelibvirt.py:790 -msgid "Please extend fake libvirt module to support this auth method" +#: cinder/volume/manager.py:857 +msgid "timeout creating new_volume on destination host" msgstr "" -#: cinder/tests/test_compute.py:365 cinder/tests/test_compute.py:1419 +#: cinder/volume/manager.py:880 #, python-format -msgid "Running instances: %s" +msgid "Failed to copy volume %(vol1)s to %(vol2)s" msgstr "" -#: cinder/tests/test_compute.py:371 +#: cinder/volume/manager.py:909 #, python-format -msgid "After terminating instances: %s" +msgid "" +"migrate_volume_completion: completing migration for volume %(vol1)s " +"(temporary volume %(vol2)s" msgstr "" -#: cinder/tests/test_compute.py:589 -msgid "Internal error" +#: cinder/volume/manager.py:921 +#, python-format +msgid "" +"migrate_volume_completion is cleaning up an error for volume %(vol1)s " +"(temporary volume %(vol2)s" msgstr "" -#: cinder/tests/test_compute.py:1430 +#: cinder/volume/manager.py:940 #, python-format -msgid "After force-killing instances: %s" +msgid "Failed to delete migration source vol %(vol)s: %(err)s" msgstr "" -#: cinder/tests/test_misc.py:92 +#: cinder/volume/manager.py:976 #, python-format -msgid "" -"The following migrations are missing a downgrade:\n" -"\t%s" +msgid "volume %s: calling driver migrate_volume" msgstr "" -#: cinder/tests/test_cinder_manage.py:169 -msgid "id" +#: cinder/volume/manager.py:1016 +msgid 
"Updating volume status" msgstr "" -#: cinder/tests/test_cinder_manage.py:170 -msgid "IPv4" +#: cinder/volume/manager.py:1024 +#, python-format +msgid "" +"Unable to update stats, %(driver_name)s -%(driver_version)s " +"%(config_group)s driver is uninitialized." msgstr "" -#: cinder/tests/test_cinder_manage.py:171 -msgid "IPv6" +#: cinder/volume/manager.py:1044 +#, python-format +msgid "Notification {%s} received" msgstr "" -#: cinder/tests/test_cinder_manage.py:172 -msgid "start address" +#: cinder/volume/manager.py:1091 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to extend volume by %(s_size)sG, " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" msgstr "" -#: cinder/tests/test_cinder_manage.py:173 -msgid "DNS1" +#: cinder/volume/manager.py:1103 +#, python-format +msgid "volume %s: extending" msgstr "" -#: cinder/tests/test_cinder_manage.py:174 -msgid "DNS2" +#: cinder/volume/manager.py:1105 +#, python-format +msgid "volume %s: extended successfully" msgstr "" -#: cinder/tests/test_cinder_manage.py:175 -msgid "VlanID" +#: cinder/volume/manager.py:1107 +#, python-format +msgid "volume %s: Error trying to extend volume" msgstr "" -#: cinder/tests/test_cinder_manage.py:176 -msgid "project" +#: cinder/volume/manager.py:1169 +msgid "Failed to update usages while retyping volume." msgstr "" -#: cinder/tests/test_cinder_manage.py:177 -msgid "uuid" +#: cinder/volume/manager.py:1170 +msgid "Failed to get old volume type quota reservations" msgstr "" -#: cinder/tests/test_volume.py:216 +#: cinder/volume/manager.py:1190 #, python-format -msgid "Target %s allocated" +msgid "Volume %s: retyped succesfully" msgstr "" -#: cinder/tests/test_volume.py:468 +#: cinder/volume/manager.py:1193 #, python-format -msgid "Cannot confirm exported volume id:%s." +msgid "" +"Volume %s: driver error when trying to retype, falling back to generic " +"mechanism." msgstr "" -#: cinder/tests/test_volume_types.py:58 -#, python-format -msgid "Given data: %s" +#: cinder/volume/manager.py:1204 +msgid "Retype requires migration but is not allowed." msgstr "" -#: cinder/tests/test_volume_types.py:59 -#, python-format -msgid "Result data: %s" +#: cinder/volume/manager.py:1212 +msgid "Volume must not have snapshots." 
msgstr "" -#: cinder/tests/test_xenapi.py:626 +#: cinder/volume/qos_specs.py:57 #, python-format -msgid "Creating files in %s to simulate guest agent" +msgid "Valid consumer of QoS specs are: %s" msgstr "" -#: cinder/tests/test_xenapi.py:637 +#: cinder/volume/qos_specs.py:84 cinder/volume/qos_specs.py:105 +#: cinder/volume/qos_specs.py:155 cinder/volume/qos_specs.py:197 +#: cinder/volume/qos_specs.py:211 cinder/volume/qos_specs.py:225 +#: cinder/volume/volume_types.py:43 #, python-format -msgid "Removing simulated guest agent files in %s" +msgid "DB error: %s" msgstr "" -#: cinder/tests/api/openstack/compute/test_servers.py:2144 -#, python-format -msgid "Quota exceeded: code=%(code)s" +#: cinder/volume/qos_specs.py:123 cinder/volume/qos_specs.py:140 +#: cinder/volume/qos_specs.py:272 cinder/volume/volume_types.py:52 +#: cinder/volume/volume_types.py:99 +msgid "id cannot be None" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:57 +#: cinder/volume/qos_specs.py:156 #, python-format -msgid "_create: %s" +msgid "Failed to get all associations of qos specs %s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:66 +#: cinder/volume/qos_specs.py:189 #, python-format -msgid "_delete: %s" +msgid "" +"Type %(type_id)s is already associated with another qos specs: " +"%(qos_specs_id)s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:75 +#: cinder/volume/qos_specs.py:198 #, python-format -msgid "_get: %s" +msgid "Failed to associate qos specs %(id)s with type: %(vol_type_id)s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:85 +#: cinder/volume/qos_specs.py:212 #, python-format -msgid "_get_all: %s" +msgid "Failed to disassociate qos specs %(id)s with type: %(vol_type_id)s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:125 +#: cinder/volume/qos_specs.py:226 #, python-format -msgid "test_snapshot_create: param=%s" +msgid "Failed to disassociate qos specs %s." +msgstr "" + +#: cinder/volume/qos_specs.py:284 cinder/volume/volume_types.py:111 +msgid "name cannot be None" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:134 +#: cinder/volume/utils.py:144 #, python-format -msgid "test_snapshot_create: resp_dict=%s" +msgid "" +"Incorrect value error: %(blocksize)s, it may indicate that " +"'volume_dd_blocksize' was configured incorrectly. Fall back to default." msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:156 +#: cinder/volume/volume_types.py:130 #, python-format -msgid "test_snapshot_create_force: param=%s" +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:165 +#: cinder/volume/drivers/block_device.py:131 +#: cinder/volume/drivers/block_device.py:143 cinder/volume/drivers/lvm.py:654 +#: cinder/volume/drivers/lvm.py:669 #, python-format -msgid "test_snapshot_create_force: resp_dict=%s" +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:205 +#: cinder/volume/drivers/block_device.py:157 cinder/volume/drivers/lvm.py:687 #, python-format -msgid "test_snapshot_show: resp=%s" +msgid "" +"Skipping remove_export. 
No iscsi_target is presently exported for volume:" +" %s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:231 +#: cinder/volume/drivers/block_device.py:183 cinder/volume/drivers/lvm.py:483 #, python-format -msgid "test_snapshot_detail: resp_dict=%s" +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" msgstr "" -#: cinder/tests/integrated/test_login.py:31 +#: cinder/volume/drivers/block_device.py:200 cinder/volume/drivers/lvm.py:504 #, python-format -msgid "flavor: %s" +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" msgstr "" -#: cinder/tests/integrated/api/client.py:38 +#: cinder/volume/drivers/block_device.py:272 cinder/volume/drivers/lvm.py:227 #, python-format -msgid "" -"%(message)s\n" -"Status Code: %(_status)s\n" -"Body: %(_body)s" +msgid "Performing secure delete on volume: %s" msgstr "" -#: cinder/tests/integrated/api/client.py:47 -msgid "Authentication error" +#: cinder/volume/drivers/block_device.py:287 +#, python-format +msgid "Error unrecognized volume_clear option: %s" msgstr "" -#: cinder/tests/integrated/api/client.py:55 -msgid "Authorization error" +#: cinder/volume/drivers/block_device.py:311 cinder/volume/drivers/lvm.py:300 +#: cinder/volume/drivers/zadara.py:509 cinder/volume/drivers/nexenta/nfs.py:189 +#, fuzzy, python-format +msgid "Creating clone of volume: %s" +msgstr "Створити розділ на %s ГБ" + +#: cinder/volume/drivers/block_device.py:380 +msgid "No free disk" msgstr "" -#: cinder/tests/integrated/api/client.py:63 -msgid "Item not found" +#: cinder/volume/drivers/block_device.py:393 +msgid "No big enough free disk" msgstr "" -#: cinder/tests/integrated/api/client.py:105 +#: cinder/volume/drivers/coraid.py:84 #, python-format -msgid "Doing %(method)s on %(relative_url)s" +msgid "Invalid ESM url scheme \"%s\". Supported https only." msgstr "" -#: cinder/tests/integrated/api/client.py:107 -#, python-format -msgid "Body: %s" +#: cinder/volume/drivers/coraid.py:111 +msgid "Invalid REST handle name. Expected path." msgstr "" -#: cinder/tests/integrated/api/client.py:125 +#: cinder/volume/drivers/coraid.py:134 #, python-format -msgid "%(auth_uri)s => code %(http_status)s" +msgid "Call to json.loads() failed: %(ex)s. Response: %(resp)s" msgstr "" -#: cinder/tests/integrated/api/client.py:151 -#, python-format -msgid "%(relative_uri)s => code %(http_status)s" +#: cinder/volume/drivers/coraid.py:224 +msgid "Session is expired. Relogin on ESM." msgstr "" -#: cinder/tests/integrated/api/client.py:161 -msgid "Unexpected status code" +#: cinder/volume/drivers/coraid.py:244 +msgid "Reply is empty." msgstr "" -#: cinder/tests/integrated/api/client.py:168 -#, python-format -msgid "Decoding JSON: %s" +#: cinder/volume/drivers/coraid.py:246 +msgid "Error message is empty." msgstr "" -#: cinder/tests/rpc/common.py:133 +#: cinder/volume/drivers/coraid.py:284 #, python-format -msgid "Nested received %(queue)s, %(value)s" +msgid "Coraid Appliance ping failed: %s" msgstr "" -#: cinder/tests/rpc/common.py:142 +#: cinder/volume/drivers/coraid.py:297 #, python-format -msgid "Nested return %s" +msgid "Volume \"%(name)s\" created with VSX LUN \"%(lun)s\"" msgstr "" -#: cinder/tests/rpc/common.py:160 -msgid "RPC backend does not support timeouts" +#: cinder/volume/drivers/coraid.py:311 +#, python-format +msgid "Volume \"%s\" deleted." 
msgstr "" -#: cinder/tests/rpc/common.py:227 cinder/tests/rpc/common.py:233 +#: cinder/volume/drivers/coraid.py:315 #, python-format -msgid "Received %s" +msgid "Resize volume \"%(name)s\" to %(size)s GB." msgstr "" -#: cinder/virt/connection.py:85 -msgid "Failed to open connection to the hypervisor" +#: cinder/volume/drivers/coraid.py:319 +#, python-format +msgid "Repository for volume \"%(name)s\" found: \"%(repo)s\"" msgstr "" -#: cinder/virt/fake.py:270 cinder/virt/xenapi_conn.py:396 -#: cinder/virt/baremetal/proxy.py:716 cinder/virt/libvirt/connection.py:2045 +#: cinder/volume/drivers/coraid.py:333 #, python-format -msgid "Compute_service record created for %s " +msgid "Volume \"%(name)s\" resized. New size is %(size)s GB." msgstr "" -#: cinder/virt/fake.py:273 cinder/virt/xenapi_conn.py:399 -#: cinder/virt/baremetal/proxy.py:720 cinder/virt/libvirt/connection.py:2048 -#, python-format -msgid "Compute_service record updated for %s " +#: cinder/volume/drivers/coraid.py:385 +msgid "Cannot create clone volume in different repository." msgstr "" -#: cinder/virt/firewall.py:130 +#: cinder/volume/drivers/coraid.py:505 #, python-format -msgid "Attempted to unfilter instance %s which is not filtered" +msgid "Initialize connection %(shelf)s/%(lun)s for %(name)s" msgstr "" -#: cinder/virt/firewall.py:137 +#: cinder/volume/drivers/eqlx.py:139 #, python-format -msgid "Filters added to instance %s" +msgid "" +"CLI output\n" +"%s" msgstr "" -#: cinder/virt/firewall.py:139 -msgid "Provider Firewall Rules refreshed" +#: cinder/volume/drivers/eqlx.py:154 +msgid "Reading CLI MOTD" msgstr "" -#: cinder/virt/firewall.py:291 +#: cinder/volume/drivers/eqlx.py:158 #, python-format -msgid "Adding security group rule: %r" +msgid "Setting CLI terminal width: '%s'" msgstr "" -#: cinder/virt/firewall.py:403 cinder/virt/xenapi/firewall.py:87 +#: cinder/volume/drivers/eqlx.py:162 #, python-format -msgid "Adding provider rule: %s" +msgid "Sending CLI command: '%s'" msgstr "" -#: cinder/virt/images.py:86 -msgid "'qemu-img info' parsing failed." +#: cinder/volume/drivers/eqlx.py:169 +msgid "Error executing EQL command" msgstr "" -#: cinder/virt/images.py:92 +#: cinder/volume/drivers/eqlx.py:199 #, python-format -msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgid "EQL-driver: executing \"%s\"" msgstr "" -#: cinder/virt/images.py:104 +#: cinder/volume/drivers/eqlx.py:208 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:383 #, python-format -msgid "Converted to raw, but format is now %s" +msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" msgstr "" -#: cinder/virt/vmwareapi_conn.py:105 -msgid "" -"Must specify vmwareapi_host_ip,vmwareapi_host_username and " -"vmwareapi_host_password to useconnection_type=vmwareapi" +#: cinder/volume/drivers/eqlx.py:215 cinder/volume/drivers/san/san.py:149 +#, fuzzy, python-format +msgid "Error running SSH command: %s" +msgstr "Неочікувана помилка при виконанні команди." 
+ +#: cinder/volume/drivers/eqlx.py:282 +#, python-format +msgid "Volume %s does not exist, it may have already been deleted" msgstr "" -#: cinder/virt/vmwareapi_conn.py:276 +#: cinder/volume/drivers/eqlx.py:300 #, python-format -msgid "In vmwareapi:_create_session, got this exception: %s" +msgid "EQL-driver: Setup is complete, group IP is %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:304 +msgid "Failed to setup the Dell EqualLogic driver" msgstr "" -#: cinder/virt/vmwareapi_conn.py:359 +#: cinder/volume/drivers/eqlx.py:320 #, python-format -msgid "In vmwareapi:_call_method, got this exception: %s" +msgid "Failed to create volume %s" msgstr "" -#: cinder/virt/vmwareapi_conn.py:398 +#: cinder/volume/drivers/eqlx.py:329 #, python-format -msgid "Task [%(task_name)s] %(task_ref)s status: success" +msgid "Volume %s was not found while trying to delete it" msgstr "" -#: cinder/virt/vmwareapi_conn.py:404 +#: cinder/volume/drivers/eqlx.py:333 #, python-format -msgid "Task [%(task_name)s] %(task_ref)s status: error %(error_info)s" +msgid "Failed to delete volume %s" msgstr "" -#: cinder/virt/vmwareapi_conn.py:409 +#: cinder/volume/drivers/eqlx.py:348 #, python-format -msgid "In vmwareapi:_poll_task, Got this error %s" +msgid "Failed to create snapshot of volume %s" msgstr "" -#: cinder/virt/xenapi_conn.py:140 -msgid "" -"Must specify xenapi_connection_url, xenapi_connection_username " -"(optionally), and xenapi_connection_password to use " -"connection_type=xenapi" +#: cinder/volume/drivers/eqlx.py:361 +#, python-format +msgid "Failed to create volume from snapshot %s" msgstr "" -#: cinder/virt/xenapi_conn.py:329 cinder/virt/libvirt/connection.py:472 -msgid "Could not determine iscsi initiator name" +#: cinder/volume/drivers/eqlx.py:374 +#, python-format +msgid "Failed to create clone of volume %s" msgstr "" -#: cinder/virt/xenapi_conn.py:460 -msgid "Host startup on XenServer is not supported." +#: cinder/volume/drivers/eqlx.py:384 +#, python-format +msgid "Failed to delete snapshot %(snap)s of volume %(vol)s" msgstr "" -#: cinder/virt/xenapi_conn.py:489 -msgid "Unable to log in to XenAPI (is the Dom0 disk full?)" +#: cinder/volume/drivers/eqlx.py:405 +#, python-format +msgid "Failed to initialize connection to volume %s" msgstr "" -#: cinder/virt/xenapi_conn.py:527 -msgid "Host is member of a pool, but DB says otherwise" +#: cinder/volume/drivers/eqlx.py:415 +#, python-format +msgid "Failed to terminate connection to volume %s" msgstr "" -#: cinder/virt/xenapi_conn.py:599 cinder/virt/xenapi_conn.py:612 +#: cinder/volume/drivers/eqlx.py:436 #, python-format -msgid "Got exception: %s" +msgid "Volume %s is not found!, it may have been deleted" msgstr "" -#: cinder/virt/baremetal/dom.py:93 -msgid "No domains exist." 
+#: cinder/volume/drivers/eqlx.py:440 +#, python-format +msgid "Failed to ensure export of volume %s" msgstr "" -#: cinder/virt/baremetal/dom.py:95 +#: cinder/volume/drivers/eqlx.py:459 #, python-format -msgid "============= initial domains =========== : %s" +msgid "Failed to extend_volume %(name)s from %(current_size)sGB to %(new_size)sGB" msgstr "" -#: cinder/virt/baremetal/dom.py:99 -msgid "Building domain: to be removed" +#: cinder/volume/drivers/glusterfs.py:86 +#, python-format +msgid "There's no Gluster config file configured (%s)" msgstr "" -#: cinder/virt/baremetal/dom.py:103 -msgid "Not running domain: remove" +#: cinder/volume/drivers/glusterfs.py:91 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" msgstr "" -#: cinder/virt/baremetal/dom.py:111 -msgid "domain running on an unknown node: discarded" +#: cinder/volume/drivers/glusterfs.py:103 +msgid "mount.glusterfs is not installed" msgstr "" -#: cinder/virt/baremetal/dom.py:127 +#: cinder/volume/drivers/glusterfs.py:161 #, python-format -msgid "No such domain (%s)" +msgid "Cloning volume %(src)s to volume %(dst)s" msgstr "" -#: cinder/virt/baremetal/dom.py:134 -#, python-format -msgid "Failed power down Bare-metal node %s" +#: cinder/volume/drivers/glusterfs.py:166 +msgid "Volume status must be 'available'." msgstr "" -#: cinder/virt/baremetal/dom.py:143 -msgid "deactivate -> activate fails" +#: cinder/volume/drivers/glusterfs.py:202 cinder/volume/drivers/nfs.py:122 +#: cinder/volume/drivers/netapp/nfs.py:753 +#, python-format +msgid "casted to %s" msgstr "" -#: cinder/virt/baremetal/dom.py:153 -msgid "destroy_domain: no such domain" +#: cinder/volume/drivers/glusterfs.py:216 +msgid "Snapshot status must be \"available\" to clone." msgstr "" -#: cinder/virt/baremetal/dom.py:154 +#: cinder/volume/drivers/glusterfs.py:238 #, python-format -msgid "No such domain %s" +msgid "snapshot: %(snap)s, volume: %(vol)s, volume_size: %(size)s" msgstr "" -#: cinder/virt/baremetal/dom.py:161 +#: cinder/volume/drivers/glusterfs.py:257 #, python-format -msgid "Domains: %s" +msgid "will copy from snapshot at %s" msgstr "" -#: cinder/virt/baremetal/dom.py:163 +#: cinder/volume/drivers/glusterfs.py:275 cinder/volume/drivers/nfs.py:172 #, python-format -msgid "Nodes: %s" +msgid "Volume %s does not have provider_location specified, skipping" msgstr "" -#: cinder/virt/baremetal/dom.py:166 +#: cinder/volume/drivers/glusterfs.py:373 #, python-format -msgid "After storing domains: %s" +msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" msgstr "" -#: cinder/virt/baremetal/dom.py:169 -msgid "deactivation/removing domain failed" +#: cinder/volume/drivers/glusterfs.py:403 +#, python-format +msgid "nova call result: %s" msgstr "" -#: cinder/virt/baremetal/dom.py:176 -msgid "===== Domain is being created =====" +#: cinder/volume/drivers/glusterfs.py:405 +msgid "Call to Nova to create snapshot failed" msgstr "" -#: cinder/virt/baremetal/dom.py:179 -msgid "Same domain name already exists" +#: cinder/volume/drivers/glusterfs.py:427 +msgid "Nova returned \"error\" status while creating snapshot." msgstr "" -#: cinder/virt/baremetal/dom.py:181 -msgid "create_domain: before get_idle_node" +#: cinder/volume/drivers/glusterfs.py:431 +#, python-format +msgid "Status of snapshot %(id)s is now %(status)s" msgstr "" -#: cinder/virt/baremetal/dom.py:198 +#: cinder/volume/drivers/glusterfs.py:444 #, python-format -msgid "Created new domain: %s" +msgid "Timed out while waiting for Nova update for creation of snapshot %s." 
msgstr "" -#: cinder/virt/baremetal/dom.py:213 +#: cinder/volume/drivers/glusterfs.py:456 #, python-format -msgid "Failed to boot Bare-metal node %s" +msgid "create snapshot: %s" msgstr "" -#: cinder/virt/baremetal/dom.py:222 -msgid "No such domain exists" +#: cinder/volume/drivers/glusterfs.py:457 +#, python-format +msgid "volume id: %s" msgstr "" -#: cinder/virt/baremetal/dom.py:226 -#, python-format -msgid "change_domain_state: to new state %s" +#: cinder/volume/drivers/glusterfs.py:532 +msgid "'active' must be present when writing snap_info." msgstr "" -#: cinder/virt/baremetal/dom.py:233 +#: cinder/volume/drivers/glusterfs.py:562 #, python-format -msgid "Stored fake domains to the file: %s" +msgid "deleting snapshot %s" msgstr "" -#: cinder/virt/baremetal/dom.py:244 -msgid "domain does not exist" +#: cinder/volume/drivers/glusterfs.py:566 +msgid "Volume status must be \"available\" or \"in-use\"." msgstr "" -#: cinder/virt/baremetal/nodes.py:42 +#: cinder/volume/drivers/glusterfs.py:582 #, python-format -msgid "Unknown baremetal driver %(d)s" +msgid "" +"Snapshot record for %s is not present, allowing snapshot_delete to " +"proceed." msgstr "" -#: cinder/virt/baremetal/proxy.py:148 +#: cinder/volume/drivers/glusterfs.py:587 #, python-format -msgid "Error encountered when destroying instance '%(name)s': %(ex)s" +msgid "snapshot_file for this snap is %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:162 +#: cinder/volume/drivers/glusterfs.py:608 #, python-format -msgid "instance %(instance_name)s: deleting instance files %(target)s" +msgid "No base file found for %s." msgstr "" -#: cinder/virt/baremetal/proxy.py:189 +#: cinder/volume/drivers/glusterfs.py:625 #, python-format -msgid "instance %s: rebooted" +msgid "No %(base_id)s found for %(file)s" msgstr "" -#: cinder/virt/baremetal/proxy.py:192 -msgid "_wait_for_reboot failed" -msgstr "" - -#: cinder/virt/baremetal/proxy.py:222 +#: cinder/volume/drivers/glusterfs.py:680 #, python-format -msgid "instance %s: rescued" +msgid "No file found with %s as backing file." msgstr "" -#: cinder/virt/baremetal/proxy.py:225 -msgid "_wait_for_rescue failed" +#: cinder/volume/drivers/glusterfs.py:690 +#, python-format +msgid "No snap found with %s as backing file." msgstr "" -#: cinder/virt/baremetal/proxy.py:242 -msgid "<============= spawn of baremetal =============>" +#: cinder/volume/drivers/glusterfs.py:701 +#, python-format +msgid "No file depends on %s." msgstr "" -#: cinder/virt/baremetal/proxy.py:255 +#: cinder/volume/drivers/glusterfs.py:727 #, python-format -msgid "instance %s: is building" +msgid "Check condition failed: %s expected to be None." msgstr "" -#: cinder/virt/baremetal/proxy.py:260 -msgid "Key is injected but instance is not running yet" +#: cinder/volume/drivers/glusterfs.py:778 +msgid "Call to Nova delete snapshot failed" msgstr "" -#: cinder/virt/baremetal/proxy.py:265 +#: cinder/volume/drivers/glusterfs.py:796 #, python-format -msgid "instance %s: booted" +msgid "status of snapshot %s is still \"deleting\"... waiting" msgstr "" -#: cinder/virt/baremetal/proxy.py:268 +#: cinder/volume/drivers/glusterfs.py:802 #, python-format -msgid "~~~~~~ current state = %s ~~~~~~" +msgid "Unable to delete snapshot %(id)s, status: %(status)s." msgstr "" -#: cinder/virt/baremetal/proxy.py:269 +#: cinder/volume/drivers/glusterfs.py:815 #, python-format -msgid "instance %s spawned successfully" +msgid "Timed out while waiting for Nova update for deletion of snapshot %(id)s." 
msgstr "" -#: cinder/virt/baremetal/proxy.py:272 +#: cinder/volume/drivers/glusterfs.py:904 #, python-format -msgid "instance %s:not booted" +msgid "%s must be a valid raw or qcow2 image." msgstr "" -#: cinder/virt/baremetal/proxy.py:274 -msgid "Bremetal assignment is overcommitted." +#: cinder/volume/drivers/glusterfs.py:967 +msgid "Extend volume is only supported for this driver when no snapshots exist." msgstr "" -#: cinder/virt/baremetal/proxy.py:354 +#: cinder/volume/drivers/glusterfs.py:975 #, python-format -msgid "instance %s: Creating image" +msgid "Unrecognized backing format: %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:473 +#: cinder/volume/drivers/glusterfs.py:990 #, python-format -msgid "instance %(inst_name)s: injecting %(injection)s into image %(img_id)s" +msgid "creating new volume at %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:484 +#: cinder/volume/drivers/glusterfs.py:993 #, python-format -msgid "" -"instance %(inst_name)s: ignoring error injecting data into image " -"%(img_id)s (%(e)s)" +msgid "file already exists at %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:529 +#: cinder/volume/drivers/glusterfs.py:1019 cinder/volume/drivers/nfs.py:159 #, python-format -msgid "instance %s: starting toXML method" +msgid "Exception during mounting %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:531 +#: cinder/volume/drivers/glusterfs.py:1021 #, python-format -msgid "instance %s: finished toXML method" +msgid "Available shares: %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:574 cinder/virt/libvirt/connection.py:1815 +#: cinder/volume/drivers/glusterfs.py:1038 +#, python-format msgid "" -"Cannot get the number of cpu, because this function is not implemented " -"for this platform. This error can be safely ignored for now." +"GlusterFS share at %(dir)s is not writable by the Cinder volume service. " +"Snapshot operations will not be supported." msgstr "" -#: cinder/virt/baremetal/proxy.py:714 +#: cinder/volume/drivers/gpfs.py:96 #, python-format -msgid "#### RLK: cpu_arch = %s " -msgstr "" - -#: cinder/virt/baremetal/proxy.py:746 -msgid "Updating!" -msgstr "" - -#: cinder/virt/baremetal/proxy.py:773 cinder/virt/libvirt/connection.py:2609 -#: cinder/virt/xenapi/host.py:129 -msgid "Updating host stats" -msgstr "" - -#: cinder/virt/baremetal/tilera.py:185 -msgid "free_node...." +msgid "GPFS is not active. Detailed output: %s" msgstr "" -#: cinder/virt/baremetal/tilera.py:216 +#: cinder/volume/drivers/gpfs.py:97 #, python-format -msgid "deactivate_node is called for node_id = %(id)s node_ip = %(ip)s" +msgid "GPFS is not running - state: %s" msgstr "" -#: cinder/virt/baremetal/tilera.py:221 -msgid "status of node is set to 0" +#: cinder/volume/drivers/gpfs.py:140 +msgid "Option gpfs_mount_point_base is not set correctly." msgstr "" -#: cinder/virt/baremetal/tilera.py:232 -msgid "rootfs is already removed" +#: cinder/volume/drivers/gpfs.py:147 +msgid "Option gpfs_images_share_mode is not set correctly." msgstr "" -#: cinder/virt/baremetal/tilera.py:264 -msgid "Before ping to the bare-metal node" +#: cinder/volume/drivers/gpfs.py:153 +msgid "Option gpfs_images_dir is not set correctly." 
msgstr "" -#: cinder/virt/baremetal/tilera.py:275 +#: cinder/volume/drivers/gpfs.py:160 #, python-format -msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is ready" +msgid "" +"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " +"belong to different file systems" msgstr "" -#: cinder/virt/baremetal/tilera.py:279 +#: cinder/volume/drivers/gpfs.py:169 #, python-format -msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is not ready, out_msg=%(out_msg)s" +msgid "" +"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in " +"cluster daemon level %(cur)s - must be at least at level %(min)s." msgstr "" -#: cinder/virt/baremetal/tilera.py:292 -msgid "Noting to do for tilera nodes: vmlinux is in CF" +#: cinder/volume/drivers/gpfs.py:183 +#, python-format +msgid "%s must be an absolute path." msgstr "" -#: cinder/virt/baremetal/tilera.py:316 -msgid "activate_node" +#: cinder/volume/drivers/gpfs.py:188 +#, python-format +msgid "%s is not a directory." msgstr "" -#: cinder/virt/baremetal/tilera.py:330 -msgid "Node is unknown error state." +#: cinder/volume/drivers/gpfs.py:197 +#, python-format +msgid "" +"The GPFS filesystem %(fs)s is not at the required release level. Current" +" level is %(cur)s, must be at least %(min)s." msgstr "" -#: cinder/virt/disk/api.py:165 -msgid "no capable image handler configured" +#: cinder/volume/drivers/gpfs.py:556 +#, python-format +msgid "Failed to resize volume %(volume_id)s, error: %(error)s" msgstr "" -#: cinder/virt/disk/api.py:178 +#: cinder/volume/drivers/gpfs.py:604 #, python-format -msgid "unknown disk image handler: %s" +msgid "mkfs failed on volume %(vol)s, error message was: %(err)s" msgstr "" -#: cinder/virt/disk/api.py:189 -msgid "image already mounted" +#: cinder/volume/drivers/gpfs.py:637 +#, python-format +msgid "" +"%s cannot be accessed. Verify that GPFS is active and file system is " +"mounted." msgstr "" -#: cinder/virt/disk/api.py:276 cinder/virt/disk/guestfs.py:64 -#: cinder/virt/disk/guestfs.py:78 cinder/virt/disk/mount.py:100 +#: cinder/volume/drivers/lvm.py:189 #, python-format -msgid "Failed to mount filesystem: %s" +msgid "Unabled to delete due to existing snapshot for volume: %s" msgstr "" -#: cinder/virt/disk/api.py:291 +#: cinder/volume/drivers/lvm.py:215 #, python-format -msgid "Failed to remove container: %s" +msgid "Volume device file path %s does not exist." msgstr "" -#: cinder/virt/disk/api.py:441 +#: cinder/volume/drivers/lvm.py:221 #, python-format -msgid "User %(username)s not found in password file." +msgid "Size for volume: %s not found, cannot secure delete." msgstr "" -#: cinder/virt/disk/api.py:457 +#: cinder/volume/drivers/lvm.py:262 #, python-format -msgid "User %(username)s not found in shadow file." 
+msgid "snapshot: %s not found, skipping delete operations" msgstr "" -#: cinder/virt/disk/guestfs.py:39 +#: cinder/volume/drivers/lvm.py:361 #, python-format -msgid "unsupported partition: %s" +msgid "Unable to update stats on non-initialized Volume Group: %s" msgstr "" -#: cinder/virt/disk/guestfs.py:77 -msgid "unknown guestmount error" +#: cinder/volume/drivers/lvm.py:462 +#, python-format +msgid "Error creating iSCSI target, retrying creation for target: %s" msgstr "" -#: cinder/virt/disk/loop.py:30 +#: cinder/volume/drivers/lvm.py:482 #, python-format -msgid "Could not attach image to loopback: %s" +msgid "volume_info:%s" msgstr "" -#: cinder/virt/disk/mount.py:76 -msgid "no partitions found" +#: cinder/volume/drivers/lvm.py:518 +msgid "Detected inconsistency in provider_location id" msgstr "" -#: cinder/virt/disk/mount.py:77 +#: cinder/volume/drivers/lvm.py:519 cinder/volume/drivers/lvm.py:724 +#: cinder/volume/drivers/huawei/rest_common.py:1225 #, python-format -msgid "Failed to map partitions: %s" +msgid "%s" msgstr "" -#: cinder/virt/disk/nbd.py:58 -msgid "nbd unavailable: module not loaded" +#: cinder/volume/drivers/lvm.py:573 +#, python-format +msgid "Symbolic link %s not found" msgstr "" -#: cinder/virt/disk/nbd.py:63 -msgid "No free nbd devices" +#: cinder/volume/drivers/nfs.py:109 +msgid "Driver specific implementation needs to return mount_point_base." msgstr "" -#: cinder/virt/disk/nbd.py:81 +#: cinder/volume/drivers/nfs.py:263 #, python-format -msgid "qemu-nbd error: %s" +msgid "Expected volume size was %d" msgstr "" -#: cinder/virt/disk/nbd.py:93 +#: cinder/volume/drivers/nfs.py:264 #, python-format -msgid "nbd device %s did not show up" +msgid " but size is now %d" msgstr "" -#: cinder/virt/libvirt/connection.py:265 +#: cinder/volume/drivers/nfs.py:361 #, python-format -msgid "Connecting to libvirt: %s" +msgid "%s is already mounted" msgstr "" -#: cinder/virt/libvirt/connection.py:286 -msgid "Connection to libvirt broke" +#: cinder/volume/drivers/nfs.py:421 +#, python-format +msgid "There's no NFS config file configured (%s)" msgstr "" -#: cinder/virt/libvirt/connection.py:388 +#: cinder/volume/drivers/nfs.py:426 #, python-format -msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s" +msgid "NFS config file at %(config)s doesn't exist" msgstr "" -#: cinder/virt/libvirt/connection.py:400 +#: cinder/volume/drivers/nfs.py:431 #, python-format -msgid "" -"Error from libvirt during saved instance removal. Code=%(errcode)s " -"Error=%(e)s" +msgid "NFS config 'nfs_oversub_ratio' invalid. Must be > 0: %s" msgstr "" -#: cinder/virt/libvirt/connection.py:411 +#: cinder/volume/drivers/nfs.py:439 #, python-format -msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s" +msgid "NFS config 'nfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" msgstr "" -#: cinder/virt/libvirt/connection.py:423 -msgid "Instance destroyed successfully." +#: cinder/volume/drivers/nfs.py:493 +#, python-format +msgid "Selected %s as target nfs share." msgstr "" -#: cinder/virt/libvirt/connection.py:435 +#: cinder/volume/drivers/nfs.py:526 #, python-format -msgid "Error from libvirt during unfilter. 
Code=%(errcode)s Error=%(e)s" +msgid "%s is above nfs_used_ratio" msgstr "" -#: cinder/virt/libvirt/connection.py:461 +#: cinder/volume/drivers/nfs.py:529 #, python-format -msgid "Deleting instance files %(target)s" +msgid "%s is above nfs_oversub_ratio" msgstr "" -#: cinder/virt/libvirt/connection.py:554 -msgid "attaching LXC block device" +#: cinder/volume/drivers/nfs.py:532 +#, python-format +msgid "%s reserved space is above nfs_oversub_ratio" msgstr "" -#: cinder/virt/libvirt/connection.py:567 -msgid "detaching LXC block device" +#: cinder/volume/drivers/rbd.py:160 +#, python-format +msgid "Invalid argument - whence=%s not supported" msgstr "" -#: cinder/virt/libvirt/connection.py:692 -msgid "Instance soft rebooted successfully." +#: cinder/volume/drivers/rbd.py:164 +msgid "Invalid argument" msgstr "" -#: cinder/virt/libvirt/connection.py:696 -msgid "Failed to soft reboot instance." +#: cinder/volume/drivers/rbd.py:183 +msgid "fileno() not supported by RBD()" msgstr "" -#: cinder/virt/libvirt/connection.py:725 -msgid "Instance shutdown successfully." +#: cinder/volume/drivers/rbd.py:210 +#, fuzzy, python-format +msgid "error opening rbd image %s" +msgstr "Від'єднати том %s" + +#: cinder/volume/drivers/rbd.py:259 +msgid "rados and rbd python libraries not found" msgstr "" -#: cinder/virt/libvirt/connection.py:761 cinder/virt/libvirt/connection.py:905 -msgid "During reboot, instance disappeared." +#: cinder/volume/drivers/rbd.py:265 +msgid "error connecting to ceph cluster" msgstr "" -#: cinder/virt/libvirt/connection.py:766 -msgid "Instance rebooted successfully." +#: cinder/volume/drivers/rbd.py:346 cinder/volume/drivers/sheepdog.py:178 +msgid "error refreshing volume stats" msgstr "" -#: cinder/virt/libvirt/connection.py:867 cinder/virt/xenapi/vmops.py:1358 +#: cinder/volume/drivers/rbd.py:377 #, python-format -msgid "" -"Found %(migration_count)d unconfirmed migrations older than " -"%(confirm_window)d seconds" +msgid "clone depth exceeds limit of %s" msgstr "" -#: cinder/virt/libvirt/connection.py:871 +#: cinder/volume/drivers/rbd.py:411 #, python-format -msgid "Automatically confirming migration %d" +msgid "maximum clone depth (%d) has been reached - flattening source volume" msgstr "" -#: cinder/virt/libvirt/connection.py:896 -msgid "Instance is running" +#: cinder/volume/drivers/rbd.py:423 +#, python-format +msgid "flattening source volume %s" msgstr "" -#: cinder/virt/libvirt/connection.py:910 -msgid "Instance spawned successfully." 
+#: cinder/volume/drivers/rbd.py:435
+#, python-format
+msgid "creating snapshot='%s'"
 msgstr ""

-#: cinder/virt/libvirt/connection.py:926
+#: cinder/volume/drivers/rbd.py:445
 #, python-format
-msgid "data: %(data)r, fpath: %(fpath)r"
+msgid "cloning '%(src_vol)s@%(src_snap)s' to '%(dest)s'"
 msgstr ""

-#: cinder/virt/libvirt/connection.py:978
-msgid "Guest does not have a console available"
+#: cinder/volume/drivers/rbd.py:459
+msgid "clone created successfully"
 msgstr ""

-#: cinder/virt/libvirt/connection.py:1020
+#: cinder/volume/drivers/rbd.py:468
 #, python-format
-msgid "Path '%(path)s' supports direct I/O"
+msgid "creating volume '%s'"
 msgstr ""

-#: cinder/virt/libvirt/connection.py:1024
+#: cinder/volume/drivers/rbd.py:484
 #, python-format
-msgid "Path '%(path)s' does not support direct I/O: '%(ex)s'"
+msgid "flattening %(pool)s/%(img)s"
 msgstr ""

-#: cinder/virt/libvirt/connection.py:1028 cinder/virt/libvirt/connection.py:1032
+#: cinder/volume/drivers/rbd.py:490
 #, python-format
-msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'"
+msgid "cloning %(pool)s/%(img)s@%(snap)s to %(dst)s"
 msgstr ""

-#: cinder/virt/libvirt/connection.py:1153
-msgid "Creating image"
+#: cinder/volume/drivers/rbd.py:527
+msgid "volume has no backup snaps"
 msgstr ""

-#: cinder/virt/libvirt/connection.py:1339
+#: cinder/volume/drivers/rbd.py:550
 #, python-format
-msgid "Injecting %(injection)s into image %(img_id)s"
+msgid "volume %s is not a clone"
 msgstr ""

-#: cinder/virt/libvirt/connection.py:1349
+#: cinder/volume/drivers/rbd.py:568
 #, python-format
-msgid "Ignoring error injecting data into image %(img_id)s (%(e)s)"
+msgid "deleting parent snapshot %s"
 msgstr ""

-#: cinder/virt/libvirt/connection.py:1381
+#: cinder/volume/drivers/rbd.py:579
 #, python-format
-msgid "block_device_list %s"
+msgid "deleting parent %s"
 msgstr ""

-#: cinder/virt/libvirt/connection.py:1658
-msgid "Starting toXML method"
+#: cinder/volume/drivers/rbd.py:593
+#, python-format
+msgid "volume %s no longer exists in backend"
 msgstr ""

-#: cinder/virt/libvirt/connection.py:1662
-msgid "Finished toXML method"
+#: cinder/volume/drivers/rbd.py:609
+msgid "volume has clone snapshot(s)"
 msgstr ""

-#: cinder/virt/libvirt/connection.py:1679
+#: cinder/volume/drivers/rbd.py:625
 #, python-format
-msgid ""
-"Error from libvirt while looking up %(instance_name)s: [Error Code "
-"%(error_code)s] %(ex)s"
+msgid "deleting rbd volume %s"
 msgstr ""

-#: cinder/virt/libvirt/connection.py:1920
-msgid "libvirt version is too old (does not support getVersion)"
+#: cinder/volume/drivers/rbd.py:629
+msgid ""
+"ImageBusy error raised while deleting rbd volume. This may have been "
+"caused by a connection from a client that has crashed and, if so, may be "
+"resolved by retrying the delete after 30 seconds has elapsed."
msgstr "" -#: cinder/virt/libvirt/connection.py:1942 -#, python-format -msgid "'' must be 1, but %d\n" +#: cinder/volume/drivers/rbd.py:642 +msgid "volume is a clone so cleaning references" msgstr "" -#: cinder/virt/libvirt/connection.py:1969 +#: cinder/volume/drivers/rbd.py:696 #, python-format -msgid "topology (%(topology)s) must have %(ks)s" +msgid "connection data: %s" msgstr "" -#: cinder/virt/libvirt/connection.py:2067 -#, python-format -msgid "" -"Instance launched has CPU info:\n" -"%s" +#: cinder/volume/drivers/rbd.py:705 +msgid "Not stored in rbd" msgstr "" -#: cinder/virt/libvirt/connection.py:2079 -#, python-format -msgid "" -"CPU doesn't have compatibility.\n" -"\n" -"%(ret)s\n" -"\n" -"Refer to %(u)s" +#: cinder/volume/drivers/rbd.py:709 +msgid "Blank components" msgstr "" -#: cinder/virt/libvirt/connection.py:2136 -#, python-format -msgid "Timeout migrating for %s. nwfilter not found." +#: cinder/volume/drivers/rbd.py:712 +msgid "Not an rbd snapshot" msgstr "" -#: cinder/virt/libvirt/connection.py:2352 +#: cinder/volume/drivers/rbd.py:724 +#, fuzzy, python-format +msgid "not cloneable: %s" +msgstr "відповідь %s" + +#: cinder/volume/drivers/rbd.py:728 #, python-format -msgid "skipping %(path)s since it looks like volume" +msgid "%s is in a different ceph cluster" msgstr "" -#: cinder/virt/libvirt/connection.py:2407 -#, python-format -msgid "Getting disk size of %(i_name)s: %(e)s" +#: cinder/volume/drivers/rbd.py:733 +msgid "rbd image clone requires image format to be 'raw' but image {0} is '{1}'" msgstr "" -#: cinder/virt/libvirt/connection.py:2458 +#: cinder/volume/drivers/rbd.py:747 #, python-format -msgid "Instance %s: Starting migrate_disk_and_power_off" +msgid "Unable to open image %(loc)s: %(err)s" msgstr "" -#: cinder/virt/libvirt/connection.py:2513 -msgid "During wait running, instance disappeared." +#: cinder/volume/drivers/rbd.py:817 +msgid "volume backup complete." msgstr "" -#: cinder/virt/libvirt/connection.py:2518 -msgid "Instance running successfully." +#: cinder/volume/drivers/rbd.py:830 +msgid "volume restore complete." msgstr "" -#: cinder/virt/libvirt/connection.py:2525 +#: cinder/volume/drivers/rbd.py:840 cinder/volume/drivers/sheepdog.py:195 #, python-format -msgid "Instance %s: Starting finish_migration" +msgid "Failed to Extend Volume %(volname)s" msgstr "" -#: cinder/virt/libvirt/connection.py:2565 +#: cinder/volume/drivers/rbd.py:845 cinder/volume/drivers/sheepdog.py:200 +#: cinder/volume/drivers/windows/windows.py:223 #, python-format -msgid "Instance %s: Starting finish_revert_migration" +msgid "Extend volume from %(old_size)s GB to %(new_size)s GB." msgstr "" -#: cinder/virt/libvirt/firewall.py:42 -msgid "" -"Libvirt module could not be loaded. NWFilterFirewall will not work " -"correctly." +#: cinder/volume/drivers/scality.py:67 +msgid "Value required for 'scality_sofs_config'" msgstr "" -#: cinder/virt/libvirt/firewall.py:93 -msgid "Called setup_basic_filtering in nwfilter" +#: cinder/volume/drivers/scality.py:78 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" msgstr "" -#: cinder/virt/libvirt/firewall.py:101 -msgid "Ensuring static filters" +#: cinder/volume/drivers/scality.py:84 +msgid "Cannot execute /sbin/mount.sofs" msgstr "" -#: cinder/virt/libvirt/firewall.py:171 -#, python-format -msgid "The nwfilter(%(instance_filter_name)s) is not found." 
+#: cinder/volume/drivers/scality.py:105
+msgid "Cannot mount Scality SOFS, check syslog for errors"
 msgstr ""

-#: cinder/virt/libvirt/firewall.py:217
+#: cinder/volume/drivers/scality.py:139
 #, python-format
-msgid "The nwfilter(%(instance_filter_name)s) for%(name)s is not found."
+msgid "Cannot find volume dir for Scality SOFS at '%s'"
 msgstr ""

-#: cinder/virt/libvirt/firewall.py:233
-msgid "iptables firewall: Setup Basic Filtering"
+#: cinder/volume/drivers/sheepdog.py:59
+#, python-format
+msgid "Sheepdog is not working: %s"
 msgstr ""

-#: cinder/virt/libvirt/firewall.py:252
-msgid "Attempted to unfilter instance which is not filtered"
+#: cinder/volume/drivers/sheepdog.py:64
+msgid "Sheepdog is not working"
 msgstr ""

-#: cinder/virt/libvirt/imagecache.py:170
+#: cinder/volume/drivers/solidfire.py:144
 #, python-format
-msgid "%s is a valid instance name"
+msgid "Payload for SolidFire API call: %s"
 msgstr ""

-#: cinder/virt/libvirt/imagecache.py:173
+#: cinder/volume/drivers/solidfire.py:151
 #, python-format
-msgid "%s has a disk file"
+msgid ""
+"Failed to make httplib connection SolidFire Cluster: %s (verify san_ip "
+"settings)"
 msgstr ""

-#: cinder/virt/libvirt/imagecache.py:175
+#: cinder/volume/drivers/solidfire.py:154
 #, python-format
-msgid "Instance %(instance)s is backed by %(backing)s"
+msgid "Failed to make httplib connection: %s"
 msgstr ""

-#: cinder/virt/libvirt/imagecache.py:186
+#: cinder/volume/drivers/solidfire.py:161
 #, python-format
 msgid ""
-"Instance %(instance)s is using a backing file %(backing)s which does not "
-"appear in the image service"
+"Request to SolidFire cluster returned bad status: %(status)s / %(reason)s"
+" (check san_login/san_password settings)"
 msgstr ""

-#: cinder/virt/libvirt/imagecache.py:237
+#: cinder/volume/drivers/solidfire.py:166
 #, python-format
-msgid "%(id)s (%(base_file)s): image verification failed"
+msgid "HTTP request failed, with status: %(status)s and reason: %(reason)s"
 msgstr ""

-#: cinder/virt/libvirt/imagecache.py:247
+#: cinder/volume/drivers/solidfire.py:177
 #, python-format
-msgid "%(id)s (%(base_file)s): image verification skipped, no hash stored"
+msgid "Call to json.loads() raised an exception: %s"
 msgstr ""

-#: cinder/virt/libvirt/imagecache.py:266
+#: cinder/volume/drivers/solidfire.py:183
 #, python-format
-msgid "Cannot remove %(base_file)s, it does not exist"
+msgid "Results of SolidFire API call: %s"
 msgstr ""

-#: cinder/virt/libvirt/imagecache.py:278
+#: cinder/volume/drivers/solidfire.py:187
 #, python-format
-msgid "Base file too young to remove: %s"
+msgid "Clone operation encountered: %s"
 msgstr ""

-#: cinder/virt/libvirt/imagecache.py:281
+#: cinder/volume/drivers/solidfire.py:189
 #, python-format
-msgid "Removing base file: %s"
+msgid "Waiting for outstanding operation before retrying snapshot: %s"
 msgstr ""

-#: cinder/virt/libvirt/imagecache.py:288
+#: cinder/volume/drivers/solidfire.py:195
 #, python-format
-msgid "Failed to remove %(base_file)s, error was %(error)s"
+msgid "Detected xDBVersionMismatch, retry %s of 5"
 msgstr ""

-#: cinder/virt/libvirt/imagecache.py:299
-#, python-format
-msgid "%(id)s (%(base_file)s): checking"
-msgstr ""

+#: cinder/volume/drivers/solidfire.py:202
+#: cinder/volume/drivers/solidfire.py:271
+#: cinder/volume/drivers/solidfire.py:366
+#, fuzzy, python-format
+msgid "API response: %s"
+msgstr "відповідь %s"

-#: cinder/virt/libvirt/imagecache.py:318
+#: cinder/volume/drivers/solidfire.py:222
 #, python-format
-msgid ""
-"%(id)s (%(base_file)s): in use: on this node %(local)d local, %(remote)d "
-"on other nodes"
+msgid "Found solidfire account: %s"
 msgstr ""

-#: cinder/virt/libvirt/imagecache.py:330
+#: cinder/volume/drivers/solidfire.py:253
 #, python-format
-msgid ""
-"%(id)s (%(base_file)s): warning -- an absent base file is in use! "
-"instances: %(instance_list)s"
+msgid "solidfire account: %s does not exist, create it..."
 msgstr ""

-#: cinder/virt/libvirt/imagecache.py:338
+#: cinder/volume/drivers/solidfire.py:315
 #, python-format
-msgid "%(id)s (%(base_file)s): in use on (%(remote)d on other nodes)"
+msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!"
 msgstr ""

-#: cinder/virt/libvirt/imagecache.py:348
-#, python-format
-msgid "%(id)s (%(base_file)s): image is not in use"
+#: cinder/volume/drivers/solidfire.py:398
+msgid "Failed to get model update from clone"
 msgstr ""

-#: cinder/virt/libvirt/imagecache.py:354
+#: cinder/volume/drivers/solidfire.py:410
 #, python-format
-msgid "%(id)s (%(base_file)s): image is in use"
+msgid "Failed volume create: %s"
 msgstr ""

-#: cinder/virt/libvirt/imagecache.py:377
+#: cinder/volume/drivers/solidfire.py:425
 #, python-format
-msgid "Skipping verification, no base directory at %s"
+msgid "More than one valid preset was detected, using %s"
 msgstr ""

-#: cinder/virt/libvirt/imagecache.py:381
-msgid "Verify base images"
+#: cinder/volume/drivers/solidfire.py:460
+#, python-format
+msgid "Failed to get SolidFire Volume: %s"
 msgstr ""

-#: cinder/virt/libvirt/imagecache.py:388
+#: cinder/volume/drivers/solidfire.py:469
 #, python-format
-msgid "Image id %(id)s yields fingerprint %(fingerprint)s"
+msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s."
 msgstr ""

-#: cinder/virt/libvirt/imagecache.py:406
+#: cinder/volume/drivers/solidfire.py:478
 #, python-format
-msgid "Unknown base file: %s"
+msgid "Volume %s, not found on SF Cluster."
 msgstr ""

-#: cinder/virt/libvirt/imagecache.py:411
+#: cinder/volume/drivers/solidfire.py:481
 #, python-format
-msgid "Active base files: %s"
+msgid "Found %(count)s volumes mapped to id: %(uuid)s."
 msgstr ""

-#: cinder/virt/libvirt/imagecache.py:414
-#, python-format
-msgid "Corrupt base files: %s"
+#: cinder/volume/drivers/solidfire.py:550
+msgid "Enter SolidFire delete_volume..."
 msgstr ""

-#: cinder/virt/libvirt/imagecache.py:418
+#: cinder/volume/drivers/solidfire.py:554
 #, python-format
-msgid "Removable base files: %s"
+msgid "Account for Volume ID %s was not found on the SolidFire Cluster!"
 msgstr ""

-#: cinder/virt/libvirt/imagecache.py:426
-msgid "Verification complete"
+#: cinder/volume/drivers/solidfire.py:556
+msgid "This usually means the volume was never successfully created."
 msgstr ""

-#: cinder/virt/libvirt/utils.py:264
-msgid "Unable to find an open port"
+#: cinder/volume/drivers/solidfire.py:569
+#, python-format
+msgid "Failed to delete SolidFire Volume: %s"
 msgstr ""

-#: cinder/virt/libvirt/vif.py:90
+#: cinder/volume/drivers/solidfire.py:572
+#: cinder/volume/drivers/solidfire.py:646
+#: cinder/volume/drivers/solidfire.py:709
+#: cinder/volume/drivers/solidfire.py:734
 #, python-format
-msgid "Ensuring vlan %(vlan)s and bridge %(bridge)s"
+msgid "Volume ID %s was not found on the SolidFire Cluster!"
msgstr "" -#: cinder/virt/libvirt/vif.py:99 -#, python-format -msgid "Ensuring bridge %s" +#: cinder/volume/drivers/solidfire.py:575 +msgid "Leaving SolidFire delete_volume" msgstr "" -#: cinder/virt/libvirt/vif.py:165 cinder/virt/libvirt/vif.py:220 -#, python-format -msgid "Failed while unplugging vif of instance '%s'" +#: cinder/volume/drivers/solidfire.py:579 +msgid "Executing SolidFire ensure_export..." msgstr "" -#: cinder/virt/libvirt/volume.py:163 -#, python-format -msgid "iSCSI device not found at %s" +#: cinder/volume/drivers/solidfire.py:587 +msgid "Executing SolidFire create_export..." msgstr "" -#: cinder/virt/libvirt/volume.py:166 -#, python-format -msgid "" -"ISCSI volume not yet found at: %(mount_device)s. Will rescan & retry. " -"Try number: %(tries)s" +#: cinder/volume/drivers/solidfire.py:638 +msgid "Entering SolidFire extend_volume..." msgstr "" -#: cinder/virt/libvirt/volume.py:178 -#, python-format -msgid "Found iSCSI node %(mount_device)s (after %(tries)s rescans)" +#: cinder/volume/drivers/solidfire.py:660 +msgid "Leaving SolidFire extend_volume" msgstr "" -#: cinder/virt/vmwareapi/error_util.py:93 -#, python-format -msgid "Error(s) %s occurred in the call to RetrieveProperties" +#: cinder/volume/drivers/solidfire.py:665 +msgid "Updating cluster status info" msgstr "" -#: cinder/virt/vmwareapi/fake.py:44 cinder/virt/xenapi/fake.py:77 -#, python-format -msgid "%(text)s: _db_content => %(content)s" +#: cinder/volume/drivers/solidfire.py:673 +msgid "Failed to get updated stats" msgstr "" -#: cinder/virt/vmwareapi/fake.py:131 -#, python-format -msgid "Property %(attr)s not set for the managed object %(objName)s" +#: cinder/volume/drivers/solidfire.py:703 +#: cinder/volume/drivers/solidfire.py:728 +msgid "Entering SolidFire attach_volume..." msgstr "" -#: cinder/virt/vmwareapi/fake.py:437 -msgid "There is no VM registered" +#: cinder/volume/drivers/solidfire.py:773 +msgid "Leaving SolidFire transfer volume" msgstr "" -#: cinder/virt/vmwareapi/fake.py:439 cinder/virt/vmwareapi/fake.py:609 +#: cinder/volume/drivers/zadara.py:236 #, python-format -msgid "Virtual Machine with ref %s is not there" +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" msgstr "" -#: cinder/virt/vmwareapi/fake.py:502 +#: cinder/volume/drivers/zadara.py:260 #, python-format -msgid "Logging out a session that is invalid or already logged out: %s" +msgid "Operation completed. %(data)s" msgstr "" -#: cinder/virt/vmwareapi/fake.py:517 -msgid "Session is faulty" +#: cinder/volume/drivers/zadara.py:357 +#, python-format +msgid "Pool %(name)s: %(total)sGB total, %(free)sGB free" msgstr "" -#: cinder/virt/vmwareapi/fake.py:520 -msgid "Session Invalid" +#: cinder/volume/drivers/zadara.py:408 cinder/volume/drivers/zadara.py:531 +#, python-format +msgid "Volume %(name)s could not be found. 
It might be already deleted" msgstr "" -#: cinder/virt/vmwareapi/fake.py:606 -msgid " No Virtual Machine has been registered yet" +#: cinder/volume/drivers/zadara.py:438 +#, python-format +msgid "Create snapshot: %s" msgstr "" -#: cinder/virt/vmwareapi/io_util.py:99 +#: cinder/volume/drivers/zadara.py:445 cinder/volume/drivers/zadara.py:490 +#: cinder/volume/drivers/zadara.py:516 #, python-format -msgid "Glance image %s is in killed state" +msgid "Volume %(name)s not found" msgstr "" -#: cinder/virt/vmwareapi/io_util.py:107 +#: cinder/volume/drivers/zadara.py:456 #, python-format -msgid "Glance image %(image_id)s is in unknown state - %(state)s" +msgid "Delete snapshot: %s" msgstr "" -#: cinder/virt/vmwareapi/network_utils.py:128 -msgid "" -"ESX SOAP server returned an empty port group for the host system in its " -"response" +#: cinder/volume/drivers/zadara.py:464 +#, python-format +msgid "snapshot: original volume %s not found, skipping delete operation" msgstr "" -#: cinder/virt/vmwareapi/network_utils.py:155 +#: cinder/volume/drivers/zadara.py:472 #, python-format -msgid "Creating Port Group with name %s on the ESX host" +msgid "snapshot: snapshot %s not found, skipping delete operation" msgstr "" -#: cinder/virt/vmwareapi/network_utils.py:169 +#: cinder/volume/drivers/zadara.py:483 #, python-format -msgid "Created Port Group with name %s on the ESX host" +msgid "Creating volume from snapshot: %s" msgstr "" -#: cinder/virt/vmwareapi/read_write_util.py:150 +#: cinder/volume/drivers/zadara.py:496 #, python-format -msgid "Exception during HTTP connection close in VMWareHTTpWrite. Exception is %s" +msgid "Snapshot %(name)s not found" msgstr "" -#: cinder/virt/vmwareapi/vim.py:84 -msgid "Unable to import suds." +#: cinder/volume/drivers/zadara.py:614 +#, python-format +msgid "Attach properties: %(properties)s" msgstr "" -#: cinder/virt/vmwareapi/vim.py:90 -msgid "Must specify vmwareapi_wsdl_loc" +#: cinder/volume/drivers/emc/emc_smis_common.py:40 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." msgstr "" -#: cinder/virt/vmwareapi/vim.py:145 -#, python-format -msgid "No such SOAP method '%s' provided by VI SDK" +#: cinder/volume/drivers/emc/emc_smis_common.py:79 +msgid "Entering create_volume." msgstr "" -#: cinder/virt/vmwareapi/vim.py:150 +#: cinder/volume/drivers/emc/emc_smis_common.py:83 #, python-format -msgid "httplib error in %s: " +msgid "Create Volume: %(volume)s Size: %(size)lu" msgstr "" -#: cinder/virt/vmwareapi/vim.py:157 +#: cinder/volume/drivers/emc/emc_smis_common.py:91 #, python-format -msgid "Socket error in %s: " +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" msgstr "" -#: cinder/virt/vmwareapi/vim.py:162 +#: cinder/volume/drivers/emc/emc_smis_common.py:98 #, python-format -msgid "Type error in %s: " +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" msgstr "" -#: cinder/virt/vmwareapi/vim.py:166 +#: cinder/volume/drivers/emc/emc_smis_common.py:107 #, python-format -msgid "Exception in %s " +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." 
msgstr "" -#: cinder/virt/vmwareapi/vmops.py:66 -msgid "Getting list of instances" +#: cinder/volume/drivers/emc/emc_smis_common.py:115 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:82 +#: cinder/volume/drivers/emc/emc_smis_common.py:130 #, python-format -msgid "Got total of %s instances" +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:126 -msgid "Couldn't get a local Datastore reference" +#: cinder/volume/drivers/emc/emc_smis_common.py:137 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:196 +#: cinder/volume/drivers/emc/emc_smis_common.py:144 #, python-format -msgid "Creating VM with the name %s on the ESX host" +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:205 -#, python-format -msgid "Created VM with the name %s on the ESX host" +#: cinder/volume/drivers/emc/emc_smis_common.py:152 +msgid "Entering create_volume_from_snapshot." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:234 +#: cinder/volume/drivers/emc/emc_smis_common.py:157 #, python-format msgid "" -"Creating Virtual Disk of size %(vmdk_file_size_in_kb)s KB and adapter " -"type %(adapter_type)s on the ESX host local store %(data_store_name)s" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:251 +#: cinder/volume/drivers/emc/emc_smis_common.py:167 #, python-format msgid "" -"Created Virtual Disk of size %(vmdk_file_size_in_kb)s KB on the ESX host " -"local store %(data_store_name)s" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:260 +#: cinder/volume/drivers/emc/emc_smis_common.py:177 #, python-format msgid "" -"Deleting the file %(flat_uploaded_vmdk_path)s on the ESX host localstore " -"%(data_store_name)s" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:272 +#: cinder/volume/drivers/emc/emc_smis_common.py:188 #, python-format msgid "" -"Deleted the file %(flat_uploaded_vmdk_path)s on the ESX host local store " -"%(data_store_name)s" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." 
msgstr "" -#: cinder/virt/vmwareapi/vmops.py:283 +#: cinder/volume/drivers/emc/emc_smis_common.py:197 #, python-format msgid "" -"Downloading image file data %(image_ref)s to the ESX data store " -"%(data_store_name)s" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:298 +#: cinder/volume/drivers/emc/emc_smis_common.py:218 #, python-format msgid "" -"Downloaded image file data %(image_ref)s to the ESX data store " -"%(data_store_name)s" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:315 +#: cinder/volume/drivers/emc/emc_smis_common.py:230 #, python-format -msgid "Reconfiguring VM instance %s to attach the image disk" +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:322 +#: cinder/volume/drivers/emc/emc_smis_common.py:241 #, python-format -msgid "Reconfigured VM instance %s to attach the image disk" +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:329 +#: cinder/volume/drivers/emc/emc_smis_common.py:257 #, python-format -msgid "Powering on the VM instance %s" +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:335 +#: cinder/volume/drivers/emc/emc_smis_common.py:266 #, python-format -msgid "Powered on the VM instance %s" +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:381 +#: cinder/volume/drivers/emc/emc_smis_common.py:278 #, python-format -msgid "Creating Snapshot of the VM instance %s " +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:391 -#, python-format -msgid "Created Snapshot of the VM instance %s " +#: cinder/volume/drivers/emc/emc_smis_common.py:287 +msgid "Entering create_cloned_volume." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:434 +#: cinder/volume/drivers/emc/emc_smis_common.py:292 #, python-format -msgid "Copying disk data before snapshot of the VM instance %s" +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:447 +#: cinder/volume/drivers/emc/emc_smis_common.py:302 #, python-format -msgid "Copied disk data before snapshot of the VM instance %s" +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:456 +#: cinder/volume/drivers/emc/emc_smis_common.py:312 #, python-format -msgid "Uploading image %s" +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. 
Cannot find Replication Service to create cloned volume." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:469 +#: cinder/volume/drivers/emc/emc_smis_common.py:321 #, python-format -msgid "Uploaded image %s" +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:479 +#: cinder/volume/drivers/emc/emc_smis_common.py:342 #, python-format -msgid "Deleting temporary vmdk file %s" +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. Return code: %(rc)lu.Error: %(error)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:488 +#: cinder/volume/drivers/emc/emc_smis_common.py:354 #, python-format -msgid "Deleted temporary vmdk file %s" -msgstr "" - -#: cinder/virt/vmwareapi/vmops.py:520 -msgid "instance is not powered on" +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:527 +#: cinder/volume/drivers/emc/emc_smis_common.py:365 #, python-format -msgid "Rebooting guest OS of VM %s" +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:530 +#: cinder/volume/drivers/emc/emc_smis_common.py:381 #, python-format -msgid "Rebooted guest OS of VM %s" +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:532 +#: cinder/volume/drivers/emc/emc_smis_common.py:390 #, python-format -msgid "Doing hard reboot of VM %s" +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:536 +#: cinder/volume/drivers/emc/emc_smis_common.py:402 #, python-format -msgid "Did hard reboot of VM %s" +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:548 -#, python-format -msgid "instance - %s not present" +#: cinder/volume/drivers/emc/emc_smis_common.py:411 +msgid "Entering delete_volume." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:567 +#: cinder/volume/drivers/emc/emc_smis_common.py:413 #, python-format -msgid "Powering off the VM %s" +msgid "Delete Volume: %(volume)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:572 +#: cinder/volume/drivers/emc/emc_smis_common.py:420 #, python-format -msgid "Powered off the VM %s" +msgid "Volume %(name)s not found on the array. No volume to delete." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:576 +#: cinder/volume/drivers/emc/emc_smis_common.py:430 #, python-format -msgid "Unregistering the VM %s" +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." 
msgstr "" -#: cinder/virt/vmwareapi/vmops.py:579 +#: cinder/volume/drivers/emc/emc_smis_common.py:438 #, python-format -msgid "Unregistered the VM %s" +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:581 +#: cinder/volume/drivers/emc/emc_smis_common.py:442 #, python-format msgid "" -"In vmwareapi:vmops:destroy, got this exception while un-registering the " -"VM: %s" -msgstr "" - -#: cinder/virt/vmwareapi/vmops.py:592 -#, python-format -msgid "Deleting contents of the VM %(name)s from datastore %(datastore_name)s" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:602 +#: cinder/volume/drivers/emc/emc_smis_common.py:456 #, python-format -msgid "Deleted contents of the VM %(name)s from datastore %(datastore_name)s" +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:607 +#: cinder/volume/drivers/emc/emc_smis_common.py:465 #, python-format -msgid "" -"In vmwareapi:vmops:destroy, got this exception while deleting the VM " -"contents from the disk: %s" +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:615 -msgid "pause not supported for vmwareapi" +#: cinder/volume/drivers/emc/emc_smis_common.py:472 +msgid "Entering create_snapshot." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:619 -msgid "unpause not supported for vmwareapi" +#: cinder/volume/drivers/emc/emc_smis_common.py:476 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:633 +#: cinder/volume/drivers/emc/emc_smis_common.py:488 #, python-format -msgid "Suspending the VM %s " +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:637 +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:497 +#: cinder/volume/drivers/emc/emc_smis_common.py:567 #, python-format -msgid "Suspended the VM %s " +msgid "Cannot find Replication Service to create snapshot for volume %s." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:640 -msgid "instance is powered off and can not be suspended." +#: cinder/volume/drivers/emc/emc_smis_common.py:502 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:643 +#: cinder/volume/drivers/emc/emc_smis_common.py:518 #, python-format -msgid "VM %s was already in suspended state. So returning without doing anything" +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:656 +#: cinder/volume/drivers/emc/emc_smis_common.py:527 #, python-format -msgid "Resuming the VM %s" +msgid "" +"Error Create Snapshot: %(snapshot)s Volume: %(volume)s Error: " +"%(errordesc)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:661 +#: cinder/volume/drivers/emc/emc_smis_common.py:535 #, python-format -msgid "Resumed the VM %s " +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." 
msgstr "" -#: cinder/virt/vmwareapi/vmops.py:663 -msgid "instance is not in a suspended state" +#: cinder/volume/drivers/emc/emc_smis_common.py:541 +msgid "Entering delete_snapshot." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:699 -msgid "get_diagnostics not implemented for vmwareapi" +#: cinder/volume/drivers/emc/emc_smis_common.py:545 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:757 +#: cinder/volume/drivers/emc/emc_smis_common.py:551 #, python-format msgid "" -"Reconfiguring VM instance %(name)s to set the machine id with ip - " -"%(ip_addr)s" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:765 +#: cinder/volume/drivers/emc/emc_smis_common.py:559 #, python-format msgid "" -"Reconfigured VM instance %(name)s to set the machine id with ip - " -"%(ip_addr)s" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:802 +#: cinder/volume/drivers/emc/emc_smis_common.py:574 #, python-format -msgid "Creating directory with path %s" +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:806 +#: cinder/volume/drivers/emc/emc_smis_common.py:590 #, python-format -msgid "Created directory with path %s" +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:89 +#: cinder/volume/drivers/emc/emc_smis_common.py:599 #, python-format -msgid "Downloading image %s from glance image server" +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:103 +#: cinder/volume/drivers/emc/emc_smis_common.py:611 #, python-format -msgid "Downloaded image %s from glance image server" +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." 
msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:108 +#: cinder/volume/drivers/emc/emc_smis_common.py:621 #, python-format -msgid "Uploading image %s to the Glance image server" +msgid "Create export: %(volume)s" msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:129 +#: cinder/volume/drivers/emc/emc_smis_common.py:626 #, python-format -msgid "Uploaded image %s to the Glance image server" +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:139 +#: cinder/volume/drivers/emc/emc_smis_common.py:648 #, python-format -msgid "Getting image size for the image %s" +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:143 +#: cinder/volume/drivers/emc/emc_smis_common.py:663 #, python-format -msgid "Got image size of %(size)s for the image %(image)s" +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" msgstr "" -#: cinder/virt/xenapi/fake.py:553 cinder/virt/xenapi/fake.py:652 -#: cinder/virt/xenapi/fake.py:670 cinder/virt/xenapi/fake.py:732 -msgid "Raising NotImplemented" -msgstr "" +#: cinder/volume/drivers/emc/emc_smis_common.py:674 +#, fuzzy, python-format +msgid "Error mapping volume %s." +msgstr "Від'єднати том %s" -#: cinder/virt/xenapi/fake.py:555 +#: cinder/volume/drivers/emc/emc_smis_common.py:678 #, python-format -msgid "xenapi.fake does not have an implementation for %s" +msgid "ExposePaths for volume %s completed successfully." msgstr "" -#: cinder/virt/xenapi/fake.py:589 +#: cinder/volume/drivers/emc/emc_smis_common.py:694 #, python-format -msgid "Calling %(localname)s %(impl)s" +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" msgstr "" -#: cinder/virt/xenapi/fake.py:594 +#: cinder/volume/drivers/emc/emc_smis_common.py:707 #, python-format -msgid "Calling getter %s" +msgid "Error unmapping volume %s." msgstr "" -#: cinder/virt/xenapi/fake.py:654 +#: cinder/volume/drivers/emc/emc_smis_common.py:711 #, python-format -msgid "" -"xenapi.fake does not have an implementation for %s or it has been called " -"with the wrong number of arguments" +msgid "HidePaths for volume %s completed successfully." msgstr "" -#: cinder/virt/xenapi/host.py:67 +#: cinder/volume/drivers/emc/emc_smis_common.py:724 #, python-format msgid "" -"Instance %(name)s running on %(host)s could not be found in the database:" -" assuming it is a worker VM and skipping migration to a new host" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" msgstr "" -#: cinder/virt/xenapi/host.py:137 +#: cinder/volume/drivers/emc/emc_smis_common.py:739 #, python-format -msgid "Unable to get SR for this host: %s" +msgid "Error mapping volume %(vol)s. %(error)s" msgstr "" -#: cinder/virt/xenapi/host.py:169 -msgid "Unable to get updated status" +#: cinder/volume/drivers/emc/emc_smis_common.py:744 +#, python-format +msgid "AddMembers for volume %s completed successfully." msgstr "" -#: cinder/virt/xenapi/host.py:172 +#: cinder/volume/drivers/emc/emc_smis_common.py:757 #, python-format -msgid "The call to %(method)s returned an error: %(e)s." 
+msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" msgstr "" -#: cinder/virt/xenapi/network_utils.py:37 +#: cinder/volume/drivers/emc/emc_smis_common.py:770 #, python-format -msgid "Found non-unique network for name_label %s" +msgid "Error unmapping volume %(vol)s. %(error)s" msgstr "" -#: cinder/virt/xenapi/network_utils.py:55 +#: cinder/volume/drivers/emc/emc_smis_common.py:775 #, python-format -msgid "Found non-unique network for bridge %s" +msgid "RemoveMembers for volume %s completed successfully." msgstr "" -#: cinder/virt/xenapi/network_utils.py:58 +#: cinder/volume/drivers/emc/emc_smis_common.py:781 #, python-format -msgid "Found no network for bridge %s" +msgid "Map volume: %(volume)s" msgstr "" -#: cinder/virt/xenapi/pool.py:111 +#: cinder/volume/drivers/emc/emc_smis_common.py:790 +#: cinder/volume/drivers/emc/emc_smis_common.py:820 #, python-format -msgid "Unable to eject %(host)s from the pool; pool not empty" +msgid "Cannot find Controller Configuration Service for storage system %s" msgstr "" -#: cinder/virt/xenapi/pool.py:126 +#: cinder/volume/drivers/emc/emc_smis_common.py:804 #, python-format -msgid "Unable to eject %(host)s from the pool; No master found" +msgid "Unmap volume: %(volume)s" msgstr "" -#: cinder/virt/xenapi/pool.py:143 +#: cinder/volume/drivers/emc/emc_smis_common.py:810 #, python-format -msgid "Pool-Join failed: %(e)s" +msgid "Volume %s is not mapped. No volume to unmap." msgstr "" -#: cinder/virt/xenapi/pool.py:146 +#: cinder/volume/drivers/emc/emc_smis_common.py:834 #, python-format -msgid "Unable to join %(host)s in the pool" +msgid "Initialize connection: %(volume)s" msgstr "" -#: cinder/virt/xenapi/pool.py:162 +#: cinder/volume/drivers/emc/emc_smis_common.py:840 #, python-format -msgid "Pool-eject failed: %(e)s" +msgid "Volume %s is already mapped." msgstr "" -#: cinder/virt/xenapi/pool.py:174 +#: cinder/volume/drivers/emc/emc_smis_common.py:852 #, python-format -msgid "Unable to set up pool: %(e)s." +msgid "Terminate connection: %(volume)s" msgstr "" -#: cinder/virt/xenapi/pool.py:185 +#: cinder/volume/drivers/emc/emc_smis_common.py:884 #, python-format -msgid "Pool-set_name_label failed: %(e)s" +msgid "Found Storage Type: %s" msgstr "" -#: cinder/virt/xenapi/vif.py:103 -#, python-format -msgid "Found no PIF for device %s" +#: cinder/volume/drivers/emc/emc_smis_common.py:887 +msgid "Storage type not found." msgstr "" -#: cinder/virt/xenapi/vif.py:122 +#: cinder/volume/drivers/emc/emc_smis_common.py:903 #, python-format -msgid "" -"PIF %(pif_rec['uuid'])s for network %(bridge)s has VLAN id %(pif_vlan)d. " -"Expected %(vlan_num)d" +msgid "Found Masking View: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:218 -msgid "Created VM" +#: cinder/volume/drivers/emc/emc_smis_common.py:906 +msgid "Masking View not found." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:245 -#, python-format -msgid "VBD not found in instance %s" +#: cinder/volume/drivers/emc/emc_smis_common.py:928 +msgid "Ecom user not found." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:262 +#: cinder/volume/drivers/emc/emc_smis_common.py:948 #, python-format -msgid "VBD %s already detached" +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:952 +msgid "Ecom server not found." 
msgstr "" -#: cinder/virt/xenapi/vm_utils.py:265 +#: cinder/volume/drivers/emc/emc_smis_common.py:959 +#, fuzzy +msgid "Cannot connect to ECOM server" +msgstr "Оновлено з'єднання до черги" + +#: cinder/volume/drivers/emc/emc_smis_common.py:971 #, python-format -msgid "VBD %(vbd_ref)s detach rejected, attempt %(num_attempt)d/%(max_attempts)d" +msgid "Found Replication Service: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:270 +#: cinder/volume/drivers/emc/emc_smis_common.py:984 #, python-format -msgid "Unable to unplug VBD %s" +msgid "Found Storage Configuration Service: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:275 +#: cinder/volume/drivers/emc/emc_smis_common.py:997 #, python-format -msgid "Reached maximum number of retries trying to unplug VBD %s" +msgid "Found Controller Configuration Service: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:286 +#: cinder/volume/drivers/emc/emc_smis_common.py:1010 #, python-format -msgid "Unable to destroy VBD %s" +msgid "Found Storage Hardware ID Management Service: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:305 +#: cinder/volume/drivers/emc/emc_smis_common.py:1054 #, python-format -msgid "Creating %(vbd_type)s-type VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +msgid "Pool %(storage_type)s is not found." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:308 +#: cinder/volume/drivers/emc/emc_smis_common.py:1060 #, python-format -msgid "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s." +msgid "Storage system not found for pool %(storage_type)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:319 +#: cinder/volume/drivers/emc/emc_smis_common.py:1066 #, python-format -msgid "Unable to destroy VDI %s" +msgid "Pool: %(pool)s SystemName: %(systemname)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:336 +#: cinder/volume/drivers/emc/emc_smis_common.py:1082 #, python-format -msgid "" -"Created VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s)" -" on %(sr_ref)s." +msgid "Pool name: %(poolname)s System name: %(systemname)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:345 +#: cinder/volume/drivers/emc/emc_smis_common.py:1114 #, python-format -msgid "Copied VDI %(vdi_ref)s from VDI %(vdi_to_copy_ref)s on %(sr_ref)s." +msgid "Volume %(volumename)s not found on the array." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:353 +#: cinder/volume/drivers/emc/emc_smis_common.py:1117 #, python-format -msgid "Cloned VDI %(vdi_ref)s from VDI %(vdi_to_clone_ref)s" +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:372 +#: cinder/volume/drivers/emc/emc_smis_common.py:1130 #, python-format -msgid "No primary VDI found for %(vm_ref)s" +msgid "Source: %(volumename)s Target: %(snapshotname)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:379 +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 #, python-format -msgid "Snapshotting with label '%(label)s'" +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " msgstr "" -#: cinder/virt/xenapi/vm_utils.py:392 +#: cinder/volume/drivers/emc/emc_smis_common.py:1158 #, python-format -msgid "Created snapshot %(template_vm_ref)s" +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:431 +#: cinder/volume/drivers/emc/emc_smis_common.py:1184 #, python-format -msgid "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s" +msgid "Error finding %s." 
msgstr "" -#: cinder/virt/xenapi/vm_utils.py:583 +#: cinder/volume/drivers/emc/emc_smis_common.py:1188 #, python-format -msgid "Creating blank HD of size %(req_size)d gigs" +msgid "Found %(name)s: %(initiator)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:634 +#: cinder/volume/drivers/emc/emc_smis_common.py:1248 #, python-format msgid "" -"Fast cloning is only supported on default local SR of type ext. SR on " -"this system was found to be of type %(sr_type)s. Ignoring the cow flag." +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:724 +#: cinder/volume/drivers/emc/emc_smis_common.py:1289 #, python-format msgid "" -"download_vhd %(image)s attempt %(attempt_num)d/%(max_attempts)d from " -"%(glance_host)s:%(glance_port)s" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:734 +#: cinder/volume/drivers/emc/emc_smis_common.py:1302 #, python-format -msgid "download_vhd failed: %r" +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:750 +#: cinder/volume/drivers/emc/emc_smis_common.py:1314 #, python-format -msgid "Asking xapi to fetch vhd image %(image)s" +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:760 +#: cinder/volume/drivers/emc/emc_smis_common.py:1326 #, python-format msgid "" -"xapi 'download_vhd' returned VDI of type '%(vdi_type)s' with UUID " -"'%(vdi_uuid)s'" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:789 +#: cinder/volume/drivers/emc/emc_smis_common.py:1361 #, python-format -msgid "vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes=%(vdi_size_bytes)d" +msgid "Available device number on %(storage)s: %(device)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:805 +#: cinder/volume/drivers/emc/emc_smis_common.py:1404 #, python-format -msgid "image_size_bytes=%(size_bytes)d, allowed_size_bytes=%(allowed_size_bytes)d" +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:809 +#: cinder/volume/drivers/emc/emc_smis_common.py:1409 #, python-format -msgid "" -"Image size %(size_bytes)d exceeded instance_type allowed size " -"%(allowed_size_bytes)d" +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:831 +#: cinder/volume/drivers/emc/emc_smis_common.py:1419 #, python-format -msgid "Fetching image %(image)s, type %(image_type_str)" +msgid "Device info: %(data)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:844 +#: cinder/volume/drivers/emc/emc_smis_common.py:1441 #, python-format -msgid "Size for image %(image)s: %(virtual_size)d" +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." 
msgstr "" -#: cinder/virt/xenapi/vm_utils.py:853 +#: cinder/volume/drivers/emc/emc_smis_common.py:1463 #, python-format -msgid "" -"Kernel/Ramdisk image is too large: %(vdi_size)d bytes, max %(max_size)d " -"bytes" +msgid "Found Storage Processor System: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:870 +#: cinder/volume/drivers/emc/emc_smis_common.py:1491 #, python-format -msgid "Copying VDI %s to /boot/guest on dom0" +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1520 +msgid "Error finding Storage Hardware ID Service." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:884 +#: cinder/volume/drivers/emc/emc_smis_common.py:1526 #, python-format -msgid "Kernel/Ramdisk VDI %s destroyed" +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:895 -msgid "Failed to fetch glance image" +#: cinder/volume/drivers/emc/emc_smis_common.py:1538 +msgid "Error finding Target WWNs." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:934 +#: cinder/volume/drivers/emc/emc_smis_common.py:1548 #, python-format -msgid "Detected %(image_type_str)s format for image %(image_ref)s" +msgid "Add target WWN: %s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:955 +#: cinder/volume/drivers/emc/emc_smis_common.py:1550 #, python-format -msgid "Looking up vdi %s for PV kernel" +msgid "Target WWNs: %s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:973 +#: cinder/volume/drivers/emc/emc_smis_common.py:1566 #, python-format -msgid "Unknown image format %(disk_image_type)s" +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1016 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:152 #, python-format -msgid "VDI %s is still available" +msgid "Could not find iSCSI export for volume %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1059 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:161 #, python-format -msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgid "Cannot find device number for volume %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1061 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:191 #, python-format -msgid "(VM_UTILS) xenapi power_state -> |%s|" +msgid "Found iSCSI endpoint: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1088 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:198 #, python-format -msgid "Unable to parse rrd of %(vm_uuid)s" +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1108 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:215 #, python-format -msgid "Re-scanning SR %s" +msgid "ISCSI properties: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1136 +#: cinder/volume/drivers/hds/hds.py:70 #, python-format -msgid "Flag sr_matching_filter '%s' does not respect formatting convention" +msgid "Range: start LU: %(start)s, end LU: %(end)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1154 -msgid "" -"XenAPI is unable to find a Storage Repository to install guest instances " -"on. 
Please check your configuration and/or configure the flag " -"'sr_matching_filter'" +#: cinder/volume/drivers/hds/hds.py:84 +#, python-format +msgid "setting LU upper (end) limit to %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1167 -msgid "Cannot find SR of content-type ISO" +#: cinder/volume/drivers/hds/hds.py:92 +#, python-format +msgid "%(element)s: %(val)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1175 +#: cinder/volume/drivers/hds/hds.py:103 cinder/volume/drivers/hds/hds.py:105 #, python-format -msgid "ISO: looking at SR %(sr_rec)s" +msgid "XML exception reading parameter: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1177 -msgid "ISO: not iso content" +#: cinder/volume/drivers/hds/hds.py:178 +#, python-format +msgid "portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1180 -msgid "ISO: iso content_type, no 'i18n-key' key" +#: cinder/volume/drivers/hds/hds.py:197 +#, python-format +msgid "No configuration found for service: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1183 -msgid "ISO: iso content_type, i18n-key value not 'local-storage-iso'" +#: cinder/volume/drivers/hds/hds.py:250 +#, python-format +msgid "HDP not found: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1187 -msgid "ISO: SR MATCHing our criteria" +#: cinder/volume/drivers/hds/hds.py:289 +#, python-format +msgid "iSCSI portal not found for service: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1189 -msgid "ISO: ISO, looking to see if it is host local" +#: cinder/volume/drivers/hds/hds.py:327 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1192 +#: cinder/volume/drivers/hds/hds.py:355 #, python-format -msgid "ISO: PBD %(pbd_ref)s disappeared" +msgid "LUN %(lun)s of size %(size)s MB is cloned." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1195 +#: cinder/volume/drivers/hds/hds.py:372 #, python-format -msgid "ISO: PBD matching, want %(pbd_rec)s, have %(host)s" +msgid "LUN %(lun)s extended to %(size)s GB." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1198 -msgid "ISO: SR with local PBD" +#: cinder/volume/drivers/hds/hds.py:395 +#, python-format +msgid "delete lun %(lun)s on %(name)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1220 +#: cinder/volume/drivers/hds/hds.py:480 #, python-format -msgid "" -"Unable to obtain RRD XML for VM %(vm_uuid)s with server details: " -"%(server)s." +msgid "LUN %(lun)s of size %(sz)s MB is created from snapshot." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1236 +#: cinder/volume/drivers/hds/hds.py:503 #, python-format -msgid "Unable to obtain RRD XML updates with server details: %(server)s." +msgid "LUN %(lun)s of size %(size)s MB is created as snapshot." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1290 +#: cinder/volume/drivers/hds/hds.py:522 #, python-format -msgid "Invalid statistics data from Xenserver: %s" +msgid "LUN %s is deleted." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1343 -#, python-format -msgid "VHD %(vdi_uuid)s has parent %(parent_ref)s" +#: cinder/volume/drivers/huawei/__init__.py:57 +msgid "_instantiate_driver: configuration not found." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1417 +#: cinder/volume/drivers/huawei/__init__.py:64 #, python-format msgid "" -"Parent %(parent_uuid)s doesn't match original parent " -"%(original_parent_uuid)s, waiting for coalesce..." +"_instantiate_driver: Loading %(protocol)s driver for Huawei OceanStor " +"%(product)s series storage arrays." 
msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1427 +#: cinder/volume/drivers/huawei/__init__.py:84 #, python-format -msgid "VHD coalesce attempts exceeded (%(max_attempts)d), giving up..." +msgid "" +"\"Product\" or \"Protocol\" is illegal. \"Product\" should be set to " +"either T, Dorado or HVS. \"Protocol\" should be set to either iSCSI or " +"FC. Product: %(product)s Protocol: %(protocol)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1462 +#: cinder/volume/drivers/huawei/huawei_dorado.py:74 #, python-format -msgid "Timeout waiting for device %s to be created" +msgid "" +"initialize_connection: volume name: %(vol)s host: %(host)s initiator: " +"%(wwn)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1473 +#: cinder/volume/drivers/huawei/huawei_dorado.py:92 +#: cinder/volume/drivers/huawei/huawei_t.py:461 #, python-format -msgid "Plugging VBD %s ... " +msgid "initialize_connection: Target FC ports WWNS: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1476 +#: cinder/volume/drivers/huawei/huawei_t.py:101 #, python-format -msgid "Plugging VBD %s done." +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(ini)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1478 +#: cinder/volume/drivers/huawei/huawei_t.py:159 +#: cinder/volume/drivers/huawei/rest_common.py:1278 #, python-format -msgid "VBD %(vbd_ref)s plugged as %(orig_dev)s" +msgid "" +"_get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " +"check config file." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1481 +#: cinder/volume/drivers/huawei/huawei_t.py:206 +#: cinder/volume/drivers/huawei/rest_common.py:1083 #, python-format -msgid "VBD %(vbd_ref)s plugged into wrong dev, remapping to %(dev)s" +msgid "_get_tgt_iqn: iSCSI IP is %s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1490 +#: cinder/volume/drivers/huawei/huawei_t.py:234 #, python-format -msgid "Destroying VBD for VDI %s ... " +msgid "_get_tgt_iqn: iSCSI target iqn is %s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1498 +#: cinder/volume/drivers/huawei/huawei_t.py:248 #, python-format -msgid "Destroying VBD for VDI %s done." +msgid "" +"_get_iscsi_tgt_port_info: Failed to get iSCSI port info. Please make sure" +" the iSCSI port IP %s is configured in array." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1511 +#: cinder/volume/drivers/huawei/huawei_t.py:323 +#: cinder/volume/drivers/huawei/huawei_t.py:552 #, python-format -msgid "Running pygrub against %s" +msgid "" +"terminate_connection: volume: %(vol)s, host: %(host)s, connector: " +"%(initiator)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1518 +#: cinder/volume/drivers/huawei/huawei_t.py:351 #, python-format -msgid "Found Xen kernel %s" +msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1520 -msgid "No Xen kernel found. Booting HVM." +#: cinder/volume/drivers/huawei/huawei_t.py:436 +msgid "validate_connector: The FC driver requires thewwpns in the connector." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1533 -msgid "Partitions:" -msgstr "" - -#: cinder/virt/xenapi/vm_utils.py:1539 +#: cinder/volume/drivers/huawei/huawei_t.py:443 #, python-format -msgid " %(num)s: %(ptype)s %(size)d sectors" +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(wwn)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1565 +#: cinder/volume/drivers/huawei/huawei_t.py:578 #, python-format -msgid "" -"Writing partition table %(primary_first)d %(primary_last)d to " -"%(dev_path)s..." 
+msgid "_remove_fc_ports: FC port was not found on host %(hostid)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1578 +#: cinder/volume/drivers/huawei/huawei_utils.py:40 #, python-format -msgid "Writing partition table %s done." +msgid "parse_xml_file: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1632 +#: cinder/volume/drivers/huawei/huawei_utils.py:129 #, python-format -msgid "" -"Starting sparse_copy src=%(src_path)s dst=%(dst_path)s " -"virtual_size=%(virtual_size)d block_size=%(block_size)d" +msgid "_get_host_os_type: Host %(ip)s OS type is %(os)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1664 +#: cinder/volume/drivers/huawei/rest_common.py:59 #, python-format -msgid "" -"Finished sparse_copy in %(duration).2f secs, %(compression_pct).2f%% " -"reduction in size" +msgid "HVS Request URL: %(url)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1714 -msgid "" -"XenServer tools installed in this image are capable of network injection." -" Networking files will not bemanipulated" +#: cinder/volume/drivers/huawei/rest_common.py:60 +#, python-format +msgid "HVS Request Data: %(data)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1722 -msgid "" -"XenServer tools are present in this image but are not capable of network " -"injection" +#: cinder/volume/drivers/huawei/rest_common.py:73 +#, python-format +msgid "HVS Response Data: %(res)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1726 -msgid "XenServer tools are not installed in this image" +#: cinder/volume/drivers/huawei/rest_common.py:75 +#, python-format +msgid "Bad response from server: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1742 -msgid "Manipulating interface files directly" +#: cinder/volume/drivers/huawei/rest_common.py:82 +msgid "JSON transfer error" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1751 +#: cinder/volume/drivers/huawei/rest_common.py:102 #, python-format -msgid "Failed to mount filesystem (expected for non-linux instances): %s" +msgid "Login error, reason is %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:131 cinder/virt/xenapi/vmops.py:722 +#: cinder/volume/drivers/huawei/rest_common.py:166 #, python-format -msgid "Updating progress to %(progress)d" +msgid "" +"%(err)s\n" +"result: %(res)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:231 +#: cinder/volume/drivers/huawei/rest_common.py:173 #, python-format -msgid "Attempted to power on non-existent instance bad instance id %s" +msgid "%s \"data\" was not in result." msgstr "" -#: cinder/virt/xenapi/vmops.py:233 -msgid "Starting instance" +#: cinder/volume/drivers/huawei/rest_common.py:208 +msgid "Can't find the Qos policy in array" msgstr "" -#: cinder/virt/xenapi/vmops.py:303 -msgid "Removing kernel/ramdisk files from dom0" +#: cinder/volume/drivers/huawei/rest_common.py:246 +msgid "Can't find lun or lun group in array" msgstr "" -#: cinder/virt/xenapi/vmops.py:358 -msgid "Failed to spawn, rolling back" +#: cinder/volume/drivers/huawei/rest_common.py:280 +#, python-format +msgid "Invalid resource pool: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:443 -msgid "Detected ISO image type, creating blank VM for install" +#: cinder/volume/drivers/huawei/rest_common.py:298 +#, python-format +msgid "Get pool info error, pool name is:%s" msgstr "" -#: cinder/virt/xenapi/vmops.py:462 -msgid "Auto configuring disk, attempting to resize partition..." +#: cinder/volume/drivers/huawei/rest_common.py:327 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." 
msgstr "" -#: cinder/virt/xenapi/vmops.py:515 +#: cinder/volume/drivers/huawei/rest_common.py:354 #, python-format -msgid "Invalid value for injected_files: %r" +msgid "_stop_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:520 +#: cinder/volume/drivers/huawei/rest_common.py:474 #, python-format -msgid "Injecting file path: '%s'" +msgid "" +"_mapping_hostgroup_and_lungroup: lun_group: %(lun_group)sview_id: " +"%(view_id)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:527 -msgid "Setting admin password" +#: cinder/volume/drivers/huawei/rest_common.py:511 +#: cinder/volume/drivers/huawei/rest_common.py:543 +#, python-format +msgid "initiator name:%(initiator_name)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:527 +#, python-format +msgid "host lun id is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:553 +#, python-format +msgid "the free wwns %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:574 +#, python-format +msgid "the fc server properties is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:688 +#, python-format +msgid "JSON transfer data error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:874 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:937 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:964 +#, python-format +msgid "" +"PrefetchType config is wrong. PrefetchType must in 1,2,3,4. fetchtype " +"is:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:970 +msgid "Use default prefetch fetchtype. Prefetch fetchtype:Intelligent." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:982 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal.LUNcopy name: " +"%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1056 +#, python-format +msgid "" +"_get_iscsi_port_info: Failed to get iscsi port info through config IP " +"%(ip)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1101 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1124 +#, python-format +msgid "_parse_volume_type: type id: %(type_id)s config parameter is: %(params)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1157 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the configuration file " +"%(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1162 +#, python-format +msgid "The config parameters are: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1239 +#: cinder/volume/drivers/huawei/ssh_common.py:118 +#: cinder/volume/drivers/huawei/ssh_common.py:1265 +#, python-format +msgid "_check_conf_file: Config file invalid. %s must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1246 +#: cinder/volume/drivers/huawei/ssh_common.py:125 +msgid "_check_conf_file: Config file invalid. StoragePool must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1256 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. 
Host OSType invalid.\n" +"The valid values are: %(os_list)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:531 -msgid "Resetting network" +#: cinder/volume/drivers/huawei/rest_common.py:1300 +msgid "Can not find lun in array" msgstr "" -#: cinder/virt/xenapi/vmops.py:538 -msgid "Setting VCPU weight" +#: cinder/volume/drivers/huawei/ssh_common.py:54 +#, python-format +msgid "ssh_read: Read SSH timeout. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:70 +msgid "No response message. Please check system status." msgstr "" -#: cinder/virt/xenapi/vmops.py:544 -msgid "Starting VM" +#: cinder/volume/drivers/huawei/ssh_common.py:101 +#: cinder/volume/drivers/huawei/ssh_common.py:1249 +msgid "do_setup" msgstr "" -#: cinder/virt/xenapi/vmops.py:551 +#: cinder/volume/drivers/huawei/ssh_common.py:135 +#: cinder/volume/drivers/huawei/ssh_common.py:1287 #, python-format msgid "" -"Latest agent build for %(hypervisor)s/%(os)s/%(architecture)s is " -"%(version)s" +"_check_conf_file: Config file invalid. Host OSType is invalid.\n" +"The valid values are: %(os_list)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:554 +#: cinder/volume/drivers/huawei/ssh_common.py:169 #, python-format -msgid "No agent build found for %(hypervisor)s/%(os)s/%(architecture)s" +msgid "_get_login_info: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:561 -msgid "Waiting for instance state to become running" +#: cinder/volume/drivers/huawei/ssh_common.py:224 +#, python-format +msgid "create_volume: volume name: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:573 -msgid "Querying agent version" +#: cinder/volume/drivers/huawei/ssh_common.py:242 +#, python-format +msgid "" +"_name_translate: Name in cinder: %(old)s, new name in storage system: " +"%(new)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:576 +#: cinder/volume/drivers/huawei/ssh_common.py:279 #, python-format -msgid "Instance agent version: %s" +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the element in configuration " +"file %(conf)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:581 +#: cinder/volume/drivers/huawei/ssh_common.py:373 +#: cinder/volume/drivers/huawei/ssh_common.py:1451 #, python-format -msgid "Updating Agent to %s" +msgid "LUNType must be \"Thin\" or \"Thick\". LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:395 +msgid "" +"_parse_conf_lun_params: Use default prefetch type. Prefetch type: " +"Intelligent" msgstr "" -#: cinder/virt/xenapi/vmops.py:616 +#: cinder/volume/drivers/huawei/ssh_common.py:421 #, python-format -msgid "No opaque_ref could be determined for '%s'." +msgid "" +"_get_maximum_capacity_pool_id: Failed to get pool id. Please check config" +" file and make sure the StoragePool %s is created in storage array." msgstr "" -#: cinder/virt/xenapi/vmops.py:670 -msgid "Finished snapshot and upload for VM" +#: cinder/volume/drivers/huawei/ssh_common.py:436 +#, python-format +msgid "CLI command: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:677 -msgid "Starting snapshot for VM" +#: cinder/volume/drivers/huawei/ssh_common.py:466 +#, python-format +msgid "" +"_execute_cli: Can not connect to IP %(old)s, try to connect to the other " +"IP %(new)s." 
msgstr "" -#: cinder/virt/xenapi/vmops.py:686 +#: cinder/volume/drivers/huawei/ssh_common.py:501 #, python-format -msgid "Unable to Snapshot instance: %(exc)s" +msgid "_execute_cli: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:702 -msgid "Failed to transfer vhd to new host" +#: cinder/volume/drivers/huawei/ssh_common.py:511 +#, python-format +msgid "delete_volume: volume name: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:770 +#: cinder/volume/drivers/huawei/ssh_common.py:516 #, python-format -msgid "Resizing down VDI %(cow_uuid)s from %(old_gb)dGB to %(new_gb)dGB" +msgid "delete_volume: Volume %(name)s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:893 +#: cinder/volume/drivers/huawei/ssh_common.py:570 #, python-format -msgid "Resizing up VDI %(vdi_uuid)s from %(old_gb)dGB to %(new_gb)dGB" +msgid "" +"create_volume_from_snapshot: snapshot name: %(snapshot)s, volume name: " +"%(volume)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:901 -msgid "Resize complete" +#: cinder/volume/drivers/huawei/ssh_common.py:580 +#, python-format +msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:928 +#: cinder/volume/drivers/huawei/ssh_common.py:650 #, python-format -msgid "Failed to query agent version: %(resp)r" +msgid "_wait_for_luncopy: LUNcopy %(luncopyname)s status is %(status)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:949 +#: cinder/volume/drivers/huawei/ssh_common.py:688 #, python-format -msgid "domid changed from %(domid)s to %(newdomid)s" +msgid "create_cloned_volume: src volume: %(src)s, tgt volume: %(tgt)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:962 +#: cinder/volume/drivers/huawei/ssh_common.py:697 #, python-format -msgid "Failed to update agent: %(resp)r" +msgid "Source volume %(name)s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:983 +#: cinder/volume/drivers/huawei/ssh_common.py:739 #, python-format -msgid "Failed to exchange keys: %(resp)r" +msgid "" +"extend_volume: extended volume name: %(extended_name)s new added volume " +"name: %(added_name)s new added volume size: %(added_size)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:998 +#: cinder/volume/drivers/huawei/ssh_common.py:747 #, python-format -msgid "Failed to update password: %(resp)r" +msgid "extend_volume: volume %s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:1023 +#: cinder/volume/drivers/huawei/ssh_common.py:779 #, python-format -msgid "Failed to inject file: %(resp)r" +msgid "create_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1032 -msgid "VM already halted, skipping shutdown..." +#: cinder/volume/drivers/huawei/ssh_common.py:785 +msgid "create_snapshot: Resource pool needs 1GB valid size at least." msgstr "" -#: cinder/virt/xenapi/vmops.py:1036 -msgid "Shutting down VM" +#: cinder/volume/drivers/huawei/ssh_common.py:792 +#, python-format +msgid "create_snapshot: Volume %(name)s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:1054 -msgid "Unable to find VBD for VM" +#: cinder/volume/drivers/huawei/ssh_common.py:855 +#, python-format +msgid "delete_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1097 -msgid "Using RAW or VHD, skipping kernel and ramdisk deletion" +#: cinder/volume/drivers/huawei/ssh_common.py:865 +#, python-format +msgid "" +"delete_snapshot: Can not delete snapshot %s for it is a source LUN of " +"LUNCopy." 
msgstr "" -#: cinder/virt/xenapi/vmops.py:1104 -msgid "instance has a kernel or ramdisk but not both" +#: cinder/volume/drivers/huawei/ssh_common.py:873 +#, python-format +msgid "delete_snapshot: Snapshot %(snap)s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:1111 -msgid "kernel/ramdisk files removed" +#: cinder/volume/drivers/huawei/ssh_common.py:916 +#, python-format +msgid "" +"%(func)s: %(msg)s\n" +"CLI command: %(cmd)s\n" +"CLI out: %(out)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1121 -msgid "VM destroyed" +#: cinder/volume/drivers/huawei/ssh_common.py:933 +#, python-format +msgid "map_volume: Volume %s was not found." msgstr "" -#: cinder/virt/xenapi/vmops.py:1147 -msgid "Destroying VM" +#: cinder/volume/drivers/huawei/ssh_common.py:1079 +#, python-format +msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1169 -msgid "VM is not present, skipping destroy..." +#: cinder/volume/drivers/huawei/ssh_common.py:1102 +#, python-format +msgid "remove_map: Host %s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:1222 +#: cinder/volume/drivers/huawei/ssh_common.py:1106 #, python-format -msgid "Instance is already in Rescue Mode: %s" +msgid "remove_map: Volume %s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:1296 +#: cinder/volume/drivers/huawei/ssh_common.py:1119 #, python-format -msgid "Found %(instance_count)d hung reboots older than %(timeout)d seconds" +msgid "remove_map: No map between host %(host)s and volume %(volume)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1300 -msgid "Automatically hard rebooting" +#: cinder/volume/drivers/huawei/ssh_common.py:1138 +#, python-format +msgid "" +"_delete_map: There are IOs accessing the system. Retry to delete host map" +" %(mapid)s 10s later." msgstr "" -#: cinder/virt/xenapi/vmops.py:1363 +#: cinder/volume/drivers/huawei/ssh_common.py:1146 #, python-format -msgid "Setting migration %(migration_id)s to error: %(reason)s" +msgid "" +"_delete_map: Failed to delete host map %(mapid)s.\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1185 +msgid "_update_volume_stats: Updating volume stats." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1277 +msgid "_check_conf_file: Config file invalid. StoragePool must be specified." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1311 +msgid "" +"_get_device_type: The driver only supports Dorado5100 and Dorado 2100 G2 " +"now." msgstr "" -#: cinder/virt/xenapi/vmops.py:1374 +#: cinder/volume/drivers/huawei/ssh_common.py:1389 #, python-format msgid "" -"Automatically confirming migration %(migration_id)s for instance " -"%(instance_uuid)s" +"create_volume_from_snapshot: %(device)s does not support create volume " +"from snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1396 +#, python-format +msgid "create_cloned_volume: %(device)s does not support clone volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1404 +#, python-format +msgid "extend_volume: %(device)s does not support extend volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1413 +#, python-format +msgid "create_snapshot: %(device)s does not support snapshot." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:132 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:142 +#, python-format +msgid "Failed getting details for pool %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:179 +msgid "do_setup: No configured nodes." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:182 +msgid "leave: do_setup" msgstr "" -#: cinder/virt/xenapi/vmops.py:1379 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:186 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:190 +msgid "Unable to determine system name" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:193 +msgid "Unable to determine system id" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:196 +msgid "Unable to determine pool extent size" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:203 +#: cinder/volume/drivers/netapp/iscsi.py:122 +#: cinder/volume/drivers/netapp/nfs.py:639 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:157 #, python-format -msgid "Instance %(instance_uuid)s not found" +msgid "%s is not set" msgstr "" -#: cinder/virt/xenapi/vmops.py:1383 -msgid "In ERROR state" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:209 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" msgstr "" -#: cinder/virt/xenapi/vmops.py:1389 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:217 #, python-format -msgid "In %(task_state)s task_state, not RESIZE_VERIFY" +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:225 +msgid "leave: check_for_setup_error" msgstr "" -#: cinder/virt/xenapi/vmops.py:1396 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:235 #, python-format -msgid "Error auto-confirming resize: %(e)s. Will retry later." +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:254 +msgid "The connector does not contain the required information." msgstr "" -#: cinder/virt/xenapi/vmops.py:1418 -msgid "Could not get bandwidth info." 
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:277 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1469 -msgid "Injecting network info to xenstore" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:297 +msgid "CHAP secret exists for host but CHAP is disabled" msgstr "" -#: cinder/virt/xenapi/vmops.py:1483 -msgid "Creating vifs" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:302 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1492 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:314 #, python-format -msgid "Creating VIF for network %(network_ref)s" +msgid "Did not find expected column name in lsvdisk: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1495 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:316 #, python-format -msgid "Created VIF %(vif_ref)s, network %(network_ref)s" +msgid "initialize_connection: Missing volume attribute for volume %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1520 -msgid "Injecting hostname to xenstore" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:333 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1545 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:342 #, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:365 msgid "" -"The agent call to %(method)s returned an invalid response: %(ret)r. " -"path=%(path)s; args=%(args)r" +"Could not get FC connection information for the host-volume connection. " +"Is the host configured properly for FC connections?" msgstr "" -#: cinder/virt/xenapi/vmops.py:1566 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:380 #, python-format -msgid "TIMEOUT: The call to %(method)s timed out. args=%(args)r" +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" msgstr "" -#: cinder/virt/xenapi/vmops.py:1570 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:385 #, python-format msgid "" -"NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. " -"args=%(args)r" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1575 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:403 #, python-format -msgid "The call to %(method)s returned an error: %(e)s. args=%(args)r" +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:411 +msgid "terminate_connection: Failed to get host name from connector." msgstr "" -#: cinder/virt/xenapi/vmops.py:1661 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:421 #, python-format -msgid "OpenSSL error: %s" +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:52 -msgid "creating sr within volume_utils" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:447 +msgid "create_volume_from_snapshot: Source and destination size differ." 
msgstr "" -#: cinder/virt/xenapi/volume_utils.py:55 cinder/virt/xenapi/volume_utils.py:83 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:459 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:470 #, python-format -msgid "type is = %s" +msgid "enter: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:474 +msgid "extend_volume: Extending a volume with snapshots is not supported." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:58 cinder/virt/xenapi/volume_utils.py:86 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:481 #, python-format -msgid "name = %s" +msgid "leave: extend_volume: volume %s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:71 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:497 #, python-format -msgid "Created %(label)s as %(sr_ref)s." +msgid "enter: migrate_volume: id=%(id)s, host=%(host)s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:76 cinder/virt/xenapi/volume_utils.py:174 -msgid "Unable to create Storage Repository" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:523 +#, python-format +msgid "leave: migrate_volume: id=%(id)s, host=%(host)s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:80 -msgid "introducing sr within volume_utils" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:540 +#, python-format +msgid "" +"enter: retype: id=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:103 cinder/virt/xenapi/volume_utils.py:170 -#: cinder/virt/xenapi/volumeops.py:156 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:581 #, python-format -msgid "Introduced %(label)s as %(sr_ref)s." +msgid "" +"exit: retype: ild=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:106 -msgid "Creating pbd for SR" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:622 +msgid "Could not get pool data from the storage" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:108 -msgid "Plugging SR" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:623 +msgid "_update_volume_stats: Could not get storage pool data" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:116 cinder/virt/xenapi/volumeops.py:160 -msgid "Unable to introduce Storage Repository" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:44 +#, python-format +msgid "Could not find key in output of command %(cmd)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:64 +#, python-format +msgid "Failed to get code level (%s)." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:127 cinder/virt/xenapi/volumeops.py:50 -msgid "Unable to get SR using uuid" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:86 +#, python-format +msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:129 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:143 #, python-format -msgid "Forgetting SR %s..." +msgid "WWPN on node %(node)s: %(wwpn)s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:137 -msgid "Unable to forget Storage Repository" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:165 +#, python-format +msgid "Failed to find host %s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:157 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:178 #, python-format -msgid "Introducing %s..." 
+msgid "enter: get_host_from_connector: %s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:186 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:207 #, python-format -msgid "Unable to find SR from VBD %s" +msgid "leave: get_host_from_connector: host %s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:204 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:218 #, python-format -msgid "Ignoring exception %(exc)s when getting PBDs for %(sr_ref)s" +msgid "enter: create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:224 +msgid "create_host: Host name is not unicode or string" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:210 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:235 +msgid "create_host: No initiators or wwpns supplied." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:265 #, python-format -msgid "Ignoring exception %(exc)s when unplugging PBD %(pbd)s" +msgid "leave: create_host: host %(host)s - %(host_name)s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:234 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:275 #, python-format -msgid "Unable to introduce VDI on SR %s" +msgid "enter: map_vol_to_host: volume %(volume_name)s to host %(host_name)s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:242 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:301 #, python-format -msgid "Unable to get record of VDI %s on" +msgid "" +"leave: map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host " +"%(host_name)s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:264 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:311 #, python-format -msgid "Unable to introduce VDI for SR %s" +msgid "enter: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:274 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:318 #, python-format -msgid "Error finding vdis in SR %s" +msgid "unmap_vol_from_host: No mapping of volume %(vol_name)s to any host found." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:281 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:324 #, python-format -msgid "Unable to find vbd for vdi %s" +msgid "" +"unmap_vol_from_host: Multiple mappings of volume %(vol_name)s found, no " +"host specified." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:315 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:336 #, python-format -msgid "Unable to obtain target information %(data)s, %(mountpoint)s" +msgid "" +"unmap_vol_from_host: No mapping of volume %(vol_name)s to host %(host) " +"found." 
msgstr "" -#: cinder/virt/xenapi/volume_utils.py:341 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:348 #, python-format -msgid "Mountpoint cannot be translated: %s" +msgid "leave: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:377 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:383 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:390 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:397 +msgid "System does not support compression" msgstr "" -#: cinder/virt/xenapi/volumeops.py:64 -msgid "Could not find VDI ref" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:402 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" msgstr "" -#: cinder/virt/xenapi/volumeops.py:69 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:408 #, python-format -msgid "Creating SR %s" +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:73 -msgid "Could not create SR" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:417 +#, python-format +msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:76 -msgid "Could not retrieve SR record" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:452 +msgid "Protocol must be specified as ' iSCSI' or ' FC'." msgstr "" -#: cinder/virt/xenapi/volumeops.py:81 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:495 #, python-format -msgid "Introducing SR %s" +msgid "enter: create_vdisk: vdisk %s " msgstr "" -#: cinder/virt/xenapi/volumeops.py:85 -msgid "SR found in xapi database. No need to introduce" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:498 +#, python-format +msgid "leave: _create_vdisk: volume %s " msgstr "" -#: cinder/virt/xenapi/volumeops.py:90 -msgid "Could not introduce SR" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:525 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping%(id)s. Attributes: " +"%(attr)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:94 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:535 #, python-format -msgid "Checking for SR %s" +msgid "" +"Mapping %(id)s prepare failed to complete within theallotted %(to)d " +"seconds timeout. Terminating." 
msgstr "" -#: cinder/virt/xenapi/volumeops.py:106 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:544 #, python-format -msgid "SR %s not found in the xapi database" +msgid "" +"enter: run_flashcopy: execute FlashCopy from source %(source)s to target " +"%(target)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:112 -msgid "Could not forget SR" +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:552 +#, python-format +msgid "leave: run_flashcopy: FlashCopy started from %(source)s to %(target)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:121 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:572 #, python-format -msgid "Attach_volume: %(connection_info)s, %(instance_name)s, %(mountpoint)s" +msgid "Loopcall: _check_vdisk_fc_mappings(), vdisk %s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:178 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:595 #, python-format -msgid "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s" +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:189 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:631 #, python-format -msgid "Unable to use SR %(sr_ref)s for instance %(instance_name)s" +msgid "Calling _ensure_vdisk_no_fc_mappings: vdisk %s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:197 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:639 #, python-format -msgid "Unable to attach volume to instance %s" +msgid "enter: delete_vdisk: vdisk %s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:200 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:641 #, python-format -msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s" +msgid "Tried to delete non-existant vdisk %s." msgstr "" -#: cinder/virt/xenapi/volumeops.py:210 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:645 #, python-format -msgid "Detach_volume: %(instance_name)s, %(mountpoint)s" +msgid "leave: delete_vdisk: vdisk %s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:219 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:649 #, python-format -msgid "Unable to locate volume %s" +msgid "enter: create_copy: snapshot %(src)s to %(tgt)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:227 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:654 #, python-format -msgid "Unable to detach volume %s" +msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist" msgstr "" -#: cinder/virt/xenapi/volumeops.py:232 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:669 #, python-format -msgid "Unable to destroy vbd %s" +msgid "leave: _create_copy: snapshot %(tgt)s from vdisk %(src)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:691 +msgid "migrate_volume started without a vdisk copy in the expected pool." 
msgstr "" -#: cinder/virt/xenapi/volumeops.py:239 +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:743 #, python-format -msgid "Error purging SR %s" +msgid "" +"Ignore change IO group as storage code level is %(code_level)s, below " +"then 6.4.0.0" msgstr "" -#: cinder/virt/xenapi/volumeops.py:241 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:35 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:211 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:244 #, python-format -msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s" +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/vnc/xvp_proxy.py:98 cinder/vnc/xvp_proxy.py:103 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:53 #, python-format -msgid "Error in handshake: %s" +msgid "Expected no output from CLI command %(cmd)s, got %(out)s" msgstr "" -#: cinder/vnc/xvp_proxy.py:119 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:65 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:256 #, python-format -msgid "Invalid request: %s" +msgid "" +"Failed to parse CLI output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:142 +msgid "Must pass wwpn or host to lsfabric." msgstr "" -#: cinder/vnc/xvp_proxy.py:139 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:158 #, python-format -msgid "Request: %s" +msgid "Did not find success message nor error for %(fun)s: %(out)s" msgstr "" -#: cinder/vnc/xvp_proxy.py:142 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:163 +msgid "" +"storwize_svc_multihostmap_enabled is set to False, not allowing multi " +"host mapping." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:347 #, python-format -msgid "Request made with missing token: %s" +msgid "Did not find expected key %(key)s in %(fun)s: %(raw)s" msgstr "" -#: cinder/vnc/xvp_proxy.py:153 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:382 #, python-format -msgid "Request made with invalid token: %s" +msgid "" +"Unexpected CLI response: header/row mismatch. header: %(header)s, row: " +"%(row)s" msgstr "" -#: cinder/vnc/xvp_proxy.py:160 +#: cinder/volume/drivers/netapp/api.py:419 #, python-format -msgid "Unexpected error: %s" +msgid "No element by given name %s." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:440 +msgid "Not a valid value for NaElement." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:444 +msgid "NaElement name cannot be null." msgstr "" -#: cinder/vnc/xvp_proxy.py:180 +#: cinder/volume/drivers/netapp/api.py:468 +msgid "Type cannot be converted into NaElement." 
+msgstr "" + +#: cinder/volume/drivers/netapp/common.py:75 +msgid "Required configuration not found" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:103 #, python-format -msgid "Starting cinder-xvpvncproxy node (version %s)" +msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s" msgstr "" -#: cinder/volume/api.py:74 cinder/volume/api.py:220 -msgid "status must be available" +#: cinder/volume/drivers/netapp/common.py:109 +#, python-format +msgid "Storage family %s is not supported" msgstr "" -#: cinder/volume/api.py:85 +#: cinder/volume/drivers/netapp/common.py:116 #, python-format -msgid "Quota exceeded for %(pid)s, tried to create %(size)sG volume" +msgid "No default storage protocol found for storage family %(storage_family)s" msgstr "" -#: cinder/volume/api.py:137 -msgid "Volume status must be available or error" +#: cinder/volume/drivers/netapp/common.py:123 +#, python-format +msgid "" +"Protocol %(storage_protocol)s is not supported for storage family " +"%(storage_family)s" msgstr "" -#: cinder/volume/api.py:142 +#: cinder/volume/drivers/netapp/common.py:130 #, python-format -msgid "Volume still has %d dependent snapshots" +msgid "" +"NetApp driver of family %(storage_family)s and protocol " +"%(storage_protocol)s loaded" msgstr "" -#: cinder/volume/api.py:223 -msgid "already attached" +#: cinder/volume/drivers/netapp/common.py:139 +msgid "Only loading netapp drivers supported." msgstr "" -#: cinder/volume/api.py:230 -msgid "already detached" +#: cinder/volume/drivers/netapp/common.py:158 +#, python-format +msgid "" +"The configured NetApp driver is deprecated. Please refer the link to " +"resolve the issue '%s'." msgstr "" -#: cinder/volume/api.py:292 -msgid "must be available" +#: cinder/volume/drivers/netapp/iscsi.py:69 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" msgstr "" -#: cinder/volume/api.py:325 -msgid "Volume Snapshot status must be available or error" +#: cinder/volume/drivers/netapp/iscsi.py:105 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:150 +msgid "Success getting LUN list from server" msgstr "" -#: cinder/volume/driver.py:96 +#: cinder/volume/drivers/netapp/iscsi.py:166 #, python-format -msgid "Recovering from a failed execute. Try number %s" +msgid "Created LUN with name %s" msgstr "" -#: cinder/volume/driver.py:106 +#: cinder/volume/drivers/netapp/iscsi.py:175 #, python-format -msgid "volume group %s doesn't exist" +msgid "No entry in LUN table for volume/snapshot %(name)s." msgstr "" -#: cinder/volume/driver.py:270 +#: cinder/volume/drivers/netapp/iscsi.py:191 #, python-format -msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %d" +msgid "Destroyed LUN %s" msgstr "" -#: cinder/volume/driver.py:318 +#: cinder/volume/drivers/netapp/iscsi.py:227 #, python-format -msgid "Skipping remove_export. No iscsi_target provisioned for volume: %d" +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" msgstr "" -#: cinder/volume/driver.py:327 +#: cinder/volume/drivers/netapp/iscsi.py:232 #, python-format msgid "" -"Skipping remove_export. 
No iscsi_target is presently exported for volume:"
-" %d"
+"Successfully fetched target details for LUN %(name)s and initiator "
+"%(initiator_name)s"
 msgstr ""

-#: cinder/volume/driver.py:337
-msgid "ISCSI provider_location not stored, using discovery"
+#: cinder/volume/drivers/netapp/iscsi.py:238
+#, python-format
+msgid "Failed to get LUN target details for the LUN %s"
 msgstr ""

-#: cinder/volume/driver.py:384
+#: cinder/volume/drivers/netapp/iscsi.py:249
 #, python-format
-msgid "Could not find iSCSI export for volume %s"
+msgid "Failed to get target portal for the LUN %s"
 msgstr ""

-#: cinder/volume/driver.py:388
+#: cinder/volume/drivers/netapp/iscsi.py:252
 #, python-format
-msgid "ISCSI Discovery: Found %s"
+msgid "Failed to get target IQN for the LUN %s"
 msgstr ""

-#: cinder/volume/driver.py:466
+#: cinder/volume/drivers/netapp/iscsi.py:290
 #, python-format
-msgid "Cannot confirm exported volume id:%(volume_id)s."
+msgid "Snapshot %s deletion successful"
 msgstr ""

-#: cinder/volume/driver.py:493
+#: cinder/volume/drivers/netapp/iscsi.py:310
+#: cinder/volume/drivers/netapp/iscsi.py:565
+#: cinder/volume/drivers/netapp/nfs.py:99
+#: cinder/volume/drivers/netapp/nfs.py:206
 #, python-format
-msgid "FAKE ISCSI: %s"
+msgid "Resizing %s failed. Cleaning volume."
 msgstr ""

-#: cinder/volume/driver.py:505
+#: cinder/volume/drivers/netapp/iscsi.py:325
 #, python-format
-msgid "rbd has no pool %s"
+msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s"
 msgstr ""

-#: cinder/volume/driver.py:579
+#: cinder/volume/drivers/netapp/iscsi.py:412
 #, python-format
-msgid "Sheepdog is not working: %s"
+msgid "Error mapping lun. Code :%(code)s, Message:%(message)s"
 msgstr ""

-#: cinder/volume/driver.py:581
-msgid "Sheepdog is not working"
+#: cinder/volume/drivers/netapp/iscsi.py:431
+#, python-format
+msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:511
+msgid "Object is not a NetApp LUN."
 msgstr ""

-#: cinder/volume/driver.py:680 cinder/volume/driver.py:685
+#: cinder/volume/drivers/netapp/iscsi.py:543
 #, python-format
-msgid "LoggingVolumeDriver: %s"
+msgid "Message: %s"
 msgstr ""

-#: cinder/volume/manager.py:96
+#: cinder/volume/drivers/netapp/iscsi.py:545
 #, python-format
-msgid "Re-exporting %s volumes"
+msgid "Error getting lun attribute. Exception: %s"
 msgstr ""

-#: cinder/volume/manager.py:101
+#: cinder/volume/drivers/netapp/iscsi.py:600
 #, python-format
-msgid "volume %s: skipping export"
+msgid "No need to extend volume %s as it is already the requested new size."
 msgstr ""

-#: cinder/volume/manager.py:107
+#: cinder/volume/drivers/netapp/iscsi.py:606
 #, python-format
-msgid "volume %s: creating"
+msgid "Resizing lun %s directly to new size."
 msgstr ""

-#: cinder/volume/manager.py:119
+#: cinder/volume/drivers/netapp/iscsi.py:633
 #, python-format
-msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG"
+msgid "Lun %(path)s geometry failed. Message - %(msg)s"
 msgstr ""

-#: cinder/volume/manager.py:131
+#: cinder/volume/drivers/netapp/iscsi.py:662
 #, python-format
-msgid "volume %s: creating export"
+msgid "Moving lun %(name)s to %(new_name)s."
 msgstr ""

-#: cinder/volume/manager.py:144
+#: cinder/volume/drivers/netapp/iscsi.py:677
 #, python-format
-msgid "volume %s: created successfully"
+msgid "Resizing lun %s using sub clone to new size."
msgstr "" -#: cinder/volume/manager.py:153 -msgid "Volume is still attached" +#: cinder/volume/drivers/netapp/iscsi.py:684 +#, python-format +msgid "%s cannot be sub clone resized as it is hosted on compressed volume" msgstr "" -#: cinder/volume/manager.py:155 -msgid "Volume is not local to this node" +#: cinder/volume/drivers/netapp/iscsi.py:690 +#, python-format +msgid "%s cannot be sub clone resized as it contains no blocks." msgstr "" -#: cinder/volume/manager.py:159 +#: cinder/volume/drivers/netapp/iscsi.py:707 #, python-format -msgid "volume %s: removing export" +msgid "Post clone resize lun %s" msgstr "" -#: cinder/volume/manager.py:161 +#: cinder/volume/drivers/netapp/iscsi.py:718 #, python-format -msgid "volume %s: deleting" +msgid "Failure staging lun %s to tmp." msgstr "" -#: cinder/volume/manager.py:164 +#: cinder/volume/drivers/netapp/iscsi.py:723 #, python-format -msgid "volume %s: volume is busy" +msgid "Failure moving new cloned lun to %s." msgstr "" -#: cinder/volume/manager.py:176 +#: cinder/volume/drivers/netapp/iscsi.py:727 #, python-format -msgid "volume %s: deleted successfully" +msgid "Failure deleting staged tmp lun %s." msgstr "" -#: cinder/volume/manager.py:183 +#: cinder/volume/drivers/netapp/iscsi.py:730 #, python-format -msgid "snapshot %s: creating" +msgid "Unknown exception in post clone resize lun %s." msgstr "" -#: cinder/volume/manager.py:187 +#: cinder/volume/drivers/netapp/iscsi.py:732 #, python-format -msgid "snapshot %(snap_name)s: creating" +msgid "Exception details: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:736 +msgid "Getting lun block count." msgstr "" -#: cinder/volume/manager.py:202 +#: cinder/volume/drivers/netapp/iscsi.py:741 #, python-format -msgid "snapshot %s: created successfully" +msgid "Failure getting lun info for %s." msgstr "" -#: cinder/volume/manager.py:211 +#: cinder/volume/drivers/netapp/iscsi.py:785 #, python-format -msgid "snapshot %s: deleting" +msgid "Failed to get vol with required size and extra specs for volume: %s" msgstr "" -#: cinder/volume/manager.py:214 +#: cinder/volume/drivers/netapp/iscsi.py:796 #, python-format -msgid "snapshot %s: snapshot is busy" +msgid "Error provisioning vol %(name)s on %(volume)s" msgstr "" -#: cinder/volume/manager.py:226 +#: cinder/volume/drivers/netapp/iscsi.py:841 #, python-format -msgid "snapshot %s: deleted successfully" +msgid "No iscsi service found for vserver %s" msgstr "" -#: cinder/volume/manager.py:310 -msgid "Checking volume capabilities" +#: cinder/volume/drivers/netapp/iscsi.py:982 +#, python-format +msgid "Cloned LUN with new name %s" msgstr "" -#: cinder/volume/manager.py:314 +#: cinder/volume/drivers/netapp/iscsi.py:986 #, python-format -msgid "New capabilities found: %s" +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1087 +msgid "Cluster ssc is not updated. No volume stats found." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1149 +#: cinder/volume/drivers/netapp/nfs.py:1080 +msgid "Unsupported ONTAP version. ONTAP version 7.3.1 and above is supported." msgstr "" -#: cinder/volume/manager.py:325 -msgid "Clear capabilities" +#: cinder/volume/drivers/netapp/iscsi.py:1153 +#: cinder/volume/drivers/netapp/nfs.py:1084 +#: cinder/volume/drivers/netapp/utils.py:320 +msgid "Api version could not be determined." 
msgstr "" -#: cinder/volume/manager.py:329 +#: cinder/volume/drivers/netapp/iscsi.py:1164 #, python-format -msgid "Notification {%s} received" +msgid "Failed to get vol with required size for volume: %s" msgstr "" -#: cinder/volume/netapp.py:79 +#: cinder/volume/drivers/netapp/iscsi.py:1273 #, python-format -msgid "API %(name)sfailed: %(reason)s" +msgid "Error finding luns for volume %s. Verify volume exists." msgstr "" -#: cinder/volume/netapp.py:109 +#: cinder/volume/drivers/netapp/iscsi.py:1390 #, python-format -msgid "%s is not set" +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" msgstr "" -#: cinder/volume/netapp.py:128 -#, fuzzy -msgid "Connected to DFM server" -msgstr "Оновлено з'єднання до черги" +#: cinder/volume/drivers/netapp/iscsi.py:1393 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1456 +msgid "Volume refresh job already running. Returning..." +msgstr "" -#: cinder/volume/netapp.py:159 +#: cinder/volume/drivers/netapp/iscsi.py:1462 #, python-format -msgid "Job failed: %s" +msgid "Error refreshing vol capacity. Message: %s" msgstr "" -#: cinder/volume/netapp.py:240 -msgid "Failed to provision dataset member" +#: cinder/volume/drivers/netapp/iscsi.py:1470 +#, python-format +msgid "Refreshing capacity info for %s." msgstr "" -#: cinder/volume/netapp.py:252 -msgid "No LUN was created by the provision job" +#: cinder/volume/drivers/netapp/nfs.py:104 +#: cinder/volume/drivers/netapp/nfs.py:211 +#, python-format +msgid "NFS file %s not discovered." msgstr "" -#: cinder/volume/netapp.py:261 cinder/volume/netapp.py:433 +#: cinder/volume/drivers/netapp/nfs.py:224 #, python-format -msgid "Failed to find LUN ID for volume %s" +msgid "Copied image to volume %s" msgstr "" -#: cinder/volume/netapp.py:280 -msgid "Failed to remove and delete dataset member" +#: cinder/volume/drivers/netapp/nfs.py:230 +#, python-format +msgid "Registering image in cache %s" msgstr "" -#: cinder/volume/netapp.py:603 cinder/volume/netapp.py:657 +#: cinder/volume/drivers/netapp/nfs.py:237 #, python-format -msgid "No LUN ID for volume %s" +msgid "" +"Exception while registering image %(image_id)s in cache. Exception: " +"%(exc)s" msgstr "" -#: cinder/volume/netapp.py:607 cinder/volume/netapp.py:661 -#, python-format -msgid "Failed to get LUN details for LUN ID %s" -msgstr "" +#: cinder/volume/drivers/netapp/nfs.py:250 +#, python-format +msgid "Found cache file for image %(image_id)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:263 +#, python-format +msgid "Cloning img from cache for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:271 +msgid "Image cache cleaning in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:282 +msgid "Image cache cleaning in progress." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:293 +#, python-format +msgid "Cleaning cache for share %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:298 +#, python-format +msgid "Files to be queued for deletion %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:305 +#, python-format +msgid "Exception during cache cleaning %(share)s. Message - %(ex)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:311 +msgid "Image cache cleaning done." 
+msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:336 +#, python-format +msgid "Bytes to free %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:343 +#, python-format +msgid "Delete file path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:358 +#, python-format +msgid "Deleting file at path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:363 +#, python-format +msgid "Exception during deleting %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:395 +#, python-format +msgid "Unexpected exception in cloning image %(image_id)s. Message: %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:411 +#, python-format +msgid "Cloning image %s from cache" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:415 +#, python-format +msgid "Cache share: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:425 +#, python-format +msgid "Unexpected exception during image cloning in share %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:431 +#, python-format +msgid "Cloning image %s directly in share" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:436 +#, python-format +msgid "Share is cloneable %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:443 +#, python-format +msgid "Image is raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:450 +#, python-format +msgid "Image will locally be converted to raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:457 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:467 +#, python-format +msgid "Performing post clone for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:474 +msgid "NFS file could not be discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:478 +msgid "Checking file for resize" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:482 +#, python-format +msgid "Resizing file to %sG" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:488 +msgid "Resizing image file failed." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:510 +msgid "Discover file retries exhausted." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:529 +#, python-format +msgid "Image location not in the expected format %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:557 +#, python-format +msgid "Found possible share matches %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:561 +msgid "Unexpected exception while short listing used share." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:599 +#, python-format +msgid "Extending volume %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:710 +#, python-format +msgid "Shares on vserver %s will only be used for provisioning." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:714 +#: cinder/volume/drivers/netapp/nfs.py:892 +msgid "No vserver set in config. SSC will be disabled." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:757 +#, python-format +msgid "Exception creating vol %(name)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:765 +#, python-format +msgid "Volume %s could not be created on shares." 
+msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:815 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:856 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:862 +#, python-format +msgid "" +"Cloning with params volume %(volume)s, src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:924 +msgid "No cluster ssc stats found. Wait for next volume stats update." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:941 +msgid "No shares found hence skipping ssc refresh." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:978 +#: cinder/volume/drivers/netapp/nfs.py:1221 +#, python-format +msgid "Shortlisted del elg files %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:983 +#: cinder/volume/drivers/netapp/nfs.py:1226 +#, python-format +msgid "Getting file usage for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:988 +#: cinder/volume/drivers/netapp/nfs.py:1231 +#, python-format +msgid "file-usage for path %(path)s is %(bytes)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1005 +#: cinder/volume/drivers/netapp/nfs.py:1268 +#, python-format +msgid "Share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1007 +#: cinder/volume/drivers/netapp/nfs.py:1270 +#, python-format +msgid "No share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1038 +#, python-format +msgid "Found volume %(vol)s for share %(share)s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1129 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1139 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:241 +#, python-format +msgid "Unexpected error while creating ssc vol list. Message - %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:272 +#, python-format +msgid "Exception querying aggr options. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:313 +#, python-format +msgid "Exception querying sis information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:347 +#, python-format +msgid "Exception querying mirror information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:379 +#, python-format +msgid "Exception querying storage disk. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:421 +#, python-format +msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:455 +#, python-format +msgid "Successfully completed stale refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:482 +#, python-format +msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:488 +#, python-format +msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:500 +msgid "Backend not a VolumeDriver." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:502 +msgid "Backend server not NaServer." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:505 +msgid "ssc job in progress. Returning... 
" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:517 +msgid "refresh stale ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:616 +msgid "Fatal error: User not permitted to query NetApp volumes." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:623 +#, python-format +msgid "" +"The user does not have access or sufficient privileges to use all ssc " +"apis. The ssc features %s may not work as expected." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:122 +msgid "ems executed successfully." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:124 +#, python-format +msgid "Failed to invoke ems. Message : %s" +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:137 +msgid "" +"It is not the recommended way to use drivers by NetApp. Please use " +"NetAppDriver to achieve the functionality." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:160 +msgid "Requires an NaServer instance." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:317 +msgid "Unsupported Clustered Data ONTAP version." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:99 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:150 +#, python-format +msgid "Extending volume: %(id)s New size: %(size)s GB" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:166 +#, python-format +msgid "Volume %s does not exist, it seems it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:179 +#, python-format +msgid "Cannot delete snapshot %(origin): %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:190 +#, python-format +msgid "Creating temp snapshot of the original volume: %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:200 +#: cinder/volume/drivers/nexenta/nfs.py:200 +#, python-format +msgid "Volume creation failed, deleting created snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:205 +#: cinder/volume/drivers/nexenta/nfs.py:205 +#, python-format +msgid "Failed to delete zfs snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:223 +#, python-format +msgid "Enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:250 +#, python-format +msgid "Remote NexentaStor appliance at %s should be SSH-bound." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:267 +#, python-format +msgid "" +"Cannot send source snapshot %(src)s to destination %(dst)s. Reason: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:275 +#, python-format +msgid "" +"Cannot delete temporary source snapshot %(src)s on NexentaStor Appliance:" +" %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:281 +#, python-format +msgid "Cannot delete source volume %(volume)s on NexentaStor Appliance: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:318 +#, python-format +msgid "Snapshot %s does not exist, it seems it was already deleted." 
+msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:439 +#: cinder/volume/drivers/windows/windows_utils.py:230 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:449 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:461 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:471 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:481 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:514 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:522 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:83 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:88 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:89 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:90 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:96 +#, fuzzy, python-format +msgid "Got response: %s" +msgstr "відповідь %s" + +#: cinder/volume/drivers/nexenta/nfs.py:85 +#, python-format +msgid "Volume %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:89 +#, python-format +msgid "Folder %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:114 +#, python-format +msgid "Creating folder on Nexenta Store %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:146 +#, python-format +msgid "Cannot destroy created folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:176 +#, python-format +msgid "Cannot destroy cloned folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:227 +#, python-format +msgid "Folder %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:237 +#: cinder/volume/drivers/nexenta/nfs.py:268 +#, python-format +msgid "Snapshot %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:302 +#, python-format +msgid "Creating regular file: %s.This may take some time." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:313 +#, python-format +msgid "Regular file: %s created." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:365 +#, python-format +msgid "Sharing folder %s on Nexenta Store" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:393 +#, python-format +msgid "Shares loaded: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/utils.py:46 +#, python-format +msgid "Invalid value: \"%s\"" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:93 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:99 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:107 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:137 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:190 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:246 +#, python-format +msgid "Snapshot info: %(name)s => %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:321 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:79 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:166 +#, python-format +msgid "Invalid hp3parclient version. Version %s or greater required." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:179 +#, python-format +msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:193 +#, python-format +msgid "HP3PARCommon %(common_ver)s, hp3parclient %(rest_ver)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:212 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:494 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:228 +#, python-format +msgid "Failed to get domain because CPG (%s) doesn't exist on array." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:247 +#, python-format +msgid "Error extending volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:347 +#, python-format +msgid "command %s failed" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:390 +#, fuzzy, python-format +msgid "Error running ssh command: %s" +msgstr "Неочікувана помилка при виконанні команди." + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:586 +#, python-format +msgid "VV Set %s does not exist." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:633 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:684 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:752 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1004 +#, python-format +msgid "Failure in update_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1019 +#, python-format +msgid "Failure in clear_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1031 +#, python-format +msgid "Error attaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1039 +#, python-format +msgid "Error detaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:124 +#, python-format +msgid "Invalid IP address format '%s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:158 +#, python-format +msgid "" +"Found invalid iSCSI IP address(s) in configuration option(s) " +"hp3par_iscsi_ips or iscsi_ip_address '%s.'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:164 +msgid "At least one valid iSCSI IP address must be set." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:266 +msgid "Least busy iSCSI port not found, using first iSCSI port in list." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:75 +#, python-format +msgid "Failure while invoking function: %(func)s. Error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:162 +#, python-format +msgid "Error while terminating session: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:165 +msgid "Successfully established connection to the server." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:172 +#, python-format +msgid "Error while logging out the user: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:218 +#, python-format +msgid "" +"Not authenticated error occurred. Will create session and try API call " +"again: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:258 +#, python-format +msgid "Task: %(task)s progress: %(prog)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:262 +#, python-format +msgid "Task %s status: success." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:266 +#: cinder/volume/drivers/vmware/api.py:271 +#, python-format +msgid "Task: %(task)s failed with error: %(err)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:290 +msgid "Lease is ready." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:294 +msgid "Lease initializing..." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:304 +#, python-format +msgid "Error: unknown lease state %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:51 +#, python-format +msgid "Read %(bytes)s out of %(max)s from ThreadSafePipe." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:56 +#, python-format +msgid "Completed transfer of size %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:102 +#, python-format +msgid "Initiating image service update on image: %(image)s with meta: %(meta)s" +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:117 +#, python-format +msgid "Glance image: %s is now active." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:123 +#, python-format +msgid "Glance image: %s is in killed state." 
+msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:132 +#, python-format +msgid "Glance image %(id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:171 +#, python-format +msgid "" +"Exception during HTTP connection close in VMwareHTTPWrite. Exception is " +"%s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:203 +#: cinder/volume/drivers/vmware/read_write_util.py:292 +msgid "Could not retrieve URL from lease." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:206 +#, python-format +msgid "Opening vmdk url: %s for write." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:231 +#, python-format +msgid "Written %s bytes to vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:242 +#: cinder/volume/drivers/vmware/read_write_util.py:318 +#, python-format +msgid "Updating progress to %s percent." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:258 +#: cinder/volume/drivers/vmware/read_write_util.py:334 +msgid "Lease released." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:260 +#: cinder/volume/drivers/vmware/read_write_util.py:336 +#, python-format +msgid "Lease is already in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:295 +#, python-format +msgid "Opening vmdk url: %s for read." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:307 +#, python-format +msgid "Read %s bytes from vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:150 +#, python-format +msgid "Error(s): %s occurred in the call to RetrievePropertiesEx." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:189 +#, python-format +msgid "No such SOAP method %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:198 +#, python-format +msgid "httplib error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:209 +#, python-format +msgid "Socket error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:218 +#, python-format +msgid "Type error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:225 +#, python-format +msgid "Error in %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:112 +#, python-format +msgid "Returning spec value %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:115 +#, python-format +msgid "Invalid spec value: %s specified." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:118 +#, python-format +msgid "Returning default spec value: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:169 +#, python-format +msgid "%s not set." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:174 +#, python-format +msgid "Successfully setup driver: %(driver)s for server: %(ip)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:219 +msgid "Backing not available, no operation to be performed." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:287 +#, python-format +msgid "" +"Unable to pick datastore to accommodate %(size)s bytes from the " +"datastores: %(dss)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:293 +#, python-format +msgid "" +"Selected datastore: %(datastore)s with %(host_count)d connected host(s) " +"for the volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:375 +#, python-format +msgid "" +"Unable to find suitable datastore for volume of size: %(vol)s GB under " +"host: %(host)s. 
More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:385 +#, python-format +msgid "Unable to find host to accommodate a disk of size: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:412 +#, python-format +msgid "" +"Unable to find suitable datastore for volume: %(vol)s under host: " +"%(host)s. More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:422 +#, python-format +msgid "Unable to create volume: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:441 +#, python-format +msgid "The instance: %s for which initialize connection is called, exists." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:448 +#, python-format +msgid "There is no backing for the volume: %s. Need to create one." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:456 +msgid "The instance for which initialize connection is called, does not exist." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:461 +#, python-format +msgid "Trying to boot from an empty volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:470 +#, python-format +msgid "" +"Returning connection_info: %(info)s for volume: %(volume)s with " +"connector: %(connector)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:518 +#, python-format +msgid "Snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:523 +#, python-format +msgid "There is no backing, so will not create snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:528 +#, python-format +msgid "Successfully created snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:549 +#, python-format +msgid "Delete snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:554 +#, python-format +msgid "There is no backing, and so there is no snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:558 +#, python-format +msgid "Successfully deleted snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:586 +#, python-format +msgid "Successfully cloned new backing: %(back)s from source VMDK file: %(vmdk)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:603 +#, python-format +msgid "" +"There is no backing for the source volume: %(svol)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:633 +#, python-format +msgid "" +"There is no backing for the source snapshot: %(snap)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:642 +#: cinder/volume/drivers/vmware/vmdk.py:982 +#, python-format +msgid "" +"There is no snapshot point for the snapshoted volume: %(snap)s. Not " +"creating any backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:678 +#, python-format +msgid "Cannot create image of disk format: %s. Only vmdk disk format is accepted." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:713 +#: cinder/volume/drivers/vmware/vmdk.py:771 +#, python-format +msgid "Fetching glance image: %(id)s to server: %(host)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:722 +#: cinder/volume/drivers/vmware/vmdk.py:792 +#, python-format +msgid "Done copying image: %(id)s to volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:725 +#, python-format +msgid "" +"Exception in copy_image_to_volume: %(excep)s. Deleting the backing: " +"%(back)s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:746 +#, python-format +msgid "Exception in _select_ds_for_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:749 +#, python-format +msgid "Selected datastore %(ds)s for new volume of size %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:784 +#, python-format +msgid "Exception in copy_image_to_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:787 +#, python-format +msgid "Deleting the backing: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:808 +#, python-format +msgid "Copy glance image: %s to create new volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:842 +msgid "Upload to glance of attached volume is not supported." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:847 +#, python-format +msgid "Copy Volume: %s to new image." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:853 +#, python-format +msgid "Backing not found, creating for volume: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:872 +#, python-format +msgid "Done copying volume %(vol)s to a new image %(img)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:922 +#, python-format +msgid "Relocating volume: %(backing)s to %(ds)s and %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:961 +#: cinder/volume/drivers/vmware/volumeops.py:630 +#, python-format +msgid "Successfully created clone: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:974 +#, python-format +msgid "" +"There is no backing for the snapshoted volume: %(snap)s. Not creating any" +" backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1010 +#, python-format +msgid "" +"There is no backing for the source volume: %(src)s. Not creating any " +"backing for volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1018 +#, python-format +msgid "Linked clone of source volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:94 +#, python-format +msgid "Downloading image: %s from glance image server as a flat vmdk file." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:107 +#: cinder/volume/drivers/vmware/vmware_images.py:126 +#, python-format +msgid "Downloaded image: %s from glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:113 +#, python-format +msgid "Downloading image: %s from glance image server using HttpNfc import." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:132 +#, python-format +msgid "Uploading image: %s to the Glance image server using HttpNfc export." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:158 +#, python-format +msgid "Uploaded image: %s to the Glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:87 +#, python-format +msgid "Did not find any backing with name: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:94 +#, python-format +msgid "Deleting the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:97 +#, python-format +msgid "Initiated deletion of VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:99 +#, python-format +msgid "Deleted the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:237 +#, python-format +msgid "There are no valid datastores attached to %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:289 +#, python-format +msgid "" +"Creating folder: %(child_folder_name)s under parent folder: " +"%(parent_folder)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:306 +#, python-format +msgid "Child folder already present: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:314 +#, python-format +msgid "Created child folder: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:365 +#, python-format +msgid "Spec for creating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:383 +#, python-format +msgid "" +"Creating volume backing name: %(name)s disk_type: %(disk_type)s size_kb: " +"%(size_kb)s at folder: %(folder)s resourse pool: %(resource_pool)s " +"datastore name: %(ds_name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:395 +#, python-format +msgid "Initiated creation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:398 +#, python-format +msgid "Successfully created volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:438 +#, python-format +msgid "Spec for relocating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:452 +#, python-format +msgid "" +"Relocating backing: %(backing)s to datastore: %(ds)s and resource pool: " +"%(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:462 +#, python-format +msgid "Initiated relocation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:464 +#, python-format +msgid "" +"Successfully relocated volume backing: %(backing)s to datastore: %(ds)s " +"and resource pool: %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:474 +#, python-format +msgid "Moving backing: %(backing)s to folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:479 +#, python-format +msgid "Initiated move of volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:482 +#, python-format +msgid "Successfully moved volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:494 +#, python-format +msgid "Snapshoting backing: %(backing)s with name: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:501 +#, python-format +msgid "Initiated snapshot of volume backing: %(backing)s named: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:505 +#, python-format +msgid "Successfully created snapshot: %(snap)s for volume backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:553 +#, python-format +msgid "Deleting the snapshot: %(name)s from backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:558 +#, python-format +msgid "" +"Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " +"delete anything." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:565 +#, python-format +msgid "Initiated snapshot: %(name)s deletion for backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:569 +#, python-format +msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:597 +#, python-format +msgid "Spec for cloning the backing: %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:613 +#, python-format +msgid "" +"Creating a clone of backing: %(back)s, named: %(name)s, clone type: " +"%(type)s from snapshot: %(snap)s on datastore: %(ds)s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:627 +#, python-format +msgid "Initiated clone of backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:638 +#, python-format +msgid "Deleting file: %(file)s under datacenter: %(dc)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:646 +#, python-format +msgid "Initiated deletion via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:648 +#, python-format +msgid "Successfully deleted file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:701 +msgid "Copying disk data before snapshot of the VM" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:711 +#, python-format +msgid "Initiated copying disk data via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:713 +#, python-format +msgid "Successfully copied disk at: %(src)s to: %(dest)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:722 +#, python-format +msgid "Deleting vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:729 +#, python-format +msgid "Initiated deleting vmdk file via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:731 +#, python-format +msgid "Deleted vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/windows/windows.py:102 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:47 +#, python-format +msgid "" +"check_for_setup_error: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:53 +msgid "check_for_setup_error: there is no ISCSI traffic listening." +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:63 +#, python-format +msgid "" +"get_host_information: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:73 +#, python-format +msgid "" +"get_host_information: the ISCSI target information could not be " +"retrieved. WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:105 +#, python-format +msgid "" +"associate_initiator_with_iscsi_target: an association between initiator: " +"%(init)s and target name: %(target)s could not be established. WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:123 +#, python-format +msgid "" +"delete_iscsi_target: error when deleting the iscsi target associated with" +" target name: %(target)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:139 +#, python-format +msgid "" +"create_volume: error when creating the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:157 +#, python-format +msgid "" +"delete_volume: error when deleting the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:177 +#, python-format +msgid "" +"create_snapshot: error when creating the snapshot name: %(vol_name)s . 
" +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:193 +#, python-format +msgid "" +"create_volume_from_snapshot: error when creating the volume name: " +"%(vol_name)s from snapshot name: %(snap_name)s. WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:208 +#, python-format +msgid "" +"delete_snapshot: error when deleting the snapshot name: %(snap_name)s . " +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:223 +#, python-format +msgid "" +"create_iscsi_target: error when creating iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:240 +#, python-format +msgid "" +"remove_iscsi_target: error when deleting iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:255 +#, python-format +msgid "" +"add_disk_to_target: error adding disk associated to volume : %(vol_name)s" +" to the target name: %(tar_name)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:273 +#, python-format +msgid "" +"copy_vhd_disk: error when copying disk from source path : %(src_path)s to" +" destination path: %(dest_path)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:290 +#, python-format +msgid "" +"extend: error when extending the volume: %(vol_name)s .WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/flows/common.py:52 +#, python-format +msgid "Restoring source %(source_volid)s status to %(status)s" +msgstr "" + +#: cinder/volume/flows/common.py:58 +#, python-format +msgid "" +"Failed setting source volume %(source_volid)s back to its initial " +"%(source_status)s status" +msgstr "" + +#: cinder/volume/flows/common.py:83 +#, python-format +msgid "Updating volume: %(volume_id)s with %(update)s due to: %(reason)s" +msgstr "" + +#: cinder/volume/flows/common.py:90 +#: cinder/volume/flows/manager/create_volume.py:676 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(update)s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:81 +#, python-format +msgid "Originating snapshot status must be one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:103 +#, python-format +msgid "" +"Unable to create a volume from an originating source volume when its " +"status is not one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:126 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than the snapshot size " +"%(snap_size)sGB. They must be >= original snapshot size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:135 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than original volume size " +"%(source_size)sGB. They must be >= original volume size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:144 +#, python-format +msgid "Volume size %(size)s must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:186 +#, python-format +msgid "" +"Size of specified image %(image_size)sGB is larger than volume size " +"%(volume_size)sGB." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:194 +#, python-format +msgid "" +"Volume size %(volume_size)sGB cannot be smaller than the image minDisk " +"size %(min_disk)sGB." 
+msgstr "" + +#: cinder/volume/flows/api/create_volume.py:212 +#, python-format +msgid "Metadata property key %s greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:217 +#, python-format +msgid "Metadata property key %s value greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:254 +#, python-format +msgid "Availability zone '%s' is invalid" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:267 +msgid "Volume must be in the same availability zone as the snapshot" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:276 +msgid "Volume must be in the same availability zone as the source volume" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:315 +msgid "Volume type will be changed to be the same as the source volume." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:463 +#, python-format +msgid "Failed destroying volume entry %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:546 +#, python-format +msgid "Failed rolling back quota for %s reservations" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:590 +#, python-format +msgid "Failed to update quota for deleting volume: %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:678 +#: cinder/volume/flows/manager/create_volume.py:192 +#, python-format +msgid "Volume %s: create failed" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:682 +msgid "Unexpected build error:" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:105 +#, python-format +msgid "" +"Volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d due to " +"%(reason)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:124 +#, python-format +msgid "Volume %s: re-scheduled" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:141 +#, python-format +msgid "Updating volume %(volume_id)s with %(update)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:146 +#, python-format +msgid "Volume %s: resetting 'creating' status failed." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:165 +#, python-format +msgid "Volume %s: rescheduling failed" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:308 +#, python-format +msgid "" +"Failed notifying about the volume action %(event)s for volume " +"%(volume_id)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:345 +#, python-format +msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:347 +#, python-format +msgid "" +"Failed updating volume %(vol_id)s metadata using the provided " +"%(src_type)s %(src_id)s metadata" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:405 +#, python-format +msgid "" +"Failed fetching snapshot %(snapshot_id)s bootable flag using the provided" +" glance snapshot %(snapshot_ref_id)s volume reference" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:418 +#, python-format +msgid "Marking volume %s as bootable." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:421 +#, python-format +msgid "Failed updating volume %(volume_id)s bootable flag to true" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:448 +#, python-format +msgid "" +"Attempting download of %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s." 
+msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:455 +#: cinder/volume/flows/manager/create_volume.py:466 +#, python-format +msgid "" +"Failed to copy image %(image_id)s to volume: %(volume_id)s, error: " +"%(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:461 +#, python-format +msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:475 +#, python-format +msgid "" +"Downloaded image %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s successfully." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:512 +#, python-format +msgid "" +"Creating volume glance metadata for volume %(volume_id)s backed by image " +"%(image_id)s with: %(vol_metadata)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:526 +#, python-format +msgid "" +"Cloning %(volume_id)s from image %(image_id)s at location " +"%(image_location)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:552 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(updates)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:574 +#, python-format +msgid "Unable to create volume. Volume driver %s not initialized" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:588 +#, python-format +msgid "" +"Volume %(volume_id)s: being created using %(functor)s with specification:" +" %(volume_spec)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:611 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with creation provided " +"model %(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:619 +#, python-format +msgid "Volume %s: creating export" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:633 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with driver provided model " +"%(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:680 +#, python-format +msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" +msgstr "" + +#~ msgid "Error retrieving volume status: %s" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get system name" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get storage pool data" +#~ msgstr "" + +#~ msgid "Cannot find any Fibre Channel HBAs" +#~ msgstr "" + +#~ msgid "Volume status must be available or error" +#~ msgstr "" + +#~ msgid "No backend config with id %s" +#~ msgstr "" + +#~ msgid "No sm_flavor called %s" +#~ msgstr "" + +#~ msgid "No sm_volume with id %s" +#~ msgstr "" + +#~ msgid "Error: %s" +#~ msgstr "" + +#~ msgid "Unexpected state while cloning %s" +#~ msgstr "Неочікувана помилка при виконанні команди." + +#~ msgid "iSCSI device not found at %s" +#~ msgstr "" + +#~ msgid "Fibre Channel device not found." 
+#~ msgstr "" + +#~ msgid "Uncaught exception" +#~ msgstr "" + +#~ msgid "Out reactor registered" +#~ msgstr "" + +#~ msgid "CONSUMER GOT %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT QUEUED %(data)s" +#~ msgstr "" + +#~ msgid "Could not create IPC directory %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT %(data)s" +#~ msgstr "" + +#~ msgid "May specify only one of snapshot, imageRef or source volume" +#~ msgstr "" + +#~ msgid "Volume size cannot be lesser than the Snapshot size" +#~ msgstr "" + +#~ msgid "Unable to clone volumes that are in an error state" +#~ msgstr "" + +#~ msgid "Clones currently must be >= original volume size." +#~ msgstr "" + +#~ msgid "Volume size '%s' must be an integer and greater than 0" +#~ msgstr "" + +#~ msgid "Size of specified image is larger than volume size." +#~ msgstr "" + +#~ msgid "Image minDisk size is larger than the volume size." +#~ msgstr "" + +#~ msgid "" +#~ msgstr "" + +#~ msgid "Availability zone is invalid" +#~ msgstr "" + +#~ msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +#~ msgstr "" + +#~ msgid "volume %s: creating from snapshot" +#~ msgstr "" + +#~ msgid "volume %s: creating from existing volume" +#~ msgstr "" + +#~ msgid "volume %s: creating from image" +#~ msgstr "" + +#~ msgid "volume %s: creating" +#~ msgstr "" + +#~ msgid "Setting volume: %s status to error after failed image copy." +#~ msgstr "" + +#~ msgid "Unexpected Error: " +#~ msgstr "" + +#~ msgid "volume %s: creating export" +#~ msgstr "" + +#~ msgid "volume %s: create failed" +#~ msgstr "" + +#~ msgid "volume %s: created successfully" +#~ msgstr "" + +#~ msgid "volume %s: Error trying to reschedule create" +#~ msgstr "" + +#~ msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +#~ msgstr "" + +#~ msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +#~ msgstr "" + +#~ msgid "Downloaded image %(image_id)s to %(volume_id)s successfully." +#~ msgstr "" + +#~ msgid "Array Mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "LUN %(lun)s of size %(size)s MB is created." +#~ msgstr "" + +#~ msgid "Array mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "Failed to attach iser target for volume %(volume_id)s." 
+#~ msgstr "" + +#~ msgid "Fetching %s" +#~ msgstr "" + +#~ msgid "Link Local address is not found.:%s" +#~ msgstr "" + +#~ msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +#~ msgstr "" + +#~ msgid "Started %(name)s on %(_host)s:%(_port)s" +#~ msgstr "" + +#~ msgid "Unable to find a Fibre Channel volume device" +#~ msgstr "" + +#~ msgid "Volume device not found at %s" +#~ msgstr "" + +#~ msgid "Unable to find Volume Group: %s" +#~ msgstr "" + +#~ msgid "Failed to create Volume Group: %s" +#~ msgstr "Створити розділ на %s ГБ" + +#~ msgid "snapshot %(snap_name)s: creating" +#~ msgstr "" + +#~ msgid "Running with CoraidDriver for ESM EtherCLoud" +#~ msgstr "" + +#~ msgid "Update session cookie %(session)s" +#~ msgstr "" + +#~ msgid "Message : %(message)s" +#~ msgstr "" + +#~ msgid "Error while trying to set group: %(message)s" +#~ msgstr "" + +#~ msgid "Unable to find group: %(group)s" +#~ msgstr "" + +#~ msgid "ESM urlOpen error" +#~ msgstr "" + +#~ msgid "JSON Error" +#~ msgstr "" + +#~ msgid "Request without URL" +#~ msgstr "" + +#~ msgid "Configure data : %s" +#~ msgstr "" + +#~ msgid "Configure response : %s" +#~ msgstr "відповідь %s" + +#~ msgid "Unable to retrive volume infos for volume %(volname)s" +#~ msgstr "" + +#~ msgid "Cannot login on Coraid ESM" +#~ msgstr "" + +#~ msgid "Fail to create volume %(volname)s" +#~ msgstr "" + +#~ msgid "Failed to delete volume %(volname)s" +#~ msgstr "" + +#~ msgid "Failed to Create Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "Failed to Delete Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "Failed to Create Volume from Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "fmt = %(fmt)s backed by: %(backing_file)s" +#~ msgstr "" + +#~ msgid "Expected image to be in raw format, but is %s" +#~ msgstr "" + +#~ msgid "volume group %s doesn't exist" +#~ msgstr "" + +#~ msgid "Error retrieving volume stats: %s" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Could not get system name" +#~ msgstr "" + +#~ msgid "CPG (%s) must be in a domain" +#~ msgstr "" + +#~ msgid "Error populating default encryption types!" +#~ msgstr "" + +#~ msgid "Unexpected error while running command." +#~ msgstr "Неочікувана помилка при виконанні команди." + +#~ msgid "Nexenta SA returned the error" +#~ msgstr "" + +#~ msgid "Ignored target group creation error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group member addition error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LU creation error \"%s\" while ensuring export" +#~ msgstr "" -#: cinder/volume/netapp.py:614 -#, python-format -msgid "Failed to get host details for host ID %s" -msgstr "" +#~ msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +#~ msgstr "" -#: cinder/volume/netapp.py:620 -#, python-format -msgid "Failed to get target portal for filer: %s" -msgstr "" +#~ msgid "Copying metadata from snapshot %(snap_volume_id)s to %(volume_id)s" +#~ msgstr "" -#: cinder/volume/netapp.py:625 -#, python-format -msgid "Failed to get target IQN for filer: %s" -msgstr "" +#~ msgid "Connection to glance failed" +#~ msgstr "" -#: cinder/volume/san.py:113 cinder/volume/san.py:151 -msgid "Specify san_password or san_private_key" -msgstr "" +#~ msgid "Invalid snapshot" +#~ msgstr "" -#: cinder/volume/san.py:156 -msgid "san_ip must be set" -msgstr "" +#~ msgid "Invalid input received" +#~ msgstr "" -#: cinder/volume/san.py:320 -#, python-format -msgid "LUID not found for %(zfs_poolname)s. 
Output=%(out)s" -msgstr "" +#~ msgid "Invalid volume type" +#~ msgstr "" -#: cinder/volume/san.py:452 -#, python-format -msgid "CLIQ command returned %s" -msgstr "" +#~ msgid "Invalid volume" +#~ msgstr "" -#: cinder/volume/san.py:458 -#, python-format -msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" -msgstr "" +#~ msgid "Invalid host" +#~ msgstr "" -#: cinder/volume/san.py:466 -#, python-format -msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" -msgstr "" +#~ msgid "Invalid auth key" +#~ msgstr "" -#: cinder/volume/san.py:496 -#, python-format -msgid "" -"Unexpected number of virtual ips for cluster %(cluster_name)s. " -"Result=%(_xml)s" -msgstr "" +#~ msgid "Invalid metadata" +#~ msgstr "" -#: cinder/volume/san.py:549 -#, python-format -msgid "Volume info: %(volume_name)s => %(volume_attributes)s" -msgstr "" +#~ msgid "Invalid metadata size" +#~ msgstr "" -#: cinder/volume/san.py:594 -msgid "local_path not supported" -msgstr "" +#~ msgid "Migration error" +#~ msgstr "" -#: cinder/volume/san.py:626 -#, python-format -msgid "Could not determine project for volume %s, can't export" -msgstr "" +#~ msgid "Quota exceeded" +#~ msgstr "" -#: cinder/volume/san.py:696 -#, python-format -msgid "Payload for SolidFire API call: %s" -msgstr "" +#~ msgid "Connection to swift failed" +#~ msgstr "" -#: cinder/volume/san.py:713 -#, python-format -msgid "Call to json.loads() raised an exception: %s" -msgstr "" +#~ msgid "Volume migration failed" +#~ msgstr "" -#: cinder/volume/san.py:718 -#, python-format -msgid "Results of SolidFire API call: %s" -msgstr "" +#~ msgid "SSH command injection detected" +#~ msgstr "" -#: cinder/volume/san.py:732 -#, python-format -msgid "Found solidfire account: %s" -msgstr "" +#~ msgid "Invalid qos specs" +#~ msgstr "" -#: cinder/volume/san.py:746 -#, python-format -msgid "solidfire account: %s does not exist, create it..." -msgstr "" +#~ msgid "debug in callback: %s" +#~ msgstr "" -#: cinder/volume/san.py:804 -msgid "Enter SolidFire create_volume..." -msgstr "" +#~ msgid "Expected object of type: %s" +#~ msgstr "" -#: cinder/volume/san.py:846 -msgid "Leaving SolidFire create_volume" -msgstr "" +#~ msgid "timefunc: '%(name)s' took %(total_time).2f secs" +#~ msgstr "" -#: cinder/volume/san.py:861 -msgid "Enter SolidFire delete_volume..." -msgstr "" +#~ msgid "base image still has %s snapshots so not deleting base image" +#~ msgstr "" -#: cinder/volume/san.py:880 -#, python-format -msgid "Deleting volumeID: %s " -msgstr "" +#~ msgid "Failed to rename migration destination volume %(vol)s to %(name)s" +#~ msgstr "" -#: cinder/volume/san.py:888 -msgid "Leaving SolidFire delete_volume" -msgstr "" +#~ msgid "Resize volume \"%(name)s\" to %(size)s" +#~ msgstr "" -#: cinder/volume/san.py:891 -msgid "Executing SolidFire ensure_export..." -msgstr "" +#~ msgid "Volume \"%(name)s\" resized. New size is %(size)s" +#~ msgstr "" -#: cinder/volume/san.py:895 -msgid "Executing SolidFire create_export..." -msgstr "" +#~ msgid "Invalid snapshot backing file format: %s" +#~ msgstr "" -#: cinder/volume/volume_types.py:49 cinder/volume/volume_types.py:108 -msgid "name cannot be None" -msgstr "" +#~ msgid "Extend volume from %(old_size) to %(new_size)" +#~ msgstr "" -#: cinder/volume/volume_types.py:96 -msgid "id cannot be None" -msgstr "" +#~ msgid "pool %s doesn't exist" +#~ msgstr "" -#: cinder/volume/xensm.py:55 -#, python-format -msgid "SR name = %s" -msgstr "" +#~ msgid "_update_volume_stats: Could not get system name." 
+#~ msgstr "" -#: cinder/volume/xensm.py:56 -#, python-format -msgid "Params: %s" -msgstr "" +#~ msgid "Disk not found: %s" +#~ msgstr "" -#: cinder/volume/xensm.py:60 -#, python-format -msgid "Failed to create sr %s...continuing" -msgstr "" +#~ msgid "read timed out" +#~ msgstr "" -#: cinder/volume/xensm.py:62 -msgid "Create failed" -msgstr "" +#~ msgid "check_for_setup_error." +#~ msgstr "" -#: cinder/volume/xensm.py:64 -#, python-format -msgid "SR UUID of new SR is: %s" -msgstr "" +#~ msgid "check_for_setup_error: Can not get device type." +#~ msgstr "" -#: cinder/volume/xensm.py:71 -msgid "Failed to update db" -msgstr "" +#~ msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +#~ msgstr "" -#: cinder/volume/xensm.py:80 -#, python-format -msgid "Failed to introduce sr %s...continuing" -msgstr "" +#~ msgid "_get_device_type: Storage Pool must be configured." +#~ msgstr "" -#: cinder/volume/xensm.py:91 -#, python-format -msgid "Failed to reach backend %d" -msgstr "" +#~ msgid "create_volume:volume name: %s." +#~ msgstr "" -#: cinder/volume/xensm.py:100 -msgid "XenSMDriver requires xenapi connection" -msgstr "" +#~ msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +#~ msgstr "" -#: cinder/volume/xensm.py:110 -msgid "Failed to initiate session" -msgstr "" +#~ msgid "create_export: volume name:%s" +#~ msgstr "" -#: cinder/volume/xensm.py:142 -#, python-format -msgid "Volume will be created in backend - %d" -msgstr "" +#~ msgid "create_export:Volume %(name)s does not exist." +#~ msgstr "" -#: cinder/volume/xensm.py:154 -msgid "Failed to update volume in db" -msgstr "" +#~ msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +#~ msgstr "" -#: cinder/volume/xensm.py:157 -msgid "Unable to create volume" -msgstr "" +#~ msgid "terminate_connection:Host does not exist. Host name:%(host)s." +#~ msgstr "" -#: cinder/volume/xensm.py:171 -msgid "Failed to delete vdi" -msgstr "" +#~ msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +#~ msgstr "" -#: cinder/volume/xensm.py:177 -msgid "Failed to delete volume in db" -msgstr "" +#~ msgid "create_snapshot:Device does not support snapshot." +#~ msgstr "" -#: cinder/volume/xensm.py:210 -msgid "Failed to find volume in db" -msgstr "" +#~ msgid "create_snapshot:Resource pool needs 1GB valid size at least." +#~ msgstr "" -#: cinder/volume/xensm.py:221 -msgid "Failed to find backend in db" -msgstr "" +#~ msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +#~ msgstr "" -#: cinder/volume/nexenta/__init__.py:27 -msgid "Nexenta SA returned the error" -msgstr "" +#~ msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s" +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:64 -#, python-format -msgid "Sending JSON data: %s" -msgstr "" +#~ msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:69 -#, python-format -msgid "Auto switching to HTTPS connection to %s" -msgstr "" +#~ msgid "delete_snapshot:Device does not support snapshot." +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:75 -msgid "No headers in server response" -msgstr "" +#~ msgid "delete_snapshot:Snapshot does not exist. 
snapshot name:%(snap)s" +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:76 -msgid "Bad response from server" -msgstr "" +#~ msgid "_check_conf_file: %s" +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:79 -#, fuzzy, python-format -msgid "Got response: %s" -msgstr "відповідь %s" +#~ msgid "Write login information to xml error. %s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:96 -#, python-format -msgid "Volume %s does not exist in Nexenta SA" -msgstr "" +#~ msgid "_get_login_info error. %s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:180 -msgid "" -"Call to local_path should not happen. Verify that use_local_volumes flag " -"is turned off." -msgstr "" +#~ msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:202 -#, python-format -msgid "Ignored target creation error \"%s\" while ensuring export" -msgstr "" +#~ msgid "_get_lun_set_info:%s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:210 -#, python-format -msgid "Ignored target group creation error \"%s\" while ensuring export" -msgstr "" +#~ msgid "_get_iscsi_info:%s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:219 -#, python-format -msgid "Ignored target group member addition error \"%s\" while ensuring export" -msgstr "" +#~ msgid "CLI command:%s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:227 -#, python-format -msgid "Ignored LU creation error \"%s\" while ensuring export" -msgstr "" +#~ msgid "_execute_cli:%s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:237 -#, python-format -msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" -msgstr "" +#~ msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:273 -#, python-format -msgid "" -"Got error trying to destroy target group %(target_group)s, assuming it is" -" already gone: %(exc)s" -msgstr "" +#~ msgid "_get_tgt_iqn:iSCSI IP is %s." +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:280 -#, python-format -msgid "" -"Got error trying to delete target %(target)s, assuming it is already " -"gone: %(exc)s" -msgstr "" +#~ msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +#~ msgstr "" -#~ msgid "Unable to locate account %(account_name) on Solidfire device" +#~ msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" #~ msgstr "" -#~ msgid "Zone %(zone_id)s could not be found." +#~ msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" #~ msgstr "" -#~ msgid "Cinder access parameters were not specified." +#~ msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." #~ msgstr "" -#~ msgid "Virtual Storage Array %(id)d could not be found." +#~ msgid "Ignored target creation error while ensuring export" #~ msgstr "" -#~ msgid "Virtual Storage Array %(name)s could not be found." +#~ msgid "Ignored target group creation error while ensuring export" #~ msgstr "" -#~ msgid "Detected more than one volume with name %(vol_name)" +#~ msgid "Ignored target group member addition error while ensuring export" #~ msgstr "" -#~ msgid "Detected existing vlan with id %(vlan)" +#~ msgid "Ignored LU creation error while ensuring export" #~ msgstr "" -#~ msgid "" -#~ "Attempting to grab semaphore \"%(lock)s\" " -#~ "for method \"%(method)s\"...lock" +#~ msgid "Ignored LUN mapping entry addition error while ensuring export" #~ msgstr "" -#~ msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgid "Invalid source volume %(reason)s." 
#~ msgstr "" -#~ msgid "" -#~ "Attempting to grab file lock " -#~ "\"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgid "The request is invalid." #~ msgstr "" -#~ msgid "Got file lock \"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgid "Volume %(volume_id)s persistence file could not be found." #~ msgstr "" -#~ msgid "Parent group id and group id cannot be same" +#~ msgid "No disk at %(location)s" #~ msgstr "" -#~ msgid "No body provided" +#~ msgid "Class %(class_name)s could not be found: %(exception)s" #~ msgstr "" -#~ msgid "Create VSA %(display_name)s of type %(vc_type)s" +#~ msgid "Action not allowed." #~ msgstr "" -#~ msgid "Delete VSA with id: %s" +#~ msgid "Key pair %(key_name)s already exists." #~ msgstr "" -#~ msgid "Associate address %(ip)s to VSA %(id)s" +#~ msgid "Migration error: %(reason)s" #~ msgstr "" -#~ msgid "Disassociate address from VSA %(id)s" +#~ msgid "Maximum volume/snapshot size exceeded" #~ msgstr "" -#~ msgid "%(obj)s with ID %(id)s not found" +#~ msgid "3PAR Host already exists: %(err)s. %(info)s" #~ msgstr "" -#~ msgid "" -#~ "%(obj)s with ID %(id)s belongs to " -#~ "VSA %(own_vsa_id)s and not to VSA " -#~ "%(vsa_id)s." +#~ msgid "Backup volume %(volume_id)s type not recognised." #~ msgstr "" -#~ msgid "Index. vsa_id=%(vsa_id)s" +#~ msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s" #~ msgstr "" -#~ msgid "Detail. vsa_id=%(vsa_id)s" +#~ msgid "ssh_read: Read SSH timeout" #~ msgstr "" -#~ msgid "Create. vsa_id=%(vsa_id)s, body=%(body)s" +#~ msgid "do_setup." #~ msgstr "" -#~ msgid "Create volume of %(size)s GB from VSA ID %(vsa_id)s" +#~ msgid "create_volume: volume name: %s." #~ msgstr "" -#~ msgid "Update %(obj)s with id: %(id)s, changes: %(changes)s" +#~ msgid "delete_volume: volume name: %s." #~ msgstr "" -#~ msgid "Delete. vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgid "create_cloned_volume: src volume: %(src)s tgt volume: %(tgt)s" #~ msgstr "" -#~ msgid "Show. vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgid "create_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" #~ msgstr "" -#~ msgid "Index instances for VSA %s" +#~ msgid "delete_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" #~ msgstr "" -#~ msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances" +#~ msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s" #~ msgstr "" -#~ msgid "" -#~ "Instance quota exceeded. You cannot run" -#~ " any more instances of this type." +#~ msgid "_update_volume_stats: Updating volume stats" #~ msgstr "" -#~ msgid "" -#~ "Instance quota exceeded. You can only" -#~ " run %s more instances of this " -#~ "type." +#~ msgid "restore finished." #~ msgstr "" -#~ msgid "Going to try to soft delete %s" +#~ msgid "Error encountered during initialization of driver: %s" #~ msgstr "" -#~ msgid "No host for instance %s, deleting immediately" +#~ msgid "Unabled to update stats, driver is uninitialized" #~ msgstr "" -#~ msgid "Going to try to terminate %s" +#~ msgid "Snapshot file at %s does not exist." #~ msgstr "" -#~ msgid "Going to try to stop %s" +#~ msgid "_create_copy: Source vdisk %s does not exist" #~ msgstr "" -#~ msgid "Going to try to start %s" +#~ msgid "Login to 3PAR array invalid" #~ msgstr "" -#~ msgid "" -#~ "Going to force the deletion of the" -#~ " vm %(instance_uuid)s, even if it is" -#~ " deleted" +#~ msgid "There are no datastores present under %s." 
#~ msgstr "" -#~ msgid "" -#~ "Instance %(instance_uuid)s did not exist " -#~ "in the DB, but I will shut " -#~ "it down anyway using a special " -#~ "context" +#~ msgid "Size for volume: %s not found, skipping secure delete." #~ msgstr "" -#~ msgid "exception terminating the instance %(instance_uuid)s" +#~ msgid "Could not find attribute for LUN named %s" #~ msgstr "" -#~ msgid "trying to destroy already destroyed instance: %s" +#~ msgid "Cleaning up incomplete backup operations" #~ msgstr "" -#~ msgid "" -#~ "Instance %(name)s found in database but" -#~ " not known by hypervisor. Setting " -#~ "power state to NOSTATE" +#~ msgid "Resetting volume %s to available (was backing-up)" #~ msgstr "" -#~ msgid "" -#~ "Detected instance with name label " -#~ "'%(name_label)s' which is marked as " -#~ "DELETED but still present on host." +#~ msgid "Resetting volume %s to error_restoring (was restoring-backup)" #~ msgstr "" -#~ msgid "" -#~ "Destroying instance with name label " -#~ "'%(name_label)s' which is marked as " -#~ "DELETED but still present on host." +#~ msgid "Resetting backup %s to error (was creating)" #~ msgstr "" -#~ msgid "SQL connection failed (%(connstring)s). %(attempts)d attempts left." +#~ msgid "Resetting backup %s to available (was restoring)" #~ msgstr "" -#~ msgid "Can't downgrade without losing data" +#~ msgid "Resuming delete on backup: %s" #~ msgstr "" -#~ msgid "Instance %(instance_id)s not found" +#~ msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" #~ msgstr "" -#~ msgid "Network %s has active ports, cannot delete" +#~ msgid "create_backup finished. backup: %s" #~ msgstr "" -#~ msgid "No fixed IPs to deallocate for vif %sid" +#~ msgid "delete_backup started, backup: %s" #~ msgstr "" -#~ msgid "" -#~ "AMQP server on %(fl_host)s:%(fl_port)d is " -#~ "unreachable: %(e)s. Trying again in " -#~ "%(fl_intv)d seconds." +#~ msgid "delete_backup finished, backup %s deleted" #~ msgstr "" -#~ msgid "Unable to connect to AMQP server after %(tries)d tries. Shutting down." +#~ msgid "JSON transfer Error" #~ msgstr "" -#~ msgid "Failed to fetch message from queue: %s" +#~ msgid "create volume error: %(err)s" #~ msgstr "" -#~ msgid "Initing the Adapter Consumer for %s" +#~ msgid "Create snapshot error." #~ msgstr "" -#~ msgid "Created \"%(exchange)s\" fanout exchange with \"%(key)s\" routing key" +#~ msgid "Create luncopy error." #~ msgstr "" -#~ msgid "Exception while processing consumer" +#~ msgid "_find_host_lun_id transfer data error! " #~ msgstr "" -#~ msgid "Creating \"%(exchange)s\" fanout exchange" +#~ msgid "ssh_read: Read SSH timeout." #~ msgstr "" -#~ msgid "topic is %s" -#~ msgstr "заголовок %s" +#~ msgid "There are no hosts in the inventory." +#~ msgstr "" -#~ msgid "message %s" -#~ msgstr "повідомлення %s" +#~ msgid "Unable to create volume: %(vol)s on the hosts: %(hosts)s." +#~ msgstr "" -#~ msgid "" -#~ "Cannot confirm tmpfile at %(ipath)s is" -#~ " on same shared storage between " -#~ "%(src)s and %(dest)s." +#~ msgid "Successfully cloned new backing: %s." #~ msgstr "" -#~ msgid "" -#~ "Unable to migrate %(instance_id)s to " -#~ "%(dest)s: Lack of memory(host:%(avail)s <= " -#~ "instance:%(mem_inst)s)" +#~ msgid "Successfully reverted clone: %(clone)s to snapshot: %(snapshot)s." #~ msgstr "" -#~ msgid "" -#~ "Unable to migrate %(instance_id)s to " -#~ "%(dest)s: Lack of disk(host:%(available)s <=" -#~ " instance:%(necessary)s)" +#~ msgid "Copying backing files from %(src)s to %(dest)s." 
#~ msgstr "" -#~ msgid "Driver Method %(driver_method)s missing: %(e)s.Reverting to schedule()" +#~ msgid "Initiated copying of backing via task: %s." #~ msgstr "" -#~ msgid "Setting instance %(instance_uuid)s to ERROR state." +#~ msgid "Successfully copied backing to %s." #~ msgstr "" -#~ msgid "_filter_hosts: %(request_spec)s" +#~ msgid "Registering backing at path: %s to inventory." #~ msgstr "" -#~ msgid "Filter hosts for drive type %s" +#~ msgid "Initiated registring backing, task: %s." #~ msgstr "" -#~ msgid "Host %s has no free capacity. Skip" +#~ msgid "Successfully registered backing: %s." #~ msgstr "" -#~ msgid "Filter hosts: %s" +#~ msgid "Reverting backing to snapshot: %s." #~ msgstr "" -#~ msgid "Must implement host selection mechanism" +#~ msgid "Initiated reverting snapshot via task: %s." #~ msgstr "" -#~ msgid "Maximum number of hosts selected (%d)" +#~ msgid "Successfully reverted to snapshot: %s." #~ msgstr "" -#~ msgid "Selected excessive host %(host)s" +#~ msgid "Successfully copied disk data to: %s." #~ msgstr "" -#~ msgid "Provision volume %(name)s of size %(size)s GB on host %(host)s" +#~ msgid "Error(s): %s occurred in the call to RetrieveProperties." #~ msgstr "" -#~ msgid "volume_params %(volume_params)s" +#~ msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" #~ msgstr "" -#~ msgid "%(i)d: Volume %(name)s" +#~ msgid "Deploy v1 of the Cinder API. " #~ msgstr "" -#~ msgid "Attempting to spawn %(num_volumes)d volume(s)" +#~ msgid "Deploy v2 of the Cinder API. " #~ msgstr "" -#~ msgid "Error creating volumes" +#~ msgid "_read_xml:%s" #~ msgstr "" -#~ msgid "Non-VSA volume %d" +#~ msgid "request ip info is %s." #~ msgstr "" -#~ msgid "Spawning volume %(volume_id)s with drive type %(drive_type)s" +#~ msgid "new str info is %s." #~ msgstr "" -#~ msgid "Error creating volume" +#~ msgid "Failed to create iser target for volume %(volume_id)s." #~ msgstr "" -#~ msgid "No capability selected for volume of size %(size)s" +#~ msgid "Failed to remove iser target for volume %(volume_id)s." #~ msgstr "" -#~ msgid "Host %s:" +#~ msgid "rtstool is not installed correctly" #~ msgstr "" -#~ msgid "" -#~ "\tDrive %(qosgrp)-25s: total %(total)2s, " -#~ "used %(used)2s, free %(free)2s. Available " -#~ "capacity %(avail)-5s" +#~ msgid "Creating iser_target for: %s" #~ msgstr "" -#~ msgid "" -#~ "\t LeastUsedHost: Best host: %(best_host)s." -#~ " (used capacity %(min_used)s)" +#~ msgid "Failed to create iser target for volume id:%(vol_id)s: %(e)s" #~ msgstr "" -#~ msgid "" -#~ "\t MostAvailCap: Best host: %(best_host)s. " -#~ "(available %(max_avail)s %(type_str)s)" +#~ msgid "Removing iser_target for: %s" #~ msgstr "" -#~ msgid "(%(nm)s) publish (key: %(routing_key)s) %(message)s" +#~ msgid "Failed to remove iser target for volume id:%(vol_id)s: %(e)s" #~ msgstr "" -#~ msgid "Publishing to route %s" +#~ msgid "Volume %s does not exist, it seems it was already deleted" #~ msgstr "" -#~ msgid "Declaring queue %s" -#~ msgstr "Оголошення черги %s" +#~ msgid "Executing zfs send/recv on the appliance" +#~ msgstr "" -#~ msgid "Declaring exchange %s" -#~ msgstr "Оголошення точки обміну %s" +#~ msgid "zfs send/recv done, new volume %s created" +#~ msgstr "" -#~ msgid "Binding %(queue)s to %(exchange)s with key %(routing_key)s" +#~ msgid "Failed to delete temp snapshot %(volume)s@%(snapshot)s" #~ msgstr "" -#~ msgid "Getting from %(queue)s: %(message)s" +#~ msgid "Failed to delete zfs recv snapshot %(volume)s@%(snapshot)s" #~ msgstr "" -#~ msgid "Test: Emulate wrong VSA name. 
Raise" +#~ msgid "rbd export-diff failed - %s" #~ msgstr "" -#~ msgid "Test: Emulate DB error. Raise" +#~ msgid "rbd import-diff failed - %s" #~ msgstr "" -#~ msgid "Test: user_data = %s" +#~ msgid "%s is not on GPFS. Perhaps GPFS not mounted." #~ msgstr "" -#~ msgid "_create: param=%s" +#~ msgid "Folder %s does not exist, it seems it was already deleted." #~ msgstr "" -#~ msgid "Host %s" +#~ msgid "No 'os-update_readonly_flag' was specified in request." #~ msgstr "" -#~ msgid "Test: provision vol %(name)s on host %(host)s" +#~ msgid "Volume 'readonly' flag must be specified in request as a boolean." #~ msgstr "" -#~ msgid "\t vol=%(vol)s" +#~ msgid "ISER provider_location not stored, using discovery" #~ msgstr "" -#~ msgid "Test: VSA update request: vsa_id=%(vsa_id)s values=%(values)s" +#~ msgid "Could not find iSER export for volume %s" #~ msgstr "" -#~ msgid "Test: Volume create: %s" +#~ msgid "ISER Discovery: Found %s" #~ msgstr "" -#~ msgid "Test: Volume get request: id=%(volume_id)s" +#~ msgid "Failed to access the device on the path %(path)s: %(error)s." #~ msgstr "" -#~ msgid "Test: Volume update request: id=%(volume_id)s values=%(values)s" +#~ msgid "iSER device not found at %s" #~ msgstr "" -#~ msgid "Test: Volume get: id=%(volume_id)s" +#~ msgid "Found iSER node %(host_device)s (after %(tries)s rescans)." #~ msgstr "" -#~ msgid "Task [%(name)s] %(task)s status: success %(result)s" +#~ msgid "Skipping ensure_export. No iser_target provisioned for volume: %s" #~ msgstr "" -#~ msgid "Task [%(name)s] %(task)s status: %(status)s %(error_info)s" +#~ msgid "Skipping remove_export. No iser_target provisioned for volume: %s" #~ msgstr "" -#~ msgid "Unable to get updated status: %s" +#~ msgid "Downloading image: %s from glance image server." #~ msgstr "" -#~ msgid "" -#~ "deactivate_node is called for " -#~ "node_id = %(id)s node_ip = %(ip)s" +#~ msgid "Uploading image: %s to the Glance image server." #~ msgstr "" -#~ msgid "virsh said: %r" +#~ msgid "Invalid request body" #~ msgstr "" -#~ msgid "cool, it's a device" +#~ msgid "enter: _get_host_from_connector: prefix %s" #~ msgstr "" -#~ msgid "Unable to read LXC console" +#~ msgid "Schedule volume flow not retrieved" #~ msgstr "" -#~ msgid "" -#~ "to xml...\n" -#~ ":%s " +#~ msgid "Failed to successfully complete schedule volume using flow: %s" #~ msgstr "" -#~ msgid "During wait running, %s disappeared." +#~ msgid "Create volume flow not retrieved" #~ msgstr "" -#~ msgid "Instance %s running successfully." +#~ msgid "Failed to successfully complete create volume workflow" #~ msgstr "" -#~ msgid "The nwfilter(%(instance_secgroup_filter_name)s) is not found." +#~ msgid "Expected volume result not found" #~ msgstr "" -#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image verification failed" +#~ msgid "Manager volume flow not retrieved" #~ msgstr "" -#~ msgid "" -#~ "%(container_format)s-%(id)s (%(base_file)s): image " -#~ "verification skipped, no hash stored" +#~ msgid "Failed to successfully complete manager volume workflow" #~ msgstr "" -#~ msgid "%(container_format)s-%(id)s (%(base_file)s): checking" +#~ msgid "Unable to update stats, driver is uninitialized" #~ msgstr "" -#~ msgid "" -#~ "%(container_format)s-%(id)s (%(base_file)s): in use:" -#~ " on this node %(local)d local, " -#~ "%(remote)d on other nodes" +#~ msgid "Bad reponse from server: %s" #~ msgstr "" -#~ msgid "" -#~ "%(container_format)s-%(id)s (%(base_file)s): warning " -#~ "-- an absent base file is in " -#~ "use! 
instances: %(instance_list)s" +#~ msgid "%(flow)s has moved into state %(state)s from state %(old_state)s" #~ msgstr "" -#~ msgid "" -#~ "%(container_format)s-%(id)s (%(base_file)s): in: on" -#~ " other nodes (%(remote)d on other " -#~ "nodes)" +#~ msgid "No request spec, will not reschedule" #~ msgstr "" -#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image is not in use" +#~ msgid "No retry filter property or associated retry info, will not reschedule" #~ msgstr "" -#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image is in use" +#~ msgid "Retry info not present, will not reschedule" #~ msgstr "" -#~ msgid "Created VM %s..." +#~ msgid "Clear capabilities" #~ msgstr "" -#~ msgid "Created VM %(instance_name)s as %(vm_ref)s." +#~ msgid "This usually means the volume was never succesfully created." #~ msgstr "" -#~ msgid "Creating a CDROM-specific VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +#~ msgid "setting LU uppper (end) limit to %s" #~ msgstr "" -#~ msgid "" -#~ "Created a CDROM-specific VBD %(vbd_ref)s" -#~ " for VM %(vm_ref)s, VDI %(vdi_ref)s." +#~ msgid "Can't find lun or lun goup in array" +#~ msgstr "" + +#~ msgid "Volume to be restored to is smaller than the backup to be restored" #~ msgstr "" -#~ msgid "No primary VDI found for%(vm_ref)s" +#~ msgid "Volume driver '%(driver)s' not initialized." #~ msgstr "" -#~ msgid "Snapshotting VM %(vm_ref)s with label '%(label)s'..." +#~ msgid "in looping call" #~ msgstr "" -#~ msgid "Created snapshot %(template_vm_ref)s from VM %(vm_ref)s." +#~ msgid "Is the appropriate service running?" #~ msgstr "" -#~ msgid "Fetching image %(image)s" +#~ msgid "Could not find another host" #~ msgstr "" -#~ msgid "Image Type: %s" +#~ msgid "Not enough allocatable volume gigabytes remaining" #~ msgstr "" -#~ msgid "ISO: Found sr possibly containing the ISO image" +#~ msgid "Unable to update stats on non-intialized Volume Group: %s" #~ msgstr "" -#~ msgid "Size for image %(image)s:%(virtual_size)d" +#~ msgid "do_setup: Pool %s does not exist" #~ msgstr "" -#~ msgid "instance %s: Failed to fetch glance image" +#~ msgid "migrate_volume started with more than one vdisk copy" #~ msgstr "" -#~ msgid "Creating VBD for VDI %s ... " +#~ msgid "migrate_volume: Could not get vdisk copy data" #~ msgstr "" -#~ msgid "Creating VBD for VDI %s done." +#~ msgid "Selected datastore: %s for the volume." #~ msgstr "" -#~ msgid "VBD.unplug successful first time." +#~ msgid "There are no valid datastores present under %s." #~ msgstr "" -#~ msgid "VBD.unplug rejected: retrying..." +#~ msgid "Unable to create volume, driver not initialized" #~ msgstr "" -#~ msgid "Not sleeping anymore!" +#~ msgid "Migration %(migration_id)s could not be found." #~ msgstr "" -#~ msgid "VBD.unplug successful eventually." +#~ msgid "Bad driver response status: %(status)s" #~ msgstr "" -#~ msgid "Ignoring XenAPI.Failure in VBD.unplug: %s" +#~ msgid "Instance %(instance_id)s could not be found." 
#~ msgstr "" -#~ msgid "Ignoring XenAPI.Failure %s" +#~ msgid "Volume retype failed: %(reason)s" #~ msgstr "" -#~ msgid "Starting instance %s" +#~ msgid "SIGTERM received" #~ msgstr "" -#~ msgid "instance %s: Failed to spawn" +#~ msgid "Child %(pid)d exited with status %(code)d" #~ msgstr "" -#~ msgid "Instance %s failed to spawn - performing clean-up" +#~ msgid "_wait_child %d" #~ msgstr "" -#~ msgid "instance %s: Failed to spawn - Unable to create VM" +#~ msgid "wait wrap.failed %s" #~ msgstr "" #~ msgid "" -#~ "Auto configuring disk for instance " -#~ "%(instance_uuid)s, attempting to resize " -#~ "partition..." +#~ "Report interval must be less than " +#~ "service down time. Current config: " +#~ ". Setting " +#~ "service_down_time to: %(new_service_down_time)s" #~ msgstr "" -#~ msgid "Invalid value for injected_files: '%s'" +#~ msgid "Failed to update iscsi target for volume %(name)s." #~ msgstr "" -#~ msgid "Starting VM %s..." +#~ msgid "Updating iscsi target: %s" #~ msgstr "" -#~ msgid "Spawning VM %(instance_uuid)s created %(vm_ref)s." +#~ msgid "Failed to update iscsi target %(name)s: %(e)s" #~ msgstr "" -#~ msgid "Instance %s: waiting for running" +#~ msgid "Caught '%(exception)s' exception." #~ msgstr "" -#~ msgid "Instance %s: running" +#~ msgid "Get code level failed" #~ msgstr "" -#~ msgid "Resources to remove:%s" +#~ msgid "do_setup: Could not get system name" #~ msgstr "" -#~ msgid "Removing VDI %(vdi_ref)s(uuid:%(vdi_to_remove)s)" +#~ msgid "Failed to get license information." #~ msgstr "" -#~ msgid "Skipping VDI destroy for %s" +#~ msgid "do_setup: No configured nodes" #~ msgstr "" -#~ msgid "Finished snapshot and upload for VM %s" +#~ msgid "enter: _get_chap_secret_for_host: host name %s" #~ msgstr "" -#~ msgid "Starting snapshot for VM %s" +#~ msgid "" +#~ "leave: _get_chap_secret_for_host: host name " +#~ "%(host_name)s with secret %(chap_secret)s" #~ msgstr "" -#~ msgid "Unable to Snapshot instance %(instance_uuid)s: %(exc)s" +#~ msgid "" +#~ "_create_host: Cannot clean host name. " +#~ "Host name is not unicode or string" #~ msgstr "" -#~ msgid "Updating instance '%(instance_uuid)s' progress to %(progress)d" +#~ msgid "enter: _get_host_from_connector: %s" #~ msgstr "" -#~ msgid "Resize instance %s complete" +#~ msgid "leave: _get_host_from_connector: host %s" #~ msgstr "" -#~ msgid "domid changed from %(olddomid)s to %(newdomid)s" +#~ msgid "enter: _create_host: host %s" #~ msgstr "" -#~ msgid "VM %(instance_uuid)s already halted,skipping shutdown..." 
+#~ msgid "_create_host: No connector ports" #~ msgstr "" -#~ msgid "Shutting down VM for Instance %(instance_uuid)s" +#~ msgid "leave: _create_host: host %(host)s - %(host_name)s" #~ msgstr "" -#~ msgid "Destroying VDIs for Instance %(instance_uuid)s" +#~ msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" #~ msgstr "" #~ msgid "" -#~ "Instance %(instance_uuid)s using RAW or " -#~ "VHD, skipping kernel and ramdisk " -#~ "deletion" +#~ "storwize_svc_multihostmap_enabled is set to " +#~ "False, Not allow multi host mapping" #~ msgstr "" -#~ msgid "Instance %(instance_uuid)s VM destroyed" +#~ msgid "volume %s mapping to multi host" #~ msgstr "" -#~ msgid "Destroying VM for Instance %(instance_uuid)s" +#~ msgid "" +#~ "leave: _map_vol_to_host: LUN %(result_lun)s, " +#~ "volume %(volume_name)s, host %(host_name)s" #~ msgstr "" -#~ msgid "Automatically hard rebooting %d" +#~ msgid "enter: _delete_host: host %s " #~ msgstr "" -#~ msgid "Instance for migration %d not found, skipping" +#~ msgid "leave: _delete_host: host %s " #~ msgstr "" -#~ msgid "injecting network info to xs for vm: |%s|" +#~ msgid "_create_host failed to return the host name." #~ msgstr "" -#~ msgid "creating vif(s) for vm: |%s|" +#~ msgid "_get_host_from_connector failed to return the host name for connector" #~ msgstr "" -#~ msgid "Creating VIF for VM %(vm_ref)s, network %(network_ref)s." +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to any host found." #~ msgstr "" -#~ msgid "Created VIF %(vif_ref)s for VM %(vm_ref)s, network %(network_ref)s." +#~ msgid "" +#~ "terminate_connection: Multiple mappings of " +#~ "volume %(vol_name)s found, no host " +#~ "specified." #~ msgstr "" -#~ msgid "injecting hostname to xs for vm: |%s|" +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to host %(host_name)s found" #~ msgstr "" -#~ msgid "" -#~ "The agent call to %(method)s returned" -#~ " an invalid response: %(ret)r. VM " -#~ "id=%(instance_uuid)s; path=%(path)s; args=%(addl_args)r" +#~ msgid "protocol must be specified as ' iSCSI' or ' FC'" #~ msgstr "" -#~ msgid "" -#~ "TIMEOUT: The call to %(method)s timed" -#~ " out. VM id=%(instance_uuid)s; args=%(args)r" +#~ msgid "enter: _create_vdisk: vdisk %s " #~ msgstr "" #~ msgid "" -#~ "NOT IMPLEMENTED: The call to %(method)s" -#~ " is not supported by the agent. " -#~ "VM id=%(instance_uuid)s; args=%(args)r" +#~ "_create_vdisk %(name)s - did not find success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" #~ msgid "" -#~ "The call to %(method)s returned an " -#~ "error: %(e)s. VM id=%(instance_uuid)s; " -#~ "args=%(args)r" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find success " +#~ "message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" #~ msgstr "" -#~ msgid "Creating VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find mapping " +#~ "id in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" #~ msgstr "" -#~ msgid "Error destroying VDI" +#~ msgid "" +#~ "_prepare_fc_map: Failed to prepare FlashCopy" +#~ " from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" -#~ msgid "Volume status must be available" +#~ msgid "" +#~ "Unexecpted mapping status %(status)s for " +#~ "mapping %(id)s. 
Attributes: %(attr)s" #~ msgstr "" -#~ msgid "\tVolume %s is NOT VSA volume" +#~ msgid "" +#~ "Mapping %(id)s prepare failed to " +#~ "complete within the allotted %(to)d " +#~ "seconds timeout. Terminating." #~ msgstr "" -#~ msgid "\tFE VSA Volume %s creation - do nothing" +#~ msgid "" +#~ "_prepare_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s with " +#~ "exception %(ex)s" #~ msgstr "" -#~ msgid "VSA BE create_volume for %s failed" +#~ msgid "_prepare_fc_map: %s" #~ msgstr "" -#~ msgid "VSA BE create_volume for %s succeeded" +#~ msgid "" +#~ "_start_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s deletion - do nothing" +#~ msgid "" +#~ "enter: _run_flashcopy: execute FlashCopy from" +#~ " source %(source)s to target %(target)s" #~ msgstr "" -#~ msgid "VSA BE delete_volume for %s failed" +#~ msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" #~ msgstr "" -#~ msgid "VSA BE delete_volume for %s suceeded" +#~ msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s local path call - call discover" +#~ msgid "_create_copy: Source vdisk %(src_vdisk)s (%(src_id)s) does not exist" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s ensure export - do nothing" +#~ msgid "" +#~ "_create_copy: cannot get source vdisk " +#~ "%(src)s capacity from vdisk attributes " +#~ "%(attr)s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s create export - do nothing" +#~ msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s remove export - do nothing" +#~ msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" #~ msgstr "" -#~ msgid "VSA BE remove_export for %s failed" +#~ msgid "" +#~ "leave: _get_flashcopy_mapping_attributes: mapping " +#~ "%(fc_map_id)s, attributes %(attributes)s" #~ msgstr "" -#~ msgid "Failed to retrieve QoS info" +#~ msgid "enter: _is_vdisk_defined: vdisk %s " #~ msgstr "" -#~ msgid "invalid drive data" +#~ msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " #~ msgstr "" -#~ msgid "drive_name not defined" +#~ msgid "enter: _delete_vdisk: vdisk %s" #~ msgstr "" -#~ msgid "invalid drive type name %s" +#~ msgid "warning: Tried to delete vdisk %s but it does not exist." #~ msgstr "" -#~ msgid "*** Experimental VSA code ***" +#~ msgid "leave: _delete_vdisk: vdisk %s" #~ msgstr "" -#~ msgid "Requested number of VCs (%d) is too high. Setting to default" +#~ msgid "" +#~ "_add_vdisk_copy %(name)s - did not find" +#~ " success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" -#~ msgid "Creating VSA: %s" +#~ msgid "_get_vdisk_copy_attrs: Could not get vdisk copy data" #~ msgstr "" -#~ msgid "" -#~ "VSA ID %(vsa_id)d %(vsa_name)s: Create " -#~ "volume %(vol_name)s, %(vol_size)d GB, type " -#~ "%(vol_type_id)s" +#~ msgid "_get_pool_attrs: Pool %s does not exist" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)d: Update VSA status to %(status)s" +#~ msgid "enter: _execute_command_and_parse_attributes: command %s" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)d: Update VSA call" +#~ msgid "" +#~ "leave: _execute_command_and_parse_attributes:\n" +#~ "command: %(cmd)s\n" +#~ "attributes: %(attr)s" #~ msgstr "" -#~ msgid "Adding %(add_cnt)s VCs to VSA %(vsa_name)s." 
+#~ msgid "" +#~ "_get_hdr_dic: attribute headers and values do not match.\n" +#~ " Headers: %(header)s\n" +#~ " Values: %(row)s" #~ msgstr "" -#~ msgid "Deleting %(del_cnt)s VCs from VSA %(vsa_name)s." +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ "stdout: %(out)s\n" +#~ "stderr: %(err)s\n" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Deleting %(direction)s volume %(vol_name)s" +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" -#~ msgid "Unable to delete volume %s" +#~ msgid "Did not find expected column in %(fun)s: %(hdr)s" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Forced delete. %(direction)s volume %(vol_name)s" +#~ msgid "" +#~ "Volume size %(size)s cannot be lesser" +#~ " than the snapshot size %(snap_size)s. " +#~ "They must be >= original snapshot " +#~ "size." #~ msgstr "" -#~ msgid "Going to try to terminate VSA ID %s" +#~ msgid "" +#~ "Clones currently disallowed when %(size)s " +#~ "< %(source_size)s. They must be >= " +#~ "original volume size." #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Delete instance %(name)s" +#~ msgid "" +#~ "Size of specified image %(image_size)s " +#~ "is larger than volume size " +#~ "%(volume_size)s." #~ msgstr "" -#~ msgid "Create call received for VSA %s" +#~ msgid "" +#~ "Image minDisk size %(min_disk)s is " +#~ "larger than the volume size " +#~ "%(volume_size)s." #~ msgstr "" -#~ msgid "Failed to find VSA %(vsa_id)d" +#~ msgid "Updating volume %(volume_id)s with %(update)s" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Drive %(vol_id)s created. Status %(status)s" +#~ msgid "Volume %s: resetting 'creating' status failed" #~ msgstr "" -#~ msgid "Drive %(vol_name)s (%(vol_disp_name)s) still in creating phase - wait" +#~ msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s" #~ msgstr "" -#~ msgid "" -#~ "VSA ID %(vsa_id)d: Not all volumes " -#~ "are created (%(cvol_real)d of %(cvol_exp)d)" +#~ msgid "Marking volume %s as bootable" #~ msgstr "" #~ msgid "" -#~ "VSA ID %(vsa_id)d: Drive %(vol_name)s " -#~ "(%(vol_disp_name)s) is in %(status)s state" +#~ "Attempting download of %(image_id)s " +#~ "(%(image_location)s) to volume %(volume_id)s" #~ msgstr "" -#~ msgid "Failed to update attach status for volume %(vol_name)s. %(ex)s" +#~ msgid "" +#~ "Downloaded image %(image_id)s (%(image_location)s)" +#~ " to volume %(volume_id)s successfully" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)d: Delete all BE volumes" +#~ msgid "" +#~ "Creating volume glance metadata for " +#~ "volume %(volume_id)s backed by image " +#~ "%(image_id)s with: %(vol_metadata)s" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)d: Start %(vc_count)d instances" +#~ msgid "" +#~ "Cloning %(volume_id)s from image %(image_id)s" +#~ " at location %(image_location)s" #~ msgstr "" diff --git a/cinder/locale/ur/LC_MESSAGES/cinder.po b/cinder/locale/ur/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..e1bc82a6b2 --- /dev/null +++ b/cinder/locale/ur/LC_MESSAGES/cinder.po @@ -0,0 +1,10106 @@ +# Urdu translations for cinder. +# Copyright (C) 2013 ORGANIZATION +# This file is distributed under the same license as the cinder project. 
+# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Cinder\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-02-03 06:16+0000\n" +"PO-Revision-Date: 2013-10-07 06:14+0000\n" +"Last-Translator: openstackjenkins \n" +"Language-Team: Urdu " +"(http://www.transifex.com/projects/p/openstack/language/ur/)\n" +"Plural-Forms: nplurals=2; plural=(n != 1)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:102 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:66 cinder/brick/exception.py:33 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:88 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:107 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:112 +#, python-format +msgid "Volume driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:116 +#, python-format +msgid "Backup driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:120 +#, python-format +msgid "Connection to glance failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:124 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:129 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:133 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:137 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:141 +msgid "Volume driver not ready." +msgstr "" + +#: cinder/exception.py:145 cinder/brick/exception.py:74 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:150 +#, python-format +msgid "Invalid snapshot: %(reason)s" +msgstr "" + +#: cinder/exception.py:154 +#, python-format +msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:159 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:163 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:167 +msgid "The results are invalid." +msgstr "" + +#: cinder/exception.py:171 +#, python-format +msgid "Invalid input received: %(reason)s" +msgstr "" + +#: cinder/exception.py:175 +#, python-format +msgid "Invalid volume type: %(reason)s" +msgstr "" + +#: cinder/exception.py:179 +#, python-format +msgid "Invalid volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:183 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:187 +#, python-format +msgid "Invalid host: %(reason)s" +msgstr "" + +#: cinder/exception.py:193 cinder/brick/exception.py:81 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:197 +#, python-format +msgid "Invalid auth key: %(reason)s" +msgstr "" + +#: cinder/exception.py:201 +#, python-format +msgid "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" +msgstr "" + +#: cinder/exception.py:206 +msgid "Service is unavailable at this time." 
+msgstr "" + +#: cinder/exception.py:210 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:214 +#, python-format +msgid "The device in the path %(path)s is unavailable: %(reason)s" +msgstr "" + +#: cinder/exception.py:218 +#, python-format +msgid "Expected a uuid but received %(uuid)s." +msgstr "" + +#: cinder/exception.py:222 cinder/brick/exception.py:68 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:228 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:232 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "" +"Volume %(volume_id)s has no administration metadata with key " +"%(metadata_key)s." +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Invalid metadata: %(reason)s" +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Invalid metadata size: %(reason)s" +msgstr "" + +#: cinder/exception.py:250 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:255 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:264 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:269 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s deletion is not allowed with volumes " +"present with the type." +msgstr "" + +#: cinder/exception.py:274 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:278 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:282 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:287 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:291 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:295 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:328 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:332 +#, python-format +msgid "Unknown quota resources %(unknown)s." 
+msgstr "" + +#: cinder/exception.py:336 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:340 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:344 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:348 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:352 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:356 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:365 +#, python-format +msgid "Volume Type %(id)s already exists." +msgstr "" + +#: cinder/exception.py:369 +#, python-format +msgid "Volume type encryption for type %(type_id)s already exists." +msgstr "" + +#: cinder/exception.py:373 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:377 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:381 +#, python-format +msgid "Could not find parameter %(param)s" +msgstr "" + +#: cinder/exception.py:385 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:389 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:398 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:402 +#, python-format +msgid "Quota exceeded: code=%(code)s" +msgstr "" + +#: cinder/exception.py:409 +#, python-format +msgid "" +"Requested volume or snapshot exceeds allowed Gigabytes quota. Requested " +"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." +msgstr "" + +#: cinder/exception.py:415 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:419 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:423 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:427 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:432 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:436 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:440 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:444 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:449 +#, python-format +msgid "Glance metadata for volume/snapshot %(id)s cannot be found." 
+msgstr "" + +#: cinder/exception.py:453 +#, python-format +msgid "Failed to export for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:457 +#, python-format +msgid "Failed to create metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:461 +#, python-format +msgid "Failed to update metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:465 +#, python-format +msgid "Failed to copy metadata to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:469 +#, python-format +msgid "Failed to copy image to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:473 +msgid "Invalid Ceph args provided for backup rbd operation" +msgstr "" + +#: cinder/exception.py:477 +msgid "An error has occurred during backup operation" +msgstr "" + +#: cinder/exception.py:481 +msgid "Backup RBD operation failed" +msgstr "" + +#: cinder/exception.py:485 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:489 +msgid "Failed to identify volume backend." +msgstr "" + +#: cinder/exception.py:493 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "" + +#: cinder/exception.py:497 +#, python-format +msgid "Connection to swift failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:501 +#, python-format +msgid "Transfer %(transfer_id)s could not be found." +msgstr "" + +#: cinder/exception.py:505 +#, python-format +msgid "Volume migration failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:509 +#, python-format +msgid "SSH command injection detected: %(command)s" +msgstr "" + +#: cinder/exception.py:513 +#, python-format +msgid "QoS Specs %(specs_id)s already exists." +msgstr "" + +#: cinder/exception.py:517 +#, python-format +msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:522 +#, python-format +msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "No such QoS spec %(specs_id)s." +msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:536 +#, python-format +msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:541 +#, python-format +msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." +msgstr "" + +#: cinder/exception.py:546 +#, python-format +msgid "Invalid qos specs: %(reason)s" +msgstr "" + +#: cinder/exception.py:550 +#, python-format +msgid "QoS Specs %(specs_id)s is still associated with entities." +msgstr "" + +#: cinder/exception.py:554 +#, python-format +msgid "key manager error: %(reason)s" +msgstr "" + +#: cinder/exception.py:560 +msgid "Coraid Cinder Driver exception." +msgstr "" + +#: cinder/exception.py:564 +msgid "Failed to encode json data." +msgstr "" + +#: cinder/exception.py:568 +msgid "Login on ESM failed." +msgstr "" + +#: cinder/exception.py:572 +msgid "Relogin on ESM failed." +msgstr "" + +#: cinder/exception.py:576 +#, python-format +msgid "Group with name \"%(group_name)s\" not found." +msgstr "" + +#: cinder/exception.py:580 +#, python-format +msgid "ESM configure request failed: %(message)s." +msgstr "" + +#: cinder/exception.py:584 +#, python-format +msgid "Coraid ESM not available with reason: %(reason)s." +msgstr "" + +#: cinder/exception.py:589 +msgid "Zadara Cinder Driver exception." 
+msgstr "" + +#: cinder/exception.py:593 +#, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:597 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:601 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:605 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:609 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:613 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:618 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:622 +msgid "SolidFire Cinder Driver exception" +msgstr "" + +#: cinder/exception.py:626 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:630 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:636 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:641 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:645 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:649 cinder/exception.py:662 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:654 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:658 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/manager.py:133 +msgid "Notifying Schedulers of capabilities ..." +msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:105 +#, python-format +msgid "" +"Default quota for resource: %(res)s is set by the default quota flag: " +"quota_%(res)s, it is now deprecated. Please use the the default quota " +"class for default quota." +msgstr "" + +#: cinder/quota.py:748 +#, python-format +msgid "Created reservations %s" +msgstr "" + +#: cinder/quota.py:770 +#, python-format +msgid "Failed to commit reservations %s" +msgstr "" + +#: cinder/quota.py:790 +#, python-format +msgid "Failed to roll back reservations %s" +msgstr "" + +#: cinder/quota.py:876 +msgid "Cannot register resource" +msgstr "" + +#: cinder/quota.py:879 +msgid "Cannot register resources" +msgstr "" + +#: cinder/quota_utils.py:46 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume - " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/quota_utils.py:56 cinder/transfer/api.py:168 +#: cinder/volume/flows/api/create_volume.py:520 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/service.py:95 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:108 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:148 +#, python-format +msgid "" +"Report interval must be less than service down time. Current config " +"service_down_time: %(service_down_time)s, report_interval for this: " +"service is: %(report_interval)s. 
Setting global service_down_time to: " +"%(new_down_time)s" +msgstr "" + +#: cinder/service.py:216 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:255 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:270 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:276 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:298 +#, python-format +msgid "" +"Value of config option %(name)s_workers must be integer greater than 1. " +"Input value ignored." +msgstr "" + +#: cinder/service.py:373 +msgid "serve() can only be called once" +msgstr "" + +#: cinder/service.py:379 cinder/openstack/common/service.py:166 +#: cinder/openstack/common/service.py:384 +msgid "Full set of CONF:" +msgstr "" + +#: cinder/service.py:387 +#, python-format +msgid "%s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Can not translate %s to integer." +msgstr "" + +#: cinder/utils.py:127 +#, python-format +msgid "May specify only one of %s" +msgstr "" + +#: cinder/utils.py:212 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:228 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:412 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:423 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:698 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:759 +#, python-format +msgid "Volume driver %s not initialized" +msgstr "" + +#: cinder/wsgi.py:127 cinder/openstack/common/sslutils.py:50 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: cinder/wsgi.py:130 cinder/openstack/common/sslutils.py:53 +#, python-format +msgid "Unable to find ca_file : %s" +msgstr "" + +#: cinder/wsgi.py:133 cinder/openstack/common/sslutils.py:56 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: cinder/wsgi.py:136 cinder/openstack/common/sslutils.py:59 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:169 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:206 +#, python-format +msgid "Started %(name)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:244 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:313 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." 
+msgstr "" + +#: cinder/api/common.py:92 cinder/api/common.py:126 cinder/volume/api.py:266 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:95 cinder/api/common.py:130 cinder/volume/api.py:263 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:120 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:134 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:162 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:189 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:182 +msgid "Initializing extension manager." +msgstr "" + +#: cinder/api/extensions.py:197 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:235 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:236 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:240 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:256 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:262 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:276 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:287 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:356 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:266 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:463 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:786 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:907 +msgid "subclasses must implement construct()!" 
+msgstr "" + +#: cinder/api/contrib/admin_actions.py:81 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:227 +#: cinder/api/contrib/volume_transfer.py:157 +#: cinder/api/contrib/volume_transfer.py:193 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:224 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:60 +msgid "Snapshot not found." +msgstr "" + +#: cinder/api/contrib/hosts.py:86 cinder/api/openstack/wsgi.py:245 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:136 +#, python-format +msgid "Host '%s' could not be found." +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:168 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:180 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:206 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:214 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:111 +msgid "Please specify a name for QoS specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:220 +msgid "Failed to disassociate qos specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:222 +msgid "Qos specs still in use." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:298 +#: cinder/api/contrib/qos_specs_manage.py:351 +msgid "Volume Type id must not be None." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:72 +msgid "Missing required element quota_class_set in request body." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:81 +msgid "Quota class limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:85 +msgid "Quota class limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:60 +msgid "Quota limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quotas.py:65 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:100 +msgid "Missing required element quota_set in request body." +msgstr "" + +#: cinder/api/contrib/quotas.py:111 +#, python-format +msgid "Bad key(s) in quota set: %s" +msgstr "" + +#: cinder/api/contrib/scheduler_hints.py:36 +msgid "Malformed scheduler_hints attribute" +msgstr "" + +#: cinder/api/contrib/services.py:84 +msgid "" +"Query by service parameter is deprecated. Please use binary parameter " +"instead." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:51 +msgid "'status' must be specified." 
+msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:61 +#, python-format +msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:67 +#, python-format +msgid "" +"Provided snapshot status %(provided)s not allowed for snapshot with " +"status %(current)s." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:79 +msgid "progress must be an integer percentage" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:101 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:105 +#: cinder/api/v1/snapshot_metadata.py:75 cinder/api/v1/volume_metadata.py:75 +#: cinder/api/v2/snapshot_metadata.py:75 cinder/api/v2/volume_metadata.py:74 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:108 +#: cinder/api/v1/snapshot_metadata.py:79 cinder/api/v1/volume_metadata.py:79 +#: cinder/api/v2/snapshot_metadata.py:79 cinder/api/v2/volume_metadata.py:78 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:150 +msgid "" +"Key names can only contain alphanumeric characters, underscores, periods," +" colons and hyphens." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:99 +#, python-format +msgid "" +"Invalid request to attach volume to an instance %(instance_uuid)s and a " +"host %(host_name)s simultaneously" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:107 +msgid "Invalid request to attach volume to an invalid target" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:111 +msgid "" +"Invalid request to attach volume with an invalid mode. Attaching mode " +"should be 'rw' or 'ro'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:196 +msgid "Unable to fetch connection information from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:216 +msgid "Unable to terminate volume connection from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:229 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:237 +msgid "Bad value for 'force' parameter." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:240 +msgid "'force' is not string or bool." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:280 +msgid "New volume size must be specified as an integer." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:299 +msgid "Must specify readonly in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:307 +msgid "Bad value for 'readonly'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:311 +msgid "'readonly' not string or bool" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:325 +msgid "New volume type must be specified." 
+msgstr "" + +#: cinder/api/contrib/volume_transfer.py:131 +msgid "Listing volume transfers" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:147 +#, python-format +msgid "Creating new volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:162 +#, python-format +msgid "Creating transfer of volume %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:183 +#, python-format +msgid "Accepting volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:196 +#, python-format +msgid "Accepting transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:217 +#, python-format +msgid "Delete transfer with id: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:64 +msgid "key_size must be non-negative" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:67 +msgid "key_size must be an integer" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:73 +msgid "provider must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:75 +msgid "control_location must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:83 +#, python-format +msgid "Valid control location are: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:111 +msgid "Cannot create encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:115 +msgid "Create body is not valid." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:157 +msgid "Cannot delete encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/middleware/auth.py:108 +msgid "Invalid service catalog json." +msgstr "" + +#: cinder/api/middleware/fault.py:44 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:53 cinder/api/openstack/wsgi.py:984 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/fault.py:69 +#, python-format +msgid "%(exception)s: %(explanation)s" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:55 cinder/api/middleware/sizelimit.py:64 +#: cinder/api/middleware/sizelimit.py:78 +msgid "Request is too large." +msgstr "" + +#: cinder/api/openstack/__init__.py:69 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:80 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:104 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:126 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." 
+msgstr "" + +#: cinder/api/openstack/wsgi.py:220 cinder/api/openstack/wsgi.py:634 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:639 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:677 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:682 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:685 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:793 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:799 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:803 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:914 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:917 cinder/api/openstack/wsgi.py:930 +#: cinder/api/v1/snapshot_metadata.py:53 cinder/api/v1/snapshot_metadata.py:71 +#: cinder/api/v1/snapshot_metadata.py:96 cinder/api/v1/snapshot_metadata.py:121 +#: cinder/api/v1/volume_metadata.py:53 cinder/api/v1/volume_metadata.py:71 +#: cinder/api/v1/volume_metadata.py:96 cinder/api/v1/volume_metadata.py:121 +#: cinder/api/v2/snapshot_metadata.py:53 cinder/api/v2/snapshot_metadata.py:71 +#: cinder/api/v2/snapshot_metadata.py:96 cinder/api/v2/snapshot_metadata.py:121 +#: cinder/api/v2/volume_metadata.py:52 cinder/api/v2/volume_metadata.py:70 +#: cinder/api/v2/volume_metadata.py:95 cinder/api/v2/volume_metadata.py:120 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:927 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:939 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:987 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:139 cinder/api/v2/limits.py:138 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:264 cinder/api/v2/limits.py:261 +msgid "This request was rate-limited." 
+msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:37 cinder/api/v1/snapshot_metadata.py:117 +#: cinder/api/v1/snapshot_metadata.py:156 cinder/api/v2/snapshot_metadata.py:37 +#: cinder/api/v2/snapshot_metadata.py:117 +#: cinder/api/v2/snapshot_metadata.py:156 +msgid "snapshot does not exist" +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:139 +#: cinder/api/v1/snapshot_metadata.py:149 cinder/api/v1/volume_metadata.py:139 +#: cinder/api/v1/volume_metadata.py:149 cinder/api/v2/snapshot_metadata.py:139 +#: cinder/api/v2/snapshot_metadata.py:149 cinder/api/v2/volume_metadata.py:138 +#: cinder/api/v2/volume_metadata.py:148 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:119 cinder/api/v2/snapshots.py:120 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:184 +msgid "'volume_id' must be specified" +msgstr "" + +#: cinder/api/v1/snapshots.py:182 cinder/api/v2/snapshots.py:193 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:186 cinder/api/v2/snapshots.py:202 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:37 cinder/api/v1/volume_metadata.py:117 +#: cinder/api/v1/volume_metadata.py:156 cinder/api/v2/volume_metadata.py:36 +#: cinder/api/v2/volume_metadata.py:116 cinder/api/v2/volume_metadata.py:155 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:111 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:290 cinder/api/v2/volumes.py:228 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:344 cinder/api/v1/volumes.py:348 +#: cinder/api/v2/volumes.py:298 cinder/api/v2/volumes.py:302 +msgid "Invalid imageRef provided." +msgstr "" + +#: cinder/api/v1/volumes.py:388 cinder/api/v2/volumes.py:354 +#, python-format +msgid "snapshot id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:401 +#, python-format +msgid "source vol id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:412 cinder/api/v2/volumes.py:377 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/v1/volumes.py:496 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/snapshots.py:111 cinder/api/v2/snapshots.py:126 +#: cinder/api/v2/snapshots.py:267 +msgid "Snapshot could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:174 cinder/api/v2/snapshots.py:234 +#: cinder/api/v2/volumes.py:313 cinder/api/v2/volumes.py:419 +#, python-format +msgid "Missing required element '%s' in request body" +msgstr "" + +#: cinder/api/v2/snapshots.py:190 cinder/api/v2/volumes.py:217 +#: cinder/api/v2/volumes.py:234 cinder/api/v2/volumes.py:449 +msgid "Volume could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:230 cinder/api/v2/volumes.py:415 +msgid "Missing request body" +msgstr "" + +#: cinder/api/v2/types.py:70 +msgid "Volume type not found" +msgstr "" + +#: cinder/api/v2/volumes.py:237 +msgid "Volume cannot be deleted while in attached state" +msgstr "" + +#: cinder/api/v2/volumes.py:343 +msgid "Volume type not found." 
+msgstr "" + +#: cinder/api/v2/volumes.py:366 +#, python-format +msgid "source volume id:%s not found" +msgstr "" + +#: cinder/api/v2/volumes.py:472 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:66 +msgid "Backup status must be available or error" +msgstr "" + +#: cinder/backup/api.py:105 +msgid "Volume to be backed up must be available" +msgstr "" + +#: cinder/backup/api.py:140 +msgid "Backup status must be available" +msgstr "" + +#: cinder/backup/api.py:145 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:154 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:170 +msgid "Volume to be restored to must be available" +msgstr "" + +#: cinder/backup/api.py:176 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." +msgstr "" + +#: cinder/backup/api.py:181 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:97 +msgid "NULL host not allowed for volume backend lookup." +msgstr "" + +#: cinder/backup/manager.py:100 +#, python-format +msgid "Checking hostname '%s' for backend info." +msgstr "" + +#: cinder/backup/manager.py:107 +#, python-format +msgid "Backend not found in hostname (%s) so using default." +msgstr "" + +#: cinder/backup/manager.py:117 +#, python-format +msgid "Manager requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:120 cinder/backup/manager.py:132 +msgid "Fetching default backend." +msgstr "" + +#: cinder/backup/manager.py:123 +#, python-format +msgid "Volume manager for backend '%s' does not exist." +msgstr "" + +#: cinder/backup/manager.py:129 +#, python-format +msgid "Driver requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:147 +#, python-format +msgid "" +"Registering backend %(backend)s (host=%(host)s " +"backend_name=%(backend_name)s)." +msgstr "" + +#: cinder/backup/manager.py:154 +#, python-format +msgid "Registering default backend %s." +msgstr "" + +#: cinder/backup/manager.py:158 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)." +msgstr "" + +#: cinder/backup/manager.py:165 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s." +msgstr "" + +#: cinder/backup/manager.py:184 +msgid "Cleaning up incomplete backup operations." +msgstr "" + +#: cinder/backup/manager.py:189 +#, python-format +msgid "Resetting volume %s to available (was backing-up)." +msgstr "" + +#: cinder/backup/manager.py:194 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)." +msgstr "" + +#: cinder/backup/manager.py:206 +#, python-format +msgid "Resetting backup %s to error (was creating)." +msgstr "" + +#: cinder/backup/manager.py:212 +#, python-format +msgid "Resetting backup %s to available (was restoring)." +msgstr "" + +#: cinder/backup/manager.py:217 +#, python-format +msgid "Resuming delete on backup: %s." +msgstr "" + +#: cinder/backup/manager.py:225 +#, python-format +msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:237 +#, python-format +msgid "" +"Create backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s." 
+msgstr "" + +#: cinder/backup/manager.py:249 +#, python-format +msgid "" +"Create backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:282 +#, python-format +msgid "Create backup finished. backup: %s." +msgstr "" + +#: cinder/backup/manager.py:286 +#, python-format +msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:299 +#, python-format +msgid "" +"Restore backup aborted: expected volume status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:310 +#, python-format +msgid "" +"Restore backup aborted: expected backup status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:329 +#, python-format +msgid "" +"Restore backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:360 +#, python-format +msgid "" +"Restore backup finished, backup %(backup_id)s restored to volume " +"%(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:379 +#, python-format +msgid "Delete backup started, backup: %s." +msgstr "" + +#: cinder/backup/manager.py:386 +#, python-format +msgid "" +"Delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:399 +#, python-format +msgid "" +"Delete backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:422 +#, python-format +msgid "Delete backup finished, backup %s deleted." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:116 +msgid "" +"rbd striping not supported - ignoring configuration settings for rbd " +"striping" +msgstr "" + +#: cinder/backup/drivers/ceph.py:147 +#, python-format +msgid "invalid user '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:213 +msgid "backup_id required" +msgstr "" + +#: cinder/backup/drivers/ceph.py:224 +#, python-format +msgid "discarding %(length)s bytes from offset %(offset)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:232 +#, python-format +msgid "writing zeroes chunk %d" +msgstr "" + +#: cinder/backup/drivers/ceph.py:246 +#, python-format +msgid "transferring data between '%(src)s' and '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:250 +#, python-format +msgid "%(chunks)s chunks of %(bytes)s bytes to be transferred" +msgstr "" + +#: cinder/backup/drivers/ceph.py:269 +#, python-format +msgid "transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:279 +#, python-format +msgid "transferring remaining %s bytes" +msgstr "" + +#: cinder/backup/drivers/ceph.py:295 +#, python-format +msgid "creating base image '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:322 cinder/backup/drivers/ceph.py:603 +#, python-format +msgid "deleting backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:325 +msgid "no backup snapshot to delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:361 +#, python-format +msgid "trying diff format name format basename='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:369 +#, python-format +msgid "image %s not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:377 +#, python-format +msgid "base image still has %s snapshots so skipping base image delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:382 +#, python-format +msgid "deleting base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:389 +#, python-format +msgid "image busy, retrying %(retries)s more time(s) in %(delay)ss" +msgstr "" + +#: cinder/backup/drivers/ceph.py:394 +msgid "max retries reached - raising error" +msgstr "" + +#: cinder/backup/drivers/ceph.py:397 +#, python-format +msgid "base backup image='%s' deleted)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:407 +#, python-format +msgid "deleting source snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:453 +#, python-format +msgid "performing differential transfer from '%(src)s' to '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:478 +#, python-format +msgid "rbd diff op failed - (ret=%(ret)s stderr=%(stderr)s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:488 +#, python-format +msgid "image '%s' not found - trying diff format name" +msgstr "" + +#: cinder/backup/drivers/ceph.py:493 +#, python-format +msgid "diff format image '%s' not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:528 +#, python-format +msgid "using --from-snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:543 +#, python-format +msgid "source snap '%s' is stale so deleting" +msgstr "" + +#: cinder/backup/drivers/ceph.py:555 +#, python-format +msgid "" +"snap='%(snap)s' does not exist in base image='%(base)s' - aborting " +"incremental backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:566 +#, python-format +msgid "creating backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:586 +#, python-format +msgid "differential backup transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:595 +msgid "differential backup transfer failed" +msgstr "" + +#: 
cinder/backup/drivers/ceph.py:625 +#, python-format +msgid "creating base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:634 +msgid "copying data" +msgstr "" + +#: cinder/backup/drivers/ceph.py:694 +#, python-format +msgid "looking for snapshot of backup base '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:697 +#, python-format +msgid "backup base '%s' has no snapshots" +msgstr "" + +#: cinder/backup/drivers/ceph.py:704 +#, python-format +msgid "backup '%s' has no snapshot" +msgstr "" + +#: cinder/backup/drivers/ceph.py:708 +#, python-format +msgid "backup should only have one snapshot but instead has %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:713 +#, python-format +msgid "found snapshot '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:734 +msgid "need non-zero volume size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:751 +#, python-format +msgid "Starting backup of volume='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:764 +msgid "forcing full backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:776 +#, python-format +msgid "backup '%s' finished." +msgstr "" + +#: cinder/backup/drivers/ceph.py:834 +msgid "adjusting restore vol size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:846 +#, python-format +msgid "trying incremental restore from base='%(base)s' snap='%(snap)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:858 +msgid "differential restore failed, trying full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:869 +#, python-format +msgid "restore transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:916 +#, python-format +msgid "rbd has %s extents" +msgstr "" + +#: cinder/backup/drivers/ceph.py:938 +msgid "dest volume is original volume - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:959 +msgid "destination has extents - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:964 +#, python-format +msgid "no restore point found for backup='%s', forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:995 +msgid "forcing full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1005 +#, python-format +msgid "starting restore from Ceph backup=%(src)s to volume=%(dest)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1016 +msgid "volume_file does not support fileno() so skipping fsync()" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1021 +msgid "restore finished successfully." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:1023 +#, python-format +msgid "restore finished with error - %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1029 +#, python-format +msgid "delete started for backup=%s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1034 +msgid "rbd image not found but continuing anyway so that db entry can be removed" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1037 +#, python-format +msgid "delete '%s' finished with warning" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1039 +#, python-format +msgid "delete '%s' finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:106 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:123 +#, python-format +msgid "single_user auth mode enabled, but %(param)s not set" +msgstr "" + +#: cinder/backup/drivers/swift.py:141 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:146 +#, python-format +msgid "container %s does not exist" +msgstr "" + +#: cinder/backup/drivers/swift.py:151 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/drivers/swift.py:157 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:173 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:182 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:192 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:209 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/drivers/swift.py:214 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:219 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:224 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/drivers/swift.py:234 +#, python-format +msgid "volume size %d is invalid." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:248 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:271 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/drivers/swift.py:278 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:287 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/drivers/swift.py:291 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/drivers/swift.py:297 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:301 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:304 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:312 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/drivers/swift.py:328 cinder/backup/drivers/tsm.py:324 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/drivers/swift.py:345 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/drivers/swift.py:350 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:356 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/drivers/swift.py:362 +#, python-format +msgid "" +"restoring object from swift. backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:378 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/drivers/swift.py:401 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:409 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:423 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:428 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:432 cinder/backup/drivers/tsm.py:378 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:446 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:455 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:458 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:468 cinder/backup/drivers/tsm.py:440 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/backup/drivers/tsm.py:85 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to create device hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:143 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to obtain backup success notification from " +"server.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:173 +#, python-format +msgid "" +"restore: %(vol_id)s Failed.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:199 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a block device." +msgstr "" + +#: cinder/backup/drivers/tsm.py:206 +#, python-format +msgid "backup: %(vol_id)s Failed. Cannot obtain real path to device %(path)s." +msgstr "" + +#: cinder/backup/drivers/tsm.py:213 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a file." +msgstr "" + +#: cinder/backup/drivers/tsm.py:260 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to remove backup hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:286 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to TSM, volume path: " +"%(volume_path)s," +msgstr "" + +#: cinder/backup/drivers/tsm.py:298 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:308 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:338 +#, python-format +msgid "" +"restore: starting restore of backup from TSM to volume %(volume_id)s, " +"backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:352 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:362 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:413 +#, python-format +msgid "" +"delete: %(vol_id)s Failed to run dsmc with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:421 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments with " +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:432 +#, python-format +msgid "" +"delete: %(vol_id)s Failed with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/brick/exception.py:55 +#, python-format +msgid "Exception in string format operation. 
msg='%s'" +msgstr "" + +#: cinder/brick/exception.py:85 +msgid "We are unable to locate any Fibre Channel devices." +msgstr "" + +#: cinder/brick/exception.py:89 +msgid "Unable to find a Fibre Channel volume device." +msgstr "" + +#: cinder/brick/exception.py:93 +#, python-format +msgid "Volume device not found at %(device)s." +msgstr "" + +#: cinder/brick/exception.py:97 +#, python-format +msgid "Unable to find Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:101 +#, python-format +msgid "Failed to create Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:105 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:109 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:113 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:117 +#, python-format +msgid "Connect to volume via protocol %(protocol)s not supported." +msgstr "" + +#: cinder/brick/initiator/connector.py:127 +#, python-format +msgid "Invalid InitiatorConnector protocol specified %(protocol)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:140 +#, python-format +msgid "Failed to access the device on the path %(path)s: %(error)s %(info)s." +msgstr "" + +#: cinder/brick/initiator/connector.py:229 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. Try" +" number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:242 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:317 +#, python-format +msgid "Could not find the iSCSI Initiator File %s" +msgstr "" + +#: cinder/brick/initiator/connector.py:609 +msgid "We are unable to locate any Fibre Channel devices" +msgstr "" + +#: cinder/brick/initiator/connector.py:619 +#, python-format +msgid "Looking for Fibre Channel dev %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:629 +msgid "Fibre Channel volume device not found." +msgstr "" + +#: cinder/brick/initiator/connector.py:633 +#, python-format +msgid "Fibre volume not yet found. Will rescan & retry. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:649 +#, python-format +msgid "Found Fibre Channel volume %(name)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:658 +#, python-format +msgid "Multipath device discovered %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:776 +#, python-format +msgid "AoE volume not yet found at: %(path)s. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:789 +#, python-format +msgid "Found AoE device %(path)s (after %(tries)s rediscover)" +msgstr "" + +#: cinder/brick/initiator/connector.py:815 +#, python-format +msgid "aoe-discover: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:825 +#, python-format +msgid "aoe-revalidate %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:834 +#, python-format +msgid "aoe-flush %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:858 +msgid "" +"Connection details not present. RemoteFsClient may not initialize " +"properly." 
+msgstr "" + +#: cinder/brick/initiator/connector.py:915 +msgid "Invalid connection_properties specified no device_path attribute" +msgstr "" + +#: cinder/brick/initiator/linuxfc.py:50 cinder/brick/initiator/linuxfc.py:56 +msgid "systool is not installed" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:99 +#: cinder/brick/initiator/linuxscsi.py:107 +#: cinder/brick/initiator/linuxscsi.py:124 +#, python-format +msgid "multipath call failed exit (%(code)s)" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:145 +#, python-format +msgid "Couldn't find multipath device %(line)s" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:149 +#, python-format +msgid "Found multipath device = %(mdev)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:140 +msgid "Attempting recreate of backing lun..." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:158 +#, python-format +msgid "" +"Failed to recover attempt to create iscsi backing lun for volume " +"id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:177 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:184 +#, python-format +msgid "" +"Created volume path %(vp)s,\n" +"content: %(vc)%" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:216 cinder/brick/iscsi/iscsi.py:365 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:227 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:258 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:262 +#, python-format +msgid "Volume path %s does not exist, nothing to remove." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:280 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:290 cinder/brick/iscsi/iscsi.py:550 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:375 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:469 +msgid "cinder-rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:489 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:513 cinder/brick/iscsi/iscsi.py:522 +#, python-format +msgid "Failed to create iscsi target for volume id:%s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:532 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:542 +#, python-format +msgid "Failed to remove iscsi target for volume id:%s." 
+msgstr "" + +#: cinder/brick/iscsi/iscsi.py:571 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 cinder/brick/local_dev/lvm.py:158 +#: cinder/brick/local_dev/lvm.py:474 cinder/brick/local_dev/lvm.py:503 +#: cinder/brick/local_dev/lvm.py:546 cinder/brick/local_dev/lvm.py:609 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 cinder/brick/local_dev/lvm.py:159 +#: cinder/brick/local_dev/lvm.py:475 cinder/brick/local_dev/lvm.py:504 +#: cinder/brick/local_dev/lvm.py:547 cinder/brick/local_dev/lvm.py:610 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 cinder/brick/local_dev/lvm.py:160 +#: cinder/brick/local_dev/lvm.py:476 cinder/brick/local_dev/lvm.py:505 +#: cinder/brick/local_dev/lvm.py:548 cinder/brick/local_dev/lvm.py:611 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, python-format +msgid "Unable to locate Volume Group %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:157 +msgid "Error querying thin pool about data_percent" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:370 +#, python-format +msgid "Unable to find VG: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:420 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:473 +msgid "Error creating Volume" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:489 +#, python-format +msgid "Unable to find LV: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:502 +msgid "Error creating snapshot" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:545 +msgid "Error activating LV" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:563 +#, python-format +msgid "Error reported running lvremove: CMD: %(command)s, RESPONSE: %(response)s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:568 +msgid "Attempting udev settle and retry of lvremove..." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:608 +msgid "Error extending Volume" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:39 +msgid "nfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:45 +msgid "glusterfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:86 +#, python-format +msgid "Already mounted: %s" +msgstr "" + +#: cinder/common/config.py:125 +msgid "Deploy v1 of the Cinder API." +msgstr "" + +#: cinder/common/config.py:128 +msgid "Deploy v2 of the Cinder API." +msgstr "" + +#: cinder/common/sqlalchemyutils.py:66 +#: cinder/openstack/common/db/sqlalchemy/utils.py:72 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:114 +#: cinder/openstack/common/db/sqlalchemy/utils.py:120 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/compute/nova.py:97 +#, python-format +msgid "Novaclient connection created using URL: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:63 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:190 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:843 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1842 +#, python-format +msgid "VolumeType %s deletion failed, VolumeType in use." 
+msgstr "" + +#: cinder/db/sqlalchemy/api.py:2530 +#, python-format +msgid "No backup with id %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2615 +msgid "Volume must be available" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2639 +#, python-format +msgid "Volume in unexpected state %s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2662 +#, python-format +msgid "" +"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " +"%(status)s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:37 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:64 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:240 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:269 +msgid "Downgrade from initial Cinder install is unsupported." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:49 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:74 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:105 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:45 +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:48 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:46 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:127 +msgid "Dropping foreign key reservations_ibfk_1 failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:133 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:140 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:147 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:60 +msgid "Exception while creating table 'volume_glance_metadata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:75 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:68 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:58 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:61 +msgid "transfers table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:31 +msgid "migrations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:61 +#, python-format +msgid "Table |%s| not created" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:37 +#, python-format +msgid "Exception while dropping table %s." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:100 +#, python-format +msgid "Exception while creating table %s." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:34 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:43 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:49 +#, python-format +msgid "Column |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:92 +msgid "encryption_key_id column not dropped from volumes" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:100 +msgid "encryption_key_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:105 +msgid "volume_type_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:113 +msgid "encryption table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:49 +msgid "Table quality_of_service_specs not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:60 +msgid "Added qos_specs_id column to volume type table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:85 +msgid "Dropping foreign key volume_types_ibfk_1 failed" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:93 +msgid "Dropping qos_specs_id column failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:100 +msgid "Dropping quality_of_service_specs table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:59 +msgid "volume_admin_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:48 +msgid "" +"Found existing 'default' entries in the quota_classes table. Skipping " +"insertion of default values." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:72 +msgid "Added default quota class data into the DB." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:74 +msgid "Default quota class data not inserted into the DB." +msgstr "" + +#: cinder/image/glance.py:161 cinder/image/glance.py:169 +#, python-format +msgid "Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:94 cinder/image/image_utils.py:199 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/image/image_utils.py:101 +#, python-format +msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:109 cinder/image/image_utils.py:192 +#, python-format +msgid "" +"Size is %(image_size)dGB and doesn't fit in a volume of size " +"%(volume_size)dGB." +msgstr "" + +#: cinder/image/image_utils.py:157 +#, python-format +msgid "" +"qemu-img is not installed and image is of type %s. Only RAW images can " +"be used if qemu-img is not installed." +msgstr "" + +#: cinder/image/image_utils.py:164 +msgid "" +"qemu-img is not installed and the disk format is not specified. Only RAW" +" images can be used if qemu-img is not installed." 
+msgstr "" + +#: cinder/image/image_utils.py:178 +#, python-format +msgid "Copying image from %(tmp)s to volume %(dest)s - size: %(size)s" +msgstr "" + +#: cinder/image/image_utils.py:206 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:224 +#, python-format +msgid "Converted to %(vol_format)s, but format is now %(file_format)s" +msgstr "" + +#: cinder/image/image_utils.py:260 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:78 +msgid "" +"config option keymgr.fixed_key has not been defined: some operations may " +"fail unexpectedly" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:80 +msgid "keymgr.fixed_key not defined" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:134 +#, python-format +msgid "Not deleting key %s" +msgstr "" + +#: cinder/openstack/common/eventlet_backdoor.py:140 +#, python-format +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/fileutils.py:64 +#, python-format +msgid "Reloading cached file %s" +msgstr "" + +#: cinder/openstack/common/gettextutils.py:252 +msgid "Message objects do not support addition." +msgstr "" + +#: cinder/openstack/common/gettextutils.py:261 +msgid "" +"Message objects do not support str() because they may contain non-ascii " +"characters. Please use unicode() or translate() instead." +msgstr "" + +#: cinder/openstack/common/imageutils.py:96 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:189 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:200 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:227 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:235 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/log.py:301 +#, python-format +msgid "Deprecated: %s" +msgstr "" + +#: cinder/openstack/common/log.py:402 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:453 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:623 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:82 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:89 +#: cinder/tests/brick/test_brick_connector.py:466 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:129 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:136 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:43 +#, python-format +msgid "Unexpected argument for periodic task creation: %(arg)s." 
+msgstr "" + +#: cinder/openstack/common/periodic_task.py:134 +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:139 +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:177 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:186 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." +msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:127 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/openstack/common/processutils.py:142 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:167 +#: cinder/openstack/common/processutils.py:239 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:345 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:179 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/openstack/common/processutils.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:318 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:220 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/openstack/common/processutils.py:224 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/openstack/common/service.py:175 +#: cinder/openstack/common/service.py:269 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:187 +msgid "Exception during rpc cleanup." 
+msgstr "" + +#: cinder/openstack/common/service.py:238 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:275 +msgid "Unhandled exception" +msgstr "" + +#: cinder/openstack/common/service.py:308 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/openstack/common/service.py:327 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/openstack/common/service.py:337 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/openstack/common/service.py:354 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/openstack/common/service.py:358 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/service.py:362 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/openstack/common/service.py:392 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/openstack/common/service.py:410 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/openstack/common/sslutils.py:98 +#, python-format +msgid "Invalid SSL version : %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:86 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/strutils.py:182 +#, python-format +msgid "Invalid string format: %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:189 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/openstack/common/versionutils.py:69 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and " +"may be removed in %(remove_in)s." +msgstr "" + +#: cinder/openstack/common/versionutils.py:73 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s and may be removed in " +"%(remove_in)s. It will not be superseded." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:29 +msgid "An unknown error occurred in crypto utils." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:36 +#, python-format +msgid "Block size of %(given)d is too big, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:45 +#, python-format +msgid "Length of %(given)d is too long, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/db/exception.py:44 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:487 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:538 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:610 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/utils.py:33 +msgid "Sort key supplied was not valid." +msgstr "" + +#: cinder/openstack/common/notifier/api.py:129 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:145 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:164 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. 
Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:105 +#, python-format +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:83 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:216 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:299 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:345 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "received %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:422 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:423 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:280 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:459 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:594 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:597 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:631 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:640 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:668 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." 
+msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:166 +#: cinder/openstack/common/rpc/impl_qpid.py:164 +msgid "Failed to process message... skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:477 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:499 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:536 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:552 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:606 +#: cinder/openstack/common/rpc/impl_qpid.py:507 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:624 +#: cinder/openstack/common/rpc/impl_qpid.py:522 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:628 +#: cinder/openstack/common/rpc/impl_qpid.py:526 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:667 +#: cinder/openstack/common/rpc/impl_qpid.py:561 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:84 +#, python-format +msgid "Invalid value for qpid_topology_version: %d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:455 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:461 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:474 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:534 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:101 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:136 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:137 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:138 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:146 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:158 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:200 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:205 +msgid "You cannot send on this socket." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:267 +#, python-format +msgid "Running func with context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:305 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:387 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:437 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:443 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:475 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:481 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:497 +#, python-format +msgid "Required IPC directory does not exist at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:506 +#, python-format +msgid "Permission denied to IPC directory at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:509 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:543 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:562 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:590 +msgid "Skipping topic registration. Already registered." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:597 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:649 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:662 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:675 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:678 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:681 +#, python-format +msgid "Received message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:682 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:691 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:698 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:721 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:724 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:728 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:731 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:771 +#, python-format +msgid "topic is %s." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:815 +#, python-format +msgid "rpc_zmq_matchmaker = %(orig)s is deprecated; use %(new)s instead" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#: cinder/openstack/common/rpc/matchmaker_ring.py:79 +#: cinder/openstack/common/rpc/matchmaker_ring.py:97 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:54 +#, python-format +msgid "extra_spec requirement '%(req)s' does not match '%(cap)s'" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:67 +#, python-format +msgid "%(host_state)s fails resource_type extra_specs requirements" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:43 +msgid "Re-scheduling is disabled." +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:52 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/scheduler/driver.py:69 +msgid "Must implement host_passes_filters" +msgstr "" + +#: cinder/scheduler/driver.py:74 +msgid "Must implement find_retype_host" +msgstr "" + +#: cinder/scheduler/driver.py:78 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:82 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:98 +#, python-format +msgid "cannot place volume %(id)s on %(host)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:114 +#, python-format +msgid "No valid hosts for volume %(id)s with type %(type)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:125 +#, python-format +msgid "" +"Current host not valid for volume %(id)s with type %(type)s, migration " +"not allowed" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:156 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:174 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:207 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:259 +#, python-format +msgid "Filtered %s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:276 +#, python-format +msgid "Choosing %s" +msgstr "" + +#: cinder/scheduler/host_manager.py:264 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:269 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:294 +#, python-format +msgid "volume service is down or disabled. (host: %s)" +msgstr "" + +#: cinder/scheduler/manager.py:63 +msgid "" +"ChanceScheduler and SimpleScheduler have been deprecated due to lack of " +"support for advanced features like: volume types, volume encryption, QoS " +"etc. These two schedulers can be fully replaced by FilterScheduler with " +"certain combination of filters and weighers." 
+msgstr "" + +#: cinder/scheduler/manager.py:98 cinder/scheduler/manager.py:100 +msgid "Failed to create scheduler manager volume flow" +msgstr "" + +#: cinder/scheduler/manager.py:159 +msgid "New volume type not specified in request_spec." +msgstr "" + +#: cinder/scheduler/manager.py:174 +#, python-format +msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." +msgstr "" + +#: cinder/scheduler/manager.py:192 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:68 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%s'" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:43 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:57 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:53 +msgid "No volume_id provided to populate a request_spec from" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:116 +#, python-format +msgid "Failed to schedule_create_volume: %(cause)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:135 +#, python-format +msgid "Failed notifying on %(topic)s payload %(payload)s" +msgstr "" + +#: cinder/tests/fake_driver.py:57 cinder/volume/driver.py:784 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:76 cinder/volume/driver.py:884 +#, python-format +msgid "FAKE ISER: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:97 +msgid "local_path not implemented" +msgstr "" + +#: cinder/tests/fake_driver.py:124 cinder/tests/fake_driver.py:129 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:70 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:78 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:94 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:97 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:58 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_netapp_nfs.py:360 +#, python-format +msgid "Share %(share)s and file name %(file_name)s" +msgstr "" + +#: cinder/tests/test_rbd.py:768 cinder/volume/drivers/rbd.py:175 +msgid "flush() not supported in this version of librbd" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:260 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1507 +#, python-format +msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1510 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1515 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:60 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:61 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: 
cinder/tests/test_xiv_ds8k.py:102 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/tests/api/contrib/test_backups.py:741 +msgid "Invalid input" +msgstr "" + +#: cinder/tests/integrated/test_login.py:29 +#, python-format +msgid "volume: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:32 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:42 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:50 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:58 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:100 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:103 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:121 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:148 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:159 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:166 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/transfer/api.py:68 +msgid "Volume in unexpected state" +msgstr "" + +#: cinder/transfer/api.py:102 cinder/volume/api.py:367 +msgid "status must be available" +msgstr "" + +#: cinder/transfer/api.py:119 +#, python-format +msgid "Failed to create transfer record for %s" +msgstr "" + +#: cinder/transfer/api.py:136 +#, python-format +msgid "Attempt to transfer %s with invalid auth key." +msgstr "" + +#: cinder/transfer/api.py:156 cinder/volume/flows/api/create_volume.py:508 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/transfer/api.py:182 +#, python-format +msgid "Failed to update quota donating volume transfer id %s" +msgstr "" + +#: cinder/transfer/api.py:199 +#, python-format +msgid "Volume %s has been transferred." 
+msgstr "" + +#: cinder/volume/api.py:143 +#, python-format +msgid "Unable to query if %s is in the availability zone set" +msgstr "" + +#: cinder/volume/api.py:171 cinder/volume/api.py:173 +msgid "Failed to create api volume flow" +msgstr "" + +#: cinder/volume/api.py:202 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:214 +#, python-format +msgid "Volume status must be available or error, but current status is: %s" +msgstr "" + +#: cinder/volume/api.py:224 +msgid "Volume cannot be deleted while migrating" +msgstr "" + +#: cinder/volume/api.py:229 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:285 cinder/volume/api.py:350 +#: cinder/volume/qos_specs.py:240 cinder/volume/volume_types.py:67 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:370 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:377 +msgid "status must be in-use to detach" +msgstr "" + +#: cinder/volume/api.py:388 +msgid "Volume status must be available to reserve" +msgstr "" + +#: cinder/volume/api.py:464 +msgid "Snapshot cannot be created while volume is migrating" +msgstr "" + +#: cinder/volume/api.py:468 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:490 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:502 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:553 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/api.py:581 cinder/volume/flows/api/create_volume.py:208 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:585 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:589 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:720 cinder/volume/api.py:772 +msgid "Volume status must be available/in-use." +msgstr "" + +#: cinder/volume/api.py:723 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/api.py:752 +msgid "Volume status must be available to extend." +msgstr "" + +#: cinder/volume/api.py:757 +#, python-format +msgid "" +"New size for extend must be greater than current size. (current: " +"%(size)s, extended: %(new_size)s)" +msgstr "" + +#: cinder/volume/api.py:778 +msgid "Volume is already part of an active migration" +msgstr "" + +#: cinder/volume/api.py:784 +msgid "volume must not have snapshots" +msgstr "" + +#: cinder/volume/api.py:797 +#, python-format +msgid "No available service named %s" +msgstr "" + +#: cinder/volume/api.py:803 +msgid "Destination host must be different than current host" +msgstr "" + +#: cinder/volume/api.py:833 +msgid "Source volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:837 +msgid "Destination volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:842 +#, python-format +msgid "Destination has migration_status %(stat)s, expected %(exp)s." +msgstr "" + +#: cinder/volume/api.py:853 +msgid "Volume status must be available to update readonly flag." 
+msgstr "" + +#: cinder/volume/api.py:862 +#, python-format +msgid "Unable to update type due to incorrect status on volume: %s" +msgstr "" + +#: cinder/volume/api.py:868 +#, python-format +msgid "Volume %s is already part of an active migration." +msgstr "" + +#: cinder/volume/api.py:874 +#, python-format +msgid "migration_policy must be 'on-demand' or 'never', passed: %s" +msgstr "" + +#: cinder/volume/api.py:887 +#, python-format +msgid "Invalid volume_type passed: %s" +msgstr "" + +#: cinder/volume/api.py:900 +#, python-format +msgid "New volume_type same as original: %s" +msgstr "" + +#: cinder/volume/api.py:915 +msgid "Retype cannot change encryption requirements" +msgstr "" + +#: cinder/volume/api.py:927 +msgid "Retype cannot change front-end qos specs for in-use volumes" +msgstr "" + +#: cinder/volume/driver.py:189 cinder/volume/drivers/netapp/nfs.py:174 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:282 +#, python-format +msgid "copy_data_between_volumes %(src)s -> %(dest)s." +msgstr "" + +#: cinder/volume/driver.py:295 cinder/volume/driver.py:309 +#, python-format +msgid "Failed to attach volume %(vol)s" +msgstr "" + +#: cinder/volume/driver.py:327 +#, python-format +msgid "Failed to copy volume %(src)s to %(dest)d" +msgstr "" + +#: cinder/volume/driver.py:340 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:358 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:394 +#, python-format +msgid "Unable to access the backend storage via the path %(path)s." +msgstr "" + +#: cinder/volume/driver.py:433 +#, python-format +msgid "Creating a new backup for volume %s." +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Restoring backup %(backup)s to volume %(volume)s." +msgstr "" + +#: cinder/volume/driver.py:474 +msgid "Extend volume not implemented" +msgstr "" + +#: cinder/volume/driver.py:533 cinder/volume/drivers/emc/emc_smis_iscsi.py:113 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:546 +#, python-format +msgid "ISCSI discovery attempt failed for:%s" +msgstr "" + +#: cinder/volume/driver.py:548 +#, python-format +msgid "Error from iscsiadm -m discovery: %s" +msgstr "" + +#: cinder/volume/driver.py:595 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:599 cinder/volume/drivers/emc/emc_smis_iscsi.py:156 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:696 +msgid "The volume driver requires the iSCSI initiator name in the connector." +msgstr "" + +#: cinder/volume/driver.py:726 cinder/volume/driver.py:845 +#: cinder/volume/drivers/eqlx.py:247 cinder/volume/drivers/lvm.py:359 +#: cinder/volume/drivers/zadara.py:650 +#: cinder/volume/drivers/emc/emc_smis_common.py:859 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:235 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:602 +#: cinder/volume/drivers/netapp/iscsi.py:1032 +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/nexenta/iscsi.py:538 +#: cinder/volume/drivers/windows/windows.py:205 +msgid "Updating volume stats" +msgstr "" + +#: cinder/volume/driver.py:924 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:203 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." 
+msgstr "" + +#: cinder/volume/manager.py:209 +msgid "" +"ThinLVMVolumeDriver is deprecated, please configure LVMISCSIDriver and " +"lvm_type=thin. Continuing with those settings." +msgstr "" + +#: cinder/volume/manager.py:228 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)" +msgstr "" + +#: cinder/volume/manager.py:235 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s" +msgstr "" + +#: cinder/volume/manager.py:244 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:257 +#, python-format +msgid "Failed to re-export volume %s: setting to error state" +msgstr "" + +#: cinder/volume/manager.py:264 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:271 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:273 +#, python-format +msgid "" +"Error encountered during re-exporting phase of driver initialization: " +"%(name)s" +msgstr "" + +#: cinder/volume/manager.py:283 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:328 cinder/volume/manager.py:330 +msgid "Failed to create manager volume flow" +msgstr "" + +#: cinder/volume/manager.py:374 cinder/volume/manager.py:391 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:380 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:389 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:394 +#, python-format +msgid "Cannot delete volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:422 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:427 +#, python-format +msgid "volume %s: glance metadata deleted" +msgstr "" + +#: cinder/volume/manager.py:430 +#, python-format +msgid "no glance metadata found for volume %s" +msgstr "" + +#: cinder/volume/manager.py:434 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:451 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:462 +#, python-format +msgid "snapshot %(snap_id)s: creating" +msgstr "" + +#: cinder/volume/manager.py:490 +#, python-format +msgid "" +"Failed updating %(snapshot_id)s metadata using the provided volumes " +"%(volume_id)s metadata" +msgstr "" + +#: cinder/volume/manager.py:496 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:508 cinder/volume/manager.py:518 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:526 +#, python-format +msgid "Cannot delete snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:556 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:559 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:579 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:583 +msgid "being attached by another host" +msgstr "" + +#: cinder/volume/manager.py:587 +msgid "being attached by different mode" +msgstr "" + +#: cinder/volume/manager.py:590 +msgid "status must be available or attaching" +msgstr "" + +#: cinder/volume/manager.py:698 +#, python-format +msgid 
"Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "" + +#: cinder/volume/manager.py:760 +#, python-format +msgid "Unable to fetch connection information from backend: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:807 +#, python-format +msgid "Unable to terminate volume connection: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:854 +msgid "failed to create new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:857 +msgid "timeout creating new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:880 +#, python-format +msgid "Failed to copy volume %(vol1)s to %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:909 +#, python-format +msgid "" +"migrate_volume_completion: completing migration for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:921 +#, python-format +msgid "" +"migrate_volume_completion is cleaning up an error for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:940 +#, python-format +msgid "Failed to delete migration source vol %(vol)s: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:976 +#, python-format +msgid "volume %s: calling driver migrate_volume" +msgstr "" + +#: cinder/volume/manager.py:1016 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/manager.py:1024 +#, python-format +msgid "" +"Unable to update stats, %(driver_name)s -%(driver_version)s " +"%(config_group)s driver is uninitialized." +msgstr "" + +#: cinder/volume/manager.py:1044 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/manager.py:1091 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to extend volume by %(s_size)sG, " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/manager.py:1103 +#, python-format +msgid "volume %s: extending" +msgstr "" + +#: cinder/volume/manager.py:1105 +#, python-format +msgid "volume %s: extended successfully" +msgstr "" + +#: cinder/volume/manager.py:1107 +#, python-format +msgid "volume %s: Error trying to extend volume" +msgstr "" + +#: cinder/volume/manager.py:1169 +msgid "Failed to update usages while retyping volume." +msgstr "" + +#: cinder/volume/manager.py:1170 +msgid "Failed to get old volume type quota reservations" +msgstr "" + +#: cinder/volume/manager.py:1190 +#, python-format +msgid "Volume %s: retyped succesfully" +msgstr "" + +#: cinder/volume/manager.py:1193 +#, python-format +msgid "" +"Volume %s: driver error when trying to retype, falling back to generic " +"mechanism." +msgstr "" + +#: cinder/volume/manager.py:1204 +msgid "Retype requires migration but is not allowed." +msgstr "" + +#: cinder/volume/manager.py:1212 +msgid "Volume must not have snapshots." 
+msgstr "" + +#: cinder/volume/qos_specs.py:57 +#, python-format +msgid "Valid consumer of QoS specs are: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:84 cinder/volume/qos_specs.py:105 +#: cinder/volume/qos_specs.py:155 cinder/volume/qos_specs.py:197 +#: cinder/volume/qos_specs.py:211 cinder/volume/qos_specs.py:225 +#: cinder/volume/volume_types.py:43 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:123 cinder/volume/qos_specs.py:140 +#: cinder/volume/qos_specs.py:272 cinder/volume/volume_types.py:52 +#: cinder/volume/volume_types.py:99 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/qos_specs.py:156 +#, python-format +msgid "Failed to get all associations of qos specs %s" +msgstr "" + +#: cinder/volume/qos_specs.py:189 +#, python-format +msgid "" +"Type %(type_id)s is already associated with another qos specs: " +"%(qos_specs_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:198 +#, python-format +msgid "Failed to associate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:212 +#, python-format +msgid "Failed to disassociate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:226 +#, python-format +msgid "Failed to disassociate qos specs %s." +msgstr "" + +#: cinder/volume/qos_specs.py:284 cinder/volume/volume_types.py:111 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/utils.py:144 +#, python-format +msgid "" +"Incorrect value error: %(blocksize)s, it may indicate that " +"'volume_dd_blocksize' was configured incorrectly. Fall back to default." +msgstr "" + +#: cinder/volume/volume_types.py:130 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:131 +#: cinder/volume/drivers/block_device.py:143 cinder/volume/drivers/lvm.py:654 +#: cinder/volume/drivers/lvm.py:669 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:157 cinder/volume/drivers/lvm.py:687 +#, python-format +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:183 cinder/volume/drivers/lvm.py:483 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:200 cinder/volume/drivers/lvm.py:504 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:272 cinder/volume/drivers/lvm.py:227 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:287 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:311 cinder/volume/drivers/lvm.py:300 +#: cinder/volume/drivers/zadara.py:509 cinder/volume/drivers/nexenta/nfs.py:189 +#, python-format +msgid "Creating clone of volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:380 +msgid "No free disk" +msgstr "" + +#: cinder/volume/drivers/block_device.py:393 +msgid "No big enough free disk" +msgstr "" + +#: cinder/volume/drivers/coraid.py:84 +#, python-format +msgid "Invalid ESM url scheme \"%s\". Supported https only." +msgstr "" + +#: cinder/volume/drivers/coraid.py:111 +msgid "Invalid REST handle name. Expected path." 
+msgstr "" + +#: cinder/volume/drivers/coraid.py:134 +#, python-format +msgid "Call to json.loads() failed: %(ex)s. Response: %(resp)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:224 +msgid "Session is expired. Relogin on ESM." +msgstr "" + +#: cinder/volume/drivers/coraid.py:244 +msgid "Reply is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:246 +msgid "Error message is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:284 +#, python-format +msgid "Coraid Appliance ping failed: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:297 +#, python-format +msgid "Volume \"%(name)s\" created with VSX LUN \"%(lun)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:311 +#, python-format +msgid "Volume \"%s\" deleted." +msgstr "" + +#: cinder/volume/drivers/coraid.py:315 +#, python-format +msgid "Resize volume \"%(name)s\" to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:319 +#, python-format +msgid "Repository for volume \"%(name)s\" found: \"%(repo)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:333 +#, python-format +msgid "Volume \"%(name)s\" resized. New size is %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:385 +msgid "Cannot create clone volume in different repository." +msgstr "" + +#: cinder/volume/drivers/coraid.py:505 +#, python-format +msgid "Initialize connection %(shelf)s/%(lun)s for %(name)s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:139 +#, python-format +msgid "" +"CLI output\n" +"%s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:154 +msgid "Reading CLI MOTD" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:158 +#, python-format +msgid "Setting CLI terminal width: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:162 +#, python-format +msgid "Sending CLI command: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:169 +msgid "Error executing EQL command" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:199 +#, python-format +msgid "EQL-driver: executing \"%s\"" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:208 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:383 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:215 cinder/volume/drivers/san/san.py:149 +#, python-format +msgid "Error running SSH command: %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:282 +#, python-format +msgid "Volume %s does not exist, it may have already been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:300 +#, python-format +msgid "EQL-driver: Setup is complete, group IP is %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:304 +msgid "Failed to setup the Dell EqualLogic driver" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:320 +#, python-format +msgid "Failed to create volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:329 +#, python-format +msgid "Volume %s was not found while trying to delete it" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:333 +#, python-format +msgid "Failed to delete volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:348 +#, python-format +msgid "Failed to create snapshot of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:361 +#, python-format +msgid "Failed to create volume from snapshot %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:374 +#, python-format +msgid "Failed to create clone of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:384 +#, python-format +msgid "Failed to delete snapshot %(snap)s of volume %(vol)s" +msgstr "" + +#: 
cinder/volume/drivers/eqlx.py:405 +#, python-format +msgid "Failed to initialize connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:415 +#, python-format +msgid "Failed to terminate connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:436 +#, python-format +msgid "Volume %s was not found! It may have been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:440 +#, python-format +msgid "Failed to ensure export of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:459 +#, python-format +msgid "Failed to extend_volume %(name)s from %(current_size)sGB to %(new_size)sGB" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:86 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:91 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:103 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:161 +#, python-format +msgid "Cloning volume %(src)s to volume %(dst)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:166 +msgid "Volume status must be 'available'." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:202 cinder/volume/drivers/nfs.py:122 +#: cinder/volume/drivers/netapp/nfs.py:753 +#, python-format +msgid "casted to %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:216 +msgid "Snapshot status must be \"available\" to clone." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:238 +#, python-format +msgid "snapshot: %(snap)s, volume: %(vol)s, volume_size: %(size)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:257 +#, python-format +msgid "will copy from snapshot at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:275 cinder/volume/drivers/nfs.py:172 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:373 +#, python-format +msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:403 +#, python-format +msgid "nova call result: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:405 +msgid "Call to Nova to create snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:427 +msgid "Nova returned \"error\" status while creating snapshot." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:431 +#, python-format +msgid "Status of snapshot %(id)s is now %(status)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:444 +#, python-format +msgid "Timed out while waiting for Nova update for creation of snapshot %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:456 +#, python-format +msgid "create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:457 +#, python-format +msgid "volume id: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:532 +msgid "'active' must be present when writing snap_info." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:562 +#, python-format +msgid "deleting snapshot %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:566 +msgid "Volume status must be \"available\" or \"in-use\"." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:582 +#, python-format +msgid "" +"Snapshot record for %s is not present, allowing snapshot_delete to " +"proceed." 
+msgstr "" + +#: cinder/volume/drivers/glusterfs.py:587 +#, python-format +msgid "snapshot_file for this snap is %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:608 +#, python-format +msgid "No base file found for %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:625 +#, python-format +msgid "No %(base_id)s found for %(file)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:680 +#, python-format +msgid "No file found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:690 +#, python-format +msgid "No snap found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:701 +#, python-format +msgid "No file depends on %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:727 +#, python-format +msgid "Check condition failed: %s expected to be None." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:778 +msgid "Call to Nova delete snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:796 +#, python-format +msgid "status of snapshot %s is still \"deleting\"... waiting" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:802 +#, python-format +msgid "Unable to delete snapshot %(id)s, status: %(status)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:815 +#, python-format +msgid "Timed out while waiting for Nova update for deletion of snapshot %(id)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:904 +#, python-format +msgid "%s must be a valid raw or qcow2 image." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:967 +msgid "Extend volume is only supported for this driver when no snapshots exist." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:975 +#, python-format +msgid "Unrecognized backing format: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:990 +#, python-format +msgid "creating new volume at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:993 +#, python-format +msgid "file already exists at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1019 cinder/volume/drivers/nfs.py:159 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1021 +#, python-format +msgid "Available shares: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1038 +#, python-format +msgid "" +"GlusterFS share at %(dir)s is not writable by the Cinder volume service. " +"Snapshot operations will not be supported." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:96 +#, python-format +msgid "GPFS is not active. Detailed output: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:97 +#, python-format +msgid "GPFS is not running - state: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:140 +msgid "Option gpfs_mount_point_base is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:147 +msgid "Option gpfs_images_share_mode is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:153 +msgid "Option gpfs_images_dir is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:160 +#, python-format +msgid "" +"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " +"belong to different file systems" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:169 +#, python-format +msgid "" +"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in " +"cluster daemon level %(cur)s - must be at least at level %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:183 +#, python-format +msgid "%s must be an absolute path." 
+msgstr "" + +#: cinder/volume/drivers/gpfs.py:188 +#, python-format +msgid "%s is not a directory." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:197 +#, python-format +msgid "" +"The GPFS filesystem %(fs)s is not at the required release level. Current" +" level is %(cur)s, must be at least %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:556 +#, python-format +msgid "Failed to resize volume %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:604 +#, python-format +msgid "mkfs failed on volume %(vol)s, error message was: %(err)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:637 +#, python-format +msgid "" +"%s cannot be accessed. Verify that GPFS is active and file system is " +"mounted." +msgstr "" + +#: cinder/volume/drivers/lvm.py:189 +#, python-format +msgid "Unabled to delete due to existing snapshot for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:215 +#, python-format +msgid "Volume device file path %s does not exist." +msgstr "" + +#: cinder/volume/drivers/lvm.py:221 +#, python-format +msgid "Size for volume: %s not found, cannot secure delete." +msgstr "" + +#: cinder/volume/drivers/lvm.py:262 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:361 +#, python-format +msgid "Unable to update stats on non-initialized Volume Group: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:462 +#, python-format +msgid "Error creating iSCSI target, retrying creation for target: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:482 +#, python-format +msgid "volume_info:%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:518 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:519 cinder/volume/drivers/lvm.py:724 +#: cinder/volume/drivers/huawei/rest_common.py:1225 +#, python-format +msgid "%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:573 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/nfs.py:109 +msgid "Driver specific implementation needs to return mount_point_base." +msgstr "" + +#: cinder/volume/drivers/nfs.py:263 +#, python-format +msgid "Expected volume size was %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:264 +#, python-format +msgid " but size is now %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:361 +#, python-format +msgid "%s is already mounted" +msgstr "" + +#: cinder/volume/drivers/nfs.py:421 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:426 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/nfs.py:431 +#, python-format +msgid "NFS config 'nfs_oversub_ratio' invalid. Must be > 0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:439 +#, python-format +msgid "NFS config 'nfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:493 +#, python-format +msgid "Selected %s as target nfs share." 
+msgstr "" + +#: cinder/volume/drivers/nfs.py:526 +#, python-format +msgid "%s is above nfs_used_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:529 +#, python-format +msgid "%s is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:532 +#, python-format +msgid "%s reserved space is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/rbd.py:160 +#, python-format +msgid "Invalid argument - whence=%s not supported" +msgstr "" + +#: cinder/volume/drivers/rbd.py:164 +msgid "Invalid argument" +msgstr "" + +#: cinder/volume/drivers/rbd.py:183 +msgid "fileno() not supported by RBD()" +msgstr "" + +#: cinder/volume/drivers/rbd.py:210 +#, python-format +msgid "error opening rbd image %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:259 +msgid "rados and rbd python libraries not found" +msgstr "" + +#: cinder/volume/drivers/rbd.py:265 +msgid "error connecting to ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:346 cinder/volume/drivers/sheepdog.py:178 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:377 +#, python-format +msgid "clone depth exceeds limit of %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:411 +#, python-format +msgid "maximum clone depth (%d) has been reached - flattening source volume" +msgstr "" + +#: cinder/volume/drivers/rbd.py:423 +#, python-format +msgid "flattening source volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:435 +#, python-format +msgid "creating snapshot='%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:445 +#, python-format +msgid "cloning '%(src_vol)s@%(src_snap)s' to '%(dest)s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:459 +msgid "clone created successfully" +msgstr "" + +#: cinder/volume/drivers/rbd.py:468 +#, python-format +msgid "creating volume '%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:484 +#, python-format +msgid "flattening %(pool)s/%(img)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:490 +#, python-format +msgid "cloning %(pool)s/%(img)s@%(snap)s to %(dst)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:527 +msgid "volume has no backup snaps" +msgstr "" + +#: cinder/volume/drivers/rbd.py:550 +#, python-format +msgid "volume %s is not a clone" +msgstr "" + +#: cinder/volume/drivers/rbd.py:568 +#, python-format +msgid "deleting parent snapshot %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:579 +#, python-format +msgid "deleting parent %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:593 +#, python-format +msgid "volume %s no longer exists in backend" +msgstr "" + +#: cinder/volume/drivers/rbd.py:609 +msgid "volume has clone snapshot(s)" +msgstr "" + +#: cinder/volume/drivers/rbd.py:625 +#, python-format +msgid "deleting rbd volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:629 +msgid "" +"ImageBusy error raised while deleting rbd volume. This may have been " +"caused by a connection from a client that has crashed and, if so, may be " +"resolved by retrying the delete after 30 seconds has elapsed." 
+msgstr "" + +#: cinder/volume/drivers/rbd.py:642 +msgid "volume is a clone so cleaning references" +msgstr "" + +#: cinder/volume/drivers/rbd.py:696 +#, python-format +msgid "connection data: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:705 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:709 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:712 +msgid "Not an rbd snapshot" +msgstr "" + +#: cinder/volume/drivers/rbd.py:724 +#, python-format +msgid "not cloneable: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:728 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:733 +msgid "rbd image clone requires image format to be 'raw' but image {0} is '{1}'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:747 +#, python-format +msgid "Unable to open image %(loc)s: %(err)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:817 +msgid "volume backup complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:830 +msgid "volume restore complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:840 cinder/volume/drivers/sheepdog.py:195 +#, python-format +msgid "Failed to Extend Volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:845 cinder/volume/drivers/sheepdog.py:200 +#: cinder/volume/drivers/windows/windows.py:223 +#, python-format +msgid "Extend volume from %(old_size)s GB to %(new_size)s GB." +msgstr "" + +#: cinder/volume/drivers/scality.py:67 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:78 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:84 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:105 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:139 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:59 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:64 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:144 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:151 +#, python-format +msgid "" +"Failed to make httplib connection SolidFire Cluster: %s (verify san_ip " +"settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:154 +#, python-format +msgid "Failed to make httplib connection: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:161 +#, python-format +msgid "" +"Request to SolidFire cluster returned bad status: %(status)s / %(reason)s" +" (check san_login/san_password settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:166 +#, python-format +msgid "HTTP request failed, with status: %(status)s and reason: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:177 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:183 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:187 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:189 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:195 +#, python-format +msgid "Detected 
xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:202 +#: cinder/volume/drivers/solidfire.py:271 +#: cinder/volume/drivers/solidfire.py:366 +#, python-format +msgid "API response: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:222 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:253 +#, python-format +msgid "solidfire account: %s does not exist, creating it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:315 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:398 +msgid "Failed to get model update from clone" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:410 +#, python-format +msgid "Failed volume create: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:425 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:460 +#, python-format +msgid "Failed to get SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:469 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:478 +#, python-format +msgid "Volume %s not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:481 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:550 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:554 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:556 +msgid "This usually means the volume was never successfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:569 +#, python-format +msgid "Failed to delete SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:572 +#: cinder/volume/drivers/solidfire.py:646 +#: cinder/volume/drivers/solidfire.py:709 +#: cinder/volume/drivers/solidfire.py:734 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:575 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:579 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:587 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:638 +msgid "Entering SolidFire extend_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:660 +msgid "Leaving SolidFire extend_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:665 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:673 +msgid "Failed to get updated stats" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:703 +#: cinder/volume/drivers/solidfire.py:728 +msgid "Entering SolidFire attach_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:773 +msgid "Leaving SolidFire transfer volume" +msgstr "" + +#: cinder/volume/drivers/zadara.py:236 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:260 +#, python-format +msgid "Operation completed. 
%(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:357 +#, python-format +msgid "Pool %(name)s: %(total)sGB total, %(free)sGB free" +msgstr "" + +#: cinder/volume/drivers/zadara.py:408 cinder/volume/drivers/zadara.py:531 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:438 +#, python-format +msgid "Create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:445 cinder/volume/drivers/zadara.py:490 +#: cinder/volume/drivers/zadara.py:516 +#, python-format +msgid "Volume %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:456 +#, python-format +msgid "Delete snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:464 +#, python-format +msgid "snapshot: original volume %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:472 +#, python-format +msgid "snapshot: snapshot %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:483 +#, python-format +msgid "Creating volume from snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:496 +#, python-format +msgid "Snapshot %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:614 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:40 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:79 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:83 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:91 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:98 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:107 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:115 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:130 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:137 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:144 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:152 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:157 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:167 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:177 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:188 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:197 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:218 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:230 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:241 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:257 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:266 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:278 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:287 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:292 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:302 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:312 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:321 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:342 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. 
Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:354 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:365 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:381 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:390 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:402 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:411 +msgid "Entering delete_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:413 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:420 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:430 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:438 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:442 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigService: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:456 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:465 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:472 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:476 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:488 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:497 +#: cinder/volume/drivers/emc/emc_smis_common.py:567 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:502 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s."
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:518 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:527 +#, python-format +msgid "" +"Error Create Snapshot: %(snapshot)s Volume: %(volume)s Error: " +"%(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:535 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:541 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:545 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:551 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:559 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:574 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:590 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:599 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:611 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:621 +#, python-format +msgid "Create export: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:626 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:648 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:663 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:674 +#, python-format +msgid "Error mapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:678 +#, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:694 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:707 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:711 +#, python-format +msgid "HidePaths for volume %s completed successfully." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:724 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:739 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:744 +#, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:757 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:770 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:775 +#, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:781 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:790 +#: cinder/volume/drivers/emc/emc_smis_common.py:820 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:804 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:810 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:834 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:840 +#, python-format +msgid "Volume %s is already mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:852 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:884 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:887 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:903 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:906 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:928 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:948 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:952 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:959 +msgid "Cannot connect to ECOM server" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:971 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:984 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:997 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1010 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1054 +#, python-format +msgid "Pool %(storage_type)s is not found." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1060 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1066 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1082 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1114 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1117 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1130 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1158 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1184 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1188 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1248 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1289 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1302 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1314 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1326 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1361 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1404 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1409 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1419 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1441 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1463 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1491 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1520 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1526 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1538 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1548 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1550 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1566 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:152 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:161 +#, python-format +msgid "Cannot find device number for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:191 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:198 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:215 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:70 +#, python-format +msgid "Range: start LU: %(start)s, end LU: %(end)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:84 +#, python-format +msgid "setting LU upper (end) limit to %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:92 +#, python-format +msgid "%(element)s: %(val)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:103 cinder/volume/drivers/hds/hds.py:105 +#, python-format +msgid "XML exception reading parameter: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:178 +#, python-format +msgid "portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:197 +#, python-format +msgid "No configuration found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:250 +#, python-format +msgid "HDP not found: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:289 +#, python-format +msgid "iSCSI portal not found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:327 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:355 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is cloned." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:372 +#, python-format +msgid "LUN %(lun)s extended to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:395 +#, python-format +msgid "delete lun %(lun)s on %(name)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:480 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created from snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:503 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is created as snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:522 +#, python-format +msgid "LUN %s is deleted." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:57 +msgid "_instantiate_driver: configuration not found." 
+msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:64 +#, python-format +msgid "" +"_instantiate_driver: Loading %(protocol)s driver for Huawei OceanStor " +"%(product)s series storage arrays." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:84 +#, python-format +msgid "" +"\"Product\" or \"Protocol\" is illegal. \"Product\" should be set to " +"either T, Dorado or HVS. \"Protocol\" should be set to either iSCSI or " +"FC. Product: %(product)s Protocol: %(protocol)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:74 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s host: %(host)s initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:92 +#: cinder/volume/drivers/huawei/huawei_t.py:461 +#, python-format +msgid "initialize_connection: Target FC ports WWNS: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:101 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(ini)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:159 +#: cinder/volume/drivers/huawei/rest_common.py:1278 +#, python-format +msgid "" +"_get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " +"check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:206 +#: cinder/volume/drivers/huawei/rest_common.py:1083 +#, python-format +msgid "_get_tgt_iqn: iSCSI IP is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:234 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:248 +#, python-format +msgid "" +"_get_iscsi_tgt_port_info: Failed to get iSCSI port info. Please make sure" +" the iSCSI port IP %s is configured in array." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:323 +#: cinder/volume/drivers/huawei/huawei_t.py:552 +#, python-format +msgid "" +"terminate_connection: volume: %(vol)s, host: %(host)s, connector: " +"%(initiator)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:351 +#, python-format +msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:436 +msgid "validate_connector: The FC driver requires thewwpns in the connector." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:443 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:578 +#, python-format +msgid "_remove_fc_ports: FC port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:40 +#, python-format +msgid "parse_xml_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:129 +#, python-format +msgid "_get_host_os_type: Host %(ip)s OS type is %(os)s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:59 +#, python-format +msgid "HVS Request URL: %(url)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:60 +#, python-format +msgid "HVS Request Data: %(data)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:73 +#, python-format +msgid "HVS Response Data: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:75 +#, python-format +msgid "Bad response from server: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:82 +msgid "JSON transfer error" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:102 +#, python-format +msgid "Login error, reason is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:166 +#, python-format +msgid "" +"%(err)s\n" +"result: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:173 +#, python-format +msgid "%s \"data\" was not in result." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:208 +msgid "Can't find the Qos policy in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:246 +msgid "Can't find lun or lun group in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:280 +#, python-format +msgid "Invalid resource pool: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:298 +#, python-format +msgid "Get pool info error, pool name is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:327 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:354 +#, python-format +msgid "_stop_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:474 +#, python-format +msgid "" +"_mapping_hostgroup_and_lungroup: lun_group: %(lun_group)sview_id: " +"%(view_id)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:511 +#: cinder/volume/drivers/huawei/rest_common.py:543 +#, python-format +msgid "initiator name:%(initiator_name)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:527 +#, python-format +msgid "host lun id is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:553 +#, python-format +msgid "the free wwns %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:574 +#, python-format +msgid "the fc server properties is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:688 +#, python-format +msgid "JSON transfer data error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:874 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:937 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:964 +#, python-format +msgid "" +"PrefetchType config is wrong. PrefetchType must in 1,2,3,4. fetchtype " +"is:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:970 +msgid "Use default prefetch fetchtype. Prefetch fetchtype:Intelligent." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:982 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal.LUNcopy name: " +"%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1056 +#, python-format +msgid "" +"_get_iscsi_port_info: Failed to get iscsi port info through config IP " +"%(ip)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1101 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1124 +#, python-format +msgid "_parse_volume_type: type id: %(type_id)s config parameter is: %(params)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1157 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the configuration file " +"%(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1162 +#, python-format +msgid "The config parameters are: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1239 +#: cinder/volume/drivers/huawei/ssh_common.py:118 +#: cinder/volume/drivers/huawei/ssh_common.py:1265 +#, python-format +msgid "_check_conf_file: Config file invalid. %s must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1246 +#: cinder/volume/drivers/huawei/ssh_common.py:125 +msgid "_check_conf_file: Config file invalid. StoragePool must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1256 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1300 +msgid "Can not find lun in array" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:54 +#, python-format +msgid "ssh_read: Read SSH timeout. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:70 +msgid "No response message. Please check system status." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:101 +#: cinder/volume/drivers/huawei/ssh_common.py:1249 +msgid "do_setup" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:135 +#: cinder/volume/drivers/huawei/ssh_common.py:1287 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType is invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:169 +#, python-format +msgid "_get_login_info: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:224 +#, python-format +msgid "create_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:242 +#, python-format +msgid "" +"_name_translate: Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:279 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the element in configuration " +"file %(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:373 +#: cinder/volume/drivers/huawei/ssh_common.py:1451 +#, python-format +msgid "LUNType must be \"Thin\" or \"Thick\". LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:395 +msgid "" +"_parse_conf_lun_params: Use default prefetch type. 
Prefetch type: " +"Intelligent" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:421 +#, python-format +msgid "" +"_get_maximum_capacity_pool_id: Failed to get pool id. Please check config" +" file and make sure the StoragePool %s is created in storage array." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:436 +#, python-format +msgid "CLI command: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:466 +#, python-format +msgid "" +"_execute_cli: Can not connect to IP %(old)s, try to connect to the other " +"IP %(new)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:501 +#, python-format +msgid "_execute_cli: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:511 +#, python-format +msgid "delete_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:516 +#, python-format +msgid "delete_volume: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:570 +#, python-format +msgid "" +"create_volume_from_snapshot: snapshot name: %(snapshot)s, volume name: " +"%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:580 +#, python-format +msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:650 +#, python-format +msgid "_wait_for_luncopy: LUNcopy %(luncopyname)s status is %(status)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:688 +#, python-format +msgid "create_cloned_volume: src volume: %(src)s, tgt volume: %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:697 +#, python-format +msgid "Source volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:739 +#, python-format +msgid "" +"extend_volume: extended volume name: %(extended_name)s new added volume " +"name: %(added_name)s new added volume size: %(added_size)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:747 +#, python-format +msgid "extend_volume: volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:779 +#, python-format +msgid "create_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:785 +msgid "create_snapshot: Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:792 +#, python-format +msgid "create_snapshot: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:855 +#, python-format +msgid "delete_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:865 +#, python-format +msgid "" +"delete_snapshot: Can not delete snapshot %s for it is a source LUN of " +"LUNCopy." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:873 +#, python-format +msgid "delete_snapshot: Snapshot %(snap)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:916 +#, python-format +msgid "" +"%(func)s: %(msg)s\n" +"CLI command: %(cmd)s\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:933 +#, python-format +msgid "map_volume: Volume %s was not found." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1079 +#, python-format +msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1102 +#, python-format +msgid "remove_map: Host %s does not exist." 
+msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1106 +#, python-format +msgid "remove_map: Volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1119 +#, python-format +msgid "remove_map: No map between host %(host)s and volume %(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1138 +#, python-format +msgid "" +"_delete_map: There are IOs accessing the system. Retry to delete host map" +" %(mapid)s 10s later." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1146 +#, python-format +msgid "" +"_delete_map: Failed to delete host map %(mapid)s.\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1185 +msgid "_update_volume_stats: Updating volume stats." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1277 +msgid "_check_conf_file: Config file invalid. StoragePool must be specified." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1311 +msgid "" +"_get_device_type: The driver only supports Dorado5100 and Dorado 2100 G2 " +"now." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1389 +#, python-format +msgid "" +"create_volume_from_snapshot: %(device)s does not support create volume " +"from snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1396 +#, python-format +msgid "create_cloned_volume: %(device)s does not support clone volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1404 +#, python-format +msgid "extend_volume: %(device)s does not support extend volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1413 +#, python-format +msgid "create_snapshot: %(device)s does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:132 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:142 +#, python-format +msgid "Failed getting details for pool %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:179 +msgid "do_setup: No configured nodes." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:182 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:186 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:190 +msgid "Unable to determine system name" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:193 +msgid "Unable to determine system id" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:196 +msgid "Unable to determine pool extent size" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:203 +#: cinder/volume/drivers/netapp/iscsi.py:122 +#: cinder/volume/drivers/netapp/nfs.py:639 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:157 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:209 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:217 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:225 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:235 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:254 +msgid "The connector does not contain the required information." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:277 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:297 +msgid "CHAP secret exists for host but CHAP is disabled" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:302 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:314 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:316 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:333 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:342 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:365 +msgid "" +"Could not get FC connection information for the host-volume connection. " +"Is the host configured properly for FC connections?" 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:380 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:385 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:403 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:411 +msgid "terminate_connection: Failed to get host name from connector." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:421 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:447 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:459 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:470 +#, python-format +msgid "enter: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:474 +msgid "extend_volume: Extending a volume with snapshots is not supported." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:481 +#, python-format +msgid "leave: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:497 +#, python-format +msgid "enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:523 +#, python-format +msgid "leave: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:540 +#, python-format +msgid "" +"enter: retype: id=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:581 +#, python-format +msgid "" +"exit: retype: ild=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:622 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:623 +msgid "_update_volume_stats: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:44 +#, python-format +msgid "Could not find key in output of command %(cmd)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:64 +#, python-format +msgid "Failed to get code level (%s)." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:86 +#, python-format +msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:143 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:165 +#, python-format +msgid "Failed to find host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:178 +#, python-format +msgid "enter: get_host_from_connector: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:207 +#, python-format +msgid "leave: get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:218 +#, python-format +msgid "enter: create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:224 +msgid "create_host: Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:235 +msgid "create_host: No initiators or wwpns supplied." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:265 +#, python-format +msgid "leave: create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:275 +#, python-format +msgid "enter: map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:301 +#, python-format +msgid "" +"leave: map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host " +"%(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:311 +#, python-format +msgid "enter: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:318 +#, python-format +msgid "unmap_vol_from_host: No mapping of volume %(vol_name)s to any host found." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:324 +#, python-format +msgid "" +"unmap_vol_from_host: Multiple mappings of volume %(vol_name)s found, no " +"host specified." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:336 +#, python-format +msgid "" +"unmap_vol_from_host: No mapping of volume %(vol_name)s to host %(host) " +"found." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:348 +#, python-format +msgid "leave: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:377 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:383 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:390 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:397 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:402 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:408 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:417 +#, python-format +msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:452 +msgid "Protocol must be specified as ' iSCSI' or ' FC'." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:495 +#, python-format +msgid "enter: create_vdisk: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:498 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:525 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping%(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:535 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within theallotted %(to)d " +"seconds timeout. Terminating." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:544 +#, python-format +msgid "" +"enter: run_flashcopy: execute FlashCopy from source %(source)s to target " +"%(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:552 +#, python-format +msgid "leave: run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:572 +#, python-format +msgid "Loopcall: _check_vdisk_fc_mappings(), vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:595 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:631 +#, python-format +msgid "Calling _ensure_vdisk_no_fc_mappings: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:639 +#, python-format +msgid "enter: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:641 +#, python-format +msgid "Tried to delete non-existant vdisk %s." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:645 +#, python-format +msgid "leave: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:649 +#, python-format +msgid "enter: create_copy: snapshot %(src)s to %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:654 +#, python-format +msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:669 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt)s from vdisk %(src)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:691 +msgid "migrate_volume started without a vdisk copy in the expected pool." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:743 +#, python-format +msgid "" +"Ignore change IO group as storage code level is %(code_level)s, below " +"then 6.4.0.0" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:35 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:211 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:244 +#, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:53 +#, python-format +msgid "Expected no output from CLI command %(cmd)s, got %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:65 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:256 +#, python-format +msgid "" +"Failed to parse CLI output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:142 +msgid "Must pass wwpn or host to lsfabric." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:158 +#, python-format +msgid "Did not find success message nor error for %(fun)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:163 +msgid "" +"storwize_svc_multihostmap_enabled is set to False, not allowing multi " +"host mapping." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:347 +#, python-format +msgid "Did not find expected key %(key)s in %(fun)s: %(raw)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:382 +#, python-format +msgid "" +"Unexpected CLI response: header/row mismatch. header: %(header)s, row: " +"%(row)s" +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:419 +#, python-format +msgid "No element by given name %s." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:440 +msgid "Not a valid value for NaElement." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:444 +msgid "NaElement name cannot be null." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:468 +msgid "Type cannot be converted into NaElement." 
+msgstr "" + +#: cinder/volume/drivers/netapp/common.py:75 +msgid "Required configuration not found" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:103 +#, python-format +msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:109 +#, python-format +msgid "Storage family %s is not supported" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:116 +#, python-format +msgid "No default storage protocol found for storage family %(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:123 +#, python-format +msgid "" +"Protocol %(storage_protocol)s is not supported for storage family " +"%(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:130 +#, python-format +msgid "" +"NetApp driver of family %(storage_family)s and protocol " +"%(storage_protocol)s loaded" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:139 +msgid "Only loading netapp drivers supported." +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:158 +#, python-format +msgid "" +"The configured NetApp driver is deprecated. Please refer the link to " +"resolve the issue '%s'." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:69 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:105 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:150 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:166 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:175 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:191 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:227 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:232 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:238 +#, python-format +msgid "Failed to get LUN target details for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:249 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:252 +#, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:290 +#, python-format +msgid "Snapshot %s deletion successful" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:310 +#: cinder/volume/drivers/netapp/iscsi.py:565 +#: cinder/volume/drivers/netapp/nfs.py:99 +#: cinder/volume/drivers/netapp/nfs.py:206 +#, python-format +msgid "Resizing %s failed. Cleaning volume." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:325 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:412 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:431 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:511 +msgid "Object is not a NetApp LUN." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:543 +#, python-format +msgid "Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:545 +#, python-format +msgid "Error getting lun attribute. Exception: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:600 +#, python-format +msgid "No need to extend volume %s as it is already the requested new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:606 +#, python-format +msgid "Resizing lun %s directly to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:633 +#, python-format +msgid "Lun %(path)s geometry failed. Message - %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:662 +#, python-format +msgid "Moving lun %(name)s to %(new_name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:677 +#, python-format +msgid "Resizing lun %s using sub clone to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:684 +#, python-format +msgid "%s cannot be sub clone resized as it is hosted on compressed volume" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:690 +#, python-format +msgid "%s cannot be sub clone resized as it contains no blocks." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:707 +#, python-format +msgid "Post clone resize lun %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:718 +#, python-format +msgid "Failure staging lun %s to tmp." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:723 +#, python-format +msgid "Failure moving new cloned lun to %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:727 +#, python-format +msgid "Failure deleting staged tmp lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:730 +#, python-format +msgid "Unknown exception in post clone resize lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:732 +#, python-format +msgid "Exception details: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:736 +msgid "Getting lun block count." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:741 +#, python-format +msgid "Failure getting lun info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:785 +#, python-format +msgid "Failed to get vol with required size and extra specs for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:796 +#, python-format +msgid "Error provisioning vol %(name)s on %(volume)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:841 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:982 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:986 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1087 +msgid "Cluster ssc is not updated. No volume stats found." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1149 +#: cinder/volume/drivers/netapp/nfs.py:1080 +msgid "Unsupported ONTAP version. ONTAP version 7.3.1 and above is supported." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1153 +#: cinder/volume/drivers/netapp/nfs.py:1084 +#: cinder/volume/drivers/netapp/utils.py:320 +msgid "Api version could not be determined." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1164 +#, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1273 +#, python-format +msgid "Error finding luns for volume %s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1390 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1393 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1456 +msgid "Volume refresh job already running. Returning..." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1462 +#, python-format +msgid "Error refreshing vol capacity. Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1470 +#, python-format +msgid "Refreshing capacity info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:104 +#: cinder/volume/drivers/netapp/nfs.py:211 +#, python-format +msgid "NFS file %s not discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:224 +#, python-format +msgid "Copied image to volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:230 +#, python-format +msgid "Registering image in cache %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:237 +#, python-format +msgid "" +"Exception while registering image %(image_id)s in cache. Exception: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:250 +#, python-format +msgid "Found cache file for image %(image_id)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:263 +#, python-format +msgid "Cloning img from cache for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:271 +msgid "Image cache cleaning in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:282 +msgid "Image cache cleaning in progress." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:293 +#, python-format +msgid "Cleaning cache for share %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:298 +#, python-format +msgid "Files to be queued for deletion %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:305 +#, python-format +msgid "Exception during cache cleaning %(share)s. Message - %(ex)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:311 +msgid "Image cache cleaning done." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:336 +#, python-format +msgid "Bytes to free %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:343 +#, python-format +msgid "Delete file path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:358 +#, python-format +msgid "Deleting file at path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:363 +#, python-format +msgid "Exception during deleting %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:395 +#, python-format +msgid "Unexpected exception in cloning image %(image_id)s. 
Message: %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:411 +#, python-format +msgid "Cloning image %s from cache" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:415 +#, python-format +msgid "Cache share: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:425 +#, python-format +msgid "Unexpected exception during image cloning in share %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:431 +#, python-format +msgid "Cloning image %s directly in share" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:436 +#, python-format +msgid "Share is cloneable %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:443 +#, python-format +msgid "Image is raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:450 +#, python-format +msgid "Image will locally be converted to raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:457 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:467 +#, python-format +msgid "Performing post clone for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:474 +msgid "NFS file could not be discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:478 +msgid "Checking file for resize" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:482 +#, python-format +msgid "Resizing file to %sG" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:488 +msgid "Resizing image file failed." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:510 +msgid "Discover file retries exhausted." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:529 +#, python-format +msgid "Image location not in the expected format %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:557 +#, python-format +msgid "Found possible share matches %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:561 +msgid "Unexpected exception while short listing used share." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:599 +#, python-format +msgid "Extending volume %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:710 +#, python-format +msgid "Shares on vserver %s will only be used for provisioning." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:714 +#: cinder/volume/drivers/netapp/nfs.py:892 +msgid "No vserver set in config. SSC will be disabled." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:757 +#, python-format +msgid "Exception creating vol %(name)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:765 +#, python-format +msgid "Volume %s could not be created on shares." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:815 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:856 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:862 +#, python-format +msgid "" +"Cloning with params volume %(volume)s, src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:924 +msgid "No cluster ssc stats found. Wait for next volume stats update." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:941 +msgid "No shares found hence skipping ssc refresh." 
+msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:978 +#: cinder/volume/drivers/netapp/nfs.py:1221 +#, python-format +msgid "Shortlisted del elg files %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:983 +#: cinder/volume/drivers/netapp/nfs.py:1226 +#, python-format +msgid "Getting file usage for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:988 +#: cinder/volume/drivers/netapp/nfs.py:1231 +#, python-format +msgid "file-usage for path %(path)s is %(bytes)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1005 +#: cinder/volume/drivers/netapp/nfs.py:1268 +#, python-format +msgid "Share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1007 +#: cinder/volume/drivers/netapp/nfs.py:1270 +#, python-format +msgid "No share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1038 +#, python-format +msgid "Found volume %(vol)s for share %(share)s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1129 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1139 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:241 +#, python-format +msgid "Unexpected error while creating ssc vol list. Message - %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:272 +#, python-format +msgid "Exception querying aggr options. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:313 +#, python-format +msgid "Exception querying sis information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:347 +#, python-format +msgid "Exception querying mirror information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:379 +#, python-format +msgid "Exception querying storage disk. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:421 +#, python-format +msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:455 +#, python-format +msgid "Successfully completed stale refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:482 +#, python-format +msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:488 +#, python-format +msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:500 +msgid "Backend not a VolumeDriver." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:502 +msgid "Backend server not NaServer." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:505 +msgid "ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:517 +msgid "refresh stale ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:616 +msgid "Fatal error: User not permitted to query NetApp volumes." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:623 +#, python-format +msgid "" +"The user does not have access or sufficient privileges to use all ssc " +"apis. The ssc features %s may not work as expected." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:122 +msgid "ems executed successfully." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:124 +#, python-format +msgid "Failed to invoke ems. 
Message : %s" +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:137 +msgid "" +"It is not the recommended way to use drivers by NetApp. Please use " +"NetAppDriver to achieve the functionality." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:160 +msgid "Requires an NaServer instance." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:317 +msgid "Unsupported Clustered Data ONTAP version." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:99 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:150 +#, python-format +msgid "Extending volume: %(id)s New size: %(size)s GB" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:166 +#, python-format +msgid "Volume %s does not exist, it seems it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:179 +#, python-format +msgid "Cannot delete snapshot %(origin): %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:190 +#, python-format +msgid "Creating temp snapshot of the original volume: %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:200 +#: cinder/volume/drivers/nexenta/nfs.py:200 +#, python-format +msgid "Volume creation failed, deleting created snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:205 +#: cinder/volume/drivers/nexenta/nfs.py:205 +#, python-format +msgid "Failed to delete zfs snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:223 +#, python-format +msgid "Enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:250 +#, python-format +msgid "Remote NexentaStor appliance at %s should be SSH-bound." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:267 +#, python-format +msgid "" +"Cannot send source snapshot %(src)s to destination %(dst)s. Reason: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:275 +#, python-format +msgid "" +"Cannot delete temporary source snapshot %(src)s on NexentaStor Appliance:" +" %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:281 +#, python-format +msgid "Cannot delete source volume %(volume)s on NexentaStor Appliance: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:318 +#, python-format +msgid "Snapshot %s does not exist, it seems it was already deleted." 
+msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:439 +#: cinder/volume/drivers/windows/windows_utils.py:230 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:449 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:461 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:471 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:481 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:514 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:522 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:83 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:88 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:89 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:90 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:96 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:85 +#, python-format +msgid "Volume %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:89 +#, python-format +msgid "Folder %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:114 +#, python-format +msgid "Creating folder on Nexenta Store %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:146 +#, python-format +msgid "Cannot destroy created folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:176 +#, python-format +msgid "Cannot destroy cloned folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:227 +#, python-format +msgid "Folder %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:237 +#: cinder/volume/drivers/nexenta/nfs.py:268 +#, python-format +msgid "Snapshot %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:302 +#, python-format +msgid "Creating regular file: %s.This may take some time." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:313 +#, python-format +msgid "Regular file: %s created." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:365 +#, python-format +msgid "Sharing folder %s on Nexenta Store" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:393 +#, python-format +msgid "Shares loaded: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/utils.py:46 +#, python-format +msgid "Invalid value: \"%s\"" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:93 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:99 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:107 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:137 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:190 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:246 +#, python-format +msgid "Snapshot info: %(name)s => %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:321 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:79 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:166 +#, python-format +msgid "Invalid hp3parclient version. Version %s or greater required." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:179 +#, python-format +msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:193 +#, python-format +msgid "HP3PARCommon %(common_ver)s, hp3parclient %(rest_ver)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:212 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:494 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:228 +#, python-format +msgid "Failed to get domain because CPG (%s) doesn't exist on array." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:247 +#, python-format +msgid "Error extending volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:347 +#, python-format +msgid "command %s failed" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:390 +#, python-format +msgid "Error running ssh command: %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:586 +#, python-format +msgid "VV Set %s does not exist." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:633 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:684 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:752 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1004 +#, python-format +msgid "Failure in update_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1019 +#, python-format +msgid "Failure in clear_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1031 +#, python-format +msgid "Error attaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1039 +#, python-format +msgid "Error detaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:124 +#, python-format +msgid "Invalid IP address format '%s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:158 +#, python-format +msgid "" +"Found invalid iSCSI IP address(s) in configuration option(s) " +"hp3par_iscsi_ips or iscsi_ip_address '%s.'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:164 +msgid "At least one valid iSCSI IP address must be set." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:266 +msgid "Least busy iSCSI port not found, using first iSCSI port in list." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:75 +#, python-format +msgid "Failure while invoking function: %(func)s. Error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:162 +#, python-format +msgid "Error while terminating session: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:165 +msgid "Successfully established connection to the server." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:172 +#, python-format +msgid "Error while logging out the user: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:218 +#, python-format +msgid "" +"Not authenticated error occurred. Will create session and try API call " +"again: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:258 +#, python-format +msgid "Task: %(task)s progress: %(prog)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:262 +#, python-format +msgid "Task %s status: success." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:266 +#: cinder/volume/drivers/vmware/api.py:271 +#, python-format +msgid "Task: %(task)s failed with error: %(err)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:290 +msgid "Lease is ready." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:294 +msgid "Lease initializing..." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:304 +#, python-format +msgid "Error: unknown lease state %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:51 +#, python-format +msgid "Read %(bytes)s out of %(max)s from ThreadSafePipe." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:56 +#, python-format +msgid "Completed transfer of size %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:102 +#, python-format +msgid "Initiating image service update on image: %(image)s with meta: %(meta)s" +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:117 +#, python-format +msgid "Glance image: %s is now active." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:123 +#, python-format +msgid "Glance image: %s is in killed state." 
+msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:132 +#, python-format +msgid "Glance image %(id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:171 +#, python-format +msgid "" +"Exception during HTTP connection close in VMwareHTTPWrite. Exception is " +"%s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:203 +#: cinder/volume/drivers/vmware/read_write_util.py:292 +msgid "Could not retrieve URL from lease." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:206 +#, python-format +msgid "Opening vmdk url: %s for write." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:231 +#, python-format +msgid "Written %s bytes to vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:242 +#: cinder/volume/drivers/vmware/read_write_util.py:318 +#, python-format +msgid "Updating progress to %s percent." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:258 +#: cinder/volume/drivers/vmware/read_write_util.py:334 +msgid "Lease released." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:260 +#: cinder/volume/drivers/vmware/read_write_util.py:336 +#, python-format +msgid "Lease is already in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:295 +#, python-format +msgid "Opening vmdk url: %s for read." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:307 +#, python-format +msgid "Read %s bytes from vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:150 +#, python-format +msgid "Error(s): %s occurred in the call to RetrievePropertiesEx." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:189 +#, python-format +msgid "No such SOAP method %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:198 +#, python-format +msgid "httplib error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:209 +#, python-format +msgid "Socket error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:218 +#, python-format +msgid "Type error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:225 +#, python-format +msgid "Error in %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:112 +#, python-format +msgid "Returning spec value %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:115 +#, python-format +msgid "Invalid spec value: %s specified." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:118 +#, python-format +msgid "Returning default spec value: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:169 +#, python-format +msgid "%s not set." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:174 +#, python-format +msgid "Successfully setup driver: %(driver)s for server: %(ip)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:219 +msgid "Backing not available, no operation to be performed." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:287 +#, python-format +msgid "" +"Unable to pick datastore to accommodate %(size)s bytes from the " +"datastores: %(dss)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:293 +#, python-format +msgid "" +"Selected datastore: %(datastore)s with %(host_count)d connected host(s) " +"for the volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:375 +#, python-format +msgid "" +"Unable to find suitable datastore for volume of size: %(vol)s GB under " +"host: %(host)s. 
More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:385 +#, python-format +msgid "Unable to find host to accommodate a disk of size: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:412 +#, python-format +msgid "" +"Unable to find suitable datastore for volume: %(vol)s under host: " +"%(host)s. More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:422 +#, python-format +msgid "Unable to create volume: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:441 +#, python-format +msgid "The instance: %s for which initialize connection is called, exists." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:448 +#, python-format +msgid "There is no backing for the volume: %s. Need to create one." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:456 +msgid "The instance for which initialize connection is called, does not exist." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:461 +#, python-format +msgid "Trying to boot from an empty volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:470 +#, python-format +msgid "" +"Returning connection_info: %(info)s for volume: %(volume)s with " +"connector: %(connector)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:518 +#, python-format +msgid "Snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:523 +#, python-format +msgid "There is no backing, so will not create snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:528 +#, python-format +msgid "Successfully created snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:549 +#, python-format +msgid "Delete snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:554 +#, python-format +msgid "There is no backing, and so there is no snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:558 +#, python-format +msgid "Successfully deleted snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:586 +#, python-format +msgid "Successfully cloned new backing: %(back)s from source VMDK file: %(vmdk)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:603 +#, python-format +msgid "" +"There is no backing for the source volume: %(svol)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:633 +#, python-format +msgid "" +"There is no backing for the source snapshot: %(snap)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:642 +#: cinder/volume/drivers/vmware/vmdk.py:982 +#, python-format +msgid "" +"There is no snapshot point for the snapshotted volume: %(snap)s. Not " +"creating any backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:678 +#, python-format +msgid "Cannot create image of disk format: %s. Only vmdk disk format is accepted." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:713 +#: cinder/volume/drivers/vmware/vmdk.py:771 +#, python-format +msgid "Fetching glance image: %(id)s to server: %(host)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:722 +#: cinder/volume/drivers/vmware/vmdk.py:792 +#, python-format +msgid "Done copying image: %(id)s to volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:725 +#, python-format +msgid "" +"Exception in copy_image_to_volume: %(excep)s. Deleting the backing: " +"%(back)s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:746 +#, python-format +msgid "Exception in _select_ds_for_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:749 +#, python-format +msgid "Selected datastore %(ds)s for new volume of size %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:784 +#, python-format +msgid "Exception in copy_image_to_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:787 +#, python-format +msgid "Deleting the backing: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:808 +#, python-format +msgid "Copy glance image: %s to create new volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:842 +msgid "Upload to glance of attached volume is not supported." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:847 +#, python-format +msgid "Copy Volume: %s to new image." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:853 +#, python-format +msgid "Backing not found, creating for volume: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:872 +#, python-format +msgid "Done copying volume %(vol)s to a new image %(img)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:922 +#, python-format +msgid "Relocating volume: %(backing)s to %(ds)s and %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:961 +#: cinder/volume/drivers/vmware/volumeops.py:630 +#, python-format +msgid "Successfully created clone: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:974 +#, python-format +msgid "" +"There is no backing for the snapshoted volume: %(snap)s. Not creating any" +" backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1010 +#, python-format +msgid "" +"There is no backing for the source volume: %(src)s. Not creating any " +"backing for volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1018 +#, python-format +msgid "Linked clone of source volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:94 +#, python-format +msgid "Downloading image: %s from glance image server as a flat vmdk file." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:107 +#: cinder/volume/drivers/vmware/vmware_images.py:126 +#, python-format +msgid "Downloaded image: %s from glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:113 +#, python-format +msgid "Downloading image: %s from glance image server using HttpNfc import." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:132 +#, python-format +msgid "Uploading image: %s to the Glance image server using HttpNfc export." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:158 +#, python-format +msgid "Uploaded image: %s to the Glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:87 +#, python-format +msgid "Did not find any backing with name: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:94 +#, python-format +msgid "Deleting the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:97 +#, python-format +msgid "Initiated deletion of VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:99 +#, python-format +msgid "Deleted the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:237 +#, python-format +msgid "There are no valid datastores attached to %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:289 +#, python-format +msgid "" +"Creating folder: %(child_folder_name)s under parent folder: " +"%(parent_folder)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:306 +#, python-format +msgid "Child folder already present: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:314 +#, python-format +msgid "Created child folder: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:365 +#, python-format +msgid "Spec for creating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:383 +#, python-format +msgid "" +"Creating volume backing name: %(name)s disk_type: %(disk_type)s size_kb: " +"%(size_kb)s at folder: %(folder)s resourse pool: %(resource_pool)s " +"datastore name: %(ds_name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:395 +#, python-format +msgid "Initiated creation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:398 +#, python-format +msgid "Successfully created volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:438 +#, python-format +msgid "Spec for relocating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:452 +#, python-format +msgid "" +"Relocating backing: %(backing)s to datastore: %(ds)s and resource pool: " +"%(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:462 +#, python-format +msgid "Initiated relocation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:464 +#, python-format +msgid "" +"Successfully relocated volume backing: %(backing)s to datastore: %(ds)s " +"and resource pool: %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:474 +#, python-format +msgid "Moving backing: %(backing)s to folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:479 +#, python-format +msgid "Initiated move of volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:482 +#, python-format +msgid "Successfully moved volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:494 +#, python-format +msgid "Snapshoting backing: %(backing)s with name: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:501 +#, python-format +msgid "Initiated snapshot of volume backing: %(backing)s named: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:505 +#, python-format +msgid "Successfully created snapshot: %(snap)s for volume backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:553 +#, python-format +msgid "Deleting the snapshot: %(name)s from backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:558 +#, python-format +msgid "" +"Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " +"delete anything." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:565 +#, python-format +msgid "Initiated snapshot: %(name)s deletion for backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:569 +#, python-format +msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:597 +#, python-format +msgid "Spec for cloning the backing: %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:613 +#, python-format +msgid "" +"Creating a clone of backing: %(back)s, named: %(name)s, clone type: " +"%(type)s from snapshot: %(snap)s on datastore: %(ds)s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:627 +#, python-format +msgid "Initiated clone of backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:638 +#, python-format +msgid "Deleting file: %(file)s under datacenter: %(dc)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:646 +#, python-format +msgid "Initiated deletion via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:648 +#, python-format +msgid "Successfully deleted file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:701 +msgid "Copying disk data before snapshot of the VM" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:711 +#, python-format +msgid "Initiated copying disk data via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:713 +#, python-format +msgid "Successfully copied disk at: %(src)s to: %(dest)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:722 +#, python-format +msgid "Deleting vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:729 +#, python-format +msgid "Initiated deleting vmdk file via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:731 +#, python-format +msgid "Deleted vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/windows/windows.py:102 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:47 +#, python-format +msgid "" +"check_for_setup_error: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:53 +msgid "check_for_setup_error: there is no ISCSI traffic listening." +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:63 +#, python-format +msgid "" +"get_host_information: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:73 +#, python-format +msgid "" +"get_host_information: the ISCSI target information could not be " +"retrieved. WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:105 +#, python-format +msgid "" +"associate_initiator_with_iscsi_target: an association between initiator: " +"%(init)s and target name: %(target)s could not be established. WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:123 +#, python-format +msgid "" +"delete_iscsi_target: error when deleting the iscsi target associated with" +" target name: %(target)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:139 +#, python-format +msgid "" +"create_volume: error when creating the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:157 +#, python-format +msgid "" +"delete_volume: error when deleting the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:177 +#, python-format +msgid "" +"create_snapshot: error when creating the snapshot name: %(vol_name)s . 
" +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:193 +#, python-format +msgid "" +"create_volume_from_snapshot: error when creating the volume name: " +"%(vol_name)s from snapshot name: %(snap_name)s. WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:208 +#, python-format +msgid "" +"delete_snapshot: error when deleting the snapshot name: %(snap_name)s . " +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:223 +#, python-format +msgid "" +"create_iscsi_target: error when creating iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:240 +#, python-format +msgid "" +"remove_iscsi_target: error when deleting iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:255 +#, python-format +msgid "" +"add_disk_to_target: error adding disk associated to volume : %(vol_name)s" +" to the target name: %(tar_name)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:273 +#, python-format +msgid "" +"copy_vhd_disk: error when copying disk from source path : %(src_path)s to" +" destination path: %(dest_path)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:290 +#, python-format +msgid "" +"extend: error when extending the volume: %(vol_name)s .WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/flows/common.py:52 +#, python-format +msgid "Restoring source %(source_volid)s status to %(status)s" +msgstr "" + +#: cinder/volume/flows/common.py:58 +#, python-format +msgid "" +"Failed setting source volume %(source_volid)s back to its initial " +"%(source_status)s status" +msgstr "" + +#: cinder/volume/flows/common.py:83 +#, python-format +msgid "Updating volume: %(volume_id)s with %(update)s due to: %(reason)s" +msgstr "" + +#: cinder/volume/flows/common.py:90 +#: cinder/volume/flows/manager/create_volume.py:676 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(update)s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:81 +#, python-format +msgid "Originating snapshot status must be one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:103 +#, python-format +msgid "" +"Unable to create a volume from an originating source volume when its " +"status is not one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:126 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than the snapshot size " +"%(snap_size)sGB. They must be >= original snapshot size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:135 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than original volume size " +"%(source_size)sGB. They must be >= original volume size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:144 +#, python-format +msgid "Volume size %(size)s must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:186 +#, python-format +msgid "" +"Size of specified image %(image_size)sGB is larger than volume size " +"%(volume_size)sGB." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:194 +#, python-format +msgid "" +"Volume size %(volume_size)sGB cannot be smaller than the image minDisk " +"size %(min_disk)sGB." 
+msgstr "" + +#: cinder/volume/flows/api/create_volume.py:212 +#, python-format +msgid "Metadata property key %s greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:217 +#, python-format +msgid "Metadata property key %s value greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:254 +#, python-format +msgid "Availability zone '%s' is invalid" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:267 +msgid "Volume must be in the same availability zone as the snapshot" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:276 +msgid "Volume must be in the same availability zone as the source volume" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:315 +msgid "Volume type will be changed to be the same as the source volume." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:463 +#, python-format +msgid "Failed destroying volume entry %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:546 +#, python-format +msgid "Failed rolling back quota for %s reservations" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:590 +#, python-format +msgid "Failed to update quota for deleting volume: %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:678 +#: cinder/volume/flows/manager/create_volume.py:192 +#, python-format +msgid "Volume %s: create failed" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:682 +msgid "Unexpected build error:" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:105 +#, python-format +msgid "" +"Volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d due to " +"%(reason)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:124 +#, python-format +msgid "Volume %s: re-scheduled" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:141 +#, python-format +msgid "Updating volume %(volume_id)s with %(update)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:146 +#, python-format +msgid "Volume %s: resetting 'creating' status failed." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:165 +#, python-format +msgid "Volume %s: rescheduling failed" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:308 +#, python-format +msgid "" +"Failed notifying about the volume action %(event)s for volume " +"%(volume_id)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:345 +#, python-format +msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:347 +#, python-format +msgid "" +"Failed updating volume %(vol_id)s metadata using the provided " +"%(src_type)s %(src_id)s metadata" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:405 +#, python-format +msgid "" +"Failed fetching snapshot %(snapshot_id)s bootable flag using the provided" +" glance snapshot %(snapshot_ref_id)s volume reference" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:418 +#, python-format +msgid "Marking volume %s as bootable." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:421 +#, python-format +msgid "Failed updating volume %(volume_id)s bootable flag to true" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:448 +#, python-format +msgid "" +"Attempting download of %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s." 
+msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:455 +#: cinder/volume/flows/manager/create_volume.py:466 +#, python-format +msgid "" +"Failed to copy image %(image_id)s to volume: %(volume_id)s, error: " +"%(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:461 +#, python-format +msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:475 +#, python-format +msgid "" +"Downloaded image %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s successfully." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:512 +#, python-format +msgid "" +"Creating volume glance metadata for volume %(volume_id)s backed by image " +"%(image_id)s with: %(vol_metadata)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:526 +#, python-format +msgid "" +"Cloning %(volume_id)s from image %(image_id)s at location " +"%(image_location)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:552 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(updates)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:574 +#, python-format +msgid "Unable to create volume. Volume driver %s not initialized" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:588 +#, python-format +msgid "" +"Volume %(volume_id)s: being created using %(functor)s with specification:" +" %(volume_spec)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:611 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with creation provided " +"model %(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:619 +#, python-format +msgid "Volume %s: creating export" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:633 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with driver provided model " +"%(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:680 +#, python-format +msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" +msgstr "" + +#~ msgid "" +#~ msgstr "" + +#~ msgid "Error(s): %s occurred in the call to RetrieveProperties." +#~ msgstr "" + +#~ msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +#~ msgstr "" + +#~ msgid "Deploy v1 of the Cinder API. " +#~ msgstr "" + +#~ msgid "Deploy v2 of the Cinder API. " +#~ msgstr "" + +#~ msgid "_read_xml:%s" +#~ msgstr "" + +#~ msgid "request ip info is %s." +#~ msgstr "" + +#~ msgid "new str info is %s." +#~ msgstr "" + +#~ msgid "Failed to create iser target for volume %(volume_id)s." +#~ msgstr "" + +#~ msgid "Failed to remove iser target for volume %(volume_id)s." 
+#~ msgstr "" + +#~ msgid "rtstool is not installed correctly" +#~ msgstr "" + +#~ msgid "Creating iser_target for: %s" +#~ msgstr "" + +#~ msgid "Failed to create iser target for volume id:%(vol_id)s: %(e)s" +#~ msgstr "" + +#~ msgid "Removing iser_target for: %s" +#~ msgstr "" + +#~ msgid "Failed to remove iser target for volume id:%(vol_id)s: %(e)s" +#~ msgstr "" + +#~ msgid "Volume %s does not exist, it seems it was already deleted" +#~ msgstr "" + +#~ msgid "Executing zfs send/recv on the appliance" +#~ msgstr "" + +#~ msgid "zfs send/recv done, new volume %s created" +#~ msgstr "" + +#~ msgid "Failed to delete temp snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" + +#~ msgid "Failed to delete zfs recv snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" + +#~ msgid "rbd export-diff failed - %s" +#~ msgstr "" + +#~ msgid "rbd import-diff failed - %s" +#~ msgstr "" + +#~ msgid "%s is not on GPFS. Perhaps GPFS not mounted." +#~ msgstr "" + +#~ msgid "Folder %s does not exist, it seems it was already deleted." +#~ msgstr "" + +#~ msgid "No 'os-update_readonly_flag' was specified in request." +#~ msgstr "" + +#~ msgid "Volume 'readonly' flag must be specified in request as a boolean." +#~ msgstr "" + +#~ msgid "ISER provider_location not stored, using discovery" +#~ msgstr "" + +#~ msgid "Could not find iSER export for volume %s" +#~ msgstr "" + +#~ msgid "ISER Discovery: Found %s" +#~ msgstr "" + +#~ msgid "Failed to access the device on the path %(path)s: %(error)s." +#~ msgstr "" + +#~ msgid "iSER device not found at %s" +#~ msgstr "" + +#~ msgid "Found iSER node %(host_device)s (after %(tries)s rescans)." +#~ msgstr "" + +#~ msgid "Skipping ensure_export. No iser_target provisioned for volume: %s" +#~ msgstr "" + +#~ msgid "Skipping remove_export. No iser_target provisioned for volume: %s" +#~ msgstr "" + +#~ msgid "Downloading image: %s from glance image server." +#~ msgstr "" + +#~ msgid "Uploading image: %s to the Glance image server." +#~ msgstr "" + +#~ msgid "Invalid request body" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: prefix %s" +#~ msgstr "" + +#~ msgid "Schedule volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete schedule volume using flow: %s" +#~ msgstr "" + +#~ msgid "Create volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete create volume workflow" +#~ msgstr "" + +#~ msgid "Expected volume result not found" +#~ msgstr "" + +#~ msgid "Manager volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete manager volume workflow" +#~ msgstr "" + +#~ msgid "Unable to update stats, driver is uninitialized" +#~ msgstr "" + +#~ msgid "Bad reponse from server: %s" +#~ msgstr "" + +#~ msgid "%(flow)s has moved into state %(state)s from state %(old_state)s" +#~ msgstr "" + +#~ msgid "No request spec, will not reschedule" +#~ msgstr "" + +#~ msgid "No retry filter property or associated retry info, will not reschedule" +#~ msgstr "" + +#~ msgid "Retry info not present, will not reschedule" +#~ msgstr "" + +#~ msgid "Clear capabilities" +#~ msgstr "" + +#~ msgid "This usually means the volume was never succesfully created." +#~ msgstr "" + +#~ msgid "setting LU uppper (end) limit to %s" +#~ msgstr "" + +#~ msgid "Can't find lun or lun goup in array" +#~ msgstr "" + +#~ msgid "Volume to be restored to is smaller than the backup to be restored" +#~ msgstr "" + +#~ msgid "Volume driver '%(driver)s' not initialized." 
+#~ msgstr "" + +#~ msgid "in looping call" +#~ msgstr "" + +#~ msgid "Is the appropriate service running?" +#~ msgstr "" + +#~ msgid "Could not find another host" +#~ msgstr "" + +#~ msgid "Not enough allocatable volume gigabytes remaining" +#~ msgstr "" + +#~ msgid "Unable to update stats on non-intialized Volume Group: %s" +#~ msgstr "" + +#~ msgid "do_setup: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "migrate_volume started with more than one vdisk copy" +#~ msgstr "" + +#~ msgid "migrate_volume: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "Selected datastore: %s for the volume." +#~ msgstr "" + +#~ msgid "There are no valid datastores present under %s." +#~ msgstr "" + +#~ msgid "Unable to create volume, driver not initialized" +#~ msgstr "" + +#~ msgid "Migration %(migration_id)s could not be found." +#~ msgstr "" + +#~ msgid "Bad driver response status: %(status)s" +#~ msgstr "" + +#~ msgid "Instance %(instance_id)s could not be found." +#~ msgstr "" + +#~ msgid "Volume retype failed: %(reason)s" +#~ msgstr "" + +#~ msgid "SIGTERM received" +#~ msgstr "" + +#~ msgid "Child %(pid)d exited with status %(code)d" +#~ msgstr "" + +#~ msgid "_wait_child %d" +#~ msgstr "" + +#~ msgid "wait wrap.failed %s" +#~ msgstr "" + +#~ msgid "" +#~ "Report interval must be less than " +#~ "service down time. Current config: " +#~ ". Setting " +#~ "service_down_time to: %(new_service_down_time)s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target for volume %(name)s." +#~ msgstr "" + +#~ msgid "Updating iscsi target: %s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target %(name)s: %(e)s" +#~ msgstr "" + +#~ msgid "Caught '%(exception)s' exception." +#~ msgstr "" + +#~ msgid "Get code level failed" +#~ msgstr "" + +#~ msgid "do_setup: Could not get system name" +#~ msgstr "" + +#~ msgid "Failed to get license information." +#~ msgstr "" + +#~ msgid "do_setup: No configured nodes" +#~ msgstr "" + +#~ msgid "enter: _get_chap_secret_for_host: host name %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_chap_secret_for_host: host name " +#~ "%(host_name)s with secret %(chap_secret)s" +#~ msgstr "" + +#~ msgid "" +#~ "_create_host: Cannot clean host name. " +#~ "Host name is not unicode or string" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: %s" +#~ msgstr "" + +#~ msgid "leave: _get_host_from_connector: host %s" +#~ msgstr "" + +#~ msgid "enter: _create_host: host %s" +#~ msgstr "" + +#~ msgid "_create_host: No connector ports" +#~ msgstr "" + +#~ msgid "leave: _create_host: host %(host)s - %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +#~ msgstr "" + +#~ msgid "" +#~ "storwize_svc_multihostmap_enabled is set to " +#~ "False, Not allow multi host mapping" +#~ msgstr "" + +#~ msgid "volume %s mapping to multi host" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _map_vol_to_host: LUN %(result_lun)s, " +#~ "volume %(volume_name)s, host %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "leave: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "_create_host failed to return the host name." +#~ msgstr "" + +#~ msgid "_get_host_from_connector failed to return the host name for connector" +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to any host found." +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: Multiple mappings of " +#~ "volume %(vol_name)s found, no host " +#~ "specified." 
+#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to host %(host_name)s found" +#~ msgstr "" + +#~ msgid "protocol must be specified as ' iSCSI' or ' FC'" +#~ msgstr "" + +#~ msgid "enter: _create_vdisk: vdisk %s " +#~ msgstr "" + +#~ msgid "" +#~ "_create_vdisk %(name)s - did not find success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find success " +#~ "message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find mapping " +#~ "id in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to prepare FlashCopy" +#~ " from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "Unexecpted mapping status %(status)s for " +#~ "mapping %(id)s. Attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Mapping %(id)s prepare failed to " +#~ "complete within the allotted %(to)d " +#~ "seconds timeout. Terminating." +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s with " +#~ "exception %(ex)s" +#~ msgstr "" + +#~ msgid "_prepare_fc_map: %s" +#~ msgstr "" + +#~ msgid "" +#~ "_start_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "enter: _run_flashcopy: execute FlashCopy from" +#~ " source %(source)s to target %(target)s" +#~ msgstr "" + +#~ msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +#~ msgstr "" + +#~ msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %(src_vdisk)s (%(src_id)s) does not exist" +#~ msgstr "" + +#~ msgid "" +#~ "_create_copy: cannot get source vdisk " +#~ "%(src)s capacity from vdisk attributes " +#~ "%(attr)s" +#~ msgstr "" + +#~ msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_flashcopy_mapping_attributes: mapping " +#~ "%(fc_map_id)s, attributes %(attributes)s" +#~ msgstr "" + +#~ msgid "enter: _is_vdisk_defined: vdisk %s " +#~ msgstr "" + +#~ msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +#~ msgstr "" + +#~ msgid "enter: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "warning: Tried to delete vdisk %s but it does not exist." 
+#~ msgstr "" + +#~ msgid "leave: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "" +#~ "_add_vdisk_copy %(name)s - did not find" +#~ " success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "_get_vdisk_copy_attrs: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "_get_pool_attrs: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "enter: _execute_command_and_parse_attributes: command %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _execute_command_and_parse_attributes:\n" +#~ "command: %(cmd)s\n" +#~ "attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "_get_hdr_dic: attribute headers and values do not match.\n" +#~ " Headers: %(header)s\n" +#~ " Values: %(row)s" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ "stdout: %(out)s\n" +#~ "stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "Did not find expected column in %(fun)s: %(hdr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Volume size %(size)s cannot be lesser" +#~ " than the snapshot size %(snap_size)s. " +#~ "They must be >= original snapshot " +#~ "size." +#~ msgstr "" + +#~ msgid "" +#~ "Clones currently disallowed when %(size)s " +#~ "< %(source_size)s. They must be >= " +#~ "original volume size." +#~ msgstr "" + +#~ msgid "" +#~ "Size of specified image %(image_size)s " +#~ "is larger than volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "" +#~ "Image minDisk size %(min_disk)s is " +#~ "larger than the volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "Updating volume %(volume_id)s with %(update)s" +#~ msgstr "" + +#~ msgid "Volume %s: resetting 'creating' status failed" +#~ msgstr "" + +#~ msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s" +#~ msgstr "" + +#~ msgid "Marking volume %s as bootable" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting download of %(image_id)s " +#~ "(%(image_location)s) to volume %(volume_id)s" +#~ msgstr "" + +#~ msgid "" +#~ "Downloaded image %(image_id)s (%(image_location)s)" +#~ " to volume %(volume_id)s successfully" +#~ msgstr "" + +#~ msgid "" +#~ "Creating volume glance metadata for " +#~ "volume %(volume_id)s backed by image " +#~ "%(image_id)s with: %(vol_metadata)s" +#~ msgstr "" + +#~ msgid "" +#~ "Cloning %(volume_id)s from image %(image_id)s" +#~ " at location %(image_location)s" +#~ msgstr "" + diff --git a/cinder/locale/vi_VN/LC_MESSAGES/cinder.po b/cinder/locale/vi_VN/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..6229b4f682 --- /dev/null +++ b/cinder/locale/vi_VN/LC_MESSAGES/cinder.po @@ -0,0 +1,10736 @@ +# Vietnamese (Vietnam) translations for cinder. +# Copyright (C) 2013 ORGANIZATION +# This file is distributed under the same license as the cinder project. 
+# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Cinder\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-02-03 06:16+0000\n" +"PO-Revision-Date: 2013-05-08 11:44+0000\n" +"Last-Translator: FULL NAME \n" +"Language-Team: Vietnamese (Viet Nam) " +"(http://www.transifex.com/projects/p/openstack/language/vi_VN/)\n" +"Plural-Forms: nplurals=1; plural=0\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:102 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:66 cinder/brick/exception.py:33 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:88 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:107 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:112 +#, python-format +msgid "Volume driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:116 +#, python-format +msgid "Backup driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:120 +#, python-format +msgid "Connection to glance failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:124 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:129 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:133 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:137 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:141 +msgid "Volume driver not ready." +msgstr "" + +#: cinder/exception.py:145 cinder/brick/exception.py:74 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:150 +#, python-format +msgid "Invalid snapshot: %(reason)s" +msgstr "" + +#: cinder/exception.py:154 +#, python-format +msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:159 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:163 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:167 +msgid "The results are invalid." +msgstr "" + +#: cinder/exception.py:171 +#, python-format +msgid "Invalid input received: %(reason)s" +msgstr "" + +#: cinder/exception.py:175 +#, python-format +msgid "Invalid volume type: %(reason)s" +msgstr "" + +#: cinder/exception.py:179 +#, python-format +msgid "Invalid volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:183 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:187 +#, python-format +msgid "Invalid host: %(reason)s" +msgstr "" + +#: cinder/exception.py:193 cinder/brick/exception.py:81 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:197 +#, python-format +msgid "Invalid auth key: %(reason)s" +msgstr "" + +#: cinder/exception.py:201 +#, python-format +msgid "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" +msgstr "" + +#: cinder/exception.py:206 +msgid "Service is unavailable at this time." 
+msgstr "" + +#: cinder/exception.py:210 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:214 +#, python-format +msgid "The device in the path %(path)s is unavailable: %(reason)s" +msgstr "" + +#: cinder/exception.py:218 +#, python-format +msgid "Expected a uuid but received %(uuid)s." +msgstr "" + +#: cinder/exception.py:222 cinder/brick/exception.py:68 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:228 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:232 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "" +"Volume %(volume_id)s has no administration metadata with key " +"%(metadata_key)s." +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Invalid metadata: %(reason)s" +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Invalid metadata size: %(reason)s" +msgstr "" + +#: cinder/exception.py:250 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:255 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:264 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:269 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s deletion is not allowed with volumes " +"present with the type." +msgstr "" + +#: cinder/exception.py:274 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:278 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:282 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:287 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:291 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:295 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:328 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:332 +#, python-format +msgid "Unknown quota resources %(unknown)s." 
+msgstr "" + +#: cinder/exception.py:336 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:340 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:344 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:348 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:352 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:356 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:365 +#, python-format +msgid "Volume Type %(id)s already exists." +msgstr "" + +#: cinder/exception.py:369 +#, python-format +msgid "Volume type encryption for type %(type_id)s already exists." +msgstr "" + +#: cinder/exception.py:373 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:377 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:381 +#, python-format +msgid "Could not find parameter %(param)s" +msgstr "" + +#: cinder/exception.py:385 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:389 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:398 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:402 +#, python-format +msgid "Quota exceeded: code=%(code)s" +msgstr "" + +#: cinder/exception.py:409 +#, python-format +msgid "" +"Requested volume or snapshot exceeds allowed Gigabytes quota. Requested " +"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." +msgstr "" + +#: cinder/exception.py:415 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:419 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:423 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:427 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:432 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:436 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:440 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:444 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:449 +#, python-format +msgid "Glance metadata for volume/snapshot %(id)s cannot be found." 
+msgstr "" + +#: cinder/exception.py:453 +#, python-format +msgid "Failed to export for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:457 +#, python-format +msgid "Failed to create metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:461 +#, python-format +msgid "Failed to update metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:465 +#, python-format +msgid "Failed to copy metadata to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:469 +#, python-format +msgid "Failed to copy image to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:473 +msgid "Invalid Ceph args provided for backup rbd operation" +msgstr "" + +#: cinder/exception.py:477 +msgid "An error has occurred during backup operation" +msgstr "" + +#: cinder/exception.py:481 +msgid "Backup RBD operation failed" +msgstr "" + +#: cinder/exception.py:485 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:489 +msgid "Failed to identify volume backend." +msgstr "" + +#: cinder/exception.py:493 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "" + +#: cinder/exception.py:497 +#, python-format +msgid "Connection to swift failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:501 +#, python-format +msgid "Transfer %(transfer_id)s could not be found." +msgstr "" + +#: cinder/exception.py:505 +#, python-format +msgid "Volume migration failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:509 +#, python-format +msgid "SSH command injection detected: %(command)s" +msgstr "" + +#: cinder/exception.py:513 +#, python-format +msgid "QoS Specs %(specs_id)s already exists." +msgstr "" + +#: cinder/exception.py:517 +#, python-format +msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:522 +#, python-format +msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "No such QoS spec %(specs_id)s." +msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:536 +#, python-format +msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:541 +#, python-format +msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." +msgstr "" + +#: cinder/exception.py:546 +#, python-format +msgid "Invalid qos specs: %(reason)s" +msgstr "" + +#: cinder/exception.py:550 +#, python-format +msgid "QoS Specs %(specs_id)s is still associated with entities." +msgstr "" + +#: cinder/exception.py:554 +#, python-format +msgid "key manager error: %(reason)s" +msgstr "" + +#: cinder/exception.py:560 +msgid "Coraid Cinder Driver exception." +msgstr "" + +#: cinder/exception.py:564 +msgid "Failed to encode json data." +msgstr "" + +#: cinder/exception.py:568 +msgid "Login on ESM failed." +msgstr "" + +#: cinder/exception.py:572 +msgid "Relogin on ESM failed." +msgstr "" + +#: cinder/exception.py:576 +#, python-format +msgid "Group with name \"%(group_name)s\" not found." +msgstr "" + +#: cinder/exception.py:580 +#, python-format +msgid "ESM configure request failed: %(message)s." +msgstr "" + +#: cinder/exception.py:584 +#, python-format +msgid "Coraid ESM not available with reason: %(reason)s." +msgstr "" + +#: cinder/exception.py:589 +msgid "Zadara Cinder Driver exception." 
+msgstr "" + +#: cinder/exception.py:593 +#, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:597 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:601 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:605 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:609 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:613 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:618 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:622 +msgid "SolidFire Cinder Driver exception" +msgstr "" + +#: cinder/exception.py:626 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:630 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:636 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:641 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:645 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:649 cinder/exception.py:662 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:654 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:658 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/manager.py:133 +msgid "Notifying Schedulers of capabilities ..." +msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:105 +#, python-format +msgid "" +"Default quota for resource: %(res)s is set by the default quota flag: " +"quota_%(res)s, it is now deprecated. Please use the the default quota " +"class for default quota." +msgstr "" + +#: cinder/quota.py:748 +#, python-format +msgid "Created reservations %s" +msgstr "" + +#: cinder/quota.py:770 +#, python-format +msgid "Failed to commit reservations %s" +msgstr "" + +#: cinder/quota.py:790 +#, python-format +msgid "Failed to roll back reservations %s" +msgstr "" + +#: cinder/quota.py:876 +msgid "Cannot register resource" +msgstr "" + +#: cinder/quota.py:879 +msgid "Cannot register resources" +msgstr "" + +#: cinder/quota_utils.py:46 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume - " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/quota_utils.py:56 cinder/transfer/api.py:168 +#: cinder/volume/flows/api/create_volume.py:520 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/service.py:95 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:108 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:148 +#, python-format +msgid "" +"Report interval must be less than service down time. Current config " +"service_down_time: %(service_down_time)s, report_interval for this: " +"service is: %(report_interval)s. 
Setting global service_down_time to: " +"%(new_down_time)s" +msgstr "" + +#: cinder/service.py:216 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:255 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:270 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:276 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:298 +#, python-format +msgid "" +"Value of config option %(name)s_workers must be integer greater than 1. " +"Input value ignored." +msgstr "" + +#: cinder/service.py:373 +msgid "serve() can only be called once" +msgstr "" + +#: cinder/service.py:379 cinder/openstack/common/service.py:166 +#: cinder/openstack/common/service.py:384 +msgid "Full set of CONF:" +msgstr "" + +#: cinder/service.py:387 +#, python-format +msgid "%s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Can not translate %s to integer." +msgstr "" + +#: cinder/utils.py:127 +#, python-format +msgid "May specify only one of %s" +msgstr "" + +#: cinder/utils.py:212 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:228 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:412 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:423 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:698 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:759 +#, python-format +msgid "Volume driver %s not initialized" +msgstr "" + +#: cinder/wsgi.py:127 cinder/openstack/common/sslutils.py:50 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: cinder/wsgi.py:130 cinder/openstack/common/sslutils.py:53 +#, python-format +msgid "Unable to find ca_file : %s" +msgstr "" + +#: cinder/wsgi.py:133 cinder/openstack/common/sslutils.py:56 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: cinder/wsgi.py:136 cinder/openstack/common/sslutils.py:59 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:169 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:206 +#, python-format +msgid "Started %(name)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:244 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:313 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." 
+msgstr "" + +#: cinder/api/common.py:92 cinder/api/common.py:126 cinder/volume/api.py:266 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:95 cinder/api/common.py:130 cinder/volume/api.py:263 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:120 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:134 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:162 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:189 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:182 +msgid "Initializing extension manager." +msgstr "" + +#: cinder/api/extensions.py:197 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:235 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:236 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:240 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:256 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:262 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:276 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:287 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:356 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:266 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:463 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:786 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:907 +msgid "subclasses must implement construct()!" 
+msgstr "" + +#: cinder/api/contrib/admin_actions.py:81 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:227 +#: cinder/api/contrib/volume_transfer.py:157 +#: cinder/api/contrib/volume_transfer.py:193 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:224 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:60 +msgid "Snapshot not found." +msgstr "" + +#: cinder/api/contrib/hosts.py:86 cinder/api/openstack/wsgi.py:245 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:136 +#, python-format +msgid "Host '%s' could not be found." +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:168 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:180 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:206 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:214 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:111 +msgid "Please specify a name for QoS specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:220 +msgid "Failed to disassociate qos specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:222 +msgid "Qos specs still in use." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:298 +#: cinder/api/contrib/qos_specs_manage.py:351 +msgid "Volume Type id must not be None." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:72 +msgid "Missing required element quota_class_set in request body." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:81 +msgid "Quota class limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:85 +msgid "Quota class limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:60 +msgid "Quota limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quotas.py:65 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:100 +msgid "Missing required element quota_set in request body." +msgstr "" + +#: cinder/api/contrib/quotas.py:111 +#, python-format +msgid "Bad key(s) in quota set: %s" +msgstr "" + +#: cinder/api/contrib/scheduler_hints.py:36 +msgid "Malformed scheduler_hints attribute" +msgstr "" + +#: cinder/api/contrib/services.py:84 +msgid "" +"Query by service parameter is deprecated. Please use binary parameter " +"instead." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:51 +msgid "'status' must be specified." 
+msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:61 +#, python-format +msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:67 +#, python-format +msgid "" +"Provided snapshot status %(provided)s not allowed for snapshot with " +"status %(current)s." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:79 +msgid "progress must be an integer percentage" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:101 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:105 +#: cinder/api/v1/snapshot_metadata.py:75 cinder/api/v1/volume_metadata.py:75 +#: cinder/api/v2/snapshot_metadata.py:75 cinder/api/v2/volume_metadata.py:74 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:108 +#: cinder/api/v1/snapshot_metadata.py:79 cinder/api/v1/volume_metadata.py:79 +#: cinder/api/v2/snapshot_metadata.py:79 cinder/api/v2/volume_metadata.py:78 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:150 +msgid "" +"Key names can only contain alphanumeric characters, underscores, periods," +" colons and hyphens." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:99 +#, python-format +msgid "" +"Invalid request to attach volume to an instance %(instance_uuid)s and a " +"host %(host_name)s simultaneously" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:107 +msgid "Invalid request to attach volume to an invalid target" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:111 +msgid "" +"Invalid request to attach volume with an invalid mode. Attaching mode " +"should be 'rw' or 'ro'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:196 +msgid "Unable to fetch connection information from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:216 +msgid "Unable to terminate volume connection from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:229 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:237 +msgid "Bad value for 'force' parameter." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:240 +msgid "'force' is not string or bool." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:280 +msgid "New volume size must be specified as an integer." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:299 +msgid "Must specify readonly in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:307 +msgid "Bad value for 'readonly'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:311 +msgid "'readonly' not string or bool" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:325 +msgid "New volume type must be specified." 
+msgstr "" + +#: cinder/api/contrib/volume_transfer.py:131 +msgid "Listing volume transfers" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:147 +#, python-format +msgid "Creating new volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:162 +#, python-format +msgid "Creating transfer of volume %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:183 +#, python-format +msgid "Accepting volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:196 +#, python-format +msgid "Accepting transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:217 +#, python-format +msgid "Delete transfer with id: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:64 +msgid "key_size must be non-negative" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:67 +msgid "key_size must be an integer" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:73 +msgid "provider must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:75 +msgid "control_location must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:83 +#, python-format +msgid "Valid control location are: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:111 +msgid "Cannot create encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:115 +msgid "Create body is not valid." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:157 +msgid "Cannot delete encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/middleware/auth.py:108 +msgid "Invalid service catalog json." +msgstr "" + +#: cinder/api/middleware/fault.py:44 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:53 cinder/api/openstack/wsgi.py:984 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/fault.py:69 +#, python-format +msgid "%(exception)s: %(explanation)s" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:55 cinder/api/middleware/sizelimit.py:64 +#: cinder/api/middleware/sizelimit.py:78 +msgid "Request is too large." +msgstr "" + +#: cinder/api/openstack/__init__.py:69 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:80 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:104 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:126 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." 
+msgstr "" + +#: cinder/api/openstack/wsgi.py:220 cinder/api/openstack/wsgi.py:634 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:639 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:677 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:682 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:685 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:793 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:799 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:803 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:914 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:917 cinder/api/openstack/wsgi.py:930 +#: cinder/api/v1/snapshot_metadata.py:53 cinder/api/v1/snapshot_metadata.py:71 +#: cinder/api/v1/snapshot_metadata.py:96 cinder/api/v1/snapshot_metadata.py:121 +#: cinder/api/v1/volume_metadata.py:53 cinder/api/v1/volume_metadata.py:71 +#: cinder/api/v1/volume_metadata.py:96 cinder/api/v1/volume_metadata.py:121 +#: cinder/api/v2/snapshot_metadata.py:53 cinder/api/v2/snapshot_metadata.py:71 +#: cinder/api/v2/snapshot_metadata.py:96 cinder/api/v2/snapshot_metadata.py:121 +#: cinder/api/v2/volume_metadata.py:52 cinder/api/v2/volume_metadata.py:70 +#: cinder/api/v2/volume_metadata.py:95 cinder/api/v2/volume_metadata.py:120 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:927 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:939 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:987 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:139 cinder/api/v2/limits.py:138 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:264 cinder/api/v2/limits.py:261 +msgid "This request was rate-limited." 
+msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:37 cinder/api/v1/snapshot_metadata.py:117 +#: cinder/api/v1/snapshot_metadata.py:156 cinder/api/v2/snapshot_metadata.py:37 +#: cinder/api/v2/snapshot_metadata.py:117 +#: cinder/api/v2/snapshot_metadata.py:156 +msgid "snapshot does not exist" +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:139 +#: cinder/api/v1/snapshot_metadata.py:149 cinder/api/v1/volume_metadata.py:139 +#: cinder/api/v1/volume_metadata.py:149 cinder/api/v2/snapshot_metadata.py:139 +#: cinder/api/v2/snapshot_metadata.py:149 cinder/api/v2/volume_metadata.py:138 +#: cinder/api/v2/volume_metadata.py:148 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:119 cinder/api/v2/snapshots.py:120 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:184 +msgid "'volume_id' must be specified" +msgstr "" + +#: cinder/api/v1/snapshots.py:182 cinder/api/v2/snapshots.py:193 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:186 cinder/api/v2/snapshots.py:202 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:37 cinder/api/v1/volume_metadata.py:117 +#: cinder/api/v1/volume_metadata.py:156 cinder/api/v2/volume_metadata.py:36 +#: cinder/api/v2/volume_metadata.py:116 cinder/api/v2/volume_metadata.py:155 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:111 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:290 cinder/api/v2/volumes.py:228 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:344 cinder/api/v1/volumes.py:348 +#: cinder/api/v2/volumes.py:298 cinder/api/v2/volumes.py:302 +msgid "Invalid imageRef provided." +msgstr "" + +#: cinder/api/v1/volumes.py:388 cinder/api/v2/volumes.py:354 +#, python-format +msgid "snapshot id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:401 +#, python-format +msgid "source vol id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:412 cinder/api/v2/volumes.py:377 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/v1/volumes.py:496 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/snapshots.py:111 cinder/api/v2/snapshots.py:126 +#: cinder/api/v2/snapshots.py:267 +msgid "Snapshot could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:174 cinder/api/v2/snapshots.py:234 +#: cinder/api/v2/volumes.py:313 cinder/api/v2/volumes.py:419 +#, python-format +msgid "Missing required element '%s' in request body" +msgstr "" + +#: cinder/api/v2/snapshots.py:190 cinder/api/v2/volumes.py:217 +#: cinder/api/v2/volumes.py:234 cinder/api/v2/volumes.py:449 +msgid "Volume could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:230 cinder/api/v2/volumes.py:415 +msgid "Missing request body" +msgstr "" + +#: cinder/api/v2/types.py:70 +msgid "Volume type not found" +msgstr "" + +#: cinder/api/v2/volumes.py:237 +msgid "Volume cannot be deleted while in attached state" +msgstr "" + +#: cinder/api/v2/volumes.py:343 +msgid "Volume type not found." 
+msgstr "" + +#: cinder/api/v2/volumes.py:366 +#, python-format +msgid "source volume id:%s not found" +msgstr "" + +#: cinder/api/v2/volumes.py:472 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:66 +msgid "Backup status must be available or error" +msgstr "" + +#: cinder/backup/api.py:105 +msgid "Volume to be backed up must be available" +msgstr "" + +#: cinder/backup/api.py:140 +msgid "Backup status must be available" +msgstr "" + +#: cinder/backup/api.py:145 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:154 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:170 +msgid "Volume to be restored to must be available" +msgstr "" + +#: cinder/backup/api.py:176 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." +msgstr "" + +#: cinder/backup/api.py:181 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:97 +msgid "NULL host not allowed for volume backend lookup." +msgstr "" + +#: cinder/backup/manager.py:100 +#, python-format +msgid "Checking hostname '%s' for backend info." +msgstr "" + +#: cinder/backup/manager.py:107 +#, python-format +msgid "Backend not found in hostname (%s) so using default." +msgstr "" + +#: cinder/backup/manager.py:117 +#, python-format +msgid "Manager requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:120 cinder/backup/manager.py:132 +msgid "Fetching default backend." +msgstr "" + +#: cinder/backup/manager.py:123 +#, python-format +msgid "Volume manager for backend '%s' does not exist." +msgstr "" + +#: cinder/backup/manager.py:129 +#, python-format +msgid "Driver requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:147 +#, python-format +msgid "" +"Registering backend %(backend)s (host=%(host)s " +"backend_name=%(backend_name)s)." +msgstr "" + +#: cinder/backup/manager.py:154 +#, python-format +msgid "Registering default backend %s." +msgstr "" + +#: cinder/backup/manager.py:158 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)." +msgstr "" + +#: cinder/backup/manager.py:165 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s." +msgstr "" + +#: cinder/backup/manager.py:184 +msgid "Cleaning up incomplete backup operations." +msgstr "" + +#: cinder/backup/manager.py:189 +#, python-format +msgid "Resetting volume %s to available (was backing-up)." +msgstr "" + +#: cinder/backup/manager.py:194 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)." +msgstr "" + +#: cinder/backup/manager.py:206 +#, python-format +msgid "Resetting backup %s to error (was creating)." +msgstr "" + +#: cinder/backup/manager.py:212 +#, python-format +msgid "Resetting backup %s to available (was restoring)." +msgstr "" + +#: cinder/backup/manager.py:217 +#, python-format +msgid "Resuming delete on backup: %s." +msgstr "" + +#: cinder/backup/manager.py:225 +#, python-format +msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:237 +#, python-format +msgid "" +"Create backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s." 
+msgstr "" + +#: cinder/backup/manager.py:249 +#, python-format +msgid "" +"Create backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:282 +#, python-format +msgid "Create backup finished. backup: %s." +msgstr "" + +#: cinder/backup/manager.py:286 +#, python-format +msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:299 +#, python-format +msgid "" +"Restore backup aborted: expected volume status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:310 +#, python-format +msgid "" +"Restore backup aborted: expected backup status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:329 +#, python-format +msgid "" +"Restore backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:360 +#, python-format +msgid "" +"Restore backup finished, backup %(backup_id)s restored to volume " +"%(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:379 +#, python-format +msgid "Delete backup started, backup: %s." +msgstr "" + +#: cinder/backup/manager.py:386 +#, python-format +msgid "" +"Delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:399 +#, python-format +msgid "" +"Delete backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:422 +#, python-format +msgid "Delete backup finished, backup %s deleted." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:116 +msgid "" +"rbd striping not supported - ignoring configuration settings for rbd " +"striping" +msgstr "" + +#: cinder/backup/drivers/ceph.py:147 +#, python-format +msgid "invalid user '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:213 +msgid "backup_id required" +msgstr "" + +#: cinder/backup/drivers/ceph.py:224 +#, python-format +msgid "discarding %(length)s bytes from offset %(offset)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:232 +#, python-format +msgid "writing zeroes chunk %d" +msgstr "" + +#: cinder/backup/drivers/ceph.py:246 +#, python-format +msgid "transferring data between '%(src)s' and '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:250 +#, python-format +msgid "%(chunks)s chunks of %(bytes)s bytes to be transferred" +msgstr "" + +#: cinder/backup/drivers/ceph.py:269 +#, python-format +msgid "transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:279 +#, python-format +msgid "transferring remaining %s bytes" +msgstr "" + +#: cinder/backup/drivers/ceph.py:295 +#, python-format +msgid "creating base image '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:322 cinder/backup/drivers/ceph.py:603 +#, python-format +msgid "deleting backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:325 +msgid "no backup snapshot to delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:361 +#, python-format +msgid "trying diff format name format basename='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:369 +#, python-format +msgid "image %s not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:377 +#, python-format +msgid "base image still has %s snapshots so skipping base image delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:382 +#, python-format +msgid "deleting base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:389 +#, python-format +msgid "image busy, retrying %(retries)s more time(s) in %(delay)ss" +msgstr "" + +#: cinder/backup/drivers/ceph.py:394 +msgid "max retries reached - raising error" +msgstr "" + +#: cinder/backup/drivers/ceph.py:397 +#, python-format +msgid "base backup image='%s' deleted)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:407 +#, python-format +msgid "deleting source snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:453 +#, python-format +msgid "performing differential transfer from '%(src)s' to '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:478 +#, python-format +msgid "rbd diff op failed - (ret=%(ret)s stderr=%(stderr)s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:488 +#, python-format +msgid "image '%s' not found - trying diff format name" +msgstr "" + +#: cinder/backup/drivers/ceph.py:493 +#, python-format +msgid "diff format image '%s' not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:528 +#, python-format +msgid "using --from-snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:543 +#, python-format +msgid "source snap '%s' is stale so deleting" +msgstr "" + +#: cinder/backup/drivers/ceph.py:555 +#, python-format +msgid "" +"snap='%(snap)s' does not exist in base image='%(base)s' - aborting " +"incremental backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:566 +#, python-format +msgid "creating backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:586 +#, python-format +msgid "differential backup transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:595 +msgid "differential backup transfer failed" +msgstr "" + +#: 
cinder/backup/drivers/ceph.py:625 +#, python-format +msgid "creating base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:634 +msgid "copying data" +msgstr "" + +#: cinder/backup/drivers/ceph.py:694 +#, python-format +msgid "looking for snapshot of backup base '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:697 +#, python-format +msgid "backup base '%s' has no snapshots" +msgstr "" + +#: cinder/backup/drivers/ceph.py:704 +#, python-format +msgid "backup '%s' has no snapshot" +msgstr "" + +#: cinder/backup/drivers/ceph.py:708 +#, python-format +msgid "backup should only have one snapshot but instead has %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:713 +#, python-format +msgid "found snapshot '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:734 +msgid "need non-zero volume size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:751 +#, python-format +msgid "Starting backup of volume='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:764 +msgid "forcing full backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:776 +#, python-format +msgid "backup '%s' finished." +msgstr "" + +#: cinder/backup/drivers/ceph.py:834 +msgid "adjusting restore vol size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:846 +#, python-format +msgid "trying incremental restore from base='%(base)s' snap='%(snap)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:858 +msgid "differential restore failed, trying full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:869 +#, python-format +msgid "restore transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:916 +#, python-format +msgid "rbd has %s extents" +msgstr "" + +#: cinder/backup/drivers/ceph.py:938 +msgid "dest volume is original volume - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:959 +msgid "destination has extents - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:964 +#, python-format +msgid "no restore point found for backup='%s', forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:995 +msgid "forcing full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1005 +#, python-format +msgid "starting restore from Ceph backup=%(src)s to volume=%(dest)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1016 +msgid "volume_file does not support fileno() so skipping fsync()" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1021 +msgid "restore finished successfully." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:1023 +#, python-format +msgid "restore finished with error - %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1029 +#, python-format +msgid "delete started for backup=%s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1034 +msgid "rbd image not found but continuing anyway so that db entry can be removed" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1037 +#, python-format +msgid "delete '%s' finished with warning" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1039 +#, python-format +msgid "delete '%s' finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:106 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:123 +#, python-format +msgid "single_user auth mode enabled, but %(param)s not set" +msgstr "" + +#: cinder/backup/drivers/swift.py:141 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:146 +#, python-format +msgid "container %s does not exist" +msgstr "" + +#: cinder/backup/drivers/swift.py:151 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/drivers/swift.py:157 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:173 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:182 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:192 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:209 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/drivers/swift.py:214 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:219 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:224 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/drivers/swift.py:234 +#, python-format +msgid "volume size %d is invalid." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:248 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:271 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/drivers/swift.py:278 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:287 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/drivers/swift.py:291 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/drivers/swift.py:297 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:301 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:304 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:312 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/drivers/swift.py:328 cinder/backup/drivers/tsm.py:324 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/drivers/swift.py:345 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/drivers/swift.py:350 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:356 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/drivers/swift.py:362 +#, python-format +msgid "" +"restoring object from swift. backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:378 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/drivers/swift.py:401 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:409 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:423 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:428 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:432 cinder/backup/drivers/tsm.py:378 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:446 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:455 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:458 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:468 cinder/backup/drivers/tsm.py:440 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/backup/drivers/tsm.py:85 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to create device hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:143 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to obtain backup success notification from " +"server.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:173 +#, python-format +msgid "" +"restore: %(vol_id)s Failed.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:199 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a block device." +msgstr "" + +#: cinder/backup/drivers/tsm.py:206 +#, python-format +msgid "backup: %(vol_id)s Failed. Cannot obtain real path to device %(path)s." +msgstr "" + +#: cinder/backup/drivers/tsm.py:213 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a file." +msgstr "" + +#: cinder/backup/drivers/tsm.py:260 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to remove backup hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:286 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to TSM, volume path: " +"%(volume_path)s," +msgstr "" + +#: cinder/backup/drivers/tsm.py:298 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:308 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:338 +#, python-format +msgid "" +"restore: starting restore of backup from TSM to volume %(volume_id)s, " +"backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:352 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:362 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:413 +#, python-format +msgid "" +"delete: %(vol_id)s Failed to run dsmc with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:421 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments with " +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:432 +#, python-format +msgid "" +"delete: %(vol_id)s Failed with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/brick/exception.py:55 +#, python-format +msgid "Exception in string format operation. 
msg='%s'" +msgstr "" + +#: cinder/brick/exception.py:85 +msgid "We are unable to locate any Fibre Channel devices." +msgstr "" + +#: cinder/brick/exception.py:89 +msgid "Unable to find a Fibre Channel volume device." +msgstr "" + +#: cinder/brick/exception.py:93 +#, python-format +msgid "Volume device not found at %(device)s." +msgstr "" + +#: cinder/brick/exception.py:97 +#, python-format +msgid "Unable to find Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:101 +#, python-format +msgid "Failed to create Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:105 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:109 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:113 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:117 +#, python-format +msgid "Connect to volume via protocol %(protocol)s not supported." +msgstr "" + +#: cinder/brick/initiator/connector.py:127 +#, python-format +msgid "Invalid InitiatorConnector protocol specified %(protocol)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:140 +#, python-format +msgid "Failed to access the device on the path %(path)s: %(error)s %(info)s." +msgstr "" + +#: cinder/brick/initiator/connector.py:229 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. Try" +" number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:242 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:317 +#, python-format +msgid "Could not find the iSCSI Initiator File %s" +msgstr "" + +#: cinder/brick/initiator/connector.py:609 +msgid "We are unable to locate any Fibre Channel devices" +msgstr "" + +#: cinder/brick/initiator/connector.py:619 +#, python-format +msgid "Looking for Fibre Channel dev %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:629 +msgid "Fibre Channel volume device not found." +msgstr "" + +#: cinder/brick/initiator/connector.py:633 +#, python-format +msgid "Fibre volume not yet found. Will rescan & retry. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:649 +#, python-format +msgid "Found Fibre Channel volume %(name)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:658 +#, python-format +msgid "Multipath device discovered %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:776 +#, python-format +msgid "AoE volume not yet found at: %(path)s. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:789 +#, python-format +msgid "Found AoE device %(path)s (after %(tries)s rediscover)" +msgstr "" + +#: cinder/brick/initiator/connector.py:815 +#, python-format +msgid "aoe-discover: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:825 +#, python-format +msgid "aoe-revalidate %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:834 +#, python-format +msgid "aoe-flush %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:858 +msgid "" +"Connection details not present. RemoteFsClient may not initialize " +"properly." 
+msgstr "" + +#: cinder/brick/initiator/connector.py:915 +msgid "Invalid connection_properties specified no device_path attribute" +msgstr "" + +#: cinder/brick/initiator/linuxfc.py:50 cinder/brick/initiator/linuxfc.py:56 +msgid "systool is not installed" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:99 +#: cinder/brick/initiator/linuxscsi.py:107 +#: cinder/brick/initiator/linuxscsi.py:124 +#, python-format +msgid "multipath call failed exit (%(code)s)" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:145 +#, python-format +msgid "Couldn't find multipath device %(line)s" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:149 +#, python-format +msgid "Found multipath device = %(mdev)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:140 +msgid "Attempting recreate of backing lun..." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:158 +#, python-format +msgid "" +"Failed to recover attempt to create iscsi backing lun for volume " +"id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:177 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:184 +#, python-format +msgid "" +"Created volume path %(vp)s,\n" +"content: %(vc)%" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:216 cinder/brick/iscsi/iscsi.py:365 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:227 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:258 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:262 +#, python-format +msgid "Volume path %s does not exist, nothing to remove." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:280 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:290 cinder/brick/iscsi/iscsi.py:550 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:375 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:469 +msgid "cinder-rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:489 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:513 cinder/brick/iscsi/iscsi.py:522 +#, python-format +msgid "Failed to create iscsi target for volume id:%s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:532 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:542 +#, python-format +msgid "Failed to remove iscsi target for volume id:%s." 
+msgstr "" + +#: cinder/brick/iscsi/iscsi.py:571 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 cinder/brick/local_dev/lvm.py:158 +#: cinder/brick/local_dev/lvm.py:474 cinder/brick/local_dev/lvm.py:503 +#: cinder/brick/local_dev/lvm.py:546 cinder/brick/local_dev/lvm.py:609 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 cinder/brick/local_dev/lvm.py:159 +#: cinder/brick/local_dev/lvm.py:475 cinder/brick/local_dev/lvm.py:504 +#: cinder/brick/local_dev/lvm.py:547 cinder/brick/local_dev/lvm.py:610 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 cinder/brick/local_dev/lvm.py:160 +#: cinder/brick/local_dev/lvm.py:476 cinder/brick/local_dev/lvm.py:505 +#: cinder/brick/local_dev/lvm.py:548 cinder/brick/local_dev/lvm.py:611 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, python-format +msgid "Unable to locate Volume Group %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:157 +msgid "Error querying thin pool about data_percent" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:370 +#, python-format +msgid "Unable to find VG: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:420 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:473 +msgid "Error creating Volume" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:489 +#, python-format +msgid "Unable to find LV: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:502 +msgid "Error creating snapshot" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:545 +msgid "Error activating LV" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:563 +#, python-format +msgid "Error reported running lvremove: CMD: %(command)s, RESPONSE: %(response)s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:568 +msgid "Attempting udev settle and retry of lvremove..." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:608 +msgid "Error extending Volume" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:39 +msgid "nfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:45 +msgid "glusterfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:86 +#, python-format +msgid "Already mounted: %s" +msgstr "" + +#: cinder/common/config.py:125 +msgid "Deploy v1 of the Cinder API." +msgstr "" + +#: cinder/common/config.py:128 +msgid "Deploy v2 of the Cinder API." +msgstr "" + +#: cinder/common/sqlalchemyutils.py:66 +#: cinder/openstack/common/db/sqlalchemy/utils.py:72 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:114 +#: cinder/openstack/common/db/sqlalchemy/utils.py:120 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/compute/nova.py:97 +#, python-format +msgid "Novaclient connection created using URL: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:63 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:190 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:843 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1842 +#, python-format +msgid "VolumeType %s deletion failed, VolumeType in use." 
+msgstr "" + +#: cinder/db/sqlalchemy/api.py:2530 +#, python-format +msgid "No backup with id %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2615 +msgid "Volume must be available" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2639 +#, python-format +msgid "Volume in unexpected state %s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2662 +#, python-format +msgid "" +"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " +"%(status)s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:37 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:64 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:240 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:269 +msgid "Downgrade from initial Cinder install is unsupported." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:49 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:74 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:105 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:45 +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:48 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:46 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:127 +msgid "Dropping foreign key reservations_ibfk_1 failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:133 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:140 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:147 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:60 +msgid "Exception while creating table 'volume_glance_metadata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:75 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:68 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:58 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:61 +msgid "transfers table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:31 +msgid "migrations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:61 +#, python-format +msgid "Table |%s| not created" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:37 +#, python-format +msgid "Exception while dropping table %s." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:100 +#, python-format +msgid "Exception while creating table %s." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:34 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:43 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:49 +#, python-format +msgid "Column |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:92 +msgid "encryption_key_id column not dropped from volumes" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:100 +msgid "encryption_key_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:105 +msgid "volume_type_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:113 +msgid "encryption table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:49 +msgid "Table quality_of_service_specs not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:60 +msgid "Added qos_specs_id column to volume type table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:85 +msgid "Dropping foreign key volume_types_ibfk_1 failed" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:93 +msgid "Dropping qos_specs_id column failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:100 +msgid "Dropping quality_of_service_specs table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:59 +msgid "volume_admin_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:48 +msgid "" +"Found existing 'default' entries in the quota_classes table. Skipping " +"insertion of default values." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:72 +msgid "Added default quota class data into the DB." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:74 +msgid "Default quota class data not inserted into the DB." +msgstr "" + +#: cinder/image/glance.py:161 cinder/image/glance.py:169 +#, python-format +msgid "Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:94 cinder/image/image_utils.py:199 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/image/image_utils.py:101 +#, python-format +msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:109 cinder/image/image_utils.py:192 +#, python-format +msgid "" +"Size is %(image_size)dGB and doesn't fit in a volume of size " +"%(volume_size)dGB." +msgstr "" + +#: cinder/image/image_utils.py:157 +#, python-format +msgid "" +"qemu-img is not installed and image is of type %s. Only RAW images can " +"be used if qemu-img is not installed." +msgstr "" + +#: cinder/image/image_utils.py:164 +msgid "" +"qemu-img is not installed and the disk format is not specified. Only RAW" +" images can be used if qemu-img is not installed." 
+msgstr "" + +#: cinder/image/image_utils.py:178 +#, python-format +msgid "Copying image from %(tmp)s to volume %(dest)s - size: %(size)s" +msgstr "" + +#: cinder/image/image_utils.py:206 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:224 +#, python-format +msgid "Converted to %(vol_format)s, but format is now %(file_format)s" +msgstr "" + +#: cinder/image/image_utils.py:260 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:78 +msgid "" +"config option keymgr.fixed_key has not been defined: some operations may " +"fail unexpectedly" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:80 +msgid "keymgr.fixed_key not defined" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:134 +#, python-format +msgid "Not deleting key %s" +msgstr "" + +#: cinder/openstack/common/eventlet_backdoor.py:140 +#, python-format +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/fileutils.py:64 +#, python-format +msgid "Reloading cached file %s" +msgstr "" + +#: cinder/openstack/common/gettextutils.py:252 +msgid "Message objects do not support addition." +msgstr "" + +#: cinder/openstack/common/gettextutils.py:261 +msgid "" +"Message objects do not support str() because they may contain non-ascii " +"characters. Please use unicode() or translate() instead." +msgstr "" + +#: cinder/openstack/common/imageutils.py:96 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:189 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:200 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:227 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:235 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/log.py:301 +#, python-format +msgid "Deprecated: %s" +msgstr "" + +#: cinder/openstack/common/log.py:402 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:453 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:623 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:82 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:89 +#: cinder/tests/brick/test_brick_connector.py:466 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:129 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:136 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:43 +#, python-format +msgid "Unexpected argument for periodic task creation: %(arg)s." 
+msgstr "" + +#: cinder/openstack/common/periodic_task.py:134 +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:139 +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:177 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:186 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." +msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:127 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/openstack/common/processutils.py:142 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:167 +#: cinder/openstack/common/processutils.py:239 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:345 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:179 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/openstack/common/processutils.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:318 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:220 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/openstack/common/processutils.py:224 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/openstack/common/service.py:175 +#: cinder/openstack/common/service.py:269 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:187 +msgid "Exception during rpc cleanup." 
+msgstr "" + +#: cinder/openstack/common/service.py:238 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:275 +msgid "Unhandled exception" +msgstr "" + +#: cinder/openstack/common/service.py:308 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/openstack/common/service.py:327 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/openstack/common/service.py:337 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/openstack/common/service.py:354 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/openstack/common/service.py:358 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/service.py:362 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/openstack/common/service.py:392 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/openstack/common/service.py:410 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/openstack/common/sslutils.py:98 +#, python-format +msgid "Invalid SSL version : %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:86 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/strutils.py:182 +#, python-format +msgid "Invalid string format: %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:189 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/openstack/common/versionutils.py:69 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and " +"may be removed in %(remove_in)s." +msgstr "" + +#: cinder/openstack/common/versionutils.py:73 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s and may be removed in " +"%(remove_in)s. It will not be superseded." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:29 +msgid "An unknown error occurred in crypto utils." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:36 +#, python-format +msgid "Block size of %(given)d is too big, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:45 +#, python-format +msgid "Length of %(given)d is too long, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/db/exception.py:44 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:487 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:538 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:610 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/utils.py:33 +msgid "Sort key supplied was not valid." +msgstr "" + +#: cinder/openstack/common/notifier/api.py:129 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:145 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:164 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. 
Please use rpc_notifier instead."
+msgstr ""
+
+#: cinder/openstack/common/notifier/rpc_notifier.py:45
+#: cinder/openstack/common/notifier/rpc_notifier2.py:51
+#, python-format
+msgid "Could not send notification to %(topic)s. Payload=%(message)s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/__init__.py:105
+#, python-format
+msgid ""
+"A RPC is being made while holding a lock. The locks currently held are "
+"%(locks)s. This is probably a bug. Please report it. Include the "
+"following: [%(stack)s]."
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:83
+msgid "Pool creating new connection"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:208
+#, python-format
+msgid "no calling threads waiting for msg_id : %s, message : %s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:216
+#, python-format
+msgid ""
+"Number of call waiters is greater than warning threshold: %d. There "
+"could be a MulticallProxyWaiter leak."
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:299
+#, python-format
+msgid "unpacked context: %s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:345
+#, python-format
+msgid "UNIQUE_ID is %s."
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:414
+#, python-format
+msgid "received %s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:422
+#, python-format
+msgid "no method for message: %s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:423
+#, python-format
+msgid "No method for message: %s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:451
+#: cinder/openstack/common/rpc/impl_zmq.py:280
+#, python-format
+msgid "Expected exception during message handling (%s)"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:459
+#: cinder/openstack/common/rpc/impl_zmq.py:286
+msgid "Exception during message handling"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:594
+#, python-format
+msgid "Making synchronous call on %s ..."
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:597
+#, python-format
+msgid "MSG_ID is %s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:631
+#, python-format
+msgid "Making asynchronous cast on %s..."
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:640
+msgid "Making asynchronous fanout cast..."
+msgstr ""
+
+#: cinder/openstack/common/rpc/amqp.py:668
+#, python-format
+msgid "Sending %(event_type)s on %(topic)s"
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:74
+msgid "An unknown RPC related exception occurred."
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:104
+#, python-format
+msgid ""
+"Remote error: %(exc_type)s %(value)s\n"
+"%(traceback)s."
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:121
+#, python-format
+msgid ""
+"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:"
+" \"%(method)s\" info: \"%(info)s\""
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:137
+#: cinder/openstack/common/rpc/common.py:138
+#: cinder/openstack/common/rpc/common.py:139
+msgid ""
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:143
+#, python-format
+msgid "Found duplicate message(%(msg_id)s). Skipping it."
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:147
+msgid "Invalid reuse of an RPC connection."
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:151
+#, python-format
+msgid "Specified RPC version, %(version)s, not supported by this endpoint."
+msgstr ""
+
+#: cinder/openstack/common/rpc/common.py:156
+#, python-format
+msgid ""
+"Specified RPC envelope version, %(version)s, not supported by this "
+"endpoint."
+msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:166 +#: cinder/openstack/common/rpc/impl_qpid.py:164 +msgid "Failed to process message... skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:477 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:499 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:536 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:552 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:606 +#: cinder/openstack/common/rpc/impl_qpid.py:507 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:624 +#: cinder/openstack/common/rpc/impl_qpid.py:522 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:628 +#: cinder/openstack/common/rpc/impl_qpid.py:526 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:667 +#: cinder/openstack/common/rpc/impl_qpid.py:561 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:84 +#, python-format +msgid "Invalid value for qpid_topology_version: %d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:455 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:461 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:474 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:534 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:101 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:136 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:137 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:138 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:146 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:158 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:200 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:205 +msgid "You cannot send on this socket." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:267 +#, python-format +msgid "Running func with context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:305 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:387 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:437 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:443 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:475 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:481 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:497 +#, python-format +msgid "Required IPC directory does not exist at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:506 +#, python-format +msgid "Permission denied to IPC directory at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:509 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:543 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:562 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:590 +msgid "Skipping topic registration. Already registered." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:597 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:649 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:662 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:675 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:678 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:681 +#, python-format +msgid "Received message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:682 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:691 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:698 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:721 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:724 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:728 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:731 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:771 +#, python-format +msgid "topic is %s." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:815 +#, python-format +msgid "rpc_zmq_matchmaker = %(orig)s is deprecated; use %(new)s instead" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#: cinder/openstack/common/rpc/matchmaker_ring.py:79 +#: cinder/openstack/common/rpc/matchmaker_ring.py:97 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:54 +#, python-format +msgid "extra_spec requirement '%(req)s' does not match '%(cap)s'" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:67 +#, python-format +msgid "%(host_state)s fails resource_type extra_specs requirements" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:43 +msgid "Re-scheduling is disabled." +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:52 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/scheduler/driver.py:69 +msgid "Must implement host_passes_filters" +msgstr "" + +#: cinder/scheduler/driver.py:74 +msgid "Must implement find_retype_host" +msgstr "" + +#: cinder/scheduler/driver.py:78 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:82 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:98 +#, python-format +msgid "cannot place volume %(id)s on %(host)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:114 +#, python-format +msgid "No valid hosts for volume %(id)s with type %(type)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:125 +#, python-format +msgid "" +"Current host not valid for volume %(id)s with type %(type)s, migration " +"not allowed" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:156 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:174 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:207 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:259 +#, python-format +msgid "Filtered %s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:276 +#, python-format +msgid "Choosing %s" +msgstr "" + +#: cinder/scheduler/host_manager.py:264 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:269 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:294 +#, python-format +msgid "volume service is down or disabled. (host: %s)" +msgstr "" + +#: cinder/scheduler/manager.py:63 +msgid "" +"ChanceScheduler and SimpleScheduler have been deprecated due to lack of " +"support for advanced features like: volume types, volume encryption, QoS " +"etc. These two schedulers can be fully replaced by FilterScheduler with " +"certain combination of filters and weighers." 
+msgstr "" + +#: cinder/scheduler/manager.py:98 cinder/scheduler/manager.py:100 +msgid "Failed to create scheduler manager volume flow" +msgstr "" + +#: cinder/scheduler/manager.py:159 +msgid "New volume type not specified in request_spec." +msgstr "" + +#: cinder/scheduler/manager.py:174 +#, python-format +msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." +msgstr "" + +#: cinder/scheduler/manager.py:192 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:68 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%s'" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:43 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:57 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:53 +msgid "No volume_id provided to populate a request_spec from" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:116 +#, python-format +msgid "Failed to schedule_create_volume: %(cause)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:135 +#, python-format +msgid "Failed notifying on %(topic)s payload %(payload)s" +msgstr "" + +#: cinder/tests/fake_driver.py:57 cinder/volume/driver.py:784 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:76 cinder/volume/driver.py:884 +#, python-format +msgid "FAKE ISER: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:97 +msgid "local_path not implemented" +msgstr "" + +#: cinder/tests/fake_driver.py:124 cinder/tests/fake_driver.py:129 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:70 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:78 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:94 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:97 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:58 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_netapp_nfs.py:360 +#, python-format +msgid "Share %(share)s and file name %(file_name)s" +msgstr "" + +#: cinder/tests/test_rbd.py:768 cinder/volume/drivers/rbd.py:175 +msgid "flush() not supported in this version of librbd" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:260 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1507 +#, python-format +msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1510 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1515 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:60 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:61 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: 
cinder/tests/test_xiv_ds8k.py:102
+#, python-format
+msgid "Volume not found for instance %(instance_id)s."
+msgstr ""
+
+#: cinder/tests/api/contrib/test_backups.py:741
+msgid "Invalid input"
+msgstr ""
+
+#: cinder/tests/integrated/test_login.py:29
+#, python-format
+msgid "volume: %s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:32
+#, python-format
+msgid ""
+"%(message)s\n"
+"Status Code: %(_status)s\n"
+"Body: %(_body)s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:42
+msgid "Authentication error"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:50
+msgid "Authorization error"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:58
+msgid "Item not found"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:100
+#, python-format
+msgid "Doing %(method)s on %(relative_url)s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:103
+#, python-format
+msgid "Body: %s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:121
+#, python-format
+msgid "%(auth_uri)s => code %(http_status)s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:148
+#, python-format
+msgid "%(relative_uri)s => code %(http_status)s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:159
+msgid "Unexpected status code"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:166
+#, python-format
+msgid "Decoding JSON: %s"
+msgstr ""
+
+#: cinder/transfer/api.py:68
+msgid "Volume in unexpected state"
+msgstr ""
+
+#: cinder/transfer/api.py:102 cinder/volume/api.py:367
+msgid "status must be available"
+msgstr ""
+
+#: cinder/transfer/api.py:119
+#, python-format
+msgid "Failed to create transfer record for %s"
+msgstr ""
+
+#: cinder/transfer/api.py:136
+#, python-format
+msgid "Attempt to transfer %s with invalid auth key."
+msgstr ""
+
+#: cinder/transfer/api.py:156 cinder/volume/flows/api/create_volume.py:508
+#, python-format
+msgid ""
+"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume "
+"(%(d_consumed)dG of %(d_quota)dG already consumed)"
+msgstr ""
+
+#: cinder/transfer/api.py:182
+#, python-format
+msgid "Failed to update quota donating volume transfer id %s"
+msgstr ""
+
+#: cinder/transfer/api.py:199
+#, python-format
+msgid "Volume %s has been transferred."
+msgstr "" + +#: cinder/volume/api.py:143 +#, python-format +msgid "Unable to query if %s is in the availability zone set" +msgstr "" + +#: cinder/volume/api.py:171 cinder/volume/api.py:173 +msgid "Failed to create api volume flow" +msgstr "" + +#: cinder/volume/api.py:202 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:214 +#, python-format +msgid "Volume status must be available or error, but current status is: %s" +msgstr "" + +#: cinder/volume/api.py:224 +msgid "Volume cannot be deleted while migrating" +msgstr "" + +#: cinder/volume/api.py:229 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:285 cinder/volume/api.py:350 +#: cinder/volume/qos_specs.py:240 cinder/volume/volume_types.py:67 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:370 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:377 +msgid "status must be in-use to detach" +msgstr "" + +#: cinder/volume/api.py:388 +msgid "Volume status must be available to reserve" +msgstr "" + +#: cinder/volume/api.py:464 +msgid "Snapshot cannot be created while volume is migrating" +msgstr "" + +#: cinder/volume/api.py:468 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:490 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:502 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:553 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/api.py:581 cinder/volume/flows/api/create_volume.py:208 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:585 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:589 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:720 cinder/volume/api.py:772 +msgid "Volume status must be available/in-use." +msgstr "" + +#: cinder/volume/api.py:723 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/api.py:752 +msgid "Volume status must be available to extend." +msgstr "" + +#: cinder/volume/api.py:757 +#, python-format +msgid "" +"New size for extend must be greater than current size. (current: " +"%(size)s, extended: %(new_size)s)" +msgstr "" + +#: cinder/volume/api.py:778 +msgid "Volume is already part of an active migration" +msgstr "" + +#: cinder/volume/api.py:784 +msgid "volume must not have snapshots" +msgstr "" + +#: cinder/volume/api.py:797 +#, python-format +msgid "No available service named %s" +msgstr "" + +#: cinder/volume/api.py:803 +msgid "Destination host must be different than current host" +msgstr "" + +#: cinder/volume/api.py:833 +msgid "Source volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:837 +msgid "Destination volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:842 +#, python-format +msgid "Destination has migration_status %(stat)s, expected %(exp)s." +msgstr "" + +#: cinder/volume/api.py:853 +msgid "Volume status must be available to update readonly flag." 
+msgstr "" + +#: cinder/volume/api.py:862 +#, python-format +msgid "Unable to update type due to incorrect status on volume: %s" +msgstr "" + +#: cinder/volume/api.py:868 +#, python-format +msgid "Volume %s is already part of an active migration." +msgstr "" + +#: cinder/volume/api.py:874 +#, python-format +msgid "migration_policy must be 'on-demand' or 'never', passed: %s" +msgstr "" + +#: cinder/volume/api.py:887 +#, python-format +msgid "Invalid volume_type passed: %s" +msgstr "" + +#: cinder/volume/api.py:900 +#, python-format +msgid "New volume_type same as original: %s" +msgstr "" + +#: cinder/volume/api.py:915 +msgid "Retype cannot change encryption requirements" +msgstr "" + +#: cinder/volume/api.py:927 +msgid "Retype cannot change front-end qos specs for in-use volumes" +msgstr "" + +#: cinder/volume/driver.py:189 cinder/volume/drivers/netapp/nfs.py:174 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:282 +#, python-format +msgid "copy_data_between_volumes %(src)s -> %(dest)s." +msgstr "" + +#: cinder/volume/driver.py:295 cinder/volume/driver.py:309 +#, python-format +msgid "Failed to attach volume %(vol)s" +msgstr "" + +#: cinder/volume/driver.py:327 +#, python-format +msgid "Failed to copy volume %(src)s to %(dest)d" +msgstr "" + +#: cinder/volume/driver.py:340 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:358 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:394 +#, python-format +msgid "Unable to access the backend storage via the path %(path)s." +msgstr "" + +#: cinder/volume/driver.py:433 +#, python-format +msgid "Creating a new backup for volume %s." +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Restoring backup %(backup)s to volume %(volume)s." +msgstr "" + +#: cinder/volume/driver.py:474 +msgid "Extend volume not implemented" +msgstr "" + +#: cinder/volume/driver.py:533 cinder/volume/drivers/emc/emc_smis_iscsi.py:113 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:546 +#, python-format +msgid "ISCSI discovery attempt failed for:%s" +msgstr "" + +#: cinder/volume/driver.py:548 +#, python-format +msgid "Error from iscsiadm -m discovery: %s" +msgstr "" + +#: cinder/volume/driver.py:595 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:599 cinder/volume/drivers/emc/emc_smis_iscsi.py:156 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:696 +msgid "The volume driver requires the iSCSI initiator name in the connector." +msgstr "" + +#: cinder/volume/driver.py:726 cinder/volume/driver.py:845 +#: cinder/volume/drivers/eqlx.py:247 cinder/volume/drivers/lvm.py:359 +#: cinder/volume/drivers/zadara.py:650 +#: cinder/volume/drivers/emc/emc_smis_common.py:859 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:235 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:602 +#: cinder/volume/drivers/netapp/iscsi.py:1032 +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/nexenta/iscsi.py:538 +#: cinder/volume/drivers/windows/windows.py:205 +msgid "Updating volume stats" +msgstr "" + +#: cinder/volume/driver.py:924 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:203 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." 
+msgstr "" + +#: cinder/volume/manager.py:209 +msgid "" +"ThinLVMVolumeDriver is deprecated, please configure LVMISCSIDriver and " +"lvm_type=thin. Continuing with those settings." +msgstr "" + +#: cinder/volume/manager.py:228 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)" +msgstr "" + +#: cinder/volume/manager.py:235 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s" +msgstr "" + +#: cinder/volume/manager.py:244 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:257 +#, python-format +msgid "Failed to re-export volume %s: setting to error state" +msgstr "" + +#: cinder/volume/manager.py:264 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:271 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:273 +#, python-format +msgid "" +"Error encountered during re-exporting phase of driver initialization: " +"%(name)s" +msgstr "" + +#: cinder/volume/manager.py:283 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:328 cinder/volume/manager.py:330 +msgid "Failed to create manager volume flow" +msgstr "" + +#: cinder/volume/manager.py:374 cinder/volume/manager.py:391 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:380 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:389 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:394 +#, python-format +msgid "Cannot delete volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:422 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:427 +#, python-format +msgid "volume %s: glance metadata deleted" +msgstr "" + +#: cinder/volume/manager.py:430 +#, python-format +msgid "no glance metadata found for volume %s" +msgstr "" + +#: cinder/volume/manager.py:434 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:451 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:462 +#, python-format +msgid "snapshot %(snap_id)s: creating" +msgstr "" + +#: cinder/volume/manager.py:490 +#, python-format +msgid "" +"Failed updating %(snapshot_id)s metadata using the provided volumes " +"%(volume_id)s metadata" +msgstr "" + +#: cinder/volume/manager.py:496 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:508 cinder/volume/manager.py:518 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:526 +#, python-format +msgid "Cannot delete snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:556 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:559 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:579 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:583 +msgid "being attached by another host" +msgstr "" + +#: cinder/volume/manager.py:587 +msgid "being attached by different mode" +msgstr "" + +#: cinder/volume/manager.py:590 +msgid "status must be available or attaching" +msgstr "" + +#: cinder/volume/manager.py:698 +#, python-format +msgid 
"Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "" + +#: cinder/volume/manager.py:760 +#, python-format +msgid "Unable to fetch connection information from backend: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:807 +#, python-format +msgid "Unable to terminate volume connection: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:854 +msgid "failed to create new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:857 +msgid "timeout creating new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:880 +#, python-format +msgid "Failed to copy volume %(vol1)s to %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:909 +#, python-format +msgid "" +"migrate_volume_completion: completing migration for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:921 +#, python-format +msgid "" +"migrate_volume_completion is cleaning up an error for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:940 +#, python-format +msgid "Failed to delete migration source vol %(vol)s: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:976 +#, python-format +msgid "volume %s: calling driver migrate_volume" +msgstr "" + +#: cinder/volume/manager.py:1016 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/manager.py:1024 +#, python-format +msgid "" +"Unable to update stats, %(driver_name)s -%(driver_version)s " +"%(config_group)s driver is uninitialized." +msgstr "" + +#: cinder/volume/manager.py:1044 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/manager.py:1091 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to extend volume by %(s_size)sG, " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/manager.py:1103 +#, python-format +msgid "volume %s: extending" +msgstr "" + +#: cinder/volume/manager.py:1105 +#, python-format +msgid "volume %s: extended successfully" +msgstr "" + +#: cinder/volume/manager.py:1107 +#, python-format +msgid "volume %s: Error trying to extend volume" +msgstr "" + +#: cinder/volume/manager.py:1169 +msgid "Failed to update usages while retyping volume." +msgstr "" + +#: cinder/volume/manager.py:1170 +msgid "Failed to get old volume type quota reservations" +msgstr "" + +#: cinder/volume/manager.py:1190 +#, python-format +msgid "Volume %s: retyped succesfully" +msgstr "" + +#: cinder/volume/manager.py:1193 +#, python-format +msgid "" +"Volume %s: driver error when trying to retype, falling back to generic " +"mechanism." +msgstr "" + +#: cinder/volume/manager.py:1204 +msgid "Retype requires migration but is not allowed." +msgstr "" + +#: cinder/volume/manager.py:1212 +msgid "Volume must not have snapshots." 
+msgstr "" + +#: cinder/volume/qos_specs.py:57 +#, python-format +msgid "Valid consumer of QoS specs are: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:84 cinder/volume/qos_specs.py:105 +#: cinder/volume/qos_specs.py:155 cinder/volume/qos_specs.py:197 +#: cinder/volume/qos_specs.py:211 cinder/volume/qos_specs.py:225 +#: cinder/volume/volume_types.py:43 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:123 cinder/volume/qos_specs.py:140 +#: cinder/volume/qos_specs.py:272 cinder/volume/volume_types.py:52 +#: cinder/volume/volume_types.py:99 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/qos_specs.py:156 +#, python-format +msgid "Failed to get all associations of qos specs %s" +msgstr "" + +#: cinder/volume/qos_specs.py:189 +#, python-format +msgid "" +"Type %(type_id)s is already associated with another qos specs: " +"%(qos_specs_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:198 +#, python-format +msgid "Failed to associate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:212 +#, python-format +msgid "Failed to disassociate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:226 +#, python-format +msgid "Failed to disassociate qos specs %s." +msgstr "" + +#: cinder/volume/qos_specs.py:284 cinder/volume/volume_types.py:111 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/utils.py:144 +#, python-format +msgid "" +"Incorrect value error: %(blocksize)s, it may indicate that " +"'volume_dd_blocksize' was configured incorrectly. Fall back to default." +msgstr "" + +#: cinder/volume/volume_types.py:130 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:131 +#: cinder/volume/drivers/block_device.py:143 cinder/volume/drivers/lvm.py:654 +#: cinder/volume/drivers/lvm.py:669 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:157 cinder/volume/drivers/lvm.py:687 +#, python-format +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:183 cinder/volume/drivers/lvm.py:483 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:200 cinder/volume/drivers/lvm.py:504 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:272 cinder/volume/drivers/lvm.py:227 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:287 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:311 cinder/volume/drivers/lvm.py:300 +#: cinder/volume/drivers/zadara.py:509 cinder/volume/drivers/nexenta/nfs.py:189 +#, python-format +msgid "Creating clone of volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:380 +msgid "No free disk" +msgstr "" + +#: cinder/volume/drivers/block_device.py:393 +msgid "No big enough free disk" +msgstr "" + +#: cinder/volume/drivers/coraid.py:84 +#, python-format +msgid "Invalid ESM url scheme \"%s\". Supported https only." +msgstr "" + +#: cinder/volume/drivers/coraid.py:111 +msgid "Invalid REST handle name. Expected path." 
+msgstr "" + +#: cinder/volume/drivers/coraid.py:134 +#, python-format +msgid "Call to json.loads() failed: %(ex)s. Response: %(resp)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:224 +msgid "Session is expired. Relogin on ESM." +msgstr "" + +#: cinder/volume/drivers/coraid.py:244 +msgid "Reply is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:246 +msgid "Error message is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:284 +#, python-format +msgid "Coraid Appliance ping failed: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:297 +#, python-format +msgid "Volume \"%(name)s\" created with VSX LUN \"%(lun)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:311 +#, python-format +msgid "Volume \"%s\" deleted." +msgstr "" + +#: cinder/volume/drivers/coraid.py:315 +#, python-format +msgid "Resize volume \"%(name)s\" to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:319 +#, python-format +msgid "Repository for volume \"%(name)s\" found: \"%(repo)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:333 +#, python-format +msgid "Volume \"%(name)s\" resized. New size is %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:385 +msgid "Cannot create clone volume in different repository." +msgstr "" + +#: cinder/volume/drivers/coraid.py:505 +#, python-format +msgid "Initialize connection %(shelf)s/%(lun)s for %(name)s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:139 +#, python-format +msgid "" +"CLI output\n" +"%s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:154 +msgid "Reading CLI MOTD" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:158 +#, python-format +msgid "Setting CLI terminal width: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:162 +#, python-format +msgid "Sending CLI command: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:169 +msgid "Error executing EQL command" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:199 +#, python-format +msgid "EQL-driver: executing \"%s\"" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:208 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:383 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:215 cinder/volume/drivers/san/san.py:149 +#, python-format +msgid "Error running SSH command: %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:282 +#, python-format +msgid "Volume %s does not exist, it may have already been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:300 +#, python-format +msgid "EQL-driver: Setup is complete, group IP is %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:304 +msgid "Failed to setup the Dell EqualLogic driver" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:320 +#, python-format +msgid "Failed to create volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:329 +#, python-format +msgid "Volume %s was not found while trying to delete it" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:333 +#, python-format +msgid "Failed to delete volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:348 +#, python-format +msgid "Failed to create snapshot of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:361 +#, python-format +msgid "Failed to create volume from snapshot %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:374 +#, python-format +msgid "Failed to create clone of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:384 +#, python-format +msgid "Failed to delete snapshot %(snap)s of volume %(vol)s" +msgstr "" + +#: 
cinder/volume/drivers/eqlx.py:405 +#, python-format +msgid "Failed to initialize connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:415 +#, python-format +msgid "Failed to terminate connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:436 +#, python-format +msgid "Volume %s is not found!, it may have been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:440 +#, python-format +msgid "Failed to ensure export of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:459 +#, python-format +msgid "Failed to extend_volume %(name)s from %(current_size)sGB to %(new_size)sGB" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:86 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:91 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:103 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:161 +#, python-format +msgid "Cloning volume %(src)s to volume %(dst)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:166 +msgid "Volume status must be 'available'." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:202 cinder/volume/drivers/nfs.py:122 +#: cinder/volume/drivers/netapp/nfs.py:753 +#, python-format +msgid "casted to %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:216 +msgid "Snapshot status must be \"available\" to clone." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:238 +#, python-format +msgid "snapshot: %(snap)s, volume: %(vol)s, volume_size: %(size)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:257 +#, python-format +msgid "will copy from snapshot at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:275 cinder/volume/drivers/nfs.py:172 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:373 +#, python-format +msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:403 +#, python-format +msgid "nova call result: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:405 +msgid "Call to Nova to create snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:427 +msgid "Nova returned \"error\" status while creating snapshot." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:431 +#, python-format +msgid "Status of snapshot %(id)s is now %(status)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:444 +#, python-format +msgid "Timed out while waiting for Nova update for creation of snapshot %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:456 +#, python-format +msgid "create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:457 +#, python-format +msgid "volume id: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:532 +msgid "'active' must be present when writing snap_info." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:562 +#, python-format +msgid "deleting snapshot %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:566 +msgid "Volume status must be \"available\" or \"in-use\"." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:582 +#, python-format +msgid "" +"Snapshot record for %s is not present, allowing snapshot_delete to " +"proceed." 
+msgstr "" + +#: cinder/volume/drivers/glusterfs.py:587 +#, python-format +msgid "snapshot_file for this snap is %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:608 +#, python-format +msgid "No base file found for %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:625 +#, python-format +msgid "No %(base_id)s found for %(file)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:680 +#, python-format +msgid "No file found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:690 +#, python-format +msgid "No snap found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:701 +#, python-format +msgid "No file depends on %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:727 +#, python-format +msgid "Check condition failed: %s expected to be None." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:778 +msgid "Call to Nova delete snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:796 +#, python-format +msgid "status of snapshot %s is still \"deleting\"... waiting" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:802 +#, python-format +msgid "Unable to delete snapshot %(id)s, status: %(status)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:815 +#, python-format +msgid "Timed out while waiting for Nova update for deletion of snapshot %(id)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:904 +#, python-format +msgid "%s must be a valid raw or qcow2 image." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:967 +msgid "Extend volume is only supported for this driver when no snapshots exist." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:975 +#, python-format +msgid "Unrecognized backing format: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:990 +#, python-format +msgid "creating new volume at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:993 +#, python-format +msgid "file already exists at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1019 cinder/volume/drivers/nfs.py:159 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1021 +#, python-format +msgid "Available shares: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1038 +#, python-format +msgid "" +"GlusterFS share at %(dir)s is not writable by the Cinder volume service. " +"Snapshot operations will not be supported." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:96 +#, python-format +msgid "GPFS is not active. Detailed output: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:97 +#, python-format +msgid "GPFS is not running - state: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:140 +msgid "Option gpfs_mount_point_base is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:147 +msgid "Option gpfs_images_share_mode is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:153 +msgid "Option gpfs_images_dir is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:160 +#, python-format +msgid "" +"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " +"belong to different file systems" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:169 +#, python-format +msgid "" +"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in " +"cluster daemon level %(cur)s - must be at least at level %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:183 +#, python-format +msgid "%s must be an absolute path." 
+msgstr "" + +#: cinder/volume/drivers/gpfs.py:188 +#, python-format +msgid "%s is not a directory." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:197 +#, python-format +msgid "" +"The GPFS filesystem %(fs)s is not at the required release level. Current" +" level is %(cur)s, must be at least %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:556 +#, python-format +msgid "Failed to resize volume %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:604 +#, python-format +msgid "mkfs failed on volume %(vol)s, error message was: %(err)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:637 +#, python-format +msgid "" +"%s cannot be accessed. Verify that GPFS is active and file system is " +"mounted." +msgstr "" + +#: cinder/volume/drivers/lvm.py:189 +#, python-format +msgid "Unabled to delete due to existing snapshot for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:215 +#, python-format +msgid "Volume device file path %s does not exist." +msgstr "" + +#: cinder/volume/drivers/lvm.py:221 +#, python-format +msgid "Size for volume: %s not found, cannot secure delete." +msgstr "" + +#: cinder/volume/drivers/lvm.py:262 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:361 +#, python-format +msgid "Unable to update stats on non-initialized Volume Group: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:462 +#, python-format +msgid "Error creating iSCSI target, retrying creation for target: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:482 +#, python-format +msgid "volume_info:%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:518 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:519 cinder/volume/drivers/lvm.py:724 +#: cinder/volume/drivers/huawei/rest_common.py:1225 +#, python-format +msgid "%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:573 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/nfs.py:109 +msgid "Driver specific implementation needs to return mount_point_base." +msgstr "" + +#: cinder/volume/drivers/nfs.py:263 +#, python-format +msgid "Expected volume size was %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:264 +#, python-format +msgid " but size is now %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:361 +#, python-format +msgid "%s is already mounted" +msgstr "" + +#: cinder/volume/drivers/nfs.py:421 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:426 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/nfs.py:431 +#, python-format +msgid "NFS config 'nfs_oversub_ratio' invalid. Must be > 0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:439 +#, python-format +msgid "NFS config 'nfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:493 +#, python-format +msgid "Selected %s as target nfs share." 
+msgstr "" + +#: cinder/volume/drivers/nfs.py:526 +#, python-format +msgid "%s is above nfs_used_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:529 +#, python-format +msgid "%s is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:532 +#, python-format +msgid "%s reserved space is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/rbd.py:160 +#, python-format +msgid "Invalid argument - whence=%s not supported" +msgstr "" + +#: cinder/volume/drivers/rbd.py:164 +msgid "Invalid argument" +msgstr "" + +#: cinder/volume/drivers/rbd.py:183 +msgid "fileno() not supported by RBD()" +msgstr "" + +#: cinder/volume/drivers/rbd.py:210 +#, python-format +msgid "error opening rbd image %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:259 +msgid "rados and rbd python libraries not found" +msgstr "" + +#: cinder/volume/drivers/rbd.py:265 +msgid "error connecting to ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:346 cinder/volume/drivers/sheepdog.py:178 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:377 +#, python-format +msgid "clone depth exceeds limit of %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:411 +#, python-format +msgid "maximum clone depth (%d) has been reached - flattening source volume" +msgstr "" + +#: cinder/volume/drivers/rbd.py:423 +#, python-format +msgid "flattening source volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:435 +#, python-format +msgid "creating snapshot='%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:445 +#, python-format +msgid "cloning '%(src_vol)s@%(src_snap)s' to '%(dest)s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:459 +msgid "clone created successfully" +msgstr "" + +#: cinder/volume/drivers/rbd.py:468 +#, python-format +msgid "creating volume '%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:484 +#, python-format +msgid "flattening %(pool)s/%(img)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:490 +#, python-format +msgid "cloning %(pool)s/%(img)s@%(snap)s to %(dst)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:527 +msgid "volume has no backup snaps" +msgstr "" + +#: cinder/volume/drivers/rbd.py:550 +#, python-format +msgid "volume %s is not a clone" +msgstr "" + +#: cinder/volume/drivers/rbd.py:568 +#, python-format +msgid "deleting parent snapshot %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:579 +#, python-format +msgid "deleting parent %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:593 +#, python-format +msgid "volume %s no longer exists in backend" +msgstr "" + +#: cinder/volume/drivers/rbd.py:609 +msgid "volume has clone snapshot(s)" +msgstr "" + +#: cinder/volume/drivers/rbd.py:625 +#, python-format +msgid "deleting rbd volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:629 +msgid "" +"ImageBusy error raised while deleting rbd volume. This may have been " +"caused by a connection from a client that has crashed and, if so, may be " +"resolved by retrying the delete after 30 seconds has elapsed." 
+msgstr "" + +#: cinder/volume/drivers/rbd.py:642 +msgid "volume is a clone so cleaning references" +msgstr "" + +#: cinder/volume/drivers/rbd.py:696 +#, python-format +msgid "connection data: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:705 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:709 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:712 +msgid "Not an rbd snapshot" +msgstr "" + +#: cinder/volume/drivers/rbd.py:724 +#, python-format +msgid "not cloneable: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:728 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:733 +msgid "rbd image clone requires image format to be 'raw' but image {0} is '{1}'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:747 +#, python-format +msgid "Unable to open image %(loc)s: %(err)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:817 +msgid "volume backup complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:830 +msgid "volume restore complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:840 cinder/volume/drivers/sheepdog.py:195 +#, python-format +msgid "Failed to Extend Volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:845 cinder/volume/drivers/sheepdog.py:200 +#: cinder/volume/drivers/windows/windows.py:223 +#, python-format +msgid "Extend volume from %(old_size)s GB to %(new_size)s GB." +msgstr "" + +#: cinder/volume/drivers/scality.py:67 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:78 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:84 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:105 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:139 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:59 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:64 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:144 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:151 +#, python-format +msgid "" +"Failed to make httplib connection SolidFire Cluster: %s (verify san_ip " +"settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:154 +#, python-format +msgid "Failed to make httplib connection: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:161 +#, python-format +msgid "" +"Request to SolidFire cluster returned bad status: %(status)s / %(reason)s" +" (check san_login/san_password settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:166 +#, python-format +msgid "HTTP request failed, with status: %(status)s and reason: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:177 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:183 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:187 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:189 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:195 +#, python-format +msgid "Detected 
xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:202 +#: cinder/volume/drivers/solidfire.py:271 +#: cinder/volume/drivers/solidfire.py:366 +#, python-format +msgid "API response: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:222 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:253 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:315 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:398 +msgid "Failed to get model update from clone" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:410 +#, python-format +msgid "Failed volume create: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:425 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:460 +#, python-format +msgid "Failed to get SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:469 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:478 +#, python-format +msgid "Volume %s, not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:481 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:550 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:554 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:556 +msgid "This usually means the volume was never successfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:569 +#, python-format +msgid "Failed to delete SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:572 +#: cinder/volume/drivers/solidfire.py:646 +#: cinder/volume/drivers/solidfire.py:709 +#: cinder/volume/drivers/solidfire.py:734 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:575 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:579 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:587 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:638 +msgid "Entering SolidFire extend_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:660 +msgid "Leaving SolidFire extend_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:665 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:673 +msgid "Failed to get updated stats" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:703 +#: cinder/volume/drivers/solidfire.py:728 +msgid "Entering SolidFire attach_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:773 +msgid "Leaving SolidFire transfer volume" +msgstr "" + +#: cinder/volume/drivers/zadara.py:236 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:260 +#, python-format +msgid "Operation completed. 
%(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:357 +#, python-format +msgid "Pool %(name)s: %(total)sGB total, %(free)sGB free" +msgstr "" + +#: cinder/volume/drivers/zadara.py:408 cinder/volume/drivers/zadara.py:531 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:438 +#, python-format +msgid "Create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:445 cinder/volume/drivers/zadara.py:490 +#: cinder/volume/drivers/zadara.py:516 +#, python-format +msgid "Volume %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:456 +#, python-format +msgid "Delete snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:464 +#, python-format +msgid "snapshot: original volume %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:472 +#, python-format +msgid "snapshot: snapshot %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:483 +#, python-format +msgid "Creating volume from snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:496 +#, python-format +msgid "Snapshot %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:614 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:40 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:79 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:83 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:91 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:98 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:107 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:115 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:130 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:137 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:144 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:152 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:157 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:167 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:177 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:188 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:197 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:218 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:230 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:241 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:257 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:266 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:278 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:287 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:292 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:302 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:312 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:321 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:342 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. 
Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:354 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:365 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:381 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:390 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:402 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:411 +msgid "Entering delete_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:413 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:420 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:430 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:438 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:442 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:456 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:465 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:472 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:476 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:488 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:497 +#: cinder/volume/drivers/emc/emc_smis_common.py:567 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:502 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:518 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:527 +#, python-format +msgid "" +"Error Create Snapshot: %(snapshot)s Volume: %(volume)s Error: " +"%(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:535 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:541 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:545 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:551 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:559 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:574 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:590 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:599 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:611 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:621 +#, python-format +msgid "Create export: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:626 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:648 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:663 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:674 +#, python-format +msgid "Error mapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:678 +#, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:694 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:707 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:711 +#, python-format +msgid "HidePaths for volume %s completed successfully." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:724 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:739 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:744 +#, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:757 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:770 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:775 +#, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:781 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:790 +#: cinder/volume/drivers/emc/emc_smis_common.py:820 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:804 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:810 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:834 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:840 +#, python-format +msgid "Volume %s is already mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:852 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:884 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:887 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:903 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:906 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:928 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:948 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:952 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:959 +msgid "Cannot connect to ECOM server" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:971 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:984 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:997 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1010 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1054 +#, python-format +msgid "Pool %(storage_type)s is not found." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1060 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1066 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1082 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1114 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1117 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1130 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1158 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1184 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1188 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1248 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1289 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1302 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1314 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1326 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1361 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1404 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1409 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1419 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1441 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1463 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1491 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1520 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1526 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1538 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1548 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1550 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1566 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:152 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:161 +#, python-format +msgid "Cannot find device number for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:191 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:198 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:215 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:70 +#, python-format +msgid "Range: start LU: %(start)s, end LU: %(end)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:84 +#, python-format +msgid "setting LU upper (end) limit to %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:92 +#, python-format +msgid "%(element)s: %(val)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:103 cinder/volume/drivers/hds/hds.py:105 +#, python-format +msgid "XML exception reading parameter: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:178 +#, python-format +msgid "portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:197 +#, python-format +msgid "No configuration found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:250 +#, python-format +msgid "HDP not found: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:289 +#, python-format +msgid "iSCSI portal not found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:327 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:355 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is cloned." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:372 +#, python-format +msgid "LUN %(lun)s extended to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:395 +#, python-format +msgid "delete lun %(lun)s on %(name)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:480 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created from snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:503 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is created as snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:522 +#, python-format +msgid "LUN %s is deleted." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:57 +msgid "_instantiate_driver: configuration not found." 
+msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:64 +#, python-format +msgid "" +"_instantiate_driver: Loading %(protocol)s driver for Huawei OceanStor " +"%(product)s series storage arrays." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:84 +#, python-format +msgid "" +"\"Product\" or \"Protocol\" is illegal. \"Product\" should be set to " +"either T, Dorado or HVS. \"Protocol\" should be set to either iSCSI or " +"FC. Product: %(product)s Protocol: %(protocol)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:74 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s host: %(host)s initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:92 +#: cinder/volume/drivers/huawei/huawei_t.py:461 +#, python-format +msgid "initialize_connection: Target FC ports WWNS: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:101 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(ini)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:159 +#: cinder/volume/drivers/huawei/rest_common.py:1278 +#, python-format +msgid "" +"_get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " +"check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:206 +#: cinder/volume/drivers/huawei/rest_common.py:1083 +#, python-format +msgid "_get_tgt_iqn: iSCSI IP is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:234 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:248 +#, python-format +msgid "" +"_get_iscsi_tgt_port_info: Failed to get iSCSI port info. Please make sure" +" the iSCSI port IP %s is configured in array." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:323 +#: cinder/volume/drivers/huawei/huawei_t.py:552 +#, python-format +msgid "" +"terminate_connection: volume: %(vol)s, host: %(host)s, connector: " +"%(initiator)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:351 +#, python-format +msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:436 +msgid "validate_connector: The FC driver requires thewwpns in the connector." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:443 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:578 +#, python-format +msgid "_remove_fc_ports: FC port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:40 +#, python-format +msgid "parse_xml_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:129 +#, python-format +msgid "_get_host_os_type: Host %(ip)s OS type is %(os)s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:59 +#, python-format +msgid "HVS Request URL: %(url)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:60 +#, python-format +msgid "HVS Request Data: %(data)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:73 +#, python-format +msgid "HVS Response Data: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:75 +#, python-format +msgid "Bad response from server: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:82 +msgid "JSON transfer error" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:102 +#, python-format +msgid "Login error, reason is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:166 +#, python-format +msgid "" +"%(err)s\n" +"result: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:173 +#, python-format +msgid "%s \"data\" was not in result." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:208 +msgid "Can't find the Qos policy in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:246 +msgid "Can't find lun or lun group in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:280 +#, python-format +msgid "Invalid resource pool: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:298 +#, python-format +msgid "Get pool info error, pool name is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:327 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:354 +#, python-format +msgid "_stop_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:474 +#, python-format +msgid "" +"_mapping_hostgroup_and_lungroup: lun_group: %(lun_group)sview_id: " +"%(view_id)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:511 +#: cinder/volume/drivers/huawei/rest_common.py:543 +#, python-format +msgid "initiator name:%(initiator_name)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:527 +#, python-format +msgid "host lun id is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:553 +#, python-format +msgid "the free wwns %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:574 +#, python-format +msgid "the fc server properties is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:688 +#, python-format +msgid "JSON transfer data error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:874 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:937 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:964 +#, python-format +msgid "" +"PrefetchType config is wrong. PrefetchType must in 1,2,3,4. fetchtype " +"is:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:970 +msgid "Use default prefetch fetchtype. Prefetch fetchtype:Intelligent." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:982 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal.LUNcopy name: " +"%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1056 +#, python-format +msgid "" +"_get_iscsi_port_info: Failed to get iscsi port info through config IP " +"%(ip)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1101 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1124 +#, python-format +msgid "_parse_volume_type: type id: %(type_id)s config parameter is: %(params)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1157 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the configuration file " +"%(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1162 +#, python-format +msgid "The config parameters are: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1239 +#: cinder/volume/drivers/huawei/ssh_common.py:118 +#: cinder/volume/drivers/huawei/ssh_common.py:1265 +#, python-format +msgid "_check_conf_file: Config file invalid. %s must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1246 +#: cinder/volume/drivers/huawei/ssh_common.py:125 +msgid "_check_conf_file: Config file invalid. StoragePool must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1256 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1300 +msgid "Can not find lun in array" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:54 +#, python-format +msgid "ssh_read: Read SSH timeout. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:70 +msgid "No response message. Please check system status." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:101 +#: cinder/volume/drivers/huawei/ssh_common.py:1249 +msgid "do_setup" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:135 +#: cinder/volume/drivers/huawei/ssh_common.py:1287 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType is invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:169 +#, python-format +msgid "_get_login_info: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:224 +#, python-format +msgid "create_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:242 +#, python-format +msgid "" +"_name_translate: Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:279 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the element in configuration " +"file %(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:373 +#: cinder/volume/drivers/huawei/ssh_common.py:1451 +#, python-format +msgid "LUNType must be \"Thin\" or \"Thick\". LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:395 +msgid "" +"_parse_conf_lun_params: Use default prefetch type. 
Prefetch type: " +"Intelligent" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:421 +#, python-format +msgid "" +"_get_maximum_capacity_pool_id: Failed to get pool id. Please check config" +" file and make sure the StoragePool %s is created in storage array." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:436 +#, python-format +msgid "CLI command: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:466 +#, python-format +msgid "" +"_execute_cli: Can not connect to IP %(old)s, try to connect to the other " +"IP %(new)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:501 +#, python-format +msgid "_execute_cli: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:511 +#, python-format +msgid "delete_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:516 +#, python-format +msgid "delete_volume: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:570 +#, python-format +msgid "" +"create_volume_from_snapshot: snapshot name: %(snapshot)s, volume name: " +"%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:580 +#, python-format +msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:650 +#, python-format +msgid "_wait_for_luncopy: LUNcopy %(luncopyname)s status is %(status)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:688 +#, python-format +msgid "create_cloned_volume: src volume: %(src)s, tgt volume: %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:697 +#, python-format +msgid "Source volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:739 +#, python-format +msgid "" +"extend_volume: extended volume name: %(extended_name)s new added volume " +"name: %(added_name)s new added volume size: %(added_size)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:747 +#, python-format +msgid "extend_volume: volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:779 +#, python-format +msgid "create_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:785 +msgid "create_snapshot: Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:792 +#, python-format +msgid "create_snapshot: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:855 +#, python-format +msgid "delete_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:865 +#, python-format +msgid "" +"delete_snapshot: Can not delete snapshot %s for it is a source LUN of " +"LUNCopy." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:873 +#, python-format +msgid "delete_snapshot: Snapshot %(snap)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:916 +#, python-format +msgid "" +"%(func)s: %(msg)s\n" +"CLI command: %(cmd)s\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:933 +#, python-format +msgid "map_volume: Volume %s was not found." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1079 +#, python-format +msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1102 +#, python-format +msgid "remove_map: Host %s does not exist." 
+msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1106 +#, python-format +msgid "remove_map: Volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1119 +#, python-format +msgid "remove_map: No map between host %(host)s and volume %(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1138 +#, python-format +msgid "" +"_delete_map: There are IOs accessing the system. Retry to delete host map" +" %(mapid)s 10s later." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1146 +#, python-format +msgid "" +"_delete_map: Failed to delete host map %(mapid)s.\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1185 +msgid "_update_volume_stats: Updating volume stats." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1277 +msgid "_check_conf_file: Config file invalid. StoragePool must be specified." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1311 +msgid "" +"_get_device_type: The driver only supports Dorado5100 and Dorado 2100 G2 " +"now." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1389 +#, python-format +msgid "" +"create_volume_from_snapshot: %(device)s does not support create volume " +"from snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1396 +#, python-format +msgid "create_cloned_volume: %(device)s does not support clone volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1404 +#, python-format +msgid "extend_volume: %(device)s does not support extend volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1413 +#, python-format +msgid "create_snapshot: %(device)s does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:132 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:142 +#, python-format +msgid "Failed getting details for pool %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:179 +msgid "do_setup: No configured nodes." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:182 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:186 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:190 +msgid "Unable to determine system name" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:193 +msgid "Unable to determine system id" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:196 +msgid "Unable to determine pool extent size" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:203 +#: cinder/volume/drivers/netapp/iscsi.py:122 +#: cinder/volume/drivers/netapp/nfs.py:639 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:157 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:209 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:217 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:225 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:235 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:254 +msgid "The connector does not contain the required information." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:277 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:297 +msgid "CHAP secret exists for host but CHAP is disabled" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:302 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:314 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:316 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:333 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:342 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:365 +msgid "" +"Could not get FC connection information for the host-volume connection. " +"Is the host configured properly for FC connections?" 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:380 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:385 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:403 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:411 +msgid "terminate_connection: Failed to get host name from connector." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:421 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:447 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:459 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:470 +#, python-format +msgid "enter: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:474 +msgid "extend_volume: Extending a volume with snapshots is not supported." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:481 +#, python-format +msgid "leave: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:497 +#, python-format +msgid "enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:523 +#, python-format +msgid "leave: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:540 +#, python-format +msgid "" +"enter: retype: id=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:581 +#, python-format +msgid "" +"exit: retype: ild=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:622 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:623 +msgid "_update_volume_stats: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:44 +#, python-format +msgid "Could not find key in output of command %(cmd)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:64 +#, python-format +msgid "Failed to get code level (%s)." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:86 +#, python-format +msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:143 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:165 +#, python-format +msgid "Failed to find host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:178 +#, python-format +msgid "enter: get_host_from_connector: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:207 +#, python-format +msgid "leave: get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:218 +#, python-format +msgid "enter: create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:224 +msgid "create_host: Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:235 +msgid "create_host: No initiators or wwpns supplied." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:265 +#, python-format +msgid "leave: create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:275 +#, python-format +msgid "enter: map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:301 +#, python-format +msgid "" +"leave: map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host " +"%(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:311 +#, python-format +msgid "enter: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:318 +#, python-format +msgid "unmap_vol_from_host: No mapping of volume %(vol_name)s to any host found." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:324 +#, python-format +msgid "" +"unmap_vol_from_host: Multiple mappings of volume %(vol_name)s found, no " +"host specified." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:336 +#, python-format +msgid "" +"unmap_vol_from_host: No mapping of volume %(vol_name)s to host %(host) " +"found." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:348 +#, python-format +msgid "leave: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:377 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:383 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:390 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:397 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:402 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:408 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:417 +#, python-format +msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:452 +msgid "Protocol must be specified as ' iSCSI' or ' FC'." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:495 +#, python-format +msgid "enter: create_vdisk: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:498 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:525 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping%(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:535 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within theallotted %(to)d " +"seconds timeout. Terminating." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:544 +#, python-format +msgid "" +"enter: run_flashcopy: execute FlashCopy from source %(source)s to target " +"%(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:552 +#, python-format +msgid "leave: run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:572 +#, python-format +msgid "Loopcall: _check_vdisk_fc_mappings(), vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:595 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:631 +#, python-format +msgid "Calling _ensure_vdisk_no_fc_mappings: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:639 +#, python-format +msgid "enter: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:641 +#, python-format +msgid "Tried to delete non-existant vdisk %s." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:645 +#, python-format +msgid "leave: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:649 +#, python-format +msgid "enter: create_copy: snapshot %(src)s to %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:654 +#, python-format +msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:669 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt)s from vdisk %(src)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:691 +msgid "migrate_volume started without a vdisk copy in the expected pool." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:743 +#, python-format +msgid "" +"Ignore change IO group as storage code level is %(code_level)s, below " +"then 6.4.0.0" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:35 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:211 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:244 +#, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:53 +#, python-format +msgid "Expected no output from CLI command %(cmd)s, got %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:65 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:256 +#, python-format +msgid "" +"Failed to parse CLI output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:142 +msgid "Must pass wwpn or host to lsfabric." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:158 +#, python-format +msgid "Did not find success message nor error for %(fun)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:163 +msgid "" +"storwize_svc_multihostmap_enabled is set to False, not allowing multi " +"host mapping." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:347 +#, python-format +msgid "Did not find expected key %(key)s in %(fun)s: %(raw)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:382 +#, python-format +msgid "" +"Unexpected CLI response: header/row mismatch. header: %(header)s, row: " +"%(row)s" +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:419 +#, python-format +msgid "No element by given name %s." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:440 +msgid "Not a valid value for NaElement." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:444 +msgid "NaElement name cannot be null." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:468 +msgid "Type cannot be converted into NaElement." 
+msgstr "" + +#: cinder/volume/drivers/netapp/common.py:75 +msgid "Required configuration not found" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:103 +#, python-format +msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:109 +#, python-format +msgid "Storage family %s is not supported" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:116 +#, python-format +msgid "No default storage protocol found for storage family %(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:123 +#, python-format +msgid "" +"Protocol %(storage_protocol)s is not supported for storage family " +"%(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:130 +#, python-format +msgid "" +"NetApp driver of family %(storage_family)s and protocol " +"%(storage_protocol)s loaded" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:139 +msgid "Only loading netapp drivers supported." +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:158 +#, python-format +msgid "" +"The configured NetApp driver is deprecated. Please refer the link to " +"resolve the issue '%s'." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:69 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:105 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:150 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:166 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:175 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:191 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:227 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:232 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:238 +#, python-format +msgid "Failed to get LUN target details for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:249 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:252 +#, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:290 +#, python-format +msgid "Snapshot %s deletion successful" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:310 +#: cinder/volume/drivers/netapp/iscsi.py:565 +#: cinder/volume/drivers/netapp/nfs.py:99 +#: cinder/volume/drivers/netapp/nfs.py:206 +#, python-format +msgid "Resizing %s failed. Cleaning volume." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:325 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:412 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:431 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:511 +msgid "Object is not a NetApp LUN." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:543 +#, python-format +msgid "Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:545 +#, python-format +msgid "Error getting lun attribute. Exception: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:600 +#, python-format +msgid "No need to extend volume %s as it is already the requested new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:606 +#, python-format +msgid "Resizing lun %s directly to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:633 +#, python-format +msgid "Lun %(path)s geometry failed. Message - %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:662 +#, python-format +msgid "Moving lun %(name)s to %(new_name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:677 +#, python-format +msgid "Resizing lun %s using sub clone to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:684 +#, python-format +msgid "%s cannot be sub clone resized as it is hosted on compressed volume" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:690 +#, python-format +msgid "%s cannot be sub clone resized as it contains no blocks." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:707 +#, python-format +msgid "Post clone resize lun %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:718 +#, python-format +msgid "Failure staging lun %s to tmp." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:723 +#, python-format +msgid "Failure moving new cloned lun to %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:727 +#, python-format +msgid "Failure deleting staged tmp lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:730 +#, python-format +msgid "Unknown exception in post clone resize lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:732 +#, python-format +msgid "Exception details: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:736 +msgid "Getting lun block count." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:741 +#, python-format +msgid "Failure getting lun info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:785 +#, python-format +msgid "Failed to get vol with required size and extra specs for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:796 +#, python-format +msgid "Error provisioning vol %(name)s on %(volume)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:841 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:982 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:986 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1087 +msgid "Cluster ssc is not updated. No volume stats found." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1149 +#: cinder/volume/drivers/netapp/nfs.py:1080 +msgid "Unsupported ONTAP version. ONTAP version 7.3.1 and above is supported." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1153 +#: cinder/volume/drivers/netapp/nfs.py:1084 +#: cinder/volume/drivers/netapp/utils.py:320 +msgid "Api version could not be determined." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1164 +#, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1273 +#, python-format +msgid "Error finding luns for volume %s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1390 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1393 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1456 +msgid "Volume refresh job already running. Returning..." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1462 +#, python-format +msgid "Error refreshing vol capacity. Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1470 +#, python-format +msgid "Refreshing capacity info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:104 +#: cinder/volume/drivers/netapp/nfs.py:211 +#, python-format +msgid "NFS file %s not discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:224 +#, python-format +msgid "Copied image to volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:230 +#, python-format +msgid "Registering image in cache %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:237 +#, python-format +msgid "" +"Exception while registering image %(image_id)s in cache. Exception: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:250 +#, python-format +msgid "Found cache file for image %(image_id)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:263 +#, python-format +msgid "Cloning img from cache for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:271 +msgid "Image cache cleaning in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:282 +msgid "Image cache cleaning in progress." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:293 +#, python-format +msgid "Cleaning cache for share %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:298 +#, python-format +msgid "Files to be queued for deletion %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:305 +#, python-format +msgid "Exception during cache cleaning %(share)s. Message - %(ex)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:311 +msgid "Image cache cleaning done." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:336 +#, python-format +msgid "Bytes to free %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:343 +#, python-format +msgid "Delete file path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:358 +#, python-format +msgid "Deleting file at path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:363 +#, python-format +msgid "Exception during deleting %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:395 +#, python-format +msgid "Unexpected exception in cloning image %(image_id)s. 
Message: %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:411 +#, python-format +msgid "Cloning image %s from cache" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:415 +#, python-format +msgid "Cache share: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:425 +#, python-format +msgid "Unexpected exception during image cloning in share %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:431 +#, python-format +msgid "Cloning image %s directly in share" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:436 +#, python-format +msgid "Share is cloneable %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:443 +#, python-format +msgid "Image is raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:450 +#, python-format +msgid "Image will locally be converted to raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:457 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:467 +#, python-format +msgid "Performing post clone for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:474 +msgid "NFS file could not be discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:478 +msgid "Checking file for resize" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:482 +#, python-format +msgid "Resizing file to %sG" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:488 +msgid "Resizing image file failed." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:510 +msgid "Discover file retries exhausted." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:529 +#, python-format +msgid "Image location not in the expected format %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:557 +#, python-format +msgid "Found possible share matches %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:561 +msgid "Unexpected exception while short listing used share." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:599 +#, python-format +msgid "Extending volume %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:710 +#, python-format +msgid "Shares on vserver %s will only be used for provisioning." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:714 +#: cinder/volume/drivers/netapp/nfs.py:892 +msgid "No vserver set in config. SSC will be disabled." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:757 +#, python-format +msgid "Exception creating vol %(name)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:765 +#, python-format +msgid "Volume %s could not be created on shares." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:815 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:856 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:862 +#, python-format +msgid "" +"Cloning with params volume %(volume)s, src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:924 +msgid "No cluster ssc stats found. Wait for next volume stats update." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:941 +msgid "No shares found hence skipping ssc refresh." 
+msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:978 +#: cinder/volume/drivers/netapp/nfs.py:1221 +#, python-format +msgid "Shortlisted del elg files %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:983 +#: cinder/volume/drivers/netapp/nfs.py:1226 +#, python-format +msgid "Getting file usage for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:988 +#: cinder/volume/drivers/netapp/nfs.py:1231 +#, python-format +msgid "file-usage for path %(path)s is %(bytes)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1005 +#: cinder/volume/drivers/netapp/nfs.py:1268 +#, python-format +msgid "Share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1007 +#: cinder/volume/drivers/netapp/nfs.py:1270 +#, python-format +msgid "No share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1038 +#, python-format +msgid "Found volume %(vol)s for share %(share)s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1129 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1139 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:241 +#, python-format +msgid "Unexpected error while creating ssc vol list. Message - %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:272 +#, python-format +msgid "Exception querying aggr options. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:313 +#, python-format +msgid "Exception querying sis information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:347 +#, python-format +msgid "Exception querying mirror information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:379 +#, python-format +msgid "Exception querying storage disk. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:421 +#, python-format +msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:455 +#, python-format +msgid "Successfully completed stale refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:482 +#, python-format +msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:488 +#, python-format +msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:500 +msgid "Backend not a VolumeDriver." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:502 +msgid "Backend server not NaServer." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:505 +msgid "ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:517 +msgid "refresh stale ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:616 +msgid "Fatal error: User not permitted to query NetApp volumes." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:623 +#, python-format +msgid "" +"The user does not have access or sufficient privileges to use all ssc " +"apis. The ssc features %s may not work as expected." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:122 +msgid "ems executed successfully." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:124 +#, python-format +msgid "Failed to invoke ems. 
Message : %s" +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:137 +msgid "" +"It is not the recommended way to use drivers by NetApp. Please use " +"NetAppDriver to achieve the functionality." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:160 +msgid "Requires an NaServer instance." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:317 +msgid "Unsupported Clustered Data ONTAP version." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:99 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:150 +#, python-format +msgid "Extending volume: %(id)s New size: %(size)s GB" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:166 +#, python-format +msgid "Volume %s does not exist, it seems it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:179 +#, python-format +msgid "Cannot delete snapshot %(origin): %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:190 +#, python-format +msgid "Creating temp snapshot of the original volume: %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:200 +#: cinder/volume/drivers/nexenta/nfs.py:200 +#, python-format +msgid "Volume creation failed, deleting created snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:205 +#: cinder/volume/drivers/nexenta/nfs.py:205 +#, python-format +msgid "Failed to delete zfs snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:223 +#, python-format +msgid "Enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:250 +#, python-format +msgid "Remote NexentaStor appliance at %s should be SSH-bound." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:267 +#, python-format +msgid "" +"Cannot send source snapshot %(src)s to destination %(dst)s. Reason: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:275 +#, python-format +msgid "" +"Cannot delete temporary source snapshot %(src)s on NexentaStor Appliance:" +" %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:281 +#, python-format +msgid "Cannot delete source volume %(volume)s on NexentaStor Appliance: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:318 +#, python-format +msgid "Snapshot %s does not exist, it seems it was already deleted." 
+msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:439 +#: cinder/volume/drivers/windows/windows_utils.py:230 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:449 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:461 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:471 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:481 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:514 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:522 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:83 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:88 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:89 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:90 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:96 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:85 +#, python-format +msgid "Volume %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:89 +#, python-format +msgid "Folder %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:114 +#, python-format +msgid "Creating folder on Nexenta Store %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:146 +#, python-format +msgid "Cannot destroy created folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:176 +#, python-format +msgid "Cannot destroy cloned folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:227 +#, python-format +msgid "Folder %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:237 +#: cinder/volume/drivers/nexenta/nfs.py:268 +#, python-format +msgid "Snapshot %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:302 +#, python-format +msgid "Creating regular file: %s.This may take some time." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:313 +#, python-format +msgid "Regular file: %s created." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:365 +#, python-format +msgid "Sharing folder %s on Nexenta Store" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:393 +#, python-format +msgid "Shares loaded: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/utils.py:46 +#, python-format +msgid "Invalid value: \"%s\"" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:93 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:99 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:107 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:137 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:190 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:246 +#, python-format +msgid "Snapshot info: %(name)s => %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:321 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:79 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:166 +#, python-format +msgid "Invalid hp3parclient version. Version %s or greater required." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:179 +#, python-format +msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:193 +#, python-format +msgid "HP3PARCommon %(common_ver)s, hp3parclient %(rest_ver)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:212 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:494 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:228 +#, python-format +msgid "Failed to get domain because CPG (%s) doesn't exist on array." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:247 +#, python-format +msgid "Error extending volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:347 +#, python-format +msgid "command %s failed" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:390 +#, python-format +msgid "Error running ssh command: %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:586 +#, python-format +msgid "VV Set %s does not exist." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:633 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:684 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:752 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1004 +#, python-format +msgid "Failure in update_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1019 +#, python-format +msgid "Failure in clear_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1031 +#, python-format +msgid "Error attaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1039 +#, python-format +msgid "Error detaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:124 +#, python-format +msgid "Invalid IP address format '%s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:158 +#, python-format +msgid "" +"Found invalid iSCSI IP address(s) in configuration option(s) " +"hp3par_iscsi_ips or iscsi_ip_address '%s.'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:164 +msgid "At least one valid iSCSI IP address must be set." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:266 +msgid "Least busy iSCSI port not found, using first iSCSI port in list." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:75 +#, python-format +msgid "Failure while invoking function: %(func)s. Error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:162 +#, python-format +msgid "Error while terminating session: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:165 +msgid "Successfully established connection to the server." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:172 +#, python-format +msgid "Error while logging out the user: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:218 +#, python-format +msgid "" +"Not authenticated error occurred. Will create session and try API call " +"again: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:258 +#, python-format +msgid "Task: %(task)s progress: %(prog)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:262 +#, python-format +msgid "Task %s status: success." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:266 +#: cinder/volume/drivers/vmware/api.py:271 +#, python-format +msgid "Task: %(task)s failed with error: %(err)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:290 +msgid "Lease is ready." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:294 +msgid "Lease initializing..." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:304 +#, python-format +msgid "Error: unknown lease state %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:51 +#, python-format +msgid "Read %(bytes)s out of %(max)s from ThreadSafePipe." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:56 +#, python-format +msgid "Completed transfer of size %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:102 +#, python-format +msgid "Initiating image service update on image: %(image)s with meta: %(meta)s" +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:117 +#, python-format +msgid "Glance image: %s is now active." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:123 +#, python-format +msgid "Glance image: %s is in killed state." 
+msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:132 +#, python-format +msgid "Glance image %(id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:171 +#, python-format +msgid "" +"Exception during HTTP connection close in VMwareHTTPWrite. Exception is " +"%s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:203 +#: cinder/volume/drivers/vmware/read_write_util.py:292 +msgid "Could not retrieve URL from lease." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:206 +#, python-format +msgid "Opening vmdk url: %s for write." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:231 +#, python-format +msgid "Written %s bytes to vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:242 +#: cinder/volume/drivers/vmware/read_write_util.py:318 +#, python-format +msgid "Updating progress to %s percent." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:258 +#: cinder/volume/drivers/vmware/read_write_util.py:334 +msgid "Lease released." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:260 +#: cinder/volume/drivers/vmware/read_write_util.py:336 +#, python-format +msgid "Lease is already in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:295 +#, python-format +msgid "Opening vmdk url: %s for read." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:307 +#, python-format +msgid "Read %s bytes from vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:150 +#, python-format +msgid "Error(s): %s occurred in the call to RetrievePropertiesEx." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:189 +#, python-format +msgid "No such SOAP method %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:198 +#, python-format +msgid "httplib error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:209 +#, python-format +msgid "Socket error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:218 +#, python-format +msgid "Type error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:225 +#, python-format +msgid "Error in %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:112 +#, python-format +msgid "Returning spec value %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:115 +#, python-format +msgid "Invalid spec value: %s specified." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:118 +#, python-format +msgid "Returning default spec value: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:169 +#, python-format +msgid "%s not set." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:174 +#, python-format +msgid "Successfully setup driver: %(driver)s for server: %(ip)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:219 +msgid "Backing not available, no operation to be performed." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:287 +#, python-format +msgid "" +"Unable to pick datastore to accommodate %(size)s bytes from the " +"datastores: %(dss)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:293 +#, python-format +msgid "" +"Selected datastore: %(datastore)s with %(host_count)d connected host(s) " +"for the volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:375 +#, python-format +msgid "" +"Unable to find suitable datastore for volume of size: %(vol)s GB under " +"host: %(host)s. 
More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:385 +#, python-format +msgid "Unable to find host to accommodate a disk of size: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:412 +#, python-format +msgid "" +"Unable to find suitable datastore for volume: %(vol)s under host: " +"%(host)s. More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:422 +#, python-format +msgid "Unable to create volume: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:441 +#, python-format +msgid "The instance: %s for which initialize connection is called, exists." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:448 +#, python-format +msgid "There is no backing for the volume: %s. Need to create one." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:456 +msgid "The instance for which initialize connection is called, does not exist." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:461 +#, python-format +msgid "Trying to boot from an empty volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:470 +#, python-format +msgid "" +"Returning connection_info: %(info)s for volume: %(volume)s with " +"connector: %(connector)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:518 +#, python-format +msgid "Snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:523 +#, python-format +msgid "There is no backing, so will not create snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:528 +#, python-format +msgid "Successfully created snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:549 +#, python-format +msgid "Delete snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:554 +#, python-format +msgid "There is no backing, and so there is no snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:558 +#, python-format +msgid "Successfully deleted snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:586 +#, python-format +msgid "Successfully cloned new backing: %(back)s from source VMDK file: %(vmdk)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:603 +#, python-format +msgid "" +"There is no backing for the source volume: %(svol)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:633 +#, python-format +msgid "" +"There is no backing for the source snapshot: %(snap)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:642 +#: cinder/volume/drivers/vmware/vmdk.py:982 +#, python-format +msgid "" +"There is no snapshot point for the snapshoted volume: %(snap)s. Not " +"creating any backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:678 +#, python-format +msgid "Cannot create image of disk format: %s. Only vmdk disk format is accepted." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:713 +#: cinder/volume/drivers/vmware/vmdk.py:771 +#, python-format +msgid "Fetching glance image: %(id)s to server: %(host)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:722 +#: cinder/volume/drivers/vmware/vmdk.py:792 +#, python-format +msgid "Done copying image: %(id)s to volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:725 +#, python-format +msgid "" +"Exception in copy_image_to_volume: %(excep)s. Deleting the backing: " +"%(back)s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:746 +#, python-format +msgid "Exception in _select_ds_for_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:749 +#, python-format +msgid "Selected datastore %(ds)s for new volume of size %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:784 +#, python-format +msgid "Exception in copy_image_to_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:787 +#, python-format +msgid "Deleting the backing: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:808 +#, python-format +msgid "Copy glance image: %s to create new volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:842 +msgid "Upload to glance of attached volume is not supported." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:847 +#, python-format +msgid "Copy Volume: %s to new image." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:853 +#, python-format +msgid "Backing not found, creating for volume: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:872 +#, python-format +msgid "Done copying volume %(vol)s to a new image %(img)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:922 +#, python-format +msgid "Relocating volume: %(backing)s to %(ds)s and %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:961 +#: cinder/volume/drivers/vmware/volumeops.py:630 +#, python-format +msgid "Successfully created clone: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:974 +#, python-format +msgid "" +"There is no backing for the snapshoted volume: %(snap)s. Not creating any" +" backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1010 +#, python-format +msgid "" +"There is no backing for the source volume: %(src)s. Not creating any " +"backing for volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1018 +#, python-format +msgid "Linked clone of source volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:94 +#, python-format +msgid "Downloading image: %s from glance image server as a flat vmdk file." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:107 +#: cinder/volume/drivers/vmware/vmware_images.py:126 +#, python-format +msgid "Downloaded image: %s from glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:113 +#, python-format +msgid "Downloading image: %s from glance image server using HttpNfc import." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:132 +#, python-format +msgid "Uploading image: %s to the Glance image server using HttpNfc export." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:158 +#, python-format +msgid "Uploaded image: %s to the Glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:87 +#, python-format +msgid "Did not find any backing with name: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:94 +#, python-format +msgid "Deleting the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:97 +#, python-format +msgid "Initiated deletion of VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:99 +#, python-format +msgid "Deleted the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:237 +#, python-format +msgid "There are no valid datastores attached to %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:289 +#, python-format +msgid "" +"Creating folder: %(child_folder_name)s under parent folder: " +"%(parent_folder)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:306 +#, python-format +msgid "Child folder already present: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:314 +#, python-format +msgid "Created child folder: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:365 +#, python-format +msgid "Spec for creating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:383 +#, python-format +msgid "" +"Creating volume backing name: %(name)s disk_type: %(disk_type)s size_kb: " +"%(size_kb)s at folder: %(folder)s resourse pool: %(resource_pool)s " +"datastore name: %(ds_name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:395 +#, python-format +msgid "Initiated creation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:398 +#, python-format +msgid "Successfully created volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:438 +#, python-format +msgid "Spec for relocating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:452 +#, python-format +msgid "" +"Relocating backing: %(backing)s to datastore: %(ds)s and resource pool: " +"%(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:462 +#, python-format +msgid "Initiated relocation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:464 +#, python-format +msgid "" +"Successfully relocated volume backing: %(backing)s to datastore: %(ds)s " +"and resource pool: %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:474 +#, python-format +msgid "Moving backing: %(backing)s to folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:479 +#, python-format +msgid "Initiated move of volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:482 +#, python-format +msgid "Successfully moved volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:494 +#, python-format +msgid "Snapshoting backing: %(backing)s with name: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:501 +#, python-format +msgid "Initiated snapshot of volume backing: %(backing)s named: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:505 +#, python-format +msgid "Successfully created snapshot: %(snap)s for volume backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:553 +#, python-format +msgid "Deleting the snapshot: %(name)s from backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:558 +#, python-format +msgid "" +"Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " +"delete anything." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:565 +#, python-format +msgid "Initiated snapshot: %(name)s deletion for backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:569 +#, python-format +msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:597 +#, python-format +msgid "Spec for cloning the backing: %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:613 +#, python-format +msgid "" +"Creating a clone of backing: %(back)s, named: %(name)s, clone type: " +"%(type)s from snapshot: %(snap)s on datastore: %(ds)s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:627 +#, python-format +msgid "Initiated clone of backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:638 +#, python-format +msgid "Deleting file: %(file)s under datacenter: %(dc)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:646 +#, python-format +msgid "Initiated deletion via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:648 +#, python-format +msgid "Successfully deleted file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:701 +msgid "Copying disk data before snapshot of the VM" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:711 +#, python-format +msgid "Initiated copying disk data via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:713 +#, python-format +msgid "Successfully copied disk at: %(src)s to: %(dest)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:722 +#, python-format +msgid "Deleting vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:729 +#, python-format +msgid "Initiated deleting vmdk file via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:731 +#, python-format +msgid "Deleted vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/windows/windows.py:102 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:47 +#, python-format +msgid "" +"check_for_setup_error: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:53 +msgid "check_for_setup_error: there is no ISCSI traffic listening." +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:63 +#, python-format +msgid "" +"get_host_information: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:73 +#, python-format +msgid "" +"get_host_information: the ISCSI target information could not be " +"retrieved. WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:105 +#, python-format +msgid "" +"associate_initiator_with_iscsi_target: an association between initiator: " +"%(init)s and target name: %(target)s could not be established. WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:123 +#, python-format +msgid "" +"delete_iscsi_target: error when deleting the iscsi target associated with" +" target name: %(target)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:139 +#, python-format +msgid "" +"create_volume: error when creating the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:157 +#, python-format +msgid "" +"delete_volume: error when deleting the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:177 +#, python-format +msgid "" +"create_snapshot: error when creating the snapshot name: %(vol_name)s . 
" +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:193 +#, python-format +msgid "" +"create_volume_from_snapshot: error when creating the volume name: " +"%(vol_name)s from snapshot name: %(snap_name)s. WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:208 +#, python-format +msgid "" +"delete_snapshot: error when deleting the snapshot name: %(snap_name)s . " +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:223 +#, python-format +msgid "" +"create_iscsi_target: error when creating iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:240 +#, python-format +msgid "" +"remove_iscsi_target: error when deleting iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:255 +#, python-format +msgid "" +"add_disk_to_target: error adding disk associated to volume : %(vol_name)s" +" to the target name: %(tar_name)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:273 +#, python-format +msgid "" +"copy_vhd_disk: error when copying disk from source path : %(src_path)s to" +" destination path: %(dest_path)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:290 +#, python-format +msgid "" +"extend: error when extending the volume: %(vol_name)s .WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/flows/common.py:52 +#, python-format +msgid "Restoring source %(source_volid)s status to %(status)s" +msgstr "" + +#: cinder/volume/flows/common.py:58 +#, python-format +msgid "" +"Failed setting source volume %(source_volid)s back to its initial " +"%(source_status)s status" +msgstr "" + +#: cinder/volume/flows/common.py:83 +#, python-format +msgid "Updating volume: %(volume_id)s with %(update)s due to: %(reason)s" +msgstr "" + +#: cinder/volume/flows/common.py:90 +#: cinder/volume/flows/manager/create_volume.py:676 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(update)s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:81 +#, python-format +msgid "Originating snapshot status must be one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:103 +#, python-format +msgid "" +"Unable to create a volume from an originating source volume when its " +"status is not one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:126 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than the snapshot size " +"%(snap_size)sGB. They must be >= original snapshot size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:135 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than original volume size " +"%(source_size)sGB. They must be >= original volume size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:144 +#, python-format +msgid "Volume size %(size)s must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:186 +#, python-format +msgid "" +"Size of specified image %(image_size)sGB is larger than volume size " +"%(volume_size)sGB." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:194 +#, python-format +msgid "" +"Volume size %(volume_size)sGB cannot be smaller than the image minDisk " +"size %(min_disk)sGB." 
+msgstr "" + +#: cinder/volume/flows/api/create_volume.py:212 +#, python-format +msgid "Metadata property key %s greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:217 +#, python-format +msgid "Metadata property key %s value greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:254 +#, python-format +msgid "Availability zone '%s' is invalid" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:267 +msgid "Volume must be in the same availability zone as the snapshot" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:276 +msgid "Volume must be in the same availability zone as the source volume" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:315 +msgid "Volume type will be changed to be the same as the source volume." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:463 +#, python-format +msgid "Failed destroying volume entry %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:546 +#, python-format +msgid "Failed rolling back quota for %s reservations" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:590 +#, python-format +msgid "Failed to update quota for deleting volume: %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:678 +#: cinder/volume/flows/manager/create_volume.py:192 +#, python-format +msgid "Volume %s: create failed" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:682 +msgid "Unexpected build error:" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:105 +#, python-format +msgid "" +"Volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d due to " +"%(reason)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:124 +#, python-format +msgid "Volume %s: re-scheduled" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:141 +#, python-format +msgid "Updating volume %(volume_id)s with %(update)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:146 +#, python-format +msgid "Volume %s: resetting 'creating' status failed." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:165 +#, python-format +msgid "Volume %s: rescheduling failed" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:308 +#, python-format +msgid "" +"Failed notifying about the volume action %(event)s for volume " +"%(volume_id)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:345 +#, python-format +msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:347 +#, python-format +msgid "" +"Failed updating volume %(vol_id)s metadata using the provided " +"%(src_type)s %(src_id)s metadata" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:405 +#, python-format +msgid "" +"Failed fetching snapshot %(snapshot_id)s bootable flag using the provided" +" glance snapshot %(snapshot_ref_id)s volume reference" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:418 +#, python-format +msgid "Marking volume %s as bootable." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:421 +#, python-format +msgid "Failed updating volume %(volume_id)s bootable flag to true" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:448 +#, python-format +msgid "" +"Attempting download of %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s." 
+msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:455 +#: cinder/volume/flows/manager/create_volume.py:466 +#, python-format +msgid "" +"Failed to copy image %(image_id)s to volume: %(volume_id)s, error: " +"%(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:461 +#, python-format +msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:475 +#, python-format +msgid "" +"Downloaded image %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s successfully." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:512 +#, python-format +msgid "" +"Creating volume glance metadata for volume %(volume_id)s backed by image " +"%(image_id)s with: %(vol_metadata)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:526 +#, python-format +msgid "" +"Cloning %(volume_id)s from image %(image_id)s at location " +"%(image_location)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:552 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(updates)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:574 +#, python-format +msgid "Unable to create volume. Volume driver %s not initialized" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:588 +#, python-format +msgid "" +"Volume %(volume_id)s: being created using %(functor)s with specification:" +" %(volume_spec)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:611 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with creation provided " +"model %(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:619 +#, python-format +msgid "Volume %s: creating export" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:633 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with driver provided model " +"%(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:680 +#, python-format +msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" +msgstr "" + +#~ msgid "Error retrieving volume status: %s" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get system name" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get storage pool data" +#~ msgstr "" + +#~ msgid "Cannot find any Fibre Channel HBAs" +#~ msgstr "" + +#~ msgid "Volume status must be available or error" +#~ msgstr "" + +#~ msgid "No backend config with id %s" +#~ msgstr "" + +#~ msgid "No sm_flavor called %s" +#~ msgstr "" + +#~ msgid "No sm_volume with id %s" +#~ msgstr "" + +#~ msgid "Error: %s" +#~ msgstr "" + +#~ msgid "Unexpected state while cloning %s" +#~ msgstr "" + +#~ msgid "iSCSI device not found at %s" +#~ msgstr "" + +#~ msgid "Fibre Channel device not found." +#~ msgstr "" + +#~ msgid "Uncaught exception" +#~ msgstr "" + +#~ msgid "Out reactor registered" +#~ msgstr "" + +#~ msgid "CONSUMER GOT %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT QUEUED %(data)s" +#~ msgstr "" + +#~ msgid "Could not create IPC directory %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT %(data)s" +#~ msgstr "" + +#~ msgid "May specify only one of snapshot, imageRef or source volume" +#~ msgstr "" + +#~ msgid "Volume size cannot be lesser than the Snapshot size" +#~ msgstr "" + +#~ msgid "Unable to clone volumes that are in an error state" +#~ msgstr "" + +#~ msgid "Clones currently must be >= original volume size." 
+#~ msgstr "" + +#~ msgid "Volume size '%s' must be an integer and greater than 0" +#~ msgstr "" + +#~ msgid "Size of specified image is larger than volume size." +#~ msgstr "" + +#~ msgid "Image minDisk size is larger than the volume size." +#~ msgstr "" + +#~ msgid "" +#~ msgstr "" + +#~ msgid "Availability zone is invalid" +#~ msgstr "" + +#~ msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +#~ msgstr "" + +#~ msgid "volume %s: creating from snapshot" +#~ msgstr "" + +#~ msgid "volume %s: creating from existing volume" +#~ msgstr "" + +#~ msgid "volume %s: creating from image" +#~ msgstr "" + +#~ msgid "volume %s: creating" +#~ msgstr "" + +#~ msgid "Setting volume: %s status to error after failed image copy." +#~ msgstr "" + +#~ msgid "Unexpected Error: " +#~ msgstr "" + +#~ msgid "volume %s: creating export" +#~ msgstr "" + +#~ msgid "volume %s: create failed" +#~ msgstr "" + +#~ msgid "volume %s: created successfully" +#~ msgstr "" + +#~ msgid "volume %s: Error trying to reschedule create" +#~ msgstr "" + +#~ msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +#~ msgstr "" + +#~ msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +#~ msgstr "" + +#~ msgid "Downloaded image %(image_id)s to %(volume_id)s successfully." +#~ msgstr "" + +#~ msgid "Array Mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "LUN %(lun)s of size %(size)s MB is created." +#~ msgstr "" + +#~ msgid "Array mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "Failed to attach iser target for volume %(volume_id)s." +#~ msgstr "" + +#~ msgid "Fetching %s" +#~ msgstr "" + +#~ msgid "Link Local address is not found.:%s" +#~ msgstr "" + +#~ msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +#~ msgstr "" + +#~ msgid "Started %(name)s on %(_host)s:%(_port)s" +#~ msgstr "" + +#~ msgid "Unable to find a Fibre Channel volume device" +#~ msgstr "" + +#~ msgid "Volume device not found at %s" +#~ msgstr "" + +#~ msgid "Unable to find Volume Group: %s" +#~ msgstr "" + +#~ msgid "Failed to create Volume Group: %s" +#~ msgstr "" + +#~ msgid "snapshot %(snap_name)s: creating" +#~ msgstr "" + +#~ msgid "Running with CoraidDriver for ESM EtherCLoud" +#~ msgstr "" + +#~ msgid "Update session cookie %(session)s" +#~ msgstr "" + +#~ msgid "Message : %(message)s" +#~ msgstr "" + +#~ msgid "Error while trying to set group: %(message)s" +#~ msgstr "" + +#~ msgid "Unable to find group: %(group)s" +#~ msgstr "" + +#~ msgid "ESM urlOpen error" +#~ msgstr "" + +#~ msgid "JSON Error" +#~ msgstr "" + +#~ msgid "Request without URL" +#~ msgstr "" + +#~ msgid "Configure data : %s" +#~ msgstr "" + +#~ msgid "Configure response : %s" +#~ msgstr "" + +#~ msgid "Unable to retrive volume infos for volume %(volname)s" +#~ msgstr "" + +#~ msgid "Cannot login on Coraid ESM" +#~ msgstr "" + +#~ msgid "Fail to create volume %(volname)s" +#~ msgstr "" + +#~ msgid "Failed to delete volume %(volname)s" +#~ msgstr "" + +#~ msgid "Failed to Create Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "Failed to Delete Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "Failed to Create Volume from Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "fmt = %(fmt)s backed by: %(backing_file)s" +#~ msgstr "" + +#~ msgid "Expected image to be in raw format, but is %s" +#~ msgstr "" + +#~ msgid "volume group %s doesn't exist" +#~ msgstr "" + +#~ msgid "Error retrieving volume stats: %s" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Could not get system name" +#~ msgstr "" + +#~ msgid "CPG (%s) must 
be in a domain" +#~ msgstr "" + +#~ msgid "Error populating default encryption types!" +#~ msgstr "" + +#~ msgid "Unexpected error while running command." +#~ msgstr "" + +#~ msgid "Nexenta SA returned the error" +#~ msgstr "" + +#~ msgid "Ignored target group creation error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group member addition error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LU creation error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Copying metadata from snapshot %(snap_volume_id)s to %(volume_id)s" +#~ msgstr "" + +#~ msgid "Connection to glance failed" +#~ msgstr "" + +#~ msgid "Invalid snapshot" +#~ msgstr "" + +#~ msgid "Invalid input received" +#~ msgstr "" + +#~ msgid "Invalid volume type" +#~ msgstr "" + +#~ msgid "Invalid volume" +#~ msgstr "" + +#~ msgid "Invalid host" +#~ msgstr "" + +#~ msgid "Invalid auth key" +#~ msgstr "" + +#~ msgid "Invalid metadata" +#~ msgstr "" + +#~ msgid "Invalid metadata size" +#~ msgstr "" + +#~ msgid "Migration error" +#~ msgstr "" + +#~ msgid "Quota exceeded" +#~ msgstr "" + +#~ msgid "Connection to swift failed" +#~ msgstr "" + +#~ msgid "Volume migration failed" +#~ msgstr "" + +#~ msgid "SSH command injection detected" +#~ msgstr "" + +#~ msgid "Invalid qos specs" +#~ msgstr "" + +#~ msgid "debug in callback: %s" +#~ msgstr "" + +#~ msgid "Expected object of type: %s" +#~ msgstr "" + +#~ msgid "timefunc: '%(name)s' took %(total_time).2f secs" +#~ msgstr "" + +#~ msgid "base image still has %s snapshots so not deleting base image" +#~ msgstr "" + +#~ msgid "Failed to rename migration destination volume %(vol)s to %(name)s" +#~ msgstr "" + +#~ msgid "Resize volume \"%(name)s\" to %(size)s" +#~ msgstr "" + +#~ msgid "Volume \"%(name)s\" resized. New size is %(size)s" +#~ msgstr "" + +#~ msgid "Invalid snapshot backing file format: %s" +#~ msgstr "" + +#~ msgid "Extend volume from %(old_size) to %(new_size)" +#~ msgstr "" + +#~ msgid "pool %s doesn't exist" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Could not get system name." +#~ msgstr "" + +#~ msgid "Disk not found: %s" +#~ msgstr "" + +#~ msgid "read timed out" +#~ msgstr "" + +#~ msgid "check_for_setup_error." +#~ msgstr "" + +#~ msgid "check_for_setup_error: Can not get device type." +#~ msgstr "" + +#~ msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +#~ msgstr "" + +#~ msgid "_get_device_type: Storage Pool must be configured." +#~ msgstr "" + +#~ msgid "create_volume:volume name: %s." +#~ msgstr "" + +#~ msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +#~ msgstr "" + +#~ msgid "create_export: volume name:%s" +#~ msgstr "" + +#~ msgid "create_export:Volume %(name)s does not exist." +#~ msgstr "" + +#~ msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +#~ msgstr "" + +#~ msgid "terminate_connection:Host does not exist. Host name:%(host)s." +#~ msgstr "" + +#~ msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +#~ msgstr "" + +#~ msgid "create_snapshot:Device does not support snapshot." +#~ msgstr "" + +#~ msgid "create_snapshot:Resource pool needs 1GB valid size at least." +#~ msgstr "" + +#~ msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +#~ msgstr "" + +#~ msgid "create_snapshot:Snapshot does not exist. 
Snapshot name:%(name)s" +#~ msgstr "" + +#~ msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +#~ msgstr "" + +#~ msgid "delete_snapshot:Device does not support snapshot." +#~ msgstr "" + +#~ msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +#~ msgstr "" + +#~ msgid "_check_conf_file: %s" +#~ msgstr "" + +#~ msgid "Write login information to xml error. %s" +#~ msgstr "" + +#~ msgid "_get_login_info error. %s" +#~ msgstr "" + +#~ msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +#~ msgstr "" + +#~ msgid "_get_lun_set_info:%s" +#~ msgstr "" + +#~ msgid "_get_iscsi_info:%s" +#~ msgstr "" + +#~ msgid "CLI command:%s" +#~ msgstr "" + +#~ msgid "_execute_cli:%s" +#~ msgstr "" + +#~ msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +#~ msgstr "" + +#~ msgid "_get_tgt_iqn:iSCSI IP is %s." +#~ msgstr "" + +#~ msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +#~ msgstr "" + +#~ msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +#~ msgstr "" + +#~ msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +#~ msgstr "" + +#~ msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." +#~ msgstr "" + +#~ msgid "Ignored target creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group member addition error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LU creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LUN mapping entry addition error while ensuring export" +#~ msgstr "" + +#~ msgid "Invalid source volume %(reason)s." +#~ msgstr "" + +#~ msgid "The request is invalid." +#~ msgstr "" + +#~ msgid "Volume %(volume_id)s persistence file could not be found." +#~ msgstr "" + +#~ msgid "No disk at %(location)s" +#~ msgstr "" + +#~ msgid "Class %(class_name)s could not be found: %(exception)s" +#~ msgstr "" + +#~ msgid "Action not allowed." +#~ msgstr "" + +#~ msgid "Key pair %(key_name)s already exists." +#~ msgstr "" + +#~ msgid "Migration error: %(reason)s" +#~ msgstr "" + +#~ msgid "Maximum volume/snapshot size exceeded" +#~ msgstr "" + +#~ msgid "3PAR Host already exists: %(err)s. %(info)s" +#~ msgstr "" + +#~ msgid "Backup volume %(volume_id)s type not recognised." +#~ msgstr "" + +#~ msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s" +#~ msgstr "" + +#~ msgid "ssh_read: Read SSH timeout" +#~ msgstr "" + +#~ msgid "do_setup." +#~ msgstr "" + +#~ msgid "create_volume: volume name: %s." +#~ msgstr "" + +#~ msgid "delete_volume: volume name: %s." +#~ msgstr "" + +#~ msgid "create_cloned_volume: src volume: %(src)s tgt volume: %(tgt)s" +#~ msgstr "" + +#~ msgid "create_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" +#~ msgstr "" + +#~ msgid "delete_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" +#~ msgstr "" + +#~ msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Updating volume stats" +#~ msgstr "" + +#~ msgid "restore finished." +#~ msgstr "" + +#~ msgid "Error encountered during initialization of driver: %s" +#~ msgstr "" + +#~ msgid "Unabled to update stats, driver is uninitialized" +#~ msgstr "" + +#~ msgid "Snapshot file at %s does not exist." 
+#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %s does not exist" +#~ msgstr "" + +#~ msgid "Login to 3PAR array invalid" +#~ msgstr "" + +#~ msgid "There are no datastores present under %s." +#~ msgstr "" + +#~ msgid "Size for volume: %s not found, skipping secure delete." +#~ msgstr "" + +#~ msgid "Could not find attribute for LUN named %s" +#~ msgstr "" + +#~ msgid "Cleaning up incomplete backup operations" +#~ msgstr "" + +#~ msgid "Resetting volume %s to available (was backing-up)" +#~ msgstr "" + +#~ msgid "Resetting volume %s to error_restoring (was restoring-backup)" +#~ msgstr "" + +#~ msgid "Resetting backup %s to error (was creating)" +#~ msgstr "" + +#~ msgid "Resetting backup %s to available (was restoring)" +#~ msgstr "" + +#~ msgid "Resuming delete on backup: %s" +#~ msgstr "" + +#~ msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +#~ msgstr "" + +#~ msgid "create_backup finished. backup: %s" +#~ msgstr "" + +#~ msgid "delete_backup started, backup: %s" +#~ msgstr "" + +#~ msgid "delete_backup finished, backup %s deleted" +#~ msgstr "" + +#~ msgid "JSON transfer Error" +#~ msgstr "" + +#~ msgid "create volume error: %(err)s" +#~ msgstr "" + +#~ msgid "Create snapshot error." +#~ msgstr "" + +#~ msgid "Create luncopy error." +#~ msgstr "" + +#~ msgid "_find_host_lun_id transfer data error! " +#~ msgstr "" + +#~ msgid "ssh_read: Read SSH timeout." +#~ msgstr "" + +#~ msgid "There are no hosts in the inventory." +#~ msgstr "" + +#~ msgid "Unable to create volume: %(vol)s on the hosts: %(hosts)s." +#~ msgstr "" + +#~ msgid "Successfully cloned new backing: %s." +#~ msgstr "" + +#~ msgid "Successfully reverted clone: %(clone)s to snapshot: %(snapshot)s." +#~ msgstr "" + +#~ msgid "Copying backing files from %(src)s to %(dest)s." +#~ msgstr "" + +#~ msgid "Initiated copying of backing via task: %s." +#~ msgstr "" + +#~ msgid "Successfully copied backing to %s." +#~ msgstr "" + +#~ msgid "Registering backing at path: %s to inventory." +#~ msgstr "" + +#~ msgid "Initiated registring backing, task: %s." +#~ msgstr "" + +#~ msgid "Successfully registered backing: %s." +#~ msgstr "" + +#~ msgid "Reverting backing to snapshot: %s." +#~ msgstr "" + +#~ msgid "Initiated reverting snapshot via task: %s." +#~ msgstr "" + +#~ msgid "Successfully reverted to snapshot: %s." +#~ msgstr "" + +#~ msgid "Successfully copied disk data to: %s." +#~ msgstr "" + +#~ msgid "Error(s): %s occurred in the call to RetrieveProperties." +#~ msgstr "" + +#~ msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +#~ msgstr "" + +#~ msgid "Deploy v1 of the Cinder API. " +#~ msgstr "" + +#~ msgid "Deploy v2 of the Cinder API. " +#~ msgstr "" + +#~ msgid "_read_xml:%s" +#~ msgstr "" + +#~ msgid "request ip info is %s." +#~ msgstr "" + +#~ msgid "new str info is %s." +#~ msgstr "" + +#~ msgid "Failed to create iser target for volume %(volume_id)s." +#~ msgstr "" + +#~ msgid "Failed to remove iser target for volume %(volume_id)s." 
+#~ msgstr "" + +#~ msgid "rtstool is not installed correctly" +#~ msgstr "" + +#~ msgid "Creating iser_target for: %s" +#~ msgstr "" + +#~ msgid "Failed to create iser target for volume id:%(vol_id)s: %(e)s" +#~ msgstr "" + +#~ msgid "Removing iser_target for: %s" +#~ msgstr "" + +#~ msgid "Failed to remove iser target for volume id:%(vol_id)s: %(e)s" +#~ msgstr "" + +#~ msgid "Volume %s does not exist, it seems it was already deleted" +#~ msgstr "" + +#~ msgid "Executing zfs send/recv on the appliance" +#~ msgstr "" + +#~ msgid "zfs send/recv done, new volume %s created" +#~ msgstr "" + +#~ msgid "Failed to delete temp snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" + +#~ msgid "Failed to delete zfs recv snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" + +#~ msgid "rbd export-diff failed - %s" +#~ msgstr "" + +#~ msgid "rbd import-diff failed - %s" +#~ msgstr "" + +#~ msgid "%s is not on GPFS. Perhaps GPFS not mounted." +#~ msgstr "" + +#~ msgid "Folder %s does not exist, it seems it was already deleted." +#~ msgstr "" + +#~ msgid "No 'os-update_readonly_flag' was specified in request." +#~ msgstr "" + +#~ msgid "Volume 'readonly' flag must be specified in request as a boolean." +#~ msgstr "" + +#~ msgid "ISER provider_location not stored, using discovery" +#~ msgstr "" + +#~ msgid "Could not find iSER export for volume %s" +#~ msgstr "" + +#~ msgid "ISER Discovery: Found %s" +#~ msgstr "" + +#~ msgid "Failed to access the device on the path %(path)s: %(error)s." +#~ msgstr "" + +#~ msgid "iSER device not found at %s" +#~ msgstr "" + +#~ msgid "Found iSER node %(host_device)s (after %(tries)s rescans)." +#~ msgstr "" + +#~ msgid "Skipping ensure_export. No iser_target provisioned for volume: %s" +#~ msgstr "" + +#~ msgid "Skipping remove_export. No iser_target provisioned for volume: %s" +#~ msgstr "" + +#~ msgid "Downloading image: %s from glance image server." +#~ msgstr "" + +#~ msgid "Uploading image: %s to the Glance image server." +#~ msgstr "" + +#~ msgid "Invalid request body" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: prefix %s" +#~ msgstr "" + +#~ msgid "Schedule volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete schedule volume using flow: %s" +#~ msgstr "" + +#~ msgid "Create volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete create volume workflow" +#~ msgstr "" + +#~ msgid "Expected volume result not found" +#~ msgstr "" + +#~ msgid "Manager volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete manager volume workflow" +#~ msgstr "" + +#~ msgid "Unable to update stats, driver is uninitialized" +#~ msgstr "" + +#~ msgid "Bad reponse from server: %s" +#~ msgstr "" + +#~ msgid "%(flow)s has moved into state %(state)s from state %(old_state)s" +#~ msgstr "" + +#~ msgid "No request spec, will not reschedule" +#~ msgstr "" + +#~ msgid "No retry filter property or associated retry info, will not reschedule" +#~ msgstr "" + +#~ msgid "Retry info not present, will not reschedule" +#~ msgstr "" + +#~ msgid "Clear capabilities" +#~ msgstr "" + +#~ msgid "This usually means the volume was never succesfully created." +#~ msgstr "" + +#~ msgid "setting LU uppper (end) limit to %s" +#~ msgstr "" + +#~ msgid "Can't find lun or lun goup in array" +#~ msgstr "" + +#~ msgid "Volume to be restored to is smaller than the backup to be restored" +#~ msgstr "" + +#~ msgid "Volume driver '%(driver)s' not initialized." 
+#~ msgstr "" + +#~ msgid "in looping call" +#~ msgstr "" + +#~ msgid "Is the appropriate service running?" +#~ msgstr "" + +#~ msgid "Could not find another host" +#~ msgstr "" + +#~ msgid "Not enough allocatable volume gigabytes remaining" +#~ msgstr "" + +#~ msgid "Unable to update stats on non-intialized Volume Group: %s" +#~ msgstr "" + +#~ msgid "do_setup: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "migrate_volume started with more than one vdisk copy" +#~ msgstr "" + +#~ msgid "migrate_volume: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "Selected datastore: %s for the volume." +#~ msgstr "" + +#~ msgid "There are no valid datastores present under %s." +#~ msgstr "" + +#~ msgid "Unable to create volume, driver not initialized" +#~ msgstr "" + +#~ msgid "Migration %(migration_id)s could not be found." +#~ msgstr "" + +#~ msgid "Bad driver response status: %(status)s" +#~ msgstr "" + +#~ msgid "Instance %(instance_id)s could not be found." +#~ msgstr "" + +#~ msgid "Volume retype failed: %(reason)s" +#~ msgstr "" + +#~ msgid "SIGTERM received" +#~ msgstr "" + +#~ msgid "Child %(pid)d exited with status %(code)d" +#~ msgstr "" + +#~ msgid "_wait_child %d" +#~ msgstr "" + +#~ msgid "wait wrap.failed %s" +#~ msgstr "" + +#~ msgid "" +#~ "Report interval must be less than " +#~ "service down time. Current config: " +#~ ". Setting " +#~ "service_down_time to: %(new_service_down_time)s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target for volume %(name)s." +#~ msgstr "" + +#~ msgid "Updating iscsi target: %s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target %(name)s: %(e)s" +#~ msgstr "" + +#~ msgid "Caught '%(exception)s' exception." +#~ msgstr "" + +#~ msgid "Get code level failed" +#~ msgstr "" + +#~ msgid "do_setup: Could not get system name" +#~ msgstr "" + +#~ msgid "Failed to get license information." +#~ msgstr "" + +#~ msgid "do_setup: No configured nodes" +#~ msgstr "" + +#~ msgid "enter: _get_chap_secret_for_host: host name %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_chap_secret_for_host: host name " +#~ "%(host_name)s with secret %(chap_secret)s" +#~ msgstr "" + +#~ msgid "" +#~ "_create_host: Cannot clean host name. " +#~ "Host name is not unicode or string" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: %s" +#~ msgstr "" + +#~ msgid "leave: _get_host_from_connector: host %s" +#~ msgstr "" + +#~ msgid "enter: _create_host: host %s" +#~ msgstr "" + +#~ msgid "_create_host: No connector ports" +#~ msgstr "" + +#~ msgid "leave: _create_host: host %(host)s - %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +#~ msgstr "" + +#~ msgid "" +#~ "storwize_svc_multihostmap_enabled is set to " +#~ "False, Not allow multi host mapping" +#~ msgstr "" + +#~ msgid "volume %s mapping to multi host" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _map_vol_to_host: LUN %(result_lun)s, " +#~ "volume %(volume_name)s, host %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "leave: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "_create_host failed to return the host name." +#~ msgstr "" + +#~ msgid "_get_host_from_connector failed to return the host name for connector" +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to any host found." +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: Multiple mappings of " +#~ "volume %(vol_name)s found, no host " +#~ "specified." 
+#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to host %(host_name)s found" +#~ msgstr "" + +#~ msgid "protocol must be specified as ' iSCSI' or ' FC'" +#~ msgstr "" + +#~ msgid "enter: _create_vdisk: vdisk %s " +#~ msgstr "" + +#~ msgid "" +#~ "_create_vdisk %(name)s - did not find success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find success " +#~ "message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find mapping " +#~ "id in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to prepare FlashCopy" +#~ " from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "Unexecpted mapping status %(status)s for " +#~ "mapping %(id)s. Attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Mapping %(id)s prepare failed to " +#~ "complete within the allotted %(to)d " +#~ "seconds timeout. Terminating." +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s with " +#~ "exception %(ex)s" +#~ msgstr "" + +#~ msgid "_prepare_fc_map: %s" +#~ msgstr "" + +#~ msgid "" +#~ "_start_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "enter: _run_flashcopy: execute FlashCopy from" +#~ " source %(source)s to target %(target)s" +#~ msgstr "" + +#~ msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +#~ msgstr "" + +#~ msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %(src_vdisk)s (%(src_id)s) does not exist" +#~ msgstr "" + +#~ msgid "" +#~ "_create_copy: cannot get source vdisk " +#~ "%(src)s capacity from vdisk attributes " +#~ "%(attr)s" +#~ msgstr "" + +#~ msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_flashcopy_mapping_attributes: mapping " +#~ "%(fc_map_id)s, attributes %(attributes)s" +#~ msgstr "" + +#~ msgid "enter: _is_vdisk_defined: vdisk %s " +#~ msgstr "" + +#~ msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +#~ msgstr "" + +#~ msgid "enter: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "warning: Tried to delete vdisk %s but it does not exist." 
+#~ msgstr "" + +#~ msgid "leave: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "" +#~ "_add_vdisk_copy %(name)s - did not find" +#~ " success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "_get_vdisk_copy_attrs: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "_get_pool_attrs: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "enter: _execute_command_and_parse_attributes: command %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _execute_command_and_parse_attributes:\n" +#~ "command: %(cmd)s\n" +#~ "attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "_get_hdr_dic: attribute headers and values do not match.\n" +#~ " Headers: %(header)s\n" +#~ " Values: %(row)s" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ "stdout: %(out)s\n" +#~ "stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "Did not find expected column in %(fun)s: %(hdr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Volume size %(size)s cannot be lesser" +#~ " than the snapshot size %(snap_size)s. " +#~ "They must be >= original snapshot " +#~ "size." +#~ msgstr "" + +#~ msgid "" +#~ "Clones currently disallowed when %(size)s " +#~ "< %(source_size)s. They must be >= " +#~ "original volume size." +#~ msgstr "" + +#~ msgid "" +#~ "Size of specified image %(image_size)s " +#~ "is larger than volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "" +#~ "Image minDisk size %(min_disk)s is " +#~ "larger than the volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "Updating volume %(volume_id)s with %(update)s" +#~ msgstr "" + +#~ msgid "Volume %s: resetting 'creating' status failed" +#~ msgstr "" + +#~ msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s" +#~ msgstr "" + +#~ msgid "Marking volume %s as bootable" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting download of %(image_id)s " +#~ "(%(image_location)s) to volume %(volume_id)s" +#~ msgstr "" + +#~ msgid "" +#~ "Downloaded image %(image_id)s (%(image_location)s)" +#~ " to volume %(volume_id)s successfully" +#~ msgstr "" + +#~ msgid "" +#~ "Creating volume glance metadata for " +#~ "volume %(volume_id)s backed by image " +#~ "%(image_id)s with: %(vol_metadata)s" +#~ msgstr "" + +#~ msgid "" +#~ "Cloning %(volume_id)s from image %(image_id)s" +#~ " at location %(image_location)s" +#~ msgstr "" + diff --git a/cinder/locale/zh_CN/LC_MESSAGES/cinder.po b/cinder/locale/zh_CN/LC_MESSAGES/cinder.po index 53a912cd1d..84c4dec2ca 100644 --- a/cinder/locale/zh_CN/LC_MESSAGES/cinder.po +++ b/cinder/locale/zh_CN/LC_MESSAGES/cinder.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: cinder\n" "Report-Msgid-Bugs-To: FULL NAME \n" -"POT-Creation-Date: 2012-04-08 23:04+0000\n" +"POT-Creation-Date: 2014-02-03 06:16+0000\n" "PO-Revision-Date: 2012-04-03 23:36+0000\n" "Last-Translator: cheesecake \n" "Language-Team: Chinese (Simplified) \n" @@ -15,1087 +15,652 @@ msgstr "" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 0.9.6\n" +"Generated-By: Babel 1.3\n" -#: cinder/context.py:59 +#: cinder/context.py:61 #, python-format msgid "Arguments dropped when creating context: %s" msgstr "" -#: cinder/context.py:90 +#: cinder/context.py:102 #, python-format msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" 
msgstr "" -#: cinder/crypto.py:48 -msgid "Filename of root CA" -msgstr "根证书的文件名" - -#: cinder/crypto.py:51 -msgid "Filename of private key" -msgstr "私钥文件名" - -#: cinder/crypto.py:54 -msgid "Filename of root Certificate Revocation List" -msgstr "根证书撤销列表的文件名" - -#: cinder/crypto.py:57 -msgid "Where we keep our keys" -msgstr "保存密钥的位置" - -#: cinder/crypto.py:60 -msgid "Where we keep our root CA" -msgstr "保存根证书的位置" +#: cinder/exception.py:66 cinder/brick/exception.py:33 +msgid "An unknown exception occurred." +msgstr "发生未知异常。" -#: cinder/crypto.py:63 -msgid "Should we use a CA for each project?" -msgstr "是否每个项目都使用认证权威?" +#: cinder/exception.py:88 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" -#: cinder/crypto.py:67 +#: cinder/exception.py:107 #, python-format -msgid "Subject for certificate for users, %s for project, user, timestamp" -msgstr "用户证书的标题,%s 依次分别为项目,用户,时间戳" +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" -#: cinder/crypto.py:72 +#: cinder/exception.py:112 #, python-format -msgid "Subject for certificate for projects, %s for project, timestamp" -msgstr "项目证书的标题,%s 依次分别为项目,时间戳" +msgid "Volume driver reported an error: %(message)s" +msgstr "" -#: cinder/crypto.py:292 +#: cinder/exception.py:116 #, python-format -msgid "Flags path: %s" -msgstr "标记所在路径:%s" - -#: cinder/exception.py:56 -msgid "Unexpected error while running command." -msgstr "运行命令时出现意外错误" +msgid "Backup driver reported an error: %(message)s" +msgstr "" -#: cinder/exception.py:59 +#: cinder/exception.py:120 #, python-format -msgid "" -"%(description)s\n" -"Command: %(cmd)s\n" -"Exit code: %(exit_code)s\n" -"Stdout: %(stdout)r\n" -"Stderr: %(stderr)r" +msgid "Connection to glance failed: %(reason)s" msgstr "" -"%(description)s\n" -"命令:%(cmd)s\n" -"退出代码:%(exit_code)s\n" -"标准输出:%(stdout)r\n" -"标准错误输出:%(stderr)r" - -#: cinder/exception.py:94 -msgid "DB exception wrapped." -msgstr "数据库异常被包裹。" - -#: cinder/exception.py:155 -msgid "An unknown exception occurred." -msgstr "发生未知异常。" -#: cinder/exception.py:178 -msgid "Failed to decrypt text" -msgstr "解密文本失败" - -#: cinder/exception.py:182 -msgid "Failed to paginate through images from image service" -msgstr "无法在镜像服务中翻页" - -#: cinder/exception.py:186 -msgid "Virtual Interface creation failed" -msgstr "虚拟接口创建失败" - -#: cinder/exception.py:190 -msgid "5 attempts to create virtual interfacewith unique mac address failed" -msgstr "用唯一mac地址5次尝试创建虚拟接口失败" - -#: cinder/exception.py:195 -msgid "Connection to glance failed" -msgstr "连接到glance失败" - -#: cinder/exception.py:199 -msgid "Connection to melange failed" -msgstr "连接到melange失败" - -#: cinder/exception.py:203 +#: cinder/exception.py:124 msgid "Not authorized." msgstr "未授权。" -#: cinder/exception.py:208 +#: cinder/exception.py:129 msgid "User does not have admin privileges" msgstr "用户没有管理员权限" -#: cinder/exception.py:212 +#: cinder/exception.py:133 #, python-format msgid "Policy doesn't allow %(action)s to be performed." msgstr "政策不允许 %(action)s 被执行。" -#: cinder/exception.py:216 +#: cinder/exception.py:137 #, fuzzy, python-format msgid "Not authorized for image %(image_id)s." msgstr "没有为镜像 %(image_id)s 找到内核。" -#: cinder/exception.py:220 +#: cinder/exception.py:141 +msgid "Volume driver not ready." +msgstr "" + +#: cinder/exception.py:145 cinder/brick/exception.py:74 msgid "Unacceptable parameters." 
msgstr "无法接受的参数。" -#: cinder/exception.py:225 -msgid "Invalid snapshot" -msgstr "无效的快照" +#: cinder/exception.py:150 +#, python-format +msgid "Invalid snapshot: %(reason)s" +msgstr "" -#: cinder/exception.py:229 +#: cinder/exception.py:154 #, python-format -msgid "Volume %(volume_id)s is not attached to anything" -msgstr "卷 %(volume_id)s 没有附加任何东西" +msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." +msgstr "" -#: cinder/exception.py:233 cinder/api/openstack/compute/contrib/keypairs.py:113 -msgid "Keypair data is invalid" -msgstr "密钥对数据无效" +#: cinder/exception.py:159 +#, fuzzy, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "卷 %(volume_id)s 没有附加任何东西" -#: cinder/exception.py:237 +#: cinder/exception.py:163 msgid "Failed to load data into json format" msgstr "把数据加载为json格式失败" -#: cinder/exception.py:241 -msgid "The request is invalid." +#: cinder/exception.py:167 +#, fuzzy +msgid "The results are invalid." msgstr "请求无效。" -#: cinder/exception.py:245 -#, python-format -msgid "Invalid signature %(signature)s for user %(user)s." -msgstr "无效签名 %(signature)s 针对用户是 %(user)s" - -#: cinder/exception.py:249 -msgid "Invalid input received" -msgstr "收到无效的输入" - -#: cinder/exception.py:253 +#: cinder/exception.py:171 #, python-format -msgid "Invalid instance type %(instance_type)s." -msgstr "无效的实例类型 %(instance_type)s。" - -#: cinder/exception.py:257 -msgid "Invalid volume type" -msgstr "无效的卷类型" - -#: cinder/exception.py:261 -msgid "Invalid volume" -msgstr "无效的卷" +msgid "Invalid input received: %(reason)s" +msgstr "" -#: cinder/exception.py:265 +#: cinder/exception.py:175 #, python-format -msgid "Invalid port range %(from_port)s:%(to_port)s. %(msg)s" -msgstr "无效的端口范围 %(from_port)s:%(to_port)s. %(msg)s" +msgid "Invalid volume type: %(reason)s" +msgstr "" -#: cinder/exception.py:269 +#: cinder/exception.py:179 #, python-format -msgid "Invalid IP protocol %(protocol)s." -msgstr "无效的IP协议 %(protocol)s。" +msgid "Invalid volume: %(reason)s" +msgstr "" -#: cinder/exception.py:273 +#: cinder/exception.py:183 #, python-format msgid "Invalid content type %(content_type)s." msgstr "无效的内容类型 %(content_type)s。" -#: cinder/exception.py:277 +#: cinder/exception.py:187 #, python-format -msgid "Invalid cidr %(cidr)s." -msgstr "无效的cidr %(cidr)s。" - -#: cinder/exception.py:281 -msgid "Invalid reuse of an RPC connection." -msgstr "无效的RPC连接重用。" - -#: cinder/exception.py:285 -msgid "Invalid Parameter: Unicode is not supported by the current database." +msgid "Invalid host: %(reason)s" msgstr "" -#: cinder/exception.py:292 +#: cinder/exception.py:193 cinder/brick/exception.py:81 #, python-format msgid "%(err)s" msgstr "%(err)s" -#: cinder/exception.py:296 -#, python-format -msgid "" -"Cannot perform action '%(action)s' on aggregate %(aggregate_id)s. Reason:" -" %(reason)s." -msgstr "无法执行action '%(action)s' 于聚合 %(aggregate_id)s 上。原因: %(reason)s。" - -#: cinder/exception.py:301 -#, fuzzy, python-format -msgid "Group not valid. Reason: %(reason)s" -msgstr "找不到有效主机,原因是 %(reason)s。" - -#: cinder/exception.py:305 -#, python-format -msgid "" -"Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot %(method)s while" -" the instance is in this state." -msgstr "实例 %(instance_uuid)s 处于%(attr)s %(state)s 中。该实例在这种状态下不能执行 %(method)s。" - -#: cinder/exception.py:310 +#: cinder/exception.py:197 #, python-format -msgid "Instance %(instance_id)s is not running." 
-msgstr "实例 %(instance_id)s 没有运行。" - -#: cinder/exception.py:314 -#, python-format -msgid "Instance %(instance_id)s is not suspended." -msgstr "实例 %(instance_id)s 没有挂起。" +msgid "Invalid auth key: %(reason)s" +msgstr "" -#: cinder/exception.py:318 +#: cinder/exception.py:201 #, python-format -msgid "Instance %(instance_id)s is not in rescue mode" -msgstr "实例 %(instance_id)s 不在救援模式。" - -#: cinder/exception.py:322 -msgid "Failed to suspend instance" -msgstr "挂起实例失败" - -#: cinder/exception.py:326 -msgid "Failed to resume server" -msgstr "服务器恢复失败" - -#: cinder/exception.py:330 -msgid "Failed to reboot instance" -msgstr "重新启动实例失败" - -#: cinder/exception.py:334 -#, fuzzy -msgid "Failed to terminate instance" -msgstr "重新启动实例失败" +msgid "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" +msgstr "" -#: cinder/exception.py:338 +#: cinder/exception.py:206 msgid "Service is unavailable at this time." msgstr "该时刻服务无法使用。" -#: cinder/exception.py:342 -msgid "Volume service is unavailable at this time." -msgstr "该时刻卷服务无法使用。" - -#: cinder/exception.py:346 -msgid "Compute service is unavailable at this time." -msgstr "该时刻计算服务无法使用。" - -#: cinder/exception.py:350 -#, python-format -msgid "Unable to migrate instance (%(instance_id)s) to current host (%(host)s)." -msgstr "无法把实例 (%(instance_id)s) 迁移到当前主机 (%(host)s)。" - -#: cinder/exception.py:355 -msgid "Destination compute host is unavailable at this time." -msgstr "该时刻目标计算主机无法使用。" - -#: cinder/exception.py:359 -msgid "Original compute host is unavailable at this time." -msgstr "该时刻原始计算主机无法使用。" - -#: cinder/exception.py:363 -msgid "The supplied hypervisor type of is invalid." -msgstr "提供的虚拟机管理程序类型无效。" - -#: cinder/exception.py:367 -msgid "The instance requires a newer hypervisor version than has been provided." -msgstr "该实例需要比当前版本更新的虚拟机管理程序。" - -#: cinder/exception.py:372 -#, python-format -msgid "" -"The supplied disk path (%(path)s) already exists, it is expected not to " -"exist." -msgstr "提供的磁盘路径 (%(path)s) 已经存在,预计是不存在的。" - -#: cinder/exception.py:377 -#, python-format -msgid "The supplied device path (%(path)s) is invalid." -msgstr "提供的设备路径 (%(path)s) 是无效的。" - -#: cinder/exception.py:381 -#, fuzzy, python-format -msgid "The supplied device (%(device)s) is busy." -msgstr "提供的设备路径 (%(path)s) 是无效的。" - -#: cinder/exception.py:385 -msgid "Unacceptable CPU info" -msgstr "无法接受的CPU信息" - -#: cinder/exception.py:389 -#, python-format -msgid "%(address)s is not a valid IP v4/6 address." -msgstr "%(address)s 不是有效的IP v4/6地址。" - -#: cinder/exception.py:393 -#, python-format -msgid "" -"VLAN tag is not appropriate for the port group %(bridge)s. Expected VLAN " -"tag is %(tag)s, but the one associated with the port group is %(pgroup)s." -msgstr "VLAN标签对于端口组%(bridge)s 是不适合的。预计的VLAN标签是 %(tag)s,但与端口组关联的是 %(pgroup)s。" - -#: cinder/exception.py:399 -#, python-format -msgid "" -"vSwitch which contains the port group %(bridge)s is not associated with " -"the desired physical adapter. Expected vSwitch is %(expected)s, but the " -"one associated is %(actual)s." 
-msgstr "" -"包含端口组 %(bridge)s 的vSwitch没有与预计的物理适配器关联。预计的vSwitch是 %(expected)s,但关联的是 " -"%(actual)s。" - -#: cinder/exception.py:406 -#, python-format -msgid "Disk format %(disk_format)s is not acceptable" -msgstr "磁盘格式 %(disk_format)s 不能接受" - -#: cinder/exception.py:410 +#: cinder/exception.py:210 #, python-format msgid "Image %(image_id)s is unacceptable: %(reason)s" msgstr "镜像 %(image_id)s 无法接受,原因是: %(reason)s" -#: cinder/exception.py:414 +#: cinder/exception.py:214 #, python-format -msgid "Instance %(instance_id)s is unacceptable: %(reason)s" -msgstr "实例 %(instance_id)s 无法接受,原因是: %(reason)s" +msgid "The device in the path %(path)s is unavailable: %(reason)s" +msgstr "" -#: cinder/exception.py:418 +#: cinder/exception.py:218 #, python-format -msgid "Ec2 id %(ec2_id)s is unacceptable." -msgstr "Ec2 id %(ec2_id)s 无法接受。" +msgid "Expected a uuid but received %(uuid)s." +msgstr "" -#: cinder/exception.py:422 +#: cinder/exception.py:222 cinder/brick/exception.py:68 msgid "Resource could not be found." msgstr "资源没有找到。" -#: cinder/exception.py:427 -#, python-format -msgid "Required flag %(flag)s not set." -msgstr "要求的标记 %(flag)s 没有设置。" - -#: cinder/exception.py:431 +#: cinder/exception.py:228 #, python-format msgid "Volume %(volume_id)s could not be found." msgstr "卷 %(volume_id)s 没有找到。" -#: cinder/exception.py:435 -#, fuzzy, python-format -msgid "Unable to locate account %(account_name)s on Solidfire device" -msgstr "无法找到帐户 %(account_name) on Solidfire 设备" +#: cinder/exception.py:232 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "卷 %(volume_id)s 没有含键 %(metadata_key)s 的元数据。" -#: cinder/exception.py:440 +#: cinder/exception.py:237 #, python-format -msgid "Volume not found for instance %(instance_id)s." -msgstr "没有为实例 %(instance_id)s 找到卷。" +msgid "" +"Volume %(volume_id)s has no administration metadata with key " +"%(metadata_key)s." +msgstr "" -#: cinder/exception.py:444 +#: cinder/exception.py:242 #, python-format -msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." -msgstr "卷 %(volume_id)s 没有含键 %(metadata_key)s 的元数据。" +msgid "Invalid metadata: %(reason)s" +msgstr "" -#: cinder/exception.py:449 -msgid "Zero volume types found." -msgstr "没有找到卷类型。" +#: cinder/exception.py:246 +#, python-format +msgid "Invalid metadata size: %(reason)s" +msgstr "" -#: cinder/exception.py:453 +#: cinder/exception.py:250 +#, fuzzy, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "实例 %(instance_id)s 没有键为 %(metadata_key)s 的元数据。" + +#: cinder/exception.py:255 #, python-format msgid "Volume type %(volume_type_id)s could not be found." msgstr "卷类型 %(volume_type_id)s 没有找到。" -#: cinder/exception.py:457 +#: cinder/exception.py:259 #, python-format msgid "Volume type with name %(volume_type_name)s could not be found." msgstr "名为 %(volume_type_name)s 的卷类型没有找到。" -#: cinder/exception.py:462 +#: cinder/exception.py:264 #, python-format msgid "" "Volume Type %(volume_type_id)s has no extra specs with key " "%(extra_specs_key)s." msgstr "卷类型 %(volume_type_id)s 没有额外说明键 %(extra_specs_key)s 。" -#: cinder/exception.py:467 +#: cinder/exception.py:269 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s deletion is not allowed with volumes " +"present with the type." +msgstr "" + +#: cinder/exception.py:274 #, python-format msgid "Snapshot %(snapshot_id)s could not be found." 
msgstr "快照 %(snapshot_id)s 没有找到。" -#: cinder/exception.py:471 +#: cinder/exception.py:278 #, python-format msgid "deleting volume %(volume_name)s that has snapshot" msgstr "正在删除有快照的卷 %(volume_name)s" -#: cinder/exception.py:475 +#: cinder/exception.py:282 #, python-format msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" msgstr "" -#: cinder/exception.py:480 +#: cinder/exception.py:287 #, python-format msgid "No target id found for volume %(volume_id)s." msgstr "没有为卷 %(volume_id)s 找到目标id。" -#: cinder/exception.py:484 -#, python-format -msgid "No disk at %(location)s" -msgstr "在 %(location)s 没有磁盘" - -#: cinder/exception.py:488 -#, python-format -msgid "Could not find a handler for %(driver_type)s volume." -msgstr "无法为 %(driver_type)s 卷找到句柄。" - -#: cinder/exception.py:492 +#: cinder/exception.py:291 #, python-format msgid "Invalid image href %(image_href)s." msgstr "无效的镜像href %(image_href)s。" -#: cinder/exception.py:496 -msgid "" -"Some images have been stored via hrefs. This version of the api does not " -"support displaying image hrefs." -msgstr "有些镜像通过hrefs存储。该api版本不支持显示镜像hrefs。" - -#: cinder/exception.py:501 +#: cinder/exception.py:295 #, python-format msgid "Image %(image_id)s could not be found." msgstr "镜像 %(image_id)s 没有找到。" -#: cinder/exception.py:505 -#, python-format -msgid "Kernel not found for image %(image_id)s." -msgstr "没有为镜像 %(image_id)s 找到内核。" - -#: cinder/exception.py:509 -#, python-format -msgid "User %(user_id)s could not be found." -msgstr "用户 %(user_id)s 没有找到。" - -#: cinder/exception.py:513 +#: cinder/exception.py:299 #, python-format -msgid "Project %(project_id)s could not be found." -msgstr "项目 %(project_id)s 没有找到。" +msgid "Service %(service_id)s could not be found." +msgstr "服务 %(service_id)s 没有找到。" -#: cinder/exception.py:517 +#: cinder/exception.py:303 #, python-format -msgid "User %(user_id)s is not a member of project %(project_id)s." -msgstr "用户 %(user_id)s 不是项目 %(project_id)s 的成员。" +msgid "Host %(host)s could not be found." +msgstr "主机 %(host)s 没有找到。" -#: cinder/exception.py:521 +#: cinder/exception.py:307 #, python-format -msgid "Role %(role_id)s could not be found." -msgstr "角色 %(role_id)s 没有找到。" +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "调度器主机过滤器 %(filter_name)s 没有找到。" -#: cinder/exception.py:525 -msgid "Cannot find SR to read/write VDI." -msgstr "没有找到存储库来读写VDI。" +#: cinder/exception.py:311 +#, fuzzy, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "调度器主机过滤器 %(filter_name)s 没有找到。" -#: cinder/exception.py:529 +#: cinder/exception.py:315 #, python-format -msgid "%(req)s is required to create a network." -msgstr "创建网络 %(req)s 是必要的。" +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "没有找到二进制 %(binary)s 在主机 %(host)s 上。" -#: cinder/exception.py:533 +#: cinder/exception.py:319 #, python-format -msgid "Network %(network_id)s could not be found." -msgstr "网络 %(network_id)s 没有找到。" +msgid "Invalid reservation expiration %(expire)s." 
+msgstr "" -#: cinder/exception.py:537 +#: cinder/exception.py:323 #, python-format -msgid "Network could not be found for bridge %(bridge)s" -msgstr "无法为桥 %(bridge)s 找到网络" +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" -#: cinder/exception.py:541 -#, python-format -msgid "Network could not be found for uuid %(uuid)s" -msgstr "无法为uuid %(uuid)s 找到网络" +#: cinder/exception.py:328 +msgid "Quota could not be found" +msgstr "配额没有找到。" -#: cinder/exception.py:545 +#: cinder/exception.py:332 #, python-format -msgid "Network could not be found with cidr %(cidr)s." -msgstr "无法为cidr %(cidr)s 找到网络。" +msgid "Unknown quota resources %(unknown)s." +msgstr "" -#: cinder/exception.py:549 +#: cinder/exception.py:336 #, python-format -msgid "Network could not be found for instance %(instance_id)s." -msgstr "无法为实例 %(instance_id)s 找到网络。" - -#: cinder/exception.py:553 -msgid "No networks defined." -msgstr "没有网络定义。" +msgid "Quota for project %(project_id)s could not be found." +msgstr "没有为项目 %(project_id)s 找到配额。" -#: cinder/exception.py:557 -#, python-format -msgid "" -"Either Network uuid %(network_uuid)s is not present or is not assigned to" -" the project %(project_id)s." -msgstr "或者网络uuid %(network_uuid)s不存在,或者它没有分配给项目 %(project_id)s。" +#: cinder/exception.py:340 +#, fuzzy, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "找不到类 %(class_name)s :异常 %(exception)s" -#: cinder/exception.py:562 -#, python-format -msgid "Host is not set to the network (%(network_id)s)." -msgstr "主机没有设置于网络 (%(network_id)s)。" +#: cinder/exception.py:344 +#, fuzzy, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "没有为项目 %(project_id)s 找到配额。" -#: cinder/exception.py:566 +#: cinder/exception.py:348 #, fuzzy, python-format -msgid "Network %(network)s has active ports, cannot delete." -msgstr "网络 %s 存在活跃的端口,无法删除" +msgid "Quota reservation %(uuid)s could not be found." +msgstr "用户 %(user_id)s 没有找到。" -#: cinder/exception.py:570 -msgid "Could not find the datastore reference(s) which the VM uses." -msgstr "无法找到虚拟机使用的数据存储引用。" +#: cinder/exception.py:352 +#, fuzzy, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "超出配额" -#: cinder/exception.py:574 +#: cinder/exception.py:356 #, python-format -msgid "No fixed IP associated with id %(id)s." -msgstr "没有固定IP与id %(id)s 关联。" +msgid "File %(file_path)s could not be found." +msgstr "找不到文件 %(file_path)s。" -#: cinder/exception.py:578 -#, python-format -msgid "Fixed ip not found for address %(address)s." -msgstr "没有为地址 %(address)s 找到固定IP。" +#: cinder/exception.py:365 +#, fuzzy, python-format +msgid "Volume Type %(id)s already exists." +msgstr "卷类型 %(name)s 已经存在。" -#: cinder/exception.py:582 +#: cinder/exception.py:369 #, python-format -msgid "Instance %(instance_id)s has zero fixed ips." -msgstr "实例 %(instance_id)s 没有固定ip。" +msgid "Volume type encryption for type %(type_id)s already exists." +msgstr "" -#: cinder/exception.py:586 +#: cinder/exception.py:373 #, python-format -msgid "Network host %(host)s has zero fixed ips in network %(network_id)s." -msgstr "网络主机 %(host)s 在网络 %(network_id)s 中没有固定ip。" +msgid "Malformed message body: %(reason)s" +msgstr "错误格式的消息体: %(reason)s" -#: cinder/exception.py:591 +#: cinder/exception.py:377 #, python-format -msgid "Instance %(instance_id)s doesn't have fixed ip '%(ip)s'." 
-msgstr "实例 %(instance_id)s 没有固定ip '%(ip)s'。" +msgid "Could not find config at %(path)s" +msgstr "在 %(path)s 找不到配置文件。" -#: cinder/exception.py:595 -#, python-format -msgid "Host %(host)s has zero fixed ips." -msgstr "主机 %(host)s 没有固定IP。" +#: cinder/exception.py:381 +#, fuzzy, python-format +msgid "Could not find parameter %(param)s" +msgstr "在 %(path)s 找不到配置文件。" -#: cinder/exception.py:599 +#: cinder/exception.py:385 #, python-format -msgid "" -"Fixed IP address (%(address)s) does not exist in network " -"(%(network_uuid)s)." -msgstr "固定IP地址 (%(address)s) 在网络 (%(network_uuid)s) 中不存在。" +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "无法从路径 %(path)s 中加载应用 '%(name)s'" -#: cinder/exception.py:604 +#: cinder/exception.py:389 #, python-format -msgid "Fixed IP address %(address)s is already in use." -msgstr "固定IP地址 %(address)s 已在使用。" +msgid "No valid host was found. %(reason)s" +msgstr "找不到有效主机,原因是 %(reason)s。" -#: cinder/exception.py:608 +#: cinder/exception.py:398 #, python-format -msgid "Fixed IP address %(address)s is invalid." -msgstr "固定IP地址 %(address)s 无效。" - -#: cinder/exception.py:612 -msgid "Zero fixed ips available." -msgstr "没有固定ip可用。" - -#: cinder/exception.py:616 -msgid "Zero fixed ips could be found." -msgstr "找不到固定IP。" +msgid "Host %(host)s is not up or doesn't exist." +msgstr "主机 %(host)s 没有启动或者不存在。" -#: cinder/exception.py:620 +#: cinder/exception.py:402 #, python-format -msgid "Floating ip not found for id %(id)s." -msgstr "找不到适合id %(id)s 的浮动IP。" +msgid "Quota exceeded: code=%(code)s" +msgstr "" -#: cinder/exception.py:624 +#: cinder/exception.py:409 #, python-format -msgid "The DNS entry %(name)s already exists in domain %(domain)s." -msgstr "DNS 入口 %(name)s 已经在域中 %(domain)s 存在。" +msgid "" +"Requested volume or snapshot exceeds allowed Gigabytes quota. Requested " +"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." +msgstr "" -#: cinder/exception.py:628 +#: cinder/exception.py:415 #, python-format -msgid "Floating ip not found for address %(address)s." -msgstr "找不到适合地址 %(address)s 的浮动ip。" +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" -#: cinder/exception.py:632 +#: cinder/exception.py:419 #, python-format -msgid "Floating ip not found for host %(host)s." -msgstr "没有为主机 %(host)s 找到浮动IP。" +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" -#: cinder/exception.py:636 -msgid "Zero floating ips available." -msgstr "没有浮动IP可用。" +#: cinder/exception.py:423 +#, fuzzy, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "检测到不止一个名称为 %(vol_name) 的卷。" -#: cinder/exception.py:640 +#: cinder/exception.py:427 #, python-format -msgid "Floating ip %(address)s is associated." -msgstr "浮动ip %(address)s 已被关联。" +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "无法创建名称为 %(name)s 规格为 %(extra_specs)s 的卷类型。" -#: cinder/exception.py:644 +#: cinder/exception.py:432 #, python-format -msgid "Floating ip %(address)s is not associated." -msgstr "浮动ip %(address)s 没有被关联。" +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" -#: cinder/exception.py:648 -msgid "Zero floating ips exist." -msgstr "没有浮动ip存在。" +#: cinder/exception.py:436 +#, fuzzy, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "错误格式的消息体: %(reason)s" -#: cinder/exception.py:652 +#: cinder/exception.py:440 #, python-format -msgid "Interface %(interface)s not found." 
-msgstr "接口 %(interface)s没有找到。" +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" -#: cinder/exception.py:656 +#: cinder/exception.py:444 #, python-format -msgid "Keypair %(name)s not found for user %(user_id)s" -msgstr "密钥对 %(name)s 没有为用户 %(user_id)s 找到。" +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" -#: cinder/exception.py:660 +#: cinder/exception.py:449 #, python-format -msgid "Certificate %(certificate_id)s not found." -msgstr "证书 %(certificate_id)s 没有找到。" +msgid "Glance metadata for volume/snapshot %(id)s cannot be found." +msgstr "" -#: cinder/exception.py:664 +#: cinder/exception.py:453 #, python-format -msgid "Service %(service_id)s could not be found." -msgstr "服务 %(service_id)s 没有找到。" +msgid "Failed to export for volume: %(reason)s" +msgstr "" -#: cinder/exception.py:668 +#: cinder/exception.py:457 #, python-format -msgid "Host %(host)s could not be found." -msgstr "主机 %(host)s 没有找到。" +msgid "Failed to create metadata for volume: %(reason)s" +msgstr "" -#: cinder/exception.py:672 +#: cinder/exception.py:461 #, python-format -msgid "Compute host %(host)s could not be found." -msgstr "计算主机 %(host)s 没有找到。" +msgid "Failed to update metadata for volume: %(reason)s" +msgstr "" -#: cinder/exception.py:676 +#: cinder/exception.py:465 #, python-format -msgid "Could not find binary %(binary)s on host %(host)s." -msgstr "没有找到二进制 %(binary)s 在主机 %(host)s 上。" +msgid "Failed to copy metadata to volume: %(reason)s" +msgstr "" -#: cinder/exception.py:680 -#, python-format -msgid "Auth token %(token)s could not be found." -msgstr "认证令牌 %(token)s 没有找到。" +#: cinder/exception.py:469 +#, fuzzy, python-format +msgid "Failed to copy image to volume: %(reason)s" +msgstr "无法找到 %s 卷" -#: cinder/exception.py:684 -#, python-format -msgid "Access Key %(access_key)s could not be found." -msgstr "访问密钥 %(access_key)s 没有找到。" +#: cinder/exception.py:473 +msgid "Invalid Ceph args provided for backup rbd operation" +msgstr "" -#: cinder/exception.py:688 -msgid "Quota could not be found" -msgstr "配额没有找到。" - -#: cinder/exception.py:692 -#, python-format -msgid "Quota for project %(project_id)s could not be found." -msgstr "没有为项目 %(project_id)s 找到配额。" - -#: cinder/exception.py:696 -#, fuzzy, python-format -msgid "Quota class %(class_name)s could not be found." -msgstr "找不到类 %(class_name)s :异常 %(exception)s" - -#: cinder/exception.py:700 -#, python-format -msgid "Security group %(security_group_id)s not found." -msgstr "安全组 %(security_group_id)s 没有找到。" - -#: cinder/exception.py:704 -#, python-format -msgid "Security group %(security_group_id)s not found for project %(project_id)s." -msgstr "没有找到安全组 %(security_group_id)s 针对项目 %(project_id)s 。" - -#: cinder/exception.py:709 -#, python-format -msgid "Security group with rule %(rule_id)s not found." -msgstr "带有规则 %(rule_id)s 的安全组没有找到。" - -#: cinder/exception.py:713 -#, python-format -msgid "" -"Security group %(security_group_id)s is already associated with the " -"instance %(instance_id)s" -msgstr "安全组 %(security_group_id)s 已经与实例 %(instance_id)s 关联。" - -#: cinder/exception.py:718 -#, python-format -msgid "" -"Security group %(security_group_id)s is not associated with the instance " -"%(instance_id)s" -msgstr "安全组 %(security_group_id)s 没有与实例 %(instance_id)s 关联。" - -#: cinder/exception.py:723 -#, python-format -msgid "Migration %(migration_id)s could not be found." 
-msgstr "迁移 %(migration_id)s 没有找到。" - -#: cinder/exception.py:727 -#, python-format -msgid "Migration not found for instance %(instance_id)s with status %(status)s." -msgstr "没有为实例 %(instance_id)s 找到迁移其状态为 %(status)s 。" - -#: cinder/exception.py:732 -#, python-format -msgid "Console pool %(pool_id)s could not be found." -msgstr "控制台池 %(pool_id)s 没有找到。" - -#: cinder/exception.py:736 -#, python-format -msgid "" -"Console pool of type %(console_type)s for compute host %(compute_host)s " -"on proxy host %(host)s not found." -msgstr "没有找到类型是 %(console_type)s 的控制台池针对计算主机 %(compute_host)s 在代理主机 %(host)s 上。" - -#: cinder/exception.py:742 -#, python-format -msgid "Console %(console_id)s could not be found." -msgstr "控制台%(console_id)s 没有找到。" - -#: cinder/exception.py:746 -#, python-format -msgid "Console for instance %(instance_id)s could not be found." -msgstr "没有为实例 %(instance_id)s 找到控制台。" - -#: cinder/exception.py:750 -#, python-format -msgid "" -"Console for instance %(instance_id)s in pool %(pool_id)s could not be " -"found." -msgstr "没有为实例 %(instance_id)s 在池 %(pool_id)s 中找到控制台。" - -#: cinder/exception.py:755 -#, python-format -msgid "Invalid console type %(console_type)s " -msgstr "无效的控制台类型 %(console_type)s " - -#: cinder/exception.py:759 -msgid "Zero instance types found." -msgstr "没有找到实例类型。" - -#: cinder/exception.py:763 -#, python-format -msgid "Instance type %(instance_type_id)s could not be found." -msgstr "实例类型 %(instance_type_id)s 没有找到。" - -#: cinder/exception.py:767 -#, python-format -msgid "Instance type with name %(instance_type_name)s could not be found." -msgstr "名为 %(instance_type_name)s 的实例类型没有找到。" +#: cinder/exception.py:477 +msgid "An error has occurred during backup operation" +msgstr "" -#: cinder/exception.py:772 -#, python-format -msgid "Flavor %(flavor_id)s could not be found." -msgstr "类型 %(flavor_id)s 没有找到。" +#: cinder/exception.py:481 +msgid "Backup RBD operation failed" +msgstr "" -#: cinder/exception.py:776 +#: cinder/exception.py:485 #, fuzzy, python-format -msgid "Cell %(cell_id)s could not be found." -msgstr "区域 %(zone_id)s 没有找到。" - -#: cinder/exception.py:780 -#, python-format -msgid "Scheduler Host Filter %(filter_name)s could not be found." -msgstr "调度器主机过滤器 %(filter_name)s 没有找到。" - -#: cinder/exception.py:784 -#, python-format -msgid "Scheduler cost function %(cost_fn_str)s could not be found." -msgstr "调度器的成本函数 %(cost_fn_str)s 没有找到。" - -#: cinder/exception.py:789 -#, python-format -msgid "Scheduler weight flag not found: %(flag_name)s" -msgstr "未找到调度器的权重标记:%(flag_name)s" - -#: cinder/exception.py:793 -#, python-format -msgid "Instance %(instance_id)s has no metadata with key %(metadata_key)s." -msgstr "实例 %(instance_id)s 没有键为 %(metadata_key)s 的元数据。" - -#: cinder/exception.py:798 -#, python-format -msgid "" -"Instance Type %(instance_type_id)s has no extra specs with key " -"%(extra_specs_key)s." -msgstr "实例类型 %(instance_type_id)s 没有额外的 键为%(extra_specs_key)s 的规格说明。" - -#: cinder/exception.py:803 -msgid "LDAP object could not be found" -msgstr "未定义LDAP对象" - -#: cinder/exception.py:807 -#, python-format -msgid "LDAP user %(user_id)s could not be found." -msgstr "没有找到LDAP用户 %(user_id)s。" - -#: cinder/exception.py:811 -#, python-format -msgid "LDAP group %(group_id)s could not be found." +msgid "Backup %(backup_id)s could not be found." msgstr "没有找到LDAP用户组 %(group_id)s。" -#: cinder/exception.py:815 -#, python-format -msgid "LDAP user %(user_id)s is not a member of group %(group_id)s." 
-msgstr "LDAP用户 %(user_id)s 不是 %(group_id)s 的组成员。" - -#: cinder/exception.py:819 -#, python-format -msgid "File %(file_path)s could not be found." -msgstr "找不到文件 %(file_path)s。" - -#: cinder/exception.py:823 -msgid "Zero files could be found." -msgstr "没找到文件" - -#: cinder/exception.py:827 -#, python-format -msgid "Virtual switch associated with the network adapter %(adapter)s not found." -msgstr "未找到与网络适配器 %(adapter)s 关联的虚拟交换机。" - -#: cinder/exception.py:832 -#, python-format -msgid "Network adapter %(adapter)s could not be found." -msgstr "未找到网络适配器 %(adapter)s。" - -#: cinder/exception.py:836 -#, python-format -msgid "Class %(class_name)s could not be found: %(exception)s" -msgstr "找不到类 %(class_name)s :异常 %(exception)s" +#: cinder/exception.py:489 +msgid "Failed to identify volume backend." +msgstr "" -#: cinder/exception.py:840 -msgid "Action not allowed." -msgstr "操作不允许。" +#: cinder/exception.py:493 +#, fuzzy, python-format +msgid "Invalid backup: %(reason)s" +msgstr "无效的后台:%s" -#: cinder/exception.py:844 +#: cinder/exception.py:497 #, python-format -msgid "Unable to use global role %(role_id)s" -msgstr "无法使用全局角色 %(role_id)s" - -#: cinder/exception.py:848 -msgid "Rotation is not allowed for snapshots" -msgstr "快照不允许循环。" - -#: cinder/exception.py:852 -msgid "Rotation param is required for backup image_type" -msgstr "备份 image_type 要求循环参数。" +msgid "Connection to swift failed: %(reason)s" +msgstr "" -#: cinder/exception.py:861 -#, python-format -msgid "Key pair %(key_name)s already exists." -msgstr "密钥对 %(key_name)s 已经存在。" +#: cinder/exception.py:501 +#, fuzzy, python-format +msgid "Transfer %(transfer_id)s could not be found." +msgstr "镜像 %(image_id)s 没有找到。" -#: cinder/exception.py:865 +#: cinder/exception.py:505 #, python-format -msgid "User %(user)s already exists." -msgstr "用户 %(user)s 已存在。" +msgid "Volume migration failed: %(reason)s" +msgstr "" -#: cinder/exception.py:869 +#: cinder/exception.py:509 #, python-format -msgid "LDAP user %(user)s already exists." -msgstr "LDAP用户 %(user)s 已经存在。" +msgid "SSH command injection detected: %(command)s" +msgstr "" -#: cinder/exception.py:873 +#: cinder/exception.py:513 #, python-format -msgid "LDAP group %(group)s already exists." -msgstr "LDAP用户组 %(group)s 已经存在。" +msgid "QoS Specs %(specs_id)s already exists." +msgstr "" -#: cinder/exception.py:877 +#: cinder/exception.py:517 #, python-format -msgid "User %(uid)s is already a member of the group %(group_dn)s" -msgstr "用户 %(uid)s 已经是 组 %(group_dn)s 中的成员" +msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." +msgstr "" -#: cinder/exception.py:882 +#: cinder/exception.py:522 #, python-format -msgid "Project %(project)s already exists." -msgstr "项目 %(project)s 已经存在。" +msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." +msgstr "" -#: cinder/exception.py:886 +#: cinder/exception.py:527 #, python-format -msgid "Instance %(name)s already exists." -msgstr "实例 %(name)s 已经存在。" +msgid "No such QoS spec %(specs_id)s." +msgstr "" -#: cinder/exception.py:890 +#: cinder/exception.py:531 #, python-format -msgid "Instance Type %(name)s already exists." -msgstr "实例类型 %(name)s 已经存在。" +msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" -#: cinder/exception.py:894 +#: cinder/exception.py:536 #, python-format -msgid "Volume Type %(name)s already exists." -msgstr "卷类型 %(name)s 已经存在。" +msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." 
+msgstr "" -#: cinder/exception.py:898 +#: cinder/exception.py:541 #, python-format -msgid "%(path)s is on shared storage: %(reason)s" -msgstr "%(path)s 在共享存储上:%(reason)s" - -#: cinder/exception.py:902 -msgid "Migration error" -msgstr "迁移错误" +msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." +msgstr "" -#: cinder/exception.py:906 +#: cinder/exception.py:546 #, python-format -msgid "Malformed message body: %(reason)s" -msgstr "错误格式的消息体: %(reason)s" +msgid "Invalid qos specs: %(reason)s" +msgstr "" -#: cinder/exception.py:910 +#: cinder/exception.py:550 #, python-format -msgid "Could not find config at %(path)s" -msgstr "在 %(path)s 找不到配置文件。" +msgid "QoS Specs %(specs_id)s is still associated with entities." +msgstr "" -#: cinder/exception.py:914 +#: cinder/exception.py:554 #, python-format -msgid "Could not load paste app '%(name)s' from %(path)s" -msgstr "无法从路径 %(path)s 中加载应用 '%(name)s'" - -#: cinder/exception.py:918 -msgid "When resizing, instances must change size!" -msgstr "调整时实例的大小必须要发生变化。" +msgid "key manager error: %(reason)s" +msgstr "" -#: cinder/exception.py:922 -msgid "Image is larger than instance type allows" -msgstr "镜像比实例类型所允许的大。" +#: cinder/exception.py:560 +msgid "Coraid Cinder Driver exception." +msgstr "" -#: cinder/exception.py:926 -msgid "1 or more Zones could not complete the request" -msgstr "1个或多个区域无法完成请求。" +#: cinder/exception.py:564 +msgid "Failed to encode json data." +msgstr "" -#: cinder/exception.py:930 -msgid "Instance type's memory is too small for requested image." -msgstr "实例类型的内存对于所请求的镜像太小。" +#: cinder/exception.py:568 +msgid "Login on ESM failed." +msgstr "" -#: cinder/exception.py:934 -msgid "Instance type's disk is too small for requested image." -msgstr "实例类型的磁盘对于所请求的镜像太小。" +#: cinder/exception.py:572 +msgid "Relogin on ESM failed." +msgstr "" -#: cinder/exception.py:938 +#: cinder/exception.py:576 #, python-format -msgid "Insufficient free memory on compute node to start %(uuid)s." -msgstr "没有足够的可用内存来启动计算节点 %(uuid)s。" - -#: cinder/exception.py:942 -msgid "Could not fetch bandwidth/cpu/disk metrics for this host." -msgstr "无法获取此主机的带宽、CPU和磁盘指标。" +msgid "Group with name \"%(group_name)s\" not found." +msgstr "" -#: cinder/exception.py:946 +#: cinder/exception.py:580 #, python-format -msgid "No valid host was found. %(reason)s" -msgstr "找不到有效主机,原因是 %(reason)s。" +msgid "ESM configure request failed: %(message)s." +msgstr "" -#: cinder/exception.py:950 +#: cinder/exception.py:584 #, python-format -msgid "Host %(host)s is not up or doesn't exist." -msgstr "主机 %(host)s 没有启动或者不存在。" +msgid "Coraid ESM not available with reason: %(reason)s." +msgstr "" -#: cinder/exception.py:954 -msgid "Quota exceeded" -msgstr "超出配额" +#: cinder/exception.py:589 +msgid "Zadara Cinder Driver exception." +msgstr "" -#: cinder/exception.py:958 +#: cinder/exception.py:593 #, fuzzy, python-format -msgid "" -"Aggregate %(aggregate_id)s: action '%(action)s' caused an error: " -"%(reason)s." -msgstr "聚合 %(aggregate_id)s没有主机 %(host)s。" - -#: cinder/exception.py:963 -#, python-format -msgid "Aggregate %(aggregate_id)s could not be found." -msgstr "找不到聚合 %(aggregate_id)s。" - -#: cinder/exception.py:967 -#, python-format -msgid "Aggregate %(aggregate_name)s already exists." -msgstr "聚合 %(aggregate_name)s 已经存在。" +msgid "Unable to create server object for initiator %(name)s" +msgstr "无法在存储库 %(sr_ref)s 上为实例 %(instance_name)s 创建 VDI" -#: cinder/exception.py:971 -#, python-format -msgid "Aggregate %(aggregate_id)s has no host %(host)s." 
-msgstr "聚合 %(aggregate_id)s没有主机 %(host)s。" +#: cinder/exception.py:597 +#, fuzzy, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "无法找到实例 %s 的宿主机" -#: cinder/exception.py:975 -#, python-format -msgid "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s." -msgstr "聚合 %(aggregate_id)s 没有键为 %(metadata_key)s 的元数据。" +#: cinder/exception.py:601 +msgid "Unable to find any active VPSA controller" +msgstr "" -#: cinder/exception.py:980 +#: cinder/exception.py:605 #, python-format -msgid "Host %(host)s already member of another aggregate." -msgstr "主机 %(host)s 已经是另外一个聚合的成员。" +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" -#: cinder/exception.py:984 +#: cinder/exception.py:609 #, python-format -msgid "Aggregate %(aggregate_id)s already has host %(host)s." -msgstr "聚合 %(aggregate_id)s已经有主机 %(host)s。" +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" -#: cinder/exception.py:988 +#: cinder/exception.py:613 #, fuzzy, python-format -msgid "Detected more than one volume with name %(vol_name)s" -msgstr "检测到不止一个名称为 %(vol_name) 的卷。" - -#: cinder/exception.py:992 -#, python-format -msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" -msgstr "无法创建名称为 %(name)s 规格为 %(extra_specs)s 的卷类型。" - -#: cinder/exception.py:997 -msgid "Unable to create instance type" -msgstr "无法创建实例类型。" +msgid "Bad HTTP response status %(status)s" +msgstr "无效的服务器状态:%(status)s" -#: cinder/exception.py:1001 +#: cinder/exception.py:618 msgid "Bad response from SolidFire API" msgstr "来自SolidFire API的错误响应" -#: cinder/exception.py:1005 -#, python-format -msgid "Error in SolidFire API response: status=%(status)s" -msgstr "SolidFire API响应里发生错误:status=%(status)s" +#: cinder/exception.py:622 +msgid "SolidFire Cinder Driver exception" +msgstr "" -#: cinder/exception.py:1009 +#: cinder/exception.py:626 #, python-format msgid "Error in SolidFire API response: data=%(data)s" msgstr "SolidFire API响应里发生错误:data=%(data)s" -#: cinder/exception.py:1013 +#: cinder/exception.py:630 #, fuzzy, python-format -msgid "Detected existing vlan with id %(vlan)d" -msgstr "检测到已存在的id为%(vlan) vlan" - -#: cinder/exception.py:1017 -#, python-format -msgid "Instance %(instance_id)s could not be found." -msgstr "实例 %(instance_id)s 没有找到。" +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "无法找到帐户 %(account_name) on Solidfire 设备" -#: cinder/exception.py:1021 +#: cinder/exception.py:636 #, python-format -msgid "Invalid id: %(val)s (expecting \"i-...\")." 
+msgid "Invalid 3PAR Domain: %(err)s" msgstr "" -#: cinder/exception.py:1025 -#, fuzzy, python-format -msgid "Could not fetch image %(image)s" -msgstr "获取镜像 %(image)s" +#: cinder/exception.py:641 +#, fuzzy +msgid "Unknown NFS exception" +msgstr "发生未知异常。" -#: cinder/log.py:315 -#, python-format -msgid "syslog facility must be one of: %s" -msgstr "syslog设备必须作为一个 %s 。" +#: cinder/exception.py:645 +msgid "No mounted NFS shares found" +msgstr "" -#: cinder/manager.py:146 +#: cinder/exception.py:649 cinder/exception.py:662 #, python-format -msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" -msgstr "跳过 %(full_task_name)s,到下次运行还剩下%(ticks_to_skip)s 跳。" +msgid "There is no share which can host %(volume_size)sG" +msgstr "" -#: cinder/manager.py:152 -#, python-format -msgid "Running periodic task %(full_task_name)s" -msgstr "正在运行周期性任务 %(full_task_name)s" +#: cinder/exception.py:654 +#, fuzzy +msgid "Unknown Gluster exception" +msgstr "发生未知异常。" -#: cinder/manager.py:159 -#, python-format -msgid "Error during %(full_task_name)s: %(e)s" -msgstr "在 %(full_task_name)s 期间发生的错误:%(e)s" +#: cinder/exception.py:658 +msgid "No mounted Gluster shares found" +msgstr "" -#: cinder/manager.py:203 +#: cinder/manager.py:133 msgid "Notifying Schedulers of capabilities ..." msgstr "向调度器通报能力。" @@ -1107,3067 +672,2746 @@ msgstr "JSON文件表示策略。" msgid "Rule checked when requested rule is not found" msgstr "请求的规则找不到时的检查缺省规则。" -#: cinder/service.py:137 -msgid "SIGTERM received" +#: cinder/quota.py:105 +#, python-format +msgid "" +"Default quota for resource: %(res)s is set by the default quota flag: " +"quota_%(res)s, it is now deprecated. Please use the the default quota " +"class for default quota." +msgstr "" + +#: cinder/quota.py:748 +#, python-format +msgid "Created reservations %s" +msgstr "" + +#: cinder/quota.py:770 +#, python-format +msgid "Failed to commit reservations %s" +msgstr "" + +#: cinder/quota.py:790 +#, python-format +msgid "Failed to roll back reservations %s" +msgstr "" + +#: cinder/quota.py:876 +msgid "Cannot register resource" msgstr "" -#: cinder/service.py:177 +#: cinder/quota.py:879 +msgid "Cannot register resources" +msgstr "" + +#: cinder/quota_utils.py:46 #, python-format -msgid "Starting %(topic)s node (version %(vcs_string)s)" +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume - " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/quota_utils.py:56 cinder/transfer/api.py:168 +#: cinder/volume/flows/api/create_volume.py:520 +#, fuzzy, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "%(pid)s 的配额超出,尝试创建 %(size)sG 的卷" + +#: cinder/service.py:95 +#, fuzzy, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" msgstr "启动 %(topic)s 节点 (版本 %(vcs_string)s)" -#: cinder/service.py:195 +#: cinder/service.py:108 cinder/openstack/common/rpc/service.py:47 #, python-format msgid "Creating Consumer connection for Service %s" msgstr "为服务 %s 创建消费者" -#: cinder/service.py:282 +#: cinder/service.py:148 +#, python-format +msgid "" +"Report interval must be less than service down time. Current config " +"service_down_time: %(service_down_time)s, report_interval for this: " +"service is: %(report_interval)s. 
Setting global service_down_time to: " +"%(new_down_time)s" +msgstr "" + +#: cinder/service.py:216 msgid "Service killed that has no database entry" msgstr "因无数据库记录,服务已被中止" -#: cinder/service.py:319 +#: cinder/service.py:255 msgid "The service database object disappeared, Recreating it." msgstr "服务数据库对象消失,正在重新创建。" -#: cinder/service.py:334 +#: cinder/service.py:270 msgid "Recovered model server connection!" msgstr "与模型服务器(model server)的连接已恢复!" -#: cinder/service.py:340 +#: cinder/service.py:276 msgid "model server went away" msgstr "失去与模型服务器的连接" -#: cinder/service.py:433 -msgid "Full set of FLAGS:" -msgstr "标记全集:" - -#: cinder/service.py:440 -#, python-format -msgid "%(flag)s : FLAG SET " -msgstr "%(flag)s:标记集合 " - -#: cinder/utils.py:79 -#, python-format -msgid "Inner Exception: %s" -msgstr "内层异常:%s" - -#: cinder/utils.py:165 -#, python-format -msgid "Fetching %s" -msgstr "正在抓取 %s" - -#: cinder/utils.py:210 +#: cinder/service.py:298 #, python-format -msgid "Got unknown keyword args to utils.execute: %r" -msgstr "发现未知的 utils.execute 关键字参数:%r" +msgid "" +"Value of config option %(name)s_workers must be integer greater than 1. " +"Input value ignored." +msgstr "" -#: cinder/utils.py:220 -#, python-format -msgid "Running cmd (subprocess): %s" -msgstr "正在运行cmd (subprocess):%s" +#: cinder/service.py:373 +msgid "serve() can only be called once" +msgstr "" -#: cinder/utils.py:236 cinder/utils.py:315 -#, python-format -msgid "Result was %s" -msgstr "运行结果为 %s" +#: cinder/service.py:379 cinder/openstack/common/service.py:166 +#: cinder/openstack/common/service.py:384 +#, fuzzy +msgid "Full set of CONF:" +msgstr "标记全集:" -#: cinder/utils.py:249 +#: cinder/service.py:387 #, python-format -msgid "%r failed. Retrying." -msgstr "%r 失败,重试。" +msgid "%s : FLAG SET " +msgstr "" -#: cinder/utils.py:291 +#: cinder/utils.py:96 #, python-format -msgid "Running cmd (SSH): %s" -msgstr "运行cmd (SSH):%s" - -#: cinder/utils.py:293 -msgid "Environment not supported over SSH" -msgstr "SSH上不支持环境变量" - -#: cinder/utils.py:297 -msgid "process_input not supported over SSH" -msgstr "SSH上不支持的进程输入参数。" +msgid "Can not translate %s to integer." +msgstr "" -#: cinder/utils.py:352 +#: cinder/utils.py:127 #, python-format -msgid "debug in callback: %s" -msgstr "回调中debug:%s" +msgid "May specify only one of %s" +msgstr "" -#: cinder/utils.py:534 -#, python-format -msgid "Link Local address is not found.:%s" -msgstr "本地IP地址没有找到:%s" +#: cinder/utils.py:212 +#, fuzzy +msgid "Specify a password or private_key" +msgstr "指定san_password或者san_private_key" -#: cinder/utils.py:537 -#, python-format -msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" -msgstr "无法连接到 %(interface)s 的本地IP:%(ex)s" +#: cinder/utils.py:228 +#, fuzzy, python-format +msgid "Error connecting via ssh: %s" +msgstr "正在连接 libvirt:%s" -#: cinder/utils.py:648 +#: cinder/utils.py:412 #, python-format msgid "Invalid backend: %s" msgstr "无效的后台:%s" -#: cinder/utils.py:659 +#: cinder/utils.py:423 #, python-format msgid "backend %s" msgstr "后台 %s" -#: cinder/utils.py:709 -msgid "in looping call" -msgstr "循环调用中。" - -#: cinder/utils.py:927 +#: cinder/utils.py:698 #, fuzzy, python-format -msgid "Attempting to grab semaphore \"%(lock)s\" for method \"%(method)s\"..." 
-msgstr "正在试图获取信号量 \"%(lock)s\" 为方法 \"%(method)s\"...锁" +msgid "Could not remove tmpdir: %s" +msgstr "移除容器失败:%s" + +#: cinder/utils.py:759 +#, python-format +msgid "Volume driver %s not initialized" +msgstr "" -#: cinder/utils.py:931 +#: cinder/wsgi.py:127 cinder/openstack/common/sslutils.py:50 #, fuzzy, python-format -msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." -msgstr "获得信号量 \"%(lock)s\" 为方法 \"%(method)s\" ...锁" +msgid "Unable to find cert_file : %s" +msgstr "无法找到地址 %r" -#: cinder/utils.py:935 +#: cinder/wsgi.py:130 cinder/openstack/common/sslutils.py:53 #, fuzzy, python-format -msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." -msgstr "正在 试图获取锁 \"%(lock)s\" 为方法 \"%(method)s\"...锁" +msgid "Unable to find ca_file : %s" +msgstr "无法找到地址 %r" -#: cinder/utils.py:942 +#: cinder/wsgi.py:133 cinder/openstack/common/sslutils.py:56 #, fuzzy, python-format -msgid "Got file lock \"%(lock)s\" for method \"%(method)s\"..." -msgstr "获得文件锁 \"%(lock)s\" 为方法 \"%(method)s\"...锁" - -#: cinder/utils.py:1001 -#, python-format -msgid "Found sentinel %(filename)s for pid %(pid)s" -msgstr "" - -#: cinder/utils.py:1008 -#, python-format -msgid "Cleaned sentinel %(filename)s for pid %(pid)s" -msgstr "" +msgid "Unable to find key_file : %s" +msgstr "无法找到地址 %r" -#: cinder/utils.py:1023 -#, python-format -msgid "Found lockfile %(file)s with link count %(count)d" +#: cinder/wsgi.py:136 cinder/openstack/common/sslutils.py:59 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" msgstr "" -#: cinder/utils.py:1028 +#: cinder/wsgi.py:169 #, python-format -msgid "Cleaned lockfile %(file)s with link count %(count)d" +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" msgstr "" -#: cinder/utils.py:1138 -#, python-format -msgid "Expected object of type: %s" -msgstr "期望的对象类型:%s" - -#: cinder/utils.py:1169 -#, python-format -msgid "Invalid server_string: %s" -msgstr "不正确的server_string:%s" - -#: cinder/utils.py:1298 -#, python-format -msgid "timefunc: '%(name)s' took %(total_time).2f secs" -msgstr "timefunc:'%(name)s' 用了%(total_time).2f 秒" - -#: cinder/utils.py:1330 -msgid "Original exception being dropped" -msgstr "正在丢弃原来的异常。" - -#: cinder/utils.py:1461 -#, python-format -msgid "Class %(fullname)s is deprecated: %(msg)s" -msgstr "类 %(fullname)s 是不推荐的:%(msg)s" - -#: cinder/utils.py:1463 -#, python-format -msgid "Class %(fullname)s is deprecated" -msgstr "类 %(fullname)s 是不推荐的" - -#: cinder/utils.py:1495 -#, python-format -msgid "Function %(name)s in %(location)s is deprecated: %(msg)s" -msgstr "函数 %(name)s 在%(location)s 里的是不推荐的:%(msg)s" - -#: cinder/utils.py:1497 -#, python-format -msgid "Function %(name)s in %(location)s is deprecated" -msgstr "函数 %(name)s 在%(location)s 里的是不推荐的" - -#: cinder/utils.py:1681 -#, fuzzy, python-format -msgid "Could not remove tmpdir: %s" -msgstr "移除容器失败:%s" - -#: cinder/wsgi.py:97 +#: cinder/wsgi.py:206 #, python-format msgid "Started %(name)s on %(host)s:%(port)s" -msgstr "启动%(name)s 位置在 %(host)s:%(port)s" +msgstr "" -#: cinder/wsgi.py:108 +#: cinder/wsgi.py:226 msgid "Stopping WSGI server." msgstr "关闭WSGI服务器" -#: cinder/wsgi.py:111 -msgid "Stopping raw TCP server." -msgstr "正在停止裸TCP服务器。" - -#: cinder/wsgi.py:117 -#, python-format -msgid "Starting TCP server %(arg0)s on %(host)s:%(port)s" -msgstr "正在启动TCP服务器 %(arg0)s 位置在%(host)s:%(port)s" - -#: cinder/wsgi.py:133 +#: cinder/wsgi.py:244 msgid "WSGI server has stopped." 
msgstr "WSGI服务器已经停止。" -#: cinder/wsgi.py:211 +#: cinder/wsgi.py:313 msgid "You must implement __call__" msgstr "你必须执行 __call__" -#: cinder/api/direct.py:218 -msgid "not available" -msgstr "不可用" - -#: cinder/api/direct.py:299 -#, python-format -msgid "Returned non-serializeable type: %s" -msgstr "返回的non-serializeable类型:%s" - -#: cinder/api/sizelimit.py:51 -msgid "Request is too large." +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." msgstr "" -#: cinder/api/validator.py:142 -#, python-format -msgid "%(key)s with value %(value)s failed validator %(validator)s" +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." msgstr "" -#: cinder/api/ec2/__init__.py:73 -#, python-format -msgid "%(code)s: %(message)s" -msgstr "%(code)s: %(message)s" +#: cinder/api/common.py:92 cinder/api/common.py:126 cinder/volume/api.py:266 +msgid "limit param must be an integer" +msgstr "limit 参数必须是整数" -#: cinder/api/ec2/__init__.py:95 -#, python-format -msgid "FaultWrapper: %s" -msgstr "FaultWrapper: %s" +#: cinder/api/common.py:95 cinder/api/common.py:130 cinder/volume/api.py:263 +msgid "limit param must be positive" +msgstr "limit参数必须是正数" -#: cinder/api/ec2/__init__.py:170 -msgid "Too many failed authentications." -msgstr "认证失败过多" +#: cinder/api/common.py:120 +msgid "offset param must be an integer" +msgstr "offset 参数必须是整数" -#: cinder/api/ec2/__init__.py:180 -#, python-format -msgid "" -"Access key %(access_key)s has had %(failures)d failed authentications and" -" will be locked out for %(lock_mins)d minutes." -msgstr "访问密钥 %(access_key)s 有错误 %(failures)d,认证失败将被锁定 %(lock_mins)d 分钟。" +#: cinder/api/common.py:134 +msgid "offset param must be positive" +msgstr "offset 参数必须是正数" -#: cinder/api/ec2/__init__.py:267 -msgid "Signature not provided" -msgstr "签名没有提供" +#: cinder/api/common.py:162 +#, python-format +msgid "marker [%s] not found" +msgstr "没有找到标记 [%s]" -#: cinder/api/ec2/__init__.py:271 -msgid "Access key not provided" -msgstr "访问密钥没有提供" +#: cinder/api/common.py:189 +#, python-format +msgid "href %s does not contain version" +msgstr "href %s 不包含版本" -#: cinder/api/ec2/__init__.py:306 cinder/api/ec2/__init__.py:319 -msgid "Failure communicating with keystone" -msgstr "与keystone交流失败" +#: cinder/api/extensions.py:182 +msgid "Initializing extension manager." 
+msgstr "正在初始化扩展管理员。" -#: cinder/api/ec2/__init__.py:388 +#: cinder/api/extensions.py:197 #, python-format -msgid "Authentication Failure: %s" -msgstr "认证失败:%s" +msgid "Loaded extension: %s" +msgstr "加载的扩展:%s" -#: cinder/api/ec2/__init__.py:404 +#: cinder/api/extensions.py:235 #, python-format -msgid "Authenticated Request For %(uname)s:%(pname)s)" -msgstr "为%(uname)s:%(pname)s 验证通过的请求" +msgid "Ext name: %s" +msgstr "Ext name: %s" -#: cinder/api/ec2/__init__.py:435 +#: cinder/api/extensions.py:236 #, python-format -msgid "action: %s" -msgstr "执行: %s" +msgid "Ext alias: %s" +msgstr "Ext alias: %s" -#: cinder/api/ec2/__init__.py:437 +#: cinder/api/extensions.py:237 #, python-format -msgid "arg: %(key)s\t\tval: %(value)s" -msgstr "arg: %(key)s\t\tval: %(value)s" +msgid "Ext description: %s" +msgstr "Ext 描述: %s" -#: cinder/api/ec2/__init__.py:512 +#: cinder/api/extensions.py:239 #, python-format -msgid "Unauthorized request for controller=%(controller)s and action=%(action)s" -msgstr "为controller=%(controller)s 以及 action=%(action)s未验证的请求" +msgid "Ext namespace: %s" +msgstr "Ext 命名空间: %s" -#: cinder/api/ec2/__init__.py:584 +#: cinder/api/extensions.py:240 #, python-format -msgid "InstanceNotFound raised: %s" -msgstr "引起异常 InstanceNotFound: %s" +msgid "Ext updated: %s" +msgstr "Ext updated: %s" -#: cinder/api/ec2/__init__.py:590 +#: cinder/api/extensions.py:242 #, python-format -msgid "VolumeNotFound raised: %s" -msgstr "引起异常 VolumeNotFound: %s" +msgid "Exception loading extension: %s" +msgstr "加载扩展发生异常:%s" -#: cinder/api/ec2/__init__.py:596 +#: cinder/api/extensions.py:256 #, python-format -msgid "SnapshotNotFound raised: %s" -msgstr "引起异常 SnapshotNotFound: %s" +msgid "Loading extension %s" +msgstr "正在加载扩展 %s" -#: cinder/api/ec2/__init__.py:602 +#: cinder/api/extensions.py:262 #, python-format -msgid "NotFound raised: %s" -msgstr "引起异常 NotFound: %s" +msgid "Calling extension factory %s" +msgstr "调用扩展工厂 %s" -#: cinder/api/ec2/__init__.py:605 +#: cinder/api/extensions.py:276 #, python-format -msgid "EC2APIError raised: %s" -msgstr "引起异常 EC2APIError: %s" +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" -#: cinder/api/ec2/__init__.py:613 +#: cinder/api/extensions.py:278 #, python-format -msgid "KeyPairExists raised: %s" -msgstr "引起异常 KeyPairExists: %s" +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" -#: cinder/api/ec2/__init__.py:617 +#: cinder/api/extensions.py:287 #, python-format -msgid "InvalidParameterValue raised: %s" -msgstr "引起异常 InvalidParameterValue: %s" +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "加载扩展 %(ext_factory)s 失败:%(exc)s" -#: cinder/api/ec2/__init__.py:621 +#: cinder/api/extensions.py:356 #, python-format -msgid "InvalidPortRange raised: %s" -msgstr "引起异常 InvalidPortRange: %s" +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "加载扩展 %(classpath)s 失败:%(exc)s" -#: cinder/api/ec2/__init__.py:625 +#: cinder/api/extensions.py:381 #, python-format -msgid "NotAuthorized raised: %s" -msgstr "引起异常 NotAuthorized: %s" +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "加载扩展 %(ext_name)s 失败:%(exc)s" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. 
Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:266 +msgid "element is not a child" +msgstr "元素不是子节点" + +#: cinder/api/xmlutil.py:463 +msgid "root element selecting a list" +msgstr "根元素选择列表" -#: cinder/api/ec2/__init__.py:629 +#: cinder/api/xmlutil.py:786 #, python-format -msgid "InvalidRequest raised: %s" -msgstr "引起异常 InvalidRequest: %s" +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "模板数不匹配;把slave %(slavetag)s 添加到master %(mastertag)s" -#: cinder/api/ec2/__init__.py:633 -#, fuzzy, python-format -msgid "QuotaError raised: %s" -msgstr "引起意外的错误:%s" +#: cinder/api/xmlutil.py:907 +msgid "subclasses must implement construct()!" +msgstr "subclasses必须执行construct()!" -#: cinder/api/ec2/__init__.py:637 +#: cinder/api/contrib/admin_actions.py:81 #, python-format -msgid "Invalid id: bogus (expecting \"i-...\"): %s" +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" msgstr "" -#: cinder/api/ec2/__init__.py:646 +#: cinder/api/contrib/backups.py:128 #, python-format -msgid "Unexpected error raised: %s" -msgstr "引起意外的错误:%s" +msgid "show called for member %s" +msgstr "" -#: cinder/api/ec2/__init__.py:647 -#, python-format -msgid "Environment: %s" -msgstr "Environment: %s" +#: cinder/api/contrib/backups.py:140 +#, fuzzy, python-format +msgid "delete called for member %s" +msgstr "修改用户 %s 的私钥" + +#: cinder/api/contrib/backups.py:143 +#, fuzzy, python-format +msgid "Delete backup with id: %s" +msgstr "删除id为 %s 的快照" -#: cinder/api/ec2/__init__.py:649 cinder/api/metadata/handler.py:248 -msgid "An unknown error has occurred. Please try your request again." -msgstr "发生了一个未知的错误. 请重试你的请求." +#: cinder/api/contrib/backups.py:185 +#, fuzzy, python-format +msgid "Creating new backup %s" +msgstr "轮换出%d个备份" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:227 +#: cinder/api/contrib/volume_transfer.py:157 +#: cinder/api/contrib/volume_transfer.py:193 +msgid "Incorrect request body format" +msgstr "不正确的请求主体格式" + +#: cinder/api/contrib/backups.py:201 +#, fuzzy, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "卷 %(volume_id)s 正在 %(mountpoint)s 上启动" -#: cinder/api/ec2/apirequest.py:64 +#: cinder/api/contrib/backups.py:224 #, python-format -msgid "Unsupported API request: controller = %(controller)s, action = %(action)s" -msgstr "不支持的API请求:controller = %(controller)s,action = %(action)s" +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" -#: cinder/api/ec2/cloud.py:336 +#: cinder/api/contrib/backups.py:234 #, python-format -msgid "Create snapshot of volume %s" -msgstr "创建卷 %s 的快照" +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" -#: cinder/api/ec2/cloud.py:372 -#, fuzzy, python-format -msgid "" -"Value (%s) for KeyName is invalid. Content limited to Alphanumeric " -"character, spaces, dashes, and underscore." -msgstr "参数GroupName的值 (%s) 无效。内容仅限于含有字母数字的字符,空格,破折号和下划线。" +#: cinder/api/contrib/extended_snapshot_attributes.py:60 +#, fuzzy +msgid "Snapshot not found." +msgstr "没有找到主机" + +#: cinder/api/contrib/hosts.py:86 cinder/api/openstack/wsgi.py:245 +msgid "cannot understand XML" +msgstr "无法理解XML" -#: cinder/api/ec2/cloud.py:378 +#: cinder/api/contrib/hosts.py:136 #, fuzzy, python-format -msgid "Value (%s) for Keyname is invalid. Length exceeds maximum of 255." -msgstr "参数GroupName的值 (%s) 无效。长度超过了上限255。" +msgid "Host '%s' could not be found." 
+msgstr "主机 %(host)s 没有找到。" -#: cinder/api/ec2/cloud.py:382 +#: cinder/api/contrib/hosts.py:165 #, python-format -msgid "Create key pair %s" -msgstr "创建密钥对 %s" +msgid "Invalid status: '%s'" +msgstr "无效的状态:'%s'" -#: cinder/api/ec2/cloud.py:391 +#: cinder/api/contrib/hosts.py:168 #, python-format -msgid "Import key %s" -msgstr "导入密钥 %s" +msgid "Invalid update setting: '%s'" +msgstr "无效的更新设置:'%s'" -#: cinder/api/ec2/cloud.py:409 +#: cinder/api/contrib/hosts.py:180 #, python-format -msgid "Delete key pair %s" -msgstr "删除密钥对 %s" +msgid "Setting host %(host)s to %(state)s." +msgstr "把主机 %(host)s 设置为 %(state)s。" -#: cinder/api/ec2/cloud.py:551 -#, fuzzy -msgid "Invalid CIDR" -msgstr "无效的" +#: cinder/api/contrib/hosts.py:206 +msgid "Describe-resource is admin only functionality" +msgstr "Describe-resource是只有管理员才能执行的功能。" -#: cinder/api/ec2/cloud.py:639 cinder/api/ec2/cloud.py:693 -#: cinder/api/ec2/cloud.py:800 -msgid "Not enough parameters, need group_name or group_id" +#: cinder/api/contrib/hosts.py:214 +msgid "Host not found" +msgstr "没有找到主机" + +#: cinder/api/contrib/qos_specs_manage.py:111 +msgid "Please specify a name for QoS specs." msgstr "" -#: cinder/api/ec2/cloud.py:654 -#: cinder/api/openstack/compute/contrib/security_groups.py:517 -#, python-format -msgid "Revoke security group ingress %s" -msgstr "撤销 %s 安全组入口权限" +#: cinder/api/contrib/qos_specs_manage.py:220 +msgid "Failed to disassociate qos specs." +msgstr "" -#: cinder/api/ec2/cloud.py:666 cinder/api/ec2/cloud.py:719 -#, fuzzy, python-format -msgid "%s Not enough parameters to build a valid rule" -msgstr "参数不够创建有效规则。" +#: cinder/api/contrib/qos_specs_manage.py:222 +msgid "Qos specs still in use." +msgstr "" -#: cinder/api/ec2/cloud.py:684 cinder/api/ec2/cloud.py:744 -msgid "No rule for the specified parameters." -msgstr "对给定的参数无特定规则。" +#: cinder/api/contrib/qos_specs_manage.py:298 +#: cinder/api/contrib/qos_specs_manage.py:351 +msgid "Volume Type id must not be None." +msgstr "" -#: cinder/api/ec2/cloud.py:708 -#: cinder/api/openstack/compute/contrib/security_groups.py:354 -#, python-format -msgid "Authorize security group ingress %s" -msgstr "授权 %s 安全组入口权限" +#: cinder/api/contrib/quota_classes.py:72 +msgid "Missing required element quota_class_set in request body." +msgstr "" -#: cinder/api/ec2/cloud.py:725 -#, fuzzy, python-format -msgid "%s - This rule already exists in group" -msgstr "这条规则已经存在于组%s 中" +#: cinder/api/contrib/quota_classes.py:81 +msgid "Quota class limit must be specified as an integer value." +msgstr "" -#: cinder/api/ec2/cloud.py:769 -#, python-format -msgid "" -"Value (%s) for parameter GroupName is invalid. Content limited to " -"Alphanumeric characters, spaces, dashes, and underscores." -msgstr "参数GroupName的值 (%s) 无效。内容仅限于含有字母数字的字符,空格,破折号和下划线。" +#: cinder/api/contrib/quota_classes.py:85 +msgid "Quota class limit must be -1 or greater." +msgstr "" -#: cinder/api/ec2/cloud.py:776 -#, python-format -msgid "" -"Value (%s) for parameter GroupName is invalid. Length exceeds maximum of " -"255." -msgstr "参数GroupName的值 (%s) 无效。长度超过了上限255。" +#: cinder/api/contrib/quotas.py:60 +msgid "Quota limit must be specified as an integer value." +msgstr "" -#: cinder/api/ec2/cloud.py:780 -#: cinder/api/openstack/compute/contrib/security_groups.py:292 -#, python-format -msgid "Create Security Group %s" -msgstr "创建安全组 %s" +#: cinder/api/contrib/quotas.py:65 +msgid "Quota limit must be -1 or greater." 
+msgstr "" -#: cinder/api/ec2/cloud.py:783 -#, python-format -msgid "group %s already exists" -msgstr "安全组 %s 已经存在" +#: cinder/api/contrib/quotas.py:100 +msgid "Missing required element quota_set in request body." +msgstr "" -#: cinder/api/ec2/cloud.py:815 -#: cinder/api/openstack/compute/contrib/security_groups.py:245 +#: cinder/api/contrib/quotas.py:111 #, python-format -msgid "Delete security group %s" -msgstr "删除安全组 %s" +msgid "Bad key(s) in quota set: %s" +msgstr "" -#: cinder/api/ec2/cloud.py:823 cinder/compute/manager.py:1630 -#, python-format -msgid "Get console output for instance %s" -msgstr "获取实例 %s 控制台输出" +#: cinder/api/contrib/scheduler_hints.py:36 +msgid "Malformed scheduler_hints attribute" +msgstr "" -#: cinder/api/ec2/cloud.py:894 -#, python-format -msgid "Create volume from snapshot %s" -msgstr "从快照 %s 创建卷" +#: cinder/api/contrib/services.py:84 +msgid "" +"Query by service parameter is deprecated. Please use binary parameter " +"instead." +msgstr "" -#: cinder/api/ec2/cloud.py:898 cinder/api/openstack/compute/contrib/volumes.py:186 -#: cinder/api/openstack/volume/volumes.py:222 -#, python-format -msgid "Create volume of %s GB" -msgstr "创建 %s GB的卷" +#: cinder/api/contrib/snapshot_actions.py:51 +msgid "'status' must be specified." +msgstr "" -#: cinder/api/ec2/cloud.py:921 -#, fuzzy -msgid "Delete Failed" -msgstr "创建失败" +#: cinder/api/contrib/snapshot_actions.py:61 +#, python-format +msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" +msgstr "" -#: cinder/api/ec2/cloud.py:931 +#: cinder/api/contrib/snapshot_actions.py:67 #, python-format -msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" -msgstr "把卷 %(volume_id)s 附加到实例 %(instance_id)s 上位置在 %(device)s" +msgid "" +"Provided snapshot status %(provided)s not allowed for snapshot with " +"status %(current)s." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:79 +msgid "progress must be an integer percentage" +msgstr "" -#: cinder/api/ec2/cloud.py:939 +#: cinder/api/contrib/types_extra_specs.py:101 #, fuzzy -msgid "Attach Failed." -msgstr "创建失败" +msgid "Request body empty" +msgstr "不正确的请求主体格式" -#: cinder/api/ec2/cloud.py:952 cinder/api/openstack/compute/contrib/volumes.py:366 -#, python-format -msgid "Detach volume %s" -msgstr "分离卷 %s" +#: cinder/api/contrib/types_extra_specs.py:105 +#: cinder/api/v1/snapshot_metadata.py:75 cinder/api/v1/volume_metadata.py:75 +#: cinder/api/v2/snapshot_metadata.py:75 cinder/api/v2/volume_metadata.py:74 +msgid "Request body and URI mismatch" +msgstr "请求主体和URI不匹配" -#: cinder/api/ec2/cloud.py:959 -#, fuzzy, python-format -msgid "Detach Volume Failed." -msgstr "分离卷 %s" +#: cinder/api/contrib/types_extra_specs.py:108 +#: cinder/api/v1/snapshot_metadata.py:79 cinder/api/v1/volume_metadata.py:79 +#: cinder/api/v2/snapshot_metadata.py:79 cinder/api/v2/volume_metadata.py:78 +msgid "Request body contains too many items" +msgstr "请求主体包含太多items" -#: cinder/api/ec2/cloud.py:984 cinder/api/ec2/cloud.py:1041 -#: cinder/api/ec2/cloud.py:1518 cinder/api/ec2/cloud.py:1533 -#, python-format -msgid "attribute not supported: %s" -msgstr "属性不支持: %s" +#: cinder/api/contrib/types_extra_specs.py:150 +msgid "" +"Key names can only contain alphanumeric characters, underscores, periods," +" colons and hyphens." 
+msgstr "" -#: cinder/api/ec2/cloud.py:1107 +#: cinder/api/contrib/volume_actions.py:99 #, python-format -msgid "vol = %s\n" -msgstr "vol = %s\n" +msgid "" +"Invalid request to attach volume to an instance %(instance_uuid)s and a " +"host %(host_name)s simultaneously" +msgstr "" -#: cinder/api/ec2/cloud.py:1255 -msgid "Allocate address" -msgstr "分配地址" +#: cinder/api/contrib/volume_actions.py:107 +msgid "Invalid request to attach volume to an invalid target" +msgstr "" -#: cinder/api/ec2/cloud.py:1267 -#, python-format -msgid "Release address %s" -msgstr "释放地址 %s" +#: cinder/api/contrib/volume_actions.py:111 +msgid "" +"Invalid request to attach volume with an invalid mode. Attaching mode " +"should be 'rw' or 'ro'" +msgstr "" -#: cinder/api/ec2/cloud.py:1272 -#, python-format -msgid "Associate address %(public_ip)s to instance %(instance_id)s" -msgstr "把地址 %(public_ip)s 关联到实例 %(instance_id)s" +#: cinder/api/contrib/volume_actions.py:196 +msgid "Unable to fetch connection information from backend." +msgstr "" -#: cinder/api/ec2/cloud.py:1282 -#, python-format -msgid "Disassociate address %s" -msgstr "取消地址 %s 的关联" +#: cinder/api/contrib/volume_actions.py:216 +msgid "Unable to terminate volume connection from backend." +msgstr "" -#: cinder/api/ec2/cloud.py:1308 -msgid "Image must be available" -msgstr "镜像必须可用。" +#: cinder/api/contrib/volume_actions.py:229 +msgid "No image_name was specified in request." +msgstr "" -#: cinder/api/ec2/cloud.py:1329 -msgid "Going to start terminating instances" -msgstr "准备开始终止实例" +#: cinder/api/contrib/volume_actions.py:237 +msgid "Bad value for 'force' parameter." +msgstr "" -#: cinder/api/ec2/cloud.py:1343 -#, python-format -msgid "Reboot instance %r" -msgstr "重启实例 %r" +#: cinder/api/contrib/volume_actions.py:240 +msgid "'force' is not string or bool." +msgstr "" -#: cinder/api/ec2/cloud.py:1354 -msgid "Going to stop instances" -msgstr "准备停止实例" +#: cinder/api/contrib/volume_actions.py:280 +msgid "New volume size must be specified as an integer." +msgstr "" -#: cinder/api/ec2/cloud.py:1365 -msgid "Going to start instances" -msgstr "准备启动实例" +#: cinder/api/contrib/volume_actions.py:299 +msgid "Must specify readonly in request." +msgstr "" -#: cinder/api/ec2/cloud.py:1455 -#, python-format -msgid "De-registering image %s" -msgstr "解除镜像 %s 的注册" +#: cinder/api/contrib/volume_actions.py:307 +msgid "Bad value for 'readonly'" +msgstr "" -#: cinder/api/ec2/cloud.py:1471 -msgid "imageLocation is required" +#: cinder/api/contrib/volume_actions.py:311 +msgid "'readonly' not string or bool" msgstr "" -#: cinder/api/ec2/cloud.py:1490 -#, python-format -msgid "Registered image %(image_location)s with id %(image_id)s" -msgstr "用id %(image_id)s 注册镜像 %(image_location)s" +#: cinder/api/contrib/volume_actions.py:325 +msgid "New volume type must be specified." 
+msgstr "" -#: cinder/api/ec2/cloud.py:1536 -msgid "user or group not specified" -msgstr "用户或者组没有确定" +#: cinder/api/contrib/volume_transfer.py:131 +#, fuzzy +msgid "Listing volume transfers" +msgstr "更新主机状态" -#: cinder/api/ec2/cloud.py:1538 -msgid "only group \"all\" is supported" -msgstr "仅仅支持组\"all\"" +#: cinder/api/contrib/volume_transfer.py:147 +#, fuzzy, python-format +msgid "Creating new volume transfer %s" +msgstr "创建卷 %s 的快照" -#: cinder/api/ec2/cloud.py:1540 -msgid "operation_type must be add or remove" -msgstr "operation_type必须添加或者移除" +#: cinder/api/contrib/volume_transfer.py:162 +#, fuzzy, python-format +msgid "Creating transfer of volume %s" +msgstr "没有为卷 %(volume_id)s 找到目标id。" -#: cinder/api/ec2/cloud.py:1542 -#, python-format -msgid "Updating image %s publicity" -msgstr "正在更新镜像 %s 的 publicity 属性" +#: cinder/api/contrib/volume_transfer.py:183 +#, fuzzy, python-format +msgid "Accepting volume transfer %s" +msgstr "更新主机状态" -#: cinder/api/ec2/cloud.py:1555 -#, python-format -msgid "Not allowed to modify attributes for image %s" +#: cinder/api/contrib/volume_transfer.py:196 +#, fuzzy, python-format +msgid "Accepting transfer %s" +msgstr "更新主机状态" + +#: cinder/api/contrib/volume_transfer.py:217 +#, fuzzy, python-format +msgid "Delete transfer with id: %s" +msgstr "删除id为 %s 的卷" + +#: cinder/api/contrib/volume_type_encryption.py:64 +msgid "key_size must be non-negative" msgstr "" -#: cinder/api/ec2/cloud.py:1603 -#, python-format -msgid "Couldn't stop instance with in %d sec" -msgstr "无法在 %d 秒内停止实例" +#: cinder/api/contrib/volume_type_encryption.py:67 +msgid "key_size must be an integer" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:73 +msgid "provider must be defined" +msgstr "" -#: cinder/api/metadata/handler.py:246 cinder/api/metadata/handler.py:253 +#: cinder/api/contrib/volume_type_encryption.py:75 +msgid "control_location must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:83 #, python-format -msgid "Failed to get metadata for ip: %s" -msgstr "为ip: %s获取元数据失败" +msgid "Valid control location are: %s" +msgstr "" -#: cinder/api/openstack/__init__.py:43 +#: cinder/api/contrib/volume_type_encryption.py:111 +msgid "Cannot create encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:115 +msgid "Create body is not valid." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:157 +msgid "Cannot delete encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/middleware/auth.py:108 +msgid "Invalid service catalog json." +msgstr "" + +#: cinder/api/middleware/fault.py:44 #, python-format msgid "Caught error: %s" msgstr "抓到错误:%s" -#: cinder/api/openstack/__init__.py:45 cinder/api/openstack/wsgi.py:886 +#: cinder/api/middleware/fault.py:53 cinder/api/openstack/wsgi.py:984 #, python-format msgid "%(url)s returned with HTTP %(status)d" msgstr "%(url)s 随HTTP %(status)d返回" -#: cinder/api/openstack/__init__.py:94 +#: cinder/api/middleware/fault.py:69 +#, python-format +msgid "%(exception)s: %(explanation)s" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:55 cinder/api/middleware/sizelimit.py:64 +#: cinder/api/middleware/sizelimit.py:78 +msgid "Request is too large." 
+msgstr "" + +#: cinder/api/openstack/__init__.py:69 msgid "Must specify an ExtensionManager class" msgstr "必须明确一个ExtensionManager类" -#: cinder/api/openstack/__init__.py:105 +#: cinder/api/openstack/__init__.py:80 #, python-format msgid "Extended resource: %s" msgstr "扩展资源:%s" -#: cinder/api/openstack/__init__.py:130 +#: cinder/api/openstack/__init__.py:104 #, python-format msgid "" "Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " "resource" msgstr "扩展%(ext_name)s:无法扩展资源 %(collection)s:没有那种资源" -#: cinder/api/openstack/__init__.py:135 +#: cinder/api/openstack/__init__.py:110 #, python-format msgid "Extension %(ext_name)s extending resource: %(collection)s" msgstr "扩展资源的扩展 %(ext_name)s:%(collection)s" -#: cinder/api/openstack/auth.py:90 -#, python-format -msgid "%(user_id)s could not be found with token '%(token)s'" -msgstr "%(user_id)s 通过令牌 '%(token)s' 是找不到的" - -#: cinder/api/openstack/auth.py:134 -#, python-format -msgid "%(user_id)s must be an admin or a member of %(project_id)s" -msgstr "%(user_id)s 必须是 %(project_id)s 的管理员或者成员" - -#: cinder/api/openstack/auth.py:152 -msgid "Authentication requests must be made against a version root (e.g. /v2)." -msgstr "认证请求必须针对root版本(例如 /v2)。" - -#: cinder/api/openstack/auth.py:167 -#, python-format -msgid "Could not find %s in request." -msgstr "请求中找不到 %s" - -#: cinder/api/openstack/auth.py:191 -#, python-format -msgid "Successfully authenticated '%s'" -msgstr "成功验证 '%s'" - -#: cinder/api/openstack/auth.py:241 -msgid "User not found with provided API key." -msgstr "没有为提供的API密钥找到用户。" - -#: cinder/api/openstack/auth.py:258 -#, python-format -msgid "Provided API key is valid, but not for user '%(username)s'" -msgstr "提供的API密钥有效,但并不是给用户 '%(username)s' 的。" - -#: cinder/api/openstack/common.py:133 cinder/api/openstack/common.py:167 -msgid "limit param must be an integer" -msgstr "limit 参数必须是整数" - -#: cinder/api/openstack/common.py:136 cinder/api/openstack/common.py:171 -msgid "limit param must be positive" -msgstr "limit参数必须是正数" - -#: cinder/api/openstack/common.py:161 -msgid "offset param must be an integer" -msgstr "offset 参数必须是整数" - -#: cinder/api/openstack/common.py:175 -msgid "offset param must be positive" -msgstr "offset 参数必须是正数" - -#: cinder/api/openstack/common.py:203 -#, python-format -msgid "marker [%s] not found" -msgstr "没有找到标记 [%s]" - -#: cinder/api/openstack/common.py:243 -#, python-format -msgid "href %s does not contain version" -msgstr "href %s 不包含版本" - -#: cinder/api/openstack/common.py:278 -msgid "Image metadata limit exceeded" -msgstr "超过镜像元数据限制" - -#: cinder/api/openstack/common.py:295 -#, fuzzy, python-format -msgid "Converting nw_info: %s" -msgstr "实例的network_info:|%s|" - -#: cinder/api/openstack/common.py:305 -#, fuzzy, python-format -msgid "Converted networks: %s" -msgstr "意外错误:%s" - -#: cinder/api/openstack/common.py:338 -#, python-format -msgid "Cannot '%(action)s' while instance is in %(attr)s %(state)s" -msgstr "'%(action)s' 针对处于 %(attr)s %(state)s 的实例是无法进行" - -#: cinder/api/openstack/common.py:341 -#, python-format -msgid "Instance is in an invalid state for '%(action)s'" -msgstr "实例针对 '%(action)s' 处于无效状态" - -#: cinder/api/openstack/common.py:421 -msgid "Rejecting snapshot request, snapshots currently disabled" -msgstr "拒绝快照请求,快照当前未被激活" - -#: cinder/api/openstack/common.py:423 -msgid "Instance snapshots are not permitted at this time." 
-msgstr "该时刻实例快照是不允许的。" - -#: cinder/api/openstack/extensions.py:188 -#, python-format -msgid "Loaded extension: %s" -msgstr "加载的扩展:%s" - -#: cinder/api/openstack/extensions.py:225 -#, python-format -msgid "Ext name: %s" -msgstr "Ext name: %s" - -#: cinder/api/openstack/extensions.py:226 -#, python-format -msgid "Ext alias: %s" -msgstr "Ext alias: %s" - -#: cinder/api/openstack/extensions.py:227 -#, python-format -msgid "Ext description: %s" -msgstr "Ext 描述: %s" - -#: cinder/api/openstack/extensions.py:229 -#, python-format -msgid "Ext namespace: %s" -msgstr "Ext 命名空间: %s" - -#: cinder/api/openstack/extensions.py:230 -#, python-format -msgid "Ext updated: %s" -msgstr "Ext updated: %s" - -#: cinder/api/openstack/extensions.py:232 -#, python-format -msgid "Exception loading extension: %s" -msgstr "加载扩展发生异常:%s" - -#: cinder/api/openstack/extensions.py:246 -#, python-format -msgid "Loading extension %s" -msgstr "正在加载扩展 %s" - -#: cinder/api/openstack/extensions.py:252 -#, python-format -msgid "Calling extension factory %s" -msgstr "调用扩展工厂 %s" - -#: cinder/api/openstack/extensions.py:264 -#, python-format -msgid "Failed to load extension %(ext_factory)s: %(exc)s" -msgstr "加载扩展 %(ext_factory)s 失败:%(exc)s" - -#: cinder/api/openstack/extensions.py:344 -#, python-format -msgid "Failed to load extension %(classpath)s: %(exc)s" -msgstr "加载扩展 %(classpath)s 失败:%(exc)s" +#: cinder/api/openstack/__init__.py:126 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" -#: cinder/api/openstack/extensions.py:368 -#, python-format -msgid "Failed to load extension %(ext_name)s: %(exc)s" -msgstr "加载扩展 %(ext_name)s 失败:%(exc)s" +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." 
+msgstr "" -#: cinder/api/openstack/wsgi.py:135 cinder/api/openstack/wsgi.py:538 +#: cinder/api/openstack/wsgi.py:220 cinder/api/openstack/wsgi.py:634 msgid "cannot understand JSON" msgstr "无法理解JSON" -#: cinder/api/openstack/wsgi.py:159 -#: cinder/api/openstack/compute/contrib/hosts.py:86 -msgid "cannot understand XML" -msgstr "无法理解XML" - -#: cinder/api/openstack/wsgi.py:543 +#: cinder/api/openstack/wsgi.py:639 msgid "too many body keys" msgstr "过多主体密钥" -#: cinder/api/openstack/wsgi.py:582 +#: cinder/api/openstack/wsgi.py:677 #, fuzzy, python-format msgid "Exception handling resource: %s" msgstr "扩展资源:%s" -#: cinder/api/openstack/wsgi.py:586 +#: cinder/api/openstack/wsgi.py:682 #, python-format msgid "Fault thrown: %s" msgstr "错误抛出: %s" -#: cinder/api/openstack/wsgi.py:589 +#: cinder/api/openstack/wsgi.py:685 #, python-format msgid "HTTP exception thrown: %s" msgstr "HTTP 异常抛出:%s" -#: cinder/api/openstack/wsgi.py:697 +#: cinder/api/openstack/wsgi.py:793 +msgid "Empty body provided in request" +msgstr "请求中没有提供主体" + +#: cinder/api/openstack/wsgi.py:799 msgid "Unrecognized Content-Type provided in request" msgstr "请求中提供了无法识别的 Content-Type" -#: cinder/api/openstack/wsgi.py:701 +#: cinder/api/openstack/wsgi.py:803 msgid "No Content-Type provided in request" msgstr "请求中没有提供 Content-Type" -#: cinder/api/openstack/wsgi.py:705 -msgid "Empty body provided in request" -msgstr "请求中没有提供主体" - -#: cinder/api/openstack/wsgi.py:816 +#: cinder/api/openstack/wsgi.py:914 #, python-format msgid "There is no such action: %s" msgstr "没有该动作:%s" -#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 -#: cinder/api/openstack/compute/server_metadata.py:58 -#: cinder/api/openstack/compute/server_metadata.py:76 -#: cinder/api/openstack/compute/server_metadata.py:101 -#: cinder/api/openstack/compute/server_metadata.py:126 -#: cinder/api/openstack/compute/contrib/admin_actions.py:211 -#: cinder/api/openstack/compute/contrib/console_output.py:52 +#: cinder/api/openstack/wsgi.py:917 cinder/api/openstack/wsgi.py:930 +#: cinder/api/v1/snapshot_metadata.py:53 cinder/api/v1/snapshot_metadata.py:71 +#: cinder/api/v1/snapshot_metadata.py:96 cinder/api/v1/snapshot_metadata.py:121 +#: cinder/api/v1/volume_metadata.py:53 cinder/api/v1/volume_metadata.py:71 +#: cinder/api/v1/volume_metadata.py:96 cinder/api/v1/volume_metadata.py:121 +#: cinder/api/v2/snapshot_metadata.py:53 cinder/api/v2/snapshot_metadata.py:71 +#: cinder/api/v2/snapshot_metadata.py:96 cinder/api/v2/snapshot_metadata.py:121 +#: cinder/api/v2/volume_metadata.py:52 cinder/api/v2/volume_metadata.py:70 +#: cinder/api/v2/volume_metadata.py:95 cinder/api/v2/volume_metadata.py:120 msgid "Malformed request body" msgstr "错误格式的请求主体" -#: cinder/api/openstack/wsgi.py:829 +#: cinder/api/openstack/wsgi.py:927 msgid "Unsupported Content-Type" msgstr "不支持的Content-Type" -#: cinder/api/openstack/wsgi.py:841 +#: cinder/api/openstack/wsgi.py:939 msgid "Malformed request url" msgstr "错误格式的请求url" -#: cinder/api/openstack/wsgi.py:889 +#: cinder/api/openstack/wsgi.py:987 #, python-format msgid "%(url)s returned a fault: %(e)s" msgstr "%(url)s返回错误:%(e)s" -#: cinder/api/openstack/xmlutil.py:265 -msgid "element is not a child" -msgstr "元素不是子节点" - -#: cinder/api/openstack/xmlutil.py:414 -msgid "root element selecting a list" -msgstr "根元素选择列表" - -#: cinder/api/openstack/xmlutil.py:739 -#, python-format -msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" -msgstr "模板数不匹配;把slave %(slavetag)s 添加到master %(mastertag)s" - -#: 
cinder/api/openstack/xmlutil.py:858 -msgid "subclasses must implement construct()!" -msgstr "subclasses必须执行construct()!" - -#: cinder/api/openstack/compute/extensions.py:29 -#: cinder/api/openstack/volume/extensions.py:29 -msgid "Initializing extension manager." -msgstr "正在初始化扩展管理员。" - -#: cinder/api/openstack/compute/image_metadata.py:40 -#: cinder/api/openstack/compute/images.py:146 -#: cinder/api/openstack/compute/images.py:161 -msgid "Image not found." -msgstr "镜像没有找到。" - -#: cinder/api/openstack/compute/image_metadata.py:79 -msgid "Incorrect request body format" -msgstr "不正确的请求主体格式" - -#: cinder/api/openstack/compute/image_metadata.py:83 -#: cinder/api/openstack/compute/server_metadata.py:80 -#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:79 -#: cinder/api/openstack/compute/contrib/volumetypes.py:188 -msgid "Request body and URI mismatch" -msgstr "请求主体和URI不匹配" - -#: cinder/api/openstack/compute/image_metadata.py:86 -#: cinder/api/openstack/compute/server_metadata.py:84 -#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:82 -#: cinder/api/openstack/compute/contrib/volumetypes.py:191 -msgid "Request body contains too many items" -msgstr "请求主体包含太多items" - -#: cinder/api/openstack/compute/image_metadata.py:111 -msgid "Invalid metadata key" -msgstr "无效的元数据键" - -#: cinder/api/openstack/compute/ips.py:74 -msgid "Instance does not exist" -msgstr "实例不存在" +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." +msgstr "" -#: cinder/api/openstack/compute/ips.py:97 -msgid "Instance is not a member of specified network" -msgstr "实例并不是指定网络的成员" +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" -#: cinder/api/openstack/compute/limits.py:140 +#: cinder/api/v1/limits.py:139 cinder/api/v2/limits.py:138 #, python-format msgid "" "Only %(value)s %(verb)s request(s) can be made to %(uri)s every " "%(unit_string)s." msgstr "只能有 %(value)s 个 %(verb)s 请求发送给 %(uri)s 限定是每一个 %(unit_string)s。" -#: cinder/api/openstack/compute/limits.py:266 +#: cinder/api/v1/limits.py:264 cinder/api/v2/limits.py:261 msgid "This request was rate-limited." 
msgstr "这个请求受到频率限制。" -#: cinder/api/openstack/compute/server_metadata.py:38 -#: cinder/api/openstack/compute/server_metadata.py:122 -#: cinder/api/openstack/compute/server_metadata.py:159 -msgid "Server does not exist" -msgstr "服务器不存在" +#: cinder/api/v1/snapshot_metadata.py:37 cinder/api/v1/snapshot_metadata.py:117 +#: cinder/api/v1/snapshot_metadata.py:156 cinder/api/v2/snapshot_metadata.py:37 +#: cinder/api/v2/snapshot_metadata.py:117 +#: cinder/api/v2/snapshot_metadata.py:156 +#, fuzzy +msgid "snapshot does not exist" +msgstr "实例不存在" -#: cinder/api/openstack/compute/server_metadata.py:141 -#: cinder/api/openstack/compute/server_metadata.py:152 +#: cinder/api/v1/snapshot_metadata.py:139 +#: cinder/api/v1/snapshot_metadata.py:149 cinder/api/v1/volume_metadata.py:139 +#: cinder/api/v1/volume_metadata.py:149 cinder/api/v2/snapshot_metadata.py:139 +#: cinder/api/v2/snapshot_metadata.py:149 cinder/api/v2/volume_metadata.py:138 +#: cinder/api/v2/volume_metadata.py:148 msgid "Metadata item was not found" msgstr "元数据项目未找到" -#: cinder/api/openstack/compute/servers.py:425 +#: cinder/api/v1/snapshots.py:119 cinder/api/v2/snapshots.py:120 #, python-format -msgid "Invalid server status: %(status)s" -msgstr "无效的服务器状态:%(status)s" - -#: cinder/api/openstack/compute/servers.py:433 -msgid "Invalid changes-since value" -msgstr "无效的changes-since值" - -#: cinder/api/openstack/compute/servers.py:483 -msgid "Personality file limit exceeded" -msgstr "超过个性化文件限制" - -#: cinder/api/openstack/compute/servers.py:485 -msgid "Personality file path too long" -msgstr "个性化文件路径太长" +msgid "Delete snapshot with id: %s" +msgstr "删除id为 %s 的快照" -#: cinder/api/openstack/compute/servers.py:487 -msgid "Personality file content too long" -msgstr "个性化文件内容太长" +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:184 +msgid "'volume_id' must be specified" +msgstr "" -#: cinder/api/openstack/compute/servers.py:501 -msgid "Server name is not a string or unicode" -msgstr "服务器名称不是字符串或者unicode" +#: cinder/api/v1/snapshots.py:182 cinder/api/v2/snapshots.py:193 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "为卷 %s 创建快照" -#: cinder/api/openstack/compute/servers.py:505 -msgid "Server name is an empty string" -msgstr "服务器名称是空串" +#: cinder/api/v1/snapshots.py:186 cinder/api/v2/snapshots.py:202 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" -#: cinder/api/openstack/compute/servers.py:509 +#: cinder/api/v1/volume_metadata.py:37 cinder/api/v1/volume_metadata.py:117 +#: cinder/api/v1/volume_metadata.py:156 cinder/api/v2/volume_metadata.py:36 +#: cinder/api/v2/volume_metadata.py:116 cinder/api/v2/volume_metadata.py:155 #, fuzzy -msgid "Server name must be less than 256 characters." 
-msgstr "密钥对名称长度必须在1到255个字符之间" +msgid "volume does not exist" +msgstr "域不存在" -#: cinder/api/openstack/compute/servers.py:527 +#: cinder/api/v1/volumes.py:111 #, python-format -msgid "Bad personality format: missing %s" -msgstr "错误的个性化格式:丢失 %s" - -#: cinder/api/openstack/compute/servers.py:530 -msgid "Bad personality format" -msgstr "损坏的个性化格式" +msgid "vol=%s" +msgstr "vol=%s" -#: cinder/api/openstack/compute/servers.py:535 +#: cinder/api/v1/volumes.py:290 cinder/api/v2/volumes.py:228 #, python-format -msgid "Personality content for %s cannot be decoded" -msgstr "个性化 %s 的内容无法解码" +msgid "Delete volume with id: %s" +msgstr "删除id为 %s 的卷" -#: cinder/api/openstack/compute/servers.py:550 -#, python-format -msgid "Bad networks format: network uuid is not in proper format (%s)" -msgstr "损坏的网络格式:网络 uuid 格式不正确 (%s)" +#: cinder/api/v1/volumes.py:344 cinder/api/v1/volumes.py:348 +#: cinder/api/v2/volumes.py:298 cinder/api/v2/volumes.py:302 +msgid "Invalid imageRef provided." +msgstr "提供了无效的imageRef。" -#: cinder/api/openstack/compute/servers.py:559 +#: cinder/api/v1/volumes.py:388 cinder/api/v2/volumes.py:354 #, python-format -msgid "Invalid fixed IP address (%s)" -msgstr "不正确的固定 IP 地址(%s)" +msgid "snapshot id:%s not found" +msgstr "" -#: cinder/api/openstack/compute/servers.py:566 +#: cinder/api/v1/volumes.py:401 #, python-format -msgid "Duplicate networks (%s) are not allowed" -msgstr "不允许重复的网络(%s)" +msgid "source vol id:%s not found" +msgstr "" -#: cinder/api/openstack/compute/servers.py:572 +#: cinder/api/v1/volumes.py:412 cinder/api/v2/volumes.py:377 #, python-format -msgid "Bad network format: missing %s" -msgstr "错误的网络格式:丢失%s" - -#: cinder/api/openstack/compute/servers.py:575 -msgid "Bad networks format" -msgstr "错误的网络格式" +msgid "Create volume of %s GB" +msgstr "创建 %s GB的卷" -#: cinder/api/openstack/compute/servers.py:587 -msgid "Userdata content cannot be decoded" -msgstr "Userdata 内容无法解码" +#: cinder/api/v1/volumes.py:496 +#, fuzzy, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "正在从查询语句中移除选项 '%(unk_opt_str)s'" -#: cinder/api/openstack/compute/servers.py:594 -msgid "accessIPv4 is not proper IPv4 format" -msgstr "accessIPv4 不是正确的IPv4格式" +#: cinder/api/v2/snapshots.py:111 cinder/api/v2/snapshots.py:126 +#: cinder/api/v2/snapshots.py:267 +msgid "Snapshot could not be found" +msgstr "" -#: cinder/api/openstack/compute/servers.py:601 -#, fuzzy -msgid "accessIPv6 is not proper IPv6 format" -msgstr "accessIPv4 不是正确的IPv4格式" +#: cinder/api/v2/snapshots.py:174 cinder/api/v2/snapshots.py:234 +#: cinder/api/v2/volumes.py:313 cinder/api/v2/volumes.py:419 +#, python-format +msgid "Missing required element '%s' in request body" +msgstr "" -#: cinder/api/openstack/compute/servers.py:633 -msgid "Server name is not defined" -msgstr "服务器名称未定义" +#: cinder/api/v2/snapshots.py:190 cinder/api/v2/volumes.py:217 +#: cinder/api/v2/volumes.py:234 cinder/api/v2/volumes.py:449 +msgid "Volume could not be found" +msgstr "" -#: cinder/api/openstack/compute/servers.py:675 -#: cinder/api/openstack/compute/servers.py:740 -msgid "Invalid flavorRef provided." -msgstr "提供了无效的flavorRef。" +#: cinder/api/v2/snapshots.py:230 cinder/api/v2/volumes.py:415 +msgid "Missing request body" +msgstr "" -#: cinder/api/openstack/compute/servers.py:737 -msgid "Can not find requested image" -msgstr "无法找到请求的镜像" +#: cinder/api/v2/types.py:70 +msgid "Volume type not found" +msgstr "" -#: cinder/api/openstack/compute/servers.py:743 -msgid "Invalid key_name provided." 
-msgstr "提供了无效的key_name。" +#: cinder/api/v2/volumes.py:237 +msgid "Volume cannot be deleted while in attached state" +msgstr "" -#: cinder/api/openstack/compute/servers.py:829 -#: cinder/api/openstack/compute/servers.py:849 -msgid "Instance has not been resized." -msgstr "实例还没有调整大小。" +#: cinder/api/v2/volumes.py:343 +msgid "Volume type not found." +msgstr "" -#: cinder/api/openstack/compute/servers.py:835 +#: cinder/api/v2/volumes.py:366 #, python-format -msgid "Error in confirm-resize %s" -msgstr "confirm-resize中的错误 %s" +msgid "source volume id:%s not found" +msgstr "" -#: cinder/api/openstack/compute/servers.py:855 -#, python-format -msgid "Error in revert-resize %s" -msgstr "revert-resize中的错误 %s" +#: cinder/api/v2/volumes.py:472 +#, fuzzy, python-format +msgid "Removing options '%s' from query" +msgstr "正在从查询语句中移除选项 '%(unk_opt_str)s'" -#: cinder/api/openstack/compute/servers.py:868 -msgid "Argument 'type' for reboot is not HARD or SOFT" -msgstr "重启的参数'type'既不是HARD也不是SOFT" +#: cinder/backup/api.py:66 +#, fuzzy +msgid "Backup status must be available or error" +msgstr "卷组状态必须可获取" -#: cinder/api/openstack/compute/servers.py:872 -msgid "Missing argument 'type' for reboot" -msgstr "缺少重启的参数'type'" +#: cinder/backup/api.py:105 +#, fuzzy +msgid "Volume to be backed up must be available" +msgstr "卷组状态必须可获取" -#: cinder/api/openstack/compute/servers.py:885 -#, python-format -msgid "Error in reboot %s" -msgstr "重启中错误 %s" +#: cinder/backup/api.py:140 +#, fuzzy +msgid "Backup status must be available" +msgstr "状态必须可用" -#: cinder/api/openstack/compute/servers.py:897 -msgid "Unable to locate requested flavor." -msgstr "无法找到请求的类型。" +#: cinder/backup/api.py:145 +msgid "Backup to be restored has invalid size" +msgstr "" -#: cinder/api/openstack/compute/servers.py:900 -msgid "Resize requires a change in size." -msgstr "调整大小需要尺寸的改变。" +#: cinder/backup/api.py:154 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" -#: cinder/api/openstack/compute/servers.py:924 -msgid "Malformed server entity" -msgstr "错误格式的服务器实体" +#: cinder/backup/api.py:170 +#, fuzzy +msgid "Volume to be restored to must be available" +msgstr "卷组状态必须可获取" -#: cinder/api/openstack/compute/servers.py:931 -msgid "Missing imageRef attribute" -msgstr "缺少属性imageRef" +#: cinder/backup/api.py:176 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." +msgstr "" -#: cinder/api/openstack/compute/servers.py:940 -msgid "Invalid imageRef provided." -msgstr "提供了无效的imageRef。" +#: cinder/backup/api.py:181 +#, fuzzy, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "正在把卷 %(volume_id)s 附加到 %(mountpoint)s" -#: cinder/api/openstack/compute/servers.py:949 -msgid "Missing flavorRef attribute" -msgstr "缺少属性flavorRef" +#: cinder/backup/manager.py:97 +msgid "NULL host not allowed for volume backend lookup." +msgstr "" -#: cinder/api/openstack/compute/servers.py:962 -msgid "No adminPass was specified" -msgstr "没有确定adminPass" +#: cinder/backup/manager.py:100 +#, python-format +msgid "Checking hostname '%s' for backend info." +msgstr "" -#: cinder/api/openstack/compute/servers.py:966 -#: cinder/api/openstack/compute/servers.py:1144 -msgid "Invalid adminPass" -msgstr "无效的adminPass" +#: cinder/backup/manager.py:107 +#, python-format +msgid "Backend not found in hostname (%s) so using default." +msgstr "" -#: cinder/api/openstack/compute/servers.py:980 -msgid "Unable to parse metadata key/value pairs." 
-msgstr "无法解析元数据键/值对" +#: cinder/backup/manager.py:117 +#, python-format +msgid "Manager requested for volume_backend '%s'." +msgstr "" -#: cinder/api/openstack/compute/servers.py:993 -msgid "Resize request has invalid 'flavorRef' attribute." -msgstr "调整大小请求中的属性'flavorRef'无效。" +#: cinder/backup/manager.py:120 cinder/backup/manager.py:132 +msgid "Fetching default backend." +msgstr "" -#: cinder/api/openstack/compute/servers.py:996 -msgid "Resize requests require 'flavorRef' attribute." -msgstr "调整大小请求要求有属性'flavorRef'。" +#: cinder/backup/manager.py:123 +#, python-format +msgid "Volume manager for backend '%s' does not exist." +msgstr "" -#: cinder/api/openstack/compute/servers.py:1014 -#: cinder/api/openstack/compute/contrib/aggregates.py:142 -#: cinder/api/openstack/compute/contrib/networks.py:65 -msgid "Invalid request body" -msgstr "无效的请求主体" +#: cinder/backup/manager.py:129 +#, python-format +msgid "Driver requested for volume_backend '%s'." +msgstr "" -#: cinder/api/openstack/compute/servers.py:1019 -msgid "Could not parse imageRef from request." -msgstr "无法解析请求中的imageRef。" +#: cinder/backup/manager.py:147 +#, python-format +msgid "" +"Registering backend %(backend)s (host=%(host)s " +"backend_name=%(backend_name)s)." +msgstr "" -#: cinder/api/openstack/compute/servers.py:1071 -msgid "Instance could not be found" -msgstr "无法找到实例" +#: cinder/backup/manager.py:154 +#, python-format +msgid "Registering default backend %s." +msgstr "" -#: cinder/api/openstack/compute/servers.py:1074 -msgid "Cannot find image for rebuild" -msgstr "无法找到用来重新创建的镜像" +#: cinder/backup/manager.py:158 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)." +msgstr "" -#: cinder/api/openstack/compute/servers.py:1103 -msgid "createImage entity requires name attribute" -msgstr "实体createImage需要属性name" +#: cinder/backup/manager.py:165 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s." +msgstr "" -#: cinder/api/openstack/compute/servers.py:1112 -#: cinder/api/openstack/compute/contrib/admin_actions.py:238 -msgid "Invalid metadata" -msgstr "无效的元数据" +#: cinder/backup/manager.py:184 +msgid "Cleaning up incomplete backup operations." +msgstr "" -#: cinder/api/openstack/compute/servers.py:1167 +#: cinder/backup/manager.py:189 #, python-format -msgid "Removing options '%(unk_opt_str)s' from query" -msgstr "正在从查询语句中移除选项 '%(unk_opt_str)s'" +msgid "Resetting volume %s to available (was backing-up)." +msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:60 +#: cinder/backup/manager.py:194 #, python-format -msgid "Compute.api::pause %s" -msgstr "Compute.api::暂停 %s" +msgid "Resetting volume %s to error_restoring (was restoring-backup)." +msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:77 +#: cinder/backup/manager.py:206 #, python-format -msgid "Compute.api::unpause %s" -msgstr "Compute.api::取消暂停 %s" +msgid "Resetting backup %s to error (was creating)." +msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:94 +#: cinder/backup/manager.py:212 #, python-format -msgid "compute.api::suspend %s" -msgstr "compute.api::挂起 %s" +msgid "Resetting backup %s to available (was restoring)." +msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:111 +#: cinder/backup/manager.py:217 #, python-format -msgid "compute.api::resume %s" -msgstr "compute.api::resume %s" +msgid "Resuming delete on backup: %s." 
+msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:127 +#: cinder/backup/manager.py:225 #, python-format -msgid "Error in migrate %s" -msgstr "迁移错误 %s" +msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:141 +#: cinder/backup/manager.py:237 #, python-format -msgid "Compute.api::reset_network %s" -msgstr "Compute.api::reset_network %s" - -#: cinder/api/openstack/compute/contrib/admin_actions.py:154 -#: cinder/api/openstack/compute/contrib/admin_actions.py:170 -#: cinder/api/openstack/compute/contrib/admin_actions.py:186 -#: cinder/api/openstack/compute/contrib/multinic.py:41 -#: cinder/api/openstack/compute/contrib/rescue.py:44 -msgid "Server not found" -msgstr "没有找到服务器" +msgid "" +"Create backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:157 +#: cinder/backup/manager.py:249 #, python-format -msgid "Compute.api::inject_network_info %s" -msgstr "Compute.api::inject_network_info %s" +msgid "" +"Create backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:173 +#: cinder/backup/manager.py:282 #, python-format -msgid "Compute.api::lock %s" -msgstr "compute.api::加锁 %s" +msgid "Create backup finished. backup: %s." +msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:189 +#: cinder/backup/manager.py:286 #, python-format -msgid "Compute.api::unlock %s" -msgstr "compute.api::解锁 %s" +msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:219 +#: cinder/backup/manager.py:299 #, python-format -msgid "createBackup entity requires %s attribute" -msgstr "实体createBackup需要有属性 %s" - -#: cinder/api/openstack/compute/contrib/admin_actions.py:223 -msgid "Malformed createBackup entity" -msgstr "错误格式的实体createBackup" - -#: cinder/api/openstack/compute/contrib/admin_actions.py:229 -msgid "createBackup attribute 'rotation' must be an integer" -msgstr "createBackup的属性 'rotation'必须是整数" - -#: cinder/api/openstack/compute/contrib/admin_actions.py:244 -#: cinder/api/openstack/compute/contrib/console_output.py:47 -#: cinder/api/openstack/compute/contrib/console_output.py:59 -#: cinder/api/openstack/compute/contrib/consoles.py:49 -#: cinder/api/openstack/compute/contrib/consoles.py:60 -#: cinder/api/openstack/compute/contrib/server_action_list.py:49 -#: cinder/api/openstack/compute/contrib/server_diagnostics.py:47 -#: cinder/api/openstack/compute/contrib/server_start_stop.py:38 -msgid "Instance not found" -msgstr "没有找到实例" - -#: cinder/api/openstack/compute/contrib/admin_actions.py:272 -msgid "host and block_migration must be specified." -msgstr "host与block_migration必须确定" +msgid "" +"Restore backup aborted: expected volume status %(expected_status)s but " +"got %(actual_status)s." 
+msgstr "" -#: cinder/api/openstack/compute/contrib/admin_actions.py:284 +#: cinder/backup/manager.py:310 #, python-format -msgid "Live migration of instance %(id)s to host %(host)s failed" -msgstr "实例 %(id)s 到主机 %(host)s 的动态迁移失败" - -#: cinder/api/openstack/compute/contrib/aggregates.py:76 -#, fuzzy, python-format msgid "" -"Cannot create aggregate with name %(name)s and availability zone " -"%(avail_zone)s" -msgstr "无法创建名称为 %(name)s 规格为 %(extra_specs)s 的卷类型。" +"Restore backup aborted: expected backup status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:88 +#: cinder/backup/manager.py:329 #, python-format -msgid "Cannot show aggregate: %(id)s" +msgid "" +"Restore backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:114 -#, fuzzy, python-format -msgid "Cannot update aggregate: %(id)s" -msgstr "更新代理失败:%(resp)r" - -#: cinder/api/openstack/compute/contrib/aggregates.py:126 +#: cinder/backup/manager.py:360 #, python-format -msgid "Cannot delete aggregate: %(id)s" +msgid "" +"Restore backup finished, backup %(backup_id)s restored to volume " +"%(volume_id)s." msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:139 +#: cinder/backup/manager.py:379 #, python-format -msgid "Aggregates does not have %s action" -msgstr "聚合没有动作 %s" +msgid "Delete backup started, backup: %s." +msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:152 -#: cinder/api/openstack/compute/contrib/aggregates.py:158 +#: cinder/backup/manager.py:386 #, python-format -msgid "Cannot add host %(host)s in aggregate %(id)s" +msgid "" +"Delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:171 -#: cinder/api/openstack/compute/contrib/aggregates.py:175 +#: cinder/backup/manager.py:399 #, python-format -msgid "Cannot remove host %(host)s in aggregate %(id)s" +msgid "" +"Delete backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." msgstr "" -#: cinder/api/openstack/compute/contrib/aggregates.py:195 +#: cinder/backup/manager.py:422 #, python-format -msgid "Cannot set metadata %(metadata)s in aggregate %(id)s" +msgid "Delete backup finished, backup %s deleted." msgstr "" -#: cinder/api/openstack/compute/contrib/certificates.py:75 -msgid "Only root certificate can be retrieved." 
-msgstr "只有根证书能被获取。" - -#: cinder/api/openstack/compute/contrib/cloudpipe.py:146 +#: cinder/backup/drivers/ceph.py:116 msgid "" -"Unable to claim IP for VPN instances, ensure it isn't running, and try " -"again in a few minutes" -msgstr "无法为VPN实例申请IP,确保实例没有运行,过一会儿再试。" +"rbd striping not supported - ignoring configuration settings for rbd " +"striping" +msgstr "" -#: cinder/api/openstack/compute/contrib/consoles.py:44 -msgid "Missing type specification" -msgstr "缺少类型规范" +#: cinder/backup/drivers/ceph.py:147 +#, python-format +msgid "invalid user '%s'" +msgstr "" -#: cinder/api/openstack/compute/contrib/consoles.py:56 -msgid "Invalid type specification" -msgstr "无效的类型规范" +#: cinder/backup/drivers/ceph.py:213 +msgid "backup_id required" +msgstr "" -#: cinder/api/openstack/compute/contrib/disk_config.py:44 +#: cinder/backup/drivers/ceph.py:224 #, python-format -msgid "%s must be either 'MANUAL' or 'AUTO'." -msgstr "%s 必须是'MANUAL' 或者 'AUTO'。" +msgid "discarding %(length)s bytes from offset %(offset)s" +msgstr "" -#: cinder/api/openstack/compute/contrib/extended_server_attributes.py:77 -#: cinder/api/openstack/compute/contrib/extended_status.py:61 -msgid "Server not found." -msgstr "没有找到服务器。" - -#: cinder/api/openstack/compute/contrib/flavorextradata.py:61 -#: cinder/api/openstack/compute/contrib/flavorextradata.py:91 -#, fuzzy -msgid "Flavor not found." -msgstr "没有找到服务器。" - -#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:49 -#: cinder/api/openstack/compute/contrib/volumetypes.py:158 -msgid "No Request Body" -msgstr "没有请求主体" - -#: cinder/api/openstack/compute/contrib/floating_ips.py:159 -#, python-format -msgid "No more floating ips in pool %s." -msgstr "池 %s 中已经没有浮动ip。" - -#: cinder/api/openstack/compute/contrib/floating_ips.py:161 -msgid "No more floating ips available." -msgstr "没有更多的浮动ip。" - -#: cinder/api/openstack/compute/contrib/floating_ips.py:201 -#: cinder/api/openstack/compute/contrib/floating_ips.py:230 -#: cinder/api/openstack/compute/contrib/security_groups.py:571 -#: cinder/api/openstack/compute/contrib/security_groups.py:604 -msgid "Missing parameter dict" -msgstr "缺少参数 dict" - -#: cinder/api/openstack/compute/contrib/floating_ips.py:204 -#: cinder/api/openstack/compute/contrib/floating_ips.py:233 -msgid "Address not specified" -msgstr "地址没有指定" - -#: cinder/api/openstack/compute/contrib/floating_ips.py:213 -msgid "No fixed ips associated to instance" -msgstr "没有固定ip与实例关联" - -#: cinder/api/openstack/compute/contrib/floating_ips.py:216 -msgid "Associate floating ip failed" -msgstr "关联浮动ip失败" - -#: cinder/api/openstack/compute/contrib/hosts.py:144 +#: cinder/backup/drivers/ceph.py:232 #, python-format -msgid "Invalid status: '%s'" -msgstr "无效的状态:'%s'" - -#: cinder/api/openstack/compute/contrib/hosts.py:148 -#, fuzzy, python-format -msgid "Invalid mode: '%s'" -msgstr "无效的状态:'%s'" +msgid "writing zeroes chunk %d" +msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:152 +#: cinder/backup/drivers/ceph.py:246 #, python-format -msgid "Invalid update setting: '%s'" -msgstr "无效的更新设置:'%s'" - -#: cinder/api/openstack/compute/contrib/hosts.py:170 -#, fuzzy, python-format -msgid "Putting host %(host)s in maintenance mode %(mode)s." -msgstr "把主机 %(host)s 设置为 %(state)s。" +msgid "transferring data between '%(src)s' and '%(dest)s'" +msgstr "" -#: cinder/api/openstack/compute/contrib/hosts.py:181 +#: cinder/backup/drivers/ceph.py:250 #, python-format -msgid "Setting host %(host)s to %(state)s." 
-msgstr "把主机 %(host)s 设置为 %(state)s。" - -#: cinder/api/openstack/compute/contrib/hosts.py:230 -msgid "Describe-resource is admin only functionality" -msgstr "Describe-resource是只有管理员才能执行的功能。" - -#: cinder/api/openstack/compute/contrib/hosts.py:238 -msgid "Host not found" -msgstr "没有找到主机" - -#: cinder/api/openstack/compute/contrib/keypairs.py:70 -msgid "Keypair name contains unsafe characters" +msgid "%(chunks)s chunks of %(bytes)s bytes to be transferred" msgstr "" -#: cinder/api/openstack/compute/contrib/keypairs.py:95 -msgid "Keypair name must be between 1 and 255 characters long" -msgstr "密钥对名称长度必须在1到255个字符之间" - -#: cinder/api/openstack/compute/contrib/keypairs.py:100 +#: cinder/backup/drivers/ceph.py:269 #, python-format -msgid "Key pair '%s' already exists." -msgstr "密钥对 '%s' 已经存在。" - -#: cinder/api/openstack/compute/contrib/multinic.py:52 -msgid "Missing 'networkId' argument for addFixedIp" -msgstr "addFixedIp缺少参数'networkId'" - -#: cinder/api/openstack/compute/contrib/multinic.py:68 -msgid "Missing 'address' argument for removeFixedIp" -msgstr "removeFixedIp缺少参数'address'" +msgid "transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s)" +msgstr "" -#: cinder/api/openstack/compute/contrib/multinic.py:77 +#: cinder/backup/drivers/ceph.py:279 #, python-format -msgid "Unable to find address %r" -msgstr "无法找到地址 %r" +msgid "transferring remaining %s bytes" +msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:62 +#: cinder/backup/drivers/ceph.py:295 #, python-format -msgid "Network does not have %s action" -msgstr "网络不包含动作 %s" +msgid "creating base image '%s'" +msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:70 +#: cinder/backup/drivers/ceph.py:322 cinder/backup/drivers/ceph.py:603 #, python-format -msgid "Disassociating network with id %s" -msgstr "为id是 %s 的网络解除关联" - -#: cinder/api/openstack/compute/contrib/networks.py:74 -#: cinder/api/openstack/compute/contrib/networks.py:91 -#: cinder/api/openstack/compute/contrib/networks.py:101 -msgid "Network not found" -msgstr "没有找到网络" +msgid "deleting backup snapshot='%s'" +msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:87 -#, python-format -msgid "Showing network with id %s" -msgstr "显示id是 %s 的网络" +#: cinder/backup/drivers/ceph.py:325 +msgid "no backup snapshot to delete" +msgstr "" -#: cinder/api/openstack/compute/contrib/networks.py:97 +#: cinder/backup/drivers/ceph.py:361 #, python-format -msgid "Deleting network with id %s" -msgstr "正在删除id是 %s 的网络" - -#: cinder/api/openstack/compute/contrib/scheduler_hints.py:41 -msgid "Malformed scheduler_hints attribute" -msgstr "错误格式的 scheduler_hints 属性" - -#: cinder/api/openstack/compute/contrib/security_groups.py:222 -msgid "Security group id should be integer" -msgstr "安全组id应该是整数" - -#: cinder/api/openstack/compute/contrib/security_groups.py:243 -#, fuzzy -msgid "Security group is still in use" -msgstr "安全组id应该是整数" +msgid "trying diff format name format basename='%s'" +msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:295 +#: cinder/backup/drivers/ceph.py:369 #, python-format -msgid "Security group %s already exists" -msgstr "安全组 %s 已经存在" +msgid "image %s not found" +msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:315 +#: cinder/backup/drivers/ceph.py:377 #, python-format -msgid "Security group %s is not a string or unicode" -msgstr "安全组 %s 既不是字符串也不是unicode" +msgid "base image still has %s snapshots so skipping base image delete" +msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:318 +#: 
cinder/backup/drivers/ceph.py:382 #, python-format -msgid "Security group %s cannot be empty." -msgstr "安全组 %s 不能为空。" +msgid "deleting base image='%s'" +msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:321 +#: cinder/backup/drivers/ceph.py:389 #, python-format -msgid "Security group %s should not be greater than 255 characters." -msgstr "安全组 %s 不能比255个字符更长。" +msgid "image busy, retrying %(retries)s more time(s) in %(delay)ss" +msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:348 -msgid "Parent group id is not integer" -msgstr "父组id不是整数" +#: cinder/backup/drivers/ceph.py:394 +msgid "max retries reached - raising error" +msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:351 +#: cinder/backup/drivers/ceph.py:397 #, python-format -msgid "Security group (%s) not found" -msgstr "没有找到安全组 (%s)" - -#: cinder/api/openstack/compute/contrib/security_groups.py:369 -msgid "Not enough parameters to build a valid rule." -msgstr "参数不够创建有效规则。" +msgid "base backup image='%s' deleted)" +msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:376 +#: cinder/backup/drivers/ceph.py:407 #, python-format -msgid "This rule already exists in group %s" -msgstr "这条规则已经存在于组%s 中" - -#: cinder/api/openstack/compute/contrib/security_groups.py:414 -msgid "Parent or group id is not integer" -msgstr "父组id或者组id不是整数" - -#: cinder/api/openstack/compute/contrib/security_groups.py:507 -msgid "Rule id is not integer" -msgstr "规则id不是整数" +msgid "deleting source snap '%s'" +msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:510 +#: cinder/backup/drivers/ceph.py:453 #, python-format -msgid "Rule (%s) not found" -msgstr "没有找到规则 (%s)" - -#: cinder/api/openstack/compute/contrib/security_groups.py:574 -#: cinder/api/openstack/compute/contrib/security_groups.py:607 -msgid "Security group not specified" -msgstr "没有指定安全组" +msgid "performing differential transfer from '%(src)s' to '%(dest)s'" +msgstr "" -#: cinder/api/openstack/compute/contrib/security_groups.py:578 -#: cinder/api/openstack/compute/contrib/security_groups.py:611 -msgid "Security group name cannot be empty" -msgstr "安全组名称不能是空" +#: cinder/backup/drivers/ceph.py:478 +#, python-format +msgid "rbd diff op failed - (ret=%(ret)s stderr=%(stderr)s)" +msgstr "" -#: cinder/api/openstack/compute/contrib/server_start_stop.py:45 +#: cinder/backup/drivers/ceph.py:488 #, python-format -msgid "start instance %r" -msgstr "启动实例 %r" +msgid "image '%s' not found - trying diff format name" +msgstr "" -#: cinder/api/openstack/compute/contrib/server_start_stop.py:54 +#: cinder/backup/drivers/ceph.py:493 #, python-format -msgid "stop instance %r" -msgstr "停止实例 %r" +msgid "diff format image '%s' not found" +msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:73 -#: cinder/api/openstack/volume/volumes.py:106 +#: cinder/backup/drivers/ceph.py:528 #, python-format -msgid "vol=%s" -msgstr "vol=%s" +msgid "using --from-snap '%s'" +msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:146 -#: cinder/api/openstack/volume/volumes.py:184 +#: cinder/backup/drivers/ceph.py:543 #, python-format -msgid "Delete volume with id: %s" -msgstr "删除id为 %s 的卷" +msgid "source snap '%s' is stale so deleting" +msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:329 +#: cinder/backup/drivers/ceph.py:555 #, python-format -msgid "Attach volume %(volume_id)s to instance %(server_id)s at %(device)s" -msgstr "把卷 %(volume_id)s 附加到实例 %(server_id)s 的 %(device)s 设备上" +msgid "" +"snap='%(snap)s' does not exist in 
base image='%(base)s' - aborting " +"incremental backup" +msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:481 -#: cinder/api/openstack/volume/snapshots.py:110 +#: cinder/backup/drivers/ceph.py:566 #, python-format -msgid "Delete snapshot with id: %s" -msgstr "删除id为 %s 的快照" +msgid "creating backup snapshot='%s'" +msgstr "" -#: cinder/api/openstack/compute/contrib/volumes.py:524 -#: cinder/api/openstack/volume/snapshots.py:150 +#: cinder/backup/drivers/ceph.py:586 #, python-format -msgid "Create snapshot from volume %s" -msgstr "为卷 %s 创建快照" +msgid "differential backup transfer completed in %.4fs" +msgstr "" -#: cinder/auth/fakeldap.py:33 -msgid "Attempted to instantiate singleton" -msgstr "试图实例化单例" +#: cinder/backup/drivers/ceph.py:595 +msgid "differential backup transfer failed" +msgstr "" -#: cinder/auth/ldapdriver.py:650 +#: cinder/backup/drivers/ceph.py:625 #, python-format -msgid "" -"Attempted to remove the last member of a group. Deleting the group at %s " -"instead." -msgstr "尝试删除组中最后一个成员,用删除组 %s 来代替。" +msgid "creating base image='%s'" +msgstr "" -#: cinder/auth/manager.py:298 -#, python-format -msgid "Looking up user: %r" -msgstr "查找用户:%r" +#: cinder/backup/drivers/ceph.py:634 +msgid "copying data" +msgstr "" -#: cinder/auth/manager.py:302 +#: cinder/backup/drivers/ceph.py:694 #, python-format -msgid "Failed authorization for access key %s" -msgstr "访问密钥 %s 授权失败" +msgid "looking for snapshot of backup base '%s'" +msgstr "" -#: cinder/auth/manager.py:308 +#: cinder/backup/drivers/ceph.py:697 #, python-format -msgid "Using project name = user name (%s)" -msgstr "使用用户名称来作为项目名称 (%s)" +msgid "backup base '%s' has no snapshots" +msgstr "" -#: cinder/auth/manager.py:315 +#: cinder/backup/drivers/ceph.py:704 #, python-format -msgid "failed authorization: no project named %(pjid)s (user=%(uname)s)" -msgstr "授权失败:没有此项目名称 %(pjid)s (user=%(uname)s)" +msgid "backup '%s' has no snapshot" +msgstr "" -#: cinder/auth/manager.py:324 +#: cinder/backup/drivers/ceph.py:708 #, python-format -msgid "" -"Failed authorization: user %(uname)s not admin and not member of project " -"%(pjname)s" -msgstr "授权失败:用户 %(uname)s 不是管理员,也不是项目 %(pjname)s 的成员" +msgid "backup should only have one snapshot but instead has %s" +msgstr "" -#: cinder/auth/manager.py:331 cinder/auth/manager.py:343 +#: cinder/backup/drivers/ceph.py:713 #, python-format -msgid "user.secret: %s" -msgstr "user.secret: %s" +msgid "found snapshot '%s'" +msgstr "" -#: cinder/auth/manager.py:332 cinder/auth/manager.py:344 -#, python-format -msgid "expected_signature: %s" -msgstr "expected_signature: %s" +#: cinder/backup/drivers/ceph.py:734 +msgid "need non-zero volume size" +msgstr "" -#: cinder/auth/manager.py:333 cinder/auth/manager.py:345 +#: cinder/backup/drivers/ceph.py:751 #, python-format -msgid "signature: %s" -msgstr "签名: %s" +msgid "Starting backup of volume='%s'" +msgstr "" -#: cinder/auth/manager.py:335 cinder/auth/manager.py:357 -#, python-format -msgid "Invalid signature for user %s" -msgstr "用户 %s 的签名无效" +#: cinder/backup/drivers/ceph.py:764 +msgid "forcing full backup" +msgstr "" -#: cinder/auth/manager.py:353 +#: cinder/backup/drivers/ceph.py:776 #, python-format -msgid "host_only_signature: %s" -msgstr "host_only_signature: %s" +msgid "backup '%s' finished." 
+msgstr "" -#: cinder/auth/manager.py:449 -msgid "Must specify project" -msgstr "必须指定项目" +#: cinder/backup/drivers/ceph.py:834 +msgid "adjusting restore vol size" +msgstr "" -#: cinder/auth/manager.py:490 +#: cinder/backup/drivers/ceph.py:846 #, python-format -msgid "Adding role %(role)s to user %(uid)s in project %(pid)s" -msgstr "为用户 %(uid)s 添加角色 %(role)s 用户位置在%(pid)s 项目里" +msgid "trying incremental restore from base='%(base)s' snap='%(snap)s'" +msgstr "" -#: cinder/auth/manager.py:493 -#, python-format -msgid "Adding sitewide role %(role)s to user %(uid)s" -msgstr "添加全局角色 %(role)s 给用户 %(uid)s" +#: cinder/backup/drivers/ceph.py:858 +msgid "differential restore failed, trying full restore" +msgstr "" -#: cinder/auth/manager.py:519 +#: cinder/backup/drivers/ceph.py:869 #, python-format -msgid "Removing role %(role)s from user %(uid)s on project %(pid)s" -msgstr "移除用户 %(uid)s 的角色 %(role)s 用户位置是在项目 %(pid)s 里" +msgid "restore transfer completed in %.4fs" +msgstr "" -#: cinder/auth/manager.py:522 +#: cinder/backup/drivers/ceph.py:916 #, python-format -msgid "Removing sitewide role %(role)s from user %(uid)s" -msgstr "移除全局角色 %(role)s 对象是用户 %(uid)s" +msgid "rbd has %s extents" +msgstr "" -#: cinder/auth/manager.py:595 -#, python-format -msgid "Created project %(name)s with manager %(manager_user)s" -msgstr "创建项目 %(name)s 通过管理员 %(manager_user)s" +#: cinder/backup/drivers/ceph.py:938 +msgid "dest volume is original volume - forcing full copy" +msgstr "" -#: cinder/auth/manager.py:613 -#, python-format -msgid "modifying project %s" -msgstr "正在修改项目 %s" +#: cinder/backup/drivers/ceph.py:959 +msgid "destination has extents - forcing full copy" +msgstr "" -#: cinder/auth/manager.py:625 +#: cinder/backup/drivers/ceph.py:964 #, python-format -msgid "Adding user %(uid)s to project %(pid)s" -msgstr "添加用户 %(uid)s 到项目%(pid)s" +msgid "no restore point found for backup='%s', forcing full copy" +msgstr "" -#: cinder/auth/manager.py:646 -#, python-format -msgid "Remove user %(uid)s from project %(pid)s" -msgstr "移除用户 %(uid)s 于项目 %(pid)s 中" +#: cinder/backup/drivers/ceph.py:995 +msgid "forcing full restore" +msgstr "" -#: cinder/auth/manager.py:676 +#: cinder/backup/drivers/ceph.py:1005 #, python-format -msgid "Deleting project %s" -msgstr "删除项目 %s" +msgid "starting restore from Ceph backup=%(src)s to volume=%(dest)s" +msgstr "" -#: cinder/auth/manager.py:734 -#, python-format -msgid "Created user %(rvname)s (admin: %(rvadmin)r)" -msgstr "已创建用户 %(rvname)s (admin: %(rvadmin)r)" +#: cinder/backup/drivers/ceph.py:1016 +msgid "volume_file does not support fileno() so skipping fsync()" +msgstr "" -#: cinder/auth/manager.py:743 -#, python-format -msgid "Deleting user %s" -msgstr "删除用户 %s" +#: cinder/backup/drivers/ceph.py:1021 +msgid "restore finished successfully." 
+msgstr "" -#: cinder/auth/manager.py:753 +#: cinder/backup/drivers/ceph.py:1023 #, python-format -msgid "Access Key change for user %s" -msgstr "修改用户 %s 的访问密钥" +msgid "restore finished with error - %s" +msgstr "" -#: cinder/auth/manager.py:755 +#: cinder/backup/drivers/ceph.py:1029 #, python-format -msgid "Secret Key change for user %s" -msgstr "修改用户 %s 的私钥" +msgid "delete started for backup=%s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1034 +msgid "rbd image not found but continuing anyway so that db entry can be removed" +msgstr "" -#: cinder/auth/manager.py:757 +#: cinder/backup/drivers/ceph.py:1037 #, python-format -msgid "Admin status set to %(admin)r for user %(uid)s" -msgstr "将管理员状态设置为 %(admin)r 目的是针对用户 %(uid)s" +msgid "delete '%s' finished with warning" +msgstr "" -#: cinder/auth/manager.py:802 +#: cinder/backup/drivers/ceph.py:1039 #, python-format -msgid "No vpn data for project %s" -msgstr "没有 %s 项目的vpn数据" +msgid "delete '%s' finished" +msgstr "" -#: cinder/cloudpipe/pipelib.py:46 +#: cinder/backup/drivers/swift.py:106 #, fuzzy, python-format -msgid "Instance type for vpn instances" -msgstr "无效的实例类型 %(instance_type)s。" - -#: cinder/cloudpipe/pipelib.py:49 -msgid "Template for cloudpipe instance boot script" -msgstr "cloudpipe实例的启动脚本模板" - -#: cinder/cloudpipe/pipelib.py:52 -msgid "Network to push into openvpn config" -msgstr "网络地址已存入openvpn配置" +msgid "unsupported compression algorithm: %s" +msgstr "不支持的分区:%s" -#: cinder/cloudpipe/pipelib.py:55 -msgid "Netmask to push into openvpn config" -msgstr "子网掩码已存入openvpn配置" +#: cinder/backup/drivers/swift.py:123 +#, python-format +msgid "single_user auth mode enabled, but %(param)s not set" +msgstr "" -#: cinder/cloudpipe/pipelib.py:107 +#: cinder/backup/drivers/swift.py:141 #, python-format -msgid "Launching VPN for %s" -msgstr "启动VPN %s" +msgid "_check_container_exists: container: %s" +msgstr "" -#: cinder/compute/api.py:141 -msgid "No compute host specified" -msgstr "未指定计算宿主机" +#: cinder/backup/drivers/swift.py:146 +#, fuzzy, python-format +msgid "container %s does not exist" +msgstr "实例不存在" -#: cinder/compute/api.py:144 -#, python-format -msgid "Unable to find host for Instance %s" -msgstr "无法找到实例 %s 的宿主机" +#: cinder/backup/drivers/swift.py:151 +#, fuzzy, python-format +msgid "container %s exists" +msgstr "实例不存在" -#: cinder/compute/api.py:192 +#: cinder/backup/drivers/swift.py:157 #, python-format -msgid "" -"Quota exceeded for %(pid)s, tried to set %(num_metadata)s metadata " -"properties" -msgstr "%(pid)s 已经超过配额,试图设置 %(num_metadata)s 个元数据属性" +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" -#: cinder/compute/api.py:203 +#: cinder/backup/drivers/swift.py:173 #, python-format -msgid "Quota exceeded for %(pid)s, metadata property key or value too long" -msgstr "%(pid)s 已经超过配额,元数据属性键或值太长" - -#: cinder/compute/api.py:257 -#, fuzzy -msgid "Cannot run any more instances of this type." -msgstr "超过实例的配额。您无法运行更多此类型的实例。" - -#: cinder/compute/api.py:259 -#, fuzzy, python-format -msgid "Can only run %s more instances of this type." -msgstr "超过实例的配额。您只能再运行 %s 个此类型的实例。" +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" -#: cinder/compute/api.py:261 +#: cinder/backup/drivers/swift.py:182 #, fuzzy, python-format -msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances. 
" -msgstr "%(pid)s 已经超过配额,试图运行 %(min_count)s 个实例" - -#: cinder/compute/api.py:310 -msgid "Creating a raw instance" -msgstr "正在创建裸实例" +msgid "generated object list: %s" +msgstr "期望的对象类型:%s" -#: cinder/compute/api.py:312 +#: cinder/backup/drivers/swift.py:192 #, python-format -msgid "Using Kernel=%(kernel_id)s, Ramdisk=%(ramdisk_id)s" -msgstr "使用 Kernel=%(kernel_id)s, Ramdisk=%(ramdisk_id)s" +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" -#: cinder/compute/api.py:383 +#: cinder/backup/drivers/swift.py:209 #, python-format -msgid "Going to run %s instances..." -msgstr "准备运行 %s 个实例" +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" -#: cinder/compute/api.py:447 -#, python-format -msgid "bdm %s" -msgstr "bdm %s" +#: cinder/backup/drivers/swift.py:214 +msgid "_write_metadata finished" +msgstr "" -#: cinder/compute/api.py:474 +#: cinder/backup/drivers/swift.py:219 #, python-format -msgid "block_device_mapping %s" -msgstr "block_device_mapping %s" +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" -#: cinder/compute/api.py:591 +#: cinder/backup/drivers/swift.py:224 #, python-format -msgid "Sending create to scheduler for %(pid)s/%(uid)s's" -msgstr "为 %(pid)s/%(uid)s 向调度器发送创建命令" +msgid "_read_metadata finished (%s)" +msgstr "" -#: cinder/compute/api.py:871 +#: cinder/backup/drivers/swift.py:234 #, fuzzy, python-format -msgid "Going to try to soft delete instance" -msgstr "准备尝试软删除实例 %s" +msgid "volume size %d is invalid." +msgstr "请求无效。" -#: cinder/compute/api.py:891 -#, fuzzy, python-format -msgid "No host for instance, deleting immediately" -msgstr "没有托管实例 %s,立刻删除" +#: cinder/backup/drivers/swift.py:248 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" -#: cinder/compute/api.py:939 -#, fuzzy, python-format -msgid "Going to try to terminate instance" -msgstr "准备尝试终止实例 %s" +#: cinder/backup/drivers/swift.py:271 +msgid "reading chunk of data from volume" +msgstr "" -#: cinder/compute/api.py:977 -#, fuzzy, python-format -msgid "Going to try to stop instance" -msgstr "准备尝试停止实例 %s" +#: cinder/backup/drivers/swift.py:278 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" -#: cinder/compute/api.py:996 -#, fuzzy, python-format -msgid "Going to try to start instance" -msgstr "准备尝试启动实例 %s" +#: cinder/backup/drivers/swift.py:287 +msgid "not compressing data" +msgstr "" -#: cinder/compute/api.py:1000 -#, python-format -msgid "Instance %(instance_uuid)s is not stopped. (%(vm_state)s" -msgstr "实例 %(instance_uuid)s 没有停止。(%(vm_state)s" +#: cinder/backup/drivers/swift.py:291 +msgid "About to put_object" +msgstr "" -#: cinder/compute/api.py:1071 cinder/volume/api.py:173 -#: cinder/volume/volume_types.py:64 +#: cinder/backup/drivers/swift.py:297 #, python-format -msgid "Searching by: %s" -msgstr "搜索条件: %s" +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" -#: cinder/compute/api.py:1201 +#: cinder/backup/drivers/swift.py:301 #, python-format -msgid "Image type not recognized %s" -msgstr "无法识别镜像类型 %s" - -#: cinder/compute/api.py:1369 -msgid "flavor_id is None. Assuming migration." 
-msgstr "flavor_id为空。假定在迁移。" +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" -#: cinder/compute/api.py:1377 +#: cinder/backup/drivers/swift.py:304 #, python-format msgid "" -"Old instance type %(current_instance_type_name)s, new instance type " -"%(new_instance_type_name)s" -msgstr "旧的实例类型 %(current_instance_type_name)s,新的实例类型 %(new_instance_type_name)s" - -#: cinder/compute/api.py:1644 -#, python-format -msgid "multiple fixedips exist, using the first: %s" -msgstr "存在多个固定IP,使用第一个:%s" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" -#: cinder/compute/instance_types.py:57 cinder/compute/instance_types.py:65 -msgid "create arguments must be positive integers" -msgstr "创建参数必须是正整数" +#: cinder/backup/drivers/swift.py:312 +msgid "Calling eventlet.sleep(0)" +msgstr "" -#: cinder/compute/instance_types.py:76 cinder/volume/volume_types.py:41 +#: cinder/backup/drivers/swift.py:328 cinder/backup/drivers/tsm.py:324 #, python-format -msgid "DB error: %s" -msgstr "数据库错误:%s" +msgid "backup %s finished." +msgstr "" -#: cinder/compute/instance_types.py:86 +#: cinder/backup/drivers/swift.py:345 #, python-format -msgid "Instance type %s not found for deletion" -msgstr "未找到要删除的实例类型 %s" +msgid "v1 swift volume backup restore of %s started" +msgstr "" -#: cinder/compute/manager.py:138 +#: cinder/backup/drivers/swift.py:350 #, python-format -msgid "check_instance_lock: decorating: |%s|" -msgstr "check_instance_lock: decorating: |%s|" +msgid "metadata_object_names = %s" +msgstr "" -#: cinder/compute/manager.py:140 -#, python-format +#: cinder/backup/drivers/swift.py:356 msgid "" -"check_instance_lock: arguments: |%(self)s| |%(context)s| " -"|%(instance_uuid)s|" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" msgstr "" -"check_instance_lock: arguments: |%(self)s| |%(context)s| " -"|%(instance_uuid)s|" -#: cinder/compute/manager.py:144 +#: cinder/backup/drivers/swift.py:362 #, python-format -msgid "check_instance_lock: locked: |%s|" -msgstr "check_instance_lock: 锁定: |%s|" +msgid "" +"restoring object from swift. 
backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" -#: cinder/compute/manager.py:146 +#: cinder/backup/drivers/swift.py:378 #, python-format -msgid "check_instance_lock: admin: |%s|" -msgstr "check_instance_lock: admin: |%s|" +msgid "decompressing data using %s algorithm" +msgstr "" -#: cinder/compute/manager.py:151 +#: cinder/backup/drivers/swift.py:401 #, python-format -msgid "check_instance_lock: executing: |%s|" -msgstr "check_instance_lock: 执行中: |%s|" +msgid "v1 swift volume backup restore of %s finished" +msgstr "" -#: cinder/compute/manager.py:155 +#: cinder/backup/drivers/swift.py:409 #, python-format -msgid "check_instance_lock: not executing |%s|" -msgstr "check_instance_lock: 未执行 |%s|" +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" -#: cinder/compute/manager.py:201 +#: cinder/backup/drivers/swift.py:423 #, python-format -msgid "Unable to load the virtualization driver: %s" -msgstr "无法加载虚拟驱动:%s" +msgid "Restoring swift backup version %s" +msgstr "" -#: cinder/compute/manager.py:223 +#: cinder/backup/drivers/swift.py:428 #, python-format -msgid "" -"Instance %(instance_uuid)s has been destroyed from under us while trying " -"to set it to ERROR" +msgid "No support to restore swift backup version %s" msgstr "" -#: cinder/compute/manager.py:240 +#: cinder/backup/drivers/swift.py:432 cinder/backup/drivers/tsm.py:378 #, python-format -msgid "Current state is %(drv_state)s, state in DB is %(db_state)s." -msgstr "当前状态是 %(drv_state)s,数据库状态是 %(db_state)s。" - -#: cinder/compute/manager.py:245 -msgid "Rebooting instance after cinder-compute restart." -msgstr "cinder-compute重启后,实例正在重启。" - -#: cinder/compute/manager.py:255 -msgid "Hypervisor driver does not support firewall rules" -msgstr "虚拟机管理程序驱动不支持防火墙规则" +msgid "restore %(backup_id)s to %(volume_id)s finished." +msgstr "" -#: cinder/compute/manager.py:260 -msgid "Checking state" -msgstr "正在检查状态" +#: cinder/backup/drivers/swift.py:446 +msgid "swift error while listing objects, continuing with delete" +msgstr "" -#: cinder/compute/manager.py:329 +#: cinder/backup/drivers/swift.py:455 #, python-format -msgid "Setting up bdm %s" -msgstr "正在设置 bdm %s" +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" -#: cinder/compute/manager.py:400 +#: cinder/backup/drivers/swift.py:458 #, python-format -msgid "Instance %s already deleted from database. Attempting forceful vm deletion" +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" msgstr "" -#: cinder/compute/manager.py:406 +#: cinder/backup/drivers/swift.py:468 cinder/backup/drivers/tsm.py:440 #, fuzzy, python-format -msgid "Exception encountered while terminating the instance %s" -msgstr "终止实例 %(instance_uuid)s 时发生异常" +msgid "delete %s finished" +msgstr "_delete: %s" -#: cinder/compute/manager.py:444 +#: cinder/backup/drivers/tsm.py:85 #, python-format -msgid "Instance %s not found." 
-msgstr "未找到实例 %s" - -#: cinder/compute/manager.py:480 -msgid "Instance has already been created" -msgstr "实例已经创建" +msgid "" +"backup: %(vol_id)s Failed to create device hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" -#: cinder/compute/manager.py:523 +#: cinder/backup/drivers/tsm.py:143 #, python-format msgid "" -"image_id=%(image_id)s, image_size_bytes=%(size_bytes)d, " -"allowed_size_bytes=%(allowed_size_bytes)d" +"backup: %(vol_id)s Failed to obtain backup success notification from " +"server.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -"image_id=%(image_id)s, image_size_bytes=%(size_bytes)d, " -"allowed_size_bytes=%(allowed_size_bytes)d" -#: cinder/compute/manager.py:528 +#: cinder/backup/drivers/tsm.py:173 #, python-format msgid "" -"Image '%(image_id)s' size %(size_bytes)d exceeded instance_type allowed " -"size %(allowed_size_bytes)d" +"restore: %(vol_id)s Failed.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -"镜像 '%(image_id)s' 的大小为 %(size_bytes)d ,超过实例类型 instance_type 所允许的大小 " -"%(allowed_size_bytes)d" -#: cinder/compute/manager.py:538 -msgid "Starting instance..." -msgstr "正在启动虚拟机" - -#: cinder/compute/manager.py:548 -msgid "Skipping network allocation for instance" -msgstr "实例跳过网络分配" - -#: cinder/compute/manager.py:561 -msgid "Instance failed network setup" -msgstr "实例网络设置失败" - -#: cinder/compute/manager.py:565 +#: cinder/backup/drivers/tsm.py:199 #, python-format -msgid "Instance network_info: |%s|" -msgstr "实例的network_info:|%s|" - -#: cinder/compute/manager.py:578 -msgid "Instance failed block device setup" -msgstr "实例块设备设置失败" - -#: cinder/compute/manager.py:594 -msgid "Instance failed to spawn" -msgstr "实例生产失败" - -#: cinder/compute/manager.py:615 -msgid "Deallocating network for instance" -msgstr "释放实例的网络" +msgid "backup: %(vol_id)s Failed. %(path)s is not a block device." +msgstr "" -#: cinder/compute/manager.py:672 +#: cinder/backup/drivers/tsm.py:206 #, python-format -msgid "%(action_str)s instance" -msgstr "%(action_str)s 实例" +msgid "backup: %(vol_id)s Failed. Cannot obtain real path to device %(path)s." +msgstr "" -#: cinder/compute/manager.py:699 +#: cinder/backup/drivers/tsm.py:213 #, python-format -msgid "Ignoring DiskNotFound: %s" -msgstr "忽略异常 DiskNotFound: %s" +msgid "backup: %(vol_id)s Failed. %(path)s is not a file." +msgstr "" -#: cinder/compute/manager.py:708 +#: cinder/backup/drivers/tsm.py:260 #, python-format -msgid "terminating bdm %s" -msgstr "终止bdm %s" +msgid "" +"backup: %(vol_id)s Failed to remove backup hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" -#: cinder/compute/manager.py:742 cinder/compute/manager.py:1328 -#: cinder/compute/manager.py:1416 cinder/compute/manager.py:2501 +#: cinder/backup/drivers/tsm.py:286 #, python-format -msgid "%s. Setting instance vm_state to ERROR" -msgstr "%s。把实例的 vm_state设置为ERROR" +msgid "" +"starting backup of volume: %(volume_id)s to TSM, volume path: " +"%(volume_path)s," +msgstr "" -#: cinder/compute/manager.py:811 +#: cinder/backup/drivers/tsm.py:298 #, python-format msgid "" -"Cannot rebuild instance [%(instance_uuid)s], because the given image does" -" not exist." 
-msgstr "无法重建实例 [%(instance_uuid)s],因为给定的镜像不存在。" +"backup: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" -#: cinder/compute/manager.py:816 +#: cinder/backup/drivers/tsm.py:308 #, python-format -msgid "Cannot rebuild instance [%(instance_uuid)s]: %(exc)s" -msgstr "无法重新创建实例 [%(instance_uuid)s]: %(exc)s" +msgid "" +"backup: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" -#: cinder/compute/manager.py:823 +#: cinder/backup/drivers/tsm.py:338 #, python-format -msgid "Rebuilding instance %s" -msgstr "正在重新创建实例 %s" +msgid "" +"restore: starting restore of backup from TSM to volume %(volume_id)s, " +"backup: %(backup_id)s" +msgstr "" -#: cinder/compute/manager.py:876 +#: cinder/backup/drivers/tsm.py:352 #, python-format -msgid "Rebooting instance %s" -msgstr "正在重启虚拟机 %s" +msgid "" +"restore: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" -#: cinder/compute/manager.py:891 +#: cinder/backup/drivers/tsm.py:362 #, python-format msgid "" -"trying to reboot a non-running instance: %(instance_uuid)s (state: " -"%(state)s expected: %(running)s)" -msgstr "试图重启一个没有运行的实例:%(instance_uuid)s (状态:%(state)s 预计:%(running)s)" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" -#: cinder/compute/manager.py:933 +#: cinder/backup/drivers/tsm.py:413 #, python-format -msgid "instance %s: snapshotting" -msgstr "实例 %s: 快照中" +msgid "" +"delete: %(vol_id)s Failed to run dsmc with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" -#: cinder/compute/manager.py:939 +#: cinder/backup/drivers/tsm.py:421 #, python-format msgid "" -"trying to snapshot a non-running instance: %(instance_uuid)s (state: " -"%(state)s expected: %(running)s)" -msgstr "试图为一个没有运行的实例快照:%(instance_uuid)s (状态:%(state)s 预计:%(running)s)" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments with " +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" -#: cinder/compute/manager.py:995 +#: cinder/backup/drivers/tsm.py:432 #, python-format -msgid "Found %(num_images)d images (rotation: %(rotation)d)" -msgstr "找到 %(num_images)d 个镜像 (rotation: %(rotation)d)" +msgid "" +"delete: %(vol_id)s Failed with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" -#: cinder/compute/manager.py:1001 +#: cinder/brick/exception.py:55 #, python-format -msgid "Rotating out %d backups" -msgstr "轮换出%d个备份" +msgid "Exception in string format operation. msg='%s'" +msgstr "" + +#: cinder/brick/exception.py:85 +msgid "We are unable to locate any Fibre Channel devices." +msgstr "" + +#: cinder/brick/exception.py:89 +msgid "Unable to find a Fibre Channel volume device." +msgstr "" -#: cinder/compute/manager.py:1005 +#: cinder/brick/exception.py:93 #, python-format -msgid "Deleting image %s" -msgstr "正在删除镜像 %s" +msgid "Volume device not found at %(device)s." +msgstr "" -#: cinder/compute/manager.py:1035 +#: cinder/brick/exception.py:97 #, python-format -msgid "Failed to set admin password. Instance %s is not running" -msgstr "设置管理员密码失败。实例 %s 没有运行" +msgid "Unable to find Volume Group: %(vg_name)s" +msgstr "" -#: cinder/compute/manager.py:1041 +#: cinder/brick/exception.py:101 #, python-format -msgid "Instance %s: Root password set" -msgstr "实例 %s:Root密码已设置" +msgid "Failed to create Volume Group: %(vg_name)s" +msgstr "" -#: cinder/compute/manager.py:1050 -msgid "set_admin_password is not implemented by this driver." 
-msgstr "该驱动不能执行set_admin_password。" +#: cinder/brick/exception.py:105 +#, fuzzy, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "没有为卷 %(volume_id)s 找到目标id。" + +#: cinder/brick/exception.py:109 +#, fuzzy, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "没有为卷 %(volume_id)s 找到目标id。" -#: cinder/compute/manager.py:1064 -msgid "Error setting admin password" -msgstr "设置管理员密码出错" +#: cinder/brick/exception.py:113 +#, fuzzy, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "没有为卷 %(volume_id)s 找到目标id。" -#: cinder/compute/manager.py:1079 +#: cinder/brick/exception.py:117 #, python-format -msgid "" -"trying to inject a file into a non-running instance: %(instance_uuid)s " -"(state: %(current_power_state)s expected: %(expected_state)s)" +msgid "Connect to volume via protocol %(protocol)s not supported." msgstr "" -"试图把一个文件注入到没有运行的实例:%(instance_uuid)s (状态: %(current_power_state)s 预计: " -"%(expected_state)s)" -#: cinder/compute/manager.py:1084 +#: cinder/brick/initiator/connector.py:127 #, python-format -msgid "instance %(instance_uuid)s: injecting file to %(path)s" -msgstr "实例 %(instance_uuid)s:把文件注入 %(path)s" +msgid "Invalid InitiatorConnector protocol specified %(protocol)s" +msgstr "" -#: cinder/compute/manager.py:1098 +#: cinder/brick/initiator/connector.py:140 #, python-format -msgid "" -"trying to update agent on a non-running instance: %(instance_uuid)s " -"(state: %(current_power_state)s expected: %(expected_state)s)" +msgid "Failed to access the device on the path %(path)s: %(error)s %(info)s." msgstr "" -"试图更新没有运行的实例上的代理:%(instance_uuid)s (状态: %(current_power_state)s 预计: " -"%(expected_state)s)" -#: cinder/compute/manager.py:1103 +#: cinder/brick/initiator/connector.py:229 #, python-format -msgid "instance %(instance_uuid)s: updating agent to %(url)s" -msgstr "实例 %(instance_uuid)s:正在把代理更新到 %(url)s" +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. Try" +" number: %(tries)s" +msgstr "" -#: cinder/compute/manager.py:1116 +#: cinder/brick/initiator/connector.py:242 #, python-format -msgid "instance %s: rescuing" -msgstr "实例 %s:正在救援中" +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" -#: cinder/compute/manager.py:1141 +#: cinder/brick/initiator/connector.py:317 #, python-format -msgid "instance %s: unrescuing" -msgstr "实例 %s:取消救援" +msgid "Could not find the iSCSI Initiator File %s" +msgstr "" -#: cinder/compute/manager.py:1270 -msgid "destination same as source!" -msgstr "目标与来源一样。" +#: cinder/brick/initiator/connector.py:609 +msgid "We are unable to locate any Fibre Channel devices" +msgstr "" -#: cinder/compute/manager.py:1287 +#: cinder/brick/initiator/connector.py:619 #, python-format -msgid "instance %s: migrating" -msgstr "实例 %s:正在迁移" +msgid "Looking for Fibre Channel dev %(device)s" +msgstr "" -#: cinder/compute/manager.py:1471 -#, python-format -msgid "instance %s: pausing" -msgstr "实例 %s: 暂停" +#: cinder/brick/initiator/connector.py:629 +msgid "Fibre Channel volume device not found." +msgstr "" -#: cinder/compute/manager.py:1489 +#: cinder/brick/initiator/connector.py:633 #, python-format -msgid "instance %s: unpausing" -msgstr "实例 %s: 取消暫停" +msgid "Fibre volume not yet found. Will rescan & retry. 
Try number: %(tries)s" +msgstr "" -#: cinder/compute/manager.py:1525 +#: cinder/brick/initiator/connector.py:649 #, python-format -msgid "instance %s: retrieving diagnostics" -msgstr "实例 %s :获取诊断" +msgid "Found Fibre Channel volume %(name)s (after %(tries)s rescans)" +msgstr "" -#: cinder/compute/manager.py:1534 +#: cinder/brick/initiator/connector.py:658 #, python-format -msgid "instance %s: suspending" -msgstr "实例 %s:挂起" +msgid "Multipath device discovered %(device)s" +msgstr "" -#: cinder/compute/manager.py:1556 +#: cinder/brick/initiator/connector.py:776 #, python-format -msgid "instance %s: resuming" -msgstr "实例 %s: 恢复中" +msgid "AoE volume not yet found at: %(path)s. Try number: %(tries)s" +msgstr "" -#: cinder/compute/manager.py:1579 +#: cinder/brick/initiator/connector.py:789 #, python-format -msgid "instance %s: locking" -msgstr "实例%s:锁定中" +msgid "Found AoE device %(path)s (after %(tries)s rediscover)" +msgstr "" -#: cinder/compute/manager.py:1588 +#: cinder/brick/initiator/connector.py:815 #, python-format -msgid "instance %s: unlocking" -msgstr "实例%s:取消锁定" +msgid "aoe-discover: stdout=%(out)s stderr%(err)s" +msgstr "" -#: cinder/compute/manager.py:1596 +#: cinder/brick/initiator/connector.py:825 #, python-format -msgid "instance %s: getting locked state" -msgstr "实例%s:获取锁定的状态" +msgid "aoe-revalidate %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" -#: cinder/compute/manager.py:1606 +#: cinder/brick/initiator/connector.py:834 #, python-format -msgid "instance %s: reset network" -msgstr "实例%s:重置网络" +msgid "aoe-flush %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" -#: cinder/compute/manager.py:1614 -#, python-format -msgid "instance %s: inject network info" -msgstr "实例 %s:注入网络信息" +#: cinder/brick/initiator/connector.py:858 +msgid "" +"Connection details not present. RemoteFsClient may not initialize " +"properly." +msgstr "" -#: cinder/compute/manager.py:1618 -#, python-format -msgid "network_info to inject: |%s|" -msgstr "将注入的network_info:|%s|" +#: cinder/brick/initiator/connector.py:915 +msgid "Invalid connection_properties specified no device_path attribute" +msgstr "" -#: cinder/compute/manager.py:1655 -#, python-format -msgid "instance %s: getting vnc console" -msgstr "实例 %s:正在获得VNC控制台" +#: cinder/brick/initiator/linuxfc.py:50 cinder/brick/initiator/linuxfc.py:56 +msgid "systool is not installed" +msgstr "" -#: cinder/compute/manager.py:1685 +#: cinder/brick/initiator/linuxscsi.py:99 +#: cinder/brick/initiator/linuxscsi.py:107 +#: cinder/brick/initiator/linuxscsi.py:124 #, python-format -msgid "Booting with volume %(volume_id)s at %(mountpoint)s" -msgstr "卷 %(volume_id)s 正在 %(mountpoint)s 上启动" +msgid "multipath call failed exit (%(code)s)" +msgstr "" -#: cinder/compute/manager.py:1703 -#, fuzzy, python-format -msgid "" -"instance %(instance_uuid)s: attaching volume %(volume_id)s to " -"%(mountpoint)s" -msgstr "正在把卷 %(volume_id)s 附加到 %(mountpoint)s" +#: cinder/brick/initiator/linuxscsi.py:145 +#, python-format +msgid "Couldn't find multipath device %(line)s" +msgstr "" -#: cinder/compute/manager.py:1705 +#: cinder/brick/initiator/linuxscsi.py:149 #, python-format -msgid "Attaching volume %(volume_id)s to %(mountpoint)s" -msgstr "正在把卷 %(volume_id)s 附加到 %(mountpoint)s" +msgid "Found multipath device = %(mdev)s" +msgstr "" -#: cinder/compute/manager.py:1714 -#, fuzzy, python-format -msgid "instance %(instance_uuid)s: attach failed %(mountpoint)s, removing" -msgstr "%(mountpoint)s 附加失败,移除中" +#: cinder/brick/iscsi/iscsi.py:140 +msgid "Attempting recreate of backing lun..." 
+msgstr "" -#: cinder/compute/manager.py:1724 +#: cinder/brick/iscsi/iscsi.py:158 #, python-format -msgid "Attach failed %(mountpoint)s, removing" -msgstr "%(mountpoint)s 附加失败,移除中" +msgid "" +"Failed to recover attempt to create iscsi backing lun for volume " +"id:%(vol_id)s: %(e)s" +msgstr "" -#: cinder/compute/manager.py:1752 +#: cinder/brick/iscsi/iscsi.py:177 #, python-format -msgid "Detach volume %(volume_id)s from mountpoint %(mp)s" -msgstr "卷 %(volume_id)s 从挂载点 %(mp)s 分离" +msgid "Creating iscsi_target for: %s" +msgstr "" -#: cinder/compute/manager.py:1756 +#: cinder/brick/iscsi/iscsi.py:184 #, python-format -msgid "Detaching volume from unknown instance %s" -msgstr "从未知实例%s中分离卷" +msgid "" +"Created volume path %(vp)s,\n" +"content: %(vc)%" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:216 cinder/brick/iscsi/iscsi.py:365 +#, fuzzy, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "没有为卷 %(volume_id)s 找到目标id。" -#: cinder/compute/manager.py:1822 +#: cinder/brick/iscsi/iscsi.py:227 #, python-format msgid "" -"Creating tmpfile %s to notify to other compute nodes that they should " -"mount the same storage." -msgstr "创建tmpfile %s 来通知其他的计算节点需要挂载相同的存储。" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" -#: cinder/compute/manager.py:1884 -msgid "Instance has no volume." -msgstr "实例没有卷。" +#: cinder/brick/iscsi/iscsi.py:258 +#, fuzzy, python-format +msgid "Removing iscsi_target for: %s" +msgstr "正在删除基文件:%s" -#: cinder/compute/manager.py:1916 +#: cinder/brick/iscsi/iscsi.py:262 #, python-format -msgid "plug_vifs() failed %(cnt)d.Retry up to %(max_retry)d for %(hostname)s." -msgstr "plug_vifs() 失败%(cnt)d 次。最多重新尝试 %(max_retry)d 次在主机 %(hostname)s。" +msgid "Volume path %s does not exist, nothing to remove." +msgstr "" -#: cinder/compute/manager.py:1973 -#, python-format -msgid "Pre live migration failed at %(dest)s" -msgstr "预在线迁移在%(dest)s失败" +#: cinder/brick/iscsi/iscsi.py:280 +#, fuzzy, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "尝试删除不存在的控制台%(console_id)s。" -#: cinder/compute/manager.py:2000 -msgid "post_live_migration() is started.." -msgstr "post_live_migration()已经启动。" +#: cinder/brick/iscsi/iscsi.py:290 cinder/brick/iscsi/iscsi.py:550 +msgid "valid iqn needed for show_target" +msgstr "" -#: cinder/compute/manager.py:2030 -msgid "No floating_ip found" -msgstr "没有找到floating_ip" +#: cinder/brick/iscsi/iscsi.py:375 +#, fuzzy, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "跳过remove_export。没有为卷提供iscsi_target:%d" -#: cinder/compute/manager.py:2038 -msgid "No floating_ip found." -msgstr "没有找到floating_ip" +#: cinder/brick/iscsi/iscsi.py:469 +msgid "cinder-rtstool is not installed correctly" +msgstr "" -#: cinder/compute/manager.py:2040 +#: cinder/brick/iscsi/iscsi.py:489 #, python-format -msgid "" -"Live migration: Unexpected error: cannot inherit floating ip.\n" -"%(e)s" +msgid "Creating iscsi_target for volume: %s" msgstr "" -"在线迁移:意外的错误:无法继承浮动ip。\n" -"%(e)s" -#: cinder/compute/manager.py:2073 +#: cinder/brick/iscsi/iscsi.py:513 cinder/brick/iscsi/iscsi.py:522 #, python-format -msgid "Migrating instance to %(dest)s finished successfully." -msgstr "把实例迁移到 %(dest)s 成功完成。" - -#: cinder/compute/manager.py:2075 -msgid "" -"You may see the error \"libvirt: QEMU error: Domain not found: no domain " -"with matching name.\" This error can be safely ignored." 
+msgid "Failed to create iscsi target for volume id:%s." msgstr "" -"你会看到错误“libvirt: QEMU error: Domain not found: no domain with matching " -"name。”这个错误可以放心的忽略。" -#: cinder/compute/manager.py:2090 -msgid "Post operation of migration started" -msgstr "迁移后操作启动" +#: cinder/brick/iscsi/iscsi.py:532 +#, fuzzy, python-format +msgid "Removing iscsi_target: %s" +msgstr "正在重启虚拟机 %s" -#: cinder/compute/manager.py:2226 +#: cinder/brick/iscsi/iscsi.py:542 #, python-format -msgid "Updated the info_cache for instance %s" +msgid "Failed to remove iscsi target for volume id:%s." msgstr "" -#: cinder/compute/manager.py:2255 -msgid "Updating bandwidth usage cache" -msgstr "更新带宽使用缓存" +#: cinder/brick/iscsi/iscsi.py:571 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" -#: cinder/compute/manager.py:2277 -msgid "Updating host status" -msgstr "更新主机状态" +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" -#: cinder/compute/manager.py:2305 +#: cinder/brick/local_dev/lvm.py:76 cinder/brick/local_dev/lvm.py:158 +#: cinder/brick/local_dev/lvm.py:474 cinder/brick/local_dev/lvm.py:503 +#: cinder/brick/local_dev/lvm.py:546 cinder/brick/local_dev/lvm.py:609 #, python-format -msgid "" -"Found %(num_db_instances)s in the database and %(num_vm_instances)s on " -"the hypervisor." -msgstr "在数据库中找到 %(num_db_instances)s个实例,在虚拟机管理程序找到 %(num_vm_instances)s 个实例。" +msgid "Cmd :%s" +msgstr "" -#: cinder/compute/manager.py:2331 +#: cinder/brick/local_dev/lvm.py:77 cinder/brick/local_dev/lvm.py:159 +#: cinder/brick/local_dev/lvm.py:475 cinder/brick/local_dev/lvm.py:504 +#: cinder/brick/local_dev/lvm.py:547 cinder/brick/local_dev/lvm.py:610 #, python-format -msgid "" -"During the sync_power process the instance %(uuid)s has moved from host " -"%(src)s to host %(dst)s" +msgid "StdOut :%s" msgstr "" -#: cinder/compute/manager.py:2344 +#: cinder/brick/local_dev/lvm.py:78 cinder/brick/local_dev/lvm.py:160 +#: cinder/brick/local_dev/lvm.py:476 cinder/brick/local_dev/lvm.py:505 +#: cinder/brick/local_dev/lvm.py:548 cinder/brick/local_dev/lvm.py:611 #, python-format -msgid "" -"Instance %s is in the process of migrating to this host. Wait next " -"sync_power cycle before setting power state to NOSTATE" +msgid "StdErr :%s" msgstr "" -#: cinder/compute/manager.py:2350 +#: cinder/brick/local_dev/lvm.py:82 #, fuzzy, python-format -msgid "" -"Instance found in database but not known by hypervisor. Setting power " -"state to NOSTATE" -msgstr "数据库中发现实例 %(name)s ,但是虚拟机管理程序不知道。设置加电状态为NOSTATE" - -#: cinder/compute/manager.py:2380 -msgid "FLAGS.reclaim_instance_interval <= 0, skipping..." -msgstr "FLAGS.reclaim_instance_interval <= 0,跳过..." +msgid "Unable to locate Volume Group %s" +msgstr "无法找到 %s 卷" -#: cinder/compute/manager.py:2392 -msgid "Reclaiming deleted instance" -msgstr "回收删除的实例" +#: cinder/brick/local_dev/lvm.py:157 +msgid "Error querying thin pool about data_percent" +msgstr "" -#: cinder/compute/manager.py:2458 +#: cinder/brick/local_dev/lvm.py:370 #, fuzzy, python-format -msgid "" -"Detected instance with name label '%(name)s' which is marked as DELETED " -"but still present on host." -msgstr "检测标签名为 '%(name_label)s' 的实例,这些实例被标识为DELETED却仍然存在于主机上。" +msgid "Unable to find VG: %s" +msgstr "无法为VDI %s 找到VBD" -#: cinder/compute/manager.py:2465 -#, fuzzy, python-format +#: cinder/brick/local_dev/lvm.py:420 msgid "" -"Destroying instance with name label '%(name)s' which is marked as DELETED" -" but still present on host." 
-msgstr "销毁标签名为 '%(name_label)s' 的实例,这些实例被标识为DELETED却仍然存在于主机上。" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" -#: cinder/compute/manager.py:2472 -#, python-format -msgid "Unrecognized value '%(action)s' for FLAGS.running_deleted_instance_action" -msgstr "无法识别的FLAGS.running_deleted_instance_action的取值 '%(action)s'" +#: cinder/brick/local_dev/lvm.py:473 +msgid "Error creating Volume" +msgstr "" -#: cinder/compute/manager.py:2542 +#: cinder/brick/local_dev/lvm.py:489 #, fuzzy, python-format -msgid "" -"Aggregate %(aggregate_id)s: unrecoverable state during operation on " -"%(host)s" -msgstr "聚合 %(aggregate_id)s已经有主机 %(host)s。" - -#: cinder/compute/utils.py:142 -msgid "v4 subnets are required for legacy nw_info" -msgstr "遗留的网络信息 nw_info 要求使用 IPv4 子网" - -#: cinder/console/manager.py:77 cinder/console/vmrc_manager.py:70 -msgid "Adding console" -msgstr "正在添加控制台" +msgid "Unable to find LV: %s" +msgstr "无法为VDI %s 找到VBD" -#: cinder/console/manager.py:97 -#, python-format -msgid "Tried to remove non-existent console %(console_id)s." -msgstr "尝试删除不存在的控制台%(console_id)s。" +#: cinder/brick/local_dev/lvm.py:502 +msgid "Error creating snapshot" +msgstr "" -#: cinder/console/vmrc_manager.py:122 -#, python-format -msgid "Tried to remove non-existent console %(console_id)s." -msgstr "尝试删除不存在的控制台%(console_id)s。" +#: cinder/brick/local_dev/lvm.py:545 +msgid "Error activating LV" +msgstr "" -#: cinder/console/vmrc_manager.py:125 +#: cinder/brick/local_dev/lvm.py:563 #, python-format -msgid "Removing console %(console_id)s." -msgstr "删除控制台%(console_id)s。" +msgid "Error reported running lvremove: CMD: %(command)s, RESPONSE: %(response)s" +msgstr "" -#: cinder/console/xvp.py:98 -msgid "Rebuilding xvp conf" -msgstr "重建xvp配置" +#: cinder/brick/local_dev/lvm.py:568 +msgid "Attempting udev settle and retry of lvremove..." +msgstr "" -#: cinder/console/xvp.py:116 -#, python-format -msgid "Re-wrote %s" -msgstr "重写%s" +#: cinder/brick/local_dev/lvm.py:608 +msgid "Error extending Volume" +msgstr "" -#: cinder/console/xvp.py:121 -msgid "Stopping xvp" -msgstr "停止xvp" +#: cinder/brick/remotefs/remotefs.py:39 +msgid "nfs_mount_point_base required" +msgstr "" -#: cinder/console/xvp.py:134 -msgid "Starting xvp" -msgstr "正在启动xvp" +#: cinder/brick/remotefs/remotefs.py:45 +msgid "glusterfs_mount_point_base required" +msgstr "" -#: cinder/console/xvp.py:141 +#: cinder/brick/remotefs/remotefs.py:86 #, python-format -msgid "Error starting xvp: %s" -msgstr "启动xvp发生错误:%s" +msgid "Already mounted: %s" +msgstr "" -#: cinder/console/xvp.py:144 -msgid "Restarting xvp" -msgstr "重启xvp" +#: cinder/common/config.py:125 +msgid "Deploy v1 of the Cinder API." +msgstr "" -#: cinder/console/xvp.py:146 -msgid "xvp not running..." -msgstr "xvp不在运行中" +#: cinder/common/config.py:128 +msgid "Deploy v2 of the Cinder API." +msgstr "" -#: cinder/consoleauth/manager.py:63 -#, python-format -msgid "Deleting Expired Token: (%s)" -msgstr "删除过期令牌:(%s)" +#: cinder/common/sqlalchemyutils.py:66 +#: cinder/openstack/common/db/sqlalchemy/utils.py:72 +msgid "Id not in sort_keys; is sort_keys unique?" 
+msgstr "" -#: cinder/consoleauth/manager.py:75 -#, python-format -msgid "Received Token: %(token)s, %(token_dict)s)" -msgstr "接收到令牌:%(token)s, %(token_dict)s)" +#: cinder/common/sqlalchemyutils.py:114 +#: cinder/openstack/common/db/sqlalchemy/utils.py:120 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" -#: cinder/consoleauth/manager.py:79 +#: cinder/compute/nova.py:97 #, python-format -msgid "Checking Token: %(token)s, %(token_valid)s)" -msgstr "检查令牌:%(token)s, %(token_valid)s)" +msgid "Novaclient connection created using URL: %s" +msgstr "" -#: cinder/db/sqlalchemy/api.py:57 +#: cinder/db/sqlalchemy/api.py:63 msgid "Use of empty request context is deprecated" msgstr "使用空的请求上下文是不推荐的" -#: cinder/db/sqlalchemy/api.py:198 +#: cinder/db/sqlalchemy/api.py:190 #, python-format msgid "Unrecognized read_deleted value '%s'" msgstr "无法识别的 read_deleted 取值”%s“" -#: cinder/db/sqlalchemy/api.py:516 cinder/db/sqlalchemy/api.py:551 +#: cinder/db/sqlalchemy/api.py:843 #, python-format -msgid "No ComputeNode for %(host)s" -msgstr "主机 %(host)s 没有计算节点" +msgid "Change will make usage less than 0 for the following resources: %s" +msgstr "" -#: cinder/db/sqlalchemy/api.py:4019 cinder/db/sqlalchemy/api.py:4045 +#: cinder/db/sqlalchemy/api.py:1842 #, python-format -msgid "No backend config with id %(sm_backend_id)s" -msgstr "没有id为%(sm_backend_id)s的后台配置" +msgid "VolumeType %s deletion failed, VolumeType in use." +msgstr "" -#: cinder/db/sqlalchemy/api.py:4103 +#: cinder/db/sqlalchemy/api.py:2530 #, python-format -msgid "No sm_flavor called %(sm_flavor)s" -msgstr "没有 sm_flavor 调用 %(sm_flavor)s" +msgid "No backup with id %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2615 +#, fuzzy +msgid "Volume must be available" +msgstr "必须可用" -#: cinder/db/sqlalchemy/api.py:4147 +#: cinder/db/sqlalchemy/api.py:2639 #, python-format -msgid "No sm_volume with id %(volume_id)s" -msgstr "没有id为 %(volume_id)s 的 sm_volume" +msgid "Volume in unexpected state %s, expected awaiting-transfer" +msgstr "" -#: cinder/db/sqlalchemy/migration.py:66 -msgid "python-migrate is not installed. Exiting." -msgstr "没有安装 python-migrate。正退出。" +#: cinder/db/sqlalchemy/api.py:2662 +#, python-format +msgid "" +"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " +"%(status)s, expected awaiting-transfer" +msgstr "" -#: cinder/db/sqlalchemy/migration.py:78 +#: cinder/db/sqlalchemy/migration.py:37 msgid "version should be an integer" msgstr "version应该是整数" -#: cinder/db/sqlalchemy/session.py:137 -#, fuzzy, python-format -msgid "SQL connection failed. %s attempts left." -msgstr "SQL连接失败 (%(connstring)s)。还剩 %(attempts)d 次。" +#: cinder/db/sqlalchemy/migration.py:64 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:240 +msgid "Exception while creating table." +msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:48 -msgid "interface column not added to networks table" -msgstr "interface 列没有加入networks 表中" +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:269 +msgid "Downgrade from initial Cinder install is unsupported." 
+msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:80 -#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:54 -#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:61 -#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:48 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:49 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:74 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:105 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:45 +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:48 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:46 #, python-format msgid "Table |%s| not created!" msgstr "表 |%s| 没有创建" -#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:87 -msgid "VIF column not added to fixed_ips table" -msgstr "VIF 列没有加入到 fixed_ips 表中" - -#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:97 -#, python-format -msgid "join list for moving mac_addresses |%s|" -msgstr "为移动 mac_addresses |%s| 加入列表" - -#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:39 -#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:60 -#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:61 -#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:99 -msgid "foreign key constraint couldn't be added" -msgstr "外键约束无法添加" - -#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:58 -msgid "foreign key constraint couldn't be dropped" -msgstr "外键约束无法删除" +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:127 +msgid "Dropping foreign key reservations_ibfk_1 failed." +msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/045_add_network_priority.py:34 -msgid "priority column not added to networks table" -msgstr "priority列没有加入到 networks 表中" +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:133 +#, fuzzy +msgid "quota_classes table not dropped" +msgstr "instance_info_caches 没有删除掉" -#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:41 -#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:42 -#: cinder/db/sqlalchemy/migrate_repo/versions/064_change_instance_id_to_uuid_in_instance_actions.py:56 -#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:68 -msgid "foreign key constraint couldn't be removed" -msgstr "外键约束无法去除" +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:140 +#, fuzzy +msgid "quota_usages table not dropped" +msgstr "instance_info_caches 没有删除掉" -#: cinder/db/sqlalchemy/migrate_repo/versions/049_add_instances_progress.py:34 -msgid "progress column not added to instances table" -msgstr "progress列没有加入到实例表中" +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:147 +#, fuzzy +msgid "reservations table not dropped" +msgstr "dns_domains 表没有删除" -#: cinder/db/sqlalchemy/migrate_repo/versions/055_convert_flavor_id_to_str.py:97 -#, python-format -msgid "" -"Could not cast flavorid to integer: %s. Set flavorid to an integer-like " -"string to downgrade." 
-msgstr "无法把 flavorid 转化为整数:%s。设置 flavorid 成类似整数的字符串来降级。" +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:60 +msgid "Exception while creating table 'volume_glance_metadata'" +msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:69 -msgid "instance_info_caches tables not dropped" +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:75 +#, fuzzy +msgid "volume_glance_metadata table not dropped" msgstr "instance_info_caches 没有删除掉" -#: cinder/db/sqlalchemy/migrate_repo/versions/069_block_migration.py:41 -msgid "progress column not added to compute_nodes table" -msgstr "progress 列没有加入到 compute_nodes 表中" - -#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:76 -msgid "dns_domains table not dropped" +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:68 +#, fuzzy +msgid "backups table not dropped" msgstr "dns_domains 表没有删除" -#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:60 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:58 #, fuzzy -msgid "quota_classes table not dropped" +msgid "snapshot_metadata table not dropped" msgstr "instance_info_caches 没有删除掉" -#: cinder/image/glance.py:147 -msgid "Connection error contacting glance server, retrying" -msgstr "glance服务器连接错误,重试中" +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:61 +#, fuzzy +msgid "transfers table not dropped" +msgstr "dns_domains 表没有删除" -#: cinder/image/glance.py:153 cinder/network/quantum/melange_connection.py:104 -msgid "Maximum attempts reached" -msgstr "达到最大尝试次数" +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:31 +msgid "migrations table not dropped" +msgstr "" -#: cinder/image/glance.py:278 +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:61 #, python-format -msgid "Creating image in Glance. Metadata passed in %s" -msgstr "正在Glance中创建镜像。元数据 %s 已经传入。" +msgid "Table |%s| not created" +msgstr "" -#: cinder/image/glance.py:281 +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:37 #, python-format -msgid "Metadata after formatting for Glance %s" -msgstr "为Glance 进行格式化后的元数据 %s" +msgid "Exception while dropping table %s." +msgstr "" -#: cinder/image/glance.py:289 +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:100 #, python-format -msgid "Metadata returned from Glance formatted for Base %s" -msgstr "从Glance返回的为Base格式化的元数据 %s" - -#: cinder/image/glance.py:331 cinder/image/glance.py:335 -msgid "Not the image owner" -msgstr "不是镜像所有者" +msgid "Exception while creating table %s." +msgstr "" -#: cinder/image/glance.py:410 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:34 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:43 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:49 #, python-format -msgid "%(timestamp)s does not follow any of the signatures: %(iso_formats)s" -msgstr "%(timestamp)s 没有遵循任何签名格式:%(iso_formats)s" +msgid "Column |%s| not created!" 
+msgstr "" -#: cinder/image/s3.py:309 -#, python-format -msgid "Failed to download %(image_location)s to %(image_path)s" -msgstr "把 %(image_location)s 下载到 %(image_path)s失败" +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:92 +msgid "encryption_key_id column not dropped from volumes" +msgstr "" -#: cinder/image/s3.py:328 -#, python-format -msgid "Failed to decrypt %(image_location)s to %(image_path)s" -msgstr "解密 %(image_location)s 到 %(image_path)s失败" +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:100 +msgid "encryption_key_id column not dropped from snapshots" +msgstr "" -#: cinder/image/s3.py:340 -#, python-format -msgid "Failed to untar %(image_location)s to %(image_path)s" -msgstr "解包 %(image_location)s 到 %(image_path)s 失败" +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:105 +msgid "volume_type_id column not dropped from snapshots" +msgstr "" -#: cinder/image/s3.py:353 -#, python-format -msgid "Failed to upload %(image_location)s to %(image_path)s" -msgstr "上传 %(image_location)s 到 %(image_path)s 失败" +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:113 +msgid "encryption table not dropped" +msgstr "" -#: cinder/image/s3.py:379 -#, python-format -msgid "Failed to decrypt private key: %s" -msgstr "解密私钥失败:%s" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:49 +msgid "Table quality_of_service_specs not created!" +msgstr "" -#: cinder/image/s3.py:387 -#, python-format -msgid "Failed to decrypt initialization vector: %s" -msgstr "解密初始化vector失败:%s" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:60 +msgid "Added qos_specs_id column to volume type table failed." +msgstr "" -#: cinder/image/s3.py:398 -#, python-format -msgid "Failed to decrypt image file %(image_file)s: %(err)s" -msgstr "解密镜像文件 %(image_file)s 失败:%(err)s" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:85 +msgid "Dropping foreign key volume_types_ibfk_1 failed" +msgstr "" -#: cinder/image/s3.py:410 -msgid "Unsafe filenames in image" -msgstr "镜像中不安全的文件名" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:93 +msgid "Dropping qos_specs_id column failed." +msgstr "" -#: cinder/ipv6/account_identifier.py:38 cinder/ipv6/rfc2462.py:34 -#, python-format -msgid "Bad mac for to_global_ipv6: %s" -msgstr "错误的to_global_ipv6 mac:%s" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:100 +msgid "Dropping quality_of_service_specs table failed." +msgstr "" -#: cinder/ipv6/account_identifier.py:40 cinder/ipv6/rfc2462.py:36 -#, python-format -msgid "Bad prefix for to_global_ipv6: %s" -msgstr "错误的to_global_ipv6前缀:%s" +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:59 +msgid "volume_admin_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:48 +msgid "" +"Found existing 'default' entries in the quota_classes table. Skipping " +"insertion of default values." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:72 +msgid "Added default quota class data into the DB." +msgstr "" -#: cinder/ipv6/account_identifier.py:42 +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:74 +msgid "Default quota class data not inserted into the DB." 
+msgstr "" + +#: cinder/image/glance.py:161 cinder/image/glance.py:169 #, python-format -msgid "Bad project_id for to_global_ipv6: %s" -msgstr "错误的to_global_ipv6 oject_id;%s" +msgid "Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s." +msgstr "" -#: cinder/network/ldapdns.py:321 -msgid "This driver only supports type 'a' entries." -msgstr "驱动仅支持入口类型 'a'。" +#: cinder/image/image_utils.py:94 cinder/image/image_utils.py:199 +msgid "'qemu-img info' parsing failed." +msgstr "'qemu-img info'解析失败" -#: cinder/network/linux_net.py:166 +#: cinder/image/image_utils.py:101 #, python-format -msgid "Attempted to remove chain %s which does not exist" -msgstr "试图移除不存在的链 %s。" +msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgstr "" -#: cinder/network/linux_net.py:192 +#: cinder/image/image_utils.py:109 cinder/image/image_utils.py:192 #, python-format -msgid "Unknown chain: %r" -msgstr "未知链:%r" +msgid "" +"Size is %(image_size)dGB and doesn't fit in a volume of size " +"%(volume_size)dGB." +msgstr "" -#: cinder/network/linux_net.py:215 +#: cinder/image/image_utils.py:157 #, python-format msgid "" -"Tried to remove rule that was not there: %(chain)r %(rule)r %(wrap)r " -"%(top)r" -msgstr "试图移除不存在的规则:%(chain)r %(rule)r %(wrap)r %(top)r" +"qemu-img is not installed and image is of type %s. Only RAW images can " +"be used if qemu-img is not installed." +msgstr "" -#: cinder/network/linux_net.py:335 -msgid "IPTablesManager.apply completed with success" -msgstr "IPTablesManager.apply成功完成" +#: cinder/image/image_utils.py:164 +msgid "" +"qemu-img is not installed and the disk format is not specified. Only RAW" +" images can be used if qemu-img is not installed." +msgstr "" -#: cinder/network/linux_net.py:694 +#: cinder/image/image_utils.py:178 #, python-format -msgid "Hupping dnsmasq threw %s" -msgstr "挂起进程 dnsmasq 时抛出 %s" +msgid "Copying image from %(tmp)s to volume %(dest)s - size: %(size)s" +msgstr "" + +#: cinder/image/image_utils.py:206 +#, fuzzy, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "fmt=%(fmt)s 由 %(backing_file)s 支持" -#: cinder/network/linux_net.py:696 +#: cinder/image/image_utils.py:224 #, python-format -msgid "Pid %d is stale, relaunching dnsmasq" -msgstr "Pid %d 过期了,重新启动dnsmasq" +msgid "Converted to %(vol_format)s, but format is now %(file_format)s" +msgstr "" + +#: cinder/image/image_utils.py:260 +#, fuzzy, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "转化为裸格式,但目前格式是 %s" -#: cinder/network/linux_net.py:756 +#: cinder/keymgr/conf_key_mgr.py:78 +msgid "" +"config option keymgr.fixed_key has not been defined: some operations may " +"fail unexpectedly" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:80 +msgid "keymgr.fixed_key not defined" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:134 #, python-format -msgid "killing radvd threw %s" -msgstr "杀掉进程 radvd 时抛出 %s" +msgid "Not deleting key %s" +msgstr "" -#: cinder/network/linux_net.py:758 +#: cinder/openstack/common/eventlet_backdoor.py:140 #, python-format -msgid "Pid %d is stale, relaunching radvd" -msgstr "Pid %d 过期了,重新启动radvd" +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, fuzzy, python-format +msgid "Original exception being dropped: %s" +msgstr "正在丢弃原来的异常。" -#: cinder/network/linux_net.py:967 +#: cinder/openstack/common/fileutils.py:64 #, python-format -msgid "Starting VLAN inteface %s" -msgstr "正在开启VLAN接口 %s" +msgid "Reloading cached file %s" +msgstr "" + +#: 
cinder/openstack/common/gettextutils.py:252 +msgid "Message objects do not support addition." +msgstr "" + +#: cinder/openstack/common/gettextutils.py:261 +msgid "" +"Message objects do not support str() because they may contain non-ascii " +"characters. Please use unicode() or translate() instead." +msgstr "" -#: cinder/network/linux_net.py:999 +#: cinder/openstack/common/imageutils.py:96 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 #, python-format -msgid "Starting Bridge interface for %s" -msgstr "正在为 %s 开启桥接口" +msgid "Could not release the acquired lock `%s`" +msgstr "" -#: cinder/network/linux_net.py:1142 +#: cinder/openstack/common/lockutils.py:189 #, fuzzy, python-format -msgid "Starting bridge %s " -msgstr "保证桥 %s" +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "获得信号量 \"%(lock)s\" 为方法 \"%(method)s\" ...锁" -#: cinder/network/linux_net.py:1149 +#: cinder/openstack/common/lockutils.py:200 #, fuzzy, python-format -msgid "Done starting bridge %s" -msgstr "保证桥 %s" +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "正在 试图获取锁 \"%(lock)s\" 为方法 \"%(method)s\"...锁" -#: cinder/network/linux_net.py:1167 +#: cinder/openstack/common/lockutils.py:227 #, fuzzy, python-format -msgid "Failed unplugging gateway interface '%s'" -msgstr "移除实例”%s“的虚拟网络设备时失败" - -#: cinder/network/linux_net.py:1170 -#, python-format -msgid "Unplugged gateway interface '%s'" -msgstr "" +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "获得文件锁 \"%(lock)s\" 为方法 \"%(method)s\"...锁" -#: cinder/network/manager.py:291 +#: cinder/openstack/common/lockutils.py:235 #, fuzzy, python-format -msgid "Fixed ip %(fixed_ip_id)s not found" -msgstr "证书 %(certificate_id)s 没有找到。" +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." 
+msgstr "获得文件锁 \"%(lock)s\" 为方法 \"%(method)s\"...锁" -#: cinder/network/manager.py:300 cinder/network/manager.py:496 -#, python-format -msgid "Interface %(interface)s not found" -msgstr "没有找到接口 %(interface)s" +#: cinder/openstack/common/log.py:301 +#, fuzzy, python-format +msgid "Deprecated: %s" +msgstr "_delete: %s" -#: cinder/network/manager.py:315 +#: cinder/openstack/common/log.py:402 #, python-format -msgid "floating IP allocation for instance |%s|" -msgstr "为实例 |%s| 分配浮动IP" +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" -#: cinder/network/manager.py:353 +#: cinder/openstack/common/log.py:453 #, python-format -msgid "floating IP deallocation for instance |%s|" -msgstr "为实例 |%s| 释放浮动IP" +msgid "syslog facility must be one of: %s" +msgstr "syslog设备必须作为一个 %s 。" -#: cinder/network/manager.py:386 -#, python-format -msgid "Address |%(address)s| is not allocated" -msgstr "地址 |%(address)s| 没有分配" +#: cinder/openstack/common/log.py:623 +#, fuzzy, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "类 %(fullname)s 是不推荐的:%(msg)s" -#: cinder/network/manager.py:390 +#: cinder/openstack/common/loopingcall.py:82 #, python-format -msgid "Address |%(address)s| is not allocated to your project |%(project)s|" -msgstr "地址 |%(address)s| 没有分配给你的项目 |%(project)s|" +msgid "task run outlasted interval by %s sec" +msgstr "" -#: cinder/network/manager.py:402 -#, python-format -msgid "Quota exceeded for %s, tried to allocate address" -msgstr "%s 的配额超出,尝试分配地址" +#: cinder/openstack/common/loopingcall.py:89 +#: cinder/tests/brick/test_brick_connector.py:466 +#, fuzzy +msgid "in fixed duration looping call" +msgstr "循环调用中。" -#: cinder/network/manager.py:614 +#: cinder/openstack/common/loopingcall.py:129 #, python-format -msgid "" -"Database inconsistency: DNS domain |%s| is registered in the Cinder db but " -"not visible to either the floating or instance DNS driver. It will be " -"ignored." -msgstr "数据库不一致:DNS域|%s| 在Cinder数据库中注册,但是对浮动或者实例DNS驱动均不可见。将被忽略。" +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" -#: cinder/network/manager.py:660 -#, python-format -msgid "Domain |%(domain)s| already exists, changing zone to |%(av_zone)s|." -msgstr "域 |%(domain)s| 已经存在,把区域改变为 |%(av_zone)s|。" +#: cinder/openstack/common/loopingcall.py:136 +#, fuzzy +msgid "in dynamic looping call" +msgstr "循环调用中。" -#: cinder/network/manager.py:670 +#: cinder/openstack/common/periodic_task.py:43 #, python-format -msgid "Domain |%(domain)s| already exists, changing project to |%(project)s|." -msgstr "域 |%(domain)s| 已经存在,把项目改变为 |%(project)s|。" +msgid "Unexpected argument for periodic task creation: %(arg)s." 
+msgstr "" -#: cinder/network/manager.py:778 +#: cinder/openstack/common/periodic_task.py:134 #, python-format -msgid "Disassociated %s stale fixed ip(s)" -msgstr "解除 %s 过期固定ip的关联" - -#: cinder/network/manager.py:782 -msgid "setting network host" -msgstr "设置网络主机" +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" -#: cinder/network/manager.py:896 +#: cinder/openstack/common/periodic_task.py:139 #, python-format -msgid "network allocations for instance |%s|" -msgstr "为实例 |%s| 的网路分配" +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "" -#: cinder/network/manager.py:901 +#: cinder/openstack/common/periodic_task.py:177 #, python-format -msgid "networks retrieved for instance |%(instance_id)s|: |%(networks)s|" -msgstr "为实例 |%(instance_id)s| 获取的网络:|%(networks)s|" +msgid "Running periodic task %(full_task_name)s" +msgstr "正在运行周期性任务 %(full_task_name)s" -#: cinder/network/manager.py:930 +#: cinder/openstack/common/periodic_task.py:186 #, python-format -msgid "network deallocation for instance |%s|" -msgstr "为实例 |%s| 解除网络分配" +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "在 %(full_task_name)s 期间发生的错误:%(e)s" -#: cinder/network/manager.py:1152 +#: cinder/openstack/common/policy.py:149 #, python-format msgid "" -"instance-dns-zone is |%(domain)s|, which is in availability zone " -"|%(zone)s|. Instance |%(instance)s| is in zone |%(zone2)s|. No DNS record" -" will be created." +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." msgstr "" -"instance-dns-zone是 |%(domain)s|,该域位于区域 |%(zone)s| 中。实例|%(instance)s| 在区域 " -"|%(zone2)s| 里。没有DNS记录将创建。" -#: cinder/network/manager.py:1227 +#: cinder/openstack/common/policy.py:163 +#, fuzzy, python-format +msgid "Failed to understand rule %(match)r" +msgstr "注入文件失败:%(resp)r" + +#: cinder/openstack/common/policy.py:173 #, python-format -msgid "Unable to release %s because vif doesn't exist." +msgid "Inheritance-based rules are deprecated; update _check_%s" msgstr "" -#: cinder/network/manager.py:1244 +#: cinder/openstack/common/policy.py:180 #, python-format -msgid "Leased IP |%(address)s|" -msgstr "租用的IP |%(address)s|" +msgid "No handler for matches of kind %s" +msgstr "" -#: cinder/network/manager.py:1248 +#: cinder/openstack/common/processutils.py:127 #, python-format -msgid "IP %s leased that is not associated" -msgstr "没有关联的IP %s 被租用了" +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "发现未知的 utils.execute 关键字参数:%r" -#: cinder/network/manager.py:1256 +#: cinder/openstack/common/processutils.py:142 #, python-format -msgid "IP |%s| leased that isn't allocated" -msgstr "没有分配的IP |%s| 被租用了" +msgid "Running cmd (subprocess): %s" +msgstr "正在运行cmd (subprocess):%s" -#: cinder/network/manager.py:1261 +#: cinder/openstack/common/processutils.py:167 +#: cinder/openstack/common/processutils.py:239 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:345 #, python-format -msgid "Released IP |%(address)s|" -msgstr "释放的IP |%(address)s|" +msgid "Result was %s" +msgstr "运行结果为 %s" -#: cinder/network/manager.py:1265 +#: cinder/openstack/common/processutils.py:179 #, python-format -msgid "IP %s released that is not associated" -msgstr "没有关联的IP %s 被释放了" +msgid "%r failed. Retrying." 
+msgstr "%r 失败,重试。" -#: cinder/network/manager.py:1268 +#: cinder/openstack/common/processutils.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:318 #, python-format -msgid "IP %s released that was not leased" -msgstr "没有租用的IP %s 被释放了" - -#: cinder/network/manager.py:1331 -msgid "cidr already in use" -msgstr "cidr 已经在使用" +msgid "Running cmd (SSH): %s" +msgstr "运行cmd (SSH):%s" -#: cinder/network/manager.py:1334 -#, python-format -msgid "requested cidr (%(cidr)s) conflicts with existing supernet (%(super)s)" -msgstr "请求的cidr (%(cidr)s) 与已存在的超网络 (%(super)s) 冲突" +#: cinder/openstack/common/processutils.py:220 +msgid "Environment not supported over SSH" +msgstr "SSH上不支持环境变量" -#: cinder/network/manager.py:1345 -#, python-format -msgid "" -"requested cidr (%(cidr)s) conflicts with existing smaller cidr " -"(%(smaller)s)" -msgstr "请求的cidr (%(cidr)s) 与已存在的较小的cidr (%(smaller)s) 冲突" - -#: cinder/network/manager.py:1404 -msgid "Network already exists!" -msgstr "网络已经存在。" +#: cinder/openstack/common/processutils.py:224 +msgid "process_input not supported over SSH" +msgstr "SSH上不支持的进程输入参数。" -#: cinder/network/manager.py:1423 -#, python-format -msgid "Network must be disassociated from project %s before delete" -msgstr "网路在删除前必须与项目 %s 解除关联" +#: cinder/openstack/common/service.py:175 +#: cinder/openstack/common/service.py:269 +#, fuzzy, python-format +msgid "Caught %s, exiting" +msgstr "快照 %s:正在删除" -#: cinder/network/manager.py:1832 -msgid "" -"The sum between the number of networks and the vlan start cannot be " -"greater than 4094" -msgstr "网络数量与VLAN起始数之和不能大于4049" +#: cinder/openstack/common/service.py:187 +msgid "Exception during rpc cleanup." +msgstr "" -#: cinder/network/manager.py:1839 -#, python-format -msgid "" -"The network range is not big enough to fit %(num_networks)s. Network size" -" is %(network_size)s" -msgstr "网络范围不够多而不适合 %(num_networks)s。网络大小是 %(network_size)s。" +#: cinder/openstack/common/service.py:238 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" -#: cinder/network/minidns.py:65 -msgid "This driver only supports type 'a'" -msgstr "驱动仅支持类型 'a'" +#: cinder/openstack/common/service.py:275 +#, fuzzy +msgid "Unhandled exception" +msgstr "内层异常:%s" -#: cinder/network/quantum/client.py:154 -msgid "Tenant ID not set" -msgstr "租户ID没有设" +#: cinder/openstack/common/service.py:308 +msgid "Forking too fast, sleeping" +msgstr "" -#: cinder/network/quantum/client.py:180 +#: cinder/openstack/common/service.py:327 #, python-format -msgid "Quantum Client Request: %(method)s %(action)s" -msgstr "Quantum客户请求:%(method)s %(action)s" +msgid "Started child %d" +msgstr "" -#: cinder/network/quantum/client.py:196 -#, python-format -msgid "Quantum entity not found: %s" -msgstr "Quantum实体没有找到:%s" +#: cinder/openstack/common/service.py:337 +#, fuzzy, python-format +msgid "Starting %d workers" +msgstr "起始地址" -#: cinder/network/quantum/client.py:206 +#: cinder/openstack/common/service.py:354 #, python-format -msgid "Server %(status_code)s error: %(data)s" -msgstr "服务器 %(status_code)s 错误:%(data)s" +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" -#: cinder/network/quantum/client.py:210 +#: cinder/openstack/common/service.py:358 #, python-format -msgid "Unable to connect to server. 
Got error: %s" -msgstr "无法连接到服务器。出现错误:%s" +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" -#: cinder/network/quantum/client.py:228 +#: cinder/openstack/common/service.py:362 #, python-format -msgid "unable to deserialize object of type = '%s'" -msgstr "无法反序列化type = '%s' 的对象" - -#: cinder/network/quantum/manager.py:162 -msgid "QuantumManager does not use 'multi_host' parameter." -msgstr "QuantumManager 没有使用 'multi_host' 参数。" - -#: cinder/network/quantum/manager.py:166 -msgid "QuantumManager requires that only one network is created per call" -msgstr "QuantumManager 要求每次调用仅创建一个网落。" - -#: cinder/network/quantum/manager.py:176 -msgid "QuantumManager does not use 'vlan_start' parameter." -msgstr "QuantumManager 没有使用 'vlan_start' 参数。" - -#: cinder/network/quantum/manager.py:182 -msgid "QuantumManager does not use 'vpn_start' parameter." -msgstr "QuantumManager 没有使用 'vpn_start' 参数。" +msgid "pid %d not in child list" +msgstr "" -#: cinder/network/quantum/manager.py:186 -msgid "QuantumManager does not use 'bridge' parameter." -msgstr "QuantumManager 没有使用 'bridge' 参数。" +#: cinder/openstack/common/service.py:392 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" -#: cinder/network/quantum/manager.py:190 -msgid "QuantumManager does not use 'bridge_interface' parameter." -msgstr "QuantumManager 没有使用 'bridge_interface' 参数。" +#: cinder/openstack/common/service.py:410 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" -#: cinder/network/quantum/manager.py:195 -msgid "QuantumManager requires a valid (.1) gateway address." -msgstr "QuantumManager 要求一个有效的 (.1) 网关地址。" +#: cinder/openstack/common/sslutils.py:98 +#, python-format +msgid "Invalid SSL version : %s" +msgstr "" -#: cinder/network/quantum/manager.py:204 +#: cinder/openstack/common/strutils.py:86 #, python-format -msgid "" -"Unable to find existing quantum network for tenant '%(q_tenant_id)s' with" -" net-id '%(quantum_net_id)s'" -msgstr "无法为租户 '%(q_tenant_id)s' 找到已存在的 net-id是 '%(quantum_net_id)s' 的quantum网络" +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" -#: cinder/network/quantum/manager.py:301 +#: cinder/openstack/common/strutils.py:182 #, python-format -msgid "network allocations for instance %s" -msgstr "实例 %s 的网络分配" +msgid "Invalid string format: %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:189 +#, fuzzy, python-format +msgid "Unknown byte multiplier: %s" +msgstr "未知的基文件:%s" -#: cinder/network/quantum/manager.py:588 +#: cinder/openstack/common/versionutils.py:69 #, python-format msgid "" -"port deallocation failed for instance: |%(instance_id)s|, port_id: " -"|%(port_id)s|" -msgstr "实例的端口取消分配失败:|%(instance_id)s|, port_id: |%(port_id)s|" +"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and " +"may be removed in %(remove_in)s." +msgstr "" -#: cinder/network/quantum/manager.py:606 +#: cinder/openstack/common/versionutils.py:73 #, python-format msgid "" -"ipam deallocation failed for instance: |%(instance_id)s|, vif_uuid: " -"|%(vif_uuid)s|" -msgstr "实例的ipam取消分配失败:|%(instance_id)s|, vif_uuid: |%(vif_uuid)s|" +"%(what)s is deprecated as of %(as_of)s and may be removed in " +"%(remove_in)s. It will not be superseded." +msgstr "" -#: cinder/network/quantum/melange_connection.py:96 -#, python-format -msgid "Server returned error: %s" -msgstr "服务器返回错误:%s" +#: cinder/openstack/common/crypto/utils.py:29 +msgid "An unknown error occurred in crypto utils." 
+msgstr "" -#: cinder/network/quantum/melange_connection.py:98 -msgid "Connection error contacting melange service, retrying" -msgstr "melange服务的连接错误,重试" +#: cinder/openstack/common/crypto/utils.py:36 +#, python-format +msgid "Block size of %(given)d is too big, max = %(maximum)d" +msgstr "" -#: cinder/network/quantum/melange_connection.py:108 +#: cinder/openstack/common/crypto/utils.py:45 #, python-format -msgid "" -"allocate IP on network |%(network_id)s| belonging to " -"|%(network_tenant_id)s| to this vif |%(vif_id)s| with mac " -"|%(mac_address)s| belonging to |%(project_id)s| " +msgid "Length of %(given)d is too long, max = %(maximum)d" msgstr "" -"在网络 |%(network_id)s| 中分配IP地址,该网络属于 |%(network_tenant_id)s|, 分配IP给该vif " -"|%(vif_id)s|, 其中mac是 |%(mac_address)s| 所属项目 |%(project_id)s| " -#: cinder/network/quantum/melange_ipam_lib.py:133 -msgid "get_project_and_global_net_ids must be called with a non-null project_id" -msgstr "调用get_project_and_global_net_ids时 project_id必须是非空。" +#: cinder/openstack/common/db/exception.py:44 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" -#: cinder/network/quantum/cinder_ipam_lib.py:75 -msgid "Error creating network entry" -msgstr "创建网络入口的错误" +#: cinder/openstack/common/db/sqlalchemy/session.py:487 +msgid "DB exception wrapped." +msgstr "数据库异常被包裹。" -#: cinder/network/quantum/cinder_ipam_lib.py:90 +#: cinder/openstack/common/db/sqlalchemy/session.py:538 #, python-format -msgid "No network with net_id = %s" -msgstr "没有net_id = %s的网络" +msgid "Got mysql server has gone away: %s" +msgstr "" -#: cinder/network/quantum/cinder_ipam_lib.py:221 +#: cinder/openstack/common/db/sqlalchemy/session.py:610 #, fuzzy, python-format -msgid "No fixed IPs to deallocate for vif %s" -msgstr "没有为vif %sid取消分配固定IP" - -#: cinder/network/quantum/quantum_connection.py:99 -#, python-format -msgid "Connecting interface %(interface_id)s to net %(net_id)s for %(tenant_id)s" -msgstr "将连接接口 %(interface_id)s 连接到net %(net_id)s 针对租户 %(tenant_id)s" +msgid "SQL connection failed. %s attempts left." +msgstr "SQL连接失败 (%(connstring)s)。还剩 %(attempts)d 次。" -#: cinder/network/quantum/quantum_connection.py:113 -#, python-format -msgid "Deleting port %(port_id)s on net %(net_id)s for %(tenant_id)s" -msgstr "端口 %(port_id)s 在net %(net_id)s 上删除,针对租户是 %(tenant_id)s" +#: cinder/openstack/common/db/sqlalchemy/utils.py:33 +msgid "Sort key supplied was not valid." +msgstr "" -#: cinder/notifier/api.py:115 +#: cinder/openstack/common/notifier/api.py:129 #, python-format msgid "%s not in valid priorities" msgstr "%s 不在有效的优先级" -#: cinder/notifier/api.py:130 +#: cinder/openstack/common/notifier/api.py:145 #, python-format msgid "" "Problem '%(e)s' attempting to send to notification system. " "Payload=%(payload)s" msgstr "Problem '%(e)s' 试图发送到通知系统。Payload=%(payload)s" -#: cinder/notifier/list_notifier.py:65 +#: cinder/openstack/common/notifier/api.py:164 #, python-format -msgid "Problem '%(e)s' attempting to send to notification driver %(driver)s." -msgstr "Problem '%(e)s' 试图发送到通知驱动 %(driver)s。" +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead." +msgstr "" -#: cinder/notifier/rabbit_notifier.py:46 +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 #, python-format msgid "Could not send notification to %(topic)s. 
Payload=%(message)s" msgstr "" -#: cinder/rpc/amqp.py:146 +#: cinder/openstack/common/rpc/__init__.py:105 #, python-format -msgid "Returning exception %s to caller" -msgstr "返回 %s 异常给调用者" +msgid "" +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:83 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "no calling threads waiting for msg_id : %s, message : %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:216 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." +msgstr "" -#: cinder/rpc/amqp.py:188 +#: cinder/openstack/common/rpc/amqp.py:299 #, python-format msgid "unpacked context: %s" msgstr "未打包的上下文:%s" -#: cinder/rpc/amqp.py:231 +#: cinder/openstack/common/rpc/amqp.py:345 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:414 #, python-format msgid "received %s" msgstr "已接收 %s" -#: cinder/rpc/amqp.py:236 +#: cinder/openstack/common/rpc/amqp.py:422 #, python-format msgid "no method for message: %s" msgstr "没有适用于消息的方法:%s" -#: cinder/rpc/amqp.py:237 +#: cinder/openstack/common/rpc/amqp.py:423 #, python-format msgid "No method for message: %s" msgstr "没有适用于消息的方法:%s" -#: cinder/rpc/amqp.py:321 +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:280 #, python-format -msgid "Making asynchronous call on %s ..." +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:459 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:594 +#, fuzzy, python-format +msgid "Making synchronous call on %s ..." msgstr "在 %s 做异步call" -#: cinder/rpc/amqp.py:324 +#: cinder/openstack/common/rpc/amqp.py:597 #, python-format msgid "MSG_ID is %s" msgstr "消息ID(MSG_ID)是 %s" -#: cinder/rpc/amqp.py:346 +#: cinder/openstack/common/rpc/amqp.py:631 #, python-format msgid "Making asynchronous cast on %s..." msgstr "在 %s 做异步cast" -#: cinder/rpc/amqp.py:354 +#: cinder/openstack/common/rpc/amqp.py:640 msgid "Making asynchronous fanout cast..." msgstr "做异步fanout cast" -#: cinder/rpc/amqp.py:379 +#: cinder/openstack/common/rpc/amqp.py:668 #, python-format -msgid "Sending notification on %s..." -msgstr "正在 %s 上发送通知" +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +#, fuzzy +msgid "An unknown RPC related exception occurred." +msgstr "发生未知异常。" -#: cinder/rpc/common.py:54 +#: cinder/openstack/common/rpc/common.py:104 #, python-format msgid "" "Remote error: %(exc_type)s %(value)s\n" @@ -4176,25 +3420,66 @@ msgstr "" "远程错误:%(exc_type)s %(value)s\n" "%(traceback)s。" -#: cinder/rpc/common.py:71 -msgid "Timeout while waiting on RPC response." -msgstr "等待RPC响应返回超时" +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). 
Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "无效的RPC连接重用。" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, fuzzy, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "删除 %(base_file)s 失败,错误是 %(error)s" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "返回 %s 异常给调用者" -#: cinder/rpc/impl_kombu.py:111 +#: cinder/openstack/common/rpc/impl_kombu.py:166 +#: cinder/openstack/common/rpc/impl_qpid.py:164 msgid "Failed to process message... skipping it." msgstr "" -#: cinder/rpc/impl_kombu.py:407 +#: cinder/openstack/common/rpc/impl_kombu.py:477 #, python-format msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" msgstr "正在重新连接位于 %(hostname)s:%(port)d 的AMQP服务器" -#: cinder/rpc/impl_kombu.py:430 +#: cinder/openstack/common/rpc/impl_kombu.py:499 #, python-format msgid "Connected to AMQP server on %(hostname)s:%(port)d" msgstr "连接到位于 %(hostname)s:%(port)d 的AMQP服务器" -#: cinder/rpc/impl_kombu.py:466 +#: cinder/openstack/common/rpc/impl_kombu.py:536 #, python-format msgid "" "Unable to connect to AMQP server on %(hostname)s:%(port)d after " @@ -4203,402 +3488,532 @@ msgstr "" "无法连接到位于%(hostname)s:%(port)d的AMQP server,尝试已经 %(max_retries)d " "次:%(err_str)s" -#: cinder/rpc/impl_kombu.py:482 +#: cinder/openstack/common/rpc/impl_kombu.py:552 #, python-format msgid "" "AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " "again in %(sleep_time)d seconds." msgstr "位于%(hostname)s:%(port)d的AMQP服务器不可达:%(err_str)s。%(sleep_time)d 秒钟后请再尝试。" -#: cinder/rpc/impl_kombu.py:533 cinder/rpc/impl_qpid.py:385 +#: cinder/openstack/common/rpc/impl_kombu.py:606 +#: cinder/openstack/common/rpc/impl_qpid.py:507 #, python-format msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" msgstr "为topic '%(topic)s'声明消费者失败:%(err_str)s" -#: cinder/rpc/impl_kombu.py:551 cinder/rpc/impl_qpid.py:400 +#: cinder/openstack/common/rpc/impl_kombu.py:624 +#: cinder/openstack/common/rpc/impl_qpid.py:522 #, python-format msgid "Timed out waiting for RPC response: %s" msgstr "等待RPC响应超时:%s" -#: cinder/rpc/impl_kombu.py:555 cinder/rpc/impl_qpid.py:404 +#: cinder/openstack/common/rpc/impl_kombu.py:628 +#: cinder/openstack/common/rpc/impl_qpid.py:526 #, python-format msgid "Failed to consume message from queue: %s" msgstr "从队列中消费消息失败:%s" -#: cinder/rpc/impl_kombu.py:589 cinder/rpc/impl_qpid.py:434 +#: cinder/openstack/common/rpc/impl_kombu.py:667 +#: cinder/openstack/common/rpc/impl_qpid.py:561 #, python-format msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" msgstr "给topic '%(topic)s'发布消息失败:%(err_str)s" -#: cinder/rpc/impl_qpid.py:341 +#: cinder/openstack/common/rpc/impl_qpid.py:84 #, python-format -msgid "Unable to connect to AMQP server: %s " +msgid "Invalid value for qpid_topology_version: %d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:455 +#, fuzzy, python-format +msgid "Unable to connect to AMQP server: %(e)s. 
Sleeping %(delay)s seconds" msgstr "无法连接到AMQP服务器:%s " -#: cinder/rpc/impl_qpid.py:346 +#: cinder/openstack/common/rpc/impl_qpid.py:461 #, python-format msgid "Connected to AMQP server on %s" msgstr "连接到 %s 的AMQP服务器" -#: cinder/rpc/impl_qpid.py:354 +#: cinder/openstack/common/rpc/impl_qpid.py:474 msgid "Re-established AMQP queues" msgstr "重建AMQP队列" -#: cinder/rpc/impl_qpid.py:412 +#: cinder/openstack/common/rpc/impl_qpid.py:534 msgid "Error processing message. Skipping it." msgstr "" -#: cinder/scheduler/chance.py:47 cinder/scheduler/simple.py:91 -#: cinder/scheduler/simple.py:143 -msgid "Is the appropriate service running?" -msgstr "正确的服务在运行吗?" +#: cinder/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." +msgstr "" -#: cinder/scheduler/chance.py:52 -msgid "Could not find another compute" -msgstr "无法找到另一个计算节点" +#: cinder/openstack/common/rpc/impl_zmq.py:101 +#, fuzzy, python-format +msgid "Deserializing: %s" +msgstr "Ext 描述: %s" -#: cinder/scheduler/driver.py:63 +#: cinder/openstack/common/rpc/impl_zmq.py:136 #, python-format -msgid "Casted '%(method)s' to volume '%(host)s'" -msgstr "把“%(method)s”投放在卷%(host)s\"" +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" -#: cinder/scheduler/driver.py:80 +#: cinder/openstack/common/rpc/impl_zmq.py:137 #, python-format -msgid "Casted '%(method)s' to compute '%(host)s'" -msgstr "把“%(method)s”投放在主机 \"%(host)s\"" +msgid "-> Subscribed to %(subscribe)s" +msgstr "" -#: cinder/scheduler/driver.py:89 +#: cinder/openstack/common/rpc/impl_zmq.py:138 #, python-format -msgid "Casted '%(method)s' to network '%(host)s'" -msgstr "把“%(method)s”投放在网络 \"%(host)s\"" +msgid "-> bind: %(bind)s" +msgstr "" -#: cinder/scheduler/driver.py:107 +#: cinder/openstack/common/rpc/impl_zmq.py:146 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:158 #, python-format -msgid "Casted '%(method)s' to %(topic)s '%(host)s'" -msgstr "把“%(method)s”投放在 %(topic)s \"%(host)s\"" +msgid "Subscribing to %s" +msgstr "" -#: cinder/scheduler/driver.py:181 -msgid "Must implement a fallback schedule" -msgstr "必须实现一个回滚 schedule" +#: cinder/openstack/common/rpc/impl_zmq.py:200 +msgid "You cannot recv on this socket." +msgstr "" -#: cinder/scheduler/driver.py:185 -msgid "Driver must implement schedule_prep_resize" +#: cinder/openstack/common/rpc/impl_zmq.py:205 +msgid "You cannot send on this socket." msgstr "" -#: cinder/scheduler/driver.py:190 -msgid "Driver must implement schedule_run_instance" +#: cinder/openstack/common/rpc/impl_zmq.py:267 +#, fuzzy, python-format +msgid "Running func with context: %s" +msgstr "未打包的上下文:%s" + +#: cinder/openstack/common/rpc/impl_zmq.py:305 +msgid "Sending reply" msgstr "" -#: cinder/scheduler/driver.py:325 -msgid "Block migration can not be used with shared storage." -msgstr "块存储迁移无法在共享存储使用" +#: cinder/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." +msgstr "" -#: cinder/scheduler/driver.py:330 +#: cinder/openstack/common/rpc/impl_zmq.py:371 #, fuzzy -msgid "Live migration can not be used without shared storage." -msgstr "块存储迁移无法在共享存储使用" - -#: cinder/scheduler/driver.py:367 cinder/scheduler/driver.py:457 -#, python-format -msgid "host %(dest)s is not compatible with original host %(src)s." 
-msgstr "主机 %(dest)s 和原先的主机 %(src)s 不兼容。" +msgid "Registering reactor" +msgstr "正在注销虚拟机 %s" -#: cinder/scheduler/driver.py:416 -#, fuzzy, python-format -msgid "" -"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " -"memory(host:%(avail)s <= instance:%(mem_inst)s)" -msgstr "无法迁移 %(instance_id)s 到 %(dest)s 上:缺少内存(主机:%(avail)s <= 实例:%(mem_inst)s)" +#: cinder/openstack/common/rpc/impl_zmq.py:383 +#, fuzzy +msgid "In reactor registered" +msgstr "没有虚拟机注册" -#: cinder/scheduler/driver.py:472 -#, fuzzy, python-format -msgid "" -"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " -"disk(host:%(available)s <= instance:%(necessary)s)" +#: cinder/openstack/common/rpc/impl_zmq.py:387 +msgid "Consuming socket" msgstr "" -"无法迁移%(instance_id)s 到 %(dest)s 上:缺少磁盘(主机:%(available)s <= " -"实例:%(necessary)s)" -#: cinder/scheduler/filter_scheduler.py:51 +#: cinder/openstack/common/rpc/impl_zmq.py:437 #, python-format -msgid "No host selection for %s defined." -msgstr "没有主机选择所定义的 %s 主题消息" - -#: cinder/scheduler/filter_scheduler.py:64 -#, python-format -msgid "Attempting to build %(num_instances)d instance(s)" -msgstr "尝试创建 %(num_instances)d 个实例" +msgid "Creating proxy for topic: %s" +msgstr "" -#: cinder/scheduler/filter_scheduler.py:157 -msgid "Scheduler only understands Compute nodes (for now)" -msgstr "调度器只能理解计算节点(暂时)" +#: cinder/openstack/common/rpc/impl_zmq.py:443 +msgid "Topic contained dangerous characters." +msgstr "" -#: cinder/scheduler/filter_scheduler.py:199 -#, python-format -msgid "Filtered %(hosts)s" -msgstr "过滤掉的主机 %(hosts)s" +#: cinder/openstack/common/rpc/impl_zmq.py:475 +msgid "Topic socket file creation failed." +msgstr "" -#: cinder/scheduler/filter_scheduler.py:209 +#: cinder/openstack/common/rpc/impl_zmq.py:481 #, python-format -msgid "Weighted %(weighted_host)s" -msgstr "加权的主机 %(weighted_host)s" +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" -#: cinder/scheduler/host_manager.py:144 +#: cinder/openstack/common/rpc/impl_zmq.py:497 #, python-format -msgid "Host filter fails for ignored host %(host)s" +msgid "Required IPC directory does not exist at %s" msgstr "" -#: cinder/scheduler/host_manager.py:151 +#: cinder/openstack/common/rpc/impl_zmq.py:506 #, python-format -msgid "Host filter fails for non-forced host %(host)s" +msgid "Permission denied to IPC directory at %s" msgstr "" -#: cinder/scheduler/host_manager.py:157 -#, python-format -msgid "Host filter function %(func)s failed for %(host)s" +#: cinder/openstack/common/rpc/impl_zmq.py:509 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." msgstr "" -#: cinder/scheduler/host_manager.py:163 +#: cinder/openstack/common/rpc/impl_zmq.py:543 #, fuzzy, python-format -msgid "Host filter passes for %(host)s" -msgstr "主机 %(host)s 没有计算节点" +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "给定数据:%s" -#: cinder/scheduler/host_manager.py:272 -#, python-format -msgid "Received %(service_name)s service update from %(host)s." -msgstr "接收到 %(service_name)s 服务更新,来自 %(host)s。" +#: cinder/openstack/common/rpc/impl_zmq.py:562 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" -#: cinder/scheduler/host_manager.py:313 -msgid "host_manager only implemented for 'compute'" -msgstr "host_manager 只实现了“compute”" +#: cinder/openstack/common/rpc/impl_zmq.py:590 +msgid "Skipping topic registration. Already registered." 
+msgstr "" -#: cinder/scheduler/host_manager.py:323 +#: cinder/openstack/common/rpc/impl_zmq.py:597 #, python-format -msgid "No service for compute ID %s" -msgstr "计算节点 %s 没有服务" +msgid "Consumer is a zmq.%s" +msgstr "" -#: cinder/scheduler/manager.py:85 -#, fuzzy, python-format -msgid "" -"Driver Method %(driver_method_name)s missing: %(e)s. Reverting to " -"schedule()" -msgstr "驱动方法 %(driver_method)s 丢失:%(e)s。撤销回schedule()" +#: cinder/openstack/common/rpc/impl_zmq.py:649 +#, fuzzy +msgid "Creating payload" +msgstr "正在创建镜像" -#: cinder/scheduler/manager.py:150 -#, python-format -msgid "Failed to schedule_%(method)s: %(ex)s" -msgstr "schedule_%(method)s 失败:%(ex)s" +#: cinder/openstack/common/rpc/impl_zmq.py:662 +msgid "Creating queue socket for reply waiter" +msgstr "" -#: cinder/scheduler/manager.py:159 -#, fuzzy, python-format -msgid "Setting instance %(instance_uuid)s to %(state)s state." -msgstr "设置实例 %(instance_uuid)s 至 ERROR 状态" +#: cinder/openstack/common/rpc/impl_zmq.py:675 +msgid "Sending cast" +msgstr "" -#: cinder/scheduler/scheduler_options.py:66 -#, python-format -msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" -msgstr "无法统计调度器的选项文件 %(filename)s:“%(e)s”" +#: cinder/openstack/common/rpc/impl_zmq.py:678 +msgid "Cast sent; Waiting reply" +msgstr "" -#: cinder/scheduler/scheduler_options.py:75 -#, python-format -msgid "Could not decode scheduler options: '%(e)s'" -msgstr "无法解码调度器的选项:“%(e)s”" +#: cinder/openstack/common/rpc/impl_zmq.py:681 +#, fuzzy, python-format +msgid "Received message: %s" +msgstr "已接收 %s" -#: cinder/scheduler/simple.py:87 -msgid "Not enough allocatable CPU cores remaining" -msgstr "没有足够可分配的剩余CPU核心" +#: cinder/openstack/common/rpc/impl_zmq.py:682 +msgid "Unpacking response" +msgstr "" -#: cinder/scheduler/simple.py:137 -msgid "Not enough allocatable volume gigabytes remaining" -msgstr "卷没有足够可分配的空间" +#: cinder/openstack/common/rpc/impl_zmq.py:691 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" -#: cinder/scheduler/filters/core_filter.py:45 -msgid "VCPUs not set; assuming CPU collection broken" -msgstr "未设置 VCPUs;假设 CPU 集合损坏了" +#: cinder/openstack/common/rpc/impl_zmq.py:698 +#, fuzzy +msgid "RPC Message Invalid." +msgstr "请求无效。" -#: cinder/tests/fake_utils.py:72 +#: cinder/openstack/common/rpc/impl_zmq.py:721 #, python-format -msgid "Faking execution of cmd (subprocess): %s" -msgstr "伪执行命令(子进程):%s" +msgid "%(msg)s" +msgstr "" -#: cinder/tests/fake_utils.py:80 +#: cinder/openstack/common/rpc/impl_zmq.py:724 #, python-format -msgid "Faked command matched %s" -msgstr "伪命令匹配 %s" +msgid "Sending message(s) to: %s" +msgstr "" -#: cinder/tests/fake_utils.py:96 -#, python-format -msgid "Faked command raised an exception %s" -msgstr "伪命令引起异常 %s" +#: cinder/openstack/common/rpc/impl_zmq.py:728 +msgid "No matchmaker results. Not casting." +msgstr "" -#: cinder/tests/fake_utils.py:101 +#: cinder/openstack/common/rpc/impl_zmq.py:731 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:771 #, python-format -msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" -msgstr "伪命令的标准输出stdout='%(stdout)s' 标准错误输出 stderr='%(stderr)s'" +msgid "topic is %s." 
+msgstr "" -#: cinder/tests/fakelibvirt.py:784 -msgid "Please extend mock libvirt module to support flags" -msgstr "请扩展模拟的 libvirt 模块来支持标记" +#: cinder/openstack/common/rpc/impl_zmq.py:815 +#, python-format +msgid "rpc_zmq_matchmaker = %(orig)s is deprecated; use %(new)s instead" +msgstr "" -#: cinder/tests/fakelibvirt.py:790 -msgid "Please extend fake libvirt module to support this auth method" -msgstr "请扩展伪libvirt模块来支持这种认知方法" +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." +msgstr "" -#: cinder/tests/test_compute.py:365 cinder/tests/test_compute.py:1419 -#, python-format -msgid "Running instances: %s" -msgstr "正在运行的实例:%s" +#: cinder/openstack/common/rpc/matchmaker.py:89 +#, fuzzy +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "xenapi.fake 没有 %s 的实现" -#: cinder/tests/test_compute.py:371 +#: cinder/openstack/common/rpc/matchmaker.py:239 #, python-format -msgid "After terminating instances: %s" -msgstr "终止实例之后:%s" +msgid "Matchmaker unregistered: %s, %s" +msgstr "" -#: cinder/tests/test_compute.py:589 -msgid "Internal error" -msgstr "内部错误" +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." +msgstr "" -#: cinder/tests/test_compute.py:1430 +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#: cinder/openstack/common/rpc/matchmaker_ring.py:79 +#: cinder/openstack/common/rpc/matchmaker_ring.py:97 #, python-format -msgid "After force-killing instances: %s" -msgstr "强制杀死实例后:%s" +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" -#: cinder/tests/test_misc.py:92 +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:54 #, python-format -msgid "" -"The following migrations are missing a downgrade:\n" -"\t%s" +msgid "extra_spec requirement '%(req)s' does not match '%(cap)s'" msgstr "" -"下面的迁移缺少了降级:\n" -"\t%s" - -#: cinder/tests/test_cinder_manage.py:169 -msgid "id" -msgstr "id" -#: cinder/tests/test_cinder_manage.py:170 -msgid "IPv4" -msgstr "IPv4" +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:67 +#, python-format +msgid "%(host_state)s fails resource_type extra_specs requirements" +msgstr "" -#: cinder/tests/test_cinder_manage.py:171 -msgid "IPv6" -msgstr "IPv6" +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:43 +msgid "Re-scheduling is disabled." +msgstr "" -#: cinder/tests/test_cinder_manage.py:172 -msgid "start address" -msgstr "起始地址" +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:52 +#, python-format +msgid "Host %(host)s %(pass_msg)s. 
Previously tried hosts: %(hosts)s" +msgstr "" -#: cinder/tests/test_cinder_manage.py:173 -msgid "DNS1" -msgstr "DNS1" +#: cinder/scheduler/driver.py:69 +msgid "Must implement host_passes_filters" +msgstr "" -#: cinder/tests/test_cinder_manage.py:174 -msgid "DNS2" -msgstr "DNS2" +#: cinder/scheduler/driver.py:74 +msgid "Must implement find_retype_host" +msgstr "" -#: cinder/tests/test_cinder_manage.py:175 -msgid "VlanID" -msgstr "VlanID" +#: cinder/scheduler/driver.py:78 +msgid "Must implement a fallback schedule" +msgstr "必须实现一个回滚 schedule" -#: cinder/tests/test_cinder_manage.py:176 -msgid "project" -msgstr "项目" +#: cinder/scheduler/driver.py:82 +#, fuzzy +msgid "Must implement schedule_create_volume" +msgstr "必须实现一个回滚 schedule" -#: cinder/tests/test_cinder_manage.py:177 -msgid "uuid" -msgstr "uuid" +#: cinder/scheduler/filter_scheduler.py:98 +#, python-format +msgid "cannot place volume %(id)s on %(host)s" +msgstr "" -#: cinder/tests/test_volume.py:216 +#: cinder/scheduler/filter_scheduler.py:114 #, python-format -msgid "Target %s allocated" -msgstr "目标 %s 已经分配" +msgid "No valid hosts for volume %(id)s with type %(type)s" +msgstr "" -#: cinder/tests/test_volume.py:468 +#: cinder/scheduler/filter_scheduler.py:125 #, python-format -msgid "Cannot confirm exported volume id:%s." -msgstr "无法确认导出的卷 id:%s。" +msgid "" +"Current host not valid for volume %(id)s with type %(type)s, migration " +"not allowed" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:156 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" -#: cinder/tests/test_volume_types.py:58 +#: cinder/scheduler/filter_scheduler.py:174 #, python-format -msgid "Given data: %s" -msgstr "给定数据:%s" +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" -#: cinder/tests/test_volume_types.py:59 +#: cinder/scheduler/filter_scheduler.py:207 #, python-format -msgid "Result data: %s" -msgstr "结果数据:%s" +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" -#: cinder/tests/test_xenapi.py:626 +#: cinder/scheduler/filter_scheduler.py:259 #, python-format -msgid "Creating files in %s to simulate guest agent" -msgstr "在%s 创建文件模拟客户代理" +msgid "Filtered %s" +msgstr "" -#: cinder/tests/test_xenapi.py:637 +#: cinder/scheduler/filter_scheduler.py:276 #, python-format -msgid "Removing simulated guest agent files in %s" -msgstr "移除在 %s 的模拟的客户代理文件" +msgid "Choosing %s" +msgstr "" -#: cinder/tests/api/openstack/compute/test_servers.py:2144 +#: cinder/scheduler/host_manager.py:264 #, fuzzy, python-format -msgid "Quota exceeded: code=%(code)s" -msgstr "超出配额" +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "接收到 %(service_name)s 服务更新,来自 %(host)s。" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:57 +#: cinder/scheduler/host_manager.py:269 #, python-format -msgid "_create: %s" -msgstr "_create: %s" +msgid "Received %(service_name)s service update from %(host)s." +msgstr "接收到 %(service_name)s 服务更新,来自 %(host)s。" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:66 +#: cinder/scheduler/host_manager.py:294 #, python-format -msgid "_delete: %s" -msgstr "_delete: %s" +msgid "volume service is down or disabled. 
(host: %s)" +msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:75 -#, python-format -msgid "_get: %s" -msgstr "_get: %s" +#: cinder/scheduler/manager.py:63 +msgid "" +"ChanceScheduler and SimpleScheduler have been deprecated due to lack of " +"support for advanced features like: volume types, volume encryption, QoS " +"etc. These two schedulers can be fully replaced by FilterScheduler with " +"certain combination of filters and weighers." +msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:85 -#, python-format -msgid "_get_all: %s" -msgstr "_get_all: %s" +#: cinder/scheduler/manager.py:98 cinder/scheduler/manager.py:100 +msgid "Failed to create scheduler manager volume flow" +msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:125 -#, python-format -msgid "test_snapshot_create: param=%s" -msgstr "test_snapshot_create: param=%s" +#: cinder/scheduler/manager.py:159 +msgid "New volume type not specified in request_spec." +msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:134 +#: cinder/scheduler/manager.py:174 #, python-format -msgid "test_snapshot_create: resp_dict=%s" -msgstr "test_snapshot_create: resp_dict=%s" +msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." +msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:156 +#: cinder/scheduler/manager.py:192 #, python-format -msgid "test_snapshot_create_force: param=%s" -msgstr "test_snapshot_create_force: param=%s" +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "schedule_%(method)s 失败:%(ex)s" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:165 +#: cinder/scheduler/scheduler_options.py:68 #, python-format -msgid "test_snapshot_create_force: resp_dict=%s" -msgstr "test_snapshot_create_force: resp_dict=%s" +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "无法统计调度器的选项文件 %(filename)s:“%(e)s”" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:205 +#: cinder/scheduler/scheduler_options.py:78 #, python-format -msgid "test_snapshot_show: resp=%s" -msgstr "test_snapshot_show: resp=%s" +msgid "Could not decode scheduler options: '%s'" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:43 +#, fuzzy +msgid "Free capacity not set: volume node info collection broken." 
+msgstr "未设置 VCPUs;假设 CPU 集合损坏了" + +#: cinder/scheduler/filters/capacity_filter.py:57 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:53 +msgid "No volume_id provided to populate a request_spec from" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:116 +#, python-format +msgid "Failed to schedule_create_volume: %(cause)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:135 +#, python-format +msgid "Failed notifying on %(topic)s payload %(payload)s" +msgstr "" + +#: cinder/tests/fake_driver.py:57 cinder/volume/driver.py:784 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "FAKE ISCSI: %s" + +#: cinder/tests/fake_driver.py:76 cinder/volume/driver.py:884 +#, python-format +msgid "FAKE ISER: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:97 +msgid "local_path not implemented" +msgstr "" + +#: cinder/tests/fake_driver.py:124 cinder/tests/fake_driver.py:129 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "LoggingVolumeDriver: %s" + +#: cinder/tests/fake_utils.py:70 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "伪执行命令(子进程):%s" + +#: cinder/tests/fake_utils.py:78 +#, python-format +msgid "Faked command matched %s" +msgstr "伪命令匹配 %s" + +#: cinder/tests/fake_utils.py:94 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "伪命令引起异常 %s" + +#: cinder/tests/fake_utils.py:97 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "伪命令的标准输出stdout='%(stdout)s' 标准错误输出 stderr='%(stderr)s'" + +#: cinder/tests/test_misc.py:58 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" +"下面的迁移缺少了降级:\n" +"\t%s" + +#: cinder/tests/test_netapp_nfs.py:360 +#, python-format +msgid "Share %(share)s and file name %(file_name)s" +msgstr "" + +#: cinder/tests/test_rbd.py:768 cinder/volume/drivers/rbd.py:175 +msgid "flush() not supported in this version of librbd" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:260 +#, fuzzy, python-format +msgid "unrecognized argument %s" +msgstr "无法识别的 read_deleted 取值”%s“" + +#: cinder/tests/test_storwize_svc.py:1507 +#, python-format +msgid "Run CLI command: %s" +msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:231 +#: cinder/tests/test_storwize_svc.py:1510 +#, fuzzy, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "伪命令的标准输出stdout='%(stdout)s' 标准错误输出 stderr='%(stderr)s'" + +#: cinder/tests/test_storwize_svc.py:1515 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:60 #, python-format -msgid "test_snapshot_detail: resp_dict=%s" -msgstr "test_snapshot_detail: resp_dict=%s" +msgid "Given data: %s" +msgstr "给定数据:%s" + +#: cinder/tests/test_volume_types.py:61 +#, python-format +msgid "Result data: %s" +msgstr "结果数据:%s" -#: cinder/tests/integrated/test_login.py:31 +#: cinder/tests/test_xiv_ds8k.py:102 #, python-format -msgid "flavor: %s" -msgstr "类型:%s" +msgid "Volume not found for instance %(instance_id)s." 
+msgstr "没有为实例 %(instance_id)s 找到卷。" -#: cinder/tests/integrated/api/client.py:38 +#: cinder/tests/api/contrib/test_backups.py:741 +#, fuzzy +msgid "Invalid input" +msgstr "无效的快照" + +#: cinder/tests/integrated/test_login.py:29 +#, fuzzy, python-format +msgid "volume: %s" +msgstr "分离卷 %s" + +#: cinder/tests/integrated/api/client.py:32 #, python-format msgid "" "%(message)s\n" @@ -4609,3456 +4024,6785 @@ msgstr "" "状态码: %(_status)s\n" "主体: %(_body)s" -#: cinder/tests/integrated/api/client.py:47 +#: cinder/tests/integrated/api/client.py:42 msgid "Authentication error" msgstr "认证错误" -#: cinder/tests/integrated/api/client.py:55 +#: cinder/tests/integrated/api/client.py:50 msgid "Authorization error" msgstr "授权错误" -#: cinder/tests/integrated/api/client.py:63 +#: cinder/tests/integrated/api/client.py:58 msgid "Item not found" msgstr "条目没有找到" -#: cinder/tests/integrated/api/client.py:105 +#: cinder/tests/integrated/api/client.py:100 #, python-format msgid "Doing %(method)s on %(relative_url)s" msgstr "正在 %(relative_url)s 执行 %(method)s" -#: cinder/tests/integrated/api/client.py:107 +#: cinder/tests/integrated/api/client.py:103 #, python-format msgid "Body: %s" msgstr "主体:%s" -#: cinder/tests/integrated/api/client.py:125 +#: cinder/tests/integrated/api/client.py:121 #, python-format msgid "%(auth_uri)s => code %(http_status)s" msgstr "%(auth_uri)s => code %(http_status)s" -#: cinder/tests/integrated/api/client.py:151 +#: cinder/tests/integrated/api/client.py:148 #, python-format msgid "%(relative_uri)s => code %(http_status)s" msgstr "%(relative_uri)s => code %(http_status)s" -#: cinder/tests/integrated/api/client.py:161 +#: cinder/tests/integrated/api/client.py:159 msgid "Unexpected status code" msgstr "意外的状态码" -#: cinder/tests/integrated/api/client.py:168 +#: cinder/tests/integrated/api/client.py:166 #, python-format msgid "Decoding JSON: %s" msgstr "解码JSON:%s" -#: cinder/tests/rpc/common.py:133 -#, python-format -msgid "Nested received %(queue)s, %(value)s" -msgstr "嵌套(调用)接收到 %(queue)s, %(value)s" +#: cinder/transfer/api.py:68 +#, fuzzy +msgid "Volume in unexpected state" +msgstr "意外的状态码" -#: cinder/tests/rpc/common.py:142 -#, python-format -msgid "Nested return %s" -msgstr "嵌套(调用)返回 %s" +#: cinder/transfer/api.py:102 cinder/volume/api.py:367 +msgid "status must be available" +msgstr "状态必须可用" -#: cinder/tests/rpc/common.py:160 -msgid "RPC backend does not support timeouts" -msgstr "RPC后台不支持超时" +#: cinder/transfer/api.py:119 +#, fuzzy, python-format +msgid "Failed to create transfer record for %s" +msgstr "为ip: %s获取元数据失败" -#: cinder/tests/rpc/common.py:227 cinder/tests/rpc/common.py:233 +#: cinder/transfer/api.py:136 #, python-format -msgid "Received %s" -msgstr "已接收 %s" - -#: cinder/virt/connection.py:85 -msgid "Failed to open connection to the hypervisor" -msgstr "打开虚拟机管理程序的连接失败" +msgid "Attempt to transfer %s with invalid auth key." 
+msgstr "" -#: cinder/virt/fake.py:270 cinder/virt/xenapi_conn.py:396 -#: cinder/virt/baremetal/proxy.py:716 cinder/virt/libvirt/connection.py:2045 -#, python-format -msgid "Compute_service record created for %s " -msgstr "Compute_service记录为 %s 创建 " +#: cinder/transfer/api.py:156 cinder/volume/flows/api/create_volume.py:508 +#, fuzzy, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "%(pid)s 的配额超出,尝试创建 %(size)sG 的卷" -#: cinder/virt/fake.py:273 cinder/virt/xenapi_conn.py:399 -#: cinder/virt/baremetal/proxy.py:720 cinder/virt/libvirt/connection.py:2048 -#, python-format -msgid "Compute_service record updated for %s " -msgstr "Compute_service记录为 %s 更新 " +#: cinder/transfer/api.py:182 +#, fuzzy, python-format +msgid "Failed to update quota donating volumetransfer id %s" +msgstr "更新代理失败:%(resp)r" -#: cinder/virt/firewall.py:130 +#: cinder/transfer/api.py:199 #, python-format -msgid "Attempted to unfilter instance %s which is not filtered" -msgstr "试图不过滤没有过滤的实例 %s" +msgid "Volume %s has been transferred." +msgstr "" -#: cinder/virt/firewall.py:137 +#: cinder/volume/api.py:143 #, python-format -msgid "Filters added to instance %s" -msgstr "过滤器添加给实例 %s" +msgid "Unable to query if %s is in the availability zone set" +msgstr "" -#: cinder/virt/firewall.py:139 -msgid "Provider Firewall Rules refreshed" -msgstr "提供者防火墙规则刷新" +#: cinder/volume/api.py:171 cinder/volume/api.py:173 +msgid "Failed to create api volume flow" +msgstr "" -#: cinder/virt/firewall.py:291 -#, python-format -msgid "Adding security group rule: %r" -msgstr "添加安全组规则:%r" +#: cinder/volume/api.py:202 +msgid "Failed to update quota for deleting volume" +msgstr "" -#: cinder/virt/firewall.py:403 cinder/virt/xenapi/firewall.py:87 +#: cinder/volume/api.py:214 #, python-format -msgid "Adding provider rule: %s" -msgstr "添加提供者规则:%s" +msgid "Volume status must be available or error, but current status is: %s" +msgstr "" -#: cinder/virt/images.py:86 -msgid "'qemu-img info' parsing failed." 
-msgstr "'qemu-img info'解析失败" +#: cinder/volume/api.py:224 +msgid "Volume cannot be deleted while migrating" +msgstr "" -#: cinder/virt/images.py:92 +#: cinder/volume/api.py:229 #, python-format -msgid "fmt=%(fmt)s backed by: %(backing_file)s" -msgstr "fmt=%(fmt)s 由 %(backing_file)s 支持" +msgid "Volume still has %d dependent snapshots" +msgstr "" -#: cinder/virt/images.py:104 +#: cinder/volume/api.py:285 cinder/volume/api.py:350 +#: cinder/volume/qos_specs.py:240 cinder/volume/volume_types.py:67 #, python-format -msgid "Converted to raw, but format is now %s" -msgstr "转化为裸格式,但目前格式是 %s" +msgid "Searching by: %s" +msgstr "搜索条件: %s" -#: cinder/virt/vmwareapi_conn.py:105 -msgid "" -"Must specify vmwareapi_host_ip,vmwareapi_host_username and " -"vmwareapi_host_password to useconnection_type=vmwareapi" -msgstr "针对useconnection_type=vmwareapi必须指定vmwareapi_host_ip,vmwareapi_host_username和vmwareapi_host_password" +#: cinder/volume/api.py:370 +msgid "already attached" +msgstr "已经附加" -#: cinder/virt/vmwareapi_conn.py:276 -#, python-format -msgid "In vmwareapi:_create_session, got this exception: %s" -msgstr "在vmwareapi:_create_session,得到这个异常:%s" +#: cinder/volume/api.py:377 +msgid "status must be in-use to detach" +msgstr "" -#: cinder/virt/vmwareapi_conn.py:359 -#, python-format -msgid "In vmwareapi:_call_method, got this exception: %s" -msgstr "在vmwareapi:_call_method,得到这个异常:%s" +#: cinder/volume/api.py:388 +#, fuzzy +msgid "Volume status must be available to reserve" +msgstr "卷组状态必须可获取" -#: cinder/virt/vmwareapi_conn.py:398 -#, python-format -msgid "Task [%(task_name)s] %(task_ref)s status: success" -msgstr "任务 [%(task_name)s] %(task_ref)s 状态:成功" +#: cinder/volume/api.py:464 +msgid "Snapshot cannot be created while volume is migrating" +msgstr "" -#: cinder/virt/vmwareapi_conn.py:404 -#, python-format -msgid "Task [%(task_name)s] %(task_ref)s status: error %(error_info)s" -msgstr "任务 [%(task_name)s] %(task_ref)s 状态:错误 %(error_info)s" +#: cinder/volume/api.py:468 +msgid "must be available" +msgstr "必须可用" -#: cinder/virt/vmwareapi_conn.py:409 -#, python-format -msgid "In vmwareapi:_poll_task, Got this error %s" -msgstr "在vmwareapi:_poll_task,得到这个错误 %s" +#: cinder/volume/api.py:490 +#, fuzzy, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "%(pid)s 的配额超出,尝试创建 %(size)sG 的卷" -#: cinder/virt/xenapi_conn.py:140 +#: cinder/volume/api.py:502 +#, fuzzy, python-format msgid "" -"Must specify xenapi_connection_url, xenapi_connection_username " -"(optionally), and xenapi_connection_password to use " -"connection_type=xenapi" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "%(pid)s 已经超过配额,试图运行 %(min_count)s 个实例" + +#: cinder/volume/api.py:553 +#, fuzzy +msgid "Volume Snapshot status must be available or error" +msgstr "卷组状态必须可获取" + +#: cinder/volume/api.py:581 cinder/volume/flows/api/create_volume.py:208 +msgid "Metadata property key blank" msgstr "" -"针对use connection_type=xenapi必须指定xenapi_connection_url, " -"xenapi_connection_username (可选) 和 xenapi_connection_password" -#: cinder/virt/xenapi_conn.py:329 cinder/virt/libvirt/connection.py:472 -msgid "Could not determine iscsi initiator name" -msgstr "无法确定iscsi initiator名字" +#: cinder/volume/api.py:585 +#, fuzzy +msgid "Metadata property key greater than 255 characters" +msgstr "安全组 %s 不能比255个字符更长。" + +#: cinder/volume/api.py:589 +#, fuzzy +msgid "Metadata property value greater than 255 
characters" +msgstr "安全组 %s 不能比255个字符更长。" -#: cinder/virt/xenapi_conn.py:460 -msgid "Host startup on XenServer is not supported." -msgstr "不支持在XenServer启动主机" +#: cinder/volume/api.py:720 cinder/volume/api.py:772 +#, fuzzy +msgid "Volume status must be available/in-use." +msgstr "卷组状态必须可获取" -#: cinder/virt/xenapi_conn.py:489 -msgid "Unable to log in to XenAPI (is the Dom0 disk full?)" -msgstr "无法登录到XenAPI(Dom0磁盘是空么?)" +#: cinder/volume/api.py:723 +#, fuzzy +msgid "Volume status is in-use." +msgstr "卷 %s:卷繁忙" -#: cinder/virt/xenapi_conn.py:527 -msgid "Host is member of a pool, but DB says otherwise" +#: cinder/volume/api.py:752 +msgid "Volume status must be available to extend." msgstr "" -#: cinder/virt/xenapi_conn.py:599 cinder/virt/xenapi_conn.py:612 +#: cinder/volume/api.py:757 #, python-format -msgid "Got exception: %s" -msgstr "得到异常:%s" +msgid "" +"New size for extend must be greater than current size. (current: " +"%(size)s, extended: %(new_size)s)" +msgstr "" -#: cinder/virt/baremetal/dom.py:93 -msgid "No domains exist." -msgstr "没有域存在。" +#: cinder/volume/api.py:778 +msgid "Volume is already part of an active migration" +msgstr "" -#: cinder/virt/baremetal/dom.py:95 -#, python-format -msgid "============= initial domains =========== : %s" -msgstr "============= 初始域 =========== : %s" +#: cinder/volume/api.py:784 +msgid "volume must not have snapshots" +msgstr "" -#: cinder/virt/baremetal/dom.py:99 -msgid "Building domain: to be removed" -msgstr "创建域:将被移除" +#: cinder/volume/api.py:797 +#, python-format +msgid "No available service named %s" +msgstr "" -#: cinder/virt/baremetal/dom.py:103 -msgid "Not running domain: remove" -msgstr "没有运行的域:移除" +#: cinder/volume/api.py:803 +msgid "Destination host must be different than current host" +msgstr "" -#: cinder/virt/baremetal/dom.py:111 -msgid "domain running on an unknown node: discarded" -msgstr "运行在位置结点的域:放弃" +#: cinder/volume/api.py:833 +msgid "Source volume not mid-migration." +msgstr "" -#: cinder/virt/baremetal/dom.py:127 -#, python-format -msgid "No such domain (%s)" -msgstr "没有这个域 (%s)" +#: cinder/volume/api.py:837 +msgid "Destination volume not mid-migration." +msgstr "" -#: cinder/virt/baremetal/dom.py:134 +#: cinder/volume/api.py:842 #, python-format -msgid "Failed power down Bare-metal node %s" -msgstr "裸机结点 %s 电源停止失败" - -#: cinder/virt/baremetal/dom.py:143 -msgid "deactivate -> activate fails" -msgstr "deactivate -> activate失败" +msgid "Destination has migration_status %(stat)s, expected %(exp)s." +msgstr "" -#: cinder/virt/baremetal/dom.py:153 -msgid "destroy_domain: no such domain" -msgstr "destroy_domain:没有该域" +#: cinder/volume/api.py:853 +msgid "Volume status must be available to update readonly flag." +msgstr "" -#: cinder/virt/baremetal/dom.py:154 +#: cinder/volume/api.py:862 #, python-format -msgid "No such domain %s" -msgstr "没有该域 %s" +msgid "Unable to update type due to incorrect status on volume: %s" +msgstr "" -#: cinder/virt/baremetal/dom.py:161 +#: cinder/volume/api.py:868 #, python-format -msgid "Domains: %s" -msgstr "域:%s" +msgid "Volume %s is already part of an active migration." 
+msgstr "" -#: cinder/virt/baremetal/dom.py:163 +#: cinder/volume/api.py:874 #, python-format -msgid "Nodes: %s" -msgstr "结点:%s" +msgid "migration_policy must be 'on-demand' or 'never', passed: %s" +msgstr "" -#: cinder/virt/baremetal/dom.py:166 +#: cinder/volume/api.py:887 #, python-format -msgid "After storing domains: %s" -msgstr "储存域之后:%s" - -#: cinder/virt/baremetal/dom.py:169 -msgid "deactivation/removing domain failed" -msgstr "域去活/移除失败" +msgid "Invalid volume_type passed: %s" +msgstr "" -#: cinder/virt/baremetal/dom.py:176 -msgid "===== Domain is being created =====" -msgstr "===== 域正在创建 =====" +#: cinder/volume/api.py:900 +#, python-format +msgid "New volume_type same as original: %s" +msgstr "" -#: cinder/virt/baremetal/dom.py:179 -msgid "Same domain name already exists" -msgstr "同样的域名已经存在" +#: cinder/volume/api.py:915 +msgid "Retype cannot change encryption requirements" +msgstr "" -#: cinder/virt/baremetal/dom.py:181 -msgid "create_domain: before get_idle_node" -msgstr "create_domain:在get_idle_node之前" +#: cinder/volume/api.py:927 +msgid "Retype cannot change front-end qos specs for in-use volumes" +msgstr "" -#: cinder/virt/baremetal/dom.py:198 +#: cinder/volume/driver.py:189 cinder/volume/drivers/netapp/nfs.py:174 #, python-format -msgid "Created new domain: %s" -msgstr "新域创建:%s" +msgid "Recovering from a failed execute. Try number %s" +msgstr "从失败的执行中恢复。尝试编号 %s" -#: cinder/virt/baremetal/dom.py:213 +#: cinder/volume/driver.py:282 #, python-format -msgid "Failed to boot Bare-metal node %s" -msgstr "裸机结点 %s 启动失败" - -#: cinder/virt/baremetal/dom.py:222 -msgid "No such domain exists" -msgstr "没有该域" +msgid "copy_data_between_volumes %(src)s -> %(dest)s." +msgstr "" -#: cinder/virt/baremetal/dom.py:226 +#: cinder/volume/driver.py:295 cinder/volume/driver.py:309 #, python-format -msgid "change_domain_state: to new state %s" -msgstr "change_domain_state:新状态 %s" +msgid "Failed to attach volume %(vol)s" +msgstr "" -#: cinder/virt/baremetal/dom.py:233 +#: cinder/volume/driver.py:327 #, python-format -msgid "Stored fake domains to the file: %s" -msgstr "把伪域名存到文件中:%s" - -#: cinder/virt/baremetal/dom.py:244 -msgid "domain does not exist" -msgstr "域不存在" +msgid "Failed to copy volume %(src)s to %(dest)d" +msgstr "" -#: cinder/virt/baremetal/nodes.py:42 +#: cinder/volume/driver.py:340 #, python-format -msgid "Unknown baremetal driver %(d)s" -msgstr "未知的裸机驱动 %(d)s" +msgid "copy_image_to_volume %s." +msgstr "" -#: cinder/virt/baremetal/proxy.py:148 +#: cinder/volume/driver.py:358 #, python-format -msgid "Error encountered when destroying instance '%(name)s': %(ex)s" -msgstr "销毁实例 '%(name)s' 时遇到错误:%(ex)s" +msgid "copy_volume_to_image %s." +msgstr "" -#: cinder/virt/baremetal/proxy.py:162 +#: cinder/volume/driver.py:394 #, python-format -msgid "instance %(instance_name)s: deleting instance files %(target)s" -msgstr "实例 %(instance_name)s:正在删除实例文件 %(target)s" +msgid "Unable to access the backend storage via the path %(path)s." +msgstr "" -#: cinder/virt/baremetal/proxy.py:189 +#: cinder/volume/driver.py:433 #, python-format -msgid "instance %s: rebooted" -msgstr "实例 %s:重启" - -#: cinder/virt/baremetal/proxy.py:192 -msgid "_wait_for_reboot failed" -msgstr "_wait_for_reboot失败" +msgid "Creating a new backup for volume %s." +msgstr "" -#: cinder/virt/baremetal/proxy.py:222 +#: cinder/volume/driver.py:451 #, python-format -msgid "instance %s: rescued" -msgstr "实例 %s:已救援" +msgid "Restoring backup %(backup)s to volume %(volume)s." 
+msgstr "" -#: cinder/virt/baremetal/proxy.py:225 -msgid "_wait_for_rescue failed" -msgstr "_wait_for_rescue 失败" +#: cinder/volume/driver.py:474 +msgid "Extend volume not implemented" +msgstr "" -#: cinder/virt/baremetal/proxy.py:242 -msgid "<============= spawn of baremetal =============>" -msgstr "<============= 生产裸机=============>" +#: cinder/volume/driver.py:533 cinder/volume/drivers/emc/emc_smis_iscsi.py:113 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "采用discovery,ISCSI provider_location 没有存储" -#: cinder/virt/baremetal/proxy.py:255 +#: cinder/volume/driver.py:546 #, python-format -msgid "instance %s: is building" -msgstr "实例 %s:正在创建中" - -#: cinder/virt/baremetal/proxy.py:260 -msgid "Key is injected but instance is not running yet" -msgstr "已经注入密钥但是实例还没有运行" +msgid "ISCSI discovery attempt failed for:%s" +msgstr "" -#: cinder/virt/baremetal/proxy.py:265 +#: cinder/volume/driver.py:548 #, python-format -msgid "instance %s: booted" -msgstr "实例 %s:已启动" +msgid "Error from iscsiadm -m discovery: %s" +msgstr "" -#: cinder/virt/baremetal/proxy.py:268 -#, python-format -msgid "~~~~~~ current state = %s ~~~~~~" -msgstr "~~~~~~ 当前状态 = %s ~~~~~~" +#: cinder/volume/driver.py:595 +#, fuzzy, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "无法为卷 %s 找到 iSCSI 导出" -#: cinder/virt/baremetal/proxy.py:269 +#: cinder/volume/driver.py:599 cinder/volume/drivers/emc/emc_smis_iscsi.py:156 #, python-format -msgid "instance %s spawned successfully" -msgstr "实例 %s 生产成功" +msgid "ISCSI Discovery: Found %s" +msgstr "ISCSI Discovery:找到 %s" -#: cinder/virt/baremetal/proxy.py:272 -#, python-format -msgid "instance %s:not booted" -msgstr "实例 %s:没有启动" +#: cinder/volume/driver.py:696 +msgid "The volume driver requires the iSCSI initiator name in the connector." +msgstr "" -#: cinder/virt/baremetal/proxy.py:274 -msgid "Bremetal assignment is overcommitted." -msgstr "过多提交裸机分配" +#: cinder/volume/driver.py:726 cinder/volume/driver.py:845 +#: cinder/volume/drivers/eqlx.py:247 cinder/volume/drivers/lvm.py:359 +#: cinder/volume/drivers/zadara.py:650 +#: cinder/volume/drivers/emc/emc_smis_common.py:859 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:235 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:602 +#: cinder/volume/drivers/netapp/iscsi.py:1032 +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/nexenta/iscsi.py:538 +#: cinder/volume/drivers/windows/windows.py:205 +msgid "Updating volume stats" +msgstr "" -#: cinder/virt/baremetal/proxy.py:354 -#, python-format -msgid "instance %s: Creating image" -msgstr "实例 %s:正在创建镜像" +#: cinder/volume/driver.py:924 +msgid "Driver must implement initialize_connection" +msgstr "" -#: cinder/virt/baremetal/proxy.py:473 +#: cinder/volume/manager.py:203 #, python-format -msgid "instance %(inst_name)s: injecting %(injection)s into image %(img_id)s" -msgstr "实例 %(inst_name)s:将 %(injection)s 注入镜像 %(img_id)s" +msgid "Driver path %s is deprecated, update your configuration to the new path." +msgstr "" -#: cinder/virt/baremetal/proxy.py:484 -#, python-format +#: cinder/volume/manager.py:209 msgid "" -"instance %(inst_name)s: ignoring error injecting data into image " -"%(img_id)s (%(e)s)" -msgstr "实例 %(inst_name)s:忽略向镜像 %(img_id)s 注入数据的错误 (%(e)s)" +"ThinLVMVolumeDriver is deprecated, please configure LVMISCSIDriver and " +"lvm_type=thin. Continuing with those settings." 
+msgstr "" -#: cinder/virt/baremetal/proxy.py:529 +#: cinder/volume/manager.py:228 #, python-format -msgid "instance %s: starting toXML method" -msgstr "实例 %s:开始方法 toXML" +msgid "Starting volume driver %(driver_name)s (%(version)s)" +msgstr "" -#: cinder/virt/baremetal/proxy.py:531 +#: cinder/volume/manager.py:235 #, python-format -msgid "instance %s: finished toXML method" -msgstr "实例 %s:方法toXML 完成" - -#: cinder/virt/baremetal/proxy.py:574 cinder/virt/libvirt/connection.py:1815 -msgid "" -"Cannot get the number of cpu, because this function is not implemented " -"for this platform. This error can be safely ignored for now." -msgstr "无法得到CPU的数目,因为这个函数不是给这个平台执行的。这个错误可以被放心忽略。" +msgid "Error encountered during initialization of driver: %(name)s" +msgstr "" -#: cinder/virt/baremetal/proxy.py:714 +#: cinder/volume/manager.py:244 #, python-format -msgid "#### RLK: cpu_arch = %s " -msgstr "#### RLK: cpu_arch = %s " - -#: cinder/virt/baremetal/proxy.py:746 -msgid "Updating!" -msgstr "正在更新。" - -#: cinder/virt/baremetal/proxy.py:773 cinder/virt/libvirt/connection.py:2609 -#: cinder/virt/xenapi/host.py:129 -msgid "Updating host stats" -msgstr "正在更新主机状态" - -#: cinder/virt/baremetal/tilera.py:185 -msgid "free_node...." -msgstr "free_node..." - -#: cinder/virt/baremetal/tilera.py:216 -#, fuzzy, python-format -msgid "deactivate_node is called for node_id = %(id)s node_ip = %(ip)s" -msgstr "针对node_id = %(id)s node_ip = %(ip)s调用了deactivate_node" - -#: cinder/virt/baremetal/tilera.py:221 -msgid "status of node is set to 0" -msgstr "结点状态设为0" - -#: cinder/virt/baremetal/tilera.py:232 -msgid "rootfs is already removed" -msgstr "rootfs 已经被移除了" - -#: cinder/virt/baremetal/tilera.py:264 -msgid "Before ping to the bare-metal node" -msgstr "在ping裸机节点之前" +msgid "Re-exporting %s volumes" +msgstr "重新导出卷%s" -#: cinder/virt/baremetal/tilera.py:275 +#: cinder/volume/manager.py:257 #, python-format -msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is ready" +msgid "Failed to re-export volume %s: setting to error state" msgstr "" -#: cinder/virt/baremetal/tilera.py:279 +#: cinder/volume/manager.py:264 #, python-format -msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is not ready, out_msg=%(out_msg)s" +msgid "volume %s stuck in a downloading state" msgstr "" -#: cinder/virt/baremetal/tilera.py:292 -msgid "Noting to do for tilera nodes: vmlinux is in CF" +#: cinder/volume/manager.py:271 +#, python-format +msgid "volume %s: skipping export" +msgstr "卷 %s:跳过导出" + +#: cinder/volume/manager.py:273 +#, python-format +msgid "" +"Error encountered during re-exporting phase of driver initialization: " +"%(name)s" msgstr "" -#: cinder/virt/baremetal/tilera.py:316 -msgid "activate_node" -msgstr "activate_node" +#: cinder/volume/manager.py:283 +msgid "Resuming any in progress delete operations" +msgstr "" -#: cinder/virt/baremetal/tilera.py:330 -msgid "Node is unknown error state." 
-msgstr "节点处于未知的错误状态。" +#: cinder/volume/manager.py:286 +#, fuzzy, python-format +msgid "Resuming delete on volume: %s" +msgstr "正在删除volumeID:%s " -#: cinder/virt/disk/api.py:165 -msgid "no capable image handler configured" -msgstr "没有合适的镜像句柄配置好" +#: cinder/volume/manager.py:328 cinder/volume/manager.py:330 +msgid "Failed to create manager volume flow" +msgstr "" -#: cinder/virt/disk/api.py:178 +#: cinder/volume/manager.py:374 cinder/volume/manager.py:391 #, python-format -msgid "unknown disk image handler: %s" -msgstr "未知的磁盘镜像句柄:%s" +msgid "volume %s: deleting" +msgstr "卷%s:删除中" -#: cinder/virt/disk/api.py:189 -msgid "image already mounted" -msgstr "镜像已经挂载" +#: cinder/volume/manager.py:380 +#, fuzzy +msgid "volume is not local to this node" +msgstr "卷不属于这个节点" -#: cinder/virt/disk/api.py:276 cinder/virt/disk/guestfs.py:64 -#: cinder/virt/disk/guestfs.py:78 cinder/virt/disk/mount.py:100 +#: cinder/volume/manager.py:389 #, python-format -msgid "Failed to mount filesystem: %s" -msgstr "挂载文件系统失败:%s" +msgid "volume %s: removing export" +msgstr "卷%s:正在移除导出" -#: cinder/virt/disk/api.py:291 +#: cinder/volume/manager.py:394 #, python-format -msgid "Failed to remove container: %s" -msgstr "移除容器失败:%s" +msgid "Cannot delete volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:422 +#, fuzzy +msgid "Failed to update usages deleting volume" +msgstr "更新代理失败:%(resp)r" -#: cinder/virt/disk/api.py:441 +#: cinder/volume/manager.py:427 #, python-format -msgid "User %(username)s not found in password file." +msgid "volume %s: glance metadata deleted" msgstr "" -#: cinder/virt/disk/api.py:457 +#: cinder/volume/manager.py:430 #, python-format -msgid "User %(username)s not found in shadow file." +msgid "no glance metadata found for volume %s" msgstr "" -#: cinder/virt/disk/guestfs.py:39 +#: cinder/volume/manager.py:434 #, python-format -msgid "unsupported partition: %s" -msgstr "不支持的分区:%s" - -#: cinder/virt/disk/guestfs.py:77 -msgid "unknown guestmount error" -msgstr "未知的guestmount错误" +msgid "volume %s: deleted successfully" +msgstr "卷%s:删除成功" -#: cinder/virt/disk/loop.py:30 +#: cinder/volume/manager.py:451 #, python-format -msgid "Could not attach image to loopback: %s" -msgstr "无法给loopback附加镜像:%s" - -#: cinder/virt/disk/mount.py:76 -msgid "no partitions found" -msgstr "未找到分区" +msgid "snapshot %s: creating" +msgstr "快照 %s:正在创建" -#: cinder/virt/disk/mount.py:77 +#: cinder/volume/manager.py:462 #, python-format -msgid "Failed to map partitions: %s" -msgstr "映射分区失败:%s" - -#: cinder/virt/disk/nbd.py:58 -msgid "nbd unavailable: module not loaded" -msgstr "NBD不可用:模块没有加载" - -#: cinder/virt/disk/nbd.py:63 -msgid "No free nbd devices" -msgstr "没有空闲NBD设备" - -#: cinder/virt/disk/nbd.py:81 -#, python-format -msgid "qemu-nbd error: %s" -msgstr "qemu-nbd 错误:%s" - -#: cinder/virt/disk/nbd.py:93 -#, python-format -msgid "nbd device %s did not show up" -msgstr "nbd 设备 %s 没有出现" +msgid "snapshot %(snap_id)s: creating" +msgstr "" -#: cinder/virt/libvirt/connection.py:265 +#: cinder/volume/manager.py:490 #, python-format -msgid "Connecting to libvirt: %s" -msgstr "正在连接 libvirt:%s" - -#: cinder/virt/libvirt/connection.py:286 -msgid "Connection to libvirt broke" -msgstr "连接 libvirt 失败" +msgid "" +"Failed updating %(snapshot_id)s metadata using the provided volumes " +"%(volume_id)s metadata" +msgstr "" -#: cinder/virt/libvirt/connection.py:388 +#: cinder/volume/manager.py:496 #, python-format -msgid "Error from libvirt during destroy. 
Code=%(errcode)s Error=%(e)s" -msgstr "销毁时发生错误。Code=%(errcode)s Error=%(e)s" +msgid "snapshot %s: created successfully" +msgstr "快照 %s:创建成功" -#: cinder/virt/libvirt/connection.py:400 +#: cinder/volume/manager.py:508 cinder/volume/manager.py:518 #, python-format -msgid "" -"Error from libvirt during saved instance removal. Code=%(errcode)s " -"Error=%(e)s" -msgstr "删除已保存的实例时 libvirt 发生错误。Code=%(errcode)s Error=%(e)s" +msgid "snapshot %s: deleting" +msgstr "快照 %s:正在删除" -#: cinder/virt/libvirt/connection.py:411 +#: cinder/volume/manager.py:526 #, python-format -msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s" -msgstr "在 undefine 时 libvirt 发生错误。Code=%(errcode)s Error=%(e)s" - -#: cinder/virt/libvirt/connection.py:423 -msgid "Instance destroyed successfully." -msgstr "实例销毁成功。" +msgid "Cannot delete snapshot %s: snapshot is busy" +msgstr "" -#: cinder/virt/libvirt/connection.py:435 -#, fuzzy, python-format -msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s" -msgstr "在 undefine 时 libvirt 发生错误。Code=%(errcode)s Error=%(e)s" +#: cinder/volume/manager.py:556 +msgid "Failed to update usages deleting snapshot" +msgstr "" -#: cinder/virt/libvirt/connection.py:461 +#: cinder/volume/manager.py:559 #, python-format -msgid "Deleting instance files %(target)s" -msgstr "删除实例文件 %(target)s" - -#: cinder/virt/libvirt/connection.py:554 -msgid "attaching LXC block device" -msgstr "附加 LXC 块设备" - -#: cinder/virt/libvirt/connection.py:567 -msgid "detaching LXC block device" -msgstr "分离LXC 块设备" +msgid "snapshot %s: deleted successfully" +msgstr "快照 %s:删除成功" -#: cinder/virt/libvirt/connection.py:692 -#, fuzzy -msgid "Instance soft rebooted successfully." -msgstr "实例成功重启。" +#: cinder/volume/manager.py:579 +msgid "being attached by another instance" +msgstr "" -#: cinder/virt/libvirt/connection.py:696 -#, fuzzy -msgid "Failed to soft reboot instance." -msgstr "重新启动实例失败" +#: cinder/volume/manager.py:583 +msgid "being attached by another host" +msgstr "" -#: cinder/virt/libvirt/connection.py:725 -#, fuzzy -msgid "Instance shutdown successfully." -msgstr "实例成功生产。" +#: cinder/volume/manager.py:587 +msgid "being attached by different mode" +msgstr "" -#: cinder/virt/libvirt/connection.py:761 cinder/virt/libvirt/connection.py:905 -msgid "During reboot, instance disappeared." -msgstr "重启过程中,实例消失。" +#: cinder/volume/manager.py:590 +msgid "status must be available or attaching" +msgstr "" -#: cinder/virt/libvirt/connection.py:766 -msgid "Instance rebooted successfully." 
-msgstr "实例成功重启。" +#: cinder/volume/manager.py:698 +#, fuzzy, python-format +msgid "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "把卷 %(volume_id)s 附加到实例 %(instance_id)s 上位置在 %(device)s" -#: cinder/virt/libvirt/connection.py:867 cinder/virt/xenapi/vmops.py:1358 +#: cinder/volume/manager.py:760 #, python-format -msgid "" -"Found %(migration_count)d unconfirmed migrations older than " -"%(confirm_window)d seconds" -msgstr "发现 %(migration_count)d 个超过 %(confirm_window)d 秒未经确认的迁移" +msgid "Unable to fetch connection information from backend: %(err)s" +msgstr "" -#: cinder/virt/libvirt/connection.py:871 +#: cinder/volume/manager.py:807 #, python-format -msgid "Automatically confirming migration %d" -msgstr "正在自动确认迁移 %d" +msgid "Unable to terminate volume connection: %(err)s" +msgstr "" -#: cinder/virt/libvirt/connection.py:896 -msgid "Instance is running" -msgstr "实例在运行" +#: cinder/volume/manager.py:854 +msgid "failed to create new_volume on destination host" +msgstr "" -#: cinder/virt/libvirt/connection.py:910 -msgid "Instance spawned successfully." -msgstr "实例成功生产。" +#: cinder/volume/manager.py:857 +msgid "timeout creating new_volume on destination host" +msgstr "" -#: cinder/virt/libvirt/connection.py:926 +#: cinder/volume/manager.py:880 #, python-format -msgid "data: %(data)r, fpath: %(fpath)r" -msgstr "data:%(data)r, fpath: %(fpath)r" - -#: cinder/virt/libvirt/connection.py:978 -#, fuzzy -msgid "Guest does not have a console available" -msgstr "用户没有管理员权限" +msgid "Failed to copy volume %(vol1)s to %(vol2)s" +msgstr "" -#: cinder/virt/libvirt/connection.py:1020 +#: cinder/volume/manager.py:909 #, python-format -msgid "Path '%(path)s' supports direct I/O" +msgid "" +"migrate_volume_completion: completing migration for volume %(vol1)s " +"(temporary volume %(vol2)s" msgstr "" -#: cinder/virt/libvirt/connection.py:1024 +#: cinder/volume/manager.py:921 #, python-format -msgid "Path '%(path)s' does not support direct I/O: '%(ex)s'" +msgid "" +"migrate_volume_completion is cleaning up an error for volume %(vol1)s " +"(temporary volume %(vol2)s" msgstr "" -#: cinder/virt/libvirt/connection.py:1028 cinder/virt/libvirt/connection.py:1032 +#: cinder/volume/manager.py:940 #, python-format -msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'" +msgid "Failed to delete migration source vol %(vol)s: %(err)s" msgstr "" -#: cinder/virt/libvirt/connection.py:1153 -msgid "Creating image" -msgstr "正在创建镜像" - -#: cinder/virt/libvirt/connection.py:1339 +#: cinder/volume/manager.py:976 #, python-format -msgid "Injecting %(injection)s into image %(img_id)s" -msgstr "将 %(injection)s 注入到镜像 %(img_id)s" +msgid "volume %s: calling driver migrate_volume" +msgstr "" -#: cinder/virt/libvirt/connection.py:1349 -#, python-format -msgid "Ignoring error injecting data into image %(img_id)s (%(e)s)" -msgstr "忽略向镜像 %(img_id)s 注入的数据的错误 (%(e)s)" +#: cinder/volume/manager.py:1016 +#, fuzzy +msgid "Updating volume status" +msgstr "更新主机状态" -#: cinder/virt/libvirt/connection.py:1381 +#: cinder/volume/manager.py:1024 #, python-format -msgid "block_device_list %s" -msgstr "块设备列表 block_device_list %s" - -#: cinder/virt/libvirt/connection.py:1658 -msgid "Starting toXML method" -msgstr "正在启动 toXML 方法" +msgid "" +"Unable to update stats, %(driver_name)s -%(driver_version)s " +"%(config_group)s driver is uninitialized." 
+msgstr "" -#: cinder/virt/libvirt/connection.py:1662 -msgid "Finished toXML method" -msgstr "toXML方法完成" +#: cinder/volume/manager.py:1044 +#, python-format +msgid "Notification {%s} received" +msgstr "收到通知 {%s}" -#: cinder/virt/libvirt/connection.py:1679 +#: cinder/volume/manager.py:1091 #, python-format msgid "" -"Error from libvirt while looking up %(instance_name)s: [Error Code " -"%(error_code)s] %(ex)s" -msgstr "查找 %(instance_name)s时libvirt出错:[错误代码 %(error_code)s] %(ex)s" +"Quota exceeded for %(s_pid)s, tried to extend volume by %(s_size)sG, " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" -#: cinder/virt/libvirt/connection.py:1920 -msgid "libvirt version is too old (does not support getVersion)" -msgstr "libvirt版本过旧(不支持getVersion)" +#: cinder/volume/manager.py:1103 +#, python-format +msgid "volume %s: extending" +msgstr "" -#: cinder/virt/libvirt/connection.py:1942 +#: cinder/volume/manager.py:1105 #, python-format -msgid "'' must be 1, but %d\n" -msgstr "'' 必须为 1, 但是为 %d\n" +msgid "volume %s: extended successfully" +msgstr "" -#: cinder/virt/libvirt/connection.py:1969 +#: cinder/volume/manager.py:1107 #, python-format -msgid "topology (%(topology)s) must have %(ks)s" -msgstr "拓扑 (%(topology)s) 必须含有 %(ks)s" +msgid "volume %s: Error trying to extend volume" +msgstr "" + +#: cinder/volume/manager.py:1169 +msgid "Failed to update usages while retyping volume." +msgstr "" + +#: cinder/volume/manager.py:1170 +msgid "Failed to get old volume type quota reservations" +msgstr "" -#: cinder/virt/libvirt/connection.py:2067 +#: cinder/volume/manager.py:1190 #, python-format -msgid "" -"Instance launched has CPU info:\n" -"%s" +msgid "Volume %s: retyped succesfully" msgstr "" -"已启动实例的CPU信息:\n" -"%s" -#: cinder/virt/libvirt/connection.py:2079 +#: cinder/volume/manager.py:1193 #, python-format msgid "" -"CPU doesn't have compatibility.\n" -"\n" -"%(ret)s\n" -"\n" -"Refer to %(u)s" +"Volume %s: driver error when trying to retype, falling back to generic " +"mechanism." msgstr "" -"CPU 不兼容.\n" -"\n" -"%(ret)s\n" -"\n" -"参考 %(u)s" -#: cinder/virt/libvirt/connection.py:2136 -#, python-format -msgid "Timeout migrating for %s. nwfilter not found." -msgstr "迁移 %s 超时" +#: cinder/volume/manager.py:1204 +msgid "Retype requires migration but is not allowed." +msgstr "" -#: cinder/virt/libvirt/connection.py:2352 -#, python-format -msgid "skipping %(path)s since it looks like volume" -msgstr "因它像卷,所以跳过 %(path)s" +#: cinder/volume/manager.py:1212 +msgid "Volume must not have snapshots." +msgstr "" -#: cinder/virt/libvirt/connection.py:2407 +#: cinder/volume/qos_specs.py:57 #, python-format -msgid "Getting disk size of %(i_name)s: %(e)s" +msgid "Valid consumer of QoS specs are: %s" msgstr "" -#: cinder/virt/libvirt/connection.py:2458 +#: cinder/volume/qos_specs.py:84 cinder/volume/qos_specs.py:105 +#: cinder/volume/qos_specs.py:155 cinder/volume/qos_specs.py:197 +#: cinder/volume/qos_specs.py:211 cinder/volume/qos_specs.py:225 +#: cinder/volume/volume_types.py:43 #, python-format -msgid "Instance %s: Starting migrate_disk_and_power_off" -msgstr "实例 %s:开始执行 migrate_disk_and_power_off" - -#: cinder/virt/libvirt/connection.py:2513 -#, fuzzy, python-format -msgid "During wait running, instance disappeared." -msgstr "%s 在运行中消失了。" +msgid "DB error: %s" +msgstr "数据库错误:%s" -#: cinder/virt/libvirt/connection.py:2518 -#, fuzzy, python-format -msgid "Instance running successfully." 
-msgstr "实例 %s 成功运行。" +#: cinder/volume/qos_specs.py:123 cinder/volume/qos_specs.py:140 +#: cinder/volume/qos_specs.py:272 cinder/volume/volume_types.py:52 +#: cinder/volume/volume_types.py:99 +msgid "id cannot be None" +msgstr "id不能是None" -#: cinder/virt/libvirt/connection.py:2525 +#: cinder/volume/qos_specs.py:156 #, python-format -msgid "Instance %s: Starting finish_migration" -msgstr "实例 %s:开始执行 finish_migration" +msgid "Failed to get all associations of qos specs %s" +msgstr "" -#: cinder/virt/libvirt/connection.py:2565 +#: cinder/volume/qos_specs.py:189 #, python-format -msgid "Instance %s: Starting finish_revert_migration" -msgstr "实例 %s:开始执行 finish_revert_migration" - -#: cinder/virt/libvirt/firewall.py:42 msgid "" -"Libvirt module could not be loaded. NWFilterFirewall will not work " -"correctly." -msgstr "Libvirt模块无法加载。NWFilterFirewall 无法正常工作。" - -#: cinder/virt/libvirt/firewall.py:93 -msgid "Called setup_basic_filtering in nwfilter" -msgstr "在 nwfilter 里调用 setup_basic_filtering" - -#: cinder/virt/libvirt/firewall.py:101 -msgid "Ensuring static filters" -msgstr "正在确保静态过滤器" +"Type %(type_id)s is already associated with another qos specs: " +"%(qos_specs_id)s" +msgstr "" -#: cinder/virt/libvirt/firewall.py:171 +#: cinder/volume/qos_specs.py:198 #, python-format -msgid "The nwfilter(%(instance_filter_name)s) is not found." -msgstr "nwfilter(%(instance_filter_name)s)未找到" +msgid "Failed to associate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" -#: cinder/virt/libvirt/firewall.py:217 +#: cinder/volume/qos_specs.py:212 #, python-format -msgid "The nwfilter(%(instance_filter_name)s) for%(name)s is not found." -msgstr "名称为 %(name)s 的nwfilter(%(instance_filter_name)s) 未找到。" - -#: cinder/virt/libvirt/firewall.py:233 -msgid "iptables firewall: Setup Basic Filtering" -msgstr "iptables 防火墙:设置基本的过滤规则" - -#: cinder/virt/libvirt/firewall.py:252 -msgid "Attempted to unfilter instance which is not filtered" -msgstr "试图不过滤没有过滤的实例" +msgid "Failed to disassociate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" -#: cinder/virt/libvirt/imagecache.py:170 +#: cinder/volume/qos_specs.py:226 #, python-format -msgid "%s is a valid instance name" -msgstr "%s 是一个正确的实例名称" +msgid "Failed to disassociate qos specs %s." +msgstr "" -#: cinder/virt/libvirt/imagecache.py:173 -#, python-format -msgid "%s has a disk file" -msgstr "%s 有一个磁盘文件" +#: cinder/volume/qos_specs.py:284 cinder/volume/volume_types.py:111 +msgid "name cannot be None" +msgstr "name不能是None" -#: cinder/virt/libvirt/imagecache.py:175 +#: cinder/volume/utils.py:144 #, python-format -msgid "Instance %(instance)s is backed by %(backing)s" -msgstr "实例 %(instance)s 由文件 %(backing)s 来备份" +msgid "" +"Incorrect value error: %(blocksize)s, it may indicate that " +"'volume_dd_blocksize' was configured incorrectly. Fall back to default." 
+msgstr "" -#: cinder/virt/libvirt/imagecache.py:186 +#: cinder/volume/volume_types.py:130 #, python-format msgid "" -"Instance %(instance)s is using a backing file %(backing)s which does not " -"appear in the image service" -msgstr "实例 %(instance)s 正在使用的备份文件 %(backing)s 没有出现在镜像服务里。" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" -#: cinder/virt/libvirt/imagecache.py:237 +#: cinder/volume/drivers/block_device.py:131 +#: cinder/volume/drivers/block_device.py:143 cinder/volume/drivers/lvm.py:654 +#: cinder/volume/drivers/lvm.py:669 #, fuzzy, python-format -msgid "%(id)s (%(base_file)s): image verification failed" -msgstr "%(container_format)s-%(id)s (%(base_file)s):镜像验证失败" +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "跳过remove_export。没有为卷提供iscsi_target:%d" -#: cinder/virt/libvirt/imagecache.py:247 +#: cinder/volume/drivers/block_device.py:157 cinder/volume/drivers/lvm.py:687 #, fuzzy, python-format -msgid "%(id)s (%(base_file)s): image verification skipped, no hash stored" -msgstr "%(container_format)s-%(id)s (%(base_file)s):镜像验证跳过,没有散列存储" +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %s" +msgstr "跳过remove_export。没有为卷导出iscsi_target:%d" -#: cinder/virt/libvirt/imagecache.py:266 +#: cinder/volume/drivers/block_device.py:183 cinder/volume/drivers/lvm.py:483 #, fuzzy, python-format -msgid "Cannot remove %(base_file)s, it does not exist" -msgstr "删除 %(base_file)s 失败,错误是 %(error)s" +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "跳过ensure_export。没有为卷提供iscsi_target:%d" -#: cinder/virt/libvirt/imagecache.py:278 -#, python-format -msgid "Base file too young to remove: %s" -msgstr "基文件太新不需要删除:%s" +#: cinder/volume/drivers/block_device.py:200 cinder/volume/drivers/lvm.py:504 +#, fuzzy, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "跳过ensure_export。没有为卷提供iscsi_target:%d" -#: cinder/virt/libvirt/imagecache.py:281 +#: cinder/volume/drivers/block_device.py:272 cinder/volume/drivers/lvm.py:227 #, python-format -msgid "Removing base file: %s" -msgstr "正在删除基文件:%s" +msgid "Performing secure delete on volume: %s" +msgstr "" -#: cinder/virt/libvirt/imagecache.py:288 +#: cinder/volume/drivers/block_device.py:287 #, python-format -msgid "Failed to remove %(base_file)s, error was %(error)s" -msgstr "删除 %(base_file)s 失败,错误是 %(error)s" +msgid "Error unrecognized volume_clear option: %s" +msgstr "" -#: cinder/virt/libvirt/imagecache.py:299 +#: cinder/volume/drivers/block_device.py:311 cinder/volume/drivers/lvm.py:300 +#: cinder/volume/drivers/zadara.py:509 cinder/volume/drivers/nexenta/nfs.py:189 #, fuzzy, python-format -msgid "%(id)s (%(base_file)s): checking" -msgstr "%(container_format)s-%(id)s (%(base_file)s):正在检查中" +msgid "Creating clone of volume: %s" +msgstr "创建卷 %s 的快照" -#: cinder/virt/libvirt/imagecache.py:318 -#, fuzzy, python-format -msgid "" -"%(id)s (%(base_file)s): in use: on this node %(local)d local, %(remote)d " -"on other nodes" +#: cinder/volume/drivers/block_device.py:380 +msgid "No free disk" msgstr "" -"%(container_format)s-%(id)s (%(base_file)s):正在使用中:本地节点 %(local)d, 远程节点 " -"%(remote)d" -#: cinder/virt/libvirt/imagecache.py:330 -#, fuzzy, python-format -msgid "" -"%(id)s (%(base_file)s): warning -- an absent base file is in use! 
" -"instances: %(instance_list)s" +#: cinder/volume/drivers/block_device.py:393 +msgid "No big enough free disk" msgstr "" -"%(container_format)s-%(id)s (%(base_file)s):警告 -- 使用中缺少基文件! 实例: " -"%(instance_list)s" - -#: cinder/virt/libvirt/imagecache.py:338 -#, fuzzy, python-format -msgid "%(id)s (%(base_file)s): in use on (%(remote)d on other nodes)" -msgstr "%(container_format)s-%(id)s (%(base_file)s):在使用中: 在远程节点 (%(remote)d 上" -#: cinder/virt/libvirt/imagecache.py:348 -#, fuzzy, python-format -msgid "%(id)s (%(base_file)s): image is not in use" -msgstr "%(container_format)s-%(id)s (%(base_file)s):镜像不在使用中" +#: cinder/volume/drivers/coraid.py:84 +#, python-format +msgid "Invalid ESM url scheme \"%s\". Supported https only." +msgstr "" -#: cinder/virt/libvirt/imagecache.py:354 -#, fuzzy, python-format -msgid "%(id)s (%(base_file)s): image is in use" -msgstr "%(container_format)s-%(id)s (%(base_file)s):镜像在使用中" +#: cinder/volume/drivers/coraid.py:111 +msgid "Invalid REST handle name. Expected path." +msgstr "" -#: cinder/virt/libvirt/imagecache.py:377 +#: cinder/volume/drivers/coraid.py:134 #, python-format -msgid "Skipping verification, no base directory at %s" -msgstr "跳过验证,在 %s 上没有基础目录" +msgid "Call to json.loads() failed: %(ex)s. Response: %(resp)s" +msgstr "" -#: cinder/virt/libvirt/imagecache.py:381 -msgid "Verify base images" -msgstr "验证基础镜像" +#: cinder/volume/drivers/coraid.py:224 +msgid "Session is expired. Relogin on ESM." +msgstr "" -#: cinder/virt/libvirt/imagecache.py:388 -#, python-format -msgid "Image id %(id)s yields fingerprint %(fingerprint)s" +#: cinder/volume/drivers/coraid.py:244 +msgid "Reply is empty." msgstr "" -#: cinder/virt/libvirt/imagecache.py:406 -#, python-format -msgid "Unknown base file: %s" -msgstr "未知的基文件:%s" +#: cinder/volume/drivers/coraid.py:246 +msgid "Error message is empty." +msgstr "" -#: cinder/virt/libvirt/imagecache.py:411 +#: cinder/volume/drivers/coraid.py:284 #, python-format -msgid "Active base files: %s" -msgstr "活跃的基文件:%s" +msgid "Coraid Appliance ping failed: %s" +msgstr "" -#: cinder/virt/libvirt/imagecache.py:414 +#: cinder/volume/drivers/coraid.py:297 #, python-format -msgid "Corrupt base files: %s" -msgstr "损坏的基文件:%s" +msgid "Volume \"%(name)s\" created with VSX LUN \"%(lun)s\"" +msgstr "" -#: cinder/virt/libvirt/imagecache.py:418 +#: cinder/volume/drivers/coraid.py:311 #, python-format -msgid "Removable base files: %s" -msgstr "可删除的基文件:%s" - -#: cinder/virt/libvirt/imagecache.py:426 -msgid "Verification complete" -msgstr "确认完成" - -#: cinder/virt/libvirt/utils.py:264 -msgid "Unable to find an open port" -msgstr "无法找到一个开放端口" +msgid "Volume \"%s\" deleted." +msgstr "" -#: cinder/virt/libvirt/vif.py:90 +#: cinder/volume/drivers/coraid.py:315 #, python-format -msgid "Ensuring vlan %(vlan)s and bridge %(bridge)s" -msgstr "保证vlan %(vlan)s 和桥 %(bridge)s" +msgid "Resize volume \"%(name)s\" to %(size)s GB." +msgstr "" -#: cinder/virt/libvirt/vif.py:99 +#: cinder/volume/drivers/coraid.py:319 #, python-format -msgid "Ensuring bridge %s" -msgstr "保证桥 %s" +msgid "Repository for volume \"%(name)s\" found: \"%(repo)s\"" +msgstr "" -#: cinder/virt/libvirt/vif.py:165 cinder/virt/libvirt/vif.py:220 +#: cinder/volume/drivers/coraid.py:333 #, python-format -msgid "Failed while unplugging vif of instance '%s'" -msgstr "移除实例”%s“的虚拟网络设备时失败" +msgid "Volume \"%(name)s\" resized. New size is %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:385 +msgid "Cannot create clone volume in different repository." 
+msgstr "" -#: cinder/virt/libvirt/volume.py:163 +#: cinder/volume/drivers/coraid.py:505 #, python-format -msgid "iSCSI device not found at %s" -msgstr "在 %s 未找到iSCSI设备" +msgid "Initialize connection %(shelf)s/%(lun)s for %(name)s" +msgstr "" -#: cinder/virt/libvirt/volume.py:166 +#: cinder/volume/drivers/eqlx.py:139 #, python-format msgid "" -"ISCSI volume not yet found at: %(mount_device)s. Will rescan & retry. " -"Try number: %(tries)s" -msgstr "在 %(mount_device)s 上还没有找到iSCSI卷。将再次扫描并重试。尝试次数:%(tries)s" +"CLI output\n" +"%s" +msgstr "" -#: cinder/virt/libvirt/volume.py:178 +#: cinder/volume/drivers/eqlx.py:154 +msgid "Reading CLI MOTD" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:158 #, python-format -msgid "Found iSCSI node %(mount_device)s (after %(tries)s rescans)" -msgstr "找到iSCSI节点 %(mount_device)s (经过%(tries)s 次再扫描)" +msgid "Setting CLI terminal width: '%s'" +msgstr "" -#: cinder/virt/vmwareapi/error_util.py:93 +#: cinder/volume/drivers/eqlx.py:162 #, python-format -msgid "Error(s) %s occurred in the call to RetrieveProperties" -msgstr "调用 RetrieveProperties 时发生错误 %s" +msgid "Sending CLI command: '%s'" +msgstr "" -#: cinder/virt/vmwareapi/fake.py:44 cinder/virt/xenapi/fake.py:77 +#: cinder/volume/drivers/eqlx.py:169 +msgid "Error executing EQL command" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:199 #, python-format -msgid "%(text)s: _db_content => %(content)s" -msgstr "%(text)s: _db_content => %(content)s" +msgid "EQL-driver: executing \"%s\"" +msgstr "" -#: cinder/virt/vmwareapi/fake.py:131 +#: cinder/volume/drivers/eqlx.py:208 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:383 #, python-format -msgid "Property %(attr)s not set for the managed object %(objName)s" -msgstr "属性 %(attr)s 没有为管理对象 %(objName)s 设置" +msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" +msgstr "" -#: cinder/virt/vmwareapi/fake.py:437 -msgid "There is no VM registered" -msgstr "没有虚拟机注册" +#: cinder/volume/drivers/eqlx.py:215 cinder/volume/drivers/san/san.py:149 +#, fuzzy, python-format +msgid "Error running SSH command: %s" +msgstr "握手出错:%s" -#: cinder/virt/vmwareapi/fake.py:439 cinder/virt/vmwareapi/fake.py:609 +#: cinder/volume/drivers/eqlx.py:282 #, python-format -msgid "Virtual Machine with ref %s is not there" -msgstr "ref 为 %s 的虚拟机不存在" +msgid "Volume %s does not exist, it may have already been deleted" +msgstr "" -#: cinder/virt/vmwareapi/fake.py:502 +#: cinder/volume/drivers/eqlx.py:300 #, python-format -msgid "Logging out a session that is invalid or already logged out: %s" -msgstr "退出无效的会话或者已经退出了:%s" - -#: cinder/virt/vmwareapi/fake.py:517 -msgid "Session is faulty" -msgstr "会话有错误" +msgid "EQL-driver: Setup is complete, group IP is %s" +msgstr "" -#: cinder/virt/vmwareapi/fake.py:520 -msgid "Session Invalid" -msgstr "会话无效" +#: cinder/volume/drivers/eqlx.py:304 +msgid "Failed to setup the Dell EqualLogic driver" +msgstr "" -#: cinder/virt/vmwareapi/fake.py:606 -msgid " No Virtual Machine has been registered yet" -msgstr " 还没有虚拟机被注册" +#: cinder/volume/drivers/eqlx.py:320 +#, python-format +msgid "Failed to create volume %s" +msgstr "" -#: cinder/virt/vmwareapi/io_util.py:99 +#: cinder/volume/drivers/eqlx.py:329 #, python-format -msgid "Glance image %s is in killed state" -msgstr "Glance 镜像 %s 在被杀死的状态" +msgid "Volume %s was not found while trying to delete it" +msgstr "" -#: cinder/virt/vmwareapi/io_util.py:107 +#: cinder/volume/drivers/eqlx.py:333 #, python-format -msgid "Glance image %(image_id)s is in unknown state - %(state)s" -msgstr "Glance 镜像 %(image_id)s 
处于未知状态 - %(state)s" +msgid "Failed to delete volume %s" +msgstr "" -#: cinder/virt/vmwareapi/network_utils.py:128 -msgid "" -"ESX SOAP server returned an empty port group for the host system in its " -"response" -msgstr "ESX SOAP 服务器在响应里为托管系统返回一个空的端口组" +#: cinder/volume/drivers/eqlx.py:348 +#, python-format +msgid "Failed to create snapshot of volume %s" +msgstr "" -#: cinder/virt/vmwareapi/network_utils.py:155 +#: cinder/volume/drivers/eqlx.py:361 #, python-format -msgid "Creating Port Group with name %s on the ESX host" -msgstr "正在ESX主机上创建名称为 %s 的端口组" +msgid "Failed to create volume from snapshot %s" +msgstr "" -#: cinder/virt/vmwareapi/network_utils.py:169 +#: cinder/volume/drivers/eqlx.py:374 #, python-format -msgid "Created Port Group with name %s on the ESX host" -msgstr "已经在ESX主机上创建了名称为 %s 的端口组" +msgid "Failed to create clone of volume %s" +msgstr "" -#: cinder/virt/vmwareapi/read_write_util.py:150 +#: cinder/volume/drivers/eqlx.py:384 #, python-format -msgid "Exception during HTTP connection close in VMWareHTTpWrite. Exception is %s" -msgstr "HTTP连接关闭时发生异常 VMWareHTTpWrite. Exception:%s" +msgid "Failed to delete snapshot %(snap)s of volume %(vol)s" +msgstr "" -#: cinder/virt/vmwareapi/vim.py:84 -msgid "Unable to import suds." -msgstr "无法导入 suds。" +#: cinder/volume/drivers/eqlx.py:405 +#, python-format +msgid "Failed to initialize connection to volume %s" +msgstr "" -#: cinder/virt/vmwareapi/vim.py:90 -msgid "Must specify vmwareapi_wsdl_loc" -msgstr "必须指定 vmwareapi_wsdl_loc" +#: cinder/volume/drivers/eqlx.py:415 +#, python-format +msgid "Failed to terminate connection to volume %s" +msgstr "" -#: cinder/virt/vmwareapi/vim.py:145 +#: cinder/volume/drivers/eqlx.py:436 #, python-format -msgid "No such SOAP method '%s' provided by VI SDK" -msgstr "VI SDK没有提供这样的SOAP方法 “%s”" +msgid "Volume %s is not found!, it may have been deleted" +msgstr "" -#: cinder/virt/vmwareapi/vim.py:150 +#: cinder/volume/drivers/eqlx.py:440 #, python-format -msgid "httplib error in %s: " -msgstr "在 %s 中发生 httplib 错误: " +msgid "Failed to ensure export of volume %s" +msgstr "" -#: cinder/virt/vmwareapi/vim.py:157 +#: cinder/volume/drivers/eqlx.py:459 #, python-format -msgid "Socket error in %s: " -msgstr "在 %s 套接字中发生错误 " +msgid "Failed to extend_volume %(name)s from %(current_size)sGB to %(new_size)sGB" +msgstr "" -#: cinder/virt/vmwareapi/vim.py:162 +#: cinder/volume/drivers/glusterfs.py:86 #, python-format -msgid "Type error in %s: " -msgstr "在%s里发生类型错误: " +msgid "There's no Gluster config file configured (%s)" +msgstr "" -#: cinder/virt/vmwareapi/vim.py:166 +#: cinder/volume/drivers/glusterfs.py:91 #, python-format -msgid "Exception in %s " -msgstr "在 %s 发生异常 " +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" -#: cinder/virt/vmwareapi/vmops.py:66 -msgid "Getting list of instances" -msgstr "获取实例列表" +#: cinder/volume/drivers/glusterfs.py:103 +msgid "mount.glusterfs is not installed" +msgstr "" -#: cinder/virt/vmwareapi/vmops.py:82 +#: cinder/volume/drivers/glusterfs.py:161 #, python-format -msgid "Got total of %s instances" -msgstr "总共获得 %s 个实例" +msgid "Cloning volume %(src)s to volume %(dst)s" +msgstr "" -#: cinder/virt/vmwareapi/vmops.py:126 -msgid "Couldn't get a local Datastore reference" -msgstr "无法得到本地的存储引用" +#: cinder/volume/drivers/glusterfs.py:166 +msgid "Volume status must be 'available'." 
+msgstr "" -#: cinder/virt/vmwareapi/vmops.py:196 -#, python-format -msgid "Creating VM with the name %s on the ESX host" -msgstr "在ESX主机上创建名为 %s 的虚拟机" +#: cinder/volume/drivers/glusterfs.py:202 cinder/volume/drivers/nfs.py:122 +#: cinder/volume/drivers/netapp/nfs.py:753 +#, fuzzy, python-format +msgid "casted to %s" +msgstr "嵌套(调用)返回 %s" -#: cinder/virt/vmwareapi/vmops.py:205 -#, python-format -msgid "Created VM with the name %s on the ESX host" -msgstr "已经在ESX主机上创建名为 %s 的虚拟机" +#: cinder/volume/drivers/glusterfs.py:216 +msgid "Snapshot status must be \"available\" to clone." +msgstr "" -#: cinder/virt/vmwareapi/vmops.py:234 +#: cinder/volume/drivers/glusterfs.py:238 #, python-format -msgid "" -"Creating Virtual Disk of size %(vmdk_file_size_in_kb)s KB and adapter " -"type %(adapter_type)s on the ESX host local store %(data_store_name)s" +msgid "snapshot: %(snap)s, volume: %(vol)s, volume_size: %(size)s" msgstr "" -"创建 %(vmdk_file_size_in_kb)s KB 大的虚拟磁盘和适配器类型 %(adapter_type)s 在ESX主机的本地存储 " -"%(data_store_name)s 上" -#: cinder/virt/vmwareapi/vmops.py:251 +#: cinder/volume/drivers/glusterfs.py:257 #, python-format -msgid "" -"Created Virtual Disk of size %(vmdk_file_size_in_kb)s KB on the ESX host " -"local store %(data_store_name)s" -msgstr "创建 %(vmdk_file_size_in_kb)s KB 大的虚拟磁盘在ESX主机的本地存储 %(data_store_name)s 上" +msgid "will copy from snapshot at %s" +msgstr "" -#: cinder/virt/vmwareapi/vmops.py:260 +#: cinder/volume/drivers/glusterfs.py:275 cinder/volume/drivers/nfs.py:172 #, python-format -msgid "" -"Deleting the file %(flat_uploaded_vmdk_path)s on the ESX host localstore " -"%(data_store_name)s" -msgstr "删除文件 %(flat_uploaded_vmdk_path)s 在ESX主机的本地存储 %(data_store_name)s 上" +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" -#: cinder/virt/vmwareapi/vmops.py:272 +#: cinder/volume/drivers/glusterfs.py:373 #, python-format -msgid "" -"Deleted the file %(flat_uploaded_vmdk_path)s on the ESX host local store " -"%(data_store_name)s" -msgstr "删除文件 %(flat_uploaded_vmdk_path)s 在ESX主机的本地存储 %(data_store_name)s 上" +msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" +msgstr "" -#: cinder/virt/vmwareapi/vmops.py:283 +#: cinder/volume/drivers/glusterfs.py:403 #, python-format -msgid "" -"Downloading image file data %(image_ref)s to the ESX data store " -"%(data_store_name)s" -msgstr "下载文件数据 %(image_ref)s 到ESX主机的数据存储 %(data_store_name)s 上" +msgid "nova call result: %s" +msgstr "" -#: cinder/virt/vmwareapi/vmops.py:298 -#, python-format -msgid "" -"Downloaded image file data %(image_ref)s to the ESX data store " -"%(data_store_name)s" -msgstr "已经下载镜像文件数据 %(image_ref)s 在ESX数据存储 %(data_store_name)s 上" +#: cinder/volume/drivers/glusterfs.py:405 +msgid "Call to Nova to create snapshot failed" +msgstr "" -#: cinder/virt/vmwareapi/vmops.py:315 -#, python-format -msgid "Reconfiguring VM instance %s to attach the image disk" -msgstr "正在重新配置虚拟机实例 %s 来附加镜像磁盘" +#: cinder/volume/drivers/glusterfs.py:427 +msgid "Nova returned \"error\" status while creating snapshot." 
+msgstr "" -#: cinder/virt/vmwareapi/vmops.py:322 +#: cinder/volume/drivers/glusterfs.py:431 #, python-format -msgid "Reconfigured VM instance %s to attach the image disk" -msgstr "已经重新配置虚拟机实例 %s 来附加于镜像磁盘" +msgid "Status of snapshot %(id)s is now %(status)s" +msgstr "" -#: cinder/virt/vmwareapi/vmops.py:329 +#: cinder/volume/drivers/glusterfs.py:444 #, python-format -msgid "Powering on the VM instance %s" -msgstr "正启动虚拟机实例 %s" +msgid "Timed out while waiting for Nova update for creation of snapshot %s." +msgstr "" -#: cinder/virt/vmwareapi/vmops.py:335 +#: cinder/volume/drivers/glusterfs.py:456 #, python-format -msgid "Powered on the VM instance %s" -msgstr "已经启动虚拟机实例 %s" +msgid "create snapshot: %s" +msgstr "" -#: cinder/virt/vmwareapi/vmops.py:381 +#: cinder/volume/drivers/glusterfs.py:457 #, python-format -msgid "Creating Snapshot of the VM instance %s " -msgstr "正在创建虚拟机实例快照 %s " +msgid "volume id: %s" +msgstr "" -#: cinder/virt/vmwareapi/vmops.py:391 -#, python-format -msgid "Created Snapshot of the VM instance %s " -msgstr "已经创建虚拟机实例快照 %s " +#: cinder/volume/drivers/glusterfs.py:532 +msgid "'active' must be present when writing snap_info." +msgstr "" -#: cinder/virt/vmwareapi/vmops.py:434 +#: cinder/volume/drivers/glusterfs.py:562 #, python-format -msgid "Copying disk data before snapshot of the VM instance %s" -msgstr "正在快照虚拟机实例 %s 之前复制磁盘数据" +msgid "deleting snapshot %s" +msgstr "" -#: cinder/virt/vmwareapi/vmops.py:447 -#, python-format -msgid "Copied disk data before snapshot of the VM instance %s" -msgstr "已经在快照虚拟机实例 %s 之前复制磁盘数据" +#: cinder/volume/drivers/glusterfs.py:566 +msgid "Volume status must be \"available\" or \"in-use\"." +msgstr "" -#: cinder/virt/vmwareapi/vmops.py:456 +#: cinder/volume/drivers/glusterfs.py:582 #, python-format -msgid "Uploading image %s" -msgstr "上传镜像 %s" +msgid "" +"Snapshot record for %s is not present, allowing snapshot_delete to " +"proceed." +msgstr "" -#: cinder/virt/vmwareapi/vmops.py:469 +#: cinder/volume/drivers/glusterfs.py:587 #, python-format -msgid "Uploaded image %s" -msgstr "已经上传镜像 %s" +msgid "snapshot_file for this snap is %s" +msgstr "" -#: cinder/virt/vmwareapi/vmops.py:479 +#: cinder/volume/drivers/glusterfs.py:608 #, python-format -msgid "Deleting temporary vmdk file %s" -msgstr "正在删除临时的vmdk文件 %s" +msgid "No base file found for %s." +msgstr "" -#: cinder/virt/vmwareapi/vmops.py:488 +#: cinder/volume/drivers/glusterfs.py:625 #, python-format -msgid "Deleted temporary vmdk file %s" -msgstr "已经删除临时的vmdk文件 %s" - -#: cinder/virt/vmwareapi/vmops.py:520 -msgid "instance is not powered on" -msgstr "实例未启动" +msgid "No %(base_id)s found for %(file)s" +msgstr "" -#: cinder/virt/vmwareapi/vmops.py:527 +#: cinder/volume/drivers/glusterfs.py:680 #, python-format -msgid "Rebooting guest OS of VM %s" -msgstr "正在重启虚拟机客户操作系统 %s" +msgid "No file found with %s as backing file." +msgstr "" -#: cinder/virt/vmwareapi/vmops.py:530 +#: cinder/volume/drivers/glusterfs.py:690 #, python-format -msgid "Rebooted guest OS of VM %s" -msgstr "已经重启虚拟机客户操作系统 %s" +msgid "No snap found with %s as backing file." +msgstr "" -#: cinder/virt/vmwareapi/vmops.py:532 +#: cinder/volume/drivers/glusterfs.py:701 #, python-format -msgid "Doing hard reboot of VM %s" -msgstr "正在冷启动虚拟机 %s" +msgid "No file depends on %s." +msgstr "" -#: cinder/virt/vmwareapi/vmops.py:536 +#: cinder/volume/drivers/glusterfs.py:727 #, python-format -msgid "Did hard reboot of VM %s" -msgstr "已经冷启动虚拟机 %s" +msgid "Check condition failed: %s expected to be None." 
+msgstr "" -#: cinder/virt/vmwareapi/vmops.py:548 -#, python-format -msgid "instance - %s not present" -msgstr "实例 - %s 不存在" +#: cinder/volume/drivers/glusterfs.py:778 +msgid "Call to Nova delete snapshot failed" +msgstr "" -#: cinder/virt/vmwareapi/vmops.py:567 +#: cinder/volume/drivers/glusterfs.py:796 #, python-format -msgid "Powering off the VM %s" -msgstr "正在关闭虚拟机 %s" +msgid "status of snapshot %s is still \"deleting\"... waiting" +msgstr "" -#: cinder/virt/vmwareapi/vmops.py:572 +#: cinder/volume/drivers/glusterfs.py:802 #, python-format -msgid "Powered off the VM %s" -msgstr "关闭虚拟机 %s" +msgid "Unable to delete snapshot %(id)s, status: %(status)s." +msgstr "" -#: cinder/virt/vmwareapi/vmops.py:576 +#: cinder/volume/drivers/glusterfs.py:815 #, python-format -msgid "Unregistering the VM %s" -msgstr "正在注销虚拟机 %s" +msgid "Timed out while waiting for Nova update for deletion of snapshot %(id)s." +msgstr "" -#: cinder/virt/vmwareapi/vmops.py:579 +#: cinder/volume/drivers/glusterfs.py:904 #, python-format -msgid "Unregistered the VM %s" -msgstr "已经注销虚拟机 %s" +msgid "%s must be a valid raw or qcow2 image." +msgstr "" -#: cinder/virt/vmwareapi/vmops.py:581 -#, python-format -msgid "" -"In vmwareapi:vmops:destroy, got this exception while un-registering the " -"VM: %s" -msgstr "注销虚拟机时在 vmwareapi:vmops:destroy 中发生异常:%s" +#: cinder/volume/drivers/glusterfs.py:967 +msgid "Extend volume is only supported for this driver when no snapshots exist." +msgstr "" -#: cinder/virt/vmwareapi/vmops.py:592 +#: cinder/volume/drivers/glusterfs.py:975 #, python-format -msgid "Deleting contents of the VM %(name)s from datastore %(datastore_name)s" -msgstr "正在删除虚拟机 %(name)s 的内容位置在数据存储 %(datastore_name)s 中" +msgid "Unrecognized backing format: %s" +msgstr "" -#: cinder/virt/vmwareapi/vmops.py:602 +#: cinder/volume/drivers/glusterfs.py:990 #, python-format -msgid "Deleted contents of the VM %(name)s from datastore %(datastore_name)s" -msgstr "已经删除虚拟机 %(name)s 的内未知在数据存储 %(datastore_name)s 中" +msgid "creating new volume at %s" +msgstr "" -#: cinder/virt/vmwareapi/vmops.py:607 +#: cinder/volume/drivers/glusterfs.py:993 #, python-format -msgid "" -"In vmwareapi:vmops:destroy, got this exception while deleting the VM " -"contents from the disk: %s" -msgstr "当从磁盘删除虚拟机的内容时在 vmwareapi:vmops:destroy 里发生异常:%s" - -#: cinder/virt/vmwareapi/vmops.py:615 -msgid "pause not supported for vmwareapi" -msgstr "vmwareapi 不支持暂停" +msgid "file already exists at %s" +msgstr "" -#: cinder/virt/vmwareapi/vmops.py:619 -msgid "unpause not supported for vmwareapi" -msgstr "vmwareapi 不支持取消暂停" +#: cinder/volume/drivers/glusterfs.py:1019 cinder/volume/drivers/nfs.py:159 +#, fuzzy, python-format +msgid "Exception during mounting %s" +msgstr "加载扩展发生异常:%s" -#: cinder/virt/vmwareapi/vmops.py:633 +#: cinder/volume/drivers/glusterfs.py:1021 #, python-format -msgid "Suspending the VM %s " -msgstr "挂起虚拟机 %s " +msgid "Available shares: %s" +msgstr "" -#: cinder/virt/vmwareapi/vmops.py:637 +#: cinder/volume/drivers/glusterfs.py:1038 #, python-format -msgid "Suspended the VM %s " -msgstr "已经挂起虚拟机 %s " - -#: cinder/virt/vmwareapi/vmops.py:640 -msgid "instance is powered off and can not be suspended." -msgstr "实例已关闭,无法挂起。" +msgid "" +"GlusterFS share at %(dir)s is not writable by the Cinder volume service. " +"Snapshot operations will not be supported." +msgstr "" -#: cinder/virt/vmwareapi/vmops.py:643 +#: cinder/volume/drivers/gpfs.py:96 #, python-format -msgid "VM %s was already in suspended state. 
So returning without doing anything" -msgstr "虚拟机 %s 已经在挂起状态。不做任何操作直接返回" +msgid "GPFS is not active. Detailed output: %s" +msgstr "" -#: cinder/virt/vmwareapi/vmops.py:656 +#: cinder/volume/drivers/gpfs.py:97 #, python-format -msgid "Resuming the VM %s" -msgstr "正在恢复虚拟机 %s" +msgid "GPFS is not running - state: %s" +msgstr "" -#: cinder/virt/vmwareapi/vmops.py:661 -#, python-format -msgid "Resumed the VM %s " -msgstr "已经恢复虚拟机 %s " +#: cinder/volume/drivers/gpfs.py:140 +msgid "Option gpfs_mount_point_base is not set correctly." +msgstr "" -#: cinder/virt/vmwareapi/vmops.py:663 -msgid "instance is not in a suspended state" -msgstr "实例不在挂起状态" +#: cinder/volume/drivers/gpfs.py:147 +msgid "Option gpfs_images_share_mode is not set correctly." +msgstr "" -#: cinder/virt/vmwareapi/vmops.py:699 -msgid "get_diagnostics not implemented for vmwareapi" -msgstr "get_diagnostics 没有在 vmwareapi 里实现" +#: cinder/volume/drivers/gpfs.py:153 +msgid "Option gpfs_images_dir is not set correctly." +msgstr "" -#: cinder/virt/vmwareapi/vmops.py:757 +#: cinder/volume/drivers/gpfs.py:160 #, python-format msgid "" -"Reconfiguring VM instance %(name)s to set the machine id with ip - " -"%(ip_addr)s" -msgstr "正在重新配置实例 %(name)s 来设置机器的id为ip - %(ip_addr)s" +"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " +"belong to different file systems" +msgstr "" -#: cinder/virt/vmwareapi/vmops.py:765 +#: cinder/volume/drivers/gpfs.py:169 #, python-format msgid "" -"Reconfigured VM instance %(name)s to set the machine id with ip - " -"%(ip_addr)s" -msgstr "已经重新配置虚拟机实例 %(name)s 来设置机器的id为ip - %(ip_addr)s" +"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in " +"cluster daemon level %(cur)s - must be at least at level %(min)s." +msgstr "" -#: cinder/virt/vmwareapi/vmops.py:802 +#: cinder/volume/drivers/gpfs.py:183 #, python-format -msgid "Creating directory with path %s" -msgstr "正在使用路径 %s 创建目录" +msgid "%s must be an absolute path." +msgstr "" -#: cinder/virt/vmwareapi/vmops.py:806 +#: cinder/volume/drivers/gpfs.py:188 #, python-format -msgid "Created directory with path %s" -msgstr "已经创建路径为 %s 的目录" +msgid "%s is not a directory." +msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:89 +#: cinder/volume/drivers/gpfs.py:197 #, python-format -msgid "Downloading image %s from glance image server" -msgstr "正在从glance镜像服务器中下载镜像 %s" +msgid "" +"The GPFS filesystem %(fs)s is not at the required release level. Current" +" level is %(cur)s, must be at least %(min)s." +msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:103 +#: cinder/volume/drivers/gpfs.py:556 #, python-format -msgid "Downloaded image %s from glance image server" -msgstr "已经从glance镜像服务器中下载镜像 %s" +msgid "Failed to resize volume %(volume_id)s, error: %(error)s" +msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:108 +#: cinder/volume/drivers/gpfs.py:604 #, python-format -msgid "Uploading image %s to the Glance image server" -msgstr "正在向Glance镜像服务器上传镜像 %s" +msgid "mkfs failed on volume %(vol)s, error message was: %(err)s" +msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:129 +#: cinder/volume/drivers/gpfs.py:637 #, python-format -msgid "Uploaded image %s to the Glance image server" -msgstr "已经向Glance镜像服务器上传了镜像 %s" +msgid "" +"%s cannot be accessed. Verify that GPFS is active and file system is " +"mounted." 
+msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:139 +#: cinder/volume/drivers/lvm.py:189 #, python-format -msgid "Getting image size for the image %s" -msgstr "正在获取镜像 %s 的大小" +msgid "Unabled to delete due to existing snapshot for volume: %s" +msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:143 +#: cinder/volume/drivers/lvm.py:215 #, python-format -msgid "Got image size of %(size)s for the image %(image)s" -msgstr "获得大小为 %(size)s 的镜像目的为了 %(image)s" - -#: cinder/virt/xenapi/fake.py:553 cinder/virt/xenapi/fake.py:652 -#: cinder/virt/xenapi/fake.py:670 cinder/virt/xenapi/fake.py:732 -msgid "Raising NotImplemented" -msgstr "产生 NotImplemented 错误" +msgid "Volume device file path %s does not exist." +msgstr "" -#: cinder/virt/xenapi/fake.py:555 +#: cinder/volume/drivers/lvm.py:221 #, python-format -msgid "xenapi.fake does not have an implementation for %s" -msgstr "xenapi.fake 没有 %s 的实现" +msgid "Size for volume: %s not found, cannot secure delete." +msgstr "" -#: cinder/virt/xenapi/fake.py:589 +#: cinder/volume/drivers/lvm.py:262 #, python-format -msgid "Calling %(localname)s %(impl)s" -msgstr "正在调用 %(localname)s %(impl)s" +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" -#: cinder/virt/xenapi/fake.py:594 +#: cinder/volume/drivers/lvm.py:361 #, python-format -msgid "Calling getter %s" -msgstr "调用 getter %s" +msgid "Unable to update stats on non-initialized Volume Group: %s" +msgstr "" -#: cinder/virt/xenapi/fake.py:654 +#: cinder/volume/drivers/lvm.py:462 #, python-format -msgid "" -"xenapi.fake does not have an implementation for %s or it has been called " -"with the wrong number of arguments" -msgstr "xenapi.fake 没有 %s 的实现或者调用时用了错误数目的参数" +msgid "Error creating iSCSI target, retrying creation for target: %s" +msgstr "" -#: cinder/virt/xenapi/host.py:67 +#: cinder/volume/drivers/lvm.py:482 #, python-format -msgid "" -"Instance %(name)s running on %(host)s could not be found in the database:" -" assuming it is a worker VM and skipping migration to a new host" +msgid "volume_info:%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:518 +msgid "Detected inconsistency in provider_location id" msgstr "" -#: cinder/virt/xenapi/host.py:137 +#: cinder/volume/drivers/lvm.py:519 cinder/volume/drivers/lvm.py:724 +#: cinder/volume/drivers/huawei/rest_common.py:1225 #, python-format -msgid "Unable to get SR for this host: %s" -msgstr "无法为主机得到存储库:%s" +msgid "%s" +msgstr "" -#: cinder/virt/xenapi/host.py:169 +#: cinder/volume/drivers/lvm.py:573 #, fuzzy, python-format -msgid "Unable to get updated status" -msgstr "无法得到最新的状态:%s" +msgid "Symbolic link %s not found" +msgstr "没有找到标记 [%s]" -#: cinder/virt/xenapi/host.py:172 -#, python-format -msgid "The call to %(method)s returned an error: %(e)s." -msgstr "对 %(method)s 的调用返回错误:%(e)s。" +#: cinder/volume/drivers/nfs.py:109 +msgid "Driver specific implementation needs to return mount_point_base." 
+msgstr "" -#: cinder/virt/xenapi/network_utils.py:37 +#: cinder/volume/drivers/nfs.py:263 #, python-format -msgid "Found non-unique network for name_label %s" -msgstr "发现不唯一的网络 name_label %s" +msgid "Expected volume size was %d" +msgstr "" -#: cinder/virt/xenapi/network_utils.py:55 +#: cinder/volume/drivers/nfs.py:264 #, python-format -msgid "Found non-unique network for bridge %s" -msgstr "发现桥 %s 的网络不唯一" +msgid " but size is now %d" +msgstr "" -#: cinder/virt/xenapi/network_utils.py:58 -#, python-format -msgid "Found no network for bridge %s" -msgstr "发现网桥 %s 没有网络" +#: cinder/volume/drivers/nfs.py:361 +#, fuzzy, python-format +msgid "%s is already mounted" +msgstr "镜像已经挂载" -#: cinder/virt/xenapi/pool.py:111 +#: cinder/volume/drivers/nfs.py:421 #, python-format -msgid "Unable to eject %(host)s from the pool; pool not empty" +msgid "There's no NFS config file configured (%s)" msgstr "" -#: cinder/virt/xenapi/pool.py:126 +#: cinder/volume/drivers/nfs.py:426 #, python-format -msgid "Unable to eject %(host)s from the pool; No master found" +msgid "NFS config file at %(config)s doesn't exist" msgstr "" -#: cinder/virt/xenapi/pool.py:143 +#: cinder/volume/drivers/nfs.py:431 #, python-format -msgid "Pool-Join failed: %(e)s" +msgid "NFS config 'nfs_oversub_ratio' invalid. Must be > 0: %s" msgstr "" -#: cinder/virt/xenapi/pool.py:146 -#, fuzzy, python-format -msgid "Unable to join %(host)s in the pool" -msgstr "无法找到实例 %s 的宿主机" - -#: cinder/virt/xenapi/pool.py:162 -#, fuzzy, python-format -msgid "Pool-eject failed: %(e)s" -msgstr "注入文件失败:%(resp)r" - -#: cinder/virt/xenapi/pool.py:174 -#, fuzzy, python-format -msgid "Unable to set up pool: %(e)s." -msgstr "无法使用全局角色 %(role_id)s" - -#: cinder/virt/xenapi/pool.py:185 +#: cinder/volume/drivers/nfs.py:439 #, python-format -msgid "Pool-set_name_label failed: %(e)s" +msgid "NFS config 'nfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" msgstr "" -#: cinder/virt/xenapi/vif.py:103 +#: cinder/volume/drivers/nfs.py:493 #, python-format -msgid "Found no PIF for device %s" -msgstr "没有找到设备 %s 的PIF" +msgid "Selected %s as target nfs share." +msgstr "" -#: cinder/virt/xenapi/vif.py:122 +#: cinder/volume/drivers/nfs.py:526 #, python-format -msgid "" -"PIF %(pif_rec['uuid'])s for network %(bridge)s has VLAN id %(pif_vlan)d. 
" -"Expected %(vlan_num)d" +msgid "%s is above nfs_used_ratio" msgstr "" -"网络 %(bridge)s 的 PIF %(pif_rec['uuid'])s 有VLAN id %(pif_vlan)d。期待的数目是 " -"%(vlan_num)d" - -#: cinder/virt/xenapi/vm_utils.py:218 -#, fuzzy, python-format -msgid "Created VM" -msgstr "_create: %s" -#: cinder/virt/xenapi/vm_utils.py:245 +#: cinder/volume/drivers/nfs.py:529 #, python-format -msgid "VBD not found in instance %s" -msgstr "没有在实例 %s 找到VBD" - -#: cinder/virt/xenapi/vm_utils.py:262 -#, fuzzy, python-format -msgid "VBD %s already detached" -msgstr "已经分离" +msgid "%s is above nfs_oversub_ratio" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:265 +#: cinder/volume/drivers/nfs.py:532 #, python-format -msgid "VBD %(vbd_ref)s detach rejected, attempt %(num_attempt)d/%(max_attempts)d" +msgid "%s reserved space is above nfs_oversub_ratio" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:270 +#: cinder/volume/drivers/rbd.py:160 #, python-format -msgid "Unable to unplug VBD %s" -msgstr "无法移除 VBD %s" +msgid "Invalid argument - whence=%s not supported" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:275 -#, python-format -msgid "Reached maximum number of retries trying to unplug VBD %s" +#: cinder/volume/drivers/rbd.py:164 +msgid "Invalid argument" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:286 -#, python-format -msgid "Unable to destroy VBD %s" -msgstr "无法销毁 VBD %s" +#: cinder/volume/drivers/rbd.py:183 +msgid "fileno() not supported by RBD()" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:305 +#: cinder/volume/drivers/rbd.py:210 #, fuzzy, python-format -msgid "Creating %(vbd_type)s-type VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " -msgstr "创建了VBD %(vbd_ref)s 目的是为了虚拟机 %(vm_ref)s,VDI %(vdi_ref)s" +msgid "error opening rbd image %s" +msgstr "启动xvp发生错误:%s" -#: cinder/virt/xenapi/vm_utils.py:308 -#, python-format -msgid "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s." -msgstr "创建了VBD %(vbd_ref)s 目的是为了虚拟机 %(vm_ref)s,VDI %(vdi_ref)s" +#: cinder/volume/drivers/rbd.py:259 +msgid "rados and rbd python libraries not found" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:319 -#, python-format -msgid "Unable to destroy VDI %s" -msgstr "无法销毁 VDI %s" +#: cinder/volume/drivers/rbd.py:265 +#, fuzzy +msgid "error connecting to ceph cluster" +msgstr "正在连接 libvirt:%s" -#: cinder/virt/xenapi/vm_utils.py:336 +#: cinder/volume/drivers/rbd.py:346 cinder/volume/drivers/sheepdog.py:178 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:377 #, python-format -msgid "" -"Created VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s)" -" on %(sr_ref)s." +msgid "clone depth exceeds limit of %s" msgstr "" -"创建了 VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s) 位置在" -" %(sr_ref)s。" -#: cinder/virt/xenapi/vm_utils.py:345 +#: cinder/volume/drivers/rbd.py:411 #, python-format -msgid "Copied VDI %(vdi_ref)s from VDI %(vdi_to_copy_ref)s on %(sr_ref)s." 
-msgstr "复制了 VDI %(vdi_ref)s ,对象来自VDI %(vdi_to_copy_ref)s ,位置在 %(sr_ref)s。" +msgid "maximum clone depth (%d) has been reached - flattening source volume" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:353 +#: cinder/volume/drivers/rbd.py:423 #, python-format -msgid "Cloned VDI %(vdi_ref)s from VDI %(vdi_to_clone_ref)s" -msgstr "克隆了 VDI %(vdi_ref)s 对象来自VDI %(vdi_to_clone_ref)s" +msgid "flattening source volume %s" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:372 -#, fuzzy, python-format -msgid "No primary VDI found for %(vm_ref)s" -msgstr "未找到 %(vm_ref)s 的主VDI" +#: cinder/volume/drivers/rbd.py:435 +#, python-format +msgid "creating snapshot='%s'" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:379 -#, fuzzy, python-format -msgid "Snapshotting with label '%(label)s'" -msgstr "正在为虚拟机 %(vm_ref)s 做快照,采用标签是 “%(label)s”" +#: cinder/volume/drivers/rbd.py:445 +#, python-format +msgid "cloning '%(src_vol)s@%(src_snap)s' to '%(dest)s'" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:392 -#, fuzzy, python-format -msgid "Created snapshot %(template_vm_ref)s" -msgstr "已经创建了快照 %(template_vm_ref)s 快照对象是虚拟机 %(vm_ref)s。" +#: cinder/volume/drivers/rbd.py:459 +msgid "clone created successfully" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:431 +#: cinder/volume/drivers/rbd.py:468 #, python-format -msgid "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s" -msgstr "请求xapi 上传 %(vdi_uuids)s 作为镜像ID %(image_id)s" +msgid "creating volume '%s'" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:583 +#: cinder/volume/drivers/rbd.py:484 #, python-format -msgid "Creating blank HD of size %(req_size)d gigs" +msgid "flattening %(pool)s/%(img)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:634 +#: cinder/volume/drivers/rbd.py:490 #, python-format -msgid "" -"Fast cloning is only supported on default local SR of type ext. SR on " -"this system was found to be of type %(sr_type)s. Ignoring the cow flag." 
-msgstr "只能在ext类型的缺省本地存储库支持快速克隆。这个系统的存储库类型为 %(sr_type)s。忽略此 cow 标记。" +msgid "cloning %(pool)s/%(img)s@%(snap)s to %(dst)s" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:724 -#, python-format -msgid "" -"download_vhd %(image)s attempt %(attempt_num)d/%(max_attempts)d from " -"%(glance_host)s:%(glance_port)s" +#: cinder/volume/drivers/rbd.py:527 +msgid "volume has no backup snaps" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:734 +#: cinder/volume/drivers/rbd.py:550 #, python-format -msgid "download_vhd failed: %r" +msgid "volume %s is not a clone" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:750 +#: cinder/volume/drivers/rbd.py:568 #, python-format -msgid "Asking xapi to fetch vhd image %(image)s" -msgstr "请求 xapi 获取 vhd 镜像 %(image)s" +msgid "deleting parent snapshot %s" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:760 +#: cinder/volume/drivers/rbd.py:579 #, python-format -msgid "" -"xapi 'download_vhd' returned VDI of type '%(vdi_type)s' with UUID " -"'%(vdi_uuid)s'" -msgstr "xapi 'download_vhd' 返回“%(vdi_type)s”类型的VDI,其UUID为 “%(vdi_uuid)s”" +msgid "deleting parent %s" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:789 +#: cinder/volume/drivers/rbd.py:593 #, python-format -msgid "vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes=%(vdi_size_bytes)d" -msgstr "vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes=%(vdi_size_bytes)d" +msgid "volume %s no longer exists in backend" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:805 -#, python-format -msgid "image_size_bytes=%(size_bytes)d, allowed_size_bytes=%(allowed_size_bytes)d" -msgstr "image_size_bytes=%(size_bytes)d,allowed_size_bytes=%(allowed_size_bytes)d" +#: cinder/volume/drivers/rbd.py:609 +msgid "volume has clone snapshot(s)" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:809 +#: cinder/volume/drivers/rbd.py:625 #, python-format +msgid "deleting rbd volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:629 msgid "" -"Image size %(size_bytes)d exceeded instance_type allowed size " -"%(allowed_size_bytes)d" -msgstr "镜像大小 %(size_bytes)d 超过instance_type所允许的小大 %(allowed_size_bytes)d" +"ImageBusy error raised while deleting rbd volume. This may have been " +"caused by a connection from a client that has crashed and, if so, may be " +"resolved by retrying the delete after 30 seconds has elapsed." 
+msgstr "" -#: cinder/virt/xenapi/vm_utils.py:831 -#, fuzzy, python-format -msgid "Fetching image %(image)s, type %(image_type_str)" -msgstr "获取镜像 %(image)s" +#: cinder/volume/drivers/rbd.py:642 +msgid "volume is a clone so cleaning references" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:844 +#: cinder/volume/drivers/rbd.py:696 #, fuzzy, python-format -msgid "Size for image %(image)s: %(virtual_size)d" -msgstr "镜像 %(image)s 的大小:%(virtual_size)d" +msgid "connection data: %s" +msgstr "给定数据:%s" -#: cinder/virt/xenapi/vm_utils.py:853 -#, python-format -msgid "" -"Kernel/Ramdisk image is too large: %(vdi_size)d bytes, max %(max_size)d " -"bytes" -msgstr "内核/内存盘镜像太大:%(vdi_size)d 字节,最大 %(max_size)d 字节" +#: cinder/volume/drivers/rbd.py:705 +msgid "Not stored in rbd" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:870 -#, python-format -msgid "Copying VDI %s to /boot/guest on dom0" -msgstr "将VDI %s 复制到dom0的/boot/guest下" +#: cinder/volume/drivers/rbd.py:709 +msgid "Blank components" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:884 -#, python-format -msgid "Kernel/Ramdisk VDI %s destroyed" -msgstr "内核/内存盘 VDI %s 已销毁" +#: cinder/volume/drivers/rbd.py:712 +#, fuzzy +msgid "Not an rbd snapshot" +msgstr "无效的快照" -#: cinder/virt/xenapi/vm_utils.py:895 +#: cinder/volume/drivers/rbd.py:724 #, fuzzy, python-format -msgid "Failed to fetch glance image" -msgstr "实例 %s:获取Glance镜像失败" +msgid "not cloneable: %s" +msgstr "Ext name: %s" -#: cinder/virt/xenapi/vm_utils.py:934 +#: cinder/volume/drivers/rbd.py:728 #, python-format -msgid "Detected %(image_type_str)s format for image %(image_ref)s" -msgstr "检测到 %(image_type_str)s 格式,目标是镜像 %(image_ref)s" +msgid "%s is in a different ceph cluster" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:955 -#, python-format -msgid "Looking up vdi %s for PV kernel" -msgstr "为PV内核查询vdi %s" +#: cinder/volume/drivers/rbd.py:733 +msgid "rbd image clone requires image format to be 'raw' but image {0} is '{1}'" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:973 -#, python-format -msgid "Unknown image format %(disk_image_type)s" -msgstr "未知的镜像格式 %(disk_image_type)s" +#: cinder/volume/drivers/rbd.py:747 +#, fuzzy, python-format +msgid "Unable to open image %(loc)s: %(err)s" +msgstr "无法找到 %s 卷" -#: cinder/virt/xenapi/vm_utils.py:1016 -#, python-format -msgid "VDI %s is still available" -msgstr "VDI %s 依然可用" +#: cinder/volume/drivers/rbd.py:817 +msgid "volume backup complete." +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1059 -#, python-format -msgid "(VM_UTILS) xenserver vm state -> |%s|" -msgstr "(VM_UTILS) xenserver 虚拟机状态 -> |%s|" +#: cinder/volume/drivers/rbd.py:830 +msgid "volume restore complete." +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1061 +#: cinder/volume/drivers/rbd.py:840 cinder/volume/drivers/sheepdog.py:195 #, python-format -msgid "(VM_UTILS) xenapi power_state -> |%s|" -msgstr "(VM_UTILS) xenapi power_state -> |%s|" +msgid "Failed to Extend Volume %(volname)s" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1088 +#: cinder/volume/drivers/rbd.py:845 cinder/volume/drivers/sheepdog.py:200 +#: cinder/volume/drivers/windows/windows.py:223 #, python-format -msgid "Unable to parse rrd of %(vm_uuid)s" +msgid "Extend volume from %(old_size)s GB to %(new_size)s GB." 
msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1108 -#, python-format -msgid "Re-scanning SR %s" -msgstr "重新扫描存储库 %s" +#: cinder/volume/drivers/scality.py:67 +msgid "Value required for 'scality_sofs_config'" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1136 +#: cinder/volume/drivers/scality.py:78 #, python-format -msgid "Flag sr_matching_filter '%s' does not respect formatting convention" -msgstr "标记sr_matching_filter '%s' 没有遵循格式要求" +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1154 -msgid "" -"XenAPI is unable to find a Storage Repository to install guest instances " -"on. Please check your configuration and/or configure the flag " -"'sr_matching_filter'" -msgstr "XenAPI无法找到安装客户实例的存储库。请检查你的配置或者配置标记'sr_matching_filter'" +#: cinder/volume/drivers/scality.py:84 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1167 -msgid "Cannot find SR of content-type ISO" -msgstr "无法找到content-type ISO的存储库" +#: cinder/volume/drivers/scality.py:105 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1175 +#: cinder/volume/drivers/scality.py:139 #, python-format -msgid "ISO: looking at SR %(sr_rec)s" -msgstr "ISO:正在查看存储库 %(sr_rec)s" - -#: cinder/virt/xenapi/vm_utils.py:1177 -msgid "ISO: not iso content" -msgstr "ISO:非iso内容" - -#: cinder/virt/xenapi/vm_utils.py:1180 -msgid "ISO: iso content_type, no 'i18n-key' key" -msgstr "ISO:iso content_type,没有 'i18n-key' 键" - -#: cinder/virt/xenapi/vm_utils.py:1183 -msgid "ISO: iso content_type, i18n-key value not 'local-storage-iso'" -msgstr "ISO:iso content_type,i18n-key的值不是 'local-storage-iso'" +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1187 -msgid "ISO: SR MATCHing our criteria" -msgstr "ISO: 存储库符合标准" +#: cinder/volume/drivers/sheepdog.py:59 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "Sheepdog 没有工作:%s" -#: cinder/virt/xenapi/vm_utils.py:1189 -msgid "ISO: ISO, looking to see if it is host local" -msgstr "ISO: ISO, 正在查看是否是本地的主机" +#: cinder/volume/drivers/sheepdog.py:64 +msgid "Sheepdog is not working" +msgstr "Sheepdog 没有工作" -#: cinder/virt/xenapi/vm_utils.py:1192 +#: cinder/volume/drivers/solidfire.py:144 #, python-format -msgid "ISO: PBD %(pbd_ref)s disappeared" -msgstr "ISO: PBD %(pbd_ref)s 消失了" +msgid "Payload for SolidFire API call: %s" +msgstr "SolidFire API 调用的参数:%s" -#: cinder/virt/xenapi/vm_utils.py:1195 +#: cinder/volume/drivers/solidfire.py:151 #, python-format -msgid "ISO: PBD matching, want %(pbd_rec)s, have %(host)s" -msgstr "ISO: PBD匹配, 想要 %(pbd_rec)s, 目前有 %(host)s" +msgid "" +"Failed to make httplib connection SolidFire Cluster: %s (verify san_ip " +"settings)" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1198 -msgid "ISO: SR with local PBD" -msgstr "ISO:含有本地PBD的存储库" +#: cinder/volume/drivers/solidfire.py:154 +#, python-format +msgid "Failed to make httplib connection: %s" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1220 +#: cinder/volume/drivers/solidfire.py:161 #, python-format msgid "" -"Unable to obtain RRD XML for VM %(vm_uuid)s with server details: " -"%(server)s." -msgstr "无法为含服务器详细信息的虚拟机 %(vm_uuid)s 获取RRD XML:%(server)s。" +"Request to SolidFire cluster returned bad status: %(status)s / %(reason)s" +" (check san_login/san_password settings)" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1236 +#: cinder/volume/drivers/solidfire.py:166 #, python-format -msgid "Unable to obtain RRD XML updates with server details: %(server)s." 
-msgstr "无法获取包含服务器详细情况的RRD XML更新:%(server)s。" +msgid "HTTP request failed, with status: %(status)s and reason: %(reason)s" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1290 +#: cinder/volume/drivers/solidfire.py:177 #, python-format -msgid "Invalid statistics data from Xenserver: %s" -msgstr "来自Xenserver无效的统计数据:%s" +msgid "Call to json.loads() raised an exception: %s" +msgstr "调用 json.loads() 引起异常:%s" -#: cinder/virt/xenapi/vm_utils.py:1343 +#: cinder/volume/drivers/solidfire.py:183 #, python-format -msgid "VHD %(vdi_uuid)s has parent %(parent_ref)s" -msgstr "VHD %(vdi_uuid)s 有父 %(parent_ref)s" +msgid "Results of SolidFire API call: %s" +msgstr "SolidFire API调用结果:%s" -#: cinder/virt/xenapi/vm_utils.py:1417 +#: cinder/volume/drivers/solidfire.py:187 #, python-format -msgid "" -"Parent %(parent_uuid)s doesn't match original parent " -"%(original_parent_uuid)s, waiting for coalesce..." -msgstr "父标识 %(parent_uuid)s 和原先的父标识 %(original_parent_uuid)s 不匹配,正在等待合并..." +msgid "Clone operation encountered: %s" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1427 +#: cinder/volume/drivers/solidfire.py:189 #, python-format -msgid "VHD coalesce attempts exceeded (%(max_attempts)d), giving up..." -msgstr "VHD coalesce 将要超过(%(max_attempts)d),放弃中..." +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1462 +#: cinder/volume/drivers/solidfire.py:195 #, python-format -msgid "Timeout waiting for device %s to be created" -msgstr "等待设备 %s 创建超时" +msgid "Detected xDBVersionMismatch, retry %s of 5" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1473 -#, python-format -msgid "Plugging VBD %s ... " -msgstr "插入VBD %s... " +#: cinder/volume/drivers/solidfire.py:202 +#: cinder/volume/drivers/solidfire.py:271 +#: cinder/volume/drivers/solidfire.py:366 +#, fuzzy, python-format +msgid "API response: %s" +msgstr "响应 %s" -#: cinder/virt/xenapi/vm_utils.py:1476 +#: cinder/volume/drivers/solidfire.py:222 #, python-format -msgid "Plugging VBD %s done." -msgstr "插入VBD %s 完成。" +msgid "Found solidfire account: %s" +msgstr "找到solidfire帐户:%s" -#: cinder/virt/xenapi/vm_utils.py:1478 +#: cinder/volume/drivers/solidfire.py:253 #, python-format -msgid "VBD %(vbd_ref)s plugged as %(orig_dev)s" -msgstr "VBD %(vbd_ref)s 作为 %(orig_dev)s 插入" +msgid "solidfire account: %s does not exist, create it..." +msgstr "solidfire帐户:%s 不存在,正在创建..." -#: cinder/virt/xenapi/vm_utils.py:1481 +#: cinder/volume/drivers/solidfire.py:315 #, python-format -msgid "VBD %(vbd_ref)s plugged into wrong dev, remapping to %(dev)s" -msgstr "VBD %(vbd_ref)s 插入错误的设备,重新映射为 %(dev)s" +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1490 -#, python-format -msgid "Destroying VBD for VDI %s ... " -msgstr "正在销毁VDI为 %s 的 VBD " +#: cinder/volume/drivers/solidfire.py:398 +#, fuzzy +msgid "Failed to get model update from clone" +msgstr "为ip: %s获取元数据失败" -#: cinder/virt/xenapi/vm_utils.py:1498 +#: cinder/volume/drivers/solidfire.py:410 #, python-format -msgid "Destroying VBD for VDI %s done." 
-msgstr "已经销毁VDI为 %s 的 VBD" +msgid "Failed volume create: %s" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1511 +#: cinder/volume/drivers/solidfire.py:425 #, python-format -msgid "Running pygrub against %s" -msgstr "对 %s 运行pygrub" +msgid "More than one valid preset was detected, using %s" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1518 +#: cinder/volume/drivers/solidfire.py:460 #, python-format -msgid "Found Xen kernel %s" -msgstr "找到Xen内核 %s" - -#: cinder/virt/xenapi/vm_utils.py:1520 -msgid "No Xen kernel found. Booting HVM." -msgstr "没有找到Xen内核。正在启动HVM。" - -#: cinder/virt/xenapi/vm_utils.py:1533 -msgid "Partitions:" -msgstr "分区:" +msgid "Failed to get SolidFire Volume: %s" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1539 +#: cinder/volume/drivers/solidfire.py:469 #, python-format -msgid " %(num)s: %(ptype)s %(size)d sectors" -msgstr " %(num)s: %(ptype)s %(size)d sectors" +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1565 +#: cinder/volume/drivers/solidfire.py:478 #, python-format -msgid "" -"Writing partition table %(primary_first)d %(primary_last)d to " -"%(dev_path)s..." -msgstr "将分区表 %(primary_first)d %(primary_last)d 写入到 %(dev_path)s..." +msgid "Volume %s, not found on SF Cluster." +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1578 +#: cinder/volume/drivers/solidfire.py:481 #, python-format -msgid "Writing partition table %s done." -msgstr "完成写入分区表 %s 。" +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:550 +msgid "Enter SolidFire delete_volume..." +msgstr "进入SolidFire delete_volume..." -#: cinder/virt/xenapi/vm_utils.py:1632 +#: cinder/volume/drivers/solidfire.py:554 #, python-format -msgid "" -"Starting sparse_copy src=%(src_path)s dst=%(dst_path)s " -"virtual_size=%(virtual_size)d block_size=%(block_size)d" +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1664 -#, python-format -msgid "" -"Finished sparse_copy in %(duration).2f secs, %(compression_pct).2f%% " -"reduction in size" +#: cinder/volume/drivers/solidfire.py:556 +msgid "This usually means the volume was never successfully created." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1714 -msgid "" -"XenServer tools installed in this image are capable of network injection." -" Networking files will not bemanipulated" -msgstr "安装在该镜像的XenServer工具可以进行网络注入。网络文件不会被操作。" +#: cinder/volume/drivers/solidfire.py:569 +#, python-format +msgid "Failed to delete SolidFire Volume: %s" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1722 -msgid "" -"XenServer tools are present in this image but are not capable of network " -"injection" -msgstr "该镜像有XenServer工具,但是不能进行网络注入" +#: cinder/volume/drivers/solidfire.py:572 +#: cinder/volume/drivers/solidfire.py:646 +#: cinder/volume/drivers/solidfire.py:709 +#: cinder/volume/drivers/solidfire.py:734 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1726 -msgid "XenServer tools are not installed in this image" -msgstr "没有在该镜像上安装XenServer工具" +#: cinder/volume/drivers/solidfire.py:575 +msgid "Leaving SolidFire delete_volume" +msgstr "离开SolidFire delete_volume" -#: cinder/virt/xenapi/vm_utils.py:1742 -msgid "Manipulating interface files directly" -msgstr "直接操作接口文件" +#: cinder/volume/drivers/solidfire.py:579 +msgid "Executing SolidFire ensure_export..." +msgstr "正在执行SolidFire ensure_export..." 
-#: cinder/virt/xenapi/vm_utils.py:1751 -#, python-format -msgid "Failed to mount filesystem (expected for non-linux instances): %s" -msgstr "挂载文件系统失败(期望的是非Linux实例):%s" +#: cinder/volume/drivers/solidfire.py:587 +msgid "Executing SolidFire create_export..." +msgstr "正在执行SolidFire create_export..." -#: cinder/virt/xenapi/vmops.py:131 cinder/virt/xenapi/vmops.py:722 -#, fuzzy, python-format -msgid "Updating progress to %(progress)d" -msgstr "将实例 '%(instance_uuid)s' 的进度更新到 %(progress)d" +#: cinder/volume/drivers/solidfire.py:638 +msgid "Entering SolidFire extend_volume..." +msgstr "" -#: cinder/virt/xenapi/vmops.py:231 -#, python-format -msgid "Attempted to power on non-existent instance bad instance id %s" -msgstr "尝试过启动不存在的实例,实例的id %s 不正确" +#: cinder/volume/drivers/solidfire.py:660 +msgid "Leaving SolidFire extend_volume" +msgstr "" -#: cinder/virt/xenapi/vmops.py:233 +#: cinder/volume/drivers/solidfire.py:665 #, fuzzy -msgid "Starting instance" -msgstr "正在启动虚拟机" - -#: cinder/virt/xenapi/vmops.py:303 -msgid "Removing kernel/ramdisk files from dom0" -msgstr "从 dom0 中移除内核/内存盘文件" +msgid "Updating cluster status info" +msgstr "更新主机状态" -#: cinder/virt/xenapi/vmops.py:358 +#: cinder/volume/drivers/solidfire.py:673 #, fuzzy -msgid "Failed to spawn, rolling back" -msgstr "在数据库更新卷失败" +msgid "Failed to get updated stats" +msgstr "无法得到最新的状态:%s" + +#: cinder/volume/drivers/solidfire.py:703 +#: cinder/volume/drivers/solidfire.py:728 +msgid "Entering SolidFire attach_volume..." +msgstr "" -#: cinder/virt/xenapi/vmops.py:443 -msgid "Detected ISO image type, creating blank VM for install" +#: cinder/volume/drivers/solidfire.py:773 +msgid "Leaving SolidFire transfer volume" msgstr "" -#: cinder/virt/xenapi/vmops.py:462 +#: cinder/volume/drivers/zadara.py:236 #, fuzzy, python-format -msgid "Auto configuring disk, attempting to resize partition..." -msgstr "正在自动配置实例 %(instance_uuid)s 的磁盘,尝试调整分区大小..." +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "把“%(method)s”投放在 %(topic)s \"%(host)s\"" -#: cinder/virt/xenapi/vmops.py:515 +#: cinder/volume/drivers/zadara.py:260 #, fuzzy, python-format -msgid "Invalid value for injected_files: %r" -msgstr "injected_files无效的值:'%s'" +msgid "Operation completed. %(data)s" +msgstr "确认完成" -#: cinder/virt/xenapi/vmops.py:520 +#: cinder/volume/drivers/zadara.py:357 #, python-format -msgid "Injecting file path: '%s'" -msgstr "注入文件路径:'%s'" - -#: cinder/virt/xenapi/vmops.py:527 -msgid "Setting admin password" -msgstr "设置管理员密码" - -#: cinder/virt/xenapi/vmops.py:531 -msgid "Resetting network" -msgstr "重置网络" +msgid "Pool %(name)s: %(total)sGB total, %(free)sGB free" +msgstr "" -#: cinder/virt/xenapi/vmops.py:538 -msgid "Setting VCPU weight" -msgstr "设置VCPU 权重" +#: cinder/volume/drivers/zadara.py:408 cinder/volume/drivers/zadara.py:531 +#, fuzzy, python-format +msgid "Volume %(name)s could not be found. 
It might be already deleted" +msgstr "卷 %(volume_id)s 没有找到。" -#: cinder/virt/xenapi/vmops.py:544 -#, fuzzy -msgid "Starting VM" -msgstr "重启xvp" +#: cinder/volume/drivers/zadara.py:438 +#, python-format +msgid "Create snapshot: %s" +msgstr "" -#: cinder/virt/xenapi/vmops.py:551 +#: cinder/volume/drivers/zadara.py:445 cinder/volume/drivers/zadara.py:490 +#: cinder/volume/drivers/zadara.py:516 #, python-format -msgid "" -"Latest agent build for %(hypervisor)s/%(os)s/%(architecture)s is " -"%(version)s" -msgstr "%(hypervisor)s/%(os)s/%(architecture)s 最新的agent build 是 %(version)s 版本" +msgid "Volume %(name)s not found" +msgstr "" -#: cinder/virt/xenapi/vmops.py:554 +#: cinder/volume/drivers/zadara.py:456 #, python-format -msgid "No agent build found for %(hypervisor)s/%(os)s/%(architecture)s" -msgstr "没有找到 %(hypervisor)s/%(os)s/%(architecture)s 的代理创建" +msgid "Delete snapshot: %s" +msgstr "" -#: cinder/virt/xenapi/vmops.py:561 -msgid "Waiting for instance state to become running" +#: cinder/volume/drivers/zadara.py:464 +#, python-format +msgid "snapshot: original volume %s not found, skipping delete operation" msgstr "" -#: cinder/virt/xenapi/vmops.py:573 -msgid "Querying agent version" -msgstr "查询代理版本" +#: cinder/volume/drivers/zadara.py:472 +#, python-format +msgid "snapshot: snapshot %s not found, skipping delete operation" +msgstr "" -#: cinder/virt/xenapi/vmops.py:576 +#: cinder/volume/drivers/zadara.py:483 #, python-format -msgid "Instance agent version: %s" -msgstr "实例代理版本:%s" +msgid "Creating volume from snapshot: %s" +msgstr "" -#: cinder/virt/xenapi/vmops.py:581 +#: cinder/volume/drivers/zadara.py:496 #, python-format -msgid "Updating Agent to %s" -msgstr "把代理更新为 %s" +msgid "Snapshot %(name)s not found" +msgstr "" -#: cinder/virt/xenapi/vmops.py:616 +#: cinder/volume/drivers/zadara.py:614 #, python-format -msgid "No opaque_ref could be determined for '%s'." -msgstr "无法为 '%s' 确定opaque_ref。" +msgid "Attach properties: %(properties)s" +msgstr "" -#: cinder/virt/xenapi/vmops.py:670 -#, fuzzy, python-format -msgid "Finished snapshot and upload for VM" -msgstr "快照完毕并为虚拟机 %s 上传" +#: cinder/volume/drivers/emc/emc_smis_common.py:40 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" -#: cinder/virt/xenapi/vmops.py:677 -#, fuzzy, python-format -msgid "Starting snapshot for VM" -msgstr "开始为虚拟机 %s 快照" +#: cinder/volume/drivers/emc/emc_smis_common.py:79 +#, fuzzy +msgid "Entering create_volume." +msgstr "进入SolidFire create_volume..." 
-#: cinder/virt/xenapi/vmops.py:686 +#: cinder/volume/drivers/emc/emc_smis_common.py:83 #, fuzzy, python-format -msgid "Unable to Snapshot instance: %(exc)s" -msgstr "无法为实例 %(instance_uuid)s 快照:%(exc)s" - -#: cinder/virt/xenapi/vmops.py:702 -msgid "Failed to transfer vhd to new host" -msgstr "将 vhd 转移到新主机失败" +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "卷%(vol_name)s:创建大小为%(vol_size)s的逻辑卷" -#: cinder/virt/xenapi/vmops.py:770 +#: cinder/volume/drivers/emc/emc_smis_common.py:91 #, python-format -msgid "Resizing down VDI %(cow_uuid)s from %(old_gb)dGB to %(new_gb)dGB" -msgstr "将 VDI %(cow_uuid)s 由 %(old_gb)dGB 调小到 %(new_gb)dGB" +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" -#: cinder/virt/xenapi/vmops.py:893 +#: cinder/volume/drivers/emc/emc_smis_common.py:98 #, python-format -msgid "Resizing up VDI %(vdi_uuid)s from %(old_gb)dGB to %(new_gb)dGB" -msgstr "将 VDI %(vdi_uuid)s 由 %(old_gb)dGB 调大到 %(new_gb)dGB" - -#: cinder/virt/xenapi/vmops.py:901 -#, fuzzy, python-format -msgid "Resize complete" -msgstr "调整实例 %s 的大小完毕" +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" -#: cinder/virt/xenapi/vmops.py:928 +#: cinder/volume/drivers/emc/emc_smis_common.py:107 #, python-format -msgid "Failed to query agent version: %(resp)r" -msgstr "查询代理版本失败:%(resp)r" - -#: cinder/virt/xenapi/vmops.py:949 -#, fuzzy, python-format -msgid "domid changed from %(domid)s to %(newdomid)s" -msgstr "domid 由 %(olddomid)s 改变为 %(newdomid)s" +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." +msgstr "" -#: cinder/virt/xenapi/vmops.py:962 +#: cinder/volume/drivers/emc/emc_smis_common.py:115 #, python-format -msgid "Failed to update agent: %(resp)r" -msgstr "更新代理失败:%(resp)r" +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" -#: cinder/virt/xenapi/vmops.py:983 +#: cinder/volume/drivers/emc/emc_smis_common.py:130 #, python-format -msgid "Failed to exchange keys: %(resp)r" -msgstr "交换钥匙失败:%(resp)r" +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" -#: cinder/virt/xenapi/vmops.py:998 +#: cinder/volume/drivers/emc/emc_smis_common.py:137 #, python-format -msgid "Failed to update password: %(resp)r" -msgstr "更新密码失败:%(resp)r" +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" -#: cinder/virt/xenapi/vmops.py:1023 +#: cinder/volume/drivers/emc/emc_smis_common.py:144 #, python-format -msgid "Failed to inject file: %(resp)r" -msgstr "注入文件失败:%(resp)r" - -#: cinder/virt/xenapi/vmops.py:1032 -#, fuzzy, python-format -msgid "VM already halted, skipping shutdown..." -msgstr "虚拟机 %(instance_uuid)s 已经终止,跳过关闭..." - -#: cinder/virt/xenapi/vmops.py:1036 -msgid "Shutting down VM" +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" msgstr "" -#: cinder/virt/xenapi/vmops.py:1054 -msgid "Unable to find VBD for VM" -msgstr "无法为虚拟机找到VBD" - -#: cinder/virt/xenapi/vmops.py:1097 -#, fuzzy, python-format -msgid "Using RAW or VHD, skipping kernel and ramdisk deletion" -msgstr "实例 %(instance_uuid)s 使用RAW或者VHD,跳过内核和内存盘的删除" +#: cinder/volume/drivers/emc/emc_smis_common.py:152 +#, fuzzy +msgid "Entering create_volume_from_snapshot." 
+msgstr "从快照 %s 创建卷" -#: cinder/virt/xenapi/vmops.py:1104 -msgid "instance has a kernel or ramdisk but not both" -msgstr "实例拥有内核或者内存盘,但不是二者均有" +#: cinder/volume/drivers/emc/emc_smis_common.py:157 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" -#: cinder/virt/xenapi/vmops.py:1111 -msgid "kernel/ramdisk files removed" -msgstr "内核/内存盘文件移除了" +#: cinder/volume/drivers/emc/emc_smis_common.py:167 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." +msgstr "" -#: cinder/virt/xenapi/vmops.py:1121 -msgid "VM destroyed" +#: cinder/volume/drivers/emc/emc_smis_common.py:177 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." msgstr "" -#: cinder/virt/xenapi/vmops.py:1147 -#, fuzzy -msgid "Destroying VM" -msgstr "重启xvp" +#: cinder/volume/drivers/emc/emc_smis_common.py:188 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" -#: cinder/virt/xenapi/vmops.py:1169 -msgid "VM is not present, skipping destroy..." -msgstr "虚拟机不存在,跳过销毁..." +#: cinder/volume/drivers/emc/emc_smis_common.py:197 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" -#: cinder/virt/xenapi/vmops.py:1222 +#: cinder/volume/drivers/emc/emc_smis_common.py:218 #, python-format -msgid "Instance is already in Rescue Mode: %s" -msgstr "实例已处于救援模式:%s" +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" -#: cinder/virt/xenapi/vmops.py:1296 +#: cinder/volume/drivers/emc/emc_smis_common.py:230 #, python-format -msgid "Found %(instance_count)d hung reboots older than %(timeout)d seconds" -msgstr "找到%(instance_count)d个超过%(timeout)d秒悬挂的重启" +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" -#: cinder/virt/xenapi/vmops.py:1300 -#, fuzzy, python-format -msgid "Automatically hard rebooting" -msgstr "自动冷重启 %d" +#: cinder/volume/drivers/emc/emc_smis_common.py:241 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. 
Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" -#: cinder/virt/xenapi/vmops.py:1363 +#: cinder/volume/drivers/emc/emc_smis_common.py:257 #, python-format -msgid "Setting migration %(migration_id)s to error: %(reason)s" +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" msgstr "" -#: cinder/virt/xenapi/vmops.py:1374 -#, fuzzy, python-format +#: cinder/volume/drivers/emc/emc_smis_common.py:266 +#, python-format msgid "" -"Automatically confirming migration %(migration_id)s for instance " -"%(instance_uuid)s" -msgstr "为实例 %(instance_uuid)s 关闭虚拟机" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" -#: cinder/virt/xenapi/vmops.py:1379 -#, fuzzy, python-format -msgid "Instance %(instance_uuid)s not found" -msgstr "没有找到实例 %(instance_id)s" +#: cinder/volume/drivers/emc/emc_smis_common.py:278 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" -#: cinder/virt/xenapi/vmops.py:1383 +#: cinder/volume/drivers/emc/emc_smis_common.py:287 #, fuzzy -msgid "In ERROR state" -msgstr "节点处于未知的错误状态。" +msgid "Entering create_cloned_volume." +msgstr "进入SolidFire create_volume..." -#: cinder/virt/xenapi/vmops.py:1389 +#: cinder/volume/drivers/emc/emc_smis_common.py:292 #, python-format -msgid "In %(task_state)s task_state, not RESIZE_VERIFY" +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1396 +#: cinder/volume/drivers/emc/emc_smis_common.py:302 #, python-format -msgid "Error auto-confirming resize: %(e)s. Will retry later." +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1418 -msgid "Could not get bandwidth info." -msgstr "无法得到带宽信息" - -#: cinder/virt/xenapi/vmops.py:1469 -#, fuzzy, python-format -msgid "Injecting network info to xenstore" -msgstr "为虚拟机注入网络信息到xs:|%s|" - -#: cinder/virt/xenapi/vmops.py:1483 -#, fuzzy -msgid "Creating vifs" -msgstr "正在创建镜像" - -#: cinder/virt/xenapi/vmops.py:1492 -#, fuzzy, python-format -msgid "Creating VIF for network %(network_ref)s" -msgstr "正在为虚拟机 %(vm_ref)s,网络 %(network_ref)s 创建VIF。" - -#: cinder/virt/xenapi/vmops.py:1495 -#, fuzzy, python-format -msgid "Created VIF %(vif_ref)s, network %(network_ref)s" -msgstr "正在为虚拟机 %(vm_ref)s,网络 %(network_ref)s 创建VIF。" - -#: cinder/virt/xenapi/vmops.py:1520 -#, fuzzy, python-format -msgid "Injecting hostname to xenstore" -msgstr "为虚拟机注入hostname到xs:|%s|" - -#: cinder/virt/xenapi/vmops.py:1545 -#, fuzzy, python-format +#: cinder/volume/drivers/emc/emc_smis_common.py:312 +#, python-format msgid "" -"The agent call to %(method)s returned an invalid response: %(ret)r. " -"path=%(path)s; args=%(args)r" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." msgstr "" -"%(method)s 的代理调用返回无效的响应:%(ret)r。虚拟机id=%(instance_uuid)s; path=%(path)s; " -"args=%(addl_args)r" - -#: cinder/virt/xenapi/vmops.py:1566 -#, fuzzy, python-format -msgid "TIMEOUT: The call to %(method)s timed out. 
args=%(args)r" -msgstr "超时:调用 %(method)s 超时。虚拟机id=%(instance_uuid)s; args=%(args)r" -#: cinder/virt/xenapi/vmops.py:1570 -#, fuzzy, python-format +#: cinder/volume/drivers/emc/emc_smis_common.py:321 +#, python-format msgid "" -"NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. " -"args=%(args)r" -msgstr "没有执行:代理不支持 %(method)s 的调用。虚拟机id=%(instance_uuid)s; args=%(args)r" - -#: cinder/virt/xenapi/vmops.py:1575 -#, fuzzy, python-format -msgid "The call to %(method)s returned an error: %(e)s. args=%(args)r" -msgstr "对 %(method)s 的调用返回错误:%(e)s。" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" -#: cinder/virt/xenapi/vmops.py:1661 +#: cinder/volume/drivers/emc/emc_smis_common.py:342 #, python-format -msgid "OpenSSL error: %s" -msgstr "OpenSSL错误:%s" - -#: cinder/virt/xenapi/volume_utils.py:52 -msgid "creating sr within volume_utils" -msgstr "在volume_utils创建存储库" +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" -#: cinder/virt/xenapi/volume_utils.py:55 cinder/virt/xenapi/volume_utils.py:83 +#: cinder/volume/drivers/emc/emc_smis_common.py:354 #, python-format -msgid "type is = %s" -msgstr "类型is = %s" +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" -#: cinder/virt/xenapi/volume_utils.py:58 cinder/virt/xenapi/volume_utils.py:86 +#: cinder/volume/drivers/emc/emc_smis_common.py:365 #, python-format -msgid "name = %s" -msgstr "name = %s" +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" -#: cinder/virt/xenapi/volume_utils.py:71 +#: cinder/volume/drivers/emc/emc_smis_common.py:381 #, python-format -msgid "Created %(label)s as %(sr_ref)s." -msgstr "将 %(label)s 作为 %(sr_ref)s 创建。" - -#: cinder/virt/xenapi/volume_utils.py:76 cinder/virt/xenapi/volume_utils.py:174 -msgid "Unable to create Storage Repository" -msgstr "无法创建存储库" - -#: cinder/virt/xenapi/volume_utils.py:80 -msgid "introducing sr within volume_utils" -msgstr "在volume_utils里引入sr" +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" -#: cinder/virt/xenapi/volume_utils.py:103 cinder/virt/xenapi/volume_utils.py:170 -#: cinder/virt/xenapi/volumeops.py:156 +#: cinder/volume/drivers/emc/emc_smis_common.py:390 #, python-format -msgid "Introduced %(label)s as %(sr_ref)s." -msgstr "将 %(label)s 作为 %(sr_ref)s 引入。" - -#: cinder/virt/xenapi/volume_utils.py:106 -msgid "Creating pbd for SR" -msgstr "为存储库创建pbd" +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" -#: cinder/virt/xenapi/volume_utils.py:108 -msgid "Plugging SR" -msgstr "插入存储库" +#: cinder/volume/drivers/emc/emc_smis_common.py:402 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." 
+msgstr "" -#: cinder/virt/xenapi/volume_utils.py:116 cinder/virt/xenapi/volumeops.py:160 -msgid "Unable to introduce Storage Repository" -msgstr "无法引入存储库" +#: cinder/volume/drivers/emc/emc_smis_common.py:411 +#, fuzzy +msgid "Entering delete_volume." +msgstr "进入SolidFire delete_volume..." -#: cinder/virt/xenapi/volume_utils.py:127 cinder/virt/xenapi/volumeops.py:50 -msgid "Unable to get SR using uuid" -msgstr "无法得到使用uuid的存储库" +#: cinder/volume/drivers/emc/emc_smis_common.py:413 +#, fuzzy, python-format +msgid "Delete Volume: %(volume)s" +msgstr "删除id为 %s 的卷" -#: cinder/virt/xenapi/volume_utils.py:129 +#: cinder/volume/drivers/emc/emc_smis_common.py:420 #, python-format -msgid "Forgetting SR %s..." -msgstr "遗忘存储库 %s..." - -#: cinder/virt/xenapi/volume_utils.py:137 -msgid "Unable to forget Storage Repository" -msgstr "无法遗忘Storage Repository" +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" -#: cinder/virt/xenapi/volume_utils.py:157 +#: cinder/volume/drivers/emc/emc_smis_common.py:430 #, python-format -msgid "Introducing %s..." -msgstr "引入 %s..." +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." +msgstr "" -#: cinder/virt/xenapi/volume_utils.py:186 +#: cinder/volume/drivers/emc/emc_smis_common.py:438 #, python-format -msgid "Unable to find SR from VBD %s" -msgstr "无法在VBD %s找到存储库" +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" -#: cinder/virt/xenapi/volume_utils.py:204 +#: cinder/volume/drivers/emc/emc_smis_common.py:442 #, python-format -msgid "Ignoring exception %(exc)s when getting PBDs for %(sr_ref)s" -msgstr "异常 %(exc)s 在为 %(sr_ref)s 得到PBDs时被忽略" +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" -#: cinder/virt/xenapi/volume_utils.py:210 +#: cinder/volume/drivers/emc/emc_smis_common.py:456 #, python-format -msgid "Ignoring exception %(exc)s when unplugging PBD %(pbd)s" -msgstr "异常 %(exc)s 在拔开PBD %(pbd)s 时被忽略" +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" -#: cinder/virt/xenapi/volume_utils.py:234 +#: cinder/volume/drivers/emc/emc_smis_common.py:465 #, python-format -msgid "Unable to introduce VDI on SR %s" -msgstr "无法在存储库 %s 上引入VDI" +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" -#: cinder/virt/xenapi/volume_utils.py:242 -#, python-format -msgid "Unable to get record of VDI %s on" -msgstr "无法使得VDI %s 的记录运行" +#: cinder/volume/drivers/emc/emc_smis_common.py:472 +msgid "Entering create_snapshot." +msgstr "" -#: cinder/virt/xenapi/volume_utils.py:264 -#, python-format -msgid "Unable to introduce VDI for SR %s" -msgstr "无法为存储库 %s 引入VDI" +#: cinder/volume/drivers/emc/emc_smis_common.py:476 +#, fuzzy, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "创建卷 %s 的快照" -#: cinder/virt/xenapi/volume_utils.py:274 +#: cinder/volume/drivers/emc/emc_smis_common.py:488 #, python-format -msgid "Error finding vdis in SR %s" -msgstr "在存储库 %s 寻找VDIs出错" +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" -#: cinder/virt/xenapi/volume_utils.py:281 +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:497 +#: cinder/volume/drivers/emc/emc_smis_common.py:567 #, python-format -msgid "Unable to find vbd for vdi %s" -msgstr "无法为VDI %s 找到VBD" +msgid "Cannot find Replication Service to create snapshot for volume %s." 
+msgstr "" -#: cinder/virt/xenapi/volume_utils.py:315 +#: cinder/volume/drivers/emc/emc_smis_common.py:502 #, python-format -msgid "Unable to obtain target information %(data)s, %(mountpoint)s" -msgstr "无法获得目标信息 %(data)s, %(mountpoint)s" +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." +msgstr "" -#: cinder/virt/xenapi/volume_utils.py:341 +#: cinder/volume/drivers/emc/emc_smis_common.py:518 #, python-format -msgid "Mountpoint cannot be translated: %s" -msgstr "挂载点无法被翻译:%s" +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" -#: cinder/virt/xenapi/volumeops.py:64 -msgid "Could not find VDI ref" -msgstr "找不到VDI ref" +#: cinder/volume/drivers/emc/emc_smis_common.py:527 +#, fuzzy, python-format +msgid "" +"Error Create Snapshot: %(snapshot)s Volume: %(volume)s Error: " +"%(errordesc)s" +msgstr "创建卷 %s 的快照" -#: cinder/virt/xenapi/volumeops.py:69 +#: cinder/volume/drivers/emc/emc_smis_common.py:535 #, python-format -msgid "Creating SR %s" -msgstr "正在创建存储库 %s" +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" -#: cinder/virt/xenapi/volumeops.py:73 -msgid "Could not create SR" -msgstr "无法创建存储库" +#: cinder/volume/drivers/emc/emc_smis_common.py:541 +msgid "Entering delete_snapshot." +msgstr "" -#: cinder/virt/xenapi/volumeops.py:76 -msgid "Could not retrieve SR record" -msgstr "无法获取存储库记录" +#: cinder/volume/drivers/emc/emc_smis_common.py:545 +#, fuzzy, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "创建卷 %s 的快照" -#: cinder/virt/xenapi/volumeops.py:81 +#: cinder/volume/drivers/emc/emc_smis_common.py:551 #, python-format -msgid "Introducing SR %s" -msgstr "引入存储库 %s" - -#: cinder/virt/xenapi/volumeops.py:85 -msgid "SR found in xapi database. No need to introduce" -msgstr "在xapi数据库找到存储库。无需引入。" - -#: cinder/virt/xenapi/volumeops.py:90 -msgid "Could not introduce SR" -msgstr "无法引入存储库" +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" -#: cinder/virt/xenapi/volumeops.py:94 +#: cinder/volume/drivers/emc/emc_smis_common.py:559 #, python-format -msgid "Checking for SR %s" -msgstr "检查存储库 %s" +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" -#: cinder/virt/xenapi/volumeops.py:106 +#: cinder/volume/drivers/emc/emc_smis_common.py:574 #, python-format -msgid "SR %s not found in the xapi database" -msgstr "在xapi数据库没有找到存储库 %s" - -#: cinder/virt/xenapi/volumeops.py:112 -msgid "Could not forget SR" -msgstr "不能遗忘存储库" +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." 
+msgstr "" -#: cinder/virt/xenapi/volumeops.py:121 +#: cinder/volume/drivers/emc/emc_smis_common.py:590 #, python-format -msgid "Attach_volume: %(connection_info)s, %(instance_name)s, %(mountpoint)s" -msgstr "Attach_volume: %(connection_info)s, %(instance_name)s, %(mountpoint)s" +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" -#: cinder/virt/xenapi/volumeops.py:178 +#: cinder/volume/drivers/emc/emc_smis_common.py:599 #, python-format -msgid "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s" -msgstr "无法在存储库 %(sr_ref)s 上为实例 %(instance_name)s 创建 VDI" +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" -#: cinder/virt/xenapi/volumeops.py:189 +#: cinder/volume/drivers/emc/emc_smis_common.py:611 #, python-format -msgid "Unable to use SR %(sr_ref)s for instance %(instance_name)s" -msgstr "存储库 %(sr_ref)s 不能为实例%(instance_name)s使用" +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" -#: cinder/virt/xenapi/volumeops.py:197 -#, python-format -msgid "Unable to attach volume to instance %s" -msgstr "无法附加卷到实例 %s" +#: cinder/volume/drivers/emc/emc_smis_common.py:621 +#, fuzzy, python-format +msgid "Create export: %(volume)s" +msgstr "重新导出卷%s" -#: cinder/virt/xenapi/volumeops.py:200 -#, python-format -msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s" -msgstr "挂载点 %(mountpoint)s 附加到实例 %(instance_name)s" +#: cinder/volume/drivers/emc/emc_smis_common.py:626 +#, fuzzy, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "把卷 %(volume_id)s 附加到实例 %(server_id)s 的 %(device)s 设备上" -#: cinder/virt/xenapi/volumeops.py:210 +#: cinder/volume/drivers/emc/emc_smis_common.py:648 #, python-format -msgid "Detach_volume: %(instance_name)s, %(mountpoint)s" -msgstr "分离_volume: %(instance_name)s, %(mountpoint)s" +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" -#: cinder/virt/xenapi/volumeops.py:219 +#: cinder/volume/drivers/emc/emc_smis_common.py:663 #, python-format -msgid "Unable to locate volume %s" -msgstr "无法找到 %s 卷" +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" -#: cinder/virt/xenapi/volumeops.py:227 -#, python-format -msgid "Unable to detach volume %s" -msgstr "无法分离 %s 卷" +#: cinder/volume/drivers/emc/emc_smis_common.py:674 +#, fuzzy, python-format +msgid "Error mapping volume %s." +msgstr "启动xvp发生错误:%s" -#: cinder/virt/xenapi/volumeops.py:232 -#, python-format -msgid "Unable to destroy vbd %s" -msgstr "无法销毁VBD %s" +#: cinder/volume/drivers/emc/emc_smis_common.py:678 +#, fuzzy, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "卷%s:删除成功" -#: cinder/virt/xenapi/volumeops.py:239 +#: cinder/volume/drivers/emc/emc_smis_common.py:694 #, python-format -msgid "Error purging SR %s" -msgstr "净化存储库 %s 出错" +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" -#: cinder/virt/xenapi/volumeops.py:241 +#: cinder/volume/drivers/emc/emc_smis_common.py:707 #, python-format -msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s" -msgstr "挂载点 %(mountpoint)s 从实例 %(instance_name)s 分离" +msgid "Error unmapping volume %s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:711 +#, fuzzy, python-format +msgid "HidePaths for volume %s completed successfully." +msgstr "卷%s:删除成功" -#: cinder/vnc/xvp_proxy.py:98 cinder/vnc/xvp_proxy.py:103 +#: cinder/volume/drivers/emc/emc_smis_common.py:724 #, python-format -msgid "Error in handshake: %s" -msgstr "握手出错:%s" +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" -#: cinder/vnc/xvp_proxy.py:119 +#: cinder/volume/drivers/emc/emc_smis_common.py:739 #, python-format -msgid "Invalid request: %s" -msgstr "无效的请求:%s" +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:744 +#, fuzzy, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "卷%s:删除成功" -#: cinder/vnc/xvp_proxy.py:139 +#: cinder/volume/drivers/emc/emc_smis_common.py:757 #, python-format -msgid "Request: %s" -msgstr "请求:%s" +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" -#: cinder/vnc/xvp_proxy.py:142 +#: cinder/volume/drivers/emc/emc_smis_common.py:770 #, python-format -msgid "Request made with missing token: %s" -msgstr "请求缺少令牌:%s" +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:775 +#, fuzzy, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "卷%s:删除成功" + +#: cinder/volume/drivers/emc/emc_smis_common.py:781 +#, fuzzy, python-format +msgid "Map volume: %(volume)s" +msgstr "没有id为 %(volume_id)s 的 sm_volume" -#: cinder/vnc/xvp_proxy.py:153 +#: cinder/volume/drivers/emc/emc_smis_common.py:790 +#: cinder/volume/drivers/emc/emc_smis_common.py:820 #, python-format -msgid "Request made with invalid token: %s" -msgstr "请求中有无效令牌:%s" +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:804 +#, fuzzy, python-format +msgid "Unmap volume: %(volume)s" +msgstr "没有id为 %(volume_id)s 的 sm_volume" -#: cinder/vnc/xvp_proxy.py:160 +#: cinder/volume/drivers/emc/emc_smis_common.py:810 #, python-format -msgid "Unexpected error: %s" -msgstr "意外错误:%s" +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" -#: cinder/vnc/xvp_proxy.py:180 +#: cinder/volume/drivers/emc/emc_smis_common.py:834 #, python-format -msgid "Starting cinder-xvpvncproxy node (version %s)" -msgstr "启动cinder-xvpvncproxy节点(版本 %s)" +msgid "Initialize connection: %(volume)s" +msgstr "" -#: cinder/volume/api.py:74 cinder/volume/api.py:220 -msgid "status must be available" -msgstr "状态必须可用" +#: cinder/volume/drivers/emc/emc_smis_common.py:840 +#, fuzzy, python-format +msgid "Volume %s is already mapped." +msgstr "rootfs 已经被移除了" -#: cinder/volume/api.py:85 +#: cinder/volume/drivers/emc/emc_smis_common.py:852 #, python-format -msgid "Quota exceeded for %(pid)s, tried to create %(size)sG volume" -msgstr "%(pid)s 的配额超出,尝试创建 %(size)sG 的卷" +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:884 +#, fuzzy, python-format +msgid "Found Storage Type: %s" +msgstr "找到solidfire帐户:%s" -#: cinder/volume/api.py:137 +#: cinder/volume/drivers/emc/emc_smis_common.py:887 #, fuzzy -msgid "Volume status must be available or error" -msgstr "卷组状态必须可获取" +msgid "Storage type not found." 
+msgstr "镜像没有找到。" -#: cinder/volume/api.py:142 +#: cinder/volume/drivers/emc/emc_smis_common.py:903 #, python-format -msgid "Volume still has %d dependent snapshots" +msgid "Found Masking View: %s" msgstr "" -#: cinder/volume/api.py:223 -msgid "already attached" -msgstr "已经附加" +#: cinder/volume/drivers/emc/emc_smis_common.py:906 +#, fuzzy +msgid "Masking View not found." +msgstr "镜像没有找到。" -#: cinder/volume/api.py:230 -msgid "already detached" -msgstr "已经分离" +#: cinder/volume/drivers/emc/emc_smis_common.py:928 +#, fuzzy +msgid "Ecom user not found." +msgstr "没有找到服务器。" -#: cinder/volume/api.py:292 -msgid "must be available" -msgstr "必须可用" +#: cinder/volume/drivers/emc/emc_smis_common.py:948 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" -#: cinder/volume/api.py:325 +#: cinder/volume/drivers/emc/emc_smis_common.py:952 #, fuzzy -msgid "Volume Snapshot status must be available or error" -msgstr "卷组状态必须可获取" +msgid "Ecom server not found." +msgstr "没有找到服务器。" -#: cinder/volume/driver.py:96 -#, python-format -msgid "Recovering from a failed execute. Try number %s" -msgstr "从失败的执行中恢复。尝试编号 %s" +#: cinder/volume/drivers/emc/emc_smis_common.py:959 +#, fuzzy +msgid "Cannot connect to ECOM server" +msgstr "连接到 %s 的AMQP服务器" -#: cinder/volume/driver.py:106 +#: cinder/volume/drivers/emc/emc_smis_common.py:971 #, python-format -msgid "volume group %s doesn't exist" -msgstr "卷组 %s 不存在" +msgid "Found Replication Service: %s" +msgstr "" -#: cinder/volume/driver.py:270 +#: cinder/volume/drivers/emc/emc_smis_common.py:984 #, python-format -msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %d" -msgstr "跳过ensure_export。没有为卷提供iscsi_target:%d" +msgid "Found Storage Configuration Service: %s" +msgstr "" -#: cinder/volume/driver.py:318 +#: cinder/volume/drivers/emc/emc_smis_common.py:997 #, python-format -msgid "Skipping remove_export. No iscsi_target provisioned for volume: %d" -msgstr "跳过remove_export。没有为卷提供iscsi_target:%d" +msgid "Found Controller Configuration Service: %s" +msgstr "" -#: cinder/volume/driver.py:327 +#: cinder/volume/drivers/emc/emc_smis_common.py:1010 #, python-format -msgid "" -"Skipping remove_export. No iscsi_target is presently exported for volume:" -" %d" -msgstr "跳过remove_export。没有为卷导出iscsi_target:%d" +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" -#: cinder/volume/driver.py:337 -msgid "ISCSI provider_location not stored, using discovery" -msgstr "采用discovery,ISCSI provider_location 没有存储" +#: cinder/volume/drivers/emc/emc_smis_common.py:1054 +#, fuzzy, python-format +msgid "Pool %(storage_type)s is not found." +msgstr "角色 %(role_id)s 没有找到。" -#: cinder/volume/driver.py:384 +#: cinder/volume/drivers/emc/emc_smis_common.py:1060 #, python-format -msgid "Could not find iSCSI export for volume %s" -msgstr "无法为卷 %s 找到 iSCSI 导出" +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" -#: cinder/volume/driver.py:388 +#: cinder/volume/drivers/emc/emc_smis_common.py:1066 #, python-format -msgid "ISCSI Discovery: Found %s" -msgstr "ISCSI Discovery:找到 %s" +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" -#: cinder/volume/driver.py:466 +#: cinder/volume/drivers/emc/emc_smis_common.py:1082 #, python-format -msgid "Cannot confirm exported volume id:%(volume_id)s." -msgstr "无法确认导出的卷id:%(volume_id)s。" +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1114 +#, fuzzy, python-format +msgid "Volume %(volumename)s not found on the array." 
+msgstr "卷 %(volume_id)s 没有找到。" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1117 +#, fuzzy, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "没有为实例 %(instance_id)s 找到卷。" -#: cinder/volume/driver.py:493 +#: cinder/volume/drivers/emc/emc_smis_common.py:1130 #, python-format -msgid "FAKE ISCSI: %s" -msgstr "FAKE ISCSI: %s" +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" -#: cinder/volume/driver.py:505 +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 #, python-format -msgid "rbd has no pool %s" -msgstr "RBD没有池 %s" +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" -#: cinder/volume/driver.py:579 +#: cinder/volume/drivers/emc/emc_smis_common.py:1158 #, python-format -msgid "Sheepdog is not working: %s" -msgstr "Sheepdog 没有工作:%s" +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" -#: cinder/volume/driver.py:581 -msgid "Sheepdog is not working" -msgstr "Sheepdog 没有工作" +#: cinder/volume/drivers/emc/emc_smis_common.py:1184 +#, fuzzy, python-format +msgid "Error finding %s." +msgstr "在存储库 %s 寻找VDIs出错" -#: cinder/volume/driver.py:680 cinder/volume/driver.py:685 +#: cinder/volume/drivers/emc/emc_smis_common.py:1188 #, python-format -msgid "LoggingVolumeDriver: %s" -msgstr "LoggingVolumeDriver: %s" +msgid "Found %(name)s: %(initiator)s." +msgstr "" -#: cinder/volume/manager.py:96 +#: cinder/volume/drivers/emc/emc_smis_common.py:1248 #, python-format -msgid "Re-exporting %s volumes" -msgstr "重新导出卷%s" +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" -#: cinder/volume/manager.py:101 +#: cinder/volume/drivers/emc/emc_smis_common.py:1289 #, python-format -msgid "volume %s: skipping export" -msgstr "卷 %s:跳过导出" +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" -#: cinder/volume/manager.py:107 +#: cinder/volume/drivers/emc/emc_smis_common.py:1302 #, python-format -msgid "volume %s: creating" -msgstr "卷 %s: 创建中" +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" -#: cinder/volume/manager.py:119 +#: cinder/volume/drivers/emc/emc_smis_common.py:1314 #, python-format -msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" -msgstr "卷%(vol_name)s:创建大小为%(vol_size)s的逻辑卷" +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" -#: cinder/volume/manager.py:131 +#: cinder/volume/drivers/emc/emc_smis_common.py:1326 #, python-format -msgid "volume %s: creating export" -msgstr "卷%s:正在创建导出" +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" -#: cinder/volume/manager.py:144 +#: cinder/volume/drivers/emc/emc_smis_common.py:1361 #, python-format -msgid "volume %s: created successfully" -msgstr "卷%s:创建成功" - -#: cinder/volume/manager.py:153 -msgid "Volume is still attached" -msgstr "卷仍在附加中" - -#: cinder/volume/manager.py:155 -msgid "Volume is not local to this node" -msgstr "卷不属于这个节点" +msgid "Available device number on %(storage)s: %(device)s." 
+msgstr "" -#: cinder/volume/manager.py:159 +#: cinder/volume/drivers/emc/emc_smis_common.py:1404 #, python-format -msgid "volume %s: removing export" -msgstr "卷%s:正在移除导出" +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" -#: cinder/volume/manager.py:161 +#: cinder/volume/drivers/emc/emc_smis_common.py:1409 #, python-format -msgid "volume %s: deleting" -msgstr "卷%s:删除中" +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" -#: cinder/volume/manager.py:164 +#: cinder/volume/drivers/emc/emc_smis_common.py:1419 #, python-format -msgid "volume %s: volume is busy" -msgstr "卷 %s:卷繁忙" +msgid "Device info: %(data)s." +msgstr "" -#: cinder/volume/manager.py:176 +#: cinder/volume/drivers/emc/emc_smis_common.py:1441 #, python-format -msgid "volume %s: deleted successfully" -msgstr "卷%s:删除成功" +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" -#: cinder/volume/manager.py:183 +#: cinder/volume/drivers/emc/emc_smis_common.py:1463 #, python-format -msgid "snapshot %s: creating" -msgstr "快照 %s:正在创建" +msgid "Found Storage Processor System: %s" +msgstr "" -#: cinder/volume/manager.py:187 +#: cinder/volume/drivers/emc/emc_smis_common.py:1491 #, python-format -msgid "snapshot %(snap_name)s: creating" -msgstr "快照 %(snap_name)s:正在创建" +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." +msgstr "" -#: cinder/volume/manager.py:202 -#, python-format -msgid "snapshot %s: created successfully" -msgstr "快照 %s:创建成功" +#: cinder/volume/drivers/emc/emc_smis_common.py:1520 +msgid "Error finding Storage Hardware ID Service." +msgstr "" -#: cinder/volume/manager.py:211 +#: cinder/volume/drivers/emc/emc_smis_common.py:1526 #, python-format -msgid "snapshot %s: deleting" -msgstr "快照 %s:正在删除" +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" -#: cinder/volume/manager.py:214 -#, fuzzy, python-format -msgid "snapshot %s: snapshot is busy" -msgstr "快照 %s:创建成功" +#: cinder/volume/drivers/emc/emc_smis_common.py:1538 +msgid "Error finding Target WWNs." +msgstr "" -#: cinder/volume/manager.py:226 +#: cinder/volume/drivers/emc/emc_smis_common.py:1548 #, python-format -msgid "snapshot %s: deleted successfully" -msgstr "快照 %s:删除成功" - -#: cinder/volume/manager.py:310 -msgid "Checking volume capabilities" -msgstr "检查卷能力" +msgid "Add target WWN: %s." +msgstr "" -#: cinder/volume/manager.py:314 +#: cinder/volume/drivers/emc/emc_smis_common.py:1550 #, python-format -msgid "New capabilities found: %s" -msgstr "找到新能力:%s" +msgid "Target WWNs: %s." +msgstr "" -#: cinder/volume/manager.py:325 -msgid "Clear capabilities" -msgstr "清理能力" +#: cinder/volume/drivers/emc/emc_smis_common.py:1566 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." 
+msgstr "" -#: cinder/volume/manager.py:329 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:152 #, python-format -msgid "Notification {%s} received" -msgstr "收到通知 {%s}" +msgid "Could not find iSCSI export for volume %s" +msgstr "无法为卷 %s 找到 iSCSI 导出" -#: cinder/volume/netapp.py:79 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:161 #, fuzzy, python-format -msgid "API %(name)sfailed: %(reason)s" -msgstr "镜像 %(image_id)s 无法接受,原因是: %(reason)s" +msgid "Cannot find device number for volume %s" +msgstr "无法为卷 %s 找到 iSCSI 导出" -#: cinder/volume/netapp.py:109 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:191 #, fuzzy, python-format -msgid "%s is not set" -msgstr "租户ID没有设" +msgid "Found iSCSI endpoint: %s" +msgstr "引起异常 NotFound: %s" -#: cinder/volume/netapp.py:128 -#, fuzzy, python-format -msgid "Connected to DFM server" -msgstr "连接到 %s 的AMQP服务器" +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:198 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" -#: cinder/volume/netapp.py:159 -#, fuzzy, python-format -msgid "Job failed: %s" -msgstr "未知的基文件:%s" +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:215 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" -#: cinder/volume/netapp.py:240 -#, fuzzy -msgid "Failed to provision dataset member" -msgstr "更新数据库失败" +#: cinder/volume/drivers/hds/hds.py:70 +#, python-format +msgid "Range: start LU: %(start)s, end LU: %(end)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:84 +#, python-format +msgid "setting LU upper (end) limit to %s" +msgstr "" -#: cinder/volume/netapp.py:252 -msgid "No LUN was created by the provision job" +#: cinder/volume/drivers/hds/hds.py:92 +#, python-format +msgid "%(element)s: %(val)s" msgstr "" -#: cinder/volume/netapp.py:261 cinder/volume/netapp.py:433 +#: cinder/volume/drivers/hds/hds.py:103 cinder/volume/drivers/hds/hds.py:105 #, fuzzy, python-format -msgid "Failed to find LUN ID for volume %s" -msgstr "无法为VDI %s 找到VBD" +msgid "XML exception reading parameter: %s" +msgstr "加载扩展发生异常:%s" -#: cinder/volume/netapp.py:280 -msgid "Failed to remove and delete dataset member" +#: cinder/volume/drivers/hds/hds.py:178 +#, python-format +msgid "portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s" msgstr "" -#: cinder/volume/netapp.py:603 cinder/volume/netapp.py:657 -#, fuzzy, python-format -msgid "No LUN ID for volume %s" -msgstr "无法为卷 %s 找到 iSCSI 导出" +#: cinder/volume/drivers/hds/hds.py:197 +#, python-format +msgid "No configuration found for service: %s" +msgstr "" -#: cinder/volume/netapp.py:607 cinder/volume/netapp.py:661 +#: cinder/volume/drivers/hds/hds.py:250 #, fuzzy, python-format -msgid "Failed to get LUN details for LUN ID %s" -msgstr "为ip: %s获取元数据失败" +msgid "HDP not found: %s" +msgstr "没有找到主机" -#: cinder/volume/netapp.py:614 -#, fuzzy, python-format -msgid "Failed to get host details for host ID %s" -msgstr "为ip: %s获取元数据失败" +#: cinder/volume/drivers/hds/hds.py:289 +#, python-format +msgid "iSCSI portal not found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:327 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:355 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is cloned." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:372 +#, python-format +msgid "LUN %(lun)s extended to %(size)s GB." 
+msgstr "" -#: cinder/volume/netapp.py:620 +#: cinder/volume/drivers/hds/hds.py:395 #, fuzzy, python-format -msgid "Failed to get target portal for filer: %s" -msgstr "为ip: %s获取元数据失败" +msgid "delete lun %(lun)s on %(name)s" +msgstr "删除id为 %s 的卷" + +#: cinder/volume/drivers/hds/hds.py:480 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created from snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:503 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is created as snapshot." +msgstr "" -#: cinder/volume/netapp.py:625 +#: cinder/volume/drivers/hds/hds.py:522 #, fuzzy, python-format -msgid "Failed to get target IQN for filer: %s" -msgstr "为ip: %s获取元数据失败" +msgid "LUN %s is deleted." +msgstr "rootfs 已经被移除了" -#: cinder/volume/san.py:113 cinder/volume/san.py:151 -msgid "Specify san_password or san_private_key" -msgstr "指定san_password或者san_private_key" +#: cinder/volume/drivers/huawei/__init__.py:57 +msgid "_instantiate_driver: configuration not found." +msgstr "" -#: cinder/volume/san.py:156 -msgid "san_ip must be set" -msgstr "san_ip必须设置" +#: cinder/volume/drivers/huawei/__init__.py:64 +#, python-format +msgid "" +"_instantiate_driver: Loading %(protocol)s driver for Huawei OceanStor " +"%(product)s series storage arrays." +msgstr "" -#: cinder/volume/san.py:320 +#: cinder/volume/drivers/huawei/__init__.py:84 #, python-format -msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" -msgstr "没有为 %(zfs_poolname)s 找到LUID。Output=%(out)s" +msgid "" +"\"Product\" or \"Protocol\" is illegal. \"Product\" should be set to " +"either T, Dorado or HVS. \"Protocol\" should be set to either iSCSI or " +"FC. Product: %(product)s Protocol: %(protocol)s" +msgstr "" -#: cinder/volume/san.py:452 +#: cinder/volume/drivers/huawei/huawei_dorado.py:74 #, python-format -msgid "CLIQ command returned %s" -msgstr "返回CLIQ命令 %s" +msgid "" +"initialize_connection: volume name: %(vol)s host: %(host)s initiator: " +"%(wwn)s" +msgstr "" -#: cinder/volume/san.py:458 +#: cinder/volume/drivers/huawei/huawei_dorado.py:92 +#: cinder/volume/drivers/huawei/huawei_t.py:461 #, python-format -msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" -msgstr "CLIQ命令 %(verb)s %(cliq_args)s 错误格式的响应。Result=%(out)s" +msgid "initialize_connection: Target FC ports WWNS: %s" +msgstr "" -#: cinder/volume/san.py:466 +#: cinder/volume/drivers/huawei/huawei_t.py:101 #, python-format -msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" -msgstr "在运行CLIQ命令 %(verb)s %(cliq_args)s 时发生错误。输出结果 Result=%(out)s" +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(ini)s" +msgstr "" -#: cinder/volume/san.py:496 +#: cinder/volume/drivers/huawei/huawei_t.py:159 +#: cinder/volume/drivers/huawei/rest_common.py:1278 #, python-format msgid "" -"Unexpected number of virtual ips for cluster %(cluster_name)s. " -"Result=%(_xml)s" -msgstr "集群 %(cluster_name)s 有意外数量的虚拟 ip 地址。输出结果 Result=%(_xml)s" +"_get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " +"check config file." +msgstr "" -#: cinder/volume/san.py:549 +#: cinder/volume/drivers/huawei/huawei_t.py:206 +#: cinder/volume/drivers/huawei/rest_common.py:1083 #, python-format -msgid "Volume info: %(volume_name)s => %(volume_attributes)s" -msgstr "卷信息:%(volume_name)s => %(volume_attributes)s" +msgid "_get_tgt_iqn: iSCSI IP is %s." 
+msgstr "" -#: cinder/volume/san.py:594 -msgid "local_path not supported" -msgstr "不支持local_path" +#: cinder/volume/drivers/huawei/huawei_t.py:234 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s." +msgstr "" -#: cinder/volume/san.py:626 +#: cinder/volume/drivers/huawei/huawei_t.py:248 #, python-format -msgid "Could not determine project for volume %s, can't export" -msgstr "无法为卷 %s 确定项目,无法导出" +msgid "" +"_get_iscsi_tgt_port_info: Failed to get iSCSI port info. Please make sure" +" the iSCSI port IP %s is configured in array." +msgstr "" -#: cinder/volume/san.py:696 +#: cinder/volume/drivers/huawei/huawei_t.py:323 +#: cinder/volume/drivers/huawei/huawei_t.py:552 #, python-format -msgid "Payload for SolidFire API call: %s" -msgstr "SolidFire API 调用的参数:%s" +msgid "" +"terminate_connection: volume: %(vol)s, host: %(host)s, connector: " +"%(initiator)s" +msgstr "" -#: cinder/volume/san.py:713 +#: cinder/volume/drivers/huawei/huawei_t.py:351 #, python-format -msgid "Call to json.loads() raised an exception: %s" -msgstr "调用 json.loads() 引起异常:%s" +msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:436 +msgid "validate_connector: The FC driver requires thewwpns in the connector." +msgstr "" -#: cinder/volume/san.py:718 +#: cinder/volume/drivers/huawei/huawei_t.py:443 #, python-format -msgid "Results of SolidFire API call: %s" -msgstr "SolidFire API调用结果:%s" +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(wwn)s" +msgstr "" -#: cinder/volume/san.py:732 +#: cinder/volume/drivers/huawei/huawei_t.py:578 #, python-format -msgid "Found solidfire account: %s" -msgstr "找到solidfire帐户:%s" +msgid "_remove_fc_ports: FC port was not found on host %(hostid)s." +msgstr "" -#: cinder/volume/san.py:746 +#: cinder/volume/drivers/huawei/huawei_utils.py:40 #, python-format -msgid "solidfire account: %s does not exist, create it..." -msgstr "solidfire帐户:%s 不存在,正在创建..." +msgid "parse_xml_file: %s" +msgstr "" -#: cinder/volume/san.py:804 -msgid "Enter SolidFire create_volume..." -msgstr "进入SolidFire create_volume..." +#: cinder/volume/drivers/huawei/huawei_utils.py:129 +#, python-format +msgid "_get_host_os_type: Host %(ip)s OS type is %(os)s." +msgstr "" -#: cinder/volume/san.py:846 -msgid "Leaving SolidFire create_volume" -msgstr "离开SolidFire create_volume" +#: cinder/volume/drivers/huawei/rest_common.py:59 +#, python-format +msgid "HVS Request URL: %(url)s" +msgstr "" -#: cinder/volume/san.py:861 -msgid "Enter SolidFire delete_volume..." -msgstr "进入SolidFire delete_volume..." +#: cinder/volume/drivers/huawei/rest_common.py:60 +#, python-format +msgid "HVS Request Data: %(data)s" +msgstr "" -#: cinder/volume/san.py:880 +#: cinder/volume/drivers/huawei/rest_common.py:73 #, python-format -msgid "Deleting volumeID: %s " -msgstr "正在删除volumeID:%s " +msgid "HVS Response Data: %(res)s" +msgstr "" -#: cinder/volume/san.py:888 -msgid "Leaving SolidFire delete_volume" -msgstr "离开SolidFire delete_volume" +#: cinder/volume/drivers/huawei/rest_common.py:75 +#, python-format +msgid "Bad response from server: %s" +msgstr "" -#: cinder/volume/san.py:891 -msgid "Executing SolidFire ensure_export..." -msgstr "正在执行SolidFire ensure_export..." +#: cinder/volume/drivers/huawei/rest_common.py:82 +msgid "JSON transfer error" +msgstr "" -#: cinder/volume/san.py:895 -msgid "Executing SolidFire create_export..." -msgstr "正在执行SolidFire create_export..." 
+#: cinder/volume/drivers/huawei/rest_common.py:102 +#, python-format +msgid "Login error, reason is %s" +msgstr "" -#: cinder/volume/volume_types.py:49 cinder/volume/volume_types.py:108 -msgid "name cannot be None" -msgstr "name不能是None" +#: cinder/volume/drivers/huawei/rest_common.py:166 +#, python-format +msgid "" +"%(err)s\n" +"result: %(res)s" +msgstr "" -#: cinder/volume/volume_types.py:96 -msgid "id cannot be None" -msgstr "id不能是None" +#: cinder/volume/drivers/huawei/rest_common.py:173 +#, python-format +msgid "%s \"data\" was not in result." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:208 +msgid "Can't find the Qos policy in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:246 +msgid "Can't find lun or lun group in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:280 +#, python-format +msgid "Invalid resource pool: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:298 +#, python-format +msgid "Get pool info error, pool name is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:327 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:354 +#, python-format +msgid "_stop_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:474 +#, python-format +msgid "" +"_mapping_hostgroup_and_lungroup: lun_group: %(lun_group)sview_id: " +"%(view_id)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:511 +#: cinder/volume/drivers/huawei/rest_common.py:543 +#, python-format +msgid "initiator name:%(initiator_name)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:527 +#, python-format +msgid "host lun id is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:553 +#, python-format +msgid "the free wwns %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:574 +#, python-format +msgid "the fc server properties is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:688 +#, python-format +msgid "JSON transfer data error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:874 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:937 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:964 +#, python-format +msgid "" +"PrefetchType config is wrong. PrefetchType must in 1,2,3,4. fetchtype " +"is:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:970 +msgid "Use default prefetch fetchtype. Prefetch fetchtype:Intelligent." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:982 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal.LUNcopy name: " +"%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1056 +#, python-format +msgid "" +"_get_iscsi_port_info: Failed to get iscsi port info through config IP " +"%(ip)s, please check config file." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1101 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1124 +#, python-format +msgid "_parse_volume_type: type id: %(type_id)s config parameter is: %(params)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1157 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the configuration file " +"%(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1162 +#, python-format +msgid "The config parameters are: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1239 +#: cinder/volume/drivers/huawei/ssh_common.py:118 +#: cinder/volume/drivers/huawei/ssh_common.py:1265 +#, python-format +msgid "_check_conf_file: Config file invalid. %s must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1246 +#: cinder/volume/drivers/huawei/ssh_common.py:125 +msgid "_check_conf_file: Config file invalid. StoragePool must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1256 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1300 +msgid "Can not find lun in array" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:54 +#, python-format +msgid "ssh_read: Read SSH timeout. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:70 +msgid "No response message. Please check system status." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:101 +#: cinder/volume/drivers/huawei/ssh_common.py:1249 +msgid "do_setup" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:135 +#: cinder/volume/drivers/huawei/ssh_common.py:1287 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType is invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:169 +#, python-format +msgid "_get_login_info: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:224 +#, python-format +msgid "create_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:242 +#, python-format +msgid "" +"_name_translate: Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:279 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the element in configuration " +"file %(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:373 +#: cinder/volume/drivers/huawei/ssh_common.py:1451 +#, python-format +msgid "LUNType must be \"Thin\" or \"Thick\". LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:395 +msgid "" +"_parse_conf_lun_params: Use default prefetch type. Prefetch type: " +"Intelligent" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:421 +#, python-format +msgid "" +"_get_maximum_capacity_pool_id: Failed to get pool id. Please check config" +" file and make sure the StoragePool %s is created in storage array." 
+msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:436 +#, python-format +msgid "CLI command: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:466 +#, python-format +msgid "" +"_execute_cli: Can not connect to IP %(old)s, try to connect to the other " +"IP %(new)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:501 +#, python-format +msgid "_execute_cli: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:511 +#, python-format +msgid "delete_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:516 +#, python-format +msgid "delete_volume: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:570 +#, python-format +msgid "" +"create_volume_from_snapshot: snapshot name: %(snapshot)s, volume name: " +"%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:580 +#, python-format +msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:650 +#, python-format +msgid "_wait_for_luncopy: LUNcopy %(luncopyname)s status is %(status)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:688 +#, python-format +msgid "create_cloned_volume: src volume: %(src)s, tgt volume: %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:697 +#, python-format +msgid "Source volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:739 +#, python-format +msgid "" +"extend_volume: extended volume name: %(extended_name)s new added volume " +"name: %(added_name)s new added volume size: %(added_size)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:747 +#, python-format +msgid "extend_volume: volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:779 +#, python-format +msgid "create_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:785 +msgid "create_snapshot: Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:792 +#, python-format +msgid "create_snapshot: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:855 +#, python-format +msgid "delete_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:865 +#, python-format +msgid "" +"delete_snapshot: Can not delete snapshot %s for it is a source LUN of " +"LUNCopy." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:873 +#, python-format +msgid "delete_snapshot: Snapshot %(snap)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:916 +#, python-format +msgid "" +"%(func)s: %(msg)s\n" +"CLI command: %(cmd)s\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:933 +#, python-format +msgid "map_volume: Volume %s was not found." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1079 +#, python-format +msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1102 +#, python-format +msgid "remove_map: Host %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1106 +#, python-format +msgid "remove_map: Volume %s does not exist." 
+msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1119 +#, python-format +msgid "remove_map: No map between host %(host)s and volume %(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1138 +#, python-format +msgid "" +"_delete_map: There are IOs accessing the system. Retry to delete host map" +" %(mapid)s 10s later." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1146 +#, python-format +msgid "" +"_delete_map: Failed to delete host map %(mapid)s.\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1185 +msgid "_update_volume_stats: Updating volume stats." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1277 +msgid "_check_conf_file: Config file invalid. StoragePool must be specified." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1311 +msgid "" +"_get_device_type: The driver only supports Dorado5100 and Dorado 2100 G2 " +"now." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1389 +#, python-format +msgid "" +"create_volume_from_snapshot: %(device)s does not support create volume " +"from snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1396 +#, python-format +msgid "create_cloned_volume: %(device)s does not support clone volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1404 +#, python-format +msgid "extend_volume: %(device)s does not support extend volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1413 +#, python-format +msgid "create_snapshot: %(device)s does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:132 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:142 +#, python-format +msgid "Failed getting details for pool %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:179 +msgid "do_setup: No configured nodes." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:182 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:186 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:190 +msgid "Unable to determine system name" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:193 +msgid "Unable to determine system id" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:196 +msgid "Unable to determine pool extent size" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:203 +#: cinder/volume/drivers/netapp/iscsi.py:122 +#: cinder/volume/drivers/netapp/nfs.py:639 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:157 +#, fuzzy, python-format +msgid "%s is not set" +msgstr "租户ID没有设" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:209 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:217 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:225 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:235 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:254 +msgid "The connector does not contain the required information." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:277 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:297 +msgid "CHAP secret exists for host but CHAP is disabled" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:302 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:314 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:316 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:333 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:342 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:365 +msgid "" +"Could not get FC connection information for the host-volume connection. " +"Is the host configured properly for FC connections?" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:380 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:385 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:403 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:411 +msgid "terminate_connection: Failed to get host name from connector." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:421 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:447 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:459 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:470 +#, python-format +msgid "enter: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:474 +msgid "extend_volume: Extending a volume with snapshots is not supported." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:481 +#, python-format +msgid "leave: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:497 +#, python-format +msgid "enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:523 +#, python-format +msgid "leave: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:540 +#, python-format +msgid "" +"enter: retype: id=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:581 +#, python-format +msgid "" +"exit: retype: ild=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:622 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:623 +msgid "_update_volume_stats: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:44 +#, python-format +msgid "Could not find key in output of command %(cmd)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:64 +#, python-format +msgid "Failed to get code level (%s)." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:86 +#, python-format +msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:143 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:165 +#, python-format +msgid "Failed to find host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:178 +#, python-format +msgid "enter: get_host_from_connector: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:207 +#, python-format +msgid "leave: get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:218 +#, python-format +msgid "enter: create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:224 +msgid "create_host: Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:235 +msgid "create_host: No initiators or wwpns supplied." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:265 +#, python-format +msgid "leave: create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:275 +#, python-format +msgid "enter: map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:301 +#, python-format +msgid "" +"leave: map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host " +"%(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:311 +#, python-format +msgid "enter: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:318 +#, python-format +msgid "unmap_vol_from_host: No mapping of volume %(vol_name)s to any host found." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:324 +#, python-format +msgid "" +"unmap_vol_from_host: Multiple mappings of volume %(vol_name)s found, no " +"host specified." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:336 +#, python-format +msgid "" +"unmap_vol_from_host: No mapping of volume %(vol_name)s to host %(host) " +"found." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:348 +#, python-format +msgid "leave: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:377 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:383 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:390 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:397 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:402 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:408 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:417 +#, python-format +msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:452 +msgid "Protocol must be specified as ' iSCSI' or ' FC'." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:495 +#, python-format +msgid "enter: create_vdisk: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:498 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:525 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping%(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:535 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within theallotted %(to)d " +"seconds timeout. Terminating." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:544 +#, python-format +msgid "" +"enter: run_flashcopy: execute FlashCopy from source %(source)s to target " +"%(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:552 +#, python-format +msgid "leave: run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:572 +#, python-format +msgid "Loopcall: _check_vdisk_fc_mappings(), vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:595 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:631 +#, python-format +msgid "Calling _ensure_vdisk_no_fc_mappings: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:639 +#, python-format +msgid "enter: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:641 +#, python-format +msgid "Tried to delete non-existant vdisk %s." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:645 +#, python-format +msgid "leave: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:649 +#, python-format +msgid "enter: create_copy: snapshot %(src)s to %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:654 +#, python-format +msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:669 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt)s from vdisk %(src)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:691 +msgid "migrate_volume started without a vdisk copy in the expected pool." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:743 +#, python-format +msgid "" +"Ignore change IO group as storage code level is %(code_level)s, below " +"then 6.4.0.0" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:35 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:211 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:244 +#, fuzzy, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" +"%(description)s\n" +"命令:%(cmd)s\n" +"退出代码:%(exit_code)s\n" +"标准输出:%(stdout)r\n" +"标准错误输出:%(stderr)r" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:53 +#, python-format +msgid "Expected no output from CLI command %(cmd)s, got %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:65 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:256 +#, python-format +msgid "" +"Failed to parse CLI output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:142 +msgid "Must pass wwpn or host to lsfabric." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:158 +#, python-format +msgid "Did not find success message nor error for %(fun)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:163 +msgid "" +"storwize_svc_multihostmap_enabled is set to False, not allowing multi " +"host mapping." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:347 +#, python-format +msgid "Did not find expected key %(key)s in %(fun)s: %(raw)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:382 +#, python-format +msgid "" +"Unexpected CLI response: header/row mismatch. header: %(header)s, row: " +"%(row)s" +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:419 +#, python-format +msgid "No element by given name %s." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:440 +msgid "Not a valid value for NaElement." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:444 +msgid "NaElement name cannot be null." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:468 +msgid "Type cannot be converted into NaElement." 
+msgstr "" + +#: cinder/volume/drivers/netapp/common.py:75 +msgid "Required configuration not found" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:103 +#, python-format +msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:109 +#, python-format +msgid "Storage family %s is not supported" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:116 +#, python-format +msgid "No default storage protocol found for storage family %(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:123 +#, python-format +msgid "" +"Protocol %(storage_protocol)s is not supported for storage family " +"%(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:130 +#, python-format +msgid "" +"NetApp driver of family %(storage_family)s and protocol " +"%(storage_protocol)s loaded" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:139 +msgid "Only loading netapp drivers supported." +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:158 +#, python-format +msgid "" +"The configured NetApp driver is deprecated. Please refer the link to " +"resolve the issue '%s'." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:69 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:105 +#, fuzzy, python-format +msgid "Using NetApp filer: %s" +msgstr "正在删除基文件:%s" + +#: cinder/volume/drivers/netapp/iscsi.py:150 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:166 +#, fuzzy, python-format +msgid "Created LUN with name %s" +msgstr "已经创建路径为 %s 的目录" + +#: cinder/volume/drivers/netapp/iscsi.py:175 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:191 +#, fuzzy, python-format +msgid "Destroyed LUN %s" +msgstr "嵌套(调用)返回 %s" + +#: cinder/volume/drivers/netapp/iscsi.py:227 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:232 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:238 +#, fuzzy, python-format +msgid "Failed to get LUN target details for the LUN %s" +msgstr "为ip: %s获取元数据失败" + +#: cinder/volume/drivers/netapp/iscsi.py:249 +#, fuzzy, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "为ip: %s获取元数据失败" + +#: cinder/volume/drivers/netapp/iscsi.py:252 +#, fuzzy, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "为ip: %s获取元数据失败" + +#: cinder/volume/drivers/netapp/iscsi.py:290 +#, fuzzy, python-format +msgid "Snapshot %s deletion successful" +msgstr "快照 %s:删除成功" + +#: cinder/volume/drivers/netapp/iscsi.py:310 +#: cinder/volume/drivers/netapp/iscsi.py:565 +#: cinder/volume/drivers/netapp/nfs.py:99 +#: cinder/volume/drivers/netapp/nfs.py:206 +#, python-format +msgid "Resizing %s failed. Cleaning volume." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:325 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:412 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:431 +#, python-format +msgid "Error unmapping lun. 
Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:511 +msgid "Object is not a NetApp LUN." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:543 +#, python-format +msgid "Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:545 +#, python-format +msgid "Error getting lun attribute. Exception: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:600 +#, python-format +msgid "No need to extend volume %s as it is already the requested new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:606 +#, python-format +msgid "Resizing lun %s directly to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:633 +#, python-format +msgid "Lun %(path)s geometry failed. Message - %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:662 +#, python-format +msgid "Moving lun %(name)s to %(new_name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:677 +#, python-format +msgid "Resizing lun %s using sub clone to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:684 +#, python-format +msgid "%s cannot be sub clone resized as it is hosted on compressed volume" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:690 +#, python-format +msgid "%s cannot be sub clone resized as it contains no blocks." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:707 +#, python-format +msgid "Post clone resize lun %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:718 +#, python-format +msgid "Failure staging lun %s to tmp." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:723 +#, python-format +msgid "Failure moving new cloned lun to %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:727 +#, python-format +msgid "Failure deleting staged tmp lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:730 +#, python-format +msgid "Unknown exception in post clone resize lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:732 +#, python-format +msgid "Exception details: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:736 +msgid "Getting lun block count." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:741 +#, python-format +msgid "Failure getting lun info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:785 +#, python-format +msgid "Failed to get vol with required size and extra specs for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:796 +#, python-format +msgid "Error provisioning vol %(name)s on %(volume)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:841 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:982 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:986 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1087 +msgid "Cluster ssc is not updated. No volume stats found." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1149 +#: cinder/volume/drivers/netapp/nfs.py:1080 +msgid "Unsupported ONTAP version. ONTAP version 7.3.1 and above is supported." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1153 +#: cinder/volume/drivers/netapp/nfs.py:1084 +#: cinder/volume/drivers/netapp/utils.py:320 +msgid "Api version could not be determined." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1164 +#, fuzzy, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "为ip: %s获取元数据失败" + +#: cinder/volume/drivers/netapp/iscsi.py:1273 +#, python-format +msgid "Error finding luns for volume %s. Verify volume exists." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1390 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1393 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1456 +msgid "Volume refresh job already running. Returning..." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1462 +#, python-format +msgid "Error refreshing vol capacity. Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1470 +#, python-format +msgid "Refreshing capacity info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:104 +#: cinder/volume/drivers/netapp/nfs.py:211 +#, python-format +msgid "NFS file %s not discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:224 +#, python-format +msgid "Copied image to volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:230 +#, python-format +msgid "Registering image in cache %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:237 +#, python-format +msgid "" +"Exception while registering image %(image_id)s in cache. Exception: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:250 +#, python-format +msgid "Found cache file for image %(image_id)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:263 +#, python-format +msgid "Cloning img from cache for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:271 +msgid "Image cache cleaning in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:282 +msgid "Image cache cleaning in progress." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:293 +#, python-format +msgid "Cleaning cache for share %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:298 +#, python-format +msgid "Files to be queued for deletion %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:305 +#, python-format +msgid "Exception during cache cleaning %(share)s. Message - %(ex)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:311 +msgid "Image cache cleaning done." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:336 +#, python-format +msgid "Bytes to free %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:343 +#, python-format +msgid "Delete file path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:358 +#, python-format +msgid "Deleting file at path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:363 +#, python-format +msgid "Exception during deleting %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:395 +#, python-format +msgid "Unexpected exception in cloning image %(image_id)s. 
Message: %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:411 +#, python-format +msgid "Cloning image %s from cache" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:415 +#, python-format +msgid "Cache share: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:425 +#, python-format +msgid "Unexpected exception during image cloning in share %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:431 +#, python-format +msgid "Cloning image %s directly in share" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:436 +#, python-format +msgid "Share is cloneable %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:443 +#, python-format +msgid "Image is raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:450 +#, python-format +msgid "Image will locally be converted to raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:457 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "转化为裸格式,但目前格式是 %s" + +#: cinder/volume/drivers/netapp/nfs.py:467 +#, python-format +msgid "Performing post clone for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:474 +msgid "NFS file could not be discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:478 +msgid "Checking file for resize" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:482 +#, python-format +msgid "Resizing file to %sG" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:488 +msgid "Resizing image file failed." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:510 +msgid "Discover file retries exhausted." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:529 +#, python-format +msgid "Image location not in the expected format %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:557 +#, python-format +msgid "Found possible share matches %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:561 +msgid "Unexpected exception while short listing used share." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:599 +#, python-format +msgid "Extending volume %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:710 +#, python-format +msgid "Shares on vserver %s will only be used for provisioning." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:714 +#: cinder/volume/drivers/netapp/nfs.py:892 +msgid "No vserver set in config. SSC will be disabled." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:757 +#, python-format +msgid "Exception creating vol %(name)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:765 +#, python-format +msgid "Volume %s could not be created on shares." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:815 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:856 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:862 +#, python-format +msgid "" +"Cloning with params volume %(volume)s, src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:924 +msgid "No cluster ssc stats found. Wait for next volume stats update." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:941 +msgid "No shares found hence skipping ssc refresh." 
+msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:978 +#: cinder/volume/drivers/netapp/nfs.py:1221 +#, python-format +msgid "Shortlisted del elg files %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:983 +#: cinder/volume/drivers/netapp/nfs.py:1226 +#, python-format +msgid "Getting file usage for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:988 +#: cinder/volume/drivers/netapp/nfs.py:1231 +#, python-format +msgid "file-usage for path %(path)s is %(bytes)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1005 +#: cinder/volume/drivers/netapp/nfs.py:1268 +#, python-format +msgid "Share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1007 +#: cinder/volume/drivers/netapp/nfs.py:1270 +#, python-format +msgid "No share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1038 +#, python-format +msgid "Found volume %(vol)s for share %(share)s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1129 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1139 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:241 +#, python-format +msgid "Unexpected error while creating ssc vol list. Message - %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:272 +#, python-format +msgid "Exception querying aggr options. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:313 +#, python-format +msgid "Exception querying sis information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:347 +#, python-format +msgid "Exception querying mirror information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:379 +#, python-format +msgid "Exception querying storage disk. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:421 +#, python-format +msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:455 +#, python-format +msgid "Successfully completed stale refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:482 +#, python-format +msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:488 +#, python-format +msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:500 +msgid "Backend not a VolumeDriver." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:502 +msgid "Backend server not NaServer." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:505 +msgid "ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:517 +msgid "refresh stale ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:616 +msgid "Fatal error: User not permitted to query NetApp volumes." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:623 +#, python-format +msgid "" +"The user does not have access or sufficient privileges to use all ssc " +"apis. The ssc features %s may not work as expected." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:122 +msgid "ems executed successfully." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:124 +#, python-format +msgid "Failed to invoke ems. 
Message : %s" +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:137 +msgid "" +"It is not the recommended way to use drivers by NetApp. Please use " +"NetAppDriver to achieve the functionality." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:160 +msgid "Requires an NaServer instance." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:317 +msgid "Unsupported Clustered Data ONTAP version." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:99 +#, fuzzy, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "卷组 %s 不存在" + +#: cinder/volume/drivers/nexenta/iscsi.py:150 +#, python-format +msgid "Extending volume: %(id)s New size: %(size)s GB" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:166 +#, python-format +msgid "Volume %s does not exist, it seems it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:179 +#, python-format +msgid "Cannot delete snapshot %(origin): %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:190 +#, python-format +msgid "Creating temp snapshot of the original volume: %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:200 +#: cinder/volume/drivers/nexenta/nfs.py:200 +#, python-format +msgid "Volume creation failed, deleting created snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:205 +#: cinder/volume/drivers/nexenta/nfs.py:205 +#, python-format +msgid "Failed to delete zfs snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:223 +#, python-format +msgid "Enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:250 +#, python-format +msgid "Remote NexentaStor appliance at %s should be SSH-bound." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:267 +#, python-format +msgid "" +"Cannot send source snapshot %(src)s to destination %(dst)s. Reason: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:275 +#, python-format +msgid "" +"Cannot delete temporary source snapshot %(src)s on NexentaStor Appliance:" +" %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:281 +#, python-format +msgid "Cannot delete source volume %(volume)s on NexentaStor Appliance: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:318 +#, python-format +msgid "Snapshot %s does not exist, it seems it was already deleted." 
+msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:439 +#: cinder/volume/drivers/windows/windows_utils.py:230 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:449 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:461 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:471 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:481 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:514 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:522 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:83 +#, fuzzy, python-format +msgid "Sending JSON data: %s" +msgstr "给定数据:%s" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:88 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:89 +#, fuzzy +msgid "Bad response from server" +msgstr "来自SolidFire API的错误响应" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:90 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:96 +#, fuzzy, python-format +msgid "Got response: %s" +msgstr "响应 %s" + +#: cinder/volume/drivers/nexenta/nfs.py:85 +#, python-format +msgid "Volume %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:89 +#, python-format +msgid "Folder %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:114 +#, python-format +msgid "Creating folder on Nexenta Store %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:146 +#, python-format +msgid "Cannot destroy created folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:176 +#, python-format +msgid "Cannot destroy cloned folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:227 +#, python-format +msgid "Folder %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:237 +#: cinder/volume/drivers/nexenta/nfs.py:268 +#, python-format +msgid "Snapshot %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:302 +#, python-format +msgid "Creating regular file: %s.This may take some time." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:313 +#, python-format +msgid "Regular file: %s created." 
+msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:365 +#, python-format +msgid "Sharing folder %s on Nexenta Store" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:393 +#, python-format +msgid "Shares loaded: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/utils.py:46 +#, python-format +msgid "Invalid value: \"%s\"" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:93 +#, python-format +msgid "CLIQ command returned %s" +msgstr "返回CLIQ命令 %s" + +#: cinder/volume/drivers/san/hp_lefthand.py:99 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "CLIQ命令 %(verb)s %(cliq_args)s 错误格式的响应。Result=%(out)s" + +#: cinder/volume/drivers/san/hp_lefthand.py:107 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "在运行CLIQ命令 %(verb)s %(cliq_args)s 时发生错误。输出结果 Result=%(out)s" + +#: cinder/volume/drivers/san/hp_lefthand.py:137 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "集群 %(cluster_name)s 有意外数量的虚拟 ip 地址。输出结果 Result=%(_xml)s" + +#: cinder/volume/drivers/san/hp_lefthand.py:190 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "卷信息:%(volume_name)s => %(volume_attributes)s" + +#: cinder/volume/drivers/san/hp_lefthand.py:246 +#, python-format +msgid "Snapshot info: %(name)s => %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:321 +msgid "local_path not supported" +msgstr "不支持local_path" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "指定san_password或者san_private_key" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "san_ip必须设置" + +#: cinder/volume/drivers/san/solaris.py:79 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "没有为 %(zfs_poolname)s 找到LUID。Output=%(out)s" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:166 +#, python-format +msgid "Invalid hp3parclient version. Version %s or greater required." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:179 +#, python-format +msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:193 +#, python-format +msgid "HP3PARCommon %(common_ver)s, hp3parclient %(rest_ver)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:212 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:494 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:228 +#, python-format +msgid "Failed to get domain because CPG (%s) doesn't exist on array." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:247 +#, python-format +msgid "Error extending volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:347 +#, python-format +msgid "command %s failed" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:390 +#, fuzzy, python-format +msgid "Error running ssh command: %s" +msgstr "握手出错:%s" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:586 +#, python-format +msgid "VV Set %s does not exist." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:633 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:684 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:752 +#, fuzzy, python-format +msgid "Volume (%s) already exists on array" +msgstr "安全组 %s 已经存在" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1004 +#, python-format +msgid "Failure in update_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1019 +#, python-format +msgid "Failure in clear_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1031 +#, python-format +msgid "Error attaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1039 +#, python-format +msgid "Error detaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:124 +#, python-format +msgid "Invalid IP address format '%s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:158 +#, python-format +msgid "" +"Found invalid iSCSI IP address(s) in configuration option(s) " +"hp3par_iscsi_ips or iscsi_ip_address '%s.'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:164 +msgid "At least one valid iSCSI IP address must be set." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:266 +msgid "Least busy iSCSI port not found, using first iSCSI port in list." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:75 +#, python-format +msgid "Failure while invoking function: %(func)s. Error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:162 +#, python-format +msgid "Error while terminating session: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:165 +msgid "Successfully established connection to the server." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:172 +#, python-format +msgid "Error while logging out the user: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:218 +#, python-format +msgid "" +"Not authenticated error occurred. Will create session and try API call " +"again: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:258 +#, python-format +msgid "Task: %(task)s progress: %(prog)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:262 +#, python-format +msgid "Task %s status: success." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:266 +#: cinder/volume/drivers/vmware/api.py:271 +#, python-format +msgid "Task: %(task)s failed with error: %(err)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:290 +msgid "Lease is ready." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:294 +msgid "Lease initializing..." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:304 +#, python-format +msgid "Error: unknown lease state %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:51 +#, python-format +msgid "Read %(bytes)s out of %(max)s from ThreadSafePipe." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:56 +#, python-format +msgid "Completed transfer of size %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:102 +#, python-format +msgid "Initiating image service update on image: %(image)s with meta: %(meta)s" +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:117 +#, python-format +msgid "Glance image: %s is now active." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:123 +#, python-format +msgid "Glance image: %s is in killed state." 
+msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:132 +#, python-format +msgid "Glance image %(id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:171 +#, python-format +msgid "" +"Exception during HTTP connection close in VMwareHTTPWrite. Exception is " +"%s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:203 +#: cinder/volume/drivers/vmware/read_write_util.py:292 +msgid "Could not retrieve URL from lease." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:206 +#, python-format +msgid "Opening vmdk url: %s for write." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:231 +#, python-format +msgid "Written %s bytes to vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:242 +#: cinder/volume/drivers/vmware/read_write_util.py:318 +#, python-format +msgid "Updating progress to %s percent." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:258 +#: cinder/volume/drivers/vmware/read_write_util.py:334 +msgid "Lease released." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:260 +#: cinder/volume/drivers/vmware/read_write_util.py:336 +#, python-format +msgid "Lease is already in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:295 +#, python-format +msgid "Opening vmdk url: %s for read." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:307 +#, python-format +msgid "Read %s bytes from vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:150 +#, python-format +msgid "Error(s): %s occurred in the call to RetrievePropertiesEx." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:189 +#, python-format +msgid "No such SOAP method %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:198 +#, python-format +msgid "httplib error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:209 +#, python-format +msgid "Socket error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:218 +#, python-format +msgid "Type error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:225 +#, python-format +msgid "Error in %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:112 +#, python-format +msgid "Returning spec value %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:115 +#, python-format +msgid "Invalid spec value: %s specified." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:118 +#, python-format +msgid "Returning default spec value: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:169 +#, python-format +msgid "%s not set." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:174 +#, python-format +msgid "Successfully setup driver: %(driver)s for server: %(ip)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:219 +msgid "Backing not available, no operation to be performed." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:287 +#, python-format +msgid "" +"Unable to pick datastore to accommodate %(size)s bytes from the " +"datastores: %(dss)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:293 +#, python-format +msgid "" +"Selected datastore: %(datastore)s with %(host_count)d connected host(s) " +"for the volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:375 +#, python-format +msgid "" +"Unable to find suitable datastore for volume of size: %(vol)s GB under " +"host: %(host)s. 
More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:385 +#, python-format +msgid "Unable to find host to accommodate a disk of size: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:412 +#, python-format +msgid "" +"Unable to find suitable datastore for volume: %(vol)s under host: " +"%(host)s. More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:422 +#, python-format +msgid "Unable to create volume: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:441 +#, python-format +msgid "The instance: %s for which initialize connection is called, exists." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:448 +#, python-format +msgid "There is no backing for the volume: %s. Need to create one." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:456 +msgid "The instance for which initialize connection is called, does not exist." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:461 +#, python-format +msgid "Trying to boot from an empty volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:470 +#, python-format +msgid "" +"Returning connection_info: %(info)s for volume: %(volume)s with " +"connector: %(connector)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:518 +#, python-format +msgid "Snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:523 +#, python-format +msgid "There is no backing, so will not create snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:528 +#, python-format +msgid "Successfully created snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:549 +#, python-format +msgid "Delete snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:554 +#, python-format +msgid "There is no backing, and so there is no snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:558 +#, python-format +msgid "Successfully deleted snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:586 +#, python-format +msgid "Successfully cloned new backing: %(back)s from source VMDK file: %(vmdk)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:603 +#, python-format +msgid "" +"There is no backing for the source volume: %(svol)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:633 +#, python-format +msgid "" +"There is no backing for the source snapshot: %(snap)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:642 +#: cinder/volume/drivers/vmware/vmdk.py:982 +#, python-format +msgid "" +"There is no snapshot point for the snapshoted volume: %(snap)s. Not " +"creating any backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:678 +#, python-format +msgid "Cannot create image of disk format: %s. Only vmdk disk format is accepted." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:713 +#: cinder/volume/drivers/vmware/vmdk.py:771 +#, python-format +msgid "Fetching glance image: %(id)s to server: %(host)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:722 +#: cinder/volume/drivers/vmware/vmdk.py:792 +#, python-format +msgid "Done copying image: %(id)s to volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:725 +#, python-format +msgid "" +"Exception in copy_image_to_volume: %(excep)s. Deleting the backing: " +"%(back)s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:746 +#, python-format +msgid "Exception in _select_ds_for_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:749 +#, python-format +msgid "Selected datastore %(ds)s for new volume of size %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:784 +#, python-format +msgid "Exception in copy_image_to_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:787 +#, python-format +msgid "Deleting the backing: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:808 +#, python-format +msgid "Copy glance image: %s to create new volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:842 +msgid "Upload to glance of attached volume is not supported." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:847 +#, python-format +msgid "Copy Volume: %s to new image." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:853 +#, python-format +msgid "Backing not found, creating for volume: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:872 +#, python-format +msgid "Done copying volume %(vol)s to a new image %(img)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:922 +#, python-format +msgid "Relocating volume: %(backing)s to %(ds)s and %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:961 +#: cinder/volume/drivers/vmware/volumeops.py:630 +#, python-format +msgid "Successfully created clone: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:974 +#, python-format +msgid "" +"There is no backing for the snapshoted volume: %(snap)s. Not creating any" +" backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1010 +#, python-format +msgid "" +"There is no backing for the source volume: %(src)s. Not creating any " +"backing for volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1018 +#, python-format +msgid "Linked clone of source volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:94 +#, python-format +msgid "Downloading image: %s from glance image server as a flat vmdk file." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:107 +#: cinder/volume/drivers/vmware/vmware_images.py:126 +#, python-format +msgid "Downloaded image: %s from glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:113 +#, python-format +msgid "Downloading image: %s from glance image server using HttpNfc import." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:132 +#, python-format +msgid "Uploading image: %s to the Glance image server using HttpNfc export." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:158 +#, python-format +msgid "Uploaded image: %s to the Glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:87 +#, python-format +msgid "Did not find any backing with name: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:94 +#, python-format +msgid "Deleting the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:97 +#, python-format +msgid "Initiated deletion of VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:99 +#, python-format +msgid "Deleted the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:237 +#, python-format +msgid "There are no valid datastores attached to %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:289 +#, python-format +msgid "" +"Creating folder: %(child_folder_name)s under parent folder: " +"%(parent_folder)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:306 +#, python-format +msgid "Child folder already present: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:314 +#, python-format +msgid "Created child folder: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:365 +#, python-format +msgid "Spec for creating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:383 +#, python-format +msgid "" +"Creating volume backing name: %(name)s disk_type: %(disk_type)s size_kb: " +"%(size_kb)s at folder: %(folder)s resourse pool: %(resource_pool)s " +"datastore name: %(ds_name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:395 +#, python-format +msgid "Initiated creation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:398 +#, python-format +msgid "Successfully created volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:438 +#, python-format +msgid "Spec for relocating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:452 +#, python-format +msgid "" +"Relocating backing: %(backing)s to datastore: %(ds)s and resource pool: " +"%(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:462 +#, python-format +msgid "Initiated relocation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:464 +#, python-format +msgid "" +"Successfully relocated volume backing: %(backing)s to datastore: %(ds)s " +"and resource pool: %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:474 +#, python-format +msgid "Moving backing: %(backing)s to folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:479 +#, python-format +msgid "Initiated move of volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:482 +#, python-format +msgid "Successfully moved volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:494 +#, python-format +msgid "Snapshoting backing: %(backing)s with name: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:501 +#, python-format +msgid "Initiated snapshot of volume backing: %(backing)s named: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:505 +#, python-format +msgid "Successfully created snapshot: %(snap)s for volume backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:553 +#, python-format +msgid "Deleting the snapshot: %(name)s from backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:558 +#, python-format +msgid "" +"Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " +"delete anything." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:565 +#, python-format +msgid "Initiated snapshot: %(name)s deletion for backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:569 +#, python-format +msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:597 +#, python-format +msgid "Spec for cloning the backing: %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:613 +#, python-format +msgid "" +"Creating a clone of backing: %(back)s, named: %(name)s, clone type: " +"%(type)s from snapshot: %(snap)s on datastore: %(ds)s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:627 +#, python-format +msgid "Initiated clone of backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:638 +#, python-format +msgid "Deleting file: %(file)s under datacenter: %(dc)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:646 +#, python-format +msgid "Initiated deletion via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:648 +#, python-format +msgid "Successfully deleted file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:701 +msgid "Copying disk data before snapshot of the VM" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:711 +#, python-format +msgid "Initiated copying disk data via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:713 +#, python-format +msgid "Successfully copied disk at: %(src)s to: %(dest)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:722 +#, python-format +msgid "Deleting vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:729 +#, python-format +msgid "Initiated deleting vmdk file via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:731 +#, python-format +msgid "Deleted vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/windows/windows.py:102 +#, fuzzy, python-format +msgid "Creating folder %s " +msgstr "正在创建存储库 %s" + +#: cinder/volume/drivers/windows/windows_utils.py:47 +#, python-format +msgid "" +"check_for_setup_error: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:53 +msgid "check_for_setup_error: there is no ISCSI traffic listening." +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:63 +#, python-format +msgid "" +"get_host_information: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:73 +#, python-format +msgid "" +"get_host_information: the ISCSI target information could not be " +"retrieved. WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:105 +#, python-format +msgid "" +"associate_initiator_with_iscsi_target: an association between initiator: " +"%(init)s and target name: %(target)s could not be established. WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:123 +#, python-format +msgid "" +"delete_iscsi_target: error when deleting the iscsi target associated with" +" target name: %(target)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:139 +#, python-format +msgid "" +"create_volume: error when creating the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:157 +#, python-format +msgid "" +"delete_volume: error when deleting the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:177 +#, python-format +msgid "" +"create_snapshot: error when creating the snapshot name: %(vol_name)s . 
" +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:193 +#, python-format +msgid "" +"create_volume_from_snapshot: error when creating the volume name: " +"%(vol_name)s from snapshot name: %(snap_name)s. WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:208 +#, python-format +msgid "" +"delete_snapshot: error when deleting the snapshot name: %(snap_name)s . " +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:223 +#, python-format +msgid "" +"create_iscsi_target: error when creating iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:240 +#, python-format +msgid "" +"remove_iscsi_target: error when deleting iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:255 +#, python-format +msgid "" +"add_disk_to_target: error adding disk associated to volume : %(vol_name)s" +" to the target name: %(tar_name)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:273 +#, python-format +msgid "" +"copy_vhd_disk: error when copying disk from source path : %(src_path)s to" +" destination path: %(dest_path)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:290 +#, python-format +msgid "" +"extend: error when extending the volume: %(vol_name)s .WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/flows/common.py:52 +#, python-format +msgid "Restoring source %(source_volid)s status to %(status)s" +msgstr "" + +#: cinder/volume/flows/common.py:58 +#, python-format +msgid "" +"Failed setting source volume %(source_volid)s back to its initial " +"%(source_status)s status" +msgstr "" + +#: cinder/volume/flows/common.py:83 +#, python-format +msgid "Updating volume: %(volume_id)s with %(update)s due to: %(reason)s" +msgstr "" + +#: cinder/volume/flows/common.py:90 +#: cinder/volume/flows/manager/create_volume.py:676 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(update)s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:81 +#, python-format +msgid "Originating snapshot status must be one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:103 +#, python-format +msgid "" +"Unable to create a volume from an originating source volume when its " +"status is not one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:126 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than the snapshot size " +"%(snap_size)sGB. They must be >= original snapshot size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:135 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than original volume size " +"%(source_size)sGB. They must be >= original volume size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:144 +#, python-format +msgid "Volume size %(size)s must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:186 +#, python-format +msgid "" +"Size of specified image %(image_size)sGB is larger than volume size " +"%(volume_size)sGB." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:194 +#, python-format +msgid "" +"Volume size %(volume_size)sGB cannot be smaller than the image minDisk " +"size %(min_disk)sGB." 
+msgstr "" + +#: cinder/volume/flows/api/create_volume.py:212 +#, python-format +msgid "Metadata property key %s greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:217 +#, python-format +msgid "Metadata property key %s value greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:254 +#, python-format +msgid "Availability zone '%s' is invalid" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:267 +msgid "Volume must be in the same availability zone as the snapshot" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:276 +msgid "Volume must be in the same availability zone as the source volume" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:315 +msgid "Volume type will be changed to be the same as the source volume." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:463 +#, python-format +msgid "Failed destroying volume entry %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:546 +#, python-format +msgid "Failed rolling back quota for %s reservations" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:590 +#, python-format +msgid "Failed to update quota for deleting volume: %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:678 +#: cinder/volume/flows/manager/create_volume.py:192 +#, python-format +msgid "Volume %s: create failed" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:682 +msgid "Unexpected build error:" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:105 +#, python-format +msgid "" +"Volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d due to " +"%(reason)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:124 +#, python-format +msgid "Volume %s: re-scheduled" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:141 +#, python-format +msgid "Updating volume %(volume_id)s with %(update)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:146 +#, python-format +msgid "Volume %s: resetting 'creating' status failed." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:165 +#, python-format +msgid "Volume %s: rescheduling failed" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:308 +#, python-format +msgid "" +"Failed notifying about the volume action %(event)s for volume " +"%(volume_id)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:345 +#, python-format +msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:347 +#, python-format +msgid "" +"Failed updating volume %(vol_id)s metadata using the provided " +"%(src_type)s %(src_id)s metadata" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:405 +#, python-format +msgid "" +"Failed fetching snapshot %(snapshot_id)s bootable flag using the provided" +" glance snapshot %(snapshot_ref_id)s volume reference" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:418 +#, python-format +msgid "Marking volume %s as bootable." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:421 +#, python-format +msgid "Failed updating volume %(volume_id)s bootable flag to true" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:448 +#, python-format +msgid "" +"Attempting download of %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s." 
+msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:455 +#: cinder/volume/flows/manager/create_volume.py:466 +#, python-format +msgid "" +"Failed to copy image %(image_id)s to volume: %(volume_id)s, error: " +"%(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:461 +#, python-format +msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:475 +#, python-format +msgid "" +"Downloaded image %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s successfully." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:512 +#, python-format +msgid "" +"Creating volume glance metadata for volume %(volume_id)s backed by image " +"%(image_id)s with: %(vol_metadata)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:526 +#, python-format +msgid "" +"Cloning %(volume_id)s from image %(image_id)s at location " +"%(image_location)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:552 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(updates)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:574 +#, python-format +msgid "Unable to create volume. Volume driver %s not initialized" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:588 +#, python-format +msgid "" +"Volume %(volume_id)s: being created using %(functor)s with specification:" +" %(volume_spec)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:611 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with creation provided " +"model %(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:619 +#, python-format +msgid "Volume %s: creating export" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:633 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with driver provided model " +"%(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:680 +#, python-format +msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" +msgstr "" + +#~ msgid "Error retrieving volume status: %s" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get system name" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get storage pool data" +#~ msgstr "" + +#~ msgid "Cannot find any Fibre Channel HBAs" +#~ msgstr "" + +#~ msgid "Volume status must be available or error" +#~ msgstr "卷组状态必须可获取" + +#~ msgid "No backend config with id %s" +#~ msgstr "" + +#~ msgid "No sm_flavor called %s" +#~ msgstr "" + +#~ msgid "No sm_volume with id %s" +#~ msgstr "" + +#~ msgid "Error: %s" +#~ msgstr "数据库错误:%s" + +#~ msgid "Unexpected state while cloning %s" +#~ msgstr "运行命令时出现意外错误" + +#~ msgid "iSCSI device not found at %s" +#~ msgstr "在 %s 未找到iSCSI设备" + +#~ msgid "Fibre Channel device not found." 
+#~ msgstr "" + +#~ msgid "Uncaught exception" +#~ msgstr "得到异常:%s" + +#~ msgid "Out reactor registered" +#~ msgstr "" + +#~ msgid "CONSUMER GOT %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT QUEUED %(data)s" +#~ msgstr "" + +#~ msgid "Could not create IPC directory %s" +#~ msgstr "移除容器失败:%s" + +#~ msgid "ROUTER RELAY-OUT %(data)s" +#~ msgstr "" + +#~ msgid "May specify only one of snapshot, imageRef or source volume" +#~ msgstr "" + +#~ msgid "Volume size cannot be lesser than the Snapshot size" +#~ msgstr "" + +#~ msgid "Unable to clone volumes that are in an error state" +#~ msgstr "" + +#~ msgid "Clones currently must be >= original volume size." +#~ msgstr "" + +#~ msgid "Volume size '%s' must be an integer and greater than 0" +#~ msgstr "" + +#~ msgid "Size of specified image is larger than volume size." +#~ msgstr "" + +#~ msgid "Image minDisk size is larger than the volume size." +#~ msgstr "" + +#~ msgid "" +#~ msgstr "" + +#~ msgid "Availability zone is invalid" +#~ msgstr "" + +#~ msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +#~ msgstr "卷%(vol_name)s:创建大小为%(vol_size)s的逻辑卷" + +#~ msgid "volume %s: creating from snapshot" +#~ msgstr "卷%s:正在创建导出" + +#~ msgid "volume %s: creating from existing volume" +#~ msgstr "卷%s:正在创建导出" + +#~ msgid "volume %s: creating from image" +#~ msgstr "卷 %s: 创建中" + +#~ msgid "volume %s: creating" +#~ msgstr "卷 %s: 创建中" + +#~ msgid "Setting volume: %s status to error after failed image copy." +#~ msgstr "" + +#~ msgid "Unexpected Error: " +#~ msgstr "" + +#~ msgid "volume %s: creating export" +#~ msgstr "卷%s:正在创建导出" + +#~ msgid "volume %s: create failed" +#~ msgstr "卷 %s: 创建中" + +#~ msgid "volume %s: created successfully" +#~ msgstr "卷%s:创建成功" + +#~ msgid "volume %s: Error trying to reschedule create" +#~ msgstr "" + +#~ msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +#~ msgstr "" + +#~ msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +#~ msgstr "没有为卷 %(volume_id)s 找到目标id。" + +#~ msgid "Downloaded image %(image_id)s to %(volume_id)s successfully." +#~ msgstr "" + +#~ msgid "Array Mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "LUN %(lun)s of size %(size)s MB is created." +#~ msgstr "" + +#~ msgid "Array mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "Failed to attach iser target for volume %(volume_id)s." 
+#~ msgstr "" + +#~ msgid "Fetching %s" +#~ msgstr "正在抓取 %s" + +#~ msgid "Link Local address is not found.:%s" +#~ msgstr "本地IP地址没有找到:%s" + +#~ msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +#~ msgstr "无法连接到 %(interface)s 的本地IP:%(ex)s" + +#~ msgid "Started %(name)s on %(_host)s:%(_port)s" +#~ msgstr "启动%(name)s 位置在 %(host)s:%(port)s" + +#~ msgid "Unable to find a Fibre Channel volume device" +#~ msgstr "" + +#~ msgid "Volume device not found at %s" +#~ msgstr "" + +#~ msgid "Unable to find Volume Group: %s" +#~ msgstr "无法分离 %s 卷" + +#~ msgid "Failed to create Volume Group: %s" +#~ msgstr "无法找到 %s 卷" + +#~ msgid "snapshot %(snap_name)s: creating" +#~ msgstr "快照 %(snap_name)s:正在创建" + +#~ msgid "Running with CoraidDriver for ESM EtherCLoud" +#~ msgstr "" + +#~ msgid "Update session cookie %(session)s" +#~ msgstr "" + +#~ msgid "Message : %(message)s" +#~ msgstr "%(code)s: %(message)s" + +#~ msgid "Error while trying to set group: %(message)s" +#~ msgstr "" + +#~ msgid "Unable to find group: %(group)s" +#~ msgstr "无法找到地址 %r" + +#~ msgid "ESM urlOpen error" +#~ msgstr "" + +#~ msgid "JSON Error" +#~ msgstr "迁移错误" + +#~ msgid "Request without URL" +#~ msgstr "" + +#~ msgid "Configure data : %s" +#~ msgstr "给定数据:%s" + +#~ msgid "Configure response : %s" +#~ msgstr "响应 %s" + +#~ msgid "Unable to retrive volume infos for volume %(volname)s" +#~ msgstr "没有为卷 %(volume_id)s 找到目标id。" + +#~ msgid "Cannot login on Coraid ESM" +#~ msgstr "" + +#~ msgid "Fail to create volume %(volname)s" +#~ msgstr "无法找到 %s 卷" + +#~ msgid "Failed to delete volume %(volname)s" +#~ msgstr "无法找到 %s 卷" + +#~ msgid "Failed to Create Snapshot %(snapname)s" +#~ msgstr "重新启动实例失败" + +#~ msgid "Failed to Delete Snapshot %(snapname)s" +#~ msgstr "快照 %(snap_name)s:正在创建" + +#~ msgid "Failed to Create Volume from Snapshot %(snapname)s" +#~ msgstr "从快照 %s 创建卷" + +#~ msgid "fmt = %(fmt)s backed by: %(backing_file)s" +#~ msgstr "" + +#~ msgid "Expected image to be in raw format, but is %s" +#~ msgstr "" + +#~ msgid "volume group %s doesn't exist" +#~ msgstr "卷组 %s 不存在" + +#~ msgid "Error retrieving volume stats: %s" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Could not get system name" +#~ msgstr "" + +#~ msgid "CPG (%s) must be in a domain" +#~ msgstr "" + +#~ msgid "Error populating default encryption types!" +#~ msgstr "" + +#~ msgid "Unexpected error while running command." 
+#~ msgstr "运行命令时出现意外错误" + +#~ msgid "Nexenta SA returned the error" +#~ msgstr "服务器返回错误:%s" + +#~ msgid "Ignored target group creation error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group member addition error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LU creation error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Copying metadata from snapshot %(snap_volume_id)s to %(volume_id)s" +#~ msgstr "" + +#~ msgid "Connection to glance failed" +#~ msgstr "连接到glance失败" + +#~ msgid "Invalid snapshot" +#~ msgstr "无效的快照" + +#~ msgid "Invalid input received" +#~ msgstr "收到无效的输入" + +#~ msgid "Invalid volume type" +#~ msgstr "无效的卷类型" + +#~ msgid "Invalid volume" +#~ msgstr "无效的卷" + +#~ msgid "Invalid host" +#~ msgstr "" + +#~ msgid "Invalid auth key" +#~ msgstr "无效的快照" + +#~ msgid "Invalid metadata" +#~ msgstr "无效的元数据" + +#~ msgid "Invalid metadata size" +#~ msgstr "无效的元数据键" + +#~ msgid "Migration error" +#~ msgstr "迁移错误" + +#~ msgid "Quota exceeded" +#~ msgstr "超出配额" + +#~ msgid "Connection to swift failed" +#~ msgstr "连接到glance失败" + +#~ msgid "Volume migration failed" +#~ msgstr "" + +#~ msgid "SSH command injection detected" +#~ msgstr "" + +#~ msgid "Invalid qos specs" +#~ msgstr "" + +#~ msgid "debug in callback: %s" +#~ msgstr "回调中debug:%s" + +#~ msgid "Expected object of type: %s" +#~ msgstr "期望的对象类型:%s" + +#~ msgid "timefunc: '%(name)s' took %(total_time).2f secs" +#~ msgstr "timefunc:'%(name)s' 用了%(total_time).2f 秒" + +#~ msgid "base image still has %s snapshots so not deleting base image" +#~ msgstr "" + +#~ msgid "Failed to rename migration destination volume %(vol)s to %(name)s" +#~ msgstr "" + +#~ msgid "Resize volume \"%(name)s\" to %(size)s" +#~ msgstr "" + +#~ msgid "Volume \"%(name)s\" resized. New size is %(size)s" +#~ msgstr "" + +#~ msgid "Invalid snapshot backing file format: %s" +#~ msgstr "" + +#~ msgid "Extend volume from %(old_size) to %(new_size)" +#~ msgstr "" + +#~ msgid "pool %s doesn't exist" +#~ msgstr "卷组 %s 不存在" + +#~ msgid "_update_volume_stats: Could not get system name." +#~ msgstr "" + +#~ msgid "Disk not found: %s" +#~ msgstr "没有找到主机" + +#~ msgid "read timed out" +#~ msgstr "" + +#~ msgid "check_for_setup_error." +#~ msgstr "" + +#~ msgid "check_for_setup_error: Can not get device type." +#~ msgstr "" + +#~ msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +#~ msgstr "" + +#~ msgid "_get_device_type: Storage Pool must be configured." +#~ msgstr "" + +#~ msgid "create_volume:volume name: %s." +#~ msgstr "" + +#~ msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +#~ msgstr "" + +#~ msgid "create_export: volume name:%s" +#~ msgstr "创建卷 %s 的快照" + +#~ msgid "create_export:Volume %(name)s does not exist." +#~ msgstr "" + +#~ msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +#~ msgstr "" + +#~ msgid "terminate_connection:Host does not exist. Host name:%(host)s." +#~ msgstr "" + +#~ msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +#~ msgstr "" + +#~ msgid "create_snapshot:Device does not support snapshot." +#~ msgstr "" + +#~ msgid "create_snapshot:Resource pool needs 1GB valid size at least." +#~ msgstr "" + +#~ msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +#~ msgstr "" + +#~ msgid "create_snapshot:Snapshot does not exist. 
Snapshot name:%(name)s" +#~ msgstr "" + +#~ msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +#~ msgstr "" + +#~ msgid "delete_snapshot:Device does not support snapshot." +#~ msgstr "" + +#~ msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +#~ msgstr "" + +#~ msgid "_check_conf_file: %s" +#~ msgstr "" + +#~ msgid "Write login information to xml error. %s" +#~ msgstr "" + +#~ msgid "_get_login_info error. %s" +#~ msgstr "" + +#~ msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +#~ msgstr "" + +#~ msgid "_get_lun_set_info:%s" +#~ msgstr "" + +#~ msgid "_get_iscsi_info:%s" +#~ msgstr "" + +#~ msgid "CLI command:%s" +#~ msgstr "" + +#~ msgid "_execute_cli:%s" +#~ msgstr "_delete: %s" + +#~ msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +#~ msgstr "" + +#~ msgid "_get_tgt_iqn:iSCSI IP is %s." +#~ msgstr "" + +#~ msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +#~ msgstr "" + +#~ msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +#~ msgstr "" + +#~ msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +#~ msgstr "" + +#~ msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." +#~ msgstr "" + +#~ msgid "Ignored target creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group member addition error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LU creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LUN mapping entry addition error while ensuring export" +#~ msgstr "" + +#~ msgid "Invalid source volume %(reason)s." +#~ msgstr "" + +#~ msgid "The request is invalid." +#~ msgstr "请求无效。" + +#~ msgid "Volume %(volume_id)s persistence file could not be found." +#~ msgstr "卷 %(volume_id)s 没有找到。" + +#~ msgid "No disk at %(location)s" +#~ msgstr "在 %(location)s 没有磁盘" + +#~ msgid "Class %(class_name)s could not be found: %(exception)s" +#~ msgstr "找不到类 %(class_name)s :异常 %(exception)s" + +#~ msgid "Action not allowed." +#~ msgstr "操作不允许。" + +#~ msgid "Key pair %(key_name)s already exists." +#~ msgstr "密钥对 %(key_name)s 已经存在。" + +#~ msgid "Migration error: %(reason)s" +#~ msgstr "" + +#~ msgid "Maximum volume/snapshot size exceeded" +#~ msgstr "" + +#~ msgid "3PAR Host already exists: %(err)s. %(info)s" +#~ msgstr "" + +#~ msgid "Backup volume %(volume_id)s type not recognised." +#~ msgstr "" + +#~ msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s" +#~ msgstr "" + +#~ msgid "ssh_read: Read SSH timeout" +#~ msgstr "" + +#~ msgid "do_setup." +#~ msgstr "" + +#~ msgid "create_volume: volume name: %s." +#~ msgstr "" + +#~ msgid "delete_volume: volume name: %s." +#~ msgstr "删除id为 %s 的卷" + +#~ msgid "create_cloned_volume: src volume: %(src)s tgt volume: %(tgt)s" +#~ msgstr "" + +#~ msgid "create_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" +#~ msgstr "" + +#~ msgid "delete_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" +#~ msgstr "" + +#~ msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Updating volume stats" +#~ msgstr "" + +#~ msgid "restore finished." +#~ msgstr "" + +#~ msgid "Error encountered during initialization of driver: %s" +#~ msgstr "" + +#~ msgid "Unabled to update stats, driver is uninitialized" +#~ msgstr "" + +#~ msgid "Snapshot file at %s does not exist." 
+#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %s does not exist" +#~ msgstr "" -#: cinder/volume/xensm.py:55 -#, python-format -msgid "SR name = %s" -msgstr "存储库 name = %s" +#~ msgid "Login to 3PAR array invalid" +#~ msgstr "" -#: cinder/volume/xensm.py:56 -#, python-format -msgid "Params: %s" -msgstr "参数:%s" +#~ msgid "There are no datastores present under %s." +#~ msgstr "" -#: cinder/volume/xensm.py:60 -#, python-format -msgid "Failed to create sr %s...continuing" -msgstr "创建存储库 %s失败... 继续" +#~ msgid "Size for volume: %s not found, skipping secure delete." +#~ msgstr "" -#: cinder/volume/xensm.py:62 -msgid "Create failed" -msgstr "创建失败" +#~ msgid "Could not find attribute for LUN named %s" +#~ msgstr "无法为卷 %s 找到 iSCSI 导出" -#: cinder/volume/xensm.py:64 -#, python-format -msgid "SR UUID of new SR is: %s" -msgstr "新存储库的存储库 UUID:%s" +#~ msgid "Cleaning up incomplete backup operations" +#~ msgstr "" -#: cinder/volume/xensm.py:71 -msgid "Failed to update db" -msgstr "更新数据库失败" +#~ msgid "Resetting volume %s to available (was backing-up)" +#~ msgstr "" -#: cinder/volume/xensm.py:80 -#, python-format -msgid "Failed to introduce sr %s...continuing" -msgstr "引入存储库 %s 失败... 继续" +#~ msgid "Resetting volume %s to error_restoring (was restoring-backup)" +#~ msgstr "" -#: cinder/volume/xensm.py:91 -#, python-format -msgid "Failed to reach backend %d" -msgstr "到达后台 %d 失败" +#~ msgid "Resetting backup %s to error (was creating)" +#~ msgstr "" -#: cinder/volume/xensm.py:100 -msgid "XenSMDriver requires xenapi connection" -msgstr "XenSMDriver要求xenapi连接" +#~ msgid "Resetting backup %s to available (was restoring)" +#~ msgstr "" -#: cinder/volume/xensm.py:110 -msgid "Failed to initiate session" -msgstr "初始化会话失败" +#~ msgid "Resuming delete on backup: %s" +#~ msgstr "" -#: cinder/volume/xensm.py:142 -#, python-format -msgid "Volume will be created in backend - %d" -msgstr "卷会在后台创建 - %d" +#~ msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +#~ msgstr "" -#: cinder/volume/xensm.py:154 -msgid "Failed to update volume in db" -msgstr "在数据库更新卷失败" +#~ msgid "create_backup finished. backup: %s" +#~ msgstr "" -#: cinder/volume/xensm.py:157 -msgid "Unable to create volume" -msgstr "无法创建卷" +#~ msgid "delete_backup started, backup: %s" +#~ msgstr "" -#: cinder/volume/xensm.py:171 -msgid "Failed to delete vdi" -msgstr "删除VDI失败" +#~ msgid "delete_backup finished, backup %s deleted" +#~ msgstr "" -#: cinder/volume/xensm.py:177 -msgid "Failed to delete volume in db" -msgstr "在数据库删除卷失败" +#~ msgid "JSON transfer Error" +#~ msgstr "" -#: cinder/volume/xensm.py:210 -msgid "Failed to find volume in db" -msgstr "没有在数据库找到卷" +#~ msgid "create volume error: %(err)s" +#~ msgstr "" -#: cinder/volume/xensm.py:221 -msgid "Failed to find backend in db" -msgstr "没有在数据库中找到后台(backend)" +#~ msgid "Create snapshot error." +#~ msgstr "" -#: cinder/volume/nexenta/__init__.py:27 -#, fuzzy, python-format -msgid "Nexenta SA returned the error" -msgstr "服务器返回错误:%s" +#~ msgid "Create luncopy error." +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:64 -#, fuzzy, python-format -msgid "Sending JSON data: %s" -msgstr "给定数据:%s" +#~ msgid "_find_host_lun_id transfer data error! " +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:69 -#, python-format -msgid "Auto switching to HTTPS connection to %s" -msgstr "" +#~ msgid "ssh_read: Read SSH timeout." +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:75 -msgid "No headers in server response" -msgstr "" +#~ msgid "There are no hosts in the inventory." 
+#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:76 -#, fuzzy -msgid "Bad response from server" -msgstr "来自SolidFire API的错误响应" +#~ msgid "Unable to create volume: %(vol)s on the hosts: %(hosts)s." +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:79 -#, fuzzy, python-format -msgid "Got response: %s" -msgstr "响应 %s" +#~ msgid "Successfully cloned new backing: %s." +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:96 -#, fuzzy, python-format -msgid "Volume %s does not exist in Nexenta SA" -msgstr "卷组 %s 不存在" +#~ msgid "Successfully reverted clone: %(clone)s to snapshot: %(snapshot)s." +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:180 -msgid "" -"Call to local_path should not happen. Verify that use_local_volumes flag " -"is turned off." -msgstr "" +#~ msgid "Copying backing files from %(src)s to %(dest)s." +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:202 -#, python-format -msgid "Ignored target creation error \"%s\" while ensuring export" -msgstr "" +#~ msgid "Initiated copying of backing via task: %s." +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:210 -#, python-format -msgid "Ignored target group creation error \"%s\" while ensuring export" -msgstr "" +#~ msgid "Successfully copied backing to %s." +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:219 -#, python-format -msgid "Ignored target group member addition error \"%s\" while ensuring export" -msgstr "" +#~ msgid "Registering backing at path: %s to inventory." +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:227 -#, python-format -msgid "Ignored LU creation error \"%s\" while ensuring export" -msgstr "" +#~ msgid "Initiated registring backing, task: %s." +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:237 -#, python-format -msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" -msgstr "" +#~ msgid "Successfully registered backing: %s." +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:273 -#, python-format -msgid "" -"Got error trying to destroy target group %(target_group)s, assuming it is" -" already gone: %(exc)s" -msgstr "" +#~ msgid "Reverting backing to snapshot: %s." +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:280 -#, python-format -msgid "" -"Got error trying to delete target %(target)s, assuming it is already " -"gone: %(exc)s" -msgstr "" +#~ msgid "Initiated reverting snapshot via task: %s." +#~ msgstr "" -#~ msgid "Cinder access parameters were not specified." -#~ msgstr "未指定Cinder访问参数。" +#~ msgid "Successfully reverted to snapshot: %s." +#~ msgstr "" -#~ msgid "Virtual Storage Array %(id)d could not be found." -#~ msgstr "无法找到虚拟存储阵列 %(id)d。" +#~ msgid "Successfully copied disk data to: %s." +#~ msgstr "" -#~ msgid "Virtual Storage Array %(name)s could not be found." -#~ msgstr "无法找到虚拟存储阵列 %(name)s。" +#~ msgid "Error(s): %s occurred in the call to RetrieveProperties." +#~ msgstr "" -#~ msgid "Parent group id and group id cannot be same" -#~ msgstr "父组id和组id不能一样" +#~ msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +#~ msgstr "" -#~ msgid "No body provided" -#~ msgstr "没有提供主体" +#~ msgid "Deploy v1 of the Cinder API. " +#~ msgstr "" -#~ msgid "Create VSA %(display_name)s of type %(vc_type)s" -#~ msgstr "创建VSA %(display_name)s 类型是 %(vc_type)s" +#~ msgid "Deploy v2 of the Cinder API. " +#~ msgstr "" -#~ msgid "Delete VSA with id: %s" -#~ msgstr "删除id:%s的VSA" +#~ msgid "_read_xml:%s" +#~ msgstr "_create: %s" -#~ msgid "Associate address %(ip)s to VSA %(id)s" -#~ msgstr "把地址 %(ip)s 与VSA %(id)s关联" +#~ msgid "request ip info is %s." 
+#~ msgstr "" -#~ msgid "Disassociate address from VSA %(id)s" -#~ msgstr "取消地址与VSA %(id)s关联" +#~ msgid "new str info is %s." +#~ msgstr "" -#~ msgid "%(obj)s with ID %(id)s not found" -#~ msgstr "没有找到 %(obj)s 该对象的ID是 %(id)s" +#~ msgid "Failed to create iser target for volume %(volume_id)s." +#~ msgstr "" -#~ msgid "" -#~ "%(obj)s with ID %(id)s belongs to " -#~ "VSA %(own_vsa_id)s and not to VSA " -#~ "%(vsa_id)s." -#~ msgstr "%(obj)s 的ID是 %(id)s 它属于VSA %(own_vsa_id)s而不是VSA %(vsa_id)s。" +#~ msgid "Failed to remove iser target for volume %(volume_id)s." +#~ msgstr "" -#~ msgid "Index. vsa_id=%(vsa_id)s" -#~ msgstr "索引。vsa_id=%(vsa_id)s" +#~ msgid "rtstool is not installed correctly" +#~ msgstr "" -#~ msgid "Detail. vsa_id=%(vsa_id)s" -#~ msgstr "详细内容。vsa_id=%(vsa_id)s" +#~ msgid "Creating iser_target for: %s" +#~ msgstr "" -#~ msgid "Create. vsa_id=%(vsa_id)s, body=%(body)s" -#~ msgstr "创建卷。vsa_id=%(vsa_id)s, body=%(body)s" +#~ msgid "Failed to create iser target for volume id:%(vol_id)s: %(e)s" +#~ msgstr "" -#~ msgid "Create volume of %(size)s GB from VSA ID %(vsa_id)s" -#~ msgstr "创建 %(size)s GB的卷来源是VSA ID %(vsa_id)s" +#~ msgid "Removing iser_target for: %s" +#~ msgstr "" -#~ msgid "Update %(obj)s with id: %(id)s, changes: %(changes)s" -#~ msgstr "更新%(obj)s至id:%(id)s ,修改:%(changes)s" +#~ msgid "Failed to remove iser target for volume id:%(vol_id)s: %(e)s" +#~ msgstr "" -#~ msgid "Delete. vsa_id=%(vsa_id)s, id=%(id)s" -#~ msgstr "删除卷。vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgid "Volume %s does not exist, it seems it was already deleted" +#~ msgstr "" -#~ msgid "Show. vsa_id=%(vsa_id)s, id=%(id)s" -#~ msgstr "显示卷。vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgid "Executing zfs send/recv on the appliance" +#~ msgstr "" -#~ msgid "Index instances for VSA %s" -#~ msgstr "为 VSA %s 索引实例" +#~ msgid "zfs send/recv done, new volume %s created" +#~ msgstr "" -#~ msgid "" -#~ "Going to force the deletion of the" -#~ " vm %(instance_uuid)s, even if it is" -#~ " deleted" -#~ msgstr "准备强制删除虚拟机 %(instance_uuid)s,即使已经删除了。" +#~ msgid "Failed to delete temp snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" -#~ msgid "" -#~ "Instance %(instance_uuid)s did not exist " -#~ "in the DB, but I will shut " -#~ "it down anyway using a special " -#~ "context" -#~ msgstr "数据库中不存在实例 %(instance_uuid)s,但是无论如何要用特殊上下文来关闭。" +#~ msgid "Failed to delete zfs recv snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" -#~ msgid "trying to destroy already destroyed instance: %s" -#~ msgstr "尝试销毁已经销毁的实例: %s" +#~ msgid "rbd export-diff failed - %s" +#~ msgstr "" -#~ msgid "Can't downgrade without losing data" -#~ msgstr "无法不丢失数据地降级" +#~ msgid "rbd import-diff failed - %s" +#~ msgstr "" -#~ msgid "" -#~ "AMQP server on %(fl_host)s:%(fl_port)d is " -#~ "unreachable: %(e)s. Trying again in " -#~ "%(fl_intv)d seconds." -#~ msgstr "%(fl_host)s:%(fl_port)d上的AMQP服务器不可达:%(e)s。 %(fl_intv)d 秒钟后再尝试。" +#~ msgid "%s is not on GPFS. Perhaps GPFS not mounted." +#~ msgstr "" -#~ msgid "Unable to connect to AMQP server after %(tries)d tries. Shutting down." -#~ msgstr "%(tries)d 次尝试后依然无法连接到AMQP服务器。正在关闭。" +#~ msgid "Folder %s does not exist, it seems it was already deleted." +#~ msgstr "" -#~ msgid "Reconnected to queue" -#~ msgstr "重新连接队列" +#~ msgid "No 'os-update_readonly_flag' was specified in request." +#~ msgstr "" -#~ msgid "Failed to fetch message from queue: %s" -#~ msgstr "从队列获取消息失败:%s" +#~ msgid "Volume 'readonly' flag must be specified in request as a boolean." 
+#~ msgstr "" -#~ msgid "Initing the Adapter Consumer for %s" -#~ msgstr "为%s初始化适配器消费者(Adapter Consumer)" +#~ msgid "ISER provider_location not stored, using discovery" +#~ msgstr "" -#~ msgid "Created \"%(exchange)s\" fanout exchange with \"%(key)s\" routing key" -#~ msgstr "\"%(exchange)s\" fanout exchange用路由密钥 \"%(key)s\" 创建的" +#~ msgid "Could not find iSER export for volume %s" +#~ msgstr "" -#~ msgid "Exception while processing consumer" -#~ msgstr "处理消费者出现异常" +#~ msgid "ISER Discovery: Found %s" +#~ msgstr "" -#~ msgid "Creating \"%(exchange)s\" fanout exchange" -#~ msgstr "正在创建 \"%(exchange)s\" fanout exchange" +#~ msgid "Failed to access the device on the path %(path)s: %(error)s." +#~ msgstr "" -#~ msgid "topic is %s" -#~ msgstr "主题是 %s" +#~ msgid "iSER device not found at %s" +#~ msgstr "" -#~ msgid "message %s" -#~ msgstr "消息 %s" +#~ msgid "Found iSER node %(host_device)s (after %(tries)s rescans)." +#~ msgstr "" -#~ msgid "" -#~ "Cannot confirm tmpfile at %(ipath)s is" -#~ " on same shared storage between " -#~ "%(src)s and %(dest)s." -#~ msgstr "无法确认tmpfile %(ipath)s 在相同的共享存储的 %(src)s 和 %(dest)s之间。" +#~ msgid "Skipping ensure_export. No iser_target provisioned for volume: %s" +#~ msgstr "" -#~ msgid "_filter_hosts: %(request_spec)s" -#~ msgstr "_filter_hosts: %(request_spec)s" +#~ msgid "Skipping remove_export. No iser_target provisioned for volume: %s" +#~ msgstr "" -#~ msgid "Filter hosts for drive type %s" -#~ msgstr "按照驱动类型 %s 过滤主机" +#~ msgid "Downloading image: %s from glance image server." +#~ msgstr "" -#~ msgid "Host %s has no free capacity. Skip" -#~ msgstr "主机 %s 没有足够的容量。跳过" +#~ msgid "Uploading image: %s to the Glance image server." +#~ msgstr "" -#~ msgid "Filter hosts: %s" -#~ msgstr "过滤主机:%s" +#~ msgid "Invalid request body" +#~ msgstr "无效的请求主体" -#~ msgid "Must implement host selection mechanism" -#~ msgstr "必须实现主机选择机制" +#~ msgid "enter: _get_host_from_connector: prefix %s" +#~ msgstr "" -#~ msgid "Maximum number of hosts selected (%d)" -#~ msgstr "所选择主机的最大数目(%d)" +#~ msgid "Schedule volume flow not retrieved" +#~ msgstr "" -#~ msgid "Selected excessive host %(host)s" -#~ msgstr "选择超过了主机 %(host)s" +#~ msgid "Failed to successfully complete schedule volume using flow: %s" +#~ msgstr "" -#~ msgid "Provision volume %(name)s of size %(size)s GB on host %(host)s" -#~ msgstr "提供卷 %(name)s 它的大小为 %(size)s 位置在主机 %(host)s" +#~ msgid "Create volume flow not retrieved" +#~ msgstr "" -#~ msgid "volume_params %(volume_params)s" -#~ msgstr "volume_params %(volume_params)s" +#~ msgid "Failed to successfully complete create volume workflow" +#~ msgstr "" -#~ msgid "%(i)d: Volume %(name)s" -#~ msgstr "%(i)d:卷名%(name)s" +#~ msgid "Expected volume result not found" +#~ msgstr "" -#~ msgid "Attempting to spawn %(num_volumes)d volume(s)" -#~ msgstr "正在尝试生产 %(num_volumes)d 个卷" +#~ msgid "Manager volume flow not retrieved" +#~ msgstr "" -#~ msgid "Error creating volumes" -#~ msgstr "创建卷失败" +#~ msgid "Failed to successfully complete manager volume workflow" +#~ msgstr "" -#~ msgid "Non-VSA volume %d" -#~ msgstr "Non-VSA 卷 %d" +#~ msgid "Unable to update stats, driver is uninitialized" +#~ msgstr "" -#~ msgid "Spawning volume %(volume_id)s with drive type %(drive_type)s" -#~ msgstr "正在生产卷 %(volume_id)s 它的驱动类型为 %(drive_type)s" +#~ msgid "Bad reponse from server: %s" +#~ msgstr "" -#~ msgid "Error creating volume" -#~ msgstr "创建卷失败" +#~ msgid "%(flow)s has moved into state %(state)s from state %(old_state)s" +#~ msgstr "" -#~ msgid "No capability selected for volume of size %(size)s" -#~ 
msgstr "没有能力分配大小为 %(size)s 的卷" +#~ msgid "No request spec, will not reschedule" +#~ msgstr "" -#~ msgid "Host %s:" -#~ msgstr "主机 %s:" +#~ msgid "No retry filter property or associated retry info, will not reschedule" +#~ msgstr "" -#~ msgid "" -#~ "\tDrive %(qosgrp)-25s: total %(total)2s, " -#~ "used %(used)2s, free %(free)2s. Available " -#~ "capacity %(avail)-5s" +#~ msgid "Retry info not present, will not reschedule" #~ msgstr "" -#~ "\t驱动 %(qosgrp)-25s:总共 %(total)2s,已用 %(used)2s," -#~ " 空闲 %(free)2s。可用 %(avail)-5s" -#~ msgid "" -#~ "\t LeastUsedHost: Best host: %(best_host)s." -#~ " (used capacity %(min_used)s)" -#~ msgstr "\t [ tab] LeastUsedHost:最佳主机: %(best_host)s。(使用的空间 %(min_used)s)" +#~ msgid "Clear capabilities" +#~ msgstr "清理能力" -#~ msgid "" -#~ "\t MostAvailCap: Best host: %(best_host)s. " -#~ "(available %(max_avail)s %(type_str)s)" -#~ msgstr "\t MostAvailCap:最佳主机: %(best_host)s。 (可用 %(max_avail)s %(type_str)s)" +#~ msgid "This usually means the volume was never succesfully created." +#~ msgstr "" -#~ msgid "(%(nm)s) publish (key: %(routing_key)s) %(message)s" -#~ msgstr "(%(nm)s) 发布 (键:%(routing_key)s) %(message)s" +#~ msgid "setting LU uppper (end) limit to %s" +#~ msgstr "" -#~ msgid "Publishing to route %s" -#~ msgstr "发布并路由到 %s" +#~ msgid "Can't find lun or lun goup in array" +#~ msgstr "" -#~ msgid "Declaring queue %s" -#~ msgstr "正在声明队列 %s" +#~ msgid "Volume to be restored to is smaller than the backup to be restored" +#~ msgstr "" -#~ msgid "Declaring exchange %s" -#~ msgstr "正在声明交换(exchange) %s" +#~ msgid "Volume driver '%(driver)s' not initialized." +#~ msgstr "" -#~ msgid "Binding %(queue)s to %(exchange)s with key %(routing_key)s" -#~ msgstr "把队列 %(queue)s 绑定到 %(exchange)s 采用的键是 %(routing_key)s" +#~ msgid "in looping call" +#~ msgstr "循环调用中。" -#~ msgid "Getting from %(queue)s: %(message)s" -#~ msgstr "从队列 %(queue)s 取消息:%(message)s" +#~ msgid "Is the appropriate service running?" +#~ msgstr "正确的服务在运行吗?" -#~ msgid "Test: Emulate wrong VSA name. Raise" -#~ msgstr "测试:模拟错误的 VSA 名称。抛出异常" +#~ msgid "Could not find another host" +#~ msgstr "无法找到另一个计算节点" -#~ msgid "Test: Emulate DB error. Raise" -#~ msgstr "测试:模拟数据测试。抛出" +#~ msgid "Not enough allocatable volume gigabytes remaining" +#~ msgstr "卷没有足够可分配的空间" -#~ msgid "Test: user_data = %s" -#~ msgstr "测试:user_data = %s" +#~ msgid "Unable to update stats on non-intialized Volume Group: %s" +#~ msgstr "" -#~ msgid "_create: param=%s" -#~ msgstr "_create: param=%s" +#~ msgid "do_setup: Pool %s does not exist" +#~ msgstr "" -#~ msgid "Host %s" -#~ msgstr "主机 %s" +#~ msgid "migrate_volume started with more than one vdisk copy" +#~ msgstr "" -#~ msgid "Test: provision vol %(name)s on host %(host)s" -#~ msgstr "测试:在主机 %(host)s 上提供 %(name)s" +#~ msgid "migrate_volume: Could not get vdisk copy data" +#~ msgstr "" -#~ msgid "\t vol=%(vol)s" -#~ msgstr "\t vol=%(vol)s" +#~ msgid "Selected datastore: %s for the volume." +#~ msgstr "" -#~ msgid "Test: VSA update request: vsa_id=%(vsa_id)s values=%(values)s" -#~ msgstr "测试:VSA更新请求:vsa_id=%(vsa_id)s values=%(values)s" +#~ msgid "There are no valid datastores present under %s." +#~ msgstr "" -#~ msgid "Test: Volume create: %s" -#~ msgstr "测试:卷创建:%s" +#~ msgid "Unable to create volume, driver not initialized" +#~ msgstr "" -#~ msgid "Test: Volume get request: id=%(volume_id)s" -#~ msgstr "测试:卷获取请求:id=%(volume_id)s" +#~ msgid "Migration %(migration_id)s could not be found." 
+#~ msgstr "迁移 %(migration_id)s 没有找到。" -#~ msgid "Test: Volume update request: id=%(volume_id)s values=%(values)s" -#~ msgstr "测试:卷更新请求:id=%(volume_id)s values=%(values)s" +#~ msgid "Bad driver response status: %(status)s" +#~ msgstr "" -#~ msgid "Test: Volume get: id=%(volume_id)s" -#~ msgstr "测试:卷获取:id=%(volume_id)s" +#~ msgid "Instance %(instance_id)s could not be found." +#~ msgstr "实例 %(instance_id)s 没有找到。" -#~ msgid "Task [%(name)s] %(task)s status: success %(result)s" -#~ msgstr "任务 [%(name)s] %(task)s 状态:成功 %(result)s" +#~ msgid "Volume retype failed: %(reason)s" +#~ msgstr "" -#~ msgid "Task [%(name)s] %(task)s status: %(status)s %(error_info)s" -#~ msgstr "任务 [%(name)s] %(task)s 状态:%(status)s %(error_info)s" +#~ msgid "SIGTERM received" +#~ msgstr "" -#~ msgid "virsh said: %r" -#~ msgstr "virsh 输出: %r" +#~ msgid "Child %(pid)d exited with status %(code)d" +#~ msgstr "" -#~ msgid "cool, it's a device" -#~ msgstr "酷,这是个设备" +#~ msgid "_wait_child %d" +#~ msgstr "" -#~ msgid "Unable to read LXC console" -#~ msgstr "无法读取 LXC 控制台" +#~ msgid "wait wrap.failed %s" +#~ msgstr "未知的基文件:%s" #~ msgid "" -#~ "to xml...\n" -#~ ":%s " +#~ "Report interval must be less than " +#~ "service down time. Current config: " +#~ ". Setting " +#~ "service_down_time to: %(new_service_down_time)s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target for volume %(name)s." #~ msgstr "" -#~ "转换成 xml...\n" -#~ ":%s " -#~ msgid "The nwfilter(%(instance_secgroup_filter_name)s) is not found." -#~ msgstr "nwfilter(%(instance_secgroup_filter_name)s) 未找到" +#~ msgid "Updating iscsi target: %s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target %(name)s: %(e)s" +#~ msgstr "" -#~ msgid "Created VM %s..." -#~ msgstr "创建虚拟机 %s ..." +#~ msgid "Caught '%(exception)s' exception." +#~ msgstr "" -#~ msgid "Created VM %(instance_name)s as %(vm_ref)s." -#~ msgstr "已经将虚拟机 %(instance_name)s 创建成 %(vm_ref)s。" +#~ msgid "Get code level failed" +#~ msgstr "" -#~ msgid "Creating a CDROM-specific VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " -#~ msgstr "正在为虚拟机 %(vm_ref)s 创建一个特定的CDROM VBD, VDI %(vdi_ref)s ... " +#~ msgid "do_setup: Could not get system name" +#~ msgstr "" -#~ msgid "" -#~ "Created a CDROM-specific VBD %(vbd_ref)s" -#~ " for VM %(vm_ref)s, VDI %(vdi_ref)s." -#~ msgstr "已经创建了一个基于CDROM 的VBD %(vbd_ref)s,目的是为虚拟机 %(vm_ref)s,VDI %(vdi_ref)s。" +#~ msgid "Failed to get license information." +#~ msgstr "" -#~ msgid "Image Type: %s" -#~ msgstr "镜像类型:%s" +#~ msgid "do_setup: No configured nodes" +#~ msgstr "" -#~ msgid "ISO: Found sr possibly containing the ISO image" -#~ msgstr "ISO:找到可能包含该ISO镜像的存储库" +#~ msgid "enter: _get_chap_secret_for_host: host name %s" +#~ msgstr "" -#~ msgid "Creating VBD for VDI %s ... " -#~ msgstr "正在为VDI%s创建VBD " +#~ msgid "" +#~ "leave: _get_chap_secret_for_host: host name " +#~ "%(host_name)s with secret %(chap_secret)s" +#~ msgstr "" -#~ msgid "Creating VBD for VDI %s done." -#~ msgstr "为VDI %s 创建VBD完成。" +#~ msgid "" +#~ "_create_host: Cannot clean host name. " +#~ "Host name is not unicode or string" +#~ msgstr "" -#~ msgid "VBD.unplug successful first time." -#~ msgstr "VBD.unplug 操作第一次成功。" +#~ msgid "enter: _get_host_from_connector: %s" +#~ msgstr "" -#~ msgid "VBD.unplug rejected: retrying..." -#~ msgstr "VBD.unplug 操作被拒绝:重试中..." +#~ msgid "leave: _get_host_from_connector: host %s" +#~ msgstr "" -#~ msgid "Not sleeping anymore!" -#~ msgstr "不再睡眠!" +#~ msgid "enter: _create_host: host %s" +#~ msgstr "" -#~ msgid "VBD.unplug successful eventually." 
-#~ msgstr "VBD.unplug 操作最终成功。" +#~ msgid "_create_host: No connector ports" +#~ msgstr "" -#~ msgid "Ignoring XenAPI.Failure in VBD.unplug: %s" -#~ msgstr "在 VBD.unplug 操作中忽略XenAPI.Failure:%s" +#~ msgid "leave: _create_host: host %(host)s - %(host_name)s" +#~ msgstr "把主机 %(host)s 设置为 %(state)s。" -#~ msgid "Ignoring XenAPI.Failure %s" -#~ msgstr "忽略 XenAPI.Failure %s" +#~ msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +#~ msgstr "正在删除有快照的卷 %(volume_name)s" -#~ msgid "Starting instance %s" -#~ msgstr "启动实例 %s" +#~ msgid "" +#~ "storwize_svc_multihostmap_enabled is set to " +#~ "False, Not allow multi host mapping" +#~ msgstr "" -#~ msgid "instance %s: Failed to spawn" -#~ msgstr "实例 %s:生产失败" +#~ msgid "volume %s mapping to multi host" +#~ msgstr "卷 %s:跳过导出" -#~ msgid "Instance %s failed to spawn - performing clean-up" -#~ msgstr "实例 %s 生产失败 - 正在进行清理" +#~ msgid "" +#~ "leave: _map_vol_to_host: LUN %(result_lun)s, " +#~ "volume %(volume_name)s, host %(host_name)s" +#~ msgstr "" -#~ msgid "instance %s: Failed to spawn - Unable to create VM" -#~ msgstr "实例 %s:生产失败 - 无法创建虚拟机" +#~ msgid "enter: _delete_host: host %s " +#~ msgstr "" -#~ msgid "Starting VM %s..." -#~ msgstr "启动虚拟机 %s..." +#~ msgid "leave: _delete_host: host %s " +#~ msgstr "" -#~ msgid "Spawning VM %(instance_uuid)s created %(vm_ref)s." -#~ msgstr "生产 VM %(instance_uuid)s 它是由 %(vm_ref)s 创建的。" +#~ msgid "_create_host failed to return the host name." +#~ msgstr "" -#~ msgid "Instance %s: waiting for running" -#~ msgstr "实例 %s:等待运行" +#~ msgid "_get_host_from_connector failed to return the host name for connector" +#~ msgstr "" -#~ msgid "Instance %s: running" -#~ msgstr "实例 %s:运行" +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to any host found." +#~ msgstr "" -#~ msgid "Resources to remove:%s" -#~ msgstr "将要移除的资源:%s" +#~ msgid "" +#~ "terminate_connection: Multiple mappings of " +#~ "volume %(vol_name)s found, no host " +#~ "specified." 
+#~ msgstr "" -#~ msgid "Removing VDI %(vdi_ref)s(uuid:%(vdi_to_remove)s)" -#~ msgstr "移除VDI %(vdi_ref)s(uuid:%(vdi_to_remove)s)" +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to host %(host_name)s found" +#~ msgstr "" -#~ msgid "Skipping VDI destroy for %s" -#~ msgstr "跳过 %s 的VDI的销毁" +#~ msgid "protocol must be specified as ' iSCSI' or ' FC'" +#~ msgstr "" -#~ msgid "Destroying VDIs for Instance %(instance_uuid)s" -#~ msgstr "正在为实例 %(instance_uuid)s 销毁 VDIs" +#~ msgid "enter: _create_vdisk: vdisk %s " +#~ msgstr "" -#~ msgid "Instance %(instance_uuid)s VM destroyed" -#~ msgstr "实例 %(instance_uuid)s 销毁了" +#~ msgid "" +#~ "_create_vdisk %(name)s - did not find success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" -#~ msgid "Destroying VM for Instance %(instance_uuid)s" -#~ msgstr "正在为实例%(instance_uuid)s 销毁虚拟机" +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find success " +#~ "message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" -#~ msgid "Instance for migration %d not found, skipping" -#~ msgstr "没有找到为迁移 %d 的实例,正在跳过" +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find mapping " +#~ "id in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" -#~ msgid "creating vif(s) for vm: |%s|" -#~ msgstr "正在为虚拟机创建 vif(s):|%s|" +#~ msgid "" +#~ "_prepare_fc_map: Failed to prepare FlashCopy" +#~ " from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" -#~ msgid "Created VIF %(vif_ref)s for VM %(vm_ref)s, network %(network_ref)s." -#~ msgstr "创建VIF %(vif_ref)s 是为了虚拟机 %(vm_ref)s,网络 %(network_ref)s。" +#~ msgid "" +#~ "Unexecpted mapping status %(status)s for " +#~ "mapping %(id)s. Attributes: %(attr)s" +#~ msgstr "" #~ msgid "" -#~ "The call to %(method)s returned an " -#~ "error: %(e)s. VM id=%(instance_uuid)s; " -#~ "args=%(args)r" -#~ msgstr "%(method)s 的调用返回错误:%(e)s。虚拟机id=%(instance_uuid)s; args=%(args)r" +#~ "Mapping %(id)s prepare failed to " +#~ "complete within the allotted %(to)d " +#~ "seconds timeout. Terminating." +#~ msgstr "" -#~ msgid "Creating VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " -#~ msgstr "为虚拟机 %(vm_ref)s ,VDI %(vdi_ref)s 创建VBD... 
" +#~ msgid "" +#~ "_prepare_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s with " +#~ "exception %(ex)s" +#~ msgstr "" -#~ msgid "Error destroying VDI" -#~ msgstr "销毁VDI出错" +#~ msgid "_prepare_fc_map: %s" +#~ msgstr "_create: %s" -#~ msgid "\tVolume %s is NOT VSA volume" -#~ msgstr "\t卷 %s 不是VSA 卷" +#~ msgid "" +#~ "_start_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" -#~ msgid "\tFE VSA Volume %s creation - do nothing" -#~ msgstr "\tFE VSA 卷 %s 的创建 - 什么都不做" +#~ msgid "" +#~ "enter: _run_flashcopy: execute FlashCopy from" +#~ " source %(source)s to target %(target)s" +#~ msgstr "" -#~ msgid "VSA BE create_volume for %s failed" -#~ msgstr "为 %s 的VSA BE create_volume失败" +#~ msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +#~ msgstr "" -#~ msgid "VSA BE create_volume for %s succeeded" -#~ msgstr "为 %s 的VSA BE create_volume成功" +#~ msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" -#~ msgid "\tFE VSA Volume %s deletion - do nothing" -#~ msgstr "\tFE VSA 卷 %s 的删除 - 什么都不做" +#~ msgid "_create_copy: Source vdisk %(src_vdisk)s (%(src_id)s) does not exist" +#~ msgstr "" -#~ msgid "VSA BE delete_volume for %s failed" -#~ msgstr "为 %s 的VSA BE delete_volume失败" +#~ msgid "" +#~ "_create_copy: cannot get source vdisk " +#~ "%(src)s capacity from vdisk attributes " +#~ "%(attr)s" +#~ msgstr "" -#~ msgid "VSA BE delete_volume for %s suceeded" -#~ msgstr "为 %s 的VSA BE delete_volume成功" +#~ msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" -#~ msgid "\tFE VSA Volume %s local path call - call discover" -#~ msgstr "\tFE VSA 卷 %s 本地路径调用 - 调用discover" +#~ msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +#~ msgstr "" -#~ msgid "\tFE VSA Volume %s ensure export - do nothing" -#~ msgstr "\tFE VSA Volume %s 确保导出 - 什么都不做" +#~ msgid "" +#~ "leave: _get_flashcopy_mapping_attributes: mapping " +#~ "%(fc_map_id)s, attributes %(attributes)s" +#~ msgstr "" -#~ msgid "\tFE VSA Volume %s create export - do nothing" -#~ msgstr "\tFE VSA Volume %s 创建导出 - 什么都不做" +#~ msgid "enter: _is_vdisk_defined: vdisk %s " +#~ msgstr "" -#~ msgid "\tFE VSA Volume %s remove export - do nothing" -#~ msgstr "\tFE VSA Volume %s 删除导出 - 什么都不做" +#~ msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +#~ msgstr "" -#~ msgid "VSA BE remove_export for %s failed" -#~ msgstr "为 %s 的VSA BE remove_export失败" +#~ msgid "enter: _delete_vdisk: vdisk %s" +#~ msgstr "" -#~ msgid "Failed to retrieve QoS info" -#~ msgstr "获取QoS信息失败" +#~ msgid "warning: Tried to delete vdisk %s but it does not exist." +#~ msgstr "" -#~ msgid "drive_name not defined" -#~ msgstr "没有定义drive_name" +#~ msgid "leave: _delete_vdisk: vdisk %s" +#~ msgstr "" -#~ msgid "invalid drive type name %s" -#~ msgstr "无效的drive类型名称 %s" +#~ msgid "" +#~ "_add_vdisk_copy %(name)s - did not find" +#~ " success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" -#~ msgid "*** Experimental VSA code ***" -#~ msgstr "*** 用于实验的VSA代码 ***" +#~ msgid "_get_vdisk_copy_attrs: Could not get vdisk copy data" +#~ msgstr "" -#~ msgid "Requested number of VCs (%d) is too high. 
Setting to default" -#~ msgstr "请求的VCs (%d)数目过大。设置为默认值" +#~ msgid "_get_pool_attrs: Pool %s does not exist" +#~ msgstr "" -#~ msgid "Creating VSA: %s" -#~ msgstr "正在创建VSA:%s" +#~ msgid "enter: _execute_command_and_parse_attributes: command %s" +#~ msgstr "" #~ msgid "" -#~ "VSA ID %(vsa_id)d %(vsa_name)s: Create " -#~ "volume %(vol_name)s, %(vol_size)d GB, type " -#~ "%(vol_type_id)s" +#~ "leave: _execute_command_and_parse_attributes:\n" +#~ "command: %(cmd)s\n" +#~ "attributes: %(attr)s" #~ msgstr "" -#~ "VSA ID %(vsa_id)d %(vsa_name)s:创建卷 " -#~ "%(vol_name)s,%(vol_size)d GB,类型 %(vol_type_id)s" -#~ msgid "VSA ID %(vsa_id)d: Update VSA status to %(status)s" -#~ msgstr "VSA ID %(vsa_id)d:更新VSA状态到 %(status)s" - -#~ msgid "VSA ID %(vsa_id)d: Update VSA call" -#~ msgstr "VSA ID %(vsa_id)d:更新VSA调用" +#~ msgid "" +#~ "_get_hdr_dic: attribute headers and values do not match.\n" +#~ " Headers: %(header)s\n" +#~ " Values: %(row)s" +#~ msgstr "" -#~ msgid "Adding %(add_cnt)s VCs to VSA %(vsa_name)s." -#~ msgstr "添加%(add_cnt)s VCs到 %(vsa_name)s。" +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ "stdout: %(out)s\n" +#~ "stderr: %(err)s\n" +#~ msgstr "" -#~ msgid "Deleting %(del_cnt)s VCs from VSA %(vsa_name)s." -#~ msgstr "%(del_cnt)s VCs 从VSA %(vsa_name)s 中删除。" +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Deleting %(direction)s volume %(vol_name)s" -#~ msgstr "VSA ID %(vsa_id)s:删除 %(direction)s 卷 %(vol_name)s" +#~ msgid "Did not find expected column in %(fun)s: %(hdr)s" +#~ msgstr "" -#~ msgid "Unable to delete volume %s" -#~ msgstr "无法删除卷 %s" +#~ msgid "" +#~ "Volume size %(size)s cannot be lesser" +#~ " than the snapshot size %(snap_size)s. " +#~ "They must be >= original snapshot " +#~ "size." +#~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Forced delete. %(direction)s volume %(vol_name)s" -#~ msgstr "VSA ID %(vsa_id)s:强迫删除。%(direction)s 卷 %(vol_name)s" +#~ msgid "" +#~ "Clones currently disallowed when %(size)s " +#~ "< %(source_size)s. They must be >= " +#~ "original volume size." +#~ msgstr "" -#~ msgid "Going to try to terminate VSA ID %s" -#~ msgstr "将要试图终止VSA ID %s" +#~ msgid "" +#~ "Size of specified image %(image_size)s " +#~ "is larger than volume size " +#~ "%(volume_size)s." +#~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Delete instance %(name)s" -#~ msgstr "VSA ID %(vsa_id)s:删除实例 %(name)s" +#~ msgid "" +#~ "Image minDisk size %(min_disk)s is " +#~ "larger than the volume size " +#~ "%(volume_size)s." +#~ msgstr "" -#~ msgid "Create call received for VSA %s" -#~ msgstr "收到VSA %s 的Create call" +#~ msgid "Updating volume %(volume_id)s with %(update)s" +#~ msgstr "" -#~ msgid "Failed to find VSA %(vsa_id)d" -#~ msgstr "没有找到VSA %(vsa_id)d" +#~ msgid "Volume %s: resetting 'creating' status failed" +#~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Drive %(vol_id)s created. 
Status %(status)s" -#~ msgstr "VSA ID %(vsa_id)s:Drive %(vol_id)s 创建。状态 %(status)s" +#~ msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s" +#~ msgstr "" -#~ msgid "Drive %(vol_name)s (%(vol_disp_name)s) still in creating phase - wait" -#~ msgstr "Drive %(vol_name)s (%(vol_disp_name)s) 依然在创建阶段 - 请等待" +#~ msgid "Marking volume %s as bootable" +#~ msgstr "" #~ msgid "" -#~ "VSA ID %(vsa_id)d: Not all volumes " -#~ "are created (%(cvol_real)d of %(cvol_exp)d)" -#~ msgstr "VSA ID %(vsa_id)d: 不是所有的卷都创建了 (%(cvol_real)d of %(cvol_exp)d)" +#~ "Attempting download of %(image_id)s " +#~ "(%(image_location)s) to volume %(volume_id)s" +#~ msgstr "" #~ msgid "" -#~ "VSA ID %(vsa_id)d: Drive %(vol_name)s " -#~ "(%(vol_disp_name)s) is in %(status)s state" +#~ "Downloaded image %(image_id)s (%(image_location)s)" +#~ " to volume %(volume_id)s successfully" #~ msgstr "" -#~ "VSA ID %(vsa_id)d:Drive %(vol_name)s " -#~ "(%(vol_disp_name)s)处于 %(status)s 状态。" - -#~ msgid "Failed to update attach status for volume %(vol_name)s. %(ex)s" -#~ msgstr "更新卷 %(vol_name)s 的附加状态失败。%(ex)s" -#~ msgid "VSA ID %(vsa_id)d: Delete all BE volumes" -#~ msgstr "VSA ID %(vsa_id)d:删除所有BE卷" +#~ msgid "" +#~ "Creating volume glance metadata for " +#~ "volume %(volume_id)s backed by image " +#~ "%(image_id)s with: %(vol_metadata)s" +#~ msgstr "" -#~ msgid "VSA ID %(vsa_id)d: Start %(vc_count)d instances" -#~ msgstr "VSA ID %(vsa_id)d:启动 %(vc_count)d 个实例" +#~ msgid "" +#~ "Cloning %(volume_id)s from image %(image_id)s" +#~ " at location %(image_location)s" +#~ msgstr "" diff --git a/cinder/locale/zh_HK/LC_MESSAGES/cinder.po b/cinder/locale/zh_HK/LC_MESSAGES/cinder.po new file mode 100644 index 0000000000..fe0dc87005 --- /dev/null +++ b/cinder/locale/zh_HK/LC_MESSAGES/cinder.po @@ -0,0 +1,10736 @@ +# Chinese (Hong Kong SAR China) translations for cinder. +# Copyright (C) 2013 ORGANIZATION +# This file is distributed under the same license as the cinder project. +# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Cinder\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-02-03 06:16+0000\n" +"PO-Revision-Date: 2013-05-29 08:13+0000\n" +"Last-Translator: openstackjenkins \n" +"Language-Team: Chinese (Hong Kong) " +"(http://www.transifex.com/projects/p/openstack/language/zh_HK/)\n" +"Plural-Forms: nplurals=1; plural=0\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" + +#: cinder/context.py:61 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:102 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/exception.py:66 cinder/brick/exception.py:33 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:88 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" +msgstr "" + +#: cinder/exception.py:107 +#, python-format +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" +msgstr "" + +#: cinder/exception.py:112 +#, python-format +msgid "Volume driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:116 +#, python-format +msgid "Backup driver reported an error: %(message)s" +msgstr "" + +#: cinder/exception.py:120 +#, python-format +msgid "Connection to glance failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:124 +msgid "Not authorized." 
+msgstr "" + +#: cinder/exception.py:129 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:133 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:137 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:141 +msgid "Volume driver not ready." +msgstr "" + +#: cinder/exception.py:145 cinder/brick/exception.py:74 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:150 +#, python-format +msgid "Invalid snapshot: %(reason)s" +msgstr "" + +#: cinder/exception.py:154 +#, python-format +msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:159 +#, python-format +msgid "Volume %(volume_id)s is still attached, detach volume first." +msgstr "" + +#: cinder/exception.py:163 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:167 +msgid "The results are invalid." +msgstr "" + +#: cinder/exception.py:171 +#, python-format +msgid "Invalid input received: %(reason)s" +msgstr "" + +#: cinder/exception.py:175 +#, python-format +msgid "Invalid volume type: %(reason)s" +msgstr "" + +#: cinder/exception.py:179 +#, python-format +msgid "Invalid volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:183 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:187 +#, python-format +msgid "Invalid host: %(reason)s" +msgstr "" + +#: cinder/exception.py:193 cinder/brick/exception.py:81 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:197 +#, python-format +msgid "Invalid auth key: %(reason)s" +msgstr "" + +#: cinder/exception.py:201 +#, python-format +msgid "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" +msgstr "" + +#: cinder/exception.py:206 +msgid "Service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:210 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:214 +#, python-format +msgid "The device in the path %(path)s is unavailable: %(reason)s" +msgstr "" + +#: cinder/exception.py:218 +#, python-format +msgid "Expected a uuid but received %(uuid)s." +msgstr "" + +#: cinder/exception.py:222 cinder/brick/exception.py:68 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:228 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:232 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:237 +#, python-format +msgid "" +"Volume %(volume_id)s has no administration metadata with key " +"%(metadata_key)s." +msgstr "" + +#: cinder/exception.py:242 +#, python-format +msgid "Invalid metadata: %(reason)s" +msgstr "" + +#: cinder/exception.py:246 +#, python-format +msgid "Invalid metadata size: %(reason)s" +msgstr "" + +#: cinder/exception.py:250 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:255 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:259 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:264 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." 
+msgstr "" + +#: cinder/exception.py:269 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s deletion is not allowed with volumes " +"present with the type." +msgstr "" + +#: cinder/exception.py:274 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:278 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:282 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:287 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:291 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:295 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:299 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:303 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:307 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:311 +#, python-format +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." +msgstr "" + +#: cinder/exception.py:315 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:319 +#, python-format +msgid "Invalid reservation expiration %(expire)s." +msgstr "" + +#: cinder/exception.py:323 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: cinder/exception.py:328 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:332 +#, python-format +msgid "Unknown quota resources %(unknown)s." +msgstr "" + +#: cinder/exception.py:336 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:340 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:344 +#, python-format +msgid "Quota usage for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:348 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." +msgstr "" + +#: cinder/exception.py:352 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: cinder/exception.py:356 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:365 +#, python-format +msgid "Volume Type %(id)s already exists." +msgstr "" + +#: cinder/exception.py:369 +#, python-format +msgid "Volume type encryption for type %(type_id)s already exists." +msgstr "" + +#: cinder/exception.py:373 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:377 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:381 +#, python-format +msgid "Could not find parameter %(param)s" +msgstr "" + +#: cinder/exception.py:385 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:389 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:398 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." 
+msgstr "" + +#: cinder/exception.py:402 +#, python-format +msgid "Quota exceeded: code=%(code)s" +msgstr "" + +#: cinder/exception.py:409 +#, python-format +msgid "" +"Requested volume or snapshot exceeds allowed Gigabytes quota. Requested " +"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." +msgstr "" + +#: cinder/exception.py:415 +#, python-format +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:419 +#, python-format +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" +msgstr "" + +#: cinder/exception.py:423 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:427 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:432 +#, python-format +msgid "Unknown or unsupported command %(cmd)s" +msgstr "" + +#: cinder/exception.py:436 +#, python-format +msgid "Malformed response to command %(cmd)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:440 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" +msgstr "" + +#: cinder/exception.py:444 +#, python-format +msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" +msgstr "" + +#: cinder/exception.py:449 +#, python-format +msgid "Glance metadata for volume/snapshot %(id)s cannot be found." +msgstr "" + +#: cinder/exception.py:453 +#, python-format +msgid "Failed to export for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:457 +#, python-format +msgid "Failed to create metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:461 +#, python-format +msgid "Failed to update metadata for volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:465 +#, python-format +msgid "Failed to copy metadata to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:469 +#, python-format +msgid "Failed to copy image to volume: %(reason)s" +msgstr "" + +#: cinder/exception.py:473 +msgid "Invalid Ceph args provided for backup rbd operation" +msgstr "" + +#: cinder/exception.py:477 +msgid "An error has occurred during backup operation" +msgstr "" + +#: cinder/exception.py:481 +msgid "Backup RBD operation failed" +msgstr "" + +#: cinder/exception.py:485 +#, python-format +msgid "Backup %(backup_id)s could not be found." +msgstr "" + +#: cinder/exception.py:489 +msgid "Failed to identify volume backend." +msgstr "" + +#: cinder/exception.py:493 +#, python-format +msgid "Invalid backup: %(reason)s" +msgstr "" + +#: cinder/exception.py:497 +#, python-format +msgid "Connection to swift failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:501 +#, python-format +msgid "Transfer %(transfer_id)s could not be found." +msgstr "" + +#: cinder/exception.py:505 +#, python-format +msgid "Volume migration failed: %(reason)s" +msgstr "" + +#: cinder/exception.py:509 +#, python-format +msgid "SSH command injection detected: %(command)s" +msgstr "" + +#: cinder/exception.py:513 +#, python-format +msgid "QoS Specs %(specs_id)s already exists." +msgstr "" + +#: cinder/exception.py:517 +#, python-format +msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:522 +#, python-format +msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." +msgstr "" + +#: cinder/exception.py:527 +#, python-format +msgid "No such QoS spec %(specs_id)s." 
+msgstr "" + +#: cinder/exception.py:531 +#, python-format +msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:536 +#, python-format +msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." +msgstr "" + +#: cinder/exception.py:541 +#, python-format +msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." +msgstr "" + +#: cinder/exception.py:546 +#, python-format +msgid "Invalid qos specs: %(reason)s" +msgstr "" + +#: cinder/exception.py:550 +#, python-format +msgid "QoS Specs %(specs_id)s is still associated with entities." +msgstr "" + +#: cinder/exception.py:554 +#, python-format +msgid "key manager error: %(reason)s" +msgstr "" + +#: cinder/exception.py:560 +msgid "Coraid Cinder Driver exception." +msgstr "" + +#: cinder/exception.py:564 +msgid "Failed to encode json data." +msgstr "" + +#: cinder/exception.py:568 +msgid "Login on ESM failed." +msgstr "" + +#: cinder/exception.py:572 +msgid "Relogin on ESM failed." +msgstr "" + +#: cinder/exception.py:576 +#, python-format +msgid "Group with name \"%(group_name)s\" not found." +msgstr "" + +#: cinder/exception.py:580 +#, python-format +msgid "ESM configure request failed: %(message)s." +msgstr "" + +#: cinder/exception.py:584 +#, python-format +msgid "Coraid ESM not available with reason: %(reason)s." +msgstr "" + +#: cinder/exception.py:589 +msgid "Zadara Cinder Driver exception." +msgstr "" + +#: cinder/exception.py:593 +#, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:597 +#, python-format +msgid "Unable to find server object for initiator %(name)s" +msgstr "" + +#: cinder/exception.py:601 +msgid "Unable to find any active VPSA controller" +msgstr "" + +#: cinder/exception.py:605 +#, python-format +msgid "Failed to retrieve attachments for volume %(name)s" +msgstr "" + +#: cinder/exception.py:609 +#, python-format +msgid "Invalid attachment info for volume %(name)s: %(reason)s" +msgstr "" + +#: cinder/exception.py:613 +#, python-format +msgid "Bad HTTP response status %(status)s" +msgstr "" + +#: cinder/exception.py:618 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:622 +msgid "SolidFire Cinder Driver exception" +msgstr "" + +#: cinder/exception.py:626 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:630 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:636 +#, python-format +msgid "Invalid 3PAR Domain: %(err)s" +msgstr "" + +#: cinder/exception.py:641 +msgid "Unknown NFS exception" +msgstr "" + +#: cinder/exception.py:645 +msgid "No mounted NFS shares found" +msgstr "" + +#: cinder/exception.py:649 cinder/exception.py:662 +#, python-format +msgid "There is no share which can host %(volume_size)sG" +msgstr "" + +#: cinder/exception.py:654 +msgid "Unknown Gluster exception" +msgstr "" + +#: cinder/exception.py:658 +msgid "No mounted Gluster shares found" +msgstr "" + +#: cinder/manager.py:133 +msgid "Notifying Schedulers of capabilities ..." +msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/quota.py:105 +#, python-format +msgid "" +"Default quota for resource: %(res)s is set by the default quota flag: " +"quota_%(res)s, it is now deprecated. 
Please use the the default quota " +"class for default quota." +msgstr "" + +#: cinder/quota.py:748 +#, python-format +msgid "Created reservations %s" +msgstr "" + +#: cinder/quota.py:770 +#, python-format +msgid "Failed to commit reservations %s" +msgstr "" + +#: cinder/quota.py:790 +#, python-format +msgid "Failed to roll back reservations %s" +msgstr "" + +#: cinder/quota.py:876 +msgid "Cannot register resource" +msgstr "" + +#: cinder/quota.py:879 +msgid "Cannot register resources" +msgstr "" + +#: cinder/quota_utils.py:46 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume - " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/quota_utils.py:56 cinder/transfer/api.py:168 +#: cinder/volume/flows/api/create_volume.py:520 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" +msgstr "" + +#: cinder/service.py:95 +#, python-format +msgid "Starting %(topic)s node (version %(version_string)s)" +msgstr "" + +#: cinder/service.py:108 cinder/openstack/common/rpc/service.py:47 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:148 +#, python-format +msgid "" +"Report interval must be less than service down time. Current config " +"service_down_time: %(service_down_time)s, report_interval for this: " +"service is: %(report_interval)s. Setting global service_down_time to: " +"%(new_down_time)s" +msgstr "" + +#: cinder/service.py:216 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:255 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:270 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:276 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:298 +#, python-format +msgid "" +"Value of config option %(name)s_workers must be integer greater than 1. " +"Input value ignored." +msgstr "" + +#: cinder/service.py:373 +msgid "serve() can only be called once" +msgstr "" + +#: cinder/service.py:379 cinder/openstack/common/service.py:166 +#: cinder/openstack/common/service.py:384 +msgid "Full set of CONF:" +msgstr "" + +#: cinder/service.py:387 +#, python-format +msgid "%s : FLAG SET " +msgstr "" + +#: cinder/utils.py:96 +#, python-format +msgid "Can not translate %s to integer." 
+msgstr "" + +#: cinder/utils.py:127 +#, python-format +msgid "May specify only one of %s" +msgstr "" + +#: cinder/utils.py:212 +msgid "Specify a password or private_key" +msgstr "" + +#: cinder/utils.py:228 +#, python-format +msgid "Error connecting via ssh: %s" +msgstr "" + +#: cinder/utils.py:412 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:423 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:698 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/utils.py:759 +#, python-format +msgid "Volume driver %s not initialized" +msgstr "" + +#: cinder/wsgi.py:127 cinder/openstack/common/sslutils.py:50 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: cinder/wsgi.py:130 cinder/openstack/common/sslutils.py:53 +#, python-format +msgid "Unable to find ca_file : %s" +msgstr "" + +#: cinder/wsgi.py:133 cinder/openstack/common/sslutils.py:56 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: cinder/wsgi.py:136 cinder/openstack/common/sslutils.py:59 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: cinder/wsgi.py:169 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: cinder/wsgi.py:206 +#, python-format +msgid "Started %(name)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:244 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:313 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." +msgstr "" + +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." +msgstr "" + +#: cinder/api/common.py:92 cinder/api/common.py:126 cinder/volume/api.py:266 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/common.py:95 cinder/api/common.py:130 cinder/volume/api.py:263 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/common.py:120 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/common.py:134 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/common.py:162 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/common.py:189 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/extensions.py:182 +msgid "Initializing extension manager." 
+msgstr "" + +#: cinder/api/extensions.py:197 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/extensions.py:235 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/extensions.py:236 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/extensions.py:237 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/extensions.py:239 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/extensions.py:240 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/extensions.py:242 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/extensions.py:256 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/extensions.py:262 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/extensions.py:276 +#, python-format +msgid "osapi_volume_extension is set to deprecated path: %s" +msgstr "" + +#: cinder/api/extensions.py:278 +#, python-format +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" +msgstr "" + +#: cinder/api/extensions.py:287 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:356 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/extensions.py:381 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" +msgstr "" + +#: cinder/api/xmlutil.py:266 +msgid "element is not a child" +msgstr "" + +#: cinder/api/xmlutil.py:463 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/xmlutil.py:786 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/xmlutil.py:907 +msgid "subclasses must implement construct()!" +msgstr "" + +#: cinder/api/contrib/admin_actions.py:81 +#, python-format +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" +msgstr "" + +#: cinder/api/contrib/backups.py:128 +#, python-format +msgid "show called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:140 +#, python-format +msgid "delete called for member %s" +msgstr "" + +#: cinder/api/contrib/backups.py:143 +#, python-format +msgid "Delete backup with id: %s" +msgstr "" + +#: cinder/api/contrib/backups.py:185 +#, python-format +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:227 +#: cinder/api/contrib/volume_transfer.py:157 +#: cinder/api/contrib/volume_transfer.py:193 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/contrib/backups.py:201 +#, python-format +msgid "Creating backup of volume %(volume_id)s in container %(container)s" +msgstr "" + +#: cinder/api/contrib/backups.py:224 +#, python-format +msgid "Restoring backup %(backup_id)s (%(body)s)" +msgstr "" + +#: cinder/api/contrib/backups.py:234 +#, python-format +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" +msgstr "" + +#: cinder/api/contrib/extended_snapshot_attributes.py:60 +msgid "Snapshot not found." 
+msgstr "" + +#: cinder/api/contrib/hosts.py:86 cinder/api/openstack/wsgi.py:245 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/contrib/hosts.py:136 +#, python-format +msgid "Host '%s' could not be found." +msgstr "" + +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:168 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/contrib/hosts.py:180 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/contrib/hosts.py:206 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/contrib/hosts.py:214 +msgid "Host not found" +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:111 +msgid "Please specify a name for QoS specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:220 +msgid "Failed to disassociate qos specs." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:222 +msgid "Qos specs still in use." +msgstr "" + +#: cinder/api/contrib/qos_specs_manage.py:298 +#: cinder/api/contrib/qos_specs_manage.py:351 +msgid "Volume Type id must not be None." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:72 +msgid "Missing required element quota_class_set in request body." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:81 +msgid "Quota class limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quota_classes.py:85 +msgid "Quota class limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:60 +msgid "Quota limit must be specified as an integer value." +msgstr "" + +#: cinder/api/contrib/quotas.py:65 +msgid "Quota limit must be -1 or greater." +msgstr "" + +#: cinder/api/contrib/quotas.py:100 +msgid "Missing required element quota_set in request body." +msgstr "" + +#: cinder/api/contrib/quotas.py:111 +#, python-format +msgid "Bad key(s) in quota set: %s" +msgstr "" + +#: cinder/api/contrib/scheduler_hints.py:36 +msgid "Malformed scheduler_hints attribute" +msgstr "" + +#: cinder/api/contrib/services.py:84 +msgid "" +"Query by service parameter is deprecated. Please use binary parameter " +"instead." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:51 +msgid "'status' must be specified." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:61 +#, python-format +msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:67 +#, python-format +msgid "" +"Provided snapshot status %(provided)s not allowed for snapshot with " +"status %(current)s." +msgstr "" + +#: cinder/api/contrib/snapshot_actions.py:79 +msgid "progress must be an integer percentage" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:101 +msgid "Request body empty" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:105 +#: cinder/api/v1/snapshot_metadata.py:75 cinder/api/v1/volume_metadata.py:75 +#: cinder/api/v2/snapshot_metadata.py:75 cinder/api/v2/volume_metadata.py:74 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:108 +#: cinder/api/v1/snapshot_metadata.py:79 cinder/api/v1/volume_metadata.py:79 +#: cinder/api/v2/snapshot_metadata.py:79 cinder/api/v2/volume_metadata.py:78 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/contrib/types_extra_specs.py:150 +msgid "" +"Key names can only contain alphanumeric characters, underscores, periods," +" colons and hyphens." 
+msgstr "" + +#: cinder/api/contrib/volume_actions.py:99 +#, python-format +msgid "" +"Invalid request to attach volume to an instance %(instance_uuid)s and a " +"host %(host_name)s simultaneously" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:107 +msgid "Invalid request to attach volume to an invalid target" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:111 +msgid "" +"Invalid request to attach volume with an invalid mode. Attaching mode " +"should be 'rw' or 'ro'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:196 +msgid "Unable to fetch connection information from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:216 +msgid "Unable to terminate volume connection from backend." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:229 +msgid "No image_name was specified in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:237 +msgid "Bad value for 'force' parameter." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:240 +msgid "'force' is not string or bool." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:280 +msgid "New volume size must be specified as an integer." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:299 +msgid "Must specify readonly in request." +msgstr "" + +#: cinder/api/contrib/volume_actions.py:307 +msgid "Bad value for 'readonly'" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:311 +msgid "'readonly' not string or bool" +msgstr "" + +#: cinder/api/contrib/volume_actions.py:325 +msgid "New volume type must be specified." +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:131 +msgid "Listing volume transfers" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:147 +#, python-format +msgid "Creating new volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:162 +#, python-format +msgid "Creating transfer of volume %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:183 +#, python-format +msgid "Accepting volume transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:196 +#, python-format +msgid "Accepting transfer %s" +msgstr "" + +#: cinder/api/contrib/volume_transfer.py:217 +#, python-format +msgid "Delete transfer with id: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:64 +msgid "key_size must be non-negative" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:67 +msgid "key_size must be an integer" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:73 +msgid "provider must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:75 +msgid "control_location must be defined" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:83 +#, python-format +msgid "Valid control location are: %s" +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:111 +msgid "Cannot create encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:115 +msgid "Create body is not valid." +msgstr "" + +#: cinder/api/contrib/volume_type_encryption.py:157 +msgid "Cannot delete encryption specs. Volume type in use." +msgstr "" + +#: cinder/api/middleware/auth.py:108 +msgid "Invalid service catalog json." 
+msgstr "" + +#: cinder/api/middleware/fault.py:44 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/middleware/fault.py:53 cinder/api/openstack/wsgi.py:984 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/middleware/fault.py:69 +#, python-format +msgid "%(exception)s: %(explanation)s" +msgstr "" + +#: cinder/api/middleware/sizelimit.py:55 cinder/api/middleware/sizelimit.py:64 +#: cinder/api/middleware/sizelimit.py:78 +msgid "Request is too large." +msgstr "" + +#: cinder/api/openstack/__init__.py:69 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:80 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:104 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:110 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/__init__.py:126 +msgid "" +"cinder.api.openstack:FaultWrapper is deprecated. Please use " +"cinder.api.middleware.fault:FaultWrapper instead." +msgstr "" + +#: cinder/api/openstack/urlmap.py:25 +msgid "" +"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use " +"cinder.api.urlmap:urlmap_factory instead." +msgstr "" + +#: cinder/api/openstack/wsgi.py:220 cinder/api/openstack/wsgi.py:634 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:639 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:677 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:682 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:685 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:793 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:799 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:803 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:914 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:917 cinder/api/openstack/wsgi.py:930 +#: cinder/api/v1/snapshot_metadata.py:53 cinder/api/v1/snapshot_metadata.py:71 +#: cinder/api/v1/snapshot_metadata.py:96 cinder/api/v1/snapshot_metadata.py:121 +#: cinder/api/v1/volume_metadata.py:53 cinder/api/v1/volume_metadata.py:71 +#: cinder/api/v1/volume_metadata.py:96 cinder/api/v1/volume_metadata.py:121 +#: cinder/api/v2/snapshot_metadata.py:53 cinder/api/v2/snapshot_metadata.py:71 +#: cinder/api/v2/snapshot_metadata.py:96 cinder/api/v2/snapshot_metadata.py:121 +#: cinder/api/v2/volume_metadata.py:52 cinder/api/v2/volume_metadata.py:70 +#: cinder/api/v2/volume_metadata.py:95 cinder/api/v2/volume_metadata.py:120 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:927 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:939 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:987 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/volume/__init__.py:25 +msgid "" +"cinder.api.openstack.volume:APIRouter is deprecated. Please use " +"cinder.api.v1.router:APIRouter instead." 
+msgstr "" + +#: cinder/api/openstack/volume/versions.py:26 +msgid "" +"cinder.api.openstack.volume.versions.Versions is deprecated. Please use " +"cinder.api.versions.Versions instead." +msgstr "" + +#: cinder/api/v1/limits.py:139 cinder/api/v2/limits.py:138 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/v1/limits.py:264 cinder/api/v2/limits.py:261 +msgid "This request was rate-limited." +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:37 cinder/api/v1/snapshot_metadata.py:117 +#: cinder/api/v1/snapshot_metadata.py:156 cinder/api/v2/snapshot_metadata.py:37 +#: cinder/api/v2/snapshot_metadata.py:117 +#: cinder/api/v2/snapshot_metadata.py:156 +msgid "snapshot does not exist" +msgstr "" + +#: cinder/api/v1/snapshot_metadata.py:139 +#: cinder/api/v1/snapshot_metadata.py:149 cinder/api/v1/volume_metadata.py:139 +#: cinder/api/v1/volume_metadata.py:149 cinder/api/v2/snapshot_metadata.py:139 +#: cinder/api/v2/snapshot_metadata.py:149 cinder/api/v2/volume_metadata.py:138 +#: cinder/api/v2/volume_metadata.py:148 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/v1/snapshots.py:119 cinder/api/v2/snapshots.py:120 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:184 +msgid "'volume_id' must be specified" +msgstr "" + +#: cinder/api/v1/snapshots.py:182 cinder/api/v2/snapshots.py:193 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/api/v1/snapshots.py:186 cinder/api/v2/snapshots.py:202 +#, python-format +msgid "Invalid value '%s' for force. " +msgstr "" + +#: cinder/api/v1/volume_metadata.py:37 cinder/api/v1/volume_metadata.py:117 +#: cinder/api/v1/volume_metadata.py:156 cinder/api/v2/volume_metadata.py:36 +#: cinder/api/v2/volume_metadata.py:116 cinder/api/v2/volume_metadata.py:155 +msgid "volume does not exist" +msgstr "" + +#: cinder/api/v1/volumes.py:111 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/v1/volumes.py:290 cinder/api/v2/volumes.py:228 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/v1/volumes.py:344 cinder/api/v1/volumes.py:348 +#: cinder/api/v2/volumes.py:298 cinder/api/v2/volumes.py:302 +msgid "Invalid imageRef provided." 
+msgstr "" + +#: cinder/api/v1/volumes.py:388 cinder/api/v2/volumes.py:354 +#, python-format +msgid "snapshot id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:401 +#, python-format +msgid "source vol id:%s not found" +msgstr "" + +#: cinder/api/v1/volumes.py:412 cinder/api/v2/volumes.py:377 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/v1/volumes.py:496 +#, python-format +msgid "Removing options '%(bad_options)s' from query" +msgstr "" + +#: cinder/api/v2/snapshots.py:111 cinder/api/v2/snapshots.py:126 +#: cinder/api/v2/snapshots.py:267 +msgid "Snapshot could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:174 cinder/api/v2/snapshots.py:234 +#: cinder/api/v2/volumes.py:313 cinder/api/v2/volumes.py:419 +#, python-format +msgid "Missing required element '%s' in request body" +msgstr "" + +#: cinder/api/v2/snapshots.py:190 cinder/api/v2/volumes.py:217 +#: cinder/api/v2/volumes.py:234 cinder/api/v2/volumes.py:449 +msgid "Volume could not be found" +msgstr "" + +#: cinder/api/v2/snapshots.py:230 cinder/api/v2/volumes.py:415 +msgid "Missing request body" +msgstr "" + +#: cinder/api/v2/types.py:70 +msgid "Volume type not found" +msgstr "" + +#: cinder/api/v2/volumes.py:237 +msgid "Volume cannot be deleted while in attached state" +msgstr "" + +#: cinder/api/v2/volumes.py:343 +msgid "Volume type not found." +msgstr "" + +#: cinder/api/v2/volumes.py:366 +#, python-format +msgid "source volume id:%s not found" +msgstr "" + +#: cinder/api/v2/volumes.py:472 +#, python-format +msgid "Removing options '%s' from query" +msgstr "" + +#: cinder/backup/api.py:66 +msgid "Backup status must be available or error" +msgstr "" + +#: cinder/backup/api.py:105 +msgid "Volume to be backed up must be available" +msgstr "" + +#: cinder/backup/api.py:140 +msgid "Backup status must be available" +msgstr "" + +#: cinder/backup/api.py:145 +msgid "Backup to be restored has invalid size" +msgstr "" + +#: cinder/backup/api.py:154 +#, python-format +msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/api.py:170 +msgid "Volume to be restored to must be available" +msgstr "" + +#: cinder/backup/api.py:176 +#, python-format +msgid "" +"volume size %(volume_size)d is too small to restore backup of size " +"%(size)d." +msgstr "" + +#: cinder/backup/api.py:181 +#, python-format +msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" +msgstr "" + +#: cinder/backup/manager.py:97 +msgid "NULL host not allowed for volume backend lookup." +msgstr "" + +#: cinder/backup/manager.py:100 +#, python-format +msgid "Checking hostname '%s' for backend info." +msgstr "" + +#: cinder/backup/manager.py:107 +#, python-format +msgid "Backend not found in hostname (%s) so using default." +msgstr "" + +#: cinder/backup/manager.py:117 +#, python-format +msgid "Manager requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:120 cinder/backup/manager.py:132 +msgid "Fetching default backend." +msgstr "" + +#: cinder/backup/manager.py:123 +#, python-format +msgid "Volume manager for backend '%s' does not exist." +msgstr "" + +#: cinder/backup/manager.py:129 +#, python-format +msgid "Driver requested for volume_backend '%s'." +msgstr "" + +#: cinder/backup/manager.py:147 +#, python-format +msgid "" +"Registering backend %(backend)s (host=%(host)s " +"backend_name=%(backend_name)s)." +msgstr "" + +#: cinder/backup/manager.py:154 +#, python-format +msgid "Registering default backend %s." 
+msgstr "" + +#: cinder/backup/manager.py:158 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)." +msgstr "" + +#: cinder/backup/manager.py:165 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s." +msgstr "" + +#: cinder/backup/manager.py:184 +msgid "Cleaning up incomplete backup operations." +msgstr "" + +#: cinder/backup/manager.py:189 +#, python-format +msgid "Resetting volume %s to available (was backing-up)." +msgstr "" + +#: cinder/backup/manager.py:194 +#, python-format +msgid "Resetting volume %s to error_restoring (was restoring-backup)." +msgstr "" + +#: cinder/backup/manager.py:206 +#, python-format +msgid "Resetting backup %s to error (was creating)." +msgstr "" + +#: cinder/backup/manager.py:212 +#, python-format +msgid "Resetting backup %s to available (was restoring)." +msgstr "" + +#: cinder/backup/manager.py:217 +#, python-format +msgid "Resuming delete on backup: %s." +msgstr "" + +#: cinder/backup/manager.py:225 +#, python-format +msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:237 +#, python-format +msgid "" +"Create backup aborted, expected volume status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:249 +#, python-format +msgid "" +"Create backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:282 +#, python-format +msgid "Create backup finished. backup: %s." +msgstr "" + +#: cinder/backup/manager.py:286 +#, python-format +msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:299 +#, python-format +msgid "" +"Restore backup aborted: expected volume status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:310 +#, python-format +msgid "" +"Restore backup aborted: expected backup status %(expected_status)s but " +"got %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:329 +#, python-format +msgid "" +"Restore backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:360 +#, python-format +msgid "" +"Restore backup finished, backup %(backup_id)s restored to volume " +"%(volume_id)s." +msgstr "" + +#: cinder/backup/manager.py:379 +#, python-format +msgid "Delete backup started, backup: %s." +msgstr "" + +#: cinder/backup/manager.py:386 +#, python-format +msgid "" +"Delete_backup aborted, expected backup status %(expected_status)s but got" +" %(actual_status)s." +msgstr "" + +#: cinder/backup/manager.py:399 +#, python-format +msgid "" +"Delete backup aborted, the backup service currently configured " +"[%(configured_service)s] is not the backup service that was used to " +"create this backup [%(backup_service)s]." +msgstr "" + +#: cinder/backup/manager.py:422 +#, python-format +msgid "Delete backup finished, backup %s deleted." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:116 +msgid "" +"rbd striping not supported - ignoring configuration settings for rbd " +"striping" +msgstr "" + +#: cinder/backup/drivers/ceph.py:147 +#, python-format +msgid "invalid user '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:213 +msgid "backup_id required" +msgstr "" + +#: cinder/backup/drivers/ceph.py:224 +#, python-format +msgid "discarding %(length)s bytes from offset %(offset)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:232 +#, python-format +msgid "writing zeroes chunk %d" +msgstr "" + +#: cinder/backup/drivers/ceph.py:246 +#, python-format +msgid "transferring data between '%(src)s' and '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:250 +#, python-format +msgid "%(chunks)s chunks of %(bytes)s bytes to be transferred" +msgstr "" + +#: cinder/backup/drivers/ceph.py:269 +#, python-format +msgid "transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:279 +#, python-format +msgid "transferring remaining %s bytes" +msgstr "" + +#: cinder/backup/drivers/ceph.py:295 +#, python-format +msgid "creating base image '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:322 cinder/backup/drivers/ceph.py:603 +#, python-format +msgid "deleting backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:325 +msgid "no backup snapshot to delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:361 +#, python-format +msgid "trying diff format name format basename='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:369 +#, python-format +msgid "image %s not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:377 +#, python-format +msgid "base image still has %s snapshots so skipping base image delete" +msgstr "" + +#: cinder/backup/drivers/ceph.py:382 +#, python-format +msgid "deleting base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:389 +#, python-format +msgid "image busy, retrying %(retries)s more time(s) in %(delay)ss" +msgstr "" + +#: cinder/backup/drivers/ceph.py:394 +msgid "max retries reached - raising error" +msgstr "" + +#: cinder/backup/drivers/ceph.py:397 +#, python-format +msgid "base backup image='%s' deleted)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:407 +#, python-format +msgid "deleting source snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:453 +#, python-format +msgid "performing differential transfer from '%(src)s' to '%(dest)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:478 +#, python-format +msgid "rbd diff op failed - (ret=%(ret)s stderr=%(stderr)s)" +msgstr "" + +#: cinder/backup/drivers/ceph.py:488 +#, python-format +msgid "image '%s' not found - trying diff format name" +msgstr "" + +#: cinder/backup/drivers/ceph.py:493 +#, python-format +msgid "diff format image '%s' not found" +msgstr "" + +#: cinder/backup/drivers/ceph.py:528 +#, python-format +msgid "using --from-snap '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:543 +#, python-format +msgid "source snap '%s' is stale so deleting" +msgstr "" + +#: cinder/backup/drivers/ceph.py:555 +#, python-format +msgid "" +"snap='%(snap)s' does not exist in base image='%(base)s' - aborting " +"incremental backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:566 +#, python-format +msgid "creating backup snapshot='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:586 +#, python-format +msgid "differential backup transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:595 +msgid "differential backup transfer failed" +msgstr "" + +#: 
cinder/backup/drivers/ceph.py:625 +#, python-format +msgid "creating base image='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:634 +msgid "copying data" +msgstr "" + +#: cinder/backup/drivers/ceph.py:694 +#, python-format +msgid "looking for snapshot of backup base '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:697 +#, python-format +msgid "backup base '%s' has no snapshots" +msgstr "" + +#: cinder/backup/drivers/ceph.py:704 +#, python-format +msgid "backup '%s' has no snapshot" +msgstr "" + +#: cinder/backup/drivers/ceph.py:708 +#, python-format +msgid "backup should only have one snapshot but instead has %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:713 +#, python-format +msgid "found snapshot '%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:734 +msgid "need non-zero volume size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:751 +#, python-format +msgid "Starting backup of volume='%s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:764 +msgid "forcing full backup" +msgstr "" + +#: cinder/backup/drivers/ceph.py:776 +#, python-format +msgid "backup '%s' finished." +msgstr "" + +#: cinder/backup/drivers/ceph.py:834 +msgid "adjusting restore vol size" +msgstr "" + +#: cinder/backup/drivers/ceph.py:846 +#, python-format +msgid "trying incremental restore from base='%(base)s' snap='%(snap)s'" +msgstr "" + +#: cinder/backup/drivers/ceph.py:858 +msgid "differential restore failed, trying full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:869 +#, python-format +msgid "restore transfer completed in %.4fs" +msgstr "" + +#: cinder/backup/drivers/ceph.py:916 +#, python-format +msgid "rbd has %s extents" +msgstr "" + +#: cinder/backup/drivers/ceph.py:938 +msgid "dest volume is original volume - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:959 +msgid "destination has extents - forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:964 +#, python-format +msgid "no restore point found for backup='%s', forcing full copy" +msgstr "" + +#: cinder/backup/drivers/ceph.py:995 +msgid "forcing full restore" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1005 +#, python-format +msgid "starting restore from Ceph backup=%(src)s to volume=%(dest)s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1016 +msgid "volume_file does not support fileno() so skipping fsync()" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1021 +msgid "restore finished successfully." 
+msgstr "" + +#: cinder/backup/drivers/ceph.py:1023 +#, python-format +msgid "restore finished with error - %s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1029 +#, python-format +msgid "delete started for backup=%s" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1034 +msgid "rbd image not found but continuing anyway so that db entry can be removed" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1037 +#, python-format +msgid "delete '%s' finished with warning" +msgstr "" + +#: cinder/backup/drivers/ceph.py:1039 +#, python-format +msgid "delete '%s' finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:106 +#, python-format +msgid "unsupported compression algorithm: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:123 +#, python-format +msgid "single_user auth mode enabled, but %(param)s not set" +msgstr "" + +#: cinder/backup/drivers/swift.py:141 +#, python-format +msgid "_check_container_exists: container: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:146 +#, python-format +msgid "container %s does not exist" +msgstr "" + +#: cinder/backup/drivers/swift.py:151 +#, python-format +msgid "container %s exists" +msgstr "" + +#: cinder/backup/drivers/swift.py:157 +#, python-format +msgid "_create_container started, container: %(container)s,backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:173 +#, python-format +msgid "_generate_swift_object_name_prefix: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:182 +#, python-format +msgid "generated object list: %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:192 +#, python-format +msgid "" +"_write_metadata started, container name: %(container)s, metadata " +"filename: %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:209 +#, python-format +msgid "" +"error writing metadata file to swift, MD5 of metadata file in swift " +"[%(etag)s] is not the same as MD5 of metadata file sent to swift " +"[%(md5)s]" +msgstr "" + +#: cinder/backup/drivers/swift.py:214 +msgid "_write_metadata finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:219 +#, python-format +msgid "" +"_read_metadata started, container name: %(container)s, metadata filename:" +" %(filename)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:224 +#, python-format +msgid "_read_metadata finished (%s)" +msgstr "" + +#: cinder/backup/drivers/swift.py:234 +#, python-format +msgid "volume size %d is invalid." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:248 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to swift, volume size: " +"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, " +"availability zone: %(availability_zone)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:271 +msgid "reading chunk of data from volume" +msgstr "" + +#: cinder/backup/drivers/swift.py:278 +#, python-format +msgid "" +"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes" +" using %(algorithm)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:287 +msgid "not compressing data" +msgstr "" + +#: cinder/backup/drivers/swift.py:291 +msgid "About to put_object" +msgstr "" + +#: cinder/backup/drivers/swift.py:297 +#, python-format +msgid "swift MD5 for %(object_name)s: %(etag)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:301 +#, python-format +msgid "backup MD5 for %(object_name)s: %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:304 +#, python-format +msgid "" +"error writing object to swift, MD5 of object in swift %(etag)s is not the" +" same as MD5 of object sent to swift %(md5)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:312 +msgid "Calling eventlet.sleep(0)" +msgstr "" + +#: cinder/backup/drivers/swift.py:328 cinder/backup/drivers/tsm.py:324 +#, python-format +msgid "backup %s finished." +msgstr "" + +#: cinder/backup/drivers/swift.py:345 +#, python-format +msgid "v1 swift volume backup restore of %s started" +msgstr "" + +#: cinder/backup/drivers/swift.py:350 +#, python-format +msgid "metadata_object_names = %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:356 +msgid "" +"restore_backup aborted, actual swift object list in swift does not match " +"object list stored in metadata" +msgstr "" + +#: cinder/backup/drivers/swift.py:362 +#, python-format +msgid "" +"restoring object from swift. backup: %(backup_id)s, container: " +"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:378 +#, python-format +msgid "decompressing data using %s algorithm" +msgstr "" + +#: cinder/backup/drivers/swift.py:401 +#, python-format +msgid "v1 swift volume backup restore of %s finished" +msgstr "" + +#: cinder/backup/drivers/swift.py:409 +#, python-format +msgid "" +"starting restore of backup %(object_prefix)s from swift container: " +"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:423 +#, python-format +msgid "Restoring swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:428 +#, python-format +msgid "No support to restore swift backup version %s" +msgstr "" + +#: cinder/backup/drivers/swift.py:432 cinder/backup/drivers/tsm.py:378 +#, python-format +msgid "restore %(backup_id)s to %(volume_id)s finished." 
+msgstr "" + +#: cinder/backup/drivers/swift.py:446 +msgid "swift error while listing objects, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:455 +#, python-format +msgid "swift error while deleting object %s, continuing with delete" +msgstr "" + +#: cinder/backup/drivers/swift.py:458 +#, python-format +msgid "deleted swift object: %(swift_object_name)s in container: %(container)s" +msgstr "" + +#: cinder/backup/drivers/swift.py:468 cinder/backup/drivers/tsm.py:440 +#, python-format +msgid "delete %s finished" +msgstr "" + +#: cinder/backup/drivers/tsm.py:85 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to create device hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:143 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to obtain backup success notification from " +"server.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:173 +#, python-format +msgid "" +"restore: %(vol_id)s Failed.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:199 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a block device." +msgstr "" + +#: cinder/backup/drivers/tsm.py:206 +#, python-format +msgid "backup: %(vol_id)s Failed. Cannot obtain real path to device %(path)s." +msgstr "" + +#: cinder/backup/drivers/tsm.py:213 +#, python-format +msgid "backup: %(vol_id)s Failed. %(path)s is not a file." +msgstr "" + +#: cinder/backup/drivers/tsm.py:260 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to remove backup hardlink from %(vpath)s to " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:286 +#, python-format +msgid "" +"starting backup of volume: %(volume_id)s to TSM, volume path: " +"%(volume_path)s," +msgstr "" + +#: cinder/backup/drivers/tsm.py:298 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:308 +#, python-format +msgid "" +"backup: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:338 +#, python-format +msgid "" +"restore: starting restore of backup from TSM to volume %(volume_id)s, " +"backup: %(backup_id)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:352 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc on %(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:362 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments on " +"%(bpath)s.\n" +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:413 +#, python-format +msgid "" +"delete: %(vol_id)s Failed to run dsmc with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:421 +#, python-format +msgid "" +"restore: %(vol_id)s Failed to run dsmc due to invalid arguments with " +"stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/backup/drivers/tsm.py:432 +#, python-format +msgid "" +"delete: %(vol_id)s Failed with stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/brick/exception.py:55 +#, python-format +msgid "Exception in string format operation. 
msg='%s'" +msgstr "" + +#: cinder/brick/exception.py:85 +msgid "We are unable to locate any Fibre Channel devices." +msgstr "" + +#: cinder/brick/exception.py:89 +msgid "Unable to find a Fibre Channel volume device." +msgstr "" + +#: cinder/brick/exception.py:93 +#, python-format +msgid "Volume device not found at %(device)s." +msgstr "" + +#: cinder/brick/exception.py:97 +#, python-format +msgid "Unable to find Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:101 +#, python-format +msgid "Failed to create Volume Group: %(vg_name)s" +msgstr "" + +#: cinder/brick/exception.py:105 +#, python-format +msgid "Failed to create iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:109 +#, python-format +msgid "Failed to remove iscsi target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:113 +#, python-format +msgid "Failed to attach iSCSI target for volume %(volume_id)s." +msgstr "" + +#: cinder/brick/exception.py:117 +#, python-format +msgid "Connect to volume via protocol %(protocol)s not supported." +msgstr "" + +#: cinder/brick/initiator/connector.py:127 +#, python-format +msgid "Invalid InitiatorConnector protocol specified %(protocol)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:140 +#, python-format +msgid "Failed to access the device on the path %(path)s: %(error)s %(info)s." +msgstr "" + +#: cinder/brick/initiator/connector.py:229 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. Try" +" number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:242 +#, python-format +msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:317 +#, python-format +msgid "Could not find the iSCSI Initiator File %s" +msgstr "" + +#: cinder/brick/initiator/connector.py:609 +msgid "We are unable to locate any Fibre Channel devices" +msgstr "" + +#: cinder/brick/initiator/connector.py:619 +#, python-format +msgid "Looking for Fibre Channel dev %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:629 +msgid "Fibre Channel volume device not found." +msgstr "" + +#: cinder/brick/initiator/connector.py:633 +#, python-format +msgid "Fibre volume not yet found. Will rescan & retry. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:649 +#, python-format +msgid "Found Fibre Channel volume %(name)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/brick/initiator/connector.py:658 +#, python-format +msgid "Multipath device discovered %(device)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:776 +#, python-format +msgid "AoE volume not yet found at: %(path)s. Try number: %(tries)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:789 +#, python-format +msgid "Found AoE device %(path)s (after %(tries)s rediscover)" +msgstr "" + +#: cinder/brick/initiator/connector.py:815 +#, python-format +msgid "aoe-discover: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:825 +#, python-format +msgid "aoe-revalidate %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:834 +#, python-format +msgid "aoe-flush %(dev)s: stdout=%(out)s stderr%(err)s" +msgstr "" + +#: cinder/brick/initiator/connector.py:858 +msgid "" +"Connection details not present. RemoteFsClient may not initialize " +"properly." 
+msgstr "" + +#: cinder/brick/initiator/connector.py:915 +msgid "Invalid connection_properties specified no device_path attribute" +msgstr "" + +#: cinder/brick/initiator/linuxfc.py:50 cinder/brick/initiator/linuxfc.py:56 +msgid "systool is not installed" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:99 +#: cinder/brick/initiator/linuxscsi.py:107 +#: cinder/brick/initiator/linuxscsi.py:124 +#, python-format +msgid "multipath call failed exit (%(code)s)" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:145 +#, python-format +msgid "Couldn't find multipath device %(line)s" +msgstr "" + +#: cinder/brick/initiator/linuxscsi.py:149 +#, python-format +msgid "Found multipath device = %(mdev)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:140 +msgid "Attempting recreate of backing lun..." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:158 +#, python-format +msgid "" +"Failed to recover attempt to create iscsi backing lun for volume " +"id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:177 +#, python-format +msgid "Creating iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:184 +#, python-format +msgid "" +"Created volume path %(vp)s,\n" +"content: %(vc)%" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:216 cinder/brick/iscsi/iscsi.py:365 +#, python-format +msgid "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:227 +#, python-format +msgid "" +"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure " +"your tgtd config file contains 'include %(volumes_dir)s/*'" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:258 +#, python-format +msgid "Removing iscsi_target for: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:262 +#, python-format +msgid "Volume path %s does not exist, nothing to remove." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:280 +#, python-format +msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:290 cinder/brick/iscsi/iscsi.py:550 +msgid "valid iqn needed for show_target" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:375 +#, python-format +msgid "Removing iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:469 +msgid "cinder-rtstool is not installed correctly" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:489 +#, python-format +msgid "Creating iscsi_target for volume: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:513 cinder/brick/iscsi/iscsi.py:522 +#, python-format +msgid "Failed to create iscsi target for volume id:%s." +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:532 +#, python-format +msgid "Removing iscsi_target: %s" +msgstr "" + +#: cinder/brick/iscsi/iscsi.py:542 +#, python-format +msgid "Failed to remove iscsi target for volume id:%s." 
+msgstr "" + +#: cinder/brick/iscsi/iscsi.py:571 +#, python-format +msgid "Failed to add initiator iqn %s to target" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:75 +msgid "Error creating Volume Group" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:76 cinder/brick/local_dev/lvm.py:158 +#: cinder/brick/local_dev/lvm.py:474 cinder/brick/local_dev/lvm.py:503 +#: cinder/brick/local_dev/lvm.py:546 cinder/brick/local_dev/lvm.py:609 +#, python-format +msgid "Cmd :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:77 cinder/brick/local_dev/lvm.py:159 +#: cinder/brick/local_dev/lvm.py:475 cinder/brick/local_dev/lvm.py:504 +#: cinder/brick/local_dev/lvm.py:547 cinder/brick/local_dev/lvm.py:610 +#, python-format +msgid "StdOut :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:78 cinder/brick/local_dev/lvm.py:160 +#: cinder/brick/local_dev/lvm.py:476 cinder/brick/local_dev/lvm.py:505 +#: cinder/brick/local_dev/lvm.py:548 cinder/brick/local_dev/lvm.py:611 +#, python-format +msgid "StdErr :%s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:82 +#, python-format +msgid "Unable to locate Volume Group %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:157 +msgid "Error querying thin pool about data_percent" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:370 +#, python-format +msgid "Unable to find VG: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:420 +msgid "" +"Requested to setup thin provisioning, however current LVM version does " +"not support it." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:473 +msgid "Error creating Volume" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:489 +#, python-format +msgid "Unable to find LV: %s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:502 +msgid "Error creating snapshot" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:545 +msgid "Error activating LV" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:563 +#, python-format +msgid "Error reported running lvremove: CMD: %(command)s, RESPONSE: %(response)s" +msgstr "" + +#: cinder/brick/local_dev/lvm.py:568 +msgid "Attempting udev settle and retry of lvremove..." +msgstr "" + +#: cinder/brick/local_dev/lvm.py:608 +msgid "Error extending Volume" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:39 +msgid "nfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:45 +msgid "glusterfs_mount_point_base required" +msgstr "" + +#: cinder/brick/remotefs/remotefs.py:86 +#, python-format +msgid "Already mounted: %s" +msgstr "" + +#: cinder/common/config.py:125 +msgid "Deploy v1 of the Cinder API." +msgstr "" + +#: cinder/common/config.py:128 +msgid "Deploy v2 of the Cinder API." +msgstr "" + +#: cinder/common/sqlalchemyutils.py:66 +#: cinder/openstack/common/db/sqlalchemy/utils.py:72 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "" + +#: cinder/common/sqlalchemyutils.py:114 +#: cinder/openstack/common/db/sqlalchemy/utils.py:120 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: cinder/compute/nova.py:97 +#, python-format +msgid "Novaclient connection created using URL: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:63 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:190 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:843 +#, python-format +msgid "Change will make usage less than 0 for the following resources: %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:1842 +#, python-format +msgid "VolumeType %s deletion failed, VolumeType in use." 
+msgstr "" + +#: cinder/db/sqlalchemy/api.py:2530 +#, python-format +msgid "No backup with id %s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2615 +msgid "Volume must be available" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2639 +#, python-format +msgid "Volume in unexpected state %s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:2662 +#, python-format +msgid "" +"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " +"%(status)s, expected awaiting-transfer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:37 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:64 +msgid "Upgrade DB using Essex release first." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:240 +msgid "Exception while creating table." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:269 +msgid "Downgrade from initial Cinder install is unsupported." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:49 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:74 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:105 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:45 +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:48 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:46 +#, python-format +msgid "Table |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:127 +msgid "Dropping foreign key reservations_ibfk_1 failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:133 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:140 +msgid "quota_usages table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:147 +msgid "reservations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:60 +msgid "Exception while creating table 'volume_glance_metadata'" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:75 +msgid "volume_glance_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:68 +msgid "backups table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:58 +msgid "snapshot_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:61 +msgid "transfers table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:31 +msgid "migrations table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:61 +#, python-format +msgid "Table |%s| not created" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:37 +#, python-format +msgid "Exception while dropping table %s." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:100 +#, python-format +msgid "Exception while creating table %s." 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:34 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:43 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:49 +#, python-format +msgid "Column |%s| not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:92 +msgid "encryption_key_id column not dropped from volumes" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:100 +msgid "encryption_key_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:105 +msgid "volume_type_id column not dropped from snapshots" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:113 +msgid "encryption table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:49 +msgid "Table quality_of_service_specs not created!" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:60 +msgid "Added qos_specs_id column to volume type table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:85 +msgid "Dropping foreign key volume_types_ibfk_1 failed" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:93 +msgid "Dropping qos_specs_id column failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:100 +msgid "Dropping quality_of_service_specs table failed." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:59 +msgid "volume_admin_metadata table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:48 +msgid "" +"Found existing 'default' entries in the quota_classes table. Skipping " +"insertion of default values." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:72 +msgid "Added default quota class data into the DB." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:74 +msgid "Default quota class data not inserted into the DB." +msgstr "" + +#: cinder/image/glance.py:161 cinder/image/glance.py:169 +#, python-format +msgid "Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s." +msgstr "" + +#: cinder/image/image_utils.py:94 cinder/image/image_utils.py:199 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/image/image_utils.py:101 +#, python-format +msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:109 cinder/image/image_utils.py:192 +#, python-format +msgid "" +"Size is %(image_size)dGB and doesn't fit in a volume of size " +"%(volume_size)dGB." +msgstr "" + +#: cinder/image/image_utils.py:157 +#, python-format +msgid "" +"qemu-img is not installed and image is of type %s. Only RAW images can " +"be used if qemu-img is not installed." +msgstr "" + +#: cinder/image/image_utils.py:164 +msgid "" +"qemu-img is not installed and the disk format is not specified. Only RAW" +" images can be used if qemu-img is not installed." 
+msgstr "" + +#: cinder/image/image_utils.py:178 +#, python-format +msgid "Copying image from %(tmp)s to volume %(dest)s - size: %(size)s" +msgstr "" + +#: cinder/image/image_utils.py:206 +#, python-format +msgid "fmt=%(fmt)s backed by:%(backing_file)s" +msgstr "" + +#: cinder/image/image_utils.py:224 +#, python-format +msgid "Converted to %(vol_format)s, but format is now %(file_format)s" +msgstr "" + +#: cinder/image/image_utils.py:260 +#, python-format +msgid "Converted to %(f1)s, but format is now %(f2)s" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:78 +msgid "" +"config option keymgr.fixed_key has not been defined: some operations may " +"fail unexpectedly" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:80 +msgid "keymgr.fixed_key not defined" +msgstr "" + +#: cinder/keymgr/conf_key_mgr.py:134 +#, python-format +msgid "Not deleting key %s" +msgstr "" + +#: cinder/openstack/common/eventlet_backdoor.py:140 +#, python-format +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" +msgstr "" + +#: cinder/openstack/common/excutils.py:48 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: cinder/openstack/common/fileutils.py:64 +#, python-format +msgid "Reloading cached file %s" +msgstr "" + +#: cinder/openstack/common/gettextutils.py:252 +msgid "Message objects do not support addition." +msgstr "" + +#: cinder/openstack/common/gettextutils.py:261 +msgid "" +"Message objects do not support str() because they may contain non-ascii " +"characters. Please use unicode() or translate() instead." +msgstr "" + +#: cinder/openstack/common/imageutils.py:96 +msgid "Snapshot list encountered but no header found!" +msgstr "" + +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: cinder/openstack/common/lockutils.py:189 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:200 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:227 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/lockutils.py:235 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." +msgstr "" + +#: cinder/openstack/common/log.py:301 +#, python-format +msgid "Deprecated: %s" +msgstr "" + +#: cinder/openstack/common/log.py:402 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: cinder/openstack/common/log.py:453 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/openstack/common/log.py:623 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:82 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:89 +#: cinder/tests/brick/test_brick_connector.py:466 +msgid "in fixed duration looping call" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:129 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: cinder/openstack/common/loopingcall.py:136 +msgid "in dynamic looping call" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:43 +#, python-format +msgid "Unexpected argument for periodic task creation: %(arg)s." 
+msgstr "" + +#: cinder/openstack/common/periodic_task.py:134 +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:139 +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:177 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/openstack/common/periodic_task.py:186 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/openstack/common/policy.py:149 +#, python-format +msgid "" +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." +msgstr "" + +#: cinder/openstack/common/policy.py:163 +#, python-format +msgid "Failed to understand rule %(match)r" +msgstr "" + +#: cinder/openstack/common/policy.py:173 +#, python-format +msgid "Inheritance-based rules are deprecated; update _check_%s" +msgstr "" + +#: cinder/openstack/common/policy.py:180 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:127 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/openstack/common/processutils.py:142 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:167 +#: cinder/openstack/common/processutils.py:239 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:345 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:179 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/openstack/common/processutils.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:318 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/openstack/common/processutils.py:220 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/openstack/common/processutils.py:224 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/openstack/common/service.py:175 +#: cinder/openstack/common/service.py:269 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:187 +msgid "Exception during rpc cleanup." 
+msgstr "" + +#: cinder/openstack/common/service.py:238 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: cinder/openstack/common/service.py:275 +msgid "Unhandled exception" +msgstr "" + +#: cinder/openstack/common/service.py:308 +msgid "Forking too fast, sleeping" +msgstr "" + +#: cinder/openstack/common/service.py:327 +#, python-format +msgid "Started child %d" +msgstr "" + +#: cinder/openstack/common/service.py:337 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: cinder/openstack/common/service.py:354 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: cinder/openstack/common/service.py:358 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: cinder/openstack/common/service.py:362 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: cinder/openstack/common/service.py:392 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: cinder/openstack/common/service.py:410 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: cinder/openstack/common/sslutils.py:98 +#, python-format +msgid "Invalid SSL version : %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:86 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: cinder/openstack/common/strutils.py:182 +#, python-format +msgid "Invalid string format: %s" +msgstr "" + +#: cinder/openstack/common/strutils.py:189 +#, python-format +msgid "Unknown byte multiplier: %s" +msgstr "" + +#: cinder/openstack/common/versionutils.py:69 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and " +"may be removed in %(remove_in)s." +msgstr "" + +#: cinder/openstack/common/versionutils.py:73 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s and may be removed in " +"%(remove_in)s. It will not be superseded." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:29 +msgid "An unknown error occurred in crypto utils." +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:36 +#, python-format +msgid "Block size of %(given)d is too big, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/crypto/utils.py:45 +#, python-format +msgid "Length of %(given)d is too long, max = %(maximum)d" +msgstr "" + +#: cinder/openstack/common/db/exception.py:44 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:487 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:538 +#, python-format +msgid "Got mysql server has gone away: %s" +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/session.py:610 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/openstack/common/db/sqlalchemy/utils.py:33 +msgid "Sort key supplied was not valid." +msgstr "" + +#: cinder/openstack/common/notifier/api.py:129 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:145 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/openstack/common/notifier/api.py:164 +#, python-format +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. 
Please use rpc_notifier instead." +msgstr "" + +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 +#, python-format +msgid "Could not send notification to %(topic)s. Payload=%(message)s" +msgstr "" + +#: cinder/openstack/common/rpc/__init__.py:105 +#, python-format +msgid "" +"An RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:83 +msgid "Pool creating new connection" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:208 +#, python-format +msgid "no calling threads waiting for msg_id: %s, message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:216 +#, python-format +msgid "" +"Number of call waiters is greater than warning threshold: %d. There " +"could be a MulticallProxyWaiter leak." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:299 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:345 +#, python-format +msgid "UNIQUE_ID is %s." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "received %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:422 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:423 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:280 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:459 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:594 +#, python-format +msgid "Making synchronous call on %s ..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:597 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:631 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:640 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:668 +#, python-format +msgid "Sending %(event_type)s on %(topic)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:74 +msgid "An unknown RPC related exception occurred." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:104 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:121 +#, python-format +msgid "" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:151 +#, python-format +msgid "Specified RPC version, %(version)s, not supported by this endpoint." +msgstr "" + +#: cinder/openstack/common/rpc/common.py:156 +#, python-format +msgid "" +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." 
+msgstr "" + +#: cinder/openstack/common/rpc/common.py:280 +#, python-format +msgid "Failed to sanitize %(item)s. Key error %(err)s" +msgstr "" + +#: cinder/openstack/common/rpc/common.py:302 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:166 +#: cinder/openstack/common/rpc/impl_qpid.py:164 +msgid "Failed to process message... skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:477 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:499 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:536 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:552 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:606 +#: cinder/openstack/common/rpc/impl_qpid.py:507 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:624 +#: cinder/openstack/common/rpc/impl_qpid.py:522 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:628 +#: cinder/openstack/common/rpc/impl_qpid.py:526 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_kombu.py:667 +#: cinder/openstack/common/rpc/impl_qpid.py:561 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:84 +#, python-format +msgid "Invalid value for qpid_topology_version: %d" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:455 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:461 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:474 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/openstack/common/rpc/impl_qpid.py:534 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:101 +#, python-format +msgid "Deserializing: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:136 +#, python-format +msgid "Connecting to %(addr)s with %(type)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:137 +#, python-format +msgid "-> Subscribed to %(subscribe)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:138 +#, python-format +msgid "-> bind: %(bind)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:146 +msgid "Could not open socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:158 +#, python-format +msgid "Subscribing to %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:200 +msgid "You cannot recv on this socket." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:205 +msgid "You cannot send on this socket." 
+msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:267 +#, python-format +msgid "Running func with context: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:305 +msgid "Sending reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:387 +msgid "Consuming socket" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:437 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:443 +msgid "Topic contained dangerous characters." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:475 +msgid "Topic socket file creation failed." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:481 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:497 +#, python-format +msgid "Required IPC directory does not exist at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:506 +#, python-format +msgid "Permission denied to IPC directory at %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:509 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:543 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:562 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:590 +msgid "Skipping topic registration. Already registered." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:597 +#, python-format +msgid "Consumer is a zmq.%s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:649 +msgid "Creating payload" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:662 +msgid "Creating queue socket for reply waiter" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:675 +msgid "Sending cast" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:678 +msgid "Cast sent; Waiting reply" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:681 +#, python-format +msgid "Received message: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:682 +msgid "Unpacking response" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:691 +msgid "Unsupported or unknown ZMQ envelope returned." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:698 +msgid "RPC Message Invalid." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:721 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:724 +#, python-format +msgid "Sending message(s) to: %s" +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:728 +msgid "No matchmaker results. Not casting." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:731 +msgid "No match from matchmaker." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:771 +#, python-format +msgid "topic is %s." +msgstr "" + +#: cinder/openstack/common/rpc/impl_zmq.py:815 +#, python-format +msgid "rpc_zmq_matchmaker = %(orig)s is deprecated; use %(new)s instead" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." 
+msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:239 +#, python-format +msgid "Matchmaker unregistered: %s, %s" +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." +msgstr "" + +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#: cinder/openstack/common/rpc/matchmaker_ring.py:79 +#: cinder/openstack/common/rpc/matchmaker_ring.py:97 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:54 +#, python-format +msgid "extra_spec requirement '%(req)s' does not match '%(cap)s'" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:67 +#, python-format +msgid "%(host_state)s fails resource_type extra_specs requirements" +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:43 +msgid "Re-scheduling is disabled." +msgstr "" + +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:52 +#, python-format +msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s" +msgstr "" + +#: cinder/scheduler/driver.py:69 +msgid "Must implement host_passes_filters" +msgstr "" + +#: cinder/scheduler/driver.py:74 +msgid "Must implement find_retype_host" +msgstr "" + +#: cinder/scheduler/driver.py:78 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:82 +msgid "Must implement schedule_create_volume" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:98 +#, python-format +msgid "cannot place volume %(id)s on %(host)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:114 +#, python-format +msgid "No valid hosts for volume %(id)s with type %(type)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:125 +#, python-format +msgid "" +"Current host not valid for volume %(id)s with type %(type)s, migration " +"not allowed" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:156 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:174 +#, python-format +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:207 +#, python-format +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:259 +#, python-format +msgid "Filtered %s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:276 +#, python-format +msgid "Choosing %s" +msgstr "" + +#: cinder/scheduler/host_manager.py:264 +#, python-format +msgid "Ignoring %(service_name)s service update from %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:269 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:294 +#, python-format +msgid "volume service is down or disabled. (host: %s)" +msgstr "" + +#: cinder/scheduler/manager.py:63 +msgid "" +"ChanceScheduler and SimpleScheduler have been deprecated due to lack of " +"support for advanced features like: volume types, volume encryption, QoS " +"etc. These two schedulers can be fully replaced by FilterScheduler with " +"certain combination of filters and weighers." 
+msgstr "" + +#: cinder/scheduler/manager.py:98 cinder/scheduler/manager.py:100 +msgid "Failed to create scheduler manager volume flow" +msgstr "" + +#: cinder/scheduler/manager.py:159 +msgid "New volume type not specified in request_spec." +msgstr "" + +#: cinder/scheduler/manager.py:174 +#, python-format +msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." +msgstr "" + +#: cinder/scheduler/manager.py:192 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:68 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:78 +#, python-format +msgid "Could not decode scheduler options: '%s'" +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:43 +msgid "Free capacity not set: volume node info collection broken." +msgstr "" + +#: cinder/scheduler/filters/capacity_filter.py:57 +#, python-format +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:53 +msgid "No volume_id provided to populate a request_spec from" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:116 +#, python-format +msgid "Failed to schedule_create_volume: %(cause)s" +msgstr "" + +#: cinder/scheduler/flows/create_volume.py:135 +#, python-format +msgid "Failed notifying on %(topic)s payload %(payload)s" +msgstr "" + +#: cinder/tests/fake_driver.py:57 cinder/volume/driver.py:784 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:76 cinder/volume/driver.py:884 +#, python-format +msgid "FAKE ISER: %s" +msgstr "" + +#: cinder/tests/fake_driver.py:97 +msgid "local_path not implemented" +msgstr "" + +#: cinder/tests/fake_driver.py:124 cinder/tests/fake_driver.py:129 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/tests/fake_utils.py:70 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:78 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:94 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:97 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/test_misc.py:58 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_netapp_nfs.py:360 +#, python-format +msgid "Share %(share)s and file name %(file_name)s" +msgstr "" + +#: cinder/tests/test_rbd.py:768 cinder/volume/drivers/rbd.py:175 +msgid "flush() not supported in this version of librbd" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:260 +#, python-format +msgid "unrecognized argument %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1507 +#, python-format +msgid "Run CLI command: %s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1510 +#, python-format +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" +msgstr "" + +#: cinder/tests/test_storwize_svc.py:1515 +#, python-format +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/tests/test_volume_types.py:60 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:61 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: 
cinder/tests/test_xiv_ds8k.py:102 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/tests/api/contrib/test_backups.py:741 +msgid "Invalid input" +msgstr "" + +#: cinder/tests/integrated/test_login.py:29 +#, python-format +msgid "volume: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:32 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:42 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:50 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:58 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:100 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:103 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:121 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:148 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:159 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:166 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/transfer/api.py:68 +msgid "Volume in unexpected state" +msgstr "" + +#: cinder/transfer/api.py:102 cinder/volume/api.py:367 +msgid "status must be available" +msgstr "" + +#: cinder/transfer/api.py:119 +#, python-format +msgid "Failed to create transfer record for %s" +msgstr "" + +#: cinder/transfer/api.py:136 +#, python-format +msgid "Attempt to transfer %s with invalid auth key." +msgstr "" + +#: cinder/transfer/api.py:156 cinder/volume/flows/api/create_volume.py:508 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/transfer/api.py:182 +#, python-format +msgid "Failed to update quota donating volume transfer id %s" +msgstr "" + +#: cinder/transfer/api.py:199 +#, python-format +msgid "Volume %s has been transferred." 
+msgstr "" + +#: cinder/volume/api.py:143 +#, python-format +msgid "Unable to query if %s is in the availability zone set" +msgstr "" + +#: cinder/volume/api.py:171 cinder/volume/api.py:173 +msgid "Failed to create api volume flow" +msgstr "" + +#: cinder/volume/api.py:202 +msgid "Failed to update quota for deleting volume" +msgstr "" + +#: cinder/volume/api.py:214 +#, python-format +msgid "Volume status must be available or error, but current status is: %s" +msgstr "" + +#: cinder/volume/api.py:224 +msgid "Volume cannot be deleted while migrating" +msgstr "" + +#: cinder/volume/api.py:229 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:285 cinder/volume/api.py:350 +#: cinder/volume/qos_specs.py:240 cinder/volume/volume_types.py:67 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/volume/api.py:370 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:377 +msgid "status must be in-use to detach" +msgstr "" + +#: cinder/volume/api.py:388 +msgid "Volume status must be available to reserve" +msgstr "" + +#: cinder/volume/api.py:464 +msgid "Snapshot cannot be created while volume is migrating" +msgstr "" + +#: cinder/volume/api.py:468 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:490 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/api.py:502 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" +msgstr "" + +#: cinder/volume/api.py:553 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/api.py:581 cinder/volume/flows/api/create_volume.py:208 +msgid "Metadata property key blank" +msgstr "" + +#: cinder/volume/api.py:585 +msgid "Metadata property key greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:589 +msgid "Metadata property value greater than 255 characters" +msgstr "" + +#: cinder/volume/api.py:720 cinder/volume/api.py:772 +msgid "Volume status must be available/in-use." +msgstr "" + +#: cinder/volume/api.py:723 +msgid "Volume status is in-use." +msgstr "" + +#: cinder/volume/api.py:752 +msgid "Volume status must be available to extend." +msgstr "" + +#: cinder/volume/api.py:757 +#, python-format +msgid "" +"New size for extend must be greater than current size. (current: " +"%(size)s, extended: %(new_size)s)" +msgstr "" + +#: cinder/volume/api.py:778 +msgid "Volume is already part of an active migration" +msgstr "" + +#: cinder/volume/api.py:784 +msgid "volume must not have snapshots" +msgstr "" + +#: cinder/volume/api.py:797 +#, python-format +msgid "No available service named %s" +msgstr "" + +#: cinder/volume/api.py:803 +msgid "Destination host must be different than current host" +msgstr "" + +#: cinder/volume/api.py:833 +msgid "Source volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:837 +msgid "Destination volume not mid-migration." +msgstr "" + +#: cinder/volume/api.py:842 +#, python-format +msgid "Destination has migration_status %(stat)s, expected %(exp)s." +msgstr "" + +#: cinder/volume/api.py:853 +msgid "Volume status must be available to update readonly flag." 
+msgstr "" + +#: cinder/volume/api.py:862 +#, python-format +msgid "Unable to update type due to incorrect status on volume: %s" +msgstr "" + +#: cinder/volume/api.py:868 +#, python-format +msgid "Volume %s is already part of an active migration." +msgstr "" + +#: cinder/volume/api.py:874 +#, python-format +msgid "migration_policy must be 'on-demand' or 'never', passed: %s" +msgstr "" + +#: cinder/volume/api.py:887 +#, python-format +msgid "Invalid volume_type passed: %s" +msgstr "" + +#: cinder/volume/api.py:900 +#, python-format +msgid "New volume_type same as original: %s" +msgstr "" + +#: cinder/volume/api.py:915 +msgid "Retype cannot change encryption requirements" +msgstr "" + +#: cinder/volume/api.py:927 +msgid "Retype cannot change front-end qos specs for in-use volumes" +msgstr "" + +#: cinder/volume/driver.py:189 cinder/volume/drivers/netapp/nfs.py:174 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:282 +#, python-format +msgid "copy_data_between_volumes %(src)s -> %(dest)s." +msgstr "" + +#: cinder/volume/driver.py:295 cinder/volume/driver.py:309 +#, python-format +msgid "Failed to attach volume %(vol)s" +msgstr "" + +#: cinder/volume/driver.py:327 +#, python-format +msgid "Failed to copy volume %(src)s to %(dest)d" +msgstr "" + +#: cinder/volume/driver.py:340 +#, python-format +msgid "copy_image_to_volume %s." +msgstr "" + +#: cinder/volume/driver.py:358 +#, python-format +msgid "copy_volume_to_image %s." +msgstr "" + +#: cinder/volume/driver.py:394 +#, python-format +msgid "Unable to access the backend storage via the path %(path)s." +msgstr "" + +#: cinder/volume/driver.py:433 +#, python-format +msgid "Creating a new backup for volume %s." +msgstr "" + +#: cinder/volume/driver.py:451 +#, python-format +msgid "Restoring backup %(backup)s to volume %(volume)s." +msgstr "" + +#: cinder/volume/driver.py:474 +msgid "Extend volume not implemented" +msgstr "" + +#: cinder/volume/driver.py:533 cinder/volume/drivers/emc/emc_smis_iscsi.py:113 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:546 +#, python-format +msgid "ISCSI discovery attempt failed for:%s" +msgstr "" + +#: cinder/volume/driver.py:548 +#, python-format +msgid "Error from iscsiadm -m discovery: %s" +msgstr "" + +#: cinder/volume/driver.py:595 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:599 cinder/volume/drivers/emc/emc_smis_iscsi.py:156 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:696 +msgid "The volume driver requires the iSCSI initiator name in the connector." +msgstr "" + +#: cinder/volume/driver.py:726 cinder/volume/driver.py:845 +#: cinder/volume/drivers/eqlx.py:247 cinder/volume/drivers/lvm.py:359 +#: cinder/volume/drivers/zadara.py:650 +#: cinder/volume/drivers/emc/emc_smis_common.py:859 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:235 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:602 +#: cinder/volume/drivers/netapp/iscsi.py:1032 +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/nexenta/iscsi.py:538 +#: cinder/volume/drivers/windows/windows.py:205 +msgid "Updating volume stats" +msgstr "" + +#: cinder/volume/driver.py:924 +msgid "Driver must implement initialize_connection" +msgstr "" + +#: cinder/volume/manager.py:203 +#, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." 
+msgstr "" + +#: cinder/volume/manager.py:209 +msgid "" +"ThinLVMVolumeDriver is deprecated, please configure LVMISCSIDriver and " +"lvm_type=thin. Continuing with those settings." +msgstr "" + +#: cinder/volume/manager.py:228 +#, python-format +msgid "Starting volume driver %(driver_name)s (%(version)s)" +msgstr "" + +#: cinder/volume/manager.py:235 +#, python-format +msgid "Error encountered during initialization of driver: %(name)s" +msgstr "" + +#: cinder/volume/manager.py:244 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:257 +#, python-format +msgid "Failed to re-export volume %s: setting to error state" +msgstr "" + +#: cinder/volume/manager.py:264 +#, python-format +msgid "volume %s stuck in a downloading state" +msgstr "" + +#: cinder/volume/manager.py:271 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:273 +#, python-format +msgid "" +"Error encountered during re-exporting phase of driver initialization: " +"%(name)s" +msgstr "" + +#: cinder/volume/manager.py:283 +msgid "Resuming any in progress delete operations" +msgstr "" + +#: cinder/volume/manager.py:286 +#, python-format +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:328 cinder/volume/manager.py:330 +msgid "Failed to create manager volume flow" +msgstr "" + +#: cinder/volume/manager.py:374 cinder/volume/manager.py:391 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:380 +msgid "volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:389 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:394 +#, python-format +msgid "Cannot delete volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:422 +msgid "Failed to update usages deleting volume" +msgstr "" + +#: cinder/volume/manager.py:427 +#, python-format +msgid "volume %s: glance metadata deleted" +msgstr "" + +#: cinder/volume/manager.py:430 +#, python-format +msgid "no glance metadata found for volume %s" +msgstr "" + +#: cinder/volume/manager.py:434 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:451 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:462 +#, python-format +msgid "snapshot %(snap_id)s: creating" +msgstr "" + +#: cinder/volume/manager.py:490 +#, python-format +msgid "" +"Failed updating %(snapshot_id)s metadata using the provided volumes " +"%(volume_id)s metadata" +msgstr "" + +#: cinder/volume/manager.py:496 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:508 cinder/volume/manager.py:518 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:526 +#, python-format +msgid "Cannot delete snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:556 +msgid "Failed to update usages deleting snapshot" +msgstr "" + +#: cinder/volume/manager.py:559 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:579 +msgid "being attached by another instance" +msgstr "" + +#: cinder/volume/manager.py:583 +msgid "being attached by another host" +msgstr "" + +#: cinder/volume/manager.py:587 +msgid "being attached by different mode" +msgstr "" + +#: cinder/volume/manager.py:590 +msgid "status must be available or attaching" +msgstr "" + +#: cinder/volume/manager.py:698 +#, python-format +msgid 
"Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" +msgstr "" + +#: cinder/volume/manager.py:760 +#, python-format +msgid "Unable to fetch connection information from backend: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:807 +#, python-format +msgid "Unable to terminate volume connection: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:854 +msgid "failed to create new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:857 +msgid "timeout creating new_volume on destination host" +msgstr "" + +#: cinder/volume/manager.py:880 +#, python-format +msgid "Failed to copy volume %(vol1)s to %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:909 +#, python-format +msgid "" +"migrate_volume_completion: completing migration for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:921 +#, python-format +msgid "" +"migrate_volume_completion is cleaning up an error for volume %(vol1)s " +"(temporary volume %(vol2)s" +msgstr "" + +#: cinder/volume/manager.py:940 +#, python-format +msgid "Failed to delete migration source vol %(vol)s: %(err)s" +msgstr "" + +#: cinder/volume/manager.py:976 +#, python-format +msgid "volume %s: calling driver migrate_volume" +msgstr "" + +#: cinder/volume/manager.py:1016 +msgid "Updating volume status" +msgstr "" + +#: cinder/volume/manager.py:1024 +#, python-format +msgid "" +"Unable to update stats, %(driver_name)s -%(driver_version)s " +"%(config_group)s driver is uninitialized." +msgstr "" + +#: cinder/volume/manager.py:1044 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/manager.py:1091 +#, python-format +msgid "" +"Quota exceeded for %(s_pid)s, tried to extend volume by %(s_size)sG, " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" +msgstr "" + +#: cinder/volume/manager.py:1103 +#, python-format +msgid "volume %s: extending" +msgstr "" + +#: cinder/volume/manager.py:1105 +#, python-format +msgid "volume %s: extended successfully" +msgstr "" + +#: cinder/volume/manager.py:1107 +#, python-format +msgid "volume %s: Error trying to extend volume" +msgstr "" + +#: cinder/volume/manager.py:1169 +msgid "Failed to update usages while retyping volume." +msgstr "" + +#: cinder/volume/manager.py:1170 +msgid "Failed to get old volume type quota reservations" +msgstr "" + +#: cinder/volume/manager.py:1190 +#, python-format +msgid "Volume %s: retyped succesfully" +msgstr "" + +#: cinder/volume/manager.py:1193 +#, python-format +msgid "" +"Volume %s: driver error when trying to retype, falling back to generic " +"mechanism." +msgstr "" + +#: cinder/volume/manager.py:1204 +msgid "Retype requires migration but is not allowed." +msgstr "" + +#: cinder/volume/manager.py:1212 +msgid "Volume must not have snapshots." 
+msgstr "" + +#: cinder/volume/qos_specs.py:57 +#, python-format +msgid "Valid consumer of QoS specs are: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:84 cinder/volume/qos_specs.py:105 +#: cinder/volume/qos_specs.py:155 cinder/volume/qos_specs.py:197 +#: cinder/volume/qos_specs.py:211 cinder/volume/qos_specs.py:225 +#: cinder/volume/volume_types.py:43 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/volume/qos_specs.py:123 cinder/volume/qos_specs.py:140 +#: cinder/volume/qos_specs.py:272 cinder/volume/volume_types.py:52 +#: cinder/volume/volume_types.py:99 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/qos_specs.py:156 +#, python-format +msgid "Failed to get all associations of qos specs %s" +msgstr "" + +#: cinder/volume/qos_specs.py:189 +#, python-format +msgid "" +"Type %(type_id)s is already associated with another qos specs: " +"%(qos_specs_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:198 +#, python-format +msgid "Failed to associate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:212 +#, python-format +msgid "Failed to disassociate qos specs %(id)s with type: %(vol_type_id)s" +msgstr "" + +#: cinder/volume/qos_specs.py:226 +#, python-format +msgid "Failed to disassociate qos specs %s." +msgstr "" + +#: cinder/volume/qos_specs.py:284 cinder/volume/volume_types.py:111 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/utils.py:144 +#, python-format +msgid "" +"Incorrect value error: %(blocksize)s, it may indicate that " +"'volume_dd_blocksize' was configured incorrectly. Fall back to default." +msgstr "" + +#: cinder/volume/volume_types.py:130 +#, python-format +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:131 +#: cinder/volume/drivers/block_device.py:143 cinder/volume/drivers/lvm.py:654 +#: cinder/volume/drivers/lvm.py:669 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:157 cinder/volume/drivers/lvm.py:687 +#, python-format +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:183 cinder/volume/drivers/lvm.py:483 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:200 cinder/volume/drivers/lvm.py:504 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:272 cinder/volume/drivers/lvm.py:227 +#, python-format +msgid "Performing secure delete on volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:287 +#, python-format +msgid "Error unrecognized volume_clear option: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:311 cinder/volume/drivers/lvm.py:300 +#: cinder/volume/drivers/zadara.py:509 cinder/volume/drivers/nexenta/nfs.py:189 +#, python-format +msgid "Creating clone of volume: %s" +msgstr "" + +#: cinder/volume/drivers/block_device.py:380 +msgid "No free disk" +msgstr "" + +#: cinder/volume/drivers/block_device.py:393 +msgid "No big enough free disk" +msgstr "" + +#: cinder/volume/drivers/coraid.py:84 +#, python-format +msgid "Invalid ESM url scheme \"%s\". Supported https only." +msgstr "" + +#: cinder/volume/drivers/coraid.py:111 +msgid "Invalid REST handle name. Expected path." 
+msgstr "" + +#: cinder/volume/drivers/coraid.py:134 +#, python-format +msgid "Call to json.loads() failed: %(ex)s. Response: %(resp)s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:224 +msgid "Session is expired. Relogin on ESM." +msgstr "" + +#: cinder/volume/drivers/coraid.py:244 +msgid "Reply is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:246 +msgid "Error message is empty." +msgstr "" + +#: cinder/volume/drivers/coraid.py:284 +#, python-format +msgid "Coraid Appliance ping failed: %s" +msgstr "" + +#: cinder/volume/drivers/coraid.py:297 +#, python-format +msgid "Volume \"%(name)s\" created with VSX LUN \"%(lun)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:311 +#, python-format +msgid "Volume \"%s\" deleted." +msgstr "" + +#: cinder/volume/drivers/coraid.py:315 +#, python-format +msgid "Resize volume \"%(name)s\" to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:319 +#, python-format +msgid "Repository for volume \"%(name)s\" found: \"%(repo)s\"" +msgstr "" + +#: cinder/volume/drivers/coraid.py:333 +#, python-format +msgid "Volume \"%(name)s\" resized. New size is %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/coraid.py:385 +msgid "Cannot create clone volume in different repository." +msgstr "" + +#: cinder/volume/drivers/coraid.py:505 +#, python-format +msgid "Initialize connection %(shelf)s/%(lun)s for %(name)s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:139 +#, python-format +msgid "" +"CLI output\n" +"%s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:154 +msgid "Reading CLI MOTD" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:158 +#, python-format +msgid "Setting CLI terminal width: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:162 +#, python-format +msgid "Sending CLI command: '%s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:169 +msgid "Error executing EQL command" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:199 +#, python-format +msgid "EQL-driver: executing \"%s\"" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:208 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:383 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:215 cinder/volume/drivers/san/san.py:149 +#, python-format +msgid "Error running SSH command: %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:282 +#, python-format +msgid "Volume %s does not exist, it may have already been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:300 +#, python-format +msgid "EQL-driver: Setup is complete, group IP is %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:304 +msgid "Failed to setup the Dell EqualLogic driver" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:320 +#, python-format +msgid "Failed to create volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:329 +#, python-format +msgid "Volume %s was not found while trying to delete it" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:333 +#, python-format +msgid "Failed to delete volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:348 +#, python-format +msgid "Failed to create snapshot of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:361 +#, python-format +msgid "Failed to create volume from snapshot %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:374 +#, python-format +msgid "Failed to create clone of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:384 +#, python-format +msgid "Failed to delete snapshot %(snap)s of volume %(vol)s" +msgstr "" + +#: 
cinder/volume/drivers/eqlx.py:405 +#, python-format +msgid "Failed to initialize connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:415 +#, python-format +msgid "Failed to terminate connection to volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:436 +#, python-format +msgid "Volume %s was not found; it may have been deleted" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:440 +#, python-format +msgid "Failed to ensure export of volume %s" +msgstr "" + +#: cinder/volume/drivers/eqlx.py:459 +#, python-format +msgid "Failed to extend_volume %(name)s from %(current_size)sGB to %(new_size)sGB" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:86 +#, python-format +msgid "There's no Gluster config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:91 +#, python-format +msgid "Gluster config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:103 +msgid "mount.glusterfs is not installed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:161 +#, python-format +msgid "Cloning volume %(src)s to volume %(dst)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:166 +msgid "Volume status must be 'available'." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:202 cinder/volume/drivers/nfs.py:122 +#: cinder/volume/drivers/netapp/nfs.py:753 +#, python-format +msgid "casted to %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:216 +msgid "Snapshot status must be \"available\" to clone." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:238 +#, python-format +msgid "snapshot: %(snap)s, volume: %(vol)s, volume_size: %(size)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:257 +#, python-format +msgid "will copy from snapshot at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:275 cinder/volume/drivers/nfs.py:172 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:373 +#, python-format +msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:403 +#, python-format +msgid "nova call result: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:405 +msgid "Call to Nova to create snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:427 +msgid "Nova returned \"error\" status while creating snapshot." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:431 +#, python-format +msgid "Status of snapshot %(id)s is now %(status)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:444 +#, python-format +msgid "Timed out while waiting for Nova update for creation of snapshot %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:456 +#, python-format +msgid "create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:457 +#, python-format +msgid "volume id: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:532 +msgid "'active' must be present when writing snap_info." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:562 +#, python-format +msgid "deleting snapshot %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:566 +msgid "Volume status must be \"available\" or \"in-use\"." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:582 +#, python-format +msgid "" +"Snapshot record for %s is not present, allowing snapshot_delete to " +"proceed."
+msgstr "" + +#: cinder/volume/drivers/glusterfs.py:587 +#, python-format +msgid "snapshot_file for this snap is %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:608 +#, python-format +msgid "No base file found for %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:625 +#, python-format +msgid "No %(base_id)s found for %(file)s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:680 +#, python-format +msgid "No file found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:690 +#, python-format +msgid "No snap found with %s as backing file." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:701 +#, python-format +msgid "No file depends on %s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:727 +#, python-format +msgid "Check condition failed: %s expected to be None." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:778 +msgid "Call to Nova delete snapshot failed" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:796 +#, python-format +msgid "status of snapshot %s is still \"deleting\"... waiting" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:802 +#, python-format +msgid "Unable to delete snapshot %(id)s, status: %(status)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:815 +#, python-format +msgid "Timed out while waiting for Nova update for deletion of snapshot %(id)s." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:904 +#, python-format +msgid "%s must be a valid raw or qcow2 image." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:967 +msgid "Extend volume is only supported for this driver when no snapshots exist." +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:975 +#, python-format +msgid "Unrecognized backing format: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:990 +#, python-format +msgid "creating new volume at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:993 +#, python-format +msgid "file already exists at %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1019 cinder/volume/drivers/nfs.py:159 +#, python-format +msgid "Exception during mounting %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1021 +#, python-format +msgid "Available shares: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:1038 +#, python-format +msgid "" +"GlusterFS share at %(dir)s is not writable by the Cinder volume service. " +"Snapshot operations will not be supported." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:96 +#, python-format +msgid "GPFS is not active. Detailed output: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:97 +#, python-format +msgid "GPFS is not running - state: %s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:140 +msgid "Option gpfs_mount_point_base is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:147 +msgid "Option gpfs_images_share_mode is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:153 +msgid "Option gpfs_images_dir is not set correctly." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:160 +#, python-format +msgid "" +"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " +"belong to different file systems" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:169 +#, python-format +msgid "" +"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in " +"cluster daemon level %(cur)s - must be at least at level %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:183 +#, python-format +msgid "%s must be an absolute path." 
+msgstr "" + +#: cinder/volume/drivers/gpfs.py:188 +#, python-format +msgid "%s is not a directory." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:197 +#, python-format +msgid "" +"The GPFS filesystem %(fs)s is not at the required release level. Current" +" level is %(cur)s, must be at least %(min)s." +msgstr "" + +#: cinder/volume/drivers/gpfs.py:556 +#, python-format +msgid "Failed to resize volume %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:604 +#, python-format +msgid "mkfs failed on volume %(vol)s, error message was: %(err)s" +msgstr "" + +#: cinder/volume/drivers/gpfs.py:637 +#, python-format +msgid "" +"%s cannot be accessed. Verify that GPFS is active and file system is " +"mounted." +msgstr "" + +#: cinder/volume/drivers/lvm.py:189 +#, python-format +msgid "Unabled to delete due to existing snapshot for volume: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:215 +#, python-format +msgid "Volume device file path %s does not exist." +msgstr "" + +#: cinder/volume/drivers/lvm.py:221 +#, python-format +msgid "Size for volume: %s not found, cannot secure delete." +msgstr "" + +#: cinder/volume/drivers/lvm.py:262 +#, python-format +msgid "snapshot: %s not found, skipping delete operations" +msgstr "" + +#: cinder/volume/drivers/lvm.py:361 +#, python-format +msgid "Unable to update stats on non-initialized Volume Group: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:462 +#, python-format +msgid "Error creating iSCSI target, retrying creation for target: %s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:482 +#, python-format +msgid "volume_info:%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:518 +msgid "Detected inconsistency in provider_location id" +msgstr "" + +#: cinder/volume/drivers/lvm.py:519 cinder/volume/drivers/lvm.py:724 +#: cinder/volume/drivers/huawei/rest_common.py:1225 +#, python-format +msgid "%s" +msgstr "" + +#: cinder/volume/drivers/lvm.py:573 +#, python-format +msgid "Symbolic link %s not found" +msgstr "" + +#: cinder/volume/drivers/nfs.py:109 +msgid "Driver specific implementation needs to return mount_point_base." +msgstr "" + +#: cinder/volume/drivers/nfs.py:263 +#, python-format +msgid "Expected volume size was %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:264 +#, python-format +msgid " but size is now %d" +msgstr "" + +#: cinder/volume/drivers/nfs.py:361 +#, python-format +msgid "%s is already mounted" +msgstr "" + +#: cinder/volume/drivers/nfs.py:421 +#, python-format +msgid "There's no NFS config file configured (%s)" +msgstr "" + +#: cinder/volume/drivers/nfs.py:426 +#, python-format +msgid "NFS config file at %(config)s doesn't exist" +msgstr "" + +#: cinder/volume/drivers/nfs.py:431 +#, python-format +msgid "NFS config 'nfs_oversub_ratio' invalid. Must be > 0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:439 +#, python-format +msgid "NFS config 'nfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" +msgstr "" + +#: cinder/volume/drivers/nfs.py:493 +#, python-format +msgid "Selected %s as target nfs share." 
+msgstr "" + +#: cinder/volume/drivers/nfs.py:526 +#, python-format +msgid "%s is above nfs_used_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:529 +#, python-format +msgid "%s is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/nfs.py:532 +#, python-format +msgid "%s reserved space is above nfs_oversub_ratio" +msgstr "" + +#: cinder/volume/drivers/rbd.py:160 +#, python-format +msgid "Invalid argument - whence=%s not supported" +msgstr "" + +#: cinder/volume/drivers/rbd.py:164 +msgid "Invalid argument" +msgstr "" + +#: cinder/volume/drivers/rbd.py:183 +msgid "fileno() not supported by RBD()" +msgstr "" + +#: cinder/volume/drivers/rbd.py:210 +#, python-format +msgid "error opening rbd image %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:259 +msgid "rados and rbd python libraries not found" +msgstr "" + +#: cinder/volume/drivers/rbd.py:265 +msgid "error connecting to ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:346 cinder/volume/drivers/sheepdog.py:178 +msgid "error refreshing volume stats" +msgstr "" + +#: cinder/volume/drivers/rbd.py:377 +#, python-format +msgid "clone depth exceeds limit of %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:411 +#, python-format +msgid "maximum clone depth (%d) has been reached - flattening source volume" +msgstr "" + +#: cinder/volume/drivers/rbd.py:423 +#, python-format +msgid "flattening source volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:435 +#, python-format +msgid "creating snapshot='%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:445 +#, python-format +msgid "cloning '%(src_vol)s@%(src_snap)s' to '%(dest)s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:459 +msgid "clone created successfully" +msgstr "" + +#: cinder/volume/drivers/rbd.py:468 +#, python-format +msgid "creating volume '%s'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:484 +#, python-format +msgid "flattening %(pool)s/%(img)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:490 +#, python-format +msgid "cloning %(pool)s/%(img)s@%(snap)s to %(dst)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:527 +msgid "volume has no backup snaps" +msgstr "" + +#: cinder/volume/drivers/rbd.py:550 +#, python-format +msgid "volume %s is not a clone" +msgstr "" + +#: cinder/volume/drivers/rbd.py:568 +#, python-format +msgid "deleting parent snapshot %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:579 +#, python-format +msgid "deleting parent %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:593 +#, python-format +msgid "volume %s no longer exists in backend" +msgstr "" + +#: cinder/volume/drivers/rbd.py:609 +msgid "volume has clone snapshot(s)" +msgstr "" + +#: cinder/volume/drivers/rbd.py:625 +#, python-format +msgid "deleting rbd volume %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:629 +msgid "" +"ImageBusy error raised while deleting rbd volume. This may have been " +"caused by a connection from a client that has crashed and, if so, may be " +"resolved by retrying the delete after 30 seconds has elapsed." 
+msgstr "" + +#: cinder/volume/drivers/rbd.py:642 +msgid "volume is a clone so cleaning references" +msgstr "" + +#: cinder/volume/drivers/rbd.py:696 +#, python-format +msgid "connection data: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:705 +msgid "Not stored in rbd" +msgstr "" + +#: cinder/volume/drivers/rbd.py:709 +msgid "Blank components" +msgstr "" + +#: cinder/volume/drivers/rbd.py:712 +msgid "Not an rbd snapshot" +msgstr "" + +#: cinder/volume/drivers/rbd.py:724 +#, python-format +msgid "not cloneable: %s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:728 +#, python-format +msgid "%s is in a different ceph cluster" +msgstr "" + +#: cinder/volume/drivers/rbd.py:733 +msgid "rbd image clone requires image format to be 'raw' but image {0} is '{1}'" +msgstr "" + +#: cinder/volume/drivers/rbd.py:747 +#, python-format +msgid "Unable to open image %(loc)s: %(err)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:817 +msgid "volume backup complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:830 +msgid "volume restore complete." +msgstr "" + +#: cinder/volume/drivers/rbd.py:840 cinder/volume/drivers/sheepdog.py:195 +#, python-format +msgid "Failed to Extend Volume %(volname)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:845 cinder/volume/drivers/sheepdog.py:200 +#: cinder/volume/drivers/windows/windows.py:223 +#, python-format +msgid "Extend volume from %(old_size)s GB to %(new_size)s GB." +msgstr "" + +#: cinder/volume/drivers/scality.py:67 +msgid "Value required for 'scality_sofs_config'" +msgstr "" + +#: cinder/volume/drivers/scality.py:78 +#, python-format +msgid "Cannot access 'scality_sofs_config': %s" +msgstr "" + +#: cinder/volume/drivers/scality.py:84 +msgid "Cannot execute /sbin/mount.sofs" +msgstr "" + +#: cinder/volume/drivers/scality.py:105 +msgid "Cannot mount Scality SOFS, check syslog for errors" +msgstr "" + +#: cinder/volume/drivers/scality.py:139 +#, python-format +msgid "Cannot find volume dir for Scality SOFS at '%s'" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:59 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/drivers/sheepdog.py:64 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:144 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:151 +#, python-format +msgid "" +"Failed to make httplib connection SolidFire Cluster: %s (verify san_ip " +"settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:154 +#, python-format +msgid "Failed to make httplib connection: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:161 +#, python-format +msgid "" +"Request to SolidFire cluster returned bad status: %(status)s / %(reason)s" +" (check san_login/san_password settings)" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:166 +#, python-format +msgid "HTTP request failed, with status: %(status)s and reason: %(reason)s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:177 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:183 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:187 +#, python-format +msgid "Clone operation encountered: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:189 +#, python-format +msgid "Waiting for outstanding operation before retrying snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:195 +#, python-format +msgid "Detected 
xDBVersionMismatch, retry %s of 5" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:202 +#: cinder/volume/drivers/solidfire.py:271 +#: cinder/volume/drivers/solidfire.py:366 +#, python-format +msgid "API response: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:222 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:253 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:315 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:398 +msgid "Failed to get model update from clone" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:410 +#, python-format +msgid "Failed volume create: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:425 +#, python-format +msgid "More than one valid preset was detected, using %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:460 +#, python-format +msgid "Failed to get SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:469 +#, python-format +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:478 +#, python-format +msgid "Volume %s, not found on SF Cluster." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:481 +#, python-format +msgid "Found %(count)s volumes mapped to id: %(uuid)s." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:550 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:554 +#, python-format +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:556 +msgid "This usually means the volume was never successfully created." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:569 +#, python-format +msgid "Failed to delete SolidFire Volume: %s" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:572 +#: cinder/volume/drivers/solidfire.py:646 +#: cinder/volume/drivers/solidfire.py:709 +#: cinder/volume/drivers/solidfire.py:734 +#, python-format +msgid "Volume ID %s was not found on the SolidFire Cluster!" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:575 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:579 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:587 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:638 +msgid "Entering SolidFire extend_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:660 +msgid "Leaving SolidFire extend_volume" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:665 +msgid "Updating cluster status info" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:673 +msgid "Failed to get updated stats" +msgstr "" + +#: cinder/volume/drivers/solidfire.py:703 +#: cinder/volume/drivers/solidfire.py:728 +msgid "Entering SolidFire attach_volume..." +msgstr "" + +#: cinder/volume/drivers/solidfire.py:773 +msgid "Leaving SolidFire transfer volume" +msgstr "" + +#: cinder/volume/drivers/zadara.py:236 +#, python-format +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" +msgstr "" + +#: cinder/volume/drivers/zadara.py:260 +#, python-format +msgid "Operation completed. 
%(data)s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:357 +#, python-format +msgid "Pool %(name)s: %(total)sGB total, %(free)sGB free" +msgstr "" + +#: cinder/volume/drivers/zadara.py:408 cinder/volume/drivers/zadara.py:531 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" +msgstr "" + +#: cinder/volume/drivers/zadara.py:438 +#, python-format +msgid "Create snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:445 cinder/volume/drivers/zadara.py:490 +#: cinder/volume/drivers/zadara.py:516 +#, python-format +msgid "Volume %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:456 +#, python-format +msgid "Delete snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:464 +#, python-format +msgid "snapshot: original volume %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:472 +#, python-format +msgid "snapshot: snapshot %s not found, skipping delete operation" +msgstr "" + +#: cinder/volume/drivers/zadara.py:483 +#, python-format +msgid "Creating volume from snapshot: %s" +msgstr "" + +#: cinder/volume/drivers/zadara.py:496 +#, python-format +msgid "Snapshot %(name)s not found" +msgstr "" + +#: cinder/volume/drivers/zadara.py:614 +#, python-format +msgid "Attach properties: %(properties)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:40 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:79 +msgid "Entering create_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:83 +#, python-format +msgid "Create Volume: %(volume)s Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:91 +#, python-format +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:98 +#, python-format +msgid "" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:107 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:115 +#, python-format +msgid "" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:130 +#, python-format +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:137 +#, python-format +msgid "" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:144 +#, python-format +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:152 +msgid "Entering create_volume_from_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:157 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:167 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:177 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:188 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:197 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:218 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:230 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:241 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:257 +#, python-format +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:266 +#, python-format +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:278 +#, python-format +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:287 +msgid "Entering create_cloned_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:292 +#, python-format +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:302 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:312 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:321 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:342 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. 
Return code: %(rc)lu.Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:354 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:365 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:381 +#, python-format +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:390 +#, python-format +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:402 +#, python-format +msgid "" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:411 +msgid "Entering delete_volume." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:413 +#, python-format +msgid "Delete Volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:420 +#, python-format +msgid "Volume %(name)s not found on the array. No volume to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:430 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:438 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:442 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:456 +#, python-format +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:465 +#, python-format +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:472 +msgid "Entering create_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:476 +#, python-format +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:488 +#, python-format +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:497 +#: cinder/volume/drivers/emc/emc_smis_common.py:567 +#, python-format +msgid "Cannot find Replication Service to create snapshot for volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:502 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:518 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:527 +#, python-format +msgid "" +"Error Create Snapshot: %(snapshot)s Volume: %(volume)s Error: " +"%(errordesc)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:535 +#, python-format +msgid "" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:541 +msgid "Entering delete_snapshot." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:545 +#, python-format +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:551 +#, python-format +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:559 +#, python-format +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:574 +#, python-format +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:590 +#, python-format +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:599 +#, python-format +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:611 +#, python-format +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:621 +#, python-format +msgid "Create export: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:626 +#, python-format +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:648 +#, python-format +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:663 +#, python-format +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:674 +#, python-format +msgid "Error mapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:678 +#, python-format +msgid "ExposePaths for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:694 +#, python-format +msgid "" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:707 +#, python-format +msgid "Error unmapping volume %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:711 +#, python-format +msgid "HidePaths for volume %s completed successfully." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:724 +#, python-format +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:739 +#, python-format +msgid "Error mapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:744 +#, python-format +msgid "AddMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:757 +#, python-format +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:770 +#, python-format +msgid "Error unmapping volume %(vol)s. %(error)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:775 +#, python-format +msgid "RemoveMembers for volume %s completed successfully." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:781 +#, python-format +msgid "Map volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:790 +#: cinder/volume/drivers/emc/emc_smis_common.py:820 +#, python-format +msgid "Cannot find Controller Configuration Service for storage system %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:804 +#, python-format +msgid "Unmap volume: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:810 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:834 +#, python-format +msgid "Initialize connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:840 +#, python-format +msgid "Volume %s is already mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:852 +#, python-format +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:884 +#, python-format +msgid "Found Storage Type: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:887 +msgid "Storage type not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:903 +#, python-format +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:906 +msgid "Masking View not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:928 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:948 +#, python-format +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:952 +msgid "Ecom server not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:959 +msgid "Cannot connect to ECOM server" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:971 +#, python-format +msgid "Found Replication Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:984 +#, python-format +msgid "Found Storage Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:997 +#, python-format +msgid "Found Controller Configuration Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1010 +#, python-format +msgid "Found Storage Hardware ID Management Service: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1054 +#, python-format +msgid "Pool %(storage_type)s is not found." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1060 +#, python-format +msgid "Storage system not found for pool %(storage_type)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1066 +#, python-format +msgid "Pool: %(pool)s SystemName: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1082 +#, python-format +msgid "Pool name: %(poolname)s System name: %(systemname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1114 +#, python-format +msgid "Volume %(volumename)s not found on the array." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1117 +#, python-format +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1130 +#, python-format +msgid "Source: %(volumename)s Target: %(snapshotname)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 +#, python-format +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1158 +#, python-format +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1184 +#, python-format +msgid "Error finding %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1188 +#, python-format +msgid "Found %(name)s: %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1248 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1289 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1302 +#, python-format +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1314 +#, python-format +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1326 +#, python-format +msgid "" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1361 +#, python-format +msgid "Available device number on %(storage)s: %(device)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1404 +#, python-format +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1409 +#, python-format +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1419 +#, python-format +msgid "Device info: %(data)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1441 +#, python-format +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1463 +#, python-format +msgid "Found Storage Processor System: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1491 +#, python-format +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1520 +msgid "Error finding Storage Hardware ID Service." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1526 +#, python-format +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1538 +msgid "Error finding Target WWNs." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1548 +#, python-format +msgid "Add target WWN: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1550 +#, python-format +msgid "Target WWNs: %s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1566 +#, python-format +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:152 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:161 +#, python-format +msgid "Cannot find device number for volume %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:191 +#, python-format +msgid "Found iSCSI endpoint: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:198 +#, python-format +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:215 +#, python-format +msgid "ISCSI properties: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:70 +#, python-format +msgid "Range: start LU: %(start)s, end LU: %(end)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:84 +#, python-format +msgid "setting LU upper (end) limit to %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:92 +#, python-format +msgid "%(element)s: %(val)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:103 cinder/volume/drivers/hds/hds.py:105 +#, python-format +msgid "XML exception reading parameter: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:178 +#, python-format +msgid "portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:197 +#, python-format +msgid "No configuration found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:250 +#, python-format +msgid "HDP not found: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:289 +#, python-format +msgid "iSCSI portal not found for service: %s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:327 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:355 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is cloned." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:372 +#, python-format +msgid "LUN %(lun)s extended to %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:395 +#, python-format +msgid "delete lun %(lun)s on %(name)s" +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:480 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created from snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:503 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is created as snapshot." +msgstr "" + +#: cinder/volume/drivers/hds/hds.py:522 +#, python-format +msgid "LUN %s is deleted." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:57 +msgid "_instantiate_driver: configuration not found." 
+msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:64 +#, python-format +msgid "" +"_instantiate_driver: Loading %(protocol)s driver for Huawei OceanStor " +"%(product)s series storage arrays." +msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:84 +#, python-format +msgid "" +"\"Product\" or \"Protocol\" is illegal. \"Product\" should be set to " +"either T, Dorado or HVS. \"Protocol\" should be set to either iSCSI or " +"FC. Product: %(product)s Protocol: %(protocol)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:74 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s host: %(host)s initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_dorado.py:92 +#: cinder/volume/drivers/huawei/huawei_t.py:461 +#, python-format +msgid "initialize_connection: Target FC ports WWNS: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:101 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(ini)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:159 +#: cinder/volume/drivers/huawei/rest_common.py:1278 +#, python-format +msgid "" +"_get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " +"check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:206 +#: cinder/volume/drivers/huawei/rest_common.py:1083 +#, python-format +msgid "_get_tgt_iqn: iSCSI IP is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:234 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:248 +#, python-format +msgid "" +"_get_iscsi_tgt_port_info: Failed to get iSCSI port info. Please make sure" +" the iSCSI port IP %s is configured in array." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:323 +#: cinder/volume/drivers/huawei/huawei_t.py:552 +#, python-format +msgid "" +"terminate_connection: volume: %(vol)s, host: %(host)s, connector: " +"%(initiator)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:351 +#, python-format +msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:436 +msgid "validate_connector: The FC driver requires thewwpns in the connector." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:443 +#, python-format +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(wwn)s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:578 +#, python-format +msgid "_remove_fc_ports: FC port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:40 +#, python-format +msgid "parse_xml_file: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_utils.py:129 +#, python-format +msgid "_get_host_os_type: Host %(ip)s OS type is %(os)s." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:59 +#, python-format +msgid "HVS Request URL: %(url)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:60 +#, python-format +msgid "HVS Request Data: %(data)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:73 +#, python-format +msgid "HVS Response Data: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:75 +#, python-format +msgid "Bad response from server: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:82 +msgid "JSON transfer error" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:102 +#, python-format +msgid "Login error, reason is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:166 +#, python-format +msgid "" +"%(err)s\n" +"result: %(res)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:173 +#, python-format +msgid "%s \"data\" was not in result." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:208 +msgid "Can't find the Qos policy in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:246 +msgid "Can't find lun or lun group in array" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:280 +#, python-format +msgid "Invalid resource pool: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:298 +#, python-format +msgid "Get pool info error, pool name is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:327 +#, python-format +msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:354 +#, python-format +msgid "_stop_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:474 +#, python-format +msgid "" +"_mapping_hostgroup_and_lungroup: lun_group: %(lun_group)sview_id: " +"%(view_id)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:511 +#: cinder/volume/drivers/huawei/rest_common.py:543 +#, python-format +msgid "initiator name:%(initiator_name)s, volume name:%(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:527 +#, python-format +msgid "host lun id is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:553 +#, python-format +msgid "the free wwns %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:574 +#, python-format +msgid "the fc server properties is:%s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:688 +#, python-format +msgid "JSON transfer data error. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:874 +#, python-format +msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:937 +#, python-format +msgid "" +"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". " +"LUNType:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:964 +#, python-format +msgid "" +"PrefetchType config is wrong. PrefetchType must in 1,2,3,4. fetchtype " +"is:%(fetchtype)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:970 +msgid "Use default prefetch fetchtype. Prefetch fetchtype:Intelligent." 
+msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:982 +#, python-format +msgid "" +"_wait_for_luncopy:LUNcopy status is not normal.LUNcopy name: " +"%(luncopyname)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1056 +#, python-format +msgid "" +"_get_iscsi_port_info: Failed to get iscsi port info through config IP " +"%(ip)s, please check config file." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1101 +#, python-format +msgid "_get_tgt_iqn: iSCSI target iqn is %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1124 +#, python-format +msgid "_parse_volume_type: type id: %(type_id)s config parameter is: %(params)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1157 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the configuration file " +"%(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1162 +#, python-format +msgid "The config parameters are: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1239 +#: cinder/volume/drivers/huawei/ssh_common.py:118 +#: cinder/volume/drivers/huawei/ssh_common.py:1265 +#, python-format +msgid "_check_conf_file: Config file invalid. %s must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1246 +#: cinder/volume/drivers/huawei/ssh_common.py:125 +msgid "_check_conf_file: Config file invalid. StoragePool must be set." +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1256 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:1300 +msgid "Can not find lun in array" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:54 +#, python-format +msgid "ssh_read: Read SSH timeout. %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:70 +msgid "No response message. Please check system status." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:101 +#: cinder/volume/drivers/huawei/ssh_common.py:1249 +msgid "do_setup" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:135 +#: cinder/volume/drivers/huawei/ssh_common.py:1287 +#, python-format +msgid "" +"_check_conf_file: Config file invalid. Host OSType is invalid.\n" +"The valid values are: %(os_list)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:169 +#, python-format +msgid "_get_login_info: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:224 +#, python-format +msgid "create_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:242 +#, python-format +msgid "" +"_name_translate: Name in cinder: %(old)s, new name in storage system: " +"%(new)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:279 +#, python-format +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the element in configuration " +"file %(conf)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:373 +#: cinder/volume/drivers/huawei/ssh_common.py:1451 +#, python-format +msgid "LUNType must be \"Thin\" or \"Thick\". LUNType:%(type)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:395 +msgid "" +"_parse_conf_lun_params: Use default prefetch type. 
Prefetch type: " +"Intelligent" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:421 +#, python-format +msgid "" +"_get_maximum_capacity_pool_id: Failed to get pool id. Please check config" +" file and make sure the StoragePool %s is created in storage array." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:436 +#, python-format +msgid "CLI command: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:466 +#, python-format +msgid "" +"_execute_cli: Can not connect to IP %(old)s, try to connect to the other " +"IP %(new)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:501 +#, python-format +msgid "_execute_cli: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:511 +#, python-format +msgid "delete_volume: volume name: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:516 +#, python-format +msgid "delete_volume: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:570 +#, python-format +msgid "" +"create_volume_from_snapshot: snapshot name: %(snapshot)s, volume name: " +"%(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:580 +#, python-format +msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:650 +#, python-format +msgid "_wait_for_luncopy: LUNcopy %(luncopyname)s status is %(status)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:688 +#, python-format +msgid "create_cloned_volume: src volume: %(src)s, tgt volume: %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:697 +#, python-format +msgid "Source volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:739 +#, python-format +msgid "" +"extend_volume: extended volume name: %(extended_name)s new added volume " +"name: %(added_name)s new added volume size: %(added_size)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:747 +#, python-format +msgid "extend_volume: volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:779 +#, python-format +msgid "create_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:785 +msgid "create_snapshot: Resource pool needs 1GB valid size at least." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:792 +#, python-format +msgid "create_snapshot: Volume %(name)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:855 +#, python-format +msgid "delete_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:865 +#, python-format +msgid "" +"delete_snapshot: Can not delete snapshot %s for it is a source LUN of " +"LUNCopy." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:873 +#, python-format +msgid "delete_snapshot: Snapshot %(snap)s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:916 +#, python-format +msgid "" +"%(func)s: %(msg)s\n" +"CLI command: %(cmd)s\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:933 +#, python-format +msgid "map_volume: Volume %s was not found." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1079 +#, python-format +msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1102 +#, python-format +msgid "remove_map: Host %s does not exist." 
+msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1106 +#, python-format +msgid "remove_map: Volume %s does not exist." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1119 +#, python-format +msgid "remove_map: No map between host %(host)s and volume %(volume)s." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1138 +#, python-format +msgid "" +"_delete_map: There are IOs accessing the system. Retry to delete host map" +" %(mapid)s 10s later." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1146 +#, python-format +msgid "" +"_delete_map: Failed to delete host map %(mapid)s.\n" +"CLI out: %(out)s" +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1185 +msgid "_update_volume_stats: Updating volume stats." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1277 +msgid "_check_conf_file: Config file invalid. StoragePool must be specified." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1311 +msgid "" +"_get_device_type: The driver only supports Dorado5100 and Dorado 2100 G2 " +"now." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1389 +#, python-format +msgid "" +"create_volume_from_snapshot: %(device)s does not support create volume " +"from snapshot." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1396 +#, python-format +msgid "create_cloned_volume: %(device)s does not support clone volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1404 +#, python-format +msgid "extend_volume: %(device)s does not support extend volume." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1413 +#, python-format +msgid "create_snapshot: %(device)s does not support snapshot." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:132 +msgid "enter: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:142 +#, python-format +msgid "Failed getting details for pool %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:179 +msgid "do_setup: No configured nodes." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:182 +msgid "leave: do_setup" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:186 +msgid "enter: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:190 +msgid "Unable to determine system name" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:193 +msgid "Unable to determine system id" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:196 +msgid "Unable to determine pool extent size" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:203 +#: cinder/volume/drivers/netapp/iscsi.py:122 +#: cinder/volume/drivers/netapp/nfs.py:639 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:157 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:209 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:217 +#, python-format +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:225 +msgid "leave: check_for_setup_error" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:235 +#, python-format +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:254 +msgid "The connector does not contain the required information." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:277 +#, python-format +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:297 +msgid "CHAP secret exists for host but CHAP is disabled" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:302 +#, python-format +msgid "initialize_connection: Failed to get attributes for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:314 +#, python-format +msgid "Did not find expected column name in lsvdisk: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:316 +#, python-format +msgid "initialize_connection: Missing volume attribute for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:333 +#, python-format +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:342 +#, python-format +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:365 +msgid "" +"Could not get FC connection information for the host-volume connection. " +"Is the host configured properly for FC connections?" 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:380 +#, python-format +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:385 +#, python-format +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:403 +#, python-format +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:411 +msgid "terminate_connection: Failed to get host name from connector." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:421 +#, python-format +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:447 +msgid "create_volume_from_snapshot: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:459 +msgid "create_cloned_volume: Source and destination size differ." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:470 +#, python-format +msgid "enter: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:474 +msgid "extend_volume: Extending a volume with snapshots is not supported." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:481 +#, python-format +msgid "leave: extend_volume: volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:497 +#, python-format +msgid "enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:523 +#, python-format +msgid "leave: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:540 +#, python-format +msgid "" +"enter: retype: id=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:581 +#, python-format +msgid "" +"exit: retype: ild=%(id)s, new_type=%(new_type)s,diff=%(diff)s, " +"host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:622 +msgid "Could not get pool data from the storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:623 +msgid "_update_volume_stats: Could not get storage pool data" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:44 +#, python-format +msgid "Could not find key in output of command %(cmd)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:64 +#, python-format +msgid "Failed to get code level (%s)." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:86 +#, python-format +msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:143 +#, python-format +msgid "WWPN on node %(node)s: %(wwpn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:165 +#, python-format +msgid "Failed to find host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:178 +#, python-format +msgid "enter: get_host_from_connector: %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:207 +#, python-format +msgid "leave: get_host_from_connector: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:218 +#, python-format +msgid "enter: create_host: host %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:224 +msgid "create_host: Host name is not unicode or string" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:235 +msgid "create_host: No initiators or wwpns supplied." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:265 +#, python-format +msgid "leave: create_host: host %(host)s - %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:275 +#, python-format +msgid "enter: map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:301 +#, python-format +msgid "" +"leave: map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host " +"%(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:311 +#, python-format +msgid "enter: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:318 +#, python-format +msgid "unmap_vol_from_host: No mapping of volume %(vol_name)s to any host found." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:324 +#, python-format +msgid "" +"unmap_vol_from_host: Multiple mappings of volume %(vol_name)s found, no " +"host specified." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:336 +#, python-format +msgid "" +"unmap_vol_from_host: No mapping of volume %(vol_name)s to host %(host) " +"found." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:348 +#, python-format +msgid "leave: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:377 +msgid "" +"Illegal value specified for storwize_svc_vol_rsize: set to either a " +"percentage (0-100) or -1" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:383 +msgid "" +"Illegal value specified for storwize_svc_vol_warning: set to a percentage" +" (0-100)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:390 +msgid "" +"Illegal value specified for storwize_svc_vol_grainsize: set to either 32," +" 64, 128, or 256" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:397 +msgid "System does not support compression" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:402 +msgid "If compression is set to True, rsize must also be set (not equal to -1)" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:408 +#, python-format +msgid "" +"Illegal value %(prot)s specified for storwize_svc_connection_protocol: " +"valid values are %(enabled)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:417 +#, python-format +msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:452 +msgid "Protocol must be specified as ' iSCSI' or ' FC'." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:495 +#, python-format +msgid "enter: create_vdisk: vdisk %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:498 +#, python-format +msgid "leave: _create_vdisk: volume %s " +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:525 +#, python-format +msgid "" +"Unexecpted mapping status %(status)s for mapping%(id)s. Attributes: " +"%(attr)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:535 +#, python-format +msgid "" +"Mapping %(id)s prepare failed to complete within theallotted %(to)d " +"seconds timeout. Terminating." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:544 +#, python-format +msgid "" +"enter: run_flashcopy: execute FlashCopy from source %(source)s to target " +"%(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:552 +#, python-format +msgid "leave: run_flashcopy: FlashCopy started from %(source)s to %(target)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:572 +#, python-format +msgid "Loopcall: _check_vdisk_fc_mappings(), vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:595 +#, python-format +msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:631 +#, python-format +msgid "Calling _ensure_vdisk_no_fc_mappings: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:639 +#, python-format +msgid "enter: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:641 +#, python-format +msgid "Tried to delete non-existant vdisk %s." 
+msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:645 +#, python-format +msgid "leave: delete_vdisk: vdisk %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:649 +#, python-format +msgid "enter: create_copy: snapshot %(src)s to %(tgt)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:654 +#, python-format +msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:669 +#, python-format +msgid "leave: _create_copy: snapshot %(tgt)s from vdisk %(src)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:691 +msgid "migrate_volume started without a vdisk copy in the expected pool." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:743 +#, python-format +msgid "" +"Ignore change IO group as storage code level is %(code_level)s, below " +"then 6.4.0.0" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:35 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:211 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:244 +#, python-format +msgid "" +"CLI Exception output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:53 +#, python-format +msgid "Expected no output from CLI command %(cmd)s, got %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:65 +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:256 +#, python-format +msgid "" +"Failed to parse CLI output:\n" +" command: %(cmd)s\n" +" stdout: %(out)s\n" +" stderr: %(err)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:142 +msgid "Must pass wwpn or host to lsfabric." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:158 +#, python-format +msgid "Did not find success message nor error for %(fun)s: %(out)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:163 +msgid "" +"storwize_svc_multihostmap_enabled is set to False, not allowing multi " +"host mapping." +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:347 +#, python-format +msgid "Did not find expected key %(key)s in %(fun)s: %(raw)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:382 +#, python-format +msgid "" +"Unexpected CLI response: header/row mismatch. header: %(header)s, row: " +"%(row)s" +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:419 +#, python-format +msgid "No element by given name %s." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:440 +msgid "Not a valid value for NaElement." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:444 +msgid "NaElement name cannot be null." +msgstr "" + +#: cinder/volume/drivers/netapp/api.py:468 +msgid "Type cannot be converted into NaElement." 
+msgstr "" + +#: cinder/volume/drivers/netapp/common.py:75 +msgid "Required configuration not found" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:103 +#, python-format +msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:109 +#, python-format +msgid "Storage family %s is not supported" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:116 +#, python-format +msgid "No default storage protocol found for storage family %(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:123 +#, python-format +msgid "" +"Protocol %(storage_protocol)s is not supported for storage family " +"%(storage_family)s" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:130 +#, python-format +msgid "" +"NetApp driver of family %(storage_family)s and protocol " +"%(storage_protocol)s loaded" +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:139 +msgid "Only loading netapp drivers supported." +msgstr "" + +#: cinder/volume/drivers/netapp/common.py:158 +#, python-format +msgid "" +"The configured NetApp driver is deprecated. Please refer the link to " +"resolve the issue '%s'." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:69 +#, python-format +msgid "No metadata property %(prop)s defined for the LUN %(name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:105 +#, python-format +msgid "Using NetApp filer: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:150 +msgid "Success getting LUN list from server" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:166 +#, python-format +msgid "Created LUN with name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:175 +#, python-format +msgid "No entry in LUN table for volume/snapshot %(name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:191 +#, python-format +msgid "Destroyed LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:227 +#, python-format +msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:232 +#, python-format +msgid "" +"Succesfully fetched target details for LUN %(name)s and initiator " +"%(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:238 +#, python-format +msgid "Failed to get LUN target details for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:249 +#, python-format +msgid "Failed to get target portal for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:252 +#, python-format +msgid "Failed to get target IQN for the LUN %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:290 +#, python-format +msgid "Snapshot %s deletion successful" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:310 +#: cinder/volume/drivers/netapp/iscsi.py:565 +#: cinder/volume/drivers/netapp/nfs.py:99 +#: cinder/volume/drivers/netapp/nfs.py:206 +#, python-format +msgid "Resizing %s failed. Cleaning volume." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:325 +#, python-format +msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:412 +#, python-format +msgid "Error mapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:431 +#, python-format +msgid "Error unmapping lun. Code :%(code)s, Message:%(message)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:511 +msgid "Object is not a NetApp LUN." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:543 +#, python-format +msgid "Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:545 +#, python-format +msgid "Error getting lun attribute. Exception: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:600 +#, python-format +msgid "No need to extend volume %s as it is already the requested new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:606 +#, python-format +msgid "Resizing lun %s directly to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:633 +#, python-format +msgid "Lun %(path)s geometry failed. Message - %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:662 +#, python-format +msgid "Moving lun %(name)s to %(new_name)s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:677 +#, python-format +msgid "Resizing lun %s using sub clone to new size." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:684 +#, python-format +msgid "%s cannot be sub clone resized as it is hosted on compressed volume" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:690 +#, python-format +msgid "%s cannot be sub clone resized as it contains no blocks." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:707 +#, python-format +msgid "Post clone resize lun %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:718 +#, python-format +msgid "Failure staging lun %s to tmp." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:723 +#, python-format +msgid "Failure moving new cloned lun to %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:727 +#, python-format +msgid "Failure deleting staged tmp lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:730 +#, python-format +msgid "Unknown exception in post clone resize lun %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:732 +#, python-format +msgid "Exception details: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:736 +msgid "Getting lun block count." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:741 +#, python-format +msgid "Failure getting lun info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:785 +#, python-format +msgid "Failed to get vol with required size and extra specs for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:796 +#, python-format +msgid "Error provisioning vol %(name)s on %(volume)s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:841 +#, python-format +msgid "No iscsi service found for vserver %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:982 +#, python-format +msgid "Cloned LUN with new name %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:986 +#, python-format +msgid "No clonned lun named %s found on the filer" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1087 +msgid "Cluster ssc is not updated. No volume stats found." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1149 +#: cinder/volume/drivers/netapp/nfs.py:1080 +msgid "Unsupported ONTAP version. ONTAP version 7.3.1 and above is supported." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1153 +#: cinder/volume/drivers/netapp/nfs.py:1084 +#: cinder/volume/drivers/netapp/utils.py:320 +msgid "Api version could not be determined." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1164 +#, python-format +msgid "Failed to get vol with required size for volume: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1273 +#, python-format +msgid "Error finding luns for volume %s. Verify volume exists." 
+msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1390 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s completed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1393 +#, python-format +msgid "Clone operation with src %(name)s and dest %(new_name)s failed" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1456 +msgid "Volume refresh job already running. Returning..." +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1462 +#, python-format +msgid "Error refreshing vol capacity. Message: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/iscsi.py:1470 +#, python-format +msgid "Refreshing capacity info for %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:104 +#: cinder/volume/drivers/netapp/nfs.py:211 +#, python-format +msgid "NFS file %s not discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:224 +#, python-format +msgid "Copied image to volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:230 +#, python-format +msgid "Registering image in cache %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:237 +#, python-format +msgid "" +"Exception while registering image %(image_id)s in cache. Exception: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:250 +#, python-format +msgid "Found cache file for image %(image_id)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:263 +#, python-format +msgid "Cloning img from cache for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:271 +msgid "Image cache cleaning in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:282 +msgid "Image cache cleaning in progress." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:293 +#, python-format +msgid "Cleaning cache for share %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:298 +#, python-format +msgid "Files to be queued for deletion %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:305 +#, python-format +msgid "Exception during cache cleaning %(share)s. Message - %(ex)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:311 +msgid "Image cache cleaning done." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:336 +#, python-format +msgid "Bytes to free %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:343 +#, python-format +msgid "Delete file path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:358 +#, python-format +msgid "Deleting file at path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:363 +#, python-format +msgid "Exception during deleting %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:395 +#, python-format +msgid "Unexpected exception in cloning image %(image_id)s. 
Message: %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:411 +#, python-format +msgid "Cloning image %s from cache" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:415 +#, python-format +msgid "Cache share: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:425 +#, python-format +msgid "Unexpected exception during image cloning in share %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:431 +#, python-format +msgid "Cloning image %s directly in share" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:436 +#, python-format +msgid "Share is cloneable %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:443 +#, python-format +msgid "Image is raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:450 +#, python-format +msgid "Image will locally be converted to raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:457 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:467 +#, python-format +msgid "Performing post clone for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:474 +msgid "NFS file could not be discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:478 +msgid "Checking file for resize" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:482 +#, python-format +msgid "Resizing file to %sG" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:488 +msgid "Resizing image file failed." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:510 +msgid "Discover file retries exhausted." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:529 +#, python-format +msgid "Image location not in the expected format %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:557 +#, python-format +msgid "Found possible share matches %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:561 +msgid "Unexpected exception while short listing used share." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:599 +#, python-format +msgid "Extending volume %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:710 +#, python-format +msgid "Shares on vserver %s will only be used for provisioning." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:714 +#: cinder/volume/drivers/netapp/nfs.py:892 +msgid "No vserver set in config. SSC will be disabled." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:757 +#, python-format +msgid "Exception creating vol %(name)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:765 +#, python-format +msgid "Volume %s could not be created on shares." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:815 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:856 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:862 +#, python-format +msgid "" +"Cloning with params volume %(volume)s, src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:924 +msgid "No cluster ssc stats found. Wait for next volume stats update." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:941 +msgid "No shares found hence skipping ssc refresh." 
+msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:978 +#: cinder/volume/drivers/netapp/nfs.py:1221 +#, python-format +msgid "Shortlisted del elg files %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:983 +#: cinder/volume/drivers/netapp/nfs.py:1226 +#, python-format +msgid "Getting file usage for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:988 +#: cinder/volume/drivers/netapp/nfs.py:1231 +#, python-format +msgid "file-usage for path %(path)s is %(bytes)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1005 +#: cinder/volume/drivers/netapp/nfs.py:1268 +#, python-format +msgid "Share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1007 +#: cinder/volume/drivers/netapp/nfs.py:1270 +#, python-format +msgid "No share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1038 +#, python-format +msgid "Found volume %(vol)s for share %(share)s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1129 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1139 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:241 +#, python-format +msgid "Unexpected error while creating ssc vol list. Message - %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:272 +#, python-format +msgid "Exception querying aggr options. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:313 +#, python-format +msgid "Exception querying sis information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:347 +#, python-format +msgid "Exception querying mirror information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:379 +#, python-format +msgid "Exception querying storage disk. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:421 +#, python-format +msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:455 +#, python-format +msgid "Successfully completed stale refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:482 +#, python-format +msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:488 +#, python-format +msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:500 +msgid "Backend not a VolumeDriver." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:502 +msgid "Backend server not NaServer." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:505 +msgid "ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:517 +msgid "refresh stale ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:616 +msgid "Fatal error: User not permitted to query NetApp volumes." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:623 +#, python-format +msgid "" +"The user does not have access or sufficient privileges to use all ssc " +"apis. The ssc features %s may not work as expected." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:122 +msgid "ems executed successfully." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:124 +#, python-format +msgid "Failed to invoke ems. 
Message: %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/utils.py:137
+msgid ""
+"This is not the recommended way to use NetApp drivers. Please use "
+"NetAppDriver to achieve the functionality."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/utils.py:160
+msgid "Requires an NaServer instance."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/utils.py:317
+msgid "Unsupported Clustered Data ONTAP version."
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:99
+#, python-format
+msgid "Volume %s does not exist in Nexenta SA"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:150
+#, python-format
+msgid "Extending volume: %(id)s New size: %(size)s GB"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:166
+#, python-format
+msgid "Volume %s does not exist, it seems it was already deleted."
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:179
+#, python-format
+msgid "Cannot delete snapshot %(origin)s: %(exc)s"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:190
+#, python-format
+msgid "Creating temp snapshot of the original volume: %(volume_name)s@%(name)s"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:200
+#: cinder/volume/drivers/nexenta/nfs.py:200
+#, python-format
+msgid "Volume creation failed, deleting created snapshot %(volume_name)s@%(name)s"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:205
+#: cinder/volume/drivers/nexenta/nfs.py:205
+#, python-format
+msgid "Failed to delete zfs snapshot %(volume_name)s@%(name)s"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:223
+#, python-format
+msgid "Enter: migrate_volume: id=%(id)s, host=%(host)s"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:250
+#, python-format
+msgid "Remote NexentaStor appliance at %s should be SSH-bound."
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:267
+#, python-format
+msgid ""
+"Cannot send source snapshot %(src)s to destination %(dst)s. Reason: "
+"%(exc)s"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:275
+#, python-format
+msgid ""
+"Cannot delete temporary source snapshot %(src)s on NexentaStor Appliance:"
+" %(exc)s"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:281
+#, python-format
+msgid "Cannot delete source volume %(volume)s on NexentaStor Appliance: %(exc)s"
+msgstr ""
+
+#: cinder/volume/drivers/nexenta/iscsi.py:318
+#, python-format
+msgid "Snapshot %s does not exist, it seems it was already deleted."
+msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:439 +#: cinder/volume/drivers/windows/windows_utils.py:230 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:449 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:461 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:471 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:481 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:514 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:522 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:83 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:88 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:89 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:90 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:96 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:85 +#, python-format +msgid "Volume %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:89 +#, python-format +msgid "Folder %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:114 +#, python-format +msgid "Creating folder on Nexenta Store %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:146 +#, python-format +msgid "Cannot destroy created folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:176 +#, python-format +msgid "Cannot destroy cloned folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:227 +#, python-format +msgid "Folder %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:237 +#: cinder/volume/drivers/nexenta/nfs.py:268 +#, python-format +msgid "Snapshot %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:302 +#, python-format +msgid "Creating regular file: %s.This may take some time." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:313 +#, python-format +msgid "Regular file: %s created." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:365 +#, python-format +msgid "Sharing folder %s on Nexenta Store" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:393 +#, python-format +msgid "Shares loaded: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/utils.py:46 +#, python-format +msgid "Invalid value: \"%s\"" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:93 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:99 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:107 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:137 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:190 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:246 +#, python-format +msgid "Snapshot info: %(name)s => %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:321 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:79 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:166 +#, python-format +msgid "Invalid hp3parclient version. Version %s or greater required." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:179 +#, python-format +msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:193 +#, python-format +msgid "HP3PARCommon %(common_ver)s, hp3parclient %(rest_ver)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:212 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:494 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:228 +#, python-format +msgid "Failed to get domain because CPG (%s) doesn't exist on array." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:247 +#, python-format +msgid "Error extending volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:347 +#, python-format +msgid "command %s failed" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:390 +#, python-format +msgid "Error running ssh command: %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:586 +#, python-format +msgid "VV Set %s does not exist." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:633 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:684 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:752 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1004 +#, python-format +msgid "Failure in update_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1019 +#, python-format +msgid "Failure in clear_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1031 +#, python-format +msgid "Error attaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1039 +#, python-format +msgid "Error detaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:124 +#, python-format +msgid "Invalid IP address format '%s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:158 +#, python-format +msgid "" +"Found invalid iSCSI IP address(s) in configuration option(s) " +"hp3par_iscsi_ips or iscsi_ip_address '%s.'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:164 +msgid "At least one valid iSCSI IP address must be set." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:266 +msgid "Least busy iSCSI port not found, using first iSCSI port in list." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:75 +#, python-format +msgid "Failure while invoking function: %(func)s. Error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:162 +#, python-format +msgid "Error while terminating session: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:165 +msgid "Successfully established connection to the server." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:172 +#, python-format +msgid "Error while logging out the user: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:218 +#, python-format +msgid "" +"Not authenticated error occurred. Will create session and try API call " +"again: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:258 +#, python-format +msgid "Task: %(task)s progress: %(prog)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:262 +#, python-format +msgid "Task %s status: success." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:266 +#: cinder/volume/drivers/vmware/api.py:271 +#, python-format +msgid "Task: %(task)s failed with error: %(err)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:290 +msgid "Lease is ready." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:294 +msgid "Lease initializing..." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:304 +#, python-format +msgid "Error: unknown lease state %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:51 +#, python-format +msgid "Read %(bytes)s out of %(max)s from ThreadSafePipe." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:56 +#, python-format +msgid "Completed transfer of size %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:102 +#, python-format +msgid "Initiating image service update on image: %(image)s with meta: %(meta)s" +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:117 +#, python-format +msgid "Glance image: %s is now active." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:123 +#, python-format +msgid "Glance image: %s is in killed state." 
+msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:132 +#, python-format +msgid "Glance image %(id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:171 +#, python-format +msgid "" +"Exception during HTTP connection close in VMwareHTTPWrite. Exception is " +"%s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:203 +#: cinder/volume/drivers/vmware/read_write_util.py:292 +msgid "Could not retrieve URL from lease." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:206 +#, python-format +msgid "Opening vmdk url: %s for write." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:231 +#, python-format +msgid "Written %s bytes to vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:242 +#: cinder/volume/drivers/vmware/read_write_util.py:318 +#, python-format +msgid "Updating progress to %s percent." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:258 +#: cinder/volume/drivers/vmware/read_write_util.py:334 +msgid "Lease released." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:260 +#: cinder/volume/drivers/vmware/read_write_util.py:336 +#, python-format +msgid "Lease is already in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:295 +#, python-format +msgid "Opening vmdk url: %s for read." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:307 +#, python-format +msgid "Read %s bytes from vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:150 +#, python-format +msgid "Error(s): %s occurred in the call to RetrievePropertiesEx." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:189 +#, python-format +msgid "No such SOAP method %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:198 +#, python-format +msgid "httplib error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:209 +#, python-format +msgid "Socket error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:218 +#, python-format +msgid "Type error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:225 +#, python-format +msgid "Error in %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:112 +#, python-format +msgid "Returning spec value %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:115 +#, python-format +msgid "Invalid spec value: %s specified." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:118 +#, python-format +msgid "Returning default spec value: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:169 +#, python-format +msgid "%s not set." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:174 +#, python-format +msgid "Successfully setup driver: %(driver)s for server: %(ip)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:219 +msgid "Backing not available, no operation to be performed." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:287 +#, python-format +msgid "" +"Unable to pick datastore to accommodate %(size)s bytes from the " +"datastores: %(dss)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:293 +#, python-format +msgid "" +"Selected datastore: %(datastore)s with %(host_count)d connected host(s) " +"for the volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:375 +#, python-format +msgid "" +"Unable to find suitable datastore for volume of size: %(vol)s GB under " +"host: %(host)s. 
More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:385 +#, python-format +msgid "Unable to find host to accommodate a disk of size: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:412 +#, python-format +msgid "" +"Unable to find suitable datastore for volume: %(vol)s under host: " +"%(host)s. More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:422 +#, python-format +msgid "Unable to create volume: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:441 +#, python-format +msgid "The instance: %s for which initialize connection is called, exists." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:448 +#, python-format +msgid "There is no backing for the volume: %s. Need to create one." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:456 +msgid "The instance for which initialize connection is called, does not exist." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:461 +#, python-format +msgid "Trying to boot from an empty volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:470 +#, python-format +msgid "" +"Returning connection_info: %(info)s for volume: %(volume)s with " +"connector: %(connector)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:518 +#, python-format +msgid "Snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:523 +#, python-format +msgid "There is no backing, so will not create snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:528 +#, python-format +msgid "Successfully created snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:549 +#, python-format +msgid "Delete snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:554 +#, python-format +msgid "There is no backing, and so there is no snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:558 +#, python-format +msgid "Successfully deleted snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:586 +#, python-format +msgid "Successfully cloned new backing: %(back)s from source VMDK file: %(vmdk)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:603 +#, python-format +msgid "" +"There is no backing for the source volume: %(svol)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:633 +#, python-format +msgid "" +"There is no backing for the source snapshot: %(snap)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:642 +#: cinder/volume/drivers/vmware/vmdk.py:982 +#, python-format +msgid "" +"There is no snapshot point for the snapshoted volume: %(snap)s. Not " +"creating any backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:678 +#, python-format +msgid "Cannot create image of disk format: %s. Only vmdk disk format is accepted." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:713 +#: cinder/volume/drivers/vmware/vmdk.py:771 +#, python-format +msgid "Fetching glance image: %(id)s to server: %(host)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:722 +#: cinder/volume/drivers/vmware/vmdk.py:792 +#, python-format +msgid "Done copying image: %(id)s to volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:725 +#, python-format +msgid "" +"Exception in copy_image_to_volume: %(excep)s. Deleting the backing: " +"%(back)s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:746 +#, python-format +msgid "Exception in _select_ds_for_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:749 +#, python-format +msgid "Selected datastore %(ds)s for new volume of size %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:784 +#, python-format +msgid "Exception in copy_image_to_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:787 +#, python-format +msgid "Deleting the backing: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:808 +#, python-format +msgid "Copy glance image: %s to create new volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:842 +msgid "Upload to glance of attached volume is not supported." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:847 +#, python-format +msgid "Copy Volume: %s to new image." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:853 +#, python-format +msgid "Backing not found, creating for volume: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:872 +#, python-format +msgid "Done copying volume %(vol)s to a new image %(img)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:922 +#, python-format +msgid "Relocating volume: %(backing)s to %(ds)s and %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:961 +#: cinder/volume/drivers/vmware/volumeops.py:630 +#, python-format +msgid "Successfully created clone: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:974 +#, python-format +msgid "" +"There is no backing for the snapshoted volume: %(snap)s. Not creating any" +" backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1010 +#, python-format +msgid "" +"There is no backing for the source volume: %(src)s. Not creating any " +"backing for volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1018 +#, python-format +msgid "Linked clone of source volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:94 +#, python-format +msgid "Downloading image: %s from glance image server as a flat vmdk file." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:107 +#: cinder/volume/drivers/vmware/vmware_images.py:126 +#, python-format +msgid "Downloaded image: %s from glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:113 +#, python-format +msgid "Downloading image: %s from glance image server using HttpNfc import." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:132 +#, python-format +msgid "Uploading image: %s to the Glance image server using HttpNfc export." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:158 +#, python-format +msgid "Uploaded image: %s to the Glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:87 +#, python-format +msgid "Did not find any backing with name: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:94 +#, python-format +msgid "Deleting the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:97 +#, python-format +msgid "Initiated deletion of VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:99 +#, python-format +msgid "Deleted the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:237 +#, python-format +msgid "There are no valid datastores attached to %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:289 +#, python-format +msgid "" +"Creating folder: %(child_folder_name)s under parent folder: " +"%(parent_folder)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:306 +#, python-format +msgid "Child folder already present: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:314 +#, python-format +msgid "Created child folder: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:365 +#, python-format +msgid "Spec for creating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:383 +#, python-format +msgid "" +"Creating volume backing name: %(name)s disk_type: %(disk_type)s size_kb: " +"%(size_kb)s at folder: %(folder)s resourse pool: %(resource_pool)s " +"datastore name: %(ds_name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:395 +#, python-format +msgid "Initiated creation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:398 +#, python-format +msgid "Successfully created volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:438 +#, python-format +msgid "Spec for relocating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:452 +#, python-format +msgid "" +"Relocating backing: %(backing)s to datastore: %(ds)s and resource pool: " +"%(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:462 +#, python-format +msgid "Initiated relocation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:464 +#, python-format +msgid "" +"Successfully relocated volume backing: %(backing)s to datastore: %(ds)s " +"and resource pool: %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:474 +#, python-format +msgid "Moving backing: %(backing)s to folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:479 +#, python-format +msgid "Initiated move of volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:482 +#, python-format +msgid "Successfully moved volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:494 +#, python-format +msgid "Snapshoting backing: %(backing)s with name: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:501 +#, python-format +msgid "Initiated snapshot of volume backing: %(backing)s named: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:505 +#, python-format +msgid "Successfully created snapshot: %(snap)s for volume backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:553 +#, python-format +msgid "Deleting the snapshot: %(name)s from backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:558 +#, python-format +msgid "" +"Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " +"delete anything." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:565 +#, python-format +msgid "Initiated snapshot: %(name)s deletion for backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:569 +#, python-format +msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:597 +#, python-format +msgid "Spec for cloning the backing: %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:613 +#, python-format +msgid "" +"Creating a clone of backing: %(back)s, named: %(name)s, clone type: " +"%(type)s from snapshot: %(snap)s on datastore: %(ds)s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:627 +#, python-format +msgid "Initiated clone of backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:638 +#, python-format +msgid "Deleting file: %(file)s under datacenter: %(dc)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:646 +#, python-format +msgid "Initiated deletion via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:648 +#, python-format +msgid "Successfully deleted file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:701 +msgid "Copying disk data before snapshot of the VM" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:711 +#, python-format +msgid "Initiated copying disk data via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:713 +#, python-format +msgid "Successfully copied disk at: %(src)s to: %(dest)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:722 +#, python-format +msgid "Deleting vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:729 +#, python-format +msgid "Initiated deleting vmdk file via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:731 +#, python-format +msgid "Deleted vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/windows/windows.py:102 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:47 +#, python-format +msgid "" +"check_for_setup_error: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:53 +msgid "check_for_setup_error: there is no ISCSI traffic listening." +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:63 +#, python-format +msgid "" +"get_host_information: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:73 +#, python-format +msgid "" +"get_host_information: the ISCSI target information could not be " +"retrieved. WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:105 +#, python-format +msgid "" +"associate_initiator_with_iscsi_target: an association between initiator: " +"%(init)s and target name: %(target)s could not be established. WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:123 +#, python-format +msgid "" +"delete_iscsi_target: error when deleting the iscsi target associated with" +" target name: %(target)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:139 +#, python-format +msgid "" +"create_volume: error when creating the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:157 +#, python-format +msgid "" +"delete_volume: error when deleting the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:177 +#, python-format +msgid "" +"create_snapshot: error when creating the snapshot name: %(vol_name)s . 
" +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:193 +#, python-format +msgid "" +"create_volume_from_snapshot: error when creating the volume name: " +"%(vol_name)s from snapshot name: %(snap_name)s. WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:208 +#, python-format +msgid "" +"delete_snapshot: error when deleting the snapshot name: %(snap_name)s . " +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:223 +#, python-format +msgid "" +"create_iscsi_target: error when creating iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:240 +#, python-format +msgid "" +"remove_iscsi_target: error when deleting iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:255 +#, python-format +msgid "" +"add_disk_to_target: error adding disk associated to volume : %(vol_name)s" +" to the target name: %(tar_name)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:273 +#, python-format +msgid "" +"copy_vhd_disk: error when copying disk from source path : %(src_path)s to" +" destination path: %(dest_path)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:290 +#, python-format +msgid "" +"extend: error when extending the volume: %(vol_name)s .WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/flows/common.py:52 +#, python-format +msgid "Restoring source %(source_volid)s status to %(status)s" +msgstr "" + +#: cinder/volume/flows/common.py:58 +#, python-format +msgid "" +"Failed setting source volume %(source_volid)s back to its initial " +"%(source_status)s status" +msgstr "" + +#: cinder/volume/flows/common.py:83 +#, python-format +msgid "Updating volume: %(volume_id)s with %(update)s due to: %(reason)s" +msgstr "" + +#: cinder/volume/flows/common.py:90 +#: cinder/volume/flows/manager/create_volume.py:676 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(update)s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:81 +#, python-format +msgid "Originating snapshot status must be one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:103 +#, python-format +msgid "" +"Unable to create a volume from an originating source volume when its " +"status is not one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:126 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than the snapshot size " +"%(snap_size)sGB. They must be >= original snapshot size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:135 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than original volume size " +"%(source_size)sGB. They must be >= original volume size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:144 +#, python-format +msgid "Volume size %(size)s must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:186 +#, python-format +msgid "" +"Size of specified image %(image_size)sGB is larger than volume size " +"%(volume_size)sGB." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:194 +#, python-format +msgid "" +"Volume size %(volume_size)sGB cannot be smaller than the image minDisk " +"size %(min_disk)sGB." 
+msgstr "" + +#: cinder/volume/flows/api/create_volume.py:212 +#, python-format +msgid "Metadata property key %s greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:217 +#, python-format +msgid "Metadata property key %s value greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:254 +#, python-format +msgid "Availability zone '%s' is invalid" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:267 +msgid "Volume must be in the same availability zone as the snapshot" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:276 +msgid "Volume must be in the same availability zone as the source volume" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:315 +msgid "Volume type will be changed to be the same as the source volume." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:463 +#, python-format +msgid "Failed destroying volume entry %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:546 +#, python-format +msgid "Failed rolling back quota for %s reservations" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:590 +#, python-format +msgid "Failed to update quota for deleting volume: %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:678 +#: cinder/volume/flows/manager/create_volume.py:192 +#, python-format +msgid "Volume %s: create failed" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:682 +msgid "Unexpected build error:" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:105 +#, python-format +msgid "" +"Volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d due to " +"%(reason)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:124 +#, python-format +msgid "Volume %s: re-scheduled" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:141 +#, python-format +msgid "Updating volume %(volume_id)s with %(update)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:146 +#, python-format +msgid "Volume %s: resetting 'creating' status failed." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:165 +#, python-format +msgid "Volume %s: rescheduling failed" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:308 +#, python-format +msgid "" +"Failed notifying about the volume action %(event)s for volume " +"%(volume_id)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:345 +#, python-format +msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:347 +#, python-format +msgid "" +"Failed updating volume %(vol_id)s metadata using the provided " +"%(src_type)s %(src_id)s metadata" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:405 +#, python-format +msgid "" +"Failed fetching snapshot %(snapshot_id)s bootable flag using the provided" +" glance snapshot %(snapshot_ref_id)s volume reference" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:418 +#, python-format +msgid "Marking volume %s as bootable." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:421 +#, python-format +msgid "Failed updating volume %(volume_id)s bootable flag to true" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:448 +#, python-format +msgid "" +"Attempting download of %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s." 
+msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:455 +#: cinder/volume/flows/manager/create_volume.py:466 +#, python-format +msgid "" +"Failed to copy image %(image_id)s to volume: %(volume_id)s, error: " +"%(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:461 +#, python-format +msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:475 +#, python-format +msgid "" +"Downloaded image %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s successfully." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:512 +#, python-format +msgid "" +"Creating volume glance metadata for volume %(volume_id)s backed by image " +"%(image_id)s with: %(vol_metadata)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:526 +#, python-format +msgid "" +"Cloning %(volume_id)s from image %(image_id)s at location " +"%(image_location)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:552 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(updates)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:574 +#, python-format +msgid "Unable to create volume. Volume driver %s not initialized" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:588 +#, python-format +msgid "" +"Volume %(volume_id)s: being created using %(functor)s with specification:" +" %(volume_spec)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:611 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with creation provided " +"model %(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:619 +#, python-format +msgid "Volume %s: creating export" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:633 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with driver provided model " +"%(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:680 +#, python-format +msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" +msgstr "" + +#~ msgid "Error retrieving volume status: %s" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get system name" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get storage pool data" +#~ msgstr "" + +#~ msgid "Cannot find any Fibre Channel HBAs" +#~ msgstr "" + +#~ msgid "Volume status must be available or error" +#~ msgstr "" + +#~ msgid "No backend config with id %s" +#~ msgstr "" + +#~ msgid "No sm_flavor called %s" +#~ msgstr "" + +#~ msgid "No sm_volume with id %s" +#~ msgstr "" + +#~ msgid "Error: %s" +#~ msgstr "" + +#~ msgid "Unexpected state while cloning %s" +#~ msgstr "" + +#~ msgid "iSCSI device not found at %s" +#~ msgstr "" + +#~ msgid "Fibre Channel device not found." +#~ msgstr "" + +#~ msgid "Uncaught exception" +#~ msgstr "" + +#~ msgid "Out reactor registered" +#~ msgstr "" + +#~ msgid "CONSUMER GOT %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT QUEUED %(data)s" +#~ msgstr "" + +#~ msgid "Could not create IPC directory %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT %(data)s" +#~ msgstr "" + +#~ msgid "May specify only one of snapshot, imageRef or source volume" +#~ msgstr "" + +#~ msgid "Volume size cannot be lesser than the Snapshot size" +#~ msgstr "" + +#~ msgid "Unable to clone volumes that are in an error state" +#~ msgstr "" + +#~ msgid "Clones currently must be >= original volume size." 
+#~ msgstr "" + +#~ msgid "Volume size '%s' must be an integer and greater than 0" +#~ msgstr "" + +#~ msgid "Size of specified image is larger than volume size." +#~ msgstr "" + +#~ msgid "Image minDisk size is larger than the volume size." +#~ msgstr "" + +#~ msgid "" +#~ msgstr "" + +#~ msgid "Availability zone is invalid" +#~ msgstr "" + +#~ msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +#~ msgstr "" + +#~ msgid "volume %s: creating from snapshot" +#~ msgstr "" + +#~ msgid "volume %s: creating from existing volume" +#~ msgstr "" + +#~ msgid "volume %s: creating from image" +#~ msgstr "" + +#~ msgid "volume %s: creating" +#~ msgstr "" + +#~ msgid "Setting volume: %s status to error after failed image copy." +#~ msgstr "" + +#~ msgid "Unexpected Error: " +#~ msgstr "" + +#~ msgid "volume %s: creating export" +#~ msgstr "" + +#~ msgid "volume %s: create failed" +#~ msgstr "" + +#~ msgid "volume %s: created successfully" +#~ msgstr "" + +#~ msgid "volume %s: Error trying to reschedule create" +#~ msgstr "" + +#~ msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +#~ msgstr "" + +#~ msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +#~ msgstr "" + +#~ msgid "Downloaded image %(image_id)s to %(volume_id)s successfully." +#~ msgstr "" + +#~ msgid "Array Mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "LUN %(lun)s of size %(size)s MB is created." +#~ msgstr "" + +#~ msgid "Array mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "Failed to attach iser target for volume %(volume_id)s." +#~ msgstr "" + +#~ msgid "Fetching %s" +#~ msgstr "" + +#~ msgid "Link Local address is not found.:%s" +#~ msgstr "" + +#~ msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +#~ msgstr "" + +#~ msgid "Started %(name)s on %(_host)s:%(_port)s" +#~ msgstr "" + +#~ msgid "Unable to find a Fibre Channel volume device" +#~ msgstr "" + +#~ msgid "Volume device not found at %s" +#~ msgstr "" + +#~ msgid "Unable to find Volume Group: %s" +#~ msgstr "" + +#~ msgid "Failed to create Volume Group: %s" +#~ msgstr "" + +#~ msgid "snapshot %(snap_name)s: creating" +#~ msgstr "" + +#~ msgid "Running with CoraidDriver for ESM EtherCLoud" +#~ msgstr "" + +#~ msgid "Update session cookie %(session)s" +#~ msgstr "" + +#~ msgid "Message : %(message)s" +#~ msgstr "" + +#~ msgid "Error while trying to set group: %(message)s" +#~ msgstr "" + +#~ msgid "Unable to find group: %(group)s" +#~ msgstr "" + +#~ msgid "ESM urlOpen error" +#~ msgstr "" + +#~ msgid "JSON Error" +#~ msgstr "" + +#~ msgid "Request without URL" +#~ msgstr "" + +#~ msgid "Configure data : %s" +#~ msgstr "" + +#~ msgid "Configure response : %s" +#~ msgstr "" + +#~ msgid "Unable to retrive volume infos for volume %(volname)s" +#~ msgstr "" + +#~ msgid "Cannot login on Coraid ESM" +#~ msgstr "" + +#~ msgid "Fail to create volume %(volname)s" +#~ msgstr "" + +#~ msgid "Failed to delete volume %(volname)s" +#~ msgstr "" + +#~ msgid "Failed to Create Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "Failed to Delete Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "Failed to Create Volume from Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "fmt = %(fmt)s backed by: %(backing_file)s" +#~ msgstr "" + +#~ msgid "Expected image to be in raw format, but is %s" +#~ msgstr "" + +#~ msgid "volume group %s doesn't exist" +#~ msgstr "" + +#~ msgid "Error retrieving volume stats: %s" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Could not get system name" +#~ msgstr "" + +#~ msgid "CPG (%s) must 
be in a domain" +#~ msgstr "" + +#~ msgid "Error populating default encryption types!" +#~ msgstr "" + +#~ msgid "Unexpected error while running command." +#~ msgstr "" + +#~ msgid "Nexenta SA returned the error" +#~ msgstr "" + +#~ msgid "Ignored target group creation error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group member addition error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LU creation error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +#~ msgstr "" + +#~ msgid "Copying metadata from snapshot %(snap_volume_id)s to %(volume_id)s" +#~ msgstr "" + +#~ msgid "Connection to glance failed" +#~ msgstr "" + +#~ msgid "Invalid snapshot" +#~ msgstr "" + +#~ msgid "Invalid input received" +#~ msgstr "" + +#~ msgid "Invalid volume type" +#~ msgstr "" + +#~ msgid "Invalid volume" +#~ msgstr "" + +#~ msgid "Invalid host" +#~ msgstr "" + +#~ msgid "Invalid auth key" +#~ msgstr "" + +#~ msgid "Invalid metadata" +#~ msgstr "" + +#~ msgid "Invalid metadata size" +#~ msgstr "" + +#~ msgid "Migration error" +#~ msgstr "" + +#~ msgid "Quota exceeded" +#~ msgstr "" + +#~ msgid "Connection to swift failed" +#~ msgstr "" + +#~ msgid "Volume migration failed" +#~ msgstr "" + +#~ msgid "SSH command injection detected" +#~ msgstr "" + +#~ msgid "Invalid qos specs" +#~ msgstr "" + +#~ msgid "debug in callback: %s" +#~ msgstr "" + +#~ msgid "Expected object of type: %s" +#~ msgstr "" + +#~ msgid "timefunc: '%(name)s' took %(total_time).2f secs" +#~ msgstr "" + +#~ msgid "base image still has %s snapshots so not deleting base image" +#~ msgstr "" + +#~ msgid "Failed to rename migration destination volume %(vol)s to %(name)s" +#~ msgstr "" + +#~ msgid "Resize volume \"%(name)s\" to %(size)s" +#~ msgstr "" + +#~ msgid "Volume \"%(name)s\" resized. New size is %(size)s" +#~ msgstr "" + +#~ msgid "Invalid snapshot backing file format: %s" +#~ msgstr "" + +#~ msgid "Extend volume from %(old_size) to %(new_size)" +#~ msgstr "" + +#~ msgid "pool %s doesn't exist" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Could not get system name." +#~ msgstr "" + +#~ msgid "Disk not found: %s" +#~ msgstr "" + +#~ msgid "read timed out" +#~ msgstr "" + +#~ msgid "check_for_setup_error." +#~ msgstr "" + +#~ msgid "check_for_setup_error: Can not get device type." +#~ msgstr "" + +#~ msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +#~ msgstr "" + +#~ msgid "_get_device_type: Storage Pool must be configured." +#~ msgstr "" + +#~ msgid "create_volume:volume name: %s." +#~ msgstr "" + +#~ msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +#~ msgstr "" + +#~ msgid "create_export: volume name:%s" +#~ msgstr "" + +#~ msgid "create_export:Volume %(name)s does not exist." +#~ msgstr "" + +#~ msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +#~ msgstr "" + +#~ msgid "terminate_connection:Host does not exist. Host name:%(host)s." +#~ msgstr "" + +#~ msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +#~ msgstr "" + +#~ msgid "create_snapshot:Device does not support snapshot." +#~ msgstr "" + +#~ msgid "create_snapshot:Resource pool needs 1GB valid size at least." +#~ msgstr "" + +#~ msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +#~ msgstr "" + +#~ msgid "create_snapshot:Snapshot does not exist. 
Snapshot name:%(name)s" +#~ msgstr "" + +#~ msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +#~ msgstr "" + +#~ msgid "delete_snapshot:Device does not support snapshot." +#~ msgstr "" + +#~ msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +#~ msgstr "" + +#~ msgid "_check_conf_file: %s" +#~ msgstr "" + +#~ msgid "Write login information to xml error. %s" +#~ msgstr "" + +#~ msgid "_get_login_info error. %s" +#~ msgstr "" + +#~ msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." +#~ msgstr "" + +#~ msgid "_get_lun_set_info:%s" +#~ msgstr "" + +#~ msgid "_get_iscsi_info:%s" +#~ msgstr "" + +#~ msgid "CLI command:%s" +#~ msgstr "" + +#~ msgid "_execute_cli:%s" +#~ msgstr "" + +#~ msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +#~ msgstr "" + +#~ msgid "_get_tgt_iqn:iSCSI IP is %s." +#~ msgstr "" + +#~ msgid "_get_tgt_iqn:iSCSI target iqn is:%s" +#~ msgstr "" + +#~ msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" +#~ msgstr "" + +#~ msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" +#~ msgstr "" + +#~ msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." +#~ msgstr "" + +#~ msgid "Ignored target creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored target group member addition error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LU creation error while ensuring export" +#~ msgstr "" + +#~ msgid "Ignored LUN mapping entry addition error while ensuring export" +#~ msgstr "" + +#~ msgid "Invalid source volume %(reason)s." +#~ msgstr "" + +#~ msgid "The request is invalid." +#~ msgstr "" + +#~ msgid "Volume %(volume_id)s persistence file could not be found." +#~ msgstr "" + +#~ msgid "No disk at %(location)s" +#~ msgstr "" + +#~ msgid "Class %(class_name)s could not be found: %(exception)s" +#~ msgstr "" + +#~ msgid "Action not allowed." +#~ msgstr "" + +#~ msgid "Key pair %(key_name)s already exists." +#~ msgstr "" + +#~ msgid "Migration error: %(reason)s" +#~ msgstr "" + +#~ msgid "Maximum volume/snapshot size exceeded" +#~ msgstr "" + +#~ msgid "3PAR Host already exists: %(err)s. %(info)s" +#~ msgstr "" + +#~ msgid "Backup volume %(volume_id)s type not recognised." +#~ msgstr "" + +#~ msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s" +#~ msgstr "" + +#~ msgid "ssh_read: Read SSH timeout" +#~ msgstr "" + +#~ msgid "do_setup." +#~ msgstr "" + +#~ msgid "create_volume: volume name: %s." +#~ msgstr "" + +#~ msgid "delete_volume: volume name: %s." +#~ msgstr "" + +#~ msgid "create_cloned_volume: src volume: %(src)s tgt volume: %(tgt)s" +#~ msgstr "" + +#~ msgid "create_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" +#~ msgstr "" + +#~ msgid "delete_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" +#~ msgstr "" + +#~ msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Updating volume stats" +#~ msgstr "" + +#~ msgid "restore finished." +#~ msgstr "" + +#~ msgid "Error encountered during initialization of driver: %s" +#~ msgstr "" + +#~ msgid "Unabled to update stats, driver is uninitialized" +#~ msgstr "" + +#~ msgid "Snapshot file at %s does not exist." 
+#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %s does not exist" +#~ msgstr "" + +#~ msgid "Login to 3PAR array invalid" +#~ msgstr "" + +#~ msgid "There are no datastores present under %s." +#~ msgstr "" + +#~ msgid "Size for volume: %s not found, skipping secure delete." +#~ msgstr "" + +#~ msgid "Could not find attribute for LUN named %s" +#~ msgstr "" + +#~ msgid "Cleaning up incomplete backup operations" +#~ msgstr "" + +#~ msgid "Resetting volume %s to available (was backing-up)" +#~ msgstr "" + +#~ msgid "Resetting volume %s to error_restoring (was restoring-backup)" +#~ msgstr "" + +#~ msgid "Resetting backup %s to error (was creating)" +#~ msgstr "" + +#~ msgid "Resetting backup %s to available (was restoring)" +#~ msgstr "" + +#~ msgid "Resuming delete on backup: %s" +#~ msgstr "" + +#~ msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" +#~ msgstr "" + +#~ msgid "create_backup finished. backup: %s" +#~ msgstr "" + +#~ msgid "delete_backup started, backup: %s" +#~ msgstr "" + +#~ msgid "delete_backup finished, backup %s deleted" +#~ msgstr "" + +#~ msgid "JSON transfer Error" +#~ msgstr "" + +#~ msgid "create volume error: %(err)s" +#~ msgstr "" + +#~ msgid "Create snapshot error." +#~ msgstr "" + +#~ msgid "Create luncopy error." +#~ msgstr "" + +#~ msgid "_find_host_lun_id transfer data error! " +#~ msgstr "" + +#~ msgid "ssh_read: Read SSH timeout." +#~ msgstr "" + +#~ msgid "There are no hosts in the inventory." +#~ msgstr "" + +#~ msgid "Unable to create volume: %(vol)s on the hosts: %(hosts)s." +#~ msgstr "" + +#~ msgid "Successfully cloned new backing: %s." +#~ msgstr "" + +#~ msgid "Successfully reverted clone: %(clone)s to snapshot: %(snapshot)s." +#~ msgstr "" + +#~ msgid "Copying backing files from %(src)s to %(dest)s." +#~ msgstr "" + +#~ msgid "Initiated copying of backing via task: %s." +#~ msgstr "" + +#~ msgid "Successfully copied backing to %s." +#~ msgstr "" + +#~ msgid "Registering backing at path: %s to inventory." +#~ msgstr "" + +#~ msgid "Initiated registring backing, task: %s." +#~ msgstr "" + +#~ msgid "Successfully registered backing: %s." +#~ msgstr "" + +#~ msgid "Reverting backing to snapshot: %s." +#~ msgstr "" + +#~ msgid "Initiated reverting snapshot via task: %s." +#~ msgstr "" + +#~ msgid "Successfully reverted to snapshot: %s." +#~ msgstr "" + +#~ msgid "Successfully copied disk data to: %s." +#~ msgstr "" + +#~ msgid "Error(s): %s occurred in the call to RetrieveProperties." +#~ msgstr "" + +#~ msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" +#~ msgstr "" + +#~ msgid "Deploy v1 of the Cinder API. " +#~ msgstr "" + +#~ msgid "Deploy v2 of the Cinder API. " +#~ msgstr "" + +#~ msgid "_read_xml:%s" +#~ msgstr "" + +#~ msgid "request ip info is %s." +#~ msgstr "" + +#~ msgid "new str info is %s." +#~ msgstr "" + +#~ msgid "Failed to create iser target for volume %(volume_id)s." +#~ msgstr "" + +#~ msgid "Failed to remove iser target for volume %(volume_id)s." 
+#~ msgstr "" + +#~ msgid "rtstool is not installed correctly" +#~ msgstr "" + +#~ msgid "Creating iser_target for: %s" +#~ msgstr "" + +#~ msgid "Failed to create iser target for volume id:%(vol_id)s: %(e)s" +#~ msgstr "" + +#~ msgid "Removing iser_target for: %s" +#~ msgstr "" + +#~ msgid "Failed to remove iser target for volume id:%(vol_id)s: %(e)s" +#~ msgstr "" + +#~ msgid "Volume %s does not exist, it seems it was already deleted" +#~ msgstr "" + +#~ msgid "Executing zfs send/recv on the appliance" +#~ msgstr "" + +#~ msgid "zfs send/recv done, new volume %s created" +#~ msgstr "" + +#~ msgid "Failed to delete temp snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" + +#~ msgid "Failed to delete zfs recv snapshot %(volume)s@%(snapshot)s" +#~ msgstr "" + +#~ msgid "rbd export-diff failed - %s" +#~ msgstr "" + +#~ msgid "rbd import-diff failed - %s" +#~ msgstr "" + +#~ msgid "%s is not on GPFS. Perhaps GPFS not mounted." +#~ msgstr "" + +#~ msgid "Folder %s does not exist, it seems it was already deleted." +#~ msgstr "" + +#~ msgid "No 'os-update_readonly_flag' was specified in request." +#~ msgstr "" + +#~ msgid "Volume 'readonly' flag must be specified in request as a boolean." +#~ msgstr "" + +#~ msgid "ISER provider_location not stored, using discovery" +#~ msgstr "" + +#~ msgid "Could not find iSER export for volume %s" +#~ msgstr "" + +#~ msgid "ISER Discovery: Found %s" +#~ msgstr "" + +#~ msgid "Failed to access the device on the path %(path)s: %(error)s." +#~ msgstr "" + +#~ msgid "iSER device not found at %s" +#~ msgstr "" + +#~ msgid "Found iSER node %(host_device)s (after %(tries)s rescans)." +#~ msgstr "" + +#~ msgid "Skipping ensure_export. No iser_target provisioned for volume: %s" +#~ msgstr "" + +#~ msgid "Skipping remove_export. No iser_target provisioned for volume: %s" +#~ msgstr "" + +#~ msgid "Downloading image: %s from glance image server." +#~ msgstr "" + +#~ msgid "Uploading image: %s to the Glance image server." +#~ msgstr "" + +#~ msgid "Invalid request body" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: prefix %s" +#~ msgstr "" + +#~ msgid "Schedule volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete schedule volume using flow: %s" +#~ msgstr "" + +#~ msgid "Create volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete create volume workflow" +#~ msgstr "" + +#~ msgid "Expected volume result not found" +#~ msgstr "" + +#~ msgid "Manager volume flow not retrieved" +#~ msgstr "" + +#~ msgid "Failed to successfully complete manager volume workflow" +#~ msgstr "" + +#~ msgid "Unable to update stats, driver is uninitialized" +#~ msgstr "" + +#~ msgid "Bad reponse from server: %s" +#~ msgstr "" + +#~ msgid "%(flow)s has moved into state %(state)s from state %(old_state)s" +#~ msgstr "" + +#~ msgid "No request spec, will not reschedule" +#~ msgstr "" + +#~ msgid "No retry filter property or associated retry info, will not reschedule" +#~ msgstr "" + +#~ msgid "Retry info not present, will not reschedule" +#~ msgstr "" + +#~ msgid "Clear capabilities" +#~ msgstr "" + +#~ msgid "This usually means the volume was never succesfully created." +#~ msgstr "" + +#~ msgid "setting LU uppper (end) limit to %s" +#~ msgstr "" + +#~ msgid "Can't find lun or lun goup in array" +#~ msgstr "" + +#~ msgid "Volume to be restored to is smaller than the backup to be restored" +#~ msgstr "" + +#~ msgid "Volume driver '%(driver)s' not initialized." 
+#~ msgstr "" + +#~ msgid "in looping call" +#~ msgstr "" + +#~ msgid "Is the appropriate service running?" +#~ msgstr "" + +#~ msgid "Could not find another host" +#~ msgstr "" + +#~ msgid "Not enough allocatable volume gigabytes remaining" +#~ msgstr "" + +#~ msgid "Unable to update stats on non-intialized Volume Group: %s" +#~ msgstr "" + +#~ msgid "do_setup: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "migrate_volume started with more than one vdisk copy" +#~ msgstr "" + +#~ msgid "migrate_volume: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "Selected datastore: %s for the volume." +#~ msgstr "" + +#~ msgid "There are no valid datastores present under %s." +#~ msgstr "" + +#~ msgid "Unable to create volume, driver not initialized" +#~ msgstr "" + +#~ msgid "Migration %(migration_id)s could not be found." +#~ msgstr "" + +#~ msgid "Bad driver response status: %(status)s" +#~ msgstr "" + +#~ msgid "Instance %(instance_id)s could not be found." +#~ msgstr "" + +#~ msgid "Volume retype failed: %(reason)s" +#~ msgstr "" + +#~ msgid "SIGTERM received" +#~ msgstr "" + +#~ msgid "Child %(pid)d exited with status %(code)d" +#~ msgstr "" + +#~ msgid "_wait_child %d" +#~ msgstr "" + +#~ msgid "wait wrap.failed %s" +#~ msgstr "" + +#~ msgid "" +#~ "Report interval must be less than " +#~ "service down time. Current config: " +#~ ". Setting " +#~ "service_down_time to: %(new_service_down_time)s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target for volume %(name)s." +#~ msgstr "" + +#~ msgid "Updating iscsi target: %s" +#~ msgstr "" + +#~ msgid "Failed to update iscsi target %(name)s: %(e)s" +#~ msgstr "" + +#~ msgid "Caught '%(exception)s' exception." +#~ msgstr "" + +#~ msgid "Get code level failed" +#~ msgstr "" + +#~ msgid "do_setup: Could not get system name" +#~ msgstr "" + +#~ msgid "Failed to get license information." +#~ msgstr "" + +#~ msgid "do_setup: No configured nodes" +#~ msgstr "" + +#~ msgid "enter: _get_chap_secret_for_host: host name %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_chap_secret_for_host: host name " +#~ "%(host_name)s with secret %(chap_secret)s" +#~ msgstr "" + +#~ msgid "" +#~ "_create_host: Cannot clean host name. " +#~ "Host name is not unicode or string" +#~ msgstr "" + +#~ msgid "enter: _get_host_from_connector: %s" +#~ msgstr "" + +#~ msgid "leave: _get_host_from_connector: host %s" +#~ msgstr "" + +#~ msgid "enter: _create_host: host %s" +#~ msgstr "" + +#~ msgid "_create_host: No connector ports" +#~ msgstr "" + +#~ msgid "leave: _create_host: host %(host)s - %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" +#~ msgstr "" + +#~ msgid "" +#~ "storwize_svc_multihostmap_enabled is set to " +#~ "False, Not allow multi host mapping" +#~ msgstr "" + +#~ msgid "volume %s mapping to multi host" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _map_vol_to_host: LUN %(result_lun)s, " +#~ "volume %(volume_name)s, host %(host_name)s" +#~ msgstr "" + +#~ msgid "enter: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "leave: _delete_host: host %s " +#~ msgstr "" + +#~ msgid "_create_host failed to return the host name." +#~ msgstr "" + +#~ msgid "_get_host_from_connector failed to return the host name for connector" +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to any host found." +#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: Multiple mappings of " +#~ "volume %(vol_name)s found, no host " +#~ "specified." 
+#~ msgstr "" + +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to host %(host_name)s found" +#~ msgstr "" + +#~ msgid "protocol must be specified as ' iSCSI' or ' FC'" +#~ msgstr "" + +#~ msgid "enter: _create_vdisk: vdisk %s " +#~ msgstr "" + +#~ msgid "" +#~ "_create_vdisk %(name)s - did not find success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find success " +#~ "message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find mapping " +#~ "id in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to prepare FlashCopy" +#~ " from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "Unexecpted mapping status %(status)s for " +#~ "mapping %(id)s. Attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Mapping %(id)s prepare failed to " +#~ "complete within the allotted %(to)d " +#~ "seconds timeout. Terminating." +#~ msgstr "" + +#~ msgid "" +#~ "_prepare_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s with " +#~ "exception %(ex)s" +#~ msgstr "" + +#~ msgid "_prepare_fc_map: %s" +#~ msgstr "" + +#~ msgid "" +#~ "_start_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "" +#~ "enter: _run_flashcopy: execute FlashCopy from" +#~ " source %(source)s to target %(target)s" +#~ msgstr "" + +#~ msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" +#~ msgstr "" + +#~ msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "_create_copy: Source vdisk %(src_vdisk)s (%(src_id)s) does not exist" +#~ msgstr "" + +#~ msgid "" +#~ "_create_copy: cannot get source vdisk " +#~ "%(src)s capacity from vdisk attributes " +#~ "%(attr)s" +#~ msgstr "" + +#~ msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" +#~ msgstr "" + +#~ msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _get_flashcopy_mapping_attributes: mapping " +#~ "%(fc_map_id)s, attributes %(attributes)s" +#~ msgstr "" + +#~ msgid "enter: _is_vdisk_defined: vdisk %s " +#~ msgstr "" + +#~ msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " +#~ msgstr "" + +#~ msgid "enter: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "warning: Tried to delete vdisk %s but it does not exist." 
+#~ msgstr "" + +#~ msgid "leave: _delete_vdisk: vdisk %s" +#~ msgstr "" + +#~ msgid "" +#~ "_add_vdisk_copy %(name)s - did not find" +#~ " success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "_get_vdisk_copy_attrs: Could not get vdisk copy data" +#~ msgstr "" + +#~ msgid "_get_pool_attrs: Pool %s does not exist" +#~ msgstr "" + +#~ msgid "enter: _execute_command_and_parse_attributes: command %s" +#~ msgstr "" + +#~ msgid "" +#~ "leave: _execute_command_and_parse_attributes:\n" +#~ "command: %(cmd)s\n" +#~ "attributes: %(attr)s" +#~ msgstr "" + +#~ msgid "" +#~ "_get_hdr_dic: attribute headers and values do not match.\n" +#~ " Headers: %(header)s\n" +#~ " Values: %(row)s" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ "stdout: %(out)s\n" +#~ "stderr: %(err)s\n" +#~ msgstr "" + +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" +#~ msgstr "" + +#~ msgid "Did not find expected column in %(fun)s: %(hdr)s" +#~ msgstr "" + +#~ msgid "" +#~ "Volume size %(size)s cannot be lesser" +#~ " than the snapshot size %(snap_size)s. " +#~ "They must be >= original snapshot " +#~ "size." +#~ msgstr "" + +#~ msgid "" +#~ "Clones currently disallowed when %(size)s " +#~ "< %(source_size)s. They must be >= " +#~ "original volume size." +#~ msgstr "" + +#~ msgid "" +#~ "Size of specified image %(image_size)s " +#~ "is larger than volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "" +#~ "Image minDisk size %(min_disk)s is " +#~ "larger than the volume size " +#~ "%(volume_size)s." +#~ msgstr "" + +#~ msgid "Updating volume %(volume_id)s with %(update)s" +#~ msgstr "" + +#~ msgid "Volume %s: resetting 'creating' status failed" +#~ msgstr "" + +#~ msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s" +#~ msgstr "" + +#~ msgid "Marking volume %s as bootable" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting download of %(image_id)s " +#~ "(%(image_location)s) to volume %(volume_id)s" +#~ msgstr "" + +#~ msgid "" +#~ "Downloaded image %(image_id)s (%(image_location)s)" +#~ " to volume %(volume_id)s successfully" +#~ msgstr "" + +#~ msgid "" +#~ "Creating volume glance metadata for " +#~ "volume %(volume_id)s backed by image " +#~ "%(image_id)s with: %(vol_metadata)s" +#~ msgstr "" + +#~ msgid "" +#~ "Cloning %(volume_id)s from image %(image_id)s" +#~ " at location %(image_location)s" +#~ msgstr "" + diff --git a/cinder/locale/zh_TW/LC_MESSAGES/cinder.po b/cinder/locale/zh_TW/LC_MESSAGES/cinder.po index 2a93395a27..407d1fdee8 100644 --- a/cinder/locale/zh_TW/LC_MESSAGES/cinder.po +++ b/cinder/locale/zh_TW/LC_MESSAGES/cinder.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: cinder\n" "Report-Msgid-Bugs-To: FULL NAME \n" -"POT-Creation-Date: 2012-04-08 23:04+0000\n" +"POT-Creation-Date: 2014-02-03 06:16+0000\n" "PO-Revision-Date: 2012-03-07 02:00+0000\n" "Last-Translator: Charles Hsu \n" "Language-Team: Chinese (Traditional) \n" @@ -15,8193 +15,10740 @@ msgstr "" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 0.9.6\n" +"Generated-By: Babel 1.3\n" -#: cinder/context.py:59 +#: cinder/context.py:61 #, python-format msgid "Arguments dropped when creating context: %s" msgstr "" -#: cinder/context.py:90 +#: cinder/context.py:102 #, python-format msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" 
msgstr "" -#: cinder/crypto.py:48 -msgid "Filename of root CA" -msgstr "" - -#: cinder/crypto.py:51 -msgid "Filename of private key" -msgstr "Private key 私鑰檔案名稱" +#: cinder/exception.py:66 cinder/brick/exception.py:33 +msgid "An unknown exception occurred." +msgstr "發生一個未知例外" -#: cinder/crypto.py:54 -msgid "Filename of root Certificate Revocation List" +#: cinder/exception.py:88 cinder/openstack/common/rpc/common.py:86 +msgid "Exception in string format operation" msgstr "" -#: cinder/crypto.py:57 -msgid "Where we keep our keys" -msgstr "我們的keys留存於何處" - -#: cinder/crypto.py:60 -msgid "Where we keep our root CA" -msgstr "我們的根憑證留存於何處" - -#: cinder/crypto.py:63 -msgid "Should we use a CA for each project?" -msgstr "是否需要一個project 使用一個CA" - -#: cinder/crypto.py:67 +#: cinder/exception.py:107 #, python-format -msgid "Subject for certificate for users, %s for project, user, timestamp" +msgid "Bad or unexpected response from the storage volume backend API: %(data)s" msgstr "" -#: cinder/crypto.py:72 +#: cinder/exception.py:112 #, python-format -msgid "Subject for certificate for projects, %s for project, timestamp" +msgid "Volume driver reported an error: %(message)s" msgstr "" -#: cinder/crypto.py:292 +#: cinder/exception.py:116 #, python-format -msgid "Flags path: %s" +msgid "Backup driver reported an error: %(message)s" msgstr "" -#: cinder/exception.py:56 -msgid "Unexpected error while running command." -msgstr "非預期的執行錯誤" - -#: cinder/exception.py:59 +#: cinder/exception.py:120 #, python-format -msgid "" -"%(description)s\n" -"Command: %(cmd)s\n" -"Exit code: %(exit_code)s\n" -"Stdout: %(stdout)r\n" -"Stderr: %(stderr)r" -msgstr "" -"%(description)s\n" -"命令: %(cmd)s\n" -"退出代碼: %(exit_code)s\n" -"標準輸出: %(stdout)r\n" -"標準錯誤輸出: %(stderr)r" - -#: cinder/exception.py:94 -msgid "DB exception wrapped." -msgstr "" - -#: cinder/exception.py:155 -msgid "An unknown exception occurred." -msgstr "發生一個未知例外" - -#: cinder/exception.py:178 -msgid "Failed to decrypt text" -msgstr "內文解碼失敗" - -#: cinder/exception.py:182 -msgid "Failed to paginate through images from image service" -msgstr "" - -#: cinder/exception.py:186 -msgid "Virtual Interface creation failed" -msgstr "建立虛擬介面失敗" - -#: cinder/exception.py:190 -msgid "5 attempts to create virtual interfacewith unique mac address failed" -msgstr "連續嘗試 5 次建立唯一網路位置(MAC)的虛擬介面失敗" - -#: cinder/exception.py:195 -msgid "Connection to glance failed" -msgstr "連接到glance失敗" - -#: cinder/exception.py:199 -msgid "Connection to melange failed" +msgid "Connection to glance failed: %(reason)s" msgstr "" -#: cinder/exception.py:203 +#: cinder/exception.py:124 msgid "Not authorized." msgstr "未被授權" -#: cinder/exception.py:208 +#: cinder/exception.py:129 msgid "User does not have admin privileges" msgstr "使用者並沒有管理者權力" -#: cinder/exception.py:212 +#: cinder/exception.py:133 #, python-format msgid "Policy doesn't allow %(action)s to be performed." msgstr "" -#: cinder/exception.py:216 +#: cinder/exception.py:137 #, python-format msgid "Not authorized for image %(image_id)s." msgstr "" -#: cinder/exception.py:220 +#: cinder/exception.py:141 +msgid "Volume driver not ready." +msgstr "" + +#: cinder/exception.py:145 cinder/brick/exception.py:74 msgid "Unacceptable parameters." 
msgstr "不可接受的參數值" -#: cinder/exception.py:225 -msgid "Invalid snapshot" -msgstr "無效的快照(snapshot)" - -#: cinder/exception.py:229 +#: cinder/exception.py:150 #, python-format -msgid "Volume %(volume_id)s is not attached to anything" -msgstr "" - -#: cinder/exception.py:233 cinder/api/openstack/compute/contrib/keypairs.py:113 -msgid "Keypair data is invalid" -msgstr "無效的Keypair" - -#: cinder/exception.py:237 -msgid "Failed to load data into json format" -msgstr "" - -#: cinder/exception.py:241 -msgid "The request is invalid." +msgid "Invalid snapshot: %(reason)s" msgstr "" -#: cinder/exception.py:245 +#: cinder/exception.py:154 #, python-format -msgid "Invalid signature %(signature)s for user %(user)s." -msgstr "" - -#: cinder/exception.py:249 -msgid "Invalid input received" +msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." msgstr "" -#: cinder/exception.py:253 +#: cinder/exception.py:159 #, python-format -msgid "Invalid instance type %(instance_type)s." +msgid "Volume %(volume_id)s is still attached, detach volume first." msgstr "" -#: cinder/exception.py:257 -msgid "Invalid volume type" +#: cinder/exception.py:163 +msgid "Failed to load data into json format" msgstr "" -#: cinder/exception.py:261 -msgid "Invalid volume" +#: cinder/exception.py:167 +msgid "The results are invalid." msgstr "" -#: cinder/exception.py:265 +#: cinder/exception.py:171 #, python-format -msgid "Invalid port range %(from_port)s:%(to_port)s. %(msg)s" +msgid "Invalid input received: %(reason)s" msgstr "" -#: cinder/exception.py:269 +#: cinder/exception.py:175 #, python-format -msgid "Invalid IP protocol %(protocol)s." +msgid "Invalid volume type: %(reason)s" msgstr "" -#: cinder/exception.py:273 +#: cinder/exception.py:179 #, python-format -msgid "Invalid content type %(content_type)s." +msgid "Invalid volume: %(reason)s" msgstr "" -#: cinder/exception.py:277 +#: cinder/exception.py:183 #, python-format -msgid "Invalid cidr %(cidr)s." -msgstr "" - -#: cinder/exception.py:281 -msgid "Invalid reuse of an RPC connection." +msgid "Invalid content type %(content_type)s." msgstr "" -#: cinder/exception.py:285 -msgid "Invalid Parameter: Unicode is not supported by the current database." +#: cinder/exception.py:187 +#, python-format +msgid "Invalid host: %(reason)s" msgstr "" -#: cinder/exception.py:292 +#: cinder/exception.py:193 cinder/brick/exception.py:81 #, python-format msgid "%(err)s" msgstr "" -#: cinder/exception.py:296 +#: cinder/exception.py:197 #, python-format -msgid "" -"Cannot perform action '%(action)s' on aggregate %(aggregate_id)s. Reason:" -" %(reason)s." +msgid "Invalid auth key: %(reason)s" msgstr "" -#: cinder/exception.py:301 +#: cinder/exception.py:201 #, python-format -msgid "Group not valid. Reason: %(reason)s" +msgid "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" msgstr "" -#: cinder/exception.py:305 -#, python-format -msgid "" -"Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot %(method)s while" -" the instance is in this state." +#: cinder/exception.py:206 +msgid "Service is unavailable at this time." msgstr "" -#: cinder/exception.py:310 +#: cinder/exception.py:210 #, python-format -msgid "Instance %(instance_id)s is not running." +msgid "Image %(image_id)s is unacceptable: %(reason)s" msgstr "" -#: cinder/exception.py:314 +#: cinder/exception.py:214 #, python-format -msgid "Instance %(instance_id)s is not suspended." 
+msgid "The device in the path %(path)s is unavailable: %(reason)s" msgstr "" -#: cinder/exception.py:318 +#: cinder/exception.py:218 #, python-format -msgid "Instance %(instance_id)s is not in rescue mode" +msgid "Expected a uuid but received %(uuid)s." msgstr "" -#: cinder/exception.py:322 -msgid "Failed to suspend instance" +#: cinder/exception.py:222 cinder/brick/exception.py:68 +msgid "Resource could not be found." msgstr "" -#: cinder/exception.py:326 -msgid "Failed to resume server" +#: cinder/exception.py:228 +#, python-format +msgid "Volume %(volume_id)s could not be found." msgstr "" -#: cinder/exception.py:330 -msgid "Failed to reboot instance" +#: cinder/exception.py:232 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." msgstr "" -#: cinder/exception.py:334 -msgid "Failed to terminate instance" +#: cinder/exception.py:237 +#, python-format +msgid "" +"Volume %(volume_id)s has no administration metadata with key " +"%(metadata_key)s." msgstr "" -#: cinder/exception.py:338 -msgid "Service is unavailable at this time." +#: cinder/exception.py:242 +#, python-format +msgid "Invalid metadata: %(reason)s" msgstr "" -#: cinder/exception.py:342 -msgid "Volume service is unavailable at this time." +#: cinder/exception.py:246 +#, python-format +msgid "Invalid metadata size: %(reason)s" msgstr "" -#: cinder/exception.py:346 -msgid "Compute service is unavailable at this time." +#: cinder/exception.py:250 +#, python-format +msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." msgstr "" -#: cinder/exception.py:350 +#: cinder/exception.py:255 #, python-format -msgid "Unable to migrate instance (%(instance_id)s) to current host (%(host)s)." +msgid "Volume type %(volume_type_id)s could not be found." msgstr "" -#: cinder/exception.py:355 -msgid "Destination compute host is unavailable at this time." +#: cinder/exception.py:259 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." msgstr "" -#: cinder/exception.py:359 -msgid "Original compute host is unavailable at this time." +#: cinder/exception.py:264 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." msgstr "" -#: cinder/exception.py:363 -msgid "The supplied hypervisor type of is invalid." +#: cinder/exception.py:269 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s deletion is not allowed with volumes " +"present with the type." msgstr "" -#: cinder/exception.py:367 -msgid "The instance requires a newer hypervisor version than has been provided." +#: cinder/exception.py:274 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." msgstr "" -#: cinder/exception.py:372 +#: cinder/exception.py:278 #, python-format -msgid "" -"The supplied disk path (%(path)s) already exists, it is expected not to " -"exist." +msgid "deleting volume %(volume_name)s that has snapshot" msgstr "" -#: cinder/exception.py:377 +#: cinder/exception.py:282 #, python-format -msgid "The supplied device path (%(path)s) is invalid." +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" msgstr "" -#: cinder/exception.py:381 +#: cinder/exception.py:287 #, python-format -msgid "The supplied device (%(device)s) is busy." +msgid "No target id found for volume %(volume_id)s." msgstr "" -#: cinder/exception.py:385 -msgid "Unacceptable CPU info" +#: cinder/exception.py:291 +#, python-format +msgid "Invalid image href %(image_href)s." 
msgstr "" -#: cinder/exception.py:389 +#: cinder/exception.py:295 #, python-format -msgid "%(address)s is not a valid IP v4/6 address." +msgid "Image %(image_id)s could not be found." msgstr "" -#: cinder/exception.py:393 +#: cinder/exception.py:299 #, python-format -msgid "" -"VLAN tag is not appropriate for the port group %(bridge)s. Expected VLAN " -"tag is %(tag)s, but the one associated with the port group is %(pgroup)s." +msgid "Service %(service_id)s could not be found." msgstr "" -#: cinder/exception.py:399 +#: cinder/exception.py:303 #, python-format -msgid "" -"vSwitch which contains the port group %(bridge)s is not associated with " -"the desired physical adapter. Expected vSwitch is %(expected)s, but the " -"one associated is %(actual)s." +msgid "Host %(host)s could not be found." msgstr "" -#: cinder/exception.py:406 +#: cinder/exception.py:307 #, python-format -msgid "Disk format %(disk_format)s is not acceptable" +msgid "Scheduler Host Filter %(filter_name)s could not be found." msgstr "" -#: cinder/exception.py:410 +#: cinder/exception.py:311 #, python-format -msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgid "Scheduler Host Weigher %(weigher_name)s could not be found." msgstr "" -#: cinder/exception.py:414 +#: cinder/exception.py:315 #, python-format -msgid "Instance %(instance_id)s is unacceptable: %(reason)s" +msgid "Could not find binary %(binary)s on host %(host)s." msgstr "" -#: cinder/exception.py:418 +#: cinder/exception.py:319 #, python-format -msgid "Ec2 id %(ec2_id)s is unacceptable." +msgid "Invalid reservation expiration %(expire)s." msgstr "" -#: cinder/exception.py:422 -msgid "Resource could not be found." +#: cinder/exception.py:323 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" msgstr "" -#: cinder/exception.py:427 -#, python-format -msgid "Required flag %(flag)s not set." +#: cinder/exception.py:328 +msgid "Quota could not be found" msgstr "" -#: cinder/exception.py:431 +#: cinder/exception.py:332 #, python-format -msgid "Volume %(volume_id)s could not be found." +msgid "Unknown quota resources %(unknown)s." msgstr "" -#: cinder/exception.py:435 +#: cinder/exception.py:336 #, python-format -msgid "Unable to locate account %(account_name)s on Solidfire device" +msgid "Quota for project %(project_id)s could not be found." msgstr "" -#: cinder/exception.py:440 +#: cinder/exception.py:340 #, python-format -msgid "Volume not found for instance %(instance_id)s." +msgid "Quota class %(class_name)s could not be found." msgstr "" -#: cinder/exception.py:444 +#: cinder/exception.py:344 #, python-format -msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgid "Quota usage for project %(project_id)s could not be found." msgstr "" -#: cinder/exception.py:449 -msgid "Zero volume types found." +#: cinder/exception.py:348 +#, python-format +msgid "Quota reservation %(uuid)s could not be found." msgstr "" -#: cinder/exception.py:453 +#: cinder/exception.py:352 #, python-format -msgid "Volume type %(volume_type_id)s could not be found." +msgid "Quota exceeded for resources: %(overs)s" msgstr "" -#: cinder/exception.py:457 +#: cinder/exception.py:356 #, python-format -msgid "Volume type with name %(volume_type_name)s could not be found." +msgid "File %(file_path)s could not be found." msgstr "" -#: cinder/exception.py:462 +#: cinder/exception.py:365 #, python-format -msgid "" -"Volume Type %(volume_type_id)s has no extra specs with key " -"%(extra_specs_key)s." 
+msgid "Volume Type %(id)s already exists." msgstr "" -#: cinder/exception.py:467 +#: cinder/exception.py:369 #, python-format -msgid "Snapshot %(snapshot_id)s could not be found." +msgid "Volume type encryption for type %(type_id)s already exists." msgstr "" -#: cinder/exception.py:471 +#: cinder/exception.py:373 #, python-format -msgid "deleting volume %(volume_name)s that has snapshot" +msgid "Malformed message body: %(reason)s" msgstr "" -#: cinder/exception.py:475 +#: cinder/exception.py:377 #, python-format -msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgid "Could not find config at %(path)s" msgstr "" -#: cinder/exception.py:480 +#: cinder/exception.py:381 +#, fuzzy, python-format +msgid "Could not find parameter %(param)s" +msgstr "找不到Volume %s" + +#: cinder/exception.py:385 #, python-format -msgid "No target id found for volume %(volume_id)s." +msgid "Could not load paste app '%(name)s' from %(path)s" msgstr "" -#: cinder/exception.py:484 +#: cinder/exception.py:389 #, python-format -msgid "No disk at %(location)s" +msgid "No valid host was found. %(reason)s" msgstr "" -#: cinder/exception.py:488 +#: cinder/exception.py:398 #, python-format -msgid "Could not find a handler for %(driver_type)s volume." +msgid "Host %(host)s is not up or doesn't exist." msgstr "" -#: cinder/exception.py:492 +#: cinder/exception.py:402 #, python-format -msgid "Invalid image href %(image_href)s." +msgid "Quota exceeded: code=%(code)s" msgstr "" -#: cinder/exception.py:496 +#: cinder/exception.py:409 +#, python-format msgid "" -"Some images have been stored via hrefs. This version of the api does not " -"support displaying image hrefs." +"Requested volume or snapshot exceeds allowed Gigabytes quota. Requested " +"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." msgstr "" -#: cinder/exception.py:501 +#: cinder/exception.py:415 #, python-format -msgid "Image %(image_id)s could not be found." +msgid "Maximum number of volumes allowed (%(allowed)d) exceeded" msgstr "" -#: cinder/exception.py:505 +#: cinder/exception.py:419 #, python-format -msgid "Kernel not found for image %(image_id)s." +msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" msgstr "" -#: cinder/exception.py:509 +#: cinder/exception.py:423 #, python-format -msgid "User %(user_id)s could not be found." +msgid "Detected more than one volume with name %(vol_name)s" msgstr "" -#: cinder/exception.py:513 +#: cinder/exception.py:427 #, python-format -msgid "Project %(project_id)s could not be found." +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" msgstr "" -#: cinder/exception.py:517 +#: cinder/exception.py:432 #, python-format -msgid "User %(user_id)s is not a member of project %(project_id)s." +msgid "Unknown or unsupported command %(cmd)s" msgstr "" -#: cinder/exception.py:521 +#: cinder/exception.py:436 #, python-format -msgid "Role %(role_id)s could not be found." +msgid "Malformed response to command %(cmd)s: %(reason)s" msgstr "" -#: cinder/exception.py:525 -msgid "Cannot find SR to read/write VDI." +#: cinder/exception.py:440 +#, python-format +msgid "Operation failed with status=%(status)s. Full dump: %(data)s" msgstr "" -#: cinder/exception.py:529 +#: cinder/exception.py:444 #, python-format -msgid "%(req)s is required to create a network." 
+msgid "" +"Glance metadata cannot be updated, key %(key)s exists for volume id " +"%(volume_id)s" msgstr "" -#: cinder/exception.py:533 +#: cinder/exception.py:449 #, python-format -msgid "Network %(network_id)s could not be found." +msgid "Glance metadata for volume/snapshot %(id)s cannot be found." msgstr "" -#: cinder/exception.py:537 +#: cinder/exception.py:453 #, python-format -msgid "Network could not be found for bridge %(bridge)s" +msgid "Failed to export for volume: %(reason)s" msgstr "" -#: cinder/exception.py:541 +#: cinder/exception.py:457 #, python-format -msgid "Network could not be found for uuid %(uuid)s" +msgid "Failed to create metadata for volume: %(reason)s" msgstr "" -#: cinder/exception.py:545 +#: cinder/exception.py:461 #, python-format -msgid "Network could not be found with cidr %(cidr)s." +msgid "Failed to update metadata for volume: %(reason)s" msgstr "" -#: cinder/exception.py:549 +#: cinder/exception.py:465 #, python-format -msgid "Network could not be found for instance %(instance_id)s." +msgid "Failed to copy metadata to volume: %(reason)s" msgstr "" -#: cinder/exception.py:553 -msgid "No networks defined." +#: cinder/exception.py:469 +#, fuzzy, python-format +msgid "Failed to copy image to volume: %(reason)s" +msgstr "找不到Volume %s" + +#: cinder/exception.py:473 +msgid "Invalid Ceph args provided for backup rbd operation" msgstr "" -#: cinder/exception.py:557 -#, python-format -msgid "" -"Either Network uuid %(network_uuid)s is not present or is not assigned to" -" the project %(project_id)s." +#: cinder/exception.py:477 +msgid "An error has occurred during backup operation" msgstr "" -#: cinder/exception.py:562 -#, python-format -msgid "Host is not set to the network (%(network_id)s)." +#: cinder/exception.py:481 +msgid "Backup RBD operation failed" msgstr "" -#: cinder/exception.py:566 +#: cinder/exception.py:485 #, python-format -msgid "Network %(network)s has active ports, cannot delete." +msgid "Backup %(backup_id)s could not be found." msgstr "" -#: cinder/exception.py:570 -msgid "Could not find the datastore reference(s) which the VM uses." +#: cinder/exception.py:489 +msgid "Failed to identify volume backend." msgstr "" -#: cinder/exception.py:574 +#: cinder/exception.py:493 #, python-format -msgid "No fixed IP associated with id %(id)s." +msgid "Invalid backup: %(reason)s" msgstr "" -#: cinder/exception.py:578 +#: cinder/exception.py:497 #, python-format -msgid "Fixed ip not found for address %(address)s." +msgid "Connection to swift failed: %(reason)s" msgstr "" -#: cinder/exception.py:582 +#: cinder/exception.py:501 #, python-format -msgid "Instance %(instance_id)s has zero fixed ips." +msgid "Transfer %(transfer_id)s could not be found." msgstr "" -#: cinder/exception.py:586 +#: cinder/exception.py:505 #, python-format -msgid "Network host %(host)s has zero fixed ips in network %(network_id)s." +msgid "Volume migration failed: %(reason)s" msgstr "" -#: cinder/exception.py:591 +#: cinder/exception.py:509 #, python-format -msgid "Instance %(instance_id)s doesn't have fixed ip '%(ip)s'." +msgid "SSH command injection detected: %(command)s" msgstr "" -#: cinder/exception.py:595 +#: cinder/exception.py:513 #, python-format -msgid "Host %(host)s has zero fixed ips." +msgid "QoS Specs %(specs_id)s already exists." msgstr "" -#: cinder/exception.py:599 +#: cinder/exception.py:517 #, python-format -msgid "" -"Fixed IP address (%(address)s) does not exist in network " -"(%(network_uuid)s)." 
+msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." msgstr "" -#: cinder/exception.py:604 +#: cinder/exception.py:522 #, python-format -msgid "Fixed IP address %(address)s is already in use." +msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." msgstr "" -#: cinder/exception.py:608 +#: cinder/exception.py:527 #, python-format -msgid "Fixed IP address %(address)s is invalid." +msgid "No such QoS spec %(specs_id)s." msgstr "" -#: cinder/exception.py:612 -msgid "Zero fixed ips available." +#: cinder/exception.py:531 +#, python-format +msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." msgstr "" -#: cinder/exception.py:616 -msgid "Zero fixed ips could be found." +#: cinder/exception.py:536 +#, python-format +msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." msgstr "" -#: cinder/exception.py:620 +#: cinder/exception.py:541 #, python-format -msgid "Floating ip not found for id %(id)s." +msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." msgstr "" -#: cinder/exception.py:624 +#: cinder/exception.py:546 #, python-format -msgid "The DNS entry %(name)s already exists in domain %(domain)s." +msgid "Invalid qos specs: %(reason)s" msgstr "" -#: cinder/exception.py:628 +#: cinder/exception.py:550 #, python-format -msgid "Floating ip not found for address %(address)s." +msgid "QoS Specs %(specs_id)s is still associated with entities." msgstr "" -#: cinder/exception.py:632 +#: cinder/exception.py:554 #, python-format -msgid "Floating ip not found for host %(host)s." +msgid "key manager error: %(reason)s" msgstr "" -#: cinder/exception.py:636 -msgid "Zero floating ips available." +#: cinder/exception.py:560 +msgid "Coraid Cinder Driver exception." msgstr "" -#: cinder/exception.py:640 -#, python-format -msgid "Floating ip %(address)s is associated." +#: cinder/exception.py:564 +msgid "Failed to encode json data." msgstr "" -#: cinder/exception.py:644 -#, python-format -msgid "Floating ip %(address)s is not associated." +#: cinder/exception.py:568 +msgid "Login on ESM failed." msgstr "" -#: cinder/exception.py:648 -msgid "Zero floating ips exist." +#: cinder/exception.py:572 +msgid "Relogin on ESM failed." msgstr "" -#: cinder/exception.py:652 +#: cinder/exception.py:576 #, python-format -msgid "Interface %(interface)s not found." +msgid "Group with name \"%(group_name)s\" not found." msgstr "" -#: cinder/exception.py:656 +#: cinder/exception.py:580 #, python-format -msgid "Keypair %(name)s not found for user %(user_id)s" +msgid "ESM configure request failed: %(message)s." msgstr "" -#: cinder/exception.py:660 +#: cinder/exception.py:584 #, python-format -msgid "Certificate %(certificate_id)s not found." +msgid "Coraid ESM not available with reason: %(reason)s." msgstr "" -#: cinder/exception.py:664 -#, python-format -msgid "Service %(service_id)s could not be found." +#: cinder/exception.py:589 +msgid "Zadara Cinder Driver exception." msgstr "" -#: cinder/exception.py:668 +#: cinder/exception.py:593 +#, fuzzy, python-format +msgid "Unable to create server object for initiator %(name)s" +msgstr "無法替 instance實例 %(instance_name)s , 建立 VDI 在SR %(sr_ref)s" + +#: cinder/exception.py:597 #, python-format -msgid "Host %(host)s could not be found." +msgid "Unable to find server object for initiator %(name)s" msgstr "" -#: cinder/exception.py:672 -#, python-format -msgid "Compute host %(host)s could not be found." 
+#: cinder/exception.py:601 +msgid "Unable to find any active VPSA controller" msgstr "" -#: cinder/exception.py:676 +#: cinder/exception.py:605 #, python-format -msgid "Could not find binary %(binary)s on host %(host)s." +msgid "Failed to retrieve attachments for volume %(name)s" msgstr "" -#: cinder/exception.py:680 +#: cinder/exception.py:609 #, python-format -msgid "Auth token %(token)s could not be found." +msgid "Invalid attachment info for volume %(name)s: %(reason)s" msgstr "" -#: cinder/exception.py:684 +#: cinder/exception.py:613 #, python-format -msgid "Access Key %(access_key)s could not be found." +msgid "Bad HTTP response status %(status)s" msgstr "" -#: cinder/exception.py:688 -msgid "Quota could not be found" +#: cinder/exception.py:618 +msgid "Bad response from SolidFire API" msgstr "" -#: cinder/exception.py:692 -#, python-format -msgid "Quota for project %(project_id)s could not be found." +#: cinder/exception.py:622 +msgid "SolidFire Cinder Driver exception" msgstr "" -#: cinder/exception.py:696 +#: cinder/exception.py:626 #, python-format -msgid "Quota class %(class_name)s could not be found." +msgid "Error in SolidFire API response: data=%(data)s" msgstr "" -#: cinder/exception.py:700 +#: cinder/exception.py:630 #, python-format -msgid "Security group %(security_group_id)s not found." +msgid "Unable to locate account %(account_name)s on Solidfire device" msgstr "" -#: cinder/exception.py:704 +#: cinder/exception.py:636 #, python-format -msgid "Security group %(security_group_id)s not found for project %(project_id)s." +msgid "Invalid 3PAR Domain: %(err)s" msgstr "" -#: cinder/exception.py:709 -#, python-format -msgid "Security group with rule %(rule_id)s not found." +#: cinder/exception.py:641 +#, fuzzy +msgid "Unknown NFS exception" +msgstr "發生一個未知例外" + +#: cinder/exception.py:645 +msgid "No mounted NFS shares found" msgstr "" -#: cinder/exception.py:713 +#: cinder/exception.py:649 cinder/exception.py:662 #, python-format -msgid "" -"Security group %(security_group_id)s is already associated with the " -"instance %(instance_id)s" +msgid "There is no share which can host %(volume_size)sG" msgstr "" -#: cinder/exception.py:718 -#, python-format -msgid "" -"Security group %(security_group_id)s is not associated with the instance " -"%(instance_id)s" +#: cinder/exception.py:654 +#, fuzzy +msgid "Unknown Gluster exception" +msgstr "發生一個未知例外" + +#: cinder/exception.py:658 +msgid "No mounted Gluster shares found" msgstr "" -#: cinder/exception.py:723 -#, python-format -msgid "Migration %(migration_id)s could not be found." +#: cinder/manager.py:133 +msgid "Notifying Schedulers of capabilities ..." msgstr "" -#: cinder/exception.py:727 -#, python-format -msgid "Migration not found for instance %(instance_id)s with status %(status)s." +#: cinder/policy.py:30 +msgid "JSON file representing policy" msgstr "" -#: cinder/exception.py:732 -#, python-format -msgid "Console pool %(pool_id)s could not be found." +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" msgstr "" -#: cinder/exception.py:736 +#: cinder/quota.py:105 #, python-format msgid "" -"Console pool of type %(console_type)s for compute host %(compute_host)s " -"on proxy host %(host)s not found." +"Default quota for resource: %(res)s is set by the default quota flag: " +"quota_%(res)s, it is now deprecated. Please use the the default quota " +"class for default quota." 
msgstr "" -#: cinder/exception.py:742 +#: cinder/quota.py:748 #, python-format -msgid "Console %(console_id)s could not be found." +msgid "Created reservations %s" msgstr "" -#: cinder/exception.py:746 +#: cinder/quota.py:770 #, python-format -msgid "Console for instance %(instance_id)s could not be found." +msgid "Failed to commit reservations %s" msgstr "" -#: cinder/exception.py:750 +#: cinder/quota.py:790 #, python-format -msgid "" -"Console for instance %(instance_id)s in pool %(pool_id)s could not be " -"found." +msgid "Failed to roll back reservations %s" msgstr "" -#: cinder/exception.py:755 -#, python-format -msgid "Invalid console type %(console_type)s " +#: cinder/quota.py:876 +msgid "Cannot register resource" msgstr "" -#: cinder/exception.py:759 -msgid "Zero instance types found." +#: cinder/quota.py:879 +msgid "Cannot register resources" msgstr "" -#: cinder/exception.py:763 +#: cinder/quota_utils.py:46 #, python-format -msgid "Instance type %(instance_type_id)s could not be found." +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume - " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" msgstr "" -#: cinder/exception.py:767 +#: cinder/quota_utils.py:56 cinder/transfer/api.py:168 +#: cinder/volume/flows/api/create_volume.py:520 #, python-format -msgid "Instance type with name %(instance_type_name)s could not be found." +msgid "" +"Quota exceeded for %(s_pid)s, tried to create volume (%(d_consumed)d " +"volumes already consumed)" msgstr "" -#: cinder/exception.py:772 +#: cinder/service.py:95 #, python-format -msgid "Flavor %(flavor_id)s could not be found." +msgid "Starting %(topic)s node (version %(version_string)s)" msgstr "" -#: cinder/exception.py:776 +#: cinder/service.py:108 cinder/openstack/common/rpc/service.py:47 #, python-format -msgid "Cell %(cell_id)s could not be found." +msgid "Creating Consumer connection for Service %s" msgstr "" -#: cinder/exception.py:780 +#: cinder/service.py:148 #, python-format -msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgid "" +"Report interval must be less than service down time. Current config " +"service_down_time: %(service_down_time)s, report_interval for this: " +"service is: %(report_interval)s. Setting global service_down_time to: " +"%(new_down_time)s" msgstr "" -#: cinder/exception.py:784 -#, python-format -msgid "Scheduler cost function %(cost_fn_str)s could not be found." +#: cinder/service.py:216 +msgid "Service killed that has no database entry" msgstr "" -#: cinder/exception.py:789 -#, python-format -msgid "Scheduler weight flag not found: %(flag_name)s" +#: cinder/service.py:255 +msgid "The service database object disappeared, Recreating it." msgstr "" -#: cinder/exception.py:793 -#, python-format -msgid "Instance %(instance_id)s has no metadata with key %(metadata_key)s." +#: cinder/service.py:270 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:276 +msgid "model server went away" msgstr "" -#: cinder/exception.py:798 +#: cinder/service.py:298 #, python-format msgid "" -"Instance Type %(instance_type_id)s has no extra specs with key " -"%(extra_specs_key)s." +"Value of config option %(name)s_workers must be integer greater than 1. " +"Input value ignored." msgstr "" -#: cinder/exception.py:803 -msgid "LDAP object could not be found" +#: cinder/service.py:373 +msgid "serve() can only be called once" msgstr "" -#: cinder/exception.py:807 -#, python-format -msgid "LDAP user %(user_id)s could not be found." 
+#: cinder/service.py:379 cinder/openstack/common/service.py:166 +#: cinder/openstack/common/service.py:384 +msgid "Full set of CONF:" msgstr "" -#: cinder/exception.py:811 +#: cinder/service.py:387 #, python-format -msgid "LDAP group %(group_id)s could not be found." +msgid "%s : FLAG SET " msgstr "" -#: cinder/exception.py:815 +#: cinder/utils.py:96 #, python-format -msgid "LDAP user %(user_id)s is not a member of group %(group_id)s." +msgid "Can not translate %s to integer." msgstr "" -#: cinder/exception.py:819 +#: cinder/utils.py:127 #, python-format -msgid "File %(file_path)s could not be found." +msgid "May specify only one of %s" msgstr "" -#: cinder/exception.py:823 -msgid "Zero files could be found." +#: cinder/utils.py:212 +msgid "Specify a password or private_key" msgstr "" -#: cinder/exception.py:827 +#: cinder/utils.py:228 #, python-format -msgid "Virtual switch associated with the network adapter %(adapter)s not found." +msgid "Error connecting via ssh: %s" msgstr "" -#: cinder/exception.py:832 +#: cinder/utils.py:412 #, python-format -msgid "Network adapter %(adapter)s could not be found." +msgid "Invalid backend: %s" msgstr "" -#: cinder/exception.py:836 +#: cinder/utils.py:423 #, python-format -msgid "Class %(class_name)s could not be found: %(exception)s" +msgid "backend %s" msgstr "" -#: cinder/exception.py:840 -msgid "Action not allowed." +#: cinder/utils.py:698 +#, python-format +msgid "Could not remove tmpdir: %s" msgstr "" -#: cinder/exception.py:844 +#: cinder/utils.py:759 #, python-format -msgid "Unable to use global role %(role_id)s" +msgid "Volume driver %s not initialized" msgstr "" -#: cinder/exception.py:848 -msgid "Rotation is not allowed for snapshots" +#: cinder/wsgi.py:127 cinder/openstack/common/sslutils.py:50 +#, python-format +msgid "Unable to find cert_file : %s" msgstr "" -#: cinder/exception.py:852 -msgid "Rotation param is required for backup image_type" -msgstr "" +#: cinder/wsgi.py:130 cinder/openstack/common/sslutils.py:53 +#, fuzzy, python-format +msgid "Unable to find ca_file : %s" +msgstr "找不到Volume %s" -#: cinder/exception.py:861 +#: cinder/wsgi.py:133 cinder/openstack/common/sslutils.py:56 #, python-format -msgid "Key pair %(key_name)s already exists." +msgid "Unable to find key_file : %s" msgstr "" -#: cinder/exception.py:865 -#, python-format -msgid "User %(user)s already exists." +#: cinder/wsgi.py:136 cinder/openstack/common/sslutils.py:59 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" msgstr "" -#: cinder/exception.py:869 +#: cinder/wsgi.py:169 #, python-format -msgid "LDAP user %(user)s already exists." +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" msgstr "" -#: cinder/exception.py:873 +#: cinder/wsgi.py:206 #, python-format -msgid "LDAP group %(group)s already exists." +msgid "Started %(name)s on %(host)s:%(port)s" msgstr "" -#: cinder/exception.py:877 -#, python-format -msgid "User %(uid)s is already a member of the group %(group_dn)s" +#: cinder/wsgi.py:226 +msgid "Stopping WSGI server." msgstr "" -#: cinder/exception.py:882 -#, python-format -msgid "Project %(project)s already exists." +#: cinder/wsgi.py:244 +msgid "WSGI server has stopped." msgstr "" -#: cinder/exception.py:886 -#, python-format -msgid "Instance %(name)s already exists." +#: cinder/wsgi.py:313 +msgid "You must implement __call__" msgstr "" -#: cinder/exception.py:890 -#, python-format -msgid "Instance Type %(name)s already exists." 
+#: cinder/api/auth.py:26 +msgid "" +"cinder.api.auth:CinderKeystoneContext is deprecated. Please use " +"cinder.api.middleware.auth:CinderKeystoneContext instead." msgstr "" -#: cinder/exception.py:894 -#, python-format -msgid "Volume Type %(name)s already exists." +#: cinder/api/auth.py:34 +msgid "" +"cinder.api.auth:pipeline_factory is deprecated. Please use " +"cinder.api.middleware.auth:pipeline_factory instead." msgstr "" -#: cinder/exception.py:898 -#, python-format -msgid "%(path)s is on shared storage: %(reason)s" +#: cinder/api/common.py:92 cinder/api/common.py:126 cinder/volume/api.py:266 +msgid "limit param must be an integer" msgstr "" -#: cinder/exception.py:902 -msgid "Migration error" +#: cinder/api/common.py:95 cinder/api/common.py:130 cinder/volume/api.py:263 +msgid "limit param must be positive" msgstr "" -#: cinder/exception.py:906 -#, python-format -msgid "Malformed message body: %(reason)s" +#: cinder/api/common.py:120 +msgid "offset param must be an integer" msgstr "" -#: cinder/exception.py:910 -#, python-format -msgid "Could not find config at %(path)s" +#: cinder/api/common.py:134 +msgid "offset param must be positive" msgstr "" -#: cinder/exception.py:914 +#: cinder/api/common.py:162 #, python-format -msgid "Could not load paste app '%(name)s' from %(path)s" -msgstr "" - -#: cinder/exception.py:918 -msgid "When resizing, instances must change size!" -msgstr "" - -#: cinder/exception.py:922 -msgid "Image is larger than instance type allows" +msgid "marker [%s] not found" msgstr "" -#: cinder/exception.py:926 -msgid "1 or more Zones could not complete the request" +#: cinder/api/common.py:189 +#, python-format +msgid "href %s does not contain version" msgstr "" -#: cinder/exception.py:930 -msgid "Instance type's memory is too small for requested image." +#: cinder/api/extensions.py:182 +msgid "Initializing extension manager." msgstr "" -#: cinder/exception.py:934 -msgid "Instance type's disk is too small for requested image." +#: cinder/api/extensions.py:197 +#, python-format +msgid "Loaded extension: %s" msgstr "" -#: cinder/exception.py:938 +#: cinder/api/extensions.py:235 #, python-format -msgid "Insufficient free memory on compute node to start %(uuid)s." +msgid "Ext name: %s" msgstr "" -#: cinder/exception.py:942 -msgid "Could not fetch bandwidth/cpu/disk metrics for this host." +#: cinder/api/extensions.py:236 +#, python-format +msgid "Ext alias: %s" msgstr "" -#: cinder/exception.py:946 +#: cinder/api/extensions.py:237 #, python-format -msgid "No valid host was found. %(reason)s" +msgid "Ext description: %s" msgstr "" -#: cinder/exception.py:950 +#: cinder/api/extensions.py:239 #, python-format -msgid "Host %(host)s is not up or doesn't exist." +msgid "Ext namespace: %s" msgstr "" -#: cinder/exception.py:954 -msgid "Quota exceeded" +#: cinder/api/extensions.py:240 +#, python-format +msgid "Ext updated: %s" msgstr "" -#: cinder/exception.py:958 +#: cinder/api/extensions.py:242 #, python-format -msgid "" -"Aggregate %(aggregate_id)s: action '%(action)s' caused an error: " -"%(reason)s." +msgid "Exception loading extension: %s" msgstr "" -#: cinder/exception.py:963 +#: cinder/api/extensions.py:256 #, python-format -msgid "Aggregate %(aggregate_id)s could not be found." +msgid "Loading extension %s" msgstr "" -#: cinder/exception.py:967 +#: cinder/api/extensions.py:262 #, python-format -msgid "Aggregate %(aggregate_name)s already exists." 
+msgid "Calling extension factory %s" msgstr "" -#: cinder/exception.py:971 +#: cinder/api/extensions.py:276 #, python-format -msgid "Aggregate %(aggregate_id)s has no host %(host)s." +msgid "osapi_volume_extension is set to deprecated path: %s" msgstr "" -#: cinder/exception.py:975 +#: cinder/api/extensions.py:278 #, python-format -msgid "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s." +msgid "" +"Please set your flag or cinder.conf settings for osapi_volume_extension " +"to: %s" msgstr "" -#: cinder/exception.py:980 +#: cinder/api/extensions.py:287 #, python-format -msgid "Host %(host)s already member of another aggregate." +msgid "Failed to load extension %(ext_factory)s: %(exc)s" msgstr "" -#: cinder/exception.py:984 +#: cinder/api/extensions.py:356 #, python-format -msgid "Aggregate %(aggregate_id)s already has host %(host)s." +msgid "Failed to load extension %(classpath)s: %(exc)s" msgstr "" -#: cinder/exception.py:988 +#: cinder/api/extensions.py:381 #, python-format -msgid "Detected more than one volume with name %(vol_name)s" +msgid "Failed to load extension %(ext_name)s: %(exc)s" msgstr "" -#: cinder/exception.py:992 -#, python-format -msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +#: cinder/api/sizelimit.py:25 +msgid "" +"cinder.api.sizelimit:RequestBodySizeLimiter is deprecated. Please use " +"cinder.api.middleware.sizelimit:RequestBodySizeLimiter instead" msgstr "" -#: cinder/exception.py:997 -msgid "Unable to create instance type" +#: cinder/api/xmlutil.py:266 +msgid "element is not a child" msgstr "" -#: cinder/exception.py:1001 -msgid "Bad response from SolidFire API" +#: cinder/api/xmlutil.py:463 +msgid "root element selecting a list" msgstr "" -#: cinder/exception.py:1005 +#: cinder/api/xmlutil.py:786 #, python-format -msgid "Error in SolidFire API response: status=%(status)s" +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" msgstr "" -#: cinder/exception.py:1009 -#, python-format -msgid "Error in SolidFire API response: data=%(data)s" +#: cinder/api/xmlutil.py:907 +msgid "subclasses must implement construct()!" msgstr "" -#: cinder/exception.py:1013 +#: cinder/api/contrib/admin_actions.py:81 #, python-format -msgid "Detected existing vlan with id %(vlan)d" +msgid "Updating %(resource)s '%(id)s' with '%(update)r'" msgstr "" -#: cinder/exception.py:1017 +#: cinder/api/contrib/backups.py:128 #, python-format -msgid "Instance %(instance_id)s could not be found." +msgid "show called for member %s" msgstr "" -#: cinder/exception.py:1021 +#: cinder/api/contrib/backups.py:140 #, python-format -msgid "Invalid id: %(val)s (expecting \"i-...\")." 
+msgid "delete called for member %s" msgstr "" -#: cinder/exception.py:1025 +#: cinder/api/contrib/backups.py:143 #, python-format -msgid "Could not fetch image %(image)s" +msgid "Delete backup with id: %s" msgstr "" -#: cinder/log.py:315 +#: cinder/api/contrib/backups.py:185 #, python-format -msgid "syslog facility must be one of: %s" +msgid "Creating new backup %s" +msgstr "" + +#: cinder/api/contrib/backups.py:195 cinder/api/contrib/backups.py:227 +#: cinder/api/contrib/volume_transfer.py:157 +#: cinder/api/contrib/volume_transfer.py:193 +msgid "Incorrect request body format" msgstr "" -#: cinder/manager.py:146 +#: cinder/api/contrib/backups.py:201 #, python-format -msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgid "Creating backup of volume %(volume_id)s in container %(container)s" msgstr "" -#: cinder/manager.py:152 +#: cinder/api/contrib/backups.py:224 #, python-format -msgid "Running periodic task %(full_task_name)s" +msgid "Restoring backup %(backup_id)s (%(body)s)" msgstr "" -#: cinder/manager.py:159 +#: cinder/api/contrib/backups.py:234 #, python-format -msgid "Error during %(full_task_name)s: %(e)s" +msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" msgstr "" -#: cinder/manager.py:203 -msgid "Notifying Schedulers of capabilities ..." +#: cinder/api/contrib/extended_snapshot_attributes.py:60 +msgid "Snapshot not found." msgstr "" -#: cinder/policy.py:30 -msgid "JSON file representing policy" +#: cinder/api/contrib/hosts.py:86 cinder/api/openstack/wsgi.py:245 +msgid "cannot understand XML" msgstr "" -#: cinder/policy.py:33 -msgid "Rule checked when requested rule is not found" +#: cinder/api/contrib/hosts.py:136 +#, python-format +msgid "Host '%s' could not be found." msgstr "" -#: cinder/service.py:137 -msgid "SIGTERM received" +#: cinder/api/contrib/hosts.py:165 +#, python-format +msgid "Invalid status: '%s'" msgstr "" -#: cinder/service.py:177 +#: cinder/api/contrib/hosts.py:168 #, python-format -msgid "Starting %(topic)s node (version %(vcs_string)s)" +msgid "Invalid update setting: '%s'" msgstr "" -#: cinder/service.py:195 +#: cinder/api/contrib/hosts.py:180 #, python-format -msgid "Creating Consumer connection for Service %s" +msgid "Setting host %(host)s to %(state)s." msgstr "" -#: cinder/service.py:282 -msgid "Service killed that has no database entry" +#: cinder/api/contrib/hosts.py:206 +msgid "Describe-resource is admin only functionality" msgstr "" -#: cinder/service.py:319 -msgid "The service database object disappeared, Recreating it." +#: cinder/api/contrib/hosts.py:214 +msgid "Host not found" msgstr "" -#: cinder/service.py:334 -msgid "Recovered model server connection!" +#: cinder/api/contrib/qos_specs_manage.py:111 +msgid "Please specify a name for QoS specs." msgstr "" -#: cinder/service.py:340 -msgid "model server went away" +#: cinder/api/contrib/qos_specs_manage.py:220 +msgid "Failed to disassociate qos specs." msgstr "" -#: cinder/service.py:433 -msgid "Full set of FLAGS:" +#: cinder/api/contrib/qos_specs_manage.py:222 +msgid "Qos specs still in use." msgstr "" -#: cinder/service.py:440 -#, python-format -msgid "%(flag)s : FLAG SET " +#: cinder/api/contrib/qos_specs_manage.py:298 +#: cinder/api/contrib/qos_specs_manage.py:351 +msgid "Volume Type id must not be None." msgstr "" -#: cinder/utils.py:79 -#, python-format -msgid "Inner Exception: %s" +#: cinder/api/contrib/quota_classes.py:72 +msgid "Missing required element quota_class_set in request body." 
msgstr "" -#: cinder/utils.py:165 -#, python-format -msgid "Fetching %s" +#: cinder/api/contrib/quota_classes.py:81 +msgid "Quota class limit must be specified as an integer value." msgstr "" -#: cinder/utils.py:210 -#, python-format -msgid "Got unknown keyword args to utils.execute: %r" +#: cinder/api/contrib/quota_classes.py:85 +msgid "Quota class limit must be -1 or greater." msgstr "" -#: cinder/utils.py:220 -#, python-format -msgid "Running cmd (subprocess): %s" +#: cinder/api/contrib/quotas.py:60 +msgid "Quota limit must be specified as an integer value." msgstr "" -#: cinder/utils.py:236 cinder/utils.py:315 -#, python-format -msgid "Result was %s" +#: cinder/api/contrib/quotas.py:65 +msgid "Quota limit must be -1 or greater." msgstr "" -#: cinder/utils.py:249 -#, python-format -msgid "%r failed. Retrying." +#: cinder/api/contrib/quotas.py:100 +msgid "Missing required element quota_set in request body." msgstr "" -#: cinder/utils.py:291 +#: cinder/api/contrib/quotas.py:111 #, python-format -msgid "Running cmd (SSH): %s" +msgid "Bad key(s) in quota set: %s" msgstr "" -#: cinder/utils.py:293 -msgid "Environment not supported over SSH" +#: cinder/api/contrib/scheduler_hints.py:36 +msgid "Malformed scheduler_hints attribute" msgstr "" -#: cinder/utils.py:297 -msgid "process_input not supported over SSH" +#: cinder/api/contrib/services.py:84 +msgid "" +"Query by service parameter is deprecated. Please use binary parameter " +"instead." msgstr "" -#: cinder/utils.py:352 -#, python-format -msgid "debug in callback: %s" +#: cinder/api/contrib/snapshot_actions.py:51 +msgid "'status' must be specified." msgstr "" -#: cinder/utils.py:534 +#: cinder/api/contrib/snapshot_actions.py:61 #, python-format -msgid "Link Local address is not found.:%s" +msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" msgstr "" -#: cinder/utils.py:537 +#: cinder/api/contrib/snapshot_actions.py:67 #, python-format -msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgid "" +"Provided snapshot status %(provided)s not allowed for snapshot with " +"status %(current)s." msgstr "" -#: cinder/utils.py:648 -#, python-format -msgid "Invalid backend: %s" +#: cinder/api/contrib/snapshot_actions.py:79 +msgid "progress must be an integer percentage" msgstr "" -#: cinder/utils.py:659 -#, python-format -msgid "backend %s" +#: cinder/api/contrib/types_extra_specs.py:101 +msgid "Request body empty" msgstr "" -#: cinder/utils.py:709 -msgid "in looping call" +#: cinder/api/contrib/types_extra_specs.py:105 +#: cinder/api/v1/snapshot_metadata.py:75 cinder/api/v1/volume_metadata.py:75 +#: cinder/api/v2/snapshot_metadata.py:75 cinder/api/v2/volume_metadata.py:74 +msgid "Request body and URI mismatch" msgstr "" -#: cinder/utils.py:927 -#, python-format -msgid "Attempting to grab semaphore \"%(lock)s\" for method \"%(method)s\"..." +#: cinder/api/contrib/types_extra_specs.py:108 +#: cinder/api/v1/snapshot_metadata.py:79 cinder/api/v1/volume_metadata.py:79 +#: cinder/api/v2/snapshot_metadata.py:79 cinder/api/v2/volume_metadata.py:78 +msgid "Request body contains too many items" msgstr "" -#: cinder/utils.py:931 -#, python-format -msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +#: cinder/api/contrib/types_extra_specs.py:150 +msgid "" +"Key names can only contain alphanumeric characters, underscores, periods," +" colons and hyphens." 
msgstr "" -#: cinder/utils.py:935 +#: cinder/api/contrib/volume_actions.py:99 #, python-format -msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgid "" +"Invalid request to attach volume to an instance %(instance_uuid)s and a " +"host %(host_name)s simultaneously" msgstr "" -#: cinder/utils.py:942 -#, python-format -msgid "Got file lock \"%(lock)s\" for method \"%(method)s\"..." +#: cinder/api/contrib/volume_actions.py:107 +msgid "Invalid request to attach volume to an invalid target" msgstr "" -#: cinder/utils.py:1001 -#, python-format -msgid "Found sentinel %(filename)s for pid %(pid)s" +#: cinder/api/contrib/volume_actions.py:111 +msgid "" +"Invalid request to attach volume with an invalid mode. Attaching mode " +"should be 'rw' or 'ro'" msgstr "" -#: cinder/utils.py:1008 -#, python-format -msgid "Cleaned sentinel %(filename)s for pid %(pid)s" +#: cinder/api/contrib/volume_actions.py:196 +msgid "Unable to fetch connection information from backend." msgstr "" -#: cinder/utils.py:1023 -#, python-format -msgid "Found lockfile %(file)s with link count %(count)d" +#: cinder/api/contrib/volume_actions.py:216 +msgid "Unable to terminate volume connection from backend." msgstr "" -#: cinder/utils.py:1028 -#, python-format -msgid "Cleaned lockfile %(file)s with link count %(count)d" +#: cinder/api/contrib/volume_actions.py:229 +msgid "No image_name was specified in request." msgstr "" -#: cinder/utils.py:1138 -#, python-format -msgid "Expected object of type: %s" +#: cinder/api/contrib/volume_actions.py:237 +msgid "Bad value for 'force' parameter." msgstr "" -#: cinder/utils.py:1169 -#, python-format -msgid "Invalid server_string: %s" +#: cinder/api/contrib/volume_actions.py:240 +msgid "'force' is not string or bool." msgstr "" -#: cinder/utils.py:1298 -#, python-format -msgid "timefunc: '%(name)s' took %(total_time).2f secs" +#: cinder/api/contrib/volume_actions.py:280 +msgid "New volume size must be specified as an integer." msgstr "" -#: cinder/utils.py:1330 -msgid "Original exception being dropped" +#: cinder/api/contrib/volume_actions.py:299 +msgid "Must specify readonly in request." msgstr "" -#: cinder/utils.py:1461 -#, python-format -msgid "Class %(fullname)s is deprecated: %(msg)s" +#: cinder/api/contrib/volume_actions.py:307 +msgid "Bad value for 'readonly'" msgstr "" -#: cinder/utils.py:1463 -#, python-format -msgid "Class %(fullname)s is deprecated" +#: cinder/api/contrib/volume_actions.py:311 +msgid "'readonly' not string or bool" msgstr "" -#: cinder/utils.py:1495 -#, python-format -msgid "Function %(name)s in %(location)s is deprecated: %(msg)s" +#: cinder/api/contrib/volume_actions.py:325 +msgid "New volume type must be specified." msgstr "" -#: cinder/utils.py:1497 -#, python-format -msgid "Function %(name)s in %(location)s is deprecated" +#: cinder/api/contrib/volume_transfer.py:131 +msgid "Listing volume transfers" msgstr "" -#: cinder/utils.py:1681 +#: cinder/api/contrib/volume_transfer.py:147 #, python-format -msgid "Could not remove tmpdir: %s" +msgid "Creating new volume transfer %s" msgstr "" -#: cinder/wsgi.py:97 +#: cinder/api/contrib/volume_transfer.py:162 +#, fuzzy, python-format +msgid "Creating transfer of volume %s" +msgstr "找不到Volume %s" + +#: cinder/api/contrib/volume_transfer.py:183 #, python-format -msgid "Started %(name)s on %(host)s:%(port)s" +msgid "Accepting volume transfer %s" msgstr "" -#: cinder/wsgi.py:108 -msgid "Stopping WSGI server." 
+#: cinder/api/contrib/volume_transfer.py:196
+#, python-format
+msgid "Accepting transfer %s"
 msgstr ""

-#: cinder/wsgi.py:111
-msgid "Stopping raw TCP server."
+#: cinder/api/contrib/volume_transfer.py:217
+#, python-format
+msgid "Delete transfer with id: %s"
 msgstr ""

-#: cinder/wsgi.py:117
-#, python-format
-msgid "Starting TCP server %(arg0)s on %(host)s:%(port)s"
+#: cinder/api/contrib/volume_type_encryption.py:64
+msgid "key_size must be non-negative"
 msgstr ""

-#: cinder/wsgi.py:133
-msgid "WSGI server has stopped."
+#: cinder/api/contrib/volume_type_encryption.py:67
+msgid "key_size must be an integer"
 msgstr ""

-#: cinder/wsgi.py:211
-msgid "You must implement __call__"
+#: cinder/api/contrib/volume_type_encryption.py:73
+msgid "provider must be defined"
 msgstr ""

-#: cinder/api/direct.py:218
-msgid "not available"
+#: cinder/api/contrib/volume_type_encryption.py:75
+msgid "control_location must be defined"
 msgstr ""

-#: cinder/api/direct.py:299
+#: cinder/api/contrib/volume_type_encryption.py:83
 #, python-format
-msgid "Returned non-serializeable type: %s"
+msgid "Valid control location are: %s"
 msgstr ""

-#: cinder/api/sizelimit.py:51
-msgid "Request is too large."
+#: cinder/api/contrib/volume_type_encryption.py:111
+msgid "Cannot create encryption specs. Volume type in use."
 msgstr ""

-#: cinder/api/validator.py:142
-#, python-format
-msgid "%(key)s with value %(value)s failed validator %(validator)s"
+#: cinder/api/contrib/volume_type_encryption.py:115
+msgid "Create body is not valid."
 msgstr ""

-#: cinder/api/ec2/__init__.py:73
-#, python-format
-msgid "%(code)s: %(message)s"
+#: cinder/api/contrib/volume_type_encryption.py:157
+msgid "Cannot delete encryption specs. Volume type in use."
 msgstr ""

-#: cinder/api/ec2/__init__.py:95
-#, python-format
-msgid "FaultWrapper: %s"
+#: cinder/api/middleware/auth.py:108
+msgid "Invalid service catalog json."
 msgstr ""

-#: cinder/api/ec2/__init__.py:170
-msgid "Too many failed authentications."
+#: cinder/api/middleware/fault.py:44
+#, python-format
+msgid "Caught error: %s"
 msgstr ""

-#: cinder/api/ec2/__init__.py:180
+#: cinder/api/middleware/fault.py:53 cinder/api/openstack/wsgi.py:984
 #, python-format
-msgid ""
-"Access key %(access_key)s has had %(failures)d failed authentications and"
-" will be locked out for %(lock_mins)d minutes."
+msgid "%(url)s returned with HTTP %(status)d"
 msgstr ""

-#: cinder/api/ec2/__init__.py:267
-msgid "Signature not provided"
+#: cinder/api/middleware/fault.py:69
+#, python-format
+msgid "%(exception)s: %(explanation)s"
 msgstr ""

-#: cinder/api/ec2/__init__.py:271
-msgid "Access key not provided"
+#: cinder/api/middleware/sizelimit.py:55 cinder/api/middleware/sizelimit.py:64
+#: cinder/api/middleware/sizelimit.py:78
+msgid "Request is too large."
 msgstr ""

-#: cinder/api/ec2/__init__.py:306 cinder/api/ec2/__init__.py:319
-msgid "Failure communicating with keystone"
+#: cinder/api/openstack/__init__.py:69
+msgid "Must specify an ExtensionManager class"
 msgstr ""

-#: cinder/api/ec2/__init__.py:388
+#: cinder/api/openstack/__init__.py:80
 #, python-format
-msgid "Authentication Failure: %s"
+msgid "Extended resource: %s"
 msgstr ""

-#: cinder/api/ec2/__init__.py:404
+#: cinder/api/openstack/__init__.py:104
 #, python-format
-msgid "Authenticated Request For %(uname)s:%(pname)s)"
+msgid ""
+"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such "
+"resource"
 msgstr ""

-#: cinder/api/ec2/__init__.py:435
+#: cinder/api/openstack/__init__.py:110
 #, python-format
-msgid "action: %s"
+msgid "Extension %(ext_name)s extending resource: %(collection)s"
 msgstr ""

-#: cinder/api/ec2/__init__.py:437
-#, python-format
-msgid "arg: %(key)s\t\tval: %(value)s"
+#: cinder/api/openstack/__init__.py:126
+msgid ""
+"cinder.api.openstack:FaultWrapper is deprecated. Please use "
+"cinder.api.middleware.fault:FaultWrapper instead."
 msgstr ""

-#: cinder/api/ec2/__init__.py:512
-#, python-format
-msgid "Unauthorized request for controller=%(controller)s and action=%(action)s"
+#: cinder/api/openstack/urlmap.py:25
+msgid ""
+"cinder.api.openstack.urlmap:urlmap_factory is deprecated. Please use "
+"cinder.api.urlmap:urlmap_factory instead."
 msgstr ""

-#: cinder/api/ec2/__init__.py:584
-#, python-format
-msgid "InstanceNotFound raised: %s"
+#: cinder/api/openstack/wsgi.py:220 cinder/api/openstack/wsgi.py:634
+msgid "cannot understand JSON"
 msgstr ""

-#: cinder/api/ec2/__init__.py:590
-#, python-format
-msgid "VolumeNotFound raised: %s"
+#: cinder/api/openstack/wsgi.py:639
+msgid "too many body keys"
 msgstr ""

-#: cinder/api/ec2/__init__.py:596
+#: cinder/api/openstack/wsgi.py:677
 #, python-format
-msgid "SnapshotNotFound raised: %s"
+msgid "Exception handling resource: %s"
 msgstr ""

-#: cinder/api/ec2/__init__.py:602
+#: cinder/api/openstack/wsgi.py:682
 #, python-format
-msgid "NotFound raised: %s"
+msgid "Fault thrown: %s"
 msgstr ""

-#: cinder/api/ec2/__init__.py:605
+#: cinder/api/openstack/wsgi.py:685
 #, python-format
-msgid "EC2APIError raised: %s"
+msgid "HTTP exception thrown: %s"
 msgstr ""

-#: cinder/api/ec2/__init__.py:613
-#, python-format
-msgid "KeyPairExists raised: %s"
+#: cinder/api/openstack/wsgi.py:793
+msgid "Empty body provided in request"
 msgstr ""

-#: cinder/api/ec2/__init__.py:617
-#, python-format
-msgid "InvalidParameterValue raised: %s"
+#: cinder/api/openstack/wsgi.py:799
+msgid "Unrecognized Content-Type provided in request"
 msgstr ""

-#: cinder/api/ec2/__init__.py:621
-#, python-format
-msgid "InvalidPortRange raised: %s"
+#: cinder/api/openstack/wsgi.py:803
+msgid "No Content-Type provided in request"
 msgstr ""

-#: cinder/api/ec2/__init__.py:625
+#: cinder/api/openstack/wsgi.py:914
 #, python-format
-msgid "NotAuthorized raised: %s"
+msgid "There is no such action: %s"
 msgstr ""

-#: cinder/api/ec2/__init__.py:629
-#, python-format
-msgid "InvalidRequest raised: %s"
+#: cinder/api/openstack/wsgi.py:917 cinder/api/openstack/wsgi.py:930
+#: cinder/api/v1/snapshot_metadata.py:53 cinder/api/v1/snapshot_metadata.py:71
+#: cinder/api/v1/snapshot_metadata.py:96 cinder/api/v1/snapshot_metadata.py:121
+#: cinder/api/v1/volume_metadata.py:53 cinder/api/v1/volume_metadata.py:71
+#: cinder/api/v1/volume_metadata.py:96 cinder/api/v1/volume_metadata.py:121
+#: cinder/api/v2/snapshot_metadata.py:53 cinder/api/v2/snapshot_metadata.py:71
+#: cinder/api/v2/snapshot_metadata.py:96 cinder/api/v2/snapshot_metadata.py:121
+#: cinder/api/v2/volume_metadata.py:52 cinder/api/v2/volume_metadata.py:70
+#: cinder/api/v2/volume_metadata.py:95 cinder/api/v2/volume_metadata.py:120
+msgid "Malformed request body"
 msgstr ""

-#: cinder/api/ec2/__init__.py:633
-#, python-format
-msgid "QuotaError raised: %s"
+#: cinder/api/openstack/wsgi.py:927
+msgid "Unsupported Content-Type"
 msgstr ""

-#: cinder/api/ec2/__init__.py:637
-#, python-format
-msgid "Invalid id: bogus (expecting \"i-...\"): %s"
+#: cinder/api/openstack/wsgi.py:939
+msgid "Malformed request url"
 msgstr ""

-#: cinder/api/ec2/__init__.py:646
+#: cinder/api/openstack/wsgi.py:987
 #, python-format
-msgid "Unexpected error raised: %s"
+msgid "%(url)s returned a fault: %(e)s"
 msgstr ""

-#: cinder/api/ec2/__init__.py:647
-#, python-format
-msgid "Environment: %s"
+#: cinder/api/openstack/volume/__init__.py:25
+msgid ""
+"cinder.api.openstack.volume:APIRouter is deprecated. Please use "
+"cinder.api.v1.router:APIRouter instead."
 msgstr ""

-#: cinder/api/ec2/__init__.py:649 cinder/api/metadata/handler.py:248
-msgid "An unknown error has occurred. Please try your request again."
+#: cinder/api/openstack/volume/versions.py:26
+msgid ""
+"cinder.api.openstack.volume.versions.Versions is deprecated. Please use "
+"cinder.api.versions.Versions instead."
 msgstr ""

-#: cinder/api/ec2/apirequest.py:64
+#: cinder/api/v1/limits.py:139 cinder/api/v2/limits.py:138
 #, python-format
-msgid "Unsupported API request: controller = %(controller)s, action = %(action)s"
+msgid ""
+"Only %(value)s %(verb)s request(s) can be made to %(uri)s every "
+"%(unit_string)s."
 msgstr ""

-#: cinder/api/ec2/cloud.py:336
-#, python-format
-msgid "Create snapshot of volume %s"
+#: cinder/api/v1/limits.py:264 cinder/api/v2/limits.py:261
+msgid "This request was rate-limited."
 msgstr ""

-#: cinder/api/ec2/cloud.py:372
-#, python-format
-msgid ""
-"Value (%s) for KeyName is invalid. Content limited to Alphanumeric "
-"character, spaces, dashes, and underscore."
+#: cinder/api/v1/snapshot_metadata.py:37 cinder/api/v1/snapshot_metadata.py:117
+#: cinder/api/v1/snapshot_metadata.py:156 cinder/api/v2/snapshot_metadata.py:37
+#: cinder/api/v2/snapshot_metadata.py:117
+#: cinder/api/v2/snapshot_metadata.py:156
+msgid "snapshot does not exist"
 msgstr ""

-#: cinder/api/ec2/cloud.py:378
-#, python-format
-msgid "Value (%s) for Keyname is invalid. Length exceeds maximum of 255."
+#: cinder/api/v1/snapshot_metadata.py:139
+#: cinder/api/v1/snapshot_metadata.py:149 cinder/api/v1/volume_metadata.py:139
+#: cinder/api/v1/volume_metadata.py:149 cinder/api/v2/snapshot_metadata.py:139
+#: cinder/api/v2/snapshot_metadata.py:149 cinder/api/v2/volume_metadata.py:138
+#: cinder/api/v2/volume_metadata.py:148
+msgid "Metadata item was not found"
 msgstr ""

-#: cinder/api/ec2/cloud.py:382
+#: cinder/api/v1/snapshots.py:119 cinder/api/v2/snapshots.py:120
 #, python-format
-msgid "Create key pair %s"
+msgid "Delete snapshot with id: %s"
 msgstr ""

-#: cinder/api/ec2/cloud.py:391
-#, python-format
-msgid "Import key %s"
+#: cinder/api/v1/snapshots.py:173 cinder/api/v2/snapshots.py:184
+msgid "'volume_id' must be specified"
 msgstr ""

-#: cinder/api/ec2/cloud.py:409
+#: cinder/api/v1/snapshots.py:182 cinder/api/v2/snapshots.py:193
 #, python-format
-msgid "Delete key pair %s"
+msgid "Create snapshot from volume %s"
 msgstr ""

-#: cinder/api/ec2/cloud.py:551
-msgid "Invalid CIDR"
+#: cinder/api/v1/snapshots.py:186 cinder/api/v2/snapshots.py:202
+#, python-format
+msgid "Invalid value '%s' for force. "
 msgstr ""

-#: cinder/api/ec2/cloud.py:639 cinder/api/ec2/cloud.py:693
-#: cinder/api/ec2/cloud.py:800
-msgid "Not enough parameters, need group_name or group_id"
+#: cinder/api/v1/volume_metadata.py:37 cinder/api/v1/volume_metadata.py:117
+#: cinder/api/v1/volume_metadata.py:156 cinder/api/v2/volume_metadata.py:36
+#: cinder/api/v2/volume_metadata.py:116 cinder/api/v2/volume_metadata.py:155
+msgid "volume does not exist"
 msgstr ""

-#: cinder/api/ec2/cloud.py:654
-#: cinder/api/openstack/compute/contrib/security_groups.py:517
+#: cinder/api/v1/volumes.py:111
 #, python-format
-msgid "Revoke security group ingress %s"
+msgid "vol=%s"
 msgstr ""

-#: cinder/api/ec2/cloud.py:666 cinder/api/ec2/cloud.py:719
+#: cinder/api/v1/volumes.py:290 cinder/api/v2/volumes.py:228
 #, python-format
-msgid "%s Not enough parameters to build a valid rule"
+msgid "Delete volume with id: %s"
 msgstr ""

-#: cinder/api/ec2/cloud.py:684 cinder/api/ec2/cloud.py:744
-msgid "No rule for the specified parameters."
+#: cinder/api/v1/volumes.py:344 cinder/api/v1/volumes.py:348
+#: cinder/api/v2/volumes.py:298 cinder/api/v2/volumes.py:302
+msgid "Invalid imageRef provided."
 msgstr ""

-#: cinder/api/ec2/cloud.py:708
-#: cinder/api/openstack/compute/contrib/security_groups.py:354
+#: cinder/api/v1/volumes.py:388 cinder/api/v2/volumes.py:354
 #, python-format
-msgid "Authorize security group ingress %s"
+msgid "snapshot id:%s not found"
 msgstr ""

-#: cinder/api/ec2/cloud.py:725
+#: cinder/api/v1/volumes.py:401
 #, python-format
-msgid "%s - This rule already exists in group"
+msgid "source vol id:%s not found"
 msgstr ""

-#: cinder/api/ec2/cloud.py:769
+#: cinder/api/v1/volumes.py:412 cinder/api/v2/volumes.py:377
 #, python-format
-msgid ""
-"Value (%s) for parameter GroupName is invalid. Content limited to "
-"Alphanumeric characters, spaces, dashes, and underscores."
+msgid "Create volume of %s GB"
 msgstr ""

-#: cinder/api/ec2/cloud.py:776
+#: cinder/api/v1/volumes.py:496
 #, python-format
-msgid ""
-"Value (%s) for parameter GroupName is invalid. Length exceeds maximum of "
-"255."
+msgid "Removing options '%(bad_options)s' from query"
 msgstr ""

-#: cinder/api/ec2/cloud.py:780
-#: cinder/api/openstack/compute/contrib/security_groups.py:292
-#, python-format
-msgid "Create Security Group %s"
+#: cinder/api/v2/snapshots.py:111 cinder/api/v2/snapshots.py:126
+#: cinder/api/v2/snapshots.py:267
+msgid "Snapshot could not be found"
 msgstr ""

-#: cinder/api/ec2/cloud.py:783
+#: cinder/api/v2/snapshots.py:174 cinder/api/v2/snapshots.py:234
+#: cinder/api/v2/volumes.py:313 cinder/api/v2/volumes.py:419
 #, python-format
-msgid "group %s already exists"
+msgid "Missing required element '%s' in request body"
 msgstr ""

-#: cinder/api/ec2/cloud.py:815
-#: cinder/api/openstack/compute/contrib/security_groups.py:245
-#, python-format
-msgid "Delete security group %s"
+#: cinder/api/v2/snapshots.py:190 cinder/api/v2/volumes.py:217
+#: cinder/api/v2/volumes.py:234 cinder/api/v2/volumes.py:449
+msgid "Volume could not be found"
 msgstr ""

-#: cinder/api/ec2/cloud.py:823 cinder/compute/manager.py:1630
-#, python-format
-msgid "Get console output for instance %s"
+#: cinder/api/v2/snapshots.py:230 cinder/api/v2/volumes.py:415
+msgid "Missing request body"
 msgstr ""

-#: cinder/api/ec2/cloud.py:894
-#, python-format
-msgid "Create volume from snapshot %s"
+#: cinder/api/v2/types.py:70
+msgid "Volume type not found"
 msgstr ""

-#: cinder/api/ec2/cloud.py:898 cinder/api/openstack/compute/contrib/volumes.py:186
-#: cinder/api/openstack/volume/volumes.py:222
-#, python-format
-msgid "Create volume of %s GB"
+#: cinder/api/v2/volumes.py:237
+msgid "Volume cannot be deleted while in attached state"
 msgstr ""

-#: cinder/api/ec2/cloud.py:921
-msgid "Delete Failed"
+#: cinder/api/v2/volumes.py:343
+msgid "Volume type not found."
 msgstr ""

-#: cinder/api/ec2/cloud.py:931
+#: cinder/api/v2/volumes.py:366
 #, python-format
-msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s"
-msgstr ""
-
-#: cinder/api/ec2/cloud.py:939
-msgid "Attach Failed."
+msgid "source volume id:%s not found"
 msgstr ""

-#: cinder/api/ec2/cloud.py:952 cinder/api/openstack/compute/contrib/volumes.py:366
+#: cinder/api/v2/volumes.py:472
 #, python-format
-msgid "Detach volume %s"
+msgid "Removing options '%s' from query"
 msgstr ""

-#: cinder/api/ec2/cloud.py:959
-msgid "Detach Volume Failed."
+#: cinder/backup/api.py:66
+#, fuzzy
+msgid "Backup status must be available or error"
+msgstr "Volume 狀態需要可被使用"
+
+#: cinder/backup/api.py:105
+#, fuzzy
+msgid "Volume to be backed up must be available"
+msgstr "Volume 狀態需要可被使用"
+
+#: cinder/backup/api.py:140
+#, fuzzy
+msgid "Backup status must be available"
+msgstr "Volume 狀態需要可被使用"
+
+#: cinder/backup/api.py:145
+msgid "Backup to be restored has invalid size"
 msgstr ""

-#: cinder/api/ec2/cloud.py:984 cinder/api/ec2/cloud.py:1041
-#: cinder/api/ec2/cloud.py:1518 cinder/api/ec2/cloud.py:1533
+#: cinder/backup/api.py:154
 #, python-format
-msgid "attribute not supported: %s"
+msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s"
 msgstr ""

-#: cinder/api/ec2/cloud.py:1107
+#: cinder/backup/api.py:170
+#, fuzzy
+msgid "Volume to be restored to must be available"
+msgstr "Volume 狀態需要可被使用"
+
+#: cinder/backup/api.py:176
 #, python-format
-msgid "vol = %s\n"
+msgid ""
+"volume size %(volume_size)d is too small to restore backup of size "
+"%(size)d."
 msgstr ""

-#: cinder/api/ec2/cloud.py:1255
-msgid "Allocate address"
+#: cinder/backup/api.py:181
+#, python-format
+msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s"
 msgstr ""

-#: cinder/api/ec2/cloud.py:1267
-#, python-format
-msgid "Release address %s"
+#: cinder/backup/manager.py:97
+msgid "NULL host not allowed for volume backend lookup."
 msgstr ""

-#: cinder/api/ec2/cloud.py:1272
+#: cinder/backup/manager.py:100
 #, python-format
-msgid "Associate address %(public_ip)s to instance %(instance_id)s"
+msgid "Checking hostname '%s' for backend info."
 msgstr ""

-#: cinder/api/ec2/cloud.py:1282
+#: cinder/backup/manager.py:107
 #, python-format
-msgid "Disassociate address %s"
+msgid "Backend not found in hostname (%s) so using default."
 msgstr ""

-#: cinder/api/ec2/cloud.py:1308
-msgid "Image must be available"
+#: cinder/backup/manager.py:117
+#, python-format
+msgid "Manager requested for volume_backend '%s'."
 msgstr ""

-#: cinder/api/ec2/cloud.py:1329
-msgid "Going to start terminating instances"
+#: cinder/backup/manager.py:120 cinder/backup/manager.py:132
+msgid "Fetching default backend."
 msgstr ""

-#: cinder/api/ec2/cloud.py:1343
+#: cinder/backup/manager.py:123
 #, python-format
-msgid "Reboot instance %r"
+msgid "Volume manager for backend '%s' does not exist."
 msgstr ""

-#: cinder/api/ec2/cloud.py:1354
-msgid "Going to stop instances"
+#: cinder/backup/manager.py:129
+#, python-format
+msgid "Driver requested for volume_backend '%s'."
 msgstr ""

-#: cinder/api/ec2/cloud.py:1365
-msgid "Going to start instances"
+#: cinder/backup/manager.py:147
+#, python-format
+msgid ""
+"Registering backend %(backend)s (host=%(host)s "
+"backend_name=%(backend_name)s)."
 msgstr ""

-#: cinder/api/ec2/cloud.py:1455
+#: cinder/backup/manager.py:154
 #, python-format
-msgid "De-registering image %s"
+msgid "Registering default backend %s."
 msgstr ""

-#: cinder/api/ec2/cloud.py:1471
-msgid "imageLocation is required"
+#: cinder/backup/manager.py:158
+#, python-format
+msgid "Starting volume driver %(driver_name)s (%(version)s)."
 msgstr ""

-#: cinder/api/ec2/cloud.py:1490
+#: cinder/backup/manager.py:165
 #, python-format
-msgid "Registered image %(image_location)s with id %(image_id)s"
+msgid "Error encountered during initialization of driver: %(name)s."
 msgstr ""

-#: cinder/api/ec2/cloud.py:1536
-msgid "user or group not specified"
+#: cinder/backup/manager.py:184
+msgid "Cleaning up incomplete backup operations."
 msgstr ""

-#: cinder/api/ec2/cloud.py:1538
-msgid "only group \"all\" is supported"
+#: cinder/backup/manager.py:189
+#, python-format
+msgid "Resetting volume %s to available (was backing-up)."
 msgstr ""

-#: cinder/api/ec2/cloud.py:1540
-msgid "operation_type must be add or remove"
+#: cinder/backup/manager.py:194
+#, python-format
+msgid "Resetting volume %s to error_restoring (was restoring-backup)."
 msgstr ""

-#: cinder/api/ec2/cloud.py:1542
+#: cinder/backup/manager.py:206
 #, python-format
-msgid "Updating image %s publicity"
+msgid "Resetting backup %s to error (was creating)."
 msgstr ""

-#: cinder/api/ec2/cloud.py:1555
+#: cinder/backup/manager.py:212
 #, python-format
-msgid "Not allowed to modify attributes for image %s"
+msgid "Resetting backup %s to available (was restoring)."
 msgstr ""

-#: cinder/api/ec2/cloud.py:1603
+#: cinder/backup/manager.py:217
 #, python-format
-msgid "Couldn't stop instance with in %d sec"
+msgid "Resuming delete on backup: %s."
 msgstr ""

-#: cinder/api/metadata/handler.py:246 cinder/api/metadata/handler.py:253
+#: cinder/backup/manager.py:225
 #, python-format
-msgid "Failed to get metadata for ip: %s"
+msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s."
 msgstr ""

-#: cinder/api/openstack/__init__.py:43
+#: cinder/backup/manager.py:237
 #, python-format
-msgid "Caught error: %s"
+msgid ""
+"Create backup aborted, expected volume status %(expected_status)s but got"
+" %(actual_status)s."
 msgstr ""

-#: cinder/api/openstack/__init__.py:45 cinder/api/openstack/wsgi.py:886
+#: cinder/backup/manager.py:249
 #, python-format
-msgid "%(url)s returned with HTTP %(status)d"
+msgid ""
+"Create backup aborted, expected backup status %(expected_status)s but got"
+" %(actual_status)s."
 msgstr ""

-#: cinder/api/openstack/__init__.py:94
-msgid "Must specify an ExtensionManager class"
+#: cinder/backup/manager.py:282
+#, python-format
+msgid "Create backup finished. backup: %s."
 msgstr ""

-#: cinder/api/openstack/__init__.py:105
+#: cinder/backup/manager.py:286
 #, python-format
-msgid "Extended resource: %s"
+msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s."
 msgstr ""

-#: cinder/api/openstack/__init__.py:130
+#: cinder/backup/manager.py:299
 #, python-format
 msgid ""
-"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such "
-"resource"
+"Restore backup aborted: expected volume status %(expected_status)s but "
+"got %(actual_status)s."
 msgstr ""

-#: cinder/api/openstack/__init__.py:135
+#: cinder/backup/manager.py:310
 #, python-format
-msgid "Extension %(ext_name)s extending resource: %(collection)s"
+msgid ""
+"Restore backup aborted: expected backup status %(expected_status)s but "
+"got %(actual_status)s."
 msgstr ""

-#: cinder/api/openstack/auth.py:90
+#: cinder/backup/manager.py:329
 #, python-format
-msgid "%(user_id)s could not be found with token '%(token)s'"
+msgid ""
+"Restore backup aborted, the backup service currently configured "
+"[%(configured_service)s] is not the backup service that was used to "
+"create this backup [%(backup_service)s]."
 msgstr ""

-#: cinder/api/openstack/auth.py:134
+#: cinder/backup/manager.py:360
 #, python-format
-msgid "%(user_id)s must be an admin or a member of %(project_id)s"
-msgstr ""
-
-#: cinder/api/openstack/auth.py:152
-msgid "Authentication requests must be made against a version root (e.g. /v2)."
+msgid ""
+"Restore backup finished, backup %(backup_id)s restored to volume "
+"%(volume_id)s."
 msgstr ""

-#: cinder/api/openstack/auth.py:167
+#: cinder/backup/manager.py:379
 #, python-format
-msgid "Could not find %s in request."
+msgid "Delete backup started, backup: %s."
 msgstr ""

-#: cinder/api/openstack/auth.py:191
+#: cinder/backup/manager.py:386
 #, python-format
-msgid "Successfully authenticated '%s'"
+msgid ""
+"Delete_backup aborted, expected backup status %(expected_status)s but got"
+" %(actual_status)s."
 msgstr ""

-#: cinder/api/openstack/auth.py:241
-msgid "User not found with provided API key."
+#: cinder/backup/manager.py:399
+#, python-format
+msgid ""
+"Delete backup aborted, the backup service currently configured "
+"[%(configured_service)s] is not the backup service that was used to "
+"create this backup [%(backup_service)s]."
 msgstr ""

-#: cinder/api/openstack/auth.py:258
+#: cinder/backup/manager.py:422
 #, python-format
-msgid "Provided API key is valid, but not for user '%(username)s'"
+msgid "Delete backup finished, backup %s deleted."
 msgstr ""

-#: cinder/api/openstack/common.py:133 cinder/api/openstack/common.py:167
-msgid "limit param must be an integer"
+#: cinder/backup/drivers/ceph.py:116
+msgid ""
+"rbd striping not supported - ignoring configuration settings for rbd "
+"striping"
 msgstr ""

-#: cinder/api/openstack/common.py:136 cinder/api/openstack/common.py:171
-msgid "limit param must be positive"
+#: cinder/backup/drivers/ceph.py:147
+#, python-format
+msgid "invalid user '%s'"
 msgstr ""

-#: cinder/api/openstack/common.py:161
-msgid "offset param must be an integer"
+#: cinder/backup/drivers/ceph.py:213
+msgid "backup_id required"
 msgstr ""

-#: cinder/api/openstack/common.py:175
-msgid "offset param must be positive"
+#: cinder/backup/drivers/ceph.py:224
+#, python-format
+msgid "discarding %(length)s bytes from offset %(offset)s"
 msgstr ""

-#: cinder/api/openstack/common.py:203
+#: cinder/backup/drivers/ceph.py:232
 #, python-format
-msgid "marker [%s] not found"
+msgid "writing zeroes chunk %d"
 msgstr ""

-#: cinder/api/openstack/common.py:243
+#: cinder/backup/drivers/ceph.py:246
 #, python-format
-msgid "href %s does not contain version"
+msgid "transferring data between '%(src)s' and '%(dest)s'"
 msgstr ""

-#: cinder/api/openstack/common.py:278
-msgid "Image metadata limit exceeded"
+#: cinder/backup/drivers/ceph.py:250
+#, python-format
+msgid "%(chunks)s chunks of %(bytes)s bytes to be transferred"
 msgstr ""

-#: cinder/api/openstack/common.py:295
+#: cinder/backup/drivers/ceph.py:269
 #, python-format
-msgid "Converting nw_info: %s"
+msgid "transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s)"
 msgstr ""

-#: cinder/api/openstack/common.py:305
+#: cinder/backup/drivers/ceph.py:279
 #, python-format
-msgid "Converted networks: %s"
+msgid "transferring remaining %s bytes"
 msgstr ""

-#: cinder/api/openstack/common.py:338
+#: cinder/backup/drivers/ceph.py:295
 #, python-format
-msgid "Cannot '%(action)s' while instance is in %(attr)s %(state)s"
+msgid "creating base image '%s'"
 msgstr ""

-#: cinder/api/openstack/common.py:341
+#: cinder/backup/drivers/ceph.py:322 cinder/backup/drivers/ceph.py:603
 #, python-format
-msgid "Instance is in an invalid state for '%(action)s'"
+msgid "deleting backup snapshot='%s'"
 msgstr ""

-#: cinder/api/openstack/common.py:421
-msgid "Rejecting snapshot request, snapshots currently disabled"
+#: cinder/backup/drivers/ceph.py:325
+msgid "no backup snapshot to delete"
 msgstr ""

-#: cinder/api/openstack/common.py:423
-msgid "Instance snapshots are not permitted at this time."
+#: cinder/backup/drivers/ceph.py:361
+#, python-format
+msgid "trying diff format name format basename='%s'"
 msgstr ""

-#: cinder/api/openstack/extensions.py:188
+#: cinder/backup/drivers/ceph.py:369
 #, python-format
-msgid "Loaded extension: %s"
+msgid "image %s not found"
 msgstr ""

-#: cinder/api/openstack/extensions.py:225
+#: cinder/backup/drivers/ceph.py:377
 #, python-format
-msgid "Ext name: %s"
+msgid "base image still has %s snapshots so skipping base image delete"
 msgstr ""

-#: cinder/api/openstack/extensions.py:226
+#: cinder/backup/drivers/ceph.py:382
 #, python-format
-msgid "Ext alias: %s"
+msgid "deleting base image='%s'"
 msgstr ""

-#: cinder/api/openstack/extensions.py:227
+#: cinder/backup/drivers/ceph.py:389
 #, python-format
-msgid "Ext description: %s"
+msgid "image busy, retrying %(retries)s more time(s) in %(delay)ss"
 msgstr ""

-#: cinder/api/openstack/extensions.py:229
-#, python-format
-msgid "Ext namespace: %s"
+#: cinder/backup/drivers/ceph.py:394
+msgid "max retries reached - raising error"
 msgstr ""

-#: cinder/api/openstack/extensions.py:230
+#: cinder/backup/drivers/ceph.py:397
 #, python-format
-msgid "Ext updated: %s"
+msgid "base backup image='%s' deleted)"
 msgstr ""

-#: cinder/api/openstack/extensions.py:232
+#: cinder/backup/drivers/ceph.py:407
 #, python-format
-msgid "Exception loading extension: %s"
+msgid "deleting source snap '%s'"
 msgstr ""

-#: cinder/api/openstack/extensions.py:246
+#: cinder/backup/drivers/ceph.py:453
 #, python-format
-msgid "Loading extension %s"
+msgid "performing differential transfer from '%(src)s' to '%(dest)s'"
 msgstr ""

-#: cinder/api/openstack/extensions.py:252
+#: cinder/backup/drivers/ceph.py:478
 #, python-format
-msgid "Calling extension factory %s"
+msgid "rbd diff op failed - (ret=%(ret)s stderr=%(stderr)s)"
 msgstr ""

-#: cinder/api/openstack/extensions.py:264
+#: cinder/backup/drivers/ceph.py:488
 #, python-format
-msgid "Failed to load extension %(ext_factory)s: %(exc)s"
+msgid "image '%s' not found - trying diff format name"
 msgstr ""

-#: cinder/api/openstack/extensions.py:344
+#: cinder/backup/drivers/ceph.py:493
 #, python-format
-msgid "Failed to load extension %(classpath)s: %(exc)s"
+msgid "diff format image '%s' not found"
 msgstr ""

-#: cinder/api/openstack/extensions.py:368
+#: cinder/backup/drivers/ceph.py:528
 #, python-format
-msgid "Failed to load extension %(ext_name)s: %(exc)s"
+msgid "using --from-snap '%s'"
 msgstr ""

-#: cinder/api/openstack/wsgi.py:135 cinder/api/openstack/wsgi.py:538
-msgid "cannot understand JSON"
+#: cinder/backup/drivers/ceph.py:543
+#, python-format
+msgid "source snap '%s' is stale so deleting"
 msgstr ""

-#: cinder/api/openstack/wsgi.py:159
-#: cinder/api/openstack/compute/contrib/hosts.py:86
-msgid "cannot understand XML"
+#: cinder/backup/drivers/ceph.py:555
+#, python-format
+msgid ""
+"snap='%(snap)s' does not exist in base image='%(base)s' - aborting "
+"incremental backup"
 msgstr ""

-#: cinder/api/openstack/wsgi.py:543
-msgid "too many body keys"
+#: cinder/backup/drivers/ceph.py:566
+#, python-format
+msgid "creating backup snapshot='%s'"
 msgstr ""

-#: cinder/api/openstack/wsgi.py:582
+#: cinder/backup/drivers/ceph.py:586
 #, python-format
-msgid "Exception handling resource: %s"
+msgid "differential backup transfer completed in %.4fs"
 msgstr ""

-#: cinder/api/openstack/wsgi.py:586
-#, python-format
-msgid "Fault thrown: %s"
+#: cinder/backup/drivers/ceph.py:595
+msgid "differential backup transfer failed"
 msgstr ""

-#: cinder/api/openstack/wsgi.py:589
+#: cinder/backup/drivers/ceph.py:625
 #, python-format
-msgid "HTTP exception thrown: %s"
+msgid "creating base image='%s'"
 msgstr ""

-#: cinder/api/openstack/wsgi.py:697
-msgid "Unrecognized Content-Type provided in request"
+#: cinder/backup/drivers/ceph.py:634
+msgid "copying data"
 msgstr ""

-#: cinder/api/openstack/wsgi.py:701
-msgid "No Content-Type provided in request"
+#: cinder/backup/drivers/ceph.py:694
+#, python-format
+msgid "looking for snapshot of backup base '%s'"
 msgstr ""

-#: cinder/api/openstack/wsgi.py:705
-msgid "Empty body provided in request"
+#: cinder/backup/drivers/ceph.py:697
+#, python-format
+msgid "backup base '%s' has no snapshots"
 msgstr ""

-#: cinder/api/openstack/wsgi.py:816
+#: cinder/backup/drivers/ceph.py:704
 #, python-format
-msgid "There is no such action: %s"
+msgid "backup '%s' has no snapshot"
 msgstr ""

-#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832
-#: cinder/api/openstack/compute/server_metadata.py:58
-#: cinder/api/openstack/compute/server_metadata.py:76
-#: cinder/api/openstack/compute/server_metadata.py:101
-#: cinder/api/openstack/compute/server_metadata.py:126
-#: cinder/api/openstack/compute/contrib/admin_actions.py:211
-#: cinder/api/openstack/compute/contrib/console_output.py:52
-msgid "Malformed request body"
+#: cinder/backup/drivers/ceph.py:708
+#, python-format
+msgid "backup should only have one snapshot but instead has %s"
 msgstr ""

-#: cinder/api/openstack/wsgi.py:829
-msgid "Unsupported Content-Type"
+#: cinder/backup/drivers/ceph.py:713
+#, python-format
+msgid "found snapshot '%s'"
 msgstr ""

-#: cinder/api/openstack/wsgi.py:841
-msgid "Malformed request url"
+#: cinder/backup/drivers/ceph.py:734
+msgid "need non-zero volume size"
 msgstr ""

-#: cinder/api/openstack/wsgi.py:889
+#: cinder/backup/drivers/ceph.py:751
 #, python-format
-msgid "%(url)s returned a fault: %(e)s"
-msgstr ""
-
-#: cinder/api/openstack/xmlutil.py:265
-msgid "element is not a child"
+msgid "Starting backup of volume='%s'"
 msgstr ""

-#: cinder/api/openstack/xmlutil.py:414
-msgid "root element selecting a list"
+#: cinder/backup/drivers/ceph.py:764
+msgid "forcing full backup"
 msgstr ""

-#: cinder/api/openstack/xmlutil.py:739
+#: cinder/backup/drivers/ceph.py:776
 #, python-format
-msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s"
+msgid "backup '%s' finished."
 msgstr ""

-#: cinder/api/openstack/xmlutil.py:858
-msgid "subclasses must implement construct()!"
+#: cinder/backup/drivers/ceph.py:834
+msgid "adjusting restore vol size"
 msgstr ""

-#: cinder/api/openstack/compute/extensions.py:29
-#: cinder/api/openstack/volume/extensions.py:29
-msgid "Initializing extension manager."
+#: cinder/backup/drivers/ceph.py:846
+#, python-format
+msgid "trying incremental restore from base='%(base)s' snap='%(snap)s'"
 msgstr ""

-#: cinder/api/openstack/compute/image_metadata.py:40
-#: cinder/api/openstack/compute/images.py:146
-#: cinder/api/openstack/compute/images.py:161
-msgid "Image not found."
+#: cinder/backup/drivers/ceph.py:858
+msgid "differential restore failed, trying full restore"
 msgstr ""

-#: cinder/api/openstack/compute/image_metadata.py:79
-msgid "Incorrect request body format"
+#: cinder/backup/drivers/ceph.py:869
+#, python-format
+msgid "restore transfer completed in %.4fs"
 msgstr ""

-#: cinder/api/openstack/compute/image_metadata.py:83
-#: cinder/api/openstack/compute/server_metadata.py:80
-#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:79
-#: cinder/api/openstack/compute/contrib/volumetypes.py:188
-msgid "Request body and URI mismatch"
+#: cinder/backup/drivers/ceph.py:916
+#, python-format
+msgid "rbd has %s extents"
 msgstr ""

-#: cinder/api/openstack/compute/image_metadata.py:86
-#: cinder/api/openstack/compute/server_metadata.py:84
-#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:82
-#: cinder/api/openstack/compute/contrib/volumetypes.py:191
-msgid "Request body contains too many items"
+#: cinder/backup/drivers/ceph.py:938
+msgid "dest volume is original volume - forcing full copy"
 msgstr ""

-#: cinder/api/openstack/compute/image_metadata.py:111
-msgid "Invalid metadata key"
+#: cinder/backup/drivers/ceph.py:959
+msgid "destination has extents - forcing full copy"
 msgstr ""

-#: cinder/api/openstack/compute/ips.py:74
-msgid "Instance does not exist"
+#: cinder/backup/drivers/ceph.py:964
+#, python-format
+msgid "no restore point found for backup='%s', forcing full copy"
 msgstr ""

-#: cinder/api/openstack/compute/ips.py:97
-msgid "Instance is not a member of specified network"
+#: cinder/backup/drivers/ceph.py:995
+msgid "forcing full restore"
 msgstr ""

-#: cinder/api/openstack/compute/limits.py:140
+#: cinder/backup/drivers/ceph.py:1005
 #, python-format
-msgid ""
-"Only %(value)s %(verb)s request(s) can be made to %(uri)s every "
-"%(unit_string)s."
+msgid "starting restore from Ceph backup=%(src)s to volume=%(dest)s"
 msgstr ""

-#: cinder/api/openstack/compute/limits.py:266
-msgid "This request was rate-limited."
-msgstr ""
-
-#: cinder/api/openstack/compute/server_metadata.py:38
-#: cinder/api/openstack/compute/server_metadata.py:122
-#: cinder/api/openstack/compute/server_metadata.py:159
-msgid "Server does not exist"
+#: cinder/backup/drivers/ceph.py:1016
+msgid "volume_file does not support fileno() so skipping fsync()"
 msgstr ""

-#: cinder/api/openstack/compute/server_metadata.py:141
-#: cinder/api/openstack/compute/server_metadata.py:152
-msgid "Metadata item was not found"
+#: cinder/backup/drivers/ceph.py:1021
+msgid "restore finished successfully."
 msgstr ""

-#: cinder/api/openstack/compute/servers.py:425
+#: cinder/backup/drivers/ceph.py:1023
 #, python-format
-msgid "Invalid server status: %(status)s"
+msgid "restore finished with error - %s"
 msgstr ""

-#: cinder/api/openstack/compute/servers.py:433
-msgid "Invalid changes-since value"
+#: cinder/backup/drivers/ceph.py:1029
+#, python-format
+msgid "delete started for backup=%s"
 msgstr ""

-#: cinder/api/openstack/compute/servers.py:483
-msgid "Personality file limit exceeded"
+#: cinder/backup/drivers/ceph.py:1034
+msgid "rbd image not found but continuing anyway so that db entry can be removed"
 msgstr ""

-#: cinder/api/openstack/compute/servers.py:485
-msgid "Personality file path too long"
+#: cinder/backup/drivers/ceph.py:1037
+#, python-format
+msgid "delete '%s' finished with warning"
 msgstr ""

-#: cinder/api/openstack/compute/servers.py:487
-msgid "Personality file content too long"
+#: cinder/backup/drivers/ceph.py:1039
+#, python-format
+msgid "delete '%s' finished"
 msgstr ""

-#: cinder/api/openstack/compute/servers.py:501
-msgid "Server name is not a string or unicode"
+#: cinder/backup/drivers/swift.py:106
+#, python-format
+msgid "unsupported compression algorithm: %s"
 msgstr ""

-#: cinder/api/openstack/compute/servers.py:505
-msgid "Server name is an empty string"
+#: cinder/backup/drivers/swift.py:123
+#, python-format
+msgid "single_user auth mode enabled, but %(param)s not set"
 msgstr ""

-#: cinder/api/openstack/compute/servers.py:509
-msgid "Server name must be less than 256 characters."
+#: cinder/backup/drivers/swift.py:141
+#, python-format
+msgid "_check_container_exists: container: %s"
 msgstr ""

-#: cinder/api/openstack/compute/servers.py:527
+#: cinder/backup/drivers/swift.py:146
 #, python-format
-msgid "Bad personality format: missing %s"
+msgid "container %s does not exist"
 msgstr ""

-#: cinder/api/openstack/compute/servers.py:530
-msgid "Bad personality format"
+#: cinder/backup/drivers/swift.py:151
+#, python-format
+msgid "container %s exists"
 msgstr ""

-#: cinder/api/openstack/compute/servers.py:535
+#: cinder/backup/drivers/swift.py:157
 #, python-format
-msgid "Personality content for %s cannot be decoded"
+msgid "_create_container started, container: %(container)s,backup: %(backup_id)s"
 msgstr ""

-#: cinder/api/openstack/compute/servers.py:550
+#: cinder/backup/drivers/swift.py:173
 #, python-format
-msgid "Bad networks format: network uuid is not in proper format (%s)"
+msgid "_generate_swift_object_name_prefix: %s"
 msgstr ""

-#: cinder/api/openstack/compute/servers.py:559
+#: cinder/backup/drivers/swift.py:182
 #, python-format
-msgid "Invalid fixed IP address (%s)"
+msgid "generated object list: %s"
 msgstr ""

-#: cinder/api/openstack/compute/servers.py:566
+#: cinder/backup/drivers/swift.py:192
 #, python-format
-msgid "Duplicate networks (%s) are not allowed"
+msgid ""
+"_write_metadata started, container name: %(container)s, metadata "
+"filename: %(filename)s"
 msgstr ""

-#: cinder/api/openstack/compute/servers.py:572
+#: cinder/backup/drivers/swift.py:209
 #, python-format
-msgid "Bad network format: missing %s"
+msgid ""
+"error writing metadata file to swift, MD5 of metadata file in swift "
+"[%(etag)s] is not the same as MD5 of metadata file sent to swift "
+"[%(md5)s]"
 msgstr ""

-#: cinder/api/openstack/compute/servers.py:575
-msgid "Bad networks format"
+#: cinder/backup/drivers/swift.py:214
+msgid "_write_metadata finished"
 msgstr ""

-#: cinder/api/openstack/compute/servers.py:587
-msgid "Userdata content cannot be decoded"
+#: cinder/backup/drivers/swift.py:219
+#, python-format
+msgid ""
+"_read_metadata started, container name: %(container)s, metadata filename:"
+" %(filename)s"
 msgstr ""

-#: cinder/api/openstack/compute/servers.py:594
-msgid "accessIPv4 is not proper IPv4 format"
+#: cinder/backup/drivers/swift.py:224
+#, python-format
+msgid "_read_metadata finished (%s)"
 msgstr ""

-#: cinder/api/openstack/compute/servers.py:601
-msgid "accessIPv6 is not proper IPv6 format"
-msgstr ""
+#: cinder/backup/drivers/swift.py:234
+#, fuzzy, python-format
+msgid "volume size %d is invalid."
+msgstr "無效的Keypair"

-#: cinder/api/openstack/compute/servers.py:633
-msgid "Server name is not defined"
+#: cinder/backup/drivers/swift.py:248
+#, python-format
+msgid ""
+"starting backup of volume: %(volume_id)s to swift, volume size: "
+"%(volume_size_bytes)d, swift object names prefix %(object_prefix)s, "
+"availability zone: %(availability_zone)s"
 msgstr ""

-#: cinder/api/openstack/compute/servers.py:675
-#: cinder/api/openstack/compute/servers.py:740
-msgid "Invalid flavorRef provided."
+#: cinder/backup/drivers/swift.py:271
+msgid "reading chunk of data from volume"
 msgstr ""

-#: cinder/api/openstack/compute/servers.py:737
-msgid "Can not find requested image"
+#: cinder/backup/drivers/swift.py:278
+#, python-format
+msgid ""
+"compressed %(data_size_bytes)d bytes of data to %(comp_size_bytes)d bytes"
+" using %(algorithm)s"
 msgstr ""

-#: cinder/api/openstack/compute/servers.py:743
-msgid "Invalid key_name provided."
+#: cinder/backup/drivers/swift.py:287
+msgid "not compressing data"
 msgstr ""

-#: cinder/api/openstack/compute/servers.py:829
-#: cinder/api/openstack/compute/servers.py:849
-msgid "Instance has not been resized."
+#: cinder/backup/drivers/swift.py:291
+msgid "About to put_object"
 msgstr ""

-#: cinder/api/openstack/compute/servers.py:835
+#: cinder/backup/drivers/swift.py:297
 #, python-format
-msgid "Error in confirm-resize %s"
+msgid "swift MD5 for %(object_name)s: %(etag)s"
 msgstr ""

-#: cinder/api/openstack/compute/servers.py:855
+#: cinder/backup/drivers/swift.py:301
 #, python-format
-msgid "Error in revert-resize %s"
+msgid "backup MD5 for %(object_name)s: %(md5)s"
 msgstr ""

-#: cinder/api/openstack/compute/servers.py:868
-msgid "Argument 'type' for reboot is not HARD or SOFT"
+#: cinder/backup/drivers/swift.py:304
+#, python-format
+msgid ""
+"error writing object to swift, MD5 of object in swift %(etag)s is not the"
+" same as MD5 of object sent to swift %(md5)s"
 msgstr ""

-#: cinder/api/openstack/compute/servers.py:872
-msgid "Missing argument 'type' for reboot"
+#: cinder/backup/drivers/swift.py:312
+msgid "Calling eventlet.sleep(0)"
 msgstr ""

-#: cinder/api/openstack/compute/servers.py:885
+#: cinder/backup/drivers/swift.py:328 cinder/backup/drivers/tsm.py:324
 #, python-format
-msgid "Error in reboot %s"
+msgid "backup %s finished."
 msgstr ""

-#: cinder/api/openstack/compute/servers.py:897
-msgid "Unable to locate requested flavor."
+#: cinder/backup/drivers/swift.py:345
+#, python-format
+msgid "v1 swift volume backup restore of %s started"
 msgstr ""

-#: cinder/api/openstack/compute/servers.py:900
-msgid "Resize requires a change in size."
+#: cinder/backup/drivers/swift.py:350
+#, python-format
+msgid "metadata_object_names = %s"
 msgstr ""

-#: cinder/api/openstack/compute/servers.py:924
-msgid "Malformed server entity"
+#: cinder/backup/drivers/swift.py:356
+msgid ""
+"restore_backup aborted, actual swift object list in swift does not match "
+"object list stored in metadata"
 msgstr ""

-#: cinder/api/openstack/compute/servers.py:931
-msgid "Missing imageRef attribute"
+#: cinder/backup/drivers/swift.py:362
+#, python-format
+msgid ""
+"restoring object from swift. backup: %(backup_id)s, container: "
+"%(container)s, swift object name: %(object_name)s, volume: %(volume_id)s"
 msgstr ""

-#: cinder/api/openstack/compute/servers.py:940
-msgid "Invalid imageRef provided."
+#: cinder/backup/drivers/swift.py:378
+#, python-format
+msgid "decompressing data using %s algorithm"
 msgstr ""

-#: cinder/api/openstack/compute/servers.py:949
-msgid "Missing flavorRef attribute"
+#: cinder/backup/drivers/swift.py:401
+#, python-format
+msgid "v1 swift volume backup restore of %s finished"
 msgstr ""

-#: cinder/api/openstack/compute/servers.py:962
-msgid "No adminPass was specified"
+#: cinder/backup/drivers/swift.py:409
+#, python-format
+msgid ""
+"starting restore of backup %(object_prefix)s from swift container: "
+"%(container)s, to volume %(volume_id)s, backup: %(backup_id)s"
 msgstr ""

-#: cinder/api/openstack/compute/servers.py:966
-#: cinder/api/openstack/compute/servers.py:1144
-msgid "Invalid adminPass"
+#: cinder/backup/drivers/swift.py:423
+#, python-format
+msgid "Restoring swift backup version %s"
 msgstr ""

-#: cinder/api/openstack/compute/servers.py:980
-msgid "Unable to parse metadata key/value pairs."
+#: cinder/backup/drivers/swift.py:428
+#, python-format
+msgid "No support to restore swift backup version %s"
 msgstr ""

-#: cinder/api/openstack/compute/servers.py:993
-msgid "Resize request has invalid 'flavorRef' attribute."
+#: cinder/backup/drivers/swift.py:432 cinder/backup/drivers/tsm.py:378
+#, python-format
+msgid "restore %(backup_id)s to %(volume_id)s finished."
 msgstr ""

-#: cinder/api/openstack/compute/servers.py:996
-msgid "Resize requests require 'flavorRef' attribute."
+#: cinder/backup/drivers/swift.py:446
+msgid "swift error while listing objects, continuing with delete"
 msgstr ""

-#: cinder/api/openstack/compute/servers.py:1014
-#: cinder/api/openstack/compute/contrib/aggregates.py:142
-#: cinder/api/openstack/compute/contrib/networks.py:65
-msgid "Invalid request body"
+#: cinder/backup/drivers/swift.py:455
+#, python-format
+msgid "swift error while deleting object %s, continuing with delete"
 msgstr ""

-#: cinder/api/openstack/compute/servers.py:1019
-msgid "Could not parse imageRef from request."
+#: cinder/backup/drivers/swift.py:458
+#, python-format
+msgid "deleted swift object: %(swift_object_name)s in container: %(container)s"
 msgstr ""

-#: cinder/api/openstack/compute/servers.py:1071
-msgid "Instance could not be found"
+#: cinder/backup/drivers/swift.py:468 cinder/backup/drivers/tsm.py:440
+#, python-format
+msgid "delete %s finished"
 msgstr ""

-#: cinder/api/openstack/compute/servers.py:1074
-msgid "Cannot find image for rebuild"
+#: cinder/backup/drivers/tsm.py:85
+#, python-format
+msgid ""
+"backup: %(vol_id)s Failed to create device hardlink from %(vpath)s to "
+"%(bpath)s.\n"
+"stdout: %(out)s\n"
+" stderr: %(err)s"
 msgstr ""

-#: cinder/api/openstack/compute/servers.py:1103
-msgid "createImage entity requires name attribute"
+#: cinder/backup/drivers/tsm.py:143
+#, python-format
+msgid ""
+"backup: %(vol_id)s Failed to obtain backup success notification from "
+"server.\n"
+"stdout: %(out)s\n"
+" stderr: %(err)s"
 msgstr ""

-#: cinder/api/openstack/compute/servers.py:1112
-#: cinder/api/openstack/compute/contrib/admin_actions.py:238
-msgid "Invalid metadata"
+#: cinder/backup/drivers/tsm.py:173
+#, python-format
+msgid ""
+"restore: %(vol_id)s Failed.\n"
+"stdout: %(out)s\n"
+" stderr: %(err)s"
 msgstr ""

-#: cinder/api/openstack/compute/servers.py:1167
+#: cinder/backup/drivers/tsm.py:199
 #, python-format
-msgid "Removing options '%(unk_opt_str)s' from query"
+msgid "backup: %(vol_id)s Failed. %(path)s is not a block device."
 msgstr ""

-#: cinder/api/openstack/compute/contrib/admin_actions.py:60
+#: cinder/backup/drivers/tsm.py:206
 #, python-format
-msgid "Compute.api::pause %s"
+msgid "backup: %(vol_id)s Failed. Cannot obtain real path to device %(path)s."
 msgstr ""

-#: cinder/api/openstack/compute/contrib/admin_actions.py:77
+#: cinder/backup/drivers/tsm.py:213
 #, python-format
-msgid "Compute.api::unpause %s"
+msgid "backup: %(vol_id)s Failed. %(path)s is not a file."
 msgstr ""

-#: cinder/api/openstack/compute/contrib/admin_actions.py:94
+#: cinder/backup/drivers/tsm.py:260
 #, python-format
-msgid "compute.api::suspend %s"
-msgstr "compute.api::暫停 %s"
+msgid ""
+"backup: %(vol_id)s Failed to remove backup hardlink from %(vpath)s to "
+"%(bpath)s.\n"
+"stdout: %(out)s\n"
+" stderr: %(err)s"
+msgstr ""

-#: cinder/api/openstack/compute/contrib/admin_actions.py:111
+#: cinder/backup/drivers/tsm.py:286
 #, python-format
-msgid "compute.api::resume %s"
-msgstr "compute.api::繼續 %s"
+msgid ""
+"starting backup of volume: %(volume_id)s to TSM, volume path: "
+"%(volume_path)s,"
+msgstr ""

-#: cinder/api/openstack/compute/contrib/admin_actions.py:127
+#: cinder/backup/drivers/tsm.py:298
 #, python-format
-msgid "Error in migrate %s"
+msgid ""
+"backup: %(vol_id)s Failed to run dsmc on %(bpath)s.\n"
+"stdout: %(out)s\n"
+" stderr: %(err)s"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/admin_actions.py:141
+#: cinder/backup/drivers/tsm.py:308
 #, python-format
-msgid "Compute.api::reset_network %s"
+msgid ""
+"backup: %(vol_id)s Failed to run dsmc due to invalid arguments on "
+"%(bpath)s.\n"
+"stdout: %(out)s\n"
+" stderr: %(err)s"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/admin_actions.py:154
-#: cinder/api/openstack/compute/contrib/admin_actions.py:170
-#: cinder/api/openstack/compute/contrib/admin_actions.py:186
-#: cinder/api/openstack/compute/contrib/multinic.py:41
-#: cinder/api/openstack/compute/contrib/rescue.py:44
-msgid "Server not found"
+#: cinder/backup/drivers/tsm.py:338
+#, python-format
+msgid ""
+"restore: starting restore of backup from TSM to volume %(volume_id)s, "
+"backup: %(backup_id)s"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/admin_actions.py:157
+#: cinder/backup/drivers/tsm.py:352
 #, python-format
-msgid "Compute.api::inject_network_info %s"
+msgid ""
+"restore: %(vol_id)s Failed to run dsmc on %(bpath)s.\n"
+"stdout: %(out)s\n"
+" stderr: %(err)s"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/admin_actions.py:173
+#: cinder/backup/drivers/tsm.py:362
 #, python-format
-msgid "Compute.api::lock %s"
+msgid ""
+"restore: %(vol_id)s Failed to run dsmc due to invalid arguments on "
+"%(bpath)s.\n"
+"stdout: %(out)s\n"
+" stderr: %(err)s"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/admin_actions.py:189
+#: cinder/backup/drivers/tsm.py:413
 #, python-format
-msgid "Compute.api::unlock %s"
+msgid ""
+"delete: %(vol_id)s Failed to run dsmc with stdout: %(out)s\n"
+" stderr: %(err)s"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/admin_actions.py:219
+#: cinder/backup/drivers/tsm.py:421
 #, python-format
-msgid "createBackup entity requires %s attribute"
+msgid ""
+"restore: %(vol_id)s Failed to run dsmc due to invalid arguments with "
+"stdout: %(out)s\n"
+" stderr: %(err)s"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/admin_actions.py:223
-msgid "Malformed createBackup entity"
+#: cinder/backup/drivers/tsm.py:432
+#, python-format
+msgid ""
+"delete: %(vol_id)s Failed with stdout: %(out)s\n"
+" stderr: %(err)s"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/admin_actions.py:229
-msgid "createBackup attribute 'rotation' must be an integer"
+#: cinder/brick/exception.py:55
+#, python-format
+msgid "Exception in string format operation. msg='%s'"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/admin_actions.py:244
-#: cinder/api/openstack/compute/contrib/console_output.py:47
-#: cinder/api/openstack/compute/contrib/console_output.py:59
-#: cinder/api/openstack/compute/contrib/consoles.py:49
-#: cinder/api/openstack/compute/contrib/consoles.py:60
-#: cinder/api/openstack/compute/contrib/server_action_list.py:49
-#: cinder/api/openstack/compute/contrib/server_diagnostics.py:47
-#: cinder/api/openstack/compute/contrib/server_start_stop.py:38
-msgid "Instance not found"
+#: cinder/brick/exception.py:85
+msgid "We are unable to locate any Fibre Channel devices."
 msgstr ""

-#: cinder/api/openstack/compute/contrib/admin_actions.py:272
-msgid "host and block_migration must be specified."
+#: cinder/brick/exception.py:89
+msgid "Unable to find a Fibre Channel volume device."
 msgstr ""

-#: cinder/api/openstack/compute/contrib/admin_actions.py:284
+#: cinder/brick/exception.py:93
 #, python-format
-msgid "Live migration of instance %(id)s to host %(host)s failed"
+msgid "Volume device not found at %(device)s."
 msgstr ""

-#: cinder/api/openstack/compute/contrib/aggregates.py:76
+#: cinder/brick/exception.py:97
 #, python-format
-msgid ""
-"Cannot create aggregate with name %(name)s and availability zone "
-"%(avail_zone)s"
+msgid "Unable to find Volume Group: %(vg_name)s"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/aggregates.py:88
+#: cinder/brick/exception.py:101
 #, python-format
-msgid "Cannot show aggregate: %(id)s"
+msgid "Failed to create Volume Group: %(vg_name)s"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/aggregates.py:114
+#: cinder/brick/exception.py:105
 #, python-format
-msgid "Cannot update aggregate: %(id)s"
+msgid "Failed to create iscsi target for volume %(volume_id)s."
 msgstr ""

-#: cinder/api/openstack/compute/contrib/aggregates.py:126
+#: cinder/brick/exception.py:109
 #, python-format
-msgid "Cannot delete aggregate: %(id)s"
+msgid "Failed to remove iscsi target for volume %(volume_id)s."
 msgstr ""

-#: cinder/api/openstack/compute/contrib/aggregates.py:139
+#: cinder/brick/exception.py:113
 #, python-format
-msgid "Aggregates does not have %s action"
+msgid "Failed to attach iSCSI target for volume %(volume_id)s."
 msgstr ""

-#: cinder/api/openstack/compute/contrib/aggregates.py:152
-#: cinder/api/openstack/compute/contrib/aggregates.py:158
+#: cinder/brick/exception.py:117
 #, python-format
-msgid "Cannot add host %(host)s in aggregate %(id)s"
+msgid "Connect to volume via protocol %(protocol)s not supported."
 msgstr ""

-#: cinder/api/openstack/compute/contrib/aggregates.py:171
-#: cinder/api/openstack/compute/contrib/aggregates.py:175
+#: cinder/brick/initiator/connector.py:127
 #, python-format
-msgid "Cannot remove host %(host)s in aggregate %(id)s"
+msgid "Invalid InitiatorConnector protocol specified %(protocol)s"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/aggregates.py:195
+#: cinder/brick/initiator/connector.py:140
 #, python-format
-msgid "Cannot set metadata %(metadata)s in aggregate %(id)s"
-msgstr ""
-
-#: cinder/api/openstack/compute/contrib/certificates.py:75
-msgid "Only root certificate can be retrieved."
+msgid "Failed to access the device on the path %(path)s: %(error)s %(info)s."
 msgstr ""

-#: cinder/api/openstack/compute/contrib/cloudpipe.py:146
+#: cinder/brick/initiator/connector.py:229
+#, python-format
 msgid ""
-"Unable to claim IP for VPN instances, ensure it isn't running, and try "
-"again in a few minutes"
-msgstr ""
-
-#: cinder/api/openstack/compute/contrib/consoles.py:44
-msgid "Missing type specification"
+"ISCSI volume not yet found at: %(host_device)s. Will rescan & retry. Try"
+" number: %(tries)s"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/consoles.py:56
-msgid "Invalid type specification"
-msgstr ""
-
-#: cinder/api/openstack/compute/contrib/disk_config.py:44
+#: cinder/brick/initiator/connector.py:242
 #, python-format
-msgid "%s must be either 'MANUAL' or 'AUTO'."
-msgstr ""
-
-#: cinder/api/openstack/compute/contrib/extended_server_attributes.py:77
-#: cinder/api/openstack/compute/contrib/extended_status.py:61
-msgid "Server not found."
+msgid "Found iSCSI node %(host_device)s (after %(tries)s rescans)"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/flavorextradata.py:61
-#: cinder/api/openstack/compute/contrib/flavorextradata.py:91
-msgid "Flavor not found."
+#: cinder/brick/initiator/connector.py:317
+#, python-format
+msgid "Could not find the iSCSI Initiator File %s"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:49
-#: cinder/api/openstack/compute/contrib/volumetypes.py:158
-msgid "No Request Body"
+#: cinder/brick/initiator/connector.py:609
+msgid "We are unable to locate any Fibre Channel devices"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/floating_ips.py:159
+#: cinder/brick/initiator/connector.py:619
 #, python-format
-msgid "No more floating ips in pool %s."
-msgstr ""
-
-#: cinder/api/openstack/compute/contrib/floating_ips.py:161
-msgid "No more floating ips available."
+msgid "Looking for Fibre Channel dev %(device)s"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/floating_ips.py:201
-#: cinder/api/openstack/compute/contrib/floating_ips.py:230
-#: cinder/api/openstack/compute/contrib/security_groups.py:571
-#: cinder/api/openstack/compute/contrib/security_groups.py:604
-msgid "Missing parameter dict"
+#: cinder/brick/initiator/connector.py:629
+msgid "Fibre Channel volume device not found."
 msgstr ""

-#: cinder/api/openstack/compute/contrib/floating_ips.py:204
-#: cinder/api/openstack/compute/contrib/floating_ips.py:233
-msgid "Address not specified"
+#: cinder/brick/initiator/connector.py:633
+#, python-format
+msgid "Fibre volume not yet found. Will rescan & retry. Try number: %(tries)s"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/floating_ips.py:213
-msgid "No fixed ips associated to instance"
+#: cinder/brick/initiator/connector.py:649
+#, python-format
+msgid "Found Fibre Channel volume %(name)s (after %(tries)s rescans)"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/floating_ips.py:216
-msgid "Associate floating ip failed"
+#: cinder/brick/initiator/connector.py:658
+#, python-format
+msgid "Multipath device discovered %(device)s"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/hosts.py:144
+#: cinder/brick/initiator/connector.py:776
 #, python-format
-msgid "Invalid status: '%s'"
+msgid "AoE volume not yet found at: %(path)s. Try number: %(tries)s"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/hosts.py:148
+#: cinder/brick/initiator/connector.py:789
 #, python-format
-msgid "Invalid mode: '%s'"
+msgid "Found AoE device %(path)s (after %(tries)s rediscover)"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/hosts.py:152
+#: cinder/brick/initiator/connector.py:815
 #, python-format
-msgid "Invalid update setting: '%s'"
+msgid "aoe-discover: stdout=%(out)s stderr%(err)s"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/hosts.py:170
+#: cinder/brick/initiator/connector.py:825
 #, python-format
-msgid "Putting host %(host)s in maintenance mode %(mode)s."
+msgid "aoe-revalidate %(dev)s: stdout=%(out)s stderr%(err)s"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/hosts.py:181
+#: cinder/brick/initiator/connector.py:834
 #, python-format
-msgid "Setting host %(host)s to %(state)s."
+msgid "aoe-flush %(dev)s: stdout=%(out)s stderr%(err)s"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/hosts.py:230
-msgid "Describe-resource is admin only functionality"
+#: cinder/brick/initiator/connector.py:858
+msgid ""
+"Connection details not present. RemoteFsClient may not initialize "
+"properly."
 msgstr ""

-#: cinder/api/openstack/compute/contrib/hosts.py:238
-msgid "Host not found"
+#: cinder/brick/initiator/connector.py:915
+msgid "Invalid connection_properties specified no device_path attribute"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/keypairs.py:70
-msgid "Keypair name contains unsafe characters"
+#: cinder/brick/initiator/linuxfc.py:50 cinder/brick/initiator/linuxfc.py:56
+msgid "systool is not installed"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/keypairs.py:95
-msgid "Keypair name must be between 1 and 255 characters long"
+#: cinder/brick/initiator/linuxscsi.py:99
+#: cinder/brick/initiator/linuxscsi.py:107
+#: cinder/brick/initiator/linuxscsi.py:124
+#, python-format
+msgid "multipath call failed exit (%(code)s)"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/keypairs.py:100
+#: cinder/brick/initiator/linuxscsi.py:145
 #, python-format
-msgid "Key pair '%s' already exists."
+msgid "Couldn't find multipath device %(line)s"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/multinic.py:52
-msgid "Missing 'networkId' argument for addFixedIp"
+#: cinder/brick/initiator/linuxscsi.py:149
+#, python-format
+msgid "Found multipath device = %(mdev)s"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/multinic.py:68
-msgid "Missing 'address' argument for removeFixedIp"
+#: cinder/brick/iscsi/iscsi.py:140
+msgid "Attempting recreate of backing lun..."
 msgstr ""

-#: cinder/api/openstack/compute/contrib/multinic.py:77
+#: cinder/brick/iscsi/iscsi.py:158
 #, python-format
-msgid "Unable to find address %r"
+msgid ""
+"Failed to recover attempt to create iscsi backing lun for volume "
+"id:%(vol_id)s: %(e)s"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/networks.py:62
+#: cinder/brick/iscsi/iscsi.py:177
 #, python-format
-msgid "Network does not have %s action"
+msgid "Creating iscsi_target for: %s"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/networks.py:70
+#: cinder/brick/iscsi/iscsi.py:184
 #, python-format
-msgid "Disassociating network with id %s"
+msgid ""
+"Created volume path %(vp)s,\n"
+"content: %(vc)%"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/networks.py:74
-#: cinder/api/openstack/compute/contrib/networks.py:91
-#: cinder/api/openstack/compute/contrib/networks.py:101
-msgid "Network not found"
-msgstr ""
+#: cinder/brick/iscsi/iscsi.py:216 cinder/brick/iscsi/iscsi.py:365
+#, fuzzy, python-format
+msgid "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s"
+msgstr "找不到Volume %s"

-#: cinder/api/openstack/compute/contrib/networks.py:87
+#: cinder/brick/iscsi/iscsi.py:227
 #, python-format
-msgid "Showing network with id %s"
+msgid ""
+"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure "
+"your tgtd config file contains 'include %(volumes_dir)s/*'"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/networks.py:97
+#: cinder/brick/iscsi/iscsi.py:258
 #, python-format
-msgid "Deleting network with id %s"
+msgid "Removing iscsi_target for: %s"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/scheduler_hints.py:41
-msgid "Malformed scheduler_hints attribute"
+#: cinder/brick/iscsi/iscsi.py:262
+#, python-format
+msgid "Volume path %s does not exist, nothing to remove."
 msgstr ""

-#: cinder/api/openstack/compute/contrib/security_groups.py:222
-msgid "Security group id should be integer"
-msgstr ""
+#: cinder/brick/iscsi/iscsi.py:280
+#, fuzzy, python-format
+msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s"
+msgstr "找不到Volume %s"

-#: cinder/api/openstack/compute/contrib/security_groups.py:243
-msgid "Security group is still in use"
+#: cinder/brick/iscsi/iscsi.py:290 cinder/brick/iscsi/iscsi.py:550
+msgid "valid iqn needed for show_target"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/security_groups.py:295
+#: cinder/brick/iscsi/iscsi.py:375
 #, python-format
-msgid "Security group %s already exists"
+msgid "Removing iscsi_target for volume: %s"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/security_groups.py:315
-#, python-format
-msgid "Security group %s is not a string or unicode"
+#: cinder/brick/iscsi/iscsi.py:469
+msgid "cinder-rtstool is not installed correctly"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/security_groups.py:318
+#: cinder/brick/iscsi/iscsi.py:489
 #, python-format
-msgid "Security group %s cannot be empty."
+msgid "Creating iscsi_target for volume: %s"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/security_groups.py:321
+#: cinder/brick/iscsi/iscsi.py:513 cinder/brick/iscsi/iscsi.py:522
 #, python-format
-msgid "Security group %s should not be greater than 255 characters."
-msgstr ""
-
-#: cinder/api/openstack/compute/contrib/security_groups.py:348
-msgid "Parent group id is not integer"
+msgid "Failed to create iscsi target for volume id:%s."
 msgstr ""

-#: cinder/api/openstack/compute/contrib/security_groups.py:351
+#: cinder/brick/iscsi/iscsi.py:532
 #, python-format
-msgid "Security group (%s) not found"
+msgid "Removing iscsi_target: %s"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/security_groups.py:369
-msgid "Not enough parameters to build a valid rule."
-msgstr ""
-
-#: cinder/api/openstack/compute/contrib/security_groups.py:376
+#: cinder/brick/iscsi/iscsi.py:542
 #, python-format
-msgid "This rule already exists in group %s"
-msgstr ""
-
-#: cinder/api/openstack/compute/contrib/security_groups.py:414
-msgid "Parent or group id is not integer"
+msgid "Failed to remove iscsi target for volume id:%s."
 msgstr ""

-#: cinder/api/openstack/compute/contrib/security_groups.py:507
-msgid "Rule id is not integer"
-msgstr ""
-
-#: cinder/api/openstack/compute/contrib/security_groups.py:510
+#: cinder/brick/iscsi/iscsi.py:571
 #, python-format
-msgid "Rule (%s) not found"
-msgstr ""
-
-#: cinder/api/openstack/compute/contrib/security_groups.py:574
-#: cinder/api/openstack/compute/contrib/security_groups.py:607
-msgid "Security group not specified"
+msgid "Failed to add initiator iqn %s to target"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/security_groups.py:578
-#: cinder/api/openstack/compute/contrib/security_groups.py:611
-msgid "Security group name cannot be empty"
+#: cinder/brick/local_dev/lvm.py:75
+msgid "Error creating Volume Group"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/server_start_stop.py:45
+#: cinder/brick/local_dev/lvm.py:76 cinder/brick/local_dev/lvm.py:158
+#: cinder/brick/local_dev/lvm.py:474 cinder/brick/local_dev/lvm.py:503
+#: cinder/brick/local_dev/lvm.py:546 cinder/brick/local_dev/lvm.py:609
 #, python-format
-msgid "start instance %r"
+msgid "Cmd :%s"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/server_start_stop.py:54
+#: cinder/brick/local_dev/lvm.py:77 cinder/brick/local_dev/lvm.py:159
+#: cinder/brick/local_dev/lvm.py:475 cinder/brick/local_dev/lvm.py:504
+#: cinder/brick/local_dev/lvm.py:547 cinder/brick/local_dev/lvm.py:610
 #, python-format
-msgid "stop instance %r"
+msgid "StdOut :%s"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/volumes.py:73
-#: cinder/api/openstack/volume/volumes.py:106
+#: cinder/brick/local_dev/lvm.py:78 cinder/brick/local_dev/lvm.py:160
+#: cinder/brick/local_dev/lvm.py:476 cinder/brick/local_dev/lvm.py:505
+#: cinder/brick/local_dev/lvm.py:548 cinder/brick/local_dev/lvm.py:611
 #, python-format
-msgid "vol=%s"
+msgid "StdErr :%s"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/volumes.py:146
-#: cinder/api/openstack/volume/volumes.py:184
-#, python-format
-msgid "Delete volume with id: %s"
+#: cinder/brick/local_dev/lvm.py:82
+#, fuzzy, python-format
+msgid "Unable to locate Volume Group %s"
+msgstr "找不到Volume %s"
+
+#: cinder/brick/local_dev/lvm.py:157
+msgid "Error querying thin pool about data_percent"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/volumes.py:329
-#, python-format
-msgid "Attach volume %(volume_id)s to instance %(server_id)s at %(device)s"
+#: cinder/brick/local_dev/lvm.py:370
+#, fuzzy, python-format
+msgid "Unable to find VG: %s"
+msgstr "無法卸載 Volume %s"
+
+#: cinder/brick/local_dev/lvm.py:420
+msgid ""
+"Requested to setup thin provisioning, however current LVM version does "
+"not support it."
 msgstr ""

-#: cinder/api/openstack/compute/contrib/volumes.py:481
-#: cinder/api/openstack/volume/snapshots.py:110
-#, python-format
-msgid "Delete snapshot with id: %s"
+#: cinder/brick/local_dev/lvm.py:473
+msgid "Error creating Volume"
 msgstr ""

-#: cinder/api/openstack/compute/contrib/volumes.py:524
-#: cinder/api/openstack/volume/snapshots.py:150
-#, python-format
-msgid "Create snapshot from volume %s"
+#: cinder/brick/local_dev/lvm.py:489
+#, fuzzy, python-format
+msgid "Unable to find LV: %s"
+msgstr "無法卸載 Volume %s"
+
+#: cinder/brick/local_dev/lvm.py:502
+msgid "Error creating snapshot"
 msgstr ""

-#: cinder/auth/fakeldap.py:33
-msgid "Attempted to instantiate singleton"
+#: cinder/brick/local_dev/lvm.py:545
+msgid "Error activating LV"
 msgstr ""

-#: cinder/auth/ldapdriver.py:650
+#: cinder/brick/local_dev/lvm.py:563
 #, python-format
-msgid ""
-"Attempted to remove the last member of a group. Deleting the group at %s "
-"instead."
+msgid "Error reported running lvremove: CMD: %(command)s, RESPONSE: %(response)s"
 msgstr ""

-#: cinder/auth/manager.py:298
-#, python-format
-msgid "Looking up user: %r"
+#: cinder/brick/local_dev/lvm.py:568
+msgid "Attempting udev settle and retry of lvremove..."
 msgstr ""

-#: cinder/auth/manager.py:302
-#, python-format
-msgid "Failed authorization for access key %s"
+#: cinder/brick/local_dev/lvm.py:608
+msgid "Error extending Volume"
 msgstr ""

-#: cinder/auth/manager.py:308
-#, python-format
-msgid "Using project name = user name (%s)"
+#: cinder/brick/remotefs/remotefs.py:39
+msgid "nfs_mount_point_base required"
 msgstr ""

-#: cinder/auth/manager.py:315
-#, python-format
-msgid "failed authorization: no project named %(pjid)s (user=%(uname)s)"
+#: cinder/brick/remotefs/remotefs.py:45
+msgid "glusterfs_mount_point_base required"
 msgstr ""

-#: cinder/auth/manager.py:324
+#: cinder/brick/remotefs/remotefs.py:86
 #, python-format
-msgid ""
-"Failed authorization: user %(uname)s not admin and not member of project "
-"%(pjname)s"
+msgid "Already mounted: %s"
 msgstr ""

-#: cinder/auth/manager.py:331 cinder/auth/manager.py:343
-#, python-format
-msgid "user.secret: %s"
+#: cinder/common/config.py:125
+msgid "Deploy v1 of the Cinder API."
 msgstr ""

-#: cinder/auth/manager.py:332 cinder/auth/manager.py:344
-#, python-format
-msgid "expected_signature: %s"
+#: cinder/common/config.py:128
+msgid "Deploy v2 of the Cinder API."
 msgstr ""

-#: cinder/auth/manager.py:333 cinder/auth/manager.py:345
-#, python-format
-msgid "signature: %s"
+#: cinder/common/sqlalchemyutils.py:66
+#: cinder/openstack/common/db/sqlalchemy/utils.py:72
+msgid "Id not in sort_keys; is sort_keys unique?"
 msgstr ""

-#: cinder/auth/manager.py:335 cinder/auth/manager.py:357
-#, python-format
-msgid "Invalid signature for user %s"
+#: cinder/common/sqlalchemyutils.py:114
+#: cinder/openstack/common/db/sqlalchemy/utils.py:120
+msgid "Unknown sort direction, must be 'desc' or 'asc'"
 msgstr ""

-#: cinder/auth/manager.py:353
+#: cinder/compute/nova.py:97
 #, python-format
-msgid "host_only_signature: %s"
+msgid "Novaclient connection created using URL: %s"
 msgstr ""

-#: cinder/auth/manager.py:449
-msgid "Must specify project"
+#: cinder/db/sqlalchemy/api.py:63
+msgid "Use of empty request context is deprecated"
 msgstr ""

-#: cinder/auth/manager.py:490
+#: cinder/db/sqlalchemy/api.py:190
 #, python-format
-msgid "Adding role %(role)s to user %(uid)s in project %(pid)s"
+msgid "Unrecognized read_deleted value '%s'"
 msgstr ""

-#: cinder/auth/manager.py:493
+#: cinder/db/sqlalchemy/api.py:843
 #, python-format
-msgid "Adding sitewide role %(role)s to user %(uid)s"
+msgid "Change will make usage less than 0 for the following resources: %s"
 msgstr ""

-#: cinder/auth/manager.py:519
+#: cinder/db/sqlalchemy/api.py:1842
 #, python-format
-msgid "Removing role %(role)s from user %(uid)s on project %(pid)s"
+msgid "VolumeType %s deletion failed, VolumeType in use."
 msgstr ""

-#: cinder/auth/manager.py:522
+#: cinder/db/sqlalchemy/api.py:2530
 #, python-format
-msgid "Removing sitewide role %(role)s from user %(uid)s"
+msgid "No backup with id %s"
 msgstr ""

-#: cinder/auth/manager.py:595
-#, python-format
-msgid "Created project %(name)s with manager %(manager_user)s"
-msgstr ""
+#: cinder/db/sqlalchemy/api.py:2615
+#, fuzzy
+msgid "Volume must be available"
+msgstr "Volume 狀態需要可被使用"

-#: cinder/auth/manager.py:613
+#: cinder/db/sqlalchemy/api.py:2639
 #, python-format
-msgid "modifying project %s"
+msgid "Volume in unexpected state %s, expected awaiting-transfer"
 msgstr ""

-#: cinder/auth/manager.py:625
+#: cinder/db/sqlalchemy/api.py:2662
 #, python-format
-msgid "Adding user %(uid)s to project %(pid)s"
+msgid ""
+"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state "
+"%(status)s, expected awaiting-transfer"
 msgstr ""

-#: cinder/auth/manager.py:646
-#, python-format
-msgid "Remove user %(uid)s from project %(pid)s"
+#: cinder/db/sqlalchemy/migration.py:37
+msgid "version should be an integer"
 msgstr ""

-#: cinder/auth/manager.py:676
-#, python-format
-msgid "Deleting project %s"
+#: cinder/db/sqlalchemy/migration.py:64
+msgid "Upgrade DB using Essex release first."
 msgstr ""

-#: cinder/auth/manager.py:734
-#, python-format
-msgid "Created user %(rvname)s (admin: %(rvadmin)r)"
+#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:240
+msgid "Exception while creating table."
 msgstr ""

-#: cinder/auth/manager.py:743
-#, python-format
-msgid "Deleting user %s"
+#: cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py:269
+msgid "Downgrade from initial Cinder install is unsupported."
msgstr "" -#: cinder/auth/manager.py:753 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:49 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:74 +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:105 +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:45 +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:48 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:46 #, python-format -msgid "Access Key change for user %s" +msgid "Table |%s| not created!" msgstr "" -#: cinder/auth/manager.py:755 -#, python-format -msgid "Secret Key change for user %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:127 +msgid "Dropping foreign key reservations_ibfk_1 failed." msgstr "" -#: cinder/auth/manager.py:757 -#, python-format -msgid "Admin status set to %(admin)r for user %(uid)s" +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:133 +msgid "quota_classes table not dropped" msgstr "" -#: cinder/auth/manager.py:802 -#, python-format -msgid "No vpn data for project %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:140 +msgid "quota_usages table not dropped" msgstr "" -#: cinder/cloudpipe/pipelib.py:46 -msgid "Instance type for vpn instances" +#: cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py:147 +msgid "reservations table not dropped" msgstr "" -#: cinder/cloudpipe/pipelib.py:49 -msgid "Template for cloudpipe instance boot script" +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:60 +msgid "Exception while creating table 'volume_glance_metadata'" msgstr "" -#: cinder/cloudpipe/pipelib.py:52 -msgid "Network to push into openvpn config" +#: cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py:75 +msgid "volume_glance_metadata table not dropped" msgstr "" -#: cinder/cloudpipe/pipelib.py:55 -msgid "Netmask to push into openvpn config" +#: cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py:68 +msgid "backups table not dropped" msgstr "" -#: cinder/cloudpipe/pipelib.py:107 -#, python-format -msgid "Launching VPN for %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py:58 +msgid "snapshot_metadata table not dropped" msgstr "" -#: cinder/compute/api.py:141 -msgid "No compute host specified" +#: cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py:61 +msgid "transfers table not dropped" msgstr "" -#: cinder/compute/api.py:144 -#, python-format -msgid "Unable to find host for Instance %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:31 +msgid "migrations table not dropped" msgstr "" -#: cinder/compute/api.py:192 +#: cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py:61 #, python-format -msgid "" -"Quota exceeded for %(pid)s, tried to set %(num_metadata)s metadata " -"properties" +msgid "Table |%s| not created" msgstr "" -#: cinder/compute/api.py:203 +#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:37 #, python-format -msgid "Quota exceeded for %(pid)s, metadata property key or value too long" +msgid "Exception while dropping table %s." msgstr "" -#: cinder/compute/api.py:257 -msgid "Cannot run any more instances of this type." 
+#: cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py:100 +#, python-format +msgid "Exception while creating table %s." msgstr "" -#: cinder/compute/api.py:259 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:34 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:43 +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:49 #, python-format -msgid "Can only run %s more instances of this type." +msgid "Column |%s| not created!" msgstr "" -#: cinder/compute/api.py:261 -#, python-format -msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances. " +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:92 +msgid "encryption_key_id column not dropped from volumes" msgstr "" -#: cinder/compute/api.py:310 -msgid "Creating a raw instance" +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:100 +msgid "encryption_key_id column not dropped from snapshots" msgstr "" -#: cinder/compute/api.py:312 -#, python-format -msgid "Using Kernel=%(kernel_id)s, Ramdisk=%(ramdisk_id)s" +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:105 +msgid "volume_type_id column not dropped from snapshots" msgstr "" -#: cinder/compute/api.py:383 -#, python-format -msgid "Going to run %s instances..." +#: cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py:113 +msgid "encryption table not dropped" msgstr "" -#: cinder/compute/api.py:447 -#, python-format -msgid "bdm %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:49 +msgid "Table quality_of_service_specs not created!" msgstr "" -#: cinder/compute/api.py:474 -#, python-format -msgid "block_device_mapping %s" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:60 +msgid "Added qos_specs_id column to volume type table failed." msgstr "" -#: cinder/compute/api.py:591 -#, python-format -msgid "Sending create to scheduler for %(pid)s/%(uid)s's" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:85 +msgid "Dropping foreign key volume_types_ibfk_1 failed" msgstr "" -#: cinder/compute/api.py:871 -msgid "Going to try to soft delete instance" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:93 +msgid "Dropping qos_specs_id column failed." msgstr "" -#: cinder/compute/api.py:891 -msgid "No host for instance, deleting immediately" +#: cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py:100 +msgid "Dropping quality_of_service_specs table failed." msgstr "" -#: cinder/compute/api.py:939 -msgid "Going to try to terminate instance" +#: cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py:59 +msgid "volume_admin_metadata table not dropped" msgstr "" -#: cinder/compute/api.py:977 -msgid "Going to try to stop instance" +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:48 +msgid "" +"Found existing 'default' entries in the quota_classes table. Skipping " +"insertion of default values." msgstr "" -#: cinder/compute/api.py:996 -msgid "Going to try to start instance" +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:72 +msgid "Added default quota class data into the DB." msgstr "" -#: cinder/compute/api.py:1000 -#, python-format -msgid "Instance %(instance_uuid)s is not stopped. 
(%(vm_state)s" +#: cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py:74 +msgid "Default quota class data not inserted into the DB." msgstr "" -#: cinder/compute/api.py:1071 cinder/volume/api.py:173 -#: cinder/volume/volume_types.py:64 +#: cinder/image/glance.py:161 cinder/image/glance.py:169 #, python-format -msgid "Searching by: %s" +msgid "Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s." msgstr "" -#: cinder/compute/api.py:1201 -#, python-format -msgid "Image type not recognized %s" +#: cinder/image/image_utils.py:94 cinder/image/image_utils.py:199 +msgid "'qemu-img info' parsing failed." msgstr "" -#: cinder/compute/api.py:1369 -msgid "flavor_id is None. Assuming migration." +#: cinder/image/image_utils.py:101 +#, python-format +msgid "fmt=%(fmt)s backed by: %(backing_file)s" msgstr "" -#: cinder/compute/api.py:1377 +#: cinder/image/image_utils.py:109 cinder/image/image_utils.py:192 #, python-format msgid "" -"Old instance type %(current_instance_type_name)s, new instance type " -"%(new_instance_type_name)s" +"Size is %(image_size)dGB and doesn't fit in a volume of size " +"%(volume_size)dGB." msgstr "" -#: cinder/compute/api.py:1644 +#: cinder/image/image_utils.py:157 #, python-format -msgid "multiple fixedips exist, using the first: %s" +msgid "" +"qemu-img is not installed and image is of type %s. Only RAW images can " +"be used if qemu-img is not installed." msgstr "" -#: cinder/compute/instance_types.py:57 cinder/compute/instance_types.py:65 -msgid "create arguments must be positive integers" +#: cinder/image/image_utils.py:164 +msgid "" +"qemu-img is not installed and the disk format is not specified. Only RAW" +" images can be used if qemu-img is not installed." msgstr "" -#: cinder/compute/instance_types.py:76 cinder/volume/volume_types.py:41 +#: cinder/image/image_utils.py:178 #, python-format -msgid "DB error: %s" +msgid "Copying image from %(tmp)s to volume %(dest)s - size: %(size)s" msgstr "" -#: cinder/compute/instance_types.py:86 +#: cinder/image/image_utils.py:206 #, python-format -msgid "Instance type %s not found for deletion" +msgid "fmt=%(fmt)s backed by:%(backing_file)s" msgstr "" -#: cinder/compute/manager.py:138 +#: cinder/image/image_utils.py:224 #, python-format -msgid "check_instance_lock: decorating: |%s|" +msgid "Converted to %(vol_format)s, but format is now %(file_format)s" msgstr "" -#: cinder/compute/manager.py:140 +#: cinder/image/image_utils.py:260 #, python-format -msgid "" -"check_instance_lock: arguments: |%(self)s| |%(context)s| " -"|%(instance_uuid)s|" +msgid "Converted to %(f1)s, but format is now %(f2)s" msgstr "" -#: cinder/compute/manager.py:144 -#, python-format -msgid "check_instance_lock: locked: |%s|" +#: cinder/keymgr/conf_key_mgr.py:78 +msgid "" +"config option keymgr.fixed_key has not been defined: some operations may " +"fail unexpectedly" msgstr "" -#: cinder/compute/manager.py:146 -#, python-format -msgid "check_instance_lock: admin: |%s|" +#: cinder/keymgr/conf_key_mgr.py:80 +msgid "keymgr.fixed_key not defined" msgstr "" -#: cinder/compute/manager.py:151 +#: cinder/keymgr/conf_key_mgr.py:134 #, python-format -msgid "check_instance_lock: executing: |%s|" +msgid "Not deleting key %s" msgstr "" -#: cinder/compute/manager.py:155 +#: cinder/openstack/common/eventlet_backdoor.py:140 #, python-format -msgid "check_instance_lock: not executing |%s|" +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" msgstr "" -#: cinder/compute/manager.py:201 +#: 
cinder/openstack/common/excutils.py:48 #, python-format -msgid "Unable to load the virtualization driver: %s" +msgid "Original exception being dropped: %s" msgstr "" -#: cinder/compute/manager.py:223 +#: cinder/openstack/common/fileutils.py:64 #, python-format -msgid "" -"Instance %(instance_uuid)s has been destroyed from under us while trying " -"to set it to ERROR" +msgid "Reloading cached file %s" msgstr "" -#: cinder/compute/manager.py:240 -#, python-format -msgid "Current state is %(drv_state)s, state in DB is %(db_state)s." +#: cinder/openstack/common/gettextutils.py:252 +msgid "Message objects do not support addition." msgstr "" -#: cinder/compute/manager.py:245 -msgid "Rebooting instance after cinder-compute restart." +#: cinder/openstack/common/gettextutils.py:261 +msgid "" +"Message objects do not support str() because they may contain non-ascii " +"characters. Please use unicode() or translate() instead." msgstr "" -#: cinder/compute/manager.py:255 -msgid "Hypervisor driver does not support firewall rules" +#: cinder/openstack/common/imageutils.py:96 +msgid "Snapshot list encountered but no header found!" msgstr "" -#: cinder/compute/manager.py:260 -msgid "Checking state" +#: cinder/openstack/common/lockutils.py:102 +#, python-format +msgid "Could not release the acquired lock `%s`" msgstr "" -#: cinder/compute/manager.py:329 +#: cinder/openstack/common/lockutils.py:189 #, python-format -msgid "Setting up bdm %s" +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." msgstr "" -#: cinder/compute/manager.py:400 +#: cinder/openstack/common/lockutils.py:200 #, python-format -msgid "Instance %s already deleted from database. Attempting forceful vm deletion" +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." msgstr "" -#: cinder/compute/manager.py:406 +#: cinder/openstack/common/lockutils.py:227 #, python-format -msgid "Exception encountered while terminating the instance %s" +msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." msgstr "" -#: cinder/compute/manager.py:444 +#: cinder/openstack/common/lockutils.py:235 #, python-format -msgid "Instance %s not found." +msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..." msgstr "" -#: cinder/compute/manager.py:480 -msgid "Instance has already been created" +#: cinder/openstack/common/log.py:301 +#, python-format +msgid "Deprecated: %s" msgstr "" -#: cinder/compute/manager.py:523 +#: cinder/openstack/common/log.py:402 #, python-format -msgid "" -"image_id=%(image_id)s, image_size_bytes=%(size_bytes)d, " -"allowed_size_bytes=%(allowed_size_bytes)d" +msgid "Error loading logging config %(log_config)s: %(err_msg)s" msgstr "" -#: cinder/compute/manager.py:528 +#: cinder/openstack/common/log.py:453 #, python-format -msgid "" -"Image '%(image_id)s' size %(size_bytes)d exceeded instance_type allowed " -"size %(allowed_size_bytes)d" +msgid "syslog facility must be one of: %s" msgstr "" -#: cinder/compute/manager.py:538 -msgid "Starting instance..." 
+#: cinder/openstack/common/log.py:623 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" msgstr "" -#: cinder/compute/manager.py:548 -msgid "Skipping network allocation for instance" +#: cinder/openstack/common/loopingcall.py:82 +#, python-format +msgid "task run outlasted interval by %s sec" msgstr "" -#: cinder/compute/manager.py:561 -msgid "Instance failed network setup" +#: cinder/openstack/common/loopingcall.py:89 +#: cinder/tests/brick/test_brick_connector.py:466 +msgid "in fixed duration looping call" msgstr "" -#: cinder/compute/manager.py:565 +#: cinder/openstack/common/loopingcall.py:129 #, python-format -msgid "Instance network_info: |%s|" -msgstr "" - -#: cinder/compute/manager.py:578 -msgid "Instance failed block device setup" +msgid "Dynamic looping call sleeping for %.02f seconds" msgstr "" -#: cinder/compute/manager.py:594 -msgid "Instance failed to spawn" +#: cinder/openstack/common/loopingcall.py:136 +msgid "in dynamic looping call" msgstr "" -#: cinder/compute/manager.py:615 -msgid "Deallocating network for instance" +#: cinder/openstack/common/periodic_task.py:43 +#, python-format +msgid "Unexpected argument for periodic task creation: %(arg)s." msgstr "" -#: cinder/compute/manager.py:672 +#: cinder/openstack/common/periodic_task.py:134 #, python-format -msgid "%(action_str)s instance" +msgid "Skipping periodic task %(task)s because its interval is negative" msgstr "" -#: cinder/compute/manager.py:699 +#: cinder/openstack/common/periodic_task.py:139 #, python-format -msgid "Ignoring DiskNotFound: %s" +msgid "Skipping periodic task %(task)s because it is disabled" msgstr "" -#: cinder/compute/manager.py:708 +#: cinder/openstack/common/periodic_task.py:177 #, python-format -msgid "terminating bdm %s" +msgid "Running periodic task %(full_task_name)s" msgstr "" -#: cinder/compute/manager.py:742 cinder/compute/manager.py:1328 -#: cinder/compute/manager.py:1416 cinder/compute/manager.py:2501 +#: cinder/openstack/common/periodic_task.py:186 #, python-format -msgid "%s. Setting instance vm_state to ERROR" +msgid "Error during %(full_task_name)s: %(e)s" msgstr "" -#: cinder/compute/manager.py:811 +#: cinder/openstack/common/policy.py:149 #, python-format msgid "" -"Cannot rebuild instance [%(instance_uuid)s], because the given image does" -" not exist." +"Inheritance-based rules are deprecated; use the default brain instead of " +"%s." 
msgstr "" -#: cinder/compute/manager.py:816 +#: cinder/openstack/common/policy.py:163 #, python-format -msgid "Cannot rebuild instance [%(instance_uuid)s]: %(exc)s" +msgid "Failed to understand rule %(match)r" msgstr "" -#: cinder/compute/manager.py:823 +#: cinder/openstack/common/policy.py:173 #, python-format -msgid "Rebuilding instance %s" +msgid "Inheritance-based rules are deprecated; update _check_%s" msgstr "" -#: cinder/compute/manager.py:876 +#: cinder/openstack/common/policy.py:180 #, python-format -msgid "Rebooting instance %s" +msgid "No handler for matches of kind %s" msgstr "" -#: cinder/compute/manager.py:891 +#: cinder/openstack/common/processutils.py:127 #, python-format -msgid "" -"trying to reboot a non-running instance: %(instance_uuid)s (state: " -"%(state)s expected: %(running)s)" +msgid "Got unknown keyword args to utils.execute: %r" msgstr "" -#: cinder/compute/manager.py:933 +#: cinder/openstack/common/processutils.py:142 #, python-format -msgid "instance %s: snapshotting" +msgid "Running cmd (subprocess): %s" msgstr "" -#: cinder/compute/manager.py:939 +#: cinder/openstack/common/processutils.py:167 +#: cinder/openstack/common/processutils.py:239 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:345 #, python-format -msgid "" -"trying to snapshot a non-running instance: %(instance_uuid)s (state: " -"%(state)s expected: %(running)s)" +msgid "Result was %s" msgstr "" -#: cinder/compute/manager.py:995 +#: cinder/openstack/common/processutils.py:179 #, python-format -msgid "Found %(num_images)d images (rotation: %(rotation)d)" +msgid "%r failed. Retrying." msgstr "" -#: cinder/compute/manager.py:1001 +#: cinder/openstack/common/processutils.py:218 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:318 #, python-format -msgid "Rotating out %d backups" +msgid "Running cmd (SSH): %s" msgstr "" -#: cinder/compute/manager.py:1005 -#, python-format -msgid "Deleting image %s" +#: cinder/openstack/common/processutils.py:220 +msgid "Environment not supported over SSH" msgstr "" -#: cinder/compute/manager.py:1035 -#, python-format -msgid "Failed to set admin password. Instance %s is not running" +#: cinder/openstack/common/processutils.py:224 +msgid "process_input not supported over SSH" msgstr "" -#: cinder/compute/manager.py:1041 +#: cinder/openstack/common/service.py:175 +#: cinder/openstack/common/service.py:269 #, python-format -msgid "Instance %s: Root password set" +msgid "Caught %s, exiting" msgstr "" -#: cinder/compute/manager.py:1050 -msgid "set_admin_password is not implemented by this driver." +#: cinder/openstack/common/service.py:187 +msgid "Exception during rpc cleanup." 
msgstr "" -#: cinder/compute/manager.py:1064 -msgid "Error setting admin password" +#: cinder/openstack/common/service.py:238 +msgid "Parent process has died unexpectedly, exiting" msgstr "" -#: cinder/compute/manager.py:1079 -#, python-format -msgid "" -"trying to inject a file into a non-running instance: %(instance_uuid)s " -"(state: %(current_power_state)s expected: %(expected_state)s)" +#: cinder/openstack/common/service.py:275 +msgid "Unhandled exception" msgstr "" -#: cinder/compute/manager.py:1084 -#, python-format -msgid "instance %(instance_uuid)s: injecting file to %(path)s" +#: cinder/openstack/common/service.py:308 +msgid "Forking too fast, sleeping" msgstr "" -#: cinder/compute/manager.py:1098 +#: cinder/openstack/common/service.py:327 #, python-format -msgid "" -"trying to update agent on a non-running instance: %(instance_uuid)s " -"(state: %(current_power_state)s expected: %(expected_state)s)" +msgid "Started child %d" msgstr "" -#: cinder/compute/manager.py:1103 +#: cinder/openstack/common/service.py:337 #, python-format -msgid "instance %(instance_uuid)s: updating agent to %(url)s" +msgid "Starting %d workers" msgstr "" -#: cinder/compute/manager.py:1116 +#: cinder/openstack/common/service.py:354 #, python-format -msgid "instance %s: rescuing" +msgid "Child %(pid)d killed by signal %(sig)d" msgstr "" -#: cinder/compute/manager.py:1141 +#: cinder/openstack/common/service.py:358 #, python-format -msgid "instance %s: unrescuing" +msgid "Child %(pid)s exited with status %(code)d" msgstr "" -#: cinder/compute/manager.py:1270 -msgid "destination same as source!" +#: cinder/openstack/common/service.py:362 +#, python-format +msgid "pid %d not in child list" msgstr "" -#: cinder/compute/manager.py:1287 +#: cinder/openstack/common/service.py:392 #, python-format -msgid "instance %s: migrating" +msgid "Caught %s, stopping children" msgstr "" -#: cinder/compute/manager.py:1471 +#: cinder/openstack/common/service.py:410 #, python-format -msgid "instance %s: pausing" +msgid "Waiting on %d children to exit" msgstr "" -#: cinder/compute/manager.py:1489 +#: cinder/openstack/common/sslutils.py:98 #, python-format -msgid "instance %s: unpausing" +msgid "Invalid SSL version : %s" msgstr "" -#: cinder/compute/manager.py:1525 +#: cinder/openstack/common/strutils.py:86 #, python-format -msgid "instance %s: retrieving diagnostics" +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" msgstr "" -#: cinder/compute/manager.py:1534 +#: cinder/openstack/common/strutils.py:182 #, python-format -msgid "instance %s: suspending" +msgid "Invalid string format: %s" msgstr "" -#: cinder/compute/manager.py:1556 +#: cinder/openstack/common/strutils.py:189 #, python-format -msgid "instance %s: resuming" +msgid "Unknown byte multiplier: %s" msgstr "" -#: cinder/compute/manager.py:1579 +#: cinder/openstack/common/versionutils.py:69 #, python-format -msgid "instance %s: locking" +msgid "" +"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and " +"may be removed in %(remove_in)s." msgstr "" -#: cinder/compute/manager.py:1588 +#: cinder/openstack/common/versionutils.py:73 #, python-format -msgid "instance %s: unlocking" +msgid "" +"%(what)s is deprecated as of %(as_of)s and may be removed in " +"%(remove_in)s. It will not be superseded." msgstr "" -#: cinder/compute/manager.py:1596 -#, python-format -msgid "instance %s: getting locked state" +#: cinder/openstack/common/crypto/utils.py:29 +msgid "An unknown error occurred in crypto utils." 
msgstr "" -#: cinder/compute/manager.py:1606 +#: cinder/openstack/common/crypto/utils.py:36 #, python-format -msgid "instance %s: reset network" +msgid "Block size of %(given)d is too big, max = %(maximum)d" msgstr "" -#: cinder/compute/manager.py:1614 +#: cinder/openstack/common/crypto/utils.py:45 #, python-format -msgid "instance %s: inject network info" +msgid "Length of %(given)d is too long, max = %(maximum)d" msgstr "" -#: cinder/compute/manager.py:1618 -#, python-format -msgid "network_info to inject: |%s|" +#: cinder/openstack/common/db/exception.py:44 +msgid "Invalid Parameter: Unicode is not supported by the current database." msgstr "" -#: cinder/compute/manager.py:1655 -#, python-format -msgid "instance %s: getting vnc console" +#: cinder/openstack/common/db/sqlalchemy/session.py:487 +msgid "DB exception wrapped." msgstr "" -#: cinder/compute/manager.py:1685 +#: cinder/openstack/common/db/sqlalchemy/session.py:538 #, python-format -msgid "Booting with volume %(volume_id)s at %(mountpoint)s" +msgid "Got mysql server has gone away: %s" msgstr "" -#: cinder/compute/manager.py:1703 +#: cinder/openstack/common/db/sqlalchemy/session.py:610 #, python-format -msgid "" -"instance %(instance_uuid)s: attaching volume %(volume_id)s to " -"%(mountpoint)s" +msgid "SQL connection failed. %s attempts left." msgstr "" -#: cinder/compute/manager.py:1705 -#, python-format -msgid "Attaching volume %(volume_id)s to %(mountpoint)s" +#: cinder/openstack/common/db/sqlalchemy/utils.py:33 +msgid "Sort key supplied was not valid." msgstr "" -#: cinder/compute/manager.py:1714 +#: cinder/openstack/common/notifier/api.py:129 #, python-format -msgid "instance %(instance_uuid)s: attach failed %(mountpoint)s, removing" +msgid "%s not in valid priorities" msgstr "" -#: cinder/compute/manager.py:1724 +#: cinder/openstack/common/notifier/api.py:145 #, python-format -msgid "Attach failed %(mountpoint)s, removing" +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" msgstr "" -#: cinder/compute/manager.py:1752 +#: cinder/openstack/common/notifier/api.py:164 #, python-format -msgid "Detach volume %(volume_id)s from mountpoint %(mp)s" +msgid "Failed to load notifier %s. These notifications will not be sent." +msgstr "" + +#: cinder/openstack/common/notifier/rabbit_notifier.py:27 +msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead." msgstr "" -#: cinder/compute/manager.py:1756 +#: cinder/openstack/common/notifier/rpc_notifier.py:45 +#: cinder/openstack/common/notifier/rpc_notifier2.py:51 #, python-format -msgid "Detaching volume from unknown instance %s" +msgid "Could not send notification to %(topic)s. Payload=%(message)s" msgstr "" -#: cinder/compute/manager.py:1822 +#: cinder/openstack/common/rpc/__init__.py:105 #, python-format msgid "" -"Creating tmpfile %s to notify to other compute nodes that they should " -"mount the same storage." +"A RPC is being made while holding a lock. The locks currently held are " +"%(locks)s. This is probably a bug. Please report it. Include the " +"following: [%(stack)s]." msgstr "" -#: cinder/compute/manager.py:1884 -msgid "Instance has no volume." +#: cinder/openstack/common/rpc/amqp.py:83 +msgid "Pool creating new connection" msgstr "" -#: cinder/compute/manager.py:1916 +#: cinder/openstack/common/rpc/amqp.py:208 #, python-format -msgid "plug_vifs() failed %(cnt)d.Retry up to %(max_retry)d for %(hostname)s." 
+msgid "no calling threads waiting for msg_id : %s, message : %s" msgstr "" -#: cinder/compute/manager.py:1973 +#: cinder/openstack/common/rpc/amqp.py:216 #, python-format -msgid "Pre live migration failed at %(dest)s" +msgid "" +"Number of call waiters is greater than warning threshhold: %d. There " +"could be a MulticallProxyWaiter leak." msgstr "" -#: cinder/compute/manager.py:2000 -msgid "post_live_migration() is started.." +#: cinder/openstack/common/rpc/amqp.py:299 +#, python-format +msgid "unpacked context: %s" msgstr "" -#: cinder/compute/manager.py:2030 -msgid "No floating_ip found" +#: cinder/openstack/common/rpc/amqp.py:345 +#, python-format +msgid "UNIQUE_ID is %s." msgstr "" -#: cinder/compute/manager.py:2038 -msgid "No floating_ip found." +#: cinder/openstack/common/rpc/amqp.py:414 +#, python-format +msgid "received %s" msgstr "" -#: cinder/compute/manager.py:2040 +#: cinder/openstack/common/rpc/amqp.py:422 #, python-format -msgid "" -"Live migration: Unexpected error: cannot inherit floating ip.\n" -"%(e)s" +msgid "no method for message: %s" msgstr "" -#: cinder/compute/manager.py:2073 +#: cinder/openstack/common/rpc/amqp.py:423 #, python-format -msgid "Migrating instance to %(dest)s finished successfully." +msgid "No method for message: %s" msgstr "" -#: cinder/compute/manager.py:2075 -msgid "" -"You may see the error \"libvirt: QEMU error: Domain not found: no domain " -"with matching name.\" This error can be safely ignored." +#: cinder/openstack/common/rpc/amqp.py:451 +#: cinder/openstack/common/rpc/impl_zmq.py:280 +#, python-format +msgid "Expected exception during message handling (%s)" +msgstr "" + +#: cinder/openstack/common/rpc/amqp.py:459 +#: cinder/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" msgstr "" -#: cinder/compute/manager.py:2090 -msgid "Post operation of migration started" +#: cinder/openstack/common/rpc/amqp.py:594 +#, python-format +msgid "Making synchronous call on %s ..." msgstr "" -#: cinder/compute/manager.py:2226 +#: cinder/openstack/common/rpc/amqp.py:597 #, python-format -msgid "Updated the info_cache for instance %s" +msgid "MSG_ID is %s" msgstr "" -#: cinder/compute/manager.py:2255 -msgid "Updating bandwidth usage cache" +#: cinder/openstack/common/rpc/amqp.py:631 +#, python-format +msgid "Making asynchronous cast on %s..." msgstr "" -#: cinder/compute/manager.py:2277 -msgid "Updating host status" +#: cinder/openstack/common/rpc/amqp.py:640 +msgid "Making asynchronous fanout cast..." msgstr "" -#: cinder/compute/manager.py:2305 +#: cinder/openstack/common/rpc/amqp.py:668 #, python-format -msgid "" -"Found %(num_db_instances)s in the database and %(num_vm_instances)s on " -"the hypervisor." +msgid "Sending %(event_type)s on %(topic)s" msgstr "" -#: cinder/compute/manager.py:2331 +#: cinder/openstack/common/rpc/common.py:74 +#, fuzzy +msgid "An unknown RPC related exception occurred." +msgstr "發生一個未知例外" + +#: cinder/openstack/common/rpc/common.py:104 #, python-format msgid "" -"During the sync_power process the instance %(uuid)s has moved from host " -"%(src)s to host %(dst)s" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." msgstr "" -#: cinder/compute/manager.py:2344 +#: cinder/openstack/common/rpc/common.py:121 #, python-format msgid "" -"Instance %s is in the process of migrating to this host. 
Wait next " -"sync_power cycle before setting power state to NOSTATE" +"Timeout while waiting on RPC response - topic: \"%(topic)s\", RPC method:" +" \"%(method)s\" info: \"%(info)s\"" msgstr "" -#: cinder/compute/manager.py:2350 -msgid "" -"Instance found in database but not known by hypervisor. Setting power " -"state to NOSTATE" +#: cinder/openstack/common/rpc/common.py:137 +#: cinder/openstack/common/rpc/common.py:138 +#: cinder/openstack/common/rpc/common.py:139 +msgid "" msgstr "" -#: cinder/compute/manager.py:2380 -msgid "FLAGS.reclaim_instance_interval <= 0, skipping..." +#: cinder/openstack/common/rpc/common.py:143 +#, python-format +msgid "Found duplicate message(%(msg_id)s). Skipping it." msgstr "" -#: cinder/compute/manager.py:2392 -msgid "Reclaiming deleted instance" +#: cinder/openstack/common/rpc/common.py:147 +msgid "Invalid reuse of an RPC connection." msgstr "" -#: cinder/compute/manager.py:2458 +#: cinder/openstack/common/rpc/common.py:151 #, python-format -msgid "" -"Detected instance with name label '%(name)s' which is marked as DELETED " -"but still present on host." +msgid "Specified RPC version, %(version)s, not supported by this endpoint." msgstr "" -#: cinder/compute/manager.py:2465 +#: cinder/openstack/common/rpc/common.py:156 #, python-format msgid "" -"Destroying instance with name label '%(name)s' which is marked as DELETED" -" but still present on host." +"Specified RPC envelope version, %(version)s, not supported by this " +"endpoint." msgstr "" -#: cinder/compute/manager.py:2472 +#: cinder/openstack/common/rpc/common.py:280 #, python-format -msgid "Unrecognized value '%(action)s' for FLAGS.running_deleted_instance_action" +msgid "Failed to sanitize %(item)s. Key error %(err)s" msgstr "" -#: cinder/compute/manager.py:2542 +#: cinder/openstack/common/rpc/common.py:302 #, python-format -msgid "" -"Aggregate %(aggregate_id)s: unrecoverable state during operation on " -"%(host)s" -msgstr "" - -#: cinder/compute/utils.py:142 -msgid "v4 subnets are required for legacy nw_info" +msgid "Returning exception %s to caller" msgstr "" -#: cinder/console/manager.py:77 cinder/console/vmrc_manager.py:70 -msgid "Adding console" +#: cinder/openstack/common/rpc/impl_kombu.py:166 +#: cinder/openstack/common/rpc/impl_qpid.py:164 +msgid "Failed to process message... skipping it." msgstr "" -#: cinder/console/manager.py:97 +#: cinder/openstack/common/rpc/impl_kombu.py:477 #, python-format -msgid "Tried to remove non-existent console %(console_id)s." +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" msgstr "" -#: cinder/console/vmrc_manager.py:122 +#: cinder/openstack/common/rpc/impl_kombu.py:499 #, python-format -msgid "Tried to remove non-existent console %(console_id)s." +msgid "Connected to AMQP server on %(hostname)s:%(port)d" msgstr "" -#: cinder/console/vmrc_manager.py:125 +#: cinder/openstack/common/rpc/impl_kombu.py:536 #, python-format -msgid "Removing console %(console_id)s." -msgstr "" - -#: cinder/console/xvp.py:98 -msgid "Rebuilding xvp conf" +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" msgstr "" -#: cinder/console/xvp.py:116 +#: cinder/openstack/common/rpc/impl_kombu.py:552 #, python-format -msgid "Re-wrote %s" +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." 
msgstr "" -#: cinder/console/xvp.py:121 -msgid "Stopping xvp" +#: cinder/openstack/common/rpc/impl_kombu.py:606 +#: cinder/openstack/common/rpc/impl_qpid.py:507 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" msgstr "" -#: cinder/console/xvp.py:134 -msgid "Starting xvp" +#: cinder/openstack/common/rpc/impl_kombu.py:624 +#: cinder/openstack/common/rpc/impl_qpid.py:522 +#, python-format +msgid "Timed out waiting for RPC response: %s" msgstr "" -#: cinder/console/xvp.py:141 +#: cinder/openstack/common/rpc/impl_kombu.py:628 +#: cinder/openstack/common/rpc/impl_qpid.py:526 #, python-format -msgid "Error starting xvp: %s" +msgid "Failed to consume message from queue: %s" msgstr "" -#: cinder/console/xvp.py:144 -msgid "Restarting xvp" +#: cinder/openstack/common/rpc/impl_kombu.py:667 +#: cinder/openstack/common/rpc/impl_qpid.py:561 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" msgstr "" -#: cinder/console/xvp.py:146 -msgid "xvp not running..." +#: cinder/openstack/common/rpc/impl_qpid.py:84 +#, python-format +msgid "Invalid value for qpid_topology_version: %d" msgstr "" -#: cinder/consoleauth/manager.py:63 +#: cinder/openstack/common/rpc/impl_qpid.py:455 #, python-format -msgid "Deleting Expired Token: (%s)" +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" msgstr "" -#: cinder/consoleauth/manager.py:75 +#: cinder/openstack/common/rpc/impl_qpid.py:461 #, python-format -msgid "Received Token: %(token)s, %(token_dict)s)" +msgid "Connected to AMQP server on %s" msgstr "" -#: cinder/consoleauth/manager.py:79 -#, python-format -msgid "Checking Token: %(token)s, %(token_valid)s)" +#: cinder/openstack/common/rpc/impl_qpid.py:474 +msgid "Re-established AMQP queues" msgstr "" -#: cinder/db/sqlalchemy/api.py:57 -msgid "Use of empty request context is deprecated" +#: cinder/openstack/common/rpc/impl_qpid.py:534 +msgid "Error processing message. Skipping it." msgstr "" -#: cinder/db/sqlalchemy/api.py:198 -#, python-format -msgid "Unrecognized read_deleted value '%s'" +#: cinder/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." msgstr "" -#: cinder/db/sqlalchemy/api.py:516 cinder/db/sqlalchemy/api.py:551 +#: cinder/openstack/common/rpc/impl_zmq.py:101 #, python-format -msgid "No ComputeNode for %(host)s" +msgid "Deserializing: %s" msgstr "" -#: cinder/db/sqlalchemy/api.py:4019 cinder/db/sqlalchemy/api.py:4045 +#: cinder/openstack/common/rpc/impl_zmq.py:136 #, python-format -msgid "No backend config with id %(sm_backend_id)s" +msgid "Connecting to %(addr)s with %(type)s" msgstr "" -#: cinder/db/sqlalchemy/api.py:4103 +#: cinder/openstack/common/rpc/impl_zmq.py:137 #, python-format -msgid "No sm_flavor called %(sm_flavor)s" +msgid "-> Subscribed to %(subscribe)s" msgstr "" -#: cinder/db/sqlalchemy/api.py:4147 +#: cinder/openstack/common/rpc/impl_zmq.py:138 #, python-format -msgid "No sm_volume with id %(volume_id)s" +msgid "-> bind: %(bind)s" msgstr "" -#: cinder/db/sqlalchemy/migration.py:66 -msgid "python-migrate is not installed. Exiting." +#: cinder/openstack/common/rpc/impl_zmq.py:146 +msgid "Could not open socket." msgstr "" -#: cinder/db/sqlalchemy/migration.py:78 -msgid "version should be an integer" +#: cinder/openstack/common/rpc/impl_zmq.py:158 +#, python-format +msgid "Subscribing to %s" msgstr "" -#: cinder/db/sqlalchemy/session.py:137 -#, python-format -msgid "SQL connection failed. %s attempts left." 
+#: cinder/openstack/common/rpc/impl_zmq.py:200 +msgid "You cannot recv on this socket." msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:48 -msgid "interface column not added to networks table" +#: cinder/openstack/common/rpc/impl_zmq.py:205 +msgid "You cannot send on this socket." msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:80 -#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:54 -#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:61 -#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:48 +#: cinder/openstack/common/rpc/impl_zmq.py:267 #, python-format -msgid "Table |%s| not created!" +msgid "Running func with context: %s" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:87 -msgid "VIF column not added to fixed_ips table" +#: cinder/openstack/common/rpc/impl_zmq.py:305 +msgid "Sending reply" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:97 -#, python-format -msgid "join list for moving mac_addresses |%s|" +#: cinder/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:39 -#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:60 -#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:61 -#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:99 -msgid "foreign key constraint couldn't be added" +#: cinder/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:58 -msgid "foreign key constraint couldn't be dropped" +#: cinder/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/045_add_network_priority.py:34 -msgid "priority column not added to networks table" +#: cinder/openstack/common/rpc/impl_zmq.py:387 +msgid "Consuming socket" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:41 -#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:42 -#: cinder/db/sqlalchemy/migrate_repo/versions/064_change_instance_id_to_uuid_in_instance_actions.py:56 -#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:68 -msgid "foreign key constraint couldn't be removed" +#: cinder/openstack/common/rpc/impl_zmq.py:437 +#, python-format +msgid "Creating proxy for topic: %s" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/049_add_instances_progress.py:34 -msgid "progress column not added to instances table" +#: cinder/openstack/common/rpc/impl_zmq.py:443 +msgid "Topic contained dangerous characters." msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/055_convert_flavor_id_to_str.py:97 -#, python-format -msgid "" -"Could not cast flavorid to integer: %s. Set flavorid to an integer-like " -"string to downgrade." +#: cinder/openstack/common/rpc/impl_zmq.py:475 +msgid "Topic socket file creation failed." msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:69 -msgid "instance_info_caches tables not dropped" +#: cinder/openstack/common/rpc/impl_zmq.py:481 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." 
msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/069_block_migration.py:41 -msgid "progress column not added to compute_nodes table" +#: cinder/openstack/common/rpc/impl_zmq.py:497 +#, python-format +msgid "Required IPC directory does not exist at %s" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:76 -msgid "dns_domains table not dropped" +#: cinder/openstack/common/rpc/impl_zmq.py:506 +#, python-format +msgid "Permission denied to IPC directory at %s" msgstr "" -#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:60 -msgid "quota_classes table not dropped" +#: cinder/openstack/common/rpc/impl_zmq.py:509 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." msgstr "" -#: cinder/image/glance.py:147 -msgid "Connection error contacting glance server, retrying" +#: cinder/openstack/common/rpc/impl_zmq.py:543 +#, python-format +msgid "CONSUMER RECEIVED DATA: %s" msgstr "" -#: cinder/image/glance.py:153 cinder/network/quantum/melange_connection.py:104 -msgid "Maximum attempts reached" +#: cinder/openstack/common/rpc/impl_zmq.py:562 +msgid "ZMQ Envelope version unsupported or unknown." msgstr "" -#: cinder/image/glance.py:278 -#, python-format -msgid "Creating image in Glance. Metadata passed in %s" +#: cinder/openstack/common/rpc/impl_zmq.py:590 +msgid "Skipping topic registration. Already registered." msgstr "" -#: cinder/image/glance.py:281 +#: cinder/openstack/common/rpc/impl_zmq.py:597 #, python-format -msgid "Metadata after formatting for Glance %s" +msgid "Consumer is a zmq.%s" msgstr "" -#: cinder/image/glance.py:289 -#, python-format -msgid "Metadata returned from Glance formatted for Base %s" +#: cinder/openstack/common/rpc/impl_zmq.py:649 +msgid "Creating payload" msgstr "" -#: cinder/image/glance.py:331 cinder/image/glance.py:335 -msgid "Not the image owner" +#: cinder/openstack/common/rpc/impl_zmq.py:662 +msgid "Creating queue socket for reply waiter" msgstr "" -#: cinder/image/glance.py:410 -#, python-format -msgid "%(timestamp)s does not follow any of the signatures: %(iso_formats)s" +#: cinder/openstack/common/rpc/impl_zmq.py:675 +msgid "Sending cast" msgstr "" -#: cinder/image/s3.py:309 -#, python-format -msgid "Failed to download %(image_location)s to %(image_path)s" +#: cinder/openstack/common/rpc/impl_zmq.py:678 +msgid "Cast sent; Waiting reply" msgstr "" -#: cinder/image/s3.py:328 +#: cinder/openstack/common/rpc/impl_zmq.py:681 #, python-format -msgid "Failed to decrypt %(image_location)s to %(image_path)s" +msgid "Received message: %s" msgstr "" -#: cinder/image/s3.py:340 -#, python-format -msgid "Failed to untar %(image_location)s to %(image_path)s" +#: cinder/openstack/common/rpc/impl_zmq.py:682 +msgid "Unpacking response" msgstr "" -#: cinder/image/s3.py:353 -#, python-format -msgid "Failed to upload %(image_location)s to %(image_path)s" +#: cinder/openstack/common/rpc/impl_zmq.py:691 +msgid "Unsupported or unknown ZMQ envelope returned." msgstr "" -#: cinder/image/s3.py:379 -#, python-format -msgid "Failed to decrypt private key: %s" +#: cinder/openstack/common/rpc/impl_zmq.py:698 +msgid "RPC Message Invalid." 
msgstr "" -#: cinder/image/s3.py:387 +#: cinder/openstack/common/rpc/impl_zmq.py:721 #, python-format -msgid "Failed to decrypt initialization vector: %s" +msgid "%(msg)s" msgstr "" -#: cinder/image/s3.py:398 +#: cinder/openstack/common/rpc/impl_zmq.py:724 #, python-format -msgid "Failed to decrypt image file %(image_file)s: %(err)s" +msgid "Sending message(s) to: %s" msgstr "" -#: cinder/image/s3.py:410 -msgid "Unsafe filenames in image" +#: cinder/openstack/common/rpc/impl_zmq.py:728 +msgid "No matchmaker results. Not casting." msgstr "" -#: cinder/ipv6/account_identifier.py:38 cinder/ipv6/rfc2462.py:34 -#, python-format -msgid "Bad mac for to_global_ipv6: %s" +#: cinder/openstack/common/rpc/impl_zmq.py:731 +msgid "No match from matchmaker." msgstr "" -#: cinder/ipv6/account_identifier.py:40 cinder/ipv6/rfc2462.py:36 +#: cinder/openstack/common/rpc/impl_zmq.py:771 #, python-format -msgid "Bad prefix for to_global_ipv6: %s" +msgid "topic is %s." msgstr "" -#: cinder/ipv6/account_identifier.py:42 +#: cinder/openstack/common/rpc/impl_zmq.py:815 #, python-format -msgid "Bad project_id for to_global_ipv6: %s" +msgid "rpc_zmq_matchmaker = %(orig)s is deprecated; use %(new)s instead" msgstr "" -#: cinder/network/ldapdns.py:321 -msgid "This driver only supports type 'a' entries." +#: cinder/openstack/common/rpc/matchmaker.py:53 +msgid "Match not found by MatchMaker." msgstr "" -#: cinder/network/linux_net.py:166 -#, python-format -msgid "Attempted to remove chain %s which does not exist" +#: cinder/openstack/common/rpc/matchmaker.py:89 +msgid "Matchmaker does not implement registration or heartbeat." msgstr "" -#: cinder/network/linux_net.py:192 +#: cinder/openstack/common/rpc/matchmaker.py:239 #, python-format -msgid "Unknown chain: %r" +msgid "Matchmaker unregistered: %s, %s" msgstr "" -#: cinder/network/linux_net.py:215 -#, python-format -msgid "" -"Tried to remove rule that was not there: %(chain)r %(rule)r %(wrap)r " -"%(top)r" +#: cinder/openstack/common/rpc/matchmaker.py:250 +msgid "Register before starting heartbeat." msgstr "" -#: cinder/network/linux_net.py:335 -msgid "IPTablesManager.apply completed with success" +#: cinder/openstack/common/rpc/matchmaker.py:343 +#: cinder/openstack/common/rpc/matchmaker.py:361 +#: cinder/openstack/common/rpc/matchmaker_ring.py:79 +#: cinder/openstack/common/rpc/matchmaker_ring.py:97 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" msgstr "" -#: cinder/network/linux_net.py:694 +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:54 #, python-format -msgid "Hupping dnsmasq threw %s" +msgid "extra_spec requirement '%(req)s' does not match '%(cap)s'" msgstr "" -#: cinder/network/linux_net.py:696 +#: cinder/openstack/common/scheduler/filters/capabilities_filter.py:67 #, python-format -msgid "Pid %d is stale, relaunching dnsmasq" +msgid "%(host_state)s fails resource_type extra_specs requirements" msgstr "" -#: cinder/network/linux_net.py:756 -#, python-format -msgid "killing radvd threw %s" +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:43 +msgid "Re-scheduling is disabled." msgstr "" -#: cinder/network/linux_net.py:758 +#: cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py:52 #, python-format -msgid "Pid %d is stale, relaunching radvd" +msgid "Host %(host)s %(pass_msg)s. 
Previously tried hosts: %(hosts)s" msgstr "" -#: cinder/network/linux_net.py:967 -#, python-format -msgid "Starting VLAN inteface %s" +#: cinder/scheduler/driver.py:69 +msgid "Must implement host_passes_filters" msgstr "" -#: cinder/network/linux_net.py:999 -#, python-format -msgid "Starting Bridge interface for %s" +#: cinder/scheduler/driver.py:74 +msgid "Must implement find_retype_host" msgstr "" -#: cinder/network/linux_net.py:1142 -#, python-format -msgid "Starting bridge %s " +#: cinder/scheduler/driver.py:78 +msgid "Must implement a fallback schedule" msgstr "" -#: cinder/network/linux_net.py:1149 -#, python-format -msgid "Done starting bridge %s" +#: cinder/scheduler/driver.py:82 +msgid "Must implement schedule_create_volume" msgstr "" -#: cinder/network/linux_net.py:1167 +#: cinder/scheduler/filter_scheduler.py:98 #, python-format -msgid "Failed unplugging gateway interface '%s'" +msgid "cannot place volume %(id)s on %(host)s" msgstr "" -#: cinder/network/linux_net.py:1170 +#: cinder/scheduler/filter_scheduler.py:114 #, python-format -msgid "Unplugged gateway interface '%s'" +msgid "No valid hosts for volume %(id)s with type %(type)s" msgstr "" -#: cinder/network/manager.py:291 +#: cinder/scheduler/filter_scheduler.py:125 #, python-format -msgid "Fixed ip %(fixed_ip_id)s not found" +msgid "" +"Current host not valid for volume %(id)s with type %(type)s, migration " +"not allowed" msgstr "" -#: cinder/network/manager.py:300 cinder/network/manager.py:496 -#, python-format -msgid "Interface %(interface)s not found" +#: cinder/scheduler/filter_scheduler.py:156 +msgid "Invalid value for 'scheduler_max_attempts', must be >=1" msgstr "" -#: cinder/network/manager.py:315 +#: cinder/scheduler/filter_scheduler.py:174 #, python-format -msgid "floating IP allocation for instance |%s|" +msgid "" +"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : " +"%(exc)s" msgstr "" -#: cinder/network/manager.py:353 +#: cinder/scheduler/filter_scheduler.py:207 #, python-format -msgid "floating IP deallocation for instance |%s|" +msgid "Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" msgstr "" -#: cinder/network/manager.py:386 +#: cinder/scheduler/filter_scheduler.py:259 #, python-format -msgid "Address |%(address)s| is not allocated" +msgid "Filtered %s" msgstr "" -#: cinder/network/manager.py:390 +#: cinder/scheduler/filter_scheduler.py:276 #, python-format -msgid "Address |%(address)s| is not allocated to your project |%(project)s|" +msgid "Choosing %s" msgstr "" -#: cinder/network/manager.py:402 +#: cinder/scheduler/host_manager.py:264 #, python-format -msgid "Quota exceeded for %s, tried to allocate address" +msgid "Ignoring %(service_name)s service update from %(host)s" msgstr "" -#: cinder/network/manager.py:614 +#: cinder/scheduler/host_manager.py:269 #, python-format -msgid "" -"Database inconsistency: DNS domain |%s| is registered in the Cinder db but " -"not visible to either the floating or instance DNS driver. It will be " -"ignored." +msgid "Received %(service_name)s service update from %(host)s." msgstr "" -#: cinder/network/manager.py:660 +#: cinder/scheduler/host_manager.py:294 #, python-format -msgid "Domain |%(domain)s| already exists, changing zone to |%(av_zone)s|." +msgid "volume service is down or disabled. (host: %s)" msgstr "" -#: cinder/network/manager.py:670 -#, python-format -msgid "Domain |%(domain)s| already exists, changing project to |%(project)s|." 
+#: cinder/scheduler/manager.py:63 +msgid "" +"ChanceScheduler and SimpleScheduler have been deprecated due to lack of " +"support for advanced features like: volume types, volume encryption, QoS " +"etc. These two schedulers can be fully replaced by FilterScheduler with " +"certain combination of filters and weighers." msgstr "" -#: cinder/network/manager.py:778 -#, python-format -msgid "Disassociated %s stale fixed ip(s)" +#: cinder/scheduler/manager.py:98 cinder/scheduler/manager.py:100 +msgid "Failed to create scheduler manager volume flow" msgstr "" -#: cinder/network/manager.py:782 -msgid "setting network host" +#: cinder/scheduler/manager.py:159 +msgid "New volume type not specified in request_spec." msgstr "" -#: cinder/network/manager.py:896 +#: cinder/scheduler/manager.py:174 #, python-format -msgid "network allocations for instance |%s|" +msgid "Could not find a host for volume %(volume_id)s with type %(type_id)s." msgstr "" -#: cinder/network/manager.py:901 +#: cinder/scheduler/manager.py:192 #, python-format -msgid "networks retrieved for instance |%(instance_id)s|: |%(networks)s|" +msgid "Failed to schedule_%(method)s: %(ex)s" msgstr "" -#: cinder/network/manager.py:930 +#: cinder/scheduler/scheduler_options.py:68 #, python-format -msgid "network deallocation for instance |%s|" +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" msgstr "" -#: cinder/network/manager.py:1152 +#: cinder/scheduler/scheduler_options.py:78 #, python-format -msgid "" -"instance-dns-zone is |%(domain)s|, which is in availability zone " -"|%(zone)s|. Instance |%(instance)s| is in zone |%(zone2)s|. No DNS record" -" will be created." +msgid "Could not decode scheduler options: '%s'" msgstr "" -#: cinder/network/manager.py:1227 -#, python-format -msgid "Unable to release %s because vif doesn't exist." +#: cinder/scheduler/filters/capacity_filter.py:43 +msgid "Free capacity not set: volume node info collection broken." 
msgstr "" -#: cinder/network/manager.py:1244 +#: cinder/scheduler/filters/capacity_filter.py:57 #, python-format -msgid "Leased IP |%(address)s|" +msgid "" +"Insufficient free space for volume creation (requested / avail): " +"%(requested)s/%(available)s" msgstr "" -#: cinder/network/manager.py:1248 -#, python-format -msgid "IP %s leased that is not associated" +#: cinder/scheduler/flows/create_volume.py:53 +msgid "No volume_id provided to populate a request_spec from" msgstr "" -#: cinder/network/manager.py:1256 +#: cinder/scheduler/flows/create_volume.py:116 #, python-format -msgid "IP |%s| leased that isn't allocated" +msgid "Failed to schedule_create_volume: %(cause)s" msgstr "" -#: cinder/network/manager.py:1261 +#: cinder/scheduler/flows/create_volume.py:135 #, python-format -msgid "Released IP |%(address)s|" +msgid "Failed notifying on %(topic)s payload %(payload)s" msgstr "" -#: cinder/network/manager.py:1265 +#: cinder/tests/fake_driver.py:57 cinder/volume/driver.py:784 #, python-format -msgid "IP %s released that is not associated" +msgid "FAKE ISCSI: %s" msgstr "" -#: cinder/network/manager.py:1268 +#: cinder/tests/fake_driver.py:76 cinder/volume/driver.py:884 #, python-format -msgid "IP %s released that was not leased" +msgid "FAKE ISER: %s" msgstr "" -#: cinder/network/manager.py:1331 -msgid "cidr already in use" +#: cinder/tests/fake_driver.py:97 +msgid "local_path not implemented" msgstr "" -#: cinder/network/manager.py:1334 +#: cinder/tests/fake_driver.py:124 cinder/tests/fake_driver.py:129 #, python-format -msgid "requested cidr (%(cidr)s) conflicts with existing supernet (%(super)s)" +msgid "LoggingVolumeDriver: %s" msgstr "" -#: cinder/network/manager.py:1345 +#: cinder/tests/fake_utils.py:70 #, python-format -msgid "" -"requested cidr (%(cidr)s) conflicts with existing smaller cidr " -"(%(smaller)s)" +msgid "Faking execution of cmd (subprocess): %s" msgstr "" -#: cinder/network/manager.py:1404 -msgid "Network already exists!" +#: cinder/tests/fake_utils.py:78 +#, python-format +msgid "Faked command matched %s" msgstr "" -#: cinder/network/manager.py:1423 +#: cinder/tests/fake_utils.py:94 #, python-format -msgid "Network must be disassociated from project %s before delete" +msgid "Faked command raised an exception %s" msgstr "" -#: cinder/network/manager.py:1832 -msgid "" -"The sum between the number of networks and the vlan start cannot be " -"greater than 4094" +#: cinder/tests/fake_utils.py:97 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" msgstr "" -#: cinder/network/manager.py:1839 +#: cinder/tests/test_misc.py:58 #, python-format msgid "" -"The network range is not big enough to fit %(num_networks)s. 
Network size" -" is %(network_size)s" +"The following migrations are missing a downgrade:\n" +"\t%s" msgstr "" -#: cinder/network/minidns.py:65 -msgid "This driver only supports type 'a'" +#: cinder/tests/test_netapp_nfs.py:360 +#, python-format +msgid "Share %(share)s and file name %(file_name)s" msgstr "" -#: cinder/network/quantum/client.py:154 -msgid "Tenant ID not set" +#: cinder/tests/test_rbd.py:768 cinder/volume/drivers/rbd.py:175 +msgid "flush() not supported in this version of librbd" msgstr "" -#: cinder/network/quantum/client.py:180 +#: cinder/tests/test_storwize_svc.py:260 #, python-format -msgid "Quantum Client Request: %(method)s %(action)s" +msgid "unrecognized argument %s" msgstr "" -#: cinder/network/quantum/client.py:196 +#: cinder/tests/test_storwize_svc.py:1507 #, python-format -msgid "Quantum entity not found: %s" +msgid "Run CLI command: %s" msgstr "" -#: cinder/network/quantum/client.py:206 +#: cinder/tests/test_storwize_svc.py:1510 #, python-format -msgid "Server %(status_code)s error: %(data)s" +msgid "" +"CLI output:\n" +" stdout: %(stdout)s\n" +" stderr: %(stderr)s" msgstr "" -#: cinder/network/quantum/client.py:210 +#: cinder/tests/test_storwize_svc.py:1515 #, python-format -msgid "Unable to connect to server. Got error: %s" +msgid "" +"CLI Exception output:\n" +" stdout: %(out)s\n" +" stderr: %(err)s" msgstr "" -#: cinder/network/quantum/client.py:228 +#: cinder/tests/test_volume_types.py:60 #, python-format -msgid "unable to deserialize object of type = '%s'" +msgid "Given data: %s" msgstr "" -#: cinder/network/quantum/manager.py:162 -msgid "QuantumManager does not use 'multi_host' parameter." +#: cinder/tests/test_volume_types.py:61 +#, python-format +msgid "Result data: %s" msgstr "" -#: cinder/network/quantum/manager.py:166 -msgid "QuantumManager requires that only one network is created per call" +#: cinder/tests/test_xiv_ds8k.py:102 +#, python-format +msgid "Volume not found for instance %(instance_id)s." msgstr "" -#: cinder/network/quantum/manager.py:176 -msgid "QuantumManager does not use 'vlan_start' parameter." -msgstr "" +#: cinder/tests/api/contrib/test_backups.py:741 +#, fuzzy +msgid "Invalid input" +msgstr "無效的快照(snapshot)" -#: cinder/network/quantum/manager.py:182 -msgid "QuantumManager does not use 'vpn_start' parameter." +#: cinder/tests/integrated/test_login.py:29 +#, python-format +msgid "volume: %s" msgstr "" -#: cinder/network/quantum/manager.py:186 -msgid "QuantumManager does not use 'bridge' parameter." +#: cinder/tests/integrated/api/client.py:32 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" msgstr "" -#: cinder/network/quantum/manager.py:190 -msgid "QuantumManager does not use 'bridge_interface' parameter." +#: cinder/tests/integrated/api/client.py:42 +msgid "Authentication error" msgstr "" -#: cinder/network/quantum/manager.py:195 -msgid "QuantumManager requires a valid (.1) gateway address." 
+#: cinder/tests/integrated/api/client.py:50 +msgid "Authorization error" msgstr "" -#: cinder/network/quantum/manager.py:204 -#, python-format -msgid "" -"Unable to find existing quantum network for tenant '%(q_tenant_id)s' with" -" net-id '%(quantum_net_id)s'" +#: cinder/tests/integrated/api/client.py:58 +msgid "Item not found" msgstr "" -#: cinder/network/quantum/manager.py:301 +#: cinder/tests/integrated/api/client.py:100 #, python-format -msgid "network allocations for instance %s" +msgid "Doing %(method)s on %(relative_url)s" msgstr "" -#: cinder/network/quantum/manager.py:588 +#: cinder/tests/integrated/api/client.py:103 #, python-format -msgid "" -"port deallocation failed for instance: |%(instance_id)s|, port_id: " -"|%(port_id)s|" +msgid "Body: %s" msgstr "" -#: cinder/network/quantum/manager.py:606 +#: cinder/tests/integrated/api/client.py:121 #, python-format -msgid "" -"ipam deallocation failed for instance: |%(instance_id)s|, vif_uuid: " -"|%(vif_uuid)s|" +msgid "%(auth_uri)s => code %(http_status)s" msgstr "" -#: cinder/network/quantum/melange_connection.py:96 +#: cinder/tests/integrated/api/client.py:148 #, python-format -msgid "Server returned error: %s" +msgid "%(relative_uri)s => code %(http_status)s" msgstr "" -#: cinder/network/quantum/melange_connection.py:98 -msgid "Connection error contacting melange service, retrying" +#: cinder/tests/integrated/api/client.py:159 +msgid "Unexpected status code" msgstr "" -#: cinder/network/quantum/melange_connection.py:108 +#: cinder/tests/integrated/api/client.py:166 #, python-format -msgid "" -"allocate IP on network |%(network_id)s| belonging to " -"|%(network_tenant_id)s| to this vif |%(vif_id)s| with mac " -"|%(mac_address)s| belonging to |%(project_id)s| " +msgid "Decoding JSON: %s" msgstr "" -#: cinder/network/quantum/melange_ipam_lib.py:133 -msgid "get_project_and_global_net_ids must be called with a non-null project_id" +#: cinder/transfer/api.py:68 +msgid "Volume in unexpected state" msgstr "" -#: cinder/network/quantum/cinder_ipam_lib.py:75 -msgid "Error creating network entry" +#: cinder/transfer/api.py:102 cinder/volume/api.py:367 +msgid "status must be available" msgstr "" -#: cinder/network/quantum/cinder_ipam_lib.py:90 -#, python-format -msgid "No network with net_id = %s" -msgstr "" +#: cinder/transfer/api.py:119 +#, fuzzy, python-format +msgid "Failed to create transfer record for %s" +msgstr "找不到Volume %s" -#: cinder/network/quantum/cinder_ipam_lib.py:221 +#: cinder/transfer/api.py:136 #, python-format -msgid "No fixed IPs to deallocate for vif %s" +msgid "Attempt to transfer %s with invalid auth key." msgstr "" -#: cinder/network/quantum/quantum_connection.py:99 +#: cinder/transfer/api.py:156 cinder/volume/flows/api/create_volume.py:508 #, python-format -msgid "Connecting interface %(interface_id)s to net %(net_id)s for %(tenant_id)s" +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG volume " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" msgstr "" -#: cinder/network/quantum/quantum_connection.py:113 +#: cinder/transfer/api.py:182 #, python-format -msgid "Deleting port %(port_id)s on net %(net_id)s for %(tenant_id)s" +msgid "Failed to update quota donating volumetransfer id %s" msgstr "" -#: cinder/notifier/api.py:115 +#: cinder/transfer/api.py:199 #, python-format -msgid "%s not in valid priorities" +msgid "Volume %s has been transferred." 
msgstr "" -#: cinder/notifier/api.py:130 +#: cinder/volume/api.py:143 #, python-format -msgid "" -"Problem '%(e)s' attempting to send to notification system. " -"Payload=%(payload)s" +msgid "Unable to query if %s is in the availability zone set" msgstr "" -#: cinder/notifier/list_notifier.py:65 -#, python-format -msgid "Problem '%(e)s' attempting to send to notification driver %(driver)s." +#: cinder/volume/api.py:171 cinder/volume/api.py:173 +msgid "Failed to create api volume flow" msgstr "" -#: cinder/notifier/rabbit_notifier.py:46 -#, python-format -msgid "Could not send notification to %(topic)s. Payload=%(message)s" +#: cinder/volume/api.py:202 +msgid "Failed to update quota for deleting volume" msgstr "" -#: cinder/rpc/amqp.py:146 +#: cinder/volume/api.py:214 #, python-format -msgid "Returning exception %s to caller" +msgid "Volume status must be available or error, but current status is: %s" msgstr "" -#: cinder/rpc/amqp.py:188 -#, python-format -msgid "unpacked context: %s" +#: cinder/volume/api.py:224 +msgid "Volume cannot be deleted while migrating" msgstr "" -#: cinder/rpc/amqp.py:231 +#: cinder/volume/api.py:229 #, python-format -msgid "received %s" +msgid "Volume still has %d dependent snapshots" msgstr "" -#: cinder/rpc/amqp.py:236 +#: cinder/volume/api.py:285 cinder/volume/api.py:350 +#: cinder/volume/qos_specs.py:240 cinder/volume/volume_types.py:67 #, python-format -msgid "no method for message: %s" +msgid "Searching by: %s" msgstr "" -#: cinder/rpc/amqp.py:237 -#, python-format -msgid "No method for message: %s" +#: cinder/volume/api.py:370 +msgid "already attached" msgstr "" -#: cinder/rpc/amqp.py:321 -#, python-format -msgid "Making asynchronous call on %s ..." +#: cinder/volume/api.py:377 +msgid "status must be in-use to detach" msgstr "" -#: cinder/rpc/amqp.py:324 -#, python-format -msgid "MSG_ID is %s" -msgstr "" +#: cinder/volume/api.py:388 +#, fuzzy +msgid "Volume status must be available to reserve" +msgstr "Volume 狀態需要可被使用" -#: cinder/rpc/amqp.py:346 -#, python-format -msgid "Making asynchronous cast on %s..." +#: cinder/volume/api.py:464 +msgid "Snapshot cannot be created while volume is migrating" msgstr "" -#: cinder/rpc/amqp.py:354 -msgid "Making asynchronous fanout cast..." +#: cinder/volume/api.py:468 +msgid "must be available" msgstr "" -#: cinder/rpc/amqp.py:379 +#: cinder/volume/api.py:490 #, python-format -msgid "Sending notification on %s..." +msgid "" +"Quota exceeded for %(s_pid)s, tried to create %(s_size)sG snapshot " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" msgstr "" -#: cinder/rpc/common.py:54 +#: cinder/volume/api.py:502 #, python-format msgid "" -"Remote error: %(exc_type)s %(value)s\n" -"%(traceback)s." +"Quota exceeded for %(s_pid)s, tried to create snapshot (%(d_consumed)d " +"snapshots already consumed)" msgstr "" -#: cinder/rpc/common.py:71 -msgid "Timeout while waiting on RPC response." -msgstr "" +#: cinder/volume/api.py:553 +#, fuzzy +msgid "Volume Snapshot status must be available or error" +msgstr "Volume 狀態需要可被使用" -#: cinder/rpc/impl_kombu.py:111 -msgid "Failed to process message... skipping it." 
+#: cinder/volume/api.py:581 cinder/volume/flows/api/create_volume.py:208 +msgid "Metadata property key blank" msgstr "" -#: cinder/rpc/impl_kombu.py:407 -#, python-format -msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +#: cinder/volume/api.py:585 +msgid "Metadata property key greater than 255 characters" msgstr "" -#: cinder/rpc/impl_kombu.py:430 -#, python-format -msgid "Connected to AMQP server on %(hostname)s:%(port)d" +#: cinder/volume/api.py:589 +msgid "Metadata property value greater than 255 characters" msgstr "" -#: cinder/rpc/impl_kombu.py:466 -#, python-format -msgid "" -"Unable to connect to AMQP server on %(hostname)s:%(port)d after " -"%(max_retries)d tries: %(err_str)s" -msgstr "" +#: cinder/volume/api.py:720 cinder/volume/api.py:772 +#, fuzzy +msgid "Volume status must be available/in-use." +msgstr "Volume 狀態需要可被使用" -#: cinder/rpc/impl_kombu.py:482 -#, python-format -msgid "" -"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " -"again in %(sleep_time)d seconds." +#: cinder/volume/api.py:723 +msgid "Volume status is in-use." msgstr "" -#: cinder/rpc/impl_kombu.py:533 cinder/rpc/impl_qpid.py:385 -#, python-format -msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +#: cinder/volume/api.py:752 +msgid "Volume status must be available to extend." msgstr "" -#: cinder/rpc/impl_kombu.py:551 cinder/rpc/impl_qpid.py:400 +#: cinder/volume/api.py:757 #, python-format -msgid "Timed out waiting for RPC response: %s" +msgid "" +"New size for extend must be greater than current size. (current: " +"%(size)s, extended: %(new_size)s)" msgstr "" -#: cinder/rpc/impl_kombu.py:555 cinder/rpc/impl_qpid.py:404 -#, python-format -msgid "Failed to consume message from queue: %s" +#: cinder/volume/api.py:778 +msgid "Volume is already part of an active migration" msgstr "" -#: cinder/rpc/impl_kombu.py:589 cinder/rpc/impl_qpid.py:434 -#, python-format -msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +#: cinder/volume/api.py:784 +msgid "volume must not have snapshots" msgstr "" -#: cinder/rpc/impl_qpid.py:341 +#: cinder/volume/api.py:797 #, python-format -msgid "Unable to connect to AMQP server: %s " +msgid "No available service named %s" msgstr "" -#: cinder/rpc/impl_qpid.py:346 -#, python-format -msgid "Connected to AMQP server on %s" +#: cinder/volume/api.py:803 +msgid "Destination host must be different than current host" msgstr "" -#: cinder/rpc/impl_qpid.py:354 -msgid "Re-established AMQP queues" +#: cinder/volume/api.py:833 +msgid "Source volume not mid-migration." msgstr "" -#: cinder/rpc/impl_qpid.py:412 -msgid "Error processing message. Skipping it." +#: cinder/volume/api.py:837 +msgid "Destination volume not mid-migration." msgstr "" -#: cinder/scheduler/chance.py:47 cinder/scheduler/simple.py:91 -#: cinder/scheduler/simple.py:143 -msgid "Is the appropriate service running?" +#: cinder/volume/api.py:842 +#, python-format +msgid "Destination has migration_status %(stat)s, expected %(exp)s." msgstr "" -#: cinder/scheduler/chance.py:52 -msgid "Could not find another compute" +#: cinder/volume/api.py:853 +msgid "Volume status must be available to update readonly flag." 
msgstr "" -#: cinder/scheduler/driver.py:63 +#: cinder/volume/api.py:862 #, python-format -msgid "Casted '%(method)s' to volume '%(host)s'" +msgid "Unable to update type due to incorrect status on volume: %s" msgstr "" -#: cinder/scheduler/driver.py:80 +#: cinder/volume/api.py:868 #, python-format -msgid "Casted '%(method)s' to compute '%(host)s'" +msgid "Volume %s is already part of an active migration." msgstr "" -#: cinder/scheduler/driver.py:89 +#: cinder/volume/api.py:874 #, python-format -msgid "Casted '%(method)s' to network '%(host)s'" +msgid "migration_policy must be 'on-demand' or 'never', passed: %s" msgstr "" -#: cinder/scheduler/driver.py:107 +#: cinder/volume/api.py:887 #, python-format -msgid "Casted '%(method)s' to %(topic)s '%(host)s'" +msgid "Invalid volume_type passed: %s" msgstr "" -#: cinder/scheduler/driver.py:181 -msgid "Must implement a fallback schedule" +#: cinder/volume/api.py:900 +#, python-format +msgid "New volume_type same as original: %s" msgstr "" -#: cinder/scheduler/driver.py:185 -msgid "Driver must implement schedule_prep_resize" +#: cinder/volume/api.py:915 +msgid "Retype cannot change encryption requirements" msgstr "" -#: cinder/scheduler/driver.py:190 -msgid "Driver must implement schedule_run_instance" +#: cinder/volume/api.py:927 +msgid "Retype cannot change front-end qos specs for in-use volumes" msgstr "" -#: cinder/scheduler/driver.py:325 -msgid "Block migration can not be used with shared storage." +#: cinder/volume/driver.py:189 cinder/volume/drivers/netapp/nfs.py:174 +#, python-format +msgid "Recovering from a failed execute. Try number %s" msgstr "" -#: cinder/scheduler/driver.py:330 -msgid "Live migration can not be used without shared storage." +#: cinder/volume/driver.py:282 +#, python-format +msgid "copy_data_between_volumes %(src)s -> %(dest)s." msgstr "" -#: cinder/scheduler/driver.py:367 cinder/scheduler/driver.py:457 +#: cinder/volume/driver.py:295 cinder/volume/driver.py:309 #, python-format -msgid "host %(dest)s is not compatible with original host %(src)s." +msgid "Failed to attach volume %(vol)s" msgstr "" -#: cinder/scheduler/driver.py:416 +#: cinder/volume/driver.py:327 #, python-format -msgid "" -"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " -"memory(host:%(avail)s <= instance:%(mem_inst)s)" +msgid "Failed to copy volume %(src)s to %(dest)d" msgstr "" -#: cinder/scheduler/driver.py:472 +#: cinder/volume/driver.py:340 #, python-format -msgid "" -"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " -"disk(host:%(available)s <= instance:%(necessary)s)" +msgid "copy_image_to_volume %s." msgstr "" -#: cinder/scheduler/filter_scheduler.py:51 +#: cinder/volume/driver.py:358 #, python-format -msgid "No host selection for %s defined." +msgid "copy_volume_to_image %s." msgstr "" -#: cinder/scheduler/filter_scheduler.py:64 +#: cinder/volume/driver.py:394 #, python-format -msgid "Attempting to build %(num_instances)d instance(s)" +msgid "Unable to access the backend storage via the path %(path)s." msgstr "" -#: cinder/scheduler/filter_scheduler.py:157 -msgid "Scheduler only understands Compute nodes (for now)" +#: cinder/volume/driver.py:433 +#, python-format +msgid "Creating a new backup for volume %s." msgstr "" -#: cinder/scheduler/filter_scheduler.py:199 +#: cinder/volume/driver.py:451 #, python-format -msgid "Filtered %(hosts)s" +msgid "Restoring backup %(backup)s to volume %(volume)s." 
msgstr "" -#: cinder/scheduler/filter_scheduler.py:209 -#, python-format -msgid "Weighted %(weighted_host)s" +#: cinder/volume/driver.py:474 +msgid "Extend volume not implemented" msgstr "" -#: cinder/scheduler/host_manager.py:144 -#, python-format -msgid "Host filter fails for ignored host %(host)s" +#: cinder/volume/driver.py:533 cinder/volume/drivers/emc/emc_smis_iscsi.py:113 +msgid "ISCSI provider_location not stored, using discovery" msgstr "" -#: cinder/scheduler/host_manager.py:151 +#: cinder/volume/driver.py:546 #, python-format -msgid "Host filter fails for non-forced host %(host)s" +msgid "ISCSI discovery attempt failed for:%s" msgstr "" -#: cinder/scheduler/host_manager.py:157 +#: cinder/volume/driver.py:548 #, python-format -msgid "Host filter function %(func)s failed for %(host)s" +msgid "Error from iscsiadm -m discovery: %s" msgstr "" -#: cinder/scheduler/host_manager.py:163 +#: cinder/volume/driver.py:595 +#, fuzzy, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "找不到Volume %s" + +#: cinder/volume/driver.py:599 cinder/volume/drivers/emc/emc_smis_iscsi.py:156 #, python-format -msgid "Host filter passes for %(host)s" +msgid "ISCSI Discovery: Found %s" msgstr "" -#: cinder/scheduler/host_manager.py:272 -#, python-format -msgid "Received %(service_name)s service update from %(host)s." +#: cinder/volume/driver.py:696 +msgid "The volume driver requires the iSCSI initiator name in the connector." msgstr "" -#: cinder/scheduler/host_manager.py:313 -msgid "host_manager only implemented for 'compute'" +#: cinder/volume/driver.py:726 cinder/volume/driver.py:845 +#: cinder/volume/drivers/eqlx.py:247 cinder/volume/drivers/lvm.py:359 +#: cinder/volume/drivers/zadara.py:650 +#: cinder/volume/drivers/emc/emc_smis_common.py:859 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:235 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:602 +#: cinder/volume/drivers/netapp/iscsi.py:1032 +#: cinder/volume/drivers/netapp/iscsi.py:1419 +#: cinder/volume/drivers/nexenta/iscsi.py:538 +#: cinder/volume/drivers/windows/windows.py:205 +msgid "Updating volume stats" msgstr "" -#: cinder/scheduler/host_manager.py:323 -#, python-format -msgid "No service for compute ID %s" +#: cinder/volume/driver.py:924 +msgid "Driver must implement initialize_connection" msgstr "" -#: cinder/scheduler/manager.py:85 +#: cinder/volume/manager.py:203 #, python-format +msgid "Driver path %s is deprecated, update your configuration to the new path." +msgstr "" + +#: cinder/volume/manager.py:209 msgid "" -"Driver Method %(driver_method_name)s missing: %(e)s. Reverting to " -"schedule()" +"ThinLVMVolumeDriver is deprecated, please configure LVMISCSIDriver and " +"lvm_type=thin. Continuing with those settings." msgstr "" -#: cinder/scheduler/manager.py:150 +#: cinder/volume/manager.py:228 #, python-format -msgid "Failed to schedule_%(method)s: %(ex)s" +msgid "Starting volume driver %(driver_name)s (%(version)s)" msgstr "" -#: cinder/scheduler/manager.py:159 +#: cinder/volume/manager.py:235 #, python-format -msgid "Setting instance %(instance_uuid)s to %(state)s state." 
+msgid "Error encountered during initialization of driver: %(name)s" msgstr "" -#: cinder/scheduler/scheduler_options.py:66 +#: cinder/volume/manager.py:244 #, python-format -msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:257 +#, python-format +msgid "Failed to re-export volume %s: setting to error state" msgstr "" -#: cinder/scheduler/scheduler_options.py:75 +#: cinder/volume/manager.py:264 #, python-format -msgid "Could not decode scheduler options: '%(e)s'" +msgid "volume %s stuck in a downloading state" msgstr "" -#: cinder/scheduler/simple.py:87 -msgid "Not enough allocatable CPU cores remaining" +#: cinder/volume/manager.py:271 +#, python-format +msgid "volume %s: skipping export" msgstr "" -#: cinder/scheduler/simple.py:137 -msgid "Not enough allocatable volume gigabytes remaining" +#: cinder/volume/manager.py:273 +#, python-format +msgid "" +"Error encountered during re-exporting phase of driver initialization: " +"%(name)s" msgstr "" -#: cinder/scheduler/filters/core_filter.py:45 -msgid "VCPUs not set; assuming CPU collection broken" +#: cinder/volume/manager.py:283 +msgid "Resuming any in progress delete operations" msgstr "" -#: cinder/tests/fake_utils.py:72 +#: cinder/volume/manager.py:286 #, python-format -msgid "Faking execution of cmd (subprocess): %s" +msgid "Resuming delete on volume: %s" +msgstr "" + +#: cinder/volume/manager.py:328 cinder/volume/manager.py:330 +msgid "Failed to create manager volume flow" msgstr "" -#: cinder/tests/fake_utils.py:80 +#: cinder/volume/manager.py:374 cinder/volume/manager.py:391 #, python-format -msgid "Faked command matched %s" +msgid "volume %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:380 +msgid "volume is not local to this node" msgstr "" -#: cinder/tests/fake_utils.py:96 +#: cinder/volume/manager.py:389 #, python-format -msgid "Faked command raised an exception %s" +msgid "volume %s: removing export" msgstr "" -#: cinder/tests/fake_utils.py:101 +#: cinder/volume/manager.py:394 #, python-format -msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgid "Cannot delete volume %s: volume is busy" msgstr "" -#: cinder/tests/fakelibvirt.py:784 -msgid "Please extend mock libvirt module to support flags" +#: cinder/volume/manager.py:422 +msgid "Failed to update usages deleting volume" msgstr "" -#: cinder/tests/fakelibvirt.py:790 -msgid "Please extend fake libvirt module to support this auth method" +#: cinder/volume/manager.py:427 +#, python-format +msgid "volume %s: glance metadata deleted" msgstr "" -#: cinder/tests/test_compute.py:365 cinder/tests/test_compute.py:1419 +#: cinder/volume/manager.py:430 #, python-format -msgid "Running instances: %s" +msgid "no glance metadata found for volume %s" msgstr "" -#: cinder/tests/test_compute.py:371 +#: cinder/volume/manager.py:434 #, python-format -msgid "After terminating instances: %s" +msgid "volume %s: deleted successfully" msgstr "" -#: cinder/tests/test_compute.py:589 -msgid "Internal error" +#: cinder/volume/manager.py:451 +#, python-format +msgid "snapshot %s: creating" msgstr "" -#: cinder/tests/test_compute.py:1430 +#: cinder/volume/manager.py:462 #, python-format -msgid "After force-killing instances: %s" +msgid "snapshot %(snap_id)s: creating" msgstr "" -#: cinder/tests/test_misc.py:92 +#: cinder/volume/manager.py:490 #, python-format msgid "" -"The following migrations are missing a downgrade:\n" -"\t%s" +"Failed updating %(snapshot_id)s metadata using the 
provided volumes " +"%(volume_id)s metadata" msgstr "" -#: cinder/tests/test_cinder_manage.py:169 -msgid "id" +#: cinder/volume/manager.py:496 +#, python-format +msgid "snapshot %s: created successfully" msgstr "" -#: cinder/tests/test_cinder_manage.py:170 -msgid "IPv4" +#: cinder/volume/manager.py:508 cinder/volume/manager.py:518 +#, python-format +msgid "snapshot %s: deleting" msgstr "" -#: cinder/tests/test_cinder_manage.py:171 -msgid "IPv6" +#: cinder/volume/manager.py:526 +#, python-format +msgid "Cannot delete snapshot %s: snapshot is busy" msgstr "" -#: cinder/tests/test_cinder_manage.py:172 -msgid "start address" +#: cinder/volume/manager.py:556 +msgid "Failed to update usages deleting snapshot" msgstr "" -#: cinder/tests/test_cinder_manage.py:173 -msgid "DNS1" +#: cinder/volume/manager.py:559 +#, python-format +msgid "snapshot %s: deleted successfully" msgstr "" -#: cinder/tests/test_cinder_manage.py:174 -msgid "DNS2" +#: cinder/volume/manager.py:579 +msgid "being attached by another instance" msgstr "" -#: cinder/tests/test_cinder_manage.py:175 -msgid "VlanID" +#: cinder/volume/manager.py:583 +msgid "being attached by another host" msgstr "" -#: cinder/tests/test_cinder_manage.py:176 -msgid "project" +#: cinder/volume/manager.py:587 +msgid "being attached by different mode" msgstr "" -#: cinder/tests/test_cinder_manage.py:177 -msgid "uuid" +#: cinder/volume/manager.py:590 +msgid "status must be available or attaching" msgstr "" -#: cinder/tests/test_volume.py:216 +#: cinder/volume/manager.py:698 #, python-format -msgid "Target %s allocated" +msgid "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully" msgstr "" -#: cinder/tests/test_volume.py:468 +#: cinder/volume/manager.py:760 #, python-format -msgid "Cannot confirm exported volume id:%s." 
+msgid "Unable to fetch connection information from backend: %(err)s" msgstr "" -#: cinder/tests/test_volume_types.py:58 +#: cinder/volume/manager.py:807 #, python-format -msgid "Given data: %s" +msgid "Unable to terminate volume connection: %(err)s" msgstr "" -#: cinder/tests/test_volume_types.py:59 -#, python-format -msgid "Result data: %s" +#: cinder/volume/manager.py:854 +msgid "failed to create new_volume on destination host" msgstr "" -#: cinder/tests/test_xenapi.py:626 -#, python-format -msgid "Creating files in %s to simulate guest agent" +#: cinder/volume/manager.py:857 +msgid "timeout creating new_volume on destination host" msgstr "" -#: cinder/tests/test_xenapi.py:637 +#: cinder/volume/manager.py:880 #, python-format -msgid "Removing simulated guest agent files in %s" +msgid "Failed to copy volume %(vol1)s to %(vol2)s" msgstr "" -#: cinder/tests/api/openstack/compute/test_servers.py:2144 +#: cinder/volume/manager.py:909 #, python-format -msgid "Quota exceeded: code=%(code)s" +msgid "" +"migrate_volume_completion: completing migration for volume %(vol1)s " +"(temporary volume %(vol2)s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:57 +#: cinder/volume/manager.py:921 #, python-format -msgid "_create: %s" +msgid "" +"migrate_volume_completion is cleaning up an error for volume %(vol1)s " +"(temporary volume %(vol2)s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:66 +#: cinder/volume/manager.py:940 #, python-format -msgid "_delete: %s" +msgid "Failed to delete migration source vol %(vol)s: %(err)s" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:75 +#: cinder/volume/manager.py:976 #, python-format -msgid "_get: %s" +msgid "volume %s: calling driver migrate_volume" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:85 -#, python-format -msgid "_get_all: %s" +#: cinder/volume/manager.py:1016 +msgid "Updating volume status" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:125 +#: cinder/volume/manager.py:1024 #, python-format -msgid "test_snapshot_create: param=%s" +msgid "" +"Unable to update stats, %(driver_name)s -%(driver_version)s " +"%(config_group)s driver is uninitialized." msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:134 +#: cinder/volume/manager.py:1044 #, python-format -msgid "test_snapshot_create: resp_dict=%s" +msgid "Notification {%s} received" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:156 +#: cinder/volume/manager.py:1091 #, python-format -msgid "test_snapshot_create_force: param=%s" +msgid "" +"Quota exceeded for %(s_pid)s, tried to extend volume by %(s_size)sG, " +"(%(d_consumed)dG of %(d_quota)dG already consumed)" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:165 +#: cinder/volume/manager.py:1103 #, python-format -msgid "test_snapshot_create_force: resp_dict=%s" +msgid "volume %s: extending" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:205 +#: cinder/volume/manager.py:1105 #, python-format -msgid "test_snapshot_show: resp=%s" +msgid "volume %s: extended successfully" msgstr "" -#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:231 +#: cinder/volume/manager.py:1107 #, python-format -msgid "test_snapshot_detail: resp_dict=%s" +msgid "volume %s: Error trying to extend volume" +msgstr "" + +#: cinder/volume/manager.py:1169 +msgid "Failed to update usages while retyping volume." 
+msgstr "" + +#: cinder/volume/manager.py:1170 +msgid "Failed to get old volume type quota reservations" msgstr "" -#: cinder/tests/integrated/test_login.py:31 +#: cinder/volume/manager.py:1190 #, python-format -msgid "flavor: %s" +msgid "Volume %s: retyped succesfully" msgstr "" -#: cinder/tests/integrated/api/client.py:38 +#: cinder/volume/manager.py:1193 #, python-format msgid "" -"%(message)s\n" -"Status Code: %(_status)s\n" -"Body: %(_body)s" +"Volume %s: driver error when trying to retype, falling back to generic " +"mechanism." msgstr "" -#: cinder/tests/integrated/api/client.py:47 -msgid "Authentication error" +#: cinder/volume/manager.py:1204 +msgid "Retype requires migration but is not allowed." msgstr "" -#: cinder/tests/integrated/api/client.py:55 -msgid "Authorization error" +#: cinder/volume/manager.py:1212 +msgid "Volume must not have snapshots." msgstr "" -#: cinder/tests/integrated/api/client.py:63 -msgid "Item not found" +#: cinder/volume/qos_specs.py:57 +#, python-format +msgid "Valid consumer of QoS specs are: %s" msgstr "" -#: cinder/tests/integrated/api/client.py:105 +#: cinder/volume/qos_specs.py:84 cinder/volume/qos_specs.py:105 +#: cinder/volume/qos_specs.py:155 cinder/volume/qos_specs.py:197 +#: cinder/volume/qos_specs.py:211 cinder/volume/qos_specs.py:225 +#: cinder/volume/volume_types.py:43 #, python-format -msgid "Doing %(method)s on %(relative_url)s" +msgid "DB error: %s" msgstr "" -#: cinder/tests/integrated/api/client.py:107 +#: cinder/volume/qos_specs.py:123 cinder/volume/qos_specs.py:140 +#: cinder/volume/qos_specs.py:272 cinder/volume/volume_types.py:52 +#: cinder/volume/volume_types.py:99 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/qos_specs.py:156 #, python-format -msgid "Body: %s" +msgid "Failed to get all associations of qos specs %s" msgstr "" -#: cinder/tests/integrated/api/client.py:125 +#: cinder/volume/qos_specs.py:189 #, python-format -msgid "%(auth_uri)s => code %(http_status)s" +msgid "" +"Type %(type_id)s is already associated with another qos specs: " +"%(qos_specs_id)s" msgstr "" -#: cinder/tests/integrated/api/client.py:151 +#: cinder/volume/qos_specs.py:198 #, python-format -msgid "%(relative_uri)s => code %(http_status)s" +msgid "Failed to associate qos specs %(id)s with type: %(vol_type_id)s" msgstr "" -#: cinder/tests/integrated/api/client.py:161 -msgid "Unexpected status code" +#: cinder/volume/qos_specs.py:212 +#, python-format +msgid "Failed to disassociate qos specs %(id)s with type: %(vol_type_id)s" msgstr "" -#: cinder/tests/integrated/api/client.py:168 +#: cinder/volume/qos_specs.py:226 #, python-format -msgid "Decoding JSON: %s" +msgid "Failed to disassociate qos specs %s." +msgstr "" + +#: cinder/volume/qos_specs.py:284 cinder/volume/volume_types.py:111 +msgid "name cannot be None" msgstr "" -#: cinder/tests/rpc/common.py:133 +#: cinder/volume/utils.py:144 #, python-format -msgid "Nested received %(queue)s, %(value)s" +msgid "" +"Incorrect value error: %(blocksize)s, it may indicate that " +"'volume_dd_blocksize' was configured incorrectly. Fall back to default." 
msgstr "" -#: cinder/tests/rpc/common.py:142 +#: cinder/volume/volume_types.py:130 #, python-format -msgid "Nested return %s" +msgid "" +"Default volume type is not found, please check default_volume_type " +"config: %s" msgstr "" -#: cinder/tests/rpc/common.py:160 -msgid "RPC backend does not support timeouts" +#: cinder/volume/drivers/block_device.py:131 +#: cinder/volume/drivers/block_device.py:143 cinder/volume/drivers/lvm.py:654 +#: cinder/volume/drivers/lvm.py:669 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" msgstr "" -#: cinder/tests/rpc/common.py:227 cinder/tests/rpc/common.py:233 +#: cinder/volume/drivers/block_device.py:157 cinder/volume/drivers/lvm.py:687 #, python-format -msgid "Received %s" +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %s" msgstr "" -#: cinder/virt/connection.py:85 -msgid "Failed to open connection to the hypervisor" +#: cinder/volume/drivers/block_device.py:183 cinder/volume/drivers/lvm.py:483 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provision for volume: %s" msgstr "" -#: cinder/virt/fake.py:270 cinder/virt/xenapi_conn.py:396 -#: cinder/virt/baremetal/proxy.py:716 cinder/virt/libvirt/connection.py:2045 +#: cinder/volume/drivers/block_device.py:200 cinder/volume/drivers/lvm.py:504 #, python-format -msgid "Compute_service record created for %s " +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s" msgstr "" -#: cinder/virt/fake.py:273 cinder/virt/xenapi_conn.py:399 -#: cinder/virt/baremetal/proxy.py:720 cinder/virt/libvirt/connection.py:2048 +#: cinder/volume/drivers/block_device.py:272 cinder/volume/drivers/lvm.py:227 #, python-format -msgid "Compute_service record updated for %s " +msgid "Performing secure delete on volume: %s" msgstr "" -#: cinder/virt/firewall.py:130 +#: cinder/volume/drivers/block_device.py:287 #, python-format -msgid "Attempted to unfilter instance %s which is not filtered" +msgid "Error unrecognized volume_clear option: %s" msgstr "" -#: cinder/virt/firewall.py:137 +#: cinder/volume/drivers/block_device.py:311 cinder/volume/drivers/lvm.py:300 +#: cinder/volume/drivers/zadara.py:509 cinder/volume/drivers/nexenta/nfs.py:189 #, python-format -msgid "Filters added to instance %s" +msgid "Creating clone of volume: %s" msgstr "" -#: cinder/virt/firewall.py:139 -msgid "Provider Firewall Rules refreshed" +#: cinder/volume/drivers/block_device.py:380 +msgid "No free disk" msgstr "" -#: cinder/virt/firewall.py:291 -#, python-format -msgid "Adding security group rule: %r" +#: cinder/volume/drivers/block_device.py:393 +msgid "No big enough free disk" msgstr "" -#: cinder/virt/firewall.py:403 cinder/virt/xenapi/firewall.py:87 +#: cinder/volume/drivers/coraid.py:84 #, python-format -msgid "Adding provider rule: %s" +msgid "Invalid ESM url scheme \"%s\". Supported https only." msgstr "" -#: cinder/virt/images.py:86 -msgid "'qemu-img info' parsing failed." +#: cinder/volume/drivers/coraid.py:111 +msgid "Invalid REST handle name. Expected path." msgstr "" -#: cinder/virt/images.py:92 +#: cinder/volume/drivers/coraid.py:134 #, python-format -msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgid "Call to json.loads() failed: %(ex)s. Response: %(resp)s" msgstr "" -#: cinder/virt/images.py:104 -#, python-format -msgid "Converted to raw, but format is now %s" +#: cinder/volume/drivers/coraid.py:224 +msgid "Session is expired. Relogin on ESM." 
msgstr "" -#: cinder/virt/vmwareapi_conn.py:105 -msgid "" -"Must specify vmwareapi_host_ip,vmwareapi_host_username and " -"vmwareapi_host_password to useconnection_type=vmwareapi" +#: cinder/volume/drivers/coraid.py:244 +msgid "Reply is empty." msgstr "" -#: cinder/virt/vmwareapi_conn.py:276 -#, python-format -msgid "In vmwareapi:_create_session, got this exception: %s" +#: cinder/volume/drivers/coraid.py:246 +msgid "Error message is empty." msgstr "" -#: cinder/virt/vmwareapi_conn.py:359 +#: cinder/volume/drivers/coraid.py:284 #, python-format -msgid "In vmwareapi:_call_method, got this exception: %s" +msgid "Coraid Appliance ping failed: %s" msgstr "" -#: cinder/virt/vmwareapi_conn.py:398 +#: cinder/volume/drivers/coraid.py:297 #, python-format -msgid "Task [%(task_name)s] %(task_ref)s status: success" +msgid "Volume \"%(name)s\" created with VSX LUN \"%(lun)s\"" msgstr "" -#: cinder/virt/vmwareapi_conn.py:404 +#: cinder/volume/drivers/coraid.py:311 #, python-format -msgid "Task [%(task_name)s] %(task_ref)s status: error %(error_info)s" +msgid "Volume \"%s\" deleted." msgstr "" -#: cinder/virt/vmwareapi_conn.py:409 +#: cinder/volume/drivers/coraid.py:315 #, python-format -msgid "In vmwareapi:_poll_task, Got this error %s" +msgid "Resize volume \"%(name)s\" to %(size)s GB." msgstr "" -#: cinder/virt/xenapi_conn.py:140 -msgid "" -"Must specify xenapi_connection_url, xenapi_connection_username " -"(optionally), and xenapi_connection_password to use " -"connection_type=xenapi" +#: cinder/volume/drivers/coraid.py:319 +#, python-format +msgid "Repository for volume \"%(name)s\" found: \"%(repo)s\"" msgstr "" -#: cinder/virt/xenapi_conn.py:329 cinder/virt/libvirt/connection.py:472 -msgid "Could not determine iscsi initiator name" +#: cinder/volume/drivers/coraid.py:333 +#, python-format +msgid "Volume \"%(name)s\" resized. New size is %(size)s GB." msgstr "" -#: cinder/virt/xenapi_conn.py:460 -msgid "Host startup on XenServer is not supported." +#: cinder/volume/drivers/coraid.py:385 +msgid "Cannot create clone volume in different repository." msgstr "" -#: cinder/virt/xenapi_conn.py:489 -msgid "Unable to log in to XenAPI (is the Dom0 disk full?)" +#: cinder/volume/drivers/coraid.py:505 +#, python-format +msgid "Initialize connection %(shelf)s/%(lun)s for %(name)s" msgstr "" -#: cinder/virt/xenapi_conn.py:527 -msgid "Host is member of a pool, but DB says otherwise" +#: cinder/volume/drivers/eqlx.py:139 +#, python-format +msgid "" +"CLI output\n" +"%s" msgstr "" -#: cinder/virt/xenapi_conn.py:599 cinder/virt/xenapi_conn.py:612 -#, python-format -msgid "Got exception: %s" +#: cinder/volume/drivers/eqlx.py:154 +msgid "Reading CLI MOTD" msgstr "" -#: cinder/virt/baremetal/dom.py:93 -msgid "No domains exist." 
+#: cinder/volume/drivers/eqlx.py:158 +#, python-format +msgid "Setting CLI terminal width: '%s'" msgstr "" -#: cinder/virt/baremetal/dom.py:95 +#: cinder/volume/drivers/eqlx.py:162 #, python-format -msgid "============= initial domains =========== : %s" +msgid "Sending CLI command: '%s'" msgstr "" -#: cinder/virt/baremetal/dom.py:99 -msgid "Building domain: to be removed" +#: cinder/volume/drivers/eqlx.py:169 +msgid "Error executing EQL command" msgstr "" -#: cinder/virt/baremetal/dom.py:103 -msgid "Not running domain: remove" +#: cinder/volume/drivers/eqlx.py:199 +#, python-format +msgid "EQL-driver: executing \"%s\"" msgstr "" -#: cinder/virt/baremetal/dom.py:111 -msgid "domain running on an unknown node: discarded" +#: cinder/volume/drivers/eqlx.py:208 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:383 +#, python-format +msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" msgstr "" -#: cinder/virt/baremetal/dom.py:127 +#: cinder/volume/drivers/eqlx.py:215 cinder/volume/drivers/san/san.py:149 +#, fuzzy, python-format +msgid "Error running SSH command: %s" +msgstr "非預期的執行錯誤" + +#: cinder/volume/drivers/eqlx.py:282 #, python-format -msgid "No such domain (%s)" +msgid "Volume %s does not exist, it may have already been deleted" msgstr "" -#: cinder/virt/baremetal/dom.py:134 +#: cinder/volume/drivers/eqlx.py:300 #, python-format -msgid "Failed power down Bare-metal node %s" +msgid "EQL-driver: Setup is complete, group IP is %s" msgstr "" -#: cinder/virt/baremetal/dom.py:143 -msgid "deactivate -> activate fails" +#: cinder/volume/drivers/eqlx.py:304 +msgid "Failed to setup the Dell EqualLogic driver" msgstr "" -#: cinder/virt/baremetal/dom.py:153 -msgid "destroy_domain: no such domain" +#: cinder/volume/drivers/eqlx.py:320 +#, python-format +msgid "Failed to create volume %s" msgstr "" -#: cinder/virt/baremetal/dom.py:154 +#: cinder/volume/drivers/eqlx.py:329 #, python-format -msgid "No such domain %s" +msgid "Volume %s was not found while trying to delete it" msgstr "" -#: cinder/virt/baremetal/dom.py:161 +#: cinder/volume/drivers/eqlx.py:333 #, python-format -msgid "Domains: %s" +msgid "Failed to delete volume %s" msgstr "" -#: cinder/virt/baremetal/dom.py:163 +#: cinder/volume/drivers/eqlx.py:348 #, python-format -msgid "Nodes: %s" +msgid "Failed to create snapshot of volume %s" msgstr "" -#: cinder/virt/baremetal/dom.py:166 +#: cinder/volume/drivers/eqlx.py:361 #, python-format -msgid "After storing domains: %s" +msgid "Failed to create volume from snapshot %s" msgstr "" -#: cinder/virt/baremetal/dom.py:169 -msgid "deactivation/removing domain failed" +#: cinder/volume/drivers/eqlx.py:374 +#, python-format +msgid "Failed to create clone of volume %s" msgstr "" -#: cinder/virt/baremetal/dom.py:176 -msgid "===== Domain is being created =====" +#: cinder/volume/drivers/eqlx.py:384 +#, python-format +msgid "Failed to delete snapshot %(snap)s of volume %(vol)s" msgstr "" -#: cinder/virt/baremetal/dom.py:179 -msgid "Same domain name already exists" +#: cinder/volume/drivers/eqlx.py:405 +#, python-format +msgid "Failed to initialize connection to volume %s" msgstr "" -#: cinder/virt/baremetal/dom.py:181 -msgid "create_domain: before get_idle_node" +#: cinder/volume/drivers/eqlx.py:415 +#, python-format +msgid "Failed to terminate connection to volume %s" msgstr "" -#: cinder/virt/baremetal/dom.py:198 +#: cinder/volume/drivers/eqlx.py:436 #, python-format -msgid "Created new domain: %s" +msgid "Volume %s is not found!, it may have been deleted" msgstr "" -#: 
cinder/virt/baremetal/dom.py:213 +#: cinder/volume/drivers/eqlx.py:440 #, python-format -msgid "Failed to boot Bare-metal node %s" +msgid "Failed to ensure export of volume %s" msgstr "" -#: cinder/virt/baremetal/dom.py:222 -msgid "No such domain exists" +#: cinder/volume/drivers/eqlx.py:459 +#, python-format +msgid "Failed to extend_volume %(name)s from %(current_size)sGB to %(new_size)sGB" msgstr "" -#: cinder/virt/baremetal/dom.py:226 +#: cinder/volume/drivers/glusterfs.py:86 #, python-format -msgid "change_domain_state: to new state %s" +msgid "There's no Gluster config file configured (%s)" msgstr "" -#: cinder/virt/baremetal/dom.py:233 +#: cinder/volume/drivers/glusterfs.py:91 #, python-format -msgid "Stored fake domains to the file: %s" +msgid "Gluster config file at %(config)s doesn't exist" msgstr "" -#: cinder/virt/baremetal/dom.py:244 -msgid "domain does not exist" +#: cinder/volume/drivers/glusterfs.py:103 +msgid "mount.glusterfs is not installed" msgstr "" -#: cinder/virt/baremetal/nodes.py:42 +#: cinder/volume/drivers/glusterfs.py:161 #, python-format -msgid "Unknown baremetal driver %(d)s" +msgid "Cloning volume %(src)s to volume %(dst)s" msgstr "" -#: cinder/virt/baremetal/proxy.py:148 -#, python-format -msgid "Error encountered when destroying instance '%(name)s': %(ex)s" +#: cinder/volume/drivers/glusterfs.py:166 +msgid "Volume status must be 'available'." msgstr "" -#: cinder/virt/baremetal/proxy.py:162 +#: cinder/volume/drivers/glusterfs.py:202 cinder/volume/drivers/nfs.py:122 +#: cinder/volume/drivers/netapp/nfs.py:753 #, python-format -msgid "instance %(instance_name)s: deleting instance files %(target)s" +msgid "casted to %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:189 -#, python-format -msgid "instance %s: rebooted" +#: cinder/volume/drivers/glusterfs.py:216 +msgid "Snapshot status must be \"available\" to clone." msgstr "" -#: cinder/virt/baremetal/proxy.py:192 -msgid "_wait_for_reboot failed" +#: cinder/volume/drivers/glusterfs.py:238 +#, python-format +msgid "snapshot: %(snap)s, volume: %(vol)s, volume_size: %(size)s" msgstr "" -#: cinder/virt/baremetal/proxy.py:222 +#: cinder/volume/drivers/glusterfs.py:257 #, python-format -msgid "instance %s: rescued" +msgid "will copy from snapshot at %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:225 -msgid "_wait_for_rescue failed" +#: cinder/volume/drivers/glusterfs.py:275 cinder/volume/drivers/nfs.py:172 +#, python-format +msgid "Volume %s does not have provider_location specified, skipping" msgstr "" -#: cinder/virt/baremetal/proxy.py:242 -msgid "<============= spawn of baremetal =============>" +#: cinder/volume/drivers/glusterfs.py:373 +#, python-format +msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" msgstr "" -#: cinder/virt/baremetal/proxy.py:255 +#: cinder/volume/drivers/glusterfs.py:403 #, python-format -msgid "instance %s: is building" +msgid "nova call result: %s" +msgstr "" + +#: cinder/volume/drivers/glusterfs.py:405 +msgid "Call to Nova to create snapshot failed" msgstr "" -#: cinder/virt/baremetal/proxy.py:260 -msgid "Key is injected but instance is not running yet" +#: cinder/volume/drivers/glusterfs.py:427 +msgid "Nova returned \"error\" status while creating snapshot." 
msgstr "" -#: cinder/virt/baremetal/proxy.py:265 +#: cinder/volume/drivers/glusterfs.py:431 #, python-format -msgid "instance %s: booted" +msgid "Status of snapshot %(id)s is now %(status)s" msgstr "" -#: cinder/virt/baremetal/proxy.py:268 +#: cinder/volume/drivers/glusterfs.py:444 #, python-format -msgid "~~~~~~ current state = %s ~~~~~~" +msgid "Timed out while waiting for Nova update for creation of snapshot %s." msgstr "" -#: cinder/virt/baremetal/proxy.py:269 +#: cinder/volume/drivers/glusterfs.py:456 #, python-format -msgid "instance %s spawned successfully" +msgid "create snapshot: %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:272 +#: cinder/volume/drivers/glusterfs.py:457 #, python-format -msgid "instance %s:not booted" +msgid "volume id: %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:274 -msgid "Bremetal assignment is overcommitted." +#: cinder/volume/drivers/glusterfs.py:532 +msgid "'active' must be present when writing snap_info." msgstr "" -#: cinder/virt/baremetal/proxy.py:354 +#: cinder/volume/drivers/glusterfs.py:562 #, python-format -msgid "instance %s: Creating image" +msgid "deleting snapshot %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:473 -#, python-format -msgid "instance %(inst_name)s: injecting %(injection)s into image %(img_id)s" +#: cinder/volume/drivers/glusterfs.py:566 +msgid "Volume status must be \"available\" or \"in-use\"." msgstr "" -#: cinder/virt/baremetal/proxy.py:484 +#: cinder/volume/drivers/glusterfs.py:582 #, python-format msgid "" -"instance %(inst_name)s: ignoring error injecting data into image " -"%(img_id)s (%(e)s)" +"Snapshot record for %s is not present, allowing snapshot_delete to " +"proceed." msgstr "" -#: cinder/virt/baremetal/proxy.py:529 +#: cinder/volume/drivers/glusterfs.py:587 #, python-format -msgid "instance %s: starting toXML method" +msgid "snapshot_file for this snap is %s" msgstr "" -#: cinder/virt/baremetal/proxy.py:531 +#: cinder/volume/drivers/glusterfs.py:608 #, python-format -msgid "instance %s: finished toXML method" -msgstr "" - -#: cinder/virt/baremetal/proxy.py:574 cinder/virt/libvirt/connection.py:1815 -msgid "" -"Cannot get the number of cpu, because this function is not implemented " -"for this platform. This error can be safely ignored for now." +msgid "No base file found for %s." msgstr "" -#: cinder/virt/baremetal/proxy.py:714 +#: cinder/volume/drivers/glusterfs.py:625 #, python-format -msgid "#### RLK: cpu_arch = %s " +msgid "No %(base_id)s found for %(file)s" msgstr "" -#: cinder/virt/baremetal/proxy.py:746 -msgid "Updating!" +#: cinder/volume/drivers/glusterfs.py:680 +#, python-format +msgid "No file found with %s as backing file." msgstr "" -#: cinder/virt/baremetal/proxy.py:773 cinder/virt/libvirt/connection.py:2609 -#: cinder/virt/xenapi/host.py:129 -msgid "Updating host stats" +#: cinder/volume/drivers/glusterfs.py:690 +#, python-format +msgid "No snap found with %s as backing file." msgstr "" -#: cinder/virt/baremetal/tilera.py:185 -msgid "free_node...." +#: cinder/volume/drivers/glusterfs.py:701 +#, python-format +msgid "No file depends on %s." msgstr "" -#: cinder/virt/baremetal/tilera.py:216 +#: cinder/volume/drivers/glusterfs.py:727 #, python-format -msgid "deactivate_node is called for node_id = %(id)s node_ip = %(ip)s" +msgid "Check condition failed: %s expected to be None." 
msgstr "" -#: cinder/virt/baremetal/tilera.py:221 -msgid "status of node is set to 0" +#: cinder/volume/drivers/glusterfs.py:778 +msgid "Call to Nova delete snapshot failed" msgstr "" -#: cinder/virt/baremetal/tilera.py:232 -msgid "rootfs is already removed" +#: cinder/volume/drivers/glusterfs.py:796 +#, python-format +msgid "status of snapshot %s is still \"deleting\"... waiting" msgstr "" -#: cinder/virt/baremetal/tilera.py:264 -msgid "Before ping to the bare-metal node" +#: cinder/volume/drivers/glusterfs.py:802 +#, python-format +msgid "Unable to delete snapshot %(id)s, status: %(status)s." msgstr "" -#: cinder/virt/baremetal/tilera.py:275 +#: cinder/volume/drivers/glusterfs.py:815 #, python-format -msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is ready" +msgid "Timed out while waiting for Nova update for deletion of snapshot %(id)s." msgstr "" -#: cinder/virt/baremetal/tilera.py:279 +#: cinder/volume/drivers/glusterfs.py:904 #, python-format -msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is not ready, out_msg=%(out_msg)s" +msgid "%s must be a valid raw or qcow2 image." msgstr "" -#: cinder/virt/baremetal/tilera.py:292 -msgid "Noting to do for tilera nodes: vmlinux is in CF" +#: cinder/volume/drivers/glusterfs.py:967 +msgid "Extend volume is only supported for this driver when no snapshots exist." msgstr "" -#: cinder/virt/baremetal/tilera.py:316 -msgid "activate_node" +#: cinder/volume/drivers/glusterfs.py:975 +#, python-format +msgid "Unrecognized backing format: %s" msgstr "" -#: cinder/virt/baremetal/tilera.py:330 -msgid "Node is unknown error state." +#: cinder/volume/drivers/glusterfs.py:990 +#, python-format +msgid "creating new volume at %s" msgstr "" -#: cinder/virt/disk/api.py:165 -msgid "no capable image handler configured" +#: cinder/volume/drivers/glusterfs.py:993 +#, python-format +msgid "file already exists at %s" msgstr "" -#: cinder/virt/disk/api.py:178 +#: cinder/volume/drivers/glusterfs.py:1019 cinder/volume/drivers/nfs.py:159 #, python-format -msgid "unknown disk image handler: %s" +msgid "Exception during mounting %s" msgstr "" -#: cinder/virt/disk/api.py:189 -msgid "image already mounted" +#: cinder/volume/drivers/glusterfs.py:1021 +#, python-format +msgid "Available shares: %s" msgstr "" -#: cinder/virt/disk/api.py:276 cinder/virt/disk/guestfs.py:64 -#: cinder/virt/disk/guestfs.py:78 cinder/virt/disk/mount.py:100 +#: cinder/volume/drivers/glusterfs.py:1038 #, python-format -msgid "Failed to mount filesystem: %s" +msgid "" +"GlusterFS share at %(dir)s is not writable by the Cinder volume service. " +"Snapshot operations will not be supported." msgstr "" -#: cinder/virt/disk/api.py:291 +#: cinder/volume/drivers/gpfs.py:96 #, python-format -msgid "Failed to remove container: %s" +msgid "GPFS is not active. Detailed output: %s" msgstr "" -#: cinder/virt/disk/api.py:441 +#: cinder/volume/drivers/gpfs.py:97 #, python-format -msgid "User %(username)s not found in password file." +msgid "GPFS is not running - state: %s" msgstr "" -#: cinder/virt/disk/api.py:457 -#, python-format -msgid "User %(username)s not found in shadow file." +#: cinder/volume/drivers/gpfs.py:140 +msgid "Option gpfs_mount_point_base is not set correctly." msgstr "" -#: cinder/virt/disk/guestfs.py:39 -#, python-format -msgid "unsupported partition: %s" +#: cinder/volume/drivers/gpfs.py:147 +msgid "Option gpfs_images_share_mode is not set correctly." 
msgstr "" -#: cinder/virt/disk/guestfs.py:77 -msgid "unknown guestmount error" +#: cinder/volume/drivers/gpfs.py:153 +msgid "Option gpfs_images_dir is not set correctly." msgstr "" -#: cinder/virt/disk/loop.py:30 +#: cinder/volume/drivers/gpfs.py:160 #, python-format -msgid "Could not attach image to loopback: %s" +msgid "" +"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " +"belong to different file systems" msgstr "" -#: cinder/virt/disk/mount.py:76 -msgid "no partitions found" +#: cinder/volume/drivers/gpfs.py:169 +#, python-format +msgid "" +"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in " +"cluster daemon level %(cur)s - must be at least at level %(min)s." msgstr "" -#: cinder/virt/disk/mount.py:77 +#: cinder/volume/drivers/gpfs.py:183 #, python-format -msgid "Failed to map partitions: %s" +msgid "%s must be an absolute path." msgstr "" -#: cinder/virt/disk/nbd.py:58 -msgid "nbd unavailable: module not loaded" +#: cinder/volume/drivers/gpfs.py:188 +#, python-format +msgid "%s is not a directory." msgstr "" -#: cinder/virt/disk/nbd.py:63 -msgid "No free nbd devices" +#: cinder/volume/drivers/gpfs.py:197 +#, python-format +msgid "" +"The GPFS filesystem %(fs)s is not at the required release level. Current" +" level is %(cur)s, must be at least %(min)s." msgstr "" -#: cinder/virt/disk/nbd.py:81 +#: cinder/volume/drivers/gpfs.py:556 #, python-format -msgid "qemu-nbd error: %s" +msgid "Failed to resize volume %(volume_id)s, error: %(error)s" msgstr "" -#: cinder/virt/disk/nbd.py:93 +#: cinder/volume/drivers/gpfs.py:604 #, python-format -msgid "nbd device %s did not show up" +msgid "mkfs failed on volume %(vol)s, error message was: %(err)s" msgstr "" -#: cinder/virt/libvirt/connection.py:265 +#: cinder/volume/drivers/gpfs.py:637 #, python-format -msgid "Connecting to libvirt: %s" +msgid "" +"%s cannot be accessed. Verify that GPFS is active and file system is " +"mounted." msgstr "" -#: cinder/virt/libvirt/connection.py:286 -msgid "Connection to libvirt broke" +#: cinder/volume/drivers/lvm.py:189 +#, python-format +msgid "Unabled to delete due to existing snapshot for volume: %s" msgstr "" -#: cinder/virt/libvirt/connection.py:388 +#: cinder/volume/drivers/lvm.py:215 #, python-format -msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s" +msgid "Volume device file path %s does not exist." msgstr "" -#: cinder/virt/libvirt/connection.py:400 +#: cinder/volume/drivers/lvm.py:221 #, python-format -msgid "" -"Error from libvirt during saved instance removal. Code=%(errcode)s " -"Error=%(e)s" +msgid "Size for volume: %s not found, cannot secure delete." msgstr "" -#: cinder/virt/libvirt/connection.py:411 +#: cinder/volume/drivers/lvm.py:262 #, python-format -msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s" +msgid "snapshot: %s not found, skipping delete operations" msgstr "" -#: cinder/virt/libvirt/connection.py:423 -msgid "Instance destroyed successfully." +#: cinder/volume/drivers/lvm.py:361 +#, python-format +msgid "Unable to update stats on non-initialized Volume Group: %s" msgstr "" -#: cinder/virt/libvirt/connection.py:435 +#: cinder/volume/drivers/lvm.py:462 #, python-format -msgid "Error from libvirt during unfilter. 
Code=%(errcode)s Error=%(e)s" +msgid "Error creating iSCSI target, retrying creation for target: %s" msgstr "" -#: cinder/virt/libvirt/connection.py:461 +#: cinder/volume/drivers/lvm.py:482 #, python-format -msgid "Deleting instance files %(target)s" +msgid "volume_info:%s" msgstr "" -#: cinder/virt/libvirt/connection.py:554 -msgid "attaching LXC block device" +#: cinder/volume/drivers/lvm.py:518 +msgid "Detected inconsistency in provider_location id" msgstr "" -#: cinder/virt/libvirt/connection.py:567 -msgid "detaching LXC block device" +#: cinder/volume/drivers/lvm.py:519 cinder/volume/drivers/lvm.py:724 +#: cinder/volume/drivers/huawei/rest_common.py:1225 +#, python-format +msgid "%s" msgstr "" -#: cinder/virt/libvirt/connection.py:692 -msgid "Instance soft rebooted successfully." +#: cinder/volume/drivers/lvm.py:573 +#, python-format +msgid "Symbolic link %s not found" msgstr "" -#: cinder/virt/libvirt/connection.py:696 -msgid "Failed to soft reboot instance." +#: cinder/volume/drivers/nfs.py:109 +msgid "Driver specific implementation needs to return mount_point_base." msgstr "" -#: cinder/virt/libvirt/connection.py:725 -msgid "Instance shutdown successfully." +#: cinder/volume/drivers/nfs.py:263 +#, python-format +msgid "Expected volume size was %d" msgstr "" -#: cinder/virt/libvirt/connection.py:761 cinder/virt/libvirt/connection.py:905 -msgid "During reboot, instance disappeared." +#: cinder/volume/drivers/nfs.py:264 +#, python-format +msgid " but size is now %d" msgstr "" -#: cinder/virt/libvirt/connection.py:766 -msgid "Instance rebooted successfully." +#: cinder/volume/drivers/nfs.py:361 +#, python-format +msgid "%s is already mounted" msgstr "" -#: cinder/virt/libvirt/connection.py:867 cinder/virt/xenapi/vmops.py:1358 +#: cinder/volume/drivers/nfs.py:421 #, python-format -msgid "" -"Found %(migration_count)d unconfirmed migrations older than " -"%(confirm_window)d seconds" +msgid "There's no NFS config file configured (%s)" msgstr "" -#: cinder/virt/libvirt/connection.py:871 +#: cinder/volume/drivers/nfs.py:426 #, python-format -msgid "Automatically confirming migration %d" +msgid "NFS config file at %(config)s doesn't exist" msgstr "" -#: cinder/virt/libvirt/connection.py:896 -msgid "Instance is running" +#: cinder/volume/drivers/nfs.py:431 +#, python-format +msgid "NFS config 'nfs_oversub_ratio' invalid. Must be > 0: %s" msgstr "" -#: cinder/virt/libvirt/connection.py:910 -msgid "Instance spawned successfully." +#: cinder/volume/drivers/nfs.py:439 +#, python-format +msgid "NFS config 'nfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" msgstr "" -#: cinder/virt/libvirt/connection.py:926 +#: cinder/volume/drivers/nfs.py:493 #, python-format -msgid "data: %(data)r, fpath: %(fpath)r" +msgid "Selected %s as target nfs share." 
msgstr "" -#: cinder/virt/libvirt/connection.py:978 -#, fuzzy -msgid "Guest does not have a console available" -msgstr "使用者並沒有管理者權力" +#: cinder/volume/drivers/nfs.py:526 +#, python-format +msgid "%s is above nfs_used_ratio" +msgstr "" -#: cinder/virt/libvirt/connection.py:1020 +#: cinder/volume/drivers/nfs.py:529 #, python-format -msgid "Path '%(path)s' supports direct I/O" +msgid "%s is above nfs_oversub_ratio" msgstr "" -#: cinder/virt/libvirt/connection.py:1024 +#: cinder/volume/drivers/nfs.py:532 #, python-format -msgid "Path '%(path)s' does not support direct I/O: '%(ex)s'" +msgid "%s reserved space is above nfs_oversub_ratio" msgstr "" -#: cinder/virt/libvirt/connection.py:1028 cinder/virt/libvirt/connection.py:1032 +#: cinder/volume/drivers/rbd.py:160 #, python-format -msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'" +msgid "Invalid argument - whence=%s not supported" msgstr "" -#: cinder/virt/libvirt/connection.py:1153 -msgid "Creating image" +#: cinder/volume/drivers/rbd.py:164 +msgid "Invalid argument" msgstr "" -#: cinder/virt/libvirt/connection.py:1339 -#, python-format -msgid "Injecting %(injection)s into image %(img_id)s" +#: cinder/volume/drivers/rbd.py:183 +msgid "fileno() not supported by RBD()" msgstr "" -#: cinder/virt/libvirt/connection.py:1349 +#: cinder/volume/drivers/rbd.py:210 #, python-format -msgid "Ignoring error injecting data into image %(img_id)s (%(e)s)" +msgid "error opening rbd image %s" msgstr "" -#: cinder/virt/libvirt/connection.py:1381 -#, python-format -msgid "block_device_list %s" +#: cinder/volume/drivers/rbd.py:259 +msgid "rados and rbd python libraries not found" msgstr "" -#: cinder/virt/libvirt/connection.py:1658 -msgid "Starting toXML method" +#: cinder/volume/drivers/rbd.py:265 +msgid "error connecting to ceph cluster" msgstr "" -#: cinder/virt/libvirt/connection.py:1662 -msgid "Finished toXML method" +#: cinder/volume/drivers/rbd.py:346 cinder/volume/drivers/sheepdog.py:178 +msgid "error refreshing volume stats" msgstr "" -#: cinder/virt/libvirt/connection.py:1679 +#: cinder/volume/drivers/rbd.py:377 #, python-format -msgid "" -"Error from libvirt while looking up %(instance_name)s: [Error Code " -"%(error_code)s] %(ex)s" +msgid "clone depth exceeds limit of %s" msgstr "" -#: cinder/virt/libvirt/connection.py:1920 -msgid "libvirt version is too old (does not support getVersion)" +#: cinder/volume/drivers/rbd.py:411 +#, python-format +msgid "maximum clone depth (%d) has been reached - flattening source volume" msgstr "" -#: cinder/virt/libvirt/connection.py:1942 +#: cinder/volume/drivers/rbd.py:423 #, python-format -msgid "'' must be 1, but %d\n" +msgid "flattening source volume %s" msgstr "" -#: cinder/virt/libvirt/connection.py:1969 +#: cinder/volume/drivers/rbd.py:435 #, python-format -msgid "topology (%(topology)s) must have %(ks)s" +msgid "creating snapshot='%s'" msgstr "" -#: cinder/virt/libvirt/connection.py:2067 +#: cinder/volume/drivers/rbd.py:445 #, python-format -msgid "" -"Instance launched has CPU info:\n" -"%s" +msgid "cloning '%(src_vol)s@%(src_snap)s' to '%(dest)s'" msgstr "" -#: cinder/virt/libvirt/connection.py:2079 -#, python-format -msgid "" -"CPU doesn't have compatibility.\n" -"\n" -"%(ret)s\n" -"\n" -"Refer to %(u)s" +#: cinder/volume/drivers/rbd.py:459 +msgid "clone created successfully" msgstr "" -#: cinder/virt/libvirt/connection.py:2136 +#: cinder/volume/drivers/rbd.py:468 #, python-format -msgid "Timeout migrating for %s. nwfilter not found." 
+msgid "creating volume '%s'" msgstr "" -#: cinder/virt/libvirt/connection.py:2352 +#: cinder/volume/drivers/rbd.py:484 #, python-format -msgid "skipping %(path)s since it looks like volume" +msgid "flattening %(pool)s/%(img)s" msgstr "" -#: cinder/virt/libvirt/connection.py:2407 +#: cinder/volume/drivers/rbd.py:490 #, python-format -msgid "Getting disk size of %(i_name)s: %(e)s" +msgid "cloning %(pool)s/%(img)s@%(snap)s to %(dst)s" +msgstr "" + +#: cinder/volume/drivers/rbd.py:527 +msgid "volume has no backup snaps" msgstr "" -#: cinder/virt/libvirt/connection.py:2458 +#: cinder/volume/drivers/rbd.py:550 #, python-format -msgid "Instance %s: Starting migrate_disk_and_power_off" +msgid "volume %s is not a clone" msgstr "" -#: cinder/virt/libvirt/connection.py:2513 -msgid "During wait running, instance disappeared." +#: cinder/volume/drivers/rbd.py:568 +#, python-format +msgid "deleting parent snapshot %s" msgstr "" -#: cinder/virt/libvirt/connection.py:2518 -msgid "Instance running successfully." +#: cinder/volume/drivers/rbd.py:579 +#, python-format +msgid "deleting parent %s" msgstr "" -#: cinder/virt/libvirt/connection.py:2525 +#: cinder/volume/drivers/rbd.py:593 #, python-format -msgid "Instance %s: Starting finish_migration" +msgid "volume %s no longer exists in backend" +msgstr "" + +#: cinder/volume/drivers/rbd.py:609 +msgid "volume has clone snapshot(s)" msgstr "" -#: cinder/virt/libvirt/connection.py:2565 +#: cinder/volume/drivers/rbd.py:625 #, python-format -msgid "Instance %s: Starting finish_revert_migration" +msgid "deleting rbd volume %s" msgstr "" -#: cinder/virt/libvirt/firewall.py:42 +#: cinder/volume/drivers/rbd.py:629 msgid "" -"Libvirt module could not be loaded. NWFilterFirewall will not work " -"correctly." +"ImageBusy error raised while deleting rbd volume. This may have been " +"caused by a connection from a client that has crashed and, if so, may be " +"resolved by retrying the delete after 30 seconds has elapsed." +msgstr "" + +#: cinder/volume/drivers/rbd.py:642 +msgid "volume is a clone so cleaning references" +msgstr "" + +#: cinder/volume/drivers/rbd.py:696 +#, python-format +msgid "connection data: %s" msgstr "" -#: cinder/virt/libvirt/firewall.py:93 -msgid "Called setup_basic_filtering in nwfilter" +#: cinder/volume/drivers/rbd.py:705 +msgid "Not stored in rbd" msgstr "" -#: cinder/virt/libvirt/firewall.py:101 -msgid "Ensuring static filters" +#: cinder/volume/drivers/rbd.py:709 +msgid "Blank components" msgstr "" -#: cinder/virt/libvirt/firewall.py:171 +#: cinder/volume/drivers/rbd.py:712 +#, fuzzy +msgid "Not an rbd snapshot" +msgstr "無效的快照(snapshot)" + +#: cinder/volume/drivers/rbd.py:724 #, python-format -msgid "The nwfilter(%(instance_filter_name)s) is not found." +msgid "not cloneable: %s" msgstr "" -#: cinder/virt/libvirt/firewall.py:217 +#: cinder/volume/drivers/rbd.py:728 #, python-format -msgid "The nwfilter(%(instance_filter_name)s) for%(name)s is not found." +msgid "%s is in a different ceph cluster" msgstr "" -#: cinder/virt/libvirt/firewall.py:233 -msgid "iptables firewall: Setup Basic Filtering" +#: cinder/volume/drivers/rbd.py:733 +msgid "rbd image clone requires image format to be 'raw' but image {0} is '{1}'" msgstr "" -#: cinder/virt/libvirt/firewall.py:252 -msgid "Attempted to unfilter instance which is not filtered" +#: cinder/volume/drivers/rbd.py:747 +#, fuzzy, python-format +msgid "Unable to open image %(loc)s: %(err)s" +msgstr "找不到Volume %s" + +#: cinder/volume/drivers/rbd.py:817 +msgid "volume backup complete." 
msgstr "" -#: cinder/virt/libvirt/imagecache.py:170 -#, python-format -msgid "%s is a valid instance name" +#: cinder/volume/drivers/rbd.py:830 +msgid "volume restore complete." msgstr "" -#: cinder/virt/libvirt/imagecache.py:173 +#: cinder/volume/drivers/rbd.py:840 cinder/volume/drivers/sheepdog.py:195 #, python-format -msgid "%s has a disk file" +msgid "Failed to Extend Volume %(volname)s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:175 +#: cinder/volume/drivers/rbd.py:845 cinder/volume/drivers/sheepdog.py:200 +#: cinder/volume/drivers/windows/windows.py:223 #, python-format -msgid "Instance %(instance)s is backed by %(backing)s" +msgid "Extend volume from %(old_size)s GB to %(new_size)s GB." msgstr "" -#: cinder/virt/libvirt/imagecache.py:186 -#, python-format -msgid "" -"Instance %(instance)s is using a backing file %(backing)s which does not " -"appear in the image service" +#: cinder/volume/drivers/scality.py:67 +msgid "Value required for 'scality_sofs_config'" msgstr "" -#: cinder/virt/libvirt/imagecache.py:237 +#: cinder/volume/drivers/scality.py:78 #, python-format -msgid "%(id)s (%(base_file)s): image verification failed" +msgid "Cannot access 'scality_sofs_config': %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:247 -#, python-format -msgid "%(id)s (%(base_file)s): image verification skipped, no hash stored" +#: cinder/volume/drivers/scality.py:84 +msgid "Cannot execute /sbin/mount.sofs" msgstr "" -#: cinder/virt/libvirt/imagecache.py:266 -#, python-format -msgid "Cannot remove %(base_file)s, it does not exist" +#: cinder/volume/drivers/scality.py:105 +msgid "Cannot mount Scality SOFS, check syslog for errors" msgstr "" -#: cinder/virt/libvirt/imagecache.py:278 +#: cinder/volume/drivers/scality.py:139 #, python-format -msgid "Base file too young to remove: %s" +msgid "Cannot find volume dir for Scality SOFS at '%s'" msgstr "" -#: cinder/virt/libvirt/imagecache.py:281 +#: cinder/volume/drivers/sheepdog.py:59 #, python-format -msgid "Removing base file: %s" +msgid "Sheepdog is not working: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:288 -#, python-format -msgid "Failed to remove %(base_file)s, error was %(error)s" +#: cinder/volume/drivers/sheepdog.py:64 +msgid "Sheepdog is not working" msgstr "" -#: cinder/virt/libvirt/imagecache.py:299 +#: cinder/volume/drivers/solidfire.py:144 #, python-format -msgid "%(id)s (%(base_file)s): checking" +msgid "Payload for SolidFire API call: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:318 +#: cinder/volume/drivers/solidfire.py:151 #, python-format msgid "" -"%(id)s (%(base_file)s): in use: on this node %(local)d local, %(remote)d " -"on other nodes" +"Failed to make httplib connection SolidFire Cluster: %s (verify san_ip " +"settings)" msgstr "" -#: cinder/virt/libvirt/imagecache.py:330 +#: cinder/volume/drivers/solidfire.py:154 #, python-format -msgid "" -"%(id)s (%(base_file)s): warning -- an absent base file is in use! 
" -"instances: %(instance_list)s" +msgid "Failed to make httplib connection: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:338 +#: cinder/volume/drivers/solidfire.py:161 #, python-format -msgid "%(id)s (%(base_file)s): in use on (%(remote)d on other nodes)" +msgid "" +"Request to SolidFire cluster returned bad status: %(status)s / %(reason)s" +" (check san_login/san_password settings)" msgstr "" -#: cinder/virt/libvirt/imagecache.py:348 +#: cinder/volume/drivers/solidfire.py:166 #, python-format -msgid "%(id)s (%(base_file)s): image is not in use" +msgid "HTTP request failed, with status: %(status)s and reason: %(reason)s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:354 +#: cinder/volume/drivers/solidfire.py:177 #, python-format -msgid "%(id)s (%(base_file)s): image is in use" +msgid "Call to json.loads() raised an exception: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:377 +#: cinder/volume/drivers/solidfire.py:183 #, python-format -msgid "Skipping verification, no base directory at %s" +msgid "Results of SolidFire API call: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:381 -msgid "Verify base images" +#: cinder/volume/drivers/solidfire.py:187 +#, python-format +msgid "Clone operation encountered: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:388 +#: cinder/volume/drivers/solidfire.py:189 #, python-format -msgid "Image id %(id)s yields fingerprint %(fingerprint)s" +msgid "Waiting for outstanding operation before retrying snapshot: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:406 +#: cinder/volume/drivers/solidfire.py:195 #, python-format -msgid "Unknown base file: %s" +msgid "Detected xDBVersionMismatch, retry %s of 5" msgstr "" -#: cinder/virt/libvirt/imagecache.py:411 +#: cinder/volume/drivers/solidfire.py:202 +#: cinder/volume/drivers/solidfire.py:271 +#: cinder/volume/drivers/solidfire.py:366 #, python-format -msgid "Active base files: %s" +msgid "API response: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:414 +#: cinder/volume/drivers/solidfire.py:222 #, python-format -msgid "Corrupt base files: %s" +msgid "Found solidfire account: %s" msgstr "" -#: cinder/virt/libvirt/imagecache.py:418 +#: cinder/volume/drivers/solidfire.py:253 #, python-format -msgid "Removable base files: %s" +msgid "solidfire account: %s does not exist, create it..." msgstr "" -#: cinder/virt/libvirt/imagecache.py:426 -msgid "Verification complete" +#: cinder/volume/drivers/solidfire.py:315 +#, python-format +msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" msgstr "" -#: cinder/virt/libvirt/utils.py:264 -msgid "Unable to find an open port" +#: cinder/volume/drivers/solidfire.py:398 +msgid "Failed to get model update from clone" msgstr "" -#: cinder/virt/libvirt/vif.py:90 +#: cinder/volume/drivers/solidfire.py:410 #, python-format -msgid "Ensuring vlan %(vlan)s and bridge %(bridge)s" +msgid "Failed volume create: %s" msgstr "" -#: cinder/virt/libvirt/vif.py:99 +#: cinder/volume/drivers/solidfire.py:425 #, python-format -msgid "Ensuring bridge %s" +msgid "More than one valid preset was detected, using %s" msgstr "" -#: cinder/virt/libvirt/vif.py:165 cinder/virt/libvirt/vif.py:220 +#: cinder/volume/drivers/solidfire.py:460 #, python-format -msgid "Failed while unplugging vif of instance '%s'" +msgid "Failed to get SolidFire Volume: %s" msgstr "" -#: cinder/virt/libvirt/volume.py:163 +#: cinder/volume/drivers/solidfire.py:469 #, python-format -msgid "iSCSI device not found at %s" +msgid "Mapped SolidFire volumeID %(sfid)s to cinder ID %(uuid)s." 
msgstr "" -#: cinder/virt/libvirt/volume.py:166 +#: cinder/volume/drivers/solidfire.py:478 #, python-format -msgid "" -"ISCSI volume not yet found at: %(mount_device)s. Will rescan & retry. " -"Try number: %(tries)s" +msgid "Volume %s, not found on SF Cluster." msgstr "" -#: cinder/virt/libvirt/volume.py:178 +#: cinder/volume/drivers/solidfire.py:481 #, python-format -msgid "Found iSCSI node %(mount_device)s (after %(tries)s rescans)" +msgid "Found %(count)s volumes mapped to id: %(uuid)s." msgstr "" -#: cinder/virt/vmwareapi/error_util.py:93 -#, python-format -msgid "Error(s) %s occurred in the call to RetrieveProperties" +#: cinder/volume/drivers/solidfire.py:550 +msgid "Enter SolidFire delete_volume..." msgstr "" -#: cinder/virt/vmwareapi/fake.py:44 cinder/virt/xenapi/fake.py:77 +#: cinder/volume/drivers/solidfire.py:554 #, python-format -msgid "%(text)s: _db_content => %(content)s" +msgid "Account for Volume ID %s was not found on the SolidFire Cluster!" msgstr "" -#: cinder/virt/vmwareapi/fake.py:131 -#, python-format -msgid "Property %(attr)s not set for the managed object %(objName)s" +#: cinder/volume/drivers/solidfire.py:556 +msgid "This usually means the volume was never successfully created." msgstr "" -#: cinder/virt/vmwareapi/fake.py:437 -msgid "There is no VM registered" +#: cinder/volume/drivers/solidfire.py:569 +#, python-format +msgid "Failed to delete SolidFire Volume: %s" msgstr "" -#: cinder/virt/vmwareapi/fake.py:439 cinder/virt/vmwareapi/fake.py:609 +#: cinder/volume/drivers/solidfire.py:572 +#: cinder/volume/drivers/solidfire.py:646 +#: cinder/volume/drivers/solidfire.py:709 +#: cinder/volume/drivers/solidfire.py:734 #, python-format -msgid "Virtual Machine with ref %s is not there" +msgid "Volume ID %s was not found on the SolidFire Cluster!" msgstr "" -#: cinder/virt/vmwareapi/fake.py:502 -#, python-format -msgid "Logging out a session that is invalid or already logged out: %s" +#: cinder/volume/drivers/solidfire.py:575 +msgid "Leaving SolidFire delete_volume" msgstr "" -#: cinder/virt/vmwareapi/fake.py:517 -msgid "Session is faulty" +#: cinder/volume/drivers/solidfire.py:579 +msgid "Executing SolidFire ensure_export..." msgstr "" -#: cinder/virt/vmwareapi/fake.py:520 -msgid "Session Invalid" +#: cinder/volume/drivers/solidfire.py:587 +msgid "Executing SolidFire create_export..." msgstr "" -#: cinder/virt/vmwareapi/fake.py:606 -msgid " No Virtual Machine has been registered yet" +#: cinder/volume/drivers/solidfire.py:638 +msgid "Entering SolidFire extend_volume..." msgstr "" -#: cinder/virt/vmwareapi/io_util.py:99 -#, python-format -msgid "Glance image %s is in killed state" +#: cinder/volume/drivers/solidfire.py:660 +msgid "Leaving SolidFire extend_volume" msgstr "" -#: cinder/virt/vmwareapi/io_util.py:107 -#, python-format -msgid "Glance image %(image_id)s is in unknown state - %(state)s" +#: cinder/volume/drivers/solidfire.py:665 +msgid "Updating cluster status info" msgstr "" -#: cinder/virt/vmwareapi/network_utils.py:128 -msgid "" -"ESX SOAP server returned an empty port group for the host system in its " -"response" +#: cinder/volume/drivers/solidfire.py:673 +#, fuzzy +msgid "Failed to get updated stats" +msgstr "內文解碼失敗" + +#: cinder/volume/drivers/solidfire.py:703 +#: cinder/volume/drivers/solidfire.py:728 +msgid "Entering SolidFire attach_volume..." 
msgstr "" -#: cinder/virt/vmwareapi/network_utils.py:155 +#: cinder/volume/drivers/solidfire.py:773 +msgid "Leaving SolidFire transfer volume" +msgstr "" + +#: cinder/volume/drivers/zadara.py:236 #, python-format -msgid "Creating Port Group with name %s on the ESX host" +msgid "Sending %(method)s to %(url)s. Body \"%(body)s\"" msgstr "" -#: cinder/virt/vmwareapi/network_utils.py:169 +#: cinder/volume/drivers/zadara.py:260 #, python-format -msgid "Created Port Group with name %s on the ESX host" +msgid "Operation completed. %(data)s" msgstr "" -#: cinder/virt/vmwareapi/read_write_util.py:150 +#: cinder/volume/drivers/zadara.py:357 #, python-format -msgid "Exception during HTTP connection close in VMWareHTTpWrite. Exception is %s" +msgid "Pool %(name)s: %(total)sGB total, %(free)sGB free" msgstr "" -#: cinder/virt/vmwareapi/vim.py:84 -msgid "Unable to import suds." +#: cinder/volume/drivers/zadara.py:408 cinder/volume/drivers/zadara.py:531 +#, python-format +msgid "Volume %(name)s could not be found. It might be already deleted" msgstr "" -#: cinder/virt/vmwareapi/vim.py:90 -msgid "Must specify vmwareapi_wsdl_loc" +#: cinder/volume/drivers/zadara.py:438 +#, python-format +msgid "Create snapshot: %s" msgstr "" -#: cinder/virt/vmwareapi/vim.py:145 +#: cinder/volume/drivers/zadara.py:445 cinder/volume/drivers/zadara.py:490 +#: cinder/volume/drivers/zadara.py:516 #, python-format -msgid "No such SOAP method '%s' provided by VI SDK" +msgid "Volume %(name)s not found" msgstr "" -#: cinder/virt/vmwareapi/vim.py:150 +#: cinder/volume/drivers/zadara.py:456 #, python-format -msgid "httplib error in %s: " +msgid "Delete snapshot: %s" msgstr "" -#: cinder/virt/vmwareapi/vim.py:157 +#: cinder/volume/drivers/zadara.py:464 #, python-format -msgid "Socket error in %s: " +msgid "snapshot: original volume %s not found, skipping delete operation" msgstr "" -#: cinder/virt/vmwareapi/vim.py:162 +#: cinder/volume/drivers/zadara.py:472 #, python-format -msgid "Type error in %s: " +msgid "snapshot: snapshot %s not found, skipping delete operation" msgstr "" -#: cinder/virt/vmwareapi/vim.py:166 +#: cinder/volume/drivers/zadara.py:483 #, python-format -msgid "Exception in %s " +msgid "Creating volume from snapshot: %s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:66 -msgid "Getting list of instances" +#: cinder/volume/drivers/zadara.py:496 +#, python-format +msgid "Snapshot %(name)s not found" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:82 +#: cinder/volume/drivers/zadara.py:614 #, python-format -msgid "Got total of %s instances" +msgid "Attach properties: %(properties)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:126 -msgid "Couldn't get a local Datastore reference" +#: cinder/volume/drivers/emc/emc_smis_common.py:40 +msgid "" +"Module PyWBEM not installed. Install PyWBEM using the python-pywbem " +"package." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:196 -#, python-format -msgid "Creating VM with the name %s on the ESX host" +#: cinder/volume/drivers/emc/emc_smis_common.py:79 +msgid "Entering create_volume." 
msgstr "" -#: cinder/virt/vmwareapi/vmops.py:205 +#: cinder/volume/drivers/emc/emc_smis_common.py:83 #, python-format -msgid "Created VM with the name %s on the ESX host" +msgid "Create Volume: %(volume)s Size: %(size)lu" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:234 +#: cinder/volume/drivers/emc/emc_smis_common.py:91 #, python-format -msgid "" -"Creating Virtual Disk of size %(vmdk_file_size_in_kb)s KB and adapter " -"type %(adapter_type)s on the ESX host local store %(data_store_name)s" +msgid "Create Volume: %(volume)s Storage type: %(storage_type)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:251 +#: cinder/volume/drivers/emc/emc_smis_common.py:98 #, python-format msgid "" -"Created Virtual Disk of size %(vmdk_file_size_in_kb)s KB on the ESX host " -"local store %(data_store_name)s" +"Create Volume: %(volume)s Pool: %(pool)s Storage System: " +"%(storage_system)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:260 +#: cinder/volume/drivers/emc/emc_smis_common.py:107 #, python-format msgid "" -"Deleting the file %(flat_uploaded_vmdk_path)s on the ESX host localstore " -"%(data_store_name)s" +"Error Create Volume: %(volumename)s. Storage Configuration Service not " +"found for pool %(storage_type)s." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:272 +#: cinder/volume/drivers/emc/emc_smis_common.py:115 #, python-format msgid "" -"Deleted the file %(flat_uploaded_vmdk_path)s on the ESX host local store " -"%(data_store_name)s" +"Create Volume: %(name)s Method: CreateOrModifyElementFromStoragePool " +"ConfigServicie: %(service)s ElementName: %(name)s InPool: %(pool)s " +"ElementType: 5 Size: %(size)lu" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:283 +#: cinder/volume/drivers/emc/emc_smis_common.py:130 #, python-format -msgid "" -"Downloading image file data %(image_ref)s to the ESX data store " -"%(data_store_name)s" +msgid "Create Volume: %(volumename)s Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:298 +#: cinder/volume/drivers/emc/emc_smis_common.py:137 #, python-format msgid "" -"Downloaded image file data %(image_ref)s to the ESX data store " -"%(data_store_name)s" +"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:315 +#: cinder/volume/drivers/emc/emc_smis_common.py:144 #, python-format -msgid "Reconfiguring VM instance %s to attach the image disk" +msgid "Leaving create_volume: %(volumename)s Return code: %(rc)lu" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:152 +msgid "Entering create_volume_from_snapshot." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:322 +#: cinder/volume/drivers/emc/emc_smis_common.py:157 #, python-format -msgid "Reconfigured VM instance %s to attach the image disk" +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:329 +#: cinder/volume/drivers/emc/emc_smis_common.py:167 #, python-format -msgid "Powering on the VM instance %s" +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Snapshot Instance: %(snapshotinstance)s Storage " +"System: %(storage_system)s." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:335 +#: cinder/volume/drivers/emc/emc_smis_common.py:177 #, python-format -msgid "Powered on the VM instance %s" +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Create Volume from Snapshot is NOT supported on VMAX." 
msgstr "" -#: cinder/virt/vmwareapi/vmops.py:381 +#: cinder/volume/drivers/emc/emc_smis_common.py:188 #, python-format -msgid "Creating Snapshot of the VM instance %s " +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Cannot find Replication Service to create volume from " +"snapshot." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:391 +#: cinder/volume/drivers/emc/emc_smis_common.py:197 #, python-format -msgid "Created Snapshot of the VM instance %s " +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Method: CreateElementReplica ReplicationService: " +"%(service)s ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:434 +#: cinder/volume/drivers/emc/emc_smis_common.py:218 #, python-format -msgid "Copying disk data before snapshot of the VM instance %s" +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s " +"Snapshot:%(snapshotname)s. Return code: %(rc)lu.Error: %(error)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:447 +#: cinder/volume/drivers/emc/emc_smis_common.py:230 #, python-format -msgid "Copied disk data before snapshot of the VM instance %s" +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Successfully clone volume from snapshot. Finding the " +"clone relationship." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:456 +#: cinder/volume/drivers/emc/emc_smis_common.py:241 #, python-format -msgid "Uploading image %s" +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Remove the clone relationship. Method: " +"ModifyReplicaSynchronization ReplicationService: %(service)s Operation: " +"8 Synchronization: %(sync_name)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:469 +#: cinder/volume/drivers/emc/emc_smis_common.py:257 #, python-format -msgid "Uploaded image %s" +msgid "" +"Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:479 +#: cinder/volume/drivers/emc/emc_smis_common.py:266 #, python-format -msgid "Deleting temporary vmdk file %s" +msgid "" +"Error Create Volume from Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:488 +#: cinder/volume/drivers/emc/emc_smis_common.py:278 #, python-format -msgid "Deleted temporary vmdk file %s" +msgid "" +"Leaving create_volume_from_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:520 -msgid "instance is not powered on" +#: cinder/volume/drivers/emc/emc_smis_common.py:287 +msgid "Entering create_cloned_volume." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:527 +#: cinder/volume/drivers/emc/emc_smis_common.py:292 #, python-format -msgid "Rebooting guest OS of VM %s" +msgid "" +"Create a Clone from Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:530 +#: cinder/volume/drivers/emc/emc_smis_common.py:302 #, python-format -msgid "Rebooted guest OS of VM %s" +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Source Instance: %(src_instance)s Storage System: %(storage_system)s." 
msgstr "" -#: cinder/virt/vmwareapi/vmops.py:532 +#: cinder/volume/drivers/emc/emc_smis_common.py:312 #, python-format -msgid "Doing hard reboot of VM %s" +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Cannot find Replication Service to create cloned volume." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:536 +#: cinder/volume/drivers/emc/emc_smis_common.py:321 #, python-format -msgid "Did hard reboot of VM %s" +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Method: CreateElementReplica ReplicationService: %(service)s " +"ElementName: %(elementname)s SyncType: 8 SourceElement: " +"%(sourceelement)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:548 +#: cinder/volume/drivers/emc/emc_smis_common.py:342 #, python-format -msgid "instance - %s not present" +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source " +"Volume:%(srcname)s. Return code: %(rc)lu.Error: %(error)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:567 +#: cinder/volume/drivers/emc/emc_smis_common.py:354 #, python-format -msgid "Powering off the VM %s" +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Successfully cloned volume from source volume. Finding the clone " +"relationship." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:572 +#: cinder/volume/drivers/emc/emc_smis_common.py:365 #, python-format -msgid "Powered off the VM %s" +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s." +" Remove the clone relationship. Method: ModifyReplicaSynchronization " +"ReplicationService: %(service)s Operation: 8 Synchronization: " +"%(sync_name)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:576 +#: cinder/volume/drivers/emc/emc_smis_common.py:381 #, python-format -msgid "Unregistering the VM %s" +msgid "" +"Create Cloned Volume: Volume: %(volumename)s Source Volume: %(srcname)s" +" Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:579 +#: cinder/volume/drivers/emc/emc_smis_common.py:390 #, python-format -msgid "Unregistered the VM %s" +msgid "" +"Error Create Cloned Volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s. Return code: %(rc)lu. Error: %(error)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:581 +#: cinder/volume/drivers/emc/emc_smis_common.py:402 #, python-format msgid "" -"In vmwareapi:vmops:destroy, got this exception while un-registering the " -"VM: %s" +"Leaving create_cloned_volume: Volume: %(volumename)s Source Volume: " +"%(srcname)s Return code: %(rc)lu." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:411 +msgid "Entering delete_volume." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:592 +#: cinder/volume/drivers/emc/emc_smis_common.py:413 #, python-format -msgid "Deleting contents of the VM %(name)s from datastore %(datastore_name)s" +msgid "Delete Volume: %(volume)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:602 +#: cinder/volume/drivers/emc/emc_smis_common.py:420 #, python-format -msgid "Deleted contents of the VM %(name)s from datastore %(datastore_name)s" +msgid "Volume %(name)s not found on the array. No volume to delete." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:607 +#: cinder/volume/drivers/emc/emc_smis_common.py:430 #, python-format msgid "" -"In vmwareapi:vmops:destroy, got this exception while deleting the VM " -"contents from the disk: %s" +"Error Delete Volume: %(volumename)s. Storage Configuration Service not " +"found." 
msgstr "" -#: cinder/virt/vmwareapi/vmops.py:615 -msgid "pause not supported for vmwareapi" +#: cinder/volume/drivers/emc/emc_smis_common.py:438 +#, python-format +msgid "Delete Volume: %(name)s DeviceID: %(deviceid)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:619 -msgid "unpause not supported for vmwareapi" +#: cinder/volume/drivers/emc/emc_smis_common.py:442 +#, python-format +msgid "" +"Delete Volume: %(name)s Method: EMCReturnToStoragePool ConfigServic: " +"%(service)s TheElement: %(vol_instance)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:633 +#: cinder/volume/drivers/emc/emc_smis_common.py:456 #, python-format -msgid "Suspending the VM %s " +msgid "" +"Error Delete Volume: %(volumename)s. Return code: %(rc)lu. Error: " +"%(error)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:637 +#: cinder/volume/drivers/emc/emc_smis_common.py:465 #, python-format -msgid "Suspended the VM %s " +msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:640 -msgid "instance is powered off and can not be suspended." +#: cinder/volume/drivers/emc/emc_smis_common.py:472 +msgid "Entering create_snapshot." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:643 +#: cinder/volume/drivers/emc/emc_smis_common.py:476 #, python-format -msgid "VM %s was already in suspended state. So returning without doing anything" +msgid "Create snapshot: %(snapshot)s: volume: %(volume)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:656 +#: cinder/volume/drivers/emc/emc_smis_common.py:488 #, python-format -msgid "Resuming the VM %s" +msgid "Device ID: %(deviceid)s: Storage System: %(storagesystem)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:661 +#: cinder/volume/drivers/emc/emc_smis_common.py:495 +#: cinder/volume/drivers/emc/emc_smis_common.py:497 +#: cinder/volume/drivers/emc/emc_smis_common.py:567 #, python-format -msgid "Resumed the VM %s " +msgid "Cannot find Replication Service to create snapshot for volume %s." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:663 -msgid "instance is not in a suspended state" +#: cinder/volume/drivers/emc/emc_smis_common.py:502 +#, python-format +msgid "" +"Create Snapshot: Method: CreateElementReplica: Target: %(snapshot)s " +"Source: %(volume)s Replication Service: %(service)s ElementName: " +"%(elementname)s Sync Type: 7 SourceElement: %(sourceelement)s." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:699 -msgid "get_diagnostics not implemented for vmwareapi" +#: cinder/volume/drivers/emc/emc_smis_common.py:518 +#, python-format +msgid "" +"Create Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:757 +#: cinder/volume/drivers/emc/emc_smis_common.py:527 #, python-format msgid "" -"Reconfiguring VM instance %(name)s to set the machine id with ip - " -"%(ip_addr)s" +"Error Create Snapshot: %(snapshot)s Volume: %(volume)s Error: " +"%(errordesc)s" msgstr "" -#: cinder/virt/vmwareapi/vmops.py:765 +#: cinder/volume/drivers/emc/emc_smis_common.py:535 #, python-format msgid "" -"Reconfigured VM instance %(name)s to set the machine id with ip - " -"%(ip_addr)s" +"Leaving create_snapshot: Snapshot: %(snapshot)s Volume: %(volume)s " +"Return code: %(rc)lu." msgstr "" -#: cinder/virt/vmwareapi/vmops.py:802 -#, python-format -msgid "Creating directory with path %s" +#: cinder/volume/drivers/emc/emc_smis_common.py:541 +msgid "Entering delete_snapshot." 
msgstr "" -#: cinder/virt/vmwareapi/vmops.py:806 +#: cinder/volume/drivers/emc/emc_smis_common.py:545 #, python-format -msgid "Created directory with path %s" +msgid "Delete Snapshot: %(snapshot)s: volume: %(volume)s" msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:89 +#: cinder/volume/drivers/emc/emc_smis_common.py:551 #, python-format -msgid "Downloading image %s from glance image server" +msgid "" +"Delete Snapshot: %(snapshot)s: volume: %(volume)s. Finding " +"StorageSychronization_SV_SV." msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:103 +#: cinder/volume/drivers/emc/emc_smis_common.py:559 #, python-format -msgid "Downloaded image %s from glance image server" +msgid "" +"Snapshot: %(snapshot)s: volume: %(volume)s not found on the array. No " +"snapshot to delete." msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:108 +#: cinder/volume/drivers/emc/emc_smis_common.py:574 #, python-format -msgid "Uploading image %s to the Glance image server" +msgid "" +"Delete Snapshot: Target: %(snapshot)s Source: %(volume)s. Method: " +"ModifyReplicaSynchronization: Replication Service: %(service)s " +"Operation: 19 Synchronization: %(sync_name)s." msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:129 +#: cinder/volume/drivers/emc/emc_smis_common.py:590 #, python-format -msgid "Uploaded image %s to the Glance image server" +msgid "" +"Delete Snapshot: Volume: %(volumename)s Snapshot: %(snapshotname)s " +"Return code: %(rc)lu" msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:139 +#: cinder/volume/drivers/emc/emc_smis_common.py:599 #, python-format -msgid "Getting image size for the image %s" +msgid "" +"Error Delete Snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s. Return code: %(rc)lu. Error: %(error)s" msgstr "" -#: cinder/virt/vmwareapi/vmware_images.py:143 +#: cinder/volume/drivers/emc/emc_smis_common.py:611 #, python-format -msgid "Got image size of %(size)s for the image %(image)s" +msgid "" +"Leaving delete_snapshot: Volume: %(volumename)s Snapshot: " +"%(snapshotname)s Return code: %(rc)lu." msgstr "" -#: cinder/virt/xenapi/fake.py:553 cinder/virt/xenapi/fake.py:652 -#: cinder/virt/xenapi/fake.py:670 cinder/virt/xenapi/fake.py:732 -msgid "Raising NotImplemented" +#: cinder/volume/drivers/emc/emc_smis_common.py:621 +#, python-format +msgid "Create export: %(volume)s" msgstr "" -#: cinder/virt/xenapi/fake.py:555 +#: cinder/volume/drivers/emc/emc_smis_common.py:626 #, python-format -msgid "xenapi.fake does not have an implementation for %s" +msgid "create_export: Volume: %(volume)s Device ID: %(device_id)s" msgstr "" -#: cinder/virt/xenapi/fake.py:589 +#: cinder/volume/drivers/emc/emc_smis_common.py:648 #, python-format -msgid "Calling %(localname)s %(impl)s" +msgid "" +"ExposePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(lun_name)s" +" InitiatorPortIDs: %(initiator)s DeviceAccesses: 2" msgstr "" -#: cinder/virt/xenapi/fake.py:594 +#: cinder/volume/drivers/emc/emc_smis_common.py:663 #, python-format -msgid "Calling getter %s" +msgid "ExposePaths parameter LunMaskingSCSIProtocolController: %(lunmasking)s" msgstr "" -#: cinder/virt/xenapi/fake.py:654 +#: cinder/volume/drivers/emc/emc_smis_common.py:674 #, python-format -msgid "" -"xenapi.fake does not have an implementation for %s or it has been called " -"with the wrong number of arguments" +msgid "Error mapping volume %s." msgstr "" -#: cinder/virt/xenapi/host.py:67 +#: cinder/volume/drivers/emc/emc_smis_common.py:678 +#, python-format +msgid "ExposePaths for volume %s completed successfully." 
+msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:694 #, python-format msgid "" -"Instance %(name)s running on %(host)s could not be found in the database:" -" assuming it is a worker VM and skipping migration to a new host" +"HidePaths: %(vol)s ConfigServicie: %(service)s LUNames: %(device_id)s " +"LunMaskingSCSIProtocolController: %(lunmasking)s" msgstr "" -#: cinder/virt/xenapi/host.py:137 +#: cinder/volume/drivers/emc/emc_smis_common.py:707 #, python-format -msgid "Unable to get SR for this host: %s" +msgid "Error unmapping volume %s." msgstr "" -#: cinder/virt/xenapi/host.py:169 -msgid "Unable to get updated status" +#: cinder/volume/drivers/emc/emc_smis_common.py:711 +#, python-format +msgid "HidePaths for volume %s completed successfully." msgstr "" -#: cinder/virt/xenapi/host.py:172 +#: cinder/volume/drivers/emc/emc_smis_common.py:724 #, python-format -msgid "The call to %(method)s returned an error: %(e)s." +msgid "" +"AddMembers: ConfigServicie: %(service)s MaskingGroup: %(masking_group)s" +" Members: %(vol)s" msgstr "" -#: cinder/virt/xenapi/network_utils.py:37 +#: cinder/volume/drivers/emc/emc_smis_common.py:739 #, python-format -msgid "Found non-unique network for name_label %s" +msgid "Error mapping volume %(vol)s. %(error)s" msgstr "" -#: cinder/virt/xenapi/network_utils.py:55 +#: cinder/volume/drivers/emc/emc_smis_common.py:744 #, python-format -msgid "Found non-unique network for bridge %s" +msgid "AddMembers for volume %s completed successfully." msgstr "" -#: cinder/virt/xenapi/network_utils.py:58 +#: cinder/volume/drivers/emc/emc_smis_common.py:757 #, python-format -msgid "Found no network for bridge %s" +msgid "" +"RemoveMembers: ConfigServicie: %(service)s MaskingGroup: " +"%(masking_group)s Members: %(vol)s" msgstr "" -#: cinder/virt/xenapi/pool.py:111 +#: cinder/volume/drivers/emc/emc_smis_common.py:770 #, python-format -msgid "Unable to eject %(host)s from the pool; pool not empty" +msgid "Error unmapping volume %(vol)s. %(error)s" msgstr "" -#: cinder/virt/xenapi/pool.py:126 +#: cinder/volume/drivers/emc/emc_smis_common.py:775 #, python-format -msgid "Unable to eject %(host)s from the pool; No master found" +msgid "RemoveMembers for volume %s completed successfully." msgstr "" -#: cinder/virt/xenapi/pool.py:143 +#: cinder/volume/drivers/emc/emc_smis_common.py:781 #, python-format -msgid "Pool-Join failed: %(e)s" +msgid "Map volume: %(volume)s" msgstr "" -#: cinder/virt/xenapi/pool.py:146 +#: cinder/volume/drivers/emc/emc_smis_common.py:790 +#: cinder/volume/drivers/emc/emc_smis_common.py:820 #, python-format -msgid "Unable to join %(host)s in the pool" +msgid "Cannot find Controller Configuration Service for storage system %s" msgstr "" -#: cinder/virt/xenapi/pool.py:162 +#: cinder/volume/drivers/emc/emc_smis_common.py:804 #, python-format -msgid "Pool-eject failed: %(e)s" +msgid "Unmap volume: %(volume)s" msgstr "" -#: cinder/virt/xenapi/pool.py:174 -#, fuzzy, python-format -msgid "Unable to set up pool: %(e)s." -msgstr "無法卸載 Volume %s" +#: cinder/volume/drivers/emc/emc_smis_common.py:810 +#, python-format +msgid "Volume %s is not mapped. No volume to unmap." +msgstr "" -#: cinder/virt/xenapi/pool.py:185 +#: cinder/volume/drivers/emc/emc_smis_common.py:834 #, python-format -msgid "Pool-set_name_label failed: %(e)s" +msgid "Initialize connection: %(volume)s" msgstr "" -#: cinder/virt/xenapi/vif.py:103 +#: cinder/volume/drivers/emc/emc_smis_common.py:840 #, python-format -msgid "Found no PIF for device %s" +msgid "Volume %s is already mapped." 
msgstr "" -#: cinder/virt/xenapi/vif.py:122 +#: cinder/volume/drivers/emc/emc_smis_common.py:852 #, python-format -msgid "" -"PIF %(pif_rec['uuid'])s for network %(bridge)s has VLAN id %(pif_vlan)d. " -"Expected %(vlan_num)d" +msgid "Terminate connection: %(volume)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:884 +#, python-format +msgid "Found Storage Type: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:218 -msgid "Created VM" +#: cinder/volume/drivers/emc/emc_smis_common.py:887 +msgid "Storage type not found." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:245 +#: cinder/volume/drivers/emc/emc_smis_common.py:903 #, python-format -msgid "VBD not found in instance %s" +msgid "Found Masking View: %s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:906 +msgid "Masking View not found." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:262 +#: cinder/volume/drivers/emc/emc_smis_common.py:928 +msgid "Ecom user not found." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:948 #, python-format -msgid "VBD %s already detached" +msgid "Ecom IP: %(ecomIp)s Port: %(ecomPort)s" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:952 +msgid "Ecom server not found." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:265 +#: cinder/volume/drivers/emc/emc_smis_common.py:959 +msgid "Cannot connect to ECOM server" +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:971 #, python-format -msgid "VBD %(vbd_ref)s detach rejected, attempt %(num_attempt)d/%(max_attempts)d" +msgid "Found Replication Service: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:270 +#: cinder/volume/drivers/emc/emc_smis_common.py:984 #, python-format -msgid "Unable to unplug VBD %s" +msgid "Found Storage Configuration Service: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:275 +#: cinder/volume/drivers/emc/emc_smis_common.py:997 #, python-format -msgid "Reached maximum number of retries trying to unplug VBD %s" +msgid "Found Controller Configuration Service: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:286 +#: cinder/volume/drivers/emc/emc_smis_common.py:1010 #, python-format -msgid "Unable to destroy VBD %s" +msgid "Found Storage Hardware ID Management Service: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:305 +#: cinder/volume/drivers/emc/emc_smis_common.py:1054 #, python-format -msgid "Creating %(vbd_type)s-type VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +msgid "Pool %(storage_type)s is not found." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:308 +#: cinder/volume/drivers/emc/emc_smis_common.py:1060 #, python-format -msgid "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s." +msgid "Storage system not found for pool %(storage_type)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:319 +#: cinder/volume/drivers/emc/emc_smis_common.py:1066 #, python-format -msgid "Unable to destroy VDI %s" +msgid "Pool: %(pool)s SystemName: %(systemname)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:336 +#: cinder/volume/drivers/emc/emc_smis_common.py:1082 #, python-format -msgid "" -"Created VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s)" -" on %(sr_ref)s." +msgid "Pool name: %(poolname)s System name: %(systemname)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:345 +#: cinder/volume/drivers/emc/emc_smis_common.py:1114 #, python-format -msgid "Copied VDI %(vdi_ref)s from VDI %(vdi_to_copy_ref)s on %(sr_ref)s." +msgid "Volume %(volumename)s not found on the array." 
msgstr "" -#: cinder/virt/xenapi/vm_utils.py:353 +#: cinder/volume/drivers/emc/emc_smis_common.py:1117 #, python-format -msgid "Cloned VDI %(vdi_ref)s from VDI %(vdi_to_clone_ref)s" +msgid "Volume name: %(volumename)s Volume instance: %(vol_instance)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:372 +#: cinder/volume/drivers/emc/emc_smis_common.py:1130 #, python-format -msgid "No primary VDI found for %(vm_ref)s" +msgid "Source: %(volumename)s Target: %(snapshotname)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:379 +#: cinder/volume/drivers/emc/emc_smis_common.py:1153 #, python-format -msgid "Snapshotting with label '%(label)s'" +msgid "" +"Source: %(volumename)s Target: %(snapshotname)s. Storage Synchronized " +"not found. " msgstr "" -#: cinder/virt/xenapi/vm_utils.py:392 +#: cinder/volume/drivers/emc/emc_smis_common.py:1158 #, python-format -msgid "Created snapshot %(template_vm_ref)s" +msgid "" +"Storage system: %(storage_system)s Storage Synchronized instance: " +"%(sync)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:431 +#: cinder/volume/drivers/emc/emc_smis_common.py:1184 #, python-format -msgid "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s" +msgid "Error finding %s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:583 +#: cinder/volume/drivers/emc/emc_smis_common.py:1188 #, python-format -msgid "Creating blank HD of size %(req_size)d gigs" +msgid "Found %(name)s: %(initiator)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:634 +#: cinder/volume/drivers/emc/emc_smis_common.py:1248 #, python-format msgid "" -"Fast cloning is only supported on default local SR of type ext. SR on " -"this system was found to be of type %(sr_type)s. Ignoring the cow flag." +"LunMaskingSCSIProtocolController for storage system %(storage_system)s " +"and initiator %(initiator)s is %(ctrl)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:724 +#: cinder/volume/drivers/emc/emc_smis_common.py:1289 #, python-format msgid "" -"download_vhd %(image)s attempt %(attempt_num)d/%(max_attempts)d from " -"%(glance_host)s:%(glance_port)s" +"LunMaskingSCSIProtocolController for storage volume %(vol)s and initiator" +" %(initiator)s is %(ctrl)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:734 +#: cinder/volume/drivers/emc/emc_smis_common.py:1302 #, python-format -msgid "download_vhd failed: %r" +msgid "" +"Volume %(name)s not found on the array. Cannot determine if there are " +"volumes mapped." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:750 +#: cinder/volume/drivers/emc/emc_smis_common.py:1314 #, python-format -msgid "Asking xapi to fetch vhd image %(image)s" +msgid "" +"LunMaskingSCSIProtocolController for storage system %(storage)s and " +"%(connector)s is %(ctrl)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:760 +#: cinder/volume/drivers/emc/emc_smis_common.py:1326 #, python-format msgid "" -"xapi 'download_vhd' returned VDI of type '%(vdi_type)s' with UUID " -"'%(vdi_uuid)s'" +"Found %(numVolumesMapped)d volumes on storage system %(storage)s mapped " +"to %(initiator)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:789 +#: cinder/volume/drivers/emc/emc_smis_common.py:1361 #, python-format -msgid "vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes=%(vdi_size_bytes)d" +msgid "Available device number on %(storage)s: %(device)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:805 +#: cinder/volume/drivers/emc/emc_smis_common.py:1404 #, python-format -msgid "image_size_bytes=%(size_bytes)d, allowed_size_bytes=%(allowed_size_bytes)d" +msgid "Device number not found for volume %(volumename)s %(vol_instance)s." 
msgstr "" -#: cinder/virt/xenapi/vm_utils.py:809 +#: cinder/volume/drivers/emc/emc_smis_common.py:1409 #, python-format -msgid "" -"Image size %(size_bytes)d exceeded instance_type allowed size " -"%(allowed_size_bytes)d" +msgid "Found device number %(device)d for volume %(volumename)s %(vol_instance)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:831 +#: cinder/volume/drivers/emc/emc_smis_common.py:1419 #, python-format -msgid "Fetching image %(image)s, type %(image_type_str)" +msgid "Device info: %(data)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:844 +#: cinder/volume/drivers/emc/emc_smis_common.py:1441 #, python-format -msgid "Size for image %(image)s: %(virtual_size)d" +msgid "Masking view: %(view)s DeviceMaskingGroup: %(masking)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:853 +#: cinder/volume/drivers/emc/emc_smis_common.py:1463 #, python-format -msgid "" -"Kernel/Ramdisk image is too large: %(vdi_size)d bytes, max %(max_size)d " -"bytes" +msgid "Found Storage Processor System: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:870 +#: cinder/volume/drivers/emc/emc_smis_common.py:1491 #, python-format -msgid "Copying VDI %s to /boot/guest on dom0" +msgid "" +"iSCSIProtocolEndpoint for storage system %(storage_system)s and SP %(sp)s" +" is %(endpoint)s." +msgstr "" + +#: cinder/volume/drivers/emc/emc_smis_common.py:1520 +msgid "Error finding Storage Hardware ID Service." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:884 +#: cinder/volume/drivers/emc/emc_smis_common.py:1526 #, python-format -msgid "Kernel/Ramdisk VDI %s destroyed" +msgid "" +"EMCGetTargetEndpoints: Service: %(service)s Storage HardwareIDs: " +"%(hardwareids)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:895 -msgid "Failed to fetch glance image" +#: cinder/volume/drivers/emc/emc_smis_common.py:1538 +msgid "Error finding Target WWNs." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:934 +#: cinder/volume/drivers/emc/emc_smis_common.py:1548 #, python-format -msgid "Detected %(image_type_str)s format for image %(image_ref)s" +msgid "Add target WWN: %s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:955 +#: cinder/volume/drivers/emc/emc_smis_common.py:1550 #, python-format -msgid "Looking up vdi %s for PV kernel" +msgid "Target WWNs: %s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:973 +#: cinder/volume/drivers/emc/emc_smis_common.py:1566 #, python-format -msgid "Unknown image format %(disk_image_type)s" +msgid "Storage Hardware IDs for %(wwpns)s is %(foundInstances)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1016 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:152 #, python-format -msgid "VDI %s is still available" +msgid "Could not find iSCSI export for volume %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1059 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:161 +#, fuzzy, python-format +msgid "Cannot find device number for volume %s" +msgstr "找不到Volume %s" + +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:191 #, python-format -msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgid "Found iSCSI endpoint: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1061 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:198 #, python-format -msgid "(VM_UTILS) xenapi power_state -> |%s|" +msgid "ISCSI endpoint not found for SP %(sp)s on storage system %(storage)s." 
msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1088 +#: cinder/volume/drivers/emc/emc_smis_iscsi.py:215 #, python-format -msgid "Unable to parse rrd of %(vm_uuid)s" +msgid "ISCSI properties: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1108 +#: cinder/volume/drivers/hds/hds.py:70 #, python-format -msgid "Re-scanning SR %s" +msgid "Range: start LU: %(start)s, end LU: %(end)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1136 +#: cinder/volume/drivers/hds/hds.py:84 #, python-format -msgid "Flag sr_matching_filter '%s' does not respect formatting convention" +msgid "setting LU upper (end) limit to %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1154 -msgid "" -"XenAPI is unable to find a Storage Repository to install guest instances " -"on. Please check your configuration and/or configure the flag " -"'sr_matching_filter'" +#: cinder/volume/drivers/hds/hds.py:92 +#, python-format +msgid "%(element)s: %(val)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1167 -msgid "Cannot find SR of content-type ISO" +#: cinder/volume/drivers/hds/hds.py:103 cinder/volume/drivers/hds/hds.py:105 +#, python-format +msgid "XML exception reading parameter: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1175 +#: cinder/volume/drivers/hds/hds.py:178 #, python-format -msgid "ISO: looking at SR %(sr_rec)s" +msgid "portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1177 -msgid "ISO: not iso content" +#: cinder/volume/drivers/hds/hds.py:197 +#, python-format +msgid "No configuration found for service: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1180 -msgid "ISO: iso content_type, no 'i18n-key' key" +#: cinder/volume/drivers/hds/hds.py:250 +#, python-format +msgid "HDP not found: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1183 -msgid "ISO: iso content_type, i18n-key value not 'local-storage-iso'" +#: cinder/volume/drivers/hds/hds.py:289 +#, python-format +msgid "iSCSI portal not found for service: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1187 -msgid "ISO: SR MATCHing our criteria" +#: cinder/volume/drivers/hds/hds.py:327 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1189 -msgid "ISO: ISO, looking to see if it is host local" +#: cinder/volume/drivers/hds/hds.py:355 +#, python-format +msgid "LUN %(lun)s of size %(size)s MB is cloned." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1192 +#: cinder/volume/drivers/hds/hds.py:372 #, python-format -msgid "ISO: PBD %(pbd_ref)s disappeared" +msgid "LUN %(lun)s extended to %(size)s GB." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1195 +#: cinder/volume/drivers/hds/hds.py:395 #, python-format -msgid "ISO: PBD matching, want %(pbd_rec)s, have %(host)s" +msgid "delete lun %(lun)s on %(name)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1198 -msgid "ISO: SR with local PBD" +#: cinder/volume/drivers/hds/hds.py:480 +#, python-format +msgid "LUN %(lun)s of size %(sz)s MB is created from snapshot." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1220 +#: cinder/volume/drivers/hds/hds.py:503 #, python-format -msgid "" -"Unable to obtain RRD XML for VM %(vm_uuid)s with server details: " -"%(server)s." +msgid "LUN %(lun)s of size %(size)s MB is created as snapshot." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1236 +#: cinder/volume/drivers/hds/hds.py:522 #, python-format -msgid "Unable to obtain RRD XML updates with server details: %(server)s." +msgid "LUN %s is deleted." 
+msgstr "" + +#: cinder/volume/drivers/huawei/__init__.py:57 +msgid "_instantiate_driver: configuration not found." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1290 +#: cinder/volume/drivers/huawei/__init__.py:64 #, python-format -msgid "Invalid statistics data from Xenserver: %s" +msgid "" +"_instantiate_driver: Loading %(protocol)s driver for Huawei OceanStor " +"%(product)s series storage arrays." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1343 +#: cinder/volume/drivers/huawei/__init__.py:84 #, python-format -msgid "VHD %(vdi_uuid)s has parent %(parent_ref)s" +msgid "" +"\"Product\" or \"Protocol\" is illegal. \"Product\" should be set to " +"either T, Dorado or HVS. \"Protocol\" should be set to either iSCSI or " +"FC. Product: %(product)s Protocol: %(protocol)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1417 +#: cinder/volume/drivers/huawei/huawei_dorado.py:74 #, python-format msgid "" -"Parent %(parent_uuid)s doesn't match original parent " -"%(original_parent_uuid)s, waiting for coalesce..." +"initialize_connection: volume name: %(vol)s host: %(host)s initiator: " +"%(wwn)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1427 +#: cinder/volume/drivers/huawei/huawei_dorado.py:92 +#: cinder/volume/drivers/huawei/huawei_t.py:461 #, python-format -msgid "VHD coalesce attempts exceeded (%(max_attempts)d), giving up..." +msgid "initialize_connection: Target FC ports WWNS: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1462 +#: cinder/volume/drivers/huawei/huawei_t.py:101 #, python-format -msgid "Timeout waiting for device %s to be created" +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(ini)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1473 +#: cinder/volume/drivers/huawei/huawei_t.py:159 +#: cinder/volume/drivers/huawei/rest_common.py:1278 #, python-format -msgid "Plugging VBD %s ... " +msgid "" +"_get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " +"check config file." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1476 +#: cinder/volume/drivers/huawei/huawei_t.py:206 +#: cinder/volume/drivers/huawei/rest_common.py:1083 #, python-format -msgid "Plugging VBD %s done." +msgid "_get_tgt_iqn: iSCSI IP is %s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1478 +#: cinder/volume/drivers/huawei/huawei_t.py:234 #, python-format -msgid "VBD %(vbd_ref)s plugged as %(orig_dev)s" +msgid "_get_tgt_iqn: iSCSI target iqn is %s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1481 +#: cinder/volume/drivers/huawei/huawei_t.py:248 #, python-format -msgid "VBD %(vbd_ref)s plugged into wrong dev, remapping to %(dev)s" +msgid "" +"_get_iscsi_tgt_port_info: Failed to get iSCSI port info. Please make sure" +" the iSCSI port IP %s is configured in array." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1490 +#: cinder/volume/drivers/huawei/huawei_t.py:323 +#: cinder/volume/drivers/huawei/huawei_t.py:552 #, python-format -msgid "Destroying VBD for VDI %s ... " +msgid "" +"terminate_connection: volume: %(vol)s, host: %(host)s, connector: " +"%(initiator)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1498 +#: cinder/volume/drivers/huawei/huawei_t.py:351 #, python-format -msgid "Destroying VBD for VDI %s done." +msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s." +msgstr "" + +#: cinder/volume/drivers/huawei/huawei_t.py:436 +msgid "validate_connector: The FC driver requires thewwpns in the connector." 
msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1511 +#: cinder/volume/drivers/huawei/huawei_t.py:443 #, python-format -msgid "Running pygrub against %s" +msgid "" +"initialize_connection: volume name: %(vol)s, host: %(host)s, initiator: " +"%(wwn)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1518 +#: cinder/volume/drivers/huawei/huawei_t.py:578 #, python-format -msgid "Found Xen kernel %s" +msgid "_remove_fc_ports: FC port was not found on host %(hostid)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1520 -msgid "No Xen kernel found. Booting HVM." +#: cinder/volume/drivers/huawei/huawei_utils.py:40 +#, python-format +msgid "parse_xml_file: %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1533 -msgid "Partitions:" +#: cinder/volume/drivers/huawei/huawei_utils.py:129 +#, python-format +msgid "_get_host_os_type: Host %(ip)s OS type is %(os)s." msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1539 +#: cinder/volume/drivers/huawei/rest_common.py:59 #, python-format -msgid " %(num)s: %(ptype)s %(size)d sectors" +msgid "HVS Request URL: %(url)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1565 +#: cinder/volume/drivers/huawei/rest_common.py:60 #, python-format -msgid "" -"Writing partition table %(primary_first)d %(primary_last)d to " -"%(dev_path)s..." +msgid "HVS Request Data: %(data)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1578 +#: cinder/volume/drivers/huawei/rest_common.py:73 #, python-format -msgid "Writing partition table %s done." +msgid "HVS Response Data: %(res)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1632 +#: cinder/volume/drivers/huawei/rest_common.py:75 #, python-format -msgid "" -"Starting sparse_copy src=%(src_path)s dst=%(dst_path)s " -"virtual_size=%(virtual_size)d block_size=%(block_size)d" +msgid "Bad response from server: %s" +msgstr "" + +#: cinder/volume/drivers/huawei/rest_common.py:82 +msgid "JSON transfer error" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1664 +#: cinder/volume/drivers/huawei/rest_common.py:102 #, python-format -msgid "" -"Finished sparse_copy in %(duration).2f secs, %(compression_pct).2f%% " -"reduction in size" +msgid "Login error, reason is %s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1714 +#: cinder/volume/drivers/huawei/rest_common.py:166 +#, python-format msgid "" -"XenServer tools installed in this image are capable of network injection." -" Networking files will not bemanipulated" +"%(err)s\n" +"result: %(res)s" msgstr "" -#: cinder/virt/xenapi/vm_utils.py:1722 -msgid "" -"XenServer tools are present in this image but are not capable of network " -"injection" +#: cinder/volume/drivers/huawei/rest_common.py:173 +#, python-format +msgid "%s \"data\" was not in result." 
 msgstr ""
 
-#: cinder/virt/xenapi/vm_utils.py:1726
-msgid "XenServer tools are not installed in this image"
+#: cinder/volume/drivers/huawei/rest_common.py:208
+msgid "Can't find the Qos policy in array"
 msgstr ""
 
-#: cinder/virt/xenapi/vm_utils.py:1742
-msgid "Manipulating interface files directly"
+#: cinder/volume/drivers/huawei/rest_common.py:246
+msgid "Can't find lun or lun group in array"
 msgstr ""
 
-#: cinder/virt/xenapi/vm_utils.py:1751
+#: cinder/volume/drivers/huawei/rest_common.py:280
 #, python-format
-msgid "Failed to mount filesystem (expected for non-linux instances): %s"
+msgid "Invalid resource pool: %s"
 msgstr ""
 
-#: cinder/virt/xenapi/vmops.py:131 cinder/virt/xenapi/vmops.py:722
+#: cinder/volume/drivers/huawei/rest_common.py:298
 #, python-format
-msgid "Updating progress to %(progress)d"
+msgid "Get pool info error, pool name is:%s"
 msgstr ""
 
-#: cinder/virt/xenapi/vmops.py:231
+#: cinder/volume/drivers/huawei/rest_common.py:327
 #, python-format
-msgid "Attempted to power on non-existent instance bad instance id %s"
+msgid "create_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s."
 msgstr ""
 
-#: cinder/virt/xenapi/vmops.py:233
-msgid "Starting instance"
+#: cinder/volume/drivers/huawei/rest_common.py:354
+#, python-format
+msgid "_stop_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s."
 msgstr ""
 
-#: cinder/virt/xenapi/vmops.py:303
-msgid "Removing kernel/ramdisk files from dom0"
+#: cinder/volume/drivers/huawei/rest_common.py:474
+#, python-format
+msgid ""
+"_mapping_hostgroup_and_lungroup: lun_group: %(lun_group)s view_id: "
+"%(view_id)s"
 msgstr ""
 
-#: cinder/virt/xenapi/vmops.py:358
-msgid "Failed to spawn, rolling back"
+#: cinder/volume/drivers/huawei/rest_common.py:511
+#: cinder/volume/drivers/huawei/rest_common.py:543
+#, python-format
+msgid "initiator name:%(initiator_name)s, volume name:%(volume)s."
 msgstr ""
 
-#: cinder/virt/xenapi/vmops.py:443
-msgid "Detected ISO image type, creating blank VM for install"
+#: cinder/volume/drivers/huawei/rest_common.py:527
+#, python-format
+msgid "host lun id is %s"
 msgstr ""
 
-#: cinder/virt/xenapi/vmops.py:462
-msgid "Auto configuring disk, attempting to resize partition..."
+#: cinder/volume/drivers/huawei/rest_common.py:553
+#, python-format
+msgid "the free wwns %s"
 msgstr ""
 
-#: cinder/virt/xenapi/vmops.py:515
+#: cinder/volume/drivers/huawei/rest_common.py:574
 #, python-format
-msgid "Invalid value for injected_files: %r"
+msgid "the fc server properties are:%s"
 msgstr ""
 
-#: cinder/virt/xenapi/vmops.py:520
+#: cinder/volume/drivers/huawei/rest_common.py:688
 #, python-format
-msgid "Injecting file path: '%s'"
+msgid "JSON transfer data error. %s"
 msgstr ""
 
-#: cinder/virt/xenapi/vmops.py:527
-msgid "Setting admin password"
+#: cinder/volume/drivers/huawei/rest_common.py:874
+#, python-format
+msgid "terminate_connection:volume name: %(volume)s, initiator name: %(ini)s."
 msgstr ""
 
-#: cinder/virt/xenapi/vmops.py:531
-msgid "Resetting network"
+#: cinder/volume/drivers/huawei/rest_common.py:937
+#, python-format
+msgid ""
+"Config file is wrong. LUNType must be \"Thin\" or \"Thick\". "
+"LUNType:%(fetchtype)s"
 msgstr ""
 
-#: cinder/virt/xenapi/vmops.py:538
-msgid "Setting VCPU weight"
+#: cinder/volume/drivers/huawei/rest_common.py:964
+#, python-format
+msgid ""
+"PrefetchType config is wrong. PrefetchType must be in 1,2,3,4. fetchtype "
+"is:%(fetchtype)s"
 msgstr ""
 
-#: cinder/virt/xenapi/vmops.py:544
-msgid "Starting VM"
+#: cinder/volume/drivers/huawei/rest_common.py:970
+msgid "Use default prefetch fetchtype. Prefetch fetchtype:Intelligent."
 msgstr ""
 
-#: cinder/virt/xenapi/vmops.py:551
+#: cinder/volume/drivers/huawei/rest_common.py:982
 #, python-format
 msgid ""
-"Latest agent build for %(hypervisor)s/%(os)s/%(architecture)s is "
-"%(version)s"
+"_wait_for_luncopy:LUNcopy status is not normal. LUNcopy name: "
+"%(luncopyname)s"
 msgstr ""
 
-#: cinder/virt/xenapi/vmops.py:554
+#: cinder/volume/drivers/huawei/rest_common.py:1056
 #, python-format
-msgid "No agent build found for %(hypervisor)s/%(os)s/%(architecture)s"
+msgid ""
+"_get_iscsi_port_info: Failed to get iscsi port info through config IP "
+"%(ip)s, please check config file."
 msgstr ""
 
-#: cinder/virt/xenapi/vmops.py:561
-msgid "Waiting for instance state to become running"
+#: cinder/volume/drivers/huawei/rest_common.py:1101
+#, python-format
+msgid "_get_tgt_iqn: iSCSI target iqn is %s"
 msgstr ""
 
-#: cinder/virt/xenapi/vmops.py:573
-msgid "Querying agent version"
+#: cinder/volume/drivers/huawei/rest_common.py:1124
+#, python-format
+msgid "_parse_volume_type: type id: %(type_id)s config parameter is: %(params)s"
 msgstr ""
 
-#: cinder/virt/xenapi/vmops.py:576
+#: cinder/volume/drivers/huawei/rest_common.py:1157
 #, python-format
-msgid "Instance agent version: %s"
+msgid ""
+"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key"
+" in extra_specs and make it consistent with the configuration file "
+"%(conf)s."
 msgstr ""
 
-#: cinder/virt/xenapi/vmops.py:581
+#: cinder/volume/drivers/huawei/rest_common.py:1162
 #, python-format
-msgid "Updating Agent to %s"
+msgid "The config parameters are: %s"
 msgstr ""
 
-#: cinder/virt/xenapi/vmops.py:616
+#: cinder/volume/drivers/huawei/rest_common.py:1239
+#: cinder/volume/drivers/huawei/ssh_common.py:118
+#: cinder/volume/drivers/huawei/ssh_common.py:1265
 #, python-format
-msgid "No opaque_ref could be determined for '%s'."
+msgid "_check_conf_file: Config file invalid. %s must be set."
 msgstr ""
 
-#: cinder/virt/xenapi/vmops.py:670
-msgid "Finished snapshot and upload for VM"
+#: cinder/volume/drivers/huawei/rest_common.py:1246
+#: cinder/volume/drivers/huawei/ssh_common.py:125
+msgid "_check_conf_file: Config file invalid. StoragePool must be set."
 msgstr ""
 
-#: cinder/virt/xenapi/vmops.py:677
-msgid "Starting snapshot for VM"
+#: cinder/volume/drivers/huawei/rest_common.py:1256
+#, python-format
+msgid ""
+"_check_conf_file: Config file invalid. Host OSType invalid.\n"
+"The valid values are: %(os_list)s"
 msgstr ""
 
-#: cinder/virt/xenapi/vmops.py:686
-#, fuzzy, python-format
-msgid "Unable to Snapshot instance: %(exc)s"
-msgstr "無法掛載Volume 到虛擬機器 %s"
-
-#: cinder/virt/xenapi/vmops.py:702
-msgid "Failed to transfer vhd to new host"
+#: cinder/volume/drivers/huawei/rest_common.py:1300
+msgid "Can not find lun in array"
 msgstr ""
 
-#: cinder/virt/xenapi/vmops.py:770
+#: cinder/volume/drivers/huawei/ssh_common.py:54
 #, python-format
-msgid "Resizing down VDI %(cow_uuid)s from %(old_gb)dGB to %(new_gb)dGB"
+msgid "ssh_read: Read SSH timeout. %s"
 msgstr ""
 
-#: cinder/virt/xenapi/vmops.py:893
-#, python-format
-msgid "Resizing up VDI %(vdi_uuid)s from %(old_gb)dGB to %(new_gb)dGB"
+#: cinder/volume/drivers/huawei/ssh_common.py:70
+msgid "No response message. Please check system status."
msgstr "" -#: cinder/virt/xenapi/vmops.py:901 -msgid "Resize complete" +#: cinder/volume/drivers/huawei/ssh_common.py:101 +#: cinder/volume/drivers/huawei/ssh_common.py:1249 +msgid "do_setup" msgstr "" -#: cinder/virt/xenapi/vmops.py:928 +#: cinder/volume/drivers/huawei/ssh_common.py:135 +#: cinder/volume/drivers/huawei/ssh_common.py:1287 #, python-format -msgid "Failed to query agent version: %(resp)r" +msgid "" +"_check_conf_file: Config file invalid. Host OSType is invalid.\n" +"The valid values are: %(os_list)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:949 +#: cinder/volume/drivers/huawei/ssh_common.py:169 #, python-format -msgid "domid changed from %(domid)s to %(newdomid)s" +msgid "_get_login_info: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:962 +#: cinder/volume/drivers/huawei/ssh_common.py:224 #, python-format -msgid "Failed to update agent: %(resp)r" +msgid "create_volume: volume name: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:983 +#: cinder/volume/drivers/huawei/ssh_common.py:242 #, python-format -msgid "Failed to exchange keys: %(resp)r" +msgid "" +"_name_translate: Name in cinder: %(old)s, new name in storage system: " +"%(new)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:998 +#: cinder/volume/drivers/huawei/ssh_common.py:279 #, python-format -msgid "Failed to update password: %(resp)r" +msgid "" +"_parse_volume_type: Unacceptable parameter %(key)s. Please check this key" +" in extra_specs and make it consistent with the element in configuration " +"file %(conf)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1023 +#: cinder/volume/drivers/huawei/ssh_common.py:373 +#: cinder/volume/drivers/huawei/ssh_common.py:1451 #, python-format -msgid "Failed to inject file: %(resp)r" +msgid "LUNType must be \"Thin\" or \"Thick\". LUNType:%(type)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1032 -msgid "VM already halted, skipping shutdown..." +#: cinder/volume/drivers/huawei/ssh_common.py:395 +msgid "" +"_parse_conf_lun_params: Use default prefetch type. Prefetch type: " +"Intelligent" msgstr "" -#: cinder/virt/xenapi/vmops.py:1036 -msgid "Shutting down VM" +#: cinder/volume/drivers/huawei/ssh_common.py:421 +#, python-format +msgid "" +"_get_maximum_capacity_pool_id: Failed to get pool id. Please check config" +" file and make sure the StoragePool %s is created in storage array." msgstr "" -#: cinder/virt/xenapi/vmops.py:1054 -msgid "Unable to find VBD for VM" +#: cinder/volume/drivers/huawei/ssh_common.py:436 +#, python-format +msgid "CLI command: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1097 -msgid "Using RAW or VHD, skipping kernel and ramdisk deletion" +#: cinder/volume/drivers/huawei/ssh_common.py:466 +#, python-format +msgid "" +"_execute_cli: Can not connect to IP %(old)s, try to connect to the other " +"IP %(new)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1104 -msgid "instance has a kernel or ramdisk but not both" +#: cinder/volume/drivers/huawei/ssh_common.py:501 +#, python-format +msgid "_execute_cli: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1111 -msgid "kernel/ramdisk files removed" +#: cinder/volume/drivers/huawei/ssh_common.py:511 +#, python-format +msgid "delete_volume: volume name: %s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1121 -msgid "VM destroyed" +#: cinder/volume/drivers/huawei/ssh_common.py:516 +#, python-format +msgid "delete_volume: Volume %(name)s does not exist." 
msgstr "" -#: cinder/virt/xenapi/vmops.py:1147 -msgid "Destroying VM" +#: cinder/volume/drivers/huawei/ssh_common.py:570 +#, python-format +msgid "" +"create_volume_from_snapshot: snapshot name: %(snapshot)s, volume name: " +"%(volume)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1169 -msgid "VM is not present, skipping destroy..." +#: cinder/volume/drivers/huawei/ssh_common.py:580 +#, python-format +msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:1222 +#: cinder/volume/drivers/huawei/ssh_common.py:650 #, python-format -msgid "Instance is already in Rescue Mode: %s" +msgid "_wait_for_luncopy: LUNcopy %(luncopyname)s status is %(status)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1296 +#: cinder/volume/drivers/huawei/ssh_common.py:688 #, python-format -msgid "Found %(instance_count)d hung reboots older than %(timeout)d seconds" +msgid "create_cloned_volume: src volume: %(src)s, tgt volume: %(tgt)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1300 -msgid "Automatically hard rebooting" +#: cinder/volume/drivers/huawei/ssh_common.py:697 +#, python-format +msgid "Source volume %(name)s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:1363 +#: cinder/volume/drivers/huawei/ssh_common.py:739 #, python-format -msgid "Setting migration %(migration_id)s to error: %(reason)s" +msgid "" +"extend_volume: extended volume name: %(extended_name)s new added volume " +"name: %(added_name)s new added volume size: %(added_size)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1374 +#: cinder/volume/drivers/huawei/ssh_common.py:747 #, python-format -msgid "" -"Automatically confirming migration %(migration_id)s for instance " -"%(instance_uuid)s" +msgid "extend_volume: volume %s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:1379 +#: cinder/volume/drivers/huawei/ssh_common.py:779 #, python-format -msgid "Instance %(instance_uuid)s not found" +msgid "create_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1383 -msgid "In ERROR state" +#: cinder/volume/drivers/huawei/ssh_common.py:785 +msgid "create_snapshot: Resource pool needs 1GB valid size at least." msgstr "" -#: cinder/virt/xenapi/vmops.py:1389 +#: cinder/volume/drivers/huawei/ssh_common.py:792 #, python-format -msgid "In %(task_state)s task_state, not RESIZE_VERIFY" +msgid "create_snapshot: Volume %(name)s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:1396 +#: cinder/volume/drivers/huawei/ssh_common.py:855 #, python-format -msgid "Error auto-confirming resize: %(e)s. Will retry later." +msgid "delete_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1418 -msgid "Could not get bandwidth info." +#: cinder/volume/drivers/huawei/ssh_common.py:865 +#, python-format +msgid "" +"delete_snapshot: Can not delete snapshot %s for it is a source LUN of " +"LUNCopy." msgstr "" -#: cinder/virt/xenapi/vmops.py:1469 -msgid "Injecting network info to xenstore" +#: cinder/volume/drivers/huawei/ssh_common.py:873 +#, python-format +msgid "delete_snapshot: Snapshot %(snap)s does not exist." 
msgstr "" -#: cinder/virt/xenapi/vmops.py:1483 -msgid "Creating vifs" +#: cinder/volume/drivers/huawei/ssh_common.py:916 +#, python-format +msgid "" +"%(func)s: %(msg)s\n" +"CLI command: %(cmd)s\n" +"CLI out: %(out)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1492 +#: cinder/volume/drivers/huawei/ssh_common.py:933 #, python-format -msgid "Creating VIF for network %(network_ref)s" +msgid "map_volume: Volume %s was not found." msgstr "" -#: cinder/virt/xenapi/vmops.py:1495 +#: cinder/volume/drivers/huawei/ssh_common.py:1079 #, python-format -msgid "Created VIF %(vif_ref)s, network %(network_ref)s" +msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1520 -msgid "Injecting hostname to xenstore" +#: cinder/volume/drivers/huawei/ssh_common.py:1102 +#, python-format +msgid "remove_map: Host %s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:1545 +#: cinder/volume/drivers/huawei/ssh_common.py:1106 #, python-format -msgid "" -"The agent call to %(method)s returned an invalid response: %(ret)r. " -"path=%(path)s; args=%(args)r" +msgid "remove_map: Volume %s does not exist." msgstr "" -#: cinder/virt/xenapi/vmops.py:1566 +#: cinder/volume/drivers/huawei/ssh_common.py:1119 #, python-format -msgid "TIMEOUT: The call to %(method)s timed out. args=%(args)r" +msgid "remove_map: No map between host %(host)s and volume %(volume)s." msgstr "" -#: cinder/virt/xenapi/vmops.py:1570 +#: cinder/volume/drivers/huawei/ssh_common.py:1138 #, python-format msgid "" -"NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. " -"args=%(args)r" +"_delete_map: There are IOs accessing the system. Retry to delete host map" +" %(mapid)s 10s later." msgstr "" -#: cinder/virt/xenapi/vmops.py:1575 +#: cinder/volume/drivers/huawei/ssh_common.py:1146 #, python-format -msgid "The call to %(method)s returned an error: %(e)s. args=%(args)r" +msgid "" +"_delete_map: Failed to delete host map %(mapid)s.\n" +"CLI out: %(out)s" msgstr "" -#: cinder/virt/xenapi/vmops.py:1661 -#, python-format -msgid "OpenSSL error: %s" +#: cinder/volume/drivers/huawei/ssh_common.py:1185 +msgid "_update_volume_stats: Updating volume stats." +msgstr "" + +#: cinder/volume/drivers/huawei/ssh_common.py:1277 +msgid "_check_conf_file: Config file invalid. StoragePool must be specified." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:52 -msgid "creating sr within volume_utils" +#: cinder/volume/drivers/huawei/ssh_common.py:1311 +msgid "" +"_get_device_type: The driver only supports Dorado5100 and Dorado 2100 G2 " +"now." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:55 cinder/virt/xenapi/volume_utils.py:83 +#: cinder/volume/drivers/huawei/ssh_common.py:1389 #, python-format -msgid "type is = %s" +msgid "" +"create_volume_from_snapshot: %(device)s does not support create volume " +"from snapshot." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:58 cinder/virt/xenapi/volume_utils.py:86 +#: cinder/volume/drivers/huawei/ssh_common.py:1396 #, python-format -msgid "name = %s" +msgid "create_cloned_volume: %(device)s does not support clone volume." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:71 +#: cinder/volume/drivers/huawei/ssh_common.py:1404 #, python-format -msgid "Created %(label)s as %(sr_ref)s." +msgid "extend_volume: %(device)s does not support extend volume." 
msgstr "" -#: cinder/virt/xenapi/volume_utils.py:76 cinder/virt/xenapi/volume_utils.py:174 -msgid "Unable to create Storage Repository" +#: cinder/volume/drivers/huawei/ssh_common.py:1413 +#, python-format +msgid "create_snapshot: %(device)s does not support snapshot." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:80 -msgid "introducing sr within volume_utils" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:132 +msgid "enter: do_setup" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:103 cinder/virt/xenapi/volume_utils.py:170 -#: cinder/virt/xenapi/volumeops.py:156 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:142 #, python-format -msgid "Introduced %(label)s as %(sr_ref)s." +msgid "Failed getting details for pool %s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:106 -msgid "Creating pbd for SR" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:179 +msgid "do_setup: No configured nodes." msgstr "" -#: cinder/virt/xenapi/volume_utils.py:108 -msgid "Plugging SR" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:182 +msgid "leave: do_setup" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:116 cinder/virt/xenapi/volumeops.py:160 -msgid "Unable to introduce Storage Repository" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:186 +msgid "enter: check_for_setup_error" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:127 cinder/virt/xenapi/volumeops.py:50 -msgid "Unable to get SR using uuid" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:190 +msgid "Unable to determine system name" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:129 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:193 +msgid "Unable to determine system id" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:196 +msgid "Unable to determine pool extent size" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:203 +#: cinder/volume/drivers/netapp/iscsi.py:122 +#: cinder/volume/drivers/netapp/nfs.py:639 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:157 #, python-format -msgid "Forgetting SR %s..." +msgid "%s is not set" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:137 -msgid "Unable to forget Storage Repository" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:209 +msgid "" +"Password or SSH private key is required for authentication: set either " +"san_password or san_private_key option" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:157 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:217 #, python-format -msgid "Introducing %s..." +msgid "" +"Illegal value %d specified for storwize_svc_flashcopy_timeout: valid " +"values are between 0 and 600" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:225 +msgid "leave: check_for_setup_error" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:186 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:235 #, python-format -msgid "Unable to find SR from VBD %s" +msgid "ensure_export: Volume %s not found on storage" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:254 +msgid "The connector does not contain the required information." 
msgstr "" -#: cinder/virt/xenapi/volume_utils.py:204 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:277 #, python-format -msgid "Ignoring exception %(exc)s when getting PBDs for %(sr_ref)s" +msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:297 +msgid "CHAP secret exists for host but CHAP is disabled" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:210 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:302 #, python-format -msgid "Ignoring exception %(exc)s when unplugging PBD %(pbd)s" +msgid "initialize_connection: Failed to get attributes for volume %s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:234 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:314 #, python-format -msgid "Unable to introduce VDI on SR %s" +msgid "Did not find expected column name in lsvdisk: %s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:242 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:316 #, python-format -msgid "Unable to get record of VDI %s on" +msgid "initialize_connection: Missing volume attribute for volume %s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:264 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:333 #, python-format -msgid "Unable to introduce VDI for SR %s" +msgid "" +"initialize_connection: No node found in I/O group %(gid)s for volume " +"%(vol)s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:274 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:342 #, python-format -msgid "Error finding vdis in SR %s" +msgid "initialize_connection: Did not find a preferred node for volume %s" +msgstr "" + +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:365 +msgid "" +"Could not get FC connection information for the host-volume connection. " +"Is the host configured properly for FC connections?" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:281 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:380 #, python-format -msgid "Unable to find vbd for vdi %s" +msgid "" +"initialize_connection: Failed to collect return properties for volume " +"%(vol)s and connector %(conn)s.\n" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:315 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:385 #, python-format -msgid "Unable to obtain target information %(data)s, %(mountpoint)s" +msgid "" +"leave: initialize_connection:\n" +" volume: %(vol)s\n" +" connector %(conn)s\n" +" properties: %(prop)s" msgstr "" -#: cinder/virt/xenapi/volume_utils.py:341 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:403 #, python-format -msgid "Mountpoint cannot be translated: %s" +msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:64 -msgid "Could not find VDI ref" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:411 +msgid "terminate_connection: Failed to get host name from connector." msgstr "" -#: cinder/virt/xenapi/volumeops.py:69 +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:421 #, python-format -msgid "Creating SR %s" +msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s" msgstr "" -#: cinder/virt/xenapi/volumeops.py:73 -msgid "Could not create SR" +#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:447 +msgid "create_volume_from_snapshot: Source and destination size differ." 
 msgstr ""
 
-#: cinder/virt/xenapi/volumeops.py:76
-msgid "Could not retrieve SR record"
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:459
+msgid "create_cloned_volume: Source and destination size differ."
 msgstr ""
 
-#: cinder/virt/xenapi/volumeops.py:81
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:470
 #, python-format
-msgid "Introducing SR %s"
+msgid "enter: extend_volume: volume %s"
 msgstr ""
 
-#: cinder/virt/xenapi/volumeops.py:85
-msgid "SR found in xapi database. No need to introduce"
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:474
+msgid "extend_volume: Extending a volume with snapshots is not supported."
 msgstr ""
 
-#: cinder/virt/xenapi/volumeops.py:90
-msgid "Could not introduce SR"
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:481
+#, python-format
+msgid "leave: extend_volume: volume %s"
 msgstr ""
 
-#: cinder/virt/xenapi/volumeops.py:94
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:497
 #, python-format
-msgid "Checking for SR %s"
+msgid "enter: migrate_volume: id=%(id)s, host=%(host)s"
 msgstr ""
 
-#: cinder/virt/xenapi/volumeops.py:106
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:523
 #, python-format
-msgid "SR %s not found in the xapi database"
+msgid "leave: migrate_volume: id=%(id)s, host=%(host)s"
 msgstr ""
 
-#: cinder/virt/xenapi/volumeops.py:112
-msgid "Could not forget SR"
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:540
+#, python-format
+msgid ""
+"enter: retype: id=%(id)s, new_type=%(new_type)s,diff=%(diff)s, "
+"host=%(host)s"
 msgstr ""
 
-#: cinder/virt/xenapi/volumeops.py:121
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:581
 #, python-format
-msgid "Attach_volume: %(connection_info)s, %(instance_name)s, %(mountpoint)s"
+msgid ""
+"exit: retype: id=%(id)s, new_type=%(new_type)s,diff=%(diff)s, "
+"host=%(host)s"
 msgstr ""
 
-#: cinder/virt/xenapi/volumeops.py:178
-#, python-format
-msgid "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s"
-msgstr "無法替 instance實例 %(instance_name)s , 建立 VDI 在SR %(sr_ref)s"
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:622
+msgid "Could not get pool data from the storage"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/__init__.py:623
+msgid "_update_volume_stats: Could not get storage pool data"
+msgstr ""
 
-#: cinder/virt/xenapi/volumeops.py:189
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:44
 #, python-format
-msgid "Unable to use SR %(sr_ref)s for instance %(instance_name)s"
-msgstr "無法替 instance實例 %(instance_name)s , 使用SR %(sr_ref)s"
+msgid "Could not find key in output of command %(cmd)s: %(out)s"
+msgstr ""
 
-#: cinder/virt/xenapi/volumeops.py:197
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:64
 #, python-format
-msgid "Unable to attach volume to instance %s"
-msgstr "無法掛載Volume 到虛擬機器 %s"
+msgid "Failed to get code level (%s)."
+msgstr ""
 
-#: cinder/virt/xenapi/volumeops.py:200
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:86
 #, python-format
-msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s"
-msgstr "掛載點 %(mountpoint)s 掛載到虛擬機器 %(instance_name)s"
+msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s"
+msgstr ""
 
-#: cinder/virt/xenapi/volumeops.py:210
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:143
 #, python-format
-msgid "Detach_volume: %(instance_name)s, %(mountpoint)s"
-msgstr "卸載_Volume: %(instance_name)s, %(mountpoint)s"
+msgid "WWPN on node %(node)s: %(wwpn)s"
+msgstr ""
 
-#: cinder/virt/xenapi/volumeops.py:219
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:165
 #, python-format
-msgid "Unable to locate volume %s"
-msgstr "找不到Volume %s"
+msgid "Failed to find host %s"
+msgstr ""
 
-#: cinder/virt/xenapi/volumeops.py:227
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:178
 #, python-format
-msgid "Unable to detach volume %s"
-msgstr "無法卸載 Volume %s"
+msgid "enter: get_host_from_connector: %s"
+msgstr ""
 
-#: cinder/virt/xenapi/volumeops.py:232
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:207
 #, python-format
-msgid "Unable to destroy vbd %s"
+msgid "leave: get_host_from_connector: host %s"
 msgstr ""
 
-#: cinder/virt/xenapi/volumeops.py:239
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:218
 #, python-format
-msgid "Error purging SR %s"
+msgid "enter: create_host: host %s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:224
+msgid "create_host: Host name is not unicode or string"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:235
+msgid "create_host: No initiators or wwpns supplied."
 msgstr ""
 
-#: cinder/virt/xenapi/volumeops.py:241
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:265
 #, python-format
-msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s"
-msgstr "掛載點 %(mountpoint)s 從虛擬機器 %(instance_name)s 卸載"
+msgid "leave: create_host: host %(host)s - %(host_name)s"
+msgstr ""
 
-#: cinder/vnc/xvp_proxy.py:98 cinder/vnc/xvp_proxy.py:103
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:275
 #, python-format
-msgid "Error in handshake: %s"
+msgid "enter: map_vol_to_host: volume %(volume_name)s to host %(host_name)s"
 msgstr ""
 
-#: cinder/vnc/xvp_proxy.py:119
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:301
 #, python-format
-msgid "Invalid request: %s"
+msgid ""
+"leave: map_vol_to_host: LUN %(result_lun)s, volume %(volume_name)s, host "
+"%(host_name)s"
 msgstr ""
 
-#: cinder/vnc/xvp_proxy.py:139
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:311
 #, python-format
-msgid "Request: %s"
+msgid "enter: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s"
 msgstr ""
 
-#: cinder/vnc/xvp_proxy.py:142
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:318
 #, python-format
-msgid "Request made with missing token: %s"
+msgid "unmap_vol_from_host: No mapping of volume %(vol_name)s to any host found."
 msgstr ""
 
-#: cinder/vnc/xvp_proxy.py:153
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:324
 #, python-format
-msgid "Request made with invalid token: %s"
+msgid ""
+"unmap_vol_from_host: Multiple mappings of volume %(vol_name)s found, no "
+"host specified."
 msgstr ""
 
-#: cinder/vnc/xvp_proxy.py:160
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:336
 #, python-format
-msgid "Unexpected error: %s"
+msgid ""
+"unmap_vol_from_host: No mapping of volume %(vol_name)s to host %(host)s "
+"found."
 msgstr ""
 
-#: cinder/vnc/xvp_proxy.py:180
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:348
 #, python-format
-msgid "Starting cinder-xvpvncproxy node (version %s)"
+msgid "leave: unmap_vol_from_host: volume %(volume_name)s from host %(host_name)s"
 msgstr ""
 
-#: cinder/volume/api.py:74 cinder/volume/api.py:220
-msgid "status must be available"
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:377
+msgid ""
+"Illegal value specified for storwize_svc_vol_rsize: set to either a "
+"percentage (0-100) or -1"
 msgstr ""
 
-#: cinder/volume/api.py:85
-#, python-format
-msgid "Quota exceeded for %(pid)s, tried to create %(size)sG volume"
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:383
+msgid ""
+"Illegal value specified for storwize_svc_vol_warning: set to a percentage"
+" (0-100)"
 msgstr ""
 
-#: cinder/volume/api.py:137
-#, fuzzy
-msgid "Volume status must be available or error"
-msgstr "Volume 狀態需要可被使用"
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:390
+msgid ""
+"Illegal value specified for storwize_svc_vol_grainsize: set to either 32,"
+" 64, 128, or 256"
+msgstr ""
 
-#: cinder/volume/api.py:142
-#, python-format
-msgid "Volume still has %d dependent snapshots"
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:397
+msgid "System does not support compression"
 msgstr ""
 
-#: cinder/volume/api.py:223
-msgid "already attached"
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:402
+msgid "If compression is set to True, rsize must also be set (not equal to -1)"
 msgstr ""
 
-#: cinder/volume/api.py:230
-msgid "already detached"
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:408
+#, python-format
+msgid ""
+"Illegal value %(prot)s specified for storwize_svc_connection_protocol: "
+"valid values are %(enabled)s"
 msgstr ""
 
-#: cinder/volume/api.py:292
-msgid "must be available"
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:417
+#, python-format
+msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s"
 msgstr ""
 
-#: cinder/volume/api.py:325
-#, fuzzy
-msgid "Volume Snapshot status must be available or error"
-msgstr "Volume 狀態需要可被使用"
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:452
+msgid "Protocol must be specified as ' iSCSI' or ' FC'."
+msgstr ""
 
-#: cinder/volume/driver.py:96
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:495
 #, python-format
-msgid "Recovering from a failed execute. Try number %s"
+msgid "enter: create_vdisk: vdisk %s "
 msgstr ""
 
-#: cinder/volume/driver.py:106
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:498
 #, python-format
-msgid "volume group %s doesn't exist"
+msgid "leave: _create_vdisk: volume %s "
 msgstr ""
 
-#: cinder/volume/driver.py:270
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:525
 #, python-format
-msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %d"
+msgid ""
+"Unexpected mapping status %(status)s for mapping %(id)s. Attributes: "
+"%(attr)s"
 msgstr ""
 
-#: cinder/volume/driver.py:318
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:535
 #, python-format
-msgid "Skipping remove_export. No iscsi_target provisioned for volume: %d"
+msgid ""
+"Mapping %(id)s prepare failed to complete within the allotted %(to)d "
+"seconds timeout. Terminating."
 msgstr ""
 
-#: cinder/volume/driver.py:327
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:544
 #, python-format
 msgid ""
-"Skipping remove_export. No iscsi_target is presently exported for volume:"
-" %d"
+"enter: run_flashcopy: execute FlashCopy from source %(source)s to target "
+"%(target)s"
 msgstr ""
 
-#: cinder/volume/driver.py:337
-msgid "ISCSI provider_location not stored, using discovery"
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:552
+#, python-format
+msgid "leave: run_flashcopy: FlashCopy started from %(source)s to %(target)s"
 msgstr ""
 
-#: cinder/volume/driver.py:384
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:572
 #, python-format
-msgid "Could not find iSCSI export for volume %s"
+msgid "Loopcall: _check_vdisk_fc_mappings(), vdisk %s"
 msgstr ""
 
-#: cinder/volume/driver.py:388
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:595
 #, python-format
-msgid "ISCSI Discovery: Found %s"
+msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s"
 msgstr ""
 
-#: cinder/volume/driver.py:466
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:631
 #, python-format
-msgid "Cannot confirm exported volume id:%(volume_id)s."
+msgid "Calling _ensure_vdisk_no_fc_mappings: vdisk %s"
 msgstr ""
 
-#: cinder/volume/driver.py:493
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:639
 #, python-format
-msgid "FAKE ISCSI: %s"
+msgid "enter: delete_vdisk: vdisk %s"
 msgstr ""
 
-#: cinder/volume/driver.py:505
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:641
 #, python-format
-msgid "rbd has no pool %s"
+msgid "Tried to delete non-existent vdisk %s."
 msgstr ""
 
-#: cinder/volume/driver.py:579
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:645
 #, python-format
-msgid "Sheepdog is not working: %s"
+msgid "leave: delete_vdisk: vdisk %s"
 msgstr ""
 
-#: cinder/volume/driver.py:581
-msgid "Sheepdog is not working"
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:649
+#, python-format
+msgid "enter: create_copy: snapshot %(src)s to %(tgt)s"
 msgstr ""
 
-#: cinder/volume/driver.py:680 cinder/volume/driver.py:685
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:654
 #, python-format
-msgid "LoggingVolumeDriver: %s"
+msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist"
 msgstr ""
 
-#: cinder/volume/manager.py:96
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:669
 #, python-format
-msgid "Re-exporting %s volumes"
+msgid "leave: _create_copy: snapshot %(tgt)s from vdisk %(src)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:691
+msgid "migrate_volume started without a vdisk copy in the expected pool."
 msgstr ""
 
-#: cinder/volume/manager.py:101
+#: cinder/volume/drivers/ibm/storwize_svc/helpers.py:743
 #, python-format
-msgid "volume %s: skipping export"
+msgid ""
+"Ignore change IO group as storage code level is %(code_level)s, below "
+"6.4.0.0"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:35
+#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:211
+#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:244
+#, fuzzy, python-format
+msgid ""
+"CLI Exception output:\n"
+" command: %(cmd)s\n"
+" stdout: %(out)s\n"
+" stderr: %(err)s"
+msgstr ""
+"%(description)s\n"
+"命令: %(cmd)s\n"
+"退出代碼: %(exit_code)s\n"
+"標準輸出: %(stdout)r\n"
+"標準錯誤輸出: %(stderr)r"
+
+#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:53
+#, python-format
+msgid "Expected no output from CLI command %(cmd)s, got %(out)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:65
+#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:256
+#, python-format
+msgid ""
+"Failed to parse CLI output:\n"
+" command: %(cmd)s\n"
+" stdout: %(out)s\n"
+" stderr: %(err)s"
+msgstr ""
+
+#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:142
+msgid "Must pass wwpn or host to lsfabric."
 msgstr ""
-#: cinder/volume/manager.py:107
+#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:158
 #, python-format
-msgid "volume %s: creating"
+msgid "Did not find success message nor error for %(fun)s: %(out)s"
+msgstr ""
+
+msgid ""
+"storwize_svc_multihostmap_enabled is set to False, not allowing multi "
+"host mapping."
 msgstr ""
-#: cinder/volume/manager.py:119
+#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:347
 #, python-format
-msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG"
+msgid "Did not find expected key %(key)s in %(fun)s: %(raw)s"
 msgstr ""
-#: cinder/volume/manager.py:131
+#: cinder/volume/drivers/ibm/storwize_svc/ssh.py:382
 #, python-format
-msgid "volume %s: creating export"
+msgid ""
+"Unexpected CLI response: header/row mismatch. header: %(header)s, row: "
+"%(row)s"
 msgstr ""
-#: cinder/volume/manager.py:144
+#: cinder/volume/drivers/netapp/api.py:419
 #, python-format
-msgid "volume %s: created successfully"
+msgid "No element by given name %s."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/api.py:440
+msgid "Not a valid value for NaElement."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/api.py:444
+msgid "NaElement name cannot be null."
 msgstr ""
 
-#: cinder/volume/manager.py:153
-msgid "Volume is still attached"
+#: cinder/volume/drivers/netapp/api.py:468
+msgid "Type cannot be converted into NaElement."
 msgstr ""
 
-#: cinder/volume/manager.py:155
-msgid "Volume is not local to this node"
+#: cinder/volume/drivers/netapp/common.py:75
+msgid "Required configuration not found"
 msgstr ""
 
-#: cinder/volume/manager.py:159
+#: cinder/volume/drivers/netapp/common.py:103
 #, python-format
-msgid "volume %s: removing export"
+msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s"
 msgstr ""
 
-#: cinder/volume/manager.py:161
+#: cinder/volume/drivers/netapp/common.py:109
 #, python-format
-msgid "volume %s: deleting"
+msgid "Storage family %s is not supported"
 msgstr ""
 
-#: cinder/volume/manager.py:164
+#: cinder/volume/drivers/netapp/common.py:116
 #, python-format
-msgid "volume %s: volume is busy"
+msgid "No default storage protocol found for storage family %(storage_family)s"
 msgstr ""
 
-#: cinder/volume/manager.py:176
+#: cinder/volume/drivers/netapp/common.py:123
 #, python-format
-msgid "volume %s: deleted successfully"
+msgid ""
+"Protocol %(storage_protocol)s is not supported for storage family "
+"%(storage_family)s"
 msgstr ""
 
-#: cinder/volume/manager.py:183
+#: cinder/volume/drivers/netapp/common.py:130
 #, python-format
-msgid "snapshot %s: creating"
+msgid ""
+"NetApp driver of family %(storage_family)s and protocol "
+"%(storage_protocol)s loaded"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/common.py:139
+msgid "Only loading netapp drivers supported."
 msgstr ""
 
-#: cinder/volume/manager.py:187
+#: cinder/volume/drivers/netapp/common.py:158
 #, python-format
-msgid "snapshot %(snap_name)s: creating"
+msgid ""
+"The configured NetApp driver is deprecated. Please refer to the link to "
+"resolve the issue '%s'."
 msgstr ""
 
-#: cinder/volume/manager.py:202
+#: cinder/volume/drivers/netapp/iscsi.py:69
 #, python-format
-msgid "snapshot %s: created successfully"
+msgid "No metadata property %(prop)s defined for the LUN %(name)s"
 msgstr ""
 
-#: cinder/volume/manager.py:211
+#: cinder/volume/drivers/netapp/iscsi.py:105
 #, python-format
-msgid "snapshot %s: deleting"
+msgid "Using NetApp filer: %s"
 msgstr ""
 
-#: cinder/volume/manager.py:214
+#: cinder/volume/drivers/netapp/iscsi.py:150
+msgid "Success getting LUN list from server"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:166
 #, python-format
-msgid "snapshot %s: snapshot is busy"
+msgid "Created LUN with name %s"
 msgstr ""
 
-#: cinder/volume/manager.py:226
+#: cinder/volume/drivers/netapp/iscsi.py:175
 #, python-format
-msgid "snapshot %s: deleted successfully"
+msgid "No entry in LUN table for volume/snapshot %(name)s."
 msgstr ""
 
-#: cinder/volume/manager.py:310
-msgid "Checking volume capabilities"
+#: cinder/volume/drivers/netapp/iscsi.py:191
+#, python-format
+msgid "Destroyed LUN %s"
 msgstr ""
 
-#: cinder/volume/manager.py:314
+#: cinder/volume/drivers/netapp/iscsi.py:227
 #, python-format
-msgid "New capabilities found: %s"
+msgid "Mapped LUN %(name)s to the initiator %(initiator_name)s"
 msgstr ""
 
-#: cinder/volume/manager.py:325
-msgid "Clear capabilities"
+#: cinder/volume/drivers/netapp/iscsi.py:232
+#, python-format
+msgid ""
+"Successfully fetched target details for LUN %(name)s and initiator "
+"%(initiator_name)s"
 msgstr ""
 
-#: cinder/volume/manager.py:329
+#: cinder/volume/drivers/netapp/iscsi.py:238
 #, python-format
-msgid "Notification {%s} received"
+msgid "Failed to get LUN target details for the LUN %s"
 msgstr ""
 
-#: cinder/volume/netapp.py:79
+#: cinder/volume/drivers/netapp/iscsi.py:249
 #, python-format
-msgid "API %(name)sfailed: %(reason)s"
+msgid "Failed to get target portal for the LUN %s"
 msgstr ""
 
-#: cinder/volume/netapp.py:109
+#: cinder/volume/drivers/netapp/iscsi.py:252
 #, python-format
-msgid "%s is not set"
+msgid "Failed to get target IQN for the LUN %s"
 msgstr ""
 
-#: cinder/volume/netapp.py:128
-msgid "Connected to DFM server"
+#: cinder/volume/drivers/netapp/iscsi.py:290
+#, python-format
+msgid "Snapshot %s deletion successful"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:310
+#: cinder/volume/drivers/netapp/iscsi.py:565
+#: cinder/volume/drivers/netapp/nfs.py:99
+#: cinder/volume/drivers/netapp/nfs.py:206
+#, python-format
+msgid "Resizing %s failed. Cleaning volume."
 msgstr ""
 
-#: cinder/volume/netapp.py:159
+#: cinder/volume/drivers/netapp/iscsi.py:325
 #, python-format
-msgid "Job failed: %s"
+msgid "Unmapped LUN %(name)s from the initiator %(initiator_name)s"
 msgstr ""
 
-#: cinder/volume/netapp.py:240
-msgid "Failed to provision dataset member"
+#: cinder/volume/drivers/netapp/iscsi.py:412
+#, python-format
+msgid "Error mapping lun. Code: %(code)s, Message: %(message)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:431
+#, python-format
+msgid "Error unmapping lun. Code: %(code)s, Message: %(message)s"
 msgstr ""
 
-#: cinder/volume/netapp.py:252
-msgid "No LUN was created by the provision job"
-msgstr ""
+#: cinder/volume/drivers/netapp/iscsi.py:511
+msgid "Object is not a NetApp LUN."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:543
+#, python-format
+msgid "Message: %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:545
+#, python-format
+msgid "Error getting lun attribute. Exception: %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:600
+#, python-format
+msgid "No need to extend volume %s as it is already the requested new size."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:606
+#, python-format
+msgid "Resizing lun %s directly to new size."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:633
+#, python-format
+msgid "Lun %(path)s geometry failed. Message - %(msg)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:662
+#, python-format
+msgid "Moving lun %(name)s to %(new_name)s."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:677
+#, python-format
+msgid "Resizing lun %s using sub clone to new size."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:684
+#, python-format
+msgid "%s cannot be sub clone resized as it is hosted on compressed volume"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:690
+#, python-format
+msgid "%s cannot be sub clone resized as it contains no blocks."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:707
+#, python-format
+msgid "Post clone resize lun %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:718
+#, python-format
+msgid "Failure staging lun %s to tmp."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:723
+#, python-format
+msgid "Failure moving new cloned lun to %s."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:727
+#, python-format
+msgid "Failure deleting staged tmp lun %s."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:730
+#, python-format
+msgid "Unknown exception in post clone resize lun %s."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:732
+#, python-format
+msgid "Exception details: %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:736
+msgid "Getting lun block count."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:741
+#, python-format
+msgid "Failure getting lun info for %s."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:785
+#, python-format
+msgid "Failed to get vol with required size and extra specs for volume: %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:796
+#, python-format
+msgid "Error provisioning vol %(name)s on %(volume)s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:841
+#, python-format
+msgid "No iscsi service found for vserver %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:982
+#, python-format
+msgid "Cloned LUN with new name %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:986
+#, python-format
+msgid "No cloned lun named %s found on the filer"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1087
+msgid "Cluster ssc is not updated. No volume stats found."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1149
+#: cinder/volume/drivers/netapp/nfs.py:1080
+msgid "Unsupported ONTAP version. ONTAP version 7.3.1 and above is supported."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1153
+#: cinder/volume/drivers/netapp/nfs.py:1084
+#: cinder/volume/drivers/netapp/utils.py:320
+msgid "Api version could not be determined."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1164
+#, fuzzy, python-format
+msgid "Failed to get vol with required size for volume: %s"
+msgstr "找不到Volume %s"
+
+#: cinder/volume/drivers/netapp/iscsi.py:1273
+#, python-format
+msgid "Error finding luns for volume %s. Verify volume exists."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1390
+#, python-format
+msgid "Clone operation with src %(name)s and dest %(new_name)s completed"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1393
+#, python-format
+msgid "Clone operation with src %(name)s and dest %(new_name)s failed"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1456
+msgid "Volume refresh job already running. Returning..."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1462
+#, python-format
+msgid "Error refreshing vol capacity. Message: %s"
+msgstr ""
+
+#: cinder/volume/drivers/netapp/iscsi.py:1470
+#, python-format
+msgid "Refreshing capacity info for %s."
+msgstr ""
+
+#: cinder/volume/drivers/netapp/nfs.py:104
+#: cinder/volume/drivers/netapp/nfs.py:211
+#, python-format
+msgid "NFS file %s not discovered."
+msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:224 +#, python-format +msgid "Copied image to volume %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:230 +#, python-format +msgid "Registering image in cache %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:237 +#, python-format +msgid "" +"Exception while registering image %(image_id)s in cache. Exception: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:250 +#, python-format +msgid "Found cache file for image %(image_id)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:263 +#, python-format +msgid "Cloning img from cache for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:271 +msgid "Image cache cleaning in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:282 +msgid "Image cache cleaning in progress." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:293 +#, python-format +msgid "Cleaning cache for share %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:298 +#, python-format +msgid "Files to be queued for deletion %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:305 +#, python-format +msgid "Exception during cache cleaning %(share)s. Message - %(ex)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:311 +msgid "Image cache cleaning done." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:336 +#, python-format +msgid "Bytes to free %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:343 +#, python-format +msgid "Delete file path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:358 +#, python-format +msgid "Deleting file at path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:363 +#, python-format +msgid "Exception during deleting %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:395 +#, python-format +msgid "Unexpected exception in cloning image %(image_id)s. Message: %(msg)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:411 +#, python-format +msgid "Cloning image %s from cache" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:415 +#, python-format +msgid "Cache share: %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:425 +#, python-format +msgid "Unexpected exception during image cloning in share %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:431 +#, python-format +msgid "Cloning image %s directly in share" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:436 +#, python-format +msgid "Share is cloneable %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:443 +#, python-format +msgid "Image is raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:450 +#, python-format +msgid "Image will locally be converted to raw %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:457 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:467 +#, python-format +msgid "Performing post clone for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:474 +msgid "NFS file could not be discovered." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:478 +msgid "Checking file for resize" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:482 +#, python-format +msgid "Resizing file to %sG" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:488 +msgid "Resizing image file failed." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:510 +msgid "Discover file retries exhausted." 
+msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:529 +#, python-format +msgid "Image location not in the expected format %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:557 +#, python-format +msgid "Found possible share matches %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:561 +msgid "Unexpected exception while short listing used share." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:599 +#, python-format +msgid "Extending volume %s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:710 +#, python-format +msgid "Shares on vserver %s will only be used for provisioning." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:714 +#: cinder/volume/drivers/netapp/nfs.py:892 +msgid "No vserver set in config. SSC will be disabled." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:757 +#, python-format +msgid "Exception creating vol %(name)s on share %(share)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:765 +#, python-format +msgid "Volume %s could not be created on shares." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:815 +#, python-format +msgid "No interface found on cluster for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:856 +#, python-format +msgid "" +"No volume on cluster with vserver\n" +" %(vserver)s and junction path " +"%(junction)s\n" +" " +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:862 +#, python-format +msgid "" +"Cloning with params volume %(volume)s, src %(src_path)s,\n" +" dest %(dest_path)s, vserver %(vserver)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:924 +msgid "No cluster ssc stats found. Wait for next volume stats update." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:941 +msgid "No shares found hence skipping ssc refresh." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:978 +#: cinder/volume/drivers/netapp/nfs.py:1221 +#, python-format +msgid "Shortlisted del elg files %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:983 +#: cinder/volume/drivers/netapp/nfs.py:1226 +#, python-format +msgid "Getting file usage for %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:988 +#: cinder/volume/drivers/netapp/nfs.py:1231 +#, python-format +msgid "file-usage for path %(path)s is %(bytes)s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1005 +#: cinder/volume/drivers/netapp/nfs.py:1268 +#, python-format +msgid "Share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1007 +#: cinder/volume/drivers/netapp/nfs.py:1270 +#, python-format +msgid "No share match found for ip %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1038 +#, python-format +msgid "Found volume %(vol)s for share %(share)s." +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1129 +#, python-format +msgid "No storage path found for export path %s" +msgstr "" + +#: cinder/volume/drivers/netapp/nfs.py:1139 +#, python-format +msgid "Cloning with src %(src_path)s, dest %(dest_path)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:241 +#, python-format +msgid "Unexpected error while creating ssc vol list. Message - %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:272 +#, python-format +msgid "Exception querying aggr options. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:313 +#, python-format +msgid "Exception querying sis information. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:347 +#, python-format +msgid "Exception querying mirror information. 
%s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:379 +#, python-format +msgid "Exception querying storage disk. %s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:421 +#, python-format +msgid "Running stale ssc refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:455 +#, python-format +msgid "Successfully completed stale refresh job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:482 +#, python-format +msgid "Running cluster latest ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:488 +#, python-format +msgid "Successfully completed ssc job for %(server)s and vserver %(vs)s" +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:500 +msgid "Backend not a VolumeDriver." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:502 +msgid "Backend server not NaServer." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:505 +msgid "ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:517 +msgid "refresh stale ssc job in progress. Returning... " +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:616 +msgid "Fatal error: User not permitted to query NetApp volumes." +msgstr "" + +#: cinder/volume/drivers/netapp/ssc_utils.py:623 +#, python-format +msgid "" +"The user does not have access or sufficient privileges to use all ssc " +"apis. The ssc features %s may not work as expected." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:122 +msgid "ems executed successfully." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:124 +#, python-format +msgid "Failed to invoke ems. Message : %s" +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:137 +msgid "" +"It is not the recommended way to use drivers by NetApp. Please use " +"NetAppDriver to achieve the functionality." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:160 +msgid "Requires an NaServer instance." +msgstr "" + +#: cinder/volume/drivers/netapp/utils.py:317 +msgid "Unsupported Clustered Data ONTAP version." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:99 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:150 +#, python-format +msgid "Extending volume: %(id)s New size: %(size)s GB" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:166 +#, python-format +msgid "Volume %s does not exist, it seems it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:179 +#, python-format +msgid "Cannot delete snapshot %(origin): %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:190 +#, python-format +msgid "Creating temp snapshot of the original volume: %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:200 +#: cinder/volume/drivers/nexenta/nfs.py:200 +#, python-format +msgid "Volume creation failed, deleting created snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:205 +#: cinder/volume/drivers/nexenta/nfs.py:205 +#, python-format +msgid "Failed to delete zfs snapshot %(volume_name)s@%(name)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:223 +#, python-format +msgid "Enter: migrate_volume: id=%(id)s, host=%(host)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:250 +#, python-format +msgid "Remote NexentaStor appliance at %s should be SSH-bound." 
+msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:267 +#, python-format +msgid "" +"Cannot send source snapshot %(src)s to destination %(dst)s. Reason: " +"%(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:275 +#, python-format +msgid "" +"Cannot delete temporary source snapshot %(src)s on NexentaStor Appliance:" +" %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:281 +#, python-format +msgid "Cannot delete source volume %(volume)s on NexentaStor Appliance: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:318 +#, python-format +msgid "Snapshot %s does not exist, it seems it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:439 +#: cinder/volume/drivers/windows/windows_utils.py:230 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:449 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:461 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:471 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:481 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:514 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/iscsi.py:522 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:83 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:88 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:89 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:90 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/jsonrpc.py:96 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:85 +#, python-format +msgid "Volume %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:89 +#, python-format +msgid "Folder %s does not exist in Nexenta Store appliance" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:114 +#, python-format +msgid "Creating folder on Nexenta Store %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:146 +#, python-format +msgid "Cannot destroy created folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:176 +#, python-format +msgid "Cannot destroy cloned folder: %(vol)s/%(folder)s" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:227 +#, python-format +msgid "Folder %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:237 +#: cinder/volume/drivers/nexenta/nfs.py:268 +#, python-format +msgid "Snapshot %s does not exist, it was already deleted." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:302 +#, python-format +msgid "Creating regular file: %s.This may take some time." 
+msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:313 +#, python-format +msgid "Regular file: %s created." +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:365 +#, python-format +msgid "Sharing folder %s on Nexenta Store" +msgstr "" + +#: cinder/volume/drivers/nexenta/nfs.py:393 +#, python-format +msgid "Shares loaded: %s" +msgstr "" + +#: cinder/volume/drivers/nexenta/utils.py:46 +#, python-format +msgid "Invalid value: \"%s\"" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:93 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:99 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:107 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:137 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:190 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:246 +#, python-format +msgid "Snapshot info: %(name)s => %(attributes)s" +msgstr "" + +#: cinder/volume/drivers/san/hp_lefthand.py:321 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/drivers/san/san.py:169 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/drivers/san/san.py:173 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:79 +#, python-format +msgid "Cannot parse list-view output: %s" +msgstr "" + +#: cinder/volume/drivers/san/solaris.py:174 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:166 +#, python-format +msgid "Invalid hp3parclient version. Version %s or greater required." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:179 +#, python-format +msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:193 +#, python-format +msgid "HP3PARCommon %(common_ver)s, hp3parclient %(rest_ver)s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:212 +#: cinder/volume/drivers/san/hp/hp_3par_common.py:494 +#, python-format +msgid "CPG (%s) doesn't exist on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:228 +#, python-format +msgid "Failed to get domain because CPG (%s) doesn't exist on array." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:247 +#, python-format +msgid "Error extending volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:347 +#, python-format +msgid "command %s failed" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:390 +#, fuzzy, python-format +msgid "Error running ssh command: %s" +msgstr "非預期的執行錯誤" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:586 +#, python-format +msgid "VV Set %s does not exist." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:633 +#, python-format +msgid "Must specify a valid persona %(valid)s, value '%(persona)s' is invalid." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:684 +#, python-format +msgid "" +"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " +"invalid." 
+msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:752 +#, python-format +msgid "Volume (%s) already exists on array" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1004 +#, python-format +msgid "Failure in update_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1019 +#, python-format +msgid "Failure in clear_volume_key_value_pair:%s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1031 +#, python-format +msgid "Error attaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_common.py:1039 +#, python-format +msgid "Error detaching volume %s" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:124 +#, python-format +msgid "Invalid IP address format '%s'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:158 +#, python-format +msgid "" +"Found invalid iSCSI IP address(s) in configuration option(s) " +"hp3par_iscsi_ips or iscsi_ip_address '%s.'" +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:164 +msgid "At least one valid iSCSI IP address must be set." +msgstr "" + +#: cinder/volume/drivers/san/hp/hp_3par_iscsi.py:266 +msgid "Least busy iSCSI port not found, using first iSCSI port in list." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:75 +#, python-format +msgid "Failure while invoking function: %(func)s. Error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:162 +#, python-format +msgid "Error while terminating session: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:165 +msgid "Successfully established connection to the server." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:172 +#, python-format +msgid "Error while logging out the user: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:218 +#, python-format +msgid "" +"Not authenticated error occurred. Will create session and try API call " +"again: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:258 +#, python-format +msgid "Task: %(task)s progress: %(prog)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:262 +#, python-format +msgid "Task %s status: success." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:266 +#: cinder/volume/drivers/vmware/api.py:271 +#, python-format +msgid "Task: %(task)s failed with error: %(err)s." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:290 +msgid "Lease is ready." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:294 +msgid "Lease initializing..." +msgstr "" + +#: cinder/volume/drivers/vmware/api.py:304 +#, python-format +msgid "Error: unknown lease state %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:51 +#, python-format +msgid "Read %(bytes)s out of %(max)s from ThreadSafePipe." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:56 +#, python-format +msgid "Completed transfer of size %s." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:102 +#, python-format +msgid "Initiating image service update on image: %(image)s with meta: %(meta)s" +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:117 +#, python-format +msgid "Glance image: %s is now active." +msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:123 +#, python-format +msgid "Glance image: %s is in killed state." 
+msgstr "" + +#: cinder/volume/drivers/vmware/io_util.py:132 +#, python-format +msgid "Glance image %(id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:171 +#, python-format +msgid "" +"Exception during HTTP connection close in VMwareHTTPWrite. Exception is " +"%s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:203 +#: cinder/volume/drivers/vmware/read_write_util.py:292 +msgid "Could not retrieve URL from lease." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:206 +#, python-format +msgid "Opening vmdk url: %s for write." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:231 +#, python-format +msgid "Written %s bytes to vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:242 +#: cinder/volume/drivers/vmware/read_write_util.py:318 +#, python-format +msgid "Updating progress to %s percent." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:258 +#: cinder/volume/drivers/vmware/read_write_util.py:334 +msgid "Lease released." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:260 +#: cinder/volume/drivers/vmware/read_write_util.py:336 +#, python-format +msgid "Lease is already in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:295 +#, python-format +msgid "Opening vmdk url: %s for read." +msgstr "" + +#: cinder/volume/drivers/vmware/read_write_util.py:307 +#, python-format +msgid "Read %s bytes from vmdk." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:150 +#, python-format +msgid "Error(s): %s occurred in the call to RetrievePropertiesEx." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:189 +#, python-format +msgid "No such SOAP method %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:198 +#, python-format +msgid "httplib error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:209 +#, python-format +msgid "Socket error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:218 +#, python-format +msgid "Type error in %(attr)s: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vim.py:225 +#, python-format +msgid "Error in %(attr)s. Detailed error: %(excep)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:112 +#, python-format +msgid "Returning spec value %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:115 +#, python-format +msgid "Invalid spec value: %s specified." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:118 +#, python-format +msgid "Returning default spec value: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:169 +#, python-format +msgid "%s not set." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:174 +#, python-format +msgid "Successfully setup driver: %(driver)s for server: %(ip)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:219 +msgid "Backing not available, no operation to be performed." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:287 +#, python-format +msgid "" +"Unable to pick datastore to accommodate %(size)s bytes from the " +"datastores: %(dss)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:293 +#, python-format +msgid "" +"Selected datastore: %(datastore)s with %(host_count)d connected host(s) " +"for the volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:375 +#, python-format +msgid "" +"Unable to find suitable datastore for volume of size: %(vol)s GB under " +"host: %(host)s. 
More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:385 +#, python-format +msgid "Unable to find host to accommodate a disk of size: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:412 +#, python-format +msgid "" +"Unable to find suitable datastore for volume: %(vol)s under host: " +"%(host)s. More details: %(excep)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:422 +#, python-format +msgid "Unable to create volume: %s in the inventory." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:441 +#, python-format +msgid "The instance: %s for which initialize connection is called, exists." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:448 +#, python-format +msgid "There is no backing for the volume: %s. Need to create one." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:456 +msgid "The instance for which initialize connection is called, does not exist." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:461 +#, python-format +msgid "Trying to boot from an empty volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:470 +#, python-format +msgid "" +"Returning connection_info: %(info)s for volume: %(volume)s with " +"connector: %(connector)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:518 +#, python-format +msgid "Snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:523 +#, python-format +msgid "There is no backing, so will not create snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:528 +#, python-format +msgid "Successfully created snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:549 +#, python-format +msgid "Delete snapshot of volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:554 +#, python-format +msgid "There is no backing, and so there is no snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:558 +#, python-format +msgid "Successfully deleted snapshot: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:586 +#, python-format +msgid "Successfully cloned new backing: %(back)s from source VMDK file: %(vmdk)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:603 +#, python-format +msgid "" +"There is no backing for the source volume: %(svol)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:633 +#, python-format +msgid "" +"There is no backing for the source snapshot: %(snap)s. Not creating any " +"backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:642 +#: cinder/volume/drivers/vmware/vmdk.py:982 +#, python-format +msgid "" +"There is no snapshot point for the snapshoted volume: %(snap)s. Not " +"creating any backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:678 +#, python-format +msgid "Cannot create image of disk format: %s. Only vmdk disk format is accepted." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:713 +#: cinder/volume/drivers/vmware/vmdk.py:771 +#, python-format +msgid "Fetching glance image: %(id)s to server: %(host)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:722 +#: cinder/volume/drivers/vmware/vmdk.py:792 +#, python-format +msgid "Done copying image: %(id)s to volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:725 +#, python-format +msgid "" +"Exception in copy_image_to_volume: %(excep)s. Deleting the backing: " +"%(back)s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:746 +#, python-format +msgid "Exception in _select_ds_for_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:749 +#, python-format +msgid "Selected datastore %(ds)s for new volume of size %(size)s GB." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:784 +#, python-format +msgid "Exception in copy_image_to_volume: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:787 +#, python-format +msgid "Deleting the backing: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:808 +#, python-format +msgid "Copy glance image: %s to create new volume." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:842 +msgid "Upload to glance of attached volume is not supported." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:847 +#, python-format +msgid "Copy Volume: %s to new image." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:853 +#, python-format +msgid "Backing not found, creating for volume: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:872 +#, python-format +msgid "Done copying volume %(vol)s to a new image %(img)s" +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:922 +#, python-format +msgid "Relocating volume: %(backing)s to %(ds)s and %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:961 +#: cinder/volume/drivers/vmware/volumeops.py:630 +#, python-format +msgid "Successfully created clone: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:974 +#, python-format +msgid "" +"There is no backing for the snapshoted volume: %(snap)s. Not creating any" +" backing for the volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1010 +#, python-format +msgid "" +"There is no backing for the source volume: %(src)s. Not creating any " +"backing for volume: %(vol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmdk.py:1018 +#, python-format +msgid "Linked clone of source volume not supported in state: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:94 +#, python-format +msgid "Downloading image: %s from glance image server as a flat vmdk file." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:107 +#: cinder/volume/drivers/vmware/vmware_images.py:126 +#, python-format +msgid "Downloaded image: %s from glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:113 +#, python-format +msgid "Downloading image: %s from glance image server using HttpNfc import." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:132 +#, python-format +msgid "Uploading image: %s to the Glance image server using HttpNfc export." +msgstr "" + +#: cinder/volume/drivers/vmware/vmware_images.py:158 +#, python-format +msgid "Uploaded image: %s to the Glance image server." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:87 +#, python-format +msgid "Did not find any backing with name: %s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:94 +#, python-format +msgid "Deleting the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:97 +#, python-format +msgid "Initiated deletion of VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:99 +#, python-format +msgid "Deleted the VM backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:237 +#, python-format +msgid "There are no valid datastores attached to %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:289 +#, python-format +msgid "" +"Creating folder: %(child_folder_name)s under parent folder: " +"%(parent_folder)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:306 +#, python-format +msgid "Child folder already present: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:314 +#, python-format +msgid "Created child folder: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:365 +#, python-format +msgid "Spec for creating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:383 +#, python-format +msgid "" +"Creating volume backing name: %(name)s disk_type: %(disk_type)s size_kb: " +"%(size_kb)s at folder: %(folder)s resourse pool: %(resource_pool)s " +"datastore name: %(ds_name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:395 +#, python-format +msgid "Initiated creation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:398 +#, python-format +msgid "Successfully created volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:438 +#, python-format +msgid "Spec for relocating the backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:452 +#, python-format +msgid "" +"Relocating backing: %(backing)s to datastore: %(ds)s and resource pool: " +"%(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:462 +#, python-format +msgid "Initiated relocation of volume backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:464 +#, python-format +msgid "" +"Successfully relocated volume backing: %(backing)s to datastore: %(ds)s " +"and resource pool: %(rp)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:474 +#, python-format +msgid "Moving backing: %(backing)s to folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:479 +#, python-format +msgid "Initiated move of volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:482 +#, python-format +msgid "Successfully moved volume backing: %(backing)s into the folder: %(fol)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:494 +#, python-format +msgid "Snapshoting backing: %(backing)s with name: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:501 +#, python-format +msgid "Initiated snapshot of volume backing: %(backing)s named: %(name)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:505 +#, python-format +msgid "Successfully created snapshot: %(snap)s for volume backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:553 +#, python-format +msgid "Deleting the snapshot: %(name)s from backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:558 +#, python-format +msgid "" +"Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " +"delete anything." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:565 +#, python-format +msgid "Initiated snapshot: %(name)s deletion for backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:569 +#, python-format +msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:597 +#, python-format +msgid "Spec for cloning the backing: %s." 
+msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:613 +#, python-format +msgid "" +"Creating a clone of backing: %(back)s, named: %(name)s, clone type: " +"%(type)s from snapshot: %(snap)s on datastore: %(ds)s" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:627 +#, python-format +msgid "Initiated clone of backing: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:638 +#, python-format +msgid "Deleting file: %(file)s under datacenter: %(dc)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:646 +#, python-format +msgid "Initiated deletion via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:648 +#, python-format +msgid "Successfully deleted file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:701 +msgid "Copying disk data before snapshot of the VM" +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:711 +#, python-format +msgid "Initiated copying disk data via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:713 +#, python-format +msgid "Successfully copied disk at: %(src)s to: %(dest)s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:722 +#, python-format +msgid "Deleting vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:729 +#, python-format +msgid "Initiated deleting vmdk file via task: %s." +msgstr "" + +#: cinder/volume/drivers/vmware/volumeops.py:731 +#, python-format +msgid "Deleted vmdk file: %s." +msgstr "" + +#: cinder/volume/drivers/windows/windows.py:102 +#, python-format +msgid "Creating folder %s " +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:47 +#, python-format +msgid "" +"check_for_setup_error: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:53 +msgid "check_for_setup_error: there is no ISCSI traffic listening." +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:63 +#, python-format +msgid "" +"get_host_information: the state of the WT Portal could not be verified. " +"WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:73 +#, python-format +msgid "" +"get_host_information: the ISCSI target information could not be " +"retrieved. WMI exception: %s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:105 +#, python-format +msgid "" +"associate_initiator_with_iscsi_target: an association between initiator: " +"%(init)s and target name: %(target)s could not be established. WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:123 +#, python-format +msgid "" +"delete_iscsi_target: error when deleting the iscsi target associated with" +" target name: %(target)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:139 +#, python-format +msgid "" +"create_volume: error when creating the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:157 +#, python-format +msgid "" +"delete_volume: error when deleting the volume name: %(vol_name)s . WMI " +"exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:177 +#, python-format +msgid "" +"create_snapshot: error when creating the snapshot name: %(vol_name)s . 
" +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:193 +#, python-format +msgid "" +"create_volume_from_snapshot: error when creating the volume name: " +"%(vol_name)s from snapshot name: %(snap_name)s. WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:208 +#, python-format +msgid "" +"delete_snapshot: error when deleting the snapshot name: %(snap_name)s . " +"WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:223 +#, python-format +msgid "" +"create_iscsi_target: error when creating iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:240 +#, python-format +msgid "" +"remove_iscsi_target: error when deleting iscsi target: %(tar_name)s . WMI" +" exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:255 +#, python-format +msgid "" +"add_disk_to_target: error adding disk associated to volume : %(vol_name)s" +" to the target name: %(tar_name)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:273 +#, python-format +msgid "" +"copy_vhd_disk: error when copying disk from source path : %(src_path)s to" +" destination path: %(dest_path)s . WMI exception: %(wmi_exc)s" +msgstr "" + +#: cinder/volume/drivers/windows/windows_utils.py:290 +#, python-format +msgid "" +"extend: error when extending the volume: %(vol_name)s .WMI exception: " +"%(wmi_exc)s" +msgstr "" + +#: cinder/volume/flows/common.py:52 +#, python-format +msgid "Restoring source %(source_volid)s status to %(status)s" +msgstr "" + +#: cinder/volume/flows/common.py:58 +#, python-format +msgid "" +"Failed setting source volume %(source_volid)s back to its initial " +"%(source_status)s status" +msgstr "" + +#: cinder/volume/flows/common.py:83 +#, python-format +msgid "Updating volume: %(volume_id)s with %(update)s due to: %(reason)s" +msgstr "" + +#: cinder/volume/flows/common.py:90 +#: cinder/volume/flows/manager/create_volume.py:676 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(update)s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:81 +#, python-format +msgid "Originating snapshot status must be one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:103 +#, python-format +msgid "" +"Unable to create a volume from an originating source volume when its " +"status is not one of %s values" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:126 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than the snapshot size " +"%(snap_size)sGB. They must be >= original snapshot size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:135 +#, python-format +msgid "" +"Volume size %(size)sGB cannot be smaller than original volume size " +"%(source_size)sGB. They must be >= original volume size." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:144 +#, python-format +msgid "Volume size %(size)s must be an integer and greater than 0" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:186 +#, python-format +msgid "" +"Size of specified image %(image_size)sGB is larger than volume size " +"%(volume_size)sGB." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:194 +#, python-format +msgid "" +"Volume size %(volume_size)sGB cannot be smaller than the image minDisk " +"size %(min_disk)sGB." 
+msgstr "" + +#: cinder/volume/flows/api/create_volume.py:212 +#, python-format +msgid "Metadata property key %s greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:217 +#, python-format +msgid "Metadata property key %s value greater than 255 characters" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:254 +#, python-format +msgid "Availability zone '%s' is invalid" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:267 +msgid "Volume must be in the same availability zone as the snapshot" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:276 +msgid "Volume must be in the same availability zone as the source volume" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:315 +msgid "Volume type will be changed to be the same as the source volume." +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:463 +#, python-format +msgid "Failed destroying volume entry %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:546 +#, python-format +msgid "Failed rolling back quota for %s reservations" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:590 +#, python-format +msgid "Failed to update quota for deleting volume: %s" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:678 +#: cinder/volume/flows/manager/create_volume.py:192 +#, python-format +msgid "Volume %s: create failed" +msgstr "" + +#: cinder/volume/flows/api/create_volume.py:682 +msgid "Unexpected build error:" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:105 +#, python-format +msgid "" +"Volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d due to " +"%(reason)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:124 +#, python-format +msgid "Volume %s: re-scheduled" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:141 +#, python-format +msgid "Updating volume %(volume_id)s with %(update)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:146 +#, python-format +msgid "Volume %s: resetting 'creating' status failed." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:165 +#, python-format +msgid "Volume %s: rescheduling failed" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:308 +#, python-format +msgid "" +"Failed notifying about the volume action %(event)s for volume " +"%(volume_id)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:345 +#, python-format +msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:347 +#, python-format +msgid "" +"Failed updating volume %(vol_id)s metadata using the provided " +"%(src_type)s %(src_id)s metadata" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:405 +#, python-format +msgid "" +"Failed fetching snapshot %(snapshot_id)s bootable flag using the provided" +" glance snapshot %(snapshot_ref_id)s volume reference" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:418 +#, python-format +msgid "Marking volume %s as bootable." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:421 +#, python-format +msgid "Failed updating volume %(volume_id)s bootable flag to true" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:448 +#, python-format +msgid "" +"Attempting download of %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s." 
+msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:455 +#: cinder/volume/flows/manager/create_volume.py:466 +#, python-format +msgid "" +"Failed to copy image %(image_id)s to volume: %(volume_id)s, error: " +"%(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:461 +#, python-format +msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:475 +#, python-format +msgid "" +"Downloaded image %(image_id)s (%(image_location)s) to volume " +"%(volume_id)s successfully." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:512 +#, python-format +msgid "" +"Creating volume glance metadata for volume %(volume_id)s backed by image " +"%(image_id)s with: %(vol_metadata)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:526 +#, python-format +msgid "" +"Cloning %(volume_id)s from image %(image_id)s at location " +"%(image_location)s." +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:552 +#, python-format +msgid "Failed updating volume %(volume_id)s with %(updates)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:574 +#, python-format +msgid "Unable to create volume. Volume driver %s not initialized" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:588 +#, python-format +msgid "" +"Volume %(volume_id)s: being created using %(functor)s with specification:" +" %(volume_spec)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:611 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with creation provided " +"model %(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:619 +#, python-format +msgid "Volume %s: creating export" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:633 +#, python-format +msgid "" +"Failed updating model of volume %(volume_id)s with driver provided model " +"%(model)s" +msgstr "" + +#: cinder/volume/flows/manager/create_volume.py:680 +#, python-format +msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" +msgstr "" + +#~ msgid "Error retrieving volume status: %s" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get system name" +#~ msgstr "" + +#~ msgid "_update_volume_status: Could not get storage pool data" +#~ msgstr "" + +#~ msgid "Cannot find any Fibre Channel HBAs" +#~ msgstr "" + +#~ msgid "Volume status must be available or error" +#~ msgstr "Volume 狀態需要可被使用" + +#~ msgid "No backend config with id %s" +#~ msgstr "" + +#~ msgid "No sm_flavor called %s" +#~ msgstr "" + +#~ msgid "No sm_volume with id %s" +#~ msgstr "" + +#~ msgid "Error: %s" +#~ msgstr "" + +#~ msgid "Unexpected state while cloning %s" +#~ msgstr "非預期的執行錯誤" + +#~ msgid "iSCSI device not found at %s" +#~ msgstr "" + +#~ msgid "Fibre Channel device not found." 
+#~ msgstr "" + +#~ msgid "Uncaught exception" +#~ msgstr "" + +#~ msgid "Out reactor registered" +#~ msgstr "" + +#~ msgid "CONSUMER GOT %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT QUEUED %(data)s" +#~ msgstr "" + +#~ msgid "Could not create IPC directory %s" +#~ msgstr "" + +#~ msgid "ROUTER RELAY-OUT %(data)s" +#~ msgstr "" + +#~ msgid "May specify only one of snapshot, imageRef or source volume" +#~ msgstr "" + +#~ msgid "Volume size cannot be lesser than the Snapshot size" +#~ msgstr "" + +#~ msgid "Unable to clone volumes that are in an error state" +#~ msgstr "" + +#~ msgid "Clones currently must be >= original volume size." +#~ msgstr "" + +#~ msgid "Volume size '%s' must be an integer and greater than 0" +#~ msgstr "" + +#~ msgid "Size of specified image is larger than volume size." +#~ msgstr "" + +#~ msgid "Image minDisk size is larger than the volume size." +#~ msgstr "" + +#~ msgid "" +#~ msgstr "" + +#~ msgid "Availability zone is invalid" +#~ msgstr "" + +#~ msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +#~ msgstr "" + +#~ msgid "volume %s: creating from snapshot" +#~ msgstr "" + +#~ msgid "volume %s: creating from existing volume" +#~ msgstr "" + +#~ msgid "volume %s: creating from image" +#~ msgstr "" + +#~ msgid "volume %s: creating" +#~ msgstr "" + +#~ msgid "Setting volume: %s status to error after failed image copy." +#~ msgstr "" + +#~ msgid "Unexpected Error: " +#~ msgstr "" + +#~ msgid "volume %s: creating export" +#~ msgstr "" + +#~ msgid "volume %s: create failed" +#~ msgstr "" + +#~ msgid "volume %s: created successfully" +#~ msgstr "" + +#~ msgid "volume %s: Error trying to reschedule create" +#~ msgstr "" + +#~ msgid "volume %(volume_id)s: re-scheduling %(method)s attempt %(num)d" +#~ msgstr "" + +#~ msgid "Failed to copy image to volume: %(volume_id)s, error: %(error)s" +#~ msgstr "" + +#~ msgid "Downloaded image %(image_id)s to %(volume_id)s successfully." +#~ msgstr "" + +#~ msgid "Array Mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "LUN %(lun)s of size %(size)s MB is created." +#~ msgstr "" + +#~ msgid "Array mismatch %(myid)s vs %(arid)s" +#~ msgstr "" + +#~ msgid "Failed to attach iser target for volume %(volume_id)s." 
+#~ msgstr "" + +#~ msgid "Fetching %s" +#~ msgstr "" + +#~ msgid "Link Local address is not found.:%s" +#~ msgstr "" + +#~ msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +#~ msgstr "" + +#~ msgid "Started %(name)s on %(_host)s:%(_port)s" +#~ msgstr "" + +#~ msgid "Unable to find a Fibre Channel volume device" +#~ msgstr "" + +#~ msgid "Volume device not found at %s" +#~ msgstr "" + +#~ msgid "Unable to find Volume Group: %s" +#~ msgstr "無法卸載 Volume %s" + +#~ msgid "Failed to create Volume Group: %s" +#~ msgstr "找不到Volume %s" + +#~ msgid "snapshot %(snap_name)s: creating" +#~ msgstr "" + +#~ msgid "Running with CoraidDriver for ESM EtherCLoud" +#~ msgstr "" + +#~ msgid "Update session cookie %(session)s" +#~ msgstr "" + +#~ msgid "Message : %(message)s" +#~ msgstr "" + +#~ msgid "Error while trying to set group: %(message)s" +#~ msgstr "" + +#~ msgid "Unable to find group: %(group)s" +#~ msgstr "無法卸載 Volume %s" + +#~ msgid "ESM urlOpen error" +#~ msgstr "" + +#~ msgid "JSON Error" +#~ msgstr "" + +#~ msgid "Request without URL" +#~ msgstr "" + +#~ msgid "Configure data : %s" +#~ msgstr "" + +#~ msgid "Configure response : %s" +#~ msgstr "" + +#~ msgid "Unable to retrive volume infos for volume %(volname)s" +#~ msgstr "" + +#~ msgid "Cannot login on Coraid ESM" +#~ msgstr "" + +#~ msgid "Fail to create volume %(volname)s" +#~ msgstr "找不到Volume %s" + +#~ msgid "Failed to delete volume %(volname)s" +#~ msgstr "找不到Volume %s" + +#~ msgid "Failed to Create Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "Failed to Delete Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "Failed to Create Volume from Snapshot %(snapname)s" +#~ msgstr "" + +#~ msgid "fmt = %(fmt)s backed by: %(backing_file)s" +#~ msgstr "" + +#~ msgid "Expected image to be in raw format, but is %s" +#~ msgstr "" + +#~ msgid "volume group %s doesn't exist" +#~ msgstr "" + +#~ msgid "Error retrieving volume stats: %s" +#~ msgstr "" + +#~ msgid "_update_volume_stats: Could not get system name" +#~ msgstr "" + +#~ msgid "CPG (%s) must be in a domain" +#~ msgstr "" + +#~ msgid "Error populating default encryption types!" +#~ msgstr "" -#: cinder/volume/netapp.py:261 cinder/volume/netapp.py:433 -#, fuzzy, python-format -msgid "Failed to find LUN ID for volume %s" -msgstr "找不到Volume %s" +#~ msgid "Unexpected error while running command." 
+#~ msgstr "非預期的執行錯誤" -#: cinder/volume/netapp.py:280 -msgid "Failed to remove and delete dataset member" -msgstr "" +#~ msgid "Nexenta SA returned the error" +#~ msgstr "" -#: cinder/volume/netapp.py:603 cinder/volume/netapp.py:657 -#, fuzzy, python-format -msgid "No LUN ID for volume %s" -msgstr "找不到Volume %s" +#~ msgid "Ignored target group creation error \"%s\" while ensuring export" +#~ msgstr "" -#: cinder/volume/netapp.py:607 cinder/volume/netapp.py:661 -#, python-format -msgid "Failed to get LUN details for LUN ID %s" -msgstr "" +#~ msgid "Ignored target group member addition error \"%s\" while ensuring export" +#~ msgstr "" -#: cinder/volume/netapp.py:614 -#, python-format -msgid "Failed to get host details for host ID %s" -msgstr "" +#~ msgid "Ignored LU creation error \"%s\" while ensuring export" +#~ msgstr "" -#: cinder/volume/netapp.py:620 -#, python-format -msgid "Failed to get target portal for filer: %s" -msgstr "" +#~ msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +#~ msgstr "" -#: cinder/volume/netapp.py:625 -#, python-format -msgid "Failed to get target IQN for filer: %s" -msgstr "" +#~ msgid "Copying metadata from snapshot %(snap_volume_id)s to %(volume_id)s" +#~ msgstr "" -#: cinder/volume/san.py:113 cinder/volume/san.py:151 -msgid "Specify san_password or san_private_key" -msgstr "" +#~ msgid "Connection to glance failed" +#~ msgstr "連接到glance失敗" -#: cinder/volume/san.py:156 -msgid "san_ip must be set" -msgstr "" +#~ msgid "Invalid snapshot" +#~ msgstr "無效的快照(snapshot)" -#: cinder/volume/san.py:320 -#, python-format -msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" -msgstr "" +#~ msgid "Invalid input received" +#~ msgstr "" -#: cinder/volume/san.py:452 -#, python-format -msgid "CLIQ command returned %s" -msgstr "" +#~ msgid "Invalid volume type" +#~ msgstr "" -#: cinder/volume/san.py:458 -#, python-format -msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" -msgstr "" +#~ msgid "Invalid volume" +#~ msgstr "" -#: cinder/volume/san.py:466 -#, python-format -msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" -msgstr "" +#~ msgid "Invalid host" +#~ msgstr "" -#: cinder/volume/san.py:496 -#, python-format -msgid "" -"Unexpected number of virtual ips for cluster %(cluster_name)s. 
" -"Result=%(_xml)s" -msgstr "" +#~ msgid "Invalid auth key" +#~ msgstr "無效的快照(snapshot)" -#: cinder/volume/san.py:549 -#, python-format -msgid "Volume info: %(volume_name)s => %(volume_attributes)s" -msgstr "" +#~ msgid "Invalid metadata" +#~ msgstr "" -#: cinder/volume/san.py:594 -msgid "local_path not supported" -msgstr "" +#~ msgid "Invalid metadata size" +#~ msgstr "" -#: cinder/volume/san.py:626 -#, python-format -msgid "Could not determine project for volume %s, can't export" -msgstr "" +#~ msgid "Migration error" +#~ msgstr "" -#: cinder/volume/san.py:696 -#, python-format -msgid "Payload for SolidFire API call: %s" -msgstr "" +#~ msgid "Quota exceeded" +#~ msgstr "" -#: cinder/volume/san.py:713 -#, python-format -msgid "Call to json.loads() raised an exception: %s" -msgstr "" +#~ msgid "Connection to swift failed" +#~ msgstr "連接到glance失敗" -#: cinder/volume/san.py:718 -#, python-format -msgid "Results of SolidFire API call: %s" -msgstr "" +#~ msgid "Volume migration failed" +#~ msgstr "" -#: cinder/volume/san.py:732 -#, python-format -msgid "Found solidfire account: %s" -msgstr "" +#~ msgid "SSH command injection detected" +#~ msgstr "" -#: cinder/volume/san.py:746 -#, python-format -msgid "solidfire account: %s does not exist, create it..." -msgstr "" +#~ msgid "Invalid qos specs" +#~ msgstr "" -#: cinder/volume/san.py:804 -msgid "Enter SolidFire create_volume..." -msgstr "" +#~ msgid "debug in callback: %s" +#~ msgstr "" -#: cinder/volume/san.py:846 -msgid "Leaving SolidFire create_volume" -msgstr "" +#~ msgid "Expected object of type: %s" +#~ msgstr "" -#: cinder/volume/san.py:861 -msgid "Enter SolidFire delete_volume..." -msgstr "" +#~ msgid "timefunc: '%(name)s' took %(total_time).2f secs" +#~ msgstr "" -#: cinder/volume/san.py:880 -#, python-format -msgid "Deleting volumeID: %s " -msgstr "" +#~ msgid "base image still has %s snapshots so not deleting base image" +#~ msgstr "" -#: cinder/volume/san.py:888 -msgid "Leaving SolidFire delete_volume" -msgstr "" +#~ msgid "Failed to rename migration destination volume %(vol)s to %(name)s" +#~ msgstr "" -#: cinder/volume/san.py:891 -msgid "Executing SolidFire ensure_export..." -msgstr "" +#~ msgid "Resize volume \"%(name)s\" to %(size)s" +#~ msgstr "" -#: cinder/volume/san.py:895 -msgid "Executing SolidFire create_export..." -msgstr "" +#~ msgid "Volume \"%(name)s\" resized. New size is %(size)s" +#~ msgstr "" -#: cinder/volume/volume_types.py:49 cinder/volume/volume_types.py:108 -msgid "name cannot be None" -msgstr "" +#~ msgid "Invalid snapshot backing file format: %s" +#~ msgstr "" -#: cinder/volume/volume_types.py:96 -msgid "id cannot be None" -msgstr "" +#~ msgid "Extend volume from %(old_size) to %(new_size)" +#~ msgstr "" -#: cinder/volume/xensm.py:55 -#, python-format -msgid "SR name = %s" -msgstr "" +#~ msgid "pool %s doesn't exist" +#~ msgstr "" -#: cinder/volume/xensm.py:56 -#, python-format -msgid "Params: %s" -msgstr "" +#~ msgid "_update_volume_stats: Could not get system name." +#~ msgstr "" -#: cinder/volume/xensm.py:60 -#, python-format -msgid "Failed to create sr %s...continuing" -msgstr "" +#~ msgid "Disk not found: %s" +#~ msgstr "" -#: cinder/volume/xensm.py:62 -msgid "Create failed" -msgstr "" +#~ msgid "read timed out" +#~ msgstr "" -#: cinder/volume/xensm.py:64 -#, python-format -msgid "SR UUID of new SR is: %s" -msgstr "" +#~ msgid "check_for_setup_error." +#~ msgstr "" -#: cinder/volume/xensm.py:71 -msgid "Failed to update db" -msgstr "" +#~ msgid "check_for_setup_error: Can not get device type." 
+#~ msgstr "" -#: cinder/volume/xensm.py:80 -#, python-format -msgid "Failed to introduce sr %s...continuing" -msgstr "" +#~ msgid "check_for_setup_error: Device type is:%(type)s, version is:%(version)s." +#~ msgstr "" -#: cinder/volume/xensm.py:91 -#, python-format -msgid "Failed to reach backend %d" -msgstr "" +#~ msgid "_get_device_type: Storage Pool must be configured." +#~ msgstr "" -#: cinder/volume/xensm.py:100 -msgid "XenSMDriver requires xenapi connection" -msgstr "" +#~ msgid "create_volume:volume name: %s." +#~ msgstr "" -#: cinder/volume/xensm.py:110 -msgid "Failed to initiate session" -msgstr "" +#~ msgid "delete_volume:No need to delete volume. Volume %(name)s does not exist." +#~ msgstr "" -#: cinder/volume/xensm.py:142 -#, python-format -msgid "Volume will be created in backend - %d" -msgstr "" +#~ msgid "create_export: volume name:%s" +#~ msgstr "" -#: cinder/volume/xensm.py:154 -msgid "Failed to update volume in db" -msgstr "" +#~ msgid "create_export:Volume %(name)s does not exist." +#~ msgstr "" -#: cinder/volume/xensm.py:157 -msgid "Unable to create volume" -msgstr "" +#~ msgid "initialize_connection:Failed to find target iSCSI iqn. Target IP:%(ip)s" +#~ msgstr "" -#: cinder/volume/xensm.py:171 -msgid "Failed to delete vdi" -msgstr "" +#~ msgid "terminate_connection:Host does not exist. Host name:%(host)s." +#~ msgstr "" -#: cinder/volume/xensm.py:177 -msgid "Failed to delete volume in db" -msgstr "" +#~ msgid "terminate_connection:volume does not exist. volume name:%(volume)s" +#~ msgstr "" -#: cinder/volume/xensm.py:210 -msgid "Failed to find volume in db" -msgstr "" +#~ msgid "create_snapshot:Device does not support snapshot." +#~ msgstr "" -#: cinder/volume/xensm.py:221 -msgid "Failed to find backend in db" -msgstr "" +#~ msgid "create_snapshot:Resource pool needs 1GB valid size at least." +#~ msgstr "" -#: cinder/volume/nexenta/__init__.py:27 -msgid "Nexenta SA returned the error" -msgstr "" +#~ msgid "create_snapshot:Volume does not exist. Volume name:%(name)s" +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:64 -#, python-format -msgid "Sending JSON data: %s" -msgstr "" +#~ msgid "create_snapshot:Snapshot does not exist. Snapshot name:%(name)s" +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:69 -#, python-format -msgid "Auto switching to HTTPS connection to %s" -msgstr "" +#~ msgid "delete_snapshot:snapshot name:%(snapshot)s, volume name:%(volume)s." +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:75 -msgid "No headers in server response" -msgstr "" +#~ msgid "delete_snapshot:Device does not support snapshot." +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:76 -msgid "Bad response from server" -msgstr "" +#~ msgid "delete_snapshot:Snapshot does not exist. snapshot name:%(snap)s" +#~ msgstr "" -#: cinder/volume/nexenta/jsonrpc.py:79 -#, python-format -msgid "Got response: %s" -msgstr "" +#~ msgid "_check_conf_file: %s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:96 -#, python-format -msgid "Volume %s does not exist in Nexenta SA" -msgstr "" +#~ msgid "Write login information to xml error. %s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:180 -msgid "" -"Call to local_path should not happen. Verify that use_local_volumes flag " -"is turned off." -msgstr "" +#~ msgid "_get_login_info error. %s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:202 -#, python-format -msgid "Ignored target creation error \"%s\" while ensuring export" -msgstr "" +#~ msgid "_get_lun_set_info:Use default prefetch type. Prefetch type:Intelligent." 
+#~ msgstr "" -#: cinder/volume/nexenta/volume.py:210 -#, python-format -msgid "Ignored target group creation error \"%s\" while ensuring export" -msgstr "" +#~ msgid "_get_lun_set_info:%s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:219 -#, python-format -msgid "Ignored target group member addition error \"%s\" while ensuring export" -msgstr "" +#~ msgid "_get_iscsi_info:%s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:227 -#, python-format -msgid "Ignored LU creation error \"%s\" while ensuring export" -msgstr "" +#~ msgid "CLI command:%s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:237 -#, python-format -msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" -msgstr "" +#~ msgid "_execute_cli:%s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:273 -#, python-format -msgid "" -"Got error trying to destroy target group %(target_group)s, assuming it is" -" already gone: %(exc)s" -msgstr "" +#~ msgid "_delete_hostport:Failed to delete host port. port id:%(portid)s" +#~ msgstr "" -#: cinder/volume/nexenta/volume.py:280 -#, python-format -msgid "" -"Got error trying to delete target %(target)s, assuming it is already " -"gone: %(exc)s" -msgstr "" +#~ msgid "_get_tgt_iqn:iSCSI IP is %s." +#~ msgstr "" -#~ msgid "Unable to locate account %(account_name) on Solidfire device" +#~ msgid "_get_tgt_iqn:iSCSI target iqn is:%s" #~ msgstr "" -#~ msgid "Zone %(zone_id)s could not be found." +#~ msgid "_delete_map:Failed to delete host map. mapid:%(mapid)s out:%(out)s" #~ msgstr "" -#~ msgid "Cinder access parameters were not specified." +#~ msgid "_delete_host: Failed delete host. host id:%(hostid)s out:%(out)s" #~ msgstr "" -#~ msgid "Virtual Storage Array %(id)d could not be found." +#~ msgid "_is_resource_pool_enough:Resource pool for snapshot not be added." #~ msgstr "" -#~ msgid "Virtual Storage Array %(name)s could not be found." +#~ msgid "Ignored target creation error while ensuring export" #~ msgstr "" -#~ msgid "Detected more than one volume with name %(vol_name)" +#~ msgid "Ignored target group creation error while ensuring export" #~ msgstr "" -#~ msgid "Detected existing vlan with id %(vlan)" +#~ msgid "Ignored target group member addition error while ensuring export" #~ msgstr "" -#~ msgid "" -#~ "Attempting to grab semaphore \"%(lock)s\" " -#~ "for method \"%(method)s\"...lock" +#~ msgid "Ignored LU creation error while ensuring export" #~ msgstr "" -#~ msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgid "Ignored LUN mapping entry addition error while ensuring export" #~ msgstr "" -#~ msgid "" -#~ "Attempting to grab file lock " -#~ "\"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgid "Invalid source volume %(reason)s." #~ msgstr "" -#~ msgid "Got file lock \"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgid "The request is invalid." #~ msgstr "" -#~ msgid "Parent group id and group id cannot be same" +#~ msgid "Volume %(volume_id)s persistence file could not be found." #~ msgstr "" -#~ msgid "No body provided" +#~ msgid "No disk at %(location)s" #~ msgstr "" -#~ msgid "Create VSA %(display_name)s of type %(vc_type)s" +#~ msgid "Class %(class_name)s could not be found: %(exception)s" #~ msgstr "" -#~ msgid "Delete VSA with id: %s" +#~ msgid "Action not allowed." #~ msgstr "" -#~ msgid "Associate address %(ip)s to VSA %(id)s" +#~ msgid "Key pair %(key_name)s already exists." 
#~ msgstr "" -#~ msgid "Disassociate address from VSA %(id)s" +#~ msgid "Migration error: %(reason)s" #~ msgstr "" -#~ msgid "%(obj)s with ID %(id)s not found" +#~ msgid "Maximum volume/snapshot size exceeded" #~ msgstr "" -#~ msgid "" -#~ "%(obj)s with ID %(id)s belongs to " -#~ "VSA %(own_vsa_id)s and not to VSA " -#~ "%(vsa_id)s." +#~ msgid "3PAR Host already exists: %(err)s. %(info)s" #~ msgstr "" -#~ msgid "Index. vsa_id=%(vsa_id)s" +#~ msgid "Backup volume %(volume_id)s type not recognised." #~ msgstr "" -#~ msgid "Detail. vsa_id=%(vsa_id)s" +#~ msgid "_remove_iscsi_port: iSCSI port was not found on host %(hostid)s" #~ msgstr "" -#~ msgid "Create. vsa_id=%(vsa_id)s, body=%(body)s" +#~ msgid "ssh_read: Read SSH timeout" #~ msgstr "" -#~ msgid "Create volume of %(size)s GB from VSA ID %(vsa_id)s" +#~ msgid "do_setup." #~ msgstr "" -#~ msgid "Update %(obj)s with id: %(id)s, changes: %(changes)s" +#~ msgid "create_volume: volume name: %s." #~ msgstr "" -#~ msgid "Delete. vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgid "delete_volume: volume name: %s." #~ msgstr "" -#~ msgid "Show. vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgid "create_cloned_volume: src volume: %(src)s tgt volume: %(tgt)s" #~ msgstr "" -#~ msgid "Index instances for VSA %s" +#~ msgid "create_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" #~ msgstr "" -#~ msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances" +#~ msgid "delete_snapshot: snapshot name: %(snapshot)s volume name: %(volume)s" #~ msgstr "" -#~ msgid "" -#~ "Instance quota exceeded. You cannot run" -#~ " any more instances of this type." +#~ msgid "change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s" #~ msgstr "" -#~ msgid "" -#~ "Instance quota exceeded. You can only" -#~ " run %s more instances of this " -#~ "type." +#~ msgid "_update_volume_stats: Updating volume stats" #~ msgstr "" -#~ msgid "Going to try to soft delete %s" +#~ msgid "restore finished." #~ msgstr "" -#~ msgid "No host for instance %s, deleting immediately" +#~ msgid "Error encountered during initialization of driver: %s" #~ msgstr "" -#~ msgid "Going to try to terminate %s" +#~ msgid "Unabled to update stats, driver is uninitialized" #~ msgstr "" -#~ msgid "Going to try to stop %s" +#~ msgid "Snapshot file at %s does not exist." #~ msgstr "" -#~ msgid "Going to try to start %s" +#~ msgid "_create_copy: Source vdisk %s does not exist" #~ msgstr "" -#~ msgid "" -#~ "Going to force the deletion of the" -#~ " vm %(instance_uuid)s, even if it is" -#~ " deleted" +#~ msgid "Login to 3PAR array invalid" #~ msgstr "" -#~ msgid "" -#~ "Instance %(instance_uuid)s did not exist " -#~ "in the DB, but I will shut " -#~ "it down anyway using a special " -#~ "context" +#~ msgid "There are no datastores present under %s." #~ msgstr "" -#~ msgid "exception terminating the instance %(instance_uuid)s" +#~ msgid "Size for volume: %s not found, skipping secure delete." #~ msgstr "" -#~ msgid "trying to destroy already destroyed instance: %s" +#~ msgid "Could not find attribute for LUN named %s" #~ msgstr "" -#~ msgid "" -#~ "Instance %(name)s found in database but" -#~ " not known by hypervisor. Setting " -#~ "power state to NOSTATE" +#~ msgid "Cleaning up incomplete backup operations" #~ msgstr "" -#~ msgid "" -#~ "Detected instance with name label " -#~ "'%(name_label)s' which is marked as " -#~ "DELETED but still present on host." 
+#~ msgid "Resetting volume %s to available (was backing-up)" #~ msgstr "" -#~ msgid "" -#~ "Destroying instance with name label " -#~ "'%(name_label)s' which is marked as " -#~ "DELETED but still present on host." +#~ msgid "Resetting volume %s to error_restoring (was restoring-backup)" #~ msgstr "" -#~ msgid "SQL connection failed (%(connstring)s). %(attempts)d attempts left." +#~ msgid "Resetting backup %s to error (was creating)" #~ msgstr "" -#~ msgid "Can't downgrade without losing data" +#~ msgid "Resetting backup %s to available (was restoring)" #~ msgstr "" -#~ msgid "Instance %(instance_id)s not found" +#~ msgid "Resuming delete on backup: %s" #~ msgstr "" -#~ msgid "Network %s has active ports, cannot delete" +#~ msgid "create_backup started, backup: %(backup_id)s for volume: %(volume_id)s" #~ msgstr "" -#~ msgid "No fixed IPs to deallocate for vif %sid" +#~ msgid "create_backup finished. backup: %s" #~ msgstr "" -#~ msgid "" -#~ "AMQP server on %(fl_host)s:%(fl_port)d is " -#~ "unreachable: %(e)s. Trying again in " -#~ "%(fl_intv)d seconds." +#~ msgid "delete_backup started, backup: %s" #~ msgstr "" -#~ msgid "Unable to connect to AMQP server after %(tries)d tries. Shutting down." +#~ msgid "delete_backup finished, backup %s deleted" #~ msgstr "" -#~ msgid "Reconnected to queue" +#~ msgid "JSON transfer Error" #~ msgstr "" -#~ msgid "Failed to fetch message from queue: %s" +#~ msgid "create volume error: %(err)s" #~ msgstr "" -#~ msgid "Initing the Adapter Consumer for %s" +#~ msgid "Create snapshot error." #~ msgstr "" -#~ msgid "Created \"%(exchange)s\" fanout exchange with \"%(key)s\" routing key" +#~ msgid "Create luncopy error." #~ msgstr "" -#~ msgid "Exception while processing consumer" +#~ msgid "_find_host_lun_id transfer data error! " #~ msgstr "" -#~ msgid "Creating \"%(exchange)s\" fanout exchange" +#~ msgid "ssh_read: Read SSH timeout." #~ msgstr "" -#~ msgid "response %s" +#~ msgid "There are no hosts in the inventory." #~ msgstr "" -#~ msgid "topic is %s" +#~ msgid "Unable to create volume: %(vol)s on the hosts: %(hosts)s." #~ msgstr "" -#~ msgid "message %s" +#~ msgid "Successfully cloned new backing: %s." #~ msgstr "" -#~ msgid "" -#~ "Cannot confirm tmpfile at %(ipath)s is" -#~ " on same shared storage between " -#~ "%(src)s and %(dest)s." +#~ msgid "Successfully reverted clone: %(clone)s to snapshot: %(snapshot)s." #~ msgstr "" -#~ msgid "" -#~ "Unable to migrate %(instance_id)s to " -#~ "%(dest)s: Lack of memory(host:%(avail)s <= " -#~ "instance:%(mem_inst)s)" +#~ msgid "Copying backing files from %(src)s to %(dest)s." #~ msgstr "" -#~ msgid "" -#~ "Unable to migrate %(instance_id)s to " -#~ "%(dest)s: Lack of disk(host:%(available)s <=" -#~ " instance:%(necessary)s)" +#~ msgid "Initiated copying of backing via task: %s." #~ msgstr "" -#~ msgid "Driver Method %(driver_method)s missing: %(e)s.Reverting to schedule()" +#~ msgid "Successfully copied backing to %s." #~ msgstr "" -#~ msgid "Setting instance %(instance_uuid)s to ERROR state." +#~ msgid "Registering backing at path: %s to inventory." #~ msgstr "" -#~ msgid "_filter_hosts: %(request_spec)s" +#~ msgid "Initiated registring backing, task: %s." #~ msgstr "" -#~ msgid "Filter hosts for drive type %s" +#~ msgid "Successfully registered backing: %s." #~ msgstr "" -#~ msgid "Host %s has no free capacity. Skip" +#~ msgid "Reverting backing to snapshot: %s." #~ msgstr "" -#~ msgid "Filter hosts: %s" +#~ msgid "Initiated reverting snapshot via task: %s." 
#~ msgstr "" -#~ msgid "Must implement host selection mechanism" +#~ msgid "Successfully reverted to snapshot: %s." #~ msgstr "" -#~ msgid "Maximum number of hosts selected (%d)" +#~ msgid "Successfully copied disk data to: %s." #~ msgstr "" -#~ msgid "Selected excessive host %(host)s" +#~ msgid "Error(s): %s occurred in the call to RetrieveProperties." #~ msgstr "" -#~ msgid "Provision volume %(name)s of size %(size)s GB on host %(host)s" +#~ msgid "Requested volume or snapshot exceeds allowed Gigabytes quota" #~ msgstr "" -#~ msgid "volume_params %(volume_params)s" +#~ msgid "Deploy v1 of the Cinder API. " #~ msgstr "" -#~ msgid "%(i)d: Volume %(name)s" +#~ msgid "Deploy v2 of the Cinder API. " #~ msgstr "" -#~ msgid "Attempting to spawn %(num_volumes)d volume(s)" +#~ msgid "_read_xml:%s" #~ msgstr "" -#~ msgid "Error creating volumes" +#~ msgid "request ip info is %s." #~ msgstr "" -#~ msgid "Non-VSA volume %d" +#~ msgid "new str info is %s." #~ msgstr "" -#~ msgid "Spawning volume %(volume_id)s with drive type %(drive_type)s" +#~ msgid "Failed to create iser target for volume %(volume_id)s." #~ msgstr "" -#~ msgid "Error creating volume" +#~ msgid "Failed to remove iser target for volume %(volume_id)s." #~ msgstr "" -#~ msgid "No capability selected for volume of size %(size)s" +#~ msgid "rtstool is not installed correctly" #~ msgstr "" -#~ msgid "Host %s:" +#~ msgid "Creating iser_target for: %s" #~ msgstr "" -#~ msgid "" -#~ "\tDrive %(qosgrp)-25s: total %(total)2s, " -#~ "used %(used)2s, free %(free)2s. Available " -#~ "capacity %(avail)-5s" +#~ msgid "Failed to create iser target for volume id:%(vol_id)s: %(e)s" #~ msgstr "" -#~ msgid "" -#~ "\t LeastUsedHost: Best host: %(best_host)s." -#~ " (used capacity %(min_used)s)" +#~ msgid "Removing iser_target for: %s" #~ msgstr "" -#~ msgid "" -#~ "\t MostAvailCap: Best host: %(best_host)s. " -#~ "(available %(max_avail)s %(type_str)s)" +#~ msgid "Failed to remove iser target for volume id:%(vol_id)s: %(e)s" #~ msgstr "" -#~ msgid "(%(nm)s) publish (key: %(routing_key)s) %(message)s" +#~ msgid "Volume %s does not exist, it seems it was already deleted" #~ msgstr "" -#~ msgid "Publishing to route %s" +#~ msgid "Executing zfs send/recv on the appliance" #~ msgstr "" -#~ msgid "Declaring queue %s" +#~ msgid "zfs send/recv done, new volume %s created" #~ msgstr "" -#~ msgid "Declaring exchange %s" +#~ msgid "Failed to delete temp snapshot %(volume)s@%(snapshot)s" #~ msgstr "" -#~ msgid "Binding %(queue)s to %(exchange)s with key %(routing_key)s" +#~ msgid "Failed to delete zfs recv snapshot %(volume)s@%(snapshot)s" #~ msgstr "" -#~ msgid "Getting from %(queue)s: %(message)s" +#~ msgid "rbd export-diff failed - %s" #~ msgstr "" -#~ msgid "Test: Emulate wrong VSA name. Raise" +#~ msgid "rbd import-diff failed - %s" #~ msgstr "" -#~ msgid "Test: Emulate DB error. Raise" +#~ msgid "%s is not on GPFS. Perhaps GPFS not mounted." #~ msgstr "" -#~ msgid "Test: user_data = %s" +#~ msgid "Folder %s does not exist, it seems it was already deleted." #~ msgstr "" -#~ msgid "_create: param=%s" +#~ msgid "No 'os-update_readonly_flag' was specified in request." #~ msgstr "" -#~ msgid "Host %s" +#~ msgid "Volume 'readonly' flag must be specified in request as a boolean." 
#~ msgstr "" -#~ msgid "Test: provision vol %(name)s on host %(host)s" +#~ msgid "ISER provider_location not stored, using discovery" #~ msgstr "" -#~ msgid "\t vol=%(vol)s" +#~ msgid "Could not find iSER export for volume %s" #~ msgstr "" -#~ msgid "Test: VSA update request: vsa_id=%(vsa_id)s values=%(values)s" +#~ msgid "ISER Discovery: Found %s" #~ msgstr "" -#~ msgid "Test: Volume create: %s" +#~ msgid "Failed to access the device on the path %(path)s: %(error)s." #~ msgstr "" -#~ msgid "Test: Volume get request: id=%(volume_id)s" +#~ msgid "iSER device not found at %s" #~ msgstr "" -#~ msgid "Test: Volume update request: id=%(volume_id)s values=%(values)s" +#~ msgid "Found iSER node %(host_device)s (after %(tries)s rescans)." #~ msgstr "" -#~ msgid "Test: Volume get: id=%(volume_id)s" +#~ msgid "Skipping ensure_export. No iser_target provisioned for volume: %s" #~ msgstr "" -#~ msgid "Task [%(name)s] %(task)s status: success %(result)s" +#~ msgid "Skipping remove_export. No iser_target provisioned for volume: %s" #~ msgstr "" -#~ msgid "Task [%(name)s] %(task)s status: %(status)s %(error_info)s" +#~ msgid "Downloading image: %s from glance image server." #~ msgstr "" -#~ msgid "Unable to get updated status: %s" +#~ msgid "Uploading image: %s to the Glance image server." #~ msgstr "" -#~ msgid "" -#~ "deactivate_node is called for " -#~ "node_id = %(id)s node_ip = %(ip)s" +#~ msgid "Invalid request body" #~ msgstr "" -#~ msgid "virsh said: %r" +#~ msgid "enter: _get_host_from_connector: prefix %s" #~ msgstr "" -#~ msgid "cool, it's a device" +#~ msgid "Schedule volume flow not retrieved" #~ msgstr "" -#~ msgid "Unable to read LXC console" +#~ msgid "Failed to successfully complete schedule volume using flow: %s" #~ msgstr "" -#~ msgid "" -#~ "to xml...\n" -#~ ":%s " +#~ msgid "Create volume flow not retrieved" #~ msgstr "" -#~ msgid "During wait running, %s disappeared." +#~ msgid "Failed to successfully complete create volume workflow" #~ msgstr "" -#~ msgid "Instance %s running successfully." +#~ msgid "Expected volume result not found" #~ msgstr "" -#~ msgid "The nwfilter(%(instance_secgroup_filter_name)s) is not found." +#~ msgid "Manager volume flow not retrieved" #~ msgstr "" -#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image verification failed" +#~ msgid "Failed to successfully complete manager volume workflow" #~ msgstr "" -#~ msgid "" -#~ "%(container_format)s-%(id)s (%(base_file)s): image " -#~ "verification skipped, no hash stored" +#~ msgid "Unable to update stats, driver is uninitialized" #~ msgstr "" -#~ msgid "%(container_format)s-%(id)s (%(base_file)s): checking" +#~ msgid "Bad reponse from server: %s" #~ msgstr "" -#~ msgid "" -#~ "%(container_format)s-%(id)s (%(base_file)s): in use:" -#~ " on this node %(local)d local, " -#~ "%(remote)d on other nodes" +#~ msgid "%(flow)s has moved into state %(state)s from state %(old_state)s" #~ msgstr "" -#~ msgid "" -#~ "%(container_format)s-%(id)s (%(base_file)s): warning " -#~ "-- an absent base file is in " -#~ "use! 
instances: %(instance_list)s" +#~ msgid "No request spec, will not reschedule" #~ msgstr "" -#~ msgid "" -#~ "%(container_format)s-%(id)s (%(base_file)s): in: on" -#~ " other nodes (%(remote)d on other " -#~ "nodes)" +#~ msgid "No retry filter property or associated retry info, will not reschedule" #~ msgstr "" -#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image is not in use" +#~ msgid "Retry info not present, will not reschedule" #~ msgstr "" -#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image is in use" +#~ msgid "Clear capabilities" #~ msgstr "" -#~ msgid "Created VM %s..." +#~ msgid "This usually means the volume was never succesfully created." #~ msgstr "" -#~ msgid "Created VM %(instance_name)s as %(vm_ref)s." +#~ msgid "setting LU uppper (end) limit to %s" #~ msgstr "" -#~ msgid "Creating a CDROM-specific VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +#~ msgid "Can't find lun or lun goup in array" #~ msgstr "" -#~ msgid "" -#~ "Created a CDROM-specific VBD %(vbd_ref)s" -#~ " for VM %(vm_ref)s, VDI %(vdi_ref)s." +#~ msgid "Volume to be restored to is smaller than the backup to be restored" #~ msgstr "" -#~ msgid "No primary VDI found for%(vm_ref)s" +#~ msgid "Volume driver '%(driver)s' not initialized." #~ msgstr "" -#~ msgid "Snapshotting VM %(vm_ref)s with label '%(label)s'..." +#~ msgid "in looping call" #~ msgstr "" -#~ msgid "Created snapshot %(template_vm_ref)s from VM %(vm_ref)s." +#~ msgid "Is the appropriate service running?" #~ msgstr "" -#~ msgid "Fetching image %(image)s" +#~ msgid "Could not find another host" #~ msgstr "" -#~ msgid "Image Type: %s" +#~ msgid "Not enough allocatable volume gigabytes remaining" #~ msgstr "" -#~ msgid "ISO: Found sr possibly containing the ISO image" +#~ msgid "Unable to update stats on non-intialized Volume Group: %s" #~ msgstr "" -#~ msgid "Size for image %(image)s:%(virtual_size)d" +#~ msgid "do_setup: Pool %s does not exist" #~ msgstr "" -#~ msgid "instance %s: Failed to fetch glance image" +#~ msgid "migrate_volume started with more than one vdisk copy" #~ msgstr "" -#~ msgid "Creating VBD for VDI %s ... " +#~ msgid "migrate_volume: Could not get vdisk copy data" #~ msgstr "" -#~ msgid "Creating VBD for VDI %s done." +#~ msgid "Selected datastore: %s for the volume." #~ msgstr "" -#~ msgid "VBD.unplug successful first time." +#~ msgid "There are no valid datastores present under %s." #~ msgstr "" -#~ msgid "VBD.unplug rejected: retrying..." +#~ msgid "Unable to create volume, driver not initialized" #~ msgstr "" -#~ msgid "Not sleeping anymore!" +#~ msgid "Migration %(migration_id)s could not be found." #~ msgstr "" -#~ msgid "VBD.unplug successful eventually." +#~ msgid "Bad driver response status: %(status)s" #~ msgstr "" -#~ msgid "Ignoring XenAPI.Failure in VBD.unplug: %s" +#~ msgid "Instance %(instance_id)s could not be found." #~ msgstr "" -#~ msgid "Ignoring XenAPI.Failure %s" +#~ msgid "Volume retype failed: %(reason)s" #~ msgstr "" -#~ msgid "Starting instance %s" +#~ msgid "SIGTERM received" #~ msgstr "" -#~ msgid "instance %s: Failed to spawn" +#~ msgid "Child %(pid)d exited with status %(code)d" #~ msgstr "" -#~ msgid "Instance %s failed to spawn - performing clean-up" +#~ msgid "_wait_child %d" #~ msgstr "" -#~ msgid "instance %s: Failed to spawn - Unable to create VM" +#~ msgid "wait wrap.failed %s" #~ msgstr "" #~ msgid "" -#~ "Auto configuring disk for instance " -#~ "%(instance_uuid)s, attempting to resize " -#~ "partition..." +#~ "Report interval must be less than " +#~ "service down time. 
Current config: " +#~ ". Setting " +#~ "service_down_time to: %(new_service_down_time)s" #~ msgstr "" -#~ msgid "Invalid value for injected_files: '%s'" +#~ msgid "Failed to update iscsi target for volume %(name)s." #~ msgstr "" -#~ msgid "Starting VM %s..." +#~ msgid "Updating iscsi target: %s" #~ msgstr "" -#~ msgid "Spawning VM %(instance_uuid)s created %(vm_ref)s." +#~ msgid "Failed to update iscsi target %(name)s: %(e)s" #~ msgstr "" -#~ msgid "Instance %s: waiting for running" +#~ msgid "Caught '%(exception)s' exception." #~ msgstr "" -#~ msgid "Instance %s: running" +#~ msgid "Get code level failed" #~ msgstr "" -#~ msgid "Resources to remove:%s" +#~ msgid "do_setup: Could not get system name" #~ msgstr "" -#~ msgid "Removing VDI %(vdi_ref)s(uuid:%(vdi_to_remove)s)" +#~ msgid "Failed to get license information." #~ msgstr "" -#~ msgid "Skipping VDI destroy for %s" +#~ msgid "do_setup: No configured nodes" #~ msgstr "" -#~ msgid "Finished snapshot and upload for VM %s" +#~ msgid "enter: _get_chap_secret_for_host: host name %s" #~ msgstr "" -#~ msgid "Starting snapshot for VM %s" +#~ msgid "" +#~ "leave: _get_chap_secret_for_host: host name " +#~ "%(host_name)s with secret %(chap_secret)s" #~ msgstr "" -#~ msgid "Unable to Snapshot instance %(instance_uuid)s: %(exc)s" +#~ msgid "" +#~ "_create_host: Cannot clean host name. " +#~ "Host name is not unicode or string" #~ msgstr "" -#~ msgid "Updating instance '%(instance_uuid)s' progress to %(progress)d" +#~ msgid "enter: _get_host_from_connector: %s" #~ msgstr "" -#~ msgid "Resize instance %s complete" +#~ msgid "leave: _get_host_from_connector: host %s" #~ msgstr "" -#~ msgid "domid changed from %(olddomid)s to %(newdomid)s" +#~ msgid "enter: _create_host: host %s" #~ msgstr "" -#~ msgid "VM %(instance_uuid)s already halted,skipping shutdown..." +#~ msgid "_create_host: No connector ports" #~ msgstr "" -#~ msgid "Shutting down VM for Instance %(instance_uuid)s" +#~ msgid "leave: _create_host: host %(host)s - %(host_name)s" #~ msgstr "" -#~ msgid "Destroying VDIs for Instance %(instance_uuid)s" +#~ msgid "enter: _map_vol_to_host: volume %(volume_name)s to host %(host_name)s" #~ msgstr "" #~ msgid "" -#~ "Instance %(instance_uuid)s using RAW or " -#~ "VHD, skipping kernel and ramdisk " -#~ "deletion" +#~ "storwize_svc_multihostmap_enabled is set to " +#~ "False, Not allow multi host mapping" #~ msgstr "" -#~ msgid "Instance %(instance_uuid)s VM destroyed" +#~ msgid "volume %s mapping to multi host" #~ msgstr "" -#~ msgid "Destroying VM for Instance %(instance_uuid)s" +#~ msgid "" +#~ "leave: _map_vol_to_host: LUN %(result_lun)s, " +#~ "volume %(volume_name)s, host %(host_name)s" #~ msgstr "" -#~ msgid "Automatically hard rebooting %d" +#~ msgid "enter: _delete_host: host %s " #~ msgstr "" -#~ msgid "Instance for migration %d not found, skipping" +#~ msgid "leave: _delete_host: host %s " #~ msgstr "" -#~ msgid "injecting network info to xs for vm: |%s|" +#~ msgid "_create_host failed to return the host name." #~ msgstr "" -#~ msgid "creating vif(s) for vm: |%s|" +#~ msgid "_get_host_from_connector failed to return the host name for connector" #~ msgstr "" -#~ msgid "Creating VIF for VM %(vm_ref)s, network %(network_ref)s." +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to any host found." #~ msgstr "" -#~ msgid "Created VIF %(vif_ref)s for VM %(vm_ref)s, network %(network_ref)s." +#~ msgid "" +#~ "terminate_connection: Multiple mappings of " +#~ "volume %(vol_name)s found, no host " +#~ "specified." 
#~ msgstr "" -#~ msgid "injecting hostname to xs for vm: |%s|" +#~ msgid "" +#~ "terminate_connection: No mapping of volume " +#~ "%(vol_name)s to host %(host_name)s found" #~ msgstr "" -#~ msgid "" -#~ "The agent call to %(method)s returned" -#~ " an invalid response: %(ret)r. VM " -#~ "id=%(instance_uuid)s; path=%(path)s; args=%(addl_args)r" +#~ msgid "protocol must be specified as ' iSCSI' or ' FC'" +#~ msgstr "" + +#~ msgid "enter: _create_vdisk: vdisk %s " #~ msgstr "" #~ msgid "" -#~ "TIMEOUT: The call to %(method)s timed" -#~ " out. VM id=%(instance_uuid)s; args=%(args)r" +#~ "_create_vdisk %(name)s - did not find success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" #~ msgid "" -#~ "NOT IMPLEMENTED: The call to %(method)s" -#~ " is not supported by the agent. " -#~ "VM id=%(instance_uuid)s; args=%(args)r" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find success " +#~ "message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" #~ msgstr "" #~ msgid "" -#~ "The call to %(method)s returned an " -#~ "error: %(e)s. VM id=%(instance_uuid)s; " -#~ "args=%(args)r" +#~ "create FC mapping from %(source)s to " +#~ "%(target)s - did not find mapping " +#~ "id in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s\n" #~ msgstr "" -#~ msgid "Creating VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +#~ msgid "" +#~ "_prepare_fc_map: Failed to prepare FlashCopy" +#~ " from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" -#~ msgid "Error destroying VDI" +#~ msgid "" +#~ "Unexecpted mapping status %(status)s for " +#~ "mapping %(id)s. Attributes: %(attr)s" #~ msgstr "" -#~ msgid "\tVolume %s is NOT VSA volume" +#~ msgid "" +#~ "Mapping %(id)s prepare failed to " +#~ "complete within the allotted %(to)d " +#~ "seconds timeout. Terminating." 
#~ msgstr "" -#~ msgid "\tFE VSA Volume %s creation - do nothing" +#~ msgid "" +#~ "_prepare_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s with " +#~ "exception %(ex)s" #~ msgstr "" -#~ msgid "VSA BE create_volume for %s failed" +#~ msgid "_prepare_fc_map: %s" #~ msgstr "" -#~ msgid "VSA BE create_volume for %s succeeded" +#~ msgid "" +#~ "_start_fc_map: Failed to start FlashCopy " +#~ "from %(source)s to %(target)s.\n" +#~ "stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s deletion - do nothing" +#~ msgid "" +#~ "enter: _run_flashcopy: execute FlashCopy from" +#~ " source %(source)s to target %(target)s" #~ msgstr "" -#~ msgid "VSA BE delete_volume for %s failed" +#~ msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s" #~ msgstr "" -#~ msgid "VSA BE delete_volume for %s suceeded" +#~ msgid "enter: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s local path call - call discover" +#~ msgid "_create_copy: Source vdisk %(src_vdisk)s (%(src_id)s) does not exist" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s ensure export - do nothing" +#~ msgid "" +#~ "_create_copy: cannot get source vdisk " +#~ "%(src)s capacity from vdisk attributes " +#~ "%(attr)s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s create export - do nothing" +#~ msgid "leave: _create_copy: snapshot %(tgt_vdisk)s from vdisk %(src_vdisk)s" #~ msgstr "" -#~ msgid "\tFE VSA Volume %s remove export - do nothing" +#~ msgid "enter: _get_flashcopy_mapping_attributes: mapping %s" #~ msgstr "" -#~ msgid "VSA BE remove_export for %s failed" +#~ msgid "" +#~ "leave: _get_flashcopy_mapping_attributes: mapping " +#~ "%(fc_map_id)s, attributes %(attributes)s" #~ msgstr "" -#~ msgid "Failed to retrieve QoS info" +#~ msgid "enter: _is_vdisk_defined: vdisk %s " #~ msgstr "" -#~ msgid "invalid drive data" +#~ msgid "leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s " #~ msgstr "" -#~ msgid "drive_name not defined" +#~ msgid "enter: _delete_vdisk: vdisk %s" #~ msgstr "" -#~ msgid "invalid drive type name %s" +#~ msgid "warning: Tried to delete vdisk %s but it does not exist." #~ msgstr "" -#~ msgid "*** Experimental VSA code ***" +#~ msgid "leave: _delete_vdisk: vdisk %s" #~ msgstr "" -#~ msgid "Requested number of VCs (%d) is too high. Setting to default" +#~ msgid "" +#~ "_add_vdisk_copy %(name)s - did not find" +#~ " success message in CLI output.\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" -#~ msgid "Creating VSA: %s" +#~ msgid "_get_vdisk_copy_attrs: Could not get vdisk copy data" #~ msgstr "" -#~ msgid "" -#~ "VSA ID %(vsa_id)d %(vsa_name)s: Create " -#~ "volume %(vol_name)s, %(vol_size)d GB, type " -#~ "%(vol_type_id)s" +#~ msgid "_get_pool_attrs: Pool %s does not exist" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)d: Update VSA status to %(status)s" +#~ msgid "enter: _execute_command_and_parse_attributes: command %s" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)d: Update VSA call" +#~ msgid "" +#~ "leave: _execute_command_and_parse_attributes:\n" +#~ "command: %(cmd)s\n" +#~ "attributes: %(attr)s" #~ msgstr "" -#~ msgid "Adding %(add_cnt)s VCs to VSA %(vsa_name)s." +#~ msgid "" +#~ "_get_hdr_dic: attribute headers and values do not match.\n" +#~ " Headers: %(header)s\n" +#~ " Values: %(row)s" #~ msgstr "" -#~ msgid "Deleting %(del_cnt)s VCs from VSA %(vsa_name)s." 
+#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ "stdout: %(out)s\n" +#~ "stderr: %(err)s\n" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Deleting %(direction)s volume %(vol_name)s" +#~ msgid "" +#~ "%(fun)s: Failed with unexpected CLI output.\n" +#~ " Command: %(cmd)s\n" +#~ " stdout: %(out)s\n" +#~ " stderr: %(err)s" #~ msgstr "" -#~ msgid "Unable to delete volume %s" +#~ msgid "Did not find expected column in %(fun)s: %(hdr)s" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Forced delete. %(direction)s volume %(vol_name)s" +#~ msgid "" +#~ "Volume size %(size)s cannot be lesser" +#~ " than the snapshot size %(snap_size)s. " +#~ "They must be >= original snapshot " +#~ "size." #~ msgstr "" -#~ msgid "Going to try to terminate VSA ID %s" +#~ msgid "" +#~ "Clones currently disallowed when %(size)s " +#~ "< %(source_size)s. They must be >= " +#~ "original volume size." #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Delete instance %(name)s" +#~ msgid "" +#~ "Size of specified image %(image_size)s " +#~ "is larger than volume size " +#~ "%(volume_size)s." #~ msgstr "" -#~ msgid "Create call received for VSA %s" +#~ msgid "" +#~ "Image minDisk size %(min_disk)s is " +#~ "larger than the volume size " +#~ "%(volume_size)s." #~ msgstr "" -#~ msgid "Failed to find VSA %(vsa_id)d" +#~ msgid "Updating volume %(volume_id)s with %(update)s" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)s: Drive %(vol_id)s created. Status %(status)s" +#~ msgid "Volume %s: resetting 'creating' status failed" #~ msgstr "" -#~ msgid "Drive %(vol_name)s (%(vol_disp_name)s) still in creating phase - wait" +#~ msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s" #~ msgstr "" -#~ msgid "" -#~ "VSA ID %(vsa_id)d: Not all volumes " -#~ "are created (%(cvol_real)d of %(cvol_exp)d)" +#~ msgid "Marking volume %s as bootable" #~ msgstr "" #~ msgid "" -#~ "VSA ID %(vsa_id)d: Drive %(vol_name)s " -#~ "(%(vol_disp_name)s) is in %(status)s state" +#~ "Attempting download of %(image_id)s " +#~ "(%(image_location)s) to volume %(volume_id)s" #~ msgstr "" -#~ msgid "Failed to update attach status for volume %(vol_name)s. %(ex)s" +#~ msgid "" +#~ "Downloaded image %(image_id)s (%(image_location)s)" +#~ " to volume %(volume_id)s successfully" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)d: Delete all BE volumes" +#~ msgid "" +#~ "Creating volume glance metadata for " +#~ "volume %(volume_id)s backed by image " +#~ "%(image_id)s with: %(vol_metadata)s" #~ msgstr "" -#~ msgid "VSA ID %(vsa_id)d: Start %(vc_count)d instances" +#~ msgid "" +#~ "Cloning %(volume_id)s from image %(image_id)s" +#~ " at location %(image_location)s" #~ msgstr "" diff --git a/cinder/manager.py b/cinder/manager.py index 68f3609e51..0ef53c0194 100644 --- a/cinder/manager.py +++ b/cinder/manager.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. 
diff --git a/cinder/manager.py b/cinder/manager.py
index 68f3609e51..0ef53c0194 100644
--- a/cinder/manager.py
+++ b/cinder/manager.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2010 United States Government as represented by the
 # Administrator of the National Aeronautics and Space Administration.
 # All Rights Reserved.
@@ -53,90 +51,28 @@
 """
+
+from oslo.config import cfg
+
 from cinder.db import base
-from cinder import flags
 from cinder.openstack.common import log as logging
+from cinder.openstack.common import periodic_task
 from cinder.openstack.common.rpc import dispatcher as rpc_dispatcher
 from cinder.scheduler import rpcapi as scheduler_rpcapi
 from cinder import version
 
-FLAGS = flags.FLAGS
-
-
+CONF = cfg.CONF
 LOG = logging.getLogger(__name__)
 
 
-def periodic_task(*args, **kwargs):
-    """Decorator to indicate that a method is a periodic task.
-
-    This decorator can be used in two ways:
-
-    1. Without arguments '@periodic_task', this will be run on every tick
-       of the periodic scheduler.
-
-    2. With arguments, @periodic_task(ticks_between_runs=N), this will be
-       run on every N ticks of the periodic scheduler.
-    """
-    def decorator(f):
-        f._periodic_task = True
-        f._ticks_between_runs = kwargs.pop('ticks_between_runs', 0)
-        return f
-
-    # NOTE(sirp): The `if` is necessary to allow the decorator to be used with
-    # and without parens.
-    #
-    # In the 'with-parens' case (with kwargs present), this function needs to
-    # return a decorator function since the interpreter will invoke it like:
-    #
-    #   periodic_task(*args, **kwargs)(f)
-    #
-    # In the 'without-parens' case, the original function will be passed
-    # in as the first argument, like:
-    #
-    #   periodic_task(f)
-    if kwargs:
-        return decorator
-    else:
-        return decorator(args[0])
-
-
-class ManagerMeta(type):
-    def __init__(cls, names, bases, dict_):
-        """Metaclass that allows us to collect decorated periodic tasks."""
-        super(ManagerMeta, cls).__init__(names, bases, dict_)
-
-        # NOTE(sirp): if the attribute is not present then we must be the base
-        # class, so, go ahead an initialize it. If the attribute is present,
-        # then we're a subclass so make a copy of it so we don't step on our
-        # parent's toes.
-        try:
-            cls._periodic_tasks = cls._periodic_tasks[:]
-        except AttributeError:
-            cls._periodic_tasks = []
-
-        try:
-            cls._ticks_to_skip = cls._ticks_to_skip.copy()
-        except AttributeError:
-            cls._ticks_to_skip = {}
-
-        for value in cls.__dict__.values():
-            if getattr(value, '_periodic_task', False):
-                task = value
-                name = task.__name__
-                cls._periodic_tasks.append((name, task))
-                cls._ticks_to_skip[name] = task._ticks_between_runs
-
-
-class Manager(base.Base):
-    __metaclass__ = ManagerMeta
-
+class Manager(base.Base, periodic_task.PeriodicTasks):
     # Set RPC API version to 1.0 by default.
     RPC_API_VERSION = '1.0'
 
     def __init__(self, host=None, db_driver=None):
         if not host:
-            host = FLAGS.host
+            host = CONF.host
         self.host = host
         super(Manager, self).__init__(db_driver)
@@ -150,26 +86,7 @@ def create_rpc_dispatcher(self):
 
     def periodic_tasks(self, context, raise_on_error=False):
         """Tasks to be run at a periodic interval."""
-        for task_name, task in self._periodic_tasks:
-            full_task_name = '.'.join([self.__class__.__name__, task_name])
-
-            ticks_to_skip = self._ticks_to_skip[task_name]
-            if ticks_to_skip > 0:
-                LOG.debug(_("Skipping %(full_task_name)s, %(ticks_to_skip)s"
-                            " ticks left until next run"), locals())
-                self._ticks_to_skip[task_name] -= 1
-                continue
-
-            self._ticks_to_skip[task_name] = task._ticks_between_runs
-            LOG.debug(_("Running periodic task %(full_task_name)s"), locals())
-
-            try:
-                task(self, context)
-            except Exception as e:
-                if raise_on_error:
-                    raise
-                LOG.exception(_("Error during %(full_task_name)s: %(e)s"),
-                              locals())
+        return self.run_periodic_tasks(context, raise_on_error=raise_on_error)
 
     def init_host(self):
         """Handle initialization if this is a standalone service.
@@ -184,8 +101,8 @@ def service_version(self, context):
 
     def service_config(self, context):
         config = {}
-        for key in FLAGS:
-            config[key] = FLAGS.get(key, None)
+        for key in CONF:
+            config[key] = CONF.get(key, None)
         return config
@@ -209,10 +126,13 @@ def update_service_capabilities(self, capabilities):
         """Remember these capabilities to send on next periodic update."""
         self.last_capabilities = capabilities
 
-    @periodic_task
+    @periodic_task.periodic_task
    def _publish_service_capabilities(self, context):
         """Pass data back to the scheduler at a periodic interval."""
         if self.last_capabilities:
             LOG.debug(_('Notifying Schedulers of capabilities ...'))
-            self.scheduler_rpcapi.update_service_capabilities(context,
-                self.service_name, self.host, self.last_capabilities)
+            self.scheduler_rpcapi.update_service_capabilities(
+                context,
+                self.service_name,
+                self.host,
+                self.last_capabilities)
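With oslo.config adopted (note the ``from oslo.config import cfg`` import in the hunk above), the bundled copy of the option-handling module at cinder/openstack/common/cfg.py is deleted outright in the next file of the patch; callers get essentially the same API from the external library. A minimal sketch of the replacement usage, assuming oslo.config's documented interface; the option names and defaults are taken from the deleted module's own docstring, not from Cinder code::

    import sys

    from oslo.config import cfg

    CONF = cfg.CONF

    opts = [
        cfg.StrOpt('bind_host',
                   default='0.0.0.0',
                   help='IP address to listen on'),
        cfg.IntOpt('bind_port',
                   default=9292,
                   help='Port number to listen on'),
    ]
    CONF.register_opts(opts)

    # After CONF(sys.argv[1:]) parses the CLI and any config files, option
    # values are available as attributes on the global CONF object:
    #     CONF.bind_host, CONF.bind_port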
- -The schema for each option is defined using the Opt sub-classes, e.g.: - -:: - - common_opts = [ - cfg.StrOpt('bind_host', - default='0.0.0.0', - help='IP address to listen on'), - cfg.IntOpt('bind_port', - default=9292, - help='Port number to listen on') - ] - -Options can be strings, integers, floats, booleans, lists or 'multi strings':: - - enabled_apis_opt = cfg.ListOpt('enabled_apis', - default=['ec2', 'osapi_compute'], - help='List of APIs to enable by default') - - DEFAULT_EXTENSIONS = [ - 'nova.api.openstack.compute.contrib.standard_extensions' - ] - osapi_compute_extension_opt = cfg.MultiStrOpt('osapi_compute_extension', - default=DEFAULT_EXTENSIONS) - -Option schemas are registered with the config manager at runtime, but before -the option is referenced:: - - class ExtensionManager(object): - - enabled_apis_opt = cfg.ListOpt(...) - - def __init__(self, conf): - self.conf = conf - self.conf.register_opt(enabled_apis_opt) - ... - - def _load_extensions(self): - for ext_factory in self.conf.osapi_compute_extension: - .... - -A common usage pattern is for each option schema to be defined in the module or -class which uses the option:: - - opts = ... - - def add_common_opts(conf): - conf.register_opts(opts) - - def get_bind_host(conf): - return conf.bind_host - - def get_bind_port(conf): - return conf.bind_port - -An option may optionally be made available via the command line. Such options -must registered with the config manager before the command line is parsed (for -the purposes of --help and CLI arg validation):: - - cli_opts = [ - cfg.BoolOpt('verbose', - short='v', - default=False, - help='Print more verbose output'), - cfg.BoolOpt('debug', - short='d', - default=False, - help='Print debugging output'), - ] - - def add_common_opts(conf): - conf.register_cli_opts(cli_opts) - -The config manager has two CLI options defined by default, --config-file -and --config-dir:: - - class ConfigOpts(object): - - def __call__(self, ...): - - opts = [ - MultiStrOpt('config-file', - ...), - StrOpt('config-dir', - ...), - ] - - self.register_cli_opts(opts) - -Option values are parsed from any supplied config files using -openstack.common.iniparser. If none are specified, a default set is used -e.g. glance-api.conf and glance-common.conf:: - - glance-api.conf: - [DEFAULT] - bind_port = 9292 - - glance-common.conf: - [DEFAULT] - bind_host = 0.0.0.0 - -Option values in config files override those on the command line. Config files -are parsed in order, with values in later files overriding those in earlier -files. - -The parsing of CLI args and config files is initiated by invoking the config -manager e.g.:: - - conf = ConfigOpts() - conf.register_opt(BoolOpt('verbose', ...)) - conf(sys.argv[1:]) - if conf.verbose: - ... - -Options can be registered as belonging to a group:: - - rabbit_group = cfg.OptGroup(name='rabbit', - title='RabbitMQ options') - - rabbit_host_opt = cfg.StrOpt('host', - default='localhost', - help='IP/hostname to listen on'), - rabbit_port_opt = cfg.IntOpt('port', - default=5672, - help='Port number to listen on') - - def register_rabbit_opts(conf): - conf.register_group(rabbit_group) - # options can be registered under a group in either of these ways: - conf.register_opt(rabbit_host_opt, group=rabbit_group) - conf.register_opt(rabbit_port_opt, group='rabbit') - -If it no group attributes are required other than the group name, the group -need not be explicitly registered e.g. 
- - def register_rabbit_opts(conf): - # The group will automatically be created, equivalent calling:: - # conf.register_group(OptGroup(name='rabbit')) - conf.register_opt(rabbit_port_opt, group='rabbit') - -If no group is specified, options belong to the 'DEFAULT' section of config -files:: - - glance-api.conf: - [DEFAULT] - bind_port = 9292 - ... - - [rabbit] - host = localhost - port = 5672 - use_ssl = False - userid = guest - password = guest - virtual_host = / - -Command-line options in a group are automatically prefixed with the -group name:: - - --rabbit-host localhost --rabbit-port 9999 - -Option values in the default group are referenced as attributes/properties on -the config manager; groups are also attributes on the config manager, with -attributes for each of the options associated with the group:: - - server.start(app, conf.bind_port, conf.bind_host, conf) - - self.connection = kombu.connection.BrokerConnection( - hostname=conf.rabbit.host, - port=conf.rabbit.port, - ...) - -Option values may reference other values using PEP 292 string substitution:: - - opts = [ - cfg.StrOpt('state_path', - default=os.path.join(os.path.dirname(__file__), '../'), - help='Top-level directory for maintaining nova state'), - cfg.StrOpt('sqlite_db', - default='nova.sqlite', - help='file name for sqlite'), - cfg.StrOpt('sql_connection', - default='sqlite:///$state_path/$sqlite_db', - help='connection string for sql database'), - ] - -Note that interpolation can be avoided by using '$$'. - -For command line utilities that dispatch to other command line utilities, the -disable_interspersed_args() method is available. If this this method is called, -then parsing e.g.:: - - script --verbose cmd --debug /tmp/mything - -will no longer return:: - - ['cmd', '/tmp/mything'] - -as the leftover arguments, but will instead return:: - - ['cmd', '--debug', '/tmp/mything'] - -i.e. argument parsing is stopped at the first non-option argument. - -Options may be declared as required so that an error is raised if the user -does not supply a value for the option. - -Options may be declared as secret so that their values are not leaked into -log files: - - opts = [ - cfg.StrOpt('s3_store_access_key', secret=True), - cfg.StrOpt('s3_store_secret_key', secret=True), - ... 
- ] - -This module also contains a global instance of the CommonConfigOpts class -in order to support a common usage pattern in OpenStack: - - from openstack.common import cfg - - opts = [ - cfg.StrOpt('bind_host' default='0.0.0.0'), - cfg.IntOpt('bind_port', default=9292), - ] - - CONF = cfg.CONF - CONF.register_opts(opts) - - def start(server, app): - server.start(app, CONF.bind_port, CONF.bind_host) - -""" - -import collections -import copy -import functools -import glob -import optparse -import os -import string -import sys - -from cinder.openstack.common import iniparser - - -class Error(Exception): - """Base class for cfg exceptions.""" - - def __init__(self, msg=None): - self.msg = msg - - def __str__(self): - return self.msg - - -class ArgsAlreadyParsedError(Error): - """Raised if a CLI opt is registered after parsing.""" - - def __str__(self): - ret = "arguments already parsed" - if self.msg: - ret += ": " + self.msg - return ret - - -class NoSuchOptError(Error, AttributeError): - """Raised if an opt which doesn't exist is referenced.""" - - def __init__(self, opt_name, group=None): - self.opt_name = opt_name - self.group = group - - def __str__(self): - if self.group is None: - return "no such option: %s" % self.opt_name - else: - return "no such option in group %s: %s" % (self.group.name, - self.opt_name) - - -class NoSuchGroupError(Error): - """Raised if a group which doesn't exist is referenced.""" - - def __init__(self, group_name): - self.group_name = group_name - - def __str__(self): - return "no such group: %s" % self.group_name - - -class DuplicateOptError(Error): - """Raised if multiple opts with the same name are registered.""" - - def __init__(self, opt_name): - self.opt_name = opt_name - - def __str__(self): - return "duplicate option: %s" % self.opt_name - - -class RequiredOptError(Error): - """Raised if an option is required but no value is supplied by the user.""" - - def __init__(self, opt_name, group=None): - self.opt_name = opt_name - self.group = group - - def __str__(self): - if self.group is None: - return "value required for option: %s" % self.opt_name - else: - return "value required for option: %s.%s" % (self.group.name, - self.opt_name) - - -class TemplateSubstitutionError(Error): - """Raised if an error occurs substituting a variable in an opt value.""" - - def __str__(self): - return "template substitution error: %s" % self.msg - - -class ConfigFilesNotFoundError(Error): - """Raised if one or more config files are not found.""" - - def __init__(self, config_files): - self.config_files = config_files - - def __str__(self): - return ('Failed to read some config files: %s' % - string.join(self.config_files, ',')) - - -class ConfigFileParseError(Error): - """Raised if there is an error parsing a config file.""" - - def __init__(self, config_file, msg): - self.config_file = config_file - self.msg = msg - - def __str__(self): - return 'Failed to parse %s: %s' % (self.config_file, self.msg) - - -class ConfigFileValueError(Error): - """Raised if a config file value does not match its opt type.""" - pass - - -def _fixpath(p): - """Apply tilde expansion and absolutization to a path.""" - return os.path.abspath(os.path.expanduser(p)) - - -def _get_config_dirs(project=None): - """Return a list of directors where config files may be located. 
- - :param project: an optional project name - - If a project is specified, following directories are returned:: - - ~/.${project}/ - ~/ - /etc/${project}/ - /etc/ - - Otherwise, these directories:: - - ~/ - /etc/ - """ - cfg_dirs = [ - _fixpath(os.path.join('~', '.' + project)) if project else None, - _fixpath('~'), - os.path.join('/etc', project) if project else None, - '/etc' - ] - - return filter(bool, cfg_dirs) - - -def _search_dirs(dirs, basename, extension=""): - """Search a list of directories for a given filename. - - Iterator over the supplied directories, returning the first file - found with the supplied name and extension. - - :param dirs: a list of directories - :param basename: the filename, e.g. 'glance-api' - :param extension: the file extension, e.g. '.conf' - :returns: the path to a matching file, or None - """ - for d in dirs: - path = os.path.join(d, '%s%s' % (basename, extension)) - if os.path.exists(path): - return path - - -def find_config_files(project=None, prog=None, extension='.conf'): - """Return a list of default configuration files. - - :param project: an optional project name - :param prog: the program name, defaulting to the basename of sys.argv[0] - :param extension: the type of the config file - - We default to two config files: [${project}.conf, ${prog}.conf] - - And we look for those config files in the following directories:: - - ~/.${project}/ - ~/ - /etc/${project}/ - /etc/ - - We return an absolute path for (at most) one of each the default config - files, for the topmost directory it exists in. - - For example, if project=foo, prog=bar and /etc/foo/foo.conf, /etc/bar.conf - and ~/.foo/bar.conf all exist, then we return ['/etc/foo/foo.conf', - '~/.foo/bar.conf'] - - If no project name is supplied, we only look for ${prog.conf}. - """ - if prog is None: - prog = os.path.basename(sys.argv[0]) - - cfg_dirs = _get_config_dirs(project) - - config_files = [] - if project: - config_files.append(_search_dirs(cfg_dirs, project, extension)) - config_files.append(_search_dirs(cfg_dirs, prog, extension)) - - return filter(bool, config_files) - - -def _is_opt_registered(opts, opt): - """Check whether an opt with the same name is already registered. - - The same opt may be registered multiple times, with only the first - registration having any effect. However, it is an error to attempt - to register a different opt with the same name. - - :param opts: the set of opts already registered - :param opt: the opt to be registered - :returns: True if the opt was previously registered, False otherwise - :raises: DuplicateOptError if a naming conflict is detected - """ - if opt.dest in opts: - if opts[opt.dest]['opt'] != opt: - raise DuplicateOptError(opt.name) - return True - else: - return False - - -class Opt(object): - - """Base class for all configuration options. - - An Opt object has no public methods, but has a number of public string - properties: - - name: - the name of the option, which may include hyphens - dest: - the (hyphen-less) ConfigOpts property which contains the option value - short: - a single character CLI option name - default: - the default value of the option - metavar: - the name shown as the argument to a CLI option in --help output - help: - an string explaining how the options value is used - """ - multi = False - - def __init__(self, name, dest=None, short=None, default=None, - metavar=None, help=None, secret=False, required=False, - deprecated_name=None): - """Construct an Opt object. - - The only required parameter is the option's name. 
However, it is - common to also supply a default and help string for all options. - - :param name: the option's name - :param dest: the name of the corresponding ConfigOpts property - :param short: a single character CLI option name - :param default: the default value of the option - :param metavar: the option argument to show in --help - :param help: an explanation of how the option is used - :param secret: true iff the value should be obfuscated in log output - :param required: true iff a value must be supplied for this option - :param deprecated_name: deprecated name option. Acts like an alias - """ - self.name = name - if dest is None: - self.dest = self.name.replace('-', '_') - else: - self.dest = dest - self.short = short - self.default = default - self.metavar = metavar - self.help = help - self.secret = secret - self.required = required - if deprecated_name is not None: - self.deprecated_name = deprecated_name.replace('-', '_') - else: - self.deprecated_name = None - - def __ne__(self, another): - return vars(self) != vars(another) - - def _get_from_config_parser(self, cparser, section): - """Retrieves the option value from a MultiConfigParser object. - - This is the method ConfigOpts uses to look up the option value from - config files. Most opt types override this method in order to perform - type appropriate conversion of the returned value. - - :param cparser: a ConfigParser object - :param section: a section name - """ - return self._cparser_get_with_deprecated(cparser, section) - - def _cparser_get_with_deprecated(self, cparser, section): - """If cannot find option as dest try deprecated_name alias.""" - if self.deprecated_name is not None: - return cparser.get(section, [self.dest, self.deprecated_name]) - return cparser.get(section, [self.dest]) - - def _add_to_cli(self, parser, group=None): - """Makes the option available in the command line interface. - - This is the method ConfigOpts uses to add the opt to the CLI interface - as appropriate for the opt type. Some opt types may extend this method, - others may just extend the helper methods it uses. - - :param parser: the CLI option parser - :param group: an optional OptGroup object - """ - container = self._get_optparse_container(parser, group) - kwargs = self._get_optparse_kwargs(group) - prefix = self._get_optparse_prefix('', group) - self._add_to_optparse(container, self.name, self.short, kwargs, prefix, - self.deprecated_name) - - def _add_to_optparse(self, container, name, short, kwargs, prefix='', - deprecated_name=None): - """Add an option to an optparse parser or group. - - :param container: an optparse.OptionContainer object - :param name: the opt name - :param short: the short opt name - :param kwargs: the keyword arguments for add_option() - :param prefix: an optional prefix to prepend to the opt name - :raises: DuplicateOptError if a naming confict is detected - """ - args = ['--' + prefix + name] - if short: - args += ['-' + short] - if deprecated_name: - args += ['--' + prefix + deprecated_name] - for a in args: - if container.has_option(a): - raise DuplicateOptError(a) - container.add_option(*args, **kwargs) - - def _get_optparse_container(self, parser, group): - """Returns an optparse.OptionContainer. 
- - :param parser: an optparse.OptionParser - :param group: an (optional) OptGroup object - :returns: an optparse.OptionGroup if a group is given, else the parser - """ - if group is not None: - return group._get_optparse_group(parser) - else: - return parser - - def _get_optparse_kwargs(self, group, **kwargs): - """Build a dict of keyword arguments for optparse's add_option(). - - Most opt types extend this method to customize the behaviour of the - options added to optparse. - - :param group: an optional group - :param kwargs: optional keyword arguments to add to - :returns: a dict of keyword arguments - """ - dest = self.dest - if group is not None: - dest = group.name + '_' + dest - kwargs.update({'dest': dest, - 'metavar': self.metavar, - 'help': self.help, }) - return kwargs - - def _get_optparse_prefix(self, prefix, group): - """Build a prefix for the CLI option name, if required. - - CLI options in a group are prefixed with the group's name in order - to avoid conflicts between similarly named options in different - groups. - - :param prefix: an existing prefix to append to (e.g. 'no' or '') - :param group: an optional OptGroup object - :returns: a CLI option prefix including the group name, if appropriate - """ - if group is not None: - return group.name + '-' + prefix - else: - return prefix - - -class StrOpt(Opt): - """ - String opts do not have their values transformed and are returned as - str objects. - """ - pass - - -class BoolOpt(Opt): - - """ - Bool opts are set to True or False on the command line using --optname or - --noopttname respectively. - - In config files, boolean values are case insensitive and can be set using - 1/0, yes/no, true/false or on/off. - """ - - _boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True, - '0': False, 'no': False, 'false': False, 'off': False} - - def _get_from_config_parser(self, cparser, section): - """Retrieve the opt value as a boolean from ConfigParser.""" - def convert_bool(v): - value = self._boolean_states.get(v.lower()) - if value is None: - raise ValueError('Unexpected boolean value %r' % v) - - return value - - return [convert_bool(v) for v in - self._cparser_get_with_deprecated(cparser, section)] - - def _add_to_cli(self, parser, group=None): - """Extends the base class method to add the --nooptname option.""" - super(BoolOpt, self)._add_to_cli(parser, group) - self._add_inverse_to_optparse(parser, group) - - def _add_inverse_to_optparse(self, parser, group): - """Add the --nooptname option to the option parser.""" - container = self._get_optparse_container(parser, group) - kwargs = self._get_optparse_kwargs(group, action='store_false') - prefix = self._get_optparse_prefix('no', group) - kwargs["help"] = "The inverse of --" + self.name - self._add_to_optparse(container, self.name, None, kwargs, prefix, - self.deprecated_name) - - def _get_optparse_kwargs(self, group, action='store_true', **kwargs): - """Extends the base optparse keyword dict for boolean options.""" - return super(BoolOpt, - self)._get_optparse_kwargs(group, action=action, **kwargs) - - -class IntOpt(Opt): - - """Int opt values are converted to integers using the int() builtin.""" - - def _get_from_config_parser(self, cparser, section): - """Retrieve the opt value as a integer from ConfigParser.""" - return [int(v) for v in self._cparser_get_with_deprecated(cparser, - section)] - - def _get_optparse_kwargs(self, group, **kwargs): - """Extends the base optparse keyword dict for integer options.""" - return super(IntOpt, - 
self)._get_optparse_kwargs(group, type='int', **kwargs) - - -class FloatOpt(Opt): - - """Float opt values are converted to floats using the float() builtin.""" - - def _get_from_config_parser(self, cparser, section): - """Retrieve the opt value as a float from ConfigParser.""" - return [float(v) for v in - self._cparser_get_with_deprecated(cparser, section)] - - def _get_optparse_kwargs(self, group, **kwargs): - """Extends the base optparse keyword dict for float options.""" - return super(FloatOpt, - self)._get_optparse_kwargs(group, type='float', **kwargs) - - -class ListOpt(Opt): - - """ - List opt values are simple string values separated by commas. The opt value - is a list containing these strings. - """ - - def _get_from_config_parser(self, cparser, section): - """Retrieve the opt value as a list from ConfigParser.""" - return [v.split(',') for v in - self._cparser_get_with_deprecated(cparser, section)] - - def _get_optparse_kwargs(self, group, **kwargs): - """Extends the base optparse keyword dict for list options.""" - return super(ListOpt, - self)._get_optparse_kwargs(group, - type='string', - action='callback', - callback=self._parse_list, - **kwargs) - - def _parse_list(self, option, opt, value, parser): - """An optparse callback for parsing an option value into a list.""" - setattr(parser.values, self.dest, value.split(',')) - - -class MultiStrOpt(Opt): - - """ - Multistr opt values are string opts which may be specified multiple times. - The opt value is a list containing all the string values specified. - """ - multi = True - - def _get_optparse_kwargs(self, group, **kwargs): - """Extends the base optparse keyword dict for multi str options.""" - return super(MultiStrOpt, - self)._get_optparse_kwargs(group, action='append') - - def _cparser_get_with_deprecated(self, cparser, section): - """If cannot find option as dest try deprecated_name alias.""" - if self.deprecated_name is not None: - return cparser.get(section, [self.dest, self.deprecated_name], - multi=True) - return cparser.get(section, [self.dest], multi=True) - - -class OptGroup(object): - - """ - Represents a group of opts. - - CLI opts in the group are automatically prefixed with the group name. - - Each group corresponds to a section in config files. - - An OptGroup object has no public methods, but has a number of public string - properties: - - name: - the name of the group - title: - the group title as displayed in --help - help: - the group description as displayed in --help - """ - - def __init__(self, name, title=None, help=None): - """Constructs an OptGroup object. - - :param name: the group name - :param title: the group title for --help - :param help: the group description for --help - """ - self.name = name - if title is None: - self.title = "%s options" % title - else: - self.title = title - self.help = help - - self._opts = {} # dict of dicts of (opt:, override:, default:) - self._optparse_group = None - - def _register_opt(self, opt): - """Add an opt to this group. - - :param opt: an Opt object - :returns: False if previously registered, True otherwise - :raises: DuplicateOptError if a naming conflict is detected - """ - if _is_opt_registered(self._opts, opt): - return False - - self._opts[opt.dest] = {'opt': opt} - - return True - - def _unregister_opt(self, opt): - """Remove an opt from this group. 
- - :param opt: an Opt object - """ - if opt.dest in self._opts: - del self._opts[opt.dest] - - def _get_optparse_group(self, parser): - """Build an optparse.OptionGroup for this group.""" - if self._optparse_group is None: - self._optparse_group = optparse.OptionGroup(parser, self.title, - self.help) - return self._optparse_group - - def _clear(self): - """Clear this group's option parsing state.""" - self._optparse_group = None - - -class ParseError(iniparser.ParseError): - def __init__(self, msg, lineno, line, filename): - super(ParseError, self).__init__(msg, lineno, line) - self.filename = filename - - def __str__(self): - return 'at %s:%d, %s: %r' % (self.filename, self.lineno, - self.msg, self.line) - - -class ConfigParser(iniparser.BaseParser): - def __init__(self, filename, sections): - super(ConfigParser, self).__init__() - self.filename = filename - self.sections = sections - self.section = None - - def parse(self): - with open(self.filename) as f: - return super(ConfigParser, self).parse(f) - - def new_section(self, section): - self.section = section - self.sections.setdefault(self.section, {}) - - def assignment(self, key, value): - if not self.section: - raise self.error_no_section() - - self.sections[self.section].setdefault(key, []) - self.sections[self.section][key].append('\n'.join(value)) - - def parse_exc(self, msg, lineno, line=None): - return ParseError(msg, lineno, line, self.filename) - - def error_no_section(self): - return self.parse_exc('Section must be started before assignment', - self.lineno) - - -class MultiConfigParser(object): - def __init__(self): - self.parsed = [] - - def read(self, config_files): - read_ok = [] - - for filename in config_files: - sections = {} - parser = ConfigParser(filename, sections) - - try: - parser.parse() - except IOError: - continue - self.parsed.insert(0, sections) - read_ok.append(filename) - - return read_ok - - def get(self, section, names, multi=False): - rvalue = [] - for sections in self.parsed: - if section not in sections: - continue - for name in names: - if name in sections[section]: - if multi: - rvalue = sections[section][name] + rvalue - else: - return sections[section][name] - if multi and rvalue != []: - return rvalue - raise KeyError - - -class ConfigOpts(collections.Mapping): - - """ - Config options which may be set on the command line or in config files. - - ConfigOpts is a configuration option manager with APIs for registering - option schemas, grouping options, parsing option values and retrieving - the values of options. - """ - - def __init__(self): - """Construct a ConfigOpts object.""" - self._opts = {} # dict of dicts of (opt:, override:, default:) - self._groups = {} - - self._args = None - self._oparser = None - self._cparser = None - self._cli_values = {} - self.__cache = {} - self._config_opts = [] - self._disable_interspersed_args = False - - def _setup(self, project, prog, version, usage, default_config_files): - """Initialize a ConfigOpts object for option parsing.""" - if prog is None: - prog = os.path.basename(sys.argv[0]) - - if default_config_files is None: - default_config_files = find_config_files(project, prog) - - self._oparser = optparse.OptionParser(prog=prog, - version=version, - usage=usage) - if self._disable_interspersed_args: - self._oparser.disable_interspersed_args() - - self._config_opts = [ - MultiStrOpt('config-file', - default=default_config_files, - metavar='PATH', - help='Path to a config file to use. 
Multiple config ' - 'files can be specified, with values in later ' - 'files taking precedence. The default files ' - ' used are: %s' % (default_config_files, )), - StrOpt('config-dir', - metavar='DIR', - help='Path to a config directory to pull *.conf ' - 'files from. This file set is sorted, so as to ' - 'provide a predictable parse order if individual ' - 'options are over-ridden. The set is parsed after ' - 'the file(s), if any, specified via --config-file, ' - 'hence over-ridden options in the directory take ' - 'precedence.'), - ] - self.register_cli_opts(self._config_opts) - - self.project = project - self.prog = prog - self.version = version - self.usage = usage - self.default_config_files = default_config_files - - def __clear_cache(f): - @functools.wraps(f) - def __inner(self, *args, **kwargs): - if kwargs.pop('clear_cache', True): - self.__cache.clear() - return f(self, *args, **kwargs) - - return __inner - - def __call__(self, - args=None, - project=None, - prog=None, - version=None, - usage=None, - default_config_files=None): - """Parse command line arguments and config files. - - Calling a ConfigOpts object causes the supplied command line arguments - and config files to be parsed, causing opt values to be made available - as attributes of the object. - - The object may be called multiple times, each time causing the previous - set of values to be overwritten. - - Automatically registers the --config-file option with either a supplied - list of default config files, or a list from find_config_files(). - - If the --config-dir option is set, any *.conf files from this - directory are pulled in, after all the file(s) specified by the - --config-file option. - - :param args: command line arguments (defaults to sys.argv[1:]) - :param project: the toplevel project name, used to locate config files - :param prog: the name of the program (defaults to sys.argv[0] basename) - :param version: the program version (for --version) - :param usage: a usage string (%prog will be expanded) - :param default_config_files: config files to use by default - :returns: the list of arguments left over after parsing options - :raises: SystemExit, ConfigFilesNotFoundError, ConfigFileParseError, - RequiredOptError, DuplicateOptError - """ - self.clear() - - self._setup(project, prog, version, usage, default_config_files) - - self._cli_values, leftovers = self._parse_cli_opts(args) - - self._parse_config_files() - - self._check_required_opts() - - return leftovers - - def __getattr__(self, name): - """Look up an option value and perform string substitution. 
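Tying __call__ to the registration methods that follow, a minimal usage sketch (hypothetical opt name; behaviour as documented in the docstring above)::

    CONF = ConfigOpts()
    CONF.register_cli_opt(StrOpt('host', default='127.0.0.1',
                                 help='Host to bind to'))
    # Parses argv plus any --config-file/--config-dir input; arguments
    # not consumed as options are returned as leftovers.
    leftovers = CONF(args=['--host', '10.0.0.5'], project='cinder')
    assert CONF.host == '10.0.0.5'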
-
-        :param name: the opt name (or 'dest', more precisely)
-        :returns: the option value (after string substitution) or a GroupAttr
-        :raises: NoSuchOptError, ConfigFileValueError, TemplateSubstitutionError
-        """
-        return self._get(name)
-
-    def __getitem__(self, key):
-        """Look up an option value and perform string substitution."""
-        return self.__getattr__(key)
-
-    def __contains__(self, key):
-        """Return True if key is the name of a registered opt or group."""
-        return key in self._opts or key in self._groups
-
-    def __iter__(self):
-        """Iterate over all registered opt and group names."""
-        for key in self._opts.keys() + self._groups.keys():
-            yield key
-
-    def __len__(self):
-        """Return the number of options and option groups."""
-        return len(self._opts) + len(self._groups)
-
-    def reset(self):
-        """Clear the object state and unset overrides and defaults."""
-        self._unset_defaults_and_overrides()
-        self.clear()
-
-    @__clear_cache
-    def clear(self):
-        """Clear the state of the object to before it was called."""
-        self._args = None
-        self._cli_values.clear()
-        self._oparser = None
-        self._cparser = None
-        self.unregister_opts(self._config_opts)
-        for group in self._groups.values():
-            group._clear()
-
-    @__clear_cache
-    def register_opt(self, opt, group=None):
-        """Register an option schema.
-
-        Registering an option schema makes any option value which is previously
-        or subsequently parsed from the command line or config files available
-        as an attribute of this object.
-
-        :param opt: an instance of an Opt sub-class
-        :param group: an optional OptGroup object or group name
-        :return: False if the opt was already registered, True otherwise
-        :raises: DuplicateOptError
-        """
-        if group is not None:
-            return self._get_group(group, autocreate=True)._register_opt(opt)
-
-        if _is_opt_registered(self._opts, opt):
-            return False
-
-        self._opts[opt.dest] = {'opt': opt}
-
-        return True
-
-    @__clear_cache
-    def register_opts(self, opts, group=None):
-        """Register multiple option schemas at once."""
-        for opt in opts:
-            self.register_opt(opt, group, clear_cache=False)
-
-    @__clear_cache
-    def register_cli_opt(self, opt, group=None):
-        """Register a CLI option schema.
-
-        CLI option schemas must be registered before the command line and
-        config files are parsed. This is to ensure that all CLI options are
-        shown in --help and option validation works as expected.
-
-        :param opt: an instance of an Opt sub-class
-        :param group: an optional OptGroup object or group name
-        :return: False if the opt was already registered, True otherwise
-        :raises: DuplicateOptError, ArgsAlreadyParsedError
-        """
-        if self._args is not None:
-            raise ArgsAlreadyParsedError("cannot register CLI option")
-
-        return self.register_opt(opt, group, clear_cache=False)
-
-    @__clear_cache
-    def register_cli_opts(self, opts, group=None):
-        """Register multiple CLI option schemas at once."""
-        for opt in opts:
-            self.register_cli_opt(opt, group, clear_cache=False)
-
-    def register_group(self, group):
-        """Register an option group.
-
-        An option group must be registered before options can be registered
-        with the group.
-
-        :param group: an OptGroup object
-        """
-        if group.name in self._groups:
-            return
-
-        self._groups[group.name] = copy.copy(group)
-
-    @__clear_cache
-    def unregister_opt(self, opt, group=None):
-        """Unregister an option.
-
-        :param opt: an Opt object
-        :param group: an optional OptGroup object or group name
-        :raises: ArgsAlreadyParsedError, NoSuchGroupError
-        """
-        if self._args is not None:
-            raise ArgsAlreadyParsedError("reset before unregistering options")
-
-        if group is not None:
-            self._get_group(group)._unregister_opt(opt)
-        elif opt.dest in self._opts:
-            del self._opts[opt.dest]
-
-    @__clear_cache
-    def unregister_opts(self, opts, group=None):
-        """Unregister multiple option schemas at once."""
-        for opt in opts:
-            self.unregister_opt(opt, group, clear_cache=False)
-
-    def import_opt(self, name, module_str, group=None):
-        """Import an option definition from a module.
-
-        Import a module and check that a given option is registered.
-
-        This is intended for use with global configuration objects
-        like cfg.CONF where modules commonly register options with
-        CONF at module load time. If one module requires an option
-        defined by another module it can use this method to explicitly
-        declare the dependency.
-
-        :param name: the name/dest of the opt
-        :param module_str: the name of a module to import
-        :param group: an optional OptGroup object or group name
-        :raises: NoSuchOptError, NoSuchGroupError
-        """
-        __import__(module_str)
-        self._get_opt_info(name, group)
-
-    @__clear_cache
-    def set_override(self, name, override, group=None):
-        """Override an opt value.
-
-        Override the command line, config file and default values of a
-        given option.
-
-        :param name: the name/dest of the opt
-        :param override: the override value
-        :param group: an optional OptGroup object or group name
-        :raises: NoSuchOptError, NoSuchGroupError
-        """
-        opt_info = self._get_opt_info(name, group)
-        opt_info['override'] = override
-
-    @__clear_cache
-    def set_default(self, name, default, group=None):
-        """Override an opt's default value.
-
-        Override the default value of a given option. A command line or
-        config file value will still take precedence over this default.
-
-        :param name: the name/dest of the opt
-        :param default: the default value
-        :param group: an optional OptGroup object or group name
-        :raises: NoSuchOptError, NoSuchGroupError
-        """
-        opt_info = self._get_opt_info(name, group)
-        opt_info['default'] = default
-
-    @__clear_cache
-    def clear_override(self, name, group=None):
-        """Clear an override of an opt value.
-
-        Clear a previously set override of the command line, config file
-        and default values of a given option.
-
-        :param name: the name/dest of the opt
-        :param group: an optional OptGroup object or group name
-        :raises: NoSuchOptError, NoSuchGroupError
-        """
-        opt_info = self._get_opt_info(name, group)
-        opt_info.pop('override', None)
-
-    @__clear_cache
-    def clear_default(self, name, group=None):
-        """Clear an override of an opt's default value.
-
-        Clear a previously set override of the default value of a given option.
-
-        :param name: the name/dest of the opt
-        :param group: an optional OptGroup object or group name
-        :raises: NoSuchOptError, NoSuchGroupError
-        """
-        opt_info = self._get_opt_info(name, group)
-        opt_info.pop('default', None)
-
-    def _all_opt_infos(self):
-        """A generator function for iterating over opt infos."""
-        for info in self._opts.values():
-            yield info, None
-        for group in self._groups.values():
-            for info in group._opts.values():
-                yield info, group
-
-    def _all_opts(self):
-        """A generator function for iterating over opts."""
-        for info, group in self._all_opt_infos():
-            yield info['opt'], group
-
-    def _unset_defaults_and_overrides(self):
-        """Unset any default or override on all options."""
-        for info, group in self._all_opt_infos():
-            info.pop('default', None)
-            info.pop('override', None)
-
-    def disable_interspersed_args(self):
-        """Set parsing to stop on the first non-option.
-
-        If this method is called, then parsing e.g.
-
-        script --verbose cmd --debug /tmp/mything
-
-        will no longer return:
-
-        ['cmd', '/tmp/mything']
-
-        as the leftover arguments, but will instead return:
-
-        ['cmd', '--debug', '/tmp/mything']
-
-        i.e. argument parsing is stopped at the first non-option argument.
-        """
-        self._disable_interspersed_args = True
-
-    def enable_interspersed_args(self):
-        """Set parsing to not stop on the first non-option.
-
-        This is the default behaviour."""
-        self._disable_interspersed_args = False
-
-    def find_file(self, name):
-        """Locate a file alongside the config files.
-
-        Search for a file with the supplied basename in the directories
-        which we have already loaded config files from and other known
-        configuration directories.
-
-        The directory, if any, supplied by the config_dir option is
-        searched first. Then the config_file option is iterated over
-        and each of the base directories of the config_files values
-        are searched. Failing both of these, the standard directories
-        searched by the module level find_config_files() function is
-        used. The first matching file is returned.
-
-        :param name: the filename, e.g. 'policy.json'
-        :returns: the path to a matching file, or None
-        """
-        dirs = []
-        if self.config_dir:
-            dirs.append(_fixpath(self.config_dir))
-
-        for cf in reversed(self.config_file):
-            dirs.append(os.path.dirname(_fixpath(cf)))
-
-        dirs.extend(_get_config_dirs(self.project))
-
-        return _search_dirs(dirs, name)
-
-    def log_opt_values(self, logger, lvl):
-        """Log the value of all registered opts.
-
-        It's often useful for an app to log its configuration to a log file at
-        startup for debugging. This method dumps the entire config state to
-        the supplied logger at a given log level.
-
-        :param logger: a logging.Logger object
-        :param lvl: the log level (e.g.
logging.DEBUG) arg to logger.log() - """ - logger.log(lvl, "*" * 80) - logger.log(lvl, "Configuration options gathered from:") - logger.log(lvl, "command line args: %s", self._args) - logger.log(lvl, "config files: %s", self.config_file) - logger.log(lvl, "=" * 80) - - def _sanitize(opt, value): - """Obfuscate values of options declared secret""" - return value if not opt.secret else '*' * len(str(value)) - - for opt_name in sorted(self._opts): - opt = self._get_opt_info(opt_name)['opt'] - logger.log(lvl, "%-30s = %s", opt_name, - _sanitize(opt, getattr(self, opt_name))) - - for group_name in self._groups: - group_attr = self.GroupAttr(self, self._get_group(group_name)) - for opt_name in sorted(self._groups[group_name]._opts): - opt = self._get_opt_info(opt_name, group_name)['opt'] - logger.log(lvl, "%-30s = %s", - "%s.%s" % (group_name, opt_name), - _sanitize(opt, getattr(group_attr, opt_name))) - - logger.log(lvl, "*" * 80) - - def print_usage(self, file=None): - """Print the usage message for the current program.""" - self._oparser.print_usage(file) - - def print_help(self, file=None): - """Print the help message for the current program.""" - self._oparser.print_help(file) - - def _get(self, name, group=None): - if isinstance(group, OptGroup): - key = (group.name, name) - else: - key = (group, name) - try: - return self.__cache[key] - except KeyError: - value = self._substitute(self._do_get(name, group)) - self.__cache[key] = value - return value - - def _do_get(self, name, group=None): - """Look up an option value. - - :param name: the opt name (or 'dest', more precisely) - :param group: an OptGroup - :returns: the option value, or a GroupAttr object - :raises: NoSuchOptError, NoSuchGroupError, ConfigFileValueError, - TemplateSubstitutionError - """ - if group is None and name in self._groups: - return self.GroupAttr(self, self._get_group(name)) - - info = self._get_opt_info(name, group) - opt = info['opt'] - - if 'override' in info: - return info['override'] - - values = [] - if self._cparser is not None: - section = group.name if group is not None else 'DEFAULT' - try: - value = opt._get_from_config_parser(self._cparser, section) - except KeyError: - pass - except ValueError as ve: - raise ConfigFileValueError(str(ve)) - else: - if not opt.multi: - # No need to continue since the last value wins - return value[-1] - values.extend(value) - - name = name if group is None else group.name + '_' + name - value = self._cli_values.get(name) - if value is not None: - if not opt.multi: - return value - - return value + values - - if values: - return values - - if 'default' in info: - return info['default'] - - return opt.default - - def _substitute(self, value): - """Perform string template substitution. - - Substitute any template variables (e.g. $foo, ${bar}) in the supplied - string value(s) with opt values. - - :param value: the string value, or list of string values - :returns: the substituted string(s) - """ - if isinstance(value, list): - return [self._substitute(i) for i in value] - elif isinstance(value, str): - tmpl = string.Template(value) - return tmpl.safe_substitute(self.StrSubWrapper(self)) - else: - return value - - def _get_group(self, group_or_name, autocreate=False): - """Looks up a OptGroup object. - - Helper function to return an OptGroup given a parameter which can - either be the group's name or an OptGroup object. 
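The _substitute() method above is what lets one option's value reference another. A small sketch of the $name / ${name} behaviour (hypothetical opt names)::

    CONF = ConfigOpts()
    CONF.register_opt(StrOpt('state_path', default='/var/lib/cinder'))
    # The template is resolved against other registered opts at lookup
    # time via StrSubWrapper (defined later in this module).
    CONF.register_opt(StrOpt('volumes_dir', default='$state_path/volumes'))
    CONF([])
    assert CONF.volumes_dir == '/var/lib/cinder/volumes'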
- - The OptGroup object returned is from the internal dict of OptGroup - objects, which will be a copy of any OptGroup object that users of - the API have access to. - - :param group_or_name: the group's name or the OptGroup object itself - :param autocreate: whether to auto-create the group if it's not found - :raises: NoSuchGroupError - """ - group = group_or_name if isinstance(group_or_name, OptGroup) else None - group_name = group.name if group else group_or_name - - if not group_name in self._groups: - if not group is None or not autocreate: - raise NoSuchGroupError(group_name) - - self.register_group(OptGroup(name=group_name)) - - return self._groups[group_name] - - def _get_opt_info(self, opt_name, group=None): - """Return the (opt, override, default) dict for an opt. - - :param opt_name: an opt name/dest - :param group: an optional group name or OptGroup object - :raises: NoSuchOptError, NoSuchGroupError - """ - if group is None: - opts = self._opts - else: - group = self._get_group(group) - opts = group._opts - - if not opt_name in opts: - raise NoSuchOptError(opt_name, group) - - return opts[opt_name] - - def _parse_config_files(self): - """Parse the config files from --config-file and --config-dir. - - :raises: ConfigFilesNotFoundError, ConfigFileParseError - """ - config_files = list(self.config_file) - - if self.config_dir: - config_dir_glob = os.path.join(self.config_dir, '*.conf') - config_files += sorted(glob.glob(config_dir_glob)) - - config_files = [_fixpath(p) for p in config_files] - - self._cparser = MultiConfigParser() - - try: - read_ok = self._cparser.read(config_files) - except iniparser.ParseError as pe: - raise ConfigFileParseError(pe.filename, str(pe)) - - if read_ok != config_files: - not_read_ok = filter(lambda f: f not in read_ok, config_files) - raise ConfigFilesNotFoundError(not_read_ok) - - def _check_required_opts(self): - """Check that all opts marked as required have values specified. - - :raises: RequiredOptError - """ - for info, group in self._all_opt_infos(): - opt = info['opt'] - - if opt.required: - if ('default' in info or 'override' in info): - continue - - if self._get(opt.name, group) is None: - raise RequiredOptError(opt.name, group) - - def _parse_cli_opts(self, args): - """Parse command line options. - - Initializes the command line option parser and parses the supplied - command line arguments. - - :param args: the command line arguments - :returns: a dict of parsed option values - :raises: SystemExit, DuplicateOptError - - """ - self._args = args - - for opt, group in self._all_opts(): - opt._add_to_cli(self._oparser, group) - - values, leftovers = self._oparser.parse_args(args) - - return vars(values), leftovers - - class GroupAttr(collections.Mapping): - - """ - A helper class representing the option values of a group as a mapping - and attributes. - """ - - def __init__(self, conf, group): - """Construct a GroupAttr object. 
- - :param conf: a ConfigOpts object - :param group: an OptGroup object - """ - self.conf = conf - self.group = group - - def __getattr__(self, name): - """Look up an option value and perform template substitution.""" - return self.conf._get(name, self.group) - - def __getitem__(self, key): - """Look up an option value and perform string substitution.""" - return self.__getattr__(key) - - def __contains__(self, key): - """Return True if key is the name of a registered opt or group.""" - return key in self.group._opts - - def __iter__(self): - """Iterate over all registered opt and group names.""" - for key in self.group._opts.keys(): - yield key - - def __len__(self): - """Return the number of options and option groups.""" - return len(self.group._opts) - - class StrSubWrapper(object): - - """ - A helper class exposing opt values as a dict for string substitution. - """ - - def __init__(self, conf): - """Construct a StrSubWrapper object. - - :param conf: a ConfigOpts object - """ - self.conf = conf - - def __getitem__(self, key): - """Look up an opt value from the ConfigOpts object. - - :param key: an opt name - :returns: an opt value - :raises: TemplateSubstitutionError if attribute is a group - """ - value = getattr(self.conf, key) - if isinstance(value, self.conf.GroupAttr): - raise TemplateSubstitutionError( - 'substituting group %s not supported' % key) - return value - - -class CommonConfigOpts(ConfigOpts): - - DEFAULT_LOG_FORMAT = "%(asctime)s %(levelname)8s [%(name)s] %(message)s" - DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S" - - common_cli_opts = [ - BoolOpt('debug', - short='d', - default=False, - help='Print debugging output'), - BoolOpt('verbose', - short='v', - default=False, - help='Print more verbose output'), - ] - - logging_cli_opts = [ - StrOpt('log-config', - metavar='PATH', - help='If this option is specified, the logging configuration ' - 'file specified is used and overrides any other logging ' - 'options specified. Please see the Python logging module ' - 'documentation for details on logging configuration ' - 'files.'), - StrOpt('log-format', - default=DEFAULT_LOG_FORMAT, - metavar='FORMAT', - help='A logging.Formatter log message format string which may ' - 'use any of the available logging.LogRecord attributes. ' - 'Default: %default'), - StrOpt('log-date-format', - default=DEFAULT_LOG_DATE_FORMAT, - metavar='DATE_FORMAT', - help='Format string for %(asctime)s in log records. ' - 'Default: %default'), - StrOpt('log-file', - metavar='PATH', - help='(Optional) Name of log file to output to. ' - 'If not set, logging will go to stdout.'), - StrOpt('log-dir', - help='(Optional) The directory to keep log files in ' - '(will be prepended to --logfile)'), - BoolOpt('use-syslog', - default=False, - help='Use syslog for logging.'), - StrOpt('syslog-log-facility', - default='LOG_USER', - help='syslog facility to receive log lines') - ] - - def __init__(self): - super(CommonConfigOpts, self).__init__() - self.register_cli_opts(self.common_cli_opts) - self.register_cli_opts(self.logging_cli_opts) - - -CONF = CommonConfigOpts() diff --git a/cinder/openstack/common/config/generator.py b/cinder/openstack/common/config/generator.py new file mode 100755 index 0000000000..f72baab36e --- /dev/null +++ b/cinder/openstack/common/config/generator.py @@ -0,0 +1,268 @@ +# Copyright 2012 SINA Corporation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""Extracts OpenStack config option info from module(s).""" + +from __future__ import print_function + +import imp +import os +import re +import socket +import sys +import textwrap + +from oslo.config import cfg +import six + +from cinder.openstack.common import gettextutils +from cinder.openstack.common import importutils + +gettextutils.install('cinder') + +STROPT = "StrOpt" +BOOLOPT = "BoolOpt" +INTOPT = "IntOpt" +FLOATOPT = "FloatOpt" +LISTOPT = "ListOpt" +MULTISTROPT = "MultiStrOpt" + +OPT_TYPES = { + STROPT: 'string value', + BOOLOPT: 'boolean value', + INTOPT: 'integer value', + FLOATOPT: 'floating point value', + LISTOPT: 'list value', + MULTISTROPT: 'multi valued', +} + +OPTION_REGEX = re.compile(r"(%s)" % "|".join([STROPT, BOOLOPT, INTOPT, + FLOATOPT, LISTOPT, + MULTISTROPT])) + +PY_EXT = ".py" +BASEDIR = os.path.abspath(os.path.join(os.path.dirname(__file__), + "../../../../")) +WORDWRAP_WIDTH = 60 + + +def generate(srcfiles): + mods_by_pkg = dict() + for filepath in srcfiles: + pkg_name = filepath.split(os.sep)[1] + mod_str = '.'.join(['.'.join(filepath.split(os.sep)[:-1]), + os.path.basename(filepath).split('.')[0]]) + mods_by_pkg.setdefault(pkg_name, list()).append(mod_str) + # NOTE(lzyeval): place top level modules before packages + pkg_names = filter(lambda x: x.endswith(PY_EXT), mods_by_pkg.keys()) + pkg_names.sort() + ext_names = filter(lambda x: x not in pkg_names, mods_by_pkg.keys()) + ext_names.sort() + pkg_names.extend(ext_names) + + # opts_by_group is a mapping of group name to an options list + # The options list is a list of (module, options) tuples + opts_by_group = {'DEFAULT': []} + + extra_modules = os.getenv("OSLO_CONFIG_GENERATOR_EXTRA_MODULES", "") + if extra_modules: + for module_name in extra_modules.split(','): + module_name = module_name.strip() + module = _import_module(module_name) + if module: + for group, opts in _list_opts(module): + opts_by_group.setdefault(group, []).append((module_name, + opts)) + + for pkg_name in pkg_names: + mods = mods_by_pkg.get(pkg_name) + mods.sort() + for mod_str in mods: + if mod_str.endswith('.__init__'): + mod_str = mod_str[:mod_str.rfind(".")] + + mod_obj = _import_module(mod_str) + if not mod_obj: + raise RuntimeError("Unable to import module %s" % mod_str) + + for group, opts in _list_opts(mod_obj): + opts_by_group.setdefault(group, []).append((mod_str, opts)) + + print_group_opts('DEFAULT', opts_by_group.pop('DEFAULT', [])) + for group, opts in opts_by_group.items(): + print_group_opts(group, opts) + + +def _import_module(mod_str): + try: + if mod_str.startswith('bin.'): + imp.load_source(mod_str[4:], os.path.join('bin', mod_str[4:])) + return sys.modules[mod_str[4:]] + else: + return importutils.import_module(mod_str) + except Exception as e: + sys.stderr.write("Error importing module %s: %s\n" % (mod_str, str(e))) + return None + + +def _is_in_group(opt, group): + "Check if opt is in group." + for key, value in group._opts.items(): + if value['opt'] == opt: + return True + return False + + +def _guess_groups(opt, mod_obj): + # is it in the DEFAULT group? 
+ if _is_in_group(opt, cfg.CONF): + return 'DEFAULT' + + # what other groups is it in? + for key, value in cfg.CONF.items(): + if isinstance(value, cfg.CONF.GroupAttr): + if _is_in_group(opt, value._group): + return value._group.name + + raise RuntimeError( + "Unable to find group for option %s, " + "maybe it's defined twice in the same group?" + % opt.name + ) + + +def _list_opts(obj): + def is_opt(o): + return (isinstance(o, cfg.Opt) and + not isinstance(o, cfg.SubCommandOpt)) + + opts = list() + for attr_str in dir(obj): + attr_obj = getattr(obj, attr_str) + if is_opt(attr_obj): + opts.append(attr_obj) + elif (isinstance(attr_obj, list) and + all(map(lambda x: is_opt(x), attr_obj))): + opts.extend(attr_obj) + + ret = {} + for opt in opts: + ret.setdefault(_guess_groups(opt, obj), []).append(opt) + return ret.items() + + +def print_group_opts(group, opts_by_module): + print("[%s]" % group) + print('') + for mod, opts in opts_by_module: + print('#') + print('# Options defined in %s' % mod) + print('#') + print('') + for opt in opts: + _print_opt(opt) + print('') + + +def _get_my_ip(): + try: + csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + csock.connect(('8.8.8.8', 80)) + (addr, port) = csock.getsockname() + csock.close() + return addr + except socket.error: + return None + + +def _sanitize_default(name, value): + """Set up a reasonably sensible default for pybasedir, my_ip and host.""" + if value.startswith(sys.prefix): + # NOTE(jd) Don't use os.path.join, because it is likely to think the + # second part is an absolute pathname and therefore drop the first + # part. + value = os.path.normpath("/usr/" + value[len(sys.prefix):]) + elif value.startswith(BASEDIR): + return value.replace(BASEDIR, '/usr/lib/python/site-packages') + elif BASEDIR in value: + return value.replace(BASEDIR, '') + elif value == _get_my_ip(): + return '10.0.0.1' + elif value == socket.gethostname() and 'host' in name: + return 'cinder' + elif value.strip() != value: + return '"%s"' % value + return value + + +def _print_opt(opt): + opt_name, opt_default, opt_help = opt.dest, opt.default, opt.help + if not opt_help: + sys.stderr.write('WARNING: "%s" is missing help string.\n' % opt_name) + opt_help = "" + opt_type = None + try: + opt_type = OPTION_REGEX.search(str(type(opt))).group(0) + except (ValueError, AttributeError) as err: + sys.stderr.write("%s\n" % str(err)) + sys.exit(1) + opt_help += ' (' + OPT_TYPES[opt_type] + ')' + print('#', "\n# ".join(textwrap.wrap(opt_help, WORDWRAP_WIDTH))) + if opt.deprecated_opts: + for deprecated_opt in opt.deprecated_opts: + if deprecated_opt.name: + deprecated_group = (deprecated_opt.group if + deprecated_opt.group else "DEFAULT") + print('# Deprecated group/name - [%s]/%s' % + (deprecated_group, + deprecated_opt.name)) + try: + if opt_default is None: + print('#%s=' % opt_name) + elif opt_type == STROPT: + assert(isinstance(opt_default, six.string_types)) + print('#%s=%s' % (opt_name, _sanitize_default(opt_name, + opt_default))) + elif opt_type == BOOLOPT: + assert(isinstance(opt_default, bool)) + print('#%s=%s' % (opt_name, str(opt_default).lower())) + elif opt_type == INTOPT: + assert(isinstance(opt_default, int) and + not isinstance(opt_default, bool)) + print('#%s=%s' % (opt_name, opt_default)) + elif opt_type == FLOATOPT: + assert(isinstance(opt_default, float)) + print('#%s=%s' % (opt_name, opt_default)) + elif opt_type == LISTOPT: + assert(isinstance(opt_default, list)) + print('#%s=%s' % (opt_name, ','.join(opt_default))) + elif opt_type == MULTISTROPT: + 
assert(isinstance(opt_default, list)) + if not opt_default: + opt_default = [''] + for default in opt_default: + print('#%s=%s' % (opt_name, default)) + print('') + except Exception: + sys.stderr.write('Error in option "%s"\n' % opt_name) + sys.exit(1) + + +def main(): + generate(sys.argv[1:]) + +if __name__ == '__main__': + main() diff --git a/cinder/openstack/common/context.py b/cinder/openstack/common/context.py index dd7dd04c38..7c5f6a76a6 100644 --- a/cinder/openstack/common/context.py +++ b/cinder/openstack/common/context.py @@ -1,6 +1,6 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright 2011 OpenStack LLC. +# Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -23,23 +23,25 @@ """ import itertools -import uuid + +from cinder.openstack.common import uuidutils def generate_request_id(): - return 'req-' + str(uuid.uuid4()) + return 'req-%s' % uuidutils.generate_uuid() class RequestContext(object): - """ + """Helper class to represent useful information about a request context. + Stores information about the security context under which the user accesses the system, as well as additional request information. """ - def __init__(self, auth_tok=None, user=None, tenant=None, is_admin=False, + def __init__(self, auth_token=None, user=None, tenant=None, is_admin=False, read_only=False, show_deleted=False, request_id=None): - self.auth_tok = auth_tok + self.auth_token = auth_token self.user = user self.tenant = tenant self.is_admin = is_admin @@ -55,7 +57,7 @@ def to_dict(self): 'is_admin': self.is_admin, 'read_only': self.read_only, 'show_deleted': self.show_deleted, - 'auth_token': self.auth_tok, + 'auth_token': self.auth_token, 'request_id': self.request_id} diff --git a/cinder/openstack/common/crypto/__init__.py b/cinder/openstack/common/crypto/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cinder/openstack/common/crypto/utils.py b/cinder/openstack/common/crypto/utils.py new file mode 100644 index 0000000000..db0240eaab --- /dev/null +++ b/cinder/openstack/common/crypto/utils.py @@ -0,0 +1,179 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
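The net effect of print_group_opts() and _print_opt() above is a fully commented sample configuration file. Taking the 'backend' option registered later in this change (cinder/openstack/common/db/api.py) as an example, one generated section would look roughly like::

    [database]

    #
    # Options defined in cinder.openstack.common.db.api
    #

    # The backend to use for db (string value)
    #backend=sqlalchemy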
+ +import base64 + +from Crypto.Hash import HMAC +from Crypto import Random + +from cinder.openstack.common.gettextutils import _ # noqa +from cinder.openstack.common import importutils + + +class CryptoutilsException(Exception): + """Generic Exception for Crypto utilities.""" + + message = _("An unknown error occurred in crypto utils.") + + +class CipherBlockLengthTooBig(CryptoutilsException): + """The block size is too big.""" + + def __init__(self, requested, permitted): + msg = _("Block size of %(given)d is too big, max = %(maximum)d") + message = msg % {'given': requested, 'maximum': permitted} + super(CryptoutilsException, self).__init__(message) + + +class HKDFOutputLengthTooLong(CryptoutilsException): + """The amount of Key Material asked is too much.""" + + def __init__(self, requested, permitted): + msg = _("Length of %(given)d is too long, max = %(maximum)d") + message = msg % {'given': requested, 'maximum': permitted} + super(CryptoutilsException, self).__init__(message) + + +class HKDF(object): + """An HMAC-based Key Derivation Function implementation (RFC5869) + + This class creates an object that allows to use HKDF to derive keys. + """ + + def __init__(self, hashtype='SHA256'): + self.hashfn = importutils.import_module('Crypto.Hash.' + hashtype) + self.max_okm_length = 255 * self.hashfn.digest_size + + def extract(self, ikm, salt=None): + """An extract function that can be used to derive a robust key given + weak Input Key Material (IKM) which could be a password. + Returns a pseudorandom key (of HashLen octets) + + :param ikm: input keying material (ex a password) + :param salt: optional salt value (a non-secret random value) + """ + if salt is None: + salt = '\x00' * self.hashfn.digest_size + + return HMAC.new(salt, ikm, self.hashfn).digest() + + def expand(self, prk, info, length): + """An expand function that will return arbitrary length output that can + be used as keys. + Returns a buffer usable as key material. + + :param prk: a pseudorandom key of at least HashLen octets + :param info: optional string (can be a zero-length string) + :param length: length of output keying material (<= 255 * HashLen) + """ + if length > self.max_okm_length: + raise HKDFOutputLengthTooLong(length, self.max_okm_length) + + N = (length + self.hashfn.digest_size - 1) / self.hashfn.digest_size + + okm = "" + tmp = "" + for block in range(1, N + 1): + tmp = HMAC.new(prk, tmp + info + chr(block), self.hashfn).digest() + okm += tmp + + return okm[:length] + + +MAX_CB_SIZE = 256 + + +class SymmetricCrypto(object): + """Symmetric Key Crypto object. + + This class creates a Symmetric Key Crypto object that can be used + to encrypt, decrypt, or sign arbitrary data. + + :param enctype: Encryption Cipher name (default: AES) + :param hashtype: Hash/HMAC type name (default: SHA256) + """ + + def __init__(self, enctype='AES', hashtype='SHA256'): + self.cipher = importutils.import_module('Crypto.Cipher.' + enctype) + self.hashfn = importutils.import_module('Crypto.Hash.' + hashtype) + + def new_key(self, size): + return Random.new().read(size) + + def encrypt(self, key, msg, b64encode=True): + """Encrypt the provided msg and returns the cyphertext optionally + base64 encoded. + + Uses AES-128-CBC with a Random IV by default. + + The plaintext is padded to reach blocksize length. + The last byte of the block is the length of the padding. + The length of the padding does not include the length byte itself. + + :param key: The Encryption key. + :param msg: the plain text. 
+ + :returns encblock: a block of encrypted data. + """ + iv = Random.new().read(self.cipher.block_size) + cipher = self.cipher.new(key, self.cipher.MODE_CBC, iv) + + # CBC mode requires a fixed block size. Append padding and length of + # padding. + if self.cipher.block_size > MAX_CB_SIZE: + raise CipherBlockLengthTooBig(self.cipher.block_size, MAX_CB_SIZE) + r = len(msg) % self.cipher.block_size + padlen = self.cipher.block_size - r - 1 + msg += '\x00' * padlen + msg += chr(padlen) + + enc = iv + cipher.encrypt(msg) + if b64encode: + enc = base64.b64encode(enc) + return enc + + def decrypt(self, key, msg, b64decode=True): + """Decrypts the provided ciphertext, optionally base 64 encoded, and + returns the plaintext message, after padding is removed. + + Uses AES-128-CBC with an IV by default. + + :param key: The Encryption key. + :param msg: the ciphetext, the first block is the IV + """ + if b64decode: + msg = base64.b64decode(msg) + iv = msg[:self.cipher.block_size] + cipher = self.cipher.new(key, self.cipher.MODE_CBC, iv) + + padded = cipher.decrypt(msg[self.cipher.block_size:]) + l = ord(padded[-1]) + 1 + plain = padded[:-l] + return plain + + def sign(self, key, msg, b64encode=True): + """Signs a message string and returns a base64 encoded signature. + + Uses HMAC-SHA-256 by default. + + :param key: The Signing key. + :param msg: the message to sign. + """ + h = HMAC.new(key, msg, self.hashfn) + out = h.digest() + if b64encode: + out = base64.b64encode(out) + return out diff --git a/cinder/api/openstack/volume/views/__init__.py b/cinder/openstack/common/db/__init__.py similarity index 93% rename from cinder/api/openstack/volume/views/__init__.py rename to cinder/openstack/common/db/__init__.py index d65c689a83..1b9b60dec1 100644 --- a/cinder/api/openstack/volume/views/__init__.py +++ b/cinder/openstack/common/db/__init__.py @@ -1,6 +1,6 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright 2011 OpenStack LLC. +# Copyright 2012 Cloudscaling Group, Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/cinder/openstack/common/db/api.py b/cinder/openstack/common/db/api.py new file mode 100644 index 0000000000..5a76184b32 --- /dev/null +++ b/cinder/openstack/common/db/api.py @@ -0,0 +1,106 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2013 Rackspace Hosting +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Multiple DB API backend support. + +Supported configuration options: + +The following two parameters are in the 'database' group: +`backend`: DB backend name or full module path to DB backend module. +`use_tpool`: Enable thread pooling of DB API calls. + +A DB backend module should implement a method named 'get_backend' which +takes no arguments. The method can return any object that implements DB +API methods. + +*NOTE*: There are bugs in eventlet when using tpool combined with +threading locks. The python logging module happens to use such locks. 
To +work around this issue, be sure to specify thread=False with +eventlet.monkey_patch(). + +A bug for eventlet has been filed here: + +https://bitbucket.org/eventlet/eventlet/issue/137/ +""" +import functools + +from oslo.config import cfg + +from cinder.openstack.common import importutils +from cinder.openstack.common import lockutils + + +db_opts = [ + cfg.StrOpt('backend', + default='sqlalchemy', + deprecated_name='db_backend', + deprecated_group='DEFAULT', + help='The backend to use for db'), + cfg.BoolOpt('use_tpool', + default=False, + deprecated_name='dbapi_use_tpool', + deprecated_group='DEFAULT', + help='Enable the experimental use of thread pooling for ' + 'all DB API calls') +] + +CONF = cfg.CONF +CONF.register_opts(db_opts, 'database') + + +class DBAPI(object): + def __init__(self, backend_mapping=None): + if backend_mapping is None: + backend_mapping = {} + self.__backend = None + self.__backend_mapping = backend_mapping + + @lockutils.synchronized('dbapi_backend', 'cinder-') + def __get_backend(self): + """Get the actual backend. May be a module or an instance of + a class. Doesn't matter to us. We do this synchronized as it's + possible multiple greenthreads started very quickly trying to do + DB calls and eventlet can switch threads before self.__backend gets + assigned. + """ + if self.__backend: + # Another thread assigned it + return self.__backend + backend_name = CONF.database.backend + self.__use_tpool = CONF.database.use_tpool + if self.__use_tpool: + from eventlet import tpool + self.__tpool = tpool + # Import the untranslated name if we don't have a + # mapping. + backend_path = self.__backend_mapping.get(backend_name, + backend_name) + backend_mod = importutils.import_module(backend_path) + self.__backend = backend_mod.get_backend() + return self.__backend + + def __getattr__(self, key): + backend = self.__backend or self.__get_backend() + attr = getattr(backend, key) + if not self.__use_tpool or not hasattr(attr, '__call__'): + return attr + + def tpool_wrapper(*args, **kwargs): + return self.__tpool.execute(attr, *args, **kwargs) + + functools.update_wrapper(tpool_wrapper, attr) + return tpool_wrapper diff --git a/cinder/openstack/common/db/exception.py b/cinder/openstack/common/db/exception.py new file mode 100644 index 0000000000..190d234b04 --- /dev/null +++ b/cinder/openstack/common/db/exception.py @@ -0,0 +1,45 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
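A minimal sketch of how the DBAPI class above is meant to be consumed (the mapping value and the volume_get function are hypothetical): map a backend name to a module that exposes get_backend(), then call DB methods as attributes::

    from cinder.openstack.common.db import api as db_api

    _BACKEND_MAPPING = {'sqlalchemy': 'cinder.db.sqlalchemy.api'}
    IMPL = db_api.DBAPI(backend_mapping=_BACKEND_MAPPING)

    def volume_get(context, volume_id):
        # Attribute access resolves the configured backend lazily and,
        # when database.use_tpool is enabled, runs the call through
        # eventlet's tpool.execute().
        return IMPL.volume_get(context, volume_id)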
+ +"""DB related custom exceptions.""" + +from cinder.openstack.common.gettextutils import _ + + +class DBError(Exception): + """Wraps an implementation specific exception.""" + def __init__(self, inner_exception=None): + self.inner_exception = inner_exception + super(DBError, self).__init__(str(inner_exception)) + + +class DBDuplicateEntry(DBError): + """Wraps an implementation specific exception.""" + def __init__(self, columns=[], inner_exception=None): + self.columns = columns + super(DBDuplicateEntry, self).__init__(inner_exception) + + +class DBDeadlock(DBError): + def __init__(self, inner_exception=None): + super(DBDeadlock, self).__init__(inner_exception) + + +class DBInvalidUnicodeParameter(Exception): + message = _("Invalid Parameter: " + "Unicode is not supported by the current database.") diff --git a/cinder/rootwrap/__init__.py b/cinder/openstack/common/db/sqlalchemy/__init__.py similarity index 93% rename from cinder/rootwrap/__init__.py rename to cinder/openstack/common/db/sqlalchemy/__init__.py index 671d3c173e..1b9b60dec1 100644 --- a/cinder/rootwrap/__init__.py +++ b/cinder/openstack/common/db/sqlalchemy/__init__.py @@ -1,6 +1,6 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright (c) 2011 OpenStack, LLC. +# Copyright 2012 Cloudscaling Group, Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/cinder/openstack/common/db/sqlalchemy/models.py b/cinder/openstack/common/db/sqlalchemy/models.py new file mode 100644 index 0000000000..33a9103915 --- /dev/null +++ b/cinder/openstack/common/db/sqlalchemy/models.py @@ -0,0 +1,105 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 X.commerce, a business unit of eBay Inc. +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2011 Piston Cloud Computing, Inc. +# Copyright 2012 Cloudscaling Group, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +SQLAlchemy models. +""" + +from sqlalchemy import Column, Integer +from sqlalchemy import DateTime +from sqlalchemy.orm import object_mapper + +from cinder.openstack.common.db.sqlalchemy.session import get_session +from cinder.openstack.common import timeutils + + +class ModelBase(object): + """Base class for models.""" + __table_initialized__ = False + + def save(self, session=None): + """Save this object.""" + if not session: + session = get_session() + # NOTE(boris-42): This part of code should be look like: + # sesssion.add(self) + # session.flush() + # But there is a bug in sqlalchemy and eventlet that + # raises NoneType exception if there is no running + # transaction and rollback is called. As long as + # sqlalchemy has this bug we have to create transaction + # explicity. 
+ with session.begin(subtransactions=True): + session.add(self) + session.flush() + + def __setitem__(self, key, value): + setattr(self, key, value) + + def __getitem__(self, key): + return getattr(self, key) + + def get(self, key, default=None): + return getattr(self, key, default) + + def __iter__(self): + columns = dict(object_mapper(self).columns).keys() + # NOTE(russellb): Allow models to specify other keys that can be looked + # up, beyond the actual db columns. An example would be the 'name' + # property for an Instance. + if hasattr(self, '_extra_keys'): + columns.extend(self._extra_keys()) + self._i = iter(columns) + return self + + def next(self): + n = self._i.next() + return n, getattr(self, n) + + def update(self, values): + """Make the model object behave like a dict.""" + for k, v in values.iteritems(): + setattr(self, k, v) + + def iteritems(self): + """Make the model object behave like a dict. + + Includes attributes from joins.""" + local = dict(self) + joined = dict([(k, v) for k, v in self.__dict__.iteritems() + if not k[0] == '_']) + local.update(joined) + return local.iteritems() + + +class TimestampMixin(object): + created_at = Column(DateTime, default=timeutils.utcnow) + updated_at = Column(DateTime, onupdate=timeutils.utcnow) + + +class SoftDeleteMixin(object): + deleted_at = Column(DateTime) + deleted = Column(Integer, default=0) + + def soft_delete(self, session=None): + """Mark this object as deleted.""" + self.deleted = self.id + self.deleted_at = timeutils.utcnow() + self.save(session=session) diff --git a/cinder/openstack/common/db/sqlalchemy/session.py b/cinder/openstack/common/db/sqlalchemy/session.py new file mode 100644 index 0000000000..e7d1d1bc53 --- /dev/null +++ b/cinder/openstack/common/db/sqlalchemy/session.py @@ -0,0 +1,697 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Session Handling for SQLAlchemy backend. + +Initializing: + +* Call set_defaults with the minimal of the following kwargs: + sql_connection, sqlite_db + + Example: + + session.set_defaults( + sql_connection="sqlite:///var/lib/cinder/sqlite.db", + sqlite_db="/var/lib/cinder/sqlite.db") + +Recommended ways to use sessions within this framework: + +* Don't use them explicitly; this is like running with AUTOCOMMIT=1. + model_query() will implicitly use a session when called without one + supplied. This is the ideal situation because it will allow queries + to be automatically retried if the database connection is interrupted. + + Note: Automatic retry will be enabled in a future patch. + + It is generally fine to issue several queries in a row like this. Even though + they may be run in separate transactions and/or separate sessions, each one + will see the data from the prior calls. If needed, undo- or rollback-like + functionality should be handled at a logical level. 
For an example, look at + the code around quotas and reservation_rollback(). + + Examples: + + def get_foo(context, foo): + return model_query(context, models.Foo).\ + filter_by(foo=foo).\ + first() + + def update_foo(context, id, newfoo): + model_query(context, models.Foo).\ + filter_by(id=id).\ + update({'foo': newfoo}) + + def create_foo(context, values): + foo_ref = models.Foo() + foo_ref.update(values) + foo_ref.save() + return foo_ref + + +* Within the scope of a single method, keeping all the reads and writes within + the context managed by a single session. In this way, the session's __exit__ + handler will take care of calling flush() and commit() for you. + If using this approach, you should not explicitly call flush() or commit(). + Any error within the context of the session will cause the session to emit + a ROLLBACK. If the connection is dropped before this is possible, the + database will implicitly rollback the transaction. + + Note: statements in the session scope will not be automatically retried. + + If you create models within the session, they need to be added, but you + do not need to call model.save() + + def create_many_foo(context, foos): + session = get_session() + with session.begin(): + for foo in foos: + foo_ref = models.Foo() + foo_ref.update(foo) + session.add(foo_ref) + + def update_bar(context, foo_id, newbar): + session = get_session() + with session.begin(): + foo_ref = model_query(context, models.Foo, session).\ + filter_by(id=foo_id).\ + first() + model_query(context, models.Bar, session).\ + filter_by(id=foo_ref['bar_id']).\ + update({'bar': newbar}) + + Note: update_bar is a trivially simple example of using "with session.begin". + Whereas create_many_foo is a good example of when a transaction is needed, + it is always best to use as few queries as possible. The two queries in + update_bar can be better expressed using a single query which avoids + the need for an explicit transaction. It can be expressed like so: + + def update_bar(context, foo_id, newbar): + subq = model_query(context, models.Foo.id).\ + filter_by(id=foo_id).\ + limit(1).\ + subquery() + model_query(context, models.Bar).\ + filter_by(id=subq.as_scalar()).\ + update({'bar': newbar}) + + For reference, this emits approximagely the following SQL statement: + + UPDATE bar SET bar = ${newbar} + WHERE id=(SELECT bar_id FROM foo WHERE id = ${foo_id} LIMIT 1); + +* Passing an active session between methods. Sessions should only be passed + to private methods. The private method must use a subtransaction; otherwise + SQLAlchemy will throw an error when you call session.begin() on an existing + transaction. Public methods should not accept a session parameter and should + not be involved in sessions within the caller's scope. + + Note that this incurs more overhead in SQLAlchemy than the above means + due to nesting transactions, and it is not possible to implicitly retry + failed database operations when using this approach. + + This also makes code somewhat more difficult to read and debug, because a + single database transaction spans more than one method. Error handling + becomes less clear in this situation. When this is needed for code clarity, + it should be clearly documented. 
+ + def myfunc(foo): + session = get_session() + with session.begin(): + # do some database things + bar = _private_func(foo, session) + return bar + + def _private_func(foo, session=None): + if not session: + session = get_session() + with session.begin(subtransaction=True): + # do some other database things + return bar + + +There are some things which it is best to avoid: + +* Don't keep a transaction open any longer than necessary. + + This means that your "with session.begin()" block should be as short + as possible, while still containing all the related calls for that + transaction. + +* Avoid "with_lockmode('UPDATE')" when possible. + + In MySQL/InnoDB, when a "SELECT ... FOR UPDATE" query does not match + any rows, it will take a gap-lock. This is a form of write-lock on the + "gap" where no rows exist, and prevents any other writes to that space. + This can effectively prevent any INSERT into a table by locking the gap + at the end of the index. Similar problems will occur if the SELECT FOR UPDATE + has an overly broad WHERE clause, or doesn't properly use an index. + + One idea proposed at ODS Fall '12 was to use a normal SELECT to test the + number of rows matching a query, and if only one row is returned, + then issue the SELECT FOR UPDATE. + + The better long-term solution is to use INSERT .. ON DUPLICATE KEY UPDATE. + However, this can not be done until the "deleted" columns are removed and + proper UNIQUE constraints are added to the tables. + + +Enabling soft deletes: + +* To use/enable soft-deletes, the SoftDeleteMixin must be added + to your model class. For example: + + class NovaBase(models.SoftDeleteMixin, models.ModelBase): + pass + + +Efficient use of soft deletes: + +* There are two possible ways to mark a record as deleted: + model.soft_delete() and query.soft_delete(). + + model.soft_delete() method works with single already fetched entry. + query.soft_delete() makes only one db request for all entries that correspond + to query. + +* In almost all cases you should use query.soft_delete(). Some examples: + + def soft_delete_bar(): + count = model_query(BarModel).find(some_condition).soft_delete() + if count == 0: + raise Exception("0 entries were soft deleted") + + def complex_soft_delete_with_synchronization_bar(session=None): + if session is None: + session = get_session() + with session.begin(subtransactions=True): + count = model_query(BarModel).\ + find(some_condition).\ + soft_delete(synchronize_session=True) + # Here synchronize_session is required, because we + # don't know what is going on in outer session. + if count == 0: + raise Exception("0 entries were soft deleted") + +* There is only one situation where model.soft_delete() is appropriate: when + you fetch a single record, work with it, and mark it as deleted in the same + transaction. 
+ + def soft_delete_bar_model(): + session = get_session() + with session.begin(): + bar_ref = model_query(BarModel).find(some_condition).first() + # Work with bar_ref + bar_ref.soft_delete(session=session) + + However, if you need to work with all entries that correspond to query and + then soft delete them you should use query.soft_delete() method: + + def soft_delete_multi_models(): + session = get_session() + with session.begin(): + query = model_query(BarModel, session=session).\ + find(some_condition) + model_refs = query.all() + # Work with model_refs + query.soft_delete(synchronize_session=False) + # synchronize_session=False should be set if there is no outer + # session and these entries are not used after this. + + When working with many rows, it is very important to use query.soft_delete, + which issues a single query. Using model.soft_delete(), as in the following + example, is very inefficient. + + for bar_ref in bar_refs: + bar_ref.soft_delete(session=session) + # This will produce count(bar_refs) db requests. +""" + +import os.path +import re +import time + +from eventlet import greenthread +from oslo.config import cfg +import six +from sqlalchemy import exc as sqla_exc +import sqlalchemy.interfaces +from sqlalchemy.interfaces import PoolListener +import sqlalchemy.orm +from sqlalchemy.pool import NullPool, StaticPool +from sqlalchemy.sql.expression import literal_column + +from cinder.openstack.common.db import exception +from cinder.openstack.common import log as logging +from cinder.openstack.common.gettextutils import _ +from cinder.openstack.common import timeutils + +DEFAULT = 'DEFAULT' + +sqlite_db_opts = [ + cfg.StrOpt('sqlite_db', + default='cinder.sqlite', + help='the filename to use with sqlite'), + cfg.BoolOpt('sqlite_synchronous', + default=True, + help='If true, use synchronous mode for sqlite'), +] + +database_opts = [ + cfg.StrOpt('connection', + default='sqlite:///' + + os.path.abspath(os.path.join(os.path.dirname(__file__), + '../', '$sqlite_db')), + help='The SQLAlchemy connection string used to connect to the ' + 'database', + deprecated_name='sql_connection', + deprecated_group=DEFAULT, + secret=True), + cfg.IntOpt('idle_timeout', + default=3600, + deprecated_name='sql_idle_timeout', + deprecated_group=DEFAULT, + help='timeout before idle sql connections are reaped'), + cfg.IntOpt('min_pool_size', + default=1, + deprecated_name='sql_min_pool_size', + deprecated_group=DEFAULT, + help='Minimum number of SQL connections to keep open in a ' + 'pool'), + cfg.IntOpt('max_pool_size', + default=5, + deprecated_name='sql_max_pool_size', + deprecated_group=DEFAULT, + help='Maximum number of SQL connections to keep open in a ' + 'pool'), + cfg.IntOpt('max_retries', + default=10, + deprecated_name='sql_max_retries', + deprecated_group=DEFAULT, + help='maximum db connection retries during startup. ' + '(setting -1 implies an infinite retry count)'), + cfg.IntOpt('retry_interval', + default=10, + deprecated_name='sql_retry_interval', + deprecated_group=DEFAULT, + help='interval between retries of opening a sql connection'), + cfg.IntOpt('max_overflow', + default=None, + deprecated_name='sql_max_overflow', + deprecated_group=DEFAULT, + help='If set, use this value for max_overflow with sqlalchemy'), + cfg.IntOpt('connection_debug', + default=0, + deprecated_name='sql_connection_debug', + deprecated_group=DEFAULT, + help='Verbosity of SQL debugging information. 
+                    '100=Everything'),
+    cfg.BoolOpt('connection_trace',
+                default=False,
+                deprecated_name='sql_connection_trace',
+                deprecated_group=DEFAULT,
+                help='Add python stack traces to SQL as comment strings'),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(sqlite_db_opts)
+CONF.register_opts(database_opts, 'database')
+LOG = logging.getLogger(__name__)
+
+_ENGINE = None
+_MAKER = None
+
+
+def set_defaults(sql_connection, sqlite_db):
+    """Set defaults for configuration variables."""
+    cfg.set_defaults(database_opts,
+                     connection=sql_connection)
+    cfg.set_defaults(sqlite_db_opts,
+                     sqlite_db=sqlite_db)
+
+
+def cleanup():
+    global _ENGINE, _MAKER
+
+    if _MAKER:
+        _MAKER.close_all()
+        _MAKER = None
+    if _ENGINE:
+        _ENGINE.dispose()
+        _ENGINE = None
+
+
+class SqliteForeignKeysListener(PoolListener):
+    """Ensures that foreign key constraints are enforced in SQLite.
+
+    The foreign key constraints are disabled by default in SQLite,
+    so they are enabled here for every database connection.
+    """
+    def connect(self, dbapi_con, con_record):
+        dbapi_con.execute('pragma foreign_keys=ON')
+
+
+def get_session(autocommit=True, expire_on_commit=False,
+                sqlite_fk=False):
+    """Return a SQLAlchemy session."""
+    global _MAKER
+
+    if _MAKER is None:
+        engine = get_engine(sqlite_fk=sqlite_fk)
+        _MAKER = get_maker(engine, autocommit, expire_on_commit)
+
+    session = _MAKER()
+    return session
+
+
+# note(boris-42): In current versions of DB backends unique constraint
+# violation messages follow the structure:
+#
+# sqlite:
+# 1 column - (IntegrityError) column c1 is not unique
+# N columns - (IntegrityError) column c1, c2, ..., N are not unique
+#
+# postgres:
+# 1 column - (IntegrityError) duplicate key value violates unique
+#               constraint "users_c1_key"
+# N columns - (IntegrityError) duplicate key value violates unique
+#               constraint "name_of_our_constraint"
+#
+# mysql:
+# 1 column - (IntegrityError) (1062, "Duplicate entry 'value_of_c1' for key
+#               'c1'")
+# N columns - (IntegrityError) (1062, "Duplicate entry 'values joined
+#               with -' for key 'name_of_our_constraint'")
+_DUP_KEY_RE_DB = {
+    "sqlite": re.compile(r"^.*columns?([^)]+)(is|are)\s+not\s+unique$"),
+    "postgresql": re.compile(r"^.*duplicate\s+key.*\"([^\"]+)\"\s*\n.*$"),
+    "mysql": re.compile(r"^.*\(1062,.*'([^\']+)'\"\)$")
}
+
+
+def _raise_if_duplicate_entry_error(integrity_error, engine_name):
+    """Raise a DBDuplicateEntry exception if the integrity error wraps a
+    unique constraint violation.
+    """
+
+    def get_columns_from_uniq_cons_or_name(columns):
+        # note(boris-42): UniqueConstraint name convention: "uniq_c1_x_c2_x_c3"
+        # means that columns c1, c2, c3 are in the UniqueConstraint.
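+        # Names that do not follow that convention are assumed to be
+        # PostgreSQL's default "table_column_key" form, in which case the
+        # segment between the first and last underscore is the column name
+        # (e.g. "users_c1_key" -> ["c1"]).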
+        uniqbase = "uniq_"
+        if not columns.startswith(uniqbase):
+            if engine_name == "postgresql":
+                return [columns[columns.index("_") + 1:columns.rindex("_")]]
+            return [columns]
+        return columns[len(uniqbase):].split("_x_")
+
+    if engine_name not in ["mysql", "sqlite", "postgresql"]:
+        return
+
+    m = _DUP_KEY_RE_DB[engine_name].match(integrity_error.message)
+    if not m:
+        return
+    columns = m.group(1)
+
+    if engine_name == "sqlite":
+        columns = columns.strip().split(", ")
+    else:
+        columns = get_columns_from_uniq_cons_or_name(columns)
+    raise exception.DBDuplicateEntry(columns, integrity_error)
+
+
+# NOTE(comstud): In current versions of DB backends, Deadlock violation
+# messages follow the structure:
+#
+# mysql:
+# (OperationalError) (1213, 'Deadlock found when trying to get lock; try '
+#                     'restarting transaction')
+_DEADLOCK_RE_DB = {
+    "mysql": re.compile(r"^.*\(1213, 'Deadlock.*")
+}
+
+
+def _raise_if_deadlock_error(operational_error, engine_name):
+    """Raise a DBDeadlock exception if the OperationalError contains a
+    Deadlock condition.
+    """
+    regex = _DEADLOCK_RE_DB.get(engine_name)
+    if regex is None:
+        return
+    m = regex.match(operational_error.message)
+    if not m:
+        return
+    raise exception.DBDeadlock(operational_error)
+
+
+def _wrap_db_error(f):
+    def _wrap(*args, **kwargs):
+        try:
+            return f(*args, **kwargs)
+        except UnicodeEncodeError:
+            raise exception.DBInvalidUnicodeParameter()
+        # note(boris-42): We should catch unique constraint violation and
+        # wrap it by our own DBDuplicateEntry exception. Unique constraint
+        # violation is wrapped by IntegrityError.
+        except sqla_exc.OperationalError as e:
+            _raise_if_deadlock_error(e, get_engine().name)
+            # NOTE(comstud): A lot of code is checking for OperationalError
+            # so let's not wrap it for now.
+            raise
+        except sqla_exc.IntegrityError as e:
+            # note(boris-42): SqlAlchemy doesn't unify errors from different
+            # DBs, so we must do this. Also, some tables (for example
+            # instance_types) have more than one unique constraint, so the
+            # names of the columns whose values violate the constraint must
+            # be taken from the error message.
+            _raise_if_duplicate_entry_error(e, get_engine().name)
+            raise exception.DBError(e)
+        except Exception as e:
+            LOG.exception(_('DB exception wrapped.'))
+            raise exception.DBError(e)
+    _wrap.func_name = f.func_name
+    return _wrap
+
+
+def get_engine(sqlite_fk=False):
+    """Return a SQLAlchemy engine."""
+    global _ENGINE
+    if _ENGINE is None:
+        _ENGINE = create_engine(CONF.database.connection,
+                                sqlite_fk=sqlite_fk)
+    return _ENGINE
+
+
+def _synchronous_switch_listener(dbapi_conn, connection_rec):
+    """Switch sqlite connections to non-synchronous mode."""
+    dbapi_conn.execute("PRAGMA synchronous = OFF")
+
+
+def _add_regexp_listener(dbapi_con, con_record):
+    """Add REGEXP function to sqlite connections."""
+
+    def regexp(expr, item):
+        reg = re.compile(expr)
+        return reg.search(six.text_type(item)) is not None
+    dbapi_con.create_function('regexp', 2, regexp)
+
+
+def _greenthread_yield(dbapi_con, con_record):
+    """Ensure other greenthreads get a chance to execute by forcing a
+    context switch. With common database backends (eg MySQLdb and sqlite),
+    there is no implicit yield caused by network I/O since they are
+    implemented by C libraries that eventlet cannot monkey patch.
+    """
+    greenthread.sleep(0)
+
+
+def _ping_listener(dbapi_conn, connection_rec, connection_proxy):
+    """Ensures that MySQL connections checked out of the pool are alive.
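+
+    A lightweight 'select 1' is issued on each checkout; if the server
+    reports one of the known "gone away" error codes, the connection is
+    reported to SQLAlchemy as disconnected so it can be re-established,
+    otherwise the error is re-raised.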
+ + Borrowed from: + http://groups.google.com/group/sqlalchemy/msg/a4ce563d802c929f + """ + try: + dbapi_conn.cursor().execute('select 1') + except dbapi_conn.OperationalError as ex: + if ex.args[0] in (2006, 2013, 2014, 2045, 2055): + LOG.warn(_('Got mysql server has gone away: %s'), ex) + raise sqla_exc.DisconnectionError("Database server went away") + else: + raise + + +def _is_db_connection_error(args): + """Return True if error in connecting to db.""" + # NOTE(adam_g): This is currently MySQL specific and needs to be extended + # to support Postgres and others. + conn_err_codes = ('2002', '2003', '2006') + for err_code in conn_err_codes: + if args.find(err_code) != -1: + return True + return False + + +def create_engine(sql_connection, sqlite_fk=False): + """Return a new SQLAlchemy engine.""" + connection_dict = sqlalchemy.engine.url.make_url(sql_connection) + + engine_args = { + "pool_recycle": CONF.database.idle_timeout, + "echo": False, + 'convert_unicode': True, + } + + # Map our SQL debug level to SQLAlchemy's options + if CONF.database.connection_debug >= 100: + engine_args['echo'] = 'debug' + elif CONF.database.connection_debug >= 50: + engine_args['echo'] = True + + if "sqlite" in connection_dict.drivername: + if sqlite_fk: + engine_args["listeners"] = [SqliteForeignKeysListener()] + engine_args["poolclass"] = NullPool + + if CONF.database.connection == "sqlite://": + engine_args["poolclass"] = StaticPool + engine_args["connect_args"] = {'check_same_thread': False} + else: + engine_args['pool_size'] = CONF.database.max_pool_size + if CONF.database.max_overflow is not None: + engine_args['max_overflow'] = CONF.database.max_overflow + + engine = sqlalchemy.create_engine(sql_connection, **engine_args) + + sqlalchemy.event.listen(engine, 'checkin', _greenthread_yield) + + if 'mysql' in connection_dict.drivername: + sqlalchemy.event.listen(engine, 'checkout', _ping_listener) + elif 'sqlite' in connection_dict.drivername: + if not CONF.sqlite_synchronous: + sqlalchemy.event.listen(engine, 'connect', + _synchronous_switch_listener) + sqlalchemy.event.listen(engine, 'connect', _add_regexp_listener) + + if (CONF.database.connection_trace and + engine.dialect.dbapi.__name__ == 'MySQLdb'): + _patch_mysqldb_with_stacktrace_comments() + + try: + engine.connect() + except sqla_exc.OperationalError as e: + if not _is_db_connection_error(e.args[0]): + raise + + remaining = CONF.database.max_retries + if remaining == -1: + remaining = 'infinite' + while True: + msg = _('SQL connection failed. 
%s attempts left.') + LOG.warn(msg % remaining) + if remaining != 'infinite': + remaining -= 1 + time.sleep(CONF.database.retry_interval) + try: + engine.connect() + break + except sqla_exc.OperationalError as e: + if (remaining != 'infinite' and remaining == 0) or \ + not _is_db_connection_error(e.args[0]): + raise + return engine + + +class Query(sqlalchemy.orm.query.Query): + """Subclass of sqlalchemy.query with soft_delete() method.""" + def soft_delete(self, synchronize_session='evaluate'): + return self.update({'deleted': literal_column('id'), + 'updated_at': literal_column('updated_at'), + 'deleted_at': timeutils.utcnow()}, + synchronize_session=synchronize_session) + + +class Session(sqlalchemy.orm.session.Session): + """Custom Session class to avoid SqlAlchemy Session monkey patching.""" + @_wrap_db_error + def query(self, *args, **kwargs): + return super(Session, self).query(*args, **kwargs) + + @_wrap_db_error + def flush(self, *args, **kwargs): + return super(Session, self).flush(*args, **kwargs) + + @_wrap_db_error + def execute(self, *args, **kwargs): + return super(Session, self).execute(*args, **kwargs) + + +def get_maker(engine, autocommit=True, expire_on_commit=False): + """Return a SQLAlchemy sessionmaker using the given engine.""" + return sqlalchemy.orm.sessionmaker(bind=engine, + class_=Session, + autocommit=autocommit, + expire_on_commit=expire_on_commit, + query_cls=Query) + + +def _patch_mysqldb_with_stacktrace_comments(): + """Adds current stack trace as a comment in queries by patching + MySQLdb.cursors.BaseCursor._do_query. + """ + import MySQLdb.cursors + import traceback + + old_mysql_do_query = MySQLdb.cursors.BaseCursor._do_query + + def _do_query(self, q): + stack = '' + for file, line, method, function in traceback.extract_stack(): + # exclude various common things from trace + if file.endswith('session.py') and method == '_do_query': + continue + if file.endswith('api.py') and method == 'wrapper': + continue + if file.endswith('utils.py') and method == '_inner': + continue + if file.endswith('exception.py') and method == '_wrap': + continue + # db/api is just a wrapper around db/sqlalchemy/api + if file.endswith('db/api.py'): + continue + # only trace inside cinder + index = file.rfind('cinder') + if index == -1: + continue + stack += "File:%s:%s Method:%s() Line:%s | " \ + % (file[index:], line, method, function) + + # strip trailing " | " from stack + if stack: + stack = stack[:-3] + qq = "%s /* %s */" % (q, stack) + else: + qq = q + old_mysql_do_query(self, qq) + + setattr(MySQLdb.cursors.BaseCursor, '_do_query', _do_query) diff --git a/cinder/openstack/common/db/sqlalchemy/utils.py b/cinder/openstack/common/db/sqlalchemy/utils.py new file mode 100644 index 0000000000..6e30d553f3 --- /dev/null +++ b/cinder/openstack/common/db/sqlalchemy/utils.py @@ -0,0 +1,132 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2010-2011 OpenStack Foundation. +# Copyright 2012 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Implementation of paginate query."""
+
+import sqlalchemy
+
+from cinder.openstack.common.gettextutils import _
+from cinder.openstack.common import log as logging
+
+
+LOG = logging.getLogger(__name__)
+
+
+class InvalidSortKey(Exception):
+    message = _("Sort key supplied was not valid.")
+
+
+# copy from glance/db/sqlalchemy/api.py
+def paginate_query(query, model, limit, sort_keys, marker=None,
+                   sort_dir=None, sort_dirs=None):
+    """Returns a query with sorting / pagination criteria added.
+
+    Pagination works by requiring a unique sort_key, specified by sort_keys.
+    (If sort_keys is not unique, then we risk looping through values.)
+    We use the last row in the previous page as the 'marker' for pagination.
+    So we must return values that follow the passed marker in the sort order.
+    With a single-valued sort_key, this would be easy: sort_key > X.
+    With a compound sort_key (k1, k2, k3), we must do this to repeat the
+    lexicographical ordering:
+    (k1 > X1) or (k1 == X1 && k2 > X2) or (k1 == X1 && k2 == X2 && k3 > X3)
+
+    We also have to cope with different sort directions.
+
+    Typically, the id of the last row is used as the client-facing pagination
+    marker; the actual marker object must then be fetched from the db and
+    passed in to us as marker.
+
+    :param query: the query object to which we should add paging/sorting
+    :param model: the ORM model class
+    :param limit: maximum number of items to return
+    :param sort_keys: array of attributes by which results should be sorted
+    :param marker: the last item of the previous page; we return the next
+                   results after this value.
+    :param sort_dir: direction in which results should be sorted (asc, desc)
+    :param sort_dirs: per-column array of sort_dirs, corresponding to sort_keys
+
+    :rtype: sqlalchemy.orm.query.Query
+    :return: The query with sorting/pagination added.
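+
+    A rough usage sketch (the model and marker names here are illustrative,
+    not part of this module):
+
+        query = session.query(BarModel)
+        marker = session.query(BarModel).get(last_seen_id)
+        page = paginate_query(query, BarModel, 20,
+                              sort_keys=['created_at', 'id'],
+                              marker=marker,
+                              sort_dirs=['desc', 'asc'])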
+    """
+
+    if 'id' not in sort_keys:
+        # TODO(justinsb): If this ever gives a false-positive, check
+        # the actual primary key, rather than assuming it's id
+        LOG.warn(_('Id not in sort_keys; is sort_keys unique?'))
+
+    assert(not (sort_dir and sort_dirs))
+
+    # Default the sort direction to ascending
+    if sort_dirs is None and sort_dir is None:
+        sort_dir = 'asc'
+
+    # Ensure a per-column sort direction
+    if sort_dirs is None:
+        sort_dirs = [sort_dir for _sort_key in sort_keys]
+
+    assert(len(sort_dirs) == len(sort_keys))
+
+    # Add sorting
+    for current_sort_key, current_sort_dir in zip(sort_keys, sort_dirs):
+        sort_dir_func = {
+            'asc': sqlalchemy.asc,
+            'desc': sqlalchemy.desc,
+        }[current_sort_dir]
+
+        try:
+            sort_key_attr = getattr(model, current_sort_key)
+        except AttributeError:
+            raise InvalidSortKey()
+        query = query.order_by(sort_dir_func(sort_key_attr))
+
+    # Add pagination
+    if marker is not None:
+        marker_values = []
+        for sort_key in sort_keys:
+            v = getattr(marker, sort_key)
+            marker_values.append(v)
+
+        # Build up an array of sort criteria as in the docstring
+        criteria_list = []
+        for i in range(0, len(sort_keys)):
+            crit_attrs = []
+            for j in range(0, i):
+                model_attr = getattr(model, sort_keys[j])
+                crit_attrs.append((model_attr == marker_values[j]))
+
+            model_attr = getattr(model, sort_keys[i])
+            if sort_dirs[i] == 'desc':
+                crit_attrs.append((model_attr < marker_values[i]))
+            elif sort_dirs[i] == 'asc':
+                crit_attrs.append((model_attr > marker_values[i]))
+            else:
+                raise ValueError(_("Unknown sort direction, "
+                                   "must be 'desc' or 'asc'"))
+
+            criteria = sqlalchemy.sql.and_(*crit_attrs)
+            criteria_list.append(criteria)
+
+        f = sqlalchemy.sql.or_(*criteria_list)
+        query = query.filter(f)
+
+    if limit is not None:
+        query = query.limit(limit)
+
+    return query
diff --git a/cinder/openstack/common/eventlet_backdoor.py b/cinder/openstack/common/eventlet_backdoor.py
new file mode 100644
index 0000000000..f1526f6b07
--- /dev/null
+++ b/cinder/openstack/common/eventlet_backdoor.py
@@ -0,0 +1,144 @@
+# Copyright (c) 2012 OpenStack Foundation.
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from __future__ import print_function
+
+import errno
+import gc
+import os
+import pprint
+import socket
+import sys
+import traceback
+
+import eventlet
+import eventlet.backdoor
+import greenlet
+from oslo.config import cfg
+
+from cinder.openstack.common.gettextutils import _
+from cinder.openstack.common import log as logging
+
+help_for_backdoor_port = (
+    "Acceptable values are 0, <port>, and <start>:<end>, where 0 results "
+    "in listening on a random tcp port number; <port> results in listening "
+    "on the specified port number (and not enabling backdoor if that port "
+    "is in use); and <start>:<end> results in listening on the smallest "
+    "unused port number within the specified range of port numbers. The "
+    "chosen port is displayed in the service's log file.")
+eventlet_backdoor_opts = [
+    cfg.StrOpt('backdoor_port',
+               default=None,
+               help="Enable eventlet backdoor. %s" % help_for_backdoor_port)
+]
+
+CONF = cfg.CONF
+CONF.register_opts(eventlet_backdoor_opts)
+LOG = logging.getLogger(__name__)
+
+
+class EventletBackdoorConfigValueError(Exception):
+    def __init__(self, port_range, help_msg, ex):
+        msg = ('Invalid backdoor_port configuration %(range)s: %(ex)s. '
+               '%(help)s' %
+               {'range': port_range, 'ex': ex, 'help': help_msg})
+        super(EventletBackdoorConfigValueError, self).__init__(msg)
+        self.port_range = port_range
+
+
+def _dont_use_this():
+    print("Don't use this, just disconnect instead")
+
+
+def _find_objects(t):
+    return [o for o in gc.get_objects() if isinstance(o, t)]
+
+
+def _print_greenthreads():
+    for i, gt in enumerate(_find_objects(greenlet.greenlet)):
+        print(i, gt)
+        traceback.print_stack(gt.gr_frame)
+        print()
+
+
+def _print_nativethreads():
+    for threadId, stack in sys._current_frames().items():
+        print(threadId)
+        traceback.print_stack(stack)
+        print()
+
+
+def _parse_port_range(port_range):
+    if ':' not in port_range:
+        start, end = port_range, port_range
+    else:
+        start, end = port_range.split(':', 1)
+    try:
+        start, end = int(start), int(end)
+        if end < start:
+            raise ValueError
+        return start, end
+    except ValueError as ex:
+        raise EventletBackdoorConfigValueError(port_range,
+                                               help_for_backdoor_port, ex)
+
+
+def _listen(host, start_port, end_port, listen_func):
+    try_port = start_port
+    while True:
+        try:
+            return listen_func((host, try_port))
+        except socket.error as exc:
+            if (exc.errno != errno.EADDRINUSE or
+                    try_port >= end_port):
+                raise
+            try_port += 1
+
+
+def initialize_if_enabled():
+    backdoor_locals = {
+        'exit': _dont_use_this,      # So we don't exit the entire process
+        'quit': _dont_use_this,      # So we don't exit the entire process
+        'fo': _find_objects,
+        'pgt': _print_greenthreads,
+        'pnt': _print_nativethreads,
+    }
+
+    if CONF.backdoor_port is None:
+        return None
+
+    start_port, end_port = _parse_port_range(str(CONF.backdoor_port))
+
+    # NOTE(johannes): The standard sys.displayhook will print the value of
+    # the last expression and set it to __builtin__._, which overwrites
+    # the __builtin__._ that gettext sets. Let's switch to using pprint
+    # since it won't interact poorly with gettext, and it's easier to
+    # read the output too.
+    def displayhook(val):
+        if val is not None:
+            pprint.pprint(val)
+    sys.displayhook = displayhook
+
+    sock = _listen('localhost', start_port, end_port, eventlet.listen)
+
+    # In the case of backdoor port being zero, a port number is assigned by
+    # listen(). In any case, pull the port number out here.
+    port = sock.getsockname()[1]
+    LOG.info(_('Eventlet backdoor listening on %(port)s for process %(pid)d') %
+             {'port': port, 'pid': os.getpid()})
+    eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock,
+                     locals=backdoor_locals)
+    return port
diff --git a/cinder/openstack/common/exception.py b/cinder/openstack/common/exception.py
deleted file mode 100644
index ba32da550b..0000000000
--- a/cinder/openstack/common/exception.py
+++ /dev/null
@@ -1,147 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License.
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Exceptions common to OpenStack projects -""" - -import logging - - -class ProcessExecutionError(IOError): - def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None, - description=None): - if description is None: - description = "Unexpected error while running command." - if exit_code is None: - exit_code = '-' - message = "%s\nCommand: %s\nExit code: %s\nStdout: %r\nStderr: %r" % ( - description, cmd, exit_code, stdout, stderr) - IOError.__init__(self, message) - - -class Error(Exception): - def __init__(self, message=None): - super(Error, self).__init__(message) - - -class ApiError(Error): - def __init__(self, message='Unknown', code='Unknown'): - self.message = message - self.code = code - super(ApiError, self).__init__('%s: %s' % (code, message)) - - -class NotFound(Error): - pass - - -class UnknownScheme(Error): - - msg = "Unknown scheme '%s' found in URI" - - def __init__(self, scheme): - msg = self.__class__.msg % scheme - super(UnknownScheme, self).__init__(msg) - - -class BadStoreUri(Error): - - msg = "The Store URI %s was malformed. Reason: %s" - - def __init__(self, uri, reason): - msg = self.__class__.msg % (uri, reason) - super(BadStoreUri, self).__init__(msg) - - -class Duplicate(Error): - pass - - -class NotAuthorized(Error): - pass - - -class NotEmpty(Error): - pass - - -class Invalid(Error): - pass - - -class BadInputError(Exception): - """Error resulting from a client sending bad input to a server""" - pass - - -class MissingArgumentError(Error): - pass - - -class DatabaseMigrationError(Error): - pass - - -class ClientConnectionError(Exception): - """Error resulting from a client connecting to a server""" - pass - - -def wrap_exception(f): - def _wrap(*args, **kw): - try: - return f(*args, **kw) - except Exception, e: - if not isinstance(e, Error): - #exc_type, exc_value, exc_traceback = sys.exc_info() - logging.exception('Uncaught exception') - #logging.error(traceback.extract_stack(exc_traceback)) - raise Error(str(e)) - raise - _wrap.func_name = f.func_name - return _wrap - - -class OpenstackException(Exception): - """ - Base Exception - - To correctly use this class, inherit from it and define - a 'message' property. That message will get printf'd - with the keyword arguments provided to the constructor. - """ - message = "An unknown exception occurred" - - def __init__(self, **kwargs): - try: - self._error_string = self.message % kwargs - - except Exception: - # at least get the core message out if something happened - self._error_string = self.message - - def __str__(self): - return self._error_string - - -class MalformedRequestBody(OpenstackException): - message = "Malformed message body: %(reason)s" - - -class InvalidContentType(OpenstackException): - message = "Invalid content type %(content_type)s" diff --git a/cinder/openstack/common/excutils.py b/cinder/openstack/common/excutils.py index 5dd4830176..ccb2d072e9 100644 --- a/cinder/openstack/common/excutils.py +++ b/cinder/openstack/common/excutils.py @@ -1,6 +1,6 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright 2011 OpenStack LLC. +# Copyright 2011 OpenStack Foundation. 
# Copyright 2012, Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -24,6 +24,8 @@ import sys import traceback +from cinder.openstack.common.gettextutils import _ + @contextlib.contextmanager def save_and_reraise_exception(): @@ -43,7 +45,7 @@ def save_and_reraise_exception(): try: yield except Exception: - logging.error('Original exception being dropped: %s' % - (traceback.format_exception(type_, value, tb))) + logging.error(_('Original exception being dropped: %s'), + traceback.format_exception(type_, value, tb)) raise raise type_, value, tb diff --git a/cinder/openstack/common/fileutils.py b/cinder/openstack/common/fileutils.py new file mode 100644 index 0000000000..4301f2c131 --- /dev/null +++ b/cinder/openstack/common/fileutils.py @@ -0,0 +1,110 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +import contextlib +import errno +import os + +from cinder.openstack.common import excutils +from cinder.openstack.common.gettextutils import _ +from cinder.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + +_FILE_CACHE = {} + + +def ensure_tree(path): + """Create a directory (and any ancestor directories required) + + :param path: Directory to create + """ + try: + os.makedirs(path) + except OSError as exc: + if exc.errno == errno.EEXIST: + if not os.path.isdir(path): + raise + else: + raise + + +def read_cached_file(filename, force_reload=False): + """Read from a file if it has been modified. + + :param force_reload: Whether to reload the file. + :returns: A tuple with a boolean specifying if the data is fresh + or not. + """ + global _FILE_CACHE + + if force_reload and filename in _FILE_CACHE: + del _FILE_CACHE[filename] + + reloaded = False + mtime = os.path.getmtime(filename) + cache_info = _FILE_CACHE.setdefault(filename, {}) + + if not cache_info or mtime > cache_info.get('mtime', 0): + LOG.debug(_("Reloading cached file %s") % filename) + with open(filename) as fap: + cache_info['data'] = fap.read() + cache_info['mtime'] = mtime + reloaded = True + return (reloaded, cache_info['data']) + + +def delete_if_exists(path): + """Delete a file, but ignore file not found error. + + :param path: File to delete + """ + + try: + os.unlink(path) + except OSError as e: + if e.errno == errno.ENOENT: + return + else: + raise + + +@contextlib.contextmanager +def remove_path_on_error(path): + """Protect code that wants to operate on PATH atomically. + Any exception will cause PATH to be removed. 
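+
+    A sketch of the intended use (the path and the fetch_image_to() helper
+    below are illustrative, not part of this module):
+
+        with remove_path_on_error('/tmp/staging.img'):
+            fetch_image_to('/tmp/staging.img')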
+ + :param path: File to work with + """ + try: + yield + except Exception: + with excutils.save_and_reraise_exception(): + delete_if_exists(path) + + +def file_open(*args, **kwargs): + """Open file + + see built-in file() documentation for more details + + Note: The reason this is kept in a separate module is to easily + be able to provide a stub module that doesn't alter system + state at all (for unit tests) + """ + return file(*args, **kwargs) diff --git a/cinder/openstack/common/gettextutils.py b/cinder/openstack/common/gettextutils.py index 235350cc49..4a4b1b6975 100644 --- a/cinder/openstack/common/gettextutils.py +++ b/cinder/openstack/common/gettextutils.py @@ -1,6 +1,5 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2012 Red Hat, Inc. +# Copyright 2013 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -20,14 +19,393 @@ Usual usage in an openstack.common module: - from openstack.common.gettextutils import _ + from cinder.openstack.common.gettextutils import _ """ +import copy import gettext +import locale +from logging import handlers +import os +import re + +from babel import localedata +import six + +_localedir = os.environ.get('cinder'.upper() + '_LOCALEDIR') +_t = gettext.translation('cinder', localedir=_localedir, fallback=True) + +_AVAILABLE_LANGUAGES = {} +USE_LAZY = False + +def enable_lazy(): + """Convenience function for configuring _() to use lazy gettext -t = gettext.translation('openstack-common', 'locale', fallback=True) + Call this at the start of execution to enable the gettextutils._ + function to use lazy gettext functionality. This is useful if + your project is importing _ directly instead of using the + gettextutils.install() way of importing the _ function. + """ + global USE_LAZY + USE_LAZY = True def _(msg): - return t.ugettext(msg) + if USE_LAZY: + return Message(msg, domain='cinder') + else: + if six.PY3: + return _t.gettext(msg) + return _t.ugettext(msg) + + +def install(domain, lazy=False): + """Install a _() function using the given translation domain. + + Given a translation domain, install a _() function using gettext's + install() function. + + The main difference from gettext.install() is that we allow + overriding the default localedir (e.g. /usr/share/locale) using + a translation-domain-specific environment variable (e.g. + NOVA_LOCALEDIR). + + :param domain: the translation domain + :param lazy: indicates whether or not to install the lazy _() function. + The lazy _() introduces a way to do deferred translation + of messages by installing a _ that builds Message objects, + instead of strings, which can then be lazily translated into + any available locale. + """ + if lazy: + # NOTE(mrodden): Lazy gettext functionality. + # + # The following introduces a deferred way to do translations on + # messages in OpenStack. We override the standard _() function + # and % (format string) operation to build Message objects that can + # later be translated when we have more information. + def _lazy_gettext(msg): + """Create and return a Message object. + + Lazy gettext function for a given domain, it is a factory method + for a project/module to get a lazy gettext function for its own + translation domain (i.e. nova, glance, cinder, etc.) + + Message encapsulates a string so that we can translate + it later when needed. 
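+
+            A rough sketch of the effect (the message text is illustrative):
+            after install('cinder', lazy=True), _('error: %s') returns a
+            Message object rather than a plain string, and '%'-formatting
+            the result keeps enough state to translate it per-locale later.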
+ """ + return Message(msg, domain=domain) + + from six import moves + moves.builtins.__dict__['_'] = _lazy_gettext + else: + localedir = '%s_LOCALEDIR' % domain.upper() + if six.PY3: + gettext.install(domain, + localedir=os.environ.get(localedir)) + else: + gettext.install(domain, + localedir=os.environ.get(localedir), + unicode=True) + + +class Message(six.text_type): + """A Message object is a unicode object that can be translated. + + Translation of Message is done explicitly using the translate() method. + For all non-translation intents and purposes, a Message is simply unicode, + and can be treated as such. + """ + + def __new__(cls, msgid, msgtext=None, params=None, domain='cinder', *args): + """Create a new Message object. + + In order for translation to work gettext requires a message ID, this + msgid will be used as the base unicode text. It is also possible + for the msgid and the base unicode text to be different by passing + the msgtext parameter. + """ + # If the base msgtext is not given, we use the default translation + # of the msgid (which is in English) just in case the system locale is + # not English, so that the base text will be in that locale by default. + if not msgtext: + msgtext = Message._translate_msgid(msgid, domain) + # We want to initialize the parent unicode with the actual object that + # would have been plain unicode if 'Message' was not enabled. + msg = super(Message, cls).__new__(cls, msgtext) + msg.msgid = msgid + msg.domain = domain + msg.params = params + return msg + + def translate(self, desired_locale=None): + """Translate this message to the desired locale. + + :param desired_locale: The desired locale to translate the message to, + if no locale is provided the message will be + translated to the system's default locale. + + :returns: the translated message in unicode + """ + + translated_message = Message._translate_msgid(self.msgid, + self.domain, + desired_locale) + if self.params is None: + # No need for more translation + return translated_message + + # This Message object may have been formatted with one or more + # Message objects as substitution arguments, given either as a single + # argument, part of a tuple, or as one or more values in a dictionary. + # When translating this Message we need to translate those Messages too + translated_params = _translate_args(self.params, desired_locale) + + translated_message = translated_message % translated_params + + return translated_message + + @staticmethod + def _translate_msgid(msgid, domain, desired_locale=None): + if not desired_locale: + system_locale = locale.getdefaultlocale() + # If the system locale is not available to the runtime use English + if not system_locale[0]: + desired_locale = 'en_US' + else: + desired_locale = system_locale[0] + + locale_dir = os.environ.get(domain.upper() + '_LOCALEDIR') + lang = gettext.translation(domain, + localedir=locale_dir, + languages=[desired_locale], + fallback=True) + if six.PY3: + translator = lang.gettext + else: + translator = lang.ugettext + + translated_message = translator(msgid) + return translated_message + + def __mod__(self, other): + # When we mod a Message we want the actual operation to be performed + # by the parent class (i.e. 
unicode()), the only thing we do here is + # save the original msgid and the parameters in case of a translation + unicode_mod = super(Message, self).__mod__(other) + modded = Message(self.msgid, + msgtext=unicode_mod, + params=self._sanitize_mod_params(other), + domain=self.domain) + return modded + + def _sanitize_mod_params(self, other): + """Sanitize the object being modded with this Message. + + - Add support for modding 'None' so translation supports it + - Trim the modded object, which can be a large dictionary, to only + those keys that would actually be used in a translation + - Snapshot the object being modded, in case the message is + translated, it will be used as it was when the Message was created + """ + if other is None: + params = (other,) + elif isinstance(other, dict): + params = self._trim_dictionary_parameters(other) + else: + params = self._copy_param(other) + return params + + def _trim_dictionary_parameters(self, dict_param): + """Return a dict that only has matching entries in the msgid.""" + # NOTE(luisg): Here we trim down the dictionary passed as parameters + # to avoid carrying a lot of unnecessary weight around in the message + # object, for example if someone passes in Message() % locals() but + # only some params are used, and additionally we prevent errors for + # non-deepcopyable objects by unicoding() them. + + # Look for %(param) keys in msgid; + # Skip %% and deal with the case where % is first character on the line + keys = re.findall('(?:[^%]|^)?%\((\w*)\)[a-z]', self.msgid) + + # If we don't find any %(param) keys but have a %s + if not keys and re.findall('(?:[^%]|^)%[a-z]', self.msgid): + # Apparently the full dictionary is the parameter + params = self._copy_param(dict_param) + else: + params = {} + for key in keys: + params[key] = self._copy_param(dict_param[key]) + + return params + + def _copy_param(self, param): + try: + return copy.deepcopy(param) + except TypeError: + # Fallback to casting to unicode this will handle the + # python code-like objects that can't be deep-copied + return six.text_type(param) + + def __add__(self, other): + msg = _('Message objects do not support addition.') + raise TypeError(msg) + + def __radd__(self, other): + return self.__add__(other) + + def __str__(self): + # NOTE(luisg): Logging in python 2.6 tries to str() log records, + # and it expects specifically a UnicodeError in order to proceed. + msg = _('Message objects do not support str() because they may ' + 'contain non-ascii characters. ' + 'Please use unicode() or translate() instead.') + raise UnicodeError(msg) + + +def get_available_languages(domain): + """Lists the available languages for the given translation domain. + + :param domain: the domain to get languages for + """ + if domain in _AVAILABLE_LANGUAGES: + return copy.copy(_AVAILABLE_LANGUAGES[domain]) + + localedir = '%s_LOCALEDIR' % domain.upper() + find = lambda x: gettext.find(domain, + localedir=os.environ.get(localedir), + languages=[x]) + + # NOTE(mrodden): en_US should always be available (and first in case + # order matters) since our in-line message strings are en_US + language_list = ['en_US'] + # NOTE(luisg): Babel <1.0 used a function called list(), which was + # renamed to locale_identifiers() in >=1.0, the requirements master list + # requires >=0.9.6, uncapped, so defensively work with both. 
We can remove + # this check when the master list updates to >=1.0, and update all projects + list_identifiers = (getattr(localedata, 'list', None) or + getattr(localedata, 'locale_identifiers')) + locale_identifiers = list_identifiers() + for i in locale_identifiers: + if find(i) is not None: + language_list.append(i) + _AVAILABLE_LANGUAGES[domain] = language_list + return copy.copy(language_list) + + +def translate(obj, desired_locale=None): + """Gets the translated unicode representation of the given object. + + If the object is not translatable it is returned as-is. + If the locale is None the object is translated to the system locale. + + :param obj: the object to translate + :param desired_locale: the locale to translate the message to, if None the + default system locale will be used + :returns: the translated object in unicode, or the original object if + it could not be translated + """ + message = obj + if not isinstance(message, Message): + # If the object to translate is not already translatable, + # let's first get its unicode representation + message = six.text_type(obj) + if isinstance(message, Message): + # Even after unicoding() we still need to check if we are + # running with translatable unicode before translating + return message.translate(desired_locale) + return obj + + +def _translate_args(args, desired_locale=None): + """Translates all the translatable elements of the given arguments object. + + This method is used for translating the translatable values in method + arguments which include values of tuples or dictionaries. + If the object is not a tuple or a dictionary the object itself is + translated if it is translatable. + + If the locale is None the object is translated to the system locale. + + :param args: the args to translate + :param desired_locale: the locale to translate the args to, if None the + default system locale will be used + :returns: a new args object with the translated contents of the original + """ + if isinstance(args, tuple): + return tuple(translate(v, desired_locale) for v in args) + if isinstance(args, dict): + translated_dict = {} + for (k, v) in six.iteritems(args): + translated_v = translate(v, desired_locale) + translated_dict[k] = translated_v + return translated_dict + return translate(args, desired_locale) + + +class TranslationHandler(handlers.MemoryHandler): + """Handler that translates records before logging them. + + The TranslationHandler takes a locale and a target logging.Handler object + to forward LogRecord objects to after translating them. This handler + depends on Message objects being logged, instead of regular strings. + + The handler can be configured declaratively in the logging.conf as follows: + + [handlers] + keys = translatedlog, translator + + [handler_translatedlog] + class = handlers.WatchedFileHandler + args = ('/var/log/api-localized.log',) + formatter = context + + [handler_translator] + class = openstack.common.log.TranslationHandler + target = translatedlog + args = ('zh_CN',) + + If the specified locale is not available in the system, the handler will + log in the default locale. 
+ """ + + def __init__(self, locale=None, target=None): + """Initialize a TranslationHandler + + :param locale: locale to use for translating messages + :param target: logging.Handler object to forward + LogRecord objects to after translation + """ + # NOTE(luisg): In order to allow this handler to be a wrapper for + # other handlers, such as a FileHandler, and still be able to + # configure it using logging.conf, this handler has to extend + # MemoryHandler because only the MemoryHandlers' logging.conf + # parsing is implemented such that it accepts a target handler. + handlers.MemoryHandler.__init__(self, capacity=0, target=target) + self.locale = locale + + def setFormatter(self, fmt): + self.target.setFormatter(fmt) + + def emit(self, record): + # We save the message from the original record to restore it + # after translation, so other handlers are not affected by this + original_msg = record.msg + original_args = record.args + + try: + self._translate_and_log_record(record) + finally: + record.msg = original_msg + record.args = original_args + + def _translate_and_log_record(self, record): + record.msg = translate(record.msg, self.locale) + + # In addition to translating the message, we also need to translate + # arguments that were passed to the log method that were not part + # of the main message e.g., log.info(_('Some message %s'), this_one)) + record.args = _translate_args(record.args, self.locale) + + self.target.emit(record) diff --git a/cinder/openstack/common/imageutils.py b/cinder/openstack/common/imageutils.py new file mode 100644 index 0000000000..312ffb5d1f --- /dev/null +++ b/cinder/openstack/common/imageutils.py @@ -0,0 +1,144 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# Copyright (c) 2010 Citrix Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Helper methods to deal with images. 
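+
+A rough usage sketch (the qemu_img_output variable stands in for the text
+produced by running "qemu-img info"):
+
+    info = QemuImgInfo(qemu_img_output)
+    print(info.file_format, info.virtual_size, info.backing_file)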
+""" + +import re + +from cinder.openstack.common.gettextutils import _ # noqa +from cinder.openstack.common import strutils + + +class QemuImgInfo(object): + BACKING_FILE_RE = re.compile((r"^(.*?)\s*\(actual\s+path\s*:" + r"\s+(.*?)\)\s*$"), re.I) + TOP_LEVEL_RE = re.compile(r"^([\w\d\s\_\-]+):(.*)$") + SIZE_RE = re.compile(r"\(\s*(\d+)\s+bytes\s*\)", re.I) + + def __init__(self, cmd_output=None): + details = self._parse(cmd_output or '') + self.image = details.get('image') + self.backing_file = details.get('backing_file') + self.file_format = details.get('file_format') + self.virtual_size = details.get('virtual_size') + self.cluster_size = details.get('cluster_size') + self.disk_size = details.get('disk_size') + self.snapshots = details.get('snapshot_list', []) + self.encryption = details.get('encryption') + + def __str__(self): + lines = [ + 'image: %s' % self.image, + 'file_format: %s' % self.file_format, + 'virtual_size: %s' % self.virtual_size, + 'disk_size: %s' % self.disk_size, + 'cluster_size: %s' % self.cluster_size, + 'backing_file: %s' % self.backing_file, + ] + if self.snapshots: + lines.append("snapshots: %s" % self.snapshots) + return "\n".join(lines) + + def _canonicalize(self, field): + # Standardize on underscores/lc/no dash and no spaces + # since qemu seems to have mixed outputs here... and + # this format allows for better integration with python + # - ie for usage in kwargs and such... + field = field.lower().strip() + for c in (" ", "-"): + field = field.replace(c, '_') + return field + + def _extract_bytes(self, details): + # Replace it with the byte amount + real_size = self.SIZE_RE.search(details) + if real_size: + details = real_size.group(1) + try: + details = strutils.to_bytes(details) + except TypeError: + pass + return details + + def _extract_details(self, root_cmd, root_details, lines_after): + real_details = root_details + if root_cmd == 'backing_file': + # Replace it with the real backing file + backing_match = self.BACKING_FILE_RE.match(root_details) + if backing_match: + real_details = backing_match.group(2).strip() + elif root_cmd in ['virtual_size', 'cluster_size', 'disk_size']: + # Replace it with the byte amount (if we can convert it) + real_details = self._extract_bytes(root_details) + elif root_cmd == 'file_format': + real_details = real_details.strip().lower() + elif root_cmd == 'snapshot_list': + # Next line should be a header, starting with 'ID' + if not lines_after or not lines_after[0].startswith("ID"): + msg = _("Snapshot list encountered but no header found!") + raise ValueError(msg) + del lines_after[0] + real_details = [] + # This is the sprintf pattern we will try to match + # "%-10s%-20s%7s%20s%15s" + # ID TAG VM SIZE DATE VM CLOCK (current header) + while lines_after: + line = lines_after[0] + line_pieces = line.split() + if len(line_pieces) != 6: + break + # Check against this pattern in the final position + # "%02d:%02d:%02d.%03d" + date_pieces = line_pieces[5].split(":") + if len(date_pieces) != 3: + break + real_details.append({ + 'id': line_pieces[0], + 'tag': line_pieces[1], + 'vm_size': line_pieces[2], + 'date': line_pieces[3], + 'vm_clock': line_pieces[4] + " " + line_pieces[5], + }) + del lines_after[0] + return real_details + + def _parse(self, cmd_output): + # Analysis done of qemu-img.c to figure out what is going on here + # Find all points start with some chars and then a ':' then a newline + # and then handle the results of those 'top level' items in a separate + # function. 
+        #
+        # TODO(harlowja): newer versions might have a json output format
+        # we should switch to that whenever possible.
+        # see: http://bit.ly/XLJXDX
+        contents = {}
+        lines = [x for x in cmd_output.splitlines() if x.strip()]
+        while lines:
+            line = lines.pop(0)
+            top_level = self.TOP_LEVEL_RE.match(line)
+            if top_level:
+                root = self._canonicalize(top_level.group(1))
+                if not root:
+                    continue
+                root_details = top_level.group(2).strip()
+                details = self._extract_details(root, root_details, lines)
+                contents[root] = details
+        return contents
diff --git a/cinder/openstack/common/importutils.py b/cinder/openstack/common/importutils.py
index f45372b4db..4fd9ae2bc2 100644
--- a/cinder/openstack/common/importutils.py
+++ b/cinder/openstack/common/importutils.py
@@ -1,6 +1,4 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 OpenStack LLC.
+# Copyright 2011 OpenStack Foundation.
 # All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -24,12 +22,12 @@
 
 
 def import_class(import_str):
-    """Returns a class from a string including module and class"""
+    """Returns a class from a string including module and class."""
     mod_str, _sep, class_str = import_str.rpartition('.')
     try:
         __import__(mod_str)
         return getattr(sys.modules[mod_str], class_str)
-    except (ValueError, AttributeError), exc:
+    except (ValueError, AttributeError):
         raise ImportError('Class %s cannot be found (%s)' %
                           (class_str,
                            traceback.format_exception(*sys.exc_info())))
@@ -41,8 +39,9 @@ def import_object(import_str, *args, **kwargs):
 
 
 def import_object_ns(name_space, import_str, *args, **kwargs):
-    """
-    Import a class and return an instance of it, first by trying
+    """Tries to import an object from the default namespace.
+
+    Imports a class and returns an instance of it, first by trying
     to find the class in a default namespace, then failing back to
     a full path if not found in the default namespace.
     """
@@ -57,3 +56,11 @@ def import_module(import_str):
     """Import a module."""
     __import__(import_str)
     return sys.modules[import_str]
+
+
+def try_import(import_str, default=None):
+    """Try to import a module and if it fails return default."""
+    try:
+        return import_module(import_str)
+    except ImportError:
+        return default
diff --git a/cinder/openstack/common/iniparser.py b/cinder/openstack/common/iniparser.py
deleted file mode 100644
index 241284449e..0000000000
--- a/cinder/openstack/common/iniparser.py
+++ /dev/null
@@ -1,130 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 OpenStack LLC.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
- - -class ParseError(Exception): - def __init__(self, message, lineno, line): - self.msg = message - self.line = line - self.lineno = lineno - - def __str__(self): - return 'at line %d, %s: %r' % (self.lineno, self.msg, self.line) - - -class BaseParser(object): - lineno = 0 - parse_exc = ParseError - - def _assignment(self, key, value): - self.assignment(key, value) - return None, [] - - def _get_section(self, line): - if line[-1] != ']': - return self.error_no_section_end_bracket(line) - if len(line) <= 2: - return self.error_no_section_name(line) - - return line[1:-1] - - def _split_key_value(self, line): - colon = line.find(':') - equal = line.find('=') - if colon < 0 and equal < 0: - return self.error_invalid_assignment(line) - - if colon < 0 or (equal >= 0 and equal < colon): - key, value = line[:equal], line[equal + 1:] - else: - key, value = line[:colon], line[colon + 1:] - - value = value.strip() - if ((value and value[0] == value[-1]) and - (value[0] == "\"" or value[0] == "'")): - value = value[1:-1] - return key.strip(), [value] - - def parse(self, lineiter): - key = None - value = [] - - for line in lineiter: - self.lineno += 1 - - line = line.rstrip() - if not line: - # Blank line, ends multi-line values - if key: - key, value = self._assignment(key, value) - continue - elif line[0] in (' ', '\t'): - # Continuation of previous assignment - if key is None: - self.error_unexpected_continuation(line) - else: - value.append(line.lstrip()) - continue - - if key: - # Flush previous assignment, if any - key, value = self._assignment(key, value) - - if line[0] == '[': - # Section start - section = self._get_section(line) - if section: - self.new_section(section) - elif line[0] in '#;': - self.comment(line[1:].lstrip()) - else: - key, value = self._split_key_value(line) - if not key: - return self.error_empty_key(line) - - if key: - # Flush previous assignment, if any - self._assignment(key, value) - - def assignment(self, key, value): - """Called when a full assignment is parsed""" - raise NotImplementedError() - - def new_section(self, section): - """Called when a new section is started""" - raise NotImplementedError() - - def comment(self, comment): - """Called when a comment is parsed""" - pass - - def error_invalid_assignment(self, line): - raise self.parse_exc("No ':' or '=' found in assignment", - self.lineno, line) - - def error_empty_key(self, line): - raise self.parse_exc('Key cannot be empty', self.lineno, line) - - def error_unexpected_continuation(self, line): - raise self.parse_exc('Unexpected continuation line', - self.lineno, line) - - def error_no_section_end_bracket(self, line): - raise self.parse_exc('Invalid section (must end with ])', - self.lineno, line) - - def error_no_section_name(self, line): - raise self.parse_exc('Empty section name', self.lineno, line) diff --git a/cinder/openstack/common/jsonutils.py b/cinder/openstack/common/jsonutils.py index bab6a36514..205f710f09 100644 --- a/cinder/openstack/common/jsonutils.py +++ b/cinder/openstack/common/jsonutils.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. 
 # Copyright 2011 Justin Santa Barbara
@@ -34,15 +32,40 @@
 
 import datetime
+import functools
 import inspect
 import itertools
 import json
-import xmlrpclib
+try:
+    import xmlrpclib
+except ImportError:
+    # NOTE(jaypipes): xmlrpclib was renamed to xmlrpc.client in Python3
+    #                 however the function and object call signatures
+    #                 remained the same. This whole try/except block should
+    #                 be removed and replaced with a call to six.moves once
+    #                 six 1.4.2 is released. See http://bit.ly/1bqrVzu
+    import xmlrpc.client as xmlrpclib
+
+import six
+
+from cinder.openstack.common import gettextutils
+from cinder.openstack.common import importutils
 from cinder.openstack.common import timeutils
 
+netaddr = importutils.try_import("netaddr")
+
+_nasty_type_tests = [inspect.ismodule, inspect.isclass, inspect.ismethod,
+                     inspect.isfunction, inspect.isgeneratorfunction,
+                     inspect.isgenerator, inspect.istraceback, inspect.isframe,
+                     inspect.iscode, inspect.isbuiltin, inspect.isroutine,
+                     inspect.isabstract]
+
+_simple_types = (six.string_types + six.integer_types
+                 + (type(None), bool, float))
 
-def to_primitive(value, convert_instances=False, level=0):
+
+def to_primitive(value, convert_instances=False, convert_datetime=True,
+                 level=0, max_depth=3):
     """Convert a complex object into primitives.
 
     Handy for JSON serialization. We can optionally handle instances,
@@ -56,19 +79,32 @@ def to_primitive(value, convert_instances=False, level=0):
     Therefore, convert_instances=True is lossy ... be aware.
 
     """
-    nasty = [inspect.ismodule, inspect.isclass, inspect.ismethod,
-             inspect.isfunction, inspect.isgeneratorfunction,
-             inspect.isgenerator, inspect.istraceback, inspect.isframe,
-             inspect.iscode, inspect.isbuiltin, inspect.isroutine,
-             inspect.isabstract]
-    for test in nasty:
-        if test(value):
-            return unicode(value)
-
-    # value of itertools.count doesn't get caught by inspects
-    # above and results in infinite loop when list(value) is called.
+    # handle obvious types first - order of basic types determined by running
+    # full tests on nova project, resulting in the following counts:
+    # 572754 <type 'NoneType'>
+    # 460353 <type 'int'>
+    # 379632 <type 'unicode'>
+    # 274610 <type 'str'>
+    # 199918 <type 'dict'>
+    # 114200 <type 'datetime.datetime'>
+    #  51817 <type 'bool'>
+    #  26164 <type 'list'>
+    #   6491 <type 'float'>
+    #    283 <type 'tuple'>
+    #     19 <type 'object'>
+    if isinstance(value, _simple_types):
+        return value
+
+    if isinstance(value, datetime.datetime):
+        if convert_datetime:
+            return timeutils.strtime(value)
+        else:
+            return value
+
+    # value of itertools.count doesn't get caught by nasty_type_tests
+    # and results in infinite loop when list(value) is called.
     if type(value) == itertools.count:
-        return unicode(value)
+        return six.text_type(value)
 
     # FIXME(vish): Workaround for LP bug 852095. Without this workaround,
     #              tests that raise an exception in a mocked method that
@@ -78,52 +114,50 @@ def to_primitive(value, convert_instances=False, level=0):
     if getattr(value, '__module__', None) == 'mox':
         return 'mock'
 
-    if level > 3:
+    if level > max_depth:
         return '?'
 
     # The try block may not be necessary after the class check above,
     # but just in case ...
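+    # NOTE: "recursive" below is a partial application of to_primitive that
+    # carries the conversion flags along, so nested dicts and lists are
+    # converted with the same settings; "level" is only bumped for values
+    # that can recurse indefinitely (instance __dict__s and iteritems()).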
try: + recursive = functools.partial(to_primitive, + convert_instances=convert_instances, + convert_datetime=convert_datetime, + level=level, + max_depth=max_depth) + if isinstance(value, dict): + return dict((k, recursive(v)) for k, v in six.iteritems(value)) + elif isinstance(value, (list, tuple)): + return [recursive(lv) for lv in value] + # It's not clear why xmlrpclib created their own DateTime type, but # for our purposes, make it a datetime type which is explicitly # handled if isinstance(value, xmlrpclib.DateTime): value = datetime.datetime(*tuple(value.timetuple())[:6]) - if isinstance(value, (list, tuple)): - o = [] - for v in value: - o.append(to_primitive(v, convert_instances=convert_instances, - level=level)) - return o - elif isinstance(value, dict): - o = {} - for k, v in value.iteritems(): - o[k] = to_primitive(v, convert_instances=convert_instances, - level=level) - return o - elif isinstance(value, datetime.datetime): + if convert_datetime and isinstance(value, datetime.datetime): return timeutils.strtime(value) + elif isinstance(value, gettextutils.Message): + return value.data elif hasattr(value, 'iteritems'): - return to_primitive(dict(value.iteritems()), - convert_instances=convert_instances, - level=level + 1) + return recursive(dict(value.iteritems()), level=level + 1) elif hasattr(value, '__iter__'): - return to_primitive(list(value), - convert_instances=convert_instances, - level=level) + return recursive(list(value)) elif convert_instances and hasattr(value, '__dict__'): # Likely an instance of something. Watch for cycles. # Ignore class member vars. - return to_primitive(value.__dict__, - convert_instances=convert_instances, - level=level + 1) + return recursive(value.__dict__, level=level + 1) + elif netaddr and isinstance(value, netaddr.IPAddress): + return six.text_type(value) else: + if any(test(value) for test in _nasty_type_tests): + return six.text_type(value) return value - except TypeError, e: + except TypeError: # Class objects are tricky since they may define something like # __iter__ defined but it isn't callable as list(). - return unicode(value) + return six.text_type(value) def dumps(value, default=to_primitive, **kwargs): diff --git a/cinder/openstack/common/local.py b/cinder/openstack/common/local.py index 19d962732c..0819d5b97c 100644 --- a/cinder/openstack/common/local.py +++ b/cinder/openstack/common/local.py @@ -1,6 +1,4 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2011 OpenStack LLC. +# Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -15,23 +13,33 @@ # License for the specific language governing permissions and limitations # under the License. -"""Greenthread local storage of variables using weak references""" +"""Local storage of variables using weak references""" +import threading import weakref -from eventlet import corolocal - -class WeakLocal(corolocal.local): +class WeakLocal(threading.local): def __getattribute__(self, attr): - rval = corolocal.local.__getattribute__(self, attr) + rval = super(WeakLocal, self).__getattribute__(attr) if rval: + # NOTE(mikal): this bit is confusing. What is stored is a weak + # reference, not the value itself. We therefore need to lookup + # the weak reference and return the inner value here. 
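+            # Calling the weak reference yields the stored object, or None
+            # once the referent has been garbage collected.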
             rval = rval()
         return rval
 
     def __setattr__(self, attr, value):
         value = weakref.ref(value)
-        return corolocal.local.__setattr__(self, attr, value)
+        return super(WeakLocal, self).__setattr__(attr, value)
 
 
+# NOTE(mikal): the name "store" should be deprecated in the future
 store = WeakLocal()
+
+# A "weak" store uses weak references, so an object can be garbage collected
+# as soon as it falls out of scope in the code that uses the thread local
+# storage. A "strong" store holds a reference to the object so that it never
+# falls out of scope.
+weak_store = WeakLocal()
+strong_store = threading.local()
diff --git a/cinder/openstack/common/lockutils.py b/cinder/openstack/common/lockutils.py
new file mode 100644
index 0000000000..005ad3cf9d
--- /dev/null
+++ b/cinder/openstack/common/lockutils.py
@@ -0,0 +1,279 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack Foundation.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import errno
+import functools
+import os
+import shutil
+import tempfile
+import time
+import weakref
+
+from eventlet import semaphore
+from oslo.config import cfg
+
+from cinder.openstack.common import fileutils
+from cinder.openstack.common.gettextutils import _
+from cinder.openstack.common import local
+from cinder.openstack.common import log as logging
+
+
+LOG = logging.getLogger(__name__)
+
+
+util_opts = [
+    cfg.BoolOpt('disable_process_locking', default=False,
+                help='Whether to disable inter-process locks'),
+    cfg.StrOpt('lock_path',
+               help=('Directory to use for lock files. Defaults to a '
+                     'temp directory'))
+]
+
+
+CONF = cfg.CONF
+CONF.register_opts(util_opts)
+
+
+def set_defaults(lock_path):
+    cfg.set_defaults(util_opts, lock_path=lock_path)
+
+
+class _InterProcessLock(object):
+    """Lock implementation which allows multiple locks, working around
+    issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does
+    not require any cleanup. Since the lock is always held on a file
+    descriptor rather than outside of the process, the lock gets dropped
+    automatically if the process crashes, even if __exit__ is not executed.
+
+    There are no guarantees regarding usage by multiple green threads in a
+    single process here. This lock works only between processes. Exclusive
+    access between local threads should be achieved using the semaphores
+    in the @synchronized decorator.
+
+    Note these locks are released when the descriptor is closed, so it's not
+    safe to close the file descriptor while another green thread holds the
+    lock. Just opening and closing the lock file can break synchronisation,
+    so lock files must be accessed only using this abstraction.
+    """
+
+    def __init__(self, name):
+        self.lockfile = None
+        self.fname = name
+
+    def __enter__(self):
+        self.lockfile = open(self.fname, 'w')
+
+        while True:
+            try:
+                # Using non-blocking locks since green threads are not
+                # patched to deal with blocking locking calls.
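+                # NOTE: illustrative sketch only, not part of the patched
+                # module. On POSIX the non-blocking attempt below reduces to
+                #
+                #     fcntl.lockf(self.lockfile,
+                #                 fcntl.LOCK_EX | fcntl.LOCK_NB)
+                #
+                # which raises IOError with errno EACCES or EAGAIN instead of
+                # blocking when another process holds the lock, hence the
+                # retry loop around it.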
+                # Also upon reading the MSDN docs for locking(), it seems
+                # to have a laughable 10 attempts "blocking" mechanism.
+                self.trylock()
+                return self
+            except IOError as e:
+                if e.errno in (errno.EACCES, errno.EAGAIN):
+                    # external locks synchronise things like iptables
+                    # updates - give it some time to prevent busy spinning
+                    time.sleep(0.01)
+                else:
+                    raise
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        try:
+            self.unlock()
+            self.lockfile.close()
+        except IOError:
+            LOG.exception(_("Could not release the acquired lock `%s`"),
+                          self.fname)
+
+    def trylock(self):
+        raise NotImplementedError()
+
+    def unlock(self):
+        raise NotImplementedError()
+
+
+class _WindowsLock(_InterProcessLock):
+    def trylock(self):
+        msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_NBLCK, 1)
+
+    def unlock(self):
+        msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_UNLCK, 1)
+
+
+class _PosixLock(_InterProcessLock):
+    def trylock(self):
+        fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
+
+    def unlock(self):
+        fcntl.lockf(self.lockfile, fcntl.LOCK_UN)
+
+
+if os.name == 'nt':
+    import msvcrt
+    InterProcessLock = _WindowsLock
+else:
+    import fcntl
+    InterProcessLock = _PosixLock
+
+_semaphores = weakref.WeakValueDictionary()
+
+
+def synchronized(name, lock_file_prefix, external=False, lock_path=None):
+    """Synchronization decorator.
+
+    Decorating a method like so::
+
+        @synchronized('mylock')
+        def foo(self, *args):
+            ...
+
+    ensures that only one thread will execute the foo method at a time.
+
+    Different methods can share the same lock::
+
+        @synchronized('mylock')
+        def foo(self, *args):
+            ...
+
+        @synchronized('mylock')
+        def bar(self, *args):
+            ...
+
+    This way only one of either foo or bar can be executing at a time.
+
+    :param lock_file_prefix: The lock_file_prefix argument is used to provide
+    lock files on disk with a meaningful prefix. The prefix should end with a
+    hyphen ('-') if specified.
+
+    :param external: The external keyword argument denotes whether this lock
+    should work across multiple processes. This means that if two different
+    workers both run a method decorated with @synchronized('mylock',
+    external=True), only one of them will execute at a time.
+
+    :param lock_path: The lock_path keyword argument is used to specify a
+    special location for external lock files to live. If nothing is set, then
+    CONF.lock_path is used as a default.
+    """
+
+    def wrap(f):
+        @functools.wraps(f)
+        def inner(*args, **kwargs):
+            # NOTE(soren): If we ever go natively threaded, this will be racy.
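+            # NOTE: illustrative sketch only, not part of the patched module.
+            # The race referred to above is the get-then-set on _semaphores
+            # below; a native-threaded variant would need an atomic
+            # lookup-and-insert guarded by a real mutex, e.g. (hypothetical):
+            #
+            #     with _semaphores_mutex:
+            #         sem = _semaphores.setdefault(name,
+            #                                      semaphore.Semaphore())
+            #
+            # Under green threads the code can instead rely on there being
+            # no yield between the .get() and the assignment.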
+ # See http://stackoverflow.com/questions/5390569/dyn + # amically-allocating-and-destroying-mutexes + sem = _semaphores.get(name, semaphore.Semaphore()) + if name not in _semaphores: + # this check is not racy - we're already holding ref locally + # so GC won't remove the item and there was no IO switch + # (only valid in greenthreads) + _semaphores[name] = sem + + with sem: + LOG.debug(_('Got semaphore "%(lock)s" for method ' + '"%(method)s"...'), {'lock': name, + 'method': f.__name__}) + + # NOTE(mikal): I know this looks odd + if not hasattr(local.strong_store, 'locks_held'): + local.strong_store.locks_held = [] + local.strong_store.locks_held.append(name) + + try: + if external and not CONF.disable_process_locking: + LOG.debug(_('Attempting to grab file lock "%(lock)s" ' + 'for method "%(method)s"...'), + {'lock': name, 'method': f.__name__}) + cleanup_dir = False + + # We need a copy of lock_path because it is non-local + local_lock_path = lock_path + if not local_lock_path: + local_lock_path = CONF.lock_path + + if not local_lock_path: + cleanup_dir = True + local_lock_path = tempfile.mkdtemp() + + if not os.path.exists(local_lock_path): + fileutils.ensure_tree(local_lock_path) + + # NOTE(mikal): the lock name cannot contain directory + # separators + safe_name = name.replace(os.sep, '_') + lock_file_name = '%s%s' % (lock_file_prefix, safe_name) + lock_file_path = os.path.join(local_lock_path, + lock_file_name) + + try: + lock = InterProcessLock(lock_file_path) + with lock: + LOG.debug(_('Got file lock "%(lock)s" at ' + '%(path)s for method ' + '"%(method)s"...'), + {'lock': name, + 'path': lock_file_path, + 'method': f.__name__}) + retval = f(*args, **kwargs) + finally: + LOG.debug(_('Released file lock "%(lock)s" at ' + '%(path)s for method "%(method)s"...'), + {'lock': name, + 'path': lock_file_path, + 'method': f.__name__}) + # NOTE(vish): This removes the tempdir if we needed + # to create one. This is used to + # cleanup the locks left behind by unit + # tests. + if cleanup_dir: + shutil.rmtree(local_lock_path) + else: + retval = f(*args, **kwargs) + + finally: + local.strong_store.locks_held.remove(name) + + return retval + return inner + return wrap + + +def synchronized_with_prefix(lock_file_prefix): + """Partial object generator for the synchronization decorator. + + Redefine @synchronized in each project like so:: + + (in nova/utils.py) + from nova.openstack.common import lockutils + + synchronized = lockutils.synchronized_with_prefix('nova-') + + + (in nova/foo.py) + from nova import utils + + @utils.synchronized('mylock') + def bar(self, *args): + ... + + The lock_file_prefix argument is used to provide lock files on disk with a + meaningful prefix. The prefix should end with a hyphen ('-') if specified. + """ + + return functools.partial(synchronized, lock_file_prefix=lock_file_prefix) diff --git a/cinder/openstack/common/log.py b/cinder/openstack/common/log.py index 963dcd043b..70d9c4c468 100644 --- a/cinder/openstack/common/log.py +++ b/cinder/openstack/common/log.py @@ -1,6 +1,4 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2011 OpenStack LLC. +# Copyright 2011 OpenStack Foundation. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. 
@@ -29,53 +27,140 @@
 """
 
-import cStringIO
 import inspect
 import itertools
 import logging
 import logging.config
 import logging.handlers
 import os
-import stat
+import re
 import sys
 import traceback
 
-from cinder.openstack.common import cfg
+from oslo.config import cfg
+import six
+from six import moves
+
 from cinder.openstack.common.gettextutils import _
+from cinder.openstack.common import importutils
 from cinder.openstack.common import jsonutils
 from cinder.openstack.common import local
-from cinder.openstack.common import notifier
+
+
+_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
+
+_SANITIZE_KEYS = ['adminPass', 'admin_pass', 'password', 'admin_password']
+
+# NOTE(ldbragst): Let's build a list of regex objects using the list of
+# _SANITIZE_KEYS we already have. This way, we only have to add the new key
+# to the list of _SANITIZE_KEYS and we can generate regular expressions
+# for XML and JSON automatically.
+_SANITIZE_PATTERNS = []
+_FORMAT_PATTERNS = [r'(%(key)s\s*[=]\s*[\"\']).*?([\"\'])',
+                    r'(<%(key)s>).*?(</%(key)s>)',
+                    r'([\"\']%(key)s[\"\']\s*:\s*[\"\']).*?([\"\'])',
+                    r'([\'"].*?%(key)s[\'"]\s*:\s*u?[\'"]).*?([\'"])']
+
+for key in _SANITIZE_KEYS:
+    for pattern in _FORMAT_PATTERNS:
+        reg_ex = re.compile(pattern % {'key': key}, re.DOTALL)
+        _SANITIZE_PATTERNS.append(reg_ex)
+
+
+common_cli_opts = [
+    cfg.BoolOpt('debug',
+                short='d',
+                default=False,
+                help='Print debugging output (set logging level to '
+                     'DEBUG instead of default WARNING level).'),
+    cfg.BoolOpt('verbose',
+                short='v',
+                default=False,
+                help='Print more verbose output (set logging level to '
+                     'INFO instead of default WARNING level).'),
+]
+
+logging_cli_opts = [
+    cfg.StrOpt('log-config-append',
+               metavar='PATH',
+               deprecated_name='log-config',
+               help='The name of logging configuration file. It does not '
+                    'disable existing loggers, but just appends specified '
+                    'logging configuration to any other existing logging '
+                    'options. Please see the Python logging module '
+                    'documentation for details on logging configuration '
+                    'files.'),
+    cfg.StrOpt('log-format',
+               default=None,
+               metavar='FORMAT',
+               help='DEPRECATED. '
+                    'A logging.Formatter log message format string which may '
+                    'use any of the available logging.LogRecord attributes. '
+                    'This option is deprecated. Please use '
+                    'logging_context_format_string and '
+                    'logging_default_format_string instead.'),
+    cfg.StrOpt('log-date-format',
+               default=_DEFAULT_LOG_DATE_FORMAT,
+               metavar='DATE_FORMAT',
+               help='Format string for %%(asctime)s in log records. '
+                    'Default: %(default)s'),
+    cfg.StrOpt('log-file',
+               metavar='PATH',
+               deprecated_name='logfile',
+               help='(Optional) Name of log file to output to. 
' + 'If no default is set, logging will go to stdout.'), + cfg.StrOpt('log-dir', + deprecated_name='logdir', + help='(Optional) The base directory used for relative ' + '--log-file paths'), + cfg.BoolOpt('use-syslog', + default=False, + help='Use syslog for logging.'), + cfg.StrOpt('syslog-log-facility', + default='LOG_USER', + help='syslog facility to receive log lines') +] + +generic_log_opts = [ + cfg.BoolOpt('use_stderr', + default=True, + help='Log output to standard error') +] + log_opts = [ cfg.StrOpt('logging_context_format_string', - default='%(asctime)s %(levelname)s %(name)s [%(request_id)s ' - '%(user_id)s %(project_id)s] %(instance)s' - '%(message)s', + default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s ' + '%(name)s [%(request_id)s %(user_identity)s] ' + '%(instance)s%(message)s', help='format string to use for log messages with context'), cfg.StrOpt('logging_default_format_string', - default='%(asctime)s %(process)d %(levelname)s %(name)s [-]' - ' %(instance)s%(message)s', + default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s ' + '%(name)s [-] %(instance)s%(message)s', help='format string to use for log messages without context'), cfg.StrOpt('logging_debug_format_suffix', default='%(funcName)s %(pathname)s:%(lineno)d', help='data to append to log format when level is DEBUG'), cfg.StrOpt('logging_exception_prefix', - default='%(asctime)s %(process)d TRACE %(name)s %(instance)s', + default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s ' + '%(instance)s', help='prefix each line of exception output with this format'), cfg.ListOpt('default_log_levels', default=[ + 'amqp=WARN', 'amqplib=WARN', - 'sqlalchemy=WARN', 'boto=WARN', + 'qpid=WARN', + 'sqlalchemy=WARN', 'suds=INFO', - 'keystone=INFO', - 'eventlet.wsgi.server=WARN' + 'iso8601=WARN', ], help='list of logger=LEVEL pairs'), cfg.BoolOpt('publish_errors', default=False, help='publish error events'), + cfg.BoolOpt('fatal_deprecations', + default=False, + help='make deprecations fatal'), # NOTE(mikal): there are two options here because sometimes we are handed # a full instance (and could include more information), and other times we @@ -90,24 +175,9 @@ 'format it like this'), ] - -generic_log_opts = [ - cfg.StrOpt('logdir', - default=None, - help='Log output to a per-service log file in named directory'), - cfg.StrOpt('logfile', - default=None, - help='Log output to a named file'), - cfg.BoolOpt('use_stderr', - default=True, - help='Log output to standard error'), - cfg.StrOpt('logfile_mode', - default='0644', - help='Default file mode used when creating log files'), -] - - CONF = cfg.CONF +CONF.register_cli_opts(common_cli_opts) +CONF.register_cli_opts(logging_cli_opts) CONF.register_opts(generic_log_opts) CONF.register_opts(log_opts) @@ -145,8 +215,8 @@ def _get_binary_name(): def _get_log_file_path(binary=None): - logfile = CONF.log_file or CONF.logfile - logdir = CONF.log_dir or CONF.logdir + logfile = CONF.log_file + logdir = CONF.log_dir if logfile and not logdir: return logfile @@ -158,8 +228,64 @@ def _get_log_file_path(binary=None): binary = binary or _get_binary_name() return '%s.log' % (os.path.join(logdir, binary),) + return None + + +def mask_password(message, secret="***"): + """Replace password with 'secret' in message. + + :param message: The string which includes security information. + :param secret: value with which to replace passwords. + :returns: The unicode value of message with the password fields masked. 
+ + For example: + + >>> mask_password("'adminPass' : 'aaaaa'") + "'adminPass' : '***'" + >>> mask_password("'admin_pass' : 'aaaaa'") + "'admin_pass' : '***'" + >>> mask_password('"password" : "aaaaa"') + '"password" : "***"' + >>> mask_password("'original_password' : 'aaaaa'") + "'original_password' : '***'" + >>> mask_password("u'original_password' : u'aaaaa'") + "u'original_password' : u'***'" + """ + message = six.text_type(message) + + # NOTE(ldbragst): Check to see if anything in message contains any key + # specified in _SANITIZE_KEYS, if not then just return the message since + # we don't have to mask any passwords. + if not any(key in message for key in _SANITIZE_KEYS): + return message + + secret = r'\g<1>' + secret + r'\g<2>' + for pattern in _SANITIZE_PATTERNS: + message = re.sub(pattern, secret, message) + return message -class ContextAdapter(logging.LoggerAdapter): + +class BaseLoggerAdapter(logging.LoggerAdapter): + + def audit(self, msg, *args, **kwargs): + self.log(logging.AUDIT, msg, *args, **kwargs) + + +class LazyAdapter(BaseLoggerAdapter): + def __init__(self, name='unknown', version='unknown'): + self._logger = None + self.extra = {} + self.name = name + self.version = version + + @property + def logger(self): + if not self._logger: + self._logger = getLogger(self.name, self.version) + return self._logger + + +class ContextAdapter(BaseLoggerAdapter): warn = logging.LoggerAdapter.warning def __init__(self, logger, project_name, version_string): @@ -167,10 +293,26 @@ def __init__(self, logger, project_name, version_string): self.project = project_name self.version = version_string - def audit(self, msg, *args, **kwargs): - self.log(logging.AUDIT, msg, *args, **kwargs) + @property + def handlers(self): + return self.logger.handlers + + def deprecated(self, msg, *args, **kwargs): + stdmsg = _("Deprecated: %s") % msg + if CONF.fatal_deprecations: + self.critical(stdmsg, *args, **kwargs) + raise DeprecatedConfig(msg=stdmsg) + else: + self.warn(stdmsg, *args, **kwargs) def process(self, msg, kwargs): + # NOTE(mrodden): catch any Message/other object and + # coerce to unicode before they can get + # to the python logging and possibly + # cause string encoding trouble + if not isinstance(msg, six.string_types): + msg = six.text_type(msg) + if 'extra' not in kwargs: kwargs['extra'] = {} extra = kwargs['extra'] @@ -182,18 +324,20 @@ def process(self, msg, kwargs): extra.update(_dictify_context(context)) instance = kwargs.pop('instance', None) + instance_uuid = (extra.get('instance_uuid', None) or + kwargs.pop('instance_uuid', None)) instance_extra = '' if instance: instance_extra = CONF.instance_format % instance - else: - instance_uuid = kwargs.pop('instance_uuid', None) - if instance_uuid: - instance_extra = (CONF.instance_uuid_format - % {'uuid': instance_uuid}) - extra.update({'instance': instance_extra}) - - extra.update({"project": self.project}) - extra.update({"version": self.version}) + elif instance_uuid: + instance_extra = (CONF.instance_uuid_format + % {'uuid': instance_uuid}) + extra['instance'] = instance_extra + + extra.setdefault('user_identity', kwargs.pop('user_identity', None)) + + extra['project'] = self.project + extra['version'] = self.version extra['extra'] = extra.copy() return msg, kwargs @@ -207,7 +351,7 @@ def __init__(self, fmt=None, datefmt=None): def formatException(self, ei, strip_newlines=True): lines = traceback.format_exception(*ei) if strip_newlines: - lines = [itertools.ifilter( + lines = [moves.filter( lambda x: x, line.rstrip().splitlines()) 
for line in lines] lines = list(itertools.chain(*lines)) @@ -244,38 +388,49 @@ def format(self, record): return jsonutils.dumps(message) -class PublishErrorsHandler(logging.Handler): - def emit(self, record): - if ('cinder.openstack.common.notifier.log_notifier' in - CONF.notification_driver): - return - notifier.api.notify(None, 'error.publisher', - 'error_notification', - notifier.api.ERROR, - dict(error=record.msg)) - - def _create_logging_excepthook(product_name): - def logging_excepthook(type, value, tb): + def logging_excepthook(exc_type, value, tb): extra = {} - if CONF.verbose: - extra['exc_info'] = (type, value, tb) + if CONF.verbose or CONF.debug: + extra['exc_info'] = (exc_type, value, tb) getLogger(product_name).critical(str(value), **extra) return logging_excepthook +class LogConfigError(Exception): + + message = _('Error loading logging config %(log_config)s: %(err_msg)s') + + def __init__(self, log_config, err_msg): + self.log_config = log_config + self.err_msg = err_msg + + def __str__(self): + return self.message % dict(log_config=self.log_config, + err_msg=self.err_msg) + + +def _load_log_config(log_config_append): + try: + logging.config.fileConfig(log_config_append, + disable_existing_loggers=False) + except moves.configparser.Error as exc: + raise LogConfigError(log_config_append, str(exc)) + + def setup(product_name): """Setup logging.""" + if CONF.log_config_append: + _load_log_config(CONF.log_config_append) + else: + _setup_logging_from_conf() sys.excepthook = _create_logging_excepthook(product_name) - if CONF.log_config: - try: - logging.config.fileConfig(CONF.log_config) - except Exception: - traceback.print_exc() - raise - else: - _setup_logging_from_conf(product_name) + +def set_defaults(logging_context_format_string): + cfg.set_defaults(log_opts, + logging_context_format_string= + logging_context_format_string) def _find_facility_from_conf(): @@ -302,8 +457,8 @@ def _find_facility_from_conf(): return facility -def _setup_logging_from_conf(product_name): - log_root = getLogger(product_name).logger +def _setup_logging_from_conf(): + log_root = getLogger(None).logger for handler in log_root.handlers: log_root.removeHandler(handler) @@ -318,44 +473,46 @@ def _setup_logging_from_conf(product_name): filelog = logging.handlers.WatchedFileHandler(logpath) log_root.addHandler(filelog) - mode = int(CONF.logfile_mode, 8) - st = os.stat(logpath) - if st.st_mode != (stat.S_IFREG | mode): - os.chmod(logpath, mode) - if CONF.use_stderr: streamlog = ColorHandler() log_root.addHandler(streamlog) - elif not CONF.log_file: + elif not logpath: # pass sys.stdout as a positional argument # python2.6 calls the argument strm, in 2.7 it's stream streamlog = logging.StreamHandler(sys.stdout) log_root.addHandler(streamlog) if CONF.publish_errors: - log_root.addHandler(PublishErrorsHandler(logging.ERROR)) + handler = importutils.import_object( + "cinder.openstack.common.log_handler.PublishErrorsHandler", + logging.ERROR) + log_root.addHandler(handler) + datefmt = CONF.log_date_format for handler in log_root.handlers: - datefmt = CONF.log_date_format + # NOTE(alaski): CONF.log_format overrides everything currently. This + # should be deprecated in favor of context aware formatting. 
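+        # NOTE: illustrative sketch only, not part of the patched module.
+        # With the logging_context_format_string default above, a
+        # context-aware record renders roughly as:
+        #
+        #     2013-06-03 12:00:00.123 4567 INFO cinder.volume.manager
+        #         [req-... user project] some message
+        #
+        # whereas a user-supplied log_format replaces that layout wholesale.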
         if CONF.log_format:
             handler.setFormatter(logging.Formatter(fmt=CONF.log_format,
                                                    datefmt=datefmt))
-            handler.setFormatter(LegacyFormatter(datefmt=datefmt))
+            log_root.info('Deprecated: log_format is now deprecated and will '
+                          'be removed in the next release')
+        else:
+            handler.setFormatter(ContextFormatter(datefmt=datefmt))
 
-    if CONF.verbose or CONF.debug:
+    if CONF.debug:
         log_root.setLevel(logging.DEBUG)
-    else:
+    elif CONF.verbose:
         log_root.setLevel(logging.INFO)
+    else:
+        log_root.setLevel(logging.WARNING)
 
-    level = logging.NOTSET
     for pair in CONF.default_log_levels:
         mod, _sep, level_name = pair.partition('=')
         level = logging.getLevelName(level_name)
         logger = logging.getLogger(mod)
         logger.setLevel(level)
-        for handler in log_root.handlers:
-            logger.addHandler(handler)
 
 
 _loggers = {}
@@ -368,6 +525,16 @@ def getLogger(name='unknown', version='unknown'):
     return _loggers[name]
 
 
+def getLazyLogger(name='unknown', version='unknown'):
+    """Returns lazy logger.
+
+    Creates a pass-through logger that does not create the real logger
+    until it is really needed and delegates all calls to the real logger
+    once it is created.
+    """
+    return LazyAdapter(name, version)
+
+
 class WritableLogger(object):
     """A thin wrapper that responds to `write` and logs."""
 
@@ -379,7 +546,7 @@ def write(self, msg):
         self.logger.log(self.level, msg)
 
 
-class LegacyFormatter(logging.Formatter):
+class ContextFormatter(logging.Formatter):
     """A context.RequestContext aware formatter configured through flags.
 
     The flags used to set format strings are: logging_context_format_string
@@ -407,7 +574,7 @@ def format(self, record):
             self._fmt = CONF.logging_default_format_string
 
         if (record.levelno == logging.DEBUG and
-            CONF.logging_debug_format_suffix):
+                CONF.logging_debug_format_suffix):
             self._fmt += " " + CONF.logging_debug_format_suffix
 
         # Cache this on the record, Logger will respect our formatted copy
@@ -420,7 +587,7 @@ def formatException(self, exc_info, record=None):
         if not record:
             return logging.Formatter.formatException(self, exc_info)
 
-        stringbuffer = cStringIO.StringIO()
+        stringbuffer = moves.StringIO()
         traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
                                   None, stringbuffer)
         lines = stringbuffer.getvalue().split('\n')
@@ -450,3 +617,10 @@ class ColorHandler(logging.StreamHandler):
     def format(self, record):
         record.color = self.LEVEL_COLORS[record.levelno]
         return logging.StreamHandler.format(self, record)
+
+
+class DeprecatedConfig(Exception):
+    message = _("Fatal call to deprecated config: %(msg)s")
+
+    def __init__(self, msg):
+        super(Exception, self).__init__(self.message % dict(msg=msg))
diff --git a/cinder/openstack/common/log_handler.py b/cinder/openstack/common/log_handler.py
new file mode 100644
index 0000000000..ebeb9ab737
--- /dev/null
+++ b/cinder/openstack/common/log_handler.py
@@ -0,0 +1,29 @@
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
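+
+# NOTE: illustrative sketch only, not part of the patched module. This
+# handler is attached to the root logger by log._setup_logging_from_conf()
+# when publish_errors is set, via:
+#
+#     handler = importutils.import_object(
+#         "cinder.openstack.common.log_handler.PublishErrorsHandler",
+#         logging.ERROR)
+#     log_root.addHandler(handler)
+#
+# so records at ERROR and above are forwarded to the notifier API, except
+# when log_notifier itself is the configured driver, which would loop.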
+import logging + +from oslo.config import cfg + +from cinder.openstack.common import notifier + + +class PublishErrorsHandler(logging.Handler): + def emit(self, record): + if ('cinder.openstack.common.notifier.log_notifier' in + cfg.CONF.notification_driver): + return + notifier.api.notify(None, 'error.publisher', + 'error_notification', + notifier.api.ERROR, + dict(error=record.msg)) diff --git a/cinder/openstack/common/loopingcall.py b/cinder/openstack/common/loopingcall.py new file mode 100644 index 0000000000..eabb605ffb --- /dev/null +++ b/cinder/openstack/common/loopingcall.py @@ -0,0 +1,145 @@ +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import sys + +from eventlet import event +from eventlet import greenthread + +from cinder.openstack.common.gettextutils import _ +from cinder.openstack.common import log as logging +from cinder.openstack.common import timeutils + +LOG = logging.getLogger(__name__) + + +class LoopingCallDone(Exception): + """Exception to break out and stop a LoopingCall. + + The poll-function passed to LoopingCall can raise this exception to + break out of the loop normally. This is somewhat analogous to + StopIteration. 
+ + An optional return-value can be included as the argument to the exception; + this return-value will be returned by LoopingCall.wait() + + """ + + def __init__(self, retvalue=True): + """:param retvalue: Value that LoopingCall.wait() should return.""" + self.retvalue = retvalue + + +class LoopingCallBase(object): + def __init__(self, f=None, *args, **kw): + self.args = args + self.kw = kw + self.f = f + self._running = False + self.done = None + + def stop(self): + self._running = False + + def wait(self): + return self.done.wait() + + +class FixedIntervalLoopingCall(LoopingCallBase): + """A fixed interval looping call.""" + + def start(self, interval, initial_delay=None): + self._running = True + done = event.Event() + + def _inner(): + if initial_delay: + greenthread.sleep(initial_delay) + + try: + while self._running: + start = timeutils.utcnow() + self.f(*self.args, **self.kw) + end = timeutils.utcnow() + if not self._running: + break + delay = interval - timeutils.delta_seconds(start, end) + if delay <= 0: + LOG.warn(_('task run outlasted interval by %s sec') % + -delay) + greenthread.sleep(delay if delay > 0 else 0) + except LoopingCallDone as e: + self.stop() + done.send(e.retvalue) + except Exception: + LOG.exception(_('in fixed duration looping call')) + done.send_exception(*sys.exc_info()) + return + else: + done.send(True) + + self.done = done + + greenthread.spawn_n(_inner) + return self.done + + +# TODO(mikal): this class name is deprecated in Havana and should be removed +# in the I release +LoopingCall = FixedIntervalLoopingCall + + +class DynamicLoopingCall(LoopingCallBase): + """A looping call which sleeps until the next known event. + + The function called should return how long to sleep for before being + called again. + """ + + def start(self, initial_delay=None, periodic_interval_max=None): + self._running = True + done = event.Event() + + def _inner(): + if initial_delay: + greenthread.sleep(initial_delay) + + try: + while self._running: + idle = self.f(*self.args, **self.kw) + if not self._running: + break + + if periodic_interval_max is not None: + idle = min(idle, periodic_interval_max) + LOG.debug(_('Dynamic looping call sleeping for %.02f ' + 'seconds'), idle) + greenthread.sleep(idle) + except LoopingCallDone as e: + self.stop() + done.send(e.retvalue) + except Exception: + LOG.exception(_('in dynamic looping call')) + done.send_exception(*sys.exc_info()) + return + else: + done.send(True) + + self.done = done + + greenthread.spawn(_inner) + return self.done diff --git a/cinder/openstack/common/network_utils.py b/cinder/openstack/common/network_utils.py index 69f6732163..5807b67b56 100644 --- a/cinder/openstack/common/network_utils.py +++ b/cinder/openstack/common/network_utils.py @@ -1,6 +1,4 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2012 OpenStack LLC. +# Copyright 2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -19,14 +17,12 @@ Network-related utilities and helper functions. """ -import logging - -LOG = logging.getLogger(__name__) +from cinder.openstack.common.py3kcompat import urlutils def parse_host_port(address, default_port=None): - """ - Interpret a string as a host:port pair. + """Interpret a string as a host:port pair. 
+ An IPv6 address MUST be escaped if accompanied by a port, because otherwise ambiguity ensues: 2001:db8:85a3::8a2e:370:7334 means both [2001:db8:85a3::8a2e:370:7334] and @@ -66,3 +62,18 @@ def parse_host_port(address, default_port=None): port = default_port return (host, None if port is None else int(port)) + + +def urlsplit(url, scheme='', allow_fragments=True): + """Parse a URL using urlparse.urlsplit(), splitting query and fragments. + This function papers over Python issue9374 when needed. + + The parameters are the same as urlparse.urlsplit. + """ + scheme, netloc, path, query, fragment = urlutils.urlsplit( + url, scheme, allow_fragments) + if allow_fragments and '#' in path: + path, fragment = path.split('#', 1) + if '?' in path: + path, query = path.split('?', 1) + return urlutils.SplitResult(scheme, netloc, path, query, fragment) diff --git a/cinder/openstack/common/notifier/__init__.py b/cinder/openstack/common/notifier/__init__.py index 482d54e4fd..45c3b46ae9 100644 --- a/cinder/openstack/common/notifier/__init__.py +++ b/cinder/openstack/common/notifier/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2011 OpenStack LLC. +# Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/cinder/openstack/common/notifier/api.py b/cinder/openstack/common/notifier/api.py index 8acd5785cf..b9b7908954 100644 --- a/cinder/openstack/common/notifier/api.py +++ b/cinder/openstack/common/notifier/api.py @@ -1,4 +1,4 @@ -# Copyright 2011 OpenStack LLC. +# Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -13,11 +13,13 @@ # License for the specific language governing permissions and limitations # under the License. +import socket import uuid -from cinder.openstack.common import cfg +from oslo.config import cfg + from cinder.openstack.common import context -from cinder.openstack.common.gettextutils import _ +from cinder.openstack.common.gettextutils import _ # noqa from cinder.openstack.common import importutils from cinder.openstack.common import jsonutils from cinder.openstack.common import log as logging @@ -29,13 +31,12 @@ notifier_opts = [ cfg.MultiStrOpt('notification_driver', default=[], - deprecated_name='list_notifier_drivers', help='Driver or drivers to handle sending notifications'), cfg.StrOpt('default_notification_level', default='INFO', help='Default notification level for outgoing notifications'), cfg.StrOpt('default_publisher_id', - default='$host', + default=None, help='Default publisher_id for outgoing notifications'), ] @@ -56,7 +57,7 @@ class BadPriorityException(Exception): def notify_decorator(name, fn): - """ decorator for notify which is used from utils.monkey_patch() + """Decorator for notify which is used from utils.monkey_patch(). 
:param name: name of the function :param function: - object of the function @@ -74,7 +75,7 @@ def wrapped_func(*args, **kwarg): ctxt = context.get_context_from_function_and_args(fn, args, kwarg) notify(ctxt, - CONF.default_publisher_id, + CONF.default_publisher_id or socket.gethostname(), name, CONF.default_notification_level, body) @@ -84,7 +85,10 @@ def wrapped_func(*args, **kwarg): def publisher_id(service, host=None): if not host: - host = CONF.host + try: + host = CONF.host + except AttributeError: + host = CONF.default_publisher_id or socket.gethostname() return "%s.%s" % (service, host) @@ -137,10 +141,11 @@ def notify(context, publisher_id, event_type, priority, payload): for driver in _get_drivers(): try: driver.notify(context, msg) - except Exception, e: + except Exception as e: LOG.exception(_("Problem '%(e)s' attempting to " "send to notification system. " - "Payload=%(payload)s") % locals()) + "Payload=%(payload)s") + % dict(e=e, payload=payload)) _drivers = None @@ -152,29 +157,16 @@ def _get_drivers(): if _drivers is None: _drivers = {} for notification_driver in CONF.notification_driver: - add_driver(notification_driver) - + try: + driver = importutils.import_module(notification_driver) + _drivers[notification_driver] = driver + except ImportError: + LOG.exception(_("Failed to load notifier %s. " + "These notifications will not be sent.") % + notification_driver) return _drivers.values() -def add_driver(notification_driver): - """Add a notification driver at runtime.""" - # Make sure the driver list is initialized. - _get_drivers() - if isinstance(notification_driver, basestring): - # Load and add - try: - driver = importutils.import_module(notification_driver) - _drivers[notification_driver] = driver - except ImportError as e: - LOG.exception(_("Failed to load notifier %s. " - "These notifications will not be sent.") % - notification_driver) - else: - # Driver is already loaded; just add the object. - _drivers[notification_driver] = notification_driver - - def _reset_drivers(): """Used by unit tests to reset the drivers.""" global _drivers diff --git a/cinder/openstack/common/notifier/log_notifier.py b/cinder/openstack/common/notifier/log_notifier.py index ae9b45b285..11a453d9e6 100644 --- a/cinder/openstack/common/notifier/log_notifier.py +++ b/cinder/openstack/common/notifier/log_notifier.py @@ -1,4 +1,4 @@ -# Copyright 2011 OpenStack LLC. +# Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -13,8 +13,8 @@ # License for the specific language governing permissions and limitations # under the License. +from oslo.config import cfg -from cinder.openstack.common import cfg from cinder.openstack.common import jsonutils from cinder.openstack.common import log as logging @@ -24,7 +24,9 @@ def notify(_context, message): """Notifies the recipient of the desired event given the model. - Log notifications using openstack's default logging system""" + + Log notifications using OpenStack's default logging system. + """ priority = message.get('priority', CONF.default_notification_level) diff --git a/cinder/openstack/common/notifier/no_op_notifier.py b/cinder/openstack/common/notifier/no_op_notifier.py index ee1ddbdcac..13d946e362 100644 --- a/cinder/openstack/common/notifier/no_op_notifier.py +++ b/cinder/openstack/common/notifier/no_op_notifier.py @@ -1,4 +1,4 @@ -# Copyright 2011 OpenStack LLC. +# Copyright 2011 OpenStack Foundation. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -15,5 +15,5 @@ def notify(_context, message): - """Notifies the recipient of the desired event given the model""" + """Notifies the recipient of the desired event given the model.""" pass diff --git a/cinder/openstack/common/notifier/rabbit_notifier.py b/cinder/openstack/common/notifier/rabbit_notifier.py index 89b69ca7ec..2ffe9524e9 100644 --- a/cinder/openstack/common/notifier/rabbit_notifier.py +++ b/cinder/openstack/common/notifier/rabbit_notifier.py @@ -1,4 +1,4 @@ -# Copyright 2011 OpenStack LLC. +# Copyright 2012 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -14,33 +14,16 @@ # under the License. -from cinder.openstack.common import cfg -from cinder.openstack.common import context as req_context from cinder.openstack.common.gettextutils import _ from cinder.openstack.common import log as logging -from cinder.openstack.common import rpc +from cinder.openstack.common.notifier import rpc_notifier LOG = logging.getLogger(__name__) -notification_topic_opt = cfg.ListOpt( - 'notification_topics', default=['notifications', ], - help='AMQP topic used for openstack notifications') - -CONF = cfg.CONF -CONF.register_opt(notification_topic_opt) - def notify(context, message): - """Sends a notification to the RabbitMQ""" - if not context: - context = req_context.get_admin_context() - priority = message.get('priority', - CONF.default_notification_level) - priority = priority.lower() - for topic in CONF.notification_topics: - topic = '%s.%s' % (topic, priority) - try: - rpc.notify(context, topic, message) - except Exception, e: - LOG.exception(_("Could not send notification to %(topic)s. " - "Payload=%(message)s"), locals()) + """Deprecated in Grizzly. Please use rpc_notifier instead.""" + + LOG.deprecated(_("The rabbit_notifier is now deprecated." + " Please use rpc_notifier instead.")) + rpc_notifier.notify(context, message) diff --git a/cinder/openstack/common/notifier/rpc_notifier.py b/cinder/openstack/common/notifier/rpc_notifier.py new file mode 100644 index 0000000000..83c94ed5cc --- /dev/null +++ b/cinder/openstack/common/notifier/rpc_notifier.py @@ -0,0 +1,47 @@ +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
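+
+# NOTE: illustrative sketch only, not part of the patched module. With the
+# default notification_topics of ['notifications'] and a message priority of
+# 'error', notify() below publishes to the AMQP topic 'notifications.error'.
+# Wiring the driver in is a configuration change, e.g. in cinder.conf:
+#
+#     notification_driver = cinder.openstack.common.notifier.rpc_notifier
+#
+# after which notifier.api._get_drivers() imports this module and calls its
+# notify() for every emitted notification.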
+ +from oslo.config import cfg + +from cinder.openstack.common import context as req_context +from cinder.openstack.common.gettextutils import _ # noqa +from cinder.openstack.common import log as logging +from cinder.openstack.common import rpc + +LOG = logging.getLogger(__name__) + +notification_topic_opt = cfg.ListOpt( + 'notification_topics', default=['notifications', ], + help='AMQP topic used for OpenStack notifications') + +CONF = cfg.CONF +CONF.register_opt(notification_topic_opt) + + +def notify(context, message): + """Sends a notification via RPC.""" + if not context: + context = req_context.get_admin_context() + priority = message.get('priority', + CONF.default_notification_level) + priority = priority.lower() + for topic in CONF.notification_topics: + topic = '%s.%s' % (topic, priority) + try: + rpc.notify(context, topic, message) + except Exception: + LOG.exception(_("Could not send notification to %(topic)s. " + "Payload=%(message)s"), + {"topic": topic, "message": message}) diff --git a/cinder/openstack/common/notifier/rpc_notifier2.py b/cinder/openstack/common/notifier/rpc_notifier2.py new file mode 100644 index 0000000000..e264374ce0 --- /dev/null +++ b/cinder/openstack/common/notifier/rpc_notifier2.py @@ -0,0 +1,53 @@ +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +'''messaging based notification driver, with message envelopes''' + +from oslo.config import cfg + +from cinder.openstack.common import context as req_context +from cinder.openstack.common.gettextutils import _ # noqa +from cinder.openstack.common import log as logging +from cinder.openstack.common import rpc + +LOG = logging.getLogger(__name__) + +notification_topic_opt = cfg.ListOpt( + 'topics', default=['notifications', ], + help='AMQP topic(s) used for OpenStack notifications') + +opt_group = cfg.OptGroup(name='rpc_notifier2', + title='Options for rpc_notifier2') + +CONF = cfg.CONF +CONF.register_group(opt_group) +CONF.register_opt(notification_topic_opt, opt_group) + + +def notify(context, message): + """Sends a notification via RPC.""" + if not context: + context = req_context.get_admin_context() + priority = message.get('priority', + CONF.default_notification_level) + priority = priority.lower() + for topic in CONF.rpc_notifier2.topics: + topic = '%s.%s' % (topic, priority) + try: + rpc.notify(context, topic, message, envelope=True) + except Exception: + LOG.exception(_("Could not send notification to %(topic)s. " + "Payload=%(message)s"), + {"topic": topic, "message": message}) diff --git a/cinder/openstack/common/notifier/test_notifier.py b/cinder/openstack/common/notifier/test_notifier.py index 5e348803dc..96c1746bf4 100644 --- a/cinder/openstack/common/notifier/test_notifier.py +++ b/cinder/openstack/common/notifier/test_notifier.py @@ -1,4 +1,4 @@ -# Copyright 2011 OpenStack LLC. +# Copyright 2011 OpenStack Foundation. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/cinder/openstack/common/periodic_task.py b/cinder/openstack/common/periodic_task.py new file mode 100644 index 0000000000..44faab560b --- /dev/null +++ b/cinder/openstack/common/periodic_task.py @@ -0,0 +1,190 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import datetime +import time + +from oslo.config import cfg +import six + +from cinder.openstack.common.gettextutils import _ # noqa +from cinder.openstack.common import log as logging +from cinder.openstack.common import timeutils + + +periodic_opts = [ + cfg.BoolOpt('run_external_periodic_tasks', + default=True, + help=('Some periodic tasks can be run in a separate process. ' + 'Should we run them here?')), +] + +CONF = cfg.CONF +CONF.register_opts(periodic_opts) + +LOG = logging.getLogger(__name__) + +DEFAULT_INTERVAL = 60.0 + + +class InvalidPeriodicTaskArg(Exception): + message = _("Unexpected argument for periodic task creation: %(arg)s.") + + +def periodic_task(*args, **kwargs): + """Decorator to indicate that a method is a periodic task. + + This decorator can be used in two ways: + + 1. Without arguments '@periodic_task', this will be run on every cycle + of the periodic scheduler. + + 2. With arguments: + @periodic_task(spacing=N [, run_immediately=[True|False]]) + this will be run on approximately every N seconds. If this number is + negative the periodic task will be disabled. If the run_immediately + argument is provided and has a value of 'True', the first run of the + task will be shortly after task scheduler starts. If + run_immediately is omitted or set to 'False', the first time the + task runs will be approximately N seconds after the task scheduler + starts. + """ + def decorator(f): + # Test for old style invocation + if 'ticks_between_runs' in kwargs: + raise InvalidPeriodicTaskArg(arg='ticks_between_runs') + + # Control if run at all + f._periodic_task = True + f._periodic_external_ok = kwargs.pop('external_process_ok', False) + if f._periodic_external_ok and not CONF.run_external_periodic_tasks: + f._periodic_enabled = False + else: + f._periodic_enabled = kwargs.pop('enabled', True) + + # Control frequency + f._periodic_spacing = kwargs.pop('spacing', 0) + f._periodic_immediate = kwargs.pop('run_immediately', False) + if f._periodic_immediate: + f._periodic_last_run = None + else: + f._periodic_last_run = timeutils.utcnow() + return f + + # NOTE(sirp): The `if` is necessary to allow the decorator to be used with + # and without parens. 
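+    # NOTE: illustrative sketch only, not part of the patched module. Both
+    # spellings described in the docstring above (method names hypothetical):
+    #
+    #     @periodic_task
+    #     def _report_state(self, context):      # runs every cycle
+    #         pass
+    #
+    #     @periodic_task(spacing=600, run_immediately=True)
+    #     def _cleanup(self, context):           # roughly every 10 minutes
+    #         pass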
+    #
+    # In the 'with-parens' case (with kwargs present), this function needs to
+    # return a decorator function since the interpreter will invoke it like:
+    #
+    #   periodic_task(*args, **kwargs)(f)
+    #
+    # In the 'without-parens' case, the original function will be passed
+    # in as the first argument, like:
+    #
+    #   periodic_task(f)
+    if kwargs:
+        return decorator
+    else:
+        return decorator(args[0])
+
+
+class _PeriodicTasksMeta(type):
+    def __init__(cls, names, bases, dict_):
+        """Metaclass that allows us to collect decorated periodic tasks."""
+        super(_PeriodicTasksMeta, cls).__init__(names, bases, dict_)
+
+        # NOTE(sirp): if the attribute is not present then we must be the base
+        # class, so, go ahead and initialize it. If the attribute is present,
+        # then we're a subclass so make a copy of it so we don't step on our
+        # parent's toes.
+        try:
+            cls._periodic_tasks = cls._periodic_tasks[:]
+        except AttributeError:
+            cls._periodic_tasks = []
+
+        try:
+            cls._periodic_last_run = cls._periodic_last_run.copy()
+        except AttributeError:
+            cls._periodic_last_run = {}
+
+        try:
+            cls._periodic_spacing = cls._periodic_spacing.copy()
+        except AttributeError:
+            cls._periodic_spacing = {}
+
+        for value in cls.__dict__.values():
+            if getattr(value, '_periodic_task', False):
+                task = value
+                name = task.__name__
+
+                if task._periodic_spacing < 0:
+                    LOG.info(_('Skipping periodic task %(task)s because '
+                               'its interval is negative'),
+                             {'task': name})
+                    continue
+                if not task._periodic_enabled:
+                    LOG.info(_('Skipping periodic task %(task)s because '
+                               'it is disabled'),
+                             {'task': name})
+                    continue
+
+                # A periodic spacing of zero indicates that this task should
+                # be run every pass
+                if task._periodic_spacing == 0:
+                    task._periodic_spacing = None
+
+                cls._periodic_tasks.append((name, task))
+                cls._periodic_spacing[name] = task._periodic_spacing
+                cls._periodic_last_run[name] = task._periodic_last_run
+
+
+@six.add_metaclass(_PeriodicTasksMeta)
+class PeriodicTasks(object):
+
+    def run_periodic_tasks(self, context, raise_on_error=False):
+        """Tasks to be run at a periodic interval."""
+        idle_for = DEFAULT_INTERVAL
+        for task_name, task in self._periodic_tasks:
+            full_task_name = '.'.join([self.__class__.__name__, task_name])
+
+            now = timeutils.utcnow()
+            spacing = self._periodic_spacing[task_name]
+            last_run = self._periodic_last_run[task_name]
+
+            # If a periodic task is _nearly_ due, then we'll run it early
+            if spacing is not None and last_run is not None:
+                due = last_run + datetime.timedelta(seconds=spacing)
+                if not timeutils.is_soon(due, 0.2):
+                    idle_for = min(idle_for, timeutils.delta_seconds(now, due))
+                    continue
+
+            if spacing is not None:
+                idle_for = min(idle_for, spacing)
+
+            LOG.debug(_("Running periodic task %(full_task_name)s"),
+                      {"full_task_name": full_task_name})
+            self._periodic_last_run[task_name] = timeutils.utcnow()
+
+            try:
+                task(self, context)
+            except Exception as e:
+                if raise_on_error:
+                    raise
+                LOG.exception(_("Error during %(full_task_name)s: %(e)s"),
+                              {"full_task_name": full_task_name, "e": e})
+            time.sleep(0)
+
+        return idle_for
diff --git a/cinder/openstack/common/policy.py b/cinder/openstack/common/policy.py
index 0ca48ce9e8..a0a954c12b 100644
--- a/cinder/openstack/common/policy.py
+++ b/cinder/openstack/common/policy.py
@@ -1,6 +1,6 @@
 # vim: tabstop=4 shiftwidth=4 softtabstop=4
 
-# Copyright (c) 2011 OpenStack, LLC.
+# Copyright (c) 2011 OpenStack Foundation
 # All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/cinder/openstack/common/processutils.py b/cinder/openstack/common/processutils.py
new file mode 100644
index 0000000000..059f570b12
--- /dev/null
+++ b/cinder/openstack/common/processutils.py
@@ -0,0 +1,246 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack Foundation.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+System-level utilities and helper functions.
+"""
+
+import os
+import random
+import shlex
+import signal
+
+from eventlet.green import subprocess
+from eventlet import greenthread
+
+from cinder.openstack.common.gettextutils import _
+from cinder.openstack.common import log as logging
+
+
+LOG = logging.getLogger(__name__)
+
+
+class InvalidArgumentError(Exception):
+    def __init__(self, message=None):
+        super(InvalidArgumentError, self).__init__(message)
+
+
+class UnknownArgumentError(Exception):
+    def __init__(self, message=None):
+        super(UnknownArgumentError, self).__init__(message)
+
+
+class ProcessExecutionError(Exception):
+    def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None,
+                 description=None):
+        self.exit_code = exit_code
+        self.stderr = stderr
+        self.stdout = stdout
+        self.cmd = cmd
+        self.description = description
+
+        if description is None:
+            description = "Unexpected error while running command."
+        if exit_code is None:
+            exit_code = '-'
+        message = ("%s\nCommand: %s\nExit code: %s\nStdout: %r\nStderr: %r"
+                   % (description, cmd, exit_code, stdout, stderr))
+        super(ProcessExecutionError, self).__init__(message)
+
+
+class NoRootWrapSpecified(Exception):
+    def __init__(self, message=None):
+        super(NoRootWrapSpecified, self).__init__(message)
+
+
+def _subprocess_setup():
+    # Python installs a SIGPIPE handler by default. This is usually not what
+    # non-Python subprocesses expect.
+    signal.signal(signal.SIGPIPE, signal.SIG_DFL)
+
+
+def execute(*cmd, **kwargs):
+    """Helper method to shell out and execute a command through subprocess.
+
+    Allows optional retry.
+
+    :param cmd: Passed to subprocess.Popen.
+    :type cmd: string
+    :param process_input: Send to opened process.
+    :type process_input: string
+    :param check_exit_code: Single bool, int, or list of allowed exit
+                            codes. Defaults to [0]. Raise
+                            :class:`ProcessExecutionError` unless
+                            program exits with one of these codes.
+    :type check_exit_code: boolean, int, or [int]
+    :param delay_on_retry: True | False. Defaults to True. If set to True,
+                           wait a short amount of time before retrying.
+    :type delay_on_retry: boolean
+    :param attempts: How many times to retry cmd.
+    :type attempts: int
+    :param run_as_root: True | False. Defaults to False. If set to True,
+                        the command is prefixed by the command specified
+                        in the root_helper kwarg.
+ :type run_as_root: boolean + :param root_helper: command to prefix to commands called with + run_as_root=True + :type root_helper: string + :param shell: whether or not there should be a shell used to + execute this command. Defaults to false. + :type shell: boolean + :returns: (stdout, stderr) from process execution + :raises: :class:`UnknownArgumentError` on + receiving unknown arguments + :raises: :class:`ProcessExecutionError` + """ + + process_input = kwargs.pop('process_input', None) + check_exit_code = kwargs.pop('check_exit_code', [0]) + ignore_exit_code = False + delay_on_retry = kwargs.pop('delay_on_retry', True) + attempts = kwargs.pop('attempts', 1) + run_as_root = kwargs.pop('run_as_root', False) + root_helper = kwargs.pop('root_helper', '') + shell = kwargs.pop('shell', False) + + if isinstance(check_exit_code, bool): + ignore_exit_code = not check_exit_code + check_exit_code = [0] + elif isinstance(check_exit_code, int): + check_exit_code = [check_exit_code] + + if kwargs: + raise UnknownArgumentError(_('Got unknown keyword args ' + 'to utils.execute: %r') % kwargs) + + if run_as_root and hasattr(os, 'geteuid') and os.geteuid() != 0: + if not root_helper: + raise NoRootWrapSpecified( + message=('Command requested root, but did not specify a root ' + 'helper.')) + cmd = shlex.split(root_helper) + list(cmd) + + cmd = map(str, cmd) + + while attempts > 0: + attempts -= 1 + try: + LOG.debug(_('Running cmd (subprocess): %s'), ' '.join(cmd)) + _PIPE = subprocess.PIPE # pylint: disable=E1101 + + if os.name == 'nt': + preexec_fn = None + close_fds = False + else: + preexec_fn = _subprocess_setup + close_fds = True + + obj = subprocess.Popen(cmd, + stdin=_PIPE, + stdout=_PIPE, + stderr=_PIPE, + close_fds=close_fds, + preexec_fn=preexec_fn, + shell=shell) + result = None + if process_input is not None: + result = obj.communicate(process_input) + else: + result = obj.communicate() + obj.stdin.close() # pylint: disable=E1101 + _returncode = obj.returncode # pylint: disable=E1101 + if _returncode: + LOG.debug(_('Result was %s') % _returncode) + if not ignore_exit_code and _returncode not in check_exit_code: + (stdout, stderr) = result + raise ProcessExecutionError(exit_code=_returncode, + stdout=stdout, + stderr=stderr, + cmd=' '.join(cmd)) + return result + except ProcessExecutionError: + if not attempts: + raise + else: + LOG.debug(_('%r failed. Retrying.'), cmd) + if delay_on_retry: + greenthread.sleep(random.randint(20, 200) / 100.0) + finally: + # NOTE(termie): this appears to be necessary to let the subprocess + # call clean something up in between calls, without + # it two execute calls in a row hangs the second one + greenthread.sleep(0) + + +def trycmd(*args, **kwargs): + """A wrapper around execute() to more easily handle warnings and errors. + + Returns an (out, err) tuple of strings containing the output of + the command's stdout and stderr. If 'err' is not empty then the + command can be considered to have failed. + + :discard_warnings True | False. Defaults to False. 
If set to True, + then for succeeding commands, stderr is cleared + + """ + discard_warnings = kwargs.pop('discard_warnings', False) + + try: + out, err = execute(*args, **kwargs) + failed = False + except ProcessExecutionError as exn: + out, err = '', str(exn) + failed = True + + if not failed and discard_warnings and err: + # Handle commands that output to stderr but otherwise succeed + err = '' + + return out, err + + +def ssh_execute(ssh, cmd, process_input=None, + addl_env=None, check_exit_code=True): + LOG.debug(_('Running cmd (SSH): %s'), cmd) + if addl_env: + raise InvalidArgumentError(_('Environment not supported over SSH')) + + if process_input: + # This is (probably) fixable if we need it... + raise InvalidArgumentError(_('process_input not supported over SSH')) + + stdin_stream, stdout_stream, stderr_stream = ssh.exec_command(cmd) + channel = stdout_stream.channel + + # NOTE(justinsb): This seems suspicious... + # ...other SSH clients have buffering issues with this approach + stdout = stdout_stream.read() + stderr = stderr_stream.read() + stdin_stream.close() + + exit_status = channel.recv_exit_status() + + # exit_status == -1 if no exit code was returned + if exit_status != -1: + LOG.debug(_('Result was %s') % exit_status) + if check_exit_code and exit_status != 0: + raise ProcessExecutionError(exit_code=exit_status, + stdout=stdout, + stderr=stderr, + cmd=cmd) + + return (stdout, stderr) diff --git a/cinder/openstack/common/py3kcompat/__init__.py b/cinder/openstack/common/py3kcompat/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cinder/openstack/common/py3kcompat/urlutils.py b/cinder/openstack/common/py3kcompat/urlutils.py new file mode 100644 index 0000000000..84e457a44f --- /dev/null +++ b/cinder/openstack/common/py3kcompat/urlutils.py @@ -0,0 +1,67 @@ +# +# Copyright 2013 Canonical Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
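+#
+# NOTE: illustrative sketch only, not part of the patched module. The point
+# of this shim is that calling code can write, on both Python 2 and 3:
+#
+#     from cinder.openstack.common.py3kcompat import urlutils
+#     parts = urlutils.urlsplit('http://example.com/path?q=1')
+#
+# network_utils.urlsplit() earlier in this patch builds on these aliases to
+# paper over Python issue9374.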
+# + +""" +Python2/Python3 compatibility layer for OpenStack +""" + +import six + +if six.PY3: + # python3 + import urllib.error + import urllib.parse + import urllib.request + + urlencode = urllib.parse.urlencode + urljoin = urllib.parse.urljoin + quote = urllib.parse.quote + quote_plus = urllib.parse.quote_plus + parse_qsl = urllib.parse.parse_qsl + unquote = urllib.parse.unquote + unquote_plus = urllib.parse.unquote_plus + urlparse = urllib.parse.urlparse + urlsplit = urllib.parse.urlsplit + urlunsplit = urllib.parse.urlunsplit + SplitResult = urllib.parse.SplitResult + + urlopen = urllib.request.urlopen + URLError = urllib.error.URLError + pathname2url = urllib.request.pathname2url +else: + # python2 + import urllib + import urllib2 + import urlparse + + urlencode = urllib.urlencode + quote = urllib.quote + quote_plus = urllib.quote_plus + unquote = urllib.unquote + unquote_plus = urllib.unquote_plus + + parse = urlparse + parse_qsl = parse.parse_qsl + urljoin = parse.urljoin + urlparse = parse.urlparse + urlsplit = parse.urlsplit + urlunsplit = parse.urlunsplit + SplitResult = parse.SplitResult + + urlopen = urllib2.urlopen + URLError = urllib2.URLError + pathname2url = urllib.pathname2url diff --git a/cinder/openstack/common/rpc/__init__.py b/cinder/openstack/common/rpc/__init__.py index acc2daafcd..6c25e4cf92 100644 --- a/cinder/openstack/common/rpc/__init__.py +++ b/cinder/openstack/common/rpc/__init__.py @@ -25,8 +25,17 @@ rpc.proxy """ -from cinder.openstack.common import cfg +import inspect +import logging + +from oslo.config import cfg + +from cinder.openstack.common.gettextutils import _ from cinder.openstack.common import importutils +from cinder.openstack.common import local + + +LOG = logging.getLogger(__name__) rpc_opts = [ @@ -47,26 +56,27 @@ help='Seconds to wait before a cast expires (TTL). ' 'Only supported by impl_zmq.'), cfg.ListOpt('allowed_rpc_exception_modules', - default=['cinder.openstack.common.exception', - 'nova.exception', + default=['nova.exception', 'cinder.exception', + 'exceptions', ], help='Modules of exceptions that are permitted to be recreated' 'upon receiving exception data from an rpc call.'), cfg.BoolOpt('fake_rabbit', default=False, help='If passed, use a fake RabbitMQ provider'), - # - # The following options are not registered here, but are expected to be - # present. The project using this library must register these options with - # the configuration so that project-specific defaults may be defined. - # - #cfg.StrOpt('control_exchange', - # default='nova', - # help='AMQP exchange to connect to if using RabbitMQ or Qpid'), + cfg.StrOpt('control_exchange', + default='openstack', + help='AMQP exchange to connect to if using RabbitMQ or Qpid'), ] -cfg.CONF.register_opts(rpc_opts) +CONF = cfg.CONF +CONF.register_opts(rpc_opts) + + +def set_defaults(control_exchange): + cfg.set_defaults(rpc_opts, + control_exchange=control_exchange) def create_connection(new=True): @@ -82,10 +92,27 @@ def create_connection(new=True): :returns: An instance of openstack.common.rpc.common.Connection """ - return _get_impl().create_connection(cfg.CONF, new=new) + return _get_impl().create_connection(CONF, new=new) + + +def _check_for_lock(): + if not CONF.debug: + return None + + if ((hasattr(local.strong_store, 'locks_held') + and local.strong_store.locks_held)): + stack = ' :: '.join([frame[3] for frame in inspect.stack()]) + LOG.warn(_('A RPC is being made while holding a lock. The locks ' + 'currently held are %(locks)s. This is probably a bug. 
' + 'Please report it. Include the following: [%(stack)s].'), + {'locks': local.strong_store.locks_held, + 'stack': stack}) + return True + + return False -def call(context, topic, msg, timeout=None): +def call(context, topic, msg, timeout=None, check_for_lock=False): """Invoke a remote method that returns something. :param context: Information that identifies the user that has made this @@ -99,13 +126,17 @@ def call(context, topic, msg, timeout=None): "args" : dict_of_kwargs } :param timeout: int, number of seconds to use for a response timeout. If set, this overrides the rpc_response_timeout option. + :param check_for_lock: if True, a warning is emitted if a RPC call is made + with a lock held. :returns: A dict from the remote method. :raises: openstack.common.rpc.common.Timeout if a complete response is not received before the timeout is reached. """ - return _get_impl().call(cfg.CONF, context, topic, msg, timeout) + if check_for_lock: + _check_for_lock() + return _get_impl().call(CONF, context, topic, msg, timeout) def cast(context, topic, msg): @@ -123,7 +154,7 @@ def cast(context, topic, msg): :returns: None """ - return _get_impl().cast(cfg.CONF, context, topic, msg) + return _get_impl().cast(CONF, context, topic, msg) def fanout_cast(context, topic, msg): @@ -144,10 +175,10 @@ def fanout_cast(context, topic, msg): :returns: None """ - return _get_impl().fanout_cast(cfg.CONF, context, topic, msg) + return _get_impl().fanout_cast(CONF, context, topic, msg) -def multicall(context, topic, msg, timeout=None): +def multicall(context, topic, msg, timeout=None, check_for_lock=False): """Invoke a remote method and get back an iterator. In this case, the remote method will be returning multiple values in @@ -165,6 +196,8 @@ def multicall(context, topic, msg, timeout=None): "args" : dict_of_kwargs } :param timeout: int, number of seconds to use for a response timeout. If set, this overrides the rpc_response_timeout option. + :param check_for_lock: if True, a warning is emitted if a RPC call is made + with a lock held. :returns: An iterator. The iterator will yield a tuple (N, X) where N is an index that starts at 0 and increases by one for each value @@ -174,20 +207,23 @@ def multicall(context, topic, msg, timeout=None): :raises: openstack.common.rpc.common.Timeout if a complete response is not received before the timeout is reached. """ - return _get_impl().multicall(cfg.CONF, context, topic, msg, timeout) + if check_for_lock: + _check_for_lock() + return _get_impl().multicall(CONF, context, topic, msg, timeout) -def notify(context, topic, msg): +def notify(context, topic, msg, envelope=False): """Send notification event. :param context: Information that identifies the user that has made this request. :param topic: The topic to send the notification to. :param msg: This is a dict of content of event. + :param envelope: Set to True to enable message envelope for notifications. 
:returns: None """ - return _get_impl().notify(cfg.CONF, context, topic, msg) + return _get_impl().notify(cfg.CONF, context, topic, msg, envelope) def cleanup(): @@ -215,7 +251,7 @@ def cast_to_server(context, server_params, topic, msg): :returns: None """ - return _get_impl().cast_to_server(cfg.CONF, context, server_params, topic, + return _get_impl().cast_to_server(CONF, context, server_params, topic, msg) @@ -231,7 +267,7 @@ def fanout_cast_to_server(context, server_params, topic, msg): :returns: None """ - return _get_impl().fanout_cast_to_server(cfg.CONF, context, server_params, + return _get_impl().fanout_cast_to_server(CONF, context, server_params, topic, msg) @@ -250,7 +286,7 @@ def queue_get_for(context, topic, host): Messages sent to the 'foo.' topic are sent to the nova-foo service on . """ - return '%s.%s' % (topic, host) + return '%s:%s' % (topic, host) if host else topic _RPCIMPL = None @@ -261,10 +297,10 @@ def _get_impl(): global _RPCIMPL if _RPCIMPL is None: try: - _RPCIMPL = importutils.import_module(cfg.CONF.rpc_backend) + _RPCIMPL = importutils.import_module(CONF.rpc_backend) except ImportError: # For backwards compatibility with older nova config. - impl = cfg.CONF.rpc_backend.replace('nova.rpc', - 'nova.openstack.common.rpc') + impl = CONF.rpc_backend.replace('nova.rpc', + 'nova.openstack.common.rpc') _RPCIMPL = importutils.import_module(impl) return _RPCIMPL diff --git a/cinder/openstack/common/rpc/amqp.py b/cinder/openstack/common/rpc/amqp.py index f31ab939fa..587d0f91e4 100644 --- a/cinder/openstack/common/rpc/amqp.py +++ b/cinder/openstack/common/rpc/amqp.py @@ -25,22 +25,46 @@ AMQP, but is deprecated and predates this code. """ +import collections import inspect -import logging import sys import uuid from eventlet import greenpool from eventlet import pools +from eventlet import queue from eventlet import semaphore +# TODO(pekowsk): Remove import cfg and below comment in Havana. +# This import should no longer be needed when the amqp_rpc_single_reply_queue +# option is removed. +from oslo.config import cfg -from cinder.openstack.common import cfg from cinder.openstack.common import excutils from cinder.openstack.common.gettextutils import _ from cinder.openstack.common import local +from cinder.openstack.common import log as logging from cinder.openstack.common.rpc import common as rpc_common +amqp_opts = [ + # TODO(pekowski): Remove this option in Havana. + cfg.BoolOpt('amqp_rpc_single_reply_queue', + default=False, + help='Enable a fast single reply queue if using AMQP based ' + 'RPC like RabbitMQ or Qpid.'), + cfg.BoolOpt('amqp_durable_queues', + default=False, + deprecated_name='rabbit_durable_queues', + deprecated_group='DEFAULT', + help='Use durable queues in amqp.'), + cfg.BoolOpt('amqp_auto_delete', + default=False, + help='Auto-delete queues in amqp.'), +] + +cfg.CONF.register_opts(amqp_opts) + +UNIQUE_ID = '_unique_id' LOG = logging.getLogger(__name__) @@ -52,15 +76,26 @@ def __init__(self, conf, connection_cls, *args, **kwargs): kwargs.setdefault("max_size", self.conf.rpc_conn_pool_size) kwargs.setdefault("order_as_stack", True) super(Pool, self).__init__(*args, **kwargs) + self.reply_proxy = None # TODO(comstud): Timeout connections not used in a while def create(self): - LOG.debug('Pool creating new connection') + LOG.debug(_('Pool creating new connection')) return self.connection_cls(self.conf) def empty(self): while self.free_items: self.get().close() + # Force a new connection pool to be created. 
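
To make the new keywords concrete, a hypothetical caller of the module-level API above might look like this (topic, method, and payload are invented)::

    # queue_get_for() now returns 'topic:host' rather than 'topic.host',
    # and just 'topic' when no host is given.
    topic = rpc.queue_get_for(ctxt, 'cinder-volume', 'host1')

    # check_for_lock=True makes call() log a warning (when debug is on)
    # if the RPC is issued while a local lock is held.
    result = rpc.call(ctxt, topic,
                      {'method': 'create_volume',
                       'version': '1.4',
                       'args': {'volume_id': 'vol-1'}},
                      timeout=30,
                      check_for_lock=True)
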
+        # Note that this was added due to failing unit test cases. The issue
+        # is the above "while loop" gets all the cached connections from the
+        # pool and closes them, but never returns them to the pool, a pool
+        # leak. The unit tests hang waiting for an item to be returned to the
+        # pool. The unit tests get here via the tearDown() method. In the run
+        # time code, it gets here via cleanup() and only appears in service.py
+        # just before doing a sys.exit(), so cleanup() only happens once and
+        # the leakage is not a problem.
+        self.connection_cls.pool = None
 
 
 _pool_create_sem = semaphore.Semaphore()
 
@@ -138,6 +173,12 @@ def create_consumer(self, topic, proxy, fanout=False):
     def create_worker(self, topic, proxy, pool_name):
         self.connection.create_worker(topic, proxy, pool_name)
 
+    def join_consumer_pool(self, callback, pool_name, topic, exchange_name):
+        self.connection.join_consumer_pool(callback,
+                                           pool_name,
+                                           topic,
+                                           exchange_name)
+
     def consume_in_thread(self):
         self.connection.consume_in_thread()
 
@@ -149,8 +190,45 @@ def __getattr__(self, key):
         raise rpc_common.InvalidRPCConnectionReuse()
 
 
-def msg_reply(conf, msg_id, connection_pool, reply=None, failure=None,
-              ending=False):
+class ReplyProxy(ConnectionContext):
+    """ Connection class for RPC replies / callbacks """
+    def __init__(self, conf, connection_pool):
+        self._call_waiters = {}
+        self._num_call_waiters = 0
+        self._num_call_waiters_wrn_threshold = 10
+        self._reply_q = 'reply_' + uuid.uuid4().hex
+        super(ReplyProxy, self).__init__(conf, connection_pool, pooled=False)
+        self.declare_direct_consumer(self._reply_q, self._process_data)
+        self.consume_in_thread()
+
+    def _process_data(self, message_data):
+        msg_id = message_data.pop('_msg_id', None)
+        waiter = self._call_waiters.get(msg_id)
+        if not waiter:
+            LOG.warn(_('no calling threads waiting for msg_id : %s'
+                       ', message : %s') % (msg_id, message_data))
+        else:
+            waiter.put(message_data)
+
+    def add_call_waiter(self, waiter, msg_id):
+        self._num_call_waiters += 1
+        if self._num_call_waiters > self._num_call_waiters_wrn_threshold:
+            LOG.warn(_('Number of call waiters is greater than warning '
+                       'threshold: %d. There could be a MulticallProxyWaiter '
+                       'leak.') % self._num_call_waiters_wrn_threshold)
+            self._num_call_waiters_wrn_threshold *= 2
+        self._call_waiters[msg_id] = waiter
+
+    def del_call_waiter(self, msg_id):
+        self._num_call_waiters -= 1
+        del self._call_waiters[msg_id]
+
+    def get_reply_q(self):
+        return self._reply_q
+
+
+def msg_reply(conf, msg_id, reply_q, connection_pool, reply=None,
+              failure=None, ending=False, log_failure=True):
     """Sends a reply or an error on the channel signified by msg_id.
 
     Failure should be a sys.exc_info() tuple.
 
     """
     with ConnectionContext(conf, connection_pool) as conn:
         if failure:
-            failure = rpc_common.serialize_remote_exception(failure)
+            failure = rpc_common.serialize_remote_exception(failure,
+                                                            log_failure)
 
         try:
             msg = {'result': reply, 'failure': failure}
@@ -168,13 +247,22 @@
                    'failure': failure}
         if ending:
             msg['ending'] = True
-        conn.direct_send(msg_id, msg)
+        _add_unique_id(msg)
+        # If a reply_q exists, add the msg_id to the reply and pass the
+        # reply_q to direct_send() to use it as the response queue.
+        # Otherwise use the msg_id for backward compatibility.
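
A sketch of how the two halves of the single-reply-queue path fit together, using the classes defined above (the branch below then picks between the new queue and the legacy per-call queue)::

    # Caller side: one ReplyProxy per connection pool consumes from a
    # shared 'reply_<uuid>' queue and hands each reply to the waiter
    # registered under its '_msg_id'.
    reply_proxy = ReplyProxy(conf, connection_pool)
    msg['_reply_q'] = reply_proxy.get_reply_q()

    # Server side: msg_reply() stamps the reply with the original msg_id
    # and publishes it to that shared queue via direct_send(reply_q, ...).
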
+ if reply_q: + msg['_msg_id'] = msg_id + conn.direct_send(reply_q, rpc_common.serialize_msg(msg)) + else: + conn.direct_send(msg_id, rpc_common.serialize_msg(msg)) class RpcContext(rpc_common.CommonRpcContext): """Context that supports replying to a rpc.call""" def __init__(self, **kwargs): self.msg_id = kwargs.pop('msg_id', None) + self.reply_q = kwargs.pop('reply_q', None) self.conf = kwargs.pop('conf') super(RpcContext, self).__init__(**kwargs) @@ -182,13 +270,14 @@ def deepcopy(self): values = self.to_dict() values['conf'] = self.conf values['msg_id'] = self.msg_id + values['reply_q'] = self.reply_q return self.__class__(**values) def reply(self, reply=None, failure=None, ending=False, - connection_pool=None): + connection_pool=None, log_failure=True): if self.msg_id: - msg_reply(self.conf, self.msg_id, connection_pool, reply, failure, - ending) + msg_reply(self.conf, self.msg_id, self.reply_q, connection_pool, + reply, failure, ending, log_failure) if ending: self.msg_id = None @@ -204,6 +293,7 @@ def unpack_context(conf, msg): value = msg.pop(key) context_dict[key[9:]] = value context_dict['msg_id'] = msg.pop('_msg_id', None) + context_dict['reply_q'] = msg.pop('_reply_q', None) context_dict['conf'] = conf ctx = RpcContext.from_dict(context_dict) rpc_common._safe_log(LOG.debug, _('unpacked context: %s'), ctx.to_dict()) @@ -224,15 +314,86 @@ def pack_context(msg, context): msg.update(context_d) -class ProxyCallback(object): - """Calls methods on a proxy object based on method and args.""" +class _MsgIdCache(object): + """This class checks any duplicate messages.""" - def __init__(self, conf, proxy, connection_pool): - self.proxy = proxy + # NOTE: This value is considered can be a configuration item, but + # it is not necessary to change its value in most cases, + # so let this value as static for now. + DUP_MSG_CHECK_SIZE = 16 + + def __init__(self, **kwargs): + self.prev_msgids = collections.deque([], + maxlen=self.DUP_MSG_CHECK_SIZE) + + def check_duplicate_message(self, message_data): + """AMQP consumers may read same message twice when exceptions occur + before ack is returned. This method prevents doing it. + """ + if UNIQUE_ID in message_data: + msg_id = message_data[UNIQUE_ID] + if msg_id not in self.prev_msgids: + self.prev_msgids.append(msg_id) + else: + raise rpc_common.DuplicateMessageError(msg_id=msg_id) + + +def _add_unique_id(msg): + """Add unique_id for checking duplicate messages.""" + unique_id = uuid.uuid4().hex + msg.update({UNIQUE_ID: unique_id}) + LOG.debug(_('UNIQUE_ID is %s.') % (unique_id)) + + +class _ThreadPoolWithWait(object): + """Base class for a delayed invocation manager used by + the Connection class to start up green threads + to handle incoming messages. + """ + + def __init__(self, conf, connection_pool): self.pool = greenpool.GreenPool(conf.rpc_thread_pool_size) self.connection_pool = connection_pool self.conf = conf + def wait(self): + """Wait for all callback threads to exit.""" + self.pool.waitall() + + +class CallbackWrapper(_ThreadPoolWithWait): + """Wraps a straight callback to allow it to be invoked in a green + thread. 
+ """ + + def __init__(self, conf, callback, connection_pool): + """ + :param conf: cfg.CONF instance + :param callback: a callable (probably a function) + :param connection_pool: connection pool as returned by + get_connection_pool() + """ + super(CallbackWrapper, self).__init__( + conf=conf, + connection_pool=connection_pool, + ) + self.callback = callback + + def __call__(self, message_data): + self.pool.spawn_n(self.callback, message_data) + + +class ProxyCallback(_ThreadPoolWithWait): + """Calls methods on a proxy object based on method and args.""" + + def __init__(self, conf, proxy, connection_pool): + super(ProxyCallback, self).__init__( + conf=conf, + connection_pool=connection_pool, + ) + self.proxy = proxy + self.msg_id_cache = _MsgIdCache() + def __call__(self, message_data): """Consumer callback to call a method on a proxy object. @@ -251,18 +412,21 @@ def __call__(self, message_data): if hasattr(local.store, 'context'): del local.store.context rpc_common._safe_log(LOG.debug, _('received %s'), message_data) + self.msg_id_cache.check_duplicate_message(message_data) ctxt = unpack_context(self.conf, message_data) method = message_data.get('method') args = message_data.get('args', {}) - version = message_data.get('version', None) + version = message_data.get('version') + namespace = message_data.get('namespace') if not method: LOG.warn(_('no method for message: %s') % message_data) ctxt.reply(_('No method for message: %s') % message_data, connection_pool=self.connection_pool) return - self.pool.spawn_n(self._process_data, ctxt, version, method, args) + self.pool.spawn_n(self._process_data, ctxt, version, method, + namespace, args) - def _process_data(self, ctxt, version, method, args): + def _process_data(self, ctxt, version, method, namespace, args): """Process a message in a new thread. If the proxy object we have has a dispatch method @@ -273,7 +437,8 @@ def _process_data(self, ctxt, version, method, args): """ ctxt.update_store() try: - rval = self.proxy.dispatch(ctxt, version, method, **args) + rval = self.proxy.dispatch(ctxt, version, method, namespace, + **args) # Check if the result was a generator if inspect.isgenerator(rval): for x in rval: @@ -282,12 +447,80 @@ def _process_data(self, ctxt, version, method, args): ctxt.reply(rval, None, connection_pool=self.connection_pool) # This final None tells multicall that it is done. ctxt.reply(ending=True, connection_pool=self.connection_pool) - except Exception as e: - LOG.exception('Exception during message handling') - ctxt.reply(None, sys.exc_info(), - connection_pool=self.connection_pool) + except rpc_common.ClientException as e: + LOG.debug(_('Expected exception during message handling (%s)') % + e._exc_info[1]) + ctxt.reply(None, e._exc_info, + connection_pool=self.connection_pool, + log_failure=False) + except Exception: + # sys.exc_info() is deleted by LOG.exception(). 
+ exc_info = sys.exc_info() + LOG.error(_('Exception during message handling'), + exc_info=exc_info) + ctxt.reply(None, exc_info, connection_pool=self.connection_pool) + + +class MulticallProxyWaiter(object): + def __init__(self, conf, msg_id, timeout, connection_pool): + self._msg_id = msg_id + self._timeout = timeout or conf.rpc_response_timeout + self._reply_proxy = connection_pool.reply_proxy + self._done = False + self._got_ending = False + self._conf = conf + self._dataqueue = queue.LightQueue() + # Add this caller to the reply proxy's call_waiters + self._reply_proxy.add_call_waiter(self, self._msg_id) + self.msg_id_cache = _MsgIdCache() + + def put(self, data): + self._dataqueue.put(data) + + def done(self): + if self._done: + return + self._done = True + # Remove this caller from reply proxy's call_waiters + self._reply_proxy.del_call_waiter(self._msg_id) + + def _process_data(self, data): + result = None + self.msg_id_cache.check_duplicate_message(data) + if data['failure']: + failure = data['failure'] + result = rpc_common.deserialize_remote_exception(self._conf, + failure) + elif data.get('ending', False): + self._got_ending = True + else: + result = data['result'] + return result + + def __iter__(self): + """Return a result until we get a reply with an 'ending" flag""" + if self._done: + raise StopIteration + while True: + try: + data = self._dataqueue.get(timeout=self._timeout) + result = self._process_data(data) + except queue.Empty: + self.done() + raise rpc_common.Timeout() + except Exception: + with excutils.save_and_reraise_exception(): + self.done() + if self._got_ending: + self.done() + raise StopIteration + if isinstance(result, Exception): + self.done() + raise result + yield result +#TODO(pekowski): Remove MulticallWaiter() in Havana. class MulticallWaiter(object): def __init__(self, conf, connection, timeout): self._connection = connection @@ -297,6 +530,7 @@ def __init__(self, conf, connection, timeout): self._done = False self._got_ending = False self._conf = conf + self.msg_id_cache = _MsgIdCache() def done(self): if self._done: @@ -308,6 +542,7 @@ def done(self): def __call__(self, data): """The consume() callback will call this. Store the result.""" + self.msg_id_cache.check_duplicate_message(data) if data['failure']: failure = data['failure'] self._result = rpc_common.deserialize_remote_exception(self._conf, @@ -343,22 +578,41 @@ def create_connection(conf, new, connection_pool): return ConnectionContext(conf, connection_pool, pooled=not new) +_reply_proxy_create_sem = semaphore.Semaphore() + + def multicall(conf, context, topic, msg, timeout, connection_pool): """Make a call that returns multiple times.""" + # TODO(pekowski): Remove all these comments in Havana. + # For amqp_rpc_single_reply_queue = False, # Can't use 'with' for multicall, as it returns an iterator # that will continue to use the connection. 
When it's done, # connection.close() will get called which will put it back into # the pool - LOG.debug(_('Making asynchronous call on %s ...'), topic) + # For amqp_rpc_single_reply_queue = True, + # The 'with' statement is mandatory for closing the connection + LOG.debug(_('Making synchronous call on %s ...'), topic) msg_id = uuid.uuid4().hex msg.update({'_msg_id': msg_id}) LOG.debug(_('MSG_ID is %s') % (msg_id)) + _add_unique_id(msg) pack_context(msg, context) - conn = ConnectionContext(conf, connection_pool) - wait_msg = MulticallWaiter(conf, conn, timeout) - conn.declare_direct_consumer(msg_id, wait_msg) - conn.topic_send(topic, msg) + # TODO(pekowski): Remove this flag and the code under the if clause + # in Havana. + if not conf.amqp_rpc_single_reply_queue: + conn = ConnectionContext(conf, connection_pool) + wait_msg = MulticallWaiter(conf, conn, timeout) + conn.declare_direct_consumer(msg_id, wait_msg) + conn.topic_send(topic, rpc_common.serialize_msg(msg), timeout) + else: + with _reply_proxy_create_sem: + if not connection_pool.reply_proxy: + connection_pool.reply_proxy = ReplyProxy(conf, connection_pool) + msg.update({'_reply_q': connection_pool.reply_proxy.get_reply_q()}) + wait_msg = MulticallProxyWaiter(conf, msg_id, timeout, connection_pool) + with ConnectionContext(conf, connection_pool) as conn: + conn.topic_send(topic, rpc_common.serialize_msg(msg), timeout) return wait_msg @@ -375,42 +629,50 @@ def call(conf, context, topic, msg, timeout, connection_pool): def cast(conf, context, topic, msg, connection_pool): """Sends a message on a topic without waiting for a response.""" LOG.debug(_('Making asynchronous cast on %s...'), topic) + _add_unique_id(msg) pack_context(msg, context) with ConnectionContext(conf, connection_pool) as conn: - conn.topic_send(topic, msg) + conn.topic_send(topic, rpc_common.serialize_msg(msg)) def fanout_cast(conf, context, topic, msg, connection_pool): """Sends a message on a fanout exchange without waiting for a response.""" LOG.debug(_('Making asynchronous fanout cast...')) + _add_unique_id(msg) pack_context(msg, context) with ConnectionContext(conf, connection_pool) as conn: - conn.fanout_send(topic, msg) + conn.fanout_send(topic, rpc_common.serialize_msg(msg)) def cast_to_server(conf, context, server_params, topic, msg, connection_pool): """Sends a message on a topic to a specific server.""" + _add_unique_id(msg) pack_context(msg, context) with ConnectionContext(conf, connection_pool, pooled=False, server_params=server_params) as conn: - conn.topic_send(topic, msg) + conn.topic_send(topic, rpc_common.serialize_msg(msg)) def fanout_cast_to_server(conf, context, server_params, topic, msg, connection_pool): """Sends a message on a fanout exchange to a specific server.""" + _add_unique_id(msg) pack_context(msg, context) with ConnectionContext(conf, connection_pool, pooled=False, server_params=server_params) as conn: - conn.fanout_send(topic, msg) + conn.fanout_send(topic, rpc_common.serialize_msg(msg)) -def notify(conf, context, topic, msg, connection_pool): +def notify(conf, context, topic, msg, connection_pool, envelope): """Sends a notification event on a topic.""" - event_type = msg.get('event_type') - LOG.debug(_('Sending %(event_type)s on %(topic)s'), locals()) + LOG.debug(_('Sending %(event_type)s on %(topic)s'), + dict(event_type=msg.get('event_type'), + topic=topic)) + _add_unique_id(msg) pack_context(msg, context) with ConnectionContext(conf, connection_pool) as conn: + if envelope: + msg = rpc_common.serialize_msg(msg) 
conn.notify_send(topic, msg) @@ -420,7 +682,4 @@ def cleanup(connection_pool): def get_control_exchange(conf): - try: - return conf.control_exchange - except cfg.NoSuchOptError: - return 'openstack' + return conf.control_exchange diff --git a/cinder/openstack/common/rpc/common.py b/cinder/openstack/common/rpc/common.py index b14db3cfe9..9f0552e5e9 100644 --- a/cinder/openstack/common/rpc/common.py +++ b/cinder/openstack/common/rpc/common.py @@ -18,18 +18,58 @@ # under the License. import copy -import logging +import sys import traceback +from oslo.config import cfg + from cinder.openstack.common.gettextutils import _ from cinder.openstack.common import importutils from cinder.openstack.common import jsonutils from cinder.openstack.common import local +from cinder.openstack.common import log as logging +CONF = cfg.CONF LOG = logging.getLogger(__name__) +'''RPC Envelope Version. + +This version number applies to the top level structure of messages sent out. +It does *not* apply to the message payload, which must be versioned +independently. For example, when using rpc APIs, a version number is applied +for changes to the API being exposed over rpc. This version number is handled +in the rpc proxy and dispatcher modules. + +This version number applies to the message envelope that is used in the +serialization done inside the rpc layer. See serialize_msg() and +deserialize_msg(). + +The current message format (version 2.0) is very simple. It is: + + { + 'oslo.version': , + 'oslo.message': + } + +Message format version '1.0' is just considered to be the messages we sent +without a message envelope. + +So, the current message envelope just includes the envelope version. It may +eventually contain additional information, such as a signature for the message +payload. + +We will JSON encode the application message payload. The message envelope, +which includes the JSON encoded application message body, will be passed down +to the messaging libraries as a dict. +''' +_RPC_ENVELOPE_VERSION = '2.0' + +_VERSION_KEY = 'oslo.version' +_MESSAGE_KEY = 'oslo.message' + + class RPCException(Exception): message = _("An unknown RPC related exception occurred.") @@ -40,7 +80,7 @@ def __init__(self, message=None, **kwargs): try: message = self.message % kwargs - except Exception as e: + except Exception: # kwargs doesn't match a variable in the message # log the issue and the kwargs LOG.exception(_('Exception in string format operation')) @@ -78,7 +118,29 @@ class Timeout(RPCException): This exception is raised if the rpc_response_timeout is reached while waiting for a response from the remote side. """ - message = _("Timeout while waiting on RPC response.") + message = _('Timeout while waiting on RPC response - ' + 'topic: "%(topic)s", RPC method: "%(method)s" ' + 'info: "%(info)s"') + + def __init__(self, info=None, topic=None, method=None): + """ + :param info: Extra info to convey to the user + :param topic: The topic that the rpc call was sent to + :param rpc_method_name: The name of the rpc method being + called + """ + self.info = info + self.topic = topic + self.method = method + super(Timeout, self).__init__( + None, + info=info or _(''), + topic=topic or _(''), + method=method or _('')) + + +class DuplicateMessageError(RPCException): + message = _("Found duplicate message(%(msg_id)s). 
Skipping it.") class InvalidRPCConnectionReuse(RPCException): @@ -90,6 +152,11 @@ class UnsupportedRpcVersion(RPCException): "this endpoint.") +class UnsupportedRpcEnvelopeVersion(RPCException): + message = _("Specified RPC envelope version, %(version)s, " + "not supported by this endpoint.") + + class Connection(object): """A connection, returned by rpc.create_connection(). @@ -148,6 +215,28 @@ def create_worker(self, topic, proxy, pool_name): """ raise NotImplementedError() + def join_consumer_pool(self, callback, pool_name, topic, exchange_name): + """Register as a member of a group of consumers for a given topic from + the specified exchange. + + Exactly one member of a given pool will receive each message. + + A message will be delivered to multiple pools, if more than + one is created. + + :param callback: Callable to be invoked for each message. + :type callback: callable accepting one argument + :param pool_name: The name of the consumer pool. + :type pool_name: str + :param topic: The routing topic for desired messages. + :type topic: str + :param exchange_name: The name of the message exchange where + the client should attach. Defaults to + the configured exchange. + :type exchange_name: str + """ + raise NotImplementedError() + def consume_in_thread(self): """Spawn a thread to handle incoming messages. @@ -164,8 +253,12 @@ def consume_in_thread(self): def _safe_log(log_func, msg, msg_data): """Sanitizes the msg_data field before logging.""" - SANITIZE = {'set_admin_password': ('new_pass',), - 'run_instance': ('admin_password',), } + SANITIZE = {'set_admin_password': [('args', 'new_pass')], + 'run_instance': [('args', 'admin_password')], + 'route_message': [('args', 'message', 'args', 'method_info', + 'method_kwargs', 'password'), + ('args', 'message', 'args', 'method_info', + 'method_kwargs', 'admin_password')]} has_method = 'method' in msg_data and msg_data['method'] in SANITIZE has_context_token = '_context_auth_token' in msg_data @@ -177,14 +270,16 @@ def _safe_log(log_func, msg, msg_data): msg_data = copy.deepcopy(msg_data) if has_method: - method = msg_data['method'] - if method in SANITIZE: - args_to_sanitize = SANITIZE[method] - for arg in args_to_sanitize: - try: - msg_data['args'][arg] = "" - except KeyError: - pass + for arg in SANITIZE.get(msg_data['method'], []): + try: + d = msg_data + for elem in arg[:-1]: + d = d[elem] + d[arg[-1]] = '' + except KeyError, e: + LOG.info(_('Failed to sanitize %(item)s. Key error %(err)s'), + {'item': arg, + 'err': e}) if has_context_token: msg_data['_context_auth_token'] = '' @@ -195,7 +290,7 @@ def _safe_log(log_func, msg, msg_data): return log_func(msg, msg_data) -def serialize_remote_exception(failure_info): +def serialize_remote_exception(failure_info, log_failure=True): """Prepares exception data to be sent over rpc. Failure_info should be a sys.exc_info() tuple. @@ -203,8 +298,9 @@ def serialize_remote_exception(failure_info): """ tb = traceback.format_exception(*failure_info) failure = failure_info[1] - LOG.error(_("Returning exception %s to caller"), unicode(failure)) - LOG.error(tb) + if log_failure: + LOG.error(_("Returning exception %s to caller"), unicode(failure)) + LOG.error(tb) kwargs = {} if hasattr(failure, 'kwargs'): @@ -234,7 +330,7 @@ def deserialize_remote_exception(conf, data): # NOTE(ameade): We DO NOT want to allow just any module to be imported, in # order to prevent arbitrary code execution. 
- if not module in conf.allowed_rpc_exception_modules: + if module not in conf.allowed_rpc_exception_modules: return RemoteError(name, failure.get('message'), trace) try: @@ -243,7 +339,7 @@ def deserialize_remote_exception(conf, data): if not issubclass(klass, Exception): raise TypeError("Can only deserialize Exceptions") - failure = klass(**failure.get('kwargs', {})) + failure = klass(*failure.get('args', []), **failure.get('kwargs', {})) except (AttributeError, TypeError, ImportError): return RemoteError(name, failure.get('message'), trace) @@ -258,7 +354,7 @@ def deserialize_remote_exception(conf, data): # we cannot necessarily change an exception message so we must override # the __str__ method. failure.__class__ = new_ex_type - except TypeError as e: + except TypeError: # NOTE(ameade): If a core exception then just add the traceback to the # first exception argument. failure.args = (message,) + failure.args[1:] @@ -309,3 +405,104 @@ def elevated(self, read_deleted=None, overwrite=False): context.values['read_deleted'] = read_deleted return context + + +class ClientException(Exception): + """This encapsulates some actual exception that is expected to be + hit by an RPC proxy object. Merely instantiating it records the + current exception information, which will be passed back to the + RPC client without exceptional logging.""" + def __init__(self): + self._exc_info = sys.exc_info() + + +def catch_client_exception(exceptions, func, *args, **kwargs): + try: + return func(*args, **kwargs) + except Exception, e: + if type(e) in exceptions: + raise ClientException() + else: + raise + + +def client_exceptions(*exceptions): + """Decorator for manager methods that raise expected exceptions. + Marking a Manager method with this decorator allows the declaration + of expected exceptions that the RPC layer should not consider fatal, + and not log as if they were generated in a real error scenario. Note + that this will cause listed exceptions to be wrapped in a + ClientException, which is used internally by the RPC layer.""" + def outer(func): + def inner(*args, **kwargs): + return catch_client_exception(exceptions, func, *args, **kwargs) + return inner + return outer + + +def version_is_compatible(imp_version, version): + """Determine whether versions are compatible. + + :param imp_version: The version implemented + :param version: The version requested by an incoming message. + """ + version_parts = version.split('.') + imp_version_parts = imp_version.split('.') + if int(version_parts[0]) != int(imp_version_parts[0]): # Major + return False + if int(version_parts[1]) > int(imp_version_parts[1]): # Minor + return False + return True + + +def serialize_msg(raw_msg): + # NOTE(russellb) See the docstring for _RPC_ENVELOPE_VERSION for more + # information about this format. + msg = {_VERSION_KEY: _RPC_ENVELOPE_VERSION, + _MESSAGE_KEY: jsonutils.dumps(raw_msg)} + + return msg + + +def deserialize_msg(msg): + # NOTE(russellb): Hang on to your hats, this road is about to + # get a little bumpy. + # + # Robustness Principle: + # "Be strict in what you send, liberal in what you accept." + # + # At this point we have to do a bit of guessing about what it + # is we just received. Here is the set of possibilities: + # + # 1) We received a dict. This could be 2 things: + # + # a) Inspect it to see if it looks like a standard message envelope. + # If so, great! 
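
Two quick illustrations of the helpers defined above (JSON key order in the envelope may vary)::

    version_is_compatible('1.4', '1.2')   # True: same major, 2 <= 4
    version_is_compatible('1.4', '1.6')   # False: caller needs a newer minor
    version_is_compatible('2.0', '1.9')   # False: major versions differ

    serialize_msg({'method': 'ping', 'args': {}})
    # -> {'oslo.version': '2.0',
    #     'oslo.message': '{"method": "ping", "args": {}}'}
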
+ # + # b) If it doesn't look like a standard message envelope, it could either + # be a notification, or a message from before we added a message + # envelope (referred to as version 1.0). + # Just return the message as-is. + # + # 2) It's any other non-dict type. Just return it and hope for the best. + # This case covers return values from rpc.call() from before message + # envelopes were used. (messages to call a method were always a dict) + + if not isinstance(msg, dict): + # See #2 above. + return msg + + base_envelope_keys = (_VERSION_KEY, _MESSAGE_KEY) + if not all(map(lambda key: key in msg, base_envelope_keys)): + # See #1.b above. + return msg + + # At this point we think we have the message envelope + # format we were expecting. (#1.a above) + + if not version_is_compatible(_RPC_ENVELOPE_VERSION, msg[_VERSION_KEY]): + raise UnsupportedRpcEnvelopeVersion(version=msg[_VERSION_KEY]) + + raw_msg = jsonutils.loads(msg[_MESSAGE_KEY]) + + return raw_msg diff --git a/cinder/openstack/common/rpc/dispatcher.py b/cinder/openstack/common/rpc/dispatcher.py index 9f8a9085ee..85195d4a74 100644 --- a/cinder/openstack/common/rpc/dispatcher.py +++ b/cinder/openstack/common/rpc/dispatcher.py @@ -41,8 +41,8 @@ there can be both versioned and unversioned APIs implemented in the same code base. - -EXAMPLES: +EXAMPLES +======== Nova was the first project to use versioned rpc APIs. Consider the compute rpc API as an example. The client side is in nova/compute/rpcapi.py and the server @@ -50,12 +50,13 @@ Example 1) Adding a new method. +------------------------------- Adding a new method is a backwards compatible change. It should be added to nova/compute/manager.py, and RPC_API_VERSION should be bumped from X.Y to X.Y+1. On the client side, the new method in nova/compute/rpcapi.py should have a specific version specified to indicate the minimum API version that must -be implemented for the method to be supported. For example: +be implemented for the method to be supported. For example:: def get_host_uptime(self, ctxt, host): topic = _compute_topic(self.topic, ctxt, host, None) @@ -67,10 +68,11 @@ def get_host_uptime(self, ctxt, host): Example 2) Adding a new parameter. +---------------------------------- Adding a new parameter to an rpc method can be made backwards compatible. The RPC_API_VERSION on the server side (nova/compute/manager.py) should be bumped. -The implementation of the method must not expect the parameter to be present. +The implementation of the method must not expect the parameter to be present.:: def some_remote_method(self, arg1, arg2, newarg=None): # The code needs to deal with newarg=None for cases @@ -101,28 +103,16 @@ def __init__(self, callbacks): self.callbacks = callbacks super(RpcDispatcher, self).__init__() - @staticmethod - def _is_compatible(mversion, version): - """Determine whether versions are compatible. - - :param mversion: The API version implemented by a callback. - :param version: The API version requested by an incoming message. - """ - version_parts = version.split('.') - mversion_parts = mversion.split('.') - if int(version_parts[0]) != int(mversion_parts[0]): # Major - return False - if int(version_parts[1]) > int(mversion_parts[1]): # Minor - return False - return True - - def dispatch(self, ctxt, version, method, **kwargs): + def dispatch(self, ctxt, version, method, namespace, **kwargs): """Dispatch a message based on a requested version. 
:param ctxt: The request context :param version: The requested API version from the incoming message :param method: The method requested to be called by the incoming message. + :param namespace: The namespace for the requested method. If None, + the dispatcher will look for a method on a callback + object with no namespace set. :param kwargs: A dict of keyword arguments to be passed to the method. :returns: Whatever is returned by the underlying method that gets @@ -133,12 +123,25 @@ def dispatch(self, ctxt, version, method, **kwargs): had_compatible = False for proxyobj in self.callbacks: - if hasattr(proxyobj, 'RPC_API_VERSION'): + # Check for namespace compatibility + try: + cb_namespace = proxyobj.RPC_API_NAMESPACE + except AttributeError: + cb_namespace = None + + if namespace != cb_namespace: + continue + + # Check for version compatibility + try: rpc_api_version = proxyobj.RPC_API_VERSION - else: + except AttributeError: rpc_api_version = '1.0' - is_compatible = self._is_compatible(rpc_api_version, version) + + is_compatible = rpc_common.version_is_compatible(rpc_api_version, + version) had_compatible = had_compatible or is_compatible + if not hasattr(proxyobj, method): continue if is_compatible: diff --git a/cinder/openstack/common/rpc/impl_fake.py b/cinder/openstack/common/rpc/impl_fake.py index a47b5b7e44..ec7200a7b6 100644 --- a/cinder/openstack/common/rpc/impl_fake.py +++ b/cinder/openstack/common/rpc/impl_fake.py @@ -1,6 +1,6 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright 2011 OpenStack LLC +# Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -18,11 +18,15 @@ """ import inspect +# NOTE(russellb): We specifically want to use json, not our own jsonutils. +# jsonutils has some extra logic to automatically convert objects to primitive +# types so that they can be serialized. We want to catch all cases where +# non-primitive types make it into this code and treat it as an error. 
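
A sketch of how the namespace- and version-aware dispatch above selects a callback (the manager class is invented for illustration)::

    class VolumeManagerAPI(object):
        RPC_API_VERSION = '1.4'   # no RPC_API_NAMESPACE, so namespace is None

        def create_volume(self, ctxt, volume_id):
            pass

    dispatcher = RpcDispatcher([VolumeManagerAPI()])

    # Compatible: namespace matches (None) and 1.2 <= 1.4 within major 1.
    dispatcher.dispatch(ctxt, '1.2', 'create_volume', None, volume_id='v-1')

    # Incompatible major version: the dispatcher reports an unsupported
    # RPC version instead of invoking the method.
    dispatcher.dispatch(ctxt, '2.0', 'create_volume', None, volume_id='v-1')
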
+import json import time import eventlet -from cinder.openstack.common import jsonutils from cinder.openstack.common.rpc import common as rpc_common CONSUMERS = {} @@ -53,13 +57,14 @@ def __init__(self, topic, proxy): self.topic = topic self.proxy = proxy - def call(self, context, version, method, args, timeout): + def call(self, context, version, method, namespace, args, timeout): done = eventlet.event.Event() def _inner(): ctxt = RpcContext.from_dict(context.to_dict()) try: - rval = self.proxy.dispatch(context, version, method, **args) + rval = self.proxy.dispatch(context, version, method, + namespace, **args) res = [] # Caller might have called ctxt.reply() manually for (reply, failure) in ctxt._response: @@ -75,6 +80,8 @@ def _inner(): else: res.append(rval) done.send(res) + except rpc_common.ClientException as e: + done.send_exception(e._exc_info[1]) except Exception as e: done.send_exception(e) @@ -121,7 +128,7 @@ def create_connection(conf, new=True): def check_serialize(msg): """Make sure a message intended for rpc can be serialized.""" - jsonutils.dumps(msg) + json.dumps(msg) def multicall(conf, context, topic, msg, timeout=None): @@ -134,13 +141,15 @@ def multicall(conf, context, topic, msg, timeout=None): return args = msg.get('args', {}) version = msg.get('version', None) + namespace = msg.get('namespace', None) try: consumer = CONSUMERS[topic][0] except (KeyError, IndexError): return iter([None]) else: - return consumer.call(context, version, method, args, timeout) + return consumer.call(context, version, method, namespace, args, + timeout) def call(conf, context, topic, msg, timeout=None): @@ -154,13 +163,14 @@ def call(conf, context, topic, msg, timeout=None): def cast(conf, context, topic, msg): + check_serialize(msg) try: call(conf, context, topic, msg) except Exception: pass -def notify(conf, context, topic, msg): +def notify(conf, context, topic, msg, envelope): check_serialize(msg) @@ -176,9 +186,10 @@ def fanout_cast(conf, context, topic, msg): return args = msg.get('args', {}) version = msg.get('version', None) + namespace = msg.get('namespace', None) for consumer in CONSUMERS.get(topic, []): try: - consumer.call(context, version, method, args, None) + consumer.call(context, version, method, namespace, args, None) except Exception: pass diff --git a/cinder/openstack/common/rpc/impl_kombu.py b/cinder/openstack/common/rpc/impl_kombu.py index 01f67defd5..424a61c0a1 100644 --- a/cinder/openstack/common/rpc/impl_kombu.py +++ b/cinder/openstack/common/rpc/impl_kombu.py @@ -1,6 +1,6 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright 2011 OpenStack LLC +# Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain @@ -28,12 +28,12 @@ import kombu.connection import kombu.entity import kombu.messaging +from oslo.config import cfg -from cinder.openstack.common import cfg from cinder.openstack.common.gettextutils import _ +from cinder.openstack.common import network_utils from cinder.openstack.common.rpc import amqp as rpc_amqp from cinder.openstack.common.rpc import common as rpc_common -from cinder.openstack.common import network_utils kombu_opts = [ cfg.StrOpt('kombu_ssl_version', @@ -66,7 +66,8 @@ help='the RabbitMQ userid'), cfg.StrOpt('rabbit_password', default='guest', - help='the RabbitMQ password'), + help='the RabbitMQ password', + secret=True), cfg.StrOpt('rabbit_virtual_host', default='/', help='the RabbitMQ virtual host'), @@ -81,9 +82,6 @@ default=0, help='maximum retries with trying to connect to RabbitMQ ' '(the default of 0 implies an infinite retry count)'), - cfg.BoolOpt('rabbit_durable_queues', - default=False, - help='use durable queues in RabbitMQ'), cfg.BoolOpt('rabbit_ha_queues', default=False, help='use H/A queues in RabbitMQ (x-ha-policy: all).' @@ -162,10 +160,12 @@ def consume(self, *args, **kwargs): def _callback(raw_message): message = self.channel.message_to_python(raw_message) try: - callback(message.payload) - message.ack() + msg = rpc_common.deserialize_msg(message.payload) + callback(msg) except Exception: LOG.exception(_("Failed to process message... skipping it.")) + finally: + message.ack() self.queue.consume(*args, callback=_callback, **options) @@ -195,8 +195,9 @@ def __init__(self, conf, channel, msg_id, callback, tag, **kwargs): """ # Default options options = {'durable': False, + 'queue_arguments': _get_queue_arguments(conf), 'auto_delete': True, - 'exclusive': True} + 'exclusive': False} options.update(kwargs) exchange = kombu.entity.Exchange(name=msg_id, type='direct', @@ -229,9 +230,9 @@ def __init__(self, conf, channel, topic, callback, tag, name=None, Other kombu options may be passed as keyword arguments """ # Default options - options = {'durable': conf.rabbit_durable_queues, + options = {'durable': conf.amqp_durable_queues, 'queue_arguments': _get_queue_arguments(conf), - 'auto_delete': False, + 'auto_delete': conf.amqp_auto_delete, 'exclusive': False} options.update(kwargs) exchange_name = exchange_name or rpc_amqp.get_control_exchange(conf) @@ -267,8 +268,9 @@ def __init__(self, conf, channel, topic, callback, tag, **kwargs): # Default options options = {'durable': False, + 'queue_arguments': _get_queue_arguments(conf), 'auto_delete': True, - 'exclusive': True} + 'exclusive': False} options.update(kwargs) exchange = kombu.entity.Exchange(name=exchange_name, type='fanout', durable=options['durable'], @@ -300,9 +302,15 @@ def reconnect(self, channel): channel=channel, routing_key=self.routing_key) - def send(self, msg): + def send(self, msg, timeout=None): """Send a message""" - self.producer.publish(msg) + if timeout: + # + # AMQP TTL is in milliseconds when set in the header. 
+ # + self.producer.publish(msg, headers={'ttl': (timeout * 1000)}) + else: + self.producer.publish(msg) class DirectPublisher(Publisher): @@ -315,7 +323,7 @@ def __init__(self, conf, channel, msg_id, **kwargs): options = {'durable': False, 'auto_delete': True, - 'exclusive': True} + 'exclusive': False} options.update(kwargs) super(DirectPublisher, self).__init__(channel, msg_id, msg_id, type='direct', **options) @@ -328,8 +336,8 @@ def __init__(self, conf, channel, topic, **kwargs): Kombu options may be passed as keyword args to override defaults """ - options = {'durable': conf.rabbit_durable_queues, - 'auto_delete': False, + options = {'durable': conf.amqp_durable_queues, + 'auto_delete': conf.amqp_auto_delete, 'exclusive': False} options.update(kwargs) exchange_name = rpc_amqp.get_control_exchange(conf) @@ -349,7 +357,7 @@ def __init__(self, conf, channel, topic, **kwargs): """ options = {'durable': False, 'auto_delete': True, - 'exclusive': True} + 'exclusive': False} options.update(kwargs) super(FanoutPublisher, self).__init__(channel, '%s_fanout' % topic, None, type='fanout', **options) @@ -359,7 +367,7 @@ class NotifyPublisher(TopicPublisher): """Publisher class for 'notify'""" def __init__(self, conf, channel, topic, **kwargs): - self.durable = kwargs.pop('durable', conf.rabbit_durable_queues) + self.durable = kwargs.pop('durable', conf.amqp_durable_queues) self.queue_arguments = _get_queue_arguments(conf) super(NotifyPublisher, self).__init__(conf, channel, topic, **kwargs) @@ -386,6 +394,7 @@ class Connection(object): def __init__(self, conf, server_params=None): self.consumers = [] self.consumer_thread = None + self.proxy_callbacks = [] self.conf = conf self.max_retries = self.conf.rabbit_max_retries # Try forever? @@ -408,18 +417,18 @@ def __init__(self, conf, server_params=None): hostname, port = network_utils.parse_host_port( adr, default_port=self.conf.rabbit_port) - params = {} + params = { + 'hostname': hostname, + 'port': port, + 'userid': self.conf.rabbit_userid, + 'password': self.conf.rabbit_password, + 'virtual_host': self.conf.rabbit_virtual_host, + } for sp_key, value in server_params.iteritems(): p_key = server_params_to_kombu_params.get(sp_key, sp_key) params[p_key] = value - params.setdefault('hostname', hostname) - params.setdefault('port', port) - params.setdefault('userid', self.conf.rabbit_userid) - params.setdefault('password', self.conf.rabbit_password) - params.setdefault('virtual_host', self.conf.rabbit_virtual_host) - if self.conf.fake_rabbit: params['transport'] = 'memory' if self.conf.rabbit_use_ssl: @@ -468,7 +477,7 @@ def _connect(self, params): LOG.info(_("Reconnecting to AMQP server on " "%(hostname)s:%(port)d") % params) try: - self.connection.close() + self.connection.release() except self.connection_errors: pass # Setting this in case the next statement fails, though @@ -572,12 +581,14 @@ def get_channel(self): def close(self): """Close/release this connection""" self.cancel_consumer_thread() + self.wait_on_proxy_callbacks() self.connection.release() self.connection = None def reset(self): """Reset a connection so it can be used again""" self.cancel_consumer_thread() + self.wait_on_proxy_callbacks() self.channel.close() self.channel = self.connection.channel() # work around 'memory' transport bug in 1.1.3 @@ -610,8 +621,8 @@ def iterconsume(self, limit=None, timeout=None): def _error_callback(exc): if isinstance(exc, socket.timeout): - LOG.exception(_('Timed out waiting for RPC response: %s') % - str(exc)) + LOG.debug(_('Timed out waiting for 
RPC response: %s') % + str(exc)) raise rpc_common.Timeout() else: LOG.exception(_('Failed to consume message from queue: %s') % @@ -643,7 +654,12 @@ def cancel_consumer_thread(self): pass self.consumer_thread = None - def publisher_send(self, cls, topic, msg, **kwargs): + def wait_on_proxy_callbacks(self): + """Wait for all proxy callback threads to exit.""" + for proxy_cb in self.proxy_callbacks: + proxy_cb.wait() + + def publisher_send(self, cls, topic, msg, timeout=None, **kwargs): """Send to a publisher based on the publisher class""" def _error_callback(exc): @@ -653,7 +669,7 @@ def _error_callback(exc): def _publish(): publisher = cls(self.conf, self.channel, topic, **kwargs) - publisher.send(msg) + publisher.send(msg, timeout) self.ensure(_error_callback, _publish) @@ -681,9 +697,9 @@ def direct_send(self, msg_id, msg): """Send a 'direct' message""" self.publisher_send(DirectPublisher, msg_id, msg) - def topic_send(self, topic, msg): + def topic_send(self, topic, msg, timeout=None): """Send a 'topic' message""" - self.publisher_send(TopicPublisher, topic, msg) + self.publisher_send(TopicPublisher, topic, msg, timeout) def fanout_send(self, topic, msg): """Send a 'fanout' message""" @@ -691,7 +707,7 @@ def fanout_send(self, topic, msg): def notify_send(self, topic, msg, **kwargs): """Send a notify message on a topic""" - self.publisher_send(NotifyPublisher, topic, msg, **kwargs) + self.publisher_send(NotifyPublisher, topic, msg, None, **kwargs) def consume(self, limit=None): """Consume from all queues/consumers""" @@ -718,6 +734,7 @@ def create_consumer(self, topic, proxy, fanout=False): proxy_cb = rpc_amqp.ProxyCallback( self.conf, proxy, rpc_amqp.get_connection_pool(self.conf, Connection)) + self.proxy_callbacks.append(proxy_cb) if fanout: self.declare_fanout_consumer(topic, proxy_cb) @@ -729,8 +746,33 @@ def create_worker(self, topic, proxy, pool_name): proxy_cb = rpc_amqp.ProxyCallback( self.conf, proxy, rpc_amqp.get_connection_pool(self.conf, Connection)) + self.proxy_callbacks.append(proxy_cb) self.declare_topic_consumer(topic, proxy_cb, pool_name) + def join_consumer_pool(self, callback, pool_name, topic, + exchange_name=None): + """Register as a member of a group of consumers for a given topic from + the specified exchange. + + Exactly one member of a given pool will receive each message. + + A message will be delivered to multiple pools, if more than + one is created. 
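
For the timeout plumbing added above, a caller-side sketch mirroring the ConnectionContext pattern from amqp.py (topic and payload invented)::

    with ConnectionContext(conf, connection_pool) as conn:
        # timeout is in seconds; TopicPublisher.send() converts it into a
        # per-message AMQP TTL header of timeout * 1000 milliseconds.
        conn.topic_send('cinder-volume', rpc_common.serialize_msg(msg),
                        timeout=30)
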
+ """ + callback_wrapper = rpc_amqp.CallbackWrapper( + conf=self.conf, + callback=callback, + connection_pool=rpc_amqp.get_connection_pool(self.conf, + Connection), + ) + self.proxy_callbacks.append(callback_wrapper) + self.declare_topic_consumer( + queue_name=pool_name, + topic=topic, + exchange_name=exchange_name, + callback=callback_wrapper, + ) + def create_connection(conf, new=True): """Create a connection""" @@ -776,16 +818,17 @@ def cast_to_server(conf, context, server_params, topic, msg): def fanout_cast_to_server(conf, context, server_params, topic, msg): """Sends a message on a fanout exchange to a specific server.""" - return rpc_amqp.cast_to_server( + return rpc_amqp.fanout_cast_to_server( conf, context, server_params, topic, msg, rpc_amqp.get_connection_pool(conf, Connection)) -def notify(conf, context, topic, msg): +def notify(conf, context, topic, msg, envelope): """Sends a notification event on a topic.""" return rpc_amqp.notify( conf, context, topic, msg, - rpc_amqp.get_connection_pool(conf, Connection)) + rpc_amqp.get_connection_pool(conf, Connection), + envelope) def cleanup(): diff --git a/cinder/openstack/common/rpc/impl_qpid.py b/cinder/openstack/common/rpc/impl_qpid.py index 01f8a22c23..f6e38dd11f 100644 --- a/cinder/openstack/common/rpc/impl_qpid.py +++ b/cinder/openstack/common/rpc/impl_qpid.py @@ -1,6 +1,6 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright 2011 OpenStack LLC +# Copyright 2011 OpenStack Foundation # Copyright 2011 - 2012, Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -17,57 +17,44 @@ import functools import itertools -import logging import time -import uuid import eventlet import greenlet -import qpid.messaging -import qpid.messaging.exceptions +from oslo.config import cfg -from cinder.openstack.common import cfg from cinder.openstack.common.gettextutils import _ +from cinder.openstack.common import importutils from cinder.openstack.common import jsonutils +from cinder.openstack.common import log as logging from cinder.openstack.common.rpc import amqp as rpc_amqp from cinder.openstack.common.rpc import common as rpc_common +qpid_messaging = importutils.try_import("qpid.messaging") +qpid_exceptions = importutils.try_import("qpid.messaging.exceptions") + LOG = logging.getLogger(__name__) qpid_opts = [ cfg.StrOpt('qpid_hostname', default='localhost', help='Qpid broker hostname'), - cfg.StrOpt('qpid_port', - default='5672', + cfg.IntOpt('qpid_port', + default=5672, help='Qpid broker port'), + cfg.ListOpt('qpid_hosts', + default=['$qpid_hostname:$qpid_port'], + help='Qpid HA cluster host:port pairs'), cfg.StrOpt('qpid_username', default='', help='Username for qpid connection'), cfg.StrOpt('qpid_password', default='', - help='Password for qpid connection'), + help='Password for qpid connection', + secret=True), cfg.StrOpt('qpid_sasl_mechanisms', default='', help='Space separated list of SASL mechanisms to use for auth'), - cfg.BoolOpt('qpid_reconnect', - default=True, - help='Automatically reconnect'), - cfg.IntOpt('qpid_reconnect_timeout', - default=0, - help='Reconnection timeout in seconds'), - cfg.IntOpt('qpid_reconnect_limit', - default=0, - help='Max reconnections before giving up'), - cfg.IntOpt('qpid_reconnect_interval_min', - default=0, - help='Minimum seconds between reconnection attempts'), - cfg.IntOpt('qpid_reconnect_interval_max', - default=0, - help='Maximum seconds between reconnection attempts'), - cfg.IntOpt('qpid_reconnect_interval', - default=0, - help='Equivalent to setting max 
and min to the same value'), cfg.IntOpt('qpid_heartbeat', default=60, help='Seconds between connection keepalive heartbeats'), @@ -77,15 +64,33 @@ cfg.BoolOpt('qpid_tcp_nodelay', default=True, help='Disable Nagle algorithm'), + # NOTE(russellb) If any additional versions are added (beyond 1 and 2), + # this file could probably use some additional refactoring so that the + # differences between each version are split into different classes. + cfg.IntOpt('qpid_topology_version', + default=1, + help="The qpid topology version to use. Version 1 is what " + "was originally used by impl_qpid. Version 2 includes " + "some backwards-incompatible changes that allow broker " + "federation to work. Users should update to version 2 " + "when they are able to take everything down, as it " + "requires a clean break."), ] cfg.CONF.register_opts(qpid_opts) +def raise_invalid_topology_version(conf): + msg = (_("Invalid value for qpid_topology_version: %d") % + conf.qpid_topology_version) + LOG.error(msg) + raise Exception(msg) + + class ConsumerBase(object): """Consumer base class.""" - def __init__(self, session, callback, node_name, node_opts, + def __init__(self, conf, session, callback, node_name, node_opts, link_name, link_opts): """Declare a queue on an amqp session. @@ -103,27 +108,41 @@ def __init__(self, session, callback, node_name, node_opts, self.receiver = None self.session = None - addr_opts = { - "create": "always", - "node": { - "type": "topic", - "x-declare": { + if conf.qpid_topology_version == 1: + addr_opts = { + "create": "always", + "node": { + "type": "topic", + "x-declare": { + "durable": True, + "auto-delete": True, + }, + }, + "link": { "durable": True, - "auto-delete": True, + "x-declare": { + "durable": False, + "auto-delete": True, + "exclusive": False, + }, }, - }, - "link": { - "name": link_name, - "durable": True, - "x-declare": { - "durable": False, - "auto-delete": True, - "exclusive": False, + } + addr_opts["node"]["x-declare"].update(node_opts) + elif conf.qpid_topology_version == 2: + addr_opts = { + "link": { + "x-declare": { + "auto-delete": True, + "exclusive": False, + }, }, - }, - } - addr_opts["node"]["x-declare"].update(node_opts) + } + else: + raise_invalid_topology_version() + addr_opts["link"]["x-declare"].update(link_opts) + if link_name: + addr_opts["link"]["name"] = link_name self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts)) @@ -139,7 +158,8 @@ def consume(self): """Fetch the message and pass it to the callback object""" message = self.receiver.fetch() try: - self.callback(message.content) + msg = rpc_common.deserialize_msg(message.content) + self.callback(msg) except Exception: LOG.exception(_("Failed to process message... 
skipping it.")) finally: @@ -160,11 +180,26 @@ def __init__(self, conf, session, msg_id, callback): 'callback' is the callback to call when messages are received """ - super(DirectConsumer, self).__init__(session, callback, - "%s/%s" % (msg_id, msg_id), - {"type": "direct"}, - msg_id, - {"exclusive": True}) + link_opts = { + "auto-delete": conf.amqp_auto_delete, + "exclusive": True, + "durable": conf.amqp_durable_queues, + } + + if conf.qpid_topology_version == 1: + node_name = "%s/%s" % (msg_id, msg_id) + node_opts = {"type": "direct"} + link_name = msg_id + elif conf.qpid_topology_version == 2: + node_name = "amq.direct/%s" % msg_id + node_opts = {} + link_name = None + else: + raise_invalid_topology_version() + + super(DirectConsumer, self).__init__(conf, session, callback, + node_name, node_opts, link_name, + link_opts) class TopicConsumer(ConsumerBase): @@ -182,9 +217,20 @@ def __init__(self, conf, session, topic, callback, name=None, """ exchange_name = exchange_name or rpc_amqp.get_control_exchange(conf) - super(TopicConsumer, self).__init__(session, callback, - "%s/%s" % (exchange_name, topic), - {}, name or topic, {}) + link_opts = { + "auto-delete": conf.amqp_auto_delete, + "durable": conf.amqp_durable_queues, + } + + if conf.qpid_topology_version == 1: + node_name = "%s/%s" % (exchange_name, topic) + elif conf.qpid_topology_version == 2: + node_name = "amq.topic/topic/%s/%s" % (exchange_name, topic) + else: + raise_invalid_topology_version() + + super(TopicConsumer, self).__init__(conf, session, callback, node_name, + {}, name or topic, link_opts) class FanoutConsumer(ConsumerBase): @@ -198,40 +244,53 @@ def __init__(self, conf, session, topic, callback): 'callback' is the callback to call when messages are received """ - super(FanoutConsumer, self).__init__( - session, callback, - "%s_fanout" % topic, - {"durable": False, "type": "fanout"}, - "%s_fanout_%s" % (topic, uuid.uuid4().hex), - {"exclusive": True}) + link_opts = {"exclusive": True} + + if conf.qpid_topology_version == 1: + node_name = "%s_fanout" % topic + node_opts = {"durable": False, "type": "fanout"} + elif conf.qpid_topology_version == 2: + node_name = "amq.topic/fanout/%s" % topic + node_opts = {} + else: + raise_invalid_topology_version() + + super(FanoutConsumer, self).__init__(conf, session, callback, + node_name, node_opts, None, + link_opts) class Publisher(object): """Base Publisher class""" - def __init__(self, session, node_name, node_opts=None): + def __init__(self, conf, session, node_name, node_opts=None): """Init the Publisher class with the exchange_name, routing_key, and other options """ self.sender = None self.session = session - addr_opts = { - "create": "always", - "node": { - "type": "topic", - "x-declare": { - "durable": False, - # auto-delete isn't implemented for exchanges in qpid, - # but put in here anyway - "auto-delete": True, + if conf.qpid_topology_version == 1: + addr_opts = { + "create": "always", + "node": { + "type": "topic", + "x-declare": { + "durable": False, + # auto-delete isn't implemented for exchanges in qpid, + # but put in here anyway + "auto-delete": True, + }, }, - }, - } - if node_opts: - addr_opts["node"]["x-declare"].update(node_opts) + } + if node_opts: + addr_opts["node"]["x-declare"].update(node_opts) - self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts)) + self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts)) + elif conf.qpid_topology_version == 2: + self.address = node_name + else: + raise_invalid_topology_version() 
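Both the consumer and publisher classes now branch on qpid_topology_version when building the qpid address string: version 1 declares its own exchanges, while version 2 rides on the broker's pre-defined amq.* exchanges so that federation can work. A standalone sketch of the two address shapes for a topic consumer (exchange and topic names are illustrative)::

    import json

    def topic_address(topology_version, exchange='openstack', topic='volume'):
        if topology_version == 1:
            node_name = '%s/%s' % (exchange, topic)
            opts = {'create': 'always',
                    'node': {'type': 'topic',
                             'x-declare': {'durable': True,
                                           'auto-delete': True}},
                    'link': {'durable': True,
                             'x-declare': {'durable': False,
                                           'auto-delete': True,
                                           'exclusive': False}}}
        else:
            # version 2: reuse the broker's built-in amq.topic exchange
            node_name = 'amq.topic/topic/%s/%s' % (exchange, topic)
            opts = {'link': {'x-declare': {'auto-delete': True,
                                           'exclusive': False}}}
        return '%s ; %s' % (node_name, json.dumps(opts))

    print topic_address(1)
    print topic_address(2)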
self.reconnect(session) @@ -248,8 +307,17 @@ class DirectPublisher(Publisher): """Publisher class for 'direct'""" def __init__(self, conf, session, msg_id): """Init a 'direct' publisher.""" - super(DirectPublisher, self).__init__(session, msg_id, - {"type": "Direct"}) + if conf.qpid_topology_version == 1: + node_name = msg_id + node_opts = {"type": "direct"} + elif conf.qpid_topology_version == 2: + node_name = "amq.direct/%s" % msg_id + node_opts = {} + else: + raise_invalid_topology_version() + + super(DirectPublisher, self).__init__(conf, session, node_name, + node_opts) class TopicPublisher(Publisher): @@ -258,8 +326,15 @@ def __init__(self, conf, session, topic): """init a 'topic' publisher. """ exchange_name = rpc_amqp.get_control_exchange(conf) - super(TopicPublisher, self).__init__(session, - "%s/%s" % (exchange_name, topic)) + + if conf.qpid_topology_version == 1: + node_name = "%s/%s" % (exchange_name, topic) + elif conf.qpid_topology_version == 2: + node_name = "amq.topic/topic/%s/%s" % (exchange_name, topic) + else: + raise_invalid_topology_version() + + super(TopicPublisher, self).__init__(conf, session, node_name) class FanoutPublisher(Publisher): @@ -267,9 +342,18 @@ class FanoutPublisher(Publisher): def __init__(self, conf, session, topic): """init a 'fanout' publisher. """ - super(FanoutPublisher, self).__init__( - session, - "%s_fanout" % topic, {"type": "fanout"}) + + if conf.qpid_topology_version == 1: + node_name = "%s_fanout" % topic + node_opts = {"type": "fanout"} + elif conf.qpid_topology_version == 2: + node_name = "amq.topic/fanout/%s" % topic + node_opts = {} + else: + raise_invalid_topology_version() + + super(FanoutPublisher, self).__init__(conf, session, node_name, + node_opts) class NotifyPublisher(Publisher): @@ -278,9 +362,17 @@ def __init__(self, conf, session, topic): """init a 'topic' publisher. """ exchange_name = rpc_amqp.get_control_exchange(conf) - super(NotifyPublisher, self).__init__(session, - "%s/%s" % (exchange_name, topic), - {"durable": True}) + node_opts = {"durable": True} + + if conf.qpid_topology_version == 1: + node_name = "%s/%s" % (exchange_name, topic) + elif conf.qpid_topology_version == 2: + node_name = "amq.topic/topic/%s/%s" % (exchange_name, topic) + else: + raise_invalid_topology_version() + + super(NotifyPublisher, self).__init__(conf, session, node_name, + node_opts) class Connection(object): @@ -289,55 +381,51 @@ class Connection(object): pool = None def __init__(self, conf, server_params=None): + if not qpid_messaging: + raise ImportError("Failed to import qpid.messaging") + self.session = None self.consumers = {} self.consumer_thread = None + self.proxy_callbacks = [] self.conf = conf - if server_params is None: - server_params = {} - - default_params = dict(hostname=self.conf.qpid_hostname, - port=self.conf.qpid_port, - username=self.conf.qpid_username, - password=self.conf.qpid_password) + if server_params and 'hostname' in server_params: + # NOTE(russellb) This enables support for cast_to_server. 
+ server_params['qpid_hosts'] = [ + '%s:%d' % (server_params['hostname'], + server_params.get('port', 5672)) + ] + + params = { + 'qpid_hosts': self.conf.qpid_hosts, + 'username': self.conf.qpid_username, + 'password': self.conf.qpid_password, + } + params.update(server_params or {}) - params = server_params - for key in default_params.keys(): - params.setdefault(key, default_params[key]) + self.brokers = params['qpid_hosts'] + self.username = params['username'] + self.password = params['password'] + self.connection_create(self.brokers[0]) + self.reconnect() - self.broker = params['hostname'] + ":" + str(params['port']) + def connection_create(self, broker): # Create the connection - this does not open the connection - self.connection = qpid.messaging.Connection(self.broker) + self.connection = qpid_messaging.Connection(broker) # Check if flags are set and if so set them for the connection # before we call open - self.connection.username = params['username'] - self.connection.password = params['password'] + self.connection.username = self.username + self.connection.password = self.password + self.connection.sasl_mechanisms = self.conf.qpid_sasl_mechanisms - self.connection.reconnect = self.conf.qpid_reconnect - if self.conf.qpid_reconnect_timeout: - self.connection.reconnect_timeout = ( - self.conf.qpid_reconnect_timeout) - if self.conf.qpid_reconnect_limit: - self.connection.reconnect_limit = self.conf.qpid_reconnect_limit - if self.conf.qpid_reconnect_interval_max: - self.connection.reconnect_interval_max = ( - self.conf.qpid_reconnect_interval_max) - if self.conf.qpid_reconnect_interval_min: - self.connection.reconnect_interval_min = ( - self.conf.qpid_reconnect_interval_min) - if self.conf.qpid_reconnect_interval: - self.connection.reconnect_interval = ( - self.conf.qpid_reconnect_interval) + # Reconnection is done by self.reconnect() + self.connection.reconnect = False self.connection.heartbeat = self.conf.qpid_heartbeat - self.connection.protocol = self.conf.qpid_protocol + self.connection.transport = self.conf.qpid_protocol self.connection.tcp_nodelay = self.conf.qpid_tcp_nodelay - # Open is part of reconnect - - # NOTE(WGH) not sure we need this with the reconnect flags - self.reconnect() - def _register_consumer(self, consumer): self.consumers[str(consumer.get_receiver())] = consumer @@ -346,37 +434,51 @@ def _lookup_consumer(self, receiver): def reconnect(self): """Handles reconnecting and re-establishing sessions and queues""" - if self.connection.opened(): - try: - self.connection.close() - except qpid.messaging.exceptions.ConnectionError: - pass - + attempt = 0 + delay = 1 while True: + # Close the session if necessary + if self.connection.opened(): + try: + self.connection.close() + except qpid_exceptions.ConnectionError: + pass + + broker = self.brokers[attempt % len(self.brokers)] + attempt += 1 + try: + self.connection_create(broker) self.connection.open() - except qpid.messaging.exceptions.ConnectionError, e: - LOG.error(_('Unable to connect to AMQP server: %s'), e) - time.sleep(self.conf.qpid_reconnect_interval or 1) + except qpid_exceptions.ConnectionError, e: + msg_dict = dict(e=e, delay=delay) + msg = _("Unable to connect to AMQP server: %(e)s. 
" + "Sleeping %(delay)s seconds") % msg_dict + LOG.error(msg) + time.sleep(delay) + delay = min(2 * delay, 60) else: + LOG.info(_('Connected to AMQP server on %s'), broker) break - LOG.info(_('Connected to AMQP server on %s'), self.broker) - self.session = self.connection.session() - for consumer in self.consumers.itervalues(): - consumer.reconnect(self.session) - if self.consumers: + consumers = self.consumers + self.consumers = {} + + for consumer in consumers.itervalues(): + consumer.reconnect(self.session) + self._register_consumer(consumer) + LOG.debug(_("Re-established AMQP queues")) def ensure(self, error_callback, method, *args, **kwargs): while True: try: return method(*args, **kwargs) - except (qpid.messaging.exceptions.Empty, - qpid.messaging.exceptions.ConnectionError), e: + except (qpid_exceptions.Empty, + qpid_exceptions.ConnectionError), e: if error_callback: error_callback(e) self.reconnect() @@ -384,12 +486,14 @@ def ensure(self, error_callback, method, *args, **kwargs): def close(self): """Close/release this connection""" self.cancel_consumer_thread() + self.wait_on_proxy_callbacks() self.connection.close() self.connection = None def reset(self): """Reset a connection so it can be used again""" self.cancel_consumer_thread() + self.wait_on_proxy_callbacks() self.session.close() self.session = self.connection.session() self.consumers = {} @@ -414,9 +518,9 @@ def iterconsume(self, limit=None, timeout=None): """Return an iterator that will consume from all queues/consumers""" def _error_callback(exc): - if isinstance(exc, qpid.messaging.exceptions.Empty): - LOG.exception(_('Timed out waiting for RPC response: %s') % - str(exc)) + if isinstance(exc, qpid_exceptions.Empty): + LOG.debug(_('Timed out waiting for RPC response: %s') % + str(exc)) raise rpc_common.Timeout() else: LOG.exception(_('Failed to consume message from queue: %s') % @@ -444,6 +548,11 @@ def cancel_consumer_thread(self): pass self.consumer_thread = None + def wait_on_proxy_callbacks(self): + """Wait for all proxy callback threads to exit.""" + for proxy_cb in self.proxy_callbacks: + proxy_cb.wait() + def publisher_send(self, cls, topic, msg): """Send to a publisher based on the publisher class""" @@ -482,9 +591,20 @@ def direct_send(self, msg_id, msg): """Send a 'direct' message""" self.publisher_send(DirectPublisher, msg_id, msg) - def topic_send(self, topic, msg): + def topic_send(self, topic, msg, timeout=None): """Send a 'topic' message""" - self.publisher_send(TopicPublisher, topic, msg) + # + # We want to create a message with attributes, e.g. a TTL. We + # don't really need to keep 'msg' in its JSON format any longer + # so let's create an actual qpid message here and get some + # value-add on the go. + # + # WARNING: Request timeout happens to be in the same units as + # qpid's TTL (seconds). If this changes in the future, then this + # will need to be altered accordingly. 
+ # + qpid_message = qpid_messaging.Message(content=msg, ttl=timeout) + self.publisher_send(TopicPublisher, topic, qpid_message) def fanout_send(self, topic, msg): """Send a 'fanout' message""" @@ -519,6 +639,7 @@ def create_consumer(self, topic, proxy, fanout=False): proxy_cb = rpc_amqp.ProxyCallback( self.conf, proxy, rpc_amqp.get_connection_pool(self.conf, Connection)) + self.proxy_callbacks.append(proxy_cb) if fanout: consumer = FanoutConsumer(self.conf, self.session, topic, proxy_cb) @@ -534,6 +655,7 @@ def create_worker(self, topic, proxy, pool_name): proxy_cb = rpc_amqp.ProxyCallback( self.conf, proxy, rpc_amqp.get_connection_pool(self.conf, Connection)) + self.proxy_callbacks.append(proxy_cb) consumer = TopicConsumer(self.conf, self.session, topic, proxy_cb, name=pool_name) @@ -542,6 +664,34 @@ def create_worker(self, topic, proxy, pool_name): return consumer + def join_consumer_pool(self, callback, pool_name, topic, + exchange_name=None): + """Register as a member of a group of consumers for a given topic from + the specified exchange. + + Exactly one member of a given pool will receive each message. + + A message will be delivered to multiple pools, if more than + one is created. + """ + callback_wrapper = rpc_amqp.CallbackWrapper( + conf=self.conf, + callback=callback, + connection_pool=rpc_amqp.get_connection_pool(self.conf, + Connection), + ) + self.proxy_callbacks.append(callback_wrapper) + + consumer = TopicConsumer(conf=self.conf, + session=self.session, + topic=topic, + callback=callback_wrapper, + name=pool_name, + exchange_name=exchange_name) + + self._register_consumer(consumer) + return consumer + def create_connection(conf, new=True): """Create a connection""" @@ -592,10 +742,11 @@ def fanout_cast_to_server(conf, context, server_params, topic, msg): rpc_amqp.get_connection_pool(conf, Connection)) -def notify(conf, context, topic, msg): +def notify(conf, context, topic, msg, envelope): """Sends a notification event on a topic.""" return rpc_amqp.notify(conf, context, topic, msg, - rpc_amqp.get_connection_pool(conf, Connection)) + rpc_amqp.get_connection_pool(conf, Connection), + envelope) def cleanup(): diff --git a/cinder/openstack/common/rpc/impl_zmq.py b/cinder/openstack/common/rpc/impl_zmq.py index 570431778a..1d4aab768c 100644 --- a/cinder/openstack/common/rpc/impl_zmq.py +++ b/cinder/openstack/common/rpc/impl_zmq.py @@ -14,23 +14,25 @@ # License for the specific language governing permissions and limitations # under the License. +import os import pprint +import re import socket -import string import sys import types import uuid import eventlet -from eventlet.green import zmq import greenlet +from oslo.config import cfg -from cinder.openstack.common import cfg +from cinder.openstack.common import excutils from cinder.openstack.common.gettextutils import _ from cinder.openstack.common import importutils from cinder.openstack.common import jsonutils from cinder.openstack.common.rpc import common as rpc_common +zmq = importutils.try_import('eventlet.green.zmq') # for convenience, are not modified. pformat = pprint.pformat @@ -58,12 +60,13 @@ cfg.IntOpt('rpc_zmq_port', default=9501, help='ZeroMQ receiver listening port'), - cfg.IntOpt('rpc_zmq_port_pub', default=9502, - help='ZeroMQ fanout publisher port'), - cfg.IntOpt('rpc_zmq_contexts', default=1, help='Number of ZeroMQ contexts, defaults to 1'), + cfg.IntOpt('rpc_zmq_topic_backlog', default=None, + help='Maximum number of ingress messages to locally buffer ' + 'per topic. 
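The same importutils.try_import() guard used for qpid.messaging above is applied to eventlet.green.zmq here: importing the module no longer requires the binding to be installed, and the failure is deferred to first use. The pattern, reduced to its core (mirroring the guards in these files)::

    from cinder.openstack.common import importutils

    qpid_messaging = importutils.try_import('qpid.messaging')

    class Connection(object):
        def __init__(self):
            if not qpid_messaging:
                raise ImportError("Failed to import qpid.messaging")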
Default is unlimited.'), + cfg.StrOpt('rpc_zmq_ipc_dir', default='/var/run/openstack', help='Directory for holding IPC sockets'), @@ -73,44 +76,41 @@ ] -# These globals are defined in register_opts(conf), -# a mandatory initialization call -CONF = None +CONF = cfg.CONF +CONF.register_opts(zmq_opts) + ZMQ_CTX = None # ZeroMQ Context, must be global. matchmaker = None # memoized matchmaker object def _serialize(data): - """ - Serialization wrapper + """Serialization wrapper. + We prefer using JSON, but it cannot encode all types. Error if a developer passes us bad data. """ try: - return str(jsonutils.dumps(data, ensure_ascii=True)) + return jsonutils.dumps(data, ensure_ascii=True) except TypeError: - LOG.error(_("JSON serialization failed.")) - raise + with excutils.save_and_reraise_exception(): + LOG.error(_("JSON serialization failed.")) def _deserialize(data): - """ - Deserialization wrapper - """ + """Deserialization wrapper.""" LOG.debug(_("Deserializing: %s"), data) return jsonutils.loads(data) class ZmqSocket(object): - """ - A tiny wrapper around ZeroMQ to simplify the send/recv protocol - and connection management. + """A tiny wrapper around ZeroMQ. + Simplifies the send/recv protocol and connection management. Can be used as a Context (supports the 'with' statement). """ def __init__(self, addr, zmq_type, bind=True, subscribe=None): - self.sock = ZMQ_CTX.socket(zmq_type) + self.sock = _get_ctxt().socket(zmq_type) self.addr = addr self.type = zmq_type self.subscriptions = [] @@ -176,7 +176,7 @@ def close(self): return # We must unsubscribe, or we'll leak descriptors. - if len(self.subscriptions) > 0: + if self.subscriptions: for f in self.subscriptions: try: self.sock.setsockopt(zmq.UNSUBSCRIBE, f) @@ -184,33 +184,46 @@ def close(self): pass self.subscriptions = [] - # Linger -1 prevents lost/dropped messages try: - self.sock.close(linger=-1) + # Default is to linger + self.sock.close() except Exception: - pass + # While this is a bad thing to happen, + # it would be much worse if some of the code calling this + # were to fail. For now, lets log, and later evaluate + # if we can safely raise here. 
+ LOG.error("ZeroMQ socket could not be closed.") self.sock = None - def recv(self): + def recv(self, **kwargs): if not self.can_recv: raise RPCException(_("You cannot recv on this socket.")) - return self.sock.recv_multipart() + return self.sock.recv_multipart(**kwargs) - def send(self, data): + def send(self, data, **kwargs): if not self.can_send: raise RPCException(_("You cannot send on this socket.")) - self.sock.send_multipart(data) + self.sock.send_multipart(data, **kwargs) class ZmqClient(object): """Client for ZMQ sockets.""" - def __init__(self, addr, socket_type=zmq.PUSH, bind=False): - self.outq = ZmqSocket(addr, socket_type, bind=bind) + def __init__(self, addr): + self.outq = ZmqSocket(addr, zmq.PUSH, bind=False) + + def cast(self, msg_id, topic, data, envelope): + msg_id = msg_id or 0 + + if not envelope: + self.outq.send(map(bytes, + (msg_id, topic, 'cast', _serialize(data)))) + return - def cast(self, msg_id, topic, data): - self.outq.send([str(topic), str(msg_id), str('cast'), - _serialize(data)]) + rpc_envelope = rpc_common.serialize_msg(data[1], envelope) + zmq_msg = reduce(lambda x, y: x + y, rpc_envelope.items()) + self.outq.send(map(bytes, + (msg_id, topic, 'impl_zmq_v2', data[0]) + zmq_msg)) def close(self): self.outq.close() @@ -253,37 +266,50 @@ def _get_response(self, ctx, proxy, topic, data): """Process a curried message and cast the result to topic.""" LOG.debug(_("Running func with context: %s"), ctx.to_dict()) data.setdefault('version', None) - data.setdefault('args', []) + data.setdefault('args', {}) try: result = proxy.dispatch( - ctx, data['version'], data['method'], **data['args']) + ctx, data['version'], data['method'], + data.get('namespace'), **data['args']) return ConsumerBase.normalize_reply(result, ctx.replies) except greenlet.GreenletExit: # ignore these since they are just from shutdowns pass + except rpc_common.ClientException as e: + LOG.debug(_("Expected exception during message handling (%s)") % + e._exc_info[1]) + return {'exc': + rpc_common.serialize_remote_exception(e._exc_info, + log_failure=False)} except Exception: + LOG.error(_("Exception during message handling")) return {'exc': rpc_common.serialize_remote_exception(sys.exc_info())} def reply(self, ctx, proxy, msg_id=None, context=None, topic=None, msg=None): """Reply to a casted call.""" - # Our real method is curried into msg['args'] + # NOTE(ewindisch): context kwarg exists for Grizzly compat. + # this may be able to be removed earlier than + # 'I' if ConsumerBase.process were refactored. + if type(msg) is list: + payload = msg[-1] + else: + payload = msg - child_ctx = RpcContext.unmarshal(msg[0]) response = ConsumerBase.normalize_reply( - self._get_response(child_ctx, proxy, topic, msg[1]), + self._get_response(ctx, proxy, topic, payload), ctx.replies) LOG.debug(_("Sending reply")) - cast(CONF, ctx, topic, { + _multi_send(_cast, ctx, topic, { 'method': '-process_reply', 'args': { - 'msg_id': msg_id, + 'msg_id': msg_id, # Include for Folsom compat. 'response': response } - }) + }, _msg_id=msg_id) class ConsumerBase(object): @@ -302,40 +328,36 @@ def normalize_reply(self, result, replies): else: return [result] - def consume(self, sock): - raise NotImplementedError() + def process(self, proxy, ctx, data): + data.setdefault('version', None) + data.setdefault('args', {}) - def process(self, style, target, proxy, ctx, data): # Method starting with - are # processed internally. 
(non-valid method name) - method = data['method'] + method = data.get('method') + if not method: + LOG.error(_("RPC message did not include method.")) + return # Internal method # uses internal context for safety. - if data['method'][0] == '-': - # For reply / process_reply - method = method[1:] - if method == 'reply': - self.private_ctx.reply(ctx, proxy, **data['args']) + if method == '-reply': + self.private_ctx.reply(ctx, proxy, **data['args']) return - data.setdefault('version', None) - data.setdefault('args', []) proxy.dispatch(ctx, data['version'], - data['method'], **data['args']) + data['method'], data.get('namespace'), **data['args']) class ZmqBaseReactor(ConsumerBase): - """ - A consumer class implementing a - centralized casting broker (PULL-PUSH) - for RoundRobin requests. + """A consumer class implementing a centralized casting broker (PULL-PUSH). + + Used for RoundRobin requests. """ def __init__(self, conf): super(ZmqBaseReactor, self).__init__() - self.mapping = {} self.proxies = {} self.threads = [] self.sockets = [] @@ -343,9 +365,8 @@ def __init__(self, conf): self.pool = eventlet.greenpool.GreenPool(conf.rpc_thread_pool_size) - def register(self, proxy, in_addr, zmq_type_in, out_addr=None, - zmq_type_out=None, in_bind=True, out_bind=True, - subscribe=None): + def register(self, proxy, in_addr, zmq_type_in, + in_bind=True, subscribe=None): LOG.info(_("Registering reactor")) @@ -361,21 +382,6 @@ def register(self, proxy, in_addr, zmq_type_in, out_addr=None, LOG.info(_("In reactor registered")) - if not out_addr: - return - - if zmq_type_out not in (zmq.PUSH, zmq.PUB): - raise RPCException("Bad output socktype") - - # Items push out. - outq = ZmqSocket(out_addr, zmq_type_out, bind=out_bind) - - self.mapping[inq] = outq - self.mapping[outq] = inq - self.sockets.append(outq) - - LOG.info(_("Out reactor registered")) - def consume_in_thread(self): def _consume(sock): LOG.info(_("Consuming socket")) @@ -400,87 +406,132 @@ def close(self): class ZmqProxy(ZmqBaseReactor): - """ - A consumer class implementing a - topic-based proxy, forwarding to - IPC sockets. + """A consumer class implementing a topic-based proxy. + + Forwards to IPC sockets. """ def __init__(self, conf): super(ZmqProxy, self).__init__(conf) + pathsep = set((os.path.sep or '', os.path.altsep or '', '/', '\\')) + self.badchars = re.compile(r'[%s]' % re.escape(''.join(pathsep))) self.topic_proxy = {} - ipc_dir = CONF.rpc_zmq_ipc_dir - - self.topic_proxy['zmq_replies'] = \ - ZmqSocket("ipc://%s/zmq_topic_zmq_replies" % (ipc_dir, ), - zmq.PUB, bind=True) - self.sockets.append(self.topic_proxy['zmq_replies']) - - self.topic_proxy['fanout~'] = \ - ZmqSocket("tcp://%s:%s" % (CONF.rpc_zmq_bind_address, - CONF.rpc_zmq_port_pub), zmq.PUB, bind=True) - self.sockets.append(self.topic_proxy['fanout~']) def consume(self, sock): ipc_dir = CONF.rpc_zmq_ipc_dir - #TODO(ewindisch): use zero-copy (i.e. references, not copying) - data = sock.recv() - topic, msg_id, style, in_msg = data - topic = topic.split('.', 1)[0] - - LOG.debug(_("CONSUMER GOT %s"), ' '.join(map(pformat, data))) + data = sock.recv(copy=False) + topic = data[1].bytes - # Handle zmq_replies magic if topic.startswith('fanout~'): sock_type = zmq.PUB - - # This doesn't change what is in the message, - # it only specifies that these messages go to - # the generic fanout topic. 
- topic = 'fanout~' + topic = topic.split('.', 1)[0] elif topic.startswith('zmq_replies'): sock_type = zmq.PUB - inside = _deserialize(in_msg) - msg_id = inside[-1]['args']['msg_id'] - response = inside[-1]['args']['response'] - LOG.debug(_("->response->%s"), response) - data = [str(msg_id), _serialize(response)] else: sock_type = zmq.PUSH - if not topic in self.topic_proxy: - outq = ZmqSocket("ipc://%s/zmq_topic_%s" % (ipc_dir, topic), - sock_type, bind=True) - self.topic_proxy[topic] = outq - self.sockets.append(outq) - LOG.info(_("Created topic proxy: %s"), topic) + if topic not in self.topic_proxy: + def publisher(waiter): + LOG.info(_("Creating proxy for topic: %s"), topic) - LOG.debug(_("ROUTER RELAY-OUT START %(data)s") % {'data': data}) - self.topic_proxy[topic].send(data) - LOG.debug(_("ROUTER RELAY-OUT SUCCEEDED %(data)s") % {'data': data}) + try: + # The topic is received over the network, + # don't trust this input. + if self.badchars.search(topic) is not None: + emsg = _("Topic contained dangerous characters.") + LOG.warn(emsg) + raise RPCException(emsg) + + out_sock = ZmqSocket("ipc://%s/zmq_topic_%s" % + (ipc_dir, topic), + sock_type, bind=True) + except RPCException: + waiter.send_exception(*sys.exc_info()) + return + + self.topic_proxy[topic] = eventlet.queue.LightQueue( + CONF.rpc_zmq_topic_backlog) + self.sockets.append(out_sock) + + # It takes some time for a pub socket to open, + # before we can have any faith in doing a send() to it. + if sock_type == zmq.PUB: + eventlet.sleep(.5) + + waiter.send(True) + + while(True): + data = self.topic_proxy[topic].get() + out_sock.send(data, copy=False) + + wait_sock_creation = eventlet.event.Event() + eventlet.spawn(publisher, wait_sock_creation) + + try: + wait_sock_creation.wait() + except RPCException: + LOG.error(_("Topic socket file creation failed.")) + return + try: + self.topic_proxy[topic].put_nowait(data) + except eventlet.queue.Full: + LOG.error(_("Local per-topic backlog buffer full for topic " + "%(topic)s. Dropping message.") % {'topic': topic}) -class CallbackReactor(ZmqBaseReactor): - """ - A consumer class passing messages to a callback - """ + def consume_in_thread(self): + """Runs the ZmqProxy service.""" + ipc_dir = CONF.rpc_zmq_ipc_dir + consume_in = "tcp://%s:%s" % \ + (CONF.rpc_zmq_bind_address, + CONF.rpc_zmq_port) + consumption_proxy = InternalContext(None) - def __init__(self, conf, callback): - self._cb = callback - super(CallbackReactor, self).__init__(conf) + try: + os.makedirs(ipc_dir) + except os.error: + if not os.path.isdir(ipc_dir): + with excutils.save_and_reraise_exception(): + LOG.error(_("Required IPC directory does not exist at" + " %s") % (ipc_dir, )) + try: + self.register(consumption_proxy, + consume_in, + zmq.PULL) + except zmq.ZMQError: + if os.access(ipc_dir, os.X_OK): + with excutils.save_and_reraise_exception(): + LOG.error(_("Permission denied to IPC directory at" + " %s") % (ipc_dir, )) + with excutils.save_and_reraise_exception(): + LOG.error(_("Could not create ZeroMQ receiver daemon. " + "Socket may already be in use.")) - def consume(self, sock): - data = sock.recv() - self._cb(data[3]) + super(ZmqProxy, self).consume_in_thread() -class ZmqReactor(ZmqBaseReactor): +def unflatten_envelope(packenv): + """Unflattens the RPC envelope. + + Takes a list and returns a dictionary. + i.e. [1,2,3,4] => {1: 2, 3: 4} """ - A consumer class implementing a - consumer for messages. 
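The rewritten ZmqProxy.consume() no longer writes straight to the outbound socket; each topic gets a greenthread draining a bounded eventlet LightQueue, and ingress messages are dropped once the backlog fills. The producer/consumer pattern on its own (queue size and messages are illustrative)::

    import eventlet
    import eventlet.queue

    q = eventlet.queue.LightQueue(2)  # rpc_zmq_topic_backlog stand-in

    def publisher():
        while True:
            data = q.get()            # blocks until something arrives
            # out_sock.send(data) would happen here

    eventlet.spawn(publisher)

    for msg in ('a', 'b', 'c', 'd'):
        try:
            q.put_nowait(msg)
        except eventlet.queue.Full:
            print 'dropped %s: backlog full' % msg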
Can also be - used as a 1:1 proxy + i = iter(packenv) + h = {} + try: + while True: + k = i.next() + h[k] = i.next() + except StopIteration: + return h + + +class ZmqReactor(ZmqBaseReactor): + """A consumer class implementing a consumer for messages. + + Can also be used as a 1:1 proxy """ def __init__(self, conf): @@ -490,99 +541,84 @@ def consume(self, sock): #TODO(ewindisch): use zero-copy (i.e. references, not copying) data = sock.recv() LOG.debug(_("CONSUMER RECEIVED DATA: %s"), data) - if sock in self.mapping: - LOG.debug(_("ROUTER RELAY-OUT %(data)s") % { - 'data': data}) - self.mapping[sock].send(data) - return - topic, msg_id, style, in_msg = data + proxy = self.proxies[sock] + + if data[2] == 'cast': # Legacy protocol + packenv = data[3] - ctx, request = _deserialize(in_msg) - ctx = RpcContext.unmarshal(ctx) + ctx, msg = _deserialize(packenv) + request = rpc_common.deserialize_msg(msg) + ctx = RpcContext.unmarshal(ctx) + elif data[2] == 'impl_zmq_v2': + packenv = data[4:] - proxy = self.proxies[sock] + msg = unflatten_envelope(packenv) + request = rpc_common.deserialize_msg(msg) + + # Unmarshal only after verifying the message. + ctx = RpcContext.unmarshal(data[3]) + else: + LOG.error(_("ZMQ Envelope version unsupported or unknown.")) + return - self.pool.spawn_n(self.process, style, topic, - proxy, ctx, request) + self.pool.spawn_n(self.process, proxy, ctx, request) class Connection(rpc_common.Connection): """Manages connections and threads.""" def __init__(self, conf): + self.topics = [] self.reactor = ZmqReactor(conf) - def _consume_fanout(self, reactor, topic, proxy, bind=False): - for topic, host in matchmaker.queues("publishers~%s" % (topic, )): - inaddr = "tcp://%s:%s" % (host, CONF.rpc_zmq_port) - reactor.register(proxy, inaddr, zmq.SUB, in_bind=bind) - - def declare_topic_consumer(self, topic, callback=None, - queue_name=None): - """declare_topic_consumer is a private method, but - it is being used by Quantum (Folsom). - This has been added compatibility. - """ - # Only consume on the base topic name. - topic = topic.split('.', 1)[0] - - if CONF.rpc_zmq_host in matchmaker.queues("fanout~%s" % (topic, )): - return - - reactor = CallbackReactor(CONF, callback) - self._consume_fanout(reactor, topic, None, bind=False) - def create_consumer(self, topic, proxy, fanout=False): - # Only consume on the base topic name. - topic = topic.split('.', 1)[0] - - LOG.info(_("Create Consumer for topic (%(topic)s)") % - {'topic': topic}) + # Register with matchmaker. + _get_matchmaker().register(topic, CONF.rpc_zmq_host) - # Consume direct-push fanout messages (relay to local consumers) + # Subscription scenarios if fanout: - # If we're not in here, we can't receive direct fanout messages - if CONF.rpc_zmq_host in matchmaker.queues(topic): - # Consume from all remote publishers. - self._consume_fanout(self.reactor, topic, proxy) - else: - LOG.warn("This service cannot receive direct PUSH fanout " - "messages without being known by the matchmaker.") - return - - # Configure consumer for direct pushes. - subscribe = (topic, fanout)[type(fanout) == str] sock_type = zmq.SUB - topic = 'fanout~' + topic - - inaddr = "tcp://127.0.0.1:%s" % (CONF.rpc_zmq_port_pub, ) + subscribe = ('', fanout)[type(fanout) == str] + topic = 'fanout~' + topic.split('.', 1)[0] else: sock_type = zmq.PULL subscribe = None + topic = '.'.join((topic, CONF.rpc_zmq_host)) + + if topic in self.topics: + LOG.info(_("Skipping topic registration. 
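With the v2 wire format, the RPC envelope dict is flattened into alternating key/value frames for the multipart send (see ZmqClient.cast() above) and rebuilt on receipt by unflatten_envelope(). Round-tripped with toy data::

    envelope = {'oslo.version': '2.0',
                'oslo.message': '{"method": "ping"}'}

    # dict -> (k1, v1, k2, v2, ...), as in ZmqClient.cast()
    flat = reduce(lambda x, y: x + y, envelope.items())

    # (k1, v1, k2, v2, ...) -> dict, as in unflatten_envelope()
    i = iter(flat)
    assert dict(zip(i, i)) == envelope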
Already registered.")) + return - # Receive messages from (local) proxy - inaddr = "ipc://%s/zmq_topic_%s" % \ - (CONF.rpc_zmq_ipc_dir, topic) + # Receive messages from (local) proxy + inaddr = "ipc://%s/zmq_topic_%s" % \ + (CONF.rpc_zmq_ipc_dir, topic) LOG.debug(_("Consumer is a zmq.%s"), ['PULL', 'SUB'][sock_type == zmq.SUB]) - # Consume messages from local rpc-zmq-receiver daemon. self.reactor.register(proxy, inaddr, sock_type, subscribe=subscribe, in_bind=False) + self.topics.append(topic) def close(self): + _get_matchmaker().stop_heartbeat() + for topic in self.topics: + _get_matchmaker().unregister(topic, CONF.rpc_zmq_host) + self.reactor.close() + self.topics = [] def wait(self): self.reactor.wait() def consume_in_thread(self): + _get_matchmaker().start_heartbeat() self.reactor.consume_in_thread() -def _cast(addr, context, msg_id, topic, msg, timeout=None): +def _cast(addr, context, topic, msg, timeout=None, envelope=False, + _msg_id=None): timeout_cast = timeout or CONF.rpc_cast_timeout payload = [RpcContext.marshal(context), msg] @@ -591,7 +627,7 @@ def _cast(addr, context, msg_id, topic, msg, timeout=None): conn = ZmqClient(addr) # assumes cast can't return an exception - conn.cast(msg_id, topic, payload) + conn.cast(_msg_id, topic, payload, envelope) except zmq.ZMQError: raise RPCException("Cast failed. ZMQ Socket Exception") finally: @@ -599,12 +635,13 @@ def _cast(addr, context, msg_id, topic, msg, timeout=None): conn.close() -def _call(addr, context, msg_id, topic, msg, timeout=None): +def _call(addr, context, topic, msg, timeout=None, + envelope=False): # timeout_response is how long we wait for a response timeout = timeout or CONF.rpc_response_timeout # The msg_id is used to track replies. - msg_id = str(uuid.uuid4().hex) + msg_id = uuid.uuid4().hex # Replies always come into the reply service. reply_topic = "zmq_replies.%s" % CONF.rpc_zmq_host @@ -616,8 +653,8 @@ def _call(addr, context, msg_id, topic, msg, timeout=None): 'method': '-reply', 'args': { 'msg_id': msg_id, - 'context': mcontext, 'topic': reply_topic, + # TODO(ewindisch): safe to remove mcontext in I. 'msg': [mcontext, msg] } } @@ -629,22 +666,36 @@ def _call(addr, context, msg_id, topic, msg, timeout=None): with Timeout(timeout, exception=rpc_common.Timeout): try: msg_waiter = ZmqSocket( - "ipc://%s/zmq_topic_zmq_replies" % CONF.rpc_zmq_ipc_dir, + "ipc://%s/zmq_topic_zmq_replies.%s" % + (CONF.rpc_zmq_ipc_dir, + CONF.rpc_zmq_host), zmq.SUB, subscribe=msg_id, bind=False ) LOG.debug(_("Sending cast")) - _cast(addr, context, msg_id, topic, payload) + _cast(addr, context, topic, payload, envelope) LOG.debug(_("Cast sent; Waiting reply")) # Blocks until receives reply msg = msg_waiter.recv() LOG.debug(_("Received message: %s"), msg) LOG.debug(_("Unpacking response")) - responses = _deserialize(msg[-1]) + + if msg[2] == 'cast': # Legacy version + raw_msg = _deserialize(msg[-1])[-1] + elif msg[2] == 'impl_zmq_v2': + rpc_envelope = unflatten_envelope(msg[4:]) + raw_msg = rpc_common.deserialize_msg(rpc_envelope) + else: + raise rpc_common.UnsupportedRpcEnvelopeVersion( + _("Unsupported or unknown ZMQ envelope returned.")) + + responses = raw_msg['args']['response'] # ZMQError trumps the Timeout error. 
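On the zmq driver, call() is implemented as a cast of a curried '-reply' request: the caller subscribes to its per-host zmq_replies topic using the msg_id as the subscription filter, and the responder casts the result back. The payload assembled in _call(), shown with illustrative values::

    import uuid

    msg_id = uuid.uuid4().hex
    reply_topic = 'zmq_replies.myhost'   # zmq_replies.<rpc_zmq_host>
    mcontext = {'user': 'demo'}          # RpcContext.marshal() stand-in
    msg = {'method': 'ping', 'args': {}}

    payload = {
        'method': '-reply',
        'args': {
            'msg_id': msg_id,            # replies are matched on this id
            'topic': reply_topic,
            'msg': [mcontext, msg],
        },
    }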
except zmq.ZMQError: raise RPCException("ZMQ Socket Error") + except (IndexError, KeyError): + raise RPCException(_("RPC Message Invalid.")) finally: if 'msg_waiter' in vars(): msg_waiter.close() @@ -660,24 +711,24 @@ def _call(addr, context, msg_id, topic, msg, timeout=None): return responses[-1] -def _multi_send(method, context, topic, msg, timeout=None): - """ - Wraps the sending of messages, - dispatches to the matchmaker and sends - message to all relevant hosts. +def _multi_send(method, context, topic, msg, timeout=None, + envelope=False, _msg_id=None): + """Wraps the sending of messages. + + Dispatches to the matchmaker and sends message to all relevant hosts. """ conf = CONF LOG.debug(_("%(msg)s") % {'msg': ' '.join(map(pformat, (topic, msg)))}) - queues = matchmaker.queues(topic) + queues = _get_matchmaker().queues(topic) LOG.debug(_("Sending message(s) to: %s"), queues) # Don't stack if we have no matchmaker results - if len(queues) == 0: + if not queues: LOG.warn(_("No matchmaker results. Not casting.")) # While not strictly a timeout, callers know how to handle # this exception and a timeout isn't too big a lie. - raise rpc_common.Timeout, "No match from matchmaker." + raise rpc_common.Timeout(_("No match from matchmaker.")) # This supports brokerless fanout (addresses > 1) for queue in queues: @@ -686,9 +737,11 @@ def _multi_send(method, context, topic, msg, timeout=None): if method.__name__ == '_cast': eventlet.spawn_n(method, _addr, context, - _topic, _topic, msg, timeout) + _topic, msg, timeout, envelope, + _msg_id) return - return method(_addr, context, _topic, _topic, msg, timeout) + return method(_addr, context, _topic, msg, timeout, + envelope) def create_connection(conf, new=True): @@ -715,62 +768,52 @@ def fanout_cast(conf, context, topic, msg, **kwargs): """Send a message to all listening and expect no reply.""" # NOTE(ewindisch): fanout~ is used because it avoid splitting on . # and acts as a non-subtle hint to the matchmaker and ZmqProxy. - _multi_send(_cast, context, 'fanout~' + str(topic), msg, **kwargs) + LOG.error(_('topic is %s.') % topic) + if topic: + _multi_send(_cast, context, 'fanout~' + str(topic), msg, **kwargs) -def notify(conf, context, topic, msg, **kwargs): - """ - Send notification event. +def notify(conf, context, topic, msg, envelope): + """Send notification event. + Notifications are sent to topic-priority. This differs from the AMQP drivers which send to topic.priority. """ # NOTE(ewindisch): dot-priority in rpc notifier does not # work with our assumptions. - topic.replace('.', '-') - cast(conf, context, topic, msg, **kwargs) + topic = topic.replace('.', '-') + cast(conf, context, topic, msg, envelope=envelope) def cleanup(): """Clean up resources in use by implementation.""" global ZMQ_CTX + if ZMQ_CTX: + ZMQ_CTX.term() + ZMQ_CTX = None + global matchmaker matchmaker = None - ZMQ_CTX.term() - ZMQ_CTX = None -def register_opts(conf): - """Registration of options for this driver.""" - #NOTE(ewindisch): ZMQ_CTX and matchmaker - # are initialized here as this is as good - # an initialization method as any. +def _get_ctxt(): + if not zmq: + raise ImportError("Failed to import eventlet.green.zmq") - # We memoize through these globals global ZMQ_CTX - global matchmaker - global CONF - - if not CONF: - conf.register_opts(zmq_opts) - CONF = conf - # Don't re-set, if this method is called twice. 
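The notify() change above fixes a classic Python slip: str.replace() returns a new string rather than mutating in place, so the old code computed the dashed topic and threw it away. In miniature::

    topic = 'notifications.info'
    topic.replace('.', '-')           # old code: result discarded
    topic = topic.replace('.', '-')   # fixed
    assert topic == 'notifications-info'

The same discarded-result pattern appears to survive in _get_matchmaker() below, where mm.replace('matchmaker', 'matchmaker_ring') is never assigned back to mm.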
if not ZMQ_CTX: - ZMQ_CTX = zmq.Context(conf.rpc_zmq_contexts) - if not matchmaker: - # rpc_zmq_matchmaker should be set to a 'module.Class' - mm_path = conf.rpc_zmq_matchmaker.split('.') - mm_module = '.'.join(mm_path[:-1]) - mm_class = mm_path[-1] - - # Only initialize a class. - if mm_path[-1][0] not in string.ascii_uppercase: - LOG.error(_("Matchmaker could not be loaded.\n" - "rpc_zmq_matchmaker is not a class.")) - raise RPCException(_("Error loading Matchmaker.")) - - mm_impl = importutils.import_module(mm_module) - mm_constructor = getattr(mm_impl, mm_class) - matchmaker = mm_constructor() + ZMQ_CTX = zmq.Context(CONF.rpc_zmq_contexts) + return ZMQ_CTX -register_opts(cfg.CONF) +def _get_matchmaker(*args, **kwargs): + global matchmaker + if not matchmaker: + mm = CONF.rpc_zmq_matchmaker + if mm.endswith('matchmaker.MatchMakerRing'): + mm.replace('matchmaker', 'matchmaker_ring') + LOG.warn(_('rpc_zmq_matchmaker = %(orig)s is deprecated; use' + ' %(new)s instead') % dict( + orig=CONF.rpc_zmq_matchmaker, new=mm)) + matchmaker = importutils.import_object(mm, *args, **kwargs) + return matchmaker diff --git a/cinder/openstack/common/rpc/matchmaker.py b/cinder/openstack/common/rpc/matchmaker.py index ffe4870aa4..f12c14dfd1 100644 --- a/cinder/openstack/common/rpc/matchmaker.py +++ b/cinder/openstack/common/rpc/matchmaker.py @@ -21,10 +21,12 @@ import contextlib import itertools import json -import logging -from cinder.openstack.common import cfg +import eventlet +from oslo.config import cfg + from cinder.openstack.common.gettextutils import _ +from cinder.openstack.common import log as logging matchmaker_opts = [ @@ -32,6 +34,12 @@ cfg.StrOpt('matchmaker_ringfile', default='/etc/nova/matchmaker_ring.json', help='Matchmaker ring file (JSON)'), + cfg.IntOpt('matchmaker_heartbeat_freq', + default=300, + help='Heartbeat frequency'), + cfg.IntOpt('matchmaker_heartbeat_ttl', + default=600, + help='Heartbeat time-to-live.'), ] CONF = cfg.CONF @@ -69,12 +77,73 @@ def test(self, key): class MatchMakerBase(object): - """Match Maker Base Class.""" - + """ + Match Maker Base Class. + Build off HeartbeatMatchMakerBase if building a + heartbeat-capable MatchMaker. + """ def __init__(self): # Array of tuples. Index [2] toggles negation, [3] is last-if-true self.bindings = [] + self.no_heartbeat_msg = _('Matchmaker does not implement ' + 'registration or heartbeat.') + + def register(self, key, host): + """ + Register a host on a backend. + Heartbeats, if applicable, may keepalive registration. + """ + pass + + def ack_alive(self, key, host): + """ + Acknowledge that a key.host is alive. + Used internally for updating heartbeats, + but may also be used publically to acknowledge + a system is alive (i.e. rpc message successfully + sent to host) + """ + pass + + def is_alive(self, topic, host): + """ + Checks if a host is alive. + """ + pass + + def expire(self, topic, host): + """ + Explicitly expire a host's registration. + """ + pass + + def send_heartbeats(self): + """ + Send all heartbeats. + Use start_heartbeat to spawn a heartbeat greenthread, + which loops this method. + """ + pass + + def unregister(self, key, host): + """ + Unregister a topic. + """ + pass + + def start_heartbeat(self): + """ + Spawn heartbeat greenthread. + """ + pass + + def stop_heartbeat(self): + """ + Destroys the heartbeat greenthread. 
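The enlarged MatchMakerBase API above sketches a registration-plus-heartbeat contract; drivers that can keep registrations alive build on HeartbeatMatchMakerBase below. The expected lifecycle from a service's point of view (driver, topic, and host names are illustrative; see Connection.create_consumer()/close() in impl_zmq for the real call sites)::

    matchmaker = _get_matchmaker()
    matchmaker.register('volume', 'host1')   # topic, host
    matchmaker.start_heartbeat()             # greenthread looping send_heartbeats()

    # ... serve traffic ...

    matchmaker.stop_heartbeat()
    matchmaker.unregister('volume', 'host1')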
+ """ + pass + def add_binding(self, binding, rule, last=True): self.bindings.append((binding, rule, False, last)) @@ -98,6 +167,103 @@ def queues(self, key): return workers +class HeartbeatMatchMakerBase(MatchMakerBase): + """ + Base for a heart-beat capable MatchMaker. + Provides common methods for registering, + unregistering, and maintaining heartbeats. + """ + def __init__(self): + self.hosts = set() + self._heart = None + self.host_topic = {} + + super(HeartbeatMatchMakerBase, self).__init__() + + def send_heartbeats(self): + """ + Send all heartbeats. + Use start_heartbeat to spawn a heartbeat greenthread, + which loops this method. + """ + for key, host in self.host_topic: + self.ack_alive(key, host) + + def ack_alive(self, key, host): + """ + Acknowledge that a host.topic is alive. + Used internally for updating heartbeats, + but may also be used publically to acknowledge + a system is alive (i.e. rpc message successfully + sent to host) + """ + raise NotImplementedError("Must implement ack_alive") + + def backend_register(self, key, host): + """ + Implements registration logic. + Called by register(self,key,host) + """ + raise NotImplementedError("Must implement backend_register") + + def backend_unregister(self, key, key_host): + """ + Implements de-registration logic. + Called by unregister(self,key,host) + """ + raise NotImplementedError("Must implement backend_unregister") + + def register(self, key, host): + """ + Register a host on a backend. + Heartbeats, if applicable, may keepalive registration. + """ + self.hosts.add(host) + self.host_topic[(key, host)] = host + key_host = '.'.join((key, host)) + + self.backend_register(key, key_host) + + self.ack_alive(key, host) + + def unregister(self, key, host): + """ + Unregister a topic. + """ + if (key, host) in self.host_topic: + del self.host_topic[(key, host)] + + self.hosts.discard(host) + self.backend_unregister(key, '.'.join((key, host))) + + LOG.info(_("Matchmaker unregistered: %s, %s" % (key, host))) + + def start_heartbeat(self): + """ + Implementation of MatchMakerBase.start_heartbeat + Launches greenthread looping send_heartbeats(), + yielding for CONF.matchmaker_heartbeat_freq seconds + between iterations. + """ + if len(self.hosts) == 0: + raise MatchMakerException( + _("Register before starting heartbeat.")) + + def do_heartbeat(): + while True: + self.send_heartbeats() + eventlet.sleep(CONF.matchmaker_heartbeat_freq) + + self._heart = eventlet.spawn(do_heartbeat) + + def stop_heartbeat(self): + """ + Destroys the heartbeat greenthread. + """ + if self._heart: + self._heart.kill() + + class DirectBinding(Binding): """ Specifies a host in the key via a '.' character @@ -132,14 +298,6 @@ def test(self, key): return False -class PublisherBinding(Binding): - """Match on publishers keys, where key starts with 'publishers.' string.""" - def test(self, key): - if key.startswith('publishers~'): - return True - return False - - class StubExchange(Exchange): """Exchange that does nothing.""" def run(self, key): @@ -190,23 +348,6 @@ def run(self, key): return [(key + '.' + host, host)] -class PublisherRingExchange(RingExchange): - """Fanout Exchange based on a hashmap.""" - def __init__(self, ring=None): - super(PublisherRingExchange, self).__init__(ring) - - def run(self, key): - # Assume starts with "publishers~", strip it for lookup. 
- nkey = key.split('publishers~')[1:][0] - if not self._ring_has(nkey): - LOG.warn( - _("No key defining hosts for topic '%s', " - "see ringfile") % (nkey, ) - ) - return [] - return map(lambda x: (key + '.' + x, x), self.ring[nkey]) - - class FanoutRingExchange(RingExchange): """Fanout Exchange based on a hashmap.""" def __init__(self, ring=None): @@ -221,30 +362,30 @@ def run(self, key): "see ringfile") % (nkey, ) ) return [] - return map(lambda x: (key + '.' + x, x), self.ring[nkey] + - ['localhost']) + return map(lambda x: (key + '.' + x, x), self.ring[nkey]) class LocalhostExchange(Exchange): """Exchange where all direct topics are local.""" - def __init__(self): + def __init__(self, host='localhost'): + self.host = host super(Exchange, self).__init__() def run(self, key): - return [(key.split('.')[0] + '.localhost', 'localhost')] + return [('.'.join((key.split('.')[0], self.host)), self.host)] class DirectExchange(Exchange): """ Exchange where all topic keys are split, sending to second half. - i.e. "compute.host" sends a message to "compute" running on "host" + i.e. "compute.host" sends a message to "compute.host" running on "host" """ def __init__(self): super(Exchange, self).__init__() def run(self, key): - b, e = key.split('.', 1) - return [(b, e)] + e = key.split('.', 1)[1] + return [(key, e)] class MatchMakerRing(MatchMakerBase): @@ -253,7 +394,6 @@ class MatchMakerRing(MatchMakerBase): """ def __init__(self, ring=None): super(MatchMakerRing, self).__init__() - self.add_binding(PublisherBinding(), PublisherRingExchange(ring)) self.add_binding(FanoutBinding(), FanoutRingExchange(ring)) self.add_binding(DirectBinding(), DirectExchange()) self.add_binding(TopicBinding(), RoundRobinRingExchange(ring)) @@ -264,12 +404,11 @@ class MatchMakerLocalhost(MatchMakerBase): Match Maker where all bare topics resolve to localhost. Useful for testing. """ - def __init__(self): + def __init__(self, host='localhost'): super(MatchMakerLocalhost, self).__init__() - self.add_binding(PublisherBinding(), LocalhostExchange()) - self.add_binding(FanoutBinding(), LocalhostExchange()) + self.add_binding(FanoutBinding(), LocalhostExchange(host)) self.add_binding(DirectBinding(), DirectExchange()) - self.add_binding(TopicBinding(), LocalhostExchange()) + self.add_binding(TopicBinding(), LocalhostExchange(host)) class MatchMakerStub(MatchMakerBase): @@ -281,7 +420,6 @@ class MatchMakerStub(MatchMakerBase): def __init__(self): super(MatchMakerLocalhost, self).__init__() - self.add_binding(PublisherBinding(), StubExchange()) self.add_binding(FanoutBinding(), StubExchange()) self.add_binding(DirectBinding(), StubExchange()) self.add_binding(TopicBinding(), StubExchange()) diff --git a/cinder/openstack/common/rpc/matchmaker_redis.py b/cinder/openstack/common/rpc/matchmaker_redis.py new file mode 100644 index 0000000000..87f9fb2956 --- /dev/null +++ b/cinder/openstack/common/rpc/matchmaker_redis.py @@ -0,0 +1,149 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +""" +The MatchMaker classes should accept a Topic or Fanout exchange key and +return keys for direct exchanges, per (approximate) AMQP parlance. +""" + +from oslo.config import cfg + +from cinder.openstack.common import importutils +from cinder.openstack.common import log as logging +from cinder.openstack.common.rpc import matchmaker as mm_common + +redis = importutils.try_import('redis') + + +matchmaker_redis_opts = [ + cfg.StrOpt('host', + default='127.0.0.1', + help='Host to locate redis'), + cfg.IntOpt('port', + default=6379, + help='Use this port to connect to redis host.'), + cfg.StrOpt('password', + default=None, + help='Password for Redis server. (optional)'), +] + +CONF = cfg.CONF +opt_group = cfg.OptGroup(name='matchmaker_redis', + title='Options for Redis-based MatchMaker') +CONF.register_group(opt_group) +CONF.register_opts(matchmaker_redis_opts, opt_group) +LOG = logging.getLogger(__name__) + + +class RedisExchange(mm_common.Exchange): + def __init__(self, matchmaker): + self.matchmaker = matchmaker + self.redis = matchmaker.redis + super(RedisExchange, self).__init__() + + +class RedisTopicExchange(RedisExchange): + """ + Exchange where all topic keys are split, sending to second half. + i.e. "compute.host" sends a message to "compute" running on "host" + """ + def run(self, topic): + while True: + member_name = self.redis.srandmember(topic) + + if not member_name: + # If this happens, there are no + # longer any members. + break + + if not self.matchmaker.is_alive(topic, member_name): + continue + + host = member_name.split('.', 1)[1] + return [(member_name, host)] + return [] + + +class RedisFanoutExchange(RedisExchange): + """ + Return a list of all hosts. + """ + def run(self, topic): + topic = topic.split('~', 1)[1] + hosts = self.redis.smembers(topic) + good_hosts = filter( + lambda host: self.matchmaker.is_alive(topic, host), hosts) + + return [(x, x.split('.', 1)[1]) for x in good_hosts] + + +class MatchMakerRedis(mm_common.HeartbeatMatchMakerBase): + """ + MatchMaker registering and looking-up hosts with a Redis server. + """ + def __init__(self): + super(MatchMakerRedis, self).__init__() + + if not redis: + raise ImportError("Failed to import module redis.") + + self.redis = redis.StrictRedis( + host=CONF.matchmaker_redis.host, + port=CONF.matchmaker_redis.port, + password=CONF.matchmaker_redis.password) + + self.add_binding(mm_common.FanoutBinding(), RedisFanoutExchange(self)) + self.add_binding(mm_common.DirectBinding(), mm_common.DirectExchange()) + self.add_binding(mm_common.TopicBinding(), RedisTopicExchange(self)) + + def ack_alive(self, key, host): + topic = "%s.%s" % (key, host) + if not self.redis.expire(topic, CONF.matchmaker_heartbeat_ttl): + # If we could not update the expiration, the key + # might have been pruned. Re-register, creating a new + # key in Redis. + self.register(self.topic_host[host], host) + + def is_alive(self, topic, host): + if self.redis.ttl(host) == -1: + self.expire(topic, host) + return False + return True + + def expire(self, topic, host): + with self.redis.pipeline() as pipe: + pipe.multi() + pipe.delete(host) + pipe.srem(topic, host) + pipe.execute() + + def backend_register(self, key, key_host): + with self.redis.pipeline() as pipe: + pipe.multi() + pipe.sadd(key, key_host) + + # No value is needed, we just + # care if it exists. Sets aren't viable + # because only keys can expire. 
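MatchMakerRedis stores one set per topic whose members are 'topic.host' strings, plus a per-member string key whose TTL acts as the liveness beacon: registration writes both, ack_alive() refreshes the TTL, and lookups discard members whose beacon has expired. The layout in raw redis-py calls (host and topic values are illustrative)::

    import redis

    r = redis.StrictRedis(host='127.0.0.1', port=6379)

    # backend_register('volume', 'volume.host1')
    r.sadd('volume', 'volume.host1')
    r.set('volume.host1', '')

    # ack_alive(): refresh the beacon (matchmaker_heartbeat_ttl)
    r.expire('volume.host1', 600)

    # RedisTopicExchange.run(): pick a member, check it is still alive
    member = r.srandmember('volume')
    alive = member is not None and r.ttl(member) != -1

Note that the re-register path in ack_alive() references self.topic_host, while the heartbeat base class defines host_topic; that mismatch looks like a latent bug.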
+            pipe.set(key_host, '')
+
+            pipe.execute()
+
+    def backend_unregister(self, key, key_host):
+        with self.redis.pipeline() as pipe:
+            pipe.multi()
+            pipe.srem(key, key_host)
+            pipe.delete(key_host)
+            pipe.execute()
diff --git a/cinder/openstack/common/rpc/matchmaker_ring.py b/cinder/openstack/common/rpc/matchmaker_ring.py
new file mode 100644
index 0000000000..9c6e0a9947
--- /dev/null
+++ b/cinder/openstack/common/rpc/matchmaker_ring.py
@@ -0,0 +1,110 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011-2013 Cloudscaling Group, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+The MatchMaker classes should accept a Topic or Fanout exchange key and
+return keys for direct exchanges, per (approximate) AMQP parlance.
+"""
+
+import itertools
+import json
+
+from oslo.config import cfg
+
+from cinder.openstack.common.gettextutils import _  # noqa
+from cinder.openstack.common import log as logging
+from cinder.openstack.common.rpc import matchmaker as mm
+
+
+matchmaker_opts = [
+    # Matchmaker ring file
+    cfg.StrOpt('ringfile',
+               deprecated_name='matchmaker_ringfile',
+               deprecated_group='DEFAULT',
+               default='/etc/oslo/matchmaker_ring.json',
+               help='Matchmaker ring file (JSON)'),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(matchmaker_opts, 'matchmaker_ring')
+LOG = logging.getLogger(__name__)
+
+
+class RingExchange(mm.Exchange):
+    """Match Maker where hosts are loaded from a static JSON formatted file.
+
+    __init__ takes optional ring dictionary argument, otherwise
+    loads the ringfile from CONF.matchmaker_ring.ringfile.
+    """
+    def __init__(self, ring=None):
+        super(RingExchange, self).__init__()
+
+        if ring:
+            self.ring = ring
+        else:
+            fh = open(CONF.matchmaker_ring.ringfile, 'r')
+            self.ring = json.load(fh)
+            fh.close()
+
+        self.ring0 = {}
+        for k in self.ring.keys():
+            self.ring0[k] = itertools.cycle(self.ring[k])
+
+    def _ring_has(self, key):
+        if key in self.ring0:
+            return True
+        return False
+
+
+class RoundRobinRingExchange(RingExchange):
+    """A Topic Exchange based on a hashmap."""
+    def __init__(self, ring=None):
+        super(RoundRobinRingExchange, self).__init__(ring)
+
+    def run(self, key):
+        if not self._ring_has(key):
+            LOG.warn(
+                _("No key defining hosts for topic '%s', "
+                  "see ringfile") % (key, )
+            )
+            return []
+        host = next(self.ring0[key])
+        return [(key + '.' + host, host)]
+
+
+class FanoutRingExchange(RingExchange):
+    """Fanout Exchange based on a hashmap."""
+    def __init__(self, ring=None):
+        super(FanoutRingExchange, self).__init__(ring)
+
+    def run(self, key):
+        # Assume starts with "fanout~", strip it for lookup.
+        nkey = key.split('fanout~')[1:][0]
+        if not self._ring_has(nkey):
+            LOG.warn(
+                _("No key defining hosts for topic '%s', "
+                  "see ringfile") % (nkey, )
+            )
+            return []
+        return map(lambda x: (key + '.'
+ x, x), self.ring[nkey]) + + +class MatchMakerRing(mm.MatchMakerBase): + """Match Maker where hosts are loaded from a static hashmap.""" + def __init__(self, ring=None): + super(MatchMakerRing, self).__init__() + self.add_binding(mm.FanoutBinding(), FanoutRingExchange(ring)) + self.add_binding(mm.DirectBinding(), mm.DirectExchange()) + self.add_binding(mm.TopicBinding(), RoundRobinRingExchange(ring)) diff --git a/cinder/openstack/common/rpc/proxy.py b/cinder/openstack/common/rpc/proxy.py index 23305d41d2..4ddc5c936a 100644 --- a/cinder/openstack/common/rpc/proxy.py +++ b/cinder/openstack/common/rpc/proxy.py @@ -58,9 +58,13 @@ def _get_topic(self, topic): """Return the topic to use for a message.""" return topic if topic else self.topic + @staticmethod + def make_namespaced_msg(method, namespace, **kwargs): + return {'method': method, 'namespace': namespace, 'args': kwargs} + @staticmethod def make_msg(method, **kwargs): - return {'method': method, 'args': kwargs} + return RpcProxy.make_namespaced_msg(method, None, **kwargs) def call(self, context, msg, topic=None, version=None, timeout=None): """rpc.call() a remote method. @@ -68,16 +72,21 @@ def call(self, context, msg, topic=None, version=None, timeout=None): :param context: The request context :param msg: The message to send, including the method and args. :param topic: Override the topic for this message. + :param version: (Optional) Override the requested API version in this + message. :param timeout: (Optional) A timeout to use when waiting for the response. If no timeout is specified, a default timeout will be used that is usually sufficient. - :param version: (Optional) Override the requested API version in this - message. :returns: The return value from the remote method. """ self._set_version(msg, version) - return rpc.call(context, self._get_topic(topic), msg, timeout) + real_topic = self._get_topic(topic) + try: + return rpc.call(context, real_topic, msg, timeout) + except rpc.common.Timeout as exc: + raise rpc.common.Timeout( + exc.info, real_topic, msg.get('method')) def multicall(self, context, msg, topic=None, version=None, timeout=None): """rpc.multicall() a remote method. @@ -85,17 +94,22 @@ def multicall(self, context, msg, topic=None, version=None, timeout=None): :param context: The request context :param msg: The message to send, including the method and args. :param topic: Override the topic for this message. + :param version: (Optional) Override the requested API version in this + message. :param timeout: (Optional) A timeout to use when waiting for the response. If no timeout is specified, a default timeout will be used that is usually sufficient. - :param version: (Optional) Override the requested API version in this - message. :returns: An iterator that lets you process each of the returned values from the remote method as they arrive. """ self._set_version(msg, version) - return rpc.multicall(context, self._get_topic(topic), msg, timeout) + real_topic = self._get_topic(topic) + try: + return rpc.multicall(context, real_topic, msg, timeout) + except rpc.common.Timeout as exc: + raise rpc.common.Timeout( + exc.info, real_topic, msg.get('method')) def cast(self, context, msg, topic=None, version=None): """rpc.cast() a remote method. 
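In proxy.py, make_msg() is now a thin wrapper over the new make_namespaced_msg(), so every message carries an explicit namespace key (None for the default namespace) that the dispatcher can route on. The resulting message shapes (method and namespace names are illustrative)::

    msg = RpcProxy.make_namespaced_msg('create_volume', 'admin_api', size=1)
    # {'method': 'create_volume', 'namespace': 'admin_api',
    #  'args': {'size': 1}}

    msg = RpcProxy.make_msg('create_volume', size=1)
    # {'method': 'create_volume', 'namespace': None, 'args': {'size': 1}}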
diff --git a/cinder/openstack/common/rpc/service.py b/cinder/openstack/common/rpc/service.py index a35fd6ad44..b1f997d38f 100644 --- a/cinder/openstack/common/rpc/service.py +++ b/cinder/openstack/common/rpc/service.py @@ -20,6 +20,7 @@ from cinder.openstack.common.gettextutils import _ from cinder.openstack.common import log as logging from cinder.openstack.common import rpc +from cinder.openstack.common.rpc import dispatcher as rpc_dispatcher from cinder.openstack.common import service @@ -46,15 +47,20 @@ def start(self): LOG.debug(_("Creating Consumer connection for Service %s") % self.topic) - rpc_dispatcher = rpc.dispatcher.RpcDispatcher([self.manager]) + dispatcher = rpc_dispatcher.RpcDispatcher([self.manager]) # Share this same connection for these Consumers - self.conn.create_consumer(self.topic, rpc_dispatcher, fanout=False) + self.conn.create_consumer(self.topic, dispatcher, fanout=False) node_topic = '%s.%s' % (self.topic, self.host) - self.conn.create_consumer(node_topic, rpc_dispatcher, fanout=False) + self.conn.create_consumer(node_topic, dispatcher, fanout=False) - self.conn.create_consumer(self.topic, rpc_dispatcher, fanout=True) + self.conn.create_consumer(self.topic, dispatcher, fanout=True) + + # Hook to allow the manager to do other initializations after + # the rpc connection is created. + if callable(getattr(self.manager, 'initialize_service_hook', None)): + self.manager.initialize_service_hook(self) # Consume from all consumers in a thread self.conn.consume_in_thread() diff --git a/cinder/api/openstack/volume/extensions.py b/cinder/openstack/common/rpc/zmq_receiver.py old mode 100644 new mode 100755 similarity index 55% rename from cinder/api/openstack/volume/extensions.py rename to cinder/openstack/common/rpc/zmq_receiver.py index af1f367a3e..e4c6ee30ed --- a/cinder/api/openstack/volume/extensions.py +++ b/cinder/openstack/common/rpc/zmq_receiver.py @@ -1,7 +1,7 @@ +#!/usr/bin/env python # vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright 2011 OpenStack LLC. -# All Rights Reserved. +# Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -15,19 +15,27 @@ # License for the specific language governing permissions and limitations # under the License. 
-from cinder.api.openstack import extensions as base_extensions -from cinder import flags -from cinder.openstack.common import log as logging +import eventlet +eventlet.monkey_patch() + +import contextlib +import sys +from oslo.config import cfg + +from cinder.openstack.common import log as logging +from cinder.openstack.common import rpc +from cinder.openstack.common.rpc import impl_zmq -LOG = logging.getLogger(__name__) -FLAGS = flags.FLAGS +CONF = cfg.CONF +CONF.register_opts(rpc.rpc_opts) +CONF.register_opts(impl_zmq.zmq_opts) -class ExtensionManager(base_extensions.ExtensionManager): - def __init__(self): - LOG.audit(_('Initializing extension manager.')) +def main(): + CONF(sys.argv[1:], project='oslo') + logging.setup("oslo") - self.cls_list = FLAGS.osapi_volume_extension - self.extensions = {} - self._load_extensions() + with contextlib.closing(impl_zmq.ZmqProxy(CONF)) as reactor: + reactor.consume_in_thread() + reactor.wait() diff --git a/cinder/openstack/common/scheduler/__init__.py b/cinder/openstack/common/scheduler/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cinder/openstack/common/scheduler/base_filter.py b/cinder/openstack/common/scheduler/base_filter.py new file mode 100644 index 0000000000..e31e7a270d --- /dev/null +++ b/cinder/openstack/common/scheduler/base_filter.py @@ -0,0 +1,53 @@ +# Copyright (c) 2011-2012 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Filter support +""" + +from cinder.openstack.common.scheduler import base_handler + + +class BaseFilter(object): + """Base class for all filter classes.""" + def _filter_one(self, obj, filter_properties): + """Return True if it passes the filter, False otherwise. + Override this in a subclass. + """ + return True + + def filter_all(self, filter_obj_list, filter_properties): + """Yield objects that pass the filter. + + Can be overridden in a subclass, if you need to base filtering + decisions on all objects. Otherwise, one can just override + _filter_one() to filter a single object. + """ + for obj in filter_obj_list: + if self._filter_one(obj, filter_properties): + yield obj + + +class BaseFilterHandler(base_handler.BaseHandler): + """Base class to handle loading filter classes. + + This class should be subclassed where one needs to use filters. + """ + + def get_filtered_objects(self, filter_classes, objs, + filter_properties): + for filter_cls in filter_classes: + objs = filter_cls().filter_all(objs, filter_properties) + return list(objs) diff --git a/cinder/openstack/common/scheduler/base_handler.py b/cinder/openstack/common/scheduler/base_handler.py new file mode 100644 index 0000000000..1808d2c61d --- /dev/null +++ b/cinder/openstack/common/scheduler/base_handler.py @@ -0,0 +1,45 @@ +# Copyright (c) 2011-2013 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+A common base for handling extension classes.
+
+Used by BaseFilterHandler and BaseWeightHandler
+"""
+
+import inspect
+
+from stevedore import extension
+
+
+class BaseHandler(object):
+    """Base class to handle loading filter and weight classes."""
+    def __init__(self, modifier_class_type, modifier_namespace):
+        self.namespace = modifier_namespace
+        self.modifier_class_type = modifier_class_type
+        self.extension_manager = extension.ExtensionManager(modifier_namespace)
+
+    def _is_correct_class(self, cls):
+        """Return whether an object is a class of the correct type and
+        is not prefixed with an underscore.
+        """
+        return (inspect.isclass(cls) and
+                not cls.__name__.startswith('_') and
+                issubclass(cls, self.modifier_class_type))
+
+    def get_all_classes(self):
+        # We use a set, as some classes may have an entrypoint of their own,
+        # and also be returned by a function such as 'all_filters' for example
+        return [ext.plugin for ext in self.extension_manager if
+                self._is_correct_class(ext.plugin)]
diff --git a/cinder/openstack/common/scheduler/base_weight.py b/cinder/openstack/common/scheduler/base_weight.py
new file mode 100644
index 0000000000..a6ba75d360
--- /dev/null
+++ b/cinder/openstack/common/scheduler/base_weight.py
@@ -0,0 +1,72 @@
+# Copyright (c) 2011-2012 OpenStack Foundation.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Pluggable Weighing support
+"""
+
+from cinder.openstack.common.scheduler import base_handler
+
+
+class WeighedObject(object):
+    """Object with weight information."""
+    def __init__(self, obj, weight):
+        self.obj = obj
+        self.weight = weight
+
+    def __repr__(self):
+        return "<WeighedObject '%s': %s>" % (self.obj, self.weight)
+
+
+class BaseWeigher(object):
+    """Base class for pluggable weighers."""
+    def _weight_multiplier(self):
+        """How weighted this weigher should be. Normally this would
+        be overridden in a subclass based on a config value.
+        """
+        return 1.0
+
+    def _weigh_object(self, obj, weight_properties):
+        """Override in a subclass to specify a weight for a specific
+        object.
+        """
+        return 0.0
+
+    def weigh_objects(self, weighed_obj_list, weight_properties):
+        """Weigh multiple objects. Override in a subclass if you need
+        access to all objects in order to manipulate weights.
+ """ + constant = self._weight_multiplier() + for obj in weighed_obj_list: + obj.weight += (constant * + self._weigh_object(obj.obj, weight_properties)) + + +class BaseWeightHandler(base_handler.BaseHandler): + object_class = WeighedObject + + def get_weighed_objects(self, weigher_classes, obj_list, + weighing_properties): + """Return a sorted (highest score first) list of WeighedObjects.""" + + if not obj_list: + return [] + + weighed_objs = [self.object_class(obj, 0.0) for obj in obj_list] + for weigher_cls in weigher_classes: + weigher = weigher_cls() + weigher.weigh_objects(weighed_objs, weighing_properties) + + return sorted(weighed_objs, key=lambda x: x.weight, reverse=True) diff --git a/cinder/openstack/common/scheduler/filters/__init__.py b/cinder/openstack/common/scheduler/filters/__init__.py new file mode 100644 index 0000000000..7c77d9cdf2 --- /dev/null +++ b/cinder/openstack/common/scheduler/filters/__init__.py @@ -0,0 +1,41 @@ +# Copyright (c) 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Scheduler host filters +""" + +from cinder.openstack.common import log as logging +from cinder.openstack.common.scheduler import base_filter + +LOG = logging.getLogger(__name__) + + +class BaseHostFilter(base_filter.BaseFilter): + """Base class for host filters.""" + def _filter_one(self, obj, filter_properties): + """Return True if the object passes the filter, otherwise False.""" + return self.host_passes(obj, filter_properties) + + def host_passes(self, host_state, filter_properties): + """Return True if the HostState passes the filter, otherwise False. + Override this in a subclass. + """ + raise NotImplementedError() + + +class HostFilterHandler(base_filter.BaseFilterHandler): + def __init__(self, namespace): + super(HostFilterHandler, self).__init__(BaseHostFilter, namespace) diff --git a/cinder/openstack/common/scheduler/filters/availability_zone_filter.py b/cinder/openstack/common/scheduler/filters/availability_zone_filter.py new file mode 100644 index 0000000000..1afc64c03d --- /dev/null +++ b/cinder/openstack/common/scheduler/filters/availability_zone_filter.py @@ -0,0 +1,30 @@ +# Copyright (c) 2011-2012 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +from cinder.openstack.common.scheduler import filters + + +class AvailabilityZoneFilter(filters.BaseHostFilter): + """Filters Hosts by availability zone.""" + + def host_passes(self, host_state, filter_properties): + spec = filter_properties.get('request_spec', {}) + props = spec.get('resource_properties', {}) + availability_zone = props.get('availability_zone') + + if availability_zone: + return availability_zone == host_state.service['availability_zone'] + return True diff --git a/cinder/openstack/common/scheduler/filters/capabilities_filter.py b/cinder/openstack/common/scheduler/filters/capabilities_filter.py new file mode 100644 index 0000000000..7e7953c144 --- /dev/null +++ b/cinder/openstack/common/scheduler/filters/capabilities_filter.py @@ -0,0 +1,70 @@ +# Copyright (c) 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import six + +from cinder.openstack.common.gettextutils import _ # noqa +from cinder.openstack.common import log as logging +from cinder.openstack.common.scheduler import filters +from cinder.openstack.common.scheduler.filters import extra_specs_ops + +LOG = logging.getLogger(__name__) + + +class CapabilitiesFilter(filters.BaseHostFilter): + """HostFilter to work with resource (instance & volume) type records.""" + + def _satisfies_extra_specs(self, capabilities, resource_type): + """Check that the capabilities provided by the services satisfy + the extra specs associated with the resource type. + """ + extra_specs = resource_type.get('extra_specs', []) + if not extra_specs: + return True + + for key, req in six.iteritems(extra_specs): + # Either not scope format, or in capabilities scope + scope = key.split(':') + if len(scope) > 1 and scope[0] != "capabilities": + continue + elif scope[0] == "capabilities": + del scope[0] + + cap = capabilities + for index in range(len(scope)): + try: + cap = cap.get(scope[index], None) + except AttributeError: + return False + if cap is None: + return False + if not extra_specs_ops.match(cap, req): + LOG.debug(_("extra_spec requirement '%(req)s' does not match " + "'%(cap)s'"), {'req': req, 'cap': cap}) + return False + return True + + def host_passes(self, host_state, filter_properties): + """Return a list of hosts that can create resource_type.""" + # Note(zhiteng) Currently only Cinder and Nova are using + # this filter, so the resource type is either instance or + # volume. + resource_type = filter_properties.get('resource_type') + if not self._satisfies_extra_specs(host_state.capabilities, + resource_type): + LOG.debug(_("%(host_state)s fails resource_type extra_specs " + "requirements"), {'host_state': host_state}) + return False + return True diff --git a/cinder/openstack/common/scheduler/filters/extra_specs_ops.py b/cinder/openstack/common/scheduler/filters/extra_specs_ops.py new file mode 100644 index 0000000000..b3730f5af4 --- /dev/null +++ b/cinder/openstack/common/scheduler/filters/extra_specs_ops.py @@ -0,0 +1,72 @@ +# Copyright (c) 2011 OpenStack Foundation. 
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import operator
+
+from cinder.openstack.common import strutils
+
+# 1. The following operations are supported:
+#    =, s==, s!=, s>=, s>, s<=, s<, <in>, <or>, <is>, ==, !=, >=, <=
+# 2. Note that <or> is handled in a different way below.
+# 3. If the first word in the extra_specs is not one of the operators,
+#    it is ignored.
+_op_methods = {'=': lambda x, y: float(x) >= float(y),
+               '<in>': lambda x, y: y in x,
+               '<is>': lambda x, y: (strutils.bool_from_string(x) is
+                                     strutils.bool_from_string(y)),
+               '==': lambda x, y: float(x) == float(y),
+               '!=': lambda x, y: float(x) != float(y),
+               '>=': lambda x, y: float(x) >= float(y),
+               '<=': lambda x, y: float(x) <= float(y),
+               's==': operator.eq,
+               's!=': operator.ne,
+               's<': operator.lt,
+               's<=': operator.le,
+               's>': operator.gt,
+               's>=': operator.ge}
+
+
+def match(value, req):
+    words = req.split()
+
+    op = method = None
+    if words:
+        op = words.pop(0)
+        method = _op_methods.get(op)
+
+    if op != '<or>' and not method:
+        return value == req
+
+    if value is None:
+        return False
+
+    if op == '<or>':  # Ex: <or> v1 <or> v2 <or> v3
+        while True:
+            if words.pop(0) == value:
+                return True
+            if not words:
+                break
+            op = words.pop(0)  # remove a keyword <or>
+            if not words:
+                break
+        return False
+
+    try:
+        if words and method(value, words[0]):
+            return True
+    except ValueError:
+        pass
+
+    return False
diff --git a/cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py b/cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py
new file mode 100644
index 0000000000..d288ead205
--- /dev/null
+++ b/cinder/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py
@@ -0,0 +1,56 @@
+# Copyright (c) 2011 OpenStack Foundation.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from cinder.openstack.common.gettextutils import _  # noqa
+from cinder.openstack.common import log as logging
+from cinder.openstack.common.scheduler import filters
+
+LOG = logging.getLogger(__name__)
+
+
+class IgnoreAttemptedHostsFilter(filters.BaseHostFilter):
+    """Filter out previously attempted hosts
+
+    A host passes this filter if it has not already been attempted for
+    scheduling. The scheduler needs to add previously attempted hosts
+    to the 'retry' key of filter_properties in order for this to work
+    correctly.
For example: + { + 'retry': { + 'hosts': ['host1', 'host2'], + 'num_attempts': 3, + } + } + """ + + def host_passes(self, host_state, filter_properties): + """Skip nodes that have already been attempted.""" + attempted = filter_properties.get('retry', None) + if not attempted: + # Re-scheduling is disabled + LOG.debug(_("Re-scheduling is disabled.")) + return True + + hosts = attempted.get('hosts', []) + host = host_state.host + + passes = host not in hosts + pass_msg = "passes" if passes else "fails" + + LOG.debug(_("Host %(host)s %(pass_msg)s. Previously tried hosts: " + "%(hosts)s") % {'host': host, + 'pass_msg': pass_msg, + 'hosts': hosts}) + return passes diff --git a/cinder/openstack/common/scheduler/filters/json_filter.py b/cinder/openstack/common/scheduler/filters/json_filter.py new file mode 100644 index 0000000000..ce0a365ee7 --- /dev/null +++ b/cinder/openstack/common/scheduler/filters/json_filter.py @@ -0,0 +1,152 @@ +# Copyright (c) 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +import operator + +import six + +from cinder.openstack.common import jsonutils +from cinder.openstack.common.scheduler import filters + + +class JsonFilter(filters.BaseHostFilter): + """Host Filter to allow simple JSON-based grammar for + selecting hosts. + """ + def _op_compare(self, args, op): + """Returns True if the specified operator can successfully + compare the first item in the args with all the rest. Will + return False if only one item is in the list. 
+ """ + if len(args) < 2: + return False + if op is operator.contains: + bad = args[0] not in args[1:] + else: + bad = [arg for arg in args[1:] + if not op(args[0], arg)] + return not bool(bad) + + def _equals(self, args): + """First term is == all the other terms.""" + return self._op_compare(args, operator.eq) + + def _less_than(self, args): + """First term is < all the other terms.""" + return self._op_compare(args, operator.lt) + + def _greater_than(self, args): + """First term is > all the other terms.""" + return self._op_compare(args, operator.gt) + + def _in(self, args): + """First term is in set of remaining terms.""" + return self._op_compare(args, operator.contains) + + def _less_than_equal(self, args): + """First term is <= all the other terms.""" + return self._op_compare(args, operator.le) + + def _greater_than_equal(self, args): + """First term is >= all the other terms.""" + return self._op_compare(args, operator.ge) + + def _not(self, args): + """Flip each of the arguments.""" + return [not arg for arg in args] + + def _or(self, args): + """True if any arg is True.""" + return any(args) + + def _and(self, args): + """True if all args are True.""" + return all(args) + + commands = { + '=': _equals, + '<': _less_than, + '>': _greater_than, + 'in': _in, + '<=': _less_than_equal, + '>=': _greater_than_equal, + 'not': _not, + 'or': _or, + 'and': _and, + } + + def _parse_string(self, string, host_state): + """Strings prefixed with $ are capability lookups in the + form '$variable' where 'variable' is an attribute in the + HostState class. If $variable is a dictionary, you may + use: $variable.dictkey + """ + if not string: + return None + if not string.startswith("$"): + return string + + path = string[1:].split(".") + obj = getattr(host_state, path[0], None) + if obj is None: + return None + for item in path[1:]: + obj = obj.get(item, None) + if obj is None: + return None + return obj + + def _process_filter(self, query, host_state): + """Recursively parse the query structure.""" + if not query: + return True + cmd = query[0] + method = self.commands[cmd] + cooked_args = [] + for arg in query[1:]: + if isinstance(arg, list): + arg = self._process_filter(arg, host_state) + elif isinstance(arg, six.string_types): + arg = self._parse_string(arg, host_state) + if arg is not None: + cooked_args.append(arg) + result = method(self, cooked_args) + return result + + def host_passes(self, host_state, filter_properties): + """Return a list of hosts that can fulfill the requirements + specified in the query. + """ + # TODO(zhiteng) Add description for filter_properties structure + # and scheduler_hints. + try: + query = filter_properties['scheduler_hints']['query'] + except KeyError: + query = None + if not query: + return True + + # NOTE(comstud): Not checking capabilities or service for + # enabled/disabled so that a provided json filter can decide + + result = self._process_filter(jsonutils.loads(query), host_state) + if isinstance(result, list): + # If any succeeded, include the host + result = any(result) + if result: + # Filter it out. + return True + return False diff --git a/cinder/openstack/common/scheduler/weights/__init__.py b/cinder/openstack/common/scheduler/weights/__init__.py new file mode 100644 index 0000000000..f4f5f42023 --- /dev/null +++ b/cinder/openstack/common/scheduler/weights/__init__.py @@ -0,0 +1,45 @@ +# Copyright (c) 2011 OpenStack Foundation. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Scheduler host weights +""" + + +from cinder.openstack.common.scheduler import base_weight + + +class WeighedHost(base_weight.WeighedObject): + def to_dict(self): + return { + 'weight': self.weight, + 'host': self.obj.host, + } + + def __repr__(self): + return ("WeighedHost [host: %s, weight: %s]" % + (self.obj.host, self.weight)) + + +class BaseHostWeigher(base_weight.BaseWeigher): + """Base class for host weights.""" + pass + + +class HostWeightHandler(base_weight.BaseWeightHandler): + object_class = WeighedHost + + def __init__(self, namespace): + super(HostWeightHandler, self).__init__(BaseHostWeigher, namespace) diff --git a/cinder/openstack/common/service.py b/cinder/openstack/common/service.py new file mode 100644 index 0000000000..7632625d49 --- /dev/null +++ b/cinder/openstack/common/service.py @@ -0,0 +1,494 @@ +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Generic Node base class for all workers that run on hosts.""" + +import errno +import logging as std_logging +import os +import random +import signal +import sys +import threading +import time + +try: + # Importing just the symbol here because the io module does not + # exist in Python 2.6. + from io import UnsupportedOperation # noqa +except ImportError: + # Python 2.6 + UnsupportedOperation = None + +import eventlet +from oslo.config import cfg + +from cinder.openstack.common import eventlet_backdoor +from cinder.openstack.common.gettextutils import _ +from cinder.openstack.common import importutils +from cinder.openstack.common import log as logging +from cinder.openstack.common import threadgroup + + +rpc = importutils.try_import('cinder.openstack.common.rpc') +CONF = cfg.CONF +LOG = logging.getLogger(__name__) + + +def _sighup_supported(): + return hasattr(signal, 'SIGHUP') + + +def _is_daemon(): + # The process group for a foreground process will match the + # process group of the controlling terminal. If those values do + # not match, or ioctl() fails on the stdout file handle, we assume + # the process is running in the background as a daemon. 
+ # http://www.gnu.org/software/bash/manual/bashref.html#Job-Control-Basics + try: + is_daemon = os.getpgrp() != os.tcgetpgrp(sys.stdout.fileno()) + except OSError as err: + if err.errno == errno.ENOTTY: + # Assume we are a daemon because there is no terminal. + is_daemon = True + else: + raise + except UnsupportedOperation: + # Could not get the fileno for stdout, so we must be a daemon. + is_daemon = True + return is_daemon + + +def _is_sighup_and_daemon(signo): + if not (_sighup_supported() and signo == signal.SIGHUP): + # Avoid checking if we are a daemon, because the signal isn't + # SIGHUP. + return False + return _is_daemon() + + +def _signo_to_signame(signo): + signals = {signal.SIGTERM: 'SIGTERM', + signal.SIGINT: 'SIGINT'} + if _sighup_supported(): + signals[signal.SIGHUP] = 'SIGHUP' + return signals[signo] + + +def _set_signals_handler(handler): + signal.signal(signal.SIGTERM, handler) + signal.signal(signal.SIGINT, handler) + if _sighup_supported(): + signal.signal(signal.SIGHUP, handler) + + +class Launcher(object): + """Launch one or more services and wait for them to complete.""" + + def __init__(self): + """Initialize the service launcher. + + :returns: None + + """ + self.services = Services() + self.backdoor_port = eventlet_backdoor.initialize_if_enabled() + + def launch_service(self, service): + """Load and start the given service. + + :param service: The service you would like to start. + :returns: None + + """ + service.backdoor_port = self.backdoor_port + self.services.add(service) + + def stop(self): + """Stop all services which are currently running. + + :returns: None + + """ + self.services.stop() + + def wait(self): + """Waits until all services have been stopped, and then returns. + + :returns: None + + """ + self.services.wait() + + def restart(self): + """Reload config files and restart service. + + :returns: None + + """ + cfg.CONF.reload_config_files() + self.services.restart() + + +class SignalExit(SystemExit): + def __init__(self, signo, exccode=1): + super(SignalExit, self).__init__(exccode) + self.signo = signo + + +class ServiceLauncher(Launcher): + def _handle_signal(self, signo, frame): + # Allow the process to be killed again and die from natural causes + _set_signals_handler(signal.SIG_DFL) + raise SignalExit(signo) + + def handle_signal(self): + _set_signals_handler(self._handle_signal) + + def _wait_for_exit_or_signal(self, ready_callback=None): + status = None + signo = 0 + + LOG.debug(_('Full set of CONF:')) + CONF.log_opt_values(LOG, std_logging.DEBUG) + + try: + if ready_callback: + ready_callback() + super(ServiceLauncher, self).wait() + except SignalExit as exc: + signame = _signo_to_signame(exc.signo) + LOG.info(_('Caught %s, exiting'), signame) + status = exc.code + signo = exc.signo + except SystemExit as exc: + status = exc.code + finally: + self.stop() + if rpc: + try: + rpc.cleanup() + except Exception: + # We're shutting down, so it doesn't matter at this point. + LOG.exception(_('Exception during rpc cleanup.')) + + return status, signo + + def wait(self, ready_callback=None): + while True: + self.handle_signal() + status, signo = self._wait_for_exit_or_signal(ready_callback) + if not _is_sighup_and_daemon(signo): + return status + self.restart() + + +class ServiceWrapper(object): + def __init__(self, service, workers): + self.service = service + self.workers = workers + self.children = set() + self.forktimes = [] + + +class ProcessLauncher(object): + def __init__(self, wait_interval=0.01): + """Constructor. 
+ + :param wait_interval: The interval to sleep for between checks + of child process exit. + """ + self.children = {} + self.sigcaught = None + self.running = True + self.wait_interval = wait_interval + rfd, self.writepipe = os.pipe() + self.readpipe = eventlet.greenio.GreenPipe(rfd, 'r') + self.handle_signal() + + def handle_signal(self): + _set_signals_handler(self._handle_signal) + + def _handle_signal(self, signo, frame): + self.sigcaught = signo + self.running = False + + # Allow the process to be killed again and die from natural causes + _set_signals_handler(signal.SIG_DFL) + + def _pipe_watcher(self): + # This will block until the write end is closed when the parent + # dies unexpectedly + self.readpipe.read() + + LOG.info(_('Parent process has died unexpectedly, exiting')) + + sys.exit(1) + + def _child_process_handle_signal(self): + # Setup child signal handlers differently + def _sigterm(*args): + signal.signal(signal.SIGTERM, signal.SIG_DFL) + raise SignalExit(signal.SIGTERM) + + def _sighup(*args): + signal.signal(signal.SIGHUP, signal.SIG_DFL) + raise SignalExit(signal.SIGHUP) + + signal.signal(signal.SIGTERM, _sigterm) + if _sighup_supported(): + signal.signal(signal.SIGHUP, _sighup) + # Block SIGINT and let the parent send us a SIGTERM + signal.signal(signal.SIGINT, signal.SIG_IGN) + + def _child_wait_for_exit_or_signal(self, launcher): + status = 0 + signo = 0 + + # NOTE(johannes): All exceptions are caught to ensure this + # doesn't fallback into the loop spawning children. It would + # be bad for a child to spawn more children. + try: + launcher.wait() + except SignalExit as exc: + signame = _signo_to_signame(exc.signo) + LOG.info(_('Caught %s, exiting'), signame) + status = exc.code + signo = exc.signo + except SystemExit as exc: + status = exc.code + except BaseException: + LOG.exception(_('Unhandled exception')) + status = 2 + finally: + launcher.stop() + + return status, signo + + def _child_process(self, service): + self._child_process_handle_signal() + + # Reopen the eventlet hub to make sure we don't share an epoll + # fd with parent and/or siblings, which would be bad + eventlet.hubs.use_hub() + + # Close write to ensure only parent has it open + os.close(self.writepipe) + # Create greenthread to watch for parent to close pipe + eventlet.spawn_n(self._pipe_watcher) + + # Reseed random number generator + random.seed() + + launcher = Launcher() + launcher.launch_service(service) + return launcher + + def _start_child(self, wrap): + if len(wrap.forktimes) > wrap.workers: + # Limit ourselves to one process a second (over the period of + # number of workers * 1 second). This will allow workers to + # start up quickly but ensure we don't fork off children that + # die instantly too quickly. 
+ if time.time() - wrap.forktimes[0] < wrap.workers: + LOG.info(_('Forking too fast, sleeping')) + time.sleep(1) + + wrap.forktimes.pop(0) + + wrap.forktimes.append(time.time()) + + pid = os.fork() + if pid == 0: + launcher = self._child_process(wrap.service) + while True: + self._child_process_handle_signal() + status, signo = self._child_wait_for_exit_or_signal(launcher) + if not _is_sighup_and_daemon(signo): + break + launcher.restart() + + os._exit(status) + + LOG.info(_('Started child %d'), pid) + + wrap.children.add(pid) + self.children[pid] = wrap + + return pid + + def launch_service(self, service, workers=1): + wrap = ServiceWrapper(service, workers) + + LOG.info(_('Starting %d workers'), wrap.workers) + while self.running and len(wrap.children) < wrap.workers: + self._start_child(wrap) + + def _wait_child(self): + try: + # Don't block if no child processes have exited + pid, status = os.waitpid(0, os.WNOHANG) + if not pid: + return None + except OSError as exc: + if exc.errno not in (errno.EINTR, errno.ECHILD): + raise + return None + + if os.WIFSIGNALED(status): + sig = os.WTERMSIG(status) + LOG.info(_('Child %(pid)d killed by signal %(sig)d'), + dict(pid=pid, sig=sig)) + else: + code = os.WEXITSTATUS(status) + LOG.info(_('Child %(pid)s exited with status %(code)d'), + dict(pid=pid, code=code)) + + if pid not in self.children: + LOG.warning(_('pid %d not in child list'), pid) + return None + + wrap = self.children.pop(pid) + wrap.children.remove(pid) + return wrap + + def _respawn_children(self): + while self.running: + wrap = self._wait_child() + if not wrap: + # Yield to other threads if no children have exited + # Sleep for a short time to avoid excessive CPU usage + # (see bug #1095346) + eventlet.greenthread.sleep(self.wait_interval) + continue + while self.running and len(wrap.children) < wrap.workers: + self._start_child(wrap) + + def wait(self): + """Loop waiting on children to die and respawning as necessary.""" + + LOG.debug(_('Full set of CONF:')) + CONF.log_opt_values(LOG, std_logging.DEBUG) + + while True: + self.handle_signal() + self._respawn_children() + if self.sigcaught: + signame = _signo_to_signame(self.sigcaught) + LOG.info(_('Caught %s, stopping children'), signame) + if not _is_sighup_and_daemon(self.sigcaught): + break + + for pid in self.children: + os.kill(pid, signal.SIGHUP) + self.running = True + self.sigcaught = None + + for pid in self.children: + try: + os.kill(pid, signal.SIGTERM) + except OSError as exc: + if exc.errno != errno.ESRCH: + raise + + # Wait for children to die + if self.children: + LOG.info(_('Waiting on %d children to exit'), len(self.children)) + while self.children: + self._wait_child() + + +class Service(object): + """Service object for binaries running on hosts.""" + + def __init__(self, threads=1000): + self.tg = threadgroup.ThreadGroup(threads) + + # signal that the service is done shutting itself down: + self._done = threading.Event() + + def reset(self): + self._done = threading.Event() + + def start(self): + pass + + def stop(self): + self.tg.stop() + self.tg.wait() + # Signal that service cleanup is done: + self._done.set() + + def wait(self): + self._done.wait() + + +class Services(object): + + def __init__(self): + self.services = [] + self.tg = threadgroup.ThreadGroup() + self.done = threading.Event() + + def add(self, service): + self.services.append(service) + self.tg.add_thread(self.run_service, service, self.done) + + def stop(self): + # wait for graceful shutdown of services: + for service in self.services: + 
service.stop() + service.wait() + + # Each service has performed cleanup, now signal that the run_service + # wrapper threads can now die: + self.done.set() + + # reap threads: + self.tg.stop() + + def wait(self): + self.tg.wait() + + def restart(self): + self.stop() + self.done = threading.Event() + for restart_service in self.services: + restart_service.reset() + self.tg.add_thread(self.run_service, restart_service, self.done) + + @staticmethod + def run_service(service, done): + """Service start wrapper. + + :param service: service to run + :param done: event to wait on until a shutdown is triggered + :returns: None + + """ + service.start() + done.wait() + + +def launch(service, workers=None): + if workers: + launcher = ProcessLauncher() + launcher.launch_service(service, workers=workers) + else: + launcher = ServiceLauncher() + launcher.launch_service(service) + return launcher diff --git a/cinder/openstack/common/setup.py b/cinder/openstack/common/setup.py deleted file mode 100644 index 628f5e3c9b..0000000000 --- a/cinder/openstack/common/setup.py +++ /dev/null @@ -1,358 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2011 OpenStack LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Utilities with minimum-depends for use in setup.py -""" - -import datetime -import os -import re -import subprocess -import sys - -from setuptools.command import sdist - - -def parse_mailmap(mailmap='.mailmap'): - mapping = {} - if os.path.exists(mailmap): - fp = open(mailmap, 'r') - for l in fp: - l = l.strip() - if not l.startswith('#') and ' ' in l: - canonical_email, alias = [x for x in l.split(' ') - if x.startswith('<')] - mapping[alias] = canonical_email - return mapping - - -def canonicalize_emails(changelog, mapping): - """Takes in a string and an email alias mapping and replaces all - instances of the aliases in the string with their real email. 
- """ - for alias, email in mapping.iteritems(): - changelog = changelog.replace(alias, email) - return changelog - - -# Get requirements from the first file that exists -def get_reqs_from_files(requirements_files): - for requirements_file in requirements_files: - if os.path.exists(requirements_file): - return open(requirements_file, 'r').read().split('\n') - return [] - - -def parse_requirements(requirements_files=['requirements.txt', - 'tools/pip-requires']): - requirements = [] - for line in get_reqs_from_files(requirements_files): - # For the requirements list, we need to inject only the portion - # after egg= so that distutils knows the package it's looking for - # such as: - # -e git://github.com/openstack/nova/master#egg=nova - if re.match(r'\s*-e\s+', line): - requirements.append(re.sub(r'\s*-e\s+.*#egg=(.*)$', r'\1', - line)) - # such as: - # http://github.com/openstack/nova/zipball/master#egg=nova - elif re.match(r'\s*https?:', line): - requirements.append(re.sub(r'\s*https?:.*#egg=(.*)$', r'\1', - line)) - # -f lines are for index locations, and don't get used here - elif re.match(r'\s*-f\s+', line): - pass - # argparse is part of the standard library starting with 2.7 - # adding it to the requirements list screws distro installs - elif line == 'argparse' and sys.version_info >= (2, 7): - pass - else: - requirements.append(line) - - return requirements - - -def parse_dependency_links(requirements_files=['requirements.txt', - 'tools/pip-requires']): - dependency_links = [] - # dependency_links inject alternate locations to find packages listed - # in requirements - for line in get_reqs_from_files(requirements_files): - # skip comments and blank lines - if re.match(r'(\s*#)|(\s*$)', line): - continue - # lines with -e or -f need the whole line, minus the flag - if re.match(r'\s*-[ef]\s+', line): - dependency_links.append(re.sub(r'\s*-[ef]\s+', '', line)) - # lines that are only urls can go in unmolested - elif re.match(r'\s*https?:', line): - dependency_links.append(line) - return dependency_links - - -def write_requirements(): - venv = os.environ.get('VIRTUAL_ENV', None) - if venv is not None: - with open("requirements.txt", "w") as req_file: - output = subprocess.Popen(["pip", "-E", venv, "freeze", "-l"], - stdout=subprocess.PIPE) - requirements = output.communicate()[0].strip() - req_file.write(requirements) - - -def _run_shell_command(cmd): - output = subprocess.Popen(["/bin/sh", "-c", cmd], - stdout=subprocess.PIPE) - out = output.communicate() - if len(out) == 0: - return None - if len(out[0].strip()) == 0: - return None - return out[0].strip() - - -def _get_git_next_version_suffix(branch_name): - datestamp = datetime.datetime.now().strftime('%Y%m%d') - if branch_name == 'milestone-proposed': - revno_prefix = "r" - else: - revno_prefix = "" - _run_shell_command("git fetch origin +refs/meta/*:refs/remotes/meta/*") - milestone_cmd = "git show meta/openstack/release:%s" % branch_name - milestonever = _run_shell_command(milestone_cmd) - if not milestonever: - milestonever = "" - post_version = _get_git_post_version() - # post version should look like: - # 0.1.1.4.gcc9e28a - # where the bit after the last . 
is the short sha, and the bit between - # the last and second to last is the revno count - (revno, sha) = post_version.split(".")[-2:] - first_half = "%s~%s" % (milestonever, datestamp) - second_half = "%s%s.%s" % (revno_prefix, revno, sha) - return ".".join((first_half, second_half)) - - -def _get_git_current_tag(): - return _run_shell_command("git tag --contains HEAD") - - -def _get_git_tag_info(): - return _run_shell_command("git describe --tags") - - -def _get_git_post_version(): - current_tag = _get_git_current_tag() - if current_tag is not None: - return current_tag - else: - tag_info = _get_git_tag_info() - if tag_info is None: - base_version = "0.0" - cmd = "git --no-pager log --oneline" - out = _run_shell_command(cmd) - revno = len(out.split("\n")) - sha = _run_shell_command("git describe --always") - else: - tag_infos = tag_info.split("-") - base_version = "-".join(tag_infos[:-2]) - (revno, sha) = tag_infos[-2:] - return "%s.%s.%s" % (base_version, revno, sha) - - -def write_git_changelog(): - """Write a changelog based on the git changelog.""" - new_changelog = 'ChangeLog' - if not os.getenv('SKIP_WRITE_GIT_CHANGELOG'): - if os.path.isdir('.git'): - git_log_cmd = 'git log --stat' - changelog = _run_shell_command(git_log_cmd) - mailmap = parse_mailmap() - with open(new_changelog, "w") as changelog_file: - changelog_file.write(canonicalize_emails(changelog, mailmap)) - else: - open(new_changelog, 'w').close() - - -def generate_authors(): - """Create AUTHORS file using git commits.""" - jenkins_email = 'jenkins@review.openstack.org' - old_authors = 'AUTHORS.in' - new_authors = 'AUTHORS' - if not os.getenv('SKIP_GENERATE_AUTHORS'): - if os.path.isdir('.git'): - # don't include jenkins email address in AUTHORS file - git_log_cmd = ("git log --format='%aN <%aE>' | sort -u | " - "grep -v " + jenkins_email) - changelog = _run_shell_command(git_log_cmd) - mailmap = parse_mailmap() - with open(new_authors, 'w') as new_authors_fh: - new_authors_fh.write(canonicalize_emails(changelog, mailmap)) - if os.path.exists(old_authors): - with open(old_authors, "r") as old_authors_fh: - new_authors_fh.write('\n' + old_authors_fh.read()) - else: - open(new_authors, 'w').close() - - -_rst_template = """%(heading)s -%(underline)s - -.. automodule:: %(module)s - :members: - :undoc-members: - :show-inheritance: -""" - - -def read_versioninfo(project): - """Read the versioninfo file. 
If it doesn't exist, we're in a github - zipball, and there's really no way to know what version we really - are, but that should be ok, because the utility of that should be - just about nil if this code path is in use in the first place.""" - versioninfo_path = os.path.join(project, 'versioninfo') - if os.path.exists(versioninfo_path): - with open(versioninfo_path, 'r') as vinfo: - version = vinfo.read().strip() - else: - version = "0.0.0" - return version - - -def write_versioninfo(project, version): - """Write a simple file containing the version of the package.""" - open(os.path.join(project, 'versioninfo'), 'w').write("%s\n" % version) - - -def get_cmdclass(): - """Return dict of commands to run from setup.py.""" - - cmdclass = dict() - - def _find_modules(arg, dirname, files): - for filename in files: - if filename.endswith('.py') and filename != '__init__.py': - arg["%s.%s" % (dirname.replace('/', '.'), - filename[:-3])] = True - - class LocalSDist(sdist.sdist): - """Builds the ChangeLog and Authors files from VC first.""" - - def run(self): - write_git_changelog() - generate_authors() - # sdist.sdist is an old style class, can't use super() - sdist.sdist.run(self) - - cmdclass['sdist'] = LocalSDist - - # If Sphinx is installed on the box running setup.py, - # enable setup.py to build the documentation, otherwise, - # just ignore it - try: - from sphinx.setup_command import BuildDoc - - class LocalBuildDoc(BuildDoc): - def generate_autoindex(self): - print "**Autodocumenting from %s" % os.path.abspath(os.curdir) - modules = {} - option_dict = self.distribution.get_option_dict('build_sphinx') - source_dir = os.path.join(option_dict['source_dir'][1], 'api') - if not os.path.exists(source_dir): - os.makedirs(source_dir) - for pkg in self.distribution.packages: - if '.' not in pkg: - os.path.walk(pkg, _find_modules, modules) - module_list = modules.keys() - module_list.sort() - autoindex_filename = os.path.join(source_dir, 'autoindex.rst') - with open(autoindex_filename, 'w') as autoindex: - autoindex.write(""".. 
toctree:: - :maxdepth: 1 - -""") - for module in module_list: - output_filename = os.path.join(source_dir, - "%s.rst" % module) - heading = "The :mod:`%s` Module" % module - underline = "=" * len(heading) - values = dict(module=module, heading=heading, - underline=underline) - - print "Generating %s" % output_filename - with open(output_filename, 'w') as output_file: - output_file.write(_rst_template % values) - autoindex.write(" %s.rst\n" % module) - - def run(self): - if not os.getenv('SPHINX_DEBUG'): - self.generate_autoindex() - - for builder in ['html', 'man']: - self.builder = builder - self.finalize_options() - self.project = self.distribution.get_name() - self.version = self.distribution.get_version() - self.release = self.distribution.get_version() - BuildDoc.run(self) - cmdclass['build_sphinx'] = LocalBuildDoc - except ImportError: - pass - - return cmdclass - - -def get_git_branchname(): - for branch in _run_shell_command("git branch --color=never").split("\n"): - if branch.startswith('*'): - _branch_name = branch.split()[1].strip() - if _branch_name == "(no": - _branch_name = "no-branch" - return _branch_name - - -def get_pre_version(projectname, base_version): - """Return a version which is leading up to a version that will - be released in the future.""" - if os.path.isdir('.git'): - current_tag = _get_git_current_tag() - if current_tag is not None: - version = current_tag - else: - branch_name = os.getenv('BRANCHNAME', - os.getenv('GERRIT_REFNAME', - get_git_branchname())) - version_suffix = _get_git_next_version_suffix(branch_name) - version = "%s~%s" % (base_version, version_suffix) - write_versioninfo(projectname, version) - return version - else: - version = read_versioninfo(projectname) - return version - - -def get_post_version(projectname): - """Return a version which is equal to the tag that's on the current - revision if there is one, or tag plus number of additional revisions - if the current revision has no tag.""" - - if os.path.isdir('.git'): - version = _get_git_post_version() - write_versioninfo(projectname, version) - return version - return read_versioninfo(projectname) diff --git a/cinder/openstack/common/sslutils.py b/cinder/openstack/common/sslutils.py new file mode 100644 index 0000000000..f1c4589ff3 --- /dev/null +++ b/cinder/openstack/common/sslutils.py @@ -0,0 +1,98 @@ +# Copyright 2013 IBM Corp. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import os +import ssl + +from oslo.config import cfg + +from cinder.openstack.common.gettextutils import _ + + +ssl_opts = [ + cfg.StrOpt('ca_file', + default=None, + help="CA certificate file to use to verify " + "connecting clients"), + cfg.StrOpt('cert_file', + default=None, + help="Certificate file to use when starting " + "the server securely"), + cfg.StrOpt('key_file', + default=None, + help="Private key file to use when starting " + "the server securely"), +] + + +CONF = cfg.CONF +CONF.register_opts(ssl_opts, "ssl") + + +def is_enabled(): + cert_file = CONF.ssl.cert_file + key_file = CONF.ssl.key_file + ca_file = CONF.ssl.ca_file + use_ssl = cert_file or key_file + + if cert_file and not os.path.exists(cert_file): + raise RuntimeError(_("Unable to find cert_file : %s") % cert_file) + + if ca_file and not os.path.exists(ca_file): + raise RuntimeError(_("Unable to find ca_file : %s") % ca_file) + + if key_file and not os.path.exists(key_file): + raise RuntimeError(_("Unable to find key_file : %s") % key_file) + + if use_ssl and (not cert_file or not key_file): + raise RuntimeError(_("When running server in SSL mode, you must " + "specify both a cert_file and key_file " + "option value in your configuration file")) + + return use_ssl + + +def wrap(sock): + ssl_kwargs = { + 'server_side': True, + 'certfile': CONF.ssl.cert_file, + 'keyfile': CONF.ssl.key_file, + 'cert_reqs': ssl.CERT_NONE, + } + + if CONF.ssl.ca_file: + ssl_kwargs['ca_certs'] = CONF.ssl.ca_file + ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED + + return ssl.wrap_socket(sock, **ssl_kwargs) + + +_SSL_PROTOCOLS = { + "tlsv1": ssl.PROTOCOL_TLSv1, + "sslv23": ssl.PROTOCOL_SSLv23, + "sslv3": ssl.PROTOCOL_SSLv3 +} + +try: + _SSL_PROTOCOLS["sslv2"] = ssl.PROTOCOL_SSLv2 +except AttributeError: + pass + + +def validate_ssl_version(version): + key = version.lower() + try: + return _SSL_PROTOCOLS[key] + except KeyError: + raise RuntimeError(_("Invalid SSL version : %s") % version) diff --git a/cinder/openstack/common/strutils.py b/cinder/openstack/common/strutils.py new file mode 100644 index 0000000000..8cd9965996 --- /dev/null +++ b/cinder/openstack/common/strutils.py @@ -0,0 +1,216 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +System-level utilities and helper functions. +""" + +import re +import sys +import unicodedata + +from cinder.openstack.common.gettextutils import _ + + +# Used for looking up extensions of text +# to their 'multiplied' byte amount +BYTE_MULTIPLIERS = { + '': 1, + 't': 1024 ** 4, + 'g': 1024 ** 3, + 'm': 1024 ** 2, + 'k': 1024, +} +BYTE_REGEX = re.compile(r'(^-?\d+)(\D*)') + +TRUE_STRINGS = ('1', 't', 'true', 'on', 'y', 'yes') +FALSE_STRINGS = ('0', 'f', 'false', 'off', 'n', 'no') + +SLUGIFY_STRIP_RE = re.compile(r"[^\w\s-]") +SLUGIFY_HYPHENATE_RE = re.compile(r"[-\s]+") + + +def int_from_bool_as_string(subject): + """Interpret a string as a boolean and return either 1 or 0. 
+ + Any string value in: + + ('True', 'true', 'On', 'on', '1') + + is interpreted as a boolean True. + + Useful for JSON-decoded stuff and config file parsing + """ + return bool_from_string(subject) and 1 or 0 + + +def bool_from_string(subject, strict=False): + """Interpret a string as a boolean. + + A case-insensitive match is performed such that strings matching 't', + 'true', 'on', 'y', 'yes', or '1' are considered True and, when + `strict=False`, anything else is considered False. + + Useful for JSON-decoded stuff and config file parsing. + + If `strict=True`, unrecognized values, including None, will raise a + ValueError which is useful when parsing values passed in from an API call. + Strings yielding False are 'f', 'false', 'off', 'n', 'no', or '0'. + """ + if not isinstance(subject, basestring): + subject = str(subject) + + lowered = subject.strip().lower() + + if lowered in TRUE_STRINGS: + return True + elif lowered in FALSE_STRINGS: + return False + elif strict: + acceptable = ', '.join( + "'%s'" % s for s in sorted(TRUE_STRINGS + FALSE_STRINGS)) + msg = _("Unrecognized value '%(val)s', acceptable values are:" + " %(acceptable)s") % {'val': subject, + 'acceptable': acceptable} + raise ValueError(msg) + else: + return False + + +def safe_decode(text, incoming=None, errors='strict'): + """Decodes incoming str using `incoming` if they're not already unicode. + + :param incoming: Text's current encoding + :param errors: Errors handling policy. See here for valid + values http://docs.python.org/2/library/codecs.html + :returns: text or a unicode `incoming` encoded + representation of it. + :raises TypeError: If text is not an isntance of basestring + """ + if not isinstance(text, basestring): + raise TypeError("%s can't be decoded" % type(text)) + + if isinstance(text, unicode): + return text + + if not incoming: + incoming = (sys.stdin.encoding or + sys.getdefaultencoding()) + + try: + return text.decode(incoming, errors) + except UnicodeDecodeError: + # Note(flaper87) If we get here, it means that + # sys.stdin.encoding / sys.getdefaultencoding + # didn't return a suitable encoding to decode + # text. This happens mostly when global LANG + # var is not set correctly and there's no + # default encoding. In this case, most likely + # python will use ASCII or ANSI encoders as + # default encodings but they won't be capable + # of decoding non-ASCII characters. + # + # Also, UTF-8 is being used since it's an ASCII + # extension. + return text.decode('utf-8', errors) + + +def safe_encode(text, incoming=None, + encoding='utf-8', errors='strict'): + """Encodes incoming str/unicode using `encoding`. + + If incoming is not specified, text is expected to be encoded with + current python's default encoding. (`sys.getdefaultencoding`) + + :param incoming: Text's current encoding + :param encoding: Expected encoding for text (Default UTF-8) + :param errors: Errors handling policy. See here for valid + values http://docs.python.org/2/library/codecs.html + :returns: text or a bytestring `encoding` encoded + representation of it. 
+    :raises TypeError: If text is not an instance of basestring
+    """
+    if not isinstance(text, basestring):
+        raise TypeError("%s can't be encoded" % type(text))
+
+    if not incoming:
+        incoming = (sys.stdin.encoding or
+                    sys.getdefaultencoding())
+
+    if isinstance(text, unicode):
+        return text.encode(encoding, errors)
+    elif text and encoding != incoming:
+        # Decode text before encoding it with `encoding`
+        text = safe_decode(text, incoming, errors)
+        return text.encode(encoding, errors)
+
+    return text
+
+
+def to_bytes(text, default=0):
+    """Converts a string into an integer of bytes.
+
+    Looks at the last characters of the text to determine
+    what conversion is needed to turn the input text into a byte number.
+    Supports "B, K(B), M(B), G(B), and T(B)". (case insensitive)
+
+    :param text: String input for bytes size conversion.
+    :param default: Default return value when text is blank.
+
+    """
+    match = BYTE_REGEX.search(text)
+    if match:
+        magnitude = int(match.group(1))
+        mult_key_org = match.group(2)
+        if not mult_key_org:
+            return magnitude
+    elif text:
+        msg = _('Invalid string format: %s') % text
+        raise TypeError(msg)
+    else:
+        return default
+    mult_key = mult_key_org.lower().replace('b', '', 1)
+    multiplier = BYTE_MULTIPLIERS.get(mult_key)
+    if multiplier is None:
+        msg = _('Unknown byte multiplier: %s') % mult_key_org
+        raise TypeError(msg)
+    return magnitude * multiplier
+
+
+def to_slug(value, incoming=None, errors="strict"):
+    """Normalize string.
+
+    Convert to lowercase, remove non-word characters, and convert spaces
+    to hyphens.
+
+    Inspired by Django's `slugify` filter.
+
+    :param value: Text to slugify
+    :param incoming: Text's current encoding
+    :param errors: Error handling policy. See here for valid
+        values http://docs.python.org/2/library/codecs.html
+    :returns: slugified unicode representation of `value`
+    :raises TypeError: If text is not an instance of basestring
+    """
+    value = safe_decode(value, incoming, errors)
+    # NOTE(aababilov): no need to use safe_(encode|decode) here:
+    # encodings are always "ascii", error handling is always "ignore"
+    # and types are always known (first: unicode; second: str)
+    value = unicodedata.normalize("NFKD", value).encode(
+        "ascii", "ignore").decode("ascii")
+    value = SLUGIFY_STRIP_RE.sub("", value).strip().lower()
+    return SLUGIFY_HYPHENATE_RE.sub("-", value)
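[Editor's note: an illustrative usage sketch for the strutils helpers added above. It is not part of the change itself and assumes a Python 2 cinder checkout where the module imports cleanly.]

    from cinder.openstack.common import strutils

    print(strutils.bool_from_string('YES'))    # True; 'yes' is in TRUE_STRINGS
    print(strutils.bool_from_string('maybe'))  # False under the lenient default
    try:
        strutils.bool_from_string('maybe', strict=True)
    except ValueError:
        print('strict mode rejects unrecognized values')

    print(strutils.to_bytes('10GB'))              # 10737418240, i.e. 10 * 1024 ** 3
    print(strutils.to_slug(u'Just a Snapshot!'))  # u'just-a-snapshot'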
diff --git a/cinder/openstack/common/threadgroup.py b/cinder/openstack/common/threadgroup.py
new file mode 100644
index 0000000000..edeaf30701
--- /dev/null
+++ b/cinder/openstack/common/threadgroup.py
@@ -0,0 +1,129 @@
+# Copyright 2012 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import eventlet
+from eventlet import greenpool
+from eventlet import greenthread
+
+from cinder.openstack.common import log as logging
+from cinder.openstack.common import loopingcall
+
+
+LOG = logging.getLogger(__name__)
+
+
+def _thread_done(gt, *args, **kwargs):
+    """Callback function to be passed to GreenThread.link() when we spawn().
+
+    Calls the :class:`ThreadGroup` to notify it.
+    """
+    kwargs['group'].thread_done(kwargs['thread'])
+
+
+class Thread(object):
+    """Wrapper around a greenthread that holds a reference to the
+    :class:`ThreadGroup`. The Thread will notify the :class:`ThreadGroup`
+    when it has finished so that it can be removed from the threads list.
+    """
+    def __init__(self, thread, group):
+        self.thread = thread
+        self.thread.link(_thread_done, group=group, thread=self)
+
+    def stop(self):
+        self.thread.kill()
+
+    def wait(self):
+        return self.thread.wait()
+
+    def link(self, func, *args, **kwargs):
+        self.thread.link(func, *args, **kwargs)
+
+
+class ThreadGroup(object):
+    """The point of the ThreadGroup class is to:
+
+    * keep track of timers and greenthreads (making it easier to stop them
+      when need be).
+    * provide an easy API to add timers.
+    """
+    def __init__(self, thread_pool_size=10):
+        self.pool = greenpool.GreenPool(thread_pool_size)
+        self.threads = []
+        self.timers = []
+
+    def add_dynamic_timer(self, callback, initial_delay=None,
+                          periodic_interval_max=None, *args, **kwargs):
+        timer = loopingcall.DynamicLoopingCall(callback, *args, **kwargs)
+        timer.start(initial_delay=initial_delay,
+                    periodic_interval_max=periodic_interval_max)
+        self.timers.append(timer)
+
+    def add_timer(self, interval, callback, initial_delay=None,
+                  *args, **kwargs):
+        pulse = loopingcall.FixedIntervalLoopingCall(callback, *args, **kwargs)
+        pulse.start(interval=interval,
+                    initial_delay=initial_delay)
+        self.timers.append(pulse)
+
+    def add_thread(self, callback, *args, **kwargs):
+        gt = self.pool.spawn(callback, *args, **kwargs)
+        th = Thread(gt, self)
+        self.threads.append(th)
+        return th
+
+    def thread_done(self, thread):
+        self.threads.remove(thread)
+
+    def stop(self):
+        current = greenthread.getcurrent()
+
+        # Iterate over a copy of self.threads so thread_done doesn't
+        # modify the list while we're iterating
+        for x in self.threads[:]:
+            if x is current:
+                # don't kill the current thread.
+                continue
+            try:
+                x.stop()
+            except Exception as ex:
+                LOG.exception(ex)
+
+        for x in self.timers:
+            try:
+                x.stop()
+            except Exception as ex:
+                LOG.exception(ex)
+        self.timers = []
+
+    def wait(self):
+        for x in self.timers:
+            try:
+                x.wait()
+            except eventlet.greenlet.GreenletExit:
+                pass
+            except Exception as ex:
+                LOG.exception(ex)
+        current = greenthread.getcurrent()
+
+        # Iterate over a copy of self.threads so thread_done doesn't
+        # modify the list while we're iterating
+        for x in self.threads[:]:
+            if x is current:
+                continue
+            try:
+                x.wait()
+            except eventlet.greenlet.GreenletExit:
+                pass
+            except Exception as ex:
+                LOG.exception(ex)
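[Editor's note: a minimal usage sketch for the ThreadGroup added above; illustrative only, not part of the change. It assumes a cinder checkout where eventlet and the logging/loopingcall modules import cleanly; real services typically call eventlet.monkey_patch() first.]

    import time

    from cinder.openstack.common import threadgroup

    def worker(name):
        time.sleep(0.1)
        print('%s done' % name)

    tg = threadgroup.ThreadGroup(thread_pool_size=2)
    for i in range(3):
        tg.add_thread(worker, 'worker-%d' % i)
    # Block until all spawned greenthreads finish; completed threads are
    # removed from tg.threads via the _thread_done link callback.
    tg.wait()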
diff --git a/cinder/openstack/common/timeutils.py b/cinder/openstack/common/timeutils.py
index 93b34fc5b1..d5ed81d3e3 100644
--- a/cinder/openstack/common/timeutils.py
+++ b/cinder/openstack/common/timeutils.py
@@ -1,6 +1,4 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 OpenStack LLC.
+# Copyright 2011 OpenStack Foundation.
 # All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -21,32 +19,38 @@
 import calendar
 import datetime
+import time
 
 import iso8601
+import six
 
-TIME_FORMAT = "%Y-%m-%dT%H:%M:%S"
-PERFECT_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%f"
+# ISO 8601 extended time format with microseconds
+_ISO8601_TIME_FORMAT_SUBSECOND = '%Y-%m-%dT%H:%M:%S.%f'
+_ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S'
+PERFECT_TIME_FORMAT = _ISO8601_TIME_FORMAT_SUBSECOND
 
-def isotime(at=None):
-    """Stringify time in ISO 8601 format"""
+def isotime(at=None, subsecond=False):
+    """Stringify time in ISO 8601 format."""
     if not at:
         at = utcnow()
-    str = at.strftime(TIME_FORMAT)
+    st = at.strftime(_ISO8601_TIME_FORMAT
+                     if not subsecond
+                     else _ISO8601_TIME_FORMAT_SUBSECOND)
     tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC'
-    str += ('Z' if tz == 'UTC' else tz)
-    return str
+    st += ('Z' if tz == 'UTC' else tz)
+    return st
 
 
 def parse_isotime(timestr):
-    """Parse time from ISO 8601 format"""
+    """Parse time from ISO 8601 format."""
     try:
         return iso8601.parse_date(timestr)
     except iso8601.ParseError as e:
-        raise ValueError(e.message)
+        raise ValueError(six.text_type(e))
     except TypeError as e:
-        raise ValueError(e.message)
+        raise ValueError(six.text_type(e))
 
 
 def strtime(at=None, fmt=PERFECT_TIME_FORMAT):
@@ -62,7 +66,7 @@ def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT):
 
 
 def normalize_time(timestamp):
-    """Normalize time in arbitrary timezone to UTC naive object"""
+    """Normalize time in arbitrary timezone to UTC naive object."""
     offset = timestamp.utcoffset()
     if offset is None:
         return timestamp
@@ -71,33 +75,71 @@
 def is_older_than(before, seconds):
     """Return True if before is older than seconds."""
+    if isinstance(before, six.string_types):
+        before = parse_strtime(before).replace(tzinfo=None)
+    else:
+        before = before.replace(tzinfo=None)
+
     return utcnow() - before > datetime.timedelta(seconds=seconds)
 
 
+def is_newer_than(after, seconds):
+    """Return True if after is newer than seconds."""
+    if isinstance(after, six.string_types):
+        after = parse_strtime(after).replace(tzinfo=None)
+    else:
+        after = after.replace(tzinfo=None)
+
+    return after - utcnow() > datetime.timedelta(seconds=seconds)
+
+
 def utcnow_ts():
     """Timestamp version of our utcnow function."""
+    if utcnow.override_time is None:
+        # NOTE(kgriffs): This is several times faster
+        # than going through calendar.timegm(...)
+        return int(time.time())
+
     return calendar.timegm(utcnow().timetuple())
 
 
 def utcnow():
     """Overridable version of utils.utcnow."""
     if utcnow.override_time:
-        return utcnow.override_time
+        try:
+            return utcnow.override_time.pop(0)
+        except AttributeError:
+            return utcnow.override_time
     return datetime.datetime.utcnow()
 
 
+def iso8601_from_timestamp(timestamp):
+    """Returns an iso8601 formatted date from timestamp."""
+    return isotime(datetime.datetime.utcfromtimestamp(timestamp))
+
+
 utcnow.override_time = None
 
 
-def set_time_override(override_time=datetime.datetime.utcnow()):
-    """Override utils.utcnow to return a constant time."""
-    utcnow.override_time = override_time
+def set_time_override(override_time=None):
+    """Overrides utils.utcnow.
+
+    Make it return a constant time or a list thereof, one at a time.
+
+    :param override_time: datetime instance or list thereof. If not
+        given, defaults to the current UTC time.
+    """
+    utcnow.override_time = override_time or datetime.datetime.utcnow()
 
 
 def advance_time_delta(timedelta):
     """Advance overridden time using a datetime.timedelta."""
     assert(not utcnow.override_time is None)
-    utcnow.override_time += timedelta
+    try:
+        # Iterating only works for the list form; a plain datetime
+        # raises TypeError and is handled below. Rebuild the list so
+        # the advanced values are actually stored.
+        utcnow.override_time = [dt + timedelta
+                                for dt in utcnow.override_time]
+    except TypeError:
+        utcnow.override_time += timedelta
 
 
 def advance_time_seconds(seconds):
@@ -113,7 +155,8 @@ def clear_time_override():
 def marshall_now(now=None):
     """Make an rpc-safe datetime with microseconds.
 
-    Note: tzinfo is stripped, but not required for relative times."""
+    Note: tzinfo is stripped, but not required for relative times.
+    """
     if not now:
         now = utcnow()
     return dict(day=now.day, month=now.month, year=now.year, hour=now.hour,
@@ -130,3 +173,38 @@ def unmarshall_time(tyme):
                              minute=tyme['minute'],
                              second=tyme['second'],
                              microsecond=tyme['microsecond'])
+
+
+def delta_seconds(before, after):
+    """Return the difference between two timing objects.
+
+    Compute the difference in seconds between two date, time, or
+    datetime objects (as a float, to microsecond resolution).
+    """
+    delta = after - before
+    return total_seconds(delta)
+
+
+def total_seconds(delta):
+    """Return the total seconds of a datetime.timedelta object.
+
+    datetime.timedelta does not have a total_seconds() method in
+    Python 2.6, so fall back to calculating it manually there.
+    """
+    try:
+        return delta.total_seconds()
+    except AttributeError:
+        return ((delta.days * 24 * 3600) + delta.seconds +
+                float(delta.microseconds) / (10 ** 6))
+
+
+def is_soon(dt, window):
+    """Determines if time is going to happen in the next window seconds.
+
+    :param dt: the time
+    :param window: minimum seconds to remain to consider the time not soon
+
+    :return: True if expiration is within the given duration
+    """
+    soon = (utcnow() + datetime.timedelta(seconds=window))
+    return normalize_time(dt) <= soon
diff --git a/cinder/openstack/common/uuidutils.py b/cinder/openstack/common/uuidutils.py
new file mode 100644
index 0000000000..7608acb942
--- /dev/null
+++ b/cinder/openstack/common/uuidutils.py
@@ -0,0 +1,39 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2012 Intel Corporation.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+UUID related utilities and helper functions.
+"""
+
+import uuid
+
+
+def generate_uuid():
+    return str(uuid.uuid4())
+
+
+def is_uuid_like(val):
+    """Returns validation of a value as a UUID.
+
+    For our purposes, a UUID is a canonical-form string:
+    aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa
+
+    """
+    try:
+        return str(uuid.UUID(val)) == val
+    except (TypeError, ValueError, AttributeError):
+        return False
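[Editor's note: an illustrative sketch of the reworked time override above, including the new list form in which each utcnow() call consumes one value; not part of the change.]

    import datetime

    from cinder.openstack.common import timeutils

    timeutils.set_time_override(datetime.datetime(2013, 1, 1))
    print(timeutils.isotime())        # 2013-01-01T00:00:00Z
    timeutils.advance_time_seconds(90)
    print(timeutils.utcnow())         # 2013-01-01 00:01:30
    timeutils.clear_time_override()

    # A list override hands out one value per utcnow() call.
    timeutils.set_time_override([datetime.datetime(2013, 1, 1),
                                 datetime.datetime(2013, 1, 2)])
    print(timeutils.utcnow())         # datetime(2013, 1, 1, ...)
    print(timeutils.utcnow())         # datetime(2013, 1, 2, ...)
    timeutils.clear_time_override()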
diff --git a/cinder/openstack/common/versionutils.py b/cinder/openstack/common/versionutils.py
new file mode 100644
index 0000000000..8d3d952d8c
--- /dev/null
+++ b/cinder/openstack/common/versionutils.py
@@ -0,0 +1,148 @@
+# Copyright (c) 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Helpers for comparing version strings.
+"""
+
+import functools
+import pkg_resources
+
+from cinder.openstack.common.gettextutils import _
+from cinder.openstack.common import log as logging
+
+
+LOG = logging.getLogger(__name__)
+
+
+class deprecated(object):
+    """A decorator to mark callables as deprecated.
+
+    This decorator logs a deprecation message when the callable it
+    decorates is used. The message will include the release where the
+    callable was deprecated, the release where it may be removed and,
+    optionally, a replacement.
+
+    Examples:
+
+    1. Specifying the required deprecated release
+
+    >>> @deprecated(as_of=deprecated.ICEHOUSE)
+    ... def a(): pass
+
+    2. Specifying a replacement:
+
+    >>> @deprecated(as_of=deprecated.ICEHOUSE, in_favor_of='f()')
+    ... def b(): pass
+
+    3. Specifying the release where the functionality may be removed:
+
+    >>> @deprecated(as_of=deprecated.ICEHOUSE, remove_in=+1)
+    ... def c(): pass
+
+    """
+
+    FOLSOM = 'F'
+    GRIZZLY = 'G'
+    HAVANA = 'H'
+    ICEHOUSE = 'I'
+
+    _RELEASES = {
+        'F': 'Folsom',
+        'G': 'Grizzly',
+        'H': 'Havana',
+        'I': 'Icehouse',
+    }
+
+    _deprecated_msg_with_alternative = _(
+        '%(what)s is deprecated as of %(as_of)s in favor of '
+        '%(in_favor_of)s and may be removed in %(remove_in)s.')
+
+    _deprecated_msg_no_alternative = _(
+        '%(what)s is deprecated as of %(as_of)s and may be '
+        'removed in %(remove_in)s. It will not be superseded.')
+
+    def __init__(self, as_of, in_favor_of=None, remove_in=2, what=None):
+        """Initialize the decorator.
+
+        :param as_of: the release deprecating the callable. Constants
+            are defined in this class for convenience.
+        :param in_favor_of: the replacement for the callable (optional)
+        :param remove_in: an integer specifying how many releases to wait
+            before removing (default: 2)
+        :param what: name of the thing being deprecated (default: the
+            callable's name)
+
+        """
+        self.as_of = as_of
+        self.in_favor_of = in_favor_of
+        self.remove_in = remove_in
+        self.what = what
+
+    def __call__(self, func):
+        if not self.what:
+            self.what = func.__name__ + '()'
+
+        @functools.wraps(func)
+        def wrapped(*args, **kwargs):
+            msg, details = self._build_message()
+            LOG.deprecated(msg, details)
+            return func(*args, **kwargs)
+        return wrapped
+
+    def _get_safe_to_remove_release(self, release):
+        # TODO(dstanek): this method will have to be reimplemented when
+        # we get to the X release because once we get to the Y
+        # release, what is Y+2?
+ new_release = chr(ord(release) + self.remove_in) + if new_release in self._RELEASES: + return self._RELEASES[new_release] + else: + return new_release + + def _build_message(self): + details = dict(what=self.what, + as_of=self._RELEASES[self.as_of], + remove_in=self._get_safe_to_remove_release(self.as_of)) + + if self.in_favor_of: + details['in_favor_of'] = self.in_favor_of + msg = self._deprecated_msg_with_alternative + else: + msg = self._deprecated_msg_no_alternative + return msg, details + + +def is_compatible(requested_version, current_version, same_major=True): + """Determine whether `requested_version` is satisfied by + `current_version`; in other words, `current_version` is >= + `requested_version`. + + :param requested_version: version to check for compatibility + :param current_version: version to check against + :param same_major: if True, the major version must be identical between + `requested_version` and `current_version`. This is used when a + major-version difference indicates incompatibility between the two + versions. Since this is the common-case in practice, the default is + True. + :returns: True if compatible, False if not + """ + requested_parts = pkg_resources.parse_version(requested_version) + current_parts = pkg_resources.parse_version(current_version) + + if same_major and (requested_parts[0] != current_parts[0]): + return False + + return current_parts >= requested_parts diff --git a/cinder/policy.py b/cinder/policy.py index 95cfd4c25b..9e3999efaf 100644 --- a/cinder/policy.py +++ b/cinder/policy.py @@ -1,6 +1,4 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright (c) 2011 OpenStack, LLC. +# Copyright (c) 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -17,9 +15,11 @@ """Policy Engine For Cinder""" + +from oslo.config import cfg + from cinder import exception -from cinder import flags -from cinder.openstack.common import cfg +from cinder.openstack.common.gettextutils import _ from cinder.openstack.common import policy from cinder import utils @@ -30,11 +30,10 @@ help=_('JSON file representing policy')), cfg.StrOpt('policy_default_rule', default='default', - help=_('Rule checked when requested rule is not found')), - ] + help=_('Rule checked when requested rule is not found')), ] -FLAGS = flags.FLAGS -FLAGS.register_opts(policy_opts) +CONF = cfg.CONF +CONF.register_opts(policy_opts) _POLICY_PATH = None _POLICY_CACHE = {} @@ -52,14 +51,28 @@ def init(): global _POLICY_PATH global _POLICY_CACHE if not _POLICY_PATH: - _POLICY_PATH = utils.find_config(FLAGS.policy_file) + _POLICY_PATH = utils.find_config(CONF.policy_file) utils.read_cached_file(_POLICY_PATH, _POLICY_CACHE, reload_func=_set_brain) def _set_brain(data): - default_rule = FLAGS.policy_default_rule - policy.set_brain(policy.HttpBrain.load_json(data, default_rule)) + default_rule = CONF.policy_default_rule + policy.set_brain(policy.Brain.load_json(data, default_rule)) + + +def enforce_action(context, action): + """Checks that the action can be done by the given context. + + Applies a check to ensure the context's project_id and user_id can be + applied to the given action using the policy enforcement api. 
+ """ + + target = { + 'project_id': context.project_id, + 'user_id': context.user_id, + } + enforce(context, action, target) def enforce(context, action, target): diff --git a/cinder/quota.py b/cinder/quota.py index 8c6057d42d..6e48ea4104 100644 --- a/cinder/quota.py +++ b/cinder/quota.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. @@ -16,14 +14,16 @@ # License for the specific language governing permissions and limitations # under the License. -"""Quotas for instances, volumes, and floating ips.""" +"""Quotas for volumes.""" + import datetime +from oslo.config import cfg + +from cinder import context from cinder import db from cinder import exception -from cinder import flags -from cinder.openstack.common import cfg from cinder.openstack.common import importutils from cinder.openstack.common import log as logging from cinder.openstack.common import timeutils @@ -35,9 +35,13 @@ cfg.IntOpt('quota_volumes', default=10, help='number of volumes allowed per project'), + cfg.IntOpt('quota_snapshots', + default=10, + help='number of volume snapshots allowed per project'), cfg.IntOpt('quota_gigabytes', default=1000, - help='number of volume gigabytes allowed per project'), + help='number of volume gigabytes (snapshots are also included) ' + 'allowed per project'), cfg.IntOpt('reservation_expire', default=86400, help='number of seconds until a reservation expires'), @@ -50,47 +54,67 @@ cfg.StrOpt('quota_driver', default='cinder.quota.DbQuotaDriver', help='default driver to use for quota checks'), - ] + cfg.BoolOpt('use_default_quota_class', + default=True, + help='whether to use default quota class for default quota'), ] -FLAGS = flags.FLAGS -FLAGS.register_opts(quota_opts) +CONF = cfg.CONF +CONF.register_opts(quota_opts) class DbQuotaDriver(object): - """ - Driver to perform necessary checks to enforce quotas and obtain - quota information. The default driver utilizes the local - database. + + """Driver to perform check to enforcement of quotas. + + Also allows to obtain quota information. + The default driver utilizes the local database. """ - def get_by_project(self, context, project_id, resource): + def get_by_project(self, context, project_id, resource_name): """Get a specific quota by project.""" - return db.quota_get(context, project_id, resource) + return db.quota_get(context, project_id, resource_name) - def get_by_class(self, context, quota_class, resource): + def get_by_class(self, context, quota_class, resource_name): """Get a specific quota by quota class.""" - return db.quota_class_get(context, quota_class, resource) + return db.quota_class_get(context, quota_class, resource_name) + + def get_default(self, context, resource): + """Get a specific default quota for a resource.""" + + default_quotas = db.quota_class_get_default(context) + return default_quotas.get(resource.name, resource.default) def get_defaults(self, context, resources): """Given a list of resources, retrieve the default quotas. + Use the class quotas named `_DEFAULT_QUOTA_NAME` as default quotas, + if it exists. + :param context: The request context, for access checks. :param resources: A dictionary of the registered resources. 
""" quotas = {} + default_quotas = {} + if CONF.use_default_quota_class: + default_quotas = db.quota_class_get_default(context) for resource in resources.values(): - quotas[resource.name] = resource.default + if resource.name not in default_quotas: + LOG.deprecated(_("Default quota for resource: %(res)s is set " + "by the default quota flag: quota_%(res)s, " + "it is now deprecated. Please use the " + "the default quota class for default " + "quota.") % {'res': resource.name}) + quotas[resource.name] = default_quotas.get(resource.name, + resource.default) return quotas def get_class_quotas(self, context, resources, quota_class, defaults=True): - """ - Given a list of resources, retrieve the quotas for the given - quota class. + """Given list of resources, retrieve the quotas for given quota class. :param context: The request context, for access checks. :param resources: A dictionary of the registered resources. @@ -102,19 +126,25 @@ def get_class_quotas(self, context, resources, quota_class, """ quotas = {} + default_quotas = {} class_quotas = db.quota_class_get_all_by_name(context, quota_class) + if defaults: + default_quotas = db.quota_class_get_default(context) for resource in resources.values(): - if defaults or resource.name in class_quotas: - quotas[resource.name] = class_quotas.get(resource.name, - resource.default) + if resource.name in class_quotas: + quotas[resource.name] = class_quotas[resource.name] + continue + + if defaults: + quotas[resource.name] = default_quotas.get(resource.name, + resource.default) return quotas def get_project_quotas(self, context, resources, project_id, quota_class=None, defaults=True, usages=True): - """ - Given a list of resources, retrieve the quotas for the given + """Given a list of resources, retrieve the quotas for the given project. :param context: The request context, for access checks. @@ -150,15 +180,19 @@ def get_project_quotas(self, context, resources, project_id, else: class_quotas = {} + default_quotas = self.get_defaults(context, resources) + for resource in resources.values(): # Omit default/quota class values if not defaults and resource.name not in project_quotas: continue quotas[resource.name] = dict( - limit=project_quotas.get(resource.name, class_quotas.get( - resource.name, resource.default)), - ) + limit=project_quotas.get( + resource.name, + class_quotas.get(resource.name, + default_quotas[resource.name])), + ) # Include usages if desired. This is optional because one # internal consumer of this interface wants to access the @@ -167,16 +201,15 @@ def get_project_quotas(self, context, resources, project_id, usage = project_usages.get(resource.name, {}) quotas[resource.name].update( in_use=usage.get('in_use', 0), - reserved=usage.get('reserved', 0), - ) + reserved=usage.get('reserved', 0), ) return quotas - def _get_quotas(self, context, resources, keys, has_sync): - """ - A helper method which retrieves the quotas for the specific - resources identified by keys, and which apply to the current - context. + def _get_quotas(self, context, resources, keys, has_sync, project_id=None): + """A helper method which retrieves the quotas for specific resources. + + This specific resource is identified by keys, and which apply to the + current context. :param context: The request context, for access checks. :param resources: A dictionary of the registered resources. @@ -185,6 +218,9 @@ def _get_quotas(self, context, resources, keys, has_sync): have a sync attribute; if False, indicates that the resource must NOT have a sync attribute. 
+ :param project_id: Specify the project_id if current context + is admin and admin wants to impact on + common user's tenant. """ # Filter resources @@ -203,12 +239,12 @@ def _get_quotas(self, context, resources, keys, has_sync): # Grab and return the quotas (without usages) quotas = self.get_project_quotas(context, sub_resources, - context.project_id, + project_id, context.quota_class, usages=False) return dict((k, v['limit']) for k, v in quotas.items()) - def limit_check(self, context, resources, values): + def limit_check(self, context, resources, values, project_id=None): """Check simple quota limits. For limits--those quotas for which there is no usage @@ -228,6 +264,9 @@ def limit_check(self, context, resources, values): :param resources: A dictionary of the registered resources. :param values: A dictionary of the values to check against the quota. + :param project_id: Specify the project_id if current context + is admin and admin wants to impact on + common user's tenant. """ # Ensure no value is less than zero @@ -235,9 +274,13 @@ def limit_check(self, context, resources, values): if unders: raise exception.InvalidQuotaValue(unders=sorted(unders)) + # If project_id is None, then we use the project_id in context + if project_id is None: + project_id = context.project_id + # Get the applicable quotas quotas = self._get_quotas(context, resources, values.keys(), - has_sync=False) + has_sync=False, project_id=project_id) # Check the quotas and construct a list of the resources that # would be put over limit by the desired values overs = [key for key, val in values.items() @@ -246,7 +289,8 @@ def limit_check(self, context, resources, values): raise exception.OverQuota(overs=sorted(overs), quotas=quotas, usages={}) - def reserve(self, context, resources, deltas, expire=None): + def reserve(self, context, resources, deltas, expire=None, + project_id=None): """Check quotas and reserve resources. For counting quotas--those quotas for which there is a usage @@ -276,11 +320,14 @@ def reserve(self, context, resources, deltas, expire=None): default expiration time set by --default-reservation-expire will be used (this value will be treated as a number of seconds). + :param project_id: Specify the project_id if current context + is admin and admin wants to impact on + common user's tenant. """ # Set up the reservation expiration if expire is None: - expire = FLAGS.reservation_expire + expire = CONF.reservation_expire if isinstance(expire, (int, long)): expire = datetime.timedelta(seconds=expire) if isinstance(expire, datetime.timedelta): @@ -288,12 +335,16 @@ def reserve(self, context, resources, deltas, expire=None): if not isinstance(expire, datetime.datetime): raise exception.InvalidReservationExpiration(expire=expire) + # If project_id is None, then we use the project_id in context + if project_id is None: + project_id = context.project_id + # Get the applicable quotas. # NOTE(Vek): We're not worried about races at this point. # Yes, the admin may be in the process of reducing # quotas, but that's a pretty rare thing. quotas = self._get_quotas(context, resources, deltas.keys(), - has_sync=True) + has_sync=True, project_id=project_id) # NOTE(Vek): Most of the work here has to be done in the DB # API, because we have to do it in a transaction, @@ -301,32 +352,45 @@ def reserve(self, context, resources, deltas, expire=None): # session isn't available outside the DBAPI, we # have to do the work there. 
return db.quota_reserve(context, resources, quotas, deltas, expire, - FLAGS.until_refresh, FLAGS.max_age) + CONF.until_refresh, CONF.max_age, + project_id=project_id) - def commit(self, context, reservations): + def commit(self, context, reservations, project_id=None): """Commit reservations. :param context: The request context, for access checks. :param reservations: A list of the reservation UUIDs, as returned by the reserve() method. + :param project_id: Specify the project_id if current context + is admin and admin wants to impact on + common user's tenant. """ + # If project_id is None, then we use the project_id in context + if project_id is None: + project_id = context.project_id - db.reservation_commit(context, reservations) + db.reservation_commit(context, reservations, project_id=project_id) - def rollback(self, context, reservations): + def rollback(self, context, reservations, project_id=None): """Roll back reservations. :param context: The request context, for access checks. :param reservations: A list of the reservation UUIDs, as returned by the reserve() method. + :param project_id: Specify the project_id if current context + is admin and admin wants to impact on + common user's tenant. """ + # If project_id is None, then we use the project_id in context + if project_id is None: + project_id = context.project_id - db.reservation_rollback(context, reservations) + db.reservation_rollback(context, reservations, project_id=project_id) def destroy_all_by_project(self, context, project_id): - """ - Destroy all quotas, usages, and reservations associated with a - project. + """Destroy all that is associated with a project. + + This includes quotas, usages and reservations. :param context: The request context, for access checks. :param project_id: The ID of the project being deleted. @@ -350,10 +414,9 @@ class BaseResource(object): """Describe a single resource for quota checking.""" def __init__(self, name, flag=None): - """ - Initializes a Resource. + """Initializes a Resource. - :param name: The name of the resource, i.e., "instances". + :param name: The name of the resource, i.e., "volumes". :param flag: The name of the flag or configuration option which specifies the default value of the quota for this resource. @@ -363,9 +426,7 @@ def __init__(self, name, flag=None): self.flag = flag def quota(self, driver, context, **kwargs): - """ - Given a driver and context, obtain the quota for this - resource. + """Given a driver and context, obtain the quota for this resource. :param driver: A quota driver. :param context: The request context. @@ -407,24 +468,23 @@ def quota(self, driver, context, **kwargs): pass # OK, return the default - return self.default + return driver.get_default(context, self) @property def default(self): """Return the default value of the quota.""" - return FLAGS[self.flag] if self.flag else -1 + return CONF[self.flag] if self.flag else -1 class ReservableResource(BaseResource): """Describe a reservable resource.""" def __init__(self, name, sync, flag=None): - """ - Initializes a ReservableResource. + """Initializes a ReservableResource. Reservable resources are those resources which directly - correspond to objects in the database, i.e., instances, cores, + correspond to objects in the database, i.e., volumes, gigabytes, etc. A ReservableResource must be constructed with a usage synchronization function, which will be called to determine the current counts of one or more resources. 
@@ -439,9 +499,9 @@ def __init__(self, name, sync, flag=None): synchronization functions may be associated with more than one ReservableResource. - :param name: The name of the resource, i.e., "instances". - :param sync: A callable which returns a dictionary to - resynchronize the in_use count for one or more + :param name: The name of the resource, i.e., "volumes". + :param sync: A dbapi methods name which returns a dictionary + to resynchronize the in_use count for one or more resources, as described above. :param flag: The name of the flag or configuration option which specifies the default value of the quota @@ -459,17 +519,13 @@ class AbsoluteResource(BaseResource): class CountableResource(AbsoluteResource): - """ - Describe a resource where the counts aren't based solely on the - project ID. - """ + """Describe a resource where counts aren't based only on the project ID.""" def __init__(self, name, count, flag=None): - """ - Initializes a CountableResource. + """Initializes a CountableResource. Countable resources are those resources which directly - correspond to objects in the database, i.e., instances, cores, + correspond to objects in the database, i.e., volumes, gigabytes, etc., but for which a count by project ID is inappropriate. A CountableResource must be constructed with a counting function, which will be called to determine the current counts @@ -485,7 +541,7 @@ def __init__(self, name, count, flag=None): required functionality, until a better approach to solving this problem can be evolved. - :param name: The name of the resource, i.e., "instances". + :param name: The name of the resource, i.e., "volumes". :param count: A callable which returns the count of the resource. The arguments passed are as described above. @@ -498,6 +554,22 @@ def __init__(self, name, count, flag=None): self.count = count +class VolumeTypeResource(ReservableResource): + """ReservableResource for a specific volume type.""" + + def __init__(self, part_name, volume_type): + """Initializes a VolumeTypeResource. + + :param part_name: The kind of resource, i.e., "volumes". + :param volume_type: The volume type for this resource. 
+ """ + + self.volume_type_name = volume_type['name'] + self.volume_type_id = volume_type['id'] + name = "%s_%s" % (part_name, self.volume_type_name) + super(VolumeTypeResource, self).__init__(name, "_sync_%s" % part_name) + + class QuotaEngine(object): """Represent the set of recognized quotas.""" @@ -505,7 +577,7 @@ def __init__(self, quota_driver_class=None): """Initialize a Quota object.""" if not quota_driver_class: - quota_driver_class = FLAGS.quota_driver + quota_driver_class = CONF.quota_driver if isinstance(quota_driver_class, basestring): quota_driver_class = importutils.import_object(quota_driver_class) @@ -514,7 +586,7 @@ def __init__(self, quota_driver_class=None): self._driver = quota_driver_class def __contains__(self, resource): - return resource in self._resources + return resource in self.resources def register_resource(self, resource): """Register a resource.""" @@ -527,15 +599,20 @@ def register_resources(self, resources): for resource in resources: self.register_resource(resource) - def get_by_project(self, context, project_id, resource): + def get_by_project(self, context, project_id, resource_name): """Get a specific quota by project.""" - return self._driver.get_by_project(context, project_id, resource) + return self._driver.get_by_project(context, project_id, resource_name) - def get_by_class(self, context, quota_class, resource): + def get_by_class(self, context, quota_class, resource_name): """Get a specific quota by quota class.""" - return self._driver.get_by_class(context, quota_class, resource) + return self._driver.get_by_class(context, quota_class, resource_name) + + def get_default(self, context, resource): + """Get a specific default quota for a resource.""" + + return self._driver.get_default(context, resource) def get_defaults(self, context): """Retrieve the default quotas. @@ -543,7 +620,7 @@ def get_defaults(self, context): :param context: The request context, for access checks. """ - return self._driver.get_defaults(context, self._resources) + return self._driver.get_defaults(context, self.resources) def get_class_quotas(self, context, quota_class, defaults=True): """Retrieve the quotas for the given quota class. @@ -556,7 +633,7 @@ def get_class_quotas(self, context, quota_class, defaults=True): resource. """ - return self._driver.get_class_quotas(context, self._resources, + return self._driver.get_class_quotas(context, self.resources, quota_class, defaults=defaults) def get_project_quotas(self, context, project_id, quota_class=None, @@ -576,11 +653,11 @@ def get_project_quotas(self, context, project_id, quota_class=None, will also be returned. """ - return self._driver.get_project_quotas(context, self._resources, - project_id, - quota_class=quota_class, - defaults=defaults, - usages=usages) + return self._driver.get_project_quotas(context, self.resources, + project_id, + quota_class=quota_class, + defaults=defaults, + usages=usages) def count(self, context, resource, *args, **kwargs): """Count a resource. @@ -595,13 +672,13 @@ def count(self, context, resource, *args, **kwargs): """ # Get the resource - res = self._resources.get(resource) + res = self.resources.get(resource) if not res or not hasattr(res, 'count'): raise exception.QuotaResourceUnknown(unknown=[resource]) return res.count(context, *args, **kwargs) - def limit_check(self, context, **values): + def limit_check(self, context, project_id=None, **values): """Check simple quota limits. 
For limits--those quotas for which there is no usage @@ -621,11 +698,15 @@ def limit_check(self, context, **values): nothing. :param context: The request context, for access checks. + :param project_id: Specify the project_id if current context + is admin and admin wants to impact on + common user's tenant. """ - return self._driver.limit_check(context, self._resources, values) + return self._driver.limit_check(context, self.resources, values, + project_id=project_id) - def reserve(self, context, expire=None, **deltas): + def reserve(self, context, expire=None, project_id=None, **deltas): """Check quotas and reserve resources. For counting quotas--those quotas for which there is a usage @@ -655,54 +736,62 @@ def reserve(self, context, expire=None, **deltas): default expiration time set by --default-reservation-expire will be used (this value will be treated as a number of seconds). + :param project_id: Specify the project_id if current context + is admin and admin wants to impact on + common user's tenant. """ - reservations = self._driver.reserve(context, self._resources, deltas, - expire=expire) + reservations = self._driver.reserve(context, self.resources, deltas, + expire=expire, + project_id=project_id) - LOG.debug(_("Created reservations %(reservations)s") % locals()) + LOG.debug(_("Created reservations %s") % reservations) return reservations - def commit(self, context, reservations): + def commit(self, context, reservations, project_id=None): """Commit reservations. :param context: The request context, for access checks. :param reservations: A list of the reservation UUIDs, as returned by the reserve() method. + :param project_id: Specify the project_id if current context + is admin and admin wants to impact on + common user's tenant. """ try: - self._driver.commit(context, reservations) + self._driver.commit(context, reservations, project_id=project_id) except Exception: # NOTE(Vek): Ignoring exceptions here is safe, because the # usage resynchronization and the reservation expiration # mechanisms will resolve the issue. The exception is # logged, however, because this is less than optimal. - LOG.exception(_("Failed to commit reservations " - "%(reservations)s") % locals()) + LOG.exception(_("Failed to commit reservations %s") % reservations) - def rollback(self, context, reservations): + def rollback(self, context, reservations, project_id=None): """Roll back reservations. :param context: The request context, for access checks. :param reservations: A list of the reservation UUIDs, as returned by the reserve() method. + :param project_id: Specify the project_id if current context + is admin and admin wants to impact on + common user's tenant. """ try: - self._driver.rollback(context, reservations) + self._driver.rollback(context, reservations, project_id=project_id) except Exception: # NOTE(Vek): Ignoring exceptions here is safe, because the # usage resynchronization and the reservation expiration # mechanisms will resolve the issue. The exception is # logged, however, because this is less than optimal. LOG.exception(_("Failed to roll back reservations " - "%(reservations)s") % locals()) + "%s") % reservations) def destroy_all_by_project(self, context, project_id): - """ - Destroy all quotas, usages, and reservations associated with a + """Destroy all quotas, usages, and reservations associated with a project. :param context: The request context, for access checks. 
@@ -722,30 +811,69 @@ def expire(self, context): self._driver.expire(context) - @property - def resources(self): - return sorted(self._resources.keys()) + def add_volume_type_opts(self, context, opts, volume_type_id): + """Add volume type resource options. + + Adds elements to the opts hash for volume type quotas. + If a resource is being reserved ('gigabytes', etc) and the volume + type is set up for its own quotas, these reservations are copied + into keys for 'gigabytes_', etc. + + :param context: The request context, for access checks. + :param opts: The reservations options hash. + :param volume_type_id: The volume type id for this reservation. + """ + if not volume_type_id: + return + # NOTE(jdg): set inactive to True in volume_type_get, as we + # may be operating on a volume that was created with a type + # that has since been deleted. + volume_type = db.volume_type_get(context, volume_type_id, True) -def _sync_instances(context, project_id, session): - return dict(zip(('instances', 'cores', 'ram'), - db.instance_data_get_for_project( - context, project_id, session=session))) + for quota in ('volumes', 'gigabytes', 'snapshots'): + if quota in opts: + vtype_quota = "%s_%s" % (quota, volume_type['name']) + opts[vtype_quota] = opts[quota] + @property + def resource_names(self): + return sorted(self.resources.keys()) -def _sync_volumes(context, project_id, session): - return dict(zip(('volumes', 'gigabytes'), - db.volume_data_get_for_project( - context, project_id, session=session))) + @property + def resources(self): + return self._resources -QUOTAS = QuotaEngine() +class VolumeTypeQuotaEngine(QuotaEngine): + """Represent the set of all quotas.""" + @property + def resources(self): + """Fetches all possible quota resources.""" + + result = {} + # Global quotas. + argses = [('volumes', '_sync_volumes', 'quota_volumes'), + ('snapshots', '_sync_snapshots', 'quota_snapshots'), + ('gigabytes', '_sync_gigabytes', 'quota_gigabytes'), ] + for args in argses: + resource = ReservableResource(*args) + result[resource.name] = resource + + # Volume type quotas. + volume_types = db.volume_type_get_all(context.get_admin_context(), + False) + for volume_type in volume_types.values(): + for part_name in ('volumes', 'gigabytes', 'snapshots'): + resource = VolumeTypeResource(part_name, volume_type) + result[resource.name] = resource + return result -resources = [ - ReservableResource('volumes', _sync_volumes, 'quota_volumes'), - ReservableResource('gigabytes', _sync_volumes, 'quota_gigabytes'), - ] + def register_resource(self, resource): + raise NotImplementedError(_("Cannot register resource")) + def register_resources(self, resources): + raise NotImplementedError(_("Cannot register resources")) -QUOTAS.register_resources(resources) +QUOTAS = VolumeTypeQuotaEngine() diff --git a/cinder/quota_utils.py b/cinder/quota_utils.py new file mode 100644 index 0000000000..4a81077dc6 --- /dev/null +++ b/cinder/quota_utils.py @@ -0,0 +1,64 @@ +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +from cinder import exception +from cinder.openstack.common import log as logging +from cinder import quota + + +LOG = logging.getLogger(__name__) +QUOTAS = quota.QUOTAS + + +def get_volume_type_reservation(ctxt, volume, type_id): + # Reserve quotas for the given volume type + try: + reserve_opts = {'volumes': 1, 'gigabytes': volume['size']} + QUOTAS.add_volume_type_opts(ctxt, + reserve_opts, + type_id) + reservations = QUOTAS.reserve(ctxt, **reserve_opts) + except exception.OverQuota as e: + overs = e.kwargs['overs'] + usages = e.kwargs['usages'] + quotas = e.kwargs['quotas'] + + def _consumed(name): + return (usages[name]['reserved'] + usages[name]['in_use']) + + for over in overs: + if 'gigabytes' in over: + s_size = volume['size'] + d_quota = quotas[over] + d_consumed = _consumed(over) + msg = _("Quota exceeded for %(s_pid)s, tried to create " + "%(s_size)sG volume - (%(d_consumed)dG of " + "%(d_quota)dG already consumed)") + LOG.warn(msg % {'s_pid': ctxt.project_id, + 's_size': s_size, + 'd_consumed': d_consumed, + 'd_quota': d_quota}) + raise exception.VolumeSizeExceedsAvailableQuota( + requested=s_size, quota=d_quota, consumed=d_consumed) + elif 'volumes' in over: + msg = _("Quota exceeded for %(s_pid)s, tried to create " + "volume (%(d_consumed)d volumes " + "already consumed)") + + LOG.warn(msg % {'s_pid': ctxt.project_id, + 'd_consumed': _consumed(over)}) + raise exception.VolumeLimitExceeded( + allowed=quotas[over]) + return reservations diff --git a/cinder/rootwrap/filters.py b/cinder/rootwrap/filters.py deleted file mode 100644 index 3509602f28..0000000000 --- a/cinder/rootwrap/filters.py +++ /dev/null @@ -1,152 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright (c) 2011 OpenStack, LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
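[Editor's note: a pure-Python illustration of the per-volume-type reservation keys that QUOTAS.add_volume_type_opts builds before quota_utils.get_volume_type_reservation calls QUOTAS.reserve(); the 'gold' type name is hypothetical, and the real method first looks the volume type up in the database.]

    reserve_opts = {'volumes': 1, 'gigabytes': 10}
    volume_type_name = 'gold'
    for quota in ('volumes', 'gigabytes', 'snapshots'):
        if quota in reserve_opts:
            reserve_opts['%s_%s' % (quota, volume_type_name)] = reserve_opts[quota]
    # reserve_opts now also carries 'volumes_gold': 1 and 'gigabytes_gold': 10,
    # matching the per-type resources registered by VolumeTypeQuotaEngine.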
- - -import os -import re - - -class CommandFilter(object): - """Command filter only checking that the 1st argument matches exec_path""" - - def __init__(self, exec_path, run_as, *args): - self.exec_path = exec_path - self.run_as = run_as - self.args = args - - def match(self, userargs): - """Only check that the first argument (command) matches exec_path""" - if (os.path.basename(self.exec_path) == userargs[0]): - return True - return False - - def get_command(self, userargs): - """Returns command to execute (with sudo -u if run_as != root).""" - if (self.run_as != 'root'): - # Used to run commands at lesser privileges - return ['sudo', '-u', self.run_as, self.exec_path] + userargs[1:] - return [self.exec_path] + userargs[1:] - - def get_environment(self, userargs): - """Returns specific environment to set, None if none""" - return None - - -class RegExpFilter(CommandFilter): - """Command filter doing regexp matching for every argument""" - - def match(self, userargs): - # Early skip if command or number of args don't match - if (len(self.args) != len(userargs)): - # DENY: argument numbers don't match - return False - # Compare each arg (anchoring pattern explicitly at end of string) - for (pattern, arg) in zip(self.args, userargs): - try: - if not re.match(pattern + '$', arg): - break - except re.error: - # DENY: Badly-formed filter - return False - else: - # ALLOW: All arguments matched - return True - - # DENY: Some arguments did not match - return False - - -class DnsmasqFilter(CommandFilter): - """Specific filter for the dnsmasq call (which includes env)""" - - def match(self, userargs): - if (userargs[0].startswith("FLAGFILE=") and - userargs[1].startswith("NETWORK_ID=") and - userargs[2] == "dnsmasq"): - return True - return False - - def get_command(self, userargs): - return [self.exec_path] + userargs[3:] - - def get_environment(self, userargs): - env = os.environ.copy() - env['FLAGFILE'] = userargs[0].split('=')[-1] - env['NETWORK_ID'] = userargs[1].split('=')[-1] - return env - - -class KillFilter(CommandFilter): - """Specific filter for the kill calls. - 1st argument is the user to run /bin/kill under - 2nd argument is the location of the affected executable - Subsequent arguments list the accepted signals (if any) - - This filter relies on /proc to accurately determine affected - executable, so it will only work on procfs-capable systems (not OSX). 
- """ - - def __init__(self, *args): - super(KillFilter, self).__init__("/bin/kill", *args) - - def match(self, userargs): - if userargs[0] != "kill": - return False - args = list(userargs) - if len(args) == 3: - # A specific signal is requested - signal = args.pop(1) - if signal not in self.args[1:]: - # Requested signal not in accepted list - return False - else: - if len(args) != 2: - # Incorrect number of arguments - return False - if len(self.args) > 1: - # No signal requested, but filter requires specific signal - return False - try: - command = os.readlink("/proc/%d/exe" % int(args[1])) - # NOTE(dprince): /proc/PID/exe may have ' (deleted)' on - # the end if an executable is updated or deleted - if command.endswith(" (deleted)"): - command = command[:command.rindex(" ")] - if command != self.args[0]: - # Affected executable does not match - return False - except (ValueError, OSError): - # Incorrect PID - return False - return True - - -class ReadFileFilter(CommandFilter): - """Specific filter for the utils.read_file_as_root call""" - - def __init__(self, file_path, *args): - self.file_path = file_path - super(ReadFileFilter, self).__init__("/bin/cat", "root", *args) - - def match(self, userargs): - if userargs[0] != 'cat': - return False - if userargs[1] != self.file_path: - return False - if len(userargs) != 2: - return False - return True diff --git a/cinder/rootwrap/wrapper.py b/cinder/rootwrap/wrapper.py deleted file mode 100644 index 4211a49261..0000000000 --- a/cinder/rootwrap/wrapper.py +++ /dev/null @@ -1,72 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright (c) 2011 OpenStack, LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -import ConfigParser -import os -import string - -from cinder.rootwrap import filters - - -def build_filter(class_name, *args): - """Returns a filter object of class class_name""" - if not hasattr(filters, class_name): - # TODO(ttx): Log the error (whenever cinder-rootwrap has a log file) - return None - filterclass = getattr(filters, class_name) - return filterclass(*args) - - -def load_filters(filters_path): - """Load filters from a list of directories""" - filterlist = [] - for filterdir in filters_path: - if not os.path.isdir(filterdir): - continue - for filterfile in os.listdir(filterdir): - filterconfig = ConfigParser.RawConfigParser() - filterconfig.read(os.path.join(filterdir, filterfile)) - for (name, value) in filterconfig.items("Filters"): - filterdefinition = [string.strip(s) for s in value.split(',')] - newfilter = build_filter(*filterdefinition) - if newfilter is None: - continue - filterlist.append(newfilter) - return filterlist - - -def match_filter(filters, userargs): - """ - Checks user command and arguments through command filters and - returns the first matching filter, or None is none matched. 
- """ - - found_filter = None - - for f in filters: - if f.match(userargs): - # Try other filters if executable is absent - if not os.access(f.exec_path, os.X_OK): - if not found_filter: - found_filter = f - continue - # Otherwise return matching filter for execution - return f - - # No filter matched or first missing executable - return found_filter diff --git a/cinder/scheduler/__init__.py b/cinder/scheduler/__init__.py index 727de580eb..a810121991 100644 --- a/cinder/scheduler/__init__.py +++ b/cinder/scheduler/__init__.py @@ -1,6 +1,4 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright (c) 2010 OpenStack, LLC. +# Copyright (c) 2010 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain diff --git a/cinder/scheduler/chance.py b/cinder/scheduler/chance.py deleted file mode 100644 index 3cee1f0534..0000000000 --- a/cinder/scheduler/chance.py +++ /dev/null @@ -1,61 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright (c) 2010 OpenStack, LLC. -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Chance (Random) Scheduler implementation -""" - -import random - -from cinder import exception -from cinder.scheduler import driver - - -class ChanceScheduler(driver.Scheduler): - """Implements Scheduler as a random node selector.""" - - def _filter_hosts(self, request_spec, hosts, **kwargs): - """Filter a list of hosts based on request_spec.""" - - filter_properties = kwargs.get('filter_properties', {}) - ignore_hosts = filter_properties.get('ignore_hosts', []) - hosts = [host for host in hosts if host not in ignore_hosts] - return hosts - - def _schedule(self, context, topic, request_spec, **kwargs): - """Picks a host that is up at random.""" - - elevated = context.elevated() - hosts = self.hosts_up(elevated, topic) - if not hosts: - msg = _("Is the appropriate service running?") - raise exception.NoValidHost(reason=msg) - - hosts = self._filter_hosts(request_spec, hosts, **kwargs) - if not hosts: - msg = _("Could not find another host") - raise exception.NoValidHost(reason=msg) - - return hosts[int(random.random() * len(hosts))] - - def schedule(self, context, topic, method, *_args, **kwargs): - """Picks a host that is up at random.""" - - host = self._schedule(context, topic, None, **kwargs) - driver.cast_to_host(context, topic, host, method, **kwargs) diff --git a/cinder/scheduler/driver.py b/cinder/scheduler/driver.py index 4989187522..b64f776467 100644 --- a/cinder/scheduler/driver.py +++ b/cinder/scheduler/driver.py @@ -1,6 +1,4 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright (c) 2010 OpenStack, LLC. +# Copyright (c) 2010 OpenStack Foundation # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. 
# All Rights Reserved. @@ -21,58 +19,35 @@ Scheduler base class that all Schedulers should inherit from """ +from oslo.config import cfg + from cinder import db -from cinder import flags -from cinder.openstack.common import log as logging -from cinder.openstack.common import cfg from cinder.openstack.common import importutils -from cinder.openstack.common import rpc from cinder.openstack.common import timeutils -from cinder import utils - +from cinder.volume import rpcapi as volume_rpcapi -LOG = logging.getLogger(__name__) scheduler_driver_opts = [ cfg.StrOpt('scheduler_host_manager', default='cinder.scheduler.host_manager.HostManager', help='The scheduler host manager class to use'), - ] - -FLAGS = flags.FLAGS -FLAGS.register_opts(scheduler_driver_opts) - - -def cast_to_volume_host(context, host, method, update_db=True, **kwargs): - """Cast request to a volume host queue""" + cfg.IntOpt('scheduler_max_attempts', + default=3, + help='Maximum number of attempts to schedule a volume'), +] - if update_db: - volume_id = kwargs.get('volume_id', None) - if volume_id is not None: - now = timeutils.utcnow() - db.volume_update(context, volume_id, - {'host': host, 'scheduled_at': now}) - rpc.cast(context, - rpc.queue_get_for(context, FLAGS.volume_topic, host), - {"method": method, "args": kwargs}) - LOG.debug(_("Casted '%(method)s' to host '%(host)s'") % locals()) +CONF = cfg.CONF +CONF.register_opts(scheduler_driver_opts) -def cast_to_host(context, topic, host, method, update_db=True, **kwargs): - """Generic cast to host""" +def volume_update_db(context, volume_id, host): + '''Set the host and the scheduled_at field of a volume. - topic_mapping = { - "volume": cast_to_volume_host} - - func = topic_mapping.get(topic) - if func: - func(context, host, method, update_db=update_db, **kwargs) - else: - rpc.cast(context, - rpc.queue_get_for(context, topic, host), - {"method": method, "args": kwargs}) - LOG.debug(_("Casted '%(method)s' to %(topic)s '%(host)s'") - % locals()) + :returns: A Volume with the updated fields set properly. + ''' + now = timeutils.utcnow() + values = {'host': host, 'scheduled_at': now} + return db.volume_update(context, volume_id, values) class Scheduler(object): @@ -80,30 +55,28 @@ class Scheduler(object): def __init__(self): self.host_manager = importutils.import_object( - FLAGS.scheduler_host_manager) - - def get_host_list(self): - """Get a list of hosts from the HostManager.""" - return self.host_manager.get_host_list() - - def get_service_capabilities(self): - """Get the normalized set of capabilities for the services.
- """ - return self.host_manager.get_service_capabilities() + CONF.scheduler_host_manager) + self.volume_rpcapi = volume_rpcapi.VolumeAPI() def update_service_capabilities(self, service_name, host, capabilities): """Process a capability update from a service node.""" self.host_manager.update_service_capabilities(service_name, - host, capabilities) + host, + capabilities) - def hosts_up(self, context, topic): - """Return the list of hosts that have a running service for topic.""" + def host_passes_filters(self, context, volume_id, host, filter_properties): + """Check if the specified host passes the filters.""" + raise NotImplementedError(_("Must implement host_passes_filters")) - services = db.service_get_all_by_topic(context, topic) - return [service['host'] - for service in services - if utils.service_is_up(service)] + def find_retype_host(self, context, request_spec, filter_properties={}, + migration_policy='never'): + """Find a host that can accept the volume with its new type.""" + raise NotImplementedError(_("Must implement find_retype_host")) def schedule(self, context, topic, method, *_args, **_kwargs): """Must override schedule method for scheduler to work.""" raise NotImplementedError(_("Must implement a fallback schedule")) + + def schedule_create_volume(self, context, request_spec, filter_properties): + """Must override schedule method for scheduler to work.""" + raise NotImplementedError(_("Must implement schedule_create_volume")) diff --git a/cinder/scheduler/filter_scheduler.py b/cinder/scheduler/filter_scheduler.py new file mode 100644 index 0000000000..83d246bb73 --- /dev/null +++ b/cinder/scheduler/filter_scheduler.py @@ -0,0 +1,279 @@ +# Copyright (c) 2011 Intel Corporation +# Copyright (c) 2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +The FilterScheduler is for creating volumes. +You can customize this scheduler by specifying your own volume Filters and +Weighing Functions. +""" + +from oslo.config import cfg + +from cinder import exception +from cinder.openstack.common import log as logging +from cinder.scheduler import driver +from cinder.scheduler import scheduler_options + +CONF = cfg.CONF +LOG = logging.getLogger(__name__) + + +class FilterScheduler(driver.Scheduler): + """Scheduler that can be used for filtering and weighing.""" + def __init__(self, *args, **kwargs): + super(FilterScheduler, self).__init__(*args, **kwargs) + self.cost_function_cache = None + self.options = scheduler_options.SchedulerOptions() + self.max_attempts = self._max_attempts() + + def schedule(self, context, topic, method, *args, **kwargs): + """The schedule() contract requires we return the one + best-suited host for this request. + """ + self._schedule(context, topic, *args, **kwargs) + + def _get_configuration_options(self): + """Fetch options dictionary. 
Broken out for testing.""" + return self.options.get_configuration() + + def populate_filter_properties(self, request_spec, filter_properties): + """Stuff things into filter_properties. Can be overridden in a + subclass to add more data. + """ + vol = request_spec['volume_properties'] + filter_properties['size'] = vol['size'] + filter_properties['availability_zone'] = vol.get('availability_zone') + filter_properties['user_id'] = vol.get('user_id') + filter_properties['metadata'] = vol.get('metadata') + filter_properties['qos_specs'] = vol.get('qos_specs') + + def schedule_create_volume(self, context, request_spec, filter_properties): + weighed_host = self._schedule(context, request_spec, + filter_properties) + + if not weighed_host: + raise exception.NoValidHost(reason="") + + host = weighed_host.obj.host + volume_id = request_spec['volume_id'] + snapshot_id = request_spec['snapshot_id'] + image_id = request_spec['image_id'] + + updated_volume = driver.volume_update_db(context, volume_id, host) + self._post_select_populate_filter_properties(filter_properties, + weighed_host.obj) + + # context is not serializable + filter_properties.pop('context', None) + + self.volume_rpcapi.create_volume(context, updated_volume, host, + request_spec, filter_properties, + allow_reschedule=True, + snapshot_id=snapshot_id, + image_id=image_id) + + def host_passes_filters(self, context, host, request_spec, + filter_properties): + """Check if the specified host passes the filters.""" + weighed_hosts = self._get_weighted_candidates(context, request_spec, + filter_properties) + for weighed_host in weighed_hosts: + host_state = weighed_host.obj + if host_state.host == host: + return host_state + + msg = (_('cannot place volume %(id)s on %(host)s') + % {'id': request_spec['volume_id'], 'host': host}) + raise exception.NoValidHost(reason=msg) + + def find_retype_host(self, context, request_spec, filter_properties={}, + migration_policy='never'): + """Find a host that can accept the volume with its new type.""" + current_host = request_spec['volume_properties']['host'] + + # The volume already exists on this host, and so we shouldn't check if + # it can accept the volume again in the CapacityFilter. + filter_properties['vol_exists_on'] = current_host + + weighed_hosts = self._get_weighted_candidates(context, request_spec, + filter_properties) + if not weighed_hosts: + msg = (_('No valid hosts for volume %(id)s with type %(type)s') + % {'id': request_spec['volume_id'], + 'type': request_spec['volume_type']}) + raise exception.NoValidHost(reason=msg) + + for weighed_host in weighed_hosts: + host_state = weighed_host.obj + if host_state.host == current_host: + return host_state + + if migration_policy == 'never': + msg = (_('Current host not valid for volume %(id)s with type ' + '%(type)s, migration not allowed') + % {'id': request_spec['volume_id'], + 'type': request_spec['volume_type']}) + raise exception.NoValidHost(reason=msg) + + top_host = self._choose_top_host(weighed_hosts, request_spec) + return top_host.obj + + def _post_select_populate_filter_properties(self, filter_properties, + host_state): + """Add additional information to the filter properties after a host has + been selected by the scheduling process. + """ + # Add a retry entry for the selected volume backend: + self._add_retry_host(filter_properties, host_state.host) + + def _add_retry_host(self, filter_properties, host): + """Add a retry entry for the selected volume backend. 
+ In the event that + the request gets re-scheduled, this entry will signal that the given + backend has already been tried. + """ + retry = filter_properties.get('retry', None) + if not retry: + return + hosts = retry['hosts'] + hosts.append(host) + + def _max_attempts(self): + max_attempts = CONF.scheduler_max_attempts + if max_attempts < 1: + msg = _("Invalid value for 'scheduler_max_attempts', " + "must be >=1") + raise exception.InvalidParameterValue(err=msg) + return max_attempts + + def _log_volume_error(self, volume_id, retry): + """If the request contained an exception from a previous volume + create operation, log it to aid debugging. + """ + exc = retry.pop('exc', None) # string-ified exception from volume + if not exc: + return # no exception info from a previous attempt, skip + + hosts = retry.get('hosts', None) + if not hosts: + return # no previously attempted hosts, skip + + last_host = hosts[-1] + msg = _("Error scheduling %(volume_id)s from last vol-service: " + "%(last_host)s : %(exc)s") % { + 'volume_id': volume_id, + 'last_host': last_host, + 'exc': exc, + } + LOG.error(msg) + + def _populate_retry(self, filter_properties, properties): + """Populate filter properties with history of retries for this + request. If maximum retries is exceeded, raise NoValidHost. + """ + max_attempts = self.max_attempts + retry = filter_properties.pop('retry', {}) + + if max_attempts == 1: + # re-scheduling is disabled. + return + + # retry is enabled, update attempt count: + if retry: + retry['num_attempts'] += 1 + else: + retry = { + 'num_attempts': 1, + 'hosts': [] # list of volume service hosts tried + } + filter_properties['retry'] = retry + + volume_id = properties.get('volume_id') + self._log_volume_error(volume_id, retry) + + if retry['num_attempts'] > max_attempts: + msg = _("Exceeded max scheduling attempts %(max_attempts)d for " + "volume %(volume_id)s") % { + 'max_attempts': max_attempts, + 'volume_id': volume_id, + } + raise exception.NoValidHost(reason=msg) + + def _get_weighted_candidates(self, context, request_spec, + filter_properties=None): + """Returns a list of hosts that meet the required specs, + ordered by their fitness. + """ + elevated = context.elevated() + + volume_properties = request_spec['volume_properties'] + # Since Cinder is using mixed filters from Oslo and its own, which + # takes 'resource_XX' and 'volume_XX' as input respectively, copying + # 'volume_XX' to 'resource_XX' will make both filters happy. + resource_properties = volume_properties.copy() + volume_type = request_spec.get("volume_type", None) + resource_type = request_spec.get("volume_type", None) + request_spec.update({'resource_properties': resource_properties}) + + config_options = self._get_configuration_options() + + if filter_properties is None: + filter_properties = {} + self._populate_retry(filter_properties, resource_properties) + + filter_properties.update({'context': context, + 'request_spec': request_spec, + 'config_options': config_options, + 'volume_type': volume_type, + 'resource_type': resource_type}) + + self.populate_filter_properties(request_spec, + filter_properties) + + # Find our local list of acceptable hosts by filtering and + # weighing our options. We virtually consume resources on + # it so subsequent selections can adjust accordingly. + + # Note: remember, we are using an iterator here. So only + # traverse this list once. + hosts = self.host_manager.get_all_host_states(elevated) + + # Filter local hosts based on requirements ...
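A minimal standalone sketch of the retry bookkeeping implemented by _populate_retry() and _add_retry_host() above; the names and values here are invented for illustration, and only the shape of the 'retry' dict mirrors the patch::

    class NoValidHost(Exception):
        pass

    def populate_retry(filter_properties, max_attempts=3):
        # Mirrors _populate_retry: bump the attempt counter, keep the
        # list of hosts already tried, and bail out past the limit.
        retry = filter_properties.pop('retry', {})
        if max_attempts == 1:
            return  # rescheduling disabled
        if retry:
            retry['num_attempts'] += 1
        else:
            retry = {'num_attempts': 1, 'hosts': []}
        filter_properties['retry'] = retry
        if retry['num_attempts'] > max_attempts:
            raise NoValidHost('Exceeded max scheduling attempts')

    props = {}
    populate_retry(props)                    # first attempt
    props['retry']['hosts'].append('hostA')  # chosen backend recorded
    populate_retry(props)                    # a reschedule bumps the count
    print(props['retry'])  # {'num_attempts': 2, 'hosts': ['hostA']}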
+ hosts = self.host_manager.get_filtered_hosts(hosts, + filter_properties) + if not hosts: + return [] + + LOG.debug(_("Filtered %s") % hosts) + # weighted_host = WeightedHost() ... the best + # host for the job. + weighed_hosts = self.host_manager.get_weighed_hosts(hosts, + filter_properties) + return weighed_hosts + + def _schedule(self, context, request_spec, filter_properties=None): + weighed_hosts = self._get_weighted_candidates(context, request_spec, + filter_properties) + if not weighed_hosts: + return None + return self._choose_top_host(weighed_hosts, request_spec) + + def _choose_top_host(self, weighed_hosts, request_spec): + top_host = weighed_hosts[0] + host_state = top_host.obj + LOG.debug(_("Choosing %s") % host_state.host) + volume_properties = request_spec['volume_properties'] + host_state.consume_from_volume(volume_properties) + return top_host diff --git a/cinder/scheduler/filters/__init__.py b/cinder/scheduler/filters/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cinder/scheduler/filters/capacity_filter.py b/cinder/scheduler/filters/capacity_filter.py new file mode 100644 index 0000000000..4f06281d91 --- /dev/null +++ b/cinder/scheduler/filters/capacity_filter.py @@ -0,0 +1,63 @@ +# Copyright (c) 2012 Intel +# Copyright (c) 2012 OpenStack Foundation +# +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +import math + +from cinder.openstack.common import log as logging +from cinder.openstack.common.scheduler import filters + + +LOG = logging.getLogger(__name__) + + +class CapacityFilter(filters.BaseHostFilter): + """CapacityFilter filters based on volume host's capacity utilization.""" + + def host_passes(self, host_state, filter_properties): + """Return True if host has sufficient capacity.""" + + # If the volume already exists on this host, don't fail it for + # insufficient capacity (e.g., if we are retyping) + if host_state.host == filter_properties.get('vol_exists_on'): + return True + + volume_size = filter_properties.get('size') + + if host_state.free_capacity_gb is None: + # Fail Safe + LOG.error(_("Free capacity not set: " + "volume node info collection broken.")) + return False + + free_space = host_state.free_capacity_gb + if free_space == 'infinite' or free_space == 'unknown': + # NOTE(zhiteng) for those back-ends that cannot report actual + # available capacity, we assume it is able to serve the + # request.
+ Even if it was not, the retry mechanism is + able to handle the failure by rescheduling + return True + reserved = float(host_state.reserved_percentage) / 100 + free = math.floor(free_space * (1 - reserved)) + if free < volume_size: + LOG.warning(_("Insufficient free space for volume creation " + "(requested / avail): " + "%(requested)s/%(available)s") + % {'requested': volume_size, + 'available': free}) + + return free >= volume_size diff --git a/cinder/scheduler/flows/__init__.py b/cinder/scheduler/flows/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cinder/scheduler/flows/create_volume.py b/cinder/scheduler/flows/create_volume.py new file mode 100644 index 0000000000..805622af29 --- /dev/null +++ b/cinder/scheduler/flows/create_volume.py @@ -0,0 +1,161 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import taskflow.engines +from taskflow.patterns import linear_flow +from taskflow import task + +from cinder import exception +from cinder import flow_utils +from cinder.openstack.common import excutils +from cinder.openstack.common import log as logging +from cinder.openstack.common.notifier import api as notifier +from cinder import utils +from cinder.volume.flows import common + +LOG = logging.getLogger(__name__) + +ACTION = 'volume:create' + + +class ExtractSchedulerSpecTask(flow_utils.CinderTask): + """Extracts a spec object from a partial and/or incomplete request spec. + + Reversion strategy: N/A + """ + + default_provides = set(['request_spec']) + + def __init__(self, db, **kwargs): + super(ExtractSchedulerSpecTask, self).__init__(addons=[ACTION], + **kwargs) + self.db = db + + def _populate_request_spec(self, context, volume_id, snapshot_id, + image_id): + # Create the full request spec using the volume_id. + # + # NOTE(harlowja): this will fetch the volume from the database, if + # the volume has been deleted before we got here then this should fail. + # + # In the future we might want to have a lock on the volume_id so that + # the volume cannot be deleted while it's still being created?
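For intuition, the capacity check in CapacityFilter.host_passes() above reduces to a few lines of arithmetic; a self-contained sketch with made-up numbers::

    import math

    def host_passes(free_capacity_gb, reserved_percentage, volume_size):
        # Mirrors CapacityFilter: back-ends that cannot report capacity
        # are trusted; otherwise reserved space is carved out first.
        if free_capacity_gb in ('infinite', 'unknown'):
            return True
        reserved = float(reserved_percentage) / 100
        free = math.floor(free_capacity_gb * (1 - reserved))
        return free >= volume_size

    # 100 GB free with 10% reserved leaves floor(90.0) = 90 usable GB:
    print(host_passes(100, 10, 90))        # True
    print(host_passes(100, 10, 91))        # False
    print(host_passes('unknown', 0, 500))  # True (fail open, retry later)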
+ if not volume_id: + msg = _("No volume_id provided to populate a request_spec from") + raise exception.InvalidInput(reason=msg) + volume_ref = self.db.volume_get(context, volume_id) + volume_type_id = volume_ref.get('volume_type_id') + vol_type = self.db.volume_type_get(context, volume_type_id) + return { + 'volume_id': volume_id, + 'snapshot_id': snapshot_id, + 'image_id': image_id, + 'volume_properties': { + 'size': utils.as_int(volume_ref.get('size'), quiet=False), + 'availability_zone': volume_ref.get('availability_zone'), + 'volume_type_id': volume_type_id, + }, + 'volume_type': list(dict(vol_type).iteritems()), + } + + def execute(self, context, request_spec, volume_id, snapshot_id, + image_id): + # For RPC version < 1.2 backward compatibility + if request_spec is None: + request_spec = self._populate_request_spec(context, volume_id, + snapshot_id, image_id) + return { + 'request_spec': request_spec, + } + + +def get_flow(context, db, driver, request_spec=None, + filter_properties=None, + volume_id=None, snapshot_id=None, image_id=None): + + """Constructs and returns the scheduler entrypoint flow. + + This flow will do the following: + + 1. Inject keys & values for dependent tasks. + 2. Extracts a scheduler specification from the provided inputs. + 3. Attaches two tasks that are activated only on *failure* (one to update + the db status and one to notify on the MQ of the failure that occurred). + 4. Uses the provided driver to then select and continue processing of + the volume request. + """ + create_what = { + 'context': context, + 'raw_request_spec': request_spec, + 'filter_properties': filter_properties, + 'volume_id': volume_id, + 'snapshot_id': snapshot_id, + 'image_id': image_id, + } + + flow_name = ACTION.replace(":", "_") + "_scheduler" + scheduler_flow = linear_flow.Flow(flow_name) + + # This will extract and clean the spec from the starting values. + scheduler_flow.add(ExtractSchedulerSpecTask( + db, + rebind={'request_spec': 'raw_request_spec'})) + + def schedule_create_volume(context, request_spec, filter_properties): + + def _log_failure(cause): + LOG.error(_("Failed to schedule_create_volume: %(cause)s") % + {'cause': cause}) + + def _notify_failure(cause): + """When scheduling fails send out an event that it failed.""" + topic = "scheduler.create_volume" + payload = { + 'request_spec': request_spec, + 'volume_properties': request_spec.get('volume_properties', {}), + 'volume_id': volume_id, + 'state': 'error', + 'method': 'create_volume', + 'reason': cause, + } + try: + publisher_id = notifier.publisher_id("scheduler") + notifier.notify(context, publisher_id, topic, notifier.ERROR, + payload) + except exception.CinderException: + LOG.exception(_("Failed notifying on %(topic)s " + "payload %(payload)s") % {'topic': topic, + 'payload': payload}) + + try: + driver.schedule_create_volume(context, request_spec, + filter_properties) + except exception.NoValidHost as e: + # No valid host was found, so notify on the scheduler queue, log + # that this happened and set the volume to errored out and + # *do not* reraise the error (since what's the point). + _notify_failure(e) + _log_failure(e) + common.error_out_volume(context, db, volume_id, reason=e) + except Exception as e: + # Some other error happened, notify on the scheduler queue and log + # that this happened and set the volume to errored out and + # *do* reraise the error.
+ with excutils.save_and_reraise_exception(): + _notify_failure(e) + _log_failure(e) + common.error_out_volume(context, db, volume_id, reason=e) + + scheduler_flow.add(task.FunctorTask(schedule_create_volume)) + + # Now load (but do not run) the flow using the provided initial data. + return taskflow.engines.load(scheduler_flow, store=create_what) diff --git a/cinder/scheduler/host_manager.py b/cinder/scheduler/host_manager.py index 99632bc869..b676f45a6e 100644 --- a/cinder/scheduler/host_manager.py +++ b/cinder/scheduler/host_manager.py @@ -1,4 +1,4 @@ -# Copyright (c) 2011 OpenStack, LLC. +# Copyright (c) 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -17,20 +17,296 @@ Manage hosts in the current zone. """ -# FIXME(ja): this code was written only for compute. re-implement for volumes +import UserDict + +from oslo.config import cfg + +from cinder import db +from cinder import exception +from cinder.openstack.common import log as logging +from cinder.openstack.common.scheduler import filters +from cinder.openstack.common.scheduler import weights +from cinder.openstack.common import timeutils +from cinder import utils + + +host_manager_opts = [ + cfg.ListOpt('scheduler_default_filters', + default=[ + 'AvailabilityZoneFilter', + 'CapacityFilter', + 'CapabilitiesFilter' + ], + help='Which filter class names to use for filtering hosts ' + 'when not specified in the request.'), + cfg.ListOpt('scheduler_default_weighers', + default=[ + 'CapacityWeigher' + ], + help='Which weigher class names to use for weighing hosts.') +] + +CONF = cfg.CONF +CONF.register_opts(host_manager_opts) +CONF.import_opt('scheduler_driver', 'cinder.scheduler.manager') + +LOG = logging.getLogger(__name__) + + +class ReadOnlyDict(UserDict.IterableUserDict): + """A read-only dict.""" + def __init__(self, source=None): + self.data = {} + self.update(source) + + def __setitem__(self, key, item): + raise TypeError + + def __delitem__(self, key): + raise TypeError + + def clear(self): + raise TypeError + + def pop(self, key, *args): + raise TypeError + + def popitem(self): + raise TypeError + + def update(self, source=None): + if source is None: + return + elif isinstance(source, UserDict.UserDict): + self.data = source.data + elif isinstance(source, type({})): + self.data = source + else: + raise TypeError class HostState(object): - pass + """Mutable and immutable information tracked for a host.""" + + def __init__(self, host, capabilities=None, service=None): + self.host = host + self.update_capabilities(capabilities, service) + + self.volume_backend_name = None + self.vendor_name = None + self.driver_version = 0 + self.storage_protocol = None + self.QoS_support = False + # Mutable available resources. + # These will change as resources are virtually "consumed". 
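A quick usage sketch of the ReadOnlyDict defined above (assuming that class as written in this patch): lookups behave like a plain dict, while any attempt to mutate raises TypeError::

    caps = ReadOnlyDict({'total_capacity_gb': 100, 'QoS_support': False})

    print(caps['total_capacity_gb'])   # 100
    print('QoS_support' in caps)       # True

    try:
        caps['total_capacity_gb'] = 0  # writes are rejected
    except TypeError:
        print('capabilities are read-only')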
+ self.total_capacity_gb = 0 + # capacity has been allocated in cinder POV, which should be + # sum(vol['size'] for vol in vols_on_hosts) + self.allocated_capacity_gb = 0 + self.free_capacity_gb = None + self.reserved_percentage = 0 + + self.updated = None + + def update_capabilities(self, capabilities=None, service=None): + # Read-only capability dicts + + if capabilities is None: + capabilities = {} + self.capabilities = ReadOnlyDict(capabilities) + if service is None: + service = {} + self.service = ReadOnlyDict(service) + + def update_from_volume_capability(self, capability): + """Update information about a host from its volume_node info.""" + if capability: + if self.updated and self.updated > capability['timestamp']: + return + + self.volume_backend_name = capability.get('volume_backend_name', None) + self.vendor_name = capability.get('vendor_name', None) + self.driver_version = capability.get('driver_version', None) + self.storage_protocol = capability.get('storage_protocol', None) + self.QoS_support = capability.get('QoS_support', False) + + self.total_capacity_gb = capability['total_capacity_gb'] + self.free_capacity_gb = capability['free_capacity_gb'] + self.allocated_capacity_gb = capability.get( + 'allocated_capacity_gb', 0) + self.reserved_percentage = capability['reserved_percentage'] + + self.updated = capability['timestamp'] + + def consume_from_volume(self, volume): + """Incrementally update host state from a volume.""" + volume_gb = volume['size'] + self.allocated_capacity_gb += volume_gb + if self.free_capacity_gb == 'infinite': + # There's virtually infinite space on back-end + pass + elif self.free_capacity_gb == 'unknown': + # Unable to determine the actual free space on back-end + pass + else: + self.free_capacity_gb -= volume_gb + self.updated = timeutils.utcnow() + + def __repr__(self): + return ("host '%s': free_capacity_gb: %s" % + (self.host, self.free_capacity_gb)) class HostManager(object): + """Base HostManager class.""" + + host_state_cls = HostState + + def __init__(self): + self.service_states = {} # { <host>: {<service>: {cap k : v}}} + self.host_state_map = {} + self.filter_handler = filters.HostFilterHandler('cinder.scheduler.' + 'filters') + self.filter_classes = self.filter_handler.get_all_classes() + self.weight_handler = weights.HostWeightHandler('cinder.scheduler.' + 'weights') + self.weight_classes = self.weight_handler.get_all_classes() + + default_filters = ['AvailabilityZoneFilter', + 'CapacityFilter', + 'CapabilitiesFilter'] + chance = 'cinder.scheduler.chance.ChanceScheduler' + simple = 'cinder.scheduler.simple.SimpleScheduler' + if CONF.scheduler_driver == simple: + CONF.set_override('scheduler_default_filters', default_filters) + CONF.set_override('scheduler_default_weighers', + ['AllocatedCapacityWeigher']) + elif CONF.scheduler_driver == chance: + CONF.set_override('scheduler_default_filters', default_filters) + CONF.set_override('scheduler_default_weighers', + ['ChanceWeigher']) + else: + # Do nothing when some other scheduler is configured + pass + + def _choose_host_filters(self, filter_cls_names): + """Since the caller may specify which filters to use we need + to have an authoritative list of what is permissible. This + function checks the filter names against a predefined set + of acceptable filters.
+ """ + if filter_cls_names is None: + filter_cls_names = CONF.scheduler_default_filters + if not isinstance(filter_cls_names, (list, tuple)): + filter_cls_names = [filter_cls_names] + good_filters = [] + bad_filters = [] + for filter_name in filter_cls_names: + found_class = False + for cls in self.filter_classes: + if cls.__name__ == filter_name: + found_class = True + good_filters.append(cls) + break + if not found_class: + bad_filters.append(filter_name) + if bad_filters: + msg = ", ".join(bad_filters) + raise exception.SchedulerHostFilterNotFound(filter_name=msg) + return good_filters + + def _choose_host_weighers(self, weight_cls_names): + """Since the caller may specify which weighers to use, we need + to have an authoritative list of what is permissible. This + function checks the weigher names against a predefined set + of acceptable weighers. + """ + if weight_cls_names is None: + weight_cls_names = CONF.scheduler_default_weighers + if not isinstance(weight_cls_names, (list, tuple)): + weight_cls_names = [weight_cls_names] + + good_weighers = [] + bad_weighers = [] + for weigher_name in weight_cls_names: + found_class = False + for cls in self.weight_classes: + if cls.__name__ == weigher_name: + good_weighers.append(cls) + found_class = True + break + if not found_class: + bad_weighers.append(weigher_name) + if bad_weighers: + msg = ", ".join(bad_weighers) + raise exception.SchedulerHostWeigherNotFound(weigher_name=msg) + return good_weighers + + def get_filtered_hosts(self, hosts, filter_properties, + filter_class_names=None): + """Filter hosts and return only ones passing all filters.""" + filter_classes = self._choose_host_filters(filter_class_names) + return self.filter_handler.get_filtered_objects(filter_classes, + hosts, + filter_properties) + + def get_weighed_hosts(self, hosts, weight_properties, + weigher_class_names=None): + """Weigh the hosts.""" + weigher_classes = self._choose_host_weighers(weigher_class_names) + return self.weight_handler.get_weighed_objects(weigher_classes, + hosts, + weight_properties) + + def update_service_capabilities(self, service_name, host, capabilities): + """Update the per-service capabilities based on this notification.""" + if service_name != 'volume': + LOG.debug(_('Ignoring %(service_name)s service update ' + 'from %(host)s'), + {'service_name': service_name, 'host': host}) + return + + LOG.debug(_("Received %(service_name)s service update from " + "%(host)s.") % + {'service_name': service_name, 'host': host}) + + # Copy the capabilities, so we don't modify the original dict + capab_copy = dict(capabilities) + capab_copy["timestamp"] = timeutils.utcnow() # Reported time + self.service_states[host] = capab_copy + + def get_all_host_states(self, context): + """Returns a dict of all the hosts the HostManager + knows about. Also, each of the consumable resources in HostState + are pre-populated and adjusted based on data in the db. - def get_host_list(self, *args): - pass + For example: + {'192.168.1.100': HostState(), ...} + """ - def update_service_capabilities(self, *args): - pass + # Get resource usage across the available volume nodes: + topic = CONF.volume_topic + volume_services = db.service_get_all_by_topic(context, topic) + self.host_state_map.clear() + for service in volume_services: + host = service['host'] + if not utils.service_is_up(service) or service['disabled']: + LOG.warn(_("volume service is down or disabled. 
" + "(host: %s)") % host) + continue + capabilities = self.service_states.get(host, None) + host_state = self.host_state_map.get(host) + if host_state: + # copy capabilities to host_state.capabilities + host_state.update_capabilities(capabilities, + dict(service.iteritems())) + else: + host_state = self.host_state_cls(host, + capabilities=capabilities, + service= + dict(service.iteritems())) + self.host_state_map[host] = host_state + # update host_state + host_state.update_from_volume_capability(capabilities) - def get_service_capabilities(self, *args): - pass + return self.host_state_map.itervalues() diff --git a/cinder/scheduler/manager.py b/cinder/scheduler/manager.py index c5ae6598f5..b7a25cc63d 100644 --- a/cinder/scheduler/manager.py +++ b/cinder/scheduler/manager.py @@ -1,6 +1,4 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright (c) 2010 OpenStack, LLC. +# Copyright (c) 2010 OpenStack Foundation # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. @@ -21,78 +19,194 @@ Scheduler Service """ -import functools +from oslo.config import cfg +from cinder import context from cinder import db -from cinder import flags -from cinder.openstack.common import log as logging +from cinder import exception from cinder import manager -from cinder.openstack.common import cfg from cinder.openstack.common import excutils from cinder.openstack.common import importutils +from cinder.openstack.common import log as logging +from cinder.openstack.common.notifier import api as notifier +from cinder import quota +from cinder.scheduler.flows import create_volume +from cinder.volume import rpcapi as volume_rpcapi -LOG = logging.getLogger(__name__) - scheduler_driver_opt = cfg.StrOpt('scheduler_driver', - default='cinder.scheduler.simple.SimpleScheduler', - help='Default driver to use for the scheduler') + default='cinder.scheduler.filter_scheduler.' + 'FilterScheduler', + help='Default scheduler driver to use') -FLAGS = flags.FLAGS -FLAGS.register_opt(scheduler_driver_opt) +CONF = cfg.CONF +CONF.register_opt(scheduler_driver_opt) + +QUOTAS = quota.QUOTAS + +LOG = logging.getLogger(__name__) class SchedulerManager(manager.Manager): - """Chooses a host to create volumes""" + """Chooses a host to create volumes.""" - RPC_API_VERSION = '1.0' + RPC_API_VERSION = '1.4' - def __init__(self, scheduler_driver=None, *args, **kwargs): + def __init__(self, scheduler_driver=None, service_name=None, + *args, **kwargs): if not scheduler_driver: - scheduler_driver = FLAGS.scheduler_driver + scheduler_driver = CONF.scheduler_driver + if scheduler_driver in ['cinder.scheduler.chance.ChanceScheduler', + 'cinder.scheduler.simple.SimpleScheduler']: + scheduler_driver = ('cinder.scheduler.filter_scheduler.' + 'FilterScheduler') + LOG.deprecated(_('ChanceScheduler and SimpleScheduler have been ' + 'deprecated due to lack of support for advanced ' + 'features like: volume types, volume encryption,' + ' QoS etc. 
These two schedulers can be fully ' + 'replaced by FilterScheduler with a certain ' + 'combination of filters and weighers.')) self.driver = importutils.import_object(scheduler_driver) super(SchedulerManager, self).__init__(*args, **kwargs) - def __getattr__(self, key): - """Converts all method calls to use the schedule method""" - # NOTE(russellb) Because of what this is doing, we must be careful - # when changing the API of the scheduler drivers, as that changes - # the rpc API as well, and the version should be updated accordingly. - return functools.partial(self._schedule, key) - - def get_host_list(self, context): - """Get a list of hosts from the HostManager.""" - return self.driver.get_host_list() - - def get_service_capabilities(self, context): - """Get the normalized set of capabilities for this zone.""" - return self.driver.get_service_capabilities() + def init_host(self): + ctxt = context.get_admin_context() + self.request_service_capabilities(ctxt) def update_service_capabilities(self, context, service_name=None, - host=None, capabilities=None, **kwargs): + host=None, capabilities=None, **kwargs): """Process a capability update from a service node.""" if capabilities is None: capabilities = {} - self.driver.update_service_capabilities(service_name, host, - capabilities) + self.driver.update_service_capabilities(service_name, + host, + capabilities) - def _schedule(self, method, context, topic, *args, **kwargs): - """Tries to call schedule_* method on the driver to retrieve host. - Falls back to schedule(context, topic) if method doesn't exist. - """ - driver_method_name = 'schedule_%s' % method - try: - driver_method = getattr(self.driver, driver_method_name) - args = (context,) + args - except AttributeError, e: - LOG.warning(_("Driver Method %(driver_method_name)s missing: " - "%(e)s.
Reverting to schedule()") % locals()) - driver_method = self.driver.schedule - args = (context, topic, method) + args + def create_volume(self, context, topic, volume_id, snapshot_id=None, + image_id=None, request_spec=None, + filter_properties=None): try: - return driver_method(*args, **kwargs) + flow_engine = create_volume.get_flow(context, + db, self.driver, + request_spec, + filter_properties, + volume_id, + snapshot_id, + image_id) except Exception: + LOG.exception(_("Failed to create scheduler manager volume flow")) + raise exception.CinderException( + _("Failed to create scheduler manager volume flow")) + flow_engine.run() + + def request_service_capabilities(self, context): + volume_rpcapi.VolumeAPI().publish_service_capabilities(context) + + def migrate_volume_to_host(self, context, topic, volume_id, host, + force_host_copy, request_spec, + filter_properties=None): + """Ensure that the host exists and can accept the volume.""" + + def _migrate_volume_set_error(self, context, ex, request_spec): + volume_state = {'volume_state': {'migration_status': None}} + self._set_volume_state_and_notify('migrate_volume_to_host', + volume_state, + context, ex, request_spec) + + try: + tgt_host = self.driver.host_passes_filters(context, host, + request_spec, + filter_properties) + except exception.NoValidHost as ex: + _migrate_volume_set_error(self, context, ex, request_spec) + except Exception as ex: + with excutils.save_and_reraise_exception(): + _migrate_volume_set_error(self, context, ex, request_spec) + else: + volume_ref = db.volume_get(context, volume_id) + volume_rpcapi.VolumeAPI().migrate_volume(context, volume_ref, + tgt_host, + force_host_copy) + + def retype(self, context, topic, volume_id, + request_spec, filter_properties=None): + """Schedule the modification of a volume's type. 
+ + :param context: the request context + :param topic: the topic listened on + :param volume_id: the ID of the volume to retype + :param request_spec: parameters for this retype request + :param filter_properties: parameters to filter by + """ + def _retype_volume_set_error(self, context, ex, request_spec, + volume_ref, msg, reservations): + if reservations: + QUOTAS.rollback(context, reservations) + if (volume_ref['instance_uuid'] is None and + volume_ref['attached_host'] is None): + orig_status = 'available' + else: + orig_status = 'in-use' + volume_state = {'volume_state': {'status': orig_status}} + self._set_volume_state_and_notify('retype', volume_state, + context, ex, request_spec, msg) + + volume_ref = db.volume_get(context, volume_id) + reservations = request_spec.get('quota_reservations') + new_type = request_spec.get('volume_type') + if new_type is None: + msg = _('New volume type not specified in request_spec.') + ex = exception.ParameterNotFound(param='volume_type') + _retype_volume_set_error(self, context, ex, request_spec, + volume_ref, msg, reservations) + + # Default migration policy is 'never' + migration_policy = request_spec.get('migration_policy') + if not migration_policy: + migration_policy = 'never' + + try: + tgt_host = self.driver.find_retype_host(context, request_spec, + filter_properties, + migration_policy) + except exception.NoValidHost as ex: + msg = (_("Could not find a host for volume %(volume_id)s with " + "type %(type_id)s.") % + {'type_id': new_type['id'], 'volume_id': volume_id}) + _retype_volume_set_error(self, context, ex, request_spec, + volume_ref, msg, reservations) + except Exception as ex: with excutils.save_and_reraise_exception(): - volume_id = kwargs.get('volume_id') - db.volume_update(context, volume_id, {'status': 'error'}) + _retype_volume_set_error(self, context, ex, request_spec, + volume_ref, None, reservations) + else: + volume_rpcapi.VolumeAPI().retype(context, volume_ref, + new_type['id'], tgt_host, + migration_policy, reservations) + + def _set_volume_state_and_notify(self, method, updates, context, ex, + request_spec, msg=None): + # TODO(harlowja): move into a task that just does this later. + if not msg: + msg = (_("Failed to schedule_%(method)s: %(ex)s") % + {'method': method, 'ex': ex}) + LOG.error(msg) + + volume_state = updates['volume_state'] + properties = request_spec.get('volume_properties', {}) + + volume_id = request_spec.get('volume_id', None) + + if volume_id: + db.volume_update(context, volume_id, volume_state) + + payload = dict(request_spec=request_spec, + volume_properties=properties, + volume_id=volume_id, + state=volume_state, + method=method, + reason=ex) + + notifier.notify(context, notifier.publisher_id("scheduler"), + 'scheduler.' + method, notifier.ERROR, payload) diff --git a/cinder/scheduler/rpcapi.py b/cinder/scheduler/rpcapi.py index 325edcd338..40c72852ee 100644 --- a/cinder/scheduler/rpcapi.py +++ b/cinder/scheduler/rpcapi.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2012, Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -18,11 +16,13 @@ Client side of the scheduler manager RPC API. 
""" -from cinder import flags +from oslo.config import cfg + +from cinder.openstack.common import jsonutils import cinder.openstack.common.rpc.proxy -FLAGS = flags.FLAGS +CONF = cfg.CONF class SchedulerAPI(cinder.openstack.common.rpc.proxy.RpcProxy): @@ -31,16 +31,62 @@ class SchedulerAPI(cinder.openstack.common.rpc.proxy.RpcProxy): API version history: 1.0 - Initial version. + 1.1 - Add create_volume() method + 1.2 - Add request_spec, filter_properties arguments + to create_volume() + 1.3 - Add migrate_volume_to_host() method + 1.4 - Add retype method ''' RPC_API_VERSION = '1.0' def __init__(self): - super(SchedulerAPI, self).__init__(topic=FLAGS.scheduler_topic, - default_version=self.RPC_API_VERSION) + super(SchedulerAPI, self).__init__( + topic=CONF.scheduler_topic, + default_version=self.RPC_API_VERSION) + + def create_volume(self, ctxt, topic, volume_id, snapshot_id=None, + image_id=None, request_spec=None, + filter_properties=None): + request_spec_p = jsonutils.to_primitive(request_spec) + return self.cast(ctxt, self.make_msg( + 'create_volume', + topic=topic, + volume_id=volume_id, + snapshot_id=snapshot_id, + image_id=image_id, + request_spec=request_spec_p, + filter_properties=filter_properties), + version='1.2') + + def migrate_volume_to_host(self, ctxt, topic, volume_id, host, + force_host_copy=False, request_spec=None, + filter_properties=None): + request_spec_p = jsonutils.to_primitive(request_spec) + return self.cast(ctxt, self.make_msg( + 'migrate_volume_to_host', + topic=topic, + volume_id=volume_id, + host=host, + force_host_copy=force_host_copy, + request_spec=request_spec_p, + filter_properties=filter_properties), + version='1.3') + + def retype(self, ctxt, topic, volume_id, + request_spec=None, filter_properties=None): + request_spec_p = jsonutils.to_primitive(request_spec) + return self.cast(ctxt, self.make_msg( + 'retype', + topic=topic, + volume_id=volume_id, + request_spec=request_spec_p, + filter_properties=filter_properties), + version='1.4') - def update_service_capabilities(self, ctxt, service_name, host, - capabilities): + def update_service_capabilities(self, ctxt, + service_name, host, + capabilities): self.fanout_cast(ctxt, self.make_msg('update_service_capabilities', - service_name=service_name, host=host, - capabilities=capabilities)) + service_name=service_name, host=host, + capabilities=capabilities)) diff --git a/cinder/scheduler/scheduler_options.py b/cinder/scheduler/scheduler_options.py new file mode 100644 index 0000000000..37484fe722 --- /dev/null +++ b/cinder/scheduler/scheduler_options.py @@ -0,0 +1,104 @@ +# Copyright (c) 2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +SchedulerOptions monitors a local .json file for changes and loads +it if needed. This file is converted to a data structure and passed +into the filtering and weighing functions which can use it for +dynamic configuration. 
+""" + +import datetime +import json +import os + +from oslo.config import cfg + +from cinder.openstack.common import log as logging +from cinder.openstack.common import timeutils + + +scheduler_json_config_location_opt = cfg.StrOpt( + 'scheduler_json_config_location', + default='', + help='Absolute path to scheduler configuration JSON file.') + + +CONF = cfg.CONF +CONF.register_opt(scheduler_json_config_location_opt) + +LOG = logging.getLogger(__name__) + + +class SchedulerOptions(object): + """SchedulerOptions monitors a local .json file for changes. + + The file is reloaded if needed and converted to a data structure and + passed into the filtering and weighing functions which can use it + for dynamic configuration. + """ + + def __init__(self): + super(SchedulerOptions, self).__init__() + self.data = {} + self.last_modified = None + self.last_checked = None + + def _get_file_handle(self, filename): + """Get file handle. Broken out for testing.""" + return open(filename) + + def _get_file_timestamp(self, filename): + """Get the last modified datetime. Broken out for testing.""" + try: + return os.path.getmtime(filename) + except os.error as e: + LOG.exception(_("Could not stat scheduler options file " + "%(filename)s: '%(e)s'"), + {'filename': filename, 'e': e}) + raise + + def _load_file(self, handle): + """Decode the JSON file. Broken out for testing.""" + try: + return json.load(handle) + except ValueError as e: + LOG.exception(_("Could not decode scheduler options: '%s'") % e) + return {} + + def _get_time_now(self): + """Get current UTC. Broken out for testing.""" + return timeutils.utcnow() + + def get_configuration(self, filename=None): + """Check the json file for changes and load it if needed.""" + if not filename: + filename = CONF.scheduler_json_config_location + if not filename: + return self.data + if self.last_checked: + now = self._get_time_now() + if now - self.last_checked < datetime.timedelta(minutes=5): + return self.data + + last_modified = self._get_file_timestamp(filename) + if (not last_modified or not self.last_modified or + last_modified > self.last_modified): + self.data = self._load_file(self._get_file_handle(filename)) + self.last_modified = last_modified + if not self.data: + self.data = {} + + return self.data diff --git a/cinder/scheduler/simple.py b/cinder/scheduler/simple.py index d7c8328944..2dd59015ed 100644 --- a/cinder/scheduler/simple.py +++ b/cinder/scheduler/simple.py @@ -1,6 +1,4 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright (c) 2010 OpenStack, LLC. +# Copyright (c) 2010 OpenStack Foundation # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. @@ -18,61 +16,51 @@ # under the License. """ -Simple Scheduler -""" +Chance and Simple Scheduler are DEPRECATED. -from cinder import db -from cinder import exception -from cinder import flags -from cinder.openstack.common import cfg -from cinder.scheduler import chance -from cinder.scheduler import driver -from cinder import utils +Chance and Simple scheduler implementation have been deprecated, as their +functionality can be implemented using the FilterScheduler, here's how: +If one would like to have scheduler randomly picks available back-end +(like ChanceScheduler did), use FilterScheduler with following combination +of filters and weighers. 
-simple_scheduler_opts = [ - cfg.IntOpt("max_gigabytes", - default=10000, - help="maximum number of volume gigabytes to allow per host"), - ] + scheduler_driver = cinder.scheduler.filter_scheduler.FilterScheduler + scheduler_default_filters = ['AvailabilityZoneFilter', 'CapacityFilter', + 'CapabilitiesFilter'] + scheduler_default_weighers = 'ChanceWeigher' -FLAGS = flags.FLAGS -FLAGS.register_opts(simple_scheduler_opts) +If one prefers the scheduler to pick the back-end with the most available +space that the scheduler can see (like SimpleScheduler did), use the following +combination of filters and weighers with FilterScheduler. + scheduler_driver = cinder.scheduler.filter_scheduler.FilterScheduler + scheduler_default_filters = ['AvailabilityZoneFilter', 'CapacityFilter', + 'CapabilitiesFilter'] + scheduler_default_weighers = 'AllocatedCapacityWeigher' + allocated_capacity_weight_multiplier = -1.0 -class SimpleScheduler(chance.ChanceScheduler): - """Implements Naive Scheduler that tries to find least loaded host.""" +Setting/leaving the configuration option +'scheduler_driver=cinder.scheduler.chance.ChanceScheduler' or +'scheduler_driver=cinder.scheduler.simple.SimpleScheduler' in cinder.conf +works exactly the same as described above since the scheduler manager has +been updated to do the trick internally/transparently for users. - def schedule_create_volume(self, context, volume_id, **_kwargs): - """Picks a host that is up and has the fewest volumes.""" - elevated = context.elevated() +With that, FilterScheduler behaves mostly the same as Chance/SimpleScheduler, +with the additional benefits of supporting volume types, volume encryption, +QoS. +""" - volume_ref = db.volume_get(context, volume_id) - availability_zone = volume_ref.get('availability_zone') +from oslo.config import cfg - zone, host = None, None - if availability_zone: - zone, _x, host = availability_zone.partition(':') - if host and context.is_admin: - service = db.service_get_by_args(elevated, host, 'cinder-volume') - if not utils.service_is_up(service): - raise exception.WillNotSchedule(host=host) - driver.cast_to_volume_host(context, host, 'create_volume', - volume_id=volume_id, **_kwargs) - return None +simple_scheduler_opts = [ + cfg.IntOpt("max_gigabytes", + default=10000, + help="This configuration option has been deprecated along " + "with the SimpleScheduler. The new scheduler is able to " + "gather capacity information for each host, thus setting " + "the maximum number of volume gigabytes per host is no " + "longer needed.
It's safe to remove this option " + "from cinder.conf."), ] - results = db.service_get_all_volume_sorted(elevated) - if zone: - results = [(service, gigs) for (service, gigs) in results - if service['availability_zone'] == zone] - for result in results: - (service, volume_gigabytes) = result - if volume_gigabytes + volume_ref['size'] > FLAGS.max_gigabytes: - msg = _("Not enough allocatable volume gigabytes remaining") - raise exception.NoValidHost(reason=msg) - if utils.service_is_up(service) and not service['disabled']: - driver.cast_to_volume_host(context, service['host'], - 'create_volume', volume_id=volume_id, **_kwargs) - return None - msg = _("Is the appropriate service running?") - raise exception.NoValidHost(reason=msg) +CONF = cfg.CONF +CONF.register_opts(simple_scheduler_opts) diff --git a/cinder/scheduler/weights/__init__.py b/cinder/scheduler/weights/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cinder/scheduler/weights/capacity.py b/cinder/scheduler/weights/capacity.py new file mode 100644 index 0000000000..d4f20b5e38 --- /dev/null +++ b/cinder/scheduler/weights/capacity.py @@ -0,0 +1,85 @@ +# Copyright (c) 2013 eBay Inc. +# Copyright (c) 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Weighers that weigh hosts by their capacity, including the following two +weighers: + +1. Capacity Weigher. Weigh hosts by their available capacity. + +The default is to spread volumes across all hosts evenly. If you prefer +stacking, you can set the 'capacity_weight_multiplier' option to a negative +number and the weighing has the opposite effect of the default. + +2. Allocated Capacity Weigher. Weigh hosts by their allocated capacity. + +The default behavior is to place a new volume on the host with the least +allocated space. This weigher is intended to simulate the behavior of +SimpleScheduler. If you prefer to place volumes on the host with the most +allocated space, you can set the 'allocated_capacity_weight_multiplier' +option to a positive number +and the weighing has the opposite effect of the default. +""" + + +import math + +from oslo.config import cfg + +from cinder.openstack.common.scheduler import weights + + +capacity_weight_opts = [ + cfg.FloatOpt('capacity_weight_multiplier', + default=1.0, + help='Multiplier used for weighing volume capacity. ' + 'Negative numbers mean to stack vs spread.'), + cfg.FloatOpt('allocated_capacity_weight_multiplier', + default=-1.0, + help='Multiplier used for weighing volume capacity. ' + 'Negative numbers mean to stack vs spread.'), +] + +CONF = cfg.CONF +CONF.register_opts(capacity_weight_opts) + + +class CapacityWeigher(weights.BaseHostWeigher): + def _weight_multiplier(self): + """Override the weight multiplier.""" + return CONF.capacity_weight_multiplier + + def _weigh_object(self, host_state, weight_properties): + """Higher weights win.
We want spreading to be the default.""" + reserved = float(host_state.reserved_percentage) / 100 + free_space = host_state.free_capacity_gb + if free_space == 'infinite' or free_space == 'unknown': + # (zhiteng) 'infinite' and 'unknown' are treated the same + # here, for sorting purposes. + free = float('inf') + else: + free = math.floor(host_state.free_capacity_gb * (1 - reserved)) + return free + + +class AllocatedCapacityWeigher(weights.BaseHostWeigher): + def _weight_multiplier(self): + """Override the weight multiplier.""" + return CONF.allocated_capacity_weight_multiplier + + def _weigh_object(self, host_state, weight_properties): + # Higher weights win. We want spreading (choose host with lowest + # allocated_capacity first) to be the default. + allocated_space = host_state.allocated_capacity_gb + return allocated_space diff --git a/cinder/scheduler/weights/chance.py b/cinder/scheduler/weights/chance.py new file mode 100644 index 0000000000..4e79b79a2c --- /dev/null +++ b/cinder/scheduler/weights/chance.py @@ -0,0 +1,28 @@ +# Copyright (C) 2013 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Chance Weigher. Assign random weights to hosts. + +Used to spread volumes randomly across a list of equally suitable hosts. +""" + + +import random + +from cinder.openstack.common.scheduler import weights + + +class ChanceWeigher(weights.BaseHostWeigher): + def _weigh_object(self, host_state, weight_properties): + return random.random() diff --git a/cinder/service.py b/cinder/service.py index ecbea86e5c..a594722188 100644 --- a/cinder/service.py +++ b/cinder/service.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration.
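Taken together, the weighers above reduce to "multiplier times raw weight; highest total wins". A toy sketch of how the multiplier's sign flips spreading into stacking (hosts and numbers invented)::

    # Free-space-per-host snapshot (made-up numbers, in GB).
    hosts = {'host1': 200.0, 'host2': 50.0}

    def best_host(multiplier):
        # Higher weights win; a negative multiplier inverts the order.
        return max(hosts, key=lambda h: multiplier * hosts[h])

    print(best_host(1.0))    # host1: spread onto the emptiest back-end
    print(best_host(-1.0))   # host2: stack onto the fullest back-end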
# Copyright 2011 Justin Santa Barbara @@ -19,23 +17,21 @@ """Generic Node base class for all workers that run on hosts.""" + import inspect import os import random -import signal -import eventlet -import greenlet +from oslo.config import cfg from cinder import context from cinder import db from cinder import exception -from cinder import flags -from cinder.openstack.common import log as logging -from cinder.openstack.common import cfg from cinder.openstack.common import importutils +from cinder.openstack.common import log as logging +from cinder.openstack.common import loopingcall from cinder.openstack.common import rpc -from cinder import utils +from cinder.openstack.common import service from cinder import version from cinder import wsgi @@ -60,103 +56,44 @@ cfg.IntOpt('osapi_volume_listen_port', default=8776, help='port for os volume api to listen'), - ] - -FLAGS = flags.FLAGS -FLAGS.register_opts(service_opts) - + cfg.IntOpt('osapi_volume_workers', + help='Number of workers for OpenStack Volume API service'), ] -class Launcher(object): - """Launch one or more services and wait for them to complete.""" +CONF = cfg.CONF +CONF.register_opts(service_opts) - def __init__(self): - """Initialize the service launcher. - :returns: None - - """ - self._services = [] - - @staticmethod - def run_server(server): - """Start and wait for a server to finish. - - :param service: Server to run and wait for. - :returns: None - - """ - server.start() - server.wait() - - def launch_server(self, server): - """Load and start the given server. - - :param server: The server you would like to start. - :returns: None - - """ - gt = eventlet.spawn(self.run_server, server) - self._services.append(gt) - - def stop(self): - """Stop all services which are currently running. - - :returns: None - - """ - for service in self._services: - service.kill() - - def wait(self): - """Waits until all services have been stopped, and then returns. - - :returns: None - - """ - def sigterm(sig, frame): - LOG.audit(_("SIGTERM received")) - # NOTE(jk0): Raise a ^C which is caught by the caller and cleanly - # shuts down the service. This does not yet handle eventlet - # threads. - raise KeyboardInterrupt - - signal.signal(signal.SIGTERM, sigterm) - - for service in self._services: - try: - service.wait() - except greenlet.GreenletExit: - pass - - -class Service(object): +class Service(service.Service): """Service object for binaries running on hosts. A service takes a manager and enables rpc by listening to queues based on topic. It also periodically runs tasks on the manager and reports - it state to the database services table.""" + its state to the database services table.
+ """ def __init__(self, host, binary, topic, manager, report_interval=None, periodic_interval=None, periodic_fuzzy_delay=None, - *args, **kwargs): + service_name=None, *args, **kwargs): + super(Service, self).__init__() self.host = host self.binary = binary self.topic = topic self.manager_class_name = manager manager_class = importutils.import_class(self.manager_class_name) - self.manager = manager_class(host=self.host, *args, **kwargs) + self.manager = manager_class(host=self.host, + service_name=service_name, + *args, **kwargs) self.report_interval = report_interval self.periodic_interval = periodic_interval self.periodic_fuzzy_delay = periodic_fuzzy_delay - super(Service, self).__init__(*args, **kwargs) + self.basic_config_check() self.saved_args, self.saved_kwargs = args, kwargs self.timers = [] def start(self): - vcs_string = version.version_string_with_vcs() - LOG.audit(_('Starting %(topic)s node (version %(vcs_string)s)'), - {'topic': self.topic, 'vcs_string': vcs_string}) - self.manager.init_host() + version_string = version.version_string() + LOG.audit(_('Starting %(topic)s node (version %(version_string)s)'), + {'topic': self.topic, 'version_string': version_string}) self.model_disconnected = False ctxt = context.get_admin_context() try: @@ -176,16 +113,17 @@ def start(self): # Share this same connection for these Consumers self.conn.create_consumer(self.topic, rpc_dispatcher, fanout=False) - node_topic = '%s.%s' % (self.topic, self.host) + node_topic = '%s:%s' % (self.topic, self.host) self.conn.create_consumer(node_topic, rpc_dispatcher, fanout=False) self.conn.create_consumer(self.topic, rpc_dispatcher, fanout=True) # Consume from all consumers in a thread self.conn.consume_in_thread() + self.manager.init_host() if self.report_interval: - pulse = utils.LoopingCall(self.report_state) + pulse = loopingcall.LoopingCall(self.report_state) pulse.start(interval=self.report_interval, initial_delay=self.report_interval) self.timers.append(pulse) @@ -196,13 +134,29 @@ def start(self): else: initial_delay = None - periodic = utils.LoopingCall(self.periodic_tasks) + periodic = loopingcall.LoopingCall(self.periodic_tasks) periodic.start(interval=self.periodic_interval, initial_delay=initial_delay) self.timers.append(periodic) + def basic_config_check(self): + """Perform basic config checks before starting service.""" + # Make sure report interval is less than service down time + if self.report_interval: + if CONF.service_down_time <= self.report_interval: + new_down_time = int(self.report_interval * 2.5) + LOG.warn(_("Report interval must be less than service down " + "time. Current config service_down_time: " + "%(service_down_time)s, report_interval for this: " + "service is: %(report_interval)s. Setting global " + "service_down_time to: %(new_down_time)s") % + {'service_down_time': CONF.service_down_time, + 'report_interval': self.report_interval, + 'new_down_time': new_down_time}) + CONF.set_override('service_down_time', new_down_time) + def _create_service_ref(self, context): - zone = FLAGS.storage_availability_zone + zone = CONF.storage_availability_zone service_ref = db.service_create(context, {'host': self.host, 'binary': self.binary, @@ -218,37 +172,38 @@ def __getattr__(self, key): @classmethod def create(cls, host=None, binary=None, topic=None, manager=None, report_interval=None, periodic_interval=None, - periodic_fuzzy_delay=None): + periodic_fuzzy_delay=None, service_name=None): """Instantiates class and passes back application object. 
- :param host: defaults to FLAGS.host + :param host: defaults to CONF.host :param binary: defaults to basename of executable :param topic: defaults to bin_name - 'cinder-' part - :param manager: defaults to FLAGS.<topic>_manager - :param report_interval: defaults to FLAGS.report_interval - :param periodic_interval: defaults to FLAGS.periodic_interval - :param periodic_fuzzy_delay: defaults to FLAGS.periodic_fuzzy_delay + :param manager: defaults to CONF.<topic>_manager + :param report_interval: defaults to CONF.report_interval + :param periodic_interval: defaults to CONF.periodic_interval + :param periodic_fuzzy_delay: defaults to CONF.periodic_fuzzy_delay """ if not host: - host = FLAGS.host + host = CONF.host if not binary: binary = os.path.basename(inspect.stack()[-1][1]) if not topic: topic = binary if not manager: subtopic = topic.rpartition('cinder-')[2] - manager = FLAGS.get('%s_manager' % subtopic, None) + manager = CONF.get('%s_manager' % subtopic, None) if report_interval is None: - report_interval = FLAGS.report_interval + report_interval = CONF.report_interval if periodic_interval is None: - periodic_interval = FLAGS.periodic_interval + periodic_interval = CONF.periodic_interval if periodic_fuzzy_delay is None: - periodic_fuzzy_delay = FLAGS.periodic_fuzzy_delay + periodic_fuzzy_delay = CONF.periodic_fuzzy_delay service_obj = cls(host, binary, topic, manager, report_interval=report_interval, periodic_interval=periodic_interval, - periodic_fuzzy_delay=periodic_fuzzy_delay) + periodic_fuzzy_delay=periodic_fuzzy_delay, + service_name=service_name) return service_obj @@ -274,6 +229,8 @@ def stop(self): pass self.timers = [] + super(Service, self).stop() + def wait(self): for x in self.timers: try: @@ -289,7 +246,7 @@ def periodic_tasks(self, raise_on_error=False): def report_state(self): """Update the state of this service in the datastore.""" ctxt = context.get_admin_context() - zone = FLAGS.storage_availability_zone + zone = CONF.storage_availability_zone state_catalog = {} try: try: @@ -305,7 +262,7 @@ def report_state(self): state_catalog['availability_zone'] = zone db.service_update(ctxt, - self.service_id, state_catalog) + self.service_id, state_catalog) # TODO(termie): make this pattern be more elegant. if getattr(self, 'model_disconnected', False): @@ -334,8 +291,15 @@ def __init__(self, name, loader=None): self.manager = self._get_manager() self.loader = loader or wsgi.Loader() self.app = self.loader.load_app(name) - self.host = getattr(FLAGS, '%s_listen' % name, "0.0.0.0") - self.port = getattr(FLAGS, '%s_listen_port' % name, 0) + self.host = getattr(CONF, '%s_listen' % name, "0.0.0.0") + self.port = getattr(CONF, '%s_listen_port' % name, 0) + self.workers = getattr(CONF, '%s_workers' % name, None) + if self.workers < 1: + LOG.warn(_("Value of config option %(name)s_workers must be " + "integer greater than 1. 
Input value ignored.") % + {'name': name}) + # Reset workers to default + self.workers = None self.server = wsgi.Server(name, self.app, host=self.host, @@ -352,10 +316,10 @@ def _get_manager(self): """ fl = '%s_manager' % self.name - if not fl in FLAGS: + if fl not in CONF: return None - manager_class_name = FLAGS.get(fl, None) + manager_class_name = CONF.get(fl, None) if not manager_class_name: return None @@ -393,31 +357,37 @@ def wait(self): self.server.wait() +def process_launcher(): + return service.ProcessLauncher() + + # NOTE(vish): the global launcher is to maintain the existing # functionality of calling service.serve + # service.wait _launcher = None -def serve(*servers): +def serve(server, workers=None): global _launcher - if not _launcher: - _launcher = Launcher() - for server in servers: - _launcher.launch_server(server) + if _launcher: + raise RuntimeError(_('serve() can only be called once')) + + _launcher = service.launch(server, workers=workers) def wait(): - LOG.debug(_('Full set of FLAGS:')) - for flag in FLAGS: - flag_get = FLAGS.get(flag, None) + LOG.debug(_('Full set of CONF:')) + for flag in CONF: + flag_get = CONF.get(flag, None) # hide flag contents from log if contains a password # should use secret flag when switch over to openstack-common if ("_password" in flag or "_key" in flag or - (flag == "sql_connection" and "mysql:" in flag_get)): - LOG.debug(_('%(flag)s : FLAG SET ') % locals()) + (flag == "sql_connection" and + ("mysql:" in flag_get or "postgresql:" in flag_get))): + LOG.debug(_('%s : FLAG SET ') % flag) else: - LOG.debug('%(flag)s : %(flag_get)s' % locals()) + LOG.debug('%(flag)s : %(flag_get)s' % + {'flag': flag, 'flag_get': flag_get}) try: _launcher.wait() except KeyboardInterrupt: diff --git a/cinder/test.py b/cinder/test.py index dd111e9d91..da3b8a12e8 100644 --- a/cinder/test.py +++ b/cinder/test.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. @@ -18,163 +16,185 @@ """Base classes for our unit tests. -Allows overriding of flags for use of fakes, and some black magic for +Allows overriding of CONF for use of fakes, and some black magic for inline callbacks. 
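One wrinkle in the workers check above: on CPython 2, None compares less than any integer, so an unset %s_workers option also takes the warning-and-reset path (harmlessly, since it resets back to None, but it logs a spurious warning), and the same comparison would raise TypeError on Python 3. A stricter guard would look like the following sketch, which is not what the patch does:

    def validate_workers(workers):
        # Only complain when the option is actually set to a bad value.
        if workers is not None and workers < 1:
            raise ValueError('workers must be greater than 0, got %d' % workers)
        return workers

    assert validate_workers(None) is None   # unset: accept silently
    assert validate_workers(4) == 4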
""" -import functools + +import os +import shutil +import tempfile import uuid -import unittest +import fixtures import mox -import nose.plugins.skip +from oslo.config import cfg import stubout +import testtools +from testtools import matchers -from cinder import flags +from cinder.common import config # noqa Need to register global_opts +from cinder.db import migration +from cinder.openstack.common.db.sqlalchemy import session from cinder.openstack.common import log as logging -from cinder.openstack.common import cfg from cinder.openstack.common import timeutils from cinder import service -from cinder import tests -from cinder.tests import fake_flags +from cinder.tests import conf_fixture test_opts = [ cfg.StrOpt('sqlite_clean_db', default='clean.sqlite', - help='File name of clean sqlite db'), - cfg.BoolOpt('fake_tests', - default=True, - help='should we use everything for testing'), - ] + help='File name of clean sqlite db'), ] -FLAGS = flags.FLAGS -FLAGS.register_opts(test_opts) +CONF = cfg.CONF +CONF.register_opts(test_opts) LOG = logging.getLogger(__name__) - -class skip_test(object): - """Decorator that skips a test.""" - # TODO(tr3buchet): remember forever what comstud did here - def __init__(self, msg): - self.message = msg - - def __call__(self, func): - @functools.wraps(func) - def _skipper(*args, **kw): - """Wrapped skipper function.""" - raise nose.SkipTest(self.message) - return _skipper - - -class skip_if(object): - """Decorator that skips a test if condition is true.""" - def __init__(self, condition, msg): - self.condition = condition - self.message = msg - - def __call__(self, func): - @functools.wraps(func) - def _skipper(*args, **kw): - """Wrapped skipper function.""" - if self.condition: - raise nose.SkipTest(self.message) - func(*args, **kw) - return _skipper - - -class skip_unless(object): - """Decorator that skips a test if condition is not true.""" - def __init__(self, condition, msg): - self.condition = condition - self.message = msg - - def __call__(self, func): - @functools.wraps(func) - def _skipper(*args, **kw): - """Wrapped skipper function.""" - if not self.condition: - raise nose.SkipTest(self.message) - func(*args, **kw) - return _skipper - - -def skip_if_fake(func): - """Decorator that skips a test if running in fake mode.""" - def _skipper(*args, **kw): - """Wrapped skipper function.""" - if FLAGS.fake_tests: - raise unittest.SkipTest('Test cannot be run in fake mode') - else: - return func(*args, **kw) - return _skipper +_DB_CACHE = None class TestingException(Exception): pass -class TestCase(unittest.TestCase): +class Database(fixtures.Fixture): + + def __init__(self, db_session, db_migrate, sql_connection, + sqlite_db, sqlite_clean_db): + self.sql_connection = sql_connection + self.sqlite_db = sqlite_db + self.sqlite_clean_db = sqlite_clean_db + + self.engine = db_session.get_engine() + self.engine.dispose() + conn = self.engine.connect() + if sql_connection == "sqlite://": + if db_migrate.db_version() > db_migrate.db_initial_version(): + return + else: + testdb = os.path.join(CONF.state_path, sqlite_db) + if os.path.exists(testdb): + return + db_migrate.db_sync() +# self.post_migrations() + if sql_connection == "sqlite://": + conn = self.engine.connect() + self._DB = "".join(line for line in conn.connection.iterdump()) + self.engine.dispose() + else: + cleandb = os.path.join(CONF.state_path, sqlite_clean_db) + shutil.copyfile(testdb, cleandb) + + def setUp(self): + super(Database, self).setUp() + + if self.sql_connection == "sqlite://": + conn = 
self.engine.connect() + conn.connection.executescript(self._DB) + self.addCleanup(self.engine.dispose) + else: + shutil.copyfile( + os.path.join(CONF.state_path, self.sqlite_clean_db), + os.path.join(CONF.state_path, self.sqlite_db)) + + +class TestCase(testtools.TestCase): """Test case base class for all unit tests.""" def setUp(self): """Run before each test method to initialize test environment.""" super(TestCase, self).setUp() - fake_flags.set_defaults(FLAGS) - flags.parse_args([], default_config_files=[]) + test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0) + try: + test_timeout = int(test_timeout) + except ValueError: + # If timeout value is invalid do not set a timeout. + test_timeout = 0 + if test_timeout > 0: + self.useFixture(fixtures.Timeout(test_timeout, gentle=True)) + self.useFixture(fixtures.NestedTempfile()) + self.useFixture(fixtures.TempHomeDir()) + + if (os.environ.get('OS_STDOUT_CAPTURE') == 'True' or + os.environ.get('OS_STDOUT_CAPTURE') == '1'): + stdout = self.useFixture(fixtures.StringStream('stdout')).stream + self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout)) + if (os.environ.get('OS_STDERR_CAPTURE') == 'True' or + os.environ.get('OS_STDERR_CAPTURE') == '1'): + stderr = self.useFixture(fixtures.StringStream('stderr')).stream + self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr)) + + self.log_fixture = self.useFixture(fixtures.FakeLogger()) + + conf_fixture.set_defaults(CONF) + CONF([], default_config_files=[]) # NOTE(vish): We need a better method for creating fixtures for tests # now that we have some required db setup for the system # to work properly. self.start = timeutils.utcnow() - tests.reset_db() + + CONF.set_default('connection', 'sqlite://', 'database') + CONF.set_default('sqlite_synchronous', False) + + self.log_fixture = self.useFixture(fixtures.FakeLogger()) + + global _DB_CACHE + if not _DB_CACHE: + _DB_CACHE = Database(session, migration, + sql_connection=CONF.database.connection, + sqlite_db=CONF.sqlite_db, + sqlite_clean_db=CONF.sqlite_clean_db) + self.useFixture(_DB_CACHE) # emulate some of the mox stuff, we can't use the metaclass # because it screws with our generators self.mox = mox.Mox() self.stubs = stubout.StubOutForTesting() + self.addCleanup(CONF.reset) + self.addCleanup(self.mox.UnsetStubs) + self.addCleanup(self.stubs.UnsetAll) + self.addCleanup(self.stubs.SmartUnsetAll) + self.addCleanup(self.mox.VerifyAll) self.injected = [] self._services = [] + CONF.set_override('fatal_exception_format_errors', True) + # This will be cleaned up by the NestedTempfile fixture + CONF.set_override('lock_path', tempfile.mkdtemp()) + def tearDown(self): """Runs after each test method to tear down test environment.""" - try: - self.mox.UnsetStubs() - self.stubs.UnsetAll() - self.stubs.SmartUnsetAll() - self.mox.VerifyAll() - super(TestCase, self).tearDown() - finally: - # Reset any overridden flags - FLAGS.reset() - - # Stop any timers - for x in self.injected: - try: - x.stop() - except AssertionError: - pass - - # Kill any services - for x in self._services: - try: - x.kill() - except Exception: - pass - - # Delete attributes that don't start with _ so they don't pin - # memory around unnecessarily for the duration of the test - # suite - for key in [k for k in self.__dict__.keys() if k[0] != '_']: - del self.__dict__[key] + + # Stop any timers + for x in self.injected: + try: + x.stop() + except AssertionError: + pass + + # Kill any services + for x in self._services: + try: + x.kill() + except Exception: + pass + + # Delete attributes 
that don't start with _ so they don't pin + # memory around unnecessarily for the duration of the test + # suite + for key in [k for k in self.__dict__.keys() if k[0] != '_']: + del self.__dict__[key] + super(TestCase, self).tearDown() def flags(self, **kw): - """Override flag variables for a test.""" + """Override CONF variables for a test.""" for k, v in kw.iteritems(): - FLAGS.set_override(k, v) + CONF.set_override(k, v) def start_service(self, name, host=None, **kwargs): host = host and host or uuid.uuid4().hex @@ -203,7 +223,8 @@ def raise_assertion(msg): d1str = str(d1) d2str = str(d2) base_msg = ('Dictionaries do not match. %(msg)s d1: %(d1str)s ' - 'd2: %(d2str)s' % locals()) + 'd2: %(d2str)s' % + {'msg': msg, 'd1str': d1str, 'd2str': d2str}) raise AssertionError(base_msg) d1keys = set(d1.keys()) @@ -212,7 +233,8 @@ def raise_assertion(msg): d1only = d1keys - d2keys d2only = d2keys - d1keys raise_assertion('Keys in d1 and not d2: %(d1only)s. ' - 'Keys in d2 and not d1: %(d2only)s' % locals()) + 'Keys in d2 and not d1: %(d2only)s' % + {'d1only': d1only, 'd2only': d2only}) for key in d1keys: d1value = d1[key] @@ -234,53 +256,31 @@ def raise_assertion(msg): continue elif d1value != d2value: raise_assertion("d1['%(key)s']=%(d1value)s != " - "d2['%(key)s']=%(d2value)s" % locals()) - - def assertDictListMatch(self, L1, L2, approx_equal=False, tolerance=0.001): - """Assert a list of dicts are equivalent.""" - def raise_assertion(msg): - L1str = str(L1) - L2str = str(L2) - base_msg = ('List of dictionaries do not match: %(msg)s ' - 'L1: %(L1str)s L2: %(L2str)s' % locals()) - raise AssertionError(base_msg) - - L1count = len(L1) - L2count = len(L2) - if L1count != L2count: - raise_assertion('Length mismatch: len(L1)=%(L1count)d != ' - 'len(L2)=%(L2count)d' % locals()) - - for d1, d2 in zip(L1, L2): - self.assertDictMatch(d1, d2, approx_equal=approx_equal, - tolerance=tolerance) - - def assertSubDictMatch(self, sub_dict, super_dict): - """Assert a sub_dict is subset of super_dict.""" - self.assertTrue(set(sub_dict.keys()).issubset(set(super_dict.keys()))) - for k, sub_value in sub_dict.items(): - super_value = super_dict[k] - if isinstance(sub_value, dict): - self.assertSubDictMatch(sub_value, super_value) - elif 'DONTCARE' in (sub_value, super_value): - continue - else: - self.assertEqual(sub_value, super_value) - - def assertIn(self, a, b, *args, **kwargs): - """Python < v2.7 compatibility. Assert 'a' in 'b'""" + "d2['%(key)s']=%(d2value)s" % + { + 'key': key, + 'd1value': d1value, + 'd2value': d2value, + }) + + def assertGreater(self, first, second, msg=None): + """Python < v2.7 compatibility. Assert 'first' > 'second'.""" try: - f = super(TestCase, self).assertIn + f = super(TestCase, self).assertGreater except AttributeError: - self.assertTrue(a in b, *args, **kwargs) + self.assertThat(first, + matchers.GreaterThan(second), + message=msg or '') else: - f(a, b, *args, **kwargs) + f(first, second, msg=msg) - def assertNotIn(self, a, b, *args, **kwargs): - """Python < v2.7 compatibility. Assert 'a' NOT in 'b'""" + def assertGreaterEqual(self, first, second, msg=None): + """Python < v2.7 compatibility. 
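The Database fixture above leans on sqlite's iterdump() to snapshot the migrated schema once and replay it before each test, which is much cheaper than re-running migrations. The core trick, reduced to the stdlib (the table and rows here are made up):

    import sqlite3

    src = sqlite3.connect(':memory:')
    src.execute('CREATE TABLE volumes (id INTEGER PRIMARY KEY, status TEXT)')
    src.execute("INSERT INTO volumes (status) VALUES ('available')")
    src.commit()
    snapshot = ''.join(src.iterdump())      # what the fixture keeps in _DB

    fresh = sqlite3.connect(':memory:')     # a clean DB for the next test
    fresh.executescript(snapshot)           # what setUp() replays
    assert fresh.execute('SELECT COUNT(*) FROM volumes').fetchone()[0] == 1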
Assert 'first' >= 'second'.""" try: - f = super(TestCase, self).assertNotIn + f = super(TestCase, self).assertGreaterEqual except AttributeError: - self.assertFalse(a in b, *args, **kwargs) + self.assertThat(first, + matchers.Not(matchers.LessThan(second)), + message=msg or '') else: - f(a, b, *args, **kwargs) + f(first, second, msg=msg) diff --git a/cinder/testing/fake/__init__.py b/cinder/testing/fake/__init__.py deleted file mode 100644 index 5cdad4717e..0000000000 --- a/cinder/testing/fake/__init__.py +++ /dev/null @@ -1 +0,0 @@ -import rabbit diff --git a/cinder/testing/runner.py b/cinder/testing/runner.py deleted file mode 100644 index d92179f257..0000000000 --- a/cinder/testing/runner.py +++ /dev/null @@ -1,370 +0,0 @@ -#!/usr/bin/env python -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Colorizer Code is borrowed from Twisted: -# Copyright (c) 2001-2010 Twisted Matrix Laboratories. -# -# Permission is hereby granted, free of charge, to any person obtaining -# a copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be -# included in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -"""Unittest runner for Cinder. 
- -To run all tests - python cinder/testing/runner.py - -To run a single test module: - python cinder/testing/runner.py test_compute - - or - - python cinder/testing/runner.py api.test_wsgi - -To run a single test: - python cinder/testing/runner.py - test_compute:ComputeTestCase.test_run_terminate - -""" - -import gettext -import heapq -import os -import unittest -import sys -import time - -import eventlet -from nose import config -from nose import core -from nose import result - -gettext.install('cinder', unicode=1) -reldir = os.path.join(os.path.dirname(__file__), '..', '..') -absdir = os.path.abspath(reldir) -sys.path.insert(0, absdir) - -from cinder.openstack.common import log as logging - - -class _AnsiColorizer(object): - """ - A colorizer is an object that loosely wraps around a stream, allowing - callers to write text to the stream in a particular color. - - Colorizer classes must implement C{supported()} and C{write(text, color)}. - """ - _colors = dict(black=30, red=31, green=32, yellow=33, - blue=34, magenta=35, cyan=36, white=37) - - def __init__(self, stream): - self.stream = stream - - def supported(cls, stream=sys.stdout): - """ - A class method that returns True if the current platform supports - coloring terminal output using this method. Returns False otherwise. - """ - if not stream.isatty(): - return False # auto color only on TTYs - try: - import curses - except ImportError: - return False - else: - try: - try: - return curses.tigetnum("colors") > 2 - except curses.error: - curses.setupterm() - return curses.tigetnum("colors") > 2 - except Exception: - raise - # guess false in case of error - return False - supported = classmethod(supported) - - def write(self, text, color): - """ - Write the given text to the stream in the given color. - - @param text: Text to be written to the stream. - - @param color: A string label for a color. e.g. 'red', 'white'. - """ - color = self._colors[color] - self.stream.write('\x1b[%s;1m%s\x1b[0m' % (color, text)) - - -class _Win32Colorizer(object): - """ - See _AnsiColorizer docstring. - """ - def __init__(self, stream): - import win32console as win - red, green, blue, bold = (win.FOREGROUND_RED, win.FOREGROUND_GREEN, - win.FOREGROUND_BLUE, win.FOREGROUND_INTENSITY) - self.stream = stream - self.screenBuffer = win.GetStdHandle(win.STD_OUT_HANDLE) - self._colors = { - 'normal': red | green | blue, - 'red': red | bold, - 'green': green | bold, - 'blue': blue | bold, - 'yellow': red | green | bold, - 'magenta': red | blue | bold, - 'cyan': green | blue | bold, - 'white': red | green | blue | bold - } - - def supported(cls, stream=sys.stdout): - try: - import win32console - screenBuffer = win32console.GetStdHandle( - win32console.STD_OUT_HANDLE) - except ImportError: - return False - import pywintypes - try: - screenBuffer.SetConsoleTextAttribute( - win32console.FOREGROUND_RED | - win32console.FOREGROUND_GREEN | - win32console.FOREGROUND_BLUE) - except pywintypes.error: - return False - else: - return True - supported = classmethod(supported) - - def write(self, text, color): - color = self._colors[color] - self.screenBuffer.SetConsoleTextAttribute(color) - self.stream.write(text) - self.screenBuffer.SetConsoleTextAttribute(self._colors['normal']) - - -class _NullColorizer(object): - """ - See _AnsiColorizer docstring. 
- """ - def __init__(self, stream): - self.stream = stream - - def supported(cls, stream=sys.stdout): - return True - supported = classmethod(supported) - - def write(self, text, color): - self.stream.write(text) - - -def get_elapsed_time_color(elapsed_time): - if elapsed_time > 1.0: - return 'red' - elif elapsed_time > 0.25: - return 'yellow' - else: - return 'green' - - -class CinderTestResult(result.TextTestResult): - def __init__(self, *args, **kw): - self.show_elapsed = kw.pop('show_elapsed') - result.TextTestResult.__init__(self, *args, **kw) - self.num_slow_tests = 5 - self.slow_tests = [] # this is a fixed-sized heap - self._last_case = None - self.colorizer = None - # NOTE(vish): reset stdout for the terminal check - stdout = sys.stdout - sys.stdout = sys.__stdout__ - for colorizer in [_Win32Colorizer, _AnsiColorizer, _NullColorizer]: - if colorizer.supported(): - self.colorizer = colorizer(self.stream) - break - sys.stdout = stdout - - # NOTE(lorinh): Initialize start_time in case a sqlalchemy-migrate - # error results in it failing to be initialized later. Otherwise, - # _handleElapsedTime will fail, causing the wrong error message to - # be outputted. - self.start_time = time.time() - - def getDescription(self, test): - return str(test) - - def _handleElapsedTime(self, test): - self.elapsed_time = time.time() - self.start_time - item = (self.elapsed_time, test) - # Record only the n-slowest tests using heap - if len(self.slow_tests) >= self.num_slow_tests: - heapq.heappushpop(self.slow_tests, item) - else: - heapq.heappush(self.slow_tests, item) - - def _writeElapsedTime(self, test): - color = get_elapsed_time_color(self.elapsed_time) - self.colorizer.write(" %.2f" % self.elapsed_time, color) - - def _writeResult(self, test, long_result, color, short_result, success): - if self.showAll: - self.colorizer.write(long_result, color) - if self.show_elapsed and success: - self._writeElapsedTime(test) - self.stream.writeln() - elif self.dots: - self.stream.write(short_result) - self.stream.flush() - - # NOTE(vish): copied from unittest with edit to add color - def addSuccess(self, test): - unittest.TestResult.addSuccess(self, test) - self._handleElapsedTime(test) - self._writeResult(test, 'OK', 'green', '.', True) - - # NOTE(vish): copied from unittest with edit to add color - def addFailure(self, test, err): - unittest.TestResult.addFailure(self, test, err) - self._handleElapsedTime(test) - self._writeResult(test, 'FAIL', 'red', 'F', False) - - # NOTE(vish): copied from nose with edit to add color - def addError(self, test, err): - """Overrides normal addError to add support for - errorClasses. If the exception is a registered class, the - error will be added to the list for that class, not errors. 
- """ - self._handleElapsedTime(test) - stream = getattr(self, 'stream', None) - ec, ev, tb = err - try: - exc_info = self._exc_info_to_string(err, test) - except TypeError: - # 2.3 compat - exc_info = self._exc_info_to_string(err) - for cls, (storage, label, isfail) in self.errorClasses.items(): - if result.isclass(ec) and issubclass(ec, cls): - if isfail: - test.passed = False - storage.append((test, exc_info)) - # Might get patched into a streamless result - if stream is not None: - if self.showAll: - message = [label] - detail = result._exception_detail(err[1]) - if detail: - message.append(detail) - stream.writeln(": ".join(message)) - elif self.dots: - stream.write(label[:1]) - return - self.errors.append((test, exc_info)) - test.passed = False - if stream is not None: - self._writeResult(test, 'ERROR', 'red', 'E', False) - - def startTest(self, test): - unittest.TestResult.startTest(self, test) - self.start_time = time.time() - current_case = test.test.__class__.__name__ - - if self.showAll: - if current_case != self._last_case: - self.stream.writeln(current_case) - self._last_case = current_case - - self.stream.write( - ' %s' % str(test.test._testMethodName).ljust(60)) - self.stream.flush() - - -class CinderTestRunner(core.TextTestRunner): - def __init__(self, *args, **kwargs): - self.show_elapsed = kwargs.pop('show_elapsed') - core.TextTestRunner.__init__(self, *args, **kwargs) - - def _makeResult(self): - return CinderTestResult(self.stream, - self.descriptions, - self.verbosity, - self.config, - show_elapsed=self.show_elapsed) - - def _writeSlowTests(self, result_): - # Pare out 'fast' tests - slow_tests = [item for item in result_.slow_tests - if get_elapsed_time_color(item[0]) != 'green'] - if slow_tests: - slow_total_time = sum(item[0] for item in slow_tests) - self.stream.writeln("Slowest %i tests took %.2f secs:" - % (len(slow_tests), slow_total_time)) - for elapsed_time, test in sorted(slow_tests, reverse=True): - time_str = "%.2f" % elapsed_time - self.stream.writeln(" %s %s" % (time_str.ljust(10), test)) - - def run(self, test): - result_ = core.TextTestRunner.run(self, test) - if self.show_elapsed: - self._writeSlowTests(result_) - return result_ - - -def run(): - # This is a fix to allow the --hide-elapsed flag while accepting - # arbitrary nosetest flags as well - argv = [x for x in sys.argv if x != '--hide-elapsed'] - hide_elapsed = argv != sys.argv - logging.setup("cinder") - - # If any argument looks like a test name but doesn't have "cinder.tests" in - # front of it, automatically add that so we don't have to type as much - for i, arg in enumerate(argv): - if arg.startswith('test_'): - argv[i] = 'cinder.tests.%s' % arg - - testdir = os.path.abspath(os.path.join("cinder", "tests")) - c = config.Config(stream=sys.stdout, - env=os.environ, - verbosity=3, - workingDir=testdir, - plugins=core.DefaultPluginManager()) - - runner = CinderTestRunner(stream=c.stream, - verbosity=c.verbosity, - config=c, - show_elapsed=not hide_elapsed) - sys.exit(not core.run(config=c, testRunner=runner, argv=argv)) - - -if __name__ == '__main__': - eventlet.monkey_patch() - run() diff --git a/cinder/tests/__init__.py b/cinder/tests/__init__.py index 3b95e052e4..302a1a2dba 100644 --- a/cinder/tests/__init__.py +++ b/cinder/tests/__init__.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. @@ -30,53 +28,11 @@ .. 
moduleauthor:: Andy Smith """ +import eventlet + +eventlet.monkey_patch() + # See http://code.google.com/p/python-nose/issues/detail?id=373 # The code below enables nosetests to work with i18n _() blocks import __builtin__ setattr(__builtin__, '_', lambda x: x) -import os -import shutil - -from cinder.db.sqlalchemy.session import get_engine -from cinder import flags - -FLAGS = flags.FLAGS - -_DB = None - - -def reset_db(): - if FLAGS.sql_connection == "sqlite://": - engine = get_engine() - engine.dispose() - conn = engine.connect() - conn.connection.executescript(_DB) - else: - shutil.copyfile(os.path.join(FLAGS.state_path, FLAGS.sqlite_clean_db), - os.path.join(FLAGS.state_path, FLAGS.sqlite_db)) - - -def setup(): - import mox # Fail fast if you don't have mox. Workaround for bug 810424 - - from cinder.db import migration - from cinder.tests import fake_flags - fake_flags.set_defaults(FLAGS) - - if FLAGS.sql_connection == "sqlite://": - if migration.db_version() > 1: - return - else: - testdb = os.path.join(FLAGS.state_path, FLAGS.sqlite_db) - if os.path.exists(testdb): - return - migration.db_sync() - - if FLAGS.sql_connection == "sqlite://": - global _DB - engine = get_engine() - conn = engine.connect() - _DB = "".join(line for line in conn.connection.iterdump()) - else: - cleandb = os.path.join(FLAGS.state_path, FLAGS.sqlite_clean_db) - shutil.copyfile(testdb, cleandb) diff --git a/cinder/tests/api/__init__.py b/cinder/tests/api/__init__.py index 3be5ce944c..a2c6e2e27d 100644 --- a/cinder/tests/api/__init__.py +++ b/cinder/tests/api/__init__.py @@ -1,6 +1,4 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2011 OpenStack LLC. +# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/cinder/tests/api/openstack/common.py b/cinder/tests/api/common.py similarity index 93% rename from cinder/tests/api/openstack/common.py rename to cinder/tests/api/common.py index e030105653..c0ef610815 100644 --- a/cinder/tests/api/openstack/common.py +++ b/cinder/tests/api/common.py @@ -1,6 +1,4 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2011 OpenStack LLC. +# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/cinder/tests/api/openstack/volume/__init__.py b/cinder/tests/api/contrib/__init__.py similarity index 90% rename from cinder/tests/api/openstack/volume/__init__.py rename to cinder/tests/api/contrib/__init__.py index 3be5ce944c..a2c6e2e27d 100644 --- a/cinder/tests/api/openstack/volume/__init__.py +++ b/cinder/tests/api/contrib/__init__.py @@ -1,6 +1,4 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2011 OpenStack LLC. +# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/cinder/tests/api/contrib/test_admin_actions.py b/cinder/tests/api/contrib/test_admin_actions.py new file mode 100644 index 0000000000..097e1176b2 --- /dev/null +++ b/cinder/tests/api/contrib/test_admin_actions.py @@ -0,0 +1,745 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import ast +import os +import shutil +import tempfile +import webob + +from oslo.config import cfg + +from cinder.brick.local_dev import lvm as brick_lvm +from cinder import context +from cinder import db +from cinder import exception +from cinder.openstack.common import jsonutils +from cinder.openstack.common import timeutils +from cinder import test +from cinder.tests.api import fakes +from cinder.tests.api.v2 import stubs +from cinder.volume import api as volume_api +from cinder.volume import utils as volutils + +CONF = cfg.CONF + + +def app(): + # no auth, just let environ['cinder.context'] pass through + api = fakes.router.APIRouter() + mapper = fakes.urlmap.URLMap() + mapper['/v2'] = api + return mapper + + +class AdminActionsTest(test.TestCase): + + def setUp(self): + super(AdminActionsTest, self).setUp() + self.tempdir = tempfile.mkdtemp() + self.flags(rpc_backend='cinder.openstack.common.rpc.impl_fake') + self.flags(lock_path=self.tempdir) + self.volume_api = volume_api.API() + self.stubs.Set(brick_lvm.LVM, '_vg_exists', lambda x: True) + + def tearDown(self): + shutil.rmtree(self.tempdir) + super(AdminActionsTest, self).tearDown() + + def test_reset_status_as_admin(self): + # admin context + ctx = context.RequestContext('admin', 'fake', True) + # current status is available + volume = db.volume_create(ctx, {'status': 'available'}) + req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id']) + req.method = 'POST' + req.headers['content-type'] = 'application/json' + # request status of 'error' + req.body = jsonutils.dumps({'os-reset_status': {'status': 'error'}}) + # attach admin context to request + req.environ['cinder.context'] = ctx + resp = req.get_response(app()) + # request is accepted + self.assertEqual(resp.status_int, 202) + volume = db.volume_get(ctx, volume['id']) + # status changed to 'error' + self.assertEqual(volume['status'], 'error') + + def test_reset_status_as_non_admin(self): + # current status is 'error' + volume = db.volume_create(context.get_admin_context(), + {'status': 'error', 'size': 1}) + req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id']) + req.method = 'POST' + req.headers['content-type'] = 'application/json' + # request changing status to available + req.body = jsonutils.dumps({'os-reset_status': {'status': + 'available'}}) + # non-admin context + req.environ['cinder.context'] = context.RequestContext('fake', 'fake') + resp = req.get_response(app()) + # request is not authorized + self.assertEqual(resp.status_int, 403) + volume = db.volume_get(context.get_admin_context(), volume['id']) + # status is still 'error' + self.assertEqual(volume['status'], 'error') + + def test_malformed_reset_status_body(self): + # admin context + ctx = context.RequestContext('admin', 'fake', True) + # current status is available + volume = db.volume_create(ctx, {'status': 'available', 'size': 1}) + req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id']) + req.method = 'POST' + req.headers['content-type'] = 'application/json' + # malformed request body + req.body = jsonutils.dumps({'os-reset_status': {'x-status': 
'bad'}}) + # attach admin context to request + req.environ['cinder.context'] = ctx + resp = req.get_response(app()) + # bad request + self.assertEqual(resp.status_int, 400) + volume = db.volume_get(ctx, volume['id']) + # status is still 'available' + self.assertEqual(volume['status'], 'available') + + def test_invalid_status_for_volume(self): + # admin context + ctx = context.RequestContext('admin', 'fake', True) + # current status is available + volume = db.volume_create(ctx, {'status': 'available', 'size': 1}) + req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id']) + req.method = 'POST' + req.headers['content-type'] = 'application/json' + # 'invalid' is not a valid status + req.body = jsonutils.dumps({'os-reset_status': {'status': 'invalid'}}) + # attach admin context to request + req.environ['cinder.context'] = ctx + resp = req.get_response(app()) + # bad request + self.assertEqual(resp.status_int, 400) + volume = db.volume_get(ctx, volume['id']) + # status is still 'available' + self.assertEqual(volume['status'], 'available') + + def test_reset_status_for_missing_volume(self): + # admin context + ctx = context.RequestContext('admin', 'fake', True) + # missing-volume-id + req = webob.Request.blank('/v2/fake/volumes/%s/action' % + 'missing-volume-id') + req.method = 'POST' + req.headers['content-type'] = 'application/json' + # malformed request body + req.body = jsonutils.dumps({'os-reset_status': {'status': + 'available'}}) + # attach admin context to request + req.environ['cinder.context'] = ctx + resp = req.get_response(app()) + # not found + self.assertEqual(resp.status_int, 404) + self.assertRaises(exception.NotFound, db.volume_get, ctx, + 'missing-volume-id') + + def test_reset_attached_status(self): + # admin context + ctx = context.RequestContext('admin', 'fake', True) + # current status is available + volume = db.volume_create(ctx, {'status': 'available', 'host': 'test', + 'provider_location': '', 'size': 1, + 'attach_status': 'attached'}) + req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id']) + req.method = 'POST' + req.headers['content-type'] = 'application/json' + # request update attach_status to detached + body = {'os-reset_status': {'status': 'available', + 'attach_status': 'detached'}} + req.body = jsonutils.dumps(body) + # attach admin context to request + req.environ['cinder.context'] = ctx + resp = req.get_response(app()) + # request is accepted + self.assertEqual(resp.status_int, 202) + volume = db.volume_get(ctx, volume['id']) + # attach_status changed to 'detached' + self.assertEqual(volume['attach_status'], 'detached') + # status un-modified + self.assertEqual(volume['status'], 'available') + + def test_invalid_reset_attached_status(self): + # admin context + ctx = context.RequestContext('admin', 'fake', True) + # current status is available + volume = db.volume_create(ctx, {'status': 'available', 'host': 'test', + 'provider_location': '', 'size': 1, + 'attach_status': 'detached'}) + req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id']) + req.method = 'POST' + req.headers['content-type'] = 'application/json' + # 'invalid' is not a valid attach_status + body = {'os-reset_status': {'status': 'available', + 'attach_status': 'invalid'}} + req.body = jsonutils.dumps(body) + # attach admin context to request + req.environ['cinder.context'] = ctx + resp = req.get_response(app()) + # bad request + self.assertEqual(resp.status_int, 400) + volume = db.volume_get(ctx, volume['id']) + # status and attach_status un-modified + 
self.assertEqual(volume['status'], 'available') + self.assertEqual(volume['attach_status'], 'detached') + + def test_snapshot_reset_status(self): + # admin context + ctx = context.RequestContext('admin', 'fake', True) + # snapshot in 'error_deleting' + volume = db.volume_create(ctx, {'status': 'available', 'host': 'test', + 'provider_location': '', 'size': 1}) + snapshot = db.snapshot_create(ctx, {'status': 'error_deleting', + 'volume_id': volume['id']}) + req = webob.Request.blank('/v2/fake/snapshots/%s/action' % + snapshot['id']) + req.method = 'POST' + req.headers['content-type'] = 'application/json' + # request status of 'error' + req.body = jsonutils.dumps({'os-reset_status': {'status': 'error'}}) + # attach admin context to request + req.environ['cinder.context'] = ctx + resp = req.get_response(app()) + # request is accepted + self.assertEqual(resp.status_int, 202) + snapshot = db.snapshot_get(ctx, snapshot['id']) + # status changed to 'error' + self.assertEqual(snapshot['status'], 'error') + + def test_invalid_status_for_snapshot(self): + # admin context + ctx = context.RequestContext('admin', 'fake', True) + # snapshot in 'available' + volume = db.volume_create(ctx, {'status': 'available', 'host': 'test', + 'provider_location': '', 'size': 1}) + snapshot = db.snapshot_create(ctx, {'status': 'available', + 'volume_id': volume['id']}) + req = webob.Request.blank('/v2/fake/snapshots/%s/action' % + snapshot['id']) + req.method = 'POST' + req.headers['content-type'] = 'application/json' + # 'attaching' is not a valid status for snapshots + req.body = jsonutils.dumps({'os-reset_status': {'status': + 'attaching'}}) + # attach admin context to request + req.environ['cinder.context'] = ctx + resp = req.get_response(app()) + # request is accepted + self.assertEqual(resp.status_int, 400) + snapshot = db.snapshot_get(ctx, snapshot['id']) + # status is still 'available' + self.assertEqual(snapshot['status'], 'available') + + def test_force_delete(self): + # admin context + ctx = context.RequestContext('admin', 'fake', True) + # current status is creating + volume = db.volume_create(ctx, {'size': 1}) + req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id']) + req.method = 'POST' + req.headers['content-type'] = 'application/json' + req.body = jsonutils.dumps({'os-force_delete': {}}) + # attach admin context to request + req.environ['cinder.context'] = ctx + resp = req.get_response(app()) + # request is accepted + self.assertEqual(resp.status_int, 202) + # volume is deleted + self.assertRaises(exception.NotFound, db.volume_get, ctx, volume['id']) + + def test_force_delete_snapshot(self): + self.stubs.Set(os.path, 'exists', lambda x: True) + self.stubs.Set(volutils, 'clear_volume', + lambda a, b, volume_clear=CONF.volume_clear, + volume_clear_size=CONF.volume_clear_size: None) + # admin context + ctx = context.RequestContext('admin', 'fake', True) + # current status is creating + volume = db.volume_create(ctx, {'host': 'test', 'size': 1}) + snapshot = db.snapshot_create(ctx, {'status': 'creating', + 'volume_size': 1, + 'volume_id': volume['id']}) + path = '/v2/fake/snapshots/%s/action' % snapshot['id'] + req = webob.Request.blank(path) + req.method = 'POST' + req.headers['content-type'] = 'application/json' + req.body = jsonutils.dumps({'os-force_delete': {}}) + # attach admin context to request + req.environ['cinder.context'] = ctx + # start service to handle rpc.cast for 'delete snapshot' + svc = self.start_service('volume', host='test') + # make request + resp = 
req.get_response(app()) + # request is accepted + self.assertEqual(resp.status_int, 202) + # snapshot is deleted + self.assertRaises(exception.NotFound, db.snapshot_get, ctx, + snapshot['id']) + # cleanup + svc.stop() + + def test_force_detach_instance_attached_volume(self): + # admin context + ctx = context.RequestContext('admin', 'fake', True) + # current status is available + volume = db.volume_create(ctx, {'status': 'available', 'host': 'test', + 'provider_location': '', 'size': 1}) + connector = {'initiator': 'iqn.2012-07.org.fake:01'} + # start service to handle rpc messages for attach requests + svc = self.start_service('volume', host='test') + self.volume_api.reserve_volume(ctx, volume) + mountpoint = '/dev/vbd' + self.volume_api.attach(ctx, volume, stubs.FAKE_UUID, None, + mountpoint, 'rw') + # volume is attached + volume = db.volume_get(ctx, volume['id']) + self.assertEqual(volume['status'], 'in-use') + self.assertEqual(volume['instance_uuid'], stubs.FAKE_UUID) + self.assertIsNone(volume['attached_host']) + self.assertEqual(volume['mountpoint'], mountpoint) + self.assertEqual(volume['attach_status'], 'attached') + admin_metadata = volume['volume_admin_metadata'] + self.assertEqual(len(admin_metadata), 2) + self.assertEqual(admin_metadata[0]['key'], 'readonly') + self.assertEqual(admin_metadata[0]['value'], 'False') + self.assertEqual(admin_metadata[1]['key'], 'attached_mode') + self.assertEqual(admin_metadata[1]['value'], 'rw') + conn_info = self.volume_api.initialize_connection(ctx, + volume, connector) + self.assertEqual(conn_info['data']['access_mode'], 'rw') + # build request to force detach + req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id']) + req.method = 'POST' + req.headers['content-type'] = 'application/json' + # request status of 'error' + req.body = jsonutils.dumps({'os-force_detach': None}) + # attach admin context to request + req.environ['cinder.context'] = ctx + # make request + resp = req.get_response(app()) + # request is accepted + self.assertEqual(resp.status_int, 202) + volume = db.volume_get(ctx, volume['id']) + # status changed to 'available' + self.assertEqual(volume['status'], 'available') + self.assertIsNone(volume['instance_uuid']) + self.assertIsNone(volume['attached_host']) + self.assertIsNone(volume['mountpoint']) + self.assertEqual(volume['attach_status'], 'detached') + admin_metadata = volume['volume_admin_metadata'] + self.assertEqual(len(admin_metadata), 1) + self.assertEqual(admin_metadata[0]['key'], 'readonly') + self.assertEqual(admin_metadata[0]['value'], 'False') + # cleanup + svc.stop() + + def test_force_detach_host_attached_volume(self): + # admin context + ctx = context.RequestContext('admin', 'fake', True) + # current status is available + volume = db.volume_create(ctx, {'status': 'available', 'host': 'test', + 'provider_location': '', 'size': 1}) + connector = {'initiator': 'iqn.2012-07.org.fake:01'} + # start service to handle rpc messages for attach requests + svc = self.start_service('volume', host='test') + self.volume_api.reserve_volume(ctx, volume) + mountpoint = '/dev/vbd' + host_name = 'fake-host' + self.volume_api.attach(ctx, volume, None, host_name, mountpoint, 'ro') + # volume is attached + volume = db.volume_get(ctx, volume['id']) + self.assertEqual(volume['status'], 'in-use') + self.assertIsNone(volume['instance_uuid']) + self.assertEqual(volume['attached_host'], host_name) + self.assertEqual(volume['mountpoint'], mountpoint) + self.assertEqual(volume['attach_status'], 'attached') + admin_metadata = 
volume['volume_admin_metadata'] + self.assertEqual(len(admin_metadata), 2) + self.assertEqual(admin_metadata[0]['key'], 'readonly') + self.assertEqual(admin_metadata[0]['value'], 'False') + self.assertEqual(admin_metadata[1]['key'], 'attached_mode') + self.assertEqual(admin_metadata[1]['value'], 'ro') + conn_info = self.volume_api.initialize_connection(ctx, + volume, connector) + self.assertEqual(conn_info['data']['access_mode'], 'ro') + # build request to force detach + req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id']) + req.method = 'POST' + req.headers['content-type'] = 'application/json' + # request status of 'error' + req.body = jsonutils.dumps({'os-force_detach': None}) + # attach admin context to request + req.environ['cinder.context'] = ctx + # make request + resp = req.get_response(app()) + # request is accepted + self.assertEqual(resp.status_int, 202) + volume = db.volume_get(ctx, volume['id']) + # status changed to 'available' + self.assertEqual(volume['status'], 'available') + self.assertIsNone(volume['instance_uuid']) + self.assertIsNone(volume['attached_host']) + self.assertIsNone(volume['mountpoint']) + self.assertEqual(volume['attach_status'], 'detached') + admin_metadata = volume['volume_admin_metadata'] + self.assertEqual(len(admin_metadata), 1) + self.assertEqual(admin_metadata[0]['key'], 'readonly') + self.assertEqual(admin_metadata[0]['value'], 'False') + # cleanup + svc.stop() + + def test_attach_in_used_volume_by_instance(self): + """Test that attaching to an in-use volume fails.""" + # admin context + ctx = context.RequestContext('admin', 'fake', True) + # current status is available + volume = db.volume_create(ctx, {'status': 'available', 'host': 'test', + 'provider_location': '', 'size': 1}) + connector = {'initiator': 'iqn.2012-07.org.fake:01'} + # start service to handle rpc messages for attach requests + svc = self.start_service('volume', host='test') + self.volume_api.reserve_volume(ctx, volume) + mountpoint = '/dev/vbd' + self.volume_api.attach(ctx, volume, stubs.FAKE_UUID, None, + mountpoint, 'rw') + conn_info = self.volume_api.initialize_connection(ctx, + volume, connector) + self.assertEqual(conn_info['data']['access_mode'], 'rw') + self.assertRaises(exception.InvalidVolume, + self.volume_api.attach, + ctx, + volume, + fakes.get_fake_uuid(), + None, + mountpoint, + 'rw') + self.assertRaises(exception.InvalidVolume, + self.volume_api.attach, + ctx, + volume, + fakes.get_fake_uuid(), + None, + mountpoint, + 'ro') + # cleanup + svc.stop() + + def test_attach_in_used_volume_by_host(self): + """Test that attaching to an in-use volume fails.""" + # admin context + ctx = context.RequestContext('admin', 'fake', True) + # current status is available + volume = db.volume_create(ctx, {'status': 'available', 'host': 'test', + 'provider_location': '', 'size': 1}) + connector = {'initiator': 'iqn.2012-07.org.fake:01'} + # start service to handle rpc messages for attach requests + svc = self.start_service('volume', host='test') + self.volume_api.reserve_volume(ctx, volume) + mountpoint = '/dev/vbd' + host_name = 'fake_host' + self.volume_api.attach(ctx, volume, None, host_name, mountpoint, 'rw') + conn_info = self.volume_api.initialize_connection(ctx, + volume, connector) + conn_info['data']['access_mode'] = 'rw' + self.assertRaises(exception.InvalidVolume, + self.volume_api.attach, + ctx, + volume, + None, + host_name, + mountpoint, + 'rw') + self.assertRaises(exception.InvalidVolume, + self.volume_api.attach, + ctx, + volume, + None, + host_name, + 
mountpoint, + 'ro') + # cleanup + svc.stop() + + def test_invalid_iscsi_connector(self): + """Test connector without the initiator (required by iscsi driver).""" + # admin context + ctx = context.RequestContext('admin', 'fake', True) + # current status is available + volume = db.volume_create(ctx, {'status': 'available', 'host': 'test', + 'provider_location': '', 'size': 1}) + connector = {} + # start service to handle rpc messages for attach requests + svc = self.start_service('volume', host='test') + self.assertRaises(exception.VolumeBackendAPIException, + self.volume_api.initialize_connection, + ctx, volume, connector) + # cleanup + svc.stop() + + def test_attach_attaching_volume_with_different_instance(self): + """Test that attaching volume reserved for another instance fails.""" + ctx = context.RequestContext('admin', 'fake', True) + # current status is available + volume = db.volume_create(ctx, {'status': 'available', 'host': 'test', + 'provider_location': '', 'size': 1}) + # start service to handle rpc messages for attach requests + svc = self.start_service('volume', host='test') + values = {'status': 'attaching', + 'instance_uuid': fakes.get_fake_uuid()} + db.volume_update(ctx, volume['id'], values) + mountpoint = '/dev/vbd' + self.assertRaises(exception.InvalidVolume, + self.volume_api.attach, + ctx, + volume, + stubs.FAKE_UUID, + None, + mountpoint, + 'rw') + # cleanup + svc.stop() + + def test_attach_attaching_volume_with_different_mode(self): + """Test that attaching volume reserved for another mode fails.""" + # admin context + ctx = context.RequestContext('admin', 'fake', True) + # current status is available + volume = db.volume_create(ctx, {'status': 'available', 'host': 'test', + 'provider_location': '', 'size': 1}) + # start service to handle rpc messages for attach requests + svc = self.start_service('volume', host='test') + values = {'status': 'attaching', + 'instance_uuid': fakes.get_fake_uuid()} + db.volume_update(ctx, volume['id'], values) + db.volume_admin_metadata_update(ctx, volume['id'], + {"attached_mode": 'rw'}, False) + mountpoint = '/dev/vbd' + self.assertRaises(exception.InvalidVolume, + self.volume_api.attach, + ctx, + volume, + values['instance_uuid'], + None, + mountpoint, + 'ro') + # cleanup + svc.stop() + + def _migrate_volume_prep(self): + admin_ctx = context.get_admin_context() + # create volume's current host and the destination host + db.service_create(admin_ctx, + {'host': 'test', + 'topic': CONF.volume_topic, + 'created_at': timeutils.utcnow()}) + db.service_create(admin_ctx, + {'host': 'test2', + 'topic': CONF.volume_topic, + 'created_at': timeutils.utcnow()}) + # current status is available + volume = db.volume_create(admin_ctx, + {'status': 'available', + 'host': 'test', + 'provider_location': '', + 'attach_status': ''}) + return volume + + def _migrate_volume_exec(self, ctx, volume, host, expected_status, + force_host_copy=False): + admin_ctx = context.get_admin_context() + # build request to migrate to host + req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id']) + req.method = 'POST' + req.headers['content-type'] = 'application/json' + body = {'os-migrate_volume': {'host': host, + 'force_host_copy': force_host_copy}} + req.body = jsonutils.dumps(body) + req.environ['cinder.context'] = ctx + resp = req.get_response(app()) + # verify status + self.assertEqual(resp.status_int, expected_status) + volume = db.volume_get(admin_ctx, volume['id']) + return volume + + def test_migrate_volume_success(self): + expected_status = 202 + 
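Nearly every test in this module drives the API through the same raw-WSGI recipe. Condensed into one hypothetical helper (stdlib json standing in for cinder's jsonutils, and app() being the unauthenticated router defined at the top of the module):

    import json
    import webob

    def action_request(tenant, volume_id, action, payload, ctx):
        req = webob.Request.blank('/v2/%s/volumes/%s/action'
                                  % (tenant, volume_id))
        req.method = 'POST'
        req.headers['content-type'] = 'application/json'
        req.body = json.dumps({action: payload})
        req.environ['cinder.context'] = ctx   # app() skips auth middleware
        return req

    # resp = action_request('fake', vol_id, 'os-reset_status',
    #                       {'status': 'error'}, admin_ctx).get_response(app())
    # assert resp.status_int == 202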
host = 'test2' + ctx = context.RequestContext('admin', 'fake', True) + volume = self._migrate_volume_prep() + volume = self._migrate_volume_exec(ctx, volume, host, expected_status) + self.assertEqual(volume['migration_status'], 'starting') + + def test_migrate_volume_as_non_admin(self): + expected_status = 403 + host = 'test2' + ctx = context.RequestContext('fake', 'fake') + volume = self._migrate_volume_prep() + self._migrate_volume_exec(ctx, volume, host, expected_status) + + def test_migrate_volume_without_host_parameter(self): + expected_status = 400 + host = 'test3' + ctx = context.RequestContext('admin', 'fake', True) + volume = self._migrate_volume_prep() + # build request to migrate without host + req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id']) + req.method = 'POST' + req.headers['content-type'] = 'application/json' + body = {'os-migrate_volume': {'host': host, + 'force_host_copy': False}} + req.body = jsonutils.dumps(body) + req.environ['cinder.context'] = ctx + resp = req.get_response(app()) + # verify status + self.assertEqual(resp.status_int, expected_status) + + def test_migrate_volume_host_no_exist(self): + expected_status = 400 + host = 'test3' + ctx = context.RequestContext('admin', 'fake', True) + volume = self._migrate_volume_prep() + self._migrate_volume_exec(ctx, volume, host, expected_status) + + def test_migrate_volume_same_host(self): + expected_status = 400 + host = 'test' + ctx = context.RequestContext('admin', 'fake', True) + volume = self._migrate_volume_prep() + self._migrate_volume_exec(ctx, volume, host, expected_status) + + def test_migrate_volume_migrating(self): + expected_status = 400 + host = 'test2' + ctx = context.RequestContext('admin', 'fake', True) + volume = self._migrate_volume_prep() + model_update = {'migration_status': 'migrating'} + volume = db.volume_update(ctx, volume['id'], model_update) + self._migrate_volume_exec(ctx, volume, host, expected_status) + + def test_migrate_volume_with_snap(self): + expected_status = 400 + host = 'test2' + ctx = context.RequestContext('admin', 'fake', True) + volume = self._migrate_volume_prep() + db.snapshot_create(ctx, {'volume_id': volume['id']}) + self._migrate_volume_exec(ctx, volume, host, expected_status) + + def test_migrate_volume_bad_force_host_copy1(self): + expected_status = 400 + host = 'test2' + ctx = context.RequestContext('admin', 'fake', True) + volume = self._migrate_volume_prep() + self._migrate_volume_exec(ctx, volume, host, expected_status, + force_host_copy='foo') + + def test_migrate_volume_bad_force_host_copy2(self): + expected_status = 400 + host = 'test2' + ctx = context.RequestContext('admin', 'fake', True) + volume = self._migrate_volume_prep() + self._migrate_volume_exec(ctx, volume, host, expected_status, + force_host_copy=1) + + def _migrate_volume_comp_exec(self, ctx, volume, new_volume, error, + expected_status, expected_id, no_body=False): + req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id']) + req.method = 'POST' + req.headers['content-type'] = 'application/json' + body = {'new_volume': new_volume['id'], 'error': error} + if no_body: + req.body = jsonutils.dumps({'': body}) + else: + req.body = jsonutils.dumps({'os-migrate_volume_completion': body}) + req.environ['cinder.context'] = ctx + resp = req.get_response(app()) + resp_dict = ast.literal_eval(resp.body) + # verify status + self.assertEqual(resp.status_int, expected_status) + if expected_id: + self.assertEqual(resp_dict['save_volume_id'], expected_id) + else: + 
self.assertNotIn('save_volume_id', resp_dict) + + def test_migrate_volume_comp_as_non_admin(self): + admin_ctx = context.get_admin_context() + volume = db.volume_create(admin_ctx, {'id': 'fake1'}) + new_volume = db.volume_create(admin_ctx, {'id': 'fake2'}) + expected_status = 403 + expected_id = None + ctx = context.RequestContext('fake', 'fake') + volume = self._migrate_volume_comp_exec(ctx, volume, new_volume, False, + expected_status, expected_id) + + def test_migrate_volume_comp_no_mig_status(self): + admin_ctx = context.get_admin_context() + volume1 = db.volume_create(admin_ctx, {'id': 'fake1', + 'migration_status': 'foo'}) + volume2 = db.volume_create(admin_ctx, {'id': 'fake2', + 'migration_status': None}) + expected_status = 400 + expected_id = None + ctx = context.RequestContext('admin', 'fake', True) + volume = self._migrate_volume_comp_exec(ctx, volume1, volume2, False, + expected_status, expected_id) + volume = self._migrate_volume_comp_exec(ctx, volume2, volume1, False, + expected_status, expected_id) + + def test_migrate_volume_comp_bad_mig_status(self): + admin_ctx = context.get_admin_context() + volume1 = db.volume_create(admin_ctx, + {'id': 'fake1', + 'migration_status': 'migrating'}) + volume2 = db.volume_create(admin_ctx, + {'id': 'fake2', + 'migration_status': 'target:foo'}) + expected_status = 400 + expected_id = None + ctx = context.RequestContext('admin', 'fake', True) + volume = self._migrate_volume_comp_exec(ctx, volume1, volume2, False, + expected_status, expected_id) + + def test_migrate_volume_comp_no_action(self): + admin_ctx = context.get_admin_context() + volume = db.volume_create(admin_ctx, {'id': 'fake1'}) + new_volume = db.volume_create(admin_ctx, {'id': 'fake2'}) + expected_status = 400 + expected_id = None + ctx = context.RequestContext('fake', 'fake') + self._migrate_volume_comp_exec(ctx, volume, new_volume, False, + expected_status, expected_id, True) + + def test_migrate_volume_comp_from_nova(self): + admin_ctx = context.get_admin_context() + volume = db.volume_create(admin_ctx, + {'id': 'fake1', + 'status': 'in-use', + 'host': 'test', + 'migration_status': None, + 'attach_status': 'attached'}) + new_volume = db.volume_create(admin_ctx, + {'id': 'fake2', + 'status': 'available', + 'host': 'test', + 'migration_status': None, + 'attach_status': 'detached'}) + expected_status = 200 + expected_id = 'fake2' + ctx = context.RequestContext('admin', 'fake', True) + volume = self._migrate_volume_comp_exec(ctx, volume, new_volume, False, + expected_status, expected_id) diff --git a/cinder/tests/api/contrib/test_availability_zones.py b/cinder/tests/api/contrib/test_availability_zones.py new file mode 100644 index 0000000000..1c2bed8c36 --- /dev/null +++ b/cinder/tests/api/contrib/test_availability_zones.py @@ -0,0 +1,90 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import datetime + +from lxml import etree + +import cinder.api.contrib.availability_zones +import cinder.context +from cinder.openstack.common import timeutils +import cinder.test +import cinder.volume.api + + +created_time = datetime.datetime(2012, 11, 14, 1, 20, 41, 95099) +current_time = timeutils.utcnow() + + +def list_availability_zones(self): + return ( + {'name': 'ping', 'available': True}, + {'name': 'pong', 'available': False}, + ) + + +class FakeRequest(object): + environ = {'cinder.context': cinder.context.get_admin_context()} + GET = {} + + +class ControllerTestCase(cinder.test.TestCase): + + def setUp(self): + super(ControllerTestCase, self).setUp() + self.controller = cinder.api.contrib.availability_zones.Controller() + self.req = FakeRequest() + self.stubs.Set(cinder.volume.api.API, + 'list_availability_zones', + list_availability_zones) + + def test_list_hosts(self): + """Verify that the volume hosts are returned.""" + actual = self.controller.index(self.req) + expected = { + 'availabilityZoneInfo': [ + {'zoneName': 'ping', 'zoneState': {'available': True}}, + {'zoneName': 'pong', 'zoneState': {'available': False}}, + ], + } + self.assertEqual(expected, actual) + + +class XMLSerializerTest(cinder.test.TestCase): + + def test_index_xml(self): + fixture = { + 'availabilityZoneInfo': [ + {'zoneName': 'ping', 'zoneState': {'available': True}}, + {'zoneName': 'pong', 'zoneState': {'available': False}}, + ], + } + + serializer = cinder.api.contrib.availability_zones.ListTemplate() + text = serializer.serialize(fixture) + tree = etree.fromstring(text) + + self.assertEqual('availabilityZones', tree.tag) + self.assertEqual(2, len(tree)) + + self.assertEqual('availabilityZone', tree[0].tag) + + self.assertEqual('ping', tree[0].get('name')) + self.assertEqual('zoneState', tree[0][0].tag) + self.assertEqual('True', tree[0][0].get('available')) + + self.assertEqual('pong', tree[1].get('name')) + self.assertEqual('zoneState', tree[1][0].tag) + self.assertEqual('False', tree[1][0].get('available')) diff --git a/cinder/tests/api/contrib/test_backups.py b/cinder/tests/api/contrib/test_backups.py new file mode 100644 index 0000000000..a0474f3377 --- /dev/null +++ b/cinder/tests/api/contrib/test_backups.py @@ -0,0 +1,958 @@ +# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Tests for Backup code. 
+""" + +import json +from xml.dom import minidom + +import webob + +# needed for stubs to work +import cinder.backup +from cinder import context +from cinder import db +from cinder import exception +from cinder.openstack.common import log as logging +from cinder.openstack.common import timeutils +from cinder import test +from cinder.tests.api import fakes +from cinder.tests import utils +# needed for stubs to work +import cinder.volume + + +LOG = logging.getLogger(__name__) + + +class BackupsAPITestCase(test.TestCase): + """Test Case for backups API.""" + + def setUp(self): + super(BackupsAPITestCase, self).setUp() + self.volume_api = cinder.volume.API() + self.backup_api = cinder.backup.API() + self.context = context.get_admin_context() + self.context.project_id = 'fake' + self.context.user_id = 'fake' + + def tearDown(self): + super(BackupsAPITestCase, self).tearDown() + + @staticmethod + def _create_backup(volume_id=1, + display_name='test_backup', + display_description='this is a test backup', + container='volumebackups', + status='creating', + size=0, object_count=0): + """Create a backup object.""" + backup = {} + backup['volume_id'] = volume_id + backup['user_id'] = 'fake' + backup['project_id'] = 'fake' + backup['host'] = 'testhost' + backup['availability_zone'] = 'az1' + backup['display_name'] = display_name + backup['display_description'] = display_description + backup['container'] = container + backup['status'] = status + backup['fail_reason'] = '' + backup['size'] = size + backup['object_count'] = object_count + return db.backup_create(context.get_admin_context(), backup)['id'] + + @staticmethod + def _get_backup_attrib(backup_id, attrib_name): + return db.backup_get(context.get_admin_context(), + backup_id)[attrib_name] + + @staticmethod + def _stub_service_get_all_by_topic(context, topic): + return [{'availability_zone': "fake_az", 'host': 'test_host', + 'disabled': 0, 'updated_at': timeutils.utcnow()}] + + def test_show_backup(self): + volume_id = utils.create_volume(self.context, size=5, + status='creating')['id'] + backup_id = self._create_backup(volume_id) + LOG.debug('Created backup with id %s' % backup_id) + req = webob.Request.blank('/v2/fake/backups/%s' % + backup_id) + req.method = 'GET' + req.headers['Content-Type'] = 'application/json' + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 200) + self.assertEqual(res_dict['backup']['availability_zone'], 'az1') + self.assertEqual(res_dict['backup']['container'], 'volumebackups') + self.assertEqual(res_dict['backup']['description'], + 'this is a test backup') + self.assertEqual(res_dict['backup']['name'], 'test_backup') + self.assertEqual(res_dict['backup']['id'], backup_id) + self.assertEqual(res_dict['backup']['object_count'], 0) + self.assertEqual(res_dict['backup']['size'], 0) + self.assertEqual(res_dict['backup']['status'], 'creating') + self.assertEqual(res_dict['backup']['volume_id'], volume_id) + + db.backup_destroy(context.get_admin_context(), backup_id) + db.volume_destroy(context.get_admin_context(), volume_id) + + def test_show_backup_xml_content_type(self): + volume_id = utils.create_volume(self.context, size=5, + status='creating')['id'] + backup_id = self._create_backup(volume_id) + req = webob.Request.blank('/v2/fake/backups/%s' % backup_id) + req.method = 'GET' + req.headers['Content-Type'] = 'application/xml' + req.headers['Accept'] = 'application/xml' + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 200) + dom = 
minidom.parseString(res.body) + backup = dom.getElementsByTagName('backup') + name = backup.item(0).getAttribute('name') + container_name = backup.item(0).getAttribute('container') + self.assertEqual(container_name.strip(), "volumebackups") + self.assertEqual(name.strip(), "test_backup") + db.backup_destroy(context.get_admin_context(), backup_id) + db.volume_destroy(context.get_admin_context(), volume_id) + + def test_show_backup_with_backup_NotFound(self): + req = webob.Request.blank('/v2/fake/backups/9999') + req.method = 'GET' + req.headers['Content-Type'] = 'application/json' + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 404) + self.assertEqual(res_dict['itemNotFound']['code'], 404) + self.assertEqual(res_dict['itemNotFound']['message'], + 'Backup 9999 could not be found.') + + def test_list_backups_json(self): + backup_id1 = self._create_backup() + backup_id2 = self._create_backup() + backup_id3 = self._create_backup() + + req = webob.Request.blank('/v2/fake/backups') + req.method = 'GET' + req.headers['Content-Type'] = 'application/json' + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 200) + self.assertEqual(len(res_dict['backups'][0]), 3) + self.assertEqual(res_dict['backups'][0]['id'], backup_id1) + self.assertEqual(res_dict['backups'][0]['name'], 'test_backup') + self.assertEqual(len(res_dict['backups'][1]), 3) + self.assertEqual(res_dict['backups'][1]['id'], backup_id2) + self.assertEqual(res_dict['backups'][1]['name'], 'test_backup') + self.assertEqual(len(res_dict['backups'][2]), 3) + self.assertEqual(res_dict['backups'][2]['id'], backup_id3) + self.assertEqual(res_dict['backups'][2]['name'], 'test_backup') + + db.backup_destroy(context.get_admin_context(), backup_id3) + db.backup_destroy(context.get_admin_context(), backup_id2) + db.backup_destroy(context.get_admin_context(), backup_id1) + + def test_list_backups_xml(self): + backup_id1 = self._create_backup() + backup_id2 = self._create_backup() + backup_id3 = self._create_backup() + + req = webob.Request.blank('/v2/fake/backups') + req.method = 'GET' + req.headers['Content-Type'] = 'application/xml' + req.headers['Accept'] = 'application/xml' + res = req.get_response(fakes.wsgi_app()) + + self.assertEqual(res.status_int, 200) + dom = minidom.parseString(res.body) + backup_list = dom.getElementsByTagName('backup') + + self.assertEqual(backup_list.item(0).attributes.length, 2) + self.assertEqual(backup_list.item(0).getAttribute('id'), + backup_id1) + self.assertEqual(backup_list.item(1).attributes.length, 2) + self.assertEqual(backup_list.item(1).getAttribute('id'), + backup_id2) + self.assertEqual(backup_list.item(2).attributes.length, 2) + self.assertEqual(backup_list.item(2).getAttribute('id'), + backup_id3) + + db.backup_destroy(context.get_admin_context(), backup_id3) + db.backup_destroy(context.get_admin_context(), backup_id2) + db.backup_destroy(context.get_admin_context(), backup_id1) + + def test_list_backups_detail_json(self): + backup_id1 = self._create_backup() + backup_id2 = self._create_backup() + backup_id3 = self._create_backup() + + req = webob.Request.blank('/v2/fake/backups/detail') + req.method = 'GET' + req.headers['Content-Type'] = 'application/json' + req.headers['Accept'] = 'application/json' + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 200) + self.assertEqual(len(res_dict['backups'][0]), 12) + 
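+        # the detail view exposes the full twelve-field backup record,
+        # unlike the three-field summary checked in test_list_backups_json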
self.assertEqual(res_dict['backups'][0]['availability_zone'], 'az1') + self.assertEqual(res_dict['backups'][0]['container'], + 'volumebackups') + self.assertEqual(res_dict['backups'][0]['description'], + 'this is a test backup') + self.assertEqual(res_dict['backups'][0]['name'], + 'test_backup') + self.assertEqual(res_dict['backups'][0]['id'], backup_id1) + self.assertEqual(res_dict['backups'][0]['object_count'], 0) + self.assertEqual(res_dict['backups'][0]['size'], 0) + self.assertEqual(res_dict['backups'][0]['status'], 'creating') + self.assertEqual(res_dict['backups'][0]['volume_id'], '1') + + self.assertEqual(len(res_dict['backups'][1]), 12) + self.assertEqual(res_dict['backups'][1]['availability_zone'], 'az1') + self.assertEqual(res_dict['backups'][1]['container'], + 'volumebackups') + self.assertEqual(res_dict['backups'][1]['description'], + 'this is a test backup') + self.assertEqual(res_dict['backups'][1]['name'], + 'test_backup') + self.assertEqual(res_dict['backups'][1]['id'], backup_id2) + self.assertEqual(res_dict['backups'][1]['object_count'], 0) + self.assertEqual(res_dict['backups'][1]['size'], 0) + self.assertEqual(res_dict['backups'][1]['status'], 'creating') + self.assertEqual(res_dict['backups'][1]['volume_id'], '1') + + self.assertEqual(len(res_dict['backups'][2]), 12) + self.assertEqual(res_dict['backups'][2]['availability_zone'], 'az1') + self.assertEqual(res_dict['backups'][2]['container'], + 'volumebackups') + self.assertEqual(res_dict['backups'][2]['description'], + 'this is a test backup') + self.assertEqual(res_dict['backups'][2]['name'], + 'test_backup') + self.assertEqual(res_dict['backups'][2]['id'], backup_id3) + self.assertEqual(res_dict['backups'][2]['object_count'], 0) + self.assertEqual(res_dict['backups'][2]['size'], 0) + self.assertEqual(res_dict['backups'][2]['status'], 'creating') + self.assertEqual(res_dict['backups'][2]['volume_id'], '1') + + db.backup_destroy(context.get_admin_context(), backup_id3) + db.backup_destroy(context.get_admin_context(), backup_id2) + db.backup_destroy(context.get_admin_context(), backup_id1) + + def test_list_backups_detail_xml(self): + backup_id1 = self._create_backup() + backup_id2 = self._create_backup() + backup_id3 = self._create_backup() + + req = webob.Request.blank('/v2/fake/backups/detail') + req.method = 'GET' + req.headers['Content-Type'] = 'application/xml' + req.headers['Accept'] = 'application/xml' + res = req.get_response(fakes.wsgi_app()) + + self.assertEqual(res.status_int, 200) + dom = minidom.parseString(res.body) + backup_detail = dom.getElementsByTagName('backup') + + self.assertEqual(backup_detail.item(0).attributes.length, 11) + self.assertEqual( + backup_detail.item(0).getAttribute('availability_zone'), 'az1') + self.assertEqual( + backup_detail.item(0).getAttribute('container'), 'volumebackups') + self.assertEqual( + backup_detail.item(0).getAttribute('description'), + 'this is a test backup') + self.assertEqual( + backup_detail.item(0).getAttribute('name'), 'test_backup') + self.assertEqual( + backup_detail.item(0).getAttribute('id'), backup_id1) + self.assertEqual( + int(backup_detail.item(0).getAttribute('object_count')), 0) + self.assertEqual( + int(backup_detail.item(0).getAttribute('size')), 0) + self.assertEqual( + backup_detail.item(0).getAttribute('status'), 'creating') + self.assertEqual( + int(backup_detail.item(0).getAttribute('volume_id')), 1) + + self.assertEqual(backup_detail.item(1).attributes.length, 11) + self.assertEqual( + 
            backup_detail.item(1).getAttribute('availability_zone'), 'az1')
+        self.assertEqual(
+            backup_detail.item(1).getAttribute('container'), 'volumebackups')
+        self.assertEqual(
+            backup_detail.item(1).getAttribute('description'),
+            'this is a test backup')
+        self.assertEqual(
+            backup_detail.item(1).getAttribute('name'), 'test_backup')
+        self.assertEqual(
+            backup_detail.item(1).getAttribute('id'), backup_id2)
+        self.assertEqual(
+            int(backup_detail.item(1).getAttribute('object_count')), 0)
+        self.assertEqual(
+            int(backup_detail.item(1).getAttribute('size')), 0)
+        self.assertEqual(
+            backup_detail.item(1).getAttribute('status'), 'creating')
+        self.assertEqual(
+            int(backup_detail.item(1).getAttribute('volume_id')), 1)
+
+        self.assertEqual(backup_detail.item(2).attributes.length, 11)
+        self.assertEqual(
+            backup_detail.item(2).getAttribute('availability_zone'), 'az1')
+        self.assertEqual(
+            backup_detail.item(2).getAttribute('container'), 'volumebackups')
+        self.assertEqual(
+            backup_detail.item(2).getAttribute('description'),
+            'this is a test backup')
+        self.assertEqual(
+            backup_detail.item(2).getAttribute('name'), 'test_backup')
+        self.assertEqual(
+            backup_detail.item(2).getAttribute('id'), backup_id3)
+        self.assertEqual(
+            int(backup_detail.item(2).getAttribute('object_count')), 0)
+        self.assertEqual(
+            int(backup_detail.item(2).getAttribute('size')), 0)
+        self.assertEqual(
+            backup_detail.item(2).getAttribute('status'), 'creating')
+        self.assertEqual(
+            int(backup_detail.item(2).getAttribute('volume_id')), 1)
+
+        db.backup_destroy(context.get_admin_context(), backup_id3)
+        db.backup_destroy(context.get_admin_context(), backup_id2)
+        db.backup_destroy(context.get_admin_context(), backup_id1)
+
+    def test_create_backup_json(self):
+        self.stubs.Set(cinder.db, 'service_get_all_by_topic',
+                       self._stub_service_get_all_by_topic)
+
+        volume_id = utils.create_volume(self.context, size=5)['id']
+
+        body = {"backup": {"display_name": "nightly001",
+                           "display_description":
+                           "Nightly Backup 03-Sep-2012",
+                           "volume_id": volume_id,
+                           "container": "nightlybackups",
+                           }
+                }
+        req = webob.Request.blank('/v2/fake/backups')
+        req.method = 'POST'
+        req.headers['Content-Type'] = 'application/json'
+        req.body = json.dumps(body)
+        res = req.get_response(fakes.wsgi_app())
+
+        res_dict = json.loads(res.body)
+        LOG.info(res_dict)
+
+        self.assertEqual(res.status_int, 202)
+        self.assertIn('id', res_dict['backup'])
+
+        db.volume_destroy(context.get_admin_context(), volume_id)
+
+    def test_create_backup_xml(self):
+        self.stubs.Set(cinder.db, 'service_get_all_by_topic',
+                       self._stub_service_get_all_by_topic)
+        volume_id = utils.create_volume(self.context, size=2)['id']
+
+        req = webob.Request.blank('/v2/fake/backups')
+        req.body = ('<backup volume_id="%s"/>' % volume_id)
+        req.method = 'POST'
+        req.headers['Content-Type'] = 'application/xml'
+        req.headers['Accept'] = 'application/xml'
+        res = req.get_response(fakes.wsgi_app())
+
+        self.assertEqual(res.status_int, 202)
+        dom = minidom.parseString(res.body)
+        backup = dom.getElementsByTagName('backup')
+        self.assertTrue(backup.item(0).hasAttribute('id'))
+
+        db.volume_destroy(context.get_admin_context(), volume_id)
+
+    def test_create_backup_with_no_body(self):
+        # omit body from the request
+        req = webob.Request.blank('/v2/fake/backups')
+        req.body = json.dumps(None)
+        req.method = 'POST'
+        req.headers['Content-Type'] = 'application/json'
+        req.headers['Accept'] = 'application/json'
+        res = req.get_response(fakes.wsgi_app())
+        res_dict = json.loads(res.body)
+
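+        # a null body never reaches the backup API; the middleware rejects
+        # it outright as a malformed request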
self.assertEqual(res.status_int, 400) + self.assertEqual(res_dict['badRequest']['code'], 400) + self.assertEqual(res_dict['badRequest']['message'], + 'The server could not comply with the request since' + ' it is either malformed or otherwise incorrect.') + + def test_create_backup_with_body_KeyError(self): + # omit volume_id from body + body = {"backup": {"display_name": "nightly001", + "display_description": + "Nightly Backup 03-Sep-2012", + "container": "nightlybackups", + } + } + req = webob.Request.blank('/v2/fake/backups') + req.method = 'POST' + req.headers['Content-Type'] = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 400) + self.assertEqual(res_dict['badRequest']['code'], 400) + self.assertEqual(res_dict['badRequest']['message'], + 'Incorrect request body format') + + def test_create_backup_with_VolumeNotFound(self): + body = {"backup": {"display_name": "nightly001", + "display_description": + "Nightly Backup 03-Sep-2012", + "volume_id": 9999, + "container": "nightlybackups", + } + } + req = webob.Request.blank('/v2/fake/backups') + req.method = 'POST' + req.headers['Content-Type'] = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 404) + self.assertEqual(res_dict['itemNotFound']['code'], 404) + self.assertEqual(res_dict['itemNotFound']['message'], + 'Volume 9999 could not be found.') + + def test_create_backup_with_InvalidVolume(self): + # need to create the volume referenced below first + volume_id = utils.create_volume(self.context, size=5, + status='restoring')['id'] + body = {"backup": {"display_name": "nightly001", + "display_description": + "Nightly Backup 03-Sep-2012", + "volume_id": volume_id, + "container": "nightlybackups", + } + } + req = webob.Request.blank('/v2/fake/backups') + req.method = 'POST' + req.headers['Content-Type'] = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 400) + self.assertEqual(res_dict['badRequest']['code'], 400) + self.assertEqual(res_dict['badRequest']['message'], + 'Invalid volume: Volume to be backed up must' + ' be available') + + def test_create_backup_WithOUT_enabled_backup_service(self): + # need an enabled backup service available + def stub_empty_service_get_all_by_topic(ctxt, topic): + return [] + + self.stubs.Set(cinder.db, 'service_get_all_by_topic', + stub_empty_service_get_all_by_topic) + volume_id = utils.create_volume(self.context, size=2)['id'] + req = webob.Request.blank('/v2/fake/backups') + body = {"backup": {"display_name": "nightly001", + "display_description": + "Nightly Backup 03-Sep-2012", + "volume_id": volume_id, + "container": "nightlybackups", + } + } + req.method = 'POST' + req.headers['Content-Type'] = 'application/json' + req.headers['Accept'] = 'application/json' + req.body = json.dumps(body) + + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + self.assertEqual(res.status_int, 500) + self.assertEqual(res_dict['computeFault']['code'], 500) + self.assertEqual(res_dict['computeFault']['message'], + 'Service cinder-backup could not be found.') + + volume = self.volume_api.get(context.get_admin_context(), volume_id) + self.assertEqual(volume['status'], 'available') + + def test_is_backup_service_enabled(self): + def empty_service(ctxt, topic): + 
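+            # no backup services registered at all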
            return []
+
+        test_host = 'test_host'
+        alt_host = 'strange_host'
+
+        # service host does not match the volume's host
+        def host_not_match(context, topic):
+            return [{'availability_zone': "fake_az", 'host': alt_host,
+                     'disabled': 0, 'updated_at': timeutils.utcnow()}]
+
+        # service az does not match the volume's az
+        def az_not_match(context, topic):
+            return [{'availability_zone': "strange_az", 'host': test_host,
+                     'disabled': 0, 'updated_at': timeutils.utcnow()}]
+
+        # service is disabled
+        def disabled_service(context, topic):
+            return [{'availability_zone': "fake_az", 'host': test_host,
+                     'disabled': 1, 'updated_at': timeutils.utcnow()}]
+
+        # dead service that last reported in the 20th century
+        def dead_service(context, topic):
+            return [{'availability_zone': "fake_az", 'host': alt_host,
+                     'disabled': 0, 'updated_at': '1989-04-16 02:55:44'}]
+
+        # first service's host does not match but the second one does
+        def multi_services(context, topic):
+            return [{'availability_zone': "fake_az", 'host': alt_host,
+                     'disabled': 0, 'updated_at': timeutils.utcnow()},
+                    {'availability_zone': "fake_az", 'host': test_host,
+                     'disabled': 0, 'updated_at': timeutils.utcnow()}]
+
+        volume_id = utils.create_volume(self.context, size=2,
+                                        host=test_host)['id']
+        volume = self.volume_api.get(context.get_admin_context(), volume_id)
+
+        # test empty service
+        self.stubs.Set(cinder.db, 'service_get_all_by_topic', empty_service)
+        self.assertEqual(self.backup_api._is_backup_service_enabled(volume,
+                                                                    test_host),
+                         False)
+
+        # test host not matching the service
+        self.stubs.Set(cinder.db, 'service_get_all_by_topic', host_not_match)
+        self.assertEqual(self.backup_api._is_backup_service_enabled(volume,
+                                                                    test_host),
+                         False)
+
+        # test az not matching the service
+        self.stubs.Set(cinder.db, 'service_get_all_by_topic', az_not_match)
+        self.assertEqual(self.backup_api._is_backup_service_enabled(volume,
+                                                                    test_host),
+                         False)
+
+        # test disabled service
+        self.stubs.Set(cinder.db, 'service_get_all_by_topic', disabled_service)
+        self.assertEqual(self.backup_api._is_backup_service_enabled(volume,
+                                                                    test_host),
+                         False)
+
+        # test dead service
+        self.stubs.Set(cinder.db, 'service_get_all_by_topic', dead_service)
+        self.assertEqual(self.backup_api._is_backup_service_enabled(volume,
+                                                                    test_host),
+                         False)
+
+        # test multiple services where only the last one matches
+        self.stubs.Set(cinder.db, 'service_get_all_by_topic', multi_services)
+        self.assertEqual(self.backup_api._is_backup_service_enabled(volume,
+                                                                    test_host),
+                         True)
+
+    def test_delete_backup_available(self):
+        backup_id = self._create_backup(status='available')
+        req = webob.Request.blank('/v2/fake/backups/%s' %
+                                  backup_id)
+        req.method = 'DELETE'
+        req.headers['Content-Type'] = 'application/json'
+        res = req.get_response(fakes.wsgi_app())
+
+        self.assertEqual(res.status_int, 202)
+        self.assertEqual(self._get_backup_attrib(backup_id, 'status'),
+                         'deleting')
+
+        db.backup_destroy(context.get_admin_context(), backup_id)
+
+    def test_delete_backup_error(self):
+        backup_id = self._create_backup(status='error')
+        req = webob.Request.blank('/v2/fake/backups/%s' %
+                                  backup_id)
+        req.method = 'DELETE'
+        req.headers['Content-Type'] = 'application/json'
+        res = req.get_response(fakes.wsgi_app())
+
+        self.assertEqual(res.status_int, 202)
+        self.assertEqual(self._get_backup_attrib(backup_id, 'status'),
+                         'deleting')
+
+        db.backup_destroy(context.get_admin_context(), backup_id)
+
+    def test_delete_backup_with_backup_NotFound(self):
+        req = webob.Request.blank('/v2/fake/backups/9999')
+        req.method = 'DELETE'
        req.headers['Content-Type'] = 'application/json'
+        res = req.get_response(fakes.wsgi_app())
+        res_dict = json.loads(res.body)
+
+        self.assertEqual(res.status_int, 404)
+        self.assertEqual(res_dict['itemNotFound']['code'], 404)
+        self.assertEqual(res_dict['itemNotFound']['message'],
+                         'Backup 9999 could not be found.')
+
+    def test_delete_backup_with_InvalidBackup(self):
+        backup_id = self._create_backup()
+        req = webob.Request.blank('/v2/fake/backups/%s' %
+                                  backup_id)
+        req.method = 'DELETE'
+        req.headers['Content-Type'] = 'application/json'
+        res = req.get_response(fakes.wsgi_app())
+        res_dict = json.loads(res.body)
+
+        self.assertEqual(res.status_int, 400)
+        self.assertEqual(res_dict['badRequest']['code'], 400)
+        self.assertEqual(res_dict['badRequest']['message'],
+                         'Invalid backup: Backup status must be '
+                         'available or error')
+
+        db.backup_destroy(context.get_admin_context(), backup_id)
+
+    def test_restore_backup_volume_id_specified_json(self):
+        backup_id = self._create_backup(status='available')
+        # need to create the volume referenced below first
+        volume_id = utils.create_volume(self.context, size=5)['id']
+
+        body = {"restore": {"volume_id": volume_id, }}
+        req = webob.Request.blank('/v2/fake/backups/%s/restore' %
+                                  backup_id)
+        req.method = 'POST'
+        req.headers['Content-Type'] = 'application/json'
+        req.body = json.dumps(body)
+        res = req.get_response(fakes.wsgi_app())
+        res_dict = json.loads(res.body)
+
+        self.assertEqual(res.status_int, 202)
+        self.assertEqual(res_dict['restore']['backup_id'], backup_id)
+        self.assertEqual(res_dict['restore']['volume_id'], volume_id)
+
+    def test_restore_backup_volume_id_specified_xml(self):
+        backup_id = self._create_backup(status='available')
+        volume_id = utils.create_volume(self.context, size=2)['id']
+
+        req = webob.Request.blank('/v2/fake/backups/%s/restore' % backup_id)
+        req.body = '<restore volume_id="%s"/>' % volume_id
+        req.method = 'POST'
+        req.headers['Content-Type'] = 'application/xml'
+        req.headers['Accept'] = 'application/xml'
+        res = req.get_response(fakes.wsgi_app())
+
+        self.assertEqual(res.status_int, 202)
+        dom = minidom.parseString(res.body)
+        restore = dom.getElementsByTagName('restore')
+        self.assertEqual(restore.item(0).getAttribute('backup_id'),
+                         backup_id)
+        self.assertEqual(restore.item(0).getAttribute('volume_id'), volume_id)
+
+        db.backup_destroy(context.get_admin_context(), backup_id)
+        db.volume_destroy(context.get_admin_context(), volume_id)
+
+    def test_restore_backup_with_no_body(self):
+        # omit body from the request
+        backup_id = self._create_backup(status='available')
+
+        req = webob.Request.blank('/v2/fake/backups/%s/restore' %
+                                  backup_id)
+        req.body = json.dumps(None)
+        req.method = 'POST'
+        req.headers['Content-Type'] = 'application/json'
+        req.headers['Accept'] = 'application/json'
+        res = req.get_response(fakes.wsgi_app())
+        res_dict = json.loads(res.body)
+
+        self.assertEqual(res.status_int, 400)
+        self.assertEqual(res_dict['badRequest']['code'], 400)
+        self.assertEqual(res_dict['badRequest']['message'],
+                         'Incorrect request body format')
+
+        db.backup_destroy(context.get_admin_context(), backup_id)
+
+    def test_restore_backup_with_body_KeyError(self):
+        # omit restore from body
+        backup_id = self._create_backup(status='available')
+
+        req = webob.Request.blank('/v2/fake/backups/%s/restore' % backup_id)
+        body = {"": {}}
+        req.method = 'POST'
+        req.headers['Content-Type'] = 'application/json'
+        req.headers['Accept'] = 'application/json'
+        req.body = json.dumps(body)
+        res = req.get_response(fakes.wsgi_app())
+
+        res_dict
= json.loads(res.body) + + self.assertEqual(res.status_int, 400) + self.assertEqual(res_dict['badRequest']['code'], 400) + self.assertEqual(res_dict['badRequest']['message'], + 'Incorrect request body format') + + def test_restore_backup_volume_id_unspecified(self): + + # intercept volume creation to ensure created volume + # has status of available + def fake_volume_api_create(cls, context, size, name, description): + volume_id = utils.create_volume(self.context, size=size)['id'] + return db.volume_get(context, volume_id) + + self.stubs.Set(cinder.volume.API, 'create', + fake_volume_api_create) + + backup_id = self._create_backup(size=5, status='available') + + body = {"restore": {}} + req = webob.Request.blank('/v2/fake/backups/%s/restore' % + backup_id) + req.method = 'POST' + req.headers['Content-Type'] = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 202) + self.assertEqual(res_dict['restore']['backup_id'], backup_id) + + def test_restore_backup_with_InvalidInput(self): + + def fake_backup_api_restore_throwing_InvalidInput(cls, context, + backup_id, + volume_id): + msg = _("Invalid input") + raise exception.InvalidInput(reason=msg) + + self.stubs.Set(cinder.backup.API, 'restore', + fake_backup_api_restore_throwing_InvalidInput) + + backup_id = self._create_backup(status='available') + # need to create the volume referenced below first + volume_id = utils.create_volume(self.context, size=0)['id'] + body = {"restore": {"volume_id": volume_id, }} + req = webob.Request.blank('/v2/fake/backups/%s/restore' % + backup_id) + + req.method = 'POST' + req.headers['Content-Type'] = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 400) + self.assertEqual(res_dict['badRequest']['code'], 400) + self.assertEqual(res_dict['badRequest']['message'], + 'Invalid input received: Invalid input') + + def test_restore_backup_with_InvalidVolume(self): + backup_id = self._create_backup(status='available') + # need to create the volume referenced below first + volume_id = utils.create_volume(self.context, size=5, + status='attaching')['id'] + + body = {"restore": {"volume_id": volume_id, }} + req = webob.Request.blank('/v2/fake/backups/%s/restore' % + backup_id) + req.method = 'POST' + req.headers['Content-Type'] = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 400) + self.assertEqual(res_dict['badRequest']['code'], 400) + self.assertEqual(res_dict['badRequest']['message'], + 'Invalid volume: Volume to be restored to must ' + 'be available') + + db.volume_destroy(context.get_admin_context(), volume_id) + db.backup_destroy(context.get_admin_context(), backup_id) + + def test_restore_backup_with_InvalidBackup(self): + backup_id = self._create_backup(status='restoring') + # need to create the volume referenced below first + volume_id = utils.create_volume(self.context, size=5)['id'] + + body = {"restore": {"volume_id": volume_id, }} + req = webob.Request.blank('/v2/fake/backups/%s/restore' % + backup_id) + req.method = 'POST' + req.headers['Content-Type'] = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 400) + self.assertEqual(res_dict['badRequest']['code'], 
400) + self.assertEqual(res_dict['badRequest']['message'], + 'Invalid backup: Backup status must be available') + + db.volume_destroy(context.get_admin_context(), volume_id) + db.backup_destroy(context.get_admin_context(), backup_id) + + def test_restore_backup_with_BackupNotFound(self): + # need to create the volume referenced below first + volume_id = utils.create_volume(self.context, size=5)['id'] + + body = {"restore": {"volume_id": volume_id, }} + req = webob.Request.blank('/v2/fake/backups/9999/restore') + req.method = 'POST' + req.headers['Content-Type'] = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 404) + self.assertEqual(res_dict['itemNotFound']['code'], 404) + self.assertEqual(res_dict['itemNotFound']['message'], + 'Backup 9999 could not be found.') + + db.volume_destroy(context.get_admin_context(), volume_id) + + def test_restore_backup_with_VolumeNotFound(self): + backup_id = self._create_backup(status='available') + + body = {"restore": {"volume_id": "9999", }} + req = webob.Request.blank('/v2/fake/backups/%s/restore' % + backup_id) + req.method = 'POST' + req.headers['Content-Type'] = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 404) + self.assertEqual(res_dict['itemNotFound']['code'], 404) + self.assertEqual(res_dict['itemNotFound']['message'], + 'Volume 9999 could not be found.') + + db.backup_destroy(context.get_admin_context(), backup_id) + + def test_restore_backup_with_VolumeSizeExceedsAvailableQuota(self): + + def fake_backup_api_restore_throwing_VolumeSizeExceedsAvailableQuota( + cls, context, backup_id, volume_id): + raise exception.VolumeSizeExceedsAvailableQuota(requested='2', + consumed='2', + quota='3') + + self.stubs.Set( + cinder.backup.API, + 'restore', + fake_backup_api_restore_throwing_VolumeSizeExceedsAvailableQuota) + + backup_id = self._create_backup(status='available') + # need to create the volume referenced below first + volume_id = utils.create_volume(self.context, size=5)['id'] + + body = {"restore": {"volume_id": volume_id, }} + req = webob.Request.blank('/v2/fake/backups/%s/restore' % + backup_id) + + req.method = 'POST' + req.headers['Content-Type'] = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 413) + self.assertEqual(res_dict['overLimit']['code'], 413) + self.assertEqual(res_dict['overLimit']['message'], + 'Requested volume or snapshot exceeds allowed ' + 'Gigabytes quota. 
Requested 2G, quota is 3G and ' + '2G has been consumed.') + + def test_restore_backup_with_VolumeLimitExceeded(self): + + def fake_backup_api_restore_throwing_VolumeLimitExceeded(cls, + context, + backup_id, + volume_id): + raise exception.VolumeLimitExceeded(allowed=1) + + self.stubs.Set(cinder.backup.API, 'restore', + fake_backup_api_restore_throwing_VolumeLimitExceeded) + + backup_id = self._create_backup(status='available') + # need to create the volume referenced below first + volume_id = utils.create_volume(self.context, size=5)['id'] + + body = {"restore": {"volume_id": volume_id, }} + req = webob.Request.blank('/v2/fake/backups/%s/restore' % + backup_id) + + req.method = 'POST' + req.headers['Content-Type'] = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 413) + self.assertEqual(res_dict['overLimit']['code'], 413) + self.assertEqual(res_dict['overLimit']['message'], + 'Maximum number of volumes allowed (1) exceeded') + + def test_restore_backup_to_undersized_volume(self): + backup_size = 10 + backup_id = self._create_backup(status='available', size=backup_size) + # need to create the volume referenced below first + volume_size = 5 + volume_id = utils.create_volume(self.context, size=volume_size)['id'] + + body = {"restore": {"volume_id": volume_id, }} + req = webob.Request.blank('/v2/fake/backups/%s/restore' % + backup_id) + req.method = 'POST' + req.headers['Content-Type'] = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 400) + self.assertEqual(res_dict['badRequest']['code'], 400) + self.assertEqual(res_dict['badRequest']['message'], + 'Invalid volume: volume size %d is too ' + 'small to restore backup of size %d.' + % (volume_size, backup_size)) + + db.volume_destroy(context.get_admin_context(), volume_id) + db.backup_destroy(context.get_admin_context(), backup_id) + + def test_restore_backup_to_oversized_volume(self): + backup_id = self._create_backup(status='available', size=10) + # need to create the volume referenced below first + volume_id = utils.create_volume(self.context, size=15)['id'] + + body = {"restore": {"volume_id": volume_id, }} + req = webob.Request.blank('/v2/fake/backups/%s/restore' % + backup_id) + req.method = 'POST' + req.headers['Content-Type'] = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 202) + self.assertEqual(res_dict['restore']['backup_id'], backup_id) + self.assertEqual(res_dict['restore']['volume_id'], volume_id) + + db.volume_destroy(context.get_admin_context(), volume_id) + db.backup_destroy(context.get_admin_context(), backup_id) diff --git a/cinder/tests/api/openstack/volume/contrib/test_extended_snapshot_attributes.py b/cinder/tests/api/contrib/test_extended_snapshot_attributes.py similarity index 69% rename from cinder/tests/api/openstack/volume/contrib/test_extended_snapshot_attributes.py rename to cinder/tests/api/contrib/test_extended_snapshot_attributes.py index 43490fbc19..e90c291634 100644 --- a/cinder/tests/api/openstack/volume/contrib/test_extended_snapshot_attributes.py +++ b/cinder/tests/api/contrib/test_extended_snapshot_attributes.py @@ -1,4 +1,4 @@ -# Copyright 2012 OpenStack LLC. +# Copyright 2012 OpenStack Foundation # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -13,37 +13,31 @@ # License for the specific language governing permissions and limitations # under the License. + from lxml import etree import webob -from cinder.api.openstack.volume.contrib import extended_snapshot_attributes -from cinder import exception -from cinder import flags +from cinder.api.contrib import extended_snapshot_attributes from cinder.openstack.common import jsonutils from cinder import test -from cinder.tests.api.openstack import fakes +from cinder.tests.api import fakes from cinder import volume -FLAGS = flags.FLAGS - - UUID1 = '00000000-0000-0000-0000-000000000001' UUID2 = '00000000-0000-0000-0000-000000000002' def _get_default_snapshot_param(): - return { - 'id': UUID1, - 'volume_id': 12, - 'status': 'available', - 'volume_size': 100, - 'created_at': None, - 'display_name': 'Default name', - 'display_description': 'Default description', - 'project_id': 'fake', - 'progress': '0%' - } + return {'id': UUID1, + 'volume_id': 12, + 'status': 'available', + 'volume_size': 100, + 'created_at': None, + 'display_name': 'Default name', + 'display_description': 'Default description', + 'project_id': 'fake', + 'progress': '0%'} def fake_snapshot_get(self, context, snapshot_id): @@ -80,38 +74,27 @@ def _get_snapshots(self, body): def assertSnapshotAttributes(self, snapshot, project_id, progress): self.assertEqual(snapshot.get('%sproject_id' % self.prefix), - project_id) + project_id) self.assertEqual(snapshot.get('%sprogress' % self.prefix), progress) def test_show(self): - url = '/v1/fake/snapshots/%s' % UUID2 + url = '/v2/fake/snapshots/%s' % UUID2 res = self._make_request(url) self.assertEqual(res.status_int, 200) self.assertSnapshotAttributes(self._get_snapshot(res.body), - project_id='fake', - progress='0%') + project_id='fake', + progress='0%') def test_detail(self): - url = '/v1/fake/snapshots/detail' + url = '/v2/fake/snapshots/detail' res = self._make_request(url) self.assertEqual(res.status_int, 200) for i, snapshot in enumerate(self._get_snapshots(res.body)): self.assertSnapshotAttributes(snapshot, - project_id='fake', - progress='0%') - - def test_no_instance_passthrough_404(self): - - def fake_snapshot_get(*args, **kwargs): - raise exception.InstanceNotFound() - - self.stubs.Set(volume.api.API, 'get_snapshot', fake_snapshot_get) - url = '/v1/fake/snapshots/70f6db34-de8d-4fbd-aafb-4065bdfa6115' - res = self._make_request(url) - - self.assertEqual(res.status_int, 404) + project_id='fake', + progress='0%') class ExtendedSnapshotAttributesXmlTest(ExtendedSnapshotAttributesTest): diff --git a/cinder/tests/api/contrib/test_hosts.py b/cinder/tests/api/contrib/test_hosts.py new file mode 100644 index 0000000000..7f8c542b59 --- /dev/null +++ b/cinder/tests/api/contrib/test_hosts.py @@ -0,0 +1,198 @@ +# Copyright (c) 2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
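+
+"""Tests for the hosts admin API extension."""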
+
+import datetime
+
+from lxml import etree
+import webob.exc
+
+from cinder.api.contrib import hosts as os_hosts
+from cinder import context
+from cinder import db
+from cinder.openstack.common import log as logging
+from cinder.openstack.common import timeutils
+from cinder import test
+
+
+LOG = logging.getLogger(__name__)
+created_time = datetime.datetime(2012, 11, 14, 1, 20, 41, 95099)
+curr_time = timeutils.utcnow()
+
+SERVICE_LIST = [
+    {'created_at': created_time, 'updated_at': curr_time,
+     'host': 'test.host.1', 'topic': 'cinder-volume', 'disabled': 0,
+     'availability_zone': 'cinder'},
+    {'created_at': created_time, 'updated_at': curr_time,
+     'host': 'test.host.1', 'topic': 'cinder-volume', 'disabled': 0,
+     'availability_zone': 'cinder'},
+    {'created_at': created_time, 'updated_at': curr_time,
+     'host': 'test.host.1', 'topic': 'cinder-volume', 'disabled': 0,
+     'availability_zone': 'cinder'},
+    {'created_at': created_time, 'updated_at': curr_time,
+     'host': 'test.host.1', 'topic': 'cinder-volume', 'disabled': 0,
+     'availability_zone': 'cinder'}]
+
+LIST_RESPONSE = [{'service-status': 'available', 'service': 'cinder-volume',
+                  'zone': 'cinder', 'service-state': 'enabled',
+                  'host_name': 'test.host.1', 'last-update': curr_time},
+                 {'service-status': 'available', 'service': 'cinder-volume',
+                  'zone': 'cinder', 'service-state': 'enabled',
+                  'host_name': 'test.host.1', 'last-update': curr_time},
+                 {'service-status': 'available', 'service': 'cinder-volume',
+                  'zone': 'cinder', 'service-state': 'enabled',
+                  'host_name': 'test.host.1', 'last-update': curr_time},
+                 {'service-status': 'available', 'service': 'cinder-volume',
+                  'zone': 'cinder', 'service-state': 'enabled',
+                  'host_name': 'test.host.1', 'last-update': curr_time}]
+
+
+def stub_service_get_all(self, req):
+    return SERVICE_LIST
+
+
+class FakeRequest(object):
+    environ = {'cinder.context': context.get_admin_context()}
+    GET = {}
+
+
+class FakeRequestWithcinderZone(object):
+    environ = {'cinder.context': context.get_admin_context()}
+    GET = {'zone': 'cinder'}
+
+
+class HostTestCase(test.TestCase):
+    """Test Case for hosts."""
+
+    def setUp(self):
+        super(HostTestCase, self).setUp()
+        self.controller = os_hosts.HostController()
+        self.req = FakeRequest()
+        self.stubs.Set(db, 'service_get_all',
+                       stub_service_get_all)
+
+    def _test_host_update(self, host, key, val, expected_value):
+        body = {key: val}
+        result = self.controller.update(self.req, host, body=body)
+        self.assertEqual(result[key], expected_value)
+
+    def test_list_hosts(self):
+        """Verify that the volume hosts are returned."""
+        hosts = os_hosts._list_hosts(self.req)
+        self.assertEqual(hosts, LIST_RESPONSE)
+
+        cinder_hosts = os_hosts._list_hosts(self.req, 'cinder-volume')
+        expected = [host for host in LIST_RESPONSE
+                    if host['service'] == 'cinder-volume']
+        self.assertEqual(cinder_hosts, expected)
+
+    def test_list_hosts_with_zone(self):
+        req = FakeRequestWithcinderZone()
+        hosts = os_hosts._list_hosts(req)
+        self.assertEqual(hosts, LIST_RESPONSE)
+
+    def test_bad_status_value(self):
+        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
+                          self.req, 'test.host.1', body={'status': 'bad'})
+        self.assertRaises(webob.exc.HTTPBadRequest,
+                          self.controller.update,
+                          self.req,
+                          'test.host.1',
+                          body={'status': 'disablabc'})
+
+    def test_bad_update_key(self):
+        bad_body = {'crazy': 'bad'}
+        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
+                          self.req, 'test.host.1', body=bad_body)
+
+    def test_bad_update_key_and_correct_update_key(self):
+        bad_body = {'status': 'disable', 'crazy': 'bad'}
+        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
+                          self.req, 'test.host.1', body=bad_body)
+
+    def test_good_update_keys(self):
+        body = {'status': 'disable'}
+        self.assertRaises(NotImplementedError, self.controller.update,
+                          self.req, 'test.host.1', body=body)
+
+    def test_bad_host(self):
+        self.assertRaises(webob.exc.HTTPNotFound,
+                          self.controller.update,
+                          self.req,
+                          'bogus_host_name',
+                          body={'disabled': 0})
+
+    def test_show_forbidden(self):
+        self.req.environ['cinder.context'].is_admin = False
+        dest = 'dummydest'
+        self.assertRaises(webob.exc.HTTPForbidden,
+                          self.controller.show,
+                          self.req, dest)
+        self.req.environ['cinder.context'].is_admin = True
+
+    def test_show_host_not_exist(self):
+        """A host given as an argument does not exist."""
+        self.req.environ['cinder.context'].is_admin = True
+        dest = 'dummydest'
+        self.assertRaises(webob.exc.HTTPNotFound,
+                          self.controller.show,
+                          self.req, dest)
+
+
+class HostSerializerTest(test.TestCase):
+    def setUp(self):
+        super(HostSerializerTest, self).setUp()
+        self.deserializer = os_hosts.HostDeserializer()
+
+    def test_index_serializer(self):
+        serializer = os_hosts.HostIndexTemplate()
+        text = serializer.serialize({"hosts": LIST_RESPONSE})
+
+        tree = etree.fromstring(text)
+
+        self.assertEqual('hosts', tree.tag)
+        self.assertEqual(len(LIST_RESPONSE), len(tree))
+        for i in range(len(LIST_RESPONSE)):
+            self.assertEqual('host', tree[i].tag)
+            self.assertEqual(LIST_RESPONSE[i]['service-status'],
+                             tree[i].get('service-status'))
+            self.assertEqual(LIST_RESPONSE[i]['service'],
+                             tree[i].get('service'))
+            self.assertEqual(LIST_RESPONSE[i]['zone'],
+                             tree[i].get('zone'))
+            self.assertEqual(LIST_RESPONSE[i]['service-state'],
+                             tree[i].get('service-state'))
+            self.assertEqual(LIST_RESPONSE[i]['host_name'],
+                             tree[i].get('host_name'))
+            self.assertEqual(str(LIST_RESPONSE[i]['last-update']),
+                             tree[i].get('last-update'))
+
+    def test_update_serializer_with_status(self):
+        exemplar = dict(host='test.host.1', status='enabled')
+        serializer = os_hosts.HostUpdateTemplate()
+        text = serializer.serialize(exemplar)
+
+        tree = etree.fromstring(text)
+
+        self.assertEqual('host', tree.tag)
+        for key, value in exemplar.items():
+            self.assertEqual(value, tree.get(key))
+
+    def test_update_deserializer(self):
+        exemplar = dict(status='enabled', foo='bar')
+        intext = ("<?xml version='1.0' encoding='UTF-8'?>\n"
+                  '<updates><status>enabled</status><foo>bar</foo></updates>')
+        result = self.deserializer.deserialize(intext)
+
+        self.assertEqual(dict(body=exemplar), result)
diff --git a/cinder/tests/api/contrib/test_qos_specs_manage.py b/cinder/tests/api/contrib/test_qos_specs_manage.py
new file mode 100644
index 0000000000..0c3afd7f2c
--- /dev/null
+++ b/cinder/tests/api/contrib/test_qos_specs_manage.py
@@ -0,0 +1,602 @@
+# Copyright 2013 eBay Inc.
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
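+
+"""Tests for the qos-specs-manage API extension."""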
+
+from xml.dom import minidom
+
+import webob
+
+from cinder.api.contrib import qos_specs_manage
+from cinder import exception
+from cinder.openstack.common.notifier import api as notifier_api
+from cinder.openstack.common.notifier import test_notifier
+from cinder import test
+from cinder.tests.api import fakes
+from cinder.volume import qos_specs
+
+
+def stub_qos_specs(id):
+    res = dict(name='qos_specs_' + str(id))
+    res.update(dict(consumer='back-end'))
+    res.update(dict(id=str(id)))
+    specs = {"key1": "value1",
+             "key2": "value2",
+             "key3": "value3",
+             "key4": "value4",
+             "key5": "value5"}
+    res.update(dict(specs=specs))
+    return res
+
+
+def stub_qos_associates(id):
+    return [{
+        'association_type': 'volume_type',
+        'name': 'FakeVolTypeName',
+        'id': 'FakeVolTypeID'}]
+
+
+def return_qos_specs_get_all(context):
+    return [
+        stub_qos_specs(1),
+        stub_qos_specs(2),
+        stub_qos_specs(3),
+    ]
+
+
+def return_qos_specs_get_qos_specs(context, id):
+    if id == "777":
+        raise exception.QoSSpecsNotFound(specs_id=id)
+    return stub_qos_specs(int(id))
+
+
+def return_qos_specs_delete(context, id, force):
+    if id == "777":
+        raise exception.QoSSpecsNotFound(specs_id=id)
+    elif id == "666":
+        raise exception.QoSSpecsInUse(specs_id=id)
+    pass
+
+
+def return_qos_specs_delete_keys(context, id, keys):
+    if id == "777":
+        raise exception.QoSSpecsNotFound(specs_id=id)
+
+    if 'foo' in keys:
+        raise exception.QoSSpecsKeyNotFound(specs_id=id,
+                                            specs_key='foo')
+
+
+def return_qos_specs_update(context, id, specs):
+    if id == "777":
+        raise exception.QoSSpecsNotFound(specs_id=id)
+    elif id == "888":
+        raise exception.InvalidQoSSpecs(reason=str(id))
+    elif id == "999":
+        raise exception.QoSSpecsUpdateFailed(specs_id=id,
+                                             qos_specs=specs)
+    pass
+
+
+def return_qos_specs_create(context, name, specs):
+    if name == "666":
+        raise exception.QoSSpecsExists(specs_id=name)
+    elif name == "555":
+        raise exception.QoSSpecsCreateFailed(name=name, qos_specs=specs)
+    pass
+
+
+def return_qos_specs_get_by_name(context, name):
+    if name == "777":
+        raise exception.QoSSpecsNotFound(specs_id=name)
+
+    return stub_qos_specs(int(name.split("_")[2]))
+
+
+def return_get_qos_associations(context, id):
+    if id == "111":
+        raise exception.QoSSpecsNotFound(specs_id=id)
+    elif id == "222":
+        raise exception.CinderException()
+
+    return stub_qos_associates(id)
+
+
+def return_associate_qos_specs(context, id, type_id):
+    if id == "111":
+        raise exception.QoSSpecsNotFound(specs_id=id)
+    elif id == "222":
+        raise exception.QoSSpecsAssociateFailed(specs_id=id,
+                                                type_id=type_id)
+    elif id == "333":
+        raise exception.QoSSpecsDisassociateFailed(specs_id=id,
+                                                   type_id=type_id)
+
+    if type_id == "1234":
+        raise exception.VolumeTypeNotFound(
+            volume_type_id=type_id)
+
+    pass
+
+
+def return_disassociate_all(context, id):
+    if id == "111":
+        raise exception.QoSSpecsNotFound(specs_id=id)
+    elif id == "222":
+        raise exception.QoSSpecsDisassociateFailed(specs_id=id,
+                                                   type_id=None)
+
+
+class QoSSpecManageApiTest(test.TestCase):
+    def setUp(self):
+        super(QoSSpecManageApiTest, self).setUp()
+        self.flags(host='fake',
+                   notification_driver=[test_notifier.__name__])
+        self.controller = qos_specs_manage.QoSSpecsController()
+        # reset notifier drivers left over from other api/contrib tests
+        notifier_api._reset_drivers()
+        test_notifier.NOTIFICATIONS = []
+
+    def tearDown(self):
+        notifier_api._reset_drivers()
+        super(QoSSpecManageApiTest, self).tearDown()
+
+    def test_index(self):
+        self.stubs.Set(qos_specs, 'get_all_specs',
return_qos_specs_get_all) + + req = fakes.HTTPRequest.blank('/v2/fake/qos-specs') + res = self.controller.index(req) + + self.assertEqual(3, len(res['qos_specs'])) + + names = set() + for item in res['qos_specs']: + self.assertEqual('value1', item['specs']['key1']) + names.add(item['name']) + expected_names = ['qos_specs_1', 'qos_specs_2', 'qos_specs_3'] + self.assertEqual(names, set(expected_names)) + + def test_index_xml_response(self): + self.stubs.Set(qos_specs, 'get_all_specs', + return_qos_specs_get_all) + + req = fakes.HTTPRequest.blank('/v2/fake/qos-specs') + res = self.controller.index(req) + req.method = 'GET' + req.headers['Content-Type'] = 'application/xml' + req.headers['Accept'] = 'application/xml' + res = req.get_response(fakes.wsgi_app()) + + self.assertEqual(res.status_int, 200) + dom = minidom.parseString(res.body) + qos_specs_response = dom.getElementsByTagName('qos_spec') + + names = set() + for qos_spec in qos_specs_response: + name = qos_spec.getAttribute('name') + names.add(name) + + expected_names = ['qos_specs_1', 'qos_specs_2', 'qos_specs_3'] + self.assertEqual(names, set(expected_names)) + + def test_qos_specs_delete(self): + self.stubs.Set(qos_specs, 'get_qos_specs', + return_qos_specs_get_qos_specs) + self.stubs.Set(qos_specs, 'delete', + return_qos_specs_delete) + req = fakes.HTTPRequest.blank('/v2/fake/qos-specs/1') + self.assertEqual(len(test_notifier.NOTIFICATIONS), 0) + self.controller.delete(req, 1) + self.assertEqual(len(test_notifier.NOTIFICATIONS), 1) + + def test_qos_specs_delete_not_found(self): + self.stubs.Set(qos_specs, 'get_qos_specs', + return_qos_specs_get_qos_specs) + self.stubs.Set(qos_specs, 'delete', + return_qos_specs_delete) + + self.assertEqual(len(test_notifier.NOTIFICATIONS), 0) + req = fakes.HTTPRequest.blank('/v2/fake/qos-specs/777') + self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, + req, '777') + self.assertEqual(len(test_notifier.NOTIFICATIONS), 1) + + def test_qos_specs_delete_inuse(self): + self.stubs.Set(qos_specs, 'get_qos_specs', + return_qos_specs_get_qos_specs) + self.stubs.Set(qos_specs, 'delete', + return_qos_specs_delete) + + req = fakes.HTTPRequest.blank('/v2/fake/qos-specs/666') + self.assertEqual(len(test_notifier.NOTIFICATIONS), 0) + self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete, + req, '666') + self.assertEqual(len(test_notifier.NOTIFICATIONS), 1) + + def test_qos_specs_delete_inuse_force(self): + self.stubs.Set(qos_specs, 'get_qos_specs', + return_qos_specs_get_qos_specs) + self.stubs.Set(qos_specs, 'delete', + return_qos_specs_delete) + + req = fakes.HTTPRequest.blank('/v2/fake/qos-specs/666?force=True') + self.assertEqual(len(test_notifier.NOTIFICATIONS), 0) + self.assertRaises(webob.exc.HTTPInternalServerError, + self.controller.delete, + req, '666') + self.assertEqual(len(test_notifier.NOTIFICATIONS), 1) + + def test_qos_specs_delete_keys(self): + self.stubs.Set(qos_specs, 'delete_keys', + return_qos_specs_delete_keys) + body = {"keys": ['bar', 'zoo']} + req = fakes.HTTPRequest.blank('/v2/fake/qos-specs/666/delete_keys') + self.assertEqual(len(test_notifier.NOTIFICATIONS), 0) + self.controller.delete_keys(req, '666', body) + self.assertEqual(len(test_notifier.NOTIFICATIONS), 1) + + def test_qos_specs_delete_keys_qos_notfound(self): + self.stubs.Set(qos_specs, 'delete_keys', + return_qos_specs_delete_keys) + body = {"keys": ['bar', 'zoo']} + req = fakes.HTTPRequest.blank('/v2/fake/qos-specs/777/delete_keys') + self.assertEqual(len(test_notifier.NOTIFICATIONS), 0) + 
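+        # a failed delete_keys call is still reported through the notifier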
self.assertRaises(webob.exc.HTTPNotFound, + self.controller.delete_keys, + req, '777', body) + self.assertEqual(len(test_notifier.NOTIFICATIONS), 1) + + def test_qos_specs_delete_keys_badkey(self): + self.stubs.Set(qos_specs, 'delete_keys', + return_qos_specs_delete_keys) + req = fakes.HTTPRequest.blank('/v2/fake/qos-specs/666/delete_keys') + body = {"keys": ['foo', 'zoo']} + self.assertEqual(len(test_notifier.NOTIFICATIONS), 0) + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.delete_keys, + req, '666', body) + self.assertEqual(len(test_notifier.NOTIFICATIONS), 1) + + def test_create(self): + self.stubs.Set(qos_specs, 'create', + return_qos_specs_create) + self.stubs.Set(qos_specs, 'get_qos_specs_by_name', + return_qos_specs_get_by_name) + + body = {"qos_specs": {"name": "qos_specs_1", + "key1": "value1"}} + req = fakes.HTTPRequest.blank('/v2/fake/qos-specs') + + self.assertEqual(len(test_notifier.NOTIFICATIONS), 0) + res_dict = self.controller.create(req, body) + + self.assertEqual(len(test_notifier.NOTIFICATIONS), 1) + self.assertEqual('qos_specs_1', res_dict['qos_specs']['name']) + + def test_create_conflict(self): + self.stubs.Set(qos_specs, 'create', + return_qos_specs_create) + self.stubs.Set(qos_specs, 'get_qos_specs_by_name', + return_qos_specs_get_by_name) + + body = {"qos_specs": {"name": "666", + "key1": "value1"}} + req = fakes.HTTPRequest.blank('/v2/fake/qos-specs') + + self.assertEqual(len(test_notifier.NOTIFICATIONS), 0) + self.assertRaises(webob.exc.HTTPConflict, + self.controller.create, req, body) + self.assertEqual(len(test_notifier.NOTIFICATIONS), 1) + + def test_create_failed(self): + self.stubs.Set(qos_specs, 'create', + return_qos_specs_create) + self.stubs.Set(qos_specs, 'get_qos_specs_by_name', + return_qos_specs_get_by_name) + + body = {"qos_specs": {"name": "555", + "key1": "value1"}} + req = fakes.HTTPRequest.blank('/v2/fake/qos-specs') + + self.assertEqual(len(test_notifier.NOTIFICATIONS), 0) + self.assertRaises(webob.exc.HTTPInternalServerError, + self.controller.create, req, body) + self.assertEqual(len(test_notifier.NOTIFICATIONS), 1) + + def _create_qos_specs_bad_body(self, body): + req = fakes.HTTPRequest.blank('/v2/fake/qos-specs') + req.method = 'POST' + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.create, req, body) + + def test_create_no_body(self): + self._create_qos_specs_bad_body(body=None) + + def test_create_missing_specs_name(self): + body = {'foo': {'a': 'b'}} + self._create_qos_specs_bad_body(body=body) + + def test_create_malformed_entity(self): + body = {'qos_specs': 'string'} + self._create_qos_specs_bad_body(body=body) + + def test_update(self): + self.stubs.Set(qos_specs, 'update', + return_qos_specs_update) + + self.assertEqual(len(test_notifier.NOTIFICATIONS), 0) + req = fakes.HTTPRequest.blank('/v2/fake/qos-specs/555') + body = {'qos_specs': {'key1': 'value1', + 'key2': 'value2'}} + res = self.controller.update(req, '555', body) + self.assertDictMatch(res, body) + self.assertEqual(len(test_notifier.NOTIFICATIONS), 1) + + def test_update_not_found(self): + self.stubs.Set(qos_specs, 'update', + return_qos_specs_update) + + self.assertEqual(len(test_notifier.NOTIFICATIONS), 0) + req = fakes.HTTPRequest.blank('/v2/fake/qos-specs/777') + body = {'qos_specs': {'key1': 'value1', + 'key2': 'value2'}} + self.assertRaises(webob.exc.HTTPNotFound, self.controller.update, + req, '777', body) + self.assertEqual(len(test_notifier.NOTIFICATIONS), 1) + + def test_update_invalid_input(self): + self.stubs.Set(qos_specs, 
'update', + return_qos_specs_update) + + self.assertEqual(len(test_notifier.NOTIFICATIONS), 0) + req = fakes.HTTPRequest.blank('/v2/fake/qos-specs/888') + body = {'qos_specs': {'key1': 'value1', + 'key2': 'value2'}} + self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, + req, '888', body) + self.assertEqual(len(test_notifier.NOTIFICATIONS), 1) + + def test_update_failed(self): + self.stubs.Set(qos_specs, 'update', + return_qos_specs_update) + + self.assertEqual(len(test_notifier.NOTIFICATIONS), 0) + req = fakes.HTTPRequest.blank('/v2/fake/qos-specs/999') + body = {'qos_specs': {'key1': 'value1', + 'key2': 'value2'}} + self.assertRaises(webob.exc.HTTPInternalServerError, + self.controller.update, + req, '999', body) + self.assertEqual(len(test_notifier.NOTIFICATIONS), 1) + + def test_show(self): + self.stubs.Set(qos_specs, 'get_qos_specs', + return_qos_specs_get_qos_specs) + + req = fakes.HTTPRequest.blank('/v2/fake/qos-specs/1') + res_dict = self.controller.show(req, '1') + + self.assertEqual('1', res_dict['qos_specs']['id']) + self.assertEqual('qos_specs_1', res_dict['qos_specs']['name']) + + def test_show_xml_response(self): + self.stubs.Set(qos_specs, 'get_qos_specs', + return_qos_specs_get_qos_specs) + + req = fakes.HTTPRequest.blank('/v2/fake/qos-specs/1') + res = self.controller.show(req, '1') + req.method = 'GET' + req.headers['Content-Type'] = 'application/xml' + req.headers['Accept'] = 'application/xml' + res = req.get_response(fakes.wsgi_app()) + + self.assertEqual(res.status_int, 200) + dom = minidom.parseString(res.body) + qos_spec_response = dom.getElementsByTagName('qos_spec') + qos_spec = qos_spec_response.item(0) + + id = qos_spec.getAttribute('id') + name = qos_spec.getAttribute('name') + consumer = qos_spec.getAttribute('consumer') + + self.assertEqual(id, u'1') + self.assertEqual(name, 'qos_specs_1') + self.assertEqual(consumer, 'back-end') + + def test_get_associations(self): + self.stubs.Set(qos_specs, 'get_associations', + return_get_qos_associations) + + req = fakes.HTTPRequest.blank( + '/v2/fake/qos-specs/1/associations') + res = self.controller.associations(req, '1') + + self.assertEqual('FakeVolTypeName', + res['qos_associations'][0]['name']) + self.assertEqual('FakeVolTypeID', + res['qos_associations'][0]['id']) + + def test_get_associations_xml_response(self): + self.stubs.Set(qos_specs, 'get_associations', + return_get_qos_associations) + + req = fakes.HTTPRequest.blank('/v2/fake/qos-specs/1/associations') + res = self.controller.associations(req, '1') + req.method = 'GET' + req.headers['Content-Type'] = 'application/xml' + req.headers['Accept'] = 'application/xml' + res = req.get_response(fakes.wsgi_app()) + + self.assertEqual(res.status_int, 200) + dom = minidom.parseString(res.body) + associations_response = dom.getElementsByTagName('associations') + association = associations_response.item(0) + + id = association.getAttribute('id') + name = association.getAttribute('name') + association_type = association.getAttribute('association_type') + + self.assertEqual(id, 'FakeVolTypeID') + self.assertEqual(name, 'FakeVolTypeName') + self.assertEqual(association_type, 'volume_type') + + def test_get_associations_not_found(self): + self.stubs.Set(qos_specs, 'get_associations', + return_get_qos_associations) + + req = fakes.HTTPRequest.blank( + '/v2/fake/qos-specs/111/associations') + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.associations, + req, '111') + + def test_get_associations_failed(self): + self.stubs.Set(qos_specs, 
'get_associations', + return_get_qos_associations) + + req = fakes.HTTPRequest.blank( + '/v2/fake/qos-specs/222/associations') + self.assertRaises(webob.exc.HTTPInternalServerError, + self.controller.associations, + req, '222') + + def test_associate(self): + self.stubs.Set(qos_specs, 'get_qos_specs', + return_qos_specs_get_qos_specs) + self.stubs.Set(qos_specs, 'associate_qos_with_type', + return_associate_qos_specs) + + req = fakes.HTTPRequest.blank( + '/v2/fake/qos-specs/1/associate?vol_type_id=111') + res = self.controller.associate(req, '1') + + self.assertEqual(res.status_int, 202) + + def test_associate_no_type(self): + self.stubs.Set(qos_specs, 'get_qos_specs', + return_qos_specs_get_qos_specs) + self.stubs.Set(qos_specs, 'associate_qos_with_type', + return_associate_qos_specs) + + req = fakes.HTTPRequest.blank( + '/v2/fake/qos-specs/1/associate') + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.associate, req, '1') + + def test_associate_not_found(self): + self.stubs.Set(qos_specs, 'get_qos_specs', + return_qos_specs_get_qos_specs) + self.stubs.Set(qos_specs, 'associate_qos_with_type', + return_associate_qos_specs) + req = fakes.HTTPRequest.blank( + '/v2/fake/qos-specs/111/associate?vol_type_id=12') + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.associate, req, '111') + + req = fakes.HTTPRequest.blank( + '/v2/fake/qos-specs/1/associate?vol_type_id=1234') + + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.associate, req, '1') + + def test_associate_fail(self): + self.stubs.Set(qos_specs, 'get_qos_specs', + return_qos_specs_get_qos_specs) + self.stubs.Set(qos_specs, 'associate_qos_with_type', + return_associate_qos_specs) + req = fakes.HTTPRequest.blank( + '/v2/fake/qos-specs/222/associate?vol_type_id=1000') + self.assertRaises(webob.exc.HTTPInternalServerError, + self.controller.associate, req, '222') + + def test_disassociate(self): + self.stubs.Set(qos_specs, 'get_qos_specs', + return_qos_specs_get_qos_specs) + self.stubs.Set(qos_specs, 'disassociate_qos_specs', + return_associate_qos_specs) + + req = fakes.HTTPRequest.blank( + '/v2/fake/qos-specs/1/disassociate?vol_type_id=111') + res = self.controller.disassociate(req, '1') + self.assertEqual(res.status_int, 202) + + def test_disassociate_no_type(self): + self.stubs.Set(qos_specs, 'get_qos_specs', + return_qos_specs_get_qos_specs) + self.stubs.Set(qos_specs, 'disassociate_qos_specs', + return_associate_qos_specs) + + req = fakes.HTTPRequest.blank( + '/v2/fake/qos-specs/1/disassociate') + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.disassociate, req, '1') + + def test_disassociate_not_found(self): + self.stubs.Set(qos_specs, 'get_qos_specs', + return_qos_specs_get_qos_specs) + self.stubs.Set(qos_specs, 'disassociate_qos_specs', + return_associate_qos_specs) + req = fakes.HTTPRequest.blank( + '/v2/fake/qos-specs/111/disassociate?vol_type_id=12') + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.disassociate, req, '111') + + req = fakes.HTTPRequest.blank( + '/v2/fake/qos-specs/1/disassociate?vol_type_id=1234') + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.disassociate, req, '1') + + def test_disassociate_failed(self): + self.stubs.Set(qos_specs, 'get_qos_specs', + return_qos_specs_get_qos_specs) + self.stubs.Set(qos_specs, 'disassociate_qos_specs', + return_associate_qos_specs) + req = fakes.HTTPRequest.blank( + '/v2/fake/qos-specs/333/disassociate?vol_type_id=1000') + self.assertRaises(webob.exc.HTTPInternalServerError, + 
self.controller.disassociate, req, '333') + + def test_disassociate_all(self): + self.stubs.Set(qos_specs, 'get_qos_specs', + return_qos_specs_get_qos_specs) + self.stubs.Set(qos_specs, 'disassociate_all', + return_disassociate_all) + + req = fakes.HTTPRequest.blank( + '/v2/fake/qos-specs/1/disassociate_all') + res = self.controller.disassociate_all(req, '1') + self.assertEqual(res.status_int, 202) + + def test_disassociate_all_not_found(self): + self.stubs.Set(qos_specs, 'get_qos_specs', + return_qos_specs_get_qos_specs) + self.stubs.Set(qos_specs, 'disassociate_all', + return_disassociate_all) + req = fakes.HTTPRequest.blank( + '/v2/fake/qos-specs/111/disassociate_all') + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.disassociate_all, req, '111') + + def test_disassociate_all_failed(self): + self.stubs.Set(qos_specs, 'get_qos_specs', + return_qos_specs_get_qos_specs) + self.stubs.Set(qos_specs, 'disassociate_all', + return_disassociate_all) + req = fakes.HTTPRequest.blank( + '/v2/fake/qos-specs/222/disassociate_all') + self.assertRaises(webob.exc.HTTPInternalServerError, + self.controller.disassociate_all, req, '222') diff --git a/cinder/tests/api/contrib/test_quotas.py b/cinder/tests/api/contrib/test_quotas.py new file mode 100644 index 0000000000..2b44ebe1f4 --- /dev/null +++ b/cinder/tests/api/contrib/test_quotas.py @@ -0,0 +1,131 @@ +# +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
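+ +# NOTE: make_body() below builds the quota_set dict the controller is +# expected to return, folding in any volume types that already exist.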
+ +""" +Tests for cinder.api.contrib.quotas.py +""" + +from lxml import etree +import webob.exc + + +from cinder.api.contrib import quotas +from cinder import context +from cinder import db +from cinder import test + + +def make_body(root=True, gigabytes=1000, snapshots=10, + volumes=10, tenant_id='foo'): + resources = {'gigabytes': gigabytes, + 'snapshots': snapshots, + 'volumes': volumes} + # need to consider preexisting volume types as well + volume_types = db.volume_type_get_all(context.get_admin_context()) + for volume_type in volume_types: + resources['gigabytes_' + volume_type] = -1 + resources['snapshots_' + volume_type] = -1 + resources['volumes_' + volume_type] = -1 + + if tenant_id: + resources['id'] = tenant_id + if root: + result = {'quota_set': resources} + else: + result = resources + return result + + +class QuotaSetsControllerTest(test.TestCase): + + def setUp(self): + super(QuotaSetsControllerTest, self).setUp() + self.controller = quotas.QuotaSetsController() + + self.req = self.mox.CreateMockAnything() + self.req.environ = {'cinder.context': context.get_admin_context()} + self.req.environ['cinder.context'].is_admin = True + + def test_defaults(self): + result = self.controller.defaults(self.req, 'foo') + self.assertDictMatch(result, make_body()) + + def test_show(self): + result = self.controller.show(self.req, 'foo') + self.assertDictMatch(result, make_body()) + + def test_show_not_authorized(self): + self.req.environ['cinder.context'].is_admin = False + self.req.environ['cinder.context'].user_id = 'bad_user' + self.req.environ['cinder.context'].project_id = 'bad_project' + self.assertRaises(webob.exc.HTTPForbidden, self.controller.show, + self.req, 'foo') + + def test_update(self): + body = make_body(gigabytes=2000, snapshots=15, + volumes=5, tenant_id=None) + result = self.controller.update(self.req, 'foo', body) + self.assertDictMatch(result, body) + + def test_update_wrong_key(self): + body = {'quota_set': {'bad': 'bad'}} + self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, + self.req, 'foo', body) + + def test_update_invalid_key_value(self): + body = {'quota_set': {'gigabytes': "should_be_int"}} + self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, + self.req, 'foo', body) + + def test_update_bad_quota_limit(self): + body = {'quota_set': {'gigabytes': -1000}} + self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, + self.req, 'foo', body) + + def test_update_no_admin(self): + self.req.environ['cinder.context'].is_admin = False + self.assertRaises(webob.exc.HTTPForbidden, self.controller.update, + self.req, 'foo', make_body(tenant_id=None)) + + def test_update_without_quota_set_field(self): + body = {'fake_quota_set': {'gigabytes': 100}} + self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, + self.req, 'foo', body) + + def test_update_empty_body(self): + body = {} + self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, + self.req, 'foo', body) + + +class QuotaSerializerTest(test.TestCase): + + def setUp(self): + super(QuotaSerializerTest, self).setUp() + self.req = self.mox.CreateMockAnything() + self.req.environ = {'cinder.context': context.get_admin_context()} + + def test_update_serializer(self): + serializer = quotas.QuotaTemplate() + quota_set = make_body(root=False) + text = serializer.serialize({'quota_set': quota_set}) + tree = etree.fromstring(text) + self.assertEqual(tree.tag, 'quota_set') + self.assertEqual(tree.get('id'), quota_set['id']) + body = make_body(root=False, 
tenant_id=None) + for node in tree: + self.assertIn(node.tag, body) + self.assertEqual(str(body[node.tag]), node.text) diff --git a/cinder/tests/api/contrib/test_quotas_classes.py b/cinder/tests/api/contrib/test_quotas_classes.py new file mode 100644 index 0000000000..80cb26dde1 --- /dev/null +++ b/cinder/tests/api/contrib/test_quotas_classes.py @@ -0,0 +1,154 @@ +# Copyright 2013 Huawei Technologies Co., Ltd +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Tests for cinder.api.contrib.quota_classes.py +""" + +from lxml import etree +import webob.exc + + +from cinder.api.contrib import quota_classes +from cinder import context +from cinder import quota +from cinder import test +from cinder.volume import volume_types + + +QUOTAS = quota.QUOTAS + + +def make_body(root=True, gigabytes=1000, snapshots=10, + volumes=10, volume_types_faked=None, + tenant_id='foo'): + resources = {'gigabytes': gigabytes, + 'snapshots': snapshots, + 'volumes': volumes} + if not volume_types_faked: + volume_types_faked = {'fake_type': None} + for volume_type in volume_types_faked: + resources['gigabytes_' + volume_type] = -1 + resources['snapshots_' + volume_type] = -1 + resources['volumes_' + volume_type] = -1 + + if tenant_id: + resources['id'] = tenant_id + if root: + result = {'quota_class_set': resources} + else: + result = resources + return result + + +def make_response_body(root=True, ctxt=None, quota_class='foo', + request_body=None, tenant_id='foo'): + resources = {} + if not ctxt: + ctxt = context.get_admin_context() + resources.update(QUOTAS.get_class_quotas(ctxt, quota_class)) + if request_body and request_body.get('quota_class_set'): + resources.update(request_body['quota_class_set']) + + if tenant_id: + resources['id'] = tenant_id + if root: + result = {'quota_class_set': resources} + else: + result = resources + return result + + +class QuotaClassSetsControllerTest(test.TestCase): + + def setUp(self): + super(QuotaClassSetsControllerTest, self).setUp() + self.controller = quota_classes.QuotaClassSetsController() + + self.ctxt = context.get_admin_context() + self.req = self.mox.CreateMockAnything() + self.req.environ = {'cinder.context': self.ctxt} + self.req.environ['cinder.context'].is_admin = True + + def test_show(self): + volume_types.create(self.ctxt, 'fake_type') + result = self.controller.show(self.req, 'foo') + self.assertDictMatch(result, make_body()) + + def test_show_not_authorized(self): + self.req.environ['cinder.context'].is_admin = False + self.req.environ['cinder.context'].user_id = 'bad_user' + self.req.environ['cinder.context'].project_id = 'bad_project' + self.assertRaises(webob.exc.HTTPForbidden, self.controller.show, + self.req, 'foo') + + def test_update(self): + volume_types.create(self.ctxt, 'fake_type') + body = make_body(gigabytes=2000, snapshots=15, + volumes=5, tenant_id=None) + result = self.controller.update(self.req, 'foo', body) + self.assertDictMatch(result, body) + + def test_update_wrong_key(self): + volume_types.create(self.ctxt,
'fake_type') + body = {'quota_class_set': {'bad': 'bad'}} + result = self.controller.update(self.req, 'foo', body) + self.assertDictMatch(result, make_body(tenant_id=None)) + + def test_update_invalid_key_value(self): + body = {'quota_class_set': {'gigabytes': "should_be_int"}} + self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, + self.req, 'foo', body) + + def test_update_bad_quota_limit(self): + body = {'quota_class_set': {'gigabytes': -1000}} + self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, + self.req, 'foo', body) + + def test_update_no_admin(self): + self.req.environ['cinder.context'].is_admin = False + self.assertRaises(webob.exc.HTTPForbidden, self.controller.update, + self.req, 'foo', make_body(tenant_id=None)) + + def test_update_with_more_volume_types(self): + volume_types.create(self.ctxt, 'fake_type_1') + volume_types.create(self.ctxt, 'fake_type_2') + body = {'quota_class_set': {'gigabytes_fake_type_1': 1111, + 'volumes_fake_type_2': 2222}} + result = self.controller.update(self.req, 'foo', body) + self.assertDictMatch(result, make_response_body(ctxt=self.ctxt, + quota_class='foo', + request_body=body, + tenant_id=None)) + + +class QuotaClassesSerializerTest(test.TestCase): + + def setUp(self): + super(QuotaClassesSerializerTest, self).setUp() + self.req = self.mox.CreateMockAnything() + self.req.environ = {'cinder.context': context.get_admin_context()} + + def test_update_serializer(self): + serializer = quota_classes.QuotaClassTemplate() + quota_class_set = make_body(root=False) + text = serializer.serialize({'quota_class_set': quota_class_set}) + tree = etree.fromstring(text) + self.assertEqual(tree.tag, 'quota_class_set') + self.assertEqual(tree.get('id'), quota_class_set['id']) + body = make_body(root=False, tenant_id=None) + for node in tree: + self.assertIn(node.tag, body) + self.assertEqual(str(body[node.tag]), node.text) diff --git a/cinder/tests/api/contrib/test_scheduler_hints.py b/cinder/tests/api/contrib/test_scheduler_hints.py new file mode 100644 index 0000000000..5f8728ba9f --- /dev/null +++ b/cinder/tests/api/contrib/test_scheduler_hints.py @@ -0,0 +1,96 @@ +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
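+ +# NOTE: the cases below POST volume-create requests with and without a +# scheduler_hints entry; the stubbed create() checks whether the hints +# were passed through, and each test asserts the resulting HTTP status.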
+ +import datetime + +import cinder +from cinder.api.openstack import wsgi +from cinder.openstack.common import jsonutils +from cinder import test +from cinder.tests.api import fakes +from cinder.tests.api.v2 import stubs + +UUID = fakes.FAKE_UUID + + +class SchedulerHintsTestCase(test.TestCase): + + def setUp(self): + super(SchedulerHintsTestCase, self).setUp() + self.fake_instance = stubs.stub_volume(1, uuid=UUID) + self.fake_instance['created_at'] =\ + datetime.datetime(2013, 1, 1, 1, 1, 1) + self.flags( + osapi_volume_extension=[ + 'cinder.api.contrib.select_extensions'], + osapi_volume_ext_list=['Scheduler_hints']) + self.app = fakes.wsgi_app() + + def test_create_server_without_hints(self): + + @wsgi.response(202) + def fake_create(*args, **kwargs): + self.assertNotIn('scheduler_hints', kwargs['body']) + return self.fake_instance + + self.stubs.Set(cinder.api.v2.volumes.VolumeController, 'create', + fake_create) + + req = fakes.HTTPRequest.blank('/v2/fake/volumes') + req.method = 'POST' + req.content_type = 'application/json' + body = {'id': id, + 'volume_type_id': 'cedef40a-ed67-4d10-800e-17455edce175', + 'volume_id': '1', } + req.body = jsonutils.dumps(body) + res = req.get_response(self.app) + self.assertEqual(202, res.status_int) + + def test_create_server_with_hints(self): + + @wsgi.response(202) + def fake_create(*args, **kwargs): + self.assertIn('scheduler_hints', kwargs['body']) + self.assertEqual(kwargs['body']['scheduler_hints'], {"a": "b"}) + return self.fake_instance + + self.stubs.Set(cinder.api.v2.volumes.VolumeController, 'create', + fake_create) + + req = fakes.HTTPRequest.blank('/v2/fake/volumes') + req.method = 'POST' + req.content_type = 'application/json' + body = {'id': id, + 'volume_type_id': 'cedef40a-ed67-4d10-800e-17455edce175', + 'volume_id': '1', + 'scheduler_hints': {'a': 'b'}, } + + req.body = jsonutils.dumps(body) + res = req.get_response(self.app) + self.assertEqual(202, res.status_int) + + def test_create_server_bad_hints(self): + req = fakes.HTTPRequest.blank('/v2/fake/volumes') + req.method = 'POST' + req.content_type = 'application/json' + body = {'volume': { + 'id': id, + 'volume_type_id': 'cedef40a-ed67-4d10-800e-17455edce175', + 'volume_id': '1', + 'scheduler_hints': 'a', }} + + req.body = jsonutils.dumps(body) + res = req.get_response(self.app) + self.assertEqual(400, res.status_int) diff --git a/cinder/tests/api/contrib/test_services.py b/cinder/tests/api/contrib/test_services.py new file mode 100644 index 0000000000..420fcd8545 --- /dev/null +++ b/cinder/tests/api/contrib/test_services.py @@ -0,0 +1,277 @@ +# Copyright 2012 IBM Corp. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
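+ +# NOTE: a service is reported "up" when its updated_at is recent relative +# to the stubbed utcnow() below, and "down" otherwise.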
+ + +from cinder.api.contrib import services +from cinder import context +from cinder import db +from cinder import exception +from cinder.openstack.common import timeutils +from cinder import policy +from cinder import test +from cinder.tests.api import fakes +from datetime import datetime + + +fake_services_list = [{'binary': 'cinder-scheduler', + 'host': 'host1', + 'availability_zone': 'cinder', + 'id': 1, + 'disabled': True, + 'updated_at': datetime(2012, 10, 29, 13, 42, 2), + 'created_at': datetime(2012, 9, 18, 2, 46, 27)}, + {'binary': 'cinder-volume', + 'host': 'host1', + 'availability_zone': 'cinder', + 'id': 2, + 'disabled': True, + 'updated_at': datetime(2012, 10, 29, 13, 42, 5), + 'created_at': datetime(2012, 9, 18, 2, 46, 27)}, + {'binary': 'cinder-scheduler', + 'host': 'host2', + 'availability_zone': 'cinder', + 'id': 3, + 'disabled': False, + 'updated_at': datetime(2012, 9, 19, 6, 55, 34), + 'created_at': datetime(2012, 9, 18, 2, 46, 28)}, + {'binary': 'cinder-volume', + 'host': 'host2', + 'availability_zone': 'cinder', + 'id': 4, + 'disabled': True, + 'updated_at': datetime(2012, 9, 18, 8, 3, 38), + 'created_at': datetime(2012, 9, 18, 2, 46, 28)}, + ] + + +class FakeRequest(object): + environ = {"cinder.context": context.get_admin_context()} + GET = {} + + +# NOTE(uni): deprecating service request key, binary takes precedence +# Still keeping service key here for API compatibility sake. +class FakeRequestWithService(object): + environ = {"cinder.context": context.get_admin_context()} + GET = {"service": "cinder-volume"} + + +class FakeRequestWithBinary(object): + environ = {"cinder.context": context.get_admin_context()} + GET = {"binary": "cinder-volume"} + + +class FakeRequestWithHost(object): + environ = {"cinder.context": context.get_admin_context()} + GET = {"host": "host1"} + + +# NOTE(uni): deprecating service request key, binary takes precedence +# Still keeping service key here for API compatibility sake. 
+class FakeRequestWithHostService(object): + environ = {"cinder.context": context.get_admin_context()} + GET = {"host": "host1", "service": "cinder-volume"} + + +class FakeRequestWithHostBinary(object): + environ = {"cinder.context": context.get_admin_context()} + GET = {"host": "host1", "binary": "cinder-volume"} + + +def fake_service_get_all(context): + return fake_services_list + + +def fake_service_get_by_host_binary(context, host, binary): + for service in fake_services_list: + if service['host'] == host and service['binary'] == binary: + return service + return None + + +def fake_service_get_by_id(value): + for service in fake_services_list: + if service['id'] == value: + return service + return None + + +def fake_service_update(context, service_id, values): + service = fake_service_get_by_id(service_id) + if service is None: + raise exception.ServiceNotFound(service_id=service_id) + else: + return {'host': 'host1', 'service': 'cinder-volume', + 'disabled': values['disabled']} + + +def fake_policy_enforce(context, action, target): + pass + + +def fake_utcnow(): + return datetime(2012, 10, 29, 13, 42, 11) + + +class ServicesTest(test.TestCase): + + def setUp(self): + super(ServicesTest, self).setUp() + + self.stubs.Set(db, "service_get_all", fake_service_get_all) + self.stubs.Set(timeutils, "utcnow", fake_utcnow) + self.stubs.Set(db, "service_get_by_args", + fake_service_get_by_host_binary) + self.stubs.Set(db, "service_update", fake_service_update) + self.stubs.Set(policy, "enforce", fake_policy_enforce) + + self.context = context.get_admin_context() + self.controller = services.ServiceController() + + def tearDown(self): + super(ServicesTest, self).tearDown() + + def test_services_list(self): + req = FakeRequest() + res_dict = self.controller.index(req) + + response = {'services': [{'binary': 'cinder-scheduler', + 'host': 'host1', 'zone': 'cinder', + 'status': 'disabled', 'state': 'up', + 'updated_at': datetime(2012, 10, 29, 13, 42, 2)}, + {'binary': 'cinder-volume', + 'host': 'host1', 'zone': 'cinder', + 'status': 'disabled', 'state': 'up', + 'updated_at': datetime(2012, 10, 29, 13, 42, 5)}, + {'binary': 'cinder-scheduler', 'host': 'host2', + 'zone': 'cinder', + 'status': 'enabled', 'state': 'down', + 'updated_at': datetime(2012, 9, 19, 6, 55, 34)}, + {'binary': 'cinder-volume', 'host': 'host2', + 'zone': 'cinder', + 'status': 'disabled', 'state': 'down', + 'updated_at': datetime(2012, 9, 18, 8, 3, 38)}]} + self.assertEqual(res_dict, response) + + def test_services_list_with_host(self): + req = FakeRequestWithHost() + res_dict = self.controller.index(req) + + response = {'services': [{'binary': 'cinder-scheduler', + 'host': 'host1', + 'zone': 'cinder', + 'status': 'disabled', 'state': 'up', + 'updated_at': datetime(2012, 10, + 29, 13, 42, 2)}, + {'binary': 'cinder-volume', 'host': 'host1', + 'zone': 'cinder', + 'status': 'disabled', 'state': 'up', + 'updated_at': datetime(2012, 10, 29, + 13, 42, 5)}]} + self.assertEqual(res_dict, response) + + def test_services_list_with_service(self): + req = FakeRequestWithService() + res_dict = self.controller.index(req) + + response = {'services': [{'binary': 'cinder-volume', + 'host': 'host1', + 'zone': 'cinder', + 'status': 'disabled', + 'state': 'up', + 'updated_at': datetime(2012, 10, 29, + 13, 42, 5)}, + {'binary': 'cinder-volume', + 'host': 'host2', + 'zone': 'cinder', + 'status': 'disabled', + 'state': 'down', + 'updated_at': datetime(2012, 9, 18, + 8, 3, 38)}]} + self.assertEqual(res_dict, response) + + def
test_services_list_with_binary(self): + req = FakeRequestWithBinary() + res_dict = self.controller.index(req) + + response = {'services': [{'binary': 'cinder-volume', + 'host': 'host1', + 'zone': 'cinder', + 'status': 'disabled', + 'state': 'up', + 'updated_at': datetime(2012, 10, 29, + 13, 42, 5)}, + {'binary': 'cinder-volume', + 'host': 'host2', + 'zone': 'cinder', + 'status': 'disabled', + 'state': 'down', + 'updated_at': datetime(2012, 9, 18, + 8, 3, 38)}]} + self.assertEqual(res_dict, response) + + def test_services_list_with_host_service(self): + req = FakeRequestWithHostService() + res_dict = self.controller.index(req) + + response = {'services': [{'binary': 'cinder-volume', + 'host': 'host1', + 'zone': 'cinder', + 'status': 'disabled', + 'state': 'up', + 'updated_at': datetime(2012, 10, 29, + 13, 42, 5)}]} + self.assertEqual(res_dict, response) + + def test_services_list_with_host_binary(self): + req = FakeRequestWithHostBinary() + res_dict = self.controller.index(req) + + response = {'services': [{'binary': 'cinder-volume', + 'host': 'host1', + 'zone': 'cinder', + 'status': 'disabled', + 'state': 'up', + 'updated_at': datetime(2012, 10, 29, + 13, 42, 5)}]} + self.assertEqual(res_dict, response) + + def test_services_enable_with_service_key(self): + body = {'host': 'host1', 'service': 'cinder-volume'} + req = fakes.HTTPRequest.blank('/v1/fake/os-services/enable') + res_dict = self.controller.update(req, "enable", body) + + self.assertEqual(res_dict['status'], 'enabled') + + def test_services_enable_with_binary_key(self): + body = {'host': 'host1', 'binary': 'cinder-volume'} + req = fakes.HTTPRequest.blank('/v1/fake/os-services/enable') + res_dict = self.controller.update(req, "enable", body) + + self.assertEqual(res_dict['status'], 'enabled') + + def test_services_disable_with_service_key(self): + req = fakes.HTTPRequest.blank('/v1/fake/os-services/disable') + body = {'host': 'host1', 'service': 'cinder-volume'} + res_dict = self.controller.update(req, "disable", body) + + self.assertEqual(res_dict['status'], 'disabled') + + def test_services_disable_with_binary_key(self): + req = fakes.HTTPRequest.blank('/v1/fake/os-services/disable') + body = {'host': 'host1', 'binary': 'cinder-volume'} + res_dict = self.controller.update(req, "disable", body) + + self.assertEqual(res_dict['status'], 'disabled') diff --git a/cinder/tests/api/contrib/test_snapshot_actions.py b/cinder/tests/api/contrib/test_snapshot_actions.py new file mode 100644 index 0000000000..37b4d1971c --- /dev/null +++ b/cinder/tests/api/contrib/test_snapshot_actions.py @@ -0,0 +1,80 @@ +# Copyright 2013, Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
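+ +# NOTE: these cases POST os-update_snapshot_status actions against stubbed +# snapshot DB calls and assert only on the returned HTTP status code.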
+ +import webob + +from cinder import db +from cinder.openstack.common import jsonutils +from cinder import test +from cinder.tests.api import fakes +from cinder.tests.api.v2 import stubs + + +class SnapshotActionsTest(test.TestCase): + + def setUp(self): + super(SnapshotActionsTest, self).setUp() + + def test_update_snapshot_status(self): + self.stubs.Set(db, 'snapshot_get', stub_snapshot_get) + self.stubs.Set(db, 'snapshot_update', stub_snapshot_update) + + body = {'os-update_snapshot_status': {'status': 'available'}} + req = webob.Request.blank('/v2/fake/snapshots/1/action') + req.method = "POST" + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 202) + + def test_update_snapshot_status_invalid_status(self): + self.stubs.Set(db, 'snapshot_get', stub_snapshot_get) + body = {'os-update_snapshot_status': {'status': 'in-use'}} + req = webob.Request.blank('/v2/fake/snapshots/1/action') + req.method = "POST" + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) + + def test_update_snapshot_status_without_status(self): + self.stubs.Set(db, 'snapshot_get', stub_snapshot_get) + body = {'os-update_snapshot_status': {}} + req = webob.Request.blank('/v2/fake/snapshots/1/action') + req.method = "POST" + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) + + +def stub_snapshot_get(context, snapshot_id): + snapshot = stubs.stub_snapshot(snapshot_id) + if snapshot_id == 3: + snapshot['status'] = 'error' + elif snapshot_id == 1: + snapshot['status'] = 'creating' + elif snapshot_id == 7: + snapshot['status'] = 'available' + else: + snapshot['status'] = 'creating' + + return snapshot + + +def stub_snapshot_update(self, context, id, **kwargs): + pass diff --git a/cinder/tests/api/openstack/volume/contrib/test_types_extra_specs.py b/cinder/tests/api/contrib/test_types_extra_specs.py similarity index 57% rename from cinder/tests/api/openstack/volume/contrib/test_types_extra_specs.py rename to cinder/tests/api/contrib/test_types_extra_specs.py index 0a613ccc0a..33ee7a138e 100644 --- a/cinder/tests/api/openstack/volume/contrib/test_types_extra_specs.py +++ b/cinder/tests/api/contrib/test_types_extra_specs.py @@ -1,7 +1,5 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright (c) 2011 Zadara Storage Inc. -# Copyright (c) 2011 OpenStack LLC. +# Copyright (c) 2011 OpenStack Foundation # Copyright 2011 University of Southern California # All Rights Reserved. 
# @@ -20,9 +18,14 @@ from lxml import etree import webob -from cinder.api.openstack.volume.contrib import types_extra_specs +import mock + +from cinder.api.contrib import types_extra_specs +from cinder import exception +from cinder.openstack.common.notifier import api as notifier_api +from cinder.openstack.common.notifier import test_notifier from cinder import test -from cinder.tests.api.openstack import fakes +from cinder.tests.api import fakes import cinder.wsgi @@ -43,13 +46,16 @@ def delete_volume_type_extra_specs(context, volume_type_id, key): pass +def delete_volume_type_extra_specs_not_found(context, volume_type_id, key): + raise exception.VolumeTypeExtraSpecsNotFound("Not Found") + + def stub_volume_type_extra_specs(): - specs = { - "key1": "value1", - "key2": "value2", - "key3": "value3", - "key4": "value4", - "key5": "value5"} + specs = {"key1": "value1", + "key2": "value2", + "key3": "value3", + "key4": "value4", + "key5": "value5"} return specs @@ -61,9 +67,18 @@ class VolumeTypesExtraSpecsTest(test.TestCase): def setUp(self): super(VolumeTypesExtraSpecsTest, self).setUp() + self.flags(host='fake', + notification_driver=[test_notifier.__name__]) self.stubs.Set(cinder.db, 'volume_type_get', volume_type_get) - self.api_path = '/v1/fake/os-volume-types/1/extra_specs' + self.api_path = '/v2/fake/os-volume-types/1/extra_specs' self.controller = types_extra_specs.VolumeTypeExtraSpecsController() + """to reset notifier drivers left over from other api/contrib tests""" + notifier_api._reset_drivers() + test_notifier.NOTIFICATIONS = [] + + def tearDown(self): + notifier_api._reset_drivers() + super(VolumeTypesExtraSpecsTest, self).tearDown() def test_index(self): self.stubs.Set(cinder.db, 'volume_type_extra_specs_get', @@ -104,8 +119,18 @@ def test_delete(self): self.stubs.Set(cinder.db, 'volume_type_extra_specs_delete', delete_volume_type_extra_specs) + self.assertEqual(len(test_notifier.NOTIFICATIONS), 0) req = fakes.HTTPRequest.blank(self.api_path + '/key5') self.controller.delete(req, 1, 'key5') + self.assertEqual(len(test_notifier.NOTIFICATIONS), 1) + + def test_delete_not_found(self): + self.stubs.Set(cinder.db, 'volume_type_extra_specs_delete', + delete_volume_type_extra_specs_not_found) + + req = fakes.HTTPRequest.blank(self.api_path + '/key6') + self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, + req, 1, 'key6') def test_create(self): self.stubs.Set(cinder.db, @@ -113,19 +138,71 @@ def test_create(self): return_create_volume_type_extra_specs) body = {"extra_specs": {"key1": "value1"}} + self.assertEqual(len(test_notifier.NOTIFICATIONS), 0) req = fakes.HTTPRequest.blank(self.api_path) res_dict = self.controller.create(req, 1, body) + self.assertEqual(len(test_notifier.NOTIFICATIONS), 1) self.assertEqual('value1', res_dict['extra_specs']['key1']) + @mock.patch.object(cinder.db, 'volume_type_extra_specs_update_or_create') + def test_create_key_allowed_chars( + self, volume_type_extra_specs_update_or_create): + mock_return_value = {"key1": "value1", + "key2": "value2", + "key3": "value3", + "key4": "value4", + "key5": "value5"} + volume_type_extra_specs_update_or_create.\ + return_value = mock_return_value + + body = {"extra_specs": {"other_alphanum.-_:": "value1"}} + + self.assertEqual(len(test_notifier.NOTIFICATIONS), 0) + + req = fakes.HTTPRequest.blank(self.api_path) + res_dict = self.controller.create(req, 1, body) + self.assertEqual(len(test_notifier.NOTIFICATIONS), 1) + self.assertEqual('value1', + res_dict['extra_specs']['other_alphanum.-_:']) + + 
@mock.patch.object(cinder.db, 'volume_type_extra_specs_update_or_create') + def test_create_too_many_keys_allowed_chars( + self, volume_type_extra_specs_update_or_create): + mock_return_value = {"key1": "value1", + "key2": "value2", + "key3": "value3", + "key4": "value4", + "key5": "value5"} + volume_type_extra_specs_update_or_create.\ + return_value = mock_return_value + + body = {"extra_specs": {"other_alphanum.-_:": "value1", + "other2_alphanum.-_:": "value2", + "other3_alphanum.-_:": "value3"}} + + self.assertEqual(len(test_notifier.NOTIFICATIONS), 0) + + req = fakes.HTTPRequest.blank(self.api_path) + res_dict = self.controller.create(req, 1, body) + self.assertEqual(len(test_notifier.NOTIFICATIONS), 1) + self.assertEqual('value1', + res_dict['extra_specs']['other_alphanum.-_:']) + self.assertEqual('value2', + res_dict['extra_specs']['other2_alphanum.-_:']) + self.assertEqual('value3', + res_dict['extra_specs']['other3_alphanum.-_:']) + def test_update_item(self): self.stubs.Set(cinder.db, 'volume_type_extra_specs_update_or_create', return_create_volume_type_extra_specs) body = {"key1": "value1"} + self.assertEqual(len(test_notifier.NOTIFICATIONS), 0) req = fakes.HTTPRequest.blank(self.api_path + '/key1') res_dict = self.controller.update(req, 1, 'key1', body) + self.assertEqual(len(test_notifier.NOTIFICATIONS), 1) self.assertEqual('value1', res_dict['key1']) @@ -149,6 +226,45 @@ def test_update_item_body_uri_mismatch(self): self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, 1, 'bad', body) + def _extra_specs_empty_update(self, body): + req = fakes.HTTPRequest.blank('/v2/fake/types/1/extra_specs') + req.method = 'POST' + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.update, req, '1', body) + + def test_update_no_body(self): + self._extra_specs_empty_update(body=None) + + def test_update_empty_body(self): + self._extra_specs_empty_update(body={}) + + def _extra_specs_create_bad_body(self, body): + req = fakes.HTTPRequest.blank('/v2/fake/types/1/extra_specs') + req.method = 'POST' + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.create, req, '1', body) + + def test_create_no_body(self): + self._extra_specs_create_bad_body(body=None) + + def test_create_missing_volume(self): + body = {'foo': {'a': 'b'}} + self._extra_specs_create_bad_body(body=body) + + def test_create_malformed_entity(self): + body = {'extra_specs': 'string'} + self._extra_specs_create_bad_body(body=body) + + def test_create_invalid_key(self): + body = {"extra_specs": {"ke/y1": "value1"}} + self._extra_specs_create_bad_body(body=body) + + def test_create_invalid_too_many_key(self): + body = {"key1": "value1", "ke/y2": "value2", "key3": "value3"} + self._extra_specs_create_bad_body(body=body) + class VolumeTypeExtraSpecsSerializerTest(test.TestCase): def test_index_create_serializer(self): @@ -158,14 +274,13 @@ def test_index_create_serializer(self): extra_specs = stub_volume_type_extra_specs() text = serializer.serialize(dict(extra_specs=extra_specs)) - print text tree = etree.fromstring(text) self.assertEqual('extra_specs', tree.tag) self.assertEqual(len(extra_specs), len(tree)) seen = set(extra_specs.keys()) for child in tree: - self.assertTrue(child.tag in seen) + self.assertIn(child.tag, seen) self.assertEqual(extra_specs[child.tag], child.text) seen.remove(child.tag) self.assertEqual(len(seen), 0) @@ -176,51 +291,8 @@ def test_update_show_serializer(self): exemplar = dict(key1='value1') text = serializer.serialize(exemplar) - print text tree = 
etree.fromstring(text) self.assertEqual('key1', tree.tag) self.assertEqual('value1', tree.text) self.assertEqual(0, len(tree)) - - -class VolumeTypeExtraSpecsUnprocessableEntityTestCase(test.TestCase): - - """ - Tests of places we throw 422 Unprocessable Entity from - """ - - def setUp(self): - super(VolumeTypeExtraSpecsUnprocessableEntityTestCase, self).setUp() - self.controller = types_extra_specs.VolumeTypeExtraSpecsController() - - def _unprocessable_extra_specs_create(self, body): - req = fakes.HTTPRequest.blank('/v2/fake/types/1/extra_specs') - req.method = 'POST' - - self.assertRaises(webob.exc.HTTPUnprocessableEntity, - self.controller.create, req, '1', body) - - def test_create_no_body(self): - self._unprocessable_extra_specs_create(body=None) - - def test_create_missing_volume(self): - body = {'foo': {'a': 'b'}} - self._unprocessable_extra_specs_create(body=body) - - def test_create_malformed_entity(self): - body = {'extra_specs': 'string'} - self._unprocessable_extra_specs_create(body=body) - - def _unprocessable_extra_specs_update(self, body): - req = fakes.HTTPRequest.blank('/v2/fake/types/1/extra_specs') - req.method = 'POST' - - self.assertRaises(webob.exc.HTTPUnprocessableEntity, - self.controller.update, req, '1', body) - - def test_update_no_body(self): - self._unprocessable_extra_specs_update(body=None) - - def test_update_empty_body(self): - self._unprocessable_extra_specs_update(body={}) diff --git a/cinder/tests/api/openstack/volume/contrib/test_types_manage.py b/cinder/tests/api/contrib/test_types_manage.py similarity index 58% rename from cinder/tests/api/openstack/volume/contrib/test_types_manage.py rename to cinder/tests/api/contrib/test_types_manage.py index c2ee34989f..6be714f226 100644 --- a/cinder/tests/api/openstack/volume/contrib/test_types_manage.py +++ b/cinder/tests/api/contrib/test_types_manage.py @@ -1,4 +1,4 @@ -# Copyright 2011 OpenStack LLC. +# Copyright 2011 OpenStack Foundation # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -15,20 +15,21 @@ import webob -from cinder.api.openstack.volume.contrib import types_manage +from cinder.api.contrib import types_manage from cinder import exception +from cinder.openstack.common.notifier import api as notifier_api +from cinder.openstack.common.notifier import test_notifier from cinder import test +from cinder.tests.api import fakes from cinder.volume import volume_types -from cinder.tests.api.openstack import fakes def stub_volume_type(id): - specs = { - "key1": "value1", - "key2": "value2", - "key3": "value3", - "key4": "value4", - "key5": "value5"} + specs = {"key1": "value1", + "key2": "value2", + "key3": "value3", + "key4": "value4", + "key5": "value5"} return dict(id=id, name='vol_type_%s' % str(id), extra_specs=specs) @@ -44,6 +45,12 @@ def return_volume_types_destroy(context, name): pass +def return_volume_types_with_volumes_destroy(context, id): + if id == "1": + raise exception.VolumeTypeInUse(volume_type_id=id) + pass + + def return_volume_types_create(context, name, specs): pass @@ -57,7 +64,16 @@ def return_volume_types_get_by_name(context, name): class VolumeTypesManageApiTest(test.TestCase): def setUp(self): super(VolumeTypesManageApiTest, self).setUp() + self.flags(host='fake', + notification_driver=[test_notifier.__name__]) self.controller = types_manage.VolumeTypesManageController() + """to reset notifier drivers left over from other api/contrib tests""" + notifier_api._reset_drivers() + test_notifier.NOTIFICATIONS = [] + + def tearDown(self): + notifier_api._reset_drivers() + super(VolumeTypesManageApiTest, self).tearDown() def test_volume_types_delete(self): self.stubs.Set(volume_types, 'get_volume_type', @@ -65,8 +81,10 @@ def test_volume_types_delete(self): self.stubs.Set(volume_types, 'destroy', return_volume_types_destroy) - req = fakes.HTTPRequest.blank('/v1/fake/types/1') + req = fakes.HTTPRequest.blank('/v2/fake/types/1') + self.assertEqual(len(test_notifier.NOTIFICATIONS), 0) self.controller._delete(req, 1) + self.assertEqual(len(test_notifier.NOTIFICATIONS), 1) def test_volume_types_delete_not_found(self): self.stubs.Set(volume_types, 'get_volume_type', @@ -74,9 +92,21 @@ def test_volume_types_delete_not_found(self): self.stubs.Set(volume_types, 'destroy', return_volume_types_destroy) - req = fakes.HTTPRequest.blank('/v1/fake/types/777') + self.assertEqual(len(test_notifier.NOTIFICATIONS), 0) + req = fakes.HTTPRequest.blank('/v2/fake/types/777') self.assertRaises(webob.exc.HTTPNotFound, self.controller._delete, req, '777') + self.assertEqual(len(test_notifier.NOTIFICATIONS), 1) + + def test_volume_types_with_volumes_destroy(self): + self.stubs.Set(volume_types, 'get_volume_type', + return_volume_types_get_volume_type) + self.stubs.Set(volume_types, 'destroy', + return_volume_types_with_volumes_destroy) + req = fakes.HTTPRequest.blank('/v2/fake/types/1') + self.assertEqual(len(test_notifier.NOTIFICATIONS), 0) + self.controller._delete(req, 1) + self.assertEqual(len(test_notifier.NOTIFICATIONS), 1) def test_create(self): self.stubs.Set(volume_types, 'create', @@ -86,37 +116,28 @@ def test_create(self): body = {"volume_type": {"name": "vol_type_1", "extra_specs": {"key1": "value1"}}} - req = fakes.HTTPRequest.blank('/v1/fake/types') + req = fakes.HTTPRequest.blank('/v2/fake/types') + + self.assertEqual(len(test_notifier.NOTIFICATIONS), 0) res_dict = self.controller._create(req, body) + self.assertEqual(len(test_notifier.NOTIFICATIONS), 1) self.assertEqual(1, 
len(res_dict)) + self.assertEqual('vol_type_1', res_dict['volume_type']['name']) + - -class VolumeTypesUnprocessableEntityTestCase(test.TestCase): - - """ - Tests of places we throw 422 Unprocessable Entity from - """ - - def setUp(self): - super(VolumeTypesUnprocessableEntityTestCase, self).setUp() - self.controller = types_manage.VolumeTypesManageController() - - def _unprocessable_volume_type_create(self, body): + def _create_volume_type_bad_body(self, body): req = fakes.HTTPRequest.blank('/v2/fake/types') req.method = 'POST' - - self.assertRaises(webob.exc.HTTPUnprocessableEntity, + self.assertRaises(webob.exc.HTTPBadRequest, self.controller._create, req, body) def test_create_no_body(self): - self._unprocessable_volume_type_create(body=None) + self._create_volume_type_bad_body(body=None) def test_create_missing_volume(self): body = {'foo': {'a': 'b'}} - self._unprocessable_volume_type_create(body=body) + self._create_volume_type_bad_body(body=body) def test_create_malformed_entity(self): body = {'volume_type': 'string'} - self._unprocessable_volume_type_create(body=body) + self._create_volume_type_bad_body(body=body) diff --git a/cinder/tests/api/contrib/test_used_limits.py b/cinder/tests/api/contrib/test_used_limits.py new file mode 100644 index 0000000000..d102253e8f --- /dev/null +++ b/cinder/tests/api/contrib/test_used_limits.py @@ -0,0 +1,67 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License.
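+ +# NOTE: this test stubs QUOTAS.get_project_quotas and verifies that the +# in_use counts are copied into the absolute limits of the response.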
+ +from cinder.api.contrib import used_limits +from cinder.api.openstack import wsgi +from cinder import quota +from cinder import test +from cinder.tests.api import fakes + + +class FakeRequest(object): + def __init__(self, context): + self.environ = {'cinder.context': context} + + +class UsedLimitsTestCase(test.TestCase): + def setUp(self): + """Run before each test.""" + super(UsedLimitsTestCase, self).setUp() + self.controller = used_limits.UsedLimitsController() + + def test_used_limits(self): + fake_req = FakeRequest(fakes.FakeRequestContext('fake', 'fake')) + obj = { + "limits": { + "rate": [], + "absolute": {}, + }, + } + res = wsgi.ResponseObject(obj) + quota_map = { + 'totalVolumesUsed': 'volumes', + 'totalGigabytesUsed': 'gigabytes', + } + + limits = {} + for display_name, q in quota_map.iteritems(): + limits[q] = {'limit': 2, + 'in_use': 1} + + def stub_get_project_quotas(context, project_id, usages=True): + return limits + + self.stubs.Set(quota.QUOTAS, "get_project_quotas", + stub_get_project_quotas) + + self.mox.ReplayAll() + + self.controller.index(fake_req, res) + abs_limits = res.obj['limits']['absolute'] + for used_limit, value in abs_limits.iteritems(): + self.assertEqual(value, + limits[quota_map[used_limit]]['in_use']) diff --git a/cinder/tests/api/contrib/test_volume_actions.py b/cinder/tests/api/contrib/test_volume_actions.py new file mode 100644 index 0000000000..c0f6ded600 --- /dev/null +++ b/cinder/tests/api/contrib/test_volume_actions.py @@ -0,0 +1,597 @@ +# Copyright 2012 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import datetime +import json +import uuid + +import mock +import webob + +from cinder.api.contrib import volume_actions +from cinder import exception +from cinder.openstack.common import jsonutils +from cinder.openstack.common.rpc import common as rpc_common +from cinder import test +from cinder.tests.api import fakes +from cinder.tests.api.v2 import stubs +from cinder import volume +from cinder.volume import api as volume_api + + +class VolumeActionsTest(test.TestCase): + + _actions = ('os-detach', 'os-reserve', 'os-unreserve') + + _methods = ('attach', 'detach', 'reserve_volume', 'unreserve_volume') + + def setUp(self): + super(VolumeActionsTest, self).setUp() + self.UUID = uuid.uuid4() + self.api_patchers = {} + for _meth in self._methods: + self.api_patchers[_meth] = mock.patch('cinder.volume.API.' 
+ _meth) + self.api_patchers[_meth].start() + self.api_patchers[_meth].return_value = True + + vol = {'id': 'fake', 'host': 'fake', 'status': 'available', 'size': 1, + 'migration_status': None, 'volume_type_id': 'fake'} + self.get_patcher = mock.patch('cinder.volume.API.get') + self.mock_volume_get = self.get_patcher.start() + self.mock_volume_get.return_value = vol + self.update_patcher = mock.patch('cinder.volume.API.update') + self.mock_volume_update = self.update_patcher.start() + self.mock_volume_update.return_value = vol + + self.flags(rpc_backend='cinder.openstack.common.rpc.impl_fake') + + def tearDown(self): + for patcher in self.api_patchers: + self.api_patchers[patcher].stop() + self.update_patcher.stop() + self.get_patcher.stop() + super(VolumeActionsTest, self).tearDown() + + def test_simple_api_actions(self): + app = fakes.wsgi_app() + for _action in self._actions: + req = webob.Request.blank('/v2/fake/volumes/%s/action' % + self.UUID) + req.method = 'POST' + req.body = jsonutils.dumps({_action: None}) + req.content_type = 'application/json' + res = req.get_response(app) + self.assertEqual(res.status_int, 202) + + def test_initialize_connection(self): + with mock.patch.object(volume_api.API, + 'initialize_connection') as init_conn: + init_conn.return_value = {} + body = {'os-initialize_connection': {'connector': 'fake'}} + req = webob.Request.blank('/v2/fake/volumes/1/action') + req.method = "POST" + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 200) + + def test_initialize_connection_without_connector(self): + with mock.patch.object(volume_api.API, + 'initialize_connection') as init_conn: + init_conn.return_value = {} + body = {'os-initialize_connection': {}} + req = webob.Request.blank('/v2/fake/volumes/1/action') + req.method = "POST" + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) + + def test_initialize_connection_exception(self): + with mock.patch.object(volume_api.API, + 'initialize_connection') as init_conn: + init_conn.side_effect = \ + exception.VolumeBackendAPIException(data=None) + body = {'os-initialize_connection': {'connector': 'fake'}} + req = webob.Request.blank('/v2/fake/volumes/1/action') + req.method = "POST" + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 500) + + def test_terminate_connection(self): + with mock.patch.object(volume_api.API, + 'terminate_connection') as terminate_conn: + terminate_conn.return_value = {} + body = {'os-terminate_connection': {'connector': 'fake'}} + req = webob.Request.blank('/v2/fake/volumes/1/action') + req.method = "POST" + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 202) + + def test_terminate_connection_without_connector(self): + with mock.patch.object(volume_api.API, + 'terminate_connection') as terminate_conn: + terminate_conn.return_value = {} + body = {'os-terminate_connection': {}} + req = webob.Request.blank('/v2/fake/volumes/1/action') + req.method = "POST" + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) + + def 
test_terminate_connection_with_exception(self):
+        with mock.patch.object(volume_api.API,
+                               'terminate_connection') as terminate_conn:
+            terminate_conn.side_effect = \
+                exception.VolumeBackendAPIException(data=None)
+            body = {'os-terminate_connection': {'connector': 'fake'}}
+            req = webob.Request.blank('/v2/fake/volumes/1/action')
+            req.method = "POST"
+            req.body = jsonutils.dumps(body)
+            req.headers["content-type"] = "application/json"
+
+            res = req.get_response(fakes.wsgi_app())
+            self.assertEqual(res.status_int, 500)
+
+    def test_attach_to_instance(self):
+        body = {'os-attach': {'instance_uuid': 'fake',
+                              'mountpoint': '/dev/vdc',
+                              'mode': 'rw'}}
+        req = webob.Request.blank('/v2/fake/volumes/1/action')
+        req.method = "POST"
+        req.body = jsonutils.dumps(body)
+        req.headers["content-type"] = "application/json"
+
+        res = req.get_response(fakes.wsgi_app())
+        self.assertEqual(res.status_int, 202)
+
+    def test_attach_to_host(self):
+        # Attach the volume in the default 'read-write' mode
+        body = {'os-attach': {'host_name': 'fake_host',
+                              'mountpoint': '/dev/vdc'}}
+        req = webob.Request.blank('/v2/fake/volumes/1/action')
+        req.method = "POST"
+        req.body = jsonutils.dumps(body)
+        req.headers["content-type"] = "application/json"
+
+        res = req.get_response(fakes.wsgi_app())
+        self.assertEqual(res.status_int, 202)
+
+    def test_attach_with_invalid_arguments(self):
+        # Invalid request to attach volume to an invalid target
+        body = {'os-attach': {'mountpoint': '/dev/vdc'}}
+        req = webob.Request.blank('/v2/fake/volumes/1/action')
+        req.method = "POST"
+        req.headers["content-type"] = "application/json"
+        req.body = jsonutils.dumps(body)
+        res = req.get_response(fakes.wsgi_app())
+        self.assertEqual(res.status_int, 400)
+
+        # Invalid request to attach volume to an instance and a host
+        body = {'os-attach': {'instance_uuid': 'fake',
+                              'host_name': 'fake_host',
+                              'mountpoint': '/dev/vdc'}}
+        req = webob.Request.blank('/v2/fake/volumes/1/action')
+        req.method = "POST"
+        req.headers["content-type"] = "application/json"
+        req.body = jsonutils.dumps(body)
+        res = req.get_response(fakes.wsgi_app())
+        self.assertEqual(res.status_int, 400)
+
+        # Invalid request to attach volume with an invalid mode
+        body = {'os-attach': {'instance_uuid': 'fake',
+                              'mountpoint': '/dev/vdc',
+                              'mode': 'rr'}}
+        req = webob.Request.blank('/v2/fake/volumes/1/action')
+        req.method = "POST"
+        req.headers["content-type"] = "application/json"
+        req.body = jsonutils.dumps(body)
+        res = req.get_response(fakes.wsgi_app())
+        self.assertEqual(res.status_int, 400)
+        body = {'os-attach': {'host_name': 'fake_host',
+                              'mountpoint': '/dev/vdc',
+                              'mode': 'ww'}}
+        req = webob.Request.blank('/v2/fake/volumes/1/action')
+        req.method = "POST"
+        req.headers["content-type"] = "application/json"
+        req.body = jsonutils.dumps(body)
+        res = req.get_response(fakes.wsgi_app())
+        self.assertEqual(res.status_int, 400)
+
+    def test_begin_detaching(self):
+        def fake_begin_detaching(*args, **kwargs):
+            return {}
+        self.stubs.Set(volume.API, 'begin_detaching',
+                       fake_begin_detaching)
+
+        body = {'os-begin_detaching': {'fake': 'fake'}}
+        req = webob.Request.blank('/v2/fake/volumes/1/action')
+        req.method = "POST"
+        req.body = jsonutils.dumps(body)
+        req.headers["content-type"] = "application/json"
+
+        res = req.get_response(fakes.wsgi_app())
+        self.assertEqual(res.status_int, 202)
+
+    def test_roll_detaching(self):
+        def fake_roll_detaching(*args, **kwargs):
+            return {}
+        self.stubs.Set(volume.API, 'roll_detaching',
+                       fake_roll_detaching)
+
+        body = {'os-roll_detaching':
{'fake': 'fake'}} + req = webob.Request.blank('/v2/fake/volumes/1/action') + req.method = "POST" + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 202) + + def test_extend_volume(self): + def fake_extend_volume(*args, **kwargs): + return {} + self.stubs.Set(volume.API, 'extend', + fake_extend_volume) + + body = {'os-extend': {'new_size': 5}} + req = webob.Request.blank('/v2/fake/volumes/1/action') + req.method = "POST" + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 202) + + def test_update_readonly_flag(self): + def fake_update_readonly_flag(*args, **kwargs): + return {} + self.stubs.Set(volume.API, 'update_readonly_flag', + fake_update_readonly_flag) + + def make_update_readonly_flag_test(self, readonly, return_code): + body = {"os-update_readonly_flag": {"readonly": readonly}} + if readonly is None: + body = {"os-update_readonly_flag": {}} + req = webob.Request.blank('/v2/fake/volumes/1/action') + req.method = "POST" + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, return_code) + + make_update_readonly_flag_test(self, True, 202) + make_update_readonly_flag_test(self, False, 202) + make_update_readonly_flag_test(self, '1', 202) + make_update_readonly_flag_test(self, '0', 202) + make_update_readonly_flag_test(self, 'true', 202) + make_update_readonly_flag_test(self, 'false', 202) + make_update_readonly_flag_test(self, 'tt', 400) + make_update_readonly_flag_test(self, 11, 400) + make_update_readonly_flag_test(self, None, 400) + + +class VolumeRetypeActionsTest(VolumeActionsTest): + def setUp(self): + def get_vol_type(*args, **kwargs): + d1 = {'id': 'fake', 'qos_specs_id': 'fakeqid1', 'extra_specs': {}} + d2 = {'id': 'foo', 'qos_specs_id': 'fakeqid2', 'extra_specs': {}} + return d1 if d1['id'] == args[1] else d2 + + self.retype_patchers = {} + self.retype_mocks = {} + paths = ['cinder.volume.volume_types.get_volume_type', + 'cinder.volume.volume_types.get_volume_type_by_name', + 'cinder.volume.qos_specs.get_qos_specs', + 'cinder.quota.QUOTAS.add_volume_type_opts', + 'cinder.quota.QUOTAS.reserve'] + for path in paths: + name = path.split('.')[-1] + self.retype_patchers[name] = mock.patch(path) + self.retype_mocks[name] = self.retype_patchers[name].start() + + self.retype_mocks['get_volume_type'].side_effect = get_vol_type + self.retype_mocks['get_volume_type_by_name'].side_effect = get_vol_type + self.retype_mocks['add_volume_type_opts'].return_value = None + self.retype_mocks['reserve'].return_value = None + + super(VolumeRetypeActionsTest, self).setUp() + + def tearDown(self): + for name, patcher in self.retype_patchers.iteritems(): + patcher.stop() + super(VolumeRetypeActionsTest, self).tearDown() + + def _retype_volume_exec(self, expected_status, new_type='foo'): + req = webob.Request.blank('/v2/fake/volumes/1/action') + req.method = 'POST' + req.headers['content-type'] = 'application/json' + retype_body = {'new_type': new_type, 'migration_policy': 'never'} + req.body = jsonutils.dumps({'os-retype': retype_body}) + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, expected_status) + + @mock.patch('cinder.volume.qos_specs.get_qos_specs') + def test_retype_volume_success(self, _mock_get_qspecs): + # Test that the retype 
API works for both available and in-use + self._retype_volume_exec(202) + self.mock_volume_get.return_value['status'] = 'in-use' + specs = {'qos_specs': {'id': 'fakeqid1', 'consumer': 'back-end'}} + _mock_get_qspecs.return_value = specs + self._retype_volume_exec(202) + + def test_retype_volume_no_body(self): + # Request with no body should fail + req = webob.Request.blank('/v2/fake/volumes/1/action') + req.method = 'POST' + req.headers['content-type'] = 'application/json' + req.body = jsonutils.dumps({'os-retype': None}) + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) + + def test_retype_volume_bad_policy(self): + # Request with invalid migration policy should fail + req = webob.Request.blank('/v2/fake/volumes/1/action') + req.method = 'POST' + req.headers['content-type'] = 'application/json' + retype_body = {'new_type': 'foo', 'migration_policy': 'invalid'} + req.body = jsonutils.dumps({'os-retype': retype_body}) + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) + + def test_retype_volume_bad_status(self): + # Should fail if volume does not have proper status + self.mock_volume_get.return_value['status'] = 'error' + self._retype_volume_exec(400) + + def test_retype_type_no_exist(self): + # Should fail if new type does not exist + exc = exception.VolumeTypeNotFound('exc') + self.retype_mocks['get_volume_type'].side_effect = exc + self._retype_volume_exec(404) + + def test_retype_same_type(self): + # Should fail if new type and old type are the same + self._retype_volume_exec(400, new_type='fake') + + def test_retype_over_quota(self): + # Should fail if going over quota for new type + exc = exception.OverQuota(overs=['gigabytes'], + quotas={'gigabytes': 20}, + usages={'gigabytes': {'reserved': 5, + 'in_use': 15}}) + self.retype_mocks['reserve'].side_effect = exc + self._retype_volume_exec(413) + + @mock.patch('cinder.volume.qos_specs.get_qos_specs') + def _retype_volume_diff_qos(self, vol_status, consumer, expected_status, + _mock_get_qspecs): + def fake_get_qos(ctxt, qos_id): + d1 = {'qos_specs': {'id': 'fakeqid1', 'consumer': consumer}} + d2 = {'qos_specs': {'id': 'fakeqid2', 'consumer': consumer}} + return d1 if d1['qos_specs']['id'] == qos_id else d2 + + self.mock_volume_get.return_value['status'] = vol_status + _mock_get_qspecs.side_effect = fake_get_qos + self._retype_volume_exec(expected_status) + + def test_retype_volume_diff_qos_fe_in_use(self): + # should fail if changing qos enforced by front-end for in-use volumes + self._retype_volume_diff_qos('in-use', 'front-end', 400) + + def test_retype_volume_diff_qos_fe_available(self): + # should NOT fail if changing qos enforced by FE for available volumes + self._retype_volume_diff_qos('available', 'front-end', 202) + + def test_retype_volume_diff_qos_be(self): + # should NOT fail if changing qos enforced by back-end + self._retype_volume_diff_qos('available', 'back-end', 202) + self._retype_volume_diff_qos('in-use', 'back-end', 202) + + +def stub_volume_get(self, context, volume_id): + volume = stubs.stub_volume(volume_id) + if volume_id == 5: + volume['status'] = 'in-use' + else: + volume['status'] = 'available' + return volume + + +def stub_upload_volume_to_image_service(self, context, volume, metadata, + force): + ret = {"id": volume['id'], + "updated_at": datetime.datetime(1, 1, 1, 1, 1, 1), + "status": 'uploading', + "display_description": volume['display_description'], + "size": volume['size'], + "volume_type": volume['volume_type'], + "image_id": 1, + 
"container_format": 'bare', + "disk_format": 'raw', + "image_name": 'image_name'} + return ret + + +class VolumeImageActionsTest(test.TestCase): + def setUp(self): + super(VolumeImageActionsTest, self).setUp() + self.controller = volume_actions.VolumeActionsController() + + self.stubs.Set(volume_api.API, 'get', stub_volume_get) + + def test_copy_volume_to_image(self): + self.stubs.Set(volume_api.API, + "copy_volume_to_image", + stub_upload_volume_to_image_service) + + id = 1 + vol = {"container_format": 'bare', + "disk_format": 'raw', + "image_name": 'image_name', + "force": True} + body = {"os-volume_upload_image": vol} + req = fakes.HTTPRequest.blank('/v2/tenant1/volumes/%s/action' % id) + res_dict = self.controller._volume_upload_image(req, id, body) + expected = {'os-volume_upload_image': {'id': id, + 'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1), + 'status': 'uploading', + 'display_description': 'displaydesc', + 'size': 1, + 'volume_type': {'name': 'vol_type_name'}, + 'image_id': 1, + 'container_format': 'bare', + 'disk_format': 'raw', + 'image_name': 'image_name'}} + self.assertDictMatch(res_dict, expected) + + def test_copy_volume_to_image_volumenotfound(self): + def stub_volume_get_raise_exc(self, context, volume_id): + raise exception.VolumeNotFound(volume_id=volume_id) + + self.stubs.Set(volume_api.API, 'get', stub_volume_get_raise_exc) + + id = 1 + vol = {"container_format": 'bare', + "disk_format": 'raw', + "image_name": 'image_name', + "force": True} + body = {"os-volume_upload_image": vol} + req = fakes.HTTPRequest.blank('/v2/tenant1/volumes/%s/action' % id) + self.assertRaises(webob.exc.HTTPNotFound, + self.controller._volume_upload_image, + req, + id, + body) + + def test_copy_volume_to_image_invalidvolume(self): + def stub_upload_volume_to_image_service_raise(self, context, volume, + metadata, force): + raise exception.InvalidVolume(reason='blah') + self.stubs.Set(volume_api.API, + "copy_volume_to_image", + stub_upload_volume_to_image_service_raise) + + id = 1 + vol = {"container_format": 'bare', + "disk_format": 'raw', + "image_name": 'image_name', + "force": True} + body = {"os-volume_upload_image": vol} + req = fakes.HTTPRequest.blank('/v2/tenant1/volumes/%s/action' % id) + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller._volume_upload_image, + req, + id, + body) + + def test_copy_volume_to_image_valueerror(self): + def stub_upload_volume_to_image_service_raise(self, context, volume, + metadata, force): + raise ValueError + self.stubs.Set(volume_api.API, + "copy_volume_to_image", + stub_upload_volume_to_image_service_raise) + + id = 1 + vol = {"container_format": 'bare', + "disk_format": 'raw', + "image_name": 'image_name', + "force": True} + body = {"os-volume_upload_image": vol} + req = fakes.HTTPRequest.blank('/v2/tenant1/volumes/%s/action' % id) + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller._volume_upload_image, + req, + id, + body) + + def test_copy_volume_to_image_remoteerror(self): + def stub_upload_volume_to_image_service_raise(self, context, volume, + metadata, force): + raise rpc_common.RemoteError + self.stubs.Set(volume_api.API, + "copy_volume_to_image", + stub_upload_volume_to_image_service_raise) + + id = 1 + vol = {"container_format": 'bare', + "disk_format": 'raw', + "image_name": 'image_name', + "force": True} + body = {"os-volume_upload_image": vol} + req = fakes.HTTPRequest.blank('/v2/tenant1/volumes/%s/action' % id) + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller._volume_upload_image, + req, + id, + 
body) + + def test_volume_upload_image_typeerror(self): + id = 1 + body = {"os-volume_upload_image_fake": "fake"} + req = webob.Request.blank('/v2/tenant1/volumes/%s/action' % id) + req.method = 'POST' + req.headers['Content-Type'] = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) + + def test_volume_upload_image_without_type(self): + id = 1 + vol = {"container_format": 'bare', + "disk_format": 'raw', + "image_name": None, + "force": True} + body = {"": vol} + req = webob.Request.blank('/v2/tenant1/volumes/%s/action' % id) + req.method = 'POST' + req.headers['Content-Type'] = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) + + def test_extend_volume_valueerror(self): + id = 1 + body = {'os-extend': {'new_size': 'fake'}} + req = fakes.HTTPRequest.blank('/v2/tenant1/volumes/%s/action' % id) + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller._extend, + req, + id, + body) + + def test_copy_volume_to_image_notimagename(self): + id = 1 + vol = {"container_format": 'bare', + "disk_format": 'raw', + "image_name": None, + "force": True} + body = {"os-volume_upload_image": vol} + req = fakes.HTTPRequest.blank('/v2/tenant1/volumes/%s/action' % id) + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller._volume_upload_image, + req, + id, + body) diff --git a/cinder/tests/api/contrib/test_volume_encryption_metadata.py b/cinder/tests/api/contrib/test_volume_encryption_metadata.py new file mode 100644 index 0000000000..5d321435b0 --- /dev/null +++ b/cinder/tests/api/contrib/test_volume_encryption_metadata.py @@ -0,0 +1,222 @@ +# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
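+
+"""Tests for the volume encryption metadata API extension."""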
+ +import json +import webob + +from cinder.api.contrib import volume_encryption_metadata +from cinder import context +from cinder import db +from cinder import test +from cinder.tests.api import fakes +from cinder.volume import volume_types + + +def return_volume_type_encryption_metadata(context, volume_type_id): + return stub_volume_type_encryption() + + +def stub_volume_type_encryption(): + values = { + 'cipher': 'cipher', + 'key_size': 256, + 'provider': 'nova.volume.encryptors.base.VolumeEncryptor', + 'volume_type_id': 'volume_type', + 'control_location': 'front-end', + } + return values + + +class VolumeEncryptionMetadataTest(test.TestCase): + @staticmethod + def _create_volume(context, + display_name='test_volume', + display_description='this is a test volume', + status='creating', + availability_zone='fake_az', + host='fake_host', + size=1): + """Create a volume object.""" + volume = { + 'size': size, + 'user_id': 'fake', + 'project_id': 'fake', + 'status': status, + 'display_name': display_name, + 'display_description': display_description, + 'attach_status': 'detached', + 'availability_zone': availability_zone, + 'host': host, + 'encryption_key_id': 'fake_key', + } + return db.volume_create(context, volume)['id'] + + def setUp(self): + super(VolumeEncryptionMetadataTest, self).setUp() + self.controller = (volume_encryption_metadata. + VolumeEncryptionMetadataController()) + self.stubs.Set(db.sqlalchemy.api, 'volume_type_encryption_get', + return_volume_type_encryption_metadata) + + self.ctxt = context.RequestContext('fake', 'fake') + self.volume_id = self._create_volume(self.ctxt) + + def tearDown(self): + db.volume_destroy(self.ctxt.elevated(), self.volume_id) + super(VolumeEncryptionMetadataTest, self).tearDown() + + def test_index(self): + self.stubs.Set(volume_types, 'is_encrypted', lambda *a, **kw: True) + + req = webob.Request.blank('/v2/fake/volumes/%s/encryption' + % self.volume_id) + res = req.get_response(fakes.wsgi_app(fake_auth_context=self.ctxt)) + self.assertEqual(200, res.status_code) + res_dict = json.loads(res.body) + + expected = { + "encryption_key_id": "fake_key", + "control_location": "front-end", + "cipher": "cipher", + "provider": "nova.volume.encryptors.base.VolumeEncryptor", + "key_size": 256, + } + self.assertEqual(expected, res_dict) + + def test_index_bad_tenant_id(self): + self.stubs.Set(volume_types, 'is_encrypted', lambda *a, **kw: True) + + req = webob.Request.blank('/v2/%s/volumes/%s/encryption' + % ('bad-tenant-id', self.volume_id)) + res = req.get_response(fakes.wsgi_app(fake_auth_context=self.ctxt)) + self.assertEqual(400, res.status_code) + + res_dict = json.loads(res.body) + expected = {'badRequest': {'code': 400, + 'message': 'Malformed request url'}} + self.assertEqual(expected, res_dict) + + def test_index_bad_volume_id(self): + self.stubs.Set(volume_types, 'is_encrypted', lambda *a, **kw: True) + + bad_volume_id = 'bad_volume_id' + req = webob.Request.blank('/v2/fake/volumes/%s/encryption' + % bad_volume_id) + res = req.get_response(fakes.wsgi_app(fake_auth_context=self.ctxt)) + self.assertEqual(404, res.status_code) + + res_dict = json.loads(res.body) + expected = {'itemNotFound': {'code': 404, + 'message': 'VolumeNotFound: Volume ' + '%s could not be found.' 
+ % bad_volume_id}} + self.assertEqual(expected, res_dict) + + def test_show_key(self): + self.stubs.Set(volume_types, 'is_encrypted', lambda *a, **kw: True) + + req = webob.Request.blank('/v2/fake/volumes/%s/encryption/' + 'encryption_key_id' % self.volume_id) + res = req.get_response(fakes.wsgi_app(fake_auth_context=self.ctxt)) + self.assertEqual(200, res.status_code) + + self.assertEqual('fake_key', res.body) + + def test_show_control(self): + self.stubs.Set(volume_types, 'is_encrypted', lambda *a, **kw: True) + + req = webob.Request.blank('/v2/fake/volumes/%s/encryption/' + 'control_location' % self.volume_id) + res = req.get_response(fakes.wsgi_app(fake_auth_context=self.ctxt)) + self.assertEqual(200, res.status_code) + + self.assertEqual('front-end', res.body) + + def test_show_provider(self): + self.stubs.Set(volume_types, 'is_encrypted', lambda *a, **kw: True) + + req = webob.Request.blank('/v2/fake/volumes/%s/encryption/' + 'provider' % self.volume_id) + res = req.get_response(fakes.wsgi_app(fake_auth_context=self.ctxt)) + self.assertEqual(200, res.status_code) + + self.assertEqual('nova.volume.encryptors.base.VolumeEncryptor', + res.body) + + def test_show_bad_tenant_id(self): + self.stubs.Set(volume_types, 'is_encrypted', lambda *a, **kw: True) + + req = webob.Request.blank('/v2/%s/volumes/%s/encryption/' + 'encryption_key_id' % ('bad-tenant-id', + self.volume_id)) + res = req.get_response(fakes.wsgi_app(fake_auth_context=self.ctxt)) + self.assertEqual(400, res.status_code) + + res_dict = json.loads(res.body) + expected = {'badRequest': {'code': 400, + 'message': 'Malformed request url'}} + self.assertEqual(expected, res_dict) + + def test_show_bad_volume_id(self): + self.stubs.Set(volume_types, 'is_encrypted', lambda *a, **kw: True) + + bad_volume_id = 'bad_volume_id' + req = webob.Request.blank('/v2/fake/volumes/%s/encryption/' + 'encryption_key_id' % bad_volume_id) + res = req.get_response(fakes.wsgi_app(fake_auth_context=self.ctxt)) + self.assertEqual(404, res.status_code) + + res_dict = json.loads(res.body) + expected = {'itemNotFound': {'code': 404, + 'message': 'VolumeNotFound: Volume ' + '%s could not be found.' 
+ % bad_volume_id}} + self.assertEqual(expected, res_dict) + + def test_retrieve_key_admin(self): + self.stubs.Set(volume_types, 'is_encrypted', lambda *a, **kw: True) + + ctxt = context.RequestContext('fake', 'fake', is_admin=True) + + req = webob.Request.blank('/v2/fake/volumes/%s/encryption/' + 'encryption_key_id' % self.volume_id) + res = req.get_response(fakes.wsgi_app(fake_auth_context=ctxt)) + self.assertEqual(200, res.status_code) + + self.assertEqual('fake_key', res.body) + + def test_show_volume_not_encrypted_type(self): + self.stubs.Set(volume_types, 'is_encrypted', lambda *a, **kw: False) + + req = webob.Request.blank('/v2/fake/volumes/%s/encryption/' + 'encryption_key_id' % self.volume_id) + res = req.get_response(fakes.wsgi_app(fake_auth_context=self.ctxt)) + self.assertEqual(200, res.status_code) + self.assertEqual(0, len(res.body)) + + def test_index_volume_not_encrypted_type(self): + self.stubs.Set(volume_types, 'is_encrypted', lambda *a, **kw: False) + + req = webob.Request.blank('/v2/fake/volumes/%s/encryption' + % self.volume_id) + res = req.get_response(fakes.wsgi_app(fake_auth_context=self.ctxt)) + + self.assertEqual(200, res.status_code) + res_dict = json.loads(res.body) + + expected = { + 'encryption_key_id': None + } + self.assertEqual(expected, res_dict) diff --git a/cinder/tests/api/contrib/test_volume_host_attribute.py b/cinder/tests/api/contrib/test_volume_host_attribute.py new file mode 100644 index 0000000000..1ff0aeee4e --- /dev/null +++ b/cinder/tests/api/contrib/test_volume_host_attribute.py @@ -0,0 +1,137 @@ +# Copyright 2012 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
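+
+"""Tests for the os-vol-host-attr volume attribute extension."""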
+ +import datetime +import json +import uuid + +from lxml import etree +import webob + +from cinder import context +from cinder import db +from cinder import test +from cinder.tests.api import fakes +from cinder import volume + + +def fake_volume_get(*args, **kwargs): + return { + 'id': 'fake', + 'host': 'host001', + 'status': 'available', + 'size': 5, + 'availability_zone': 'somewhere', + 'created_at': datetime.datetime.now(), + 'attach_status': None, + 'display_name': 'anothervolume', + 'display_description': 'Just another volume!', + 'volume_type_id': None, + 'snapshot_id': None, + 'project_id': 'fake', + 'migration_status': None, + '_name_id': 'fake2', + } + + +def fake_volume_get_all(*args, **kwargs): + return [fake_volume_get()] + + +def app(): + # no auth, just let environ['cinder.context'] pass through + api = fakes.router.APIRouter() + mapper = fakes.urlmap.URLMap() + mapper['/v2'] = api + return mapper + + +class VolumeHostAttributeTest(test.TestCase): + + def setUp(self): + super(VolumeHostAttributeTest, self).setUp() + self.stubs.Set(volume.API, 'get', fake_volume_get) + self.stubs.Set(volume.API, 'get_all', fake_volume_get_all) + self.stubs.Set(db, 'volume_get', fake_volume_get) + + self.UUID = uuid.uuid4() + + def test_get_volume_allowed(self): + ctx = context.RequestContext('admin', 'fake', True) + req = webob.Request.blank('/v2/fake/volumes/%s' % self.UUID) + req.method = 'GET' + req.environ['cinder.context'] = ctx + res = req.get_response(app()) + vol = json.loads(res.body)['volume'] + self.assertEqual(vol['os-vol-host-attr:host'], 'host001') + + def test_get_volume_unallowed(self): + ctx = context.RequestContext('non-admin', 'fake', False) + req = webob.Request.blank('/v2/fake/volumes/%s' % self.UUID) + req.method = 'GET' + req.environ['cinder.context'] = ctx + res = req.get_response(app()) + vol = json.loads(res.body)['volume'] + self.assertNotIn('os-vol-host-attr:host', vol) + + def test_list_detail_volumes_allowed(self): + ctx = context.RequestContext('admin', 'fake', True) + req = webob.Request.blank('/v2/fake/volumes/detail') + req.method = 'GET' + req.environ['cinder.context'] = ctx + res = req.get_response(app()) + vol = json.loads(res.body)['volumes'] + self.assertEqual(vol[0]['os-vol-host-attr:host'], 'host001') + + def test_list_detail_volumes_unallowed(self): + ctx = context.RequestContext('non-admin', 'fake', False) + req = webob.Request.blank('/v2/fake/volumes/detail') + req.method = 'GET' + req.environ['cinder.context'] = ctx + res = req.get_response(app()) + vol = json.loads(res.body)['volumes'] + self.assertNotIn('os-vol-host-attr:host', vol[0]) + + def test_list_simple_volumes_no_host(self): + ctx = context.RequestContext('admin', 'fake', True) + req = webob.Request.blank('/v2/fake/volumes') + req.method = 'GET' + req.environ['cinder.context'] = ctx + res = req.get_response(app()) + vol = json.loads(res.body)['volumes'] + self.assertNotIn('os-vol-host-attr:host', vol[0]) + + def test_get_volume_xml(self): + ctx = context.RequestContext('admin', 'fake', True) + req = webob.Request.blank('/v2/fake/volumes/%s' % self.UUID) + req.method = 'GET' + req.accept = 'application/xml' + req.environ['cinder.context'] = ctx + res = req.get_response(app()) + vol = etree.XML(res.body) + host_key = ('{http://docs.openstack.org/volume/ext/' + 'volume_host_attribute/api/v1}host') + self.assertEqual(vol.get(host_key), 'host001') + + def test_list_volumes_detail_xml(self): + ctx = context.RequestContext('admin', 'fake', True) + req = 
webob.Request.blank('/v2/fake/volumes/detail') + req.method = 'GET' + req.accept = 'application/xml' + req.environ['cinder.context'] = ctx + res = req.get_response(app()) + vol = list(etree.XML(res.body))[0] + host_key = ('{http://docs.openstack.org/volume/ext/' + 'volume_host_attribute/api/v1}host') + self.assertEqual(vol.get(host_key), 'host001') diff --git a/cinder/tests/api/contrib/test_volume_image_metadata.py b/cinder/tests/api/contrib/test_volume_image_metadata.py new file mode 100644 index 0000000000..7520c7464f --- /dev/null +++ b/cinder/tests/api/contrib/test_volume_image_metadata.py @@ -0,0 +1,136 @@ +# Copyright 2012 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import datetime +import json +import uuid +from xml.dom import minidom + +import webob + +from cinder.api import common +from cinder.api.openstack.wsgi import MetadataXMLDeserializer +from cinder.api.openstack.wsgi import XMLDeserializer +from cinder import db +from cinder import test +from cinder.tests.api import fakes +from cinder import volume + + +def fake_volume_get(*args, **kwargs): + return { + 'id': 'fake', + 'host': 'host001', + 'status': 'available', + 'size': 5, + 'availability_zone': 'somewhere', + 'created_at': datetime.datetime.now(), + 'attach_status': None, + 'display_name': 'anothervolume', + 'display_description': 'Just another volume!', + 'volume_type_id': None, + 'snapshot_id': None, + 'project_id': 'fake', + } + + +def fake_volume_get_all(*args, **kwargs): + return [fake_volume_get()] + + +fake_image_metadata = { + 'image_id': 'someid', + 'image_name': 'fake', + 'kernel_id': 'somekernel', + 'ramdisk_id': 'someramdisk', +} + + +def fake_get_volume_image_metadata(*args, **kwargs): + return fake_image_metadata + + +def fake_get_volumes_image_metadata(*args, **kwargs): + return {'fake': fake_image_metadata} + + +class VolumeImageMetadataTest(test.TestCase): + content_type = 'application/json' + + def setUp(self): + super(VolumeImageMetadataTest, self).setUp() + self.stubs.Set(volume.API, 'get', fake_volume_get) + self.stubs.Set(volume.API, 'get_all', fake_volume_get_all) + self.stubs.Set(volume.API, 'get_volume_image_metadata', + fake_get_volume_image_metadata) + self.stubs.Set(volume.API, 'get_volumes_image_metadata', + fake_get_volumes_image_metadata) + self.stubs.Set(db, 'volume_get', fake_volume_get) + self.UUID = uuid.uuid4() + + def _make_request(self, url): + req = webob.Request.blank(url) + req.accept = self.content_type + res = req.get_response(fakes.wsgi_app()) + return res + + def _get_image_metadata(self, body): + return json.loads(body)['volume']['volume_image_metadata'] + + def _get_image_metadata_list(self, body): + return [ + volume['volume_image_metadata'] + for volume in json.loads(body)['volumes'] + ] + + def test_get_volume(self): + res = self._make_request('/v2/fake/volumes/%s' % self.UUID) + self.assertEqual(res.status_int, 200) + self.assertEqual(self._get_image_metadata(res.body), + fake_image_metadata) + + def test_list_detail_volumes(self): + res = 
self._make_request('/v2/fake/volumes/detail') + self.assertEqual(res.status_int, 200) + self.assertEqual(self._get_image_metadata_list(res.body)[0], + fake_image_metadata) + + +class ImageMetadataXMLDeserializer(common.MetadataXMLDeserializer): + metadata_node_name = "volume_image_metadata" + + +class VolumeImageMetadataXMLTest(VolumeImageMetadataTest): + content_type = 'application/xml' + + def _get_image_metadata(self, body): + deserializer = XMLDeserializer() + volume = deserializer.find_first_child_named( + minidom.parseString(body), 'volume') + image_metadata = deserializer.find_first_child_named( + volume, 'volume_image_metadata') + return MetadataXMLDeserializer().extract_metadata(image_metadata) + + def _get_image_metadata_list(self, body): + deserializer = XMLDeserializer() + volumes = deserializer.find_first_child_named( + minidom.parseString(body), 'volumes') + volume_list = deserializer.find_children_named(volumes, 'volume') + image_metadata_list = [ + deserializer.find_first_child_named( + volume, 'volume_image_metadata' + ) + for volume in volume_list] + return map(MetadataXMLDeserializer().extract_metadata, + image_metadata_list) diff --git a/cinder/tests/api/contrib/test_volume_migration_status_attribute.py b/cinder/tests/api/contrib/test_volume_migration_status_attribute.py new file mode 100644 index 0000000000..ee390cc0e8 --- /dev/null +++ b/cinder/tests/api/contrib/test_volume_migration_status_attribute.py @@ -0,0 +1,145 @@ +# Copyright 2013 IBM Corp. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
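+
+"""Tests for the os-vol-mig-status-attr volume attribute extension."""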
+ +import datetime +import json +import uuid + +from lxml import etree +import webob + +from cinder import context +from cinder import test +from cinder.tests.api import fakes +from cinder import volume + + +def fake_volume_get(*args, **kwargs): + return { + 'id': 'fake', + 'host': 'host001', + 'status': 'available', + 'size': 5, + 'availability_zone': 'somewhere', + 'created_at': datetime.datetime.now(), + 'attach_status': None, + 'display_name': 'anothervolume', + 'display_description': 'Just another volume!', + 'volume_type_id': None, + 'snapshot_id': None, + 'project_id': 'fake', + 'migration_status': 'migrating', + '_name_id': 'fake2', + } + + +def fake_volume_get_all(*args, **kwargs): + return [fake_volume_get()] + + +def app(): + # no auth, just let environ['cinder.context'] pass through + api = fakes.router.APIRouter() + mapper = fakes.urlmap.URLMap() + mapper['/v2'] = api + return mapper + + +class VolumeMigStatusAttributeTest(test.TestCase): + + def setUp(self): + super(VolumeMigStatusAttributeTest, self).setUp() + self.stubs.Set(volume.API, 'get', fake_volume_get) + self.stubs.Set(volume.API, 'get_all', fake_volume_get_all) + self.UUID = uuid.uuid4() + + def test_get_volume_allowed(self): + ctx = context.RequestContext('admin', 'fake', True) + req = webob.Request.blank('/v2/fake/volumes/%s' % self.UUID) + req.method = 'GET' + req.environ['cinder.context'] = ctx + res = req.get_response(app()) + vol = json.loads(res.body)['volume'] + self.assertEqual(vol['os-vol-mig-status-attr:migstat'], 'migrating') + self.assertEqual(vol['os-vol-mig-status-attr:name_id'], 'fake2') + + def test_get_volume_unallowed(self): + ctx = context.RequestContext('non-admin', 'fake', False) + req = webob.Request.blank('/v2/fake/volumes/%s' % self.UUID) + req.method = 'GET' + req.environ['cinder.context'] = ctx + res = req.get_response(app()) + vol = json.loads(res.body)['volume'] + self.assertNotIn('os-vol-mig-status-attr:migstat', vol) + self.assertNotIn('os-vol-mig-status-attr:name_id', vol) + + def test_list_detail_volumes_allowed(self): + ctx = context.RequestContext('admin', 'fake', True) + req = webob.Request.blank('/v2/fake/volumes/detail') + req.method = 'GET' + req.environ['cinder.context'] = ctx + res = req.get_response(app()) + vol = json.loads(res.body)['volumes'] + self.assertEqual(vol[0]['os-vol-mig-status-attr:migstat'], 'migrating') + self.assertEqual(vol[0]['os-vol-mig-status-attr:name_id'], 'fake2') + + def test_list_detail_volumes_unallowed(self): + ctx = context.RequestContext('non-admin', 'fake', False) + req = webob.Request.blank('/v2/fake/volumes/detail') + req.method = 'GET' + req.environ['cinder.context'] = ctx + res = req.get_response(app()) + vol = json.loads(res.body)['volumes'] + self.assertNotIn('os-vol-mig-status-attr:migstat', vol[0]) + self.assertNotIn('os-vol-mig-status-attr:name_id', vol[0]) + + def test_list_simple_volumes_no_migration_status(self): + ctx = context.RequestContext('admin', 'fake', True) + req = webob.Request.blank('/v2/fake/volumes') + req.method = 'GET' + req.environ['cinder.context'] = ctx + res = req.get_response(app()) + vol = json.loads(res.body)['volumes'] + self.assertNotIn('os-vol-mig-status-attr:migstat', vol[0]) + self.assertNotIn('os-vol-mig-status-attr:name_id', vol[0]) + + def test_get_volume_xml(self): + ctx = context.RequestContext('admin', 'fake', True) + req = webob.Request.blank('/v2/fake/volumes/%s' % self.UUID) + req.method = 'GET' + req.accept = 'application/xml' + req.environ['cinder.context'] = ctx + res = req.get_response(app()) + 
vol = etree.XML(res.body) + mig_key = ('{http://docs.openstack.org/volume/ext/' + 'volume_mig_status_attribute/api/v1}migstat') + self.assertEqual(vol.get(mig_key), 'migrating') + mig_key = ('{http://docs.openstack.org/volume/ext/' + 'volume_mig_status_attribute/api/v1}name_id') + self.assertEqual(vol.get(mig_key), 'fake2') + + def test_list_volumes_detail_xml(self): + ctx = context.RequestContext('admin', 'fake', True) + req = webob.Request.blank('/v2/fake/volumes/detail') + req.method = 'GET' + req.accept = 'application/xml' + req.environ['cinder.context'] = ctx + res = req.get_response(app()) + vol = list(etree.XML(res.body))[0] + mig_key = ('{http://docs.openstack.org/volume/ext/' + 'volume_mig_status_attribute/api/v1}migstat') + self.assertEqual(vol.get(mig_key), 'migrating') + mig_key = ('{http://docs.openstack.org/volume/ext/' + 'volume_mig_status_attribute/api/v1}name_id') + self.assertEqual(vol.get(mig_key), 'fake2') diff --git a/cinder/tests/api/contrib/test_volume_tenant_attribute.py b/cinder/tests/api/contrib/test_volume_tenant_attribute.py new file mode 100644 index 0000000000..5323b741cc --- /dev/null +++ b/cinder/tests/api/contrib/test_volume_tenant_attribute.py @@ -0,0 +1,137 @@ +# Copyright 2012 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
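+
+"""Tests for the os-vol-tenant-attr volume attribute extension."""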
+ +import datetime +import json +import uuid + +from lxml import etree +import webob + +from cinder import context +from cinder import test +from cinder.tests.api import fakes +from cinder import volume + + +PROJECT_ID = '88fd1da4-f464-4a87-9ce5-26f2f40743b9' + + +def fake_volume_get(*args, **kwargs): + return { + 'id': 'fake', + 'host': 'host001', + 'status': 'available', + 'size': 5, + 'availability_zone': 'somewhere', + 'created_at': datetime.datetime.now(), + 'attach_status': None, + 'display_name': 'anothervolume', + 'display_description': 'Just another volume!', + 'volume_type_id': None, + 'snapshot_id': None, + 'project_id': PROJECT_ID, + 'migration_status': None, + '_name_id': 'fake2', + } + + +def fake_volume_get_all(*args, **kwargs): + return [fake_volume_get()] + + +def app(): + # no auth, just let environ['cinder.context'] pass through + api = fakes.router.APIRouter() + mapper = fakes.urlmap.URLMap() + mapper['/v2'] = api + return mapper + + +class VolumeTenantAttributeTest(test.TestCase): + + def setUp(self): + super(VolumeTenantAttributeTest, self).setUp() + self.stubs.Set(volume.API, 'get', fake_volume_get) + self.stubs.Set(volume.API, 'get_all', fake_volume_get_all) + self.UUID = uuid.uuid4() + + def test_get_volume_allowed(self): + ctx = context.RequestContext('admin', 'fake', True) + req = webob.Request.blank('/v2/fake/volumes/%s' % self.UUID) + req.method = 'GET' + req.environ['cinder.context'] = ctx + res = req.get_response(app()) + vol = json.loads(res.body)['volume'] + self.assertEqual(vol['os-vol-tenant-attr:tenant_id'], PROJECT_ID) + + def test_get_volume_unallowed(self): + ctx = context.RequestContext('non-admin', 'fake', False) + req = webob.Request.blank('/v2/fake/volumes/%s' % self.UUID) + req.method = 'GET' + req.environ['cinder.context'] = ctx + res = req.get_response(app()) + vol = json.loads(res.body)['volume'] + self.assertNotIn('os-vol-tenant-attr:tenant_id', vol) + + def test_list_detail_volumes_allowed(self): + ctx = context.RequestContext('admin', 'fake', True) + req = webob.Request.blank('/v2/fake/volumes/detail') + req.method = 'GET' + req.environ['cinder.context'] = ctx + res = req.get_response(app()) + vol = json.loads(res.body)['volumes'] + self.assertEqual(vol[0]['os-vol-tenant-attr:tenant_id'], PROJECT_ID) + + def test_list_detail_volumes_unallowed(self): + ctx = context.RequestContext('non-admin', 'fake', False) + req = webob.Request.blank('/v2/fake/volumes/detail') + req.method = 'GET' + req.environ['cinder.context'] = ctx + res = req.get_response(app()) + vol = json.loads(res.body)['volumes'] + self.assertNotIn('os-vol-tenant-attr:tenant_id', vol[0]) + + def test_list_simple_volumes_no_tenant_id(self): + ctx = context.RequestContext('admin', 'fake', True) + req = webob.Request.blank('/v2/fake/volumes') + req.method = 'GET' + req.environ['cinder.context'] = ctx + res = req.get_response(app()) + vol = json.loads(res.body)['volumes'] + self.assertNotIn('os-vol-tenant-attr:tenant_id', vol[0]) + + def test_get_volume_xml(self): + ctx = context.RequestContext('admin', 'fake', True) + req = webob.Request.blank('/v2/fake/volumes/%s' % self.UUID) + req.method = 'GET' + req.accept = 'application/xml' + req.environ['cinder.context'] = ctx + res = req.get_response(app()) + vol = etree.XML(res.body) + tenant_key = ('{http://docs.openstack.org/volume/ext/' + 'volume_tenant_attribute/api/v1}tenant_id') + self.assertEqual(vol.get(tenant_key), PROJECT_ID) + + def test_list_volumes_detail_xml(self): + ctx = context.RequestContext('admin', 'fake', True) + req = 
webob.Request.blank('/v2/fake/volumes/detail') + req.method = 'GET' + req.accept = 'application/xml' + req.environ['cinder.context'] = ctx + res = req.get_response(app()) + vol = list(etree.XML(res.body))[0] + tenant_key = ('{http://docs.openstack.org/volume/ext/' + 'volume_tenant_attribute/api/v1}tenant_id') + self.assertEqual(vol.get(tenant_key), PROJECT_ID) diff --git a/cinder/tests/api/contrib/test_volume_transfer.py b/cinder/tests/api/contrib/test_volume_transfer.py new file mode 100644 index 0000000000..21d53f4792 --- /dev/null +++ b/cinder/tests/api/contrib/test_volume_transfer.py @@ -0,0 +1,575 @@ +# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Tests for volume transfer code. +""" + +import json +from xml.dom import minidom + +import webob + +from cinder import context +from cinder import db +from cinder import exception +from cinder.openstack.common import log as logging +from cinder import test +from cinder.tests.api import fakes +from cinder.transfer import API +import cinder.volume + + +LOG = logging.getLogger(__name__) +volume_transfer_api = API() + + +class VolumeTransferAPITestCase(test.TestCase): + """Test Case for transfers API.""" + + def setUp(self): + super(VolumeTransferAPITestCase, self).setUp() + + def tearDown(self): + super(VolumeTransferAPITestCase, self).tearDown() + + @staticmethod + def _create_transfer(volume_id=1, + display_name='test_transfer'): + """Create a transfer object.""" + return volume_transfer_api.create(context.get_admin_context(), + volume_id, + display_name) + + @staticmethod + def _create_volume(display_name='test_volume', + display_description='this is a test volume', + status='available', + size=1): + """Create a volume object.""" + vol = {} + vol['size'] = size + vol['user_id'] = 'fake' + vol['project_id'] = 'fake' + vol['status'] = status + vol['display_name'] = display_name + vol['display_description'] = display_description + vol['attach_status'] = status + return db.volume_create(context.get_admin_context(), vol)['id'] + + def test_show_transfer(self): + volume_id = self._create_volume(size=5) + transfer = self._create_transfer(volume_id) + LOG.debug('Created transfer with id %s' % transfer) + req = webob.Request.blank('/v2/fake/os-volume-transfer/%s' % + transfer['id']) + req.method = 'GET' + req.headers['Content-Type'] = 'application/json' + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + self.assertEqual(res.status_int, 200) + self.assertEqual(res_dict['transfer']['name'], 'test_transfer') + self.assertEqual(res_dict['transfer']['id'], transfer['id']) + self.assertEqual(res_dict['transfer']['volume_id'], volume_id) + + db.transfer_destroy(context.get_admin_context(), transfer['id']) + db.volume_destroy(context.get_admin_context(), volume_id) + + def test_show_transfer_xml_content_type(self): + volume_id = self._create_volume(size=5) + transfer = self._create_transfer(volume_id) + req = 
webob.Request.blank('/v2/fake/os-volume-transfer/%s' % + transfer['id']) + req.method = 'GET' + req.headers['Content-Type'] = 'application/xml' + req.headers['Accept'] = 'application/xml' + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 200) + dom = minidom.parseString(res.body) + transfer_xml = dom.getElementsByTagName('transfer') + name = transfer_xml.item(0).getAttribute('name') + self.assertEqual(name.strip(), "test_transfer") + + db.transfer_destroy(context.get_admin_context(), transfer['id']) + db.volume_destroy(context.get_admin_context(), volume_id) + + def test_show_transfer_with_transfer_NotFound(self): + req = webob.Request.blank('/v2/fake/os-volume-transfer/1234') + req.method = 'GET' + req.headers['Content-Type'] = 'application/json' + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 404) + self.assertEqual(res_dict['itemNotFound']['code'], 404) + self.assertEqual(res_dict['itemNotFound']['message'], + 'Transfer 1234 could not be found.') + + def test_list_transfers_json(self): + volume_id_1 = self._create_volume(size=5) + volume_id_2 = self._create_volume(size=5) + transfer1 = self._create_transfer(volume_id_1) + transfer2 = self._create_transfer(volume_id_2) + + req = webob.Request.blank('/v2/fake/os-volume-transfer') + req.method = 'GET' + req.headers['Content-Type'] = 'application/json' + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 200) + self.assertEqual(len(res_dict['transfers'][0]), 4) + self.assertEqual(res_dict['transfers'][0]['id'], transfer1['id']) + self.assertEqual(res_dict['transfers'][0]['name'], 'test_transfer') + self.assertEqual(len(res_dict['transfers'][1]), 4) + self.assertEqual(res_dict['transfers'][1]['name'], 'test_transfer') + + db.transfer_destroy(context.get_admin_context(), transfer2['id']) + db.transfer_destroy(context.get_admin_context(), transfer1['id']) + db.volume_destroy(context.get_admin_context(), volume_id_1) + db.volume_destroy(context.get_admin_context(), volume_id_2) + + def test_list_transfers_xml(self): + volume_id_1 = self._create_volume(size=5) + volume_id_2 = self._create_volume(size=5) + transfer1 = self._create_transfer(volume_id_1) + transfer2 = self._create_transfer(volume_id_2) + + req = webob.Request.blank('/v2/fake/os-volume-transfer') + req.method = 'GET' + req.headers['Content-Type'] = 'application/xml' + req.headers['Accept'] = 'application/xml' + res = req.get_response(fakes.wsgi_app()) + + self.assertEqual(res.status_int, 200) + dom = minidom.parseString(res.body) + transfer_list = dom.getElementsByTagName('transfer') + self.assertEqual(transfer_list.item(0).attributes.length, 3) + self.assertEqual(transfer_list.item(0).getAttribute('id'), + transfer1['id']) + self.assertEqual(transfer_list.item(1).attributes.length, 3) + self.assertEqual(transfer_list.item(1).getAttribute('id'), + transfer2['id']) + + db.transfer_destroy(context.get_admin_context(), transfer2['id']) + db.transfer_destroy(context.get_admin_context(), transfer1['id']) + db.volume_destroy(context.get_admin_context(), volume_id_2) + db.volume_destroy(context.get_admin_context(), volume_id_1) + + def test_list_transfers_detail_json(self): + volume_id_1 = self._create_volume(size=5) + volume_id_2 = self._create_volume(size=5) + transfer1 = self._create_transfer(volume_id_1) + transfer2 = self._create_transfer(volume_id_2) + + req = webob.Request.blank('/v2/fake/os-volume-transfer/detail') + req.method = 
'GET'
+        req.headers['Content-Type'] = 'application/json'
+        req.headers['Accept'] = 'application/json'
+        res = req.get_response(fakes.wsgi_app())
+        res_dict = json.loads(res.body)
+
+        self.assertEqual(res.status_int, 200)
+        self.assertEqual(len(res_dict['transfers'][0]), 5)
+        self.assertEqual(res_dict['transfers'][0]['name'],
+                         'test_transfer')
+        self.assertEqual(res_dict['transfers'][0]['id'], transfer1['id'])
+        self.assertEqual(res_dict['transfers'][0]['volume_id'], volume_id_1)
+
+        self.assertEqual(len(res_dict['transfers'][1]), 5)
+        self.assertEqual(res_dict['transfers'][1]['name'],
+                         'test_transfer')
+        self.assertEqual(res_dict['transfers'][1]['id'], transfer2['id'])
+        self.assertEqual(res_dict['transfers'][1]['volume_id'], volume_id_2)
+
+        db.transfer_destroy(context.get_admin_context(), transfer2['id'])
+        db.transfer_destroy(context.get_admin_context(), transfer1['id'])
+        db.volume_destroy(context.get_admin_context(), volume_id_2)
+        db.volume_destroy(context.get_admin_context(), volume_id_1)
+
+    def test_list_transfers_detail_xml(self):
+        volume_id_1 = self._create_volume(size=5)
+        volume_id_2 = self._create_volume(size=5)
+        transfer1 = self._create_transfer(volume_id_1)
+        transfer2 = self._create_transfer(volume_id_2)
+
+        req = webob.Request.blank('/v2/fake/os-volume-transfer/detail')
+        req.method = 'GET'
+        req.headers['Content-Type'] = 'application/xml'
+        req.headers['Accept'] = 'application/xml'
+        res = req.get_response(fakes.wsgi_app())
+
+        self.assertEqual(res.status_int, 200)
+        dom = minidom.parseString(res.body)
+        transfer_detail = dom.getElementsByTagName('transfer')
+
+        self.assertEqual(transfer_detail.item(0).attributes.length, 4)
+        self.assertEqual(
+            transfer_detail.item(0).getAttribute('name'), 'test_transfer')
+        self.assertEqual(
+            transfer_detail.item(0).getAttribute('id'), transfer1['id'])
+        self.assertEqual(transfer_detail.item(0).getAttribute('volume_id'),
+                         volume_id_1)
+
+        self.assertEqual(transfer_detail.item(1).attributes.length, 4)
+        self.assertEqual(
+            transfer_detail.item(1).getAttribute('name'), 'test_transfer')
+        self.assertEqual(
+            transfer_detail.item(1).getAttribute('id'), transfer2['id'])
+        self.assertEqual(transfer_detail.item(1).getAttribute('volume_id'),
+                         volume_id_2)
+
+        db.transfer_destroy(context.get_admin_context(), transfer2['id'])
+        db.transfer_destroy(context.get_admin_context(), transfer1['id'])
+        db.volume_destroy(context.get_admin_context(), volume_id_2)
+        db.volume_destroy(context.get_admin_context(), volume_id_1)
+
+    def test_create_transfer_json(self):
+        volume_id = self._create_volume(status='available', size=5)
+        body = {"transfer": {"display_name": "transfer1",
+                             "volume_id": volume_id}}
+
+        req = webob.Request.blank('/v2/fake/os-volume-transfer')
+        req.method = 'POST'
+        req.headers['Content-Type'] = 'application/json'
+        req.body = json.dumps(body)
+        res = req.get_response(fakes.wsgi_app())
+
+        res_dict = json.loads(res.body)
+        LOG.info(res_dict)
+
+        self.assertEqual(res.status_int, 202)
+        self.assertIn('id', res_dict['transfer'])
+        self.assertIn('auth_key', res_dict['transfer'])
+        self.assertIn('created_at', res_dict['transfer'])
+        self.assertIn('name', res_dict['transfer'])
+        self.assertIn('volume_id', res_dict['transfer'])
+
+        db.volume_destroy(context.get_admin_context(), volume_id)
+
+    def test_create_transfer_xml(self):
+        volume_size = 2
+        volume_id = self._create_volume(status='available', size=volume_size)
+
+        req = webob.Request.blank('/v2/fake/os-volume-transfer')
+        req.body = ('<transfer name="transfer-001" '
+                    'volume_id="%s"/>' % volume_id)
+        req.method = 'POST'
+
req.headers['Content-Type'] = 'application/xml' + req.headers['Accept'] = 'application/xml' + res = req.get_response(fakes.wsgi_app()) + + self.assertEqual(res.status_int, 202) + dom = minidom.parseString(res.body) + transfer = dom.getElementsByTagName('transfer') + self.assertTrue(transfer.item(0).hasAttribute('id')) + self.assertTrue(transfer.item(0).hasAttribute('auth_key')) + self.assertTrue(transfer.item(0).hasAttribute('created_at')) + self.assertEqual(transfer.item(0).getAttribute('name'), 'transfer-001') + self.assertTrue(transfer.item(0).hasAttribute('volume_id')) + db.volume_destroy(context.get_admin_context(), volume_id) + + def test_create_transfer_with_no_body(self): + req = webob.Request.blank('/v2/fake/os-volume-transfer') + req.body = json.dumps(None) + req.method = 'POST' + req.headers['Content-Type'] = 'application/json' + req.headers['Accept'] = 'application/json' + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 400) + self.assertEqual(res_dict['badRequest']['code'], 400) + self.assertEqual(res_dict['badRequest']['message'], + 'The server could not comply with the request since' + ' it is either malformed or otherwise incorrect.') + + def test_create_transfer_with_body_KeyError(self): + body = {"transfer": {"display_name": "transfer1"}} + req = webob.Request.blank('/v2/fake/os-volume-transfer') + req.method = 'POST' + req.headers['Content-Type'] = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 400) + self.assertEqual(res_dict['badRequest']['code'], 400) + self.assertEqual(res_dict['badRequest']['message'], + 'Incorrect request body format') + + def test_create_transfer_with_VolumeNotFound(self): + body = {"transfer": {"display_name": "transfer1", + "volume_id": 1234}} + + req = webob.Request.blank('/v2/fake/os-volume-transfer') + req.method = 'POST' + req.headers['Content-Type'] = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 404) + self.assertEqual(res_dict['itemNotFound']['code'], 404) + self.assertEqual(res_dict['itemNotFound']['message'], + 'Volume 1234 could not be found.') + + def test_create_transfer_with_InvalidVolume(self): + volume_id = self._create_volume(status='attached') + body = {"transfer": {"display_name": "transfer1", + "volume_id": volume_id}} + req = webob.Request.blank('/v2/fake/os-volume-transfer') + req.method = 'POST' + req.headers['Content-Type'] = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 400) + self.assertEqual(res_dict['badRequest']['code'], 400) + self.assertEqual(res_dict['badRequest']['message'], + 'Invalid volume: status must be available') + + db.volume_destroy(context.get_admin_context(), volume_id) + + def test_delete_transfer_awaiting_transfer(self): + volume_id = self._create_volume() + transfer = self._create_transfer(volume_id) + req = webob.Request.blank('/v2/fake/os-volume-transfer/%s' % + transfer['id']) + req.method = 'DELETE' + req.headers['Content-Type'] = 'application/json' + res = req.get_response(fakes.wsgi_app()) + + self.assertEqual(res.status_int, 202) + + # verify transfer has been deleted + req = webob.Request.blank('/v2/fake/os-volume-transfer/%s' % + transfer['id']) + req.method = 'GET' + 
req.headers['Content-Type'] = 'application/json'
+        res = req.get_response(fakes.wsgi_app())
+        res_dict = json.loads(res.body)
+
+        self.assertEqual(res.status_int, 404)
+        self.assertEqual(res_dict['itemNotFound']['code'], 404)
+        self.assertEqual(res_dict['itemNotFound']['message'],
+                         'Transfer %s could not be found.' % transfer['id'])
+        self.assertEqual(db.volume_get(context.get_admin_context(),
+                                       volume_id)['status'], 'available')
+
+        db.volume_destroy(context.get_admin_context(), volume_id)
+
+    def test_delete_transfer_with_transfer_NotFound(self):
+        req = webob.Request.blank('/v2/fake/os-volume-transfer/9999')
+        req.method = 'DELETE'
+        req.headers['Content-Type'] = 'application/json'
+        res = req.get_response(fakes.wsgi_app())
+        res_dict = json.loads(res.body)
+
+        self.assertEqual(res.status_int, 404)
+        self.assertEqual(res_dict['itemNotFound']['code'], 404)
+        self.assertEqual(res_dict['itemNotFound']['message'],
+                         'Transfer 9999 could not be found.')
+
+    def test_accept_transfer_volume_id_specified_json(self):
+        volume_id = self._create_volume()
+        transfer = self._create_transfer(volume_id)
+
+        body = {"accept": {"id": transfer['id'],
+                           "auth_key": transfer['auth_key']}}
+        req = webob.Request.blank('/v2/fake/os-volume-transfer/%s/accept' %
+                                  transfer['id'])
+        req.method = 'POST'
+        req.headers['Content-Type'] = 'application/json'
+        req.body = json.dumps(body)
+        res = req.get_response(fakes.wsgi_app())
+        res_dict = json.loads(res.body)
+
+        self.assertEqual(res.status_int, 202)
+        self.assertEqual(res_dict['transfer']['id'], transfer['id'])
+        self.assertEqual(res_dict['transfer']['volume_id'], volume_id)
+
+    def test_accept_transfer_volume_id_specified_xml(self):
+        volume_id = self._create_volume(size=5)
+        transfer = self._create_transfer(volume_id)
+
+        req = webob.Request.blank('/v2/fake/os-volume-transfer/%s/accept' %
+                                  transfer['id'])
+        req.body = '<accept auth_key="%s"/>' % transfer['auth_key']
+        req.method = 'POST'
+        req.headers['Content-Type'] = 'application/xml'
+        req.headers['Accept'] = 'application/xml'
+        res = req.get_response(fakes.wsgi_app())
+
+        self.assertEqual(res.status_int, 202)
+        dom = minidom.parseString(res.body)
+        accept = dom.getElementsByTagName('transfer')
+        self.assertEqual(accept.item(0).getAttribute('id'),
+                         transfer['id'])
+        self.assertEqual(accept.item(0).getAttribute('volume_id'), volume_id)
+
+        db.volume_destroy(context.get_admin_context(), volume_id)
+
+    def test_accept_transfer_with_no_body(self):
+        volume_id = self._create_volume(size=5)
+        transfer = self._create_transfer(volume_id)
+
+        req = webob.Request.blank('/v2/fake/os-volume-transfer/%s/accept' %
+                                  transfer['id'])
+        req.body = json.dumps(None)
+        req.method = 'POST'
+        req.headers['Content-Type'] = 'application/json'
+        req.headers['Accept'] = 'application/json'
+        res = req.get_response(fakes.wsgi_app())
+        res_dict = json.loads(res.body)
+
+        self.assertEqual(res.status_int, 400)
+        self.assertEqual(res_dict['badRequest']['code'], 400)
+        self.assertEqual(res_dict['badRequest']['message'],
+                         'The server could not comply with the request since'
+                         ' it is either malformed or otherwise incorrect.')
+
+        db.volume_destroy(context.get_admin_context(), volume_id)
+
+    def test_accept_transfer_with_body_KeyError(self):
+        volume_id = self._create_volume(size=5)
+        transfer = self._create_transfer(volume_id)
+
+        req = webob.Request.blank('/v2/fake/os-volume-transfer/%s/accept' %
+                                  transfer['id'])
+        body = {"": {}}
+        req.method = 'POST'
+        req.headers['Content-Type'] = 'application/json'
+        req.headers['Accept'] = 'application/json'
+        req.body
= json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 400) + self.assertEqual(res_dict['badRequest']['code'], 400) + self.assertEqual(res_dict['badRequest']['message'], + 'The server could not comply with the request since' + ' it is either malformed or otherwise incorrect.') + + def test_accept_transfer_invalid_id_auth_key(self): + volume_id = self._create_volume() + transfer = self._create_transfer(volume_id) + + body = {"accept": {"id": transfer['id'], + "auth_key": 1}} + req = webob.Request.blank('/v2/fake/os-volume-transfer/%s/accept' % + transfer['id']) + req.method = 'POST' + req.headers['Content-Type'] = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 400) + self.assertEqual(res_dict['badRequest']['code'], 400) + self.assertEqual(res_dict['badRequest']['message'], + 'Invalid auth key: Attempt to transfer %s with ' + 'invalid auth key.' % transfer['id']) + + db.transfer_destroy(context.get_admin_context(), transfer['id']) + db.volume_destroy(context.get_admin_context(), volume_id) + + def test_accept_transfer_with_invalid_transfer(self): + volume_id = self._create_volume() + transfer = self._create_transfer(volume_id) + + body = {"accept": {"id": transfer['id'], + "auth_key": 1}} + req = webob.Request.blank('/v2/fake/os-volume-transfer/1/accept') + req.method = 'POST' + req.headers['Content-Type'] = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 404) + self.assertEqual(res_dict['itemNotFound']['code'], 404) + self.assertEqual(res_dict['itemNotFound']['message'], + 'TransferNotFound: Transfer 1 could not be found.') + + db.transfer_destroy(context.get_admin_context(), transfer['id']) + db.volume_destroy(context.get_admin_context(), volume_id) + + def test_accept_transfer_with_VolumeSizeExceedsAvailableQuota(self): + + def fake_transfer_api_accept_throwing_VolumeSizeExceedsAvailableQuota( + cls, context, transfer, volume_id): + raise exception.VolumeSizeExceedsAvailableQuota(requested='2', + consumed='2', + quota='3') + + self.stubs.Set( + cinder.transfer.API, + 'accept', + fake_transfer_api_accept_throwing_VolumeSizeExceedsAvailableQuota) + + volume_id = self._create_volume() + transfer = self._create_transfer(volume_id) + + body = {"accept": {"id": transfer['id'], + "auth_key": transfer['auth_key']}} + req = webob.Request.blank('/v2/fake/os-volume-transfer/%s/accept' % + transfer['id']) + + req.method = 'POST' + req.headers['Content-Type'] = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 413) + self.assertEqual(res_dict['overLimit']['code'], 413) + self.assertEqual(res_dict['overLimit']['message'], + 'Requested volume or snapshot exceeds allowed ' + 'Gigabytes quota. 
Requested 2G, quota is 3G and ' + '2G has been consumed.') + + def test_accept_transfer_with_VolumeLimitExceeded(self): + + def fake_transfer_api_accept_throwing_VolumeLimitExceeded(cls, + context, + transfer, + volume_id): + raise exception.VolumeLimitExceeded(allowed=1) + + self.stubs.Set(cinder.transfer.API, 'accept', + fake_transfer_api_accept_throwing_VolumeLimitExceeded) + + volume_id = self._create_volume() + transfer = self._create_transfer(volume_id) + + body = {"accept": {"id": transfer['id'], + "auth_key": transfer['auth_key']}} + req = webob.Request.blank('/v2/fake/os-volume-transfer/%s/accept' % + transfer['id']) + + req.method = 'POST' + req.headers['Content-Type'] = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 413) + self.assertEqual(res_dict['overLimit']['code'], 413) + self.assertEqual(res_dict['overLimit']['message'], + 'VolumeLimitExceeded: Maximum number of volumes ' + 'allowed (1) exceeded') diff --git a/cinder/tests/api/contrib/test_volume_type_encryption.py b/cinder/tests/api/contrib/test_volume_type_encryption.py new file mode 100644 index 0000000000..fc6bacc50d --- /dev/null +++ b/cinder/tests/api/contrib/test_volume_type_encryption.py @@ -0,0 +1,536 @@ +# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
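The os-volume-transfer accept tests that close above (JSON, XML, missing body, bad auth key, quota overruns) all exercise one request shape. A condensed sketch of that call path, reusing this patch's test helpers; accept_transfer is a hypothetical name, and fakes.wsgi_app is the fake WSGI stack added elsewhere in this change:

    import json

    import webob

    from cinder.tests.api import fakes


    def accept_transfer(transfer_id, auth_key):
        # Body shape asserted by the JSON accept tests above.
        body = {"accept": {"id": transfer_id, "auth_key": auth_key}}
        req = webob.Request.blank(
            '/v2/fake/os-volume-transfer/%s/accept' % transfer_id)
        req.method = 'POST'
        req.headers['Content-Type'] = 'application/json'
        req.body = json.dumps(body)
        # 202 on success; the error tests above map malformed input to 400,
        # unknown transfers to 404 and quota failures to 413.
        return req.get_response(fakes.wsgi_app())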
+ +import json +import webob + +from cinder import context +from cinder import db +from cinder.openstack.common.notifier import api as notifier_api +from cinder.openstack.common.notifier import test_notifier +from cinder import test +from cinder.tests.api import fakes + + +def return_volume_type_encryption_db(context, volume_type_id, session): + return stub_volume_type_encryption() + + +def return_volume_type_encryption(context, volume_type_id): + return stub_volume_type_encryption() + + +def stub_volume_type_encryption(): + values = { + 'cipher': 'fake_cipher', + 'control_location': 'front-end', + 'key_size': 256, + 'provider': 'fake_provider', + 'volume_type_id': 'fake_type_id', + } + return values + + +def volume_type_encryption_get(context, volume_type_id): + pass + + +class VolumeTypeEncryptionTest(test.TestCase): + + def setUp(self): + super(VolumeTypeEncryptionTest, self).setUp() + self.flags(host='fake', + notification_driver=[test_notifier.__name__]) + self.api_path = '/v2/fake/os-volume-types/1/encryption' + """to reset notifier drivers left over from other api/contrib tests""" + notifier_api._reset_drivers() + test_notifier.NOTIFICATIONS = [] + + def tearDown(self): + notifier_api._reset_drivers() + super(VolumeTypeEncryptionTest, self).tearDown() + + def _get_response(self, volume_type, admin=True, + url='/v2/fake/types/%s/encryption', + req_method='GET', req_body=None, + req_headers=None): + ctxt = context.RequestContext('fake', 'fake', is_admin=admin) + + req = webob.Request.blank(url % volume_type['id']) + req.method = req_method + req.body = req_body + if req_headers: + req.headers['Content-Type'] = req_headers + + return req.get_response(fakes.wsgi_app(fake_auth_context=ctxt)) + + def test_index(self): + self.stubs.Set(db, 'volume_type_encryption_get', + return_volume_type_encryption) + + volume_type = { + 'id': 'fake_type_id', + 'name': 'fake_type', + } + db.volume_type_create(context.get_admin_context(), volume_type) + + res = self._get_response(volume_type) + self.assertEqual(200, res.status_code) + res_dict = json.loads(res.body) + + expected = stub_volume_type_encryption() + self.assertEqual(expected, res_dict) + + db.volume_type_destroy(context.get_admin_context(), volume_type['id']) + + def test_index_invalid_type(self): + volume_type = { + 'id': 'fake_type_id', + 'name': 'fake_type', + } + + res = self._get_response(volume_type) + self.assertEqual(404, res.status_code) + res_dict = json.loads(res.body) + + expected = { + 'itemNotFound': { + 'code': 404, + 'message': ('Volume type %s could not be found.' 
+ % volume_type['id']) + } + } + self.assertEqual(expected, res_dict) + + def test_show_key_size(self): + self.stubs.Set(db, 'volume_type_encryption_get', + return_volume_type_encryption) + + volume_type = { + 'id': 'fake_type_id', + 'name': 'fake_type', + } + db.volume_type_create(context.get_admin_context(), volume_type) + + res = self._get_response(volume_type, + url='/v2/fake/types/%s/encryption/key_size') + res_dict = json.loads(res.body) + + self.assertEqual(200, res.status_code) + self.assertEqual(256, res_dict['key_size']) + + db.volume_type_destroy(context.get_admin_context(), volume_type['id']) + + def test_show_provider(self): + self.stubs.Set(db, 'volume_type_encryption_get', + return_volume_type_encryption) + + volume_type = { + 'id': 'fake_type_id', + 'name': 'fake_type', + } + db.volume_type_create(context.get_admin_context(), volume_type) + + res = self._get_response(volume_type, + url='/v2/fake/types/%s/encryption/provider') + res_dict = json.loads(res.body) + + self.assertEqual(200, res.status_code) + self.assertEqual('fake_provider', res_dict['provider']) + db.volume_type_destroy(context.get_admin_context(), volume_type['id']) + + def test_show_item_not_found(self): + self.stubs.Set(db, 'volume_type_encryption_get', + return_volume_type_encryption) + + volume_type = { + 'id': 'fake_type_id', + 'name': 'fake_type', + } + db.volume_type_create(context.get_admin_context(), volume_type) + + res = self._get_response(volume_type, + url='/v2/fake/types/%s/encryption/fake') + res_dict = json.loads(res.body) + + self.assertEqual(404, res.status_code) + expected = { + 'itemNotFound': { + 'code': 404, + 'message': ('The resource could not be found.') + } + } + self.assertEqual(expected, res_dict) + db.volume_type_destroy(context.get_admin_context(), volume_type['id']) + + def _create(self, cipher, control_location, key_size, provider): + volume_type = { + 'id': 'fake_type_id', + 'name': 'fake_type', + } + db.volume_type_create(context.get_admin_context(), volume_type) + + body = {"encryption": {'cipher': cipher, + 'control_location': control_location, + 'key_size': key_size, + 'provider': provider, + 'volume_type_id': volume_type['id']}} + + self.assertEqual(len(test_notifier.NOTIFICATIONS), 0) + res = self._get_response(volume_type) + res_dict = json.loads(res.body) + self.assertEqual(200, res.status_code) + # Confirm that volume type has no encryption information + # before create. + self.assertEqual('{}', res.body) + + # Create encryption specs for the volume type + # with the defined body. 
+ res = self._get_response(volume_type, req_method='POST', + req_body=json.dumps(body), + req_headers='application/json') + res_dict = json.loads(res.body) + + self.assertEqual(len(test_notifier.NOTIFICATIONS), 1) + + # check response + self.assertIn('encryption', res_dict) + self.assertEqual(cipher, res_dict['encryption']['cipher']) + self.assertEqual(control_location, + res_dict['encryption']['control_location']) + self.assertEqual(key_size, res_dict['encryption']['key_size']) + self.assertEqual(provider, res_dict['encryption']['provider']) + self.assertEqual(volume_type['id'], + res_dict['encryption']['volume_type_id']) + + # check database + encryption = db.volume_type_encryption_get(context.get_admin_context(), + volume_type['id']) + self.assertIsNotNone(encryption) + self.assertEqual(cipher, encryption['cipher']) + self.assertEqual(key_size, encryption['key_size']) + self.assertEqual(provider, encryption['provider']) + self.assertEqual(volume_type['id'], encryption['volume_type_id']) + + db.volume_type_destroy(context.get_admin_context(), volume_type['id']) + + def test_create_json(self): + self._create('fake_cipher', 'front-end', 128, 'fake_encryptor') + + def test_create_xml(self): + volume_type = { + 'id': 'fake_type_id', + 'name': 'fake_type', + } + db.volume_type_create(context.get_admin_context(), volume_type) + + ctxt = context.RequestContext('fake', 'fake', is_admin=True) + + req = webob.Request.blank('/v2/fake/types/%s/encryption' + % volume_type['id']) + req.method = 'POST' + req.body = ('<encryption provider="test_provider" ' 'cipher="cipher" control_location="front-end"/>') + req.headers['Content-Type'] = 'application/xml' + req.headers['Accept'] = 'application/xml' + res = req.get_response(fakes.wsgi_app(fake_auth_context=ctxt)) + + self.assertEqual(res.status_int, 200) + + db.volume_type_destroy(context.get_admin_context(), volume_type['id']) + + def test_create_invalid_volume_type(self): + volume_type = { + 'id': 'fake_type_id', + 'name': 'fake_type', + } + + body = {"encryption": {'cipher': 'cipher', + 'control_location': 'front-end', + 'key_size': 128, + 'provider': 'fake_provider', + 'volume_type_id': 'volume_type'}} + + res = self._get_response(volume_type, req_method='POST', + req_body=json.dumps(body), + req_headers='application/json') + res_dict = json.loads(res.body) + + self.assertEqual(len(test_notifier.NOTIFICATIONS), 0) + self.assertEqual(404, res.status_code) + + expected = { + 'itemNotFound': { + 'code': 404, + 'message': ('Volume type %s could not be found.' + % volume_type['id']) + } + } + self.assertEqual(expected, res_dict) + + def test_create_encryption_type_exists(self): + self.stubs.Set(db, 'volume_type_encryption_get', + return_volume_type_encryption) + + volume_type = { + 'id': 'fake_type_id', + 'name': 'fake_type', + } + db.volume_type_create(context.get_admin_context(), volume_type) + + body = {"encryption": {'cipher': 'cipher', + 'control_location': 'front-end', + 'key_size': 128, + 'provider': 'fake_provider', + 'volume_type_id': volume_type['id']}} + + # Try to create encryption specs for a volume type + # that already has them.
+ res = self._get_response(volume_type, req_method='POST', + req_body=json.dumps(body), + req_headers='application/json') + res_dict = json.loads(res.body) + + expected = { + 'badRequest': { + 'code': 400, + 'message': ('Volume type encryption for type ' + 'fake_type_id already exists.') + } + } + self.assertEqual(expected, res_dict) + db.volume_type_destroy(context.get_admin_context(), volume_type['id']) + + def test_create_volume_exists(self): + # Create the volume type and a volume with the volume type. + volume_type = { + 'id': 'fake_type_id', + 'name': 'fake_type', + } + db.volume_type_create(context.get_admin_context(), volume_type) + db.volume_create(context.get_admin_context(), + {'id': 'fake_id', + 'display_description': 'Test Desc', + 'size': 20, + 'status': 'creating', + 'instance_uuid': None, + 'host': 'dummy', + 'volume_type_id': volume_type['id']}) + + body = {"encryption": {'cipher': 'cipher', + 'key_size': 128, + 'control_location': 'front-end', + 'provider': 'fake_provider', + 'volume_type_id': volume_type['id']}} + + # Try to create encryption specs for a volume type + # with a volume. + res = self._get_response(volume_type, req_method='POST', + req_body=json.dumps(body), + req_headers='application/json') + res_dict = json.loads(res.body) + + expected = { + 'badRequest': { + 'code': 400, + 'message': ('Cannot create encryption specs. ' + 'Volume type in use.') + } + } + self.assertEqual(expected, res_dict) + db.volume_destroy(context.get_admin_context(), 'fake_id') + db.volume_type_destroy(context.get_admin_context(), volume_type['id']) + + def _encryption_create_bad_body(self, body, + msg='Create body is not valid.'): + volume_type = { + 'id': 'fake_type_id', + 'name': 'fake_type', + } + db.volume_type_create(context.get_admin_context(), volume_type) + res = self._get_response(volume_type, req_method='POST', + req_body=json.dumps(body), + req_headers='application/json') + + res_dict = json.loads(res.body) + + expected = { + 'badRequest': { + 'code': 400, + 'message': (msg) + } + } + self.assertEqual(expected, res_dict) + db.volume_type_destroy(context.get_admin_context(), volume_type['id']) + + def test_create_no_body(self): + self._encryption_create_bad_body(body=None) + + def test_create_malformed_entity(self): + body = {'encryption': 'string'} + self._encryption_create_bad_body(body=body) + + def test_create_negative_key_size(self): + body = {"encryption": {'cipher': 'cipher', + 'key_size': -128, + 'provider': 'fake_provider', + 'volume_type_id': 'volume_type'}} + msg = 'Invalid input received: key_size must be non-negative' + self._encryption_create_bad_body(body=body, msg=msg) + + def test_create_none_key_size(self): + self._create('fake_cipher', 'front-end', None, 'fake_encryptor') + + def test_create_invalid_control_location(self): + body = {"encryption": {'cipher': 'cipher', + 'control_location': 'fake_control', + 'provider': 'fake_provider', + 'volume_type_id': 'volume_type'}} + msg = ("Invalid input received: Valid control location are: " + "['front-end', 'back-end']") + self._encryption_create_bad_body(body=body, msg=msg) + + def test_create_no_provider(self): + body = {"encryption": {'cipher': 'cipher', + 'volume_type_id': 'volume_type'}} + msg = ("Invalid input received: provider must be defined") + self._encryption_create_bad_body(body=body, msg=msg) + + def test_delete(self): + volume_type = { + 'id': 'fake_type_id', + 'name': 'fake_type', + } + db.volume_type_create(context.get_admin_context(), volume_type) + + # Test that before create, there's nothing 
with a get + res = self._get_response(volume_type, req_method='GET', + req_headers='application/json', + url='/v2/fake/types/%s/encryption') + self.assertEqual(200, res.status_code) + res_dict = json.loads(res.body) + self.assertEqual({}, res_dict) + + body = {"encryption": {'cipher': 'cipher', + 'key_size': 128, + 'control_location': 'front-end', + 'provider': 'fake_provider', + 'volume_type_id': volume_type['id']}} + + # Create, and test that get returns something + res = self._get_response(volume_type, req_method='POST', + req_body=json.dumps(body), + req_headers='application/json') + res_dict = json.loads(res.body) + + res = self._get_response(volume_type, req_method='GET', + req_headers='application/json', + url='/v2/fake/types/%s/encryption') + self.assertEqual(200, res.status_code) + res_dict = json.loads(res.body) + self.assertEqual(volume_type['id'], res_dict['volume_type_id']) + + # Delete, and test that get returns nothing + res = self._get_response(volume_type, req_method='DELETE', + req_headers='application/json', + url='/v2/fake/types/%s/encryption/provider') + self.assertEqual(202, res.status_code) + self.assertEqual(0, len(res.body)) + res = self._get_response(volume_type, req_method='GET', + req_headers='application/json', + url='/v2/fake/types/%s/encryption') + self.assertEqual(200, res.status_code) + res_dict = json.loads(res.body) + self.assertEqual({}, res_dict) + + db.volume_type_destroy(context.get_admin_context(), volume_type['id']) + + def test_delete_with_volume_in_use(self): + # Create the volume type + volume_type = { + 'id': 'fake_type_id', + 'name': 'fake_type', + } + db.volume_type_create(context.get_admin_context(), volume_type) + + body = {"encryption": {'cipher': 'cipher', + 'key_size': 128, + 'control_location': 'front-end', + 'provider': 'fake_provider', + 'volume_type_id': volume_type['id']}} + + # Create encryption with volume type, and test with GET + res = self._get_response(volume_type, req_method='POST', + req_body=json.dumps(body), + req_headers='application/json') + res = self._get_response(volume_type, req_method='GET', + req_headers='application/json', + url='/v2/fake/types/%s/encryption') + self.assertEqual(200, res.status_code) + res_dict = json.loads(res.body) + self.assertEqual(volume_type['id'], res_dict['volume_type_id']) + + # Create volumes with the volume type + db.volume_create(context.get_admin_context(), + {'id': 'fake_id', + 'display_description': 'Test Desc', + 'size': 20, + 'status': 'creating', + 'instance_uuid': None, + 'host': 'dummy', + 'volume_type_id': volume_type['id']}) + + db.volume_create(context.get_admin_context(), + {'id': 'fake_id2', + 'display_description': 'Test Desc2', + 'size': 2, + 'status': 'creating', + 'instance_uuid': None, + 'host': 'dummy', + 'volume_type_id': volume_type['id']}) + + # Delete, and test that there is an error since volumes exist + res = self._get_response(volume_type, req_method='DELETE', + req_headers='application/json', + url='/v2/fake/types/%s/encryption/provider') + self.assertEqual(400, res.status_code) + res_dict = json.loads(res.body) + expected = { + 'badRequest': { + 'code': 400, + 'message': 'Cannot delete encryption specs. ' + 'Volume type in use.' 
+ } + } + self.assertEqual(expected, res_dict) + + # Delete the volumes + db.volume_destroy(context.get_admin_context(), 'fake_id') + db.volume_destroy(context.get_admin_context(), 'fake_id2') + + # Delete, and test that get returns nothing + res = self._get_response(volume_type, req_method='DELETE', + req_headers='application/json', + url='/v2/fake/types/%s/encryption/provider') + self.assertEqual(202, res.status_code) + self.assertEqual(0, len(res.body)) + res = self._get_response(volume_type, req_method='GET', + req_headers='application/json', + url='/v2/fake/types/%s/encryption') + self.assertEqual(200, res.status_code) + res_dict = json.loads(res.body) + self.assertEqual({}, res_dict) + + db.volume_type_destroy(context.get_admin_context(), volume_type['id']) diff --git a/cinder/tests/api/extensions/__init__.py b/cinder/tests/api/extensions/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cinder/tests/api/openstack/volume/extensions/foxinsocks.py b/cinder/tests/api/extensions/foxinsocks.py similarity index 91% rename from cinder/tests/api/openstack/volume/extensions/foxinsocks.py rename to cinder/tests/api/extensions/foxinsocks.py index 5fcd6a2ba0..fcc9635d33 100644 --- a/cinder/tests/api/openstack/volume/extensions/foxinsocks.py +++ b/cinder/tests/api/extensions/foxinsocks.py @@ -1,6 +1,4 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2011 OpenStack LLC. +# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -17,7 +15,7 @@ import webob.exc -from cinder.api.openstack import extensions +from cinder.api import extensions from cinder.api.openstack import wsgi @@ -61,7 +59,7 @@ def show(self, req, resp_obj, id): class Foxinsocks(extensions.ExtensionDescriptor): - """The Fox In Socks Extension""" + """The Fox In Socks Extension.""" name = "Fox In Socks" alias = "FOXNSOX" @@ -74,7 +72,7 @@ def __init__(self, ext_mgr): def get_resources(self): resources = [] resource = extensions.ResourceExtension('foxnsocks', - FoxInSocksController()) + FoxInSocksController()) resources.append(resource) return resources @@ -84,8 +82,7 @@ def get_controller_extensions(self): extension_set = [ (FoxInSocksServerControllerExtension, 'servers'), (FoxInSocksFlavorGooseControllerExtension, 'flavors'), - (FoxInSocksFlavorBandsControllerExtension, 'flavors'), - ] + (FoxInSocksFlavorBandsControllerExtension, 'flavors'), ] for klass, collection in extension_set: controller = klass() ext = extensions.ControllerExtension(self, collection, controller) diff --git a/cinder/tests/api/fakes.py b/cinder/tests/api/fakes.py new file mode 100644 index 0000000000..822363d880 --- /dev/null +++ b/cinder/tests/api/fakes.py @@ -0,0 +1,193 @@ +# Copyright 2010 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
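The volume-type-encryption tests concluded above follow one create/show/delete cycle against /v2/fake/types/<id>/encryption. A minimal sketch of a single turn of that cycle under the same conventions; encryption_request is a hypothetical helper, and the admin context mirrors the tests' _get_response default:

    import json

    import webob

    from cinder import context
    from cinder.tests.api import fakes


    def encryption_request(type_id, method='GET', body=None):
        # The encryption tests run as admin by default.
        ctxt = context.RequestContext('fake', 'fake', is_admin=True)
        req = webob.Request.blank('/v2/fake/types/%s/encryption' % type_id)
        req.method = method
        if body is not None:
            req.headers['Content-Type'] = 'application/json'
            req.body = json.dumps(body)
        return req.get_response(fakes.wsgi_app(fake_auth_context=ctxt))

    # POST creates the specs, GET shows them ({} when unset), and DELETE on
    # .../encryption/provider removes them, exactly as test_delete exercises.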
+ +import uuid + +import routes +import webob +import webob.dec +import webob.request + +from cinder.api.middleware import auth +from cinder.api.middleware import fault +from cinder.api.openstack import wsgi as os_wsgi +from cinder.api import urlmap +from cinder.api.v2 import limits +from cinder.api.v2 import router +from cinder.api import versions +from cinder import context +from cinder import exception as exc +from cinder.openstack.common import timeutils +from cinder import wsgi + + +FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' +FAKE_UUIDS = {} + + +class Context(object): + pass + + +class FakeRouter(wsgi.Router): + def __init__(self, ext_mgr=None): + pass + + @webob.dec.wsgify + def __call__(self, req): + res = webob.Response() + res.status = '200' + res.headers['X-Test-Success'] = 'True' + return res + + +@webob.dec.wsgify +def fake_wsgi(self, req): + return self.application + + +def wsgi_app(inner_app_v2=None, fake_auth=True, fake_auth_context=None, + use_no_auth=False, ext_mgr=None): + if not inner_app_v2: + inner_app_v2 = router.APIRouter(ext_mgr) + + if fake_auth: + if fake_auth_context is not None: + ctxt = fake_auth_context + else: + ctxt = context.RequestContext('fake', 'fake', auth_token=True) + api_v2 = fault.FaultWrapper(auth.InjectContext(ctxt, + inner_app_v2)) + elif use_no_auth: + api_v2 = fault.FaultWrapper(auth.NoAuthMiddleware( + limits.RateLimitingMiddleware(inner_app_v2))) + else: + api_v2 = fault.FaultWrapper(auth.AuthMiddleware( + limits.RateLimitingMiddleware(inner_app_v2))) + + mapper = urlmap.URLMap() + mapper['/v2'] = api_v2 + mapper['/'] = fault.FaultWrapper(versions.Versions()) + return mapper + + +def stub_out_rate_limiting(stubs): + def fake_rate_init(self, app): + # super(limits.RateLimitingMiddleware, self).__init__(app) + self.application = app + + # FIXME(ja): unsure about limits in volumes + # stubs.Set(cinder.api.openstack.compute.limits.RateLimitingMiddleware, + # '__init__', fake_rate_init) + + # stubs.Set(cinder.api.openstack.compute.limits.RateLimitingMiddleware, + # '__call__', fake_wsgi) + + +def stub_out_key_pair_funcs(stubs, have_key_pair=True): + def key_pair(context, user_id): + return [dict(name='key', public_key='public_key')] + + def one_key_pair(context, user_id, name): + if name == 'key': + return dict(name='key', public_key='public_key') + else: + raise exc.KeypairNotFound(user_id=user_id, name=name) + + def no_key_pair(context, user_id): + return [] + + +class FakeToken(object): + id_count = 0 + + def __getitem__(self, key): + return getattr(self, key) + + def __init__(self, **kwargs): + FakeToken.id_count += 1 + self.id = FakeToken.id_count + for k, v in kwargs.iteritems(): + setattr(self, k, v) + + +class FakeRequestContext(context.RequestContext): + def __init__(self, *args, **kwargs): + kwargs['auth_token'] = kwargs.get('auth_token', 'fake_auth_token') + return super(FakeRequestContext, self).__init__(*args, **kwargs) + + +class HTTPRequest(webob.Request): + + @classmethod + def blank(cls, *args, **kwargs): + if args != None: + if args[0].find('v1') == 0: + kwargs['base_url'] = 'http://localhost/v1' + else: + kwargs['base_url'] = 'http://localhost/v2' + + use_admin_context = kwargs.pop('use_admin_context', False) + out = os_wsgi.Request.blank(*args, **kwargs) + out.environ['cinder.context'] = FakeRequestContext( + 'fake_user', + 'fake', + is_admin=use_admin_context) + return out + + +class TestRouter(wsgi.Router): + def __init__(self, controller): + mapper = routes.Mapper() + mapper.resource("test", "tests", + 
controller=os_wsgi.Resource(controller)) + super(TestRouter, self).__init__(mapper) + + +class FakeAuthDatabase(object): + data = {} + + @staticmethod + def auth_token_get(context, token_hash): + return FakeAuthDatabase.data.get(token_hash, None) + + @staticmethod + def auth_token_create(context, token): + fake_token = FakeToken(created_at=timeutils.utcnow(), **token) + FakeAuthDatabase.data[fake_token.token_hash] = fake_token + FakeAuthDatabase.data['id_%i' % fake_token.id] = fake_token + return fake_token + + @staticmethod + def auth_token_destroy(context, token_id): + token = FakeAuthDatabase.data.get('id_%i' % token_id) + if token and token.token_hash in FakeAuthDatabase.data: + del FakeAuthDatabase.data[token.token_hash] + del FakeAuthDatabase.data['id_%i' % token_id] + + +class FakeRateLimiter(object): + def __init__(self, application): + self.application = application + + @webob.dec.wsgify + def __call__(self, req): + return self.application + + +def get_fake_uuid(token=0): + if token not in FAKE_UUIDS: + FAKE_UUIDS[token] = str(uuid.uuid4()) + return FAKE_UUIDS[token] diff --git a/cinder/tests/api/middleware/__init__.py b/cinder/tests/api/middleware/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cinder/tests/api/test_auth.py b/cinder/tests/api/middleware/test_auth.py similarity index 77% rename from cinder/tests/api/test_auth.py rename to cinder/tests/api/middleware/test_auth.py index cfb8b7775c..600fed80de 100644 --- a/cinder/tests/api/test_auth.py +++ b/cinder/tests/api/middleware/test_auth.py @@ -1,4 +1,4 @@ -# Copyright (c) 2012 OpenStack, LLC +# Copyright (c) 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain @@ -14,7 +14,7 @@ import webob -import cinder.api.auth +import cinder.api.middleware.auth from cinder import test @@ -29,7 +29,8 @@ def fake_app(req): return webob.Response() self.context = None - self.middleware = cinder.api.auth.CinderKeystoneContext(fake_app) + self.middleware = (cinder.api.middleware.auth + .CinderKeystoneContext(fake_app)) self.request = webob.Request.blank('/') self.request.headers['X_TENANT_ID'] = 'testtenantid' self.request.headers['X_AUTH_TOKEN'] = 'testauthtoken' @@ -39,16 +40,16 @@ def test_no_user_or_user_id(self): self.assertEqual(response.status, '401 Unauthorized') def test_user_only(self): - self.request.headers['X_USER_ID'] = 'testuserid' + self.request.headers['X_USER'] = 'testuser' response = self.request.get_response(self.middleware) self.assertEqual(response.status, '200 OK') - self.assertEqual(self.context.user_id, 'testuserid') + self.assertEqual(self.context.user_id, 'testuser') def test_user_id_only(self): - self.request.headers['X_USER'] = 'testuser' + self.request.headers['X_USER_ID'] = 'testuserid' response = self.request.get_response(self.middleware) self.assertEqual(response.status, '200 OK') - self.assertEqual(self.context.user_id, 'testuser') + self.assertEqual(self.context.user_id, 'testuserid') def test_user_id_trumps_user(self): self.request.headers['X_USER_ID'] = 'testuserid' @@ -56,3 +57,11 @@ def test_user_id_trumps_user(self): response = self.request.get_response(self.middleware) self.assertEqual(response.status, '200 OK') self.assertEqual(self.context.user_id, 'testuserid') + + def test_tenant_id_name(self): + self.request.headers['X_USER_ID'] = 'testuserid' + self.request.headers['X_TENANT_NAME'] = 'testtenantname' + response = self.request.get_response(self.middleware) + self.assertEqual(response.status, '200 OK') + self.assertEqual(self.context.project_id, 'testtenantid') + self.assertEqual(self.context.project_name, 'testtenantname') diff --git a/cinder/tests/api/openstack/test_faults.py b/cinder/tests/api/middleware/test_faults.py similarity index 70% rename from cinder/tests/api/openstack/test_faults.py rename to cinder/tests/api/middleware/test_faults.py index 33f8300050..83100feca3 100644 --- a/cinder/tests/api/openstack/test_faults.py +++ b/cinder/tests/api/middleware/test_faults.py @@ -1,6 +1,4 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 OpenStack LLC. +# Copyright 2010 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -17,14 +15,17 @@ from xml.dom import minidom -import webob +import gettext +import mock import webob.dec import webob.exc -from cinder import test -from cinder.api.openstack import common +from cinder.api import common from cinder.api.openstack import wsgi +from cinder import exception +from cinder.openstack.common import gettextutils from cinder.openstack.common import jsonutils +from cinder import test class TestFaults(test.TestCase): @@ -69,7 +70,7 @@ def test_413_fault_json(self): for request in requests: exc = webob.exc.HTTPRequestEntityTooLarge fault = wsgi.Fault(exc(explanation='sorry', - headers={'Retry-After': 4})) + headers={'Retry-After': 4})) response = request.get_response(fault) expected = { @@ -94,7 +95,7 @@ def raiser(req): resp = req.get_response(raiser) self.assertEqual(resp.content_type, "application/xml") self.assertEqual(resp.status_int, 404) - self.assertTrue('whut?' 
in resp.body) + self.assertIn('whut?', resp.body) def test_raise_403(self): """Ensure the ability to raise :class:`Fault` in WSGI-ified methods.""" @@ -106,23 +107,79 @@ def raiser(req): resp = req.get_response(raiser) self.assertEqual(resp.content_type, "application/xml") self.assertEqual(resp.status_int, 403) - self.assertTrue('resizeNotAllowed' not in resp.body) - self.assertTrue('forbidden' in resp.body) + self.assertNotIn('resizeNotAllowed', resp.body) + self.assertIn('forbidden', resp.body) + + def test_raise_http_with_localized_explanation(self): + params = ('blah', ) + expl = gettextutils.Message("String with params: %s" % params, 'test') + + def _mock_translation(msg, locale): + return "Mensaje traducido" + + self.stubs.Set(gettextutils, + "translate", _mock_translation) + + @webob.dec.wsgify + def raiser(req): + raise wsgi.Fault(webob.exc.HTTPNotFound(explanation=expl)) + + req = webob.Request.blank('/.xml') + resp = req.get_response(raiser) + self.assertEqual(resp.content_type, "application/xml") + self.assertEqual(resp.status_int, 404) + self.assertIn(("Mensaje traducido"), resp.body) + self.stubs.UnsetAll() + + @mock.patch('cinder.openstack.common.gettextutils.gettext.translation') + def test_raise_invalid_with_localized_explanation(self, mock_translation): + msg_template = gettextutils.Message("Invalid input: %(reason)s", "") + reason = gettextutils.Message("Value is invalid", "") + + class MockESTranslations(gettext.GNUTranslations): + def ugettext(self, msgid): + if "Invalid input" in msgid: + return "Entrada invalida: %(reason)s" + elif "Value is invalid" in msgid: + return "El valor es invalido" + return msgid + + def translation(domain, localedir=None, languages=None, fallback=None): + return MockESTranslations() + + mock_translation.side_effect = translation + + @webob.dec.wsgify + def raiser(req): + class MyInvalidInput(exception.InvalidInput): + message = msg_template + + ex = MyInvalidInput(reason=reason) + raise wsgi.Fault(exception.ConvertedException(code=ex.code, + explanation=ex.msg)) + + req = webob.Request.blank("/.json") + resp = req.get_response(raiser) + self.assertEqual(resp.content_type, "application/json") + self.assertEqual(resp.status_int, 400) + # This response was comprised of Message objects from two different + # exceptions, here we are testing that both got translated + self.assertIn("Entrada invalida: El valor es invalido", resp.body) def test_fault_has_status_int(self): - """Ensure the status_int is set correctly on faults""" + """Ensure the status_int is set correctly on faults.""" fault = wsgi.Fault(webob.exc.HTTPBadRequest(explanation='what?')) self.assertEqual(fault.status_int, 400) def test_xml_serializer(self): - """Ensure that a v1.1 request responds with a v1 xmlns""" + """Ensure that a v1.1 request responds with a v1 xmlns.""" request = webob.Request.blank('/v1', headers={"Accept": "application/xml"}) fault = wsgi.Fault(webob.exc.HTTPBadRequest(explanation='scram')) response = request.get_response(fault) - self.assertTrue(common.XML_NS_V1 in response.body) + self.assertIn(common.XML_NS_V1, response.body) self.assertEqual(response.content_type, "application/xml") self.assertEqual(response.status_int, 400) diff --git a/cinder/tests/api/middleware/test_sizelimit.py b/cinder/tests/api/middleware/test_sizelimit.py new file mode 100644 index 0000000000..80184d828c --- /dev/null +++ b/cinder/tests/api/middleware/test_sizelimit.py @@ -0,0 +1,103 @@ +# Copyright (c) 2012 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 
(the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import StringIO + +from oslo.config import cfg +import webob + +from cinder.api.middleware import sizelimit +from cinder import test + + +CONF = cfg.CONF + +MAX_REQUEST_BODY_SIZE = CONF.osapi_max_request_body_size + + +class TestLimitingReader(test.TestCase): + + def test_limiting_reader(self): + BYTES = 1024 + bytes_read = 0 + data = StringIO.StringIO("*" * BYTES) + for chunk in sizelimit.LimitingReader(data, BYTES): + bytes_read += len(chunk) + + self.assertEqual(bytes_read, BYTES) + + bytes_read = 0 + data = StringIO.StringIO("*" * BYTES) + reader = sizelimit.LimitingReader(data, BYTES) + byte = reader.read(1) + while len(byte) != 0: + bytes_read += 1 + byte = reader.read(1) + + self.assertEqual(bytes_read, BYTES) + + def test_limiting_reader_fails(self): + BYTES = 1024 + + def _consume_all_iter(): + bytes_read = 0 + data = StringIO.StringIO("*" * BYTES) + for chunk in sizelimit.LimitingReader(data, BYTES - 1): + bytes_read += len(chunk) + + self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, + _consume_all_iter) + + def _consume_all_read(): + bytes_read = 0 + data = StringIO.StringIO("*" * BYTES) + reader = sizelimit.LimitingReader(data, BYTES - 1) + byte = reader.read(1) + while len(byte) != 0: + bytes_read += 1 + byte = reader.read(1) + + self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, + _consume_all_read) + + +class TestRequestBodySizeLimiter(test.TestCase): + + def setUp(self): + super(TestRequestBodySizeLimiter, self).setUp() + + @webob.dec.wsgify() + def fake_app(req): + return webob.Response(req.body) + + self.middleware = sizelimit.RequestBodySizeLimiter(fake_app) + self.request = webob.Request.blank('/', method='POST') + + def test_content_length_acceptable(self): + self.request.headers['Content-Length'] = MAX_REQUEST_BODY_SIZE + self.request.body = "0" * MAX_REQUEST_BODY_SIZE + response = self.request.get_response(self.middleware) + self.assertEqual(response.status_int, 200) + + def test_content_length_too_large(self): + self.request.headers['Content-Length'] = MAX_REQUEST_BODY_SIZE + 1 + self.request.body = "0" * (MAX_REQUEST_BODY_SIZE + 1) + response = self.request.get_response(self.middleware) + self.assertEqual(response.status_int, 413) + + def test_request_too_large_no_content_length(self): + self.request.body = "0" * (MAX_REQUEST_BODY_SIZE + 1) + self.request.headers['Content-Length'] = None + response = self.request.get_response(self.middleware) + self.assertEqual(response.status_int, 413) diff --git a/cinder/tests/api/openstack/__init__.py b/cinder/tests/api/openstack/__init__.py index 3be5ce944c..a2c6e2e27d 100644 --- a/cinder/tests/api/openstack/__init__.py +++ b/cinder/tests/api/openstack/__init__.py @@ -1,6 +1,4 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2011 OpenStack LLC. +# Copyright 2011 OpenStack Foundation # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/cinder/tests/api/openstack/fakes.py b/cinder/tests/api/openstack/fakes.py deleted file mode 100644 index b855533973..0000000000 --- a/cinder/tests/api/openstack/fakes.py +++ /dev/null @@ -1,283 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 OpenStack LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime - -import routes -import webob -import webob.dec -import webob.request - -from cinder.api import auth as api_auth -from cinder.api import openstack as openstack_api -from cinder.api.openstack import auth -from cinder.api.openstack.volume import limits -from cinder.api.openstack import urlmap -from cinder.api.openstack import volume -from cinder.api.openstack.volume import versions -from cinder.api.openstack import wsgi as os_wsgi -from cinder import context -from cinder import exception as exc -from cinder import utils -from cinder import wsgi -from cinder.openstack.common import timeutils - - -FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' -FAKE_UUIDS = {} - - -class Context(object): - pass - - -class FakeRouter(wsgi.Router): - def __init__(self, ext_mgr=None): - pass - - @webob.dec.wsgify - def __call__(self, req): - res = webob.Response() - res.status = '200' - res.headers['X-Test-Success'] = 'True' - return res - - -@webob.dec.wsgify -def fake_wsgi(self, req): - return self.application - - -def wsgi_app(inner_app_v1=None, fake_auth=True, fake_auth_context=None, - use_no_auth=False, ext_mgr=None): - if not inner_app_v1: - inner_app_v1 = volume.APIRouter(ext_mgr) - - if fake_auth: - if fake_auth_context is not None: - ctxt = fake_auth_context - else: - ctxt = context.RequestContext('fake', 'fake', auth_token=True) - api_v1 = openstack_api.FaultWrapper(api_auth.InjectContext(ctxt, - inner_app_v1)) - elif use_no_auth: - api_v1 = openstack_api.FaultWrapper(auth.NoAuthMiddleware( - limits.RateLimitingMiddleware(inner_app_v1))) - else: - api_v1 = openstack_api.FaultWrapper(auth.AuthMiddleware( - limits.RateLimitingMiddleware(inner_app_v1))) - - mapper = urlmap.URLMap() - mapper['/v1'] = api_v1 - mapper['/'] = openstack_api.FaultWrapper(versions.Versions()) - return mapper - - -def stub_out_rate_limiting(stubs): - def fake_rate_init(self, app): - # super(limits.RateLimitingMiddleware, self).__init__(app) - self.application = app - - # FIXME(ja): unsure about limits in volumes - # stubs.Set(cinder.api.openstack.compute.limits.RateLimitingMiddleware, - # '__init__', fake_rate_init) - - # stubs.Set(cinder.api.openstack.compute.limits.RateLimitingMiddleware, - # '__call__', fake_wsgi) - - -class FakeToken(object): - id_count = 0 - - def __getitem__(self, key): - return getattr(self, key) - - def __init__(self, **kwargs): - FakeToken.id_count += 1 - self.id = FakeToken.id_count - for k, v in kwargs.iteritems(): - setattr(self, k, v) - - -class FakeRequestContext(context.RequestContext): - def __init__(self, *args, **kwargs): - 
kwargs['auth_token'] = kwargs.get('auth_token', 'fake_auth_token') - return super(FakeRequestContext, self).__init__(*args, **kwargs) - - -class HTTPRequest(webob.Request): - - @classmethod - def blank(cls, *args, **kwargs): - kwargs['base_url'] = 'http://localhost/v1' - use_admin_context = kwargs.pop('use_admin_context', False) - out = webob.Request.blank(*args, **kwargs) - out.environ['cinder.context'] = FakeRequestContext('fake_user', 'fake', - is_admin=use_admin_context) - return out - - -class TestRouter(wsgi.Router): - def __init__(self, controller): - mapper = routes.Mapper() - mapper.resource("test", "tests", - controller=os_wsgi.Resource(controller)) - super(TestRouter, self).__init__(mapper) - - -class FakeAuthDatabase(object): - data = {} - - @staticmethod - def auth_token_get(context, token_hash): - return FakeAuthDatabase.data.get(token_hash, None) - - @staticmethod - def auth_token_create(context, token): - fake_token = FakeToken(created_at=timeutils.utcnow(), **token) - FakeAuthDatabase.data[fake_token.token_hash] = fake_token - FakeAuthDatabase.data['id_%i' % fake_token.id] = fake_token - return fake_token - - @staticmethod - def auth_token_destroy(context, token_id): - token = FakeAuthDatabase.data.get('id_%i' % token_id) - if token and token.token_hash in FakeAuthDatabase.data: - del FakeAuthDatabase.data[token.token_hash] - del FakeAuthDatabase.data['id_%i' % token_id] - - -class FakeRateLimiter(object): - def __init__(self, application): - self.application = application - - @webob.dec.wsgify - def __call__(self, req): - return self.application - - -def get_fake_uuid(token=0): - if not token in FAKE_UUIDS: - FAKE_UUIDS[token] = str(utils.gen_uuid()) - return FAKE_UUIDS[token] - - -def stub_volume(id, **kwargs): - volume = { - 'id': id, - 'user_id': 'fakeuser', - 'project_id': 'fakeproject', - 'host': 'fakehost', - 'size': 1, - 'availability_zone': 'fakeaz', - 'instance_uuid': 'fakeuuid', - 'mountpoint': '/', - 'status': 'fakestatus', - 'attach_status': 'attached', - 'name': 'vol name', - 'display_name': 'displayname', - 'display_description': 'displaydesc', - 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), - 'snapshot_id': None, - 'volume_type_id': 'fakevoltype', - 'volume_metadata': [], - 'volume_type': {'name': 'vol_type_name'}} - - volume.update(kwargs) - return volume - - -def stub_volume_create(self, context, size, name, description, snapshot, - **param): - vol = stub_volume('1') - vol['size'] = size - vol['display_name'] = name - vol['display_description'] = description - try: - vol['snapshot_id'] = snapshot['id'] - except (KeyError, TypeError): - vol['snapshot_id'] = None - vol['availability_zone'] = param.get('availability_zone', 'fakeaz') - return vol - - -def stub_volume_create_from_image(self, context, size, name, description, - snapshot, volume_type, metadata, - availability_zone): - vol = stub_volume('1') - vol['status'] = 'creating' - vol['size'] = size - vol['display_name'] = name - vol['display_description'] = description - vol['availability_zone'] = 'cinder' - return vol - - -def stub_volume_update(self, context, *args, **param): - pass - - -def stub_volume_delete(self, context, *args, **param): - pass - - -def stub_volume_get(self, context, volume_id): - return stub_volume(volume_id) - - -def stub_volume_get_notfound(self, context, volume_id): - raise exc.NotFound - - -def stub_volume_get_all(context, search_opts=None): - return [stub_volume(100, project_id='fake'), - stub_volume(101, project_id='superfake'), - stub_volume(102, 
project_id='superduperfake')] - - -def stub_volume_get_all_by_project(self, context, search_opts=None): - return [stub_volume_get(self, context, '1')] - - -def stub_snapshot(id, **kwargs): - snapshot = { - 'id': id, - 'volume_id': 12, - 'status': 'available', - 'volume_size': 100, - 'created_at': None, - 'display_name': 'Default name', - 'display_description': 'Default description', - 'project_id': 'fake' - } - - snapshot.update(kwargs) - return snapshot - - -def stub_snapshot_get_all(self): - return [stub_snapshot(100, project_id='fake'), - stub_snapshot(101, project_id='superfake'), - stub_snapshot(102, project_id='superduperfake')] - - -def stub_snapshot_get_all_by_project(self, context): - return [stub_snapshot(1)] - - -def stub_snapshot_update(self, context, *args, **param): - pass diff --git a/cinder/tests/api/openstack/test_wsgi.py b/cinder/tests/api/openstack/test_wsgi.py index 8792984f99..fe7a4404e0 100644 --- a/cinder/tests/api/openstack/test_wsgi.py +++ b/cinder/tests/api/openstack/test_wsgi.py @@ -1,19 +1,29 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. import inspect import webob +from cinder.api.openstack import wsgi from cinder import exception from cinder import test -from cinder.api.openstack import wsgi -from cinder.tests.api.openstack import fakes +from cinder.tests.api import fakes class RequestTest(test.TestCase): def test_content_type_missing(self): request = wsgi.Request.blank('/tests/123', method='POST') request.body = "" - self.assertEqual(None, request.get_content_type()) + self.assertIsNone(request.get_content_type()) def test_content_type_unsupported(self): request = wsgi.Request.blank('/tests/123', method='POST') @@ -75,6 +85,60 @@ def test_content_type_accept_default(self): result = request.best_match_content_type() self.assertEqual(result, "application/json") + def test_best_match_language(self): + # Test that we are actually invoking language negotiation by webob + request = wsgi.Request.blank('/') + accepted = 'unknown-lang' + request.headers = {'Accept-Language': accepted} + + def fake_best_match(self, offers, default_match=None): + # Match would return None, if requested lang is not found + return None + + self.stubs.SmartSet(request.accept_language, + 'best_match', fake_best_match) + + self.assertIsNone(request.best_match_language()) + # If accept-language is not included or empty, match should be None + request.headers = {'Accept-Language': ''} + self.assertIsNone(request.best_match_language()) + request.headers.pop('Accept-Language') + self.assertIsNone(request.best_match_language()) + + def test_cache_and_retrieve_resources(self): + request = wsgi.Request.blank('/foo') + # Test that trying to retrieve a cached object on + # an empty cache fails gracefully + self.assertIsNone(request.cached_resource()) + self.assertIsNone(request.cached_resource_by_id('r-0')) + + resources = [] + for x in xrange(3): + resources.append({'id': 'r-%s' % x}) + + # Cache an empty list of resources using the default name + 
request.cache_resource([]) + self.assertEqual({}, request.cached_resource()) + self.assertIsNone(request.cached_resource('r-0')) + # Cache some resources + request.cache_resource(resources[:2]) + # Cache one resource + request.cache_resource(resources[2]) + # Cache a different resource name + other_resource = {'id': 'o-0'} + request.cache_resource(other_resource, name='other-resource') + + self.assertEqual(resources[0], request.cached_resource_by_id('r-0')) + self.assertEqual(resources[1], request.cached_resource_by_id('r-1')) + self.assertEqual(resources[2], request.cached_resource_by_id('r-2')) + self.assertIsNone(request.cached_resource_by_id('r-3')) + self.assertEqual({'r-0': resources[0], + 'r-1': resources[1], + 'r-2': resources[2]}, request.cached_resource()) + self.assertEqual(other_resource, + request.cached_resource_by_id('o-0', + name='other-resource')) + class ActionDispatcherTest(test.TestCase): def test_dispatch(self): @@ -181,6 +245,29 @@ def test_xml_empty(self): self.assertEqual(deserializer.deserialize(xml), as_dict) +class MetadataXMLDeserializerTest(test.TestCase): + def test_xml_meta_parsing_special_character(self): + """Test that when a SaxParser splits a string containing special + characters into multiple childNodes there are no issues extracting + the text. + """ + meta_xml_str = """ + + value&3 + value2 + value1 + + """.strip() + meta_expected = {'key1': 'value1', + 'key2': 'value2', + 'key3': 'value&3'} + meta_deserializer = wsgi.MetadataXMLDeserializer() + document = wsgi.utils.safe_minidom_parse_string(meta_xml_str) + root_node = document.childNodes[0] + meta_extracted = meta_deserializer.extract_metadata(root_node) + self.assertEqual(meta_expected, meta_extracted) + + class ResourceTest(test.TestCase): def test_resource_call(self): class Controller(object): @@ -215,7 +302,7 @@ def index(self, req, pants=None): expected = 'off' self.assertEqual(actual, expected) - def test_get_method_unknown_controller_action(self): + def test_get_method_undefined_controller_action(self): class Controller(object): def index(self, req, pants=None): return pants @@ -320,7 +407,7 @@ def index(self, req, pants=None): request.body = 'foo' content_type, body = resource.get_body(request) - self.assertEqual(content_type, None) + self.assertIsNone(content_type) self.assertEqual(body, '') def test_get_body_no_content_type(self): @@ -335,7 +422,7 @@ def index(self, req, pants=None): request.body = 'foo' content_type, body = resource.get_body(request) - self.assertEqual(content_type, None) + self.assertIsNone(content_type) self.assertEqual(body, '') def test_get_body_no_content_body(self): @@ -351,7 +438,7 @@ def index(self, req, pants=None): request.body = '' content_type, body = resource.get_body(request) - self.assertEqual(content_type, None) + self.assertIsNone(content_type) self.assertEqual(body, '') def test_get_body(self): @@ -441,10 +528,9 @@ def _action_bar(self, req, id, body): extended = ControllerExtended() resource.register_actions(extended) - self.assertEqual({ - 'fooAction': extended._action_foo, - 'barAction': extended._action_bar, - }, resource.wsgi_actions) + self.assertEqual({'fooAction': extended._action_foo, + 'barAction': extended._action_bar, }, + resource.wsgi_actions) def test_register_extensions(self): class Controller(object): @@ -563,7 +649,7 @@ def extension2(req, resp_obj): extensions = [extension1, extension2] response, post = resource.pre_process_extensions(extensions, None, {}) self.assertEqual(called, []) - self.assertEqual(response, None) + 
self.assertIsNone(response) self.assertEqual(list(post), [extension2, extension1]) def test_pre_process_extensions_generator(self): @@ -590,7 +676,7 @@ def extension2(req): response, post = resource.pre_process_extensions(extensions, None, {}) post = list(post) self.assertEqual(called, ['pre1', 'pre2']) - self.assertEqual(response, None) + self.assertIsNone(response) self.assertEqual(len(post), 2) self.assertTrue(inspect.isgenerator(post[0])) self.assertTrue(inspect.isgenerator(post[1])) @@ -647,7 +733,7 @@ def extension2(req, resp_obj): response = resource.post_process_extensions([extension2, extension1], None, None, {}) self.assertEqual(called, [2, 1]) - self.assertEqual(response, None) + self.assertIsNone(response) def test_post_process_extensions_regular_response(self): class Controller(object): @@ -699,7 +785,7 @@ def extension2(req): None, None, {}) self.assertEqual(called, [2, 1]) - self.assertEqual(response, None) + self.assertIsNone(response) def test_post_process_extensions_generator_response(self): class Controller(object): @@ -765,7 +851,7 @@ def test_del_header(self): robj = wsgi.ResponseObject({}) robj['Header'] = 'foo' del robj['hEADER'] - self.assertFalse('header' in robj.headers) + self.assertNotIn('header', robj.headers) def test_header_isolation(self): robj = wsgi.ResponseObject({}) diff --git a/cinder/tests/api/openstack/volume/contrib/test_admin_actions.py b/cinder/tests/api/openstack/volume/contrib/test_admin_actions.py deleted file mode 100644 index 91b6b73be0..0000000000 --- a/cinder/tests/api/openstack/volume/contrib/test_admin_actions.py +++ /dev/null @@ -1,252 +0,0 @@ -import webob - -from cinder import context -from cinder import db -from cinder import exception -from cinder import test -from cinder.openstack.common import jsonutils -from cinder.tests.api.openstack import fakes - - -def app(): - # no auth, just let environ['cinder.context'] pass through - api = fakes.volume.APIRouter() - mapper = fakes.urlmap.URLMap() - mapper['/v1'] = api - return mapper - - -class AdminActionsTest(test.TestCase): - - def setUp(self): - super(AdminActionsTest, self).setUp() - self.flags(rpc_backend='cinder.openstack.common.rpc.impl_fake') - - def test_reset_status_as_admin(self): - # admin context - ctx = context.RequestContext('admin', 'fake', True) - # current status is available - volume = db.volume_create(ctx, {'status': 'available'}) - req = webob.Request.blank('/v1/fake/volumes/%s/action' % volume['id']) - req.method = 'POST' - req.headers['content-type'] = 'application/json' - # request status of 'error' - req.body = jsonutils.dumps({'os-reset_status': {'status': 'error'}}) - # attach admin context to request - req.environ['cinder.context'] = ctx - resp = req.get_response(app()) - # request is accepted - self.assertEquals(resp.status_int, 202) - volume = db.volume_get(ctx, volume['id']) - # status changed to 'error' - self.assertEquals(volume['status'], 'error') - - def test_reset_status_as_non_admin(self): - # current status is 'error' - volume = db.volume_create(context.get_admin_context(), - {'status': 'error'}) - req = webob.Request.blank('/v1/fake/volumes/%s/action' % volume['id']) - req.method = 'POST' - req.headers['content-type'] = 'application/json' - # request changing status to available - req.body = jsonutils.dumps({'os-reset_status': {'status': - 'available'}}) - # non-admin context - req.environ['cinder.context'] = context.RequestContext('fake', 'fake') - resp = req.get_response(app()) - # request is not authorized - self.assertEquals(resp.status_int, 403) 
- volume = db.volume_get(context.get_admin_context(), volume['id']) - # status is still 'error' - self.assertEquals(volume['status'], 'error') - - def test_malformed_reset_status_body(self): - # admin context - ctx = context.RequestContext('admin', 'fake', True) - # current status is available - volume = db.volume_create(ctx, {'status': 'available'}) - req = webob.Request.blank('/v1/fake/volumes/%s/action' % volume['id']) - req.method = 'POST' - req.headers['content-type'] = 'application/json' - # malformed request body - req.body = jsonutils.dumps({'os-reset_status': {'x-status': 'bad'}}) - # attach admin context to request - req.environ['cinder.context'] = ctx - resp = req.get_response(app()) - # bad request - self.assertEquals(resp.status_int, 400) - volume = db.volume_get(ctx, volume['id']) - # status is still 'available' - self.assertEquals(volume['status'], 'available') - - def test_invalid_status_for_volume(self): - # admin context - ctx = context.RequestContext('admin', 'fake', True) - # current status is available - volume = db.volume_create(ctx, {'status': 'available'}) - req = webob.Request.blank('/v1/fake/volumes/%s/action' % volume['id']) - req.method = 'POST' - req.headers['content-type'] = 'application/json' - # 'invalid' is not a valid status - req.body = jsonutils.dumps({'os-reset_status': {'status': 'invalid'}}) - # attach admin context to request - req.environ['cinder.context'] = ctx - resp = req.get_response(app()) - # bad request - self.assertEquals(resp.status_int, 400) - volume = db.volume_get(ctx, volume['id']) - # status is still 'available' - self.assertEquals(volume['status'], 'available') - - def test_reset_status_for_missing_volume(self): - # admin context - ctx = context.RequestContext('admin', 'fake', True) - # missing-volume-id - req = webob.Request.blank('/v1/fake/volumes/%s/action' % - 'missing-volume-id') - req.method = 'POST' - req.headers['content-type'] = 'application/json' - # malformed request body - req.body = jsonutils.dumps({'os-reset_status': {'status': - 'available'}}) - # attach admin context to request - req.environ['cinder.context'] = ctx - resp = req.get_response(app()) - # not found - self.assertEquals(resp.status_int, 404) - self.assertRaises(exception.NotFound, db.volume_get, ctx, - 'missing-volume-id') - - def test_reset_attached_status(self): - # admin context - ctx = context.RequestContext('admin', 'fake', True) - # current status is available - volume = db.volume_create(ctx, {'status': 'available', - 'attach_status': 'attached'}) - req = webob.Request.blank('/v1/fake/volumes/%s/action' % volume['id']) - req.method = 'POST' - req.headers['content-type'] = 'application/json' - # request update attach_status to detached - body = {'os-reset_status': {'status': 'available', - 'attach_status': 'detached'}} - req.body = jsonutils.dumps(body) - # attach admin context to request - req.environ['cinder.context'] = ctx - resp = req.get_response(app()) - # request is accepted - self.assertEquals(resp.status_int, 202) - volume = db.volume_get(ctx, volume['id']) - # attach_status changed to 'detached' - self.assertEquals(volume['attach_status'], 'detached') - # status un-modified - self.assertEquals(volume['status'], 'available') - - def test_invalid_reset_attached_status(self): - # admin context - ctx = context.RequestContext('admin', 'fake', True) - # current status is available - volume = db.volume_create(ctx, {'status': 'available', - 'attach_status': 'detached'}) - req = webob.Request.blank('/v1/fake/volumes/%s/action' % volume['id']) - 
req.method = 'POST' - req.headers['content-type'] = 'application/json' - # 'invalid' is not a valid attach_status - body = {'os-reset_status': {'status': 'available', - 'attach_status': 'invalid'}} - req.body = jsonutils.dumps(body) - # attach admin context to request - req.environ['cinder.context'] = ctx - resp = req.get_response(app()) - # bad request - self.assertEquals(resp.status_int, 400) - volume = db.volume_get(ctx, volume['id']) - # status and attach_status un-modified - self.assertEquals(volume['status'], 'available') - self.assertEquals(volume['attach_status'], 'detached') - - def test_snapshot_reset_status(self): - # admin context - ctx = context.RequestContext('admin', 'fake', True) - # snapshot in 'error_deleting' - volume = db.volume_create(ctx, {}) - snapshot = db.snapshot_create(ctx, {'status': 'error_deleting', - 'volume_id': volume['id']}) - req = webob.Request.blank('/v1/fake/snapshots/%s/action' % - snapshot['id']) - req.method = 'POST' - req.headers['content-type'] = 'application/json' - # request status of 'error' - req.body = jsonutils.dumps({'os-reset_status': {'status': 'error'}}) - # attach admin context to request - req.environ['cinder.context'] = ctx - resp = req.get_response(app()) - # request is accepted - self.assertEquals(resp.status_int, 202) - snapshot = db.snapshot_get(ctx, snapshot['id']) - # status changed to 'error' - self.assertEquals(snapshot['status'], 'error') - - def test_invalid_status_for_snapshot(self): - # admin context - ctx = context.RequestContext('admin', 'fake', True) - # snapshot in 'available' - volume = db.volume_create(ctx, {}) - snapshot = db.snapshot_create(ctx, {'status': 'available', - 'volume_id': volume['id']}) - req = webob.Request.blank('/v1/fake/snapshots/%s/action' % - snapshot['id']) - req.method = 'POST' - req.headers['content-type'] = 'application/json' - # 'attaching' is not a valid status for snapshots - req.body = jsonutils.dumps({'os-reset_status': {'status': - 'attaching'}}) - # attach admin context to request - req.environ['cinder.context'] = ctx - resp = req.get_response(app()) - # request is accepted - self.assertEquals(resp.status_int, 400) - snapshot = db.snapshot_get(ctx, snapshot['id']) - # status is still 'available' - self.assertEquals(snapshot['status'], 'available') - - def test_force_delete(self): - # admin context - ctx = context.RequestContext('admin', 'fake', True) - # current status is creating - volume = db.volume_create(ctx, {'status': 'creating'}) - req = webob.Request.blank('/v1/fake/volumes/%s/action' % volume['id']) - req.method = 'POST' - req.headers['content-type'] = 'application/json' - req.body = jsonutils.dumps({'os-force_delete': {}}) - # attach admin context to request - req.environ['cinder.context'] = ctx - resp = req.get_response(app()) - # request is accepted - self.assertEquals(resp.status_int, 202) - # volume is deleted - self.assertRaises(exception.NotFound, db.volume_get, ctx, volume['id']) - - def test_force_delete_snapshot(self): - # admin context - ctx = context.RequestContext('admin', 'fake', True) - # current status is creating - volume = db.volume_create(ctx, {'host': 'test'}) - snapshot = db.snapshot_create(ctx, {'status': 'creating', - 'volume_size': 1, - 'volume_id': volume['id']}) - path = '/v1/fake/snapshots/%s/action' % snapshot['id'] - req = webob.Request.blank(path) - req.method = 'POST' - req.headers['content-type'] = 'application/json' - req.body = jsonutils.dumps({'os-force_delete': {}}) - # attach admin context to request - req.environ['cinder.context'] = ctx - # 
start service to handle rpc.cast for 'delete snapshot' - self.start_service('volume', host='test') - # make request - resp = req.get_response(app()) - # request is accepted - self.assertEquals(resp.status_int, 202) - # snapshot is deleted - self.assertRaises(exception.NotFound, db.snapshot_get, ctx, - snapshot['id']) diff --git a/cinder/tests/api/openstack/volume/contrib/test_volume_actions.py b/cinder/tests/api/openstack/volume/contrib/test_volume_actions.py deleted file mode 100644 index 2448c9f37c..0000000000 --- a/cinder/tests/api/openstack/volume/contrib/test_volume_actions.py +++ /dev/null @@ -1,245 +0,0 @@ -# Copyright 2012 OpenStack LLC. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime -import webob - -from cinder import exception -from cinder import flags -from cinder import test -from cinder import utils -from cinder import volume -from cinder.api.openstack.volume.contrib import volume_actions -from cinder.openstack.common import jsonutils -from cinder.openstack.common.rpc import common as rpc_common -from cinder.tests.api.openstack import fakes -from cinder.volume import api as volume_api - - -FLAGS = flags.FLAGS - - -def fake_volume_api(*args, **kwargs): - return True - - -def fake_volume_get(*args, **kwargs): - return {'id': 'fake', 'host': 'fake'} - - -class VolumeActionsTest(test.TestCase): - - _actions = ('os-detach', 'os-reserve', 'os-unreserve') - - _methods = ('attach', 'detach', 'reserve_volume', 'unreserve_volume') - - def setUp(self): - super(VolumeActionsTest, self).setUp() - self.stubs.Set(volume.API, 'get', fake_volume_api) - self.UUID = utils.gen_uuid() - for _method in self._methods: - self.stubs.Set(volume.API, _method, fake_volume_api) - - self.stubs.Set(volume.API, 'get', fake_volume_get) - - def test_simple_api_actions(self): - app = fakes.wsgi_app() - for _action in self._actions: - req = webob.Request.blank('/v1/fake/volumes/%s/action' % - self.UUID) - req.method = 'POST' - req.body = jsonutils.dumps({_action: None}) - req.content_type = 'application/json' - res = req.get_response(app) - self.assertEqual(res.status_int, 202) - - def test_initialize_connection(self): - def fake_initialize_connection(*args, **kwargs): - return {} - self.stubs.Set(volume.API, 'initialize_connection', - fake_initialize_connection) - - body = {'os-initialize_connection': {'connector': 'fake'}} - req = webob.Request.blank('/v1/fake/volumes/1/action') - req.method = "POST" - req.body = jsonutils.dumps(body) - req.headers["content-type"] = "application/json" - - res = req.get_response(fakes.wsgi_app()) - self.assertEqual(res.status_int, 200) - - def test_terminate_connection(self): - def fake_terminate_connection(*args, **kwargs): - return {} - self.stubs.Set(volume.API, 'terminate_connection', - fake_terminate_connection) - - body = {'os-terminate_connection': {'connector': 'fake'}} - req = webob.Request.blank('/v1/fake/volumes/1/action') - req.method = "POST" - req.body = jsonutils.dumps(body) - req.headers["content-type"] = "application/json" - - res = 
req.get_response(fakes.wsgi_app()) - self.assertEqual(res.status_int, 202) - - def test_attach(self): - body = {'os-attach': {'instance_uuid': 'fake', - 'mountpoint': '/dev/vdc'}} - req = webob.Request.blank('/v1/fake/volumes/1/action') - req.method = "POST" - req.body = jsonutils.dumps(body) - req.headers["content-type"] = "application/json" - - res = req.get_response(fakes.wsgi_app()) - self.assertEqual(res.status_int, 202) - - -def stub_volume_get(self, context, volume_id): - volume = fakes.stub_volume(volume_id) - if volume_id == 5: - volume['status'] = 'in-use' - else: - volume['status'] = 'available' - return volume - - -def stub_upload_volume_to_image_service(self, context, volume, metadata, - force): - ret = {"id": volume['id'], - "updated_at": datetime.datetime(1, 1, 1, 1, 1, 1), - "status": 'uploading', - "display_description": volume['display_description'], - "size": volume['size'], - "volume_type": volume['volume_type'], - "image_id": 1, - "container_format": 'bare', - "disk_format": 'raw', - "image_name": 'image_name'} - return ret - - -class VolumeImageActionsTest(test.TestCase): - def setUp(self): - super(VolumeImageActionsTest, self).setUp() - self.controller = volume_actions.VolumeActionsController() - - self.stubs.Set(volume_api.API, 'get', stub_volume_get) - - def test_copy_volume_to_image(self): - self.stubs.Set(volume_api.API, - "copy_volume_to_image", - stub_upload_volume_to_image_service) - - id = 1 - vol = {"container_format": 'bare', - "disk_format": 'raw', - "image_name": 'image_name', - "force": True} - body = {"os-volume_upload_image": vol} - req = fakes.HTTPRequest.blank('/v1/tenant1/volumes/%s/action' % id) - res_dict = self.controller._volume_upload_image(req, id, body) - expected = {'os-volume_upload_image': {'id': id, - 'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1), - 'status': 'uploading', - 'display_description': 'displaydesc', - 'size': 1, - 'volume_type': {'name': 'vol_type_name'}, - 'image_id': 1, - 'container_format': 'bare', - 'disk_format': 'raw', - 'image_name': 'image_name'}} - self.assertDictMatch(res_dict, expected) - - def test_copy_volume_to_image_volumenotfound(self): - def stub_volume_get_raise_exc(self, context, volume_id): - raise exception.VolumeNotFound(volume_id=volume_id) - - self.stubs.Set(volume_api.API, 'get', stub_volume_get_raise_exc) - - id = 1 - vol = {"container_format": 'bare', - "disk_format": 'raw', - "image_name": 'image_name', - "force": True} - body = {"os-volume_upload_image": vol} - req = fakes.HTTPRequest.blank('/v1/tenant1/volumes/%s/action' % id) - self.assertRaises(webob.exc.HTTPNotFound, - self.controller._volume_upload_image, - req, - id, - body) - - def test_copy_volume_to_image_invalidvolume(self): - def stub_upload_volume_to_image_service_raise(self, context, volume, - metadata, force): - raise exception.InvalidVolume - self.stubs.Set(volume_api.API, - "copy_volume_to_image", - stub_upload_volume_to_image_service_raise) - - id = 1 - vol = {"container_format": 'bare', - "disk_format": 'raw', - "image_name": 'image_name', - "force": True} - body = {"os-volume_upload_image": vol} - req = fakes.HTTPRequest.blank('/v1/tenant1/volumes/%s/action' % id) - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller._volume_upload_image, - req, - id, - body) - - def test_copy_volume_to_image_valueerror(self): - def stub_upload_volume_to_image_service_raise(self, context, volume, - metadata, force): - raise ValueError - self.stubs.Set(volume_api.API, - "copy_volume_to_image", - 
stub_upload_volume_to_image_service_raise) - - id = 1 - vol = {"container_format": 'bare', - "disk_format": 'raw', - "image_name": 'image_name', - "force": True} - body = {"os-volume_upload_image": vol} - req = fakes.HTTPRequest.blank('/v1/tenant1/volumes/%s/action' % id) - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller._volume_upload_image, - req, - id, - body) - - def test_copy_volume_to_image_remoteerror(self): - def stub_upload_volume_to_image_service_raise(self, context, volume, - metadata, force): - raise rpc_common.RemoteError - self.stubs.Set(volume_api.API, - "copy_volume_to_image", - stub_upload_volume_to_image_service_raise) - - id = 1 - vol = {"container_format": 'bare', - "disk_format": 'raw', - "image_name": 'image_name', - "force": True} - body = {"os-volume_upload_image": vol} - req = fakes.HTTPRequest.blank('/v1/tenant1/volumes/%s/action' % id) - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller._volume_upload_image, - req, - id, - body) diff --git a/cinder/tests/api/openstack/volume/test_router.py b/cinder/tests/api/openstack/volume/test_router.py deleted file mode 100644 index 6be12e14d0..0000000000 --- a/cinder/tests/api/openstack/volume/test_router.py +++ /dev/null @@ -1,108 +0,0 @@ -# Copyright 2011 Denali Systems, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from cinder.api.openstack import volume -from cinder.api.openstack.volume import snapshots -from cinder.api.openstack.volume import volumes -from cinder.api.openstack.volume import versions -from cinder.api.openstack import wsgi -from cinder import flags -from cinder.openstack.common import log as logging -from cinder import test -from cinder.tests.api.openstack import fakes - -FLAGS = flags.FLAGS - -LOG = logging.getLogger(__name__) - - -class FakeController(object): - def __init__(self, ext_mgr=None): - self.ext_mgr = ext_mgr - - def index(self, req): - return {} - - def detail(self, req): - return {} - - -def create_resource(ext_mgr): - return wsgi.Resource(FakeController(ext_mgr)) - - -class VolumeRouterTestCase(test.TestCase): - def setUp(self): - super(VolumeRouterTestCase, self).setUp() - # NOTE(vish): versions is just returning text so, no need to stub. 
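
The deleted action tests all share one shape: build a webob POST against a ``.../action`` URL, put a JSON body keyed by the action name, dispatch it, and assert the status code. A self-contained sketch of that pattern, with a stub WSGI app standing in for the real APIRouter and stdlib ``json`` in place of cinder's jsonutils (names here are hypothetical)::

    import json

    import webob
    import webob.dec


    @webob.dec.wsgify
    def fake_action_app(req):
        # Stand-in for the real router: any well-formed action body is
        # simply accepted.
        json.loads(req.body)
        return webob.Response(status=202)


    req = webob.Request.blank('/v1/fake/volumes/1/action')
    req.method = 'POST'
    req.headers['content-type'] = 'application/json'
    req.body = json.dumps({'os-reset_status': {'status': 'error'}}).encode()
    resp = req.get_response(fake_action_app)
    assert resp.status_int == 202
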
- self.stubs.Set(snapshots, 'create_resource', create_resource) - self.stubs.Set(volumes, 'create_resource', create_resource) - self.app = volume.APIRouter() - - def test_versions(self): - req = fakes.HTTPRequest.blank('') - req.method = 'GET' - req.content_type = 'application/json' - response = req.get_response(self.app) - self.assertEqual(302, response.status_int) - req = fakes.HTTPRequest.blank('/') - req.method = 'GET' - req.content_type = 'application/json' - response = req.get_response(self.app) - self.assertEqual(200, response.status_int) - - def test_versions_dispatch(self): - req = fakes.HTTPRequest.blank('/') - req.method = 'GET' - req.content_type = 'application/json' - resource = versions.Versions() - result = resource.dispatch(resource.index, req, {}) - self.assertTrue(result) - - def test_volumes(self): - req = fakes.HTTPRequest.blank('/fake/volumes') - req.method = 'GET' - req.content_type = 'application/json' - response = req.get_response(self.app) - self.assertEqual(200, response.status_int) - - def test_volumes_detail(self): - req = fakes.HTTPRequest.blank('/fake/volumes/detail') - req.method = 'GET' - req.content_type = 'application/json' - response = req.get_response(self.app) - self.assertEqual(200, response.status_int) - - def test_types(self): - req = fakes.HTTPRequest.blank('/fake/types') - req.method = 'GET' - req.content_type = 'application/json' - response = req.get_response(self.app) - self.assertEqual(200, response.status_int) - - def test_snapshots(self): - req = fakes.HTTPRequest.blank('/fake/snapshots') - req.method = 'GET' - req.content_type = 'application/json' - response = req.get_response(self.app) - self.assertEqual(200, response.status_int) - - def test_snapshots_detail(self): - req = fakes.HTTPRequest.blank('/fake/snapshots/detail') - req.method = 'GET' - req.content_type = 'application/json' - response = req.get_response(self.app) - self.assertEqual(200, response.status_int) diff --git a/cinder/tests/api/openstack/volume/test_volumes.py b/cinder/tests/api/openstack/volume/test_volumes.py deleted file mode 100644 index f5141f631f..0000000000 --- a/cinder/tests/api/openstack/volume/test_volumes.py +++ /dev/null @@ -1,703 +0,0 @@ -# Copyright 2013 Josh Durgin -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
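
These tests replace real API methods with canned fakes via ``self.stubs.Set`` (the stubout library), which records the original attribute and restores it on tearDown. Stripped of the bookkeeping, it is attribute swapping; a bare sketch with hypothetical classes (real stubout also handles restoration, which plain assignment does not)::

    class VolumeAPI(object):
        def get(self, context, volume_id):
            raise RuntimeError('would hit the real database')


    def stub_volume_get(self, context, volume_id):
        # Canned record, in the spirit of fakes.stub_volume().
        return {'id': volume_id, 'status': 'available', 'size': 1}


    # stubs.Set(VolumeAPI, 'get', stub_volume_get) boils down to:
    VolumeAPI.get = stub_volume_get
    assert VolumeAPI().get(None, '1')['status'] == 'available'
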
- -import datetime - -from lxml import etree -import webob - -from cinder.api.openstack.volume import volumes -from cinder import db -from cinder.api.openstack.volume import extensions -from cinder import exception -from cinder import flags -from cinder import test -from cinder.tests.api.openstack import fakes -from cinder.tests.image import fake as fake_image -from cinder.volume import api as volume_api - - -FLAGS = flags.FLAGS -NS = '{http://docs.openstack.org/volume/api/v1}' - -TEST_SNAPSHOT_UUID = '00000000-0000-0000-0000-000000000001' - - -def stub_snapshot_get(self, context, snapshot_id): - if snapshot_id != TEST_SNAPSHOT_UUID: - raise exception.NotFound - - return { - 'id': snapshot_id, - 'volume_id': 12, - 'status': 'available', - 'volume_size': 100, - 'created_at': None, - 'display_name': 'Default name', - 'display_description': 'Default description', - } - - -class VolumeApiTest(test.TestCase): - def setUp(self): - super(VolumeApiTest, self).setUp() - self.ext_mgr = extensions.ExtensionManager() - self.ext_mgr.extensions = {} - fake_image.stub_out_image_service(self.stubs) - self.controller = volumes.VolumeController(self.ext_mgr) - - self.stubs.Set(db, 'volume_get_all', fakes.stub_volume_get_all) - self.stubs.Set(db, 'volume_get_all_by_project', - fakes.stub_volume_get_all_by_project) - self.stubs.Set(volume_api.API, 'get', fakes.stub_volume_get) - self.stubs.Set(volume_api.API, 'delete', fakes.stub_volume_delete) - - def test_volume_create(self): - self.stubs.Set(volume_api.API, "create", fakes.stub_volume_create) - - vol = {"size": 100, - "display_name": "Volume Test Name", - "display_description": "Volume Test Desc", - "availability_zone": "zone1:host1"} - body = {"volume": vol} - req = fakes.HTTPRequest.blank('/v1/volumes') - res_dict = self.controller.create(req, body) - expected = {'volume': {'status': 'fakestatus', - 'display_description': 'Volume Test Desc', - 'availability_zone': 'zone1:host1', - 'display_name': 'Volume Test Name', - 'attachments': [{'device': '/', - 'server_id': 'fakeuuid', - 'id': '1', - 'volume_id': '1'}], - 'volume_type': 'vol_type_name', - 'snapshot_id': None, - 'metadata': {}, - 'id': '1', - 'created_at': datetime.datetime(1, 1, 1, - 1, 1, 1), - 'size': 100}} - self.assertEqual(res_dict, expected) - - def test_volume_creation_fails_with_bad_size(self): - vol = {"size": '', - "display_name": "Volume Test Name", - "display_description": "Volume Test Desc", - "availability_zone": "zone1:host1"} - body = {"volume": vol} - req = fakes.HTTPRequest.blank('/v1/volumes') - self.assertRaises(exception.InvalidInput, - self.controller.create, - req, - body) - - def test_volume_create_with_image_id(self): - self.stubs.Set(volume_api.API, "create", fakes.stub_volume_create) - self.ext_mgr.extensions = {'os-image-create': 'fake'} - vol = {"size": '1', - "display_name": "Volume Test Name", - "display_description": "Volume Test Desc", - "availability_zone": "nova", - "imageRef": 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'} - expected = {'volume': {'status': 'fakestatus', - 'display_description': 'Volume Test Desc', - 'availability_zone': 'nova', - 'display_name': 'Volume Test Name', - 'attachments': [{'device': '/', - 'server_id': 'fakeuuid', - 'id': '1', - 'volume_id': '1'}], - 'volume_type': 'vol_type_name', - 'image_id': 'c905cedb-7281-47e4-8a62-f26bc5fc4c77', - 'snapshot_id': None, - 'metadata': {}, - 'id': '1', - 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), - 'size': '1'} - } - body = {"volume": vol} - req = fakes.HTTPRequest.blank('/v1/volumes') - res_dict 
= self.controller.create(req, body) - self.assertEqual(res_dict, expected) - - def test_volume_create_with_image_id_and_snapshot_id(self): - self.stubs.Set(volume_api.API, "create", fakes.stub_volume_create) - self.stubs.Set(volume_api.API, "get_snapshot", stub_snapshot_get) - self.ext_mgr.extensions = {'os-image-create': 'fake'} - vol = {"size": '1', - "display_name": "Volume Test Name", - "display_description": "Volume Test Desc", - "availability_zone": "cinder", - "imageRef": 'c905cedb-7281-47e4-8a62-f26bc5fc4c77', - "snapshot_id": TEST_SNAPSHOT_UUID} - body = {"volume": vol} - req = fakes.HTTPRequest.blank('/v1/volumes') - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.create, - req, - body) - - def test_volume_create_with_image_id_is_integer(self): - self.stubs.Set(volume_api.API, "create", fakes.stub_volume_create) - self.ext_mgr.extensions = {'os-image-create': 'fake'} - vol = {"size": '1', - "display_name": "Volume Test Name", - "display_description": "Volume Test Desc", - "availability_zone": "cinder", - "imageRef": 1234} - body = {"volume": vol} - req = fakes.HTTPRequest.blank('/v1/volumes') - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.create, - req, - body) - - def test_volume_create_with_image_id_not_uuid_format(self): - self.stubs.Set(volume_api.API, "create", fakes.stub_volume_create) - self.ext_mgr.extensions = {'os-image-create': 'fake'} - vol = {"size": '1', - "display_name": "Volume Test Name", - "display_description": "Volume Test Desc", - "availability_zone": "cinder", - "imageRef": '12345'} - body = {"volume": vol} - req = fakes.HTTPRequest.blank('/v1/volumes') - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.create, - req, - body) - - def test_volume_update(self): - self.stubs.Set(volume_api.API, "update", fakes.stub_volume_update) - updates = { - "display_name": "Updated Test Name", - } - body = {"volume": updates} - req = fakes.HTTPRequest.blank('/v1/volumes/1') - res_dict = self.controller.update(req, '1', body) - expected = {'volume': { - 'status': 'fakestatus', - 'display_description': 'displaydesc', - 'availability_zone': 'fakeaz', - 'display_name': 'Updated Test Name', - 'attachments': [{ - 'id': '1', - 'volume_id': '1', - 'server_id': 'fakeuuid', - 'device': '/', - }], - 'volume_type': 'vol_type_name', - 'snapshot_id': None, - 'metadata': {}, - 'id': '1', - 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), - 'size': 1, - }} - self.assertEquals(res_dict, expected) - - def test_update_empty_body(self): - body = {} - req = fakes.HTTPRequest.blank('/v1/volumes/1') - self.assertRaises(webob.exc.HTTPUnprocessableEntity, - self.controller.update, - req, '1', body) - - def test_update_invalid_body(self): - body = {'display_name': 'missing top level volume key'} - req = fakes.HTTPRequest.blank('/v1/volumes/1') - self.assertRaises(webob.exc.HTTPUnprocessableEntity, - self.controller.update, - req, '1', body) - - def test_update_not_found(self): - self.stubs.Set(volume_api.API, "get", fakes.stub_volume_get_notfound) - updates = { - "display_name": "Updated Test Name", - } - body = {"volume": updates} - req = fakes.HTTPRequest.blank('/v1/volumes/1') - self.assertRaises(webob.exc.HTTPNotFound, - self.controller.update, - req, '1', body) - - def test_volume_list(self): - self.stubs.Set(volume_api.API, 'get_all', - fakes.stub_volume_get_all_by_project) - - req = fakes.HTTPRequest.blank('/v1/volumes') - res_dict = self.controller.index(req) - expected = {'volumes': [{'status': 'fakestatus', - 'display_description': 
'displaydesc', - 'availability_zone': 'fakeaz', - 'display_name': 'displayname', - 'attachments': [{'device': '/', - 'server_id': 'fakeuuid', - 'id': '1', - 'volume_id': '1'}], - 'volume_type': 'vol_type_name', - 'snapshot_id': None, - 'metadata': {}, - 'id': '1', - 'created_at': datetime.datetime(1, 1, 1, - 1, 1, 1), - 'size': 1}]} - self.assertEqual(res_dict, expected) - - def test_volume_list_detail(self): - self.stubs.Set(volume_api.API, 'get_all', - fakes.stub_volume_get_all_by_project) - req = fakes.HTTPRequest.blank('/v1/volumes/detail') - res_dict = self.controller.index(req) - expected = {'volumes': [{'status': 'fakestatus', - 'display_description': 'displaydesc', - 'availability_zone': 'fakeaz', - 'display_name': 'displayname', - 'attachments': [{'device': '/', - 'server_id': 'fakeuuid', - 'id': '1', - 'volume_id': '1'}], - 'volume_type': 'vol_type_name', - 'snapshot_id': None, - 'metadata': {}, - 'id': '1', - 'created_at': datetime.datetime(1, 1, 1, - 1, 1, 1), - 'size': 1}]} - self.assertEqual(res_dict, expected) - - def test_volume_list_by_name(self): - def stub_volume_get_all_by_project(context, project_id): - return [ - fakes.stub_volume(1, display_name='vol1'), - fakes.stub_volume(2, display_name='vol2'), - fakes.stub_volume(3, display_name='vol3'), - ] - self.stubs.Set(db, 'volume_get_all_by_project', - stub_volume_get_all_by_project) - - # no display_name filter - req = fakes.HTTPRequest.blank('/v1/volumes') - resp = self.controller.index(req) - self.assertEqual(len(resp['volumes']), 3) - # filter on display_name - req = fakes.HTTPRequest.blank('/v1/volumes?display_name=vol2') - resp = self.controller.index(req) - self.assertEqual(len(resp['volumes']), 1) - self.assertEqual(resp['volumes'][0]['display_name'], 'vol2') - # filter no match - req = fakes.HTTPRequest.blank('/v1/volumes?display_name=vol4') - resp = self.controller.index(req) - self.assertEqual(len(resp['volumes']), 0) - - def test_volume_list_by_status(self): - def stub_volume_get_all_by_project(context, project_id): - return [ - fakes.stub_volume(1, display_name='vol1', status='available'), - fakes.stub_volume(2, display_name='vol2', status='available'), - fakes.stub_volume(3, display_name='vol3', status='in-use'), - ] - self.stubs.Set(db, 'volume_get_all_by_project', - stub_volume_get_all_by_project) - # no status filter - req = fakes.HTTPRequest.blank('/v1/volumes') - resp = self.controller.index(req) - self.assertEqual(len(resp['volumes']), 3) - # single match - req = fakes.HTTPRequest.blank('/v1/volumes?status=in-use') - resp = self.controller.index(req) - self.assertEqual(len(resp['volumes']), 1) - self.assertEqual(resp['volumes'][0]['status'], 'in-use') - # multiple match - req = fakes.HTTPRequest.blank('/v1/volumes?status=available') - resp = self.controller.index(req) - self.assertEqual(len(resp['volumes']), 2) - for volume in resp['volumes']: - self.assertEqual(volume['status'], 'available') - # multiple filters - req = fakes.HTTPRequest.blank('/v1/volumes?status=available&' - 'display_name=vol1') - resp = self.controller.index(req) - self.assertEqual(len(resp['volumes']), 1) - self.assertEqual(resp['volumes'][0]['display_name'], 'vol1') - self.assertEqual(resp['volumes'][0]['status'], 'available') - # no match - req = fakes.HTTPRequest.blank('/v1/volumes?status=in-use&' - 'display_name=vol1') - resp = self.controller.index(req) - self.assertEqual(len(resp['volumes']), 0) - - def test_volume_show(self): - req = fakes.HTTPRequest.blank('/v1/volumes/1') - res_dict = self.controller.show(req, '1') - 
expected = {'volume': {'status': 'fakestatus', - 'display_description': 'displaydesc', - 'availability_zone': 'fakeaz', - 'display_name': 'displayname', - 'attachments': [{'device': '/', - 'server_id': 'fakeuuid', - 'id': '1', - 'volume_id': '1'}], - 'volume_type': 'vol_type_name', - 'snapshot_id': None, - 'metadata': {}, - 'id': '1', - 'created_at': datetime.datetime(1, 1, 1, - 1, 1, 1), - 'size': 1}} - self.assertEqual(res_dict, expected) - - def test_volume_show_no_attachments(self): - def stub_volume_get(self, context, volume_id): - return fakes.stub_volume(volume_id, attach_status='detached') - - self.stubs.Set(volume_api.API, 'get', stub_volume_get) - - req = fakes.HTTPRequest.blank('/v1/volumes/1') - res_dict = self.controller.show(req, '1') - expected = {'volume': {'status': 'fakestatus', - 'display_description': 'displaydesc', - 'availability_zone': 'fakeaz', - 'display_name': 'displayname', - 'attachments': [], - 'volume_type': 'vol_type_name', - 'snapshot_id': None, - 'metadata': {}, - 'id': '1', - 'created_at': datetime.datetime(1, 1, 1, - 1, 1, 1), - 'size': 1}} - self.assertEqual(res_dict, expected) - - def test_volume_show_no_volume(self): - self.stubs.Set(volume_api.API, "get", fakes.stub_volume_get_notfound) - - req = fakes.HTTPRequest.blank('/v1/volumes/1') - self.assertRaises(webob.exc.HTTPNotFound, - self.controller.show, - req, - 1) - - def test_volume_delete(self): - req = fakes.HTTPRequest.blank('/v1/volumes/1') - resp = self.controller.delete(req, 1) - self.assertEqual(resp.status_int, 202) - - def test_volume_delete_no_volume(self): - self.stubs.Set(volume_api.API, "get", fakes.stub_volume_get_notfound) - - req = fakes.HTTPRequest.blank('/v1/volumes/1') - self.assertRaises(webob.exc.HTTPNotFound, - self.controller.delete, - req, - 1) - - def test_admin_list_volumes_limited_to_project(self): - req = fakes.HTTPRequest.blank('/v1/fake/volumes', - use_admin_context=True) - res = self.controller.index(req) - - self.assertTrue('volumes' in res) - self.assertEqual(1, len(res['volumes'])) - - def test_admin_list_volumes_all_tenants(self): - req = fakes.HTTPRequest.blank('/v1/fake/volumes?all_tenants=1', - use_admin_context=True) - res = self.controller.index(req) - self.assertTrue('volumes' in res) - self.assertEqual(3, len(res['volumes'])) - - def test_all_tenants_non_admin_gets_all_tenants(self): - req = fakes.HTTPRequest.blank('/v1/fake/volumes?all_tenants=1') - res = self.controller.index(req) - self.assertTrue('volumes' in res) - self.assertEqual(1, len(res['volumes'])) - - def test_non_admin_get_by_project(self): - req = fakes.HTTPRequest.blank('/v1/fake/volumes') - res = self.controller.index(req) - self.assertTrue('volumes' in res) - self.assertEqual(1, len(res['volumes'])) - - -class VolumeSerializerTest(test.TestCase): - def _verify_volume_attachment(self, attach, tree): - for attr in ('id', 'volume_id', 'server_id', 'device'): - self.assertEqual(str(attach[attr]), tree.get(attr)) - - def _verify_volume(self, vol, tree): - self.assertEqual(tree.tag, NS + 'volume') - - for attr in ('id', 'status', 'size', 'availability_zone', 'created_at', - 'display_name', 'display_description', 'volume_type', - 'snapshot_id'): - self.assertEqual(str(vol[attr]), tree.get(attr)) - - for child in tree: - print child.tag - self.assertTrue(child.tag in (NS + 'attachments', NS + 'metadata')) - if child.tag == 'attachments': - self.assertEqual(1, len(child)) - self.assertEqual('attachment', child[0].tag) - self._verify_volume_attachment(vol['attachments'][0], child[0]) - elif child.tag 
== 'metadata': - not_seen = set(vol['metadata'].keys()) - for gr_child in child: - self.assertTrue(gr_child.get("key") in not_seen) - self.assertEqual(str(vol['metadata'][gr_child.get("key")]), - gr_child.text) - not_seen.remove(gr_child.get('key')) - self.assertEqual(0, len(not_seen)) - - def test_volume_show_create_serializer(self): - serializer = volumes.VolumeTemplate() - raw_volume = dict( - id='vol_id', - status='vol_status', - size=1024, - availability_zone='vol_availability', - created_at=datetime.datetime.now(), - attachments=[dict( - id='vol_id', - volume_id='vol_id', - server_id='instance_uuid', - device='/foo')], - display_name='vol_name', - display_description='vol_desc', - volume_type='vol_type', - snapshot_id='snap_id', - metadata=dict( - foo='bar', - baz='quux', - ), - ) - text = serializer.serialize(dict(volume=raw_volume)) - - print text - tree = etree.fromstring(text) - - self._verify_volume(raw_volume, tree) - - def test_volume_index_detail_serializer(self): - serializer = volumes.VolumesTemplate() - raw_volumes = [dict( - id='vol1_id', - status='vol1_status', - size=1024, - availability_zone='vol1_availability', - created_at=datetime.datetime.now(), - attachments=[dict( - id='vol1_id', - volume_id='vol1_id', - server_id='instance_uuid', - device='/foo1')], - display_name='vol1_name', - display_description='vol1_desc', - volume_type='vol1_type', - snapshot_id='snap1_id', - metadata=dict( - foo='vol1_foo', - bar='vol1_bar', - ), - ), - dict( - id='vol2_id', - status='vol2_status', - size=1024, - availability_zone='vol2_availability', - created_at=datetime.datetime.now(), - attachments=[dict( - id='vol2_id', - volume_id='vol2_id', - server_id='instance_uuid', - device='/foo2')], - display_name='vol2_name', - display_description='vol2_desc', - volume_type='vol2_type', - snapshot_id='snap2_id', - metadata=dict( - foo='vol2_foo', - bar='vol2_bar', - ), - )] - text = serializer.serialize(dict(volumes=raw_volumes)) - - print text - tree = etree.fromstring(text) - - self.assertEqual(NS + 'volumes', tree.tag) - self.assertEqual(len(raw_volumes), len(tree)) - for idx, child in enumerate(tree): - self._verify_volume(raw_volumes[idx], child) - - -class TestVolumeCreateRequestXMLDeserializer(test.TestCase): - - def setUp(self): - super(TestVolumeCreateRequestXMLDeserializer, self).setUp() - self.deserializer = volumes.CreateDeserializer() - - def test_minimal_volume(self): - self_request = """ -""" - request = self.deserializer.deserialize(self_request) - expected = { - "volume": { - "size": "1", - }, - } - self.assertEquals(request['body'], expected) - - def test_display_name(self): - self_request = """ -""" - request = self.deserializer.deserialize(self_request) - expected = { - "volume": { - "size": "1", - "display_name": "Volume-xml", - }, - } - self.assertEquals(request['body'], expected) - - def test_display_description(self): - self_request = """ -""" - request = self.deserializer.deserialize(self_request) - expected = { - "volume": { - "size": "1", - "display_name": "Volume-xml", - "display_description": "description", - }, - } - self.assertEquals(request['body'], expected) - - def test_volume_type(self): - self_request = """ -""" - request = self.deserializer.deserialize(self_request) - expected = { - "volume": { - "display_name": "Volume-xml", - "size": "1", - "display_name": "Volume-xml", - "display_description": "description", - "volume_type": "289da7f8-6440-407c-9fb4-7db01ec49164", - }, - } - self.assertEquals(request['body'], expected) - - def 
test_availability_zone(self): - self_request = """ -""" - request = self.deserializer.deserialize(self_request) - expected = { - "volume": { - "size": "1", - "display_name": "Volume-xml", - "display_description": "description", - "volume_type": "289da7f8-6440-407c-9fb4-7db01ec49164", - "availability_zone": "us-east1", - }, - } - self.assertEquals(request['body'], expected) - - def test_metadata(self): - self_request = """ - - work""" - request = self.deserializer.deserialize(self_request) - expected = { - "volume": { - "display_name": "Volume-xml", - "size": "1", - "metadata": { - "Type": "work", - }, - }, - } - self.assertEquals(request['body'], expected) - - def test_full_volume(self): - self_request = """ - - work""" - request = self.deserializer.deserialize(self_request) - expected = { - "volume": { - "size": "1", - "display_name": "Volume-xml", - "display_description": "description", - "volume_type": "289da7f8-6440-407c-9fb4-7db01ec49164", - "availability_zone": "us-east1", - "metadata": { - "Type": "work", - }, - }, - } - self.assertEquals(request['body'], expected) - - -class VolumesUnprocessableEntityTestCase(test.TestCase): - - """ - Tests of places we throw 422 Unprocessable Entity from - """ - - def setUp(self): - super(VolumesUnprocessableEntityTestCase, self).setUp() - self.ext_mgr = extensions.ExtensionManager() - self.ext_mgr.extensions = {} - self.controller = volumes.VolumeController(self.ext_mgr) - - def _unprocessable_volume_create(self, body): - req = fakes.HTTPRequest.blank('/v2/fake/volumes') - req.method = 'POST' - - self.assertRaises(webob.exc.HTTPUnprocessableEntity, - self.controller.create, req, body) - - def test_create_no_body(self): - self._unprocessable_volume_create(body=None) - - def test_create_missing_volume(self): - body = {'foo': {'a': 'b'}} - self._unprocessable_volume_create(body=body) - - def test_create_malformed_entity(self): - body = {'volume': 'string'} - self._unprocessable_volume_create(body=body) diff --git a/cinder/tests/api/openstack/test_common.py b/cinder/tests/api/test_common.py similarity index 82% rename from cinder/tests/api/openstack/test_common.py rename to cinder/tests/api/test_common.py index 374937db38..12acc9b6bb 100644 --- a/cinder/tests/api/openstack/test_common.py +++ b/cinder/tests/api/test_common.py @@ -1,6 +1,4 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 OpenStack LLC. +# Copyright 2010 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -22,8 +20,8 @@ import webob import webob.exc +from cinder.api import common from cinder import test -from cinder.api.openstack import common NS = "{http://docs.openstack.org/compute/api/v1.1}" @@ -31,14 +29,15 @@ class LimiterTest(test.TestCase): - """ - Unit tests for the `cinder.api.openstack.common.limited` method which takes - in a list of items and, depending on the 'offset' and 'limit' GET params, - returns a subset or complete set of the given items. + """Unit tests for the `cinder.api.common.limited` method. + + This method takes in a list of items and, depending on the 'offset' + and 'limit' GET params, returns a subset or complete set of the given + items. """ def setUp(self): - """ Run before each test. """ + """Run before each test.""" super(LimiterTest, self).setUp() self.tiny = range(1) self.small = range(10) @@ -46,7 +45,7 @@ def setUp(self): self.large = range(10000) def test_limiter_offset_zero(self): - """ Test offset key works with 0. 
""" + """Test offset key works with 0.""" req = webob.Request.blank('/?offset=0') self.assertEqual(common.limited(self.tiny, req), self.tiny) self.assertEqual(common.limited(self.small, req), self.small) @@ -54,7 +53,7 @@ def test_limiter_offset_zero(self): self.assertEqual(common.limited(self.large, req), self.large[:1000]) def test_limiter_offset_medium(self): - """ Test offset key works with a medium sized number. """ + """Test offset key works with a medium sized number.""" req = webob.Request.blank('/?offset=10') self.assertEqual(common.limited(self.tiny, req), []) self.assertEqual(common.limited(self.small, req), self.small[10:]) @@ -62,7 +61,7 @@ def test_limiter_offset_medium(self): self.assertEqual(common.limited(self.large, req), self.large[10:1010]) def test_limiter_offset_over_max(self): - """ Test offset key works with a number over 1000 (max_limit). """ + """Test offset key works with a number over 1000 (max_limit).""" req = webob.Request.blank('/?offset=1001') self.assertEqual(common.limited(self.tiny, req), []) self.assertEqual(common.limited(self.small, req), []) @@ -71,19 +70,19 @@ def test_limiter_offset_over_max(self): common.limited(self.large, req), self.large[1001:2001]) def test_limiter_offset_blank(self): - """ Test offset key works with a blank offset. """ + """Test offset key works with a blank offset.""" req = webob.Request.blank('/?offset=') self.assertRaises( webob.exc.HTTPBadRequest, common.limited, self.tiny, req) def test_limiter_offset_bad(self): - """ Test offset key works with a BAD offset. """ + """Test offset key works with a BAD offset.""" req = webob.Request.blank(u'/?offset=\u0020aa') self.assertRaises( webob.exc.HTTPBadRequest, common.limited, self.tiny, req) def test_limiter_nothing(self): - """ Test request with no offset or limit """ + """Test request with no offset or limit.""" req = webob.Request.blank('/') self.assertEqual(common.limited(self.tiny, req), self.tiny) self.assertEqual(common.limited(self.small, req), self.small) @@ -91,15 +90,21 @@ def test_limiter_nothing(self): self.assertEqual(common.limited(self.large, req), self.large[:1000]) def test_limiter_limit_zero(self): - """ Test limit of zero. """ + """Test limit of zero.""" req = webob.Request.blank('/?limit=0') self.assertEqual(common.limited(self.tiny, req), self.tiny) self.assertEqual(common.limited(self.small, req), self.small) self.assertEqual(common.limited(self.medium, req), self.medium) self.assertEqual(common.limited(self.large, req), self.large[:1000]) + def test_limiter_limit_bad(self): + """Test with a bad limit.""" + req = webob.Request.blank(u'/?limit=hello') + self.assertRaises( + webob.exc.HTTPBadRequest, common.limited, self.tiny, req) + def test_limiter_limit_medium(self): - """ Test limit of 10. """ + """Test limit of 10.""" req = webob.Request.blank('/?limit=10') self.assertEqual(common.limited(self.tiny, req), self.tiny) self.assertEqual(common.limited(self.small, req), self.small) @@ -107,7 +112,7 @@ def test_limiter_limit_medium(self): self.assertEqual(common.limited(self.large, req), self.large[:10]) def test_limiter_limit_over_max(self): - """ Test limit of 3000. 
""" + """Test limit of 3000.""" req = webob.Request.blank('/?limit=3000') self.assertEqual(common.limited(self.tiny, req), self.tiny) self.assertEqual(common.limited(self.small, req), self.small) @@ -115,7 +120,7 @@ def test_limiter_limit_over_max(self): self.assertEqual(common.limited(self.large, req), self.large[:1000]) def test_limiter_limit_and_offset(self): - """ Test request with both limit and offset. """ + """Test request with both limit and offset.""" items = range(2000) req = webob.Request.blank('/?offset=1&limit=3') self.assertEqual(common.limited(items, req), items[1:4]) @@ -127,7 +132,7 @@ def test_limiter_limit_and_offset(self): self.assertEqual(common.limited(items, req), []) def test_limiter_custom_max_limit(self): - """ Test a max_limit other than 1000. """ + """Test a max_limit other than 1000.""" items = range(2000) req = webob.Request.blank('/?offset=1&limit=3') self.assertEqual( @@ -142,50 +147,56 @@ def test_limiter_custom_max_limit(self): self.assertEqual(common.limited(items, req, max_limit=2000), []) def test_limiter_negative_limit(self): - """ Test a negative limit. """ + """Test a negative limit.""" req = webob.Request.blank('/?limit=-3000') self.assertRaises( webob.exc.HTTPBadRequest, common.limited, self.tiny, req) def test_limiter_negative_offset(self): - """ Test a negative offset. """ + """Test a negative offset.""" req = webob.Request.blank('/?offset=-30') self.assertRaises( webob.exc.HTTPBadRequest, common.limited, self.tiny, req) class PaginationParamsTest(test.TestCase): - """ - Unit tests for the `cinder.api.openstack.common.get_pagination_params` - method which takes in a request object and returns 'marker' and 'limit' + """Unit tests for `cinder.api.common.get_pagination_params` method. + + This method takes in a request object and returns 'marker' and 'limit' GET params. """ + def test_nonnumerical_limit(self): + """Test nonnumerical limit param.""" + req = webob.Request.blank('/?limit=hello') + self.assertRaises( + webob.exc.HTTPBadRequest, common.get_pagination_params, req) + def test_no_params(self): - """ Test no params. """ + """Test no params.""" req = webob.Request.blank('/') self.assertEqual(common.get_pagination_params(req), {}) def test_valid_marker(self): - """ Test valid marker param. """ + """Test valid marker param.""" req = webob.Request.blank( - '/?marker=263abb28-1de6-412f-b00b-f0ee0c4333c2') + '/?marker=263abb28-1de6-412f-b00b-f0ee0c4333c2') self.assertEqual(common.get_pagination_params(req), {'marker': '263abb28-1de6-412f-b00b-f0ee0c4333c2'}) def test_valid_limit(self): - """ Test valid limit param. """ + """Test valid limit param.""" req = webob.Request.blank('/?limit=10') self.assertEqual(common.get_pagination_params(req), {'limit': 10}) def test_invalid_limit(self): - """ Test invalid limit param. """ + """Test invalid limit param.""" req = webob.Request.blank('/?limit=-2') self.assertRaises( webob.exc.HTTPBadRequest, common.get_pagination_params, req) def test_valid_limit_and_marker(self): - """ Test valid limit and marker parameters. 
""" + """Test valid limit and marker parameters.""" marker = '263abb28-1de6-412f-b00b-f0ee0c4333c2' req = webob.Request.blank('/?limit=20&marker=%s' % marker) self.assertEqual(common.get_pagination_params(req), diff --git a/cinder/tests/api/openstack/volume/test_extensions.py b/cinder/tests/api/test_extensions.py similarity index 71% rename from cinder/tests/api/openstack/volume/test_extensions.py rename to cinder/tests/api/test_extensions.py index 72749ce4c5..64acba42b1 100644 --- a/cinder/tests/api/openstack/volume/test_extensions.py +++ b/cinder/tests/api/test_extensions.py @@ -1,7 +1,5 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright (c) 2011 X.commerce, a business unit of eBay Inc. -# Copyright 2011 OpenStack LLC. +# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -16,27 +14,28 @@ # License for the specific language governing permissions and limitations # under the License. - -import webob -from lxml import etree import iso8601 +from lxml import etree +from oslo.config import cfg +import webob -from cinder.api.openstack import volume -from cinder.api.openstack import xmlutil -from cinder import flags +from cinder.api.v1 import router +from cinder.api import xmlutil from cinder.openstack.common import jsonutils from cinder import test -FLAGS = flags.FLAGS + NS = "{http://docs.openstack.org/common/api/v1.0}" +CONF = cfg.CONF + + class ExtensionTestCase(test.TestCase): def setUp(self): super(ExtensionTestCase, self).setUp() - ext_list = FLAGS.osapi_volume_extension[:] - fox = ('cinder.tests.api.openstack.volume.extensions.' - 'foxinsocks.Foxinsocks') + ext_list = CONF.osapi_volume_extension[:] + fox = ('cinder.tests.api.extensions.foxinsocks.Foxinsocks') if fox not in ext_list: ext_list.append(fox) self.flags(osapi_volume_extension=ext_list) @@ -46,14 +45,11 @@ class ExtensionControllerTest(ExtensionTestCase): def setUp(self): super(ExtensionControllerTest, self).setUp() - self.ext_list = [ - "TypesManage", - "TypesExtraSpecs", - ] + self.ext_list = ["TypesManage", "TypesExtraSpecs", ] self.ext_list.sort() def test_list_extensions_json(self): - app = volume.APIRouter() + app = router.APIRouter() request = webob.Request.blank("/fake/extensions") response = request.get_response(app) self.assertEqual(200, response.status_int) @@ -72,15 +68,13 @@ def test_list_extensions_json(self): # Make sure that at least Fox in Sox is correct. 
(fox_ext, ) = [ x for x in data['extensions'] if x['alias'] == 'FOXNSOX'] - self.assertEqual(fox_ext, { - 'namespace': 'http://www.fox.in.socks/api/ext/pie/v1.0', - 'name': 'Fox In Socks', - 'updated': '2011-01-22T13:25:27-06:00', - 'description': 'The Fox In Socks Extension', - 'alias': 'FOXNSOX', - 'links': [] - }, - ) + self.assertEqual( + fox_ext, {'namespace': 'http://www.fox.in.socks/api/ext/pie/v1.0', + 'name': 'Fox In Socks', + 'updated': '2011-01-22T13:25:27-06:00', + 'description': 'The Fox In Socks Extension.', + 'alias': 'FOXNSOX', + 'links': []}, ) for ext in data['extensions']: url = '/fake/extensions/%s' % ext['alias'] @@ -90,28 +84,29 @@ def test_list_extensions_json(self): self.assertEqual(output['extension']['alias'], ext['alias']) def test_get_extension_json(self): - app = volume.APIRouter() + app = router.APIRouter() request = webob.Request.blank("/fake/extensions/FOXNSOX") response = request.get_response(app) self.assertEqual(200, response.status_int) data = jsonutils.loads(response.body) - self.assertEqual(data['extension'], { - "namespace": "http://www.fox.in.socks/api/ext/pie/v1.0", - "name": "Fox In Socks", - "updated": "2011-01-22T13:25:27-06:00", - "description": "The Fox In Socks Extension", - "alias": "FOXNSOX", - "links": []}) + self.assertEqual( + data['extension'], + {"namespace": "http://www.fox.in.socks/api/ext/pie/v1.0", + "name": "Fox In Socks", + "updated": "2011-01-22T13:25:27-06:00", + "description": "The Fox In Socks Extension.", + "alias": "FOXNSOX", + "links": []}) def test_get_non_existing_extension_json(self): - app = volume.APIRouter() + app = router.APIRouter() request = webob.Request.blank("/fake/extensions/4") response = request.get_response(app) self.assertEqual(404, response.status_int) def test_list_extensions_xml(self): - app = volume.APIRouter() + app = router.APIRouter() request = webob.Request.blank("/fake/extensions") request.accept = "application/xml" response = request.get_response(app) @@ -122,21 +117,23 @@ def test_list_extensions_xml(self): # Make sure we have all the extensions, extras extensions being OK. exts = root.findall('{0}extension'.format(NS)) - self.assert_(len(exts) >= len(self.ext_list)) + self.assertGreaterEqual(len(exts), len(self.ext_list)) # Make sure that at least Fox in Sox is correct. 
(fox_ext, ) = [x for x in exts if x.get('alias') == 'FOXNSOX'] self.assertEqual(fox_ext.get('name'), 'Fox In Socks') - self.assertEqual(fox_ext.get('namespace'), + self.assertEqual( + fox_ext.get('namespace'), 'http://www.fox.in.socks/api/ext/pie/v1.0') self.assertEqual(fox_ext.get('updated'), '2011-01-22T13:25:27-06:00') - self.assertEqual(fox_ext.findtext('{0}description'.format(NS)), - 'The Fox In Socks Extension') + self.assertEqual( + fox_ext.findtext('{0}description'.format(NS)), + 'The Fox In Socks Extension.') xmlutil.validate_schema(root, 'extensions') def test_get_extension_xml(self): - app = volume.APIRouter() + app = router.APIRouter() request = webob.Request.blank("/fake/extensions/FOXNSOX") request.accept = "application/xml" response = request.get_response(app) @@ -147,10 +144,12 @@ def test_get_extension_xml(self): self.assertEqual(root.tag.split('extension')[0], NS) self.assertEqual(root.get('alias'), 'FOXNSOX') self.assertEqual(root.get('name'), 'Fox In Socks') - self.assertEqual(root.get('namespace'), + self.assertEqual( + root.get('namespace'), 'http://www.fox.in.socks/api/ext/pie/v1.0') self.assertEqual(root.get('updated'), '2011-01-22T13:25:27-06:00') - self.assertEqual(root.findtext('{0}description'.format(NS)), - 'The Fox In Socks Extension') + self.assertEqual( + root.findtext('{0}description'.format(NS)), + 'The Fox In Socks Extension.') xmlutil.validate_schema(root, 'extension') diff --git a/cinder/tests/api/test_router.py b/cinder/tests/api/test_router.py new file mode 100644 index 0000000000..0aebeaacee --- /dev/null +++ b/cinder/tests/api/test_router.py @@ -0,0 +1,259 @@ +# Copyright 2011 Denali Systems, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from cinder.api.openstack import wsgi +from cinder.api.v1 import router +from cinder.api.v1 import snapshots +from cinder.api.v1 import volumes +from cinder.api import versions +from cinder.openstack.common import log as logging +from cinder import test +from cinder.tests.api import fakes + + +LOG = logging.getLogger(__name__) + + +class FakeController(object): + def __init__(self, ext_mgr=None): + self.ext_mgr = ext_mgr + + def index(self, req): + return {} + + def detail(self, req): + return {} + + +def create_resource(ext_mgr): + return wsgi.Resource(FakeController(ext_mgr)) + + +class VolumeRouterTestCase(test.TestCase): + def setUp(self): + super(VolumeRouterTestCase, self).setUp() + # NOTE(vish): versions is just returning text so, no need to stub. 
+ self.stubs.Set(snapshots, 'create_resource', create_resource) + self.stubs.Set(volumes, 'create_resource', create_resource) + self.app = router.APIRouter() + + def test_versions(self): + req = fakes.HTTPRequest.blank('') + req.method = 'GET' + req.content_type = 'application/json' + response = req.get_response(self.app) + self.assertEqual(302, response.status_int) + req = fakes.HTTPRequest.blank('/') + req.method = 'GET' + req.content_type = 'application/json' + response = req.get_response(self.app) + self.assertEqual(200, response.status_int) + + def test_versions_action_args_index(self): + request_environment = {'PATH_INFO': '/'} + resource = versions.Versions() + result = resource.get_action_args(request_environment) + self.assertEqual(result['action'], 'index') + + def test_versions_action_args_multi(self): + request_environment = {'PATH_INFO': '/fake/path'} + resource = versions.Versions() + result = resource.get_action_args(request_environment) + self.assertEqual(result['action'], 'multi') + + def test_versions_get_most_recent_update(self): + res = versions.AtomSerializer() + fake_date_updated = [ + {"updated": '2012-01-04T11:33:21Z'}, + {"updated": '2012-11-21T11:33:21Z'} + ] + result = res._get_most_recent_update(fake_date_updated) + self.assertEqual('2012-11-21T11:33:21Z', result) + + def test_versions_create_version_entry(self): + res = versions.AtomSerializer() + vers = { + "id": "v2.0", + "status": "CURRENT", + "updated": "2012-11-21T11:33:21Z", + "links": [ + { + "rel": "describedby", + "type": "application/pdf", + "href": "http://jorgew.github.com/block-storage-api/" + "content/os-block-storage-1.0.pdf", + }, + ], + } + fake_result = { + 'id': 'http://jorgew.github.com/block-storage-api/' + 'content/os-block-storage-1.0.pdf', + 'title': 'Version v2.0', + 'updated': '2012-11-21T11:33:21Z', + 'link': { + 'href': 'http://jorgew.github.com/block-storage-api/' + 'content/os-block-storage-1.0.pdf', + 'type': 'application/pdf', + 'rel': 'describedby' + }, + 'content': 'Version v2.0 CURRENT (2012-11-21T11:33:21Z)' + } + result_function = res._create_version_entry(vers) + result = {} + for subElement in result_function: + if subElement.text: + result[subElement.tag] = subElement.text + else: + result[subElement.tag] = subElement.attrib + self.assertEqual(result, fake_result) + + def test_versions_create_feed(self): + res = versions.AtomSerializer() + vers = [ + { + "id": "v2.0", + "status": "CURRENT", + "updated": "2012-11-21T11:33:21Z", + "links": [ + { + "rel": "describedby", + "type": "application/pdf", + "href": "http://jorgew.github.com/block-storage-api/" + "content/os-block-storage-1.0.pdf", + }, + ], + }, + { + "id": "v1.0", + "status": "CURRENT", + "updated": "2012-01-04T11:33:21Z", + "links": [ + { + "rel": "describedby", + "type": "application/vnd.sun.wadl+xml", + "href": "http://docs.rackspacecloud.com/" + "servers/api/v1.1/application.wadl", + }, + ], + } + ] + result = res._create_feed(vers, "fake_feed_title", + "http://jorgew.github.com/block-storage-api/" + "content/os-block-storage-1.0.pdf") + fake_data = { + 'id': 'http://jorgew.github.com/block-storage-api/' + 'content/os-block-storage-1.0.pdf', + 'title': 'fake_feed_title', + 'updated': '2012-11-21T11:33:21Z', + } + data = {} + for subElement in result: + if subElement.text: + data[subElement.tag] = subElement.text + self.assertEqual(data, fake_data) + + def test_versions_multi(self): + req = fakes.HTTPRequest.blank('/') + req.method = 'GET' + req.content_type = 'application/json' + resource = versions.Versions() 
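
The ``test_versions_get_most_recent_update`` case relies on a convenient property: ISO-8601 timestamps of the ``YYYY-MM-DDTHH:MM:SSZ`` form sort lexicographically, so the latest ``updated`` value falls out of a plain ``max()``. A minimal illustration with a hypothetical helper mirroring what the serializer computes::

    def most_recent_update(versions):
        # Fixed-width ISO-8601 strings compare correctly as text, so no
        # datetime parsing is needed to find the newest entry.
        return max(v['updated'] for v in versions)


    assert most_recent_update([
        {'updated': '2012-01-04T11:33:21Z'},
        {'updated': '2012-11-21T11:33:21Z'},
    ]) == '2012-11-21T11:33:21Z'
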
+ result = resource.dispatch(resource.multi, req, {}) + ids = [v['id'] for v in result['choices']] + self.assertEqual(set(ids), set(['v1.0', 'v2.0'])) + + def test_versions_multi_disable_v1(self): + self.flags(enable_v1_api=False) + req = fakes.HTTPRequest.blank('/') + req.method = 'GET' + req.content_type = 'application/json' + resource = versions.Versions() + result = resource.dispatch(resource.multi, req, {}) + ids = [v['id'] for v in result['choices']] + self.assertEqual(set(ids), set(['v2.0'])) + + def test_versions_multi_disable_v2(self): + self.flags(enable_v2_api=False) + req = fakes.HTTPRequest.blank('/') + req.method = 'GET' + req.content_type = 'application/json' + resource = versions.Versions() + result = resource.dispatch(resource.multi, req, {}) + ids = [v['id'] for v in result['choices']] + self.assertEqual(set(ids), set(['v1.0'])) + + def test_versions_index(self): + req = fakes.HTTPRequest.blank('/') + req.method = 'GET' + req.content_type = 'application/json' + resource = versions.Versions() + result = resource.dispatch(resource.index, req, {}) + ids = [v['id'] for v in result['versions']] + self.assertEqual(set(ids), set(['v1.0', 'v2.0'])) + + def test_versions_index_disable_v1(self): + self.flags(enable_v1_api=False) + req = fakes.HTTPRequest.blank('/') + req.method = 'GET' + req.content_type = 'application/json' + resource = versions.Versions() + result = resource.dispatch(resource.index, req, {}) + ids = [v['id'] for v in result['versions']] + self.assertEqual(set(ids), set(['v2.0'])) + + def test_versions_index_disable_v2(self): + self.flags(enable_v2_api=False) + req = fakes.HTTPRequest.blank('/') + req.method = 'GET' + req.content_type = 'application/json' + resource = versions.Versions() + result = resource.dispatch(resource.index, req, {}) + ids = [v['id'] for v in result['versions']] + self.assertEqual(set(ids), set(['v1.0'])) + + def test_volumes(self): + req = fakes.HTTPRequest.blank('/fake/volumes') + req.method = 'GET' + req.content_type = 'application/json' + response = req.get_response(self.app) + self.assertEqual(200, response.status_int) + + def test_volumes_detail(self): + req = fakes.HTTPRequest.blank('/fake/volumes/detail') + req.method = 'GET' + req.content_type = 'application/json' + response = req.get_response(self.app) + self.assertEqual(200, response.status_int) + + def test_types(self): + req = fakes.HTTPRequest.blank('/fake/types') + req.method = 'GET' + req.content_type = 'application/json' + response = req.get_response(self.app) + self.assertEqual(200, response.status_int) + + def test_snapshots(self): + req = fakes.HTTPRequest.blank('/fake/snapshots') + req.method = 'GET' + req.content_type = 'application/json' + response = req.get_response(self.app) + self.assertEqual(200, response.status_int) + + def test_snapshots_detail(self): + req = fakes.HTTPRequest.blank('/fake/snapshots/detail') + req.method = 'GET' + req.content_type = 'application/json' + response = req.get_response(self.app) + self.assertEqual(200, response.status_int) diff --git a/cinder/tests/api/test_sizelimit.py b/cinder/tests/api/test_sizelimit.py deleted file mode 100644 index 280ee9c29d..0000000000 --- a/cinder/tests/api/test_sizelimit.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright (c) 2012 OpenStack, LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import webob - -import cinder.api.sizelimit -from cinder import flags -from cinder import test - -FLAGS = flags.FLAGS -MAX_REQUEST_BODY_SIZE = FLAGS.osapi_max_request_body_size - - -class TestRequestBodySizeLimiter(test.TestCase): - - def setUp(self): - super(TestRequestBodySizeLimiter, self).setUp() - - @webob.dec.wsgify() - def fake_app(req): - return webob.Response() - - self.middleware = cinder.api.sizelimit.RequestBodySizeLimiter(fake_app) - self.request = webob.Request.blank('/', method='POST') - - def test_content_length_acceptable(self): - self.request.headers['Content-Length'] = MAX_REQUEST_BODY_SIZE - self.request.body = "0" * MAX_REQUEST_BODY_SIZE - response = self.request.get_response(self.middleware) - self.assertEqual(response.status_int, 200) - - def test_content_length_to_large(self): - self.request.headers['Content-Length'] = MAX_REQUEST_BODY_SIZE + 1 - response = self.request.get_response(self.middleware) - self.assertEqual(response.status_int, 400) - - def test_request_to_large(self): - self.request.body = "0" * (MAX_REQUEST_BODY_SIZE + 1) - response = self.request.get_response(self.middleware) - self.assertEqual(response.status_int, 400) diff --git a/cinder/tests/api/test_wsgi.py b/cinder/tests/api/test_wsgi.py index c68f8158eb..fc52fb9a51 100644 --- a/cinder/tests/api/test_wsgi.py +++ b/cinder/tests/api/test_wsgi.py @@ -1,8 +1,6 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. -# Copyright 2010 OpenStack LLC. +# Copyright 2010 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/cinder/tests/api/openstack/test_xmlutil.py b/cinder/tests/api/test_xmlutil.py similarity index 86% rename from cinder/tests/api/openstack/test_xmlutil.py rename to cinder/tests/api/test_xmlutil.py index 7f2a26044e..4c111e78fd 100644 --- a/cinder/tests/api/openstack/test_xmlutil.py +++ b/cinder/tests/api/test_xmlutil.py @@ -1,6 +1,4 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2011 OpenStack LLC. +# Copyright 2011 OpenStack Foundation # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -17,22 +15,16 @@ from lxml import etree +from cinder.api import xmlutil from cinder import test -from cinder.api.openstack import xmlutil class SelectorTest(test.TestCase): - obj_for_test = { - 'test': { - 'name': 'test', - 'values': [1, 2, 3], - 'attrs': { - 'foo': 1, - 'bar': 2, - 'baz': 3, - }, - }, - } + obj_for_test = {'test': {'name': 'test', + 'values': [1, 2, 3], + 'attrs': {'foo': 1, + 'bar': 2, + 'baz': 3, }, }, } def test_empty_selector(self): sel = xmlutil.Selector() @@ -70,7 +62,7 @@ def test_items_selector(self): def test_missing_key_selector(self): sel = xmlutil.Selector('test2', 'attrs') - self.assertEqual(sel(self.obj_for_test), None) + self.assertIsNone(sel(self.obj_for_test)) self.assertRaises(KeyError, sel, self.obj_for_test, True) def test_constant_selector(self): @@ -169,7 +161,7 @@ def test_element_subselector_none(self): # Create a template element with no subselector elem = xmlutil.TemplateElement('test') - self.assertEqual(elem.subselector, None) + self.assertIsNone(elem.subselector) def test_element_subselector_string(self): # Create a template element with a string subselector @@ -202,7 +194,7 @@ def test_element_append_child(self): # Verify that the child was added self.assertEqual(len(elem), 1) self.assertEqual(elem[0], child) - self.assertEqual('child' in elem, True) + self.assertIn('child', elem) self.assertEqual(elem['child'], child) # Ensure that multiple children of the same name are rejected @@ -217,11 +209,9 @@ def test_element_extend_children(self): self.assertEqual(len(elem), 0) # Create a few children - children = [ - xmlutil.TemplateElement('child1'), - xmlutil.TemplateElement('child2'), - xmlutil.TemplateElement('child3'), - ] + children = [xmlutil.TemplateElement('child1'), + xmlutil.TemplateElement('child2'), + xmlutil.TemplateElement('child3'), ] # Extend the parent by those children elem.extend(children) @@ -230,14 +220,12 @@ def test_element_extend_children(self): self.assertEqual(len(elem), 3) for idx in range(len(elem)): self.assertEqual(children[idx], elem[idx]) - self.assertEqual(children[idx].tag in elem, True) + self.assertIn(children[idx].tag, elem) self.assertEqual(elem[children[idx].tag], children[idx]) # Ensure that multiple children of the same name are rejected - children2 = [ - xmlutil.TemplateElement('child4'), - xmlutil.TemplateElement('child1'), - ] + children2 = [xmlutil.TemplateElement('child4'), + xmlutil.TemplateElement('child1'), ] self.assertRaises(KeyError, elem.extend, children2) # Also ensure that child4 was not added @@ -252,11 +240,9 @@ def test_element_insert_child(self): self.assertEqual(len(elem), 0) # Create a few children - children = [ - xmlutil.TemplateElement('child1'), - xmlutil.TemplateElement('child2'), - xmlutil.TemplateElement('child3'), - ] + children = [xmlutil.TemplateElement('child1'), + xmlutil.TemplateElement('child2'), + xmlutil.TemplateElement('child3'), ] # Extend the parent by those children elem.extend(children) @@ -272,7 +258,7 @@ def test_element_insert_child(self): children.insert(1, child) for idx in range(len(elem)): self.assertEqual(children[idx], elem[idx]) - self.assertEqual(children[idx].tag in elem, True) + self.assertIn(children[idx].tag, elem) self.assertEqual(elem[children[idx].tag], children[idx]) # Ensure that multiple children of the same name are rejected @@ -287,11 +273,9 @@ def test_element_remove_child(self): self.assertEqual(len(elem), 0) # Create a few children - children = [ - 
xmlutil.TemplateElement('child1'),
- xmlutil.TemplateElement('child2'),
- xmlutil.TemplateElement('child3'),
- ]
+ children = [xmlutil.TemplateElement('child1'),
+ xmlutil.TemplateElement('child2'),
+ xmlutil.TemplateElement('child3'), ]
 # Extend the parent by those children
 elem.extend(children)
@@ -312,7 +296,7 @@ def test_element_remove_child(self):
 self.assertEqual(len(elem), 2)
 self.assertEqual(elem[0], children[0])
 self.assertEqual(elem[1], children[2])
- self.assertEqual('child2' in elem, False)
+ self.assertNotIn('child2', elem)
 # Ensure the child cannot be retrieved by name
 def get_key(elem, key):
@@ -324,7 +308,7 @@ def test_element_text(self):
 elem = xmlutil.TemplateElement('test')
 # Ensure that it has no text
- self.assertEqual(elem.text, None)
+ self.assertIsNone(elem.text)
 # Try setting it to a string and ensure it becomes a selector
 elem.text = 'test'
@@ -334,7 +318,7 @@ def test_element_text(self):
 # Try resetting the text to None
 elem.text = None
- self.assertEqual(elem.text, None)
+ self.assertIsNone(elem.text)
 # Now make up a selector and try setting the text to that
 sel = xmlutil.Selector()
@@ -343,7 +327,7 @@ def test_element_text(self):
 # Finally, try deleting the text and see what happens
 del elem.text
- self.assertEqual(elem.text, None)
+ self.assertIsNone(elem.text)
 def test_apply_attrs(self):
 # Create a template element
@@ -384,10 +368,8 @@ def test__render(self):
 master_elem = xmlutil.TemplateElement('test', attr1=attrs['attr1'])
 # Create a couple of slave template elements
- slave_elems = [
- xmlutil.TemplateElement('test', attr2=attrs['attr2']),
- xmlutil.TemplateElement('test', attr3=attrs['attr3']),
- ]
+ slave_elems = [xmlutil.TemplateElement('test', attr2=attrs['attr2']),
+ xmlutil.TemplateElement('test', attr3=attrs['attr3']), ]
 # Try the render
 elem = master_elem._render(None, None, slave_elems, None)
@@ -589,22 +571,13 @@ def test_slave_apply(self):
 def test__serialize(self):
 # Our test object to serialize
- obj = {
- 'test': {
- 'name': 'foobar',
- 'values': [1, 2, 3, 4],
- 'attrs': {
- 'a': 1,
- 'b': 2,
- 'c': 3,
- 'd': 4,
- },
- 'image': {
- 'name': 'image_foobar',
- 'id': 42,
- },
- },
- }
+ obj = {'test': {'name': 'foobar',
+ 'values': [1, 2, 3, 4],
+ 'attrs': {'a': 1,
+ 'b': 2,
+ 'c': 3,
+ 'd': 4, },
+ 'image': {'name': 'image_foobar', 'id': 42, }, }, }
 # Set up our master template
 root = xmlutil.TemplateElement('test', selector='test',
@@ -652,6 +625,35 @@ def test__serialize(self):
 str(obj['test']['image']['id']))
 self.assertEqual(result[idx].text, obj['test']['image']['name'])
+ def test_serialize_with_delimiter(self):
+ # Our test object to serialize
+ obj = {'test': {'scope0:key1': 'Value1',
+ 'scope0:scope1:key2': 'Value2',
+ 'scope0:scope1:scope2:key3': 'Value3'
+ }}
+
+ # Set up our master template
+ root = xmlutil.TemplateElement('test', selector='test')
+ key1 = xmlutil.SubTemplateElement(root, 'scope0:key1',
+ selector='scope0:key1')
+ key1.text = xmlutil.Selector()
+ key2 = xmlutil.SubTemplateElement(root, 'scope0:scope1:key2',
+ selector='scope0:scope1:key2')
+ key2.text = xmlutil.Selector()
+ key3 = xmlutil.SubTemplateElement(root, 'scope0:scope1:scope2:key3',
+ selector='scope0:scope1:scope2:key3')
+ key3.text = xmlutil.Selector()
+ serializer = xmlutil.MasterTemplate(root, 1)
+ xml_list = []
+ xml_list.append("<?xmlversion='1.0'encoding='UTF-8'?>")
+ xml_list.append("<test><scope0:key1>Value1</scope0:key1>")
+ xml_list.append("<scope0:scope1:key2>Value2</scope0:scope1:key2>"
+ "<scope0:scope1:scope2:key3>Value3"
+ "</scope0:scope1:scope2:key3>")
+ xml_list.append("</test>")
+ expected_xml = ''.join(xml_list)
+ result = serializer.serialize(obj)
+ result = result.replace('\n', '').replace(' ', '')
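+ # serialize() pretty-prints its output; stripping newlines and spaces makes the comparison layout-independent.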
self.assertEqual(result, expected_xml) + class MasterTemplateBuilder(xmlutil.TemplateBuilder): def construct(self): @@ -668,13 +670,13 @@ def construct(self): class TemplateBuilderTest(test.TestCase): def test_master_template_builder(self): # Make sure the template hasn't been built yet - self.assertEqual(MasterTemplateBuilder._tmpl, None) + self.assertIsNone(MasterTemplateBuilder._tmpl) # Now, construct the template tmpl1 = MasterTemplateBuilder() # Make sure that there is a template cached... - self.assertNotEqual(MasterTemplateBuilder._tmpl, None) + self.assertIsNotNone(MasterTemplateBuilder._tmpl) # Make sure it wasn't what was returned... self.assertNotEqual(MasterTemplateBuilder._tmpl, tmpl1) @@ -693,13 +695,13 @@ def test_master_template_builder(self): def test_slave_template_builder(self): # Make sure the template hasn't been built yet - self.assertEqual(SlaveTemplateBuilder._tmpl, None) + self.assertIsNone(SlaveTemplateBuilder._tmpl) # Now, construct the template tmpl1 = SlaveTemplateBuilder() # Make sure there is a template cached... - self.assertNotEqual(SlaveTemplateBuilder._tmpl, None) + self.assertIsNotNone(SlaveTemplateBuilder._tmpl) # Make sure it was what was returned... self.assertEqual(SlaveTemplateBuilder._tmpl, tmpl1) diff --git a/cinder/tests/api/v1/__init__.py b/cinder/tests/api/v1/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cinder/tests/api/v1/stubs.py b/cinder/tests/api/v1/stubs.py new file mode 100644 index 0000000000..8f6883061c --- /dev/null +++ b/cinder/tests/api/v1/stubs.py @@ -0,0 +1,137 @@ +# Copyright 2010 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
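The stub helpers that follow centralize canned volume and snapshot fixtures for the v1 API tests. A rough sketch of how a test typically wires them up (the test class and assertion here are hypothetical; the `self.stubs.Set` pattern, stub names, and `fakes.HTTPRequest` helper are taken from the tests in this patch)::

    from cinder.api import extensions
    from cinder.api.v1 import volumes
    from cinder import test
    from cinder import volume
    from cinder.tests.api import fakes
    from cinder.tests.api.v1 import stubs


    class ExampleVolumeApiTest(test.TestCase):
        def setUp(self):
            super(ExampleVolumeApiTest, self).setUp()
            self.ext_mgr = extensions.ExtensionManager()
            self.ext_mgr.extensions = {}
            self.controller = volumes.VolumeController(self.ext_mgr)
            # Canned fixtures stand in for the real volume API, so the
            # controller under test never touches a database or a driver.
            self.stubs.Set(volume.api.API, 'get', stubs.stub_volume_get)

        def test_show_returns_stubbed_volume(self):
            # fakes.HTTPRequest builds a WebOb request with a fake context.
            req = fakes.HTTPRequest.blank('/v1/fake/volumes/1')
            res_dict = self.controller.show(req, '1')
            self.assertEqual('1', res_dict['volume']['id'])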
+ +import datetime + +from cinder import exception as exc + +FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' +FAKE_UUIDS = {} + + +def stub_volume(id, **kwargs): + volume = { + 'id': id, + 'user_id': 'fakeuser', + 'project_id': 'fakeproject', + 'host': 'fakehost', + 'size': 1, + 'availability_zone': 'fakeaz', + 'instance_uuid': 'fakeuuid', + 'attached_host': None, + 'mountpoint': '/', + 'attached_mode': 'rw', + 'status': 'fakestatus', + 'migration_status': None, + 'attach_status': 'attached', + 'bootable': 'false', + 'name': 'vol name', + 'display_name': 'displayname', + 'display_description': 'displaydesc', + 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), + 'snapshot_id': None, + 'source_volid': None, + 'volume_type_id': '3e196c20-3c06-11e2-81c1-0800200c9a66', + 'volume_metadata': [], + 'volume_type': {'name': 'vol_type_name'}, + 'readonly': 'False'} + + volume.update(kwargs) + return volume + + +def stub_volume_create(self, context, size, name, description, snapshot, + **param): + vol = stub_volume('1') + vol['size'] = size + vol['display_name'] = name + vol['display_description'] = description + vol['source_volid'] = None + try: + vol['snapshot_id'] = snapshot['id'] + except (KeyError, TypeError): + vol['snapshot_id'] = None + vol['availability_zone'] = param.get('availability_zone', 'fakeaz') + return vol + + +def stub_volume_create_from_image(self, context, size, name, description, + snapshot, volume_type, metadata, + availability_zone): + vol = stub_volume('1') + vol['status'] = 'creating' + vol['size'] = size + vol['display_name'] = name + vol['display_description'] = description + vol['availability_zone'] = 'cinder' + return vol + + +def stub_volume_update(self, context, *args, **param): + pass + + +def stub_volume_delete(self, context, *args, **param): + pass + + +def stub_volume_get(self, context, volume_id): + return stub_volume(volume_id) + + +def stub_volume_get_notfound(self, context, volume_id): + raise exc.NotFound + + +def stub_volume_get_all(context, search_opts=None): + return [stub_volume(100, project_id='fake'), + stub_volume(101, project_id='superfake'), + stub_volume(102, project_id='superduperfake')] + + +def stub_volume_get_all_by_project(self, context, search_opts=None): + return [stub_volume_get(self, context, '1')] + + +def stub_snapshot(id, **kwargs): + snapshot = {'id': id, + 'volume_id': 12, + 'status': 'available', + 'volume_size': 100, + 'created_at': None, + 'display_name': 'Default name', + 'display_description': 'Default description', + 'project_id': 'fake'} + + snapshot.update(kwargs) + return snapshot + + +def stub_snapshot_get_all(self): + return [stub_snapshot(100, project_id='fake'), + stub_snapshot(101, project_id='superfake'), + stub_snapshot(102, project_id='superduperfake')] + + +def stub_snapshot_get_all_by_project(self, context): + return [stub_snapshot(1)] + + +def stub_snapshot_update(self, context, *args, **param): + pass + + +def stub_service_get_all_by_topic(context, topic): + return [{'availability_zone': "zone1:host1", "disabled": 0}] diff --git a/cinder/tests/api/openstack/volume/test_limits.py b/cinder/tests/api/v1/test_limits.py similarity index 82% rename from cinder/tests/api/openstack/volume/test_limits.py rename to cinder/tests/api/v1/test_limits.py index aaa9eb845f..755a053232 100644 --- a/cinder/tests/api/openstack/volume/test_limits.py +++ b/cinder/tests/api/v1/test_limits.py @@ -1,4 +1,4 @@ -# Copyright 2011 OpenStack LLC. +# Copyright 2011 OpenStack Foundation # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -24,9 +24,9 @@ from lxml import etree import webob -from cinder.api.openstack.volume import limits -from cinder.api.openstack.volume import views -from cinder.api.openstack import xmlutil +from cinder.api.v1 import limits +from cinder.api import views +from cinder.api import xmlutil import cinder.context from cinder.openstack.common import jsonutils from cinder import test @@ -67,9 +67,7 @@ def _get_time(self): class LimitsControllerTest(BaseLimitTestSuite): - """ - Tests for `limits.LimitsController` class. - """ + """Tests for `limits.LimitsController` class.""" def setUp(self): """Run before each test.""" @@ -159,10 +157,8 @@ def test_index_json(self): }, ], - "absolute": { - "maxTotalVolumeGigabytes": 512, - "maxTotalVolumes": 5, - }, + "absolute": {"maxTotalVolumeGigabytes": 512, + "maxTotalVolumes": 5, }, }, } body = jsonutils.loads(response.body) @@ -235,9 +231,7 @@ class TestLimiter(limits.Limiter): class LimitMiddlewareTest(BaseLimitTestSuite): - """ - Tests for the `limits.RateLimitingMiddleware` class. - """ + """Tests for the `limits.RateLimitingMiddleware` class.""" @webob.dec.wsgify def _empty_app(self, request): @@ -272,7 +266,7 @@ def test_limited_request_json(self): response = request.get_response(self.app) self.assertEqual(response.status_int, 413) - self.assertTrue('Retry-After' in response.headers) + self.assertIn('Retry-After', response.headers) retry_after = int(response.headers['Retry-After']) self.assertAlmostEqual(retry_after, 60, 1) @@ -282,7 +276,7 @@ def test_limited_request_json(self): self.assertEqual(value, expected) def test_limited_request_xml(self): - """Test a rate-limited (413) response as XML""" + """Test a rate-limited (413) response as XML.""" request = webob.Request.blank("/") response = request.get_response(self.app) self.assertEqual(200, response.status_int) @@ -303,15 +297,13 @@ def test_limited_request_xml(self): class LimitTest(BaseLimitTestSuite): - """ - Tests for the `limits.Limit` class. - """ + """Tests for the `limits.Limit` class.""" def test_GET_no_delay(self): """Test a limit handles 1 GET per second.""" limit = limits.Limit("GET", "*", ".*", 1, 1) delay = limit("GET", "/anything") - self.assertEqual(None, delay) + self.assertIsNone(delay) self.assertEqual(0, limit.next_request) self.assertEqual(0, limit.last_request) @@ -319,7 +311,7 @@ def test_GET_delay(self): """Test two calls to 1 GET per second limit.""" limit = limits.Limit("GET", "*", ".*", 1, 1) delay = limit("GET", "/anything") - self.assertEqual(None, delay) + self.assertIsNone(delay) delay = limit("GET", "/anything") self.assertEqual(1, delay) @@ -329,16 +321,13 @@ def test_GET_delay(self): self.time += 4 delay = limit("GET", "/anything") - self.assertEqual(None, delay) + self.assertIsNone(delay) self.assertEqual(4, limit.next_request) self.assertEqual(4, limit.last_request) class ParseLimitsTest(BaseLimitTestSuite): - """ - Tests for the default limits parser in the in-memory - `limits.Limiter` class. 
- """ + """Tests for the default limits parser in the `limits.Limiter` class.""" def test_invalid(self): """Test that parse_limits() handles invalid input correctly.""" @@ -372,7 +361,7 @@ def test_multiple_rules(self): '(PUT, /foo*, /foo.*, 10, hour);' '(POST, /bar*, /bar.*, 5, second);' '(Say, /derp*, /derp.*, 1, day)') - except ValueError, e: + except ValueError as e: assert False, str(e) # Make sure the number of returned limits are correct @@ -401,14 +390,14 @@ def test_multiple_rules(self): class LimiterTest(BaseLimitTestSuite): - """ - Tests for the in-memory `limits.Limiter` class. - """ + """Tests for the in-memory `limits.Limiter` class.""" def setUp(self): """Run before each test.""" super(LimiterTest, self).setUp() - userlimits = {'user:user3': ''} + userlimits = {'limits.user3': '', + 'limits.user0': '(get, *, .*, 4, minute);' + '(put, *, .*, 2, minute)'} self.limiter = limits.Limiter(TEST_LIMITS, **userlimits) def _check(self, num, verb, url, username=None): @@ -422,23 +411,19 @@ def _check_sum(self, num, verb, url, username=None): return sum(item for item in results if item) def test_no_delay_GET(self): - """ - Simple test to ensure no delay on a single call for a limit verb we - didn"t set. - """ + """no delay on a single call for a limit verb we didn"t set.""" delay = self.limiter.check_for_delay("GET", "/anything") self.assertEqual(delay, (None, None)) def test_no_delay_PUT(self): - """ - Simple test to ensure no delay on a single call for a known limit. - """ + """no delay on a single call for a known limit.""" delay = self.limiter.check_for_delay("PUT", "/anything") self.assertEqual(delay, (None, None)) def test_delay_PUT(self): - """ - Ensure the 11th PUT will result in a delay of 6.0 seconds until + """test delay on 11th put request. + + the 11th PUT will result in a delay of 6.0 seconds until the next request will be granced. """ expected = [None] * 10 + [6.0] @@ -447,9 +432,10 @@ def test_delay_PUT(self): self.assertEqual(expected, results) def test_delay_POST(self): - """ - Ensure the 8th POST will result in a delay of 6.0 seconds until - the next request will be granced. + """test delay of 8th post request. + + Ensure that the 8th POST will result in a delay of 6.0 seconds + until the next request will be granced. """ expected = [None] * 7 results = list(self._check(7, "POST", "/anything")) @@ -457,22 +443,24 @@ def test_delay_POST(self): expected = 60.0 / 7.0 results = self._check_sum(1, "POST", "/anything") - self.failUnlessAlmostEqual(expected, results, 8) + self.assertAlmostEqual(expected, results, 8) def test_delay_GET(self): - """ - Ensure the 11th GET will result in NO delay. - """ + """Ensure the 11th GET will result in NO delay.""" expected = [None] * 11 results = list(self._check(11, "GET", "/anything")) + self.assertEqual(expected, results) + expected = [None] * 4 + [15.0] + results = list(self._check(5, "GET", "/foo", "user0")) self.assertEqual(expected, results) def test_delay_PUT_volumes(self): - """ - Ensure PUT on /volumes limits at 5 requests, and PUT elsewhere is still - OK after 5 requests...but then after 11 total requests, PUT limiting - kicks in. + """Test limit of PUT on /volumes. + + Ensure PUT on /volumes limits at 5 requests, and PUT elsewhere is + still OK after 5 requests... + but then after 11 total requests, PUT limiting kicks in. 
""" # First 6 requests on PUT /volumes expected = [None] * 5 + [12.0] @@ -485,7 +473,8 @@ def test_delay_PUT_volumes(self): self.assertEqual(expected, results) def test_delay_PUT_wait(self): - """ + """Test limit on PUT is lifted. + Ensure after hitting the limit and then waiting for the correct amount of time, the limit will be lifted. """ @@ -501,9 +490,7 @@ def test_delay_PUT_wait(self): self.assertEqual(expected, results) def test_multiple_delays(self): - """ - Ensure multiple requests still get a delay. - """ + """Ensure multiple requests still get a delay.""" expected = [None] * 10 + [6.0] * 10 results = list(self._check(20, "PUT", "/anything")) self.assertEqual(expected, results) @@ -514,16 +501,23 @@ def test_multiple_delays(self): results = list(self._check(10, "PUT", "/anything")) self.assertEqual(expected, results) + expected = [None] * 2 + [30.0] * 8 + results = list(self._check(10, "PUT", "/anything", "user0")) + self.assertEqual(expected, results) + def test_user_limit(self): - """ - Test user-specific limits. - """ + """Test user-specific limits.""" self.assertEqual(self.limiter.levels['user3'], []) + self.assertEqual(len(self.limiter.levels['user0']), 2) def test_multiple_users(self): - """ - Tests involving multiple users. - """ + """Tests involving multiple users.""" + + # User0 + expected = [None] * 2 + [30.0] * 8 + results = list(self._check(10, "PUT", "/anything", "user0")) + self.assertEqual(expected, results) + # User1 expected = [None] * 10 + [6.0] * 10 results = list(self._check(20, "PUT", "/anything", "user1")) @@ -553,11 +547,20 @@ def test_multiple_users(self): results = list(self._check(5, "PUT", "/anything", "user2")) self.assertEqual(expected, results) + # User0 again + expected = [28.0] + results = list(self._check(1, "PUT", "/anything", "user0")) + self.assertEqual(expected, results) + + self.time += 28.0 + + expected = [None, 30.0] + results = list(self._check(2, "PUT", "/anything", "user0")) + self.assertEqual(expected, results) + class WsgiLimiterTest(BaseLimitTestSuite): - """ - Tests for `limits.WsgiLimiter` class. - """ + """Tests for `limits.WsgiLimiter` class.""" def setUp(self): """Run before each test.""" @@ -565,13 +568,17 @@ def setUp(self): self.app = limits.WsgiLimiter(TEST_LIMITS) def _request_data(self, verb, path): - """Get data decribing a limit request verb/path.""" + """Get data describing a limit request verb/path.""" return jsonutils.dumps({"verb": verb, "path": path}) def _request(self, verb, url, username=None): - """Make sure that POSTing to the given url causes the given username - to perform the given action. Make the internal rate limiter return - delay and make sure that the WSGI app returns the correct response. + """Assert that POSTing to given url triggers given action. + + Ensure POSTing to the given url causes the given username + to perform the given action. + + Make the internal rate limiter return delay and make sure that the + WSGI app returns the correct response. 
""" if username: request = webob.Request.blank("/%s" % username) @@ -598,25 +605,25 @@ def test_invalid_methods(self): def test_good_url(self): delay = self._request("GET", "/something") - self.assertEqual(delay, None) + self.assertIsNone(delay) def test_escaping(self): delay = self._request("GET", "/something/jump%20up") - self.assertEqual(delay, None) + self.assertIsNone(delay) def test_response_to_delays(self): delay = self._request("GET", "/delayed") - self.assertEqual(delay, None) + self.assertIsNone(delay) delay = self._request("GET", "/delayed") self.assertEqual(delay, '60.00') def test_response_to_delays_usernames(self): delay = self._request("GET", "/delayed", "user1") - self.assertEqual(delay, None) + self.assertIsNone(delay) delay = self._request("GET", "/delayed", "user2") - self.assertEqual(delay, None) + self.assertIsNone(delay) delay = self._request("GET", "/delayed", "user1") self.assertEqual(delay, '60.00') @@ -626,9 +633,7 @@ def test_response_to_delays_usernames(self): class FakeHttplibSocket(object): - """ - Fake `httplib.HTTPResponse` replacement. - """ + """Fake `httplib.HTTPResponse` replacement.""" def __init__(self, response_string): """Initialize new `FakeHttplibSocket`.""" @@ -640,22 +645,19 @@ def makefile(self, _mode, _other): class FakeHttplibConnection(object): - """ - Fake `httplib.HTTPConnection`. - """ + """Fake `httplib.HTTPConnection`.""" def __init__(self, app, host): - """ - Initialize `FakeHttplibConnection`. - """ + """Initialize `FakeHttplibConnection`.""" self.app = app self.host = host def request(self, method, path, body="", headers=None): - """ - Requests made via this connection actually get translated and routed - into our WSGI app, we then wait for the response and turn it back into - an `httplib.HTTPResponse`. + """Fake method for request. + + Requests made via this connection actually get translated and + routed into our WSGI app, we then wait for the response and turn + it back into an `httplib.HTTPResponse`. """ if not headers: headers = {} @@ -698,7 +700,8 @@ def wire_HTTPConnection_to_WSGI(host, app): """ class HTTPConnectionDecorator(object): """Wraps the real HTTPConnection class so that when you instantiate - the class you might instead get a fake instance.""" + the class you might instead get a fake instance. + """ def __init__(self, wrapped): self.wrapped = wrapped @@ -715,12 +718,11 @@ def __call__(self, connection_host, *args, **kwargs): class WsgiLimiterProxyTest(BaseLimitTestSuite): - """ - Tests for the `limits.WsgiLimiterProxy` class. - """ + """Tests for the `limits.WsgiLimiterProxy` class.""" def setUp(self): - """ + """setUp for test suite. + Do some nifty HTTP/WSGI magic which allows for WSGI to be called directly by something like the `httplib` library. 
""" @@ -751,6 +753,7 @@ def test_403(self): def tearDown(self): # restore original HTTPConnection object httplib.HTTPConnection = self.oldHTTPConnection + super(WsgiLimiterProxyTest, self).tearDown() class LimitsViewBuilderTest(test.TestCase): @@ -776,26 +779,26 @@ def setUp(self): "injected_file_content_bytes": 5} def test_build_limits(self): - expected_limits = {"limits": { - "rate": [{ - "uri": "*", - "regex": ".*", - "limit": [{"value": 10, - "verb": "POST", - "remaining": 2, - "unit": "MINUTE", - "next-available": "2011-07-21T18:17:06Z"}]}, - {"uri": "*/volumes", - "regex": "^/volumes", - "limit": [{"value": 50, - "verb": "POST", - "remaining": 10, - "unit": "DAY", - "next-available": "2011-07-21T18:17:06Z"}]}], - "absolute": {"maxServerMeta": 1, - "maxImageMeta": 1, - "maxPersonality": 5, - "maxPersonalitySize": 5}}} + tdate = "2011-07-21T18:17:06Z" + expected_limits = \ + {"limits": {"rate": [{"uri": "*", + "regex": ".*", + "limit": [{"value": 10, + "verb": "POST", + "remaining": 2, + "unit": "MINUTE", + "next-available": tdate}]}, + {"uri": "*/volumes", + "regex": "^/volumes", + "limit": [{"value": 50, + "verb": "POST", + "remaining": 10, + "unit": "DAY", + "next-available": tdate}]}], + "absolute": {"maxServerMeta": 1, + "maxImageMeta": 1, + "maxPersonality": 5, + "maxPersonalitySize": 5}}} output = self.view_builder.build(self.rate_limits, self.absolute_limits) @@ -827,27 +830,27 @@ def test_index(self): serializer = limits.LimitsTemplate() fixture = { "limits": { - "rate": [{ - "uri": "*", - "regex": ".*", - "limit": [{ - "value": 10, - "verb": "POST", - "remaining": 2, - "unit": "MINUTE", - "next-available": "2011-12-15T22:42:45Z"}]}, - {"uri": "*/servers", - "regex": "^/servers", - "limit": [{ - "value": 50, - "verb": "POST", - "remaining": 10, - "unit": "DAY", - "next-available": "2011-12-15T22:42:45Z"}]}], - "absolute": {"maxServerMeta": 1, - "maxImageMeta": 1, - "maxPersonality": 5, - "maxPersonalitySize": 10240}}} + "rate": [{ + "uri": "*", + "regex": ".*", + "limit": [{ + "value": 10, + "verb": "POST", + "remaining": 2, + "unit": "MINUTE", + "next-available": "2011-12-15T22:42:45Z"}]}, + {"uri": "*/servers", + "regex": "^/servers", + "limit": [{ + "value": 50, + "verb": "POST", + "remaining": 10, + "unit": "DAY", + "next-available": "2011-12-15T22:42:45Z"}]}], + "absolute": {"maxServerMeta": 1, + "maxImageMeta": 1, + "maxPersonality": 5, + "maxPersonalitySize": 10240}}} output = serializer.serialize(fixture) root = etree.XML(output) @@ -873,8 +876,9 @@ def test_index(self): for j, limit in enumerate(rate_limits): for key in ['verb', 'value', 'remaining', 'unit', 'next-available']: - self.assertEqual(limit.get(key), - str(fixture['limits']['rate'][i]['limit'][j][key])) + self.assertEqual( + limit.get(key), + str(fixture['limits']['rate'][i]['limit'][j][key])) def test_index_no_limits(self): serializer = limits.LimitsTemplate() diff --git a/cinder/tests/api/v1/test_snapshot_metadata.py b/cinder/tests/api/v1/test_snapshot_metadata.py new file mode 100644 index 0000000000..427d694eba --- /dev/null +++ b/cinder/tests/api/v1/test_snapshot_metadata.py @@ -0,0 +1,555 @@ +# Copyright 2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import uuid + +from oslo.config import cfg +import webob + +from cinder.api import extensions +from cinder.api.v1 import snapshot_metadata +from cinder.api.v1 import snapshots +import cinder.db +from cinder import exception +from cinder.openstack.common import jsonutils +from cinder import test +from cinder.tests.api import fakes + + +CONF = cfg.CONF + + +def return_create_snapshot_metadata_max(context, + snapshot_id, + metadata, + delete): + return stub_max_snapshot_metadata() + + +def return_create_snapshot_metadata(context, snapshot_id, metadata, delete): + return stub_snapshot_metadata() + + +def return_create_snapshot_metadata_insensitive(context, snapshot_id, + metadata, delete): + return stub_snapshot_metadata_insensitive() + + +def return_new_snapshot_metadata(context, snapshot_id, metadata, delete): + return stub_new_snapshot_metadata() + + +def return_snapshot_metadata(context, snapshot_id): + if not isinstance(snapshot_id, str) or not len(snapshot_id) == 36: + msg = 'id %s must be a uuid in return snapshot metadata' % snapshot_id + raise Exception(msg) + return stub_snapshot_metadata() + + +def return_empty_snapshot_metadata(context, snapshot_id): + return {} + + +def return_empty_container_metadata(context, snapshot_id, metadata, delete): + return {} + + +def delete_snapshot_metadata(context, snapshot_id, key): + pass + + +def stub_snapshot_metadata(): + metadata = { + "key1": "value1", + "key2": "value2", + "key3": "value3", + } + return metadata + + +def stub_snapshot_metadata_insensitive(): + metadata = { + "key1": "value1", + "key2": "value2", + "key3": "value3", + "KEY4": "value4", + } + return metadata + + +def stub_new_snapshot_metadata(): + metadata = { + 'key10': 'value10', + 'key99': 'value99', + 'KEY20': 'value20', + } + return metadata + + +def stub_max_snapshot_metadata(): + metadata = {"metadata": {}} + for num in range(CONF.quota_metadata_items): + metadata['metadata']['key%i' % num] = "blah" + return metadata + + +def return_snapshot(context, snapshot_id): + return {'id': '0cc3346e-9fef-4445-abe6-5d2b2690ec64', + 'name': 'fake', + 'status': 'available', + 'metadata': {}} + + +def return_volume(context, volume_id): + return {'id': 'fake-vol-id', + 'size': 100, + 'name': 'fake', + 'host': 'fake-host', + 'status': 'available', + 'encryption_key_id': None, + 'volume_type_id': None, + 'migration_status': None, + 'metadata': {}} + + +def return_snapshot_nonexistent(context, snapshot_id): + raise exception.SnapshotNotFound('bogus test message') + + +def fake_update_snapshot_metadata(self, context, snapshot, diff): + pass + + +class SnapshotMetaDataTest(test.TestCase): + + def setUp(self): + super(SnapshotMetaDataTest, self).setUp() + self.volume_api = cinder.volume.api.API() + fakes.stub_out_key_pair_funcs(self.stubs) + self.stubs.Set(cinder.db, 'volume_get', return_volume) + self.stubs.Set(cinder.db, 'snapshot_get', return_snapshot) + self.stubs.Set(cinder.db, 'snapshot_metadata_get', + return_snapshot_metadata) + + self.stubs.Set(self.volume_api, 'update_snapshot_metadata', + fake_update_snapshot_metadata) + + self.ext_mgr = extensions.ExtensionManager() + 
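+ # Start with no API extensions loaded so only the core snapshot metadata handlers are exercised.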
self.ext_mgr.extensions = {} + self.snapshot_controller = snapshots.SnapshotsController(self.ext_mgr) + self.controller = snapshot_metadata.Controller() + self.req_id = str(uuid.uuid4()) + self.url = '/v1/fake/snapshots/%s/metadata' % self.req_id + + snap = {"volume_size": 100, + "volume_id": "fake-vol-id", + "display_name": "Volume Test Name", + "display_description": "Volume Test Desc", + "availability_zone": "zone1:host1", + "host": "fake-host", + "metadata": {}} + body = {"snapshot": snap} + req = fakes.HTTPRequest.blank('/v1/snapshots') + self.snapshot_controller.create(req, body) + + def test_index(self): + req = fakes.HTTPRequest.blank(self.url) + res_dict = self.controller.index(req, self.req_id) + + expected = { + 'metadata': { + 'key1': 'value1', + 'key2': 'value2', + 'key3': 'value3', + }, + } + self.assertEqual(expected, res_dict) + + def test_index_nonexistent_snapshot(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_get', + return_snapshot_nonexistent) + req = fakes.HTTPRequest.blank(self.url) + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.index, req, self.url) + + def test_index_no_data(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_get', + return_empty_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url) + res_dict = self.controller.index(req, self.req_id) + expected = {'metadata': {}} + self.assertEqual(expected, res_dict) + + def test_show(self): + req = fakes.HTTPRequest.blank(self.url + '/key2') + res_dict = self.controller.show(req, self.req_id, 'key2') + expected = {'meta': {'key2': 'value2'}} + self.assertEqual(expected, res_dict) + + def test_show_nonexistent_snapshot(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_get', + return_snapshot_nonexistent) + req = fakes.HTTPRequest.blank(self.url + '/key2') + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.show, req, self.req_id, 'key2') + + def test_show_meta_not_found(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_get', + return_empty_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key6') + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.show, req, self.req_id, 'key6') + + def test_delete(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_get', + return_snapshot_metadata) + self.stubs.Set(cinder.db, 'snapshot_metadata_delete', + delete_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key2') + req.method = 'DELETE' + res = self.controller.delete(req, self.req_id, 'key2') + + self.assertEqual(200, res.status_int) + + def test_delete_nonexistent_snapshot(self): + self.stubs.Set(cinder.db, 'snapshot_get', + return_snapshot_nonexistent) + req = fakes.HTTPRequest.blank(self.url + '/key1') + req.method = 'DELETE' + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.delete, req, self.req_id, 'key1') + + def test_delete_meta_not_found(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_get', + return_empty_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key6') + req.method = 'DELETE' + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.delete, req, self.req_id, 'key6') + + def test_create(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_get', + return_empty_snapshot_metadata) + self.stubs.Set(cinder.db, 'snapshot_metadata_update', + return_create_snapshot_metadata) + + req = fakes.HTTPRequest.blank('/v1/snapshot_metadata') + req.method = 'POST' + req.content_type = "application/json" + body = {"metadata": {"key1": "value1", + "key2": "value2", + "key3": "value3"}} + req.body = 
jsonutils.dumps(body) + res_dict = self.controller.create(req, self.req_id, body) + self.assertEqual(body, res_dict) + + def test_create_with_keys_in_uppercase_and_lowercase(self): + # if the keys in uppercase_and_lowercase, should return the one + # which server added + self.stubs.Set(cinder.db, 'snapshot_metadata_get', + return_empty_snapshot_metadata) + self.stubs.Set(cinder.db, 'snapshot_metadata_update', + return_create_snapshot_metadata_insensitive) + + req = fakes.HTTPRequest.blank('/v1/snapshot_metadata') + req.method = 'POST' + req.content_type = "application/json" + body = {"metadata": {"key1": "value1", + "KEY1": "value1", + "key2": "value2", + "KEY2": "value2", + "key3": "value3", + "KEY4": "value4"}} + expected = {"metadata": {"key1": "value1", + "key2": "value2", + "key3": "value3", + "KEY4": "value4"}} + req.body = jsonutils.dumps(body) + res_dict = self.controller.create(req, self.req_id, body) + self.assertEqual(expected, res_dict) + + def test_create_empty_body(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_update', + return_create_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url) + req.method = 'POST' + req.headers["content-type"] = "application/json" + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.create, req, self.req_id, None) + + def test_create_item_empty_key(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_update', + return_create_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key1') + req.method = 'PUT' + body = {"meta": {"": "value1"}} + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.create, req, self.req_id, body) + + def test_create_item_key_too_long(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_update', + return_create_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key1') + req.method = 'PUT' + body = {"meta": {("a" * 260): "value1"}} + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.create, + req, self.req_id, body) + + def test_create_nonexistent_snapshot(self): + self.stubs.Set(cinder.db, 'snapshot_get', + return_snapshot_nonexistent) + self.stubs.Set(cinder.db, 'snapshot_metadata_get', + return_snapshot_metadata) + self.stubs.Set(cinder.db, 'snapshot_metadata_update', + return_create_snapshot_metadata) + + req = fakes.HTTPRequest.blank('/v1/snapshot_metadata') + req.method = 'POST' + req.content_type = "application/json" + body = {"metadata": {"key9": "value9"}} + req.body = jsonutils.dumps(body) + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.create, req, self.req_id, body) + + def test_update_all(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_get', + return_create_snapshot_metadata) + self.stubs.Set(cinder.db, 'snapshot_metadata_update', + return_new_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url) + req.method = 'PUT' + req.content_type = "application/json" + expected = { + 'metadata': { + 'key10': 'value10', + 'key99': 'value99', + 'KEY20': 'value20', + }, + } + req.body = jsonutils.dumps(expected) + res_dict = self.controller.update_all(req, self.req_id, expected) + + self.assertEqual(expected, res_dict) + + def test_update_all_with_keys_in_uppercase_and_lowercase(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_get', + return_create_snapshot_metadata) + self.stubs.Set(cinder.db, 'snapshot_metadata_update', + 
return_new_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url) + req.method = 'PUT' + req.content_type = "application/json" + body = { + 'metadata': { + 'key10': 'value10', + 'KEY10': 'value10', + 'key99': 'value99', + 'KEY20': 'value20', + }, + } + expected = { + 'metadata': { + 'key10': 'value10', + 'key99': 'value99', + 'KEY20': 'value20', + }, + } + req.body = jsonutils.dumps(expected) + res_dict = self.controller.update_all(req, self.req_id, body) + + self.assertEqual(expected, res_dict) + + def test_update_all_empty_container(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_update', + return_empty_container_metadata) + req = fakes.HTTPRequest.blank(self.url) + req.method = 'PUT' + req.content_type = "application/json" + expected = {'metadata': {}} + req.body = jsonutils.dumps(expected) + res_dict = self.controller.update_all(req, self.req_id, expected) + + self.assertEqual(expected, res_dict) + + def test_update_all_malformed_container(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_update', + return_create_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url) + req.method = 'PUT' + req.content_type = "application/json" + expected = {'meta': {}} + req.body = jsonutils.dumps(expected) + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.update_all, req, self.req_id, + expected) + + def test_update_all_malformed_data(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_update', + return_create_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url) + req.method = 'PUT' + req.content_type = "application/json" + expected = {'metadata': ['asdf']} + req.body = jsonutils.dumps(expected) + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.update_all, req, self.req_id, + expected) + + def test_update_all_nonexistent_snapshot(self): + self.stubs.Set(cinder.db, 'snapshot_get', return_snapshot_nonexistent) + req = fakes.HTTPRequest.blank(self.url) + req.method = 'PUT' + req.content_type = "application/json" + body = {'metadata': {'key10': 'value10'}} + req.body = jsonutils.dumps(body) + + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.update_all, req, '100', body) + + def test_update_item(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_update', + return_create_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key1') + req.method = 'PUT' + body = {"meta": {"key1": "value1"}} + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + res_dict = self.controller.update(req, self.req_id, 'key1', body) + expected = {'meta': {'key1': 'value1'}} + self.assertEqual(expected, res_dict) + + def test_update_item_nonexistent_snapshot(self): + self.stubs.Set(cinder.db, 'snapshot_get', + return_snapshot_nonexistent) + req = fakes.HTTPRequest.blank( + '/v1.1/fake/snapshots/asdf/metadata/key1') + req.method = 'PUT' + body = {"meta": {"key1": "value1"}} + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.update, req, self.req_id, 'key1', + body) + + def test_update_item_empty_body(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_update', + return_create_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key1') + req.method = 'PUT' + req.headers["content-type"] = "application/json" + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.update, req, self.req_id, 'key1', + None) + + def test_update_item_empty_key(self): + self.stubs.Set(cinder.db, 
'snapshot_metadata_update', + return_create_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key1') + req.method = 'PUT' + body = {"meta": {"": "value1"}} + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.update, req, self.req_id, '', body) + + def test_update_item_key_too_long(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_update', + return_create_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key1') + req.method = 'PUT' + body = {"meta": {("a" * 260): "value1"}} + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, + self.controller.update, + req, self.req_id, ("a" * 260), body) + + def test_update_item_value_too_long(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_update', + return_create_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key1') + req.method = 'PUT' + body = {"meta": {"key1": ("a" * 260)}} + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, + self.controller.update, + req, self.req_id, "key1", body) + + def test_update_item_too_many_keys(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_update', + return_create_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key1') + req.method = 'PUT' + body = {"meta": {"key1": "value1", "key2": "value2"}} + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.update, req, self.req_id, 'key1', + body) + + def test_update_item_body_uri_mismatch(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_update', + return_create_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url + '/bad') + req.method = 'PUT' + body = {"meta": {"key1": "value1"}} + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.update, req, self.req_id, 'bad', + body) + + def test_invalid_metadata_items_on_create(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_update', + return_create_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url) + req.method = 'POST' + req.headers["content-type"] = "application/json" + + #test for long key + data = {"metadata": {"a" * 260: "value1"}} + req.body = jsonutils.dumps(data) + self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, + self.controller.create, req, self.req_id, data) + + #test for long value + data = {"metadata": {"key": "v" * 260}} + req.body = jsonutils.dumps(data) + self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, + self.controller.create, req, self.req_id, data) + + #test for empty key. 
+ data = {"metadata": {"": "value1"}} + req.body = jsonutils.dumps(data) + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.create, req, self.req_id, data) diff --git a/cinder/tests/api/openstack/volume/test_snapshots.py b/cinder/tests/api/v1/test_snapshots.py similarity index 70% rename from cinder/tests/api/openstack/volume/test_snapshots.py rename to cinder/tests/api/v1/test_snapshots.py index 6e32ea14e9..c5b0b8c287 100644 --- a/cinder/tests/api/openstack/volume/test_snapshots.py +++ b/cinder/tests/api/v1/test_snapshots.py @@ -18,17 +18,16 @@ from lxml import etree import webob -from cinder.api.openstack.volume import snapshots +from cinder.api.v1 import snapshots from cinder import db from cinder import exception -from cinder import flags from cinder.openstack.common import log as logging from cinder import test +from cinder.tests.api import fakes +from cinder.tests.api.v1 import stubs from cinder import volume -from cinder.tests.api.openstack import fakes -FLAGS = flags.FLAGS LOG = logging.getLogger(__name__) UUID = '00000000-0000-0000-0000-000000000001' @@ -36,22 +35,23 @@ def _get_default_snapshot_param(): - return { - 'id': UUID, - 'volume_id': 12, - 'status': 'available', - 'volume_size': 100, - 'created_at': None, - 'display_name': 'Default name', - 'display_description': 'Default description', - } + return {'id': UUID, + 'volume_id': 12, + 'status': 'available', + 'volume_size': 100, + 'created_at': None, + 'display_name': 'Default name', + 'display_description': 'Default description', } -def stub_snapshot_create(self, context, volume_id, name, description): +def stub_snapshot_create(self, context, + volume_id, name, + description, metadata): snapshot = _get_default_snapshot_param() snapshot['volume_id'] = volume_id snapshot['display_name'] = name snapshot['display_description'] = description + snapshot['metadata'] = metadata return snapshot @@ -79,49 +79,50 @@ def setUp(self): self.controller = snapshots.SnapshotsController() self.stubs.Set(db, 'snapshot_get_all_by_project', - fakes.stub_snapshot_get_all_by_project) + stubs.stub_snapshot_get_all_by_project) self.stubs.Set(db, 'snapshot_get_all', - fakes.stub_snapshot_get_all) + stubs.stub_snapshot_get_all) def test_snapshot_create(self): self.stubs.Set(volume.api.API, "create_snapshot", stub_snapshot_create) - self.stubs.Set(volume.api.API, 'get', fakes.stub_volume_get) + self.stubs.Set(volume.api.API, 'get', stubs.stub_volume_get) snapshot = {"volume_id": '12', - "force": False, - "display_name": "Snapshot Test Name", - "display_description": "Snapshot Test Desc"} + "force": False, + "display_name": "Snapshot Test Name", + "display_description": "Snapshot Test Desc"} body = dict(snapshot=snapshot) req = fakes.HTTPRequest.blank('/v1/snapshots') resp_dict = self.controller.create(req, body) - self.assertTrue('snapshot' in resp_dict) + self.assertIn('snapshot', resp_dict) self.assertEqual(resp_dict['snapshot']['display_name'], - snapshot['display_name']) + snapshot['display_name']) self.assertEqual(resp_dict['snapshot']['display_description'], - snapshot['display_description']) + snapshot['display_description']) def test_snapshot_create_force(self): - self.stubs.Set(volume.api.API, "create_snapshot_force", - stub_snapshot_create) - self.stubs.Set(volume.api.API, 'get', fakes.stub_volume_get) + self.stubs.Set(volume.api.API, + "create_snapshot_force", + stub_snapshot_create) + self.stubs.Set(volume.api.API, 'get', stubs.stub_volume_get) snapshot = {"volume_id": '12', - "force": True, - "display_name": "Snapshot Test 
Name", - "display_description": "Snapshot Test Desc"} + "force": True, + "display_name": "Snapshot Test Name", + "display_description": "Snapshot Test Desc"} body = dict(snapshot=snapshot) req = fakes.HTTPRequest.blank('/v1/snapshots') resp_dict = self.controller.create(req, body) - self.assertTrue('snapshot' in resp_dict) + self.assertIn('snapshot', resp_dict) self.assertEqual(resp_dict['snapshot']['display_name'], - snapshot['display_name']) + snapshot['display_name']) self.assertEqual(resp_dict['snapshot']['display_description'], - snapshot['display_description']) + snapshot['display_description']) snapshot = {"volume_id": "12", - "force": "**&&^^%%$$##@@", - "display_name": "Snapshot Test Name", - "display_description": "Snapshot Test Desc"} + "force": "**&&^^%%$$##@@", + "display_name": "Snapshot Test Name", + "display_description": "Snapshot Test Desc"} body = dict(snapshot=snapshot) req = fakes.HTTPRequest.blank('/v1/snapshots') self.assertRaises(exception.InvalidParameterValue, @@ -129,13 +130,25 @@ def test_snapshot_create_force(self): req, body) + def test_snapshot_create_without_volume_id(self): + snapshot_name = 'Snapshot Test Name' + snapshot_description = 'Snapshot Test Desc' + body = { + "snapshot": { + "force": True, + "name": snapshot_name, + "description": snapshot_description + } + } + req = fakes.HTTPRequest.blank('/v1/snapshots') + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.create, req, body) + def test_snapshot_update(self): self.stubs.Set(volume.api.API, "get_snapshot", stub_snapshot_get) self.stubs.Set(volume.api.API, "update_snapshot", - fakes.stub_snapshot_update) - updates = { - "display_name": "Updated Test Name", - } + stubs.stub_snapshot_update) + updates = {"display_name": "Updated Test Name", } body = {"snapshot": updates} req = fakes.HTTPRequest.blank('/v1/snapshots/%s' % UUID) res_dict = self.controller.update(req, UUID, body) @@ -147,8 +160,9 @@ def test_snapshot_update(self): 'created_at': None, 'display_name': 'Updated Test Name', 'display_description': 'Default description', + 'metadata': {}, }} - self.assertEquals(expected, res_dict) + self.assertEqual(expected, res_dict) def test_snapshot_update_missing_body(self): body = {} @@ -195,7 +209,7 @@ def test_snapshot_show(self): req = fakes.HTTPRequest.blank('/v1/snapshots/%s' % UUID) resp_dict = self.controller.show(req, UUID) - self.assertTrue('snapshot' in resp_dict) + self.assertIn('snapshot', resp_dict) self.assertEqual(resp_dict['snapshot']['id'], UUID) def test_snapshot_show_invalid_id(self): @@ -207,12 +221,13 @@ def test_snapshot_show_invalid_id(self): snapshot_id) def test_snapshot_detail(self): - self.stubs.Set(volume.api.API, "get_all_snapshots", - stub_snapshot_get_all) + self.stubs.Set(volume.api.API, + "get_all_snapshots", + stub_snapshot_get_all) req = fakes.HTTPRequest.blank('/v1/snapshots/detail') resp_dict = self.controller.detail(req) - self.assertTrue('snapshots' in resp_dict) + self.assertIn('snapshots', resp_dict) resp_snapshots = resp_dict['snapshots'] self.assertEqual(len(resp_snapshots), 1) @@ -222,11 +237,11 @@ def test_snapshot_detail(self): def test_snapshot_list_by_status(self): def stub_snapshot_get_all_by_project(context, project_id): return [ - fakes.stub_snapshot(1, display_name='backup1', + stubs.stub_snapshot(1, display_name='backup1', status='available'), - fakes.stub_snapshot(2, display_name='backup2', + stubs.stub_snapshot(2, display_name='backup2', status='available'), - fakes.stub_snapshot(3, display_name='backup3', + stubs.stub_snapshot(3, 
display_name='backup3', status='creating'), ] self.stubs.Set(db, 'snapshot_get_all_by_project', @@ -246,7 +261,7 @@ def stub_snapshot_get_all_by_project(context, project_id): resp = self.controller.index(req) self.assertEqual(len(resp['snapshots']), 2) for snapshot in resp['snapshots']: - self.assertEquals(snapshot['status'], 'available') + self.assertEqual(snapshot['status'], 'available') # no match req = fakes.HTTPRequest.blank('/v1/snapshots?status=error') resp = self.controller.index(req) @@ -255,9 +270,9 @@ def stub_snapshot_get_all_by_project(context, project_id): def test_snapshot_list_by_volume(self): def stub_snapshot_get_all_by_project(context, project_id): return [ - fakes.stub_snapshot(1, volume_id='vol1', status='creating'), - fakes.stub_snapshot(2, volume_id='vol1', status='available'), - fakes.stub_snapshot(3, volume_id='vol2', status='available'), + stubs.stub_snapshot(1, volume_id='vol1', status='creating'), + stubs.stub_snapshot(2, volume_id='vol1', status='available'), + stubs.stub_snapshot(3, volume_id='vol2', status='available'), ] self.stubs.Set(db, 'snapshot_get_all_by_project', stub_snapshot_get_all_by_project) @@ -284,9 +299,9 @@ def stub_snapshot_get_all_by_project(context, project_id): def test_snapshot_list_by_name(self): def stub_snapshot_get_all_by_project(context, project_id): return [ - fakes.stub_snapshot(1, display_name='backup1'), - fakes.stub_snapshot(2, display_name='backup2'), - fakes.stub_snapshot(3, display_name='backup3'), + stubs.stub_snapshot(1, display_name='backup1'), + stubs.stub_snapshot(2, display_name='backup2'), + stubs.stub_snapshot(3, display_name='backup3'), ] self.stubs.Set(db, 'snapshot_get_all_by_project', stub_snapshot_get_all_by_project) @@ -299,7 +314,7 @@ def stub_snapshot_get_all_by_project(context, project_id): req = fakes.HTTPRequest.blank('/v1/snapshots?display_name=backup2') resp = self.controller.index(req) self.assertEqual(len(resp['snapshots']), 1) - self.assertEquals(resp['snapshots'][0]['display_name'], 'backup2') + self.assertEqual(resp['snapshots'][0]['display_name'], 'backup2') # filter no match req = fakes.HTTPRequest.blank('/v1/snapshots?display_name=backup4') resp = self.controller.index(req) @@ -310,26 +325,52 @@ def test_admin_list_snapshots_limited_to_project(self): use_admin_context=True) res = self.controller.index(req) - self.assertTrue('snapshots' in res) + self.assertIn('snapshots', res) self.assertEqual(1, len(res['snapshots'])) + def test_list_snapshots_with_limit_and_offset(self): + def list_snapshots_with_limit_and_offset(is_admin): + def stub_snapshot_get_all_by_project(context, project_id): + return [ + stubs.stub_snapshot(1, display_name='backup1'), + stubs.stub_snapshot(2, display_name='backup2'), + stubs.stub_snapshot(3, display_name='backup3'), + ] + + self.stubs.Set(db, 'snapshot_get_all_by_project', + stub_snapshot_get_all_by_project) + + req = fakes.HTTPRequest.blank('/v1/fake/snapshots?limit=1\ + &offset=1', + use_admin_context=is_admin) + res = self.controller.index(req) + + self.assertIn('snapshots', res) + self.assertEqual(1, len(res['snapshots'])) + self.assertEqual(2, res['snapshots'][0]['id']) + + #admin case + list_snapshots_with_limit_and_offset(is_admin=True) + #non_admin case + list_snapshots_with_limit_and_offset(is_admin=False) + def test_admin_list_snapshots_all_tenants(self): req = fakes.HTTPRequest.blank('/v1/fake/snapshots?all_tenants=1', use_admin_context=True) res = self.controller.index(req) - self.assertTrue('snapshots' in res) + self.assertIn('snapshots', res) 
def test_all_tenants_non_admin_gets_all_tenants(self): req = fakes.HTTPRequest.blank('/v1/fake/snapshots?all_tenants=1') res = self.controller.index(req) - self.assertTrue('snapshots' in res) + self.assertIn('snapshots', res) self.assertEqual(1, len(res['snapshots'])) def test_non_admin_get_by_project(self): req = fakes.HTTPRequest.blank('/v1/fake/snapshots') res = self.controller.index(req) - self.assertTrue('snapshots' in res) + self.assertIn('snapshots', res) self.assertEqual(1, len(res['snapshots'])) @@ -350,38 +391,31 @@ def test_snapshot_show_create_serializer(self): created_at=datetime.datetime.now(), display_name='snap_name', display_description='snap_desc', - volume_id='vol_id', - ) + volume_id='vol_id', ) text = serializer.serialize(dict(snapshot=raw_snapshot)) - print text tree = etree.fromstring(text) self._verify_snapshot(raw_snapshot, tree) def test_snapshot_index_detail_serializer(self): serializer = snapshots.SnapshotsTemplate() - raw_snapshots = [dict( - id='snap1_id', - status='snap1_status', - size=1024, - created_at=datetime.datetime.now(), - display_name='snap1_name', - display_description='snap1_desc', - volume_id='vol1_id', - ), - dict( - id='snap2_id', - status='snap2_status', - size=1024, - created_at=datetime.datetime.now(), - display_name='snap2_name', - display_description='snap2_desc', - volume_id='vol2_id', - )] + raw_snapshots = [dict(id='snap1_id', + status='snap1_status', + size=1024, + created_at=datetime.datetime.now(), + display_name='snap1_name', + display_description='snap1_desc', + volume_id='vol1_id', ), + dict(id='snap2_id', + status='snap2_status', + size=1024, + created_at=datetime.datetime.now(), + display_name='snap2_name', + display_description='snap2_desc', + volume_id='vol2_id', )] text = serializer.serialize(dict(snapshots=raw_snapshots)) - print text tree = etree.fromstring(text) self.assertEqual('snapshots', tree.tag) @@ -392,9 +426,7 @@ def test_snapshot_index_detail_serializer(self): class SnapshotsUnprocessableEntityTestCase(test.TestCase): - """ - Tests of places we throw 422 Unprocessable Entity from - """ + """Tests of places we throw 422 Unprocessable Entity.""" def setUp(self): super(SnapshotsUnprocessableEntityTestCase, self).setUp() diff --git a/cinder/tests/api/openstack/volume/test_types.py b/cinder/tests/api/v1/test_types.py similarity index 92% rename from cinder/tests/api/openstack/volume/test_types.py rename to cinder/tests/api/v1/test_types.py index 2ad26de2e0..3f72989de7 100644 --- a/cinder/tests/api/openstack/volume/test_types.py +++ b/cinder/tests/api/v1/test_types.py @@ -1,4 +1,4 @@ -# Copyright 2011 OpenStack LLC. +# Copyright 2011 OpenStack Foundation # All Rights Reserved.
# # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -16,22 +16,21 @@ from lxml import etree import webob -from cinder.api.openstack.volume.views import types as views_types -from cinder.api.openstack.volume import types +from cinder.api.v1 import types +from cinder.api.views import types as views_types from cinder import exception from cinder.openstack.common import timeutils from cinder import test +from cinder.tests.api import fakes from cinder.volume import volume_types -from cinder.tests.api.openstack import fakes def stub_volume_type(id): - specs = { - "key1": "value1", - "key2": "value2", - "key3": "value3", - "key4": "value4", - "key5": "value5"} + specs = {"key1": "value1", + "key2": "value2", + "key3": "value3", + "key4": "value4", + "key5": "value5"} return dict(id=id, name='vol_type_%s' % str(id), extra_specs=specs) @@ -120,7 +119,7 @@ def test_view_builder_show(self): request = fakes.HTTPRequest.blank("/v1") output = view_builder.show(request, raw_volume_type) - self.assertTrue('volume_type' in output) + self.assertIn('volume_type', output) expected_volume_type = dict(name='new_type', extra_specs={}, id=42) @@ -143,7 +142,7 @@ def test_view_builder_list(self): request = fakes.HTTPRequest.blank("/v1") output = view_builder.index(request, raw_volume_types) - self.assertTrue('volume_types' in output) + self.assertIn('volume_types', output) for i in range(0, 10): expected_volume_type = dict(name='new_type', extra_specs={}, @@ -162,7 +161,7 @@ def _verify_volume_type(self, vtype, tree): self.assertEqual('extra_specs', extra_specs.tag) seen = set(vtype['extra_specs'].keys()) for child in extra_specs: - self.assertTrue(child.tag in seen) + self.assertIn(child.tag, seen) self.assertEqual(vtype['extra_specs'][child.tag], child.text) seen.remove(child.tag) self.assertEqual(len(seen), 0) @@ -180,7 +179,7 @@ def test_index_serializer(self): self.assertEqual(len(vtypes), len(tree)) for child in tree: name = child.get('name') - self.assertTrue(name in vtypes) + self.assertIn(name, vtypes) self._verify_volume_type(vtypes[name], child) def test_voltype_serializer(self): diff --git a/cinder/tests/api/v1/test_volume_metadata.py b/cinder/tests/api/v1/test_volume_metadata.py new file mode 100644 index 0000000000..56d90b3036 --- /dev/null +++ b/cinder/tests/api/v1/test_volume_metadata.py @@ -0,0 +1,536 @@ +# Copyright 2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
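Every test in the new module below swaps the DB entry points the metadata controller touches for callables that return canned data. A condensed, stand-alone sketch of that substitute-and-restore idea using plain attribute swapping; FakeDb is a hypothetical stand-in for cinder.db, while the tests themselves use self.stubs.Set() from the base test class, which restores the original on tearDown:

class FakeDb(object):
    def volume_metadata_get(self, context, volume_id):
        raise RuntimeError('unit tests must not reach the real DB')

def fake_metadata_get(context, volume_id):
    # Canned result mirroring stub_volume_metadata() below.
    return {'key1': 'value1', 'key2': 'value2', 'key3': 'value3'}

db = FakeDb()
original = db.volume_metadata_get
db.volume_metadata_get = fake_metadata_get    # what stubs.Set() does
try:
    assert db.volume_metadata_get(None, 'vol-id')['key2'] == 'value2'
finally:
    db.volume_metadata_get = original         # what tearDown() undoes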
+ +import uuid + +from oslo.config import cfg +import webob + +from cinder.api import extensions +from cinder.api.v1 import volume_metadata +from cinder.api.v1 import volumes +import cinder.db +from cinder import exception +from cinder.openstack.common import jsonutils +from cinder import test +from cinder.tests.api import fakes +from cinder.tests.api.v1 import stubs + + +CONF = cfg.CONF + + +def return_create_volume_metadata_max(context, volume_id, metadata, delete): + return stub_max_volume_metadata() + + +def return_create_volume_metadata(context, volume_id, metadata, delete): + return stub_volume_metadata() + + +def return_new_volume_metadata(context, volume_id, metadata, delete): + return stub_new_volume_metadata() + + +def return_create_volume_metadata_insensitive(context, snapshot_id, + metadata, delete): + return stub_volume_metadata_insensitive() + + +def return_volume_metadata(context, volume_id): + if not isinstance(volume_id, str) or not len(volume_id) == 36: + msg = 'id %s must be a uuid in return volume metadata' % volume_id + raise Exception(msg) + return stub_volume_metadata() + + +def return_empty_volume_metadata(context, volume_id): + return {} + + +def return_empty_container_metadata(context, volume_id, metadata, delete): + return {} + + +def delete_volume_metadata(context, volume_id, key): + pass + + +def stub_volume_metadata(): + metadata = { + "key1": "value1", + "key2": "value2", + "key3": "value3", + } + return metadata + + +def stub_new_volume_metadata(): + metadata = { + 'key10': 'value10', + 'key99': 'value99', + 'KEY20': 'value20', + } + return metadata + + +def stub_volume_metadata_insensitive(): + metadata = { + "key1": "value1", + "key2": "value2", + "key3": "value3", + "KEY4": "value4", + } + return metadata + + +def stub_max_volume_metadata(): + metadata = {"metadata": {}} + for num in range(CONF.quota_metadata_items): + metadata['metadata']['key%i' % num] = "blah" + return metadata + + +def return_volume(context, volume_id): + return {'id': '0cc3346e-9fef-4445-abe6-5d2b2690ec64', + 'name': 'fake', + 'metadata': {}} + + +def return_volume_nonexistent(context, volume_id): + raise exception.VolumeNotFound('bogus test message') + + +def fake_update_volume_metadata(self, context, volume, diff): + pass + + +class volumeMetaDataTest(test.TestCase): + + def setUp(self): + super(volumeMetaDataTest, self).setUp() + self.volume_api = cinder.volume.api.API() + fakes.stub_out_key_pair_funcs(self.stubs) + self.stubs.Set(cinder.db, 'volume_get', return_volume) + self.stubs.Set(cinder.db, 'volume_metadata_get', + return_volume_metadata) + self.stubs.Set(cinder.db, 'service_get_all_by_topic', + stubs.stub_service_get_all_by_topic) + + self.stubs.Set(self.volume_api, 'update_volume_metadata', + fake_update_volume_metadata) + + self.ext_mgr = extensions.ExtensionManager() + self.ext_mgr.extensions = {} + self.volume_controller = volumes.VolumeController(self.ext_mgr) + self.controller = volume_metadata.Controller() + self.req_id = str(uuid.uuid4()) + self.url = '/v1/fake/volumes/%s/metadata' % self.req_id + + vol = {"size": 100, + "display_name": "Volume Test Name", + "display_description": "Volume Test Desc", + "availability_zone": "zone1:host1", + "metadata": {}} + body = {"volume": vol} + req = fakes.HTTPRequest.blank('/v1/volumes') + self.volume_controller.create(req, body) + + def test_index(self): + req = fakes.HTTPRequest.blank(self.url) + res_dict = self.controller.index(req, self.req_id) + + expected = { + 'metadata': { + 'key1': 'value1', + 'key2': 'value2', + 
'key3': 'value3', + }, + } + self.assertEqual(expected, res_dict) + + def test_index_nonexistent_volume(self): + self.stubs.Set(cinder.db, 'volume_metadata_get', + return_volume_nonexistent) + req = fakes.HTTPRequest.blank(self.url) + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.index, req, self.url) + + def test_index_no_data(self): + self.stubs.Set(cinder.db, 'volume_metadata_get', + return_empty_volume_metadata) + req = fakes.HTTPRequest.blank(self.url) + res_dict = self.controller.index(req, self.req_id) + expected = {'metadata': {}} + self.assertEqual(expected, res_dict) + + def test_show(self): + req = fakes.HTTPRequest.blank(self.url + '/key2') + res_dict = self.controller.show(req, self.req_id, 'key2') + expected = {'meta': {'key2': 'value2'}} + self.assertEqual(expected, res_dict) + + def test_show_nonexistent_volume(self): + self.stubs.Set(cinder.db, 'volume_metadata_get', + return_volume_nonexistent) + req = fakes.HTTPRequest.blank(self.url + '/key2') + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.show, req, self.req_id, 'key2') + + def test_show_meta_not_found(self): + self.stubs.Set(cinder.db, 'volume_metadata_get', + return_empty_volume_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key6') + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.show, req, self.req_id, 'key6') + + def test_delete(self): + self.stubs.Set(cinder.db, 'volume_metadata_get', + return_volume_metadata) + self.stubs.Set(cinder.db, 'volume_metadata_delete', + delete_volume_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key2') + req.method = 'DELETE' + res = self.controller.delete(req, self.req_id, 'key2') + + self.assertEqual(200, res.status_int) + + def test_delete_nonexistent_volume(self): + self.stubs.Set(cinder.db, 'volume_get', + return_volume_nonexistent) + req = fakes.HTTPRequest.blank(self.url + '/key1') + req.method = 'DELETE' + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.delete, req, self.req_id, 'key1') + + def test_delete_meta_not_found(self): + self.stubs.Set(cinder.db, 'volume_metadata_get', + return_empty_volume_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key6') + req.method = 'DELETE' + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.delete, req, self.req_id, 'key6') + + def test_create(self): + self.stubs.Set(cinder.db, 'volume_metadata_get', + return_empty_volume_metadata) + self.stubs.Set(cinder.db, 'volume_metadata_update', + return_create_volume_metadata) + + req = fakes.HTTPRequest.blank('/v1/volume_metadata') + req.method = 'POST' + req.content_type = "application/json" + body = {"metadata": {"key1": "value1", + "key2": "value2", + "key3": "value3", }} + req.body = jsonutils.dumps(body) + res_dict = self.controller.create(req, self.req_id, body) + self.assertEqual(body, res_dict) + + def test_create_with_keys_in_uppercase_and_lowercase(self): + # if the keys in uppercase_and_lowercase, should return the one + # which server added + self.stubs.Set(cinder.db, 'volume_metadata_get', + return_empty_volume_metadata) + self.stubs.Set(cinder.db, 'volume_metadata_update', + return_create_volume_metadata_insensitive) + + req = fakes.HTTPRequest.blank('/v1/volume_metadata') + req.method = 'POST' + req.content_type = "application/json" + body = {"metadata": {"key1": "value1", + "KEY1": "value1", + "key2": "value2", + "KEY2": "value2", + "key3": "value3", + "KEY4": "value4"}} + expected = {"metadata": {"key1": "value1", + "key2": "value2", + "key3": "value3", + "KEY4": "value4"}} + req.body = 
jsonutils.dumps(body) + res_dict = self.controller.create(req, self.req_id, body) + self.assertEqual(expected, res_dict) + + def test_create_empty_body(self): + self.stubs.Set(cinder.db, 'volume_metadata_update', + return_create_volume_metadata) + req = fakes.HTTPRequest.blank(self.url) + req.method = 'POST' + req.headers["content-type"] = "application/json" + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.create, req, self.req_id, None) + + def test_create_item_empty_key(self): + self.stubs.Set(cinder.db, 'volume_metadata_update', + return_create_volume_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key1') + req.method = 'PUT' + body = {"meta": {"": "value1"}} + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.create, req, self.req_id, body) + + def test_create_item_key_too_long(self): + self.stubs.Set(cinder.db, 'volume_metadata_update', + return_create_volume_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key1') + req.method = 'PUT' + body = {"meta": {("a" * 260): "value1"}} + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.create, + req, self.req_id, body) + + def test_create_nonexistent_volume(self): + self.stubs.Set(cinder.db, 'volume_get', + return_volume_nonexistent) + self.stubs.Set(cinder.db, 'volume_metadata_get', + return_volume_metadata) + self.stubs.Set(cinder.db, 'volume_metadata_update', + return_create_volume_metadata) + + req = fakes.HTTPRequest.blank('/v1/volume_metadata') + req.method = 'POST' + req.content_type = "application/json" + body = {"metadata": {"key9": "value9"}} + req.body = jsonutils.dumps(body) + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.create, req, self.req_id, body) + + def test_update_all(self): + self.stubs.Set(cinder.db, 'volume_metadata_update', + return_new_volume_metadata) + req = fakes.HTTPRequest.blank(self.url) + req.method = 'PUT' + req.content_type = "application/json" + expected = { + 'metadata': { + 'key10': 'value10', + 'key99': 'value99', + 'KEY20': 'value20', + }, + } + req.body = jsonutils.dumps(expected) + res_dict = self.controller.update_all(req, self.req_id, expected) + + self.assertEqual(expected, res_dict) + + def test_update_all_with_keys_in_uppercase_and_lowercase(self): + self.stubs.Set(cinder.db, 'volume_metadata_get', + return_create_volume_metadata) + self.stubs.Set(cinder.db, 'volume_metadata_update', + return_new_volume_metadata) + req = fakes.HTTPRequest.blank(self.url) + req.method = 'PUT' + req.content_type = "application/json" + body = { + 'metadata': { + 'key10': 'value10', + 'KEY10': 'value10', + 'key99': 'value99', + 'KEY20': 'value20', + }, + } + expected = { + 'metadata': { + 'key10': 'value10', + 'key99': 'value99', + 'KEY20': 'value20', + }, + } + req.body = jsonutils.dumps(expected) + res_dict = self.controller.update_all(req, self.req_id, body) + + self.assertEqual(expected, res_dict) + + def test_update_all_empty_container(self): + self.stubs.Set(cinder.db, 'volume_metadata_update', + return_empty_container_metadata) + req = fakes.HTTPRequest.blank(self.url) + req.method = 'PUT' + req.content_type = "application/json" + expected = {'metadata': {}} + req.body = jsonutils.dumps(expected) + res_dict = self.controller.update_all(req, self.req_id, expected) + + self.assertEqual(expected, res_dict) + + def test_update_all_malformed_container(self): + 
self.stubs.Set(cinder.db, 'volume_metadata_update', + return_create_volume_metadata) + req = fakes.HTTPRequest.blank(self.url) + req.method = 'PUT' + req.content_type = "application/json" + expected = {'meta': {}} + req.body = jsonutils.dumps(expected) + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.update_all, req, self.req_id, + expected) + + def test_update_all_malformed_data(self): + self.stubs.Set(cinder.db, 'volume_metadata_update', + return_create_volume_metadata) + req = fakes.HTTPRequest.blank(self.url) + req.method = 'PUT' + req.content_type = "application/json" + expected = {'metadata': ['asdf']} + req.body = jsonutils.dumps(expected) + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.update_all, req, self.req_id, + expected) + + def test_update_all_nonexistent_volume(self): + self.stubs.Set(cinder.db, 'volume_get', return_volume_nonexistent) + req = fakes.HTTPRequest.blank(self.url) + req.method = 'PUT' + req.content_type = "application/json" + body = {'metadata': {'key10': 'value10'}} + req.body = jsonutils.dumps(body) + + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.update_all, req, '100', body) + + def test_update_item(self): + self.stubs.Set(cinder.db, 'volume_metadata_update', + return_create_volume_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key1') + req.method = 'PUT' + body = {"meta": {"key1": "value1"}} + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + res_dict = self.controller.update(req, self.req_id, 'key1', body) + expected = {'meta': {'key1': 'value1'}} + self.assertEqual(expected, res_dict) + + def test_update_item_nonexistent_volume(self): + self.stubs.Set(cinder.db, 'volume_get', + return_volume_nonexistent) + req = fakes.HTTPRequest.blank('/v1.1/fake/volumes/asdf/metadata/key1') + req.method = 'PUT' + body = {"meta": {"key1": "value1"}} + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.update, req, self.req_id, 'key1', + body) + + def test_update_item_empty_body(self): + self.stubs.Set(cinder.db, 'volume_metadata_update', + return_create_volume_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key1') + req.method = 'PUT' + req.headers["content-type"] = "application/json" + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.update, req, self.req_id, 'key1', + None) + + def test_update_item_empty_key(self): + self.stubs.Set(cinder.db, 'volume_metadata_update', + return_create_volume_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key1') + req.method = 'PUT' + body = {"meta": {"": "value1"}} + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.update, req, self.req_id, '', body) + + def test_update_item_key_too_long(self): + self.stubs.Set(cinder.db, 'volume_metadata_update', + return_create_volume_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key1') + req.method = 'PUT' + body = {"meta": {("a" * 260): "value1"}} + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, + self.controller.update, + req, self.req_id, ("a" * 260), body) + + def test_update_item_value_too_long(self): + self.stubs.Set(cinder.db, 'volume_metadata_update', + return_create_volume_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key1') + req.method = 'PUT' + body = {"meta": 
{"key1": ("a" * 260)}} + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, + self.controller.update, + req, self.req_id, "key1", body) + + def test_update_item_too_many_keys(self): + self.stubs.Set(cinder.db, 'volume_metadata_update', + return_create_volume_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key1') + req.method = 'PUT' + body = {"meta": {"key1": "value1", "key2": "value2"}} + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.update, req, self.req_id, 'key1', + body) + + def test_update_item_body_uri_mismatch(self): + self.stubs.Set(cinder.db, 'volume_metadata_update', + return_create_volume_metadata) + req = fakes.HTTPRequest.blank(self.url + '/bad') + req.method = 'PUT' + body = {"meta": {"key1": "value1"}} + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.update, req, self.req_id, 'bad', + body) + + def test_invalid_metadata_items_on_create(self): + self.stubs.Set(cinder.db, 'volume_metadata_update', + return_create_volume_metadata) + req = fakes.HTTPRequest.blank(self.url) + req.method = 'POST' + req.headers["content-type"] = "application/json" + + #test for long key + data = {"metadata": {"a" * 260: "value1"}} + req.body = jsonutils.dumps(data) + self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, + self.controller.create, req, self.req_id, data) + + #test for long value + data = {"metadata": {"key": "v" * 260}} + req.body = jsonutils.dumps(data) + self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, + self.controller.create, req, self.req_id, data) + + #test for empty key. + data = {"metadata": {"": "value1"}} + req.body = jsonutils.dumps(data) + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.create, req, self.req_id, data) diff --git a/cinder/tests/api/v1/test_volumes.py b/cinder/tests/api/v1/test_volumes.py new file mode 100644 index 0000000000..57c4d82b01 --- /dev/null +++ b/cinder/tests/api/v1/test_volumes.py @@ -0,0 +1,1166 @@ +# Copyright 2013 Josh Durgin +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
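Several cases in the volume tests below (test_volume_update_with_admin_metadata, test_volume_list_with_admin_metadata and test_add_visible_admin_metadata_visible_key_only) assert the same rule: of the admin-only metadata, exactly the readonly and attached_mode keys are folded into the user-visible metadata, while anything else, such as invisible_key, stays hidden. A sketch of that merge rule as the assertions describe it, not the controller's actual _add_visible_admin_metadata implementation:

VISIBLE_ADMIN_KEYS = ('readonly', 'attached_mode')

def merge_visible_admin_metadata(admin_metadata, metadata):
    merged = dict(metadata)
    for key, value in admin_metadata.items():
        if key in VISIBLE_ADMIN_KEYS:
            merged[key] = value
    return merged

assert merge_visible_admin_metadata(
    {'readonly': 'True', 'invisible_key': 'invisible_value'},
    {'key': 'value'}) == {'key': 'value', 'readonly': 'True'}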
+ +import datetime + +from lxml import etree +from oslo.config import cfg +import urllib +import webob + +from cinder.api import extensions +from cinder.api.v1 import volumes +from cinder import context +from cinder import db +from cinder import exception +from cinder import test +from cinder.tests.api import fakes +from cinder.tests.api.v2 import stubs +from cinder.tests.image import fake as fake_image +from cinder.volume import api as volume_api + + +NS = '{http://docs.openstack.org/volume/api/v1}' + +TEST_SNAPSHOT_UUID = '00000000-0000-0000-0000-000000000001' + +CONF = cfg.CONF + + +def stub_snapshot_get(self, context, snapshot_id): + if snapshot_id != TEST_SNAPSHOT_UUID: + raise exception.NotFound + + return {'id': snapshot_id, + 'volume_id': 12, + 'status': 'available', + 'volume_size': 100, + 'created_at': None, + 'display_name': 'Default name', + 'display_description': 'Default description', } + + +class VolumeApiTest(test.TestCase): + def setUp(self): + super(VolumeApiTest, self).setUp() + self.ext_mgr = extensions.ExtensionManager() + self.ext_mgr.extensions = {} + fake_image.stub_out_image_service(self.stubs) + self.controller = volumes.VolumeController(self.ext_mgr) + + self.stubs.Set(db, 'volume_get_all', stubs.stub_volume_get_all) + self.stubs.Set(db, 'service_get_all_by_topic', + stubs.stub_service_get_all_by_topic) + self.stubs.Set(volume_api.API, 'delete', stubs.stub_volume_delete) + + def test_volume_create(self): + self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get) + self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create) + + vol = {"size": 100, + "display_name": "Volume Test Name", + "display_description": "Volume Test Desc", + "availability_zone": "zone1:host1"} + body = {"volume": vol} + req = fakes.HTTPRequest.blank('/v1/volumes') + res_dict = self.controller.create(req, body) + expected = {'volume': {'status': 'fakestatus', + 'display_description': 'Volume Test Desc', + 'availability_zone': 'zone1:host1', + 'display_name': 'Volume Test Name', + 'attachments': [{'device': '/', + 'server_id': 'fakeuuid', + 'host_name': None, + 'id': '1', + 'volume_id': '1'}], + 'bootable': 'false', + 'volume_type': 'vol_type_name', + 'snapshot_id': None, + 'source_volid': None, + 'metadata': {'attached_mode': 'rw', + 'readonly': 'False'}, + 'id': '1', + 'created_at': datetime.datetime(1, 1, 1, + 1, 1, 1), + 'size': 100}} + self.assertEqual(res_dict, expected) + + def test_volume_create_with_type(self): + vol_type = CONF.default_volume_type + db.volume_type_create(context.get_admin_context(), + dict(name=vol_type, extra_specs={})) + db_vol_type = db.volume_type_get_by_name(context.get_admin_context(), + vol_type) + + vol = {"size": 100, + "display_name": "Volume Test Name", + "display_description": "Volume Test Desc", + "availability_zone": "zone1:host1", + "volume_type": "FakeTypeName"} + body = {"volume": vol} + req = fakes.HTTPRequest.blank('/v1/volumes') + # Raise 404 when type name isn't valid + self.assertRaises(webob.exc.HTTPNotFound, self.controller.create, + req, body) + # Use correct volume type name + vol.update(dict(volume_type=CONF.default_volume_type)) + body.update(dict(volume=vol)) + res_dict = self.controller.create(req, body) + volume_id = res_dict['volume']['id'] + self.assertEqual(len(res_dict), 1) + self.assertEqual(res_dict['volume']['volume_type'], + db_vol_type['name']) + + # Use correct volume type id + vol.update(dict(volume_type=db_vol_type['id'])) + body.update(dict(volume=vol)) + res_dict = self.controller.create(req, body) + volume_id = 
res_dict['volume']['id'] + self.assertEqual(len(res_dict), 1) + self.assertEqual(res_dict['volume']['volume_type'], + db_vol_type['name']) + + def test_volume_creation_fails_with_bad_size(self): + vol = {"size": '', + "display_name": "Volume Test Name", + "display_description": "Volume Test Desc", + "availability_zone": "zone1:host1"} + body = {"volume": vol} + req = fakes.HTTPRequest.blank('/v1/volumes') + self.assertRaises(exception.InvalidInput, + self.controller.create, + req, + body) + + def test_volume_creation_fails_with_bad_availability_zone(self): + vol = {"size": '1', + "name": "Volume Test Name", + "description": "Volume Test Desc", + "availability_zone": "zonen:hostn"} + body = {"volume": vol} + req = fakes.HTTPRequest.blank('/v2/volumes') + self.assertRaises(exception.InvalidInput, + self.controller.create, + req, body) + + def test_volume_create_with_image_id(self): + self.stubs.Set(db, 'volume_get', stubs.stub_volume_get_db) + + self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create) + self.ext_mgr.extensions = {'os-image-create': 'fake'} + test_id = "c905cedb-7281-47e4-8a62-f26bc5fc4c77" + vol = {"size": '1', + "display_name": "Volume Test Name", + "display_description": "Volume Test Desc", + "availability_zone": "nova", + "imageRef": test_id} + expected = {'volume': {'status': 'fakestatus', + 'display_description': 'Volume Test Desc', + 'availability_zone': 'nova', + 'display_name': 'Volume Test Name', + 'attachments': [{'device': '/', + 'server_id': 'fakeuuid', + 'host_name': None, + 'id': '1', + 'volume_id': '1'}], + 'bootable': 'false', + 'volume_type': 'vol_type_name', + 'image_id': test_id, + 'snapshot_id': None, + 'source_volid': None, + 'metadata': {'attached_mode': 'rw', + 'readonly': 'False'}, + 'id': '1', + 'created_at': datetime.datetime(1, 1, 1, + 1, 1, 1), + 'size': '1'}} + body = {"volume": vol} + req = fakes.HTTPRequest.blank('/v1/volumes') + res_dict = self.controller.create(req, body) + self.assertEqual(res_dict, expected) + + def test_volume_create_with_image_id_is_integer(self): + self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create) + self.ext_mgr.extensions = {'os-image-create': 'fake'} + vol = {"size": '1', + "display_name": "Volume Test Name", + "display_description": "Volume Test Desc", + "availability_zone": "cinder", + "imageRef": 1234} + body = {"volume": vol} + req = fakes.HTTPRequest.blank('/v1/volumes') + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.create, + req, + body) + + def test_volume_create_with_image_id_not_uuid_format(self): + self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create) + self.ext_mgr.extensions = {'os-image-create': 'fake'} + vol = {"size": '1', + "display_name": "Volume Test Name", + "display_description": "Volume Test Desc", + "availability_zone": "cinder", + "imageRef": '12345'} + body = {"volume": vol} + req = fakes.HTTPRequest.blank('/v1/volumes') + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.create, + req, + body) + + def test_volume_update(self): + self.stubs.Set(db, 'volume_get', stubs.stub_volume_get_db) + self.stubs.Set(volume_api.API, "update", stubs.stub_volume_update) + + updates = { + "display_name": "Updated Test Name", + } + body = {"volume": updates} + req = fakes.HTTPRequest.blank('/v1/volumes/1') + res_dict = self.controller.update(req, '1', body) + expected = {'volume': { + 'status': 'fakestatus', + 'display_description': 'displaydesc', + 'availability_zone': 'fakeaz', + 'display_name': 'Updated Test Name', + 'attachments': [{ + 
'id': '1', + 'volume_id': '1', + 'server_id': 'fakeuuid', + 'host_name': None, + 'device': '/' + }], + 'bootable': 'false', + 'volume_type': 'vol_type_name', + 'snapshot_id': None, + 'source_volid': None, + 'metadata': {'attached_mode': 'rw', + 'readonly': 'False'}, + 'id': '1', + 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), + 'size': 1}} + self.assertEqual(res_dict, expected) + + def test_volume_update_metadata(self): + self.stubs.Set(db, 'volume_get', stubs.stub_volume_get_db) + self.stubs.Set(volume_api.API, "update", stubs.stub_volume_update) + + updates = { + "metadata": {"qos_max_iops": 2000} + } + body = {"volume": updates} + req = fakes.HTTPRequest.blank('/v1/volumes/1') + res_dict = self.controller.update(req, '1', body) + expected = {'volume': { + 'status': 'fakestatus', + 'display_description': 'displaydesc', + 'availability_zone': 'fakeaz', + 'display_name': 'displayname', + 'attachments': [{ + 'id': '1', + 'volume_id': '1', + 'server_id': 'fakeuuid', + 'host_name': None, + 'device': '/' + }], + 'bootable': 'false', + 'volume_type': 'vol_type_name', + 'snapshot_id': None, + 'source_volid': None, + 'metadata': {"qos_max_iops": 2000, + "readonly": "False", + "attached_mode": "rw"}, + 'id': '1', + 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), + 'size': 1 + }} + self.assertEqual(res_dict, expected) + + def test_volume_update_with_admin_metadata(self): + self.stubs.Set(volume_api.API, "update", stubs.stub_volume_update) + + volume = stubs.stub_volume("1") + del volume['name'] + del volume['volume_type'] + del volume['volume_type_id'] + volume['metadata'] = {'key': 'value'} + db.volume_create(context.get_admin_context(), volume) + db.volume_admin_metadata_update(context.get_admin_context(), "1", + {"readonly": "True", + "invisible_key": "invisible_value"}, + False) + + updates = { + "display_name": "Updated Test Name", + } + body = {"volume": updates} + req = fakes.HTTPRequest.blank('/v1/volumes/1') + admin_ctx = context.RequestContext('admin', 'fakeproject', True) + req.environ['cinder.context'] = admin_ctx + res_dict = self.controller.update(req, '1', body) + expected = {'volume': { + 'status': 'fakestatus', + 'display_description': 'displaydesc', + 'availability_zone': 'fakeaz', + 'display_name': 'Updated Test Name', + 'attachments': [{ + 'id': '1', + 'volume_id': '1', + 'server_id': 'fakeuuid', + 'host_name': None, + 'device': '/' + }], + 'bootable': 'false', + 'volume_type': 'None', + 'snapshot_id': None, + 'source_volid': None, + 'metadata': {'key': 'value', + 'readonly': 'True'}, + 'id': '1', + 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), + 'size': 1}} + self.assertEqual(res_dict, expected) + + def test_update_empty_body(self): + body = {} + req = fakes.HTTPRequest.blank('/v1/volumes/1') + self.assertRaises(webob.exc.HTTPUnprocessableEntity, + self.controller.update, + req, '1', body) + + def test_update_invalid_body(self): + body = {'display_name': 'missing top level volume key'} + req = fakes.HTTPRequest.blank('/v1/volumes/1') + self.assertRaises(webob.exc.HTTPUnprocessableEntity, + self.controller.update, + req, '1', body) + + def test_update_not_found(self): + self.stubs.Set(volume_api.API, "get", stubs.stub_volume_get_notfound) + updates = { + "display_name": "Updated Test Name", + } + body = {"volume": updates} + req = fakes.HTTPRequest.blank('/v1/volumes/1') + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.update, + req, '1', body) + + def test_volume_list(self): + self.stubs.Set(db, 'volume_get', stubs.stub_volume_get_db) + 
self.stubs.Set(volume_api.API, 'get_all', + stubs.stub_volume_get_all_by_project) + + req = fakes.HTTPRequest.blank('/v1/volumes') + res_dict = self.controller.index(req) + expected = {'volumes': [{'status': 'fakestatus', + 'display_description': 'displaydesc', + 'availability_zone': 'fakeaz', + 'display_name': 'displayname', + 'attachments': [{'device': '/', + 'server_id': 'fakeuuid', + 'host_name': None, + 'id': '1', + 'volume_id': '1'}], + 'bootable': 'false', + 'volume_type': 'vol_type_name', + 'snapshot_id': None, + 'source_volid': None, + 'metadata': {'attached_mode': 'rw', + 'readonly': 'False'}, + 'id': '1', + 'created_at': datetime.datetime(1, 1, 1, + 1, 1, 1), + 'size': 1}]} + self.assertEqual(res_dict, expected) + # Finally test that we cached the returned volumes + self.assertEqual(1, len(req.cached_resource())) + + def test_volume_list_with_admin_metadata(self): + volume = stubs.stub_volume("1") + del volume['name'] + del volume['volume_type'] + del volume['volume_type_id'] + volume['metadata'] = {'key': 'value'} + db.volume_create(context.get_admin_context(), volume) + db.volume_admin_metadata_update(context.get_admin_context(), "1", + {"readonly": "True", + "invisible_key": "invisible_value"}, + False) + + req = fakes.HTTPRequest.blank('/v1/volumes') + admin_ctx = context.RequestContext('admin', 'fakeproject', True) + req.environ['cinder.context'] = admin_ctx + res_dict = self.controller.index(req) + expected = {'volumes': [{'status': 'fakestatus', + 'display_description': 'displaydesc', + 'availability_zone': 'fakeaz', + 'display_name': 'displayname', + 'attachments': [{'device': '/', + 'server_id': 'fakeuuid', + 'host_name': None, + 'id': '1', + 'volume_id': '1'}], + 'bootable': 'false', + 'volume_type': 'None', + 'snapshot_id': None, + 'source_volid': None, + 'metadata': {'key': 'value', + 'readonly': 'True'}, + 'id': '1', + 'created_at': datetime.datetime(1, 1, 1, + 1, 1, 1), + 'size': 1}]} + self.assertEqual(res_dict, expected) + + def test_volume_list_detail(self): + self.stubs.Set(db, 'volume_get', stubs.stub_volume_get_db) + self.stubs.Set(volume_api.API, 'get_all', + stubs.stub_volume_get_all_by_project) + + req = fakes.HTTPRequest.blank('/v1/volumes/detail') + res_dict = self.controller.index(req) + expected = {'volumes': [{'status': 'fakestatus', + 'display_description': 'displaydesc', + 'availability_zone': 'fakeaz', + 'display_name': 'displayname', + 'attachments': [{'device': '/', + 'server_id': 'fakeuuid', + 'host_name': None, + 'id': '1', + 'volume_id': '1'}], + 'bootable': 'false', + 'volume_type': 'vol_type_name', + 'snapshot_id': None, + 'source_volid': None, + 'metadata': {'attached_mode': 'rw', + 'readonly': 'False'}, + 'id': '1', + 'created_at': datetime.datetime(1, 1, 1, + 1, 1, 1), + 'size': 1}]} + self.assertEqual(res_dict, expected) + # Finally test that we cached the returned volumes + self.assertEqual(1, len(req.cached_resource())) + + def test_volume_list_detail_with_admin_metadata(self): + volume = stubs.stub_volume("1") + del volume['name'] + del volume['volume_type'] + del volume['volume_type_id'] + volume['metadata'] = {'key': 'value'} + db.volume_create(context.get_admin_context(), volume) + db.volume_admin_metadata_update(context.get_admin_context(), "1", + {"readonly": "True", + "invisible_key": "invisible_value"}, + False) + + req = fakes.HTTPRequest.blank('/v1/volumes/detail') + admin_ctx = context.RequestContext('admin', 'fakeproject', True) + req.environ['cinder.context'] = admin_ctx + res_dict = self.controller.index(req) + expected = 
{'volumes': [{'status': 'fakestatus', + 'display_description': 'displaydesc', + 'availability_zone': 'fakeaz', + 'display_name': 'displayname', + 'attachments': [{'device': '/', + 'server_id': 'fakeuuid', + 'host_name': None, + 'id': '1', + 'volume_id': '1'}], + 'bootable': 'false', + 'volume_type': 'None', + 'snapshot_id': None, + 'source_volid': None, + 'metadata': {'key': 'value', + 'readonly': 'True'}, + 'id': '1', + 'created_at': datetime.datetime(1, 1, 1, + 1, 1, 1), + 'size': 1}]} + self.assertEqual(res_dict, expected) + + def test_volume_list_by_name(self): + def stub_volume_get_all_by_project(context, project_id, marker, limit, + sort_key, sort_dir): + return [ + stubs.stub_volume(1, display_name='vol1'), + stubs.stub_volume(2, display_name='vol2'), + stubs.stub_volume(3, display_name='vol3'), + ] + self.stubs.Set(db, 'volume_get', stubs.stub_volume_get_db) + self.stubs.Set(db, 'volume_get_all_by_project', + stub_volume_get_all_by_project) + + # no display_name filter + req = fakes.HTTPRequest.blank('/v1/volumes') + resp = self.controller.index(req) + self.assertEqual(len(resp['volumes']), 3) + # filter on display_name + req = fakes.HTTPRequest.blank('/v1/volumes?display_name=vol2') + resp = self.controller.index(req) + self.assertEqual(len(resp['volumes']), 1) + self.assertEqual(resp['volumes'][0]['display_name'], 'vol2') + # filter no match + req = fakes.HTTPRequest.blank('/v1/volumes?display_name=vol4') + resp = self.controller.index(req) + self.assertEqual(len(resp['volumes']), 0) + + def test_volume_list_by_metadata(self): + def stub_volume_get_all_by_project(context, project_id, marker, limit, + sort_key, sort_dir): + return [ + stubs.stub_volume(1, display_name='vol1', + status='available', + volume_metadata=[{'key': 'key1', + 'value': 'value1'}]), + stubs.stub_volume(2, display_name='vol2', + status='available', + volume_metadata=[{'key': 'key1', + 'value': 'value2'}]), + stubs.stub_volume(3, display_name='vol3', + status='in-use', + volume_metadata=[{'key': 'key1', + 'value': 'value2'}]), + ] + self.stubs.Set(db, 'volume_get_all_by_project', + stub_volume_get_all_by_project) + + # no metadata filter + req = fakes.HTTPRequest.blank('/v1/volumes', use_admin_context=True) + resp = self.controller.index(req) + self.assertEqual(len(resp['volumes']), 3) + + # single match + qparams = urllib.urlencode({'metadata': {'key1': 'value1'}}) + req = fakes.HTTPRequest.blank('/v1/volumes?%s' % qparams, + use_admin_context=True) + resp = self.controller.index(req) + self.assertEqual(len(resp['volumes']), 1) + self.assertEqual(resp['volumes'][0]['display_name'], 'vol1') + self.assertEqual(resp['volumes'][0]['metadata']['key1'], 'value1') + + # multiple matches + qparams = urllib.urlencode({'metadata': {'key1': 'value2'}}) + req = fakes.HTTPRequest.blank('/v1/volumes?%s' % qparams, + use_admin_context=True) + resp = self.controller.index(req) + self.assertEqual(len(resp['volumes']), 2) + for volume in resp['volumes']: + self.assertEqual(volume['metadata']['key1'], 'value2') + + # multiple filters + qparams = urllib.urlencode({'metadata': {'key1': 'value2'}}) + req = fakes.HTTPRequest.blank('/v1/volumes?status=in-use&%s' % qparams, + use_admin_context=True) + resp = self.controller.index(req) + self.assertEqual(len(resp['volumes']), 1) + self.assertEqual(resp['volumes'][0]['display_name'], 'vol3') + + # no match + qparams = urllib.urlencode({'metadata': {'key1': 'value3'}}) + req = fakes.HTTPRequest.blank('/v1/volumes?%s' % qparams, + use_admin_context=True) + resp = 
self.controller.index(req) + self.assertEqual(len(resp['volumes']), 0) + + def test_volume_list_by_status(self): + def stub_volume_get_all_by_project(context, project_id, marker, limit, + sort_key, sort_dir): + return [ + stubs.stub_volume(1, display_name='vol1', status='available'), + stubs.stub_volume(2, display_name='vol2', status='available'), + stubs.stub_volume(3, display_name='vol3', status='in-use'), + ] + self.stubs.Set(db, 'volume_get', stubs.stub_volume_get_db) + self.stubs.Set(db, 'volume_get_all_by_project', + stub_volume_get_all_by_project) + + # no status filter + req = fakes.HTTPRequest.blank('/v1/volumes') + resp = self.controller.index(req) + self.assertEqual(len(resp['volumes']), 3) + # single match + req = fakes.HTTPRequest.blank('/v1/volumes?status=in-use') + resp = self.controller.index(req) + self.assertEqual(len(resp['volumes']), 1) + self.assertEqual(resp['volumes'][0]['status'], 'in-use') + # multiple match + req = fakes.HTTPRequest.blank('/v1/volumes?status=available') + resp = self.controller.index(req) + self.assertEqual(len(resp['volumes']), 2) + for volume in resp['volumes']: + self.assertEqual(volume['status'], 'available') + # multiple filters + req = fakes.HTTPRequest.blank('/v1/volumes?status=available&' + 'display_name=vol1') + resp = self.controller.index(req) + self.assertEqual(len(resp['volumes']), 1) + self.assertEqual(resp['volumes'][0]['display_name'], 'vol1') + self.assertEqual(resp['volumes'][0]['status'], 'available') + # no match + req = fakes.HTTPRequest.blank('/v1/volumes?status=in-use&' + 'display_name=vol1') + resp = self.controller.index(req) + self.assertEqual(len(resp['volumes']), 0) + + def test_volume_show(self): + self.stubs.Set(db, 'volume_get', stubs.stub_volume_get_db) + + req = fakes.HTTPRequest.blank('/v1/volumes/1') + res_dict = self.controller.show(req, '1') + expected = {'volume': {'status': 'fakestatus', + 'display_description': 'displaydesc', + 'availability_zone': 'fakeaz', + 'display_name': 'displayname', + 'attachments': [{'device': '/', + 'server_id': 'fakeuuid', + 'host_name': None, + 'id': '1', + 'volume_id': '1'}], + 'bootable': 'false', + 'volume_type': 'vol_type_name', + 'snapshot_id': None, + 'source_volid': None, + 'metadata': {'attached_mode': 'rw', + 'readonly': 'False'}, + 'id': '1', + 'created_at': datetime.datetime(1, 1, 1, + 1, 1, 1), + 'size': 1}} + self.assertEqual(res_dict, expected) + # Finally test that we cached the returned volume + self.assertIsNotNone(req.cached_resource_by_id('1')) + + def test_volume_show_no_attachments(self): + def stub_volume_get(self, context, volume_id): + return stubs.stub_volume(volume_id, attach_status='detached') + + self.stubs.Set(volume_api.API, 'get', stub_volume_get) + + req = fakes.HTTPRequest.blank('/v1/volumes/1') + res_dict = self.controller.show(req, '1') + expected = {'volume': {'status': 'fakestatus', + 'display_description': 'displaydesc', + 'availability_zone': 'fakeaz', + 'display_name': 'displayname', + 'attachments': [], + 'bootable': 'false', + 'volume_type': 'vol_type_name', + 'snapshot_id': None, + 'source_volid': None, + 'metadata': {'readonly': 'False'}, + 'id': '1', + 'created_at': datetime.datetime(1, 1, 1, + 1, 1, 1), + 'size': 1}} + self.assertEqual(res_dict, expected) + + def test_volume_show_bootable(self): + def stub_volume_get(self, context, volume_id): + return (stubs.stub_volume(volume_id, + volume_glance_metadata=dict(foo='bar'))) + + self.stubs.Set(volume_api.API, 'get', stub_volume_get) + + req = fakes.HTTPRequest.blank('/v1/volumes/1') + 
res_dict = self.controller.show(req, '1') + expected = {'volume': {'status': 'fakestatus', + 'display_description': 'displaydesc', + 'availability_zone': 'fakeaz', + 'display_name': 'displayname', + 'attachments': [{'device': '/', + 'server_id': 'fakeuuid', + 'host_name': None, + 'id': '1', + 'volume_id': '1'}], + 'bootable': 'true', + 'volume_type': 'vol_type_name', + 'snapshot_id': None, + 'source_volid': None, + 'metadata': {'attached_mode': 'rw', + 'readonly': 'False'}, + 'id': '1', + 'created_at': datetime.datetime(1, 1, 1, + 1, 1, 1), + 'size': 1}} + self.assertEqual(res_dict, expected) + + def test_volume_show_no_volume(self): + self.stubs.Set(volume_api.API, "get", stubs.stub_volume_get_notfound) + + req = fakes.HTTPRequest.blank('/v1/volumes/1') + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.show, + req, + 1) + # Finally test that we did not cache anything + self.assertIsNone(req.cached_resource_by_id('1')) + + def test_volume_detail_limit_offset(self): + def volume_detail_limit_offset(is_admin): + def stub_volume_get_all_by_project(context, project_id, marker, + limit, sort_key, sort_dir): + return [ + stubs.stub_volume(1, display_name='vol1'), + stubs.stub_volume(2, display_name='vol2'), + ] + + self.stubs.Set(db, 'volume_get_all_by_project', + stub_volume_get_all_by_project) + self.stubs.Set(db, 'volume_get', stubs.stub_volume_get_db) + + req = fakes.HTTPRequest.blank('/v1/volumes/detail?limit=2&offset=1', + use_admin_context=is_admin) + res_dict = self.controller.index(req) + volumes = res_dict['volumes'] + self.assertEqual(len(volumes), 1) + self.assertEqual(volumes[0]['id'], 2) + + # admin case + volume_detail_limit_offset(is_admin=True) + # non-admin case + volume_detail_limit_offset(is_admin=False) + + def test_volume_show_with_admin_metadata(self): + volume = stubs.stub_volume("1") + del volume['name'] + del volume['volume_type'] + del volume['volume_type_id'] + volume['metadata'] = {'key': 'value'} + db.volume_create(context.get_admin_context(), volume) + db.volume_admin_metadata_update(context.get_admin_context(), "1", + {"readonly": "True", + "invisible_key": "invisible_value"}, + False) + + req = fakes.HTTPRequest.blank('/v1/volumes/1') + admin_ctx = context.RequestContext('admin', 'fakeproject', True) + req.environ['cinder.context'] = admin_ctx + res_dict = self.controller.show(req, '1') + expected = {'volume': {'status': 'fakestatus', + 'display_description': 'displaydesc', + 'availability_zone': 'fakeaz', + 'display_name': 'displayname', + 'attachments': [{'device': '/', + 'server_id': 'fakeuuid', + 'host_name': None, + 'id': '1', + 'volume_id': '1'}], + 'bootable': 'false', + 'volume_type': 'None', + 'snapshot_id': None, + 'source_volid': None, + 'metadata': {'key': 'value', + 'readonly': 'True'}, + 'id': '1', + 'created_at': datetime.datetime(1, 1, 1, + 1, 1, 1), + 'size': 1}} + self.assertEqual(res_dict, expected) + + def test_volume_delete(self): + self.stubs.Set(db, 'volume_get', stubs.stub_volume_get_db) + + req = fakes.HTTPRequest.blank('/v1/volumes/1') + resp = self.controller.delete(req, 1) + self.assertEqual(resp.status_int, 202) + + def test_volume_delete_no_volume(self): + self.stubs.Set(volume_api.API, "get", stubs.stub_volume_get_notfound) + + req = fakes.HTTPRequest.blank('/v1/volumes/1') + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.delete, + req, + 1) + + def test_admin_list_volumes_limited_to_project(self): + self.stubs.Set(db, 'volume_get_all_by_project', + stubs.stub_volume_get_all_by_project) + + req =
fakes.HTTPRequest.blank('/v1/fake/volumes', + use_admin_context=True) + res = self.controller.index(req) + + self.assertIn('volumes', res) + self.assertEqual(1, len(res['volumes'])) + + def test_admin_list_volumes_all_tenants(self): + req = fakes.HTTPRequest.blank('/v1/fake/volumes?all_tenants=1', + use_admin_context=True) + res = self.controller.index(req) + self.assertIn('volumes', res) + self.assertEqual(3, len(res['volumes'])) + + def test_all_tenants_non_admin_gets_all_tenants(self): + self.stubs.Set(db, 'volume_get_all_by_project', + stubs.stub_volume_get_all_by_project) + self.stubs.Set(db, 'volume_get', stubs.stub_volume_get_db) + + req = fakes.HTTPRequest.blank('/v1/fake/volumes?all_tenants=1') + res = self.controller.index(req) + self.assertIn('volumes', res) + self.assertEqual(1, len(res['volumes'])) + + def test_non_admin_get_by_project(self): + self.stubs.Set(db, 'volume_get_all_by_project', + stubs.stub_volume_get_all_by_project) + self.stubs.Set(db, 'volume_get', stubs.stub_volume_get_db) + + req = fakes.HTTPRequest.blank('/v1/fake/volumes') + res = self.controller.index(req) + self.assertIn('volumes', res) + self.assertEqual(1, len(res['volumes'])) + + def test_add_visible_admin_metadata_visible_key_only(self): + admin_metadata = [{"key": "invisible_key", "value": "invisible_value"}, + {"key": "readonly", "value": "visible"}, + {"key": "attached_mode", "value": "visible"}] + metadata = [{"key": "key", "value": "value"}] + volume = dict(volume_admin_metadata=admin_metadata, + volume_metadata=metadata) + admin_ctx = context.get_admin_context() + self.controller._add_visible_admin_metadata(admin_ctx, + volume) + self.assertEqual(volume['volume_metadata'], + [{"key": "key", "value": "value"}, + {"key": "readonly", "value": "visible"}, + {"key": "attached_mode", "value": "visible"}]) + + admin_metadata = {"invisible_key": "invisible_value", + "readonly": "visible", + "attached_mode": "visible"} + metadata = {"key": "value"} + volume = dict(admin_metadata=admin_metadata, + metadata=metadata) + admin_ctx = context.get_admin_context() + self.controller._add_visible_admin_metadata(admin_ctx, + volume) + self.assertEqual(volume['metadata'], + {'key': 'value', + 'attached_mode': 'visible', + 'readonly': 'visible'}) + + +class VolumeSerializerTest(test.TestCase): + def _verify_volume_attachment(self, attach, tree): + for attr in ('id', 'volume_id', 'server_id', 'device'): + self.assertEqual(str(attach[attr]), tree.get(attr)) + + def _verify_volume(self, vol, tree): + self.assertEqual(tree.tag, NS + 'volume') + + for attr in ('id', 'status', 'size', 'availability_zone', 'created_at', + 'display_name', 'display_description', 'volume_type', + 'bootable', 'snapshot_id'): + self.assertEqual(str(vol[attr]), tree.get(attr)) + + for child in tree: + self.assertIn(child.tag, (NS + 'attachments', NS + 'metadata')) + if child.tag == 'attachments': + self.assertEqual(1, len(child)) + self.assertEqual('attachment', child[0].tag) + self._verify_volume_attachment(vol['attachments'][0], child[0]) + elif child.tag == 'metadata': + not_seen = set(vol['metadata'].keys()) + for gr_child in child: + self.assertIn(gr_child.get("key"), not_seen) + self.assertEqual(str(vol['metadata'][gr_child.get("key")]), + gr_child.text) + not_seen.remove(gr_child.get('key')) + self.assertEqual(0, len(not_seen)) + + def test_volume_show_create_serializer(self): + serializer = volumes.VolumeTemplate() + raw_volume = dict( + id='vol_id', + status='vol_status', + size=1024, + availability_zone='vol_availability', + 
bootable='false', + created_at=datetime.datetime.now(), + attachments=[dict(id='vol_id', + volume_id='vol_id', + server_id='instance_uuid', + device='/foo')], + display_name='vol_name', + display_description='vol_desc', + volume_type='vol_type', + snapshot_id='snap_id', + source_volid='source_volid', + metadata=dict(foo='bar', + baz='quux', ), ) + text = serializer.serialize(dict(volume=raw_volume)) + + tree = etree.fromstring(text) + + self._verify_volume(raw_volume, tree) + + def test_volume_index_detail_serializer(self): + serializer = volumes.VolumesTemplate() + raw_volumes = [dict(id='vol1_id', + status='vol1_status', + size=1024, + availability_zone='vol1_availability', + bootable='true', + created_at=datetime.datetime.now(), + attachments=[dict(id='vol1_id', + volume_id='vol1_id', + server_id='instance_uuid', + device='/foo1')], + display_name='vol1_name', + display_description='vol1_desc', + volume_type='vol1_type', + snapshot_id='snap1_id', + source_volid=None, + metadata=dict(foo='vol1_foo', + bar='vol1_bar', ), ), + dict(id='vol2_id', + status='vol2_status', + size=1024, + availability_zone='vol2_availability', + bootable='true', + created_at=datetime.datetime.now(), + attachments=[dict(id='vol2_id', + volume_id='vol2_id', + server_id='instance_uuid', + device='/foo2')], + display_name='vol2_name', + display_description='vol2_desc', + volume_type='vol2_type', + snapshot_id='snap2_id', + source_volid=None, + metadata=dict(foo='vol2_foo', + bar='vol2_bar', ), )] + text = serializer.serialize(dict(volumes=raw_volumes)) + + tree = etree.fromstring(text) + + self.assertEqual(NS + 'volumes', tree.tag) + self.assertEqual(len(raw_volumes), len(tree)) + for idx, child in enumerate(tree): + self._verify_volume(raw_volumes[idx], child) + + +class TestVolumeCreateRequestXMLDeserializer(test.TestCase): + + def setUp(self): + super(TestVolumeCreateRequestXMLDeserializer, self).setUp() + self.deserializer = volumes.CreateDeserializer() + + def test_minimal_volume(self): + self_request = """ +<volume xmlns="http://docs.openstack.org/compute/api/v1.1" + size="1"></volume>""" + request = self.deserializer.deserialize(self_request) + expected = {"volume": {"size": "1", }, } + self.assertEqual(request['body'], expected) + + def test_display_name(self): + self_request = """ +<volume xmlns="http://docs.openstack.org/compute/api/v1.1" + size="1" + display_name="Volume-xml"></volume>""" + request = self.deserializer.deserialize(self_request) + expected = { + "volume": { + "size": "1", + "display_name": "Volume-xml", + }, + } + self.assertEqual(request['body'], expected) + + def test_display_description(self): + self_request = """ +<volume xmlns="http://docs.openstack.org/compute/api/v1.1" + size="1" + display_name="Volume-xml" + display_description="description"></volume>""" + request = self.deserializer.deserialize(self_request) + expected = { + "volume": { + "size": "1", + "display_name": "Volume-xml", + "display_description": "description", + }, + } + self.assertEqual(request['body'], expected) + + def test_volume_type(self): + self_request = """ +<volume xmlns="http://docs.openstack.org/compute/api/v1.1" + size="1" + display_name="Volume-xml" + display_description="description" + volume_type="289da7f8-6440-407c-9fb4-7db01ec49164"></volume>""" + request = self.deserializer.deserialize(self_request) + expected = { + "volume": { + "display_name": "Volume-xml", + "size": "1", + "display_description": "description", + "volume_type": "289da7f8-6440-407c-9fb4-7db01ec49164", + }, + } + self.assertEqual(request['body'], expected) + + def test_availability_zone(self): + self_request = """ +<volume xmlns="http://docs.openstack.org/compute/api/v1.1" + size="1" + display_name="Volume-xml" + display_description="description" + volume_type="289da7f8-6440-407c-9fb4-7db01ec49164" + availability_zone="us-east1"></volume>""" + request = self.deserializer.deserialize(self_request) + expected = { + "volume": { + "size": "1", + "display_name": "Volume-xml", + "display_description": "description", + "volume_type": "289da7f8-6440-407c-9fb4-7db01ec49164", + "availability_zone": "us-east1", + }, + } + self.assertEqual(request['body'], expected) + + def test_metadata(self): + self_request = """ +<volume xmlns="http://docs.openstack.org/compute/api/v1.1" + display_name="Volume-xml" + size="1"> + <metadata><meta key="Type">work</meta></metadata></volume>""" + request =
self.deserializer.deserialize(self_request) + expected = { + "volume": { + "display_name": "Volume-xml", + "size": "1", + "metadata": { + "Type": "work", + }, + }, + } + self.assertEqual(request['body'], expected) + + def test_full_volume(self): + self_request = """ +<volume xmlns="http://docs.openstack.org/compute/api/v1.1" + size="1" + display_name="Volume-xml" + display_description="description" + volume_type="289da7f8-6440-407c-9fb4-7db01ec49164" + availability_zone="us-east1"> + <metadata><meta key="Type">work</meta></metadata></volume>""" + request = self.deserializer.deserialize(self_request) + expected = { + "volume": { + "size": "1", + "display_name": "Volume-xml", + "display_description": "description", + "volume_type": "289da7f8-6440-407c-9fb4-7db01ec49164", + "availability_zone": "us-east1", + "metadata": { + "Type": "work", + }, + }, + } + self.assertEqual(request['body'], expected) + + def test_imageref(self): + self_request = """ +<volume xmlns="http://docs.openstack.org/compute/api/v1.1" + size="1" + display_name="Volume-xml" + display_description="description" + imageRef="4a90189d-d702-4c7c-87fc-6608c554d737"></volume>""" + request = self.deserializer.deserialize(self_request) + expected = { + "volume": { + "size": "1", + "display_name": "Volume-xml", + "display_description": "description", + "imageRef": "4a90189d-d702-4c7c-87fc-6608c554d737", + }, + } + self.assertEqual(expected, request['body']) + + def test_snapshot_id(self): + self_request = """ +<volume xmlns="http://docs.openstack.org/compute/api/v1.1" + size="1" + display_name="Volume-xml" + display_description="description" + snapshot_id="4a90189d-d702-4c7c-87fc-6608c554d737"></volume>""" + request = self.deserializer.deserialize(self_request) + expected = { + "volume": { + "size": "1", + "display_name": "Volume-xml", + "display_description": "description", + "snapshot_id": "4a90189d-d702-4c7c-87fc-6608c554d737", + }, + } + self.assertEqual(expected, request['body']) + + def test_source_volid(self): + self_request = """ +<volume xmlns="http://docs.openstack.org/compute/api/v1.1" + size="1" + display_name="Volume-xml" + display_description="description" + source_volid="4a90189d-d702-4c7c-87fc-6608c554d737"></volume>""" + request = self.deserializer.deserialize(self_request) + expected = { + "volume": { + "size": "1", + "display_name": "Volume-xml", + "display_description": "description", + "source_volid": "4a90189d-d702-4c7c-87fc-6608c554d737", + }, + } + self.assertEqual(expected, request['body']) + + +class VolumesUnprocessableEntityTestCase(test.TestCase): + + """Tests of places we throw 422 Unprocessable Entity from.""" + + def setUp(self): + super(VolumesUnprocessableEntityTestCase, self).setUp() + self.ext_mgr = extensions.ExtensionManager() + self.ext_mgr.extensions = {} + self.controller = volumes.VolumeController(self.ext_mgr) + + def _unprocessable_volume_create(self, body): + req = fakes.HTTPRequest.blank('/v2/fake/volumes') + req.method = 'POST' + + self.assertRaises(webob.exc.HTTPUnprocessableEntity, + self.controller.create, req, body) + + def test_create_no_body(self): + self._unprocessable_volume_create(body=None) + + def test_create_missing_volume(self): + body = {'foo': {'a': 'b'}} + self._unprocessable_volume_create(body=body) + + def test_create_malformed_entity(self): + body = {'volume': 'string'} + self._unprocessable_volume_create(body=body) diff --git a/cinder/tests/api/v2/__init__.py b/cinder/tests/api/v2/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cinder/tests/api/v2/stubs.py b/cinder/tests/api/v2/stubs.py new file mode 100644 index 0000000000..a940db19f2 --- /dev/null +++ b/cinder/tests/api/v2/stubs.py @@ -0,0 +1,149 @@ +# Copyright 2010 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License.
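The stubs module below is organized around a single factory idiom, visible in stub_volume() and stub_snapshot(): a fully populated default record updated with per-test keyword overrides. Reduced to its essence, assuming nothing beyond the standard library:

def make_stub(id, **overrides):
    record = {'id': id, 'status': 'available', 'size': 1}
    record.update(overrides)    # per-test keyword tweaks win over defaults
    return record

assert make_stub(7)['status'] == 'available'
assert make_stub(7, status='in-use')['status'] == 'in-use'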
+ +import datetime + +from cinder import exception as exc + +FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' +FAKE_UUIDS = {} + + +def stub_volume(id, **kwargs): + volume = { + 'id': id, + 'user_id': 'fakeuser', + 'project_id': 'fakeproject', + 'host': 'fakehost', + 'size': 1, + 'availability_zone': 'fakeaz', + 'instance_uuid': 'fakeuuid', + 'attached_host': None, + 'mountpoint': '/', + 'status': 'fakestatus', + 'migration_status': None, + 'attach_status': 'attached', + 'name': 'vol name', + 'display_name': 'displayname', + 'display_description': 'displaydesc', + 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), + 'snapshot_id': None, + 'source_volid': None, + 'volume_type_id': '3e196c20-3c06-11e2-81c1-0800200c9a66', + 'volume_admin_metadata': [{'key': 'attached_mode', 'value': 'rw'}, + {'key': 'readonly', 'value': 'False'}], + 'bootable': False, + 'volume_type': {'name': 'vol_type_name'}} + + volume.update(kwargs) + if kwargs.get('volume_glance_metadata', None): + volume['bootable'] = True + if kwargs.get('attach_status') == 'detached': + del volume['volume_admin_metadata'][0] + return volume + + +def stub_volume_create(self, context, size, name, description, snapshot, + **param): + vol = stub_volume('1') + vol['size'] = size + vol['display_name'] = name + vol['display_description'] = description + vol['source_volid'] = None + vol['bootable'] = False + try: + vol['snapshot_id'] = snapshot['id'] + except (KeyError, TypeError): + vol['snapshot_id'] = None + vol['availability_zone'] = param.get('availability_zone', 'fakeaz') + return vol + + +def stub_volume_create_from_image(self, context, size, name, description, + snapshot, volume_type, metadata, + availability_zone): + vol = stub_volume('1') + vol['status'] = 'creating' + vol['size'] = size + vol['display_name'] = name + vol['display_description'] = description + vol['availability_zone'] = 'cinder' + vol['bootable'] = False + return vol + + +def stub_volume_update(self, context, *args, **param): + pass + + +def stub_volume_delete(self, context, *args, **param): + pass + + +def stub_volume_get(self, context, volume_id): + return stub_volume(volume_id) + + +def stub_volume_get_notfound(self, context, volume_id): + raise exc.NotFound + + +def stub_volume_get_db(context, volume_id): + return stub_volume(volume_id) + + +def stub_volume_get_all(context, search_opts=None, marker=None, limit=None, + sort_key='created_at', sort_dir='desc'): + return [stub_volume(100, project_id='fake'), + stub_volume(101, project_id='superfake'), + stub_volume(102, project_id='superduperfake')] + + +def stub_volume_get_all_by_project(self, context, marker, limit, sort_key, + sort_dir, filters={}): + return [stub_volume_get(self, context, '1')] + + +def stub_snapshot(id, **kwargs): + snapshot = {'id': id, + 'volume_id': 12, + 'status': 'available', + 'volume_size': 100, + 'created_at': None, + 'display_name': 'Default name', + 'display_description': 'Default description', + 'project_id': 'fake'} + + snapshot.update(kwargs) + return snapshot + + +def stub_snapshot_get_all(self): + return [stub_snapshot(100, project_id='fake'), + stub_snapshot(101, project_id='superfake'), + stub_snapshot(102, project_id='superduperfake')] + + +def stub_snapshot_get_all_by_project(self, context): + return [stub_snapshot(1)] + + +def stub_snapshot_update(self, context, *args, **param): + pass + + +def stub_service_get_all_by_topic(context, topic): + return [{'availability_zone': "zone1:host1", "disabled": 0}] diff --git a/cinder/tests/api/v2/test_limits.py 
b/cinder/tests/api/v2/test_limits.py new file mode 100644 index 0000000000..6c2316cd18 --- /dev/null +++ b/cinder/tests/api/v2/test_limits.py @@ -0,0 +1,900 @@ +# Copyright 2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Tests dealing with HTTP rate-limiting. +""" + +import httplib +import StringIO + +from lxml import etree +import webob +from xml.dom import minidom + +from cinder.api.v2 import limits +from cinder.api import views +from cinder.api import xmlutil +import cinder.context +from cinder.openstack.common import jsonutils +from cinder import test + + +TEST_LIMITS = [ + limits.Limit("GET", "/delayed", "^/delayed", 1, limits.PER_MINUTE), + limits.Limit("POST", "*", ".*", 7, limits.PER_MINUTE), + limits.Limit("POST", "/volumes", "^/volumes", 3, limits.PER_MINUTE), + limits.Limit("PUT", "*", "", 10, limits.PER_MINUTE), + limits.Limit("PUT", "/volumes", "^/volumes", 5, limits.PER_MINUTE), +] +NS = { + 'atom': 'http://www.w3.org/2005/Atom', + 'ns': 'http://docs.openstack.org/common/api/v1.0', +} + + +class BaseLimitTestSuite(test.TestCase): + """Base test suite which provides relevant stubs and time abstraction.""" + + def setUp(self): + super(BaseLimitTestSuite, self).setUp() + self.time = 0.0 + self.stubs.Set(limits.Limit, "_get_time", self._get_time) + self.absolute_limits = {} + + def stub_get_project_quotas(context, project_id, usages=True): + return dict((k, dict(limit=v)) + for k, v in self.absolute_limits.items()) + + self.stubs.Set(cinder.quota.QUOTAS, "get_project_quotas", + stub_get_project_quotas) + + def _get_time(self): + """Return the "time" according to this test suite.""" + return self.time + + +class LimitsControllerTest(BaseLimitTestSuite): + + """Tests for `limits.LimitsController` class.""" + + def setUp(self): + """Run before each test.""" + super(LimitsControllerTest, self).setUp() + self.controller = limits.create_resource() + + def _get_index_request(self, accept_header="application/json"): + """Helper to set routing arguments.""" + request = webob.Request.blank("/") + request.accept = accept_header + request.environ["wsgiorg.routing_args"] = (None, { + "action": "index", + "controller": "", + }) + context = cinder.context.RequestContext('testuser', 'testproject') + request.environ["cinder.context"] = context + return request + + def _populate_limits(self, request): + """Put limit info into a request.""" + _limits = [ + limits.Limit("GET", "*", ".*", 10, 60).display(), + limits.Limit("POST", "*", ".*", 5, 60 * 60).display(), + limits.Limit("GET", "changes-since*", "changes-since", + 5, 60).display(), + ] + request.environ["cinder.limits"] = _limits + return request + + def test_empty_index_json(self): + """Test getting empty limit details in JSON.""" + request = self._get_index_request() + response = request.get_response(self.controller) + expected = { + "limits": { + "rate": [], + "absolute": {}, + }, + } + body = jsonutils.loads(response.body) + self.assertEqual(expected, body) + + def 
test_index_json(self): + """Test getting limit details in JSON.""" + request = self._get_index_request() + request = self._populate_limits(request) + self.absolute_limits = { + 'gigabytes': 512, + 'volumes': 5, + } + response = request.get_response(self.controller) + expected = { + "limits": { + "rate": [ + { + "regex": ".*", + "uri": "*", + "limit": [ + { + "verb": "GET", + "next-available": "1970-01-01T00:00:00Z", + "unit": "MINUTE", + "value": 10, + "remaining": 10, + }, + { + "verb": "POST", + "next-available": "1970-01-01T00:00:00Z", + "unit": "HOUR", + "value": 5, + "remaining": 5, + }, + ], + }, + { + "regex": "changes-since", + "uri": "changes-since*", + "limit": [ + { + "verb": "GET", + "next-available": "1970-01-01T00:00:00Z", + "unit": "MINUTE", + "value": 5, + "remaining": 5, + }, + ], + }, + + ], + "absolute": {"maxTotalVolumeGigabytes": 512, + "maxTotalVolumes": 5, }, + }, + } + body = jsonutils.loads(response.body) + self.assertEqual(expected, body) + + def _populate_limits_diff_regex(self, request): + """Put limit info into a request.""" + _limits = [ + limits.Limit("GET", "*", ".*", 10, 60).display(), + limits.Limit("GET", "*", "*.*", 10, 60).display(), + ] + request.environ["cinder.limits"] = _limits + return request + + def test_index_diff_regex(self): + """Test getting limit details in JSON.""" + request = self._get_index_request() + request = self._populate_limits_diff_regex(request) + response = request.get_response(self.controller) + expected = { + "limits": { + "rate": [ + { + "regex": ".*", + "uri": "*", + "limit": [ + { + "verb": "GET", + "next-available": "1970-01-01T00:00:00Z", + "unit": "MINUTE", + "value": 10, + "remaining": 10, + }, + ], + }, + { + "regex": "*.*", + "uri": "*", + "limit": [ + { + "verb": "GET", + "next-available": "1970-01-01T00:00:00Z", + "unit": "MINUTE", + "value": 10, + "remaining": 10, + }, + ], + }, + + ], + "absolute": {}, + }, + } + body = jsonutils.loads(response.body) + self.assertEqual(expected, body) + + def _test_index_absolute_limits_json(self, expected): + request = self._get_index_request() + response = request.get_response(self.controller) + body = jsonutils.loads(response.body) + self.assertEqual(expected, body['limits']['absolute']) + + def test_index_ignores_extra_absolute_limits_json(self): + self.absolute_limits = {'unknown_limit': 9001} + self._test_index_absolute_limits_json({}) + + +class TestLimiter(limits.Limiter): + pass + + +class LimitMiddlewareTest(BaseLimitTestSuite): + + """Tests for the `limits.RateLimitingMiddleware` class.""" + + @webob.dec.wsgify + def _empty_app(self, request): + """Do-nothing WSGI app.""" + pass + + def setUp(self): + """Prepare middleware for use through fake WSGI app.""" + super(LimitMiddlewareTest, self).setUp() + _limits = '(GET, *, .*, 1, MINUTE)' + self.app = limits.RateLimitingMiddleware(self._empty_app, _limits, + "%s.TestLimiter" % + self.__class__.__module__) + + def test_limit_class(self): + """Test that middleware selected correct limiter class.""" + assert isinstance(self.app._limiter, TestLimiter) + + def test_good_request(self): + """Test successful GET request through middleware.""" + request = webob.Request.blank("/") + response = request.get_response(self.app) + self.assertEqual(200, response.status_int) + + def test_limited_request_json(self): + """Test a rate-limited (413) GET request through middleware.""" + request = webob.Request.blank("/") + response = request.get_response(self.app) + self.assertEqual(200, response.status_int) + + request = 
webob.Request.blank("/") + response = request.get_response(self.app) + self.assertEqual(response.status_int, 413) + + self.assertIn('Retry-After', response.headers) + retry_after = int(response.headers['Retry-After']) + self.assertAlmostEqual(retry_after, 60, 1) + + body = jsonutils.loads(response.body) + expected = "Only 1 GET request(s) can be made to * every minute." + value = body["overLimitFault"]["details"].strip() + self.assertEqual(value, expected) + + def test_limited_request_xml(self): + """Test a rate-limited (413) response as XML.""" + request = webob.Request.blank("/") + response = request.get_response(self.app) + self.assertEqual(200, response.status_int) + + request = webob.Request.blank("/") + request.accept = "application/xml" + response = request.get_response(self.app) + self.assertEqual(response.status_int, 413) + + root = minidom.parseString(response.body).childNodes[0] + expected = "Only 1 GET request(s) can be made to * every minute." + + details = root.getElementsByTagName("details") + self.assertEqual(details.length, 1) + + value = details.item(0).firstChild.data.strip() + self.assertEqual(value, expected) + + +class LimitTest(BaseLimitTestSuite): + + """Tests for the `limits.Limit` class.""" + + def test_GET_no_delay(self): + """Test a limit handles 1 GET per second.""" + limit = limits.Limit("GET", "*", ".*", 1, 1) + delay = limit("GET", "/anything") + self.assertIsNone(delay) + self.assertEqual(0, limit.next_request) + self.assertEqual(0, limit.last_request) + + def test_GET_delay(self): + """Test two calls to 1 GET per second limit.""" + limit = limits.Limit("GET", "*", ".*", 1, 1) + delay = limit("GET", "/anything") + self.assertIsNone(delay) + + delay = limit("GET", "/anything") + self.assertEqual(1, delay) + self.assertEqual(1, limit.next_request) + self.assertEqual(0, limit.last_request) + + self.time += 4 + + delay = limit("GET", "/anything") + self.assertIsNone(delay) + self.assertEqual(4, limit.next_request) + self.assertEqual(4, limit.last_request) + + +class ParseLimitsTest(BaseLimitTestSuite): + + """Tests for the default limits parser in the `limits.Limiter` class.""" + + def test_invalid(self): + """Test that parse_limits() handles invalid input correctly.""" + self.assertRaises(ValueError, limits.Limiter.parse_limits, + ';;;;;') + + def test_bad_rule(self): + """Test that parse_limits() handles bad rules correctly.""" + self.assertRaises(ValueError, limits.Limiter.parse_limits, + 'GET, *, .*, 20, minute') + + def test_missing_arg(self): + """Test that parse_limits() handles missing args correctly.""" + self.assertRaises(ValueError, limits.Limiter.parse_limits, + '(GET, *, .*, 20)') + + def test_bad_value(self): + """Test that parse_limits() handles bad values correctly.""" + self.assertRaises(ValueError, limits.Limiter.parse_limits, + '(GET, *, .*, foo, minute)') + + def test_bad_unit(self): + """Test that parse_limits() handles bad units correctly.""" + self.assertRaises(ValueError, limits.Limiter.parse_limits, + '(GET, *, .*, 20, lightyears)') + + def test_multiple_rules(self): + """Test that parse_limits() handles multiple rules correctly.""" + try: + l = limits.Limiter.parse_limits('(get, *, .*, 20, minute);' + '(PUT, /foo*, /foo.*, 10, hour);' + '(POST, /bar*, /bar.*, 5, second);' + '(Say, /derp*, /derp.*, 1, day)') + except ValueError as e: + assert False, str(e) + + # Make sure the number of returned limits are correct + self.assertEqual(len(l), 4) + + # Check all the verbs... 
+ expected = ['GET', 'PUT', 'POST', 'SAY'] + self.assertEqual([t.verb for t in l], expected) + + # ...the URIs... + expected = ['*', '/foo*', '/bar*', '/derp*'] + self.assertEqual([t.uri for t in l], expected) + + # ...the regexes... + expected = ['.*', '/foo.*', '/bar.*', '/derp.*'] + self.assertEqual([t.regex for t in l], expected) + + # ...the values... + expected = [20, 10, 5, 1] + self.assertEqual([t.value for t in l], expected) + + # ...and the units... + expected = [limits.PER_MINUTE, limits.PER_HOUR, + limits.PER_SECOND, limits.PER_DAY] + self.assertEqual([t.unit for t in l], expected) + + +class LimiterTest(BaseLimitTestSuite): + + """Tests for the in-memory `limits.Limiter` class.""" + + def setUp(self): + """Run before each test.""" + super(LimiterTest, self).setUp() + userlimits = {'limits.user3': '', + 'limits.user0': '(get, *, .*, 4, minute);' + '(put, *, .*, 2, minute)'} + self.limiter = limits.Limiter(TEST_LIMITS, **userlimits) + + def _check(self, num, verb, url, username=None): + """Check and yield results from checks.""" + for x in xrange(num): + yield self.limiter.check_for_delay(verb, url, username)[0] + + def _check_sum(self, num, verb, url, username=None): + """Check and sum results from checks.""" + results = self._check(num, verb, url, username) + return sum(item for item in results if item) + + def test_no_delay_GET(self): + """Ensure no delay on a single call for a limit verb we didn't set.""" + delay = self.limiter.check_for_delay("GET", "/anything") + self.assertEqual(delay, (None, None)) + + def test_no_delay_PUT(self): + """Ensure no delay on a single call for a known limit.""" + delay = self.limiter.check_for_delay("PUT", "/anything") + self.assertEqual(delay, (None, None)) + + def test_delay_PUT(self): + """Test delay on 11th PUT request. + + Ensure the 11th PUT will result in a delay of 6.0 seconds until + the next request is granted. + """ + expected = [None] * 10 + [6.0] + results = list(self._check(11, "PUT", "/anything")) + + self.assertEqual(expected, results) + + def test_delay_POST(self): + """Test delay on 8th POST request. + + Ensure the 8th POST will result in a delay of 6.0 seconds until + the next request is granted. + """ + expected = [None] * 7 + results = list(self._check(7, "POST", "/anything")) + self.assertEqual(expected, results) + + expected = 60.0 / 7.0 + results = self._check_sum(1, "POST", "/anything") + self.assertAlmostEqual(expected, results, 8) + + def test_delay_GET(self): + """Ensure the 11th GET will result in NO delay.""" + expected = [None] * 11 + results = list(self._check(11, "GET", "/anything")) + self.assertEqual(expected, results) + + expected = [None] * 4 + [15.0] + results = list(self._check(5, "GET", "/foo", "user0")) + self.assertEqual(expected, results) + + def test_delay_PUT_volumes(self): + """Test delay on /volumes. + + Ensure PUT on /volumes limits at 5 requests, and PUT elsewhere + is still OK after 5 requests...but then after 11 total requests, + PUT limiting kicks in. + """ + # First 6 requests on PUT /volumes + expected = [None] * 5 + [12.0] + results = list(self._check(6, "PUT", "/volumes")) + self.assertEqual(expected, results) + + # Next 5 requests on PUT /anything + expected = [None] * 4 + [6.0] + results = list(self._check(5, "PUT", "/anything")) + self.assertEqual(expected, results) + + def test_delay_PUT_wait(self): + """Test limit is lifted again. + + Ensure after hitting the limit and then waiting for + the correct amount of time, the limit will be lifted. 
+ """ + expected = [None] * 10 + [6.0] + results = list(self._check(11, "PUT", "/anything")) + self.assertEqual(expected, results) + + # Advance time + self.time += 6.0 + + expected = [None, 6.0] + results = list(self._check(2, "PUT", "/anything")) + self.assertEqual(expected, results) + + def test_multiple_delays(self): + """Ensure multiple requests still get a delay.""" + expected = [None] * 10 + [6.0] * 10 + results = list(self._check(20, "PUT", "/anything")) + self.assertEqual(expected, results) + + self.time += 1.0 + + expected = [5.0] * 10 + results = list(self._check(10, "PUT", "/anything")) + self.assertEqual(expected, results) + + expected = [None] * 2 + [30.0] * 8 + results = list(self._check(10, "PUT", "/anything", "user0")) + self.assertEqual(expected, results) + + def test_user_limit(self): + """Test user-specific limits.""" + self.assertEqual(self.limiter.levels['user3'], []) + self.assertEqual(len(self.limiter.levels['user0']), 2) + + def test_multiple_users(self): + """Tests involving multiple users.""" + + # User0 + expected = [None] * 2 + [30.0] * 8 + results = list(self._check(10, "PUT", "/anything", "user0")) + self.assertEqual(expected, results) + + # User1 + expected = [None] * 10 + [6.0] * 10 + results = list(self._check(20, "PUT", "/anything", "user1")) + self.assertEqual(expected, results) + + # User2 + expected = [None] * 10 + [6.0] * 5 + results = list(self._check(15, "PUT", "/anything", "user2")) + self.assertEqual(expected, results) + + # User3 + expected = [None] * 20 + results = list(self._check(20, "PUT", "/anything", "user3")) + self.assertEqual(expected, results) + + self.time += 1.0 + + # User1 again + expected = [5.0] * 10 + results = list(self._check(10, "PUT", "/anything", "user1")) + self.assertEqual(expected, results) + + self.time += 1.0 + + # User2 again + expected = [4.0] * 5 + results = list(self._check(5, "PUT", "/anything", "user2")) + self.assertEqual(expected, results) + + # User0 again + expected = [28.0] + results = list(self._check(1, "PUT", "/anything", "user0")) + self.assertEqual(expected, results) + + self.time += 28.0 + + expected = [None, 30.0] + results = list(self._check(2, "PUT", "/anything", "user0")) + self.assertEqual(expected, results) + + +class WsgiLimiterTest(BaseLimitTestSuite): + + """Tests for `limits.WsgiLimiter` class.""" + + def setUp(self): + """Run before each test.""" + super(WsgiLimiterTest, self).setUp() + self.app = limits.WsgiLimiter(TEST_LIMITS) + + def _request_data(self, verb, path): + """Get data describing a limit request verb/path.""" + return jsonutils.dumps({"verb": verb, "path": path}) + + def _request(self, verb, url, username=None): + """Make sure that POSTing to the given url causes the given username + to perform the given action. Make the internal rate limiter return + a delay and make sure that the WSGI app returns the correct response. 
+ """ + if username: + request = webob.Request.blank("/%s" % username) + else: + request = webob.Request.blank("/") + + request.method = "POST" + request.body = self._request_data(verb, url) + response = request.get_response(self.app) + + if "X-Wait-Seconds" in response.headers: + self.assertEqual(response.status_int, 403) + return response.headers["X-Wait-Seconds"] + + self.assertEqual(response.status_int, 204) + + def test_invalid_methods(self): + """Only POSTs should work.""" + for method in ["GET", "PUT", "DELETE", "HEAD", "OPTIONS"]: + request = webob.Request.blank("/", method=method) + response = request.get_response(self.app) + self.assertEqual(response.status_int, 405) + + def test_good_url(self): + delay = self._request("GET", "/something") + self.assertIsNone(delay) + + def test_escaping(self): + delay = self._request("GET", "/something/jump%20up") + self.assertIsNone(delay) + + def test_response_to_delays(self): + delay = self._request("GET", "/delayed") + self.assertIsNone(delay) + + delay = self._request("GET", "/delayed") + self.assertEqual(delay, '60.00') + + def test_response_to_delays_usernames(self): + delay = self._request("GET", "/delayed", "user1") + self.assertIsNone(delay) + + delay = self._request("GET", "/delayed", "user2") + self.assertIsNone(delay) + + delay = self._request("GET", "/delayed", "user1") + self.assertEqual(delay, '60.00') + + delay = self._request("GET", "/delayed", "user2") + self.assertEqual(delay, '60.00') + + +class FakeHttplibSocket(object): + + """Fake `httplib.HTTPResponse` replacement.""" + + def __init__(self, response_string): + """Initialize new `FakeHttplibSocket`.""" + self._buffer = StringIO.StringIO(response_string) + + def makefile(self, _mode, _other): + """Returns the socket's internal buffer.""" + return self._buffer + + +class FakeHttplibConnection(object): + + """Fake `httplib.HTTPConnection`.""" + + def __init__(self, app, host): + """Initialize `FakeHttplibConnection`.""" + self.app = app + self.host = host + + def request(self, method, path, body="", headers=None): + """Fake request handler. + + Requests made via this connection actually get translated and + routed into our WSGI app, we then wait for the response and turn + it back into an `httplib.HTTPResponse`. + """ + if not headers: + headers = {} + + req = webob.Request.blank(path) + req.method = method + req.headers = headers + req.host = self.host + req.body = body + + resp = str(req.get_response(self.app)) + resp = "HTTP/1.0 %s" % resp + sock = FakeHttplibSocket(resp) + self.http_response = httplib.HTTPResponse(sock) + self.http_response.begin() + + def getresponse(self): + """Return our generated response from the request.""" + return self.http_response + + +def wire_HTTPConnection_to_WSGI(host, app): + """Monkeypatches HTTPConnection so that if you try to connect to host, you + are instead routed straight to the given WSGI app. + + After calling this method, when any code calls + + httplib.HTTPConnection(host) + + the connection object will be a fake. Its requests will be sent directly + to the given WSGI app rather than through a socket. + + Code connecting to hosts other than host will not be affected. + + This method may be called multiple times to map different hosts to + different apps. + + This method returns the original HTTPConnection object, so that the caller + can restore the default HTTPConnection interface (for all hosts). 
+ """ + class HTTPConnectionDecorator(object): + """Wraps the real HTTPConnection class so that when you instantiate + the class you might instead get a fake instance. + """ + + def __init__(self, wrapped): + self.wrapped = wrapped + + def __call__(self, connection_host, *args, **kwargs): + if connection_host == host: + return FakeHttplibConnection(app, host) + else: + return self.wrapped(connection_host, *args, **kwargs) + + oldHTTPConnection = httplib.HTTPConnection + httplib.HTTPConnection = HTTPConnectionDecorator(httplib.HTTPConnection) + return oldHTTPConnection + + +class WsgiLimiterProxyTest(BaseLimitTestSuite): + + """Tests for the `limits.WsgiLimiterProxy` class.""" + + def setUp(self): + """setUp() for WsgiLimiterProxyTest. + + Do some nifty HTTP/WSGI magic which allows for WSGI to be called + directly by something like the `httplib` library. + """ + super(WsgiLimiterProxyTest, self).setUp() + self.app = limits.WsgiLimiter(TEST_LIMITS) + self.oldHTTPConnection = ( + wire_HTTPConnection_to_WSGI("169.254.0.1:80", self.app)) + self.proxy = limits.WsgiLimiterProxy("169.254.0.1:80") + + def test_200(self): + """Successful request test.""" + delay = self.proxy.check_for_delay("GET", "/anything") + self.assertEqual(delay, (None, None)) + + def test_403(self): + """Forbidden request test.""" + delay = self.proxy.check_for_delay("GET", "/delayed") + self.assertEqual(delay, (None, None)) + + delay, error = self.proxy.check_for_delay("GET", "/delayed") + error = error.strip() + + expected = ("60.00", "403 Forbidden\n\nOnly 1 GET request(s) can be " + "made to /delayed every minute.") + + self.assertEqual((delay, error), expected) + + def tearDown(self): + # restore original HTTPConnection object + httplib.HTTPConnection = self.oldHTTPConnection + super(WsgiLimiterProxyTest, self).tearDown() + + +class LimitsViewBuilderTest(test.TestCase): + def setUp(self): + super(LimitsViewBuilderTest, self).setUp() + self.view_builder = views.limits.ViewBuilder() + self.rate_limits = [{"URI": "*", + "regex": ".*", + "value": 10, + "verb": "POST", + "remaining": 2, + "unit": "MINUTE", + "resetTime": 1311272226}, + {"URI": "*/volumes", + "regex": "^/volumes", + "value": 50, + "verb": "POST", + "remaining": 10, + "unit": "DAY", + "resetTime": 1311272226}] + self.absolute_limits = {"metadata_items": 1, + "injected_files": 5, + "injected_file_content_bytes": 5} + + def test_build_limits(self): + tdate = "2011-07-21T18:17:06Z" + expected_limits = { + "limits": {"rate": [{"uri": "*", + "regex": ".*", + "limit": [{"value": 10, + "verb": "POST", + "remaining": 2, + "unit": "MINUTE", + "next-available": tdate}]}, + {"uri": "*/volumes", + "regex": "^/volumes", + "limit": [{"value": 50, + "verb": "POST", + "remaining": 10, + "unit": "DAY", + "next-available": tdate}]}], + "absolute": {"maxServerMeta": 1, + "maxImageMeta": 1, + "maxPersonality": 5, + "maxPersonalitySize": 5}}} + + output = self.view_builder.build(self.rate_limits, + self.absolute_limits) + self.assertDictMatch(output, expected_limits) + + def test_build_limits_empty_limits(self): + expected_limits = {"limits": {"rate": [], + "absolute": {}}} + + abs_limits = {} + rate_limits = [] + output = self.view_builder.build(rate_limits, abs_limits) + self.assertDictMatch(output, expected_limits) + + +class LimitsXMLSerializationTest(test.TestCase): + def test_xml_declaration(self): + serializer = limits.LimitsTemplate() + + fixture = {"limits": { + "rate": [], + "absolute": {}}} + + output = serializer.serialize(fixture) + has_dec = output.startswith("<?xml version='1.0' encoding='UTF-8'?>") + 
self.assertTrue(has_dec) + + def test_index(self): + tdate = "2011-12-15T22:42:45Z" + serializer = limits.LimitsTemplate() + fixture = {"limits": {"rate": [{"uri": "*", + "regex": ".*", + "limit": [{"value": 10, + "verb": "POST", + "remaining": 2, + "unit": "MINUTE", + "next-available": tdate}]}, + {"uri": "*/servers", + "regex": "^/servers", + "limit": [{"value": 50, + "verb": "POST", + "remaining": 10, + "unit": "DAY", + "next-available": tdate}]}], + "absolute": {"maxServerMeta": 1, + "maxImageMeta": 1, + "maxPersonality": 5, + "maxPersonalitySize": 10240}}} + + output = serializer.serialize(fixture) + root = etree.XML(output) + xmlutil.validate_schema(root, 'limits') + + #verify absolute limits + absolutes = root.xpath('ns:absolute/ns:limit', namespaces=NS) + self.assertEqual(len(absolutes), 4) + for limit in absolutes: + name = limit.get('name') + value = limit.get('value') + self.assertEqual(value, str(fixture['limits']['absolute'][name])) + + #verify rate limits + rates = root.xpath('ns:rates/ns:rate', namespaces=NS) + self.assertEqual(len(rates), 2) + for i, rate in enumerate(rates): + for key in ['uri', 'regex']: + self.assertEqual(rate.get(key), + str(fixture['limits']['rate'][i][key])) + rate_limits = rate.xpath('ns:limit', namespaces=NS) + self.assertEqual(len(rate_limits), 1) + for j, limit in enumerate(rate_limits): + for key in ['verb', 'value', 'remaining', 'unit', + 'next-available']: + self.assertEqual( + limit.get(key), + str(fixture['limits']['rate'][i]['limit'][j][key])) + + def test_index_no_limits(self): + serializer = limits.LimitsTemplate() + + fixture = {"limits": { + "rate": [], + "absolute": {}}} + + output = serializer.serialize(fixture) + root = etree.XML(output) + xmlutil.validate_schema(root, 'limits') + + #verify absolute limits + absolutes = root.xpath('ns:absolute/ns:limit', namespaces=NS) + self.assertEqual(len(absolutes), 0) + + #verify rate limits + rates = root.xpath('ns:rates/ns:rate', namespaces=NS) + self.assertEqual(len(rates), 0) diff --git a/cinder/tests/api/v2/test_snapshot_metadata.py b/cinder/tests/api/v2/test_snapshot_metadata.py new file mode 100644 index 0000000000..8fc1b1f8fb --- /dev/null +++ b/cinder/tests/api/v2/test_snapshot_metadata.py @@ -0,0 +1,553 @@ +# Copyright 2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
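For reference, the rule strings exercised by `ParseLimitsTest` above use the `(verb, uri, regex, value, unit)` form accepted by `limits.Limiter.parse_limits`, with multiple rules joined by semicolons. A minimal sketch of the round trip (illustrative only; the assumption that the unit constants resolve to the period length in seconds follows from fixtures such as `limits.Limit("GET", "*", ".*", 10, 60)` above)::

    from cinder.api.v2 import limits

    parsed = limits.Limiter.parse_limits(
        '(get, *, .*, 20, minute);(PUT, /foo*, /foo.*, 10, hour)')
    # Verbs are upper-cased during parsing; units resolve to seconds.
    print [(l.verb, l.uri, l.value, l.unit) for l in parsed]
    # Expected: [('GET', '*', 20, 60), ('PUT', '/foo*', 10, 3600)]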
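Similarly, `wire_HTTPConnection_to_WSGI` above lets a plain `httplib` client talk to an in-process WSGI app, which is how `WsgiLimiterProxyTest` drives `limits.WsgiLimiter` without opening a socket. A hypothetical stand-alone sketch of the same wiring (not part of this patch; it reuses `TEST_LIMITS` and the helper from the test module above)::

    import httplib

    from cinder.api.v2 import limits
    from cinder.tests.api.v2.test_limits import (TEST_LIMITS,
                                                 wire_HTTPConnection_to_WSGI)

    app = limits.WsgiLimiter(TEST_LIMITS)
    old = wire_HTTPConnection_to_WSGI("169.254.0.1:80", app)
    try:
        conn = httplib.HTTPConnection("169.254.0.1:80")
        conn.request("POST", "/", body='{"verb": "GET", "path": "/delayed"}')
        print conn.getresponse().status   # 204 until the limit is hit
    finally:
        # Always restore the real class, as tearDown() does above.
        httplib.HTTPConnection = old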
+ +import uuid + +from oslo.config import cfg +import webob + +from cinder.api import extensions +from cinder.api.v2 import snapshot_metadata +from cinder.api.v2 import snapshots +import cinder.db +from cinder import exception +from cinder.openstack.common import jsonutils +from cinder import test +from cinder.tests.api import fakes + + +CONF = cfg.CONF + + +def return_create_snapshot_metadata_max(context, + snapshot_id, + metadata, + delete): + return stub_max_snapshot_metadata() + + +def return_create_snapshot_metadata(context, snapshot_id, metadata, delete): + return stub_snapshot_metadata() + + +def return_create_snapshot_metadata_insensitive(context, snapshot_id, + metadata, delete): + return stub_snapshot_metadata_insensitive() + + +def return_new_snapshot_metadata(context, snapshot_id, metadata, delete): + return stub_new_snapshot_metadata() + + +def return_snapshot_metadata(context, snapshot_id): + if not isinstance(snapshot_id, str) or not len(snapshot_id) == 36: + msg = 'id %s must be a uuid in return snapshot metadata' % snapshot_id + raise Exception(msg) + return stub_snapshot_metadata() + + +def return_empty_snapshot_metadata(context, snapshot_id): + return {} + + +def return_empty_container_metadata(context, snapshot_id, metadata, delete): + return {} + + +def delete_snapshot_metadata(context, snapshot_id, key): + pass + + +def stub_snapshot_metadata(): + metadata = { + "key1": "value1", + "key2": "value2", + "key3": "value3", + } + return metadata + + +def stub_snapshot_metadata_insensitive(): + metadata = { + "key1": "value1", + "key2": "value2", + "key3": "value3", + "KEY4": "value4", + } + return metadata + + +def stub_new_snapshot_metadata(): + metadata = { + 'key10': 'value10', + 'key99': 'value99', + 'KEY20': 'value20', + } + return metadata + + +def stub_max_snapshot_metadata(): + metadata = {"metadata": {}} + for num in range(CONF.quota_metadata_items): + metadata['metadata']['key%i' % num] = "blah" + return metadata + + +def return_snapshot(context, snapshot_id): + return {'id': '0cc3346e-9fef-4445-abe6-5d2b2690ec64', + 'name': 'fake', + 'status': 'available', + 'metadata': {}} + + +def return_volume(context, volume_id): + return {'id': 'fake-vol-id', + 'size': 100, + 'name': 'fake', + 'host': 'fake-host', + 'status': 'available', + 'encryption_key_id': None, + 'volume_type_id': None, + 'migration_status': None, + 'metadata': {}} + + +def return_snapshot_nonexistent(context, snapshot_id): + raise exception.SnapshotNotFound('bogus test message') + + +def fake_update_snapshot_metadata(self, context, snapshot, diff): + pass + + +class SnapshotMetaDataTest(test.TestCase): + + def setUp(self): + super(SnapshotMetaDataTest, self).setUp() + self.volume_api = cinder.volume.api.API() + fakes.stub_out_key_pair_funcs(self.stubs) + self.stubs.Set(cinder.db, 'volume_get', return_volume) + self.stubs.Set(cinder.db, 'snapshot_get', return_snapshot) + self.stubs.Set(cinder.db, 'snapshot_metadata_get', + return_snapshot_metadata) + + self.stubs.Set(self.volume_api, 'update_snapshot_metadata', + fake_update_snapshot_metadata) + + self.ext_mgr = extensions.ExtensionManager() + self.ext_mgr.extensions = {} + self.snapshot_controller = snapshots.SnapshotsController(self.ext_mgr) + self.controller = snapshot_metadata.Controller() + self.req_id = str(uuid.uuid4()) + self.url = '/v2/fake/snapshots/%s/metadata' % self.req_id + + snap = {"volume_size": 100, + "volume_id": "fake-vol-id", + "display_name": "Volume Test Name", + "display_description": "Volume Test Desc", + "availability_zone": 
"zone1:host1", + "host": "fake-host", + "metadata": {}} + body = {"snapshot": snap} + req = fakes.HTTPRequest.blank('/v2/snapshots') + self.snapshot_controller.create(req, body) + + def test_index(self): + req = fakes.HTTPRequest.blank(self.url) + res_dict = self.controller.index(req, self.req_id) + + expected = { + 'metadata': { + 'key1': 'value1', + 'key2': 'value2', + 'key3': 'value3', + }, + } + self.assertEqual(expected, res_dict) + + def test_index_nonexistent_snapshot(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_get', + return_snapshot_nonexistent) + req = fakes.HTTPRequest.blank(self.url) + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.index, req, self.url) + + def test_index_no_data(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_get', + return_empty_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url) + res_dict = self.controller.index(req, self.req_id) + expected = {'metadata': {}} + self.assertEqual(expected, res_dict) + + def test_show(self): + req = fakes.HTTPRequest.blank(self.url + '/key2') + res_dict = self.controller.show(req, self.req_id, 'key2') + expected = {'meta': {'key2': 'value2'}} + self.assertEqual(expected, res_dict) + + def test_show_nonexistent_snapshot(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_get', + return_snapshot_nonexistent) + req = fakes.HTTPRequest.blank(self.url + '/key2') + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.show, req, self.req_id, 'key2') + + def test_show_meta_not_found(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_get', + return_empty_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key6') + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.show, req, self.req_id, 'key6') + + def test_delete(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_get', + return_snapshot_metadata) + self.stubs.Set(cinder.db, 'snapshot_metadata_delete', + delete_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key2') + req.method = 'DELETE' + res = self.controller.delete(req, self.req_id, 'key2') + + self.assertEqual(200, res.status_int) + + def test_delete_nonexistent_snapshot(self): + self.stubs.Set(cinder.db, 'snapshot_get', + return_snapshot_nonexistent) + req = fakes.HTTPRequest.blank(self.url + '/key1') + req.method = 'DELETE' + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.delete, req, self.req_id, 'key1') + + def test_delete_meta_not_found(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_get', + return_empty_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key6') + req.method = 'DELETE' + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.delete, req, self.req_id, 'key6') + + def test_create(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_get', + return_empty_snapshot_metadata) + self.stubs.Set(cinder.db, 'snapshot_metadata_update', + return_create_snapshot_metadata) + + req = fakes.HTTPRequest.blank('/v2/snapshot_metadata') + req.method = 'POST' + req.content_type = "application/json" + body = {"metadata": {"key1": "value1", + "key2": "value2", + "key3": "value3"}} + req.body = jsonutils.dumps(body) + res_dict = self.controller.create(req, self.req_id, body) + self.assertEqual(body, res_dict) + + def test_create_with_keys_in_uppercase_and_lowercase(self): + # if the keys in uppercase_and_lowercase, should return the one + # which server added + self.stubs.Set(cinder.db, 'snapshot_metadata_get', + return_empty_snapshot_metadata) + self.stubs.Set(cinder.db, 'snapshot_metadata_update', + 
return_create_snapshot_metadata_insensitive) + + req = fakes.HTTPRequest.blank('/v2/snapshot_metadata') + req.method = 'POST' + req.content_type = "application/json" + body = {"metadata": {"key1": "value1", + "KEY1": "value1", + "key2": "value2", + "KEY2": "value2", + "key3": "value3", + "KEY4": "value4"}} + expected = {"metadata": {"key1": "value1", + "key2": "value2", + "key3": "value3", + "KEY4": "value4"}} + req.body = jsonutils.dumps(body) + res_dict = self.controller.create(req, self.req_id, body) + self.assertEqual(expected, res_dict) + + def test_create_empty_body(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_update', + return_create_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url) + req.method = 'POST' + req.headers["content-type"] = "application/json" + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.create, req, self.req_id, None) + + def test_create_item_empty_key(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_update', + return_create_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key1') + req.method = 'PUT' + body = {"meta": {"": "value1"}} + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.create, req, self.req_id, body) + + def test_create_item_key_too_long(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_update', + return_create_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key1') + req.method = 'PUT' + body = {"meta": {("a" * 260): "value1"}} + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.create, + req, self.req_id, body) + + def test_create_nonexistent_snapshot(self): + self.stubs.Set(cinder.db, 'snapshot_get', + return_snapshot_nonexistent) + self.stubs.Set(cinder.db, 'snapshot_metadata_get', + return_snapshot_metadata) + self.stubs.Set(cinder.db, 'snapshot_metadata_update', + return_create_snapshot_metadata) + + req = fakes.HTTPRequest.blank('/v2/snapshot_metadata') + req.method = 'POST' + req.content_type = "application/json" + body = {"metadata": {"key9": "value9"}} + req.body = jsonutils.dumps(body) + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.create, req, self.req_id, body) + + def test_update_all(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_update', + return_new_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url) + req.method = 'PUT' + req.content_type = "application/json" + expected = { + 'metadata': { + 'key10': 'value10', + 'key99': 'value99', + 'KEY20': 'value20', + }, + } + req.body = jsonutils.dumps(expected) + res_dict = self.controller.update_all(req, self.req_id, expected) + + self.assertEqual(expected, res_dict) + + def test_update_all_with_keys_in_uppercase_and_lowercase(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_get', + return_create_snapshot_metadata) + self.stubs.Set(cinder.db, 'snapshot_metadata_update', + return_new_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url) + req.method = 'PUT' + req.content_type = "application/json" + body = { + 'metadata': { + 'key10': 'value10', + 'KEY10': 'value10', + 'key99': 'value99', + 'KEY20': 'value20', + }, + } + expected = { + 'metadata': { + 'key10': 'value10', + 'key99': 'value99', + 'KEY20': 'value20', + }, + } + req.body = jsonutils.dumps(expected) + res_dict = self.controller.update_all(req, self.req_id, body) + + self.assertEqual(expected, res_dict) + + def 
test_update_all_empty_container(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_update', + return_empty_container_metadata) + req = fakes.HTTPRequest.blank(self.url) + req.method = 'PUT' + req.content_type = "application/json" + expected = {'metadata': {}} + req.body = jsonutils.dumps(expected) + res_dict = self.controller.update_all(req, self.req_id, expected) + + self.assertEqual(expected, res_dict) + + def test_update_all_malformed_container(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_update', + return_create_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url) + req.method = 'PUT' + req.content_type = "application/json" + expected = {'meta': {}} + req.body = jsonutils.dumps(expected) + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.update_all, req, self.req_id, + expected) + + def test_update_all_malformed_data(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_update', + return_create_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url) + req.method = 'PUT' + req.content_type = "application/json" + expected = {'metadata': ['asdf']} + req.body = jsonutils.dumps(expected) + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.update_all, req, self.req_id, + expected) + + def test_update_all_nonexistent_snapshot(self): + self.stubs.Set(cinder.db, 'snapshot_get', return_snapshot_nonexistent) + req = fakes.HTTPRequest.blank(self.url) + req.method = 'PUT' + req.content_type = "application/json" + body = {'metadata': {'key10': 'value10'}} + req.body = jsonutils.dumps(body) + + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.update_all, req, '100', body) + + def test_update_item(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_update', + return_create_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key1') + req.method = 'PUT' + body = {"meta": {"key1": "value1"}} + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + res_dict = self.controller.update(req, self.req_id, 'key1', body) + expected = {'meta': {'key1': 'value1'}} + self.assertEqual(expected, res_dict) + + def test_update_item_nonexistent_snapshot(self): + self.stubs.Set(cinder.db, 'snapshot_get', + return_snapshot_nonexistent) + req = fakes.HTTPRequest.blank( + '/v2/fake/snapshots/asdf/metadata/key1') + req.method = 'PUT' + body = {"meta": {"key1": "value1"}} + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.update, req, self.req_id, 'key1', + body) + + def test_update_item_empty_body(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_update', + return_create_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key1') + req.method = 'PUT' + req.headers["content-type"] = "application/json" + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.update, req, self.req_id, 'key1', + None) + + def test_update_item_empty_key(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_update', + return_create_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key1') + req.method = 'PUT' + body = {"meta": {"": "value1"}} + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.update, req, self.req_id, '', body) + + def test_update_item_key_too_long(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_update', + return_create_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url + 
'/key1') + req.method = 'PUT' + body = {"meta": {("a" * 260): "value1"}} + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, + self.controller.update, + req, self.req_id, ("a" * 260), body) + + def test_update_item_value_too_long(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_update', + return_create_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key1') + req.method = 'PUT' + body = {"meta": {"key1": ("a" * 260)}} + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, + self.controller.update, + req, self.req_id, "key1", body) + + def test_update_item_too_many_keys(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_update', + return_create_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key1') + req.method = 'PUT' + body = {"meta": {"key1": "value1", "key2": "value2"}} + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.update, req, self.req_id, 'key1', + body) + + def test_update_item_body_uri_mismatch(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_update', + return_create_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url + '/bad') + req.method = 'PUT' + body = {"meta": {"key1": "value1"}} + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.update, req, self.req_id, 'bad', + body) + + def test_invalid_metadata_items_on_create(self): + self.stubs.Set(cinder.db, 'snapshot_metadata_update', + return_create_snapshot_metadata) + req = fakes.HTTPRequest.blank(self.url) + req.method = 'POST' + req.headers["content-type"] = "application/json" + + #test for long key + data = {"metadata": {"a" * 260: "value1"}} + req.body = jsonutils.dumps(data) + self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, + self.controller.create, req, self.req_id, data) + + #test for long value + data = {"metadata": {"key": "v" * 260}} + req.body = jsonutils.dumps(data) + self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, + self.controller.create, req, self.req_id, data) + + #test for empty key. + data = {"metadata": {"": "value1"}} + req.body = jsonutils.dumps(data) + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.create, req, self.req_id, data) diff --git a/cinder/tests/api/v2/test_snapshots.py b/cinder/tests/api/v2/test_snapshots.py new file mode 100644 index 0000000000..43fa3a4e19 --- /dev/null +++ b/cinder/tests/api/v2/test_snapshots.py @@ -0,0 +1,461 @@ +# Copyright 2011 Denali Systems, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
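Taken together, the metadata tests above pin down the validation contract of the snapshot metadata API: an empty key is rejected with 400 Bad Request, an oversized key or value is rejected with 413 Request Entity Too Large (the fixtures use 260 characters to trip the cap), and the item count is bounded by `CONF.quota_metadata_items`. A sketch of an equivalent client-side pre-check (a hypothetical helper, not part of this patch; the 255-character cap and the 128-item default are assumptions inferred from the fixtures)::

    def validate_metadata(metadata, max_items=128):
        # Mirrors the checks the v2 API applies to snapshot metadata.
        if len(metadata) > max_items:  # quota_metadata_items analogue
            raise ValueError('too many metadata items')
        for key, value in metadata.items():
            if not key:  # the API answers 400 Bad Request
                raise ValueError('metadata key must not be empty')
            if len(key) > 255 or len(value) > 255:  # the API answers 413
                raise ValueError('metadata key or value too long')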
+ +import datetime + +from lxml import etree +import webob + +from cinder.api.v2 import snapshots +from cinder import db +from cinder import exception +from cinder.openstack.common import log as logging +from cinder import test +from cinder.tests.api import fakes +from cinder.tests.api.v2 import stubs +from cinder import volume + + +LOG = logging.getLogger(__name__) + +UUID = '00000000-0000-0000-0000-000000000001' +INVALID_UUID = '00000000-0000-0000-0000-000000000002' + + +def _get_default_snapshot_param(): + return { + 'id': UUID, + 'volume_id': 12, + 'status': 'available', + 'volume_size': 100, + 'created_at': None, + 'display_name': 'Default name', + 'display_description': 'Default description', + } + + +def stub_snapshot_create(self, context, + volume_id, name, + description, metadata): + snapshot = _get_default_snapshot_param() + snapshot['volume_id'] = volume_id + snapshot['display_name'] = name + snapshot['display_description'] = description + snapshot['metadata'] = metadata + return snapshot + + +def stub_snapshot_delete(self, context, snapshot): + if snapshot['id'] != UUID: + raise exception.NotFound + + +def stub_snapshot_get(self, context, snapshot_id): + if snapshot_id != UUID: + raise exception.NotFound + + param = _get_default_snapshot_param() + return param + + +def stub_snapshot_get_all(self, context, search_opts=None): + param = _get_default_snapshot_param() + return [param] + + +class SnapshotApiTest(test.TestCase): + def setUp(self): + super(SnapshotApiTest, self).setUp() + self.controller = snapshots.SnapshotsController() + + self.stubs.Set(db, 'snapshot_get_all_by_project', + stubs.stub_snapshot_get_all_by_project) + self.stubs.Set(db, 'snapshot_get_all', + stubs.stub_snapshot_get_all) + + def test_snapshot_create(self): + self.stubs.Set(volume.api.API, "create_snapshot", stub_snapshot_create) + self.stubs.Set(volume.api.API, 'get', stubs.stub_volume_get) + snapshot_name = 'Snapshot Test Name' + snapshot_description = 'Snapshot Test Desc' + snapshot = { + "volume_id": '12', + "force": False, + "name": snapshot_name, + "description": snapshot_description + } + + body = dict(snapshot=snapshot) + req = fakes.HTTPRequest.blank('/v2/snapshots') + resp_dict = self.controller.create(req, body) + + self.assertIn('snapshot', resp_dict) + self.assertEqual(resp_dict['snapshot']['name'], + snapshot_name) + self.assertEqual(resp_dict['snapshot']['description'], + snapshot_description) + + def test_snapshot_create_force(self): + self.stubs.Set(volume.api.API, "create_snapshot_force", + stub_snapshot_create) + self.stubs.Set(volume.api.API, 'get', stubs.stub_volume_get) + snapshot_name = 'Snapshot Test Name' + snapshot_description = 'Snapshot Test Desc' + snapshot = { + "volume_id": '12', + "force": True, + "name": snapshot_name, + "description": snapshot_description + } + body = dict(snapshot=snapshot) + req = fakes.HTTPRequest.blank('/v2/snapshots') + resp_dict = self.controller.create(req, body) + + self.assertIn('snapshot', resp_dict) + self.assertEqual(resp_dict['snapshot']['name'], + snapshot_name) + self.assertEqual(resp_dict['snapshot']['description'], + snapshot_description) + + snapshot = { + "volume_id": "12", + "force": "**&&^^%%$$##@@", + "name": "Snapshot Test Name", + "description": "Snapshot Test Desc" + } + body = dict(snapshot=snapshot) + req = fakes.HTTPRequest.blank('/v2/snapshots') + self.assertRaises(exception.InvalidParameterValue, + self.controller.create, + req, + body) + + def test_snapshot_create_without_volume_id(self): + snapshot_name = 'Snapshot Test 
Name' + snapshot_description = 'Snapshot Test Desc' + body = { + "snapshot": { + "force": True, + "name": snapshot_name, + "description": snapshot_description + } + } + req = fakes.HTTPRequest.blank('/v2/snapshots') + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.create, req, body) + + def test_snapshot_update(self): + self.stubs.Set(volume.api.API, "get_snapshot", stub_snapshot_get) + self.stubs.Set(volume.api.API, "update_snapshot", + stubs.stub_snapshot_update) + updates = { + "name": "Updated Test Name", + } + body = {"snapshot": updates} + req = fakes.HTTPRequest.blank('/v2/snapshots/%s' % UUID) + res_dict = self.controller.update(req, UUID, body) + expected = { + 'snapshot': { + 'id': UUID, + 'volume_id': 12, + 'status': 'available', + 'size': 100, + 'created_at': None, + 'name': 'Updated Test Name', + 'description': 'Default description', + 'metadata': {}, + } + } + self.assertEqual(expected, res_dict) + + def test_snapshot_update_missing_body(self): + body = {} + req = fakes.HTTPRequest.blank('/v2/snapshots/%s' % UUID) + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.update, req, UUID, body) + + def test_snapshot_update_invalid_body(self): + body = {'name': 'missing top level snapshot key'} + req = fakes.HTTPRequest.blank('/v2/snapshots/%s' % UUID) + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.update, req, UUID, body) + + def test_snapshot_update_not_found(self): + self.stubs.Set(volume.api.API, "get_snapshot", stub_snapshot_get) + updates = { + "name": "Updated Test Name", + } + body = {"snapshot": updates} + req = fakes.HTTPRequest.blank('/v2/snapshots/not-the-uuid') + self.assertRaises(webob.exc.HTTPNotFound, self.controller.update, req, + 'not-the-uuid', body) + + def test_snapshot_delete(self): + self.stubs.Set(volume.api.API, "get_snapshot", stub_snapshot_get) + self.stubs.Set(volume.api.API, "delete_snapshot", stub_snapshot_delete) + + snapshot_id = UUID + req = fakes.HTTPRequest.blank('/v2/snapshots/%s' % snapshot_id) + resp = self.controller.delete(req, snapshot_id) + self.assertEqual(resp.status_int, 202) + + def test_snapshot_delete_invalid_id(self): + self.stubs.Set(volume.api.API, "delete_snapshot", stub_snapshot_delete) + snapshot_id = INVALID_UUID + req = fakes.HTTPRequest.blank('/v2/snapshots/%s' % snapshot_id) + self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, + req, snapshot_id) + + def test_snapshot_show(self): + self.stubs.Set(volume.api.API, "get_snapshot", stub_snapshot_get) + req = fakes.HTTPRequest.blank('/v2/snapshots/%s' % UUID) + resp_dict = self.controller.show(req, UUID) + + self.assertIn('snapshot', resp_dict) + self.assertEqual(resp_dict['snapshot']['id'], UUID) + + def test_snapshot_show_invalid_id(self): + snapshot_id = INVALID_UUID + req = fakes.HTTPRequest.blank('/v2/snapshots/%s' % snapshot_id) + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.show, req, snapshot_id) + + def test_snapshot_detail(self): + self.stubs.Set(volume.api.API, "get_all_snapshots", + stub_snapshot_get_all) + req = fakes.HTTPRequest.blank('/v2/snapshots/detail') + resp_dict = self.controller.detail(req) + + self.assertIn('snapshots', resp_dict) + resp_snapshots = resp_dict['snapshots'] + self.assertEqual(len(resp_snapshots), 1) + + resp_snapshot = resp_snapshots.pop() + self.assertEqual(resp_snapshot['id'], UUID) + + def test_snapshot_list_by_status(self): + def stub_snapshot_get_all_by_project(context, project_id): + return [ + stubs.stub_snapshot(1, display_name='backup1', + status='available'), 
+ stubs.stub_snapshot(2, display_name='backup2', + status='available'), + stubs.stub_snapshot(3, display_name='backup3', + status='creating'), + ] + self.stubs.Set(db, 'snapshot_get_all_by_project', + stub_snapshot_get_all_by_project) + + # no status filter + req = fakes.HTTPRequest.blank('/v2/snapshots') + resp = self.controller.index(req) + self.assertEqual(len(resp['snapshots']), 3) + # single match + req = fakes.HTTPRequest.blank('/v2/snapshots?status=creating') + resp = self.controller.index(req) + self.assertEqual(len(resp['snapshots']), 1) + self.assertEqual(resp['snapshots'][0]['status'], 'creating') + # multiple match + req = fakes.HTTPRequest.blank('/v2/snapshots?status=available') + resp = self.controller.index(req) + self.assertEqual(len(resp['snapshots']), 2) + for snapshot in resp['snapshots']: + self.assertEqual(snapshot['status'], 'available') + # no match + req = fakes.HTTPRequest.blank('/v2/snapshots?status=error') + resp = self.controller.index(req) + self.assertEqual(len(resp['snapshots']), 0) + + def test_snapshot_list_by_volume(self): + def stub_snapshot_get_all_by_project(context, project_id): + return [ + stubs.stub_snapshot(1, volume_id='vol1', status='creating'), + stubs.stub_snapshot(2, volume_id='vol1', status='available'), + stubs.stub_snapshot(3, volume_id='vol2', status='available'), + ] + self.stubs.Set(db, 'snapshot_get_all_by_project', + stub_snapshot_get_all_by_project) + + # single match + req = fakes.HTTPRequest.blank('/v2/snapshots?volume_id=vol2') + resp = self.controller.index(req) + self.assertEqual(len(resp['snapshots']), 1) + self.assertEqual(resp['snapshots'][0]['volume_id'], 'vol2') + # multiple match + req = fakes.HTTPRequest.blank('/v2/snapshots?volume_id=vol1') + resp = self.controller.index(req) + self.assertEqual(len(resp['snapshots']), 2) + for snapshot in resp['snapshots']: + self.assertEqual(snapshot['volume_id'], 'vol1') + # multiple filters + req = fakes.HTTPRequest.blank('/v2/snapshots?volume_id=vol1' + '&status=available') + resp = self.controller.index(req) + self.assertEqual(len(resp['snapshots']), 1) + self.assertEqual(resp['snapshots'][0]['volume_id'], 'vol1') + self.assertEqual(resp['snapshots'][0]['status'], 'available') + + def test_snapshot_list_by_name(self): + def stub_snapshot_get_all_by_project(context, project_id): + return [ + stubs.stub_snapshot(1, display_name='backup1'), + stubs.stub_snapshot(2, display_name='backup2'), + stubs.stub_snapshot(3, display_name='backup3'), + ] + self.stubs.Set(db, 'snapshot_get_all_by_project', + stub_snapshot_get_all_by_project) + + # no name filter + req = fakes.HTTPRequest.blank('/v2/snapshots') + resp = self.controller.index(req) + self.assertEqual(len(resp['snapshots']), 3) + # filter by one name + req = fakes.HTTPRequest.blank('/v2/snapshots?name=backup2') + resp = self.controller.index(req) + self.assertEqual(len(resp['snapshots']), 1) + self.assertEqual(resp['snapshots'][0]['name'], 'backup2') + # filter no match + req = fakes.HTTPRequest.blank('/v2/snapshots?name=backup4') + resp = self.controller.index(req) + self.assertEqual(len(resp['snapshots']), 0) + + def test_admin_list_snapshots_limited_to_project(self): + req = fakes.HTTPRequest.blank('/v2/fake/snapshots', + use_admin_context=True) + res = self.controller.index(req) + + self.assertIn('snapshots', res) + self.assertEqual(1, len(res['snapshots'])) + + def test_list_snapshots_with_limit_and_offset(self): + def list_snapshots_with_limit_and_offset(is_admin): + def stub_snapshot_get_all_by_project(context, project_id): + 
+                return [
+                    stubs.stub_snapshot(1, display_name='backup1'),
+                    stubs.stub_snapshot(2, display_name='backup2'),
+                    stubs.stub_snapshot(3, display_name='backup3'),
+                ]
+
+            self.stubs.Set(db, 'snapshot_get_all_by_project',
+                           stub_snapshot_get_all_by_project)
+
+            req = fakes.HTTPRequest.blank('/v2/fake/snapshots?limit=1'
+                                          '&offset=1',
+                                          use_admin_context=is_admin)
+            res = self.controller.index(req)
+
+            self.assertIn('snapshots', res)
+            self.assertEqual(1, len(res['snapshots']))
+            self.assertEqual(2, res['snapshots'][0]['id'])
+
+        # admin case
+        list_snapshots_with_limit_and_offset(is_admin=True)
+        # non-admin case
+        list_snapshots_with_limit_and_offset(is_admin=False)
+
+    def test_admin_list_snapshots_all_tenants(self):
+        req = fakes.HTTPRequest.blank('/v2/fake/snapshots?all_tenants=1',
+                                      use_admin_context=True)
+        res = self.controller.index(req)
+        self.assertIn('snapshots', res)
+        self.assertEqual(3, len(res['snapshots']))
+
+    def test_all_tenants_non_admin_gets_all_tenants(self):
+        req = fakes.HTTPRequest.blank('/v2/fake/snapshots?all_tenants=1')
+        res = self.controller.index(req)
+        self.assertIn('snapshots', res)
+        self.assertEqual(1, len(res['snapshots']))
+
+    def test_non_admin_get_by_project(self):
+        req = fakes.HTTPRequest.blank('/v2/fake/snapshots')
+        res = self.controller.index(req)
+        self.assertIn('snapshots', res)
+        self.assertEqual(1, len(res['snapshots']))
+
+    def _create_snapshot_bad_body(self, body):
+        req = fakes.HTTPRequest.blank('/v2/fake/snapshots')
+        req.method = 'POST'
+
+        self.assertRaises(webob.exc.HTTPBadRequest,
+                          self.controller.create, req, body)
+
+    def test_create_no_body(self):
+        self._create_snapshot_bad_body(body=None)
+
+    def test_create_missing_snapshot(self):
+        body = {'foo': {'a': 'b'}}
+        self._create_snapshot_bad_body(body=body)
+
+    def test_create_malformed_entity(self):
+        body = {'snapshot': 'string'}
+        self._create_snapshot_bad_body(body=body)
+
+
+class SnapshotSerializerTest(test.TestCase):
+    def _verify_snapshot(self, snap, tree):
+        self.assertEqual(tree.tag, 'snapshot')
+
+        for attr in ('id', 'status', 'size', 'created_at',
+                     'name', 'description', 'volume_id'):
+            self.assertEqual(str(snap[attr]), tree.get(attr))
+
+    def test_snapshot_show_create_serializer(self):
+        serializer = snapshots.SnapshotTemplate()
+        raw_snapshot = dict(
+            id='snap_id',
+            status='snap_status',
+            size=1024,
+            created_at=datetime.datetime.now(),
+            name='snap_name',
+            description='snap_desc',
+            display_description='snap_desc',
+            volume_id='vol_id',
+        )
+        text = serializer.serialize(dict(snapshot=raw_snapshot))
+
+        tree = etree.fromstring(text)
+
+        self._verify_snapshot(raw_snapshot, tree)
+
+    def test_snapshot_index_detail_serializer(self):
+        serializer = snapshots.SnapshotsTemplate()
+        raw_snapshots = [
+            dict(
+                id='snap1_id',
+                status='snap1_status',
+                size=1024,
+                created_at=datetime.datetime.now(),
+                name='snap1_name',
+                description='snap1_desc',
+                volume_id='vol1_id',
+            ),
+            dict(
+                id='snap2_id',
+                status='snap2_status',
+                size=1024,
+                created_at=datetime.datetime.now(),
+                name='snap2_name',
+                description='snap2_desc',
+                volume_id='vol2_id',
+            )
+        ]
+        text = serializer.serialize(dict(snapshots=raw_snapshots))
+
+        tree = etree.fromstring(text)
+
+        self.assertEqual('snapshots', tree.tag)
+        self.assertEqual(len(raw_snapshots), len(tree))
+        for idx, child in enumerate(tree):
+            self._verify_snapshot(raw_snapshots[idx], child)
diff --git a/cinder/tests/api/v2/test_types.py b/cinder/tests/api/v2/test_types.py
new file mode 100644
index 0000000000..1af532f0b3
--- /dev/null
+++ b/cinder/tests/api/v2/test_types.py
@@ -0,0 +1,211 @@
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+import webob
+
+from cinder.api.v2 import types
+from cinder.api.views import types as views_types
+from cinder import exception
+from cinder.openstack.common import timeutils
+from cinder import test
+from cinder.tests.api import fakes
+from cinder.volume import volume_types
+
+
+def stub_volume_type(id):
+    specs = {
+        "key1": "value1",
+        "key2": "value2",
+        "key3": "value3",
+        "key4": "value4",
+        "key5": "value5"
+    }
+    return dict(
+        id=id,
+        name='vol_type_%s' % str(id),
+        extra_specs=specs,
+    )
+
+
+def return_volume_types_get_all_types(context):
+    return dict(
+        vol_type_1=stub_volume_type(1),
+        vol_type_2=stub_volume_type(2),
+        vol_type_3=stub_volume_type(3)
+    )
+
+
+def return_empty_volume_types_get_all_types(context):
+    return {}
+
+
+def return_volume_types_get_volume_type(context, id):
+    if id == "777":
+        raise exception.VolumeTypeNotFound(volume_type_id=id)
+    return stub_volume_type(int(id))
+
+
+def return_volume_types_get_by_name(context, name):
+    if name == "777":
+        raise exception.VolumeTypeNotFoundByName(volume_type_name=name)
+    return stub_volume_type(int(name.split("_")[2]))
+
+
+class VolumeTypesApiTest(test.TestCase):
+    def setUp(self):
+        super(VolumeTypesApiTest, self).setUp()
+        self.controller = types.VolumeTypesController()
+
+    def test_volume_types_index(self):
+        self.stubs.Set(volume_types, 'get_all_types',
+                       return_volume_types_get_all_types)
+
+        req = fakes.HTTPRequest.blank('/v2/fake/types')
+        res_dict = self.controller.index(req)
+
+        self.assertEqual(3, len(res_dict['volume_types']))
+
+        expected_names = ['vol_type_1', 'vol_type_2', 'vol_type_3']
+        actual_names = map(lambda e: e['name'], res_dict['volume_types'])
+        self.assertEqual(set(actual_names), set(expected_names))
+        for entry in res_dict['volume_types']:
+            self.assertEqual('value1', entry['extra_specs']['key1'])
+
+    def test_volume_types_index_no_data(self):
+        self.stubs.Set(volume_types, 'get_all_types',
+                       return_empty_volume_types_get_all_types)
+
+        req = fakes.HTTPRequest.blank('/v2/fake/types')
+        res_dict = self.controller.index(req)
+
+        self.assertEqual(0, len(res_dict['volume_types']))
+
+    def test_volume_types_show(self):
+        self.stubs.Set(volume_types, 'get_volume_type',
+                       return_volume_types_get_volume_type)
+
+        req = fakes.HTTPRequest.blank('/v2/fake/types/1')
+        res_dict = self.controller.show(req, 1)
+
+        self.assertEqual(1, len(res_dict))
+        self.assertEqual('1', res_dict['volume_type']['id'])
+        self.assertEqual('vol_type_1', res_dict['volume_type']['name'])
+
+    def test_volume_types_show_not_found(self):
+        self.stubs.Set(volume_types, 'get_volume_type',
+                       return_volume_types_get_volume_type)
+
+        req = fakes.HTTPRequest.blank('/v2/fake/types/777')
+        self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
+                          req, '777')
+
+    def test_view_builder_show(self):
+        view_builder = views_types.ViewBuilder()
+
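+        # Raw dict shaped like a database record; the view builder is
+        # expected to surface only name, id and extra_specs from it.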
now = timeutils.isotime() + raw_volume_type = dict( + name='new_type', + deleted=False, + created_at=now, + updated_at=now, + extra_specs={}, + deleted_at=None, + id=42, + ) + + request = fakes.HTTPRequest.blank("/v2") + output = view_builder.show(request, raw_volume_type) + + self.assertIn('volume_type', output) + expected_volume_type = dict( + name='new_type', + extra_specs={}, + id=42, + ) + self.assertDictMatch(output['volume_type'], expected_volume_type) + + def test_view_builder_list(self): + view_builder = views_types.ViewBuilder() + + now = timeutils.isotime() + raw_volume_types = [] + for i in range(0, 10): + raw_volume_types.append( + dict( + name='new_type', + deleted=False, + created_at=now, + updated_at=now, + extra_specs={}, + deleted_at=None, + id=42 + i + ) + ) + + request = fakes.HTTPRequest.blank("/v2") + output = view_builder.index(request, raw_volume_types) + + self.assertIn('volume_types', output) + for i in range(0, 10): + expected_volume_type = dict( + name='new_type', + extra_specs={}, + id=42 + i + ) + self.assertDictMatch(output['volume_types'][i], + expected_volume_type) + + +class VolumeTypesSerializerTest(test.TestCase): + def _verify_volume_type(self, vtype, tree): + self.assertEqual('volume_type', tree.tag) + self.assertEqual(vtype['name'], tree.get('name')) + self.assertEqual(str(vtype['id']), tree.get('id')) + self.assertEqual(1, len(tree)) + extra_specs = tree[0] + self.assertEqual('extra_specs', extra_specs.tag) + seen = set(vtype['extra_specs'].keys()) + for child in extra_specs: + self.assertIn(child.tag, seen) + self.assertEqual(vtype['extra_specs'][child.tag], child.text) + seen.remove(child.tag) + self.assertEqual(len(seen), 0) + + def test_index_serializer(self): + serializer = types.VolumeTypesTemplate() + + # Just getting some input data + vtypes = return_volume_types_get_all_types(None) + text = serializer.serialize({'volume_types': vtypes.values()}) + + tree = etree.fromstring(text) + + self.assertEqual('volume_types', tree.tag) + self.assertEqual(len(vtypes), len(tree)) + for child in tree: + name = child.get('name') + self.assertIn(name, vtypes) + self._verify_volume_type(vtypes[name], child) + + def test_voltype_serializer(self): + serializer = types.VolumeTypeTemplate() + + vtype = stub_volume_type(1) + text = serializer.serialize(dict(volume_type=vtype)) + + tree = etree.fromstring(text) + + self._verify_volume_type(vtype, tree) diff --git a/cinder/tests/api/v2/test_volume_metadata.py b/cinder/tests/api/v2/test_volume_metadata.py new file mode 100644 index 0000000000..51c1dad38a --- /dev/null +++ b/cinder/tests/api/v2/test_volume_metadata.py @@ -0,0 +1,537 @@ +# Copyright 2013 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
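+
+# The tests below exercise the v2 volume metadata controller end to end:
+# index, show, create, update, update_all and delete, including the
+# input-validation failure modes.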
+
+import uuid
+
+from oslo.config import cfg
+import webob
+
+from cinder.api import extensions
+from cinder.api.v2 import volume_metadata
+from cinder.api.v2 import volumes
+from cinder import db
+from cinder import exception
+from cinder.openstack.common import jsonutils
+from cinder import test
+from cinder.tests.api import fakes
+from cinder.tests.api.v2 import stubs
+from cinder.volume import api as volume_api
+
+
+CONF = cfg.CONF
+
+
+def return_create_volume_metadata_max(context, volume_id, metadata, delete):
+    return stub_max_volume_metadata()
+
+
+def return_create_volume_metadata(context, volume_id, metadata, delete):
+    return stub_volume_metadata()
+
+
+def return_new_volume_metadata(context, volume_id, metadata, delete):
+    return stub_new_volume_metadata()
+
+
+def return_create_volume_metadata_insensitive(context, volume_id,
+                                              metadata, delete):
+    return stub_volume_metadata_insensitive()
+
+
+def return_volume_metadata(context, volume_id):
+    if not isinstance(volume_id, str) or not len(volume_id) == 36:
+        msg = 'id %s must be a uuid in return volume metadata' % volume_id
+        raise Exception(msg)
+    return stub_volume_metadata()
+
+
+def return_empty_volume_metadata(context, volume_id):
+    return {}
+
+
+def return_empty_container_metadata(context, volume_id, metadata, delete):
+    return {}
+
+
+def delete_volume_metadata(context, volume_id, key):
+    pass
+
+
+def stub_volume_metadata():
+    metadata = {
+        "key1": "value1",
+        "key2": "value2",
+        "key3": "value3",
+    }
+    return metadata
+
+
+def stub_new_volume_metadata():
+    metadata = {
+        'key10': 'value10',
+        'key99': 'value99',
+        'KEY20': 'value20',
+    }
+    return metadata
+
+
+def stub_volume_metadata_insensitive():
+    metadata = {
+        "key1": "value1",
+        "key2": "value2",
+        "key3": "value3",
+        "KEY4": "value4",
+    }
+    return metadata
+
+
+def stub_max_volume_metadata():
+    metadata = {"metadata": {}}
+    for num in range(CONF.quota_metadata_items):
+        metadata['metadata']['key%i' % num] = "blah"
+    return metadata
+
+
+def return_volume(context, volume_id):
+    return {'id': '0cc3346e-9fef-4445-abe6-5d2b2690ec64',
+            'name': 'fake',
+            'metadata': {}}
+
+
+def return_volume_nonexistent(context, volume_id):
+    raise exception.VolumeNotFound('bogus test message')
+
+
+def fake_update_volume_metadata(self, context, volume, diff):
+    pass
+
+
+class VolumeMetaDataTest(test.TestCase):
+
+    def setUp(self):
+        super(VolumeMetaDataTest, self).setUp()
+        self.volume_api = volume_api.API()
+        fakes.stub_out_key_pair_funcs(self.stubs)
+        self.stubs.Set(db, 'volume_get', return_volume)
+        self.stubs.Set(db, 'volume_metadata_get',
+                       return_volume_metadata)
+        self.stubs.Set(db, 'service_get_all_by_topic',
+                       stubs.stub_service_get_all_by_topic)
+
+        self.stubs.Set(self.volume_api, 'update_volume_metadata',
+                       fake_update_volume_metadata)
+
+        self.ext_mgr = extensions.ExtensionManager()
+        self.ext_mgr.extensions = {}
+        self.volume_controller = volumes.VolumeController(self.ext_mgr)
+        self.controller = volume_metadata.Controller()
+        self.req_id = str(uuid.uuid4())
+        self.url = '/v2/fake/volumes/%s/metadata' % self.req_id
+
+        vol = {"size": 100,
+               "display_name": "Volume Test Name",
+               "display_description": "Volume Test Desc",
+               "availability_zone": "zone1:host1",
+               "metadata": {}}
+        body = {"volume": vol}
+        req = fakes.HTTPRequest.blank('/v2/volumes')
+        self.volume_controller.create(req, body)
+
+    def test_index(self):
+        req = fakes.HTTPRequest.blank(self.url)
+        res_dict = self.controller.index(req, self.req_id)
+
+        expected = {
+            'metadata': {
+                'key1': 'value1',
'key2': 'value2',
+                'key3': 'value3',
+            },
+        }
+        self.assertEqual(expected, res_dict)
+
+    def test_index_nonexistent_volume(self):
+        self.stubs.Set(db, 'volume_metadata_get',
+                       return_volume_nonexistent)
+        req = fakes.HTTPRequest.blank(self.url)
+        self.assertRaises(webob.exc.HTTPNotFound,
+                          self.controller.index, req, self.url)
+
+    def test_index_no_data(self):
+        self.stubs.Set(db, 'volume_metadata_get',
+                       return_empty_volume_metadata)
+        req = fakes.HTTPRequest.blank(self.url)
+        res_dict = self.controller.index(req, self.req_id)
+        expected = {'metadata': {}}
+        self.assertEqual(expected, res_dict)
+
+    def test_show(self):
+        req = fakes.HTTPRequest.blank(self.url + '/key2')
+        res_dict = self.controller.show(req, self.req_id, 'key2')
+        expected = {'meta': {'key2': 'value2'}}
+        self.assertEqual(expected, res_dict)
+
+    def test_show_nonexistent_volume(self):
+        self.stubs.Set(db, 'volume_metadata_get',
+                       return_volume_nonexistent)
+        req = fakes.HTTPRequest.blank(self.url + '/key2')
+        self.assertRaises(webob.exc.HTTPNotFound,
+                          self.controller.show, req, self.req_id, 'key2')
+
+    def test_show_meta_not_found(self):
+        self.stubs.Set(db, 'volume_metadata_get',
+                       return_empty_volume_metadata)
+        req = fakes.HTTPRequest.blank(self.url + '/key6')
+        self.assertRaises(webob.exc.HTTPNotFound,
+                          self.controller.show, req, self.req_id, 'key6')
+
+    def test_delete(self):
+        self.stubs.Set(db, 'volume_metadata_get',
+                       return_volume_metadata)
+        self.stubs.Set(db, 'volume_metadata_delete',
+                       delete_volume_metadata)
+        req = fakes.HTTPRequest.blank(self.url + '/key2')
+        req.method = 'DELETE'
+        res = self.controller.delete(req, self.req_id, 'key2')
+
+        self.assertEqual(200, res.status_int)
+
+    def test_delete_nonexistent_volume(self):
+        self.stubs.Set(db, 'volume_get',
+                       return_volume_nonexistent)
+        req = fakes.HTTPRequest.blank(self.url + '/key1')
+        req.method = 'DELETE'
+        self.assertRaises(webob.exc.HTTPNotFound,
+                          self.controller.delete, req, self.req_id, 'key1')
+
+    def test_delete_meta_not_found(self):
+        self.stubs.Set(db, 'volume_metadata_get',
+                       return_empty_volume_metadata)
+        req = fakes.HTTPRequest.blank(self.url + '/key6')
+        req.method = 'DELETE'
+        self.assertRaises(webob.exc.HTTPNotFound,
+                          self.controller.delete, req, self.req_id, 'key6')
+
+    def test_create(self):
+        self.stubs.Set(db, 'volume_metadata_get',
+                       return_empty_volume_metadata)
+        self.stubs.Set(db, 'volume_metadata_update',
+                       return_create_volume_metadata)
+
+        req = fakes.HTTPRequest.blank('/v2/volume_metadata')
+        req.method = 'POST'
+        req.content_type = "application/json"
+        body = {"metadata": {"key1": "value1",
+                             "key2": "value2",
+                             "key3": "value3", }}
+        req.body = jsonutils.dumps(body)
+        res_dict = self.controller.create(req, self.req_id, body)
+        self.assertEqual(body, res_dict)
+
+    def test_create_with_keys_in_uppercase_and_lowercase(self):
+        # When the same key is sent in both upper and lower case, the
+        # response should contain the keys the server actually stored.
+        self.stubs.Set(db, 'volume_metadata_get',
+                       return_empty_volume_metadata)
+        self.stubs.Set(db, 'volume_metadata_update',
+                       return_create_volume_metadata_insensitive)
+
+        req = fakes.HTTPRequest.blank('/v2/volume_metadata')
+        req.method = 'POST'
+        req.content_type = "application/json"
+        body = {"metadata": {"key1": "value1",
+                             "KEY1": "value1",
+                             "key2": "value2",
+                             "KEY2": "value2",
+                             "key3": "value3",
+                             "KEY4": "value4"}}
+        expected = {"metadata": {"key1": "value1",
+                                 "key2": "value2",
+                                 "key3": "value3",
+                                 "KEY4": "value4"}}
+        req.body = jsonutils.dumps(body)
+        res_dict = self.controller.create(req,
self.req_id, body) + self.assertEqual(expected, res_dict) + + def test_create_empty_body(self): + self.stubs.Set(db, 'volume_metadata_update', + return_create_volume_metadata) + req = fakes.HTTPRequest.blank(self.url) + req.method = 'POST' + req.headers["content-type"] = "application/json" + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.create, req, self.req_id, None) + + def test_create_item_empty_key(self): + self.stubs.Set(db, 'volume_metadata_update', + return_create_volume_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key1') + req.method = 'PUT' + body = {"meta": {"": "value1"}} + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.create, req, self.req_id, body) + + def test_create_item_key_too_long(self): + self.stubs.Set(db, 'volume_metadata_update', + return_create_volume_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key1') + req.method = 'PUT' + body = {"meta": {("a" * 260): "value1"}} + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.create, + req, self.req_id, body) + + def test_create_nonexistent_volume(self): + self.stubs.Set(db, 'volume_get', + return_volume_nonexistent) + self.stubs.Set(db, 'volume_metadata_get', + return_volume_metadata) + self.stubs.Set(db, 'volume_metadata_update', + return_create_volume_metadata) + + req = fakes.HTTPRequest.blank('/v2/volume_metadata') + req.method = 'POST' + req.content_type = "application/json" + body = {"metadata": {"key9": "value9"}} + req.body = jsonutils.dumps(body) + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.create, req, self.req_id, body) + + def test_update_all(self): + self.stubs.Set(db, 'volume_metadata_update', + return_new_volume_metadata) + req = fakes.HTTPRequest.blank(self.url) + req.method = 'PUT' + req.content_type = "application/json" + expected = { + 'metadata': { + 'key10': 'value10', + 'key99': 'value99', + 'KEY20': 'value20', + }, + } + req.body = jsonutils.dumps(expected) + res_dict = self.controller.update_all(req, self.req_id, expected) + + self.assertEqual(expected, res_dict) + + def test_update_all_with_keys_in_uppercase_and_lowercase(self): + self.stubs.Set(db, 'volume_metadata_get', + return_create_volume_metadata) + self.stubs.Set(db, 'volume_metadata_update', + return_new_volume_metadata) + req = fakes.HTTPRequest.blank(self.url) + req.method = 'PUT' + req.content_type = "application/json" + body = { + 'metadata': { + 'key10': 'value10', + 'KEY10': 'value10', + 'key99': 'value99', + 'KEY20': 'value20', + }, + } + expected = { + 'metadata': { + 'key10': 'value10', + 'key99': 'value99', + 'KEY20': 'value20', + }, + } + req.body = jsonutils.dumps(expected) + res_dict = self.controller.update_all(req, self.req_id, body) + + self.assertEqual(expected, res_dict) + + def test_update_all_empty_container(self): + self.stubs.Set(db, 'volume_metadata_update', + return_empty_container_metadata) + req = fakes.HTTPRequest.blank(self.url) + req.method = 'PUT' + req.content_type = "application/json" + expected = {'metadata': {}} + req.body = jsonutils.dumps(expected) + res_dict = self.controller.update_all(req, self.req_id, expected) + + self.assertEqual(expected, res_dict) + + def test_update_all_malformed_container(self): + self.stubs.Set(db, 'volume_metadata_update', + return_create_volume_metadata) + req = fakes.HTTPRequest.blank(self.url) + req.method = 'PUT' + 
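# A top-level 'meta' key instead of 'metadata' is a malformed
+        # container and must be rejected with HTTPBadRequest.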
req.content_type = "application/json" + expected = {'meta': {}} + req.body = jsonutils.dumps(expected) + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.update_all, req, self.req_id, + expected) + + def test_update_all_malformed_data(self): + self.stubs.Set(db, 'volume_metadata_update', + return_create_volume_metadata) + req = fakes.HTTPRequest.blank(self.url) + req.method = 'PUT' + req.content_type = "application/json" + expected = {'metadata': ['asdf']} + req.body = jsonutils.dumps(expected) + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.update_all, req, self.req_id, + expected) + + def test_update_all_nonexistent_volume(self): + self.stubs.Set(db, 'volume_get', return_volume_nonexistent) + req = fakes.HTTPRequest.blank(self.url) + req.method = 'PUT' + req.content_type = "application/json" + body = {'metadata': {'key10': 'value10'}} + req.body = jsonutils.dumps(body) + + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.update_all, req, '100', body) + + def test_update_item(self): + self.stubs.Set(db, 'volume_metadata_update', + return_create_volume_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key1') + req.method = 'PUT' + body = {"meta": {"key1": "value1"}} + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + res_dict = self.controller.update(req, self.req_id, 'key1', body) + expected = {'meta': {'key1': 'value1'}} + self.assertEqual(expected, res_dict) + + def test_update_item_nonexistent_volume(self): + self.stubs.Set(db, 'volume_get', + return_volume_nonexistent) + req = fakes.HTTPRequest.blank('/v2/fake/volumes/asdf/metadata/key1') + req.method = 'PUT' + body = {"meta": {"key1": "value1"}} + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.update, req, self.req_id, 'key1', + body) + + def test_update_item_empty_body(self): + self.stubs.Set(db, 'volume_metadata_update', + return_create_volume_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key1') + req.method = 'PUT' + req.headers["content-type"] = "application/json" + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.update, req, self.req_id, 'key1', + None) + + def test_update_item_empty_key(self): + self.stubs.Set(db, 'volume_metadata_update', + return_create_volume_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key1') + req.method = 'PUT' + body = {"meta": {"": "value1"}} + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.update, req, self.req_id, '', body) + + def test_update_item_key_too_long(self): + self.stubs.Set(db, 'volume_metadata_update', + return_create_volume_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key1') + req.method = 'PUT' + body = {"meta": {("a" * 260): "value1"}} + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, + self.controller.update, + req, self.req_id, ("a" * 260), body) + + def test_update_item_value_too_long(self): + self.stubs.Set(db, 'volume_metadata_update', + return_create_volume_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key1') + req.method = 'PUT' + body = {"meta": {"key1": ("a" * 260)}} + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, + self.controller.update, + req, 
self.req_id, "key1", body) + + def test_update_item_too_many_keys(self): + self.stubs.Set(db, 'volume_metadata_update', + return_create_volume_metadata) + req = fakes.HTTPRequest.blank(self.url + '/key1') + req.method = 'PUT' + body = {"meta": {"key1": "value1", "key2": "value2"}} + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.update, req, self.req_id, 'key1', + body) + + def test_update_item_body_uri_mismatch(self): + self.stubs.Set(db, 'volume_metadata_update', + return_create_volume_metadata) + req = fakes.HTTPRequest.blank(self.url + '/bad') + req.method = 'PUT' + body = {"meta": {"key1": "value1"}} + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.update, req, self.req_id, 'bad', + body) + + def test_invalid_metadata_items_on_create(self): + self.stubs.Set(db, 'volume_metadata_update', + return_create_volume_metadata) + req = fakes.HTTPRequest.blank(self.url) + req.method = 'POST' + req.headers["content-type"] = "application/json" + + #test for long key + data = {"metadata": {"a" * 260: "value1"}} + req.body = jsonutils.dumps(data) + self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, + self.controller.create, req, self.req_id, data) + + #test for long value + data = {"metadata": {"key": "v" * 260}} + req.body = jsonutils.dumps(data) + self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, + self.controller.create, req, self.req_id, data) + + #test for empty key. + data = {"metadata": {"": "value1"}} + req.body = jsonutils.dumps(data) + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.create, req, self.req_id, data) diff --git a/cinder/tests/api/v2/test_volumes.py b/cinder/tests/api/v2/test_volumes.py new file mode 100644 index 0000000000..da6ba5dc94 --- /dev/null +++ b/cinder/tests/api/v2/test_volumes.py @@ -0,0 +1,1408 @@ +# Copyright 2013 Josh Durgin +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +import datetime + +from lxml import etree +from oslo.config import cfg +import urllib +import webob + +from cinder.api import extensions +from cinder.api.v2 import volumes +from cinder import context +from cinder import db +from cinder import exception +from cinder import test +from cinder.tests.api import fakes +from cinder.tests.api.v2 import stubs +from cinder.tests.image import fake as fake_image +from cinder.volume import api as volume_api + + +CONF = cfg.CONF + +NS = '{http://docs.openstack.org/api/openstack-volume/2.0/content}' + +TEST_SNAPSHOT_UUID = '00000000-0000-0000-0000-000000000001' + + +def stub_snapshot_get(self, context, snapshot_id): + if snapshot_id != TEST_SNAPSHOT_UUID: + raise exception.NotFound + + return { + 'id': snapshot_id, + 'volume_id': 12, + 'status': 'available', + 'volume_size': 100, + 'created_at': None, + 'name': 'Default name', + 'description': 'Default description', + } + + +class VolumeApiTest(test.TestCase): + def setUp(self): + super(VolumeApiTest, self).setUp() + self.ext_mgr = extensions.ExtensionManager() + self.ext_mgr.extensions = {} + fake_image.stub_out_image_service(self.stubs) + self.controller = volumes.VolumeController(self.ext_mgr) + + self.stubs.Set(db, 'volume_get_all', stubs.stub_volume_get_all) + self.stubs.Set(volume_api.API, 'delete', stubs.stub_volume_delete) + self.stubs.Set(db, 'service_get_all_by_topic', + stubs.stub_service_get_all_by_topic) + self.maxDiff = None + + def test_volume_create(self): + self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get) + self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create) + + vol = { + "size": 100, + "name": "Volume Test Name", + "description": "Volume Test Desc", + "availability_zone": "zone1:host1" + } + body = {"volume": vol} + req = fakes.HTTPRequest.blank('/v2/volumes') + res_dict = self.controller.create(req, body) + ex = {'volume': {'attachments': + [{'device': '/', + 'host_name': None, + 'id': '1', + 'server_id': 'fakeuuid', + 'volume_id': '1'}], + 'availability_zone': 'zone1:host1', + 'bootable': 'false', + 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), + 'description': 'Volume Test Desc', + 'id': '1', + 'links': + [{'href': 'http://localhost/v2/fake/volumes/1', + 'rel': 'self'}, + {'href': 'http://localhost/fake/volumes/1', + 'rel': 'bookmark'}], + 'metadata': {'attached_mode': 'rw', + 'readonly': 'False'}, + 'name': 'Volume Test Name', + 'size': 100, + 'snapshot_id': None, + 'source_volid': None, + 'status': 'fakestatus', + 'user_id': 'fakeuser', + 'volume_type': 'vol_type_name'}} + self.assertEqual(res_dict, ex) + + def test_volume_create_with_type(self): + vol_type = db.volume_type_create( + context.get_admin_context(), + dict(name=CONF.default_volume_type, extra_specs={}) + ) + + db_vol_type = db.volume_type_get(context.get_admin_context(), + vol_type.id) + + vol = { + "size": 100, + "name": "Volume Test Name", + "description": "Volume Test Desc", + "availability_zone": "zone1:host1", + "volume_type": "FakeTypeName", + } + body = {"volume": vol} + req = fakes.HTTPRequest.blank('/v2/volumes') + # Raise 404 when type name isn't valid + self.assertRaises(webob.exc.HTTPNotFound, self.controller.create, + req, body) + + # Use correct volume type name + vol.update(dict(volume_type=CONF.default_volume_type)) + body.update(dict(volume=vol)) + res_dict = self.controller.create(req, body) + volume_id = res_dict['volume']['id'] + self.assertEqual(len(res_dict), 1) + + # Use correct volume type id + vol.update(dict(volume_type=db_vol_type['id'])) + 
body.update(dict(volume=vol)) + res_dict = self.controller.create(req, body) + volume_id = res_dict['volume']['id'] + self.assertEqual(len(res_dict), 1) + + self.stubs.Set(volume_api.API, 'get_all', + lambda *args, **kwargs: + [stubs.stub_volume(volume_id, + volume_type={'name': vol_type})]) + req = fakes.HTTPRequest.blank('/v2/volumes/detail') + res_dict = self.controller.detail(req) + + def test_volume_creation_fails_with_bad_size(self): + vol = {"size": '', + "name": "Volume Test Name", + "description": "Volume Test Desc", + "availability_zone": "zone1:host1"} + body = {"volume": vol} + req = fakes.HTTPRequest.blank('/v2/volumes') + self.assertRaises(exception.InvalidInput, + self.controller.create, + req, + body) + + def test_volume_creation_fails_with_bad_availability_zone(self): + vol = {"size": '1', + "name": "Volume Test Name", + "description": "Volume Test Desc", + "availability_zone": "zonen:hostn"} + body = {"volume": vol} + req = fakes.HTTPRequest.blank('/v2/volumes') + self.assertRaises(exception.InvalidInput, + self.controller.create, + req, body) + + def test_volume_create_with_image_id(self): + self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get) + self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create) + + self.ext_mgr.extensions = {'os-image-create': 'fake'} + vol = {"size": '1', + "name": "Volume Test Name", + "description": "Volume Test Desc", + "availability_zone": "nova", + "imageRef": 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'} + ex = {'volume': {'attachments': [{'device': '/', + 'host_name': None, + 'id': '1', + 'server_id': 'fakeuuid', + 'volume_id': '1'}], + 'availability_zone': 'nova', + 'bootable': 'false', + 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), + 'description': 'Volume Test Desc', + 'id': '1', + 'links': + [{'href': 'http://localhost/v2/fake/volumes/1', + 'rel': 'self'}, + {'href': 'http://localhost/fake/volumes/1', + 'rel': 'bookmark'}], + 'metadata': {'attached_mode': 'rw', + 'readonly': 'False'}, + 'name': 'Volume Test Name', + 'size': '1', + 'snapshot_id': None, + 'source_volid': None, + 'status': 'fakestatus', + 'user_id': 'fakeuser', + 'volume_type': 'vol_type_name'}} + body = {"volume": vol} + req = fakes.HTTPRequest.blank('/v2/volumes') + res_dict = self.controller.create(req, body) + self.assertEqual(res_dict, ex) + + def test_volume_create_with_image_id_is_integer(self): + self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create) + self.ext_mgr.extensions = {'os-image-create': 'fake'} + vol = { + "size": '1', + "name": "Volume Test Name", + "description": "Volume Test Desc", + "availability_zone": "cinder", + "imageRef": 1234, + } + body = {"volume": vol} + req = fakes.HTTPRequest.blank('/v2/volumes') + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.create, + req, + body) + + def test_volume_create_with_image_id_not_uuid_format(self): + self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create) + self.ext_mgr.extensions = {'os-image-create': 'fake'} + vol = { + "size": '1', + "name": "Volume Test Name", + "description": "Volume Test Desc", + "availability_zone": "cinder", + "imageRef": '12345' + } + body = {"volume": vol} + req = fakes.HTTPRequest.blank('/v2/volumes') + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.create, + req, + body) + + def test_volume_update(self): + self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get) + self.stubs.Set(volume_api.API, "update", stubs.stub_volume_update) + + updates = { + "name": "Updated Test Name", + } + body = {"volume": 
updates} + req = fakes.HTTPRequest.blank('/v2/volumes/1') + res_dict = self.controller.update(req, '1', body) + expected = { + 'volume': { + 'status': 'fakestatus', + 'description': 'displaydesc', + 'availability_zone': 'fakeaz', + 'bootable': 'false', + 'name': 'Updated Test Name', + 'attachments': [ + { + 'id': '1', + 'volume_id': '1', + 'server_id': 'fakeuuid', + 'host_name': None, + 'device': '/', + } + ], + 'user_id': 'fakeuser', + 'volume_type': 'vol_type_name', + 'snapshot_id': None, + 'source_volid': None, + 'metadata': {'attached_mode': 'rw', 'readonly': 'False'}, + 'id': '1', + 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), + 'size': 1, + 'links': [ + { + 'href': 'http://localhost/v2/fake/volumes/1', + 'rel': 'self' + }, + { + 'href': 'http://localhost/fake/volumes/1', + 'rel': 'bookmark' + } + ], + } + } + self.assertEqual(res_dict, expected) + + def test_volume_update_metadata(self): + self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get) + self.stubs.Set(volume_api.API, "update", stubs.stub_volume_update) + + updates = { + "metadata": {"qos_max_iops": 2000} + } + body = {"volume": updates} + req = fakes.HTTPRequest.blank('/v2/volumes/1') + res_dict = self.controller.update(req, '1', body) + expected = {'volume': { + 'status': 'fakestatus', + 'description': 'displaydesc', + 'availability_zone': 'fakeaz', + 'bootable': 'false', + 'name': 'displayname', + 'attachments': [{ + 'id': '1', + 'volume_id': '1', + 'server_id': 'fakeuuid', + 'host_name': None, + 'device': '/', + }], + 'user_id': 'fakeuser', + 'volume_type': 'vol_type_name', + 'snapshot_id': None, + 'source_volid': None, + 'metadata': {"qos_max_iops": 2000, + "readonly": "False", + "attached_mode": "rw"}, + 'id': '1', + 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), + 'size': 1, + 'links': [ + { + 'href': 'http://localhost/v2/fake/volumes/1', + 'rel': 'self' + }, + { + 'href': 'http://localhost/fake/volumes/1', + 'rel': 'bookmark' + } + ], + }} + self.assertEqual(res_dict, expected) + + def test_volume_update_with_admin_metadata(self): + self.stubs.Set(volume_api.API, "update", stubs.stub_volume_update) + + volume = stubs.stub_volume("1") + del volume['name'] + del volume['volume_type'] + del volume['volume_type_id'] + volume['metadata'] = {'key': 'value'} + db.volume_create(context.get_admin_context(), volume) + db.volume_admin_metadata_update(context.get_admin_context(), "1", + {"readonly": "True", + "invisible_key": "invisible_value"}, + False) + + updates = { + "display_name": "Updated Test Name", + } + body = {"volume": updates} + req = fakes.HTTPRequest.blank('/v2/volumes/1') + admin_ctx = context.RequestContext('admin', 'fake', True) + req.environ['cinder.context'] = admin_ctx + res_dict = self.controller.update(req, '1', body) + expected = {'volume': { + 'status': 'fakestatus', + 'description': 'displaydesc', + 'availability_zone': 'fakeaz', + 'bootable': 'false', + 'name': 'displayname', + 'attachments': [{ + 'id': '1', + 'volume_id': '1', + 'server_id': 'fakeuuid', + 'host_name': None, + 'device': '/', + }], + 'user_id': 'fakeuser', + 'volume_type': None, + 'snapshot_id': None, + 'source_volid': None, + 'metadata': {'key': 'value', + 'readonly': 'True'}, + 'id': '1', + 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), + 'size': 1, + 'links': [ + { + 'href': 'http://localhost/v2/fake/volumes/1', + 'rel': 'self' + }, + { + 'href': 'http://localhost/fake/volumes/1', + 'rel': 'bookmark' + } + ], + }} + self.assertEqual(res_dict, expected) + + def test_update_empty_body(self): + body = {} + req = 
fakes.HTTPRequest.blank('/v2/volumes/1') + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.update, + req, '1', body) + + def test_update_invalid_body(self): + body = { + 'name': 'missing top level volume key' + } + req = fakes.HTTPRequest.blank('/v2/volumes/1') + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.update, + req, '1', body) + + def test_update_not_found(self): + self.stubs.Set(volume_api.API, "get", stubs.stub_volume_get_notfound) + updates = { + "name": "Updated Test Name", + } + body = {"volume": updates} + req = fakes.HTTPRequest.blank('/v2/volumes/1') + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.update, + req, '1', body) + + def test_volume_list_summary(self): + self.stubs.Set(volume_api.API, 'get_all', + stubs.stub_volume_get_all_by_project) + self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get) + + req = fakes.HTTPRequest.blank('/v2/volumes') + res_dict = self.controller.index(req) + expected = { + 'volumes': [ + { + 'name': 'displayname', + 'id': '1', + 'links': [ + { + 'href': 'http://localhost/v2/fake/volumes/1', + 'rel': 'self' + }, + { + 'href': 'http://localhost/fake/volumes/1', + 'rel': 'bookmark' + } + ], + } + ] + } + self.assertEqual(res_dict, expected) + # Finally test that we cached the returned volumes + self.assertEqual(1, len(req.cached_resource())) + + def test_volume_list_detail(self): + self.stubs.Set(volume_api.API, 'get_all', + stubs.stub_volume_get_all_by_project) + self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get) + + req = fakes.HTTPRequest.blank('/v2/volumes/detail') + res_dict = self.controller.detail(req) + expected = { + 'volumes': [ + { + 'status': 'fakestatus', + 'description': 'displaydesc', + 'availability_zone': 'fakeaz', + 'bootable': 'false', + 'name': 'displayname', + 'attachments': [ + { + 'device': '/', + 'server_id': 'fakeuuid', + 'host_name': None, + 'id': '1', + 'volume_id': '1' + } + ], + 'user_id': 'fakeuser', + 'volume_type': 'vol_type_name', + 'snapshot_id': None, + 'source_volid': None, + 'metadata': {'attached_mode': 'rw', 'readonly': 'False'}, + 'id': '1', + 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), + 'size': 1, + 'links': [ + { + 'href': 'http://localhost/v2/fake/volumes/1', + 'rel': 'self' + }, + { + 'href': 'http://localhost/fake/volumes/1', + 'rel': 'bookmark' + } + ], + } + ] + } + self.assertEqual(res_dict, expected) + # Finally test that we cached the returned volumes + self.assertEqual(1, len(req.cached_resource())) + + def test_volume_list_detail_with_admin_metadata(self): + volume = stubs.stub_volume("1") + del volume['name'] + del volume['volume_type'] + del volume['volume_type_id'] + volume['metadata'] = {'key': 'value'} + db.volume_create(context.get_admin_context(), volume) + db.volume_admin_metadata_update(context.get_admin_context(), "1", + {"readonly": "True", + "invisible_key": "invisible_value"}, + False) + + req = fakes.HTTPRequest.blank('/v2/volumes/detail') + admin_ctx = context.RequestContext('admin', 'fakeproject', True) + req.environ['cinder.context'] = admin_ctx + res_dict = self.controller.detail(req) + expected = { + 'volumes': [ + { + 'status': 'fakestatus', + 'description': 'displaydesc', + 'availability_zone': 'fakeaz', + 'bootable': 'false', + 'name': 'displayname', + 'attachments': [ + { + 'device': '/', + 'server_id': 'fakeuuid', + 'host_name': None, + 'id': '1', + 'volume_id': '1' + } + ], + 'user_id': 'fakeuser', + 'volume_type': None, + 'snapshot_id': None, + 'source_volid': None, + 'metadata': {'key': 'value', 
'readonly': 'True'}, + 'id': '1', + 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), + 'size': 1, + 'links': [ + { + 'href': 'http://localhost/v2/fakeproject' + '/volumes/1', + 'rel': 'self' + }, + { + 'href': 'http://localhost/fakeproject/volumes/1', + 'rel': 'bookmark' + } + ], + } + ] + } + self.assertEqual(res_dict, expected) + + def test_volume_index_with_marker(self): + def stub_volume_get_all_by_project(context, project_id, marker, limit, + sort_key, sort_dir): + return [ + stubs.stub_volume(1, display_name='vol1'), + stubs.stub_volume(2, display_name='vol2'), + ] + self.stubs.Set(db, 'volume_get_all_by_project', + stub_volume_get_all_by_project) + self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get) + + req = fakes.HTTPRequest.blank('/v2/volumes?marker=1') + res_dict = self.controller.index(req) + volumes = res_dict['volumes'] + self.assertEqual(len(volumes), 2) + self.assertEqual(volumes[0]['id'], 1) + self.assertEqual(volumes[1]['id'], 2) + + def test_volume_index_limit(self): + self.stubs.Set(db, 'volume_get_all_by_project', + stubs.stub_volume_get_all_by_project) + self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get) + + req = fakes.HTTPRequest.blank('/v2/volumes?limit=1') + res_dict = self.controller.index(req) + volumes = res_dict['volumes'] + self.assertEqual(len(volumes), 1) + + def test_volume_index_limit_negative(self): + req = fakes.HTTPRequest.blank('/v2/volumes?limit=-1') + self.assertRaises(exception.Invalid, + self.controller.index, + req) + + def test_volume_index_limit_non_int(self): + req = fakes.HTTPRequest.blank('/v2/volumes?limit=a') + self.assertRaises(exception.Invalid, + self.controller.index, + req) + + def test_volume_index_limit_marker(self): + self.stubs.Set(db, 'volume_get_all_by_project', + stubs.stub_volume_get_all_by_project) + self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get) + + req = fakes.HTTPRequest.blank('/v2/volumes?marker=1&limit=1') + res_dict = self.controller.index(req) + volumes = res_dict['volumes'] + self.assertEqual(len(volumes), 1) + self.assertEqual(volumes[0]['id'], '1') + + def test_volume_index_limit_offset(self): + def stub_volume_get_all_by_project(context, project_id, marker, limit, + sort_key, sort_dir): + return [ + stubs.stub_volume(1, display_name='vol1'), + stubs.stub_volume(2, display_name='vol2'), + ] + self.stubs.Set(db, 'volume_get_all_by_project', + stub_volume_get_all_by_project) + self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get) + + req = fakes.HTTPRequest.blank('/v2/volumes?limit=2&offset=1') + res_dict = self.controller.index(req) + volumes = res_dict['volumes'] + self.assertEqual(len(volumes), 1) + self.assertEqual(volumes[0]['id'], 2) + + req = fakes.HTTPRequest.blank('/v2/volumes?limit=-1&offset=1') + self.assertRaises(exception.InvalidInput, + self.controller.index, + req) + + req = fakes.HTTPRequest.blank('/v2/volumes?limit=a&offset=1') + self.assertRaises(exception.InvalidInput, + self.controller.index, + req) + + def test_volume_detail_with_marker(self): + def stub_volume_get_all_by_project(context, project_id, marker, limit, + sort_key, sort_dir): + return [ + stubs.stub_volume(1, display_name='vol1'), + stubs.stub_volume(2, display_name='vol2'), + ] + self.stubs.Set(db, 'volume_get_all_by_project', + stub_volume_get_all_by_project) + self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get) + + req = fakes.HTTPRequest.blank('/v2/volumes/detail?marker=1') + res_dict = self.controller.index(req) + volumes = res_dict['volumes'] + self.assertEqual(len(volumes), 2) + 
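# The stub ignores the marker, so both stubbed volumes come back.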
self.assertEqual(volumes[0]['id'], 1) + self.assertEqual(volumes[1]['id'], 2) + + def test_volume_detail_limit(self): + self.stubs.Set(db, 'volume_get_all_by_project', + stubs.stub_volume_get_all_by_project) + self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get) + + req = fakes.HTTPRequest.blank('/v2/volumes/detail?limit=1') + res_dict = self.controller.index(req) + volumes = res_dict['volumes'] + self.assertEqual(len(volumes), 1) + + def test_volume_detail_limit_negative(self): + req = fakes.HTTPRequest.blank('/v2/volumes/detail?limit=-1') + self.assertRaises(exception.Invalid, + self.controller.index, + req) + + def test_volume_detail_limit_non_int(self): + req = fakes.HTTPRequest.blank('/v2/volumes/detail?limit=a') + self.assertRaises(exception.Invalid, + self.controller.index, + req) + + def test_volume_detail_limit_marker(self): + self.stubs.Set(db, 'volume_get_all_by_project', + stubs.stub_volume_get_all_by_project) + self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get) + + req = fakes.HTTPRequest.blank('/v2/volumes/detail?marker=1&limit=1') + res_dict = self.controller.index(req) + volumes = res_dict['volumes'] + self.assertEqual(len(volumes), 1) + self.assertEqual(volumes[0]['id'], '1') + + def test_volume_detail_limit_offset(self): + def stub_volume_get_all_by_project(context, project_id, marker, limit, + sort_key, sort_dir): + return [ + stubs.stub_volume(1, display_name='vol1'), + stubs.stub_volume(2, display_name='vol2'), + ] + self.stubs.Set(db, 'volume_get_all_by_project', + stub_volume_get_all_by_project) + self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get) + + req = fakes.HTTPRequest.blank('/v2/volumes/detail?limit=2&offset=1') + res_dict = self.controller.index(req) + volumes = res_dict['volumes'] + self.assertEqual(len(volumes), 1) + self.assertEqual(volumes[0]['id'], 2) + + req = fakes.HTTPRequest.blank('/v2/volumes/detail?limit=2&offset=1', + use_admin_context=True) + res_dict = self.controller.index(req) + volumes = res_dict['volumes'] + self.assertEqual(len(volumes), 1) + self.assertEqual(volumes[0]['id'], 2) + + req = fakes.HTTPRequest.blank('/v2/volumes/detail?limit=-1&offset=1') + self.assertRaises(exception.InvalidInput, + self.controller.index, + req) + + req = fakes.HTTPRequest.blank('/v2/volumes/detail?limit=a&offset=1') + self.assertRaises(exception.InvalidInput, + self.controller.index, + req) + + def test_volume_list_by_name(self): + def stub_volume_get_all_by_project(context, project_id, marker, limit, + sort_key, sort_dir): + return [ + stubs.stub_volume(1, display_name='vol1'), + stubs.stub_volume(2, display_name='vol2'), + stubs.stub_volume(3, display_name='vol3'), + ] + self.stubs.Set(db, 'volume_get_all_by_project', + stub_volume_get_all_by_project) + self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get) + + # no name filter + req = fakes.HTTPRequest.blank('/v2/volumes') + resp = self.controller.index(req) + self.assertEqual(len(resp['volumes']), 3) + # filter on name + req = fakes.HTTPRequest.blank('/v2/volumes?name=vol2') + resp = self.controller.index(req) + self.assertEqual(len(resp['volumes']), 1) + self.assertEqual(resp['volumes'][0]['name'], 'vol2') + # filter no match + req = fakes.HTTPRequest.blank('/v2/volumes?name=vol4') + resp = self.controller.index(req) + self.assertEqual(len(resp['volumes']), 0) + + def test_volume_list_by_metadata(self): + def stub_volume_get_all_by_project(context, project_id, marker, limit, + sort_key, sort_dir): + return [ + stubs.stub_volume(1, display_name='vol1', + 
status='available', + volume_metadata=[{'key': 'key1', + 'value': 'value1'}]), + stubs.stub_volume(2, display_name='vol2', + status='available', + volume_metadata=[{'key': 'key1', + 'value': 'value2'}]), + stubs.stub_volume(3, display_name='vol3', + status='in-use', + volume_metadata=[{'key': 'key1', + 'value': 'value2'}]), + ] + self.stubs.Set(db, 'volume_get_all_by_project', + stub_volume_get_all_by_project) + + # no metadata filter + req = fakes.HTTPRequest.blank('/v2/volumes', use_admin_context=True) + resp = self.controller.detail(req) + self.assertEqual(len(resp['volumes']), 3) + + # single match + qparams = urllib.urlencode({'metadata': {'key1': 'value1'}}) + req = fakes.HTTPRequest.blank('/v2/volumes?%s' % qparams, + use_admin_context=True) + resp = self.controller.detail(req) + self.assertEqual(len(resp['volumes']), 1) + self.assertEqual(resp['volumes'][0]['name'], 'vol1') + self.assertEqual(resp['volumes'][0]['metadata']['key1'], 'value1') + + # multiple matches + qparams = urllib.urlencode({'metadata': {'key1': 'value2'}}) + req = fakes.HTTPRequest.blank('/v2/volumes?%s' % qparams, + use_admin_context=True) + resp = self.controller.detail(req) + self.assertEqual(len(resp['volumes']), 2) + for volume in resp['volumes']: + self.assertEqual(volume['metadata']['key1'], 'value2') + + # multiple filters + qparams = urllib.urlencode({'metadata': {'key1': 'value2'}}) + req = fakes.HTTPRequest.blank('/v2/volumes?status=in-use&%s' % qparams, + use_admin_context=True) + resp = self.controller.detail(req) + self.assertEqual(len(resp['volumes']), 1) + self.assertEqual(resp['volumes'][0]['name'], 'vol3') + + # no match + qparams = urllib.urlencode({'metadata': {'key1': 'value3'}}) + req = fakes.HTTPRequest.blank('/v2/volumes?%s' % qparams, + use_admin_context=True) + resp = self.controller.detail(req) + self.assertEqual(len(resp['volumes']), 0) + + def test_volume_list_by_status(self): + def stub_volume_get_all_by_project(context, project_id, marker, limit, + sort_key, sort_dir): + return [ + stubs.stub_volume(1, display_name='vol1', status='available'), + stubs.stub_volume(2, display_name='vol2', status='available'), + stubs.stub_volume(3, display_name='vol3', status='in-use'), + ] + self.stubs.Set(db, 'volume_get_all_by_project', + stub_volume_get_all_by_project) + self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get) + + # no status filter + req = fakes.HTTPRequest.blank('/v2/volumes/details') + resp = self.controller.detail(req) + self.assertEqual(len(resp['volumes']), 3) + # single match + req = fakes.HTTPRequest.blank('/v2/volumes/details?status=in-use') + resp = self.controller.detail(req) + self.assertEqual(len(resp['volumes']), 1) + self.assertEqual(resp['volumes'][0]['status'], 'in-use') + # multiple match + req = fakes.HTTPRequest.blank('/v2/volumes/details/?status=available') + resp = self.controller.detail(req) + self.assertEqual(len(resp['volumes']), 2) + for volume in resp['volumes']: + self.assertEqual(volume['status'], 'available') + # multiple filters + req = fakes.HTTPRequest.blank('/v2/volumes/details/?status=available&' + 'name=vol1') + resp = self.controller.detail(req) + self.assertEqual(len(resp['volumes']), 1) + self.assertEqual(resp['volumes'][0]['name'], 'vol1') + self.assertEqual(resp['volumes'][0]['status'], 'available') + # no match + req = fakes.HTTPRequest.blank('/v2/volumes/details?status=in-use&' + 'name=vol1') + resp = self.controller.detail(req) + self.assertEqual(len(resp['volumes']), 0) + + def test_volume_show(self): + 
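# Show should return the full volume body and cache it on the request.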
self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get) + + req = fakes.HTTPRequest.blank('/v2/volumes/1') + res_dict = self.controller.show(req, '1') + expected = { + 'volume': { + 'status': 'fakestatus', + 'description': 'displaydesc', + 'availability_zone': 'fakeaz', + 'bootable': 'false', + 'name': 'displayname', + 'attachments': [ + { + 'device': '/', + 'server_id': 'fakeuuid', + 'host_name': None, + 'id': '1', + 'volume_id': '1' + } + ], + 'user_id': 'fakeuser', + 'volume_type': 'vol_type_name', + 'snapshot_id': None, + 'source_volid': None, + 'metadata': {'attached_mode': 'rw', 'readonly': 'False'}, + 'id': '1', + 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), + 'size': 1, + 'links': [ + { + 'href': 'http://localhost/v2/fake/volumes/1', + 'rel': 'self' + }, + { + 'href': 'http://localhost/fake/volumes/1', + 'rel': 'bookmark' + } + ], + } + } + self.assertEqual(res_dict, expected) + # Finally test that we cached the returned volume + self.assertIsNotNone(req.cached_resource_by_id('1')) + + def test_volume_show_no_attachments(self): + def stub_volume_get(self, context, volume_id): + return stubs.stub_volume(volume_id, attach_status='detached') + + self.stubs.Set(volume_api.API, 'get', stub_volume_get) + + req = fakes.HTTPRequest.blank('/v2/volumes/1') + res_dict = self.controller.show(req, '1') + expected = { + 'volume': { + 'status': 'fakestatus', + 'description': 'displaydesc', + 'availability_zone': 'fakeaz', + 'bootable': 'false', + 'name': 'displayname', + 'attachments': [], + 'user_id': 'fakeuser', + 'volume_type': 'vol_type_name', + 'snapshot_id': None, + 'source_volid': None, + 'metadata': {'readonly': 'False'}, + 'id': '1', + 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), + 'size': 1, + 'links': [ + { + 'href': 'http://localhost/v2/fake/volumes/1', + 'rel': 'self' + }, + { + 'href': 'http://localhost/fake/volumes/1', + 'rel': 'bookmark' + } + ], + } + } + + self.assertEqual(res_dict, expected) + + def test_volume_show_no_volume(self): + self.stubs.Set(volume_api.API, "get", stubs.stub_volume_get_notfound) + + req = fakes.HTTPRequest.blank('/v2/volumes/1') + self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, + req, 1) + # Finally test that nothing was cached + self.assertIsNone(req.cached_resource_by_id('1')) + + def test_volume_show_with_admin_metadata(self): + volume = stubs.stub_volume("1") + del volume['name'] + del volume['volume_type'] + del volume['volume_type_id'] + volume['metadata'] = {'key': 'value'} + db.volume_create(context.get_admin_context(), volume) + db.volume_admin_metadata_update(context.get_admin_context(), "1", + {"readonly": "True", + "invisible_key": "invisible_value"}, + False) + + req = fakes.HTTPRequest.blank('/v2/volumes/1') + admin_ctx = context.RequestContext('admin', 'fakeproject', True) + req.environ['cinder.context'] = admin_ctx + res_dict = self.controller.show(req, '1') + expected = { + 'volume': { + 'status': 'fakestatus', + 'description': 'displaydesc', + 'availability_zone': 'fakeaz', + 'bootable': 'false', + 'name': 'displayname', + 'attachments': [ + { + 'device': '/', + 'server_id': 'fakeuuid', + 'host_name': None, + 'id': '1', + 'volume_id': '1' + } + ], + 'user_id': 'fakeuser', + 'volume_type': None, + 'snapshot_id': None, + 'source_volid': None, + 'metadata': {'key': 'value', + 'readonly': 'True'}, + 'id': '1', + 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), + 'size': 1, + 'links': [ + { + 'href': 'http://localhost/v2/fakeproject/volumes/1', + 'rel': 'self' + }, + { + 'href': 
'http://localhost/fakeproject/volumes/1', + 'rel': 'bookmark' + } + ], + } + } + self.assertEqual(res_dict, expected) + + def test_volume_delete(self): + self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get) + + req = fakes.HTTPRequest.blank('/v2/volumes/1') + resp = self.controller.delete(req, 1) + self.assertEqual(resp.status_int, 202) + + def test_volume_delete_attached(self): + def stub_volume_attached(self, context, volume, force=False): + raise exception.VolumeAttached(volume_id=volume['id']) + self.stubs.Set(volume_api.API, "delete", stub_volume_attached) + self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get) + + req = fakes.HTTPRequest.blank('/v2/volumes/1') + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.delete, + req, 1) + + def test_volume_delete_no_volume(self): + self.stubs.Set(volume_api.API, "get", stubs.stub_volume_get_notfound) + + req = fakes.HTTPRequest.blank('/v2/volumes/1') + self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, + req, 1) + + def test_admin_list_volumes_limited_to_project(self): + self.stubs.Set(db, 'volume_get_all_by_project', + stubs.stub_volume_get_all_by_project) + + req = fakes.HTTPRequest.blank('/v2/fake/volumes', + use_admin_context=True) + res = self.controller.index(req) + + self.assertIn('volumes', res) + self.assertEqual(1, len(res['volumes'])) + + def test_admin_list_volumes_all_tenants(self): + self.stubs.Set(db, 'volume_get_all_by_project', + stubs.stub_volume_get_all_by_project) + + req = fakes.HTTPRequest.blank('/v2/fake/volumes?all_tenants=1', + use_admin_context=True) + res = self.controller.index(req) + self.assertIn('volumes', res) + self.assertEqual(3, len(res['volumes'])) + + def test_all_tenants_non_admin_gets_all_tenants(self): + self.stubs.Set(db, 'volume_get_all_by_project', + stubs.stub_volume_get_all_by_project) + self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get) + + req = fakes.HTTPRequest.blank('/v2/fake/volumes?all_tenants=1') + res = self.controller.index(req) + self.assertIn('volumes', res) + self.assertEqual(1, len(res['volumes'])) + + def test_non_admin_get_by_project(self): + self.stubs.Set(db, 'volume_get_all_by_project', + stubs.stub_volume_get_all_by_project) + self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get) + + req = fakes.HTTPRequest.blank('/v2/fake/volumes') + res = self.controller.index(req) + self.assertIn('volumes', res) + self.assertEqual(1, len(res['volumes'])) + + def _create_volume_bad_request(self, body): + req = fakes.HTTPRequest.blank('/v2/fake/volumes') + req.method = 'POST' + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.create, req, body) + + def test_create_no_body(self): + self._create_volume_bad_request(body=None) + + def test_create_missing_volume(self): + body = {'foo': {'a': 'b'}} + self._create_volume_bad_request(body=body) + + def test_create_malformed_entity(self): + body = {'volume': 'string'} + self._create_volume_bad_request(body=body) + + def test_add_visible_admin_metadata_visible_key_only(self): + admin_metadata = [{"key": "invisible_key", "value": "invisible_value"}, + {"key": "readonly", "value": "visible"}, + {"key": "attached_mode", "value": "visible"}] + metadata = [{"key": "key", "value": "value"}] + volume = dict(volume_admin_metadata=admin_metadata, + volume_metadata=metadata) + admin_ctx = context.get_admin_context() + self.controller._add_visible_admin_metadata(admin_ctx, + volume) + self.assertEqual(volume['volume_metadata'], + [{"key": "key", "value": "value"}, + {"key": "readonly", 
"value": "visible"}, + {"key": "attached_mode", "value": "visible"}]) + + admin_metadata = {"invisible_key": "invisible_value", + "readonly": "visible", + "attached_mode": "visible"} + metadata = {"key": "value"} + volume = dict(admin_metadata=admin_metadata, + metadata=metadata) + admin_ctx = context.get_admin_context() + self.controller._add_visible_admin_metadata(admin_ctx, + volume) + self.assertEqual(volume['metadata'], + {'key': 'value', + 'attached_mode': 'visible', + 'readonly': 'visible'}) + + +class VolumeSerializerTest(test.TestCase): + def _verify_volume_attachment(self, attach, tree): + for attr in ('id', 'volume_id', 'server_id', 'device'): + self.assertEqual(str(attach[attr]), tree.get(attr)) + + def _verify_volume(self, vol, tree): + self.assertEqual(tree.tag, NS + 'volume') + + for attr in ('id', 'status', 'size', 'availability_zone', 'created_at', + 'name', 'description', 'volume_type', 'bootable', + 'snapshot_id', 'source_volid'): + self.assertEqual(str(vol[attr]), tree.get(attr)) + + for child in tree: + self.assertIn(child.tag, (NS + 'attachments', NS + 'metadata')) + if child.tag == 'attachments': + self.assertEqual(1, len(child)) + self.assertEqual('attachment', child[0].tag) + self._verify_volume_attachment(vol['attachments'][0], child[0]) + elif child.tag == 'metadata': + not_seen = set(vol['metadata'].keys()) + for gr_child in child: + self.assertIn(gr_child.get("key"), not_seen) + self.assertEqual(str(vol['metadata'][gr_child.get("key")]), + gr_child.text) + not_seen.remove(gr_child.get('key')) + self.assertEqual(0, len(not_seen)) + + def test_volume_show_create_serializer(self): + serializer = volumes.VolumeTemplate() + raw_volume = dict( + id='vol_id', + status='vol_status', + size=1024, + availability_zone='vol_availability', + bootable=False, + created_at=datetime.datetime.now(), + attachments=[ + dict( + id='vol_id', + volume_id='vol_id', + server_id='instance_uuid', + device='/foo' + ) + ], + name='vol_name', + description='vol_desc', + volume_type='vol_type', + snapshot_id='snap_id', + source_volid='source_volid', + metadata=dict( + foo='bar', + baz='quux', + ), + ) + text = serializer.serialize(dict(volume=raw_volume)) + + tree = etree.fromstring(text) + + self._verify_volume(raw_volume, tree) + + def test_volume_index_detail_serializer(self): + serializer = volumes.VolumesTemplate() + raw_volumes = [ + dict( + id='vol1_id', + status='vol1_status', + size=1024, + availability_zone='vol1_availability', + bootable=True, + created_at=datetime.datetime.now(), + attachments=[ + dict( + id='vol1_id', + volume_id='vol1_id', + server_id='instance_uuid', + device='/foo1' + ) + ], + name='vol1_name', + description='vol1_desc', + volume_type='vol1_type', + snapshot_id='snap1_id', + source_volid=None, + metadata=dict(foo='vol1_foo', + bar='vol1_bar', ), ), + dict( + id='vol2_id', + status='vol2_status', + size=1024, + availability_zone='vol2_availability', + bootable=False, + created_at=datetime.datetime.now(), + attachments=[dict(id='vol2_id', + volume_id='vol2_id', + server_id='instance_uuid', + device='/foo2')], + name='vol2_name', + description='vol2_desc', + volume_type='vol2_type', + snapshot_id='snap2_id', + source_volid=None, + metadata=dict(foo='vol2_foo', + bar='vol2_bar', ), )] + text = serializer.serialize(dict(volumes=raw_volumes)) + + tree = etree.fromstring(text) + + self.assertEqual(NS + 'volumes', tree.tag) + self.assertEqual(len(raw_volumes), len(tree)) + for idx, child in enumerate(tree): + self._verify_volume(raw_volumes[idx], child) + + +class 
TestVolumeCreateRequestXMLDeserializer(test.TestCase): + + def setUp(self): + super(TestVolumeCreateRequestXMLDeserializer, self).setUp() + self.deserializer = volumes.CreateDeserializer() + + def test_minimal_volume(self): + self_request = """ +""" + request = self.deserializer.deserialize(self_request) + expected = { + "volume": { + "size": "1", + }, + } + self.assertEqual(request['body'], expected) + + def test_name(self): + self_request = """ +""" + request = self.deserializer.deserialize(self_request) + expected = { + "volume": { + "size": "1", + "name": "Volume-xml", + }, + } + self.assertEqual(request['body'], expected) + + def test_description(self): + self_request = """ +""" + request = self.deserializer.deserialize(self_request) + expected = { + "volume": { + "size": "1", + "name": "Volume-xml", + "description": "description", + }, + } + self.assertEqual(request['body'], expected) + + def test_volume_type(self): + self_request = """ +""" + request = self.deserializer.deserialize(self_request) + expected = { + "volume": { + "name": "Volume-xml", + "size": "1", + "name": "Volume-xml", + "description": "description", + "volume_type": "289da7f8-6440-407c-9fb4-7db01ec49164", + }, + } + self.assertEqual(request['body'], expected) + + def test_availability_zone(self): + self_request = """ +""" + request = self.deserializer.deserialize(self_request) + expected = { + "volume": { + "size": "1", + "name": "Volume-xml", + "description": "description", + "volume_type": "289da7f8-6440-407c-9fb4-7db01ec49164", + "availability_zone": "us-east1", + }, + } + self.assertEqual(request['body'], expected) + + def test_metadata(self): + self_request = """ + + work""" + request = self.deserializer.deserialize(self_request) + expected = { + "volume": { + "name": "Volume-xml", + "size": "1", + "metadata": { + "Type": "work", + }, + }, + } + self.assertEqual(request['body'], expected) + + def test_full_volume(self): + self_request = """ + + work""" + request = self.deserializer.deserialize(self_request) + expected = { + "volume": { + "size": "1", + "name": "Volume-xml", + "description": "description", + "volume_type": "289da7f8-6440-407c-9fb4-7db01ec49164", + "availability_zone": "us-east1", + "metadata": { + "Type": "work", + }, + }, + } + self.assertEqual(request['body'], expected) + + def test_imageref(self): + self_request = """ +""" + request = self.deserializer.deserialize(self_request) + expected = { + "volume": { + "size": "1", + "name": "Volume-xml", + "description": "description", + "imageRef": "4a90189d-d702-4c7c-87fc-6608c554d737", + }, + } + self.assertEqual(expected, request['body']) + + def test_snapshot_id(self): + self_request = """ +""" + request = self.deserializer.deserialize(self_request) + expected = { + "volume": { + "size": "1", + "name": "Volume-xml", + "description": "description", + "snapshot_id": "4a90189d-d702-4c7c-87fc-6608c554d737", + }, + } + self.assertEqual(expected, request['body']) + + def test_source_volid(self): + self_request = """ +""" + request = self.deserializer.deserialize(self_request) + expected = { + "volume": { + "size": "1", + "name": "Volume-xml", + "description": "description", + "source_volid": "4a90189d-d702-4c7c-87fc-6608c554d737", + }, + } + self.assertEqual(expected, request['body']) diff --git a/cinder/tests/backup/__init__.py b/cinder/tests/backup/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cinder/tests/backup/fake_service.py b/cinder/tests/backup/fake_service.py new file mode 100644 index 0000000000..fa8fac4dae --- 
/dev/null +++ b/cinder/tests/backup/fake_service.py @@ -0,0 +1,41 @@ +# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from cinder.backup.driver import BackupDriver +from cinder.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + + +class FakeBackupService(BackupDriver): + def __init__(self, context, db_driver=None): + super(FakeBackupService, self).__init__(db_driver) + + def backup(self, backup, volume_file): + pass + + def restore(self, backup, volume_id, volume_file): + pass + + def delete(self, backup): + # if backup has magic name of 'fail_on_delete' + # we raise an error - useful for some tests - + # otherwise we return without error + if backup['display_name'] == 'fail_on_delete': + raise IOError('fake') + + +def get_backup_driver(context): + return FakeBackupService(context) diff --git a/cinder/tests/backup/fake_swift_client.py b/cinder/tests/backup/fake_swift_client.py new file mode 100644 index 0000000000..dc35ea7f2c --- /dev/null +++ b/cinder/tests/backup/fake_swift_client.py @@ -0,0 +1,113 @@ +# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
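An aside on the fake backup driver above: its delete() raises IOError only for the magic display name 'fail_on_delete' and silently succeeds otherwise. A minimal sketch of how a test could lean on that hook; the test class and backup dicts here are hypothetical, not part of this patch:

```python
# Hypothetical usage sketch for FakeBackupService -- not part of this patch.
from cinder import test
from cinder.tests.backup import fake_service


class FakeBackupServiceSketch(test.TestCase):
    def test_delete_fails_on_magic_name(self):
        # 'fail_on_delete' is the magic display name the fake keys on.
        service = fake_service.get_backup_driver(None)
        self.assertRaises(IOError, service.delete,
                          {'display_name': 'fail_on_delete'})

    def test_delete_succeeds_otherwise(self):
        service = fake_service.get_backup_driver(None)
        service.delete({'display_name': 'routine-backup'})  # no exception
```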
+
+import httplib
+import json
+import os
+import socket
+import zlib
+
+from cinder.openstack.common import log as logging
+from swiftclient import client as swift
+
+LOG = logging.getLogger(__name__)
+
+
+class FakeSwiftClient(object):
+    """Logs calls instead of executing."""
+    def __init__(self, *args, **kwargs):
+        pass
+
+    @classmethod
+    def Connection(cls, *args, **kargs):
+        LOG.debug("fake FakeSwiftClient Connection")
+        return FakeSwiftConnection()
+
+
+class FakeSwiftConnection(object):
+    """Logs calls instead of executing."""
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def head_container(self, container):
+        LOG.debug("fake head_container(%s)" % container)
+        if container == 'missing_container':
+            raise swift.ClientException('fake exception',
+                                        http_status=httplib.NOT_FOUND)
+        elif container == 'unauthorized_container':
+            raise swift.ClientException('fake exception',
+                                        http_status=httplib.UNAUTHORIZED)
+        elif container == 'socket_error_on_head':
+            raise socket.error(111, 'ECONNREFUSED')
+
+    def put_container(self, container):
+        LOG.debug("fake put_container(%s)" % container)
+
+    def get_container(self, container, **kwargs):
+        LOG.debug("fake get_container(%s)" % container)
+        fake_header = None
+        fake_body = [{'name': 'backup_001'},
+                     {'name': 'backup_002'},
+                     {'name': 'backup_003'}]
+        return fake_header, fake_body
+
+    def head_object(self, container, name):
+        LOG.debug("fake head_object(%s, %s)" % (container, name))
+        return {'etag': 'fake-md5-sum'}
+
+    def get_object(self, container, name):
+        LOG.debug("fake get_object(%s, %s)" % (container, name))
+        if container == 'socket_error_on_get':
+            raise socket.error(111, 'ECONNREFUSED')
+        if 'metadata' in name:
+            fake_object_header = None
+            metadata = {}
+            if container == 'unsupported_version':
+                metadata['version'] = '9.9.9'
+            else:
+                metadata['version'] = '1.0.0'
+            metadata['backup_id'] = 123
+            metadata['volume_id'] = 123
+            metadata['backup_name'] = 'fake backup'
+            metadata['backup_description'] = 'fake backup description'
+            metadata['created_at'] = '2013-02-19 11:20:54,805'
+            metadata['objects'] = [{
+                'backup_001': {'compression': 'zlib', 'length': 10},
+                'backup_002': {'compression': 'zlib', 'length': 10},
+                'backup_003': {'compression': 'zlib', 'length': 10}
+            }]
+            metadata_json = json.dumps(metadata, sort_keys=True, indent=2)
+            fake_object_body = metadata_json
+            return (fake_object_header, fake_object_body)
+
+        fake_header = None
+        fake_object_body = os.urandom(1024 * 1024)
+        return (fake_header, zlib.compress(fake_object_body))
+
+    def put_object(self, container, name, reader, content_length=None,
+                   etag=None, chunk_size=None, content_type=None,
+                   headers=None, query_string=None):
+        LOG.debug("fake put_object(%s, %s)" % (container, name))
+        if container == 'socket_error_on_put':
+            raise socket.error(111, 'ECONNREFUSED')
+        return 'fake-md5-sum'
+
+    def delete_object(self, container, name):
+        LOG.debug("fake delete_object(%s, %s)" % (container, name))
+        if container == 'socket_error_on_delete':
+            raise socket.error(111, 'ECONNREFUSED')
diff --git a/cinder/tests/brick/__init__.py b/cinder/tests/brick/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/cinder/tests/brick/fake_lvm.py b/cinder/tests/brick/fake_lvm.py
new file mode 100644
index 0000000000..cd2d6171b3
--- /dev/null
+++ b/cinder/tests/brick/fake_lvm.py
@@ -0,0 +1,72 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from cinder.openstack.common import log as logging
+
+
+LOG = logging.getLogger(__name__)
+
+
+class FakeBrickLVM(object):
+    """Logs and records calls, for unit tests."""
+    def __init__(self, vg_name, create, pv_list, vtype, execute=None):
+        super(FakeBrickLVM, self).__init__()
+        self.vg_size = '5.00'
+        self.vg_free_space = '5.00'
+        self.vg_name = vg_name
+
+    def supports_thin_provisioning(self):
+        return False
+
+    def get_all_volumes(self, vg_name=None):
+        if vg_name is not None:
+            return [vg_name]
+        return ['cinder-volumes', 'fake-vg-1']
+
+    def get_volumes(self):
+        return ['fake-volume']
+
+    def get_volume(self, name):
+        return [name]
+
+    def get_all_physical_volumes(self, vg_name=None):
+        return []
+
+    def get_physical_volumes(self):
+        return []
+
+    def get_all_volume_groups(self, vg_name=None):
+        return ['cinder-volumes', 'fake-vg']
+
+    def update_volume_group_info(self):
+        pass
+
+    def create_thin_pool(self, name=None, size_str=0):
+        pass
+
+    def create_volume(self, name, size_str, lv_type='default', mirror_count=0):
+        pass
+
+    def create_lv_snapshot(self, name, source_lv_name, lv_type='default'):
+        pass
+
+    def delete(self, name):
+        pass
+
+    def revert(self, snapshot_name):
+        pass
+
+    def lv_has_snapshot(self, name):
+        return False
+
+    def activate_lv(self, lv, is_snapshot=False):
+        pass
diff --git a/cinder/tests/brick/test_brick_connector.py b/cinder/tests/brick/test_brick_connector.py
new file mode 100644
index 0000000000..d977091551
--- /dev/null
+++ b/cinder/tests/brick/test_brick_connector.py
@@ -0,0 +1,628 @@
+# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
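Before the connector tests begin, a brief note on the FakeBrickLVM helper above: it stands in for the real brick LVM wrapper so driver tests never shell out to LVM tools. A rough usage sketch; the constructor arguments mirror the fake's signature, and the printed values are only what this fake hardcodes:

```python
# Illustrative sketch of standing FakeBrickLVM in for the real helper.
from cinder.tests.brick import fake_lvm

vg = fake_lvm.FakeBrickLVM('cinder-volumes', create=False,
                           pv_list=[], vtype='default')
print(vg.vg_name)        # 'cinder-volumes'
print(vg.get_volumes())  # ['fake-volume']
vg.create_volume('vol-1', '1G')  # a no-op: nothing is executed
```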
+ +import os.path +import string +import time + +import mox + +from cinder.brick import exception +from cinder.brick.initiator import connector +from cinder.brick.initiator import host_driver +from cinder.openstack.common import log as logging +from cinder.openstack.common import loopingcall +from cinder.openstack.common import processutils as putils +from cinder import test + +LOG = logging.getLogger(__name__) + + +class ConnectorTestCase(test.TestCase): + + def setUp(self): + super(ConnectorTestCase, self).setUp() + self.cmds = [] + self.stubs.Set(os.path, 'exists', lambda x: True) + + def fake_execute(self, *cmd, **kwargs): + self.cmds.append(string.join(cmd)) + return "", None + + def test_connect_volume(self): + self.connector = connector.InitiatorConnector(None) + self.assertRaises(NotImplementedError, + self.connector.connect_volume, None) + + def test_disconnect_volume(self): + self.connector = connector.InitiatorConnector(None) + self.assertRaises(NotImplementedError, + self.connector.disconnect_volume, None, None) + + def test_factory(self): + obj = connector.InitiatorConnector.factory('iscsi', None) + self.assertEqual(obj.__class__.__name__, "ISCSIConnector") + + obj = connector.InitiatorConnector.factory('fibre_channel', None) + self.assertEqual(obj.__class__.__name__, "FibreChannelConnector") + + obj = connector.InitiatorConnector.factory('aoe', None) + self.assertEqual(obj.__class__.__name__, "AoEConnector") + + obj = connector.InitiatorConnector.factory( + 'nfs', None, nfs_mount_point_base='/mnt/test') + self.assertEqual(obj.__class__.__name__, "RemoteFsConnector") + + obj = connector.InitiatorConnector.factory( + 'glusterfs', None, glusterfs_mount_point_base='/mnt/test') + self.assertEqual(obj.__class__.__name__, "RemoteFsConnector") + + obj = connector.InitiatorConnector.factory('local', None) + self.assertEqual(obj.__class__.__name__, "LocalConnector") + + self.assertRaises(ValueError, + connector.InitiatorConnector.factory, + "bogus", None) + + def test_check_valid_device_with_wrong_path(self): + self.connector = connector.InitiatorConnector(None) + self.stubs.Set(self.connector, + '_execute', lambda *args, **kwargs: ("", None)) + self.assertFalse(self.connector.check_valid_device('/d0v')) + + def test_check_valid_device(self): + self.connector = connector.InitiatorConnector(None) + self.stubs.Set(self.connector, + '_execute', lambda *args, **kwargs: ("", "")) + self.assertTrue(self.connector.check_valid_device('/dev')) + + def test_check_valid_device_with_cmd_error(self): + def raise_except(*args, **kwargs): + raise putils.ProcessExecutionError + self.connector = connector.InitiatorConnector(None) + self.stubs.Set(self.connector, + '_execute', raise_except) + self.assertFalse(self.connector.check_valid_device('/dev')) + + +class HostDriverTestCase(test.TestCase): + + def setUp(self): + super(HostDriverTestCase, self).setUp() + self.stubs.Set(os.path, 'isdir', lambda x: True) + self.devlist = ['device1', 'device2'] + self.stubs.Set(os, 'listdir', lambda x: self.devlist) + + def test_host_driver(self): + expected = ['/dev/disk/by-path/' + dev for dev in self.devlist] + driver = host_driver.HostDriver() + actual = driver.get_all_block_devices() + self.assertEqual(expected, actual) + + +class ISCSIConnectorTestCase(ConnectorTestCase): + + def setUp(self): + super(ISCSIConnectorTestCase, self).setUp() + self.connector = connector.ISCSIConnector( + None, execute=self.fake_execute, use_multipath=False) + self.stubs.Set(self.connector._linuxscsi, + 'get_name_from_path', lambda 
x: "/dev/sdb") + + def tearDown(self): + super(ISCSIConnectorTestCase, self).tearDown() + + def iscsi_connection(self, volume, location, iqn): + return { + 'driver_volume_type': 'iscsi', + 'data': { + 'volume_id': volume['id'], + 'target_portal': location, + 'target_iqn': iqn, + 'target_lun': 1, + } + } + + def test_get_initiator(self): + def initiator_no_file(*args, **kwargs): + raise putils.ProcessExecutionError('No file') + + def initiator_get_text(*arg, **kwargs): + text = ('## DO NOT EDIT OR REMOVE THIS FILE!\n' + '## If you remove this file, the iSCSI daemon ' + 'will not start.\n' + '## If you change the InitiatorName, existing ' + 'access control lists\n' + '## may reject this initiator. The InitiatorName must ' + 'be unique\n' + '## for each iSCSI initiator. Do NOT duplicate iSCSI ' + 'InitiatorNames.\n' + 'InitiatorName=iqn.1234-56.foo.bar:01:23456789abc') + return text, None + + self.stubs.Set(self.connector, '_execute', initiator_no_file) + initiator = self.connector.get_initiator() + self.assertIsNone(initiator) + self.stubs.Set(self.connector, '_execute', initiator_get_text) + initiator = self.connector.get_initiator() + self.assertEqual(initiator, 'iqn.1234-56.foo.bar:01:23456789abc') + + @test.testtools.skipUnless(os.path.exists('/dev/disk/by-path'), + 'Test requires /dev/disk/by-path') + def test_connect_volume(self): + self.stubs.Set(os.path, 'exists', lambda x: True) + location = '10.0.2.15:3260' + name = 'volume-00000001' + iqn = 'iqn.2010-10.org.openstack:%s' % name + vol = {'id': 1, 'name': name} + connection_info = self.iscsi_connection(vol, location, iqn) + device = self.connector.connect_volume(connection_info['data']) + dev_str = '/dev/disk/by-path/ip-%s-iscsi-%s-lun-1' % (location, iqn) + self.assertEqual(device['type'], 'block') + self.assertEqual(device['path'], dev_str) + + self.connector.disconnect_volume(connection_info['data'], device) + expected_commands = [('iscsiadm -m node -T %s -p %s' % + (iqn, location)), + ('iscsiadm -m session'), + ('iscsiadm -m node -T %s -p %s --login' % + (iqn, location)), + ('iscsiadm -m node -T %s -p %s --op update' + ' -n node.startup -v automatic' % (iqn, + location)), + ('iscsiadm -m node --rescan'), + ('iscsiadm -m session --rescan'), + ('tee -a /sys/block/sdb/device/delete'), + ('iscsiadm -m node -T %s -p %s --op update' + ' -n node.startup -v manual' % (iqn, location)), + ('iscsiadm -m node -T %s -p %s --logout' % + (iqn, location)), + ('iscsiadm -m node -T %s -p %s --op delete' % + (iqn, location)), ] + LOG.debug("self.cmds = %s" % self.cmds) + LOG.debug("expected = %s" % expected_commands) + + self.assertEqual(expected_commands, self.cmds) + + def test_connect_volume_with_multipath(self): + location = '10.0.2.15:3260' + name = 'volume-00000001' + iqn = 'iqn.2010-10.org.openstack:%s' % name + vol = {'id': 1, 'name': name} + connection_properties = self.iscsi_connection(vol, location, iqn) + + self.connector_with_multipath =\ + connector.ISCSIConnector(None, use_multipath=True) + self.stubs.Set(self.connector_with_multipath, + '_run_iscsiadm_bare', + lambda *args, **kwargs: "%s %s" % (location, iqn)) + self.stubs.Set(self.connector_with_multipath, + '_get_target_portals_from_iscsiadm_output', + lambda x: [[location, iqn]]) + self.stubs.Set(self.connector_with_multipath, + '_connect_to_iscsi_portal', + lambda x: None) + self.stubs.Set(self.connector_with_multipath, + '_rescan_iscsi', + lambda: None) + self.stubs.Set(self.connector_with_multipath, + '_rescan_multipath', + lambda: None) + 
self.stubs.Set(self.connector_with_multipath, + '_get_multipath_device_name', + lambda x: 'iqn.2010-10.org.openstack:%s' % name) + self.stubs.Set(os.path, 'exists', lambda x: True) + result = self.connector_with_multipath.connect_volume( + connection_properties['data']) + expected_result = {'path': 'iqn.2010-10.org.openstack:volume-00000001', + 'type': 'block'} + self.assertEqual(result, expected_result) + + def test_connect_volume_with_not_found_device(self): + self.stubs.Set(os.path, 'exists', lambda x: False) + self.stubs.Set(time, 'sleep', lambda x: None) + location = '10.0.2.15:3260' + name = 'volume-00000001' + iqn = 'iqn.2010-10.org.openstack:%s' % name + vol = {'id': 1, 'name': name} + connection_info = self.iscsi_connection(vol, location, iqn) + self.assertRaises(exception.VolumeDeviceNotFound, + self.connector.connect_volume, + connection_info['data']) + + def test_get_target_portals_from_iscsiadm_output(self): + connector = self.connector + test_output = '''10.15.84.19:3260 iqn.1992-08.com.netapp:sn.33615311 + 10.15.85.19:3260 iqn.1992-08.com.netapp:sn.33615311''' + res = connector._get_target_portals_from_iscsiadm_output(test_output) + ip_iqn1 = ['10.15.84.19:3260', 'iqn.1992-08.com.netapp:sn.33615311'] + ip_iqn2 = ['10.15.85.19:3260', 'iqn.1992-08.com.netapp:sn.33615311'] + expected = [ip_iqn1, ip_iqn2] + self.assertEqual(expected, res) + + def test_get_multipath_device_name(self): + self.stubs.Set(os.path, 'realpath', lambda x: None) + multipath_return_string = [('mpath2 (20017380006c00036)' + 'dm-7 IBM,2810XIV')] + self.stubs.Set(self.connector, '_run_multipath', + lambda *args, **kwargs: multipath_return_string) + expected = '/dev/mapper/mpath2' + self.assertEqual(expected, + self.connector. + _get_multipath_device_name('/dev/md-1')) + + def test_get_iscsi_devices(self): + paths = [('ip-10.0.0.1:3260-iscsi-iqn.2013-01.ro.' + 'com.netapp:node.netapp02-lun-0')] + self.stubs.Set(os, 'walk', lambda x: [(['.'], ['by-path'], paths)]) + self.assertEqual(self.connector._get_iscsi_devices(), paths) + + def test_get_iscsi_devices_with_empty_dir(self): + self.stubs.Set(os, 'walk', lambda x: []) + self.assertEqual(self.connector._get_iscsi_devices(), []) + + def test_get_multipath_iqn(self): + paths = [('ip-10.0.0.1:3260-iscsi-iqn.2013-01.ro.' 
+ 'com.netapp:node.netapp02-lun-0')] + self.stubs.Set(os.path, 'realpath', + lambda x: '/dev/disk/by-path/%s' % paths[0]) + self.stubs.Set(self.connector, '_get_iscsi_devices', lambda: paths) + self.stubs.Set(self.connector, '_get_multipath_device_name', + lambda x: paths[0]) + self.assertEqual(self.connector._get_multipath_iqn(paths[0]), + 'iqn.2013-01.ro.com.netapp:node.netapp02') + + def test_disconnect_volume_multipath_iscsi(self): + result = [] + + def fake_disconnect_from_iscsi_portal(properties): + result.append(properties) + iqn1 = 'iqn.2013-01.ro.com.netapp:node.netapp01' + iqn2 = 'iqn.2013-01.ro.com.netapp:node.netapp02' + iqns = [iqn1, iqn2] + portal = '10.0.0.1:3260' + dev = ('ip-%s-iscsi-%s-lun-0' % (portal, iqn1)) + self.stubs.Set(self.connector, + '_get_target_portals_from_iscsiadm_output', + lambda x: [[portal, iqn1]]) + self.stubs.Set(self.connector, '_rescan_iscsi', lambda: None) + self.stubs.Set(self.connector, '_rescan_multipath', lambda: None) + self.stubs.Set(self.connector.driver, 'get_all_block_devices', + lambda: [dev, '/dev/mapper/md-1']) + self.stubs.Set(self.connector, '_get_multipath_device_name', + lambda x: '/dev/mapper/md-3') + self.stubs.Set(self.connector, '_get_multipath_iqn', + lambda x: iqns.pop()) + self.stubs.Set(self.connector, '_disconnect_from_iscsi_portal', + fake_disconnect_from_iscsi_portal) + fake_property = {'target_portal': portal, + 'target_iqn': iqn1} + self.connector._disconnect_volume_multipath_iscsi(fake_property, + 'fake/multipath') + # Target in use by other mp devices, don't disconnect + self.assertEqual([], result) + + def test_disconnect_volume_multipath_iscsi_without_other_mp_devices(self): + result = [] + + def fake_disconnect_from_iscsi_portal(properties): + result.append(properties) + portal = '10.0.2.15:3260' + name = 'volume-00000001' + iqn = 'iqn.2010-10.org.openstack:%s' % name + self.stubs.Set(self.connector, + '_get_target_portals_from_iscsiadm_output', + lambda x: [[portal, iqn]]) + self.stubs.Set(self.connector, '_rescan_iscsi', lambda: None) + self.stubs.Set(self.connector, '_rescan_multipath', lambda: None) + self.stubs.Set(self.connector.driver, 'get_all_block_devices', + lambda: []) + self.stubs.Set(self.connector, '_disconnect_from_iscsi_portal', + fake_disconnect_from_iscsi_portal) + fake_property = {'target_portal': portal, + 'target_iqn': iqn} + self.connector._disconnect_volume_multipath_iscsi(fake_property, + 'fake/multipath') + # Target not in use by other mp devices, disconnect + self.assertEqual([fake_property], result) + + +class FibreChannelConnectorTestCase(ConnectorTestCase): + def setUp(self): + super(FibreChannelConnectorTestCase, self).setUp() + self.connector = connector.FibreChannelConnector( + None, execute=self.fake_execute, use_multipath=False) + self.assertIsNotNone(self.connector) + self.assertIsNotNone(self.connector._linuxfc) + self.assertIsNotNone(self.connector._linuxscsi) + + def fake_get_fc_hbas(self): + return [{'ClassDevice': 'host1', + 'ClassDevicePath': '/sys/devices/pci0000:00/0000:00:03.0' + '/0000:05:00.2/host1/fc_host/host1', + 'dev_loss_tmo': '30', + 'fabric_name': '0x1000000533f55566', + 'issue_lip': '', + 'max_npiv_vports': '255', + 'maxframe_size': '2048 bytes', + 'node_name': '0x200010604b019419', + 'npiv_vports_inuse': '0', + 'port_id': '0x680409', + 'port_name': '0x100010604b019419', + 'port_state': 'Online', + 'port_type': 'NPort (fabric via point-to-point)', + 'speed': '10 Gbit', + 'supported_classes': 'Class 3', + 'supported_speeds': '10 Gbit', + 'symbolic_name': 'Emulex 
554M FV4.0.493.0 DV8.3.27', + 'tgtid_bind_type': 'wwpn (World Wide Port Name)', + 'uevent': None, + 'vport_create': '', + 'vport_delete': ''}] + + def fake_get_fc_hbas_info(self): + hbas = self.fake_get_fc_hbas() + info = [{'port_name': hbas[0]['port_name'].replace('0x', ''), + 'node_name': hbas[0]['node_name'].replace('0x', ''), + 'host_device': hbas[0]['ClassDevice'], + 'device_path': hbas[0]['ClassDevicePath']}] + return info + + def fibrechan_connection(self, volume, location, wwn): + return {'driver_volume_type': 'fibrechan', + 'data': { + 'volume_id': volume['id'], + 'target_portal': location, + 'target_wwn': wwn, + 'target_lun': 1, + }} + + def test_connect_volume(self): + self.stubs.Set(self.connector._linuxfc, "get_fc_hbas", + self.fake_get_fc_hbas) + self.stubs.Set(self.connector._linuxfc, "get_fc_hbas_info", + self.fake_get_fc_hbas_info) + self.stubs.Set(os.path, 'exists', lambda x: True) + self.stubs.Set(os.path, 'realpath', lambda x: '/dev/sdb') + + multipath_devname = '/dev/md-1' + devices = {"device": multipath_devname, + "id": "1234567890", + "devices": [{'device': '/dev/sdb', + 'address': '1:0:0:1', + 'host': 1, 'channel': 0, + 'id': 0, 'lun': 1}]} + self.stubs.Set(self.connector._linuxscsi, 'find_multipath_device', + lambda x: devices) + self.stubs.Set(self.connector._linuxscsi, 'remove_scsi_device', + lambda x: None) + self.stubs.Set(self.connector._linuxscsi, 'get_device_info', + lambda x: devices['devices'][0]) + location = '10.0.2.15:3260' + name = 'volume-00000001' + vol = {'id': 1, 'name': name} + # Should work for string, unicode, and list + wwns = ['1234567890123456', unicode('1234567890123456'), + ['1234567890123456', '1234567890123457']] + for wwn in wwns: + connection_info = self.fibrechan_connection(vol, location, wwn) + dev_info = self.connector.connect_volume(connection_info['data']) + exp_wwn = wwn[0] if isinstance(wwn, list) else wwn + dev_str = ('/dev/disk/by-path/pci-0000:05:00.2-fc-0x%s-lun-1' % + exp_wwn) + self.assertEqual(dev_info['type'], 'block') + self.assertEqual(dev_info['path'], dev_str) + + self.connector.disconnect_volume(connection_info['data'], dev_info) + expected_commands = [] + self.assertEqual(expected_commands, self.cmds) + + # Should not work for anything other than string, unicode, and list + connection_info = self.fibrechan_connection(vol, location, 123) + self.assertRaises(exception.NoFibreChannelHostsFound, + self.connector.connect_volume, + connection_info['data']) + + self.stubs.Set(self.connector._linuxfc, 'get_fc_hbas', + lambda: []) + self.stubs.Set(self.connector._linuxfc, 'get_fc_hbas_info', + lambda: []) + self.assertRaises(exception.NoFibreChannelHostsFound, + self.connector.connect_volume, + connection_info['data']) + + +class FakeFixedIntervalLoopingCall(object): + def __init__(self, f=None, *args, **kw): + self.args = args + self.kw = kw + self.f = f + self._stop = False + + def stop(self): + self._stop = True + + def wait(self): + return self + + def start(self, interval, initial_delay=None): + while not self._stop: + try: + self.f(*self.args, **self.kw) + except loopingcall.LoopingCallDone: + return self + except Exception: + LOG.exception(_('in fixed duration looping call')) + raise + + +class AoEConnectorTestCase(ConnectorTestCase): + """Test cases for AoE initiator class.""" + def setUp(self): + super(AoEConnectorTestCase, self).setUp() + self.mox = mox.Mox() + self.connector = connector.AoEConnector('sudo') + self.connection_properties = {'target_shelf': 'fake_shelf', + 'target_lun': 'fake_lun'} + 
self.stubs.Set(loopingcall,
+                       'FixedIntervalLoopingCall',
+                       FakeFixedIntervalLoopingCall)
+
+    def tearDown(self):
+        self.mox.VerifyAll()
+        self.mox.UnsetStubs()
+        super(AoEConnectorTestCase, self).tearDown()
+
+    def _mock_path_exists(self, aoe_path, mock_values=[]):
+        self.mox.StubOutWithMock(os.path, 'exists')
+        for value in mock_values:
+            os.path.exists(aoe_path).AndReturn(value)
+
+    def test_connect_volume(self):
+        """Ensure that aoe-revalidate is called when the path exists."""
+        aoe_device, aoe_path = self.connector._get_aoe_info(
+            self.connection_properties)
+
+        self._mock_path_exists(aoe_path, [True, True])
+
+        self.mox.StubOutWithMock(self.connector, '_execute')
+        self.connector._execute('aoe-revalidate',
+                                aoe_device,
+                                run_as_root=True,
+                                root_helper='sudo',
+                                check_exit_code=0).AndReturn(("", ""))
+        self.mox.ReplayAll()
+
+        self.connector.connect_volume(self.connection_properties)
+
+    def test_connect_volume_without_path(self):
+        """Ensure that aoe-discover is called when the path is missing."""
+
+        aoe_device, aoe_path = self.connector._get_aoe_info(
+            self.connection_properties)
+        expected_info = {
+            'type': 'block',
+            'device': aoe_device,
+            'path': aoe_path,
+        }
+
+        self._mock_path_exists(aoe_path, [False, True])
+
+        self.mox.StubOutWithMock(self.connector, '_execute')
+        self.connector._execute('aoe-discover',
+                                run_as_root=True,
+                                root_helper='sudo',
+                                check_exit_code=0).AndReturn(("", ""))
+        self.mox.ReplayAll()
+
+        volume_info = self.connector.connect_volume(
+            self.connection_properties)
+
+        self.assertDictMatch(volume_info, expected_info)
+
+    def test_connect_volume_could_not_discover_path(self):
+        aoe_device, aoe_path = self.connector._get_aoe_info(
+            self.connection_properties)
+
+        number_of_calls = 4
+        self._mock_path_exists(aoe_path, [False] * (number_of_calls + 1))
+        self.mox.StubOutWithMock(self.connector, '_execute')
+
+        for i in xrange(number_of_calls):
+            self.connector._execute('aoe-discover',
+                                    run_as_root=True,
+                                    root_helper='sudo',
+                                    check_exit_code=0).AndReturn(("", ""))
+        self.mox.ReplayAll()
+        self.assertRaises(exception.VolumeDeviceNotFound,
+                          self.connector.connect_volume,
+                          self.connection_properties)
+
+    def test_disconnect_volume(self):
+        """Ensure that aoe-flush is called when the path exists."""
+        aoe_device, aoe_path = self.connector._get_aoe_info(
+            self.connection_properties)
+
+        self._mock_path_exists(aoe_path, [True])
+
+        self.mox.StubOutWithMock(self.connector, '_execute')
+        self.connector._execute('aoe-flush',
+                                aoe_device,
+                                run_as_root=True,
+                                root_helper='sudo',
+                                check_exit_code=0).AndReturn(("", ""))
+        self.mox.ReplayAll()
+
+        self.connector.disconnect_volume(self.connection_properties, {})
+
+
+class RemoteFsConnectorTestCase(ConnectorTestCase):
+    """Test cases for Remote FS initiator class."""
+    TEST_DEV = '172.18.194.100:/var/nfs'
+    TEST_PATH = '/mnt/test/df0808229363aad55c27da50c38d6328'
+
+    def setUp(self):
+        super(RemoteFsConnectorTestCase, self).setUp()
+        self.mox = mox.Mox()
+        self.connection_properties = {
+            'export': self.TEST_DEV,
+            'name': '9c592d52-ce47-4263-8c21-4ecf3c029cdb'}
+        self.connector = connector.RemoteFsConnector(
+            'nfs', root_helper='sudo', nfs_mount_point_base='/mnt/test')
+
+    def tearDown(self):
+        self.mox.VerifyAll()
+        self.mox.UnsetStubs()
+        super(RemoteFsConnectorTestCase, self).tearDown()
+
+    def test_connect_volume(self):
+        """Test the basic connect volume case."""
+        client = self.connector._remotefsclient
+        self.mox.StubOutWithMock(client, '_execute')
+        client._execute('mount',
check_exit_code=0).AndReturn(("", "")) + client._execute('mkdir', '-p', self.TEST_PATH, + check_exit_code=0).AndReturn(("", "")) + client._execute('mount', '-t', 'nfs', + self.TEST_DEV, self.TEST_PATH, + root_helper='sudo', run_as_root=True, + check_exit_code=0).AndReturn(("", "")) + self.mox.ReplayAll() + + self.connector.connect_volume(self.connection_properties) + + def test_disconnect_volume(self): + """Nothing should happen here -- make sure it doesn't blow up.""" + self.connector.disconnect_volume(self.connection_properties, {}) + + +class LocalConnectorTestCase(test.TestCase): + + def setUp(self): + super(LocalConnectorTestCase, self).setUp() + self.connection_properties = {'name': 'foo', + 'device_path': '/tmp/bar'} + + def test_connect_volume(self): + self.connector = connector.LocalConnector(None) + cprops = self.connection_properties + dev_info = self.connector.connect_volume(cprops) + self.assertEqual(dev_info['type'], 'local') + self.assertEqual(dev_info['path'], cprops['device_path']) + + def test_connect_volume_with_invalid_connection_data(self): + self.connector = connector.LocalConnector(None) + cprops = {} + self.assertRaises(ValueError, + self.connector.connect_volume, cprops) diff --git a/cinder/tests/brick/test_brick_linuxfc.py b/cinder/tests/brick/test_brick_linuxfc.py new file mode 100644 index 0000000000..8e93ef71a2 --- /dev/null +++ b/cinder/tests/brick/test_brick_linuxfc.py @@ -0,0 +1,170 @@ +# (c) Copyright 2013 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
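The AoE and RemoteFs tests above all follow mox's record/replay/verify cycle. A self-contained sketch of that cycle; Runner is an illustrative stand-in for the connector objects stubbed in the tests:

```python
# Minimal mox record/replay/verify cycle.
import mox as mox_lib


class Runner(object):
    def _execute(self, *cmd, **kwargs):
        raise AssertionError('should have been stubbed out')


m = mox_lib.Mox()
runner = Runner()
m.StubOutWithMock(runner, '_execute')               # record phase
runner._execute('mount', check_exit_code=0).AndReturn(("", ""))
m.ReplayAll()                                       # switch to replay
out, err = runner._execute('mount', check_exit_code=0)  # must match recording
m.VerifyAll()                                       # every expectation met?
m.UnsetStubs()
```

Calls during replay must match the recorded expectations exactly, which is why the tests record each `_execute` invocation, argument for argument, before calling the code under test.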
+
+import os.path
+import string
+
+from cinder.brick.initiator import linuxfc
+from cinder.openstack.common import log as logging
+from cinder import test
+
+LOG = logging.getLogger(__name__)
+
+
+class LinuxFCTestCase(test.TestCase):
+
+    def setUp(self):
+        super(LinuxFCTestCase, self).setUp()
+        self.cmds = []
+        self.stubs.Set(os.path, 'exists', lambda x: True)
+        self.lfc = linuxfc.LinuxFibreChannel(None, execute=self.fake_execute)
+
+    def fake_execute(self, *cmd, **kwargs):
+        self.cmds.append(string.join(cmd))
+        return "", None
+
+    def test_rescan_hosts(self):
+        hbas = [{'host_device': 'foo'},
+                {'host_device': 'bar'}, ]
+        self.lfc.rescan_hosts(hbas)
+        expected_commands = ['tee -a /sys/class/scsi_host/foo/scan',
+                             'tee -a /sys/class/scsi_host/bar/scan']
+        self.assertEqual(expected_commands, self.cmds)
+
+    def test_get_fc_hbas_fail(self):
+        def fake_exec1(a, b, c, d, run_as_root=True, root_helper='sudo'):
+            raise OSError
+
+        def fake_exec2(a, b, c, d, run_as_root=True, root_helper='sudo'):
+            return None, 'None found'
+
+        self.stubs.Set(self.lfc, "_execute", fake_exec1)
+        hbas = self.lfc.get_fc_hbas()
+        self.assertEqual(0, len(hbas))
+        self.stubs.Set(self.lfc, "_execute", fake_exec2)
+        hbas = self.lfc.get_fc_hbas()
+        self.assertEqual(0, len(hbas))
+
+    def test_get_fc_hbas(self):
+        def fake_exec(a, b, c, d, run_as_root=True, root_helper='sudo'):
+            return SYSTOOL_FC, None
+        self.stubs.Set(self.lfc, "_execute", fake_exec)
+        hbas = self.lfc.get_fc_hbas()
+        self.assertEqual(2, len(hbas))
+        hba1 = hbas[0]
+        self.assertEqual(hba1["ClassDevice"], "host0")
+        hba2 = hbas[1]
+        self.assertEqual(hba2["ClassDevice"], "host2")
+
+    def test_get_fc_hbas_info(self):
+        def fake_exec(a, b, c, d, run_as_root=True, root_helper='sudo'):
+            return SYSTOOL_FC, None
+        self.stubs.Set(self.lfc, "_execute", fake_exec)
+        hbas_info = self.lfc.get_fc_hbas_info()
+        expected_info = [{'device_path': '/sys/devices/pci0000:20/'
+                                         '0000:20:03.0/0000:21:00.0/'
+                                         'host0/fc_host/host0',
+                          'host_device': 'host0',
+                          'node_name': '50014380242b9751',
+                          'port_name': '50014380242b9750'},
+                         {'device_path': '/sys/devices/pci0000:20/'
+                                         '0000:20:03.0/0000:21:00.1/'
+                                         'host2/fc_host/host2',
+                          'host_device': 'host2',
+                          'node_name': '50014380242b9753',
+                          'port_name': '50014380242b9752'}, ]
+        self.assertEqual(expected_info, hbas_info)
+
+    def test_get_fc_wwpns(self):
+        def fake_exec(a, b, c, d, run_as_root=True, root_helper='sudo'):
+            return SYSTOOL_FC, None
+        self.stubs.Set(self.lfc, "_execute", fake_exec)
+        wwpns = self.lfc.get_fc_wwpns()
+        expected_wwpns = ['50014380242b9750', '50014380242b9752']
+        self.assertEqual(expected_wwpns, wwpns)
+
+    def test_get_fc_wwnns(self):
+        def fake_exec(a, b, c, d, run_as_root=True, root_helper='sudo'):
+            return SYSTOOL_FC, None
+        self.stubs.Set(self.lfc, "_execute", fake_exec)
+        # Exercise get_fc_wwnns (node names), not get_fc_wwpns.
+        wwnns = self.lfc.get_fc_wwnns()
+        expected_wwnns = ['50014380242b9751', '50014380242b9753']
+        self.assertEqual(expected_wwnns, wwnns)
+
+SYSTOOL_FC = """
+Class = "fc_host"
+
+  Class Device = "host0"
+  Class Device path = "/sys/devices/pci0000:20/0000:20:03.0/\
+0000:21:00.0/host0/fc_host/host0"
+    dev_loss_tmo = "16"
+    fabric_name = "0x100000051ea338b9"
+    issue_lip =
+    max_npiv_vports = "0"
+    node_name = "0x50014380242b9751"
+    npiv_vports_inuse = "0"
+    port_id = "0x960d0d"
+    port_name = "0x50014380242b9750"
+    port_state = "Online"
+    port_type = "NPort (fabric via point-to-point)"
+    speed = "8 Gbit"
+    supported_classes = "Class 3"
+    supported_speeds = "1 Gbit, 2 Gbit, 4 Gbit, 8 Gbit"
+    symbolic_name = "QMH2572 FW:v4.04.04
DVR:v8.03.07.12-k" + system_hostname = "" + tgtid_bind_type = "wwpn (World Wide Port Name)" + uevent = + vport_create = + vport_delete = + + Device = "host0" + Device path = "/sys/devices/pci0000:20/0000:20:03.0/0000:21:00.0/host0" + edc = + optrom_ctl = + reset = + uevent = "DEVTYPE=scsi_host" + + + Class Device = "host2" + Class Device path = "/sys/devices/pci0000:20/0000:20:03.0/\ +0000:21:00.1/host2/fc_host/host2" + dev_loss_tmo = "16" + fabric_name = "0x100000051ea33b79" + issue_lip = + max_npiv_vports = "0" + node_name = "0x50014380242b9753" + npiv_vports_inuse = "0" + port_id = "0x970e09" + port_name = "0x50014380242b9752" + port_state = "Online" + port_type = "NPort (fabric via point-to-point)" + speed = "8 Gbit" + supported_classes = "Class 3" + supported_speeds = "1 Gbit, 2 Gbit, 4 Gbit, 8 Gbit" + symbolic_name = "QMH2572 FW:v4.04.04 DVR:v8.03.07.12-k" + system_hostname = "" + tgtid_bind_type = "wwpn (World Wide Port Name)" + uevent = + vport_create = + vport_delete = + + Device = "host2" + Device path = "/sys/devices/pci0000:20/0000:20:03.0/0000:21:00.1/host2" + edc = + optrom_ctl = + reset = + uevent = "DEVTYPE=scsi_host" + + +""" diff --git a/cinder/tests/brick/test_brick_linuxscsi.py b/cinder/tests/brick/test_brick_linuxscsi.py new file mode 100644 index 0000000000..47b73dcfb1 --- /dev/null +++ b/cinder/tests/brick/test_brick_linuxscsi.py @@ -0,0 +1,187 @@ +# (c) Copyright 2013 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import os.path +import string + +from cinder.brick.initiator import linuxscsi +from cinder.openstack.common import log as logging +from cinder import test + +LOG = logging.getLogger(__name__) + + +class LinuxSCSITestCase(test.TestCase): + def setUp(self): + super(LinuxSCSITestCase, self).setUp() + self.cmds = [] + self.stubs.Set(os.path, 'realpath', lambda x: '/dev/sdc') + self.linuxscsi = linuxscsi.LinuxSCSI(None, execute=self.fake_execute) + + def fake_execute(self, *cmd, **kwargs): + self.cmds.append(string.join(cmd)) + return "", None + + def test_echo_scsi_command(self): + self.linuxscsi.echo_scsi_command("/some/path", "1") + expected_commands = ['tee -a /some/path'] + self.assertEqual(expected_commands, self.cmds) + + def test_get_name_from_path(self): + device_name = "/dev/sdc" + self.stubs.Set(os.path, 'realpath', lambda x: device_name) + disk_path = ("/dev/disk/by-path/ip-10.10.220.253:3260-" + "iscsi-iqn.2000-05.com.3pardata:21810002ac00383d-lun-0") + name = self.linuxscsi.get_name_from_path(disk_path) + self.assertEqual(name, device_name) + self.stubs.Set(os.path, 'realpath', lambda x: "bogus") + name = self.linuxscsi.get_name_from_path(disk_path) + self.assertIsNone(name) + + def test_remove_scsi_device(self): + self.stubs.Set(os.path, "exists", lambda x: False) + self.linuxscsi.remove_scsi_device("sdc") + expected_commands = [] + self.assertEqual(expected_commands, self.cmds) + self.stubs.Set(os.path, "exists", lambda x: True) + self.linuxscsi.remove_scsi_device("sdc") + expected_commands = [('tee -a /sys/block/sdc/device/delete')] + self.assertEqual(expected_commands, self.cmds) + + def test_flush_multipath_device(self): + self.linuxscsi.flush_multipath_device('/dev/dm-9') + expected_commands = [('multipath -f /dev/dm-9')] + self.assertEqual(expected_commands, self.cmds) + + def test_flush_multipath_devices(self): + self.linuxscsi.flush_multipath_devices() + expected_commands = [('multipath -F')] + self.assertEqual(expected_commands, self.cmds) + + def test_remove_multipath_device(self): + def fake_find_multipath_device(device): + devices = [{'device': '/dev/sde', 'host': 0, + 'channel': 0, 'id': 0, 'lun': 1}, + {'device': '/dev/sdf', 'host': 2, + 'channel': 0, 'id': 0, 'lun': 1}, ] + + info = {"device": "dm-3", + "id": "350002ac20398383d", + "devices": devices} + return info + + self.stubs.Set(os.path, "exists", lambda x: True) + self.stubs.Set(self.linuxscsi, 'find_multipath_device', + fake_find_multipath_device) + + self.linuxscsi.remove_multipath_device('/dev/dm-3') + expected_commands = [('tee -a /sys/block/sde/device/delete'), + ('tee -a /sys/block/sdf/device/delete'), + ('multipath -f 350002ac20398383d'), ] + self.assertEqual(expected_commands, self.cmds) + + def test_find_multipath_device_3par(self): + def fake_execute(*cmd, **kwargs): + out = ("mpath6 (350002ac20398383d) dm-3 3PARdata,VV\n" + "size=2.0G features='0' hwhandler='0' wp=rw\n" + "`-+- policy='round-robin 0' prio=-1 status=active\n" + " |- 0:0:0:1 sde 8:64 active undef running\n" + " `- 2:0:0:1 sdf 8:80 active undef running\n" + ) + return out, None + + def fake_execute2(*cmd, **kwargs): + out = ("350002ac20398383d dm-3 3PARdata,VV\n" + "size=2.0G features='0' hwhandler='0' wp=rw\n" + "`-+- policy='round-robin 0' prio=-1 status=active\n" + " |- 0:0:0:1 sde 8:64 active undef running\n" + " `- 2:0:0:1 sdf 8:80 active undef running\n" + ) + return out, None + + self.stubs.Set(self.linuxscsi, '_execute', fake_execute) + + info = self.linuxscsi.find_multipath_device('/dev/sde') + LOG.error("info = %s" % 
info) + self.assertEqual("/dev/dm-3", info["device"]) + self.assertEqual("/dev/sde", info['devices'][0]['device']) + self.assertEqual("0", info['devices'][0]['host']) + self.assertEqual("0", info['devices'][0]['id']) + self.assertEqual("0", info['devices'][0]['channel']) + self.assertEqual("1", info['devices'][0]['lun']) + + self.assertEqual("/dev/sdf", info['devices'][1]['device']) + self.assertEqual("2", info['devices'][1]['host']) + self.assertEqual("0", info['devices'][1]['id']) + self.assertEqual("0", info['devices'][1]['channel']) + self.assertEqual("1", info['devices'][1]['lun']) + + def test_find_multipath_device_svc(self): + def fake_execute(*cmd, **kwargs): + out = ("36005076da00638089c000000000004d5 dm-2 IBM,2145\n" + "size=954M features='1 queue_if_no_path' hwhandler='0'" + " wp=rw\n" + "|-+- policy='round-robin 0' prio=-1 status=active\n" + "| |- 6:0:2:0 sde 8:64 active undef running\n" + "| `- 6:0:4:0 sdg 8:96 active undef running\n" + "`-+- policy='round-robin 0' prio=-1 status=enabled\n" + " |- 6:0:3:0 sdf 8:80 active undef running\n" + " `- 6:0:5:0 sdh 8:112 active undef running\n" + ) + return out, None + + self.stubs.Set(self.linuxscsi, '_execute', fake_execute) + + info = self.linuxscsi.find_multipath_device('/dev/sde') + LOG.error("info = %s" % info) + self.assertEqual("/dev/dm-2", info["device"]) + self.assertEqual("/dev/sde", info['devices'][0]['device']) + self.assertEqual("6", info['devices'][0]['host']) + self.assertEqual("0", info['devices'][0]['channel']) + self.assertEqual("2", info['devices'][0]['id']) + self.assertEqual("0", info['devices'][0]['lun']) + + self.assertEqual("/dev/sdf", info['devices'][2]['device']) + self.assertEqual("6", info['devices'][2]['host']) + self.assertEqual("0", info['devices'][2]['channel']) + self.assertEqual("3", info['devices'][2]['id']) + self.assertEqual("0", info['devices'][2]['lun']) + + def test_find_multipath_device_ds8000(self): + def fake_execute(*cmd, **kwargs): + out = ("36005076303ffc48e0000000000000101 dm-2 IBM,2107900\n" + "size=1.0G features='1 queue_if_no_path' hwhandler='0'" + " wp=rw\n" + "`-+- policy='round-robin 0' prio=-1 status=active\n" + " |- 6:0:2:0 sdd 8:64 active undef running\n" + " `- 6:1:0:3 sdc 8:32 active undef running\n" + ) + return out, None + + self.stubs.Set(self.linuxscsi, '_execute', fake_execute) + + info = self.linuxscsi.find_multipath_device('/dev/sdd') + LOG.error("info = %s" % info) + self.assertEqual("/dev/dm-2", info["device"]) + self.assertEqual("/dev/sdd", info['devices'][0]['device']) + self.assertEqual("6", info['devices'][0]['host']) + self.assertEqual("0", info['devices'][0]['channel']) + self.assertEqual("2", info['devices'][0]['id']) + self.assertEqual("0", info['devices'][0]['lun']) + + self.assertEqual("/dev/sdc", info['devices'][1]['device']) + self.assertEqual("6", info['devices'][1]['host']) + self.assertEqual("1", info['devices'][1]['channel']) + self.assertEqual("0", info['devices'][1]['id']) + self.assertEqual("3", info['devices'][1]['lun']) diff --git a/cinder/tests/brick/test_brick_lvm.py b/cinder/tests/brick/test_brick_lvm.py new file mode 100644 index 0000000000..a213b96037 --- /dev/null +++ b/cinder/tests/brick/test_brick_lvm.py @@ -0,0 +1,251 @@ +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mox + + +from cinder.brick.local_dev import lvm as brick +from cinder.openstack.common import log as logging +from cinder.openstack.common import processutils +from cinder import test +from cinder.volume import configuration as conf + +LOG = logging.getLogger(__name__) + + +def create_configuration(): + configuration = mox.MockObject(conf.Configuration) + configuration.append_config_values(mox.IgnoreArg()) + return configuration + + +class BrickLvmTestCase(test.TestCase): + def setUp(self): + self._mox = mox.Mox() + self.configuration = mox.MockObject(conf.Configuration) + self.configuration.volume_group_name = 'fake-vg' + super(BrickLvmTestCase, self).setUp() + + #Stub processutils.execute for static methods + self.stubs.Set(processutils, 'execute', + self.fake_execute) + self.vg = brick.LVM(self.configuration.volume_group_name, + 'sudo', + False, None, + 'default', + self.fake_execute) + + def failed_fake_execute(obj, *cmd, **kwargs): + return ("\n", "fake-error") + + def fake_pretend_lvm_version(obj, *cmd, **kwargs): + return (" LVM version: 2.03.00 (2012-03-06)\n", "") + + def fake_old_lvm_version(obj, *cmd, **kwargs): + # Does not support thin prov or snap activation + return (" LVM version: 2.02.65(2) (2012-03-06)\n", "") + + def fake_customised_lvm_version(obj, *cmd, **kwargs): + return (" LVM version: 2.02.100(2)-RHEL6 (2013-09-12)\n", "") + + def fake_execute(obj, *cmd, **kwargs): + cmd_string = ', '.join(cmd) + data = "\n" + + if ('env, LC_ALL=C, vgs, --noheadings, --unit=g, -o, name' == + cmd_string): + data = " fake-vg\n" + data += " some-other-vg\n" + elif ('env, LC_ALL=C, vgs, --noheadings, -o, name, fake-vg' == + cmd_string): + data = " fake-vg\n" + elif 'env, LC_ALL=C, vgs, --version' in cmd_string: + data = " LVM version: 2.02.95(2) (2012-03-06)\n" + elif ('env, LC_ALL=C, vgs, --noheadings, -o uuid, fake-vg' in + cmd_string): + data = " kVxztV-dKpG-Rz7E-xtKY-jeju-QsYU-SLG6Z1\n" + elif 'env, LC_ALL=C, vgs, --noheadings, --unit=g, ' \ + '-o, name,size,free,lv_count,uuid, ' \ + '--separator, :, --nosuffix' in cmd_string: + data = " fake-vg:10.00:10.00:0:"\ + "kVxztV-dKpG-Rz7E-xtKY-jeju-QsYU-SLG6Z1\n" + if 'fake-vg' in cmd_string: + return (data, "") + data += " fake-vg-2:10.00:10.00:0:"\ + "lWyauW-dKpG-Rz7E-xtKY-jeju-QsYU-SLG7Z2\n" + data += " fake-vg-3:10.00:10.00:0:"\ + "mXzbuX-dKpG-Rz7E-xtKY-jeju-QsYU-SLG8Z3\n" + elif ('env, LC_ALL=C, lvs, --noheadings, ' + '--unit=g, -o, vg_name,name,size' in cmd_string): + data = " fake-vg fake-1 1.00g\n" + data += " fake-vg fake-2 1.00g\n" + elif ('env, LC_ALL=C, lvdisplay, --noheading, -C, -o, Attr' in + cmd_string): + if 'test-volumes' in cmd_string: + data = ' wi-a-' + else: + data = ' owi-a-' + elif 'env, LC_ALL=C, pvs, --noheadings' in cmd_string: + data = " fake-vg:/dev/sda:10.00:1.00\n" + data += " fake-vg:/dev/sdb:10.00:1.00\n" + data += " fake-vg:/dev/sdc:10.00:8.99\n" + data += " fake-vg-2:/dev/sdd:10.00:9.99\n" + elif 'env, LC_ALL=C, lvs, --noheadings, --unit=g' \ + ', -o, size,data_percent, --separator, :' in cmd_string: + data = " 9:12\n" + elif 'lvcreate, -T, -L, ' in cmd_string: + pass + elif 'lvcreate, 
-T, -V, ' in cmd_string: + pass + else: + raise AssertionError('unexpected command called: %s' % cmd_string) + + return (data, "") + + def test_vg_exists(self): + self.assertEqual(self.vg._vg_exists(), True) + + def test_get_vg_uuid(self): + self.assertEqual(self.vg._get_vg_uuid()[0], + 'kVxztV-dKpG-Rz7E-xtKY-jeju-QsYU-SLG6Z1') + + def test_get_all_volumes(self): + out = self.vg.get_volumes() + + self.assertEqual(out[0]['name'], 'fake-1') + self.assertEqual(out[0]['size'], '1.00g') + self.assertEqual(out[0]['vg'], 'fake-vg') + + def test_get_volume(self): + self.assertEqual(self.vg.get_volume('fake-1')['name'], 'fake-1') + + def test_get_all_physical_volumes(self): + # Filtered VG version + pvs = self.vg.get_all_physical_volumes('sudo', 'fake-vg') + self.assertEqual(len(pvs), 3) + + # Non-Filtered, all VG's + pvs = self.vg.get_all_physical_volumes('sudo') + self.assertEqual(len(pvs), 4) + + def test_get_physical_volumes(self): + pvs = self.vg.get_physical_volumes() + self.assertEqual(len(pvs), 3) + + def test_get_volume_groups(self): + self.assertEqual(len(self.vg.get_all_volume_groups('sudo')), 3) + self.assertEqual(len(self.vg.get_all_volume_groups('sudo', + 'fake-vg')), 1) + + def test_thin_support(self): + # lvm.supports_thin() is a static method and doesn't + # use the self._executor fake we pass in on init + # so we need to stub processutils.execute appropriately + + self.stubs.Set(processutils, 'execute', self.fake_execute) + self.assertTrue(self.vg.supports_thin_provisioning('sudo')) + + self.stubs.Set(processutils, 'execute', self.fake_pretend_lvm_version) + self.assertTrue(self.vg.supports_thin_provisioning('sudo')) + + self.stubs.Set(processutils, 'execute', self.fake_old_lvm_version) + self.assertFalse(self.vg.supports_thin_provisioning('sudo')) + + self.stubs.Set(processutils, + 'execute', + self.fake_customised_lvm_version) + self.assertTrue(self.vg.supports_thin_provisioning('sudo')) + + def test_snapshot_lv_activate_support(self): + self.vg._supports_snapshot_lv_activation = None + self.stubs.Set(processutils, 'execute', self.fake_execute) + self.assertTrue(self.vg.supports_snapshot_lv_activation) + + self.vg._supports_snapshot_lv_activation = None + self.stubs.Set(processutils, 'execute', self.fake_old_lvm_version) + self.assertFalse(self.vg.supports_snapshot_lv_activation) + + self.vg._supports_snapshot_lv_activation = None + + def test_lvchange_ignskipact_support_yes(self): + """Tests if lvchange -K is available via a lvm2 version check.""" + + self.vg._supports_lvchange_ignoreskipactivation = None + self.stubs.Set(processutils, 'execute', self.fake_pretend_lvm_version) + self.assertTrue(self.vg.supports_lvchange_ignoreskipactivation) + + self.vg._supports_lvchange_ignoreskipactivation = None + self.stubs.Set(processutils, 'execute', self.fake_old_lvm_version) + self.assertFalse(self.vg.supports_lvchange_ignoreskipactivation) + + self.vg._supports_lvchange_ignoreskipactivation = None + + def test_thin_pool_creation(self): + + # The size of fake-vg volume group is 10g, so the calculated thin + # pool size should be 9.5g (95% of 10g). + self.assertEqual("9.5g", self.vg.create_thin_pool()) + + # Passing a size parameter should result in a thin pool of that exact + # size. 
+ for size in ("1g", "1.2g", "1.75g"): + self.assertEqual(size, self.vg.create_thin_pool(size_str=size)) + + def test_thin_pool_free_space(self): + # The size of fake-vg-pool is 9g and the allocated data sums up to + # 12% so the calculated free space should be 7.92 + self.assertEqual(float("7.92"), + self.vg._get_thin_pool_free_space("fake-vg", + "fake-vg-pool")) + + def test_volume_create_after_thin_creation(self): + """Test self.vg.vg_thin_pool is set to pool_name + + See bug #1220286 for more info. + """ + + vg_name = "vg-name" + pool_name = vg_name + "-pool" + pool_path = "%s/%s" % (vg_name, pool_name) + + def executor(obj, *cmd, **kwargs): + self.assertEqual(pool_path, cmd[-1]) + + self.vg._executor = executor + self.vg.create_thin_pool(pool_name, "1G") + self.vg.create_volume("test", "1G", lv_type='thin') + + self.assertEqual(self.vg.vg_thin_pool, pool_name) + + def test_lv_has_snapshot(self): + self.assertTrue(self.vg.lv_has_snapshot('fake-vg')) + self.assertFalse(self.vg.lv_has_snapshot('test-volumes')) + + def test_activate_lv(self): + self._mox.StubOutWithMock(self.vg, '_execute') + self.vg._supports_lvchange_ignoreskipactivation = True + + self.vg._execute('lvchange', '-a', 'y', '--yes', '-K', + 'fake-vg/my-lv', + root_helper='sudo', run_as_root=True) + + self._mox.ReplayAll() + + self.vg.activate_lv('my-lv') + + self._mox.VerifyAll() + + def test_get_mirrored_available_capacity(self): + self.assertEqual(self.vg.vg_mirror_free_space(1), 2.0) diff --git a/cinder/tests/brick/test_brick_remotefs.py b/cinder/tests/brick/test_brick_remotefs.py new file mode 100644 index 0000000000..322e97aeb3 --- /dev/null +++ b/cinder/tests/brick/test_brick_remotefs.py @@ -0,0 +1,97 @@ +# (c) Copyright 2013 OpenStack Foundation +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+
+import mox
+
+from cinder.brick.remotefs import remotefs
+from cinder.openstack.common import log as logging
+from cinder import test
+
+LOG = logging.getLogger(__name__)
+
+
+class BrickRemoteFsTestCase(test.TestCase):
+    TEST_EXPORT = '1.2.3.4/export1'
+    TEST_MNT_BASE = '/mnt/test'
+    TEST_HASH = '4d664fd43b6ff86d80a4ea969c07b3b9'
+    TEST_MNT_POINT = TEST_MNT_BASE + '/' + TEST_HASH
+
+    def setUp(self):
+        super(BrickRemoteFsTestCase, self).setUp()
+        self._mox = mox.Mox()
+        self._nfsclient = remotefs.RemoteFsClient(
+            'nfs', 'sudo', nfs_mount_point_base=self.TEST_MNT_BASE)
+        self.addCleanup(self._mox.UnsetStubs)
+
+    def test_get_hash_str(self):
+        """_get_hash_str should calculate the correct value."""
+
+        self.assertEqual(self.TEST_HASH,
+                         self._nfsclient._get_hash_str(self.TEST_EXPORT))
+
+    def test_get_mount_point(self):
+        mnt_point = self._nfsclient.get_mount_point(self.TEST_EXPORT)
+        self.assertEqual(mnt_point, self.TEST_MNT_POINT)
+
+    def test_mount_nfs_should_mount_correctly(self):
+        mox = self._mox
+        client = self._nfsclient
+
+        mox.StubOutWithMock(client, '_execute')
+        client._execute('mount', check_exit_code=0).AndReturn(("", ""))
+        client._execute('mkdir', '-p', self.TEST_MNT_POINT,
+                        check_exit_code=0).AndReturn(("", ""))
+        client._execute('mount', '-t', 'nfs', self.TEST_EXPORT,
+                        self.TEST_MNT_POINT,
+                        root_helper='sudo', run_as_root=True,
+                        check_exit_code=0).AndReturn(("", ""))
+        mox.ReplayAll()
+
+        client.mount(self.TEST_EXPORT)
+
+        mox.VerifyAll()
+
+    def test_mount_nfs_should_not_remount(self):
+        mox = self._mox
+        client = self._nfsclient
+
+        line = "%s on %s type nfs (rw)\n" % (self.TEST_EXPORT,
+                                             self.TEST_MNT_POINT)
+        mox.StubOutWithMock(client, '_execute')
+        client._execute('mount', check_exit_code=0).AndReturn((line, ""))
+        mox.ReplayAll()
+
+        client.mount(self.TEST_EXPORT)
+
+        mox.VerifyAll()
+
+    def test_nfs_mount_options(self):
+        opts = 'test_nfs_mount_options'
+        client = remotefs.RemoteFsClient(
+            'nfs', 'sudo', nfs_mount_point_base=self.TEST_MNT_BASE,
+            nfs_mount_options=opts)
+        self.assertEqual(opts, client._mount_options)
+
+    def test_nfs_mount_point_base(self):
+        base = '/mnt/test/nfs/mount/point/base'
+        client = remotefs.RemoteFsClient('nfs', 'sudo',
+                                         nfs_mount_point_base=base)
+        self.assertEqual(base, client._mount_base)
+
+    def test_glusterfs_mount_point_base(self):
+        base = '/mnt/test/glusterfs/mount/point/base'
+        client = remotefs.RemoteFsClient('glusterfs', 'sudo',
+                                         glusterfs_mount_point_base=base)
+        self.assertEqual(base, client._mount_base)
diff --git a/cinder/tests/compute/__init__.py b/cinder/tests/compute/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/cinder/tests/compute/test_nova.py b/cinder/tests/compute/test_nova.py
new file mode 100644
index 0000000000..9f8239a750
--- /dev/null
+++ b/cinder/tests/compute/test_nova.py
@@ -0,0 +1,53 @@
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
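The remotefs mount-point tests above pair an export string with a fixed hash. That fixture is consistent with deriving the mount point from an md5 hex digest of the export string; the sketch below assumes that scheme, and `mount_point_for` is an illustrative name, not the RemoteFsClient API.

```python
import hashlib
import os


def mount_point_for(mount_base, export):
    # Assumption: TEST_HASH above is md5(export) in hex.
    digest = hashlib.md5(export.encode('utf-8')).hexdigest()
    return os.path.join(mount_base, digest)


print(mount_point_for('/mnt/test', '1.2.3.4/export1'))
# expected, per the test fixture: /mnt/test/4d664fd43b6ff86d80a4ea969c07b3b9
```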
+ +from cinder.compute import nova +from cinder import context +from cinder import test + + +class FakeNovaClient(object): + class Volumes(object): + def __getattr__(self, item): + return None + + def __init__(self): + self.volumes = self.Volumes() + + def create_volume_snapshot(self, *args, **kwargs): + pass + + def delete_volume_snapshot(self, *args, **kwargs): + pass + + +class NovaApiTestCase(test.TestCase): + def setUp(self): + super(NovaApiTestCase, self).setUp() + + self.api = nova.API() + self.novaclient = FakeNovaClient() + self.ctx = context.get_admin_context() + self.mox.StubOutWithMock(nova, 'novaclient') + + def test_update_server_volume(self): + volume_id = 'volume_id1' + nova.novaclient(self.ctx).AndReturn(self.novaclient) + self.mox.StubOutWithMock(self.novaclient.volumes, + 'update_server_volume') + self.novaclient.volumes.update_server_volume('server_id', 'attach_id', + 'new_volume_id') + self.mox.ReplayAll() + self.api.update_server_volume(self.ctx, 'server_id', 'attach_id', + 'new_volume_id') diff --git a/cinder/tests/conf_fixture.py b/cinder/tests/conf_fixture.py new file mode 100644 index 0000000000..5f88f6348f --- /dev/null +++ b/cinder/tests/conf_fixture.py @@ -0,0 +1,52 @@ +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
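The nova API test above, like most tests in this patch, relies on mox's record/replay/verify cycle (StubOutWithMock, ReplayAll, VerifyAll). A self-contained sketch of that pattern, using a throwaway Greeter class purely for illustration:

```python
import mox


class Greeter(object):
    def hello(self, name):
        return 'hi ' + name


m = mox.Mox()
obj = Greeter()
m.StubOutWithMock(obj, 'hello')
obj.hello('world').AndReturn('stubbed')  # record the expected call
m.ReplayAll()                            # switch from record to replay mode
assert obj.hello('world') == 'stubbed'   # exercise the code under test
m.VerifyAll()                            # fail if an expectation was unmet
m.UnsetStubs()
```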
+ + +from oslo.config import cfg + + +CONF = cfg.CONF + +CONF.import_opt('iscsi_num_targets', 'cinder.volume.drivers.lvm') +CONF.import_opt('policy_file', 'cinder.policy') +CONF.import_opt('volume_driver', 'cinder.volume.manager') +CONF.import_opt('xiv_ds8k_proxy', 'cinder.volume.drivers.xiv_ds8k') +CONF.import_opt('backup_driver', 'cinder.backup.manager') +CONF.import_opt('fixed_key', 'cinder.keymgr.conf_key_mgr', group='keymgr') +CONF.import_opt('scheduler_driver', 'cinder.scheduler.manager') + +def_vol_type = 'fake_vol_type' + + +def set_defaults(conf): + conf.set_default('default_volume_type', def_vol_type) + conf.set_default('volume_driver', + 'cinder.tests.fake_driver.FakeISCSIDriver') + conf.set_default('iscsi_helper', 'fake') + conf.set_default('fake_rabbit', True) + conf.set_default('rpc_backend', 'cinder.openstack.common.rpc.impl_fake') + conf.set_default('iscsi_num_targets', 8) + conf.set_default('verbose', True) + conf.set_default('connection', 'sqlite://', group='database') + conf.set_default('sqlite_synchronous', False) + conf.set_default('policy_file', 'cinder/tests/policy.json') + conf.set_default( + 'xiv_ds8k_proxy', + 'cinder.tests.test_xiv_ds8k.XIVDS8KFakeProxyDriver') + conf.set_default('backup_driver', 'cinder.tests.backup.fake_service') + conf.set_default('fixed_key', default='0' * 64, group='keymgr') + conf.set_default('scheduler_driver', + 'cinder.scheduler.filter_scheduler.FilterScheduler') diff --git a/cinder/tests/db/__init__.py b/cinder/tests/db/__init__.py index 2d43aac42f..fdf33be941 100644 --- a/cinder/tests/db/__init__.py +++ b/cinder/tests/db/__init__.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright (c) 2010 Citrix Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/cinder/tests/db/fakes.py b/cinder/tests/db/fakes.py index 99056ea888..90b90324cc 100644 --- a/cinder/tests/db/fakes.py +++ b/cinder/tests/db/fakes.py @@ -1,7 +1,5 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright (c) 2011 X.commerce, a business unit of eBay Inc. -# Copyright 2010 OpenStack, LLC +# Copyright 2010 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -16,7 +14,7 @@ # License for the specific language governing permissions and limitations # under the License. -"""Stubouts, mocks and fixtures for the test suite""" +"""Stubouts, mocks and fixtures for the test suite.""" from cinder import db diff --git a/cinder/tests/db/test_finish_migration.py b/cinder/tests/db/test_finish_migration.py new file mode 100644 index 0000000000..f0b04ac589 --- /dev/null +++ b/cinder/tests/db/test_finish_migration.py @@ -0,0 +1,52 @@ +# Copyright 2013 IBM Corp. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
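conf_fixture above leans on two oslo.config behaviors: import_opt pulls in an option registered in another module, and set_default changes an option's default without parsing a config file. A minimal sketch; the option name and its 'tgtadm' default here are illustrative, not taken from this patch.

```python
from oslo.config import cfg  # package name of this era; later oslo_config

CONF = cfg.CONF
CONF.register_opt(cfg.StrOpt('iscsi_helper', default='tgtadm'))

CONF.set_default('iscsi_helper', 'fake')  # what set_defaults() does per option
assert CONF.iscsi_helper == 'fake'
```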
+ +"""Tests for finish_volume_migration.""" + + +from cinder import context +from cinder import db +from cinder import test +from cinder.tests import utils as testutils + + +class FinishVolumeMigrationTestCase(test.TestCase): + """Test cases for finish_volume_migration.""" + + def setUp(self): + super(FinishVolumeMigrationTestCase, self).setUp() + + def tearDown(self): + super(FinishVolumeMigrationTestCase, self).tearDown() + + def test_finish_volume_migration(self): + ctxt = context.RequestContext(user_id='user_id', + project_id='project_id', + is_admin=True) + src_volume = testutils.create_volume(ctxt, host='src', + migration_status='migrating', + status='available') + dest_volume = testutils.create_volume(ctxt, host='dest', + migration_status='target:fake', + status='available') + db.finish_volume_migration(ctxt, src_volume['id'], + dest_volume['id']) + + src_volume = db.volume_get(ctxt, src_volume['id']) + expected_name = 'volume-%s' % dest_volume['id'] + self.assertEqual(src_volume['_name_id'], dest_volume['id']) + self.assertEqual(src_volume['name'], expected_name) + self.assertEqual(src_volume['host'], 'dest') + self.assertEqual(src_volume['status'], 'available') + self.assertIsNone(src_volume['migration_status']) diff --git a/cinder/tests/db/test_name_id.py b/cinder/tests/db/test_name_id.py new file mode 100644 index 0000000000..0b4fed0ba5 --- /dev/null +++ b/cinder/tests/db/test_name_id.py @@ -0,0 +1,60 @@ +# Copyright 2013 IBM Corp. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
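The finish_volume_migration test above pins down post-conditions rather than the implementation. A dict-level sketch of those post-conditions, not the real DB API:

```python
# Illustrative only: mirrors the assertions above, where the source row takes
# over the destination's identity and host once migration completes.
def finish_volume_migration(src, dest):
    src['_name_id'] = dest['id']      # src is now addressed by dest's name
    src['host'] = dest['host']        # src takes over the destination host
    src['migration_status'] = None    # migration bookkeeping is cleared
    return src


src = {'id': 'src-id', 'host': 'src', 'migration_status': 'migrating'}
dest = {'id': 'dest-id', 'host': 'dest'}
finish_volume_migration(src, dest)
assert src['_name_id'] == 'dest-id' and src['host'] == 'dest'
```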
+ +"""Tests for volume name_id.""" + +from oslo.config import cfg + +from cinder import context +from cinder import db +from cinder import test +from cinder.tests import utils as testutils + + +CONF = cfg.CONF + + +class NameIDsTestCase(test.TestCase): + """Test cases for naming volumes with name_id.""" + + def setUp(self): + super(NameIDsTestCase, self).setUp() + self.ctxt = context.RequestContext(user_id='user_id', + project_id='project_id') + + def tearDown(self): + super(NameIDsTestCase, self).tearDown() + + def test_name_id_same(self): + """New volume should have same 'id' and 'name_id'.""" + vol_ref = testutils.create_volume(self.ctxt, size=1) + self.assertEqual(vol_ref['name_id'], vol_ref['id']) + expected_name = CONF.volume_name_template % vol_ref['id'] + self.assertEqual(vol_ref['name'], expected_name) + + def test_name_id_diff(self): + """Change name ID to mimic volume after migration.""" + vol_ref = testutils.create_volume(self.ctxt, size=1) + db.volume_update(self.ctxt, vol_ref['id'], {'name_id': 'fake'}) + vol_ref = db.volume_get(self.ctxt, vol_ref['id']) + expected_name = CONF.volume_name_template % 'fake' + self.assertEqual(vol_ref['name'], expected_name) + + def test_name_id_snapshot_volume_name(self): + """Make sure snapshot['volume_name'] is updated.""" + vol_ref = testutils.create_volume(self.ctxt, size=1) + db.volume_update(self.ctxt, vol_ref['id'], {'name_id': 'fake'}) + snap_ref = testutils.create_snapshot(self.ctxt, vol_ref['id']) + expected_name = CONF.volume_name_template % 'fake' + self.assertEqual(snap_ref['volume_name'], expected_name) diff --git a/cinder/tests/db/test_qos_specs.py b/cinder/tests/db/test_qos_specs.py new file mode 100644 index 0000000000..50a76e99ef --- /dev/null +++ b/cinder/tests/db/test_qos_specs.py @@ -0,0 +1,230 @@ +# Copyright (C) 2013 eBay Inc. +# Copyright (C) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
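The name_id tests above rest on one rule: a volume's name is the name template applied to name_id when it is set, and to id otherwise. A sketch of that rule, using cinder's default template; the helper itself is hypothetical.

```python
VOLUME_NAME_TEMPLATE = 'volume-%s'  # default volume_name_template


def volume_name(volume):
    # name_id wins when present, so a migrated volume keeps answering to the
    # name of the row it replaced.
    return VOLUME_NAME_TEMPLATE % (volume.get('name_id') or volume['id'])


assert volume_name({'id': 'abc'}) == 'volume-abc'
assert volume_name({'id': 'abc', 'name_id': 'fake'}) == 'volume-fake'
```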
+
+"""Tests for quality_of_service_specs table."""
+
+
+import time
+
+from cinder import context
+from cinder import db
+from cinder import exception
+from cinder.openstack.common import log as logging
+from cinder import test
+from cinder.volume import volume_types
+
+
+LOG = logging.getLogger(__name__)
+
+
+def fake_qos_specs_get_by_name(context, name, session=None, inactive=False):
+    pass
+
+
+class QualityOfServiceSpecsTableTestCase(test.TestCase):
+    """Test case for QualityOfServiceSpecs model."""
+
+    def setUp(self):
+        super(QualityOfServiceSpecsTableTestCase, self).setUp()
+        self.ctxt = context.RequestContext(user_id='user_id',
+                                           project_id='project_id',
+                                           is_admin=True)
+
+    def tearDown(self):
+        super(QualityOfServiceSpecsTableTestCase, self).tearDown()
+
+    def _create_qos_specs(self, name, values=None):
+        """Create a qos specs object."""
+        if values:
+            specs = dict(name=name, qos_specs=values)
+        else:
+            specs = {'name': name,
+                     'qos_specs': {
+                         'consumer': 'back-end',
+                         'key1': 'value1',
+                         'key2': 'value2'}}
+        return db.qos_specs_create(self.ctxt, specs)['id']
+
+    def test_qos_specs_create(self):
+        # If qos specs with the same name already exists,
+        # a QoSSpecsExists exception will be raised.
+        name = 'QoSSpecsCreationTest'
+        self._create_qos_specs(name)
+        self.assertRaises(exception.QoSSpecsExists,
+                          db.qos_specs_create, self.ctxt, dict(name=name))
+
+        specs_id = self._create_qos_specs('NewName')
+        query_id = db.qos_specs_get_by_name(
+            self.ctxt, 'NewName')['id']
+        self.assertEqual(specs_id, query_id)
+
+    def test_qos_specs_get(self):
+        value = dict(consumer='front-end',
+                     key1='foo', key2='bar')
+        specs_id = self._create_qos_specs('Name1', value)
+
+        fake_id = 'fake-UUID'
+        self.assertRaises(exception.QoSSpecsNotFound,
+                          db.qos_specs_get, self.ctxt, fake_id)
+
+        specs = db.qos_specs_get(self.ctxt, specs_id)
+        expected = dict(name='Name1', id=specs_id, consumer='front-end')
+        del value['consumer']
+        expected.update(dict(specs=value))
+        self.assertDictMatch(specs, expected)
+
+    def test_qos_specs_get_all(self):
+        value1 = dict(consumer='front-end',
+                      key1='v1', key2='v2')
+        value2 = dict(consumer='back-end',
+                      key3='v3', key4='v4')
+        value3 = dict(consumer='back-end',
+                      key5='v5', key6='v6')
+
+        spec_id1 = self._create_qos_specs('Name1', value1)
+        spec_id2 = self._create_qos_specs('Name2', value2)
+        spec_id3 = self._create_qos_specs('Name3', value3)
+
+        specs = db.qos_specs_get_all(self.ctxt)
+        self.assertEqual(len(specs), 3,
+                         "Unexpected number of qos specs records")
+
+        expected1 = dict(name='Name1', id=spec_id1, consumer='front-end')
+        expected2 = dict(name='Name2', id=spec_id2, consumer='back-end')
+        expected3 = dict(name='Name3', id=spec_id3, consumer='back-end')
+        del value1['consumer']
+        del value2['consumer']
+        del value3['consumer']
+        expected1.update(dict(specs=value1))
+        expected2.update(dict(specs=value2))
+        expected3.update(dict(specs=value3))
+        self.assertIn(expected1, specs)
+        self.assertIn(expected2, specs)
+        self.assertIn(expected3, specs)
+
+    def test_qos_specs_get_by_name(self):
+        name = str(int(time.time()))
+        value = dict(consumer='front-end',
+                     foo='Foo', bar='Bar')
+        specs_id = self._create_qos_specs(name, value)
+        specs = db.qos_specs_get_by_name(self.ctxt, name)
+        del value['consumer']
+        expected = {'name': name,
+                    'id': specs_id,
+                    'consumer': 'front-end',
+                    'specs': value}
+        self.assertDictMatch(specs, expected)
+
+    def test_qos_specs_delete(self):
+        name = str(int(time.time()))
+        specs_id = self._create_qos_specs(name)
+
+        db.qos_specs_delete(self.ctxt, specs_id)
+        self.assertRaises(exception.QoSSpecsNotFound, db.qos_specs_get,
+                          self.ctxt, specs_id)
+
+    def test_qos_specs_item_delete(self):
+        name = str(int(time.time()))
+        value = dict(consumer='front-end',
+                     foo='Foo', bar='Bar')
+        specs_id = self._create_qos_specs(name, value)
+
+        del value['consumer']
+        del value['foo']
+        expected = {'name': name,
+                    'id': specs_id,
+                    'consumer': 'front-end',
+                    'specs': value}
+        db.qos_specs_item_delete(self.ctxt, specs_id, 'foo')
+        specs = db.qos_specs_get_by_name(self.ctxt, name)
+        self.assertDictMatch(specs, expected)
+
+    def test_associate_type_with_qos(self):
+        self.assertRaises(exception.VolumeTypeNotFound,
+                          db.volume_type_qos_associate,
+                          self.ctxt, 'Fake-VOLID', 'Fake-QOSID')
+        type_id = volume_types.create(self.ctxt, 'TypeName')['id']
+        specs_id = self._create_qos_specs('FakeQos')
+        db.volume_type_qos_associate(self.ctxt, type_id, specs_id)
+        res = db.qos_specs_associations_get(self.ctxt, specs_id)
+        self.assertEqual(len(res), 1)
+        self.assertEqual(res[0]['id'], type_id)
+        self.assertEqual(res[0]['qos_specs_id'], specs_id)
+
+    def test_qos_associations_get(self):
+        self.assertRaises(exception.QoSSpecsNotFound,
+                          db.qos_specs_associations_get,
+                          self.ctxt, 'Fake-UUID')
+
+        type_id = volume_types.create(self.ctxt, 'TypeName')['id']
+        specs_id = self._create_qos_specs('FakeQos')
+        res = db.qos_specs_associations_get(self.ctxt, specs_id)
+        self.assertEqual(len(res), 0)
+
+        db.volume_type_qos_associate(self.ctxt, type_id, specs_id)
+        res = db.qos_specs_associations_get(self.ctxt, specs_id)
+        self.assertEqual(len(res), 1)
+        self.assertEqual(res[0]['id'], type_id)
+        self.assertEqual(res[0]['qos_specs_id'], specs_id)
+
+        type0_id = volume_types.create(self.ctxt, 'Type0Name')['id']
+        db.volume_type_qos_associate(self.ctxt, type0_id, specs_id)
+        res = db.qos_specs_associations_get(self.ctxt, specs_id)
+        self.assertEqual(len(res), 2)
+        self.assertEqual(res[0]['qos_specs_id'], specs_id)
+        self.assertEqual(res[1]['qos_specs_id'], specs_id)
+
+    def test_qos_specs_disassociate(self):
+        type_id = volume_types.create(self.ctxt, 'TypeName')['id']
+        specs_id = self._create_qos_specs('FakeQos')
+        db.volume_type_qos_associate(self.ctxt, type_id, specs_id)
+        res = db.qos_specs_associations_get(self.ctxt, specs_id)
+        self.assertEqual(res[0]['id'], type_id)
+        self.assertEqual(res[0]['qos_specs_id'], specs_id)
+
+        db.qos_specs_disassociate(self.ctxt, specs_id, type_id)
+        res = db.qos_specs_associations_get(self.ctxt, specs_id)
+        self.assertEqual(len(res), 0)
+        res = db.volume_type_get(self.ctxt, type_id)
+        self.assertIsNone(res['qos_specs_id'])
+
+    def test_qos_specs_disassociate_all(self):
+        specs_id = self._create_qos_specs('FakeQos')
+        type1_id = volume_types.create(self.ctxt, 'Type1Name')['id']
+        type2_id = volume_types.create(self.ctxt, 'Type2Name')['id']
+        type3_id = volume_types.create(self.ctxt, 'Type3Name')['id']
+        db.volume_type_qos_associate(self.ctxt, type1_id, specs_id)
+        db.volume_type_qos_associate(self.ctxt, type2_id, specs_id)
+        db.volume_type_qos_associate(self.ctxt, type3_id, specs_id)
+
+        res = db.qos_specs_associations_get(self.ctxt, specs_id)
+        self.assertEqual(len(res), 3)
+
+        db.qos_specs_disassociate_all(self.ctxt, specs_id)
+        res = db.qos_specs_associations_get(self.ctxt, specs_id)
+        self.assertEqual(len(res), 0)
+
+    def test_qos_specs_update(self):
+        name = 'FakeName'
+        specs_id = self._create_qos_specs(name)
+        value = dict(key2='new_value2', key3='value3')
+
+        self.assertRaises(exception.QoSSpecsNotFound, db.qos_specs_update,
+                          self.ctxt, 'Fake-UUID', value)
+        db.qos_specs_update(self.ctxt, specs_id, value)
+        specs = db.qos_specs_get(self.ctxt, specs_id)
+        self.assertEqual(specs['specs']['key2'], 'new_value2')
+        self.assertEqual(specs['specs']['key3'], 'value3')
diff --git a/cinder/tests/db/test_transfers.py b/cinder/tests/db/test_transfers.py
new file mode 100644
index 0000000000..750fd77ff4
--- /dev/null
+++ b/cinder/tests/db/test_transfers.py
@@ -0,0 +1,126 @@
+# Copyright (C) 2013 Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Tests for transfers table."""
+
+from cinder import context
+from cinder import db
+from cinder import exception
+from cinder.openstack.common import log as logging
+from cinder import test
+from cinder.tests import utils
+
+
+LOG = logging.getLogger(__name__)
+
+
+class TransfersTableTestCase(test.TestCase):
+    """Test case for transfers model."""
+
+    def setUp(self):
+        super(TransfersTableTestCase, self).setUp()
+        self.ctxt = context.RequestContext(user_id='user_id',
+                                           project_id='project_id')
+
+    def tearDown(self):
+        super(TransfersTableTestCase, self).tearDown()
+
+    def _create_transfer(self, volume_id=None):
+        """Create a transfer object."""
+        transfer = {'display_name': 'display_name',
+                    'salt': 'salt',
+                    'crypt_hash': 'crypt_hash'}
+        if volume_id is not None:
+            transfer['volume_id'] = volume_id
+        return db.transfer_create(self.ctxt, transfer)['id']
+
+    def test_transfer_create(self):
+        # If the volume_id is None, a KeyError will be raised.
+ self.assertRaises(KeyError, + self._create_transfer) + + volume_id = utils.create_volume(self.ctxt)['id'] + self._create_transfer(volume_id) + + def test_transfer_create_not_available(self): + volume_id = utils.create_volume(self.ctxt, size=1, + status='notavailable')['id'] + self.assertRaises(exception.InvalidVolume, + self._create_transfer, + volume_id) + + def test_transfer_get(self): + volume_id1 = utils.create_volume(self.ctxt)['id'] + xfer_id1 = self._create_transfer(volume_id1) + + xfer = db.transfer_get(self.ctxt, xfer_id1) + self.assertEqual(xfer.volume_id, volume_id1, "Unexpected volume_id") + + nctxt = context.RequestContext(user_id='new_user_id', + project_id='new_project_id') + self.assertRaises(exception.TransferNotFound, + db.transfer_get, nctxt, xfer_id1) + + xfer = db.transfer_get(nctxt.elevated(), xfer_id1) + self.assertEqual(xfer.volume_id, volume_id1, "Unexpected volume_id") + + def test_transfer_get_all(self): + volume_id1 = utils.create_volume(self.ctxt)['id'] + volume_id2 = utils.create_volume(self.ctxt)['id'] + self._create_transfer(volume_id1) + self._create_transfer(volume_id2) + + self.assertRaises(exception.NotAuthorized, + db.transfer_get_all, + self.ctxt) + xfer = db.transfer_get_all(context.get_admin_context()) + self.assertEqual(len(xfer), 2, "Unexpected number of transfer records") + + xfer = db.transfer_get_all_by_project(self.ctxt, self.ctxt.project_id) + self.assertEqual(len(xfer), 2, "Unexpected number of transfer records") + + nctxt = context.RequestContext(user_id='new_user_id', + project_id='new_project_id') + self.assertRaises(exception.NotAuthorized, + db.transfer_get_all_by_project, + nctxt, self.ctxt.project_id) + xfer = db.transfer_get_all_by_project(nctxt.elevated(), + self.ctxt.project_id) + self.assertEqual(len(xfer), 2, "Unexpected number of transfer records") + + def test_transfer_destroy(self): + volume_id = utils.create_volume(self.ctxt)['id'] + volume_id2 = utils.create_volume(self.ctxt)['id'] + xfer_id1 = self._create_transfer(volume_id) + xfer_id2 = self._create_transfer(volume_id2) + + xfer = db.transfer_get_all(context.get_admin_context()) + self.assertEqual(len(xfer), 2, "Unexpected number of transfer records") + self.assertFalse(xfer[0]['deleted'], "Deleted flag is set") + + db.transfer_destroy(self.ctxt, xfer_id1) + xfer = db.transfer_get_all(context.get_admin_context()) + self.assertEqual(len(xfer), 1, "Unexpected number of transfer records") + self.assertEqual(xfer[0]['id'], xfer_id2, + "Unexpected value for Transfer id") + + nctxt = context.RequestContext(user_id='new_user_id', + project_id='new_project_id') + self.assertRaises(exception.TransferNotFound, + db.transfer_destroy, nctxt, xfer_id2) + + db.transfer_destroy(nctxt.elevated(), xfer_id2) + xfer = db.transfer_get_all(context.get_admin_context()) + self.assertEqual(len(xfer), 0, "Unexpected number of transfer records") diff --git a/cinder/tests/declare_flags.py b/cinder/tests/declare_conf.py similarity index 78% rename from cinder/tests/declare_flags.py rename to cinder/tests/declare_conf.py index ee4733fc10..a197440331 100644 --- a/cinder/tests/declare_flags.py +++ b/cinder/tests/declare_conf.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. @@ -16,8 +14,9 @@ # License for the specific language governing permissions and limitations # under the License. 
-from cinder import flags -from cinder.openstack.common import cfg -FLAGS = flags.FLAGS -FLAGS.register_opt(cfg.IntOpt('answer', default=42, help='test flag')) +from oslo.config import cfg + + +CONF = cfg.CONF +CONF.register_opt(cfg.IntOpt('answer', default=42, help='test conf')) diff --git a/cinder/tests/fake_driver.py b/cinder/tests/fake_driver.py new file mode 100644 index 0000000000..b258f60fe6 --- /dev/null +++ b/cinder/tests/fake_driver.py @@ -0,0 +1,149 @@ +# Copyright 2012 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from cinder.openstack.common import log as logging +from cinder.tests.brick.fake_lvm import FakeBrickLVM +from cinder.volume import driver +from cinder.volume.drivers import lvm + + +LOG = logging.getLogger(__name__) + + +class FakeISCSIDriver(lvm.LVMISCSIDriver): + """Logs calls instead of executing.""" + def __init__(self, *args, **kwargs): + super(FakeISCSIDriver, self).__init__(execute=self.fake_execute, + *args, **kwargs) + self.vg = FakeBrickLVM('cinder-volumes', False, + None, 'default', + self.fake_execute) + + def check_for_setup_error(self): + """No setup necessary in fake mode.""" + pass + + def initialize_connection(self, volume, connector): + volume_metadata = {} + for metadata in volume['volume_admin_metadata']: + volume_metadata[metadata['key']] = metadata['value'] + access_mode = volume_metadata.get('attached_mode') + if access_mode is None: + access_mode = ('ro' + if volume_metadata.get('readonly') == 'True' + else 'rw') + return { + 'driver_volume_type': 'iscsi', + 'data': {'access_mode': access_mode} + } + + def terminate_connection(self, volume, connector, **kwargs): + pass + + @staticmethod + def fake_execute(cmd, *_args, **_kwargs): + """Execute that simply logs the command.""" + LOG.debug(_("FAKE ISCSI: %s"), cmd) + return (None, None) + + +class FakeISERDriver(FakeISCSIDriver): + """Logs calls instead of executing.""" + def __init__(self, *args, **kwargs): + super(FakeISERDriver, self).__init__(execute=self.fake_execute, + *args, **kwargs) + + def initialize_connection(self, volume, connector): + return { + 'driver_volume_type': 'iser', + 'data': {} + } + + @staticmethod + def fake_execute(cmd, *_args, **_kwargs): + """Execute that simply logs the command.""" + LOG.debug(_("FAKE ISER: %s"), cmd) + return (None, None) + + +class LoggingVolumeDriver(driver.VolumeDriver): + """Logs and records calls, for unit tests.""" + + def check_for_setup_error(self): + pass + + def create_volume(self, volume): + self.log_action('create_volume', volume) + + def delete_volume(self, volume): + self.clear_volume(volume) + self.log_action('delete_volume', volume) + + def clear_volume(self, volume): + self.log_action('clear_volume', volume) + + def local_path(self, volume): + LOG.error(_("local_path not implemented")) + raise NotImplementedError() + + def ensure_export(self, context, volume): + self.log_action('ensure_export', volume) + + def create_export(self, context, volume): + self.log_action('create_export', volume) + + def 
remove_export(self, context, volume): + self.log_action('remove_export', volume) + + def initialize_connection(self, volume, connector): + self.log_action('initialize_connection', volume) + + def terminate_connection(self, volume, connector): + self.log_action('terminate_connection', volume) + + _LOGS = [] + + @staticmethod + def clear_logs(): + LoggingVolumeDriver._LOGS = [] + + @staticmethod + def log_action(action, parameters): + """Logs the command.""" + LOG.debug(_("LoggingVolumeDriver: %s") % (action)) + log_dictionary = {} + if parameters: + log_dictionary = dict(parameters) + log_dictionary['action'] = action + LOG.debug(_("LoggingVolumeDriver: %s") % (log_dictionary)) + LoggingVolumeDriver._LOGS.append(log_dictionary) + + @staticmethod + def all_logs(): + return LoggingVolumeDriver._LOGS + + @staticmethod + def logs_like(action, **kwargs): + matches = [] + for entry in LoggingVolumeDriver._LOGS: + if entry['action'] != action: + continue + match = True + for k, v in kwargs.iteritems(): + if entry.get(k) != v: + match = False + break + if match: + matches.append(entry) + return matches diff --git a/cinder/tests/fake_flags.py b/cinder/tests/fake_flags.py deleted file mode 100644 index 15f17114c5..0000000000 --- a/cinder/tests/fake_flags.py +++ /dev/null @@ -1,39 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from cinder import flags - -FLAGS = flags.FLAGS - -flags.DECLARE('iscsi_num_targets', 'cinder.volume.driver') -flags.DECLARE('policy_file', 'cinder.policy') -flags.DECLARE('volume_driver', 'cinder.volume.manager') -flags.DECLARE('xiv_proxy', 'cinder.volume.xiv') - - -def set_defaults(conf): - conf.set_default('volume_driver', 'cinder.volume.driver.FakeISCSIDriver') - conf.set_default('connection_type', 'fake') - conf.set_default('fake_rabbit', True) - conf.set_default('rpc_backend', 'cinder.openstack.common.rpc.impl_fake') - conf.set_default('iscsi_num_targets', 8) - conf.set_default('verbose', True) - conf.set_default('sql_connection', "sqlite://") - conf.set_default('sqlite_synchronous', False) - conf.set_default('policy_file', 'cinder/tests/policy.json') - conf.set_default('xiv_proxy', 'cinder.tests.test_xiv.XIVFakeProxyDriver') diff --git a/cinder/tests/fake_utils.py b/cinder/tests/fake_utils.py index 68aaf79646..d7df676cf0 100644 --- a/cinder/tests/fake_utils.py +++ b/cinder/tests/fake_utils.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright (c) 2011 Citrix Systems, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -20,8 +18,8 @@ from eventlet import greenthread -from cinder import exception from cinder.openstack.common import log as logging +from cinder.openstack.common import processutils from cinder import utils LOG = logging.getLogger(__name__) @@ -92,14 +90,13 @@ def fake_execute(*cmd_parts, **kwargs): attempts=attempts, run_as_root=run_as_root, check_exit_code=check_exit_code) - except exception.ProcessExecutionError as e: + except processutils.ProcessExecutionError as e: LOG.debug(_('Faked command raised an exception %s'), e) raise - stdout = reply[0] - stderr = reply[1] LOG.debug(_("Reply to faked command is stdout='%(stdout)s' " - "stderr='%(stderr)s'") % locals()) + "stderr='%(stderr)s'") % {'stdout': reply[0], + 'stderr': reply[1]}) # Replicate the sleep call in the real function greenthread.sleep(0) diff --git a/cinder/tests/glance/__init__.py b/cinder/tests/glance/__init__.py index ef9fa05a7c..eac840c7ed 100644 --- a/cinder/tests/glance/__init__.py +++ b/cinder/tests/glance/__init__.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright (c) 2011 Citrix Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/cinder/tests/glance/stubs.py b/cinder/tests/glance/stubs.py index 076afeffc9..9a1bbdd27d 100644 --- a/cinder/tests/glance/stubs.py +++ b/cinder/tests/glance/stubs.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright (c) 2011 Citrix Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/cinder/tests/image/__init__.py b/cinder/tests/image/__init__.py index 387872b21f..9713c65add 100644 --- a/cinder/tests/image/__init__.py +++ b/cinder/tests/image/__init__.py @@ -1,6 +1,4 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2011 OpenStack LLC. +# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/cinder/tests/image/fake.py b/cinder/tests/image/fake.py index 16c600e57e..18cea571f7 100644 --- a/cinder/tests/image/fake.py +++ b/cinder/tests/image/fake.py @@ -1,7 +1,5 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2011 Justin Santa Barbara -# Copyright 2012 OpenStack LLC +# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -16,24 +14,20 @@ # License for the specific language governing permissions and limitations # under the License. -"""Implementation of a fake image service""" +"""Implementation of a fake image service.""" import copy import datetime +import uuid from cinder import exception -from cinder import flags import cinder.image.glance from cinder.openstack.common import log as logging -from cinder import utils LOG = logging.getLogger(__name__) -FLAGS = flags.FLAGS - - class _FakeImageService(object): """Mock (fake) image service for unit testing.""" @@ -41,104 +35,104 @@ def __init__(self): self.images = {} # NOTE(justinsb): The OpenStack API can't upload an image? # So, make sure we've got one.. 
- timestamp = datetime.datetime(2011, 01, 01, 01, 02, 03) + timestamp = datetime.datetime(2011, 1, 1, 1, 2, 3) image1 = {'id': '155d900f-4e14-4e4c-a73d-069cbf4541e6', - 'name': 'fakeimage123456', - 'created_at': timestamp, - 'updated_at': timestamp, - 'deleted_at': None, - 'deleted': False, - 'status': 'active', - 'is_public': False, - 'container_format': 'raw', - 'disk_format': 'raw', - 'properties': {'kernel_id': 'nokernel', - 'ramdisk_id': 'nokernel', - 'architecture': 'x86_64'}} + 'name': 'fakeimage123456', + 'created_at': timestamp, + 'updated_at': timestamp, + 'deleted_at': None, + 'deleted': False, + 'status': 'active', + 'is_public': False, + 'container_format': 'raw', + 'disk_format': 'raw', + 'properties': {'kernel_id': 'nokernel', + 'ramdisk_id': 'nokernel', + 'architecture': 'x86_64'}} image2 = {'id': 'a2459075-d96c-40d5-893e-577ff92e721c', - 'name': 'fakeimage123456', - 'created_at': timestamp, - 'updated_at': timestamp, - 'deleted_at': None, - 'deleted': False, - 'status': 'active', - 'is_public': True, - 'container_format': 'ami', - 'disk_format': 'ami', - 'properties': {'kernel_id': 'nokernel', - 'ramdisk_id': 'nokernel'}} + 'name': 'fakeimage123456', + 'created_at': timestamp, + 'updated_at': timestamp, + 'deleted_at': None, + 'deleted': False, + 'status': 'active', + 'is_public': True, + 'container_format': 'ami', + 'disk_format': 'ami', + 'properties': {'kernel_id': 'nokernel', + 'ramdisk_id': 'nokernel'}} image3 = {'id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6', - 'name': 'fakeimage123456', - 'created_at': timestamp, - 'updated_at': timestamp, - 'deleted_at': None, - 'deleted': False, - 'status': 'active', - 'is_public': True, - 'container_format': None, - 'disk_format': None, - 'properties': {'kernel_id': 'nokernel', - 'ramdisk_id': 'nokernel'}} + 'name': 'fakeimage123456', + 'created_at': timestamp, + 'updated_at': timestamp, + 'deleted_at': None, + 'deleted': False, + 'status': 'active', + 'is_public': True, + 'container_format': None, + 'disk_format': None, + 'properties': {'kernel_id': 'nokernel', + 'ramdisk_id': 'nokernel'}} image4 = {'id': 'cedef40a-ed67-4d10-800e-17455edce175', - 'name': 'fakeimage123456', - 'created_at': timestamp, - 'updated_at': timestamp, - 'deleted_at': None, - 'deleted': False, - 'status': 'active', - 'is_public': True, - 'container_format': 'ami', - 'disk_format': 'ami', - 'properties': {'kernel_id': 'nokernel', - 'ramdisk_id': 'nokernel'}} + 'name': 'fakeimage123456', + 'created_at': timestamp, + 'updated_at': timestamp, + 'deleted_at': None, + 'deleted': False, + 'status': 'active', + 'is_public': True, + 'container_format': 'ami', + 'disk_format': 'ami', + 'properties': {'kernel_id': 'nokernel', + 'ramdisk_id': 'nokernel'}} image5 = {'id': 'c905cedb-7281-47e4-8a62-f26bc5fc4c77', - 'name': 'fakeimage123456', - 'created_at': timestamp, - 'updated_at': timestamp, - 'deleted_at': None, - 'deleted': False, - 'status': 'active', - 'is_public': True, - 'container_format': 'ami', - 'disk_format': 'ami', - 'properties': {'kernel_id': - '155d900f-4e14-4e4c-a73d-069cbf4541e6', - 'ramdisk_id': None}} + 'name': 'fakeimage123456', + 'created_at': timestamp, + 'updated_at': timestamp, + 'deleted_at': None, + 'deleted': False, + 'status': 'active', + 'is_public': True, + 'container_format': 'ami', + 'disk_format': 'ami', + 'properties': { + 'kernel_id': '155d900f-4e14-4e4c-a73d-069cbf4541e6', + 'ramdisk_id': None}} image6 = {'id': 'a440c04b-79fa-479c-bed1-0b816eaec379', - 'name': 'fakeimage6', - 'created_at': timestamp, - 'updated_at': timestamp, - 
'deleted_at': None, - 'deleted': False, - 'status': 'active', - 'is_public': False, - 'container_format': 'ova', - 'disk_format': 'vhd', - 'properties': {'kernel_id': 'nokernel', - 'ramdisk_id': 'nokernel', - 'architecture': 'x86_64', - 'auto_disk_config': 'False'}} + 'name': 'fakeimage6', + 'created_at': timestamp, + 'updated_at': timestamp, + 'deleted_at': None, + 'deleted': False, + 'status': 'active', + 'is_public': False, + 'container_format': 'ova', + 'disk_format': 'vhd', + 'properties': {'kernel_id': 'nokernel', + 'ramdisk_id': 'nokernel', + 'architecture': 'x86_64', + 'auto_disk_config': 'False'}} image7 = {'id': '70a599e0-31e7-49b7-b260-868f441e862b', - 'name': 'fakeimage7', - 'created_at': timestamp, - 'updated_at': timestamp, - 'deleted_at': None, - 'deleted': False, - 'status': 'active', - 'is_public': False, - 'container_format': 'ova', - 'disk_format': 'vhd', - 'properties': {'kernel_id': 'nokernel', - 'ramdisk_id': 'nokernel', - 'architecture': 'x86_64', - 'auto_disk_config': 'True'}} + 'name': 'fakeimage7', + 'created_at': timestamp, + 'updated_at': timestamp, + 'deleted_at': None, + 'deleted': False, + 'status': 'active', + 'is_public': False, + 'container_format': 'ova', + 'disk_format': 'vhd', + 'properties': {'kernel_id': 'nokernel', + 'ramdisk_id': 'nokernel', + 'architecture': 'x86_64', + 'auto_disk_config': 'True'}} self.create(None, image1) self.create(None, image2) @@ -178,7 +172,7 @@ def create(self, context, metadata, data=None): :raises: Duplicate if the image already exist. """ - image_id = str(metadata.get('id', utils.gen_uuid())) + image_id = str(metadata.get('id', uuid.uuid4())) metadata['id'] = image_id if image_id in self.images: raise exception.Duplicate() diff --git a/cinder/tests/image/test_glance.py b/cinder/tests/image/test_glance.py index b61a1a8180..3be3568ab4 100644 --- a/cinder/tests/image/test_glance.py +++ b/cinder/tests/image/test_glance.py @@ -1,6 +1,4 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2011 OpenStack LLC. +# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -17,21 +15,24 @@ import datetime -import random -import time import glanceclient.exc +import glanceclient.v2.client +from glanceclient.v2.client import Client as glanceclient_v2 +from oslo.config import cfg from cinder import context from cinder import exception from cinder.image import glance from cinder import test -from cinder.tests.api.openstack import fakes from cinder.tests.glance import stubs as glance_stubs +CONF = cfg.CONF + + class NullWriter(object): - """Used to test ImageService.get which takes a writer object""" + """Used to test ImageService.get which takes a writer object.""" def write(self, *arg, **kwargs): pass @@ -74,8 +75,7 @@ def test_serialize(self): class TestGlanceImageService(test.TestCase): - """ - Tests the Glance image service. + """Tests the Glance image service. 
At a high level, the translations involved are: @@ -106,14 +106,14 @@ def setUp(self): self.stubs.Set(glance.time, 'sleep', lambda s: None) def _create_image_service(self, client): - def _fake_create_glance_client(context, host, port, version): + def _fake_create_glance_client(context, netloc, use_ssl, version): return client - self.stubs.Set(glance, '_create_glance_client', - _fake_create_glance_client) + self.stubs.Set(glance, + '_create_glance_client', + _fake_create_glance_client) - client_wrapper = glance.GlanceClientWrapper( - 'fake', 'fake_host', 9292) + client_wrapper = glance.GlanceClientWrapper('fake', 'fake_host', 9292) return glance.GlanceImageService(client=client_wrapper) @staticmethod @@ -131,7 +131,7 @@ def _make_datetime_fixture(self): deleted_at=self.NOW_GLANCE_FORMAT) def test_create_with_instance_id(self): - """Ensure instance_id is persisted as an image-property""" + """Ensure instance_id is persisted as an image-property.""" fixture = {'name': 'test image', 'is_public': False, 'properties': {'instance_id': '42', 'user_id': 'fake'}} @@ -162,7 +162,8 @@ def test_create_with_instance_id(self): self.assertDictMatch(image_metas[0], expected) def test_create_without_instance_id(self): - """ + """Test Creating images without instance_id. + Ensure we can create an image without having to specify an instance_id. Public images are an example of an image not tied to an instance. @@ -196,15 +197,15 @@ def test_create(self): num_images = len(self.service.detail(self.context)) image_id = self.service.create(self.context, fixture)['id'] - self.assertNotEquals(None, image_id) - self.assertEquals(num_images + 1, - len(self.service.detail(self.context))) + self.assertIsNotNone(image_id) + self.assertEqual(num_images + 1, + len(self.service.detail(self.context))) def test_create_and_show_non_existing_image(self): fixture = self._make_fixture(name='test image') image_id = self.service.create(self.context, fixture)['id'] - self.assertNotEquals(None, image_id) + self.assertIsNotNone(image_id) self.assertRaises(exception.ImageNotFound, self.service.show, self.context, @@ -238,7 +239,7 @@ def test_detail_marker(self): ids.append(self.service.create(self.context, fixture)['id']) image_metas = self.service.detail(self.context, marker=ids[1]) - self.assertEquals(len(image_metas), 8) + self.assertEqual(len(image_metas), 8) i = 2 for meta in image_metas: expected = { @@ -272,7 +273,7 @@ def test_detail_limit(self): ids.append(self.service.create(self.context, fixture)['id']) image_metas = self.service.detail(self.context, limit=5) - self.assertEquals(len(image_metas), 5) + self.assertEqual(len(image_metas), 5) def test_detail_default_limit(self): fixtures = [] @@ -295,7 +296,7 @@ def test_detail_marker_and_limit(self): ids.append(self.service.create(self.context, fixture)['id']) image_metas = self.service.detail(self.context, marker=ids[3], limit=5) - self.assertEquals(len(image_metas), 5) + self.assertEqual(len(image_metas), 5) i = 4 for meta in image_metas: expected = { @@ -333,13 +334,12 @@ def test_detail_invalid_marker(self): def test_update(self): fixture = self._make_fixture(name='test image') image = self.service.create(self.context, fixture) - print image image_id = image['id'] fixture['name'] = 'new image name' self.service.update(self.context, image_id, fixture) new_image_data = self.service.show(self.context, image_id) - self.assertEquals('new image name', new_image_data['name']) + self.assertEqual('new image name', new_image_data['name']) def test_delete(self): fixture1 = 
self._make_fixture(name='test image 1') @@ -347,7 +347,7 @@ def test_delete(self): fixtures = [fixture1, fixture2] num_images = len(self.service.detail(self.context)) - self.assertEquals(0, num_images) + self.assertEqual(0, num_images) ids = [] for fixture in fixtures: @@ -355,12 +355,12 @@ def test_delete(self): ids.append(new_id) num_images = len(self.service.detail(self.context)) - self.assertEquals(2, num_images) + self.assertEqual(2, num_images) self.service.delete(self.context, ids[0]) num_images = len(self.service.detail(self.context)) - self.assertEquals(1, num_images) + self.assertEqual(1, num_images) def test_show_passes_through_to_client(self): fixture = self._make_fixture(name='image1', is_public=True) @@ -458,7 +458,10 @@ def get(self, image_id): # When retries are disabled, we should get an exception self.flags(glance_num_retries=0) self.assertRaises(exception.GlanceConnectionFailed, - service.download, self.context, image_id, writer) + service.download, + self.context, + image_id, + writer) # Now lets enable retries. No exception should happen now. tries = [0] @@ -520,19 +523,67 @@ def get(self, image_id): def test_glance_client_image_id(self): fixture = self._make_fixture(name='test image') image_id = self.service.create(self.context, fixture)['id'] - (service, same_id) = glance.get_remote_image_service( - self.context, image_id) - self.assertEquals(same_id, image_id) + (service, same_id) = glance.get_remote_image_service(self.context, + image_id) + self.assertEqual(same_id, image_id) def test_glance_client_image_ref(self): fixture = self._make_fixture(name='test image') image_id = self.service.create(self.context, fixture)['id'] image_url = 'http://something-less-likely/%s' % image_id - (service, same_id) = glance.get_remote_image_service( - self.context, image_url) - self.assertEquals(same_id, image_id) - self.assertEquals(service._client.host, - 'something-less-likely') + (service, same_id) = glance.get_remote_image_service(self.context, + image_url) + self.assertEqual(same_id, image_id) + self.assertEqual(service._client.netloc, 'something-less-likely') + for ipv6_url in ('[::1]', '::1', '[::1]:444'): + image_url = 'http://%s/%s' % (ipv6_url, image_id) + (service, same_id) = glance.get_remote_image_service(self.context, + image_url) + self.assertEqual(same_id, image_id) + self.assertEqual(service._client.netloc, ipv6_url) + + +class TestGlanceClientVersion(test.TestCase): + """Tests the version of the glance client generated.""" + def setUp(self): + super(TestGlanceClientVersion, self).setUp() + + def fake_get_model(self): + return + + self.stubs.Set(glanceclient_v2, '_get_image_model', + fake_get_model) + + try: + self.stubs.Set(glanceclient_v2, '_get_member_model', + fake_get_model) + except AttributeError: + # method requires stubbing only with newer glanceclients. 
+ pass + + def test_glance_version_by_flag(self): + """Test glance version set by flag is honoured.""" + client_wrapper_v1 = glance.GlanceClientWrapper('fake', 'fake_host', + 9292) + self.assertEqual(client_wrapper_v1.client.__module__, + 'glanceclient.v1.client') + self.flags(glance_api_version=2) + client_wrapper_v2 = glance.GlanceClientWrapper('fake', 'fake_host', + 9292) + self.assertEqual(client_wrapper_v2.client.__module__, + 'glanceclient.v2.client') + CONF.reset() + + def test_glance_version_by_arg(self): + """Test glance version set by arg to GlanceClientWrapper""" + client_wrapper_v1 = glance.GlanceClientWrapper('fake', 'fake_host', + 9292, version=1) + self.assertEqual(client_wrapper_v1.client.__module__, + 'glanceclient.v1.client') + client_wrapper_v2 = glance.GlanceClientWrapper('fake', 'fake_host', + 9292, version=2) + self.assertEqual(client_wrapper_v2.client.__module__, + 'glanceclient.v2.client') def _create_failing_glance_client(info): @@ -545,3 +596,63 @@ def get(self, image_id): return {} return MyGlanceStubClient() + + +class TestGlanceImageServiceClient(test.TestCase): + + def setUp(self): + super(TestGlanceImageServiceClient, self).setUp() + self.context = context.RequestContext('fake', 'fake', auth_token=True) + self.stubs.Set(glance.time, 'sleep', lambda s: None) + + def test_create_glance_client(self): + self.flags(auth_strategy='keystone') + self.flags(glance_request_timeout=60) + + class MyGlanceStubClient(object): + def __init__(inst, version, *args, **kwargs): + self.assertEqual('1', version) + self.assertEqual("http://fake_host:9292", args[0]) + self.assertEqual(True, kwargs['token']) + self.assertEqual(60, kwargs['timeout']) + + self.stubs.Set(glance.glanceclient, 'Client', MyGlanceStubClient) + client = glance._create_glance_client(self.context, 'fake_host:9292', + False) + self.assertIsInstance(client, MyGlanceStubClient) + + def test_create_glance_client_auth_strategy_is_not_keystone(self): + self.flags(auth_strategy='noauth') + self.flags(glance_request_timeout=60) + + class MyGlanceStubClient(object): + def __init__(inst, version, *args, **kwargs): + self.assertEqual('1', version) + self.assertEqual('http://fake_host:9292', args[0]) + self.assertNotIn('token', kwargs) + self.assertEqual(60, kwargs['timeout']) + + self.stubs.Set(glance.glanceclient, 'Client', MyGlanceStubClient) + client = glance._create_glance_client(self.context, 'fake_host:9292', + False) + self.assertIsInstance(client, MyGlanceStubClient) + + def test_create_glance_client_glance_request_default_timeout(self): + self.flags(auth_strategy='keystone') + self.flags(glance_request_timeout=None) + + class MyGlanceStubClient(object): + def __init__(inst, version, *args, **kwargs): + self.assertEqual("1", version) + self.assertEqual("http://fake_host:9292", args[0]) + self.assertEqual(True, kwargs['token']) + self.assertNotIn('timeout', kwargs) + + self.stubs.Set(glance.glanceclient, 'Client', MyGlanceStubClient) + client = glance._create_glance_client(self.context, 'fake_host:9292', + False) + self.assertIsInstance(client, MyGlanceStubClient) + + def tearDown(self): + self.stubs.UnsetAll() + super(TestGlanceImageServiceClient, self).tearDown() diff --git a/cinder/tests/integrated/__init__.py b/cinder/tests/integrated/__init__.py index 7c17b5ad74..1870fd0a8f 100644 --- a/cinder/tests/integrated/__init__.py +++ b/cinder/tests/integrated/__init__.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright (c) 2011 Justin Santa Barbara # # Licensed under the Apache License, 
Version 2.0 (the "License"); you may
diff --git a/cinder/tests/integrated/api/__init__.py b/cinder/tests/integrated/api/__init__.py
index 5798ab3d1e..6168280c24 100644
--- a/cinder/tests/integrated/api/__init__.py
+++ b/cinder/tests/integrated/api/__init__.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright (c) 2011 Justin Santa Barbara
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/cinder/tests/integrated/api/client.py b/cinder/tests/integrated/api/client.py
index 7a7e5a9316..c06827cc1f 100644
--- a/cinder/tests/integrated/api/client.py
+++ b/cinder/tests/integrated/api/client.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright (c) 2011 Justin Santa Barbara
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -17,8 +15,8 @@
 import httplib
 import urlparse
 
-from cinder.openstack.common import log as logging
 from cinder.openstack.common import jsonutils
+from cinder.openstack.common import log as logging
 
 LOG = logging.getLogger(__name__)
 
@@ -31,11 +29,10 @@ def __init__(self, message=None, response=None):
             message = 'Unspecified error'
 
         if response:
-            _status = response.status
-            _body = response.read()
-            message = _('%(message)s\nStatus Code: %(_status)s\n'
-                        'Body: %(_body)s') % locals()
+            message = _('%(message)s\nStatus Code: %(_status)s\n'
+                        'Body: %(_body)s') % {'_status': response.status,
+                                              '_body': response.read()}
 
         super(OpenStackApiException, self).__init__(message)
 
@@ -53,7 +49,7 @@ def __init__(self, response=None, message=None):
         if not message:
             message = _("Authorization error")
         super(OpenStackApiAuthorizationException, self).__init__(message,
-                                                                  response)
+                                                                 response)
 
 
 class OpenStackApiNotFoundException(OpenStackApiException):
@@ -101,7 +97,8 @@ def request(self, url, method='GET', body=None, headers=None):
         relative_url = parsed_url.path
         if parsed_url.query:
             relative_url = relative_url + "?"
+ parsed_url.query - LOG.info(_("Doing %(method)s on %(relative_url)s") % locals()) + LOG.info(_("Doing %(method)s on %(relative_url)s"), + {'method': method, 'relative_url': relative_url}) if body: LOG.info(_("Body: %s") % body) @@ -121,7 +118,8 @@ def _authenticate(self): headers=headers) http_status = response.status - LOG.debug(_("%(auth_uri)s => code %(http_status)s") % locals()) + LOG.debug(_("%(auth_uri)s => code %(http_status)s"), + {'auth_uri': auth_uri, 'http_status': http_status}) if http_status == 401: raise OpenStackApiAuthenticationException(response=response) @@ -147,18 +145,19 @@ def api_request(self, relative_uri, check_response_status=None, **kwargs): response = self.request(full_uri, **kwargs) http_status = response.status - LOG.debug(_("%(relative_uri)s => code %(http_status)s") % locals()) + LOG.debug(_("%(relative_uri)s => code %(http_status)s"), + {'relative_uri': relative_uri, 'http_status': http_status}) if check_response_status: - if not http_status in check_response_status: + if http_status not in check_response_status: if http_status == 404: raise OpenStackApiNotFoundException(response=response) elif http_status == 401: raise OpenStackApiAuthorizationException(response=response) else: raise OpenStackApiException( - message=_("Unexpected status code"), - response=response) + message=_("Unexpected status code"), + response=response) return response diff --git a/cinder/tests/integrated/integrated_helpers.py b/cinder/tests/integrated/integrated_helpers.py index 72f4e690d4..ef9bb1b58a 100644 --- a/cinder/tests/integrated/integrated_helpers.py +++ b/cinder/tests/integrated/integrated_helpers.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2011 Justin Santa Barbara # All Rights Reserved. # @@ -21,12 +19,12 @@ import random import string +import uuid from cinder.openstack.common import log as logging from cinder import service from cinder import test # For the flags from cinder.tests.integrated.api import client -from cinder import utils LOG = logging.getLogger(__name__) @@ -51,7 +49,7 @@ def generate_new_element(items, prefix, numeric=False): candidate = prefix + generate_random_numeric(8) else: candidate = prefix + generate_random_alphanumeric(8) - if not candidate in items: + if candidate not in items: return candidate LOG.debug("Random collision on %s" % candidate) @@ -94,6 +92,10 @@ def _get_flags(self): # Auto-assign ports to allow concurrent tests f['osapi_volume_listen_port'] = 0 + # Use simple scheduler to avoid complications - we test schedulers + # separately + f['scheduler_driver'] = 'cinder.scheduler.simple.SimpleScheduler' + return f def get_unused_server_name(self): @@ -102,7 +104,7 @@ def get_unused_server_name(self): return generate_new_element(server_names, 'server') def get_invalid_image(self): - return str(utils.gen_uuid()) + return str(uuid.uuid4()) def _build_minimal_create_server_request(self): server = {} diff --git a/cinder/tests/integrated/test_extensions.py b/cinder/tests/integrated/test_extensions.py index 9c58081063..de4cdcb301 100644 --- a/cinder/tests/integrated/test_extensions.py +++ b/cinder/tests/integrated/test_extensions.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2011 Justin Santa Barbara # All Rights Reserved. # @@ -15,22 +13,24 @@ # License for the specific language governing permissions and limitations # under the License. 
-from cinder import flags + +from oslo.config import cfg + from cinder.openstack.common import log as logging from cinder.tests.integrated import integrated_helpers -FLAGS = flags.FLAGS +CONF = cfg.CONF + LOG = logging.getLogger(__name__) class ExtensionsTest(integrated_helpers._IntegratedTestBase): def _get_flags(self): f = super(ExtensionsTest, self)._get_flags() - f['osapi_volume_extension'] = FLAGS.osapi_volume_extension[:] + f['osapi_volume_extension'] = CONF.osapi_volume_extension[:] f['osapi_volume_extension'].append( - 'cinder.tests.api.openstack.volume.extensions.' - 'foxinsocks.Foxinsocks') + 'cinder.tests.api.extensions.foxinsocks.Foxinsocks') return f def test_get_foxnsocks(self): diff --git a/cinder/tests/integrated/test_login.py b/cinder/tests/integrated/test_login.py index 9c1515de82..ef4e193337 100644 --- a/cinder/tests/integrated/test_login.py +++ b/cinder/tests/integrated/test_login.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2011 Justin Santa Barbara # All Rights Reserved. # diff --git a/cinder/tests/integrated/test_volumes.py b/cinder/tests/integrated/test_volumes.py index 0ec851113d..9bdc872dbf 100644 --- a/cinder/tests/integrated/test_volumes.py +++ b/cinder/tests/integrated/test_volumes.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2011 Justin Santa Barbara # All Rights Reserved. # @@ -15,14 +13,14 @@ # License for the specific language governing permissions and limitations # under the License. +import testtools import time -import unittest -from cinder import service from cinder.openstack.common import log as logging -from cinder.tests.integrated import integrated_helpers +from cinder import service +from cinder.tests import fake_driver from cinder.tests.integrated.api import client -from cinder.volume import driver +from cinder.tests.integrated import integrated_helpers LOG = logging.getLogger(__name__) @@ -31,7 +29,7 @@ class VolumesTest(integrated_helpers._IntegratedTestBase): def setUp(self): super(VolumesTest, self).setUp() - driver.LoggingVolumeDriver.clear_logs() + fake_driver.LoggingVolumeDriver.clear_logs() def _start_api_service(self): self.osapi = service.WSGIService("osapi_volume") @@ -41,8 +39,7 @@ def _start_api_service(self): def _get_flags(self): f = super(VolumesTest, self)._get_flags() - f['use_local_volumes'] = False # Avoids calling local_path - f['volume_driver'] = 'cinder.volume.driver.LoggingVolumeDriver' + f['volume_driver'] = 'cinder.tests.fake_driver.LoggingVolumeDriver' return f def test_get_volumes_summary(self): @@ -72,7 +69,7 @@ def _poll_while(self, volume_id, continue_states, max_retries=5): self.assertEqual(volume_id, found_volume['id']) - if not found_volume['status'] in continue_states: + if found_volume['status'] not in continue_states: break time.sleep(1) @@ -81,6 +78,7 @@ def _poll_while(self, volume_id, continue_states, max_retries=5): break return found_volume + @testtools.skip('This test is failing: bug 1173266') def test_create_and_delete_volume(self): """Creates and deletes a volume.""" @@ -97,7 +95,7 @@ def test_create_and_delete_volume(self): # It should also be in the all-volume list volumes = self.api.get_volumes() volume_names = [volume['id'] for volume in volumes] - self.assertTrue(created_volume_id in volume_names) + self.assertIn(created_volume_id, volume_names) # Wait (briefly) for creation. 
Delay is due to the 'message queue' found_volume = self._poll_while(created_volume_id, ['creating']) @@ -114,33 +112,33 @@ def test_create_and_delete_volume(self): # Should be gone self.assertFalse(found_volume) - LOG.debug("Logs: %s" % driver.LoggingVolumeDriver.all_logs()) + LOG.debug("Logs: %s" % fake_driver.LoggingVolumeDriver.all_logs()) - create_actions = driver.LoggingVolumeDriver.logs_like( - 'create_volume', - id=created_volume_id) + create_actions = fake_driver.LoggingVolumeDriver.logs_like( + 'create_volume', + id=created_volume_id) LOG.debug("Create_Actions: %s" % create_actions) - self.assertEquals(1, len(create_actions)) + self.assertEqual(1, len(create_actions)) create_action = create_actions[0] - self.assertEquals(create_action['id'], created_volume_id) - self.assertEquals(create_action['availability_zone'], 'nova') - self.assertEquals(create_action['size'], 1) - - export_actions = driver.LoggingVolumeDriver.logs_like( - 'create_export', - id=created_volume_id) - self.assertEquals(1, len(export_actions)) + self.assertEqual(create_action['id'], created_volume_id) + self.assertEqual(create_action['availability_zone'], 'nova') + self.assertEqual(create_action['size'], 1) + + export_actions = fake_driver.LoggingVolumeDriver.logs_like( + 'create_export', + id=created_volume_id) + self.assertEqual(1, len(export_actions)) export_action = export_actions[0] - self.assertEquals(export_action['id'], created_volume_id) - self.assertEquals(export_action['availability_zone'], 'nova') + self.assertEqual(export_action['id'], created_volume_id) + self.assertEqual(export_action['availability_zone'], 'nova') - delete_actions = driver.LoggingVolumeDriver.logs_like( - 'delete_volume', - id=created_volume_id) - self.assertEquals(1, len(delete_actions)) + delete_actions = fake_driver.LoggingVolumeDriver.logs_like( + 'delete_volume', + id=created_volume_id) + self.assertEqual(1, len(delete_actions)) delete_action = export_actions[0] - self.assertEquals(delete_action['id'], created_volume_id) + self.assertEqual(delete_action['id'], created_volume_id) def test_create_volume_with_metadata(self): """Creates a volume with metadata.""" @@ -164,7 +162,7 @@ def test_create_volume_in_availability_zone(self): """Creates a volume in availability_zone.""" # Create volume - availability_zone = 'zone1:host1' + availability_zone = 'nova' created_volume = self.api.post_volume( {'volume': {'size': 1, 'availability_zone': availability_zone}}) @@ -193,6 +191,3 @@ def test_create_and_update_volume(self): found_volume = self.api.get_volume(created_volume_id) self.assertEqual(created_volume_id, found_volume['id']) self.assertEqual(found_volume['display_name'], 'vol-one') - -if __name__ == "__main__": - unittest.main() diff --git a/cinder/tests/integrated/test_xml.py b/cinder/tests/integrated/test_xml.py index 52da4c96cc..68c89dbc26 100644 --- a/cinder/tests/integrated/test_xml.py +++ b/cinder/tests/integrated/test_xml.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2011 Justin Santa Barbara # All Rights Reserved. 
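The test_volumes.py hunks above modernize the assertions: the deprecated `assertEquals` alias becomes `assertEqual`, membership checks move from `assertTrue(x in y)` to `assertIn`, and a known-broken test is skipped via `testtools.skip` rather than deleted. A short illustrative sketch (the class and values are hypothetical):

    import testtools

    class ExampleTest(testtools.TestCase):

        def test_membership(self):
            names = ['vol-1', 'vol-2']
            # assertIn reports the container's contents on failure,
            # unlike assertTrue('vol-1' in names).
            self.assertIn('vol-1', names)

        def test_equality(self):
            # assertEqual is the supported spelling; assertEquals is
            # a deprecated alias.
            self.assertEqual(2, 1 + 1)

        @testtools.skip('example: reference the tracking bug here')
        def test_known_failure(self):
            self.fail('never runs')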
# @@ -17,9 +15,9 @@ from lxml import etree +from cinder.api import common from cinder.openstack.common import log as logging from cinder.tests.integrated import integrated_helpers -from cinder.api.openstack import common LOG = logging.getLogger(__name__) diff --git a/cinder/tests/keymgr/__init__.py b/cinder/tests/keymgr/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cinder/tests/keymgr/fake.py b/cinder/tests/keymgr/fake.py new file mode 100644 index 0000000000..c169e0a4e0 --- /dev/null +++ b/cinder/tests/keymgr/fake.py @@ -0,0 +1,24 @@ +# Copyright 2011 Justin Santa Barbara +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Implementation of a fake key manager.""" + + +from cinder.tests.keymgr import mock_key_mgr + + +def fake_api(): + return mock_key_mgr.MockKeyManager() diff --git a/cinder/tests/keymgr/mock_key_mgr.py b/cinder/tests/keymgr/mock_key_mgr.py new file mode 100644 index 0000000000..2b49c8b465 --- /dev/null +++ b/cinder/tests/keymgr/mock_key_mgr.py @@ -0,0 +1,125 @@ +# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +A mock implementation of a key manager that stores keys in a dictionary. + +This key manager implementation is primarily intended for testing. In +particular, it does not store keys persistently. Lack of a centralized key +store also makes this implementation unsuitable for use among different +services. + +Note: Instantiating this class multiple times will create separate key stores. +Keys created in one instance will not be accessible from other instances of +this class. +""" + +import array +import uuid + +from cinder import exception +from cinder.keymgr import key +from cinder.keymgr import key_mgr +from cinder import utils + + +class MockKeyManager(key_mgr.KeyManager): + + """Mocking manager for integration tests. + + This mock key manager implementation supports all the methods specified + by the key manager interface. This implementation stores keys within a + dictionary, and as a result, it is not acceptable for use across different + services. Side effects (e.g., raising exceptions) for each method are + handled as specified by the key manager interface. + + This key manager is not suitable for use in production deployments. 
+ """ + + def __init__(self): + self.keys = {} + + def _generate_hex_key(self, **kwargs): + key_length = kwargs.get('key_length', 256) + # hex digit => 4 bits + hex_encoded = utils.generate_password(length=key_length / 4, + symbolgroups='0123456789ABCDEF') + return hex_encoded + + def _generate_key(self, **kwargs): + _hex = self._generate_hex_key(**kwargs) + return key.SymmetricKey('AES', + array.array('B', _hex.decode('hex')).tolist()) + + def create_key(self, ctxt, **kwargs): + """Creates a key. + + This implementation returns a UUID for the created key. A + NotAuthorized exception is raised if the specified context is None. + """ + if ctxt is None: + raise exception.NotAuthorized() + + key = self._generate_key(**kwargs) + return self.store_key(ctxt, key) + + def _generate_key_id(self): + key_id = str(uuid.uuid4()) + while key_id in self.keys: + key_id = str(uuid.uuid4()) + + return key_id + + def store_key(self, ctxt, key, **kwargs): + """Stores (i.e., registers) a key with the key manager.""" + if ctxt is None: + raise exception.NotAuthorized() + + key_id = self._generate_key_id() + self.keys[key_id] = key + + return key_id + + def copy_key(self, ctxt, key_id, **kwargs): + if ctxt is None: + raise exception.NotAuthorized() + + copied_key_id = self._generate_key_id() + self.keys[copied_key_id] = self.keys[key_id] + + return copied_key_id + + def get_key(self, ctxt, key_id, **kwargs): + """Retrieves the key identified by the specified id. + + This implementation returns the key that is associated with the + specified UUID. A NotAuthorized exception is raised if the specified + context is None; a KeyError is raised if the UUID is invalid. + """ + if ctxt is None: + raise exception.NotAuthorized() + + return self.keys[key_id] + + def delete_key(self, ctxt, key_id, **kwargs): + """Deletes the key identified by the specified id. + + A NotAuthorized exception is raised if the context is None and a + KeyError is raised if the UUID is invalid. + """ + if ctxt is None: + raise exception.NotAuthorized() + + del self.keys[key_id] diff --git a/cinder/tests/keymgr/test_conf_key_mgr.py b/cinder/tests/keymgr/test_conf_key_mgr.py new file mode 100644 index 0000000000..d4ba38c47e --- /dev/null +++ b/cinder/tests/keymgr/test_conf_key_mgr.py @@ -0,0 +1,123 @@ +# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Test cases for the conf key manager. 
+""" + +import array + +from oslo.config import cfg + +from cinder import context +from cinder import exception +from cinder.keymgr import conf_key_mgr +from cinder.keymgr import key +from cinder.tests.keymgr import test_key_mgr + + +CONF = cfg.CONF +CONF.import_opt('fixed_key', 'cinder.keymgr.conf_key_mgr', group='keymgr') + + +class ConfKeyManagerTestCase(test_key_mgr.KeyManagerTestCase): + def __init__(self, *args, **kwargs): + super(ConfKeyManagerTestCase, self).__init__(*args, **kwargs) + + self._hex_key = '1' * 64 + + def _create_key_manager(self): + CONF.set_default('fixed_key', default=self._hex_key, group='keymgr') + return conf_key_mgr.ConfKeyManager() + + def setUp(self): + super(ConfKeyManagerTestCase, self).setUp() + + self.ctxt = context.RequestContext('fake', 'fake') + + self.key_id = '00000000-0000-0000-0000-000000000000' + encoded = array.array('B', self._hex_key.decode('hex')).tolist() + self.key = key.SymmetricKey('AES', encoded) + + def test___init__(self): + self.assertEqual(self.key_id, self.key_mgr.key_id) + + def test_create_key(self): + key_id_1 = self.key_mgr.create_key(self.ctxt) + key_id_2 = self.key_mgr.create_key(self.ctxt) + # ensure that the UUIDs are the same + self.assertEqual(key_id_1, key_id_2) + + def test_create_null_context(self): + self.assertRaises(exception.NotAuthorized, + self.key_mgr.create_key, None) + + def test_store_key(self): + key_id = self.key_mgr.store_key(self.ctxt, self.key) + + actual_key = self.key_mgr.get_key(self.ctxt, key_id) + self.assertEqual(self.key, actual_key) + + def test_store_null_context(self): + self.assertRaises(exception.NotAuthorized, + self.key_mgr.store_key, None, self.key) + + def test_store_key_invalid(self): + encoded = self.key.get_encoded() + inverse_key = key.SymmetricKey('AES', [~b for b in encoded]) + + self.assertRaises(exception.KeyManagerError, + self.key_mgr.store_key, self.ctxt, inverse_key) + + def test_copy_key(self): + key_id = self.key_mgr.create_key(self.ctxt) + key = self.key_mgr.get_key(self.ctxt, key_id) + + copied_key_id = self.key_mgr.copy_key(self.ctxt, key_id) + copied_key = self.key_mgr.get_key(self.ctxt, copied_key_id) + + self.assertEqual(key_id, copied_key_id) + self.assertEqual(key, copied_key) + + def test_copy_null_context(self): + self.assertRaises(exception.NotAuthorized, + self.key_mgr.copy_key, None, None) + + def test_delete_key(self): + key_id = self.key_mgr.create_key(self.ctxt) + self.key_mgr.delete_key(self.ctxt, key_id) + + # cannot delete key -- might have lingering references + self.assertEqual(self.key, + self.key_mgr.get_key(self.ctxt, self.key_id)) + + def test_delete_null_context(self): + self.assertRaises(exception.NotAuthorized, + self.key_mgr.delete_key, None, None) + + def test_delete_unknown_key(self): + self.assertRaises(exception.KeyManagerError, + self.key_mgr.delete_key, self.ctxt, None) + + def test_get_key(self): + self.assertEqual(self.key, + self.key_mgr.get_key(self.ctxt, self.key_id)) + + def test_get_null_context(self): + self.assertRaises(exception.NotAuthorized, + self.key_mgr.get_key, None, None) + + def test_get_unknown_key(self): + self.assertRaises(KeyError, self.key_mgr.get_key, self.ctxt, None) diff --git a/cinder/tests/keymgr/test_key.py b/cinder/tests/keymgr/test_key.py new file mode 100644 index 0000000000..d37688ef29 --- /dev/null +++ b/cinder/tests/keymgr/test_key.py @@ -0,0 +1,67 @@ +# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Test cases for the key classes. +""" + +import array + +from cinder.keymgr import key +from cinder import test + + +class KeyTestCase(test.TestCase): + + def _create_key(self): + raise NotImplementedError() + + def setUp(self): + super(KeyTestCase, self).setUp() + + self.key = self._create_key() + + +class SymmetricKeyTestCase(KeyTestCase): + + def _create_key(self): + return key.SymmetricKey(self.algorithm, self.encoded) + + def setUp(self): + self.algorithm = 'AES' + self.encoded = array.array('B', ('0' * 64).decode('hex')).tolist() + + super(SymmetricKeyTestCase, self).setUp() + + def test_get_algorithm(self): + self.assertEqual(self.key.get_algorithm(), self.algorithm) + + def test_get_format(self): + self.assertEqual(self.key.get_format(), 'RAW') + + def test_get_encoded(self): + self.assertEqual(self.key.get_encoded(), self.encoded) + + def test___eq__(self): + self.assertTrue(self.key == self.key) + + self.assertFalse(self.key == None) + self.assertFalse(None == self.key) + + def test___ne__(self): + self.assertFalse(self.key != self.key) + + self.assertTrue(self.key != None) + self.assertTrue(None != self.key) diff --git a/cinder/tests/keymgr/test_key_mgr.py b/cinder/tests/keymgr/test_key_mgr.py new file mode 100644 index 0000000000..0c37ecd60d --- /dev/null +++ b/cinder/tests/keymgr/test_key_mgr.py @@ -0,0 +1,33 @@ +# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Test cases for the key manager. +""" + +from cinder import test + + +class KeyManagerTestCase(test.TestCase): + def __init__(self, *args, **kwargs): + super(KeyManagerTestCase, self).__init__(*args, **kwargs) + + def _create_key_manager(self): + raise NotImplementedError() + + def setUp(self): + super(KeyManagerTestCase, self).setUp() + + self.key_mgr = self._create_key_manager() diff --git a/cinder/tests/keymgr/test_mock_key_mgr.py b/cinder/tests/keymgr/test_mock_key_mgr.py new file mode 100644 index 0000000000..d73b8ba2a3 --- /dev/null +++ b/cinder/tests/keymgr/test_mock_key_mgr.py @@ -0,0 +1,102 @@ +# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Test cases for the mock key manager. +""" + +import array + +from cinder import context +from cinder import exception +from cinder.keymgr import key as keymgr_key +from cinder.tests.keymgr import mock_key_mgr +from cinder.tests.keymgr import test_key_mgr + + +class MockKeyManagerTestCase(test_key_mgr.KeyManagerTestCase): + + def _create_key_manager(self): + return mock_key_mgr.MockKeyManager() + + def setUp(self): + super(MockKeyManagerTestCase, self).setUp() + + self.ctxt = context.RequestContext('fake', 'fake') + + def test_create_key(self): + key_id_1 = self.key_mgr.create_key(self.ctxt) + key_id_2 = self.key_mgr.create_key(self.ctxt) + # ensure that the UUIDs are unique + self.assertNotEqual(key_id_1, key_id_2) + + def test_create_key_with_length(self): + for length in [64, 128, 256]: + key_id = self.key_mgr.create_key(self.ctxt, key_length=length) + key = self.key_mgr.get_key(self.ctxt, key_id) + self.assertEqual(length / 8, len(key.get_encoded())) + + def test_create_null_context(self): + self.assertRaises(exception.NotAuthorized, + self.key_mgr.create_key, None) + + def test_store_key(self): + secret_key = array.array('B', ('0' * 64).decode('hex')).tolist() + _key = keymgr_key.SymmetricKey('AES', secret_key) + key_id = self.key_mgr.store_key(self.ctxt, _key) + + actual_key = self.key_mgr.get_key(self.ctxt, key_id) + self.assertEqual(_key, actual_key) + + def test_store_null_context(self): + self.assertRaises(exception.NotAuthorized, + self.key_mgr.store_key, None, None) + + def test_copy_key(self): + key_id = self.key_mgr.create_key(self.ctxt) + key = self.key_mgr.get_key(self.ctxt, key_id) + + copied_key_id = self.key_mgr.copy_key(self.ctxt, key_id) + copied_key = self.key_mgr.get_key(self.ctxt, copied_key_id) + + self.assertNotEqual(key_id, copied_key_id) + self.assertEqual(key, copied_key) + + def test_copy_null_context(self): + self.assertRaises(exception.NotAuthorized, + self.key_mgr.copy_key, None, None) + + def test_get_key(self): + pass + + def test_get_null_context(self): + self.assertRaises(exception.NotAuthorized, + self.key_mgr.get_key, None, None) + + def test_get_unknown_key(self): + self.assertRaises(KeyError, self.key_mgr.get_key, self.ctxt, None) + + def test_delete_key(self): + key_id = self.key_mgr.create_key(self.ctxt) + self.key_mgr.delete_key(self.ctxt, key_id) + + self.assertRaises(KeyError, self.key_mgr.get_key, self.ctxt, key_id) + + def test_delete_null_context(self): + self.assertRaises(exception.NotAuthorized, + self.key_mgr.delete_key, None, None) + + def test_delete_unknown_key(self): + self.assertRaises(KeyError, self.key_mgr.delete_key, self.ctxt, None) diff --git a/cinder/tests/keymgr/test_not_implemented_key_mgr.py b/cinder/tests/keymgr/test_not_implemented_key_mgr.py new file mode 100644 index 0000000000..1849396470 --- /dev/null +++ b/cinder/tests/keymgr/test_not_implemented_key_mgr.py @@ -0,0 +1,50 @@ +# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory +# All Rights Reserved. 
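The MockKeyManager tests above exercise the full round trip: create returns a fresh UUID, copy registers the same key object under a new UUID, and delete makes later lookups raise `KeyError`. A hedged usage sketch, assuming a cinder tree where the modules added in this patch are importable:

    from cinder import context
    from cinder.tests.keymgr import mock_key_mgr

    ctxt = context.RequestContext('fake_user', 'fake_project')
    mgr = mock_key_mgr.MockKeyManager()

    key_id = mgr.create_key(ctxt)         # fresh UUID string
    copy_id = mgr.copy_key(ctxt, key_id)  # new UUID, same key object
    assert key_id != copy_id
    assert mgr.get_key(ctxt, key_id) == mgr.get_key(ctxt, copy_id)

    mgr.delete_key(ctxt, key_id)          # get_key(ctxt, key_id) would
                                          # now raise KeyError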
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Test cases for the not implemented key manager. +""" + +from cinder.keymgr import not_implemented_key_mgr +from cinder.tests.keymgr import test_key_mgr + + +class NotImplementedKeyManagerTestCase(test_key_mgr.KeyManagerTestCase): + + def _create_key_manager(self): + return not_implemented_key_mgr.NotImplementedKeyManager() + + def setUp(self): + super(NotImplementedKeyManagerTestCase, self).setUp() + + def test_create_key(self): + self.assertRaises(NotImplementedError, + self.key_mgr.create_key, None) + + def test_store_key(self): + self.assertRaises(NotImplementedError, + self.key_mgr.store_key, None, None) + + def test_copy_key(self): + self.assertRaises(NotImplementedError, + self.key_mgr.copy_key, None, None) + + def test_get_key(self): + self.assertRaises(NotImplementedError, + self.key_mgr.get_key, None, None) + + def test_delete_key(self): + self.assertRaises(NotImplementedError, + self.key_mgr.delete_key, None, None) diff --git a/cinder/tests/monkey_patch_example/__init__.py b/cinder/tests/monkey_patch_example/__init__.py index 25cf9ccfe6..d367a48494 100644 --- a/cinder/tests/monkey_patch_example/__init__.py +++ b/cinder/tests/monkey_patch_example/__init__.py @@ -1,6 +1,4 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2011 OpenStack LLC. +# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -21,7 +19,7 @@ def example_decorator(name, function): - """ decorator for notify which is used from utils.monkey_patch() + """decorator for notify which is used from utils.monkey_patch(). :param name: name of the function :param function: - object of the function diff --git a/cinder/tests/monkey_patch_example/example_a.py b/cinder/tests/monkey_patch_example/example_a.py index 21e79bcb0f..3fdb4dcc05 100644 --- a/cinder/tests/monkey_patch_example/example_a.py +++ b/cinder/tests/monkey_patch_example/example_a.py @@ -1,6 +1,4 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2011 OpenStack LLC. +# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/cinder/tests/monkey_patch_example/example_b.py b/cinder/tests/monkey_patch_example/example_b.py index 9d8f6d339e..2515fd2be4 100644 --- a/cinder/tests/monkey_patch_example/example_b.py +++ b/cinder/tests/monkey_patch_example/example_b.py @@ -1,6 +1,4 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2011 OpenStack LLC. +# Copyright 2011 OpenStack Foundation # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/cinder/tests/policy.json b/cinder/tests/policy.json index 80a1913cf7..78ee5f1aef 100644 --- a/cinder/tests/policy.json +++ b/cinder/tests/policy.json @@ -1,15 +1,19 @@ { "context_is_admin": [["role:admin"]], "admin_api": [["is_admin:True"]], + "admin_or_owner": [["is_admin:True"], ["project_id:%(project_id)s"]], "volume:create": [], "volume:get": [], "volume:get_all": [], "volume:get_volume_metadata": [], - "volume:delete": [], - "volume:update": [], "volume:delete_volume_metadata": [], "volume:update_volume_metadata": [], + "volume:get_volume_admin_metadata": [["rule:admin_api"]], + "volume:delete_volume_admin_metadata": [["rule:admin_api"]], + "volume:update_volume_admin_metadata": [["rule:admin_api"]], + "volume:delete": [], + "volume:update": [], "volume:attach": [], "volume:detach": [], "volume:reserve_volume": [], @@ -25,13 +29,47 @@ "volume:get_snapshot": [], "volume:get_all_snapshots": [], "volume:update_snapshot": [], + "volume:extend": [], + "volume:migrate_volume": [["rule:admin_api"]], + "volume:migrate_volume_completion": [["rule:admin_api"]], + "volume:update_readonly_flag": [], + "volume:retype": [], "volume_extension:volume_admin_actions:reset_status": [["rule:admin_api"]], "volume_extension:snapshot_admin_actions:reset_status": [["rule:admin_api"]], "volume_extension:volume_admin_actions:force_delete": [["rule:admin_api"]], "volume_extension:snapshot_admin_actions:force_delete": [["rule:admin_api"]], + "volume_extension:volume_admin_actions:force_detach": [["rule:admin_api"]], + "volume_extension:volume_admin_actions:migrate_volume": [["rule:admin_api"]], + "volume_extension:volume_admin_actions:migrate_volume_completion": [["rule:admin_api"]], "volume_extension:volume_actions:upload_image": [], "volume_extension:types_manage": [], "volume_extension:types_extra_specs": [], - "volume_extension:extended_snapshot_attributes": [] + "volume_extension:volume_type_encryption": [["rule:admin_api"]], + "volume_extension:volume_encryption_metadata": [["rule:admin_or_owner"]], + "volume_extension:qos_specs_manage": [], + "volume_extension:extended_snapshot_attributes": [], + "volume_extension:volume_image_metadata": [], + "volume_extension:volume_host_attribute": [["rule:admin_api"]], + "volume_extension:volume_tenant_attribute": [["rule:admin_api"]], + "volume_extension:volume_mig_status_attribute": [["rule:admin_api"]], + "volume_extension:hosts": [["rule:admin_api"]], + "volume_extension:quotas:show": [], + "volume_extension:quotas:update": [], + "volume_extension:quota_classes": [], + + "limits_extension:used_limits": [], + + "snapshot_extension:snapshot_actions:update_snapshot_status": [], + + "volume:create_transfer": [], + "volume:accept_transfer": [], + "volume:delete_transfer": [], + "volume:get_all_transfers": [], + + "backup:create" : [], + "backup:delete": [], + "backup:get": [], + "backup:get_all": [], + "backup:restore": [] } diff --git a/cinder/tests/runtime_flags.py b/cinder/tests/runtime_conf.py similarity index 77% rename from cinder/tests/runtime_flags.py rename to cinder/tests/runtime_conf.py index 4327561fc5..db1a6c5b71 100644 --- a/cinder/tests/runtime_flags.py +++ b/cinder/tests/runtime_conf.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. 
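The rename above (runtime_flags.py to runtime_conf.py) and the hunk that follows swap cinder's legacy `flags.FLAGS` wrapper for `oslo.config` used directly. A minimal sketch of registering and reading an option with `oslo.config`, mirroring that hunk:

    from oslo.config import cfg

    CONF = cfg.CONF
    CONF.register_opt(cfg.IntOpt('runtime_answer', default=54,
                                 help='test conf'))

    print(CONF.runtime_answer)  # 54 until overridden by config file or CLI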
@@ -16,8 +14,9 @@ # License for the specific language governing permissions and limitations # under the License. -from cinder import flags -from cinder.openstack.common import cfg -FLAGS = flags.FLAGS -FLAGS.register_opt(cfg.IntOpt('runtime_answer', default=54, help='test flag')) +from oslo.config import cfg + + +CONF = cfg.CONF +CONF.register_opt(cfg.IntOpt('runtime_answer', default=54, help='test conf')) diff --git a/cinder/tests/scheduler/__init__.py b/cinder/tests/scheduler/__init__.py index 3be5ce944c..a2c6e2e27d 100644 --- a/cinder/tests/scheduler/__init__.py +++ b/cinder/tests/scheduler/__init__.py @@ -1,6 +1,4 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2011 OpenStack LLC. +# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/cinder/tests/scheduler/fakes.py b/cinder/tests/scheduler/fakes.py index 97a4f8cd85..ed6437d0bc 100644 --- a/cinder/tests/scheduler/fakes.py +++ b/cinder/tests/scheduler/fakes.py @@ -1,4 +1,4 @@ -# Copyright 2011 OpenStack LLC. +# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -16,45 +16,68 @@ Fakes For Scheduler tests. """ - +from cinder.openstack.common import timeutils +from cinder.scheduler import filter_scheduler from cinder.scheduler import host_manager -class FakeHostManager(host_manager.HostManager): - """host1: free_ram_mb=1024-512-512=0, free_disk_gb=1024-512-512=0 - host2: free_ram_mb=2048-512=1536 free_disk_gb=2048-512=1536 - host3: free_ram_mb=4096-1024=3072 free_disk_gb=4096-1024=3072 - host4: free_ram_mb=8192 free_disk_gb=8192""" +class FakeFilterScheduler(filter_scheduler.FilterScheduler): + def __init__(self, *args, **kwargs): + super(FakeFilterScheduler, self).__init__(*args, **kwargs) + self.host_manager = host_manager.HostManager() + +class FakeHostManager(host_manager.HostManager): def __init__(self): super(FakeHostManager, self).__init__() self.service_states = { - 'host1': { - 'compute': {'host_memory_free': 1073741824}, - }, - 'host2': { - 'compute': {'host_memory_free': 2147483648}, - }, - 'host3': { - 'compute': {'host_memory_free': 3221225472}, - }, - 'host4': { - 'compute': {'host_memory_free': 999999999}, - }, + 'host1': {'total_capacity_gb': 1024, + 'free_capacity_gb': 1024, + 'allocated_capacity_gb': 0, + 'reserved_percentage': 10, + 'volume_backend_name': 'lvm1', + 'timestamp': None}, + 'host2': {'total_capacity_gb': 2048, + 'free_capacity_gb': 300, + 'allocated_capacity_gb': 1748, + 'reserved_percentage': 10, + 'volume_backend_name': 'lvm2', + 'timestamp': None}, + 'host3': {'total_capacity_gb': 512, + 'free_capacity_gb': 256, + 'allocated_capacity_gb': 256, + 'reserved_percentage': 0, + 'volume_backend_name': 'lvm3', + 'timestamp': None}, + 'host4': {'total_capacity_gb': 2048, + 'free_capacity_gb': 200, + 'allocated_capacity_gb': 1848, + 'reserved_percentage': 5, + 'volume_backend_name': 'lvm4', + 'timestamp': None}, } - def get_host_list_from_db(self, context): - return [ - ('host1', dict(free_disk_gb=1024, free_ram_mb=1024)), - ('host2', dict(free_disk_gb=2048, free_ram_mb=2048)), - ('host3', dict(free_disk_gb=4096, free_ram_mb=4096)), - ('host4', dict(free_disk_gb=8192, free_ram_mb=8192)), - ] - class FakeHostState(host_manager.HostState): - def __init__(self, host, topic, attribute_dict): - super(FakeHostState, self).__init__(host, topic) + def __init__(self, host, attribute_dict): + super(FakeHostState, 
self).__init__(host) for (key, val) in attribute_dict.iteritems(): setattr(self, key, val) + + +def mock_host_manager_db_calls(mock_obj): + services = [ + dict(id=1, host='host1', topic='volume', disabled=False, + availability_zone='zone1', updated_at=timeutils.utcnow()), + dict(id=2, host='host2', topic='volume', disabled=False, + availability_zone='zone1', updated_at=timeutils.utcnow()), + dict(id=3, host='host3', topic='volume', disabled=False, + availability_zone='zone2', updated_at=timeutils.utcnow()), + dict(id=4, host='host4', topic='volume', disabled=False, + availability_zone='zone3', updated_at=timeutils.utcnow()), + # service on host5 is disabled + dict(id=5, host='host5', topic='volume', disabled=True, + availability_zone='zone4', updated_at=timeutils.utcnow()), + ] + mock_obj.return_value = services diff --git a/cinder/tests/scheduler/test_allocated_capacity_weigher.py b/cinder/tests/scheduler/test_allocated_capacity_weigher.py new file mode 100644 index 0000000000..8a02ac92f2 --- /dev/null +++ b/cinder/tests/scheduler/test_allocated_capacity_weigher.py @@ -0,0 +1,92 @@ +# Copyright 2013 eBay Inc. +# +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Tests For Allocated Capacity Weigher. +""" + +import mock +from oslo.config import cfg + +from cinder import context +from cinder.openstack.common.scheduler.weights import HostWeightHandler +from cinder.scheduler.weights.capacity import AllocatedCapacityWeigher as ACW +from cinder import test +from cinder.tests.scheduler import fakes + +CONF = cfg.CONF + + +class AllocatedCapacityWeigherTestCase(test.TestCase): + def setUp(self): + super(AllocatedCapacityWeigherTestCase, self).setUp() + self.host_manager = fakes.FakeHostManager() + self.weight_handler = HostWeightHandler('cinder.scheduler.weights') + + def _get_weighed_host(self, hosts, weight_properties=None): + if weight_properties is None: + weight_properties = {} + return self.weight_handler.get_weighed_objects([ACW], hosts, + weight_properties)[0] + + @mock.patch('cinder.db.sqlalchemy.api.service_get_all_by_topic') + def _get_all_hosts(self, _mock_service_get_all_by_topic): + ctxt = context.get_admin_context() + fakes.mock_host_manager_db_calls(_mock_service_get_all_by_topic) + host_states = self.host_manager.get_all_host_states(ctxt) + _mock_service_get_all_by_topic.assert_called_once_with( + ctxt, CONF.volume_topic) + return host_states + + def test_default_of_spreading_first(self): + hostinfo_list = self._get_all_hosts() + + # host1: allocated_capacity_gb=0, weight=0 + # host2: allocated_capacity_gb=1748, weight=-1748 + # host3: allocated_capacity_gb=256, weight=-256 + # host4: allocated_capacity_gb=1848, weight=-1848 + + # so, host1 should win: + weighed_host = self._get_weighed_host(hostinfo_list) + self.assertEqual(weighed_host.weight, 0) + self.assertEqual(weighed_host.obj.host, 'host1') + + def test_capacity_weight_multiplier1(self): + self.flags(allocated_capacity_weight_multiplier=1.0) + hostinfo_list = self._get_all_hosts() + + # host1: 
allocated_capacity_gb=0, weight=0 + # host2: allocated_capacity_gb=1748, weight=1748 + # host3: allocated_capacity_gb=256, weight=256 + # host4: allocated_capacity_gb=1848, weight=1848 + + # so, host4 should win: + weighed_host = self._get_weighed_host(hostinfo_list) + self.assertEqual(weighed_host.weight, 1848.0) + self.assertEqual(weighed_host.obj.host, 'host4') + + def test_capacity_weight_multiplier2(self): + self.flags(allocated_capacity_weight_multiplier=-2.0) + hostinfo_list = self._get_all_hosts() + + # host1: allocated_capacity_gb=0, weight=0 + # host2: allocated_capacity_gb=1748, weight=-3496 + # host3: allocated_capacity_gb=256, weight=-512 + # host4: allocated_capacity_gb=1848, weight=-3696 + + # so, host1 should win: + weighed_host = self._get_weighed_host(hostinfo_list) + self.assertEqual(weighed_host.weight, 0) + self.assertEqual(weighed_host.obj.host, 'host1') diff --git a/cinder/tests/scheduler/test_capacity_weigher.py b/cinder/tests/scheduler/test_capacity_weigher.py new file mode 100644 index 0000000000..680dac1179 --- /dev/null +++ b/cinder/tests/scheduler/test_capacity_weigher.py @@ -0,0 +1,93 @@ +# Copyright 2011-2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Tests For Capacity Weigher. 
+""" + +import mock + +from oslo.config import cfg + +from cinder import context +from cinder.openstack.common.scheduler.weights import HostWeightHandler +from cinder.scheduler.weights.capacity import CapacityWeigher +from cinder import test +from cinder.tests.scheduler import fakes + +CONF = cfg.CONF + + +class CapacityWeigherTestCase(test.TestCase): + def setUp(self): + super(CapacityWeigherTestCase, self).setUp() + self.host_manager = fakes.FakeHostManager() + self.weight_handler = HostWeightHandler('cinder.scheduler.weights') + + def _get_weighed_host(self, hosts, weight_properties=None): + if weight_properties is None: + weight_properties = {} + return self.weight_handler.get_weighed_objects([CapacityWeigher], + hosts, + weight_properties)[0] + + @mock.patch('cinder.db.sqlalchemy.api.service_get_all_by_topic') + def _get_all_hosts(self, _mock_service_get_all_by_topic): + ctxt = context.get_admin_context() + fakes.mock_host_manager_db_calls(_mock_service_get_all_by_topic) + host_states = self.host_manager.get_all_host_states(ctxt) + _mock_service_get_all_by_topic.assert_called_once_with( + ctxt, CONF.volume_topic) + return host_states + + def test_default_of_spreading_first(self): + hostinfo_list = self._get_all_hosts() + + # host1: free_capacity_gb=1024, free=1024*(1-0.1) + # host2: free_capacity_gb=300, free=300*(1-0.1) + # host3: free_capacity_gb=512, free=256 + # host4: free_capacity_gb=200, free=200*(1-0.05) + + # so, host1 should win: + weighed_host = self._get_weighed_host(hostinfo_list) + self.assertEqual(weighed_host.weight, 921.0) + self.assertEqual(weighed_host.obj.host, 'host1') + + def test_capacity_weight_multiplier1(self): + self.flags(capacity_weight_multiplier=-1.0) + hostinfo_list = self._get_all_hosts() + + # host1: free_capacity_gb=1024, free=-1024*(1-0.1) + # host2: free_capacity_gb=300, free=-300*(1-0.1) + # host3: free_capacity_gb=512, free=-256 + # host4: free_capacity_gb=200, free=-200*(1-0.05) + + # so, host4 should win: + weighed_host = self._get_weighed_host(hostinfo_list) + self.assertEqual(weighed_host.weight, -190.0) + self.assertEqual(weighed_host.obj.host, 'host4') + + def test_capacity_weight_multiplier2(self): + self.flags(capacity_weight_multiplier=2.0) + hostinfo_list = self._get_all_hosts() + + # host1: free_capacity_gb=1024, free=1024*(1-0.1)*2 + # host2: free_capacity_gb=300, free=300*(1-0.1)*2 + # host3: free_capacity_gb=512, free=256*2 + # host4: free_capacity_gb=200, free=200*(1-0.05)*2 + + # so, host1 should win: + weighed_host = self._get_weighed_host(hostinfo_list) + self.assertEqual(weighed_host.weight, 921.0 * 2) + self.assertEqual(weighed_host.obj.host, 'host1') diff --git a/cinder/tests/scheduler/test_chance_weigher.py b/cinder/tests/scheduler/test_chance_weigher.py new file mode 100644 index 0000000000..5f7f2a93d8 --- /dev/null +++ b/cinder/tests/scheduler/test_chance_weigher.py @@ -0,0 +1,67 @@ +# Copyright (C) 2013 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Tests For Chance Weigher. 
+""" + +import mock + +from cinder.scheduler import host_manager +from cinder.scheduler.weights.chance import ChanceWeigher +from cinder import test + + +class ChanceWeigherTestCase(test.TestCase): + def setUp(self): + super(ChanceWeigherTestCase, self).setUp() + + def fake_random(self, reset=False): + if reset: + self.not_random_float = 0.0 + else: + self.not_random_float += 1.0 + return self.not_random_float + + @mock.patch('random.random') + def test_chance_weigher(self, _mock_random): + # stub random.random() to verify the ChanceWeigher + # is using random.random() (repeated calls to weigh should + # return incrementing weights) + weigher = ChanceWeigher() + _mock_random.side_effect = self.fake_random + self.fake_random(reset=True) + host_state = {'host': 'host.example.com', 'free_capacity_gb': 99999} + weight = weigher._weigh_object(host_state, None) + self.assertEqual(1.0, weight) + weight = weigher._weigh_object(host_state, None) + self.assertEqual(2.0, weight) + weight = weigher._weigh_object(host_state, None) + self.assertEqual(3.0, weight) + + def test_host_manager_choosing_chance_weigher(self): + # ensure HostManager can load the ChanceWeigher + # via the entry points mechanism + hm = host_manager.HostManager() + weighers = hm._choose_host_weighers('ChanceWeigher') + self.assertEqual(1, len(weighers)) + self.assertEqual(weighers[0], ChanceWeigher) + + def test_use_of_chance_weigher_via_host_manager(self): + # ensure we don't lose any hosts when weighing with + # the ChanceWeigher + hm = host_manager.HostManager() + fake_hosts = [host_manager.HostState('fake_host%s' % x) + for x in xrange(1, 5)] + weighed_hosts = hm.get_weighed_hosts(fake_hosts, {}, 'ChanceWeigher') + self.assertEqual(4, len(weighed_hosts)) diff --git a/cinder/tests/scheduler/test_filter_scheduler.py b/cinder/tests/scheduler/test_filter_scheduler.py new file mode 100644 index 0000000000..532c20922e --- /dev/null +++ b/cinder/tests/scheduler/test_filter_scheduler.py @@ -0,0 +1,302 @@ +# Copyright 2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Tests For Filter Scheduler. +""" + +import mock + +from cinder import context +from cinder import exception +from cinder.scheduler import filter_scheduler +from cinder.scheduler import host_manager +from cinder.tests.scheduler import fakes +from cinder.tests.scheduler import test_scheduler + + +class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase): + """Test case for Filter Scheduler.""" + + driver_cls = filter_scheduler.FilterScheduler + + def test_create_volume_no_hosts(self): + # Ensure empty hosts/child_zones result in NoValidHosts exception. 
+ sched = fakes.FakeFilterScheduler() + + fake_context = context.RequestContext('user', 'project') + request_spec = {'volume_properties': {'project_id': 1, + 'size': 1}, + 'volume_type': {'name': 'LVM_iSCSI'}, + 'volume_id': ['fake-id1']} + self.assertRaises(exception.NoValidHost, sched.schedule_create_volume, + fake_context, request_spec, {}) + + @mock.patch('cinder.scheduler.host_manager.HostManager.' + 'get_all_host_states') + def test_create_volume_non_admin(self, _mock_get_all_host_states): + # Test creating a volume locally using create_volume, passing + # a non-admin context. DB actions should work. + self.was_admin = False + + def fake_get(ctxt): + # Make sure this is called with admin context, even though + # we're using user context below. + self.was_admin = ctxt.is_admin + return {} + + sched = fakes.FakeFilterScheduler() + _mock_get_all_host_states.side_effect = fake_get + + fake_context = context.RequestContext('user', 'project') + + request_spec = {'volume_properties': {'project_id': 1, + 'size': 1}, + 'volume_type': {'name': 'LVM_iSCSI'}, + 'volume_id': ['fake-id1']} + self.assertRaises(exception.NoValidHost, sched.schedule_create_volume, + fake_context, request_spec, {}) + self.assertTrue(self.was_admin) + + @mock.patch('cinder.db.service_get_all_by_topic') + def test_schedule_happy_day(self, _mock_service_get_all_by_topic): + # Make sure there's nothing glaringly wrong with _schedule() + # by doing a happy day pass through. + sched = fakes.FakeFilterScheduler() + sched.host_manager = fakes.FakeHostManager() + fake_context = context.RequestContext('user', 'project', + is_admin=True) + + fakes.mock_host_manager_db_calls(_mock_service_get_all_by_topic) + + request_spec = {'volume_type': {'name': 'LVM_iSCSI'}, + 'volume_properties': {'project_id': 1, + 'size': 1}} + weighed_host = sched._schedule(fake_context, request_spec, {}) + self.assertIsNotNone(weighed_host.obj) + self.assertTrue(_mock_service_get_all_by_topic.called) + + def test_max_attempts(self): + self.flags(scheduler_max_attempts=4) + + sched = fakes.FakeFilterScheduler() + self.assertEqual(4, sched._max_attempts()) + + def test_invalid_max_attempts(self): + self.flags(scheduler_max_attempts=0) + + self.assertRaises(exception.InvalidParameterValue, + fakes.FakeFilterScheduler) + + def test_retry_disabled(self): + # Retry info should not get populated when re-scheduling is off. + self.flags(scheduler_max_attempts=1) + sched = fakes.FakeFilterScheduler() + + request_spec = {'volume_type': {'name': 'LVM_iSCSI'}, + 'volume_properties': {'project_id': 1, + 'size': 1}} + filter_properties = {} + + sched._schedule(self.context, request_spec, + filter_properties=filter_properties) + + # Should not have retry info in the populated filter properties. + self.assertNotIn("retry", filter_properties) + + def test_retry_attempt_one(self): + # Test retry logic on initial scheduling attempt. + self.flags(scheduler_max_attempts=2) + sched = fakes.FakeFilterScheduler() + + request_spec = {'volume_type': {'name': 'LVM_iSCSI'}, + 'volume_properties': {'project_id': 1, + 'size': 1}} + filter_properties = {} + + sched._schedule(self.context, request_spec, + filter_properties=filter_properties) + + num_attempts = filter_properties['retry']['num_attempts'] + self.assertEqual(1, num_attempts) + + def test_retry_attempt_two(self): + # Test retry logic when re-scheduling. 
+ self.flags(scheduler_max_attempts=2) + sched = fakes.FakeFilterScheduler() + + request_spec = {'volume_type': {'name': 'LVM_iSCSI'}, + 'volume_properties': {'project_id': 1, + 'size': 1}} + + retry = dict(num_attempts=1) + filter_properties = dict(retry=retry) + + sched._schedule(self.context, request_spec, + filter_properties=filter_properties) + + num_attempts = filter_properties['retry']['num_attempts'] + self.assertEqual(2, num_attempts) + + def test_retry_exceeded_max_attempts(self): + # Test for necessary explosion when max retries is exceeded. + self.flags(scheduler_max_attempts=2) + sched = fakes.FakeFilterScheduler() + + request_spec = {'volume_type': {'name': 'LVM_iSCSI'}, + 'volume_properties': {'project_id': 1, + 'size': 1}} + + retry = dict(num_attempts=2) + filter_properties = dict(retry=retry) + + self.assertRaises(exception.NoValidHost, sched._schedule, self.context, + request_spec, filter_properties=filter_properties) + + def test_add_retry_host(self): + retry = dict(num_attempts=1, hosts=[]) + filter_properties = dict(retry=retry) + host = "fakehost" + + sched = fakes.FakeFilterScheduler() + sched._add_retry_host(filter_properties, host) + + hosts = filter_properties['retry']['hosts'] + self.assertEqual(1, len(hosts)) + self.assertEqual(host, hosts[0]) + + def test_post_select_populate(self): + # Test addition of certain filter props after a node is selected. + retry = {'hosts': [], 'num_attempts': 1} + filter_properties = {'retry': retry} + sched = fakes.FakeFilterScheduler() + + host_state = host_manager.HostState('host') + host_state.total_capacity_gb = 1024 + sched._post_select_populate_filter_properties(filter_properties, + host_state) + + self.assertEqual('host', + filter_properties['retry']['hosts'][0]) + + self.assertEqual(1024, host_state.total_capacity_gb) + + def _host_passes_filters_setup(self, mock_obj): + sched = fakes.FakeFilterScheduler() + sched.host_manager = fakes.FakeHostManager() + fake_context = context.RequestContext('user', 'project', + is_admin=True) + + fakes.mock_host_manager_db_calls(mock_obj) + + return (sched, fake_context) + + @mock.patch('cinder.db.service_get_all_by_topic') + def test_host_passes_filters_happy_day(self, _mock_service_get_topic): + """Do a successful pass through of with host_passes_filters().""" + sched, ctx = self._host_passes_filters_setup( + _mock_service_get_topic) + request_spec = {'volume_id': 1, + 'volume_type': {'name': 'LVM_iSCSI'}, + 'volume_properties': {'project_id': 1, + 'size': 1}} + ret_host = sched.host_passes_filters(ctx, 'host1', request_spec, {}) + self.assertEqual(ret_host.host, 'host1') + self.assertTrue(_mock_service_get_topic.called) + + @mock.patch('cinder.db.service_get_all_by_topic') + def test_host_passes_filters_no_capacity(self, _mock_service_get_topic): + """Fail the host due to insufficient capacity.""" + sched, ctx = self._host_passes_filters_setup( + _mock_service_get_topic) + request_spec = {'volume_id': 1, + 'volume_type': {'name': 'LVM_iSCSI'}, + 'volume_properties': {'project_id': 1, + 'size': 1024}} + self.assertRaises(exception.NoValidHost, + sched.host_passes_filters, + ctx, 'host1', request_spec, {}) + self.assertTrue(_mock_service_get_topic.called) + + @mock.patch('cinder.db.service_get_all_by_topic') + def test_retype_policy_never_migrate_pass(self, _mock_service_get_topic): + # Retype should pass if current host passes filters and + # policy=never. 
host4 doesn't have enough space to hold an additional + # 200GB, but it is already the host of this volume and should not be + # counted twice. + sched, ctx = self._host_passes_filters_setup( + _mock_service_get_topic) + extra_specs = {'volume_backend_name': 'lvm4'} + request_spec = {'volume_id': 1, + 'volume_type': {'name': 'LVM_iSCSI', + 'extra_specs': extra_specs}, + 'volume_properties': {'project_id': 1, + 'size': 200, + 'host': 'host4'}} + host_state = sched.find_retype_host(ctx, request_spec, + filter_properties={}, + migration_policy='never') + self.assertEqual(host_state.host, 'host4') + + @mock.patch('cinder.db.service_get_all_by_topic') + def test_retype_policy_never_migrate_fail(self, _mock_service_get_topic): + # Retype should fail if current host doesn't pass filters and + # policy=never. + sched, ctx = self._host_passes_filters_setup( + _mock_service_get_topic) + extra_specs = {'volume_backend_name': 'lvm1'} + request_spec = {'volume_id': 1, + 'volume_type': {'name': 'LVM_iSCSI', + 'extra_specs': extra_specs}, + 'volume_properties': {'project_id': 1, + 'size': 200, + 'host': 'host4'}} + self.assertRaises(exception.NoValidHost, sched.find_retype_host, ctx, + request_spec, filter_properties={}, + migration_policy='never') + + @mock.patch('cinder.db.service_get_all_by_topic') + def test_retype_policy_demand_migrate_pass(self, _mock_service_get_topic): + # Retype should pass if current host fails filters but another host + # is suitable when policy=on-demand. + sched, ctx = self._host_passes_filters_setup( + _mock_service_get_topic) + extra_specs = {'volume_backend_name': 'lvm1'} + request_spec = {'volume_id': 1, + 'volume_type': {'name': 'LVM_iSCSI', + 'extra_specs': extra_specs}, + 'volume_properties': {'project_id': 1, + 'size': 200, + 'host': 'host4'}} + host_state = sched.find_retype_host(ctx, request_spec, + filter_properties={}, + migration_policy='on-demand') + self.assertEqual(host_state.host, 'host1') + + @mock.patch('cinder.db.service_get_all_by_topic') + def test_retype_policy_demand_migrate_fail(self, _mock_service_get_topic): + # Retype should fail if current host doesn't pass filters and + # no other suitable candidates exist even if policy=on-demand. + sched, ctx = self._host_passes_filters_setup( + _mock_service_get_topic) + extra_specs = {'volume_backend_name': 'lvm1'} + request_spec = {'volume_id': 1, + 'volume_type': {'name': 'LVM_iSCSI', + 'extra_specs': extra_specs}, + 'volume_properties': {'project_id': 1, + 'size': 2048, + 'host': 'host4'}} + self.assertRaises(exception.NoValidHost, sched.find_retype_host, ctx, + request_spec, filter_properties={}, + migration_policy='on-demand') diff --git a/cinder/tests/scheduler/test_host_filters.py b/cinder/tests/scheduler/test_host_filters.py new file mode 100644 index 0000000000..231943aae2 --- /dev/null +++ b/cinder/tests/scheduler/test_host_filters.py @@ -0,0 +1,92 @@ +# Copyright 2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Tests For Scheduler Host Filters. 
+""" + +import mock + +from cinder import context +from cinder.openstack.common import jsonutils +from cinder.openstack.common.scheduler import filters +from cinder import test +from cinder.tests.scheduler import fakes + + +class HostFiltersTestCase(test.TestCase): + """Test case for host filters.""" + + def setUp(self): + super(HostFiltersTestCase, self).setUp() + self.context = context.RequestContext('fake', 'fake') + self.json_query = jsonutils.dumps( + ['and', + ['>=', '$free_capacity_gb', 1024], + ['>=', '$total_capacity_gb', 10 * 1024]]) + # This has a side effect of testing 'get_filter_classes' + # when specifying a method (in this case, our standard filters) + filter_handler = filters.HostFilterHandler('cinder.scheduler.filters') + classes = filter_handler.get_all_classes() + self.class_map = {} + for cls in classes: + self.class_map[cls.__name__] = cls + + @mock.patch('cinder.utils.service_is_up') + def test_capacity_filter_passes(self, _mock_serv_is_up): + _mock_serv_is_up.return_value = True + filt_cls = self.class_map['CapacityFilter']() + filter_properties = {'size': 100} + service = {'disabled': False} + host = fakes.FakeHostState('host1', + {'free_capacity_gb': 200, + 'updated_at': None, + 'service': service}) + self.assertTrue(filt_cls.host_passes(host, filter_properties)) + + @mock.patch('cinder.utils.service_is_up') + def test_capacity_filter_fails(self, _mock_serv_is_up): + _mock_serv_is_up.return_value = True + filt_cls = self.class_map['CapacityFilter']() + filter_properties = {'size': 100} + service = {'disabled': False} + host = fakes.FakeHostState('host1', + {'free_capacity_gb': 120, + 'reserved_percentage': 20, + 'updated_at': None, + 'service': service}) + self.assertFalse(filt_cls.host_passes(host, filter_properties)) + + @mock.patch('cinder.utils.service_is_up') + def test_capacity_filter_passes_infinite(self, _mock_serv_is_up): + _mock_serv_is_up.return_value = True + filt_cls = self.class_map['CapacityFilter']() + filter_properties = {'size': 100} + service = {'disabled': False} + host = fakes.FakeHostState('host1', + {'free_capacity_gb': 'infinite', + 'updated_at': None, + 'service': service}) + self.assertTrue(filt_cls.host_passes(host, filter_properties)) + + @mock.patch('cinder.utils.service_is_up') + def test_capacity_filter_passes_unknown(self, _mock_serv_is_up): + _mock_serv_is_up.return_value = True + filt_cls = self.class_map['CapacityFilter']() + filter_properties = {'size': 100} + service = {'disabled': False} + host = fakes.FakeHostState('host1', + {'free_capacity_gb': 'unknown', + 'updated_at': None, + 'service': service}) + self.assertTrue(filt_cls.host_passes(host, filter_properties)) diff --git a/cinder/tests/scheduler/test_host_manager.py b/cinder/tests/scheduler/test_host_manager.py new file mode 100644 index 0000000000..04ba8506df --- /dev/null +++ b/cinder/tests/scheduler/test_host_manager.py @@ -0,0 +1,233 @@ +# Copyright (c) 2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+""" +Tests For HostManager +""" + +import mock + +from oslo.config import cfg + +from cinder import exception +from cinder.openstack.common.scheduler import filters +from cinder.openstack.common import timeutils +from cinder.scheduler import host_manager +from cinder import test + + +CONF = cfg.CONF + + +class FakeFilterClass1(filters.BaseHostFilter): + def host_passes(self, host_state, filter_properties): + pass + + +class FakeFilterClass2(filters.BaseHostFilter): + def host_passes(self, host_state, filter_properties): + pass + + +class HostManagerTestCase(test.TestCase): + """Test case for HostManager class.""" + + def setUp(self): + super(HostManagerTestCase, self).setUp() + self.host_manager = host_manager.HostManager() + self.fake_hosts = [host_manager.HostState('fake_host%s' % x) + for x in xrange(1, 5)] + + def test_choose_host_filters_not_found(self): + self.flags(scheduler_default_filters='FakeFilterClass3') + self.host_manager.filter_classes = [FakeFilterClass1, + FakeFilterClass2] + self.assertRaises(exception.SchedulerHostFilterNotFound, + self.host_manager._choose_host_filters, None) + + def test_choose_host_filters(self): + self.flags(scheduler_default_filters=['FakeFilterClass2']) + self.host_manager.filter_classes = [FakeFilterClass1, + FakeFilterClass2] + + # Test 'volume' returns 1 correct function + filter_classes = self.host_manager._choose_host_filters(None) + self.assertEqual(len(filter_classes), 1) + self.assertEqual(filter_classes[0].__name__, 'FakeFilterClass2') + + @mock.patch('cinder.scheduler.host_manager.HostManager.' + '_choose_host_filters') + def test_get_filtered_hosts(self, _mock_choose_host_filters): + filter_class = FakeFilterClass1 + mock_func = mock.Mock() + mock_func.return_value = True + filter_class._filter_one = mock_func + _mock_choose_host_filters.return_value = [filter_class] + + fake_properties = {'moo': 1, 'cow': 2} + expected = [] + for fake_host in self.fake_hosts: + expected.append(mock.call(fake_host, fake_properties)) + + result = self.host_manager.get_filtered_hosts(self.fake_hosts, + fake_properties) + self.assertEqual(expected, mock_func.call_args_list) + self.assertEqual(set(result), set(self.fake_hosts)) + + @mock.patch('cinder.openstack.common.timeutils.utcnow') + def test_update_service_capabilities(self, _mock_utcnow): + service_states = self.host_manager.service_states + self.assertDictMatch(service_states, {}) + _mock_utcnow.side_effect = [31337, 31338, 31339] + + host1_volume_capabs = dict(free_capacity_gb=4321, timestamp=1) + host2_volume_capabs = dict(free_capacity_gb=5432, timestamp=1) + host3_volume_capabs = dict(free_capacity_gb=6543, timestamp=1) + + service_name = 'volume' + self.host_manager.update_service_capabilities(service_name, 'host1', + host1_volume_capabs) + self.host_manager.update_service_capabilities(service_name, 'host2', + host2_volume_capabs) + self.host_manager.update_service_capabilities(service_name, 'host3', + host3_volume_capabs) + + # Make sure dictionary isn't re-assigned + self.assertEqual(self.host_manager.service_states, service_states) + # Make sure original dictionary wasn't copied + self.assertEqual(host1_volume_capabs['timestamp'], 1) + + host1_volume_capabs['timestamp'] = 31337 + host2_volume_capabs['timestamp'] = 31338 + host3_volume_capabs['timestamp'] = 31339 + + expected = {'host1': host1_volume_capabs, + 'host2': host2_volume_capabs, + 'host3': host3_volume_capabs} + self.assertDictMatch(service_states, expected) + + @mock.patch('cinder.db.service_get_all_by_topic') + 
@mock.patch('cinder.utils.service_is_up')
+    def test_get_all_host_states(self, _mock_service_is_up,
+                                 _mock_service_get_all_by_topic):
+        context = 'fake_context'
+        topic = CONF.volume_topic
+
+        services = [
+            dict(id=1, host='host1', topic='volume', disabled=False,
+                 availability_zone='zone1', updated_at=timeutils.utcnow()),
+            dict(id=2, host='host2', topic='volume', disabled=False,
+                 availability_zone='zone1', updated_at=timeutils.utcnow()),
+            dict(id=3, host='host3', topic='volume', disabled=False,
+                 availability_zone='zone2', updated_at=timeutils.utcnow()),
+            dict(id=4, host='host4', topic='volume', disabled=False,
+                 availability_zone='zone3', updated_at=timeutils.utcnow()),
+            # service on host5 is disabled
+            dict(id=5, host='host5', topic='volume', disabled=True,
+                 availability_zone='zone4', updated_at=timeutils.utcnow()),
+        ]
+
+        # First test: service_is_up is always True, host5 is disabled
+        _mock_service_get_all_by_topic.return_value = services
+        _mock_service_is_up.return_value = True
+        _mock_warning = mock.Mock()
+        host_manager.LOG.warn = _mock_warning
+
+        # Get all states, make sure host5 is reported as down/disabled
+        self.host_manager.get_all_host_states(context)
+        _mock_service_get_all_by_topic.assert_called_with(context, topic)
+        expected = []
+        for service in services:
+            expected.append(mock.call(service))
+        self.assertEqual(expected, _mock_service_is_up.call_args_list)
+        _mock_warning.assert_called_with("volume service is down or disabled. "
+                                         "(host: host5)")
+
+        # Get host_state_map and make sure we have the first 4 hosts
+        host_state_map = self.host_manager.host_state_map
+        self.assertEqual(len(host_state_map), 4)
+        for i in xrange(4):
+            volume_node = services[i]
+            host = volume_node['host']
+            self.assertEqual(host_state_map[host].service, volume_node)
+
+        # Second test: Now service_is_up returns False for host4
+        _mock_service_is_up.reset_mock()
+        _mock_service_is_up.side_effect = [True, True, True, False, True]
+        _mock_service_get_all_by_topic.reset_mock()
+        _mock_warning.reset_mock()
+
+        # Get all states; hosts 4 and 5 should be reported as down/disabled
+        self.host_manager.get_all_host_states(context)
+        _mock_service_get_all_by_topic.assert_called_with(context, topic)
+        expected = []
+        for service in services:
+            expected.append(mock.call(service))
+        self.assertEqual(expected, _mock_service_is_up.call_args_list)
+        expected = []
+        for num in ['4', '5']:
+            expected.append(mock.call("volume service is down or disabled. "
+                                      "(host: host" + num + ")"))
+        self.assertEqual(expected, _mock_warning.call_args_list)
+
+        # Get host_state_map and make sure we only have the first 3 hosts
+        # (host4 is down, host5 is disabled)
+        host_state_map = self.host_manager.host_state_map
+        self.assertEqual(len(host_state_map), 3)
+        for i in xrange(3):
+            volume_node = services[i]
+            host = volume_node['host']
+            self.assertEqual(host_state_map[host].service,
+                             volume_node)
+
+
+class HostStateTestCase(test.TestCase):
+    """Test case for HostState class."""
+
+    def test_update_from_volume_capability(self):
+        fake_host = host_manager.HostState('host1')
+        self.assertIsNone(fake_host.free_capacity_gb)
+
+        volume_capability = {'total_capacity_gb': 1024,
+                             'free_capacity_gb': 512,
+                             'reserved_percentage': 0,
+                             'timestamp': None}
+
+        fake_host.update_from_volume_capability(volume_capability)
+        self.assertEqual(fake_host.free_capacity_gb, 512)
+
+    def test_update_from_volume_infinite_capability(self):
+        fake_host = host_manager.HostState('host1')
+        self.assertIsNone(fake_host.free_capacity_gb)
+
+        volume_capability = {'total_capacity_gb': 'infinite',
+                             'free_capacity_gb': 'infinite',
+                             'reserved_percentage': 0,
+                             'timestamp': None}
+
+        fake_host.update_from_volume_capability(volume_capability)
+        self.assertEqual(fake_host.total_capacity_gb, 'infinite')
+        self.assertEqual(fake_host.free_capacity_gb, 'infinite')
+
+    def test_update_from_volume_unknown_capability(self):
+        fake_host = host_manager.HostState('host1')
+        self.assertIsNone(fake_host.free_capacity_gb)
+
+        volume_capability = {'total_capacity_gb': 'infinite',
+                             'free_capacity_gb': 'unknown',
+                             'reserved_percentage': 0,
+                             'timestamp': None}
+
+        fake_host.update_from_volume_capability(volume_capability)
+        self.assertEqual(fake_host.total_capacity_gb, 'infinite')
+        self.assertEqual(fake_host.free_capacity_gb, 'unknown')
diff --git a/cinder/tests/scheduler/test_rpcapi.py b/cinder/tests/scheduler/test_rpcapi.py
index a6ba60d45b..97759f9a69 100644
--- a/cinder/tests/scheduler/test_rpcapi.py
+++ b/cinder/tests/scheduler/test_rpcapi.py
@@ -1,4 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
 # Copyright 2012, Red Hat, Inc.
# @@ -18,14 +17,17 @@ Unit Tests for cinder.scheduler.rpcapi """ + +import mock + +from oslo.config import cfg + from cinder import context -from cinder import flags -from cinder.openstack.common import rpc from cinder.scheduler import rpcapi as scheduler_rpcapi from cinder import test -FLAGS = flags.FLAGS +CONF = cfg.CONF class SchedulerRpcAPITestCase(test.TestCase): @@ -36,12 +38,13 @@ def setUp(self): def tearDown(self): super(SchedulerRpcAPITestCase, self).tearDown() - def _test_scheduler_api(self, method, rpc_method, **kwargs): + def _test_scheduler_api(self, method, rpc_method, _mock_method, **kwargs): ctxt = context.RequestContext('fake_user', 'fake_project') rpcapi = scheduler_rpcapi.SchedulerAPI() - expected_retval = 'foo' if method == 'call' else None + expected_retval = 'foo' if rpc_method == 'call' else None + expected_version = kwargs.pop('version', rpcapi.RPC_API_VERSION) expected_msg = rpcapi.make_msg(method, **kwargs) - expected_msg['version'] = rpcapi.RPC_API_VERSION + expected_msg['version'] = expected_version self.fake_args = None self.fake_kwargs = None @@ -52,16 +55,57 @@ def _fake_rpc_method(*args, **kwargs): if expected_retval: return expected_retval - self.stubs.Set(rpc, rpc_method, _fake_rpc_method) + _mock_method.side_effect = _fake_rpc_method retval = getattr(rpcapi, method)(ctxt, **kwargs) self.assertEqual(retval, expected_retval) - expected_args = [ctxt, FLAGS.scheduler_topic, expected_msg] + expected_args = [ctxt, CONF.scheduler_topic, expected_msg] for arg, expected_arg in zip(self.fake_args, expected_args): self.assertEqual(arg, expected_arg) - def test_update_service_capabilities(self): + @mock.patch('cinder.openstack.common.rpc.fanout_cast') + def test_update_service_capabilities(self, _mock_rpc_method): self._test_scheduler_api('update_service_capabilities', - rpc_method='fanout_cast', service_name='fake_name', - host='fake_host', capabilities='fake_capabilities') + rpc_method='fanout_cast', + _mock_method=_mock_rpc_method, + service_name='fake_name', + host='fake_host', + capabilities='fake_capabilities') + + @mock.patch('cinder.openstack.common.rpc.cast') + def test_create_volume(self, _mock_rpc_method): + self._test_scheduler_api('create_volume', + rpc_method='cast', + _mock_method=_mock_rpc_method, + topic='topic', + volume_id='volume_id', + snapshot_id='snapshot_id', + image_id='image_id', + request_spec='fake_request_spec', + filter_properties='filter_properties', + version='1.2') + + @mock.patch('cinder.openstack.common.rpc.cast') + def test_migrate_volume_to_host(self, _mock_rpc_method): + self._test_scheduler_api('migrate_volume_to_host', + rpc_method='cast', + _mock_method=_mock_rpc_method, + topic='topic', + volume_id='volume_id', + host='host', + force_host_copy=True, + request_spec='fake_request_spec', + filter_properties='filter_properties', + version='1.3') + + @mock.patch('cinder.openstack.common.rpc.cast') + def test_retype(self, _mock_rpc_method): + self._test_scheduler_api('retype', + rpc_method='cast', + _mock_method=_mock_rpc_method, + topic='topic', + volume_id='volume_id', + request_spec='fake_request_spec', + filter_properties='filter_properties', + version='1.4') diff --git a/cinder/tests/scheduler/test_scheduler.py b/cinder/tests/scheduler/test_scheduler.py index f6a74b58d3..42c7180eea 100644 --- a/cinder/tests/scheduler/test_scheduler.py +++ b/cinder/tests/scheduler/test_scheduler.py @@ -1,4 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the 
National Aeronautics and Space Administration. @@ -19,22 +18,22 @@ Tests For Scheduler """ +import mock +from oslo.config import cfg from cinder import context -from cinder import db -from cinder import flags -from cinder.openstack.common import rpc -from cinder.openstack.common import timeutils +from cinder import exception from cinder.scheduler import driver +from cinder.scheduler import filter_scheduler from cinder.scheduler import manager from cinder import test -from cinder import utils -FLAGS = flags.FLAGS + +CONF = cfg.CONF class SchedulerManagerTestCase(test.TestCase): - """Test case for scheduler manager""" + """Test case for scheduler manager.""" manager_cls = manager.SchedulerManager driver_cls = driver.Scheduler @@ -55,91 +54,143 @@ def setUp(self): def test_1_correct_init(self): # Correct scheduler driver manager = self.manager - self.assertTrue(isinstance(manager.driver, self.driver_cls)) - - def test_get_host_list(self): - expected = 'fake_hosts' - - self.mox.StubOutWithMock(self.manager.driver, 'get_host_list') - self.manager.driver.get_host_list().AndReturn(expected) - - self.mox.ReplayAll() - result = self.manager.get_host_list(self.context) - self.assertEqual(result, expected) - - def test_get_service_capabilities(self): - expected = 'fake_service_capabs' + self.assertIsInstance(manager.driver, self.driver_cls) - self.mox.StubOutWithMock(self.manager.driver, - 'get_service_capabilities') - self.manager.driver.get_service_capabilities().AndReturn( - expected) - - self.mox.ReplayAll() - result = self.manager.get_service_capabilities(self.context) - self.assertEqual(result, expected) - - def test_update_service_capabilities(self): - service_name = 'fake_service' + @mock.patch('cinder.scheduler.driver.Scheduler.' + 'update_service_capabilities') + def test_update_service_capabilities_empty_dict(self, _mock_update_cap): + # Test no capabilities passes empty dictionary + service = 'fake_service' host = 'fake_host' - self.mox.StubOutWithMock(self.manager.driver, - 'update_service_capabilities') + self.manager.update_service_capabilities(self.context, + service_name=service, + host=host) + _mock_update_cap.assert_called_once_with(service, host, {}) - # Test no capabilities passes empty dictionary - self.manager.driver.update_service_capabilities(service_name, - host, {}) - self.mox.ReplayAll() - result = self.manager.update_service_capabilities(self.context, - service_name=service_name, host=host) - self.mox.VerifyAll() - - self.mox.ResetAll() + @mock.patch('cinder.scheduler.driver.Scheduler.' 
+                'update_service_capabilities')
+    def test_update_service_capabilities_correct(self, _mock_update_cap):
         # Test capabilities passes correctly
+        service = 'fake_service'
+        host = 'fake_host'
         capabilities = {'fake_capability': 'fake_value'}
-        self.manager.driver.update_service_capabilities(
-            service_name, host, capabilities)
-        self.mox.ReplayAll()
-        result = self.manager.update_service_capabilities(self.context,
-                service_name=service_name, host=host,
-                capabilities=capabilities)
-
-    def test_existing_method(self):
-        def stub_method(self, *args, **kwargs):
-            pass
-        setattr(self.manager.driver, 'schedule_stub_method', stub_method)
-
-        self.mox.StubOutWithMock(self.manager.driver,
-                                 'schedule_stub_method')
-        self.manager.driver.schedule_stub_method(self.context,
-                *self.fake_args, **self.fake_kwargs)
-
-        self.mox.ReplayAll()
-        self.manager.stub_method(self.context, self.topic,
-                *self.fake_args, **self.fake_kwargs)
-
-    def test_missing_method_fallback(self):
-        self.mox.StubOutWithMock(self.manager.driver, 'schedule')
-        self.manager.driver.schedule(self.context, self.topic,
-                'noexist', *self.fake_args, **self.fake_kwargs)
-
-        self.mox.ReplayAll()
-        self.manager.noexist(self.context, self.topic,
-                *self.fake_args, **self.fake_kwargs)
-
-    def _mox_schedule_method_helper(self, method_name):
-        # Make sure the method exists that we're going to test call
-        def stub_method(*args, **kwargs):
-            pass
-
-        setattr(self.manager.driver, method_name, stub_method)
-
-        self.mox.StubOutWithMock(self.manager.driver,
-                                 method_name)
+        self.manager.update_service_capabilities(self.context,
+                                                 service_name=service,
+                                                 host=host,
+                                                 capabilities=capabilities)
+        _mock_update_cap.assert_called_once_with(service, host, capabilities)
+
+    @mock.patch('cinder.scheduler.driver.Scheduler.schedule_create_volume')
+    @mock.patch('cinder.db.volume_update')
+    def test_create_volume_exception_puts_volume_in_error_state(
+            self, _mock_volume_update, _mock_sched_create):
+        # Test NoValidHost exception behavior for create_volume.
+        # Puts the volume in 'error' state and eats the exception.
+        _mock_sched_create.side_effect = exception.NoValidHost(reason="")
+        fake_volume_id = 1
+        topic = 'fake_topic'
+        request_spec = {'volume_id': fake_volume_id}
+
+        self.manager.create_volume(self.context, topic, fake_volume_id,
+                                   request_spec=request_spec,
+                                   filter_properties={})
+        _mock_volume_update.assert_called_once_with(self.context,
+                                                    fake_volume_id,
+                                                    {'status': 'error'})
+        _mock_sched_create.assert_called_once_with(self.context, request_spec,
+                                                   {})
+
+    @mock.patch('cinder.scheduler.driver.Scheduler.host_passes_filters')
+    @mock.patch('cinder.db.volume_update')
+    def test_migrate_volume_exception_returns_volume_state(
+            self, _mock_volume_update, _mock_host_passes):
+        # Test NoValidHost exception behavior for migrate_volume_to_host:
+        # the volume's migration_status is reset and the exception is eaten.
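+        # (The assertion below pins this down: volume_update() must be
+        # called with {'migration_status': None}, so a failed migration
+        # leaves the volume usable instead of stuck mid-migration.)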
+ _mock_host_passes.side_effect = exception.NoValidHost(reason="") + fake_volume_id = 1 + topic = 'fake_topic' + request_spec = {'volume_id': fake_volume_id} + + self.manager.migrate_volume_to_host(self.context, topic, + fake_volume_id, 'host', True, + request_spec=request_spec, + filter_properties={}) + _mock_volume_update.assert_called_once_with(self.context, + fake_volume_id, + {'migration_status': None}) + _mock_host_passes.assert_called_once_with(self.context, 'host', + request_spec, {}) + + def test_chance_simple_scheduler_mocked(self): + # Test FilterScheduler is loaded and predefined combination + # of filters and weighers overrides the default value of config option + # scheduler_default_filters and scheduler_default_weighers when + # ChanceScheduler or SimpleScheduler is configured as scheduler_driver. + chance = 'cinder.scheduler.chance.ChanceScheduler' + simple = 'cinder.scheduler.simple.SimpleScheduler' + default_filters = ['AvailabilityZoneFilter', + 'CapacityFilter', + 'CapabilitiesFilter'] + self.flags(scheduler_driver=chance, + scheduler_default_filters=['CapacityFilter'], + scheduler_default_weighers=['CapacityWeigher']) + self.manager = self.manager_cls() + self.assertTrue(isinstance(self.manager.driver, + filter_scheduler.FilterScheduler)) + self.assertEqual(CONF.scheduler_default_filters, + default_filters) + self.assertEqual(CONF.scheduler_default_weighers, + ['ChanceWeigher']) + + self.flags(scheduler_driver=simple, + scheduler_default_filters=['CapacityFilter'], + scheduler_default_weighers=['CapacityWeigher']) + self.manager = self.manager_cls() + self.assertTrue(isinstance(self.manager.driver, + filter_scheduler.FilterScheduler)) + self.assertEqual(CONF.scheduler_default_filters, + default_filters) + self.assertEqual(CONF.scheduler_default_weighers, + ['AllocatedCapacityWeigher']) + + @mock.patch('cinder.db.volume_update') + @mock.patch('cinder.db.volume_get') + def test_retype_volume_exception_returns_volume_state(self, _mock_vol_get, + _mock_vol_update): + # Test NoValidHost exception behavior for retype. + # Puts the volume in original state and eats the exception. + fake_volume_id = 1 + topic = 'fake_topic' + volume_id = fake_volume_id + request_spec = {'volume_id': fake_volume_id, 'volume_type': {'id': 3}, + 'migration_policy': 'on-demand'} + vol_info = {'id': fake_volume_id, 'status': 'in-use', + 'instance_uuid': 'foo', 'attached_host': None} + + _mock_vol_get.return_value = vol_info + _mock_vol_update.return_value = {'status': 'in-use'} + _mock_find_retype_host = mock.Mock( + side_effect=exception.NoValidHost(reason="")) + orig_retype = self.manager.driver.find_retype_host + self.manager.driver.find_retype_host = _mock_find_retype_host + + self.manager.retype(self.context, topic, volume_id, + request_spec=request_spec, + filter_properties={}) + + _mock_vol_get.assert_called_once_with(self.context, fake_volume_id) + _mock_find_retype_host.assert_called_once_with(self.context, + request_spec, {}, + 'on-demand') + _mock_vol_update.assert_called_once_with(self.context, fake_volume_id, + {'status': 'in-use'}) + self.manager.driver.find_retype_host = orig_retype class SchedulerTestCase(test.TestCase): - """Test case for base scheduler driver class""" + """Test case for base scheduler driver class.""" # So we can subclass this test and re-use tests if we need. 
    driver_cls = driver.Scheduler
@@ -150,171 +201,44 @@ def setUp(self):
         self.context = context.RequestContext('fake_user', 'fake_project')
         self.topic = 'fake_topic'

-    def test_get_host_list(self):
-        expected = 'fake_hosts'
-
-        self.mox.StubOutWithMock(self.driver.host_manager, 'get_host_list')
-        self.driver.host_manager.get_host_list().AndReturn(expected)
-
-        self.mox.ReplayAll()
-        result = self.driver.get_host_list()
-        self.assertEqual(result, expected)
-
-    def test_get_service_capabilities(self):
-        expected = 'fake_service_capabs'
-
-        self.mox.StubOutWithMock(self.driver.host_manager,
-                                 'get_service_capabilities')
-        self.driver.host_manager.get_service_capabilities().AndReturn(
-            expected)
-
-        self.mox.ReplayAll()
-        result = self.driver.get_service_capabilities()
-        self.assertEqual(result, expected)
-
-    def test_update_service_capabilities(self):
+    @mock.patch('cinder.scheduler.driver.Scheduler.'
+                'update_service_capabilities')
+    def test_update_service_capabilities(self, _mock_update_cap):
         service_name = 'fake_service'
         host = 'fake_host'
-
-        self.mox.StubOutWithMock(self.driver.host_manager,
-                                 'update_service_capabilities')
-        capabilities = {'fake_capability': 'fake_value'}
-        self.driver.host_manager.update_service_capabilities(
-            service_name, host, capabilities)
-        self.mox.ReplayAll()
-        result = self.driver.update_service_capabilities(service_name,
-                                                         host, capabilities)
-
-    def test_hosts_up(self):
-        service1 = {'host': 'host1'}
-        service2 = {'host': 'host2'}
-        services = [service1, service2]
-
-        self.mox.StubOutWithMock(db, 'service_get_all_by_topic')
-        self.mox.StubOutWithMock(utils, 'service_is_up')
-
-        db.service_get_all_by_topic(self.context,
-                                    self.topic).AndReturn(services)
-        utils.service_is_up(service1).AndReturn(False)
-        utils.service_is_up(service2).AndReturn(True)
-
-        self.mox.ReplayAll()
-        result = self.driver.hosts_up(self.context, self.topic)
-        self.assertEqual(result, ['host2'])
+        capabilities = {'fake_capability': 'fake_value'}
+        self.driver.update_service_capabilities(service_name, host,
+                                                capabilities)
+        _mock_update_cap.assert_called_once_with(service_name, host,
+                                                 capabilities)


 class SchedulerDriverBaseTestCase(SchedulerTestCase):
     """Test cases for base scheduler driver class methods
-    that can't will fail if the driver is changed"""
+    that will fail if the driver is changed.
+ """ def test_unimplemented_schedule(self): fake_args = (1, 2, 3) fake_kwargs = {'cat': 'meow'} self.assertRaises(NotImplementedError, self.driver.schedule, - self.context, self.topic, 'schedule_something', - *fake_args, **fake_kwargs) + self.context, self.topic, 'schedule_something', + *fake_args, **fake_kwargs) class SchedulerDriverModuleTestCase(test.TestCase): - """Test case for scheduler driver module methods""" + """Test case for scheduler driver module methods.""" def setUp(self): super(SchedulerDriverModuleTestCase, self).setUp() self.context = context.RequestContext('fake_user', 'fake_project') - def test_cast_to_volume_host_update_db_with_volume_id(self): - host = 'fake_host1' - method = 'fake_method' - fake_kwargs = {'volume_id': 31337, - 'extra_arg': 'meow'} - queue = 'fake_queue' - - self.mox.StubOutWithMock(timeutils, 'utcnow') - self.mox.StubOutWithMock(db, 'volume_update') - self.mox.StubOutWithMock(rpc, 'queue_get_for') - self.mox.StubOutWithMock(rpc, 'cast') - - timeutils.utcnow().AndReturn('fake-now') - db.volume_update(self.context, 31337, - {'host': host, 'scheduled_at': 'fake-now'}) - rpc.queue_get_for(self.context, - FLAGS.volume_topic, host).AndReturn(queue) - rpc.cast(self.context, queue, - {'method': method, - 'args': fake_kwargs}) - - self.mox.ReplayAll() - driver.cast_to_volume_host(self.context, host, method, - update_db=True, **fake_kwargs) - - def test_cast_to_volume_host_update_db_without_volume_id(self): - host = 'fake_host1' - method = 'fake_method' - fake_kwargs = {'extra_arg': 'meow'} - queue = 'fake_queue' - - self.mox.StubOutWithMock(rpc, 'queue_get_for') - self.mox.StubOutWithMock(rpc, 'cast') - - rpc.queue_get_for(self.context, - FLAGS.volume_topic, host).AndReturn(queue) - rpc.cast(self.context, queue, - {'method': method, - 'args': fake_kwargs}) - - self.mox.ReplayAll() - driver.cast_to_volume_host(self.context, host, method, - update_db=True, **fake_kwargs) - - def test_cast_to_volume_host_no_update_db(self): - host = 'fake_host1' - method = 'fake_method' - fake_kwargs = {'extra_arg': 'meow'} - queue = 'fake_queue' - - self.mox.StubOutWithMock(rpc, 'queue_get_for') - self.mox.StubOutWithMock(rpc, 'cast') - - rpc.queue_get_for(self.context, - FLAGS.volume_topic, host).AndReturn(queue) - rpc.cast(self.context, queue, - {'method': method, - 'args': fake_kwargs}) - - self.mox.ReplayAll() - driver.cast_to_volume_host(self.context, host, method, - update_db=False, **fake_kwargs) - - def test_cast_to_host_volume_topic(self): - host = 'fake_host1' - method = 'fake_method' - fake_kwargs = {'extra_arg': 'meow'} - - self.mox.StubOutWithMock(driver, 'cast_to_volume_host') - driver.cast_to_volume_host(self.context, host, method, - update_db=False, **fake_kwargs) - - self.mox.ReplayAll() - driver.cast_to_host(self.context, 'volume', host, method, - update_db=False, **fake_kwargs) - - def test_cast_to_host_unknown_topic(self): - host = 'fake_host1' - method = 'fake_method' - fake_kwargs = {'extra_arg': 'meow'} - topic = 'unknown' - queue = 'fake_queue' - - self.mox.StubOutWithMock(rpc, 'queue_get_for') - self.mox.StubOutWithMock(rpc, 'cast') - - rpc.queue_get_for(self.context, topic, host).AndReturn(queue) - rpc.cast(self.context, queue, - {'method': method, - 'args': fake_kwargs}) - - self.mox.ReplayAll() - driver.cast_to_host(self.context, topic, host, method, - update_db=False, **fake_kwargs) + @mock.patch('cinder.db.volume_update') + @mock.patch('cinder.openstack.common.timeutils.utcnow') + def test_volume_host_update_db(self, _mock_utcnow, 
_mock_vol_update): + _mock_utcnow.return_value = 'fake-now' + driver.volume_update_db(self.context, 31337, 'fake_host') + _mock_vol_update.assert_called_once_with(self.context, 31337, + {'host': 'fake_host', + 'scheduled_at': 'fake-now'}) diff --git a/cinder/tests/scheduler/test_scheduler_options.py b/cinder/tests/scheduler/test_scheduler_options.py new file mode 100644 index 0000000000..c670c3a511 --- /dev/null +++ b/cinder/tests/scheduler/test_scheduler_options.py @@ -0,0 +1,138 @@ +# Copyright 2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Tests For PickledScheduler. +""" + +import datetime +import StringIO + +from cinder.openstack.common import jsonutils +from cinder.scheduler import scheduler_options +from cinder import test + + +class FakeSchedulerOptions(scheduler_options.SchedulerOptions): + def __init__(self, last_checked, now, file_old, file_now, data, filedata): + super(FakeSchedulerOptions, self).__init__() + # Change internals ... + self.last_modified = file_old + self.last_checked = last_checked + self.data = data + + # For overrides ... + self._time_now = now + self._file_now = file_now + self._file_data = filedata + + self.file_was_loaded = False + + def _get_file_timestamp(self, filename): + return self._file_now + + def _get_file_handle(self, filename): + self.file_was_loaded = True + return StringIO.StringIO(self._file_data) + + def _get_time_now(self): + return self._time_now + + +class SchedulerOptionsTestCase(test.TestCase): + def test_get_configuration_first_time_no_flag(self): + last_checked = None + now = datetime.datetime(2012, 1, 1, 1, 1, 1) + file_old = None + file_now = datetime.datetime(2012, 1, 1, 1, 1, 1) + + data = dict(a=1, b=2, c=3) + jdata = jsonutils.dumps(data) + + fake = FakeSchedulerOptions(last_checked, now, file_old, file_now, + {}, jdata) + self.assertEqual({}, fake.get_configuration()) + self.assertFalse(fake.file_was_loaded) + + def test_get_configuration_first_time_empty_file(self): + last_checked = None + now = datetime.datetime(2012, 1, 1, 1, 1, 1) + file_old = None + file_now = datetime.datetime(2012, 1, 1, 1, 1, 1) + + data = dict(a=1, b=2, c=3) + jdata = "" + + fake = FakeSchedulerOptions(last_checked, now, file_old, file_now, + {}, jdata) + self.assertEqual({}, fake.get_configuration('foo.json')) + self.assertTrue(fake.file_was_loaded) + + def test_get_configuration_first_time_happy_day(self): + last_checked = None + now = datetime.datetime(2012, 1, 1, 1, 1, 1) + file_old = None + file_now = datetime.datetime(2012, 1, 1, 1, 1, 1) + + data = dict(a=1, b=2, c=3) + jdata = jsonutils.dumps(data) + + fake = FakeSchedulerOptions(last_checked, now, file_old, file_now, + {}, jdata) + self.assertEqual(data, fake.get_configuration('foo.json')) + self.assertTrue(fake.file_was_loaded) + + def test_get_configuration_second_time_no_change(self): + last_checked = datetime.datetime(2011, 1, 1, 1, 1, 1) + now = datetime.datetime(2012, 1, 1, 1, 1, 1) + file_old = datetime.datetime(2012, 1, 1, 1, 1, 1) + 
file_now = datetime.datetime(2012, 1, 1, 1, 1, 1) + + data = dict(a=1, b=2, c=3) + jdata = jsonutils.dumps(data) + + fake = FakeSchedulerOptions(last_checked, now, file_old, file_now, + data, jdata) + self.assertEqual(data, fake.get_configuration('foo.json')) + self.assertFalse(fake.file_was_loaded) + + def test_get_configuration_second_time_too_fast(self): + last_checked = datetime.datetime(2011, 1, 1, 1, 1, 1) + now = datetime.datetime(2011, 1, 1, 1, 1, 2) + file_old = datetime.datetime(2012, 1, 1, 1, 1, 1) + file_now = datetime.datetime(2013, 1, 1, 1, 1, 1) + + old_data = dict(a=1, b=2, c=3) + data = dict(a=11, b=12, c=13) + jdata = jsonutils.dumps(data) + + fake = FakeSchedulerOptions(last_checked, now, file_old, file_now, + old_data, jdata) + self.assertEqual(old_data, fake.get_configuration('foo.json')) + self.assertFalse(fake.file_was_loaded) + + def test_get_configuration_second_time_change(self): + last_checked = datetime.datetime(2011, 1, 1, 1, 1, 1) + now = datetime.datetime(2012, 1, 1, 1, 1, 1) + file_old = datetime.datetime(2012, 1, 1, 1, 1, 1) + file_now = datetime.datetime(2013, 1, 1, 1, 1, 1) + + old_data = dict(a=1, b=2, c=3) + data = dict(a=11, b=12, c=13) + jdata = jsonutils.dumps(data) + + fake = FakeSchedulerOptions(last_checked, now, file_old, file_now, + old_data, jdata) + self.assertEqual(data, fake.get_configuration('foo.json')) + self.assertTrue(fake.file_was_loaded) diff --git a/cinder/tests/test_HpSanISCSIDriver.py b/cinder/tests/test_HpSanISCSIDriver.py index e2b802ffe6..6541d26a57 100644 --- a/cinder/tests/test_HpSanISCSIDriver.py +++ b/cinder/tests/test_HpSanISCSIDriver.py @@ -1,4 +1,4 @@ -# Copyright 2012 OpenStack LLC +# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -11,10 +11,14 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
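+
+# A minimal sketch of the reload rule that the SchedulerOptionsTestCase
+# cases above pin down (illustrative only, not the actual
+# cinder.scheduler.scheduler_options source; the five-minute check
+# interval is an assumed placeholder):
+import datetime as _datetime
+
+
+def _options_should_reload(last_checked, now, last_modified, file_now,
+                           min_interval=_datetime.timedelta(minutes=5)):
+    # "second_time_too_fast": polled again before the interval elapsed.
+    if last_checked is not None and now - last_checked < min_interval:
+        return False
+    # "second_time_no_change": the file's mtime has not advanced.
+    if last_modified is not None and file_now <= last_modified:
+        return False
+    # The first call and "second_time_change" both trigger a re-read.
+    return True
+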
+ +import mox + from cinder import exception from cinder.openstack.common import log as logging from cinder import test -from cinder.volume import san +from cinder.volume import configuration as conf +from cinder.volume.drivers.san.hp_lefthand import HpSanISCSIDriver LOG = logging.getLogger(__name__) @@ -23,20 +27,32 @@ class HpSanISCSITestCase(test.TestCase): def setUp(self): super(HpSanISCSITestCase, self).setUp() - self.stubs.Set(san.HpSanISCSIDriver, "_cliq_run", + self.stubs.Set(HpSanISCSIDriver, "_cliq_run", self._fake_cliq_run) - self.stubs.Set(san.HpSanISCSIDriver, "_get_iscsi_properties", + self.stubs.Set(HpSanISCSIDriver, "_get_iscsi_properties", self._fake_get_iscsi_properties) - self.driver = san.HpSanISCSIDriver() + configuration = mox.MockObject(conf.Configuration) + configuration.san_is_local = False + configuration.san_ip = "10.0.0.1" + configuration.san_login = "foo" + configuration.san_password = "bar" + configuration.san_ssh_port = 16022 + configuration.san_clustername = "CloudCluster1" + configuration.san_thin_provision = True + configuration.append_config_values(mox.IgnoreArg()) + + self.driver = HpSanISCSIDriver(configuration=configuration) self.volume_name = "fakevolume" + self.snapshot_name = "fakeshapshot" self.connector = {'ip': '10.0.0.2', 'initiator': 'iqn.1993-08.org.debian:01:222', 'host': 'fakehost'} - self.properties = {'target_discoverd': True, - 'target_portal': '10.0.1.6:3260', - 'target_iqn': - 'iqn.2003-10.com.lefthandnetworks:group01:25366:fakev', - 'volume_id': 1} + self.properties = { + 'target_discoverd': True, + 'target_portal': '10.0.1.6:3260', + 'target_iqn': + 'iqn.2003-10.com.lefthandnetworks:group01:25366:fakev', + 'volume_id': 1} def tearDown(self): super(HpSanISCSITestCase, self).tearDown() @@ -44,11 +60,12 @@ def tearDown(self): def _fake_get_iscsi_properties(self, volume): return self.properties - def _fake_cliq_run(self, verb, cliq_args): + def _fake_cliq_run(self, verb, cliq_args, check_exit_code=True): """Return fake results for the various methods.""" def create_volume(cliq_args): - """ + """Create volume CLIQ input for test. + input = "createVolume description="fake description" clusterName=Cluster01 volumeName=fakevolume thinProvision=0 output=XML size=1GB" @@ -63,7 +80,8 @@ def create_volume(cliq_args): return output, None def delete_volume(cliq_args): - """ + """Delete volume CLIQ input for test. + input = "deleteVolume volumeName=fakevolume prompt=false output=XML" """ @@ -75,8 +93,24 @@ def delete_volume(cliq_args): self.assertEqual(cliq_args['prompt'], 'false') return output, None - def assign_volume(cliq_args): + def extend_volume(cliq_args): + """Extend volume CLIQ input for test. + + input = "modifyVolume description="fake description" + volumeName=fakevolume + output=XML size=2GB" """ + output = """ + + """ + self.assertEqual(cliq_args['volumeName'], self.volume_name) + self.assertEqual(cliq_args['size'], '2GB') + return output, None + + def assign_volume(cliq_args): + """Assign volume CLIQ input for test. + input = "assignVolumeToServer volumeName=fakevolume serverName=fakehost output=XML" @@ -90,7 +124,8 @@ def assign_volume(cliq_args): return output, None def unassign_volume(cliq_args): - """ + """Unassign volume CLIQ input for test. 
+ input = "unassignVolumeToServer volumeName=fakevolume serverName=fakehost output=XML """ @@ -102,8 +137,55 @@ def unassign_volume(cliq_args): self.assertEqual(cliq_args['serverName'], self.connector['host']) return output, None - def get_cluster_info(cliq_args): + def create_snapshot(cliq_args): + """Create snapshot CLIQ input for test. + + input = "createSnapshot description="fake description" + snapshotName=fakesnapshot + volumeName=fakevolume + output=XML" """ + output = """ + + """ + self.assertEqual(cliq_args['snapshotName'], self.snapshot_name) + self.assertEqual(cliq_args['volumeName'], self.volume_name) + return output, None + + def delete_snapshot(cliq_args): + """Delete shapshot CLIQ input for test. + + input = "deleteSnapshot snapshotName=fakesnapshot prompt=false + output=XML" + """ + output = """ + + """ + self.assertEqual(cliq_args['snapshotName'], self.snapshot_name) + self.assertEqual(cliq_args['prompt'], 'false') + return output, None + + def create_volume_from_snapshot(cliq_args): + """Create volume from snapshot CLIQ input for test. + + input = "cloneSnapshot description="fake description" + snapshotName=fakesnapshot + volumeName=fakevolume + output=XML" + """ + output = """ + + """ + self.assertEqual(cliq_args['snapshotName'], self.snapshot_name) + self.assertEqual(cliq_args['volumeName'], self.volume_name) + return output, None + + def get_cluster_info(cliq_args): + """Get cluster info CLIQ input for test. + input = "getClusterInfo clusterName=Cluster01 searchDepth=1 verbose=0 output=XML" """ @@ -124,7 +206,8 @@ def get_cluster_info(cliq_args): return output, None def get_volume_info(cliq_args): - """ + """Get volume info CLIQ input for test. + input = "getVolumeInfo volumeName=fakevolume output=XML" """ output = """ @@ -150,6 +233,54 @@ def get_volume_info(cliq_args): """ return output, None + def get_snapshot_info(cliq_args): + """Get snapshot info CLIQ input for test. + + input = "getSnapshotInfo snapshotName=fakesnapshot output=XML" + """ + output = """ + + + + + """ + return output, None + + def get_server_info(cliq_args): + """Get server info CLIQ input for test. + + input = "getServerInfo serverName=fakeName" + """ + output = """ + """ + return output, None + + def create_server(cliq_args): + """Create server CLIQ input for test. 
+ + input = "createServer serverName=fakeName initiator=something" + """ + output = """ + """ + return output, None + def test_error(cliq_args): output = """ {0}'.format(repository))) + + +class CoraidDriverLoginSuccessTestCase(CoraidDriverTestCase): + def setUp(self): + super(CoraidDriverLoginSuccessTestCase, self).setUp() + + login_results = {'state': 'adminSucceed', + 'values': [ + {'fullPath': + 'admin group:{0}'.format(fake_esm_group), + 'groupId': fake_esm_group_id + }]} + + self.fake_rpc.handle('admin', {'op': 'login', + 'username': fake_esm_username, + 'password': fake_esm_password}, + 'Login', login_results) + + self.fake_rpc.handle('admin', {'op': 'setRbacGroup', + 'groupId': fake_esm_group_id}, + 'Group', {'state': 'adminSucceed'}) + + +class CoraidDriverApplianceTestCase(CoraidDriverLoginSuccessTestCase): + def test_resize_volume(self): + new_volume_size = int(fake_volume_size) + 1 + + fetch_request = {'shelf': 'cms', + 'orchStrRepo': '', + 'lv': fake_volume_name} + self.fake_rpc.handle('fetch', fetch_request, None, + fake_esm_fetch) + + reply = {'configState': 'completedSuccessfully'} + + resize_volume_request = {'addr': 'cms', + 'data': { + 'lvName': fake_volume_name, + 'newLvName': fake_volume_name + '-resize', + 'size': + coraid_volume_size(new_volume_size), + 'repoName': fake_repository_name}, + 'op': 'orchStrLunMods', + 'args': 'resize'} + pack_data(resize_volume_request) + self.fake_rpc.handle('configure', {}, [resize_volume_request], + reply) + + real_reply = self.driver.appliance.resize_volume(fake_volume_name, + new_volume_size) + + self.assertEqual(reply['configState'], real_reply['configState']) + + +class CoraidDriverIntegrationalTestCase(CoraidDriverLoginSuccessTestCase): + def setUp(self): + super(CoraidDriverIntegrationalTestCase, self).setUp() + self.appliance = self.driver.appliance + # NOTE(nsobolevsky) prevent re-creation esm appliance + self.stubs.Set(coraid.CoraidDriver, 'appliance', self.appliance) + + def test_create_volume(self): + self.mock_volume_types() + + create_volume_request = {'addr': 'cms', + 'data': { + 'servers': [], + 'size': + coraid_volume_size(fake_volume_size), + 'repoName': fake_repository_name, + 'lvName': fake_volume_name}, + 'op': 'orchStrLun', + 'args': 'add'} + pack_data(create_volume_request) + + self.fake_rpc.handle('configure', {}, [create_volume_request], + {'configState': 'completedSuccessfully', + 'firstParam': 'fake_first_param'}) + + self.mox.ReplayAll() + + self.driver.create_volume(fake_volume) + + self.mox.VerifyAll() + + def test_delete_volume(self): + delete_volume_request = {'addr': 'cms', + 'data': { + 'repoName': fake_repository_name, + 'lvName': fake_volume_name}, + 'op': 'orchStrLun/verified', + 'args': 'delete'} + pack_data(delete_volume_request) + + self.fake_rpc.handle('configure', {}, [delete_volume_request], + {'configState': 'completedSuccessfully'}) + + self.fake_rpc.handle('fetch', {'orchStrRepo': '', + 'shelf': 'cms', + 'lv': fake_volume_name}, + None, + fake_esm_fetch) + + self.mox.ReplayAll() + + self.driver.delete_volume(fake_volume) + + self.mox.VerifyAll() + + def test_ping_ok(self): + self.fake_rpc.handle('fetch', {}, None, '') + + self.mox.ReplayAll() + + self.driver.appliance.ping() + + self.mox.VerifyAll() + + def test_ping_failed(self): + def rpc(handle, url_params, data, + allow_empty_response=True): + raise test.TestingException("Some exception") + + self.stubs.Set(self.driver.appliance, 'rpc', rpc) + self.mox.ReplayAll() + + self.assertRaises(exception.CoraidESMNotAvailable, + 
self.driver.appliance.ping) + + self.mox.VerifyAll() + + def test_delete_not_existing_lun(self): + delete_volume_request = {'addr': 'cms', + 'data': { + 'repoName': fake_repository_name, + 'lvName': fake_volume_name}, + 'op': 'orchStrLun/verified', + 'args': 'delete'} + pack_data(delete_volume_request) + + self.fake_rpc.handle('configure', {}, [delete_volume_request], + {'configState': 'completedSuccessfully'}) + + self.fake_rpc.handle('fetch', {'orchStrRepo': '', + 'shelf': 'cms', + 'lv': fake_volume_name}, + None, + fake_esm_fetch_no_volume) + + self.mox.ReplayAll() + + self.assertRaises( + exception.VolumeNotFound, + self.driver.appliance.delete_lun, + fake_volume['name']) + + self.mox.VerifyAll() + + def test_delete_not_existing_volumeappliance_is_ok(self): + def delete_lun(volume_name): + raise exception.VolumeNotFound(volume_id=fake_volume['name']) + + self.stubs.Set(self.driver.appliance, 'delete_lun', delete_lun) + + def ping(): + pass + + self.stubs.Set(self.driver.appliance, 'ping', ping) + + self.mox.ReplayAll() + + self.driver.delete_volume(fake_volume) + + self.mox.VerifyAll() + + def test_delete_not_existing_volume_sleepingappliance(self): + def delete_lun(volume_name): + raise exception.VolumeNotFound(volume_id=fake_volume['name']) + + self.stubs.Set(self.driver.appliance, 'delete_lun', delete_lun) + + def ping(): + raise exception.CoraidESMNotAvailable(reason="Any reason") + + self.stubs.Set(self.driver.appliance, 'ping', ping) + + self.driver.appliance.ping = ping + + self.mox.ReplayAll() + + self.assertRaises(exception.CoraidESMNotAvailable, + self.driver.delete_volume, + fake_volume) + + self.mox.VerifyAll() + + def test_create_snapshot(self): + fetch_request = {'shelf': 'cms', + 'orchStrRepo': '', + 'lv': fake_volume_name} + self.fake_rpc.handle('fetch', fetch_request, None, + fake_esm_fetch) + + create_snapshot_request = {'addr': 'cms', + 'data': { + 'repoName': fake_repository_name, + 'lvName': fake_volume_name, + 'newLvName': fake_snapshot_name}, + 'op': 'orchStrLunMods', + 'args': 'addClSnap'} + pack_data(create_snapshot_request) + self.fake_rpc.handle('configure', {}, [create_snapshot_request], + {'configState': 'completedSuccessfully'}) + + self.mox.ReplayAll() + + self.driver.create_snapshot(fake_snapshot) + + self.mox.VerifyAll() + + def test_delete_snapshot(self): + fetch_request = {'shelf': 'cms', + 'orchStrRepo': '', + 'lv': fake_snapshot_name} + self.fake_rpc.handle('fetch', fetch_request, None, + fake_esm_fetch) + + delete_snapshot_request = {'addr': 'cms', + 'data': { + 'repoName': fake_repository_name, + 'lvName': fake_snapshot_name}, + 'op': 'orchStrLunMods', + 'args': 'delClSnap'} + pack_data(delete_snapshot_request) + self.fake_rpc.handle('configure', {}, [delete_snapshot_request], + {'configState': 'completedSuccessfully'}) + + self.mox.ReplayAll() + + self.driver.delete_snapshot(fake_snapshot) + + self.mox.VerifyAll() + + def test_create_volume_from_snapshot(self): + self.mock_volume_types() + + self.mox.StubOutWithMock(self.driver.appliance, 'resize_volume') + self.driver.appliance.resize_volume(fake_volume_name, + fake_volume['size'])\ + .AndReturn(None) + + fetch_request = {'shelf': 'cms', + 'orchStrRepo': '', + 'lv': fake_snapshot_name} + self.fake_rpc.handle('fetch', fetch_request, None, + fake_esm_fetch) + + create_clone_request = {'addr': 'cms', + 'data': { + 'lvName': fake_snapshot_name, + 'repoName': fake_repository_name, + 'newLvName': fake_volume_name, + 'newRepoName': fake_repository_name}, + 'op': 'orchStrLunMods', + 'args': 'addClone'} 
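+        # pack_data() (a helper defined earlier in this module) appears to
+        # serialize the request's 'data' member the same way the REST
+        # client does on the wire, so the fake RPC layer can match the
+        # expected request exactly.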
+ pack_data(create_clone_request) + self.fake_rpc.handle('configure', {}, [create_clone_request], + {'configState': 'completedSuccessfully'}) + + self.mox.ReplayAll() + + self.driver.create_volume_from_snapshot(fake_volume, fake_snapshot) + + self.mox.VerifyAll() + + def test_initialize_connection(self): + fetch_request = {'shelf': 'cms', + 'orchStrRepo': '', + 'lv': fake_volume_name} + self.fake_rpc.handle('fetch', fetch_request, None, + fake_esm_fetch) + + self.mox.ReplayAll() + + connection = self.driver.initialize_connection(fake_volume, {}) + + self.mox.VerifyAll() + + self.assertEqual(connection['driver_volume_type'], 'aoe') + self.assertEqual(connection['data']['target_shelf'], fake_shelf) + self.assertEqual(connection['data']['target_lun'], fake_lun) + + def test_get_repository_capabilities(self): + reply = [[{}, {'reply': [ + {'name': 'repo1', + 'profile': + {'fullName': 'Bronze-Bronze:Profile1'}}, + {'name': 'repo2', + 'profile': + {'fullName': 'Bronze-Bronze:Profile2'}}]}]] + + self.fake_rpc.handle('fetch', {'orchStrRepo': ''}, None, + reply) + + self.mox.ReplayAll() + + capabilities = self.driver.get_volume_stats(refresh=True) + + self.mox.VerifyAll() + + self.assertEqual( + capabilities[fake_coraid_repository_key], + 'Bronze-Bronze:Profile1:repo1 Bronze-Bronze:Profile2:repo2') + + def test_create_cloned_volume(self): + self.mock_volume_types([fake_repository_name]) + + fetch_request = {'shelf': 'cms', + 'orchStrRepo': '', + 'lv': fake_volume_name} + self.fake_rpc.handle('fetch', fetch_request, None, + fake_esm_fetch) + + shelf_lun = '{0}.{1}'.format(fake_shelf, fake_lun) + create_clone_request = {'addr': 'cms', + 'data': { + 'shelfLun': shelf_lun, + 'lvName': fake_volume_name, + 'repoName': fake_repository_name, + 'newLvName': fake_clone_name, + 'newRepoName': fake_repository_name}, + 'op': 'orchStrLunMods', + 'args': 'addClone'} + pack_data(create_clone_request) + self.fake_rpc.handle('configure', {}, [create_clone_request], + {'configState': 'completedSuccessfully'}) + + self.mox.ReplayAll() + + self.driver.create_cloned_volume(fake_clone_volume, fake_volume) + + self.mox.VerifyAll() + + def test_create_cloned_volume_with_resize(self): + self.mock_volume_types([fake_repository_name]) + + self.mox.StubOutWithMock(self.driver.appliance, 'resize_volume') + self.driver.appliance.resize_volume(fake_big_clone_volume['name'], + fake_big_clone_volume['size'])\ + .AndReturn(None) + + fetch_request = {'shelf': 'cms', + 'orchStrRepo': '', + 'lv': fake_volume_name} + self.fake_rpc.handle('fetch', fetch_request, None, + fake_esm_fetch) + + shelf_lun = '{0}.{1}'.format(fake_shelf, fake_lun) + create_clone_request = {'addr': 'cms', + 'data': { + 'shelfLun': shelf_lun, + 'lvName': fake_volume_name, + 'repoName': fake_repository_name, + 'newLvName': fake_clone_name, + 'newRepoName': fake_repository_name}, + 'op': 'orchStrLunMods', + 'args': 'addClone'} + pack_data(create_clone_request) + self.fake_rpc.handle('configure', {}, [create_clone_request], + {'configState': 'completedSuccessfully'}) + + self.mox.ReplayAll() + + self.driver.create_cloned_volume(fake_big_clone_volume, fake_volume) + + self.mox.VerifyAll() + + def test_create_cloned_volume_in_different_repository(self): + self.mock_volume_types([fake_repository_name + '_another']) + + fetch_request = {'shelf': 'cms', + 'orchStrRepo': '', + 'lv': fake_volume_name} + self.fake_rpc.handle('fetch', fetch_request, None, + fake_esm_fetch) + + self.mox.ReplayAll() + + self.assertRaises( + exception.CoraidException, + 
self.driver.create_cloned_volume, + fake_clone_volume, + fake_volume) + + self.mox.VerifyAll() + + def test_extend_volume(self): + self.mox.StubOutWithMock(self.driver.appliance, 'resize_volume') + self.driver.appliance.resize_volume(fake_volume_name, 10)\ + .AndReturn(None) + + self.mox.ReplayAll() + + self.driver.extend_volume(fake_volume, 10) + + self.mox.VerifyAll() + + +class AutoReloginCoraidTestCase(test.TestCase): + def setUp(self): + super(AutoReloginCoraidTestCase, self).setUp() + self.mox = mox.Mox() + + self.rest_client = coraid.CoraidRESTClient('https://fake') + self.appliance = coraid.CoraidAppliance(self.rest_client, + 'fake_username', + 'fake_password', + 'fake_group') + + def tearDown(self): + self.mox.UnsetStubs() + super(AutoReloginCoraidTestCase, self).tearDown() + + def _test_auto_relogin_fail(self, state): + self.mox.StubOutWithMock(self.rest_client, 'rpc') + + self.rest_client.rpc('fake_handle', {}, None, False).\ + AndReturn({'state': state, + 'metaCROp': 'reboot'}) + + self.rest_client.rpc('fake_handle', {}, None, False).\ + AndReturn({'state': state, + 'metaCROp': 'reboot'}) + + self.rest_client.rpc('fake_handle', {}, None, False).\ + AndReturn({'state': state, + 'metaCROp': 'reboot'}) + + self.mox.StubOutWithMock(self.appliance, '_ensure_session') + self.appliance._ensure_session().AndReturn(None) + + self.mox.StubOutWithMock(self.appliance, '_relogin') + self.appliance._relogin().AndReturn(None) + self.appliance._relogin().AndReturn(None) + + self.mox.ReplayAll() + + self.assertRaises(exception.CoraidESMReloginFailed, + self.appliance.rpc, + 'fake_handle', {}, None, False) + + self.mox.VerifyAll() + + def test_auto_relogin_fail_admin(self): + self._test_auto_relogin_fail('GeneralAdminFailure') + + def test_auto_relogin_fail_inactivity(self): + self._test_auto_relogin_fail('passwordInactivityTimeout') + + def test_auto_relogin_fail_absolute(self): + self._test_auto_relogin_fail('passwordAbsoluteTimeout') + + def test_auto_relogin_success(self): + self.mox.StubOutWithMock(self.rest_client, 'rpc') + + self.rest_client.rpc('fake_handle', {}, None, False).\ + AndReturn({'state': 'GeneralAdminFailure', + 'metaCROp': 'reboot'}) + + self.rest_client.rpc('fake_handle', {}, None, False).\ + AndReturn({'state': 'ok'}) + + self.mox.StubOutWithMock(self.appliance, '_ensure_session') + self.appliance._ensure_session().AndReturn(None) + + self.mox.StubOutWithMock(self.appliance, '_relogin') + self.appliance._relogin().AndReturn(None) + + self.mox.ReplayAll() + + reply = self.appliance.rpc('fake_handle', {}, None, False) + + self.mox.VerifyAll() + + self.assertEqual(reply['state'], 'ok') + + +class CoraidDriverImageTestCases(CoraidDriverTestCase): + def setUp(self): + super(CoraidDriverImageTestCases, self).setUp() + + self.fake_dev_path = '/dev/ether/fake_dev' + + self.fake_connection = {'driver_volume_type': 'aoe', + 'data': {'target_shelf': fake_shelf, + 'target_lun': fake_lun}} + + self.fake_volume_info = { + 'shelf': self.fake_connection['data']['target_shelf'], + 'lun': self.fake_connection['data']['target_lun']} + + self.mox.StubOutWithMock(self.driver, 'initialize_connection') + self.driver.initialize_connection(fake_volume, {})\ + .AndReturn(self.fake_connection) + + self.mox.StubOutWithMock(self.driver, 'terminate_connection') + self.driver.terminate_connection(fake_volume, mox.IgnoreArg())\ + .AndReturn(None) + + root_helper = 'sudo cinder-rootwrap /etc/cinder/rootwrap.conf' + + self.mox.StubOutWithMock(connector, 'get_connector_properties') + 
connector.get_connector_properties(root_helper, + CONF.my_ip).\ + AndReturn({}) + + self.mox.StubOutWithMock(utils, 'brick_get_connector') + + aoe_initiator = self.mox.CreateMockAnything() + + utils.brick_get_connector('aoe', + device_scan_attempts=3, + use_multipath=False, + conn=mox.IgnoreArg()).\ + AndReturn(aoe_initiator) + + aoe_initiator\ + .connect_volume(self.fake_connection['data'])\ + .AndReturn({'path': self.fake_dev_path}) + + aoe_initiator.check_valid_device(self.fake_dev_path)\ + .AndReturn(True) + + aoe_initiator.disconnect_volume( + {'target_shelf': self.fake_volume_info['shelf'], + 'target_lun': self.fake_volume_info['lun']}, mox.IgnoreArg()) + + def test_copy_volume_to_image(self): + fake_image_service = 'fake-image-service' + fake_image_meta = 'fake-image-meta' + + self.mox.StubOutWithMock(image_utils, 'upload_volume') + image_utils.upload_volume({}, + fake_image_service, + fake_image_meta, + self.fake_dev_path) + + self.mox.ReplayAll() + self.driver.copy_volume_to_image({}, + fake_volume, + fake_image_service, + fake_image_meta) + + self.mox.VerifyAll() + + def test_copy_image_to_volume(self): + fake_image_service = 'fake-image-service' + fake_image_id = 'fake-image-id;' + + self.mox.StubOutWithMock(image_utils, 'fetch_to_raw') + image_utils.fetch_to_raw({}, + fake_image_service, + fake_image_id, + self.fake_dev_path, + mox.IgnoreArg(), + size=fake_volume_size) + + self.mox.ReplayAll() + + self.driver.copy_image_to_volume({}, + fake_volume, + fake_image_service, + fake_image_id) + + self.mox.VerifyAll() + + +class CoraidResetConnectionTestCase(CoraidDriverTestCase): + def test_create_new_appliance_for_every_request(self): + self.mox.StubOutWithMock(coraid, 'CoraidRESTClient') + self.mox.StubOutWithMock(coraid, 'CoraidAppliance') + + coraid.CoraidRESTClient(mox.IgnoreArg()) + coraid.CoraidRESTClient(mox.IgnoreArg()) + + coraid.CoraidAppliance(mox.IgnoreArg(), + mox.IgnoreArg(), + mox.IgnoreArg(), + mox.IgnoreArg()).AndReturn('fake_app1') + coraid.CoraidAppliance(mox.IgnoreArg(), + mox.IgnoreArg(), + mox.IgnoreArg(), + mox.IgnoreArg()).AndReturn('fake_app2') + self.mox.ReplayAll() + + self.assertEqual(self.driver.appliance, 'fake_app1') + self.assertEqual(self.driver.appliance, 'fake_app2') + + self.mox.VerifyAll() diff --git a/cinder/tests/test_create_volume_flow.py b/cinder/tests/test_create_volume_flow.py new file mode 100644 index 0000000000..13a3b8691b --- /dev/null +++ b/cinder/tests/test_create_volume_flow.py @@ -0,0 +1,110 @@ +# Copyright 2013 Canonical Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
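+
+# Broadly, VolumeCastTask is the tail of the create_volume API flow: it
+# forwards the request_spec unchanged and casts either to the scheduler
+# (when there is nothing to place the volume near) or straight to the
+# host that already holds the given source volume or snapshot.  The
+# fakes below assert exactly that pass-through behaviour.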
+""" Tests for create_volume TaskFlow """ + +import time + +from cinder import context +from cinder import test +from cinder.volume.flows.api import create_volume + + +class fake_scheduler_rpc_api(object): + def __init__(self, expected_spec, test_inst): + self.expected_spec = expected_spec + self.test_inst = test_inst + + def create_volume(self, ctxt, topic, volume_id, snapshot_id=None, + image_id=None, request_spec=None, + filter_properties=None): + + self.test_inst.assertEqual(self.expected_spec, request_spec) + + +class fake_volume_api(object): + def __init__(self, expected_spec, test_inst): + self.expected_spec = expected_spec + self.test_inst = test_inst + + def create_volume(self, ctxt, volume, host, + request_spec, filter_properties, + allow_reschedule=True, + snapshot_id=None, image_id=None, + source_volid=None): + + self.test_inst.assertEqual(self.expected_spec, request_spec) + self.test_inst.assertEqual(request_spec['source_volid'], source_volid) + self.test_inst.assertEqual(request_spec['snapshot_id'], snapshot_id) + self.test_inst.assertEqual(request_spec['image_id'], image_id) + + +class fake_db(object): + + def volume_get(self, *args, **kwargs): + return {'host': 'barf'} + + def volume_update(self, *args, **kwargs): + return {'host': 'farb'} + + def snapshot_get(self, *args, **kwargs): + return {'volume_id': 1} + + +class CreateVolumeFlowTestCase(test.TestCase): + + def time_inc(self): + self.counter += 1 + return self.counter + + def setUp(self): + super(CreateVolumeFlowTestCase, self).setUp() + self.ctxt = context.get_admin_context() + self.counter = float(0) + + # Ensure that time.time() always returns more than the last time it was + # called to avoid div by zero errors. + self.counter = float(0) + self.stubs.Set(time, 'time', self.time_inc) + + def test_cast_create_volume(self): + + props = {} + spec = {'volume_id': None, + 'source_volid': None, + 'snapshot_id': None, + 'image_id': None} + + task = create_volume.VolumeCastTask( + fake_scheduler_rpc_api(spec, self), + fake_volume_api(spec, self), + fake_db()) + + task._cast_create_volume(self.ctxt, spec, props) + + spec = {'volume_id': 1, + 'source_volid': 2, + 'snapshot_id': 3, + 'image_id': 4} + + task = create_volume.VolumeCastTask( + fake_scheduler_rpc_api(spec, self), + fake_volume_api(spec, self), + fake_db()) + + task._cast_create_volume(self.ctxt, spec, props) + + def tearDown(self): + self.stubs.UnsetAll() + super(CreateVolumeFlowTestCase, self).tearDown() diff --git a/cinder/tests/test_db_api.py b/cinder/tests/test_db_api.py new file mode 100644 index 0000000000..a46e9ecc0e --- /dev/null +++ b/cinder/tests/test_db_api.py @@ -0,0 +1,1006 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +"""Unit tests for cinder.db.api.""" + + +import datetime + +from oslo.config import cfg + +from cinder import context +from cinder import db +from cinder import exception +from cinder.openstack.common import uuidutils +from cinder.quota import ReservableResource +from cinder import test + + +CONF = cfg.CONF + + +def _quota_reserve(context, project_id): + """Create sample Quota, QuotaUsage and Reservation objects. + + There is no method db.quota_usage_create(), so we have to use + db.quota_reserve() for creating QuotaUsage objects. + + Returns reservations uuids. + + """ + def get_sync(resource, usage): + def sync(elevated, project_id, session): + return {resource: usage} + return sync + quotas = {} + resources = {} + deltas = {} + for i, resource in enumerate(('volumes', 'gigabytes')): + quotas[resource] = db.quota_create(context, project_id, + resource, i + 1) + resources[resource] = ReservableResource(resource, + '_sync_%s' % resource) + deltas[resource] = i + 1 + return db.quota_reserve( + context, resources, quotas, deltas, + datetime.datetime.utcnow(), datetime.datetime.utcnow(), + datetime.timedelta(days=1), project_id + ) + + +class ModelsObjectComparatorMixin(object): + def _dict_from_object(self, obj, ignored_keys): + if ignored_keys is None: + ignored_keys = [] + return dict([(k, v) for k, v in obj.iteritems() + if k not in ignored_keys]) + + def _assertEqualObjects(self, obj1, obj2, ignored_keys=None): + obj1 = self._dict_from_object(obj1, ignored_keys) + obj2 = self._dict_from_object(obj2, ignored_keys) + + self.assertEqual( + len(obj1), len(obj2), + "Keys mismatch: %s" % str(set(obj1.keys()) ^ set(obj2.keys()))) + for key, value in obj1.iteritems(): + self.assertEqual(value, obj2[key]) + + def _assertEqualListsOfObjects(self, objs1, objs2, ignored_keys=None): + obj_to_dict = lambda o: self._dict_from_object(o, ignored_keys) + sort_key = lambda d: [d[k] for k in sorted(d)] + conv_and_sort = lambda obj: sorted(map(obj_to_dict, obj), key=sort_key) + + self.assertEqual(conv_and_sort(objs1), conv_and_sort(objs2)) + + def _assertEqualListsOfPrimitivesAsSets(self, primitives1, primitives2): + self.assertEqual(len(primitives1), len(primitives2)) + for primitive in primitives1: + self.assertIn(primitive, primitives2) + + for primitive in primitives2: + self.assertIn(primitive, primitives1) + + +class BaseTest(test.TestCase, ModelsObjectComparatorMixin): + def setUp(self): + super(BaseTest, self).setUp() + self.ctxt = context.get_admin_context() + + +class DBAPIServiceTestCase(BaseTest): + + """Unit tests for cinder.db.api.service_*.""" + + def _get_base_values(self): + return { + 'host': 'fake_host', + 'binary': 'fake_binary', + 'topic': 'fake_topic', + 'report_count': 3, + 'disabled': False + } + + def _create_service(self, values): + v = self._get_base_values() + v.update(values) + return db.service_create(self.ctxt, v) + + def test_service_create(self): + service = self._create_service({}) + self.assertFalse(service['id'] is None) + for key, value in self._get_base_values().iteritems(): + self.assertEqual(value, service[key]) + + def test_service_destroy(self): + service1 = self._create_service({}) + service2 = self._create_service({'host': 'fake_host2'}) + + db.service_destroy(self.ctxt, service1['id']) + self.assertRaises(exception.ServiceNotFound, + db.service_get, self.ctxt, service1['id']) + self._assertEqualObjects(db.service_get(self.ctxt, service2['id']), + service2) + + def test_service_update(self): + service = self._create_service({}) + new_values = { + 'host': 
'fake_host1', + 'binary': 'fake_binary1', + 'topic': 'fake_topic1', + 'report_count': 4, + 'disabled': True + } + db.service_update(self.ctxt, service['id'], new_values) + updated_service = db.service_get(self.ctxt, service['id']) + for key, value in new_values.iteritems(): + self.assertEqual(value, updated_service[key]) + + def test_service_update_not_found_exception(self): + self.assertRaises(exception.ServiceNotFound, + db.service_update, self.ctxt, 100500, {}) + + def test_service_get(self): + service1 = self._create_service({}) + service2 = self._create_service({'host': 'some_other_fake_host'}) + real_service1 = db.service_get(self.ctxt, service1['id']) + self._assertEqualObjects(service1, real_service1) + + def test_service_get_not_found_exception(self): + self.assertRaises(exception.ServiceNotFound, + db.service_get, self.ctxt, 100500) + + def test_service_get_by_host_and_topic(self): + service1 = self._create_service({'host': 'host1', 'topic': 'topic1'}) + service2 = self._create_service({'host': 'host2', 'topic': 'topic2'}) + + real_service1 = db.service_get_by_host_and_topic(self.ctxt, + host='host1', + topic='topic1') + self._assertEqualObjects(service1, real_service1) + + def test_service_get_all(self): + values = [ + {'host': 'host1', 'topic': 'topic1'}, + {'host': 'host2', 'topic': 'topic2'}, + {'disabled': True} + ] + services = [self._create_service(vals) for vals in values] + disabled_services = [services[-1]] + non_disabled_services = services[:-1] + + compares = [ + (services, db.service_get_all(self.ctxt)), + (disabled_services, db.service_get_all(self.ctxt, True)), + (non_disabled_services, db.service_get_all(self.ctxt, False)) + ] + for comp in compares: + self._assertEqualListsOfObjects(*comp) + + def test_service_get_all_by_topic(self): + values = [ + {'host': 'host1', 'topic': 't1'}, + {'host': 'host2', 'topic': 't1'}, + {'disabled': True, 'topic': 't1'}, + {'host': 'host3', 'topic': 't2'} + ] + services = [self._create_service(vals) for vals in values] + expected = services[:2] + real = db.service_get_all_by_topic(self.ctxt, 't1') + self._assertEqualListsOfObjects(expected, real) + + def test_service_get_all_by_host(self): + values = [ + {'host': 'host1', 'topic': 't1'}, + {'host': 'host1', 'topic': 't1'}, + {'host': 'host2', 'topic': 't1'}, + {'host': 'host3', 'topic': 't2'} + ] + services = [self._create_service(vals) for vals in values] + + expected = services[:2] + real = db.service_get_all_by_host(self.ctxt, 'host1') + self._assertEqualListsOfObjects(expected, real) + + def test_service_get_by_args(self): + values = [ + {'host': 'host1', 'binary': 'a'}, + {'host': 'host2', 'binary': 'b'} + ] + services = [self._create_service(vals) for vals in values] + + service1 = db.service_get_by_args(self.ctxt, 'host1', 'a') + self._assertEqualObjects(services[0], service1) + + service2 = db.service_get_by_args(self.ctxt, 'host2', 'b') + self._assertEqualObjects(services[1], service2) + + def test_service_get_by_args_not_found_exception(self): + self.assertRaises(exception.HostBinaryNotFound, + db.service_get_by_args, + self.ctxt, 'non-exists-host', 'a') + + def test_service_get_all_volume_sorted(self): + values = [ + ({'host': 'h1', 'binary': 'a', 'topic': CONF.volume_topic}, + 100), + ({'host': 'h2', 'binary': 'b', 'topic': CONF.volume_topic}, + 200), + ({'host': 'h3', 'binary': 'b', 'topic': CONF.volume_topic}, + 300)] + services = [] + for vals, size in values: + services.append(self._create_service(vals)) + db.volume_create(self.ctxt, {'host': vals['host'], 'size': 
size}) + for service, size in db.service_get_all_volume_sorted(self.ctxt): + self._assertEqualObjects(services.pop(0), service) + self.assertEqual(values.pop(0)[1], size) + + +class DBAPIVolumeTestCase(BaseTest): + + """Unit tests for cinder.db.api.volume_*.""" + + def test_volume_create(self): + volume = db.volume_create(self.ctxt, {'host': 'host1'}) + self.assertTrue(uuidutils.is_uuid_like(volume['id'])) + self.assertEqual(volume.host, 'host1') + + def test_volume_allocate_iscsi_target_no_more_targets(self): + self.assertRaises(exception.NoMoreTargets, + db.volume_allocate_iscsi_target, + self.ctxt, 42, 'host1') + + def test_volume_allocate_iscsi_target(self): + host = 'host1' + volume = db.volume_create(self.ctxt, {'host': host}) + db.iscsi_target_create_safe(self.ctxt, {'host': host, + 'target_num': 42}) + target_num = db.volume_allocate_iscsi_target(self.ctxt, volume['id'], + host) + self.assertEqual(target_num, 42) + + def test_volume_attached_invalid_uuid(self): + self.assertRaises(exception.InvalidUUID, db.volume_attached, self.ctxt, + 42, 'invalid-uuid', None, '/tmp') + + def test_volume_attached_to_instance(self): + volume = db.volume_create(self.ctxt, {'host': 'host1'}) + instance_uuid = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' + db.volume_attached(self.ctxt, volume['id'], + instance_uuid, None, '/tmp') + volume = db.volume_get(self.ctxt, volume['id']) + self.assertEqual(volume['status'], 'in-use') + self.assertEqual(volume['mountpoint'], '/tmp') + self.assertEqual(volume['attach_status'], 'attached') + self.assertEqual(volume['instance_uuid'], instance_uuid) + self.assertIsNone(volume['attached_host']) + + def test_volume_attached_to_host(self): + volume = db.volume_create(self.ctxt, {'host': 'host1'}) + host_name = 'fake_host' + db.volume_attached(self.ctxt, volume['id'], + None, host_name, '/tmp') + volume = db.volume_get(self.ctxt, volume['id']) + self.assertEqual(volume['status'], 'in-use') + self.assertEqual(volume['mountpoint'], '/tmp') + self.assertEqual(volume['attach_status'], 'attached') + self.assertIsNone(volume['instance_uuid']) + self.assertEqual(volume['attached_host'], host_name) + + def test_volume_data_get_for_host(self): + for i in xrange(3): + for j in xrange(3): + db.volume_create(self.ctxt, {'host': 'h%d' % i, 'size': 100}) + for i in xrange(3): + self.assertEqual((3, 300), + db.volume_data_get_for_host( + self.ctxt, 'h%d' % i)) + + def test_volume_data_get_for_project(self): + for i in xrange(3): + for j in xrange(3): + db.volume_create(self.ctxt, {'project_id': 'p%d' % i, + 'size': 100, + 'host': 'h-%d-%d' % (i, j), + }) + for i in xrange(3): + self.assertEqual((3, 300), + db.volume_data_get_for_project( + self.ctxt, 'p%d' % i)) + + def test_volume_detached_from_instance(self): + volume = db.volume_create(self.ctxt, {}) + db.volume_attached(self.ctxt, volume['id'], + 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', + None, '/tmp') + db.volume_detached(self.ctxt, volume['id']) + volume = db.volume_get(self.ctxt, volume['id']) + self.assertEqual('available', volume['status']) + self.assertEqual('detached', volume['attach_status']) + self.assertIsNone(volume['mountpoint']) + self.assertIsNone(volume['instance_uuid']) + self.assertIsNone(volume['attached_host']) + + def test_volume_detached_from_host(self): + volume = db.volume_create(self.ctxt, {}) + db.volume_attached(self.ctxt, volume['id'], + None, 'fake_host', '/tmp') + db.volume_detached(self.ctxt, volume['id']) + volume = db.volume_get(self.ctxt, volume['id']) + self.assertEqual('available', volume['status']) + 
self.assertEqual('detached', volume['attach_status']) + self.assertIsNone(volume['mountpoint']) + self.assertIsNone(volume['instance_uuid']) + self.assertIsNone(volume['attached_host']) + + def test_volume_get(self): + volume = db.volume_create(self.ctxt, {}) + self._assertEqualObjects(volume, db.volume_get(self.ctxt, + volume['id'])) + + def test_volume_destroy(self): + volume = db.volume_create(self.ctxt, {}) + db.volume_destroy(self.ctxt, volume['id']) + self.assertRaises(exception.VolumeNotFound, db.volume_get, + self.ctxt, volume['id']) + + def test_volume_get_all(self): + volumes = [db.volume_create(self.ctxt, + {'host': 'h%d' % i, 'size': i}) + for i in xrange(3)] + self._assertEqualListsOfObjects(volumes, db.volume_get_all( + self.ctxt, None, None, 'host', None)) + + def test_volume_get_all_marker_passed(self): + volumes = [ + db.volume_create(self.ctxt, {'id': 1}), + db.volume_create(self.ctxt, {'id': 2}), + db.volume_create(self.ctxt, {'id': 3}), + db.volume_create(self.ctxt, {'id': 4}), + ] + + self._assertEqualListsOfObjects(volumes[2:], db.volume_get_all( + self.ctxt, 2, 2, 'id', None)) + + def test_volume_get_all_by_host(self): + volumes = [] + for i in xrange(3): + volumes.append([db.volume_create(self.ctxt, {'host': 'h%d' % i}) + for j in xrange(3)]) + for i in xrange(3): + self._assertEqualListsOfObjects(volumes[i], + db.volume_get_all_by_host( + self.ctxt, 'h%d' % i)) + + def test_volume_get_all_by_instance_uuid(self): + instance_uuids = [] + volumes = [] + for i in xrange(3): + instance_uuid = str(uuidutils.uuid.uuid1()) + instance_uuids.append(instance_uuid) + volumes.append([db.volume_create(self.ctxt, + {'instance_uuid': instance_uuid}) + for j in xrange(3)]) + for i in xrange(3): + self._assertEqualListsOfObjects(volumes[i], + db.volume_get_all_by_instance_uuid( + self.ctxt, instance_uuids[i])) + + def test_volume_get_all_by_instance_uuid_empty(self): + self.assertEqual([], db.volume_get_all_by_instance_uuid(self.ctxt, + 'empty')) + + def test_volume_get_all_by_project(self): + volumes = [] + for i in xrange(3): + volumes.append([db.volume_create(self.ctxt, { + 'project_id': 'p%d' % i}) for j in xrange(3)]) + for i in xrange(3): + self._assertEqualListsOfObjects(volumes[i], + db.volume_get_all_by_project( + self.ctxt, 'p%d' % i, None, + None, 'host', None)) + + def test_volume_get_iscsi_target_num(self): + target = db.iscsi_target_create_safe(self.ctxt, {'volume_id': 42, + 'target_num': 43}) + self.assertEqual(43, db.volume_get_iscsi_target_num(self.ctxt, 42)) + + def test_volume_get_iscsi_target_num_nonexistent(self): + self.assertRaises(exception.ISCSITargetNotFoundForVolume, + db.volume_get_iscsi_target_num, self.ctxt, 42) + + def test_volume_update(self): + volume = db.volume_create(self.ctxt, {'host': 'h1'}) + db.volume_update(self.ctxt, volume['id'], + {'host': 'h2', 'metadata': {'m1': 'v1'}}) + volume = db.volume_get(self.ctxt, volume['id']) + self.assertEqual('h2', volume['host']) + + def test_volume_update_nonexistent(self): + self.assertRaises(exception.VolumeNotFound, db.volume_update, + self.ctxt, 42, {}) + + def test_volume_metadata_get(self): + metadata = {'a': 'b', 'c': 'd'} + db.volume_create(self.ctxt, {'id': 1, 'metadata': metadata}) + + self.assertEqual(metadata, db.volume_metadata_get(self.ctxt, 1)) + + def test_volume_metadata_update(self): + metadata1 = {'a': '1', 'c': '2'} + metadata2 = {'a': '3', 'd': '5'} + should_be = {'a': '3', 'c': '2', 'd': '5'} + + db.volume_create(self.ctxt, {'id': 1, 'metadata': metadata1}) + db_meta = 
db.volume_metadata_update(self.ctxt, 1, metadata2, False)
+
+        self.assertEqual(should_be, db_meta)
+
+    def test_volume_metadata_update_delete(self):
+        metadata1 = {'a': '1', 'c': '2'}
+        metadata2 = {'a': '3', 'd': '4'}
+        should_be = metadata2
+
+        db.volume_create(self.ctxt, {'id': 1, 'metadata': metadata1})
+        db_meta = db.volume_metadata_update(self.ctxt, 1, metadata2, True)
+
+        self.assertEqual(should_be, db_meta)
+
+    def test_volume_metadata_delete(self):
+        metadata = {'a': 'b', 'c': 'd'}
+        db.volume_create(self.ctxt, {'id': 1, 'metadata': metadata})
+        db.volume_metadata_delete(self.ctxt, 1, 'c')
+        metadata.pop('c')
+        self.assertEqual(metadata, db.volume_metadata_get(self.ctxt, 1))
+
+
+class DBAPISnapshotTestCase(BaseTest):
+
+    """Tests for cinder.db.api.snapshot_*."""
+
+    def test_snapshot_data_get_for_project(self):
+        actual = db.snapshot_data_get_for_project(self.ctxt, 'project1')
+        self.assertEqual(actual, (0, 0))
+        db.volume_create(self.ctxt, {'id': 1,
+                                     'project_id': 'project1',
+                                     'size': 42})
+        snapshot = db.snapshot_create(self.ctxt, {'id': 1, 'volume_id': 1,
+                                                  'project_id': 'project1',
+                                                  'volume_size': 42})
+        actual = db.snapshot_data_get_for_project(self.ctxt, 'project1')
+        self.assertEqual(actual, (1, 42))
+
+    def test_snapshot_get_all(self):
+        db.volume_create(self.ctxt, {'id': 1})
+        snapshot = db.snapshot_create(self.ctxt, {'id': 1, 'volume_id': 1})
+        self._assertEqualListsOfObjects([snapshot],
+                                        db.snapshot_get_all(self.ctxt),
+                                        ignored_keys=['metadata', 'volume'])
+
+    def test_snapshot_metadata_get(self):
+        metadata = {'a': 'b', 'c': 'd'}
+        db.volume_create(self.ctxt, {'id': 1})
+        db.snapshot_create(self.ctxt,
+                           {'id': 1, 'volume_id': 1, 'metadata': metadata})
+
+        self.assertEqual(metadata, db.snapshot_metadata_get(self.ctxt, 1))
+
+    def test_snapshot_metadata_update(self):
+        metadata1 = {'a': '1', 'c': '2'}
+        metadata2 = {'a': '3', 'd': '5'}
+        should_be = {'a': '3', 'c': '2', 'd': '5'}
+
+        db.volume_create(self.ctxt, {'id': 1})
+        db.snapshot_create(self.ctxt,
+                           {'id': 1, 'volume_id': 1, 'metadata': metadata1})
+        db_meta = db.snapshot_metadata_update(self.ctxt, 1, metadata2, False)
+
+        self.assertEqual(should_be, db_meta)
+
+    def test_snapshot_metadata_update_delete(self):
+        metadata1 = {'a': '1', 'c': '2'}
+        metadata2 = {'a': '3', 'd': '5'}
+        should_be = metadata2
+
+        db.volume_create(self.ctxt, {'id': 1})
+        db.snapshot_create(self.ctxt,
+                           {'id': 1, 'volume_id': 1, 'metadata': metadata1})
+        db_meta = db.snapshot_metadata_update(self.ctxt, 1, metadata2, True)
+
+        self.assertEqual(should_be, db_meta)
+
+    def test_snapshot_metadata_delete(self):
+        metadata = {'a': '1', 'c': '2'}
+        should_be = {'a': '1'}
+
+        db.volume_create(self.ctxt, {'id': 1})
+        db.snapshot_create(self.ctxt,
+                           {'id': 1, 'volume_id': 1, 'metadata': metadata})
+        db.snapshot_metadata_delete(self.ctxt, 1, 'c')
+
+        self.assertEqual(should_be, db.snapshot_metadata_get(self.ctxt, 1))
+
+
+class DBAPIVolumeTypeTestCase(BaseTest):
+
+    """Tests for the db.api.volume_type_* methods."""
+
+    def test_volume_type_create_exists(self):
+        vt = db.volume_type_create(self.ctxt, {'name': 'n1'})
+        self.assertRaises(exception.VolumeTypeExists,
+                          db.volume_type_create,
+                          self.ctxt,
+                          {'name': 'n1'})
+        self.assertRaises(exception.VolumeTypeExists,
+                          db.volume_type_create,
+                          self.ctxt,
+                          {'name': 'n2', 'id': vt['id']})
+
+
+class DBAPIEncryptionTestCase(BaseTest):
+
+    """Tests for the db.api.volume_type_encryption_* methods."""
+
+    _ignored_keys = [
+        'deleted',
+        'deleted_at',
+        'created_at',
+        'updated_at',
+    ]
+
+    def setUp(self):
+        super(DBAPIEncryptionTestCase, self).setUp()
+        self.created = \
+            [db.volume_type_encryption_update_or_create(self.ctxt, 'fake_type',
+                                                        values)
+             for values in self._get_values()]
+
+    def _get_values(self, one=False):
+        base_values = {
+            'cipher': 'fake_cipher',
+            'key_size': 256,
+            'provider': 'fake_provider',
+            'volume_type_id': 'fake_type',
+            'control_location': 'front-end',
+        }
+        if one:
+            return base_values
+
+        def compose(val, step):
+            if isinstance(val, str):
+                step = str(step)
+            return val + step
+
+        return [dict([(k, compose(v, i)) for k, v in base_values.items()])
+                for i in range(1, 4)]
+
+    def test_volume_type_encryption_update_or_create(self):
+        values = self._get_values()
+        for i, encryption in enumerate(self.created):
+            self._assertEqualObjects(values[i], encryption,
+                                     self._ignored_keys)
+
+    def test_volume_type_encryption_get(self):
+        for encryption in self.created:
+            encryption_get = \
+                db.volume_type_encryption_get(self.ctxt,
+                                              encryption['volume_type_id'])
+            self._assertEqualObjects(encryption, encryption_get,
+                                     self._ignored_keys)
+
+    def test_volume_type_encryption_delete(self):
+        values = {
+            'cipher': 'fake_cipher',
+            'key_size': 256,
+            'provider': 'fake_provider',
+            'volume_type_id': 'fake_type',
+            'control_location': 'front-end',
+        }
+
+        encryption = db.volume_type_encryption_update_or_create(self.ctxt,
+                                                                'fake_type',
+                                                                values)
+        self._assertEqualObjects(values, encryption, self._ignored_keys)
+
+        db.volume_type_encryption_delete(self.ctxt,
+                                         encryption['volume_type_id'])
+        encryption_get = \
+            db.volume_type_encryption_get(self.ctxt,
+                                          encryption['volume_type_id'])
+        self.assertIsNone(encryption_get)
+
+
+class DBAPIReservationTestCase(BaseTest):
+
+    """Tests for db.api.reservation_* methods."""
+
+    def setUp(self):
+        super(DBAPIReservationTestCase, self).setUp()
+        self.values = {
+            'uuid': 'sample-uuid',
+            'project_id': 'project1',
+            'resource': 'resource',
+            'delta': 42,
+            'expire': (datetime.datetime.utcnow() +
+                       datetime.timedelta(days=1)),
+            'usage': {'id': 1}
+        }
+
+    def test_reservation_create(self):
+        reservation = db.reservation_create(self.ctxt, **self.values)
+        self._assertEqualObjects(self.values, reservation, ignored_keys=(
+            'deleted', 'updated_at',
+            'deleted_at', 'id',
+            'created_at', 'usage',
+            'usage_id'))
+        self.assertEqual(reservation['usage_id'], self.values['usage']['id'])
+
+    def test_reservation_get(self):
+        reservation = db.reservation_create(self.ctxt, **self.values)
+        reservation_db = db.reservation_get(self.ctxt, self.values['uuid'])
+        self._assertEqualObjects(reservation, reservation_db)
+
+    def test_reservation_get_nonexistent(self):
+        self.assertRaises(exception.ReservationNotFound,
+                          db.reservation_get,
+                          self.ctxt,
+                          'non-existent-reservation-uuid')
+
+    def test_reservation_commit(self):
+        reservations = _quota_reserve(self.ctxt, 'project1')
+        expected = {'project_id': 'project1',
+                    'volumes': {'reserved': 1, 'in_use': 0},
+                    'gigabytes': {'reserved': 2, 'in_use': 0},
+                    }
+        self.assertEqual(expected,
+                         db.quota_usage_get_all_by_project(
+                             self.ctxt, 'project1'))
+        db.reservation_get(self.ctxt, reservations[0])
+        db.reservation_commit(self.ctxt, reservations, 'project1')
+        self.assertRaises(exception.ReservationNotFound,
+                          db.reservation_get,
+                          self.ctxt,
+                          reservations[0])
+        expected = {'project_id': 'project1',
+                    'volumes': {'reserved': 0, 'in_use': 1},
+                    'gigabytes': {'reserved': 0, 'in_use': 2},
+                    }
+        
self.assertEqual(expected, + db.quota_usage_get_all_by_project( + self.ctxt, + 'project1')) + + def test_reservation_rollback(self): + reservations = _quota_reserve(self.ctxt, 'project1') + expected = {'project_id': 'project1', + 'volumes': {'reserved': 1, 'in_use': 0}, + 'gigabytes': {'reserved': 2, 'in_use': 0}, + } + self.assertEqual(expected, + db.quota_usage_get_all_by_project( + self.ctxt, + 'project1')) + db.reservation_get(self.ctxt, reservations[0]) + db.reservation_rollback(self.ctxt, reservations, 'project1') + self.assertRaises(exception.ReservationNotFound, + db.reservation_get, + self.ctxt, + reservations[0]) + expected = {'project_id': 'project1', + 'volumes': {'reserved': 0, 'in_use': 0}, + 'gigabytes': {'reserved': 0, 'in_use': 0}, + } + self.assertEqual(expected, + db.quota_usage_get_all_by_project( + self.ctxt, + 'project1')) + + def test_reservation_get_all_by_project(self): + reservations = _quota_reserve(self.ctxt, 'project1') + r1 = db.reservation_get(self.ctxt, reservations[0]) + r2 = db.reservation_get(self.ctxt, reservations[1]) + expected = {'project_id': 'project1', + r1['resource']: {r1['uuid']: r1['delta']}, + r2['resource']: {r2['uuid']: r2['delta']}} + self.assertEqual(expected, db.reservation_get_all_by_project( + self.ctxt, 'project1')) + + def test_reservation_expire(self): + self.values['expire'] = datetime.datetime.utcnow() + \ + datetime.timedelta(days=1) + reservations = _quota_reserve(self.ctxt, 'project1') + db.reservation_expire(self.ctxt) + + expected = {'project_id': 'project1', + 'gigabytes': {'reserved': 0, 'in_use': 0}, + 'volumes': {'reserved': 0, 'in_use': 0}} + self.assertEqual(expected, + db.quota_usage_get_all_by_project( + self.ctxt, + 'project1')) + + def test_reservation_destroy(self): + reservations = _quota_reserve(self.ctxt, 'project1') + r1 = db.reservation_get(self.ctxt, reservations[0]) + db.reservation_destroy(self.ctxt, reservations[1]) + expected = {'project_id': 'project1', + r1['resource']: {r1['uuid']: r1['delta']}} + self.assertEqual(expected, db.reservation_get_all_by_project( + self.ctxt, 'project1')) + + +class DBAPIQuotaClassTestCase(BaseTest): + + """Tests for db.api.quota_class_* methods.""" + + def setUp(self): + super(DBAPIQuotaClassTestCase, self).setUp() + self.sample_qc = db.quota_class_create(self.ctxt, 'test_qc', + 'test_resource', 42) + + def test_quota_class_get(self): + qc = db.quota_class_get(self.ctxt, 'test_qc', 'test_resource') + self._assertEqualObjects(self.sample_qc, qc) + + def test_quota_class_destroy(self): + db.quota_class_destroy(self.ctxt, 'test_qc', 'test_resource') + self.assertRaises(exception.QuotaClassNotFound, + db.quota_class_get, self.ctxt, + 'test_qc', 'test_resource') + + def test_quota_class_get_not_found(self): + self.assertRaises(exception.QuotaClassNotFound, + db.quota_class_get, self.ctxt, 'nonexistent', + 'nonexistent') + + def test_quota_class_get_all_by_name(self): + sample1 = db.quota_class_create(self.ctxt, 'test2', 'res1', 43) + sample2 = db.quota_class_create(self.ctxt, 'test2', 'res2', 44) + self.assertEqual({'class_name': 'test_qc', 'test_resource': 42}, + db.quota_class_get_all_by_name(self.ctxt, 'test_qc')) + self.assertEqual({'class_name': 'test2', 'res1': 43, 'res2': 44}, + db.quota_class_get_all_by_name(self.ctxt, 'test2')) + + def test_quota_class_update(self): + db.quota_class_update(self.ctxt, 'test_qc', 'test_resource', 43) + updated = db.quota_class_get(self.ctxt, 'test_qc', 'test_resource') + self.assertEqual(43, updated['hard_limit']) + + def 
test_quota_class_destroy_all_by_name(self):
+        sample1 = db.quota_class_create(self.ctxt, 'test2', 'res1', 43)
+        sample2 = db.quota_class_create(self.ctxt, 'test2', 'res2', 44)
+        db.quota_class_destroy_all_by_name(self.ctxt, 'test2')
+        self.assertEqual({'class_name': 'test2'},
+                         db.quota_class_get_all_by_name(self.ctxt, 'test2'))
+
+
+class DBAPIQuotaTestCase(BaseTest):
+
+    """Tests for db.api.quota_* methods."""
+
+    def test_quota_create(self):
+        quota = db.quota_create(self.ctxt, 'project1', 'resource', 99)
+        self.assertEqual(quota.resource, 'resource')
+        self.assertEqual(quota.hard_limit, 99)
+        self.assertEqual(quota.project_id, 'project1')
+
+    def test_quota_get(self):
+        quota = db.quota_create(self.ctxt, 'project1', 'resource', 99)
+        quota_db = db.quota_get(self.ctxt, 'project1', 'resource')
+        self._assertEqualObjects(quota, quota_db)
+
+    def test_quota_get_all_by_project(self):
+        for i in range(3):
+            for j in range(3):
+                db.quota_create(self.ctxt, 'proj%d' % i, 'res%d' % j, j)
+        for i in range(3):
+            quotas_db = db.quota_get_all_by_project(self.ctxt, 'proj%d' % i)
+            self.assertEqual(quotas_db, {'project_id': 'proj%d' % i,
+                                         'res0': 0,
+                                         'res1': 1,
+                                         'res2': 2})
+
+    def test_quota_update(self):
+        db.quota_create(self.ctxt, 'project1', 'resource1', 41)
+        db.quota_update(self.ctxt, 'project1', 'resource1', 42)
+        quota = db.quota_get(self.ctxt, 'project1', 'resource1')
+        self.assertEqual(quota.hard_limit, 42)
+        self.assertEqual(quota.resource, 'resource1')
+        self.assertEqual(quota.project_id, 'project1')
+
+    def test_quota_update_nonexistent(self):
+        self.assertRaises(exception.ProjectQuotaNotFound,
+                          db.quota_update,
+                          self.ctxt,
+                          'project1',
+                          'resource1',
+                          42)
+
+    def test_quota_get_nonexistent(self):
+        self.assertRaises(exception.ProjectQuotaNotFound,
+                          db.quota_get,
+                          self.ctxt,
+                          'project1',
+                          'resource1')
+
+    def test_quota_reserve(self):
+        reservations = _quota_reserve(self.ctxt, 'project1')
+        self.assertEqual(len(reservations), 2)
+        res_names = ['gigabytes', 'volumes']
+        for uuid in reservations:
+            reservation = db.reservation_get(self.ctxt, uuid)
+            self.assertIn(reservation.resource, res_names)
+            res_names.remove(reservation.resource)
+
+    def test_quota_destroy(self):
+        db.quota_create(self.ctxt, 'project1', 'resource1', 41)
+        self.assertIsNone(db.quota_destroy(self.ctxt, 'project1',
+                                           'resource1'))
+        self.assertRaises(exception.ProjectQuotaNotFound, db.quota_get,
+                          self.ctxt, 'project1', 'resource1')
+
+    def test_quota_destroy_all_by_project(self):
+        reservations = _quota_reserve(self.ctxt, 'project1')
+        db.quota_destroy_all_by_project(self.ctxt, 'project1')
+        self.assertEqual(db.quota_get_all_by_project(self.ctxt, 'project1'),
+                         {'project_id': 'project1'})
+        self.assertEqual(db.quota_usage_get_all_by_project(self.ctxt,
+                                                           'project1'),
+                         {'project_id': 'project1'})
+        for r in reservations:
+            self.assertRaises(exception.ReservationNotFound,
+                              db.reservation_get,
+                              self.ctxt,
+                              r)
+
+    def test_quota_usage_get_nonexistent(self):
+        self.assertRaises(exception.QuotaUsageNotFound,
+                          db.quota_usage_get,
+                          self.ctxt,
+                          'p1',
+                          'nonexistent_resource')
+
+    def test_quota_usage_get(self):
+        reservations = _quota_reserve(self.ctxt, 'p1')
+        quota_usage = db.quota_usage_get(self.ctxt, 'p1', 'gigabytes')
+        expected = {'resource': 'gigabytes', 'project_id': 'p1',
+                    'in_use': 0, 'reserved': 2, 'total': 2}
+        for key, value in expected.iteritems():
+            self.assertEqual(value, quota_usage[key], key)
+
+    def test_quota_usage_get_all_by_project(self):
+        reservations = _quota_reserve(self.ctxt, 
'p1')
+        expected = {'project_id': 'p1',
+                    'volumes': {'in_use': 0, 'reserved': 1},
+                    'gigabytes': {'in_use': 0, 'reserved': 2}}
+        self.assertEqual(expected, db.quota_usage_get_all_by_project(
+            self.ctxt, 'p1'))
+
+
+class DBAPIIscsiTargetTestCase(BaseTest):
+
+    """Unit tests for cinder.db.api.iscsi_target_*."""
+
+    def _get_base_values(self):
+        return {'target_num': 10, 'host': 'fake_host'}
+
+    def test_iscsi_target_create_safe(self):
+        target = db.iscsi_target_create_safe(self.ctxt,
+                                             self._get_base_values())
+        self.assertTrue(target['id'])
+        self.assertEqual(target['host'], 'fake_host')
+        self.assertEqual(target['target_num'], 10)
+
+    def test_iscsi_target_count_by_host(self):
+        for i in range(3):
+            values = self._get_base_values()
+            values['target_num'] += i
+            db.iscsi_target_create_safe(self.ctxt, values)
+        self.assertEqual(db.iscsi_target_count_by_host(self.ctxt, 'fake_host'),
+                         3)
+
+    @test.testtools.skip("bug 1187367")
+    def test_integrity_error(self):
+        db.iscsi_target_create_safe(self.ctxt, self._get_base_values())
+        self.assertFalse(db.iscsi_target_create_safe(self.ctxt,
+                                                     self._get_base_values()))
+
+
+class DBAPIBackupTestCase(BaseTest):
+
+    """Tests for db.api.backup_* methods."""
+
+    _ignored_keys = ['id', 'deleted', 'deleted_at', 'created_at', 'updated_at']
+
+    def setUp(self):
+        super(DBAPIBackupTestCase, self).setUp()
+        self.created = [db.backup_create(self.ctxt, values)
+                        for values in self._get_values()]
+
+    def _get_values(self, one=False):
+        base_values = {
+            'user_id': 'user',
+            'project_id': 'project',
+            'volume_id': 'volume',
+            'host': 'host',
+            'availability_zone': 'zone',
+            'display_name': 'display',
+            'display_description': 'description',
+            'container': 'container',
+            'status': 'status',
+            'fail_reason': 'test',
+            'service_metadata': 'metadata',
+            'service': 'service',
+            'size': 1000,
+            'object_count': 100}
+        if one:
+            return base_values
+
+        def compose(val, step):
+            if isinstance(val, str):
+                step = str(step)
+            return val + step
+
+        return [dict([(k, compose(v, i)) for k, v in base_values.items()])
+                for i in range(1, 4)]
+
+    def test_backup_create(self):
+        values = self._get_values()
+        for i, backup in enumerate(self.created):
+            self.assertTrue(backup['id'])
+            self._assertEqualObjects(values[i], backup, self._ignored_keys)
+
+    def test_backup_get(self):
+        for backup in self.created:
+            backup_get = db.backup_get(self.ctxt, backup['id'])
+            self._assertEqualObjects(backup, backup_get)
+
+    def test_backup_get_all(self):
+        all_backups = db.backup_get_all(self.ctxt)
+        self._assertEqualListsOfObjects(self.created, all_backups)
+
+    def test_backup_get_all_by_host(self):
+        byhost = db.backup_get_all_by_host(self.ctxt,
+                                           self.created[1]['host'])
+        self._assertEqualObjects(self.created[1], byhost[0])
+
+    def test_backup_get_all_by_project(self):
+        byproj = db.backup_get_all_by_project(self.ctxt,
+                                              self.created[1]['project_id'])
+        self._assertEqualObjects(self.created[1], byproj[0])
+
+    def test_backup_update_nonexistent(self):
+        self.assertRaises(exception.BackupNotFound,
+                          db.backup_update,
+                          self.ctxt, 'nonexistent', {})
+
+    def test_backup_update(self):
+        updated_values = self._get_values(one=True)
+        update_id = self.created[1]['id']
+        updated_backup = db.backup_update(self.ctxt, update_id,
+                                          updated_values)
+        self._assertEqualObjects(updated_values, updated_backup,
+                                 self._ignored_keys)
+
+    def test_backup_destroy(self):
+        for backup in self.created:
+            db.backup_destroy(self.ctxt, backup['id'])
+        self.assertFalse(db.backup_get_all(self.ctxt))
+
+    def 
test_backup_not_found(self): + self.assertRaises(exception.BackupNotFound, db.backup_get, self.ctxt, + 'notinbase') diff --git a/cinder/tests/test_deprecated.py b/cinder/tests/test_deprecated.py deleted file mode 100644 index 5af5c76025..0000000000 --- a/cinder/tests/test_deprecated.py +++ /dev/null @@ -1,46 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2010 OpenStack LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from cinder.common import deprecated -from cinder import exception -from cinder import test - - -class DeprecatedConfigTestCase(test.TestCase): - def setUp(self): - super(DeprecatedConfigTestCase, self).setUp() - self.logbuffer = "" - - def local_log(msg): - self.logbuffer = msg - - self.stubs.Set(deprecated.LOG, 'warn', local_log) - - def test_deprecated(self): - deprecated.warn('test') - self.assertEqual(self.logbuffer, 'Deprecated Config: test') - - def test_deprecated_fatal(self): - self.flags(fatal_deprecations=True) - self.assertRaises(exception.DeprecatedConfig, - deprecated.warn, "test2") - self.assertEqual(self.logbuffer, 'Deprecated Config: test2') - - def test_deprecated_logs_only_once(self): - deprecated.warn('only once!') - deprecated.warn('only once!') - deprecated.warn('only once!') - self.assertEqual(self.logbuffer, 'Deprecated Config: only once!') diff --git a/cinder/tests/test_drivers_compatibility.py b/cinder/tests/test_drivers_compatibility.py new file mode 100644 index 0000000000..3449d9142a --- /dev/null +++ b/cinder/tests/test_drivers_compatibility.py @@ -0,0 +1,200 @@ +# Copyright 2012 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
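+#
+# NOTE: each test below loads a volume driver through a legacy import path
+# and asserts that the volume manager resolves it to the current module
+# named by the corresponding *_MODULE constant.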
+ + +from oslo.config import cfg + +from cinder import context +from cinder.openstack.common import importutils +from cinder import test +from cinder.volume.drivers.solidfire import SolidFireDriver + + +CONF = cfg.CONF + +RBD_MODULE = "cinder.volume.drivers.rbd.RBDDriver" +SHEEPDOG_MODULE = "cinder.volume.drivers.sheepdog.SheepdogDriver" +NEXENTA_MODULE = "cinder.volume.drivers.nexenta.iscsi.NexentaISCSIDriver" +SAN_MODULE = "cinder.volume.drivers.san.san.SanISCSIDriver" +SOLARIS_MODULE = "cinder.volume.drivers.san.solaris.SolarisISCSIDriver" +LEFTHAND_MODULE = "cinder.volume.drivers.san.hp_lefthand.HpSanISCSIDriver" +NFS_MODULE = "cinder.volume.drivers.nfs.NfsDriver" +SOLIDFIRE_MODULE = "cinder.volume.drivers.solidfire.SolidFireDriver" +STORWIZE_MODULE = "cinder.volume.drivers.ibm.storwize_svc.StorwizeSVCDriver" +WINDOWS_MODULE = "cinder.volume.drivers.windows.windows.WindowsDriver" +XIV_DS8K_MODULE = "cinder.volume.drivers.xiv_ds8k.XIVDS8KDriver" +ZADARA_MODULE = "cinder.volume.drivers.zadara.ZadaraVPSAISCSIDriver" +NETAPP_MODULE = "cinder.volume.drivers.netapp.common.Deprecated" + + +class VolumeDriverCompatibility(test.TestCase): + """Test backwards compatibility for volume drivers.""" + + def fake_update_cluster_status(self): + return + + def setUp(self): + super(VolumeDriverCompatibility, self).setUp() + self.manager = importutils.import_object(CONF.volume_manager) + self.context = context.get_admin_context() + + def tearDown(self): + super(VolumeDriverCompatibility, self).tearDown() + + def _load_driver(self, driver): + if 'SolidFire' in driver: + # SolidFire driver does update_cluster stat on init + self.stubs.Set(SolidFireDriver, '_update_cluster_status', + self.fake_update_cluster_status) + self.manager.__init__(volume_driver=driver) + + def _driver_module_name(self): + return "%s.%s" % (self.manager.driver.__class__.__module__, + self.manager.driver.__class__.__name__) + + def test_rbd_old(self): + self._load_driver('cinder.volume.driver.RBDDriver') + self.assertEqual(self._driver_module_name(), RBD_MODULE) + + def test_rbd_new(self): + self._load_driver(RBD_MODULE) + self.assertEqual(self._driver_module_name(), RBD_MODULE) + + def test_sheepdog_old(self): + self._load_driver('cinder.volume.driver.SheepdogDriver') + self.assertEqual(self._driver_module_name(), SHEEPDOG_MODULE) + + def test_sheepdog_new(self): + self._load_driver(SHEEPDOG_MODULE) + self.assertEqual(self._driver_module_name(), SHEEPDOG_MODULE) + + def test_nexenta_old(self): + self._load_driver('cinder.volume.nexenta.volume.NexentaDriver') + self.assertEqual(self._driver_module_name(), NEXENTA_MODULE) + + def test_nexenta_new(self): + self._load_driver(NEXENTA_MODULE) + self.assertEqual(self._driver_module_name(), NEXENTA_MODULE) + + def test_san_old(self): + self._load_driver('cinder.volume.san.SanISCSIDriver') + self.assertEqual(self._driver_module_name(), SAN_MODULE) + + def test_san_new(self): + self._load_driver(SAN_MODULE) + self.assertEqual(self._driver_module_name(), SAN_MODULE) + + def test_solaris_old(self): + self._load_driver('cinder.volume.san.SolarisISCSIDriver') + self.assertEqual(self._driver_module_name(), SOLARIS_MODULE) + + def test_solaris_new(self): + self._load_driver(SOLARIS_MODULE) + self.assertEqual(self._driver_module_name(), SOLARIS_MODULE) + + def test_hp_lefthand_old(self): + self._load_driver('cinder.volume.san.HpSanISCSIDriver') + self.assertEqual(self._driver_module_name(), LEFTHAND_MODULE) + + def test_hp_lefthand_new(self): + self._load_driver(LEFTHAND_MODULE) + 
self.assertEqual(self._driver_module_name(), LEFTHAND_MODULE) + + def test_nfs_old(self): + self._load_driver('cinder.volume.nfs.NfsDriver') + self.assertEqual(self._driver_module_name(), NFS_MODULE) + + def test_nfs_new(self): + self._load_driver(NFS_MODULE) + self.assertEqual(self._driver_module_name(), NFS_MODULE) + + def test_solidfire_old(self): + self._load_driver('cinder.volume.solidfire.SolidFire') + self.assertEqual(self._driver_module_name(), SOLIDFIRE_MODULE) + + def test_solidfire_old2(self): + self._load_driver('cinder.volume.drivers.solidfire.SolidFire') + self.assertEqual(self._driver_module_name(), SOLIDFIRE_MODULE) + + def test_solidfire_new(self): + self._load_driver(SOLIDFIRE_MODULE) + self.assertEqual(self._driver_module_name(), SOLIDFIRE_MODULE) + + def test_storwize_svc_old(self): + self._load_driver('cinder.volume.storwize_svc.StorwizeSVCDriver') + self.assertEqual(self._driver_module_name(), STORWIZE_MODULE) + + def test_storwize_svc_old2(self): + self._load_driver('cinder.volume.drivers.storwize_svc.' + 'StorwizeSVCDriver') + self.assertEqual(self._driver_module_name(), STORWIZE_MODULE) + + def test_storwize_svc_new(self): + self._load_driver(STORWIZE_MODULE) + self.assertEqual(self._driver_module_name(), STORWIZE_MODULE) + + def test_windows_old(self): + self._load_driver('cinder.volume.windows.WindowsDriver') + self.assertEqual(self._driver_module_name(), WINDOWS_MODULE) + + def test_windows_new(self): + self._load_driver(WINDOWS_MODULE) + self.assertEqual(self._driver_module_name(), WINDOWS_MODULE) + + def test_xiv_old(self): + self._load_driver('cinder.volume.xiv.XIVDriver') + self.assertEqual(self._driver_module_name(), XIV_DS8K_MODULE) + + def test_xiv_ds8k_new(self): + self._load_driver(XIV_DS8K_MODULE) + self.assertEqual(self._driver_module_name(), XIV_DS8K_MODULE) + + def test_zadara_old(self): + self._load_driver('cinder.volume.zadara.ZadaraVPSAISCSIDriver') + self.assertEqual(self._driver_module_name(), ZADARA_MODULE) + + def test_zadara_new(self): + self._load_driver(ZADARA_MODULE) + self.assertEqual(self._driver_module_name(), ZADARA_MODULE) + + def test_netapp_7m_iscsi_old(self): + self._load_driver( + 'cinder.volume.drivers.netapp.iscsi.NetAppISCSIDriver') + self.assertEqual(self._driver_module_name(), NETAPP_MODULE) + + def test_netapp_7m_iscsi_old_old(self): + self._load_driver('cinder.volume.netapp.NetAppISCSIDriver') + self.assertEqual(self._driver_module_name(), NETAPP_MODULE) + + def test_netapp_cm_iscsi_old_old(self): + self._load_driver('cinder.volume.netapp.NetAppCmodeISCSIDriver') + self.assertEqual(self._driver_module_name(), NETAPP_MODULE) + + def test_netapp_cm_iscsi_old(self): + self._load_driver( + 'cinder.volume.drivers.netapp.iscsi.NetAppCmodeISCSIDriver') + self.assertEqual(self._driver_module_name(), NETAPP_MODULE) + + def test_netapp_7m_nfs_old_old(self): + self._load_driver('cinder.volume.netapp_nfs.NetAppNFSDriver') + self.assertEqual(self._driver_module_name(), NETAPP_MODULE) + + def test_netapp_7m_nfs_old(self): + self._load_driver('cinder.volume.drivers.netapp.nfs.NetAppNFSDriver') + self.assertEqual(self._driver_module_name(), NETAPP_MODULE) + + def test_netapp_cm_nfs_old(self): + self._load_driver( + 'cinder.volume.drivers.netapp.nfs.NetAppCmodeNfsDriver') + self.assertEqual(self._driver_module_name(), NETAPP_MODULE) diff --git a/cinder/tests/test_emc.py b/cinder/tests/test_emc.py new file mode 100644 index 0000000000..d7bd280179 --- /dev/null +++ b/cinder/tests/test_emc.py @@ -0,0 +1,766 @@ + +# Copyright (c) 2012 EMC 
Corporation, Inc. +# Copyright (c) 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import os +import shutil +import tempfile +from xml.dom.minidom import Document + +import mox + +from cinder import exception +from cinder.openstack.common import log as logging +from cinder import test +from cinder.volume import configuration as conf +from cinder.volume.drivers.emc.emc_smis_common import EMCSMISCommon +from cinder.volume.drivers.emc.emc_smis_iscsi import EMCSMISISCSIDriver + + +CINDER_EMC_CONFIG_FILE = '/etc/cinder/cinder_emc_config.xml' +LOG = logging.getLogger(__name__) + +config_file_name = 'cinder_emc_config.xml' +storage_system = 'CLARiiON+APM00123456789' +storage_system_vmax = 'SYMMETRIX+000195900551' +lunmaskctrl_id = 'CLARiiON+APM00123456789+00aa11bb22cc33dd44ff55gg66hh77ii88jj' +initiator1 = 'iqn.1993-08.org.debian:01:1a2b3c4d5f6g' +stconf_service_creationclass = 'Clar_StorageConfigurationService' +ctrlconf_service_creationclass = 'Clar_ControllerConfigurationService' +rep_service_creationclass = 'Clar_ReplicationService' +vol_creationclass = 'Clar_StorageVolume' +pool_creationclass = 'Clar_UnifiedStoragePool' +lunmask_creationclass = 'Clar_LunMaskingSCSIProtocolController' +unit_creationclass = 'CIM_ProtocolControllerForUnit' +storage_type = 'gold' + +test_volume = {'name': 'vol1', + 'size': 1, + 'volume_name': 'vol1', + 'id': '1', + 'provider_auth': None, + 'project_id': 'project', + 'display_name': 'vol1', + 'display_description': 'test volume', + 'volume_type_id': None} +test_failed_volume = {'name': 'failed_vol', + 'size': 1, + 'volume_name': 'failed_vol', + 'id': '4', + 'provider_auth': None, + 'project_id': 'project', + 'display_name': 'failed_vol', + 'display_description': 'test failed volume', + 'volume_type_id': None} +test_snapshot = {'name': 'snapshot1', + 'size': 1, + 'id': '4444', + 'volume_name': 'vol1', + 'volume_size': 1, + 'project_id': 'project'} +test_clone = {'name': 'clone1', + 'size': 1, + 'volume_name': 'vol1', + 'id': '2', + 'provider_auth': None, + 'project_id': 'project', + 'display_name': 'clone1', + 'display_description': 'volume created from snapshot', + 'volume_type_id': None} +test_clone3 = {'name': 'clone3', + 'size': 1, + 'volume_name': 'vol1', + 'id': '3', + 'provider_auth': None, + 'project_id': 'project', + 'display_name': 'clone3', + 'display_description': 'cloned volume', + 'volume_type_id': None} +test_snapshot_vmax = {'name': 'snapshot_vmax', + 'size': 1, + 'id': '4445', + 'volume_name': 'vol1', + 'volume_size': 1, + 'project_id': 'project'} +failed_snapshot_replica = {'name': 'failed_snapshot_replica', + 'size': 1, + 'volume_name': 'vol1', + 'id': '5', + 'provider_auth': None, + 'project_id': 'project', + 'display_name': 'vol1', + 'display_description': 'failed snapshot replica', + 'volume_type_id': None} +failed_snapshot_sync = {'name': 'failed_snapshot_sync', + 'size': 1, + 'volume_name': 'vol1', + 'id': '6', + 'provider_auth': None, + 'project_id': 'project', + 'display_name': 
'failed_snapshot_sync', + 'display_description': 'failed snapshot sync', + 'volume_type_id': None} +failed_clone_replica = {'name': 'failed_clone_replica', + 'size': 1, + 'volume_name': 'vol1', + 'id': '7', + 'provider_auth': None, + 'project_id': 'project', + 'display_name': 'vol1', + 'display_description': 'failed clone replica', + 'volume_type_id': None} +failed_clone_sync = {'name': 'failed_clone_sync', + 'size': 1, + 'volume_name': 'vol1', + 'id': '8', + 'provider_auth': None, + 'project_id': 'project', + 'display_name': 'vol1', + 'display_description': 'failed clone sync', + 'volume_type_id': None} +failed_delete_vol = {'name': 'failed_delete_vol', + 'size': 1, + 'volume_name': 'failed_delete_vol', + 'id': '99999', + 'provider_auth': None, + 'project_id': 'project', + 'display_name': 'failed delete vol', + 'display_description': 'failed delete volume', + 'volume_type_id': None} + + +class EMC_StorageVolume(dict): + pass + + +class SE_ConcreteJob(dict): + pass + + +class FakeEcomConnection(): + + def InvokeMethod(self, MethodName, Service, ElementName=None, InPool=None, + ElementType=None, Size=None, + SyncType=None, SourceElement=None, + Operation=None, Synchronization=None, + TheElements=None, + LUNames=None, InitiatorPortIDs=None, DeviceAccesses=None, + ProtocolControllers=None, + MaskingGroup=None, Members=None): + + rc = 0L + myjob = SE_ConcreteJob() + myjob.classname = 'SE_ConcreteJob' + myjob['InstanceID'] = '9999' + myjob['status'] = 'success' + if ElementName == 'failed_vol' and \ + MethodName == 'CreateOrModifyElementFromStoragePool': + rc = 10L + myjob['status'] = 'failure' + elif ElementName == 'failed_snapshot_replica' and \ + MethodName == 'CreateElementReplica': + rc = 10L + myjob['status'] = 'failure' + elif Synchronization and \ + Synchronization['SyncedElement']['ElementName'] \ + == 'failed_snapshot_sync' and \ + MethodName == 'ModifyReplicaSynchronization': + rc = 10L + myjob['status'] = 'failure' + elif ElementName == 'failed_clone_replica' and \ + MethodName == 'CreateElementReplica': + rc = 10L + myjob['status'] = 'failure' + elif Synchronization and \ + Synchronization['SyncedElement']['ElementName'] \ + == 'failed_clone_sync' and \ + MethodName == 'ModifyReplicaSynchronization': + rc = 10L + myjob['status'] = 'failure' + elif TheElements and \ + TheElements[0]['DeviceID'] == '99999' and \ + MethodName == 'EMCReturnToStoragePool': + rc = 10L + myjob['status'] = 'failure' + + job = {'Job': myjob} + return rc, job + + def EnumerateInstanceNames(self, name): + result = None + if name == 'EMC_ReplicationService': + result = self._enum_replicationservices() + elif name == 'EMC_StorageConfigurationService': + result = self._enum_stconfsvcs() + elif name == 'EMC_ControllerConfigurationService': + result = self._enum_ctrlconfsvcs() + elif name == 'EMC_VirtualProvisioningPool': + result = self._enum_pools() + elif name == 'EMC_UnifiedStoragePool': + result = self._enum_pools() + elif name == 'EMC_StorageVolume': + result = self._enum_storagevolumes() + elif name == 'Clar_StorageVolume': + result = self._enum_storagevolumes() + elif name == 'SE_StorageSynchronized_SV_SV': + result = self._enum_syncsvsvs() + elif name == 'CIM_ProtocolControllerForUnit': + result = self._enum_unitnames() + elif name == 'EMC_LunMaskingSCSIProtocolController': + result = self._enum_lunmaskctrls() + elif name == 'EMC_StorageProcessorSystem': + result = self._enum_processors() + else: + result = self._default_enum() + return result + + def EnumerateInstances(self, name): + result = None + if 
name == 'EMC_VirtualProvisioningPool': + result = self._enum_pool_details() + elif name == 'EMC_UnifiedStoragePool': + result = self._enum_pool_details() + else: + result = self._default_enum() + return result + + def GetInstance(self, objectpath, LocalOnly=False): + try: + name = objectpath['CreationClassName'] + except KeyError: + name = objectpath.classname + result = None + if name == 'Clar_StorageVolume': + result = self._getinstance_storagevolume(objectpath) + elif name == 'CIM_ProtocolControllerForUnit': + result = self._getinstance_unit(objectpath) + elif name == 'Clar_LunMaskingSCSIProtocolController': + result = self._getinstance_lunmask() + elif name == 'SE_ConcreteJob': + result = self._getinstance_job(objectpath) + elif name == 'SE_StorageSynchronized_SV_SV': + result = self._getinstance_syncsvsv(objectpath) + else: + result = self._default_getinstance(objectpath) + return result + + def Associators(self, objectpath, resultClass='EMC_StorageHardwareID'): + result = None + if resultClass == 'EMC_StorageHardwareID': + result = self._assoc_hdwid() + elif resultClass == 'EMC_iSCSIProtocolEndpoint': + result = self._assoc_endpoint() + else: + result = self._default_assoc(objectpath) + return result + + def AssociatorNames(self, objectpath, + resultClass='EMC_LunMaskingSCSIProtocolController'): + result = None + if resultClass == 'EMC_LunMaskingSCSIProtocolController': + result = self._assocnames_lunmaskctrl() + else: + result = self._default_assocnames(objectpath) + return result + + def ReferenceNames(self, objectpath, + ResultClass='CIM_ProtocolControllerForUnit'): + result = None + if ResultClass == 'CIM_ProtocolControllerForUnit': + result = self._ref_unitnames() + else: + result = self._default_ref(objectpath) + return result + + def _ref_unitnames(self): + units = [] + unit = {} + + dependent = {} + dependent['CreationClassName'] = vol_creationclass + dependent['DeviceID'] = test_volume['id'] + dependent['ElementName'] = test_volume['name'] + dependent['SystemName'] = storage_system + + antecedent = {} + antecedent['CreationClassName'] = lunmask_creationclass + antecedent['DeviceID'] = lunmaskctrl_id + antecedent['SystemName'] = storage_system + + unit['Dependent'] = dependent + unit['Antecedent'] = antecedent + unit['CreationClassName'] = unit_creationclass + units.append(unit) + + return units + + def _default_ref(self, objectpath): + return objectpath + + def _assoc_hdwid(self): + assocs = [] + assoc = {} + assoc['StorageID'] = initiator1 + assocs.append(assoc) + return assocs + + def _assoc_endpoint(self): + assocs = [] + assoc = {} + assoc['Name'] = 'iqn.1992-04.com.emc:cx.apm00123907237.a8,t,0x0001' + assoc['SystemName'] = storage_system + '+SP_A+8' + assocs.append(assoc) + return assocs + + def _default_assoc(self, objectpath): + return objectpath + + def _assocnames_lunmaskctrl(self): + return self._enum_lunmaskctrls() + + def _default_assocnames(self, objectpath): + return objectpath + + def _getinstance_storagevolume(self, objectpath): + instance = EMC_StorageVolume() + vols = self._enum_storagevolumes() + for vol in vols: + if vol['DeviceID'] == objectpath['DeviceID']: + instance = vol + break + return instance + + def _getinstance_syncsvsv(self, objectpath): + foundsync = None + syncs = self._enum_syncsvsvs() + for sync in syncs: + if (sync['SyncedElement'] == objectpath['SyncedElement'] and + sync['SystemElement'] == objectpath['SystemElement']): + foundsync = sync + break + return foundsync + + def _getinstance_lunmask(self): + lunmask = {} + 
lunmask['CreationClassName'] = lunmask_creationclass + lunmask['DeviceID'] = lunmaskctrl_id + lunmask['SystemName'] = storage_system + return lunmask + + def _getinstance_unit(self, objectpath): + unit = {} + + dependent = {} + dependent['CreationClassName'] = vol_creationclass + dependent['DeviceID'] = test_volume['id'] + dependent['ElementName'] = test_volume['name'] + dependent['SystemName'] = storage_system + + antecedent = {} + antecedent['CreationClassName'] = lunmask_creationclass + antecedent['DeviceID'] = lunmaskctrl_id + antecedent['SystemName'] = storage_system + + unit['Dependent'] = dependent + unit['Antecedent'] = antecedent + unit['CreationClassName'] = unit_creationclass + unit['DeviceNumber'] = '0' + + return unit + + def _getinstance_job(self, jobpath): + jobinstance = {} + jobinstance['InstanceID'] = '9999' + if jobpath['status'] == 'failure': + jobinstance['JobState'] = 10 + jobinstance['ErrorCode'] = 99 + jobinstance['ErrorDescription'] = 'Failure' + else: + jobinstance['JobState'] = 7 + jobinstance['ErrorCode'] = 0 + jobinstance['ErrorDescription'] = '' + return jobinstance + + def _default_getinstance(self, objectpath): + return objectpath + + def _enum_replicationservices(self): + rep_services = [] + rep_service = {} + rep_service['SystemName'] = storage_system + rep_service['CreationClassName'] = rep_service_creationclass + rep_services.append(rep_service) + return rep_services + + def _enum_stconfsvcs(self): + conf_services = [] + conf_service = {} + conf_service['SystemName'] = storage_system + conf_service['CreationClassName'] = stconf_service_creationclass + conf_services.append(conf_service) + return conf_services + + def _enum_ctrlconfsvcs(self): + conf_services = [] + conf_service = {} + conf_service['SystemName'] = storage_system + conf_service['CreationClassName'] = ctrlconf_service_creationclass + conf_services.append(conf_service) + return conf_services + + def _enum_pools(self): + pools = [] + pool = {} + pool['InstanceID'] = storage_system + '+U+' + storage_type + pool['CreationClassName'] = 'Clar_UnifiedStoragePool' + pools.append(pool) + return pools + + def _enum_pool_details(self): + pools = [] + pool = {} + pool['InstanceID'] = storage_system + '+U+' + storage_type + pool['CreationClassName'] = 'Clar_UnifiedStoragePool' + pool['TotalManagedSpace'] = 12345678 + pool['RemainingManagedSpace'] = 123456 + pools.append(pool) + return pools + + def _enum_storagevolumes(self): + vols = [] + vol = EMC_StorageVolume() + vol['CreationClassName'] = 'Clar_StorageVolume' + vol['ElementName'] = test_volume['name'] + vol['DeviceID'] = test_volume['id'] + vol['SystemName'] = storage_system + vol.path = {'DeviceID': vol['DeviceID']} + vols.append(vol) + + snap_vol = EMC_StorageVolume() + snap_vol['CreationClassName'] = 'Clar_StorageVolume' + snap_vol['ElementName'] = test_snapshot['name'] + snap_vol['DeviceID'] = test_snapshot['id'] + snap_vol['SystemName'] = storage_system + snap_vol.path = {'DeviceID': snap_vol['DeviceID']} + vols.append(snap_vol) + + clone_vol = EMC_StorageVolume() + clone_vol['CreationClassName'] = 'Clar_StorageVolume' + clone_vol['ElementName'] = test_clone['name'] + clone_vol['DeviceID'] = test_clone['id'] + clone_vol['SystemName'] = storage_system + clone_vol.path = {'DeviceID': clone_vol['DeviceID']} + vols.append(clone_vol) + + clone_vol3 = EMC_StorageVolume() + clone_vol3['CreationClassName'] = 'Clar_StorageVolume' + clone_vol3['ElementName'] = test_clone3['name'] + clone_vol3['DeviceID'] = test_clone3['id'] + clone_vol3['SystemName'] = 
storage_system + clone_vol3.path = {'DeviceID': clone_vol3['DeviceID']} + vols.append(clone_vol3) + + snap_vol_vmax = EMC_StorageVolume() + snap_vol_vmax['CreationClassName'] = 'Symm_StorageVolume' + snap_vol_vmax['ElementName'] = test_snapshot_vmax['name'] + snap_vol_vmax['DeviceID'] = test_snapshot_vmax['id'] + snap_vol_vmax['SystemName'] = storage_system_vmax + snap_vol_vmax.path = {'DeviceID': snap_vol_vmax['DeviceID']} + vols.append(snap_vol_vmax) + + failed_snap_replica = EMC_StorageVolume() + failed_snap_replica['CreationClassName'] = 'Clar_StorageVolume' + failed_snap_replica['ElementName'] = failed_snapshot_replica['name'] + failed_snap_replica['DeviceID'] = failed_snapshot_replica['id'] + failed_snap_replica['SystemName'] = storage_system + failed_snap_replica.path = { + 'DeviceID': failed_snap_replica['DeviceID']} + vols.append(failed_snap_replica) + + failed_snap_sync = EMC_StorageVolume() + failed_snap_sync['CreationClassName'] = 'Clar_StorageVolume' + failed_snap_sync['ElementName'] = failed_snapshot_sync['name'] + failed_snap_sync['DeviceID'] = failed_snapshot_sync['id'] + failed_snap_sync['SystemName'] = storage_system + failed_snap_sync.path = { + 'DeviceID': failed_snap_sync['DeviceID']} + vols.append(failed_snap_sync) + + failed_clone_rep = EMC_StorageVolume() + failed_clone_rep['CreationClassName'] = 'Clar_StorageVolume' + failed_clone_rep['ElementName'] = failed_clone_replica['name'] + failed_clone_rep['DeviceID'] = failed_clone_replica['id'] + failed_clone_rep['SystemName'] = storage_system + failed_clone_rep.path = { + 'DeviceID': failed_clone_rep['DeviceID']} + vols.append(failed_clone_rep) + + failed_clone_s = EMC_StorageVolume() + failed_clone_s['CreationClassName'] = 'Clar_StorageVolume' + failed_clone_s['ElementName'] = failed_clone_sync['name'] + failed_clone_s['DeviceID'] = failed_clone_sync['id'] + failed_clone_s['SystemName'] = storage_system + failed_clone_s.path = { + 'DeviceID': failed_clone_s['DeviceID']} + vols.append(failed_clone_s) + + failed_delete_vol = EMC_StorageVolume() + failed_delete_vol['CreationClassName'] = 'Clar_StorageVolume' + failed_delete_vol['ElementName'] = 'failed_delete_vol' + failed_delete_vol['DeviceID'] = '99999' + failed_delete_vol['SystemName'] = storage_system + failed_delete_vol.path = {'DeviceID': failed_delete_vol['DeviceID']} + vols.append(failed_delete_vol) + + return vols + + def _enum_syncsvsvs(self): + syncs = [] + + vols = self._enum_storagevolumes() + + sync = self._create_sync(vols[0], vols[1], 100) + syncs.append(sync) + + sync2 = self._create_sync(vols[1], vols[2], 100) + syncs.append(sync2) + + sync3 = self._create_sync(vols[0], vols[3], 100) + syncs.append(sync3) + + objpath1 = vols[1] + for vol in vols: + if vol['ElementName'] == 'failed_snapshot_sync': + objpath2 = vol + break + sync4 = self._create_sync(objpath1, objpath2, 100) + syncs.append(sync4) + + objpath1 = vols[0] + for vol in vols: + if vol['ElementName'] == 'failed_clone_sync': + objpath2 = vol + break + sync5 = self._create_sync(objpath1, objpath2, 100) + syncs.append(sync5) + + return syncs + + def _create_sync(self, objpath1, objpath2, percentsynced): + sync = {} + sync['SyncedElement'] = objpath2 + sync['SystemElement'] = objpath1 + sync['CreationClassName'] = 'SE_StorageSynchronized_SV_SV' + sync['PercentSynced'] = percentsynced + return sync + + def _enum_unitnames(self): + return self._ref_unitnames() + + def _enum_lunmaskctrls(self): + ctrls = [] + ctrl = {} + ctrl['CreationClassName'] = lunmask_creationclass + ctrl['DeviceID'] = 
lunmaskctrl_id + ctrl['SystemName'] = storage_system + ctrls.append(ctrl) + return ctrls + + def _enum_processors(self): + ctrls = [] + ctrl = {} + ctrl['CreationClassName'] = 'Clar_StorageProcessorSystem' + ctrl['Name'] = storage_system + '+SP_A' + ctrls.append(ctrl) + return ctrls + + def _default_enum(self): + names = [] + name = {} + name['Name'] = 'default' + names.append(name) + return names + + +class EMCSMISISCSIDriverTestCase(test.TestCase): + + def setUp(self): + self.tempdir = tempfile.mkdtemp() + super(EMCSMISISCSIDriverTestCase, self).setUp() + self.config_file_path = None + self.create_fake_config_file() + + configuration = mox.MockObject(conf.Configuration) + configuration.cinder_emc_config_file = self.config_file_path + configuration.append_config_values(mox.IgnoreArg()) + + self.stubs.Set(EMCSMISISCSIDriver, '_do_iscsi_discovery', + self.fake_do_iscsi_discovery) + self.stubs.Set(EMCSMISCommon, '_get_ecom_connection', + self.fake_ecom_connection) + driver = EMCSMISISCSIDriver(configuration=configuration) + self.driver = driver + + def create_fake_config_file(self): + doc = Document() + emc = doc.createElement("EMC") + doc.appendChild(emc) + + storagetype = doc.createElement("StorageType") + storagetypetext = doc.createTextNode("gold") + emc.appendChild(storagetype) + storagetype.appendChild(storagetypetext) + + ecomserverip = doc.createElement("EcomServerIp") + ecomserveriptext = doc.createTextNode("1.1.1.1") + emc.appendChild(ecomserverip) + ecomserverip.appendChild(ecomserveriptext) + + ecomserverport = doc.createElement("EcomServerPort") + ecomserverporttext = doc.createTextNode("10") + emc.appendChild(ecomserverport) + ecomserverport.appendChild(ecomserverporttext) + + ecomusername = doc.createElement("EcomUserName") + ecomusernametext = doc.createTextNode("user") + emc.appendChild(ecomusername) + ecomusername.appendChild(ecomusernametext) + + ecompassword = doc.createElement("EcomPassword") + ecompasswordtext = doc.createTextNode("pass") + emc.appendChild(ecompassword) + ecompassword.appendChild(ecompasswordtext) + + self.config_file_path = self.tempdir + '/' + config_file_name + f = open(self.config_file_path, 'w') + doc.writexml(f) + f.close() + + def fake_ecom_connection(self): + conn = FakeEcomConnection() + return conn + + def fake_do_iscsi_discovery(self, volume): + output = [] + item = '10.0.0.3:3260,1 iqn.1992-04.com.emc:cx.apm00123907237.a8' + item2 = '10.0.0.4:3260,2 iqn.1992-04.com.emc:cx.apm00123907237.b8' + output.append(item) + output.append(item2) + return output + + def test_get_volume_stats(self): + self.driver.get_volume_stats(True) + + def test_create_destroy(self): + self.driver.create_volume(test_volume) + self.driver.delete_volume(test_volume) + + def test_create_volume_snapshot_destroy(self): + self.driver.create_volume(test_volume) + self.driver.create_snapshot(test_snapshot) + self.driver.create_volume_from_snapshot( + test_clone, test_snapshot) + self.driver.create_cloned_volume( + test_clone3, test_volume) + self.driver.delete_volume(test_clone) + self.driver.delete_volume(test_clone3) + self.driver.delete_snapshot(test_snapshot) + self.driver.delete_volume(test_volume) + + def test_map_unmap(self): + self.driver.create_volume(test_volume) + export = self.driver.create_export(None, test_volume) + test_volume['provider_location'] = export['provider_location'] + test_volume['EMCCurrentOwningStorageProcessor'] = 'SP_A' + connector = {'initiator': initiator1} + connection_info = self.driver.initialize_connection(test_volume, + connector) + 
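# The fake discovery output above uses the iscsiadm sendtargets + # format, '<portal-ip>:<port>,<tpgt> <target-iqn>'; test_volume is + # pinned to SP_A so the driver has an owning-processor hint when + # choosing between the two fake portals. +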
self.driver.terminate_connection(test_volume, connector) + self.driver.remove_export(None, test_volume) + self.driver.delete_volume(test_volume) + + def test_create_volume_failed(self): + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.create_volume, + test_failed_volume) + + def test_create_volume_snapshot_unsupported(self): + self.driver.create_volume(test_volume) + self.driver.create_snapshot(test_snapshot_vmax) + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.create_volume_from_snapshot, + test_clone, + test_snapshot_vmax) + self.driver.delete_snapshot(test_snapshot_vmax) + self.driver.delete_volume(test_volume) + + def test_create_volume_snapshot_replica_failed(self): + self.driver.create_volume(test_volume) + self.driver.create_snapshot(test_snapshot) + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.create_volume_from_snapshot, + failed_snapshot_replica, + test_snapshot) + self.driver.delete_snapshot(test_snapshot) + self.driver.delete_volume(test_volume) + + def test_create_volume_snapshot_sync_failed(self): + self.driver.create_volume(test_volume) + self.driver.create_snapshot(test_snapshot) + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.create_volume_from_snapshot, + failed_snapshot_sync, + test_snapshot) + self.driver.delete_snapshot(test_snapshot) + self.driver.delete_volume(test_volume) + + def test_create_volume_clone_replica_failed(self): + self.driver.create_volume(test_volume) + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.create_cloned_volume, + failed_clone_replica, + test_volume) + self.driver.delete_volume(test_volume) + + def test_create_volume_clone_sync_failed(self): + self.driver.create_volume(test_volume) + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.create_cloned_volume, + failed_clone_sync, + test_volume) + self.driver.delete_volume(test_volume) + + def test_delete_volume_notfound(self): + notfound_delete_vol = {} + notfound_delete_vol['name'] = 'notfound_delete_vol' + notfound_delete_vol['id'] = '10' + self.driver.delete_volume(notfound_delete_vol) + + def test_delete_volume_failed(self): + self.driver.create_volume(failed_delete_vol) + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.delete_volume, + failed_delete_vol) + + def _cleanup(self): + bExists = os.path.exists(self.config_file_path) + if bExists: + os.remove(self.config_file_path) + shutil.rmtree(self.tempdir) + + def tearDown(self): + self._cleanup() + super(EMCSMISISCSIDriverTestCase, self).tearDown() diff --git a/cinder/tests/test_eqlx.py b/cinder/tests/test_eqlx.py new file mode 100644 index 0000000000..61f9dc32ba --- /dev/null +++ b/cinder/tests/test_eqlx.py @@ -0,0 +1,322 @@ +# Copyright (c) 2013 Dell Inc. +# Copyright 2013 OpenStack LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import time + +import mox +import paramiko + +from cinder import context +from cinder import exception +from cinder.openstack.common import log as logging +from cinder.openstack.common import processutils +from cinder import test +from cinder.volume import configuration as conf +from cinder.volume.drivers import eqlx + + +LOG = logging.getLogger(__name__) + + +class DellEQLSanISCSIDriverTestCase(test.TestCase): + + def setUp(self): + super(DellEQLSanISCSIDriverTestCase, self).setUp() + self.configuration = mox.MockObject(conf.Configuration) + self.configuration.append_config_values(mox.IgnoreArg()) + self.configuration.san_is_local = False + self.configuration.san_ip = "10.0.0.1" + self.configuration.san_login = "foo" + self.configuration.san_password = "bar" + self.configuration.san_ssh_port = 16022 + self.configuration.san_thin_provision = True + self.configuration.eqlx_pool = 'non-default' + self.configuration.eqlx_use_chap = True + self.configuration.eqlx_group_name = 'group-0' + self.configuration.eqlx_cli_timeout = 30 + self.configuration.eqlx_cli_max_retries = 5 + self.configuration.eqlx_chap_login = 'admin' + self.configuration.eqlx_chap_password = 'password' + self.configuration.volume_name_template = 'volume_%s' + self._context = context.get_admin_context() + self.driver = eqlx.DellEQLSanISCSIDriver( + configuration=self.configuration) + self.volume_name = "fakevolume" + self.volid = "fakeid" + self.connector = {'ip': '10.0.0.2', + 'initiator': 'iqn.1993-08.org.debian:01:222', + 'host': 'fakehost'} + self.fake_iqn = 'iqn.2003-10.com.equallogic:group01:25366:fakev' + self.driver._group_ip = '10.0.1.6' + self.properties = { + 'target_discovered': True, + 'target_portal': '%s:3260' % self.driver._group_ip, + 'target_iqn': self.fake_iqn, + 'volume_id': 1} + self._model_update = { + 'provider_location': "%s:3260,1 %s 0" % (self.driver._group_ip, + self.fake_iqn), + 'provider_auth': 'CHAP %s %s' % ( + self.configuration.eqlx_chap_login, + self.configuration.eqlx_chap_password) + } + + def _fake_get_iscsi_properties(self, volume): + return self.properties + + def test_create_volume(self): + self.driver._eql_execute = self.mox.\ + CreateMock(self.driver._eql_execute) + volume = {'name': self.volume_name, 'size': 1} + self.driver._eql_execute('volume', 'create', volume['name'], + "%sG" % (volume['size']), 'pool', + self.configuration.eqlx_pool, + 'thin-provision').\ + AndReturn(['iSCSI target name is %s.' % self.fake_iqn]) + self.mox.ReplayAll() + model_update = self.driver.create_volume(volume) + self.assertEqual(model_update, self._model_update) + + def test_delete_volume(self): + self.driver._eql_execute = self.mox.\ + CreateMock(self.driver._eql_execute) + volume = {'name': self.volume_name, 'size': 1} + self.driver._eql_execute('volume', 'select', volume['name'], 'show') + self.driver._eql_execute('volume', 'select', volume['name'], 'offline') + self.driver._eql_execute('volume', 'delete', volume['name']) + self.mox.ReplayAll() + self.driver.delete_volume(volume) + + def test_delete_absent_volume(self): + self.driver._eql_execute = self.mox.\ + CreateMock(self.driver._eql_execute) + volume = {'name': self.volume_name, 'size': 1, 'id': self.volid} + self.driver._eql_execute('volume', 'select', volume['name'], 'show').\ + AndRaise(processutils.ProcessExecutionError( + stdout='% Error ..... 
does not exist.\n')) + self.mox.ReplayAll() + self.driver.delete_volume(volume) + + def test_ensure_export(self): + self.driver._eql_execute = self.mox.\ + CreateMock(self.driver._eql_execute) + volume = {'name': self.volume_name, 'size': 1} + self.driver._eql_execute('volume', 'select', volume['name'], 'show') + self.mox.ReplayAll() + self.driver.ensure_export({}, volume) + + def test_create_snapshot(self): + self.driver._eql_execute = self.mox.\ + CreateMock(self.driver._eql_execute) + snapshot = {'name': 'fakesnap', 'volume_name': 'fakevolume_name'} + snap_name = 'fake_snap_name' + self.driver._eql_execute('volume', 'select', snapshot['volume_name'], + 'snapshot', 'create-now').\ + AndReturn(['Snapshot name is %s' % snap_name]) + self.driver._eql_execute('volume', 'select', snapshot['volume_name'], + 'snapshot', 'rename', snap_name, + snapshot['name']) + self.mox.ReplayAll() + self.driver.create_snapshot(snapshot) + + def test_create_volume_from_snapshot(self): + self.driver._eql_execute = self.mox.\ + CreateMock(self.driver._eql_execute) + snapshot = {'name': 'fakesnap', 'volume_name': 'fakevolume_name'} + volume = {'name': self.volume_name} + self.driver._eql_execute('volume', 'select', snapshot['volume_name'], + 'snapshot', 'select', snapshot['name'], + 'clone', volume['name']).\ + AndReturn(['iSCSI target name is %s.' % self.fake_iqn]) + self.mox.ReplayAll() + model_update = self.driver.create_volume_from_snapshot(volume, + snapshot) + self.assertEqual(model_update, self._model_update) + + def test_create_cloned_volume(self): + self.driver._eql_execute = self.mox.\ + CreateMock(self.driver._eql_execute) + src_vref = {'id': 'fake_uuid'} + volume = {'name': self.volume_name} + src_volume_name = self.configuration.\ + volume_name_template % src_vref['id'] + self.driver._eql_execute('volume', 'select', src_volume_name, 'clone', + volume['name']).\ + AndReturn(['iSCSI target name is %s.' 
% self.fake_iqn]) + self.mox.ReplayAll() + model_update = self.driver.create_cloned_volume(volume, src_vref) + self.assertEqual(model_update, self._model_update) + + def test_delete_snapshot(self): + self.driver._eql_execute = self.mox.\ + CreateMock(self.driver._eql_execute) + snapshot = {'name': 'fakesnap', 'volume_name': 'fakevolume_name'} + self.driver._eql_execute('volume', 'select', snapshot['volume_name'], + 'snapshot', 'delete', snapshot['name']) + self.mox.ReplayAll() + self.driver.delete_snapshot(snapshot) + + def test_extend_volume(self): + new_size = '200' + self.driver._eql_execute = self.mox.\ + CreateMock(self.driver._eql_execute) + volume = {'name': self.volume_name, 'size': 100} + self.driver._eql_execute('volume', 'select', volume['name'], + 'size', "%sG" % new_size) + self.mox.ReplayAll() + self.driver.extend_volume(volume, new_size) + + def test_initialize_connection(self): + self.driver._eql_execute = self.mox.\ + CreateMock(self.driver._eql_execute) + volume = {'name': self.volume_name} + self.stubs.Set(self.driver, "_get_iscsi_properties", + self._fake_get_iscsi_properties) + self.driver._eql_execute('volume', 'select', volume['name'], 'access', + 'create', 'initiator', + self.connector['initiator'], + 'authmethod chap', + 'username', + self.configuration.eqlx_chap_login) + self.mox.ReplayAll() + iscsi_properties = self.driver.initialize_connection(volume, + self.connector) + self.assertEqual(iscsi_properties['data'], + self._fake_get_iscsi_properties(volume)) + + def test_terminate_connection(self): + self.driver._eql_execute = self.mox.\ + CreateMock(self.driver._eql_execute) + volume = {'name': self.volume_name} + self.driver._eql_execute('volume', 'select', volume['name'], 'access', + 'delete', '1') + self.mox.ReplayAll() + self.driver.terminate_connection(volume, self.connector) + + def test_do_setup(self): + self.driver._eql_execute = self.mox.\ + CreateMock(self.driver._eql_execute) + fake_group_ip = '10.1.2.3' + for feature in ('confirmation', 'paging', 'events', 'formatoutput'): + self.driver._eql_execute('cli-settings', feature, 'off') + self.driver._eql_execute('grpparams', 'show').\ + AndReturn(['Group-Ipaddress: %s' % fake_group_ip]) + self.mox.ReplayAll() + self.driver.do_setup(self._context) + self.assertEqual(fake_group_ip, self.driver._group_ip) + + def test_update_volume_stats(self): + self.driver._eql_execute = self.mox.\ + CreateMock(self.driver._eql_execute) + self.driver._eql_execute('pool', 'select', + self.configuration.eqlx_pool, 'show').\ + AndReturn(['TotalCapacity: 111GB', 'FreeSpace: 11GB']) + self.mox.ReplayAll() + self.driver._update_volume_stats() + self.assertEqual(self.driver._stats['total_capacity_gb'], 111.0) + self.assertEqual(self.driver._stats['free_capacity_gb'], 11.0) + + def test_get_volume_stats(self): + self.driver._eql_execute = self.mox.\ + CreateMock(self.driver._eql_execute) + self.driver._eql_execute('pool', 'select', + self.configuration.eqlx_pool, 'show').\ + AndReturn(['TotalCapacity: 111GB', 'FreeSpace: 11GB']) + self.mox.ReplayAll() + stats = self.driver.get_volume_stats(refresh=True) + self.assertEqual(stats['total_capacity_gb'], float('111.0')) + self.assertEqual(stats['free_capacity_gb'], float('11.0')) + self.assertEqual(stats['vendor_name'], 'Dell') + + def test_get_space_in_gb(self): + self.assertEqual(self.driver._get_space_in_gb('123.0GB'), 123.0) + self.assertEqual(self.driver._get_space_in_gb('123.0TB'), 123.0 * 1024) + self.assertEqual(self.driver._get_space_in_gb('1024.0MB'), 1.0) + + def 
test_get_output(self): + + def _fake_recv(ignore_arg): + return '%s> ' % self.configuration.eqlx_group_name + + chan = self.mox.CreateMock(paramiko.Channel) + self.stubs.Set(chan, "recv", _fake_recv) + self.assertEqual(self.driver._get_output(chan), [_fake_recv(None)]) + + def test_get_prefixed_value(self): + lines = ['Line1 passed', 'Line1 failed'] + prefix = ['Line1', 'Line2'] + expected_output = [' passed', None] + self.assertEqual(self.driver._get_prefixed_value(lines, prefix[0]), + expected_output[0]) + self.assertEqual(self.driver._get_prefixed_value(lines, prefix[1]), + expected_output[1]) + + def test_ssh_execute(self): + ssh = self.mox.CreateMock(paramiko.SSHClient) + chan = self.mox.CreateMock(paramiko.Channel) + transport = self.mox.CreateMock(paramiko.Transport) + self.mox.StubOutWithMock(self.driver, '_get_output') + self.mox.StubOutWithMock(chan, 'invoke_shell') + expected_output = ['NoError: test run'] + ssh.get_transport().AndReturn(transport) + transport.open_session().AndReturn(chan) + chan.invoke_shell() + self.driver._get_output(chan).AndReturn(expected_output) + cmd = 'this is dummy command' + chan.send('stty columns 255' + '\r') + self.driver._get_output(chan).AndReturn(expected_output) + chan.send(cmd + '\r') + self.driver._get_output(chan).AndReturn(expected_output) + chan.close() + self.mox.ReplayAll() + self.assertEqual(self.driver._ssh_execute(ssh, cmd), expected_output) + + def test_ssh_execute_error(self): + ssh = self.mox.CreateMock(paramiko.SSHClient) + chan = self.mox.CreateMock(paramiko.Channel) + transport = self.mox.CreateMock(paramiko.Transport) + self.mox.StubOutWithMock(self.driver, '_get_output') + self.mox.StubOutWithMock(ssh, 'get_transport') + self.mox.StubOutWithMock(chan, 'invoke_shell') + expected_output = ['Error: test run', '% Error'] + ssh.get_transport().AndReturn(transport) + transport.open_session().AndReturn(chan) + chan.invoke_shell() + self.driver._get_output(chan).AndReturn(expected_output) + cmd = 'this is dummy command' + chan.send('stty columns 255' + '\r') + self.driver._get_output(chan).AndReturn(expected_output) + chan.send(cmd + '\r') + self.driver._get_output(chan).AndReturn(expected_output) + chan.close() + self.mox.ReplayAll() + self.assertRaises(processutils.ProcessExecutionError, + self.driver._ssh_execute, ssh, cmd) + + def test_with_timeout(self): + @eqlx.with_timeout + def no_timeout(cmd, *args, **kwargs): + return 'no timeout' + + @eqlx.with_timeout + def w_timeout(cmd, *args, **kwargs): + time.sleep(1) + + self.assertEqual(no_timeout('fake cmd'), 'no timeout') + self.assertRaises(exception.VolumeBackendAPIException, + w_timeout, 'fake cmd', timeout=0.1) + + def test_local_path(self): + self.assertRaises(NotImplementedError, self.driver.local_path, '') diff --git a/cinder/tests/test_exception.py b/cinder/tests/test_exception.py index f87a71cef5..f4c5f48002 100644 --- a/cinder/tests/test_exception.py +++ b/cinder/tests/test_exception.py @@ -1,4 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. @@ -16,9 +15,8 @@ # License for the specific language governing permissions and limitations # under the License. 
-from cinder import test from cinder import exception -from cinder import utils +from cinder import test class FakeNotifier(object): @@ -56,36 +54,38 @@ class FakeCinderException(exception.CinderException): message = "default message" exc = FakeCinderException() - self.assertEquals(unicode(exc), 'default message') + self.assertEqual(unicode(exc), 'default message') def test_error_msg(self): - self.assertEquals(unicode(exception.CinderException('test')), - 'test') + self.assertEqual(unicode(exception.CinderException('test')), 'test') def test_default_error_msg_with_kwargs(self): class FakeCinderException(exception.CinderException): message = "default message: %(code)s" exc = FakeCinderException(code=500) - self.assertEquals(unicode(exc), 'default message: 500') + self.assertEqual(unicode(exc), 'default message: 500') def test_error_msg_exception_with_kwargs(self): + # NOTE(dprince): disable format errors for this test + self.flags(fatal_exception_format_errors=False) + class FakeCinderException(exception.CinderException): message = "default message: %(mispelled_code)s" exc = FakeCinderException(code=500) - self.assertEquals(unicode(exc), 'default message: %(mispelled_code)s') + self.assertEqual(unicode(exc), 'default message: %(mispelled_code)s') def test_default_error_code(self): class FakeCinderException(exception.CinderException): code = 404 exc = FakeCinderException() - self.assertEquals(exc.kwargs['code'], 404) + self.assertEqual(exc.kwargs['code'], 404) def test_error_code_from_kwarg(self): class FakeCinderException(exception.CinderException): code = 500 exc = FakeCinderException(code=404) - self.assertEquals(exc.kwargs['code'], 404) + self.assertEqual(exc.kwargs['code'], 404) diff --git a/cinder/tests/test_flags.py b/cinder/tests/test_flags.py deleted file mode 100644 index afb1eab17c..0000000000 --- a/cinder/tests/test_flags.py +++ /dev/null @@ -1,96 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# Copyright 2011 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -from cinder import flags -from cinder.openstack.common import cfg -from cinder import test - -FLAGS = flags.FLAGS -FLAGS.register_opt(cfg.StrOpt('flags_unittest', - default='foo', - help='for testing purposes only')) - - -class FlagsTestCase(test.TestCase): - - def setUp(self): - super(FlagsTestCase, self).setUp() - - def test_declare(self): - self.assert_('answer' not in FLAGS) - flags.DECLARE('answer', 'cinder.tests.declare_flags') - self.assert_('answer' in FLAGS) - self.assertEqual(FLAGS.answer, 42) - - # Make sure we don't overwrite anything - FLAGS.set_override('answer', 256) - self.assertEqual(FLAGS.answer, 256) - flags.DECLARE('answer', 'cinder.tests.declare_flags') - self.assertEqual(FLAGS.answer, 256) - - def test_getopt_non_interspersed_args(self): - self.assert_('runtime_answer' not in FLAGS) - - argv = ['flags_test', 'extra_arg', '--runtime_answer=60'] - args = flags.parse_args(argv, default_config_files=[]) - self.assertEqual(len(args), 3) - self.assertEqual(argv, args) - - def test_runtime_and_unknown_flags(self): - self.assert_('runtime_answer' not in FLAGS) - import cinder.tests.runtime_flags - self.assert_('runtime_answer' in FLAGS) - self.assertEqual(FLAGS.runtime_answer, 54) - - def test_long_vs_short_flags(self): - FLAGS.clear() - FLAGS.register_cli_opt(cfg.StrOpt('duplicate_answer_long', - default='val', - help='desc')) - argv = ['flags_test', '--duplicate_answer=60', 'extra_arg'] - args = flags.parse_args(argv, default_config_files=[]) - - self.assert_('duplicate_answer' not in FLAGS) - self.assert_(FLAGS.duplicate_answer_long, 60) - - FLAGS.clear() - FLAGS.register_cli_opt(cfg.IntOpt('duplicate_answer', - default=60, - help='desc')) - args = flags.parse_args(argv, default_config_files=[]) - self.assertEqual(FLAGS.duplicate_answer, 60) - self.assertEqual(FLAGS.duplicate_answer_long, 'val') - - def test_flag_leak_left(self): - self.assertEqual(FLAGS.flags_unittest, 'foo') - self.flags(flags_unittest='bar') - self.assertEqual(FLAGS.flags_unittest, 'bar') - - def test_flag_leak_right(self): - self.assertEqual(FLAGS.flags_unittest, 'foo') - self.flags(flags_unittest='bar') - self.assertEqual(FLAGS.flags_unittest, 'bar') - - def test_flag_overrides(self): - self.assertEqual(FLAGS.flags_unittest, 'foo') - self.flags(flags_unittest='bar') - self.assertEqual(FLAGS.flags_unittest, 'bar') - FLAGS.reset() - self.assertEqual(FLAGS.flags_unittest, 'foo') diff --git a/cinder/tests/test_glusterfs.py b/cinder/tests/test_glusterfs.py new file mode 100644 index 0000000000..ed81dbb828 --- /dev/null +++ b/cinder/tests/test_glusterfs.py @@ -0,0 +1,1648 @@ + +# Copyright (c) 2013 Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+"""Unit tests for the GlusterFS driver module.""" + +import errno +import os +import tempfile + +import mox as mox_lib +from mox import IgnoreArg +from mox import IsA +from mox import stubout + +from cinder import context +from cinder import db +from cinder import exception +from cinder.image import image_utils +from cinder.openstack.common import imageutils +from cinder.openstack.common import processutils as putils +from cinder import test +from cinder import units +from cinder import utils +from cinder.volume import configuration as conf +from cinder.volume.drivers import glusterfs + + +class DumbVolume(object): + fields = {} + + def __setitem__(self, key, value): + self.fields[key] = value + + def __getitem__(self, item): + return self.fields[item] + + +class GlusterFsDriverTestCase(test.TestCase): + """Test case for GlusterFS driver.""" + + TEST_EXPORT1 = 'glusterfs-host1:/export' + TEST_EXPORT2 = 'glusterfs-host2:/export' + TEST_EXPORT2_OPTIONS = '-o backupvolfile-server=glusterfs-backup1' + TEST_SIZE_IN_GB = 1 + TEST_MNT_POINT = '/mnt/glusterfs' + TEST_MNT_POINT_BASE = '/mnt/test' + TEST_LOCAL_PATH = '/mnt/glusterfs/volume-123' + TEST_FILE_NAME = 'test.txt' + TEST_SHARES_CONFIG_FILE = '/etc/cinder/test-shares.conf' + VOLUME_UUID = 'abcdefab-cdef-abcd-efab-cdefabcdefab' + SNAP_UUID = 'bacadaca-baca-daca-baca-dacadacadaca' + SNAP_UUID_2 = 'bebedede-bebe-dede-bebe-dedebebedede' + + def setUp(self): + super(GlusterFsDriverTestCase, self).setUp() + self._mox = mox_lib.Mox() + self._configuration = mox_lib.MockObject(conf.Configuration) + self._configuration.append_config_values(mox_lib.IgnoreArg()) + self._configuration.glusterfs_shares_config = \ + self.TEST_SHARES_CONFIG_FILE + self._configuration.glusterfs_mount_point_base = \ + self.TEST_MNT_POINT_BASE + self._configuration.glusterfs_disk_util = 'df' + self._configuration.glusterfs_sparsed_volumes = True + self._configuration.glusterfs_qcow2_volumes = False + + self.stubs = stubout.StubOutForTesting() + self._driver =\ + glusterfs.GlusterfsDriver(configuration=self._configuration) + self._driver.shares = {} + + def tearDown(self): + self._mox.UnsetStubs() + self.stubs.UnsetAll() + super(GlusterFsDriverTestCase, self).tearDown() + + def stub_out_not_replaying(self, obj, attr_name): + attr_to_replace = getattr(obj, attr_name) + stub = mox_lib.MockObject(attr_to_replace) + self.stubs.Set(obj, attr_name, stub) + + def test_local_path(self): + """local_path common use case.""" + glusterfs.CONF.glusterfs_mount_point_base = self.TEST_MNT_POINT_BASE + drv = self._driver + + volume = DumbVolume() + volume['provider_location'] = self.TEST_EXPORT1 + volume['name'] = 'volume-123' + + self.assertEqual( + '/mnt/test/ab03ab34eaca46a5fb81878f7e9b91fc/volume-123', + drv.local_path(volume)) + + def test_mount_glusterfs_should_mount_correctly(self): + """_mount_glusterfs common case usage.""" + mox = self._mox + drv = self._driver + + mox.StubOutWithMock(drv, '_execute') + drv._execute('mkdir', '-p', self.TEST_MNT_POINT) + drv._execute('mount', '-t', 'glusterfs', self.TEST_EXPORT1, + self.TEST_MNT_POINT, run_as_root=True) + + mox.ReplayAll() + + drv._mount_glusterfs(self.TEST_EXPORT1, self.TEST_MNT_POINT) + + mox.VerifyAll() + + def test_mount_glusterfs_should_suppress_already_mounted_error(self): + """_mount_glusterfs should suppress already mounted error if + ensure=True + """ + mox = self._mox + drv = self._driver + + mox.StubOutWithMock(drv, '_execute') + drv._execute('mkdir', '-p', self.TEST_MNT_POINT) + drv._execute('mount', '-t', 'glusterfs', 
self.TEST_EXPORT1, + self.TEST_MNT_POINT, run_as_root=True).\ + AndRaise(putils.ProcessExecutionError( + stderr='is busy or already mounted')) + + mox.ReplayAll() + + drv._mount_glusterfs(self.TEST_EXPORT1, self.TEST_MNT_POINT, + ensure=True) + + mox.VerifyAll() + + def test_mount_glusterfs_should_reraise_already_mounted_error(self): + """_mount_glusterfs should not suppress already mounted error + if ensure=False + """ + mox = self._mox + drv = self._driver + + mox.StubOutWithMock(drv, '_execute') + drv._execute('mkdir', '-p', self.TEST_MNT_POINT) + drv._execute( + 'mount', + '-t', + 'glusterfs', + self.TEST_EXPORT1, + self.TEST_MNT_POINT, + run_as_root=True). \ + AndRaise(putils.ProcessExecutionError(stderr='is busy or ' + 'already mounted')) + + mox.ReplayAll() + + self.assertRaises(putils.ProcessExecutionError, drv._mount_glusterfs, + self.TEST_EXPORT1, self.TEST_MNT_POINT, + ensure=False) + + mox.VerifyAll() + + def test_mount_glusterfs_should_create_mountpoint_if_not_yet(self): + """_mount_glusterfs should create the mountpoint if it doesn't exist.""" + mox = self._mox + drv = self._driver + + mox.StubOutWithMock(drv, '_execute') + drv._execute('mkdir', '-p', self.TEST_MNT_POINT) + drv._execute(*([IgnoreArg()] * 5), run_as_root=IgnoreArg()) + + mox.ReplayAll() + + drv._mount_glusterfs(self.TEST_EXPORT1, self.TEST_MNT_POINT) + + mox.VerifyAll() + + def test_get_hash_str(self): + """_get_hash_str should calculate the correct value.""" + drv = self._driver + + self.assertEqual('ab03ab34eaca46a5fb81878f7e9b91fc', + drv._get_hash_str(self.TEST_EXPORT1)) + + def test_get_mount_point_for_share(self): + """_get_mount_point_for_share should calculate the correct value.""" + drv = self._driver + + glusterfs.CONF.glusterfs_mount_point_base = self.TEST_MNT_POINT_BASE + + self.assertEqual('/mnt/test/ab03ab34eaca46a5fb81878f7e9b91fc', + drv._get_mount_point_for_share( + self.TEST_EXPORT1)) + + def test_get_available_capacity_with_df(self): + """_get_available_capacity should calculate the correct value.""" + mox = self._mox + drv = self._driver + + df_total_size = 2620544 + df_avail = 1490560 + df_head = 'Filesystem 1K-blocks Used Available Use% Mounted on\n' + df_data = 'glusterfs-host:/export %d 996864 %d 41%% /mnt' % \ + (df_total_size, df_avail) + df_output = df_head + df_data + + setattr(glusterfs.CONF, 'glusterfs_disk_util', 'df') + + mox.StubOutWithMock(drv, '_get_mount_point_for_share') + drv._get_mount_point_for_share(self.TEST_EXPORT1).\ + AndReturn(self.TEST_MNT_POINT) + + mox.StubOutWithMock(drv, '_execute') + drv._execute('df', '--portability', '--block-size', '1', + self.TEST_MNT_POINT, + run_as_root=True).AndReturn((df_output, None)) + + mox.ReplayAll() + + self.assertEqual((df_avail, df_total_size), + drv._get_available_capacity(self.TEST_EXPORT1)) + + mox.VerifyAll() + + delattr(glusterfs.CONF, 'glusterfs_disk_util') + + def test_load_shares_config(self): + mox = self._mox + drv = self._driver + + drv.configuration.glusterfs_shares_config = ( + self.TEST_SHARES_CONFIG_FILE) + + mox.StubOutWithMock(drv, '_read_config_file') + config_data = [] + config_data.append(self.TEST_EXPORT1) + config_data.append('#' + self.TEST_EXPORT2) + config_data.append(self.TEST_EXPORT2 + ' ' + self.TEST_EXPORT2_OPTIONS) + config_data.append('broken:share_format') + config_data.append('') + drv._read_config_file(self.TEST_SHARES_CONFIG_FILE).\ + AndReturn(config_data) + mox.ReplayAll() + + drv._load_shares_config(drv.configuration.glusterfs_shares_config) + + self.assertIn(self.TEST_EXPORT1, drv.shares)
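+ # Of the five config lines fed to _read_config_file above, only the + # two well-formed '<host>:/<export> [options]' entries should + # survive parsing: the '#'-commented share, the malformed + # 'broken:share_format' line and the blank line are dropped, while + # EXPORT2's trailing option string is kept as its value.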
+ self.assertIn(self.TEST_EXPORT2, drv.shares) + self.assertEqual(len(drv.shares), 2) + + self.assertEqual(drv.shares[self.TEST_EXPORT2], + self.TEST_EXPORT2_OPTIONS) + + mox.VerifyAll() + + def test_ensure_share_mounted(self): + """_ensure_share_mounted simple use case.""" + mox = self._mox + drv = self._driver + + mox.StubOutWithMock(utils, 'get_file_mode') + mox.StubOutWithMock(utils, 'get_file_gid') + mox.StubOutWithMock(drv, '_execute') + mox.StubOutWithMock(drv, '_ensure_share_writable') + + mox.StubOutWithMock(drv, '_get_mount_point_for_share') + drv._get_mount_point_for_share(self.TEST_EXPORT1).\ + AndReturn(self.TEST_MNT_POINT) + + mox.StubOutWithMock(drv, '_mount_glusterfs') + drv._mount_glusterfs(self.TEST_EXPORT1, self.TEST_MNT_POINT, + ensure=True) + + utils.get_file_gid(self.TEST_MNT_POINT).AndReturn(333333) + + utils.get_file_mode(self.TEST_MNT_POINT).AndReturn(0o777) + + drv._ensure_share_writable(self.TEST_MNT_POINT) + + drv._execute('chgrp', IgnoreArg(), self.TEST_MNT_POINT, + run_as_root=True) + + mox.ReplayAll() + + drv._ensure_share_mounted(self.TEST_EXPORT1) + + mox.VerifyAll() + + def test_ensure_shares_mounted_should_save_mounting_successfully(self): + """_ensure_shares_mounted should save the share if it was mounted successfully.""" + mox = self._mox + drv = self._driver + + mox.StubOutWithMock(drv, '_read_config_file') + config_data = [] + config_data.append(self.TEST_EXPORT1) + drv._read_config_file(self.TEST_SHARES_CONFIG_FILE).\ + AndReturn(config_data) + + mox.StubOutWithMock(drv, '_ensure_share_mounted') + drv._ensure_share_mounted(self.TEST_EXPORT1) + + mox.ReplayAll() + + drv._ensure_shares_mounted() + + self.assertEqual(1, len(drv._mounted_shares)) + self.assertEqual(self.TEST_EXPORT1, drv._mounted_shares[0]) + + mox.VerifyAll() + + def test_ensure_shares_mounted_should_not_save_mounting_with_error(self): + """_ensure_shares_mounted should not save the share if it failed to mount.""" + mox = self._mox + drv = self._driver + + mox.StubOutWithMock(drv, '_read_config_file') + config_data = [] + config_data.append(self.TEST_EXPORT1) + drv._read_config_file(self.TEST_SHARES_CONFIG_FILE).\ + AndReturn(config_data) + + mox.StubOutWithMock(drv, '_ensure_share_mounted') + drv._ensure_share_mounted(self.TEST_EXPORT1).AndRaise(Exception()) + + mox.ReplayAll() + + drv._ensure_shares_mounted() + + self.assertEqual(0, len(drv._mounted_shares)) + + mox.VerifyAll() + + def test_setup_should_throw_error_if_shares_config_not_configured(self): + """do_setup should throw error if shares config is not configured.""" + drv = self._driver + + glusterfs.CONF.glusterfs_shares_config = self.TEST_SHARES_CONFIG_FILE + + self.assertRaises(exception.GlusterfsException, + drv.do_setup, IsA(context.RequestContext)) + + def test_setup_should_throw_exception_if_client_is_not_installed(self): + """do_setup should throw exception if client is not installed.""" + mox = self._mox + drv = self._driver + + glusterfs.CONF.glusterfs_shares_config = self.TEST_SHARES_CONFIG_FILE + + mox.StubOutWithMock(os.path, 'exists') + os.path.exists(self.TEST_SHARES_CONFIG_FILE).AndReturn(True) + mox.StubOutWithMock(drv, '_execute') + drv._execute('mount.glusterfs', check_exit_code=False).\ + AndRaise(OSError(errno.ENOENT, 'No such file or directory')) + + mox.ReplayAll() + + self.assertRaises(exception.GlusterfsException, + drv.do_setup, IsA(context.RequestContext)) + + mox.VerifyAll() + + def _fake_load_shares_config(self, conf): + self._driver.shares = {'127.7.7.7:/gluster1': None} + + def _fake_NamedTemporaryFile(self, 
prefix=None, dir=None): + raise OSError('Permission denied!') + + def test_setup_set_share_permissions(self): + mox = self._mox + drv = self._driver + + glusterfs.CONF.glusterfs_shares_config = self.TEST_SHARES_CONFIG_FILE + + self.stubs.Set(drv, '_load_shares_config', + self._fake_load_shares_config) + self.stubs.Set(tempfile, 'NamedTemporaryFile', + self._fake_NamedTemporaryFile) + mox.StubOutWithMock(os.path, 'exists') + mox.StubOutWithMock(drv, '_execute') + mox.StubOutWithMock(utils, 'get_file_gid') + mox.StubOutWithMock(utils, 'get_file_mode') + mox.StubOutWithMock(os, 'getegid') + + drv._execute('mount.glusterfs', check_exit_code=False) + + drv._execute('mkdir', '-p', mox_lib.IgnoreArg()) + + os.path.exists(self.TEST_SHARES_CONFIG_FILE).AndReturn(True) + + drv._execute('mount', '-t', 'glusterfs', '127.7.7.7:/gluster1', + mox_lib.IgnoreArg(), run_as_root=True) + + utils.get_file_gid(mox_lib.IgnoreArg()).AndReturn(33333) + # perms not writable + utils.get_file_mode(mox_lib.IgnoreArg()).AndReturn(0o000) + + os.getegid().AndReturn(888) + + drv._execute('chgrp', 888, mox_lib.IgnoreArg(), run_as_root=True) + drv._execute('chmod', 'g+w', mox_lib.IgnoreArg(), run_as_root=True) + + mox.ReplayAll() + + drv.do_setup(IsA(context.RequestContext)) + + mox.VerifyAll() + + def test_find_share_should_throw_error_if_there_is_no_mounted_shares(self): + """_find_share should throw error if there are no mounted shares.""" + drv = self._driver + + drv._mounted_shares = [] + + self.assertRaises(exception.GlusterfsNoSharesMounted, + drv._find_share, + self.TEST_SIZE_IN_GB) + + def test_find_share(self): + """_find_share simple use case.""" + mox = self._mox + drv = self._driver + + drv._mounted_shares = [self.TEST_EXPORT1, self.TEST_EXPORT2] + + mox.StubOutWithMock(drv, '_get_available_capacity') + drv._get_available_capacity(self.TEST_EXPORT1).\ + AndReturn((2 * units.GiB, 5 * units.GiB)) + drv._get_available_capacity(self.TEST_EXPORT2).\ + AndReturn((3 * units.GiB, 10 * units.GiB)) + + mox.ReplayAll() + + self.assertEqual(self.TEST_EXPORT2, + drv._find_share(self.TEST_SIZE_IN_GB)) + + mox.VerifyAll() + + def test_find_share_should_throw_error_if_there_is_no_enough_place(self): + """_find_share should throw error if there is no share to host the volume.""" + mox = self._mox + drv = self._driver + + drv._mounted_shares = [self.TEST_EXPORT1, + self.TEST_EXPORT2] + + mox.StubOutWithMock(drv, '_get_available_capacity') + drv._get_available_capacity(self.TEST_EXPORT1).\ + AndReturn((0, 5 * units.GiB)) + drv._get_available_capacity(self.TEST_EXPORT2).\ + AndReturn((0, 10 * units.GiB)) + + mox.ReplayAll() + + self.assertRaises(exception.GlusterfsNoSuitableShareFound, + drv._find_share, + self.TEST_SIZE_IN_GB) + + mox.VerifyAll() + + def _simple_volume(self, id=None): + volume = DumbVolume() + volume['provider_location'] = self.TEST_EXPORT1 + if id is None: + volume['id'] = self.VOLUME_UUID + else: + volume['id'] = id + # volume['name'] mirrors format from db/sqlalchemy/models.py + volume['name'] = 'volume-%s' % volume['id'] + volume['size'] = 10 + volume['status'] = 'available' + + return volume + + def test_create_sparsed_volume(self): + mox = self._mox + drv = self._driver + volume = self._simple_volume() + + setattr(glusterfs.CONF, 'glusterfs_sparsed_volumes', True) + + mox.StubOutWithMock(drv, '_create_sparsed_file') + mox.StubOutWithMock(drv, '_set_rw_permissions_for_all') + + drv._create_sparsed_file(IgnoreArg(), IgnoreArg()) + drv._set_rw_permissions_for_all(IgnoreArg()) + + mox.ReplayAll() + + 
drv._do_create_volume(volume) + + mox.VerifyAll() + + delattr(glusterfs.CONF, 'glusterfs_sparsed_volumes') + + def test_create_nonsparsed_volume(self): + mox = self._mox + drv = self._driver + volume = self._simple_volume() + + old_value = self._configuration.glusterfs_sparsed_volumes + self._configuration.glusterfs_sparsed_volumes = False + + mox.StubOutWithMock(drv, '_create_regular_file') + mox.StubOutWithMock(drv, '_set_rw_permissions_for_all') + + drv._create_regular_file(IgnoreArg(), IgnoreArg()) + drv._set_rw_permissions_for_all(IgnoreArg()) + + mox.ReplayAll() + + drv._do_create_volume(volume) + + mox.VerifyAll() + + self._configuration.glusterfs_sparsed_volumes = old_value + + def test_create_qcow2_volume(self): + (mox, drv) = self._mox, self._driver + + volume = self._simple_volume() + old_value = self._configuration.glusterfs_qcow2_volumes + self._configuration.glusterfs_qcow2_volumes = True + + mox.StubOutWithMock(drv, '_execute') + + hashed = drv._get_hash_str(volume['provider_location']) + path = '%s/%s/volume-%s' % (self.TEST_MNT_POINT_BASE, + hashed, + self.VOLUME_UUID) + + drv._execute('qemu-img', 'create', '-f', 'qcow2', + '-o', 'preallocation=metadata', path, + str(volume['size'] * units.GiB), + run_as_root=True) + + drv._execute('chmod', 'ugo+rw', path, run_as_root=True) + + mox.ReplayAll() + + drv._do_create_volume(volume) + + mox.VerifyAll() + + self._configuration.glusterfs_qcow2_volumes = old_value + + def test_create_volume_should_ensure_glusterfs_mounted(self): + """create_volume ensures shares provided in config are mounted.""" + mox = self._mox + drv = self._driver + + self.stub_out_not_replaying(glusterfs, 'LOG') + self.stub_out_not_replaying(drv, '_find_share') + self.stub_out_not_replaying(drv, '_do_create_volume') + + mox.StubOutWithMock(drv, '_ensure_shares_mounted') + drv._ensure_shares_mounted() + + mox.ReplayAll() + + volume = DumbVolume() + volume['size'] = self.TEST_SIZE_IN_GB + drv.create_volume(volume) + + mox.VerifyAll() + + def test_create_volume_should_return_provider_location(self): + """create_volume should return provider_location with found share.""" + mox = self._mox + drv = self._driver + + self.stub_out_not_replaying(glusterfs, 'LOG') + self.stub_out_not_replaying(drv, '_ensure_shares_mounted') + self.stub_out_not_replaying(drv, '_do_create_volume') + + mox.StubOutWithMock(drv, '_find_share') + drv._find_share(self.TEST_SIZE_IN_GB).AndReturn(self.TEST_EXPORT1) + + mox.ReplayAll() + + volume = DumbVolume() + volume['size'] = self.TEST_SIZE_IN_GB + result = drv.create_volume(volume) + self.assertEqual(self.TEST_EXPORT1, result['provider_location']) + + mox.VerifyAll() + + def test_create_cloned_volume(self): + (mox, drv) = self._mox, self._driver + + mox.StubOutWithMock(drv, '_create_snapshot') + mox.StubOutWithMock(drv, '_delete_snapshot') + mox.StubOutWithMock(drv, '_read_info_file') + mox.StubOutWithMock(image_utils, 'convert_image') + mox.StubOutWithMock(drv, '_copy_volume_from_snapshot') + + volume_file = 'volume-%s' % self.VOLUME_UUID + volume_path = '%s/%s/%s' % (self.TEST_MNT_POINT_BASE, + drv._get_hash_str(self.TEST_EXPORT1), + volume_file) + + volume = self._simple_volume() + src_vref = self._simple_volume() + src_vref['id'] = '375e32b2-804a-49f2-b282-85d1d5a5b9e1' + src_vref['name'] = 'volume-%s' % src_vref['id'] + volume_file = 'volume-%s' % src_vref['id'] + volume_path = '%s/%s/%s' % (self.TEST_MNT_POINT_BASE, + drv._get_hash_str(self.TEST_EXPORT1), + volume_file) + src_info_path = '%s.info' % volume_path + volume_ref = {'id': 
volume['id'], + 'name': volume['name'], + 'status': volume['status'], + 'provider_location': volume['provider_location'], + 'size': volume['size']} + + snap_ref = {'volume_name': src_vref['name'], + 'name': 'clone-snap-%s' % src_vref['id'], + 'size': src_vref['size'], + 'volume_size': src_vref['size'], + 'volume_id': src_vref['id'], + 'id': 'tmp-snap-%s' % src_vref['id'], + 'volume': src_vref} + + drv._create_snapshot(snap_ref) + + snap_info = {'active': volume_file, + snap_ref['id']: volume_path + '-clone'} + + drv._read_info_file(src_info_path).AndReturn(snap_info) + + drv._copy_volume_from_snapshot(snap_ref, volume_ref, volume['size']) + + drv._delete_snapshot(mox_lib.IgnoreArg()) + + mox.ReplayAll() + + drv.create_cloned_volume(volume, src_vref) + + def test_delete_volume(self): + """delete_volume simple test case.""" + mox = self._mox + drv = self._driver + + self.stub_out_not_replaying(drv, '_ensure_share_mounted') + + volume = DumbVolume() + volume['name'] = 'volume-123' + volume['provider_location'] = self.TEST_EXPORT1 + + mox.StubOutWithMock(drv, 'local_path') + drv.local_path(volume).AndReturn(self.TEST_LOCAL_PATH) + + mox.StubOutWithMock(drv, '_execute') + drv._execute('rm', '-f', self.TEST_LOCAL_PATH, run_as_root=True) + + mox.ReplayAll() + + drv.delete_volume(volume) + + mox.VerifyAll() + + def test_delete_should_ensure_share_mounted(self): + """delete_volume should ensure that corresponding share is mounted.""" + mox = self._mox + drv = self._driver + + self.stub_out_not_replaying(drv, '_execute') + + volume = DumbVolume() + volume['name'] = 'volume-123' + volume['provider_location'] = self.TEST_EXPORT1 + + mox.StubOutWithMock(drv, '_ensure_share_mounted') + drv._ensure_share_mounted(self.TEST_EXPORT1) + + mox.ReplayAll() + + drv.delete_volume(volume) + + mox.VerifyAll() + + def test_delete_should_not_delete_if_provider_location_not_provided(self): + """delete_volume shouldn't delete if provider_location missed.""" + mox = self._mox + drv = self._driver + + self.stub_out_not_replaying(drv, '_ensure_share_mounted') + + volume = DumbVolume() + volume['name'] = 'volume-123' + volume['provider_location'] = None + + mox.StubOutWithMock(drv, '_execute') + + mox.ReplayAll() + + drv.delete_volume(volume) + + mox.VerifyAll() + + def test_create_snapshot(self): + (mox, drv) = self._mox, self._driver + + self.stub_out_not_replaying(drv, '_ensure_share_mounted') + mox.StubOutWithMock(drv, '_create_qcow2_snap_file') + mox.StubOutWithMock(drv, '_read_info_file') + mox.StubOutWithMock(drv, '_write_info_file') + + volume = self._simple_volume() + snap_ref = {'name': 'test snap', + 'volume_id': self.VOLUME_UUID, + 'volume': volume, + 'id': self.SNAP_UUID} + + mox.StubOutWithMock(drv, '_execute') + + vol_filename = 'volume-%s' % self.VOLUME_UUID + snap_filename = '%s.%s' % (vol_filename, self.SNAP_UUID) + + hashed = drv._get_hash_str(self.TEST_EXPORT1) + vol_path = '%s/%s/%s' % (self.TEST_MNT_POINT_BASE, + hashed, + vol_filename) + snap_path = '%s.%s' % (vol_path, self.SNAP_UUID) + info_path = '%s%s' % (vol_path, '.info') + + info_dict = {'active': vol_filename} + drv._read_info_file(info_path, empty_if_missing=True).\ + AndReturn(info_dict) + + drv._create_qcow2_snap_file(snap_ref, vol_filename, snap_path) + + qemu_img_info_output = ("""image: volume-%s + file format: raw + virtual size: 1.0G (1073741824 bytes) + disk size: 152K + """ % self.VOLUME_UUID, '') + + drv._read_info_file(info_path, empty_if_missing=True).\ + AndReturn(info_dict) + + # SNAP_UUID_2 has been removed from dict. 
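+ # Illustrative sketch of the .info file the driver keeps beside + # each volume: a JSON dict whose 'active' key names the file at the + # top of the qcow2 backing chain and whose remaining keys map + # snapshot UUID to snapshot file, e.g. + # {'active': 'volume-<uuid>.<snap_uuid>', + # '<snap_uuid>': 'volume-<uuid>.<snap_uuid>'}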
+ info_file_dict = {'active': 'volume-%s.%s' % + (self.VOLUME_UUID, self.SNAP_UUID), + self.SNAP_UUID: 'volume-%s.%s' % + (self.VOLUME_UUID, self.SNAP_UUID)} + + drv._write_info_file(info_path, info_file_dict) + + mox.ReplayAll() + + drv.create_snapshot(snap_ref) + + mox.VerifyAll() + + def test_delete_snapshot_bottom(self): + """Multiple snapshots exist. + + In this test, path (volume-<UUID>) is backed by + snap_path (volume-<UUID>.<SNAP_UUID>) which is backed by + snap_path_2 (volume-<UUID>.<SNAP_UUID_2>). + + Delete the snapshot identified by SNAP_UUID_2. + + Chain goes from + (SNAP_UUID) (SNAP_UUID_2) + volume-abc -> volume-abc.baca -> volume-abc.bebe + to + (SNAP_UUID) + volume-abc -> volume-abc.baca + """ + (mox, drv) = self._mox, self._driver + + hashed = drv._get_hash_str(self.TEST_EXPORT1) + volume_dir = os.path.join(self.TEST_MNT_POINT_BASE, hashed) + volume_path = '%s/%s/volume-%s' % (self.TEST_MNT_POINT_BASE, + hashed, + self.VOLUME_UUID) + volume_filename = 'volume-%s' % self.VOLUME_UUID + + snap_path = '%s.%s' % (volume_path, self.SNAP_UUID) + snap_path_2 = '%s.%s' % (volume_path, self.SNAP_UUID_2) + snap_file = '%s.%s' % (volume_filename, self.SNAP_UUID) + snap_file_2 = '%s.%s' % (volume_filename, self.SNAP_UUID_2) + info_path = '%s%s' % (volume_path, '.info') + + qemu_img_info_output = """image: volume-%s.%s + file format: qcow2 + virtual size: 1.0G (1073741824 bytes) + disk size: 173K + backing file: %s + """ % (self.VOLUME_UUID, self.SNAP_UUID, volume_filename) + + mox.StubOutWithMock(drv, '_execute') + mox.StubOutWithMock(drv, '_read_file') + mox.StubOutWithMock(drv, '_read_info_file') + mox.StubOutWithMock(drv, '_get_backing_chain_for_path') + mox.StubOutWithMock(drv, '_get_matching_backing_file') + mox.StubOutWithMock(drv, '_write_info_file') + mox.StubOutWithMock(drv, '_ensure_share_writable') + mox.StubOutWithMock(image_utils, 'qemu_img_info') + + drv._ensure_share_writable(volume_dir) + + img_info = imageutils.QemuImgInfo(qemu_img_info_output) + image_utils.qemu_img_info(snap_path_2).AndReturn(img_info) + + info_file_dict = {'active': snap_file_2, + self.SNAP_UUID_2: snap_file_2, + self.SNAP_UUID: snap_file} + + snap_ref = {'name': 'test snap', + 'volume_id': self.VOLUME_UUID, + 'volume': self._simple_volume(), + 'id': self.SNAP_UUID_2} + + snap_path_2_chain = [{self.SNAP_UUID_2: snap_file_2}, + {self.SNAP_UUID: snap_file}, + {'active': snap_file_2}] + + snap_path_chain = [{self.SNAP_UUID: snap_file}, + {'active': snap_file}] + + drv._read_info_file(info_path, empty_if_missing=True).\ + AndReturn(info_file_dict) + + drv._execute('qemu-img', 'commit', snap_path_2, run_as_root=True) + + drv._execute('rm', '-f', snap_path_2, run_as_root=True) + + drv._read_info_file(info_path, empty_if_missing=True).\ + AndReturn(info_file_dict) + + drv._read_info_file(info_path).AndReturn(info_file_dict) + + drv._write_info_file(info_path, info_file_dict) + + mox.ReplayAll() + + drv.delete_snapshot(snap_ref) + + mox.VerifyAll() + + def test_delete_snapshot_middle(self): + """Multiple snapshots exist. + + In this test, path (volume-<UUID>) is backed by + snap_path (volume-<UUID>.<SNAP_UUID>) which is backed by + snap_path_2 (volume-<UUID>.<SNAP_UUID_2>). + + Delete the snapshot identified by SNAP_UUID. 
+ + Chain goes from + (SNAP_UUID) (SNAP_UUID_2) + volume-abc -> volume-abc.baca -> volume-abc.bebe + to (SNAP_UUID_2) + volume-abc -> volume-abc.bebe + """ + (mox, drv) = self._mox, self._driver + + volume = self._simple_volume() + + hashed = drv._get_hash_str(self.TEST_EXPORT1) + volume_file = 'volume-%s' % self.VOLUME_UUID + volume_dir = os.path.join(self.TEST_MNT_POINT_BASE, hashed) + volume_path = '%s/%s/%s' % (self.TEST_MNT_POINT_BASE, + hashed, + volume_file) + + info_path = '%s%s' % (volume_path, '.info') + snap_path = '%s.%s' % (volume_path, self.SNAP_UUID) + snap_file = 'volume-%s.%s' % (self.VOLUME_UUID, self.SNAP_UUID) + snap_path_2 = '%s.%s' % (volume_path, self.SNAP_UUID_2) + snap_file_2 = 'volume-%s.%s' % (self.VOLUME_UUID, self.SNAP_UUID_2) + + qemu_img_info_output_snap_2 = """image: volume-%s.%s + file format: qcow2 + virtual size: 1.0G (1073741824 bytes) + disk size: 173K + backing file: %s + """ % (self.VOLUME_UUID, self.SNAP_UUID_2, + 'volume-%s.%s' % (self.VOLUME_UUID, self.SNAP_UUID_2)) + + qemu_img_info_output_snap_1 = """image: volume-%s.%s + file format: qcow2 + virtual size: 1.0G (1073741824 bytes) + disk size: 122K + backing file: %s + """ % (self.VOLUME_UUID, self.SNAP_UUID, + 'volume-%s.%s' % (self.VOLUME_UUID, self.SNAP_UUID)) + + qemu_img_info_output = """image: volume-%s + file format: qcow2 + virtual size: 1.0G (1073741824 bytes) + disk size: 175K + """ % self.VOLUME_UUID + + mox.StubOutWithMock(drv, '_execute') + mox.StubOutWithMock(drv, '_read_info_file') + mox.StubOutWithMock(drv, '_write_info_file') + mox.StubOutWithMock(drv, '_get_backing_chain_for_path') + mox.StubOutWithMock(drv, 'get_active_image_from_info') + mox.StubOutWithMock(drv, '_ensure_share_writable') + mox.StubOutWithMock(image_utils, 'qemu_img_info') + + info_file_dict = {self.SNAP_UUID_2: 'volume-%s.%s' % + (self.VOLUME_UUID, self.SNAP_UUID_2), + self.SNAP_UUID: 'volume-%s.%s' % + (self.VOLUME_UUID, self.SNAP_UUID)} + + drv._ensure_share_writable(volume_dir) + + info_path = drv._local_path_volume(volume) + '.info' + drv._read_info_file(info_path, empty_if_missing=True).\ + AndReturn(info_file_dict) + + img_info = imageutils.QemuImgInfo(qemu_img_info_output_snap_1) + image_utils.qemu_img_info(snap_path).AndReturn(img_info) + + snap_ref = {'name': 'test snap', + 'volume_id': self.VOLUME_UUID, + 'volume': volume, + 'id': self.SNAP_UUID} + + snap_path_chain = [{'filename': snap_file_2, + 'backing-filename': snap_file}, + {'filename': snap_file, + 'backing-filename': volume_file}] + + drv.get_active_image_from_info(volume).AndReturn(snap_file_2) + drv._get_backing_chain_for_path(volume, snap_path_2).\ + AndReturn(snap_path_chain) + + drv._read_info_file(info_path).AndReturn(info_file_dict) + + drv._execute('qemu-img', 'commit', snap_path_2, run_as_root=True) + + drv._execute('rm', '-f', snap_path_2, run_as_root=True) + + drv._read_info_file(info_path).AndReturn(info_file_dict) + + drv._write_info_file(info_path, info_file_dict) + + mox.ReplayAll() + + drv.delete_snapshot(snap_ref) + + mox.VerifyAll() + + def test_delete_snapshot_not_in_info(self): + """Snapshot not in info file / info file doesn't exist. + + Snapshot creation failed so nothing is on-disk. Driver + should allow operation to succeed so the manager can + remove the snapshot record. + + (Scenario: Snapshot object created in Cinder db but not + on backing storage.) 
+ + """ + (mox, drv) = self._mox, self._driver + + hashed = drv._get_hash_str(self.TEST_EXPORT1) + volume_dir = os.path.join(self.TEST_MNT_POINT_BASE, hashed) + volume_filename = 'volume-%s' % self.VOLUME_UUID + volume_path = os.path.join(volume_dir, volume_filename) + info_path = '%s%s' % (volume_path, '.info') + + mox.StubOutWithMock(drv, '_read_file') + mox.StubOutWithMock(drv, '_read_info_file') + mox.StubOutWithMock(drv, '_ensure_share_writable') + + snap_ref = {'name': 'test snap', + 'volume_id': self.VOLUME_UUID, + 'volume': self._simple_volume(), + 'id': self.SNAP_UUID_2} + + drv._ensure_share_writable(volume_dir) + + drv._read_info_file(info_path, empty_if_missing=True).AndReturn({}) + + mox.ReplayAll() + + drv.delete_snapshot(snap_ref) + + mox.VerifyAll() + + def test_read_info_file(self): + (mox, drv) = self._mox, self._driver + + mox.StubOutWithMock(drv, '_read_file') + hashed = drv._get_hash_str(self.TEST_EXPORT1) + volume_path = '%s/%s/volume-%s' % (self.TEST_MNT_POINT_BASE, + hashed, + self.VOLUME_UUID) + info_path = '%s%s' % (volume_path, '.info') + + drv._read_file(info_path).AndReturn('{"%(id)s": "volume-%(id)s"}' % + {'id': self.VOLUME_UUID}) + + mox.ReplayAll() + + volume = DumbVolume() + volume['id'] = self.VOLUME_UUID + volume['name'] = 'volume-%s' % self.VOLUME_UUID + + info = drv._read_info_file(info_path) + + self.assertEqual(info[self.VOLUME_UUID], + 'volume-%s' % self.VOLUME_UUID) + + mox.VerifyAll() + + def test_extend_volume(self): + (mox, drv) = self._mox, self._driver + + volume = self._simple_volume() + + volume_path = '%s/%s/volume-%s' % (self.TEST_MNT_POINT_BASE, + drv._get_hash_str( + self.TEST_EXPORT1), + self.VOLUME_UUID) + + qemu_img_info_output = """image: volume-%s + file format: qcow2 + virtual size: 1.0G (1073741824 bytes) + disk size: 473K + """ % self.VOLUME_UUID + + img_info = imageutils.QemuImgInfo(qemu_img_info_output) + + mox.StubOutWithMock(drv, '_execute') + mox.StubOutWithMock(drv, 'get_active_image_from_info') + mox.StubOutWithMock(image_utils, 'qemu_img_info') + mox.StubOutWithMock(image_utils, 'resize_image') + + drv.get_active_image_from_info(volume).AndReturn(volume['name']) + + image_utils.qemu_img_info(volume_path).AndReturn(img_info) + + image_utils.resize_image(volume_path, 3) + + mox.ReplayAll() + + drv.extend_volume(volume, 3) + + mox.VerifyAll() + + def test_create_snapshot_online(self): + (mox, drv) = self._mox, self._driver + + volume = self._simple_volume() + volume['status'] = 'in-use' + + hashed = drv._get_hash_str(self.TEST_EXPORT1) + volume_file = 'volume-%s' % self.VOLUME_UUID + volume_path = '%s/%s/%s' % (self.TEST_MNT_POINT_BASE, + hashed, + volume_file) + info_path = '%s.info' % volume_path + + ctxt = context.RequestContext('fake_user', 'fake_project') + + snap_ref = {'name': 'test snap (online)', + 'volume_id': self.VOLUME_UUID, + 'volume': volume, + 'id': self.SNAP_UUID, + 'context': ctxt, + 'status': 'asdf', + 'progress': 'asdf'} + + snap_path = '%s.%s' % (volume_path, self.SNAP_UUID) + snap_file = '%s.%s' % (volume_file, self.SNAP_UUID) + + mox.StubOutWithMock(drv, '_execute') + mox.StubOutWithMock(drv, '_create_qcow2_snap_file') + mox.StubOutWithMock(db, 'snapshot_get') + mox.StubOutWithMock(drv, '_write_info_file') + mox.StubOutWithMock(drv, '_nova') + + drv._create_qcow2_snap_file(snap_ref, volume_file, snap_path) + + create_info = {'snapshot_id': snap_ref['id'], + 'type': 'qcow2', + 'new_file': snap_file} + + drv._nova.create_volume_snapshot(ctxt, self.VOLUME_UUID, create_info) + + snap_ref['status'] = 
'creating' + snap_ref['progress'] = '0%' + db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref) + + snap_ref['progress'] = '50%' + db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref) + + snap_ref['progress'] = '90%' + db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref) + + snap_info = {'active': snap_file, + self.SNAP_UUID: snap_file} + + drv._write_info_file(info_path, snap_info) + + mox.ReplayAll() + + drv.create_snapshot(snap_ref) + + def test_create_snapshot_online_novafailure(self): + (mox, drv) = self._mox, self._driver + + volume = self._simple_volume() + volume['status'] = 'in-use' + + hashed = drv._get_hash_str(self.TEST_EXPORT1) + volume_file = 'volume-%s' % self.VOLUME_UUID + volume_path = '%s/%s/%s' % (self.TEST_MNT_POINT_BASE, + hashed, + volume_file) + info_path = '%s.info' % volume_path + + ctxt = context.RequestContext('fake_user', 'fake_project') + + snap_ref = {'name': 'test snap (online)', + 'volume_id': self.VOLUME_UUID, + 'volume': volume, + 'id': self.SNAP_UUID, + 'context': ctxt} + + snap_path = '%s.%s' % (volume_path, self.SNAP_UUID) + snap_file = '%s.%s' % (volume_file, self.SNAP_UUID) + + mox.StubOutWithMock(drv, '_execute') + mox.StubOutWithMock(drv, '_create_qcow2_snap_file') + mox.StubOutWithMock(drv, '_nova') + mox.StubOutWithMock(db, 'snapshot_get') + mox.StubOutWithMock(drv, '_write_info_file') + + drv._create_qcow2_snap_file(snap_ref, volume_file, snap_path) + + create_info = {'snapshot_id': snap_ref['id'], + 'type': 'qcow2', + 'new_file': snap_file} + + drv._nova.create_volume_snapshot(ctxt, self.VOLUME_UUID, create_info) + + snap_ref['status'] = 'creating' + snap_ref['progress'] = '0%' + + db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref) + + snap_ref['progress'] = '50%' + db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref) + + snap_ref['progress'] = '99%' + snap_ref['status'] = 'error' + db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref) + + snap_info = {'active': snap_file, + self.SNAP_UUID: snap_file} + + drv._write_info_file(info_path, snap_info) + + mox.ReplayAll() + + self.assertRaises(exception.GlusterfsException, + drv.create_snapshot, + snap_ref) + + def test_delete_snapshot_online_1(self): + """Delete the newest snapshot, with only one snap present.""" + (mox, drv) = self._mox, self._driver + + volume = self._simple_volume() + volume['status'] = 'in-use' + + ctxt = context.RequestContext('fake_user', 'fake_project') + + snap_ref = {'name': 'test snap to delete (online)', + 'volume_id': self.VOLUME_UUID, + 'volume': volume, + 'id': self.SNAP_UUID, + 'context': ctxt} + + hashed = drv._get_hash_str(self.TEST_EXPORT1) + volume_file = 'volume-%s' % self.VOLUME_UUID + volume_dir = os.path.join(self.TEST_MNT_POINT_BASE, hashed) + volume_path = '%s/%s/%s' % (self.TEST_MNT_POINT_BASE, + hashed, + volume_file) + info_path = '%s.info' % volume_path + + snap_path = '%s.%s' % (volume_path, self.SNAP_UUID) + snap_file = '%s.%s' % (volume_file, self.SNAP_UUID) + + mox.StubOutWithMock(drv, '_execute') + mox.StubOutWithMock(drv, '_nova') + mox.StubOutWithMock(drv, '_read_info_file') + mox.StubOutWithMock(drv, '_write_info_file') + mox.StubOutWithMock(os.path, 'exists') + mox.StubOutWithMock(db, 'snapshot_get') + mox.StubOutWithMock(image_utils, 'qemu_img_info') + mox.StubOutWithMock(drv, '_ensure_share_writable') + + snap_info = {'active': snap_file, + self.SNAP_UUID: snap_file} + + drv._ensure_share_writable(volume_dir) + + drv._read_info_file(info_path, empty_if_missing=True).\ + AndReturn(snap_info) + + 
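# Because the volume is in use, the driver cannot rewrite the qcow2 + # chain itself; the expectations below model it delegating the merge + # to Nova via delete_volume_snapshot() and then polling + # db.snapshot_get() until the snapshot leaves the 'deleting' state. +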
os.path.exists(snap_path).AndReturn(True) + + qemu_img_info_output = """image: %s + file format: qcow2 + virtual size: 1.0G (1073741824 bytes) + disk size: 173K + backing file: %s + """ % (snap_file, volume_file) + img_info = imageutils.QemuImgInfo(qemu_img_info_output) + + vol_qemu_img_info_output = """image: %s + file format: raw + virtual size: 1.0G (1073741824 bytes) + disk size: 173K + """ % volume_file + volume_img_info = imageutils.QemuImgInfo(vol_qemu_img_info_output) + + image_utils.qemu_img_info(snap_path).AndReturn(img_info) + + image_utils.qemu_img_info(volume_path).AndReturn(volume_img_info) + + drv._read_info_file(info_path, empty_if_missing=True).\ + AndReturn(snap_info) + + delete_info = { + 'type': 'qcow2', + 'merge_target_file': None, + 'file_to_merge': None, + 'volume_id': self.VOLUME_UUID + } + + drv._nova.delete_volume_snapshot(ctxt, self.SNAP_UUID, delete_info) + + drv._read_info_file(info_path).AndReturn(snap_info) + + drv._read_info_file(info_path).AndReturn(snap_info) + + snap_ref['status'] = 'deleting' + snap_ref['progress'] = '0%' + db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref) + + snap_ref['progress'] = '50%' + db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref) + + snap_ref['progress'] = '90%' + db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref) + + db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref) + + drv._write_info_file(info_path, snap_info) + + drv._execute('rm', '-f', volume_path, run_as_root=True) + + mox.ReplayAll() + + drv.delete_snapshot(snap_ref) + + def test_delete_snapshot_online_2(self): + """Delete the middle of 3 snapshots.""" + (mox, drv) = self._mox, self._driver + + volume = self._simple_volume() + volume['status'] = 'in-use' + + ctxt = context.RequestContext('fake_user', 'fake_project') + + snap_ref = {'name': 'test snap to delete (online)', + 'volume_id': self.VOLUME_UUID, + 'volume': volume, + 'id': self.SNAP_UUID, + 'context': ctxt} + + hashed = drv._get_hash_str(self.TEST_EXPORT1) + volume_file = 'volume-%s' % self.VOLUME_UUID + volume_dir = os.path.join(self.TEST_MNT_POINT_BASE, hashed) + volume_path = '%s/%s/%s' % (self.TEST_MNT_POINT_BASE, + hashed, + volume_file) + info_path = '%s.info' % volume_path + + snap_path = '%s.%s' % (volume_path, self.SNAP_UUID) + snap_path_2 = '%s.%s' % (volume_path, self.SNAP_UUID_2) + snap_file = '%s.%s' % (volume_file, self.SNAP_UUID) + snap_file_2 = '%s.%s' % (volume_file, self.SNAP_UUID_2) + + mox.StubOutWithMock(drv, '_execute') + mox.StubOutWithMock(drv, '_nova') + mox.StubOutWithMock(drv, '_read_info_file') + mox.StubOutWithMock(drv, '_write_info_file') + mox.StubOutWithMock(os.path, 'exists') + mox.StubOutWithMock(db, 'snapshot_get') + mox.StubOutWithMock(image_utils, 'qemu_img_info') + mox.StubOutWithMock(drv, '_ensure_share_writable') + + snap_info = {'active': snap_file_2, + self.SNAP_UUID: snap_file, + self.SNAP_UUID_2: snap_file_2} + + drv._ensure_share_writable(volume_dir) + + drv._read_info_file(info_path, empty_if_missing=True).\ + AndReturn(snap_info) + + os.path.exists(snap_path).AndReturn(True) + + qemu_img_info_output = """image: %s + file format: qcow2 + virtual size: 1.0G (1073741824 bytes) + disk size: 173K + backing file: %s + """ % (snap_file, volume_file) + img_info = imageutils.QemuImgInfo(qemu_img_info_output) + + vol_qemu_img_info_output = """image: %s + file format: raw + virtual size: 1.0G (1073741824 bytes) + disk size: 173K + """ % volume_file + volume_img_info = imageutils.QemuImgInfo(vol_qemu_img_info_output) + + 
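+        # Both the snapshot and the base volume file are inspected with
+        # qemu-img so the driver can decide which file merges into which.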
image_utils.qemu_img_info(snap_path).AndReturn(img_info) + + image_utils.qemu_img_info(volume_path).AndReturn(volume_img_info) + + drv._read_info_file(info_path, empty_if_missing=True).\ + AndReturn(snap_info) + + delete_info = {'type': 'qcow2', + 'merge_target_file': volume_file, + 'file_to_merge': snap_file, + 'volume_id': self.VOLUME_UUID} + drv._nova.delete_volume_snapshot(ctxt, self.SNAP_UUID, delete_info) + + drv._read_info_file(info_path).AndReturn(snap_info) + + drv._read_info_file(info_path).AndReturn(snap_info) + + snap_ref['status'] = 'deleting' + snap_ref['progress'] = '0%' + db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref) + + snap_ref['progress'] = '50%' + db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref) + + snap_ref['progress'] = '90%' + db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref) + + db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref) + + drv._write_info_file(info_path, snap_info) + + drv._execute('rm', '-f', snap_path, run_as_root=True) + + mox.ReplayAll() + + drv.delete_snapshot(snap_ref) + + def test_delete_snapshot_online_novafailure(self): + """Delete the newest snapshot.""" + (mox, drv) = self._mox, self._driver + + volume = self._simple_volume() + volume['status'] = 'in-use' + + ctxt = context.RequestContext('fake_user', 'fake_project') + + snap_ref = {'name': 'test snap to delete (online)', + 'volume_id': self.VOLUME_UUID, + 'volume': volume, + 'id': self.SNAP_UUID, + 'context': ctxt} + + hashed = drv._get_hash_str(self.TEST_EXPORT1) + volume_file = 'volume-%s' % self.VOLUME_UUID + volume_path = '%s/%s/%s' % (self.TEST_MNT_POINT_BASE, + hashed, + volume_file) + info_path = '%s.info' % volume_path + + snap_path = '%s.%s' % (volume_path, self.SNAP_UUID) + snap_file = '%s.%s' % (volume_file, self.SNAP_UUID) + + mox.StubOutWithMock(drv, '_execute') + mox.StubOutWithMock(drv, '_nova') + mox.StubOutWithMock(drv, '_read_info_file') + mox.StubOutWithMock(drv, '_write_info_file') + mox.StubOutWithMock(os.path, 'exists') + mox.StubOutWithMock(db, 'snapshot_get') + mox.StubOutWithMock(image_utils, 'qemu_img_info') + + snap_info = {'active': snap_file, + self.SNAP_UUID: snap_file} + + drv._read_info_file(info_path, empty_if_missing=True).\ + AndReturn(snap_info) + + os.path.exists(snap_path).AndReturn(True) + + qemu_img_info_output = """image: %s + file format: qcow2 + virtual size: 1.0G (1073741824 bytes) + disk size: 173K + backing file: %s + """ % (snap_file, volume_file) + img_info = imageutils.QemuImgInfo(qemu_img_info_output) + + image_utils.qemu_img_info(snap_path).AndReturn(img_info) + + drv._read_info_file(info_path, empty_if_missing=True).\ + AndReturn(snap_info) + + delete_info = { + 'type': 'qcow2', + 'merge_target_file': None, + 'file_to_merge': volume_file, + 'volume_id': self.VOLUME_UUID + } + + drv._nova.delete_volume_snapshot(ctxt, self.SNAP_UUID, delete_info) + + drv._read_info_file(info_path).AndReturn(snap_info) + + drv._read_info_file(info_path).AndReturn(snap_info) + + snap_ref['status'] = 'deleting' + snap_ref['progress'] = '0%' + db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref) + + snap_ref['progress'] = '50%' + db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref) + + snap_ref['status'] = 'error_deleting' + snap_ref['progress'] = '90%' + db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref) + + db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref) + + drv._write_info_file(info_path, snap_info) + + drv._execute('rm', '-f', volume_path, run_as_root=True) + + mox.ReplayAll() + + 
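+        # With the snapshot left in error_deleting by Nova, the driver
+        # is expected to give up and raise GlusterfsException.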
self.assertRaises(exception.GlusterfsException, + drv.delete_snapshot, + snap_ref) + + def test_get_backing_chain_for_path(self): + (mox, drv) = self._mox, self._driver + + glusterfs.CONF.glusterfs_mount_point_base = self.TEST_MNT_POINT_BASE + + volume = self._simple_volume() + vol_filename = volume['name'] + vol_filename_2 = volume['name'] + '.asdfjkl' + vol_filename_3 = volume['name'] + 'qwertyuiop' + hashed = drv._get_hash_str(self.TEST_EXPORT1) + vol_dir = '%s/%s' % (self.TEST_MNT_POINT_BASE, hashed) + vol_path = '%s/%s' % (vol_dir, vol_filename) + vol_path_2 = '%s/%s' % (vol_dir, vol_filename_2) + vol_path_3 = '%s/%s' % (vol_dir, vol_filename_3) + + mox.StubOutWithMock(drv, '_execute') + mox.StubOutWithMock(drv, '_local_volume_dir') + mox.StubOutWithMock(image_utils, 'qemu_img_info') + + qemu_img_output_base = """image: %(image_name)s + file format: qcow2 + virtual size: 1.0G (1073741824 bytes) + disk size: 173K + """ + qemu_img_output = """image: %(image_name)s + file format: qcow2 + virtual size: 1.0G (1073741824 bytes) + disk size: 173K + backing file: %(backing_file)s + """ + + qemu_img_output_1 = qemu_img_output_base % {'image_name': vol_filename} + qemu_img_output_2 = qemu_img_output % {'image_name': vol_filename_2, + 'backing_file': vol_filename} + qemu_img_output_3 = qemu_img_output % {'image_name': vol_filename_3, + 'backing_file': vol_filename_2} + + info_1 = imageutils.QemuImgInfo(qemu_img_output_1) + info_2 = imageutils.QemuImgInfo(qemu_img_output_2) + info_3 = imageutils.QemuImgInfo(qemu_img_output_3) + + drv._local_volume_dir(volume).AndReturn(vol_dir) + image_utils.qemu_img_info(vol_path_3).\ + AndReturn(info_3) + drv._local_volume_dir(volume).AndReturn(vol_dir) + image_utils.qemu_img_info(vol_path_2).\ + AndReturn(info_2) + drv._local_volume_dir(volume).AndReturn(vol_dir) + image_utils.qemu_img_info(vol_path).\ + AndReturn(info_1) + + mox.ReplayAll() + + chain = drv._get_backing_chain_for_path(volume, vol_path_3) + + # Verify chain contains all expected data + item_1 = drv._get_matching_backing_file(chain, vol_filename) + self.assertEqual(item_1['filename'], vol_filename_2) + chain.remove(item_1) + item_2 = drv._get_matching_backing_file(chain, vol_filename_2) + self.assertEqual(item_2['filename'], vol_filename_3) + chain.remove(item_2) + self.assertEqual(len(chain), 1) + self.assertEqual(chain[0]['filename'], vol_filename) + + def test_copy_volume_from_snapshot(self): + (mox, drv) = self._mox, self._driver + + mox.StubOutWithMock(image_utils, 'convert_image') + mox.StubOutWithMock(drv, '_read_info_file') + mox.StubOutWithMock(image_utils, 'qemu_img_info') + mox.StubOutWithMock(drv, '_set_rw_permissions_for_all') + + dest_volume = self._simple_volume( + 'c1073000-0000-0000-0000-0000000c1073') + src_volume = self._simple_volume() + + vol_dir = os.path.join(self.TEST_MNT_POINT_BASE, + drv._get_hash_str(self.TEST_EXPORT1)) + src_vol_path = os.path.join(vol_dir, src_volume['name']) + dest_vol_path = os.path.join(vol_dir, dest_volume['name']) + info_path = os.path.join(vol_dir, src_volume['name']) + '.info' + + snapshot = {'volume_name': src_volume['name'], + 'name': 'clone-snap-%s' % src_volume['id'], + 'size': src_volume['size'], + 'volume_size': src_volume['size'], + 'volume_id': src_volume['id'], + 'id': 'tmp-snap-%s' % src_volume['id'], + 'volume': src_volume} + + snap_file = dest_volume['name'] + '.' 
+ snapshot['id'] + snap_path = os.path.join(vol_dir, snap_file) + + size = dest_volume['size'] + + drv._read_info_file(info_path).AndReturn( + {'active': snap_file, + snapshot['id']: snap_file} + ) + + qemu_img_output = """image: %s + file format: raw + virtual size: 1.0G (1073741824 bytes) + disk size: 173K + backing file: %s + """ % (snap_file, src_volume['name']) + img_info = imageutils.QemuImgInfo(qemu_img_output) + + image_utils.qemu_img_info(snap_path).AndReturn(img_info) + + image_utils.convert_image(src_vol_path, dest_vol_path, 'raw') + + drv._set_rw_permissions_for_all(dest_vol_path) + + mox.ReplayAll() + + drv._copy_volume_from_snapshot(snapshot, dest_volume, size) + + def test_create_volume_from_snapshot(self): + (mox, drv) = self._mox, self._driver + + volume = self._simple_volume('c1073000-0000-0000-0000-0000000c1073') + src_volume = self._simple_volume() + + mox.StubOutWithMock(drv, '_create_snapshot') + mox.StubOutWithMock(drv, '_copy_volume_from_snapshot') + mox.StubOutWithMock(drv, '_delete_snapshot') + + snap_ref = {'volume_name': src_volume['name'], + 'name': 'clone-snap-%s' % src_volume['id'], + 'size': src_volume['size'], + 'volume_size': src_volume['size'], + 'volume_id': src_volume['id'], + 'id': 'tmp-snap-%s' % src_volume['id'], + 'volume': src_volume} + + volume_ref = {'id': volume['id'], + 'size': volume['size'], + 'status': volume['status'], + 'provider_location': volume['provider_location'], + 'name': 'volume-' + volume['id']} + + drv._create_snapshot(snap_ref) + drv._copy_volume_from_snapshot(snap_ref, + volume_ref, + src_volume['size']) + drv._delete_snapshot(snap_ref) + + mox.ReplayAll() + + drv.create_cloned_volume(volume, src_volume) + + def test_initialize_connection(self): + (mox, drv) = self._mox, self._driver + + volume = self._simple_volume() + vol_dir = os.path.join(self.TEST_MNT_POINT_BASE, + drv._get_hash_str(self.TEST_EXPORT1)) + vol_path = os.path.join(vol_dir, volume['name']) + + qemu_img_output = """image: %s + file format: raw + virtual size: 1.0G (1073741824 bytes) + disk size: 173K + """ % volume['name'] + img_info = imageutils.QemuImgInfo(qemu_img_output) + + mox.StubOutWithMock(drv, 'get_active_image_from_info') + mox.StubOutWithMock(image_utils, 'qemu_img_info') + + drv.get_active_image_from_info(volume).AndReturn(volume['name']) + image_utils.qemu_img_info(vol_path).AndReturn(img_info) + + mox.ReplayAll() + + conn_info = drv.initialize_connection(volume, None) + + self.assertEqual(conn_info['data']['format'], 'raw') + self.assertEqual(conn_info['driver_volume_type'], 'glusterfs') + self.assertEqual(conn_info['data']['name'], volume['name']) diff --git a/cinder/tests/test_gpfs.py b/cinder/tests/test_gpfs.py new file mode 100644 index 0000000000..ffa85c179d --- /dev/null +++ b/cinder/tests/test_gpfs.py @@ -0,0 +1,553 @@ + +# Copyright IBM Corp. 2013 All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
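+
+# These tests drive GPFSDriver through the regular volume manager while
+# the GPFS-specific primitives (snap/copy creation, block allocation,
+# cluster state queries) are stubbed out with local-file equivalents,
+# so the suite can run on hosts that have no GPFS filesystem.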
+ +import mox as mox_lib +import os +import tempfile + +from oslo.config import cfg + +from cinder import context +from cinder import db +from cinder import exception +from cinder.image import image_utils +from cinder.openstack.common import imageutils +from cinder.openstack.common import importutils +from cinder.openstack.common import log as logging +from cinder.openstack.common import processutils +from cinder import test +from cinder.tests import utils as test_utils +from cinder import units +from cinder import utils +from cinder.volume import configuration as conf +from cinder.volume.drivers.gpfs import GPFSDriver + +LOG = logging.getLogger(__name__) + +CONF = cfg.CONF + + +class FakeImageService(): + def update(self, context, image_id, path): + pass + + def show(self, context, image_id): + image_meta = {'disk_format': None, + 'container_format': None} + return image_meta + + def download(self, context, image_id, image_fd): + for b in range(256): + image_fd.write('some_image_data') + image_fd.close() + + +class FakeQemuImgInfo(object): + def __init__(self): + self.file_format = None + self.backing_file = None + + +class GPFSDriverTestCase(test.TestCase): + driver_name = "cinder.volume.drivers.gpfs.GPFSDriver" + context = context.get_admin_context() + + def _execute_wrapper(self, cmd, *args, **kwargs): + try: + kwargs.pop('run_as_root') + except KeyError: + pass + return utils.execute(cmd, *args, **kwargs) + + def setUp(self): + super(GPFSDriverTestCase, self).setUp() + self.volumes_path = tempfile.mkdtemp(prefix="gpfs_") + self.images_dir = '%s/images' % self.volumes_path + + if not os.path.exists(self.volumes_path): + os.mkdir(self.volumes_path) + if not os.path.exists(self.images_dir): + os.mkdir(self.images_dir) + self.image_id = '70a599e0-31e7-49b7-b260-868f441e862b' + + self.driver = GPFSDriver(configuration=conf.Configuration(None)) + self.driver.set_execute(self._execute_wrapper) + self.flags(volume_driver=self.driver_name, + gpfs_mount_point_base=self.volumes_path) + self.volume = importutils.import_object(CONF.volume_manager) + self.volume.driver.set_execute(self._execute_wrapper) + self.volume.driver.set_initialized() + self.volume.stats = dict(allocated_capacity_gb=0) + + self.stubs.Set(GPFSDriver, '_create_gpfs_snap', + self._fake_gpfs_snap) + self.stubs.Set(GPFSDriver, '_create_gpfs_copy', + self._fake_gpfs_copy) + self.stubs.Set(GPFSDriver, '_gpfs_redirect', + self._fake_gpfs_redirect) + self.stubs.Set(GPFSDriver, '_is_gpfs_parent_file', + self._fake_is_gpfs_parent) + self.stubs.Set(GPFSDriver, '_is_gpfs_path', + self._fake_is_gpfs_path) + self.stubs.Set(GPFSDriver, '_delete_gpfs_file', + self._fake_delete_gpfs_file) + self.stubs.Set(GPFSDriver, '_create_sparse_file', + self._fake_create_sparse_file) + self.stubs.Set(GPFSDriver, '_allocate_file_blocks', + self._fake_allocate_file_blocks) + self.stubs.Set(GPFSDriver, '_get_available_capacity', + self._fake_get_available_capacity) + self.stubs.Set(image_utils, 'qemu_img_info', + self._fake_qemu_qcow2_image_info) + self.stubs.Set(image_utils, 'convert_image', + self._fake_convert_image) + self.stubs.Set(image_utils, 'resize_image', + self._fake_qemu_image_resize) + + self.context = context.get_admin_context() + self.context.user_id = 'fake' + self.context.project_id = 'fake' + CONF.gpfs_images_dir = self.images_dir + + def tearDown(self): + try: + os.rmdir(self.images_dir) + os.rmdir(self.volumes_path) + except OSError: + pass + super(GPFSDriverTestCase, self).tearDown() + + def 
test_create_delete_volume_full_backing_file(self): + """Create and delete vol with full creation method.""" + CONF.gpfs_sparse_volumes = False + vol = test_utils.create_volume(self.context, host=CONF.host) + volume_id = vol['id'] + self.assertTrue(os.path.exists(self.volumes_path)) + self.volume.create_volume(self.context, volume_id) + path = self.volumes_path + '/' + vol['name'] + self.assertTrue(os.path.exists(path)) + self.volume.delete_volume(self.context, volume_id) + self.assertFalse(os.path.exists(path)) + + def test_create_delete_volume_sparse_backing_file(self): + """Create and delete vol with default sparse creation method.""" + CONF.gpfs_sparse_volumes = True + vol = test_utils.create_volume(self.context, host=CONF.host) + volume_id = vol['id'] + self.assertTrue(os.path.exists(self.volumes_path)) + self.volume.create_volume(self.context, volume_id) + path = self.volumes_path + '/' + vol['name'] + self.assertTrue(os.path.exists(path)) + self.volume.delete_volume(self.context, volume_id) + self.assertFalse(os.path.exists(path)) + + def test_create_volume_with_attributes(self): + self.stubs.Set(GPFSDriver, '_gpfs_change_attributes', + self._fake_gpfs_change_attributes) + attributes = {'dio': 'yes', 'data_pool_name': 'ssd_pool', + 'replicas': '2', 'write_affinity_depth': '1', + 'block_group_factor': '1', + 'write_affinity_failure-group': + '1,1,1:2;2,1,1:2;2,0,3:4'} + vol = test_utils.create_volume(self.context, host=CONF.host, + metadata=attributes) + volume_id = vol['id'] + self.assertTrue(os.path.exists(self.volumes_path)) + self.volume.create_volume(self.context, volume_id) + path = self.volumes_path + '/' + vol['name'] + self.assertTrue(os.path.exists(path)) + self.volume.delete_volume(self.context, volume_id) + self.assertFalse(os.path.exists(path)) + + def test_migrate_volume(self): + """Test volume migration done by driver.""" + loc = 'GPFSDriver:cindertest:openstack' + cap = {'location_info': loc} + host = {'host': 'foo', 'capabilities': cap} + volume = test_utils.create_volume(self.context, host=CONF.host) + self.driver.create_volume(volume) + self.driver.migrate_volume(self.context, volume, host) + self.driver.delete_volume(volume) + + def _create_snapshot(self, volume_id, size='0'): + """Create a snapshot object.""" + snap = {} + snap['volume_size'] = size + snap['user_id'] = 'fake' + snap['project_id'] = 'fake' + snap['volume_id'] = volume_id + snap['status'] = "creating" + return db.snapshot_create(context.get_admin_context(), snap) + + def test_create_delete_snapshot(self): + volume_src = test_utils.create_volume(self.context, host=CONF.host) + self.volume.create_volume(self.context, volume_src['id']) + snapCount = len(db.snapshot_get_all_for_volume(self.context, + volume_src['id'])) + self.assertEqual(snapCount, 0) + snapshot = self._create_snapshot(volume_src['id']) + snapshot_id = snapshot['id'] + self.volume.create_snapshot(self.context, volume_src['id'], + snapshot_id) + self.assertTrue(os.path.exists(os.path.join(self.volumes_path, + snapshot['name']))) + snapCount = len(db.snapshot_get_all_for_volume(self.context, + volume_src['id'])) + self.assertEqual(snapCount, 1) + self.volume.delete_snapshot(self.context, snapshot_id) + self.volume.delete_volume(self.context, volume_src['id']) + self.assertFalse(os.path.exists(os.path.join(self.volumes_path, + snapshot['name']))) + snapCount = len(db.snapshot_get_all_for_volume(self.context, + volume_src['id'])) + self.assertEqual(snapCount, 0) + + def test_create_volume_from_snapshot(self): + volume_src = 
test_utils.create_volume(self.context, host=CONF.host)
+        self.volume.create_volume(self.context, volume_src['id'])
+        snapshot = self._create_snapshot(volume_src['id'])
+        snapshot_id = snapshot['id']
+        self.volume.create_snapshot(self.context, volume_src['id'],
+                                    snapshot_id)
+        self.assertTrue(os.path.exists(os.path.join(self.volumes_path,
+                                                    snapshot['name'])))
+        volume_dst = test_utils.create_volume(self.context, host=CONF.host,
+                                              snapshot_id=snapshot_id)
+        self.volume.create_volume(self.context, volume_dst['id'], snapshot_id)
+        self.assertEqual(volume_dst['id'], db.volume_get(
+            context.get_admin_context(),
+            volume_dst['id']).id)
+        self.assertEqual(snapshot_id, db.volume_get(
+            context.get_admin_context(),
+            volume_dst['id']).snapshot_id)
+        self.volume.delete_volume(self.context, volume_dst['id'])
+
+        self.volume.delete_snapshot(self.context, snapshot_id)
+        self.volume.delete_volume(self.context, volume_src['id'])
+
+    def test_create_cloned_volume(self):
+        volume_src = test_utils.create_volume(self.context, host=CONF.host)
+        self.volume.create_volume(self.context, volume_src['id'])
+
+        volume_dst = test_utils.create_volume(self.context, host=CONF.host)
+        volumepath = os.path.join(self.volumes_path, volume_dst['name'])
+        self.assertFalse(os.path.exists(volumepath))
+
+        self.driver.create_cloned_volume(volume_dst, volume_src)
+        self.assertEqual(volume_dst['id'], db.volume_get(
+            context.get_admin_context(),
+            volume_dst['id']).id)
+
+        self.assertTrue(os.path.exists(volumepath))
+
+        self.volume.delete_volume(self.context, volume_src['id'])
+        self.volume.delete_volume(self.context, volume_dst['id'])
+
+    def test_create_volume_from_snapshot_method(self):
+        volume_src = test_utils.create_volume(self.context, host=CONF.host)
+        self.volume.create_volume(self.context, volume_src['id'])
+
+        snapshot = self._create_snapshot(volume_src['id'])
+        snapshot_id = snapshot['id']
+        self.volume.create_snapshot(self.context, volume_src['id'],
+                                    snapshot_id)
+        volume_dst = test_utils.create_volume(self.context, host=CONF.host)
+        self.driver.create_volume_from_snapshot(volume_dst, snapshot)
+        self.assertEqual(volume_dst['id'], db.volume_get(
+            context.get_admin_context(),
+            volume_dst['id']).id)
+
+        volumepath = os.path.join(self.volumes_path, volume_dst['name'])
+        self.assertTrue(os.path.exists(volumepath))
+
+        self.volume.delete_snapshot(self.context, snapshot_id)
+        self.volume.delete_volume(self.context, volume_dst['id'])
+        self.volume.delete_volume(self.context, volume_src['id'])
+
+    def test_clone_image_to_volume_with_copy_on_write_mode(self):
+        """Test the function of copy_image_to_volume
+        focusing on the integration with the image_utils module,
+        using the copy_on_write image sharing mode.
+        """
+
+        # specify image file format is raw
+        self.stubs.Set(image_utils, 'qemu_img_info',
+                       self._fake_qemu_raw_image_info)
+
+        volume = test_utils.create_volume(self.context, host=CONF.host)
+        volumepath = os.path.join(self.volumes_path, volume['name'])
+        CONF.gpfs_images_share_mode = 'copy_on_write'
+        self.driver.clone_image(volume,
+                                None,
+                                self.image_id,
+                                {})
+
+        self.assertTrue(os.path.exists(volumepath))
+        self.volume.delete_volume(self.context, volume['id'])
+        self.assertFalse(os.path.exists(volumepath))
+
+    def test_clone_image_to_volume_with_copy_mode(self):
+        """Test the function of copy_image_to_volume
+        focusing on the integration with the image_utils module,
+        using the copy image sharing mode.
+        """
+
+        # specify image file format is raw
+        self.stubs.Set(image_utils, 'qemu_img_info',
+                       self._fake_qemu_raw_image_info)
+
+        volume = test_utils.create_volume(self.context, host=CONF.host)
+        volumepath = os.path.join(self.volumes_path, volume['name'])
+        CONF.gpfs_images_share_mode = 'copy'
+        self.driver.clone_image(volume,
+                                None,
+                                self.image_id,
+                                {})
+
+        self.assertTrue(os.path.exists(volumepath))
+        self.volume.delete_volume(self.context, volume['id'])
+
+    def test_copy_image_to_volume_with_non_gpfs_image_dir(self):
+        """Test the function of copy_image_to_volume
+        focusing on the integration with the image_utils module,
+        using a non-GPFS Glance images directory.
+        """
+        # specify image file format is raw
+        self.stubs.Set(image_utils, 'qemu_img_info',
+                       self._fake_qemu_raw_image_info)
+
+        for share_mode in ['copy_on_write', 'copy']:
+            volume = test_utils.create_volume(self.context, host=CONF.host)
+            volumepath = os.path.join(self.volumes_path, volume['name'])
+            CONF.gpfs_images_share_mode = share_mode
+            CONF.gpfs_images_dir = None
+            self.driver.copy_image_to_volume(self.context,
+                                             volume,
+                                             FakeImageService(),
+                                             self.image_id)
+            self.assertTrue(os.path.exists(volumepath))
+            self.volume.delete_volume(self.context, volume['id'])
+
+    def test_copy_image_to_volume_with_illegal_image_format(self):
+        """Test the function of copy_image_to_volume
+        focusing on the integration with the image_utils module,
+        using an illegal image file format.
+        """
+        # specify image file format is qcow2
+        self.stubs.Set(image_utils, 'qemu_img_info',
+                       self._fake_qemu_qcow2_image_info)
+
+        volume = test_utils.create_volume(self.context, host=CONF.host)
+        CONF.gpfs_images_share_mode = 'copy'
+        CONF.gpfs_images_dir = self.images_dir
+        self.assertRaises(exception.ImageUnacceptable,
+                          self.driver.copy_image_to_volume,
+                          self.context,
+                          volume,
+                          FakeImageService(),
+                          self.image_id)
+
+        self.volume.delete_volume(self.context, volume['id'])
+
+    def test_get_volume_stats(self):
+        stats = self.driver.get_volume_stats()
+        self.assertEqual(stats['volume_backend_name'], 'GPFS')
+        self.assertEqual(stats['storage_protocol'], 'file')
+
+    def test_extend_volume(self):
+        new_vol_size = 15
+        mox = mox_lib.Mox()
+        volume = test_utils.create_volume(self.context, host=CONF.host)
+        volpath = os.path.join(self.volumes_path, volume['name'])
+
+        qemu_img_info_output = """image: %s
+        file format: raw
+        virtual size: %sG (%s bytes)
+        backing file: %s
+        """ % (volume['name'], new_vol_size, new_vol_size * units.GiB, volpath)
+        mox.StubOutWithMock(image_utils, 'resize_image')
+        image_utils.resize_image(volpath, new_vol_size)
+
+        mox.StubOutWithMock(image_utils, 'qemu_img_info')
+        img_info = imageutils.QemuImgInfo(qemu_img_info_output)
+        image_utils.qemu_img_info(volpath).AndReturn(img_info)
+        mox.ReplayAll()
+
+        self.driver.extend_volume(volume, new_vol_size)
+        mox.VerifyAll()
+
+    def test_extend_volume_with_failure(self):
+        new_vol_size = 15
+        mox = mox_lib.Mox()
+        volume = test_utils.create_volume(self.context, host=CONF.host)
+        volpath = os.path.join(self.volumes_path, volume['name'])
+
+        mox.StubOutWithMock(image_utils, 'resize_image')
+        image_utils.resize_image(volpath, new_vol_size).AndRaise(
+            processutils.ProcessExecutionError('error'))
+        mox.ReplayAll()
+
+        self.assertRaises(exception.VolumeBackendAPIException,
+                          self.driver.extend_volume, volume, new_vol_size)
+        mox.VerifyAll()
+
+    def test_check_for_setup_error_ok(self):
+        self.stubs.Set(GPFSDriver, '_get_gpfs_state',
+                       self._fake_gpfs_get_state_active)
+        self.stubs.Set(GPFSDriver,
'_get_gpfs_cluster_release_level', + self._fake_gpfs_compatible_cluster_release_level) + self.stubs.Set(GPFSDriver, '_get_gpfs_filesystem_release_level', + self._fake_gpfs_compatible_filesystem_release_level) + self.driver.check_for_setup_error() + + def test_check_for_setup_error_gpfs_not_active(self): + self.stubs.Set(GPFSDriver, '_get_gpfs_state', + self._fake_gpfs_get_state_not_active) + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.check_for_setup_error) + + def test_check_for_setup_error_not_gpfs_path(self): + self.stubs.Set(GPFSDriver, '_get_gpfs_state', + self._fake_gpfs_get_state_active) + self.stubs.Set(GPFSDriver, '_is_gpfs_path', + self._fake_is_not_gpfs_path) + self.stubs.Set(GPFSDriver, '_get_gpfs_cluster_release_level', + self._fake_gpfs_compatible_cluster_release_level) + self.stubs.Set(GPFSDriver, '_get_gpfs_filesystem_release_level', + self._fake_gpfs_compatible_filesystem_release_level) + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.check_for_setup_error) + + def test_check_for_setup_error_incompatible_cluster_version(self): + self.stubs.Set(GPFSDriver, '_get_gpfs_state', + self._fake_gpfs_get_state_active) + self.stubs.Set(GPFSDriver, '_get_gpfs_cluster_release_level', + self._fake_gpfs_incompatible_cluster_release_level) + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.check_for_setup_error) + + def test_check_for_setup_error_incompatible_filesystem_version(self): + self.stubs.Set(GPFSDriver, '_get_gpfs_state', + self._fake_gpfs_get_state_active) + self.stubs.Set(GPFSDriver, '_get_gpfs_cluster_release_level', + self._fake_gpfs_compatible_cluster_release_level) + self.stubs.Set(GPFSDriver, '_get_gpfs_filesystem_release_level', + self._fake_gpfs_incompatible_filesystem_release_level) + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.check_for_setup_error) + + def _fake_create_file(self, path, modebits='666'): + open(path, 'w').close() + utils.execute('chmod', modebits, path) + + def _fake_gpfs_snap(self, src, dest=None, modebits='644'): + if dest is None: + dest = src + self._fake_create_file(dest, '644') + + def _fake_gpfs_copy(self, src, dest): + self._fake_create_file(dest) + + def _fake_create_sparse_file(self, path, size): + self._fake_create_file(path) + + def _fake_allocate_file_blocks(self, path, size): + self._fake_create_file(path) + + def _fake_gpfs_change_attributes(self, options, path): + pass + + def _fake_gpfs_redirect(self, src): + return True + + def _fake_is_gpfs_parent(self, gpfs_file): + return False + + def _fake_get_available_capacity(self, path): + fake_avail = 80 * units.GiB + fake_size = 2 * fake_avail + return fake_avail, fake_size + + def _fake_gpfs_get_state_active(self): + active_txt = ('mmgetstate::HEADER:version:reserved:reserved:' + 'nodeName:nodeNumber:state:quorum:nodesUp:totalNodes:' + 'remarks:cnfsState:\n' + 'mmgetstate::0:1:::hostname:1:active:1:1:' + '1:quorum node:(undefined):') + return active_txt + + def _fake_gpfs_get_state_not_active(self): + inactive_txt = ('mmgetstate::HEADER:version:reserved:reserved:' + 'nodeName:nodeNumber:state:quorum:nodesUp:totalNodes:' + 'remarks:cnfsState:\n' + 'mmgetstate::0:1:::hostname:1:down:1:1:' + '1:quorum node:(undefined):') + return inactive_txt + + def _fake_gpfs_compatible_cluster_release_level(self): + release = 1400 + return release + + def _fake_gpfs_incompatible_cluster_release_level(self): + release = 1105 + return release + + def _fake_gpfs_compatible_filesystem_release_level(self, path=None): + 
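+        # A release level of 1400 passes the driver's compatibility
+        # check; 1105, used by the "incompatible" fakes below, is
+        # below the supported minimum.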
release = 1400
+        fs = '/dev/gpfs'
+        return fs, release
+
+    def _fake_gpfs_incompatible_filesystem_release_level(self, path=None):
+        release = 1105
+        fs = '/dev/gpfs'
+        return fs, release
+
+    def _fake_is_gpfs_path(self, path):
+        pass
+
+    def _fake_is_not_gpfs_path(self, path):
+        raise processutils.ProcessExecutionError('invalid gpfs path')
+
+    def _fake_convert_image(self, source, dest, out_format):
+        utils.execute('cp', source, dest)
+
+    def _fake_qemu_qcow2_image_info(self, path):
+        data = FakeQemuImgInfo()
+        data.file_format = 'qcow2'
+        data.backing_file = None
+        data.virtual_size = 1 * units.GiB
+        return data
+
+    def _fake_qemu_raw_image_info(self, path):
+        data = FakeQemuImgInfo()
+        data.file_format = 'raw'
+        data.backing_file = None
+        data.virtual_size = 1 * units.GiB
+        return data
+
+    def _fake_qemu_image_resize(self, path, size):
+        pass
+
+    def _fake_delete_gpfs_file(self, fchild):
+        volume_path = fchild
+        vol_name = os.path.basename(fchild)
+        vol_id = vol_name.split('volume-').pop()
+        utils.execute('rm', '-f', volume_path)
+        utils.execute('rm', '-f', volume_path + '.snap')
+        all_snaps = db.snapshot_get_all_for_volume(self.context, vol_id)
+        for snap in all_snaps:
+            snap_path = self.volumes_path + '/' + snap['name']
+            utils.execute('rm', '-f', snap_path)
diff --git a/cinder/tests/test_hds.py b/cinder/tests/test_hds.py
new file mode 100644
index 0000000000..290da4b21a
--- /dev/null
+++ b/cinder/tests/test_hds.py
@@ -0,0 +1,296 @@
+# Copyright (c) 2013 Hitachi Data Systems, Inc.
+# Copyright (c) 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+"""
+Self test for Hitachi Unified Storage (HUS) platform.
+"""
+
+import mox
+import os
+import tempfile
+
+from cinder import test
+from cinder.volume import configuration as conf
+from cinder.volume.drivers.hds import hds
+
+
+CONF = """<?xml version="1.0" encoding="UTF-8" ?>
+<config>
+  <mgmt_ip0>172.17.44.16</mgmt_ip0>
+  <mgmt_ip1>172.17.44.17</mgmt_ip1>
+  <username>system</username>
+  <password>manager</password>
+  <svc_0>
+    <volume_type>default</volume_type>
+    <iscsi_ip>172.17.39.132</iscsi_ip>
+    <hdp>9</hdp>
+  </svc_0>
+  <svc_1>
+    <volume_type>silver</volume_type>
+    <iscsi_ip>172.17.39.133</iscsi_ip>
+    <hdp>9</hdp>
+  </svc_1>
+  <svc_2>
+    <volume_type>gold</volume_type>
+    <iscsi_ip>172.17.39.134</iscsi_ip>
+    <hdp>9</hdp>
+  </svc_2>
+  <svc_3>
+    <volume_type>platinum</volume_type>
+    <iscsi_ip>172.17.39.135</iscsi_ip>
+    <hdp>9</hdp>
+  </svc_3>
+  <snapshot>
+    <hdp>9</hdp>
+  </snapshot>
+  <lun_start>
+    3300
+  </lun_start>
+</config>
+"""
+
+
+class SimulatedHusBackend:
+    """Simulation Back end.
Talks to HUS.""" + + alloc_lun = [] # allocated LUs + connections = [] # iSCSI connections + init_index = 0 # initiator index + target_index = 0 # target index + hlun = 0 # hlun index + out = '' + + def __init__(self): + self.start_lun = 0 + + def get_version(self, cmd, ver, ip0, ip1, user, pw): + out = ("Array_ID: 92210013 (HUS130) version: 0920/B-S LU: 4096" + " RG: 75 RG_LU: 1024 Utility_version: 1.0.0") + return out + + def get_iscsi_info(self, cmd, ver, ip0, ip1, user, pw): + out = """CTL: 0 Port: 4 IP: 172.17.39.132 Port: 3260 Link: Up + CTL: 0 Port: 5 IP: 172.17.39.133 Port: 3260 Link: Up + CTL: 1 Port: 4 IP: 172.17.39.134 Port: 3260 Link: Up + CTL: 1 Port: 5 IP: 172.17.39.135 Port: 3260 Link: Up""" + return out + + def get_hdp_info(self, cmd, ver, ip0, ip1, user, pw): + out = """HDP: 2 272384 MB 33792 MB 12 % LUs: 70 Normal Normal + HDP: 9 546816 MB 73728 MB 13 % LUs: 194 Normal Normal""" + return out + + def create_lu(self, cmd, ver, ip0, ip1, user, pw, id, hdp, start, + end, size): + if self.start_lun < int(start): # initialize first time + self.start_lun = int(start) + out = ("LUN: %d HDP: 9 size: %s MB, is successfully created" % + (self.start_lun, size)) + self.alloc_lun.append(str(self.start_lun)) + self.start_lun += 1 + return out + + def extend_vol(self, cmd, ver, ip0, ip1, user, pw, id, lu, size): + out = ("LUN: %s successfully extended to %s MB" % (lu, size)) + SimulatedHusBackend.out = out + return out + + def delete_lu(self, cmd, ver, ip0, ip1, user, pw, id, lun): + out = "" + if lun in self.alloc_lun: + out = "LUN: %s is successfully deleted" % (lun) + self.alloc_lun.remove(lun) + return out + + def create_dup(self, cmd, ver, ip0, ip1, user, pw, id, src_lun, + hdp, start, end, size): + out = ("LUN: %s HDP: 9 size: %s MB, is successfully created" % + (self.start_lun, size)) + self.alloc_lun.append(str(self.start_lun)) + self.start_lun += 1 + return out + + def add_iscsi_conn(self, cmd, ver, ip0, ip1, user, pw, id, lun, ctl, port, + iqn, initiator): + conn = (self.hlun, lun, initiator, self.init_index, iqn, + self.target_index, ctl, port) + out = ("H-LUN: %d mapped. 
LUN: %s, iSCSI Initiator: %s @ index: %d, \
+            and Target: %s @ index %d is successfully paired @ CTL: %s, \
+            Port: %s" % conn)
+        self.init_index += 1
+        self.target_index += 1
+        self.hlun += 1
+        SimulatedHusBackend.connections.append(conn)
+        return out
+
+    def del_iscsi_conn(self, cmd, ver, ip0, ip1, user, pw, id, lun, ctl, port,
+                       iqn, initiator):
+        conn = ()
+        for connection in SimulatedHusBackend.connections:
+            if (connection[1] == lun):
+                conn = connection
+                SimulatedHusBackend.connections.remove(connection)
+        if not conn:  # no connection found for this LUN
+            return
+        (hlun, lun, initiator, init_index, iqn, target_index, ctl, port) = conn
+        detail = (hlun, iqn)
+        out = ("H-LUN: %d successfully deleted from target %s" % detail)
+        return out
+
+
+# The following information is passed on to the tests when creating a volume.
+
+_VOLUME = {'volume_id': '1234567890', 'size': 128,
+           'volume_type': None, 'provider_location': None, 'id': 'abcdefg'}
+
+
+class HUSiSCSIDriverTest(test.TestCase):
+    """Test HUS iSCSI volume driver."""
+
+    def __init__(self, *args, **kwargs):
+        super(HUSiSCSIDriverTest, self).__init__(*args, **kwargs)
+
+    def setUp(self):
+        super(HUSiSCSIDriverTest, self).setUp()
+        (handle, self.config_file) = tempfile.mkstemp('.xml')
+        os.write(handle, CONF)
+        os.close(handle)
+        SimulatedHusBackend.alloc_lun = []
+        SimulatedHusBackend.connections = []
+        SimulatedHusBackend.out = ''
+        self.mox = mox.Mox()
+        self.mox.StubOutWithMock(hds, 'factory_bend')
+        hds.factory_bend().AndReturn(SimulatedHusBackend())
+        self.mox.ReplayAll()
+        self.configuration = mox.MockObject(conf.Configuration)
+        self.configuration.hds_cinder_config_file = self.config_file
+        self.driver = hds.HUSDriver(configuration=self.configuration)
+
+    def tearDown(self):
+        os.remove(self.config_file)
+        self.mox.UnsetStubs()
+        super(HUSiSCSIDriverTest, self).tearDown()
+
+    def test_get_volume_stats(self):
+        stats = self.driver.get_volume_stats(True)
+        self.assertEqual(stats["vendor_name"], "HDS")
+        self.assertEqual(stats["storage_protocol"], "iSCSI")
+        self.assertGreater(stats["total_capacity_gb"], 0)
+
+    def test_create_volume(self):
+        loc = self.driver.create_volume(_VOLUME)
+        self.assertIsNotNone(loc)
+        vol = _VOLUME.copy()
+        vol['provider_location'] = loc['provider_location']
+        self.assertIsNotNone(loc['provider_location'])
+        return vol
+
+    def test_delete_volume(self):
+        """Delete a volume (test).
+
+        Note: this API call must not raise an exception.
+        The driver silently accepts a delete request because the DB can
+        be out of sync, and the Cinder manager will keep retrying the
+        delete even though the volume has already been wiped from the
+        array. We don't want a dangling volume entry in the customer
+        dashboard.
+ """ + vol = self.test_create_volume() + self.assertTrue(SimulatedHusBackend.alloc_lun) + num_luns_before = len(SimulatedHusBackend.alloc_lun) + self.driver.delete_volume(vol) + num_luns_after = len(SimulatedHusBackend.alloc_lun) + self.assertGreater(num_luns_before, num_luns_after) + + def test_extend_volume(self): + vol = self.test_create_volume() + new_size = _VOLUME['size'] * 2 + self.driver.extend_volume(vol, new_size) + self.assertTrue(str(new_size * 1024) in + SimulatedHusBackend.out) + + def test_create_snapshot(self): + vol = self.test_create_volume() + self.mox.StubOutWithMock(self.driver, '_id_to_vol') + self.driver._id_to_vol(vol['volume_id']).AndReturn(vol) + self.mox.ReplayAll() + svol = vol.copy() + svol['volume_size'] = svol['size'] + loc = self.driver.create_snapshot(svol) + self.assertIsNotNone(loc) + svol['provider_location'] = loc['provider_location'] + return svol + + def test_create_clone(self): + vol = self.test_create_volume() + self.mox.StubOutWithMock(self.driver, '_id_to_vol') + self.driver._id_to_vol(vol['volume_id']).AndReturn(vol) + self.mox.ReplayAll() + svol = vol.copy() + svol['volume_size'] = svol['size'] + loc = self.driver.create_snapshot(svol) + self.assertIsNotNone(loc) + svol['provider_location'] = loc['provider_location'] + return svol + + def test_delete_snapshot(self): + """Delete a snapshot (test). + + Note: this API call should not expect any exception: + This driver will silently accept a delete request, because + the DB can be out of sync, and Cinder manager will keep trying + to delete, even though the snapshot has been wiped out of the + Array. We don't want to have a dangling snapshot entry in the + customer dashboard. + """ + svol = self.test_create_snapshot() + num_luns_before = len(SimulatedHusBackend.alloc_lun) + self.driver.delete_snapshot(svol) + num_luns_after = len(SimulatedHusBackend.alloc_lun) + self.assertGreater(num_luns_before, num_luns_after) + + def test_create_volume_from_snapshot(self): + svol = self.test_create_snapshot() + vol = self.driver.create_volume_from_snapshot(_VOLUME, svol) + self.assertIsNotNone(vol) + return vol + + def test_initialize_connection(self): + connector = {} + connector['initiator'] = 'iqn.1993-08.org.debian:01:11f90746eb2' + connector['host'] = 'dut_1.lab.hds.com' + vol = self.test_create_volume() + self.mox.StubOutWithMock(self.driver, '_update_vol_location') + conn = self.driver.initialize_connection(vol, connector) + self.assertIn('hitachi', conn['data']['target_iqn']) + self.assertIn('3260', conn['data']['target_portal']) + vol['provider_location'] = conn['data']['provider_location'] + return (vol, connector) + + def test_terminate_connection(self): + """Terminate a connection (test). + + Note: this API call should not expect any exception: + This driver will silently accept a terminate_connection request + because an error/exception return will only jeopardize the + connection tear down at a host. + """ + (vol, conn) = self.test_initialize_connection() + num_conn_before = len(SimulatedHusBackend.connections) + self.driver.terminate_connection(vol, conn) + num_conn_after = len(SimulatedHusBackend.connections) + self.assertGreater(num_conn_before, num_conn_after) diff --git a/cinder/tests/test_hp3par.py b/cinder/tests/test_hp3par.py new file mode 100644 index 0000000000..ee325c4e5a --- /dev/null +++ b/cinder/tests/test_hp3par.py @@ -0,0 +1,1488 @@ +# +# (c) Copyright 2013-2014 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. 
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""Unit tests for the HP 3PAR volume drivers."""
+
+import ast
+import mock
+import mox
+import shutil
+import tempfile
+
+from hp3parclient import exceptions as hpexceptions
+
+from cinder import context
+from cinder import exception
+from cinder.openstack.common import log as logging
+from cinder import test
+from cinder.volume import configuration as conf
+from cinder.volume.drivers.san.hp import hp_3par_fc as hpfcdriver
+from cinder.volume.drivers.san.hp import hp_3par_iscsi as hpdriver
+
+LOG = logging.getLogger(__name__)
+
+HP3PAR_DOMAIN = 'OpenStack'
+HP3PAR_CPG = 'OpenStackCPG'
+HP3PAR_CPG_SNAP = 'OpenStackCPGSnap'
+CLI_CR = '\r\n'
+
+
+class FakeHP3ParClient(object):
+
+    PORT_MODE_TARGET = 2
+    PORT_MODE_INITIATOR = 3
+    PORT_MODE_PEER = 4
+
+    PORT_TYPE_HOST = 1
+    PORT_TYPE_DISK = 2
+    PORT_TYPE_FREE = 3
+    PORT_TYPE_RCIP = 6
+    PORT_TYPE_ISCSI = 7
+
+    PORT_PROTO_FC = 1
+    PORT_PROTO_ISCSI = 2
+    PORT_PROTO_IP = 4
+
+    PORT_STATE_READY = 4
+    PORT_STATE_SYNC = 5
+    PORT_STATE_OFFLINE = 10
+
+    HOST_EDIT_ADD = 1
+    HOST_EDIT_REMOVE = 2
+
+    api_url = None
+    debug = False
+
+    connection_count = 0
+
+    volumes = []
+    hosts = []
+    vluns = []
+    cpgs = [
+        {'SAGrowth': {'LDLayout': {'diskPatterns': [{'diskType': 2}]},
+                      'incrementMiB': 8192},
+         'SAUsage': {'rawTotalMiB': 24576,
+                     'rawUsedMiB': 768,
+                     'totalMiB': 8192,
+                     'usedMiB': 256},
+         'SDGrowth': {'LDLayout': {'RAIDType': 4,
+                                   'diskPatterns': [{'diskType': 2}]},
+                      'incrementMiB': 32768},
+         'SDUsage': {'rawTotalMiB': 49152,
+                     'rawUsedMiB': 1023,
+                     'totalMiB': 36864,
+                     'usedMiB': 768},
+         'UsrUsage': {'rawTotalMiB': 57344,
+                      'rawUsedMiB': 43349,
+                      'totalMiB': 43008,
+                      'usedMiB': 32512},
+         'additionalStates': [],
+         'degradedStates': [],
+         'domain': HP3PAR_DOMAIN,
+         'failedStates': [],
+         'id': 5,
+         'name': HP3PAR_CPG,
+         'numFPVVs': 2,
+         'numTPVVs': 0,
+         'state': 1,
+         'uuid': '29c214aa-62b9-41c8-b198-543f6cf24edf'}]
+
+    def __init__(self, api_url):
+        self.api_url = api_url
+        self.volumes = []
+        self.hosts = []
+        self.vluns = []
+
+    def debug_rest(self, flag):
+        self.debug = flag
+
+    def login(self, username, password, optional=None):
+        self.connection_count += 1
+        return None
+
+    def logout(self):
+        if self.connection_count < 1:
+            raise hpexceptions.CommandError('No connection to log out.')
+        self.connection_count -= 1
+        return None
+
+    def getVolumes(self):
+        return self.volumes
+
+    def getVolume(self, name):
+        if self.volumes:
+            for volume in self.volumes:
+                if volume['name'] == name:
+                    return volume
+
+        msg = {'code': 'NON_EXISTENT_HOST',
+               'desc': "VOLUME '%s' was not found" % name}
+        raise hpexceptions.HTTPNotFound(msg)
+
+    def createVolume(self, name, cpgName, sizeMiB, optional=None):
+        new_vol = {'additionalStates': [],
+                   'adminSpace': {'freeMiB': 0,
+                                  'rawReservedMiB': 384,
+                                  'reservedMiB': 128,
+                                  'usedMiB': 128},
+                   'baseId': 115,
+                   'comment': optional['comment'],
+                   'copyType': 1,
+                   'creationTime8601': '2012-10-22T16:37:57-07:00',
+                   'creationTimeSec': 1350949077,
+                   'degradedStates': [],
'domain': HP3PAR_DOMAIN, + 'failedStates': [], + 'id': 115, + 'name': name, + 'policies': {'caching': True, + 'oneHost': False, + 'staleSS': True, + 'system': False, + 'zeroDetect': False}, + 'provisioningType': 1, + 'readOnly': False, + 'sizeMiB': sizeMiB, + 'snapCPG': optional['snapCPG'], + 'snapshotSpace': {'freeMiB': 0, + 'rawReservedMiB': 683, + 'reservedMiB': 512, + 'usedMiB': 512}, + 'ssSpcAllocLimitPct': 0, + 'ssSpcAllocWarningPct': 0, + 'state': 1, + 'userCPG': cpgName, + 'userSpace': {'freeMiB': 0, + 'rawReservedMiB': 41984, + 'reservedMiB': 31488, + 'usedMiB': 31488}, + 'usrSpcAllocLimitPct': 0, + 'usrSpcAllocWarningPct': 0, + 'uuid': '1e7daee4-49f4-4d07-9ab8-2b6a4319e243', + 'wwn': '50002AC00073383D'} + self.volumes.append(new_vol) + return None + + def deleteVolume(self, name): + volume = self.getVolume(name) + self.volumes.remove(volume) + + def createSnapshot(self, name, copyOfName, optional=None): + new_snap = {'additionalStates': [], + 'adminSpace': {'freeMiB': 0, + 'rawReservedMiB': 0, + 'reservedMiB': 0, + 'usedMiB': 0}, + 'baseId': 342, + 'comment': optional['comment'], + 'copyOf': copyOfName, + 'copyType': 3, + 'creationTime8601': '2012-11-09T15:13:28-08:00', + 'creationTimeSec': 1352502808, + 'degradedStates': [], + 'domain': HP3PAR_DOMAIN, + 'expirationTime8601': '2012-11-09T17:13:28-08:00', + 'expirationTimeSec': 1352510008, + 'failedStates': [], + 'id': 343, + 'name': name, + 'parentId': 342, + 'policies': {'caching': True, + 'oneHost': False, + 'staleSS': True, + 'system': False, + 'zeroDetect': False}, + 'provisioningType': 3, + 'readOnly': True, + 'retentionTime8601': '2012-11-09T16:13:27-08:00', + 'retentionTimeSec': 1352506407, + 'sizeMiB': 256, + 'snapCPG': HP3PAR_CPG_SNAP, + 'snapshotSpace': {'freeMiB': 0, + 'rawReservedMiB': 0, + 'reservedMiB': 0, + 'usedMiB': 0}, + 'ssSpcAllocLimitPct': 0, + 'ssSpcAllocWarningPct': 0, + 'state': 1, + 'userCPG': HP3PAR_CPG, + 'userSpace': {'freeMiB': 0, + 'rawReservedMiB': 0, + 'reservedMiB': 0, + 'usedMiB': 0}, + 'usrSpcAllocLimitPct': 0, + 'usrSpcAllocWarningPct': 0, + 'uuid': 'd7a40b8f-2511-46a8-9e75-06383c826d19', + 'wwn': '50002AC00157383D'} + self.volumes.append(new_snap) + return None + + def deleteSnapshot(self, name): + volume = self.getVolume(name) + self.volumes.remove(volume) + + def createCPG(self, name, optional=None): + cpg = {'SAGrowth': {'LDLayout': {'diskPatterns': [{'diskType': 2}]}, + 'incrementMiB': 8192}, + 'SAUsage': {'rawTotalMiB': 24576, + 'rawUsedMiB': 768, + 'totalMiB': 8192, + 'usedMiB': 256}, + 'SDGrowth': {'LDLayout': {'RAIDType': 4, + 'diskPatterns': [{'diskType': 2}]}, + 'incrementMiB': 32768}, + 'SDUsage': {'rawTotalMiB': 49152, + 'rawUsedMiB': 1023, + 'totalMiB': 36864, + 'usedMiB': 768}, + 'UsrUsage': {'rawTotalMiB': 57344, + 'rawUsedMiB': 43349, + 'totalMiB': 43008, + 'usedMiB': 32512}, + 'additionalStates': [], + 'degradedStates': [], + 'domain': HP3PAR_DOMAIN, + 'failedStates': [], + 'id': 1, + 'name': name, + 'numFPVVs': 2, + 'numTPVVs': 0, + 'state': 1, + 'uuid': '29c214aa-62b9-41c8-b198-000000000000'} + + new_cpg = cpg.copy() + new_cpg.update(optional) + self.cpgs.append(new_cpg) + + def getCPGs(self): + return self.cpgs + + def getCPG(self, name): + if self.cpgs: + for cpg in self.cpgs: + if cpg['name'] == name: + return cpg + + msg = {'code': 'NON_EXISTENT_HOST', + 'desc': "CPG '%s' was not found" % name} + raise hpexceptions.HTTPNotFound(msg) + + def deleteCPG(self, name): + cpg = self.getCPG(name) + self.cpgs.remove(cpg) + + def createVLUN(self, volumeName, lun, hostname=None, + 
portPos=None, noVcn=None, + overrideLowerPriority=None): + + vlun = {'active': False, + 'failedPathInterval': 0, + 'failedPathPol': 1, + 'hostname': hostname, + 'lun': lun, + 'multipathing': 1, + 'portPos': portPos, + 'type': 4, + 'volumeName': volumeName, + 'volumeWWN': '50002AC00077383D'} + self.vluns.append(vlun) + return None + + def deleteVLUN(self, name, lunID, hostname=None, port=None): + vlun = self.getVLUN(name) + self.vluns.remove(vlun) + + def getVLUNs(self): + return self.vluns + + def getVLUN(self, volumeName): + for vlun in self.vluns: + if vlun['volumeName'] == volumeName: + return vlun + + msg = {'code': 'NON_EXISTENT_HOST', + 'desc': "VLUN '%s' was not found" % volumeName} + raise hpexceptions.HTTPNotFound(msg) + + def getHost(self, hostname): + return None + + def modifyHost(self, hostname, options): + return None + + def getPorts(self): + return None + + +class HP3PARBaseDriver(): + + VOLUME_ID = "d03338a9-9115-48a3-8dfc-35cdfcdc15a7" + CLONE_ID = "d03338a9-9115-48a3-8dfc-000000000000" + VOLUME_NAME = "volume-d03338a9-9115-48a3-8dfc-35cdfcdc15a7" + SNAPSHOT_ID = "2f823bdc-e36e-4dc8-bd15-de1c7a28ff31" + SNAPSHOT_NAME = "snapshot-2f823bdc-e36e-4dc8-bd15-de1c7a28ff31" + VOLUME_3PAR_NAME = "osv-0DM4qZEVSKON-DXN-NwVpw" + SNAPSHOT_3PAR_NAME = "oss-L4I73ONuTci9Fd4ceij-MQ" + FAKE_HOST = "fakehost" + USER_ID = '2689d9a913974c008b1d859013f23607' + PROJECT_ID = 'fac88235b9d64685a3530f73e490348f' + VOLUME_ID_SNAP = '761fc5e5-5191-4ec7-aeba-33e36de44156' + FAKE_DESC = 'test description name' + FAKE_FC_PORTS = [{'portPos': {'node': 7, 'slot': 1, 'cardPort': 1}, + 'portWWN': '0987654321234', + 'protocol': 1, + 'mode': 2, + 'linkState': 4}, + {'portPos': {'node': 6, 'slot': 1, 'cardPort': 1}, + 'portWWN': '123456789000987', + 'protocol': 1, + 'mode': 2, + 'linkState': 4}] + QOS = {'qos:maxIOPS': '1000', 'qos:maxBWS': '50'} + VVS_NAME = "myvvs" + FAKE_ISCSI_PORT = {'portPos': {'node': 8, 'slot': 1, 'cardPort': 1}, + 'protocol': 2, + 'mode': 2, + 'IPAddr': '1.1.1.2', + 'iSCSIName': ('iqn.2000-05.com.3pardata:' + '21810002ac00383d'), + 'linkState': 4} + volume = {'name': VOLUME_NAME, + 'id': VOLUME_ID, + 'display_name': 'Foo Volume', + 'size': 2, + 'host': FAKE_HOST, + 'volume_type': None, + 'volume_type_id': None} + + volume_qos = {'name': VOLUME_NAME, + 'id': VOLUME_ID, + 'display_name': 'Foo Volume', + 'size': 2, + 'host': FAKE_HOST, + 'volume_type': None, + 'volume_type_id': 'gold'} + + snapshot = {'name': SNAPSHOT_NAME, + 'id': SNAPSHOT_ID, + 'user_id': USER_ID, + 'project_id': PROJECT_ID, + 'volume_id': VOLUME_ID_SNAP, + 'volume_name': VOLUME_NAME, + 'status': 'creating', + 'progress': '0%', + 'volume_size': 2, + 'display_name': 'fakesnap', + 'display_description': FAKE_DESC} + + connector = {'ip': '10.0.0.2', + 'initiator': 'iqn.1993-08.org.debian:01:222', + 'wwpns': ["123456789012345", "123456789054321"], + 'wwnns': ["223456789012345", "223456789054321"], + 'host': 'fakehost'} + + volume_type = {'name': 'gold', + 'deleted': False, + 'updated_at': None, + 'extra_specs': {'qos:maxBWS': '50', + 'qos:maxIOPS': '1000'}, + 'deleted_at': None, + 'id': 'gold'} + + def setup_configuration(self): + configuration = mox.MockObject(conf.Configuration) + configuration.hp3par_debug = False + configuration.hp3par_username = 'testUser' + configuration.hp3par_password = 'testPassword' + configuration.hp3par_api_url = 'https://1.1.1.1/api/v1' + configuration.hp3par_cpg = HP3PAR_CPG + configuration.hp3par_cpg_snap = HP3PAR_CPG_SNAP + configuration.iscsi_ip_address = '1.1.1.2' + 
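+        # The iscsi_* and san_* values below feed the target portal and
+        # the driver's SSH (CLI) code paths; the hp3par_* values above
+        # configure the WSAPI client.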
configuration.iscsi_port = '1234' + configuration.san_ip = '2.2.2.2' + configuration.san_login = 'test' + configuration.san_password = 'test' + configuration.hp3par_snapshot_expiration = "" + configuration.hp3par_snapshot_retention = "" + configuration.hp3par_iscsi_ips = [] + return configuration + + def setup_fakes(self): + self.stubs.Set(hpdriver.hpcommon.HP3PARCommon, "_create_client", + self.fake_create_client) + self.stubs.Set(hpdriver.hpcommon.HP3PARCommon, "_set_connections", + self.fake_set_connections) + self.stubs.Set(hpdriver.hpcommon.HP3PARCommon, "_get_3par_host", + self.fake_get_3par_host) + self.stubs.Set(hpdriver.hpcommon.HP3PARCommon, "_delete_3par_host", + self.fake_delete_3par_host) + self.stubs.Set(hpdriver.hpcommon.HP3PARCommon, "_create_3par_vlun", + self.fake_create_3par_vlun) + self.stubs.Set(hpdriver.hpcommon.HP3PARCommon, "get_ports", + self.fake_get_ports) + self.stubs.Set(hpdriver.hpcommon.HP3PARCommon, "get_cpg", + self.fake_get_cpg) + self.stubs.Set(hpdriver.hpcommon.HP3PARCommon, + "get_volume_settings_from_type", + self.fake_get_volume_settings_from_type) + self.stubs.Set(hpfcdriver.hpcommon.HP3PARCommon, "get_domain", + self.fake_get_domain) + + def clear_mox(self): + self.mox.ResetAll() + self.stubs.UnsetAll() + + def fake_create_client(self): + return FakeHP3ParClient(self.driver.configuration.hp3par_api_url) + + def fake_get_cpg(self, volume, allowSnap=False): + return HP3PAR_CPG + + def fake_set_connections(self): + return + + def fake_get_domain(self, cpg): + return HP3PAR_DOMAIN + + def fake_extend_volume(self, volume, new_size): + vol = self.driver.common.client.getVolume(volume['name']) + old_size = vol['sizeMiB'] + option = {'comment': vol['comment'], 'snapCPG': vol['snapCPG']} + self.driver.common.client.deleteVolume(volume['name']) + self.driver.common.client.createVolume(vol['name'], + vol['userCPG'], + new_size, option) + + def fake_get_3par_host(self, hostname): + if hostname not in self._hosts: + msg = {'code': 'NON_EXISTENT_HOST', + 'desc': "HOST '%s' was not found" % hostname} + raise hpexceptions.HTTPNotFound(msg) + else: + return self._hosts[hostname] + + def fake_delete_3par_host(self, hostname): + if hostname not in self._hosts: + msg = {'code': 'NON_EXISTENT_HOST', + 'desc': "HOST '%s' was not found" % hostname} + raise hpexceptions.HTTPNotFound(msg) + else: + del self._hosts[hostname] + + def fake_create_3par_vlun(self, volume, hostname, nsp): + self.driver.common.client.createVLUN(volume, 19, hostname, nsp) + + def fake_get_ports(self): + ports = self.FAKE_FC_PORTS + ports.append(self.FAKE_ISCSI_PORT) + return {'members': ports} + + def fake_get_volume_type(self, type_id): + return self.volume_type + + def fake_get_qos_by_volume_type(self, volume_type): + return self.QOS + + def fake_add_volume_to_volume_set(self, volume, volume_name, + cpg, vvs_name, qos): + return volume + + def fake_copy_volume(self, src_name, dest_name, cpg=None, + snap_cpg=None, tpvv=True): + pass + + def fake_get_volume_stats(self, vol_name): + return "normal" + + def fake_get_volume_settings_from_type(self, volume): + return {'cpg': HP3PAR_CPG, + 'snap_cpg': HP3PAR_CPG_SNAP, + 'vvs_name': self.VVS_NAME, + 'qos': self.QOS, + 'tpvv': True, + 'volume_type': self.volume_type} + + def fake_get_volume_settings_from_type_noqos(self, volume): + return {'cpg': HP3PAR_CPG, + 'snap_cpg': HP3PAR_CPG_SNAP, + 'vvs_name': None, + 'qos': None, + 'tpvv': True, + 'volume_type': None} + + def test_create_volume(self): + self.flags(lock_path=self.tempdir) + 
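+        # Use the no-QoS settings variant here; the QoS path is exercised
+        # separately by test_create_volume_qos.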
self.stubs.Set(hpdriver.hpcommon.HP3PARCommon, + "get_volume_settings_from_type", + self.fake_get_volume_settings_from_type_noqos) + self.driver.create_volume(self.volume) + volume = self.driver.common.client.getVolume(self.VOLUME_3PAR_NAME) + self.assertEqual(volume['name'], self.VOLUME_3PAR_NAME) + + def test_create_volume_qos(self): + self.flags(lock_path=self.tempdir) + self.stubs.Set(hpdriver.hpcommon.HP3PARCommon, + "get_volume_settings_from_type", + self.fake_get_volume_settings_from_type) + self.stubs.Set(hpdriver.hpcommon.HP3PARCommon, + "_add_volume_to_volume_set", + self.fake_add_volume_to_volume_set) + self.driver.create_volume(self.volume_qos) + volume = self.driver.common.client.getVolume(self.VOLUME_3PAR_NAME) + + self.assertEqual(volume['name'], self.VOLUME_3PAR_NAME) + self.assertNotIn(self.QOS, dict(ast.literal_eval(volume['comment']))) + + def test_delete_volume(self): + self.flags(lock_path=self.tempdir) + self.stubs.Set(hpdriver.hpcommon.HP3PARCommon, + "get_volume_settings_from_type", + self.fake_get_volume_settings_from_type) + self.driver.delete_volume(self.volume) + self.assertRaises(hpexceptions.HTTPNotFound, + self.driver.common.client.getVolume, + self.VOLUME_ID) + + def test_create_cloned_volume(self): + self.flags(lock_path=self.tempdir) + self.stubs.Set(hpdriver.hpcommon.HP3PARCommon, + "get_volume_settings_from_type", + self.fake_get_volume_settings_from_type) + self.stubs.Set(hpdriver.hpcommon.HP3PARCommon, "_copy_volume", + self.fake_copy_volume) + volume = {'name': HP3PARBaseDriver.VOLUME_NAME, + 'id': HP3PARBaseDriver.CLONE_ID, + 'display_name': 'Foo Volume', + 'size': 2, + 'host': HP3PARBaseDriver.FAKE_HOST, + 'source_volid': HP3PARBaseDriver.VOLUME_ID} + src_vref = {} + model_update = self.driver.create_cloned_volume(volume, src_vref) + self.assertIsNotNone(model_update) + + @mock.patch.object(hpdriver.hpcommon.HP3PARCommon, '_run_ssh') + def test_attach_volume(self, mock_run_ssh): + mock_run_ssh.side_effect = [[CLI_CR, ''], Exception('Custom ex')] + self.driver.attach_volume(context.get_admin_context(), + self.volume, + 'abcdef', + 'newhost', + '/dev/vdb') + self.assertTrue(mock_run_ssh.called) + self.assertRaises(exception.CinderException, + self.driver.attach_volume, + context.get_admin_context(), + self.volume, + 'abcdef', + 'newhost', + '/dev/vdb') + + @mock.patch.object(hpdriver.hpcommon.HP3PARCommon, '_run_ssh') + def test_detach_volume(self, mock_run_ssh): + mock_run_ssh.side_effect = [[CLI_CR, ''], Exception('Custom ex')] + self.driver.detach_volume(context.get_admin_context(), self.volume) + self.assertTrue(mock_run_ssh.called) + self.assertRaises(exception.CinderException, + self.driver.detach_volume, + context.get_admin_context(), + self.volume) + + def test_create_snapshot(self): + self.flags(lock_path=self.tempdir) + self.driver.create_snapshot(self.snapshot) + + # check to see if the snapshot was created + snap_vol = self.driver.common.client.getVolume(self.SNAPSHOT_3PAR_NAME) + self.assertEqual(snap_vol['name'], self.SNAPSHOT_3PAR_NAME) + + def test_delete_snapshot(self): + self.flags(lock_path=self.tempdir) + + self.driver.create_snapshot(self.snapshot) + #make sure it exists first + vol = self.driver.common.client.getVolume(self.SNAPSHOT_3PAR_NAME) + self.assertEqual(vol['name'], self.SNAPSHOT_3PAR_NAME) + self.driver.delete_snapshot(self.snapshot) + + # the snapshot should be deleted now + self.assertRaises(hpexceptions.HTTPNotFound, + self.driver.common.client.getVolume, + self.SNAPSHOT_3PAR_NAME) + + def 
test_delete_snapshot_in_use(self): + self.flags(lock_path=self.tempdir) + + self.driver.create_snapshot(self.snapshot) + self.driver.create_volume_from_snapshot(self.volume, self.snapshot) + + ex = hpexceptions.HTTPConflict("In use") + self.driver.common.client.deleteVolume = mock.Mock(side_effect=ex) + + # Deleting the snapshot that a volume is dependent on should fail + self.assertRaises(exception.SnapshotIsBusy, + self.driver.delete_snapshot, + self.snapshot) + + def test_create_volume_from_snapshot(self): + self.flags(lock_path=self.tempdir) + self.driver.create_volume_from_snapshot(self.volume, self.snapshot) + + snap_vol = self.driver.common.client.getVolume(self.VOLUME_3PAR_NAME) + self.assertEqual(snap_vol['name'], self.VOLUME_3PAR_NAME) + + volume = self.volume.copy() + volume['size'] = 1 + self.assertRaises(exception.InvalidInput, + self.driver.create_volume_from_snapshot, + volume, self.snapshot) + + def test_create_volume_from_snapshot_qos(self): + self.flags(lock_path=self.tempdir) + self.stubs.Set(hpdriver.hpcommon.HP3PARCommon, "_get_volume_type", + self.fake_get_volume_type) + self.stubs.Set(hpdriver.hpcommon.HP3PARCommon, + "_get_qos_by_volume_type", + self.fake_get_qos_by_volume_type) + self.stubs.Set(hpdriver.hpcommon.HP3PARCommon, + "_add_volume_to_volume_set", + self.fake_add_volume_to_volume_set) + self.driver.create_volume_from_snapshot(self.volume_qos, self.snapshot) + snap_vol = self.driver.common.client.getVolume(self.VOLUME_3PAR_NAME) + self.assertEqual(snap_vol['name'], self.VOLUME_3PAR_NAME) + self.assertNotIn(self.QOS, dict(ast.literal_eval(snap_vol['comment']))) + + volume = self.volume.copy() + volume['size'] = 1 + self.assertRaises(exception.InvalidInput, + self.driver.create_volume_from_snapshot, + volume, self.snapshot) + + def test_terminate_connection(self): + self.flags(lock_path=self.tempdir) + #setup the connections + self.driver.initialize_connection(self.volume, self.connector) + vlun = self.driver.common.client.getVLUN(self.VOLUME_3PAR_NAME) + self.assertEqual(vlun['volumeName'], self.VOLUME_3PAR_NAME) + self.driver.terminate_connection(self.volume, self.connector, + force=True) + # vlun should be gone. 
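+        # The fake client raises HTTPNotFound once the VLUN has been removed.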
+ self.assertRaises(hpexceptions.HTTPNotFound, + self.driver.common.client.getVLUN, + self.VOLUME_3PAR_NAME) + + @mock.patch.object(hpdriver.hpcommon.HP3PARCommon, '_run_ssh') + def test_update_volume_key_value_pair(self, mock_run_ssh): + mock_run_ssh.return_value = [CLI_CR, ''] + self.assertEqual( + self.driver.common.update_volume_key_value_pair(self.volume, + 'a', + 'b'), + None) + update_cmd = ['setvv', '-setkv', 'a=b', self.VOLUME_3PAR_NAME] + mock_run_ssh.assert_called_once_with(update_cmd, False) + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.common.update_volume_key_value_pair, + self.volume, + None, + 'b') + + @mock.patch.object(hpdriver.hpcommon.HP3PARCommon, '_run_ssh') + def test_clear_volume_key_value_pair(self, mock_run_ssh): + mock_run_ssh.side_effect = [[CLI_CR, ''], Exception('Custom ex')] + self.assertEqual( + self.driver.common.clear_volume_key_value_pair(self.volume, 'a'), + None) + clear_cmd = ['setvv', '-clrkey', 'a', self.VOLUME_3PAR_NAME] + mock_run_ssh.assert_called_once_with(clear_cmd, False) + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.common.clear_volume_key_value_pair, + self.volume, + None) + + def test_extend_volume(self): + self.flags(lock_path=self.tempdir) + self.stubs.UnsetAll() + self.stubs.Set(hpdriver.hpcommon.HP3PARCommon, "extend_volume", + self.fake_extend_volume) + option = {'comment': '', 'snapCPG': HP3PAR_CPG_SNAP} + self.driver.common.client.createVolume(self.volume['name'], + HP3PAR_CPG, + self.volume['size'], + option) + old_size = self.volume['size'] + volume = self.driver.common.client.getVolume(self.volume['name']) + self.driver.extend_volume(volume, str(old_size + 1)) + vol = self.driver.common.client.getVolume(self.volume['name']) + self.assertEqual(vol['sizeMiB'], str(old_size + 1)) + + +class TestHP3PARFCDriver(HP3PARBaseDriver, test.TestCase): + + _hosts = {} + + def setUp(self): + self.tempdir = tempfile.mkdtemp() + super(TestHP3PARFCDriver, self).setUp() + self.setup_driver(self.setup_configuration()) + self.setup_fakes() + + def setup_fakes(self): + super(TestHP3PARFCDriver, self).setup_fakes() + self.stubs.Set(hpfcdriver.HP3PARFCDriver, + "_create_3par_fibrechan_host", + self.fake_create_3par_fibrechan_host) + + def tearDown(self): + shutil.rmtree(self.tempdir) + self.assertEqual(0, self.driver.common.client.connection_count, + 'Leaked hp3parclient connection.') + super(TestHP3PARFCDriver, self).tearDown() + + def setup_driver(self, configuration): + self.driver = hpfcdriver.HP3PARFCDriver(configuration=configuration) + + self.stubs.Set(hpdriver.hpcommon.HP3PARCommon, "_create_client", + self.fake_create_client) + self.stubs.Set(hpdriver.hpcommon.HP3PARCommon, "_set_connections", + self.fake_set_connections) + self.driver.do_setup(None) + + def fake_create_3par_fibrechan_host(self, hostname, wwn, + domain, persona_id): + host = {'FCPaths': [{'driverVersion': None, + 'firmwareVersion': None, + 'hostSpeed': 0, + 'model': None, + 'portPos': {'cardPort': 1, 'node': 1, + 'slot': 2}, + 'vendor': None, + 'wwn': wwn[0]}, + {'driverVersion': None, + 'firmwareVersion': None, + 'hostSpeed': 0, + 'model': None, + 'portPos': {'cardPort': 1, 'node': 0, + 'slot': 2}, + 'vendor': None, + 'wwn': wwn[1]}], + 'descriptors': None, + 'domain': domain, + 'iSCSIPaths': [], + 'id': 11, + 'name': hostname} + self._hosts[hostname] = host + self.properties = {'data': + {'target_discovered': True, + 'target_lun': 186, + 'target_portal': '1.1.1.2:1234'}, + 'driver_volume_type': 'fibre_channel'} + return 
hostname + + def test_initialize_connection(self): + self.flags(lock_path=self.tempdir) + result = self.driver.initialize_connection(self.volume, self.connector) + self.assertEqual(result['driver_volume_type'], 'fibre_channel') + + # we should have a host and a vlun now. + host = self.fake_get_3par_host(self.FAKE_HOST) + self.assertEqual(self.FAKE_HOST, host['name']) + self.assertEqual(HP3PAR_DOMAIN, host['domain']) + vlun = self.driver.common.client.getVLUN(self.VOLUME_3PAR_NAME) + + self.assertEqual(self.VOLUME_3PAR_NAME, vlun['volumeName']) + self.assertEqual(self.FAKE_HOST, vlun['hostname']) + + def test_get_volume_stats(self): + self.flags(lock_path=self.tempdir) + + def fake_safe_get(*args): + return "HP3PARFCDriver" + + self.stubs.Set(self.driver.configuration, 'safe_get', fake_safe_get) + stats = self.driver.get_volume_stats(True) + self.assertEqual(stats['storage_protocol'], 'FC') + self.assertEqual(stats['total_capacity_gb'], 'infinite') + self.assertEqual(stats['free_capacity_gb'], 'infinite') + + #modify the CPG to have a limit + old_cpg = self.driver.common.client.getCPG(HP3PAR_CPG) + options = {'SDGrowth': {'limitMiB': 8192}} + self.driver.common.client.deleteCPG(HP3PAR_CPG) + self.driver.common.client.createCPG(HP3PAR_CPG, options) + + const = 0.0009765625 + stats = self.driver.get_volume_stats(True) + self.assertEqual(stats['storage_protocol'], 'FC') + total_capacity_gb = 8192 * const + self.assertEqual(stats['total_capacity_gb'], total_capacity_gb) + free_capacity_gb = int((8192 - old_cpg['UsrUsage']['usedMiB']) * const) + self.assertEqual(stats['free_capacity_gb'], free_capacity_gb) + self.driver.common.client.deleteCPG(HP3PAR_CPG) + self.driver.common.client.createCPG(HP3PAR_CPG, {}) + + def test_create_host(self): + self.flags(lock_path=self.tempdir) + + #record + self.clear_mox() + self.stubs.Set(hpfcdriver.hpcommon.HP3PARCommon, "get_cpg", + self.fake_get_cpg) + self.stubs.Set(hpfcdriver.hpcommon.HP3PARCommon, "get_domain", + self.fake_get_domain) + _run_ssh = self.mox.CreateMock(hpdriver.hpcommon.HP3PARCommon._run_ssh) + self.stubs.Set(hpdriver.hpcommon.HP3PARCommon, "_run_ssh", _run_ssh) + + getHost = self.mox.CreateMock(FakeHP3ParClient.getHost) + self.stubs.Set(FakeHP3ParClient, "getHost", getHost) + + ex = hpexceptions.HTTPNotFound('Host not found.') + getHost('fakehost').AndRaise(ex) + + create_host_cmd = (['createhost', '-persona', '1', '-domain', + ('OpenStack',), 'fakehost', '123456789012345', + '123456789054321']) + _run_ssh(create_host_cmd, False).AndReturn([CLI_CR, '']) + + getHost('fakehost').AndReturn({'name': self.FAKE_HOST, + 'FCPaths': [{'wwn': '123456789012345'}, + {'wwn': '123456789054321'}]} + ) + self.mox.ReplayAll() + + host = self.driver._create_host(self.volume, self.connector) + self.assertEqual(host['name'], self.FAKE_HOST) + + def test_create_invalid_host(self): + self.flags(lock_path=self.tempdir) + + #record + self.clear_mox() + self.stubs.Set(hpdriver.hpcommon.HP3PARCommon, "get_cpg", + self.fake_get_cpg) + self.stubs.Set(hpdriver.hpcommon.HP3PARCommon, "get_domain", + self.fake_get_domain) + _run_ssh = self.mox.CreateMock(hpdriver.hpcommon.HP3PARCommon._run_ssh) + self.stubs.Set(hpdriver.hpcommon.HP3PARCommon, "_run_ssh", _run_ssh) + + getHost = self.mox.CreateMock(FakeHP3ParClient.getHost) + self.stubs.Set(FakeHP3ParClient, "getHost", getHost) + + not_found_ex = hpexceptions.HTTPNotFound('Host not found.') + getHost('fakehost').AndRaise(not_found_ex) + + create_host_cmd = (['createhost', '-persona', '1', '-domain', + ('OpenStack',), 
'fakehost', '123456789012345', + '123456789054321']) + create_host_ret = pack(CLI_CR + + 'already used by host fakehost.foo (19)') + _run_ssh(create_host_cmd, False).AndReturn([create_host_ret, '']) + + host_ret = { + 'name': 'fakehost.foo', + 'FCPaths': [{'wwn': '123456789012345'}, + {'wwn': '123456789054321'}]} + getHost('fakehost.foo').AndReturn(host_ret) + + self.mox.ReplayAll() + + host = self.driver._create_host(self.volume, self.connector) + + self.assertEqual(host['name'], 'fakehost.foo') + + def test_create_modify_host(self): + self.flags(lock_path=self.tempdir) + + #record + self.clear_mox() + self.stubs.Set(hpdriver.hpcommon.HP3PARCommon, "get_cpg", + self.fake_get_cpg) + self.stubs.Set(hpdriver.hpcommon.HP3PARCommon, "get_domain", + self.fake_get_domain) + + getHost = self.mox.CreateMock(FakeHP3ParClient.getHost) + self.stubs.Set(FakeHP3ParClient, "getHost", getHost) + + modifyHost = self.mox.CreateMock(FakeHP3ParClient.modifyHost) + self.stubs.Set(FakeHP3ParClient, "modifyHost", modifyHost) + + getHost('fakehost').AndReturn(({'name': self.FAKE_HOST, + 'FCPaths': []})) + + modifyHost('fakehost', {'FCWWNs': + ['123456789012345', '123456789054321'], + 'pathOperation': 1}) + + getHost('fakehost').AndReturn({'name': self.FAKE_HOST, + 'FCPaths': [{'wwn': '123456789012345'}, + {'wwn': '123456789054321'}]} + ) + + self.mox.ReplayAll() + + host = self.driver._create_host(self.volume, self.connector) + self.assertEqual(host['name'], self.FAKE_HOST) + self.assertEqual(len(host['FCPaths']), 2) + + def test_modify_host_with_new_wwn(self): + self.flags(lock_path=self.tempdir) + self.clear_mox() + + hpdriver.hpcommon.HP3PARCommon.get_cpg = mock.Mock( + return_value=self.fake_get_cpg) + hpdriver.hpcommon.HP3PARCommon.get_domain = mock.Mock( + return_value=self.fake_get_domain) + + # set up the getHost mock + self.driver.common.client.getHost = mock.Mock() + # define the return values for the 2 calls + getHost_ret1 = { + 'name': self.FAKE_HOST, + 'FCPaths': [{'wwn': '123456789054321'}]} + getHost_ret2 = { + 'name': self.FAKE_HOST, + 'FCPaths': [{'wwn': '123456789012345'}, + {'wwn': '123456789054321'}]} + self.driver.common.client.getHost.side_effect = [ + getHost_ret1, getHost_ret2] + + # setup the modifyHost mock + self.driver.common.client.modifyHost = mock.Mock() + + host = self.driver._create_host(self.volume, self.connector) + + # mock assertions + self.driver.common.client.getHost.assert_has_calls([ + mock.call('fakehost'), + mock.call('fakehost')]) + self.driver.common.client.modifyHost.assert_called_once_with( + 'fakehost', {'FCWWNs': ['123456789012345'], 'pathOperation': 1}) + + self.assertEqual(host['name'], self.FAKE_HOST) + self.assertEqual(len(host['FCPaths']), 2) + + def test_modify_host_with_unknown_wwn_and_new_wwn(self): + self.flags(lock_path=self.tempdir) + self.clear_mox() + + hpdriver.hpcommon.HP3PARCommon.get_cpg = mock.Mock( + return_value=self.fake_get_cpg) + hpdriver.hpcommon.HP3PARCommon.get_domain = mock.Mock( + return_value=self.fake_get_domain) + + # set up the getHost mock + self.driver.common.client.getHost = mock.Mock() + # define the return values for the 2 calls + getHost_ret1 = { + 'name': self.FAKE_HOST, + 'FCPaths': [{'wwn': '123456789054321'}, + {'wwn': 'xxxxxxxxxxxxxxx'}]} + getHost_ret2 = { + 'name': self.FAKE_HOST, + 'FCPaths': [{'wwn': '123456789012345'}, + {'wwn': '123456789054321'}, + {'wwn': 'xxxxxxxxxxxxxxx'}]} + self.driver.common.client.getHost.side_effect = [ + getHost_ret1, getHost_ret2] + + # setup the modifyHost mock + 
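+        # Only the missing WWN should be passed to modifyHost below.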
self.driver.common.client.modifyHost = mock.Mock() + + host = self.driver._create_host(self.volume, self.connector) + + # mock assertions + self.driver.common.client.getHost.assert_has_calls([ + mock.call('fakehost'), + mock.call('fakehost')]) + self.driver.common.client.modifyHost.assert_called_once_with( + 'fakehost', {'FCWWNs': ['123456789012345'], 'pathOperation': 1}) + + self.assertEqual(host['name'], self.FAKE_HOST) + self.assertEqual(len(host['FCPaths']), 3) + + +class TestHP3PARISCSIDriver(HP3PARBaseDriver, test.TestCase): + + TARGET_IQN = "iqn.2000-05.com.3pardata:21810002ac00383d" + + _hosts = {} + + def setUp(self): + self.tempdir = tempfile.mkdtemp() + super(TestHP3PARISCSIDriver, self).setUp() + self.setup_driver(self.setup_configuration()) + self.setup_fakes() + + def setup_fakes(self): + super(TestHP3PARISCSIDriver, self).setup_fakes() + + self.stubs.Set(hpdriver.HP3PARISCSIDriver, "_create_3par_iscsi_host", + self.fake_create_3par_iscsi_host) + + #target_iqn = 'iqn.2000-05.com.3pardata:21810002ac00383d' + self.properties = {'data': + {'target_discovered': True, + 'target_iqn': self.TARGET_IQN, + 'target_lun': 186, + 'target_portal': '1.1.1.2:1234'}, + 'driver_volume_type': 'iscsi'} + + def tearDown(self): + shutil.rmtree(self.tempdir) + self.assertEqual(0, self.driver.common.client.connection_count, + 'Leaked hp3parclient connection.') + self._hosts = {} + super(TestHP3PARISCSIDriver, self).tearDown() + + def setup_driver(self, configuration, set_up_fakes=True): + self.driver = hpdriver.HP3PARISCSIDriver(configuration=configuration) + + self.stubs.Set(hpdriver.hpcommon.HP3PARCommon, "_create_client", + self.fake_create_client) + + if set_up_fakes: + self.stubs.Set(hpdriver.hpcommon.HP3PARCommon, "get_ports", + self.fake_get_ports) + + self.stubs.Set(hpdriver.hpcommon.HP3PARCommon, "_set_connections", + self.fake_set_connections) + self.driver.do_setup(None) + + def fake_create_3par_iscsi_host(self, hostname, iscsi_iqn, + domain, persona_id): + host = {'FCPaths': [], + 'descriptors': None, + 'domain': domain, + 'iSCSIPaths': [{'driverVersion': None, + 'firmwareVersion': None, + 'hostSpeed': 0, + 'ipAddr': '10.10.221.59', + 'model': None, + 'name': iscsi_iqn, + 'portPos': {'cardPort': 1, 'node': 1, + 'slot': 8}, + 'vendor': None}], + 'id': 11, + 'name': hostname} + self._hosts[hostname] = host + return hostname + + def test_initialize_connection(self): + self.flags(lock_path=self.tempdir) + result = self.driver.initialize_connection(self.volume, self.connector) + self.assertEqual(result['driver_volume_type'], 'iscsi') + self.assertEqual(result['data']['target_iqn'], + self.properties['data']['target_iqn']) + self.assertEqual(result['data']['target_portal'], + self.properties['data']['target_portal']) + self.assertEqual(result['data']['target_discovered'], + self.properties['data']['target_discovered']) + + # we should have a host and a vlun now. 
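+        # initialize_connection should have registered the host in the fake host map.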
+ host = self.fake_get_3par_host(self.FAKE_HOST) + self.assertEqual(self.FAKE_HOST, host['name']) + self.assertEqual(HP3PAR_DOMAIN, host['domain']) + vlun = self.driver.common.client.getVLUN(self.VOLUME_3PAR_NAME) + + self.assertEqual(self.VOLUME_3PAR_NAME, vlun['volumeName']) + self.assertEqual(self.FAKE_HOST, vlun['hostname']) + + def test_get_volume_stats(self): + self.flags(lock_path=self.tempdir) + + def fake_safe_get(*args): + return "HP3PARFCDriver" + + self.stubs.Set(self.driver.configuration, 'safe_get', fake_safe_get) + stats = self.driver.get_volume_stats(True) + self.assertEqual(stats['storage_protocol'], 'iSCSI') + self.assertEqual(stats['total_capacity_gb'], 'infinite') + self.assertEqual(stats['free_capacity_gb'], 'infinite') + + #modify the CPG to have a limit + old_cpg = self.driver.common.client.getCPG(HP3PAR_CPG) + options = {'SDGrowth': {'limitMiB': 8192}} + self.driver.common.client.deleteCPG(HP3PAR_CPG) + self.driver.common.client.createCPG(HP3PAR_CPG, options) + + const = 0.0009765625 + stats = self.driver.get_volume_stats(True) + self.assertEqual(stats['storage_protocol'], 'iSCSI') + total_capacity_gb = 8192 * const + self.assertEqual(stats['total_capacity_gb'], total_capacity_gb) + free_capacity_gb = int((8192 - old_cpg['UsrUsage']['usedMiB']) * const) + self.assertEqual(stats['free_capacity_gb'], free_capacity_gb) + self.driver.common.client.deleteCPG(HP3PAR_CPG) + self.driver.common.client.createCPG(HP3PAR_CPG, {}) + + def test_create_host(self): + self.flags(lock_path=self.tempdir) + + #record + self.clear_mox() + self.stubs.Set(hpdriver.hpcommon.HP3PARCommon, "get_cpg", + self.fake_get_cpg) + self.stubs.Set(hpdriver.hpcommon.HP3PARCommon, "get_domain", + self.fake_get_domain) + _run_ssh = self.mox.CreateMock(hpdriver.hpcommon.HP3PARCommon._run_ssh) + self.stubs.Set(hpdriver.hpcommon.HP3PARCommon, "_run_ssh", _run_ssh) + + getHost = self.mox.CreateMock(FakeHP3ParClient.getHost) + self.stubs.Set(FakeHP3ParClient, "getHost", getHost) + + not_found_ex = hpexceptions.HTTPNotFound('Host not found.') + getHost('fakehost').AndRaise(not_found_ex) + + create_host_cmd = (['createhost', '-iscsi', '-persona', '1', '-domain', + ('OpenStack',), 'fakehost', + 'iqn.1993-08.org.debian:01:222']) + _run_ssh(create_host_cmd, False).AndReturn([CLI_CR, '']) + + getHost('fakehost').AndReturn({'name': self.FAKE_HOST}) + self.mox.ReplayAll() + + host = self.driver._create_host(self.volume, self.connector) + self.assertEqual(host['name'], self.FAKE_HOST) + + def test_create_invalid_host(self): + self.flags(lock_path=self.tempdir) + + #record + self.clear_mox() + self.stubs.Set(hpdriver.hpcommon.HP3PARCommon, "get_cpg", + self.fake_get_cpg) + self.stubs.Set(hpdriver.hpcommon.HP3PARCommon, "get_domain", + self.fake_get_domain) + _run_ssh = self.mox.CreateMock(hpdriver.hpcommon.HP3PARCommon._run_ssh) + self.stubs.Set(hpdriver.hpcommon.HP3PARCommon, "_run_ssh", _run_ssh) + + getHost = self.mox.CreateMock(FakeHP3ParClient.getHost) + self.stubs.Set(FakeHP3ParClient, "getHost", getHost) + + not_found_ex = hpexceptions.HTTPNotFound('Host not found.') + getHost('fakehost').AndRaise(not_found_ex) + + create_host_cmd = (['createhost', '-iscsi', '-persona', '1', '-domain', + ('OpenStack',), 'fakehost', + 'iqn.1993-08.org.debian:01:222']) + in_use_ret = pack('\r\nalready used by host fakehost.foo ') + _run_ssh(create_host_cmd, False).AndReturn([in_use_ret, '']) + + getHost('fakehost.foo').AndReturn({'name': 'fakehost.foo'}) + self.mox.ReplayAll() + + host = self.driver._create_host(self.volume, 
self.connector)
+
+        self.assertEqual(host['name'], 'fakehost.foo')
+
+    def test_create_modify_host(self):
+        self.flags(lock_path=self.tempdir)
+
+        # record
+        self.clear_mox()
+        self.stubs.Set(hpdriver.hpcommon.HP3PARCommon, "get_cpg",
+                       self.fake_get_cpg)
+        self.stubs.Set(hpdriver.hpcommon.HP3PARCommon, "get_domain",
+                       self.fake_get_domain)
+
+        getHost = self.mox.CreateMock(FakeHP3ParClient.getHost)
+        self.stubs.Set(FakeHP3ParClient, "getHost", getHost)
+
+        modifyHost = self.mox.CreateMock(FakeHP3ParClient.modifyHost)
+        self.stubs.Set(FakeHP3ParClient, "modifyHost", modifyHost)
+
+        getHost('fakehost').AndReturn(({'name': self.FAKE_HOST,
+                                        'iSCSIPaths': []}))
+
+        modifyHost('fakehost', {'iSCSINames':
+                                ['iqn.1993-08.org.debian:01:222'],
+                                'pathOperation': 1})
+
+        ret_value = {'name': self.FAKE_HOST,
+                     'iSCSIPaths': [{'name': 'iqn.1993-08.org.debian:01:222'}]
+                     }
+        getHost('fakehost').AndReturn(ret_value)
+        self.mox.ReplayAll()
+
+        host = self.driver._create_host(self.volume, self.connector)
+        self.assertEqual(host['name'], self.FAKE_HOST)
+        self.assertEqual(len(host['iSCSIPaths']), 1)
+
+    def test_get_ports(self):
+        self.flags(lock_path=self.tempdir)
+
+        # record
+        self.clear_mox()
+        getPorts = self.mox.CreateMock(FakeHP3ParClient.getPorts)
+        self.stubs.Set(FakeHP3ParClient, "getPorts", getPorts)
+
+        getPorts().AndReturn(PORTS1_RET)
+        self.mox.ReplayAll()
+
+        ports = self.driver.common.get_ports()['members']
+        self.assertEqual(len(ports), 3)
+
+    def test_get_least_used_nsp_for_host_single(self):
+        self.flags(lock_path=self.tempdir)
+        self.clear_mox()
+
+        self.driver.common.client.getPorts = mock.Mock(
+            return_value=PORTS_RET)
+
+        self.driver.common.client.getVLUNs = mock.Mock(
+            return_value=VLUNS1_RET)
+
+        # Set up a single iSCSI IP
+        iscsi_ips = ["10.10.220.253"]
+        self.driver.configuration.hp3par_iscsi_ips = iscsi_ips
+
+        self.driver.initialize_iscsi_ports()
+
+        nsp = self.driver._get_least_used_nsp_for_host('newhost')
+        self.assertEqual(nsp, "1:8:1")
+
+    def test_get_least_used_nsp_for_host_new(self):
+        self.flags(lock_path=self.tempdir)
+        self.clear_mox()
+
+        self.driver.common.client.getPorts = mock.Mock(
+            return_value=PORTS_RET)
+
+        self.driver.common.client.getVLUNs = mock.Mock(
+            return_value=VLUNS1_RET)
+
+        # Set up two iSCSI IPs
+        iscsi_ips = ["10.10.220.252", "10.10.220.253"]
+        self.driver.configuration.hp3par_iscsi_ips = iscsi_ips
+
+        self.driver.initialize_iscsi_ports()
+
+        # Host 'newhost' does not yet have any iscsi paths,
+        # so the 'least used' is returned
+        nsp = self.driver._get_least_used_nsp_for_host('newhost')
+        self.assertEqual(nsp, "1:8:2")
+
+    def test_get_least_used_nsp_for_host_reuse(self):
+        self.flags(lock_path=self.tempdir)
+        self.clear_mox()
+
+        self.driver.common.client.getPorts = mock.Mock(
+            return_value=PORTS_RET)
+
+        self.driver.common.client.getVLUNs = mock.Mock(
+            return_value=VLUNS1_RET)
+
+        # Set up two iSCSI IPs
+        iscsi_ips = ["10.10.220.252", "10.10.220.253"]
+        self.driver.configuration.hp3par_iscsi_ips = iscsi_ips
+
+        self.driver.initialize_iscsi_ports()
+
+        # hosts 'foo' and 'bar' already have active iscsi paths
+        # the same one should be used
+        nsp = self.driver._get_least_used_nsp_for_host('foo')
+        self.assertEqual(nsp, "1:8:2")
+
+        nsp = self.driver._get_least_used_nsp_for_host('bar')
+        self.assertEqual(nsp, "1:8:1")
+
+    def test_get_least_used_nsp_for_host_fc(self):
+        # Ensure an iSCSI path is chosen even when an active FC path exists
+        self.flags(lock_path=self.tempdir)
+        self.clear_mox()
+
+        self.driver.common.client.getPorts = mock.Mock(
return_value=PORTS1_RET) + self.driver.common.client.getVLUNs = mock.Mock( + return_value=VLUNS5_RET) + + #Setup two ISCSI IPs + iscsi_ips = ["10.10.220.252", "10.10.220.253"] + self.driver.configuration.hp3par_iscsi_ips = iscsi_ips + + self.driver.initialize_iscsi_ports() + + nsp = self.driver._get_least_used_nsp_for_host('newhost') + self.assertNotEqual(nsp, "0:6:3") + self.assertEqual(nsp, "1:8:1") + + def test_invalid_iscsi_ip(self): + self.flags(lock_path=self.tempdir) + + #record driver set up + self.clear_mox() + getPorts = self.mox.CreateMock(FakeHP3ParClient.getPorts) + self.stubs.Set(FakeHP3ParClient, "getPorts", getPorts) + + getPorts().AndReturn(PORTS_RET) + + config = self.setup_configuration() + config.hp3par_iscsi_ips = ['10.10.220.250', '10.10.220.251'] + config.iscsi_ip_address = '10.10.10.10' + self.mox.ReplayAll() + + # no valid ip addr should be configured. + self.assertRaises(exception.InvalidInput, + self.setup_driver, + config, + set_up_fakes=False) + + def test_get_least_used_nsp(self): + self.flags(lock_path=self.tempdir) + + #record + self.clear_mox() + getVLUNs = self.mox.CreateMock(FakeHP3ParClient.getVLUNs) + self.stubs.Set(FakeHP3ParClient, "getVLUNs", getVLUNs) + + getVLUNs().AndReturn(VLUNS3_RET) + getVLUNs().AndReturn(VLUNS4_RET) + getVLUNs().AndReturn(VLUNS4_RET) + + self.mox.ReplayAll() + # in use count + vluns = self.driver.common.client.getVLUNs() + nsp = self.driver._get_least_used_nsp(vluns['members'], + ['0:2:1', '1:8:1']) + self.assertEqual(nsp, '1:8:1') + + # in use count + vluns = self.driver.common.client.getVLUNs() + nsp = self.driver._get_least_used_nsp(vluns['members'], + ['0:2:1', '1:2:1']) + self.assertEqual(nsp, '1:2:1') + + # in use count + vluns = self.driver.common.client.getVLUNs() + nsp = self.driver._get_least_used_nsp(vluns['members'], + ['1:1:1', '1:2:1']) + self.assertEqual(nsp, '1:1:1') + + +def pack(arg): + header = '\r\n\r\n\r\n\r\n\r\n' + footer = '\r\n\r\n\r\n' + return header + arg + footer + +PORTS_RET = ({'members': + [{'portPos': {'node': 1, 'slot': 8, 'cardPort': 2}, + 'protocol': 2, + 'IPAddr': '10.10.220.252', + 'linkState': 4, + 'device': [], + 'iSCSIName': 'iqn.2000-05.com.3pardata:21820002ac00383d', + 'mode': 2, + 'HWAddr': '2C27D75375D2', + 'type': 8}, + {'portPos': {'node': 1, 'slot': 8, 'cardPort': 1}, + 'protocol': 2, + 'IPAddr': '10.10.220.253', + 'linkState': 4, + 'device': [], + 'iSCSIName': 'iqn.2000-05.com.3pardata:21810002ac00383d', + 'mode': 2, + 'HWAddr': '2C27D75375D6', + 'type': 8}]}) + +PORTS1_RET = ({'members': + [{'portPos': {'node': 0, 'slot': 8, 'cardPort': 2}, + 'protocol': 2, + 'IPAddr': '10.10.120.252', + 'linkState': 4, + 'device': [], + 'iSCSIName': 'iqn.2000-05.com.3pardata:21820002ac00383d', + 'mode': 2, + 'HWAddr': '2C27D75375D2', + 'type': 8}, + {'portPos': {'node': 1, 'slot': 8, 'cardPort': 1}, + 'protocol': 2, + 'IPAddr': '10.10.220.253', + 'linkState': 4, + 'device': [], + 'iSCSIName': 'iqn.2000-05.com.3pardata:21810002ac00383d', + 'mode': 2, + 'HWAddr': '2C27D75375D6', + 'type': 8}, + {'portWWN': '20210002AC00383D', + 'protocol': 1, + 'linkState': 4, + 'mode': 2, + 'device': ['cage2'], + 'nodeWWN': '20210002AC00383D', + 'type': 2, + 'portPos': {'node': 0, 'slot': 6, 'cardPort': 3}}]}) + +VLUNS1_RET = ({'members': + [{'portPos': {'node': 1, 'slot': 8, 'cardPort': 2}, + 'hostname': 'foo', 'active': True}, + {'portPos': {'node': 1, 'slot': 8, 'cardPort': 1}, + 'hostname': 'bar', 'active': True}, + {'portPos': {'node': 1, 'slot': 8, 'cardPort': 1}, + 'hostname': 'bar', 'active': True}, + 
{'portPos': {'node': 1, 'slot': 8, 'cardPort': 1}, + 'hostname': 'bar', 'active': True}]}) + +VLUNS2_RET = ({'members': + [{'portPos': {'node': 1, 'slot': 8, 'cardPort': 2}, + 'hostname': 'bar', 'active': True}, + {'portPos': {'node': 1, 'slot': 8, 'cardPort': 1}, + 'hostname': 'bar', 'active': True}, + {'portPos': {'node': 1, 'slot': 8, 'cardPort': 2}, + 'hostname': 'bar', 'active': True}, + {'portPos': {'node': 1, 'slot': 8, 'cardPort': 2}, + 'hostname': 'fakehost', 'active': True}]}) + +VLUNS3_RET = ({'members': + [{'portPos': {'node': 1, 'slot': 8, 'cardPort': 2}, + 'active': True}, + {'portPos': {'node': 1, 'slot': 8, 'cardPort': 1}, + 'active': True}, + {'portPos': {'node': 1, 'slot': 8, 'cardPort': 2}, + 'active': True}, + {'portPos': {'node': 0, 'slot': 2, 'cardPort': 2}, + 'active': True}, + {'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, + 'active': True}, + {'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, + 'active': True}, + {'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, + 'active': True}, + {'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, + 'active': True}]}) + +VLUNS4_RET = ({'members': + [{'portPos': {'node': 1, 'slot': 2, 'cardPort': 1}, + 'active': True}, + {'portPos': {'node': 1, 'slot': 2, 'cardPort': 1}, + 'active': True}, + {'portPos': {'node': 1, 'slot': 2, 'cardPort': 1}, + 'active': True}, + {'portPos': {'node': 1, 'slot': 2, 'cardPort': 1}, + 'active': True}, + {'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, + 'active': True}, + {'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, + 'active': True}, + {'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, + 'active': True}, + {'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, + 'active': True}, + {'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, + 'active': True}]}) +VLUNS5_RET = ({'members': + [{'portPos': {'node': 0, 'slot': 8, 'cardPort': 2}, + 'active': True}, + {'portPos': {'node': 1, 'slot': 8, 'cardPort': 1}, + 'active': True}]}) diff --git a/cinder/tests/test_huawei_hvs.py b/cinder/tests/test_huawei_hvs.py new file mode 100644 index 0000000000..1974ce0002 --- /dev/null +++ b/cinder/tests/test_huawei_hvs.py @@ -0,0 +1,865 @@ + +# Copyright (c) 2013 Huawei Technologies Co., Ltd. +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Unit Tests for Huawei HVS volume drivers. 
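+
+Both the iSCSI and FC drivers are exercised against a fake REST layer
+that returns canned JSON responses instead of talking to a real array.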
+""" + +import json +import mox +import os +import shutil +import tempfile +import time + +from xml.dom.minidom import Document + +from cinder import exception +from cinder import test +from cinder import utils +from cinder.volume import configuration as conf +from cinder.volume.drivers.huawei import huawei_hvs +from cinder.volume.drivers.huawei import rest_common + + +test_volume = {'name': 'volume-21ec7341-9256-497b-97d9-ef48edcf0635', + 'size': 2, + 'volume_name': 'vol1', + 'id': '21ec7341-9256-497b-97d9-ef48edcf0635', + 'volume_id': '21ec7341-9256-497b-97d9-ef48edcf0635', + 'provider_auth': None, + 'project_id': 'project', + 'display_name': 'vol1', + 'display_description': 'test volume', + 'volume_type_id': None} + +test_snap = {'name': 'volume-21ec7341-9256-497b-97d9-ef48edcf0635', + 'size': 1, + 'volume_name': 'vol1', + 'id': '21ec7341-9256-497b-97d9-ef48edcf0635', + 'volume_id': '21ec7341-9256-497b-97d9-ef48edcf0635', + 'provider_auth': None, + 'project_id': 'project', + 'display_name': 'vol1', + 'display_description': 'test volume', + 'volume_type_id': None} + +FakeConnector = {'initiator': 'iqn.1993-08.debian:01:ec2bff7ac3a3', + 'wwpns': ['10000090fa0d6754'], + 'wwnns': ['10000090fa0d6755'], + 'host': 'fakehost', + 'ip': '10.10.0.1'} + +volume_size = 3 + + +def Fake_sleep(time): + pass + + +class FakeHVSCommon(rest_common.HVSCommon): + + def __init__(self, configuration): + rest_common.HVSCommon.__init__(self, configuration) + self.test_normal = True + self.other_flag = True + self.deviceid = None + self.lun_id = None + self.snapshot_id = None + self.luncopy_id = None + self.termin_flag = False + + def _parse_volume_type(self, volume): + self._get_lun_conf_params() + poolinfo = self._find_pool_info() + volume_size = self._get_volume_size(poolinfo, volume) + + params = {'LUNType': 0, + 'WriteType': '1', + 'PrefetchType': '3', + 'qos_level': 'Qos-high', + 'StripUnitSize': '64', + 'PrefetchValue': '0', + 'PrefetchTimes': '0', + 'qos': 'OpenStack_Qos_High', + 'MirrorSwitch': '1', + 'tier': 'Tier_high'} + + params['volume_size'] = volume_size + params['pool_id'] = poolinfo['ID'] + return params + + def _change_file_mode(self, filepath): + utils.execute('chmod', '777', filepath) + + def call(self, url=False, data=None, method=None): + + url = url.replace('http://100.115.10.69:8082/deviceManager/rest', '') + url = url.replace('/210235G7J20000000000/', '') + data = None + + if self.test_normal: + if url == "/xx/sessions": + data = """{"error":{"code":0}, + "data":{"username":"admin", + "deviceid":"210235G7J20000000000" + }}""" + if url == "sessions": + data = """{"error":{"code":0}, + "data":{"ID":11}}""" + + if url == "storagepool": + data = """{"error":{"code":0}, + "data":[{"ID":"0", + "NAME":"OpenStack_Pool", + "USERFREECAPACITY":"985661440", + "USERTOTALCAPACITY":"985661440" + }]}""" + + if url == "lun": + if method is None: + data = """{"error":{"code":0}, + "data":{"ID":"1", + "NAME":"5mFHcBv4RkCcD+JyrWc0SA"}}""" + self.lun_id = "0" + + if method == 'GET': + data = """{"error":{"code":0}, + "data":[{"ID":"1", + "NAME":"IexzQZJWSXuX2e9I7c8GNQ"}]}""" + + if url == "lungroup": + if method is None: + data = """{"error":{"code":0}, + "data":{"NAME":"5mFHcBv4RkCcD+JyrWc0SA", + "DESCRIPTION":"5mFHcBv4RkCcD", + "ID":"11", + "TYPE":256}}""" + + if method == "GET": + data = """{"error":{"code":0}, + "data":[{"NAME":"IexzQZJWSXuX2e9I7c8GNQ", + "DESCRIPTION":"5mFHcBv4RkCcD", + "ID":"11", + "TYPE":256}]}""" + + if method == "DELETE": + data = """{"error":{"code":0}, + 
"data":[{"NAME":"IexzQZJWSXuX2e9I7c8GNQ", + "DESCRIPTION":"5mFHcBv4RkCcD+JyrWc0SA", + "ID":"11", + "TYPE":256}]}""" + + if url == "lungroup/associate": + data = """{"error":{"code":0}, + "data":{"NAME":"5mFHcBv4RkCcD+JyrWc0SA", + "DESCRIPTION":"5mFHcBv4RkCcD+JyrWc0SA", + "ID":"11", + "TYPE":256}}""" + + if url == "snapshot": + if method is None: + data = """{"error":{"code":0}, + "data":{"ID":11}}""" + self.snapshot_id = "3" + + if method == "GET": + data = """{"error":{"code":0}, + "data":[{"ID":11,"NAME":"SDFAJSDFLKJ"}, + {"ID":12,"NAME":"SDFAJSDFLKJ"}]}""" + + if url == "snapshot/activate": + data = """{"error":{"code":0}}""" + + if url == ("lungroup/associate?ID=11" + "&ASSOCIATEOBJTYPE=11&ASSOCIATEOBJID=1"): + data = """{"error":{"code":0}}""" + + if url == "LUNGroup/11": + data = """{"error":{"code":0}}""" + + if url == 'lun/1': + data = """{"error":{"code":0}}""" + self.lun_id = None + + if url == 'snapshot': + if method == "GET": + data = """{"error":{"code":0}, + "data":[{"PARENTTYPE":11, + "NAME":"IexzQZJWSXuX2e9I7c8GNQ", + "WWN":"60022a11000a2a3907ce96cb00000b", + "ID":"11", + "CONSUMEDCAPACITY":"0"}]}""" + + if url == "snapshot/stop": + data = """{"error":{"code":0}}""" + + if url == "snapshot/11": + data = """{"error":{"code":0}}""" + self.snapshot_id = None + + if url == "luncopy": + data = """{"error":{"code":0}, + "data":{"COPYSTOPTIME":"-1", + "HEALTHSTATUS":"1", + "NAME":"w1PSNvu6RumcZMmSh4/l+Q==", + "RUNNINGSTATUS":"36", + "DESCRIPTION":"w1PSNvu6RumcZMmSh4/l+Q==", + "ID":"0","LUNCOPYTYPE":"1", + "COPYPROGRESS":"0","COPYSPEED":"2", + "TYPE":219,"COPYSTARTTIME":"-1"}}""" + self.luncopy_id = "7" + + if url == "LUNCOPY/start": + data = """{"error":{"code":0}}""" + + if url == "LUNCOPY?range=[0-100000]": + data = """{"error":{"code":0}, + "data":[{"COPYSTOPTIME":"1372209335", + "HEALTHSTATUS":"1", + "NAME":"w1PSNvu6RumcZMmSh4/l+Q==", + "RUNNINGSTATUS":"40", + "DESCRIPTION":"w1PSNvu6RumcZMmSh4/l+Q==", + "ID":"0","LUNCOPYTYPE":"1", + "COPYPROGRESS":"100", + "COPYSPEED":"2", + "TYPE":219, + "COPYSTARTTIME":"1372209329"}]}""" + + if url == "LUNCOPY/0": + data = '{"error":{"code":0}}' + + if url == "eth_port": + data = """{"error":{"code":0}, + "data":[{"PARENTTYPE":209, + "MACADDRESS":"00:22:a1:0a:79:57", + "ETHNEGOTIATE":"-1","ERRORPACKETS":"0", + "IPV4ADDR":"100.115.10.68", + "IPV6GATEWAY":"","IPV6MASK":"0", + "OVERFLOWEDPACKETS":"0","ISCSINAME":"P0", + "HEALTHSTATUS":"1","ETHDUPLEX":"2", + "ID":"16909568","LOSTPACKETS":"0", + "TYPE":213,"NAME":"P0","INIORTGT":"4", + "RUNNINGSTATUS":"10","IPV4GATEWAY":"", + "BONDNAME":"","STARTTIME":"1371684218", + "SPEED":"1000","ISCSITCPPORT":"0", + "IPV4MASK":"255.255.0.0","IPV6ADDR":"", + "LOGICTYPE":"0","LOCATION":"ENG0.B5.P0", + "MTU":"1500","PARENTID":"1.5"}]}""" + + if url == "iscsidevicename": + data = """{"error":{"code":0}, +"data":[{"CMO_ISCSI_DEVICE_NAME": +"iqn.2006-08.com.huawei:oceanstor:21000022a10a2a39:iscsinametest"}]}""" + + if url == "hostgroup": + if method is None: + data = """{"error":{"code":0}, + "data":{"NAME":"ubuntuc", + "DESCRIPTION":"", + "ID":"0", + "TYPE":14}}""" + + if method == "GET": + data = """{"error":{"code":0}, + "data":[{"NAME":"ubuntuc", + "DESCRIPTION":"", + "ID":"0", + "TYPE":14}]}""" + + if url == "host": + if method is None: + data = """{"error":{"code":0}, + "data":{"PARENTTYPE":245, + "NAME":"Default Host", + "DESCRIPTION":"", + "RUNNINGSTATUS":"1", + "IP":"","PARENTNAME":"0", + "OPERATIONSYSTEM":"1","LOCATION":"", + "HEALTHSTATUS":"1","MODEL":"", + "ID":"0","PARENTID":"0", + 
"NETWORKNAME":"","TYPE":21}} """ + + if method == "GET": + data = """{"error":{"code":0}, + "data":[{"PARENTTYPE":245, + "NAME":"ubuntuc", + "DESCRIPTION":"", + "RUNNINGSTATUS":"1", + "IP":"","PARENTNAME":"", + "OPERATIONSYSTEM":"0", + "LOCATION":"", + "HEALTHSTATUS":"1", + "MODEL":"", + "ID":"1","PARENTID":"", + "NETWORKNAME":"","TYPE":21}, + {"PARENTTYPE":245, + "NAME":"ubuntu", + "DESCRIPTION":"", + "RUNNINGSTATUS":"1", + "IP":"","PARENTNAME":"", + "OPERATIONSYSTEM":"0", + "LOCATION":"", + "HEALTHSTATUS":"1", + "MODEL":"","ID":"2", + "PARENTID":"", + "NETWORKNAME":"","TYPE":21}]} """ + + if url == "host/associate": + if method is None: + data = """{"error":{"code":0}}""" + if method == "GET": + data = """{"error":{"code":0}}""" + + if url == "iscsi_initiator/iqn.1993-08.debian:01:ec2bff7ac3a3": + data = """{"error":{"code":0}, + "data":{"ID":"iqn.1993-08.win:01:ec2bff7ac3a3", + "NAME":"iqn.1993-08.win:01:ec2bff7ac3a3", + "ISFREE":"True"}}""" + + if url == "iscsi_initiator/": + data = """{"error":{"code":0}}""" + + if url == "iscsi_initiator": + data = """{"error":{"code":0}}""" + + if url == "mappingview": + self.termin_flag = True + if method is None: + data = """{"error":{"code":0}, + "data":{"WORKMODE":"255", + "HEALTHSTATUS":"1", + "NAME":"mOWtSXnaQKi3hpB3tdFRIQ", + "RUNNINGSTATUS":"27","DESCRIPTION":"", + "ENABLEINBANDCOMMAND":"true", + "ID":"1","INBANDLUNWWN":"", + "TYPE":245}}""" + + if method == "GET": + if self.other_flag: + data = """{"error":{"code":0}, + "data":[{"WORKMODE":"255", + "HEALTHSTATUS":"1", + "NAME":"mOWtSXnaQKi3hpB3tdFRIQ", + "RUNNINGSTATUS":"27", + "DESCRIPTION":"", + "ENABLEINBANDCOMMAND": + "true","ID":"1", + "INBANDLUNWWN":"", + "TYPE":245}, + {"WORKMODE":"255", + "HEALTHSTATUS":"1", + "NAME":"YheUoRwbSX2BxN767nvLSw", + "RUNNINGSTATUS":"27", + "DESCRIPTION":"", + "ENABLEINBANDCOMMAND":"true", + "ID":"2", + "INBANDLUNWWN":"", + "TYPE":245}]}""" + else: + data = """{"error":{"code":0}, + "data":[{"WORKMODE":"255", + "HEALTHSTATUS":"1", + "NAME":"IexzQZJWSXuX2e9I7c8GNQ", + "RUNNINGSTATUS":"27", + "DESCRIPTION":"", + "ENABLEINBANDCOMMAND":"true", + "ID":"1", + "INBANDLUNWWN":"", + "TYPE":245}, + {"WORKMODE":"255", + "HEALTHSTATUS":"1", + "NAME":"YheUoRwbSX2BxN767nvLSw", + "RUNNINGSTATUS":"27", + "DESCRIPTION":"", + "ENABLEINBANDCOMMAND":"true", + "ID":"2", + "INBANDLUNWWN":"", + "TYPE":245}]}""" + + if url == "MAPPINGVIEW/CREATE_ASSOCIATE": + data = """{"error":{"code":0}}""" + + if url == ("lun/associate?TYPE=11&" + "ASSOCIATEOBJTYPE=21&ASSOCIATEOBJID=0"): + data = """{"error":{"code":0}}""" + + if url == "fc_initiator?ISFREE=true&range=[0-1000]": + data = """{"error":{"code":0}, + "data":[{"HEALTHSTATUS":"1", + "NAME":"", + "MULTIPATHTYPE":"1", + "ISFREE":"true", + "RUNNINGSTATUS":"27", + "ID":"10000090fa0d6754", + "OPERATIONSYSTEM":"255", + "TYPE":223}, + {"HEALTHSTATUS":"1", + "NAME":"", + "MULTIPATHTYPE":"1", + "ISFREE":"true", + "RUNNINGSTATUS":"27", + "ID":"10000090fa0d6755", + "OPERATIONSYSTEM":"255", + "TYPE":223}]}""" + + if url == "host_link?INITIATOR_TYPE=223&INITIATOR_PORT_WWN="\ + "10000090fa0d6754": + + data = """{"error":{"code":0}, + "data":[{"PARENTTYPE":21, + "TARGET_ID":"0000000000000000", + "INITIATOR_NODE_WWN":"20000090fa0d6754", + "INITIATOR_TYPE":"223", + "RUNNINGSTATUS":"27", + "PARENTNAME":"ubuntuc", + "INITIATOR_ID":"10000090fa0d6754", + "TARGET_PORT_WWN":"24000022a10a2a39", + "HEALTHSTATUS":"1", + "INITIATOR_PORT_WWN":"10000090fa0d6754", + "ID":"010000090fa0d675-0000000000110400", + "TARGET_NODE_WWN":"21000022a10a2a39", + 
"PARENTID":"1","CTRL_ID":"0", + "TYPE":255,"TARGET_TYPE":"212"}]}""" + + if url == ("mappingview/associate?TYPE=245&" + "ASSOCIATEOBJTYPE=14&ASSOCIATEOBJID=0"): + + data = """{"error":{"code":0}, + "data":[{"ID":11,"NAME":"test"}]}""" + + if url == ("mappingview/associate?TYPE=245&" + "ASSOCIATEOBJTYPE=256&ASSOCIATEOBJID=11"): + + data = """{"error":{"code":0}, + "data":[{"ID":11,"NAME":"test"}]}""" + + if url == "fc_initiator/10000090fa0d6754": + data = """{"error":{"code":0}}""" + + if url == "mappingview/REMOVE_ASSOCIATE": + data = """{"error":{"code":0}}""" + self.termin_flag = True + + if url == "mappingview/1": + data = """{"error":{"code":0}}""" + + if url == "ioclass": + data = """{"error":{"code":0}, + "data":[{"NAME":"OpenStack_Qos_High", + "ID":"0", + "LUNLIST":"[]", + "TYPE":230}]}""" + + if url == "ioclass/0": + data = """{"error":{"code":0}}""" + + if url == "lun/expand": + data = """{"error":{"code":0}}""" + self.lun_id = '0' + + else: + data = """{"error":{"code":31755596}}""" + + res_json = json.loads(data) + return res_json + + +class FakeHVSiSCSIStorage(huawei_hvs.HuaweiHVSISCSIDriver): + + def __init__(self, configuration): + super(FakeHVSiSCSIStorage, self).__init__(configuration) + self.configuration = configuration + + def do_setup(self, context): + self.common = FakeHVSCommon(configuration=self.configuration) + + +class FakeHVSFCStorage(huawei_hvs.HuaweiHVSFCDriver): + + def __init__(self, configuration): + super(FakeHVSFCStorage, self).__init__(configuration) + self.configuration = configuration + + def do_setup(self, context): + self.common = FakeHVSCommon(configuration=self.configuration) + + +class HVSRESTiSCSIDriverTestCase(test.TestCase): + def setUp(self): + super(HVSRESTiSCSIDriverTestCase, self).setUp() + self.tmp_dir = tempfile.mkdtemp() + self.fake_conf_file = self.tmp_dir + '/cinder_huawei_conf.xml' + self.create_fake_conf_file() + self.configuration = mox.MockObject(conf.Configuration) + self.configuration.cinder_huawei_conf_file = self.fake_conf_file + self.configuration.append_config_values(mox.IgnoreArg()) + + self.stubs.Set(time, 'sleep', Fake_sleep) + + self.driver = FakeHVSiSCSIStorage(configuration=self.configuration) + self.driver.do_setup({}) + self.driver.common.test_normal = True + + def tearDown(self): + if os.path.exists(self.fake_conf_file): + os.remove(self.fake_conf_file) + shutil.rmtree(self.tmp_dir) + super(HVSRESTiSCSIDriverTestCase, self).tearDown() + + def test_log_in_success(self): + deviceid = self.driver.common.login() + self.assertIsNotNone(deviceid) + + def test_log_out_success(self): + self.driver.common.login() + self.driver.common.login_out() + + def test_create_volume_success(self): + self.driver.common.login() + self.driver.create_volume(test_volume) + self.assertEqual(self.driver.common.lun_id, "0") + + def test_extend_volume_success(self): + self.driver.common.login() + self.driver.extend_volume(test_volume, volume_size) + self.assertEqual(self.driver.common.lun_id, "0") + + def test_create_snapshot_success(self): + self.driver.common.login() + self.driver.create_snapshot(test_volume) + self.assertEqual(self.driver.common.snapshot_id, "3") + + def test_delete_volume_success(self): + self.driver.common.login() + self.driver.delete_volume(test_volume) + self.assertIsNone(self.driver.common.lun_id) + + def test_delete_snapshot_success(self): + self.driver.common.login() + self.driver.delete_snapshot(test_snap) + self.assertIsNone(self.driver.common.snapshot_id) + + def test_colone_volume_success(self): + 
+        self.driver.common.login()
+        self.driver.create_cloned_volume(test_volume, test_volume)
+        self.assertEqual(self.driver.common.luncopy_id, "7")
+
+    def test_create_volume_from_snapshot_success(self):
+        self.driver.common.login()
+        self.driver.create_volume_from_snapshot(test_volume, test_volume)
+        self.assertEqual(self.driver.common.luncopy_id, "7")
+
+    def test_initialize_connection_success(self):
+        self.driver.common.login()
+        conn = self.driver.initialize_connection(test_volume, FakeConnector)
+        self.assertEqual(conn['data']['target_lun'], 1)
+
+    def test_terminate_connection_success(self):
+        self.driver.common.login()
+        self.driver.terminate_connection(test_volume, FakeConnector)
+        self.assertEqual(self.driver.common.termin_flag, True)
+
+    def test_initialize_connection_no_view_success(self):
+        self.driver.common.login()
+        self.driver.common.other_flag = False
+        conn = self.driver.initialize_connection(test_volume, FakeConnector)
+        self.assertEqual(conn['data']['target_lun'], 1)
+
+    def test_terminate_connection_no_view_success(self):
+        self.driver.common.login()
+        self.driver.common.other_flag = False
+        self.driver.terminate_connection(test_volume, FakeConnector)
+        self.assertEqual(self.driver.common.termin_flag, True)
+
+    def test_get_volume_stats(self):
+        self.driver.common.login()
+        status = self.driver.get_volume_stats()
+        self.assertIsNotNone(status['free_capacity_gb'])
+
+    def test_create_snapshot_fail(self):
+        self.driver.common.login()
+        self.driver.common.test_normal = False
+        self.assertRaises(exception.CinderException,
+                          self.driver.create_snapshot, test_volume)
+
+    def test_create_volume_fail(self):
+        self.driver.common.login()
+        self.driver.common.test_normal = False
+        self.assertRaises(exception.CinderException,
+                          self.driver.create_volume, test_volume)
+
+    def test_delete_volume_fail(self):
+        self.driver.common.login()
+        self.driver.common.test_normal = False
+        self.assertRaises(exception.CinderException,
+                          self.driver.delete_volume, test_volume)
+
+    def test_delete_snapshot_fail(self):
+        self.driver.common.login()
+        self.driver.common.test_normal = False
+        self.assertRaises(exception.CinderException,
+                          self.driver.delete_snapshot, test_volume)
+
+    def test_initialize_connection_fail(self):
+        self.driver.common.login()
+        self.driver.common.test_normal = False
+        self.assertRaises(exception.CinderException,
+                          self.driver.initialize_connection,
+                          test_volume, FakeConnector)
+
+    def create_fake_conf_file(self):
+        doc = Document()
+
+        config = doc.createElement('config')
+        doc.appendChild(config)
+
+        storage = doc.createElement('Storage')
+        config.appendChild(storage)
+
+        product = doc.createElement('Product')
+        product_text = doc.createTextNode('HVS')
+        product.appendChild(product_text)
+        storage.appendChild(product)
+
+        protocol = doc.createElement('Protocol')
+        protocol_text = doc.createTextNode('iSCSI')
+        protocol.appendChild(protocol_text)
+        storage.appendChild(protocol)
+
+        username = doc.createElement('UserName')
+        username_text = doc.createTextNode('admin')
+        username.appendChild(username_text)
+        storage.appendChild(username)
+        userpassword = doc.createElement('UserPassword')
+        userpassword_text = doc.createTextNode('Admin@storage')
+        userpassword.appendChild(userpassword_text)
+        storage.appendChild(userpassword)
+        url = doc.createElement('HVSURL')
+        url_text = doc.createTextNode('http://100.115.10.69:8082/'
+                                      'deviceManager/rest/')
+        url.appendChild(url_text)
+        storage.appendChild(url)
+        lun = doc.createElement('LUN')
+        config.appendChild(lun)
+        storagepool = doc.createElement('StoragePool')
+        pool_text = doc.createTextNode('OpenStack_Pool')
+        storagepool.appendChild(pool_text)
+        lun.appendChild(storagepool)
+
+        luntype = doc.createElement('LUNType')
+        luntype_text = doc.createTextNode('Thick')
+        luntype.appendChild(luntype_text)
+        lun.appendChild(luntype)
+
+        writetype = doc.createElement('WriteType')
+        writetype_text = doc.createTextNode('1')
+        writetype.appendChild(writetype_text)
+        lun.appendChild(writetype)
+
+        prefetchType = doc.createElement('Prefetch')
+        prefetchType.setAttribute('Type', '2')
+        prefetchType.setAttribute('Value', '20')
+        lun.appendChild(prefetchType)
+
+        iscsi = doc.createElement('iSCSI')
+        config.appendChild(iscsi)
+        defaulttargetip = doc.createElement('DefaultTargetIP')
+        defaulttargetip_text = doc.createTextNode('100.115.10.68')
+        defaulttargetip.appendChild(defaulttargetip_text)
+        iscsi.appendChild(defaulttargetip)
+
+        initiator = doc.createElement('Initiator')
+        initiator.setAttribute('Name', 'iqn.1993-08.debian:01:ec2bff7ac3a3')
+        initiator.setAttribute('TargetIP', '100.115.10.68')
+        iscsi.appendChild(initiator)
+
+        newfile = open(self.fake_conf_file, 'w')
+        newfile.write(doc.toprettyxml(indent=''))
+        newfile.close()
+
+
+class HVSRESTFCDriverTestCase(test.TestCase):
+    def setUp(self):
+        super(HVSRESTFCDriverTestCase, self).setUp()
+        self.tmp_dir = tempfile.mkdtemp()
+        self.fake_conf_file = self.tmp_dir + '/cinder_huawei_conf.xml'
+        self.create_fake_conf_file()
+        self.configuration = mox.MockObject(conf.Configuration)
+        self.configuration.cinder_huawei_conf_file = self.fake_conf_file
+        self.configuration.append_config_values(mox.IgnoreArg())
+
+        self.stubs.Set(time, 'sleep', Fake_sleep)
+
+        self.driver = FakeHVSFCStorage(configuration=self.configuration)
+        self.driver.do_setup({})
+        self.driver.common.test_normal = True
+
+    def tearDown(self):
+        if os.path.exists(self.fake_conf_file):
+            os.remove(self.fake_conf_file)
+        shutil.rmtree(self.tmp_dir)
+        super(HVSRESTFCDriverTestCase, self).tearDown()
+
+    def test_log_in_success(self):
+        deviceid = self.driver.common.login()
+        self.assertIsNotNone(deviceid)
+
+    def test_create_volume_success(self):
+        self.driver.common.login()
+        self.driver.create_volume(test_volume)
+        self.assertEqual(self.driver.common.lun_id, "0")
+
+    def test_extend_volume_success(self):
+        self.driver.common.login()
+        self.driver.extend_volume(test_volume, volume_size)
+        self.assertEqual(self.driver.common.lun_id, "0")
+
+    def test_create_snapshot_success(self):
+        self.driver.common.login()
+        self.driver.create_snapshot(test_volume)
+        self.assertEqual(self.driver.common.snapshot_id, "3")
+
+    def test_delete_volume_success(self):
+        self.driver.common.login()
+        self.driver.delete_volume(test_volume)
+        self.assertIsNone(self.driver.common.lun_id)
+
+    def test_delete_snapshot_success(self):
+        self.driver.common.login()
+        self.driver.delete_snapshot(test_snap)
+        self.assertIsNone(self.driver.common.snapshot_id)
+
+    def test_clone_volume_success(self):
+        self.driver.common.login()
+        self.driver.create_cloned_volume(test_volume, test_volume)
+        self.assertEqual(self.driver.common.luncopy_id, "7")
+
+    def test_create_volume_from_snapshot_success(self):
+        self.driver.common.login()
+        self.driver.create_volume_from_snapshot(test_volume, test_volume)
+        self.assertEqual(self.driver.common.luncopy_id, "7")
+
+    def test_initialize_connection_success(self):
+        self.driver.common.login()
+        conn = self.driver.initialize_connection(test_volume, FakeConnector)
+        self.assertEqual(conn['data']['target_lun'], 1)
+
+    def test_terminate_connection_success(self):
+        self.driver.common.login()
+        self.driver.terminate_connection(test_volume, FakeConnector)
+        self.assertEqual(self.driver.common.termin_flag, True)
+
+    def test_initialize_connection_no_view_success(self):
+        self.driver.common.login()
+        self.driver.common.other_flag = False
+        conn = self.driver.initialize_connection(test_volume, FakeConnector)
+        self.assertEqual(conn['data']['target_lun'], 1)
+
+    def test_terminate_connection_no_view_success(self):
+        self.driver.common.login()
+        self.driver.common.other_flag = False
+        self.driver.terminate_connection(test_volume, FakeConnector)
+        self.assertEqual(self.driver.common.termin_flag, True)
+
+    def test_get_volume_stats(self):
+        self.driver.common.login()
+        status = self.driver.get_volume_stats()
+        self.assertIsNotNone(status['free_capacity_gb'])
+
+    def test_create_snapshot_fail(self):
+        self.driver.common.login()
+        self.driver.common.test_normal = False
+        self.assertRaises(exception.CinderException,
+                          self.driver.create_snapshot, test_volume)
+
+    def test_create_volume_fail(self):
+        self.driver.common.login()
+        self.driver.common.test_normal = False
+        self.assertRaises(exception.CinderException,
+                          self.driver.create_volume, test_volume)
+
+    def test_delete_volume_fail(self):
+        self.driver.common.login()
+        self.driver.common.test_normal = False
+        self.assertRaises(exception.CinderException,
+                          self.driver.delete_volume, test_volume)
+
+    def test_delete_snapshot_fail(self):
+        self.driver.common.login()
+        self.driver.common.test_normal = False
+        self.assertRaises(exception.CinderException,
+                          self.driver.delete_snapshot, test_volume)
+
+    def create_fake_conf_file(self):
+        doc = Document()
+
+        config = doc.createElement('config')
+        doc.appendChild(config)
+
+        storage = doc.createElement('Storage')
+        config.appendChild(storage)
+
+        product = doc.createElement('Product')
+        product_text = doc.createTextNode('HVS')
+        product.appendChild(product_text)
+        storage.appendChild(product)
+
+        protocol = doc.createElement('Protocol')
+        protocol_text = doc.createTextNode('FC')
+        protocol.appendChild(protocol_text)
+        storage.appendChild(protocol)
+
+        username = doc.createElement('UserName')
+        username_text = doc.createTextNode('admin')
+        username.appendChild(username_text)
+        storage.appendChild(username)
+
+        userpassword = doc.createElement('UserPassword')
+        userpassword_text = doc.createTextNode('Admin@storage')
+        userpassword.appendChild(userpassword_text)
+        storage.appendChild(userpassword)
+        url = doc.createElement('HVSURL')
+        url_text = doc.createTextNode('http://100.115.10.69:8082/'
+                                      'deviceManager/rest/')
+        url.appendChild(url_text)
+        storage.appendChild(url)
+
+        lun = doc.createElement('LUN')
+        config.appendChild(lun)
+        storagepool = doc.createElement('StoragePool')
+        pool_text = doc.createTextNode('OpenStack_Pool')
+        storagepool.appendChild(pool_text)
+        lun.appendChild(storagepool)
+
+        luntype = doc.createElement('LUNType')
+        luntype_text = doc.createTextNode('Thick')
+        luntype.appendChild(luntype_text)
+        lun.appendChild(luntype)
+
+        writetype = doc.createElement('WriteType')
+        writetype_text = doc.createTextNode('1')
+        writetype.appendChild(writetype_text)
+        lun.appendChild(writetype)
+
+        prefetchType = doc.createElement('Prefetch')
+        prefetchType.setAttribute('Type', '2')
+        prefetchType.setAttribute('Value', '20')
+        lun.appendChild(prefetchType)
+
+        newfile = open(self.fake_conf_file, 'w')
+        newfile.write(doc.toprettyxml(indent=''))
+        newfile.close()
diff --git a/cinder/tests/test_huawei_t_dorado.py 
b/cinder/tests/test_huawei_t_dorado.py new file mode 100644 index 0000000000..97d963eaa4 --- /dev/null +++ b/cinder/tests/test_huawei_t_dorado.py @@ -0,0 +1,1753 @@ + +# Copyright (c) 2013 Huawei Technologies Co., Ltd. +# Copyright (c) 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Unit Tests for Huawei T and Dorado volume drivers. +""" + +import mox +import os +import shutil +import socket +import tempfile +import time + +from xml.dom.minidom import Document +from xml.etree import ElementTree as ET + +from cinder import context +from cinder import exception +from cinder import test +from cinder import utils +from cinder.volume import configuration as conf +from cinder.volume.drivers.huawei import huawei_utils +from cinder.volume.drivers.huawei import HuaweiVolumeDriver +from cinder.volume.drivers.huawei import ssh_common +from cinder.volume import volume_types + + +LUN_INFO = {'ID': None, + 'Name': None, + 'Size': None, + 'LUN WWN': None, + 'Status': None, + 'Visible Capacity': None, + 'Disk Pool ID': None, + 'Cache Prefetch Strategy': None, + 'Lun Type': None, + 'Consumed Capacity': None, + 'Pool ID': None, + 'SnapShot ID': None, + 'LunCopy ID': None, + 'Owner Controller': None, + 'Worker Controller': None, + 'RAID Group ID': None} + +CLONED_LUN_INFO = {'ID': None, + 'Name': None, + 'Size': None, + 'LUN WWN': None, + 'Status': None, + 'Visible Capacity': None, + 'Disk Pool ID': None, + 'Cache Prefetch Strategy': None, + 'Lun Type': None, + 'Consumed Capacity': None, + 'Pool ID': None, + 'SnapShot ID': None, + 'LunCopy ID': None, + 'Owner Controller': None, + 'Worker Controller': None, + 'RAID Group ID': None} + +SNAPSHOT_INFO = {'Source LUN ID': None, + 'Source LUN Name': None, + 'ID': None, + 'Name': None, + 'Type': 'Public', + 'Status': None} + +MAP_INFO = {'Host Group ID': None, + 'Host Group Name': None, + 'Host ID': None, + 'Host Name': None, + 'Os Type': None, + 'INI Port ID': None, + 'INI Port Name': None, + 'INI Port Info': None, + 'INI Port WWN': None, + 'INI Port Type': None, + 'Link Status': None, + 'LUN WWN': None, + 'DEV LUN ID': None, + 'Host LUN ID': None, + 'CHAP status': False} + +HOST_PORT_INFO = {'ID': None, + 'Name': None, + 'Info': None, + 'WWN': None, + 'Type': None} + +LUNCOPY_INFO = {'Name': None, + 'ID': None, + 'Type': None, + 'State': None, + 'Status': None} + +LUNCOPY_SETTING = {'ID': '1', + 'Type': 'FULL', + 'State': 'Created', + 'Status': 'Normal'} + +POOL_SETTING = {'ID': '2', + 'Level': 'RAID6', + 'Status': 'Normal', + 'Free Capacity': '10240', + 'Disk List': '0,1;0,2;0,3;0,4;0,5;0,6', + 'Name': 'RAID_001', + 'Type': 'Thick'} + +INITIATOR_SETTING = {'TargetIQN': 'iqn.2006-08.com.huawei:oceanspace:2103037:', + 'TargetIQN-form': 'iqn.2006-08.com.huawei:oceanspace:' + '2103037::1020001:192.168.100.2', + 'Initiator Name': 'iqn.1993-08.debian:01:ec2bff7ac3a3', + 'Initiator TargetIP': '192.168.100.2', + 'WWN': ['2011666666666565']} + +FAKE_VOLUME = {'name': 'Volume-lele34fe-223f-dd33-4423-asdfghjklqwe', + 
'id': 'lele34fe-223f-dd33-4423-asdfghjklqwe', + 'size': '2', + 'provider_auth': None, + 'volume_type_id': None, + 'provider_location': None} + +FAKE_CLONED_VOLUME = {'name': 'Volume-jeje34fe-223f-dd33-4423-asdfghjklqwg', + 'id': 'jeje34fe-223f-dd33-4423-asdfghjklqwg', + 'size': '3', + 'provider_auth': None, + 'volume_type_id': None, + 'provider_location': None} + +FAKE_SNAPSHOT = {'name': 'keke34fe-223f-dd33-4423-asdfghjklqwf', + 'id': '223f-dd33-4423-asdfghjklqwf', + 'volume_name': 'Volume-lele34fe-223f-dd33-4423-asdfghjklqwe', + 'provider_location': None} + +FAKE_CONNECTOR = {'initiator': 'iqn.1993-08.debian:01:ec2bff7ac3a3', + 'wwpns': ['1000000164s45126'], + 'wwnns': ['2000666666666565'], + 'host': 'fakehost', + 'ip': '10.10.0.1'} + +RESPOOL_A_SIM = {'Size': '10240', 'Valid Size': '5120'} +RESPOOL_B_SIM = {'Size': '10240', 'Valid Size': '10240'} +VOLUME_SNAP_ID = {'vol': '0', 'vol_copy': '1', 'snap': '2'} + +cmd_error_list = [] # CLI cmds in this list will run failed +Curr_test = [''] # show current testing driver + + +class FakeChannel(): + def __init__(self): + if Curr_test[0] == 'T': + self.simu = HuaweiTCLIResSimulator() + elif Curr_test[0] == 'Dorado5100': + self.simu = HuaweiDorado5100CLIResSimulator() + else: + self.simu = HuaweiDorado2100G2CLIResSimulator() + + def resize_pty(self, width=80, height=24): + pass + + def settimeout(self, time): + pass + + def send(self, s): + self.command = s + + def recv(self, nbytes): + command = self.command.split() + cmd = command[0] + params = command[1:] + if cmd in cmd_error_list: + reset_error_flg(cmd) + out = self.command[:-1] + 'ERROR' + '\nadmin:/>' + return out.replace('\n', '\r\n') + func_name = 'cli_' + cmd + cli_func = getattr(self.simu, func_name) + out = cli_func(params) + out = self.command[:-1] + out + '\nadmin:/>' + return out.replace('\n', '\r\n') + + def close(self): + pass + + +class FakeSSHClient(): + def invoke_shell(self): + return FakeChannel() + + def get_transport(self): + + class transport(): + def __init__(self): + self.sock = sock() + + class sock(): + def settimeout(self, time): + pass + + return transport() + + def close(self): + pass + + +class FakeSSHPool(): + def __init__(self, ip, port, conn_timeout, login, password=None, + *args, **kwargs): + self.ip = ip + self.port = port + self.login = login + self.password = password + + def create(self): + return FakeSSHClient() + + def get(self): + return FakeSSHClient() + + def put(self, ssh): + pass + + def remove(self, ssh): + pass + + +def Fake_sleep(time): + pass + + +def Fake_change_file_mode(obj, filepath): + pass + + +def create_fake_conf_file(filename): + doc = Document() + + config = doc.createElement('config') + doc.appendChild(config) + + storage = doc.createElement('Storage') + config.appendChild(storage) + product = doc.createElement('Product') + product_text = doc.createTextNode('T') + product.appendChild(product_text) + storage.appendChild(product) + config.appendChild(storage) + protocol = doc.createElement('Protocol') + protocol_text = doc.createTextNode('iSCSI') + protocol.appendChild(protocol_text) + storage.appendChild(protocol) + controllerip0 = doc.createElement('ControllerIP0') + controllerip0_text = doc.createTextNode('10.10.10.1') + controllerip0.appendChild(controllerip0_text) + storage.appendChild(controllerip0) + controllerip1 = doc.createElement('ControllerIP1') + controllerip1_text = doc.createTextNode('10.10.10.2') + controllerip1.appendChild(controllerip1_text) + storage.appendChild(controllerip1) + username = 
doc.createElement('UserName') + username_text = doc.createTextNode('admin') + username.appendChild(username_text) + storage.appendChild(username) + userpassword = doc.createElement('UserPassword') + userpassword_text = doc.createTextNode('123456') + userpassword.appendChild(userpassword_text) + storage.appendChild(userpassword) + + lun = doc.createElement('LUN') + config.appendChild(lun) + storagepool = doc.createElement('StoragePool') + storagepool.setAttribute('Name', 'RAID_001') + lun.appendChild(storagepool) + luntype = doc.createElement('LUNType') + luntype_text = doc.createTextNode('Thick') + luntype.appendChild(luntype_text) + lun.appendChild(luntype) + + iscsi = doc.createElement('iSCSI') + config.appendChild(iscsi) + defaulttargetip = doc.createElement('DefaultTargetIP') + defaulttargetip_text = doc.createTextNode('192.168.100.1') + defaulttargetip.appendChild(defaulttargetip_text) + iscsi.appendChild(defaulttargetip) + initiator = doc.createElement('Initiator') + initiator.setAttribute('Name', 'iqn.1993-08.debian:01:ec2bff7ac3a3') + initiator.setAttribute('TargetIP', '192.168.100.2') + iscsi.appendChild(initiator) + + os_type = doc.createElement('Host') + os_type.setAttribute('OSType', 'Linux') + os_type.setAttribute('HostIP', '10.10.0.1') + config.appendChild(os_type) + + tmp_file = open(filename, 'w') + tmp_file.write(doc.toprettyxml(indent='')) + tmp_file.close() + + +def modify_conf(conf, item, val, attrib=None): + tree = ET.parse(conf) + root = tree.getroot() + conf_item = root.find('%s' % item) + if not attrib: + conf_item.text = '%s' % val + else: + conf_item.attrib['%s' % attrib] = '%s' % val + tree.write(conf, 'UTF-8') + + +def set_error_flg(cmd): + cmd_error_list.append(cmd) + + +def reset_error_flg(cmd): + cmd_error_list.remove(cmd) + + +class HuaweiTCLIResSimulator(): + def _paras_name(self, params): + index = params.index('-n') + return params[index + 1] + + def cli_showsys(self, params): + pass + + def cli_createlun(self, params): + lun_type = ('THIN' if '-pool' in params else 'THICK') + if LUN_INFO['ID'] is None: + LUN_INFO['Name'] = self._paras_name(params) + LUN_INFO['ID'] = VOLUME_SNAP_ID['vol'] + LUN_INFO['Size'] = FAKE_VOLUME['size'] + LUN_INFO['Lun Type'] = lun_type + LUN_INFO['Owner Controller'] = 'A' + LUN_INFO['Worker Controller'] = 'A' + LUN_INFO['RAID Group ID'] = POOL_SETTING['ID'] + FAKE_VOLUME['provider_location'] = LUN_INFO['ID'] + else: + CLONED_LUN_INFO['Name'] = self._paras_name(params) + CLONED_LUN_INFO['ID'] = VOLUME_SNAP_ID['vol_copy'] + CLONED_LUN_INFO['Size'] = FAKE_CLONED_VOLUME['size'] + CLONED_LUN_INFO['Lun Type'] = lun_type + CLONED_LUN_INFO['Owner Controller'] = 'A' + CLONED_LUN_INFO['Worker Controller'] = 'A' + CLONED_LUN_INFO['RAID Group ID'] = POOL_SETTING['ID'] + FAKE_CLONED_VOLUME['provider_location'] = CLONED_LUN_INFO['ID'] + out = 'command operates successfully' + return out + + def cli_showlun(self, params): + if '-lun' not in params: + if LUN_INFO['ID'] is None: + out = 'command operates successfully, but no information.' 
+ elif CLONED_LUN_INFO['ID'] is None: + out = """/>showlun +=========================================================================== + LUN Information +--------------------------------------------------------------------------- + ID RAID Group ID Disk Pool ID Status Controller Visible Capacity(MB) \ + LUN Name Stripe Unit Size(KB) Lun Type +--------------------------------------------------------------------------- + %s %s -- Normal %s %s %s 64 THICK +=========================================================================== +""" % (LUN_INFO['ID'], LUN_INFO['RAID Group ID'], LUN_INFO['Owner Controller'], + str(int(LUN_INFO['Size']) * 1024), LUN_INFO['Name']) + else: + out = """/>showlun +============================================================================ + LUN Information +---------------------------------------------------------------------------- + ID RAID Group ID Disk Pool ID Status Controller Visible Capacity(MB)\ + LUN Name Stripe Unit Size(KB) Lun Type +---------------------------------------------------------------------------- + %s %s -- Normal %s %s %s 64 THICK + %s %s -- Normal %s %s %s 64 THICK +============================================================================ +""" % (LUN_INFO['ID'], LUN_INFO['RAID Group ID'], LUN_INFO['Owner Controller'], + str(int(LUN_INFO['Size']) * 1024), LUN_INFO['Name'], + CLONED_LUN_INFO['ID'], CLONED_LUN_INFO['RAID Group ID'], + CLONED_LUN_INFO['Owner Controller'], + str(int(CLONED_LUN_INFO['Size']) * 1024), + CLONED_LUN_INFO['Name']) + + elif params[params.index('-lun') + 1] in VOLUME_SNAP_ID.values(): + out = """/>showlun +================================================ + LUN Information +------------------------------------------------ + ID | %s + Name | %s + LUN WWN | -- + Visible Capacity | %s + RAID GROUP ID | %s + Owning Controller | %s + Workong Controller | %s + Lun Type | %s + SnapShot ID | %s + LunCopy ID | %s +================================================ +""" % ((LUN_INFO['ID'], LUN_INFO['Name'], LUN_INFO['Visible Capacity'], + LUN_INFO['RAID Group ID'], LUN_INFO['Owner Controller'], + LUN_INFO['Worker Controller'], LUN_INFO['Lun Type'], + LUN_INFO['SnapShot ID'], LUN_INFO['LunCopy ID']) + if params[params.index('-lun') + 1] == VOLUME_SNAP_ID['vol'] else + (CLONED_LUN_INFO['ID'], CLONED_LUN_INFO['Name'], + CLONED_LUN_INFO['Visible Capacity'], CLONED_LUN_INFO['RAID Group ID'], + CLONED_LUN_INFO['Owner Controller'], + CLONED_LUN_INFO['Worker Controller'], + CLONED_LUN_INFO['Lun Type'], CLONED_LUN_INFO['SnapShot ID'], + CLONED_LUN_INFO['LunCopy ID'])) + else: + out = 'ERROR: The object does not exist.' 
+ return out + + def cli_dellun(self, params): + if params[params.index('-lun') + 1] == VOLUME_SNAP_ID['vol']: + LUN_INFO['Name'] = None + LUN_INFO['ID'] = None + LUN_INFO['Size'] = None + LUN_INFO['Lun Type'] = None + LUN_INFO['LUN WWN'] = None + LUN_INFO['Owner Controller'] = None + LUN_INFO['Worker Controller'] = None + LUN_INFO['RAID Group ID'] = None + FAKE_VOLUME['provider_location'] = None + else: + CLONED_LUN_INFO['Name'] = None + CLONED_LUN_INFO['ID'] = None + CLONED_LUN_INFO['Size'] = None + CLONED_LUN_INFO['Lun Type'] = None + CLONED_LUN_INFO['LUN WWN'] = None + CLONED_LUN_INFO['Owner Controller'] = None + CLONED_LUN_INFO['Worker Controller'] = None + CLONED_LUN_INFO['RAID Group ID'] = None + CLONED_LUN_INFO['provider_location'] = None + FAKE_CLONED_VOLUME['provider_location'] = None + out = 'command operates successfully' + return out + + def cli_showrg(self, params): + out = """/>showrg +===================================================================== + RAID Group Information +--------------------------------------------------------------------- + ID Level Status Free Capacity(MB) Disk List Name +--------------------------------------------------------------------- + 0 RAID6 Normal 1024 0,0;0,2; RAID003 + %s %s %s %s %s %s +===================================================================== +-""" % (POOL_SETTING['ID'], POOL_SETTING['Level'], + POOL_SETTING['Status'], POOL_SETTING['Free Capacity'], + POOL_SETTING['Disk List'], POOL_SETTING['Name']) + return out + + def cli_showpool(self, params): + out = """/>showpool +===================================================================== + Pool Information +--------------------------------------------------------------------- + Level Status Available Capacity(MB) Disk List +--------------------------------------------------------------------- + RAID6 Normal %s 0,0;0,2;0,4;0,5; +===================================================================== +-""" % POOL_SETTING['Free Capacity'] + return out + + def cli_createluncopy(self, params): + src_id = params[params.index('-slun') + 1] + tgt_id = params[params.index('-tlun') + 1] + LUNCOPY_INFO['Name'] = 'OpenStack_%s_%s' % (src_id, tgt_id) + LUNCOPY_INFO['ID'] = LUNCOPY_SETTING['ID'] + LUNCOPY_INFO['Type'] = LUNCOPY_SETTING['Type'] + LUNCOPY_INFO['State'] = LUNCOPY_SETTING['State'] + LUNCOPY_INFO['Status'] = LUNCOPY_SETTING['Status'] + out = 'command operates successfully' + return out + + def cli_chgluncopystatus(self, params): + LUNCOPY_INFO['State'] = 'Start' + out = 'command operates successfully' + return out + + def cli_showluncopy(self, params): + if LUNCOPY_INFO['State'] == 'Start': + LUNCOPY_INFO['State'] = 'Copying' + elif LUNCOPY_INFO['State'] == 'Copying': + LUNCOPY_INFO['State'] = 'Complete' + out = """/>showluncopy +============================================================================ + LUN Copy Information +---------------------------------------------------------------------------- + LUN Copy Name LUN Copy ID Type LUN Copy State LUN Copy Status +---------------------------------------------------------------------------- + %s %s %s %s %s +============================================================================ +""" % (LUNCOPY_INFO['Name'], LUNCOPY_INFO['ID'], LUNCOPY_INFO['Type'], + LUNCOPY_INFO['State'], LUNCOPY_INFO['Status']) + return out + + def cli_delluncopy(self, params): + LUNCOPY_INFO['Name'] = None + LUNCOPY_INFO['ID'] = None + LUNCOPY_INFO['Type'] = None + LUNCOPY_INFO['State'] = None + LUNCOPY_INFO['Status'] = None + out = 'command 
operates successfully' + return out + + def cli_createsnapshot(self, params): + SNAPSHOT_INFO['Source LUN ID'] = LUN_INFO['ID'] + SNAPSHOT_INFO['Source LUN Name'] = LUN_INFO['Name'] + SNAPSHOT_INFO['ID'] = VOLUME_SNAP_ID['snap'] + SNAPSHOT_INFO['Name'] = self._paras_name(params) + SNAPSHOT_INFO['Status'] = 'Disable' + out = 'command operates successfully' + return out + + def cli_showsnapshot(self, params): + if SNAPSHOT_INFO['ID'] is None: + out = 'command operates successfully, but no information.' + else: + out = """/>showsnapshot +========================================================================== + Snapshot Information +-------------------------------------------------------------------------- + Name ID Type Status Time Stamp +-------------------------------------------------------------------------- + %s %s Public %s 2013-01-15 14:21:13 +========================================================================== +""" % (SNAPSHOT_INFO['Name'], SNAPSHOT_INFO['ID'], SNAPSHOT_INFO['Status']) + return out + + def cli_actvsnapshot(self, params): + SNAPSHOT_INFO['Status'] = 'Active' + FAKE_SNAPSHOT['provider_location'] = SNAPSHOT_INFO['ID'] + out = 'command operates successfully' + return out + + def cli_disablesnapshot(self, params): + SNAPSHOT_INFO['Status'] = 'Disable' + out = 'command operates successfully' + return out + + def cli_delsnapshot(self, params): + SNAPSHOT_INFO['Source LUN ID'] = None + SNAPSHOT_INFO['Source LUN Name'] = None + SNAPSHOT_INFO['ID'] = None + SNAPSHOT_INFO['Name'] = None + SNAPSHOT_INFO['Status'] = None + FAKE_SNAPSHOT['provider_location'] = None + out = 'command operates successfully' + return out + + def cli_showrespool(self, params): + out = """/>showrespool +=========================================================================== + Resource Pool Information +--------------------------------------------------------------------------- + Pool ID Size(MB) Usage(MB) Valid Size(MB) Alarm Threshold +--------------------------------------------------------------------------- + A %s 0.0 %s 80 + B %s 0.0 %s 80 +=========================================================================== +-""" % (RESPOOL_A_SIM['Size'], RESPOOL_A_SIM['Valid Size'], + RESPOOL_B_SIM['Size'], RESPOOL_B_SIM['Valid Size']) + return out + + def cli_showiscsitgtname(self, params): + iqn = INITIATOR_SETTING['TargetIQN'] + out = """/>showiscsitgtname +=================================================================== + ISCSI Name +------------------------------------------------------------------- + Iscsi Name | %s +=================================================================== +""" % iqn + return out + + def cli_showiscsiip(self, params): + out = """/>showiscsiip +============================================================================ + iSCSI IP Information +---------------------------------------------------------------------------- + Controller ID Interface Module ID Port ID IP Address Mask +---------------------------------------------------------------------------- + B 0 P1 %s 255.255.255.0 +============================================================================ +-""" % INITIATOR_SETTING['Initiator TargetIP'] + return out + + def cli_showhostgroup(self, params): + if MAP_INFO['Host Group ID'] is None: + out = """/>showhostgroup +============================================================ + Host Group Information +------------------------------------------------------------ + Host Group ID Name File Engine Cluster 
+------------------------------------------------------------ + 0 Default Group NO +============================================================ +""" + else: + out = """/>showhostgroup +============================================================ + Host Group Information +------------------------------------------------------------ + Host Group ID Name File Engine Cluster +------------------------------------------------------------ + 0 Default Group NO + %s %s NO +============================================================ +""" % (MAP_INFO['Host Group ID'], MAP_INFO['Host Group Name']) + return out + + def cli_createhostgroup(self, params): + MAP_INFO['Host Group ID'] = '1' + MAP_INFO['Host Group Name'] = 'HostGroup_OpenStack' + out = 'command operates successfully' + return out + + def cli_showhost(self, params): + if MAP_INFO['Host ID'] is None: + out = 'command operates successfully, but no information.' + else: + out = """/>showhost +======================================================= + Host Information +------------------------------------------------------- + Host ID Host Name Host Group ID Os Type +------------------------------------------------------- + %s %s %s Linux +======================================================= +""" % (MAP_INFO['Host ID'], MAP_INFO['Host Name'], MAP_INFO['Host Group ID']) + return out + + def cli_addhost(self, params): + MAP_INFO['Host ID'] = '1' + MAP_INFO['Host Name'] = 'Host_' + FAKE_CONNECTOR['host'] + MAP_INFO['Os Type'] = 'Linux' + out = 'command operates successfully' + return out + + def cli_delhost(self, params): + MAP_INFO['Host ID'] = None + MAP_INFO['Host Name'] = None + MAP_INFO['Os Type'] = None + out = 'command operates successfully' + return out + + def cli_showiscsiini(self, params): + if HOST_PORT_INFO['ID'] is None: + out = 'Error: The parameter is wrong.' + else: + out = """/>showiscsiini +======================================================== + Initiator Information +-------------------------------------------------------- + Initiator Name Chap Status +-------------------------------------------------------- + %s Disable +======================================================== +""" % HOST_PORT_INFO['Info'] + return out + + def cli_addiscsiini(self, params): + HOST_PORT_INFO['ID'] = '1' + HOST_PORT_INFO['Name'] = 'iSCSIInitiator001' + HOST_PORT_INFO['Info'] = INITIATOR_SETTING['Initiator Name'] + HOST_PORT_INFO['Type'] = 'ISCSITGT' + out = 'command operates successfully' + return out + + def cli_deliscsiini(self, params): + HOST_PORT_INFO['ID'] = None + HOST_PORT_INFO['Name'] = None + HOST_PORT_INFO['Info'] = None + HOST_PORT_INFO['Type'] = None + out = 'command operates successfully' + return out + + def cli_showhostport(self, params): + if MAP_INFO['INI Port ID'] is None: + out = 'command operates successfully, but no information.' 
+ else: + out = """/>showhostport +============================================================================ + Host Port Information +---------------------------------------------------------------------------- +Port ID Port Name Port Information Port Type Host ID Link Status \ +Multipath Type +---------------------------------------------------------------------------- + %s %s %s %s %s Unconnected Default +============================================================================ +""" % (MAP_INFO['INI Port ID'], MAP_INFO['INI Port Name'], + MAP_INFO['INI Port Info'], MAP_INFO['INI Port Type'], + MAP_INFO['Host ID']) + return out + + def cli_addhostport(self, params): + MAP_INFO['INI Port ID'] = HOST_PORT_INFO['ID'] + MAP_INFO['INI Port Name'] = HOST_PORT_INFO['Name'] + MAP_INFO['INI Port Info'] = HOST_PORT_INFO['Info'] + MAP_INFO['INI Port Type'] = HOST_PORT_INFO['Type'] + out = 'command operates successfully' + return out + + def cli_delhostport(self, params): + MAP_INFO['INI Port ID'] = None + MAP_INFO['INI Port Name'] = None + MAP_INFO['INI Port Info'] = None + MAP_INFO['INI Port Type'] = None + HOST_PORT_INFO['ID'] = None + HOST_PORT_INFO['Name'] = None + HOST_PORT_INFO['Info'] = None + HOST_PORT_INFO['Type'] = None + out = 'command operates successfully' + return out + + def cli_showhostmap(self, params): + if MAP_INFO['DEV LUN ID'] is None: + out = 'command operates successfully, but no information.' + else: + out = """/>showhostmap +=========================================================================== + Map Information +--------------------------------------------------------------------------- + Map ID Working Controller Dev LUN ID LUN WWN Host LUN ID Mapped to\ + RAID ID Dev LUN Cap(MB) Map Type Whether Command LUN Pool ID +---------------------------------------------------------------------------- + 2147483649 %s %s %s %s Host: %s %s %s HOST No -- +============================================================================ +""" % (LUN_INFO['Worker Controller'], LUN_INFO['ID'], LUN_INFO['LUN WWN'], + MAP_INFO['Host LUN ID'], MAP_INFO['Host ID'], LUN_INFO['RAID Group ID'], + str(int(LUN_INFO['Size']) * 1024)) + return out + + def cli_addhostmap(self, params): + MAP_INFO['DEV LUN ID'] = LUN_INFO['ID'] + MAP_INFO['LUN WWN'] = LUN_INFO['LUN WWN'] + MAP_INFO['Host LUN ID'] = '2' + MAP_INFO['Link Status'] = 'Linked' + out = 'command operates successfully' + return out + + def cli_delhostmap(self, params): + if MAP_INFO['Link Status'] == 'Linked': + MAP_INFO['Link Status'] = 'Deleting' + out = 'there are IOs accessing the system, please try later' + else: + MAP_INFO['Link Status'] = None + MAP_INFO['DEV LUN ID'] = None + MAP_INFO['LUN WWN'] = None + MAP_INFO['Host LUN ID'] = None + out = 'command operates successfully' + return out + + def cli_showfreeport(self, params): + out = """/>showfreeport +======================================================================= + Host Free Port Information +----------------------------------------------------------------------- + WWN Or MAC Type Location Connection Status +----------------------------------------------------------------------- + 1000000164s45126 FC Primary Controller Connected +======================================================================= +""" + HOST_PORT_INFO['ID'] = '2' + HOST_PORT_INFO['Name'] = 'FCInitiator001' + HOST_PORT_INFO['Info'] = '1000000164s45126' + HOST_PORT_INFO['Type'] = 'FC' + return out + + def cli_showhostpath(self, params): + host = params[params.index('-host') + 1] + out = 
"""/>showhostpath -host 1 +======================================= + Multi Path Information +--------------------------------------- + Host ID | %s + Controller ID | B + Port Type | FC + Initiator WWN | 1000000164s45126 + Target WWN | %s + Host Port ID | 0 + Link Status | Normal +======================================= +""" % (host, INITIATOR_SETTING['WWN'][0]) + return out + + def cli_showfcmode(self, params): + out = """/>showfcport +========================================================================= + FC Port Topology Mode +------------------------------------------------------------------------- + Controller ID Interface Module ID Port ID WWN Current Mode +------------------------------------------------------------------------- + B 1 P0 %s -- +========================================================================= +-""" % INITIATOR_SETTING['WWN'][0] + return out + + def cli_chglun(self, params): + if params[params.index('-lun') + 1] == VOLUME_SNAP_ID['vol']: + LUN_INFO['Owner Controller'] = 'B' + else: + CLONED_LUN_INFO['Owner Controller'] = 'B' + out = 'command operates successfully' + return out + + def cli_addluntoextlun(self, params): + LUN_INFO['Size'] = int(LUN_INFO['Size']) + int(CLONED_LUN_INFO['Size']) + out = 'command operates successfully' + return out + + def cli_rmlunfromextlun(self, patams): + LUN_INFO['Size'] = int(LUN_INFO['Size']) - int(CLONED_LUN_INFO['Size']) + out = 'command operates successfully' + return out + + +class HuaweiDorado5100CLIResSimulator(HuaweiTCLIResSimulator): + def cli_showsys(self, params): + out = """/>showsys +============================================================= + System Information +------------------------------------------------------------- + System Name | SN_Dorado5100 + Device Type | Oceanstor Dorado5100 + Current System Mode | Double Controllers Normal + Mirroring Link Status | Link Up + Location | + Time | 2013-01-01 01:01:01 + Product Version | V100R001C00 +============================================================= +""" + return out + + def cli_showlun(self, params): + if '-lun' not in params: + if LUN_INFO['ID'] is None: + out = 'command operates successfully, but no information.' 
+ elif CLONED_LUN_INFO['ID'] is None: + out = """/>showlun +=========================================================================== + LUN Information +--------------------------------------------------------------------------- + ID RAIDgroup ID Status Controller Visible Capacity(MB) LUN Name..\ + Strip Unit Size(KB) Lun Type +--------------------------------------------------------------------------- + %s %s Normal %s %s %s 64 THICK +=========================================================================== +""" % (LUN_INFO['ID'], LUN_INFO['RAID Group ID'], + LUN_INFO['Owner Controller'], str(int(LUN_INFO['Size']) * 1024), + LUN_INFO['Name']) + else: + out = """/>showlun +=========================================================================== + LUN Information +--------------------------------------------------------------------------- + ID RAIDgroup ID Status Controller Visible Capacity(MB) LUN Name \ + Strip Unit Size(KB) Lun Type +--------------------------------------------------------------------------- + %s %s Normal %s %s %s 64 THICK + %s %s Norma %s %s %s 64 THICK +=========================================================================== +""" % (LUN_INFO['ID'], LUN_INFO['RAID Group ID'], LUN_INFO['Owner Controller'], + str(int(LUN_INFO['Size']) * 1024), LUN_INFO['Name'], + CLONED_LUN_INFO['ID'], CLONED_LUN_INFO['RAID Group ID'], + CLONED_LUN_INFO['Owner Controller'], + str(int(CLONED_LUN_INFO['Size']) * 1024), + CLONED_LUN_INFO['Name']) + elif params[params.index('-lun') + 1] in VOLUME_SNAP_ID.values(): + out = """/>showlun +================================================ + LUN Information +------------------------------------------------ + ID | %s + Name | %s + LUN WWN | -- + Visible Capacity | %s + RAID GROUP ID | %s + Owning Controller | %s + Workong Controller | %s + Lun Type | %s + SnapShot ID | %s + LunCopy ID | %s +================================================ +""" % ((LUN_INFO['ID'], LUN_INFO['Name'], LUN_INFO['Visible Capacity'], + LUN_INFO['RAID Group ID'], LUN_INFO['Owner Controller'], + LUN_INFO['Worker Controller'], LUN_INFO['Lun Type'], + LUN_INFO['SnapShot ID'], LUN_INFO['LunCopy ID']) + if params[params.index('-lun') + 1] == VOLUME_SNAP_ID['vol'] else + (CLONED_LUN_INFO['ID'], CLONED_LUN_INFO['Name'], + CLONED_LUN_INFO['Visible Capacity'], CLONED_LUN_INFO['RAID Group ID'], + CLONED_LUN_INFO['Owner Controller'], + CLONED_LUN_INFO['Worker Controller'], + CLONED_LUN_INFO['Lun Type'], CLONED_LUN_INFO['SnapShot ID'], + CLONED_LUN_INFO['LunCopy ID'])) + else: + out = 'ERROR: The object does not exist.' 
+ return out + + +class HuaweiDorado2100G2CLIResSimulator(HuaweiTCLIResSimulator): + def cli_showsys(self, params): + out = """/>showsys +========================================================================== + System Information +-------------------------------------------------------------------------- + System Name | SN_Dorado2100_G2 + Device Type | Oceanstor Dorado2100 G2 + Current System Mode | Double Controllers Normal + Mirroring Link Status | Link Up + Location | + Time | 2013-01-01 01:01:01 + Product Version | V100R001C00 +=========================================================================== +""" + return out + + def cli_createlun(self, params): + lun_type = ('THIN' if params[params.index('-type') + 1] == '2' else + 'THICK') + if LUN_INFO['ID'] is None: + LUN_INFO['Name'] = self._paras_name(params) + LUN_INFO['ID'] = VOLUME_SNAP_ID['vol'] + LUN_INFO['Size'] = FAKE_VOLUME['size'] + LUN_INFO['Lun Type'] = lun_type + LUN_INFO['Owner Controller'] = 'A' + LUN_INFO['Worker Controller'] = 'A' + LUN_INFO['RAID Group ID'] = POOL_SETTING['ID'] + FAKE_VOLUME['provider_location'] = LUN_INFO['ID'] + else: + CLONED_LUN_INFO['Name'] = self._paras_name(params) + CLONED_LUN_INFO['ID'] = VOLUME_SNAP_ID['vol_copy'] + CLONED_LUN_INFO['Size'] = FAKE_CLONED_VOLUME['size'] + CLONED_LUN_INFO['Lun Type'] = lun_type + CLONED_LUN_INFO['Owner Controller'] = 'A' + CLONED_LUN_INFO['Worker Controller'] = 'A' + CLONED_LUN_INFO['RAID Group ID'] = POOL_SETTING['ID'] + CLONED_LUN_INFO['provider_location'] = CLONED_LUN_INFO['ID'] + FAKE_CLONED_VOLUME['provider_location'] = CLONED_LUN_INFO['ID'] + out = 'command operates successfully' + return out + + def cli_showlun(self, params): + if '-lun' not in params: + if LUN_INFO['ID'] is None: + out = 'command operates successfully, but no information.' 
+            elif CLONED_LUN_INFO['ID'] is None:
+                out = """/>showlun
+===========================================================================
+  LUN Information
+---------------------------------------------------------------------------
+  ID   Status   Controller   Visible Capacity(MB)   LUN Name   Lun Type
+---------------------------------------------------------------------------
+  %s   Normal   %s   %s   %s   THICK
+===========================================================================
+""" % (LUN_INFO['ID'], LUN_INFO['Owner Controller'],
+       str(int(LUN_INFO['Size']) * 1024), LUN_INFO['Name'])
+            else:
+                out = """/>showlun
+===========================================================================
+  LUN Information
+---------------------------------------------------------------------------
+  ID   Status   Controller   Visible Capacity(MB)   LUN Name   Lun Type
+---------------------------------------------------------------------------
+  %s   Normal   %s   %s   %s   THICK
+  %s   Normal   %s   %s   %s   THICK
+===========================================================================
+""" % (LUN_INFO['ID'], LUN_INFO['Owner Controller'],
+       str(int(LUN_INFO['Size']) * 1024), LUN_INFO['Name'],
+       CLONED_LUN_INFO['ID'], CLONED_LUN_INFO['Owner Controller'],
+       str(int(CLONED_LUN_INFO['Size']) * 1024), CLONED_LUN_INFO['Name'])
+
+        elif params[params.index('-lun') + 1] in VOLUME_SNAP_ID.values():
+            out = """/>showlun
+================================================
+  LUN Information
+------------------------------------------------
+  ID                 | %s
+  Name               | %s
+  LUN WWN            | --
+  Visible Capacity   | %s
+  RAID GROUP ID      | %s
+  Owning Controller  | %s
+  Workong Controller | %s
+  Lun Type           | %s
+  SnapShot ID        | %s
+  LunCopy ID         | %s
+================================================
+""" % ((LUN_INFO['ID'], LUN_INFO['Name'], LUN_INFO['Visible Capacity'],
+        LUN_INFO['RAID Group ID'], LUN_INFO['Owner Controller'],
+        LUN_INFO['Worker Controller'], LUN_INFO['Lun Type'],
+        LUN_INFO['SnapShot ID'], LUN_INFO['LunCopy ID'])
+       if params[params.index('-lun') + 1] == VOLUME_SNAP_ID['vol'] else
+       (CLONED_LUN_INFO['ID'], CLONED_LUN_INFO['Name'],
+        CLONED_LUN_INFO['Visible Capacity'], CLONED_LUN_INFO['RAID Group ID'],
+        CLONED_LUN_INFO['Owner Controller'],
+        CLONED_LUN_INFO['Worker Controller'],
+        CLONED_LUN_INFO['Lun Type'], CLONED_LUN_INFO['SnapShot ID'],
+        CLONED_LUN_INFO['LunCopy ID']))
+
+        else:
+            out = 'ERROR: The object does not exist.'
+
+        return out
+
+
+class HuaweiTISCSIDriverTestCase(test.TestCase):
+    def __init__(self, *args, **kwargs):
+        super(HuaweiTISCSIDriverTestCase, self).__init__(*args, **kwargs)
+
+    def setUp(self):
+        super(HuaweiTISCSIDriverTestCase, self).setUp()
+
+        self.tmp_dir = tempfile.mkdtemp()
+        self.fake_conf_file = self.tmp_dir + '/cinder_huawei_conf.xml'
+        create_fake_conf_file(self.fake_conf_file)
+        self.configuration = mox.MockObject(conf.Configuration)
+        self.configuration.cinder_huawei_conf_file = self.fake_conf_file
+        self.configuration.append_config_values(mox.IgnoreArg())
+
+        self.stubs.Set(time, 'sleep', Fake_sleep)
+        self.stubs.Set(utils, 'SSHPool', FakeSSHPool)
+        self.stubs.Set(ssh_common.TseriesCommon, '_change_file_mode',
+                       Fake_change_file_mode)
+        self._init_driver()
+
+    def _init_driver(self):
+        Curr_test[0] = 'T'
+        self.driver = HuaweiVolumeDriver(configuration=self.configuration)
+        self.driver.do_setup(None)
+
+    def tearDown(self):
+        if os.path.exists(self.fake_conf_file):
+            os.remove(self.fake_conf_file)
+        shutil.rmtree(self.tmp_dir)
+        super(HuaweiTISCSIDriverTestCase, self).tearDown()
+
+    def test_conf_invalid(self):
+        # Test config file not found
+        tmp_conf_file = '/xxx/cinder_huawei_conf.xml'
+        tmp_configuration = mox.MockObject(conf.Configuration)
+        tmp_configuration.cinder_huawei_conf_file = tmp_conf_file
+        tmp_configuration.append_config_values(mox.IgnoreArg())
+        self.assertRaises(IOError,
+                          HuaweiVolumeDriver,
+                          configuration=tmp_configuration)
+        # Test Product and Protocol invalid
+        tmp_dict = {'Storage/Product': 'T', 'Storage/Protocol': 'iSCSI'}
+        for k, v in tmp_dict.items():
+            modify_conf(self.fake_conf_file, k, 'xx')
+            self.assertRaises(exception.InvalidInput,
+                              HuaweiVolumeDriver,
+                              configuration=self.configuration)
+            modify_conf(self.fake_conf_file, k, v)
+        # Test ctr ip, UserName and password unspecified
+        tmp_dict = {'Storage/ControllerIP0': '10.10.10.1',
+                    'Storage/ControllerIP1': '10.10.10.2',
+                    'Storage/UserName': 'admin',
+                    'Storage/UserPassword': '123456'}
+        for k, v in tmp_dict.items():
+            modify_conf(self.fake_conf_file, k, '')
+            tmp_driver = HuaweiVolumeDriver(configuration=self.configuration)
+            self.assertRaises(exception.InvalidInput,
+                              tmp_driver.do_setup, None)
+            modify_conf(self.fake_conf_file, k, v)
+        # Test StoragePool unspecified
+        modify_conf(self.fake_conf_file, 'LUN/StoragePool', '', attrib='Name')
+        tmp_driver = HuaweiVolumeDriver(configuration=self.configuration)
+        self.assertRaises(exception.InvalidInput,
+                          tmp_driver.do_setup, None)
+        modify_conf(self.fake_conf_file, 'LUN/StoragePool', 'RAID_001',
+                    attrib='Name')
+        # Test LUN type invalid
+        modify_conf(self.fake_conf_file, 'LUN/LUNType', 'thick')
+        tmp_driver = HuaweiVolumeDriver(configuration=self.configuration)
+        tmp_driver.do_setup(None)
+        self.assertRaises(exception.InvalidInput,
+                          tmp_driver.create_volume, FAKE_VOLUME)
+        modify_conf(self.fake_conf_file, 'LUN/LUNType', 'Thick')
+        # Test OSType invalid
+        modify_conf(self.fake_conf_file, 'Host', 'invalid_type',
+                    attrib='OSType')
+        tmp_driver = HuaweiVolumeDriver(configuration=self.configuration)
+        self.assertRaises(exception.InvalidInput,
+                          tmp_driver.do_setup, None)
+        modify_conf(self.fake_conf_file, 'Host', 'Linux', attrib='OSType')
+        # Test TargetIP not found
+        modify_conf(self.fake_conf_file, 'iSCSI/DefaultTargetIP', '')
+        modify_conf(self.fake_conf_file, 'iSCSI/Initiator', '', attrib='Name')
+        tmp_driver = HuaweiVolumeDriver(configuration=self.configuration)
+        tmp_driver.do_setup(None)
+        tmp_driver.create_volume(FAKE_VOLUME)
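+        # Both target-IP sources were blanked above, so the driver cannot
+        # build an iSCSI portal and the attach below is expected to fail.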
self.assertRaises(exception.InvalidInput, + tmp_driver.initialize_connection, + FAKE_VOLUME, FAKE_CONNECTOR) + tmp_driver.delete_volume(FAKE_VOLUME) + modify_conf(self.fake_conf_file, 'iSCSI/DefaultTargetIP', + '192.168.100.1') + modify_conf(self.fake_conf_file, 'iSCSI/Initiator', + 'iqn.1993-08.debian:01:ec2bff7ac3a3', attrib='Name') + + def test_volume_type(self): + ctxt = context.get_admin_context() + extra_specs = {'drivers:LUNType': 'Thin'} + type_ref = volume_types.create(ctxt, 'THIN', extra_specs) + FAKE_VOLUME['volume_type_id'] = type_ref['id'] + self.driver.create_volume(FAKE_VOLUME) + self.assertEqual(LUN_INFO["ID"], VOLUME_SNAP_ID['vol']) + self.assertEqual(LUN_INFO['Lun Type'], 'THIN') + self.driver.delete_volume(FAKE_VOLUME) + FAKE_VOLUME['volume_type_id'] = None + + # Test volume type invalid + extra_specs = {'drivers:InvalidLUNType': 'Thin'} + type_ref = volume_types.create(ctxt, 'Invalid_THIN', extra_specs) + FAKE_VOLUME['volume_type_id'] = type_ref['id'] + self.driver.create_volume(FAKE_VOLUME) + self.assertEqual(LUN_INFO["ID"], VOLUME_SNAP_ID['vol']) + self.assertNotEqual(LUN_INFO['Lun Type'], 'THIN') + self.driver.delete_volume(FAKE_VOLUME) + FAKE_VOLUME['volume_type_id'] = None + + def test_create_delete_volume(self): + # Test create lun cli exception + set_error_flg('createlun') + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.create_volume, FAKE_VOLUME) + + ret = self.driver.create_volume(FAKE_VOLUME) + self.assertEqual(LUN_INFO['ID'], VOLUME_SNAP_ID['vol']) + self.assertEqual(ret['provider_location'], LUN_INFO['ID']) + + # Test delete lun cli exception + set_error_flg('dellun') + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.delete_volume, FAKE_VOLUME) + + self.driver.delete_volume(FAKE_VOLUME) + self.assertIsNone(LUN_INFO['ID']) + self.assertIsNone(FAKE_VOLUME['provider_location']) + + def test_create_delete_cloned_volume(self): + # Test no source volume + self.assertRaises(exception.VolumeNotFound, + self.driver.create_cloned_volume, + FAKE_CLONED_VOLUME, FAKE_VOLUME) + + self.driver.create_volume(FAKE_VOLUME) + # Test create luncopy failed + self.assertEqual(LUN_INFO['ID'], VOLUME_SNAP_ID['vol']) + set_error_flg('createluncopy') + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.create_cloned_volume, + FAKE_CLONED_VOLUME, FAKE_VOLUME) + self.assertEqual(CLONED_LUN_INFO['ID'], VOLUME_SNAP_ID['vol_copy']) + self.driver.delete_volume(FAKE_CLONED_VOLUME) + self.assertIsNone(CLONED_LUN_INFO['ID']) + # Test start luncopy failed + self.assertEqual(LUN_INFO['ID'], VOLUME_SNAP_ID['vol']) + set_error_flg('chgluncopystatus') + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.create_cloned_volume, + FAKE_CLONED_VOLUME, FAKE_VOLUME) + self.assertIsNone(CLONED_LUN_INFO['ID']) + self.assertEqual(LUN_INFO['ID'], VOLUME_SNAP_ID['vol']) + # Test luncopy status abnormal + LUNCOPY_SETTING['Status'] = 'Disable' + self.assertEqual(LUN_INFO['ID'], '0') + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.create_cloned_volume, + FAKE_CLONED_VOLUME, FAKE_VOLUME) + self.assertIsNone(CLONED_LUN_INFO['ID']) + self.assertEqual(LUN_INFO['ID'], VOLUME_SNAP_ID['vol']) + LUNCOPY_SETTING['Status'] = 'Normal' + # Test delete luncopy failed + set_error_flg('delluncopy') + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.create_cloned_volume, + FAKE_CLONED_VOLUME, FAKE_VOLUME) + self.assertEqual(CLONED_LUN_INFO['ID'], VOLUME_SNAP_ID['vol_copy']) + 
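+        # create_cloned_volume failed only at the delluncopy step, so the
+        # copied LUN still exists and must be deleted here.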
self.driver.delete_volume(FAKE_CLONED_VOLUME) + self.assertIsNone(CLONED_LUN_INFO['ID']) + # need to clean up LUNCopy + LUNCOPY_INFO['Name'] = None + LUNCOPY_INFO['ID'] = None + LUNCOPY_INFO['Type'] = None + LUNCOPY_INFO['State'] = None + LUNCOPY_INFO['Status'] = None + + # Test normal create and delete cloned volume + self.assertEqual(LUN_INFO['ID'], VOLUME_SNAP_ID['vol']) + ret = self.driver.create_cloned_volume(FAKE_CLONED_VOLUME, FAKE_VOLUME) + self.assertEqual(CLONED_LUN_INFO['ID'], VOLUME_SNAP_ID['vol_copy']) + self.assertEqual(ret['provider_location'], CLONED_LUN_INFO['ID']) + self.driver.delete_volume(FAKE_CLONED_VOLUME) + self.assertIsNone(CLONED_LUN_INFO['ID']) + self.assertIsNone(FAKE_CLONED_VOLUME['provider_location']) + self.driver.delete_volume(FAKE_VOLUME) + self.assertIsNone(LUN_INFO['ID']) + + def test_extend_volume(self): + VOLUME_SIZE = 5 + # Test no extended volume + self.assertRaises(exception.VolumeNotFound, + self.driver.extend_volume, FAKE_VOLUME, VOLUME_SIZE) + + self.driver.create_volume(FAKE_VOLUME) + self.assertEqual(LUN_INFO['Size'], '2') + # Test extend volume cli exception + set_error_flg('addluntoextlun') + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.extend_volume, FAKE_VOLUME, VOLUME_SIZE) + self.assertEqual(CLONED_LUN_INFO['Name'], None) + + self.driver.extend_volume(FAKE_VOLUME, VOLUME_SIZE) + self.assertEqual(LUN_INFO['Size'], VOLUME_SIZE) + self.driver.delete_volume(FAKE_VOLUME) + self.assertEqual(LUN_INFO['Name'], None) + + def test_create_delete_snapshot(self): + # Test no resource pool + RESPOOL_A_SIM['Valid Size'] = '0' + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.create_snapshot, FAKE_SNAPSHOT) + RESPOOL_A_SIM['Valid Size'] = '5120' + # Test no source volume + self.assertRaises(exception.VolumeNotFound, + self.driver.create_snapshot, FAKE_SNAPSHOT) + # Test create snapshot cli exception + self.driver.create_volume(FAKE_VOLUME) + set_error_flg('createsnapshot') + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.create_snapshot, + FAKE_SNAPSHOT) + self.assertEqual(LUN_INFO['ID'], VOLUME_SNAP_ID['vol']) + # Test active snapshot failed + set_error_flg('actvsnapshot') + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.create_snapshot, + FAKE_SNAPSHOT) + self.assertIsNone(SNAPSHOT_INFO['ID']) + self.assertIsNone(SNAPSHOT_INFO['Status']) + # Test disable snapshot failed + set_error_flg('disablesnapshot') + self.driver.create_snapshot(FAKE_SNAPSHOT) + self.assertEqual(SNAPSHOT_INFO['ID'], VOLUME_SNAP_ID['snap']) + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.delete_snapshot, + FAKE_SNAPSHOT) + self.assertEqual(SNAPSHOT_INFO['Status'], 'Active') + # Test delsnapshot failed + set_error_flg('delsnapshot') + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.delete_snapshot, + FAKE_SNAPSHOT) + self.assertEqual(SNAPSHOT_INFO['Status'], 'Disable') + + self.driver.delete_snapshot(FAKE_SNAPSHOT) + + # Test normal create and delete snapshot + self.driver.create_volume(FAKE_VOLUME) + ret = self.driver.create_snapshot(FAKE_SNAPSHOT) + self.assertEqual(SNAPSHOT_INFO['ID'], VOLUME_SNAP_ID['snap']) + self.assertEqual(SNAPSHOT_INFO['Status'], 'Active') + self.assertEqual(ret['provider_location'], SNAPSHOT_INFO['ID']) + self.driver.delete_snapshot(FAKE_SNAPSHOT) + self.assertIsNone(SNAPSHOT_INFO['ID']) + self.assertIsNone(SNAPSHOT_INFO['Status']) + + def test_create_delete_snapshot_volume(self): + # Test no source snapshot + 
self.assertRaises(exception.VolumeBackendAPIException, + self.driver.create_volume_from_snapshot, + FAKE_CLONED_VOLUME, FAKE_SNAPSHOT) + # Test normal create and delete snapshot volume + self.driver.create_volume(FAKE_VOLUME) + self.driver.create_snapshot(FAKE_SNAPSHOT) + self.assertEqual(LUN_INFO['ID'], VOLUME_SNAP_ID['vol']) + self.assertEqual(SNAPSHOT_INFO['ID'], VOLUME_SNAP_ID['snap']) + ret = self.driver.create_volume_from_snapshot(FAKE_CLONED_VOLUME, + FAKE_SNAPSHOT) + self.assertEqual(CLONED_LUN_INFO['ID'], VOLUME_SNAP_ID['vol_copy']) + self.assertEqual(ret['provider_location'], CLONED_LUN_INFO['ID']) + self.driver.delete_snapshot(FAKE_SNAPSHOT) + self.driver.delete_volume(FAKE_VOLUME) + self.driver.delete_volume(FAKE_CLONED_VOLUME) + self.assertIsNone(LUN_INFO['ID']) + self.assertIsNone(CLONED_LUN_INFO['ID']) + self.assertIsNone(SNAPSHOT_INFO['ID']) + + def test_initialize_connection(self): + # Test can not get iscsi iqn + set_error_flg('showiscsitgtname') + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.initialize_connection, + FAKE_VOLUME, FAKE_CONNECTOR) + # Test failed to get iSCSI port info + set_error_flg('showiscsiip') + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.initialize_connection, + FAKE_VOLUME, FAKE_CONNECTOR) + # Test create hostgroup failed + set_error_flg('createhostgroup') + MAP_INFO['Host Group ID'] = None + MAP_INFO['Host Group Name'] = None + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.initialize_connection, + FAKE_VOLUME, FAKE_CONNECTOR) + # Test create host failed + set_error_flg('addhost') + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.initialize_connection, + FAKE_VOLUME, FAKE_CONNECTOR) + # Test add iSCSI initiator failed + set_error_flg('addiscsiini') + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.initialize_connection, + FAKE_VOLUME, FAKE_CONNECTOR) + # Test add hostport failed + set_error_flg('addhostport') + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.initialize_connection, + FAKE_VOLUME, FAKE_CONNECTOR) + # Test no volume + FAKE_VOLUME['provider_location'] = '100' + self.assertRaises(exception.VolumeNotFound, + self.driver.initialize_connection, + FAKE_VOLUME, FAKE_CONNECTOR) + FAKE_VOLUME['provider_location'] = None + # Test map volume failed + self.driver.create_volume(FAKE_VOLUME) + set_error_flg('addhostmap') + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.initialize_connection, + FAKE_VOLUME, FAKE_CONNECTOR) + # Test normal initialize connection + self.assertEqual(FAKE_VOLUME['provider_location'], + VOLUME_SNAP_ID['vol']) + self.assertEqual(LUN_INFO['Owner Controller'], 'A') + ret = self.driver.initialize_connection(FAKE_VOLUME, FAKE_CONNECTOR) + iscsi_propers = ret['data'] + self.assertEqual(iscsi_propers['target_iqn'], + INITIATOR_SETTING['TargetIQN-form']) + self.assertEqual(iscsi_propers['target_portal'], + INITIATOR_SETTING['Initiator TargetIP'] + ':3260') + self.assertEqual(MAP_INFO["DEV LUN ID"], LUN_INFO['ID']) + self.assertEqual(MAP_INFO["INI Port Info"], + FAKE_CONNECTOR['initiator']) + self.assertEqual(LUN_INFO['Owner Controller'], 'B') + self.driver.terminate_connection(FAKE_VOLUME, FAKE_CONNECTOR) + self.driver.delete_volume(FAKE_VOLUME) + self.assertIsNone(LUN_INFO['ID']) + + def test_terminate_connection(self): + # Test no host was found + self.assertRaises(exception.HostNotFound, + self.driver.terminate_connection, + FAKE_VOLUME, FAKE_CONNECTOR) + # Test no volume 
was found
+        self.driver.create_volume(FAKE_VOLUME)
+        self.driver.initialize_connection(FAKE_VOLUME, FAKE_CONNECTOR)
+        FAKE_VOLUME['provider_location'] = None
+        self.assertRaises(exception.VolumeNotFound,
+                          self.driver.terminate_connection,
+                          FAKE_VOLUME, FAKE_CONNECTOR)
+        FAKE_VOLUME['provider_location'] = LUN_INFO['ID']
+        # Test delete map failed
+        set_error_flg('delhostmap')
+        self.assertRaises(exception.VolumeBackendAPIException,
+                          self.driver.terminate_connection,
+                          FAKE_VOLUME, FAKE_CONNECTOR)
+        # Delete hostport failed
+        set_error_flg('delhostport')
+        self.assertRaises(exception.VolumeBackendAPIException,
+                          self.driver.terminate_connection,
+                          FAKE_VOLUME, FAKE_CONNECTOR)
+        # Test delete initiator failed
+        set_error_flg('deliscsiini')
+        self.assertRaises(exception.VolumeBackendAPIException,
+                          self.driver.terminate_connection,
+                          FAKE_VOLUME, FAKE_CONNECTOR)
+        # Test delete host failed
+        set_error_flg('delhost')
+        self.assertRaises(exception.VolumeBackendAPIException,
+                          self.driver.terminate_connection,
+                          FAKE_VOLUME, FAKE_CONNECTOR)
+        # Test normal terminate connection
+        self.assertEqual(LUN_INFO['ID'], VOLUME_SNAP_ID['vol'])
+        self.driver.initialize_connection(FAKE_VOLUME, FAKE_CONNECTOR)
+        self.driver.terminate_connection(FAKE_VOLUME, FAKE_CONNECTOR)
+        self.assertIsNone(MAP_INFO["DEV LUN ID"])
+        self.driver.delete_volume(FAKE_VOLUME)
+        self.assertIsNone(LUN_INFO['ID'])
+
+    def test_get_volume_stats(self):
+        stats = self.driver.get_volume_stats(True)
+        free_capacity = float(POOL_SETTING['Free Capacity']) / 1024
+        self.assertEqual(stats['free_capacity_gb'], free_capacity)
+        self.assertEqual(stats['storage_protocol'], 'iSCSI')
+
+
+class HuaweiTFCDriverTestCase(test.TestCase):
+    def __init__(self, *args, **kwargs):
+        super(HuaweiTFCDriverTestCase, self).__init__(*args, **kwargs)
+
+    def setUp(self):
+        super(HuaweiTFCDriverTestCase, self).setUp()
+
+        self.tmp_dir = tempfile.mkdtemp()
+        self.fake_conf_file = self.tmp_dir + '/cinder_huawei_conf.xml'
+        create_fake_conf_file(self.fake_conf_file)
+        modify_conf(self.fake_conf_file, 'Storage/Protocol', 'FC')
+        self.configuration = mox.MockObject(conf.Configuration)
+        self.configuration.cinder_huawei_conf_file = self.fake_conf_file
+        self.configuration.append_config_values(mox.IgnoreArg())
+
+        self.stubs.Set(time, 'sleep', Fake_sleep)
+        self.stubs.Set(utils, 'SSHPool', FakeSSHPool)
+        self.stubs.Set(ssh_common.TseriesCommon, '_change_file_mode',
+                       Fake_change_file_mode)
+        self._init_driver()
+
+    def _init_driver(self):
+        Curr_test[0] = 'T'
+        self.driver = HuaweiVolumeDriver(configuration=self.configuration)
+        self.driver.do_setup(None)
+
+    def tearDown(self):
+        if os.path.exists(self.fake_conf_file):
+            os.remove(self.fake_conf_file)
+        shutil.rmtree(self.tmp_dir)
+        super(HuaweiTFCDriverTestCase, self).tearDown()
+
+    def test_validate_connector_failed(self):
+        invalid_connector = {'host': 'testhost'}
+        self.assertRaises(exception.VolumeBackendAPIException,
+                          self.driver.validate_connector,
+                          invalid_connector)
+
+    def test_create_delete_volume(self):
+        self.driver.create_volume(FAKE_VOLUME)
+        self.assertEqual(LUN_INFO['ID'], VOLUME_SNAP_ID['vol'])
+        self.driver.delete_volume(FAKE_VOLUME)
+        self.assertIsNone(LUN_INFO['ID'])
+
+    def test_create_delete_snapshot(self):
+        self.driver.create_volume(FAKE_VOLUME)
+        self.driver.create_snapshot(FAKE_SNAPSHOT)
+        self.assertEqual(SNAPSHOT_INFO['ID'], VOLUME_SNAP_ID['snap'])
+        self.driver.delete_snapshot(FAKE_SNAPSHOT)
+        self.assertIsNone(SNAPSHOT_INFO['ID'])
+        self.driver.delete_volume(FAKE_VOLUME)
self.assertIsNone(LUN_INFO['ID'])
+
+    def test_create_cloned_volume(self):
+        self.driver.create_volume(FAKE_VOLUME)
+        ret = self.driver.create_cloned_volume(FAKE_CLONED_VOLUME, FAKE_VOLUME)
+        self.assertEqual(CLONED_LUN_INFO['ID'], VOLUME_SNAP_ID['vol_copy'])
+        self.assertEqual(ret['provider_location'], CLONED_LUN_INFO['ID'])
+        self.driver.delete_volume(FAKE_CLONED_VOLUME)
+        self.driver.delete_volume(FAKE_VOLUME)
+        self.assertIsNone(CLONED_LUN_INFO['ID'])
+        self.assertIsNone(LUN_INFO['ID'])
+
+    def test_create_snapshot_volume(self):
+        self.driver.create_volume(FAKE_VOLUME)
+        self.driver.create_snapshot(FAKE_SNAPSHOT)
+        ret = self.driver.create_volume_from_snapshot(FAKE_CLONED_VOLUME,
+                                                      FAKE_SNAPSHOT)
+        self.assertEqual(CLONED_LUN_INFO['ID'], VOLUME_SNAP_ID['vol_copy'])
+        self.assertEqual(ret['provider_location'], CLONED_LUN_INFO['ID'])
+        self.driver.delete_volume(FAKE_CLONED_VOLUME)
+        self.driver.delete_volume(FAKE_VOLUME)
+        self.assertIsNone(CLONED_LUN_INFO['ID'])
+        self.assertIsNone(LUN_INFO['ID'])
+
+    def test_initialize_terminate_connection(self):
+        self.driver.create_volume(FAKE_VOLUME)
+        ret = self.driver.initialize_connection(FAKE_VOLUME, FAKE_CONNECTOR)
+        fc_properties = ret['data']
+        self.assertEqual(fc_properties['target_wwn'],
+                         INITIATOR_SETTING['WWN'])
+        self.assertEqual(MAP_INFO["DEV LUN ID"], LUN_INFO['ID'])
+
+        self.driver.terminate_connection(FAKE_VOLUME, FAKE_CONNECTOR)
+        self.assertIsNone(MAP_INFO["DEV LUN ID"])
+        self.assertIsNone(MAP_INFO["Host LUN ID"])
+        self.driver.delete_volume(FAKE_VOLUME)
+        self.assertIsNone(LUN_INFO['ID'])
+
+    def _test_get_volume_stats(self):
+        stats = self.driver.get_volume_stats(True)
+        fakecapacity = float(POOL_SETTING['Free Capacity']) / 1024
+        self.assertEqual(stats['free_capacity_gb'], fakecapacity)
+        self.assertEqual(stats['storage_protocol'], 'FC')
+
+
+class HuaweiDorado5100FCDriverTestCase(HuaweiTFCDriverTestCase):
+    def __init__(self, *args, **kwargs):
+        super(HuaweiDorado5100FCDriverTestCase, self).__init__(*args, **kwargs)
+
+    def setUp(self):
+        super(HuaweiDorado5100FCDriverTestCase, self).setUp()
+
+    def _init_driver(self):
+        Curr_test[0] = 'Dorado5100'
+        modify_conf(self.fake_conf_file, 'Storage/Product', 'Dorado')
+        self.driver = HuaweiVolumeDriver(configuration=self.configuration)
+        self.driver.do_setup(None)
+
+    def test_create_cloned_volume(self):
+        self.assertRaises(exception.VolumeBackendAPIException,
+                          self.driver.create_cloned_volume,
+                          FAKE_CLONED_VOLUME, FAKE_VOLUME)
+
+    def test_create_snapshot_volume(self):
+        self.assertRaises(exception.VolumeBackendAPIException,
+                          self.driver.create_volume_from_snapshot,
+                          FAKE_CLONED_VOLUME, FAKE_SNAPSHOT)
+
+
+class HuaweiDorado2100G2FCDriverTestCase(HuaweiTFCDriverTestCase):
+    def __init__(self, *args, **kwargs):
+        super(HuaweiDorado2100G2FCDriverTestCase, self).__init__(*args,
+                                                                 **kwargs)
+
+    def setUp(self):
+        super(HuaweiDorado2100G2FCDriverTestCase, self).setUp()
+
+    def _init_driver(self):
+        Curr_test[0] = 'Dorado2100G2'
+        modify_conf(self.fake_conf_file, 'Storage/Product', 'Dorado')
+        self.driver = HuaweiVolumeDriver(configuration=self.configuration)
+        self.driver.do_setup(None)
+
+    def test_create_cloned_volume(self):
+        self.assertRaises(exception.VolumeBackendAPIException,
+                          self.driver.create_cloned_volume,
+                          FAKE_CLONED_VOLUME, FAKE_VOLUME)
+
+    def test_create_delete_snapshot(self):
+        self.assertRaises(exception.VolumeBackendAPIException,
+                          self.driver.create_snapshot, FAKE_SNAPSHOT)
+
+    def test_create_snapshot_volume(self):
self.assertRaises(exception.VolumeBackendAPIException, + self.driver.create_volume_from_snapshot, + FAKE_CLONED_VOLUME, FAKE_SNAPSHOT) + + def test_extend_volume(self): + NEWSIZE = 5 + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.extend_volume, + FAKE_VOLUME, NEWSIZE) + + +class HuaweiDorado5100ISCSIDriverTestCase(HuaweiTISCSIDriverTestCase): + def __init__(self, *args, **kwargs): + super(HuaweiDorado5100ISCSIDriverTestCase, self).__init__(*args, + **kwargs) + + def setUp(self): + super(HuaweiDorado5100ISCSIDriverTestCase, self).setUp() + + def _init_driver(self): + Curr_test[0] = 'Dorado5100' + modify_conf(self.fake_conf_file, 'Storage/Product', 'Dorado') + self.driver = HuaweiVolumeDriver(configuration=self.configuration) + self.driver.do_setup(None) + + def test_create_delete_cloned_volume(self): + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.create_cloned_volume, + FAKE_CLONED_VOLUME, FAKE_VOLUME) + + def test_create_delete_snapshot_volume(self): + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.create_volume_from_snapshot, + FAKE_CLONED_VOLUME, FAKE_SNAPSHOT) + + def test_volume_type(self): + pass + + +class HuaweiDorado2100G2ISCSIDriverTestCase(HuaweiTISCSIDriverTestCase): + def __init__(self, *args, **kwargs): + super(HuaweiDorado2100G2ISCSIDriverTestCase, self).__init__(*args, + **kwargs) + + def setUp(self): + super(HuaweiDorado2100G2ISCSIDriverTestCase, self).setUp() + + def _init_driver(self): + Curr_test[0] = 'Dorado2100G2' + modify_conf(self.fake_conf_file, 'Storage/Product', 'Dorado') + self.driver = HuaweiVolumeDriver(configuration=self.configuration) + self.driver.do_setup(None) + + def test_conf_invalid(self): + pass + + def test_create_delete_cloned_volume(self): + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.create_cloned_volume, + FAKE_CLONED_VOLUME, FAKE_VOLUME) + + def test_create_delete_snapshot(self): + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.create_snapshot, FAKE_SNAPSHOT) + + def test_create_delete_snapshot_volume(self): + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.create_volume_from_snapshot, + FAKE_CLONED_VOLUME, FAKE_SNAPSHOT) + + def test_initialize_connection(self): + self.driver.create_volume(FAKE_VOLUME) + ret = self.driver.initialize_connection(FAKE_VOLUME, FAKE_CONNECTOR) + iscsi_propers = ret['data'] + self.assertEqual(iscsi_propers['target_iqn'], + INITIATOR_SETTING['TargetIQN-form']) + self.assertEqual(iscsi_propers['target_portal'], + INITIATOR_SETTING['Initiator TargetIP'] + ':3260') + self.assertEqual(MAP_INFO["DEV LUN ID"], LUN_INFO['ID']) + self.assertEqual(MAP_INFO["INI Port Info"], + FAKE_CONNECTOR['initiator']) + self.driver.terminate_connection(FAKE_VOLUME, FAKE_CONNECTOR) + self.driver.delete_volume(FAKE_VOLUME) + self.assertIsNone(LUN_INFO['ID']) + + def test_extend_volume(self): + NEWSIZE = 5 + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.extend_volume, + FAKE_VOLUME, NEWSIZE) + + +class SSHMethodTestCase(test.TestCase): + def __init__(self, *args, **kwargs): + super(SSHMethodTestCase, self).__init__(*args, **kwargs) + + def setUp(self): + super(SSHMethodTestCase, self).setUp() + + self.tmp_dir = tempfile.mkdtemp() + self.fake_conf_file = self.tmp_dir + '/cinder_huawei_conf.xml' + create_fake_conf_file(self.fake_conf_file) + self.configuration = mox.MockObject(conf.Configuration) + self.configuration.cinder_huawei_conf_file = self.fake_conf_file + 
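+        # Calling append_config_values() on the mox MockObject records it as
+        # an expected call; the driver is expected to issue the same call
+        # while loading its configuration options.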
self.configuration.append_config_values(mox.IgnoreArg())
+
+        self.stubs.Set(time, 'sleep', Fake_sleep)
+        self.stubs.Set(utils, 'SSHPool', FakeSSHPool)
+        self.stubs.Set(ssh_common.TseriesCommon, '_change_file_mode',
+                       Fake_change_file_mode)
+        Curr_test[0] = 'T'
+        self.driver = HuaweiVolumeDriver(configuration=self.configuration)
+        self.driver.do_setup(None)
+
+    def tearDown(self):
+        if os.path.exists(self.fake_conf_file):
+            os.remove(self.fake_conf_file)
+        shutil.rmtree(self.tmp_dir)
+        super(SSHMethodTestCase, self).tearDown()
+
+    def test_reach_max_connection_limit(self):
+        self.stubs.Set(FakeChannel, 'recv', self._fake_recv1)
+        self.assertRaises(exception.CinderException,
+                          self.driver.create_volume, FAKE_VOLUME)
+
+    def test_socket_timeout(self):
+        self.stubs.Set(FakeChannel, 'recv', self._fake_recv2)
+        self.assertRaises(socket.timeout,
+                          self.driver.create_volume, FAKE_VOLUME)
+
+    def _fake_recv1(self, nbytes):
+        return "No response message"
+
+    def _fake_recv2(self, nbytes):
+        raise socket.timeout()
+
+
+class HuaweiUtilsTestCase(test.TestCase):
+    def __init__(self, *args, **kwargs):
+        super(HuaweiUtilsTestCase, self).__init__(*args, **kwargs)
+
+    def setUp(self):
+        super(HuaweiUtilsTestCase, self).setUp()
+
+        self.tmp_dir = tempfile.mkdtemp()
+        self.fake_conf_file = self.tmp_dir + '/cinder_huawei_conf.xml'
+        create_fake_conf_file(self.fake_conf_file)
+
+    def tearDown(self):
+        if os.path.exists(self.fake_conf_file):
+            os.remove(self.fake_conf_file)
+        shutil.rmtree(self.tmp_dir)
+        super(HuaweiUtilsTestCase, self).tearDown()
+
+    def test_parse_xml_file_ioerror(self):
+        tmp_conf_file = '/xxx/cinder_huawei_conf.xml'
+        self.assertRaises(IOError, huawei_utils.parse_xml_file, tmp_conf_file)
+
+    def test_is_xml_item_exist(self):
+        root = huawei_utils.parse_xml_file(self.fake_conf_file)
+        res = huawei_utils.is_xml_item_exist(root, 'Storage/UserName')
+        self.assertTrue(res)
+        res = huawei_utils.is_xml_item_exist(root, 'xxx')
+        self.assertFalse(res)
+        res = huawei_utils.is_xml_item_exist(root, 'LUN/StoragePool', 'Name')
+        self.assertTrue(res)
+        res = huawei_utils.is_xml_item_exist(root, 'LUN/StoragePool', 'xxx')
+        self.assertFalse(res)
+
+    def test_is_xml_item_valid(self):
+        root = huawei_utils.parse_xml_file(self.fake_conf_file)
+        res = huawei_utils.is_xml_item_valid(root, 'LUN/LUNType',
+                                             ['Thin', 'Thick'])
+        self.assertTrue(res)
+        res = huawei_utils.is_xml_item_valid(root, 'LUN/LUNType', ['test'])
+        self.assertFalse(res)
+        res = huawei_utils.is_xml_item_valid(root, 'Host',
+                                             ['Linux', 'Windows'], 'OSType')
+        self.assertTrue(res)
+        res = huawei_utils.is_xml_item_valid(root, 'Host', ['test'], 'OSType')
+        self.assertFalse(res)
+
+    def test_get_conf_host_os_type(self):
+        # Default os is Linux
+        res = huawei_utils.get_conf_host_os_type('10.10.10.1',
+                                                 self.fake_conf_file)
+        self.assertEqual(res, '0')
+        modify_conf(self.fake_conf_file, 'Host', 'Windows', 'OSType')
+        res = huawei_utils.get_conf_host_os_type(FAKE_CONNECTOR['ip'],
+                                                 self.fake_conf_file)
+        self.assertEqual(res, '1')
diff --git a/cinder/tests/test_image_utils.py b/cinder/tests/test_image_utils.py
new file mode 100644
index 0000000000..03c4dcb400
--- /dev/null
+++ b/cinder/tests/test_image_utils.py
@@ -0,0 +1,667 @@
+
+# Copyright (c) 2013 eNovance , Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""Unit tests for image utils.""" + +import contextlib +import mox +import tempfile + +from cinder import context +from cinder import exception +from cinder.image import image_utils +from cinder.openstack.common import processutils +from cinder import test +from cinder import units +from cinder import utils + + +class FakeImageService: + def __init__(self): + self._imagedata = {} + + def download(self, context, image_id, data): + self.show(context, image_id) + data.write(self._imagedata.get(image_id, '')) + + def show(self, context, image_id): + return {'size': 2 * units.GiB, + 'disk_format': 'qcow2', + 'container_format': 'bare'} + + def update(self, context, image_id, metadata, path): + pass + + +class TestUtils(test.TestCase): + TEST_IMAGE_ID = 321 + TEST_DEV_PATH = "/dev/ether/fake_dev" + + def setUp(self): + super(TestUtils, self).setUp() + self._mox = mox.Mox() + self._image_service = FakeImageService() + + self.addCleanup(self._mox.UnsetStubs) + + def test_resize_image(self): + mox = self._mox + mox.StubOutWithMock(utils, 'execute') + + TEST_IMG_SOURCE = 'boobar.img' + TEST_IMG_SIZE_IN_GB = 1 + + utils.execute('qemu-img', 'resize', TEST_IMG_SOURCE, + '%sG' % TEST_IMG_SIZE_IN_GB, run_as_root=False) + + mox.ReplayAll() + + image_utils.resize_image(TEST_IMG_SOURCE, TEST_IMG_SIZE_IN_GB) + + mox.VerifyAll() + + def test_convert_image(self): + mox = self._mox + mox.StubOutWithMock(utils, 'execute') + + TEST_OUT_FORMAT = 'vmdk' + TEST_SOURCE = 'img/qemu.img' + TEST_DEST = '/img/vmware.vmdk' + + utils.execute('qemu-img', 'convert', '-O', TEST_OUT_FORMAT, + TEST_SOURCE, TEST_DEST, run_as_root=True) + + mox.ReplayAll() + + image_utils.convert_image(TEST_SOURCE, TEST_DEST, TEST_OUT_FORMAT) + + mox.VerifyAll() + + def test_qemu_img_info(self): + TEST_PATH = "img/qemu.qcow2" + TEST_RETURN = "image: qemu.qcow2\n"\ + "backing_file: qemu.qcow2 (actual path: qemu.qcow2)\n"\ + "file_format: qcow2\n"\ + "virtual_size: 50M (52428800 bytes)\n"\ + "cluster_size: 65536\n"\ + "disk_size: 196K (200704 bytes)\n"\ + "Snapshot list:\n"\ + "ID TAG VM SIZE DATE VM CLOCK\n"\ + "1 snap1 1.7G 2011-10-04 19:04:00 32:06:34.974" + TEST_STR = "image: qemu.qcow2\n"\ + "file_format: qcow2\n"\ + "virtual_size: 52428800\n"\ + "disk_size: 200704\n"\ + "cluster_size: 65536\n"\ + "backing_file: qemu.qcow2\n"\ + "snapshots: [{'date': '2011-10-04', "\ + "'vm_clock': '19:04:00 32:06:34.974', "\ + "'vm_size': '1.7G', 'tag': 'snap1', 'id': '1'}]" + + mox = self._mox + mox.StubOutWithMock(utils, 'execute') + + utils.execute( + 'env', 'LC_ALL=C', 'qemu-img', 'info', + TEST_PATH, run_as_root=True).AndReturn( + (TEST_RETURN, 'ignored') + ) + + mox.ReplayAll() + + inf = image_utils.qemu_img_info(TEST_PATH) + + self.assertEqual(inf.image, 'qemu.qcow2') + self.assertEqual(inf.backing_file, 'qemu.qcow2') + self.assertEqual(inf.file_format, 'qcow2') + self.assertEqual(inf.virtual_size, 52428800) + self.assertEqual(inf.cluster_size, 65536) + self.assertEqual(inf.disk_size, 200704) + + self.assertEqual(inf.snapshots[0]['id'], '1') + self.assertEqual(inf.snapshots[0]['tag'], 'snap1') + self.assertEqual(inf.snapshots[0]['vm_size'], 
'1.7G') + self.assertEqual(inf.snapshots[0]['date'], '2011-10-04') + self.assertEqual(inf.snapshots[0]['vm_clock'], '19:04:00 32:06:34.974') + + self.assertEqual(str(inf), TEST_STR) + + def test_qemu_img_info_alt(self): + """Test a slightly different variation of qemu-img output. + + (Based on Fedora 19's qemu-img 1.4.2.) + """ + + TEST_PATH = "img/qemu.qcow2" + TEST_RETURN = "image: qemu.qcow2\n"\ + "backing file: qemu.qcow2 (actual path: qemu.qcow2)\n"\ + "file format: qcow2\n"\ + "virtual size: 50M (52428800 bytes)\n"\ + "cluster_size: 65536\n"\ + "disk size: 196K (200704 bytes)\n"\ + "Snapshot list:\n"\ + "ID TAG VM SIZE DATE VM CLOCK\n"\ + "1 snap1 1.7G 2011-10-04 19:04:00 32:06:34.974" + TEST_STR = "image: qemu.qcow2\n"\ + "file_format: qcow2\n"\ + "virtual_size: 52428800\n"\ + "disk_size: 200704\n"\ + "cluster_size: 65536\n"\ + "backing_file: qemu.qcow2\n"\ + "snapshots: [{'date': '2011-10-04', "\ + "'vm_clock': '19:04:00 32:06:34.974', "\ + "'vm_size': '1.7G', 'tag': 'snap1', 'id': '1'}]" + + mox = self._mox + mox.StubOutWithMock(utils, 'execute') + + cmd = ['env', 'LC_ALL=C', 'qemu-img', 'info', TEST_PATH] + utils.execute(*cmd, run_as_root=True).AndReturn( + (TEST_RETURN, 'ignored')) + + mox.ReplayAll() + + inf = image_utils.qemu_img_info(TEST_PATH) + + self.assertEqual(inf.image, 'qemu.qcow2') + self.assertEqual(inf.backing_file, 'qemu.qcow2') + self.assertEqual(inf.file_format, 'qcow2') + self.assertEqual(inf.virtual_size, 52428800) + self.assertEqual(inf.cluster_size, 65536) + self.assertEqual(inf.disk_size, 200704) + + self.assertEqual(inf.snapshots[0]['id'], '1') + self.assertEqual(inf.snapshots[0]['tag'], 'snap1') + self.assertEqual(inf.snapshots[0]['vm_size'], '1.7G') + self.assertEqual(inf.snapshots[0]['date'], '2011-10-04') + self.assertEqual(inf.snapshots[0]['vm_clock'], + '19:04:00 32:06:34.974') + + self.assertEqual(str(inf), TEST_STR) + + def _test_fetch_to_raw(self, has_qemu=True, src_inf=None, dest_inf=None): + mox = self._mox + mox.StubOutWithMock(image_utils, 'create_temporary_file') + mox.StubOutWithMock(utils, 'execute') + mox.StubOutWithMock(image_utils, 'fetch') + + TEST_INFO = ("image: qemu.qcow2\n" + "file format: raw\n" + "virtual size: 0 (0 bytes)\n" + "disk size: 0") + + image_utils.create_temporary_file().AndReturn(self.TEST_DEV_PATH) + + test_qemu_img = utils.execute( + 'env', 'LC_ALL=C', 'qemu-img', 'info', self.TEST_DEV_PATH, + run_as_root=True) + + if has_qemu: + test_qemu_img.AndReturn((TEST_INFO, 'ignored')) + image_utils.fetch(context, self._image_service, self.TEST_IMAGE_ID, + self.TEST_DEV_PATH, None, None) + else: + test_qemu_img.AndRaise(processutils.ProcessExecutionError()) + + if has_qemu and src_inf: + utils.execute( + 'env', 'LC_ALL=C', 'qemu-img', 'info', + self.TEST_DEV_PATH, run_as_root=True).AndReturn( + (src_inf, 'ignored') + ) + + if has_qemu and dest_inf: + utils.execute( + 'qemu-img', 'convert', '-O', 'raw', + self.TEST_DEV_PATH, self.TEST_DEV_PATH, run_as_root=True) + + utils.execute( + 'env', 'LC_ALL=C', 'qemu-img', 'info', + self.TEST_DEV_PATH, run_as_root=True).AndReturn( + (dest_inf, 'ignored') + ) + + self._mox.ReplayAll() + + def test_fetch_to_raw(self): + SRC_INFO = ("image: qemu.qcow2\n" + "file_format: qcow2 \n" + "virtual_size: 50M (52428800 bytes)\n" + "cluster_size: 65536\n" + "disk_size: 196K (200704 bytes)") + DST_INFO = ("image: qemu.raw\n" + "file_format: raw\n" + "virtual_size: 50M (52428800 bytes)\n" + "cluster_size: 65536\n" + "disk_size: 196K (200704 bytes)\n") + + self._test_fetch_to_raw(src_inf=SRC_INFO, 
dest_inf=DST_INFO) + + image_utils.fetch_to_raw(context, self._image_service, + self.TEST_IMAGE_ID, self.TEST_DEV_PATH, + mox.IgnoreArg()) + self._mox.VerifyAll() + + def test_fetch_to_raw_no_qemu_img(self): + self._test_fetch_to_raw(has_qemu=False) + + self.assertRaises(exception.ImageUnacceptable, + image_utils.fetch_to_raw, + context, self._image_service, + self.TEST_IMAGE_ID, self.TEST_DEV_PATH, + mox.IgnoreArg()) + + self._mox.VerifyAll() + + def test_fetch_to_raw_on_error_parsing_failed(self): + SRC_INFO_NO_FORMAT = ("image: qemu.qcow2\n" + "virtual_size: 50M (52428800 bytes)\n" + "cluster_size: 65536\n" + "disk_size: 196K (200704 bytes)") + + self._test_fetch_to_raw(src_inf=SRC_INFO_NO_FORMAT) + + self.assertRaises(exception.ImageUnacceptable, + image_utils.fetch_to_raw, + context, self._image_service, + self.TEST_IMAGE_ID, self.TEST_DEV_PATH, + mox.IgnoreArg()) + self._mox.VerifyAll() + + def test_fetch_to_raw_on_error_backing_file(self): + SRC_INFO_BACKING_FILE = ("image: qemu.qcow2\n" + "backing_file: qemu.qcow2\n" + "file_format: qcow2 \n" + "virtual_size: 50M (52428800 bytes)\n" + "cluster_size: 65536\n" + "disk_size: 196K (200704 bytes)") + + self._test_fetch_to_raw(src_inf=SRC_INFO_BACKING_FILE) + + self.assertRaises(exception.ImageUnacceptable, + image_utils.fetch_to_raw, + context, self._image_service, + self.TEST_IMAGE_ID, self.TEST_DEV_PATH, + mox.IgnoreArg()) + self._mox.VerifyAll() + + def test_fetch_to_raw_on_error_not_convert_to_raw(self): + IMG_INFO = ("image: qemu.qcow2\n" + "file_format: qcow2 \n" + "virtual_size: 50M (52428800 bytes)\n" + "cluster_size: 65536\n" + "disk_size: 196K (200704 bytes)") + + self._test_fetch_to_raw(src_inf=IMG_INFO, dest_inf=IMG_INFO) + + self.assertRaises(exception.ImageUnacceptable, + image_utils.fetch_to_raw, + context, self._image_service, + self.TEST_IMAGE_ID, self.TEST_DEV_PATH, + mox.IgnoreArg()) + + def test_fetch_to_raw_on_error_image_size(self): + TEST_VOLUME_SIZE = 1 + SRC_INFO = ("image: qemu.qcow2\n" + "file_format: qcow2 \n" + "virtual_size: 2G (2147483648 bytes)\n" + "cluster_size: 65536\n" + "disk_size: 196K (200704 bytes)") + + self._test_fetch_to_raw(src_inf=SRC_INFO) + + self.assertRaises(exception.ImageUnacceptable, + image_utils.fetch_to_raw, + context, self._image_service, + self.TEST_IMAGE_ID, self.TEST_DEV_PATH, + mox.IgnoreArg(), size=TEST_VOLUME_SIZE) + + def _test_fetch_verify_image(self, qemu_info, volume_size=1): + fake_image_service = FakeImageService() + mox = self._mox + mox.StubOutWithMock(image_utils, 'fetch') + mox.StubOutWithMock(utils, 'execute') + image_utils.fetch(context, fake_image_service, + self.TEST_IMAGE_ID, self.TEST_DEV_PATH, None, None) + + utils.execute( + 'env', 'LC_ALL=C', 'qemu-img', 'info', + self.TEST_DEV_PATH, run_as_root=True).AndReturn( + (qemu_info, 'ignored') + ) + + self._mox.ReplayAll() + self.assertRaises(exception.ImageUnacceptable, + image_utils.fetch_verify_image, + context, fake_image_service, + self.TEST_IMAGE_ID, self.TEST_DEV_PATH, + size=volume_size) + + def test_fetch_verify_image_with_backing_file(self): + TEST_RETURN = "image: qemu.qcow2\n"\ + "backing_file: qemu.qcow2 (actual path: qemu.qcow2)\n"\ + "file_format: qcow2\n"\ + "virtual_size: 50M (52428800 bytes)\n"\ + "cluster_size: 65536\n"\ + "disk_size: 196K (200704 bytes)\n"\ + "Snapshot list:\n"\ + "ID TAG VM SIZE DATE VM CLOCK\n"\ + "1 snap1 1.7G 2011-10-04 19:04:00 32:06:34.974" + + self._test_fetch_verify_image(TEST_RETURN) + + def test_fetch_verify_image_without_file_format(self): + TEST_RETURN = "image: 
qemu.qcow2\n"\ + "virtual_size: 50M (52428800 bytes)\n"\ + "cluster_size: 65536\n"\ + "disk_size: 196K (200704 bytes)\n"\ + "Snapshot list:\n"\ + "ID TAG VM SIZE DATE VM CLOCK\n"\ + "1 snap1 1.7G 2011-10-04 19:04:00 32:06:34.974" + + self._test_fetch_verify_image(TEST_RETURN) + + def test_fetch_verify_image_image_size(self): + TEST_RETURN = "image: qemu.qcow2\n"\ + "file_format: qcow2\n"\ + "virtual_size: 2G (2147483648 bytes)\n"\ + "cluster_size: 65536\n"\ + "disk_size: 196K (200704 bytes)\n"\ + "Snapshot list:\n"\ + "ID TAG VM SIZE DATE VM CLOCK\n"\ + "1 snap1 1.7G 2011-10-04 19:04:00 32:06:34.974" + + self._test_fetch_verify_image(TEST_RETURN) + + def test_upload_volume(self): + image_meta = {'id': 1, 'disk_format': 'qcow2'} + TEST_RET = "image: qemu.qcow2\n"\ + "file_format: qcow2 \n"\ + "virtual_size: 50M (52428800 bytes)\n"\ + "cluster_size: 65536\n"\ + "disk_size: 196K (200704 bytes)" + + m = self._mox + m.StubOutWithMock(utils, 'execute') + + utils.execute('qemu-img', 'convert', '-O', 'qcow2', + mox.IgnoreArg(), mox.IgnoreArg(), run_as_root=True) + utils.execute( + 'env', 'LC_ALL=C', 'qemu-img', 'info', + mox.IgnoreArg(), run_as_root=True).AndReturn( + (TEST_RET, 'ignored') + ) + + m.ReplayAll() + + image_utils.upload_volume(context, FakeImageService(), + image_meta, '/dev/loop1') + m.VerifyAll() + + def test_upload_volume_with_raw_image(self): + image_meta = {'id': 1, 'disk_format': 'raw'} + mox = self._mox + + mox.StubOutWithMock(image_utils, 'convert_image') + + mox.ReplayAll() + + with tempfile.NamedTemporaryFile() as f: + image_utils.upload_volume(context, FakeImageService(), + image_meta, f.name) + mox.VerifyAll() + + def test_upload_volume_on_error(self): + image_meta = {'id': 1, 'disk_format': 'qcow2'} + TEST_RET = "image: qemu.vhd\n"\ + "file_format: vhd \n"\ + "virtual_size: 50M (52428800 bytes)\n"\ + "cluster_size: 65536\n"\ + "disk_size: 196K (200704 bytes)" + + m = self._mox + m.StubOutWithMock(utils, 'execute') + + utils.execute('qemu-img', 'convert', '-O', 'qcow2', + mox.IgnoreArg(), mox.IgnoreArg(), run_as_root=True) + utils.execute( + 'env', 'LC_ALL=C', 'qemu-img', 'info', + mox.IgnoreArg(), run_as_root=True).AndReturn( + (TEST_RET, 'ignored') + ) + + m.ReplayAll() + + self.assertRaises(exception.ImageUnacceptable, + image_utils.upload_volume, + context, FakeImageService(), + image_meta, '/dev/loop1') + m.VerifyAll() + + +class TestExtractTo(test.TestCase): + def test_extract_to_calls_tar(self): + mox = self.mox + mox.StubOutWithMock(utils, 'execute') + + utils.execute( + 'tar', '-xzf', 'archive.tgz', '-C', 'targetpath').AndReturn( + ('ignored', 'ignored') + ) + + mox.ReplayAll() + + image_utils.extract_targz('archive.tgz', 'targetpath') + mox.VerifyAll() + + +class TestSetVhdParent(test.TestCase): + def test_vhd_util_call(self): + mox = self.mox + mox.StubOutWithMock(utils, 'execute') + + utils.execute( + 'vhd-util', 'modify', '-n', 'child', '-p', 'parent').AndReturn( + ('ignored', 'ignored') + ) + + mox.ReplayAll() + + image_utils.set_vhd_parent('child', 'parent') + mox.VerifyAll() + + +class TestFixVhdChain(test.TestCase): + def test_empty_chain(self): + mox = self.mox + mox.StubOutWithMock(image_utils, 'set_vhd_parent') + + mox.ReplayAll() + image_utils.fix_vhd_chain([]) + + def test_single_vhd_file_chain(self): + mox = self.mox + mox.StubOutWithMock(image_utils, 'set_vhd_parent') + + mox.ReplayAll() + image_utils.fix_vhd_chain(['0.vhd']) + + def test_chain_with_two_elements(self): + mox = self.mox + mox.StubOutWithMock(image_utils, 'set_vhd_parent') + + 
image_utils.set_vhd_parent('0.vhd', '1.vhd') + + mox.ReplayAll() + image_utils.fix_vhd_chain(['0.vhd', '1.vhd']) + + +class TestGetSize(test.TestCase): + def test_vhd_util_call(self): + mox = self.mox + mox.StubOutWithMock(utils, 'execute') + + utils.execute( + 'vhd-util', 'query', '-n', 'vhdfile', '-v').AndReturn( + ('1024', 'ignored') + ) + + mox.ReplayAll() + + result = image_utils.get_vhd_size('vhdfile') + mox.VerifyAll() + + self.assertEqual(1024, result) + + +class TestResize(test.TestCase): + def test_vhd_util_call(self): + mox = self.mox + mox.StubOutWithMock(utils, 'execute') + + utils.execute( + 'vhd-util', 'resize', '-n', 'vhdfile', '-s', '1024', + '-j', 'journal').AndReturn(('ignored', 'ignored')) + + mox.ReplayAll() + + image_utils.resize_vhd('vhdfile', 1024, 'journal') + mox.VerifyAll() + + +class TestCoalesce(test.TestCase): + def test_vhd_util_call(self): + mox = self.mox + mox.StubOutWithMock(utils, 'execute') + + utils.execute( + 'vhd-util', 'coalesce', '-n', 'vhdfile' + ).AndReturn(('ignored', 'ignored')) + + mox.ReplayAll() + + image_utils.coalesce_vhd('vhdfile') + mox.VerifyAll() + + +@contextlib.contextmanager +def fake_context(return_value): + yield return_value + + +class TestTemporaryFile(test.TestCase): + def test_file_unlinked(self): + mox = self.mox + mox.StubOutWithMock(image_utils, 'create_temporary_file') + mox.StubOutWithMock(image_utils.os, 'unlink') + + image_utils.create_temporary_file().AndReturn('somefile') + image_utils.os.unlink('somefile') + + mox.ReplayAll() + + with image_utils.temporary_file(): + pass + + def test_file_unlinked_on_error(self): + mox = self.mox + mox.StubOutWithMock(image_utils, 'create_temporary_file') + mox.StubOutWithMock(image_utils.os, 'unlink') + + image_utils.create_temporary_file().AndReturn('somefile') + image_utils.os.unlink('somefile') + + mox.ReplayAll() + + def sut(): + with image_utils.temporary_file(): + raise test.TestingException() + + self.assertRaises(test.TestingException, sut) + + +class TestCoalesceChain(test.TestCase): + def test_single_vhd(self): + mox = self.mox + mox.StubOutWithMock(image_utils, 'get_vhd_size') + mox.StubOutWithMock(image_utils, 'resize_vhd') + mox.StubOutWithMock(image_utils, 'coalesce_vhd') + + mox.ReplayAll() + + result = image_utils.coalesce_chain(['0.vhd']) + mox.VerifyAll() + + self.assertEqual('0.vhd', result) + + def test_chain_of_two_vhds(self): + self.mox.StubOutWithMock(image_utils, 'get_vhd_size') + self.mox.StubOutWithMock(image_utils, 'temporary_dir') + self.mox.StubOutWithMock(image_utils, 'resize_vhd') + self.mox.StubOutWithMock(image_utils, 'coalesce_vhd') + self.mox.StubOutWithMock(image_utils, 'temporary_file') + + image_utils.get_vhd_size('0.vhd').AndReturn(1024) + image_utils.temporary_dir().AndReturn(fake_context('tdir')) + image_utils.resize_vhd('1.vhd', 1024, 'tdir/vhd-util-resize-journal') + image_utils.coalesce_vhd('0.vhd') + self.mox.ReplayAll() + + result = image_utils.coalesce_chain(['0.vhd', '1.vhd']) + self.mox.VerifyAll() + self.assertEqual('1.vhd', result) + + +class TestDiscoverChain(test.TestCase): + def test_discovery_calls(self): + mox = self.mox + mox.StubOutWithMock(image_utils, 'file_exist') + + image_utils.file_exist('some/path/0.vhd').AndReturn(True) + image_utils.file_exist('some/path/1.vhd').AndReturn(True) + image_utils.file_exist('some/path/2.vhd').AndReturn(False) + + mox.ReplayAll() + result = image_utils.discover_vhd_chain('some/path') + mox.VerifyAll() + + self.assertEqual( + ['some/path/0.vhd', 'some/path/1.vhd'], result) + + +class 
TestXenServerImageToCoalescedVhd(test.TestCase): + def test_calls(self): + mox = self.mox + mox.StubOutWithMock(image_utils, 'temporary_dir') + mox.StubOutWithMock(image_utils, 'extract_targz') + mox.StubOutWithMock(image_utils, 'discover_vhd_chain') + mox.StubOutWithMock(image_utils, 'fix_vhd_chain') + mox.StubOutWithMock(image_utils, 'coalesce_chain') + mox.StubOutWithMock(image_utils.os, 'unlink') + mox.StubOutWithMock(image_utils, 'rename_file') + + image_utils.temporary_dir().AndReturn(fake_context('somedir')) + image_utils.extract_targz('image', 'somedir') + image_utils.discover_vhd_chain('somedir').AndReturn( + ['somedir/0.vhd', 'somedir/1.vhd']) + image_utils.fix_vhd_chain(['somedir/0.vhd', 'somedir/1.vhd']) + image_utils.coalesce_chain( + ['somedir/0.vhd', 'somedir/1.vhd']).AndReturn('somedir/1.vhd') + image_utils.os.unlink('image') + image_utils.rename_file('somedir/1.vhd', 'image') + + mox.ReplayAll() + image_utils.replace_xenserver_image_with_coalesced_vhd('image') + mox.VerifyAll() diff --git a/cinder/tests/test_iscsi.py b/cinder/tests/test_iscsi.py index 38c760835b..a4c2e0f32f 100644 --- a/cinder/tests/test_iscsi.py +++ b/cinder/tests/test_iscsi.py @@ -1,4 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2011 Red Hat, Inc. # @@ -19,8 +18,9 @@ import string import tempfile +from cinder.brick.iscsi import iscsi from cinder import test -from cinder.volume import iscsi +from cinder.volume import driver class TargetAdminTestCase(object): @@ -29,15 +29,31 @@ def setUp(self): self.cmds = [] self.tid = 1 - self.target_name = 'iqn.2011-09.org.foo.bar:blaa' + self.target_name = 'iqn.2011-09.org.foo.bar:volume-blaa' self.lun = 10 self.path = '/foo' self.vol_id = 'blaa' + self.vol_name = 'volume-blaa' self.script_template = None self.stubs.Set(os.path, 'isfile', lambda _: True) self.stubs.Set(os, 'unlink', lambda _: '') self.stubs.Set(iscsi.TgtAdm, '_get_target', self.fake_get_target) + self.stubs.Set(iscsi.LioAdm, '_get_target', self.fake_get_target) + self.stubs.Set(iscsi.LioAdm, + '_verify_rtstool', + self.fake_verify_rtstool) + self.driver = driver.ISCSIDriver() + self.stubs.Set(iscsi.TgtAdm, '_verify_backing_lun', + self.fake_verify_backing_lun) + self.driver = driver.ISCSIDriver() + self.flags(iscsi_target_prefix='iqn.2011-09.org.foo.bar:') + + def fake_verify_backing_lun(obj, iqn, tid): + return True + + def fake_verify_rtstool(obj): + pass def fake_get_target(obj, iqn): return 1 @@ -56,12 +72,12 @@ def fake_execute(self, *cmd, **kwargs): return "", None def clear_cmds(self): - cmds = [] + self.cmds = [] def verify_cmds(self, cmds): self.assertEqual(len(cmds), len(self.cmds)) - for a, b in zip(cmds, self.cmds): - self.assertEqual(a, b) + for cmd in self.cmds: + self.assertTrue(cmd in cmds) def verify(self): script = self.get_script() @@ -73,12 +89,13 @@ def verify(self): self.verify_cmds(cmds) def run_commands(self): - tgtadm = iscsi.get_target_admin() + tgtadm = self.driver.get_target_admin() tgtadm.set_execute(self.fake_execute) tgtadm.create_iscsi_target(self.target_name, self.tid, - self.lun, self.path) + self.lun, self.path) tgtadm.show_target(self.tid, iqn=self.target_name) - tgtadm.remove_iscsi_target(self.tid, self.lun, self.vol_id) + tgtadm.remove_iscsi_target(self.tid, self.lun, self.vol_id, + self.vol_name) def test_target_admin(self): self.clear_cmds() @@ -95,8 +112,10 @@ def setUp(self): self.flags(iscsi_helper='tgtadm') self.flags(volumes_dir=self.persist_tempdir) self.script_template = "\n".join([ - 'tgt-admin --update iqn.2011-09.org.foo.bar:blaa', 
- 'tgt-admin --delete iqn.2010-10.org.openstack:volume-blaa']) + 'tgt-admin --update %(target_name)s', + 'tgt-admin --force ' + '--delete %(target_name)s', + 'tgtadm --lld iscsi --op show --mode target']) def tearDown(self): try: @@ -113,9 +132,78 @@ def setUp(self): TargetAdminTestCase.setUp(self) self.flags(iscsi_helper='ietadm') self.script_template = "\n".join([ - 'ietadm --op new --tid=%(tid)s --params Name=%(target_name)s', - 'ietadm --op new --tid=%(tid)s --lun=%(lun)s ' - '--params Path=%(path)s,Type=fileio', - 'ietadm --op show --tid=%(tid)s', - 'ietadm --op delete --tid=%(tid)s --lun=%(lun)s', - 'ietadm --op delete --tid=%(tid)s']) + 'ietadm --op new --tid=%(tid)s --params Name=%(target_name)s', + 'ietadm --op new --tid=%(tid)s --lun=%(lun)s ' + '--params Path=%(path)s,Type=fileio', + 'ietadm --op show --tid=%(tid)s', + 'ietadm --op delete --tid=%(tid)s --lun=%(lun)s', + 'ietadm --op delete --tid=%(tid)s']) + + +class IetAdmBlockIOTestCase(test.TestCase, TargetAdminTestCase): + + def setUp(self): + super(IetAdmBlockIOTestCase, self).setUp() + TargetAdminTestCase.setUp(self) + self.flags(iscsi_helper='ietadm') + self.flags(iscsi_iotype='blockio') + self.script_template = "\n".join([ + 'ietadm --op new --tid=%(tid)s --params Name=%(target_name)s', + 'ietadm --op new --tid=%(tid)s --lun=%(lun)s ' + '--params Path=%(path)s,Type=blockio', + 'ietadm --op show --tid=%(tid)s', + 'ietadm --op delete --tid=%(tid)s --lun=%(lun)s', + 'ietadm --op delete --tid=%(tid)s']) + + +class IetAdmFileIOTestCase(test.TestCase, TargetAdminTestCase): + + def setUp(self): + super(IetAdmFileIOTestCase, self).setUp() + TargetAdminTestCase.setUp(self) + self.flags(iscsi_helper='ietadm') + self.flags(iscsi_iotype='fileio') + self.script_template = "\n".join([ + 'ietadm --op new --tid=%(tid)s --params Name=%(target_name)s', + 'ietadm --op new --tid=%(tid)s --lun=%(lun)s ' + '--params Path=%(path)s,Type=fileio', + 'ietadm --op show --tid=%(tid)s', + 'ietadm --op delete --tid=%(tid)s --lun=%(lun)s', + 'ietadm --op delete --tid=%(tid)s']) + + +class IetAdmAutoIOTestCase(test.TestCase, TargetAdminTestCase): + + def setUp(self): + super(IetAdmAutoIOTestCase, self).setUp() + TargetAdminTestCase.setUp(self) + self.stubs.Set(iscsi.IetAdm, '_is_block', lambda a, b: True) + self.flags(iscsi_helper='ietadm') + self.flags(iscsi_iotype='auto') + self.script_template = "\n".join([ + 'ietadm --op new --tid=%(tid)s --params Name=%(target_name)s', + 'ietadm --op new --tid=%(tid)s --lun=%(lun)s ' + '--params Path=%(path)s,Type=blockio', + 'ietadm --op show --tid=%(tid)s', + 'ietadm --op delete --tid=%(tid)s --lun=%(lun)s', + 'ietadm --op delete --tid=%(tid)s']) + + +class LioAdmTestCase(test.TestCase, TargetAdminTestCase): + + def setUp(self): + super(LioAdmTestCase, self).setUp() + TargetAdminTestCase.setUp(self) + self.persist_tempdir = tempfile.mkdtemp() + self.flags(iscsi_helper='lioadm') + self.script_template = "\n".join([ + 'cinder-rtstool create ' + '%(path)s %(target_name)s test_id test_pass', + 'cinder-rtstool delete %(target_name)s']) + + +class ISERTgtAdmTestCase(TgtAdmTestCase): + + def setUp(self): + super(ISERTgtAdmTestCase, self).setUp() + self.flags(iscsi_helper='iseradm') diff --git a/cinder/tests/test_migrations.py b/cinder/tests/test_migrations.py index 913baae593..26ebf68cdf 100644 --- a/cinder/tests/test_migrations.py +++ b/cinder/tests/test_migrations.py @@ -1,6 +1,5 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright 2010-2011 OpenStack, LLC +# Copyright 2010-2011 OpenStack Foundation # All 
Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -25,15 +24,17 @@ """ import ConfigParser -import commands import os +import subprocess import urlparse +import uuid from migrate.versioning import repository import sqlalchemy +import testtools -import cinder.db.sqlalchemy.migrate_repo import cinder.db.migration as migration +import cinder.db.sqlalchemy.migrate_repo from cinder.db.sqlalchemy.migration import versioning_api as migration_api from cinder.openstack.common import log as logging from cinder import test @@ -41,44 +42,72 @@ LOG = logging.getLogger('cinder.tests.test_migrations') -def _mysql_get_connect_string(user="openstack_citest", - passwd="openstack_citest", - database="openstack_citest"): - """ - Try to get a connection with a very specfic set of values, if we get - these then we'll run the mysql tests, otherwise they are skipped +def _get_connect_string(backend, + user="openstack_citest", + passwd="openstack_citest", + database="openstack_citest"): + """Return connect string. + + Try to get a connection with a very specific set of values, if we get + these then we'll run the tests, otherwise they are skipped. """ - return "mysql://%(user)s:%(passwd)s@localhost/%(database)s" % locals() + if backend == "postgres": + backend = "postgresql+psycopg2" + + return ("%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s" % + {'backend': backend, 'user': user, 'passwd': passwd, + 'database': database}) -def _is_mysql_avail(user="openstack_citest", - passwd="openstack_citest", - database="openstack_citest"): +def _is_mysql_avail(**kwargs): + return _is_backend_avail('mysql', **kwargs) + + +def _is_backend_avail(backend, + user="openstack_citest", + passwd="openstack_citest", + database="openstack_citest"): try: - connect_uri = _mysql_get_connect_string( - user=user, passwd=passwd, database=database) + if backend == "mysql": + connect_uri = _get_connect_string("mysql", user=user, + passwd=passwd, database=database) + elif backend == "postgres": + connect_uri = _get_connect_string("postgres", user=user, + passwd=passwd, database=database) engine = sqlalchemy.create_engine(connect_uri) connection = engine.connect() except Exception: - # intential catch all to handle exceptions even if we don't - # have mysql code loaded at all. + # intentionally catch all to handle exceptions even if we don't + # have any backend code loaded. + LOG.exception("Backend %s is not available", backend) return False else: connection.close() + engine.dispose() return True def _have_mysql(): present = os.environ.get('NOVA_TEST_MYSQL_PRESENT') if present is None: - return _is_mysql_avail() + return _is_backend_avail('mysql') return present.lower() in ('', 'true') +def get_table(engine, name): + """Returns an sqlalchemy table dynamically from db. + + Needed because the models don't work for us in migrations + as models will be far out of sync with the current data. 
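+ + For example, get_table(engine, 'volumes') reflects the volumes table + exactly as the migration under test actually left it in the database.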
+ """ + metadata = sqlalchemy.schema.MetaData() + metadata.bind = engine + return sqlalchemy.Table(name, metadata, autoload=True) + + class TestMigrations(test.TestCase): - """Test sqlalchemy-migrate migrations""" + """Test sqlalchemy-migrate migrations.""" - TEST_DATABASES = {} DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__), 'test_migrations.conf') # Test machines can set the CINDER_TEST_MIGRATIONS_CONF variable @@ -87,26 +116,27 @@ class TestMigrations(test.TestCase): DEFAULT_CONFIG_FILE) MIGRATE_FILE = cinder.db.sqlalchemy.migrate_repo.__file__ REPOSITORY = repository.Repository( - os.path.abspath(os.path.dirname(MIGRATE_FILE))) + os.path.abspath(os.path.dirname(MIGRATE_FILE))) def setUp(self): super(TestMigrations, self).setUp() self.snake_walk = False + self.test_databases = {} # Load test databases from the config file. Only do this # once. No need to re-run this on each test... LOG.debug('config_path is %s' % TestMigrations.CONFIG_FILE_PATH) - if not TestMigrations.TEST_DATABASES: + if not self.test_databases: if os.path.exists(TestMigrations.CONFIG_FILE_PATH): cp = ConfigParser.RawConfigParser() try: cp.read(TestMigrations.CONFIG_FILE_PATH) defaults = cp.defaults() for key, value in defaults.items(): - TestMigrations.TEST_DATABASES[key] = value + self.test_databases[key] = value self.snake_walk = cp.getboolean('walk_style', 'snake_walk') - except ConfigParser.ParsingError, e: + except ConfigParser.ParsingError as e: self.fail("Failed to read test_migrations.conf config " "file. Got error: %s" % e) else: @@ -114,9 +144,17 @@ def setUp(self): "file.") self.engines = {} - for key, value in TestMigrations.TEST_DATABASES.items(): + for key, value in self.test_databases.items(): self.engines[key] = sqlalchemy.create_engine(value) + # Set up a dict of types for those column types that + # are not uniform for all databases. + self.bool_type = {} + for (key, engine) in self.engines.items(): + self.bool_type[engine.name] = sqlalchemy.types.BOOLEAN + if engine.name == 'mysql': + self.bool_type[engine.name] = sqlalchemy.dialects.mysql.TINYINT + # We start each test case with a completely blank slate.
self._reset_databases() @@ -126,22 +164,20 @@ def tearDown(self): # and recreate it, which ensures that we have no side-effects # from the tests self._reset_databases() - - # remove these from the list so they aren't used in the migration tests - if "mysqlcitest" in self.engines: - del self.engines["mysqlcitest"] - if "mysqlcitest" in TestMigrations.TEST_DATABASES: - del TestMigrations.TEST_DATABASES["mysqlcitest"] super(TestMigrations, self).tearDown() def _reset_databases(self): def execute_cmd(cmd=None): - status, output = commands.getstatusoutput(cmd) + proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, shell=True) + output = proc.communicate()[0] LOG.debug(output) - self.assertEqual(0, status) + self.assertEqual(0, proc.returncode) + for key, engine in self.engines.items(): - conn_string = TestMigrations.TEST_DATABASES[key] + conn_string = self.test_databases[key] conn_pieces = urlparse.urlparse(conn_string) + engine.dispose() if conn_string.startswith('sqlite'): # We can just delete the SQLite database, which is # the easiest and cleanest solution @@ -163,45 +199,47 @@ def execute_cmd(cmd=None): if len(auth_pieces) > 1: if auth_pieces[1].strip(): password = "-p\"%s\"" % auth_pieces[1] - sql = ("drop database if exists %(database)s; " - "create database %(database)s;") % locals() + sql = ("drop database if exists %(database)s; create database " + "%(database)s;") % {'database': database} cmd = ("mysql -u \"%(user)s\" %(password)s -h %(host)s " - "-e \"%(sql)s\"") % locals() + "-e \"%(sql)s\"") % {'user': user, 'password': password, + 'host': host, 'sql': sql} execute_cmd(cmd) elif conn_string.startswith('postgresql'): database = conn_pieces.path.strip('/') loc_pieces = conn_pieces.netloc.split('@') host = loc_pieces[1] + auth_pieces = loc_pieces[0].split(':') user = auth_pieces[0] password = "" if len(auth_pieces) > 1: - if auth_pieces[1].strip(): - password = auth_pieces[1] - cmd = ("touch ~/.pgpass;" - "chmod 0600 ~/.pgpass;" - "sed -i -e" - "'1{s/^.*$/\*:\*:\*:%(user)s:%(password)s/};" - "1!d' ~/.pgpass") % locals() - execute_cmd(cmd) - sql = ("UPDATE pg_catalog.pg_database SET datallowconn=false " - "WHERE datname='%(database)s';") % locals() - cmd = ("psql -U%(user)s -h%(host)s -c\"%(sql)s\"") % locals() - execute_cmd(cmd) - sql = ("SELECT pg_catalog.pg_terminate_backend(procpid) " - "FROM pg_catalog.pg_stat_activity " - "WHERE datname='%(database)s';") % locals() - cmd = ("psql -U%(user)s -h%(host)s -c\"%(sql)s\"") % locals() - execute_cmd(cmd) - sql = ("drop database if exists %(database)s;") % locals() - cmd = ("psql -U%(user)s -h%(host)s -c\"%(sql)s\"") % locals() - execute_cmd(cmd) - sql = ("create database %(database)s;") % locals() - cmd = ("psql -U%(user)s -h%(host)s -c\"%(sql)s\"") % locals() - execute_cmd(cmd) + password = auth_pieces[1].strip() + # note(krtaylor): File creation problems with tests in + # venv using .pgpass authentication, changed to + # PGPASSWORD environment variable which is no longer + # planned to be deprecated + os.environ['PGPASSWORD'] = password + os.environ['PGUSER'] = user + # note(boris-42): We must create and drop database, we can't + # drop database which we have connected to, so for such + # operations there is a special database template1. 
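+ # For example, with the default openstack_citest credentials the + # drop step below expands to roughly: psql -w -U openstack_citest + # -h localhost -c 'drop database if exists openstack_citest;' + # -d template1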
+ sqlcmd = ("psql -w -U %(user)s -h %(host)s -c" + " '%(sql)s' -d template1") + sql = ("drop database if exists %(database)s;") % {'database': + database} + droptable = sqlcmd % {'user': user, 'host': host, 'sql': sql} + execute_cmd(droptable) + sql = ("create database %(database)s;") % {'database': + database} + createtable = sqlcmd % {'user': user, 'host': host, 'sql': sql} + execute_cmd(createtable) + os.unsetenv('PGPASSWORD') + os.unsetenv('PGUSER') def test_walk_versions(self): - """ + """Test walk versions. + Walks all version scripts for each tested database, ensuring that there are no errors in the version scripts for each engine """ @@ -209,37 +247,37 @@ self._walk_versions(engine, self.snake_walk) def test_mysql_connect_fail(self): - """ + """Test for mysql connection failure. + Test that we can trigger a mysql connection failure and we fail gracefully to ensure we don't break people without mysql """ if _is_mysql_avail(user="openstack_cifail"): self.fail("Shouldn't have connected") - @test.skip_unless(_have_mysql(), "mysql not available") + @testtools.skipUnless(_have_mysql(), "mysql not available") def test_mysql_innodb(self): - """ - Test that table creation on mysql only builds InnoDB tables - """ + """Test that table creation on mysql only builds InnoDB tables.""" # add this to the global lists to make reset work with it, it's removed # automatically in tearDown so no need to clean it up here. - connect_string = _mysql_get_connect_string() + connect_string = _get_connect_string('mysql') engine = sqlalchemy.create_engine(connect_string) self.engines["mysqlcitest"] = engine - TestMigrations.TEST_DATABASES["mysqlcitest"] = connect_string + self.test_databases["mysqlcitest"] = connect_string # build a fully populated mysql database with all the tables self._reset_databases() self._walk_versions(engine, False, False) - uri = _mysql_get_connect_string(database="information_schema") + uri = _get_connect_string('mysql', database="information_schema") connection = sqlalchemy.create_engine(uri).connect() # sanity check total = connection.execute("SELECT count(*) " "from information_schema.TABLES " "where TABLE_SCHEMA='openstack_citest'") - self.assertTrue(total.scalar() > 0, "No tables found. Wrong schema?") + self.assertGreater(total.scalar(), 0, + msg="No tables found. Wrong schema?") noninnodb = connection.execute("SELECT count(*) " "from information_schema.TABLES " @@ -249,6 +287,29 @@ def test_mysql_innodb(self): count = noninnodb.scalar() + self.assertEqual(count, 0, "%d non InnoDB tables created" % count) + def test_postgresql_connect_fail(self): + """Test connection failure on PostgreSQL. + + Test that we can trigger a postgres connection failure and we fail + gracefully to ensure we don't break people without postgres. + """ + if _is_backend_avail('postgres', user="openstack_cifail"): + self.fail("Shouldn't have connected") + + @testtools.skipUnless(_is_backend_avail('postgres'), + "postgresql not available") + def test_postgresql_opportunistically(self): + # add this to the global lists to make reset work with it, it's removed + # automatically in tearDown so no need to clean it up here.
+ connect_string = _get_connect_string("postgres") + engine = sqlalchemy.create_engine(connect_string) + self.engines["postgresqlcitest"] = engine + self.test_databases["postgresqlcitest"] = connect_string + + # build a fully populated postgresql database with all the tables + self._reset_databases() + self._walk_versions(engine, False, False) + def _walk_versions(self, engine=None, snake_walk=False, downgrade=True): # Determine latest version script from the repo, then # upgrade from 1 through to the latest, with no data @@ -256,21 +317,22 @@ def _walk_versions(self, engine=None, snake_walk=False, downgrade=True): # upgrades successfully. # Place the database under version control - migration_api.version_control(engine, TestMigrations.REPOSITORY, - migration.INIT_VERSION) - self.assertEqual(migration.INIT_VERSION, - migration_api.db_version(engine, - TestMigrations.REPOSITORY)) + migration_api.version_control(engine, + TestMigrations.REPOSITORY, + migration.db_initial_version()) + self.assertEqual(migration.db_initial_version(), + migration_api.db_version(engine, + TestMigrations.REPOSITORY)) migration_api.upgrade(engine, TestMigrations.REPOSITORY, - migration.INIT_VERSION + 1) + migration.db_initial_version() + 1) LOG.debug('latest version is %s' % TestMigrations.REPOSITORY.latest) - for version in xrange(migration.INIT_VERSION + 2, - TestMigrations.REPOSITORY.latest + 1): + for version in xrange(migration.db_initial_version() + 2, + TestMigrations.REPOSITORY.latest + 1): # upgrade -> downgrade -> upgrade - self._migrate_up(engine, version) + self._migrate_up(engine, version, with_data=True) if snake_walk: self._migrate_down(engine, version - 1) self._migrate_up(engine, version) @@ -279,7 +341,7 @@ def _walk_versions(self, engine=None, snake_walk=False, downgrade=True): # Now walk it back down to 0 from the latest, testing # the downgrade paths. for version in reversed( - xrange(migration.INIT_VERSION + 1, + xrange(migration.db_initial_version() + 1, TestMigrations.REPOSITORY.latest)): # downgrade -> upgrade -> downgrade self._migrate_down(engine, version) @@ -295,10 +357,716 @@ def _migrate_down(self, engine, version): migration_api.db_version(engine, TestMigrations.REPOSITORY)) - def _migrate_up(self, engine, version): - migration_api.upgrade(engine, - TestMigrations.REPOSITORY, - version) - self.assertEqual(version, + def _migrate_up(self, engine, version, with_data=False): + """Migrate up to a new version of the db. + + We allow for data insertion and post checks at every + migration version with special _prerun_### and + _check_### functions in the main test. 
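+ + As an illustration, a hypothetical migration 042 would pair + _prerun_042(engine), which seeds rows and returns them as data, with + _check_042(engine, data), which asserts those rows survived the upgrade.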
+ """ + # NOTE(sdague): try block is here because it's impossible to debug + # where a failed data migration happens otherwise + try: + if with_data: + data = None + prerun = getattr(self, "_prerun_%3.3d" % version, None) + if prerun: + data = prerun(engine) + + migration_api.upgrade(engine, + TestMigrations.REPOSITORY, + version) + self.assertEqual( + version, migration_api.db_version(engine, TestMigrations.REPOSITORY)) + + if with_data: + check = getattr(self, "_check_%3.3d" % version, None) + if check: + check(engine, data) + except Exception: + LOG.error("Failed to migrate to version %s on engine %s" % + (version, engine)) + raise + + # migration 004 - change volume types to UUID + def _prerun_004(self, engine): + data = { + 'volumes': [{'id': str(uuid.uuid4()), 'host': 'test1', + 'volume_type_id': 1}, + {'id': str(uuid.uuid4()), 'host': 'test2', + 'volume_type_id': 1}, + {'id': str(uuid.uuid4()), 'host': 'test3', + 'volume_type_id': 3}, + ], + 'volume_types': [{'name': 'vtype1'}, + {'name': 'vtype2'}, + {'name': 'vtype3'}, + ], + 'volume_type_extra_specs': [{'volume_type_id': 1, + 'key': 'v1', + 'value': 'hotep', + }, + {'volume_type_id': 1, + 'key': 'v2', + 'value': 'bending rodrigez', + }, + {'volume_type_id': 2, + 'key': 'v3', + 'value': 'bending rodrigez', + }, + ]} + + volume_types = get_table(engine, 'volume_types') + for vtype in data['volume_types']: + r = volume_types.insert().values(vtype).execute() + vtype['id'] = r.inserted_primary_key[0] + + volume_type_es = get_table(engine, 'volume_type_extra_specs') + for vtes in data['volume_type_extra_specs']: + r = volume_type_es.insert().values(vtes).execute() + vtes['id'] = r.inserted_primary_key[0] + + volumes = get_table(engine, 'volumes') + for vol in data['volumes']: + r = volumes.insert().values(vol).execute() + vol['id'] = r.inserted_primary_key[0] + + return data + + def _check_004(self, engine, data): + volumes = get_table(engine, 'volumes') + v1 = volumes.select(volumes.c.id == + data['volumes'][0]['id'] + ).execute().first() + v2 = volumes.select(volumes.c.id == + data['volumes'][1]['id'] + ).execute().first() + v3 = volumes.select(volumes.c.id == + data['volumes'][2]['id'] + ).execute().first() + + volume_types = get_table(engine, 'volume_types') + vt1 = volume_types.select(volume_types.c.name == + data['volume_types'][0]['name'] + ).execute().first() + vt2 = volume_types.select(volume_types.c.name == + data['volume_types'][1]['name'] + ).execute().first() + vt3 = volume_types.select(volume_types.c.name == + data['volume_types'][2]['name'] + ).execute().first() + + vtes = get_table(engine, 'volume_type_extra_specs') + vtes1 = vtes.select(vtes.c.key == + data['volume_type_extra_specs'][0]['key'] + ).execute().first() + vtes2 = vtes.select(vtes.c.key == + data['volume_type_extra_specs'][1]['key'] + ).execute().first() + vtes3 = vtes.select(vtes.c.key == + data['volume_type_extra_specs'][2]['key'] + ).execute().first() + + self.assertEqual(v1['volume_type_id'], vt1['id']) + self.assertEqual(v2['volume_type_id'], vt1['id']) + self.assertEqual(v3['volume_type_id'], vt3['id']) + + self.assertEqual(vtes1['volume_type_id'], vt1['id']) + self.assertEqual(vtes2['volume_type_id'], vt1['id']) + self.assertEqual(vtes3['volume_type_id'], vt2['id']) + + def test_migration_005(self): + """Test that adding source_volid column works correctly.""" + for (key, engine) in self.engines.items(): + migration_api.version_control(engine, + TestMigrations.REPOSITORY, + migration.db_initial_version()) + migration_api.upgrade(engine, 
TestMigrations.REPOSITORY, 4) + metadata = sqlalchemy.schema.MetaData() + metadata.bind = engine + + migration_api.upgrade(engine, TestMigrations.REPOSITORY, 5) + volumes = sqlalchemy.Table('volumes', + metadata, + autoload=True) + self.assertIsInstance(volumes.c.source_volid.type, + sqlalchemy.types.VARCHAR) + + def _metadatas(self, upgrade_to, downgrade_to=None): + for (key, engine) in self.engines.items(): + migration_api.version_control(engine, + TestMigrations.REPOSITORY, + migration.db_initial_version()) + migration_api.upgrade(engine, + TestMigrations.REPOSITORY, + upgrade_to) + + if downgrade_to is not None: + migration_api.downgrade( + engine, TestMigrations.REPOSITORY, downgrade_to) + + metadata = sqlalchemy.schema.MetaData() + metadata.bind = engine + yield metadata + + def metadatas_upgraded_to(self, revision): + return self._metadatas(revision) + + def metadatas_downgraded_from(self, revision): + return self._metadatas(revision, revision - 1) + + def test_upgrade_006_adds_provider_location(self): + for metadata in self.metadatas_upgraded_to(6): + snapshots = sqlalchemy.Table('snapshots', metadata, autoload=True) + self.assertIsInstance(snapshots.c.provider_location.type, + sqlalchemy.types.VARCHAR) + + def test_downgrade_006_removes_provider_location(self): + for metadata in self.metadatas_downgraded_from(6): + snapshots = sqlalchemy.Table('snapshots', metadata, autoload=True) + + self.assertNotIn('provider_location', snapshots.c) + + def test_upgrade_007_adds_fk(self): + for metadata in self.metadatas_upgraded_to(7): + snapshots = sqlalchemy.Table('snapshots', metadata, autoload=True) + volumes = sqlalchemy.Table('volumes', metadata, autoload=True) + + fkey, = snapshots.c.volume_id.foreign_keys + + self.assertEqual(volumes.c.id, fkey.column) + + def test_downgrade_007_removes_fk(self): + for metadata in self.metadatas_downgraded_from(7): + snapshots = sqlalchemy.Table('snapshots', metadata, autoload=True) + + self.assertEqual(0, len(snapshots.c.volume_id.foreign_keys)) + + def test_migration_008(self): + """Test that adding and removing the backups table works correctly.""" + for (key, engine) in self.engines.items(): + migration_api.version_control(engine, + TestMigrations.REPOSITORY, + migration.db_initial_version()) + migration_api.upgrade(engine, TestMigrations.REPOSITORY, 7) + metadata = sqlalchemy.schema.MetaData() + metadata.bind = engine + + migration_api.upgrade(engine, TestMigrations.REPOSITORY, 8) + + self.assertTrue(engine.dialect.has_table(engine.connect(), + "backups")) + backups = sqlalchemy.Table('backups', + metadata, + autoload=True) + + self.assertIsInstance(backups.c.created_at.type, + sqlalchemy.types.DATETIME) + self.assertIsInstance(backups.c.updated_at.type, + sqlalchemy.types.DATETIME) + self.assertIsInstance(backups.c.deleted_at.type, + sqlalchemy.types.DATETIME) + self.assertIsInstance(backups.c.deleted.type, + self.bool_type[engine.name]) + self.assertIsInstance(backups.c.id.type, + sqlalchemy.types.VARCHAR) + self.assertIsInstance(backups.c.volume_id.type, + sqlalchemy.types.VARCHAR) + self.assertIsInstance(backups.c.user_id.type, + sqlalchemy.types.VARCHAR) + self.assertIsInstance(backups.c.project_id.type, + sqlalchemy.types.VARCHAR) + self.assertIsInstance(backups.c.host.type, + sqlalchemy.types.VARCHAR) + self.assertIsInstance(backups.c.availability_zone.type, + sqlalchemy.types.VARCHAR) + self.assertIsInstance(backups.c.display_name.type, + sqlalchemy.types.VARCHAR) + self.assertIsInstance(backups.c.display_description.type, + 
sqlalchemy.types.VARCHAR) + self.assertIsInstance(backups.c.container.type, + sqlalchemy.types.VARCHAR) + self.assertIsInstance(backups.c.status.type, + sqlalchemy.types.VARCHAR) + self.assertIsInstance(backups.c.fail_reason.type, + sqlalchemy.types.VARCHAR) + self.assertIsInstance(backups.c.service_metadata.type, + sqlalchemy.types.VARCHAR) + self.assertIsInstance(backups.c.service.type, + sqlalchemy.types.VARCHAR) + self.assertIsInstance(backups.c.size.type, + sqlalchemy.types.INTEGER) + self.assertIsInstance(backups.c.object_count.type, + sqlalchemy.types.INTEGER) + + migration_api.downgrade(engine, TestMigrations.REPOSITORY, 7) + + self.assertFalse(engine.dialect.has_table(engine.connect(), + "backups")) + + def test_migration_009(self): + """Test adding snapshot_metadata table works correctly.""" + for (key, engine) in self.engines.items(): + migration_api.version_control(engine, + TestMigrations.REPOSITORY, + migration.db_initial_version()) + migration_api.upgrade(engine, TestMigrations.REPOSITORY, 8) + metadata = sqlalchemy.schema.MetaData() + metadata.bind = engine + + migration_api.upgrade(engine, TestMigrations.REPOSITORY, 9) + + self.assertTrue(engine.dialect.has_table(engine.connect(), + "snapshot_metadata")) + snapshot_metadata = sqlalchemy.Table('snapshot_metadata', + metadata, + autoload=True) + + self.assertIsInstance(snapshot_metadata.c.created_at.type, + sqlalchemy.types.DATETIME) + self.assertIsInstance(snapshot_metadata.c.updated_at.type, + sqlalchemy.types.DATETIME) + self.assertIsInstance(snapshot_metadata.c.deleted_at.type, + sqlalchemy.types.DATETIME) + self.assertIsInstance(snapshot_metadata.c.deleted.type, + self.bool_type[engine.name]) + self.assertIsInstance(snapshot_metadata.c.deleted.type, + self.bool_type[engine.name]) + self.assertIsInstance(snapshot_metadata.c.id.type, + sqlalchemy.types.INTEGER) + self.assertIsInstance(snapshot_metadata.c.snapshot_id.type, + sqlalchemy.types.VARCHAR) + self.assertIsInstance(snapshot_metadata.c.key.type, + sqlalchemy.types.VARCHAR) + self.assertIsInstance(snapshot_metadata.c.value.type, + sqlalchemy.types.VARCHAR) + + migration_api.downgrade(engine, TestMigrations.REPOSITORY, 8) + + self.assertFalse(engine.dialect.has_table(engine.connect(), + "snapshot_metadata")) + + def test_migration_010(self): + """Test adding transfers table works correctly.""" + for (key, engine) in self.engines.items(): + migration_api.version_control(engine, + TestMigrations.REPOSITORY, + migration.db_initial_version()) + migration_api.upgrade(engine, TestMigrations.REPOSITORY, 9) + metadata = sqlalchemy.schema.MetaData() + metadata.bind = engine + + migration_api.upgrade(engine, TestMigrations.REPOSITORY, 10) + self.assertTrue(engine.dialect.has_table(engine.connect(), + "transfers")) + transfers = sqlalchemy.Table('transfers', + metadata, + autoload=True) + + self.assertIsInstance(transfers.c.created_at.type, + sqlalchemy.types.DATETIME) + self.assertIsInstance(transfers.c.updated_at.type, + sqlalchemy.types.DATETIME) + self.assertIsInstance(transfers.c.deleted_at.type, + sqlalchemy.types.DATETIME) + self.assertIsInstance(transfers.c.deleted.type, + self.bool_type[engine.name]) + self.assertIsInstance(transfers.c.id.type, + sqlalchemy.types.VARCHAR) + self.assertIsInstance(transfers.c.volume_id.type, + sqlalchemy.types.VARCHAR) + self.assertIsInstance(transfers.c.display_name.type, + sqlalchemy.types.VARCHAR) + self.assertIsInstance(transfers.c.salt.type, + sqlalchemy.types.VARCHAR) + self.assertIsInstance(transfers.c.crypt_hash.type, + 
sqlalchemy.types.VARCHAR) + self.assertIsInstance(transfers.c.expires_at.type, + sqlalchemy.types.DATETIME) + + migration_api.downgrade(engine, TestMigrations.REPOSITORY, 9) + + self.assertFalse(engine.dialect.has_table(engine.connect(), + "transfers")) + + def test_migration_011(self): + """Test adding bootable column to volumes table works correctly.""" + for (key, engine) in self.engines.items(): + migration_api.version_control(engine, + TestMigrations.REPOSITORY, + migration.db_initial_version()) + migration_api.upgrade(engine, TestMigrations.REPOSITORY, 10) + metadata = sqlalchemy.schema.MetaData() + metadata.bind = engine + + volumes_v10 = sqlalchemy.Table('volumes', + metadata, + autoload=True) + + migration_api.upgrade(engine, TestMigrations.REPOSITORY, 11) + metadata = sqlalchemy.schema.MetaData() + metadata.bind = engine + + self.assertTrue(engine.dialect.has_table(engine.connect(), + "volumes")) + volumes = sqlalchemy.Table('volumes', + metadata, + autoload=True) + + # Make sure we didn't miss any columns in the upgrade + for column in volumes_v10.c: + self.assertTrue(volumes.c.__contains__(column.name)) + + self.assertIsInstance(volumes.c.bootable.type, + self.bool_type[engine.name]) + + migration_api.downgrade(engine, TestMigrations.REPOSITORY, 10) + metadata = sqlalchemy.schema.MetaData() + metadata.bind = engine + + volumes = sqlalchemy.Table('volumes', + metadata, + autoload=True) + self.assertNotIn('bootable', volumes.c) + + # Make sure we put all the columns back + for column in volumes_v10.c: + self.assertTrue(volumes.c.__contains__(column.name)) + + def test_migration_012(self): + """Test that adding attached_host column works correctly.""" + for (key, engine) in self.engines.items(): + migration_api.version_control(engine, + TestMigrations.REPOSITORY, + migration.db_initial_version()) + migration_api.upgrade(engine, TestMigrations.REPOSITORY, 11) + metadata = sqlalchemy.schema.MetaData() + metadata.bind = engine + + migration_api.upgrade(engine, TestMigrations.REPOSITORY, 12) + volumes = sqlalchemy.Table('volumes', + metadata, + autoload=True) + self.assertIsInstance(volumes.c.attached_host.type, + sqlalchemy.types.VARCHAR) + + migration_api.downgrade(engine, TestMigrations.REPOSITORY, 11) + metadata = sqlalchemy.schema.MetaData() + metadata.bind = engine + + volumes = sqlalchemy.Table('volumes', + metadata, + autoload=True) + self.assertNotIn('attached_host', volumes.c) + + def test_migration_013(self): + """Test that adding provider_geometry column works correctly.""" + for (key, engine) in self.engines.items(): + migration_api.version_control(engine, + TestMigrations.REPOSITORY, + migration.db_initial_version()) + migration_api.upgrade(engine, TestMigrations.REPOSITORY, 12) + metadata = sqlalchemy.schema.MetaData() + metadata.bind = engine + + migration_api.upgrade(engine, TestMigrations.REPOSITORY, 13) + volumes = sqlalchemy.Table('volumes', + metadata, + autoload=True) + self.assertIsInstance(volumes.c.provider_geometry.type, + sqlalchemy.types.VARCHAR) + + migration_api.downgrade(engine, TestMigrations.REPOSITORY, 12) + metadata = sqlalchemy.schema.MetaData() + metadata.bind = engine + + volumes = sqlalchemy.Table('volumes', + metadata, + autoload=True) + self.assertNotIn('provider_geometry', volumes.c) + + def test_migration_014(self): + """Test that adding _name_id column works correctly.""" + for (key, engine) in self.engines.items(): + migration_api.version_control(engine, + TestMigrations.REPOSITORY, + migration.db_initial_version()) + migration_api.upgrade(engine,
TestMigrations.REPOSITORY, 13) + metadata = sqlalchemy.schema.MetaData() + metadata.bind = engine + + migration_api.upgrade(engine, TestMigrations.REPOSITORY, 14) + volumes = sqlalchemy.Table('volumes', + metadata, + autoload=True) + self.assertIsInstance(volumes.c._name_id.type, + sqlalchemy.types.VARCHAR) + + migration_api.downgrade(engine, TestMigrations.REPOSITORY, 13) + metadata = sqlalchemy.schema.MetaData() + metadata.bind = engine + + volumes = sqlalchemy.Table('volumes', + metadata, + autoload=True) + self.assertNotIn('_name_id', volumes.c) + + def test_migration_015(self): + """Test removing migrations table works correctly.""" + for (key, engine) in self.engines.items(): + migration_api.version_control(engine, + TestMigrations.REPOSITORY, + migration.db_initial_version()) + migration_api.upgrade(engine, TestMigrations.REPOSITORY, 15) + + self.assertFalse(engine.dialect.has_table(engine.connect(), + "migrations")) + + migration_api.downgrade(engine, TestMigrations.REPOSITORY, 14) + + self.assertTrue(engine.dialect.has_table(engine.connect(), + "migrations")) + + def test_migration_016(self): + """Test that dropping xen storage manager tables works correctly.""" + for (key, engine) in self.engines.items(): + migration_api.version_control(engine, + TestMigrations.REPOSITORY, + migration.db_initial_version()) + migration_api.upgrade(engine, TestMigrations.REPOSITORY, 15) + metadata = sqlalchemy.schema.MetaData() + metadata.bind = engine + + migration_api.upgrade(engine, TestMigrations.REPOSITORY, 16) + self.assertFalse(engine.dialect.has_table(engine.connect(), + 'sm_flavors')) + self.assertFalse(engine.dialect.has_table(engine.connect(), + 'sm_backend_config')) + self.assertFalse(engine.dialect.has_table(engine.connect(), + 'sm_volume')) + + migration_api.downgrade(engine, TestMigrations.REPOSITORY, 15) + self.assertTrue(engine.dialect.has_table(engine.connect(), + 'sm_flavors')) + self.assertTrue(engine.dialect.has_table(engine.connect(), + 'sm_backend_config')) + self.assertTrue(engine.dialect.has_table(engine.connect(), + 'sm_volume')) + + def test_migration_017(self): + """Test that added encryption information works correctly.""" + + # upgrade schema + for (key, engine) in self.engines.items(): + migration_api.version_control(engine, + TestMigrations.REPOSITORY, + migration.db_initial_version()) + migration_api.upgrade(engine, TestMigrations.REPOSITORY, 16) + metadata = sqlalchemy.schema.MetaData() + metadata.bind = engine + + migration_api.upgrade(engine, TestMigrations.REPOSITORY, 17) + + # encryption key UUID + volumes = sqlalchemy.Table('volumes', metadata, autoload=True) + self.assertIn('encryption_key_id', volumes.c) + self.assertIsInstance(volumes.c.encryption_key_id.type, + sqlalchemy.types.VARCHAR) + + snapshots = sqlalchemy.Table('snapshots', metadata, autoload=True) + self.assertIn('encryption_key_id', snapshots.c) + self.assertIsInstance(snapshots.c.encryption_key_id.type, + sqlalchemy.types.VARCHAR) + self.assertIn('volume_type_id', snapshots.c) + self.assertIsInstance(snapshots.c.volume_type_id.type, + sqlalchemy.types.VARCHAR) + + # encryption types table + encryption = sqlalchemy.Table('encryption', + metadata, + autoload=True) + self.assertIsInstance(encryption.c.volume_type_id.type, + sqlalchemy.types.VARCHAR) + self.assertIsInstance(encryption.c.cipher.type, + sqlalchemy.types.VARCHAR) + self.assertIsInstance(encryption.c.key_size.type, + sqlalchemy.types.INTEGER) + self.assertIsInstance(encryption.c.provider.type, + sqlalchemy.types.VARCHAR) + + # downgrade 
schema + migration_api.downgrade(engine, TestMigrations.REPOSITORY, 16) + metadata = sqlalchemy.schema.MetaData() + metadata.bind = engine + + volumes = sqlalchemy.Table('volumes', metadata, autoload=True) + self.assertNotIn('encryption_key_id', volumes.c) + + snapshots = sqlalchemy.Table('snapshots', metadata, autoload=True) + self.assertNotIn('encryption_key_id', snapshots.c) + + self.assertFalse(engine.dialect.has_table(engine.connect(), + 'encryption')) + + def test_migration_018(self): + """Test that added qos_specs table works correctly.""" + for (key, engine) in self.engines.items(): + migration_api.version_control(engine, + TestMigrations.REPOSITORY, + migration.db_initial_version()) + migration_api.upgrade(engine, TestMigrations.REPOSITORY, 17) + metadata = sqlalchemy.schema.MetaData() + metadata.bind = engine + + migration_api.upgrade(engine, TestMigrations.REPOSITORY, 18) + self.assertTrue(engine.dialect.has_table( + engine.connect(), "quality_of_service_specs")) + qos_specs = sqlalchemy.Table('quality_of_service_specs', + metadata, + autoload=True) + self.assertIsInstance(qos_specs.c.created_at.type, + sqlalchemy.types.DATETIME) + self.assertIsInstance(qos_specs.c.updated_at.type, + sqlalchemy.types.DATETIME) + self.assertIsInstance(qos_specs.c.deleted_at.type, + sqlalchemy.types.DATETIME) + self.assertIsInstance(qos_specs.c.deleted.type, + self.bool_type[engine.name]) + self.assertIsInstance(qos_specs.c.id.type, + sqlalchemy.types.VARCHAR) + self.assertIsInstance(qos_specs.c.specs_id.type, + sqlalchemy.types.VARCHAR) + self.assertIsInstance(qos_specs.c.key.type, + sqlalchemy.types.VARCHAR) + self.assertIsInstance(qos_specs.c.value.type, + sqlalchemy.types.VARCHAR) + + migration_api.downgrade(engine, TestMigrations.REPOSITORY, 17) + + self.assertFalse(engine.dialect.has_table( + engine.connect(), "quality_of_service_specs")) + + def test_migration_019(self): + """Test that adding migration_status column works correctly.""" + for (key, engine) in self.engines.items(): + migration_api.version_control(engine, + TestMigrations.REPOSITORY, + migration.db_initial_version()) + migration_api.upgrade(engine, TestMigrations.REPOSITORY, 18) + metadata = sqlalchemy.schema.MetaData() + metadata.bind = engine + + migration_api.upgrade(engine, TestMigrations.REPOSITORY, 19) + volumes = sqlalchemy.Table('volumes', + metadata, + autoload=True) + self.assertIsInstance(volumes.c.migration_status.type, + sqlalchemy.types.VARCHAR) + + migration_api.downgrade(engine, TestMigrations.REPOSITORY, 18) + metadata = sqlalchemy.schema.MetaData() + metadata.bind = engine + + volumes = sqlalchemy.Table('volumes', + metadata, + autoload=True) + self.assertNotIn('migration_status', volumes.c) + + def test_migration_020(self): + """Test adding volume_admin_metadata table works correctly.""" + for (key, engine) in self.engines.items(): + migration_api.version_control(engine, + TestMigrations.REPOSITORY, + migration.db_initial_version()) + migration_api.upgrade(engine, TestMigrations.REPOSITORY, 19) + metadata = sqlalchemy.schema.MetaData() + metadata.bind = engine + + migration_api.upgrade(engine, TestMigrations.REPOSITORY, 20) + + self.assertTrue(engine.dialect.has_table(engine.connect(), + "volume_admin_metadata")) + volume_admin_metadata = sqlalchemy.Table('volume_admin_metadata', + metadata, + autoload=True) + + self.assertIsInstance(volume_admin_metadata.c.created_at.type, + sqlalchemy.types.DATETIME) + self.assertIsInstance(volume_admin_metadata.c.updated_at.type, + sqlalchemy.types.DATETIME) + 
self.assertIsInstance(volume_admin_metadata.c.deleted_at.type, + sqlalchemy.types.DATETIME) + self.assertIsInstance(volume_admin_metadata.c.deleted.type, + self.bool_type[engine.name]) + self.assertIsInstance(volume_admin_metadata.c.id.type, + sqlalchemy.types.INTEGER) + self.assertIsInstance(volume_admin_metadata.c.volume_id.type, + sqlalchemy.types.VARCHAR) + self.assertIsInstance(volume_admin_metadata.c.key.type, + sqlalchemy.types.VARCHAR) + self.assertIsInstance(volume_admin_metadata.c.value.type, + sqlalchemy.types.VARCHAR) + + migration_api.downgrade(engine, TestMigrations.REPOSITORY, 19) + + self.assertFalse(engine.dialect.has_table(engine.connect(), + "volume_admin_metadata")) + + def test_migration_021(self): + """Test adding default data for quota classes works correctly.""" + for (key, engine) in self.engines.items(): + migration_api.version_control(engine, + TestMigrations.REPOSITORY, + migration.db_initial_version()) + migration_api.upgrade(engine, TestMigrations.REPOSITORY, 20) + metadata = sqlalchemy.schema.MetaData() + metadata.bind = engine + + migration_api.upgrade(engine, TestMigrations.REPOSITORY, 21) + + quota_class_metadata = sqlalchemy.Table('quota_classes', + metadata, + autoload=True) + + num_defaults = quota_class_metadata.count().\ + where(quota_class_metadata.c.class_name == 'default').\ + execute().scalar() + + self.assertEqual(3, num_defaults) + + migration_api.downgrade(engine, TestMigrations.REPOSITORY, 20) + + # Defaults should not be deleted during downgrade + num_defaults = quota_class_metadata.count().\ + where(quota_class_metadata.c.class_name == 'default').\ + execute().scalar() + + self.assertEqual(3, num_defaults) + + def test_migration_022(self): + """Test that adding disabled_reason column works correctly.""" + for (key, engine) in self.engines.items(): + migration_api.version_control(engine, + TestMigrations.REPOSITORY, + migration.db_initial_version()) + migration_api.upgrade(engine, TestMigrations.REPOSITORY, 21) + metadata = sqlalchemy.schema.MetaData() + metadata.bind = engine + + migration_api.upgrade(engine, TestMigrations.REPOSITORY, 22) + services = sqlalchemy.Table('services', + metadata, + autoload=True) + self.assertIsInstance(services.c.disabled_reason.type, + sqlalchemy.types.VARCHAR) + + migration_api.downgrade(engine, TestMigrations.REPOSITORY, 21) + metadata = sqlalchemy.schema.MetaData() + metadata.bind = engine + + services = sqlalchemy.Table('services', + metadata, + autoload=True) + self.assertNotIn('disabled_reason', services.c) diff --git a/cinder/tests/test_misc.py b/cinder/tests/test_misc.py index 7dd24ca7c9..548b9646e5 100644 --- a/cinder/tests/test_misc.py +++ b/cinder/tests/test_misc.py @@ -1,6 +1,5 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright 2010 OpenStack LLC +# Copyright 2010 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain @@ -28,6 +27,8 @@ def _raise_exc(exc): raise exc() def test_exceptions_raise(self): + # NOTE(dprince): disable format errors since we are not passing kwargs + self.flags(fatal_exception_format_errors=False) for name in dir(exception): exc = getattr(exception, name) if isinstance(exc, type): @@ -56,4 +57,4 @@ def test_all_migrations_have_downgrade(self): helpful_msg = (_("The following migrations are missing a downgrade:" "\n\t%s") % '\n\t'.join(sorted(missing_downgrade))) - self.assert_(not missing_downgrade, helpful_msg) + self.assertFalse(missing_downgrade, msg=helpful_msg) diff --git a/cinder/tests/test_netapp.py b/cinder/tests/test_netapp.py index cbf0e57f3a..bad9fc85f7 100644 --- a/cinder/tests/test_netapp.py +++ b/cinder/tests/test_netapp.py @@ -1,4 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2012 NetApp, Inc. # All Rights Reserved. @@ -21,891 +20,429 @@ import BaseHTTPServer import httplib -import StringIO - from lxml import etree +import StringIO +from cinder import exception from cinder.openstack.common import log as logging from cinder import test -from cinder.volume import netapp +from cinder.volume import configuration as conf +from cinder.volume.drivers.netapp import common +from cinder.volume.drivers.netapp.options import netapp_7mode_opts +from cinder.volume.drivers.netapp.options import netapp_basicauth_opts +from cinder.volume.drivers.netapp.options import netapp_cluster_opts +from cinder.volume.drivers.netapp.options import netapp_connection_opts +from cinder.volume.drivers.netapp.options import netapp_provisioning_opts +from cinder.volume.drivers.netapp.options import netapp_transport_opts +from cinder.volume.drivers.netapp import ssc_utils + LOG = logging.getLogger("cinder.volume.driver") -WSDL_HEADER = """ -""" - -WSDL_TYPES = """ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -""" - -WSDL_TRAILER = """ - - -""" - -RESPONSE_PREFIX = """ -""" - -RESPONSE_SUFFIX = """""" - -APIS = ['ApiProxy', 'DatasetListInfoIterStart', 'DatasetListInfoIterNext', - 'DatasetListInfoIterEnd', 'DatasetEditBegin', 'DatasetEditCommit', - 'DatasetProvisionMember', 'DatasetRemoveMember', 'DfmAbout', - 'DpJobProgressEventListIterStart', 'DpJobProgressEventListIterNext', - 'DpJobProgressEventListIterEnd', 'DatasetMemberListInfoIterStart', - 'DatasetMemberListInfoIterNext', 'DatasetMemberListInfoIterEnd', - 'HostListInfoIterStart', 'HostListInfoIterNext', 'HostListInfoIterEnd', - 'LunListInfoIterStart', 'LunListInfoIterNext', 'LunListInfoIterEnd', - 'StorageServiceDatasetProvision'] - -iter_count = 0 -iter_table = {} - - -class 
FakeDfmServerHandler(BaseHTTPServer.BaseHTTPRequestHandler): - """HTTP handler that fakes enough stuff to allow the driver to run""" + +def create_configuration(): + configuration = conf.Configuration(None) + configuration.append_config_values(netapp_connection_opts) + configuration.append_config_values(netapp_transport_opts) + configuration.append_config_values(netapp_basicauth_opts) + configuration.append_config_values(netapp_cluster_opts) + configuration.append_config_values(netapp_7mode_opts) + configuration.append_config_values(netapp_provisioning_opts) + return configuration + + +class FakeHTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler): + """HTTP handler that doesn't spam the log.""" + + def log_message(self, format, *args): + pass + + +class FakeHttplibSocket(object): + """A fake socket implementation for httplib.HTTPResponse.""" + def __init__(self, value): + self._rbuffer = StringIO.StringIO(value) + self._wbuffer = StringIO.StringIO('') + oldclose = self._wbuffer.close + + def newclose(): + self.result = self._wbuffer.getvalue() + oldclose() + self._wbuffer.close = newclose + + def makefile(self, mode, _other): + """Returns the socket's internal buffer""" + if mode == 'r' or mode == 'rb': + return self._rbuffer + if mode == 'w' or mode == 'wb': + return self._wbuffer + + +RESPONSE_PREFIX_DIRECT_CMODE = """ +""" + +RESPONSE_PREFIX_DIRECT_7MODE = """ +""" + +RESPONSE_PREFIX_DIRECT = """ +""" + +RESPONSE_SUFFIX_DIRECT = """""" + + +class FakeDirectCMODEServerHandler(FakeHTTPRequestHandler): + """HTTP handler that fakes enough stuff to allow the driver to run.""" def do_GET(s): """Respond to a GET request.""" - if '/dfm.wsdl' != s.path: + if '/servlets/netapp.servlets.admin.XMLrequest_filer' not in s.path: s.send_response(404) s.end_headers return s.send_response(200) - s.send_header("Content-Type", "application/wsdl+xml") + s.send_header("Content-Type", "text/xml; charset=utf-8") s.end_headers() out = s.wfile - out.write(WSDL_HEADER) - out.write(WSDL_TYPES) - for api in APIS: - out.write('' % api) - out.write('' % api) - out.write('') - out.write('' % api) - out.write('' % api) - out.write('') - out.write('') - for api in APIS: - out.write('' % api) - out.write('' % api) - out.write('' % api) - out.write('') - out.write('') - out.write('') - out.write('') - for api in APIS: - out.write('' % api) - out.write('' % api) - out.write('') - out.write('') - out.write('') - out.write('') - out.write(WSDL_TRAILER) + out.write('' + '') def do_POST(s): """Respond to a POST request.""" - if '/apis/soap/v1' != s.path: + if '/servlets/netapp.servlets.admin.XMLrequest_filer' not in s.path: s.send_response(404) s.end_headers return request_xml = s.rfile.read(int(s.headers['Content-Length'])) - ntap_ns = 'http://www.netapp.com/management/v1' - nsmap = {'env': 'http://schemas.xmlsoap.org/soap/envelope/', - 'na': ntap_ns} root = etree.fromstring(request_xml) - - body = root.xpath('/env:Envelope/env:Body', namespaces=nsmap)[0] - request = body.getchildren()[0] + body = [x for x in root.iterchildren()] + request = body[0] tag = request.tag - if not tag.startswith('{' + ntap_ns + '}'): - s.send_response(500) - s.end_headers - return - api = tag[(2 + len(ntap_ns)):] - global iter_count - global iter_table - if 'DatasetListInfoIterStart' == api: - iter_name = 'dataset_%s' % iter_count - iter_count = iter_count + 1 - iter_table[iter_name] = 0 - body = """ - 1 - %s - """ % iter_name - elif 'DatasetListInfoIterNext' == api: - tags = body.xpath('na:DatasetListInfoIterNext/na:Tag', - 
namespaces=nsmap) - iter_name = tags[0].text - if iter_table[iter_name]: - body = """ - - 0 - """ + api = etree.QName(tag).localname or tag + if 'lun-get-iter' == api: + tag = \ + FakeDirectCMODEServerHandler._get_child_by_name(request, 'tag') + if tag is None: + body = """ + + indeterminate + 512 + 1354536362 + + false + true + + falselinux + + true/vol/navneet/lun1 + 0 + false2FfGI$APyN68 + none20971520 + 0false + 0 + cec1f3d7-3d41-11e2-9cf4-123478563412 + navneetben_vserver + + <lun-get-iter-key-td> + <key-0>ben_vserver</key-0> + <key-1>/vol/navneet/lun2</key-1> + <key-2>navneet</key-2> + <key-3></key-3> + <key-4>lun2</key-4> + </lun-get-iter-key-td> + 1""" else: - iter_table[iter_name] = 1 - body = """ - - - 0 - - - OpenStackProject - testproj - - - OpenStackVolType - - - - OpenStack_testproj - - - 1 - """ - elif 'DatasetListInfoIterEnd' == api: - body = """""" - elif 'DatasetEditBegin' == api: - body = """ - 0 - """ - elif 'DatasetEditCommit' == api: - body = """ - false - - - 0 - - - """ - elif 'DatasetProvisionMember' == api: - body = """""" - elif 'DatasetRemoveMember' == api: - body = """""" - elif 'DfmAbout' == api: - body = """""" - elif 'DpJobProgressEventListIterStart' == api: - iter_name = 'dpjobprogress_%s' % iter_count - iter_count = iter_count + 1 - iter_table[iter_name] = 0 - body = """ - 2 - %s - """ % iter_name - elif 'DpJobProgressEventListIterNext' == api: - tags = body.xpath('na:DpJobProgressEventListIterNext/na:Tag', - namespaces=nsmap) - iter_name = tags[0].text - if iter_table[iter_name]: - body = """""" + body = """ + + indeterminate + 512 + 1354536362 + + false + true + + falselinux + + true/vol/navneet/lun3 + 0 + false2FfGI$APyN68 + + none20971520 + 0false + 0 + cec1f3d7-3d41-11e2-9cf4-123478563412 + navneetben_vserver + + 1""" + elif 'volume-get-iter' == api: + tag = \ + FakeDirectCMODEServerHandler._get_child_by_name(request, 'tag') + if tag is None: + body = """ + + iscsi + Openstack + + + 214748364 + + true + + falseonline + + + nfsvol + openstack + + + 247483648 + + true + + falseonline + + + <volume-get-iter-key-td> + <key-0>openstack</key-0> + <key-1>nfsvol</key-1> + </volume-get-iter-key-td> + 2""" else: - iter_table[iter_name] = 1 - name = ('filer:/OpenStack_testproj/volume-00000001/' - 'volume-00000001') - body = """ - - - normal - lun-create - - 0 - %s - - - - normal - job-end - - - 2 - """ % name - elif 'DpJobProgressEventListIterEnd' == api: - body = """""" - elif 'DatasetMemberListInfoIterStart' == api: - iter_name = 'datasetmember_%s' % iter_count - iter_count = iter_count + 1 - iter_table[iter_name] = 0 - body = """ - 1 - %s - """ % iter_name - elif 'DatasetMemberListInfoIterNext' == api: - tags = body.xpath('na:DatasetMemberListInfoIterNext/na:Tag', - namespaces=nsmap) - iter_name = tags[0].text - if iter_table[iter_name]: - body = """ - - 0 - """ + body = """ + + iscsi + Openstack + + + 4147483648 + + true + + falseonline + + + nfsvol + openstack + + + 8147483648 + + true + + falseonline + + + 2""" + elif 'lun-create-by-size' == api: + body = """ + 22020096""" + elif 'lun-destroy' == api: + body = """""" + elif 'igroup-get-iter' == api: + init_found = True + query = FakeDirectCMODEServerHandler._get_child_by_name(request, + 'query') + if query is not None: + igroup_info = FakeDirectCMODEServerHandler._get_child_by_name( + query, 'initiator-group-info') + if igroup_info is not None: + inits = FakeDirectCMODEServerHandler._get_child_by_name( + igroup_info, 'initiators') + if inits is not None: + init_info = \ + 
FakeDirectCMODEServerHandler._get_child_by_name( + inits, 'initiator-info') + init_name = \ + FakeDirectCMODEServerHandler._get_child_content( + init_info, + 'initiator-name') + if init_name == 'iqn.1993-08.org.debian:01:10': + init_found = True + else: + init_found = False + if init_found: + tag = \ + FakeDirectCMODEServerHandler._get_child_by_name( + request, 'tag') + if tag is None: + body = """ + + openstack-01f5297b-00f7-4170-bf30-69b1314b2118 + + windows + iscsi + + + iqn.1993-08.org.debian:01:10 + + openstack + + <igroup-get-iter-key-td> + <key-0>openstack</key-0> + <key-1> + openstack-01f5297b-00f7-4170-bf30-69b1314b2118< + /key-1> + </igroup-get-iter-key-td> + 1""" + else: + body = """ + + openstack-01f5297b-00f7-4170-bf30-69b1314b2118 + + linux + iscsi + + + iqn.1993-08.org.debian:01:10 + + openstack + 1""" else: - iter_table[iter_name] = 1 - name = ('filer:/OpenStack_testproj/volume-00000001/' - 'volume-00000001') - body = """ - - - 0 - %s - - - 1 - """ % name - elif 'DatasetMemberListInfoIterEnd' == api: - body = """""" - elif 'HostListInfoIterStart' == api: - body = """ - 1 - host - """ - elif 'HostListInfoIterNext' == api: - body = """ - - - 1.2.3.4 - 0 - filer - - - 1 - """ - elif 'HostListInfoIterEnd' == api: - body = """""" - elif 'LunListInfoIterStart' == api: - body = """ - 1 - lun - """ - elif 'LunListInfoIterNext' == api: - path = 'OpenStack_testproj/volume-00000001/volume-00000001' - body = """ - - - 0 - %s - - - 1 - """ % path - elif 'LunListInfoIterEnd' == api: - body = """""" - elif 'ApiProxy' == api: - names = body.xpath('na:ApiProxy/na:Request/na:Name', - namespaces=nsmap) - proxy = names[0].text - if 'igroup-list-info' == proxy: - igroup = 'openstack-iqn.1993-08.org.debian:01:23456789' - initiator = 'iqn.1993-08.org.debian:01:23456789' - proxy_body = """ - - %s - iscsi - linux - - - %s - - - - """ % (igroup, initiator) - elif 'igroup-create' == proxy: - proxy_body = '' - elif 'igroup-add' == proxy: - proxy_body = '' - elif 'lun-map-list-info' == proxy: - proxy_body = '' - elif 'lun-map' == proxy: - proxy_body = '0' - elif 'lun-unmap' == proxy: - proxy_body = '' - elif 'iscsi-portal-list-info' == proxy: - proxy_body = """ - - 1.2.3.4 - 3260 - 1000 - - """ - elif 'iscsi-node-get-name' == proxy: - target = 'iqn.1992-08.com.netapp:sn.111111111' - proxy_body = '%s' % target + body = """ + 0 + """ + elif 'lun-map-get-iter' == api: + tag = \ + FakeDirectCMODEServerHandler._get_child_by_name(request, 'tag') + if tag is None: + body = """ + + openstack-44c5e7e1-3306-4800-9623-259e57d56a83 + + 948ae304-06e9-11e2 + 0 + 5587e563-06e9-11e2-9cf4-123478563412 + /vol/openvol/lun1 + openstack + + + <lun-map-get-iter-key-td> + <key-0>openstack</key-0> + <key-1>openstack-01f5297b-00f7-4170-bf30-69b1314b2118< + /key-1> + </lun-map-get-iter-key-td> + + 1 + """ else: - # Unknown proxy API + body = """ + + openstack-44c5e7e1-3306-4800-9623-259e57d56a83 + + 948ae304-06e9-11e2 + 0 + 5587e563-06e9-11e2-9cf4-123478563412 + /vol/openvol/lun1 + openstack + 1 + """ + elif 'lun-map' == api: + body = """1 + + """ + elif 'iscsi-service-get-iter' == api: + body = """ + + openstack + true + iqn.1992-08.com.netapp:sn.fa9:vs.105 + openstack + 1""" + elif 'iscsi-interface-get-iter' == api: + body = """ + + fas3170rre-cmode-01 + e1b-1165 + + iscsi_data_if + 10.63.165.216 + 3260true + + 5 + iscsi_data_if + 1038 + openstack + + 1""" + elif 'igroup-create' == api: + body = """""" + elif 'igroup-add' == api: + body = """""" + elif 'clone-create' == api: + body = """""" + elif 'lun-unmap' == api: + body = 
"""""" + elif 'system-get-ontapi-version' == api: + body = """ + 1 + 19 + """ + elif 'vserver-get-iter' == api: + body = """ + + vserver + node + + + 1""" + elif 'ems-autosupport-log' == api: + body = """""" + elif 'lun-resize' == api: + body = """""" + elif 'lun-get-geometry' == api: + body = """ + 1 + 2 + 8 + 2 + 4 + 5 + """ + elif 'volume-options-list-info' == api: + body = """ + + + + """ + elif 'lun-move' == api: + body = """""" + else: + # Unknown API s.send_response(500) s.end_headers return - api = api + ':' + proxy - proxy_header = '' - proxy_trailer = """passed - """ - body = proxy_header + proxy_body + proxy_trailer - else: - # Unknown API - s.send_response(500) - s.end_headers - return s.send_response(200) s.send_header("Content-Type", "text/xml; charset=utf-8") s.end_headers() - s.wfile.write(RESPONSE_PREFIX) + s.wfile.write(RESPONSE_PREFIX_DIRECT_CMODE) + s.wfile.write(RESPONSE_PREFIX_DIRECT) s.wfile.write(body) - s.wfile.write(RESPONSE_SUFFIX) + s.wfile.write(RESPONSE_SUFFIX_DIRECT) + @staticmethod + def _get_child_by_name(self, name): + for child in self.iterchildren(): + if child.tag == name or etree.QName(child.tag).localname == name: + return child + return None -class FakeHttplibSocket(object): - """A fake socket implementation for httplib.HTTPResponse""" - def __init__(self, value): - self._rbuffer = StringIO.StringIO(value) - self._wbuffer = StringIO.StringIO('') - oldclose = self._wbuffer.close - - def newclose(): - self.result = self._wbuffer.getvalue() - oldclose() - self._wbuffer.close = newclose - - def makefile(self, mode, _other): - """Returns the socket's internal buffer""" - if mode == 'r' or mode == 'rb': - return self._rbuffer - if mode == 'w' or mode == 'wb': - return self._wbuffer + @staticmethod + def _get_child_content(self, name): + """Get the content of the child.""" + for child in self.iterchildren(): + if child.tag == name or etree.QName(child.tag).localname == name: + return child.text + return None -class FakeHTTPConnection(object): +class FakeDirectCmodeHTTPConnection(object): """A fake httplib.HTTPConnection for netapp tests Requests made via this connection actually get translated and routed into - the fake Dfm handler above, we then turn the response into + the fake direct handler above, we then turn the response into the httplib.HTTPResponse that the caller expects. 
""" def __init__(self, host, timeout=None): @@ -924,8 +461,8 @@ def request(self, method, path, data=None, headers=None): sock = FakeHttplibSocket(req_str.decode("latin-1").encode("utf-8")) # NOTE(vish): stop the server from trying to look up address from # the fake socket - FakeDfmServerHandler.address_string = lambda x: '127.0.0.1' - self.app = FakeDfmServerHandler(sock, '127.0.0.1:8088', None) + FakeDirectCMODEServerHandler.address_string = lambda x: '127.0.0.1' + self.app = FakeDirectCMODEServerHandler(sock, '127.0.0.1:80', None) self.sock = FakeHttplibSocket(sock.result) self.http_response = httplib.HTTPResponse(self.sock) @@ -941,350 +478,587 @@ def getresponsebody(self): return self.sock.result -class NetAppDriverTestCase(test.TestCase): +class NetAppDirectCmodeISCSIDriverTestCase(test.TestCase): """Test case for NetAppISCSIDriver""" - STORAGE_SERVICE = 'Openstack Service' - STORAGE_SERVICE_PREFIX = 'Openstack Service-' - PROJECT_ID = 'testproj' - VOLUME_NAME = 'volume-00000001' - VOLUME_TYPE = '' - VOLUME_SIZE = 2147483648L # 2 GB - INITIATOR = 'iqn.1993-08.org.debian:01:23456789' + + volume = {'name': 'lun1', 'size': 2, 'volume_name': 'lun1', + 'os_type': 'linux', 'provider_location': 'lun1', + 'id': 'lun1', 'provider_auth': None, 'project_id': 'project', + 'display_name': None, 'display_description': 'lun1', + 'volume_type_id': None} + snapshot = {'name': 'snapshot1', 'size': 2, 'volume_name': 'lun1', + 'volume_size': 2, 'project_id': 'project', + 'display_name': None, 'display_description': 'lun1', + 'volume_type_id': None} + snapshot_fail = {'name': 'snapshot2', 'size': 2, 'volume_name': 'lun1', + 'volume_size': 1, 'project_id': 'project'} + volume_sec = {'name': 'vol_snapshot', 'size': 2, 'volume_name': 'lun1', + 'os_type': 'linux', 'provider_location': 'lun1', + 'id': 'lun1', 'provider_auth': None, 'project_id': 'project', + 'display_name': None, 'display_description': 'lun1', + 'volume_type_id': None} + volume_clone = {'name': 'cl_sm', 'size': 3, 'volume_name': 'lun1', + 'os_type': 'linux', 'provider_location': 'cl_sm', + 'id': 'lun1', 'provider_auth': None, + 'project_id': 'project', 'display_name': None, + 'display_description': 'lun1', + 'volume_type_id': None} + volume_clone_large = {'name': 'cl_lg', 'size': 6, 'volume_name': 'lun1', + 'os_type': 'linux', 'provider_location': 'cl_lg', + 'id': 'lun1', 'provider_auth': None, + 'project_id': 'project', 'display_name': None, + 'display_description': 'lun1', + 'volume_type_id': None} + connector = {'initiator': 'iqn.1993-08.org.debian:01:10'} + vol_fail = {'name': 'lun_fail', 'size': 10000, 'volume_name': 'lun1', + 'os_type': 'linux', 'provider_location': 'lun1', + 'id': 'lun1', 'provider_auth': None, 'project_id': 'project', + 'display_name': None, 'display_description': 'lun1', + 'volume_type_id': None} + vol1 = ssc_utils.NetAppVolume('lun1', 'openstack') + vol1.state['vserver_root'] = False + vol1.state['status'] = 'online' + vol1.state['junction_active'] = True + vol1.space['size_avl_bytes'] = '4000000000' + vol1.space['size_total_bytes'] = '5000000000' + vol1.space['space-guarantee-enabled'] = False + vol1.space['space-guarantee'] = 'file' + vol1.space['thin_provisioned'] = True + vol1.mirror['mirrored'] = True + vol1.qos['qos_policy_group'] = None + vol1.aggr['name'] = 'aggr1' + vol1.aggr['junction'] = '/vola' + vol1.sis['dedup'] = True + vol1.sis['compression'] = True + vol1.aggr['raid_type'] = 'raiddp' + vol1.aggr['ha_policy'] = 'cfo' + vol1.aggr['disk_type'] = 'SSD' + ssc_map = {'mirrored': set([vol1]), 'dedup': 
set([vol1]), + 'compression': set([vol1]), + 'thin': set([vol1]), 'all': set([vol1])} def setUp(self): - super(NetAppDriverTestCase, self).setUp() - driver = netapp.NetAppISCSIDriver() - self.stubs.Set(httplib, 'HTTPConnection', FakeHTTPConnection) - driver._create_client(wsdl_url='http://localhost:8088/dfm.wsdl', - login='root', password='password', - hostname='localhost', port=8088, cache=False) - driver._set_storage_service(self.STORAGE_SERVICE) - driver._set_storage_service_prefix(self.STORAGE_SERVICE_PREFIX) - driver._set_vfiler('') + super(NetAppDirectCmodeISCSIDriverTestCase, self).setUp() + self._custom_setup() + + def _custom_setup(self): + self.stubs.Set( + ssc_utils, 'refresh_cluster_ssc', + lambda a, b, c, synchronous: None) + configuration = self._set_config(create_configuration()) + driver = common.NetAppDriver(configuration=configuration) + self.stubs.Set(httplib, 'HTTPConnection', + FakeDirectCmodeHTTPConnection) + driver.do_setup(context='') + client = driver.client + client.set_api_version(1, 15) self.driver = driver + self.driver.ssc_vols = self.ssc_map + + def _set_config(self, configuration): + configuration.netapp_storage_protocol = 'iscsi' + configuration.netapp_login = 'admin' + configuration.netapp_password = 'pass' + configuration.netapp_server_hostname = '127.0.0.1' + configuration.netapp_transport_type = 'http' + configuration.netapp_server_port = '80' + configuration.netapp_vserver = 'openstack' + return configuration def test_connect(self): self.driver.check_for_setup_error() def test_create_destroy(self): - self.driver._discover_luns() - self.driver._provision(self.VOLUME_NAME, None, self.PROJECT_ID, - self.VOLUME_TYPE, self.VOLUME_SIZE) - self.driver._remove_destroy(self.VOLUME_NAME, self.PROJECT_ID) + self.driver.create_volume(self.volume) + self.driver.delete_volume(self.volume) + + def test_create_vol_snapshot_destroy(self): + self.driver.create_volume(self.volume) + self.driver.create_snapshot(self.snapshot) + self.driver.create_volume_from_snapshot(self.volume_sec, self.snapshot) + self.driver.delete_snapshot(self.snapshot) + self.driver.delete_volume(self.volume) def test_map_unmap(self): - self.driver._discover_luns() - self.driver._provision(self.VOLUME_NAME, None, self.PROJECT_ID, - self.VOLUME_TYPE, self.VOLUME_SIZE) - volume = {'name': self.VOLUME_NAME, 'project_id': self.PROJECT_ID, - 'id': 0, 'provider_auth': None} - updates = self.driver._get_export(volume) + self.driver.create_volume(self.volume) + updates = self.driver.create_export(None, self.volume) self.assertTrue(updates['provider_location']) - volume['provider_location'] = updates['provider_location'] - connector = {'initiator': self.INITIATOR} - connection_info = self.driver.initialize_connection(volume, connector) + self.volume['provider_location'] = updates['provider_location'] + + connection_info = self.driver.initialize_connection(self.volume, + self.connector) self.assertEqual(connection_info['driver_volume_type'], 'iscsi') properties = connection_info['data'] - self.driver.terminate_connection(volume, connector) - self.driver._remove_destroy(self.VOLUME_NAME, self.PROJECT_ID) + if not properties: + raise AssertionError('Target portal is none') + self.driver.terminate_connection(self.volume, self.connector) + self.driver.delete_volume(self.volume) + def test_cloned_volume_destroy(self): + self.driver.create_volume(self.volume) + self.driver.create_cloned_volume(self.snapshot, self.volume) + self.driver.delete_volume(self.snapshot) + self.driver.delete_volume(self.volume) 
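None of the tests above touch the network: httplib.HTTPConnection is stubbed out with FakeDirectCmodeHTTPConnection, the fake server handler writes a canned XML body, and FakeHttplibSocket feeds that body back so httplib.HTTPResponse can parse it as if it had arrived over a real socket. A minimal self-contained sketch of that last step, in the same Python 2 idiom as this patch (the canned response below is illustrative, not a real ONTAP reply):

    import httplib
    import StringIO


    class FakeSocket(object):
        """Feed a canned byte string to httplib.HTTPResponse."""

        def __init__(self, response_text):
            self._buffer = StringIO.StringIO(response_text)

        def makefile(self, mode, bufsize=None):
            # httplib.HTTPResponse only ever calls makefile('rb', 0).
            return self._buffer


    canned = ('HTTP/1.1 200 OK\r\n'
              'Content-Type: text/xml; charset=utf-8\r\n'
              'Content-Length: 11\r\n'
              '\r\n'
              'hello world')

    resp = httplib.HTTPResponse(FakeSocket(canned))
    resp.begin()                         # parse status line and headers
    assert resp.status == 200
    assert resp.read() == 'hello world'

Because the canned text goes through the real httplib parsing machinery, the driver under test exercises the same response-handling path it would use against a live filer.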
-WSDL_HEADER_CMODE = """ - -""" + def test_map_by_creating_igroup(self): + self.driver.create_volume(self.volume) + updates = self.driver.create_export(None, self.volume) + self.assertTrue(updates['provider_location']) + self.volume['provider_location'] = updates['provider_location'] + connector_new = {'initiator': 'iqn.1993-08.org.debian:01:1001'} + connection_info = self.driver.initialize_connection(self.volume, + connector_new) + self.assertEqual(connection_info['driver_volume_type'], 'iscsi') + properties = connection_info['data'] + if not properties: + raise AssertionError('Target portal is none') + + def test_fail_create_vol(self): + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.create_volume, self.vol_fail) -WSDL_TYPES_CMODE = """ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - """ - -WSDL_TRAILER_CMODE = """ - - - - -""" - -RESPONSE_PREFIX_CMODE = """ - -""" - -RESPONSE_SUFFIX_CMODE = """""" - -CMODE_APIS = ['ProvisionLun', 'DestroyLun', 'CloneLun', 'MapLun', 'UnmapLun', - 'ListLuns', 'GetLunTargetDetails'] - - -class FakeCMODEServerHandler(BaseHTTPServer.BaseHTTPRequestHandler): - """HTTP handler that fakes enough stuff to allow the driver to run""" + def test_vol_stats(self): + self.driver.get_volume_stats(refresh=True) + + def test_create_vol_snapshot_diff_size_resize(self): + self.driver.create_volume(self.volume) + self.driver.create_snapshot(self.snapshot) + self.driver.create_volume_from_snapshot( + self.volume_clone, self.snapshot) + self.driver.delete_snapshot(self.snapshot) + self.driver.delete_volume(self.volume) + + def test_create_vol_snapshot_diff_size_subclone(self): + self.driver.create_volume(self.volume) + self.driver.create_snapshot(self.snapshot) + self.driver.create_volume_from_snapshot( + self.volume_clone_large, self.snapshot) + self.driver.delete_snapshot(self.snapshot) + self.driver.delete_volume(self.volume) + + +class NetAppDriverNegativeTestCase(test.TestCase): + """Test case for NetAppDriver""" + + def setUp(self): + super(NetAppDriverNegativeTestCase, self).setUp() + + def test_incorrect_family(self): + configuration = create_configuration() + configuration.netapp_storage_family = 'xyz_abc' + try: + driver = common.NetAppDriver(configuration=configuration) + raise AssertionError('Wrong storage family is getting accepted.') + except exception.InvalidInput: + pass + + def test_incorrect_protocol(self): + configuration = create_configuration() + configuration.netapp_storage_family = 'ontap' + configuration.netapp_storage_protocol = 'ontap' + try: + driver = common.NetAppDriver(configuration=configuration) + raise AssertionError('Wrong storage protocol is getting accepted.') + except exception.InvalidInput: + pass + + def test_non_netapp_driver(self): + configuration = create_configuration() + common.netapp_unified_plugin_registry['test_family'] =\ + {'iscsi': 'cinder.volume.drivers.arbitrary.IscsiDriver'} + configuration.netapp_storage_family = 'test_family' + configuration.netapp_storage_protocol = 'iscsi' + try: + driver = common.NetAppDriver(configuration=configuration) + raise AssertionError('Non NetApp driver is getting instantiated.') + except exception.InvalidInput: + pass + finally: + common.netapp_unified_plugin_registry.pop('test_family') + + +class 
FakeDirect7MODEServerHandler(FakeHTTPRequestHandler): + """HTTP handler that fakes enough stuff to allow the driver to run.""" def do_GET(s): """Respond to a GET request.""" - if '/ntap_cloud.wsdl' != s.path: + if '/servlets/netapp.servlets.admin.XMLrequest_filer' not in s.path: s.send_response(404) s.end_headers return s.send_response(200) - s.send_header("Content-Type", "application/wsdl+xml") + s.send_header("Content-Type", "text/xml; charset=utf-8") s.end_headers() out = s.wfile - out.write(WSDL_HEADER_CMODE) - out.write(WSDL_TYPES_CMODE) - for api in CMODE_APIS: - out.write('' % api) - out.write('' % api) - out.write('') - out.write('' % api) - out.write('' % api) - out.write('') - out.write('') - for api in CMODE_APIS: - out.write('' % api) - out.write('' % api) - out.write('' % api) - out.write('') - out.write('') - out.write('') - out.write('') - for api in CMODE_APIS: - out.write('' % api) - out.write('') - out.write('') - out.write('') - out.write('') - out.write('') - out.write(WSDL_TRAILER_CMODE) + out.write('' + '') def do_POST(s): """Respond to a POST request.""" - if '/ws/ntapcloud' != s.path: + if '/servlets/netapp.servlets.admin.XMLrequest_filer' not in s.path: s.send_response(404) s.end_headers return request_xml = s.rfile.read(int(s.headers['Content-Length'])) - ntap_ns = 'http://cloud.netapp.com/' - nsmap = {'soapenv': 'http://schemas.xmlsoap.org/soap/envelope/', - 'na': ntap_ns} root = etree.fromstring(request_xml) - - body = root.xpath('/soapenv:Envelope/soapenv:Body', - namespaces=nsmap)[0] - request = body.getchildren()[0] + body = [x for x in root.iterchildren()] + request = body[0] tag = request.tag - if not tag.startswith('{' + ntap_ns + '}'): - s.send_response(500) - s.end_headers - return - api = tag[(2 + len(ntap_ns)):] - if 'ProvisionLun' == api: - body = """ - lun120 - 1d9c006c-a406-42f6-a23f-5ed7a6dc33e3 - OsType - linux - """ - elif 'DestroyLun' == api: - body = """""" - elif 'CloneLun' == api: - body = """ - lun22 - 98ea1791d228453899d422b4611642c3 - OsType - linux - """ - elif 'MapLun' == api: - body = """""" - elif 'Unmap' == api: - body = """""" - elif 'ListLuns' == api: - body = """ - - lun1 - 20 - asdjdnsd - - """ - elif 'GetLunTargetDetails' == api: - body = """ - -
1.2.3.4
- 3260 - 1000 - iqn.199208.com.netapp:sn.123456789 - 0 -
-
""" + api = etree.QName(tag).localname or tag + if 'lun-list-info' == api: + body = """ + false + false + + + /vol/vol1/lun1 + 20971520 + true + false + false + false + none + linux + e867d844-c2c0-11e0-9282-00a09825b3b5 + P3lgP4eTyaNl + 512 + true + 0 + indeterminate + + + /vol/vol1/lun1 + 20971520 + true + false + false + false + none + linux + 8e1e9284-c288-11e0-9282-00a09825b3b5 + P3lgP4eTc3lp + 512 + true + 0 + indeterminate + + + """ + elif 'volume-list-info' == api: + body = """ + + + vol0 + 019c8f7a-9243-11e0-9281-00a09825b3b5 + flex + 32_bit + online + 576914493440 + 13820354560 + 563094110208 + 2 + 20 + 140848264 + 0 + 0 + 0 + 0 + 20907162 + 7010 + 518 + 31142 + 31142 + 0 + false + aggr0 + + + disabled + idle + idle for 70:36:44 + regular + sun-sat@0 + Mon Aug 8 09:34:15 EST 2011 + + Mon Aug 8 09:34:15 EST 2011 + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + + false + + volume + true + 14 + raid_dp,sis + block + true + false + false + false + false + unmirrored + 3 + 1 + + + /aggr0/plex0 + true + false + + + + + vol1 + 2d50ecf4-c288-11e0-9282-00a09825b3b5 + flex + 32_bit + online + 42949672960 + 44089344 + 42905583616 + 0 + 20 + 10485760 + 8192 + 8192 + 0 + 0 + 1556480 + 110 + 504 + 31142 + 31142 + 0 + false + aggr1 + + + disabled + idle + idle for 89:19:59 + regular + sun-sat@0 + Sun Aug 7 14:51:00 EST 2011 + + Sun Aug 7 14:51:00 EST 2011 + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + + false + + volume + true + 7 + raid4,sis + block + true + false + false + false + false + unmirrored + 2 + 1 + + + /aggr1/plex0 + true + false + + + + + """ + elif 'volume-options-list-info' == api: + body = """ + + + snapmirrored + off + + + root + false + + + ha_policy + cfo + + + striping + not_striped + + + compression + off + + + """ + elif 'lun-create-by-size' == api: + body = """ + 22020096""" + elif 'lun-destroy' == api: + body = """""" + elif 'igroup-list-info' == api: + body = """ + + + openstack-8bc96490 + iscsi + b8e1d274-c378-11e0 + linux + 0 + false + + false + false + true + + + + iqn.1993-08.org.debian:01:10 + + + + + iscsi_group + iscsi + ccb8cbe4-c36f + linux + 0 + false + + false + false + true + + + + iqn.1993-08.org.debian:01:10ca + + + + + """ + elif 'lun-map-list-info' == api: + body = """ + + """ + elif 'lun-map' == api: + body = """1 + + """ + elif 'iscsi-node-get-name' == api: + body = """ + iqn.1992-08.com.netapp:sn.135093938 + """ + elif 'iscsi-portal-list-info' == api: + body = """ + + + 10.61.176.156 + 3260 + 1000 + e0a + + + """ + elif 'igroup-create' == api: + body = """""" + elif 'igroup-add' == api: + body = """""" + elif 'clone-start' == api: + body = """ + + + 2d50ecf4-c288-11e0-9282-00a09825b3b5 + 11 + + + """ + elif 'clone-list-status' == api: + body = """ + + + completed + + + """ + elif 'lun-unmap' == api: + body = """""" + elif 'system-get-ontapi-version' == api: + body = """ + 1 + 8 + """ + elif 'lun-set-space-reservation-info' == api: + body = """""" + elif 'ems-autosupport-log' == api: + body = """""" + elif 'lun-resize' == api: + body = """""" + elif 'lun-get-geometry' == api: + body = """ + 1 + 2 + 8 + 2 + 4 + 5 + """ + elif 'volume-options-list-info' == api: + body = """ + + + + """ + elif 'lun-move' == api: + body = """""" else: - # Unknown API - s.send_response(500) - s.end_headers - return + # Unknown API + s.send_response(500) + s.end_headers + return s.send_response(200) s.send_header("Content-Type", "text/xml; charset=utf-8") s.end_headers() - s.wfile.write(RESPONSE_PREFIX_CMODE) + s.wfile.write(RESPONSE_PREFIX_DIRECT_7MODE) + 
s.wfile.write(RESPONSE_PREFIX_DIRECT) s.wfile.write(body) - s.wfile.write(RESPONSE_SUFFIX_CMODE) + s.wfile.write(RESPONSE_SUFFIX_DIRECT) -class FakeCmodeHTTPConnection(object): +class FakeDirect7modeHTTPConnection(object): """A fake httplib.HTTPConnection for netapp tests Requests made via this connection actually get translated and routed into - the fake Dfm handler above, we then turn the response into + the fake direct handler above, we then turn the response into the httplib.HTTPResponse that the caller expects. """ def __init__(self, host, timeout=None): @@ -1303,8 +1077,8 @@ def request(self, method, path, data=None, headers=None): sock = FakeHttplibSocket(req_str.decode("latin-1").encode("utf-8")) # NOTE(vish): stop the server from trying to look up address from # the fake socket - FakeCMODEServerHandler.address_string = lambda x: '127.0.0.1' - self.app = FakeCMODEServerHandler(sock, '127.0.0.1:8080', None) + FakeDirect7MODEServerHandler.address_string = lambda x: '127.0.0.1' + self.app = FakeDirect7MODEServerHandler(sock, '127.0.0.1:80', None) self.sock = FakeHttplibSocket(sock.result) self.http_response = httplib.HTTPResponse(self.sock) @@ -1320,59 +1094,93 @@ def getresponsebody(self): return self.sock.result -class NetAppCmodeISCSIDriverTestCase(test.TestCase): - """Test case for NetAppISCSIDriver""" - volume = { - 'name': 'lun1', 'size': 1, 'volume_name': 'lun1', - 'os_type': 'linux', 'provider_location': 'lun1', - 'id': 'lun1', 'provider_auth': None, 'project_id': 'project', - 'display_name': None, 'display_description': 'lun1', - 'volume_type_id': None - } - snapshot = { - 'name': 'lun2', 'size': 1, 'volume_name': 'lun1', - 'volume_size': 1, 'project_id': 'project' - } - volume_sec = { - 'name': 'vol_snapshot', 'size': 1, 'volume_name': 'lun1', - 'os_type': 'linux', 'provider_location': 'lun1', - 'id': 'lun1', 'provider_auth': None, 'project_id': 'project', - 'display_name': None, 'display_description': 'lun1', - 'volume_type_id': None - } - +class NetAppDirect7modeISCSIDriverTestCase_NV( + NetAppDirectCmodeISCSIDriverTestCase): + """Test case for NetAppISCSIDriver + No vfiler + """ def setUp(self): - super(NetAppCmodeISCSIDriverTestCase, self).setUp() - driver = netapp.NetAppCmodeISCSIDriver() - self.stubs.Set(httplib, 'HTTPConnection', FakeCmodeHTTPConnection) - driver._create_client(wsdl_url='http://localhost:8080/ntap_cloud.wsdl', - login='root', password='password', - hostname='localhost', port=8080, cache=False) + super(NetAppDirect7modeISCSIDriverTestCase_NV, self).setUp() + + def _custom_setup(self): + configuration = self._set_config(create_configuration()) + driver = common.NetAppDriver(configuration=configuration) + self.stubs.Set(httplib, 'HTTPConnection', + FakeDirect7modeHTTPConnection) + driver.do_setup(context='') + client = driver.client + client.set_api_version(1, 9) self.driver = driver - def test_connect(self): - self.driver.check_for_setup_error() - - def test_create_destroy(self): - self.driver.create_volume(self.volume) - self.driver.delete_volume(self.volume) - - def test_create_vol_snapshot_destroy(self): + def _set_config(self, configuration): + configuration.netapp_storage_family = 'ontap_7mode' + configuration.netapp_storage_protocol = 'iscsi' + configuration.netapp_login = 'admin' + configuration.netapp_password = 'pass' + configuration.netapp_server_hostname = '127.0.0.1' + configuration.netapp_transport_type = 'http' + configuration.netapp_server_port = '80' + return configuration + + def test_create_on_select_vol(self): + 
self.driver.volume_list = ['vol0', 'vol1'] self.driver.create_volume(self.volume) - self.driver.create_snapshot(self.snapshot) - self.driver.create_volume_from_snapshot(self.volume_sec, self.snapshot) - self.driver.delete_snapshot(self.snapshot) self.driver.delete_volume(self.volume) + self.driver.volume_list = [] + + def test_create_fail_on_select_vol(self): + self.driver.volume_list = ['vol2', 'vol3'] + success = False + try: + self.driver.create_volume(self.volume) + except exception.VolumeBackendAPIException: + success = True + pass + finally: + self.driver.volume_list = [] + if not success: + raise AssertionError('Failed creating on selected volumes') + + def test_check_for_setup_error_version(self): + drv = self.driver + delattr(drv.client, '_api_version') + + # check exception raises when version not found + self.assertRaises(exception.VolumeBackendAPIException, + drv.check_for_setup_error) + + drv.client.set_api_version(1, 8) + + # check exception raises when not supported version + self.assertRaises(exception.VolumeBackendAPIException, + drv.check_for_setup_error) + + +class NetAppDirect7modeISCSIDriverTestCase_WV( + NetAppDirect7modeISCSIDriverTestCase_NV): + """Test case for NetAppISCSIDriver + With vfiler + """ + def setUp(self): + super(NetAppDirect7modeISCSIDriverTestCase_WV, self).setUp() + + def _custom_setup(self): + configuration = self._set_config(create_configuration()) + driver = common.NetAppDriver(configuration=configuration) + self.stubs.Set(httplib, 'HTTPConnection', + FakeDirect7modeHTTPConnection) + driver.do_setup(context='') + client = driver.client + client.set_api_version(1, 9) + self.driver = driver - def test_map_unmap(self): - self.driver.create_volume(self.volume) - updates = self.driver.create_export(None, self.volume) - self.assertTrue(updates['provider_location']) - self.volume['provider_location'] = updates['provider_location'] - connector = {'initiator': 'init1'} - connection_info = self.driver.initialize_connection(self.volume, - connector) - self.assertEqual(connection_info['driver_volume_type'], 'iscsi') - properties = connection_info['data'] - self.driver.terminate_connection(self.volume, connector) - self.driver.delete_volume(self.volume) + def _set_config(self, configuration): + configuration.netapp_storage_family = 'ontap_7mode' + configuration.netapp_storage_protocol = 'iscsi' + configuration.netapp_login = 'admin' + configuration.netapp_password = 'pass' + configuration.netapp_server_hostname = '127.0.0.1' + configuration.netapp_transport_type = 'http' + configuration.netapp_server_port = '80' + configuration.netapp_vfiler = 'openstack' + return configuration diff --git a/cinder/tests/test_netapp_nfs.py b/cinder/tests/test_netapp_nfs.py old mode 100644 new mode 100755 index 1ebd842cf6..235d57f6a5 --- a/cinder/tests/test_netapp_nfs.py +++ b/cinder/tests/test_netapp_nfs.py @@ -1,4 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2012 NetApp, Inc. # All Rights Reserved. @@ -14,22 +13,37 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
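Every NFS driver test below follows mox's record/replay/verify cycle: stub a method out, record the expected calls and their canned return values, switch to replay mode, exercise the code under test, then verify that each recorded expectation was met. A minimal sketch of the cycle (the Greeter class is a hypothetical stand-in, not part of this patch):

    import mox


    class Greeter(object):
        def greet(self, name):
            return 'hello %s' % name


    m = mox.Mox()
    g = Greeter()
    m.StubOutWithMock(g, 'greet')       # replace the method with a mock
    g.greet('world').AndReturn('hi')    # record one expected call
    m.ReplayAll()                       # switch from record to replay mode

    assert g.greet('world') == 'hi'     # exercise the code under test

    m.VerifyAll()                       # fails if an expectation went unmet
    m.UnsetStubs()                      # restore the original method

In the test cases in this module the base cinder.test.TestCase provides the Mox instance as self.mox, so the tests call ReplayAll and VerifyAll on it directly and stub teardown is handled for them.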
-"""Unit tests for the NetApp-specific NFS driver module (netapp_nfs)""" +"""Unit tests for the NetApp-specific NFS driver module.""" + +from lxml import etree +import mock +import mox +from mox import IgnoreArg +from mox import IsA +import os from cinder import context -from cinder import test from cinder import exception +from cinder.image import image_utils +from cinder.openstack.common import log as logging +from cinder import test +from cinder.volume import configuration as conf +from cinder.volume.drivers.netapp import api +from cinder.volume.drivers.netapp import nfs as netapp_nfs -from cinder.volume import netapp_nfs -from cinder.volume import netapp -from cinder.volume import nfs -from mox import IsA -from mox import IgnoreArg -from mox import MockObject -import mox -import suds -import types +from oslo.config import cfg +CONF = cfg.CONF + +LOG = logging.getLogger(__name__) + + +def create_configuration(): + configuration = mox.MockObject(conf.Configuration) + configuration.append_config_values(mox.IgnoreArg()) + configuration.nfs_mount_point_base = '/mnt/test' + configuration.nfs_mount_options = None + return configuration class FakeVolume(object): @@ -41,6 +55,9 @@ def __init__(self, size=0): def __getitem__(self, key): return self.__dict__[key] + def __setitem__(self, key, val): + self.__dict__[key] = val + class FakeSnapshot(object): def __init__(self, volume_size=0): @@ -55,9 +72,10 @@ def __getitem__(self, key): return self.__dict__[key] -class FakeResponce(object): +class FakeResponse(object): def __init__(self, status): - """ + """Initialize FakeResponse. + :param status: Either 'failed' or 'passed' """ self.Status = status @@ -66,37 +84,126 @@ def __init__(self, status): self.Reason = 'Sample error' -class NetappNfsDriverTestCase(test.TestCase): - """Test case for NetApp specific NFS clone driver""" - +class NetappDirectCmodeNfsDriverTestCase(test.TestCase): + """Test direct NetApp C Mode driver.""" def setUp(self): - self._driver = netapp_nfs.NetAppNFSDriver() - self._mox = mox.Mox() + super(NetappDirectCmodeNfsDriverTestCase, self).setUp() + self._custom_setup() + + def test_create_snapshot(self): + """Test snapshot can be created and deleted.""" + mox = self.mox + drv = self._driver + + mox.StubOutWithMock(drv, '_clone_volume') + drv._clone_volume(IgnoreArg(), IgnoreArg(), IgnoreArg()) + mox.ReplayAll() + + drv.create_snapshot(FakeSnapshot()) + + mox.VerifyAll() + + def test_create_volume_from_snapshot(self): + """Tests volume creation from snapshot.""" + drv = self._driver + mox = self.mox + volume = FakeVolume(1) + snapshot = FakeSnapshot(1) + + location = '127.0.0.1:/nfs' + expected_result = {'provider_location': location} + mox.StubOutWithMock(drv, '_clone_volume') + mox.StubOutWithMock(drv, '_get_volume_location') + mox.StubOutWithMock(drv, 'local_path') + mox.StubOutWithMock(drv, '_discover_file_till_timeout') + mox.StubOutWithMock(drv, '_set_rw_permissions_for_all') + drv._clone_volume(IgnoreArg(), IgnoreArg(), IgnoreArg()) + drv._get_volume_location(IgnoreArg()).AndReturn(location) + drv.local_path(IgnoreArg()).AndReturn('/mnt') + drv._discover_file_till_timeout(IgnoreArg()).AndReturn(True) + drv._set_rw_permissions_for_all(IgnoreArg()) + + mox.ReplayAll() + + loc = drv.create_volume_from_snapshot(volume, snapshot) + + self.assertEqual(loc, expected_result) - def tearDown(self): - self._mox.UnsetStubs() + mox.VerifyAll() + + def _prepare_delete_snapshot_mock(self, snapshot_exists): + drv = self._driver + mox = self.mox + + mox.StubOutWithMock(drv, 
'_get_provider_location') + mox.StubOutWithMock(drv, '_volume_not_present') + mox.StubOutWithMock(drv, '_post_prov_deprov_in_ssc') + + if snapshot_exists: + mox.StubOutWithMock(drv, '_execute') + mox.StubOutWithMock(drv, '_get_volume_path') + drv._get_provider_location(IgnoreArg()) + drv._get_provider_location(IgnoreArg()) + drv._volume_not_present(IgnoreArg(), IgnoreArg())\ + .AndReturn(not snapshot_exists) + + if snapshot_exists: + drv._get_volume_path(IgnoreArg(), IgnoreArg()) + drv._execute('rm', None, run_as_root=True) + + drv._post_prov_deprov_in_ssc(IgnoreArg()) + + mox.ReplayAll() + + return mox + + def test_delete_existing_snapshot(self): + drv = self._driver + mox = self._prepare_delete_snapshot_mock(True) + + drv.delete_snapshot(FakeSnapshot()) + + mox.VerifyAll() + + def test_delete_missing_snapshot(self): + drv = self._driver + mox = self._prepare_delete_snapshot_mock(False) + + drv.delete_snapshot(FakeSnapshot()) + + mox.VerifyAll() + + def _custom_setup(self): + kwargs = {} + kwargs['netapp_mode'] = 'proxy' + kwargs['configuration'] = create_configuration() + self._driver = netapp_nfs.NetAppDirectCmodeNfsDriver(**kwargs) def test_check_for_setup_error(self): - mox = self._mox + mox = self.mox drv = self._driver required_flags = [ - 'netapp_wsdl_url', - 'netapp_login', - 'netapp_password', - 'netapp_server_hostname', - 'netapp_server_port' - ] + 'netapp_transport_type', + 'netapp_login', + 'netapp_password', + 'netapp_server_hostname', + 'netapp_server_port'] + # set required flags + for flag in required_flags: + setattr(drv.configuration, flag, None) # check exception raises when flags are not set self.assertRaises(exception.CinderException, drv.check_for_setup_error) # set required flags for flag in required_flags: - setattr(netapp.FLAGS, flag, 'val') + setattr(drv.configuration, flag, 'val') + setattr(drv, 'ssc_enabled', False) - mox.StubOutWithMock(nfs.NfsDriver, 'check_for_setup_error') - nfs.NfsDriver.check_for_setup_error() + mox.StubOutWithMock(netapp_nfs.NetAppDirectNfsDriver, '_check_flags') + + netapp_nfs.NetAppDirectNfsDriver._check_flags() mox.ReplayAll() drv.check_for_setup_error() @@ -105,17 +212,19 @@ def test_check_for_setup_error(self): # restore initial FLAGS for flag in required_flags: - delattr(netapp.FLAGS, flag) + delattr(drv.configuration, flag) def test_do_setup(self): - mox = self._mox + mox = self.mox drv = self._driver - mox.StubOutWithMock(drv, 'check_for_setup_error') - mox.StubOutWithMock(netapp_nfs.NetAppNFSDriver, '_get_client') + mox.StubOutWithMock(netapp_nfs.NetAppNFSDriver, 'do_setup') + mox.StubOutWithMock(drv, '_get_client') + mox.StubOutWithMock(drv, '_do_custom_setup') - drv.check_for_setup_error() - netapp_nfs.NetAppNFSDriver._get_client() + netapp_nfs.NetAppNFSDriver.do_setup(IgnoreArg()) + drv._get_client() + drv._do_custom_setup(IgnoreArg()) mox.ReplayAll() @@ -123,51 +232,789 @@ def test_do_setup(self): mox.VerifyAll() - def test_create_snapshot(self): - """Test snapshot can be created and deleted""" - mox = self._mox + def _prepare_clone_mock(self, status): drv = self._driver + mox = self.mox + + volume = FakeVolume() + setattr(volume, 'provider_location', '127.0.0.1:/nfs') + + mox.StubOutWithMock(drv, '_get_host_ip') + mox.StubOutWithMock(drv, '_get_export_path') + mox.StubOutWithMock(drv, '_get_if_info_by_ip') + mox.StubOutWithMock(drv, '_get_vol_by_junc_vserver') + mox.StubOutWithMock(drv, '_clone_file') + mox.StubOutWithMock(drv, '_post_prov_deprov_in_ssc') + + drv._get_host_ip(IgnoreArg()).AndReturn('127.0.0.1') + 
drv._get_export_path(IgnoreArg()).AndReturn('/nfs') + drv._get_if_info_by_ip('127.0.0.1').AndReturn( + self._prepare_info_by_ip_response()) + drv._get_vol_by_junc_vserver('openstack', '/nfs').AndReturn('nfsvol') + drv._clone_file('nfsvol', 'volume_name', 'clone_name', + 'openstack') + drv._post_prov_deprov_in_ssc(IgnoreArg()) + return mox + + def _prepare_info_by_ip_response(self): + res = """ + +
127.0.0.1
+ up + fas3170rre-cmode-01 + e1b-1165 + + nfs + + none + + disabled + data + fas3170rre-cmode-01 + e1b-1165 + nfs_data1 + false + true + 255.255.255.0 + 24 + up + data + c10.63.165.0/24 + disabled + openstack +
""" + response_el = etree.XML(res) + return api.NaElement(response_el).get_children() + + def test_clone_volume(self): + drv = self._driver + mox = self._prepare_clone_mock('pass') - mox.StubOutWithMock(drv, '_clone_volume') - drv._clone_volume(IgnoreArg(), IgnoreArg(), IgnoreArg()) mox.ReplayAll() - drv.create_snapshot(FakeSnapshot()) + volume_name = 'volume_name' + clone_name = 'clone_name' + volume_id = volume_name + str(hash(volume_name)) + share = 'ip:/share' + + drv._clone_volume(volume_name, clone_name, volume_id, share) mox.VerifyAll() - def test_create_volume_from_snapshot(self): - """Tests volume creation from snapshot""" + def test_register_img_in_cache_noshare(self): + volume = {'id': '1', 'name': 'testvol'} + volume['provider_location'] = '10.61.170.1:/share/path' drv = self._driver - mox = self._mox - volume = FakeVolume(1) - snapshot = FakeSnapshot(2) + mox = self.mox + mox.StubOutWithMock(drv, '_do_clone_rel_img_cache') - self.assertRaises(exception.CinderException, - drv.create_volume_from_snapshot, - volume, - snapshot) + drv._do_clone_rel_img_cache('testvol', 'img-cache-12345', + '10.61.170.1:/share/path', + 'img-cache-12345') - snapshot = FakeSnapshot(1) + mox.ReplayAll() + drv._register_image_in_cache(volume, '12345') + mox.VerifyAll() - location = '127.0.0.1:/nfs' - expected_result = {'provider_location': location} + def test_register_img_in_cache_with_share(self): + volume = {'id': '1', 'name': 'testvol'} + volume['provider_location'] = '10.61.170.1:/share/path' + drv = self._driver + mox = self.mox + mox.StubOutWithMock(drv, '_do_clone_rel_img_cache') + + drv._do_clone_rel_img_cache('testvol', 'img-cache-12345', + '10.61.170.1:/share/path', + 'img-cache-12345') + + mox.ReplayAll() + drv._register_image_in_cache(volume, '12345') + mox.VerifyAll() + + def test_find_image_in_cache_no_shares(self): + drv = self._driver + drv._mounted_shares = [] + result = drv._find_image_in_cache('image_id') + if not result: + pass + else: + self.fail('Return result is unexpected') + + def test_find_image_in_cache_shares(self): + drv = self._driver + mox = self.mox + drv._mounted_shares = ['testshare'] + mox.StubOutWithMock(drv, '_get_mount_point_for_share') + mox.StubOutWithMock(os.path, 'exists') + + drv._get_mount_point_for_share('testshare').AndReturn('/mnt') + os.path.exists('/mnt/img-cache-id').AndReturn(True) + mox.ReplayAll() + result = drv._find_image_in_cache('id') + (share, file_name) = result[0] + mox.VerifyAll() + drv._mounted_shares.remove('testshare') + + if (share == 'testshare' and file_name == 'img-cache-id'): + pass + else: + LOG.warn(_("Share %(share)s and file name %(file_name)s") + % {'share': share, 'file_name': file_name}) + self.fail('Return result is unexpected') + + def test_find_old_cache_files_notexists(self): + drv = self._driver + mox = self.mox + cmd = ['find', '/mnt', '-maxdepth', '1', '-name', + 'img-cache*', '-amin', '+720'] + setattr(drv.configuration, 'expiry_thres_minutes', 720) + mox.StubOutWithMock(drv, '_get_mount_point_for_share') + mox.StubOutWithMock(drv, '_execute') + + drv._get_mount_point_for_share(IgnoreArg()).AndReturn('/mnt') + drv._execute(*cmd, run_as_root=True).AndReturn((None, '')) + mox.ReplayAll() + res = drv._find_old_cache_files('share') + mox.VerifyAll() + if len(res) == 0: + pass + else: + self.fail('No files expected but got return values.') + + def test_find_old_cache_files_exists(self): + drv = self._driver + mox = self.mox + cmd = ['find', '/mnt', '-maxdepth', '1', '-name', + 'img-cache*', '-amin', '+720'] + 
setattr(drv.configuration, 'expiry_thres_minutes', '720') + files = '/mnt/img-id1\n/mnt/img-id2\n' + r_files = ['img-id1', 'img-id2'] + mox.StubOutWithMock(drv, '_get_mount_point_for_share') + mox.StubOutWithMock(drv, '_execute') + mox.StubOutWithMock(drv, '_shortlist_del_eligible_files') + + drv._get_mount_point_for_share('share').AndReturn('/mnt') + drv._execute(*cmd, run_as_root=True).AndReturn((files, None)) + drv._shortlist_del_eligible_files( + IgnoreArg(), r_files).AndReturn(r_files) + mox.ReplayAll() + res = drv._find_old_cache_files('share') + mox.VerifyAll() + if len(res) == len(r_files): + for f in res: + r_files.remove(f) + else: + self.fail('Returned files not same as expected.') + + def test_delete_files_till_bytes_free_success(self): + drv = self._driver + mox = self.mox + files = [('img-cache-1', 230), ('img-cache-2', 380)] + mox.StubOutWithMock(drv, '_get_mount_point_for_share') + mox.StubOutWithMock(drv, '_delete_file') + + drv._get_mount_point_for_share(IgnoreArg()).AndReturn('/mnt') + drv._delete_file('/mnt/img-cache-2').AndReturn(True) + drv._delete_file('/mnt/img-cache-1').AndReturn(True) + mox.ReplayAll() + drv._delete_files_till_bytes_free(files, 'share', bytes_to_free=1024) + mox.VerifyAll() + + def test_clean_image_cache_exec(self): + drv = self._driver + mox = self.mox + drv.configuration.thres_avl_size_perc_start = 20 + drv.configuration.thres_avl_size_perc_stop = 50 + drv._mounted_shares = ['testshare'] + + mox.StubOutWithMock(drv, '_find_old_cache_files') + mox.StubOutWithMock(drv, '_delete_files_till_bytes_free') + mox.StubOutWithMock(drv, '_get_capacity_info') + + drv._get_capacity_info('testshare').AndReturn((100, 19, 81)) + drv._find_old_cache_files('testshare').AndReturn(['f1', 'f2']) + drv._delete_files_till_bytes_free( + ['f1', 'f2'], 'testshare', bytes_to_free=31) + mox.ReplayAll() + drv._clean_image_cache() + mox.VerifyAll() + drv._mounted_shares.remove('testshare') + if not drv.cleaning: + pass + else: + self.fail('Clean image cache failed.') + + def test_clean_image_cache_noexec(self): + drv = self._driver + mox = self.mox + drv.configuration.thres_avl_size_perc_start = 20 + drv.configuration.thres_avl_size_perc_stop = 50 + drv._mounted_shares = ['testshare'] + + mox.StubOutWithMock(drv, '_get_capacity_info') + + drv._get_capacity_info('testshare').AndReturn((100, 30, 70)) + mox.ReplayAll() + drv._clean_image_cache() + mox.VerifyAll() + drv._mounted_shares.remove('testshare') + if not drv.cleaning: + pass + else: + self.fail('Clean image cache failed.') + + def test_clone_image_fromcache(self): + drv = self._driver + mox = self.mox + volume = {'name': 'vol', 'size': '20'} + mox.StubOutWithMock(drv, '_find_image_in_cache') + mox.StubOutWithMock(drv, '_do_clone_rel_img_cache') + mox.StubOutWithMock(drv, '_post_clone_image') + mox.StubOutWithMock(drv, '_is_share_vol_compatible') + + drv._find_image_in_cache(IgnoreArg()).AndReturn( + [('share', 'file_name')]) + drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(True) + drv._do_clone_rel_img_cache('file_name', 'vol', 'share', 'file_name') + drv._post_clone_image(volume) + + mox.ReplayAll() + drv.clone_image(volume, ('image_location', None), 'image_id', {}) + mox.VerifyAll() + + def get_img_info(self, format): + class img_info(object): + def __init__(self, fmt): + self.file_format = fmt + + return img_info(format) + + def test_clone_image_cloneableshare_nospace(self): + drv = self._driver + mox = self.mox + volume = {'name': 'vol', 'size': '20'} + mox.StubOutWithMock(drv, 
+    def test_clone_image_cloneableshare_nospace(self):
+        drv = self._driver
+        mox = self.mox
+        volume = {'name': 'vol', 'size': '20'}
+        mox.StubOutWithMock(drv, '_find_image_in_cache')
+        mox.StubOutWithMock(drv, '_is_cloneable_share')
+        mox.StubOutWithMock(drv, '_is_share_vol_compatible')
+
+        drv._find_image_in_cache(IgnoreArg()).AndReturn([])
+        drv._is_cloneable_share(IgnoreArg()).AndReturn('127.0.0.1:/share')
+        drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(False)
+
+        mox.ReplayAll()
+        (prop, cloned) = drv.clone_image(
+            volume, ('nfs://127.0.0.1:/share/img-id', None), 'image_id', {})
+        mox.VerifyAll()
+        if not cloned and not prop['provider_location']:
+            pass
+        else:
+            self.fail('Expected not cloned, got cloned.')
+
+    def test_clone_image_cloneableshare_raw(self):
+        drv = self._driver
+        mox = self.mox
+        volume = {'name': 'vol', 'size': '20'}
+        mox.StubOutWithMock(drv, '_find_image_in_cache')
+        mox.StubOutWithMock(drv, '_is_cloneable_share')
+        mox.StubOutWithMock(drv, '_get_mount_point_for_share')
+        mox.StubOutWithMock(image_utils, 'qemu_img_info')
         mox.StubOutWithMock(drv, '_clone_volume')
-        mox.StubOutWithMock(drv, '_get_volume_location')
-        drv._clone_volume(IgnoreArg(), IgnoreArg(), IgnoreArg())
-        drv._get_volume_location(IgnoreArg()).AndReturn(location)
+        mox.StubOutWithMock(drv, '_discover_file_till_timeout')
+        mox.StubOutWithMock(drv, '_set_rw_permissions_for_all')
+        mox.StubOutWithMock(drv, '_resize_image_file')
+        mox.StubOutWithMock(drv, '_is_share_vol_compatible')
+
+        drv._find_image_in_cache(IgnoreArg()).AndReturn([])
+        drv._is_cloneable_share(IgnoreArg()).AndReturn('127.0.0.1:/share')
+        drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(True)
+        drv._get_mount_point_for_share(IgnoreArg()).AndReturn('/mnt')
+        image_utils.qemu_img_info('/mnt/img-id').AndReturn(
+            self.get_img_info('raw'))
+        drv._clone_volume(
+            'img-id', 'vol', share='127.0.0.1:/share', volume_id=None)
+        drv._get_mount_point_for_share(IgnoreArg()).AndReturn('/mnt')
+        drv._discover_file_till_timeout(IgnoreArg()).AndReturn(True)
+        drv._set_rw_permissions_for_all('/mnt/vol')
+        drv._resize_image_file({'name': 'vol'}, IgnoreArg())
         mox.ReplayAll()
+        drv.clone_image(
+            volume, ('nfs://127.0.0.1:/share/img-id', None), 'image_id', {})
+        mox.VerifyAll()

-        loc = drv.create_volume_from_snapshot(volume, snapshot)
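The raw case above and the notraw case below differ only at one branch point: the `qemu_img_info` probe decides whether the image is NFS-cloned in place or first converted to raw. A rough distillation of that decision, with injected callables and illustrative names rather than the driver's real helpers:

```python
def place_image_on_share(qemu_img_info, clone_volume, convert_image,
                         img_path, vol_path, vol_name, share):
    # Illustrative sketch of the behaviour asserted around this hunk;
    # the helper names are stand-ins, not the driver's actual API.
    if qemu_img_info(img_path).file_format == 'raw':
        # A raw image can be cloned directly on the NFS share.
        clone_volume(img_path, vol_name, share=share, volume_id=None)
    else:
        # Anything else is converted to raw at the volume's path first.
        convert_image(img_path, vol_path, 'raw')
```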
+    def test_clone_image_cloneableshare_notraw(self):
+        drv = self._driver
+        mox = self.mox
+        volume = {'name': 'vol', 'size': '20'}
+        mox.StubOutWithMock(drv, '_find_image_in_cache')
+        mox.StubOutWithMock(drv, '_is_cloneable_share')
+        mox.StubOutWithMock(drv, '_get_mount_point_for_share')
+        mox.StubOutWithMock(image_utils, 'qemu_img_info')
+        mox.StubOutWithMock(drv, '_clone_volume')
+        mox.StubOutWithMock(drv, '_discover_file_till_timeout')
+        mox.StubOutWithMock(drv, '_set_rw_permissions_for_all')
+        mox.StubOutWithMock(drv, '_resize_image_file')
+        mox.StubOutWithMock(image_utils, 'convert_image')
+        mox.StubOutWithMock(drv, '_register_image_in_cache')
+        mox.StubOutWithMock(drv, '_is_share_vol_compatible')
+
+        drv._find_image_in_cache(IgnoreArg()).AndReturn([])
+        drv._is_cloneable_share('nfs://127.0.0.1/share/img-id').AndReturn(
+            '127.0.0.1:/share')
+        drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(True)
+        drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt')
+        image_utils.qemu_img_info('/mnt/img-id').AndReturn(
+            self.get_img_info('notraw'))
+        image_utils.convert_image(IgnoreArg(), IgnoreArg(), 'raw')
+        image_utils.qemu_img_info('/mnt/vol').AndReturn(
+            self.get_img_info('raw'))
+        drv._register_image_in_cache(IgnoreArg(), IgnoreArg())
+        drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt')
+        drv._discover_file_till_timeout(IgnoreArg()).AndReturn(True)
+        drv._set_rw_permissions_for_all('/mnt/vol')
+        drv._resize_image_file({'name': 'vol'}, IgnoreArg())
+
+        mox.ReplayAll()
+        drv.clone_image(
+            volume, ('nfs://127.0.0.1/share/img-id', None), 'image_id', {})
+        mox.VerifyAll()
+
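The next two cases, file_not_discovered and resizefails, assert a single failure contract: if the cloned file never appears on the share or the resize step raises, the partial file is removed and `clone_image` reports an unbootable volume with no provider location. A minimal sketch of that contract, with the discovery, resize and delete steps injected (names are illustrative, not the driver's):

```python
import os


def finish_clone(volume, local_path, discover, resize, delete_file):
    # Illustrative sketch only; the helper names do not come from
    # the driver.
    try:
        if not discover(local_path):
            raise RuntimeError('cloned file never became visible')
        resize(volume, volume['size'])
        return {'provider_location': volume.get('provider_location'),
                'bootable': True}, True
    except Exception:
        # On any post-clone failure, remove the partial file and
        # report the volume as not cloned.
        if os.path.exists(local_path):
            delete_file(local_path)
        return {'provider_location': None, 'bootable': False}, False
```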
+    def test_clone_image_file_not_discovered(self):
+        drv = self._driver
+        mox = self.mox
+        volume = {'name': 'vol', 'size': '20'}
+        mox.StubOutWithMock(drv, '_find_image_in_cache')
+        mox.StubOutWithMock(drv, '_is_cloneable_share')
+        mox.StubOutWithMock(drv, '_get_mount_point_for_share')
+        mox.StubOutWithMock(image_utils, 'qemu_img_info')
+        mox.StubOutWithMock(drv, '_clone_volume')
+        mox.StubOutWithMock(drv, '_discover_file_till_timeout')
+        mox.StubOutWithMock(image_utils, 'convert_image')
+        mox.StubOutWithMock(drv, '_register_image_in_cache')
+        mox.StubOutWithMock(drv, '_is_share_vol_compatible')
+        mox.StubOutWithMock(drv, 'local_path')
+        mox.StubOutWithMock(os.path, 'exists')
+        mox.StubOutWithMock(drv, '_delete_file')
+
+        drv._find_image_in_cache(IgnoreArg()).AndReturn([])
+        drv._is_cloneable_share('nfs://127.0.0.1/share/img-id').AndReturn(
+            '127.0.0.1:/share')
+        drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(True)
+        drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt')
+        image_utils.qemu_img_info('/mnt/img-id').AndReturn(
+            self.get_img_info('notraw'))
+        image_utils.convert_image(IgnoreArg(), IgnoreArg(), 'raw')
+        image_utils.qemu_img_info('/mnt/vol').AndReturn(
+            self.get_img_info('raw'))
+        drv._register_image_in_cache(IgnoreArg(), IgnoreArg())
+        drv.local_path(IgnoreArg()).AndReturn('/mnt/vol')
+        drv._discover_file_till_timeout(IgnoreArg()).AndReturn(False)
+        drv.local_path(IgnoreArg()).AndReturn('/mnt/vol')
+        os.path.exists('/mnt/vol').AndReturn(True)
+        drv._delete_file('/mnt/vol')
+
+        mox.ReplayAll()
+        vol_dict, result = drv.clone_image(
+            volume, ('nfs://127.0.0.1/share/img-id', None), 'image_id', {})
+        mox.VerifyAll()
+        self.assertFalse(result)
+        self.assertFalse(vol_dict['bootable'])
+        self.assertIsNone(vol_dict['provider_location'])
+
+    def test_clone_image_resizefails(self):
+        drv = self._driver
+        mox = self.mox
+        volume = {'name': 'vol', 'size': '20'}
+        mox.StubOutWithMock(drv, '_find_image_in_cache')
+        mox.StubOutWithMock(drv, '_is_cloneable_share')
+        mox.StubOutWithMock(drv, '_get_mount_point_for_share')
+        mox.StubOutWithMock(image_utils, 'qemu_img_info')
+        mox.StubOutWithMock(drv, '_clone_volume')
+        mox.StubOutWithMock(drv, '_discover_file_till_timeout')
+        mox.StubOutWithMock(drv, '_set_rw_permissions_for_all')
+        mox.StubOutWithMock(drv, '_resize_image_file')
+        mox.StubOutWithMock(image_utils, 'convert_image')
+        mox.StubOutWithMock(drv, '_register_image_in_cache')
+        mox.StubOutWithMock(drv, '_is_share_vol_compatible')
+        mox.StubOutWithMock(drv, 'local_path')
+        mox.StubOutWithMock(os.path, 'exists')
+        mox.StubOutWithMock(drv, '_delete_file')
+
+        drv._find_image_in_cache(IgnoreArg()).AndReturn([])
+        drv._is_cloneable_share('nfs://127.0.0.1/share/img-id').AndReturn(
+            '127.0.0.1:/share')
+        drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(True)
+        drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt')
+        image_utils.qemu_img_info('/mnt/img-id').AndReturn(
+            self.get_img_info('notraw'))
+        image_utils.convert_image(IgnoreArg(), IgnoreArg(), 'raw')
+        image_utils.qemu_img_info('/mnt/vol').AndReturn(
+            self.get_img_info('raw'))
+        drv._register_image_in_cache(IgnoreArg(), IgnoreArg())
+        drv.local_path(IgnoreArg()).AndReturn('/mnt/vol')
+        drv._discover_file_till_timeout(IgnoreArg()).AndReturn(True)
+        drv._set_rw_permissions_for_all('/mnt/vol')
+        drv._resize_image_file(
+            IgnoreArg(), IgnoreArg()).AndRaise(exception.InvalidResults())
+        drv.local_path(IgnoreArg()).AndReturn('/mnt/vol')
+        os.path.exists('/mnt/vol').AndReturn(True)
+        drv._delete_file('/mnt/vol')
+
+        mox.ReplayAll()
+        vol_dict, result = drv.clone_image(
+            volume, ('nfs://127.0.0.1/share/img-id', None), 'image_id', {})
+        mox.VerifyAll()
+        self.assertFalse(result)
+        self.assertFalse(vol_dict['bootable'])
+        self.assertIsNone(vol_dict['provider_location'])
+
+    def test_is_cloneable_share_badformats(self):
+        drv = self._driver
+        strgs = ['10.61.666.22:/share/img',
+                 'nfs://10.61.666.22:/share/img',
+                 'nfs://10.61.666.22//share/img',
+                 'nfs://com.netapp.com:/share/img',
+                 'nfs://com.netapp.com//share/img',
+                 'com.netapp.com://share/im\g',
+                 'http://com.netapp.com://share/img',
+                 'nfs://com.netapp.com:/share/img',
+                 'nfs://com.netapp.com:8080//share/img',
+                 'nfs://com.netapp.com//img',
+                 'nfs://[ae::sr::ty::po]/img']
+        for strg in strgs:
+            res = drv._is_cloneable_share(strg)
+            if res:
+                msg = 'Invalid format matched for url %s.' % strg
+                self.fail(msg)
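Together with the goodformat cases that follow, these strings fix the accepted URL shape: nfs://host[:port]/<export path>/<file>, with host a hostname or IPv4 address. The driver's actual pattern is not shown in this hunk, but a regex consistent with every string in these tests would look like:

```python
import re

# Illustrative pattern only, inferred from the test data above and below;
# not the driver's real regex.
NFS_URL = re.compile(r'^nfs://(?P<host>[A-Za-z0-9.\-]+)'
                     r'(?::(?P<port>\d+))?'
                     r'(?P<path>(?:/[^/]+)*)'
                     r'/(?P<file>[^/]+)$')

assert NFS_URL.match('nfs://10.61.222.333/share/img')
assert NFS_URL.match('nfs://com.netapp:8080/share/img')
assert not NFS_URL.match('nfs://10.61.666.22//share/img')
assert not NFS_URL.match('10.61.666.22:/share/img')
```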
+    def test_is_cloneable_share_goodformat1(self):
+        drv = self._driver
+        mox = self.mox
+        strg = 'nfs://10.61.222.333/share/img'
+        mox.StubOutWithMock(drv, '_check_share_in_use')
+        drv._check_share_in_use(IgnoreArg(), IgnoreArg()).AndReturn('share')
+        mox.ReplayAll()
+        drv._is_cloneable_share(strg)
+        mox.VerifyAll()
+
+    def test_is_cloneable_share_goodformat2(self):
+        drv = self._driver
+        mox = self.mox
+        strg = 'nfs://10.61.222.333:8080/share/img'
+        mox.StubOutWithMock(drv, '_check_share_in_use')
+        drv._check_share_in_use(IgnoreArg(), IgnoreArg()).AndReturn('share')
+        mox.ReplayAll()
+        drv._is_cloneable_share(strg)
+        mox.VerifyAll()
+
+    def test_is_cloneable_share_goodformat3(self):
+        drv = self._driver
+        mox = self.mox
+        strg = 'nfs://com.netapp:8080/share/img'
+        mox.StubOutWithMock(drv, '_check_share_in_use')
+        drv._check_share_in_use(IgnoreArg(), IgnoreArg()).AndReturn('share')
+        mox.ReplayAll()
+        drv._is_cloneable_share(strg)
+        mox.VerifyAll()
+
+    def test_is_cloneable_share_goodformat4(self):
+        drv = self._driver
+        mox = self.mox
+        strg = 'nfs://netapp.com/share/img'
+        mox.StubOutWithMock(drv, '_check_share_in_use')
+        drv._check_share_in_use(IgnoreArg(), IgnoreArg()).AndReturn('share')
+        mox.ReplayAll()
+        drv._is_cloneable_share(strg)
+        mox.VerifyAll()
+
+    def test_is_cloneable_share_goodformat5(self):
+        drv = self._driver
+        mox = self.mox
+        strg = 'nfs://netapp.com/img'
+        mox.StubOutWithMock(drv, '_check_share_in_use')
+        drv._check_share_in_use(IgnoreArg(), IgnoreArg()).AndReturn('share')
+        mox.ReplayAll()
+        drv._is_cloneable_share(strg)
+        mox.VerifyAll()
+
+    def test_check_share_in_use_no_conn(self):
+        drv = self._driver
+        share = drv._check_share_in_use(None, '/dir')
+        if share:
+            self.fail('Unexpected share detected.')
+
+    def test_check_share_in_use_invalid_conn(self):
+        drv = self._driver
+        share = drv._check_share_in_use(':8989', '/dir')
+        if share:
+            self.fail('Unexpected share detected.')

-        self.assertEquals(loc, expected_result)

+    def test_check_share_in_use_incorrect_host(self):
+        drv = self._driver
+        mox = self.mox
+        mox.StubOutWithMock(drv, '_resolve_hostname')
+        drv._resolve_hostname(IgnoreArg()).AndRaise(Exception())
+        mox.ReplayAll()
+        share = drv._check_share_in_use('incorrect:8989', '/dir')
+        mox.VerifyAll()
+        if share:
+            self.fail('Unexpected share detected.')

+    def test_check_share_in_use_success(self):
+        drv = self._driver
+        mox = self.mox
+        drv._mounted_shares = ['127.0.0.1:/dir/share']
+        mox.StubOutWithMock(drv, '_resolve_hostname')
+        mox.StubOutWithMock(drv, '_share_match_for_ip')
+        drv._resolve_hostname(IgnoreArg()).AndReturn('10.22.33.44')
+        drv._share_match_for_ip(
+            '10.22.33.44', ['127.0.0.1:/dir/share']).AndReturn('share')
+        mox.ReplayAll()
+        share = drv._check_share_in_use('127.0.0.1:8989', '/dir/share')
         mox.VerifyAll()
+        if not share:
+            self.fail('Expected share not detected')
+
+    def test_construct_image_url_loc(self):
+        drv = self._driver
+        img_loc = (None,
+                   [{'metadata':
+                     {'share_location': 'nfs://host/path',
+                      'mount_point': '/opt/stack/data/glance',
+                      'type': 'nfs'},
+                     'url': 'file:///opt/stack/data/glance/image-id'}])
+        location = drv._construct_image_nfs_url(img_loc)
+        if location != "nfs://host/path/image-id":
+            self.fail("Unexpected direct url.")
+
+    def test_construct_image_url_direct(self):
+        drv = self._driver
+        img_loc = ("nfs://host/path/image-id", None)
+        location = drv._construct_image_nfs_url(img_loc)
+        if location != "nfs://host/path/image-id":
+            self.fail("Unexpected direct url.")
+
+
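Unlike the mox-based cases above, the class below drives its assertions with the mock library: collaborators are replaced with `mock.Mock`, the code under test runs, and the calls are asserted afterwards. The basic pattern it relies on, in a self-contained toy form:

```python
import mock


class Driver(object):
    def copy(self, src, dst):
        return self._do_copy(src, dst)


drv = Driver()
# Replace the collaborator, exercise the code, then assert on the call;
# no record/replay phases are involved.
drv._do_copy = mock.Mock(return_value=True)
assert drv.copy('a', 'b') is True
drv._do_copy.assert_called_once_with('a', 'b')
```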
+class NetappDirectCmodeNfsDriverOnlyTestCase(test.TestCase):
+    """Test direct NetApp C Mode driver only and not inherit."""
+
+    def setUp(self):
+        super(NetappDirectCmodeNfsDriverOnlyTestCase, self).setUp()
+        self._custom_setup()
+
+    def _custom_setup(self):
+        kwargs = {}
+        kwargs['netapp_mode'] = 'proxy'
+        kwargs['configuration'] = create_configuration()
+        self._driver = netapp_nfs.NetAppDirectCmodeNfsDriver(**kwargs)
+        self._driver.ssc_enabled = True
+        self._driver.configuration.netapp_copyoffload_tool_path = 'cof_path'
+
+    def test_copy_img_to_vol_copyoffload_success(self):
+        drv = self._driver
+        context = object()
+        volume = {'id': 'vol_id', 'name': 'name'}
+        image_service = object()
+        image_id = 'image_id'
+        drv._try_copyoffload = mock.Mock()
+        drv._get_provider_location = mock.Mock(return_value='share')
+        drv._get_vol_for_share = mock.Mock(return_value='vol')
+        drv._update_stale_vols = mock.Mock()
+
+        drv.copy_image_to_volume(context, volume, image_service, image_id)
+        drv._try_copyoffload.assert_called_once_with(context, volume,
+                                                     image_service,
+                                                     image_id)
+        drv._update_stale_vols.assert_called_once_with('vol')
+
+    def test_copy_img_to_vol_copyoffload_failure(self):
+        drv = self._driver
+        context = object()
+        volume = {'id': 'vol_id', 'name': 'name'}
+        image_service = object()
+        image_id = 'image_id'
+        drv._try_copyoffload = mock.Mock(side_effect=Exception())
+        netapp_nfs.NetAppNFSDriver.copy_image_to_volume = mock.Mock()
+        drv._get_provider_location = mock.Mock(return_value='share')
+        drv._get_vol_for_share = mock.Mock(return_value='vol')
+        drv._update_stale_vols = mock.Mock()
+
+        drv.copy_image_to_volume(context, volume, image_service, image_id)
+        drv._try_copyoffload.assert_called_once_with(context, volume,
+                                                     image_service,
+                                                     image_id)
+        netapp_nfs.NetAppNFSDriver.copy_image_to_volume.\
+            assert_called_once_with(context, volume, image_service, image_id)
+        drv._update_stale_vols.assert_called_once_with('vol')
+
+    def test_copyoffload_frm_cache_success(self):
+        drv = self._driver
+        context = object()
+        volume = {'id': 'vol_id', 'name': 'name'}
+        image_service = object()
+        image_id = 'image_id'
+        drv._client = mock.Mock()
+        drv._client.get_api_version = mock.Mock(return_value=(1, 20))
+        drv._find_image_in_cache = mock.Mock(return_value=[('share', 'img')])
+        drv._copy_from_cache = mock.Mock(return_value=True)
+
+        drv._try_copyoffload(context, volume, image_service, image_id)
+        drv._copy_from_cache.assert_called_once_with('cof_path', volume,
+                                                     image_id,
+                                                     [('share', 'img')])
+
+    def test_copyoffload_frm_img_service_success(self):
+        drv = self._driver
+        context = object()
+        volume = {'id': 'vol_id', 'name': 'name'}
+        image_service = object()
+        image_id = 'image_id'
+        drv._client = mock.Mock()
+        drv._client.get_api_version = mock.Mock(return_value=(1, 20))
+        drv._find_image_in_cache = mock.Mock(return_value=[])
+        drv._copy_from_img_service = mock.Mock()
+
+        drv._try_copyoffload(context, volume, image_service, image_id)
+        drv._copy_from_img_service.assert_called_once_with('cof_path', context,
+                                                           volume,
+                                                           image_service,
+                                                           image_id)
+
+    def test_cache_copyoffload_workflow_success(self):
+        drv = self._driver
+        volume = {'id': 'vol_id', 'name': 'name', 'size': 1}
+        image_id = 'image_id'
+        cache_result = [('ip1:/openstack', 'img-cache-imgid')]
+        drv._get_ip_verify_on_cluster = mock.Mock(return_value='ip1')
+        drv._get_host_ip = mock.Mock(return_value='ip2')
+        drv._get_export_path = mock.Mock(return_value='/exp_path')
+        drv._execute = mock.Mock()
+        drv._set_rw_permissions_for_all = 
mock.Mock() + drv.local_path = mock.Mock(return_value='vol_path') + drv._resize_image_file = mock.Mock() + drv._register_image_in_cache = mock.Mock() + + copied = drv._copy_from_cache('cof_path', volume, image_id, + cache_result) + self.assertTrue(copied) + drv._get_ip_verify_on_cluster.assert_any_call('ip1') + drv._get_export_path.assert_called_with('vol_id') + drv._execute.assert_called_once_with('cof_path', 'ip1', 'ip1', + '/openstack/img-cache-imgid', + '/exp_path/name', + run_as_root=False, + check_exit_code=0) + drv.local_path.assert_called_with(volume) + drv._set_rw_permissions_for_all.assert_called_with('vol_path') + drv._resize_image_file.assert_called_with('vol_path', 1) + + def test_img_service_raw_copyoffload_workflow_success(self): + class img_info(object): + + def __init__(self): + self.file_format = 'raw' + + drv = self._driver + volume = {'id': 'vol_id', 'name': 'name', 'size': 1} + image_id = 'image_id' + context = object() + image_service = mock.Mock() + image_service.get_location = mock.Mock( + return_value=('nfs://ip1/openstack/img', None)) + drv._check_get_nfs_path_segs = mock.Mock(return_value= + ('ip1', '/openstack')) + + drv._get_ip_verify_on_cluster = mock.Mock(return_value='ip1') + drv._get_host_ip = mock.Mock(return_value='ip2') + drv._get_export_path = mock.Mock(return_value='/exp_path') + drv._get_provider_location = mock.Mock(return_value='share') + drv._execute = mock.Mock() + drv._get_mount_point_for_share = mock.Mock(return_value='mnt_point') + img_inf = img_info() + image_utils.qemu_img_info = mock.Mock(return_value=img_inf) + drv._check_volume_can_hold_img = mock.Mock() + drv._set_rw_permissions_for_all = mock.Mock() + drv.local_path = mock.Mock(return_value='vol_path') + drv._resize_image_file = mock.Mock() + drv._move_nfs_file = mock.Mock(return_value=True) + drv._delete_file = mock.Mock() + + drv._copy_from_img_service('cof_path', context, volume, image_service, + image_id) + drv._get_ip_verify_on_cluster.assert_any_call('ip1') + drv._get_export_path.assert_called_with('vol_id') + drv._check_volume_can_hold_img.assert_called_with(volume, img_inf) + + assert drv._execute.call_count == 2 + drv.local_path.assert_called_with(volume) + drv._set_rw_permissions_for_all.assert_called_with('vol_path') + drv._resize_image_file.assert_called_with('vol_path', 1) + + def test_img_service_qcow2_copyoffload_workflow_success(self): + class img_info(object): + + def __init__(self): + self.file_format = 'qcow2' + + def __getattribute__(self, name): + if name == 'file_format': + if object.__getattribute__(self, name) == 'qcow2': + self.file_format = 'raw' + return 'qcow2' + else: + return 'raw' + + drv = self._driver + volume = {'id': 'vol_id', 'name': 'name', 'size': 1} + image_id = 'image_id' + context = object() + image_service = mock.Mock() + image_service.get_location = mock.Mock( + return_value=('nfs://ip1/openstack/img', None)) + os.path.exists = mock.Mock(return_value=True) + drv._check_get_nfs_path_segs = mock.Mock(return_value= + ('ip1', '/openstack')) + + drv._get_ip_verify_on_cluster = mock.Mock(return_value='ip1') + drv._get_host_ip = mock.Mock(return_value='ip2') + drv._get_export_path = mock.Mock(return_value='/exp_path') + drv._get_provider_location = mock.Mock(return_value='share') + drv._execute = mock.Mock() + drv._get_mount_point_for_share = mock.Mock(return_value='mnt_point') + img_inf = img_info() + image_utils.qemu_img_info = mock.Mock(return_value=img_inf) + image_utils.convert_image = mock.Mock() + drv._check_volume_can_hold_img = mock.Mock() + 
drv._set_rw_permissions_for_all = mock.Mock()
+        drv.local_path = mock.Mock(return_value='vol_path')
+        drv._resize_image_file = mock.Mock()
+        drv._move_nfs_file = mock.Mock(return_value=True)
+        drv._delete_file = mock.Mock()
+
+        drv._copy_from_img_service('cof_path', context, volume, image_service,
+                                   image_id)
+        drv._get_ip_verify_on_cluster.assert_any_call('ip1')
+        drv._get_export_path.assert_called_with('vol_id')
+        drv._check_volume_can_hold_img.assert_called_with(volume, img_inf)
+        assert image_utils.convert_image.call_count == 1
+        LOG.info(_("call count %s"), drv._execute.call_count)
+        assert drv._execute.call_count == 2
+        assert drv._delete_file.call_count == 2
+        drv.local_path.assert_called_with(volume)
+        drv._set_rw_permissions_for_all.assert_called_with('vol_path')
+        drv._resize_image_file.assert_called_with('vol_path', 1)
+
+
+class NetappDirect7modeNfsDriverTestCase(NetappDirectCmodeNfsDriverTestCase):
+    """Test direct NetApp 7-mode driver."""
+    def _custom_setup(self):
+        self._driver = netapp_nfs.NetAppDirect7modeNfsDriver(
+            configuration=create_configuration())

     def _prepare_delete_snapshot_mock(self, snapshot_exists):
         drv = self._driver
-        mox = self._mox
+        mox = self.mox

         mox.StubOutWithMock(drv, '_get_provider_location')
         mox.StubOutWithMock(drv, '_volume_not_present')
@@ -178,7 +1025,7 @@ def _prepare_delete_snapshot_mock(self, snapshot_exists):
         drv._get_provider_location(IgnoreArg())
         drv._volume_not_present(IgnoreArg(), IgnoreArg())\
-            .AndReturn(not snapshot_exists)
+            .AndReturn(not snapshot_exists)

         if snapshot_exists:
             drv._get_volume_path(IgnoreArg(), IgnoreArg())
@@ -188,73 +1035,110 @@ def _prepare_delete_snapshot_mock(self, snapshot_exists):

         return mox

-    def test_delete_existing_snapshot(self):
+    def test_check_for_setup_error_version(self):
         drv = self._driver
-        mox = self._prepare_delete_snapshot_mock(True)
-
-        drv.delete_snapshot(FakeSnapshot())
-
-        mox.VerifyAll()
+        drv._client = api.NaServer("127.0.0.1")

-    def test_delete_missing_snapshot(self):
-        drv = self._driver
-        mox = self._prepare_delete_snapshot_mock(False)
+        # check exception raises when version not found
+        self.assertRaises(exception.VolumeBackendAPIException,
+                          drv.check_for_setup_error)

-        drv.delete_snapshot(FakeSnapshot())
+        drv._client.set_api_version(1, 8)

-        mox.VerifyAll()
+        # check exception raises when not supported version
+        self.assertRaises(exception.VolumeBackendAPIException,
+                          drv.check_for_setup_error)

-    def _prepare_clone_mock(self, status):
+    def test_check_for_setup_error(self):
+        mox = self.mox
         drv = self._driver
-        mox = self._mox
+        drv._client = api.NaServer("127.0.0.1")
+        drv._client.set_api_version(1, 9)
+        required_flags = [
+            'netapp_transport_type',
+            'netapp_login',
+            'netapp_password',
+            'netapp_server_hostname',
+            'netapp_server_port']

-        volume = FakeVolume()
-        setattr(volume, 'provider_location', '127.0.0.1:/nfs')
+        # set required flags
+        for flag in required_flags:
+            setattr(drv.configuration, flag, None)
+        # check exception raises when flags are not set
+        self.assertRaises(exception.CinderException,
+                          drv.check_for_setup_error)

-        drv._client = MockObject(suds.client.Client)
-        drv._client.factory = MockObject(suds.client.Factory)
-        drv._client.service = MockObject(suds.client.ServiceSelector)
+        # set required flags
+        for flag in required_flags:
+            setattr(drv.configuration, flag, 'val')
+
+        mox.ReplayAll()

-        # ApiProxy() method is generated by ServiceSelector at runtime from the
-        # XML, so mocking is impossible. 
- setattr(drv._client.service, - 'ApiProxy', - types.MethodType(lambda *args, **kwargs: FakeResponce(status), - suds.client.ServiceSelector)) - mox.StubOutWithMock(drv, '_get_host_id') - mox.StubOutWithMock(drv, '_get_full_export_path') + drv.check_for_setup_error() - drv._get_host_id(IgnoreArg()).AndReturn('10') - drv._get_full_export_path(IgnoreArg(), IgnoreArg()).AndReturn('/nfs') + mox.VerifyAll() - return mox + # restore initial FLAGS + for flag in required_flags: + delattr(drv.configuration, flag) - def test_successfull_clone_volume(self): + def test_do_setup(self): + mox = self.mox drv = self._driver - mox = self._prepare_clone_mock('passed') + mox.StubOutWithMock(netapp_nfs.NetAppNFSDriver, 'do_setup') + mox.StubOutWithMock(drv, '_get_client') + mox.StubOutWithMock(drv, '_do_custom_setup') + netapp_nfs.NetAppNFSDriver.do_setup(IgnoreArg()) + drv._get_client() + drv._do_custom_setup(IgnoreArg()) mox.ReplayAll() - volume_name = 'volume_name' - clone_name = 'clone_name' - volume_id = volume_name + str(hash(volume_name)) - - drv._clone_volume(volume_name, clone_name, volume_id) + drv.do_setup(IsA(context.RequestContext)) mox.VerifyAll() - def test_failed_clone_volume(self): + def _prepare_clone_mock(self, status): drv = self._driver - mox = self._prepare_clone_mock('failed') + mox = self.mox + + volume = FakeVolume() + setattr(volume, 'provider_location', '127.0.0.1:/nfs') + + mox.StubOutWithMock(drv, '_get_export_ip_path') + mox.StubOutWithMock(drv, '_get_actual_path_for_export') + mox.StubOutWithMock(drv, '_start_clone') + mox.StubOutWithMock(drv, '_wait_for_clone_finish') + if status == 'fail': + mox.StubOutWithMock(drv, '_clear_clone') + + drv._get_export_ip_path( + IgnoreArg(), IgnoreArg()).AndReturn(('127.0.0.1', '/nfs')) + drv._get_actual_path_for_export(IgnoreArg()).AndReturn('/vol/vol1/nfs') + drv._start_clone(IgnoreArg(), IgnoreArg()).AndReturn(('1', '2')) + if status == 'fail': + drv._wait_for_clone_finish('1', '2').AndRaise( + api.NaApiError('error', 'error')) + drv._clear_clone('1') + else: + drv._wait_for_clone_finish('1', '2') + return mox + + def test_clone_volume_clear(self): + drv = self._driver + mox = self._prepare_clone_mock('fail') mox.ReplayAll() volume_name = 'volume_name' clone_name = 'clone_name' volume_id = volume_name + str(hash(volume_name)) - - self.assertRaises(exception.CinderException, - drv._clone_volume, - volume_name, clone_name, volume_id) + try: + drv._clone_volume(volume_name, clone_name, volume_id) + except Exception as e: + if isinstance(e, api.NaApiError): + pass + else: + raise mox.VerifyAll() diff --git a/cinder/tests/test_netapp_ssc.py b/cinder/tests/test_netapp_ssc.py new file mode 100644 index 0000000000..99211269cc --- /dev/null +++ b/cinder/tests/test_netapp_ssc.py @@ -0,0 +1,542 @@ + +# Copyright (c) 2012 NetApp, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+"""Unit tests for the NetApp-specific ssc module.""" + +import BaseHTTPServer +import copy +import httplib +from lxml import etree +from mox import IgnoreArg +import StringIO + +from cinder import exception +from cinder import test +from cinder.volume.drivers.netapp import api +from cinder.volume.drivers.netapp import ssc_utils + + +class FakeHTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler): + """HTTP handler that doesn't spam the log.""" + + def log_message(self, format, *args): + pass + + +class FakeHttplibSocket(object): + """A fake socket implementation for httplib.HTTPResponse.""" + def __init__(self, value): + self._rbuffer = StringIO.StringIO(value) + self._wbuffer = StringIO.StringIO('') + oldclose = self._wbuffer.close + + def newclose(): + self.result = self._wbuffer.getvalue() + oldclose() + self._wbuffer.close = newclose + + def makefile(self, mode, _other): + """Returns the socket's internal buffer""" + if mode == 'r' or mode == 'rb': + return self._rbuffer + if mode == 'w' or mode == 'wb': + return self._wbuffer + + +RESPONSE_PREFIX_DIRECT_CMODE = """ +""" + +RESPONSE_PREFIX_DIRECT = """ +""" + +RESPONSE_SUFFIX_DIRECT = """""" + + +class FakeDirectCMODEServerHandler(FakeHTTPRequestHandler): + """HTTP handler that fakes enough stuff to allow the driver to run.""" + + def do_GET(s): + """Respond to a GET request.""" + if '/servlets/netapp.servlets.admin.XMLrequest_filer' not in s.path: + s.send_response(404) + s.end_headers + return + s.send_response(200) + s.send_header("Content-Type", "text/xml; charset=utf-8") + s.end_headers() + out = s.wfile + out.write('' + '') + + def do_POST(s): + """Respond to a POST request.""" + if '/servlets/netapp.servlets.admin.XMLrequest_filer' not in s.path: + s.send_response(404) + s.end_headers + return + request_xml = s.rfile.read(int(s.headers['Content-Length'])) + root = etree.fromstring(request_xml) + body = [x for x in root.iterchildren()] + request = body[0] + tag = request.tag + localname = etree.QName(tag).localname or tag + if 'volume-get-iter' == localname: + body = """ + + + iscsi + Openstack + aggr0 + + /iscsi + rw + + + 214748364 + 224748364 + enabled + file + + + true + + false + online + false + false + true + + + + + nfsvol + Openstack + + aggr0 + + /nfs + rw + + + 14748364 + 24748364 + enabled + + volume + + + true + + false + online + false + false + true + + + + + nfsvol2 + Openstack + + aggr0 + + /nfs2 + rw + + + 14748364 + 24748364 + enabled + + volume + + + true + + false + online + true + true + true + + + + + nfsvol3 + Openstack + + aggr0 + + /nfs3 + rw + + + enabled + + volume + + + + true + + false + online + false + false + true + + + + 4""" + elif 'aggr-options-list-info' == localname: + body = """ + + + ha_policy + cfo + + + raidtype + raid_dp + + + """ + elif 'sis-get-iter' == localname: + body = """ + + + /vol/iscsi + + true + + enabled + + + """ + elif 'storage-disk-get-iter' == localname: + body = """ + + + + SATA + + + + """ + else: + # Unknown API + s.send_response(500) + s.end_headers + return + s.send_response(200) + s.send_header("Content-Type", "text/xml; charset=utf-8") + s.end_headers() + s.wfile.write(RESPONSE_PREFIX_DIRECT_CMODE) + s.wfile.write(RESPONSE_PREFIX_DIRECT) + s.wfile.write(body) + s.wfile.write(RESPONSE_SUFFIX_DIRECT) + + +class FakeDirectCmodeHTTPConnection(object): + """A fake httplib.HTTPConnection for netapp tests. 
+
+    Requests made via this connection actually get translated and routed into
+    the fake direct handler above, we then turn the response into
+    the httplib.HTTPResponse that the caller expects.
+    """
+    def __init__(self, host, timeout=None):
+        self.host = host
+
+    def request(self, method, path, data=None, headers=None):
+        if not headers:
+            headers = {}
+        req_str = '%s %s HTTP/1.1\r\n' % (method, path)
+        for key, value in headers.iteritems():
+            req_str += "%s: %s\r\n" % (key, value)
+        if data:
+            req_str += '\r\n%s' % data
+
+        # NOTE(vish): normally the http transport normalizes from unicode
+        sock = FakeHttplibSocket(req_str.decode("latin-1").encode("utf-8"))
+        # NOTE(vish): stop the server from trying to look up address from
+        #             the fake socket
+        FakeDirectCMODEServerHandler.address_string = lambda x: '127.0.0.1'
+        self.app = FakeDirectCMODEServerHandler(sock, '127.0.0.1:80', None)
+
+        self.sock = FakeHttplibSocket(sock.result)
+        self.http_response = httplib.HTTPResponse(self.sock)
+
+    def set_debuglevel(self, level):
+        pass
+
+    def getresponse(self):
+        self.http_response.begin()
+        return self.http_response
+
+    def getresponsebody(self):
+        return self.sock.result
+
+
+def createNetAppVolume(**kwargs):
+    vol = ssc_utils.NetAppVolume(kwargs['name'], kwargs['vs'])
+    vol.state['vserver_root'] = kwargs.get('vs_root')
+    vol.state['status'] = kwargs.get('status')
+    vol.state['junction_active'] = kwargs.get('junc_active')
+    vol.space['size_avl_bytes'] = kwargs.get('avl_byt')
+    vol.space['size_total_bytes'] = kwargs.get('total_byt')
+    vol.space['space-guarantee-enabled'] = kwargs.get('sg_enabled')
+    vol.space['space-guarantee'] = kwargs.get('sg')
+    vol.space['thin_provisioned'] = kwargs.get('thin')
+    vol.mirror['mirrored'] = kwargs.get('mirrored')
+    vol.qos['qos_policy_group'] = kwargs.get('qos')
+    vol.aggr['name'] = kwargs.get('aggr_name')
+    vol.aggr['junction'] = kwargs.get('junction')
+    vol.sis['dedup'] = kwargs.get('dedup')
+    vol.sis['compression'] = kwargs.get('compression')
+    vol.aggr['raid_type'] = kwargs.get('raid')
+    vol.aggr['ha_policy'] = kwargs.get('ha')
+    vol.aggr['disk_type'] = kwargs.get('disk')
+    return vol
+
+
+class SscUtilsTestCase(test.TestCase):
+    """Test ssc utils."""
+    vol1 = createNetAppVolume(name='vola', vs='openstack',
+                              vs_root=False, status='online', junc_active=True,
+                              avl_byt='1000', total_byt='1500',
+                              sg_enabled=False,
+                              sg='file', thin=False, mirrored=False,
+                              qos=None, aggr_name='aggr1', junction='/vola',
+                              dedup=False, compression=False,
+                              raid='raiddp', ha='cfo', disk='SSD')
+
+    vol2 = createNetAppVolume(name='volb', vs='openstack',
+                              vs_root=False, status='online', junc_active=True,
+                              avl_byt='2000', total_byt='2500',
+                              sg_enabled=True,
+                              sg='file', thin=True, mirrored=False,
+                              qos=None, aggr_name='aggr2', junction='/volb',
+                              dedup=True, compression=False,
+                              raid='raid4', ha='cfo', disk='SSD')
+
+    vol3 = createNetAppVolume(name='volc', vs='openstack',
+                              vs_root=False, status='online', junc_active=True,
+                              avl_byt='3000', total_byt='3500',
+                              sg_enabled=True,
+                              sg='volume', thin=True, mirrored=False,
+                              qos=None, aggr_name='aggr1', junction='/volc',
+                              dedup=True, compression=True,
+                              raid='raiddp', ha='cfo', disk='SAS')
+
+    vol4 = createNetAppVolume(name='vold', vs='openstack',
+                              vs_root=False, status='online', junc_active=True,
+                              avl_byt='4000', total_byt='4500',
+                              sg_enabled=False,
+                              sg='none', thin=False, mirrored=False,
+                              qos=None, aggr_name='aggr1', junction='/vold',
+                              dedup=False, compression=False,
+                              raid='raiddp', ha='cfo', disk='SSD')
+
+    vol5 = 
createNetAppVolume(name='vole', vs='openstack', + vs_root=False, status='online', junc_active=True, + avl_byt='5000', total_byt='5500', + sg_enabled=True, + sg='none', thin=False, mirrored=True, + qos=None, aggr_name='aggr2', junction='/vole', + dedup=True, compression=False, + raid='raid4', ha='cfo', disk='SAS') + + def setUp(self): + super(SscUtilsTestCase, self).setUp() + self.stubs.Set(httplib, 'HTTPConnection', + FakeDirectCmodeHTTPConnection) + + def test_cl_vols_ssc_all(self): + """Test cluster ssc for all vols.""" + na_server = api.NaServer('127.0.0.1') + vserver = 'openstack' + test_vols = set([copy.deepcopy(self.vol1), + copy.deepcopy(self.vol2), copy.deepcopy(self.vol3)]) + sis = {'vola': {'dedup': False, 'compression': False}, + 'volb': {'dedup': True, 'compression': False}} + mirrored = {'vola': [{'dest_loc': 'openstack1:vol1', + 'rel_type': 'data_protection', + 'mirr_state': 'broken'}, + {'dest_loc': 'openstack2:vol2', + 'rel_type': 'data_protection', + 'mirr_state': 'snapmirrored'}], + 'volb': [{'dest_loc': 'openstack1:vol2', + 'rel_type': 'data_protection', + 'mirr_state': 'broken'}]} + + self.mox.StubOutWithMock(ssc_utils, 'query_cluster_vols_for_ssc') + self.mox.StubOutWithMock(ssc_utils, 'get_sis_vol_dict') + self.mox.StubOutWithMock(ssc_utils, 'get_snapmirror_vol_dict') + self.mox.StubOutWithMock(ssc_utils, 'query_aggr_options') + self.mox.StubOutWithMock(ssc_utils, 'query_aggr_storage_disk') + ssc_utils.query_cluster_vols_for_ssc( + na_server, vserver, None).AndReturn(test_vols) + ssc_utils.get_sis_vol_dict(na_server, vserver, None).AndReturn(sis) + ssc_utils.get_snapmirror_vol_dict(na_server, vserver, None).AndReturn( + mirrored) + raiddp = {'ha_policy': 'cfo', 'raid_type': 'raiddp'} + ssc_utils.query_aggr_options( + na_server, IgnoreArg()).AndReturn(raiddp) + ssc_utils.query_aggr_storage_disk( + na_server, IgnoreArg()).AndReturn('SSD') + raid4 = {'ha_policy': 'cfo', 'raid_type': 'raid4'} + ssc_utils.query_aggr_options( + na_server, IgnoreArg()).AndReturn(raid4) + ssc_utils.query_aggr_storage_disk( + na_server, IgnoreArg()).AndReturn('SAS') + self.mox.ReplayAll() + + res_vols = ssc_utils.get_cluster_vols_with_ssc( + na_server, vserver, volume=None) + + self.mox.VerifyAll() + for vol in res_vols: + if vol.id['name'] == 'volc': + self.assertEqual(vol.sis['compression'], False) + self.assertEqual(vol.sis['dedup'], False) + else: + pass + + def test_cl_vols_ssc_single(self): + """Test cluster ssc for single vol.""" + na_server = api.NaServer('127.0.0.1') + vserver = 'openstack' + test_vols = set([copy.deepcopy(self.vol1)]) + sis = {'vola': {'dedup': False, 'compression': False}} + mirrored = {'vola': [{'dest_loc': 'openstack1:vol1', + 'rel_type': 'data_protection', + 'mirr_state': 'broken'}, + {'dest_loc': 'openstack2:vol2', + 'rel_type': 'data_protection', + 'mirr_state': 'snapmirrored'}]} + + self.mox.StubOutWithMock(ssc_utils, 'query_cluster_vols_for_ssc') + self.mox.StubOutWithMock(ssc_utils, 'get_sis_vol_dict') + self.mox.StubOutWithMock(ssc_utils, 'get_snapmirror_vol_dict') + self.mox.StubOutWithMock(ssc_utils, 'query_aggr_options') + self.mox.StubOutWithMock(ssc_utils, 'query_aggr_storage_disk') + ssc_utils.query_cluster_vols_for_ssc( + na_server, vserver, 'vola').AndReturn(test_vols) + ssc_utils.get_sis_vol_dict( + na_server, vserver, 'vola').AndReturn(sis) + ssc_utils.get_snapmirror_vol_dict( + na_server, vserver, 'vola').AndReturn(mirrored) + raiddp = {'ha_policy': 'cfo', 'raid_type': 'raiddp'} + ssc_utils.query_aggr_options( + na_server, 
'aggr1').AndReturn(raiddp)
+        ssc_utils.query_aggr_storage_disk(na_server, 'aggr1').AndReturn('SSD')
+        self.mox.ReplayAll()
+
+        res_vols = ssc_utils.get_cluster_vols_with_ssc(
+            na_server, vserver, volume='vola')
+
+        self.mox.VerifyAll()
+        self.assertEqual(len(res_vols), 1)
+
+    def test_get_cluster_ssc(self):
+        """Test get cluster ssc map."""
+        na_server = api.NaServer('127.0.0.1')
+        vserver = 'openstack'
+        test_vols = set(
+            [self.vol1, self.vol2, self.vol3, self.vol4, self.vol5])
+
+        self.mox.StubOutWithMock(ssc_utils, 'get_cluster_vols_with_ssc')
+        ssc_utils.get_cluster_vols_with_ssc(
+            na_server, vserver).AndReturn(test_vols)
+        self.mox.ReplayAll()
+
+        res_map = ssc_utils.get_cluster_ssc(na_server, vserver)
+
+        self.mox.VerifyAll()
+        self.assertEqual(len(res_map['mirrored']), 1)
+        self.assertEqual(len(res_map['dedup']), 3)
+        self.assertEqual(len(res_map['compression']), 1)
+        self.assertEqual(len(res_map['thin']), 2)
+        self.assertEqual(len(res_map['all']), 5)
+
+    def test_vols_for_boolean_specs(self):
+        """Test ssc for boolean specs."""
+        test_vols = set(
+            [self.vol1, self.vol2, self.vol3, self.vol4, self.vol5])
+        ssc_map = {'mirrored': set([self.vol1]),
+                   'dedup': set([self.vol1, self.vol2, self.vol3]),
+                   'compression': set([self.vol3, self.vol4]),
+                   'thin': set([self.vol5, self.vol2]), 'all': test_vols}
+        test_map = {'mirrored': ('netapp_mirrored', 'netapp_unmirrored'),
+                    'dedup': ('netapp_dedup', 'netapp_nodedup'),
+                    'compression': ('netapp_compression',
+                                    'netapp_nocompression'),
+                    'thin': ('netapp_thin_provisioned',
+                             'netapp_thick_provisioned')}
+        for type in test_map.keys():
+            # type
+            extra_specs = {test_map[type][0]: 'true'}
+            res = ssc_utils.get_volumes_for_specs(ssc_map, extra_specs)
+            self.assertEqual(len(res), len(ssc_map[type]))
+            # opposite type
+            extra_specs = {test_map[type][1]: 'true'}
+            res = ssc_utils.get_volumes_for_specs(ssc_map, extra_specs)
+            self.assertEqual(len(res), len(ssc_map['all'] - ssc_map[type]))
+            # both types
+            extra_specs =\
+                {test_map[type][0]: 'true', test_map[type][1]: 'true'}
+            res = ssc_utils.get_volumes_for_specs(ssc_map, extra_specs)
+            self.assertEqual(len(res), len(ssc_map['all']))
+
+    def test_vols_for_optional_specs(self):
+        """Test ssc for optional specs."""
+        test_vols =\
+            set([self.vol1, self.vol2, self.vol3, self.vol4, self.vol5])
+        ssc_map = {'mirrored': set([self.vol1]),
+                   'dedup': set([self.vol1, self.vol2, self.vol3]),
+                   'compression': set([self.vol3, self.vol4]),
+                   'thin': set([self.vol5, self.vol2]), 'all': test_vols}
+        extra_specs =\
+            {'netapp_dedup': 'true',
+             'netapp:raid_type': 'raid4', 'netapp:disk_type': 'SSD'}
+        res = ssc_utils.get_volumes_for_specs(ssc_map, extra_specs)
+        self.assertEqual(len(res), 1)
+
+    def test_query_cl_vols_for_ssc(self):
+        na_server = api.NaServer('127.0.0.1')
+        na_server.set_api_version(1, 15)
+        vols = ssc_utils.query_cluster_vols_for_ssc(na_server, 'Openstack')
+        self.assertEqual(len(vols), 2)
+        for vol in vols:
+            if vol.id['name'] in ('iscsi', 'nfsvol'):
+                pass
+            else:
+                raise exception.InvalidVolume('Invalid volume returned.')
+
+    def test_query_aggr_options(self):
+        na_server = api.NaServer('127.0.0.1')
+        aggr_attribs = ssc_utils.query_aggr_options(na_server, 'aggr0')
+        if aggr_attribs:
+            self.assertEqual(aggr_attribs['ha_policy'], 'cfo')
+            self.assertEqual(aggr_attribs['raid_type'], 'raid_dp')
+        else:
+            raise exception.InvalidParameterValue("Incorrect aggr options")
+
+    def test_query_aggr_storage_disk(self):
+        na_server = api.NaServer('127.0.0.1')
+        eff_disk_type = 
ssc_utils.query_aggr_storage_disk(na_server, 'aggr0') + self.assertEqual(eff_disk_type, 'SATA') diff --git a/cinder/tests/test_nexenta.py b/cinder/tests/test_nexenta.py index 5a33318b1c..bc3da91e3a 100644 --- a/cinder/tests/test_nexenta.py +++ b/cinder/tests/test_nexenta.py @@ -1,5 +1,3 @@ -#!/usr/bin/env python -# vim: tabstop=4 shiftwidth=4 softtabstop=4 # # Copyright 2011 Nexenta Systems, Inc. # All Rights Reserved. @@ -22,26 +20,35 @@ import base64 import urllib2 -import cinder.flags -import cinder.test -from cinder.volume import nexenta -from cinder.volume.nexenta import volume -from cinder.volume.nexenta import jsonrpc +import mox as mox_lib -FLAGS = cinder.flags.FLAGS +from cinder import context +from cinder import db +from cinder import test +from cinder import units +from cinder.volume import configuration as conf +from cinder.volume.drivers import nexenta +from cinder.volume.drivers.nexenta import iscsi +from cinder.volume.drivers.nexenta import jsonrpc +from cinder.volume.drivers.nexenta import nfs +from cinder.volume.drivers.nexenta import utils -class TestNexentaDriver(cinder.test.TestCase): +class TestNexentaISCSIDriver(test.TestCase): TEST_VOLUME_NAME = 'volume1' TEST_VOLUME_NAME2 = 'volume2' TEST_SNAPSHOT_NAME = 'snapshot1' TEST_VOLUME_REF = { 'name': TEST_VOLUME_NAME, 'size': 1, + 'id': '1', + 'status': 'available' } TEST_VOLUME_REF2 = { 'name': TEST_VOLUME_NAME2, 'size': 1, + 'id': '2', + 'status': 'in-use' } TEST_SNAPSHOT_REF = { 'name': TEST_SNAPSHOT_NAME, @@ -49,25 +56,32 @@ class TestNexentaDriver(cinder.test.TestCase): } def __init__(self, method): - super(TestNexentaDriver, self).__init__(method) + super(TestNexentaISCSIDriver, self).__init__(method) def setUp(self): - super(TestNexentaDriver, self).setUp() - self.flags( - nexenta_host='1.1.1.1', - nexenta_volume='cinder', - nexenta_target_prefix='iqn:', - nexenta_target_group_prefix='cinder/', - nexenta_blocksize='8K', - nexenta_sparse=True, - ) + super(TestNexentaISCSIDriver, self).setUp() + self.configuration = mox_lib.MockObject(conf.Configuration) + self.configuration.nexenta_host = '1.1.1.1' + self.configuration.nexenta_user = 'admin' + self.configuration.nexenta_password = 'nexenta' + self.configuration.nexenta_volume = 'cinder' + self.configuration.nexenta_rest_port = 2000 + self.configuration.nexenta_rest_protocol = 'http' + self.configuration.nexenta_iscsi_target_portal_port = 3260 + self.configuration.nexenta_target_prefix = 'iqn:' + self.configuration.nexenta_target_group_prefix = 'cinder/' + self.configuration.nexenta_blocksize = '8K' + self.configuration.nexenta_sparse = True + self.configuration.nexenta_rrmgr_compression = 1 + self.configuration.nexenta_rrmgr_tcp_buf_size = 1024 + self.configuration.nexenta_rrmgr_connections = 2 self.nms_mock = self.mox.CreateMockAnything() - for mod in ['volume', 'zvol', 'iscsitarget', + for mod in ['volume', 'zvol', 'iscsitarget', 'appliance', 'stmf', 'scsidisk', 'snapshot']: setattr(self.nms_mock, mod, self.mox.CreateMockAnything()) self.stubs.Set(jsonrpc, 'NexentaJSONProxy', lambda *_, **__: self.nms_mock) - self.drv = volume.NexentaDriver() + self.drv = iscsi.NexentaISCSIDriver(configuration=self.configuration) self.drv.do_setup({}) def test_setup_error(self): @@ -85,13 +99,96 @@ def test_local_path(self): def test_create_volume(self): self.nms_mock.zvol.create('cinder/volume1', '1G', '8K', True) + self.nms_mock.stmf.list_targets() + self.nms_mock.iscsitarget.create_target({'target_name': 'iqn:volume1'}) + self.nms_mock.stmf.list_targetgroups() + 
self.nms_mock.stmf.create_targetgroup('cinder/volume1') + self.nms_mock.stmf.list_targetgroup_members('cinder/volume1') + self.nms_mock.stmf.add_targetgroup_member('cinder/volume1', + 'iqn:volume1') + self.nms_mock.scsidisk.lu_exists('cinder/volume1') + self.nms_mock.scsidisk.create_lu('cinder/volume1', {}) + self.nms_mock.scsidisk.lu_shared('cinder/volume1') + self.nms_mock.scsidisk.add_lun_mapping_entry( + 'cinder/volume1', {'target_group': 'cinder/volume1', 'lun': '0'}) self.mox.ReplayAll() self.drv.create_volume(self.TEST_VOLUME_REF) def test_delete_volume(self): + self.nms_mock.zvol.get_child_props('cinder/volume1', + 'origin').AndReturn({}) + self.nms_mock.zvol.destroy('cinder/volume1', '') + self.mox.ReplayAll() + self.drv.delete_volume(self.TEST_VOLUME_REF) + self.mox.ResetAll() + + c = self.nms_mock.zvol.get_child_props('cinder/volume1', 'origin') + c.AndReturn({'origin': 'cinder/volume0@snapshot'}) + self.nms_mock.zvol.destroy('cinder/volume1', '') + self.mox.ReplayAll() + self.drv.delete_volume(self.TEST_VOLUME_REF) + self.mox.ResetAll() + + c = self.nms_mock.zvol.get_child_props('cinder/volume1', 'origin') + c.AndReturn({'origin': 'cinder/volume0@cinder-clone-snapshot-1'}) self.nms_mock.zvol.destroy('cinder/volume1', '') + self.nms_mock.snapshot.destroy( + 'cinder/volume0@cinder-clone-snapshot-1', '') self.mox.ReplayAll() self.drv.delete_volume(self.TEST_VOLUME_REF) + self.mox.ResetAll() + + def test_create_cloned_volume(self): + vol = self.TEST_VOLUME_REF2 + src_vref = self.TEST_VOLUME_REF + snapshot = { + 'volume_name': src_vref['name'], + 'name': 'cinder-clone-snapshot-%s' % vol['id'], + } + self.nms_mock.zvol.create_snapshot('cinder/%s' % src_vref['name'], + snapshot['name'], '') + self.nms_mock.zvol.clone('cinder/%s@%s' % (src_vref['name'], + snapshot['name']), + 'cinder/%s' % vol['name']) + self.mox.ReplayAll() + self.drv.create_cloned_volume(vol, src_vref) + + def test_migrate_volume(self): + volume = self.TEST_VOLUME_REF + host = { + 'capabilities': { + 'vendor_name': 'Nexenta', + 'location_info': 'NexentaISCSIDriver:1.1.1.1:cinder', + 'free_capacity_gb': 1 + } + } + snapshot = { + 'volume_name': volume['name'], + 'name': 'cinder-migrate-snapshot-%s' % volume['id'], + } + self.nms_mock.appliance.ssh_list_bindings().AndReturn([]) + self.nms_mock.zvol.create_snapshot('cinder/%s' % volume['name'], + snapshot['name'], '') + + src = '%(volume)s/%(zvol)s@%(snapshot)s' % { + 'volume': 'cinder', + 'zvol': volume['name'], + 'snapshot': snapshot['name']} + dst = '1.1.1.1:cinder' + cmd = ' '.join(['rrmgr -s zfs -c 1 -q -e -w 1024 -n 2', src, dst]) + + self.nms_mock.appliance.execute(cmd) + + self.nms_mock.snapshot.destroy('cinder/%(volume)s@%(snapshot)s' % { + 'volume': volume['name'], + 'snapshot': snapshot['name']}, '') + volume_name = 'cinder/%s' % volume['name'] + self.nms_mock.zvol.get_child_props(volume_name, + 'origin').AndReturn(None) + self.nms_mock.zvol.destroy(volume_name, '') + + self.mox.ReplayAll() + self.drv.migrate_volume(None, volume, host) def test_create_snapshot(self): self.nms_mock.zvol.create_snapshot('cinder/volume1', 'snapshot1', '') @@ -108,39 +205,53 @@ def test_delete_snapshot(self): self.nms_mock.snapshot.destroy('cinder/volume1@snapshot1', '') self.mox.ReplayAll() self.drv.delete_snapshot(self.TEST_SNAPSHOT_REF) + self.mox.ResetAll() + + # Check that exception not raised if snapshot does not exist + mock = self.nms_mock.snapshot.destroy('cinder/volume1@snapshot1', '') + mock.AndRaise(nexenta.NexentaException( + 'Snapshot cinder/volume1@snapshot1 does 
not exist')) + self.mox.ReplayAll() + self.drv.delete_snapshot(self.TEST_SNAPSHOT_REF) _CREATE_EXPORT_METHODS = [ + ('stmf', 'list_targets', tuple(), [], False, ), ('iscsitarget', 'create_target', ({'target_name': 'iqn:volume1'},), u'Unable to create iscsi target\n' u' iSCSI target iqn.1986-03.com.sun:02:cinder-volume1 already' - u' configured\n' - u' itadm create-target failed with error 17\n', - ), + u' configured\n' + u' itadm create-target failed with error 17\n', True, ), + ('stmf', 'list_targetgroups', tuple(), [], False, ), ('stmf', 'create_targetgroup', ('cinder/volume1',), u'Unable to create targetgroup: stmfadm: cinder/volume1:' - u' already exists\n', - ), + u' already exists\n', True, ), + ('stmf', 'list_targetgroup_members', ('cinder/volume1', ), [], + False, ), ('stmf', 'add_targetgroup_member', ('cinder/volume1', 'iqn:volume1'), u'Unable to add member to targetgroup: stmfadm:' - u' iqn.1986-03.com.sun:02:cinder-volume1: already exists\n', - ), + u' iqn.1986-03.com.sun:02:cinder-volume1: already exists\n', + True, ), + ('scsidisk', 'lu_exists', ('cinder/volume1', ), 0, False, ), ('scsidisk', 'create_lu', ('cinder/volume1', {}), u"Unable to create lu with zvol 'cinder/volume1':\n" u" sbdadm: filename /dev/zvol/rdsk/cinder/volume1: in use\n", - ), + True, ), + ('scsidisk', 'lu_shared', ('cinder/volume1', ), 0, False, ), ('scsidisk', 'add_lun_mapping_entry', ('cinder/volume1', { - 'target_group': 'cinder/volume1', 'lun': '0'}), + 'target_group': 'cinder/volume1', 'lun': '0'}), u"Unable to add view to zvol 'cinder/volume1' (LUNs in use: ):\n" - u" stmfadm: view entry exists\n", - ), + u" stmfadm: view entry exists\n", True, ), ] - def _stub_export_method(self, module, method, args, error, fail=False): + def _stub_export_method(self, module, method, args, error, raise_exception, + fail=False): m = getattr(self.nms_mock, module) m = getattr(m, method) mock = m(*args) - if fail: + if raise_exception and fail: mock.AndRaise(nexenta.NexentaException(error)) + else: + mock.AndReturn(error) def _stub_all_export_methods(self, fail=False): for params in self._CREATE_EXPORT_METHODS: @@ -150,12 +261,13 @@ def test_create_export(self): self._stub_all_export_methods() self.mox.ReplayAll() retval = self.drv.create_export({}, self.TEST_VOLUME_REF) - self.assertEquals(retval, - {'provider_location': - '%s:%s,1 %s%s' % (FLAGS.nexenta_host, - FLAGS.nexenta_iscsi_target_portal_port, - FLAGS.nexenta_target_prefix, - self.TEST_VOLUME_NAME)}) + location = '%(host)s:%(port)s,1 %(prefix)s%(volume)s 0' % { + 'host': self.configuration.nexenta_host, + 'port': self.configuration.nexenta_iscsi_target_portal_port, + 'prefix': self.configuration.nexenta_target_prefix, + 'volume': self.TEST_VOLUME_NAME + } + self.assertEqual(retval, {'provider_location': location}) def __get_test(i): def _test_create_export_fail(self): @@ -165,11 +277,14 @@ def _test_create_export_fail(self): fail=True) self.mox.ReplayAll() self.assertRaises(nexenta.NexentaException, - self.drv.create_export, {}, self.TEST_VOLUME_REF) + self.drv.create_export, + {}, + self.TEST_VOLUME_REF) return _test_create_export_fail for i in range(len(_CREATE_EXPORT_METHODS)): - locals()['test_create_export_fail_%d' % i] = __get_test(i) + if i % 2: + locals()['test_create_export_fail_%d' % i] = __get_test(i) def test_ensure_export(self): self._stub_all_export_methods(fail=True) @@ -185,8 +300,8 @@ def test_remove_export(self): def test_remove_export_fail_0(self): self.nms_mock.scsidisk.delete_lu('cinder/volume1') - 
self.nms_mock.stmf.destroy_targetgroup('cinder/volume1').AndRaise( - nexenta.NexentaException()) + self.nms_mock.stmf.destroy_targetgroup( + 'cinder/volume1').AndRaise(nexenta.NexentaException()) self.nms_mock.iscsitarget.delete_target('iqn:volume1') self.mox.ReplayAll() self.drv.remove_export({}, self.TEST_VOLUME_REF) @@ -194,26 +309,45 @@ def test_remove_export_fail_0(self): def test_remove_export_fail_1(self): self.nms_mock.scsidisk.delete_lu('cinder/volume1') self.nms_mock.stmf.destroy_targetgroup('cinder/volume1') - self.nms_mock.iscsitarget.delete_target('iqn:volume1').AndRaise( - nexenta.NexentaException()) + self.nms_mock.iscsitarget.delete_target( + 'iqn:volume1').AndRaise(nexenta.NexentaException()) self.mox.ReplayAll() self.drv.remove_export({}, self.TEST_VOLUME_REF) - -class TestNexentaJSONRPC(cinder.test.TestCase): - URL = 'http://example.com/' - URL_S = 'https://example.com/' + def test_get_volume_stats(self): + stats = {'size': '5368709120G', + 'used': '5368709120G', + 'available': '5368709120G', + 'health': 'ONLINE'} + self.nms_mock.volume.get_child_props( + self.configuration.nexenta_volume, + 'health|size|used|available').AndReturn(stats) + self.mox.ReplayAll() + stats = self.drv.get_volume_stats(True) + self.assertEqual(stats['storage_protocol'], 'iSCSI') + self.assertEqual(stats['total_capacity_gb'], 5368709120.0) + self.assertEqual(stats['free_capacity_gb'], 5368709120.0) + self.assertEqual(stats['reserved_percentage'], 0) + self.assertEqual(stats['QoS_support'], False) + + +class TestNexentaJSONRPC(test.TestCase): + HOST = 'example.com' + URL = 'http://%s/' % HOST + URL_S = 'https://%s/' % HOST USER = 'user' PASSWORD = 'password' - HEADERS = {'Authorization': 'Basic %s' % (base64.b64encode( - ':'.join((USER, PASSWORD))),), - 'Content-Type': 'application/json'} + HEADERS = { + 'Authorization': + 'Basic %s' % base64.b64encode('%s:%s' % (USER, PASSWORD)), + 'Content-Type': 'application/json' + } REQUEST = 'the request' def setUp(self): super(TestNexentaJSONRPC, self).setUp() self.proxy = jsonrpc.NexentaJSONProxy( - self.URL, self.USER, self.PASSWORD, auto=True) + 'http', self.HOST, 2000, '/', self.USER, self.PASSWORD, auto=True) self.mox.StubOutWithMock(urllib2, 'Request', True) self.mox.StubOutWithMock(urllib2, 'urlopen') self.resp_mock = self.mox.CreateMockAnything() @@ -222,47 +356,52 @@ def setUp(self): urllib2.urlopen(self.REQUEST).AndReturn(self.resp_mock) def test_call(self): - urllib2.Request(self.URL, - '{"object": null, "params": ["arg1", "arg2"], "method": null}', - self.HEADERS).AndReturn(self.REQUEST) + urllib2.Request( + 'http://%s:2000/' % self.HOST, + '{"object": null, "params": ["arg1", "arg2"], "method": null}', + self.HEADERS).AndReturn(self.REQUEST) self.resp_info_mock.status = '' self.resp_mock.read().AndReturn( - '{"error": null, "result": "the result"}') + '{"error": null, "result": "the result"}') self.mox.ReplayAll() result = self.proxy('arg1', 'arg2') - self.assertEquals("the result", result) + self.assertEqual("the result", result) def test_call_deep(self): - urllib2.Request(self.URL, - '{"object": "obj1.subobj", "params": ["arg1", "arg2"],' - ' "method": "meth"}', - self.HEADERS).AndReturn(self.REQUEST) + urllib2.Request( + 'http://%s:2000/' % self.HOST, + '{"object": "obj1.subobj", "params": ["arg1", "arg2"],' + ' "method": "meth"}', + self.HEADERS).AndReturn(self.REQUEST) self.resp_info_mock.status = '' self.resp_mock.read().AndReturn( '{"error": null, "result": "the result"}') self.mox.ReplayAll() result = 
self.proxy.obj1.subobj.meth('arg1', 'arg2') - self.assertEquals("the result", result) + self.assertEqual("the result", result) def test_call_auto(self): - urllib2.Request(self.URL, - '{"object": null, "params": ["arg1", "arg2"], "method": null}', - self.HEADERS).AndReturn(self.REQUEST) - urllib2.Request(self.URL_S, - '{"object": null, "params": ["arg1", "arg2"], "method": null}', - self.HEADERS).AndReturn(self.REQUEST) + urllib2.Request( + 'http://%s:2000/' % self.HOST, + '{"object": null, "params": ["arg1", "arg2"], "method": null}', + self.HEADERS).AndReturn(self.REQUEST) + urllib2.Request( + 'https://%s:2000/' % self.HOST, + '{"object": null, "params": ["arg1", "arg2"], "method": null}', + self.HEADERS).AndReturn(self.REQUEST) self.resp_info_mock.status = 'EOF in headers' self.resp_mock.read().AndReturn( '{"error": null, "result": "the result"}') urllib2.urlopen(self.REQUEST).AndReturn(self.resp_mock) self.mox.ReplayAll() result = self.proxy('arg1', 'arg2') - self.assertEquals("the result", result) + self.assertEqual("the result", result) def test_call_error(self): - urllib2.Request(self.URL, - '{"object": null, "params": ["arg1", "arg2"], "method": null}', - self.HEADERS).AndReturn(self.REQUEST) + urllib2.Request( + 'http://%s:2000/' % self.HOST, + '{"object": null, "params": ["arg1", "arg2"], "method": null}', + self.HEADERS).AndReturn(self.REQUEST) self.resp_info_mock.status = '' self.resp_mock.read().AndReturn( '{"error": {"message": "the error"}, "result": "the result"}') @@ -271,11 +410,367 @@ def test_call_error(self): self.proxy, 'arg1', 'arg2') def test_call_fail(self): - urllib2.Request(self.URL, - '{"object": null, "params": ["arg1", "arg2"], "method": null}', - self.HEADERS).AndReturn(self.REQUEST) + urllib2.Request( + 'http://%s:2000/' % self.HOST, + '{"object": null, "params": ["arg1", "arg2"], "method": null}', + self.HEADERS).AndReturn(self.REQUEST) self.resp_info_mock.status = 'EOF in headers' self.proxy.auto = False self.mox.ReplayAll() self.assertRaises(jsonrpc.NexentaJSONException, self.proxy, 'arg1', 'arg2') + + +class TestNexentaNfsDriver(test.TestCase): + TEST_EXPORT1 = 'host1:/volumes/stack/share' + TEST_NMS1 = 'http://admin:nexenta@host1:2000' + + TEST_EXPORT2 = 'host2:/volumes/stack/share' + TEST_NMS2 = 'http://admin:nexenta@host2:2000' + + TEST_EXPORT2_OPTIONS = '-o intr' + + TEST_FILE_NAME = 'test.txt' + TEST_SHARES_CONFIG_FILE = '/etc/cinder/nexenta-shares.conf' + + TEST_SHARE_SVC = 'svc:/network/nfs/server:default' + + TEST_SHARE_OPTS = { + 'read_only': '', + 'read_write': '*', + 'recursive': 'true', + 'anonymous_rw': 'true', + 'extra_options': 'anon=0', + 'root': 'nobody' + } + + def _create_volume_db_entry(self): + vol = { + 'id': '1', + 'size': 1, + 'status': 'available', + 'provider_location': self.TEST_EXPORT1 + } + return db.volume_create(self.ctxt, vol)['id'] + + def setUp(self): + super(TestNexentaNfsDriver, self).setUp() + self.ctxt = context.get_admin_context() + self.configuration = mox_lib.MockObject(conf.Configuration) + self.configuration.nexenta_shares_config = None + self.configuration.nexenta_mount_point_base = '$state_path/mnt' + self.configuration.nexenta_sparsed_volumes = True + self.configuration.nexenta_volume_compression = 'on' + self.configuration.nfs_mount_point_base = '/mnt/test' + self.configuration.nfs_mount_options = None + self.configuration.nexenta_nms_cache_volroot = False + self.nms_mock = self.mox.CreateMockAnything() + for mod in ('appliance', 'folder', 'server', 'volume', 'netstorsvc', + 'snapshot'): + 
setattr(self.nms_mock, mod, self.mox.CreateMockAnything()) + self.nms_mock.__hash__ = lambda *_, **__: 1 + self.stubs.Set(jsonrpc, 'NexentaJSONProxy', + lambda *_, **__: self.nms_mock) + self.drv = nfs.NexentaNfsDriver(configuration=self.configuration) + self.drv.shares = {} + self.drv.share2nms = {} + + def test_check_for_setup_error(self): + self.drv.share2nms = { + 'host1:/volumes/stack/share': self.nms_mock + } + + self.nms_mock.server.get_prop('volroot').AndReturn('/volumes') + self.nms_mock.volume.object_exists('stack').AndReturn(True) + self.nms_mock.folder.object_exists('stack/share').AndReturn(True) + share_opts = { + 'read_write': '*', + 'read_only': '', + 'root': 'nobody', + 'extra_options': 'anon=0', + 'recursive': 'true', + 'anonymous_rw': 'true', + } + self.nms_mock.netstorsvc.share_folder( + 'svc:/network/nfs/server:default', 'stack/share', share_opts) + + self.mox.ReplayAll() + + self.drv.check_for_setup_error() + + self.mox.ResetAll() + + self.nms_mock.server.get_prop('volroot').AndReturn('/volumes') + self.nms_mock.volume.object_exists('stack').AndReturn(False) + + self.mox.ReplayAll() + + self.assertRaises(LookupError, self.drv.check_for_setup_error) + + self.mox.ResetAll() + + self.nms_mock.server.get_prop('volroot').AndReturn('/volumes') + self.nms_mock.volume.object_exists('stack').AndReturn(True) + self.nms_mock.folder.object_exists('stack/share').AndReturn(False) + + self.mox.ReplayAll() + + self.assertRaises(LookupError, self.drv.check_for_setup_error) + + def test_initialize_connection(self): + self.drv.shares = { + self.TEST_EXPORT1: None + } + volume = { + 'provider_location': self.TEST_EXPORT1, + 'name': 'volume' + } + result = self.drv.initialize_connection(volume, None) + self.assertEqual(result['data']['export'], + '%s/volume' % self.TEST_EXPORT1) + + def test_do_create_volume(self): + volume = { + 'provider_location': self.TEST_EXPORT1, + 'size': 1, + 'name': 'volume-1' + } + self.drv.shares = {self.TEST_EXPORT1: None} + self.drv.share2nms = {self.TEST_EXPORT1: self.nms_mock} + + compression = self.configuration.nexenta_volume_compression + self.nms_mock.server.get_prop('volroot').AndReturn('/volumes') + self.nms_mock.folder.create_with_props( + 'stack', 'share/volume-1', {'compression': compression}) + self.nms_mock.netstorsvc.share_folder(self.TEST_SHARE_SVC, + 'stack/share/volume-1', + self.TEST_SHARE_OPTS) + self.nms_mock.appliance.execute( + 'dd if=/dev/zero of=/volumes/stack/share/volume-1/volume bs=1M ' + 'count=0 seek=1024' + ) + self.nms_mock.appliance.execute('chmod ugo+rw ' + '/volumes/stack/share/volume-1/volume') + + self.mox.ReplayAll() + + self.drv._do_create_volume(volume) + + self.mox.ResetAll() + + self.nms_mock.server.get_prop('volroot').AndReturn('/volumes') + self.nms_mock.folder.create_with_props( + 'stack', 'share/volume-1', {'compression': compression}) + self.nms_mock.netstorsvc.share_folder( + self.TEST_SHARE_SVC, 'stack/share/volume-1', + self.TEST_SHARE_OPTS).AndRaise(nexenta.NexentaException('-')) + self.nms_mock.folder.destroy('stack/share/volume-1') + + self.mox.ReplayAll() + + self.assertRaises(nexenta.NexentaException, self.drv._do_create_volume, + volume) + + def test_create_sparsed_file(self): + self.nms_mock.appliance.execute('dd if=/dev/zero of=/tmp/path bs=1M ' + 'count=0 seek=1024') + self.mox.ReplayAll() + + self.drv._create_sparsed_file(self.nms_mock, '/tmp/path', 1) + + def test_create_regular_file(self): + self.nms_mock.appliance.execute('dd if=/dev/zero of=/tmp/path bs=1M ' + 'count=1024') + self.mox.ReplayAll() + 
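+        # Replay is armed: the driver call below must issue exactly the
+        # dd command recorded on the appliance mock above.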
+ self.drv._create_regular_file(self.nms_mock, '/tmp/path', 1) + + def test_set_rw_permissions_for_all(self): + path = '/tmp/path' + self.nms_mock.appliance.execute('chmod ugo+rw %s' % path) + self.mox.ReplayAll() + + self.drv._set_rw_permissions_for_all(self.nms_mock, path) + + def test_local_path(self): + volume = {'provider_location': self.TEST_EXPORT1, 'name': 'volume-1'} + path = self.drv.local_path(volume) + self.assertEqual( + path, + '$state_path/mnt/b3f660847a52b29ac330d8555e4ad669/volume-1/volume' + ) + + def test_remote_path(self): + volume = {'provider_location': self.TEST_EXPORT1, 'name': 'volume-1'} + path = self.drv.remote_path(volume) + self.assertEqual(path, '/volumes/stack/share/volume-1/volume') + + def test_share_folder(self): + path = 'stack/share/folder' + self.nms_mock.netstorsvc.share_folder(self.TEST_SHARE_SVC, path, + self.TEST_SHARE_OPTS) + self.mox.ReplayAll() + + self.drv._share_folder(self.nms_mock, 'stack', 'share/folder') + + def test_load_shares_config(self): + self.drv.configuration.nfs_shares_config = self.TEST_SHARES_CONFIG_FILE + + self.mox.StubOutWithMock(self.drv, '_read_config_file') + config_data = [ + '%s %s' % (self.TEST_EXPORT1, self.TEST_NMS1), + '# %s %s' % (self.TEST_EXPORT2, self.TEST_NMS2), + '', + '%s %s %s' % (self.TEST_EXPORT2, self.TEST_NMS2, + self.TEST_EXPORT2_OPTIONS) + ] + + self.drv._read_config_file(self.TEST_SHARES_CONFIG_FILE).\ + AndReturn(config_data) + self.mox.ReplayAll() + + self.drv._load_shares_config(self.drv.configuration.nfs_shares_config) + + self.assertIn(self.TEST_EXPORT1, self.drv.shares) + self.assertIn(self.TEST_EXPORT2, self.drv.shares) + self.assertEqual(len(self.drv.shares), 2) + + self.assertIn(self.TEST_EXPORT1, self.drv.share2nms) + self.assertIn(self.TEST_EXPORT2, self.drv.share2nms) + self.assertEqual(len(self.drv.share2nms.keys()), 2) + + self.assertEqual(self.drv.shares[self.TEST_EXPORT2], + self.TEST_EXPORT2_OPTIONS) + + self.mox.VerifyAll() + + def test_get_capacity_info(self): + self.drv.share2nms = {self.TEST_EXPORT1: self.nms_mock} + self.nms_mock.server.get_prop('volroot').AndReturn('/volumes') + self.nms_mock.folder.get_child_props('stack/share', '').AndReturn({ + 'available': '1G', + 'used': '2G' + }) + self.mox.ReplayAll() + total, free, allocated = self.drv._get_capacity_info(self.TEST_EXPORT1) + + self.assertEqual(total, 3 * units.GiB) + self.assertEqual(free, units.GiB) + self.assertEqual(allocated, 2 * units.GiB) + + def test_get_share_datasets(self): + self.drv.share2nms = {self.TEST_EXPORT1: self.nms_mock} + self.nms_mock.server.get_prop('volroot').AndReturn('/volumes') + self.mox.ReplayAll() + + volume_name, folder_name = \ + self.drv._get_share_datasets(self.TEST_EXPORT1) + + self.assertEqual(volume_name, 'stack') + self.assertEqual(folder_name, 'share') + + def test_delete_snapshot(self): + self.drv.share2nms = {self.TEST_EXPORT1: self.nms_mock} + self._create_volume_db_entry() + + self.nms_mock.server.get_prop('volroot').AndReturn('/volumes') + self.nms_mock.snapshot.destroy('stack/share/volume-1@snapshot1', '') + self.mox.ReplayAll() + self.drv.delete_snapshot({'volume_id': '1', 'name': 'snapshot1'}) + self.mox.ResetAll() + + # Check that exception not raised if snapshot does not exist on + # NexentaStor appliance. 
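+        # The driver should treat the NexentaException raised below as
+        # benign and return normally instead of re-raising it.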
+ self.nms_mock.server.get_prop('volroot').AndReturn('/volumes') + mock = self.nms_mock.snapshot.destroy('stack/share/volume-1@snapshot1', + '') + mock.AndRaise(nexenta.NexentaException("Snapshot does not exist")) + self.mox.ReplayAll() + self.drv.delete_snapshot({'volume_id': '1', 'name': 'snapshot1'}) + self.mox.ResetAll() + + def test_delete_volume(self): + self.drv.share2nms = {self.TEST_EXPORT1: self.nms_mock} + self._create_volume_db_entry() + + self.drv._ensure_share_mounted = lambda *_, **__: 0 + self.drv._execute = lambda *_, **__: 0 + + self.nms_mock.server.get_prop('volroot').AndReturn('/volumes') + self.nms_mock.folder.get_child_props('stack/share/volume-1', + 'origin').AndReturn(None) + self.nms_mock.folder.destroy('stack/share/volume-1', '-r') + self.mox.ReplayAll() + self.drv.delete_volume({ + 'id': '1', + 'name': 'volume-1', + 'provider_location': self.TEST_EXPORT1 + }) + self.mox.ResetAll() + + # Check that exception not raised if folder does not exist on + # NexentaStor appliance. + self.nms_mock.server.get_prop('volroot').AndReturn('/volumes') + self.nms_mock.folder.get_child_props('stack/share/volume-1', + 'origin').AndReturn(None) + mock = self.nms_mock.folder.destroy('stack/share/volume-1', '-r') + mock.AndRaise(nexenta.NexentaException("Folder does not exist")) + self.mox.ReplayAll() + self.drv.delete_volume({ + 'id': '1', + 'name': 'volume-1', + 'provider_location': self.TEST_EXPORT1 + }) + self.mox.ResetAll() + + +class TestNexentaUtils(test.TestCase): + + def test_str2size(self): + values_to_test = ( + # Test empty value + (None, 0), + ('', 0), + ('0', 0), + ('12', 12), + # Test int and long values + (10, 10), + (long(10), 10), + # Test bytes string + ('1b', 1), + ('1B', 1), + ('1023b', 1023), + ('0B', 0), + # Test other units + ('1M', units.MiB), + ('1.0M', units.MiB), + ) + + for value, result in values_to_test: + self.assertEqual(utils.str2size(value), result) + + # Invalid format value + self.assertRaises(ValueError, utils.str2size, 'A') + + def test_str2gib_size(self): + self.assertEqual(utils.str2gib_size('1024M'), 1) + self.assertEqual(utils.str2gib_size('300M'), + 300 * units.MiB // units.GiB) + self.assertEqual(utils.str2gib_size('1.2T'), + 1.2 * units.TiB // units.GiB) + self.assertRaises(ValueError, utils.str2gib_size, 'A') + + def test_parse_nms_url(self): + urls = ( + ('auto://192.168.1.1/', (True, 'http', 'admin', 'nexenta', + '192.168.1.1', '2000', '/rest/nms/')), + ('http://192.168.1.1/', (False, 'http', 'admin', 'nexenta', + '192.168.1.1', '2000', '/rest/nms/')), + ('http://192.168.1.1:8080', (False, 'http', 'admin', 'nexenta', + '192.168.1.1', '8080', '/rest/nms/')), + ('https://root:password@192.168.1.1:8080', + (False, 'https', 'root', 'password', '192.168.1.1', '8080', + '/rest/nms/')), + ) + for url, result in urls: + self.assertEqual(utils.parse_nms_url(url), result) diff --git a/cinder/tests/test_nfs.py b/cinder/tests/test_nfs.py index 402d84b06a..9e73d41e19 100644 --- a/cinder/tests/test_nfs.py +++ b/cinder/tests/test_nfs.py @@ -1,4 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2012 NetApp, Inc. # All Rights Reserved. @@ -14,23 +13,24 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
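The TestNexentaUtils cases just above pin down the str2size contract: falsy input maps to 0, bare numbers pass through, a trailing 'b'/'B' suffix is a no-op, other suffixes scale by binary units, and unparsable input raises ValueError. A minimal Python 2 sketch that satisfies those cases (not the actual cinder.volume.drivers.nexenta.utils code; the UNITS table and the regex are assumptions)::

    import re

    # Assumed binary-unit table; 'M' must equal units.MiB for the tests above.
    UNITS = {'': 1, 'K': 1024, 'M': 1024 ** 2, 'G': 1024 ** 3, 'T': 1024 ** 4}

    def str2size(s):
        """Parse '1023b', '1.0M', 10, ... into a byte count (sketch only)."""
        if not s:
            return 0
        if isinstance(s, (int, long, float)):  # 'long' exists on Python 2,
            return int(s)                      # as exercised by the tests
        match = re.match(r'^([0-9.]+)\s*([KMGT]?)[Bb]?$', s)
        if match is None:
            raise ValueError('Invalid value: %s' % s)
        number, unit = match.groups()
        return int(float(number) * UNITS[unit.upper()])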
-"""Unit tests for the NFS driver module""" +"""Unit tests for the NFS driver module.""" -import os import errno -import __builtin__ +import os import mox as mox_lib -from mox import IsA from mox import IgnoreArg +from mox import IsA from mox import stubout +from oslo.config import cfg from cinder import context from cinder import exception +from cinder.image import image_utils from cinder import test -from cinder.exception import ProcessExecutionError - -from cinder.volume import nfs +from cinder import units +from cinder.volume import configuration as conf +from cinder.volume.drivers import nfs class DumbVolume(object): @@ -43,304 +43,271 @@ def __getitem__(self, item): return self.fields[item] -class NfsDriverTestCase(test.TestCase): - """Test case for NFS driver""" - - TEST_NFS_EXPORT1 = 'nfs-host1:/export' - TEST_NFS_EXPORT2 = 'nfs-host2:/export' - TEST_SIZE_IN_GB = 1 - TEST_MNT_POINT = '/mnt/nfs' - TEST_MNT_POINT_BASE = '/mnt/test' - TEST_LOCAL_PATH = '/mnt/nfs/volume-123' +class RemoteFsDriverTestCase(test.TestCase): TEST_FILE_NAME = 'test.txt' - TEST_SHARES_CONFIG_FILE = '/etc/cinder/test-shares.conf' - ONE_GB_IN_BYTES = 1024 * 1024 * 1024 def setUp(self): - self._driver = nfs.NfsDriver() + super(RemoteFsDriverTestCase, self).setUp() + self._driver = nfs.RemoteFsDriver() self._mox = mox_lib.Mox() - self.stubs = stubout.StubOutForTesting() + self.addCleanup(self._mox.UnsetStubs) - def tearDown(self): - self._mox.UnsetStubs() - self.stubs.UnsetAll() - - def stub_out_not_replaying(self, obj, attr_name): - attr_to_replace = getattr(obj, attr_name) - stub = mox_lib.MockObject(attr_to_replace) - self.stubs.Set(obj, attr_name, stub) - - def test_path_exists_should_return_true(self): - """_path_exists should return True if stat returns 0""" - mox = self._mox - drv = self._driver + def test_create_sparsed_file(self): + (mox, drv) = self._mox, self._driver mox.StubOutWithMock(drv, '_execute') - drv._execute('stat', self.TEST_FILE_NAME, run_as_root=True) + drv._execute('truncate', '-s', '1G', '/path', run_as_root=True).\ + AndReturn("") mox.ReplayAll() - self.assertTrue(drv._path_exists(self.TEST_FILE_NAME)) + drv._create_sparsed_file('/path', 1) mox.VerifyAll() - def test_path_exists_should_return_false(self): - """_path_exists should return True if stat doesn't return 0""" - mox = self._mox - drv = self._driver + def test_create_regular_file(self): + (mox, drv) = self._mox, self._driver mox.StubOutWithMock(drv, '_execute') - drv._execute('stat', self.TEST_FILE_NAME, run_as_root=True).\ - AndRaise(ProcessExecutionError( - stderr="stat: cannot stat `test.txt': No such file or directory")) + drv._execute('dd', 'if=/dev/zero', 'of=/path', 'bs=1M', 'count=1024', + run_as_root=True) mox.ReplayAll() - self.assertFalse(drv._path_exists(self.TEST_FILE_NAME)) + drv._create_regular_file('/path', 1) mox.VerifyAll() - def test_local_path(self): - """local_path common use case""" - nfs.FLAGS.nfs_mount_point_base = self.TEST_MNT_POINT_BASE - drv = self._driver - - volume = DumbVolume() - volume['provider_location'] = self.TEST_NFS_EXPORT1 - volume['name'] = 'volume-123' - - self.assertEqual( - '/mnt/test/2f4f60214cf43c595666dd815f0360a4/volume-123', - drv.local_path(volume)) - - def test_mount_nfs_should_mount_correctly(self): - """_mount_nfs common case usage""" - mox = self._mox - drv = self._driver + def test_create_qcow2_file(self): + (mox, drv) = self._mox, self._driver - mox.StubOutWithMock(drv, '_path_exists') - drv._path_exists(self.TEST_MNT_POINT).AndReturn(True) + file_size = 1 
mox.StubOutWithMock(drv, '_execute') - drv._execute('mount', '-t', 'nfs', self.TEST_NFS_EXPORT1, - self.TEST_MNT_POINT, run_as_root=True) + drv._execute('qemu-img', 'create', '-f', 'qcow2', + '-o', 'preallocation=metadata', '/path', + '%s' % str(file_size * units.GiB), run_as_root=True) mox.ReplayAll() - drv._mount_nfs(self.TEST_NFS_EXPORT1, self.TEST_MNT_POINT) + drv._create_qcow2_file('/path', file_size) mox.VerifyAll() - def test_mount_nfs_should_suppress_already_mounted_error(self): - """_mount_nfs should suppress already mounted error if ensure=True - """ - mox = self._mox - drv = self._driver - - mox.StubOutWithMock(drv, '_path_exists') - drv._path_exists(self.TEST_MNT_POINT).AndReturn(True) + def test_set_rw_permissions_for_all(self): + (mox, drv) = self._mox, self._driver mox.StubOutWithMock(drv, '_execute') - drv._execute('mount', '-t', 'nfs', self.TEST_NFS_EXPORT1, - self.TEST_MNT_POINT, run_as_root=True).\ - AndRaise(ProcessExecutionError( - stderr='is busy or already mounted')) + drv._execute('chmod', 'ugo+rw', '/path', run_as_root=True) mox.ReplayAll() - drv._mount_nfs(self.TEST_NFS_EXPORT1, self.TEST_MNT_POINT, ensure=True) + drv._set_rw_permissions_for_all('/path') mox.VerifyAll() - def test_mount_nfs_should_reraise_already_mounted_error(self): - """_mount_nfs should not suppress already mounted error if ensure=False - """ - mox = self._mox - drv = self._driver - mox.StubOutWithMock(drv, '_path_exists') - drv._path_exists(self.TEST_MNT_POINT).AndReturn(True) +class NfsDriverTestCase(test.TestCase): + """Test case for NFS driver.""" - mox.StubOutWithMock(drv, '_execute') - drv._execute('mount', '-t', 'nfs', self.TEST_NFS_EXPORT1, - self.TEST_MNT_POINT, run_as_root=True).\ - AndRaise(ProcessExecutionError(stderr='is busy or already mounted')) + TEST_NFS_EXPORT1 = 'nfs-host1:/export' + TEST_NFS_EXPORT2 = 'nfs-host2:/export' + TEST_NFS_EXPORT2_OPTIONS = '-o intr' + TEST_SIZE_IN_GB = 1 + TEST_MNT_POINT = '/mnt/nfs' + TEST_MNT_POINT_BASE = '/mnt/test' + TEST_LOCAL_PATH = '/mnt/nfs/volume-123' + TEST_FILE_NAME = 'test.txt' + TEST_SHARES_CONFIG_FILE = '/etc/cinder/test-shares.conf' + TEST_NFS_EXPORT_SPACES = 'nfs-host3:/export this' + TEST_MNT_POINT_SPACES = '/ 0 0 0 /foo' - mox.ReplayAll() + def setUp(self): + super(NfsDriverTestCase, self).setUp() + self._mox = mox_lib.Mox() + self.stubs = stubout.StubOutForTesting() + self.configuration = mox_lib.MockObject(conf.Configuration) + self.configuration.append_config_values(mox_lib.IgnoreArg()) + self.configuration.nfs_shares_config = None + self.configuration.nfs_sparsed_volumes = True + self.configuration.nfs_used_ratio = 0.95 + self.configuration.nfs_oversub_ratio = 1.0 + self.configuration.nfs_mount_point_base = self.TEST_MNT_POINT_BASE + self.configuration.nfs_mount_options = None + self.configuration.volume_dd_blocksize = '1M' + self._driver = nfs.NfsDriver(configuration=self.configuration) + self._driver.shares = {} + self.addCleanup(self.stubs.UnsetAll) + self.addCleanup(self._mox.UnsetStubs) - self.assertRaises(ProcessExecutionError, drv._mount_nfs, - self.TEST_NFS_EXPORT1, self.TEST_MNT_POINT, - ensure=False) + def stub_out_not_replaying(self, obj, attr_name): + attr_to_replace = getattr(obj, attr_name) + stub = mox_lib.MockObject(attr_to_replace) + self.stubs.Set(obj, attr_name, stub) - mox.VerifyAll() + def test_local_path(self): + """local_path common use case.""" + self.configuration.nfs_mount_point_base = self.TEST_MNT_POINT_BASE + drv = self._driver + + volume = DumbVolume() + volume['provider_location'] = 
self.TEST_NFS_EXPORT1 + volume['name'] = 'volume-123' + + self.assertEqual( + '/mnt/test/2f4f60214cf43c595666dd815f0360a4/volume-123', + drv.local_path(volume)) - def test_mount_nfs_should_create_mountpoint_if_not_yet(self): - """_mount_nfs should create mountpoint if it doesn't exist""" + def test_copy_image_to_volume(self): + """resize_image common case usage.""" mox = self._mox drv = self._driver - mox.StubOutWithMock(drv, '_path_exists') - drv._path_exists(self.TEST_MNT_POINT).AndReturn(False) + TEST_IMG_SOURCE = 'foo.img' - mox.StubOutWithMock(drv, '_execute') - drv._execute('mkdir', '-p', self.TEST_MNT_POINT) - drv._execute(*([IgnoreArg()] * 5), run_as_root=IgnoreArg()) + volume = {'size': self.TEST_SIZE_IN_GB, 'name': TEST_IMG_SOURCE} - mox.ReplayAll() + def fake_local_path(volume): + return volume['name'] - drv._mount_nfs(self.TEST_NFS_EXPORT1, self.TEST_MNT_POINT) + self.stubs.Set(drv, 'local_path', fake_local_path) - mox.VerifyAll() + mox.StubOutWithMock(image_utils, 'fetch_to_raw') + image_utils.fetch_to_raw(None, None, None, TEST_IMG_SOURCE, + mox_lib.IgnoreArg(), + size=self.TEST_SIZE_IN_GB) - def test_mount_nfs_should_not_create_mountpoint_if_already(self): - """_mount_nfs should not create mountpoint if it already exists""" - mox = self._mox - drv = self._driver + mox.StubOutWithMock(image_utils, 'resize_image') + image_utils.resize_image(TEST_IMG_SOURCE, self.TEST_SIZE_IN_GB) - mox.StubOutWithMock(drv, '_path_exists') - drv._path_exists(self.TEST_MNT_POINT).AndReturn(True) - - mox.StubOutWithMock(drv, '_execute') - drv._execute(*([IgnoreArg()] * 5), run_as_root=IgnoreArg()) + mox.StubOutWithMock(image_utils, 'qemu_img_info') + data = mox_lib.MockAnything() + data.virtual_size = 1 * units.GiB + image_utils.qemu_img_info(TEST_IMG_SOURCE).AndReturn(data) mox.ReplayAll() - drv._mount_nfs(self.TEST_NFS_EXPORT1, self.TEST_MNT_POINT) + drv.copy_image_to_volume(None, volume, None, None) mox.VerifyAll() - def test_get_hash_str(self): - """_get_hash_str should calculation correct value""" - drv = self._driver - - self.assertEqual('2f4f60214cf43c595666dd815f0360a4', - drv._get_hash_str(self.TEST_NFS_EXPORT1)) - def test_get_mount_point_for_share(self): - """_get_mount_point_for_share should calculate correct value""" + """_get_mount_point_for_share should calculate correct value.""" drv = self._driver - nfs.FLAGS.nfs_mount_point_base = self.TEST_MNT_POINT_BASE + self.configuration.nfs_mount_point_base = self.TEST_MNT_POINT_BASE self.assertEqual('/mnt/test/2f4f60214cf43c595666dd815f0360a4', drv._get_mount_point_for_share(self.TEST_NFS_EXPORT1)) - def test_get_available_capacity_with_df(self): - """_get_available_capacity should calculate correct value""" + def test_get_capacity_info(self): + """_get_capacity_info should calculate correct value.""" mox = self._mox drv = self._driver - df_avail = 1490560 - df_head = 'Filesystem 1K-blocks Used Available Use% Mounted on\n' - df_data = 'nfs-host:/export 2620544 996864 %d 41%% /mnt' % df_avail - df_output = df_head + df_data + stat_total_size = 2620544 + stat_avail = 2129984 + stat_output = '1 %d %d' % (stat_total_size, stat_avail) - setattr(nfs.FLAGS, 'nfs_disk_util', 'df') + du_used = 490560 + du_output = '%d /mnt' % du_used mox.StubOutWithMock(drv, '_get_mount_point_for_share') drv._get_mount_point_for_share(self.TEST_NFS_EXPORT1).\ AndReturn(self.TEST_MNT_POINT) mox.StubOutWithMock(drv, '_execute') - drv._execute('df', '-P', '-B', '1', self.TEST_MNT_POINT, - run_as_root=True).AndReturn((df_output, None)) + drv._execute('stat', '-f', 
'-c', '%S %b %a', + self.TEST_MNT_POINT, + run_as_root=True).AndReturn((stat_output, None)) + + drv._execute('du', '-sb', '--apparent-size', + '--exclude', '*snapshot*', + self.TEST_MNT_POINT, + run_as_root=True).AndReturn((du_output, None)) mox.ReplayAll() - self.assertEquals(df_avail, - drv._get_available_capacity(self.TEST_NFS_EXPORT1)) + self.assertEqual((stat_total_size, stat_avail, du_used), + drv._get_capacity_info(self.TEST_NFS_EXPORT1)) mox.VerifyAll() - delattr(nfs.FLAGS, 'nfs_disk_util') - - def test_get_available_capacity_with_du(self): - """_get_available_capacity should calculate correct value""" + def test_get_capacity_info_for_share_and_mount_point_with_spaces(self): + """_get_capacity_info should calculate correct value.""" mox = self._mox drv = self._driver - setattr(nfs.FLAGS, 'nfs_disk_util', 'du') - - df_total_size = 2620544 - df_used_size = 996864 - df_avail_size = 1490560 - df_title = 'Filesystem 1-blocks Used Available Use% Mounted on\n' - df_mnt_data = 'nfs-host:/export %d %d %d 41%% /mnt' % (df_total_size, - df_used_size, - df_avail_size) - df_output = df_title + df_mnt_data + stat_total_size = 2620544 + stat_avail = 2129984 + stat_output = '1 %d %d' % (stat_total_size, stat_avail) du_used = 490560 du_output = '%d /mnt' % du_used mox.StubOutWithMock(drv, '_get_mount_point_for_share') - drv._get_mount_point_for_share(self.TEST_NFS_EXPORT1).\ - AndReturn(self.TEST_MNT_POINT) + drv._get_mount_point_for_share(self.TEST_NFS_EXPORT_SPACES).\ + AndReturn(self.TEST_MNT_POINT_SPACES) mox.StubOutWithMock(drv, '_execute') - drv._execute('df', '-P', '-B', '1', self.TEST_MNT_POINT, - run_as_root=True).\ - AndReturn((df_output, None)) + drv._execute('stat', '-f', '-c', '%S %b %a', + self.TEST_MNT_POINT_SPACES, + run_as_root=True).AndReturn((stat_output, None)) + drv._execute('du', '-sb', '--apparent-size', '--exclude', '*snapshot*', - self.TEST_MNT_POINT, + self.TEST_MNT_POINT_SPACES, run_as_root=True).AndReturn((du_output, None)) mox.ReplayAll() - self.assertEquals(df_total_size - du_used, - drv._get_available_capacity(self.TEST_NFS_EXPORT1)) + self.assertEqual((stat_total_size, stat_avail, du_used), + drv._get_capacity_info(self.TEST_NFS_EXPORT_SPACES)) mox.VerifyAll() - delattr(nfs.FLAGS, 'nfs_disk_util') - def test_load_shares_config(self): mox = self._mox drv = self._driver - nfs.FLAGS.nfs_shares_config = self.TEST_SHARES_CONFIG_FILE + drv.configuration.nfs_shares_config = self.TEST_SHARES_CONFIG_FILE - mox.StubOutWithMock(__builtin__, 'open') + mox.StubOutWithMock(drv, '_read_config_file') config_data = [] config_data.append(self.TEST_NFS_EXPORT1) config_data.append('#' + self.TEST_NFS_EXPORT2) config_data.append('') - __builtin__.open(self.TEST_SHARES_CONFIG_FILE).AndReturn(config_data) + config_data.append(self.TEST_NFS_EXPORT2 + ' ' + + self.TEST_NFS_EXPORT2_OPTIONS) + config_data.append('broken:share_format') + drv._read_config_file(self.TEST_SHARES_CONFIG_FILE).\ + AndReturn(config_data) mox.ReplayAll() - shares = drv._load_shares_config() - - self.assertEqual([self.TEST_NFS_EXPORT1], shares) - - mox.VerifyAll() - - def test_ensure_share_mounted(self): - """_ensure_share_mounted simple use case""" - mox = self._mox - drv = self._driver - - mox.StubOutWithMock(drv, '_get_mount_point_for_share') - drv._get_mount_point_for_share(self.TEST_NFS_EXPORT1).\ - AndReturn(self.TEST_MNT_POINT) - - mox.StubOutWithMock(drv, '_mount_nfs') - drv._mount_nfs(self.TEST_NFS_EXPORT1, self.TEST_MNT_POINT, ensure=True) + drv._load_shares_config(drv.configuration.nfs_shares_config) - 
mox.ReplayAll() + self.assertIn(self.TEST_NFS_EXPORT1, drv.shares) + self.assertIn(self.TEST_NFS_EXPORT2, drv.shares) + self.assertEqual(len(drv.shares), 2) - drv._ensure_share_mounted(self.TEST_NFS_EXPORT1) + self.assertEqual(drv.shares[self.TEST_NFS_EXPORT2], + self.TEST_NFS_EXPORT2_OPTIONS) mox.VerifyAll() def test_ensure_shares_mounted_should_save_mounting_successfully(self): - """_ensure_shares_mounted should save share if mounted with success""" + """_ensure_shares_mounted should save share if mounted with success.""" mox = self._mox drv = self._driver - mox.StubOutWithMock(drv, '_load_shares_config') - drv._load_shares_config().AndReturn([self.TEST_NFS_EXPORT1]) + mox.StubOutWithMock(drv, '_read_config_file') + config_data = [] + config_data.append(self.TEST_NFS_EXPORT1) + drv._read_config_file(self.TEST_SHARES_CONFIG_FILE).\ + AndReturn(config_data) + mox.StubOutWithMock(drv, '_ensure_share_mounted') + drv.configuration.nfs_shares_config = self.TEST_SHARES_CONFIG_FILE drv._ensure_share_mounted(self.TEST_NFS_EXPORT1) mox.ReplayAll() @@ -353,13 +320,18 @@ def test_ensure_shares_mounted_should_save_mounting_successfully(self): mox.VerifyAll() def test_ensure_shares_mounted_should_not_save_mounting_with_error(self): - """_ensure_shares_mounted should not save share if failed to mount""" + """_ensure_shares_mounted should not save share if failed to mount.""" mox = self._mox drv = self._driver - mox.StubOutWithMock(drv, '_load_shares_config') - drv._load_shares_config().AndReturn([self.TEST_NFS_EXPORT1]) + mox.StubOutWithMock(drv, '_read_config_file') + config_data = [] + config_data.append(self.TEST_NFS_EXPORT1) + drv._read_config_file(self.TEST_SHARES_CONFIG_FILE).\ + AndReturn(config_data) + mox.StubOutWithMock(drv, '_ensure_share_mounted') + drv.configuration.nfs_shares_config = self.TEST_SHARES_CONFIG_FILE drv._ensure_share_mounted(self.TEST_NFS_EXPORT1).AndRaise(Exception()) mox.ReplayAll() @@ -371,25 +343,47 @@ def test_ensure_shares_mounted_should_not_save_mounting_with_error(self): mox.VerifyAll() def test_setup_should_throw_error_if_shares_config_not_configured(self): - """do_setup should throw error if shares config is not configured """ + """do_setup should throw error if shares config is not configured.""" drv = self._driver - - nfs.FLAGS.nfs_shares_config = self.TEST_SHARES_CONFIG_FILE + self.configuration.nfs_shares_config = self.TEST_SHARES_CONFIG_FILE self.assertRaises(exception.NfsException, drv.do_setup, IsA(context.RequestContext)) + def test_setup_should_throw_error_if_oversub_ratio_less_than_zero(self): + """do_setup should throw error if nfs_oversub_ratio is less than 0.""" + drv = self._driver + self.configuration.nfs_oversub_ratio = -1 + self.assertRaises(exception.NfsException, + drv.do_setup, + IsA(context.RequestContext)) + + def test_setup_should_throw_error_if_used_ratio_less_than_zero(self): + """do_setup should throw error if nfs_used_ratio is less than 0.""" + drv = self._driver + self.configuration.nfs_used_ratio = -1 + self.assertRaises(exception.NfsException, + drv.do_setup, + IsA(context.RequestContext)) + + def test_setup_should_throw_error_if_used_ratio_greater_than_one(self): + """do_setup should throw error if nfs_used_ratio is greater than 1.""" + drv = self._driver + self.configuration.nfs_used_ratio = 2 + self.assertRaises(exception.NfsException, + drv.do_setup, + IsA(context.RequestContext)) + def test_setup_should_throw_exception_if_nfs_client_is_not_installed(self): - """do_setup should throw error if nfs client is not installed """ + 
"""do_setup should throw error if nfs client is not installed.""" mox = self._mox drv = self._driver - - nfs.FLAGS.nfs_shares_config = self.TEST_SHARES_CONFIG_FILE + self.configuration.nfs_shares_config = self.TEST_SHARES_CONFIG_FILE mox.StubOutWithMock(os.path, 'exists') os.path.exists(self.TEST_SHARES_CONFIG_FILE).AndReturn(True) mox.StubOutWithMock(drv, '_execute') - drv._execute('mount.nfs', check_exit_code=False).\ + drv._execute('mount.nfs', check_exit_code=False, run_as_root=True).\ AndRaise(OSError(errno.ENOENT, 'No such file or directory')) mox.ReplayAll() @@ -400,26 +394,34 @@ def test_setup_should_throw_exception_if_nfs_client_is_not_installed(self): mox.VerifyAll() def test_find_share_should_throw_error_if_there_is_no_mounted_shares(self): - """_find_share should throw error if there is no mounted shares""" + """_find_share should throw error if there is no mounted shares.""" drv = self._driver drv._mounted_shares = [] - self.assertRaises(exception.NotFound, drv._find_share, + self.assertRaises(exception.NfsNoSharesMounted, drv._find_share, self.TEST_SIZE_IN_GB) def test_find_share(self): - """_find_share simple use case""" + """_find_share simple use case.""" mox = self._mox drv = self._driver drv._mounted_shares = [self.TEST_NFS_EXPORT1, self.TEST_NFS_EXPORT2] - mox.StubOutWithMock(drv, '_get_available_capacity') - drv._get_available_capacity(self.TEST_NFS_EXPORT1).\ - AndReturn(2 * self.ONE_GB_IN_BYTES) - drv._get_available_capacity(self.TEST_NFS_EXPORT2).\ - AndReturn(3 * self.ONE_GB_IN_BYTES) + mox.StubOutWithMock(drv, '_get_capacity_info') + drv._get_capacity_info(self.TEST_NFS_EXPORT1).\ + AndReturn((5 * units.GiB, 2 * units.GiB, + 2 * units.GiB)) + drv._get_capacity_info(self.TEST_NFS_EXPORT1).\ + AndReturn((5 * units.GiB, 2 * units.GiB, + 2 * units.GiB)) + drv._get_capacity_info(self.TEST_NFS_EXPORT2).\ + AndReturn((10 * units.GiB, 3 * units.GiB, + 1 * units.GiB)) + drv._get_capacity_info(self.TEST_NFS_EXPORT2).\ + AndReturn((10 * units.GiB, 3 * units.GiB, + 1 * units.GiB)) mox.ReplayAll() @@ -429,17 +431,18 @@ def test_find_share(self): mox.VerifyAll() def test_find_share_should_throw_error_if_there_is_no_enough_place(self): - """_find_share should throw error if there is no share to host vol""" + """_find_share should throw error if there is no share to host vol.""" mox = self._mox drv = self._driver drv._mounted_shares = [self.TEST_NFS_EXPORT1, self.TEST_NFS_EXPORT2] - mox.StubOutWithMock(drv, '_get_available_capacity') - drv._get_available_capacity(self.TEST_NFS_EXPORT1).\ - AndReturn(0) - drv._get_available_capacity(self.TEST_NFS_EXPORT2).\ - AndReturn(0) + mox.StubOutWithMock(drv, '_get_capacity_info') + drv._get_capacity_info(self.TEST_NFS_EXPORT1).\ + AndReturn((5 * units.GiB, 0, 5 * units.GiB)) + drv._get_capacity_info(self.TEST_NFS_EXPORT2).\ + AndReturn((10 * units.GiB, 0, + 10 * units.GiB)) mox.ReplayAll() @@ -461,7 +464,7 @@ def test_create_sparsed_volume(self): drv = self._driver volume = self._simple_volume() - setattr(nfs.FLAGS, 'nfs_sparsed_volumes', True) + setattr(cfg.CONF, 'nfs_sparsed_volumes', True) mox.StubOutWithMock(drv, '_create_sparsed_file') mox.StubOutWithMock(drv, '_set_rw_permissions_for_all') @@ -475,14 +478,15 @@ def test_create_sparsed_volume(self): mox.VerifyAll() - delattr(nfs.FLAGS, 'nfs_sparsed_volumes') + delattr(cfg.CONF, 'nfs_sparsed_volumes') def test_create_nonsparsed_volume(self): mox = self._mox drv = self._driver + self.configuration.nfs_sparsed_volumes = False volume = self._simple_volume() - setattr(nfs.FLAGS, 
'nfs_sparsed_volumes', False) + setattr(cfg.CONF, 'nfs_sparsed_volumes', False) mox.StubOutWithMock(drv, '_create_regular_file') mox.StubOutWithMock(drv, '_set_rw_permissions_for_all') @@ -496,10 +500,10 @@ def test_create_nonsparsed_volume(self): mox.VerifyAll() - delattr(nfs.FLAGS, 'nfs_sparsed_volumes') + delattr(cfg.CONF, 'nfs_sparsed_volumes') def test_create_volume_should_ensure_nfs_mounted(self): - """create_volume should ensure shares provided in config are mounted""" + """create_volume ensures shares provided in config are mounted.""" mox = self._mox drv = self._driver @@ -519,7 +523,7 @@ def test_create_volume_should_ensure_nfs_mounted(self): mox.VerifyAll() def test_create_volume_should_return_provider_location(self): - """create_volume should return provider_location with found share """ + """create_volume should return provider_location with found share.""" mox = self._mox drv = self._driver @@ -540,7 +544,7 @@ def test_create_volume_should_return_provider_location(self): mox.VerifyAll() def test_delete_volume(self): - """delete_volume simple test case""" + """delete_volume simple test case.""" mox = self._mox drv = self._driver @@ -553,9 +557,6 @@ def test_delete_volume(self): mox.StubOutWithMock(drv, 'local_path') drv.local_path(volume).AndReturn(self.TEST_LOCAL_PATH) - mox.StubOutWithMock(drv, '_path_exists') - drv._path_exists(self.TEST_LOCAL_PATH).AndReturn(True) - mox.StubOutWithMock(drv, '_execute') drv._execute('rm', '-f', self.TEST_LOCAL_PATH, run_as_root=True) @@ -566,7 +567,7 @@ def test_delete_volume(self): mox.VerifyAll() def test_delete_should_ensure_share_mounted(self): - """delete_volume should ensure that corresponding share is mounted""" + """delete_volume should ensure that corresponding share is mounted.""" mox = self._mox drv = self._driver @@ -586,7 +587,7 @@ def test_delete_should_ensure_share_mounted(self): mox.VerifyAll() def test_delete_should_not_delete_if_provider_location_not_provided(self): - """delete_volume shouldn't try to delete if provider_location missed""" + """delete_volume shouldn't delete if provider_location missed.""" mox = self._mox drv = self._driver @@ -604,27 +605,29 @@ def test_delete_should_not_delete_if_provider_location_not_provided(self): mox.VerifyAll() - def test_delete_should_not_delete_if_there_is_no_file(self): - """delete_volume should not try to delete if file missed""" + def test_get_volume_stats(self): + """get_volume_stats must fill the correct values.""" mox = self._mox drv = self._driver - self.stub_out_not_replaying(drv, '_ensure_share_mounted') - - volume = DumbVolume() - volume['name'] = 'volume-123' - volume['provider_location'] = self.TEST_NFS_EXPORT1 + drv._mounted_shares = [self.TEST_NFS_EXPORT1, self.TEST_NFS_EXPORT2] - mox.StubOutWithMock(drv, 'local_path') - drv.local_path(volume).AndReturn(self.TEST_LOCAL_PATH) + mox.StubOutWithMock(drv, '_ensure_shares_mounted') + mox.StubOutWithMock(drv, '_get_capacity_info') - mox.StubOutWithMock(drv, '_path_exists') - drv._path_exists(self.TEST_LOCAL_PATH).AndReturn(False) + drv._ensure_shares_mounted() - mox.StubOutWithMock(drv, '_execute') + drv._get_capacity_info(self.TEST_NFS_EXPORT1).\ + AndReturn((10 * units.GiB, 2 * units.GiB, + 2 * units.GiB)) + drv._get_capacity_info(self.TEST_NFS_EXPORT2).\ + AndReturn((20 * units.GiB, 3 * units.GiB, + 3 * units.GiB)) mox.ReplayAll() - drv.delete_volume(volume) + drv.get_volume_stats() + self.assertEqual(drv._stats['total_capacity_gb'], 30.0) + self.assertEqual(drv._stats['free_capacity_gb'], 5.0) mox.VerifyAll() diff --git 
a/cinder/tests/test_policy.py b/cinder/tests/test_policy.py index f99c227bbd..e900f1306a 100644 --- a/cinder/tests/test_policy.py +++ b/cinder/tests/test_policy.py @@ -1,4 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2011 Piston Cloud Computing, Inc. # All Rights Reserved. @@ -15,22 +14,24 @@ # License for the specific language governing permissions and limitations # under the License. -"""Test of Policy Engine For Cinder""" +"""Test of Policy Engine For Cinder.""" import os.path import StringIO import urllib2 +from oslo.config import cfg + from cinder import context from cinder import exception -from cinder import flags import cinder.openstack.common.policy from cinder.openstack.common import policy as common_policy from cinder import policy from cinder import test from cinder import utils -FLAGS = flags.FLAGS + +CONF = cfg.CONF class PolicyFileTestCase(test.TestCase): @@ -82,7 +83,7 @@ def setUp(self): "example:uppercase_admin": [["role:ADMIN"], ["role:sysadmin"]], } # NOTE(vish): then overload underlying brain - common_policy.set_brain(common_policy.HttpBrain(rules)) + common_policy.set_brain(common_policy.Brain(rules)) self.context = context.RequestContext('fake', 'fake', roles=['member']) self.target = {} @@ -112,7 +113,7 @@ def fakeurlopen(url, post_data): action = "example:get_http" target = {} result = policy.enforce(self.context, action, target) - self.assertEqual(result, None) + self.assertIsNone(result) def test_enforce_http_false(self): @@ -147,8 +148,8 @@ def test_ignore_case_role_check(self): # NOTE(dprince) we mix case in the Admin role here to ensure # case is ignored admin_context = context.RequestContext('admin', - 'fake', - roles=['AdMiN']) + 'fake', + roles=['AdMiN']) policy.enforce(admin_context, lowercase_action, self.target) policy.enforce(admin_context, uppercase_action, self.target) @@ -170,8 +171,8 @@ def setUp(self): self.context = context.RequestContext('fake', 'fake') def _set_brain(self, default_rule): - brain = cinder.openstack.common.policy.HttpBrain(self.rules, - default_rule) + brain = cinder.openstack.common.policy.Brain(self.rules, + default_rule) cinder.openstack.common.policy.set_brain(brain) def tearDown(self): @@ -180,7 +181,7 @@ def tearDown(self): def test_policy_called(self): self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, - self.context, "example:exist", {}) + self.context, "example:exist", {}) def test_not_found_policy_calls_default(self): policy.enforce(self.context, "example:noexist", {}) @@ -188,7 +189,7 @@ def test_not_found_policy_calls_default(self): def test_default_not_found(self): self._set_brain("default_noexist") self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, - self.context, "example:noexist", {}) + self.context, "example:noexist", {}) class ContextIsAdminPolicyTestCase(test.TestCase): @@ -202,19 +203,19 @@ def test_default_admin_role_is_admin(self): ctx = context.RequestContext('fake', 'fake', roles=['johnny-admin']) self.assertFalse(ctx.is_admin) ctx = context.RequestContext('fake', 'fake', roles=['admin']) - self.assert_(ctx.is_admin) + self.assertTrue(ctx.is_admin) def test_custom_admin_role_is_admin(self): # define explict rules for context_is_admin rules = { 'context_is_admin': [["role:administrator"], ["role:johnny-admin"]] } - brain = common_policy.Brain(rules, FLAGS.policy_default_rule) + brain = common_policy.Brain(rules, CONF.policy_default_rule) common_policy.set_brain(brain) ctx = context.RequestContext('fake', 'fake', roles=['johnny-admin']) - 
self.assert_(ctx.is_admin) + self.assertTrue(ctx.is_admin) ctx = context.RequestContext('fake', 'fake', roles=['administrator']) - self.assert_(ctx.is_admin) + self.assertTrue(ctx.is_admin) # default rule no longer applies ctx = context.RequestContext('fake', 'fake', roles=['admin']) self.assertFalse(ctx.is_admin) @@ -224,9 +225,9 @@ def test_context_is_admin_undefined(self): "admin_or_owner": [["role:admin"], ["project_id:%(project_id)s"]], "default": [["rule:admin_or_owner"]], } - brain = common_policy.Brain(rules, FLAGS.policy_default_rule) + brain = common_policy.Brain(rules, CONF.policy_default_rule) common_policy.set_brain(brain) ctx = context.RequestContext('fake', 'fake') self.assertFalse(ctx.is_admin) ctx = context.RequestContext('fake', 'fake', roles=['admin']) - self.assert_(ctx.is_admin) + self.assertTrue(ctx.is_admin) diff --git a/cinder/tests/test_qos_specs.py b/cinder/tests/test_qos_specs.py new file mode 100644 index 0000000000..b147e235d3 --- /dev/null +++ b/cinder/tests/test_qos_specs.py @@ -0,0 +1,385 @@ + +# Copyright (c) 2013 eBay Inc. +# Copyright (c) 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Unit Tests for qos specs internal API +""" + +import time + +from cinder import context +from cinder import db +from cinder import exception +from cinder.openstack.common.db import exception as db_exc +from cinder.openstack.common import log as logging +from cinder import test +from cinder.volume import qos_specs +from cinder.volume import volume_types + + +LOG = logging.getLogger(__name__) + + +def fake_db_qos_specs_create(context, values): + if values['name'] == 'DupQoSName': + raise exception.QoSSpecsExists(specs_id=values['name']) + elif values['name'] == 'FailQoSName': + raise db_exc.DBError() + + pass + + +class QoSSpecsTestCase(test.TestCase): + """Test cases for qos specs code.""" + def setUp(self): + super(QoSSpecsTestCase, self).setUp() + self.ctxt = context.get_admin_context() + + def _create_qos_specs(self, name, values=None): + """Create a transfer object.""" + if values: + specs = dict(name=name, qos_specs=values) + else: + specs = {'name': name, + 'qos_specs': { + 'consumer': 'back-end', + 'key1': 'value1', + 'key2': 'value2'}} + return db.qos_specs_create(self.ctxt, specs)['id'] + + def test_create(self): + input = {'key1': 'value1', + 'key2': 'value2', + 'key3': 'value3'} + ref = qos_specs.create(self.ctxt, 'FakeName', input) + specs = qos_specs.get_qos_specs(self.ctxt, ref['id']) + expected = (dict(consumer='back-end')) + expected.update(dict(id=ref['id'])) + expected.update(dict(name='FakeName')) + del input['consumer'] + expected.update(dict(specs=input)) + self.assertDictMatch(specs, expected) + + self.stubs.Set(db, 'qos_specs_create', + fake_db_qos_specs_create) + + # qos specs must have unique name + self.assertRaises(exception.QoSSpecsExists, + qos_specs.create, self.ctxt, 'DupQoSName', input) + + input.update({'consumer': 'FakeConsumer'}) + # consumer must be one of: front-end, back-end, both + 
self.assertRaises(exception.InvalidQoSSpecs, + qos_specs.create, self.ctxt, 'QoSName', input) + + del input['consumer'] + # able to catch DBError + self.assertRaises(exception.QoSSpecsCreateFailed, + qos_specs.create, self.ctxt, 'FailQoSName', input) + + def test_update(self): + def fake_db_update(context, specs_id, values): + raise db_exc.DBError() + + input = {'key1': 'value1', + 'consumer': 'WrongPlace'} + # consumer must be one of: front-end, back-end, both + self.assertRaises(exception.InvalidQoSSpecs, + qos_specs.update, self.ctxt, 'fake_id', input) + + input['consumer'] = 'front-end' + # qos specs must exists + self.assertRaises(exception.QoSSpecsNotFound, + qos_specs.update, self.ctxt, 'fake_id', input) + + specs_id = self._create_qos_specs('Name', input) + qos_specs.update(self.ctxt, specs_id, + {'key1': 'newvalue1', + 'key2': 'value2'}) + specs = qos_specs.get_qos_specs(self.ctxt, specs_id) + self.assertEqual(specs['specs']['key1'], 'newvalue1') + self.assertEqual(specs['specs']['key2'], 'value2') + + self.stubs.Set(db, 'qos_specs_update', fake_db_update) + self.assertRaises(exception.QoSSpecsUpdateFailed, + qos_specs.update, self.ctxt, 'fake_id', input) + + def test_delete(self): + def fake_db_associations_get(context, id): + if id == 'InUse': + return True + else: + return False + + def fake_db_delete(context, id): + if id == 'NotFound': + raise exception.QoSSpecsNotFound(specs_id=id) + + def fake_disassociate_all(context, id): + pass + + self.stubs.Set(db, 'qos_specs_associations_get', + fake_db_associations_get) + self.stubs.Set(qos_specs, 'disassociate_all', + fake_disassociate_all) + self.stubs.Set(db, 'qos_specs_delete', fake_db_delete) + self.assertRaises(exception.InvalidQoSSpecs, + qos_specs.delete, self.ctxt, None) + self.assertRaises(exception.QoSSpecsNotFound, + qos_specs.delete, self.ctxt, 'NotFound') + self.assertRaises(exception.QoSSpecsInUse, + qos_specs.delete, self.ctxt, 'InUse') + # able to delete in-use qos specs if force=True + qos_specs.delete(self.ctxt, 'InUse', force=True) + + def test_delete_keys(self): + def fake_db_qos_delete_key(context, id, key): + if key == 'NotFound': + raise exception.QoSSpecsKeyNotFound(specs_id=id, + specs_key=key) + else: + pass + + def fake_qos_specs_get(context, id): + if id == 'NotFound': + raise exception.QoSSpecsNotFound(specs_id=id) + else: + pass + + value = dict(consumer='front-end', + foo='Foo', bar='Bar', zoo='tiger') + specs_id = self._create_qos_specs('QoSName', value) + qos_specs.delete_keys(self.ctxt, specs_id, ['foo', 'bar']) + del value['consumer'] + del value['foo'] + del value['bar'] + expected = {'name': 'QoSName', + 'id': specs_id, + 'consumer': 'front-end', + 'specs': value} + specs = qos_specs.get_qos_specs(self.ctxt, specs_id) + self.assertDictMatch(expected, specs) + + self.stubs.Set(qos_specs, 'get_qos_specs', fake_qos_specs_get) + self.stubs.Set(db, 'qos_specs_item_delete', fake_db_qos_delete_key) + self.assertRaises(exception.InvalidQoSSpecs, + qos_specs.delete_keys, self.ctxt, None, []) + self.assertRaises(exception.QoSSpecsNotFound, + qos_specs.delete_keys, self.ctxt, 'NotFound', []) + self.assertRaises(exception.QoSSpecsKeyNotFound, + qos_specs.delete_keys, self.ctxt, + 'Found', ['NotFound']) + self.assertRaises(exception.QoSSpecsKeyNotFound, + qos_specs.delete_keys, self.ctxt, 'Found', + ['foo', 'bar', 'NotFound']) + + def test_get_associations(self): + def fake_db_associate_get(context, id): + if id == 'Trouble': + raise db_exc.DBError() + return [{'name': 'type-1', 'id': 'id-1'}, + {'name': 
'type-2', 'id': 'id-2'}] + + self.stubs.Set(db, 'qos_specs_associations_get', + fake_db_associate_get) + expected1 = {'association_type': 'volume_type', + 'name': 'type-1', + 'id': 'id-1'} + expected2 = {'association_type': 'volume_type', + 'name': 'type-2', + 'id': 'id-2'} + res = qos_specs.get_associations(self.ctxt, 'specs-id') + self.assertIn(expected1, res) + self.assertIn(expected2, res) + + self.assertRaises(exception.CinderException, + qos_specs.get_associations, self.ctxt, + 'Trouble') + + def test_associate_qos_with_type(self): + def fake_qos_specs_get(context, id): + if id == 'NotFound': + raise exception.QoSSpecsNotFound(specs_id=id) + else: + pass + + def fake_db_associate(context, id, type_id): + if id == 'Trouble': + raise db_exc.DBError() + elif type_id == 'NotFound': + raise exception.VolumeTypeNotFound(volume_type_id=type_id) + pass + + def fake_vol_type_qos_get(type_id): + if type_id == 'Invalid': + return {'qos_specs': {'id': 'Invalid'}} + else: + return {'qos_specs': None} + + type_ref = volume_types.create(self.ctxt, 'TypeName') + specs_id = self._create_qos_specs('QoSName') + + qos_specs.associate_qos_with_type(self.ctxt, specs_id, + type_ref['id']) + res = qos_specs.get_associations(self.ctxt, specs_id) + self.assertEqual(len(res), 1) + self.assertEqual('TypeName', res[0]['name']) + self.assertEqual(type_ref['id'], res[0]['id']) + + self.stubs.Set(db, 'qos_specs_associate', + fake_db_associate) + self.stubs.Set(qos_specs, 'get_qos_specs', fake_qos_specs_get) + self.stubs.Set(volume_types, 'get_volume_type_qos_specs', + fake_vol_type_qos_get) + self.assertRaises(exception.VolumeTypeNotFound, + qos_specs.associate_qos_with_type, + self.ctxt, 'specs-id', 'NotFound') + self.assertRaises(exception.QoSSpecsAssociateFailed, + qos_specs.associate_qos_with_type, + self.ctxt, 'Trouble', 'id') + self.assertRaises(exception.QoSSpecsNotFound, + qos_specs.associate_qos_with_type, + self.ctxt, 'NotFound', 'id') + self.assertRaises(exception.InvalidVolumeType, + qos_specs.associate_qos_with_type, + self.ctxt, 'specs-id', 'Invalid') + + def test_disassociate_qos_specs(self): + def fake_qos_specs_get(context, id): + if id == 'NotFound': + raise exception.QoSSpecsNotFound(specs_id=id) + else: + pass + + def fake_db_disassociate(context, id, type_id): + if id == 'Trouble': + raise db_exc.DBError() + elif type_id == 'NotFound': + raise exception.VolumeTypeNotFound(volume_type_id=type_id) + pass + + type_ref = volume_types.create(self.ctxt, 'TypeName') + specs_id = self._create_qos_specs('QoSName') + + qos_specs.associate_qos_with_type(self.ctxt, specs_id, + type_ref['id']) + res = qos_specs.get_associations(self.ctxt, specs_id) + self.assertEqual(len(res), 1) + + qos_specs.disassociate_qos_specs(self.ctxt, specs_id, type_ref['id']) + res = qos_specs.get_associations(self.ctxt, specs_id) + self.assertEqual(len(res), 0) + + self.stubs.Set(db, 'qos_specs_disassociate', + fake_db_disassociate) + self.stubs.Set(qos_specs, 'get_qos_specs', + fake_qos_specs_get) + self.assertRaises(exception.VolumeTypeNotFound, + qos_specs.disassociate_qos_specs, + self.ctxt, 'specs-id', 'NotFound') + self.assertRaises(exception.QoSSpecsDisassociateFailed, + qos_specs.disassociate_qos_specs, + self.ctxt, 'Trouble', 'id') + + def test_disassociate_all(self): + def fake_db_disassociate_all(context, id): + if id == 'Trouble': + raise db_exc.DBError() + pass + + def fake_qos_specs_get(context, id): + if id == 'NotFound': + raise exception.QoSSpecsNotFound(specs_id=id) + else: + pass + + type1_ref = 
volume_types.create(self.ctxt, 'TypeName1') + type2_ref = volume_types.create(self.ctxt, 'TypeName2') + specs_id = self._create_qos_specs('QoSName') + + qos_specs.associate_qos_with_type(self.ctxt, specs_id, + type1_ref['id']) + qos_specs.associate_qos_with_type(self.ctxt, specs_id, + type2_ref['id']) + res = qos_specs.get_associations(self.ctxt, specs_id) + self.assertEqual(len(res), 2) + + qos_specs.disassociate_all(self.ctxt, specs_id) + res = qos_specs.get_associations(self.ctxt, specs_id) + self.assertEqual(len(res), 0) + + self.stubs.Set(db, 'qos_specs_disassociate_all', + fake_db_disassociate_all) + self.stubs.Set(qos_specs, 'get_qos_specs', + fake_qos_specs_get) + self.assertRaises(exception.QoSSpecsDisassociateFailed, + qos_specs.disassociate_all, + self.ctxt, 'Trouble') + + def test_get_all_specs(self): + input = {'key1': 'value1', + 'key2': 'value2', + 'key3': 'value3', + 'consumer': 'both'} + specs_id1 = self._create_qos_specs('Specs1', input) + input.update({'key4': 'value4'}) + specs_id2 = self._create_qos_specs('Specs2', input) + + expected1 = { + 'id': specs_id1, + 'name': 'Specs1', + 'consumer': 'both', + 'specs': {'key1': 'value1', + 'key2': 'value2', + 'key3': 'value3'}} + expected2 = { + 'id': specs_id2, + 'name': 'Specs2', + 'consumer': 'both', + 'specs': {'key1': 'value1', + 'key2': 'value2', + 'key3': 'value3', + 'key4': 'value4'}} + res = qos_specs.get_all_specs(self.ctxt) + self.assertEqual(len(res), 2) + self.assertIn(expected1, res) + self.assertIn(expected2, res) + + def test_get_qos_specs(self): + one_time_value = str(int(time.time())) + input = {'key1': one_time_value, + 'key2': 'value2', + 'key3': 'value3', + 'consumer': 'both'} + id = self._create_qos_specs('Specs1', input) + specs = qos_specs.get_qos_specs(self.ctxt, id) + self.assertEqual(specs['specs']['key1'], one_time_value) + + self.assertRaises(exception.InvalidQoSSpecs, + qos_specs.get_qos_specs, self.ctxt, None) + + def test_get_qos_specs_by_name(self): + one_time_value = str(int(time.time())) + input = {'key1': one_time_value, + 'key2': 'value2', + 'key3': 'value3', + 'consumer': 'back-end'} + id = self._create_qos_specs(one_time_value, input) + specs = qos_specs.get_qos_specs_by_name(self.ctxt, + one_time_value) + self.assertEqual(specs['specs']['key1'], one_time_value) + + self.assertRaises(exception.InvalidQoSSpecs, + qos_specs.get_qos_specs_by_name, self.ctxt, None) diff --git a/cinder/tests/test_quota.py b/cinder/tests/test_quota.py index ad165587db..d6bdb3734d 100644 --- a/cinder/tests/test_quota.py +++ b/cinder/tests/test_quota.py @@ -1,4 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. @@ -16,14 +15,16 @@ # License for the specific language governing permissions and limitations # under the License. 
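The association tests above fix how qos_specs.associate_qos_with_type is expected to translate failures: QoSSpecsNotFound and VolumeTypeNotFound propagate unchanged, a type that already carries different qos specs yields InvalidVolumeType, and a raw DBError is wrapped in QoSSpecsAssociateFailed. A hedged sketch of that control flow (not the actual cinder.volume.qos_specs code; the duplicate-specs guard and the reason text are assumptions)::

    def associate_qos_with_type(context, specs_id, type_id):
        """Associate qos specs with a volume type (error-handling sketch)."""
        try:
            # Raises QoSSpecsNotFound for an unknown specs_id.
            get_qos_specs(context, specs_id)
            res = volume_types.get_volume_type_qos_specs(type_id)
            if res['qos_specs'] and res['qos_specs']['id'] != specs_id:
                raise exception.InvalidVolumeType(
                    reason='type %s already has other qos specs' % type_id)
            # Raises VolumeTypeNotFound for an unknown type_id.
            db.qos_specs_associate(context, specs_id, type_id)
        except db_exc.DBError:
            raise exception.QoSSpecsAssociateFailed(specs_id=specs_id,
                                                    type_id=type_id)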
+ import datetime +from oslo.config import cfg + from cinder import context from cinder import db from cinder.db.sqlalchemy import api as sqa_api from cinder.db.sqlalchemy import models as sqa_models from cinder import exception -from cinder import flags from cinder.openstack.common import rpc from cinder.openstack.common import timeutils from cinder import quota @@ -32,19 +33,22 @@ from cinder import volume -FLAGS = flags.FLAGS +CONF = cfg.CONF class QuotaIntegrationTestCase(test.TestCase): def setUp(self): super(QuotaIntegrationTestCase, self).setUp() + self.volume_type_name = CONF.default_volume_type + self.volume_type = db.volume_type_create( + context.get_admin_context(), + dict(name=self.volume_type_name)) + self.flags(quota_volumes=2, + quota_snapshots=2, quota_gigabytes=20) - # Apparently needed by the RPC tests... - #self.network = self.start_service('network') - self.user_id = 'admin' self.project_id = 'admin' self.context = context.RequestContext(self.user_id, @@ -57,39 +61,143 @@ def rpc_call_wrapper(context, topic, msg, timeout=None): self.stubs.Set(rpc, 'call', rpc_call_wrapper) + # Destroy the 'default' quota_class in the database to avoid + # conflicts with the test cases here that are setting up their own + # defaults. + db.quota_class_destroy_all_by_name(self.context, 'default') + def tearDown(self): + db.volume_type_destroy(context.get_admin_context(), + self.volume_type['id']) super(QuotaIntegrationTestCase, self).tearDown() cinder.tests.image.fake.FakeImageService_reset() - def _create_volume(self, size=10): - """Create a test volume""" + def _create_volume(self, size=1): + """Create a test volume.""" vol = {} vol['user_id'] = self.user_id vol['project_id'] = self.project_id vol['size'] = size - return db.volume_create(self.context, vol)['id'] + vol['status'] = 'available' + vol['volume_type_id'] = self.volume_type['id'] + return db.volume_create(self.context, vol) + + def _create_snapshot(self, volume): + snapshot = {} + snapshot['user_id'] = self.user_id + snapshot['project_id'] = self.project_id + snapshot['volume_id'] = volume['id'] + snapshot['volume_size'] = volume['size'] + snapshot['status'] = 'available' + return db.snapshot_create(self.context, snapshot) def test_too_many_volumes(self): volume_ids = [] - for i in range(FLAGS.quota_volumes): - volume_id = self._create_volume() - volume_ids.append(volume_id) - self.assertRaises(exception.QuotaError, + for i in range(CONF.quota_volumes): + vol_ref = self._create_volume() + volume_ids.append(vol_ref['id']) + self.assertRaises(exception.VolumeLimitExceeded, volume.API().create, - self.context, 10, '', '', None) + self.context, 1, '', '', + volume_type=self.volume_type) for volume_id in volume_ids: db.volume_destroy(self.context, volume_id) + def test_too_many_volumes_of_type(self): + resource = 'volumes_%s' % self.volume_type_name + db.quota_class_create(self.context, 'default', resource, 1) + flag_args = { + 'quota_volumes': 2000, + 'quota_gigabytes': 2000 + } + self.flags(**flag_args) + vol_ref = self._create_volume() + self.assertRaises(exception.VolumeLimitExceeded, + volume.API().create, + self.context, 1, '', '', + volume_type=self.volume_type) + db.volume_destroy(self.context, vol_ref['id']) + + def test_too_many_snapshots_of_type(self): + resource = 'snapshots_%s' % self.volume_type_name + db.quota_class_create(self.context, 'default', resource, 1) + flag_args = { + 'quota_volumes': 2000, + 'quota_gigabytes': 2000, + } + self.flags(**flag_args) + vol_ref = self._create_volume() + snap_ref = 
self._create_snapshot(vol_ref) + self.assertRaises(exception.SnapshotLimitExceeded, + volume.API().create_snapshot, + self.context, vol_ref, '', '') + db.snapshot_destroy(self.context, snap_ref['id']) + db.volume_destroy(self.context, vol_ref['id']) + def test_too_many_gigabytes(self): volume_ids = [] - volume_id = self._create_volume(size=20) - volume_ids.append(volume_id) - self.assertRaises(exception.QuotaError, + vol_ref = self._create_volume(size=20) + volume_ids.append(vol_ref['id']) + self.assertRaises(exception.VolumeSizeExceedsAvailableQuota, volume.API().create, - self.context, 10, '', '', None) + self.context, 1, '', '', + volume_type=self.volume_type) for volume_id in volume_ids: db.volume_destroy(self.context, volume_id) + def test_too_many_combined_gigabytes(self): + vol_ref = self._create_volume(size=10) + snap_ref = self._create_snapshot(vol_ref) + self.assertRaises(exception.QuotaError, + volume.API().create_snapshot, + self.context, vol_ref, '', '') + usages = db.quota_usage_get_all_by_project(self.context, + self.project_id) + self.assertEqual(usages['gigabytes']['in_use'], 20) + db.snapshot_destroy(self.context, snap_ref['id']) + db.volume_destroy(self.context, vol_ref['id']) + + def test_no_snapshot_gb_quota_flag(self): + self.flags(quota_volumes=2, + quota_snapshots=2, + quota_gigabytes=20, + no_snapshot_gb_quota=True) + vol_ref = self._create_volume(size=10) + snap_ref = self._create_snapshot(vol_ref) + snap_ref2 = volume.API().create_snapshot(self.context, + vol_ref, '', '') + + # Make sure no reservation was created for snapshot gigabytes. + reservations = db.reservation_get_all_by_project(self.context, + self.project_id) + self.assertIsNone(reservations.get('gigabytes')) + + # Make sure the snapshot volume_size isn't included in usage. 
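+        # Only volume gigabytes count here: the two 10G volumes give
+        # in_use == 20 even though two 10G snapshots also exist.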
+ vol_ref2 = volume.API().create(self.context, 10, '', '') + usages = db.quota_usage_get_all_by_project(self.context, + self.project_id) + self.assertEqual(usages['gigabytes']['in_use'], 20) + + db.snapshot_destroy(self.context, snap_ref['id']) + db.snapshot_destroy(self.context, snap_ref2['id']) + db.volume_destroy(self.context, vol_ref['id']) + db.volume_destroy(self.context, vol_ref2['id']) + + def test_too_many_gigabytes_of_type(self): + resource = 'gigabytes_%s' % self.volume_type_name + db.quota_class_create(self.context, 'default', resource, 10) + flag_args = { + 'quota_volumes': 2000, + 'quota_gigabytes': 2000, + } + self.flags(**flag_args) + vol_ref = self._create_volume(size=10) + self.assertRaises(exception.VolumeSizeExceedsAvailableQuota, + volume.API().create, + self.context, 1, '', '', + volume_type=self.volume_type) + db.volume_destroy(self.context, vol_ref['id']) + class FakeContext(object): def __init__(self, project_id, quota_class): @@ -125,6 +233,10 @@ def get_by_class(self, context, quota_class, resource): except KeyError: raise exception.QuotaClassNotFound(class_name=quota_class) + def get_default(self, context, resource): + self.called.append(('get_default', context, resource)) + return resource.default + def get_defaults(self, context, resources): self.called.append(('get_defaults', context, resources)) return resources @@ -141,18 +253,21 @@ def get_project_quotas(self, context, resources, project_id, project_id, quota_class, defaults, usages)) return resources - def limit_check(self, context, resources, values): - self.called.append(('limit_check', context, resources, values)) + def limit_check(self, context, resources, values, project_id=None): + self.called.append(('limit_check', context, resources, + values, project_id)) - def reserve(self, context, resources, deltas, expire=None): - self.called.append(('reserve', context, resources, deltas, expire)) + def reserve(self, context, resources, deltas, expire=None, + project_id=None): + self.called.append(('reserve', context, resources, deltas, + expire, project_id)) return self.reservations - def commit(self, context, reservations): - self.called.append(('commit', context, reservations)) + def commit(self, context, reservations, project_id=None): + self.called.append(('commit', context, reservations, project_id)) - def rollback(self, context, reservations): - self.called.append(('rollback', context, reservations)) + def rollback(self, context, reservations, project_id=None): + self.called.append(('rollback', context, reservations, project_id)) def destroy_all_by_project(self, context, project_id): self.called.append(('destroy_all_by_project', context, project_id)) @@ -166,7 +281,7 @@ def test_no_flag(self): resource = quota.BaseResource('test_resource') self.assertEqual(resource.name, 'test_resource') - self.assertEqual(resource.flag, None) + self.assertIsNone(resource.flag) self.assertEqual(resource.default, -1) def test_with_flag(self): @@ -198,9 +313,9 @@ def test_quota_no_project_no_class(self): def test_quota_with_project_no_class(self): self.flags(quota_volumes=10) resource = quota.BaseResource('test_resource', 'quota_volumes') - driver = FakeDriver(by_project=dict( - test_project=dict(test_resource=15), - )) + driver = FakeDriver( + by_project=dict( + test_project=dict(test_resource=15), )) context = FakeContext('test_project', None) quota_value = resource.quota(driver, context) @@ -209,9 +324,9 @@ def test_quota_with_project_no_class(self): def test_quota_no_project_with_class(self): 
self.flags(quota_volumes=10) resource = quota.BaseResource('test_resource', 'quota_volumes') - driver = FakeDriver(by_class=dict( - test_class=dict(test_resource=20), - )) + driver = FakeDriver( + by_class=dict( + test_class=dict(test_resource=20), )) context = FakeContext(None, 'test_class') quota_value = resource.quota(driver, context) @@ -221,11 +336,8 @@ def test_quota_with_project_with_class(self): self.flags(quota_volumes=10) resource = quota.BaseResource('test_resource', 'quota_volumes') driver = FakeDriver(by_project=dict( - test_project=dict(test_resource=15), - ), - by_class=dict( - test_class=dict(test_resource=20), - )) + test_project=dict(test_resource=15), ), + by_class=dict(test_class=dict(test_resource=20), )) context = FakeContext('test_project', 'test_class') quota_value = resource.quota(driver, context) @@ -235,9 +347,8 @@ def test_quota_override_project_with_class(self): self.flags(quota_volumes=10) resource = quota.BaseResource('test_resource', 'quota_volumes') driver = FakeDriver(by_project=dict( - test_project=dict(test_resource=15), - override_project=dict(test_resource=20), - )) + test_project=dict(test_resource=15), + override_project=dict(test_resource=20), )) context = FakeContext('test_project', 'test_class') quota_value = resource.quota(driver, context, project_id='override_project') @@ -248,9 +359,8 @@ def test_quota_with_project_override_class(self): self.flags(quota_volumes=10) resource = quota.BaseResource('test_resource', 'quota_volumes') driver = FakeDriver(by_class=dict( - test_class=dict(test_resource=15), - override_class=dict(test_resource=20), - )) + test_class=dict(test_resource=15), + override_class=dict(test_resource=20), )) context = FakeContext('test_project', 'test_class') quota_value = resource.quota(driver, context, quota_class='override_class') @@ -258,24 +368,35 @@ def test_quota_with_project_override_class(self): self.assertEqual(quota_value, 20) +class VolumeTypeResourceTestCase(test.TestCase): + def test_name_and_flag(self): + volume_type_name = 'foo' + volume = {'name': volume_type_name, 'id': 'myid'} + resource = quota.VolumeTypeResource('volumes', volume) + + self.assertEqual(resource.name, 'volumes_%s' % volume_type_name) + self.assertIsNone(resource.flag) + self.assertEqual(resource.default, -1) + + class QuotaEngineTestCase(test.TestCase): def test_init(self): quota_obj = quota.QuotaEngine() - self.assertEqual(quota_obj._resources, {}) - self.assertTrue(isinstance(quota_obj._driver, quota.DbQuotaDriver)) + self.assertEqual(quota_obj.resources, {}) + self.assertIsInstance(quota_obj._driver, quota.DbQuotaDriver) def test_init_override_string(self): quota_obj = quota.QuotaEngine( quota_driver_class='cinder.tests.test_quota.FakeDriver') - self.assertEqual(quota_obj._resources, {}) - self.assertTrue(isinstance(quota_obj._driver, FakeDriver)) + self.assertEqual(quota_obj.resources, {}) + self.assertIsInstance(quota_obj._driver, FakeDriver) def test_init_override_obj(self): quota_obj = quota.QuotaEngine(quota_driver_class=FakeDriver) - self.assertEqual(quota_obj._resources, {}) + self.assertEqual(quota_obj.resources, {}) self.assertEqual(quota_obj._driver, FakeDriver) def test_register_resource(self): @@ -283,76 +404,49 @@ def test_register_resource(self): resource = quota.AbsoluteResource('test_resource') quota_obj.register_resource(resource) - self.assertEqual(quota_obj._resources, dict(test_resource=resource)) + self.assertEqual(quota_obj.resources, dict(test_resource=resource)) def test_register_resources(self): quota_obj = 
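The resolution order these BaseResource tests pin down is: a per-project override wins over a per-class override, which wins over the config-flag default. A self-contained sketch using the same values as the tests (the helper name is illustrative)::

    def effective_quota(flag_default, class_override=None,
                        project_override=None):
        # Project override > class override > config-flag default.
        if project_override is not None:
            return project_override
        if class_override is not None:
            return class_override
        return flag_default

    assert effective_quota(10) == 10
    assert effective_quota(10, class_override=20) == 20
    assert effective_quota(10, class_override=20, project_override=15) == 15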
quota.QuotaEngine() resources = [ quota.AbsoluteResource('test_resource1'), quota.AbsoluteResource('test_resource2'), - quota.AbsoluteResource('test_resource3'), - ] + quota.AbsoluteResource('test_resource3'), ] quota_obj.register_resources(resources) - self.assertEqual(quota_obj._resources, dict( - test_resource1=resources[0], - test_resource2=resources[1], - test_resource3=resources[2], - )) - - def test_sync_predeclared(self): - quota_obj = quota.QuotaEngine() - - def spam(*args, **kwargs): - pass - - resource = quota.ReservableResource('test_resource', spam) - quota_obj.register_resource(resource) - - self.assertEqual(resource.sync, spam) - - def test_sync_multi(self): - quota_obj = quota.QuotaEngine() - - def spam(*args, **kwargs): - pass - - resources = [ - quota.ReservableResource('test_resource1', spam), - quota.ReservableResource('test_resource2', spam), - quota.ReservableResource('test_resource3', spam), - quota.ReservableResource('test_resource4', spam), - ] - quota_obj.register_resources(resources[:2]) - - self.assertEqual(resources[0].sync, spam) - self.assertEqual(resources[1].sync, spam) - self.assertEqual(resources[2].sync, spam) - self.assertEqual(resources[3].sync, spam) + self.assertEqual(quota_obj.resources, + dict(test_resource1=resources[0], + test_resource2=resources[1], + test_resource3=resources[2], )) def test_get_by_project(self): context = FakeContext('test_project', 'test_class') - driver = FakeDriver(by_project=dict( + driver = FakeDriver( + by_project=dict( test_project=dict(test_resource=42))) quota_obj = quota.QuotaEngine(quota_driver_class=driver) result = quota_obj.get_by_project(context, 'test_project', 'test_resource') - self.assertEqual(driver.called, [ - ('get_by_project', context, 'test_project', 'test_resource'), - ]) + self.assertEqual(driver.called, + [('get_by_project', + context, + 'test_project', + 'test_resource'), ]) self.assertEqual(result, 42) def test_get_by_class(self): context = FakeContext('test_project', 'test_class') - driver = FakeDriver(by_class=dict( + driver = FakeDriver( + by_class=dict( test_class=dict(test_resource=42))) quota_obj = quota.QuotaEngine(quota_driver_class=driver) result = quota_obj.get_by_class(context, 'test_class', 'test_resource') - self.assertEqual(driver.called, [ - ('get_by_class', context, 'test_class', 'test_resource'), - ]) + self.assertEqual(driver.called, [('get_by_class', + context, + 'test_class', + 'test_resource'), ]) self.assertEqual(result, 42) def _make_quota_obj(self, driver): @@ -361,8 +455,7 @@ def _make_quota_obj(self, driver): quota.AbsoluteResource('test_resource4'), quota.AbsoluteResource('test_resource3'), quota.AbsoluteResource('test_resource2'), - quota.AbsoluteResource('test_resource1'), - ] + quota.AbsoluteResource('test_resource1'), ] quota_obj.register_resources(resources) return quota_obj @@ -373,10 +466,10 @@ def test_get_defaults(self): quota_obj = self._make_quota_obj(driver) result = quota_obj.get_defaults(context) - self.assertEqual(driver.called, [ - ('get_defaults', context, quota_obj._resources), - ]) - self.assertEqual(result, quota_obj._resources) + self.assertEqual(driver.called, [('get_defaults', + context, + quota_obj.resources), ]) + self.assertEqual(result, quota_obj.resources) def test_get_class_quotas(self): context = FakeContext(None, None) @@ -386,13 +479,15 @@ def test_get_class_quotas(self): result2 = quota_obj.get_class_quotas(context, 'test_class', False) self.assertEqual(driver.called, [ - ('get_class_quotas', context, quota_obj._resources, - 'test_class', 
True), - ('get_class_quotas', context, quota_obj._resources, - 'test_class', False), - ]) - self.assertEqual(result1, quota_obj._resources) - self.assertEqual(result2, quota_obj._resources) + ('get_class_quotas', + context, + quota_obj.resources, + 'test_class', True), + ('get_class_quotas', + context, quota_obj.resources, + 'test_class', False), ]) + self.assertEqual(result1, quota_obj.resources) + self.assertEqual(result2, quota_obj.resources) def test_get_project_quotas(self): context = FakeContext(None, None) @@ -405,13 +500,22 @@ def test_get_project_quotas(self): usages=False) self.assertEqual(driver.called, [ - ('get_project_quotas', context, quota_obj._resources, - 'test_project', None, True, True), - ('get_project_quotas', context, quota_obj._resources, - 'test_project', 'test_class', False, False), - ]) - self.assertEqual(result1, quota_obj._resources) - self.assertEqual(result2, quota_obj._resources) + ('get_project_quotas', + context, + quota_obj.resources, + 'test_project', + None, + True, + True), + ('get_project_quotas', + context, + quota_obj.resources, + 'test_project', + 'test_class', + False, + False), ]) + self.assertEqual(result1, quota_obj.resources) + self.assertEqual(result2, quota_obj.resources) def test_count_no_resource(self): context = FakeContext(None, None) @@ -452,19 +556,22 @@ def test_limit_check(self): test_resource3=2, test_resource4=1) self.assertEqual(driver.called, [ - ('limit_check', context, quota_obj._resources, dict( - test_resource1=4, - test_resource2=3, - test_resource3=2, - test_resource4=1, - )), - ]) + ('limit_check', + context, + quota_obj.resources, + dict( + test_resource1=4, + test_resource2=3, + test_resource3=2, + test_resource4=1,), + None), ]) def test_reserve(self): context = FakeContext(None, None) - driver = FakeDriver(reservations=[ - 'resv-01', 'resv-02', 'resv-03', 'resv-04', - ]) + driver = FakeDriver(reservations=['resv-01', + 'resv-02', + 'resv-03', + 'resv-04', ]) quota_obj = self._make_quota_obj(driver) result1 = quota_obj.reserve(context, test_resource1=4, test_resource2=3, test_resource3=2, @@ -472,27 +579,53 @@ def test_reserve(self): result2 = quota_obj.reserve(context, expire=3600, test_resource1=1, test_resource2=2, test_resource3=3, test_resource4=4) + result3 = quota_obj.reserve(context, project_id='fake_project', + test_resource1=1, test_resource2=2, + test_resource3=3, test_resource4=4) self.assertEqual(driver.called, [ - ('reserve', context, quota_obj._resources, dict( - test_resource1=4, - test_resource2=3, - test_resource3=2, - test_resource4=1, - ), None), - ('reserve', context, quota_obj._resources, dict( - test_resource1=1, - test_resource2=2, - test_resource3=3, - test_resource4=4, - ), 3600), - ]) - self.assertEqual(result1, [ - 'resv-01', 'resv-02', 'resv-03', 'resv-04', - ]) - self.assertEqual(result2, [ - 'resv-01', 'resv-02', 'resv-03', 'resv-04', - ]) + ('reserve', + context, + quota_obj.resources, + dict( + test_resource1=4, + test_resource2=3, + test_resource3=2, + test_resource4=1, ), + None, + None), + ('reserve', + context, + quota_obj.resources, + dict( + test_resource1=1, + test_resource2=2, + test_resource3=3, + test_resource4=4, ), + 3600, + None), + ('reserve', + context, + quota_obj.resources, + dict( + test_resource1=1, + test_resource2=2, + test_resource3=3, + test_resource4=4, ), + None, + 'fake_project'), ]) + self.assertEqual(result1, ['resv-01', + 'resv-02', + 'resv-03', + 'resv-04', ]) + self.assertEqual(result2, ['resv-01', + 'resv-02', + 'resv-03', + 'resv-04', ]) + 
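reserve/commit/rollback form a two-phase pattern: callers reserve before doing the work and then either commit or roll back, so usage never leaks on failure. A hedged sketch of a caller (QUOTAS stands for a configured QuotaEngine; do_work is a hypothetical quota-consuming operation)::

    def consume_quota(QUOTAS, context, size, do_work):
        reservations = QUOTAS.reserve(context, volumes=1, gigabytes=size)
        try:
            do_work()
        except Exception:
            QUOTAS.rollback(context, reservations)
            raise
        QUOTAS.commit(context, reservations)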
self.assertEqual(result3, ['resv-01', + 'resv-02', + 'resv-03', + 'resv-04', ]) def test_commit(self): context = FakeContext(None, None) @@ -500,9 +633,13 @@ def test_commit(self): quota_obj = self._make_quota_obj(driver) quota_obj.commit(context, ['resv-01', 'resv-02', 'resv-03']) - self.assertEqual(driver.called, [ - ('commit', context, ['resv-01', 'resv-02', 'resv-03']), - ]) + self.assertEqual(driver.called, + [('commit', + context, + ['resv-01', + 'resv-02', + 'resv-03'], + None), ]) def test_rollback(self): context = FakeContext(None, None) @@ -510,9 +647,13 @@ def test_rollback(self): quota_obj = self._make_quota_obj(driver) quota_obj.rollback(context, ['resv-01', 'resv-02', 'resv-03']) - self.assertEqual(driver.called, [ - ('rollback', context, ['resv-01', 'resv-02', 'resv-03']), - ]) + self.assertEqual(driver.called, + [('rollback', + context, + ['resv-01', + 'resv-02', + 'resv-03'], + None), ]) def test_destroy_all_by_project(self): context = FakeContext(None, None) @@ -520,9 +661,10 @@ def test_destroy_all_by_project(self): quota_obj = self._make_quota_obj(driver) quota_obj.destroy_all_by_project(context, 'test_project') - self.assertEqual(driver.called, [ - ('destroy_all_by_project', context, 'test_project'), - ]) + self.assertEqual(driver.called, + [('destroy_all_by_project', + context, + 'test_project'), ]) def test_expire(self): context = FakeContext(None, None) @@ -530,23 +672,61 @@ def test_expire(self): quota_obj = self._make_quota_obj(driver) quota_obj.expire(context) - self.assertEqual(driver.called, [ - ('expire', context), - ]) + self.assertEqual(driver.called, [('expire', context), ]) - def test_resources(self): + def test_resource_names(self): quota_obj = self._make_quota_obj(None) - self.assertEqual(quota_obj.resources, + self.assertEqual(quota_obj.resource_names, ['test_resource1', 'test_resource2', 'test_resource3', 'test_resource4']) +class VolumeTypeQuotaEngineTestCase(test.TestCase): + def test_default_resources(self): + def fake_vtga(context, inactive=False, filters=None): + return {} + self.stubs.Set(db, 'volume_type_get_all', fake_vtga) + + engine = quota.VolumeTypeQuotaEngine() + self.assertEqual(engine.resource_names, + ['gigabytes', 'snapshots', 'volumes']) + + def test_volume_type_resources(self): + ctx = context.RequestContext('admin', 'admin', is_admin=True) + vtype = db.volume_type_create(ctx, {'name': 'type1'}) + vtype2 = db.volume_type_create(ctx, {'name': 'type_2'}) + + def fake_vtga(context, inactive=False, filters=None): + return { + 'type1': { + 'id': vtype['id'], + 'name': 'type1', + 'extra_specs': {}, + }, + 'type_2': { + 'id': vtype['id'], + 'name': 'type_2', + 'extra_specs': {}, + }, + } + self.stubs.Set(db, 'volume_type_get_all', fake_vtga) + + engine = quota.VolumeTypeQuotaEngine() + self.assertEqual(engine.resource_names, + ['gigabytes', 'gigabytes_type1', 'gigabytes_type_2', + 'snapshots', 'snapshots_type1', 'snapshots_type_2', + 'volumes', 'volumes_type1', 'volumes_type_2']) + db.volume_type_destroy(ctx, vtype['id']) + db.volume_type_destroy(ctx, vtype2['id']) + + class DbQuotaDriverTestCase(test.TestCase): def setUp(self): super(DbQuotaDriverTestCase, self).setUp() self.flags(quota_volumes=10, + quota_snapshots=10, quota_gigabytes=1000, reservation_expire=86400, until_refresh=0, @@ -565,182 +745,176 @@ def tearDown(self): def test_get_defaults(self): # Use our pre-defined resources - result = self.driver.get_defaults(None, quota.QUOTAS._resources) + self._stub_quota_class_get_default() + self._stub_volume_type_get_all() + result = 
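The VolumeTypeQuotaEngine names checked here are the three base resources plus one per-type resource for every volume type, sorted. The expected list can be reproduced directly::

    base = ['gigabytes', 'snapshots', 'volumes']
    types = ['type1', 'type_2']
    names = sorted(base + ['%s_%s' % (b, t) for b in base for t in types])
    assert names == ['gigabytes', 'gigabytes_type1', 'gigabytes_type_2',
                     'snapshots', 'snapshots_type1', 'snapshots_type_2',
                     'volumes', 'volumes_type1', 'volumes_type_2']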
self.driver.get_defaults(None, quota.QUOTAS.resources) - self.assertEqual(result, dict( + self.assertEqual( + result, + dict( volumes=10, - gigabytes=1000, - )) + snapshots=10, + gigabytes=1000, )) + + def _stub_quota_class_get_default(self): + # Stub out quota_class_get_default + def fake_qcgd(context): + self.calls.append('quota_class_get_default') + return dict(volumes=10, + snapshots=10, + gigabytes=1000,) + self.stubs.Set(db, 'quota_class_get_default', fake_qcgd) + + def _stub_volume_type_get_all(self): + def fake_vtga(context, inactive=False, filters=None): + return {} + self.stubs.Set(db, 'volume_type_get_all', fake_vtga) def _stub_quota_class_get_all_by_name(self): # Stub out quota_class_get_all_by_name def fake_qcgabn(context, quota_class): self.calls.append('quota_class_get_all_by_name') self.assertEqual(quota_class, 'test_class') - return dict( - gigabytes=500, - volumes=10, - ) + return dict(gigabytes=500, volumes=10, snapshots=10, ) self.stubs.Set(db, 'quota_class_get_all_by_name', fake_qcgabn) def test_get_class_quotas(self): self._stub_quota_class_get_all_by_name() - result = self.driver.get_class_quotas(None, quota.QUOTAS._resources, + self._stub_volume_type_get_all() + result = self.driver.get_class_quotas(None, quota.QUOTAS.resources, 'test_class') self.assertEqual(self.calls, ['quota_class_get_all_by_name']) - self.assertEqual(result, dict( - volumes=10, - gigabytes=500, - )) + self.assertEqual(result, dict(volumes=10, + gigabytes=500, + snapshots=10)) def test_get_class_quotas_no_defaults(self): self._stub_quota_class_get_all_by_name() - result = self.driver.get_class_quotas(None, quota.QUOTAS._resources, + result = self.driver.get_class_quotas(None, quota.QUOTAS.resources, 'test_class', False) self.assertEqual(self.calls, ['quota_class_get_all_by_name']) - self.assertEqual(result, dict( - volumes=10, - gigabytes=500, - )) + self.assertEqual(result, dict(volumes=10, + gigabytes=500, + snapshots=10)) def _stub_get_by_project(self): def fake_qgabp(context, project_id): self.calls.append('quota_get_all_by_project') self.assertEqual(project_id, 'test_project') - return dict( - volumes=10, - gigabytes=50, - reserved=0 - ) + return dict(volumes=10, gigabytes=50, reserved=0, snapshots=10) def fake_qugabp(context, project_id): self.calls.append('quota_usage_get_all_by_project') self.assertEqual(project_id, 'test_project') - return dict( - volumes=dict(in_use=2, reserved=0), - gigabytes=dict(in_use=10, reserved=0), - ) + return dict(volumes=dict(in_use=2, reserved=0), + snapshots=dict(in_use=2, reserved=0), + gigabytes=dict(in_use=10, reserved=0), ) self.stubs.Set(db, 'quota_get_all_by_project', fake_qgabp) self.stubs.Set(db, 'quota_usage_get_all_by_project', fake_qugabp) self._stub_quota_class_get_all_by_name() + self._stub_quota_class_get_default() def test_get_project_quotas(self): self._stub_get_by_project() + self._stub_volume_type_get_all() result = self.driver.get_project_quotas( FakeContext('test_project', 'test_class'), - quota.QUOTAS._resources, 'test_project') - - self.assertEqual(self.calls, [ - 'quota_get_all_by_project', - 'quota_usage_get_all_by_project', - 'quota_class_get_all_by_name', - ]) - self.assertEqual(result, dict( - volumes=dict( - limit=10, - in_use=2, - reserved=0, - ), - gigabytes=dict( - limit=50, - in_use=10, - reserved=0, - ), - )) + quota.QUOTAS.resources, 'test_project') + + self.assertEqual(self.calls, ['quota_get_all_by_project', + 'quota_usage_get_all_by_project', + 'quota_class_get_all_by_name', + 'quota_class_get_default', ]) + 
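The quota_class_get_default stub reflects how defaults are resolved: rows stored under the special 'default' quota class take precedence over each resource's flag default. A sketch of that merge (helper name illustrative)::

    def effective_defaults(flag_defaults, default_class_rows):
        # 'default' quota-class rows override per-flag defaults.
        defaults = dict(flag_defaults)
        defaults.update(default_class_rows)
        return defaults

    assert effective_defaults({'volumes': 5, 'gigabytes': 1000},
                              {'volumes': 10}) == {'volumes': 10,
                                                   'gigabytes': 1000}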
self.assertEqual(result, dict(volumes=dict(limit=10, + in_use=2, + reserved=0, ), + snapshots=dict(limit=10, + in_use=2, + reserved=0, ), + gigabytes=dict(limit=50, + in_use=10, + reserved=0, ), )) def test_get_project_quotas_alt_context_no_class(self): self._stub_get_by_project() + self._stub_volume_type_get_all() result = self.driver.get_project_quotas( FakeContext('other_project', 'other_class'), - quota.QUOTAS._resources, 'test_project') - - self.assertEqual(self.calls, [ - 'quota_get_all_by_project', - 'quota_usage_get_all_by_project', - ]) - self.assertEqual(result, dict( - volumes=dict( - limit=10, - in_use=2, - reserved=0, - ), - gigabytes=dict( - limit=50, - in_use=10, - reserved=0, - ), - )) + quota.QUOTAS.resources, 'test_project') + + self.assertEqual(self.calls, ['quota_get_all_by_project', + 'quota_usage_get_all_by_project', + 'quota_class_get_default', ]) + self.assertEqual(result, dict(volumes=dict(limit=10, + in_use=2, + reserved=0, ), + snapshots=dict(limit=10, + in_use=2, + reserved=0, ), + gigabytes=dict(limit=50, + in_use=10, + reserved=0, ), )) def test_get_project_quotas_alt_context_with_class(self): self._stub_get_by_project() + self._stub_volume_type_get_all() result = self.driver.get_project_quotas( FakeContext('other_project', 'other_class'), - quota.QUOTAS._resources, 'test_project', quota_class='test_class') - - self.assertEqual(self.calls, [ - 'quota_get_all_by_project', - 'quota_usage_get_all_by_project', - 'quota_class_get_all_by_name', - ]) - self.assertEqual(result, dict( - volumes=dict( - limit=10, - in_use=2, - reserved=0, - ), - gigabytes=dict( - limit=50, - in_use=10, - reserved=0, - ), - )) + quota.QUOTAS.resources, 'test_project', quota_class='test_class') + + self.assertEqual(self.calls, ['quota_get_all_by_project', + 'quota_usage_get_all_by_project', + 'quota_class_get_all_by_name', + 'quota_class_get_default', ]) + self.assertEqual(result, dict(volumes=dict(limit=10, + in_use=2, + reserved=0, ), + snapshots=dict(limit=10, + in_use=2, + reserved=0, ), + gigabytes=dict(limit=50, + in_use=10, + reserved=0, ), )) def test_get_project_quotas_no_defaults(self): self._stub_get_by_project() + self._stub_volume_type_get_all() result = self.driver.get_project_quotas( FakeContext('test_project', 'test_class'), - quota.QUOTAS._resources, 'test_project', defaults=False) - - self.assertEqual(self.calls, [ - 'quota_get_all_by_project', - 'quota_usage_get_all_by_project', - 'quota_class_get_all_by_name', - ]) - self.assertEqual(result, dict( - gigabytes=dict( - limit=50, - in_use=10, - reserved=0, - ), - volumes=dict( - limit=10, - in_use=2, - reserved=0, - ), - )) + quota.QUOTAS.resources, 'test_project', defaults=False) + + self.assertEqual(self.calls, ['quota_get_all_by_project', + 'quota_usage_get_all_by_project', + 'quota_class_get_all_by_name', + 'quota_class_get_default', ]) + self.assertEqual(result, + dict(gigabytes=dict(limit=50, + in_use=10, + reserved=0, ), + snapshots=dict(limit=10, + in_use=2, + reserved=0, ), + volumes=dict(limit=10, + in_use=2, + reserved=0, ), )) def test_get_project_quotas_no_usages(self): self._stub_get_by_project() + self._stub_volume_type_get_all() result = self.driver.get_project_quotas( FakeContext('test_project', 'test_class'), - quota.QUOTAS._resources, 'test_project', usages=False) - - self.assertEqual(self.calls, [ - 'quota_get_all_by_project', - 'quota_class_get_all_by_name', - ]) - self.assertEqual(result, dict( - volumes=dict( - limit=10, - ), - gigabytes=dict( - limit=50, - ), - )) + quota.QUOTAS.resources, 
'test_project', usages=False) + + self.assertEqual(self.calls, ['quota_get_all_by_project', + 'quota_class_get_all_by_name', + 'quota_class_get_default', ]) + self.assertEqual(result, dict(volumes=dict(limit=10, ), + snapshots=dict(limit=10, ), + gigabytes=dict(limit=50, ), )) def _stub_get_project_quotas(self): def fake_get_project_quotas(context, resources, project_id, @@ -757,7 +931,7 @@ def test_get_quotas_has_sync_unknown(self): self._stub_get_project_quotas() self.assertRaises(exception.QuotaResourceUnknown, self.driver._get_quotas, - None, quota.QUOTAS._resources, + None, quota.QUOTAS.resources, ['unknown'], True) self.assertEqual(self.calls, []) @@ -765,7 +939,7 @@ def test_get_quotas_no_sync_unknown(self): self._stub_get_project_quotas() self.assertRaises(exception.QuotaResourceUnknown, self.driver._get_quotas, - None, quota.QUOTAS._resources, + None, quota.QUOTAS.resources, ['unknown'], False) self.assertEqual(self.calls, []) @@ -773,7 +947,7 @@ def test_get_quotas_has_sync_no_sync_resource(self): self._stub_get_project_quotas() self.assertRaises(exception.QuotaResourceUnknown, self.driver._get_quotas, - None, quota.QUOTAS._resources, + None, quota.QUOTAS.resources, ['metadata_items'], True) self.assertEqual(self.calls, []) @@ -781,7 +955,7 @@ def test_get_quotas_no_sync_has_sync_resource(self): self._stub_get_project_quotas() self.assertRaises(exception.QuotaResourceUnknown, self.driver._get_quotas, - None, quota.QUOTAS._resources, + None, quota.QUOTAS.resources, ['volumes'], False) self.assertEqual(self.calls, []) @@ -789,19 +963,16 @@ def test_get_quotas_has_sync(self): self._stub_get_project_quotas() result = self.driver._get_quotas(FakeContext('test_project', 'test_class'), - quota.QUOTAS._resources, + quota.QUOTAS.resources, ['volumes', 'gigabytes'], True) self.assertEqual(self.calls, ['get_project_quotas']) - self.assertEqual(result, dict( - volumes=10, - gigabytes=1000, - )) + self.assertEqual(result, dict(volumes=10, gigabytes=1000, )) def _stub_quota_reserve(self): def fake_quota_reserve(context, resources, quotas, deltas, expire, - until_refresh, max_age): + until_refresh, max_age, project_id=None): self.calls.append(('quota_reserve', expire, until_refresh, max_age)) return ['resv-1', 'resv-2', 'resv-3'] @@ -813,7 +984,7 @@ def test_reserve_bad_expire(self): self.assertRaises(exception.InvalidReservationExpiration, self.driver.reserve, FakeContext('test_project', 'test_class'), - quota.QUOTAS._resources, + quota.QUOTAS.resources, dict(volumes=2), expire='invalid') self.assertEqual(self.calls, []) @@ -821,28 +992,24 @@ def test_reserve_default_expire(self): self._stub_get_project_quotas() self._stub_quota_reserve() result = self.driver.reserve(FakeContext('test_project', 'test_class'), - quota.QUOTAS._resources, + quota.QUOTAS.resources, dict(volumes=2)) expire = timeutils.utcnow() + datetime.timedelta(seconds=86400) - self.assertEqual(self.calls, [ - 'get_project_quotas', - ('quota_reserve', expire, 0, 0), - ]) + self.assertEqual(self.calls, ['get_project_quotas', + ('quota_reserve', expire, 0, 0), ]) self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3']) def test_reserve_int_expire(self): self._stub_get_project_quotas() self._stub_quota_reserve() result = self.driver.reserve(FakeContext('test_project', 'test_class'), - quota.QUOTAS._resources, + quota.QUOTAS.resources, dict(volumes=2), expire=3600) expire = timeutils.utcnow() + datetime.timedelta(seconds=3600) - self.assertEqual(self.calls, [ - 'get_project_quotas', - ('quota_reserve', expire, 0, 0), - ]) + 
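The expire variants tested here (int, timedelta, datetime, and the rejected 'invalid') all normalize to an absolute datetime before quota_reserve is called. A sketch of that normalization (ValueError stands in for the driver's InvalidReservationExpiration)::

    import datetime

    def normalize_expire(expire, default_seconds, now):
        if expire is None:
            expire = default_seconds
        if isinstance(expire, (int, long)):
            expire = datetime.timedelta(seconds=expire)
        if isinstance(expire, datetime.timedelta):
            expire = now + expire
        if not isinstance(expire, datetime.datetime):
            raise ValueError('invalid reservation expiration %r' % (expire,))
        return expire

    now = datetime.datetime(2013, 1, 1)
    assert normalize_expire(3600, 86400, now) == now + datetime.timedelta(hours=1)
    assert normalize_expire(None, 86400, now) == now + datetime.timedelta(days=1)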
self.assertEqual(self.calls, ['get_project_quotas', + ('quota_reserve', expire, 0, 0), ]) self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3']) def test_reserve_timedelta_expire(self): @@ -850,14 +1017,12 @@ def test_reserve_timedelta_expire(self): self._stub_quota_reserve() expire_delta = datetime.timedelta(seconds=60) result = self.driver.reserve(FakeContext('test_project', 'test_class'), - quota.QUOTAS._resources, + quota.QUOTAS.resources, dict(volumes=2), expire=expire_delta) expire = timeutils.utcnow() + expire_delta - self.assertEqual(self.calls, [ - 'get_project_quotas', - ('quota_reserve', expire, 0, 0), - ]) + self.assertEqual(self.calls, ['get_project_quotas', + ('quota_reserve', expire, 0, 0), ]) self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3']) def test_reserve_datetime_expire(self): @@ -865,13 +1030,11 @@ def test_reserve_datetime_expire(self): self._stub_quota_reserve() expire = timeutils.utcnow() + datetime.timedelta(seconds=120) result = self.driver.reserve(FakeContext('test_project', 'test_class'), - quota.QUOTAS._resources, + quota.QUOTAS.resources, dict(volumes=2), expire=expire) - self.assertEqual(self.calls, [ - 'get_project_quotas', - ('quota_reserve', expire, 0, 0), - ]) + self.assertEqual(self.calls, ['get_project_quotas', + ('quota_reserve', expire, 0, 0), ]) self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3']) def test_reserve_until_refresh(self): @@ -880,13 +1043,11 @@ def test_reserve_until_refresh(self): self.flags(until_refresh=500) expire = timeutils.utcnow() + datetime.timedelta(seconds=120) result = self.driver.reserve(FakeContext('test_project', 'test_class'), - quota.QUOTAS._resources, + quota.QUOTAS.resources, dict(volumes=2), expire=expire) - self.assertEqual(self.calls, [ - 'get_project_quotas', - ('quota_reserve', expire, 500, 0), - ]) + self.assertEqual(self.calls, ['get_project_quotas', + ('quota_reserve', expire, 500, 0), ]) self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3']) def test_reserve_max_age(self): @@ -895,15 +1056,28 @@ def test_reserve_max_age(self): self.flags(max_age=86400) expire = timeutils.utcnow() + datetime.timedelta(seconds=120) result = self.driver.reserve(FakeContext('test_project', 'test_class'), - quota.QUOTAS._resources, + quota.QUOTAS.resources, dict(volumes=2), expire=expire) - self.assertEqual(self.calls, [ - 'get_project_quotas', - ('quota_reserve', expire, 0, 86400), - ]) + self.assertEqual(self.calls, ['get_project_quotas', + ('quota_reserve', expire, 0, 86400), ]) self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3']) + def _stub_quota_destroy_all_by_project(self): + def fake_quota_destroy_all_by_project(context, project_id): + self.calls.append(('quota_destroy_all_by_project', project_id)) + return None + self.stubs.Set(sqa_api, 'quota_destroy_all_by_project', + fake_quota_destroy_all_by_project) + + def test_destroy_by_project(self): + self._stub_quota_destroy_all_by_project() + self.driver.destroy_all_by_project(FakeContext('test_project', + 'test_class'), + 'test_project') + self.assertEqual(self.calls, [('quota_destroy_all_by_project', + ('test_project')), ]) + class FakeSession(object): def begin(self): @@ -932,7 +1106,8 @@ def setUp(self): self.sync_called = set() def make_sync(res_name): - def sync(context, project_id, session): + def fake_sync(context, project_id, volume_type_id=None, + volume_type_name=None, session=None): self.sync_called.add(res_name) if res_name in self.usages: if self.usages[res_name].in_use < 0: @@ -940,13 +1115,16 @@ def sync(context, project_id, session): else: 
return {res_name: self.usages[res_name].in_use - 1} return {res_name: 0} - return sync + return fake_sync self.resources = {} + QUOTA_SYNC_FUNCTIONS = {} for res_name in ('volumes', 'gigabytes'): - res = quota.ReservableResource(res_name, make_sync(res_name)) + res = quota.ReservableResource(res_name, '_sync_%s' % res_name) + QUOTA_SYNC_FUNCTIONS['_sync_%s' % res_name] = make_sync(res_name) self.resources[res_name] = res + self.stubs.Set(sqa_api, 'QUOTA_SYNC_FUNCTIONS', QUOTA_SYNC_FUNCTIONS) self.expire = timeutils.utcnow() + datetime.timedelta(seconds=3600) self.usages = {} @@ -956,7 +1134,7 @@ def sync(context, project_id, session): def fake_get_session(): return FakeSession() - def fake_get_quota_usages(context, session): + def fake_get_quota_usages(context, session, project_id): return self.usages.copy() def fake_quota_usage_create(context, project_id, resource, in_use, @@ -982,8 +1160,8 @@ def fake_reservation_create(context, uuid, usage_id, project_id, self.stubs.Set(sqa_api, 'get_session', fake_get_session) self.stubs.Set(sqa_api, '_get_quota_usages', fake_get_quota_usages) - self.stubs.Set(sqa_api, 'quota_usage_create', fake_quota_usage_create) - self.stubs.Set(sqa_api, 'reservation_create', fake_reservation_create) + self.stubs.Set(sqa_api, '_quota_usage_create', fake_quota_usage_create) + self.stubs.Set(sqa_api, '_reservation_create', fake_reservation_create) timeutils.set_time_override() @@ -1061,117 +1239,96 @@ def compare_reservation(self, reservations, expected): def test_quota_reserve_create_usages(self): context = FakeContext('test_project', 'test_class') - quotas = dict( - volumes=5, - gigabytes=10 * 1024, - ) - deltas = dict( - volumes=2, - gigabytes=2 * 1024, - ) + quotas = dict(volumes=5, + gigabytes=10 * 1024, ) + deltas = dict(volumes=2, + gigabytes=2 * 1024, ) result = sqa_api.quota_reserve(context, self.resources, quotas, deltas, self.expire, 0, 0) self.assertEqual(self.sync_called, set(['volumes', 'gigabytes'])) - self.compare_usage(self.usages_created, [ - dict(resource='volumes', - project_id='test_project', - in_use=0, - reserved=2, - until_refresh=None), - dict(resource='gigabytes', - project_id='test_project', - in_use=0, - reserved=2 * 1024, - until_refresh=None), - ]) - self.compare_reservation(result, [ - dict(resource='volumes', - usage_id=self.usages_created['volumes'], - project_id='test_project', - delta=2), - dict(resource='gigabytes', - usage_id=self.usages_created['gigabytes'], - delta=2 * 1024), - ]) + self.compare_usage(self.usages_created, + [dict(resource='volumes', + project_id='test_project', + in_use=0, + reserved=2, + until_refresh=None), + dict(resource='gigabytes', + project_id='test_project', + in_use=0, + reserved=2 * 1024, + until_refresh=None), ]) + self.compare_reservation( + result, + [dict(resource='volumes', + usage_id=self.usages_created['volumes'], + project_id='test_project', + delta=2), + dict(resource='gigabytes', + usage_id=self.usages_created['gigabytes'], + delta=2 * 1024), ]) def test_quota_reserve_negative_in_use(self): self.init_usage('test_project', 'volumes', -1, 0, until_refresh=1) self.init_usage('test_project', 'gigabytes', -1, 0, until_refresh=1) context = FakeContext('test_project', 'test_class') - quotas = dict( - volumes=5, - gigabytes=10 * 1024, - ) - deltas = dict( - volumes=2, - gigabytes=2 * 1024, - ) + quotas = dict(volumes=5, + gigabytes=10 * 1024, ) + deltas = dict(volumes=2, + gigabytes=2 * 1024, ) result = sqa_api.quota_reserve(context, self.resources, quotas, deltas, self.expire, 5, 0) 
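The QUOTA_SYNC_FUNCTIONS stub above mirrors how the SQLAlchemy layer now resolves sync callables: ReservableResource carries only a string name such as '_sync_volumes', and the DB API looks the function up in a registry. A sketch with an illustrative registration decorator (the real code populates the dict directly)::

    QUOTA_SYNC_FUNCTIONS = {}

    def register_quota_sync(fn):
        QUOTA_SYNC_FUNCTIONS[fn.__name__] = fn
        return fn

    @register_quota_sync
    def _sync_volumes(context, project_id, volume_type_id=None,
                      volume_type_name=None, session=None):
        # A real sync would count the project's volumes in the database.
        return {'volumes': 0}

    assert QUOTA_SYNC_FUNCTIONS['_sync_volumes'] is _sync_volumes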
self.assertEqual(self.sync_called, set(['volumes', 'gigabytes'])) - self.compare_usage(self.usages, [ - dict(resource='volumes', - project_id='test_project', - in_use=2, - reserved=2, - until_refresh=5), - dict(resource='gigabytes', - project_id='test_project', - in_use=2, - reserved=2 * 1024, - until_refresh=5), - ]) + self.compare_usage(self.usages, [dict(resource='volumes', + project_id='test_project', + in_use=2, + reserved=2, + until_refresh=5), + dict(resource='gigabytes', + project_id='test_project', + in_use=2, + reserved=2 * 1024, + until_refresh=5), ]) self.assertEqual(self.usages_created, {}) - self.compare_reservation(result, [ - dict(resource='volumes', - usage_id=self.usages['volumes'], - project_id='test_project', - delta=2), - dict(resource='gigabytes', - usage_id=self.usages['gigabytes'], - delta=2 * 1024), - ]) + self.compare_reservation(result, + [dict(resource='volumes', + usage_id=self.usages['volumes'], + project_id='test_project', + delta=2), + dict(resource='gigabytes', + usage_id=self.usages['gigabytes'], + delta=2 * 1024), ]) def test_quota_reserve_until_refresh(self): self.init_usage('test_project', 'volumes', 3, 0, until_refresh=1) self.init_usage('test_project', 'gigabytes', 3, 0, until_refresh=1) context = FakeContext('test_project', 'test_class') - quotas = dict( - volumes=5, - gigabytes=10 * 1024, - ) - deltas = dict( - volumes=2, - gigabytes=2 * 1024, - ) + quotas = dict(volumes=5, gigabytes=10 * 1024, ) + deltas = dict(volumes=2, gigabytes=2 * 1024, ) result = sqa_api.quota_reserve(context, self.resources, quotas, deltas, self.expire, 5, 0) self.assertEqual(self.sync_called, set(['volumes', 'gigabytes'])) - self.compare_usage(self.usages, [ - dict(resource='volumes', - project_id='test_project', - in_use=2, - reserved=2, - until_refresh=5), - dict(resource='gigabytes', - project_id='test_project', - in_use=2, - reserved=2 * 1024, - until_refresh=5), - ]) + self.compare_usage(self.usages, [dict(resource='volumes', + project_id='test_project', + in_use=2, + reserved=2, + until_refresh=5), + dict(resource='gigabytes', + project_id='test_project', + in_use=2, + reserved=2 * 1024, + until_refresh=5), ]) self.assertEqual(self.usages_created, {}) - self.compare_reservation(result, [ - dict(resource='volumes', - usage_id=self.usages['volumes'], - project_id='test_project', - delta=2), - dict(resource='gigabytes', - usage_id=self.usages['gigabytes'], - delta=2 * 1024), - ]) + self.compare_reservation(result, + [dict(resource='volumes', + usage_id=self.usages['volumes'], + project_id='test_project', + delta=2), + dict(resource='gigabytes', + usage_id=self.usages['gigabytes'], + delta=2 * 1024), ]) def test_quota_reserve_max_age(self): max_age = 3600 @@ -1182,149 +1339,114 @@ def test_quota_reserve_max_age(self): self.init_usage('test_project', 'gigabytes', 3, 0, created_at=record_created, updated_at=record_created) context = FakeContext('test_project', 'test_class') - quotas = dict( - volumes=5, - gigabytes=10 * 1024, - ) - deltas = dict( - volumes=2, - gigabytes=2 * 1024, - ) + quotas = dict(volumes=5, gigabytes=10 * 1024, ) + deltas = dict(volumes=2, gigabytes=2 * 1024, ) result = sqa_api.quota_reserve(context, self.resources, quotas, deltas, self.expire, 0, max_age) self.assertEqual(self.sync_called, set(['volumes', 'gigabytes'])) - self.compare_usage(self.usages, [ - dict(resource='volumes', - project_id='test_project', - in_use=2, - reserved=2, - until_refresh=None), - dict(resource='gigabytes', - project_id='test_project', - in_use=2, - reserved=2 * 1024, - 
until_refresh=None), - ]) + self.compare_usage(self.usages, [dict(resource='volumes', + project_id='test_project', + in_use=2, + reserved=2, + until_refresh=None), + dict(resource='gigabytes', + project_id='test_project', + in_use=2, + reserved=2 * 1024, + until_refresh=None), ]) self.assertEqual(self.usages_created, {}) - self.compare_reservation(result, [ - dict(resource='volumes', - usage_id=self.usages['volumes'], - project_id='test_project', - delta=2), - dict(resource='gigabytes', - usage_id=self.usages['gigabytes'], - delta=2 * 1024), - ]) + self.compare_reservation(result, + [dict(resource='volumes', + usage_id=self.usages['volumes'], + project_id='test_project', + delta=2), + dict(resource='gigabytes', + usage_id=self.usages['gigabytes'], + delta=2 * 1024), ]) def test_quota_reserve_no_refresh(self): self.init_usage('test_project', 'volumes', 3, 0) self.init_usage('test_project', 'gigabytes', 3, 0) context = FakeContext('test_project', 'test_class') - quotas = dict( - volumes=5, - gigabytes=10 * 1024, - ) - deltas = dict( - volumes=2, - gigabytes=2 * 1024, - ) + quotas = dict(volumes=5, gigabytes=10 * 1024, ) + deltas = dict(volumes=2, gigabytes=2 * 1024, ) result = sqa_api.quota_reserve(context, self.resources, quotas, deltas, self.expire, 0, 0) self.assertEqual(self.sync_called, set([])) - self.compare_usage(self.usages, [ - dict(resource='volumes', - project_id='test_project', - in_use=3, - reserved=2, - until_refresh=None), - dict(resource='gigabytes', - project_id='test_project', - in_use=3, - reserved=2 * 1024, - until_refresh=None), - ]) + self.compare_usage(self.usages, [dict(resource='volumes', + project_id='test_project', + in_use=3, + reserved=2, + until_refresh=None), + dict(resource='gigabytes', + project_id='test_project', + in_use=3, + reserved=2 * 1024, + until_refresh=None), ]) self.assertEqual(self.usages_created, {}) - self.compare_reservation(result, [ - dict(resource='volumes', - usage_id=self.usages['volumes'], - project_id='test_project', - delta=2), - dict(resource='gigabytes', - usage_id=self.usages['gigabytes'], - delta=2 * 1024), - ]) + self.compare_reservation(result, + [dict(resource='volumes', + usage_id=self.usages['volumes'], + project_id='test_project', + delta=2), + dict(resource='gigabytes', + usage_id=self.usages['gigabytes'], + delta=2 * 1024), ]) def test_quota_reserve_unders(self): self.init_usage('test_project', 'volumes', 1, 0) self.init_usage('test_project', 'gigabytes', 1 * 1024, 0) context = FakeContext('test_project', 'test_class') - quotas = dict( - volumes=5, - gigabytes=10 * 1024, - ) - deltas = dict( - volumes=-2, - gigabytes=-2 * 1024, - ) + quotas = dict(volumes=5, gigabytes=10 * 1024, ) + deltas = dict(volumes=-2, gigabytes=-2 * 1024, ) result = sqa_api.quota_reserve(context, self.resources, quotas, deltas, self.expire, 0, 0) self.assertEqual(self.sync_called, set([])) - self.compare_usage(self.usages, [ - dict(resource='volumes', - project_id='test_project', - in_use=1, - reserved=0, - until_refresh=None), - dict(resource='gigabytes', - project_id='test_project', - in_use=1 * 1024, - reserved=0, - until_refresh=None), - ]) + self.compare_usage(self.usages, [dict(resource='volumes', + project_id='test_project', + in_use=1, + reserved=0, + until_refresh=None), + dict(resource='gigabytes', + project_id='test_project', + in_use=1 * 1024, + reserved=0, + until_refresh=None), ]) self.assertEqual(self.usages_created, {}) - self.compare_reservation(result, [ - dict(resource='volumes', - usage_id=self.usages['volumes'], - 
project_id='test_project', - delta=-2), - dict(resource='gigabytes', - usage_id=self.usages['gigabytes'], - delta=-2 * 1024), - ]) + self.compare_reservation(result, + [dict(resource='volumes', + usage_id=self.usages['volumes'], + project_id='test_project', + delta=-2), + dict(resource='gigabytes', + usage_id=self.usages['gigabytes'], + delta=-2 * 1024), ]) def test_quota_reserve_overs(self): self.init_usage('test_project', 'volumes', 4, 0) self.init_usage('test_project', 'gigabytes', 10 * 1024, 0) context = FakeContext('test_project', 'test_class') - quotas = dict( - volumes=5, - gigabytes=10 * 1024, - ) - deltas = dict( - volumes=2, - gigabytes=2 * 1024, - ) + quotas = dict(volumes=5, gigabytes=10 * 1024, ) + deltas = dict(volumes=2, gigabytes=2 * 1024, ) self.assertRaises(exception.OverQuota, sqa_api.quota_reserve, context, self.resources, quotas, deltas, self.expire, 0, 0) self.assertEqual(self.sync_called, set([])) - self.compare_usage(self.usages, [ - dict(resource='volumes', - project_id='test_project', - in_use=4, - reserved=0, - until_refresh=None), - dict(resource='gigabytes', - project_id='test_project', - in_use=10 * 1024, - reserved=0, - until_refresh=None), - ]) + self.compare_usage(self.usages, [dict(resource='volumes', + project_id='test_project', + in_use=4, + reserved=0, + until_refresh=None), + dict(resource='gigabytes', + project_id='test_project', + in_use=10 * 1024, + reserved=0, + until_refresh=None), ]) self.assertEqual(self.usages_created, {}) self.assertEqual(self.reservations_created, {}) @@ -1332,38 +1454,29 @@ def test_quota_reserve_reduction(self): self.init_usage('test_project', 'volumes', 10, 0) self.init_usage('test_project', 'gigabytes', 20 * 1024, 0) context = FakeContext('test_project', 'test_class') - quotas = dict( - volumes=5, - gigabytes=10 * 1024, - ) - deltas = dict( - volumes=-2, - gigabytes=-2 * 1024, - ) + quotas = dict(volumes=5, gigabytes=10 * 1024, ) + deltas = dict(volumes=-2, gigabytes=-2 * 1024, ) result = sqa_api.quota_reserve(context, self.resources, quotas, deltas, self.expire, 0, 0) self.assertEqual(self.sync_called, set([])) - self.compare_usage(self.usages, [ - dict(resource='volumes', - project_id='test_project', - in_use=10, - reserved=0, - until_refresh=None), - dict(resource='gigabytes', - project_id='test_project', - in_use=20 * 1024, - reserved=0, - until_refresh=None), - ]) + self.compare_usage(self.usages, [dict(resource='volumes', + project_id='test_project', + in_use=10, + reserved=0, + until_refresh=None), + dict(resource='gigabytes', + project_id='test_project', + in_use=20 * 1024, + reserved=0, + until_refresh=None), ]) self.assertEqual(self.usages_created, {}) - self.compare_reservation(result, [ - dict(resource='volumes', - usage_id=self.usages['volumes'], - project_id='test_project', - delta=-2), - dict(resource='gigabytes', - usage_id=self.usages['gigabytes'], - project_id='test_project', - delta=-2 * 1024), - ]) + self.compare_reservation(result, + [dict(resource='volumes', + usage_id=self.usages['volumes'], + project_id='test_project', + delta=-2), + dict(resource='gigabytes', + usage_id=self.usages['gigabytes'], + project_id='test_project', + delta=-2 * 1024), ]) diff --git a/cinder/tests/test_rbd.py b/cinder/tests/test_rbd.py index 704e18029e..4c80863f4e 100644 --- a/cinder/tests/test_rbd.py +++ b/cinder/tests/test_rbd.py @@ -1,6 +1,6 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 Josh Durgin +# Copyright 2013 Canonical Ltd. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -15,113 +15,806 @@ # License for the specific language governing permissions and limitations # under the License. + +import mock +import os +import tempfile + from cinder import db from cinder import exception +from cinder.image import image_utils from cinder.openstack.common import log as logging from cinder.openstack.common import timeutils from cinder import test from cinder.tests.image import fake as fake_image from cinder.tests.test_volume import DriverTestCase -from cinder.volume.driver import RBDDriver +from cinder import units +from cinder.volume import configuration as conf +import cinder.volume.drivers.rbd as driver +from cinder.volume.flows.manager import create_volume + LOG = logging.getLogger(__name__) +CEPH_MON_DUMP = """dumped monmap epoch 1 +{ "epoch": 1, + "fsid": "33630410-6d93-4d66-8e42-3b953cf194aa", + "modified": "2013-05-22 17:44:56.343618", + "created": "2013-05-22 17:44:56.343618", + "mons": [ + { "rank": 0, + "name": "a", + "addr": "[::1]:6789\/0"}, + { "rank": 1, + "name": "b", + "addr": "[::1]:6790\/0"}, + { "rank": 2, + "name": "c", + "addr": "[::1]:6791\/0"}, + { "rank": 3, + "name": "d", + "addr": "127.0.0.1:6792\/0"}, + { "rank": 4, + "name": "e", + "addr": "example.com:6791\/0"}], + "quorum": [ + 0, + 1, + 2]} +""" + + +class TestUtil(test.TestCase): + def test_ascii_str(self): + self.assertIsNone(driver.ascii_str(None)) + self.assertEqual('foo', driver.ascii_str('foo')) + self.assertEqual('foo', driver.ascii_str(u'foo')) + self.assertRaises(UnicodeEncodeError, + driver.ascii_str, 'foo' + unichr(300)) + + class RBDTestCase(test.TestCase): def setUp(self): super(RBDTestCase, self).setUp() - def fake_execute(*args): - pass - self.driver = RBDDriver(execute=fake_execute) + self.cfg = mock.Mock(spec=conf.Configuration) + self.cfg.volume_tmp_dir = None + self.cfg.rbd_pool = 'rbd' + self.cfg.rbd_ceph_conf = None + self.cfg.rbd_secret_uuid = None + self.cfg.rbd_user = None + self.cfg.volume_dd_blocksize = '1M' + + # set some top level mocks for these common modules and tests can then + # set method/attributes as required. 
+ self.rados = mock.Mock() + self.rbd = mock.Mock() + self.rbd.RBD = mock.Mock + self.rbd.Image = mock.Mock + self.rbd.ImageSnapshot = mock.Mock + + mock_exec = mock.Mock() + mock_exec.return_value = ('', '') + + self.driver = driver.RBDDriver(execute=mock_exec, + configuration=self.cfg, + rados=self.rados, + rbd=self.rbd) + self.driver.set_initialized() + + self.volume_name = u'volume-00000001' + self.snapshot_name = u'snapshot-00000001' + self.volume_size = 1 + self.volume = dict(name=self.volume_name, size=self.volume_size) + self.snapshot = dict(volume_name=self.volume_name, + name=self.snapshot_name) + + def tearDown(self): + super(RBDTestCase, self).tearDown() + + @mock.patch('cinder.volume.drivers.rbd.RADOSClient') + def test_create_volume(self, mock_client): + client = mock_client.return_value + client.__enter__.return_value = client + + self.driver._supports_layering = mock.Mock() + self.driver._supports_layering.return_value = True + self.rbd.RBD.create = mock.Mock() + + self.driver.create_volume(self.volume) + + args = [client.ioctx, str(self.volume_name), + self.volume_size * units.GiB] + kwargs = {'old_format': False, + 'features': self.rbd.RBD_FEATURE_LAYERING} + + self.rbd.RBD.create.assert_called_once() + client.__enter__.assert_called_once() + client.__exit__.assert_called_once() + self.driver._supports_layering.assert_called_once() + self.rbd.RBD.create.assert_called_once_with(*args, **kwargs) + + @mock.patch('cinder.volume.drivers.rbd.RADOSClient') + def test_create_volume_no_layering(self, mock_client): + client = mock_client.return_value + client.__enter__.return_value = client + + self.driver._supports_layering = mock.Mock() + self.driver._supports_layering.return_value = False + self.rbd.RBD.create = mock.Mock() + + self.driver.create_volume(self.volume) + + args = [client.ioctx, str(self.volume_name), + self.volume_size * units.GiB] + kwargs = {'old_format': True, + 'features': 0} + + self.rbd.RBD.create.assert_called_once() + client.__enter__.assert_called_once() + client.__exit__.assert_called_once() + self.driver._supports_layering.assert_called_once() + self.rbd.RBD.create.assert_called_once_with(*args, **kwargs) + + @mock.patch('cinder.volume.drivers.rbd.RADOSClient') + def test_delete_volume(self, mock_client): + client = mock_client.return_value + + self.driver.rbd.Image.list_snaps = mock.Mock() + self.driver.rbd.Image.list_snaps.return_value = [] + self.driver.rbd.Image.close = mock.Mock() + self.driver.rbd.Image.remove = mock.Mock() + self.driver.rbd.Image.unprotect_snap = mock.Mock() + + self.driver._get_clone_info = mock.Mock() + self.driver._get_clone_info.return_value = (None, None, None) + self.driver._delete_backup_snaps = mock.Mock() + + self.driver.delete_volume(self.volume) + + self.driver._get_clone_info.assert_called_once() + self.driver.rbd.Image.list_snaps.assert_called_once() + client.__enter__.assert_called_once() + client.__exit__.assert_called_once() + self.driver._delete_backup_snaps.assert_called_once() + self.assertFalse(self.driver.rbd.Image.unprotect_snap.called) + self.driver.rbd.RBD.remove.assert_called_once() + + @mock.patch('cinder.volume.drivers.rbd.rbd') + def test_delete_volume_not_found(self, mock_rbd): + mock_rbd.RBD = mock.Mock + mock_rbd.ImageNotFound = Exception + mock_rbd.Image.side_effect = mock_rbd.ImageNotFound + + self.driver.rbd = mock_rbd + + with mock.patch.object(driver, 'RADOSClient'): + self.assertIsNone(self.driver.delete_volume(self.volume)) + mock_rbd.Image.assert_called_once() + + def 
test_delete_busy_volume(self): + self.rbd.Image.close = mock.Mock() + self.rbd.Image.list_snaps = mock.Mock() + self.rbd.Image.list_snaps.return_value = [] + self.rbd.Image.unprotect_snap = mock.Mock() + + self.rbd.ImageBusy = Exception + self.rbd.RBD.remove = mock.Mock() + self.rbd.RBD.remove.side_effect = self.rbd.ImageBusy + + self.driver._get_clone_info = mock.Mock() + self.driver._get_clone_info.return_value = (None, None, None) + self.driver._delete_backup_snaps = mock.Mock() + + with mock.patch.object(driver, 'RADOSClient') as mock_rados_client: + self.assertRaises(exception.VolumeIsBusy, + self.driver.delete_volume, self.volume) + + self.driver._get_clone_info.assert_called_once() + self.rbd.Image.list_snaps.assert_called_once() + mock_rados_client.assert_called_once() + self.driver._delete_backup_snaps.assert_called_once() + self.assertFalse(self.rbd.Image.unprotect_snap.called) + self.rbd.RBD.remove.assert_called_once() + + @mock.patch('cinder.volume.drivers.rbd.RBDVolumeProxy') + def test_create_snapshot(self, mock_proxy): + proxy = mock_proxy.return_value + proxy.__enter__.return_value = proxy + + self.driver.create_snapshot(self.snapshot) + + args = [str(self.snapshot_name)] + proxy.create_snap.assert_called_with(*args) + proxy.protect_snap.assert_called_with(*args) + + @mock.patch('cinder.volume.drivers.rbd.RBDVolumeProxy') + def test_delete_snapshot(self, mock_proxy): + proxy = mock_proxy.return_value + proxy.__enter__.return_value = proxy + + self.driver.delete_snapshot(self.snapshot) + + args = [str(self.snapshot_name)] + proxy.remove_snap.assert_called_with(*args) + proxy.unprotect_snap.assert_called_with(*args) + + def test_get_clone_info(self): + + volume = self.rbd.Image() + volume.set_snap = mock.Mock() + volume.parent_info = mock.Mock() + parent_info = ('a', 'b', '%s.clone_snap' % (self.volume_name)) + volume.parent_info.return_value = parent_info + + info = self.driver._get_clone_info(volume, self.volume_name) + + self.assertEqual(info, parent_info) + + self.assertFalse(volume.set_snap.called) + volume.parent_info.assert_called_once() + + def test_get_clone_info_w_snap(self): + + volume = self.rbd.Image() + volume.set_snap = mock.Mock() + volume.parent_info = mock.Mock() + parent_info = ('a', 'b', '%s.clone_snap' % (self.volume_name)) + volume.parent_info.return_value = parent_info + + snapshot = self.rbd.ImageSnapshot() + + info = self.driver._get_clone_info(volume, self.volume_name, + snap=snapshot) + + self.assertEqual(info, parent_info) + + volume.set_snap.assert_called_once() + self.assertEqual(volume.set_snap.call_count, 2) + volume.parent_info.assert_called_once() + + def test_get_clone_info_w_exception(self): + + self.rbd.ImageNotFound = Exception + + volume = self.rbd.Image() + volume.set_snap = mock.Mock() + volume.parent_info = mock.Mock() + volume.parent_info.side_effect = self.rbd.ImageNotFound + + snapshot = self.rbd.ImageSnapshot() + + info = self.driver._get_clone_info(volume, self.volume_name, + snap=snapshot) + + self.assertEqual(info, (None, None, None)) + + volume.set_snap.assert_called_once() + self.assertEqual(volume.set_snap.call_count, 2) + volume.parent_info.assert_called_once() + + def test_get_clone_info_deleted_volume(self): + + volume = self.rbd.Image() + volume.set_snap = mock.Mock() + volume.parent_info = mock.Mock() + parent_info = ('a', 'b', '%s.clone_snap' % (self.volume_name)) + volume.parent_info.return_value = parent_info + + info = self.driver._get_clone_info(volume, + "%s.deleted" % (self.volume_name)) + + 
self.assertEqual(info, parent_info) + + self.assertFalse(volume.set_snap.called) + volume.parent_info.assert_called_once() + + @mock.patch('cinder.volume.drivers.rbd.RADOSClient') + def test_create_cloned_volume(self, mock_client): + src_name = u'volume-00000001' + dst_name = u'volume-00000002' + + self.cfg.rbd_max_clone_depth = 2 + self.rbd.RBD.clone = mock.Mock() + self.driver._get_clone_depth = mock.Mock() + # Try with no flatten required + self.driver._get_clone_depth.return_value = 1 + + self.rbd.Image.create_snap = mock.Mock() + self.rbd.Image.protect_snap = mock.Mock() + self.rbd.Image.close = mock.Mock() + + self.driver.create_cloned_volume(dict(name=dst_name), + dict(name=src_name)) + + self.rbd.Image.create_snap.assert_called_once() + self.rbd.Image.protect_snap.assert_called_once() + self.rbd.RBD.clone.assert_called_once() + self.rbd.Image.close.assert_called_once() + + @mock.patch('cinder.volume.drivers.rbd.RADOSClient') + def test_create_cloned_volume_w_flatten(self, mock_client): + src_name = u'volume-00000001' + dst_name = u'volume-00000002' + + self.cfg.rbd_max_clone_depth = 1 + self.rbd.RBD.Error = Exception + self.rbd.RBD.clone = mock.Mock() + self.rbd.RBD.clone.side_effect = self.rbd.RBD.Error + self.driver._get_clone_depth = mock.Mock() + # Try with no flatten required + self.driver._get_clone_depth.return_value = 1 + + self.rbd.Image.create_snap = mock.Mock() + self.rbd.Image.protect_snap = mock.Mock() + self.rbd.Image.unprotect_snap = mock.Mock() + self.rbd.Image.remove_snap = mock.Mock() + self.rbd.Image.close = mock.Mock() + + self.assertRaises(self.rbd.RBD.Error, self.driver.create_cloned_volume, + dict(name=dst_name), dict(name=src_name)) + + self.rbd.Image.create_snap.assert_called_once() + self.rbd.Image.protect_snap.assert_called_once() + self.rbd.RBD.clone.assert_called_once() + self.rbd.Image.unprotect_snap.assert_called_once() + self.rbd.Image.remove_snap.assert_called_once() + self.rbd.Image.close.assert_called_once() + + @mock.patch('cinder.volume.drivers.rbd.RADOSClient') + def test_create_cloned_volume_w_clone_exception(self, mock_client): + src_name = u'volume-00000001' + dst_name = u'volume-00000002' + + self.cfg.rbd_max_clone_depth = 2 + self.rbd.RBD.Error = Exception + self.rbd.RBD.clone = mock.Mock() + self.rbd.RBD.clone.side_effect = self.rbd.RBD.Error + self.driver._get_clone_depth = mock.Mock() + # Try with no flatten required + self.driver._get_clone_depth.return_value = 1 + + self.rbd.Image.create_snap = mock.Mock() + self.rbd.Image.protect_snap = mock.Mock() + self.rbd.Image.unprotect_snap = mock.Mock() + self.rbd.Image.remove_snap = mock.Mock() + self.rbd.Image.close = mock.Mock() + + self.assertRaises(self.rbd.RBD.Error, self.driver.create_cloned_volume, + dict(name=dst_name), dict(name=src_name)) + + self.rbd.Image.create_snap.assert_called_once() + self.rbd.Image.protect_snap.assert_called_once() + self.rbd.RBD.clone.assert_called_once() + self.rbd.Image.unprotect_snap.assert_called_once() + self.rbd.Image.remove_snap.assert_called_once() + self.rbd.Image.close.assert_called_once() def test_good_locations(self): - locations = [ - 'rbd://fsid/pool/image/snap', - 'rbd://%2F/%2F/%2F/%2F', - ] + locations = ['rbd://fsid/pool/image/snap', + 'rbd://%2F/%2F/%2F/%2F', ] map(self.driver._parse_location, locations) def test_bad_locations(self): - locations = [ - 'rbd://image', - 'http://path/to/somewhere/else', - 'rbd://image/extra', - 'rbd://image/', - 'rbd://fsid/pool/image/', - 'rbd://fsid/pool/image/snap/', - 'rbd://///', - ] + locations = 
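The clone-depth handling exercised by the create_cloned_volume tests boils down to: clone from a protected snapshot, but once the parent chain is as deep as rbd_max_clone_depth, take the flatten path instead. A minimal sketch of that decision (helper name illustrative)::

    def needs_flatten(clone_depth, rbd_max_clone_depth):
        return clone_depth >= rbd_max_clone_depth

    assert not needs_flatten(1, 2)  # test_create_cloned_volume
    assert needs_flatten(1, 1)      # test_create_cloned_volume_w_flatten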
['rbd://image', + 'http://path/to/somewhere/else', + 'rbd://image/extra', + 'rbd://image/', + 'rbd://fsid/pool/image/', + 'rbd://fsid/pool/image/snap/', + 'rbd://///', ] for loc in locations: self.assertRaises(exception.ImageUnacceptable, self.driver._parse_location, loc) - self.assertFalse(self.driver._is_cloneable(loc)) + self.assertFalse( + self.driver._is_cloneable(loc, {'disk_format': 'raw'})) - def test_cloneable(self): - self.stubs.Set(self.driver, '_get_fsid', lambda: 'abc') + @mock.patch('cinder.volume.drivers.rbd.RBDVolumeProxy') + def test_cloneable(self, mock_proxy): + self.driver._get_fsid = mock.Mock() + self.driver._get_fsid.return_value = 'abc' location = 'rbd://abc/pool/image/snap' - self.assertTrue(self.driver._is_cloneable(location)) + info = {'disk_format': 'raw'} + self.assertTrue(self.driver._is_cloneable(location, info)) - def test_uncloneable_different_fsid(self): - self.stubs.Set(self.driver, '_get_fsid', lambda: 'abc') + @mock.patch('cinder.volume.drivers.rbd.RBDVolumeProxy') + def test_uncloneable_different_fsid(self, mock_proxy): + self.driver._get_fsid = mock.Mock() + self.driver._get_fsid.return_value = 'abc' location = 'rbd://def/pool/image/snap' - self.assertFalse(self.driver._is_cloneable(location)) + self.assertFalse( + self.driver._is_cloneable(location, {'disk_format': 'raw'})) + + @mock.patch('cinder.volume.drivers.rbd.RBDVolumeProxy') + def test_uncloneable_unreadable(self, mock_proxy): + self.driver._get_fsid = mock.Mock() + self.driver._get_fsid.return_value = 'abc' + location = 'rbd://abc/pool/image/snap' - def test_uncloneable_unreadable(self): - def fake_exc(*args): - raise exception.ProcessExecutionError() - self.stubs.Set(self.driver, '_get_fsid', lambda: 'abc') - self.stubs.Set(self.driver, '_execute', fake_exc) + self.rbd.Error = Exception + mock_proxy.side_effect = self.rbd.Error + + args = [location, {'disk_format': 'raw'}] + self.assertFalse(self.driver._is_cloneable(*args)) + mock_proxy.assert_called_once() + + def test_uncloneable_bad_format(self): + self.driver._get_fsid = mock.Mock() + self.driver._get_fsid.return_value = 'abc' location = 'rbd://abc/pool/image/snap' - self.assertFalse(self.driver._is_cloneable(location)) + formats = ['qcow2', 'vmdk', 'vdi'] + for f in formats: + self.assertFalse( + self.driver._is_cloneable(location, {'disk_format': f})) + + def _copy_image(self): + with mock.patch.object(tempfile, 'NamedTemporaryFile'): + with mock.patch.object(os.path, 'exists') as mock_exists: + mock_exists.return_value = True + with mock.patch.object(image_utils, 'fetch_to_raw'): + with mock.patch.object(self.driver, 'delete_volume'): + with mock.patch.object(self.driver, '_resize'): + mock_image_service = mock.MagicMock() + args = [None, {'name': 'test', 'size': 1}, + mock_image_service, None] + self.driver.copy_image_to_volume(*args) + + def test_copy_image_no_volume_tmp(self): + self.cfg.volume_tmp_dir = None + self._copy_image() + + def test_copy_image_volume_tmp(self): + self.cfg.volume_tmp_dir = '/var/run/cinder/tmp' + self._copy_image() + @mock.patch('cinder.volume.drivers.rbd.RADOSClient') + def test_update_volume_stats(self, mock_client): + client = mock_client.return_value + client.__enter__.return_value = client -class FakeRBDDriver(RBDDriver): + client.cluster = mock.Mock() + client.cluster.get_cluster_stats = mock.Mock() + client.cluster.get_cluster_stats.return_value = {'kb': 1024 ** 3, + 'kb_avail': 1024 ** 2} - def _clone(self): - pass + self.driver.configuration.safe_get = mock.Mock() + 
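# NOTE: configuration.safe_get() is stubbed so that volume_backend_name + # resolves to 'RBD' in the stats verified below. +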
self.driver.configuration.safe_get.return_value = 'RBD' - def _resize(self): - pass + expected = dict( + volume_backend_name='RBD', + vendor_name='Open Source', + driver_version=self.driver.VERSION, + storage_protocol='ceph', + total_capacity_gb=1024, + free_capacity_gb=1, + reserved_percentage=0) + + actual = self.driver.get_volume_stats(True) + client.cluster.get_cluster_stats.assert_called_once() + self.assertDictMatch(expected, actual) + + @mock.patch('cinder.volume.drivers.rbd.RADOSClient') + def test_update_volume_stats_error(self, mock_client): + client = mock_client.return_value + client.__enter__.return_value = client + + client.cluster = mock.Mock() + client.cluster.get_cluster_stats = mock.Mock() + client.cluster.get_cluster_stats.side_effect = Exception + + self.driver.configuration.safe_get = mock.Mock() + self.driver.configuration.safe_get.return_value = 'RBD' + + self.rados.Error = Exception + + expected = dict(volume_backend_name='RBD', + vendor_name='Open Source', + driver_version=self.driver.VERSION, + storage_protocol='ceph', + total_capacity_gb='unknown', + free_capacity_gb='unknown', + reserved_percentage=0) + + actual = self.driver.get_volume_stats(True) + client.cluster.get_cluster_stats.assert_called_once() + self.assertDictMatch(expected, actual) + + def test_get_mon_addrs(self): + self.driver._execute = mock.Mock() + self.driver._execute.return_value = (CEPH_MON_DUMP, '') + + hosts = ['::1', '::1', '::1', '127.0.0.1', 'example.com'] + ports = ['6789', '6790', '6791', '6792', '6791'] + self.assertEqual((hosts, ports), self.driver._get_mon_addrs()) + + def test_initialize_connection(self): + hosts = ['::1', '::1', '::1', '127.0.0.1', 'example.com'] + ports = ['6789', '6790', '6791', '6792', '6791'] + + self.driver._get_mon_addrs = mock.Mock() + self.driver._get_mon_addrs.return_value = (hosts, ports) + + expected = { + 'driver_volume_type': 'rbd', + 'data': { + 'name': '%s/%s' % (self.cfg.rbd_pool, + self.volume_name), + 'hosts': hosts, + 'ports': ports, + 'auth_enabled': False, + 'auth_username': None, + 'secret_type': 'ceph', + 'secret_uuid': None, } + } + actual = self.driver.initialize_connection(dict(name=self.volume_name), + None) + self.assertDictMatch(expected, actual) + + @mock.patch('cinder.volume.drivers.rbd.RADOSClient') + def test_clone(self, mock_client): + src_pool = u'images' + src_image = u'image-name' + src_snap = u'snapshot-name' + + client_stack = [] + + def mock__enter__(inst): + def _inner(): + client_stack.append(inst) + return inst + return _inner + + client = mock_client.return_value + # capture both rados clients used to perform the clone + client.__enter__.side_effect = mock__enter__(client) + + self.rbd.RBD.clone = mock.Mock() + + self.driver._clone(self.volume, src_pool, src_image, src_snap) + + args = [client_stack[0].ioctx, str(src_image), str(src_snap), + client_stack[1].ioctx, str(self.volume_name)] + kwargs = {'features': self.rbd.RBD_FEATURE_LAYERING} + self.rbd.RBD.clone.assert_called_once_with(*args, **kwargs) + self.assertEqual(client.__enter__.call_count, 2) + + def test_extend_volume(self): + fake_size = '20' + fake_vol = {'project_id': 'testprjid', 'name': self.volume_name, + 'size': fake_size, + 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66'} + + self.mox.StubOutWithMock(self.driver, '_resize') + size = int(fake_size) * units.GiB + self.driver._resize(fake_vol, size=size) + + self.mox.ReplayAll() + self.driver.extend_volume(fake_vol, fake_size) + + self.mox.VerifyAll() + + @mock.patch('cinder.volume.drivers.rbd.RADOSClient') + 
def test_rbd_volume_proxy_init(self, mock_client): + snap = u'snapshot-name' + + client = mock_client.return_value + client.__enter__.return_value = client + + self.driver._connect_to_rados = mock.Mock() + self.driver._connect_to_rados.return_value = (None, None) + self.driver._disconnect_from_rados = mock.Mock() + self.driver._disconnect_from_rados.return_value = (None, None) + + with driver.RBDVolumeProxy(self.driver, self.volume_name): + self.driver._connect_to_rados.assert_called_once() + self.assertFalse(self.driver._disconnect_from_rados.called) + + self.driver._disconnect_from_rados.assert_called_once() + + self.driver._connect_to_rados.reset_mock() + self.driver._disconnect_from_rados.reset_mock() + + with driver.RBDVolumeProxy(self.driver, self.volume_name, + snapshot=snap): + self.driver._connect_to_rados.assert_called_once() + self.assertFalse(self.driver._disconnect_from_rados.called) + + self.driver._disconnect_from_rados.assert_called_once() + + @mock.patch('cinder.volume.drivers.rbd.RADOSClient') + def test_connect_to_rados(self, mock_client): + client = mock_client.return_value + client.__enter__.return_value = client + client.open_ioctx = mock.Mock() + + mock_ioctx = mock.Mock() + client.open_ioctx.return_value = mock_ioctx + + self.rados.Error = test.TestingException + self.rados.Rados.return_value = client + + # default configured pool + self.assertEqual((client, mock_ioctx), + self.driver._connect_to_rados()) + client.open_ioctx.assert_called_with(self.cfg.rbd_pool) + + # different pool + self.assertEqual((client, mock_ioctx), + self.driver._connect_to_rados('images')) + client.open_ioctx.assert_called_with('images') + + # error + client.open_ioctx.reset_mock() + client.shutdown.reset_mock() + client.open_ioctx.side_effect = self.rados.Error + self.assertRaises(test.TestingException, self.driver._connect_to_rados) + client.open_ioctx.assert_called_once() + client.shutdown.assert_called_once() + + +class RBDImageIOWrapperTestCase(test.TestCase): + def setUp(self): + super(RBDImageIOWrapperTestCase, self).setUp() + self.meta = mock.Mock() + self.meta.user = 'mock_user' + self.meta.conf = 'mock_conf' + self.meta.pool = 'mock_pool' + self.meta.image = mock.Mock() + self.meta.image.read = mock.Mock() + self.meta.image.write = mock.Mock() + self.meta.image.size = mock.Mock() + self.rbd_wrapper = driver.RBDImageIOWrapper(self.meta) + self.data_length = 1024 + self.full_data = 'abcd' * 256 + + def tearDown(self): + super(RBDImageIOWrapperTestCase, self).tearDown() + + def test_init(self): + self.assertEqual(self.rbd_wrapper._rbd_meta, self.meta) + self.assertEqual(self.rbd_wrapper._offset, 0) + + def test_inc_offset(self): + self.rbd_wrapper._inc_offset(10) + self.rbd_wrapper._inc_offset(10) + self.assertEqual(self.rbd_wrapper._offset, 20) + + def test_rbd_image(self): + self.assertEqual(self.rbd_wrapper.rbd_image, self.meta.image) + + def test_rbd_user(self): + self.assertEqual(self.rbd_wrapper.rbd_user, self.meta.user) + + def test_rbd_pool(self): + self.assertEqual(self.rbd_wrapper.rbd_pool, self.meta.pool) + + def test_rbd_conf(self): + self.assertEqual(self.rbd_wrapper.rbd_conf, self.meta.conf) + + def test_read(self): + + def mock_read(offset, length): + return self.full_data[offset:length] + + self.meta.image.read.side_effect = mock_read + self.meta.image.size.return_value = self.data_length + + data = self.rbd_wrapper.read() + self.assertEqual(data, self.full_data) + + data = self.rbd_wrapper.read() + self.assertEqual(data, '') + + self.rbd_wrapper.seek(0) + data = 
self.rbd_wrapper.read() + self.assertEqual(data, self.full_data) + + self.rbd_wrapper.seek(0) + data = self.rbd_wrapper.read(10) + self.assertEqual(data, self.full_data[:10]) + + def test_write(self): + self.rbd_wrapper.write(self.full_data) + self.assertEqual(self.rbd_wrapper._offset, 1024) + + def test_seekable(self): + self.assertTrue(self.rbd_wrapper.seekable) + + def test_seek(self): + self.assertEqual(self.rbd_wrapper._offset, 0) + self.rbd_wrapper.seek(10) + self.assertEqual(self.rbd_wrapper._offset, 10) + self.rbd_wrapper.seek(10) + self.assertEqual(self.rbd_wrapper._offset, 10) + self.rbd_wrapper.seek(10, 1) + self.assertEqual(self.rbd_wrapper._offset, 20) + + self.rbd_wrapper.seek(0) + self.rbd_wrapper.write(self.full_data) + self.meta.image.size.return_value = self.data_length + self.rbd_wrapper.seek(0) + self.assertEqual(self.rbd_wrapper._offset, 0) + + self.rbd_wrapper.seek(10, 2) + self.assertEqual(self.rbd_wrapper._offset, self.data_length + 10) + self.rbd_wrapper.seek(-10, 2) + self.assertEqual(self.rbd_wrapper._offset, self.data_length - 10) + + # test exceptions. + self.assertRaises(IOError, self.rbd_wrapper.seek, 0, 3) + self.assertRaises(IOError, self.rbd_wrapper.seek, -1) + # offset should not have been changed by any of the previous + # operations. + self.assertEqual(self.rbd_wrapper._offset, self.data_length - 10) + + def test_tell(self): + self.assertEqual(self.rbd_wrapper.tell(), 0) + self.rbd_wrapper._inc_offset(10) + self.assertEqual(self.rbd_wrapper.tell(), 10) + + def test_flush(self): + with mock.patch.object(driver, 'LOG') as mock_logger: + self.meta.image.flush = mock.Mock() + self.rbd_wrapper.flush() + self.meta.image.flush.assert_called_once() + self.meta.image.flush.reset_mock() + # this should be caught and logged silently. + self.meta.image.flush.side_effect = AttributeError + self.rbd_wrapper.flush() + self.meta.image.flush.assert_called_once() + msg = _("flush() not supported in this version of librbd") + mock_logger.warning.assert_called_with(msg) + + def test_fileno(self): + self.assertRaises(IOError, self.rbd_wrapper.fileno) + + def test_close(self): + self.rbd_wrapper.close() class ManagedRBDTestCase(DriverTestCase): - driver_name = "cinder.tests.test_rbd.FakeRBDDriver" + driver_name = "cinder.volume.drivers.rbd.RBDDriver" def setUp(self): super(ManagedRBDTestCase, self).setUp() + # TODO(dosaboy): need to remove dependency on mox stubs here once + # image.fake has been converted to mock. fake_image.stub_out_image_service(self.stubs) + self.volume.driver.set_initialized() + self.volume.stats = {'allocated_capacity_gb': 0} + self.called = [] - def _clone_volume_from_image(self, expected_status, - clone_works=True): + def _create_volume_from_image(self, expected_status, raw=False, + clone_error=False): """Try to clone a volume from an image, and check the status - afterwards""" - def fake_clone_image(volume, image_location): - pass + afterwards. - def fake_clone_error(volume, image_location): - raise exception.CinderException() + NOTE: if clone_error is True we force the image type to raw otherwise + clone_image is not called + """ + volume_id = 1 - self.stubs.Set(self.volume.driver, '_is_cloneable', lambda x: True) - if clone_works: - self.stubs.Set(self.volume.driver, 'clone_image', fake_clone_image) + # See tests.image.fake for image types. 
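+ # (the first id below is registered in the fake image service as a + # raw image, the second as a non-raw format)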
+ if raw: + image_id = '155d900f-4e14-4e4c-a73d-069cbf4541e6' else: - self.stubs.Set(self.volume.driver, 'clone_image', fake_clone_error) + image_id = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77' - image_id = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77' - volume_id = 1 # creating volume testdata - db.volume_create(self.context, {'id': volume_id, - 'updated_at': timeutils.utcnow(), - 'display_description': 'Test Desc', - 'size': 20, - 'status': 'creating', - 'instance_uuid': None, - 'host': 'dummy'}) + db.volume_create(self.context, + {'id': volume_id, + 'updated_at': timeutils.utcnow(), + 'display_description': 'Test Desc', + 'size': 20, + 'status': 'creating', + 'instance_uuid': None, + 'host': 'dummy'}) + try: - if clone_works: + if not clone_error: self.volume.create_volume(self.context, volume_id, image_id=image_id) @@ -138,24 +831,86 @@ def fake_clone_error(volume, image_location): # cleanup db.volume_destroy(self.context, volume_id) - def test_clone_image_status_available(self): - """Verify that before cloning, an image is in the available state.""" - self._clone_volume_from_image('available', True) + def test_create_vol_from_image_status_available(self): + """Clone raw image then verify volume is in available state.""" + + def mock_clone_image(volume, image_location, image_id, image_meta): + return {'provider_location': None}, True + + self.volume.driver.clone_image = mock.Mock() + self.volume.driver.clone_image.side_effect = mock_clone_image + self.volume.driver.create_volume = mock.Mock() + + with mock.patch.object(create_volume.CreateVolumeFromSpecTask, + '_copy_image_to_volume') as mock_copy: + self._create_volume_from_image('available', raw=True) + + self.volume.driver.clone_image.assert_called_once() + self.assertFalse(self.volume.driver.create_volume.called) + self.assertFalse(mock_copy.called) + + def test_create_vol_from_non_raw_image_status_available(self): + """Clone non-raw image then verify volume is in available state.""" + + def mock_clone_image(volume, image_location, image_id, image_meta): + return {'provider_location': None}, False + + self.volume.driver.clone_image = mock.Mock() + self.volume.driver.clone_image.side_effect = mock_clone_image + self.volume.driver.create_volume = mock.Mock() + self.volume.driver.create_volume.return_value = None + + with mock.patch.object(create_volume.CreateVolumeFromSpecTask, + '_copy_image_to_volume') as mock_copy: + self._create_volume_from_image('available', raw=False) - def test_clone_image_status_error(self): - """Verify that before cloning, an image is in the available state.""" - self._clone_volume_from_image('error', False) + self.volume.driver.clone_image.assert_called_once() + self.volume.driver.create_volume.assert_called_once() + mock_copy.assert_called_once() + + def test_create_vol_from_image_status_error(self): + """Fail to clone raw image then verify volume is in error state.""" + + self.volume.driver.clone_image = mock.Mock() + self.volume.driver.clone_image.side_effect = exception.CinderException + self.volume.driver.create_volume = mock.Mock() + self.volume.driver.create_volume.return_value = None + + with mock.patch.object(create_volume.CreateVolumeFromSpecTask, + '_copy_image_to_volume') as mock_copy: + self._create_volume_from_image('error', raw=True, clone_error=True) + + self.volume.driver.clone_image.assert_called_once() + self.assertFalse(self.volume.driver.create_volume.called) + self.assertFalse(mock_copy.called) + + def test_clone_failure(self): + driver = self.volume.driver + + with mock.patch.object(driver, 
'_is_cloneable', lambda *args: False): + image_loc = (mock.Mock(), mock.Mock()) + actual = driver.clone_image(mock.Mock(), image_loc, + mock.Mock(), {}) + self.assertEqual(({}, False), actual) + + self.assertEqual(({}, False), + driver.clone_image(object(), None, None, {})) def test_clone_success(self): - self.stubs.Set(self.volume.driver, '_is_cloneable', lambda x: True) - self.stubs.Set(self.volume.driver, 'clone_image', lambda a, b: True) - image_id = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77' - self.assertTrue(self.volume.driver.clone_image({}, image_id)) - - def test_clone_bad_image_id(self): - self.stubs.Set(self.volume.driver, '_is_cloneable', lambda x: True) - self.assertFalse(self.volume.driver.clone_image({}, None)) - - def test_clone_uncloneable(self): - self.stubs.Set(self.volume.driver, '_is_cloneable', lambda x: False) - self.assertFalse(self.volume.driver.clone_image({}, 'dne')) + expected = ({'provider_location': None}, True) + driver = self.volume.driver + + self.volume.driver._is_cloneable = mock.Mock() + self.volume.driver._is_cloneable.return_value = True + self.volume.driver._clone = mock.Mock() + self.volume.driver._resize = mock.Mock() + + image_loc = ('rbd://fee/fi/fo/fum', None) + actual = driver.clone_image({'name': 'vol1'}, + image_loc, + 'id.foo', + {'disk_format': 'raw'}) + + self.assertEqual(expected, actual) + self.volume.driver._clone.assert_called_once() + self.volume.driver._resize.assert_called_once() diff --git a/cinder/tests/test_scality.py b/cinder/tests/test_scality.py new file mode 100644 index 0000000000..e8efe3003d --- /dev/null +++ b/cinder/tests/test_scality.py @@ -0,0 +1,290 @@ +# Copyright (c) 2013 Scality +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Unit tests for the Scality SOFS Volume Driver. 
+""" + +import errno +import os +import shutil +import tempfile + +import mox as mox_lib + +from cinder import context +from cinder import exception +from cinder.image import image_utils +from cinder import test +from cinder import units +from cinder import utils +from cinder.volume.drivers import scality + + +class ScalityDriverTestCase(test.TestCase): + """Test case for the Scality driver.""" + + TEST_MOUNT = '/tmp/fake_mount' + TEST_CONFIG = '/tmp/fake_config' + TEST_VOLDIR = 'volumes' + + TEST_VOLNAME = 'volume_name' + TEST_VOLSIZE = '0' + TEST_VOLUME = { + 'name': TEST_VOLNAME, + 'size': TEST_VOLSIZE + } + TEST_VOLPATH = os.path.join(TEST_MOUNT, + TEST_VOLDIR, + TEST_VOLNAME) + + TEST_SNAPNAME = 'snapshot_name' + TEST_SNAPSHOT = { + 'name': TEST_SNAPNAME, + 'volume_name': TEST_VOLNAME, + 'volume_size': TEST_VOLSIZE + } + TEST_SNAPPATH = os.path.join(TEST_MOUNT, + TEST_VOLDIR, + TEST_SNAPNAME) + + TEST_CLONENAME = 'clone_name' + TEST_CLONE = { + 'name': TEST_CLONENAME, + 'size': TEST_VOLSIZE + } + + TEST_NEWSIZE = '2' + + TEST_IMAGE_SERVICE = 'image_service' + TEST_IMAGE_ID = 'image_id' + TEST_IMAGE_META = 'image_meta' + + def _makedirs(self, path): + try: + os.makedirs(path) + except OSError as e: + if e.errno != errno.EEXIST: + raise + + def _create_fake_config(self): + open(self.TEST_CONFIG, "w+").close() + + def _create_fake_mount(self): + self._makedirs(os.path.join(self.TEST_MOUNT, 'sys')) + self._makedirs(os.path.join(self.TEST_MOUNT, self.TEST_VOLDIR)) + + def _remove_fake_config(self): + try: + os.unlink(self.TEST_CONFIG) + except OSError as e: + if e.errno != errno.ENOENT: + raise + + def _configure_driver(self): + scality.CONF.scality_sofs_config = self.TEST_CONFIG + scality.CONF.scality_sofs_mount_point = self.TEST_MOUNT + scality.CONF.scality_sofs_volume_dir = self.TEST_VOLDIR + scality.CONF.volume_dd_blocksize = '1M' + + def _execute_wrapper(self, cmd, *args, **kwargs): + try: + kwargs.pop('run_as_root') + except KeyError: + pass + utils.execute(cmd, *args, **kwargs) + + def _set_access_wrapper(self, is_visible): + + def _access_wrapper(path, flags): + if path == '/sbin/mount.sofs': + return is_visible + else: + return os.access(path, flags) + + self.stubs.Set(os, 'access', _access_wrapper) + + def setUp(self): + super(ScalityDriverTestCase, self).setUp() + + self.tempdir = tempfile.mkdtemp() + + self.TEST_MOUNT = self.tempdir + self.TEST_VOLPATH = os.path.join(self.TEST_MOUNT, + self.TEST_VOLDIR, + self.TEST_VOLNAME) + self.TEST_SNAPPATH = os.path.join(self.TEST_MOUNT, + self.TEST_VOLDIR, + self.TEST_SNAPNAME) + self.TEST_CLONEPATH = os.path.join(self.TEST_MOUNT, + self.TEST_VOLDIR, + self.TEST_CLONENAME) + + self._driver = scality.ScalityDriver() + self._driver.set_execute(self._execute_wrapper) + self._mox = mox_lib.Mox() + + self._create_fake_mount() + self._create_fake_config() + self._configure_driver() + + def tearDown(self): + shutil.rmtree(self.tempdir) + self._remove_fake_config() + super(ScalityDriverTestCase, self).tearDown() + + def test_setup_no_config(self): + """Missing SOFS configuration shall raise an error.""" + scality.CONF.scality_sofs_config = None + self.assertRaises(exception.VolumeBackendAPIException, + self._driver.do_setup, None) + + def test_setup_missing_config(self): + """Non-existent SOFS configuration file shall raise an error.""" + scality.CONF.scality_sofs_config = 'nonexistent.conf' + self.assertRaises(exception.VolumeBackendAPIException, + self._driver.do_setup, None) + + def test_setup_no_mount_helper(self): + """SOFS must be installed 
to use the driver.""" + self._set_access_wrapper(False) + self.assertRaises(exception.VolumeBackendAPIException, + self._driver.do_setup, None) + + def test_setup_make_voldir(self): + """The directory for volumes shall be created automatically.""" + self._set_access_wrapper(True) + voldir_path = os.path.join(self.TEST_MOUNT, self.TEST_VOLDIR) + os.rmdir(voldir_path) + self._driver.do_setup(None) + self.assertTrue(os.path.isdir(voldir_path)) + + def test_local_path(self): + """Expected behaviour for local_path.""" + self.assertEqual(self._driver.local_path(self.TEST_VOLUME), + self.TEST_VOLPATH) + + def test_create_volume(self): + """Expected behaviour for create_volume.""" + ret = self._driver.create_volume(self.TEST_VOLUME) + self.assertEqual(ret['provider_location'], + os.path.join(self.TEST_VOLDIR, + self.TEST_VOLNAME)) + self.assertTrue(os.path.isfile(self.TEST_VOLPATH)) + self.assertEqual(os.stat(self.TEST_VOLPATH).st_size, + 100 * units.MiB) + + def test_delete_volume(self): + """Expected behaviour for delete_volume.""" + self._driver.create_volume(self.TEST_VOLUME) + self._driver.delete_volume(self.TEST_VOLUME) + self.assertFalse(os.path.isfile(self.TEST_VOLPATH)) + + def test_create_snapshot(self): + """Expected behaviour for create_snapshot.""" + mox = self._mox + + vol_size = self._driver._size_bytes(self.TEST_VOLSIZE) + + mox.StubOutWithMock(self._driver, '_create_file') + self._driver._create_file(self.TEST_SNAPPATH, vol_size) + mox.StubOutWithMock(self._driver, '_copy_file') + self._driver._copy_file(self.TEST_VOLPATH, self.TEST_SNAPPATH) + + mox.ReplayAll() + + self._driver.create_snapshot(self.TEST_SNAPSHOT) + + mox.UnsetStubs() + mox.VerifyAll() + + def test_delete_snapshot(self): + """Expected behaviour for delete_snapshot.""" + mox = self._mox + + mox.StubOutWithMock(os, 'remove') + os.remove(self.TEST_SNAPPATH) + + mox.ReplayAll() + + self._driver.delete_snapshot(self.TEST_SNAPSHOT) + + mox.UnsetStubs() + mox.VerifyAll() + + def test_initialize_connection(self): + """Expected behaviour for initialize_connection.""" + ret = self._driver.initialize_connection(self.TEST_VOLUME, None) + self.assertEqual(ret['driver_volume_type'], 'scality') + self.assertEqual(ret['data']['sofs_path'], + os.path.join(self.TEST_VOLDIR, + self.TEST_VOLNAME)) + + def test_copy_image_to_volume(self): + """Expected behaviour for copy_image_to_volume.""" + self.mox.StubOutWithMock(image_utils, 'fetch_to_raw') + + image_utils.fetch_to_raw(context, + self.TEST_IMAGE_SERVICE, + self.TEST_IMAGE_ID, + self.TEST_VOLPATH, + mox_lib.IgnoreArg(), + size=self.TEST_VOLSIZE) + + self.mox.ReplayAll() + + self._driver.copy_image_to_volume(context, + self.TEST_VOLUME, + self.TEST_IMAGE_SERVICE, + self.TEST_IMAGE_ID) + + def test_copy_volume_to_image(self): + """Expected behaviour for copy_volume_to_image.""" + self.mox.StubOutWithMock(image_utils, 'upload_volume') + + image_utils.upload_volume(context, + self.TEST_IMAGE_SERVICE, + self.TEST_IMAGE_META, + self.TEST_VOLPATH) + + self.mox.ReplayAll() + + self._driver.copy_volume_to_image(context, + self.TEST_VOLUME, + self.TEST_IMAGE_SERVICE, + self.TEST_IMAGE_META) + + def test_create_cloned_volume(self): + """Expected behaviour for create_cloned_volume.""" + self.mox.StubOutWithMock(self._driver, '_create_file') + self.mox.StubOutWithMock(self._driver, '_copy_file') + + vol_size = self._driver._size_bytes(self.TEST_VOLSIZE) + self._driver._create_file(self.TEST_CLONEPATH, vol_size) + self._driver._copy_file(self.TEST_VOLPATH, self.TEST_CLONEPATH) + + 
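# NOTE: mox works record/replay/verify style - the _create_file and + # _copy_file calls above are recorded as expectations, and ReplayAll() + # switches to replay mode so the code under test must repeat them + # exactly, e.g.: + # m = mox.Mox() + # m.StubOutWithMock(obj, 'meth') + # obj.meth('arg') # record the expected call + # m.ReplayAll() + # obj.meth('arg') # the code under test must match it + # m.VerifyAll() +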
self.mox.ReplayAll() + + self._driver.create_cloned_volume(self.TEST_CLONE, self.TEST_VOLUME) + + def test_extend_volume(self): + """Expected behaviour for extend_volume.""" + self.mox.StubOutWithMock(self._driver, '_create_file') + + new_size = self._driver._size_bytes(self.TEST_NEWSIZE) + self._driver._create_file(self.TEST_VOLPATH, new_size) + + self.mox.ReplayAll() + + self._driver.extend_volume(self.TEST_VOLUME, self.TEST_NEWSIZE) diff --git a/cinder/tests/test_service.py b/cinder/tests/test_service.py index 38b0c3922b..0f34a52329 100644 --- a/cinder/tests/test_service.py +++ b/cinder/tests/test_service.py @@ -1,4 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. @@ -20,16 +19,16 @@ Unit Tests for remote procedure calls using queue """ + import mox +from oslo.config import cfg from cinder import context from cinder import db from cinder import exception -from cinder import flags -from cinder.openstack.common import cfg -from cinder import test -from cinder import service from cinder import manager +from cinder import service +from cinder import test from cinder import wsgi @@ -42,14 +41,19 @@ help="Host to bind test service to"), cfg.IntOpt("test_service_listen_port", default=0, - help="Port number to bind test service to"), - ] + help="Port number to bind test service to"), ] -flags.FLAGS.register_opts(test_service_opts) +CONF = cfg.CONF +CONF.register_opts(test_service_opts) class FakeManager(manager.Manager): - """Fake manager for tests""" + """Fake manager for tests.""" + def __init__(self, host=None, + db_driver=None, service_name=None): + super(FakeManager, self).__init__(host=host, + db_driver=db_driver) + def test_method(self): return 'manager' @@ -60,7 +64,7 @@ def test_method(self): class ServiceManagerTestCase(test.TestCase): - """Test cases for Services""" + """Test cases for Services.""" def test_message_gets_to_manager(self): serv = service.Service('test', @@ -89,7 +93,7 @@ def test_service_enabled_on_create_based_on_flag(self): app.stop() ref = db.service_get(context.get_admin_context(), app.service_id) db.service_destroy(context.get_admin_context(), app.service_id) - self.assert_(not ref['disabled']) + self.assertFalse(ref['disabled']) def test_service_disabled_on_create_based_on_flag(self): self.flags(enable_new_services=False) @@ -100,11 +104,11 @@ def test_service_disabled_on_create_based_on_flag(self): app.stop() ref = db.service_get(context.get_admin_context(), app.service_id) db.service_destroy(context.get_admin_context(), app.service_id) - self.assert_(ref['disabled']) + self.assertTrue(ref['disabled']) class ServiceTestCase(test.TestCase): - """Test cases for Services""" + """Test cases for Services.""" def setUp(self): super(ServiceTestCase, self).setUp() @@ -119,7 +123,7 @@ def test_create(self): # the looping calls are created in StartService. 
app = service.Service.create(host=host, binary=binary, topic=topic) - self.assert_(app) + self.assertTrue(app) def test_report_state_newly_disconnected(self): host = 'foo' @@ -131,15 +135,15 @@ def test_report_state_newly_disconnected(self): 'report_count': 0, 'availability_zone': 'nova'} service_ref = {'host': host, - 'binary': binary, - 'topic': topic, - 'report_count': 0, - 'availability_zone': 'nova', - 'id': 1} + 'binary': binary, + 'topic': topic, + 'report_count': 0, + 'availability_zone': 'nova', + 'id': 1} service.db.service_get_by_args(mox.IgnoreArg(), - host, - binary).AndRaise(exception.NotFound()) + host, + binary).AndRaise(exception.NotFound()) service.db.service_create(mox.IgnoreArg(), service_create).AndReturn(service_ref) service.db.service_get(mox.IgnoreArg(), @@ -152,7 +156,7 @@ def test_report_state_newly_disconnected(self): 'cinder.tests.test_service.FakeManager') serv.start() serv.report_state() - self.assert_(serv.model_disconnected) + self.assertTrue(serv.model_disconnected) def test_report_state_newly_connected(self): host = 'foo' @@ -164,15 +168,15 @@ def test_report_state_newly_connected(self): 'report_count': 0, 'availability_zone': 'nova'} service_ref = {'host': host, - 'binary': binary, - 'topic': topic, - 'report_count': 0, - 'availability_zone': 'nova', - 'id': 1} + 'binary': binary, + 'topic': topic, + 'report_count': 0, + 'availability_zone': 'nova', + 'id': 1} service.db.service_get_by_args(mox.IgnoreArg(), - host, - binary).AndRaise(exception.NotFound()) + host, + binary).AndRaise(exception.NotFound()) service.db.service_create(mox.IgnoreArg(), service_create).AndReturn(service_ref) service.db.service_get(mox.IgnoreArg(), @@ -189,7 +193,14 @@ def test_report_state_newly_connected(self): serv.model_disconnected = True serv.report_state() - self.assert_(not serv.model_disconnected) + self.assertFalse(serv.model_disconnected) + + def test_service_with_long_report_interval(self): + CONF.set_override('service_down_time', 10) + CONF.set_override('report_interval', 10) + service.Service.create(binary="test_service", + manager="cinder.tests.test_service.FakeManager") + self.assertEqual(CONF.service_down_time, 25) class TestWSGIService(test.TestCase): @@ -200,22 +211,7 @@ def setUp(self): def test_service_random_port(self): test_service = service.WSGIService("test_service") - self.assertEquals(0, test_service.port) + self.assertEqual(0, test_service.port) test_service.start() self.assertNotEqual(0, test_service.port) test_service.stop() - - -class TestLauncher(test.TestCase): - - def setUp(self): - super(TestLauncher, self).setUp() - self.stubs.Set(wsgi.Loader, "load_app", mox.MockAnything()) - self.service = service.WSGIService("test_service") - - def test_launch_app(self): - self.assertEquals(0, self.service.port) - launcher = service.Launcher() - launcher.launch_server(self.service) - self.assertEquals(0, self.service.port) - launcher.stop() diff --git a/cinder/tests/test_sheepdog.py b/cinder/tests/test_sheepdog.py new file mode 100644 index 0000000000..3f8e93b762 --- /dev/null +++ b/cinder/tests/test_sheepdog.py @@ -0,0 +1,143 @@ + +# Copyright (c) 2013 Zelin.io +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +import contextlib +import os +import tempfile + +from cinder.image import image_utils +from cinder.openstack.common import processutils +from cinder import test +from cinder import units +from cinder.volume.drivers.sheepdog import SheepdogDriver + + +COLLIE_NODE_INFO = """ +0 107287605248 3623897354 3% +Total 107287605248 3623897354 3% 54760833024 +""" + +COLLIE_CLUSTER_INFO_0_5 = """ +Cluster status: running + +Cluster created at Tue Jun 25 19:51:41 2013 + +Epoch Time Version +2013-06-25 19:51:41 1 [127.0.0.1:7000, 127.0.0.1:7001, 127.0.0.1:7002] +""" + +COLLIE_CLUSTER_INFO_0_6 = """ +Cluster status: running, auto-recovery enabled + +Cluster created at Tue Jun 25 19:51:41 2013 + +Epoch Time Version +2013-06-25 19:51:41 1 [127.0.0.1:7000, 127.0.0.1:7001, 127.0.0.1:7002] +""" + + +class FakeImageService: + def download(self, context, image_id, path): + pass + + +class SheepdogTestCase(test.TestCase): + def setUp(self): + super(SheepdogTestCase, self).setUp() + self.driver = SheepdogDriver() + + def test_update_volume_stats(self): + def fake_stats(*args): + return COLLIE_NODE_INFO, '' + self.stubs.Set(self.driver, '_execute', fake_stats) + expected = dict( + volume_backend_name='sheepdog', + vendor_name='Open Source', + driver_version=self.driver.VERSION, + storage_protocol='sheepdog', + total_capacity_gb=float(107287605248) / units.GiB, + free_capacity_gb=float(107287605248 - 3623897354) / units.GiB, + reserved_percentage=0, + QoS_support=False) + actual = self.driver.get_volume_stats(True) + self.assertDictMatch(expected, actual) + + def test_update_volume_stats_error(self): + def fake_stats(*args): + raise processutils.ProcessExecutionError() + self.stubs.Set(self.driver, '_execute', fake_stats) + expected = dict( + volume_backend_name='sheepdog', + vendor_name='Open Source', + driver_version=self.driver.VERSION, + storage_protocol='sheepdog', + total_capacity_gb='unknown', + free_capacity_gb='unknown', + reserved_percentage=0, + QoS_support=False) + actual = self.driver.get_volume_stats(True) + self.assertDictMatch(expected, actual) + + def test_check_for_setup_error_0_5(self): + def fake_stats(*args): + return COLLIE_CLUSTER_INFO_0_5, '' + self.stubs.Set(self.driver, '_execute', fake_stats) + self.driver.check_for_setup_error() + + def test_check_for_setup_error_0_6(self): + def fake_stats(*args): + return COLLIE_CLUSTER_INFO_0_6, '' + self.stubs.Set(self.driver, '_execute', fake_stats) + self.driver.check_for_setup_error() + + def test_copy_image_to_volume(self): + @contextlib.contextmanager + def fake_temp_file(dir): + class FakeTmp: + def __init__(self, name): + self.name = name + yield FakeTmp('test') + + def fake_try_execute(obj, *command, **kwargs): + return True + + self.stubs.Set(tempfile, 'NamedTemporaryFile', fake_temp_file) + self.stubs.Set(os.path, 'exists', lambda x: True) + self.stubs.Set(image_utils, 'fetch_verify_image', + lambda w, x, y, z: None) + self.stubs.Set(image_utils, 'convert_image', + lambda x, y, z: None) + self.stubs.Set(SheepdogDriver, '_try_execute', fake_try_execute) + self.driver.copy_image_to_volume(None, {'name': 'test', + 'size': 1}, + 
FakeImageService(), None) + + def test_extend_volume(self): + fake_name = u'volume-00000001' + fake_size = '20' + fake_vol = {'project_id': 'testprjid', 'name': fake_name, + 'size': fake_size, + 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66'} + + self.mox.StubOutWithMock(self.driver, '_resize') + size = int(fake_size) * units.GiB + self.driver._resize(fake_vol, size=size) + + self.mox.ReplayAll() + self.driver.extend_volume(fake_vol, fake_size) + + self.mox.VerifyAll() diff --git a/cinder/tests/test_skip_examples.py b/cinder/tests/test_skip_examples.py deleted file mode 100644 index 2e51aef342..0000000000 --- a/cinder/tests/test_skip_examples.py +++ /dev/null @@ -1,47 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from cinder import test - - -class ExampleSkipTestCase(test.TestCase): - test_counter = 0 - - @test.skip_test("Example usage of @test.skip_test()") - def test_skip_test_example(self): - self.fail("skip_test failed to work properly.") - - @test.skip_if(True, "Example usage of @test.skip_if()") - def test_skip_if_example(self): - self.fail("skip_if failed to work properly.") - - @test.skip_unless(False, "Example usage of @test.skip_unless()") - def test_skip_unless_example(self): - self.fail("skip_unless failed to work properly.") - - @test.skip_if(False, "This test case should never be skipped.") - def test_001_increase_test_counter(self): - ExampleSkipTestCase.test_counter += 1 - - @test.skip_unless(True, "This test case should never be skipped.") - def test_002_increase_test_counter(self): - ExampleSkipTestCase.test_counter += 1 - - def test_003_verify_test_counter(self): - self.assertEquals(ExampleSkipTestCase.test_counter, 2, - "Tests were not skipped appropriately") diff --git a/cinder/tests/test_solidfire.py b/cinder/tests/test_solidfire.py index d6d0772b6c..1a46bc5f58 100644 --- a/cinder/tests/test_solidfire.py +++ b/cinder/tests/test_solidfire.py @@ -1,6 +1,5 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright 2012 OpenStack LLC. +# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -15,20 +14,59 @@ # License for the specific language governing permissions and limitations # under the License. 
+import mox + +from cinder import context from cinder import exception from cinder.openstack.common import log as logging +from cinder.openstack.common import timeutils from cinder import test -from cinder.volume.solidfire import SolidFire +from cinder import units +from cinder.volume import configuration as conf +from cinder.volume.drivers.solidfire import SolidFireDriver +from cinder.volume import qos_specs +from cinder.volume import volume_types LOG = logging.getLogger(__name__) +def create_configuration(): + configuration = mox.MockObject(conf.Configuration) + configuration.san_is_local = False + configuration.append_config_values(mox.IgnoreArg()) + return configuration + + class SolidFireVolumeTestCase(test.TestCase): def setUp(self): + self.ctxt = context.get_admin_context() + self._mox = mox.Mox() + self.configuration = mox.MockObject(conf.Configuration) + self.configuration.sf_allow_tenant_qos = True + self.configuration.san_is_local = True + self.configuration.sf_emulate_512 = True + self.configuration.sf_account_prefix = 'cinder' + super(SolidFireVolumeTestCase, self).setUp() + self.stubs.Set(SolidFireDriver, '_issue_api_request', + self.fake_issue_api_request) + + self.expected_qos_results = {'minIOPS': 1000, + 'maxIOPS': 10000, + 'burstIOPS': 20000} - def fake_issue_api_request(obj, method, params): - if method is 'GetClusterInfo': + def fake_issue_api_request(obj, method, params, version='1.0'): + if method is 'GetClusterCapacity' and version == '1.0': + LOG.info('Called Fake GetClusterCapacity...') + data = {'result': + {'clusterCapacity': {'maxProvisionedSpace': 99999999, + 'usedSpace': 999, + 'compressionPercent': 100, + 'deDuplicationPercent': 100, + 'thinProvisioningPercent': 100}}} + return data + + elif method is 'GetClusterInfo' and version == '1.0': LOG.info('Called Fake GetClusterInfo...') results = {'result': {'clusterInfo': {'name': 'fake-cluster', @@ -39,11 +77,11 @@ def fake_issue_api_request(obj, method, params): 'attributes': {}}}} return results - elif method is 'AddAccount': + elif method is 'AddAccount' and version == '1.0': LOG.info('Called Fake AddAccount...') return {'result': {'accountID': 25}, 'id': 1} - elif method is 'GetAccountByName': + elif method is 'GetAccountByName' and version == '1.0': LOG.info('Called Fake GetAccountByName...') results = {'result': {'account': {'accountID': 25, @@ -56,15 +94,25 @@ def fake_issue_api_request(obj, method, params): "id": 1} return results - elif method is 'CreateVolume': + elif method is 'CreateVolume' and version == '1.0': LOG.info('Called Fake CreateVolume...') return {'result': {'volumeID': 5}, 'id': 1} - elif method is 'DeleteVolume': + elif method is 'DeleteVolume' and version == '1.0': LOG.info('Called Fake DeleteVolume...') return {'result': {}, 'id': 1} - elif method is 'ListVolumesForAccount': + elif method is 'ModifyVolume' and version == '5.0': + LOG.info('Called Fake ModifyVolume...') + return {'result': {}, 'id': 1} + + elif method is 'CloneVolume': + return {'result': {'volumeID': 6}, 'id': 2} + + elif method is 'ModifyVolume': + return + + elif method is 'ListVolumesForAccount' and version == '1.0': test_name = 'OS-VOLID-a720b3c0-d1f0-11e1-9b23-0800200c9a66' LOG.info('Called Fake ListVolumesForAccount...') result = {'result': { @@ -72,11 +120,11 @@ def fake_issue_api_request(obj, method, params): 'name': test_name, 'accountID': 25, 'sliceCount': 1, - 'totalSize': 1048576 * 1024, + 'totalSize': 1 * units.GiB, 'enable512e': True, 'access': "readWrite", 'status': "active", - 'attributes':None, + 
'attributes': None, 'qos': None, 'iqn': test_name}]}} return result @@ -84,48 +132,172 @@ def fake_issue_api_request(obj, method, params): else: LOG.error('Crap, unimplemented API call in Fake:%s' % method) - def fake_issue_api_request_fails(obj, method, params): + def fake_issue_api_request_fails(obj, method, params, version='1.0'): return {'error': {'code': 000, 'name': 'DummyError', 'message': 'This is a fake error response'}, 'id': 1} + def fake_set_qos_by_volume_type(self, type_id, ctxt): + return {'minIOPS': 500, + 'maxIOPS': 1000, + 'burstIOPS': 1000} + def fake_volume_get(obj, key, default=None): return {'qos': 'fast'} + def fake_update_cluster_status(self): + return + + def fake_get_model_info(self, account, vid): + return {'fake': 'fake-model'} + + def test_create_with_qos_type(self): + self.stubs.Set(SolidFireDriver, '_issue_api_request', + self.fake_issue_api_request) + self.stubs.Set(SolidFireDriver, '_set_qos_by_volume_type', + self.fake_set_qos_by_volume_type) + testvol = {'project_id': 'testprjid', + 'name': 'testvol', + 'size': 1, + 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', + 'volume_type_id': 'fast', + 'created_at': timeutils.utcnow()} + + sfv = SolidFireDriver(configuration=self.configuration) + model_update = sfv.create_volume(testvol) + self.assertIsNotNone(model_update) + def test_create_volume(self): - self.stubs.Set(SolidFire, '_issue_api_request', + self.stubs.Set(SolidFireDriver, '_issue_api_request', self.fake_issue_api_request) testvol = {'project_id': 'testprjid', 'name': 'testvol', 'size': 1, - 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66'} - sfv = SolidFire() + 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', + 'volume_type_id': None, + 'created_at': timeutils.utcnow()} + + sfv = SolidFireDriver(configuration=self.configuration) + model_update = sfv.create_volume(testvol) + self.assertIsNotNone(model_update) + self.assertIsNone(model_update.get('provider_geometry', None)) + + def test_create_volume_non_512(self): + self.stubs.Set(SolidFireDriver, '_issue_api_request', + self.fake_issue_api_request) + testvol = {'project_id': 'testprjid', + 'name': 'testvol', + 'size': 1, + 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', + 'volume_type_id': None, + 'created_at': timeutils.utcnow()} + + self.configuration.sf_emulate_512 = False + sfv = SolidFireDriver(configuration=self.configuration) model_update = sfv.create_volume(testvol) + self.assertEqual(model_update.get('provider_geometry', None), + '4096 4096') + self.configuration.sf_emulate_512 = True + + def test_create_snapshot(self): + self.stubs.Set(SolidFireDriver, '_issue_api_request', + self.fake_issue_api_request) + self.stubs.Set(SolidFireDriver, '_get_model_info', + self.fake_get_model_info) + testvol = {'project_id': 'testprjid', + 'name': 'testvol', + 'size': 1, + 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', + 'volume_type_id': None, + 'created_at': timeutils.utcnow()} + + testsnap = {'project_id': 'testprjid', + 'name': 'testvol', + 'volume_size': 1, + 'id': 'b831c4d1-d1f0-11e1-9b23-0800200c9a66', + 'volume_id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', + 'volume_type_id': None, + 'created_at': timeutils.utcnow()} + + sfv = SolidFireDriver(configuration=self.configuration) + model_update = sfv.create_volume(testvol) + sfv.create_snapshot(testsnap) + + def test_create_clone(self): + self.stubs.Set(SolidFireDriver, '_issue_api_request', + self.fake_issue_api_request) + self.stubs.Set(SolidFireDriver, '_get_model_info', + self.fake_get_model_info) + testvol = {'project_id': 'testprjid', + 'name': 'testvol', + 
'size': 1, + 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', + 'volume_type_id': None, + 'created_at': timeutils.utcnow()} + + testvol_b = {'project_id': 'testprjid', + 'name': 'testvol', + 'size': 1, + 'id': 'b831c4d1-d1f0-11e1-9b23-0800200c9a66', + 'volume_type_id': None, + 'created_at': timeutils.utcnow()} + + sfv = SolidFireDriver(configuration=self.configuration) + sfv.create_cloned_volume(testvol_b, testvol) + + def test_initialize_connector_with_blocksizes(self): + connector = {'initiator': 'iqn.2012-07.org.fake:01'} + testvol = {'project_id': 'testprjid', + 'name': 'testvol', + 'size': 1, + 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', + 'volume_type_id': None, + 'provider_location': '10.10.7.1:3260 iqn.2010-01.com.' + 'solidfire:87hg.uuid-2cc06226-cc' + '74-4cb7-bd55-14aed659a0cc.4060 0', + 'provider_auth': 'CHAP stack-1-a60e2611875f40199931f2' + 'c76370d66b 2FE0CQ8J196R', + 'provider_geometry': '4096 4096', + 'created_at': timeutils.utcnow(), + } + + sfv = SolidFireDriver(configuration=self.configuration) + properties = sfv.initialize_connection(testvol, connector) + self.assertEqual(properties['data']['physical_block_size'], '4096') + self.assertEqual(properties['data']['logical_block_size'], '4096') def test_create_volume_with_qos(self): preset_qos = {} preset_qos['qos'] = 'fast' - self.stubs.Set(SolidFire, '_issue_api_request', + self.stubs.Set(SolidFireDriver, '_issue_api_request', self.fake_issue_api_request) testvol = {'project_id': 'testprjid', 'name': 'testvol', 'size': 1, 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', - 'metadata': [preset_qos]} + 'metadata': [preset_qos], + 'volume_type_id': None, + 'created_at': timeutils.utcnow()} - sfv = SolidFire() + sfv = SolidFireDriver(configuration=self.configuration) model_update = sfv.create_volume(testvol) + self.assertIsNotNone(model_update) def test_create_volume_fails(self): - self.stubs.Set(SolidFire, '_issue_api_request', + # NOTE(JDG) This test just fakes update_cluster_status + # this is intentional for this test + self.stubs.Set(SolidFireDriver, '_update_cluster_status', + self.fake_update_cluster_status) + self.stubs.Set(SolidFireDriver, '_issue_api_request', self.fake_issue_api_request_fails) testvol = {'project_id': 'testprjid', 'name': 'testvol', 'size': 1, - 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66'} - sfv = SolidFire() + 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', + 'created_at': timeutils.utcnow()} + sfv = SolidFireDriver(configuration=self.configuration) try: sfv.create_volume(testvol) self.fail("Should have thrown Error") @@ -133,78 +305,177 @@ def test_create_volume_fails(self): pass def test_create_sfaccount(self): - sfv = SolidFire() - self.stubs.Set(SolidFire, '_issue_api_request', + sfv = SolidFireDriver(configuration=self.configuration) + self.stubs.Set(SolidFireDriver, '_issue_api_request', self.fake_issue_api_request) account = sfv._create_sfaccount('project-id') - self.assertNotEqual(account, None) + self.assertIsNotNone(account) def test_create_sfaccount_fails(self): - sfv = SolidFire() - self.stubs.Set(SolidFire, '_issue_api_request', + sfv = SolidFireDriver(configuration=self.configuration) + self.stubs.Set(SolidFireDriver, '_issue_api_request', self.fake_issue_api_request_fails) account = sfv._create_sfaccount('project-id') - self.assertEqual(account, None) + self.assertIsNone(account) def test_get_sfaccount_by_name(self): - sfv = SolidFire() - self.stubs.Set(SolidFire, '_issue_api_request', + sfv = SolidFireDriver(configuration=self.configuration) + self.stubs.Set(SolidFireDriver, 
'_issue_api_request', self.fake_issue_api_request) account = sfv._get_sfaccount_by_name('some-name') - self.assertNotEqual(account, None) + self.assertIsNotNone(account) def test_get_sfaccount_by_name_fails(self): - sfv = SolidFire() - self.stubs.Set(SolidFire, '_issue_api_request', + sfv = SolidFireDriver(configuration=self.configuration) + self.stubs.Set(SolidFireDriver, '_issue_api_request', self.fake_issue_api_request_fails) account = sfv._get_sfaccount_by_name('some-name') - self.assertEqual(account, None) + self.assertIsNone(account) def test_delete_volume(self): - self.stubs.Set(SolidFire, '_issue_api_request', + self.stubs.Set(SolidFireDriver, '_issue_api_request', self.fake_issue_api_request) testvol = {'project_id': 'testprjid', 'name': 'test_volume', 'size': 1, - 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66'} - sfv = SolidFire() - model_update = sfv.delete_volume(testvol) + 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', + 'created_at': timeutils.utcnow()} + + sfv = SolidFireDriver(configuration=self.configuration) + sfv.delete_volume(testvol) def test_delete_volume_fails_no_volume(self): - self.stubs.Set(SolidFire, '_issue_api_request', + self.stubs.Set(SolidFireDriver, '_issue_api_request', self.fake_issue_api_request) testvol = {'project_id': 'testprjid', 'name': 'no-name', 'size': 1, - 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66'} - sfv = SolidFire() + 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', + 'created_at': timeutils.utcnow()} + + sfv = SolidFireDriver(configuration=self.configuration) try: - model_update = sfv.delete_volume(testvol) + sfv.delete_volume(testvol) self.fail("Should have thrown Error") except Exception: pass def test_delete_volume_fails_account_lookup(self): - self.stubs.Set(SolidFire, '_issue_api_request', + # NOTE(JDG) This test just fakes update_cluster_status + # this is intentional for this test + self.stubs.Set(SolidFireDriver, '_update_cluster_status', + self.fake_update_cluster_status) + self.stubs.Set(SolidFireDriver, '_issue_api_request', self.fake_issue_api_request_fails) testvol = {'project_id': 'testprjid', 'name': 'no-name', 'size': 1, - 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66'} - sfv = SolidFire() - self.assertRaises(exception.SfAccountNotFound, + 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', + 'created_at': timeutils.utcnow()} + + sfv = SolidFireDriver(configuration=self.configuration) + self.assertRaises(exception.SolidFireAccountNotFound, sfv.delete_volume, testvol) def test_get_cluster_info(self): - self.stubs.Set(SolidFire, '_issue_api_request', + self.stubs.Set(SolidFireDriver, '_issue_api_request', self.fake_issue_api_request) - sfv = SolidFire() + sfv = SolidFireDriver(configuration=self.configuration) sfv._get_cluster_info() def test_get_cluster_info_fail(self): - self.stubs.Set(SolidFire, '_issue_api_request', + # NOTE(JDG) This test just fakes update_cluster_status + # this is intentional for this test + self.stubs.Set(SolidFireDriver, '_update_cluster_status', + self.fake_update_cluster_status) + self.stubs.Set(SolidFireDriver, '_issue_api_request', self.fake_issue_api_request_fails) - sfv = SolidFire() + sfv = SolidFireDriver(configuration=self.configuration) self.assertRaises(exception.SolidFireAPIException, sfv._get_cluster_info) + + def test_extend_volume(self): + self.stubs.Set(SolidFireDriver, '_issue_api_request', + self.fake_issue_api_request) + testvol = {'project_id': 'testprjid', + 'name': 'test_volume', + 'size': 1, + 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', + 'created_at': timeutils.utcnow()} + + sfv = 
SolidFireDriver(configuration=self.configuration) + sfv.extend_volume(testvol, 2) + + def test_extend_volume_fails_no_volume(self): + self.stubs.Set(SolidFireDriver, '_issue_api_request', + self.fake_issue_api_request) + testvol = {'project_id': 'testprjid', + 'name': 'no-name', + 'size': 1, + 'id': 'not-found'} + sfv = SolidFireDriver(configuration=self.configuration) + self.assertRaises(exception.VolumeNotFound, + sfv.extend_volume, + testvol, 2) + + def test_extend_volume_fails_account_lookup(self): + # NOTE(JDG) This test just fakes update_cluster_status + # this is intentional for this test + self.stubs.Set(SolidFireDriver, '_update_cluster_status', + self.fake_update_cluster_status) + self.stubs.Set(SolidFireDriver, '_issue_api_request', + self.fake_issue_api_request_fails) + testvol = {'project_id': 'testprjid', + 'name': 'no-name', + 'size': 1, + 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', + 'created_at': timeutils.utcnow()} + + sfv = SolidFireDriver(configuration=self.configuration) + self.assertRaises(exception.SolidFireAccountNotFound, + sfv.extend_volume, + testvol, 2) + + def test_set_by_qos_spec_with_scoping(self): + sfv = SolidFireDriver(configuration=self.configuration) + qos_ref = qos_specs.create(self.ctxt, + 'qos-specs-1', {'qos:minIOPS': '1000', + 'qos:maxIOPS': '10000', + 'qos:burstIOPS': '20000'}) + type_ref = volume_types.create(self.ctxt, + "type1", {"qos:minIOPS": "100", + "qos:burstIOPS": "300", + "qos:maxIOPS": "200"}) + qos_specs.associate_qos_with_type(self.ctxt, + qos_ref['id'], + type_ref['id']) + qos = sfv._set_qos_by_volume_type(self.ctxt, type_ref['id']) + self.assertEqual(qos, self.expected_qos_results) + + def test_set_by_qos_spec(self): + sfv = SolidFireDriver(configuration=self.configuration) + qos_ref = qos_specs.create(self.ctxt, + 'qos-specs-1', {'minIOPS': '1000', + 'maxIOPS': '10000', + 'burstIOPS': '20000'}) + type_ref = volume_types.create(self.ctxt, + "type1", {"qos:minIOPS": "100", + "qos:burstIOPS": "300", + "qos:maxIOPS": "200"}) + qos_specs.associate_qos_with_type(self.ctxt, + qos_ref['id'], + type_ref['id']) + qos = sfv._set_qos_by_volume_type(self.ctxt, type_ref['id']) + self.assertEqual(qos, self.expected_qos_results) + + def test_set_by_qos_by_type_only(self): + sfv = SolidFireDriver(configuration=self.configuration) + type_ref = volume_types.create(self.ctxt, + "type1", {"qos:minIOPS": "100", + "qos:burstIOPS": "300", + "qos:maxIOPS": "200"}) + qos = sfv._set_qos_by_volume_type(self.ctxt, type_ref['id']) + self.assertEqual(qos, {'minIOPS': 100, + 'maxIOPS': 200, + 'burstIOPS': 300}) diff --git a/cinder/tests/test_storwize_svc.py b/cinder/tests/test_storwize_svc.py index c1d308aa85..2d8719f2d2 100644 --- a/cinder/tests/test_storwize_svc.py +++ b/cinder/tests/test_storwize_svc.py @@ -1,7 +1,5 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright (c) 2012 IBM, Inc. -# Copyright (c) 2012 OpenStack LLC. +# Copyright 2013 IBM Corp. +# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -16,96 +14,164 @@ # License for the specific language governing permissions and limitations # under the License. # -# Authors: -# Ronen Kat -# Avishay Traeger - """ -Tests for the IBM Storwize V7000 and SVC volume driver. +Tests for the IBM Storwize family and SVC volume driver. 
""" +import mock import random -import socket +import re +from cinder import context from cinder import exception -from cinder import flags from cinder.openstack.common import excutils from cinder.openstack.common import log as logging +from cinder.openstack.common import processutils from cinder import test -from cinder.volume import storwize_svc - -FLAGS = flags.FLAGS +from cinder import units +from cinder import utils +from cinder.volume import configuration as conf +from cinder.volume.drivers.ibm import storwize_svc +from cinder.volume.drivers.ibm.storwize_svc import ssh +from cinder.volume import volume_types LOG = logging.getLogger(__name__) +class StorwizeSVCFakeDB: + def __init__(self): + self.volume = None + + def volume_get(self, ctxt, vol_id): + return self.volume + + def volume_set(self, vol): + self.volume = vol + + class StorwizeSVCManagementSimulator: def __init__(self, pool_name): - self._flags = {"storwize_svc_volpool_name": pool_name} + self._flags = {'storwize_svc_volpool_name': pool_name} self._volumes_list = {} self._hosts_list = {} self._mappings_list = {} self._fcmappings_list = {} + self._other_pools = {'openstack2': {}, 'openstack3': {}} self._next_cmd_error = { - "lsportip": "", - "lsnodecanister": "", - "mkvdisk": "", - "lsvdisk": "", - "lsfcmap": "", - "prestartfcmap": "", - "startfcmap": "", - "rmfcmap": "", + 'lsportip': '', + 'lsfabric': '', + 'lsiscsiauth': '', + 'lsnodecanister': '', + 'mkvdisk': '', + 'lsvdisk': '', + 'lsfcmap': '', + 'prestartfcmap': '', + 'startfcmap': '', + 'rmfcmap': '', + 'lslicense': '', } self._errors = { - "CMMVC5701E": ("", "CMMVC5701E No object ID was specified."), - "CMMVC6035E": ("", "CMMVC6035E The action failed as the " + - "object already exists."), - "CMMVC5753E": ("", "CMMVC5753E The specified object does not " + - "exist or is not a suitable candidate."), - "CMMVC5707E": ("", "CMMVC5707E Required parameters are missing."), - "CMMVC6581E": ("", "CMMVC6581E The command has failed because " + - "the maximum number of allowed iSCSI " + - "qualified names (IQNs) has been reached, " + - "or the IQN is already assigned or is not " + - "valid."), - "CMMVC5754E": ("", "CMMVC5754E The specified object does not " + - "exist, or the name supplied does not meet " + - "the naming rules."), - "CMMVC6071E": ("", "CMMVC6071E The VDisk-to-host mapping was " + - "not created because the VDisk is already " + - "mapped to a host."), - "CMMVC5879E": ("", "CMMVC5879E The VDisk-to-host mapping was " + - "not created because a VDisk is already " + - "mapped to this host with this SCSI LUN."), - "CMMVC5840E": ("", "CMMVC5840E The virtual disk (VDisk) was " + - "not deleted because it is mapped to a " + - "host or because it is part of a FlashCopy " + - "or Remote Copy mapping, or is involved in " + - "an image mode migrate."), - "CMMVC6527E": ("", "CMMVC6527E The name that you have entered " + - "is not valid. The name can contain letters, " + - "numbers, spaces, periods, dashes, and " + - "underscores. The name must begin with a " + - "letter or an underscore. 
The name must not " + - "begin or end with a space."), - "CMMVC5871E": ("", "CMMVC5871E The action failed because one or " + - "more of the configured port names is in a " + - "mapping."), - "CMMVC5924E": ("", "CMMVC5924E The FlashCopy mapping was not " + - "created because the source and target " + - "virtual disks (VDisks) are different sizes."), - "CMMVC6303E": ("", "CMMVC6303E The create failed because the " + - "source and target VDisks are the same."), - "CMMVC7050E": ("", "CMMVC7050E The command failed because at " + - "least one node in the I/O group does not " + - "support compressed VDisks."), + 'CMMVC5701E': ('', 'CMMVC5701E No object ID was specified.'), + 'CMMVC6035E': ('', 'CMMVC6035E The action failed as the ' + 'object already exists.'), + 'CMMVC5753E': ('', 'CMMVC5753E The specified object does not ' + 'exist or is not a suitable candidate.'), + 'CMMVC5707E': ('', 'CMMVC5707E Required parameters are missing.'), + 'CMMVC6581E': ('', 'CMMVC6581E The command has failed because ' + 'the maximum number of allowed iSCSI ' + 'qualified names (IQNs) has been reached, ' + 'or the IQN is already assigned or is not ' + 'valid.'), + 'CMMVC5754E': ('', 'CMMVC5754E The specified object does not ' + 'exist, or the name supplied does not meet ' + 'the naming rules.'), + 'CMMVC6071E': ('', 'CMMVC6071E The VDisk-to-host mapping was ' + 'not created because the VDisk is already ' + 'mapped to a host.'), + 'CMMVC5879E': ('', 'CMMVC5879E The VDisk-to-host mapping was ' + 'not created because a VDisk is already ' + 'mapped to this host with this SCSI LUN.'), + 'CMMVC5840E': ('', 'CMMVC5840E The virtual disk (VDisk) was ' + 'not deleted because it is mapped to a ' + 'host or because it is part of a FlashCopy ' + 'or Remote Copy mapping, or is involved in ' + 'an image mode migrate.'), + 'CMMVC6527E': ('', 'CMMVC6527E The name that you have entered ' + 'is not valid. The name can contain letters, ' + 'numbers, spaces, periods, dashes, and ' + 'underscores. The name must begin with a ' + 'letter or an underscore. 
The name must not '
+                               'begin or end with a space.'),
+            'CMMVC5871E': ('', 'CMMVC5871E The action failed because one or '
+                               'more of the configured port names is in a '
+                               'mapping.'),
+            'CMMVC5924E': ('', 'CMMVC5924E The FlashCopy mapping was not '
+                               'created because the source and target '
+                               'virtual disks (VDisks) are different sizes.'),
+            'CMMVC6303E': ('', 'CMMVC6303E The create failed because the '
+                               'source and target VDisks are the same.'),
+            'CMMVC7050E': ('', 'CMMVC7050E The command failed because at '
+                               'least one node in the I/O group does not '
+                               'support compressed VDisks.'),
+            'CMMVC6430E': ('', 'CMMVC6430E The command failed because the '
+                               'target and source managed disk groups must '
+                               'be different.'),
+            'CMMVC6353E': ('', 'CMMVC6353E The command failed because the '
+                               'copy specified does not exist.'),
+            'CMMVC6446E': ('', 'CMMVC6446E The command failed because the '
+                               'managed disk groups have different extent '
+                               'sizes.'),
+            # Catch-all for invalid state transitions:
+            'CMMVC5903E': ('', 'CMMVC5903E The FlashCopy mapping was not '
+                               'changed because the mapping or consistency '
+                               'group is in another state.'),
+            'CMMVC5709E': ('', 'CMMVC5709E [-%(VALUE)s] is not a supported '
+                               'parameter.'),
         }
+        self._transitions = {'begin': {'make': 'idle_or_copied'},
+                             'idle_or_copied': {'prepare': 'preparing',
+                                                'delete': 'end',
+                                                'delete_force': 'end'},
+                             'preparing': {'flush_failed': 'stopped',
+                                           'wait': 'prepared'},
+                             'end': None,
+                             'stopped': {'prepare': 'preparing',
+                                         'delete_force': 'end'},
+                             'prepared': {'stop': 'stopped',
+                                          'start': 'copying'},
+                             'copying': {'wait': 'idle_or_copied',
+                                         'stop': 'stopping'},
+                             # Assume the worst case where stopping->stopped,
+                             # rather than stopping->idle_or_copied
+                             'stopping': {'wait': 'stopped'},
+                             }
+
+    def _state_transition(self, function, fcmap):
+        if (function == 'wait' and
+                'wait' not in self._transitions[fcmap['status']]):
+            return ('', '')
+
+        if fcmap['status'] == 'copying' and function == 'wait':
+            if fcmap['copyrate'] != '0':
+                if fcmap['progress'] == '0':
+                    fcmap['progress'] = '50'
+                else:
+                    fcmap['progress'] = '100'
+                    fcmap['status'] = 'idle_or_copied'
+            return ('', '')
+        else:
+            try:
+                curr_state = fcmap['status']
+                fcmap['status'] = self._transitions[curr_state][function]
+                return ('', '')
+            except Exception:
+                return self._errors['CMMVC5903E']
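The _transitions table and _state_transition above give the simulator a small
finite-state machine for FlashCopy mappings. An illustrative walk through the
table as defined above (a sketch for the reader, not part of the change
itself):

    # Illustrative only: drive a fake fcmap through the table above.
    sim = StorwizeSVCManagementSimulator('openstack')
    fcmap = {'status': 'idle_or_copied', 'copyrate': '50', 'progress': '0'}

    sim._state_transition('prepare', fcmap)   # -> 'preparing'
    sim._state_transition('wait', fcmap)      # -> 'prepared'
    sim._state_transition('start', fcmap)     # -> 'copying'
    sim._state_transition('wait', fcmap)      # progress '0' -> '50'
    sim._state_transition('wait', fcmap)      # '100', -> 'idle_or_copied'
    assert fcmap['status'] == 'idle_or_copied'

     # Find an unused ID
-    def _find_unused_id(self, d):
+    @staticmethod
+    def _find_unused_id(d):
         ids = []
-        for k, v in d.iteritems():
-            ids.append(int(v["id"]))
+        for v in d.itervalues():
+            ids.append(int(v['id']))
         ids.sort()
         for index, n in enumerate(ids):
             if n > index:
@@ -113,772 +179,1313 @@ def _find_unused_id(self, d):
             return str(len(ids))

     # Check if name is valid
-    def _is_invalid_name(self, name):
-        if (name[0] == " ") or (name[-1] == " "):
-            return True
-        for c in name:
-            if ((not c.isalnum()) and (c != " ") and (c != ".")
-                    and (c != "-") and (c != "_")):
-                return True
-        return False
+    @staticmethod
+    def _is_invalid_name(name):
+        if re.match(r'^[a-zA-Z_][\w ._-]*$', name):
+            return False
+        return True

     # Convert argument string to dictionary
-    def _cmd_to_dict(self, cmd):
-        arg_list = cmd.split()
+    @staticmethod
+    def _cmd_to_dict(arg_list):
         no_param_args = [
-            "autodelete",
-            "autoexpand",
-            "bytes",
-            "compressed",
-            "force",
-            "nohdr",
+            'autodelete',
+            'bytes',
+            'compressed',
+            'force',
+            'nohdr',
         ]
         one_param_args = [
-            "cleanrate",
-            "delim",
-            "filtervalue",
-            "grainsize",
-            "host",
-            "iogrp",
-            "iscsiname",
-            "mdiskgrp",
-            "name",
-            "rsize",
-            "scsi",
-            "size",
-            "source",
-            "target",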
"unit", - "easytier", - "warning", + 'chapsecret', + 'cleanrate', + 'copy', + 'copyrate', + 'delim', + 'easytier', + 'filtervalue', + 'grainsize', + 'hbawwpn', + 'host', + 'iogrp', + 'iscsiname', + 'mdiskgrp', + 'name', + 'rsize', + 'scsi', + 'size', + 'source', + 'target', + 'unit', + 'vdisk', + 'warning', + 'wwpn', + ] + no_or_one_param_args = [ + 'autoexpand', ] # Handle the special case of lsnode which is a two-word command # Use the one word version of the command internally - if arg_list[0] == "svcinfo" and arg_list[1] == "lsnode": - ret = {"cmd": "lsnodecanister"} + if arg_list[0] in ('svcinfo', 'svctask'): + if arg_list[1] == 'lsnode': + if len(arg_list) > 4: # e.g. svcinfo lsnode -delim ! + ret = {'cmd': 'lsnode', 'node_id': arg_list[-1]} + else: + ret = {'cmd': 'lsnodecanister'} + else: + ret = {'cmd': arg_list[1]} arg_list.pop(0) else: - ret = {"cmd": arg_list[0]} + ret = {'cmd': arg_list[0]} skip = False for i in range(1, len(arg_list)): if skip: skip = False continue - if arg_list[i][0] == "-": + if arg_list[i][0] == '-': if arg_list[i][1:] in no_param_args: ret[arg_list[i][1:]] = True elif arg_list[i][1:] in one_param_args: ret[arg_list[i][1:]] = arg_list[i + 1] skip = True + elif arg_list[i][1:] in no_or_one_param_args: + if i == (len(arg_list) - 1) or arg_list[i + 1][0] == '-': + ret[arg_list[i][1:]] = True + else: + ret[arg_list[i][1:]] = arg_list[i + 1] + skip = True else: raise exception.InvalidInput( reason=_('unrecognized argument %s') % arg_list[i]) else: - ret["obj"] = arg_list[i] + ret['obj'] = arg_list[i] return ret - # Generic function for printing information - def _print_info_cmd(self, rows, delim=" ", nohdr=False, **kwargs): + @staticmethod + def _print_info_cmd(rows, delim=' ', nohdr=False, **kwargs): + """Generic function for printing information.""" if nohdr: - del rows[0] + del rows[0] for index in range(len(rows)): rows[index] = delim.join(rows[index]) - return ("%s" % "\n".join(rows), "") + return ('%s' % '\n'.join(rows), '') + + @staticmethod + def _print_info_obj_cmd(header, row, delim=' ', nohdr=False): + """Generic function for printing information for a specific object.""" + objrows = [] + for idx, val in enumerate(header): + objrows.append([val, row[idx]]) + + if nohdr: + for index in range(len(objrows)): + objrows[index] = ' '.join(objrows[index][1:]) + for index in range(len(objrows)): + objrows[index] = delim.join(objrows[index]) + return ('%s' % '\n'.join(objrows), '') + + @staticmethod + def _convert_bytes_units(bytestr): + num = int(bytestr) + unit_array = ['B', 'KB', 'MB', 'GB', 'TB', 'PB'] + unit_index = 0 + + while num > 1024: + num = num / 1024 + unit_index += 1 + + return '%d%s' % (num, unit_array[unit_index]) + + @staticmethod + def _convert_units_bytes(num, unit): + unit_array = ['B', 'KB', 'MB', 'GB', 'TB', 'PB'] + unit_index = 0 + + while unit.lower() != unit_array[unit_index].lower(): + num = num * 1024 + unit_index += 1 + + return str(num) + + def _cmd_lslicense(self, **kwargs): + rows = [None] * 3 + rows[0] = ['used_compression_capacity', '0.08'] + rows[1] = ['license_compression_capacity', '0'] + if self._next_cmd_error['lslicense'] == 'no_compression': + self._next_cmd_error['lslicense'] = '' + rows[2] = ['license_compression_enclosures', '0'] + else: + rows[2] = ['license_compression_enclosures', '1'] + return self._print_info_cmd(rows=rows, **kwargs) # Print mostly made-up stuff in the correct syntax - def _cmd_lsmdiskgrp(self, **kwargs): + def _cmd_lssystem(self, **kwargs): rows = [None] * 3 - rows[0] = ["id", "name", 
"status", "mdisk_count", - "vdisk_count capacity", "extent_size", "free_capacity", - "virtual_capacity", "used_capacity", "real_capacity", - "overallocation", "warning", "easy_tier", - "easy_tier_status"] - rows[1] = ["1", self._flags["storwize_svc_volpool_name"], "online", - "1", str(len(self._volumes_list)), "3.25TB", "256", - "3.21TB", "1.54TB", "264.97MB", "35.58GB", "47", "80", - "auto", "inactive"] - rows[2] = ["2", "volpool2", "online", - "1", "0", "3.25TB", "256", - "3.21TB", "1.54TB", "264.97MB", "35.58GB", "47", "80", - "auto", "inactive"] + rows[0] = ['id', '0123456789ABCDEF'] + rows[1] = ['name', 'storwize-svc-sim'] + rows[2] = ['code_level', '7.2.0.0 (build 87.0.1311291000)'] return self._print_info_cmd(rows=rows, **kwargs) + # Print mostly made-up stuff in the correct syntax, assume -bytes passed + def _cmd_lsmdiskgrp(self, **kwargs): + rows = [None] * 4 + rows[0] = ['id', 'name', 'status', 'mdisk_count', + 'vdisk_count', 'capacity', 'extent_size', + 'free_capacity', 'virtual_capacity', 'used_capacity', + 'real_capacity', 'overallocation', 'warning', + 'easy_tier', 'easy_tier_status'] + rows[1] = ['1', self._flags['storwize_svc_volpool_name'], 'online', + '1', str(len(self._volumes_list)), '3573412790272', + '256', '3529926246400', '1693247906775', '277841182', + '38203734097', '47', '80', 'auto', 'inactive'] + rows[2] = ['2', 'openstack2', 'online', + '1', '0', '3573412790272', '256', + '3529432325160', '1693247906775', '277841182', + '38203734097', '47', '80', 'auto', 'inactive'] + rows[3] = ['3', 'openstack3', 'online', + '1', '0', '3573412790272', '128', + '3529432325160', '1693247906775', '277841182', + '38203734097', '47', '80', 'auto', 'inactive'] + if 'obj' not in kwargs: + return self._print_info_cmd(rows=rows, **kwargs) + else: + if kwargs['obj'] == self._flags['storwize_svc_volpool_name']: + row = rows[1] + elif kwargs['obj'] == 'openstack2': + row = rows[2] + elif kwargs['obj'] == 'openstack3': + row = rows[3] + else: + return self._errors['CMMVC5754E'] + + objrows = [] + for idx, val in enumerate(rows[0]): + objrows.append([val, row[idx]]) + + if 'nohdr' in kwargs: + for index in range(len(objrows)): + objrows[index] = ' '.join(objrows[index][1:]) + + if 'delim' in kwargs: + for index in range(len(objrows)): + objrows[index] = kwargs['delim'].join(objrows[index]) + + return ('%s' % '\n'.join(objrows), '') + # Print mostly made-up stuff in the correct syntax def _cmd_lsnodecanister(self, **kwargs): rows = [None] * 3 - rows[0] = ["id", "name", "UPS_serial_number", "WWNN", "status", - "IO_group_id", "IO_group_name", "config_node", - "UPS_unique_id", "hardware", "iscsi_name", "iscsi_alias", - "panel_name", "enclosure_id", "canister_id", - "enclosure_serial_number"] - rows[1] = ["5", "node1", "", "123456789ABCDEF0", "online", "0", - "io_grp0", - "yes", "123456789ABCDEF0", "100", - "iqn.1982-01.com.ibm:1234.sim.node1", "", "01-1", "1", "1", - "0123ABC"] - rows[2] = ["6", "node2", "", "123456789ABCDEF1", "online", "0", - "io_grp0", - "no", "123456789ABCDEF1", "100", - "iqn.1982-01.com.ibm:1234.sim.node2", "", "01-2", "1", "2", - "0123ABC"] - - if self._next_cmd_error["lsnodecanister"] == "header_mismatch": + rows[0] = ['id', 'name', 'UPS_serial_number', 'WWNN', 'status', + 'IO_group_id', 'IO_group_name', 'config_node', + 'UPS_unique_id', 'hardware', 'iscsi_name', 'iscsi_alias', + 'panel_name', 'enclosure_id', 'canister_id', + 'enclosure_serial_number'] + rows[1] = ['1', 'node1', '', '123456789ABCDEF0', 'online', '0', + 'io_grp0', + 'yes', '123456789ABCDEF0', '100', + 
'iqn.1982-01.com.ibm:1234.sim.node1', '', '01-1', '1', '1', + '0123ABC'] + rows[2] = ['2', 'node2', '', '123456789ABCDEF1', 'online', '0', + 'io_grp0', + 'no', '123456789ABCDEF1', '100', + 'iqn.1982-01.com.ibm:1234.sim.node2', '', '01-2', '1', '2', + '0123ABC'] + + if self._next_cmd_error['lsnodecanister'] == 'header_mismatch': rows[0].pop(2) - self._next_cmd_error["lsnodecanister"] = "" - if self._next_cmd_error["lsnodecanister"] == "remove_field": + self._next_cmd_error['lsnodecanister'] = '' + if self._next_cmd_error['lsnodecanister'] == 'remove_field': for row in rows: row.pop(0) - self._next_cmd_error["lsnodecanister"] = "" + self._next_cmd_error['lsnodecanister'] = '' return self._print_info_cmd(rows=rows, **kwargs) + # Print information of every single node of SVC + def _cmd_lsnode(self, **kwargs): + node_infos = dict() + node_infos['1'] = r'''id!1 +name!node1 +port_id!500507680210C744 +port_status!active +port_speed!8Gb +port_id!500507680220C744 +port_status!active +port_speed!8Gb +''' + node_infos['2'] = r'''id!2 +name!node2 +port_id!500507680220C745 +port_status!active +port_speed!8Gb +port_id!500507680230C745 +port_status!inactive +port_speed!N/A +''' + node_id = kwargs.get('node_id', None) + stdout = node_infos.get(node_id, '') + return stdout, '' + # Print mostly made-up stuff in the correct syntax def _cmd_lsportip(self, **kwargs): - if self._next_cmd_error["lsportip"] == "ip_no_config": - self._next_cmd_error["lsportip"] = "" - ip_addr1 = "" - ip_addr2 = "" - gw = "" + if self._next_cmd_error['lsportip'] == 'ip_no_config': + self._next_cmd_error['lsportip'] = '' + ip_addr1 = '' + ip_addr2 = '' + gw = '' else: - ip_addr1 = "1.234.56.78" - ip_addr2 = "1.234.56.79" - gw = "1.234.56.1" + ip_addr1 = '1.234.56.78' + ip_addr2 = '1.234.56.79' + gw = '1.234.56.1' rows = [None] * 17 - rows[0] = ["id", "node_id", "node_name", "IP_address", "mask", - "gateway", "IP_address_6", "prefix_6", "gateway_6", "MAC", - "duplex", "state", "speed", "failover"] - rows[1] = ["1", "5", "node1", ip_addr1, "255.255.255.0", - gw, "", "", "", "01:23:45:67:89:00", "Full", - "online", "1Gb/s", "no"] - rows[2] = ["1", "5", "node1", "", "", "", "", "", "", - "01:23:45:67:89:00", "Full", "online", "1Gb/s", "yes"] - rows[3] = ["2", "5", "node1", "", "", "", "", "", "", - "01:23:45:67:89:01", "Full", "unconfigured", "1Gb/s", "no"] - rows[4] = ["2", "5", "node1", "", "", "", "", "", "", - "01:23:45:67:89:01", "Full", "unconfigured", "1Gb/s", "yes"] - rows[5] = ["3", "5", "node1", "", "", "", "", "", "", "", "", - "unconfigured", "", "no"] - rows[6] = ["3", "5", "node1", "", "", "", "", "", "", "", "", - "unconfigured", "", "yes"] - rows[7] = ["4", "5", "node1", "", "", "", "", "", "", "", "", - "unconfigured", "", "no"] - rows[8] = ["4", "5", "node1", "", "", "", "", "", "", "", "", - "unconfigured", "", "yes"] - rows[9] = ["1", "6", "node2", ip_addr2, "255.255.255.0", - gw, "", "", "", "01:23:45:67:89:02", "Full", - "online", "1Gb/s", "no"] - rows[10] = ["1", "6", "node2", "", "", "", "", "", "", - "01:23:45:67:89:02", "Full", "online", "1Gb/s", "yes"] - rows[11] = ["2", "6", "node2", "", "", "", "", "", "", - "01:23:45:67:89:03", "Full", "unconfigured", "1Gb/s", "no"] - rows[12] = ["2", "6", "node2", "", "", "", "", "", "", - "01:23:45:67:89:03", "Full", "unconfigured", "1Gb/s", - "yes"] - rows[13] = ["3", "6", "node2", "", "", "", "", "", "", "", "", - "unconfigured", "", "no"] - rows[14] = ["3", "6", "node2", "", "", "", "", "", "", "", "", - "unconfigured", "", "yes"] - rows[15] = ["4", "6", "node2", "", 
"", "", "", "", "", "", "", - "unconfigured", "", "no"] - rows[16] = ["4", "6", "node2", "", "", "", "", "", "", "", "", - "unconfigured", "", "yes"] - - if self._next_cmd_error["lsportip"] == "header_mismatch": + rows[0] = ['id', 'node_id', 'node_name', 'IP_address', 'mask', + 'gateway', 'IP_address_6', 'prefix_6', 'gateway_6', 'MAC', + 'duplex', 'state', 'speed', 'failover'] + rows[1] = ['1', '1', 'node1', ip_addr1, '255.255.255.0', + gw, '', '', '', '01:23:45:67:89:00', 'Full', + 'online', '1Gb/s', 'no'] + rows[2] = ['1', '1', 'node1', '', '', '', '', '', '', + '01:23:45:67:89:00', 'Full', 'online', '1Gb/s', 'yes'] + rows[3] = ['2', '1', 'node1', '', '', '', '', '', '', + '01:23:45:67:89:01', 'Full', 'unconfigured', '1Gb/s', 'no'] + rows[4] = ['2', '1', 'node1', '', '', '', '', '', '', + '01:23:45:67:89:01', 'Full', 'unconfigured', '1Gb/s', 'yes'] + rows[5] = ['3', '1', 'node1', '', '', '', '', '', '', '', '', + 'unconfigured', '', 'no'] + rows[6] = ['3', '1', 'node1', '', '', '', '', '', '', '', '', + 'unconfigured', '', 'yes'] + rows[7] = ['4', '1', 'node1', '', '', '', '', '', '', '', '', + 'unconfigured', '', 'no'] + rows[8] = ['4', '1', 'node1', '', '', '', '', '', '', '', '', + 'unconfigured', '', 'yes'] + rows[9] = ['1', '2', 'node2', ip_addr2, '255.255.255.0', + gw, '', '', '', '01:23:45:67:89:02', 'Full', + 'online', '1Gb/s', 'no'] + rows[10] = ['1', '2', 'node2', '', '', '', '', '', '', + '01:23:45:67:89:02', 'Full', 'online', '1Gb/s', 'yes'] + rows[11] = ['2', '2', 'node2', '', '', '', '', '', '', + '01:23:45:67:89:03', 'Full', 'unconfigured', '1Gb/s', 'no'] + rows[12] = ['2', '2', 'node2', '', '', '', '', '', '', + '01:23:45:67:89:03', 'Full', 'unconfigured', '1Gb/s', + 'yes'] + rows[13] = ['3', '2', 'node2', '', '', '', '', '', '', '', '', + 'unconfigured', '', 'no'] + rows[14] = ['3', '2', 'node2', '', '', '', '', '', '', '', '', + 'unconfigured', '', 'yes'] + rows[15] = ['4', '2', 'node2', '', '', '', '', '', '', '', '', + 'unconfigured', '', 'no'] + rows[16] = ['4', '2', 'node2', '', '', '', '', '', '', '', '', + 'unconfigured', '', 'yes'] + + if self._next_cmd_error['lsportip'] == 'header_mismatch': rows[0].pop(2) - self._next_cmd_error["lsportip"] = "" - if self._next_cmd_error["lsportip"] == "remove_field": + self._next_cmd_error['lsportip'] = '' + if self._next_cmd_error['lsportip'] == 'remove_field': for row in rows: row.pop(1) - self._next_cmd_error["lsportip"] = "" + self._next_cmd_error['lsportip'] = '' return self._print_info_cmd(rows=rows, **kwargs) + def _cmd_lsfabric(self, **kwargs): + host_name = kwargs['host'] if 'host' in kwargs else None + target_wwpn = kwargs['wwpn'] if 'wwpn' in kwargs else None + host_infos = [] + + for hv in self._hosts_list.itervalues(): + if not host_name or hv['host_name'].startswith(host_name): + for mv in self._mappings_list.itervalues(): + if mv['host'] == hv['host_name']: + if not target_wwpn or target_wwpn in hv['wwpns']: + host_infos.append(hv) + break + + if not len(host_infos): + return ('', '') + + rows = [] + rows.append(['remote_wwpn', 'remote_nportid', 'id', 'node_name', + 'local_wwpn', 'local_port', 'local_nportid', 'state', + 'name', 'cluster_name', 'type']) + for host_info in host_infos: + for wwpn in host_info['wwpns']: + rows.append([wwpn, '123456', host_info['id'], 'nodeN', + 'AABBCCDDEEFF0011', '1', '0123ABC', 'active', + host_info['host_name'], '', 'host']) + + if self._next_cmd_error['lsfabric'] == 'header_mismatch': + rows[0].pop(0) + self._next_cmd_error['lsfabric'] = '' + if self._next_cmd_error['lsfabric'] 
== 'remove_field': + for row in rows: + row.pop(0) + self._next_cmd_error['lsfabric'] = '' + return self._print_info_cmd(rows=rows, **kwargs) + # Create a vdisk def _cmd_mkvdisk(self, **kwargs): # We only save the id/uid, name, and size - all else will be made up volume_info = {} - volume_info["id"] = self._find_unused_id(self._volumes_list) - volume_info["uid"] = ("ABCDEF" * 3) + ("0" * 14) + volume_info["id"] + volume_info['id'] = self._find_unused_id(self._volumes_list) + volume_info['uid'] = ('ABCDEF' * 3) + ('0' * 14) + volume_info['id'] - if "name" in kwargs: - volume_info["name"] = kwargs["name"].strip('\'\"') + if 'name' in kwargs: + volume_info['name'] = kwargs['name'].strip('\'\'') else: - volume_info["name"] = "vdisk" + volume_info["id"] + volume_info['name'] = 'vdisk' + volume_info['id'] # Assume size and unit are given, store it in bytes - capacity = int(kwargs["size"]) - unit = kwargs["unit"] - - if unit == "b": - cap_bytes = capacity - elif unit == "kb": - cap_bytes = capacity * pow(1024, 1) - elif unit == "mb": - cap_bytes = capacity * pow(1024, 2) - elif unit == "gb": - cap_bytes = capacity * pow(1024, 3) - elif unit == "tb": - cap_bytes = capacity * pow(1024, 4) - elif unit == "pb": - cap_bytes = capacity * pow(1024, 5) - volume_info["cap_bytes"] = str(cap_bytes) - volume_info["capacity"] = str(capacity) + unit.upper() - - if "easytier" in kwargs: - if kwargs["easytier"] == "on": - volume_info["easy_tier"] = "on" + capacity = int(kwargs['size']) + unit = kwargs['unit'] + volume_info['capacity'] = self._convert_units_bytes(capacity, unit) + volume_info['IO_group_id'] = kwargs['iogrp'] + volume_info['IO_group_name'] = 'io_grp%s' % kwargs['iogrp'] + + if 'easytier' in kwargs: + if kwargs['easytier'] == 'on': + volume_info['easy_tier'] = 'on' else: - volume_info["easy_tier"] = "off" + volume_info['easy_tier'] = 'off' - if "rsize" in kwargs: + if 'rsize' in kwargs: # Fake numbers - volume_info["used_capacity"] = "0.75MB" - volume_info["real_capacity"] = "36.98MB" - volume_info["free_capacity"] = "36.23MB" - volume_info["used_capacity_bytes"] = "786432" - volume_info["real_capacity_bytes"] = "38776340" - volume_info["free_capacity_bytes"] = "37989908" - if "warning" in kwargs: - volume_info["warning"] = kwargs["warning"].rstrip('%') + volume_info['used_capacity'] = '786432' + volume_info['real_capacity'] = '21474816' + volume_info['free_capacity'] = '38219264' + if 'warning' in kwargs: + volume_info['warning'] = kwargs['warning'].rstrip('%') else: - volume_info["warning"] = "80" - if "autoexpand" in kwargs: - volume_info["autoexpand"] = "on" + volume_info['warning'] = '80' + if 'autoexpand' in kwargs: + volume_info['autoexpand'] = 'on' else: - volume_info["autoexpand"] = "off" - if "grainsize" in kwargs: - volume_info["grainsize"] = kwargs["grainsize"] + volume_info['autoexpand'] = 'off' + if 'grainsize' in kwargs: + volume_info['grainsize'] = kwargs['grainsize'] else: - volume_info["grainsize"] = "32" - if "compressed" in kwargs: - if self._next_cmd_error["mkvdisk"] == "no_compression": - self._next_cmd_error["mkvdisk"] = "" - return self._errors["CMMVC7050E"] - volume_info["compressed_copy"] = "yes" + volume_info['grainsize'] = '32' + if 'compressed' in kwargs: + volume_info['compressed_copy'] = 'yes' else: - volume_info["compressed_copy"] = "no" + volume_info['compressed_copy'] = 'no' else: - volume_info["used_capacity"] = volume_info["capacity"] - volume_info["real_capacity"] = volume_info["capacity"] - volume_info["free_capacity"] = "0.00MB" - 
volume_info["used_capacity_bytes"] = volume_info["cap_bytes"] - volume_info["real_capacity_bytes"] = volume_info["cap_bytes"] - volume_info["free_capacity_bytes"] = "0" - volume_info["warning"] = "" - volume_info["autoexpand"] = "" - volume_info["grainsize"] = "" - volume_info["compressed_copy"] = "no" - - if volume_info["name"] in self._volumes_list: - return self._errors["CMMVC6035E"] + volume_info['used_capacity'] = volume_info['capacity'] + volume_info['real_capacity'] = volume_info['capacity'] + volume_info['free_capacity'] = '0' + volume_info['warning'] = '' + volume_info['autoexpand'] = '' + volume_info['grainsize'] = '' + volume_info['compressed_copy'] = 'no' + + vol_cp = {'id': '0', + 'status': 'online', + 'sync': 'yes', + 'primary': 'yes', + 'mdisk_grp_id': '1', + 'mdisk_grp_name': self._flags['storwize_svc_volpool_name'], + 'easy_tier': volume_info['easy_tier'], + 'compressed_copy': volume_info['compressed_copy']} + volume_info['copies'] = {'0': vol_cp} + + if volume_info['name'] in self._volumes_list: + return self._errors['CMMVC6035E'] else: - self._volumes_list[volume_info["name"]] = volume_info - return ("Virtual Disk, id [%s], successfully created" % - (volume_info["id"]), "") + self._volumes_list[volume_info['name']] = volume_info + return ('Virtual Disk, id [%s], successfully created' % + (volume_info['id']), '') # Delete a vdisk def _cmd_rmvdisk(self, **kwargs): - force = 0 - if "force" in kwargs: - force = 1 - - if "obj" not in kwargs: - return self._errors["CMMVC5701E"] - vol_name = kwargs["obj"].strip('\'\"') - - if not vol_name in self._volumes_list: - return self._errors["CMMVC5753E"] - - if force == 0: - for k, mapping in self._mappings_list.iteritems(): - if mapping["vol"] == vol_name: - return self._errors["CMMVC5840E"] - for k, fcmap in self._fcmappings_list.iteritems(): - if ((fcmap["source"] == vol_name) or - (fcmap["target"] == vol_name)): - return self._errors["CMMVC5840E"] + force = True if 'force' in kwargs else False + + if 'obj' not in kwargs: + return self._errors['CMMVC5701E'] + vol_name = kwargs['obj'].strip('\'\'') + + if vol_name not in self._volumes_list: + return self._errors['CMMVC5753E'] + + if not force: + for mapping in self._mappings_list.itervalues(): + if mapping['vol'] == vol_name: + return self._errors['CMMVC5840E'] + for fcmap in self._fcmappings_list.itervalues(): + if ((fcmap['source'] == vol_name) or + (fcmap['target'] == vol_name)): + return self._errors['CMMVC5840E'] del self._volumes_list[vol_name] - return ("", "") + return ('', '') + + def _cmd_expandvdisksize(self, **kwargs): + if 'obj' not in kwargs: + return self._errors['CMMVC5701E'] + vol_name = kwargs['obj'].strip('\'\'') + + # Assume unit is gb + if 'size' not in kwargs: + return self._errors['CMMVC5707E'] + size = int(kwargs['size']) + + if vol_name not in self._volumes_list: + return self._errors['CMMVC5753E'] + + curr_size = int(self._volumes_list[vol_name]['capacity']) + addition = size * units.GiB + self._volumes_list[vol_name]['capacity'] = str(curr_size + addition) + return ('', '') def _get_fcmap_info(self, vol_name): ret_vals = { - "fc_id": "", - "fc_name": "", - "fc_map_count": "0", + 'fc_id': '', + 'fc_name': '', + 'fc_map_count': '0', } - for k, fcmap in self._fcmappings_list.iteritems(): - if ((fcmap["source"] == vol_name) or - (fcmap["target"] == vol_name)): - ret_vals["fc_id"] = fcmap["id"] - ret_vals["fc_name"] = fcmap["name"] - ret_vals["fc_map_count"] = "1" + for fcmap in self._fcmappings_list.itervalues(): + if ((fcmap['source'] == vol_name) or + 
(fcmap['target'] == vol_name)):
+                ret_vals['fc_id'] = fcmap['id']
+                ret_vals['fc_name'] = fcmap['name']
+                ret_vals['fc_map_count'] = '1'
         return ret_vals

     # List information about vdisks
     def _cmd_lsvdisk(self, **kwargs):
-        if "obj" not in kwargs:
-            rows = []
-            rows.append(["id", "name", "IO_group_id", "IO_group_name",
-                         "status", "mdisk_grp_id", "mdisk_grp_name",
-                         "capacity", "type", "FC_id", "FC_name", "RC_id",
-                         "RC_name", "vdisk_UID", "fc_map_count", "copy_count",
-                         "fast_write_state", "se_copy_count", "RC_change"])
-
-            for k, vol in self._volumes_list.iteritems():
-                if (("filtervalue" not in kwargs) or
-                        (kwargs["filtervalue"] == "name=" + vol["name"])):
-                    fcmap_info = self._get_fcmap_info(vol["name"])
-
-                    if "bytes" in kwargs:
-                        cap = vol["cap_bytes"]
-                    else:
-                        cap = vol["capacity"]
-                    rows.append([str(vol["id"]), vol["name"], "0", "io_grp0",
-                                 "online", "0",
-                                 self._flags["storwize_svc_volpool_name"],
-                                 cap, "striped",
-                                 fcmap_info["fc_id"], fcmap_info["fc_name"],
-                                 "", "", vol["uid"],
-                                 fcmap_info["fc_map_count"], "1", "empty",
-                                 "1", "no"])
-
+        rows = []
+        rows.append(['id', 'name', 'IO_group_id', 'IO_group_name',
+                     'status', 'mdisk_grp_id', 'mdisk_grp_name',
+                     'capacity', 'type', 'FC_id', 'FC_name', 'RC_id',
+                     'RC_name', 'vdisk_UID', 'fc_map_count', 'copy_count',
+                     'fast_write_state', 'se_copy_count', 'RC_change'])
+
+        for vol in self._volumes_list.itervalues():
+            if (('filtervalue' not in kwargs) or
+                    (kwargs['filtervalue'] == 'name=' + vol['name'])):
+                fcmap_info = self._get_fcmap_info(vol['name'])
+
+                if 'bytes' in kwargs:
+                    cap = vol['capacity']
+                else:
+                    cap = self._convert_bytes_units(vol['capacity'])
+                rows.append([str(vol['id']), vol['name'], vol['IO_group_id'],
+                             vol['IO_group_name'], 'online', '0',
+                             self._flags['storwize_svc_volpool_name'],
+                             cap, 'striped',
+                             fcmap_info['fc_id'], fcmap_info['fc_name'],
+                             '', '', vol['uid'],
+                             fcmap_info['fc_map_count'], '1', 'empty',
+                             '1', 'no'])
+
+        if 'obj' not in kwargs:
             return self._print_info_cmd(rows=rows, **kwargs)
-        else:
-            if kwargs["obj"] not in self._volumes_list:
-                return self._errors["CMMVC5754E"]
-            vol = self._volumes_list[kwargs["obj"]]
-            fcmap_info = self._get_fcmap_info(vol["name"])
-            if "bytes" in kwargs:
-                cap = vol["cap_bytes"]
-                cap_u = vol["used_capacity_bytes"]
-                cap_r = vol["real_capacity_bytes"]
-                cap_f = vol["free_capacity_bytes"]
-            else:
-                cap = vol["capacity"]
-                cap_u = vol["used_capacity"]
-                cap_r = vol["real_capacity"]
-                cap_f = vol["free_capacity"]
+        if kwargs['obj'] not in self._volumes_list:
+            return self._errors['CMMVC5754E']
+        vol = self._volumes_list[kwargs['obj']]
+        fcmap_info = self._get_fcmap_info(vol['name'])
+        cap = vol['capacity']
+        cap_u = vol['used_capacity']
+        cap_r = vol['real_capacity']
+        cap_f = vol['free_capacity']
+        if 'bytes' not in kwargs:
+            cap = self._convert_bytes_units(cap)
+            cap_u = self._convert_bytes_units(cap_u)
+            cap_r = self._convert_bytes_units(cap_r)
+            cap_f = self._convert_bytes_units(cap_f)

         rows = []
-            rows.append(["id", str(vol["id"])])
-            rows.append(["name", vol["name"]])
-            rows.append(["IO_group_id", "0"])
-            rows.append(["IO_group_name", "io_grp0"])
-            rows.append(["status", "online"])
-            rows.append(["mdisk_grp_id", "0"])
-            rows.append(["mdisk_grp_name",
-                         self._flags["storwize_svc_volpool_name"]])
-            rows.append(["capacity", cap])
-            rows.append(["type", "striped"])
-            rows.append(["formatted", "no"])
-            rows.append(["mdisk_id", ""])
-            rows.append(["mdisk_name", ""])
-            rows.append(["FC_id", fcmap_info["fc_id"]])
-            rows.append(["FC_name", fcmap_info["fc_name"]])
-            rows.append(["RC_id", ""])
-            rows.append(["RC_name", ""])
-            rows.append(["vdisk_UID",
vol["uid"]]) - rows.append(["throttling", "0"]) - - if self._next_cmd_error["lsvdisk"] == "blank_pref_node": - rows.append(["preferred_node_id", ""]) - self._next_cmd_error["lsvdisk"] = "" - elif self._next_cmd_error["lsvdisk"] == "no_pref_node": - self._next_cmd_error["lsvdisk"] = "" + rows.append(['id', str(vol['id'])]) + rows.append(['name', vol['name']]) + rows.append(['IO_group_id', vol['IO_group_id']]) + rows.append(['IO_group_name', vol['IO_group_name']]) + rows.append(['status', 'online']) + rows.append(['capacity', cap]) + rows.append(['formatted', 'no']) + rows.append(['mdisk_id', '']) + rows.append(['mdisk_name', '']) + rows.append(['FC_id', fcmap_info['fc_id']]) + rows.append(['FC_name', fcmap_info['fc_name']]) + rows.append(['RC_id', '']) + rows.append(['RC_name', '']) + rows.append(['vdisk_UID', vol['uid']]) + rows.append(['throttling', '0']) + + if self._next_cmd_error['lsvdisk'] == 'blank_pref_node': + rows.append(['preferred_node_id', '']) + self._next_cmd_error['lsvdisk'] = '' + elif self._next_cmd_error['lsvdisk'] == 'no_pref_node': + self._next_cmd_error['lsvdisk'] = '' else: - rows.append(["preferred_node_id", "6"]) - rows.append(["fast_write_state", "empty"]) - rows.append(["cache", "readwrite"]) - rows.append(["udid", ""]) - rows.append(["fc_map_count", fcmap_info["fc_map_count"]]) - rows.append(["sync_rate", "50"]) - rows.append(["copy_count", "1"]) - rows.append(["se_copy_count", "0"]) - rows.append(["mirror_write_priority", "latency"]) - rows.append(["RC_change", "no"]) - rows.append(["used_capacity", cap_u]) - rows.append(["real_capacity", cap_r]) - rows.append(["free_capacity", cap_f]) - rows.append(["autoexpand", vol["autoexpand"]]) - rows.append(["warning", vol["warning"]]) - rows.append(["grainsize", vol["grainsize"]]) - rows.append(["easy_tier", vol["easy_tier"]]) - rows.append(["compressed_copy", vol["compressed_copy"]]) - - if "nohdr" in kwargs: + rows.append(['preferred_node_id', '1']) + rows.append(['fast_write_state', 'empty']) + rows.append(['cache', 'readwrite']) + rows.append(['udid', '']) + rows.append(['fc_map_count', fcmap_info['fc_map_count']]) + rows.append(['sync_rate', '50']) + rows.append(['copy_count', '1']) + rows.append(['se_copy_count', '0']) + rows.append(['mirror_write_priority', 'latency']) + rows.append(['RC_change', 'no']) + + for copy in vol['copies'].itervalues(): + rows.append(['copy_id', copy['id']]) + rows.append(['status', copy['status']]) + rows.append(['primary', copy['primary']]) + rows.append(['mdisk_grp_id', copy['mdisk_grp_id']]) + rows.append(['mdisk_grp_name', copy['mdisk_grp_name']]) + rows.append(['type', 'striped']) + rows.append(['used_capacity', cap_u]) + rows.append(['real_capacity', cap_r]) + rows.append(['free_capacity', cap_f]) + rows.append(['easy_tier', copy['easy_tier']]) + rows.append(['compressed_copy', copy['compressed_copy']]) + rows.append(['autoexpand', vol['autoexpand']]) + rows.append(['warning', vol['warning']]) + rows.append(['grainsize', vol['grainsize']]) + + if 'nohdr' in kwargs: for index in range(len(rows)): - rows[index] = " ".join(rows[index][1:]) + rows[index] = ' '.join(rows[index][1:]) - if "delim" in kwargs: + if 'delim' in kwargs: for index in range(len(rows)): - rows[index] = kwargs["delim"].join(rows[index]) + rows[index] = kwargs['delim'].join(rows[index]) + + return ('%s' % '\n'.join(rows), '') + + def _cmd_lsiogrp(self, **kwargs): + rows = [None] * 6 + rows[0] = ['id', 'name', 'node_count', 'vdisk_count', 'host_count'] + rows[1] = ['0', 'io_grp0', '2', '0', '4'] + rows[2] = ['1', 
'io_grp1', '2', '0', '4']
+        rows[3] = ['2', 'io_grp2', '0', '0', '4']
+        rows[4] = ['3', 'io_grp3', '0', '0', '4']
+        rows[5] = ['4', 'recovery_io_grp', '0', '0', '0']
+        return self._print_info_cmd(rows=rows, **kwargs)
+
+    def _add_port_to_host(self, host_info, **kwargs):
+        if 'iscsiname' in kwargs:
+            added_key = 'iscsi_names'
+            added_val = kwargs['iscsiname'].strip('\'\"')
+        elif 'hbawwpn' in kwargs:
+            added_key = 'wwpns'
+            added_val = kwargs['hbawwpn'].strip('\'\"')
+        else:
+            return self._errors['CMMVC5707E']

-        return ("%s" % "\n".join(rows), "")
+        host_info[added_key].append(added_val)
+
+        for v in self._hosts_list.itervalues():
+            if v['id'] == host_info['id']:
+                continue
+            for port in v[added_key]:
+                if port == added_val:
+                    return self._errors['CMMVC6581E']
+        return ('', '')

     # Make a host
     def _cmd_mkhost(self, **kwargs):
         host_info = {}
-        host_info["id"] = self._find_unused_id(self._hosts_list)
+        host_info['id'] = self._find_unused_id(self._hosts_list)

-        if "name" in kwargs:
-            host_name = kwargs["name"].strip('\'\"')
+        if 'name' in kwargs:
+            host_name = kwargs['name'].strip('\'\"')
         else:
-            host_name = "host" + str(host_info["id"])
-        host_info["host_name"] = host_name
-
-        if "iscsiname" not in kwargs:
-            return self._errors["CMMVC5707E"]
-        host_info["iscsi_name"] = kwargs["iscsiname"].strip('\'\"')
+            host_name = 'host' + str(host_info['id'])

         if self._is_invalid_name(host_name):
-            return self._errors["CMMVC6527E"]
+            return self._errors['CMMVC6527E']

         if host_name in self._hosts_list:
-            return self._errors["CMMVC6035E"]
+            return self._errors['CMMVC6035E']

-        for k, v in self._hosts_list.iteritems():
-            if v["iscsi_name"] == host_info["iscsi_name"]:
-                return self._errors["CMMVC6581E"]
+        host_info['host_name'] = host_name
+        host_info['iscsi_names'] = []
+        host_info['wwpns'] = []

-        self._hosts_list[host_name] = host_info
-        return ("Host, id [%s], successfully created" %
-                (host_info["id"]), "")
+        out, err = self._add_port_to_host(host_info, **kwargs)
+        if not len(err):
+            self._hosts_list[host_name] = host_info
+            return ('Host, id [%s], successfully created' %
+                    (host_info['id']), '')
+        else:
+            return (out, err)
+
+    # Add ports to an existing host
+    def _cmd_addhostport(self, **kwargs):
+        if 'obj' not in kwargs:
+            return self._errors['CMMVC5701E']
+        host_name = kwargs['obj'].strip('\'\"')
+
+        if host_name not in self._hosts_list:
+            return self._errors['CMMVC5753E']
+
+        host_info = self._hosts_list[host_name]
+        return self._add_port_to_host(host_info, **kwargs)
+
+    # Change host properties
+    def _cmd_chhost(self, **kwargs):
+        if 'chapsecret' not in kwargs:
+            return self._errors['CMMVC5707E']
+        secret = kwargs['chapsecret'].strip('\'\"')
+
+        if 'obj' not in kwargs:
+            return self._errors['CMMVC5701E']
+        host_name = kwargs['obj'].strip('\'\"')
+
+        if host_name not in self._hosts_list:
+            return self._errors['CMMVC5753E']
+
+        self._hosts_list[host_name]['chapsecret'] = secret
+        return ('', '')
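The chapsecret handling above pairs with _cmd_lsiscsiauth further down:
chhost stores the secret on the host record, and lsiscsiauth reports it per
host. A sketch of the round trip through the simulator (illustrative only,
assuming the corrected chhost above):

    # Illustrative only: set a CHAP secret, then read it back.
    sim = StorwizeSVCManagementSimulator('openstack')
    sim._cmd_mkhost(name='h1', iscsiname='iqn.fake:h1')
    sim._cmd_chhost(obj='h1', chapsecret='s3cr3t')
    out, err = sim._cmd_lsiscsiauth()
    assert 's3cr3t' in out

     # Remove a host
     def _cmd_rmhost(self, **kwargs):
-        if "obj" not in kwargs:
-            return self._errors["CMMVC5701E"]
+        if 'obj' not in kwargs:
+            return self._errors['CMMVC5701E']

-        host_name = kwargs["obj"].strip('\'\"')
+        host_name = kwargs['obj'].strip('\'\"')
         if host_name not in self._hosts_list:
-            return self._errors["CMMVC5753E"]
+            return self._errors['CMMVC5753E']

-        for k, v in self._mappings_list.iteritems():
-            if (v["host"] == host_name):
-                return self._errors["CMMVC5871E"]
+        for v in self._mappings_list.itervalues():
+            if (v['host'] == host_name):
+                return self._errors['CMMVC5871E']

         del self._hosts_list[host_name]
-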
return ("", "") + return ('', '') # List information about hosts def _cmd_lshost(self, **kwargs): - if "obj" not in kwargs: + if 'obj' not in kwargs: rows = [] - rows.append(["id", "name", "port_count", "iogrp_count", "status"]) + rows.append(['id', 'name', 'port_count', 'iogrp_count', 'status']) found = False - for k, host in self._hosts_list.iteritems(): - filterstr = "name=" + host["host_name"] - if (("filtervalue" not in kwargs) or - (kwargs["filtervalue"] == filterstr)): - rows.append([host["id"], host["host_name"], "1", "4", - "offline"]) + for host in self._hosts_list.itervalues(): + filterstr = 'name=' + host['host_name'] + if (('filtervalue' not in kwargs) or + (kwargs['filtervalue'] == filterstr)): + rows.append([host['id'], host['host_name'], '1', '4', + 'offline']) found = True if found: return self._print_info_cmd(rows=rows, **kwargs) else: - return ("", "") + return ('', '') else: - if kwargs["obj"] not in self._hosts_list: - return self._errors["CMMVC5754E"] - host = self._hosts_list[kwargs["obj"]] + if kwargs['obj'] not in self._hosts_list: + return self._errors['CMMVC5754E'] + host = self._hosts_list[kwargs['obj']] rows = [] - rows.append(["id", host["id"]]) - rows.append(["name", host["host_name"]]) - rows.append(["port_count", "1"]) - rows.append(["type", "generic"]) - rows.append(["mask", "1111"]) - rows.append(["iogrp_count", "4"]) - rows.append(["status", "offline"]) - rows.append(["iscsi_name", host["iscsi_name"]]) - rows.append(["node_logged_in_count", "0"]) - rows.append(["state", "offline"]) - - if "nohdr" in kwargs: + rows.append(['id', host['id']]) + rows.append(['name', host['host_name']]) + rows.append(['port_count', '1']) + rows.append(['type', 'generic']) + rows.append(['mask', '1111']) + rows.append(['iogrp_count', '4']) + rows.append(['status', 'online']) + for port in host['iscsi_names']: + rows.append(['iscsi_name', port]) + rows.append(['node_logged_in_count', '0']) + rows.append(['state', 'offline']) + for port in host['wwpns']: + rows.append(['WWPN', port]) + rows.append(['node_logged_in_count', '0']) + rows.append(['state', 'active']) + + if 'nohdr' in kwargs: for index in range(len(rows)): - rows[index] = " ".join(rows[index][1:]) + rows[index] = ' '.join(rows[index][1:]) - if "delim" in kwargs: + if 'delim' in kwargs: for index in range(len(rows)): - rows[index] = kwargs["delim"].join(rows[index]) + rows[index] = kwargs['delim'].join(rows[index]) - return ("%s" % "\n".join(rows), "") + return ('%s' % '\n'.join(rows), '') + + # List iSCSI authorization information about hosts + def _cmd_lsiscsiauth(self, **kwargs): + if self._next_cmd_error['lsiscsiauth'] == 'no_info': + self._next_cmd_error['lsiscsiauth'] = '' + return ('', '') + rows = [] + rows.append(['type', 'id', 'name', 'iscsi_auth_method', + 'iscsi_chap_secret']) + + for host in self._hosts_list.itervalues(): + method = 'none' + secret = '' + if 'chapsecret' in host: + method = 'chap' + secret = host['chapsecret'] + rows.append(['host', host['id'], host['host_name'], method, + secret]) + return self._print_info_cmd(rows=rows, **kwargs) # Create a vdisk-host mapping def _cmd_mkvdiskhostmap(self, **kwargs): mapping_info = {} - mapping_info["id"] = self._find_unused_id(self._mappings_list) + mapping_info['id'] = self._find_unused_id(self._mappings_list) + + if 'host' not in kwargs: + return self._errors['CMMVC5707E'] + mapping_info['host'] = kwargs['host'].strip('\'\'') - if "host" not in kwargs: - return self._errors["CMMVC5707E"] - mapping_info["host"] = kwargs["host"].strip('\'\"') + if 
'scsi' not in kwargs: + return self._errors['CMMVC5707E'] + mapping_info['lun'] = kwargs['scsi'].strip('\'\'') - if "scsi" not in kwargs: - return self._errors["CMMVC5707E"] - mapping_info["lun"] = kwargs["scsi"].strip('\'\"') + if 'obj' not in kwargs: + return self._errors['CMMVC5707E'] + mapping_info['vol'] = kwargs['obj'].strip('\'\'') - if "obj" not in kwargs: - return self._errors["CMMVC5707E"] - mapping_info["vol"] = kwargs["obj"].strip('\'\"') + if mapping_info['vol'] not in self._volumes_list: + return self._errors['CMMVC5753E'] - if not mapping_info["vol"] in self._volumes_list: - return self._errors["CMMVC5753E"] + if mapping_info['host'] not in self._hosts_list: + return self._errors['CMMVC5754E'] - if not mapping_info["host"] in self._hosts_list: - return self._errors["CMMVC5754E"] + if mapping_info['vol'] in self._mappings_list: + return self._errors['CMMVC6071E'] - if mapping_info["vol"] in self._mappings_list: - return self._errors["CMMVC6071E"] + for v in self._mappings_list.itervalues(): + if ((v['host'] == mapping_info['host']) and + (v['lun'] == mapping_info['lun'])): + return self._errors['CMMVC5879E'] - for k, v in self._mappings_list.iteritems(): - if ((v["host"] == mapping_info["host"]) and - (v["lun"] == mapping_info["lun"])): - return self._errors["CMMVC5879E"] + for v in self._mappings_list.itervalues(): + if (v['lun'] == mapping_info['lun']) and ('force' not in kwargs): + return self._errors['CMMVC6071E'] - self._mappings_list[mapping_info["vol"]] = mapping_info - return ("Virtual Disk to Host map, id [%s], successfully created" - % (mapping_info["id"]), "") + self._mappings_list[mapping_info['id']] = mapping_info + return ('Virtual Disk to Host map, id [%s], successfully created' + % (mapping_info['id']), '') # Delete a vdisk-host mapping def _cmd_rmvdiskhostmap(self, **kwargs): - if "host" not in kwargs: - return self._errors["CMMVC5707E"] - host = kwargs["host"].strip('\'\"') - - if "obj" not in kwargs: - return self._errors["CMMVC5701E"] - vol = kwargs["obj"].strip('\'\"') - - if not vol in self._mappings_list: - return self._errors["CMMVC5753E"] - - if self._mappings_list[vol]["host"] != host: - return self._errors["CMMVC5753E"] - - del self._mappings_list[vol] - return ("", "") - - # List information about vdisk-host mappings + if 'host' not in kwargs: + return self._errors['CMMVC5707E'] + host = kwargs['host'].strip('\'\'') + + if 'obj' not in kwargs: + return self._errors['CMMVC5701E'] + vol = kwargs['obj'].strip('\'\'') + + mapping_ids = [] + for v in self._mappings_list.itervalues(): + if v['vol'] == vol: + mapping_ids.append(v['id']) + if not mapping_ids: + return self._errors['CMMVC5753E'] + + this_mapping = None + for mapping_id in mapping_ids: + if self._mappings_list[mapping_id]['host'] == host: + this_mapping = mapping_id + if this_mapping is None: + return self._errors['CMMVC5753E'] + + del self._mappings_list[this_mapping] + return ('', '') + + # List information about host->vdisk mappings def _cmd_lshostvdiskmap(self, **kwargs): - index = 1 - no_hdr = 0 - delimeter = "" - host_name = kwargs["obj"] + host_name = kwargs['obj'] if host_name not in self._hosts_list: - return self._errors["CMMVC5754E"] + return self._errors['CMMVC5754E'] rows = [] - rows.append(["id", "name", "SCSI_id", "vdisk_id", "vdisk_name", - "vdisk_UID"]) + rows.append(['id', 'name', 'SCSI_id', 'vdisk_id', 'vdisk_name', + 'vdisk_UID']) - for k, mapping in self._mappings_list.iteritems(): - if (host_name == "") or (mapping["host"] == host_name): - volume = 
self._volumes_list[mapping["vol"]] - rows.append([mapping["id"], mapping["host"], - mapping["lun"], volume["id"], - volume["name"], volume["uid"]]) + for mapping in self._mappings_list.itervalues(): + if (host_name == '') or (mapping['host'] == host_name): + volume = self._volumes_list[mapping['vol']] + rows.append([mapping['id'], mapping['host'], + mapping['lun'], volume['id'], + volume['name'], volume['uid']]) return self._print_info_cmd(rows=rows, **kwargs) - # Create a FlashCopy mapping - def _cmd_mkfcmap(self, **kwargs): - source = "" - target = "" + # List information about vdisk->host mappings + def _cmd_lsvdiskhostmap(self, **kwargs): + mappings_found = 0 + vdisk_name = kwargs['obj'] - if "source" not in kwargs: - return self._errors["CMMVC5707E"] - source = kwargs["source"].strip('\'\"') - if not source in self._volumes_list: - return self._errors["CMMVC5754E"] + if vdisk_name not in self._volumes_list: + return self._errors['CMMVC5753E'] - if "target" not in kwargs: - return self._errors["CMMVC5707E"] - target = kwargs["target"].strip('\'\"') - if not target in self._volumes_list: - return self._errors["CMMVC5754E"] + rows = [] + rows.append(['id name', 'SCSI_id', 'host_id', 'host_name', 'vdisk_UID', + 'IO_group_id', 'IO_group_name']) + + for mapping in self._mappings_list.itervalues(): + if (mapping['vol'] == vdisk_name): + mappings_found += 1 + volume = self._volumes_list[mapping['vol']] + host = self._hosts_list[mapping['host']] + rows.append([volume['id'], volume['name'], host['id'], + host['host_name'], volume['uid'], + volume['IO_group_id'], volume['IO_group_name']]) + + if mappings_found: + return self._print_info_cmd(rows=rows, **kwargs) + else: + return ('', '') + + # Create a FlashCopy mapping + def _cmd_mkfcmap(self, **kwargs): + source = '' + target = '' + copyrate = kwargs['copyrate'] if 'copyrate' in kwargs else '50' + + if 'source' not in kwargs: + return self._errors['CMMVC5707E'] + source = kwargs['source'].strip('\'\'') + if source not in self._volumes_list: + return self._errors['CMMVC5754E'] + + if 'target' not in kwargs: + return self._errors['CMMVC5707E'] + target = kwargs['target'].strip('\'\'') + if target not in self._volumes_list: + return self._errors['CMMVC5754E'] if source == target: - return self._errors["CMMVC6303E"] + return self._errors['CMMVC6303E'] - if (self._volumes_list[source]["cap_bytes"] != - self._volumes_list[target]["cap_bytes"]): - return self._errors["CMMVC5924E"] + if (self._volumes_list[source]['capacity'] != + self._volumes_list[target]['capacity']): + return self._errors['CMMVC5924E'] fcmap_info = {} - fcmap_info["source"] = source - fcmap_info["target"] = target - fcmap_info["id"] = self._find_unused_id(self._fcmappings_list) - fcmap_info["name"] = "fcmap" + fcmap_info["id"] - fcmap_info["status"] = "idle_or_copied" - fcmap_info["progress"] = "0" - self._fcmappings_list[target] = fcmap_info - - return("FlashCopy Mapping, id [" + fcmap_info["id"] + - "], successfully created", "") - - # Same function used for both prestartfcmap and startfcmap - def _cmd_gen_startfcmap(self, mode, **kwargs): - if "obj" not in kwargs: - return self._errors["CMMVC5701E"] - id_num = kwargs["obj"] - - if mode == "pre": - if self._next_cmd_error["prestartfcmap"] == "bad_id": - id_num = -1 - self._next_cmd_error["prestartfcmap"] = "" - else: - if self._next_cmd_error["startfcmap"] == "bad_id": - id_num = -1 - self._next_cmd_error["startfcmap"] = "" - - for k, fcmap in self._fcmappings_list.iteritems(): - if fcmap["id"] == id_num: - if mode == "pre": - 
fcmap["status"] = "preparing" - else: - fcmap["status"] = "copying" - fcmap["progress"] = "0" - return ("", "") - return self._errors["CMMVC5753E"] - - # Same function used for both stopfcmap and rmfcmap - # Assumes it is called with "-force " - def _cmd_stoprmfcmap(self, mode, **kwargs): - if "obj" not in kwargs: - return self._errors["CMMVC5701E"] - id_num = kwargs["obj"] - - if self._next_cmd_error["rmfcmap"] == "bad_id": + fcmap_info['source'] = source + fcmap_info['target'] = target + fcmap_info['id'] = self._find_unused_id(self._fcmappings_list) + fcmap_info['name'] = 'fcmap' + fcmap_info['id'] + fcmap_info['copyrate'] = copyrate + fcmap_info['progress'] = '0' + fcmap_info['autodelete'] = True if 'autodelete' in kwargs else False + fcmap_info['status'] = 'idle_or_copied' + self._fcmappings_list[fcmap_info['id']] = fcmap_info + + return('FlashCopy Mapping, id [' + fcmap_info['id'] + + '], successfully created', '') + + def _cmd_gen_prestartfcmap(self, **kwargs): + if 'obj' not in kwargs: + return self._errors['CMMVC5701E'] + id_num = kwargs['obj'] + + if self._next_cmd_error['prestartfcmap'] == 'bad_id': id_num = -1 - self._next_cmd_error["rmfcmap"] = "" + self._next_cmd_error['prestartfcmap'] = '' - to_delete = None - found = False - for k, fcmap in self._fcmappings_list.iteritems(): - if fcmap["id"] == id_num: - found = True - if mode == "rm": - to_delete = k + try: + fcmap = self._fcmappings_list[id_num] + except KeyError: + return self._errors['CMMVC5753E'] - if to_delete: - del self._fcmappings_list[to_delete] + return self._state_transition('prepare', fcmap) - if found: - return ("", "") - else: - return self._errors["CMMVC5753E"] + def _cmd_gen_startfcmap(self, **kwargs): + if 'obj' not in kwargs: + return self._errors['CMMVC5701E'] + id_num = kwargs['obj'] + + if self._next_cmd_error['startfcmap'] == 'bad_id': + id_num = -1 + self._next_cmd_error['startfcmap'] = '' + + try: + fcmap = self._fcmappings_list[id_num] + except KeyError: + return self._errors['CMMVC5753E'] + + return self._state_transition('start', fcmap) + + def _cmd_stopfcmap(self, **kwargs): + if 'obj' not in kwargs: + return self._errors['CMMVC5701E'] + id_num = kwargs['obj'] + + try: + fcmap = self._fcmappings_list[id_num] + except KeyError: + return self._errors['CMMVC5753E'] + + return self._state_transition('stop', fcmap) + + def _cmd_rmfcmap(self, **kwargs): + if 'obj' not in kwargs: + return self._errors['CMMVC5701E'] + id_num = kwargs['obj'] + force = True if 'force' in kwargs else False + + if self._next_cmd_error['rmfcmap'] == 'bad_id': + id_num = -1 + self._next_cmd_error['rmfcmap'] = '' + + try: + fcmap = self._fcmappings_list[id_num] + except KeyError: + return self._errors['CMMVC5753E'] + + function = 'delete_force' if force else 'delete' + ret = self._state_transition(function, fcmap) + if fcmap['status'] == 'end': + del self._fcmappings_list[id_num] + return ret + + def _cmd_lsvdiskfcmappings(self, **kwargs): + if 'obj' not in kwargs: + return self._errors['CMMVC5707E'] + vdisk = kwargs['obj'] + rows = [] + rows.append(['id', 'name']) + for v in self._fcmappings_list.itervalues(): + if v['source'] == vdisk or v['target'] == vdisk: + rows.append([v['id'], v['name']]) + return self._print_info_cmd(rows=rows, **kwargs) + + def _cmd_chfcmap(self, **kwargs): + if 'obj' not in kwargs: + return self._errors['CMMVC5707E'] + id_num = kwargs['obj'] + + try: + fcmap = self._fcmappings_list[id_num] + except KeyError: + return self._errors['CMMVC5753E'] + + for key in ['name', 'copyrate', 'autodelete']: + if 
key in kwargs: + fcmap[key] = kwargs[key] + return ('', '') def _cmd_lsfcmap(self, **kwargs): rows = [] - rows.append(["id", "name", "source_vdisk_id", "source_vdisk_name", - "target_vdisk_id", "target_vdisk_name", "group_id", - "group_name", "status", "progress", "copy_rate", - "clean_progress", "incremental", "partner_FC_id", - "partner_FC_name", "restoring", "start_time", - "rc_controlled"]) + rows.append(['id', 'name', 'source_vdisk_id', 'source_vdisk_name', + 'target_vdisk_id', 'target_vdisk_name', 'group_id', + 'group_name', 'status', 'progress', 'copy_rate', + 'clean_progress', 'incremental', 'partner_FC_id', + 'partner_FC_name', 'restoring', 'start_time', + 'rc_controlled']) # Assume we always get a filtervalue argument - filter_key = kwargs["filtervalue"].split("=")[0] - filter_value = kwargs["filtervalue"].split("=")[1] + filter_key = kwargs['filtervalue'].split('=')[0] + filter_value = kwargs['filtervalue'].split('=')[1] to_delete = [] for k, v in self._fcmappings_list.iteritems(): if str(v[filter_key]) == filter_value: - source = self._volumes_list[v["source"]] - target = self._volumes_list[v["target"]] - old_status = v["status"] - if old_status == "preparing": - new_status = "prepared" - if self._next_cmd_error["lsfcmap"] == "bogus_prepare": - new_status = "bogus" - elif (old_status == "copying") and (v["progress"] == "0"): - new_status = "copying" - v["progress"] = "50" - elif (old_status == "copying") and (v["progress"] == "50"): - new_status = "idle_or_copied" + source = self._volumes_list[v['source']] + target = self._volumes_list[v['target']] + self._state_transition('wait', v) + + if self._next_cmd_error['lsfcmap'] == 'speed_up': + self._next_cmd_error['lsfcmap'] = '' + curr_state = v['status'] + while self._state_transition('wait', v) == ("", ""): + if curr_state == v['status']: + break + curr_state = v['status'] + + if ((v['status'] == 'idle_or_copied' and v['autodelete'] and + v['progress'] == '100') or (v['status'] == 'end')): to_delete.append(k) else: - new_status = old_status - v["status"] = new_status - - if ((self._next_cmd_error["lsfcmap"] == "speed_up") or - (self._next_cmd_error["lsfcmap"] == "bogus_prepare")): - print_status = new_status - self._next_cmd_error["lsfcmap"] = "" - else: - print_status = old_status - - rows.append([v["id"], v["name"], source["id"], - source["name"], target["id"], target["name"], "", - "", print_status, v["progress"], "50", "100", - "off", "", "", "no", "", "no"]) + rows.append([v['id'], v['name'], source['id'], + source['name'], target['id'], target['name'], + '', '', v['status'], v['progress'], + v['copyrate'], '100', 'off', '', '', 'no', '', + 'no']) for d in to_delete: - del self._fcmappings_list[k] + del self._fcmappings_list[d] return self._print_info_cmd(rows=rows, **kwargs) + def _cmd_migratevdisk(self, **kwargs): + if 'mdiskgrp' not in kwargs or 'vdisk' not in kwargs: + return self._errors['CMMVC5707E'] + mdiskgrp = kwargs['mdiskgrp'].strip('\'\'') + vdisk = kwargs['vdisk'].strip('\'\'') + + if vdisk in self._volumes_list: + curr_mdiskgrp = self._volumes_list + else: + for pool in self._other_pools: + if vdisk in pool: + curr_mdiskgrp = pool + break + else: + return self._errors['CMMVC5754E'] + + if mdiskgrp == self._flags['storwize_svc_volpool_name']: + tgt_mdiskgrp = self._volumes_list + elif mdiskgrp == 'openstack2': + tgt_mdiskgrp = self._other_pools['openstack2'] + elif mdiskgrp == 'openstack3': + tgt_mdiskgrp = self._other_pools['openstack3'] + else: + return self._errors['CMMVC5754E'] + + if curr_mdiskgrp == 
tgt_mdiskgrp: + return self._errors['CMMVC6430E'] + + vol = curr_mdiskgrp[vdisk] + tgt_mdiskgrp[vdisk] = vol + del curr_mdiskgrp[vdisk] + return ('', '') + + def _cmd_addvdiskcopy(self, **kwargs): + if 'obj' not in kwargs: + return self._errors['CMMVC5701E'] + vol_name = kwargs['obj'].strip('\'\'') + if vol_name not in self._volumes_list: + return self._errors['CMMVC5753E'] + vol = self._volumes_list[vol_name] + if 'mdiskgrp' not in kwargs: + return self._errors['CMMVC5707E'] + mdiskgrp = kwargs['mdiskgrp'].strip('\'\'') + + copy_info = {} + copy_info['id'] = self._find_unused_id(vol['copies']) + copy_info['status'] = 'online' + copy_info['sync'] = 'no' + copy_info['primary'] = 'no' + copy_info['mdisk_grp_name'] = mdiskgrp + if mdiskgrp == self._flags['storwize_svc_volpool_name']: + copy_info['mdisk_grp_id'] = '1' + elif mdiskgrp == 'openstack2': + copy_info['mdisk_grp_id'] = '2' + elif mdiskgrp == 'openstack3': + copy_info['mdisk_grp_id'] = '3' + if 'easytier' in kwargs: + if kwargs['easytier'] == 'on': + copy_info['easy_tier'] = 'on' + else: + copy_info['easy_tier'] = 'off' + if 'rsize' in kwargs: + if 'compressed' in kwargs: + copy_info['compressed_copy'] = 'yes' + else: + copy_info['compressed_copy'] = 'no' + vol['copies'][copy_info['id']] = copy_info + return ('Vdisk [%(vid)s] copy [%(cid)s] successfully created' % + {'vid': vol['id'], 'cid': copy_info['id']}, '') + + def _cmd_lsvdiskcopy(self, **kwargs): + if 'obj' not in kwargs: + return self._errors['CMMVC5804E'] + name = kwargs['obj'] + vol = self._volumes_list[name] + rows = [] + rows.append(['vdisk_id', 'vdisk_name', 'copy_id', 'status', 'sync', + 'primary', 'mdisk_grp_id', 'mdisk_grp_name', 'capacity', + 'type', 'se_copy', 'easy_tier', 'easy_tier_status', + 'compressed_copy']) + for copy in vol['copies'].itervalues(): + rows.append([vol['id'], vol['name'], copy['id'], + copy['status'], copy['sync'], copy['primary'], + copy['mdisk_grp_id'], copy['mdisk_grp_name'], + vol['capacity'], 'striped', 'yes', copy['easy_tier'], + 'inactive', copy['compressed_copy']]) + if 'copy' not in kwargs: + return self._print_info_cmd(rows=rows, **kwargs) + else: + copy_id = kwargs['copy'].strip('\'\'') + if copy_id not in vol['copies']: + return self._errors['CMMVC6353E'] + copy = vol['copies'][copy_id] + rows = [] + rows.append(['vdisk_id', vol['id']]) + rows.append(['vdisk_name', vol['name']]) + rows.append(['capacity', vol['capacity']]) + rows.append(['copy_id', copy['id']]) + rows.append(['status', copy['status']]) + rows.append(['sync', copy['sync']]) + copy['sync'] = 'yes' + rows.append(['primary', copy['primary']]) + rows.append(['mdisk_grp_id', copy['mdisk_grp_id']]) + rows.append(['mdisk_grp_name', copy['mdisk_grp_name']]) + rows.append(['easy_tier', copy['easy_tier']]) + rows.append(['easy_tier_status', 'inactive']) + rows.append(['compressed_copy', copy['compressed_copy']]) + + if 'delim' in kwargs: + for index in range(len(rows)): + rows[index] = kwargs['delim'].join(rows[index]) + + return ('%s' % '\n'.join(rows), '') + + def _cmd_rmvdiskcopy(self, **kwargs): + if 'obj' not in kwargs: + return self._errors['CMMVC5701E'] + vol_name = kwargs['obj'].strip('\'\'') + if 'copy' not in kwargs: + return self._errors['CMMVC5707E'] + copy_id = kwargs['copy'].strip('\'\'') + if vol_name not in self._volumes_list: + return self._errors['CMMVC5753E'] + vol = self._volumes_list[vol_name] + if copy_id not in vol['copies']: + return self._errors['CMMVC6353E'] + del vol['copies'][copy_id] + + return ('', '') + + def _cmd_chvdisk(self, **kwargs): + if 
'obj' not in kwargs: + return self._errors['CMMVC5701E'] + vol_name = kwargs['obj'].strip('\'\'') + vol = self._volumes_list[vol_name] + kwargs.pop('obj') + + params = ['name', 'warning', 'udid', 'autoexpand', 'easytier'] + for key, value in kwargs.iteritems(): + if key == 'easytier': + vol['easy_tier'] = value + continue + if key == 'warning': + vol['warning'] = value.rstrip('%') + continue + if key in params: + vol[key] = value + else: + err = self._errors['CMMVC5709E'][1] % {'VALUE': key} + return ('', err) + return ('', '') + + def _cmd_movevdisk(self, **kwargs): + if 'obj' not in kwargs: + return self._errors['CMMVC5701E'] + vol_name = kwargs['obj'].strip('\'\'') + vol = self._volumes_list[vol_name] + + if 'iogrp' not in kwargs: + return self._errors['CMMVC5707E'] + + iogrp = kwargs['iogrp'] + if iogrp.isdigit(): + vol['IO_group_id'] = iogrp + vol['IO_group_name'] = 'io_grp%s' % iogrp + else: + vol['IO_group_id'] = iogrp[6:] + vol['IO_group_name'] = iogrp + return ('', '') + + def _add_host_to_list(self, connector): + host_info = {} + host_info['id'] = self._find_unused_id(self._hosts_list) + host_info['host_name'] = connector['host'] + host_info['iscsi_names'] = [] + host_info['wwpns'] = [] + if 'initiator' in connector: + host_info['iscsi_names'].append(connector['initiator']) + if 'wwpns' in connector: + host_info['wwpns'] = host_info['wwpns'] + connector['wwpns'] + self._hosts_list[connector['host']] = host_info + + def _host_in_list(self, host_name): + for k in self._hosts_list: + if k.startswith(host_name): + return k + return None + # The main function to run commands on the management simulator def execute_command(self, cmd, check_exit_code=True): try: kwargs = self._cmd_to_dict(cmd) except IndexError: - return self._errors["CMMVC5707E"] + return self._errors['CMMVC5707E'] - command = kwargs["cmd"] - del kwargs["cmd"] - arg_list = cmd.split() + command = kwargs['cmd'] + del kwargs['cmd'] - if command == "lsmdiskgrp": + if command == 'lsmdiskgrp': out, err = self._cmd_lsmdiskgrp(**kwargs) - elif command == "lsnodecanister": + elif command == 'lslicense': + out, err = self._cmd_lslicense(**kwargs) + elif command == 'lssystem': + out, err = self._cmd_lssystem(**kwargs) + elif command == 'lsnodecanister': out, err = self._cmd_lsnodecanister(**kwargs) - elif command == "lsportip": + elif command == 'lsnode': + out, err = self._cmd_lsnode(**kwargs) + elif command == 'lsportip': out, err = self._cmd_lsportip(**kwargs) - elif command == "mkvdisk": + elif command == 'lsfabric': + out, err = self._cmd_lsfabric(**kwargs) + elif command == 'mkvdisk': out, err = self._cmd_mkvdisk(**kwargs) - elif command == "rmvdisk": + elif command == 'rmvdisk': out, err = self._cmd_rmvdisk(**kwargs) - elif command == "lsvdisk": + elif command == 'expandvdisksize': + out, err = self._cmd_expandvdisksize(**kwargs) + elif command == 'lsvdisk': out, err = self._cmd_lsvdisk(**kwargs) - elif command == "mkhost": + elif command == 'lsiogrp': + out, err = self._cmd_lsiogrp(**kwargs) + elif command == 'mkhost': out, err = self._cmd_mkhost(**kwargs) - elif command == "rmhost": + elif command == 'addhostport': + out, err = self._cmd_addhostport(**kwargs) + elif command == 'chhost': + out, err = self._cmd_chhost(**kwargs) + elif command == 'rmhost': out, err = self._cmd_rmhost(**kwargs) - elif command == "lshost": + elif command == 'lshost': out, err = self._cmd_lshost(**kwargs) - elif command == "mkvdiskhostmap": + elif command == 'lsiscsiauth': + out, err = self._cmd_lsiscsiauth(**kwargs) + elif command == 
'mkvdiskhostmap': out, err = self._cmd_mkvdiskhostmap(**kwargs) - elif command == "rmvdiskhostmap": + elif command == 'rmvdiskhostmap': out, err = self._cmd_rmvdiskhostmap(**kwargs) - elif command == "lshostvdiskmap": + elif command == 'lshostvdiskmap': out, err = self._cmd_lshostvdiskmap(**kwargs) - elif command == "mkfcmap": + elif command == 'lsvdiskhostmap': + out, err = self._cmd_lsvdiskhostmap(**kwargs) + elif command == 'mkfcmap': out, err = self._cmd_mkfcmap(**kwargs) - elif command == "prestartfcmap": - out, err = self._cmd_gen_startfcmap(mode="pre", **kwargs) - elif command == "startfcmap": - out, err = self._cmd_gen_startfcmap(mode="start", **kwargs) - elif command == "stopfcmap": - out, err = self._cmd_stoprmfcmap(mode="stop", **kwargs) - elif command == "rmfcmap": - out, err = self._cmd_stoprmfcmap(mode="rm", **kwargs) - elif command == "lsfcmap": + elif command == 'prestartfcmap': + out, err = self._cmd_gen_prestartfcmap(**kwargs) + elif command == 'startfcmap': + out, err = self._cmd_gen_startfcmap(**kwargs) + elif command == 'stopfcmap': + out, err = self._cmd_stopfcmap(**kwargs) + elif command == 'rmfcmap': + out, err = self._cmd_rmfcmap(**kwargs) + elif command == 'chfcmap': + out, err = self._cmd_chfcmap(**kwargs) + elif command == 'lsfcmap': out, err = self._cmd_lsfcmap(**kwargs) + elif command == 'lsvdiskfcmappings': + out, err = self._cmd_lsvdiskfcmappings(**kwargs) + elif command == 'migratevdisk': + out, err = self._cmd_migratevdisk(**kwargs) + elif command == 'addvdiskcopy': + out, err = self._cmd_addvdiskcopy(**kwargs) + elif command == 'lsvdiskcopy': + out, err = self._cmd_lsvdiskcopy(**kwargs) + elif command == 'rmvdiskcopy': + out, err = self._cmd_rmvdiskcopy(**kwargs) + elif command == 'chvdisk': + out, err = self._cmd_chvdisk(**kwargs) + elif command == 'movevdisk': + out, err = self._cmd_movevdisk(**kwargs) else: - out, err = ("", "ERROR: Unsupported command") + out, err = ('', 'ERROR: Unsupported command') if (check_exit_code) and (len(err) != 0): - raise exception.ProcessExecutionError(exit_code=1, - stdout=out, - stderr=err, - cmd=' '.join(cmd)) + raise processutils.ProcessExecutionError(exit_code=1, + stdout=out, + stderr=err, + cmd=' '.join(cmd)) return (out, err) @@ -895,19 +1502,19 @@ def __init__(self, *args, **kwargs): def set_fake_storage(self, fake): self.fake_storage = fake - def _run_ssh(self, cmd, check_exit_code=True): + def _run_ssh(self, cmd, check_exit_code=True, attempts=1): try: LOG.debug(_('Run CLI command: %s') % cmd) ret = self.fake_storage.execute_command(cmd, check_exit_code) (stdout, stderr) = ret - LOG.debug(_('CLI output:\n stdout: %(out)s\n stderr: %(err)s') % - {'out': stdout, 'err': stderr}) + LOG.debug(_('CLI output:\n stdout: %(stdout)s\n stderr: ' + '%(stderr)s') % {'stdout': stdout, 'stderr': stderr}) - except exception.ProcessExecutionError as e: + except processutils.ProcessExecutionError as e: with excutils.save_and_reraise_exception(): LOG.debug(_('CLI Exception output:\n stdout: %(out)s\n ' 'stderr: %(err)s') % {'out': e.stdout, - 'err': e.stderr}) + 'err': e.stderr}) return ret @@ -915,250 +1522,295 @@ def _run_ssh(self, cmd, check_exit_code=True): class StorwizeSVCDriverTestCase(test.TestCase): def setUp(self): super(StorwizeSVCDriverTestCase, self).setUp() - self.USESIM = 1 - if self.USESIM == 1: - self.flags( - san_ip="hostname", - san_login="user", - san_password="pass", - storwize_svc_flashcopy_timeout="20", - ) - self.sim = StorwizeSVCManagementSimulator("volpool") - self.driver = StorwizeSVCFakeDriver() + 
self.USESIM = True + if self.USESIM: + self.driver = StorwizeSVCFakeDriver( + configuration=conf.Configuration(None)) + self._def_flags = {'san_ip': 'hostname', + 'san_login': 'user', + 'san_password': 'pass', + 'storwize_svc_volpool_name': 'openstack', + 'storwize_svc_flashcopy_timeout': 20, + # Test ignore capitalization + 'storwize_svc_connection_protocol': 'iScSi', + 'storwize_svc_multipath_enabled': False} + wwpns = [str(random.randint(0, 9999999999999999)).zfill(16), + str(random.randint(0, 9999999999999999)).zfill(16)] + initiator = 'test.initiator.%s' % str(random.randint(10000, 99999)) + self._connector = {'ip': '1.234.56.78', + 'host': 'storwize-svc-test', + 'wwpns': wwpns, + 'initiator': initiator} + self.sim = StorwizeSVCManagementSimulator('openstack') + self.driver.set_fake_storage(self.sim) else: - self.flags( - san_ip="-1.-1.-1.-1", - san_login="user", - san_password="password", - storwize_svc_volpool_name="pool", - ) - self.driver = storwize_svc.StorwizeSVCDriver() - + self.driver = storwize_svc.StorwizeSVCDriver( + configuration=conf.Configuration(None)) + self._def_flags = {'san_ip': '1.111.11.11', + 'san_login': 'user', + 'san_password': 'password', + 'storwize_svc_volpool_name': 'openstack', + # Test ignore capitalization + 'storwize_svc_connection_protocol': 'iScSi', + 'storwize_svc_multipath_enabled': False, + 'ssh_conn_timeout': 0} + config_group = self.driver.configuration.config_group + self.driver.configuration.set_override('rootwrap_config', + '/etc/cinder/rootwrap.conf', + config_group) + self._connector = utils.brick_get_connector_properties() + + self._reset_flags() + self.driver.db = StorwizeSVCFakeDB() self.driver.do_setup(None) self.driver.check_for_setup_error() - self.stubs.Set(storwize_svc.time, 'sleep', lambda s: None) - - def test_storwize_svc_volume_tests(self): - self.flags(storwize_svc_vol_rsize="-1") - volume = {} - volume["name"] = "test1_volume%s" % random.randint(10000, 99999) - volume["size"] = 10 - volume["id"] = 1 - self.driver.create_volume(volume) - # Make sure that the volume has been created - is_volume_defined = self.driver._is_volume_defined(volume["name"]) - self.assertEqual(is_volume_defined, True) - self.driver.delete_volume(volume) + self.sleeppatch = mock.patch('eventlet.greenthread.sleep') + self.sleeppatch.start() + self.driver._helpers.check_fcmapping_interval = 0 - if self.USESIM == 1: - self.flags(storwize_svc_vol_rsize="2%") - self.flags(storwize_svc_vol_compression=True) - self.driver.create_volume(volume) - is_volume_defined = self.driver._is_volume_defined(volume["name"]) - self.assertEqual(is_volume_defined, True) - self.driver.delete_volume(volume) - FLAGS.reset() + def tearDown(self): + if self.USESIM: + self.sleeppatch.stop() + super(StorwizeSVCDriverTestCase, self).tearDown() - def test_storwize_svc_ip_connectivity(self): - # Check for missing san_ip - self.flags(san_ip=None) - self.assertRaises(exception.InvalidInput, - self.driver._check_flags) + def _set_flag(self, flag, value): + group = self.driver.configuration.config_group + self.driver.configuration.set_override(flag, value, group) - if self.USESIM != 1: - # Check for invalid ip - self.flags(san_ip="-1.-1.-1.-1") - self.assertRaises(socket.gaierror, - self.driver.check_for_setup_error) + def _reset_flags(self): + self.driver.configuration.local_conf.reset() + for k, v in self._def_flags.iteritems(): + self._set_flag(k, v) - # Check for unreachable IP - self.flags(san_ip="1.1.1.1") - self.assertRaises(socket.error, - self.driver.check_for_setup_error) + def 
_assert_vol_exists(self, name, exists): + is_vol_defined = self.driver._helpers.is_vdisk_defined(name) + self.assertEqual(is_vol_defined, exists) def test_storwize_svc_connectivity(self): # Make sure we detect if the pool doesn't exist - no_exist_pool = "i-dont-exist-%s" % random.randint(10000, 99999) - self.flags(storwize_svc_volpool_name=no_exist_pool) + no_exist_pool = 'i-dont-exist-%s' % random.randint(10000, 99999) + self._set_flag('storwize_svc_volpool_name', no_exist_pool) self.assertRaises(exception.InvalidInput, - self.driver.check_for_setup_error) - FLAGS.reset() + self.driver.do_setup, None) + self._reset_flags() # Check the case where the user didn't configure IP addresses # as well as receiving unexpected results from the storage - if self.USESIM == 1: - self.sim.error_injection("lsnodecanister", "header_mismatch") + if self.USESIM: + self.sim.error_injection('lsnodecanister', 'header_mismatch') self.assertRaises(exception.VolumeBackendAPIException, - self.driver.check_for_setup_error) - self.sim.error_injection("lsnodecanister", "remove_field") + self.driver.do_setup, None) + self.sim.error_injection('lsnodecanister', 'remove_field') self.assertRaises(exception.VolumeBackendAPIException, - self.driver.check_for_setup_error) - self.sim.error_injection("lsportip", "ip_no_config") + self.driver.do_setup, None) + self.sim.error_injection('lsportip', 'header_mismatch') self.assertRaises(exception.VolumeBackendAPIException, - self.driver.check_for_setup_error) - self.sim.error_injection("lsportip", "header_mismatch") + self.driver.do_setup, None) + self.sim.error_injection('lsportip', 'remove_field') self.assertRaises(exception.VolumeBackendAPIException, - self.driver.check_for_setup_error) - self.sim.error_injection("lsportip", "remove_field") - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.check_for_setup_error) + self.driver.do_setup, None) # Check with bad parameters - self.flags(san_password=None) - self.flags(san_private_key=None) + self._set_flag('san_ip', '') + self.assertRaises(exception.InvalidInput, + self.driver.check_for_setup_error) + self._reset_flags() + + self._set_flag('san_password', None) + self._set_flag('san_private_key', None) + self.assertRaises(exception.InvalidInput, + self.driver.check_for_setup_error) + self._reset_flags() + + self._set_flag('storwize_svc_vol_rsize', 101) self.assertRaises(exception.InvalidInput, - self.driver._check_flags) - FLAGS.reset() + self.driver.check_for_setup_error) + self._reset_flags() - self.flags(storwize_svc_vol_rsize="invalid") + self._set_flag('storwize_svc_vol_warning', 101) self.assertRaises(exception.InvalidInput, - self.driver._check_flags) - FLAGS.reset() + self.driver.check_for_setup_error) + self._reset_flags() - self.flags(storwize_svc_vol_warning="invalid") + self._set_flag('storwize_svc_vol_grainsize', 42) self.assertRaises(exception.InvalidInput, - self.driver._check_flags) - FLAGS.reset() + self.driver.check_for_setup_error) + self._reset_flags() - self.flags(storwize_svc_vol_autoexpand="invalid") + self._set_flag('storwize_svc_flashcopy_timeout', 601) self.assertRaises(exception.InvalidInput, - self.driver._check_flags) - FLAGS.reset() + self.driver.check_for_setup_error) + self._reset_flags() - self.flags(storwize_svc_vol_grainsize=str(42)) + self._set_flag('storwize_svc_vol_compression', True) + self._set_flag('storwize_svc_vol_rsize', -1) self.assertRaises(exception.InvalidInput, - self.driver._check_flags) - FLAGS.reset() + self.driver.check_for_setup_error) + self._reset_flags() - 
self.flags(storwize_svc_flashcopy_timeout=str(601)) + self._set_flag('storwize_svc_connection_protocol', 'foo') self.assertRaises(exception.InvalidInput, - self.driver._check_flags) - FLAGS.reset() + self.driver.check_for_setup_error) + self._reset_flags() - self.flags(storwize_svc_vol_compression=True) - self.flags(storwize_svc_vol_rsize="-1") + self._set_flag('storwize_svc_vol_iogrp', 5) self.assertRaises(exception.InvalidInput, - self.driver._check_flags) - FLAGS.reset() + self.driver.check_for_setup_error) + self._reset_flags() + + if self.USESIM: + self.sim.error_injection('lslicense', 'no_compression') + self._set_flag('storwize_svc_vol_compression', True) + self.driver.do_setup(None) + self.assertRaises(exception.InvalidInput, + self.driver.check_for_setup_error) + self._reset_flags() # Finally, check with good parameters - self.driver.check_for_setup_error() + self.driver.do_setup(None) - def test_storwize_svc_flashcopy(self): - volume1 = {} - volume1["name"] = "test1_volume%s" % random.randint(10000, 99999) - volume1["size"] = 10 - volume1["id"] = 10 - self.driver.create_volume(volume1) + def _generate_vol_info(self, vol_name, vol_id): + rand_id = str(random.randint(10000, 99999)) + if vol_name: + return {'name': 'snap_volume%s' % rand_id, + 'volume_name': vol_name, + 'id': rand_id, + 'volume_id': vol_id, + 'volume_size': 10, + 'mdisk_grp_name': 'openstack'} + else: + return {'name': 'test_volume%s' % rand_id, + 'size': 10, + 'id': '%s' % rand_id, + 'volume_type_id': None, + 'mdisk_grp_name': 'openstack'} + + def _create_test_vol(self, opts): + ctxt = context.get_admin_context() + type_ref = volume_types.create(ctxt, 'testtype', opts) + volume = self._generate_vol_info(None, None) + type_id = type_ref['id'] + type_ref = volume_types.get_volume_type(ctxt, type_id) + volume['volume_type_id'] = type_id + volume['volume_type'] = type_ref + self.driver.create_volume(volume) + + attrs = self.driver._helpers.get_vdisk_attributes(volume['name']) + self.driver.delete_volume(volume) + volume_types.destroy(ctxt, type_ref['id']) + return attrs - snapshot = {} - snapshot["name"] = "snap_volume%s" % random.randint(10000, 99999) - snapshot["volume_name"] = volume1["name"] + def test_storwize_svc_snapshots(self): + vol1 = self._generate_vol_info(None, None) + self.driver.create_volume(vol1) + self.driver.db.volume_set(vol1) + snap1 = self._generate_vol_info(vol1['name'], vol1['id']) # Test timeout and volume cleanup - self.flags(storwize_svc_flashcopy_timeout=str(1)) - self.assertRaises(exception.InvalidSnapshot, - self.driver.create_snapshot, snapshot) - is_volume_defined = self.driver._is_volume_defined(snapshot["name"]) - self.assertEqual(is_volume_defined, False) - FLAGS.reset() - - # Test bogus statuses - if self.USESIM == 1: - self.sim.error_injection("lsfcmap", "bogus_prepare") + self._set_flag('storwize_svc_flashcopy_timeout', 1) + self.assertRaises(exception.VolumeDriverException, + self.driver.create_snapshot, snap1) + self._assert_vol_exists(snap1['name'], False) + self._reset_flags() + + # Test prestartfcmap failing + with mock.patch.object(ssh.StorwizeSSH, 'prestartfcmap') as prestart: + prestart.side_effect = exception.VolumeBackendAPIException self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_snapshot, snapshot) - - # Test prestartfcmap, startfcmap, and rmfcmap failing - if self.USESIM == 1: - self.sim.error_injection("prestartfcmap", "bad_id") - self.assertRaises(exception.ProcessExecutionError, - self.driver.create_snapshot, snapshot) - 
self.sim.error_injection("lsfcmap", "speed_up") - self.sim.error_injection("startfcmap", "bad_id") - self.assertRaises(exception.ProcessExecutionError, - self.driver.create_snapshot, snapshot) - self.sim.error_injection("prestartfcmap", "bad_id") - self.sim.error_injection("rmfcmap", "bad_id") - self.assertRaises(exception.ProcessExecutionError, - self.driver.create_snapshot, snapshot) + self.driver.create_snapshot, snap1) - # Test successful snapshot - self.driver.create_snapshot(snapshot) + if self.USESIM: + self.sim.error_injection('lsfcmap', 'speed_up') + self.sim.error_injection('startfcmap', 'bad_id') + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.create_snapshot, snap1) + self._assert_vol_exists(snap1['name'], False) + self.sim.error_injection('prestartfcmap', 'bad_id') + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.create_snapshot, snap1) + self._assert_vol_exists(snap1['name'], False) - # Ensure snapshot is defined - is_volume_defined = self.driver._is_volume_defined(snapshot["name"]) - self.assertEqual(is_volume_defined, True) + # Test successful snapshot + self.driver.create_snapshot(snap1) + self._assert_vol_exists(snap1['name'], True) # Try to create a snapshot from an non-existing volume - should fail - snapshot2 = {} - snapshot2["name"] = "snap_volume%s" % random.randint(10000, 99999) - snapshot2["volume_name"] = "undefined-vol" - self.assertRaises(exception.VolumeNotFound, + snap_novol = self._generate_vol_info('undefined-vol', '12345') + self.assertRaises(exception.VolumeDriverException, self.driver.create_snapshot, - snapshot2) - - # Create volume from snapshot - volume2 = {} - volume2["name"] = "snap2vol_volume%s" % random.randint(10000, 99999) - - # Create volume from snapshot into an existsing volume - self.assertRaises(exception.InvalidSnapshot, - self.driver.create_volume_from_snapshot, - volume1, - snapshot) + snap_novol) + + # We support deleting a volume that has snapshots, so delete the volume + # first + self.driver.delete_volume(vol1) + self.driver.delete_snapshot(snap1) + + def test_storwize_svc_create_volfromsnap_clone(self): + vol1 = self._generate_vol_info(None, None) + self.driver.create_volume(vol1) + self.driver.db.volume_set(vol1) + snap1 = self._generate_vol_info(vol1['name'], vol1['id']) + self.driver.create_snapshot(snap1) + vol2 = self._generate_vol_info(None, None) + vol3 = self._generate_vol_info(None, None) # Try to create a volume from a non-existing snapshot - self.assertRaises(exception.SnapshotNotFound, + snap_novol = self._generate_vol_info('undefined-vol', '12345') + vol_novol = self._generate_vol_info(None, None) + self.assertRaises(exception.VolumeDriverException, self.driver.create_volume_from_snapshot, - volume2, - snapshot2) + vol_novol, + snap_novol) # Fail the snapshot - if self.USESIM == 1: - self.sim.error_injection("prestartfcmap", "bad_id") - self.assertRaises(exception.ProcessExecutionError, - self.driver.create_volume_from_snapshot, volume2, snapshot) + with mock.patch.object(ssh.StorwizeSSH, 'prestartfcmap') as prestart: + prestart.side_effect = exception.VolumeBackendAPIException + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.create_volume_from_snapshot, + vol2, snap1) + self._assert_vol_exists(vol2['name'], False) + + # Try to create where source size != target size + vol2['size'] += 1 + self.assertRaises(exception.VolumeDriverException, + self.driver.create_volume_from_snapshot, + vol2, snap1) + self._assert_vol_exists(vol2['name'], False) + 
vol2['size'] -= 1 # Succeed - if self.USESIM == 1: - self.sim.error_injection("lsfcmap", "speed_up") - self.driver.create_volume_from_snapshot(volume2, snapshot) - - # Ensure volume is defined - is_volume_defined = self.driver._is_volume_defined(volume2["name"]) - self.assertEqual(is_volume_defined, True) - - self.driver._delete_volume(volume2, True) - self.driver._delete_snapshot(snapshot, True) - - # Check with target with different size - volume3 = {} - volume3["name"] = "test3_volume%s" % random.randint(10000, 99999) - volume3["size"] = 11 - volume3["id"] = 11 - self.driver.create_volume(volume3) - snapshot["name"] = volume3["name"] - self.assertRaises(exception.InvalidSnapshot, - self.driver.create_snapshot, snapshot) - self.driver._delete_volume(volume1, True) - self.driver._delete_volume(volume3, True) - - # Snapshot volume that doesn't exist - snapshot = {} - snapshot["name"] = "snap_volume%s" % random.randint(10000, 99999) - snapshot["volume_name"] = "no_exist" - self.assertRaises(exception.VolumeNotFound, - self.driver.create_snapshot, snapshot) + if self.USESIM: + self.sim.error_injection('lsfcmap', 'speed_up') + self.driver.create_volume_from_snapshot(vol2, snap1) + self._assert_vol_exists(vol2['name'], True) + + # Try to clone where source size != target size + vol3['size'] += 1 + self.assertRaises(exception.VolumeDriverException, + self.driver.create_cloned_volume, + vol3, vol2) + self._assert_vol_exists(vol3['name'], False) + vol3['size'] -= 1 + + if self.USESIM: + self.sim.error_injection('lsfcmap', 'speed_up') + self.driver.create_cloned_volume(vol3, vol2) + self._assert_vol_exists(vol3['name'], True) + + # Delete in the 'opposite' order to make sure it works + self.driver.delete_volume(vol3) + self._assert_vol_exists(vol3['name'], False) + self.driver.delete_volume(vol2) + self._assert_vol_exists(vol2['name'], False) + self.driver.delete_snapshot(snap1) + self._assert_vol_exists(snap1['name'], False) + self.driver.delete_volume(vol1) + self._assert_vol_exists(vol1['name'], False) def test_storwize_svc_volumes(self): # Create a first volume - volume = {} - volume["name"] = "test1_volume%s" % random.randint(10000, 99999) - volume["size"] = 10 - volume["id"] = 1 - + volume = self._generate_vol_info(None, None) self.driver.create_volume(volume) self.driver.ensure_export(None, volume) @@ -1168,18 +1820,19 @@ def test_storwize_svc_volumes(self): self.driver.remove_export(None, volume) # Make sure volume attributes are as they should be - attributes = self.driver._get_volume_attributes(volume["name"]) - attr_size = float(attributes["capacity"]) / 1073741824 # bytes to GB - self.assertEqual(attr_size, float(volume["size"])) - pool = storwize_svc.FLAGS.storwize_svc_volpool_name - self.assertEqual(attributes["mdisk_grp_name"], pool) + attributes = self.driver._helpers.get_vdisk_attributes(volume['name']) + attr_size = float(attributes['capacity']) / units.GiB # bytes to GB + self.assertEqual(attr_size, float(volume['size'])) + pool = self.driver.configuration.local_conf.storwize_svc_volpool_name + self.assertEqual(attributes['mdisk_grp_name'], pool) # Try to create the volume again (should fail) - self.assertRaises(exception.ProcessExecutionError, - self.driver.create_volume, volume) + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.create_volume, + volume) # Try to delete a volume that doesn't exist (should not fail) - vol_no_exist = {"name": "i_dont_exist"} + vol_no_exist = {'name': 'i_dont_exist'} self.driver.delete_volume(vol_no_exist) # Ensure export for 
volume that doesn't exist (should not fail) self.driver.ensure_export(None, vol_no_exist) @@ -1187,24 +1840,13 @@ def test_storwize_svc_volumes(self): # Delete the volume self.driver.delete_volume(volume) - def _create_test_vol(self): - volume = {} - volume["name"] = "testparam_volume%s" % random.randint(10000, 99999) - volume["size"] = 1 - volume["id"] = 1 - self.driver.create_volume(volume) - - attrs = self.driver._get_volume_attributes(volume["name"]) - self.driver.delete_volume(volume) - return attrs - def test_storwize_svc_volume_params(self): # Option test matrix # Option Value Covered by test # # rsize -1 1 - # rsize 2% 2,3 + # rsize 2 2,3 # warning 0 2 - # warning 80% 3 + # warning 80 3 # autoexpand True 2 # autoexpand False 3 # grainsize 32 2 @@ -1213,166 +1855,640 @@ def test_storwize_svc_volume_params(self): # compression False 2,3 # easytier True 1,3 # easytier False 2 - - # Test 1 - self.flags(storwize_svc_vol_rsize="-1") - self.flags(storwize_svc_vol_easytier=True) - attrs = self._create_test_vol() - self.assertEquals(attrs["free_capacity"], "0") - self.assertEquals(attrs["easy_tier"], "on") - FLAGS.reset() - - # Test 2 - self.flags(storwize_svc_vol_rsize="2%") - self.flags(storwize_svc_vol_compression=False) - self.flags(storwize_svc_vol_warning="0") - self.flags(storwize_svc_vol_autoexpand=True) - self.flags(storwize_svc_vol_grainsize="32") - self.flags(storwize_svc_vol_easytier=False) - attrs = self._create_test_vol() - self.assertNotEqual(attrs["capacity"], attrs["real_capacity"]) - self.assertEquals(attrs["compressed_copy"], "no") - self.assertEquals(attrs["warning"], "0") - self.assertEquals(attrs["autoexpand"], "on") - self.assertEquals(attrs["grainsize"], "32") - self.assertEquals(attrs["easy_tier"], "off") - FLAGS.reset() - - # Test 3 - self.flags(storwize_svc_vol_rsize="2%") - self.flags(storwize_svc_vol_compression=False) - self.flags(storwize_svc_vol_warning="80%") - self.flags(storwize_svc_vol_autoexpand=False) - self.flags(storwize_svc_vol_grainsize="256") - self.flags(storwize_svc_vol_easytier=True) - attrs = self._create_test_vol() - self.assertNotEqual(attrs["capacity"], attrs["real_capacity"]) - self.assertEquals(attrs["compressed_copy"], "no") - self.assertEquals(attrs["warning"], "80") - self.assertEquals(attrs["autoexpand"], "off") - self.assertEquals(attrs["grainsize"], "256") - self.assertEquals(attrs["easy_tier"], "on") - FLAGS.reset() - - # Test 4 - self.flags(storwize_svc_vol_rsize="2%") - self.flags(storwize_svc_vol_compression=True) - try: - attrs = self._create_test_vol() - self.assertNotEqual(attrs["capacity"], attrs["real_capacity"]) - self.assertEquals(attrs["compressed_copy"], "yes") - except exception.ProcessExecutionError as e: - if "CMMVC7050E" not in e.stderr: - raise exception.ProcessExecutionError(exit_code=e.exit_code, - stdout=e.stdout, - stderr=e.stderr, - cmd=e.cmd) - if self.USESIM == 1: - self.sim.error_injection("mkvdisk", "no_compression") - self.assertRaises(exception.ProcessExecutionError, - self._create_test_vol) - FLAGS.reset() + # iogrp 0 1 + # iogrp 1 2 + + opts_list = [] + chck_list = [] + opts_list.append({'rsize': -1, 'easytier': True, 'iogrp': 0}) + chck_list.append({'free_capacity': '0', 'easy_tier': 'on', + 'IO_group_id': '0'}) + test_iogrp = 1 if self.USESIM else 0 + opts_list.append({'rsize': 2, 'compression': False, 'warning': 0, + 'autoexpand': True, 'grainsize': 32, + 'easytier': False, 'iogrp': test_iogrp}) + chck_list.append({'-free_capacity': '0', 'compressed_copy': 'no', + 'warning': '0', 'autoexpand': 'on', 
+ 'grainsize': '32', 'easy_tier': 'off', + 'IO_group_id': str(test_iogrp)}) + opts_list.append({'rsize': 2, 'compression': False, 'warning': 80, + 'autoexpand': False, 'grainsize': 256, + 'easytier': True}) + chck_list.append({'-free_capacity': '0', 'compressed_copy': 'no', + 'warning': '80', 'autoexpand': 'off', + 'grainsize': '256', 'easy_tier': 'on'}) + opts_list.append({'rsize': 2, 'compression': True}) + chck_list.append({'-free_capacity': '0', + 'compressed_copy': 'yes'}) + + for idx in range(len(opts_list)): + attrs = self._create_test_vol(opts_list[idx]) + for k, v in chck_list[idx].iteritems(): + try: + if k[0] == '-': + k = k[1:] + self.assertNotEqual(attrs[k], v) + else: + self.assertEqual(attrs[k], v) + except processutils.ProcessExecutionError as e: + if 'CMMVC7050E' not in e.stderr: + raise def test_storwize_svc_unicode_host_and_volume_names(self): - volume1 = {} - volume1["name"] = u"unicode1_volume%s" % random.randint(10000, 99999) - volume1["size"] = 2 - volume1["id"] = 1 + # We'll check with iSCSI only - nothing protocol-dependednt here + self._set_flag('storwize_svc_connection_protocol', 'iSCSI') + self.driver.do_setup(None) + + rand_id = random.randint(10000, 99999) + volume1 = {'name': u'unicode1_volume%s' % rand_id, + 'size': 2, + 'id': 1, + 'volume_type_id': None} self.driver.create_volume(volume1) - # Make sure that the volumes have been created - is_volume_defined = self.driver._is_volume_defined(volume1["name"]) - self.assertEqual(is_volume_defined, True) - conn = {} - conn["initiator"] = u"unicode:init:%s" % random.randint(10000, 99999) - conn["ip"] = "10.10.10.10" # Bogus ip for testing + self._assert_vol_exists(volume1['name'], True) + + self.assertRaises(exception.VolumeDriverException, + self.driver._helpers.create_host, + {'host': 12345}) + + # Add a host first to make life interesting (this host and + # conn['host'] should be translated to the same prefix, and the + # initiator should differentiate + tmpconn1 = {'initiator': u'unicode:initiator1.%s' % rand_id, + 'ip': '10.10.10.10', + 'host': u'unicode.foo}.bar{.baz-%s' % rand_id} + self.driver._helpers.create_host(tmpconn1) + + # Add a host with a different prefix + tmpconn2 = {'initiator': u'unicode:initiator2.%s' % rand_id, + 'ip': '10.10.10.11', + 'host': u'unicode.hello.world-%s' % rand_id} + self.driver._helpers.create_host(tmpconn2) + + conn = {'initiator': u'unicode:initiator3.%s' % rand_id, + 'ip': '10.10.10.12', + 'host': u'unicode.foo}.bar}.baz-%s' % rand_id} self.driver.initialize_connection(volume1, conn) + host_name = self.driver._helpers.get_host_from_connector(conn) + self.assertIsNotNone(host_name) self.driver.terminate_connection(volume1, conn) + host_name = self.driver._helpers.get_host_from_connector(conn) + self.assertIsNone(host_name) self.driver.delete_volume(volume1) + # Clean up temporary hosts + for tmpconn in [tmpconn1, tmpconn2]: + host_name = self.driver._helpers.get_host_from_connector(tmpconn) + self.assertIsNotNone(host_name) + self.driver._helpers.delete_host(host_name) + + def test_storwize_svc_validate_connector(self): + conn_neither = {'host': 'host'} + conn_iscsi = {'host': 'host', 'initiator': 'foo'} + conn_fc = {'host': 'host', 'wwpns': 'bar'} + conn_both = {'host': 'host', 'initiator': 'foo', 'wwpns': 'bar'} + + self.driver._state['enabled_protocols'] = set(['iSCSI']) + self.driver.validate_connector(conn_iscsi) + self.driver.validate_connector(conn_both) + self.assertRaises(exception.VolumeDriverException, + self.driver.validate_connector, conn_fc) + 
self.assertRaises(exception.VolumeDriverException, + self.driver.validate_connector, conn_neither) + + self.driver._state['enabled_protocols'] = set(['FC']) + self.driver.validate_connector(conn_fc) + self.driver.validate_connector(conn_both) + self.assertRaises(exception.VolumeDriverException, + self.driver.validate_connector, conn_iscsi) + self.assertRaises(exception.VolumeDriverException, + self.driver.validate_connector, conn_neither) + + self.driver._state['enabled_protocols'] = set(['iSCSI', 'FC']) + self.driver.validate_connector(conn_iscsi) + self.driver.validate_connector(conn_fc) + self.driver.validate_connector(conn_both) + self.assertRaises(exception.VolumeDriverException, + self.driver.validate_connector, conn_neither) + def test_storwize_svc_host_maps(self): # Create two volumes to be used in mappings - volume1 = {} - volume1["name"] = "test1_volume%s" % random.randint(10000, 99999) - volume1["size"] = 2 - volume1["id"] = 1 + + ctxt = context.get_admin_context() + volume1 = self._generate_vol_info(None, None) self.driver.create_volume(volume1) - volume2 = {} - volume2["name"] = "test2_volume%s" % random.randint(10000, 99999) - volume2["size"] = 2 - volume2["id"] = 1 + volume2 = self._generate_vol_info(None, None) self.driver.create_volume(volume2) - # Check case where no hosts exist - if self.USESIM == 1: - ret = self.driver._get_host_from_iscsiname("foo") - self.assertEquals(ret, None) - ret = self.driver._is_host_defined("foo") - self.assertEquals(ret, False) - - # Make sure that the volumes have been created - is_volume_defined = self.driver._is_volume_defined(volume1["name"]) - self.assertEqual(is_volume_defined, True) - is_volume_defined = self.driver._is_volume_defined(volume2["name"]) - self.assertEqual(is_volume_defined, True) - - # Initialize connection from the first volume to a host - # Add some characters to the initiator name that should be converted - # when used for the host name - conn = {} - conn["initiator"] = "test:init:%s" % random.randint(10000, 99999) - conn["ip"] = "10.10.10.10" # Bogus ip for testing - self.driver.initialize_connection(volume1, conn) + # Create volume types that we created + types = {} + for protocol in ['FC', 'iSCSI']: + opts = {'storage_protocol': ' ' + protocol} + types[protocol] = volume_types.create(ctxt, protocol, opts) - # Initialize again, should notice it and do nothing - self.driver.initialize_connection(volume1, conn) + for protocol in ['FC', 'iSCSI']: + volume1['volume_type_id'] = types[protocol]['id'] + volume2['volume_type_id'] = types[protocol]['id'] - # Try to delete the 1st volume (should fail because it is mapped) - self.assertRaises(exception.ProcessExecutionError, - self.driver.delete_volume, volume1) + # Check case where no hosts exist + if self.USESIM: + ret = self.driver._helpers.get_host_from_connector( + self._connector) + self.assertIsNone(ret) + + # Make sure that the volumes have been created + self._assert_vol_exists(volume1['name'], True) + self._assert_vol_exists(volume2['name'], True) + + # Initialize connection from the first volume to a host + self.driver.initialize_connection(volume1, self._connector) + + # Initialize again, should notice it and do nothing + self.driver.initialize_connection(volume1, self._connector) + + # Try to delete the 1st volume (should fail because it is mapped) + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.delete_volume, + volume1) + + # Check bad output from lsfabric for the 2nd volume + if protocol == 'FC' and self.USESIM: + for error in 
['remove_field', 'header_mismatch']: + self.sim.error_injection('lsfabric', error) + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.initialize_connection, + volume2, self._connector) + + self.driver.terminate_connection(volume1, self._connector) + if self.USESIM: + ret = self.driver._helpers.get_host_from_connector( + self._connector) + self.assertIsNone(ret) + + # Check cases with no auth set for host + if self.USESIM: + for auth_enabled in [True, False]: + for host_exists in ['yes-auth', 'yes-noauth', 'no']: + self._set_flag('storwize_svc_iscsi_chap_enabled', + auth_enabled) + case = 'en' + str(auth_enabled) + 'ex' + str(host_exists) + conn_na = {'initiator': 'test:init:%s' % + random.randint(10000, 99999), + 'ip': '11.11.11.11', + 'host': 'host-%s' % case} + if host_exists.startswith('yes'): + self.sim._add_host_to_list(conn_na) + if host_exists == 'yes-auth': + kwargs = {'chapsecret': 'foo', + 'obj': conn_na['host']} + self.sim._cmd_chhost(**kwargs) + volume1['volume_type_id'] = types['iSCSI']['id'] + + init_ret = self.driver.initialize_connection(volume1, + conn_na) + host_name = self.sim._host_in_list(conn_na['host']) + chap_ret = self.driver._helpers.get_chap_secret_for_host( + host_name) + if auth_enabled or host_exists == 'yes-auth': + self.assertIn('auth_password', init_ret['data']) + self.assertIsNotNone(chap_ret) + else: + self.assertNotIn('auth_password', init_ret['data']) + self.assertIsNone(chap_ret) + self.driver.terminate_connection(volume1, conn_na) + self._set_flag('storwize_svc_iscsi_chap_enabled', True) # Test no preferred node - self.driver.terminate_connection(volume1, conn) - if self.USESIM == 1: - self.sim.error_injection("lsvdisk", "no_pref_node") - self.driver.initialize_connection(volume1, conn) + if self.USESIM: + self.sim.error_injection('lsvdisk', 'no_pref_node') + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.initialize_connection, + volume1, self._connector) # Initialize connection from the second volume to the host with no # preferred node set if in simulation mode, otherwise, just # another initialize connection. 
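# (A minimal standalone sketch of the simulator's one-shot error injection
# used throughout these tests; the class name, command output, and error
# text below are illustrative assumptions, not part of the patch. The real
# simulator keys _next_cmd_error by command name and clears the entry the
# first time the armed command runs, as _cmd_gen_prestartfcmap does above.)
class OneShotErrorSimSketch(object):
    def __init__(self):
        self._next_cmd_error = {'lsvdisk': ''}

    def error_injection(self, cmd, error):
        # Arm a single fault for the next invocation of 'cmd'.
        self._next_cmd_error[cmd] = error

    def _cmd_lsvdisk(self, **kwargs):
        if self._next_cmd_error['lsvdisk'] == 'no_pref_node':
            self._next_cmd_error['lsvdisk'] = ''  # self-clearing: fires once
            return ('', 'simulated error: preferred node field missing')
        return ('normal lsvdisk output', '')

# sketch = OneShotErrorSimSketch()
# sketch.error_injection('lsvdisk', 'no_pref_node')
# sketch._cmd_lsvdisk()  # -> injected error, exactly once
# sketch._cmd_lsvdisk()  # -> normal output again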
- if self.USESIM == 1: - self.sim.error_injection("lsvdisk", "blank_pref_node") - self.driver.initialize_connection(volume2, conn) + if self.USESIM: + self.sim.error_injection('lsvdisk', 'blank_pref_node') + self.driver.initialize_connection(volume2, self._connector) # Try to remove connection from host that doesn't exist (should fail) - conn_no_exist = {"initiator": "i_dont_exist"} - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.terminate_connection, volume1, conn_no_exist) + conn_no_exist = self._connector.copy() + conn_no_exist['initiator'] = 'i_dont_exist' + conn_no_exist['wwpns'] = ['0000000000000000'] + self.assertRaises(exception.VolumeDriverException, + self.driver.terminate_connection, + volume1, + conn_no_exist) # Try to remove connection from volume that isn't mapped (should print # message but NOT fail) - vol_no_exist = {"name": "i_dont_exist"} - self.driver.terminate_connection(vol_no_exist, conn) + unmapped_vol = self._generate_vol_info(None, None) + self.driver.create_volume(unmapped_vol) + self.driver.terminate_connection(unmapped_vol, self._connector) + self.driver.delete_volume(unmapped_vol) # Remove the mapping from the 1st volume and delete it - self.driver.terminate_connection(volume1, conn) + self.driver.terminate_connection(volume1, self._connector) self.driver.delete_volume(volume1) - vol_def = self.driver._is_volume_defined(volume1["name"]) - self.assertEqual(vol_def, False) + self._assert_vol_exists(volume1['name'], False) # Make sure our host still exists - host_name = self.driver._get_host_from_iscsiname(conn["initiator"]) - host_def = self.driver._is_host_defined(host_name) - self.assertEquals(host_def, True) + host_name = self.driver._helpers.get_host_from_connector( + self._connector) + self.assertIsNotNone(host_name) - # Remove the mapping from the 2nd volume and delete it. The host should + # Remove the mapping from the 2nd volume. The host should # be automatically removed because there are no more mappings. 
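# (A hedged sketch of the "remove the host along with its last mapping"
# behavior the comment above describes. Only delete_host appears verbatim
# in these tests; the function below and the other two helper names are
# illustrative assumptions about how such cleanup could be arranged.)
def terminate_and_reap_host(helpers, vol_name, host_name):
    # Drop this volume's mapping first (assumed helper name).
    helpers.unmap_vol_from_host(vol_name, host_name)
    # If nothing else is mapped, the host record existed only for this
    # attachment, so remove it too (assumed helper name for the check).
    if not helpers.get_host_mappings(host_name):
        helpers.delete_host(host_name)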
- self.driver.terminate_connection(volume2, conn) + self.driver.terminate_connection(volume2, self._connector) + + # Check if we successfully terminate connections when the host is not + # specified (see bug #1244257) + fake_conn = {'ip': '127.0.0.1', 'initiator': 'iqn.fake'} + self.driver.initialize_connection(volume2, self._connector) + host_name = self.driver._helpers.get_host_from_connector( + self._connector) + self.assertIsNotNone(host_name) + self.driver.terminate_connection(volume2, fake_conn) + host_name = self.driver._helpers.get_host_from_connector( + self._connector) + self.assertIsNone(host_name) self.driver.delete_volume(volume2) - vol_def = self.driver._is_volume_defined(volume2["name"]) - self.assertEqual(vol_def, False) + self._assert_vol_exists(volume2['name'], False) + + # Delete volume types that we created + for protocol in ['FC', 'iSCSI']: + volume_types.destroy(ctxt, types[protocol]['id']) # Check if our host still exists (it should not) - ret = self.driver._get_host_from_iscsiname(conn["initiator"]) - self.assertEquals(ret, None) - ret = self.driver._is_host_defined(host_name) - self.assertEquals(ret, False) + if self.USESIM: + ret = self.driver._helpers.get_host_from_connector(self._connector) + self.assertIsNone(ret) + + def test_storwize_svc_multi_host_maps(self): + # We can't test connecting to multiple hosts from a single host when + # using real storage + if not self.USESIM: + return + + # Create a volume to be used in mappings + ctxt = context.get_admin_context() + volume = self._generate_vol_info(None, None) + self.driver.create_volume(volume) + + # Create volume types for protocols + types = {} + for protocol in ['FC', 'iSCSI']: + opts = {'storage_protocol': ' ' + protocol} + types[protocol] = volume_types.create(ctxt, protocol, opts) + + # Create a connector for the second 'host' + wwpns = [str(random.randint(0, 9999999999999999)).zfill(16), + str(random.randint(0, 9999999999999999)).zfill(16)] + initiator = 'test.initiator.%s' % str(random.randint(10000, 99999)) + conn2 = {'ip': '1.234.56.79', + 'host': 'storwize-svc-test2', + 'wwpns': wwpns, + 'initiator': initiator} + + for protocol in ['FC', 'iSCSI']: + volume['volume_type_id'] = types[protocol]['id'] + + # Make sure that the volume has been created + self._assert_vol_exists(volume['name'], True) + + self.driver.initialize_connection(volume, self._connector) + + self._set_flag('storwize_svc_multihostmap_enabled', False) + self.assertRaises(exception.CinderException, + self.driver.initialize_connection, volume, conn2) + + self._set_flag('storwize_svc_multihostmap_enabled', True) + self.driver.initialize_connection(volume, conn2) + + self.driver.terminate_connection(volume, conn2) + self.driver.terminate_connection(volume, self._connector) + + def test_storwize_svc_delete_volume_snapshots(self): + # Create a volume with two snapshots + master = self._generate_vol_info(None, None) + self.driver.create_volume(master) + self.driver.db.volume_set(master) + + # Fail creating a snapshot - will force delete the snapshot + if self.USESIM and False: + snap = self._generate_vol_info(master['name'], master['id']) + self.sim.error_injection('startfcmap', 'bad_id') + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.create_snapshot, snap) + self._assert_vol_exists(snap['name'], False) + + # Delete a snapshot + snap = self._generate_vol_info(master['name'], master['id']) + self.driver.create_snapshot(snap) + self._assert_vol_exists(snap['name'], True) + self.driver.delete_snapshot(snap) + 
self._assert_vol_exists(snap['name'], False)
+
+        # Delete a volume with snapshots (regular)
+        snap = self._generate_vol_info(master['name'], master['id'])
+        self.driver.create_snapshot(snap)
+        self._assert_vol_exists(snap['name'], True)
+        self.driver.delete_volume(master)
+        self._assert_vol_exists(master['name'], False)
+
+        # Fail create volume from snapshot - will force delete the volume
+        if self.USESIM:
+            volfs = self._generate_vol_info(None, None)
+            self.sim.error_injection('startfcmap', 'bad_id')
+            self.sim.error_injection('lsfcmap', 'speed_up')
+            self.assertRaises(exception.VolumeBackendAPIException,
+                              self.driver.create_volume_from_snapshot,
+                              volfs, snap)
+            self._assert_vol_exists(volfs['name'], False)
+
+        # Create volume from snapshot and delete it
+        volfs = self._generate_vol_info(None, None)
+        if self.USESIM:
+            self.sim.error_injection('lsfcmap', 'speed_up')
+        self.driver.create_volume_from_snapshot(volfs, snap)
+        self._assert_vol_exists(volfs['name'], True)
+        self.driver.delete_volume(volfs)
+        self._assert_vol_exists(volfs['name'], False)
+
+        # Create volume from snapshot and delete the snapshot
+        volfs = self._generate_vol_info(None, None)
+        if self.USESIM:
+            self.sim.error_injection('lsfcmap', 'speed_up')
+        self.driver.create_volume_from_snapshot(volfs, snap)
+        self.driver.delete_snapshot(snap)
+        self._assert_vol_exists(snap['name'], False)
+
+        # Fail create clone - will force delete the target volume
+        if self.USESIM:
+            clone = self._generate_vol_info(None, None)
+            self.sim.error_injection('startfcmap', 'bad_id')
+            self.sim.error_injection('lsfcmap', 'speed_up')
+            self.assertRaises(exception.VolumeBackendAPIException,
+                              self.driver.create_cloned_volume,
+                              clone, volfs)
+            self._assert_vol_exists(clone['name'], False)
+
+        # Create the clone, delete the source and target
+        clone = self._generate_vol_info(None, None)
+        if self.USESIM:
+            self.sim.error_injection('lsfcmap', 'speed_up')
+        self.driver.create_cloned_volume(clone, volfs)
+        self._assert_vol_exists(clone['name'], True)
+        self.driver.delete_volume(volfs)
+        self._assert_vol_exists(volfs['name'], False)
+        self.driver.delete_volume(clone)
+        self._assert_vol_exists(clone['name'], False)
+
+    # Not defined in python 2.6, so define here...
+    def assertLessEqual(self, a, b, msg=None):
+        if not a <= b:
+            self.fail('%s not less than or equal to %s' % (repr(a), repr(b)))
+
+    def test_storwize_svc_get_volume_stats(self):
+        self._set_flag('reserved_percentage', 25)
+        stats = self.driver.get_volume_stats()
+        self.assertLessEqual(stats['free_capacity_gb'],
+                             stats['total_capacity_gb'])
+        self.assertEqual(stats['reserved_percentage'], 25)
+        pool = self.driver.configuration.local_conf.storwize_svc_volpool_name
+        if self.USESIM:
+            expected = 'storwize-svc-sim_' + pool
+            self.assertEqual(stats['volume_backend_name'], expected)
+            self.assertAlmostEqual(stats['total_capacity_gb'], 3328.0)
+            self.assertAlmostEqual(stats['free_capacity_gb'], 3287.5)
+
+    def test_storwize_svc_extend_volume(self):
+        volume = self._generate_vol_info(None, None)
+        self.driver.db.volume_set(volume)
+        self.driver.create_volume(volume)
+        self.driver.extend_volume(volume, '13')
+        attrs = self.driver._helpers.get_vdisk_attributes(volume['name'])
+        vol_size = int(attrs['capacity']) / units.GiB
+        self.assertAlmostEqual(vol_size, 13)
+
+        snap = self._generate_vol_info(volume['name'], volume['id'])
+        self.driver.create_snapshot(snap)
+        self._assert_vol_exists(snap['name'], True)
+        self.assertRaises(exception.VolumeDriverException,
+                          self.driver.extend_volume, volume, '16')
+
+        self.driver.delete_snapshot(snap)
+        self.driver.delete_volume(volume)
+
+    def _check_loc_info(self, capabilities, expected):
+        host = {'host': 'foo', 'capabilities': capabilities}
+        vol = {'name': 'test', 'id': 1, 'size': 1}
+        ctxt = context.get_admin_context()
+        moved, model_update = self.driver.migrate_volume(ctxt, vol, host)
+        self.assertEqual(moved, expected['moved'])
+        self.assertEqual(model_update, expected['model_update'])
+
+    def test_storwize_svc_migrate_bad_loc_info(self):
+        self._check_loc_info({}, {'moved': False, 'model_update': None})
+        cap = {'location_info': 'foo'}
+        self._check_loc_info(cap, {'moved': False, 'model_update': None})
+        cap = {'location_info': 'FooDriver:foo:bar'}
+        self._check_loc_info(cap, {'moved': False, 'model_update': None})
+        cap = {'location_info': 'StorwizeSVCDriver:foo:bar'}
+        self._check_loc_info(cap, {'moved': False, 'model_update': None})
+
+    def test_storwize_svc_migrate_same_extent_size(self):
+        # Make sure we don't call migrate_volume_vdiskcopy
+        with mock.patch.object(self.driver._helpers,
+                               'migrate_volume_vdiskcopy') as migr_vdiskcopy:
+            migr_vdiskcopy.side_effect = KeyError
+            self.driver.do_setup(None)
+            loc = ('StorwizeSVCDriver:' + self.driver._state['system_id'] +
+                   ':openstack2')
+            cap = {'location_info': loc, 'extent_size': '256'}
+            host = {'host': 'foo', 'capabilities': cap}
+            ctxt = context.get_admin_context()
+            volume = self._generate_vol_info(None, None)
+            volume['volume_type_id'] = None
+            self.driver.create_volume(volume)
+            self.driver.migrate_volume(ctxt, volume, host)
+            self.driver.delete_volume(volume)
+
+    def test_storwize_svc_migrate_diff_extent_size(self):
+        self.driver.do_setup(None)
+        loc = ('StorwizeSVCDriver:' + self.driver._state['system_id'] +
+               ':openstack3')
+        cap = {'location_info': loc, 'extent_size': '128'}
+        host = {'host': 'foo', 'capabilities': cap}
+        ctxt = context.get_admin_context()
+        volume = self._generate_vol_info(None, None)
+        volume['volume_type_id'] = None
+        self.driver.create_volume(volume)
+        self.assertNotEqual(cap['extent_size'],
+                            self.driver._state['extent_size'])
+        self.driver.migrate_volume(ctxt, volume, host)
+        attrs = self.driver._helpers.get_vdisk_attributes(volume['name'])
+        self.assertIn('openstack3', attrs['mdisk_grp_name'])
+        self.driver.delete_volume(volume)
+
+    def test_storwize_svc_retype_no_copy(self):
+        self.driver.do_setup(None)
+        loc = ('StorwizeSVCDriver:' + self.driver._state['system_id'] +
+               ':openstack')
+        cap = {'location_info': loc, 'extent_size': '128'}
+        self.driver._stats = {'location_info': loc}
+        host = {'host': 'foo', 'capabilities': cap}
+        ctxt = context.get_admin_context()
+
+        key_specs_old = {'easytier': False, 'warning': 2, 'autoexpand': True}
+        key_specs_new = {'easytier': True, 'warning': 5, 'autoexpand': False}
+        old_type_ref = volume_types.create(ctxt, 'old', key_specs_old)
+        new_type_ref = volume_types.create(ctxt, 'new', key_specs_new)
+
+        diff, equal = volume_types.volume_types_diff(ctxt, old_type_ref['id'],
+                                                     new_type_ref['id'])
+
+        volume = self._generate_vol_info(None, None)
+        old_type = volume_types.get_volume_type(ctxt, old_type_ref['id'])
+        volume['volume_type'] = old_type
+        volume['host'] = host
+        new_type = volume_types.get_volume_type(ctxt, new_type_ref['id'])
+
+        self.driver.create_volume(volume)
+        self.driver.retype(ctxt, volume, new_type, diff, host)
+        attrs = self.driver._helpers.get_vdisk_attributes(volume['name'])
+        self.assertEqual('on', attrs['easy_tier'], 'Volume retype failed')
+        self.assertEqual('5', attrs['warning'], 'Volume retype failed')
+        self.assertEqual('off', attrs['autoexpand'], 'Volume retype failed')
+        self.driver.delete_volume(volume)
+
+    def test_storwize_svc_retype_only_change_iogrp(self):
+        self.driver.do_setup(None)
+        loc = ('StorwizeSVCDriver:' + self.driver._state['system_id'] +
+               ':openstack')
+        cap = {'location_info': loc, 'extent_size': '128'}
+        self.driver._stats = {'location_info': loc}
+        host = {'host': 'foo', 'capabilities': cap}
+        ctxt = context.get_admin_context()
+
+        key_specs_old = {'iogrp': 0}
+        key_specs_new = {'iogrp': 1}
+        old_type_ref = volume_types.create(ctxt, 'old', key_specs_old)
+        new_type_ref = volume_types.create(ctxt, 'new', key_specs_new)
+
+        diff, equal = volume_types.volume_types_diff(ctxt, old_type_ref['id'],
+                                                     new_type_ref['id'])
+
+        volume = self._generate_vol_info(None, None)
+        old_type = volume_types.get_volume_type(ctxt, old_type_ref['id'])
+        volume['volume_type'] = old_type
+        volume['host'] = host
+        new_type = volume_types.get_volume_type(ctxt, new_type_ref['id'])
+
+        self.driver.create_volume(volume)
+        self.driver.retype(ctxt, volume, new_type, diff, host)
+        attrs = self.driver._helpers.get_vdisk_attributes(volume['name'])
+        self.assertEqual('1', attrs['IO_group_id'], 'Volume retype '
+                         'failed')
+        self.driver.delete_volume(volume)
+
+    def test_storwize_svc_retype_need_copy(self):
+        self.driver.do_setup(None)
+        loc = ('StorwizeSVCDriver:' + self.driver._state['system_id'] +
+               ':openstack')
+        cap = {'location_info': loc, 'extent_size': '128'}
+        self.driver._stats = {'location_info': loc}
+        host = {'host': 'foo', 'capabilities': cap}
+        ctxt = context.get_admin_context()
+
+        key_specs_old = {'compression': True}
+        key_specs_new = {'compression': False}
+        old_type_ref = volume_types.create(ctxt, 'old', key_specs_old)
+        new_type_ref = volume_types.create(ctxt, 'new', key_specs_new)
+
+        diff, equal = volume_types.volume_types_diff(ctxt, old_type_ref['id'],
+                                                     new_type_ref['id'])
+
+        volume = self._generate_vol_info(None, None)
+        old_type = volume_types.get_volume_type(ctxt, old_type_ref['id'])
+        volume['volume_type'] = old_type
+        volume['host'] = host
+        new_type = volume_types.get_volume_type(ctxt, new_type_ref['id'])
+
+
self.driver.create_volume(volume) + self.driver.retype(ctxt, volume, new_type, diff, host) + attrs = self.driver._helpers.get_vdisk_attributes(volume['name']) + self.assertEqual('no', attrs['compressed_copy']) + self.driver.delete_volume(volume) + + def test_set_storage_code_level_success(self): + res = self.driver._helpers.get_system_info() + self.assertEqual((7, 2, 0, 0), res['code_level'], + 'Get code level error') + + +class CLIResponseTestCase(test.TestCase): + def test_empty(self): + self.assertEqual(0, len(ssh.CLIResponse(''))) + self.assertEqual(0, len(ssh.CLIResponse(('', 'stderr')))) + + def test_header(self): + raw = r'''id!name +1!node1 +2!node2 +''' + resp = ssh.CLIResponse(raw, with_header=True) + self.assertEqual(2, len(resp)) + self.assertEqual('1', resp[0]['id']) + self.assertEqual('2', resp[1]['id']) + + def test_select(self): + raw = r'''id!123 +name!Bill +name!Bill2 +age!30 +home address!s1 +home address!s2 + +id! 7 +name!John +name!John2 +age!40 +home address!s3 +home address!s4 +''' + resp = ssh.CLIResponse(raw, with_header=False) + self.assertEqual(list(resp.select('home address', 'name', + 'home address')), + [('s1', 'Bill', 's1'), ('s2', 'Bill2', 's2'), + ('s3', 'John', 's3'), ('s4', 'John2', 's4')]) + + def test_lsnode_all(self): + raw = r'''id!name!UPS_serial_number!WWNN!status +1!node1!!500507680200C744!online +2!node2!!500507680200C745!online +''' + resp = ssh.CLIResponse(raw) + self.assertEqual(2, len(resp)) + self.assertEqual('1', resp[0]['id']) + self.assertEqual('500507680200C744', resp[0]['WWNN']) + self.assertEqual('2', resp[1]['id']) + self.assertEqual('500507680200C745', resp[1]['WWNN']) + + def test_lsnode_single(self): + raw = r'''id!1 +port_id!500507680210C744 +port_status!active +port_speed!8Gb +port_id!500507680240C744 +port_status!inactive +port_speed!8Gb +''' + resp = ssh.CLIResponse(raw, with_header=False) + self.assertEqual(1, len(resp)) + self.assertEqual('1', resp[0]['id']) + self.assertEqual(list(resp.select('port_id', 'port_status')), + [('500507680210C744', 'active'), + ('500507680240C744', 'inactive')]) diff --git a/cinder/tests/test_test.py b/cinder/tests/test_test.py index be17f39167..b4c1af7abb 100644 --- a/cinder/tests/test_test.py +++ b/cinder/tests/test_test.py @@ -1,4 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. diff --git a/cinder/tests/test_test_utils.py b/cinder/tests/test_test_utils.py index 06e38a42b7..6887913d77 100644 --- a/cinder/tests/test_test_utils.py +++ b/cinder/tests/test_test_utils.py @@ -1,6 +1,5 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 # -# Copyright 2010 OpenStack LLC +# Copyright 2010 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain @@ -20,7 +19,7 @@ class TestUtilsTestCase(test.TestCase): def test_get_test_admin_context(self): - """get_test_admin_context's return value behaves like admin context""" + """get_test_admin_context's return value behaves like admin context.""" ctxt = test_utils.get_test_admin_context() # TODO(soren): This should verify the full interface context diff --git a/cinder/tests/test_utils.py b/cinder/tests/test_utils.py index 92be797b8f..380c135222 100644 --- a/cinder/tests/test_utils.py +++ b/cinder/tests/test_utils.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2011 Justin Santa Barbara # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -14,25 +12,31 @@ # License for the specific language governing permissions and limitations # under the License. + import __builtin__ import datetime import hashlib import os -import os.path +import socket import StringIO import tempfile +import uuid import mox +from oslo.config import cfg +import paramiko import cinder +from cinder.brick.initiator import connector +from cinder.brick.initiator import linuxfc from cinder import exception -from cinder import flags +from cinder.openstack.common import processutils as putils from cinder.openstack.common import timeutils from cinder import test from cinder import utils -FLAGS = flags.FLAGS +CONF = cfg.CONF class ExecuteTestCase(test.TestCase): @@ -62,8 +66,8 @@ def test_retry_on_failure(self): exit 1 ''') fp.close() - os.chmod(tmpfilename, 0755) - self.assertRaises(exception.ProcessExecutionError, + os.chmod(tmpfilename, 0o755) + self.assertRaises(putils.ProcessExecutionError, utils.execute, tmpfilename, tmpfilename2, attempts=10, process_input='foo', @@ -71,25 +75,24 @@ def test_retry_on_failure(self): fp = open(tmpfilename2, 'r+') runs = fp.read() fp.close() - self.assertNotEquals(runs.strip(), 'failure', 'stdin did not ' - 'always get passed ' - 'correctly') + self.assertNotEqual(runs.strip(), 'failure', 'stdin did not ' + 'always get passed ' + 'correctly') runs = int(runs.strip()) - self.assertEquals(runs, 10, - 'Ran %d times instead of 10.' % (runs,)) + self.assertEqual(runs, 10, 'Ran %d times instead of 10.' 
% (runs,)) finally: os.unlink(tmpfilename) os.unlink(tmpfilename2) def test_unknown_kwargs_raises_error(self): - self.assertRaises(exception.Error, + self.assertRaises(putils.UnknownArgumentError, utils.execute, '/usr/bin/env', 'true', this_is_not_a_valid_kwarg=True) def test_check_exit_code_boolean(self): utils.execute('/usr/bin/env', 'false', check_exit_code=False) - self.assertRaises(exception.ProcessExecutionError, + self.assertRaises(putils.ProcessExecutionError, utils.execute, '/usr/bin/env', 'false', check_exit_code=True) @@ -107,7 +110,7 @@ def test_no_retry_on_success(self): grep foo ''') fp.close() - os.chmod(tmpfilename, 0755) + os.chmod(tmpfilename, 0o755) utils.execute(tmpfilename, tmpfilename2, process_input='foo', @@ -122,104 +125,104 @@ def test_tolerates_nones(self): f = utils.get_from_path input = [] - self.assertEquals([], f(input, "a")) - self.assertEquals([], f(input, "a/b")) - self.assertEquals([], f(input, "a/b/c")) + self.assertEqual([], f(input, "a")) + self.assertEqual([], f(input, "a/b")) + self.assertEqual([], f(input, "a/b/c")) input = [None] - self.assertEquals([], f(input, "a")) - self.assertEquals([], f(input, "a/b")) - self.assertEquals([], f(input, "a/b/c")) + self.assertEqual([], f(input, "a")) + self.assertEqual([], f(input, "a/b")) + self.assertEqual([], f(input, "a/b/c")) input = [{'a': None}] - self.assertEquals([], f(input, "a")) - self.assertEquals([], f(input, "a/b")) - self.assertEquals([], f(input, "a/b/c")) + self.assertEqual([], f(input, "a")) + self.assertEqual([], f(input, "a/b")) + self.assertEqual([], f(input, "a/b/c")) input = [{'a': {'b': None}}] - self.assertEquals([{'b': None}], f(input, "a")) - self.assertEquals([], f(input, "a/b")) - self.assertEquals([], f(input, "a/b/c")) + self.assertEqual([{'b': None}], f(input, "a")) + self.assertEqual([], f(input, "a/b")) + self.assertEqual([], f(input, "a/b/c")) input = [{'a': {'b': {'c': None}}}] - self.assertEquals([{'b': {'c': None}}], f(input, "a")) - self.assertEquals([{'c': None}], f(input, "a/b")) - self.assertEquals([], f(input, "a/b/c")) + self.assertEqual([{'b': {'c': None}}], f(input, "a")) + self.assertEqual([{'c': None}], f(input, "a/b")) + self.assertEqual([], f(input, "a/b/c")) input = [{'a': {'b': {'c': None}}}, {'a': None}] - self.assertEquals([{'b': {'c': None}}], f(input, "a")) - self.assertEquals([{'c': None}], f(input, "a/b")) - self.assertEquals([], f(input, "a/b/c")) + self.assertEqual([{'b': {'c': None}}], f(input, "a")) + self.assertEqual([{'c': None}], f(input, "a/b")) + self.assertEqual([], f(input, "a/b/c")) input = [{'a': {'b': {'c': None}}}, {'a': {'b': None}}] - self.assertEquals([{'b': {'c': None}}, {'b': None}], f(input, "a")) - self.assertEquals([{'c': None}], f(input, "a/b")) - self.assertEquals([], f(input, "a/b/c")) + self.assertEqual([{'b': {'c': None}}, {'b': None}], f(input, "a")) + self.assertEqual([{'c': None}], f(input, "a/b")) + self.assertEqual([], f(input, "a/b/c")) def test_does_select(self): f = utils.get_from_path input = [{'a': 'a_1'}] - self.assertEquals(['a_1'], f(input, "a")) - self.assertEquals([], f(input, "a/b")) - self.assertEquals([], f(input, "a/b/c")) + self.assertEqual(['a_1'], f(input, "a")) + self.assertEqual([], f(input, "a/b")) + self.assertEqual([], f(input, "a/b/c")) input = [{'a': {'b': 'b_1'}}] - self.assertEquals([{'b': 'b_1'}], f(input, "a")) - self.assertEquals(['b_1'], f(input, "a/b")) - self.assertEquals([], f(input, "a/b/c")) + self.assertEqual([{'b': 'b_1'}], f(input, "a")) + self.assertEqual(['b_1'], f(input, "a/b")) + 
self.assertEqual([], f(input, "a/b/c")) input = [{'a': {'b': {'c': 'c_1'}}}] - self.assertEquals([{'b': {'c': 'c_1'}}], f(input, "a")) - self.assertEquals([{'c': 'c_1'}], f(input, "a/b")) - self.assertEquals(['c_1'], f(input, "a/b/c")) + self.assertEqual([{'b': {'c': 'c_1'}}], f(input, "a")) + self.assertEqual([{'c': 'c_1'}], f(input, "a/b")) + self.assertEqual(['c_1'], f(input, "a/b/c")) input = [{'a': {'b': {'c': 'c_1'}}}, {'a': None}] - self.assertEquals([{'b': {'c': 'c_1'}}], f(input, "a")) - self.assertEquals([{'c': 'c_1'}], f(input, "a/b")) - self.assertEquals(['c_1'], f(input, "a/b/c")) + self.assertEqual([{'b': {'c': 'c_1'}}], f(input, "a")) + self.assertEqual([{'c': 'c_1'}], f(input, "a/b")) + self.assertEqual(['c_1'], f(input, "a/b/c")) input = [{'a': {'b': {'c': 'c_1'}}}, {'a': {'b': None}}] - self.assertEquals([{'b': {'c': 'c_1'}}, {'b': None}], f(input, "a")) - self.assertEquals([{'c': 'c_1'}], f(input, "a/b")) - self.assertEquals(['c_1'], f(input, "a/b/c")) + self.assertEqual([{'b': {'c': 'c_1'}}, {'b': None}], f(input, "a")) + self.assertEqual([{'c': 'c_1'}], f(input, "a/b")) + self.assertEqual(['c_1'], f(input, "a/b/c")) input = [{'a': {'b': {'c': 'c_1'}}}, {'a': {'b': {'c': 'c_2'}}}] - self.assertEquals([{'b': {'c': 'c_1'}}, {'b': {'c': 'c_2'}}], - f(input, "a")) - self.assertEquals([{'c': 'c_1'}, {'c': 'c_2'}], f(input, "a/b")) - self.assertEquals(['c_1', 'c_2'], f(input, "a/b/c")) + self.assertEqual([{'b': {'c': 'c_1'}}, {'b': {'c': 'c_2'}}], + f(input, "a")) + self.assertEqual([{'c': 'c_1'}, {'c': 'c_2'}], f(input, "a/b")) + self.assertEqual(['c_1', 'c_2'], f(input, "a/b/c")) - self.assertEquals([], f(input, "a/b/c/d")) - self.assertEquals([], f(input, "c/a/b/d")) - self.assertEquals([], f(input, "i/r/t")) + self.assertEqual([], f(input, "a/b/c/d")) + self.assertEqual([], f(input, "c/a/b/d")) + self.assertEqual([], f(input, "i/r/t")) def test_flattens_lists(self): f = utils.get_from_path input = [{'a': [1, 2, 3]}] - self.assertEquals([1, 2, 3], f(input, "a")) - self.assertEquals([], f(input, "a/b")) - self.assertEquals([], f(input, "a/b/c")) + self.assertEqual([1, 2, 3], f(input, "a")) + self.assertEqual([], f(input, "a/b")) + self.assertEqual([], f(input, "a/b/c")) input = [{'a': {'b': [1, 2, 3]}}] - self.assertEquals([{'b': [1, 2, 3]}], f(input, "a")) - self.assertEquals([1, 2, 3], f(input, "a/b")) - self.assertEquals([], f(input, "a/b/c")) + self.assertEqual([{'b': [1, 2, 3]}], f(input, "a")) + self.assertEqual([1, 2, 3], f(input, "a/b")) + self.assertEqual([], f(input, "a/b/c")) input = [{'a': {'b': [1, 2, 3]}}, {'a': {'b': [4, 5, 6]}}] - self.assertEquals([1, 2, 3, 4, 5, 6], f(input, "a/b")) - self.assertEquals([], f(input, "a/b/c")) + self.assertEqual([1, 2, 3, 4, 5, 6], f(input, "a/b")) + self.assertEqual([], f(input, "a/b/c")) input = [{'a': [{'b': [1, 2, 3]}, {'b': [4, 5, 6]}]}] - self.assertEquals([1, 2, 3, 4, 5, 6], f(input, "a/b")) - self.assertEquals([], f(input, "a/b/c")) + self.assertEqual([1, 2, 3, 4, 5, 6], f(input, "a/b")) + self.assertEqual([], f(input, "a/b/c")) input = [{'a': [1, 2, {'b': 'b_1'}]}] - self.assertEquals([1, 2, {'b': 'b_1'}], f(input, "a")) - self.assertEquals(['b_1'], f(input, "a/b")) + self.assertEqual([1, 2, {'b': 'b_1'}], f(input, "a")) + self.assertEqual(['b_1'], f(input, "a/b")) def test_bad_xpath(self): f = utils.get_from_path @@ -247,29 +250,29 @@ def test_real_failure1(self): private_ips = f(inst, 'fixed_ip/address') public_ips = f(inst, 'fixed_ip/floating_ips/address') - self.assertEquals(['192.168.0.3'], private_ips) - 
self.assertEquals(['1.2.3.4'], public_ips) + self.assertEqual(['192.168.0.3'], private_ips) + self.assertEqual(['1.2.3.4'], public_ips) def test_accepts_dictionaries(self): f = utils.get_from_path input = {'a': [1, 2, 3]} - self.assertEquals([1, 2, 3], f(input, "a")) - self.assertEquals([], f(input, "a/b")) - self.assertEquals([], f(input, "a/b/c")) + self.assertEqual([1, 2, 3], f(input, "a")) + self.assertEqual([], f(input, "a/b")) + self.assertEqual([], f(input, "a/b/c")) input = {'a': {'b': [1, 2, 3]}} - self.assertEquals([{'b': [1, 2, 3]}], f(input, "a")) - self.assertEquals([1, 2, 3], f(input, "a/b")) - self.assertEquals([], f(input, "a/b/c")) + self.assertEqual([{'b': [1, 2, 3]}], f(input, "a")) + self.assertEqual([1, 2, 3], f(input, "a/b")) + self.assertEqual([], f(input, "a/b/c")) input = {'a': [{'b': [1, 2, 3]}, {'b': [4, 5, 6]}]} - self.assertEquals([1, 2, 3, 4, 5, 6], f(input, "a/b")) - self.assertEquals([], f(input, "a/b/c")) + self.assertEqual([1, 2, 3, 4, 5, 6], f(input, "a/b")) + self.assertEqual([], f(input, "a/b/c")) input = {'a': [1, 2, {'b': 'b_1'}]} - self.assertEquals([1, 2, {'b': 'b_1'}], f(input, "a")) - self.assertEquals(['b_1'], f(input, "a/b")) + self.assertEqual([1, 2, {'b': 'b_1'}], f(input, "a")) + self.assertEqual(['b_1'], f(input, "a/b")) class GenericUtilsTestCase(test.TestCase): @@ -297,22 +300,10 @@ def test_hostname_translate(self): hostname = "<}\x1fh\x10e\x08l\x02l\x05o\x12!{>" self.assertEqual("hello", utils.sanitize_hostname(hostname)) - def test_bool_from_str(self): - self.assertTrue(utils.bool_from_str('1')) - self.assertTrue(utils.bool_from_str('2')) - self.assertTrue(utils.bool_from_str('-1')) - self.assertTrue(utils.bool_from_str('true')) - self.assertTrue(utils.bool_from_str('True')) - self.assertTrue(utils.bool_from_str('tRuE')) - self.assertFalse(utils.bool_from_str('False')) - self.assertFalse(utils.bool_from_str('false')) - self.assertFalse(utils.bool_from_str('0')) - self.assertFalse(utils.bool_from_str(None)) - self.assertFalse(utils.bool_from_str('junk')) - def test_generate_glance_url(self): generated_url = utils.generate_glance_url() - actual_url = "http://%s:%d" % (FLAGS.glance_host, FLAGS.glance_port) + actual_url = "http://%s:%d" % (CONF.glance_host, + CONF.glance_port) self.assertEqual(generated_url, actual_url) def test_read_cached_file(self): @@ -348,8 +339,9 @@ def test_reload(reloaded_data): self.assertEqual(reloaded_data, fake_contents) self.reload_called = True - data = utils.read_cached_file("/this/is/a/fake", cache_data, - reload_func=test_reload) + data = utils.read_cached_file("/this/is/a/fake", + cache_data, + reload_func=test_reload) self.assertEqual(data, fake_contents) self.assertTrue(self.reload_called) @@ -364,7 +356,7 @@ def test_generate_password(self): def test_read_file_as_root(self): def fake_execute(*args, **kwargs): if args[1] == 'bad': - raise exception.ProcessExecutionError + raise putils.ProcessExecutionError return 'fakecontents', None self.stubs.Set(utils, 'execute', fake_execute) @@ -373,11 +365,6 @@ def fake_execute(*args, **kwargs): self.assertRaises(exception.FileNotFound, utils.read_file_as_root, 'bad') - def test_strcmp_const_time(self): - self.assertTrue(utils.strcmp_const_time('abc123', 'abc123')) - self.assertFalse(utils.strcmp_const_time('a', 'aaaaa')) - self.assertFalse(utils.strcmp_const_time('ABC123', 'abc123')) - def test_temporary_chown(self): def fake_execute(*args, **kwargs): if args[0] == 'chown': @@ -423,6 +410,36 @@ def test_service_is_up(self): result = utils.service_is_up(service) 
self.assertFalse(result) + + def test_safe_parse_xml(self): + + normal_body = ('<?xml version="1.0" ?>' + '<foo><bar><v1>hey</v1><v2>there</v2></bar></foo>') + + def killer_body(): + # Entity-expansion bomb ("billion laughs"): each entity expands + # into many copies of the previous one, so a full expansion is huge. + return (("""<!DOCTYPE x [ + <!ENTITY a "%(a)s"> + <!ENTITY b "%(b)s"> + <!ENTITY c "%(c)s">]> + <foo> + <bar> + <v1>%(d)s</v1> + </bar> + </foo> + """) % { + 'a': 'A' * 10, + 'b': '&a;' * 10, + 'c': '&b;' * 10, + 'd': '&c;' * 9999, + }).strip() + + dom = utils.safe_minidom_parse_string(normal_body) + # Some versions of minidom inject extra newlines so we ignore them + result = str(dom.toxml()).replace('\n', '') + self.assertEqual(normal_body, result) + + self.assertRaises(ValueError, + utils.safe_minidom_parse_string, + killer_body()) + def test_xhtml_escape(self): self.assertEqual('&quot;foo&quot;', utils.xhtml_escape('"foo"')) self.assertEqual('&apos;foo&apos;', utils.xhtml_escape("'foo'")) @@ -432,32 +449,118 @@ def test_hash_file(self): data = 'Mary had a little lamb' flo = StringIO.StringIO(data) h1 = utils.hash_file(flo) h2 = hashlib.sha1(data).hexdigest() - self.assertEquals(h1, h2) + self.assertEqual(h1, h2) + + def test_check_ssh_injection(self): + cmd_list = ['ssh', '-D', 'my_name@name_of_remote_computer'] + self.assertIsNone(utils.check_ssh_injection(cmd_list)) + cmd_list = ['echo', '"quoted arg with space"'] + self.assertIsNone(utils.check_ssh_injection(cmd_list)) + cmd_list = ['echo', "'quoted arg with space'"] + self.assertIsNone(utils.check_ssh_injection(cmd_list)) + + def test_check_ssh_injection_on_error(self): + with_unquoted_space = ['ssh', 'my_name@ name_of_remote_computer'] + self.assertRaises(exception.SSHInjectionThreat, + utils.check_ssh_injection, + with_unquoted_space) + with_danger_char = ['||', 'my_name@name_of_remote_computer'] + self.assertRaises(exception.SSHInjectionThreat, + utils.check_ssh_injection, + with_danger_char) + with_special = ['cmd', 'virus;ls'] + self.assertRaises(exception.SSHInjectionThreat, + utils.check_ssh_injection, + with_special) + quoted_with_unescaped = ['cmd', '"arg\"withunescaped"'] + self.assertRaises(exception.SSHInjectionThreat, + utils.check_ssh_injection, + quoted_with_unescaped) + bad_before_quotes = ['cmd', 'virus;"quoted argument"'] + self.assertRaises(exception.SSHInjectionThreat, + utils.check_ssh_injection, + bad_before_quotes) + bad_after_quotes = ['echo', '"quoted argument";rm -rf'] + self.assertRaises(exception.SSHInjectionThreat, + utils.check_ssh_injection, + bad_after_quotes) + bad_within_quotes = ['echo', "'quoted argument `rm -rf`'"] + self.assertRaises(exception.SSHInjectionThreat, + utils.check_ssh_injection, + bad_within_quotes) + with_multiple_quotes = ['echo', '"quoted";virus;"quoted"'] + self.assertRaises(exception.SSHInjectionThreat, + utils.check_ssh_injection, + with_multiple_quotes) + with_multiple_quotes = ['echo', '"quoted";virus;\'quoted\''] + self.assertRaises(exception.SSHInjectionThreat, + utils.check_ssh_injection, + with_multiple_quotes) + + def test_create_channel(self): + client = paramiko.SSHClient() + channel = paramiko.Channel(123) + self.mox.StubOutWithMock(client, 'invoke_shell') + self.mox.StubOutWithMock(channel, 'resize_pty') + + client.invoke_shell().AndReturn(channel) + channel.resize_pty(600, 800) + + self.mox.ReplayAll() + utils.create_channel(client, 600, 800) + self.mox.VerifyAll() -class IsUUIDLikeTestCase(test.TestCase): - def assertUUIDLike(self, val, expected): - result = utils.is_uuid_like(val) - self.assertEqual(result, expected) + def _make_fake_stat(self, test_file, orig_os_stat): + """Create a fake method to stub out os.stat().
- def test_good_uuid(self): - val = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' - self.assertUUIDLike(val, True) + Generate a function that will return a particular + stat object for a given file. - def test_integer_passed(self): - val = 1 - self.assertUUIDLike(val, False) + :param test_file: file to spoof stat() for + :param orig_os_stat: pointer to original os.stat() + """ + + def fake_stat(path): + if path == test_file: + class stat_result: + st_mode = 0o777 + st_gid = 33333 + return stat_result + else: + return orig_os_stat(path) + + return fake_stat + + def test_get_file_mode(self): + test_file = '/var/tmp/made_up_file' + + orig_os_stat = os.stat + os.stat = self._make_fake_stat(test_file, orig_os_stat) + + self.mox.ReplayAll() - def test_non_uuid_string_passed(self): - val = 'foo-fooo' - self.assertUUIDLike(val, False) + mode = utils.get_file_mode(test_file) + self.assertEqual(mode, 0o777) - def test_non_uuid_string_passed2(self): - val = 'xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx' - self.assertUUIDLike(val, False) + self.mox.VerifyAll() - def test_gen_valid_uuid(self): - self.assertUUIDLike(str(utils.gen_uuid()), True) + os.stat = orig_os_stat class MonkeyPatchTestCase(test.TestCase): @@ -468,7 +571,8 @@ def setUp(self): self.flags( monkey_patch=True, monkey_patch_modules=[self.example_package + 'example_a' + ':' - + self.example_package + 'example_decorator']) + + self.example_package + + 'example_decorator']) def test_monkey_patch(self): utils.monkey_patch() @@ -490,19 +594,19 @@ def test_monkey_patch(self): self.assertEqual(ret_b, 8) package_a = self.example_package + 'example_a.' self.assertTrue(package_a + 'example_function_a' - in cinder.tests.monkey_patch_example.CALLED_FUNCTION) + in cinder.tests.monkey_patch_example.CALLED_FUNCTION) self.assertTrue(package_a + 'ExampleClassA.example_method' - in cinder.tests.monkey_patch_example.CALLED_FUNCTION) + in cinder.tests.monkey_patch_example.CALLED_FUNCTION) self.assertTrue(package_a + 'ExampleClassA.example_method_add' - in cinder.tests.monkey_patch_example.CALLED_FUNCTION) + in cinder.tests.monkey_patch_example.CALLED_FUNCTION) package_b = self.example_package + 'example_b.'
self.assertFalse(package_b + 'example_function_b' - in cinder.tests.monkey_patch_example.CALLED_FUNCTION) + in cinder.tests.monkey_patch_example.CALLED_FUNCTION) self.assertFalse(package_b + 'ExampleClassB.example_method' - in cinder.tests.monkey_patch_example.CALLED_FUNCTION) + in cinder.tests.monkey_patch_example.CALLED_FUNCTION) self.assertFalse(package_b + 'ExampleClassB.example_method_add' - in cinder.tests.monkey_patch_example.CALLED_FUNCTION) + in cinder.tests.monkey_patch_example.CALLED_FUNCTION) class AuditPeriodTest(test.TestCase): @@ -524,146 +628,275 @@ def tearDown(self): def test_hour(self): begin, end = utils.last_completed_audit_period(unit='hour') - self.assertEquals(begin, datetime.datetime( - hour=7, - day=5, - month=3, - year=2012)) - self.assertEquals(end, datetime.datetime( - hour=8, + self.assertEqual(begin, + datetime.datetime(hour=7, day=5, month=3, year=2012)) + self.assertEqual(end, datetime.datetime(hour=8, + day=5, + month=3, + year=2012)) def test_hour_with_offset_before_current(self): begin, end = utils.last_completed_audit_period(unit='hour@10') - self.assertEquals(begin, datetime.datetime( - minute=10, - hour=7, - day=5, - month=3, - year=2012)) - self.assertEquals(end, datetime.datetime( - minute=10, - hour=8, - day=5, - month=3, - year=2012)) + self.assertEqual(begin, datetime.datetime(minute=10, + hour=7, + day=5, + month=3, + year=2012)) + self.assertEqual(end, datetime.datetime(minute=10, + hour=8, + day=5, + month=3, + year=2012)) def test_hour_with_offset_after_current(self): begin, end = utils.last_completed_audit_period(unit='hour@30') - self.assertEquals(begin, datetime.datetime( - minute=30, - hour=6, - day=5, - month=3, - year=2012)) - self.assertEquals(end, datetime.datetime( - minute=30, - hour=7, - day=5, - month=3, - year=2012)) + self.assertEqual(begin, datetime.datetime(minute=30, + hour=6, + day=5, + month=3, + year=2012)) + self.assertEqual(end, datetime.datetime(minute=30, + hour=7, + day=5, + month=3, + year=2012)) def test_day(self): begin, end = utils.last_completed_audit_period(unit='day') - self.assertEquals(begin, datetime.datetime( - day=4, - month=3, - year=2012)) - self.assertEquals(end, datetime.datetime( - day=5, - month=3, - year=2012)) + self.assertEqual(begin, datetime.datetime(day=4, + month=3, + year=2012)) + self.assertEqual(end, datetime.datetime(day=5, + month=3, + year=2012)) def test_day_with_offset_before_current(self): begin, end = utils.last_completed_audit_period(unit='day@6') - self.assertEquals(begin, datetime.datetime( - hour=6, - day=4, - month=3, - year=2012)) - self.assertEquals(end, datetime.datetime( - hour=6, - day=5, - month=3, - year=2012)) + self.assertEqual(begin, datetime.datetime(hour=6, + day=4, + month=3, + year=2012)) + self.assertEqual(end, datetime.datetime(hour=6, + day=5, + month=3, + year=2012)) def test_day_with_offset_after_current(self): begin, end = utils.last_completed_audit_period(unit='day@10') - self.assertEquals(begin, datetime.datetime( - hour=10, - day=3, - month=3, - year=2012)) - self.assertEquals(end, datetime.datetime( - hour=10, - day=4, - month=3, - year=2012)) + self.assertEqual(begin, datetime.datetime(hour=10, + day=3, + month=3, + year=2012)) + self.assertEqual(end, datetime.datetime(hour=10, + day=4, + month=3, + year=2012)) def test_month(self): begin, end = utils.last_completed_audit_period(unit='month') - self.assertEquals(begin, datetime.datetime( - day=1, - month=2, - year=2012)) - self.assertEquals(end, datetime.datetime( - day=1, - month=3, - year=2012)) + 
self.assertEqual(begin, datetime.datetime(day=1, + month=2, + year=2012)) + self.assertEqual(end, datetime.datetime(day=1, + month=3, + year=2012)) def test_month_with_offset_before_current(self): begin, end = utils.last_completed_audit_period(unit='month@2') - self.assertEquals(begin, datetime.datetime( - day=2, - month=2, - year=2012)) - self.assertEquals(end, datetime.datetime( - day=2, - month=3, - year=2012)) + self.assertEqual(begin, datetime.datetime(day=2, + month=2, + year=2012)) + self.assertEqual(end, datetime.datetime(day=2, + month=3, + year=2012)) def test_month_with_offset_after_current(self): begin, end = utils.last_completed_audit_period(unit='month@15') - self.assertEquals(begin, datetime.datetime( - day=15, - month=1, - year=2012)) - self.assertEquals(end, datetime.datetime( - day=15, - month=2, - year=2012)) + self.assertEqual(begin, datetime.datetime(day=15, + month=1, + year=2012)) + self.assertEqual(end, datetime.datetime(day=15, + month=2, + year=2012)) def test_year(self): begin, end = utils.last_completed_audit_period(unit='year') - self.assertEquals(begin, datetime.datetime( - day=1, - month=1, - year=2011)) - self.assertEquals(end, datetime.datetime( - day=1, - month=1, - year=2012)) + self.assertEqual(begin, datetime.datetime(day=1, + month=1, + year=2011)) + self.assertEqual(end, datetime.datetime(day=1, + month=1, + year=2012)) def test_year_with_offset_before_current(self): begin, end = utils.last_completed_audit_period(unit='year@2') - self.assertEquals(begin, datetime.datetime( - day=1, - month=2, - year=2011)) - self.assertEquals(end, datetime.datetime( - day=1, - month=2, - year=2012)) + self.assertEqual(begin, datetime.datetime(day=1, + month=2, + year=2011)) + self.assertEqual(end, datetime.datetime(day=1, + month=2, + year=2012)) def test_year_with_offset_after_current(self): begin, end = utils.last_completed_audit_period(unit='year@6') - self.assertEquals(begin, datetime.datetime( - day=1, - month=6, - year=2010)) - self.assertEquals(end, datetime.datetime( - day=1, - month=6, - year=2011)) + self.assertEqual(begin, datetime.datetime(day=1, + month=6, + year=2010)) + self.assertEqual(end, datetime.datetime(day=1, + month=6, + year=2011)) + + +class FakeSSHClient(object): + + def __init__(self): + self.id = uuid.uuid4() + self.transport = FakeTransport() + + def set_missing_host_key_policy(self, policy): + pass + + def connect(self, ip, port=22, username=None, password=None, + pkey=None, timeout=10): + pass + + def get_transport(self): + return self.transport + + def close(self): + pass + + def __call__(self, *args, **kwargs): + pass + + +class FakeSock(object): + def settimeout(self, timeout): + pass + + +class FakeTransport(object): + + def __init__(self): + self.active = True + self.sock = FakeSock() + + def set_keepalive(self, timeout): + pass + + def is_active(self): + return self.active + + +class SSHPoolTestCase(test.TestCase): + """Unit test for SSH Connection Pool.""" + + def setup(self): + self.mox.StubOutWithMock(paramiko, "SSHClient") + paramiko.SSHClient().AndReturn(FakeSSHClient()) + self.mox.ReplayAll() + + def test_single_ssh_connect(self): + self.setup() + sshpool = utils.SSHPool("127.0.0.1", 22, 10, "test", password="test", + min_size=1, max_size=1) + with sshpool.item() as ssh: + first_id = ssh.id + + with sshpool.item() as ssh: + second_id = ssh.id + + self.assertEqual(first_id, second_id) + + def test_closed_reopend_ssh_connections(self): + self.setup() + sshpool = utils.SSHPool("127.0.0.1", 22, 10, "test", password="test", + 
min_size=1, max_size=2) + with sshpool.item() as ssh: + first_id = ssh.id + with sshpool.item() as ssh: + second_id = ssh.id + # Close the connection and test for a new connection + ssh.get_transport().active = False + + self.assertEqual(first_id, second_id) + + # The mox items are not getting setup in a new pool connection, + # so had to reset and set again. + self.mox.UnsetStubs() + self.setup() + + with sshpool.item() as ssh: + third_id = ssh.id + + self.assertNotEqual(first_id, third_id) + + +class BrickUtils(test.TestCase): + """Unit test to test the brick utility + wrapper functions. + """ + + def test_brick_get_connector_properties(self): + + self.mox.StubOutWithMock(socket, 'gethostname') + socket.gethostname().AndReturn('fakehost') + + self.mox.StubOutWithMock(connector.ISCSIConnector, 'get_initiator') + connector.ISCSIConnector.get_initiator().AndReturn('fakeinitiator') + + self.mox.StubOutWithMock(linuxfc.LinuxFibreChannel, 'get_fc_wwpns') + linuxfc.LinuxFibreChannel.get_fc_wwpns().AndReturn(None) + + self.mox.StubOutWithMock(linuxfc.LinuxFibreChannel, 'get_fc_wwnns') + linuxfc.LinuxFibreChannel.get_fc_wwnns().AndReturn(None) + + props = {'initiator': 'fakeinitiator', + 'host': 'fakehost', + 'ip': CONF.my_ip, + } + + self.mox.ReplayAll() + props_actual = utils.brick_get_connector_properties() + self.assertEqual(props, props_actual) + self.mox.VerifyAll() + + def test_brick_get_connector(self): + + root_helper = utils.get_root_helper() + + self.mox.StubOutClassWithMocks(connector, 'ISCSIConnector') + connector.ISCSIConnector(execute=putils.execute, + driver=None, + root_helper=root_helper, + use_multipath=False, + device_scan_attempts=3) + + self.mox.StubOutClassWithMocks(connector, 'FibreChannelConnector') + connector.FibreChannelConnector(execute=putils.execute, + driver=None, + root_helper=root_helper, + use_multipath=False, + device_scan_attempts=3) + + self.mox.StubOutClassWithMocks(connector, 'AoEConnector') + connector.AoEConnector(execute=putils.execute, + driver=None, + root_helper=root_helper, + device_scan_attempts=3) + + self.mox.StubOutClassWithMocks(connector, 'LocalConnector') + connector.LocalConnector(execute=putils.execute, + driver=None, + root_helper=root_helper, + device_scan_attempts=3) + + self.mox.ReplayAll() + utils.brick_get_connector('iscsi') + utils.brick_get_connector('fibre_channel') + utils.brick_get_connector('aoe') + utils.brick_get_connector('local') + self.mox.VerifyAll() diff --git a/cinder/tests/test_versions.py b/cinder/tests/test_versions.py deleted file mode 100644 index c7a88c5a3e..0000000000 --- a/cinder/tests/test_versions.py +++ /dev/null @@ -1,59 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2011 Ken Pepple -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -from cinder import test -from cinder import version - - -class VersionTestCase(test.TestCase): - """Test cases for Versions code""" - def setUp(self): - """setup test with unchanging values""" - super(VersionTestCase, self).setUp() - self.version = version - self.version.FINAL = False - self.version.CINDER_VERSION = ['2012', '10'] - self.version.YEAR, self.version.COUNT = self.version.CINDER_VERSION - self.version.version_info = {'branch_nick': u'LOCALBRANCH', - 'revision_id': 'LOCALREVISION', - 'revno': 0} - - def test_version_string_is_good(self): - """Ensure version string works""" - self.assertEqual("2012.10-dev", self.version.version_string()) - - def test_canonical_version_string_is_good(self): - """Ensure canonical version works""" - self.assertEqual("2012.10", self.version.canonical_version_string()) - - def test_final_version_strings_are_identical(self): - """Ensure final version strings match only at release""" - self.assertNotEqual(self.version.canonical_version_string(), - self.version.version_string()) - self.version.FINAL = True - self.assertEqual(self.version.canonical_version_string(), - self.version.version_string()) - - def test_vcs_version_string_is_good(self): - """Ensure uninstalled code generates local """ - self.assertEqual("LOCALBRANCH:LOCALREVISION", - self.version.vcs_version_string()) - - def test_version_string_with_vcs_is_good(self): - """Ensure uninstalled code get version string""" - self.assertEqual("2012.10-LOCALBRANCH:LOCALREVISION", - self.version.version_string_with_vcs()) diff --git a/cinder/tests/test_vmware_vmdk.py b/cinder/tests/test_vmware_vmdk.py new file mode 100644 index 0000000000..3642454f4c --- /dev/null +++ b/cinder/tests/test_vmware_vmdk.py @@ -0,0 +1,2001 @@ +# Copyright (c) 2013 VMware, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Test suite for VMware VMDK driver. 
+""" + +import mox + +from cinder import exception +from cinder.image import glance +from cinder import test +from cinder import units +from cinder.volume import configuration +from cinder.volume.drivers.vmware import api +from cinder.volume.drivers.vmware import error_util +from cinder.volume.drivers.vmware import vim +from cinder.volume.drivers.vmware import vim_util +from cinder.volume.drivers.vmware import vmdk +from cinder.volume.drivers.vmware import vmware_images +from cinder.volume.drivers.vmware import volumeops + + +class FakeVim(object): + @property + def service_content(self): + return mox.MockAnything() + + @property + def client(self): + return mox.MockAnything() + + def Login(self, session_manager, userName, password): + return mox.MockAnything() + + +class FakeTaskInfo(object): + def __init__(self, state, result=None): + self.state = state + self.result = result + + class FakeError(object): + def __init__(self): + self.localizedMessage = None + + self.error = FakeError() + + +class FakeMor(object): + def __init__(self, type, val): + self._type = type + self.value = val + + +class FakeObject(object): + def __init__(self): + self._fields = {} + + def __setitem__(self, key, value): + self._fields[key] = value + + def __getitem__(self, item): + return self._fields[item] + + +class FakeManagedObjectReference(object): + def __init__(self, lis=[]): + self.ManagedObjectReference = lis + + +class FakeDatastoreSummary(object): + def __init__(self, freeSpace, capacity, datastore=None, name=None): + self.freeSpace = freeSpace + self.capacity = capacity + self.datastore = datastore + self.name = name + + +class FakeSnapshotTree(object): + def __init__(self, tree=None, name=None, + snapshot=None, childSnapshotList=None): + self.rootSnapshotList = tree + self.name = name + self.snapshot = snapshot + self.childSnapshotList = childSnapshotList + + +class FakeElem(object): + def __init__(self, prop_set=None): + self.propSet = prop_set + + +class FakeProp(object): + def __init__(self, name=None, val=None): + self.name = name + self.val = val + + +class FakeRetrieveResult(object): + def __init__(self, objects, token): + self.objects = objects + self.token = token + + +class FakeObj(object): + def __init__(self, obj=None): + self.obj = obj + + +class VMwareEsxVmdkDriverTestCase(test.TestCase): + """Test class for VMwareEsxVmdkDriver.""" + + IP = 'localhost' + USERNAME = 'username' + PASSWORD = 'password' + VOLUME_FOLDER = 'cinder-volumes' + API_RETRY_COUNT = 3 + TASK_POLL_INTERVAL = 5.0 + IMG_TX_TIMEOUT = 10 + MAX_OBJECTS = 100 + + def setUp(self): + super(VMwareEsxVmdkDriverTestCase, self).setUp() + self._config = mox.MockObject(configuration.Configuration) + self._config.append_config_values(mox.IgnoreArg()) + self._config.vmware_host_ip = self.IP + self._config.vmware_host_username = self.USERNAME + self._config.vmware_host_password = self.PASSWORD + self._config.vmware_wsdl_location = None + self._config.vmware_volume_folder = self.VOLUME_FOLDER + self._config.vmware_api_retry_count = self.API_RETRY_COUNT + self._config.vmware_task_poll_interval = self.TASK_POLL_INTERVAL + self._config.vmware_image_transfer_timeout_secs = self.IMG_TX_TIMEOUT + self._config.vmware_max_objects_retrieval = self.MAX_OBJECTS + self._driver = vmdk.VMwareEsxVmdkDriver(configuration=self._config) + api_retry_count = self._config.vmware_api_retry_count, + task_poll_interval = self._config.vmware_task_poll_interval, + self._session = api.VMwareAPISession(self.IP, self.USERNAME, + self.PASSWORD, api_retry_count, + 
task_poll_interval, + create_session=False) + self._volumeops = volumeops.VMwareVolumeOps(self._session, + self.MAX_OBJECTS) + self._vim = FakeVim() + + def test_retry(self): + """Test Retry.""" + + class TestClass(object): + + def __init__(self): + self.counter1 = 0 + + @api.Retry(max_retry_count=2, inc_sleep_time=0.001, + exceptions=(Exception)) + def fail(self): + self.counter1 += 1 + raise exception.CinderException('Fail') + + test_obj = TestClass() + self.assertRaises(exception.CinderException, test_obj.fail) + self.assertEqual(test_obj.counter1, 3) + + def test_create_session(self): + """Test create_session.""" + m = self.mox + m.StubOutWithMock(api.VMwareAPISession, 'vim') + self._session.vim = self._vim + m.ReplayAll() + self._session.create_session() + m.UnsetStubs() + m.VerifyAll() + + def test_do_setup(self): + """Test do_setup.""" + m = self.mox + m.StubOutWithMock(self._driver.__class__, 'session') + self._driver.session = self._session + m.ReplayAll() + self._driver.do_setup(mox.IgnoreArg()) + m.UnsetStubs() + m.VerifyAll() + + def test_check_for_setup_error(self): + """Test check_for_setup_error.""" + self._driver.check_for_setup_error() + + def test_get_volume_stats(self): + """Test get_volume_stats.""" + stats = self._driver.get_volume_stats() + self.assertEqual(stats['vendor_name'], 'VMware') + self.assertEqual(stats['driver_version'], '1.1.0') + self.assertEqual(stats['storage_protocol'], 'LSI Logic SCSI') + self.assertEqual(stats['reserved_percentage'], 0) + self.assertEqual(stats['total_capacity_gb'], 'unknown') + self.assertEqual(stats['free_capacity_gb'], 'unknown') + + def test_create_volume(self): + """Test create_volume.""" + self._driver.create_volume(mox.IgnoreArg()) + + def test_success_wait_for_task(self): + """Test successful wait_for_task.""" + m = self.mox + m.StubOutWithMock(api.VMwareAPISession, 'vim') + self._session.vim = self._vim + result = FakeMor('VirtualMachine', 'my_vm') + success_task_info = FakeTaskInfo('success', result=result) + m.StubOutWithMock(vim_util, 'get_object_property') + vim_util.get_object_property(self._session.vim, + mox.IgnoreArg(), + 'info').AndReturn(success_task_info) + + m.ReplayAll() + ret = self._session.wait_for_task(mox.IgnoreArg()) + self.assertEqual(ret.result, result) + m.UnsetStubs() + m.VerifyAll() + + def test_failed_wait_for_task(self): + """Test failed wait_for_task.""" + m = self.mox + m.StubOutWithMock(api.VMwareAPISession, 'vim') + self._session.vim = self._vim + failed_task_info = FakeTaskInfo('failed') + m.StubOutWithMock(vim_util, 'get_object_property') + vim_util.get_object_property(self._session.vim, + mox.IgnoreArg(), + 'info').AndReturn(failed_task_info) + + m.ReplayAll() + self.assertRaises(error_util.VimFaultException, + self._session.wait_for_task, + mox.IgnoreArg()) + m.UnsetStubs() + m.VerifyAll() + + def test_continue_retrieval(self): + """Test continue_retrieval.""" + m = self.mox + m.StubOutWithMock(api.VMwareAPISession, 'vim') + self._session.vim = self._vim + m.StubOutWithMock(self._session, 'invoke_api') + self._session.invoke_api(vim_util, 'continue_retrieval', + self._vim, mox.IgnoreArg()) + + m.ReplayAll() + self._volumeops.continue_retrieval(mox.IgnoreArg()) + m.UnsetStubs() + m.VerifyAll() + + def test_cancel_retrieval(self): + """Test cancel_retrieval.""" + m = self.mox + m.StubOutWithMock(api.VMwareAPISession, 'vim') + self._session.vim = self._vim + m.StubOutWithMock(self._session, 'invoke_api') + self._session.invoke_api(vim_util, 'cancel_retrieval', + self._vim, mox.IgnoreArg()) + + 
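# All expectations are recorded; switch mox into replay mode before exercising the code under test. +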
m.ReplayAll() + self._volumeops.cancel_retrieval(mox.IgnoreArg()) + m.UnsetStubs() + m.VerifyAll() + + def test_get_backing(self): + """Test get_backing.""" + m = self.mox + m.StubOutWithMock(api.VMwareAPISession, 'vim') + self._session.vim = self._vim + m.StubOutWithMock(self._session, 'invoke_api') + self._session.invoke_api(vim_util, 'get_objects', + self._vim, 'VirtualMachine', + self.MAX_OBJECTS) + + m.ReplayAll() + self._volumeops.get_backing(mox.IgnoreArg()) + m.UnsetStubs() + m.VerifyAll() + + def test_get_backing_multiple_retrieval(self): + """Test get_backing with multiple retrieval.""" + m = self.mox + m.StubOutWithMock(api.VMwareAPISession, 'vim') + self._session.vim = self._vim + m.StubOutWithMock(self._session, 'invoke_api') + retrieve_result = FakeRetrieveResult([], 'my_token') + self._session.invoke_api(vim_util, 'get_objects', + self._vim, 'VirtualMachine', + self.MAX_OBJECTS).AndReturn(retrieve_result) + m.StubOutWithMock(self._volumeops, 'cancel_retrieval') + self._volumeops.continue_retrieval(retrieve_result) + + m.ReplayAll() + self._volumeops.get_backing(mox.IgnoreArg()) + m.UnsetStubs() + m.VerifyAll() + + def test_delete_backing(self): + """Test delete_backing.""" + m = self.mox + m.StubOutWithMock(api.VMwareAPISession, 'vim') + self._session.vim = self._vim + m.StubOutWithMock(self._session, 'invoke_api') + backing = FakeMor('VirtualMachine', 'my_vm') + self._session.invoke_api(self._vim, 'Destroy_Task', backing) + m.StubOutWithMock(self._session, 'wait_for_task') + self._session.wait_for_task(mox.IgnoreArg()) + + m.ReplayAll() + self._volumeops.delete_backing(backing) + m.UnsetStubs() + m.VerifyAll() + + def test_delete_volume_without_backing(self): + """Test delete_volume without backing.""" + m = self.mox + m.StubOutWithMock(self._driver.__class__, 'volumeops') + self._driver.volumeops = self._volumeops + m.StubOutWithMock(self._volumeops, 'get_backing') + self._volumeops.get_backing('hello_world').AndReturn(None) + + m.ReplayAll() + volume = FakeObject() + volume['name'] = 'hello_world' + self._driver.delete_volume(volume) + m.UnsetStubs() + m.VerifyAll() + + def test_delete_volume_with_backing(self): + """Test delete_volume with backing.""" + m = self.mox + m.StubOutWithMock(self._driver.__class__, 'volumeops') + self._driver.volumeops = self._volumeops + + backing = FakeMor('VirtualMachine', 'my_vm') + task = FakeMor('Task', 'my_task') + + m.StubOutWithMock(self._volumeops, 'get_backing') + m.StubOutWithMock(self._volumeops, 'delete_backing') + self._volumeops.get_backing('hello_world').AndReturn(backing) + self._volumeops.delete_backing(backing) + + m.ReplayAll() + volume = FakeObject() + volume['name'] = 'hello_world' + self._driver.delete_volume(volume) + m.UnsetStubs() + m.VerifyAll() + + def test_create_export(self): + """Test create_export.""" + self._driver.create_export(mox.IgnoreArg(), mox.IgnoreArg()) + + def test_ensure_export(self): + """Test ensure_export.""" + self._driver.ensure_export(mox.IgnoreArg(), mox.IgnoreArg()) + + def test_remove_export(self): + """Test remove_export.""" + self._driver.remove_export(mox.IgnoreArg(), mox.IgnoreArg()) + + def test_terminate_connection(self): + """Test terminate_connection.""" + self._driver.terminate_connection(mox.IgnoreArg(), mox.IgnoreArg(), + force=mox.IgnoreArg()) + + def test_get_host(self): + """Test get_host.""" + m = self.mox + m.StubOutWithMock(api.VMwareAPISession, 'vim') + self._session.vim = self._vim + m.StubOutWithMock(self._session, 'invoke_api') + instance = FakeObject() + 
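# Record the expected lookup of the instance's 'runtime.host' property. +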
self._session.invoke_api(vim_util, 'get_object_property', + self._vim, instance, 'runtime.host') + + m.ReplayAll() + self._volumeops.get_host(instance) + m.UnsetStubs() + m.VerifyAll() + + def test_get_hosts(self): + """Test get_hosts.""" + m = self.mox + m.StubOutWithMock(api.VMwareAPISession, 'vim') + self._session.vim = self._vim + m.StubOutWithMock(self._session, 'invoke_api') + self._session.invoke_api(vim_util, 'get_objects', self._vim, + 'HostSystem', self.MAX_OBJECTS) + + m.ReplayAll() + self._volumeops.get_hosts() + m.UnsetStubs() + m.VerifyAll() + + def _create_host_mounts(self, access_mode, host, set_accessible=True, + is_accessible=True, mounted=True): + """Create host mount value of datastore with single mount info. + + :param access_mode: string specifying the read/write permission + :param set_accessible: specify whether accessible property + should be set + :param is_accessible: boolean specifying whether the datastore + is accessible to host + :param host: managed object reference of the connected + host + :return: list of host mount info + """ + mntInfo = FakeObject() + mntInfo.accessMode = access_mode + if set_accessible: + mntInfo.accessible = is_accessible + mntInfo.mounted = mounted + + host_mount = FakeObject() + host_mount.key = host + host_mount.mountInfo = mntInfo + host_mounts = FakeObject() + host_mounts.DatastoreHostMount = [host_mount] + + return host_mounts + + def test_is_valid_with_accessible_attr(self): + """Test _is_valid with accessible attribute.""" + m = self.mox + m.StubOutWithMock(api.VMwareAPISession, 'vim') + self._session.vim = self._vim + m.StubOutWithMock(self._session, 'invoke_api') + + datastore = FakeMor('Datastore', 'my_ds') + host = FakeMor('HostSystem', "my_host") + host_mounts = self._create_host_mounts("readWrite", host) + + self._session.invoke_api(vim_util, 'get_object_property', + self._vim, datastore, + 'host').AndReturn(host_mounts) + + m.ReplayAll() + self.assertTrue(self._volumeops._is_valid(datastore, host)) + m.UnsetStubs() + m.VerifyAll() + + def test_is_valid_without_accessible_attr(self): + """Test _is_valid without accessible attribute.""" + m = self.mox + m.StubOutWithMock(api.VMwareAPISession, 'vim') + self._session.vim = self._vim + m.StubOutWithMock(self._session, 'invoke_api') + + datastore = FakeMor('Datastore', 'my_ds') + host = FakeMor('HostSystem', "my_host") + host_mounts = self._create_host_mounts("readWrite", host, False) + + self._session.invoke_api(vim_util, 'get_object_property', + self._vim, datastore, + 'host').AndReturn(host_mounts) + m.StubOutWithMock(self._volumeops, 'get_summary') + summary = FakeObject() + summary.accessible = True + self._volumeops.get_summary(datastore).AndReturn(summary) + + m.ReplayAll() + self.assertTrue(self._volumeops._is_valid(datastore, host)) + m.UnsetStubs() + m.VerifyAll() + + def test_get_dss_rp(self): + """Test get_dss_rp.""" + m = self.mox + m.StubOutWithMock(api.VMwareAPISession, 'vim') + self._session.vim = self._vim + m.StubOutWithMock(self._session, 'invoke_api') + + datastore = FakeMor('Datastore', 'my_ds') + datastore_prop = FakeProp(name='datastore', + val=FakeManagedObjectReference([datastore])) + + compute_resource = FakeMor('ClusterComputeResource', 'my_cluster') + compute_resource_prop = FakeProp(name='parent', val=compute_resource) + + props = [FakeElem(prop_set=[datastore_prop, compute_resource_prop])] + host = FakeMor('HostSystem', "my_host") + self._session.invoke_api(vim_util, 'get_object_properties', + self._vim, host, + ['datastore', 
'parent']).AndReturn(props) + + host_mounts = self._create_host_mounts("readWrite", host) + self._session.invoke_api(vim_util, 'get_object_property', + self._vim, datastore, + 'host').AndReturn(host_mounts) + + resource_pool = FakeMor('ResourcePool', 'my_res_pool') + self._session.invoke_api(vim_util, 'get_object_property', + self._vim, compute_resource, + 'resourcePool').AndReturn(resource_pool) + + m.ReplayAll() + (datastores_ret, resource_pool_ret) = self._volumeops.get_dss_rp(host) + self.assertTrue(len(datastores_ret) == 1) + self.assertEqual(datastores_ret[0], datastore) + self.assertEqual(resource_pool_ret, resource_pool) + + m.UnsetStubs() + m.VerifyAll() + + def test_get_dss_rp_without_datastores(self): + """Test get_dss_rp without datastores.""" + m = self.mox + m.StubOutWithMock(api.VMwareAPISession, 'vim') + self._session.vim = self._vim + m.StubOutWithMock(self._session, 'invoke_api') + + host = FakeObject() + self._session.invoke_api(vim_util, 'get_object_properties', + self._vim, host, + ['datastore', 'parent']).AndReturn([]) + self._session.invoke_api(vim_util, 'get_object_property', + self._vim, mox.IgnoreArg(), 'resourcePool') + + m.ReplayAll() + self.assertRaises(error_util.VimException, self._volumeops.get_dss_rp, + host) + m.UnsetStubs() + m.VerifyAll() + + def test_get_parent(self): + """Test get_parent.""" + # Not recursive + child = FakeMor('Parent', 'my_parent') + parent = self._volumeops._get_parent(child, 'Parent') + self.assertEqual(parent, child) + + # Recursive + m = self.mox + m.StubOutWithMock(api.VMwareAPISession, 'vim') + self._session.vim = self._vim + m.StubOutWithMock(self._session, 'invoke_api') + parent = FakeMor('Parent', 'my_parent1') + child = FakeMor('Child', 'my_child') + self._session.invoke_api(vim_util, 'get_object_property', self._vim, + child, 'parent').AndReturn(parent) + + m.ReplayAll() + ret = self._volumeops._get_parent(child, 'Parent') + self.assertEqual(ret, parent) + m.UnsetStubs() + m.VerifyAll() + + def test_get_dc(self): + """Test get_dc.""" + m = self.mox + m.StubOutWithMock(self._volumeops, '_get_parent') + self._volumeops._get_parent(mox.IgnoreArg(), 'Datacenter') + + m.ReplayAll() + self._volumeops.get_dc(mox.IgnoreArg()) + m.UnsetStubs() + m.VerifyAll() + + def test_get_vmfolder(self): + """Test get_vmfolder.""" + m = self.mox + m.StubOutWithMock(api.VMwareAPISession, 'vim') + self._session.vim = self._vim + m.StubOutWithMock(self._session, 'invoke_api') + datacenter = FakeMor('Datacenter', 'my_dc') + self._session.invoke_api(vim_util, 'get_object_property', self._vim, + datacenter, 'vmFolder') + + m.ReplayAll() + dc = self._volumeops.get_vmfolder(datacenter) + m.UnsetStubs() + m.VerifyAll() + + def test_create_backing(self): + """Test create_backing.""" + m = self.mox + m.StubOutWithMock(api.VMwareAPISession, 'vim') + self._session.vim = self._vim + m.StubOutWithMock(self._session, 'invoke_api') + folder = FakeMor('Folder', 'my_fol') + resource_pool = FakeMor('ResourcePool', 'my_rs') + host = FakeMor('HostSystem', 'my_host') + task = FakeMor('Task', 'my_task') + self._session.invoke_api(self._vim, 'CreateVM_Task', folder, + config=mox.IgnoreArg(), pool=resource_pool, + host=host).AndReturn(task) + m.StubOutWithMock(self._session, 'wait_for_task') + task_info = FakeTaskInfo('success', mox.IgnoreArg()) + self._session.wait_for_task(task).AndReturn(task_info) + name = 'my_vm' + size_kb = 1 * units.MiB + disk_type = 'thick' + ds_name = 'my_ds' + m.StubOutWithMock(self._volumeops, '_get_create_spec') + 
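# Record the call that builds the create spec for the new backing VM. +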
self._volumeops._get_create_spec(name, size_kb, disk_type, ds_name) + + m.ReplayAll() + self._volumeops.create_backing(name, size_kb, disk_type, folder, + resource_pool, host, ds_name) + m.UnsetStubs() + m.VerifyAll() + + def test_create_backing_in_inventory_multi_hosts(self): + """Test _create_backing_in_inventory scanning multiple hosts.""" + m = self.mox + m.StubOutWithMock(self._driver.__class__, 'volumeops') + self._driver.volumeops = self._volumeops + host1 = FakeObj(obj=FakeMor('HostSystem', 'my_host1')) + host2 = FakeObj(obj=FakeMor('HostSystem', 'my_host2')) + retrieve_result = FakeRetrieveResult([host1, host2], None) + m.StubOutWithMock(self._volumeops, 'get_hosts') + self._volumeops.get_hosts().AndReturn(retrieve_result) + m.StubOutWithMock(self._driver, '_create_backing') + volume = FakeObject() + volume['name'] = 'vol_name' + backing = FakeMor('VirtualMachine', 'my_back') + mux = self._driver._create_backing(volume, host1.obj) + mux.AndRaise(error_util.VimException('Maintenance mode')) + mux = self._driver._create_backing(volume, host2.obj) + mux.AndReturn(backing) + m.StubOutWithMock(self._volumeops, 'cancel_retrieval') + self._volumeops.cancel_retrieval(retrieve_result) + m.StubOutWithMock(self._volumeops, 'continue_retrieval') + + m.ReplayAll() + result = self._driver._create_backing_in_inventory(volume) + self.assertEqual(result, backing) + m.UnsetStubs() + m.VerifyAll() + + def test_get_datastore(self): + """Test get_datastore.""" + m = self.mox + m.StubOutWithMock(api.VMwareAPISession, 'vim') + self._session.vim = self._vim + m.StubOutWithMock(self._session, 'invoke_api') + backing = FakeMor('VirtualMachine', 'my_back') + datastore = FakeMor('Datastore', 'my_ds') + datastores = FakeManagedObjectReference([datastore]) + self._session.invoke_api(vim_util, 'get_object_property', self._vim, + backing, 'datastore').AndReturn(datastores) + + m.ReplayAll() + result = self._volumeops.get_datastore(backing) + self.assertEqual(result, datastore) + m.UnsetStubs() + m.VerifyAll() + + def test_get_summary(self): + """Test get_summary.""" + m = self.mox + m.StubOutWithMock(api.VMwareAPISession, 'vim') + self._session.vim = self._vim + m.StubOutWithMock(self._session, 'invoke_api') + datastore = FakeMor('Datastore', 'my_ds') + self._session.invoke_api(vim_util, 'get_object_property', self._vim, + datastore, 'summary') + + m.ReplayAll() + self._volumeops.get_summary(datastore) + m.UnsetStubs() + m.VerifyAll() + + def test_init_conn_with_instance_and_backing(self): + """Test initialize_connection with instance and backing.""" + m = self.mox + m.StubOutWithMock(self._driver.__class__, 'volumeops') + self._driver.volumeops = self._volumeops + m.StubOutWithMock(self._volumeops, 'get_backing') + volume = FakeObject() + volume['name'] = 'volume_name' + volume['id'] = 'volume_id' + volume['size'] = 1 + connector = {'instance': 'my_instance'} + backing = FakeMor('VirtualMachine', 'my_back') + self._volumeops.get_backing(volume['name']).AndReturn(backing) + m.StubOutWithMock(self._volumeops, 'get_host') + host = FakeMor('HostSystem', 'my_host') + self._volumeops.get_host(mox.IgnoreArg()).AndReturn(host) + + m.ReplayAll() + conn_info = self._driver.initialize_connection(volume, connector) + self.assertEqual(conn_info['driver_volume_type'], 'vmdk') + self.assertEqual(conn_info['data']['volume'], 'my_back') + self.assertEqual(conn_info['data']['volume_id'], 'volume_id') + m.UnsetStubs() + m.VerifyAll() + + def test_get_volume_group_folder(self): + """Test _get_volume_group_folder.""" + m = self.mox 
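+ # Stub the class-level volumeops property and supply the instance built in setUp.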
+ m.StubOutWithMock(self._driver.__class__, 'volumeops') + self._driver.volumeops = self._volumeops + datacenter = FakeMor('Datacenter', 'my_dc') + m.StubOutWithMock(self._volumeops, 'get_vmfolder') + self._volumeops.get_vmfolder(datacenter) + + m.ReplayAll() + self._driver._get_volume_group_folder(datacenter) + m.UnsetStubs() + m.VerifyAll() + + def test_select_datastore_summary(self): + """Test _select_datastore_summary.""" + m = self.mox + m.StubOutWithMock(self._driver.__class__, 'volumeops') + self._driver.volumeops = self._volumeops + + datastore1 = FakeMor('Datastore', 'my_ds_1') + datastore2 = FakeMor('Datastore', 'my_ds_2') + datastore3 = FakeMor('Datastore', 'my_ds_3') + datastore4 = FakeMor('Datastore', 'my_ds_4') + datastores = [datastore1, datastore2, datastore3, datastore4] + + m.StubOutWithMock(self._volumeops, 'get_summary') + summary1 = FakeDatastoreSummary(5, 100) + summary2 = FakeDatastoreSummary(25, 100) + summary3 = FakeDatastoreSummary(50, 100) + summary4 = FakeDatastoreSummary(75, 100) + + self._volumeops.get_summary( + datastore1).MultipleTimes().AndReturn(summary1) + self._volumeops.get_summary( + datastore2).MultipleTimes().AndReturn(summary2) + self._volumeops.get_summary( + datastore3).MultipleTimes().AndReturn(summary3) + self._volumeops.get_summary( + datastore4).MultipleTimes().AndReturn(summary4) + + m.StubOutWithMock(self._volumeops, 'get_connected_hosts') + + host1 = FakeMor('HostSystem', 'my_host_1') + host2 = FakeMor('HostSystem', 'my_host_2') + host3 = FakeMor('HostSystem', 'my_host_3') + host4 = FakeMor('HostSystem', 'my_host_4') + + self._volumeops.get_connected_hosts( + datastore1).MultipleTimes().AndReturn([host1, host2, host3, host4]) + self._volumeops.get_connected_hosts( + datastore2).MultipleTimes().AndReturn([host1, host2, host3]) + self._volumeops.get_connected_hosts( + datastore3).MultipleTimes().AndReturn([host1, host2]) + self._volumeops.get_connected_hosts( + datastore4).MultipleTimes().AndReturn([host1, host2]) + + m.ReplayAll() + + summary = self._driver._select_datastore_summary(1, datastores) + self.assertEqual(summary, summary1) + + summary = self._driver._select_datastore_summary(10, datastores) + self.assertEqual(summary, summary2) + + summary = self._driver._select_datastore_summary(40, datastores) + self.assertEqual(summary, summary4) + + self.assertRaises(error_util.VimException, + self._driver._select_datastore_summary, + 100, datastores) + m.UnsetStubs() + m.VerifyAll() + + def test_get_folder_ds_summary(self): + """Test _get_folder_ds_summary.""" + m = self.mox + m.StubOutWithMock(self._driver.__class__, 'volumeops') + self._driver.volumeops = self._volumeops + size = 1 + resource_pool = FakeMor('ResourcePool', 'my_rp') + datacenter = FakeMor('Datacenter', 'my_dc') + m.StubOutWithMock(self._volumeops, 'get_dc') + self._volumeops.get_dc(resource_pool).AndReturn(datacenter) + m.StubOutWithMock(self._driver, '_get_volume_group_folder') + folder = FakeMor('Folder', 'my_fol') + self._driver._get_volume_group_folder(datacenter).AndReturn(folder) + m.StubOutWithMock(self._driver, '_select_datastore_summary') + size = 1 + datastores = [FakeMor('Datastore', 'my_ds')] + self._driver._select_datastore_summary(size * units.GiB, datastores) + + m.ReplayAll() + self._driver._get_folder_ds_summary(size, resource_pool, datastores) + m.UnsetStubs() + m.VerifyAll() + + def test_get_disk_type(self): + """Test _get_disk_type.""" + volume = FakeObject() + volume['volume_type_id'] = None + 
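# With no volume type set, the driver is expected to default to thin provisioning. +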
self.assertEqual(vmdk.VMwareEsxVmdkDriver._get_disk_type(volume), + 'thin') + + def test_init_conn_with_instance_no_backing(self): + """Test initialize_connection with instance and without backing.""" + m = self.mox + m.StubOutWithMock(self._driver.__class__, 'volumeops') + self._driver.volumeops = self._volumeops + m.StubOutWithMock(self._volumeops, 'get_backing') + volume = FakeObject() + volume['name'] = 'volume_name' + volume['id'] = 'volume_id' + volume['size'] = 1 + volume['volume_type_id'] = None + connector = {'instance': 'my_instance'} + self._volumeops.get_backing(volume['name']) + m.StubOutWithMock(self._volumeops, 'get_host') + host = FakeMor('HostSystem', 'my_host') + self._volumeops.get_host(mox.IgnoreArg()).AndReturn(host) + m.StubOutWithMock(self._volumeops, 'get_dss_rp') + resource_pool = FakeMor('ResourcePool', 'my_rp') + datastores = [FakeMor('Datastore', 'my_ds')] + self._volumeops.get_dss_rp(host).AndReturn((datastores, resource_pool)) + m.StubOutWithMock(self._driver, '_get_folder_ds_summary') + folder = FakeMor('Folder', 'my_fol') + summary = FakeDatastoreSummary(1, 1) + self._driver._get_folder_ds_summary(volume['size'], resource_pool, + datastores).AndReturn((folder, + summary)) + backing = FakeMor('VirtualMachine', 'my_back') + m.StubOutWithMock(self._volumeops, 'create_backing') + self._volumeops.create_backing(volume['name'], + volume['size'] * units.MiB, + mox.IgnoreArg(), folder, + resource_pool, host, + mox.IgnoreArg()).AndReturn(backing) + + m.ReplayAll() + conn_info = self._driver.initialize_connection(volume, connector) + self.assertEqual(conn_info['driver_volume_type'], 'vmdk') + self.assertEqual(conn_info['data']['volume'], 'my_back') + self.assertEqual(conn_info['data']['volume_id'], 'volume_id') + m.UnsetStubs() + m.VerifyAll() + + def test_init_conn_without_instance(self): + """Test initialize_connection without instance and a backing.""" + m = self.mox + m.StubOutWithMock(self._driver.__class__, 'volumeops') + self._driver.volumeops = self._volumeops + m.StubOutWithMock(self._volumeops, 'get_backing') + backing = FakeMor('VirtualMachine', 'my_back') + volume = FakeObject() + volume['name'] = 'volume_name' + volume['id'] = 'volume_id' + connector = {} + self._volumeops.get_backing(volume['name']).AndReturn(backing) + + m.ReplayAll() + conn_info = self._driver.initialize_connection(volume, connector) + self.assertEqual(conn_info['driver_volume_type'], 'vmdk') + self.assertEqual(conn_info['data']['volume'], 'my_back') + self.assertEqual(conn_info['data']['volume_id'], 'volume_id') + m.UnsetStubs() + m.VerifyAll() + + def test_create_snapshot_operation(self): + """Test volumeops.create_snapshot.""" + m = self.mox + m.StubOutWithMock(api.VMwareAPISession, 'vim') + self._session.vim = self._vim + m.StubOutWithMock(self._session, 'invoke_api') + name = 'snapshot_name' + description = 'snapshot_desc' + backing = FakeMor('VirtualMachine', 'my_back') + task = FakeMor('Task', 'my_task') + self._session.invoke_api(self._vim, 'CreateSnapshot_Task', backing, + name=name, description=description, + memory=False, quiesce=False).AndReturn(task) + result = FakeMor('VirtualMachineSnapshot', 'my_snap') + success_task_info = FakeTaskInfo('success', result=result) + m.StubOutWithMock(self._session, 'wait_for_task') + self._session.wait_for_task(task).AndReturn(success_task_info) + + m.ReplayAll() + self._volumeops.create_snapshot(backing, name, description) + m.UnsetStubs() + m.VerifyAll() + + def test_create_snapshot_without_backing(self): + """Test vmdk.create_snapshot 
without backing.""" + m = self.mox + m.StubOutWithMock(self._driver.__class__, 'volumeops') + self._driver.volumeops = self._volumeops + m.StubOutWithMock(self._volumeops, 'get_backing') + snapshot = FakeObject() + snapshot['volume_name'] = 'volume_name' + snapshot['name'] = 'snap_name' + snapshot['volume'] = FakeObject() + snapshot['volume']['status'] = 'available' + self._volumeops.get_backing(snapshot['volume_name']) + + m.ReplayAll() + self._driver.create_snapshot(snapshot) + m.UnsetStubs() + m.VerifyAll() + + def test_create_snapshot_with_backing(self): + """Test vmdk.create_snapshot with backing.""" + m = self.mox + m.StubOutWithMock(self._driver.__class__, 'volumeops') + self._driver.volumeops = self._volumeops + m.StubOutWithMock(self._volumeops, 'get_backing') + snapshot = FakeObject() + snapshot['volume_name'] = 'volume_name' + snapshot['name'] = 'snapshot_name' + snapshot['display_description'] = 'snapshot_desc' + snapshot['volume'] = FakeObject() + snapshot['volume']['status'] = 'available' + backing = FakeMor('VirtualMachine', 'my_back') + self._volumeops.get_backing(snapshot['volume_name']).AndReturn(backing) + m.StubOutWithMock(self._volumeops, 'create_snapshot') + self._volumeops.create_snapshot(backing, snapshot['name'], + snapshot['display_description']) + + m.ReplayAll() + self._driver.create_snapshot(snapshot) + m.UnsetStubs() + m.VerifyAll() + + def test_create_snapshot_when_attached(self): + """Test vmdk.create_snapshot when volume is attached.""" + snapshot = FakeObject() + snapshot['volume'] = FakeObject() + snapshot['volume']['status'] = 'in-use' + + self.assertRaises(exception.InvalidVolume, + self._driver.create_snapshot, snapshot) + + def test_get_snapshot_from_tree(self): + """Test _get_snapshot_from_tree.""" + volops = volumeops.VMwareVolumeOps + ret = volops._get_snapshot_from_tree(mox.IgnoreArg(), None) + self.assertIsNone(ret) + name = 'snapshot_name' + snapshot = FakeMor('VirtualMachineSnapshot', 'my_snap') + root = FakeSnapshotTree(name='snapshot_name', snapshot=snapshot) + ret = volops._get_snapshot_from_tree(name, root) + self.assertEqual(ret, snapshot) + snapshot1 = FakeMor('VirtualMachineSnapshot', 'my_snap_1') + root = FakeSnapshotTree(name='snapshot_name_1', snapshot=snapshot1, + childSnapshotList=[root]) + ret = volops._get_snapshot_from_tree(name, root) + self.assertEqual(ret, snapshot) + + def test_get_snapshot(self): + """Test get_snapshot.""" + m = self.mox + m.StubOutWithMock(api.VMwareAPISession, 'vim') + self._session.vim = self._vim + m.StubOutWithMock(self._session, 'invoke_api') + name = 'snapshot_name' + backing = FakeMor('VirtualMachine', 'my_back') + root = FakeSnapshotTree() + tree = FakeSnapshotTree(tree=[root]) + self._session.invoke_api(vim_util, 'get_object_property', + self._session.vim, backing, + 'snapshot').AndReturn(tree) + volops = volumeops.VMwareVolumeOps + m.StubOutWithMock(volops, '_get_snapshot_from_tree') + volops._get_snapshot_from_tree(name, root) + + m.ReplayAll() + self._volumeops.get_snapshot(backing, name) + m.UnsetStubs() + m.VerifyAll() + + def test_delete_snapshot_not_present(self): + """Test volumeops.delete_snapshot, when not present.""" + m = self.mox + m.StubOutWithMock(self._volumeops, 'get_snapshot') + name = 'snapshot_name' + backing = FakeMor('VirtualMachine', 'my_back') + self._volumeops.get_snapshot(backing, name) + + m.ReplayAll() + self._volumeops.delete_snapshot(backing, name) + m.UnsetStubs() + m.VerifyAll() + + def test_delete_snapshot_when_present(self): + """Test volumeops.delete_snapshot, 
when it is present."""
+        m = self.mox
+        m.StubOutWithMock(api.VMwareAPISession, 'vim')
+        self._session.vim = self._vim
+        m.StubOutWithMock(self._session, 'invoke_api')
+        m.StubOutWithMock(self._volumeops, 'get_snapshot')
+        name = 'snapshot_name'
+        backing = FakeMor('VirtualMachine', 'my_back')
+        snapshot = FakeMor('VirtualMachineSnapshot', 'my_snap')
+        self._volumeops.get_snapshot(backing, name).AndReturn(snapshot)
+        task = FakeMor('Task', 'my_task')
+        self._session.invoke_api(self._session.vim,
+                                 'RemoveSnapshot_Task', snapshot,
+                                 removeChildren=False).AndReturn(task)
+        m.StubOutWithMock(self._session, 'wait_for_task')
+        self._session.wait_for_task(task)
+
+        m.ReplayAll()
+        self._volumeops.delete_snapshot(backing, name)
+        m.UnsetStubs()
+        m.VerifyAll()
+
+    def test_delete_snapshot_without_backing(self):
+        """Test delete_snapshot without backing."""
+        m = self.mox
+        m.StubOutWithMock(self._driver.__class__, 'volumeops')
+        self._driver.volumeops = self._volumeops
+        m.StubOutWithMock(self._volumeops, 'get_backing')
+        snapshot = FakeObject()
+        snapshot['volume_name'] = 'volume_name'
+        snapshot['name'] = 'snap_name'
+        snapshot['volume'] = FakeObject()
+        snapshot['volume']['status'] = 'available'
+        self._volumeops.get_backing(snapshot['volume_name'])
+
+        m.ReplayAll()
+        self._driver.delete_snapshot(snapshot)
+        m.UnsetStubs()
+        m.VerifyAll()
+
+    def test_delete_snapshot_with_backing(self):
+        """Test delete_snapshot with backing."""
+        m = self.mox
+        m.StubOutWithMock(self._driver.__class__, 'volumeops')
+        self._driver.volumeops = self._volumeops
+        m.StubOutWithMock(self._volumeops, 'get_backing')
+        snapshot = FakeObject()
+        snapshot['volume_name'] = 'volume_name'
+        snapshot['name'] = 'snap_name'
+        snapshot['volume'] = FakeObject()
+        snapshot['volume']['status'] = 'available'
+        backing = FakeMor('VirtualMachine', 'my_back')
+        self._volumeops.get_backing(snapshot['volume_name']).AndReturn(backing)
+        m.StubOutWithMock(self._volumeops, 'delete_snapshot')
+        self._volumeops.delete_snapshot(backing,
+                                        snapshot['name'])
+
+        m.ReplayAll()
+        self._driver.delete_snapshot(snapshot)
+        m.UnsetStubs()
+        m.VerifyAll()
+
+    def test_delete_snapshot_when_attached(self):
+        """Test delete_snapshot when volume is attached."""
+        snapshot = FakeObject()
+        snapshot['volume'] = FakeObject()
+        snapshot['volume']['status'] = 'in-use'
+
+        self.assertRaises(exception.InvalidVolume,
+                          self._driver.delete_snapshot, snapshot)
+
+    def test_create_cloned_volume_without_backing(self):
+        """Test create_cloned_volume without a backing."""
+        m = self.mox
+        m.StubOutWithMock(self._driver.__class__, 'volumeops')
+        self._driver.volumeops = self._volumeops
+        m.StubOutWithMock(self._volumeops, 'get_backing')
+        volume = FakeObject()
+        volume['name'] = 'volume_name'
+        volume['status'] = 'available'
+        src_vref = FakeObject()
+        src_vref['name'] = 'src_volume_name'
+        self._volumeops.get_backing(src_vref['name'])
+
+        m.ReplayAll()
+        self._driver.create_cloned_volume(volume, src_vref)
+        m.UnsetStubs()
+        m.VerifyAll()
+
+    def test_get_path_name(self):
+        """Test get_path_name."""
+        m = self.mox
+        m.StubOutWithMock(api.VMwareAPISession, 'vim')
+        self._session.vim = self._vim
+        m.StubOutWithMock(self._session, 'invoke_api')
+        backing = FakeMor('VirtualMachine', 'my_back')
+
+        class FakePath(object):
+            def __init__(self, path=None):
+                self.vmPathName = path
+
+        path = FakePath()
+        self._session.invoke_api(vim_util, 'get_object_property', self._vim,
+                                 backing, 'config.files').AndReturn(path)
+
+        m.ReplayAll()
+        
self._volumeops.get_path_name(backing) + m.UnsetStubs() + m.VerifyAll() + + def test_delete_file(self): + """Test _delete_file.""" + m = self.mox + m.StubOutWithMock(api.VMwareAPISession, 'vim') + self._session.vim = self._vim + m.StubOutWithMock(self._session, 'invoke_api') + src_path = 'src_path' + task = FakeMor('Task', 'my_task') + self._session.invoke_api(self._vim, 'DeleteDatastoreFile_Task', + mox.IgnoreArg(), name=src_path, + datacenter=mox.IgnoreArg()).AndReturn(task) + m.StubOutWithMock(self._session, 'wait_for_task') + self._session.wait_for_task(task) + + m.ReplayAll() + self._volumeops.delete_file(src_path) + m.UnsetStubs() + m.VerifyAll() + + def test_clone_backing_by_copying(self): + """Test _clone_backing_by_copying.""" + m = self.mox + m.StubOutWithMock(self._driver.__class__, 'volumeops') + self._driver.volumeops = self._volumeops + volume = FakeObject() + src_vmdk_path = "[datastore] src_vm/src_vm.vmdk" + new_vmdk_path = "[datastore] dest_vm/dest_vm.vmdk" + backing = FakeMor('VirtualMachine', 'my_back') + m.StubOutWithMock(self._driver, '_create_backing_in_inventory') + mux = self._driver._create_backing_in_inventory(volume) + mux.AndReturn(backing) + m.StubOutWithMock(self._volumeops, 'get_vmdk_path') + self._volumeops.get_vmdk_path(backing).AndReturn(new_vmdk_path) + m.StubOutWithMock(self._volumeops, 'get_dc') + datacenter = FakeMor('Datacenter', 'my_dc') + self._volumeops.get_dc(backing).AndReturn(datacenter) + m.StubOutWithMock(self._volumeops, 'delete_vmdk_file') + self._volumeops.delete_vmdk_file(new_vmdk_path, datacenter) + m.StubOutWithMock(self._volumeops, 'copy_vmdk_file') + self._volumeops.copy_vmdk_file(datacenter, src_vmdk_path, + new_vmdk_path) + + m.ReplayAll() + self._driver._clone_backing_by_copying(volume, src_vmdk_path) + m.UnsetStubs() + m.VerifyAll() + + def test_create_cloned_volume_with_backing(self): + """Test create_cloned_volume with a backing.""" + m = self.mox + m.StubOutWithMock(self._driver.__class__, 'volumeops') + self._driver.volumeops = self._volumeops + m.StubOutWithMock(self._volumeops, 'get_backing') + volume = FakeObject() + src_vref = FakeObject() + src_vref['name'] = 'src_snapshot_name' + backing = FakeMor('VirtualMachine', 'my_vm') + self._volumeops.get_backing(src_vref['name']).AndReturn(backing) + m.StubOutWithMock(self._volumeops, 'get_vmdk_path') + src_vmdk_path = "[datastore] src_vm/src_vm.vmdk" + self._volumeops.get_vmdk_path(backing).AndReturn(src_vmdk_path) + m.StubOutWithMock(self._driver, '_clone_backing_by_copying') + self._driver._clone_backing_by_copying(volume, src_vmdk_path) + + m.ReplayAll() + self._driver.create_cloned_volume(volume, src_vref) + m.UnsetStubs() + m.VerifyAll() + + def test_create_volume_from_snapshot_without_backing(self): + """Test create_volume_from_snapshot without a backing.""" + m = self.mox + m.StubOutWithMock(self._driver.__class__, 'volumeops') + self._driver.volumeops = self._volumeops + m.StubOutWithMock(self._volumeops, 'get_backing') + volume = FakeObject() + volume['name'] = 'volume_name' + snapshot = FakeObject() + snapshot['volume_name'] = 'volume_name' + snapshot['name'] = 'snap_name' + self._volumeops.get_backing(snapshot['volume_name']) + + m.ReplayAll() + self._driver.create_volume_from_snapshot(volume, snapshot) + m.UnsetStubs() + m.VerifyAll() + + def test_create_volume_from_snap_without_backing_snap(self): + """Test create_volume_from_snapshot without a backing snapshot.""" + m = self.mox + m.StubOutWithMock(self._driver.__class__, 'volumeops') + self._driver.volumeops = 
self._volumeops + backing = FakeMor('VirtualMachine', 'my_vm') + m.StubOutWithMock(self._volumeops, 'get_backing') + volume = FakeObject() + volume['name'] = 'volume_name' + snapshot = FakeObject() + snapshot['volume_name'] = 'volume_name' + self._volumeops.get_backing(snapshot['volume_name']).AndReturn(backing) + m.StubOutWithMock(self._volumeops, 'get_snapshot') + snapshot['name'] = 'snapshot_name' + self._volumeops.get_snapshot(backing, snapshot['name']) + + m.ReplayAll() + self._driver.create_volume_from_snapshot(volume, snapshot) + m.UnsetStubs() + m.VerifyAll() + + def test_create_volume_from_snapshot(self): + """Test create_volume_from_snapshot.""" + m = self.mox + m.StubOutWithMock(self._driver.__class__, 'volumeops') + self._driver.volumeops = self._volumeops + backing = FakeMor('VirtualMachine', 'my_vm') + m.StubOutWithMock(self._volumeops, 'get_backing') + volume = FakeObject() + snapshot = FakeObject() + snapshot['volume_name'] = 'volume_name' + self._volumeops.get_backing(snapshot['volume_name']).AndReturn(backing) + m.StubOutWithMock(self._volumeops, 'get_snapshot') + snapshot['name'] = 'snapshot_name' + snapshot_mor = FakeMor('VirtualMachineSnapshot', 'my_snap') + self._volumeops.get_snapshot(backing, + snapshot['name']).AndReturn(snapshot_mor) + m.StubOutWithMock(self._volumeops, 'get_vmdk_path') + src_vmdk_path = "[datastore] src_vm/src_vm-001.vmdk" + self._volumeops.get_vmdk_path(snapshot_mor).AndReturn(src_vmdk_path) + m.StubOutWithMock(self._driver, '_clone_backing_by_copying') + self._driver._clone_backing_by_copying(volume, src_vmdk_path) + + m.ReplayAll() + self._driver.create_volume_from_snapshot(volume, snapshot) + m.UnsetStubs() + m.VerifyAll() + + def test_get_entity_name(self): + """Test volumeops get_entity_name.""" + m = self.mox + m.StubOutWithMock(api.VMwareAPISession, 'vim') + self._session.vim = self._vim + m.StubOutWithMock(self._session, 'invoke_api') + entity = FakeMor('VirtualMachine', 'virt') + self._session.invoke_api(vim_util, 'get_object_property', + self._vim, entity, 'name') + + m.ReplayAll() + self._volumeops.get_entity_name(entity) + m.UnsetStubs() + m.VerifyAll() + + def test_get_vmdk_path(self): + """Test volumeops get_vmdk_path.""" + m = self.mox + m.StubOutWithMock(api.VMwareAPISession, 'vim') + self._session.vim = self._vim + m.StubOutWithMock(self._session, 'invoke_api') + backing = FakeMor('VirtualMachine', 'my_back') + vmdk_path = '[datastore 1] folders/myvols/volume-123.vmdk' + + class VirtualDisk: + pass + virtualDisk = VirtualDisk() + + class VirtualDiskFlatVer2BackingInfo: + pass + backingInfo = VirtualDiskFlatVer2BackingInfo() + backingInfo.fileName = vmdk_path + virtualDisk.backing = backingInfo + devices = [FakeObject(), virtualDisk, FakeObject()] + + moxed = self._session.invoke_api(vim_util, 'get_object_property', + self._vim, backing, + 'config.hardware.device') + moxed.AndReturn(devices) + + m.ReplayAll() + actual_vmdk_path = self._volumeops.get_vmdk_path(backing) + self.assertEqual(backingInfo.__class__.__name__, + 'VirtualDiskFlatVer2BackingInfo') + self.assertEqual(virtualDisk.__class__.__name__, 'VirtualDisk') + self.assertEqual(actual_vmdk_path, vmdk_path) + m.UnsetStubs() + m.VerifyAll() + + def test_copy_vmdk_file(self): + """Test copy_vmdk_file.""" + m = self.mox + m.StubOutWithMock(api.VMwareAPISession, 'vim') + self._session.vim = self._vim + m.StubOutWithMock(self._session, 'invoke_api') + dc_ref = FakeMor('Datacenter', 'dc1') + src_path = 'src_path' + dest_path = 'dest_path' + task = FakeMor('Task', 'my_task') + 
self._session.invoke_api(self._vim, 'CopyVirtualDisk_Task', + mox.IgnoreArg(), sourceName=src_path, + sourceDatacenter=dc_ref, destName=dest_path, + destDatacenter=dc_ref, + force=True).AndReturn(task) + m.StubOutWithMock(self._session, 'wait_for_task') + self._session.wait_for_task(task) + + m.ReplayAll() + self._volumeops.copy_vmdk_file(dc_ref, src_path, dest_path) + m.UnsetStubs() + m.VerifyAll() + + def test_delete_vmdk_file(self): + """Test delete_vmdk_file.""" + m = self.mox + m.StubOutWithMock(api.VMwareAPISession, 'vim') + self._session.vim = self._vim + m.StubOutWithMock(self._session, 'invoke_api') + dc_ref = FakeMor('Datacenter', 'dc1') + vmdk_path = 'vmdk_path' + task = FakeMor('Task', 'my_task') + self._session.invoke_api(self._vim, 'DeleteVirtualDisk_Task', + mox.IgnoreArg(), name=vmdk_path, + datacenter=dc_ref).AndReturn(task) + m.StubOutWithMock(self._session, 'wait_for_task') + self._session.wait_for_task(task) + + m.ReplayAll() + self._volumeops.delete_vmdk_file(vmdk_path, dc_ref) + m.UnsetStubs() + m.VerifyAll() + + def test_split_datastore_path(self): + """Test volumeops split_datastore_path.""" + test1 = '[datastore1] myfolder/mysubfolder/myvm.vmx' + (datastore, + folder, + file_name) = volumeops.split_datastore_path(test1) + self.assertEqual(datastore, 'datastore1') + self.assertEqual(folder, 'myfolder/mysubfolder/') + self.assertEqual(file_name, 'myvm.vmx') + test2 = '[datastore2 ] myfolder/myvm.vmdk' + (datastore, + folder, + file_name) = volumeops.split_datastore_path(test2) + self.assertEqual(datastore, 'datastore2') + self.assertEqual(folder, 'myfolder/') + self.assertEqual(file_name, 'myvm.vmdk') + test3 = 'myfolder/myvm.vmdk' + self.assertRaises(IndexError, volumeops.split_datastore_path, test3) + + def test_copy_image_to_volume_non_vmdk(self): + """Test copy_image_to_volume for a non-vmdk disk format.""" + m = self.mox + image_id = 'image-123456789' + image_meta = FakeObject() + image_meta['disk_format'] = 'novmdk' + image_service = m.CreateMock(glance.GlanceImageService) + image_service.show(mox.IgnoreArg(), image_id).AndReturn(image_meta) + + m.ReplayAll() + self.assertRaises(exception.ImageUnacceptable, + self._driver.copy_image_to_volume, + mox.IgnoreArg(), mox.IgnoreArg(), + image_service, image_id) + m.UnsetStubs() + m.VerifyAll() + + def test_copy_image_to_volume_vmdk(self): + """Test copy_image_to_volume with an acceptable vmdk disk format.""" + m = self.mox + m.StubOutWithMock(self._driver.__class__, 'session') + self._driver.session = self._session + m.StubOutWithMock(api.VMwareAPISession, 'vim') + self._session.vim = self._vim + m.StubOutWithMock(self._driver.__class__, 'volumeops') + self._driver.volumeops = self._volumeops + + image_id = 'image-id' + image_meta = FakeObject() + image_meta['disk_format'] = 'vmdk' + image_meta['size'] = 1 * units.MiB + image_meta['properties'] = {'vmware_disktype': 'preallocated'} + image_service = m.CreateMock(glance.GlanceImageService) + image_service.show(mox.IgnoreArg(), image_id).AndReturn(image_meta) + volume = FakeObject() + vol_name = 'volume name' + volume['name'] = vol_name + backing = FakeMor('VirtualMachine', 'my_vm') + m.StubOutWithMock(self._driver, '_create_backing_in_inventory') + self._driver._create_backing_in_inventory(volume).AndReturn(backing) + datastore_name = 'datastore1' + flat_vmdk_path = 'myvolumes/myvm-flat.vmdk' + m.StubOutWithMock(self._driver, '_get_ds_name_flat_vmdk_path') + moxed = self._driver._get_ds_name_flat_vmdk_path(mox.IgnoreArg(), + vol_name) + 
moxed.AndReturn((datastore_name, flat_vmdk_path)) + host = FakeMor('Host', 'my_host') + m.StubOutWithMock(self._volumeops, 'get_host') + self._volumeops.get_host(backing).AndReturn(host) + datacenter = FakeMor('Datacenter', 'my_datacenter') + m.StubOutWithMock(self._volumeops, 'get_dc') + self._volumeops.get_dc(host).AndReturn(datacenter) + datacenter_name = 'my-datacenter' + m.StubOutWithMock(self._volumeops, 'get_entity_name') + self._volumeops.get_entity_name(datacenter).AndReturn(datacenter_name) + flat_path = '[%s] %s' % (datastore_name, flat_vmdk_path) + m.StubOutWithMock(self._volumeops, 'delete_file') + self._volumeops.delete_file(flat_path, datacenter) + client = FakeObject() + client.options = FakeObject() + client.options.transport = FakeObject() + cookies = FakeObject() + client.options.transport.cookiejar = cookies + m.StubOutWithMock(self._vim.__class__, 'client') + self._vim.client = client + m.StubOutWithMock(vmware_images, 'fetch_flat_image') + timeout = self._config.vmware_image_transfer_timeout_secs + vmware_images.fetch_flat_image(mox.IgnoreArg(), timeout, image_service, + image_id, image_size=image_meta['size'], + host=self.IP, + data_center_name=datacenter_name, + datastore_name=datastore_name, + cookies=cookies, + file_path=flat_vmdk_path) + + m.ReplayAll() + self._driver.copy_image_to_volume(mox.IgnoreArg(), volume, + image_service, image_id) + m.UnsetStubs() + m.VerifyAll() + + def test_copy_image_to_volume_stream_optimized(self): + """Test copy_image_to_volume. + + Test with an acceptable vmdk disk format and streamOptimized disk type. + """ + m = self.mox + m.StubOutWithMock(self._driver.__class__, 'session') + self._driver.session = self._session + m.StubOutWithMock(api.VMwareAPISession, 'vim') + self._session.vim = self._vim + m.StubOutWithMock(self._driver.__class__, 'volumeops') + self._driver.volumeops = self._volumeops + + image_id = 'image-id' + size = 5 * units.GiB + size_kb = float(size) / units.KiB + size_gb = float(size) / units.GiB + # image_service.show call + image_meta = FakeObject() + image_meta['disk_format'] = 'vmdk' + image_meta['size'] = size + image_meta['properties'] = {'vmware_disktype': 'streamOptimized'} + image_service = m.CreateMock(glance.GlanceImageService) + image_service.show(mox.IgnoreArg(), image_id).AndReturn(image_meta) + # _select_ds_for_volume call + (host, rp, folder, summary) = (FakeObject(), FakeObject(), + FakeObject(), FakeObject()) + summary.name = "datastore-1" + m.StubOutWithMock(self._driver, '_select_ds_for_volume') + self._driver._select_ds_for_volume(size_gb).AndReturn((host, rp, + folder, + summary)) + # _get_disk_type call + vol_name = 'volume name' + volume = FakeObject() + volume['name'] = vol_name + volume['size'] = size_gb + volume['volume_type_id'] = None # _get_disk_type will return 'thin' + disk_type = 'thin' + # _get_create_spec call + m.StubOutWithMock(self._volumeops, '_get_create_spec') + self._volumeops._get_create_spec(vol_name, 0, disk_type, + summary.name) + + # vim.client.factory.create call + class FakeFactory(object): + def create(self, name): + return mox.MockAnything() + + client = FakeObject() + client.factory = FakeFactory() + m.StubOutWithMock(self._vim.__class__, 'client') + self._vim.client = client + # fetch_stream_optimized_image call + timeout = self._config.vmware_image_transfer_timeout_secs + m.StubOutWithMock(vmware_images, 'fetch_stream_optimized_image') + vmware_images.fetch_stream_optimized_image(mox.IgnoreArg(), timeout, + image_service, image_id, + session=self._session, + 
host=self.IP, + resource_pool=rp, + vm_folder=folder, + vm_create_spec= + mox.IgnoreArg(), + image_size=size) + + m.ReplayAll() + self._driver.copy_image_to_volume(mox.IgnoreArg(), volume, + image_service, image_id) + m.UnsetStubs() + m.VerifyAll() + + def test_copy_volume_to_image_non_vmdk(self): + """Test copy_volume_to_image for a non-vmdk disk format.""" + m = self.mox + image_meta = FakeObject() + image_meta['disk_format'] = 'novmdk' + volume = FakeObject() + volume['name'] = 'vol-name' + volume['instance_uuid'] = None + volume['attached_host'] = None + + m.ReplayAll() + self.assertRaises(exception.ImageUnacceptable, + self._driver.copy_volume_to_image, + mox.IgnoreArg(), volume, + mox.IgnoreArg(), image_meta) + m.UnsetStubs() + m.VerifyAll() + + def test_copy_volume_to_image_when_attached(self): + """Test copy_volume_to_image when volume is attached.""" + m = self.mox + volume = FakeObject() + volume['instance_uuid'] = 'my_uuid' + + m.ReplayAll() + self.assertRaises(exception.InvalidVolume, + self._driver.copy_volume_to_image, + mox.IgnoreArg(), volume, + mox.IgnoreArg(), mox.IgnoreArg()) + m.UnsetStubs() + m.VerifyAll() + + def test_copy_volume_to_image_vmdk(self): + """Test copy_volume_to_image for a valid vmdk disk format.""" + m = self.mox + m.StubOutWithMock(self._driver.__class__, 'session') + self._driver.session = self._session + m.StubOutWithMock(api.VMwareAPISession, 'vim') + self._session.vim = self._vim + m.StubOutWithMock(self._driver.__class__, 'volumeops') + self._driver.volumeops = self._volumeops + + image_id = 'image-id-1' + image_meta = FakeObject() + image_meta['disk_format'] = 'vmdk' + image_meta['id'] = image_id + image_meta['name'] = image_id + image_service = FakeObject() + vol_name = 'volume-123456789' + project_id = 'project-owner-id-123' + volume = FakeObject() + volume['name'] = vol_name + size_gb = 5 + size = size_gb * units.GiB + volume['size'] = size_gb + volume['project_id'] = project_id + volume['instance_uuid'] = None + volume['attached_host'] = None + # volumeops.get_backing + backing = FakeMor("VirtualMachine", "my_vm") + m.StubOutWithMock(self._volumeops, 'get_backing') + self._volumeops.get_backing(vol_name).AndReturn(backing) + # volumeops.get_vmdk_path + datastore_name = 'datastore1' + file_path = 'my_folder/my_nested_folder/my_vm.vmdk' + vmdk_file_path = '[%s] %s' % (datastore_name, file_path) + m.StubOutWithMock(self._volumeops, 'get_vmdk_path') + self._volumeops.get_vmdk_path(backing).AndReturn(vmdk_file_path) + # vmware_images.upload_image + timeout = self._config.vmware_image_transfer_timeout_secs + host_ip = self.IP + m.StubOutWithMock(vmware_images, 'upload_image') + vmware_images.upload_image(mox.IgnoreArg(), timeout, image_service, + image_id, project_id, session=self._session, + host=host_ip, vm=backing, + vmdk_file_path=vmdk_file_path, + vmdk_size=size, + image_name=image_id, + image_version=1) + + m.ReplayAll() + self._driver.copy_volume_to_image(mox.IgnoreArg(), volume, + image_service, image_meta) + m.UnsetStubs() + m.VerifyAll() + + def test_retrieve_properties_ex_fault_checker(self): + """Test retrieve_properties_ex_fault_checker is called.""" + m = self.mox + + class FakeVim(vim.Vim): + def __init__(self): + pass + + @property + def client(self): + + class FakeRetrv(object): + def RetrievePropertiesEx(self, collector): + pass + + def __getattr__(self, name): + if name == 'service': + return FakeRetrv() + + return FakeRetrv() + + def RetrieveServiceContent(self, type='ServiceInstance'): + return mox.MockAnything() + + _vim = 
FakeVim() + m.ReplayAll() + # retrieve_properties_ex_fault_checker throws authentication error + self.assertRaises(error_util.VimFaultException, + _vim.RetrievePropertiesEx, mox.IgnoreArg()) + m.UnsetStubs() + m.VerifyAll() + + +class VMwareVcVmdkDriverTestCase(VMwareEsxVmdkDriverTestCase): + """Test class for VMwareVcVmdkDriver.""" + + def setUp(self): + super(VMwareVcVmdkDriverTestCase, self).setUp() + self._driver = vmdk.VMwareVcVmdkDriver(configuration=self._config) + + def test_create_folder_not_present(self): + """Test create_folder when not present.""" + m = self.mox + m.StubOutWithMock(api.VMwareAPISession, 'vim') + self._session.vim = self._vim + m.StubOutWithMock(self._session, 'invoke_api') + parent_folder = FakeMor('Folder', 'my_par_fol') + child_entities = FakeManagedObjectReference() + self._session.invoke_api(vim_util, 'get_object_property', + self._vim, parent_folder, + 'childEntity').AndReturn(child_entities) + self._session.invoke_api(self._vim, 'CreateFolder', parent_folder, + name='child_folder_name') + + m.ReplayAll() + dc = self._volumeops.create_folder(parent_folder, 'child_folder_name') + m.UnsetStubs() + m.VerifyAll() + + def test_create_folder_already_present(self): + """Test create_folder when already present.""" + m = self.mox + m.StubOutWithMock(api.VMwareAPISession, 'vim') + self._session.vim = self._vim + m.StubOutWithMock(self._session, 'invoke_api') + parent_folder = FakeMor('Folder', 'my_par_fol') + child_folder = FakeMor('Folder', 'my_child_fol') + child_entities = FakeManagedObjectReference([child_folder]) + self._session.invoke_api(vim_util, 'get_object_property', + self._vim, parent_folder, + 'childEntity').AndReturn(child_entities) + self._session.invoke_api(vim_util, 'get_object_property', + self._vim, child_folder, + 'name').AndReturn('child_folder_name') + + m.ReplayAll() + fol = self._volumeops.create_folder(parent_folder, 'child_folder_name') + self.assertEqual(fol, child_folder) + m.UnsetStubs() + m.VerifyAll() + + def test_relocate_backing(self): + """Test relocate_backing.""" + m = self.mox + m.StubOutWithMock(api.VMwareAPISession, 'vim') + self._session.vim = self._vim + m.StubOutWithMock(self._volumeops, '_get_relocate_spec') + datastore = FakeMor('Datastore', 'my_ds') + resource_pool = FakeMor('ResourcePool', 'my_rp') + host = FakeMor('HostSystem', 'my_host') + disk_move_type = 'moveAllDiskBackingsAndAllowSharing' + self._volumeops._get_relocate_spec(datastore, resource_pool, host, + disk_move_type) + m.StubOutWithMock(self._session, 'invoke_api') + backing = FakeMor('VirtualMachine', 'my_back') + task = FakeMor('Task', 'my_task') + self._session.invoke_api(self._vim, 'RelocateVM_Task', + backing, spec=mox.IgnoreArg()).AndReturn(task) + m.StubOutWithMock(self._session, 'wait_for_task') + self._session.wait_for_task(task) + + m.ReplayAll() + self._volumeops.relocate_backing(backing, datastore, + resource_pool, host) + m.UnsetStubs() + m.VerifyAll() + + def test_move_backing_to_folder(self): + """Test move_backing_to_folder.""" + m = self.mox + m.StubOutWithMock(api.VMwareAPISession, 'vim') + self._session.vim = self._vim + m.StubOutWithMock(self._session, 'invoke_api') + backing = FakeMor('VirtualMachine', 'my_back') + folder = FakeMor('Folder', 'my_fol') + task = FakeMor('Task', 'my_task') + self._session.invoke_api(self._vim, 'MoveIntoFolder_Task', + folder, list=[backing]).AndReturn(task) + m.StubOutWithMock(self._session, 'wait_for_task') + self._session.wait_for_task(task) + + m.ReplayAll() + 
self._volumeops.move_backing_to_folder(backing, folder) + m.UnsetStubs() + m.VerifyAll() + + def test_init_conn_with_instance_and_backing(self): + """Test initialize_connection with instance and backing.""" + m = self.mox + m.StubOutWithMock(self._driver.__class__, 'volumeops') + self._driver.volumeops = self._volumeops + m.StubOutWithMock(self._volumeops, 'get_backing') + volume = FakeObject() + volume['name'] = 'volume_name' + volume['id'] = 'volume_id' + volume['size'] = 1 + connector = {'instance': 'my_instance'} + backing = FakeMor('VirtualMachine', 'my_back') + self._volumeops.get_backing(volume['name']).AndReturn(backing) + m.StubOutWithMock(self._volumeops, 'get_host') + host = FakeMor('HostSystem', 'my_host') + self._volumeops.get_host(mox.IgnoreArg()).AndReturn(host) + datastore = FakeMor('Datastore', 'my_ds') + resource_pool = FakeMor('ResourcePool', 'my_rp') + m.StubOutWithMock(self._volumeops, 'get_dss_rp') + self._volumeops.get_dss_rp(host).AndReturn(([datastore], + resource_pool)) + m.StubOutWithMock(self._volumeops, 'get_datastore') + self._volumeops.get_datastore(backing).AndReturn(datastore) + + m.ReplayAll() + conn_info = self._driver.initialize_connection(volume, connector) + self.assertEqual(conn_info['driver_volume_type'], 'vmdk') + self.assertEqual(conn_info['data']['volume'], 'my_back') + self.assertEqual(conn_info['data']['volume_id'], 'volume_id') + m.UnsetStubs() + m.VerifyAll() + + def test_get_volume_group_folder(self): + """Test _get_volume_group_folder.""" + m = self.mox + m.StubOutWithMock(self._driver.__class__, 'volumeops') + self._driver.volumeops = self._volumeops + datacenter = FakeMor('Datacenter', 'my_dc') + m.StubOutWithMock(self._volumeops, 'get_vmfolder') + self._volumeops.get_vmfolder(datacenter) + m.StubOutWithMock(self._volumeops, 'create_folder') + self._volumeops.create_folder(mox.IgnoreArg(), + self._config.vmware_volume_folder) + + m.ReplayAll() + self._driver._get_volume_group_folder(datacenter) + m.UnsetStubs() + m.VerifyAll() + + def test_init_conn_with_instance_and_backing_and_relocation(self): + """Test initialize_connection with backing being relocated.""" + m = self.mox + m.StubOutWithMock(self._driver.__class__, 'volumeops') + self._driver.volumeops = self._volumeops + m.StubOutWithMock(self._volumeops, 'get_backing') + volume = FakeObject() + volume['name'] = 'volume_name' + volume['id'] = 'volume_id' + volume['size'] = 1 + connector = {'instance': 'my_instance'} + backing = FakeMor('VirtualMachine', 'my_back') + self._volumeops.get_backing(volume['name']).AndReturn(backing) + m.StubOutWithMock(self._volumeops, 'get_host') + host = FakeMor('HostSystem', 'my_host') + self._volumeops.get_host(mox.IgnoreArg()).AndReturn(host) + datastore1 = FakeMor('Datastore', 'my_ds_1') + datastore2 = FakeMor('Datastore', 'my_ds_2') + resource_pool = FakeMor('ResourcePool', 'my_rp') + m.StubOutWithMock(self._volumeops, 'get_dss_rp') + self._volumeops.get_dss_rp(host).AndReturn(([datastore1], + resource_pool)) + m.StubOutWithMock(self._volumeops, 'get_datastore') + self._volumeops.get_datastore(backing).AndReturn(datastore2) + m.StubOutWithMock(self._driver, '_get_folder_ds_summary') + folder = FakeMor('Folder', 'my_fol') + summary = FakeDatastoreSummary(1, 1, datastore1) + size = 1 + self._driver._get_folder_ds_summary(size, resource_pool, + [datastore1]).AndReturn((folder, + summary)) + m.StubOutWithMock(self._volumeops, 'relocate_backing') + self._volumeops.relocate_backing(backing, datastore1, + resource_pool, host) + 
m.StubOutWithMock(self._volumeops, 'move_backing_to_folder')
+        self._volumeops.move_backing_to_folder(backing, folder)
+
+        m.ReplayAll()
+        conn_info = self._driver.initialize_connection(volume, connector)
+        self.assertEqual(conn_info['driver_volume_type'], 'vmdk')
+        self.assertEqual(conn_info['data']['volume'], 'my_back')
+        self.assertEqual(conn_info['data']['volume_id'], 'volume_id')
+        m.UnsetStubs()
+        m.VerifyAll()
+
+    def test_get_folder(self):
+        """Test _get_folder."""
+        m = self.mox
+        m.StubOutWithMock(self._volumeops, '_get_parent')
+        self._volumeops._get_parent(mox.IgnoreArg(), 'Folder')
+
+        m.ReplayAll()
+        self._volumeops._get_folder(mox.IgnoreArg())
+        m.UnsetStubs()
+        m.VerifyAll()
+
+    def test_volumeops_clone_backing(self):
+        """Test volumeops.clone_backing."""
+        m = self.mox
+        # stub _get_folder directly so the recorded call below matches
+        m.StubOutWithMock(self._volumeops, '_get_folder')
+        backing = FakeMor('VirtualMachine', 'my_back')
+        folder = FakeMor('Folder', 'my_fol')
+        self._volumeops._get_folder(backing).AndReturn(folder)
+        m.StubOutWithMock(self._volumeops, '_get_clone_spec')
+        name = 'name'
+        snapshot = FakeMor('VirtualMachineSnapshot', 'my_snap')
+        datastore = FakeMor('Datastore', 'my_ds')
+        self._volumeops._get_clone_spec(datastore, mox.IgnoreArg(), snapshot)
+        m.StubOutWithMock(api.VMwareAPISession, 'vim')
+        self._session.vim = self._vim
+        m.StubOutWithMock(self._session, 'invoke_api')
+        task = FakeMor('Task', 'my_task')
+        self._session.invoke_api(self._vim, 'CloneVM_Task', backing,
+                                 folder=folder, name=name,
+                                 spec=mox.IgnoreArg()).AndReturn(task)
+        m.StubOutWithMock(self._session, 'wait_for_task')
+        clone = FakeMor('VirtualMachine', 'my_clone')
+        task_info = FakeTaskInfo('success', clone)
+        self._session.wait_for_task(task).AndReturn(task_info)
+
+        m.ReplayAll()
+        ret = self._volumeops.clone_backing(name, backing, snapshot,
+                                            mox.IgnoreArg(), datastore)
+        self.assertEqual(ret, clone)
+        m.UnsetStubs()
+        m.VerifyAll()
+
+    def test_clone_backing_linked(self):
+        """Test _clone_backing with clone type - linked."""
+        m = self.mox
+        m.StubOutWithMock(self._driver.__class__, 'volumeops')
+        self._driver.volumeops = self._volumeops
+        m.StubOutWithMock(self._volumeops, 'clone_backing')
+        volume = FakeObject()
+        volume['name'] = 'volume_name'
+        self._volumeops.clone_backing(volume['name'], mox.IgnoreArg(),
+                                      mox.IgnoreArg(),
+                                      volumeops.LINKED_CLONE_TYPE,
+                                      mox.IgnoreArg())
+
+        m.ReplayAll()
+        self._driver._clone_backing(volume, mox.IgnoreArg(), mox.IgnoreArg(),
+                                    volumeops.LINKED_CLONE_TYPE)
+        m.UnsetStubs()
+        m.VerifyAll()
+
+    def test_clone_backing_full(self):
+        """Test _clone_backing with clone type - full."""
+        m = self.mox
+        m.StubOutWithMock(self._driver.__class__, 'volumeops')
+        self._driver.volumeops = self._volumeops
+        m.StubOutWithMock(self._volumeops, 'get_host')
+        backing = FakeMor('VirtualMachine', 'my_vm')
+        host = FakeMor('HostSystem', 'my_host')
+        self._volumeops.get_host(backing).AndReturn(host)
+        m.StubOutWithMock(self._volumeops, 'get_dss_rp')
+        datastore = FakeMor('Datastore', 'my_ds')
+        datastores = [datastore]
+        resource_pool = FakeMor('ResourcePool', 'my_rp')
+        self._volumeops.get_dss_rp(host).AndReturn((datastores,
+                                                    resource_pool))
+        m.StubOutWithMock(self._driver, '_select_datastore_summary')
+        volume = FakeObject()
+        volume['name'] = 'volume_name'
+        volume['size'] = 1
+        summary = FakeDatastoreSummary(1, 1, datastore=datastore)
+        self._driver._select_datastore_summary(volume['size'] * units.GiB,
+                                               datastores).AndReturn(summary)
+        m.StubOutWithMock(self._volumeops, 'clone_backing')
+        
self._volumeops.clone_backing(volume['name'], backing,
+                                      mox.IgnoreArg(),
+                                      volumeops.FULL_CLONE_TYPE,
+                                      datastore)
+
+        m.ReplayAll()
+        self._driver._clone_backing(volume, backing, mox.IgnoreArg(),
+                                    volumeops.FULL_CLONE_TYPE)
+        m.UnsetStubs()
+        m.VerifyAll()
+
+    def test_create_volume_from_snapshot(self):
+        """Test create_volume_from_snapshot."""
+        m = self.mox
+        m.StubOutWithMock(self._driver.__class__, 'volumeops')
+        self._driver.volumeops = self._volumeops
+        m.StubOutWithMock(self._volumeops, 'get_backing')
+        snapshot = FakeObject()
+        snapshot['volume_name'] = 'volume_name'
+        snapshot['name'] = 'snapshot_name'
+        backing = FakeMor('VirtualMachine', 'my_back')
+        self._volumeops.get_backing(snapshot['volume_name']).AndReturn(backing)
+        m.StubOutWithMock(self._volumeops, 'get_snapshot')
+        snap_mor = FakeMor('VirtualMachineSnapshot', 'my_snap')
+        self._volumeops.get_snapshot(backing,
+                                     snapshot['name']).AndReturn(snap_mor)
+        volume = FakeObject()
+        volume['volume_type_id'] = None
+        m.StubOutWithMock(self._driver, '_clone_backing')
+        self._driver._clone_backing(volume, backing, snap_mor, mox.IgnoreArg())
+
+        m.ReplayAll()
+        self._driver.create_volume_from_snapshot(volume, snapshot)
+        m.UnsetStubs()
+        m.VerifyAll()
+
+    def test_create_cloned_volume_with_backing(self):
+        """Test create_cloned_volume with clone type - full."""
+        m = self.mox
+        m.StubOutWithMock(self._driver.__class__, 'volumeops')
+        self._driver.volumeops = self._volumeops
+        m.StubOutWithMock(self._volumeops, 'get_backing')
+        backing = FakeMor('VirtualMachine', 'my_back')
+        src_vref = FakeObject()
+        src_vref['name'] = 'src_vol_name'
+        src_vref['status'] = 'available'
+        self._volumeops.get_backing(src_vref['name']).AndReturn(backing)
+        volume = FakeObject()
+        volume['volume_type_id'] = None
+        m.StubOutWithMock(self._driver, '_clone_backing')
+        self._driver._clone_backing(volume, backing, mox.IgnoreArg(),
+                                    volumeops.FULL_CLONE_TYPE)
+
+        m.ReplayAll()
+        self._driver.create_cloned_volume(volume, src_vref)
+        m.UnsetStubs()
+        m.VerifyAll()
+
+    def test_create_linked_cloned_volume_with_backing(self):
+        """Test create_cloned_volume with clone type - linked."""
+        m = self.mox
+        m.StubOutWithMock(self._driver.__class__, 'volumeops')
+        self._driver.volumeops = self._volumeops
+        m.StubOutWithMock(self._volumeops, 'get_backing')
+        backing = FakeMor('VirtualMachine', 'my_back')
+        src_vref = FakeObject()
+        src_vref['name'] = 'src_vol_name'
+        src_vref['status'] = 'available'
+        self._volumeops.get_backing(src_vref['name']).AndReturn(backing)
+        volume = FakeObject()
+        volume['id'] = 'volume_id'
+        m.StubOutWithMock(vmdk.VMwareVcVmdkDriver, '_get_clone_type')
+        moxed = vmdk.VMwareVcVmdkDriver._get_clone_type(volume)
+        moxed.AndReturn(volumeops.LINKED_CLONE_TYPE)
+        m.StubOutWithMock(self._volumeops, 'create_snapshot')
+        name = 'snapshot-%s' % volume['id']
+        snapshot = FakeMor('VirtualMachineSnapshot', 'my_snap')
+        self._volumeops.create_snapshot(backing, name,
+                                        None).AndReturn(snapshot)
+        m.StubOutWithMock(self._driver, '_clone_backing')
+        self._driver._clone_backing(volume, backing, snapshot,
+                                    volumeops.LINKED_CLONE_TYPE)
+
+        m.ReplayAll()
+        self._driver.create_cloned_volume(volume, src_vref)
+        m.UnsetStubs()
+        m.VerifyAll()
+
+    def test_create_linked_cloned_volume_when_attached(self):
+        """Test create_cloned_volume linked clone when volume is attached."""
+        m = self.mox
+        m.StubOutWithMock(self._driver.__class__, 'volumeops')
+        self._driver.volumeops = self._volumeops
+        m.StubOutWithMock(self._volumeops, 'get_backing')
+        backing = FakeMor('VirtualMachine', 'my_back')
+        
src_vref = FakeObject() + src_vref['name'] = 'src_vol_name' + src_vref['status'] = 'in-use' + volume = FakeObject() + self._volumeops.get_backing(src_vref['name']).AndReturn(backing) + m.StubOutWithMock(vmdk.VMwareVcVmdkDriver, '_get_clone_type') + moxed = vmdk.VMwareVcVmdkDriver._get_clone_type(volume) + moxed.AndReturn(volumeops.LINKED_CLONE_TYPE) + + m.ReplayAll() + self.assertRaises(exception.InvalidVolume, + self._driver.create_cloned_volume, volume, src_vref) + m.UnsetStubs() + m.VerifyAll() diff --git a/cinder/tests/test_volume.py b/cinder/tests/test_volume.py index baac8023b4..6dcfe0e307 100644 --- a/cinder/tests/test_volume.py +++ b/cinder/tests/test_volume.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. @@ -20,18 +18,28 @@ """ -import os +import contextlib import datetime - -import mox +import os import shutil +import socket import tempfile +import eventlet +import mock +import mox +from oslo.config import cfg +from taskflow.engines.action_engine import engine + +from cinder.backup import driver as backup_driver +from cinder.brick.iscsi import iscsi +from cinder.brick.local_dev import lvm as brick_lvm from cinder import context -from cinder import exception from cinder import db -from cinder import flags -from cinder.tests.image import fake as fake_image +from cinder import exception +from cinder.image import image_utils +from cinder import keymgr +from cinder.openstack.common import fileutils from cinder.openstack.common import importutils from cinder.openstack.common.notifier import api as notifier_api from cinder.openstack.common.notifier import test_notifier @@ -39,81 +47,237 @@ import cinder.policy from cinder import quota from cinder import test -from cinder.volume import iscsi +from cinder.tests.brick.fake_lvm import FakeBrickLVM +from cinder.tests import conf_fixture +from cinder.tests.image import fake as fake_image +from cinder.tests.keymgr import fake as fake_keymgr +from cinder.tests import utils as tests_utils +from cinder import units +from cinder import utils +import cinder.volume +from cinder.volume import configuration as conf +from cinder.volume import driver +from cinder.volume.drivers import lvm +from cinder.volume import rpcapi as volume_rpcapi +from cinder.volume import utils as volutils +from cinder.volume import volume_types + QUOTAS = quota.QUOTAS -FLAGS = flags.FLAGS +CONF = cfg.CONF -class VolumeTestCase(test.TestCase): - """Test Case for volumes.""" +ENCRYPTION_PROVIDER = 'nova.volume.encryptors.cryptsetup.CryptsetupEncryptor' +fake_opt = [ + cfg.StrOpt('fake_opt', default='fake', help='fake opts') +] + +FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaa' + + +class FakeImageService: + def __init__(self, db_driver=None, image_service=None): + pass + + def show(self, context, image_id): + return {'size': 2 * units.GiB, + 'disk_format': 'raw', + 'container_format': 'bare'} + + +class BaseVolumeTestCase(test.TestCase): + """Test Case for volumes.""" def setUp(self): - super(VolumeTestCase, self).setUp() + super(BaseVolumeTestCase, self).setUp() vol_tmpdir = tempfile.mkdtemp() - self.flags(connection_type='fake', - volumes_dir=vol_tmpdir, + self.flags(volumes_dir=vol_tmpdir, notification_driver=[test_notifier.__name__]) - self.volume = importutils.import_object(FLAGS.volume_manager) + self.volume = importutils.import_object(CONF.volume_manager) self.context = context.get_admin_context() 
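+        # Give the admin context a fixed fake user/project so notification
+        # payloads and quota calls in these tests see stable IDs.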
+        self.context.user_id = 'fake'
+        self.context.project_id = 'fake'
+        self.volume_params = {
+            'status': 'creating',
+            'host': CONF.host,
+            'size': 1}
         self.stubs.Set(iscsi.TgtAdm, '_get_target', self.fake_get_target)
+        self.stubs.Set(brick_lvm.LVM,
+                       'get_all_volume_groups',
+                       self.fake_get_all_volume_groups)
         fake_image.stub_out_image_service(self.stubs)
         test_notifier.NOTIFICATIONS = []
+        self.stubs.Set(brick_lvm.LVM, '_vg_exists', lambda x: True)
+        self.stubs.Set(os.path, 'exists', lambda x: True)
+        self.volume.driver.set_initialized()
+        self.volume.stats = {'allocated_capacity_gb': 0}
+        # keep ordered record of what we execute
+        self.called = []

     def tearDown(self):
         try:
-            shutil.rmtree(FLAGS.volumes_dir)
+            shutil.rmtree(CONF.volumes_dir)
         except OSError:
             pass
         notifier_api._reset_drivers()
-        super(VolumeTestCase, self).tearDown()
+        super(BaseVolumeTestCase, self).tearDown()

     def fake_get_target(obj, iqn):
         return 1

-    @staticmethod
-    def _create_volume(size=0, snapshot_id=None, image_id=None,
-                       metadata=None):
-        """Create a volume object."""
-        vol = {}
-        vol['size'] = size
-        vol['snapshot_id'] = snapshot_id
-        vol['image_id'] = image_id
-        vol['user_id'] = 'fake'
-        vol['project_id'] = 'fake'
-        vol['availability_zone'] = FLAGS.storage_availability_zone
-        vol['status'] = "creating"
-        vol['attach_status'] = "detached"
-        if metadata is not None:
-            vol['metadata'] = metadata
-        return db.volume_create(context.get_admin_context(), vol)
+    def fake_get_all_volume_groups(obj, vg_name=None, no_suffix=True):
+        return [{'name': 'cinder-volumes',
+                 'size': '5.00',
+                 'available': '2.50',
+                 'lv_count': '2',
+                 'uuid': 'vR1JU3-FAKE-C4A9-PQFh-Mctm-9FwA-Xwzc1m'}]
+
+
+class VolumeTestCase(BaseVolumeTestCase):
+
+    def setUp(self):
+        super(VolumeTestCase, self).setUp()
+        self.stubs.Set(volutils, 'clear_volume',
+                       lambda a, b, volume_clear=mox.IgnoreArg(),
+                       volume_clear_size=mox.IgnoreArg(),
+                       lvm_type=mox.IgnoreArg(): None)
+
+    def test_init_host_clears_downloads(self):
+        """Test that init_host will unwedge a volume stuck in downloading."""
+        volume = tests_utils.create_volume(self.context, status='downloading',
+                                           size=0, host=CONF.host)
+        volume_id = volume['id']
+        self.volume.init_host()
+        volume = db.volume_get(context.get_admin_context(), volume_id)
+        self.assertEqual(volume['status'], "error")
+        self.volume.delete_volume(self.context, volume_id)
+
+    @mock.patch.object(QUOTAS, 'reserve')
+    @mock.patch.object(QUOTAS, 'commit')
+    @mock.patch.object(QUOTAS, 'rollback')
+    def test_create_driver_not_initialized(self, rollback, commit, reserve):
+        # NOTE: mock.patch decorators apply bottom-up, so the decorator
+        # closest to the function supplies the first mock argument.
+        self.volume.driver._initialized = False
+
+        def fake_reserve(context, expire=None, project_id=None, **deltas):
+            return ["RESERVATION"]
+
+        def fake_commit_and_rollback(context, reservations, project_id=None):
+            pass
+
+        reserve.return_value = fake_reserve
+        commit.return_value = fake_commit_and_rollback
+        rollback.return_value = fake_commit_and_rollback
+
+        volume = tests_utils.create_volume(
+            self.context,
+            availability_zone=CONF.storage_availability_zone,
+            **self.volume_params)
+
+        volume_id = volume['id']
+        self.assertIsNone(volume['encryption_key_id'])
+        self.assertEqual(len(test_notifier.NOTIFICATIONS), 0)
+        self.assertRaises(exception.DriverNotInitialized,
+                          self.volume.create_volume,
+                          self.context, volume_id)
+
+        # NOTE(flaper87): The volume status should be error.
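+        # (the create flow is expected to mark the volume 'error' when the
+        # driver raises DriverNotInitialized, which the lookup below checks)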
+        volume = db.volume_get(context.get_admin_context(), volume_id)
+        self.assertEqual(volume.status, "error")
+        db.volume_destroy(context.get_admin_context(), volume_id)
+
+    @mock.patch.object(QUOTAS, 'reserve')
+    @mock.patch.object(QUOTAS, 'commit')
+    @mock.patch.object(QUOTAS, 'rollback')
+    def test_delete_driver_not_initialized(self, rollback, commit, reserve):
+        # NOTE(flaper87): Set initialized to False
+        self.volume.driver._initialized = False
+
+        def fake_reserve(context, expire=None, project_id=None, **deltas):
+            return ["RESERVATION"]
+
+        def fake_commit_and_rollback(context, reservations, project_id=None):
+            pass
+
+        reserve.return_value = fake_reserve
+        commit.return_value = fake_commit_and_rollback
+        rollback.return_value = fake_commit_and_rollback
+
+        volume = tests_utils.create_volume(
+            self.context,
+            availability_zone=CONF.storage_availability_zone,
+            **self.volume_params)
+
+        volume_id = volume['id']
+        self.assertIsNone(volume['encryption_key_id'])
+        self.assertEqual(len(test_notifier.NOTIFICATIONS), 0)
+        self.assertRaises(exception.DriverNotInitialized,
+                          self.volume.delete_volume,
+                          self.context, volume_id)
+
+        # NOTE(flaper87): The volume status should be error_deleting.
+        volume = db.volume_get(context.get_admin_context(), volume_id)
+        self.assertEqual(volume.status, "error_deleting")
+        db.volume_destroy(context.get_admin_context(), volume_id)

     def test_create_delete_volume(self):
         """Test volume can be created and deleted."""
         # Need to stub out reserve, commit, and rollback
-        def fake_reserve(context, expire=None, **deltas):
+        def fake_reserve(context, expire=None, project_id=None, **deltas):
             return ["RESERVATION"]

-        def fake_commit(context, reservations):
+        def fake_commit(context, reservations, project_id=None):
             pass

-        def fake_rollback(context, reservations):
+        def fake_rollback(context, reservations, project_id=None):
             pass

         self.stubs.Set(QUOTAS, "reserve", fake_reserve)
         self.stubs.Set(QUOTAS, "commit", fake_commit)
         self.stubs.Set(QUOTAS, "rollback", fake_rollback)

-        volume = self._create_volume()
+        volume = tests_utils.create_volume(
+            self.context,
+            availability_zone=CONF.storage_availability_zone,
+            **self.volume_params)
         volume_id = volume['id']
-        self.assertEquals(len(test_notifier.NOTIFICATIONS), 0)
+        self.assertIsNone(volume['encryption_key_id'])
+        self.assertEqual(len(test_notifier.NOTIFICATIONS), 0)
         self.volume.create_volume(self.context, volume_id)
-        self.assertEquals(len(test_notifier.NOTIFICATIONS), 2)
+        self.assertEqual(len(test_notifier.NOTIFICATIONS), 2)
+        msg = test_notifier.NOTIFICATIONS[0]
+        self.assertEqual(msg['event_type'], 'volume.create.start')
+        expected = {
+            'status': 'creating',
+            'display_name': 'test_volume',
+            'availability_zone': 'nova',
+            'tenant_id': 'fake',
+            'created_at': 'DONTCARE',
+            'volume_id': volume_id,
+            'volume_type': None,
+            'snapshot_id': None,
+            'user_id': 'fake',
+            'launched_at': 'DONTCARE',
+            'size': 1,
+        }
+        self.assertDictMatch(msg['payload'], expected)
+        msg = test_notifier.NOTIFICATIONS[1]
+        self.assertEqual(msg['event_type'], 'volume.create.end')
+        expected['status'] = 'available'
+        self.assertDictMatch(msg['payload'], expected)
         self.assertEqual(volume_id, db.volume_get(
                          context.get_admin_context(),
                          volume_id).id)

         self.volume.delete_volume(self.context, volume_id)
-        self.assertEquals(len(test_notifier.NOTIFICATIONS), 4)
+        vol = db.volume_get(context.get_admin_context(read_deleted='yes'),
+                            volume_id)
+        self.assertEqual(vol['status'], 'deleted')
+        self.assertEqual(len(test_notifier.NOTIFICATIONS), 4)
+        msg = 
test_notifier.NOTIFICATIONS[2] + self.assertEqual(msg['event_type'], 'volume.delete.start') + self.assertDictMatch(msg['payload'], expected) + msg = test_notifier.NOTIFICATIONS[3] + self.assertEqual(msg['event_type'], 'volume.delete.end') + self.assertDictMatch(msg['payload'], expected) self.assertRaises(exception.NotFound, db.volume_get, self.context, @@ -122,7 +286,8 @@ def fake_rollback(context, reservations): def test_create_delete_volume_with_metadata(self): """Test volume can be created with metadata and deleted.""" test_meta = {'fake_key': 'fake_value'} - volume = self._create_volume(0, None, metadata=test_meta) + volume = tests_utils.create_volume(self.context, metadata=test_meta, + **self.volume_params) volume_id = volume['id'] self.volume.create_volume(self.context, volume_id) result_meta = { @@ -135,15 +300,180 @@ def test_create_delete_volume_with_metadata(self): self.context, volume_id) + def test_create_volume_with_invalid_metadata(self): + """Test volume create with too much metadata fails.""" + volume_api = cinder.volume.api.API() + test_meta = {'fake_key': 'fake_value' * 256} + self.assertRaises(exception.InvalidVolumeMetadataSize, + volume_api.create, + self.context, + 1, + 'name', + 'description', + None, + None, + None, + test_meta) + + def test_create_volume_uses_default_availability_zone(self): + """Test setting availability_zone correctly during volume create.""" + volume_api = cinder.volume.api.API() + + def fake_list_availability_zones(): + return ({'name': 'az1', 'available': True}, + {'name': 'az2', 'available': True}, + {'name': 'default-az', 'available': True}) + + self.stubs.Set(volume_api, + 'list_availability_zones', + fake_list_availability_zones) + + # Test backwards compatibility, default_availability_zone not set + CONF.set_override('storage_availability_zone', 'az2') + volume = volume_api.create(self.context, + 1, + 'name', + 'description') + self.assertEqual(volume['availability_zone'], 'az2') + + CONF.set_override('default_availability_zone', 'default-az') + volume = volume_api.create(self.context, + 1, + 'name', + 'description') + self.assertEqual(volume['availability_zone'], 'default-az') + + def test_create_volume_with_volume_type(self): + """Test volume creation with default volume type.""" + def fake_reserve(context, expire=None, project_id=None, **deltas): + return ["RESERVATION"] + + def fake_commit(context, reservations, project_id=None): + pass + + def fake_rollback(context, reservations, project_id=None): + pass + + self.stubs.Set(QUOTAS, "reserve", fake_reserve) + self.stubs.Set(QUOTAS, "commit", fake_commit) + self.stubs.Set(QUOTAS, "rollback", fake_rollback) + + volume_api = cinder.volume.api.API() + + # Create volume with default volume type while default + # volume type doesn't exist, volume_type_id should be NULL + volume = volume_api.create(self.context, + 1, + 'name', + 'description') + self.assertIsNone(volume['volume_type_id']) + self.assertIsNone(volume['encryption_key_id']) + + # Create default volume type + vol_type = conf_fixture.def_vol_type + db.volume_type_create(context.get_admin_context(), + {'name': vol_type, 'extra_specs': {}}) + + db_vol_type = db.volume_type_get_by_name(context.get_admin_context(), + vol_type) + + # Create volume with default volume type + volume = volume_api.create(self.context, + 1, + 'name', + 'description') + self.assertEqual(volume['volume_type_id'], db_vol_type.get('id')) + self.assertIsNone(volume['encryption_key_id']) + + # Create volume with specific volume type + vol_type = 'test' + 
db.volume_type_create(context.get_admin_context(),
+                              {'name': vol_type, 'extra_specs': {}})
+        db_vol_type = db.volume_type_get_by_name(context.get_admin_context(),
+                                                 vol_type)
+
+        volume = volume_api.create(self.context,
+                                   1,
+                                   'name',
+                                   'description',
+                                   volume_type=db_vol_type)
+        self.assertEqual(volume['volume_type_id'], db_vol_type.get('id'))
+
+    def test_create_volume_with_encrypted_volume_type(self):
+        self.stubs.Set(keymgr, "API", fake_keymgr.fake_api)
+
+        ctxt = context.get_admin_context()
+
+        db.volume_type_create(ctxt,
+                              {'id': '61298380-0c12-11e3-bfd6-4b48424183be',
+                               'name': 'LUKS'})
+        db.volume_type_encryption_update_or_create(
+            ctxt,
+            '61298380-0c12-11e3-bfd6-4b48424183be',
+            {'control_location': 'front-end', 'provider': ENCRYPTION_PROVIDER})
+
+        volume_api = cinder.volume.api.API()
+
+        db_vol_type = db.volume_type_get_by_name(ctxt, 'LUKS')
+
+        volume = volume_api.create(self.context,
+                                   1,
+                                   'name',
+                                   'description',
+                                   volume_type=db_vol_type)
+        self.assertEqual(volume['volume_type_id'], db_vol_type.get('id'))
+        self.assertIsNotNone(volume['encryption_key_id'])
+
+    def test_create_delete_volume_with_encrypted_volume_type(self):
+        self.stubs.Set(keymgr, "API", fake_keymgr.fake_api)
+
+        ctxt = context.get_admin_context()
+
+        db.volume_type_create(ctxt,
+                              {'id': '61298380-0c12-11e3-bfd6-4b48424183be',
+                               'name': 'LUKS'})
+        db.volume_type_encryption_update_or_create(
+            ctxt,
+            '61298380-0c12-11e3-bfd6-4b48424183be',
+            {'control_location': 'front-end', 'provider': ENCRYPTION_PROVIDER})
+
+        volume_api = cinder.volume.api.API()
+
+        db_vol_type = db.volume_type_get_by_name(ctxt, 'LUKS')
+
+        volume = volume_api.create(self.context,
+                                   1,
+                                   'name',
+                                   'description',
+                                   volume_type=db_vol_type)
+
+        self.assertIsNotNone(volume.get('encryption_key_id', None))
+        self.assertEqual(volume['volume_type_id'], db_vol_type.get('id'))
+        self.assertIsNotNone(volume['encryption_key_id'])
+
+        volume['host'] = 'fake_host'
+        volume['status'] = 'available'
+        volume_api.delete(self.context, volume)
+
+        volume = db.volume_get(self.context, volume['id'])
+        self.assertEqual('deleting', volume['status'])
+
+        db.volume_destroy(self.context, volume['id'])
+        self.assertRaises(exception.NotFound,
+                          db.volume_get,
+                          self.context,
+                          volume['id'])
+
     def test_delete_busy_volume(self):
         """Test volume survives deletion if driver reports it as busy."""
-        volume = self._create_volume()
+        volume = tests_utils.create_volume(self.context, **self.volume_params)
         volume_id = volume['id']
         self.volume.create_volume(self.context, volume_id)

         self.mox.StubOutWithMock(self.volume.driver, 'delete_volume')
-        self.volume.driver.delete_volume(mox.IgnoreArg()) \
-            .AndRaise(exception.VolumeIsBusy)
+        self.volume.driver.delete_volume(
+            mox.IgnoreArg()).AndRaise(exception.VolumeIsBusy(
+                volume_name='fake'))
         self.mox.ReplayAll()
         res = self.volume.delete_volume(self.context, volume_id)
         self.assertEqual(True, res)
@@ -154,58 +484,543 @@ def test_delete_busy_volume(self):
         self.mox.UnsetStubs()
         self.volume.delete_volume(self.context, volume_id)

+    def test_delete_volume_in_error_extending(self):
+        """Test volume can be deleted in error_extending status."""
+        # create a volume
+        volume = tests_utils.create_volume(self.context, **self.volume_params)
+        self.volume.create_volume(self.context, volume['id'])
+
+        # delete 'error_extending' volume
+        db.volume_update(self.context, volume['id'],
+                         {'status': 'error_extending'})
+        self.volume.delete_volume(self.context, volume['id'])
+        self.assertRaises(exception.NotFound, db.volume_get,
+                          self.context, 
volume['id']) + def test_create_volume_from_snapshot(self): """Test volume can be created from a snapshot.""" - volume_src = self._create_volume() + volume_src = tests_utils.create_volume(self.context, + **self.volume_params) self.volume.create_volume(self.context, volume_src['id']) snapshot_id = self._create_snapshot(volume_src['id'])['id'] self.volume.create_snapshot(self.context, volume_src['id'], snapshot_id) - volume_dst = self._create_volume(0, snapshot_id) + volume_dst = tests_utils.create_volume(self.context, + snapshot_id=snapshot_id, + **self.volume_params) self.volume.create_volume(self.context, volume_dst['id'], snapshot_id) self.assertEqual(volume_dst['id'], db.volume_get( context.get_admin_context(), volume_dst['id']).id) - self.assertEqual(snapshot_id, db.volume_get( - context.get_admin_context(), - volume_dst['id']).snapshot_id) + self.assertEqual(snapshot_id, + db.volume_get(context.get_admin_context(), + volume_dst['id']).snapshot_id) self.volume.delete_volume(self.context, volume_dst['id']) self.volume.delete_snapshot(self.context, snapshot_id) self.volume.delete_volume(self.context, volume_src['id']) - def test_too_big_volume(self): - """Ensure failure if a too large of a volume is requested.""" - # FIXME(vish): validation needs to move into the data layer in - # volume_create - return True - try: - volume = self._create_volume(1001) - self.volume.create_volume(self.context, volume) - self.fail("Should have thrown TypeError") - except TypeError: - pass + def test_create_snapshot_driver_not_initialized(self): + volume_src = tests_utils.create_volume(self.context, + **self.volume_params) + self.volume.create_volume(self.context, volume_src['id']) + snapshot_id = self._create_snapshot(volume_src['id'])['id'] + + # NOTE(flaper87): Set initialized to False + self.volume.driver._initialized = False + + self.assertRaises(exception.DriverNotInitialized, + self.volume.create_snapshot, + self.context, volume_src['id'], + snapshot_id) + + # NOTE(flaper87): The volume status should be error. + snapshot = db.snapshot_get(context.get_admin_context(), snapshot_id) + self.assertEqual(snapshot.status, "error") + + # NOTE(flaper87): Set initialized to True, + # lets cleanup the mess + self.volume.driver._initialized = True + self.volume.delete_snapshot(self.context, snapshot_id) + self.volume.delete_volume(self.context, volume_src['id']) + + def _mock_synchronized(self, name, *s_args, **s_kwargs): + def inner_sync1(f): + def inner_sync2(*args, **kwargs): + self.called.append('lock-%s' % (name)) + ret = f(*args, **kwargs) + self.called.append('unlock-%s' % (name)) + return ret + return inner_sync2 + return inner_sync1 + + def test_create_volume_from_snapshot_check_locks(self): + # mock the synchroniser so we can record events + self.stubs.Set(utils, 'synchronized', self._mock_synchronized) + + self.stubs.Set(self.volume.driver, 'create_volume_from_snapshot', + lambda *args, **kwargs: None) + + orig_flow = engine.ActionEngine.run + + def mock_flow_run(*args, **kwargs): + # ensure the lock has been taken + self.assertEqual(len(self.called), 1) + # now proceed with the flow. 
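+            # the matching 'unlock-...' entry should appear only after the
+            # flow below finishes, which the counts asserted later rely on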
+ ret = orig_flow(*args, **kwargs) + return ret + + # create source volume + src_vol = tests_utils.create_volume(self.context, **self.volume_params) + src_vol_id = src_vol['id'] + + # no lock + self.volume.create_volume(self.context, src_vol_id) + + snap_id = self._create_snapshot(src_vol_id)['id'] + # no lock + self.volume.create_snapshot(self.context, src_vol_id, snap_id) + + dst_vol = tests_utils.create_volume(self.context, + snapshot_id=snap_id, + **self.volume_params) + dst_vol_id = dst_vol['id'] + admin_ctxt = context.get_admin_context() + + # mock the flow runner so we can do some checks + self.stubs.Set(engine.ActionEngine, 'run', mock_flow_run) + + # locked + self.volume.create_volume(self.context, volume_id=dst_vol_id, + snapshot_id=snap_id) + self.assertEqual(len(self.called), 2) + self.assertEqual(dst_vol_id, db.volume_get(admin_ctxt, dst_vol_id).id) + self.assertEqual(snap_id, + db.volume_get(admin_ctxt, dst_vol_id).snapshot_id) + + # locked + self.volume.delete_volume(self.context, dst_vol_id) + self.assertEqual(len(self.called), 4) + + # locked + self.volume.delete_snapshot(self.context, snap_id) + self.assertEqual(len(self.called), 6) + + # locked + self.volume.delete_volume(self.context, src_vol_id) + self.assertEqual(len(self.called), 8) + + self.assertEqual(self.called, + ['lock-%s' % ('%s-delete_snapshot' % (snap_id)), + 'unlock-%s' % ('%s-delete_snapshot' % (snap_id)), + 'lock-%s' % ('%s-delete_volume' % (dst_vol_id)), + 'unlock-%s' % ('%s-delete_volume' % (dst_vol_id)), + 'lock-%s' % ('%s-delete_snapshot' % (snap_id)), + 'unlock-%s' % ('%s-delete_snapshot' % (snap_id)), + 'lock-%s' % ('%s-delete_volume' % (src_vol_id)), + 'unlock-%s' % ('%s-delete_volume' % (src_vol_id))]) + + def test_create_volume_from_volume_check_locks(self): + # mock the synchroniser so we can record events + self.stubs.Set(utils, 'synchronized', self._mock_synchronized) + + orig_flow = engine.ActionEngine.run + + def mock_flow_run(*args, **kwargs): + # ensure the lock has been taken + self.assertEqual(len(self.called), 1) + # now proceed with the flow. 
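+            # utils.synchronized is stubbed with _mock_synchronized, so
+            # lock acquire/release shows up as 'lock-...'/'unlock-...'
+            # entries in self.called instead of taking a real lock.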
+ ret = orig_flow(*args, **kwargs) + return ret + + # create source volume + src_vol = tests_utils.create_volume(self.context, **self.volume_params) + src_vol_id = src_vol['id'] + + # no lock + self.volume.create_volume(self.context, src_vol_id) + + dst_vol = tests_utils.create_volume(self.context, + source_volid=src_vol_id, + **self.volume_params) + dst_vol_id = dst_vol['id'] + admin_ctxt = context.get_admin_context() + + # mock the flow runner so we can do some checks + self.stubs.Set(engine.ActionEngine, 'run', mock_flow_run) + + # locked + self.volume.create_volume(self.context, volume_id=dst_vol_id, + source_volid=src_vol_id) + self.assertEqual(len(self.called), 2) + self.assertEqual(dst_vol_id, db.volume_get(admin_ctxt, dst_vol_id).id) + self.assertEqual(src_vol_id, + db.volume_get(admin_ctxt, dst_vol_id).source_volid) + + # locked + self.volume.delete_volume(self.context, dst_vol_id) + self.assertEqual(len(self.called), 4) + + # locked + self.volume.delete_volume(self.context, src_vol_id) + self.assertEqual(len(self.called), 6) + + self.assertEqual(self.called, + ['lock-%s' % ('%s-delete_volume' % (src_vol_id)), + 'unlock-%s' % ('%s-delete_volume' % (src_vol_id)), + 'lock-%s' % ('%s-delete_volume' % (dst_vol_id)), + 'unlock-%s' % ('%s-delete_volume' % (dst_vol_id)), + 'lock-%s' % ('%s-delete_volume' % (src_vol_id)), + 'unlock-%s' % ('%s-delete_volume' % (src_vol_id))]) + + def test_create_volume_from_volume_delete_lock_taken(self): + # create source volume + src_vol = tests_utils.create_volume(self.context, **self.volume_params) + src_vol_id = src_vol['id'] + + # no lock + self.volume.create_volume(self.context, src_vol_id) + + dst_vol = tests_utils.create_volume(self.context, + source_volid=src_vol_id, + **self.volume_params) + dst_vol_id = dst_vol['id'] + admin_ctxt = context.get_admin_context() + + orig_elevated = self.context.elevated + + ctxt_deepcopy = self.context.deepcopy() + gthreads = [] + + def mock_elevated(*args, **kwargs): + # unset mock so it is only called once + self.stubs.Set(self.context, 'elevated', orig_elevated) + + # we expect this to block and then fail + t = eventlet.spawn(self.volume.create_volume, + ctxt_deepcopy, + volume_id=dst_vol_id, source_volid=src_vol_id) + gthreads.append(t) + + return orig_elevated(*args, **kwargs) + + # mock something from early on in the delete operation and within the + # lock so that when we do the create we expect it to block. + self.stubs.Set(self.context, 'elevated', mock_elevated) + + # locked + self.volume.delete_volume(self.context, src_vol_id) + + # we expect the volume create to fail with the following err since the + # source volume was deleted while the create was locked. Note that the + # volume is still in the db since it was created by the test prior to + # calling manager.create_volume. + self.assertRaises(exception.VolumeNotFound, gthreads[0].wait) + + def test_create_volume_from_snapshot_delete_lock_taken(self): + # create source volume + src_vol = tests_utils.create_volume(self.context, **self.volume_params) + src_vol_id = src_vol['id'] + + # no lock + self.volume.create_volume(self.context, src_vol_id) + + # create snapshot + snap_id = self._create_snapshot(src_vol_id)['id'] + # no lock + self.volume.create_snapshot(self.context, src_vol_id, snap_id) + + # create vol from snapshot... 
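+        # The destination volume row is created directly in the DB here;
+        # manager.create_volume runs later, in the thread spawned while
+        # delete_snapshot still holds the snapshot lock.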
+ dst_vol = tests_utils.create_volume(self.context, + source_volid=src_vol_id, + **self.volume_params) + dst_vol_id = dst_vol['id'] + admin_ctxt = context.get_admin_context() + + orig_elevated = self.context.elevated + + ctxt_deepcopy = self.context.deepcopy() + gthreads = [] + + def mock_elevated(*args, **kwargs): + # unset mock so it is only called once + self.stubs.Set(self.context, 'elevated', orig_elevated) + + # We expect this to block and then fail + t = eventlet.spawn(self.volume.create_volume, ctxt_deepcopy, + volume_id=dst_vol_id, snapshot_id=snap_id) + gthreads.append(t) + + return orig_elevated(*args, **kwargs) + + # mock something from early on in the delete operation and within the + # lock so that when we do the create we expect it to block. + self.stubs.Set(self.context, 'elevated', mock_elevated) + + # locked + self.volume.delete_snapshot(self.context, snap_id) + + # we expect the volume create to fail with the following err since the + # snapshot was deleted while the create was locked. Note that the + # volume is still in the db since it was created by the test prior to + # calling manager.create_volume. + self.assertRaises(exception.SnapshotNotFound, gthreads[0].wait) + + # locked + self.volume.delete_volume(self.context, src_vol_id) + # make sure it is gone + self.assertRaises(exception.VolumeNotFound, db.volume_get, + self.context, src_vol_id) + + def test_create_volume_from_snapshot_with_encryption(self): + """Test volume can be created from a snapshot of + an encrypted volume. + """ + self.stubs.Set(keymgr, 'API', fake_keymgr.fake_api) + + ctxt = context.get_admin_context() + + db.volume_type_create(ctxt, + {'id': '61298380-0c12-11e3-bfd6-4b48424183be', + 'name': 'LUKS'}) + db.volume_type_encryption_update_or_create( + ctxt, + '61298380-0c12-11e3-bfd6-4b48424183be', + {'control_location': 'front-end', 'provider': ENCRYPTION_PROVIDER}) - def test_run_attach_detach_volume(self): + volume_api = cinder.volume.api.API() + + db_vol_type = db.volume_type_get_by_name(context.get_admin_context(), + 'LUKS') + volume_src = volume_api.create(self.context, + 1, + 'name', + 'description', + volume_type=db_vol_type) + snapshot_ref = volume_api.create_snapshot_force(self.context, + volume_src, + 'name', + 'description') + snapshot_ref['status'] = 'available' # status must be available + volume_dst = volume_api.create(self.context, + 1, + 'name', + 'description', + snapshot=snapshot_ref) + self.assertEqual(volume_dst['id'], + db.volume_get( + context.get_admin_context(), + volume_dst['id']).id) + self.assertEqual(snapshot_ref['id'], + db.volume_get(context.get_admin_context(), + volume_dst['id']).snapshot_id) + + # ensure encryption keys match + self.assertIsNotNone(volume_src['encryption_key_id']) + self.assertIsNotNone(volume_dst['encryption_key_id']) + + key_manager = volume_api.key_manager # must use *same* key manager + volume_src_key = key_manager.get_key(self.context, + volume_src['encryption_key_id']) + volume_dst_key = key_manager.get_key(self.context, + volume_dst['encryption_key_id']) + self.assertEqual(volume_src_key, volume_dst_key) + + def test_create_volume_from_encrypted_volume(self): + """Test volume can be created from an encrypted volume.""" + self.stubs.Set(keymgr, 'API', fake_keymgr.fake_api) + + volume_api = cinder.volume.api.API() + + ctxt = context.get_admin_context() + + db.volume_type_create(ctxt, + {'id': '61298380-0c12-11e3-bfd6-4b48424183be', + 'name': 'LUKS'}) + db.volume_type_encryption_update_or_create( + ctxt, + 
'61298380-0c12-11e3-bfd6-4b48424183be', + {'control_location': 'front-end', 'provider': ENCRYPTION_PROVIDER}) + + db_vol_type = db.volume_type_get_by_name(context.get_admin_context(), + 'LUKS') + volume_src = volume_api.create(self.context, + 1, + 'name', + 'description', + volume_type=db_vol_type) + volume_src['status'] = 'available' # status must be available + volume_dst = volume_api.create(self.context, + 1, + 'name', + 'description', + source_volume=volume_src) + self.assertEqual(volume_dst['id'], + db.volume_get(context.get_admin_context(), + volume_dst['id']).id) + self.assertEqual(volume_src['id'], + db.volume_get(context.get_admin_context(), + volume_dst['id']).source_volid) + + # ensure encryption keys match + self.assertIsNotNone(volume_src['encryption_key_id']) + self.assertIsNotNone(volume_dst['encryption_key_id']) + + key_manager = volume_api.key_manager # must use *same* key manager + volume_src_key = key_manager.get_key(self.context, + volume_src['encryption_key_id']) + volume_dst_key = key_manager.get_key(self.context, + volume_dst['encryption_key_id']) + self.assertEqual(volume_src_key, volume_dst_key) + + def test_create_volume_from_snapshot_fail_bad_size(self): + """Test volume can't be created from snapshot with bad volume size.""" + volume_api = cinder.volume.api.API() + snapshot = {'id': 1234, + 'status': 'available', + 'volume_size': 10} + self.assertRaises(exception.InvalidInput, + volume_api.create, + self.context, + size=1, + name='fake_name', + description='fake_desc', + snapshot=snapshot) + + def test_create_volume_from_snapshot_fail_wrong_az(self): + """Test volume can't be created from snapshot in a different az.""" + volume_api = cinder.volume.api.API() + + def fake_list_availability_zones(): + return ({'name': 'nova', 'available': True}, + {'name': 'az2', 'available': True}) + + self.stubs.Set(volume_api, + 'list_availability_zones', + fake_list_availability_zones) + + volume_src = tests_utils.create_volume(self.context, + availability_zone='az2', + **self.volume_params) + self.volume.create_volume(self.context, volume_src['id']) + snapshot = self._create_snapshot(volume_src['id']) + self.volume.create_snapshot(self.context, volume_src['id'], + snapshot['id']) + snapshot = db.snapshot_get(self.context, snapshot['id']) + + volume_dst = volume_api.create(self.context, + size=1, + name='fake_name', + description='fake_desc', + snapshot=snapshot) + self.assertEqual(volume_dst['availability_zone'], 'az2') + + self.assertRaises(exception.InvalidInput, + volume_api.create, + self.context, + size=1, + name='fake_name', + description='fake_desc', + snapshot=snapshot, + availability_zone='nova') + + def test_create_volume_with_invalid_exclusive_options(self): + """Test volume create with multiple exclusive options fails.""" + volume_api = cinder.volume.api.API() + self.assertRaises(exception.InvalidInput, + volume_api.create, + self.context, + 1, + 'name', + 'description', + snapshot='fake_id', + image_id='fake_id', + source_volume='fake_id') + + @mock.patch.object(db, 'volume_get') + @mock.patch.object(db, 'volume_admin_metadata_get') + def test_initialize_connection_fetchqos(self, + _mock_volume_admin_metadata_get, + _mock_volume_get): + """Make sure initialize_connection returns correct information.""" + _mock_volume_get.return_value = {'volume_type_id': 'fake_type_id', + 'volume_admin_metadata': {}} + _mock_volume_admin_metadata_get.return_value = {} + connector = {'ip': 'IP', 'initiator': 'INITIATOR'} + qos_values = {'consumer': 'front-end', + 'specs': { + 
'key1': 'value1',
+                          'key2': 'value2'}
+                      }
+
+        with contextlib.nested(
+            mock.patch.object(cinder.volume.volume_types,
+                              'get_volume_type_qos_specs'),
+            mock.patch.object(cinder.tests.fake_driver.FakeISCSIDriver,
+                              'initialize_connection')
+        ) as (type_qos, driver_init):
+            type_qos.return_value = dict(qos_specs=qos_values)
+            driver_init.return_value = {'data': {}}
+            qos_specs_expected = {'key1': 'value1',
+                                  'key2': 'value2'}
+            # initialize_connection() passes on qos_specs that are designated
+            # to be consumed by the front-end, or by both front-end and
+            # back-end
+            conn_info = self.volume.initialize_connection(self.context,
+                                                          'fake_volume_id',
+                                                          connector)
+            self.assertDictMatch(qos_specs_expected,
+                                 conn_info['data']['qos_specs'])
+
+            qos_values.update({'consumer': 'both'})
+            conn_info = self.volume.initialize_connection(self.context,
+                                                          'fake_volume_id',
+                                                          connector)
+            self.assertDictMatch(qos_specs_expected,
+                                 conn_info['data']['qos_specs'])
+            # initialize_connection() skips qos_specs that are designated to
+            # be consumed by the back-end only
+            qos_values.update({'consumer': 'back-end'})
+            type_qos.return_value = dict(qos_specs=qos_values)
+            conn_info = self.volume.initialize_connection(self.context,
+                                                          'fake_volume_id',
+                                                          connector)
+            self.assertIsNone(conn_info['data']['qos_specs'])
+
+    def test_run_attach_detach_volume_for_instance(self):
         """Make sure volume can be attached and detached from instance."""
-        instance_uuid = '12345678-1234-5678-1234-567812345678'
         mountpoint = "/dev/sdf"
-        volume = self._create_volume()
+        # attach the volume to the instance, then detach it
+        instance_uuid = '12345678-1234-5678-1234-567812345678'
+        volume = tests_utils.create_volume(self.context,
+                                           admin_metadata={'readonly': 'True'},
+                                           **self.volume_params)
         volume_id = volume['id']
         self.volume.create_volume(self.context, volume_id)
-        db.volume_attached(self.context, volume_id, instance_uuid, mountpoint)
+        self.volume.attach_volume(self.context, volume_id, instance_uuid,
+                                  None, mountpoint, 'ro')
         vol = db.volume_get(context.get_admin_context(), volume_id)
         self.assertEqual(vol['status'], "in-use")
         self.assertEqual(vol['attach_status'], "attached")
         self.assertEqual(vol['mountpoint'], mountpoint)
         self.assertEqual(vol['instance_uuid'], instance_uuid)
+        self.assertIsNone(vol['attached_host'])
+        admin_metadata = vol['volume_admin_metadata']
+        self.assertEqual(len(admin_metadata), 2)
+        self.assertEqual(admin_metadata[0]['key'], 'readonly')
+        self.assertEqual(admin_metadata[0]['value'], 'True')
+        self.assertEqual(admin_metadata[1]['key'], 'attached_mode')
+        self.assertEqual(admin_metadata[1]['value'], 'ro')
+        connector = {'initiator': 'iqn.2012-07.org.fake:01'}
+        conn_info = self.volume.initialize_connection(self.context,
+                                                      volume_id, connector)
+        self.assertEqual(conn_info['data']['access_mode'], 'ro')
         self.assertRaises(exception.VolumeAttached,
                           self.volume.delete_volume,
                           self.context,
                           volume_id)
-        db.volume_detached(self.context, volume_id)
+        self.volume.detach_volume(self.context, volume_id)
         vol = db.volume_get(self.context, volume_id)
         self.assertEqual(vol['status'], "available")
@@ -215,6 +1030,264 @@ def test_run_attach_detach_volume(self):
                           self.context,
                           volume_id)

+    def test_run_attach_detach_volume_for_host(self):
+        """Make sure volume can be attached and detached from host."""
+        mountpoint = "/dev/sdf"
+        volume = tests_utils.create_volume(
+            self.context,
+            admin_metadata={'readonly': 'False'},
+            **self.volume_params)
+        volume_id = volume['id']
+        self.volume.create_volume(self.context, volume_id)
+
self.volume.attach_volume(self.context, volume_id, None, + 'fake_host', mountpoint, 'rw') + vol = db.volume_get(context.get_admin_context(), volume_id) + self.assertEqual(vol['status'], "in-use") + self.assertEqual(vol['attach_status'], "attached") + self.assertEqual(vol['mountpoint'], mountpoint) + self.assertIsNone(vol['instance_uuid']) + # sanitized, conforms to RFC-952 and RFC-1123 specs. + self.assertEqual(vol['attached_host'], 'fake-host') + admin_metadata = vol['volume_admin_metadata'] + self.assertEqual(len(admin_metadata), 2) + self.assertEqual(admin_metadata[0]['key'], 'readonly') + self.assertEqual(admin_metadata[0]['value'], 'False') + self.assertEqual(admin_metadata[1]['key'], 'attached_mode') + self.assertEqual(admin_metadata[1]['value'], 'rw') + connector = {'initiator': 'iqn.2012-07.org.fake:01'} + conn_info = self.volume.initialize_connection(self.context, + volume_id, connector) + self.assertEqual(conn_info['data']['access_mode'], 'rw') + + self.assertRaises(exception.VolumeAttached, + self.volume.delete_volume, + self.context, + volume_id) + self.volume.detach_volume(self.context, volume_id) + vol = db.volume_get(self.context, volume_id) + self.assertEqual(vol['status'], "available") + + self.volume.delete_volume(self.context, volume_id) + self.assertRaises(exception.VolumeNotFound, + db.volume_get, + self.context, + volume_id) + + def test_run_attach_detach_volume_with_attach_mode(self): + instance_uuid = '12345678-1234-5678-1234-567812345678' + mountpoint = "/dev/sdf" + volume = tests_utils.create_volume(self.context, + admin_metadata={'readonly': 'True'}, + **self.volume_params) + volume_id = volume['id'] + db.volume_update(self.context, volume_id, {'status': 'available', + 'mountpoint': None, + 'instance_uuid': None, + 'attached_host': None, + 'attached_mode': None}) + self.volume.attach_volume(self.context, volume_id, instance_uuid, + None, mountpoint, 'ro') + vol = db.volume_get(context.get_admin_context(), volume_id) + self.assertEqual(vol['status'], "in-use") + self.assertEqual(vol['attach_status'], "attached") + self.assertEqual(vol['mountpoint'], mountpoint) + self.assertEqual(vol['instance_uuid'], instance_uuid) + self.assertIsNone(vol['attached_host']) + admin_metadata = vol['volume_admin_metadata'] + self.assertEqual(len(admin_metadata), 2) + self.assertEqual(admin_metadata[0]['key'], 'readonly') + self.assertEqual(admin_metadata[0]['value'], 'True') + self.assertEqual(admin_metadata[1]['key'], 'attached_mode') + self.assertEqual(admin_metadata[1]['value'], 'ro') + connector = {'initiator': 'iqn.2012-07.org.fake:01'} + conn_info = self.volume.initialize_connection(self.context, + volume_id, connector) + self.assertEqual(conn_info['data']['access_mode'], 'ro') + + self.volume.detach_volume(self.context, volume_id) + vol = db.volume_get(self.context, volume_id) + self.assertEqual(vol['status'], "available") + self.assertEqual(vol['attach_status'], "detached") + self.assertIsNone(vol['mountpoint']) + self.assertIsNone(vol['instance_uuid']) + self.assertIsNone(vol['attached_host']) + admin_metadata = vol['volume_admin_metadata'] + self.assertEqual(len(admin_metadata), 1) + self.assertEqual(admin_metadata[0]['key'], 'readonly') + self.assertEqual(admin_metadata[0]['value'], 'True') + + self.volume.attach_volume(self.context, volume_id, None, + 'fake_host', mountpoint, 'ro') + vol = db.volume_get(context.get_admin_context(), volume_id) + self.assertEqual(vol['status'], "in-use") + self.assertEqual(vol['attach_status'], "attached") + 
self.assertEqual(vol['mountpoint'], mountpoint)
+        self.assertIsNone(vol['instance_uuid'])
+        self.assertEqual(vol['attached_host'], 'fake-host')
+        admin_metadata = vol['volume_admin_metadata']
+        self.assertEqual(len(admin_metadata), 2)
+        self.assertEqual(admin_metadata[0]['key'], 'readonly')
+        self.assertEqual(admin_metadata[0]['value'], 'True')
+        self.assertEqual(admin_metadata[1]['key'], 'attached_mode')
+        self.assertEqual(admin_metadata[1]['value'], 'ro')
+        connector = {'initiator': 'iqn.2012-07.org.fake:01'}
+        conn_info = self.volume.initialize_connection(self.context,
+                                                      volume_id, connector)
+        self.assertEqual(conn_info['data']['access_mode'], 'ro')
+
+        self.volume.detach_volume(self.context, volume_id)
+        vol = db.volume_get(self.context, volume_id)
+        self.assertEqual(vol['status'], "available")
+        self.assertEqual(vol['attach_status'], "detached")
+        self.assertIsNone(vol['mountpoint'])
+        self.assertIsNone(vol['instance_uuid'])
+        self.assertIsNone(vol['attached_host'])
+        admin_metadata = vol['volume_admin_metadata']
+        self.assertEqual(len(admin_metadata), 1)
+        self.assertEqual(admin_metadata[0]['key'], 'readonly')
+        self.assertEqual(admin_metadata[0]['value'], 'True')
+
+        self.volume.delete_volume(self.context, volume_id)
+        self.assertRaises(exception.VolumeNotFound,
+                          db.volume_get,
+                          self.context,
+                          volume_id)
+
+    def test_run_manager_attach_detach_volume_with_wrong_attach_mode(self):
+        # Don't allow attaching a readonly volume in 'read-write' mode
+        instance_uuid = '12345678-1234-5678-1234-567812345678'
+        mountpoint = "/dev/sdf"
+        volume = tests_utils.create_volume(self.context,
+                                           admin_metadata={'readonly': 'True'},
+                                           **self.volume_params)
+        volume_id = volume['id']
+        self.volume.create_volume(self.context, volume_id)
+        self.assertRaises(exception.InvalidVolumeAttachMode,
+                          self.volume.attach_volume,
+                          self.context,
+                          volume_id,
+                          instance_uuid,
+                          None,
+                          mountpoint,
+                          'rw')
+        vol = db.volume_get(context.get_admin_context(), volume_id)
+        self.assertEqual(vol['status'], "error_attaching")
+        self.assertEqual(vol['attach_status'], "detached")
+        admin_metadata = vol['volume_admin_metadata']
+        self.assertEqual(len(admin_metadata), 2)
+        self.assertEqual(admin_metadata[0]['key'], 'readonly')
+        self.assertEqual(admin_metadata[0]['value'], 'True')
+        self.assertEqual(admin_metadata[1]['key'], 'attached_mode')
+        self.assertEqual(admin_metadata[1]['value'], 'rw')
+
+        db.volume_update(self.context, volume_id, {'status': 'available'})
+        self.assertRaises(exception.InvalidVolumeAttachMode,
+                          self.volume.attach_volume,
+                          self.context,
+                          volume_id,
+                          None,
+                          'fake_host',
+                          mountpoint,
+                          'rw')
+        vol = db.volume_get(context.get_admin_context(), volume_id)
+        self.assertEqual(vol['status'], "error_attaching")
+        self.assertEqual(vol['attach_status'], "detached")
+        admin_metadata = vol['volume_admin_metadata']
+        self.assertEqual(len(admin_metadata), 2)
+        self.assertEqual(admin_metadata[0]['key'], 'readonly')
+        self.assertEqual(admin_metadata[0]['value'], 'True')
+        self.assertEqual(admin_metadata[1]['key'], 'attached_mode')
+        self.assertEqual(admin_metadata[1]['value'], 'rw')
+
+    def test_run_api_attach_detach_volume_with_wrong_attach_mode(self):
+        # Don't allow attaching a readonly volume in 'read-write' mode
+        instance_uuid = '12345678-1234-5678-1234-567812345678'
+        mountpoint = "/dev/sdf"
+        volume = tests_utils.create_volume(self.context,
+                                           admin_metadata={'readonly': 'True'},
+                                           **self.volume_params)
+        volume_id = volume['id']
+        self.volume.create_volume(self.context, volume_id)
+        volume_api = cinder.volume.api.API()
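+        # Unlike the manager-level test above, the API layer rejects the
+        # 'rw' attach up front, so only the 'readonly' entry is left in
+        # the volume's admin metadata.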
+        self.assertRaises(exception.InvalidVolumeAttachMode,
+                          volume_api.attach,
+                          self.context,
+                          volume,
+                          instance_uuid,
+                          None,
+                          mountpoint,
+                          'rw')
+        vol = db.volume_get(context.get_admin_context(), volume_id)
+        self.assertEqual(vol['attach_status'], "detached")
+        admin_metadata = vol['volume_admin_metadata']
+        self.assertEqual(len(admin_metadata), 1)
+        self.assertEqual(admin_metadata[0]['key'], 'readonly')
+        self.assertEqual(admin_metadata[0]['value'], 'True')
+
+        db.volume_update(self.context, volume_id, {'status': 'available'})
+        self.assertRaises(exception.InvalidVolumeAttachMode,
+                          volume_api.attach,
+                          self.context,
+                          volume,
+                          None,
+                          'fake_host',
+                          mountpoint,
+                          'rw')
+        vol = db.volume_get(context.get_admin_context(), volume_id)
+        self.assertEqual(vol['attach_status'], "detached")
+        admin_metadata = vol['volume_admin_metadata']
+        self.assertEqual(len(admin_metadata), 1)
+        self.assertEqual(admin_metadata[0]['key'], 'readonly')
+        self.assertEqual(admin_metadata[0]['value'], 'True')
+
+    @mock.patch.object(db, 'volume_get')
+    @mock.patch.object(cinder.volume.api.API, 'update')
+    def test_reserve_volume_success(self, volume_update, volume_get):
+        fake_volume = {
+            'id': FAKE_UUID,
+            'status': 'available'
+        }
+
+        volume_get.return_value = fake_volume
+        volume_update.return_value = fake_volume
+
+        self.assertIsNone(cinder.volume.api.API().reserve_volume(
+            self.context,
+            fake_volume,
+        ))
+
+        self.assertTrue(volume_get.called)
+        self.assertTrue(volume_update.called)
+
+    def test_reserve_volume_bad_status(self):
+        fake_volume = {
+            'id': FAKE_UUID,
+            'status': 'in-use'
+        }
+
+        with mock.patch.object(db, 'volume_get') as mock_volume_get:
+            mock_volume_get.return_value = fake_volume
+            self.assertRaises(exception.InvalidVolume,
+                              cinder.volume.api.API().reserve_volume,
+                              self.context,
+                              fake_volume)
+            self.assertTrue(mock_volume_get.called)
+
+    def test_unreserve_volume_success(self):
+        fake_volume = {
+            'id': FAKE_UUID,
+            'status': 'attaching'
+        }
+
+        with mock.patch.object(cinder.volume.api.API,
+                               'update') as mock_volume_update:
+            mock_volume_update.return_value = fake_volume
+            self.assertIsNone(cinder.volume.api.API().unreserve_volume(
+                self.context,
+                fake_volume
+            ))
+            self.assertTrue(mock_volume_update.called)
+
     def test_concurrent_volumes_get_different_targets(self):
         """Ensure multiple concurrent volumes get different targets."""
         volume_ids = []
@@ -226,12 +1299,12 @@ def _check(volume_id):
             admin_context = context.get_admin_context()
             iscsi_target = db.volume_get_iscsi_target_num(admin_context,
                                                           volume_id)
-            self.assert_(iscsi_target not in targets)
+            self.assertNotIn(iscsi_target, targets)
             targets.append(iscsi_target)
-        total_slots = FLAGS.iscsi_num_targets
+        total_slots = CONF.iscsi_num_targets
         for _index in xrange(total_slots):
-            self._create_volume()
+            tests_utils.create_volume(self.context, **self.volume_params)
         for volume_id in volume_ids:
             self.volume.delete_volume(self.context, volume_id)
@@ -242,7 +1315,7 @@ def test_multi_node(self):
         pass
     @staticmethod
-    def _create_snapshot(volume_id, size='0'):
+    def _create_snapshot(volume_id, size='0', metadata=None):
         """Create a snapshot object."""
         snap = {}
         snap['volume_size'] = size
@@ -250,29 +1323,86 @@ def _create_snapshot(volume_id, size='0'):
         snap['project_id'] = 'fake'
         snap['volume_id'] = volume_id
         snap['status'] = "creating"
+        if metadata is not None:
+            snap['metadata'] = metadata
         return db.snapshot_create(context.get_admin_context(), snap)
     def test_create_delete_snapshot(self):
         """Test snapshot can be
created and deleted.""" - volume = self._create_volume() + volume = tests_utils.create_volume( + self.context, + availability_zone=CONF.storage_availability_zone, + **self.volume_params) + self.assertEqual(len(test_notifier.NOTIFICATIONS), 0) self.volume.create_volume(self.context, volume['id']) + self.assertEqual(len(test_notifier.NOTIFICATIONS), 2) snapshot_id = self._create_snapshot(volume['id'])['id'] self.volume.create_snapshot(self.context, volume['id'], snapshot_id) self.assertEqual(snapshot_id, db.snapshot_get(context.get_admin_context(), snapshot_id).id) + self.assertEqual(len(test_notifier.NOTIFICATIONS), 4) + msg = test_notifier.NOTIFICATIONS[2] + self.assertEqual(msg['event_type'], 'snapshot.create.start') + expected = { + 'created_at': 'DONTCARE', + 'deleted': '', + 'display_name': None, + 'snapshot_id': snapshot_id, + 'status': 'creating', + 'tenant_id': 'fake', + 'user_id': 'fake', + 'volume_id': volume['id'], + 'volume_size': 0, + 'availability_zone': 'nova' + } + self.assertDictMatch(msg['payload'], expected) + msg = test_notifier.NOTIFICATIONS[3] + self.assertEqual(msg['event_type'], 'snapshot.create.end') + self.assertDictMatch(msg['payload'], expected) self.volume.delete_snapshot(self.context, snapshot_id) + self.assertEqual(len(test_notifier.NOTIFICATIONS), 6) + msg = test_notifier.NOTIFICATIONS[4] + self.assertEqual(msg['event_type'], 'snapshot.delete.start') + expected['status'] = 'available' + self.assertDictMatch(msg['payload'], expected) + msg = test_notifier.NOTIFICATIONS[5] + self.assertEqual(msg['event_type'], 'snapshot.delete.end') + self.assertDictMatch(msg['payload'], expected) + + snap = db.snapshot_get(context.get_admin_context(read_deleted='yes'), + snapshot_id) + self.assertEqual(snap['status'], 'deleted') self.assertRaises(exception.NotFound, db.snapshot_get, self.context, snapshot_id) self.volume.delete_volume(self.context, volume['id']) - def test_cant_delete_volume_in_use(self): + def test_create_delete_snapshot_with_metadata(self): + """Test snapshot can be created with metadata and deleted.""" + test_meta = {'fake_key': 'fake_value'} + volume = tests_utils.create_volume(self.context, **self.volume_params) + snapshot = self._create_snapshot(volume['id'], metadata=test_meta) + snapshot_id = snapshot['id'] + + snap = db.snapshot_get(context.get_admin_context(), snapshot_id) + result_dict = dict(snap.iteritems()) + result_meta = { + result_dict['snapshot_metadata'][0].key: + result_dict['snapshot_metadata'][0].value} + self.assertEqual(result_meta, test_meta) + self.volume.delete_snapshot(self.context, snapshot_id) + self.assertRaises(exception.NotFound, + db.snapshot_get, + self.context, + snapshot_id) + + def test_cannot_delete_volume_in_use(self): """Test volume can't be deleted in invalid stats.""" # create a volume and assign to host - volume = self._create_volume() + volume = tests_utils.create_volume(self.context, **self.volume_params) self.volume.create_volume(self.context, volume['id']) volume['status'] = 'in-use' volume['host'] = 'fakehost' @@ -291,7 +1421,7 @@ def test_cant_delete_volume_in_use(self): def test_force_delete_volume(self): """Test volume can be forced to delete.""" # create a volume and assign to host - volume = self._create_volume() + volume = tests_utils.create_volume(self.context, **self.volume_params) self.volume.create_volume(self.context, volume['id']) volume['status'] = 'error_deleting' volume['host'] = 'fakehost' @@ -309,14 +1439,32 @@ def test_force_delete_volume(self): # status is deleting volume = 
db.volume_get(context.get_admin_context(), volume['id'])
-        self.assertEquals(volume['status'], 'deleting')
+        self.assertEqual(volume['status'], 'deleting')
         # clean up
         self.volume.delete_volume(self.context, volume['id'])
-    def test_cant_delete_volume_with_snapshots(self):
+    def test_cannot_force_delete_attached_volume(self):
+        """Test volume can't be force-deleted in attached state."""
+        volume = tests_utils.create_volume(self.context, **self.volume_params)
+        self.volume.create_volume(self.context, volume['id'])
+        volume['status'] = 'in-use'
+        volume['attach_status'] = 'attached'
+        volume['host'] = 'fakehost'
+
+        volume_api = cinder.volume.api.API()
+
+        self.assertRaises(exception.VolumeAttached,
+                          volume_api.delete,
+                          self.context,
+                          volume,
+                          force=True)
+
+        self.volume.delete_volume(self.context, volume['id'])
+
+    def test_cannot_delete_volume_with_snapshots(self):
         """Test volume can't be deleted with dependent snapshots."""
-        volume = self._create_volume()
+        volume = tests_utils.create_volume(self.context, **self.volume_params)
         self.volume.create_volume(self.context, volume['id'])
         snapshot_id = self._create_snapshot(volume['id'])['id']
         self.volume.create_snapshot(self.context, volume['id'], snapshot_id)
@@ -338,7 +1486,7 @@ def test_cant_delete_volume_with_snapshots(self):
     def test_can_delete_errored_snapshot(self):
         """Test snapshot can be created and deleted."""
-        volume = self._create_volume()
+        volume = tests_utils.create_volume(self.context, **self.volume_params)
         self.volume.create_volume(self.context, volume['id'])
         snapshot_id = self._create_snapshot(volume['id'])['id']
         self.volume.create_snapshot(self.context, volume['id'], snapshot_id)
@@ -348,7 +1496,7 @@ def test_can_delete_errored_snapshot(self):
         volume_api = cinder.volume.api.API()
         snapshot['status'] = 'badstatus'
-        self.assertRaises(exception.InvalidVolume,
+        self.assertRaises(exception.InvalidSnapshot,
                           volume_api.delete_snapshot,
                           self.context,
                           snapshot)
@@ -364,11 +1512,30 @@ def fake_cast(ctxt, topic, msg):
             pass
         self.stubs.Set(rpc, 'cast', fake_cast)
         instance_uuid = '12345678-1234-5678-1234-567812345678'
-
-        volume = self._create_volume()
+        # create volume and attach to the instance
+        volume = tests_utils.create_volume(self.context, **self.volume_params)
         self.volume.create_volume(self.context, volume['id'])
         db.volume_attached(self.context, volume['id'], instance_uuid,
-                           '/dev/sda1')
+                           None, '/dev/sda1')
+
+        volume_api = cinder.volume.api.API()
+        volume = volume_api.get(self.context, volume['id'])
+        self.assertRaises(exception.InvalidVolume,
+                          volume_api.create_snapshot,
+                          self.context, volume,
+                          'fake_name', 'fake_description')
+        snapshot_ref = volume_api.create_snapshot_force(self.context,
+                                                        volume,
+                                                        'fake_name',
+                                                        'fake_description')
+        db.snapshot_destroy(self.context, snapshot_ref['id'])
+        db.volume_destroy(self.context, volume['id'])
+
+        # create volume and attach to the host
+        volume = tests_utils.create_volume(self.context, **self.volume_params)
+        self.volume.create_volume(self.context, volume['id'])
+        db.volume_attached(self.context, volume['id'], None,
+                           'fake_host', '/dev/sda1')
         volume_api = cinder.volume.api.API()
         volume = volume_api.get(self.context, volume['id'])
@@ -385,15 +1552,23 @@ def fake_cast(ctxt, topic, msg):
     def test_delete_busy_snapshot(self):
         """Test snapshot can be created and deleted."""
-        volume = self._create_volume()
+
+        self.volume.driver.vg = FakeBrickLVM('cinder-volumes',
+                                             False,
+                                             None,
+                                             'default')
+
+        volume = tests_utils.create_volume(self.context, **self.volume_params)
         volume_id = 
volume['id'] self.volume.create_volume(self.context, volume_id) snapshot_id = self._create_snapshot(volume_id)['id'] self.volume.create_snapshot(self.context, volume_id, snapshot_id) self.mox.StubOutWithMock(self.volume.driver, 'delete_snapshot') - self.volume.driver.delete_snapshot(mox.IgnoreArg()) \ - .AndRaise(exception.SnapshotIsBusy) + + self.volume.driver.delete_snapshot( + mox.IgnoreArg()).AndRaise( + exception.SnapshotIsBusy(snapshot_name='fake')) self.mox.ReplayAll() self.volume.delete_snapshot(self.context, snapshot_id) snapshot_ref = db.snapshot_get(self.context, snapshot_id) @@ -404,204 +1579,149 @@ def test_delete_busy_snapshot(self): self.volume.delete_snapshot(self.context, snapshot_id) self.volume.delete_volume(self.context, volume_id) - def _create_volume_from_image(self, expected_status, - fakeout_copy_image_to_volume=False): - """Call copy image to volume, Test the status of volume after calling - copying image to volume.""" - def fake_local_path(volume): - return dst_path - - def fake_copy_image_to_volume(context, volume, image_id): - pass - - dst_fd, dst_path = tempfile.mkstemp() - os.close(dst_fd) - self.stubs.Set(self.volume.driver, 'local_path', fake_local_path) - if fakeout_copy_image_to_volume: - self.stubs.Set(self.volume, '_copy_image_to_volume', - fake_copy_image_to_volume) + def test_delete_no_dev_fails(self): + """Test delete snapshot with no dev file fails.""" + self.stubs.Set(os.path, 'exists', lambda x: False) + self.volume.driver.vg = FakeBrickLVM('cinder-volumes', + False, + None, + 'default') - image_id = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77' - volume_id = 1 - # creating volume testdata - db.volume_create(self.context, {'id': volume_id, - 'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1), - 'display_description': 'Test Desc', - 'size': 20, - 'status': 'creating', - 'instance_uuid': None, - 'host': 'dummy'}) - try: - self.volume.create_volume(self.context, - volume_id, - image_id=image_id) - - volume = db.volume_get(self.context, volume_id) - self.assertEqual(volume['status'], expected_status) - finally: - # cleanup - db.volume_destroy(self.context, volume_id) - os.unlink(dst_path) - - def test_create_volume_from_image_status_downloading(self): - """Verify that before copying image to volume, it is in downloading - state.""" - self._create_volume_from_image('downloading', True) - - def test_create_volume_from_image_status_available(self): - """Verify that before copying image to volume, it is in available - state.""" - self._create_volume_from_image('available') - - def test_create_volume_from_image_exception(self): - """Verify that create volume from image, the volume status is - 'downloading'.""" - dst_fd, dst_path = tempfile.mkstemp() - os.close(dst_fd) + volume = tests_utils.create_volume(self.context, **self.volume_params) + volume_id = volume['id'] + self.volume.create_volume(self.context, volume_id) + snapshot_id = self._create_snapshot(volume_id)['id'] + self.volume.create_snapshot(self.context, volume_id, snapshot_id) - self.stubs.Set(self.volume.driver, 'local_path', lambda x: dst_path) + self.mox.StubOutWithMock(self.volume.driver, 'delete_snapshot') - image_id = 'aaaaaaaa-0000-0000-0000-000000000000' - # creating volume testdata - volume_id = 1 - db.volume_create(self.context, {'id': volume_id, - 'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1), - 'display_description': 'Test Desc', - 'size': 20, - 'status': 'creating', - 'host': 'dummy'}) + self.volume.driver.delete_snapshot( + mox.IgnoreArg()).AndRaise( + 
exception.SnapshotIsBusy(snapshot_name='fake'))
+        self.mox.ReplayAll()
+        self.volume.delete_snapshot(self.context, snapshot_id)
+        snapshot_ref = db.snapshot_get(self.context, snapshot_id)
+        self.assertEqual(snapshot_id, snapshot_ref.id)
+        self.assertEqual("available", snapshot_ref.status)
-        self.assertRaises(exception.ImageNotFound,
-                          self.volume.create_volume,
+        self.mox.UnsetStubs()
+        self.assertRaises(exception.VolumeBackendAPIException,
+                          self.volume.delete_snapshot,
                           self.context,
-                          volume_id,
-                          None,
-                          image_id)
-        volume = db.volume_get(self.context, volume_id)
-        self.assertEqual(volume['status'], "error")
-        # cleanup
-        db.volume_destroy(self.context, volume_id)
-        os.unlink(dst_path)
+                          snapshot_id)
+        self.assertRaises(exception.VolumeBackendAPIException,
+                          self.volume.delete_volume,
+                          self.context,
+                          volume_id)

-    def test_copy_volume_to_image_status_available(self):
-        dst_fd, dst_path = tempfile.mkstemp()
-        os.close(dst_fd)
+    def _create_volume_from_image(self, fakeout_copy_image_to_volume=False,
+                                  fakeout_clone_image=False):
+        """Test function of create_volume_from_image.
+
+        Test cases call this function to create a volume from an image;
+        callers can choose whether to fake out copy_image_to_volume and
+        clone_image. After calling this, test cases should check the status
+        of the volume.
+        """
         def fake_local_path(volume):
             return dst_path
-        self.stubs.Set(self.volume.driver, 'local_path', fake_local_path)
-
-        image_id = '70a599e0-31e7-49b7-b260-868f441e862b'
-        # creating volume testdata
-        volume_id = 1
-        db.volume_create(self.context, {'id': volume_id,
-                         'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
-                         'display_description': 'Test Desc',
-                         'size': 20,
-                         'status': 'uploading',
-                         'instance_uuid': None,
-                         'host': 'dummy'})
+        def fake_copy_image_to_volume(context, volume,
+                                      image_service, image_id):
+            pass
-        try:
-            # start test
-            self.volume.copy_volume_to_image(self.context,
-                                             volume_id,
-                                             image_id)
+        def fake_fetch_to_raw(ctx, image_service, image_id, path, blocksize,
+                              size=None):
+            pass
-            volume = db.volume_get(self.context, volume_id)
-            self.assertEqual(volume['status'], 'available')
-        finally:
-            # cleanup
-            db.volume_destroy(self.context, volume_id)
-            os.unlink(dst_path)
+        def fake_clone_image(volume_ref, image_location, image_id, image_meta):
+            return {'provider_location': None}, True

-    def test_copy_volume_to_image_status_use(self):
         dst_fd, dst_path = tempfile.mkstemp()
         os.close(dst_fd)
-
-        def fake_local_path(volume):
-            return dst_path
         self.stubs.Set(self.volume.driver, 'local_path', fake_local_path)
+        if fakeout_clone_image:
+            self.stubs.Set(self.volume.driver, 'clone_image', fake_clone_image)
+        self.stubs.Set(image_utils, 'fetch_to_raw', fake_fetch_to_raw)
+        if fakeout_copy_image_to_volume:
+            self.stubs.Set(self.volume, '_copy_image_to_volume',
+                           fake_copy_image_to_volume)

-        #image_id = '70a599e0-31e7-49b7-b260-868f441e862b'
-        image_id = 'a440c04b-79fa-479c-bed1-0b816eaec379'
+        image_id = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
+        volume_id = tests_utils.create_volume(self.context,
+                                              **self.volume_params)['id']
         # creating volume testdata
-        volume_id = 1
-        db.volume_create(self.context,
-                         {'id': volume_id,
-                          'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
-                          'display_description': 'Test Desc',
-                          'size': 20,
-                          'status': 'uploading',
-                          'instance_uuid':
-                          'b21f957d-a72f-4b93-b5a5-45b1161abb02',
-                          'host': 'dummy'})
-        try:
-            # start test
-            self.volume.copy_volume_to_image(self.context,
-                                             volume_id,
-                                             image_id)
-
-            volume = db.volume_get(self.context, volume_id)
-
self.assertEqual(volume['status'], 'in-use') + self.volume.create_volume(self.context, + volume_id, + image_id=image_id) finally: # cleanup - db.volume_destroy(self.context, volume_id) os.unlink(dst_path) + volume = db.volume_get(self.context, volume_id) + return volume - def test_copy_volume_to_image_exception(self): + def test_create_volume_from_image_cloned_status_available(self): + """Test create volume from image via cloning. + + Verify that after cloning image to volume, it is in available + state and is bootable. + """ + volume = self._create_volume_from_image() + self.assertEqual(volume['status'], 'available') + self.assertEqual(volume['bootable'], True) + self.volume.delete_volume(self.context, volume['id']) + + def test_create_volume_from_image_not_cloned_status_available(self): + """Test create volume from image via full copy. + + Verify that after copying image to volume, it is in available + state and is bootable. + """ + volume = self._create_volume_from_image(fakeout_clone_image=True) + self.assertEqual(volume['status'], 'available') + self.assertEqual(volume['bootable'], True) + self.volume.delete_volume(self.context, volume['id']) + + def test_create_volume_from_image_exception(self): + """Verify that create volume from a non-existing image, the volume + status is 'error' and is not bootable. + """ dst_fd, dst_path = tempfile.mkstemp() os.close(dst_fd) - def fake_local_path(volume): - return dst_path - - self.stubs.Set(self.volume.driver, 'local_path', fake_local_path) + self.stubs.Set(self.volume.driver, 'local_path', lambda x: dst_path) - image_id = 'aaaaaaaa-0000-0000-0000-000000000000' # creating volume testdata volume_id = 1 - db.volume_create(self.context, {'id': volume_id, - 'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1), - 'display_description': 'Test Desc', - 'size': 20, - 'status': 'in-use', - 'host': 'dummy'}) - - try: - # start test - self.assertRaises(exception.ImageNotFound, - self.volume.copy_volume_to_image, - self.context, - volume_id, - image_id) + db.volume_create(self.context, + {'id': volume_id, + 'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1), + 'display_description': 'Test Desc', + 'size': 20, + 'status': 'creating', + 'host': 'dummy'}) - volume = db.volume_get(self.context, volume_id) - self.assertEqual(volume['status'], 'available') - finally: - # cleanup - db.volume_destroy(self.context, volume_id) - os.unlink(dst_path) + self.assertRaises(exception.ImageNotFound, + self.volume.create_volume, + self.context, + volume_id, None, None, None, + None, + FAKE_UUID) + volume = db.volume_get(self.context, volume_id) + self.assertEqual(volume['status'], "error") + self.assertEqual(volume['bootable'], False) + # cleanup + db.volume_destroy(self.context, volume_id) + os.unlink(dst_path) def test_create_volume_from_exact_sized_image(self): """Verify that an image which is exactly the same size as the - volume, will work correctly.""" - class _FakeImageService: - def __init__(self, db_driver=None, image_service=None): - pass - - def show(self, context, image_id): - return {'size': 2 * 1024 * 1024 * 1024} - - image_id = '70a599e0-31e7-49b7-b260-868f441e862b' - + volume, will work correctly. 
+ """ try: volume_id = None volume_api = cinder.volume.api.API( - image_service=_FakeImageService()) + image_service=FakeImageService()) volume = volume_api.create(self.context, 2, 'name', 'description', image_id=1) volume_id = volume['id'] @@ -613,16 +1733,31 @@ def show(self, context, image_id): def test_create_volume_from_oversized_image(self): """Verify that an image which is too big will fail correctly.""" - class _FakeImageService: - def __init__(self, db_driver=None, image_service=None): - pass - + class _ModifiedFakeImageService(FakeImageService): def show(self, context, image_id): - return {'size': 2 * 1024 * 1024 * 1024 + 1} + return {'size': 2 * units.GiB + 1, + 'disk_format': 'raw', + 'container_format': 'bare'} + + volume_api = cinder.volume.api.API(image_service= + _ModifiedFakeImageService()) + + self.assertRaises(exception.InvalidInput, + volume_api.create, + self.context, 2, + 'name', 'description', image_id=1) - image_id = '70a599e0-31e7-49b7-b260-868f441e862b' + def test_create_volume_with_mindisk_error(self): + """Verify volumes smaller than image minDisk will cause an error.""" + class _ModifiedFakeImageService(FakeImageService): + def show(self, context, image_id): + return {'size': 2 * units.GiB, + 'disk_format': 'raw', + 'container_format': 'bare', + 'min_disk': 5} - volume_api = cinder.volume.api.API(image_service=_FakeImageService()) + volume_api = cinder.volume.api.API(image_service= + _ModifiedFakeImageService()) self.assertRaises(exception.InvalidInput, volume_api.create, @@ -630,13 +1765,13 @@ def show(self, context, image_id): 'name', 'description', image_id=1) def _do_test_create_volume_with_size(self, size): - def fake_reserve(context, expire=None, **deltas): + def fake_reserve(context, expire=None, project_id=None, **deltas): return ["RESERVATION"] - def fake_commit(context, reservations): + def fake_commit(context, reservations, project_id=None): pass - def fake_rollback(context, reservations): + def fake_rollback(context, reservations, project_id=None): pass self.stubs.Set(QUOTAS, "reserve", fake_reserve) @@ -649,7 +1784,7 @@ def fake_rollback(context, reservations): size, 'name', 'description') - self.assertEquals(volume['size'], int(size)) + self.assertEqual(volume['size'], int(size)) def test_create_volume_int_size(self): """Test volume creation with int size.""" @@ -660,13 +1795,13 @@ def test_create_volume_string_size(self): self._do_test_create_volume_with_size('2') def test_create_volume_with_bad_size(self): - def fake_reserve(context, expire=None, **deltas): + def fake_reserve(context, expire=None, project_id=None, **deltas): return ["RESERVATION"] - def fake_commit(context, reservations): + def fake_commit(context, reservations, project_id=None): pass - def fake_rollback(context, reservations): + def fake_rollback(context, reservations, project_id=None): pass self.stubs.Set(QUOTAS, "reserve", fake_reserve) @@ -682,33 +1817,9 @@ def fake_rollback(context, reservations): 'name', 'description') - def test_create_volume_usage_notification(self): - """Ensure create volume generates appropriate usage notification""" - volume = self._create_volume() - volume_id = volume['id'] - self.assertEquals(len(test_notifier.NOTIFICATIONS), 0) - self.volume.create_volume(self.context, volume_id) - self.assertEquals(len(test_notifier.NOTIFICATIONS), 2) - msg = test_notifier.NOTIFICATIONS[0] - self.assertEquals(msg['event_type'], 'volume.create.start') - msg = test_notifier.NOTIFICATIONS[1] - self.assertEquals(msg['priority'], 'INFO') - 
self.assertEquals(msg['event_type'], 'volume.create.end') - payload = msg['payload'] - self.assertEquals(payload['tenant_id'], volume['project_id']) - self.assertEquals(payload['user_id'], volume['user_id']) - self.assertEquals(payload['volume_id'], volume['id']) - self.assertEquals(payload['status'], 'creating') - self.assertEquals(payload['size'], volume['size']) - self.assertTrue('display_name' in payload) - self.assertTrue('snapshot_id' in payload) - self.assertTrue('launched_at' in payload) - self.assertTrue('created_at' in payload) - self.volume.delete_volume(self.context, volume_id) - def test_begin_roll_detaching_volume(self): """Test begin_detaching and roll_detaching functions.""" - volume = self._create_volume() + volume = tests_utils.create_volume(self.context, **self.volume_params) volume_api = cinder.volume.api.API() volume_api.begin_detaching(self.context, volume) volume = db.volume_get(self.context, volume['id']) @@ -719,27 +1830,661 @@ def test_begin_roll_detaching_volume(self): def test_volume_api_update(self): # create a raw vol - volume = self._create_volume() + volume = tests_utils.create_volume(self.context, **self.volume_params) # use volume.api to update name volume_api = cinder.volume.api.API() update_dict = {'display_name': 'test update name'} volume_api.update(self.context, volume, update_dict) # read changes from db vol = db.volume_get(context.get_admin_context(), volume['id']) - self.assertEquals(vol['display_name'], 'test update name') + self.assertEqual(vol['display_name'], 'test update name') def test_volume_api_update_snapshot(self): # create raw snapshot - volume = self._create_volume() + volume = tests_utils.create_volume(self.context, **self.volume_params) snapshot = self._create_snapshot(volume['id']) - self.assertEquals(snapshot['display_name'], None) + self.assertIsNone(snapshot['display_name']) # use volume.api to update name volume_api = cinder.volume.api.API() update_dict = {'display_name': 'test update name'} volume_api.update_snapshot(self.context, snapshot, update_dict) # read changes from db snap = db.snapshot_get(context.get_admin_context(), snapshot['id']) - self.assertEquals(snap['display_name'], 'test update name') + self.assertEqual(snap['display_name'], 'test update name') + + def test_extend_volume(self): + """Test volume can be extended at API level.""" + # create a volume and assign to host + volume = tests_utils.create_volume(self.context, size=2, + status='creating', host=CONF.host) + self.volume.create_volume(self.context, volume['id']) + volume['status'] = 'in-use' + volume['host'] = 'fakehost' + + volume_api = cinder.volume.api.API() + + # Extend fails when status != available + self.assertRaises(exception.InvalidVolume, + volume_api.extend, + self.context, + volume, + 3) + + volume['status'] = 'available' + # Extend fails when new_size < orig_size + self.assertRaises(exception.InvalidInput, + volume_api.extend, + self.context, + volume, + 1) + + # Extend fails when new_size == orig_size + self.assertRaises(exception.InvalidInput, + volume_api.extend, + self.context, + volume, + 2) + + # works when new_size > orig_size + volume_api.extend(self.context, volume, 3) + + volume = db.volume_get(context.get_admin_context(), volume['id']) + self.assertEqual(volume['status'], 'extending') + + # clean up + self.volume.delete_volume(self.context, volume['id']) + + def test_extend_volume_driver_not_initialized(self): + """Test volume can be extended at API level.""" + # create a volume and assign to host + volume = 
tests_utils.create_volume(self.context, size=2,
+                                           status='available',
+                                           host=CONF.host)
+        self.volume.create_volume(self.context, volume['id'])
+
+        # NOTE(flaper87): Set initialized to False
+        self.volume.driver._initialized = False
+
+        self.assertRaises(exception.DriverNotInitialized,
+                          self.volume.extend_volume,
+                          self.context, volume['id'], 3)
+
+        volume = db.volume_get(context.get_admin_context(), volume['id'])
+        self.assertEqual(volume.status, 'error_extending')
+
+        # NOTE(flaper87): Set initialized to True,
+        # let's clean up the mess.
+        self.volume.driver._initialized = True
+        self.volume.delete_volume(self.context, volume['id'])
+
+    def test_extend_volume_manager(self):
+        """Test volume can be extended at the manager level."""
+        def fake_reserve(context, expire=None, project_id=None, **deltas):
+            return ['RESERVATION']
+
+        def fake_reserve_exc(context, expire=None, project_id=None, **deltas):
+            raise exception.OverQuota(overs=['gigabytes'],
+                                      quotas={'gigabytes': 20},
+                                      usages={'gigabytes': {'reserved': 5,
+                                                            'in_use': 15}})
+
+        def fake_extend_exc(volume, new_size):
+            raise exception.CinderException('fake exception')
+
+        volume = tests_utils.create_volume(self.context, size=2,
+                                           status='creating', host=CONF.host)
+        self.volume.create_volume(self.context, volume['id'])
+
+        # Test quota exceeded
+        self.stubs.Set(QUOTAS, 'reserve', fake_reserve_exc)
+        self.stubs.Set(QUOTAS, 'commit', lambda x, y, project_id=None: True)
+        self.stubs.Set(QUOTAS, 'rollback', lambda x, y: True)
+        volume['status'] = 'extending'
+        self.volume.extend_volume(self.context, volume['id'], '4')
+        volume = db.volume_get(context.get_admin_context(), volume['id'])
+        self.assertEqual(volume['size'], 2)
+        self.assertEqual(volume['status'], 'error_extending')
+
+        # Test driver exception
+        self.stubs.Set(QUOTAS, 'reserve', fake_reserve)
+        self.stubs.Set(self.volume.driver, 'extend_volume', fake_extend_exc)
+        volume['status'] = 'extending'
+        self.volume.extend_volume(self.context, volume['id'], '4')
+        volume = db.volume_get(context.get_admin_context(), volume['id'])
+        self.assertEqual(volume['size'], 2)
+        self.assertEqual(volume['status'], 'error_extending')
+
+        # Test driver success
+        self.stubs.Set(self.volume.driver, 'extend_volume',
+                       lambda x, y: True)
+        volume['status'] = 'extending'
+        self.volume.extend_volume(self.context, volume['id'], '4')
+        volume = db.volume_get(context.get_admin_context(), volume['id'])
+        self.assertEqual(volume['size'], 4)
+        self.assertEqual(volume['status'], 'available')
+
+        # clean up
+        self.volume.delete_volume(self.context, volume['id'])
+
+    def test_create_volume_from_unelevated_context(self):
+        """Test context doesn't change after volume creation failure."""
+        def fake_create_volume(*args, **kwargs):
+            raise exception.CinderException('fake exception')
+
+        # create a context for testing
+        ctxt = self.context.deepcopy()
+        if 'admin' in ctxt.roles:
+            ctxt.roles.remove('admin')
+            ctxt.is_admin = False
+        # create one copy of the context for future comparison
+        self.saved_ctxt = ctxt.deepcopy()
+
+        self.stubs.Set(self.volume.driver, 'create_volume', fake_create_volume)
+
+        volume_src = tests_utils.create_volume(self.context,
+                                               **self.volume_params)
+        self.assertRaises(exception.CinderException,
+                          self.volume.create_volume, ctxt, volume_src['id'])
+
+    def test_create_volume_from_sourcevol(self):
+        """Test volume can be created from a source volume."""
+        def fake_create_cloned_volume(volume, src_vref):
+            pass
+
+        self.stubs.Set(self.volume.driver, 'create_cloned_volume',
+                       fake_create_cloned_volume)
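+        # With create_cloned_volume stubbed to a no-op, this exercises
+        # only the manager's source-volume bookkeeping, not a real clone.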
+        volume_src = tests_utils.create_volume(self.context,
+                                               **self.volume_params)
+        self.volume.create_volume(self.context, volume_src['id'])
+        volume_dst = tests_utils.create_volume(self.context,
+                                               source_volid=volume_src['id'],
+                                               **self.volume_params)
+        self.volume.create_volume(self.context, volume_dst['id'],
+                                  source_volid=volume_src['id'])
+        self.assertEqual('available',
+                         db.volume_get(context.get_admin_context(),
+                                       volume_dst['id']).status)
+        self.volume.delete_volume(self.context, volume_dst['id'])
+        self.volume.delete_volume(self.context, volume_src['id'])
+
+    def test_create_volume_from_sourcevol_fail_wrong_az(self):
+        """Test volume can't be cloned from another volume in a different
+        az."""
+        volume_api = cinder.volume.api.API()
+
+        def fake_list_availability_zones():
+            return ({'name': 'nova', 'available': True},
+                    {'name': 'az2', 'available': True})
+
+        self.stubs.Set(volume_api,
+                       'list_availability_zones',
+                       fake_list_availability_zones)
+
+        volume_src = tests_utils.create_volume(self.context,
+                                               availability_zone='az2',
+                                               **self.volume_params)
+        self.volume.create_volume(self.context, volume_src['id'])
+
+        volume_src = db.volume_get(self.context, volume_src['id'])
+
+        volume_dst = volume_api.create(self.context,
+                                       size=1,
+                                       name='fake_name',
+                                       description='fake_desc',
+                                       source_volume=volume_src)
+        self.assertEqual(volume_dst['availability_zone'], 'az2')
+
+        self.assertRaises(exception.InvalidInput,
+                          volume_api.create,
+                          self.context,
+                          size=1,
+                          name='fake_name',
+                          description='fake_desc',
+                          source_volume=volume_src,
+                          availability_zone='nova')
+
+    def test_create_volume_from_sourcevol_with_glance_metadata(self):
+        """Test glance metadata can be correctly copied to new volume."""
+        def fake_create_cloned_volume(volume, src_vref):
+            pass
+
+        self.stubs.Set(self.volume.driver, 'create_cloned_volume',
+                       fake_create_cloned_volume)
+        volume_src = self._create_volume_from_image()
+        self.volume.create_volume(self.context, volume_src['id'])
+        volume_dst = tests_utils.create_volume(self.context,
+                                               source_volid=volume_src['id'],
+                                               **self.volume_params)
+        self.volume.create_volume(self.context, volume_dst['id'],
+                                  source_volid=volume_src['id'])
+        self.assertEqual('available',
+                         db.volume_get(context.get_admin_context(),
+                                       volume_dst['id']).status)
+        src_glancemeta = db.volume_get(context.get_admin_context(),
+                                       volume_src['id']).volume_glance_metadata
+        dst_glancemeta = db.volume_get(context.get_admin_context(),
+                                       volume_dst['id']).volume_glance_metadata
+        for meta_src in src_glancemeta:
+            for meta_dst in dst_glancemeta:
+                if meta_dst.key == meta_src.key:
+                    self.assertEqual(meta_dst.value, meta_src.value)
+        self.volume.delete_volume(self.context, volume_src['id'])
+        self.volume.delete_volume(self.context, volume_dst['id'])
+
+    def test_create_volume_from_sourcevol_failed_clone(self):
+        """Test src vol status will be restored by the error handling code."""
+        def fake_error_create_cloned_volume(volume, src_vref):
+            db.volume_update(self.context, src_vref['id'], {'status': 'error'})
+            raise exception.CinderException('fake exception')
+
+        self.stubs.Set(self.volume.driver, 'create_cloned_volume',
+                       fake_error_create_cloned_volume)
+        volume_src = tests_utils.create_volume(self.context,
+                                               **self.volume_params)
+        self.volume.create_volume(self.context, volume_src['id'])
+        volume_dst = tests_utils.create_volume(self.context,
+                                               source_volid=volume_src['id'],
+                                               **self.volume_params)
+        self.assertRaises(exception.CinderException,
+                          self.volume.create_volume,
+                          self.context,
+                          volume_dst['id'], None, None,
None, None, None, + volume_src['id']) + self.assertEqual(volume_src['status'], 'creating') + self.volume.delete_volume(self.context, volume_dst['id']) + self.volume.delete_volume(self.context, volume_src['id']) + + def test_list_availability_zones_enabled_service(self): + services = [ + {'availability_zone': 'ping', 'disabled': 0}, + {'availability_zone': 'ping', 'disabled': 1}, + {'availability_zone': 'pong', 'disabled': 0}, + {'availability_zone': 'pung', 'disabled': 1}, + ] + + def stub_service_get_all_by_topic(*args, **kwargs): + return services + + self.stubs.Set(db, 'service_get_all_by_topic', + stub_service_get_all_by_topic) + + volume_api = cinder.volume.api.API() + azs = volume_api.list_availability_zones() + + expected = ( + {'name': 'pung', 'available': False}, + {'name': 'pong', 'available': True}, + {'name': 'ping', 'available': True}, + ) + + self.assertEqual(expected, azs) + + def test_migrate_volume_driver(self): + """Test volume migration done by driver.""" + # stub out driver and rpc functions + self.stubs.Set(self.volume.driver, 'migrate_volume', + lambda x, y, z, new_type_id=None: (True, + {'user_id': 'foo'})) + + volume = tests_utils.create_volume(self.context, size=0, + host=CONF.host, + migration_status='migrating') + host_obj = {'host': 'newhost', 'capabilities': {}} + self.volume.migrate_volume(self.context, volume['id'], + host_obj, False) + + # check volume properties + volume = db.volume_get(context.get_admin_context(), volume['id']) + self.assertEqual(volume['host'], 'newhost') + self.assertIsNone(volume['migration_status']) + + def test_migrate_volume_generic(self): + def fake_migr(vol, host): + raise Exception('should not be called') + + def fake_delete_volume_rpc(self, ctxt, vol_id): + raise Exception('should not be called') + + def fake_create_volume(self, ctxt, volume, host, req_spec, filters, + allow_reschedule=True): + db.volume_update(ctxt, volume['id'], + {'status': 'available'}) + + self.stubs.Set(self.volume.driver, 'migrate_volume', fake_migr) + self.stubs.Set(volume_rpcapi.VolumeAPI, 'create_volume', + fake_create_volume) + self.stubs.Set(self.volume.driver, 'copy_volume_data', + lambda x, y, z, remote='dest': True) + self.stubs.Set(volume_rpcapi.VolumeAPI, 'delete_volume', + fake_delete_volume_rpc) + + volume = tests_utils.create_volume(self.context, size=0, + host=CONF.host) + host_obj = {'host': 'newhost', 'capabilities': {}} + self.volume.migrate_volume(self.context, volume['id'], + host_obj, True) + volume = db.volume_get(context.get_admin_context(), volume['id']) + self.assertEqual(volume['host'], 'newhost') + self.assertIsNone(volume['migration_status']) + + def _retype_volume_exec(self, driver, snap=False, policy='on-demand', + migrate_exc=False, exc=None, diff_equal=False): + elevated = context.get_admin_context() + project_id = self.context.project_id + + db.volume_type_create(elevated, {'name': 'old', 'extra_specs': {}}) + old_vol_type = db.volume_type_get_by_name(elevated, 'old') + db.volume_type_create(elevated, {'name': 'new', 'extra_specs': {}}) + vol_type = db.volume_type_get_by_name(elevated, 'new') + db.quota_create(elevated, project_id, 'volumes_new', 10) + + volume = tests_utils.create_volume(self.context, size=1, + host=CONF.host, status='retyping', + volume_type_id=old_vol_type['id']) + if snap: + self._create_snapshot(volume['id'], size=volume['size']) + host_obj = {'host': 'newhost', 'capabilities': {}} + + reserve_opts = {'volumes': 1, 'gigabytes': volume['size']} + QUOTAS.add_volume_type_opts(self.context, + reserve_opts, 
+ vol_type['id']) + reservations = QUOTAS.reserve(self.context, + project_id=project_id, + **reserve_opts) + + with mock.patch.object(self.volume.driver, 'retype') as _retype: + with mock.patch.object(volume_types, 'volume_types_diff') as _diff: + with mock.patch.object(self.volume, 'migrate_volume') as _mig: + _retype.return_value = driver + _diff.return_value = ({}, diff_equal) + if migrate_exc: + _mig.side_effect = KeyError + else: + _mig.return_value = True + + if not exc: + self.volume.retype(self.context, volume['id'], + vol_type['id'], host_obj, + migration_policy=policy, + reservations=reservations) + else: + self.assertRaises(exc, self.volume.retype, + self.context, volume['id'], + vol_type['id'], host_obj, + migration_policy=policy, + reservations=reservations) + + # get volume/quota properties + volume = db.volume_get(elevated, volume['id']) + try: + usage = db.quota_usage_get(elevated, project_id, 'volumes_new') + volumes_in_use = usage.in_use + except exception.QuotaUsageNotFound: + volumes_in_use = 0 + + # check properties + if not exc: + self.assertEqual(volume['volume_type_id'], vol_type['id']) + self.assertEqual(volume['status'], 'available') + self.assertEqual(volume['host'], 'newhost') + self.assertEqual(volumes_in_use, 1) + else: + self.assertEqual(volume['volume_type_id'], old_vol_type['id']) + self.assertEqual(volume['status'], 'available') + self.assertEqual(volume['host'], CONF.host) + self.assertEqual(volumes_in_use, 0) + + def test_retype_volume_driver_success(self): + self._retype_volume_exec(True) + + def test_retype_volume_migration_bad_policy(self): + # Test a volume retype that requires migration but is not allowed + self._retype_volume_exec(False, policy='never', + exc=exception.VolumeMigrationFailed) + + def test_retype_volume_migration_with_snaps(self): + self._retype_volume_exec(False, snap=True, exc=exception.InvalidVolume) + + def test_retype_volume_migration_failed(self): + self._retype_volume_exec(False, migrate_exc=True, exc=KeyError) + + def test_retype_volume_migration_success(self): + self._retype_volume_exec(False, migrate_exc=False, exc=None) + + def test_retype_volume_migration_equal_types(self): + self._retype_volume_exec(False, diff_equal=True) + + def test_migrate_driver_not_initialized(self): + volume = tests_utils.create_volume(self.context, size=0, + host=CONF.host) + host_obj = {'host': 'newhost', 'capabilities': {}} + + self.volume.driver._initialized = False + self.assertRaises(exception.DriverNotInitialized, + self.volume.migrate_volume, + self.context, volume['id'], + host_obj, True) + + volume = db.volume_get(context.get_admin_context(), volume['id']) + self.assertEqual(volume.migration_status, 'error') + + # NOTE(flaper87): Set initialized to True, + # let's clean up the mess.
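+ # Without re-initializing the driver here, the delete_volume() + # cleanup below would fail the manager's driver-initialized check.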
+ self.volume.driver._initialized = True + self.volume.delete_volume(self.context, volume['id']) + + def test_update_volume_readonly_flag(self): + """Test volume readonly flag can be updated at API level.""" + # create a volume and assign to host + volume = tests_utils.create_volume(self.context, + admin_metadata={'readonly': 'True'}, + **self.volume_params) + self.volume.create_volume(self.context, volume['id']) + volume['status'] = 'in-use' + + volume_api = cinder.volume.api.API() + + # Update fails when status != available + self.assertRaises(exception.InvalidVolume, + volume_api.update_readonly_flag, + self.context, + volume, + False) + + volume['status'] = 'available' + + # works when volume in 'available' status + volume_api.update_readonly_flag(self.context, volume, False) + + volume = db.volume_get(context.get_admin_context(), volume['id']) + self.assertEqual(volume['status'], 'available') + admin_metadata = volume['volume_admin_metadata'] + self.assertEqual(len(admin_metadata), 1) + self.assertEqual(admin_metadata[0]['key'], 'readonly') + self.assertEqual(admin_metadata[0]['value'], 'False') + + # clean up + self.volume.delete_volume(self.context, volume['id']) + + +class CopyVolumeToImageTestCase(BaseVolumeTestCase): + def fake_local_path(self, volume): + return self.dst_path + + def setUp(self): + super(CopyVolumeToImageTestCase, self).setUp() + self.dst_fd, self.dst_path = tempfile.mkstemp() + os.close(self.dst_fd) + self.stubs.Set(self.volume.driver, 'local_path', self.fake_local_path) + self.image_meta = { + 'id': '70a599e0-31e7-49b7-b260-868f441e862b', + 'container_format': 'bare', + 'disk_format': 'raw' + } + self.volume_id = 1 + self.volume_attrs = { + 'id': self.volume_id, + 'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1), + 'display_description': 'Test Desc', + 'size': 20, + 'status': 'uploading', + 'host': 'dummy' + } + + def tearDown(self): + db.volume_destroy(self.context, self.volume_id) + os.unlink(self.dst_path) + super(CopyVolumeToImageTestCase, self).tearDown() + + def test_copy_volume_to_image_status_available(self): + # creating volume testdata + self.volume_attrs['instance_uuid'] = None + db.volume_create(self.context, self.volume_attrs) + + # start test + self.volume.copy_volume_to_image(self.context, + self.volume_id, + self.image_meta) + + volume = db.volume_get(self.context, self.volume_id) + self.assertEqual(volume['status'], 'available') + + def test_copy_volume_to_image_status_use(self): + self.image_meta['id'] = 'a440c04b-79fa-479c-bed1-0b816eaec379' + # creating volume testdata + self.volume_attrs['instance_uuid'] = 'b21f957d-a72f-4b93-b5a5-' \ + '45b1161abb02' + db.volume_create(self.context, self.volume_attrs) + + # start test + self.volume.copy_volume_to_image(self.context, + self.volume_id, + self.image_meta) + + volume = db.volume_get(self.context, self.volume_id) + self.assertEqual(volume['status'], 'in-use') + + def test_copy_volume_to_image_exception(self): + self.image_meta['id'] = FAKE_UUID + # creating volume testdata + self.volume_attrs['status'] = 'in-use' + db.volume_create(self.context, self.volume_attrs) + + # start test + self.assertRaises(exception.ImageNotFound, + self.volume.copy_volume_to_image, + self.context, + self.volume_id, + self.image_meta) + + volume = db.volume_get(self.context, self.volume_id) + self.assertEqual(volume['status'], 'available') + + +class GetActiveByWindowTestCase(BaseVolumeTestCase): + def setUp(self): + super(GetActiveByWindowTestCase, self).setUp() + self.ctx = 
context.get_admin_context(read_deleted="yes") + self.db_attrs = [ + { + 'id': 1, + 'host': 'devstack', + 'project_id': 'p1', + 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), + 'deleted': True, 'status': 'deleted', + 'deleted_at': datetime.datetime(1, 2, 1, 1, 1, 1), + }, + + { + 'id': 2, + 'host': 'devstack', + 'project_id': 'p1', + 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), + 'deleted': True, 'status': 'deleted', + 'deleted_at': datetime.datetime(1, 3, 10, 1, 1, 1), + }, + { + 'id': 3, + 'host': 'devstack', + 'project_id': 'p1', + 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), + 'deleted': True, 'status': 'deleted', + 'deleted_at': datetime.datetime(1, 5, 1, 1, 1, 1), + }, + { + 'id': 4, + 'host': 'devstack', + 'project_id': 'p1', + 'created_at': datetime.datetime(1, 3, 10, 1, 1, 1), + }, + { + 'id': 5, + 'host': 'devstack', + 'project_id': 'p1', + 'created_at': datetime.datetime(1, 5, 1, 1, 1, 1), + } + ] + + def test_volume_get_active_by_window(self): + # Find all volumes valid within a timeframe window. + + # Not in window + db.volume_create(self.ctx, self.db_attrs[0]) + + # In - deleted in window + db.volume_create(self.ctx, self.db_attrs[1]) + + # In - deleted after window + db.volume_create(self.ctx, self.db_attrs[2]) + + # In - created in window + db.volume_create(self.context, self.db_attrs[3]) + + # Not in window. + db.volume_create(self.context, self.db_attrs[4]) + + volumes = db.volume_get_active_by_window( + self.context, + datetime.datetime(1, 3, 1, 1, 1, 1), + datetime.datetime(1, 4, 1, 1, 1, 1), + project_id='p1') + self.assertEqual(len(volumes), 3) + self.assertEqual(volumes[0].id, u'2') + self.assertEqual(volumes[1].id, u'3') + self.assertEqual(volumes[2].id, u'4') + + def test_snapshot_get_active_by_window(self): + # Find all snapshots valid within a timeframe window. + vol = db.volume_create(self.context, {'id': 1}) + for i in range(5): + self.db_attrs[i]['volume_id'] = 1 + + # Not in window + db.snapshot_create(self.ctx, self.db_attrs[0]) + + # In - deleted in window + db.snapshot_create(self.ctx, self.db_attrs[1]) + + # In - deleted after window + db.snapshot_create(self.ctx, self.db_attrs[2]) + + # In - created in window + db.snapshot_create(self.context, self.db_attrs[3]) + # Not in window.
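+ # (created after the window ends, so it must be excluded)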
+ db.snapshot_create(self.context, self.db_attrs[4]) + + snapshots = db.snapshot_get_active_by_window( + self.context, + datetime.datetime(1, 3, 1, 1, 1, 1), + datetime.datetime(1, 4, 1, 1, 1, 1), + project_id='p1') + self.assertEqual(len(snapshots), 3) + self.assertEqual(snapshots[0].id, u'2') + self.assertEqual(snapshots[0].volume.id, u'1') + self.assertEqual(snapshots[1].id, u'3') + self.assertEqual(snapshots[1].volume.id, u'1') + self.assertEqual(snapshots[2].id, u'4') + self.assertEqual(snapshots[2].volume.id, u'1') class DriverTestCase(test.TestCase): @@ -751,19 +2496,21 @@ def setUp(self): vol_tmpdir = tempfile.mkdtemp() self.flags(volume_driver=self.driver_name, volumes_dir=vol_tmpdir) - self.volume = importutils.import_object(FLAGS.volume_manager) + self.volume = importutils.import_object(CONF.volume_manager) self.context = context.get_admin_context() self.output = "" self.stubs.Set(iscsi.TgtAdm, '_get_target', self.fake_get_target) + self.stubs.Set(brick_lvm.LVM, '_vg_exists', lambda x: True) def _fake_execute(_command, *_args, **_kwargs): """Fake _execute.""" return self.output, None self.volume.driver.set_execute(_fake_execute) + self.volume.driver.set_initialized() def tearDown(self): try: - shutil.rmtree(FLAGS.volumes_dir) + shutil.rmtree(CONF.volumes_dir) except OSError: pass super(DriverTestCase, self).tearDown() @@ -772,7 +2519,7 @@ def fake_get_target(obj, iqn): return 1 def _attach_volume(self): - """Attach volumes to an instance. """ + """Attach volumes to an instance.""" return [] def _detach_volume(self, volume_id_list): @@ -782,34 +2529,274 @@ def _detach_volume(self, volume_id_list): self.volume.delete_volume(self.context, volume_id) -class VolumeDriverTestCase(DriverTestCase): +class GenericVolumeDriverTestCase(DriverTestCase): + """Test case for VolumeDriver.""" + driver_name = "cinder.tests.fake_driver.LoggingVolumeDriver" + + def test_backup_volume(self): + vol = tests_utils.create_volume(self.context) + backup = {'volume_id': vol['id']} + properties = {} + attach_info = {'device': {'path': '/dev/null'}} + backup_service = self.mox.CreateMock(backup_driver.BackupDriver) + root_helper = 'sudo cinder-rootwrap /etc/cinder/rootwrap.conf' + self.mox.StubOutWithMock(self.volume.driver.db, 'volume_get') + self.mox.StubOutWithMock(cinder.brick.initiator.connector, + 'get_connector_properties') + self.mox.StubOutWithMock(self.volume.driver, '_attach_volume') + self.mox.StubOutWithMock(os, 'getuid') + self.mox.StubOutWithMock(utils, 'execute') + self.mox.StubOutWithMock(fileutils, 'file_open') + self.mox.StubOutWithMock(self.volume.driver, '_detach_volume') + self.mox.StubOutWithMock(self.volume.driver, 'terminate_connection') + + self.volume.driver.db.volume_get(self.context, vol['id']).\ + AndReturn(vol) + cinder.brick.initiator.connector.\ + get_connector_properties(root_helper, CONF.my_ip).\ + AndReturn(properties) + self.volume.driver._attach_volume(self.context, vol, properties).\ + AndReturn(attach_info) + os.getuid() + utils.execute('chown', None, '/dev/null', run_as_root=True) + f = fileutils.file_open('/dev/null').AndReturn(file('/dev/null')) + backup_service.backup(backup, f) + utils.execute('chown', 0, '/dev/null', run_as_root=True) + self.volume.driver._detach_volume(attach_info) + self.volume.driver.terminate_connection(vol, properties) + self.mox.ReplayAll() + self.volume.driver.backup_volume(self.context, backup, backup_service) + self.mox.UnsetStubs() + + def test_restore_backup(self): + vol = tests_utils.create_volume(self.context) + backup = 
{'volume_id': vol['id'], + 'id': 'backup-for-%s' % vol['id']} + properties = {} + attach_info = {'device': {'path': '/dev/null'}} + root_helper = 'sudo cinder-rootwrap /etc/cinder/rootwrap.conf' + backup_service = self.mox.CreateMock(backup_driver.BackupDriver) + self.mox.StubOutWithMock(cinder.brick.initiator.connector, + 'get_connector_properties') + self.mox.StubOutWithMock(self.volume.driver, '_attach_volume') + self.mox.StubOutWithMock(os, 'getuid') + self.mox.StubOutWithMock(utils, 'execute') + self.mox.StubOutWithMock(fileutils, 'file_open') + self.mox.StubOutWithMock(self.volume.driver, '_detach_volume') + self.mox.StubOutWithMock(self.volume.driver, 'terminate_connection') + + cinder.brick.initiator.connector.\ + get_connector_properties(root_helper, CONF.my_ip).\ + AndReturn(properties) + self.volume.driver._attach_volume(self.context, vol, properties).\ + AndReturn(attach_info) + os.getuid() + utils.execute('chown', None, '/dev/null', run_as_root=True) + f = fileutils.file_open('/dev/null', 'wb').AndReturn(file('/dev/null')) + backup_service.restore(backup, vol['id'], f) + utils.execute('chown', 0, '/dev/null', run_as_root=True) + self.volume.driver._detach_volume(attach_info) + self.volume.driver.terminate_connection(vol, properties) + self.mox.ReplayAll() + self.volume.driver.restore_backup(self.context, backup, vol, + backup_service) + self.mox.UnsetStubs() + + +class LVMISCSIVolumeDriverTestCase(DriverTestCase): """Test case for VolumeDriver""" - driver_name = "cinder.volume.driver.VolumeDriver" + driver_name = "cinder.volume.drivers.lvm.LVMISCSIDriver" def test_delete_busy_volume(self): """Test deleting a busy volume.""" self.stubs.Set(self.volume.driver, '_volume_not_present', lambda x: False) self.stubs.Set(self.volume.driver, '_delete_volume', - lambda x, y: False) - # Want DriverTestCase._fake_execute to return 'o' so that - # volume.driver.delete_volume() raises the VolumeIsBusy exception. - self.output = 'o' + lambda x: False) + + self.volume.driver.vg = FakeBrickLVM('cinder-volumes', + False, + None, + 'default') + + self.stubs.Set(self.volume.driver.vg, 'lv_has_snapshot', + lambda x: True) self.assertRaises(exception.VolumeIsBusy, self.volume.driver.delete_volume, {'name': 'test1', 'size': 1024}) - # when DriverTestCase._fake_execute returns something other than - # 'o' volume.driver.delete_volume() does not raise an exception. 
+ + self.stubs.Set(self.volume.driver.vg, 'lv_has_snapshot', + lambda x: False) self.output = 'x' self.volume.driver.delete_volume({'name': 'test1', 'size': 1024}) + def test_lvm_migrate_volume_no_loc_info(self): + host = {'capabilities': {}} + vol = {'name': 'test', 'id': 1, 'size': 1, 'status': 'available'} + moved, model_update = self.volume.driver.migrate_volume(self.context, + vol, host) + self.assertEqual(moved, False) + self.assertIsNone(model_update) + + def test_lvm_migrate_volume_bad_loc_info(self): + capabilities = {'location_info': 'foo'} + host = {'capabilities': capabilities} + vol = {'name': 'test', 'id': 1, 'size': 1, 'status': 'available'} + moved, model_update = self.volume.driver.migrate_volume(self.context, + vol, host) + self.assertEqual(moved, False) + self.assertIsNone(model_update) + + def test_lvm_migrate_volume_diff_driver(self): + capabilities = {'location_info': 'FooDriver:foo:bar'} + host = {'capabilities': capabilities} + vol = {'name': 'test', 'id': 1, 'size': 1, 'status': 'available'} + moved, model_update = self.volume.driver.migrate_volume(self.context, + vol, host) + self.assertEqual(moved, False) + self.assertIsNone(model_update) + + def test_lvm_migrate_volume_diff_host(self): + capabilities = {'location_info': 'LVMVolumeDriver:foo:bar'} + host = {'capabilities': capabilities} + vol = {'name': 'test', 'id': 1, 'size': 1, 'status': 'available'} + moved, model_update = self.volume.driver.migrate_volume(self.context, + vol, host) + self.assertEqual(moved, False) + self.assertIsNone(model_update) + + def test_lvm_migrate_volume_in_use(self): + hostname = socket.gethostname() + capabilities = {'location_info': 'LVMVolumeDriver:%s:bar' % hostname} + host = {'capabilities': capabilities} + vol = {'name': 'test', 'id': 1, 'size': 1, 'status': 'in-use'} + moved, model_update = self.volume.driver.migrate_volume(self.context, + vol, host) + self.assertEqual(moved, False) + self.assertIsNone(model_update) + + def test_lvm_migrate_volume_proceed(self): + hostname = socket.gethostname() + capabilities = {'location_info': 'LVMVolumeDriver:%s:' + 'cinder-volumes-2:default:0' % hostname} + host = {'capabilities': capabilities} + vol = {'name': 'test', 'id': 1, 'size': 1, 'status': 'available'} + + def fake_execute(*args, **kwargs): + pass + + def get_all_volume_groups(): + # NOTE(flaper87) Return just the destination + # host to test the check of dest VG existence. 
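+ # migrate_volume should only proceed when the destination VG + # named in location_info is present on this host.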
+ return [{'name': 'cinder-volumes-2'}] + + def _fake_get_all_physical_volumes(obj, root_helper, vg_name): + return [{}] + + self.stubs.Set(brick_lvm.LVM, + 'get_all_physical_volumes', + _fake_get_all_physical_volumes) + + self.stubs.Set(self.volume.driver, '_execute', fake_execute) + + self.stubs.Set(volutils, 'copy_volume', + lambda x, y, z, sync=False, execute='foo', + blocksize=mox.IgnoreArg(): None) + + self.stubs.Set(volutils, 'get_all_volume_groups', + get_all_volume_groups) + + self.stubs.Set(self.volume.driver, '_delete_volume', + lambda x: None) + + self.stubs.Set(self.volume.driver, '_create_export', + lambda x, y, vg='vg': None) + + self.volume.driver.vg = FakeBrickLVM('cinder-volumes', + False, + None, + 'default') + moved, model_update = self.volume.driver.migrate_volume(self.context, + vol, host) + self.assertEqual(moved, True) + self.assertIsNone(model_update) + + +class LVMVolumeDriverTestCase(DriverTestCase): + """Test case for VolumeDriver""" + driver_name = "cinder.volume.drivers.lvm.LVMVolumeDriver" + FAKE_VOLUME = {'name': 'test1', + 'id': 'test1'} + + def test_delete_volume_invalid_parameter(self): + configuration = conf.Configuration(fake_opt, 'fake_group') + configuration.volume_clear = 'zero' + configuration.volume_clear_size = 0 + lvm_driver = lvm.LVMVolumeDriver(configuration=configuration) + self.mox.StubOutWithMock(os.path, 'exists') + + os.path.exists(mox.IgnoreArg()).AndReturn(True) + + self.mox.ReplayAll() + + # Test volume without 'size' field and 'volume_size' field + self.assertRaises(exception.InvalidParameterValue, + lvm_driver._delete_volume, + self.FAKE_VOLUME) + + def test_delete_volume_bad_path(self): + configuration = conf.Configuration(fake_opt, 'fake_group') + configuration.volume_clear = 'zero' + configuration.volume_clear_size = 0 + volume = dict(self.FAKE_VOLUME, size=1) + lvm_driver = lvm.LVMVolumeDriver(configuration=configuration) + + self.mox.StubOutWithMock(os.path, 'exists') + os.path.exists(mox.IgnoreArg()).AndReturn(False) + self.mox.ReplayAll() + + self.assertRaises(exception.VolumeBackendAPIException, + lvm_driver._delete_volume, volume) + + def test_delete_volume_thinlvm_snap(self): + configuration = conf.Configuration(fake_opt, 'fake_group') + configuration.volume_clear = 'zero' + configuration.volume_clear_size = 0 + configuration.lvm_type = 'thin' + lvm_driver = lvm.LVMISCSIDriver(configuration=configuration, + vg_obj=mox.MockAnything()) + + # Ensures that copy_volume is not called for ThinLVM + self.mox.StubOutWithMock(volutils, 'copy_volume') + self.mox.StubOutWithMock(volutils, 'clear_volume') + self.mox.StubOutWithMock(lvm_driver, '_execute') + self.mox.ReplayAll() + + uuid = '00000000-0000-0000-0000-c3aa7ee01536' + + fake_snapshot = {'name': 'volume-' + uuid, + 'id': uuid, + 'size': 123} + + lvm_driver._delete_volume(fake_snapshot, is_snapshot=True) + class ISCSITestCase(DriverTestCase): """Test Case for ISCSIDriver""" - driver_name = "cinder.volume.driver.ISCSIDriver" + driver_name = "cinder.volume.drivers.lvm.LVMISCSIDriver" + base_driver = driver.ISCSIDriver + + def setUp(self): + super(ISCSITestCase, self).setUp() + self.configuration = mox.MockObject(conf.Configuration) + self.configuration.num_iscsi_scan_tries = 3 + self.configuration.iscsi_num_targets = 100 + self.configuration.iscsi_target_prefix = 'iqn.2010-10.org.openstack:' + self.configuration.iscsi_ip_address = '0.0.0.0' + self.configuration.iscsi_port = 3260 def _attach_volume(self): - """Attach volumes to an instance. 
""" + """Attach volumes to an instance.""" volume_id_list = [] for index in xrange(3): vol = {} @@ -827,6 +2814,130 @@ def _attach_volume(self): return volume_id_list + def test_do_iscsi_discovery(self): + self.configuration.append_config_values(mox.IgnoreArg()) + iscsi_driver = self.base_driver(configuration=self.configuration) + iscsi_driver._execute = lambda *a, **kw: \ + ("%s dummy" % CONF.iscsi_ip_address, '') + volume = {"name": "dummy", + "host": "0.0.0.0"} + iscsi_driver._do_iscsi_discovery(volume) + + def test_get_iscsi_properties(self): + volume = {"provider_location": '', + "id": "0", + "provider_auth": "a b c", + "attached_mode": "rw"} + iscsi_driver = self.base_driver(configuration=self.configuration) + iscsi_driver._do_iscsi_discovery = lambda v: "0.0.0.0:0000,0 iqn:iqn 0" + result = iscsi_driver._get_iscsi_properties(volume) + self.assertEqual(result["target_portal"], "0.0.0.0:0000") + self.assertEqual(result["target_iqn"], "iqn:iqn") + self.assertEqual(result["target_lun"], 0) + + def test_get_volume_stats(self): + + def _fake_get_all_physical_volumes(obj, root_helper, vg_name): + return [{}] + + def _fake_get_all_volume_groups(obj, vg_name=None, no_suffix=True): + return [{'name': 'cinder-volumes', + 'size': '5.52', + 'available': '0.52', + 'lv_count': '2', + 'uuid': 'vR1JU3-FAKE-C4A9-PQFh-Mctm-9FwA-Xwzc1m'}] + + self.stubs.Set(brick_lvm.LVM, + 'get_all_volume_groups', + _fake_get_all_volume_groups) + + self.stubs.Set(brick_lvm.LVM, + 'get_all_physical_volumes', + _fake_get_all_physical_volumes) + + self.volume.driver.vg = brick_lvm.LVM('cinder-volumes', 'sudo') + + self.volume.driver._update_volume_stats() + + stats = self.volume.driver._stats + + self.assertEqual(stats['total_capacity_gb'], float('5.52')) + self.assertEqual(stats['free_capacity_gb'], float('0.52')) + + def test_validate_connector(self): + iscsi_driver = self.base_driver(configuration=self.configuration) + # Validate a valid connector + connector = {'ip': '10.0.0.2', + 'host': 'fakehost', + 'initiator': 'iqn.2012-07.org.fake:01'} + iscsi_driver.validate_connector(connector) + + # Validate a connector without the initiator + connector = {'ip': '10.0.0.2', 'host': 'fakehost'} + self.assertRaises(exception.VolumeBackendAPIException, + iscsi_driver.validate_connector, connector) + + +class ISERTestCase(ISCSITestCase): + """Test Case for ISERDriver.""" + driver_name = "cinder.volume.drivers.lvm.LVMISERDriver" + base_driver = driver.ISERDriver + + def setUp(self): + super(ISERTestCase, self).setUp() + self.configuration = mox.MockObject(conf.Configuration) + self.configuration.num_iser_scan_tries = 3 + self.configuration.iser_num_targets = 100 + self.configuration.iser_target_prefix = 'iqn.2010-10.org.openstack:' + self.configuration.iser_ip_address = '0.0.0.0' + self.configuration.iser_port = 3260 + + def test_get_volume_stats(self): + def _fake_get_all_physical_volumes(obj, root_helper, vg_name): + return [{}] + + def _fake_get_all_volume_groups(obj, vg_name=None, no_suffix=True): + return [{'name': 'cinder-volumes', + 'size': '5.52', + 'available': '0.52', + 'lv_count': '2', + 'uuid': 'vR1JU3-FAKE-C4A9-PQFh-Mctm-9FwA-Xwzc1m'}] + + self.stubs.Set(brick_lvm.LVM, + 'get_all_physical_volumes', + _fake_get_all_physical_volumes) + + self.stubs.Set(brick_lvm.LVM, + 'get_all_volume_groups', + _fake_get_all_volume_groups) + self.volume.driver.vg = brick_lvm.LVM('cinder-volumes', 'sudo') + + stats = self.volume.driver.get_volume_stats(refresh=True) + + self.assertEqual(stats['total_capacity_gb'], float('5.52')) + 
self.assertEqual(stats['free_capacity_gb'], float('0.52')) + self.assertEqual(stats['storage_protocol'], 'iSER') + + def test_get_volume_stats2(self): + iser_driver = self.base_driver(configuration=self.configuration) + + stats = iser_driver.get_volume_stats(refresh=True) + + self.assertEqual(stats['total_capacity_gb'], 'infinite') + self.assertEqual(stats['free_capacity_gb'], 'infinite') + self.assertEqual(stats['storage_protocol'], 'iSER') + + +class FibreChannelTestCase(DriverTestCase): + """Test Case for FibreChannelDriver.""" + driver_name = "cinder.volume.driver.FibreChannelDriver" + + def test_initialize_connection(self): + self.driver = driver.FibreChannelDriver() + self.driver.do_setup(None) + self.assertRaises(NotImplementedError, + self.driver.initialize_connection, {}, {}) + class VolumePolicyTestCase(test.TestCase): @@ -837,13 +2948,14 @@ def setUp(self): cinder.policy.init() self.context = context.get_admin_context() + self.stubs.Set(brick_lvm.LVM, '_vg_exists', lambda x: True) def tearDown(self): super(VolumePolicyTestCase, self).tearDown() cinder.policy.reset() def _set_rules(self, rules): - cinder.common.policy.set_brain(cinder.common.policy.HttpBrain(rules)) + cinder.common.policy.set_brain(cinder.common.policy.Brain(rules)) def test_check_policy(self): self.mox.StubOutWithMock(cinder.policy, 'enforce') diff --git a/cinder/tests/test_volume_configuration.py b/cinder/tests/test_volume_configuration.py new file mode 100644 index 0000000000..63b33b33c9 --- /dev/null +++ b/cinder/tests/test_volume_configuration.py @@ -0,0 +1,68 @@ +# Copyright (c) 2012 Rackspace Hosting +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
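+# NOTE: configuration.Configuration proxies attribute lookups through to +# the global CONF object, optionally under a named config group, which is +# what lets each backend override driver options; the tests below +# exercise that grafting behavior.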
+ +"""Tests for the configuration wrapper in volume drivers.""" + + +from oslo.config import cfg + +from cinder.openstack.common import log as logging +from cinder import test +from cinder.volume import configuration + + +LOG = logging.getLogger(__name__) + + +volume_opts = [ + cfg.StrOpt('str_opt', default='STR_OPT'), + cfg.BoolOpt('bool_opt', default=False) +] +more_volume_opts = [ + cfg.IntOpt('int_opt', default=1), +] + +CONF = cfg.CONF +CONF.register_opts(volume_opts) +CONF.register_opts(more_volume_opts) + + +class VolumeConfigurationTest(test.TestCase): + def setUp(self): + super(VolumeConfigurationTest, self).setUp() + + def tearDown(self): + super(VolumeConfigurationTest, self).tearDown() + + def test_group_grafts_opts(self): + c = configuration.Configuration(volume_opts, config_group='foo') + self.assertEqual(c.str_opt, CONF.foo.str_opt) + self.assertEqual(c.bool_opt, CONF.foo.bool_opt) + + def test_opts_no_group(self): + c = configuration.Configuration(volume_opts) + self.assertEqual(c.str_opt, CONF.str_opt) + self.assertEqual(c.bool_opt, CONF.bool_opt) + + def test_grafting_multiple_opts(self): + c = configuration.Configuration(volume_opts, config_group='foo') + c.append_config_values(more_volume_opts) + self.assertEqual(c.str_opt, CONF.foo.str_opt) + self.assertEqual(c.bool_opt, CONF.foo.bool_opt) + self.assertEqual(c.int_opt, CONF.foo.int_opt) + + def test_safe_get(self): + c = configuration.Configuration(volume_opts, config_group='foo') + self.assertIsNone(c.safe_get('none_opt')) diff --git a/cinder/tests/test_volume_glance_metadata.py b/cinder/tests/test_volume_glance_metadata.py new file mode 100644 index 0000000000..a3578730c2 --- /dev/null +++ b/cinder/tests/test_volume_glance_metadata.py @@ -0,0 +1,168 @@ +# Copyright (c) 2011 Zadara Storage Inc. +# Copyright (c) 2011 OpenStack Foundation +# Copyright 2011 University of Southern California +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+""" +Unit Tests for volume types extra specs code +""" + +from cinder import context +from cinder import db +from cinder import exception +from cinder import test + + +class VolumeGlanceMetadataTestCase(test.TestCase): + + def setUp(self): + super(VolumeGlanceMetadataTestCase, self).setUp() + self.ctxt = context.get_admin_context() + + def test_vol_glance_metadata_bad_vol_id(self): + ctxt = context.get_admin_context() + self.assertRaises(exception.VolumeNotFound, + db.volume_glance_metadata_create, + ctxt, 1, 'key1', 'value1') + self.assertRaises(exception.VolumeNotFound, + db.volume_glance_metadata_get, ctxt, 1) + db.volume_glance_metadata_delete_by_volume(ctxt, 10) + + def test_vol_update_glance_metadata(self): + ctxt = context.get_admin_context() + db.volume_create(ctxt, {'id': 1}) + db.volume_create(ctxt, {'id': 2}) + vol_metadata = db.volume_glance_metadata_create(ctxt, 1, 'key1', + 'value1') + vol_metadata = db.volume_glance_metadata_create(ctxt, 2, 'key1', + 'value1') + vol_metadata = db.volume_glance_metadata_create(ctxt, 2, + 'key2', + 'value2') + vol_metadata = db.volume_glance_metadata_create(ctxt, 2, + 'key3', + 123) + + expected_metadata_1 = {'volume_id': '1', + 'key': 'key1', + 'value': 'value1'} + + metadata = db.volume_glance_metadata_get(ctxt, 1) + self.assertEqual(len(metadata), 1) + for key, value in expected_metadata_1.items(): + self.assertEqual(metadata[0][key], value) + + expected_metadata_2 = ({'volume_id': '2', + 'key': 'key1', + 'value': 'value1'}, + {'volume_id': '2', + 'key': 'key2', + 'value': 'value2'}, + {'volume_id': '2', + 'key': 'key3', + 'value': '123'}) + + metadata = db.volume_glance_metadata_get(ctxt, 2) + self.assertEqual(len(metadata), 3) + for expected, meta in zip(expected_metadata_2, metadata): + for key, value in expected.iteritems(): + self.assertEqual(meta[key], value) + + self.assertRaises(exception.GlanceMetadataExists, + db.volume_glance_metadata_create, + ctxt, 1, 'key1', 'value1a') + + metadata = db.volume_glance_metadata_get(ctxt, 1) + self.assertEqual(len(metadata), 1) + for key, value in expected_metadata_1.items(): + self.assertEqual(metadata[0][key], value) + + def test_vols_get_glance_metadata(self): + ctxt = context.get_admin_context() + db.volume_create(ctxt, {'id': '1'}) + db.volume_create(ctxt, {'id': '2'}) + db.volume_create(ctxt, {'id': '3'}) + db.volume_glance_metadata_create(ctxt, '1', 'key1', 'value1') + db.volume_glance_metadata_create(ctxt, '2', 'key2', 'value2') + db.volume_glance_metadata_create(ctxt, '2', 'key22', 'value22') + + metadata = db.volume_glance_metadata_get_all(ctxt) + self.assertEqual(len(metadata), 3) + self._assert_metadata_equals('1', 'key1', 'value1', metadata[0]) + self._assert_metadata_equals('2', 'key2', 'value2', metadata[1]) + self._assert_metadata_equals('2', 'key22', 'value22', metadata[2]) + + def _assert_metadata_equals(self, volume_id, key, value, observed): + self.assertEqual(volume_id, observed.volume_id) + self.assertEqual(key, observed.key) + self.assertEqual(value, observed.value) + + def test_vol_delete_glance_metadata(self): + ctxt = context.get_admin_context() + db.volume_create(ctxt, {'id': 1}) + db.volume_glance_metadata_delete_by_volume(ctxt, 1) + vol_metadata = db.volume_glance_metadata_create(ctxt, 1, 'key1', + 'value1') + db.volume_glance_metadata_delete_by_volume(ctxt, 1) + self.assertRaises(exception.GlanceMetadataNotFound, + db.volume_glance_metadata_get, ctxt, 1) + + def test_vol_glance_metadata_copy_to_snapshot(self): + ctxt = context.get_admin_context() + 
db.volume_create(ctxt, {'id': 1}) + db.snapshot_create(ctxt, {'id': 100, 'volume_id': 1}) + vol_meta = db.volume_glance_metadata_create(ctxt, 1, 'key1', + 'value1') + db.volume_glance_metadata_copy_to_snapshot(ctxt, 100, 1) + + expected_meta = {'snapshot_id': '100', + 'key': 'key1', + 'value': 'value1'} + + for meta in db.volume_snapshot_glance_metadata_get(ctxt, 100): + for (key, value) in expected_meta.items(): + self.assertEqual(meta[key], value) + + def test_vol_glance_metadata_copy_from_volume_to_volume(self): + ctxt = context.get_admin_context() + db.volume_create(ctxt, {'id': 1}) + db.volume_create(ctxt, {'id': 100, 'source_volid': 1}) + vol_meta = db.volume_glance_metadata_create(ctxt, 1, 'key1', + 'value1') + db.volume_glance_metadata_copy_from_volume_to_volume(ctxt, 1, 100) + + expected_meta = {'key': 'key1', + 'value': 'value1'} + + for meta in db.volume_glance_metadata_get(ctxt, 100): + for (key, value) in expected_meta.items(): + self.assertEqual(meta[key], value) + + def test_volume_glance_metadata_copy_to_volume(self): + vol1 = db.volume_create(self.ctxt, {}) + vol2 = db.volume_create(self.ctxt, {}) + db.volume_glance_metadata_create(self.ctxt, vol1['id'], 'm1', 'v1') + snapshot = db.snapshot_create(self.ctxt, {'volume_id': vol1['id']}) + db.volume_glance_metadata_copy_to_snapshot(self.ctxt, snapshot['id'], + vol1['id']) + db.volume_glance_metadata_copy_to_volume(self.ctxt, vol2['id'], + snapshot['id']) + metadata = db.volume_glance_metadata_get(self.ctxt, vol2['id']) + metadata = dict([(m['key'], m['value']) for m in metadata]) + self.assertEqual(metadata, {'m1': 'v1'}) + + def test_volume_snapshot_glance_metadata_get_nonexistent(self): + vol = db.volume_create(self.ctxt, {}) + snapshot = db.snapshot_create(self.ctxt, {'volume_id': vol['id']}) + self.assertRaises(exception.GlanceMetadataNotFound, + db.volume_snapshot_glance_metadata_get, + self.ctxt, snapshot['id']) diff --git a/cinder/tests/test_volume_rpcapi.py b/cinder/tests/test_volume_rpcapi.py new file mode 100644 index 0000000000..9beabd1a40 --- /dev/null +++ b/cinder/tests/test_volume_rpcapi.py @@ -0,0 +1,268 @@ +# Copyright 2012, Intel, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
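+# NOTE: the _test_volume_api() helper below builds the message each +# client-side cast/call is expected to produce and compares it, argument +# by argument, with what the stubbed rpc method actually receives, +# including the expected rpc API version.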
+ +""" +Unit Tests for cinder.volume.rpcapi +""" + + +from oslo.config import cfg + +from cinder import context +from cinder import db +from cinder.openstack.common import jsonutils +from cinder.openstack.common import rpc +from cinder import test +from cinder.volume import rpcapi as volume_rpcapi + + +CONF = cfg.CONF + + +class VolumeRpcAPITestCase(test.TestCase): + + def setUp(self): + super(VolumeRpcAPITestCase, self).setUp() + self.context = context.get_admin_context() + vol = {} + vol['host'] = 'fake_host' + vol['availability_zone'] = CONF.storage_availability_zone + vol['status'] = "available" + vol['attach_status'] = "detached" + vol['metadata'] = {"test_key": "test_val"} + volume = db.volume_create(self.context, vol) + + snpshot = { + 'volume_id': 'fake_id', + 'status': "creating", + 'progress': '0%', + 'volume_size': 0, + 'display_name': 'fake_name', + 'display_description': 'fake_description'} + snapshot = db.snapshot_create(self.context, snpshot) + self.fake_volume = jsonutils.to_primitive(volume) + self.fake_volume_metadata = volume["volume_metadata"] + self.fake_snapshot = jsonutils.to_primitive(snapshot) + + def test_serialized_volume_has_id(self): + self.assertIn('id', self.fake_volume) + + def _test_volume_api(self, method, rpc_method, **kwargs): + ctxt = context.RequestContext('fake_user', 'fake_project') + + if 'rpcapi_class' in kwargs: + rpcapi_class = kwargs['rpcapi_class'] + del kwargs['rpcapi_class'] + else: + rpcapi_class = volume_rpcapi.VolumeAPI + rpcapi = rpcapi_class() + expected_retval = 'foo' if method == 'call' else None + + expected_version = kwargs.pop('version', rpcapi.BASE_RPC_API_VERSION) + + if 'request_spec' in kwargs: + spec = jsonutils.to_primitive(kwargs['request_spec']) + kwargs['request_spec'] = spec + + expected_msg = rpcapi.make_msg(method, **kwargs) + if 'volume' in expected_msg['args']: + volume = expected_msg['args']['volume'] + del expected_msg['args']['volume'] + expected_msg['args']['volume_id'] = volume['id'] + if 'snapshot' in expected_msg['args']: + snapshot = expected_msg['args']['snapshot'] + del expected_msg['args']['snapshot'] + expected_msg['args']['snapshot_id'] = snapshot['id'] + if 'host' in expected_msg['args']: + del expected_msg['args']['host'] + if 'dest_host' in expected_msg['args']: + dest_host = expected_msg['args']['dest_host'] + dest_host_dict = {'host': dest_host.host, + 'capabilities': dest_host.capabilities} + del expected_msg['args']['dest_host'] + expected_msg['args']['host'] = dest_host_dict + if 'new_volume' in expected_msg['args']: + volume = expected_msg['args']['new_volume'] + del expected_msg['args']['new_volume'] + expected_msg['args']['new_volume_id'] = volume['id'] + + expected_msg['version'] = expected_version + + if 'host' in kwargs: + host = kwargs['host'] + else: + host = kwargs['volume']['host'] + expected_topic = '%s:%s' % (CONF.volume_topic, host) + + self.fake_args = None + self.fake_kwargs = None + + def _fake_rpc_method(*args, **kwargs): + self.fake_args = args + self.fake_kwargs = kwargs + if expected_retval: + return expected_retval + + self.stubs.Set(rpc, rpc_method, _fake_rpc_method) + + retval = getattr(rpcapi, method)(ctxt, **kwargs) + + self.assertEqual(retval, expected_retval) + expected_args = [ctxt, expected_topic, expected_msg] + for arg, expected_arg in zip(self.fake_args, expected_args): + self.assertEqual(arg, expected_arg) + + def test_create_volume(self): + self._test_volume_api('create_volume', + rpc_method='cast', + volume=self.fake_volume, + host='fake_host1', + 
request_spec='fake_request_spec', + filter_properties='fake_properties', + allow_reschedule=True, + snapshot_id='fake_snapshot_id', + image_id='fake_image_id', + source_volid='fake_src_id', + version='1.4') + + def test_create_volume_serialization(self): + request_spec = {"metadata": self.fake_volume_metadata} + self._test_volume_api('create_volume', + rpc_method='cast', + volume=self.fake_volume, + host='fake_host1', + request_spec=request_spec, + filter_properties='fake_properties', + allow_reschedule=True, + snapshot_id='fake_snapshot_id', + image_id='fake_image_id', + source_volid='fake_src_id', + version='1.4') + + def test_delete_volume(self): + self._test_volume_api('delete_volume', + rpc_method='cast', + volume=self.fake_volume) + + def test_create_snapshot(self): + self._test_volume_api('create_snapshot', + rpc_method='cast', + volume=self.fake_volume, + snapshot=self.fake_snapshot) + + def test_delete_snapshot(self): + self._test_volume_api('delete_snapshot', + rpc_method='cast', + snapshot=self.fake_snapshot, + host='fake_host') + + def test_attach_volume_to_instance(self): + self._test_volume_api('attach_volume', + rpc_method='call', + volume=self.fake_volume, + instance_uuid='fake_uuid', + host_name=None, + mountpoint='fake_mountpoint', + mode='ro', + version='1.11') + + def test_attach_volume_to_host(self): + self._test_volume_api('attach_volume', + rpc_method='call', + volume=self.fake_volume, + instance_uuid=None, + host_name='fake_host', + mountpoint='fake_mountpoint', + mode='rw', + version='1.11') + + def test_detach_volume(self): + self._test_volume_api('detach_volume', + rpc_method='call', + volume=self.fake_volume) + + def test_copy_volume_to_image(self): + self._test_volume_api('copy_volume_to_image', + rpc_method='cast', + volume=self.fake_volume, + image_meta={'id': 'fake_image_id', + 'container_format': 'fake_type', + 'disk_format': 'fake_type'}, + version='1.3') + + def test_initialize_connection(self): + self._test_volume_api('initialize_connection', + rpc_method='call', + volume=self.fake_volume, + connector='fake_connector') + + def test_terminate_connection(self): + self._test_volume_api('terminate_connection', + rpc_method='call', + volume=self.fake_volume, + connector='fake_connector', + force=False) + + def test_accept_transfer(self): + self._test_volume_api('accept_transfer', + rpc_method='cast', + volume=self.fake_volume, + new_user='e5565fd0-06c8-11e3-' + '8ffd-0800200c9b77', + new_project='e4465fd0-06c8-11e3' + '-8ffd-0800200c9a66', + version='1.9') + + def test_extend_volume(self): + self._test_volume_api('extend_volume', + rpc_method='cast', + volume=self.fake_volume, + new_size=1, + version='1.6') + + def test_migrate_volume(self): + class FakeHost(object): + def __init__(self): + self.host = 'host' + self.capabilities = {} + dest_host = FakeHost() + self._test_volume_api('migrate_volume', + rpc_method='cast', + volume=self.fake_volume, + dest_host=dest_host, + force_host_copy=True, + version='1.8') + + def test_migrate_volume_completion(self): + self._test_volume_api('migrate_volume_completion', + rpc_method='call', + volume=self.fake_volume, + new_volume=self.fake_volume, + error=False, + version='1.10') + + def test_retype(self): + class FakeHost(object): + def __init__(self): + self.host = 'host' + self.capabilities = {} + dest_host = FakeHost() + self._test_volume_api('retype', + rpc_method='cast', + volume=self.fake_volume, + new_type_id='fake', + dest_host=dest_host, + migration_policy='never', + reservations=None, + version='1.12') diff 
--git a/cinder/tests/test_volume_transfer.py b/cinder/tests/test_volume_transfer.py new file mode 100644 index 0000000000..eac3db1bf6 --- /dev/null +++ b/cinder/tests/test_volume_transfer.py @@ -0,0 +1,137 @@ +# Copyright (c) 2013 OpenStack Foundation +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""Unit Tests for volume transfers.""" + + +import datetime + +from cinder import context +from cinder import db +from cinder import exception +from cinder.openstack.common import log as logging +from cinder import test +from cinder.tests import utils +from cinder.transfer import api as transfer_api + + +LOG = logging.getLogger(__name__) + + +class VolumeTransferTestCase(test.TestCase): + """Test cases for volume transfer code.""" + def setUp(self): + super(VolumeTransferTestCase, self).setUp() + self.ctxt = context.RequestContext(user_id='user_id', + project_id='project_id') + self.updated_at = datetime.datetime(1, 1, 1, 1, 1, 1) + + def test_transfer_volume_create_delete(self): + tx_api = transfer_api.API() + utils.create_volume(self.ctxt, id='1', + updated_at=self.updated_at) + response = tx_api.create(self.ctxt, '1', 'Description') + volume = db.volume_get(self.ctxt, '1') + self.assertEqual('awaiting-transfer', volume['status'], + 'Unexpected state') + + tx_api.delete(self.ctxt, response['id']) + volume = db.volume_get(self.ctxt, '1') + self.assertEqual('available', volume['status'], 'Unexpected state') + + def test_transfer_invalid_volume(self): + tx_api = transfer_api.API() + utils.create_volume(self.ctxt, id='1', status='in-use', + updated_at=self.updated_at) + self.assertRaises(exception.InvalidVolume, + tx_api.create, + self.ctxt, '1', 'Description') + volume = db.volume_get(self.ctxt, '1') + self.assertEqual('in-use', volume['status'], 'Unexpected state') + + def test_transfer_accept(self): + tx_api = transfer_api.API() + utils.create_volume(self.ctxt, id='1', + updated_at=self.updated_at) + transfer = tx_api.create(self.ctxt, '1', 'Description') + volume = db.volume_get(self.ctxt, '1') + self.assertEqual('awaiting-transfer', volume['status'], + 'Unexpected state') + + self.assertRaises(exception.TransferNotFound, + tx_api.accept, + self.ctxt, '2', transfer['auth_key']) + + self.assertRaises(exception.InvalidAuthKey, + tx_api.accept, + self.ctxt, transfer['id'], 'wrong') + + db.volume_update(self.ctxt, '1', {'status': 'wrong'}) + self.assertRaises(exception.InvalidVolume, + tx_api.accept, + self.ctxt, transfer['id'], transfer['auth_key']) + db.volume_update(self.ctxt, '1', {'status': 'awaiting-transfer'}) + + self.ctxt.user_id = 'new_user_id' + self.ctxt.project_id = 'new_project_id' + response = tx_api.accept(self.ctxt, + transfer['id'], + transfer['auth_key']) + volume = db.volume_get(self.ctxt, '1') + self.assertEqual(volume['project_id'], 'new_project_id', + 'Unexpected project id') + self.assertEqual(volume['user_id'], 'new_user_id', + 'Unexpected user id') + + self.assertEqual(volume['id'], response['volume_id'], + 'Unexpected volume id in response.') + self.assertEqual(transfer['id'], 
response['id'], + 'Unexpected transfer id in response.') + + def test_transfer_get(self): + tx_api = transfer_api.API() + volume = utils.create_volume(self.ctxt, id='1', + updated_at=self.updated_at) + transfer = tx_api.create(self.ctxt, volume['id'], 'Description') + t = tx_api.get(self.ctxt, transfer['id']) + self.assertEqual(t['id'], transfer['id'], 'Unexpected transfer id') + + ts = tx_api.get_all(self.ctxt) + self.assertEqual(len(ts), 1, 'Unexpected number of transfers.') + + nctxt = context.RequestContext(user_id='new_user_id', + project_id='new_project_id') + utils.create_volume(nctxt, id='2', updated_at=self.updated_at) + self.assertRaises(exception.TransferNotFound, + tx_api.get, + nctxt, + transfer['id']) + + ts = tx_api.get_all(nctxt) + self.assertEqual(len(ts), 0, 'Unexpected transfers listed.') + + def test_delete_transfer_with_deleted_volume(self): + #create a volume + volume = utils.create_volume(self.ctxt, id='1', + updated_at=self.updated_at) + #create a transfer + tx_api = transfer_api.API() + transfer = tx_api.create(self.ctxt, volume['id'], 'Description') + t = tx_api.get(self.ctxt, transfer['id']) + self.assertEqual(t['id'], transfer['id'], 'Unexpected transfer id') + #force delete volume + db.volume_destroy(context.get_admin_context(), volume['id']) + #Make sure transfer has been deleted. + self.assertRaises(exception.TransferNotFound, + tx_api.get, + self.ctxt, + transfer['id']) diff --git a/cinder/tests/test_volume_types.py b/cinder/tests/test_volume_types.py index 8650b4c059..02b2f07857 100644 --- a/cinder/tests/test_volume_types.py +++ b/cinder/tests/test_volume_types.py @@ -1,7 +1,5 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright (c) 2011 Zadara Storage Inc. -# Copyright (c) 2011 OpenStack LLC. +# Copyright (c) 2011 OpenStack Foundation # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at @@ -16,42 +14,46 @@ """ Unit Tests for volume types code """ + + +import datetime import time from cinder import context +from cinder import db +from cinder.db.sqlalchemy import api as db_api +from cinder.db.sqlalchemy import models from cinder import exception -from cinder import flags from cinder.openstack.common import log as logging from cinder import test +from cinder.tests import conf_fixture +from cinder.volume import qos_specs from cinder.volume import volume_types -from cinder.db.sqlalchemy import session as sql_session -from cinder.db.sqlalchemy import models -FLAGS = flags.FLAGS + LOG = logging.getLogger(__name__) class VolumeTypeTestCase(test.TestCase): - """Test cases for volume type code""" + """Test cases for volume type code.""" def setUp(self): super(VolumeTypeTestCase, self).setUp() self.ctxt = context.get_admin_context() self.vol_type1_name = str(int(time.time())) - self.vol_type1_specs = dict( - type="physical drive", - drive_type="SAS", - size="300", - rpm="7200", - visible="True") + self.vol_type1_specs = dict(type="physical drive", + drive_type="SAS", + size="300", + rpm="7200", + visible="True") def test_volume_type_create_then_destroy(self): - """Ensure volume types can be created and deleted""" + """Ensure volume types can be created and deleted.""" prev_all_vtypes = volume_types.get_all_types(self.ctxt) - volume_types.create(self.ctxt, - self.vol_type1_name, - self.vol_type1_specs) + type_ref = volume_types.create(self.ctxt, + self.vol_type1_name, + self.vol_type1_specs) new = volume_types.get_volume_type_by_name(self.ctxt, self.vol_type1_name) @@ -60,40 +62,70 @@ def test_volume_type_create_then_destroy(self): for k, v in self.vol_type1_specs.iteritems(): self.assertEqual(v, new['extra_specs'][k], - 'one of fields doesnt match') + 'one of fields does not match') new_all_vtypes = volume_types.get_all_types(self.ctxt) self.assertEqual(len(prev_all_vtypes) + 1, len(new_all_vtypes), 'drive type was not created') - volume_types.destroy(self.ctxt, self.vol_type1_name) + volume_types.destroy(self.ctxt, type_ref['id']) new_all_vtypes = volume_types.get_all_types(self.ctxt) self.assertEqual(prev_all_vtypes, new_all_vtypes, 'drive type was not deleted') def test_get_all_volume_types(self): - """Ensures that all volume types can be retrieved""" - session = sql_session.get_session() + """Ensures that all volume types can be retrieved.""" + session = db_api.get_session() total_volume_types = session.query(models.VolumeTypes).count() vol_types = volume_types.get_all_types(self.ctxt) self.assertEqual(total_volume_types, len(vol_types)) + def test_get_default_volume_type(self): + """Ensures default volume type can be retrieved.""" + type_ref = volume_types.create(self.ctxt, + conf_fixture.def_vol_type, + {}) + default_vol_type = volume_types.get_default_volume_type() + self.assertEqual(default_vol_type.get('name'), + conf_fixture.def_vol_type) + + def test_default_volume_type_missing_in_db(self): + """Ensures proper exception raised if default volume type + is not in database. 
+ """ + session = db_api.get_session() + default_vol_type = volume_types.get_default_volume_type() + self.assertEqual(default_vol_type, {}) + def test_non_existent_vol_type_shouldnt_delete(self): - """Ensures that volume type creation fails with invalid args""" - self.assertRaises(exception.VolumeTypeNotFoundByName, + """Ensures that volume type creation fails with invalid args.""" + self.assertRaises(exception.VolumeTypeNotFound, volume_types.destroy, self.ctxt, "sfsfsdfdfs") + def test_volume_type_with_volumes_shouldnt_delete(self): + """Ensures volume type deletion with associated volumes fail.""" + type_ref = volume_types.create(self.ctxt, self.vol_type1_name) + db.volume_create(self.ctxt, + {'id': '1', + 'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1), + 'display_description': 'Test Desc', + 'size': 20, + 'status': 'available', + 'volume_type_id': type_ref['id']}) + self.assertRaises(exception.VolumeTypeInUse, + volume_types.destroy, self.ctxt, type_ref['id']) + def test_repeated_vol_types_shouldnt_raise(self): - """Ensures that volume duplicates don't raise""" + """Ensures that volume duplicates don't raise.""" new_name = self.vol_type1_name + "dup" - volume_types.create(self.ctxt, new_name) - volume_types.destroy(self.ctxt, new_name) - volume_types.create(self.ctxt, new_name) + type_ref = volume_types.create(self.ctxt, new_name) + volume_types.destroy(self.ctxt, type_ref['id']) + type_ref = volume_types.create(self.ctxt, new_name) def test_invalid_volume_types_params(self): - """Ensures that volume type creation fails with invalid args""" + """Ensures that volume type creation fails with invalid args.""" self.assertRaises(exception.InvalidVolumeType, volume_types.destroy, self.ctxt, None) self.assertRaises(exception.InvalidVolumeType, @@ -103,7 +135,7 @@ def test_invalid_volume_types_params(self): self.ctxt, None) def test_volume_type_get_by_id_and_name(self): - """Ensure volume types get returns same entry""" + """Ensure volume types get returns same entry.""" volume_types.create(self.ctxt, self.vol_type1_name, self.vol_type1_specs) @@ -114,7 +146,7 @@ def test_volume_type_get_by_id_and_name(self): self.assertEqual(new, new2) def test_volume_type_search_by_extra_spec(self): - """Ensure volume types get by extra spec returns correct type""" + """Ensure volume types get by extra spec returns correct type.""" volume_types.create(self.ctxt, "type1", {"key1": "val1", "key2": "val2"}) volume_types.create(self.ctxt, "type2", {"key2": "val2", @@ -122,29 +154,32 @@ def test_volume_type_search_by_extra_spec(self): volume_types.create(self.ctxt, "type3", {"key3": "another_value", "key4": "val4"}) - vol_types = volume_types.get_all_types(self.ctxt, - search_opts={'extra_specs': {"key1": "val1"}}) + vol_types = volume_types.get_all_types( + self.ctxt, + search_opts={'extra_specs': {"key1": "val1"}}) LOG.info("vol_types: %s" % vol_types) self.assertEqual(len(vol_types), 1) - self.assertTrue("type1" in vol_types.keys()) + self.assertIn("type1", vol_types.keys()) self.assertEqual(vol_types['type1']['extra_specs'], {"key1": "val1", "key2": "val2"}) - vol_types = volume_types.get_all_types(self.ctxt, - search_opts={'extra_specs': {"key2": "val2"}}) + vol_types = volume_types.get_all_types( + self.ctxt, + search_opts={'extra_specs': {"key2": "val2"}}) LOG.info("vol_types: %s" % vol_types) self.assertEqual(len(vol_types), 2) - self.assertTrue("type1" in vol_types.keys()) - self.assertTrue("type2" in vol_types.keys()) + self.assertIn("type1", vol_types.keys()) + self.assertIn("type2", 
vol_types.keys()) - vol_types = volume_types.get_all_types(self.ctxt, - search_opts={'extra_specs': {"key3": "val3"}}) + vol_types = volume_types.get_all_types( + self.ctxt, + search_opts={'extra_specs': {"key3": "val3"}}) LOG.info("vol_types: %s" % vol_types) self.assertEqual(len(vol_types), 1) - self.assertTrue("type2" in vol_types.keys()) + self.assertIn("type2", vol_types.keys()) def test_volume_type_search_by_extra_spec_multiple(self): - """Ensure volume types get by extra spec returns correct type""" + """Ensure volume types get by extra spec returns correct type.""" volume_types.create(self.ctxt, "type1", {"key1": "val1", "key2": "val2", "key3": "val3"}) @@ -154,14 +189,116 @@ def test_volume_type_search_by_extra_spec_multiple(self): "key3": "val3", "key4": "val4"}) - vol_types = volume_types.get_all_types(self.ctxt, - search_opts={'extra_specs': {"key1": "val1", - "key3": "val3"}}) + vol_types = volume_types.get_all_types( + self.ctxt, + search_opts={'extra_specs': {"key1": "val1", + "key3": "val3"}}) LOG.info("vol_types: %s" % vol_types) self.assertEqual(len(vol_types), 2) - self.assertTrue("type1" in vol_types.keys()) - self.assertTrue("type3" in vol_types.keys()) + self.assertIn("type1", vol_types.keys()) + self.assertIn("type3", vol_types.keys()) self.assertEqual(vol_types['type1']['extra_specs'], {"key1": "val1", "key2": "val2", "key3": "val3"}) self.assertEqual(vol_types['type3']['extra_specs'], {"key1": "val1", "key3": "val3", "key4": "val4"}) + + def test_is_encrypted(self): + volume_type = volume_types.create(self.ctxt, "type1") + volume_type_id = volume_type.get('id') + self.assertFalse(volume_types.is_encrypted(self.ctxt, volume_type_id)) + + encryption = { + 'control_location': 'front-end', + 'provider': 'fake_provider', + } + db_api.volume_type_encryption_update_or_create(self.ctxt, + volume_type_id, + encryption) + self.assertTrue(volume_types.is_encrypted(self.ctxt, volume_type_id)) + + def test_get_volume_type_qos_specs(self): + qos_ref = qos_specs.create(self.ctxt, 'qos-specs-1', {'k1': 'v1', + 'k2': 'v2', + 'k3': 'v3'}) + type_ref = volume_types.create(self.ctxt, "type1", {"key2": "val2", + "key3": "val3"}) + res = volume_types.get_volume_type_qos_specs(type_ref['id']) + self.assertIsNone(res['qos_specs']) + qos_specs.associate_qos_with_type(self.ctxt, + qos_ref['id'], + type_ref['id']) + + expected = {'qos_specs': {'id': qos_ref['id'], + 'name': 'qos-specs-1', + 'consumer': 'back-end', + 'specs': { + 'k1': 'v1', + 'k2': 'v2', + 'k3': 'v3'}}} + res = volume_types.get_volume_type_qos_specs(type_ref['id']) + self.assertDictMatch(expected, res) + + def test_volume_types_diff(self): + #type_ref 1 and 2 have the same extra_specs, while 3 has different + keyvals1 = {"key1": "val1", "key2": "val2"} + keyvals2 = {"key1": "val0", "key2": "val2"} + type_ref1 = volume_types.create(self.ctxt, "type1", keyvals1) + type_ref2 = volume_types.create(self.ctxt, "type2", keyvals1) + type_ref3 = volume_types.create(self.ctxt, "type3", keyvals2) + + # Check equality with only extra_specs + diff, same = volume_types.volume_types_diff(self.ctxt, type_ref1['id'], + type_ref2['id']) + self.assertEqual(same, True) + self.assertEqual(diff['extra_specs']['key1'], ('val1', 'val1')) + diff, same = volume_types.volume_types_diff(self.ctxt, type_ref1['id'], + type_ref3['id']) + self.assertEqual(same, False) + self.assertEqual(diff['extra_specs']['key1'], ('val1', 'val0')) + + #qos_ref 1 and 2 have the same specs, while 3 has different + qos_keyvals1 = {'k1': 'v1', 'k2': 'v2', 'k3': 'v3'} + 
qos_keyvals2 = {'k1': 'v0', 'k2': 'v2', 'k3': 'v3'} + qos_ref1 = qos_specs.create(self.ctxt, 'qos-specs-1', qos_keyvals1) + qos_ref2 = qos_specs.create(self.ctxt, 'qos-specs-2', qos_keyvals1) + qos_ref3 = qos_specs.create(self.ctxt, 'qos-specs-3', qos_keyvals2) + + # Check equality with qos specs too + qos_specs.associate_qos_with_type(self.ctxt, qos_ref1['id'], + type_ref1['id']) + qos_specs.associate_qos_with_type(self.ctxt, qos_ref2['id'], + type_ref2['id']) + diff, same = volume_types.volume_types_diff(self.ctxt, type_ref1['id'], + type_ref2['id']) + self.assertEqual(same, True) + self.assertEqual(diff['extra_specs']['key1'], ('val1', 'val1')) + self.assertEqual(diff['qos_specs']['k1'], ('v1', 'v1')) + qos_specs.disassociate_qos_specs(self.ctxt, qos_ref2['id'], + type_ref2['id']) + qos_specs.associate_qos_with_type(self.ctxt, qos_ref3['id'], + type_ref2['id']) + diff, same = volume_types.volume_types_diff(self.ctxt, type_ref1['id'], + type_ref2['id']) + self.assertEqual(same, False) + self.assertEqual(diff['extra_specs']['key1'], ('val1', 'val1')) + self.assertEqual(diff['qos_specs']['k1'], ('v1', 'v0')) + qos_specs.disassociate_qos_specs(self.ctxt, qos_ref3['id'], + type_ref2['id']) + qos_specs.associate_qos_with_type(self.ctxt, qos_ref2['id'], + type_ref2['id']) + + # And add encryption for good measure + enc_keyvals1 = {'cipher': 'c1', 'key_size': 256, 'provider': 'p1', + 'control_location': 'front-end'} + enc_keyvals2 = {'cipher': 'c1', 'key_size': 128, 'provider': 'p1', + 'control_location': 'front-end'} + db.volume_type_encryption_update_or_create(self.ctxt, type_ref1['id'], + enc_keyvals1) + db.volume_type_encryption_update_or_create(self.ctxt, type_ref2['id'], + enc_keyvals2) + diff, same = volume_types.volume_types_diff(self.ctxt, type_ref1['id'], + type_ref2['id']) + self.assertEqual(same, False) + self.assertEqual(diff['extra_specs']['key1'], ('val1', 'val1')) + self.assertEqual(diff['qos_specs']['k1'], ('v1', 'v1')) + self.assertEqual(diff['encryption']['key_size'], (256, 128)) diff --git a/cinder/tests/test_volume_types_extra_specs.py b/cinder/tests/test_volume_types_extra_specs.py index e7241086fb..bb8268ca1c 100644 --- a/cinder/tests/test_volume_types_extra_specs.py +++ b/cinder/tests/test_volume_types_extra_specs.py @@ -1,7 +1,5 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright (c) 2011 Zadara Storage Inc. -# Copyright (c) 2011 OpenStack LLC. +# Copyright (c) 2011 OpenStack Foundation # Copyright 2011 University of Southern California # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain @@ -30,8 +28,8 @@ def setUp(self): self.context = context.get_admin_context() self.vol_type1 = dict(name="TEST: Regular volume test") self.vol_type1_specs = dict(vol_extra1="value1", - vol_extra2="value2", - vol_extra3=3) + vol_extra2="value2", + vol_extra3=3) self.vol_type1['extra_specs'] = self.vol_type1_specs ref = db.volume_type_create(self.context, self.vol_type1) self.volume_type1_id = ref.id @@ -45,86 +43,84 @@ def setUp(self): def tearDown(self): # Remove the volume type from the database db.volume_type_destroy(context.get_admin_context(), - self.vol_type1['name']) + self.vol_type1['id']) db.volume_type_destroy(context.get_admin_context(), - self.vol_type2_noextra['name']) + self.vol_type2_noextra['id']) super(VolumeTypeExtraSpecsTestCase, self).tearDown() def test_volume_type_specs_get(self): expected_specs = self.vol_type1_specs.copy() actual_specs = db.volume_type_extra_specs_get( - context.get_admin_context(), - self.volume_type1_id) - self.assertEquals(expected_specs, actual_specs) + context.get_admin_context(), + self.volume_type1_id) + self.assertEqual(expected_specs, actual_specs) def test_volume_type_extra_specs_delete(self): expected_specs = self.vol_type1_specs.copy() del expected_specs['vol_extra2'] db.volume_type_extra_specs_delete(context.get_admin_context(), - self.volume_type1_id, - 'vol_extra2') + self.volume_type1_id, + 'vol_extra2') actual_specs = db.volume_type_extra_specs_get( - context.get_admin_context(), - self.volume_type1_id) - self.assertEquals(expected_specs, actual_specs) + context.get_admin_context(), + self.volume_type1_id) + self.assertEqual(expected_specs, actual_specs) def test_volume_type_extra_specs_update(self): expected_specs = self.vol_type1_specs.copy() expected_specs['vol_extra3'] = "4" db.volume_type_extra_specs_update_or_create( - context.get_admin_context(), - self.volume_type1_id, - dict(vol_extra3=4)) + context.get_admin_context(), + self.volume_type1_id, + dict(vol_extra3=4)) actual_specs = db.volume_type_extra_specs_get( - context.get_admin_context(), - self.volume_type1_id) - self.assertEquals(expected_specs, actual_specs) + context.get_admin_context(), + self.volume_type1_id) + self.assertEqual(expected_specs, actual_specs) def test_volume_type_extra_specs_create(self): expected_specs = self.vol_type1_specs.copy() expected_specs['vol_extra4'] = 'value4' expected_specs['vol_extra5'] = 'value5' db.volume_type_extra_specs_update_or_create( - context.get_admin_context(), - self.volume_type1_id, - dict(vol_extra4="value4", - vol_extra5="value5")) + context.get_admin_context(), + self.volume_type1_id, + dict(vol_extra4="value4", + vol_extra5="value5")) actual_specs = db.volume_type_extra_specs_get( - context.get_admin_context(), - self.volume_type1_id) - self.assertEquals(expected_specs, actual_specs) + context.get_admin_context(), + self.volume_type1_id) + self.assertEqual(expected_specs, actual_specs) def test_volume_type_get_with_extra_specs(self): volume_type = db.volume_type_get( - context.get_admin_context(), - self.volume_type1_id) - self.assertEquals(volume_type['extra_specs'], - self.vol_type1_specs) + context.get_admin_context(), + self.volume_type1_id) + self.assertEqual(volume_type['extra_specs'], self.vol_type1_specs) volume_type = db.volume_type_get( - context.get_admin_context(), - self.vol_type2_id) - self.assertEquals(volume_type['extra_specs'], {}) + context.get_admin_context(), + self.vol_type2_id) + self.assertEqual(volume_type['extra_specs'], {}) def test_volume_type_get_by_name_with_extra_specs(self): 
volume_type = db.volume_type_get_by_name( - context.get_admin_context(), - self.vol_type1['name']) - self.assertEquals(volume_type['extra_specs'], - self.vol_type1_specs) + context.get_admin_context(), + self.vol_type1['name']) + self.assertEqual(volume_type['extra_specs'], self.vol_type1_specs) volume_type = db.volume_type_get_by_name( - context.get_admin_context(), - self.vol_type2_noextra['name']) - self.assertEquals(volume_type['extra_specs'], {}) + context.get_admin_context(), + self.vol_type2_noextra['name']) + self.assertEqual(volume_type['extra_specs'], {}) def test_volume_type_get_all(self): expected_specs = self.vol_type1_specs.copy() types = db.volume_type_get_all(context.get_admin_context()) - self.assertEquals( + self.assertEqual( types[self.vol_type1['name']]['extra_specs'], expected_specs) - self.assertEquals( + self.assertEqual( types[self.vol_type2_noextra['name']]['extra_specs'], {}) diff --git a/cinder/tests/test_volume_utils.py b/cinder/tests/test_volume_utils.py index 3a644d34b0..e92e381789 100644 --- a/cinder/tests/test_volume_utils.py +++ b/cinder/tests/test_volume_utils.py @@ -1,6 +1,4 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2011 OpenStack LLC. +# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -17,29 +15,41 @@ """Tests For miscellaneous util methods used with volume.""" -from cinder import db -from cinder import flags +import os +import re + +from oslo.config import cfg + from cinder import context -from cinder import test -from cinder.volume import utils as volume_utils +from cinder import db +from cinder import exception from cinder.openstack.common import importutils from cinder.openstack.common import log as logging from cinder.openstack.common.notifier import api as notifier_api from cinder.openstack.common.notifier import test_notifier +from cinder import test +from cinder import utils +from cinder.volume import utils as volume_utils LOG = logging.getLogger(__name__) -FLAGS = flags.FLAGS + +CONF = cfg.CONF class UsageInfoTestCase(test.TestCase): + QUEUE_NAME = 'cinder-volume' + HOSTNAME = 'my-host.com' + HOSTIP = '10.0.0.1' + BACKEND = 'test_backend' + MULTI_AT_BACKEND = 'test_b@ckend' + def setUp(self): super(UsageInfoTestCase, self).setUp() - self.flags(connection_type='fake', - host='fake', + self.flags(host='fake', notification_driver=[test_notifier.__name__]) - self.volume = importutils.import_object(FLAGS.volume_manager) + self.volume = importutils.import_object(CONF.volume_manager) self.user_id = 'fake' self.project_id = 'fake' self.snapshot_id = 'fake' @@ -52,13 +62,13 @@ def tearDown(self): super(UsageInfoTestCase, self).tearDown() def _create_volume(self, params={}): - """Create a test volume""" + """Create a test volume.""" vol = {} vol['snapshot_id'] = self.snapshot_id vol['user_id'] = self.user_id vol['project_id'] = self.project_id - vol['host'] = FLAGS.host - vol['availability_zone'] = FLAGS.storage_availability_zone + vol['host'] = CONF.host + vol['availability_zone'] = CONF.storage_availability_zone vol['status'] = "creating" vol['attach_status'] = "detached" vol['size'] = self.volume_size @@ -71,19 +81,141 @@ def test_notify_usage_exists(self): volume = db.volume_get(self.context, volume_id) volume_utils.notify_usage_exists(self.context, volume) LOG.info("%r" % test_notifier.NOTIFICATIONS) - self.assertEquals(len(test_notifier.NOTIFICATIONS), 1) + self.assertEqual(len(test_notifier.NOTIFICATIONS), 1) msg = 
test_notifier.NOTIFICATIONS[0] - self.assertEquals(msg['priority'], 'INFO') - self.assertEquals(msg['event_type'], 'volume.exists') + self.assertEqual(msg['priority'], 'INFO') + self.assertEqual(msg['event_type'], 'volume.exists') payload = msg['payload'] - self.assertEquals(payload['tenant_id'], self.project_id) - self.assertEquals(payload['user_id'], self.user_id) - self.assertEquals(payload['snapshot_id'], self.snapshot_id) - self.assertEquals(payload['volume_id'], volume.id) - self.assertEquals(payload['size'], self.volume_size) + self.assertEqual(payload['tenant_id'], self.project_id) + self.assertEqual(payload['user_id'], self.user_id) + self.assertEqual(payload['snapshot_id'], self.snapshot_id) + self.assertEqual(payload['volume_id'], volume.id) + self.assertEqual(payload['size'], self.volume_size) for attr in ('display_name', 'created_at', 'launched_at', 'status', 'audit_period_beginning', 'audit_period_ending'): - self.assertTrue(attr in payload, - msg="Key %s not in payload" % attr) + self.assertIn(attr, payload) db.volume_destroy(context.get_admin_context(), volume['id']) + + def test_get_host_from_queue_simple(self): + fullname = "%s.%s@%s" % (self.QUEUE_NAME, self.HOSTNAME, self.BACKEND) + self.assertEqual(volume_utils.get_host_from_queue(fullname), + self.HOSTNAME) + + def test_get_host_from_queue_ip(self): + fullname = "%s.%s@%s" % (self.QUEUE_NAME, self.HOSTIP, self.BACKEND) + self.assertEqual(volume_utils.get_host_from_queue(fullname), + self.HOSTIP) + + def test_get_host_from_queue_multi_at_symbol(self): + fullname = "%s.%s@%s" % (self.QUEUE_NAME, self.HOSTNAME, + self.MULTI_AT_BACKEND) + self.assertEqual(volume_utils.get_host_from_queue(fullname), + self.HOSTNAME) + + def test_get_host_from_queue_ip_multi_at_symbol(self): + fullname = "%s.%s@%s" % (self.QUEUE_NAME, self.HOSTIP, + self.MULTI_AT_BACKEND) + self.assertEqual(volume_utils.get_host_from_queue(fullname), + self.HOSTIP) + + +class LVMVolumeDriverTestCase(test.TestCase): + def test_convert_blocksize_option(self): + # Test valid volume_dd_blocksize + bs, count = volume_utils._calculate_count(1024, '10M') + self.assertEqual(bs, '10M') + self.assertEqual(count, 103) + + bs, count = volume_utils._calculate_count(1024, '1xBBB') + self.assertEqual(bs, '1M') + self.assertEqual(count, 1024) + + # Test 'volume_dd_blocksize' with fraction + bs, count = volume_utils._calculate_count(1024, '1.3M') + self.assertEqual(bs, '1M') + self.assertEqual(count, 1024) + + # Test zero-size 'volume_dd_blocksize' + bs, count = volume_utils._calculate_count(1024, '0M') + self.assertEqual(bs, '1M') + self.assertEqual(count, 1024) + + # Test negative 'volume_dd_blocksize' + bs, count = volume_utils._calculate_count(1024, '-1M') + self.assertEqual(bs, '1M') + self.assertEqual(count, 1024) + + # Test non-digital 'volume_dd_blocksize' + bs, count = volume_utils._calculate_count(1024, 'ABM') + self.assertEqual(bs, '1M') + self.assertEqual(count, 1024) + + +class ClearVolumeTestCase(test.TestCase): + + def test_clear_volume(self): + CONF.volume_clear = 'zero' + CONF.volume_clear_size = 0 + CONF.volume_dd_blocksize = '1M' + self.mox.StubOutWithMock(volume_utils, 'copy_volume') + volume_utils.copy_volume("/dev/zero", "volume_path", 1024, + CONF.volume_dd_blocksize, sync=True, + execute=utils.execute) + self.mox.ReplayAll() + volume_utils.clear_volume(1024, "volume_path") + + def test_clear_volume_zero_and_shred(self): + CONF.volume_clear = 'zero' + CONF.volume_clear_size = 1 + clear_cmd = ['shred', '-n0', '-z', '-s1MiB', "volume_path"] + 
self.mox.StubOutWithMock(utils, "execute") + utils.execute(*clear_cmd, run_as_root=True) + self.mox.ReplayAll() + volume_utils.clear_volume(1024, "volume_path") + + def test_clear_volume_shred(self): + CONF.volume_clear = 'shred' + CONF.volume_clear_size = 1 + clear_cmd = ['shred', '-n3', '-s1MiB', "volume_path"] + self.mox.StubOutWithMock(utils, "execute") + utils.execute(*clear_cmd, run_as_root=True) + self.mox.ReplayAll() + volume_utils.clear_volume(1024, "volume_path") + + def test_clear_volume_shred_not_clear_size(self): + CONF.volume_clear = 'shred' + CONF.volume_clear_size = None + clear_cmd = ['shred', '-n3', "volume_path"] + self.mox.StubOutWithMock(utils, "execute") + utils.execute(*clear_cmd, run_as_root=True) + self.mox.ReplayAll() + volume_utils.clear_volume(1024, "volume_path") + + def test_clear_volume_invalid_opt(self): + CONF.volume_clear = 'non_existent_volume_clearer' + CONF.volume_clear_size = 0 + self.mox.StubOutWithMock(volume_utils, 'copy_volume') + + self.mox.ReplayAll() + + self.assertRaises(exception.InvalidConfigurationValue, + volume_utils.clear_volume, + 1024, "volume_path") + + def test_clear_volume_lvm_snap(self): + self.stubs.Set(os.path, 'exists', lambda x: True) + CONF.volume_clear = 'zero' + CONF.volume_clear_size = 0 + + uuid = '00000000-0000-0000-0000-90ed32cdeed3' + name = 'snapshot-' + uuid + mangle_name = '_' + re.sub(r'-', r'--', name) + vol_path = '/dev/mapper/cinder--volumes-%s-cow' % mangle_name + + def fake_copy_volume(srcstr, deststr, size, blocksize, **kwargs): + self.assertEqual(deststr, vol_path) + return True + + self.stubs.Set(volume_utils, 'copy_volume', fake_copy_volume) + volume_utils.clear_volume(123, vol_path) diff --git a/cinder/tests/test_windows.py b/cinder/tests/test_windows.py new file mode 100644 index 0000000000..0aa8d80e9e --- /dev/null +++ b/cinder/tests/test_windows.py @@ -0,0 +1,387 @@ +# Copyright 2012 Pedro Navarro Perez +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +""" +Unit tests for Windows Server 2012 OpenStack Cinder volume driver +""" + + +import os +import shutil +import tempfile + +from oslo.config import cfg + +import mox as mox_lib +from mox import IgnoreArg +from mox import stubout + +from cinder import test + +from cinder.image import image_utils + +from cinder.tests.windows import db_fakes +from cinder.volume import configuration as conf +from cinder.volume.drivers.windows import windows +from cinder.volume.drivers.windows import windows_utils + + +CONF = cfg.CONF + + +class TestWindowsDriver(test.TestCase): + + def __init__(self, method): + super(TestWindowsDriver, self).__init__(method) + + def setUp(self): + self.lun_path_tempdir = tempfile.mkdtemp() + super(TestWindowsDriver, self).setUp() + self._mox = mox_lib.Mox() + self.stubs = stubout.StubOutForTesting() + self.flags( + windows_iscsi_lun_path=self.lun_path_tempdir, + ) + self._setup_stubs() + configuration = conf.Configuration(None) + configuration.append_config_values(windows.windows_opts) + + self._driver = windows.WindowsDriver(configuration=configuration) + self._driver.do_setup({}) + + def tearDown(self): + self._mox.UnsetStubs() + self.stubs.UnsetAll() + shutil.rmtree(self.lun_path_tempdir) + super(TestWindowsDriver, self).tearDown() + + def _setup_stubs(self): + + def fake_wutils__init__(self): + pass + windows_utils.WindowsUtils.__init__ = fake_wutils__init__ + + def fake_local_path(self, volume): + return os.path.join(CONF.windows_iscsi_lun_path, + str(volume['name']) + ".vhd") + + def test_check_for_setup_errors(self): + mox = self._mox + drv = self._driver + self._mox.StubOutWithMock(windows_utils.WindowsUtils, + 'check_for_setup_error') + windows_utils.WindowsUtils.check_for_setup_error() + + mox.ReplayAll() + + drv.check_for_setup_error() + + mox.VerifyAll() + + def test_create_volume(self): + mox = self._mox + drv = self._driver + vol = db_fakes.get_fake_volume_info() + + self.stubs.Set(drv, 'local_path', self.fake_local_path) + + self._mox.StubOutWithMock(windows_utils.WindowsUtils, + 'create_volume') + + windows_utils.WindowsUtils.create_volume(self.fake_local_path(vol), + vol['name'], vol['size']) + + mox.ReplayAll() + + drv.create_volume(vol) + + mox.VerifyAll() + + def test_delete_volume(self): + """delete_volume simple test case.""" + mox = self._mox + drv = self._driver + + vol = db_fakes.get_fake_volume_info() + + mox.StubOutWithMock(drv, 'local_path') + drv.local_path(vol).AndReturn(self.fake_local_path(vol)) + + self._mox.StubOutWithMock(windows_utils.WindowsUtils, + 'delete_volume') + windows_utils.WindowsUtils.delete_volume(vol['name'], + self.fake_local_path(vol)) + mox.ReplayAll() + + drv.delete_volume(vol) + + mox.VerifyAll() + + def test_create_snapshot(self): + mox = self._mox + drv = self._driver + self._mox.StubOutWithMock(windows_utils.WindowsUtils, + 'create_snapshot') + volume = db_fakes.get_fake_volume_info() + snapshot = db_fakes.get_fake_snapshot_info() + + self.stubs.Set(drv, 'local_path', self.fake_local_path(snapshot)) + + windows_utils.WindowsUtils.create_snapshot(volume['name'], + snapshot['name']) + + mox.ReplayAll() + + drv.create_snapshot(snapshot) + + mox.VerifyAll() + + def test_create_volume_from_snapshot(self): + mox = self._mox + drv = self._driver + + snapshot = db_fakes.get_fake_snapshot_info() + volume = db_fakes.get_fake_volume_info() + + self._mox.StubOutWithMock(windows_utils.WindowsUtils, + 'create_volume_from_snapshot') + windows_utils.WindowsUtils.\ + create_volume_from_snapshot(volume['name'], snapshot['name']) + + 
mox.ReplayAll() + + drv.create_volume_from_snapshot(volume, snapshot) + + mox.VerifyAll() + + def test_delete_snapshot(self): + mox = self._mox + drv = self._driver + + snapshot = db_fakes.get_fake_snapshot_info() + + self._mox.StubOutWithMock(windows_utils.WindowsUtils, + 'delete_snapshot') + windows_utils.WindowsUtils.delete_snapshot(snapshot['name']) + + mox.ReplayAll() + + drv.delete_snapshot(snapshot) + + mox.VerifyAll() + + def test_create_export(self): + mox = self._mox + drv = self._driver + + volume = db_fakes.get_fake_volume_info() + + initiator_name = "%s%s" % (CONF.iscsi_target_prefix, volume['name']) + + self._mox.StubOutWithMock(windows_utils.WindowsUtils, + 'create_iscsi_target') + windows_utils.WindowsUtils.create_iscsi_target(initiator_name, + IgnoreArg()) + self._mox.StubOutWithMock(windows_utils.WindowsUtils, + 'add_disk_to_target') + windows_utils.WindowsUtils.add_disk_to_target(volume['name'], + initiator_name) + + mox.ReplayAll() + + export_info = drv.create_export(None, volume) + + mox.VerifyAll() + + self.assertEqual(export_info['provider_location'], initiator_name) + + def test_initialize_connection(self): + mox = self._mox + drv = self._driver + + volume = db_fakes.get_fake_volume_info() + initiator_name = "%s%s" % (CONF.iscsi_target_prefix, volume['name']) + + connector = db_fakes.get_fake_connector_info() + + self._mox.StubOutWithMock(windows_utils.WindowsUtils, + 'associate_initiator_with_iscsi_target') + windows_utils.WindowsUtils.associate_initiator_with_iscsi_target( + volume['provider_location'], initiator_name, ) + + self._mox.StubOutWithMock(windows_utils.WindowsUtils, + 'get_host_information') + windows_utils.WindowsUtils.get_host_information( + volume, volume['provider_location']) + + mox.ReplayAll() + + drv.initialize_connection(volume, connector) + + mox.VerifyAll() + + def test_terminate_connection(self): + mox = self._mox + drv = self._driver + + volume = db_fakes.get_fake_volume_info() + initiator_name = "%s%s" % (CONF.iscsi_target_prefix, volume['name']) + connector = db_fakes.get_fake_connector_info() + + self._mox.StubOutWithMock(windows_utils.WindowsUtils, + 'delete_iscsi_target') + windows_utils.WindowsUtils.delete_iscsi_target( + initiator_name, volume['provider_location']) + + mox.ReplayAll() + + drv.terminate_connection(volume, connector) + + mox.VerifyAll() + + def test_ensure_export(self): + mox = self._mox + drv = self._driver + + volume = db_fakes.get_fake_volume_info() + + initiator_name = "%s%s" % (CONF.iscsi_target_prefix, volume['name']) + + self._mox.StubOutWithMock(windows_utils.WindowsUtils, + 'create_iscsi_target') + windows_utils.WindowsUtils.create_iscsi_target(initiator_name, True) + self._mox.StubOutWithMock(windows_utils.WindowsUtils, + 'add_disk_to_target') + windows_utils.WindowsUtils.add_disk_to_target(volume['name'], + initiator_name) + + mox.ReplayAll() + + drv.ensure_export(None, volume) + + mox.VerifyAll() + + def test_remove_export(self): + mox = self._mox + drv = self._driver + + volume = db_fakes.get_fake_volume_info() + + target_name = volume['provider_location'] + + self._mox.StubOutWithMock(windows_utils.WindowsUtils, + 'remove_iscsi_target') + windows_utils.WindowsUtils.remove_iscsi_target(target_name) + + mox.ReplayAll() + + drv.remove_export(None, volume) + + mox.VerifyAll() + + def test_copy_image_to_volume(self): + """resize_image common case usage.""" + mox = self._mox + drv = self._driver + + volume = db_fakes.get_fake_volume_info() + + self.stubs.Set(drv, 'local_path', self.fake_local_path) + + 
mox.StubOutWithMock(image_utils, 'fetch_to_vhd') + image_utils.fetch_to_vhd(None, None, None, + self.fake_local_path(volume), + mox_lib.IgnoreArg()) + + mox.ReplayAll() + + drv.copy_image_to_volume(None, volume, None, None) + + mox.VerifyAll() + + def test_copy_volume_to_image(self): + mox = self._mox + drv = self._driver + + vol = db_fakes.get_fake_volume_info() + + image_meta = db_fakes.get_fake_image_meta() + + self.stubs.Set(drv, 'local_path', self.fake_local_path) + + mox.StubOutWithMock(image_utils, 'upload_volume') + + temp_vhd_path = os.path.join(CONF.image_conversion_dir, + str(image_meta['id']) + ".vhd") + + image_utils.upload_volume(None, None, image_meta, temp_vhd_path, 'vpc') + + self._mox.StubOutWithMock(windows_utils.WindowsUtils, + 'copy_vhd_disk') + + windows_utils.WindowsUtils.copy_vhd_disk(self.fake_local_path(vol), + temp_vhd_path) + + mox.ReplayAll() + + drv.copy_volume_to_image(None, vol, None, image_meta) + + mox.VerifyAll() + + def test_create_cloned_volume(self): + mox = self._mox + drv = self._driver + + volume = db_fakes.get_fake_volume_info() + volume_cloned = db_fakes.get_fake_volume_info_cloned() + + self._mox.StubOutWithMock(windows_utils.WindowsUtils, + 'create_volume') + + windows_utils.WindowsUtils.create_volume(IgnoreArg(), IgnoreArg(), + IgnoreArg()) + + self._mox.StubOutWithMock(windows_utils.WindowsUtils, + 'copy_vhd_disk') + windows_utils.WindowsUtils.copy_vhd_disk(self.fake_local_path( + volume_cloned), self.fake_local_path(volume)) + + mox.ReplayAll() + + drv.create_cloned_volume(volume, volume_cloned) + + mox.VerifyAll() + + def test_extend_volume(self): + mox = self._mox + drv = self._driver + + volume = db_fakes.get_fake_volume_info() + + TEST_VOLUME_ADDITIONAL_SIZE_MB = 1024 + TEST_VOLUME_ADDITIONAL_SIZE_GB = 1 + + self._mox.StubOutWithMock(windows_utils.WindowsUtils, 'extend') + + windows_utils.WindowsUtils.extend(volume['name'], + TEST_VOLUME_ADDITIONAL_SIZE_MB) + + new_size = volume['size'] + TEST_VOLUME_ADDITIONAL_SIZE_GB + + mox.ReplayAll() + + drv.extend_volume(volume, new_size) + + mox.VerifyAll() diff --git a/cinder/tests/test_wsgi.py b/cinder/tests/test_wsgi.py index 87eb6a25fe..c637117233 100644 --- a/cinder/tests/test_wsgi.py +++ b/cinder/tests/test_wsgi.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2011 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. 
@@ -18,18 +16,26 @@ """Unit tests for `cinder.wsgi`.""" +import mock import os.path import tempfile +import urllib2 -import unittest +from oslo.config import cfg +import testtools +import webob import webob.dec -from cinder.api import openstack as openstack_api from cinder import exception -from cinder.volume import xiv +from cinder.openstack.common import gettextutils from cinder import test import cinder.wsgi +CONF = cfg.CONF + +TEST_VAR_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), + 'var')) + class TestLoaderNothingExists(test.TestCase): """Loader tests where os.path.exists always returns False.""" @@ -45,7 +51,7 @@ def test_config_not_found(self): ) -class TestLoaderNormalFilesystem(unittest.TestCase): +class TestLoaderNormalFilesystem(test.TestCase): """Loader tests with normal filesystem (unmodified os.path module).""" _paste_config = """ @@ -55,14 +61,16 @@ class TestLoaderNormalFilesystem(unittest.TestCase): """ def setUp(self): + super(TestLoaderNormalFilesystem, self).setUp() self.config = tempfile.NamedTemporaryFile(mode="w+t") self.config.write(self._paste_config.lstrip()) self.config.seek(0) self.config.flush() self.loader = cinder.wsgi.Loader(self.config.name) + self.addCleanup(self.config.close) def test_config_found(self): - self.assertEquals(self.config.name, self.loader.config_path) + self.assertEqual(self.config.name, self.loader.config_path) def test_app_not_found(self): self.assertRaises( @@ -73,18 +81,21 @@ def test_app_not_found(self): def test_app_found(self): url_parser = self.loader.load_app("test_app") - self.assertEquals("/tmp", url_parser.directory) + self.assertEqual("/tmp", url_parser.directory) - def tearDown(self): - self.config.close() - -class TestWSGIServer(unittest.TestCase): +class TestWSGIServer(test.TestCase): """WSGI server tests.""" + def _ipv6_configured(): + try: + with file('/proc/net/if_inet6') as f: + return len(f.read()) > 0 + except IOError: + return False def test_no_app(self): server = cinder.wsgi.Server("test_app", None) - self.assertEquals("test_app", server.name) + self.assertEqual("test_app", server.name) def test_start_random_port(self): server = cinder.wsgi.Server("test_random_port", None, host="127.0.0.1") @@ -94,11 +105,94 @@ def test_start_random_port(self): server.stop() server.wait() + @testtools.skipIf(not _ipv6_configured(), + "Test requires an IPV6 configured interface") + def test_start_random_port_with_ipv6(self): + server = cinder.wsgi.Server("test_random_port", + None, + host="::1") + server.start() + self.assertEqual("::1", server.host) + self.assertNotEqual(0, server.port) + server.stop() + server.wait() + + def test_app(self): + greetings = 'Hello, World!!!' + + def hello_world(env, start_response): + if env['PATH_INFO'] != '/': + start_response('404 Not Found', + [('Content-Type', 'text/plain')]) + return ['Not Found\r\n'] + start_response('200 OK', [('Content-Type', 'text/plain')]) + return [greetings] + + server = cinder.wsgi.Server("test_app", hello_world) + server.start() + + response = urllib2.urlopen('http://127.0.0.1:%d/' % server.port) + self.assertEqual(greetings, response.read()) + + server.stop() + + def test_app_using_ssl(self): + CONF.set_default("ssl_cert_file", + os.path.join(TEST_VAR_DIR, 'certificate.crt')) + CONF.set_default("ssl_key_file", + os.path.join(TEST_VAR_DIR, 'privatekey.key')) + + greetings = 'Hello, World!!!' 
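+
+        # webob.dec.wsgify adapts a function that takes a webob Request and
+        # returns a string (or Response) into a standard WSGI application,
+        # which is the form cinder.wsgi.Server expects for its app argument.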
+ + @webob.dec.wsgify + def hello_world(req): + return greetings + + server = cinder.wsgi.Server("test_app", hello_world) + server.start() + + response = urllib2.urlopen('https://127.0.0.1:%d/' % server.port) + self.assertEqual(greetings, response.read()) + + server.stop() + + @testtools.skipIf(not _ipv6_configured(), + "Test requires an IPV6 configured interface") + def test_app_using_ipv6_and_ssl(self): + CONF.set_default("ssl_cert_file", + os.path.join(TEST_VAR_DIR, 'certificate.crt')) + CONF.set_default("ssl_key_file", + os.path.join(TEST_VAR_DIR, 'privatekey.key')) + + greetings = 'Hello, World!!!' + + @webob.dec.wsgify + def hello_world(req): + return greetings + + server = cinder.wsgi.Server("test_app", + hello_world, + host="::1", + port=0) + server.start() + + response = urllib2.urlopen('https://[::1]:%d/' % server.port) + self.assertEqual(greetings, response.read()) + + server.stop() + class ExceptionTest(test.TestCase): def _wsgi_app(self, inner_app): - return openstack_api.FaultWrapper(inner_app) + # NOTE(luisg): In order to test localization, we need to + # make sure the lazy _() is installed in the 'fault' module + # also we don't want to install the _() system-wide and + # potentially break other test cases, so we do it here for this + # test suite only. + gettextutils.install('', lazy=True) + from cinder.api.middleware import fault + return fault.FaultWrapper(inner_app) def _do_test_exception_safety_reflected_in_faults(self, expose): class ExceptionWithSafety(exception.CinderException): @@ -110,11 +204,11 @@ def fail(req): api = self._wsgi_app(fail) resp = webob.Request.blank('/').get_response(api) - self.assertTrue('{"computeFault' in resp.body, resp.body) + self.assertIn('{"computeFault', resp.body) expected = ('ExceptionWithSafety: some explanation' if expose else 'The server has either erred or is incapable ' 'of performing the requested operation.') - self.assertTrue(expected in resp.body, resp.body) + self.assertIn(expected, resp.body) self.assertEqual(resp.status_int, 500, resp.body) def test_safe_exceptions_are_described_in_faults(self): @@ -130,13 +224,13 @@ def fail(req): api = self._wsgi_app(fail) resp = webob.Request.blank('/').get_response(api) - self.assertTrue(msg in resp.body, resp.body) + self.assertIn(msg, resp.body) self.assertEqual(resp.status_int, exception_type.code, resp.body) if hasattr(exception_type, 'headers'): for (key, value) in exception_type.headers.iteritems(): - self.assertTrue(key in resp.headers) - self.assertEquals(resp.headers[key], value) + self.assertIn(key, resp.headers) + self.assertEqual(resp.headers[key], value) def test_quota_error_mapping(self): self._do_test_exception_mapping(exception.QuotaError, 'too many used') @@ -168,3 +262,41 @@ def fail(req): api = self._wsgi_app(fail) resp = webob.Request.blank('/').get_response(api) self.assertEqual(500, resp.status_int) + + @mock.patch('cinder.openstack.common.gettextutils.translate') + def test_cinder_exception_with_localized_explanation(self, mock_t9n): + msg = 'My Not Found' + msg_translation = 'Mi No Encontrado' + message = gettextutils.Message(msg, '') + + @webob.dec.wsgify + def fail(req): + class MyVolumeNotFound(exception.NotFound): + def __init__(self): + self.msg = message + self.safe = True + raise MyVolumeNotFound() + + # Test response without localization + def mock_get_non_localized_message(msgid, locale): + return msg + + mock_t9n.side_effect = mock_get_non_localized_message + + api = self._wsgi_app(fail) + resp = webob.Request.blank('/').get_response(api) + 
self.assertEqual(404, resp.status_int) + self.assertIn(msg, resp.body) + + # Test response with localization + def mock_translate(msgid, locale): + if isinstance(msgid, gettextutils.Message): + return msg_translation + return msgid + + mock_t9n.side_effect = mock_translate + + api = self._wsgi_app(fail) + resp = webob.Request.blank('/').get_response(api) + self.assertEqual(404, resp.status_int) + self.assertIn(msg_translation, resp.body) diff --git a/cinder/tests/test_xenapi_sm.py b/cinder/tests/test_xenapi_sm.py new file mode 100644 index 0000000000..e9df123c24 --- /dev/null +++ b/cinder/tests/test_xenapi_sm.py @@ -0,0 +1,508 @@ +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +import contextlib +import StringIO + +import mock +import mox + +from cinder.db import api as db_api +from cinder import exception +from cinder import test +from cinder.volume import configuration as conf +from cinder.volume.drivers.xenapi import lib +from cinder.volume.drivers.xenapi import sm as driver +from cinder.volume.drivers.xenapi import tools + + +class MockContext(object): + def __init__(ctxt, auth_token): + ctxt.auth_token = auth_token + + +@contextlib.contextmanager +def simple_context(value): + yield value + + +def get_configured_driver(server='ignore_server', path='ignore_path'): + configuration = mox.MockObject(conf.Configuration) + configuration.xenapi_nfs_server = server + configuration.xenapi_nfs_serverpath = path + configuration.append_config_values(mox.IgnoreArg()) + configuration.volume_dd_blocksize = '1M' + return driver.XenAPINFSDriver(configuration=configuration) + + +class DriverTestCase(test.TestCase): + + def assert_flag(self, flagname): + self.assertTrue(hasattr(driver.CONF, flagname)) + + def test_config_options(self): + self.assert_flag('xenapi_connection_url') + self.assert_flag('xenapi_connection_username') + self.assert_flag('xenapi_connection_password') + self.assert_flag('xenapi_nfs_server') + self.assert_flag('xenapi_nfs_serverpath') + self.assert_flag('xenapi_sr_base_path') + + def test_do_setup(self): + mock = mox.Mox() + mock.StubOutWithMock(driver, 'xenapi_lib') + mock.StubOutWithMock(driver, 'xenapi_opts') + + configuration = mox.MockObject(conf.Configuration) + configuration.xenapi_connection_url = 'url' + configuration.xenapi_connection_username = 'user' + configuration.xenapi_connection_password = 'pass' + configuration.append_config_values(mox.IgnoreArg()) + + session_factory = object() + nfsops = object() + + driver.xenapi_lib.SessionFactory('url', 'user', 'pass').AndReturn( + session_factory) + + driver.xenapi_lib.NFSBasedVolumeOperations( + session_factory).AndReturn(nfsops) + + drv = driver.XenAPINFSDriver(configuration=configuration) + + mock.ReplayAll() + drv.do_setup('context') + mock.VerifyAll() + + self.assertEqual(nfsops, drv.nfs_ops) + + def test_create_volume(self): + mock = mox.Mox() + + ops = 
mock.CreateMock(lib.NFSBasedVolumeOperations) + drv = get_configured_driver('server', 'path') + drv.nfs_ops = ops + + volume_details = dict( + sr_uuid='sr_uuid', + vdi_uuid='vdi_uuid' + ) + ops.create_volume( + 'server', 'path', 1, 'name', 'desc').AndReturn(volume_details) + + mock.ReplayAll() + result = drv.create_volume(dict( + size=1, display_name='name', display_description='desc')) + mock.VerifyAll() + + self.assertEqual(dict(provider_location='sr_uuid/vdi_uuid'), result) + + def test_delete_volume(self): + mock = mox.Mox() + + ops = mock.CreateMock(lib.NFSBasedVolumeOperations) + drv = get_configured_driver('server', 'path') + drv.nfs_ops = ops + + ops.delete_volume('server', 'path', 'sr_uuid', 'vdi_uuid') + + mock.ReplayAll() + result = drv.delete_volume(dict( + provider_location='sr_uuid/vdi_uuid')) + mock.VerifyAll() + + def test_create_export_does_not_raise_exception(self): + configuration = conf.Configuration([]) + drv = driver.XenAPINFSDriver(configuration=configuration) + drv.create_export('context', 'volume') + + def test_remove_export_does_not_raise_exception(self): + configuration = conf.Configuration([]) + drv = driver.XenAPINFSDriver(configuration=configuration) + drv.remove_export('context', 'volume') + + def test_initialize_connection(self): + mock = mox.Mox() + + drv = get_configured_driver('server', 'path') + + mock.ReplayAll() + result = drv.initialize_connection( + dict( + display_name='name', + display_description='desc', + provider_location='sr_uuid/vdi_uuid'), + 'connector' + ) + mock.VerifyAll() + + self.assertEqual( + dict( + driver_volume_type='xensm', + data=dict( + name_label='name', + name_description='desc', + sr_uuid='sr_uuid', + vdi_uuid='vdi_uuid', + sr_type='nfs', + server='server', + serverpath='path', + introduce_sr_keys=['sr_type', 'server', 'serverpath'] + ) + ), + result + ) + + def test_initialize_connection_null_values(self): + mock = mox.Mox() + + drv = get_configured_driver('server', 'path') + + mock.ReplayAll() + result = drv.initialize_connection( + dict( + display_name=None, + display_description=None, + provider_location='sr_uuid/vdi_uuid'), + 'connector' + ) + mock.VerifyAll() + + self.assertEqual( + dict( + driver_volume_type='xensm', + data=dict( + name_label='', + name_description='', + sr_uuid='sr_uuid', + vdi_uuid='vdi_uuid', + sr_type='nfs', + server='server', + serverpath='path', + introduce_sr_keys=['sr_type', 'server', 'serverpath'] + ) + ), + result + ) + + def _setup_mock_driver(self, server, serverpath, sr_base_path="_srbp"): + mock = mox.Mox() + + drv = get_configured_driver(server, serverpath) + ops = mock.CreateMock(lib.NFSBasedVolumeOperations) + db = mock.CreateMock(db_api) + drv.nfs_ops = ops + drv.db = db + + mock.StubOutWithMock(driver, 'CONF') + driver.CONF.xenapi_nfs_server = server + driver.CONF.xenapi_nfs_serverpath = serverpath + driver.CONF.xenapi_sr_base_path = sr_base_path + + return mock, drv + + def test_create_snapshot(self): + mock, drv = self._setup_mock_driver('server', 'serverpath') + + snapshot = dict( + volume_id="volume-id", + display_name="snapshot-name", + display_description="snapshot-desc", + volume=dict(provider_location="sr-uuid/vdi-uuid")) + + drv.nfs_ops.copy_volume( + "server", "serverpath", "sr-uuid", "vdi-uuid", + "snapshot-name", "snapshot-desc" + ).AndReturn(dict(sr_uuid="copied-sr", vdi_uuid="copied-vdi")) + + mock.ReplayAll() + result = drv.create_snapshot(snapshot) + mock.VerifyAll() + self.assertEqual( + dict(provider_location="copied-sr/copied-vdi"), + result) + + def 
test_create_volume_from_snapshot(self): + mock, drv = self._setup_mock_driver('server', 'serverpath') + + snapshot = dict( + provider_location='src-sr-uuid/src-vdi-uuid') + volume = dict( + display_name='tgt-name', name_description='tgt-desc') + + drv.nfs_ops.copy_volume( + "server", "serverpath", "src-sr-uuid", "src-vdi-uuid", + "tgt-name", "tgt-desc" + ).AndReturn(dict(sr_uuid="copied-sr", vdi_uuid="copied-vdi")) + + mock.ReplayAll() + result = drv.create_volume_from_snapshot(volume, snapshot) + mock.VerifyAll() + + self.assertEqual( + dict(provider_location='copied-sr/copied-vdi'), result) + + def test_delete_snapshot(self): + mock, drv = self._setup_mock_driver('server', 'serverpath') + + snapshot = dict( + provider_location='src-sr-uuid/src-vdi-uuid') + + drv.nfs_ops.delete_volume( + "server", "serverpath", "src-sr-uuid", "src-vdi-uuid") + + mock.ReplayAll() + drv.delete_snapshot(snapshot) + mock.VerifyAll() + + def test_copy_volume_to_image_xenserver_case(self): + mock, drv = self._setup_mock_driver( + 'server', 'serverpath', '/var/run/sr-mount') + + mock.StubOutWithMock(drv, '_use_glance_plugin_to_upload_volume') + mock.StubOutWithMock(driver.image_utils, 'is_xenserver_format') + context = MockContext('token') + + driver.image_utils.is_xenserver_format('image_meta').AndReturn(True) + + drv._use_glance_plugin_to_upload_volume( + context, 'volume', 'image_service', 'image_meta').AndReturn( + 'result') + mock.ReplayAll() + + result = drv.copy_volume_to_image( + context, "volume", "image_service", "image_meta") + self.assertEqual('result', result) + + mock.VerifyAll() + + def test_copy_volume_to_image_non_xenserver_case(self): + mock, drv = self._setup_mock_driver( + 'server', 'serverpath', '/var/run/sr-mount') + + mock.StubOutWithMock(drv, '_use_image_utils_to_upload_volume') + mock.StubOutWithMock(driver.image_utils, 'is_xenserver_format') + context = MockContext('token') + + driver.image_utils.is_xenserver_format('image_meta').AndReturn(False) + + drv._use_image_utils_to_upload_volume( + context, 'volume', 'image_service', 'image_meta').AndReturn( + 'result') + mock.ReplayAll() + + result = drv.copy_volume_to_image( + context, "volume", "image_service", "image_meta") + self.assertEqual('result', result) + + mock.VerifyAll() + + def test_use_image_utils_to_upload_volume(self): + mock, drv = self._setup_mock_driver( + 'server', 'serverpath', '/var/run/sr-mount') + + volume = dict(provider_location='sr-uuid/vdi-uuid') + context = MockContext('token') + + mock.StubOutWithMock(driver.image_utils, 'upload_volume') + + drv.nfs_ops.volume_attached_here( + 'server', 'serverpath', 'sr-uuid', 'vdi-uuid', True).AndReturn( + simple_context('device')) + + driver.image_utils.upload_volume( + context, 'image_service', 'image_meta', 'device') + + mock.ReplayAll() + drv._use_image_utils_to_upload_volume( + context, volume, "image_service", "image_meta") + mock.VerifyAll() + + def test_use_glance_plugin_to_upload_volume(self): + mock, drv = self._setup_mock_driver( + 'server', 'serverpath', '/var/run/sr-mount') + + volume = dict(provider_location='sr-uuid/vdi-uuid') + context = MockContext('token') + + mock.StubOutWithMock(driver.glance, 'get_api_servers') + + driver.glance.get_api_servers().AndReturn((x for x in ['glancesrv'])) + + drv.nfs_ops.use_glance_plugin_to_upload_volume( + 'server', 'serverpath', 'sr-uuid', 'vdi-uuid', 'glancesrv', + 'image-id', 'token', '/var/run/sr-mount') + + mock.ReplayAll() + drv._use_glance_plugin_to_upload_volume( + context, volume, "image_service", {"id": "image-id"}) 
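+
+        # VerifyAll() fails the test if any expectation recorded before
+        # ReplayAll() was not exercised. The get_api_servers() stub above
+        # hands back a one-item generator because the real helper appears
+        # to return an iterator of glance API servers (an assumption based
+        # on how it is consumed here).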
+ mock.VerifyAll() + + def test_copy_image_to_volume_xenserver_case(self): + mock, drv = self._setup_mock_driver( + 'server', 'serverpath', '/var/run/sr-mount') + + mock.StubOutWithMock(drv, '_use_glance_plugin_to_copy_image_to_volume') + mock.StubOutWithMock(driver.image_utils, 'is_xenserver_image') + context = MockContext('token') + + driver.image_utils.is_xenserver_image( + context, 'image_service', 'image_id').AndReturn(True) + drv._use_glance_plugin_to_copy_image_to_volume( + context, 'volume', 'image_service', 'image_id').AndReturn('result') + mock.ReplayAll() + result = drv.copy_image_to_volume( + context, "volume", "image_service", "image_id") + self.assertEqual('result', result) + mock.VerifyAll() + + def test_copy_image_to_volume_non_xenserver_case(self): + mock, drv = self._setup_mock_driver( + 'server', 'serverpath', '/var/run/sr-mount') + + mock.StubOutWithMock(drv, '_use_image_utils_to_pipe_bytes_to_volume') + mock.StubOutWithMock(driver.image_utils, 'is_xenserver_image') + context = MockContext('token') + + driver.image_utils.is_xenserver_image( + context, 'image_service', 'image_id').AndReturn(False) + drv._use_image_utils_to_pipe_bytes_to_volume( + context, 'volume', 'image_service', 'image_id').AndReturn(True) + mock.ReplayAll() + drv.copy_image_to_volume( + context, "volume", "image_service", "image_id") + mock.VerifyAll() + + def test_use_image_utils_to_pipe_bytes_to_volume(self): + mock, drv = self._setup_mock_driver( + 'server', 'serverpath', '/var/run/sr-mount') + + volume = dict(provider_location='sr-uuid/vdi-uuid', size=1) + context = MockContext('token') + + mock.StubOutWithMock(driver.image_utils, 'fetch_to_raw') + + drv.nfs_ops.volume_attached_here( + 'server', 'serverpath', 'sr-uuid', 'vdi-uuid', False).AndReturn( + simple_context('device')) + + driver.image_utils.fetch_to_raw( + context, 'image_service', 'image_id', 'device', mox.IgnoreArg(), + size=1) + + mock.ReplayAll() + drv._use_image_utils_to_pipe_bytes_to_volume( + context, volume, "image_service", "image_id") + mock.VerifyAll() + + def test_use_glance_plugin_to_copy_image_to_volume_success(self): + mock, drv = self._setup_mock_driver( + 'server', 'serverpath', '/var/run/sr-mount') + + volume = dict( + provider_location='sr-uuid/vdi-uuid', + size=2) + + mock.StubOutWithMock(driver.glance, 'get_api_servers') + + driver.glance.get_api_servers().AndReturn((x for x in ['glancesrv'])) + + drv.nfs_ops.use_glance_plugin_to_overwrite_volume( + 'server', 'serverpath', 'sr-uuid', 'vdi-uuid', 'glancesrv', + 'image_id', 'token', '/var/run/sr-mount').AndReturn(True) + + drv.nfs_ops.resize_volume( + 'server', 'serverpath', 'sr-uuid', 'vdi-uuid', 2) + + mock.ReplayAll() + drv._use_glance_plugin_to_copy_image_to_volume( + MockContext('token'), volume, "ignore", "image_id") + mock.VerifyAll() + + def test_use_glance_plugin_to_copy_image_to_volume_fail(self): + mock, drv = self._setup_mock_driver( + 'server', 'serverpath', '/var/run/sr-mount') + + volume = dict( + provider_location='sr-uuid/vdi-uuid', + size=2) + + mock.StubOutWithMock(driver.glance, 'get_api_servers') + + driver.glance.get_api_servers().AndReturn((x for x in ['glancesrv'])) + + drv.nfs_ops.use_glance_plugin_to_overwrite_volume( + 'server', 'serverpath', 'sr-uuid', 'vdi-uuid', 'glancesrv', + 'image_id', 'token', '/var/run/sr-mount').AndReturn(False) + + mock.ReplayAll() + + self.assertRaises( + exception.ImageCopyFailure, + lambda: drv._use_glance_plugin_to_copy_image_to_volume( + MockContext('token'), volume, "ignore", "image_id")) + + mock.VerifyAll() 
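+
+    # A minimal sketch of the mox record/replay/verify cycle the tests in
+    # this class follow ("module.call" is an illustrative stand-in, not a
+    # real cinder API):
+    #
+    #     m = mox.Mox()
+    #     m.StubOutWithMock(module, 'call')
+    #     module.call('arg').AndReturn('value')  # record the expectation
+    #     m.ReplayAll()                          # switch stubs to replay mode
+    #     module.call('arg')                     # exercised by code under test
+    #     m.VerifyAll()                          # fails on unmet expectations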
+ + def test_get_volume_stats_reports_required_keys(self): + drv = get_configured_driver() + + stats = drv.get_volume_stats() + + required_metrics = [ + 'volume_backend_name', 'vendor_name', 'driver_version', + 'storage_protocol', 'total_capacity_gb', 'free_capacity_gb', + 'reserved_percentage' + ] + + for metric in required_metrics: + self.assertIn(metric, stats) + + def test_get_volume_stats_reports_unknown_cap(self): + drv = get_configured_driver() + + stats = drv.get_volume_stats() + + self.assertEqual('unknown', stats['free_capacity_gb']) + + def test_reported_driver_type(self): + drv = get_configured_driver() + + stats = drv.get_volume_stats() + + self.assertEqual('xensm', stats['storage_protocol']) + + +class ToolsTest(test.TestCase): + @mock.patch('cinder.volume.drivers.xenapi.tools._stripped_first_line_of') + def test_get_this_vm_uuid(self, mock_read_first_line): + mock_read_first_line.return_value = 'someuuid' + self.assertEqual('someuuid', tools.get_this_vm_uuid()) + mock_read_first_line.assert_called_once_with('/sys/hypervisor/uuid') + + def test_stripped_first_line_of(self): + mock_context_manager = mock.Mock() + mock_context_manager.__enter__ = mock.Mock( + return_value=StringIO.StringIO(' blah \n second line \n')) + mock_context_manager.__exit__ = mock.Mock(return_value=False) + mock_open = mock.Mock(return_value=mock_context_manager) + + with mock.patch('__builtin__.open', mock_open): + self.assertEqual( + 'blah', tools._stripped_first_line_of('/somefile')) + + mock_open.assert_called_once_with('/somefile', 'rb') diff --git a/cinder/tests/test_xiv.py b/cinder/tests/test_xiv.py deleted file mode 100644 index 842cda4bee..0000000000 --- a/cinder/tests/test_xiv.py +++ /dev/null @@ -1,243 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright (c) 2012 IBM, Inc. -# Copyright (c) 2012 OpenStack LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# -# Authors: -# Erik Zaadi -# Avishay Traeger - -from cinder import exception -from cinder import flags -from cinder import test -from cinder.volume import xiv - - -FLAGS = flags.FLAGS - -FAKE = "fake" -VOLUME = { - 'size': 16, - 'name': FAKE, - 'id': 1 - } - -CONNECTOR = { - 'initiator': "iqn.2012-07.org.fake:01:948f189c4695", - } - - -class XIVFakeProxyDriver(object): - """Fake XIV Proxy Driver.""" - - def __init__(self, xiv_info, logger, expt): - """ - Initialize Proxy - """ - - self.xiv_info = xiv_info - self.logger = logger - self.exception = expt - self.xiv_portal = \ - self.xiv_iqn = FAKE - - self.volumes = {} - - def setup(self, context): - if self.xiv_info['xiv_user'] != FLAGS.san_login: - raise self.exception.NotAuthorized() - - if self.xiv_info['xiv_address'] != FLAGS.san_ip: - raise self.exception.HostNotFound() - - def create_volume(self, volume): - if volume['size'] > 100: - raise self.exception.VolumeBackendAPIException() - self.volumes[volume['name']] = volume - - def volume_exists(self, volume): - return self.volumes.get(volume['name'], None) is not None - - def delete_volume(self, volume): - if self.volumes.get(volume['name'], None) is not None: - del self.volumes[volume['name']] - - def initialize_connection(self, volume, connector): - if not self.volume_exists(volume): - raise self.exception.VolumeNotFound() - lun_id = volume['id'] - - self.volumes[volume['name']]['attached'] = connector - - return { - 'driver_volume_type': 'iscsi', - 'data': { - 'target_discovered': True, - 'target_portal': self.xiv_portal, - 'target_iqn': self.xiv_iqn, - 'target_lun': lun_id, - 'volume_id': volume['id'], - 'multipath': True, - # part of a patch to nova-compute to enable iscsi multipath - 'provider_location': "%s,1 %s %s" % ( - self.xiv_portal, - self.xiv_iqn, - lun_id), - }, - } - - def terminate_connection(self, volume, connector): - if not self.volume_exists(volume): - raise self.exception.VolumeNotFound() - if not self.is_volume_attached(volume, connector): - raise self.exception.VolumeNotFoundForInstance() - del self.volumes[volume['name']]['attached'] - - def is_volume_attached(self, volume, connector): - if not self.volume_exists(volume): - raise self.exception.VolumeNotFound() - - return self.volumes[volume['name']].get('attached', None) \ - == connector - - -class XIVVolumeDriverTest(test.TestCase): - """Test IBM XIV volume driver.""" - - def setUp(self): - """Initialize IVM XIV Driver.""" - super(XIVVolumeDriverTest, self).setUp() - - self.driver = xiv.XIVDriver() - - def test_initialized_should_set_xiv_info(self): - """Test that the san flags are passed to the XIV proxy.""" - - self.assertEquals( - self.driver.xiv_proxy.xiv_info['xiv_user'], - FLAGS.san_login) - self.assertEquals( - self.driver.xiv_proxy.xiv_info['xiv_pass'], - FLAGS.san_password) - self.assertEquals( - self.driver.xiv_proxy.xiv_info['xiv_address'], - FLAGS.san_ip) - self.assertEquals( - self.driver.xiv_proxy.xiv_info['xiv_vol_pool'], - FLAGS.san_clustername) - - def test_setup_should_fail_if_credentials_are_invalid(self): - """Test that the xiv_proxy validates credentials.""" - - self.driver.xiv_proxy.xiv_info['xiv_user'] = 'invalid' - self.assertRaises(exception.NotAuthorized, self.driver.do_setup, None) - - def test_setup_should_fail_if_connection_is_invalid(self): - """Test that the xiv_proxy validates connection.""" - - self.driver.xiv_proxy.xiv_info['xiv_address'] = 'invalid' - self.assertRaises(exception.HostNotFound, self.driver.do_setup, None) - - def test_create_volume(self): - """Test creating a 
volume.""" - - self.driver.do_setup(None) - self.driver.create_volume(VOLUME) - has_volume = self.driver.xiv_proxy.volume_exists(VOLUME) - self.assertTrue(has_volume) - self.driver.delete_volume(VOLUME) - - def test_volume_exists(self): - """Test the volume exist method with a volume that doesn't exist.""" - - self.driver.do_setup(None) - self.assertFalse(self.driver.xiv_proxy.volume_exists({'name': FAKE})) - - def test_delete_volume(self): - """Verify that a volume is deleted.""" - - self.driver.do_setup(None) - self.driver.create_volume(VOLUME) - self.driver.delete_volume(VOLUME) - has_volume = self.driver.xiv_proxy.volume_exists(VOLUME) - self.assertFalse(has_volume) - - def test_delete_volume_should_fail_for_not_existing_volume(self): - """Verify that deleting a non-existing volume is OK.""" - - self.driver.do_setup(None) - self.driver.delete_volume(VOLUME) - - def test_create_volume_should_fail_if_no_pool_space_left(self): - """Vertify that the xiv_proxy validates volume pool space.""" - - self.driver.do_setup(None) - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_volume, - {'name': FAKE, 'id': 1, 'size': 12000}) - - def test_initialize_connection(self): - """Test that inititialize connection attaches volume to host.""" - - self.driver.do_setup(None) - self.driver.create_volume(VOLUME) - self.driver.initialize_connection(VOLUME, CONNECTOR) - - self.assertTrue( - self.driver.xiv_proxy.is_volume_attached(VOLUME, CONNECTOR)) - - self.driver.terminate_connection(VOLUME, CONNECTOR) - self.driver.delete_volume(VOLUME) - - def test_initialize_connection_should_fail_for_non_existing_volume(self): - """Verify that initialize won't work for non-existing volume.""" - - self.driver.do_setup(None) - self.assertRaises(exception.VolumeNotFound, - self.driver.initialize_connection, VOLUME, CONNECTOR) - - def test_terminate_connection(self): - """Test terminating a connection.""" - - self.driver.do_setup(None) - self.driver.create_volume(VOLUME) - self.driver.initialize_connection(VOLUME, CONNECTOR) - self.driver.terminate_connection(VOLUME, CONNECTOR) - - self.assertFalse( - self.driver.xiv_proxy.is_volume_attached( - VOLUME, - CONNECTOR)) - - self.driver.delete_volume(VOLUME) - - def test_terminate_connection_should_fail_on_non_existing_volume(self): - """Test that terminate won't work for non-existing volumes.""" - - self.driver.do_setup(None) - self.assertRaises(exception.VolumeNotFound, - self.driver.terminate_connection, VOLUME, CONNECTOR) - - def test_terminate_connection_should_fail_on_non_attached_volume(self): - """Test that terminate won't work for volumes that are not attached.""" - - self.driver.do_setup(None) - self.driver.create_volume(VOLUME) - - self.assertRaises(exception.VolumeNotFoundForInstance, - self.driver.terminate_connection, VOLUME, CONNECTOR) - - self.driver.delete_volume(VOLUME) diff --git a/cinder/tests/test_xiv_ds8k.py b/cinder/tests/test_xiv_ds8k.py new file mode 100644 index 0000000000..f02509126b --- /dev/null +++ b/cinder/tests/test_xiv_ds8k.py @@ -0,0 +1,248 @@ +# Copyright 2013 IBM Corp. +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Authors:
+#   Erik Zaadi
+#   Avishay Traeger
+
+
+import mox
+from oslo.config import cfg
+
+from cinder import exception
+from cinder import test
+from cinder.volume import configuration as conf
+from cinder.volume.drivers import xiv_ds8k
+
+
+FAKE = "fake"
+VOLUME = {'size': 16,
+          'name': FAKE,
+          'id': 1}
+
+CONNECTOR = {'initiator': "iqn.2012-07.org.fake:01:948f189c4695", }
+
+CONF = cfg.CONF
+
+
+class XIVDS8KFakeProxyDriver(object):
+    """Fake IBM XIV and DS8K Proxy Driver."""
+
+    def __init__(self, xiv_ds8k_info, logger, expt, driver=None):
+        """Initialize Proxy."""
+
+        self.xiv_ds8k_info = xiv_ds8k_info
+        self.logger = logger
+        self.exception = expt
+        self.xiv_ds8k_portal = \
+            self.xiv_ds8k_iqn = FAKE
+
+        self.volumes = {}
+        self.driver = driver
+
+    def setup(self, context):
+        if self.xiv_ds8k_info['xiv_ds8k_user'] != self.driver\
+                .configuration.san_login:
+            raise self.exception.NotAuthorized()
+
+        if self.xiv_ds8k_info['xiv_ds8k_address'] != self.driver\
+                .configuration.san_ip:
+            raise self.exception.HostNotFound(host='fake')
+
+    def create_volume(self, volume):
+        if volume['size'] > 100:
+            raise self.exception.VolumeBackendAPIException(data='blah')
+        self.volumes[volume['name']] = volume
+
+    def volume_exists(self, volume):
+        return self.volumes.get(volume['name'], None) is not None
+
+    def delete_volume(self, volume):
+        if self.volumes.get(volume['name'], None) is not None:
+            del self.volumes[volume['name']]
+
+    def initialize_connection(self, volume, connector):
+        if not self.volume_exists(volume):
+            raise self.exception.VolumeNotFound(volume_id=volume['id'])
+        lun_id = volume['id']
+
+        self.volumes[volume['name']]['attached'] = connector
+
+        return {'driver_volume_type': 'iscsi',
+                'data': {'target_discovered': True,
+                         'target_portal': self.xiv_ds8k_portal,
+                         'target_iqn': self.xiv_ds8k_iqn,
+                         'target_lun': lun_id,
+                         'volume_id': volume['id'],
+                         'multipath': True,
+                         'provider_location': "%s,1 %s %s" % (
+                             self.xiv_ds8k_portal,
+                             self.xiv_ds8k_iqn,
+                             lun_id), },
+                }
+
+    def terminate_connection(self, volume, connector):
+        if not self.volume_exists(volume):
+            raise self.exception.VolumeNotFound(volume_id=volume['id'])
+        if not self.is_volume_attached(volume, connector):
+            raise self.exception.NotFound(_('Volume not found for '
+                                            'instance %(instance_id)s.')
+                                          % {'instance_id': 'fake'})
+        del self.volumes[volume['name']]['attached']
+
+    def is_volume_attached(self, volume, connector):
+        if not self.volume_exists(volume):
+            raise self.exception.VolumeNotFound(volume_id=volume['id'])
+
+        return (self.volumes[volume['name']].get('attached', None)
+                == connector)
+
+
+class XIVDS8KVolumeDriverTest(test.TestCase):
+    """Test IBM XIV and DS8K volume driver."""
+
+    def setUp(self):
+        """Initialize IBM XIV and DS8K Driver."""
+        super(XIVDS8KVolumeDriverTest, self).setUp()
+
+        configuration = mox.MockObject(conf.Configuration)
+        configuration.san_is_local = False
+        configuration.xiv_ds8k_proxy = \
+            'cinder.tests.test_xiv_ds8k.XIVDS8KFakeProxyDriver'
+        configuration.xiv_ds8k_connection_type = 'iscsi'
+        configuration.san_ip = FAKE
+        configuration.san_login = FAKE
+        configuration.san_clustername = FAKE
+        configuration.san_password = FAKE
+        configuration.append_config_values(mox.IgnoreArg())
+
+        self.driver = xiv_ds8k.XIVDS8KDriver(configuration=configuration)
+
+    def test_initialized_should_set_xiv_ds8k_info(self):
+        """Test that the san flags are passed to the IBM proxy."""
+
+        self.assertEqual(
+            self.driver.xiv_ds8k_proxy.xiv_ds8k_info['xiv_ds8k_user'],
+            self.driver.configuration.san_login)
+        self.assertEqual(
+            self.driver.xiv_ds8k_proxy.xiv_ds8k_info['xiv_ds8k_pass'],
+            self.driver.configuration.san_password)
+        self.assertEqual(
+            self.driver.xiv_ds8k_proxy.xiv_ds8k_info['xiv_ds8k_address'],
+            self.driver.configuration.san_ip)
+        self.assertEqual(
+            self.driver.xiv_ds8k_proxy.xiv_ds8k_info['xiv_ds8k_vol_pool'],
+            self.driver.configuration.san_clustername)
+
+    def test_setup_should_fail_if_credentials_are_invalid(self):
+        """Test that the xiv_ds8k_proxy validates credentials."""
+
+        self.driver.xiv_ds8k_proxy.xiv_ds8k_info['xiv_ds8k_user'] = 'invalid'
+        self.assertRaises(exception.NotAuthorized, self.driver.do_setup, None)
+
+    def test_setup_should_fail_if_connection_is_invalid(self):
+        """Test that the xiv_ds8k_proxy validates connection."""
+
+        self.driver.xiv_ds8k_proxy.xiv_ds8k_info['xiv_ds8k_address'] = \
+            'invalid'
+        self.assertRaises(exception.HostNotFound, self.driver.do_setup, None)
+
+    def test_create_volume(self):
+        """Test creating a volume."""
+
+        self.driver.do_setup(None)
+        self.driver.create_volume(VOLUME)
+        has_volume = self.driver.xiv_ds8k_proxy.volume_exists(VOLUME)
+        self.assertTrue(has_volume)
+        self.driver.delete_volume(VOLUME)
+
+    def test_volume_exists(self):
+        """Test the volume exist method with a volume that doesn't exist."""
+
+        self.driver.do_setup(None)
+        self.assertFalse(
+            self.driver.xiv_ds8k_proxy.volume_exists({'name': FAKE}))
+
+    def test_delete_volume(self):
+        """Verify that a volume is deleted."""
+
+        self.driver.do_setup(None)
+        self.driver.create_volume(VOLUME)
+        self.driver.delete_volume(VOLUME)
+        has_volume = self.driver.xiv_ds8k_proxy.volume_exists(VOLUME)
+        self.assertFalse(has_volume)
+
+    def test_delete_volume_should_fail_for_not_existing_volume(self):
+        """Verify that deleting a non-existing volume is OK."""
+
+        self.driver.do_setup(None)
+        self.driver.delete_volume(VOLUME)
+
+    def test_create_volume_should_fail_if_no_pool_space_left(self):
+        """Verify that the xiv_ds8k_proxy validates volume pool space."""
+
+        self.driver.do_setup(None)
+        self.assertRaises(exception.VolumeBackendAPIException,
+                          self.driver.create_volume,
+                          {'name': FAKE,
+                           'id': 1,
+                           'size': 12000})
+
+    def test_initialize_connection(self):
+        """Test that initialize connection attaches volume to host."""
+
+        self.driver.do_setup(None)
+        self.driver.create_volume(VOLUME)
+        self.driver.initialize_connection(VOLUME, CONNECTOR)
+
+        self.assertTrue(
+            self.driver.xiv_ds8k_proxy.is_volume_attached(VOLUME, CONNECTOR))
+
+        self.driver.terminate_connection(VOLUME, CONNECTOR)
+        self.driver.delete_volume(VOLUME)
+
+    def test_initialize_connection_should_fail_for_non_existing_volume(self):
+        """Verify that initialize won't work for non-existing volume."""
+
+        self.driver.do_setup(None)
+        self.assertRaises(exception.VolumeNotFound,
+                          self.driver.initialize_connection,
+                          VOLUME,
+                          CONNECTOR)
+
+    def test_terminate_connection(self):
+        """Test terminating a connection."""
+
+        self.driver.do_setup(None)
+        self.driver.create_volume(VOLUME)
+        self.driver.initialize_connection(VOLUME, CONNECTOR)
+
self.driver.terminate_connection(VOLUME, CONNECTOR) + + self.assertFalse(self.driver.xiv_ds8k_proxy.is_volume_attached( + VOLUME, + CONNECTOR)) + + self.driver.delete_volume(VOLUME) + + def test_terminate_connection_should_fail_on_non_existing_volume(self): + """Test that terminate won't work for non-existing volumes.""" + + self.driver.do_setup(None) + self.assertRaises(exception.VolumeNotFound, + self.driver.terminate_connection, + VOLUME, + CONNECTOR) diff --git a/cinder/tests/test_zadara.py b/cinder/tests/test_zadara.py index b999ade393..2dd2393027 100644 --- a/cinder/tests/test_zadara.py +++ b/cinder/tests/test_zadara.py @@ -1,7 +1,5 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright (c) 2012 Zadara Storage, Inc. -# Copyright (c) 2012 OpenStack LLC. +# Copyright (c) 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -23,11 +21,11 @@ import httplib from cinder import exception -from cinder import test from cinder.openstack.common import log as logging -from cinder.volume import zadara - -from lxml import etree +from cinder import test +from cinder.volume import configuration as conf +from cinder.volume.drivers.zadara import zadara_opts +from cinder.volume.drivers.zadara import ZadaraVPSAISCSIDriver LOG = logging.getLogger("cinder.volume.driver") @@ -38,7 +36,7 @@ 'access_key': '0123456789ABCDEF', 'volumes': [], 'servers': [], - 'controllers': [('active_ctrl', {'display_name': 'test_ctrl'})], + 'controllers': [('active_ctrl', {'display-name': 'test_ctrl'})], 'counter': 1000, 'login': """ @@ -99,13 +97,22 @@ def read(self): ('/api/volumes.xml', self._create_volume), ('/api/servers.xml', self._create_server), ('/api/servers/*/volumes.xml', self._attach), - ('/api/volumes/*/detach.xml', self._detach)], - 'DELETE': [('/api/volumes/*', self._delete)], + ('/api/volumes/*/detach.xml', self._detach), + ('/api/volumes/*/expand.xml', self._expand), + ('/api/consistency_groups/*/snapshots.xml', + self._create_snapshot), + ('/api/consistency_groups/*/clone.xml', + self._create_clone)], + 'DELETE': [('/api/volumes/*', self._delete), + ('/api/snapshots/*', self._delete_snapshot)], 'GET': [('/api/volumes.xml', self._list_volumes), + ('/api/pools.xml', self._list_pools), ('/api/vcontrollers.xml', self._list_controllers), ('/api/servers.xml', self._list_servers), + ('/api/consistency_groups/*/snapshots.xml', + self._list_vol_snapshots), ('/api/volumes/*/servers.xml', - self._list_vol_attachments)] + self._list_vol_attachments)] } ops_list = ops[self.method] @@ -139,8 +146,8 @@ def _get_counter(self): def _login(self): params = self._get_parameters(self.body) - if params['user'] == RUNTIME_VARS['user'] and\ - params['password'] == RUNTIME_VARS['password']: + if (params['user'] == RUNTIME_VARS['user'] and + params['password'] == RUNTIME_VARS['password']): return RUNTIME_VARS['login'] % RUNTIME_VARS['access_key'] else: return RUNTIME_VARS['bad_login'] @@ -156,6 +163,9 @@ def _create_volume(self): if self._incorrect_access_key(params): return RUNTIME_VARS['bad_login'] + params['display-name'] = params['name'] + params['cg-name'] = params['name'] + params['snapshots'] = [] params['attachments'] = [] vpsa_vol = 'volume-%07d' % self._get_counter() RUNTIME_VARS['volumes'].append((vpsa_vol, params)) @@ -166,6 +176,7 @@ def _create_server(self): if self._incorrect_access_key(params): return RUNTIME_VARS['bad_login'] + params['display-name'] = params['display_name'] vpsa_srv = 'srv-%07d' % self._get_counter() 
         RUNTIME_VARS['servers'].append((vpsa_srv, params))
         return RUNTIME_VARS['server_created'] % vpsa_srv
 
@@ -209,6 +220,65 @@ def _detach(self):
 
         return RUNTIME_VARS['bad_volume']
 
+    def _expand(self):
+        params = self._get_parameters(self.body)
+        if self._incorrect_access_key(params):
+            return RUNTIME_VARS['bad_login']
+
+        vol = self.url.split('/')[3]
+        capacity = params['capacity']
+
+        for (vol_name, params) in RUNTIME_VARS['volumes']:
+            if vol_name == vol:
+                params['capacity'] = capacity
+                return RUNTIME_VARS['good']
+
+        return RUNTIME_VARS['bad_volume']
+
+    def _create_snapshot(self):
+        params = self._get_parameters(self.body)
+        if self._incorrect_access_key(params):
+            return RUNTIME_VARS['bad_login']
+
+        cg_name = self.url.split('/')[3]
+        snap_name = params['display_name']
+
+        for (vol_name, params) in RUNTIME_VARS['volumes']:
+            if params['cg-name'] == cg_name:
+                snapshots = params['snapshots']
+                if snap_name in snapshots:
+                    # snapshot already exists
+                    return RUNTIME_VARS['bad_volume']
+                else:
+                    snapshots.append(snap_name)
+                    return RUNTIME_VARS['good']
+
+        return RUNTIME_VARS['bad_volume']
+
+    def _delete_snapshot(self):
+        snap = self.url.split('/')[3].split('.')[0]
+
+        for (vol_name, params) in RUNTIME_VARS['volumes']:
+            if snap in params['snapshots']:
+                params['snapshots'].remove(snap)
+                return RUNTIME_VARS['good']
+
+        return RUNTIME_VARS['bad_volume']
+
+    def _create_clone(self):
+        params = self._get_parameters(self.body)
+        if self._incorrect_access_key(params):
+            return RUNTIME_VARS['bad_login']
+
+        params['display-name'] = params['name']
+        params['cg-name'] = params['name']
+        params['capacity'] = 1
+        params['snapshots'] = []
+        params['attachments'] = []
+        vpsa_vol = 'volume-%07d' % self._get_counter()
+        RUNTIME_VARS['volumes'].append((vpsa_vol, params))
+        return RUNTIME_VARS['good']
+
     def _delete(self):
         vol = self.url.split('/')[3].split('.')[0]
 
@@ -223,10 +293,16 @@ def _delete(self):
 
         return RUNTIME_VARS['bad_volume']
 
-    def _generate_list_resp(self, header, footer, body, lst):
+    def _generate_list_resp(self, header, footer, body, lst, vol):
         resp = header
         for (obj, params) in lst:
-            resp += body % (obj, params['display_name'])
+            if vol:
+                resp += body % (obj,
+                                params['display-name'],
+                                params['cg-name'],
+                                params['capacity'])
+            else:
+                resp += body % (obj, params['display-name'])
         resp += footer
         return resp
 
@@ -238,16 +314,20 @@ def _list_volumes(self):
         body = """
            %s
            %s
+           %s
            Available
-           1
+           %s
            1
            r5
            write-through
            2012-01-28...
            2012-01-28...
""" - return self._generate_list_resp(header, footer, body, - RUNTIME_VARS['volumes']) + return self._generate_list_resp(header, + footer, + body, + RUNTIME_VARS['volumes'], + True) def _list_controllers(self): header = """ @@ -267,8 +347,19 @@ def _list_controllers(self): test_chap_user test_chap_secret """ - return self._generate_list_resp(header, footer, body, - RUNTIME_VARS['controllers']) + return self._generate_list_resp(header, + footer, + body, + RUNTIME_VARS['controllers'], + False) + + def _list_pools(self): + header = """ + 0 + + """ + footer = "" + return header + footer def _list_servers(self): header = """ @@ -286,7 +377,7 @@ def _list_servers(self): resp = header for (obj, params) in RUNTIME_VARS['servers']: - resp += body % (obj, params['display_name'], params['iqn']) + resp += body % (obj, params['display-name'], params['iqn']) resp += footer return resp @@ -317,7 +408,35 @@ def _list_vol_attachments(self): for server in attachments: srv_params = self._get_server_obj(server) resp += body % (server, - srv_params['display_name'], srv_params['iqn']) + srv_params['display-name'], + srv_params['iqn']) + resp += footer + return resp + + return RUNTIME_VARS['bad_volume'] + + def _list_vol_snapshots(self): + cg_name = self.url.split('/')[3] + + header = """ + 0 + """ + footer = "" + + body = """ + %s + %s + normal + %s + pool-00000001 + """ + + for (vol_name, params) in RUNTIME_VARS['volumes']: + if params['cg-name'] == cg_name: + snapshots = params['snapshots'] + resp = header + for snap in snapshots: + resp += body % (snap, snap, cg_name) resp += footer return resp @@ -353,19 +472,23 @@ def __init__(self, host, port): class ZadaraVPSADriverTestCase(test.TestCase): - """Test case for Zadara VPSA volume driver""" + """Test case for Zadara VPSA volume driver.""" def setUp(self): LOG.debug('Enter: setUp') super(ZadaraVPSADriverTestCase, self).setUp() - self.flags( - zadara_user='test', - zadara_password='test_password', - ) + global RUNTIME_VARS RUNTIME_VARS = copy.deepcopy(DEFAULT_RUNTIME_VARS) - self.driver = zadara.ZadaraVPSAISCSIDriver() + self.configuration = conf.Configuration(None) + self.configuration.append_config_values(zadara_opts) + self.configuration.reserved_percentage = 10 + self.configuration.zadara_user = 'test' + self.configuration.zadara_password = 'test_password' + self.configuration.zadara_vpsa_poolname = 'pool-0001' + + self.driver = ZadaraVPSAISCSIDriver(configuration=self.configuration) self.stubs.Set(httplib, 'HTTPConnection', FakeHTTPConnection) self.stubs.Set(httplib, 'HTTPSConnection', FakeHTTPSConnection) self.driver.do_setup(None) @@ -412,15 +535,6 @@ def test_empty_apis(self): self.driver.ensure_export(context, volume) self.driver.remove_export(context, volume) - self.assertRaises(NotImplementedError, - self.driver.create_volume_from_snapshot, - volume, None) - self.assertRaises(NotImplementedError, - self.driver.create_snapshot, - None) - self.assertRaises(NotImplementedError, - self.driver.delete_snapshot, - None) self.assertRaises(NotImplementedError, self.driver.local_path, None) @@ -428,7 +542,7 @@ def test_empty_apis(self): self.driver.check_for_setup_error() def test_volume_attach_detach(self): - """Test volume attachment and detach""" + """Test volume attachment and detach.""" volume = {'name': 'test_volume_01', 'size': 1, 'id': 123} connector = dict(initiator='test_iqn.1') @@ -450,7 +564,7 @@ def test_volume_attach_detach(self): self.driver.delete_volume(volume) def test_volume_attach_multiple_detach(self): - """Test multiple volume attachment 
and detach""" + """Test multiple volume attachment and detach.""" volume = {'name': 'test_volume_01', 'size': 1, 'id': 123} connector1 = dict(initiator='test_iqn.1') connector2 = dict(initiator='test_iqn.2') @@ -467,7 +581,7 @@ def test_volume_attach_multiple_detach(self): self.driver.delete_volume(volume) def test_wrong_attach_params(self): - """Test different wrong attach scenarios""" + """Test different wrong attach scenarios.""" volume1 = {'name': 'test_volume_01', 'size': 1, 'id': 101} volume2 = {'name': 'test_volume_02', 'size': 1, 'id': 102} volume3 = {'name': 'test_volume_03', 'size': 1, 'id': 103} @@ -480,7 +594,7 @@ def test_wrong_attach_params(self): volume1, connector1) def test_wrong_detach_params(self): - """Test different wrong detachment scenarios""" + """Test different wrong detachment scenarios.""" volume1 = {'name': 'test_volume_01', 'size': 1, 'id': 101} volume2 = {'name': 'test_volume_02', 'size': 1, 'id': 102} @@ -505,7 +619,7 @@ def test_wrong_detach_params(self): volume1, connector2) def test_wrong_login_reply(self): - """Test wrong login reply""" + """Test wrong login reply.""" RUNTIME_VARS['login'] = """ %s @@ -530,13 +644,13 @@ def test_wrong_login_reply(self): self.driver.do_setup, None) def test_ssl_use(self): - """Coverage test for SSL connection""" + """Coverage test for SSL connection.""" self.flags(zadara_vpsa_use_ssl=True) self.driver.do_setup(None) self.flags(zadara_vpsa_use_ssl=False) def test_bad_http_response(self): - """Coverage test for non-good HTTP response""" + """Coverage test for non-good HTTP response.""" RUNTIME_VARS['status'] = 400 volume = {'name': 'test_volume_01', 'size': 1} @@ -544,7 +658,7 @@ def test_bad_http_response(self): self.driver.create_volume, volume) def test_delete_without_detach(self): - """Test volume deletion without detach""" + """Test volume deletion without detach.""" volume1 = {'name': 'test_volume_01', 'size': 1, 'id': 101} connector1 = dict(initiator='test_iqn.1') @@ -574,3 +688,107 @@ def test_no_active_ctrl(self): self.assertRaises(exception.ZadaraVPSANoActiveController, self.driver.initialize_connection, volume, connector) + + def test_create_destroy_snapshot(self): + """Create/Delete snapshot test.""" + volume = {'name': 'test_volume_01', 'size': 1} + snapshot = {'name': 'snap_01', + 'volume_name': volume['name']} + + self.driver.create_volume(volume) + + self.assertRaises(exception.VolumeNotFound, + self.driver.create_snapshot, + {'name': snapshot['name'], + 'volume_name': 'wrong_vol'}) + + self.driver.create_snapshot(snapshot) + + # Deleted should succeed for missing volume + self.driver.delete_snapshot({'name': snapshot['name'], + 'volume_name': 'wrong_vol'}) + # Deleted should succeed for missing snap + self.driver.delete_snapshot({'name': 'wrong_snap', + 'volume_name': volume['name']}) + + self.driver.delete_snapshot(snapshot) + self.driver.delete_volume(volume) + + def test_expand_volume(self): + """Expand volume test.""" + volume = {'name': 'test_volume_01', 'size': 10} + volume2 = {'name': 'test_volume_02', 'size': 10} + + self.driver.create_volume(volume) + + self.assertRaises(exception.VolumeNotFound, + self.driver.extend_volume, + volume2, 15) + self.assertRaises(exception.InvalidInput, + self.driver.extend_volume, + volume, 5) + + self.driver.extend_volume(volume, 15) + self.driver.delete_volume(volume) + + def test_create_destroy_clones(self): + """Create/Delete clones test.""" + volume1 = {'name': 'test_volume_01', 'size': 1} + volume2 = {'name': 'test_volume_02', 'size': 1} + volume3 = {'name': 
'test_volume_03', 'size': 1} + snapshot = {'name': 'snap_01', + 'volume_name': volume1['name']} + + self.driver.create_volume(volume1) + self.driver.create_snapshot(snapshot) + + # Test invalid vol reference + self.assertRaises(exception.VolumeNotFound, + self.driver.create_volume_from_snapshot, + volume2, + {'name': snapshot['name'], + 'volume_name': 'wrong_vol'}) + # Test invalid snap reference + self.assertRaises(exception.VolumeNotFound, + self.driver.create_volume_from_snapshot, + volume2, + {'name': 'wrong_snap', + 'volume_name': snapshot['volume_name']}) + # Test invalid src_vref for volume clone + self.assertRaises(exception.VolumeNotFound, + self.driver.create_cloned_volume, + volume3, volume2) + + self.driver.create_volume_from_snapshot(volume2, snapshot) + self.driver.create_cloned_volume(volume3, volume1) + + self.driver.delete_volume(volume3) + self.driver.delete_volume(volume2) + self.driver.delete_snapshot(snapshot) + self.driver.delete_volume(volume1) + + def test_get_volume_stats(self): + """Get stats test.""" + + self.mox.StubOutWithMock(self.configuration, 'safe_get') + self.configuration.safe_get('volume_backend_name'). \ + AndReturn('ZadaraVPSAISCSIDriver') + self.mox.ReplayAll() + + data = self.driver.get_volume_stats(True) + + self.assertEqual(data['vendor_name'], 'Zadara Storage') + self.assertEqual(data['total_capacity_gb'], 'infinite') + self.assertEqual(data['free_capacity_gb'], 'infinite') + + self.assertEqual(data, + {'total_capacity_gb': 'infinite', + 'free_capacity_gb': 'infinite', + 'reserved_percentage': + self.configuration.reserved_percentage, + 'QoS_support': False, + 'vendor_name': 'Zadara Storage', + 'driver_version': self.driver.VERSION, + 'storage_protocol': 'iSCSI', + 'volume_backend_name': 'ZadaraVPSAISCSIDriver', + }) diff --git a/cinder/tests/utils.py b/cinder/tests/utils.py index 5b9086b754..5262a7a543 100644 --- a/cinder/tests/utils.py +++ b/cinder/tests/utils.py @@ -1,6 +1,4 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2011 OpenStack LLC +# Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -12,14 +10,59 @@ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations +# under the License. 
# -import cinder.context -import cinder.db -import cinder.flags -FLAGS = cinder.flags.FLAGS +from cinder import context +from cinder import db def get_test_admin_context(): - return cinder.context.get_admin_context() + return context.get_admin_context() + + +def create_volume(ctxt, + host='test_host', + display_name='test_volume', + display_description='this is a test volume', + status='available', + migration_status=None, + size=1, + availability_zone='fake_az', + volume_type_id=None, + **kwargs): + """Create a volume object in the DB.""" + vol = {} + vol['size'] = size + vol['host'] = host + vol['user_id'] = ctxt.user_id + vol['project_id'] = ctxt.project_id + vol['status'] = status + vol['migration_status'] = migration_status + vol['display_name'] = display_name + vol['display_description'] = display_description + vol['attach_status'] = 'detached' + vol['availability_zone'] = availability_zone + if volume_type_id: + vol['volume_type_id'] = volume_type_id + for key in kwargs: + vol[key] = kwargs[key] + return db.volume_create(ctxt, vol) + + +def create_snapshot(ctxt, + volume_id, + display_name='test_snapshot', + display_description='this is a test snapshot', + status='creating'): + vol = db.volume_get(ctxt, volume_id) + snap = {} + snap['volume_id'] = volume_id + snap['user_id'] = ctxt.user_id + snap['project_id'] = ctxt.project_id + snap['status'] = status + snap['volume_size'] = vol['size'] + snap['display_name'] = display_name + snap['display_description'] = display_description + return db.snapshot_create(ctxt, snap) diff --git a/cinder/tests/var/ca.crt b/cinder/tests/var/ca.crt new file mode 100644 index 0000000000..9d66ca6270 --- /dev/null +++ b/cinder/tests/var/ca.crt @@ -0,0 +1,35 @@ +-----BEGIN CERTIFICATE----- +MIIGDDCCA/SgAwIBAgIJAPSvwQYk4qI4MA0GCSqGSIb3DQEBBQUAMGExCzAJBgNV +BAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0YXRlMRUwEwYDVQQKEwxPcGVuc3RhY2sg +Q0ExEjAQBgNVBAsTCUdsYW5jZSBDQTESMBAGA1UEAxMJR2xhbmNlIENBMB4XDTEy +MDIwOTE3MTAwMloXDTIyMDIwNjE3MTAwMlowYTELMAkGA1UEBhMCQVUxEzARBgNV +BAgTClNvbWUtU3RhdGUxFTATBgNVBAoTDE9wZW5zdGFjayBDQTESMBAGA1UECxMJ +R2xhbmNlIENBMRIwEAYDVQQDEwlHbGFuY2UgQ0EwggIiMA0GCSqGSIb3DQEBAQUA +A4ICDwAwggIKAoICAQDmf+fapWfzy1Uylus0KGalw4X/5xZ+ltPVOr+IdCPbstvi +RTC5g+O+TvXeOP32V/cnSY4ho/+f2q730za+ZA/cgWO252rcm3Q7KTJn3PoqzJvX +/l3EXe3/TCrbzgZ7lW3QLTCTEE2eEzwYG3wfDTOyoBq+F6ct6ADh+86gmpbIRfYI +N+ixB0hVyz9427PTof97fL7qxxkjAayB28OfwHrkEBl7iblNhUC0RoH+/H9r5GEl +GnWiebxfNrONEHug6PHgiaGq7/Dj+u9bwr7J3/NoS84I08ajMnhlPZxZ8bS/O8If +ceWGZv7clPozyhABT/otDfgVcNH1UdZ4zLlQwc1MuPYN7CwxrElxc8Quf94ttGjb +tfGTl4RTXkDofYdG1qBWW962PsGl2tWmbYDXV0q5JhV/IwbrE1X9f+OksJQne1/+ +dZDxMhdf2Q1V0P9hZZICu4+YhmTMs5Mc9myKVnzp4NYdX5fXoB/uNYph+G7xG5IK +WLSODKhr1wFGTTcuaa8LhOH5UREVenGDJuc6DdgX9a9PzyJGIi2ngQ03TJIkCiU/ +4J/r/vsm81ezDiYZSp2j5JbME+ixW0GBLTUWpOIxUSHgUFwH5f7lQwbXWBOgwXQk +BwpZTmdQx09MfalhBtWeu4/6BnOCOj7e/4+4J0eVxXST0AmVyv8YjJ2nz1F9oQID +AQABo4HGMIHDMB0GA1UdDgQWBBTk7Krj4bEsTjHXaWEtI2GZ5ACQyTCBkwYDVR0j +BIGLMIGIgBTk7Krj4bEsTjHXaWEtI2GZ5ACQyaFlpGMwYTELMAkGA1UEBhMCQVUx +EzARBgNVBAgTClNvbWUtU3RhdGUxFTATBgNVBAoTDE9wZW5zdGFjayBDQTESMBAG +A1UECxMJR2xhbmNlIENBMRIwEAYDVQQDEwlHbGFuY2UgQ0GCCQD0r8EGJOKiODAM +BgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBBQUAA4ICAQA8Zrss/MiwFHGmDlercE0h +UvzA54n/EvKP9nP3jHM2qW/VPfKdnFw99nEPFLhb+lN553vdjOpCYFm+sW0Z5Mi4 +qsFkk4AmXIIEFOPt6zKxMioLYDQ9Sw/BUv6EZGeANWr/bhmaE+dMcKJt5le/0jJm +2ahsVB9fbFu9jBFeYb7Ba/x2aLkEGMxaDLla+6EQhj148fTnS1wjmX9G2cNzJvj/ ++C2EfKJIuDJDqw2oS2FGVpP37FA2Bz2vga0QatNneLkGKCFI3ZTenBznoN+fmurX +TL3eJE4IFNrANCcdfMpdyLAtXz4KpjcehqpZMu70er3d30zbi1l0Ajz4dU+WKz/a 
+NQES+vMkT2wqjXHVTjrNwodxw3oLK/EuTgwoxIHJuplx5E5Wrdx9g7Gl1PBIJL8V +xiOYS5N7CakyALvdhP7cPubA2+TPAjNInxiAcmhdASS/Vrmpvrkat6XhGn8h9liv +ysDOpMQmYQkmgZBpW8yBKK7JABGGsJADJ3E6J5MMWBX2RR4kFoqVGAzdOU3oyaTy +I0kz5sfuahaWpdYJVlkO+esc0CRXw8fLDYivabK2tOgUEWeZsZGZ9uK6aV1VxTAY +9Guu3BJ4Rv/KP/hk7mP8rIeCwotV66/2H8nq72ImQhzSVyWcxbFf2rJiFQJ3BFwA +WoRMgEwjGJWqzhJZUYpUAQ== +-----END CERTIFICATE----- diff --git a/cinder/tests/var/certificate.crt b/cinder/tests/var/certificate.crt new file mode 100644 index 0000000000..3c1aa6363b --- /dev/null +++ b/cinder/tests/var/certificate.crt @@ -0,0 +1,30 @@ +-----BEGIN CERTIFICATE----- +MIIFLjCCAxYCAQEwDQYJKoZIhvcNAQEFBQAwYTELMAkGA1UEBhMCQVUxEzARBgNV +BAgTClNvbWUtU3RhdGUxFTATBgNVBAoTDE9wZW5zdGFjayBDQTESMBAGA1UECxMJ +R2xhbmNlIENBMRIwEAYDVQQDEwlHbGFuY2UgQ0EwHhcNMTIwMjA5MTcxMDUzWhcN +MjIwMjA2MTcxMDUzWjBZMQswCQYDVQQGEwJBVTETMBEGA1UECBMKU29tZS1TdGF0 +ZTESMBAGA1UEChMJT3BlbnN0YWNrMQ8wDQYDVQQLEwZHbGFuY2UxEDAOBgNVBAMT +BzAuMC4wLjAwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDXpUkQN6pu +avo+gz3o1K4krVdPl1m7NjNJDyD/+ZH0EGNcEN7iag1qPE7JsjqGPNZsQK1dMoXb +Sz+OSi9qvNeJnBcfwUx5qTAtwyAb9AxGkwuMafIU+lWbsclo+dPGsja01ywbXTCZ +bF32iqnpOMYhfxWUdoQYiBkhxxhW9eMPKLS/KkP8/bx+Vaa2XJiAebqkd9nrksAA +BeGc9mlafYBEmiChPdJEPw+1ePA4QVq9aPepDsqAKtGN8JLpmoC3BdxQQTbbwL3Q +8fTXK4tCNUaVk4AbDy/McFq6y0ocQoBPJjihOY35mWG/OLtcI99yPOpWGnps/5aG +/64DDJ2D67Fnaj6gKHV+6TXFO8KZxlnxtgtiZDJBZkneTBt9ArSOv+l6NBsumRz0 +iEJ4o4H1S2TSMnprAvX7WnGtc6Xi9gXahYcDHEelwwYzqAiTBv6hxSp4MZ2dNXa+ +KzOitC7ZbV2qsg0au0wjfE/oSQ3NvsvUr8nOmfutJTvHRAwbC1v4G/tuAsO7O0w2 +0u2B3u+pG06m5+rnEqp+rB9hmukRYTfgEFRRsVIvpFl/cwvPXKRcX03UIMx+lLr9 +Ft+ep7YooBhY3wY2kwCxD4lRYNmbwsCIVywZt40f/4ad98TkufR9NhsfycxGeqbr +mTMFlZ8TTlmP82iohekKCOvoyEuTIWL2+wIDAQABMA0GCSqGSIb3DQEBBQUAA4IC +AQBMUBgV0R+Qltf4Du7u/8IFmGAoKR/mktB7R1gRRAqsvecUt7kIwBexGdavGg1y +0pU0+lgUZjJ20N1SlPD8gkNHfXE1fL6fmMjWz4dtYJjzRVhpufHPeBW4tl8DgHPN +rBGAYQ+drDSXaEjiPQifuzKx8WS+DGA3ki4co5mPjVnVH1xvLIdFsk89z3b3YD1k +yCJ/a9K36x6Z/c67JK7s6MWtrdRF9+MVnRKJ2PK4xznd1kBz16V+RA466wBDdARY +vFbtkafbEqOb96QTonIZB7+fAldKDPZYnwPqasreLmaGOaM8sxtlPYAJ5bjDONbc +AaXG8BMRQyO4FyH237otDKlxPyHOFV66BaffF5S8OlwIMiZoIvq+IcTZOdtDUSW2 +KHNLfe5QEDZdKjWCBrfqAfvNuG13m03WqfmcMHl3o/KiPJlx8l9Z4QEzZ9xcyQGL +cncgeHM9wJtzi2cD/rTDNFsx/gxvoyutRmno7I3NRbKmpsXF4StZioU3USRspB07 +hYXOVnG3pS+PjVby7ThT3gvFHSocguOsxClx1epdUJAmJUbmM7NmOp5WVBVtMtC2 +Su4NG/xJciXitKzw+btb7C7RjO6OEqv/1X/oBDzKBWQAwxUC+lqmnM7W6oqWJFEM +YfTLnrjs7Hj6ThMGcEnfvc46dWK3dz0RjsQzUxugPuEkLA== +-----END CERTIFICATE----- diff --git a/cinder/tests/var/privatekey.key b/cinder/tests/var/privatekey.key new file mode 100644 index 0000000000..b63df3d29d --- /dev/null +++ b/cinder/tests/var/privatekey.key @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJKAIBAAKCAgEA16VJEDeqbmr6PoM96NSuJK1XT5dZuzYzSQ8g//mR9BBjXBDe +4moNajxOybI6hjzWbECtXTKF20s/jkovarzXiZwXH8FMeakwLcMgG/QMRpMLjGny +FPpVm7HJaPnTxrI2tNcsG10wmWxd9oqp6TjGIX8VlHaEGIgZIccYVvXjDyi0vypD +/P28flWmtlyYgHm6pHfZ65LAAAXhnPZpWn2ARJogoT3SRD8PtXjwOEFavWj3qQ7K +gCrRjfCS6ZqAtwXcUEE228C90PH01yuLQjVGlZOAGw8vzHBaustKHEKATyY4oTmN ++Zlhvzi7XCPfcjzqVhp6bP+Whv+uAwydg+uxZ2o+oCh1fuk1xTvCmcZZ8bYLYmQy +QWZJ3kwbfQK0jr/pejQbLpkc9IhCeKOB9Utk0jJ6awL1+1pxrXOl4vYF2oWHAxxH +pcMGM6gIkwb+ocUqeDGdnTV2viszorQu2W1dqrINGrtMI3xP6EkNzb7L1K/Jzpn7 +rSU7x0QMGwtb+Bv7bgLDuztMNtLtgd7vqRtOpufq5xKqfqwfYZrpEWE34BBUUbFS +L6RZf3MLz1ykXF9N1CDMfpS6/Rbfnqe2KKAYWN8GNpMAsQ+JUWDZm8LAiFcsGbeN +H/+GnffE5Ln0fTYbH8nMRnqm65kzBZWfE05Zj/NoqIXpCgjr6MhLkyFi9vsCAwEA +AQKCAgAA96baQcWr9SLmQOR4NOwLEhQAMWefpWCZhU3amB4FgEVR1mmJjnw868RW 
+t0v36jH0Dl44us9K6o2Ab+jCi9JTtbWM2Osk6JNkwSlVtsSPVH2KxbbmTTExH50N +sYE3tPj12rlB7isXpRrOzlRwzWZmJBHOtrFlAsdKFYCQc03vdXlKGkBv1BuSXYP/ +8W5ltSYXMspxehkOZvhaIejbFREMPbzDvGlDER1a7Q320qQ7kUr7ISvbY1XJUzj1 +f1HwgEA6w/AhED5Jv6wfgvx+8Yo9hYnflTPbsO1XRS4x7kJxGHTMlFuEsSF1ICYH +Bcos0wUiGcBO2N6uAFuhe98BBn+nOwAPZYWwGkmVuK2psm2mXAHx94GT/XqgK/1r +VWGSoOV7Fhjauc2Nv8/vJU18DXT3OY5hc4iXVeEBkuZwRb/NVUtnFoHxVO/Mp5Fh +/W5KZaLWVrLghzvSQ/KUIM0k4lfKDZpY9ZpOdNgWDyZY8tNrXumUZZimzWdXZ9vR +dBssmd8qEKs1AHGFnMDt56IjLGou6j0qnWsLdR1e/WEFsYzGXLVHCv6vXRNkbjqh +WFw5nA+2Dw1YAsy+YkTfgx2pOe+exM/wxsVPa7tG9oZ374dywUi1k6VoHw5dkmJw +1hbXqSLZtx2N51G+SpGmNAV4vLUF0y3dy2wnrzFkFT4uxh1w8QKCAQEA+h6LwHTK +hgcJx6CQQ6zYRqXo4wdvMooY1FcqJOq7LvJUA2CX5OOLs8qN1TyFrOCuAUTurOrM +ABlQ0FpsIaP8TOGz72dHe2eLB+dD6Bqjn10sEFMn54zWd/w9ympQrO9jb5X3ViTh +sCcdYyXVS9Hz8nzbbIF+DaKlxF2Hh71uRDxXpMPxRcGbOIuKZXUj6RkTIulzqT6o +uawlegWxch05QSgzq/1ASxtjTzo4iuDCAii3N45xqxnB+fV9NXEt4R2oOGquBRPJ +LxKcOnaQKBD0YNX4muTq+zPlv/kOb8/ys2WGWDUrNkpyJXqhTve4KONjqM7+iL/U +4WdJuiCjonzk/QKCAQEA3Lc+kNq35FNLxMcnCVcUgkmiCWZ4dyGZZPdqjOPww1+n +bbudGPzY1nxOvE60dZM4or/tm6qlXYfb2UU3+OOJrK9s297EQybZ8DTZu2GHyitc +NSFV3Gl4cgvKdbieGKkk9X2dV9xSNesNvX9lJEnQxuwHDTeo8ubLHtV88Ml1xokn +7W+IFiyEuUIL4e5/fadbrI3EwMrbCF4+9VcfABx4PTNMzdc8LsncCMXE+jFX8AWp +TsT2JezTe5o2WpvBoKMAYhJQNQiaWATn00pDVY/70H1vK3ljomAa1IUdOr/AhAF7 +3jL0MYMgXSHzXZOKAtc7yf+QfFWF1Ls8+sen1clJVwKCAQEAp59rB0r+Iz56RmgL +5t7ifs5XujbURemY5E2aN+18DuVmenD0uvfoO1DnJt4NtCNLWhxpXEdq+jH9H/VJ +fG4a+ydT4IC1vjVRTrWlo9qeh4H4suQX3S1c2kKY4pvHf25blH/Lp9bFzbkZD8Ze +IRcOxxb4MsrBwL+dGnGYD9dbG63ZCtoqSxaKQSX7VS1hKKmeUopj8ivFBdIht5oz +JogBQ/J+Vqg9u1gagRFCrYgdXTcOOtRix0lW336vL+6u0ax/fXe5MjvlW3+8Zc3p +pIBgVrlvh9ccx8crFTIDg9m4DJRgqaLQV+0ifI2np3WK3RQvSQWYPetZ7sm69ltD +bvUGvQKCAQAz5CEhjUqOs8asjOXwnDiGKSmfbCgGWi/mPQUf+rcwN9z1P5a/uTKB +utgIDbj/q401Nkp2vrgCNV7KxitSqKxFnTjKuKUL5KZ4gvRtyZBTR751/1BgcauP +pJYE91K0GZBG5zGG5pWtd4XTd5Af5/rdycAeq2ddNEWtCiRFuBeohbaNbBtimzTZ +GV4R0DDJKf+zoeEQMqEsZnwG0mTHceoS+WylOGU92teQeG7HI7K5C5uymTwFzpgq +ByegRd5QFgKRDB0vWsZuyzh1xI/wHdnmOpdYcUGre0zTijhFB7ALWQ32P6SJv3ps +av78kSNxZ4j3BM7DbJf6W8sKasZazOghAoIBAHekpBcLq9gRv2+NfLYxWN2sTZVB +1ldwioG7rWvk5YQR2akukecI3NRjtC5gG2vverawG852Y4+oLfgRMHxgp0qNStwX +juTykzPkCwZn8AyR+avC3mkrtJyM3IigcYOu4/UoaRDFa0xvCC1EfumpnKXIpHag +miSQZf2sVbgqb3/LWvHIg/ceOP9oGJve87/HVfQtBoLaIe5RXCWkqB7mcI/exvTS +8ShaW6v2Fe5Bzdvawj7sbsVYRWe93Aq2tmIgSX320D2RVepb6mjD4nr0IUaM3Yed +TFT7e2ikWXyDLLgVkDTU4Qe8fr3ZKGfanCIDzvgNw6H1gRi+2WQgOmjilMQ= +-----END RSA PRIVATE KEY----- diff --git a/cinder/tests/windows/__init__.py b/cinder/tests/windows/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cinder/tests/windows/db_fakes.py b/cinder/tests/windows/db_fakes.py new file mode 100644 index 0000000000..8442c2c9b4 --- /dev/null +++ b/cinder/tests/windows/db_fakes.py @@ -0,0 +1,48 @@ +# Copyright 2012 Pedro Navarro Perez +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +""" +Stubouts, mocks and fixtures for windows volume test suite +""" + + +def get_fake_volume_info(): + return {'name': 'volume_name', + 'size': 1, + 'provider_location': 'iqn.2010-10.org.openstack:' + 'volume_name', + 'id': 1, + 'provider_auth': None} + + +def get_fake_volume_info_cloned(): + return {'name': 'volume_name_cloned', + 'size': 1, + 'provider_location': 'iqn.2010-10.org.openstack:' + + 'volume_name_cloned', + 'id': 1, + 'provider_auth': None} + + +def get_fake_image_meta(): + return {'id': '10958016-e196-42e3-9e7f-5d8927ae3099' + } + + +def get_fake_snapshot_info(): + return {'name': 'snapshot_name', + 'volume_name': 'volume_name', } + + +def get_fake_connector_info(): + return {'initiator': 'iqn.2010-10.org.openstack:' + 'volume_name', } diff --git a/cinder/tests/xenapi/__init__.py b/cinder/tests/xenapi/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cinder/transfer/__init__.py b/cinder/transfer/__init__.py new file mode 100644 index 0000000000..7a4f44547b --- /dev/null +++ b/cinder/transfer/__init__.py @@ -0,0 +1,27 @@ +# Copyright (C) 2013 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# Importing full names to not pollute the namespace and cause possible +# collisions with use of 'from cinder.transfer import ' elsewhere. + + +from oslo.config import cfg + +import cinder.openstack.common.importutils + + +CONF = cfg.CONF + +API = cinder.openstack.common.importutils.import_class(CONF.transfer_api_class) diff --git a/cinder/transfer/api.py b/cinder/transfer/api.py new file mode 100644 index 0000000000..cc65edd5b2 --- /dev/null +++ b/cinder/transfer/api.py @@ -0,0 +1,210 @@ +# Copyright (C) 2013 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Handles all requests relating to transferring ownership of volumes. 
+""" + + +import datetime +import hashlib +import hmac +import random + +from oslo.config import cfg + +from cinder.db import base +from cinder import exception +from cinder.openstack.common import excutils +from cinder.openstack.common import log as logging +from cinder import quota +from cinder.volume import api as volume_api + + +volume_transfer_opts = [ + cfg.IntOpt('volume_transfer_salt_length', default=8, + help='The number of characters in the salt.'), + cfg.IntOpt('volume_transfer_key_length', default=16, + help='The number of characters in the ' + 'autogenerated auth key.'), ] + +CONF = cfg.CONF +CONF.register_opts(volume_transfer_opts) + +LOG = logging.getLogger(__name__) +QUOTAS = quota.QUOTAS + + +class API(base.Base): + """API for interacting volume transfers.""" + + def __init__(self, db_driver=None): + self.volume_api = volume_api.API() + super(API, self).__init__(db_driver) + + def get(self, context, transfer_id): + rv = self.db.transfer_get(context, transfer_id) + return dict(rv.iteritems()) + + def delete(self, context, transfer_id): + """Make the RPC call to delete a volume transfer.""" + volume_api.check_policy(context, 'delete_transfer') + transfer = self.db.transfer_get(context, transfer_id) + + volume_ref = self.db.volume_get(context, transfer.volume_id) + if volume_ref['status'] != 'awaiting-transfer': + msg = _("Volume in unexpected state") + LOG.error(msg) + self.db.transfer_destroy(context, transfer_id) + + def get_all(self, context, filters={}): + volume_api.check_policy(context, 'get_all_transfers') + if context.is_admin and 'all_tenants' in filters: + transfers = self.db.transfer_get_all(context) + else: + transfers = self.db.transfer_get_all_by_project(context, + context.project_id) + return transfers + + def _get_random_string(self, length): + """Get a random hex string of the specified length.""" + rndstr = "" + random.seed(datetime.datetime.now().microsecond) + while len(rndstr) < length: + rndstr += hashlib.sha224(str(random.random())).hexdigest() + + return rndstr[0:length] + + def _get_crypt_hash(self, salt, auth_key): + """Generate a random hash based on the salt and the auth key.""" + return hmac.new(str(salt), + str(auth_key), + hashlib.sha1).hexdigest() + + def create(self, context, volume_id, display_name): + """Creates an entry in the transfers table.""" + volume_api.check_policy(context, 'create_transfer') + LOG.info("Generating transfer record for volume %s" % volume_id) + volume_ref = self.db.volume_get(context, volume_id) + if volume_ref['status'] != "available": + raise exception.InvalidVolume(reason=_("status must be available")) + + # The salt is just a short random string. + salt = self._get_random_string(CONF.volume_transfer_salt_length) + auth_key = self._get_random_string(CONF.volume_transfer_key_length) + crypt_hash = self._get_crypt_hash(salt, auth_key) + + # TODO(ollie): Transfer expiry needs to be implemented. 
+        transfer_rec = {'volume_id': volume_id,
+                        'display_name': display_name,
+                        'salt': salt,
+                        'crypt_hash': crypt_hash,
+                        'expires_at': None}
+
+        try:
+            transfer = self.db.transfer_create(context, transfer_rec)
+        except Exception:
+            LOG.error(_("Failed to create transfer record for %s") % volume_id)
+            raise
+        return {'id': transfer['id'],
+                'volume_id': transfer['volume_id'],
+                'display_name': transfer['display_name'],
+                'auth_key': auth_key,
+                'created_at': transfer['created_at']}
+
+    def accept(self, context, transfer_id, auth_key):
+        """Accept a volume that has been offered for transfer."""
+        # We must use an elevated context to see the volume that is still
+        # owned by the donor.
+        volume_api.check_policy(context, 'accept_transfer')
+        transfer = self.db.transfer_get(context.elevated(), transfer_id)
+
+        crypt_hash = self._get_crypt_hash(transfer['salt'], auth_key)
+        if crypt_hash != transfer['crypt_hash']:
+            msg = (_("Attempt to transfer %s with invalid auth key.") %
+                   transfer_id)
+            LOG.error(msg)
+            raise exception.InvalidAuthKey(reason=msg)
+
+        volume_id = transfer['volume_id']
+        vol_ref = self.db.volume_get(context.elevated(), volume_id)
+
+        try:
+            reservations = QUOTAS.reserve(context, volumes=1,
+                                          gigabytes=vol_ref['size'])
+        except exception.OverQuota as e:
+            overs = e.kwargs['overs']
+            usages = e.kwargs['usages']
+            quotas = e.kwargs['quotas']
+
+            def _consumed(name):
+                return (usages[name]['reserved'] + usages[name]['in_use'])
+
+            if 'gigabytes' in overs:
+                msg = _("Quota exceeded for %(s_pid)s, tried to create "
+                        "%(s_size)sG volume (%(d_consumed)dG of %(d_quota)dG "
+                        "already consumed)")
+                LOG.warn(msg % {'s_pid': context.project_id,
+                                's_size': vol_ref['size'],
+                                'd_consumed': _consumed('gigabytes'),
+                                'd_quota': quotas['gigabytes']})
+                raise exception.VolumeSizeExceedsAvailableQuota(
+                    requested=vol_ref['size'],
+                    consumed=_consumed('gigabytes'),
+                    quota=quotas['gigabytes'])
+            elif 'volumes' in overs:
+                msg = _("Quota exceeded for %(s_pid)s, tried to create "
+                        "volume (%(d_consumed)d volumes "
+                        "already consumed)")
+                LOG.warn(msg % {'s_pid': context.project_id,
+                                'd_consumed': _consumed('volumes')})
+                raise exception.VolumeLimitExceeded(allowed=quotas['volumes'])
+        try:
+            donor_id = vol_ref['project_id']
+            donor_reservations = QUOTAS.reserve(context.elevated(),
+                                                project_id=donor_id,
+                                                volumes=-1,
+                                                gigabytes=-vol_ref['size'])
+        except Exception:
+            donor_reservations = None
+            LOG.exception(_("Failed to update quota donating volume "
+                            "transfer id %s") % transfer_id)
+
+        try:
+            # Transfer ownership of the volume now, must use an elevated
+            # context.
+ self.volume_api.accept_transfer(context, + vol_ref, + context.user_id, + context.project_id) + self.db.transfer_accept(context.elevated(), + transfer_id, + context.user_id, + context.project_id) + QUOTAS.commit(context, reservations) + if donor_reservations: + QUOTAS.commit(context, donor_reservations, project_id=donor_id) + LOG.info(_("Volume %s has been transferred.") % volume_id) + except Exception: + with excutils.save_and_reraise_exception(): + QUOTAS.rollback(context, reservations) + if donor_reservations: + QUOTAS.rollback(context, donor_reservations, + project_id=donor_id) + + vol_ref = self.db.volume_get(context, volume_id) + return {'id': transfer_id, + 'display_name': transfer['display_name'], + 'volume_id': vol_ref['id']} diff --git a/cinder/tests/api/openstack/volume/contrib/__init__.py b/cinder/units.py similarity index 74% rename from cinder/tests/api/openstack/volume/contrib/__init__.py rename to cinder/units.py index 3be5ce944c..23e92a336e 100644 --- a/cinder/tests/api/openstack/volume/contrib/__init__.py +++ b/cinder/units.py @@ -1,7 +1,4 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2011 OpenStack LLC. -# All Rights Reserved. +# Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -14,6 +11,11 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. +""" +A module where we define some basic units for use across Cinder. +""" -# NOTE(vish): this forces the fixtures from tests/__init.py:setup() to work -from cinder.tests import * +KiB = 1024 +MiB = KiB * 1024 +GiB = MiB * 1024 +TiB = GiB * 1024 diff --git a/cinder/utils.py b/cinder/utils.py index c6510a7ece..707ddf5cbf 100644 --- a/cinder/utils.py +++ b/cinder/utils.py @@ -1,6 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. 
# Copyright 2011 Justin Santa Barbara @@ -20,47 +17,44 @@ """Utilities and helper functions.""" + import contextlib import datetime -import errno -import functools import hashlib import inspect -import itertools import os import pyclbr import random import re -import shlex import shutil -import signal -import socket -import struct +import stat import sys import tempfile -import time -import types -import uuid -import warnings -from xml.sax import saxutils -from eventlet import event -from eventlet import greenthread -from eventlet.green import subprocess +from eventlet import pools +from oslo.config import cfg +import paramiko +from xml.dom import minidom +from xml.parsers import expat +from xml import sax +from xml.sax import expatreader +from xml.sax import saxutils -from cinder.common import deprecated +from cinder.brick.initiator import connector from cinder import exception -from cinder import flags -from cinder.openstack.common import log as logging -from cinder.openstack.common import excutils from cinder.openstack.common import importutils +from cinder.openstack.common import lockutils +from cinder.openstack.common import log as logging +from cinder.openstack.common import processutils from cinder.openstack.common import timeutils +CONF = cfg.CONF LOG = logging.getLogger(__name__) ISO_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S" PERFECT_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%f" -FLAGS = flags.FLAGS + +synchronized = lockutils.synchronized_with_prefix('cinder-') def find_config(config_path): @@ -73,9 +67,9 @@ def find_config(config_path): """ possible_locations = [ config_path, - os.path.join(FLAGS.state_path, "etc", "cinder", config_path), - os.path.join(FLAGS.state_path, "etc", config_path), - os.path.join(FLAGS.state_path, config_path), + os.path.join(CONF.state_path, "etc", "cinder", config_path), + os.path.join(CONF.state_path, "etc", config_path), + os.path.join(CONF.state_path, config_path), "/etc/cinder/%s" % config_path, ] @@ -86,184 +80,179 @@ def find_config(config_path): raise exception.ConfigNotFound(path=os.path.abspath(config_path)) -def fetchfile(url, target): - LOG.debug(_('Fetching %s') % url) - execute('curl', '--fail', url, '-o', target) - +def as_int(obj, quiet=True): + # Try "2" -> 2 + try: + return int(obj) + except (ValueError, TypeError): + pass + # Try "2.5" -> 2 + try: + return int(float(obj)) + except (ValueError, TypeError): + pass + # Eck, not sure what this is then. + if not quiet: + raise TypeError(_("Can not translate %s to integer.") % (obj)) + return obj -def _subprocess_setup(): - # Python installs a SIGPIPE handler by default. This is usually not what - # non-Python subprocesses expect. - signal.signal(signal.SIGPIPE, signal.SIG_DFL) +def check_exclusive_options(**kwargs): + """Checks that only one of the provided options is actually not-none. -def execute(*cmd, **kwargs): - """Helper method to execute command with optional retry. - - If you add a run_as_root=True command, don't forget to add the - corresponding filter to etc/cinder/rootwrap.d ! - - :param cmd: Passed to subprocess.Popen. - :param process_input: Send to opened process. - :param check_exit_code: Single bool, int, or list of allowed exit - codes. Defaults to [0]. Raise - exception.ProcessExecutionError unless - program exits with one of these code. - :param delay_on_retry: True | False. Defaults to True. If set to - True, wait a short amount of time - before retrying. - :param attempts: How many times to retry cmd. - :param run_as_root: True | False. Defaults to False. 
If set to True,
-                        the command is prefixed by the command specified
-                        in the root_helper FLAG.
-
-    :raises exception.Error: on receiving unknown arguments
-    :raises exception.ProcessExecutionError:
-
-    :returns: a tuple, (stdout, stderr) from the spawned process, or None if
-              the command fails.
+    Iterates over all the kwargs passed in and checks that only one of said
+    arguments is not-none, if more than one is not-none then an exception will
+    be raised with the names of those arguments who were not-none.
     """
-    process_input = kwargs.pop('process_input', None)
-    check_exit_code = kwargs.pop('check_exit_code', [0])
-    ignore_exit_code = False
-    if isinstance(check_exit_code, bool):
-        ignore_exit_code = not check_exit_code
-        check_exit_code = [0]
-    elif isinstance(check_exit_code, int):
-        check_exit_code = [check_exit_code]
-    delay_on_retry = kwargs.pop('delay_on_retry', True)
-    attempts = kwargs.pop('attempts', 1)
-    run_as_root = kwargs.pop('run_as_root', False)
-    shell = kwargs.pop('shell', False)
-
-    if len(kwargs):
-        raise exception.Error(_('Got unknown keyword args '
-                                'to utils.execute: %r') % kwargs)
-
-    if run_as_root:
-
-        if FLAGS.rootwrap_config is None or FLAGS.root_helper != 'sudo':
-            deprecated.warn(_('The root_helper option (which lets you specify '
-                              'a root wrapper different from cinder-rootwrap, '
-                              'and defaults to using sudo) is now deprecated. '
-                              'You should use the rootwrap_config option '
-                              'instead.'))
-
-        if (FLAGS.rootwrap_config is not None):
-            cmd = ['sudo', 'cinder-rootwrap',
-                   FLAGS.rootwrap_config] + list(cmd)
+    if not kwargs:
+        return
+
+    pretty_keys = kwargs.pop("pretty_keys", True)
+    exclusive_options = {}
+    for (k, v) in kwargs.iteritems():
+        if v is not None:
+            exclusive_options[k] = True
+
+    if len(exclusive_options) > 1:
+        # Change the format of the names from pythonic to
+        # something that is more readable.
+        #
+        # Ex: 'the_key' -> 'the key'
+        if pretty_keys:
+            names = [k.replace('_', ' ') for k in kwargs.keys()]
         else:
-            cmd = shlex.split(FLAGS.root_helper) + list(cmd)
-    cmd = map(str, cmd)
+            names = kwargs.keys()
+        names = ", ".join(sorted(names))
+        msg = (_("May specify only one of %s") % (names))
+        raise exception.InvalidInput(reason=msg)
 
-    while attempts > 0:
-        attempts -= 1
+
+def execute(*cmd, **kwargs):
+    """Convenience wrapper around oslo's execute() method."""
+    if 'run_as_root' in kwargs and 'root_helper' not in kwargs:
+        kwargs['root_helper'] = get_root_helper()
+    return processutils.execute(*cmd, **kwargs)
+
+
+def check_ssh_injection(cmd_list):
+    ssh_injection_pattern = ['`', '$', '|', '||', ';', '&', '&&', '>', '>>',
+                             '<']
+
+    # Check whether injection attacks exist
+    for arg in cmd_list:
+        arg = arg.strip()
+
+        # Check for matching quotes on the ends
+        is_quoted = re.match('^(?P<quote>[\'"])(?P<quoted>.*)(?P=quote)$', arg)
+        if is_quoted:
+            # Check for unescaped quotes within the quoted argument
+            quoted = is_quoted.group('quoted')
+            if quoted:
+                if (re.match('[\'"]', quoted) or
+                        re.search('[^\\\\][\'"]', quoted)):
+                    raise exception.SSHInjectionThreat(command=str(cmd_list))
+        else:
+            # We only allow spaces within quoted arguments, and that
+            # is the only special character allowed within quotes
+            if len(arg.split()) > 1:
+                raise exception.SSHInjectionThreat(command=str(cmd_list))
+
+        # Second, check for dangerous characters in the command; a shell
+        # special operator is only allowed when it appears as its own,
+        # standalone argument.
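+        # For example, a standalone '|' argument is accepted, while an
+        # embedded, unescaped operator such as 'foo|bar' raises
+        # SSHInjectionThreat.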
+ for c in ssh_injection_pattern: + if arg == c: + continue + + result = arg.find(c) + if not result == -1: + if result == 0 or not arg[result - 1] == '\\': + raise exception.SSHInjectionThreat(command=cmd_list) + + +def create_channel(client, width, height): + """Invoke an interactive shell session on server.""" + channel = client.invoke_shell() + channel.resize_pty(width, height) + return channel + + +class SSHPool(pools.Pool): + """A simple eventlet pool to hold ssh connections.""" + + def __init__(self, ip, port, conn_timeout, login, password=None, + privatekey=None, *args, **kwargs): + self.ip = ip + self.port = port + self.login = login + self.password = password + self.conn_timeout = conn_timeout if conn_timeout else None + self.privatekey = privatekey + super(SSHPool, self).__init__(*args, **kwargs) + + def create(self): try: - LOG.debug(_('Running cmd (subprocess): %s'), ' '.join(cmd)) - _PIPE = subprocess.PIPE # pylint: disable=E1101 - obj = subprocess.Popen(cmd, - stdin=_PIPE, - stdout=_PIPE, - stderr=_PIPE, - close_fds=True, - preexec_fn=_subprocess_setup, - shell=shell) - result = None - if process_input is not None: - result = obj.communicate(process_input) + ssh = paramiko.SSHClient() + ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + if self.password: + ssh.connect(self.ip, + port=self.port, + username=self.login, + password=self.password, + timeout=self.conn_timeout) + elif self.privatekey: + pkfile = os.path.expanduser(self.privatekey) + privatekey = paramiko.RSAKey.from_private_key_file(pkfile) + ssh.connect(self.ip, + port=self.port, + username=self.login, + pkey=privatekey, + timeout=self.conn_timeout) else: - result = obj.communicate() - obj.stdin.close() # pylint: disable=E1101 - _returncode = obj.returncode # pylint: disable=E1101 - if _returncode: - LOG.debug(_('Result was %s') % _returncode) - if not ignore_exit_code and _returncode not in check_exit_code: - (stdout, stderr) = result - raise exception.ProcessExecutionError( - exit_code=_returncode, - stdout=stdout, - stderr=stderr, - cmd=' '.join(cmd)) - return result - except exception.ProcessExecutionError: - if not attempts: - raise + msg = _("Specify a password or private_key") + raise exception.CinderException(msg) + + # Paramiko by default sets the socket timeout to 0.1 seconds, + # ignoring what we set through the sshclient. This doesn't help for + # keeping long lived connections. Hence we have to bypass it, by + # overriding it after the transport is initialized. We are setting + # the sockettimeout to None and setting a keepalive packet so that, + # the server will keep the connection open. All that does is send + # a keepalive packet every ssh_conn_timeout seconds. + if self.conn_timeout: + transport = ssh.get_transport() + transport.sock.settimeout(None) + transport.set_keepalive(self.conn_timeout) + return ssh + except Exception as e: + msg = _("Error connecting via ssh: %s") % e + LOG.error(msg) + raise paramiko.SSHException(msg) + + def get(self): + """Return an item from the pool, when one is available. + + This may cause the calling greenthread to block. Check if a + connection is active before returning it. + + For dead connections create and return a new connection. + """ + conn = super(SSHPool, self).get() + if conn: + if conn.get_transport().is_active(): + return conn else: - LOG.debug(_('%r failed. 
Retrying.'), cmd)
-                if delay_on_retry:
-                    greenthread.sleep(random.randint(20, 200) / 100.0)
-        finally:
-            # NOTE(termie): this appears to be necessary to let the subprocess
-            #               call clean something up in between calls, without
-            #               it two execute calls in a row hangs the second one
-            greenthread.sleep(0)
+                conn.close()
+        return self.create()
 
-
-def trycmd(*args, **kwargs):
-    """
-    A wrapper around execute() to more easily handle warnings and errors.
-
-    Returns an (out, err) tuple of strings containing the output of
-    the command's stdout and stderr.  If 'err' is not empty then the
-    command can be considered to have failed.
-
-    :discard_warnings   True | False. Defaults to False. If set to True,
-                        then for succeeding commands, stderr is cleared
-
-    """
-    discard_warnings = kwargs.pop('discard_warnings', False)
-
-    try:
-        out, err = execute(*args, **kwargs)
-        failed = False
-    except exception.ProcessExecutionError, exn:
-        out, err = '', str(exn)
-        LOG.debug(err)
-        failed = True
-
-    if not failed and discard_warnings and err:
-        # Handle commands that output to stderr but otherwise succeed
-        LOG.debug(err)
-        err = ''
-
-    return out, err
-
-
-def ssh_execute(ssh, cmd, process_input=None,
-                addl_env=None, check_exit_code=True):
-    LOG.debug(_('Running cmd (SSH): %s'), ' '.join(cmd))
-    if addl_env:
-        raise exception.Error(_('Environment not supported over SSH'))
-
-    if process_input:
-        # This is (probably) fixable if we need it...
-        raise exception.Error(_('process_input not supported over SSH'))
-
-    stdin_stream, stdout_stream, stderr_stream = ssh.exec_command(cmd)
-    channel = stdout_stream.channel
-
-    #stdin.write('process_input would go here')
-    #stdin.flush()
-
-    # NOTE(justinsb): This seems suspicious...
-    # ...other SSH clients have buffering issues with this approach
-    stdout = stdout_stream.read()
-    stderr = stderr_stream.read()
-    stdin_stream.close()
-
-    exit_status = channel.recv_exit_status()
-
-    # exit_status == -1 if no exit code was returned
-    if exit_status != -1:
-        LOG.debug(_('Result was %s') % exit_status)
-        if check_exit_code and exit_status != 0:
-            raise exception.ProcessExecutionError(exit_code=exit_status,
-                                                  stdout=stdout,
-                                                  stderr=stderr,
-                                                  cmd=' '.join(cmd))
-
-    return (stdout, stderr)
+    def remove(self, ssh):
+        """Close an ssh client and remove it from free_items."""
+        ssh.close()
+        if ssh in self.free_items:
+            self.free_items.remove(ssh)
+        if self.current_size > 0:
+            self.current_size -= 1
 
 
 def cinderdir():
@@ -271,17 +260,6 @@ def cinderdir():
     return os.path.abspath(cinder.__file__).split('cinder/__init__.py')[0]
 
-def debug(arg):
-    LOG.debug(_('debug in callback: %s'), arg)
-    return arg
-
-
-def generate_uid(topic, size=8):
-    characters = '01234567890abcdefghijklmnopqrstuvwxyz'
-    choices = [random.choice(characters) for x in xrange(size)]
-    return '%s-%s' % (topic, ''.join(choices))
-
-
 # Default symbols to use for passwords. Avoids visually confusing characters.
 # ~6 bits per symbol
 DEFAULT_PASSWORD_SYMBOLS = ('23456789',  # Removed: 0,1
@@ -310,9 +288,10 @@ def last_completed_audit_period(unit=None):
     returns:  2 tuple of datetimes (begin, end)
               The begin timestamp of this audit period is the same as the
-              end of the previous."""
+              end of the previous.
+ """ if not unit: - unit = FLAGS.volume_usage_audit_period + unit = CONF.volume_usage_audit_period offset = 0 if '@' in unit: @@ -364,9 +343,9 @@ def last_completed_audit_period(unit=None): elif unit == 'day': end = datetime.datetime(hour=offset, - day=rightnow.day, - month=rightnow.month, - year=rightnow.year) + day=rightnow.day, + month=rightnow.month, + year=rightnow.year) if end >= rightnow: end = end - datetime.timedelta(days=1) begin = end - datetime.timedelta(days=1) @@ -413,42 +392,9 @@ def generate_password(length=20, symbolgroups=DEFAULT_PASSWORD_SYMBOLS): return ''.join(password) -def last_octet(address): - return int(address.split('.')[-1]) - - -def get_my_linklocal(interface): - try: - if_str = execute('ip', '-f', 'inet6', '-o', 'addr', 'show', interface) - condition = '\s+inet6\s+([0-9a-f:]+)/\d+\s+scope\s+link' - links = [re.search(condition, x) for x in if_str[0].split('\n')] - address = [w.group(1) for w in links if w is not None] - if address[0] is not None: - return address[0] - else: - raise exception.Error(_('Link Local address is not found.:%s') - % if_str) - except Exception as ex: - raise exception.Error(_("Couldn't get Link Local IP of %(interface)s" - " :%(ex)s") % locals()) - - -def parse_mailmap(mailmap='.mailmap'): - mapping = {} - if os.path.exists(mailmap): - fp = open(mailmap, 'r') - for l in fp: - l = l.strip() - if not l.startswith('#') and ' ' in l: - canonical_email, alias = l.split(' ') - mapping[alias.lower()] = canonical_email.lower() - return mapping - - -def str_dict_replace(s, mapping): - for s1, s2 in mapping.iteritems(): - s = s.replace(s1, s2) - return s +def generate_username(length=20, symbolgroups=DEFAULT_PASSWORD_SYMBOLS): + # Use the same implementation as the password generation. + return generate_password(length, symbolgroups) class LazyPluggable(object): @@ -461,7 +407,7 @@ def __init__(self, pivot, **backends): def __get_backend(self): if not self.__backend: - backend_name = FLAGS[self.__pivot] + backend_name = CONF[self.__pivot] if backend_name not in self.__backends: raise exception.Error(_('Invalid backend: %s') % backend_name) @@ -482,64 +428,44 @@ def __getattr__(self, key): return getattr(backend, key) -class LoopingCallDone(Exception): - """Exception to break out and stop a LoopingCall. +class ProtectedExpatParser(expatreader.ExpatParser): + """An expat parser which disables DTD's and entities by default.""" - The poll-function passed to LoopingCall can raise this exception to - break out of the loop normally. This is somewhat analogous to - StopIteration. 
+    def __init__(self, forbid_dtd=True, forbid_entities=True,
+                 *args, **kwargs):
+        # Python 2.x old style class
+        expatreader.ExpatParser.__init__(self, *args, **kwargs)
+        self.forbid_dtd = forbid_dtd
+        self.forbid_entities = forbid_entities

-    An optional return-value can be included as the argument to the exception;
-    this return-value will be returned by LoopingCall.wait()
+    def start_doctype_decl(self, name, sysid, pubid, has_internal_subset):
+        raise ValueError("Inline DTD forbidden")

-    """
+    def entity_decl(self, entityName, is_parameter_entity, value, base,
+                    systemId, publicId, notationName):
+        raise ValueError("<!ENTITY> entity declaration forbidden")

-    def __init__(self, retvalue=True):
-        """:param retvalue: Value that LoopingCall.wait() should return."""
-        self.retvalue = retvalue
-
-
-class LoopingCall(object):
-    def __init__(self, f=None, *args, **kw):
-        self.args = args
-        self.kw = kw
-        self.f = f
-        self._running = False
-
-    def start(self, interval, initial_delay=None):
-        self._running = True
-        done = event.Event()
-
-        def _inner():
-            if initial_delay:
-                greenthread.sleep(initial_delay)
-
-            try:
-                while self._running:
-                    self.f(*self.args, **self.kw)
-                    if not self._running:
-                        break
-                    greenthread.sleep(interval)
-            except LoopingCallDone, e:
-                self.stop()
-                done.send(e.retvalue)
-            except Exception:
-                LOG.exception(_('in looping call'))
-                done.send_exception(*sys.exc_info())
-                return
-            else:
-                done.send(True)
+    def unparsed_entity_decl(self, name, base, sysid, pubid, notation_name):
+        # expat 1.2
+        raise ValueError("<!ENTITY> unparsed entity forbidden")

-        self.done = done
+    def reset(self):
+        expatreader.ExpatParser.reset(self)
+        if self.forbid_dtd:
+            self._parser.StartDoctypeDeclHandler = self.start_doctype_decl
+        if self.forbid_entities:
+            self._parser.EntityDeclHandler = self.entity_decl
+            self._parser.UnparsedEntityDeclHandler = self.unparsed_entity_decl

-        greenthread.spawn(_inner)
-        return self.done

-    def stop(self):
-        self._running = False
-
-    def wait(self):
-        return self.done.wait()
+def safe_minidom_parse_string(xml_string):
+    """Parse an XML string using minidom safely."""
+    try:
+        return minidom.parseString(xml_string, parser=ProtectedExpatParser())
+    except sax.SAXParseException:
+        raise expat.ExpatError()


 def xhtml_escape(value):
@@ -549,36 +475,11 @@ def xhtml_escape(value):
     return saxutils.escape(value, {'"': '&quot;', "'": '&apos;'})


-def utf8(value):
-    """Try to turn a string into utf-8 if possible.
-
-    Code is directly from the utf8 function in
-    http://github.com/facebook/tornado/blob/master/tornado/escape.py
-
-    """
-    if isinstance(value, unicode):
-        return value.encode('utf-8')
-    assert isinstance(value, str)
-    return value
-
-
-def delete_if_exists(pathname):
-    """delete a file, but ignore file not found error"""
-
-    try:
-        os.unlink(pathname)
-    except OSError as e:
-        if e.errno == errno.ENOENT:
-            return
-        else:
-            raise
-
-
 def get_from_path(items, path):
     """Returns a list of items matching the specified path.

     Takes an XPath-like expression e.g. prop1/prop2/prop3, and for each item
-    in items, looks up items[prop1][prop2][prop3].  Like XPath, if any of the
+    in items, looks up items[prop1][prop2][prop3]. Like XPath, if any of the
     intermediate results are lists it will treat each list item individually.
     A 'None' in items or any child expressions will be ignored, this function
     will not throw because of None (anywhere) in items. The returned list
@@ -625,111 +526,22 @@ def get_from_path(items, path):
     return get_from_path(results, remainder)


-def flatten_dict(dict_, flattened=None):
-    """Recursively flatten a nested dictionary."""
-    flattened = flattened or {}
-    for key, value in dict_.iteritems():
-        if hasattr(value, 'iteritems'):
-            flatten_dict(value, flattened)
-        else:
-            flattened[key] = value
-    return flattened
-
-
-def partition_dict(dict_, keys):
-    """Return two dicts, one with `keys` the other with everything else."""
-    intersection = {}
-    difference = {}
-    for key, value in dict_.iteritems():
-        if key in keys:
-            intersection[key] = value
-        else:
-            difference[key] = value
-    return intersection, difference
-
-
-def map_dict_keys(dict_, key_map):
-    """Return a dict in which the dictionaries keys are mapped to new keys."""
-    mapped = {}
-    for key, value in dict_.iteritems():
-        mapped_key = key_map[key] if key in key_map else key
-        mapped[mapped_key] = value
-    return mapped
-
-
-def subset_dict(dict_, keys):
-    """Return a dict that only contains a subset of keys."""
-    subset = partition_dict(dict_, keys)[0]
-    return subset
-
-
-def check_isinstance(obj, cls):
-    """Checks that obj is of type cls, and lets PyLint infer types."""
-    if isinstance(obj, cls):
-        return obj
-    raise Exception(_('Expected object of type: %s') % (str(cls)))
-    # TODO(justinsb): Can we make this better??
-    return cls()  # Ugly PyLint hack
-
-
-def gen_uuid():
-    return uuid.uuid4()
-
-
-def is_uuid_like(val):
-    """For our purposes, a UUID is a string in canonical form:
-
-        aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa
-    """
-    try:
-        uuid.UUID(val)
-        return True
-    except (TypeError, ValueError, AttributeError):
-        return False
-
-
-def bool_from_str(val):
-    """Convert a string representation of a bool into a bool value"""
-
-    if not val:
-        return False
-    try:
-        return True if int(val) else False
-    except ValueError:
-        return val.lower() == 'true'
-
-
 def is_valid_boolstr(val):
-    """Check if the provided string is a valid bool string or not. """
+    """Check if the provided string is a valid bool string or not."""
     val = str(val).lower()
-    return val == 'true' or val == 'false' or \
-        val == 'yes' or val == 'no' or \
-        val == 'y' or val == 'n' or \
-        val == '1' or val == '0'
-
-
-def is_valid_ipv4(address):
-    """valid the address strictly as per format xxx.xxx.xxx.xxx.
-    where xxx is a value between 0 and 255.
-    """
-    parts = address.split(".")
-    if len(parts) != 4:
-        return False
-    for item in parts:
-        try:
-            if not 0 <= int(item) <= 255:
-                return False
-        except ValueError:
-            return False
-    return True
+    return (val == 'true' or val == 'false' or
+            val == 'yes' or val == 'no' or
+            val == 'y' or val == 'n' or
+            val == '1' or val == '0')


 def monkey_patch():
-    """ If the Flags.monkey_patch set as True,
+    """If CONF.monkey_patch is set to True,
     this function patches a decorator
     for all functions in specified modules.
+
     You can set decorators for each module
-    using FLAGS.monkey_patch_modules.
+    using CONF.monkey_patch_modules.
     The format is "Module path:Decorator function".
     Example: 'cinder.api.ec2.cloud:' \
      cinder.openstack.common.notifier.api.notify_decorator'
@@ -740,11 +552,11 @@ def monkey_patch():
         name - name of the function
         function - object of the function
     """
-    # If FLAGS.monkey_patch is not True, this function do nothing.
-    if not FLAGS.monkey_patch:
+    # If CONF.monkey_patch is not True, this function does nothing.
+ if not CONF.monkey_patch: return # Get list of modules and decorators - for module_and_decorator in FLAGS.monkey_patch_modules: + for module_and_decorator in CONF.monkey_patch_modules: module, decorator_name = module_and_decorator.split(':') # import decorator function decorator = importutils.import_class(decorator_name) @@ -756,68 +568,21 @@ def monkey_patch(): if isinstance(module_data[key], pyclbr.Class): clz = importutils.import_class("%s.%s" % (module, key)) for method, func in inspect.getmembers(clz, inspect.ismethod): - setattr(clz, method, + setattr( + clz, method, decorator("%s.%s.%s" % (module, key, method), func)) # set the decorator for the function if isinstance(module_data[key], pyclbr.Function): func = importutils.import_class("%s.%s" % (module, key)) setattr(sys.modules[module], key, - decorator("%s.%s" % (module, key), func)) - - -def convert_to_list_dict(lst, label): - """Convert a value or list into a list of dicts""" - if not lst: - return None - if not isinstance(lst, list): - lst = [lst] - return [{label: x} for x in lst] - - -def timefunc(func): - """Decorator that logs how long a particular function took to execute""" - @functools.wraps(func) - def inner(*args, **kwargs): - start_time = time.time() - try: - return func(*args, **kwargs) - finally: - total_time = time.time() - start_time - LOG.debug(_("timefunc: '%(name)s' took %(total_time).2f secs") % - dict(name=func.__name__, total_time=total_time)) - return inner + decorator("%s.%s" % (module, key), func)) def generate_glance_url(): """Generate the URL to glance.""" # TODO(jk0): This will eventually need to take SSL into consideration # when supported in glance. - return "http://%s:%d" % (FLAGS.glance_host, FLAGS.glance_port) - - -@contextlib.contextmanager -def logging_error(message): - """Catches exception, write message to the log, re-raise. - This is a common refinement of save_and_reraise that writes a specific - message to the log. - """ - try: - yield - except Exception as error: - with excutils.save_and_reraise_exception(): - LOG.exception(message) - - -@contextlib.contextmanager -def remove_path_on_error(path): - """Protect code that wants to operate on PATH atomically. - Any exception will cause PATH to be removed. 
- """ - try: - yield - except Exception: - with excutils.save_and_reraise_exception(): - delete_if_exists(path) + return "http://%s:%d" % (CONF.glance_host, CONF.glance_port) def make_dev_path(dev, partition=None, base='/dev'): @@ -836,7 +601,7 @@ def make_dev_path(dev, partition=None, base='/dev'): def total_seconds(td): - """Local total_seconds implementation for compatibility with python 2.6""" + """Local total_seconds implementation for compatibility with python 2.6.""" if hasattr(td, 'total_seconds'): return td.total_seconds() else: @@ -877,18 +642,6 @@ def read_cached_file(filename, cache_info, reload_func=None): return cache_info['data'] -def file_open(*args, **kwargs): - """Open file - - see built-in file() documentation for more details - - Note: The reason this is kept in a separate module is to easily - be able to provide a stub module that doesn't alter system - state at all (for unit tests) - """ - return file(*args, **kwargs) - - def hash_file(file_like_object): """Generate a hash for the contents of a file.""" checksum = hashlib.sha1() @@ -896,55 +649,12 @@ def hash_file(file_like_object): return checksum.hexdigest() -@contextlib.contextmanager -def temporary_mutation(obj, **kwargs): - """Temporarily set the attr on a particular object to a given value then - revert when finished. - - One use of this is to temporarily set the read_deleted flag on a context - object: - - with temporary_mutation(context, read_deleted="yes"): - do_something_that_needed_deleted_objects() - """ - NOT_PRESENT = object() - - old_values = {} - for attr, new_value in kwargs.items(): - old_values[attr] = getattr(obj, attr, NOT_PRESENT) - setattr(obj, attr, new_value) - - try: - yield - finally: - for attr, old_value in old_values.items(): - if old_value is NOT_PRESENT: - del obj[attr] - else: - setattr(obj, attr, old_value) - - def service_is_up(service): """Check whether a service is up based on last heartbeat.""" last_heartbeat = service['updated_at'] or service['created_at'] # Timestamps in DB are UTC. elapsed = total_seconds(timeutils.utcnow() - last_heartbeat) - return abs(elapsed) <= FLAGS.service_down_time - - -def generate_mac_address(): - """Generate an Ethernet MAC address.""" - # NOTE(vish): We would prefer to use 0xfe here to ensure that linux - # bridge mac addresses don't change, but it appears to - # conflict with libvirt, so we use the next highest octet - # that has the unicast and locally administered bits set - # properly: 0xfa. - # Discussion: https://bugs.launchpad.net/cinder/+bug/921838 - mac = [0xfa, 0x16, 0x3e, - random.randint(0x00, 0x7f), - random.randint(0x00, 0xff), - random.randint(0x00, 0xff)] - return ':'.join(map(lambda x: "%02x" % x, mac)) + return abs(elapsed) <= CONF.service_down_time def read_file_as_root(file_path): @@ -952,7 +662,7 @@ def read_file_as_root(file_path): try: out, _err = execute('cat', file_path, run_as_root=True) return out - except exception.ProcessExecutionError: + except processutils.ProcessExecutionError: raise exception.FileNotFound(file_path=file_path) @@ -984,32 +694,12 @@ def tempdir(**kwargs): finally: try: shutil.rmtree(tmpdir) - except OSError, e: + except OSError as e: LOG.debug(_('Could not remove tmpdir: %s'), str(e)) -def strcmp_const_time(s1, s2): - """Constant-time string comparison. - - :params s1: the first string - :params s2: the second string - - :return: True if the strings are equal. - - This function takes two strings and compares them. 
It is intended to be - used when doing a comparison for authentication purposes to help guard - against timing attacks. - """ - if len(s1) != len(s2): - return False - result = 0 - for (a, b) in zip(s1, s2): - result |= ord(a) ^ ord(b) - return result == 0 - - def walk_class_hierarchy(clazz, encountered=None): - """Walk class hierarchy, yielding most derived classes first""" + """Walk class hierarchy, yielding most derived classes first.""" if not encountered: encountered = [] for subclass in clazz.__subclasses__(): @@ -1021,43 +711,60 @@ def walk_class_hierarchy(clazz, encountered=None): yield subclass -class UndoManager(object): - """Provides a mechanism to facilitate rolling back a series of actions - when an exception is raised. +def get_root_helper(): + return 'sudo cinder-rootwrap %s' % CONF.rootwrap_config + + +def brick_get_connector_properties(): + """wrapper for the brick calls to automatically set + the root_helper needed for cinder. """ - def __init__(self): - self.undo_stack = [] - def undo_with(self, undo_func): - self.undo_stack.append(undo_func) + root_helper = get_root_helper() + return connector.get_connector_properties(root_helper, + CONF.my_ip) - def _rollback(self): - for undo_func in reversed(self.undo_stack): - undo_func() - def rollback_and_reraise(self, msg=None, **kwargs): - """Rollback a series of actions then re-raise the exception. +def brick_get_connector(protocol, driver=None, + execute=processutils.execute, + use_multipath=False, + device_scan_attempts=3, + *args, **kwargs): + """Wrapper to get a brick connector object. + This automatically populates the required protocol as well + as the root_helper needed to execute commands. + """ - .. note:: (sirp) This should only be called within an - exception handler. - """ - with excutils.save_and_reraise_exception(): - if msg: - LOG.exception(msg, **kwargs) + root_helper = get_root_helper() + return connector.InitiatorConnector.factory(protocol, root_helper, + driver=driver, + execute=execute, + use_multipath=use_multipath, + device_scan_attempts= + device_scan_attempts, + *args, **kwargs) - self._rollback() +def require_driver_initialized(driver): + """Verifies if `driver` is initialized -def ensure_tree(path): - """Create a directory (and any ancestor directories required) + If the driver is not initialized, an exception will be raised. - :param path: Directory to create + :params driver: The driver instance. + :raises: `exception.DriverNotInitialized` """ - try: - os.makedirs(path) - except OSError as exc: - if exc.errno == errno.EEXIST: - if not os.path.isdir(path): - raise - else: - raise + # we can't do anything if the driver didn't init + if not driver.initialized: + driver_name = driver.__class__.__name__ + LOG.error(_("Volume driver %s not initialized") % driver_name) + raise exception.DriverNotInitialized() + + +def get_file_mode(path): + """This primarily exists to make unit testing easier.""" + return stat.S_IMODE(os.stat(path).st_mode) + + +def get_file_gid(path): + """This primarily exists to make unit testing easier.""" + return os.stat(path).st_gid diff --git a/cinder/version.py b/cinder/version.py index 3f50863fcc..e3dbd70d7f 100644 --- a/cinder/version.py +++ b/cinder/version.py @@ -1,6 +1,4 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2011 OpenStack LLC +# Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain @@ -14,25 +12,12 @@ # License for the specific language governing permissions and limitations # under the License. -CINDER_VERSION = ['2013', '1', None] -YEAR, COUNT, REVISION = CINDER_VERSION -FINAL = False # This becomes true at Release Candidate time - - -def canonical_version_string(): - return '.'.join(filter(None, CINDER_VERSION)) - - -def version_string(): - if FINAL: - return canonical_version_string() - else: - return '%s-dev' % (canonical_version_string(),) - - -def vcs_version_string(): - return 'LOCALBRANCH:LOCALREVISION' +from pbr import version as pbr_version +CINDER_VENDOR = "OpenStack Foundation" +CINDER_PRODUCT = "OpenStack Cinder" +CINDER_PACKAGE = None # OS distro package version suffix -def version_string_with_vcs(): - return '%s-%s' % (canonical_version_string(), vcs_version_string()) +loaded = False +version_info = pbr_version.VersionInfo('cinder') +version_string = version_info.version_string diff --git a/cinder/volume/__init__.py b/cinder/volume/__init__.py index e810a93d48..e845ea618e 100644 --- a/cinder/volume/__init__.py +++ b/cinder/volume/__init__.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. @@ -18,8 +16,12 @@ # Importing full names to not pollute the namespace and cause possible # collisions with use of 'from cinder.volume import ' elsewhere. -import cinder.flags -import cinder.openstack.common.importutils -API = cinder.openstack.common.importutils.import_class( - cinder.flags.FLAGS.volume_api_class) + +from cinder.common import config +import cinder.openstack.common.importutils as import_utils + + +CONF = config.CONF + +API = import_utils.import_class(CONF.volume_api_class) diff --git a/cinder/volume/api.py b/cinder/volume/api.py index ece1c9682c..cc4dc55101 100644 --- a/cinder/volume/api.py +++ b/cinder/volume/api.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. @@ -20,30 +18,48 @@ Handles all requests relating to volumes. 
""" + +import collections import functools +from oslo.config import cfg + +from cinder import context from cinder.db import base from cinder import exception -from cinder import flags -from cinder.openstack.common import cfg from cinder.image import glance +from cinder import keymgr +from cinder.openstack.common import excutils from cinder.openstack.common import log as logging -from cinder.openstack.common import rpc from cinder.openstack.common import timeutils +from cinder.openstack.common import uuidutils import cinder.policy from cinder import quota +from cinder import quota_utils +from cinder.scheduler import rpcapi as scheduler_rpcapi +from cinder import utils +from cinder.volume.flows.api import create_volume +from cinder.volume import qos_specs +from cinder.volume import rpcapi as volume_rpcapi +from cinder.volume import utils as volume_utils +from cinder.volume import volume_types volume_host_opt = cfg.BoolOpt('snapshot_same_host', - default=True, - help='Create volume from snapshot at the host where snapshot resides') - -FLAGS = flags.FLAGS -FLAGS.register_opt(volume_host_opt) -flags.DECLARE('storage_availability_zone', 'cinder.volume.manager') + default=True, + help='Create volume from snapshot at the host ' + 'where snapshot resides') +volume_same_az_opt = cfg.BoolOpt('cloned_volume_same_az', + default=True, + help='Ensure that the new volumes are the ' + 'same AZ as snapshot or source volume') + +CONF = cfg.CONF +CONF.register_opt(volume_host_opt) +CONF.register_opt(volume_same_az_opt) +CONF.import_opt('storage_availability_zone', 'cinder.volume.manager') LOG = logging.getLogger(__name__) -GB = 1048576 * 1024 QUOTAS = quota.QUOTAS @@ -77,150 +93,135 @@ class API(base.Base): def __init__(self, db_driver=None, image_service=None): self.image_service = (image_service or glance.get_default_image_service()) + self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI() + self.volume_rpcapi = volume_rpcapi.VolumeAPI() + self.availability_zone_names = () + self.key_manager = keymgr.API() super(API, self).__init__(db_driver) - def create(self, context, size, name, description, snapshot=None, - image_id=None, volume_type=None, metadata=None, - availability_zone=None): - check_policy(context, 'create') - if snapshot is not None: - if snapshot['status'] != "available": - msg = _("status must be available") - raise exception.InvalidSnapshot(reason=msg) - if not size: - size = snapshot['volume_size'] - - snapshot_id = snapshot['id'] - else: - snapshot_id = None + def _valid_availability_zone(self, availability_zone): + #NOTE(bcwaldon): This approach to caching fails to handle the case + # that an availability zone is disabled/removed. 
+ if availability_zone in self.availability_zone_names: + return True + if CONF.storage_availability_zone == availability_zone: + return True - def as_int(s): - try: - return int(s) - except ValueError: - return s + azs = self.list_availability_zones() + self.availability_zone_names = [az['name'] for az in azs] + return availability_zone in self.availability_zone_names - # tolerate size as stringified int - size = as_int(size) + def list_availability_zones(self): + """Describe the known availability zones - if not isinstance(size, int) or size <= 0: - msg = (_("Volume size '%s' must be an integer and greater than 0") - % size) - raise exception.InvalidInput(reason=msg) - try: - reservations = QUOTAS.reserve(context, volumes=1, gigabytes=size) - except exception.OverQuota as e: - overs = e.kwargs['overs'] - usages = e.kwargs['usages'] - quotas = e.kwargs['quotas'] + :retval list of dicts, each with a 'name' and 'available' key + """ + topic = CONF.volume_topic + ctxt = context.get_admin_context() + services = self.db.service_get_all_by_topic(ctxt, topic) + az_data = [(s['availability_zone'], s['disabled']) for s in services] - def _consumed(name): - return (usages[name]['reserved'] + usages[name]['in_use']) + disabled_map = {} + for (az_name, disabled) in az_data: + tracked_disabled = disabled_map.get(az_name, True) + disabled_map[az_name] = tracked_disabled and disabled - pid = context.project_id - if 'gigabytes' in overs: - consumed = _consumed('gigabytes') - quota = quotas['gigabytes'] - LOG.warn(_("Quota exceeded for %(pid)s, tried to create " - "%(size)sG volume (%(consumed)dG of %(quota)dG " - "already consumed)") % locals()) - raise exception.VolumeSizeExceedsAvailableQuota() - elif 'volumes' in overs: - consumed = _consumed('volumes') - LOG.warn(_("Quota exceeded for %(pid)s, tried to create " - "volume (%(consumed)d volumes already consumed)") - % locals()) - raise exception.VolumeLimitExceeded(allowed=quotas['volumes']) - - if image_id: - # check image existence - image_meta = self.image_service.show(context, image_id) - image_size_in_gb = (int(image_meta['size']) + GB - 1) / GB - #check image size is not larger than volume size. 
- if image_size_in_gb > size: - msg = _('Size of specified image is larger than volume size.') - raise exception.InvalidInput(reason=msg) - - if availability_zone is None: - availability_zone = FLAGS.storage_availability_zone - - if volume_type is None: - volume_type_id = None - else: - volume_type_id = volume_type.get('id', None) - - options = { - 'size': size, - 'user_id': context.user_id, - 'project_id': context.project_id, - 'snapshot_id': snapshot_id, - 'availability_zone': availability_zone, - 'status': "creating", - 'attach_status': "detached", - 'display_name': name, - 'display_description': description, - 'volume_type_id': volume_type_id, - 'metadata': metadata, - } + azs = [{'name': name, 'available': not disabled} + for (name, disabled) in disabled_map.items()] - volume = self.db.volume_create(context, options) + return tuple(azs) - QUOTAS.commit(context, reservations) + def create(self, context, size, name, description, snapshot=None, + image_id=None, volume_type=None, metadata=None, + availability_zone=None, source_volume=None, + scheduler_hints=None, backup_source_volume=None): - self._cast_create_volume(context, volume['id'], snapshot_id, - image_id) - return volume + def check_volume_az_zone(availability_zone): + try: + return self._valid_availability_zone(availability_zone) + except exception.CinderException: + LOG.exception(_("Unable to query if %s is in the " + "availability zone set"), availability_zone) + return False + + create_what = { + 'context': context, + 'raw_size': size, + 'name': name, + 'description': description, + 'snapshot': snapshot, + 'image_id': image_id, + 'raw_volume_type': volume_type, + 'metadata': metadata, + 'raw_availability_zone': availability_zone, + 'source_volume': source_volume, + 'scheduler_hints': scheduler_hints, + 'key_manager': self.key_manager, + 'backup_source_volume': backup_source_volume, + } - def _cast_create_volume(self, context, volume_id, snapshot_id, - image_id): - - # NOTE(Rongze Zhu): It is a simple solution for bug 1008866 - # If snapshot_id is set, make the call create volume directly to - # the volume host where the snapshot resides instead of passing it - # through the scheduer. So snapshot can be copy to new volume. 
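The direct rpc.cast dispatch being deleted in this hunk is superseded by the flow-based create visible in the added lines: the request is packed into a dict, a flow engine is built and run, and the volume is fetched from the engine's storage. A toy stand-in for that run-then-fetch pattern (SimpleFlow and the task names are hypothetical, not Cinder's actual taskflow API)::

    class SimpleFlow(object):
        """Illustrative engine: run tasks in order, record results."""

        def __init__(self, tasks):
            self._tasks = tasks
            self.storage = {}

        def run(self):
            for name, task in self._tasks:
                # Each task can read everything produced before it.
                self.storage[name] = task(self.storage)

    flow = SimpleFlow([
        ('request', lambda s: {'size': 1, 'name': 'vol-1'}),
        ('volume', lambda s: dict(s['request'], status='creating')),
    ])
    flow.run()
    print(flow.storage['volume'])  # the finished 'volume' result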
- - if snapshot_id and FLAGS.snapshot_same_host: - snapshot_ref = self.db.snapshot_get(context, snapshot_id) - src_volume_ref = self.db.volume_get(context, - snapshot_ref['volume_id']) - topic = rpc.queue_get_for(context, - FLAGS.volume_topic, - src_volume_ref['host']) - rpc.cast(context, - topic, - {"method": "create_volume", - "args": {"volume_id": volume_id, - "snapshot_id": snapshot_id, - "image_id": image_id}}) - else: - rpc.cast(context, - FLAGS.scheduler_topic, - {"method": "create_volume", - "args": {"topic": FLAGS.volume_topic, - "volume_id": volume_id, - "snapshot_id": snapshot_id, - "image_id": image_id}}) + try: + flow_engine = create_volume.get_flow(self.scheduler_rpcapi, + self.volume_rpcapi, + self.db, + self.image_service, + check_volume_az_zone, + create_what) + except Exception: + LOG.exception(_("Failed to create api volume flow")) + raise exception.CinderException( + _("Failed to create api volume flow")) + + flow_engine.run() + volume = flow_engine.storage.fetch('volume') + return volume @wrap_check_policy def delete(self, context, volume, force=False): + if context.is_admin and context.project_id != volume['project_id']: + project_id = volume['project_id'] + else: + project_id = context.project_id + volume_id = volume['id'] if not volume['host']: + volume_utils.notify_about_volume_usage(context, + volume, "delete.start") # NOTE(vish): scheduling failed, so delete it # Note(zhiteng): update volume quota reservation try: - reservations = QUOTAS.reserve(context, volumes=-1, - gigabytes=-volume['size']) + reserve_opts = {'volumes': -1, 'gigabytes': -volume['size']} + QUOTAS.add_volume_type_opts(context, + reserve_opts, + volume['volume_type_id']) + reservations = QUOTAS.reserve(context, + project_id=project_id, + **reserve_opts) except Exception: reservations = None LOG.exception(_("Failed to update quota for deleting volume")) - - self.db.volume_destroy(context, volume_id) + self.db.volume_destroy(context.elevated(), volume_id) if reservations: - QUOTAS.commit(context, reservations) + QUOTAS.commit(context, reservations, project_id=project_id) + + volume_utils.notify_about_volume_usage(context, + volume, "delete.end") return - if not force and volume['status'] not in ["available", "error"]: - msg = _("Volume status must be available or error") + if not force and volume['status'] not in ["available", "error", + "error_restoring", + "error_extending"]: + msg = _("Volume status must be available or error, " + "but current status is: %s") % volume['status'] + raise exception.InvalidVolume(reason=msg) + + if volume['attach_status'] == "attached": + # Volume is still attached, need to detach first + raise exception.VolumeAttached(volume_id=volume_id) + + if volume['migration_status'] != None: + # Volume is migrating, wait until done + msg = _("Volume cannot be deleted while migrating") raise exception.InvalidVolume(reason=msg) snapshots = self.db.snapshot_get_all_for_volume(context, volume_id) @@ -228,14 +229,18 @@ def delete(self, context, volume, force=False): msg = _("Volume still has %d dependent snapshots") % len(snapshots) raise exception.InvalidVolume(reason=msg) + # If the volume is encrypted, delete its encryption key from the key + # manager. This operation makes volume deletion an irreversible process + # because the volume cannot be decrypted without its key. 
+ encryption_key_id = volume.get('encryption_key_id', None) + if encryption_key_id is not None: + self.key_manager.delete_key(context, encryption_key_id) + now = timeutils.utcnow() self.db.volume_update(context, volume_id, {'status': 'deleting', 'terminated_at': now}) - host = volume['host'] - rpc.cast(context, - rpc.queue_get_for(context, FLAGS.volume_topic, host), - {"method": "delete_volume", - "args": {"volume_id": volume_id}}) + + self.volume_rpcapi.delete_volume(context, volume) @wrap_check_policy def update(self, context, volume, fields): @@ -247,21 +252,37 @@ def get(self, context, volume_id): check_policy(context, 'get', volume) return volume - def get_all(self, context, search_opts=None): + def get_all(self, context, marker=None, limit=None, sort_key='created_at', + sort_dir='desc', filters={}): check_policy(context, 'get_all') - if search_opts is None: - search_opts = {} + try: + if limit is not None: + limit = int(limit) + if limit < 0: + msg = _('limit param must be positive') + raise exception.InvalidInput(reason=msg) + except ValueError: + msg = _('limit param must be an integer') + raise exception.InvalidInput(reason=msg) - if (context.is_admin and 'all_tenants' in search_opts): + if (context.is_admin and 'all_tenants' in filters): # Need to remove all_tenants to pass the filtering below. - del search_opts['all_tenants'] - volumes = self.db.volume_get_all(context) + del filters['all_tenants'] + volumes = self.db.volume_get_all(context, marker, limit, sort_key, + sort_dir) else: volumes = self.db.volume_get_all_by_project(context, - context.project_id) - if search_opts: - LOG.debug(_("Searching by: %s") % str(search_opts)) + context.project_id, + marker, limit, + sort_key, sort_dir) + + # Non-admin shouldn't see temporary target of a volume migration + if not context.is_admin: + filters['no_migration_targets'] = True + + if filters: + LOG.debug(_("Searching by: %s") % str(filters)) def _check_metadata_match(volume, searchdict): volume_metadata = {} @@ -270,18 +291,25 @@ def _check_metadata_match(volume, searchdict): for k, v in searchdict.iteritems(): if (k not in volume_metadata.keys() or - volume_metadata[k] != v): + volume_metadata[k] != v): return False return True + def _check_migration_target(volume, searchdict): + status = volume['migration_status'] + if status and status.startswith('target:'): + return False + return True + # search_option to filter_name mapping. 
- filter_mapping = {'metadata': _check_metadata_match} + filter_mapping = {'metadata': _check_metadata_match, + 'no_migration_targets': _check_migration_target} result = [] not_found = object() for volume in volumes: # go over all filters in the list - for opt, values in search_opts.iteritems(): + for opt, values in filters.iteritems(): try: filter_func = filter_mapping[opt] except KeyError: @@ -292,6 +320,7 @@ def filter_func(volume, value): else: # did not break out loop result.append(volume) # volume matches all filters volumes = result + return volumes def get_snapshot(self, context, snapshot_id): @@ -299,6 +328,11 @@ def get_snapshot(self, context, snapshot_id): rv = self.db.snapshot_get(context, snapshot_id) return dict(rv.iteritems()) + def get_volume(self, context, volume_id): + check_policy(context, 'get_volume') + rv = self.db.volume_get(context, volume_id) + return dict(rv.iteritems()) + def get_all_snapshots(self, context, search_opts=None): check_policy(context, 'get_all_snapshots') @@ -327,7 +361,7 @@ def get_all_snapshots(self, context, search_opts=None): return snapshots @wrap_check_policy - def check_attach(self, context, volume): + def check_attach(self, volume): # TODO(vish): abstract status checking? if volume['status'] != "available": msg = _("status must be available") @@ -337,23 +371,23 @@ def check_attach(self, context, volume): raise exception.InvalidVolume(reason=msg) @wrap_check_policy - def check_detach(self, context, volume): + def check_detach(self, volume): # TODO(vish): abstract status checking? - if volume['status'] == "available": - msg = _("already detached") + if volume['status'] != "in-use": + msg = _("status must be in-use to detach") raise exception.InvalidVolume(reason=msg) - def remove_from_compute(self, context, volume, instance_id, host): - """Remove volume from specified compute host.""" - rpc.call(context, - rpc.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "remove_volume_connection", - "args": {'instance_id': instance_id, - 'volume_id': volume['id']}}) - @wrap_check_policy def reserve_volume(self, context, volume): - self.update(context, volume, {"status": "attaching"}) + #NOTE(jdg): check for Race condition bug 1096983 + #explicitly get updated ref and check + volume = self.db.volume_get(context, volume['id']) + if volume['status'] == 'available': + self.update(context, volume, {"status": "attaching"}) + else: + msg = _("Volume status must be available to reserve") + LOG.error(msg) + raise exception.InvalidVolume(reason=msg) @wrap_check_policy def unreserve_volume(self, context, volume): @@ -362,7 +396,11 @@ def unreserve_volume(self, context, volume): @wrap_check_policy def begin_detaching(self, context, volume): - self.update(context, volume, {"status": "detaching"}) + # If we are in the middle of a volume migration, we don't want the user + # to see that the volume is 'detaching'. Having 'migration_status' set + # will have the same effect internally. 
+ if not volume['migration_status']: + self.update(context, volume, {"status": "detaching"}) @wrap_check_policy def roll_detaching(self, context, volume): @@ -370,90 +408,154 @@ def roll_detaching(self, context, volume): self.update(context, volume, {"status": "in-use"}) @wrap_check_policy - def attach(self, context, volume, instance_uuid, mountpoint): - host = volume['host'] - queue = rpc.queue_get_for(context, FLAGS.volume_topic, host) - return rpc.call(context, queue, - {"method": "attach_volume", - "args": {"volume_id": volume['id'], - "instance_uuid": instance_uuid, - "mountpoint": mountpoint}}) + def attach(self, context, volume, instance_uuid, host_name, + mountpoint, mode): + volume_metadata = self.get_volume_admin_metadata(context.elevated(), + volume) + if 'readonly' not in volume_metadata: + # NOTE(zhiyan): set a default value for read-only flag to metadata. + self.update_volume_admin_metadata(context.elevated(), volume, + {'readonly': 'False'}) + volume_metadata['readonly'] = 'False' + + if volume_metadata['readonly'] == 'True' and mode != 'ro': + raise exception.InvalidVolumeAttachMode(mode=mode, + volume_id=volume['id']) + + return self.volume_rpcapi.attach_volume(context, + volume, + instance_uuid, + host_name, + mountpoint, + mode) @wrap_check_policy def detach(self, context, volume): - host = volume['host'] - queue = rpc.queue_get_for(context, FLAGS.volume_topic, host) - return rpc.call(context, queue, - {"method": "detach_volume", - "args": {"volume_id": volume['id']}}) + return self.volume_rpcapi.detach_volume(context, volume) @wrap_check_policy def initialize_connection(self, context, volume, connector): - host = volume['host'] - queue = rpc.queue_get_for(context, FLAGS.volume_topic, host) - return rpc.call(context, queue, - {"method": "initialize_connection", - "args": {"volume_id": volume['id'], - "connector": connector}}) + return self.volume_rpcapi.initialize_connection(context, + volume, + connector) @wrap_check_policy - def terminate_connection(self, context, volume, connector): + def terminate_connection(self, context, volume, connector, force=False): self.unreserve_volume(context, volume) - host = volume['host'] - queue = rpc.queue_get_for(context, FLAGS.volume_topic, host) - return rpc.call(context, queue, - {"method": "terminate_connection", - "args": {"volume_id": volume['id'], - "connector": connector}}) - - def _create_snapshot(self, context, volume, name, description, - force=False): + return self.volume_rpcapi.terminate_connection(context, + volume, + connector, + force) + + @wrap_check_policy + def accept_transfer(self, context, volume, new_user, new_project): + return self.volume_rpcapi.accept_transfer(context, + volume, + new_user, + new_project) + + def _create_snapshot(self, context, + volume, name, description, + force=False, metadata=None): check_policy(context, 'create_snapshot', volume) + if volume['migration_status'] != None: + # Volume is migrating, wait until done + msg = _("Snapshot cannot be created while volume is migrating") + raise exception.InvalidVolume(reason=msg) + if ((not force) and (volume['status'] != "available")): msg = _("must be available") raise exception.InvalidVolume(reason=msg) - options = { - 'volume_id': volume['id'], - 'user_id': context.user_id, - 'project_id': context.project_id, - 'status': "creating", - 'progress': '0%', - 'volume_size': volume['size'], - 'display_name': name, - 'display_description': description} - - snapshot = self.db.snapshot_create(context, options) - host = volume['host'] - rpc.cast(context, - 
rpc.queue_get_for(context, FLAGS.volume_topic, host), - {"method": "create_snapshot", - "args": {"volume_id": volume['id'], - "snapshot_id": snapshot['id']}}) + try: + if CONF.no_snapshot_gb_quota: + reserve_opts = {'snapshots': 1} + else: + reserve_opts = {'snapshots': 1, 'gigabytes': volume['size']} + QUOTAS.add_volume_type_opts(context, + reserve_opts, + volume.get('volume_type_id')) + reservations = QUOTAS.reserve(context, **reserve_opts) + except exception.OverQuota as e: + overs = e.kwargs['overs'] + usages = e.kwargs['usages'] + quotas = e.kwargs['quotas'] + + def _consumed(name): + return (usages[name]['reserved'] + usages[name]['in_use']) + + for over in overs: + if 'gigabytes' in over: + msg = _("Quota exceeded for %(s_pid)s, tried to create " + "%(s_size)sG snapshot (%(d_consumed)dG of " + "%(d_quota)dG already consumed)") + LOG.warn(msg % {'s_pid': context.project_id, + 's_size': volume['size'], + 'd_consumed': _consumed(over), + 'd_quota': quotas[over]}) + raise exception.VolumeSizeExceedsAvailableQuota( + requested=volume['size'], + consumed=_consumed('gigabytes'), + quota=quotas['gigabytes']) + elif 'snapshots' in over: + msg = _("Quota exceeded for %(s_pid)s, tried to create " + "snapshot (%(d_consumed)d snapshots " + "already consumed)") + + LOG.warn(msg % {'s_pid': context.project_id, + 'd_consumed': _consumed(over)}) + raise exception.SnapshotLimitExceeded( + allowed=quotas[over]) + + self._check_metadata_properties(metadata) + options = {'volume_id': volume['id'], + 'user_id': context.user_id, + 'project_id': context.project_id, + 'status': "creating", + 'progress': '0%', + 'volume_size': volume['size'], + 'display_name': name, + 'display_description': description, + 'volume_type_id': volume['volume_type_id'], + 'encryption_key_id': volume['encryption_key_id'], + 'metadata': metadata} + + try: + snapshot = self.db.snapshot_create(context, options) + QUOTAS.commit(context, reservations) + except Exception: + with excutils.save_and_reraise_exception(): + try: + self.db.snapshot_destroy(context, volume['id']) + finally: + QUOTAS.rollback(context, reservations) + + self.volume_rpcapi.create_snapshot(context, volume, snapshot) + return snapshot - def create_snapshot(self, context, volume, name, description): + def create_snapshot(self, context, + volume, name, + description, metadata=None): return self._create_snapshot(context, volume, name, description, - False) + False, metadata) - def create_snapshot_force(self, context, volume, name, description): + def create_snapshot_force(self, context, + volume, name, + description, metadata=None): return self._create_snapshot(context, volume, name, description, - True) + True, metadata) @wrap_check_policy def delete_snapshot(self, context, snapshot, force=False): if not force and snapshot['status'] not in ["available", "error"]: msg = _("Volume Snapshot status must be available or error") - raise exception.InvalidVolume(reason=msg) + raise exception.InvalidSnapshot(reason=msg) self.db.snapshot_update(context, snapshot['id'], {'status': 'deleting'}) volume = self.db.volume_get(context, snapshot['volume_id']) - host = volume['host'] - rpc.cast(context, - rpc.queue_get_for(context, FLAGS.volume_topic, host), - {"method": "delete_snapshot", - "args": {"snapshot_id": snapshot['id']}}) + self.volume_rpcapi.delete_snapshot(context, snapshot, volume['host']) @wrap_check_policy def update_snapshot(self, context, snapshot, fields): @@ -470,6 +572,24 @@ def delete_volume_metadata(self, context, volume, key): """Delete the given metadata item 
from a volume.""" self.db.volume_metadata_delete(context, volume['id'], key) + def _check_metadata_properties(self, metadata=None): + if not metadata: + metadata = {} + + for k, v in metadata.iteritems(): + if len(k) == 0: + msg = _("Metadata property key blank") + LOG.warn(msg) + raise exception.InvalidVolumeMetadata(reason=msg) + if len(k) > 255: + msg = _("Metadata property key greater than 255 characters") + LOG.warn(msg) + raise exception.InvalidVolumeMetadataSize(reason=msg) + if len(v) > 255: + msg = _("Metadata property value greater than 255 characters") + LOG.warn(msg) + raise exception.InvalidVolumeMetadataSize(reason=msg) + @wrap_check_policy def update_volume_metadata(self, context, volume, metadata, delete=False): """Updates or creates volume metadata. @@ -481,11 +601,18 @@ def update_volume_metadata(self, context, volume, metadata, delete=False): if delete: _metadata = metadata else: - _metadata = self.get_volume_metadata(context, volume['id']) + orig_meta = self.get_volume_metadata(context, volume) + _metadata = orig_meta.copy() _metadata.update(metadata) - self.db.volume_metadata_update(context, volume['id'], _metadata, True) - return _metadata + self._check_metadata_properties(_metadata) + + db_meta = self.db.volume_metadata_update(context, volume['id'], + _metadata, delete) + + # TODO(jdg): Implement an RPC call for drivers that may use this info + + return db_meta def get_volume_metadata_value(self, volume, key): """Get value of particular metadata key.""" @@ -496,7 +623,98 @@ def get_volume_metadata_value(self, volume, key): return i['value'] return None - def _check_volume_availability(self, context, volume, force): + @wrap_check_policy + def get_volume_admin_metadata(self, context, volume): + """Get all administration metadata associated with a volume.""" + rv = self.db.volume_admin_metadata_get(context, volume['id']) + return dict(rv.iteritems()) + + @wrap_check_policy + def delete_volume_admin_metadata(self, context, volume, key): + """Delete the given administration metadata item from a volume.""" + self.db.volume_admin_metadata_delete(context, volume['id'], key) + + @wrap_check_policy + def update_volume_admin_metadata(self, context, volume, metadata, + delete=False): + """Updates or creates volume administration metadata. + + If delete is True, metadata items that are not specified in the + `metadata` argument will be deleted. + + """ + if delete: + _metadata = metadata + else: + orig_meta = self.get_volume_admin_metadata(context, volume) + _metadata = orig_meta.copy() + _metadata.update(metadata) + + self._check_metadata_properties(_metadata) + + self.db.volume_admin_metadata_update(context, volume['id'], + _metadata, delete) + + # TODO(jdg): Implement an RPC call for drivers that may use this info + + return _metadata + + def get_snapshot_metadata(self, context, snapshot): + """Get all metadata associated with a snapshot.""" + rv = self.db.snapshot_metadata_get(context, snapshot['id']) + return dict(rv.iteritems()) + + def delete_snapshot_metadata(self, context, snapshot, key): + """Delete the given metadata item from a snapshot.""" + self.db.snapshot_metadata_delete(context, snapshot['id'], key) + + def update_snapshot_metadata(self, context, + snapshot, metadata, + delete=False): + """Updates or creates snapshot metadata. + + If delete is True, metadata items that are not specified in the + `metadata` argument will be deleted. 
+
+        """
+        if delete:
+            _metadata = metadata
+        else:
+            orig_meta = self.get_snapshot_metadata(context, snapshot)
+            _metadata = orig_meta.copy()
+            _metadata.update(metadata)
+
+        self._check_metadata_properties(_metadata)
+
+        db_meta = self.db.snapshot_metadata_update(context,
+                                                   snapshot['id'],
+                                                   _metadata,
+                                                   delete)
+
+        # TODO(jdg): Implement an RPC call for drivers that may use this info
+
+        return db_meta
+
+    def get_snapshot_metadata_value(self, snapshot, key):
+        pass
+
+    def get_volumes_image_metadata(self, context):
+        check_policy(context, 'get_volumes_image_metadata')
+        db_data = self.db.volume_glance_metadata_get_all(context)
+        results = collections.defaultdict(dict)
+        for meta_entry in db_data:
+            results[meta_entry['volume_id']].update({meta_entry['key']:
+                                                     meta_entry['value']})
+        return results
+
+    @wrap_check_policy
+    def get_volume_image_metadata(self, context, volume):
+        db_data = self.db.volume_glance_metadata_get(context, volume['id'])
+        return dict(
+            (meta_entry.key, meta_entry.value) for meta_entry in db_data
+        )
+
+    def _check_volume_availability(self, volume, force):
         """Check if the volume can be used."""
         if volume['status'] not in ['available', 'in-use']:
             msg = _('Volume status must be available/in-use.')
@@ -508,27 +726,244 @@ def _check_volume_availability(self, context, volume, force):
     @wrap_check_policy
     def copy_volume_to_image(self, context, volume, metadata, force):
         """Create a new image from the specified volume."""
-        self._check_volume_availability(context, volume, force)
+        self._check_volume_availability(volume, force)
         recv_metadata = self.image_service.create(context, metadata)
         self.update(context, volume, {'status': 'uploading'})
-        rpc.cast(context,
-                 rpc.queue_get_for(context,
-                                   FLAGS.volume_topic,
-                                   volume['host']),
-                 {"method": "copy_volume_to_image",
-                  "args": {"volume_id": volume['id'],
-                           "image_id": recv_metadata['id']}})
+        self.volume_rpcapi.copy_volume_to_image(context,
+                                                volume,
+                                                recv_metadata)

         response = {"id": volume['id'],
-                    "updated_at": volume['updated_at'],
-                    "status": 'uploading',
-                    "display_description": volume['display_description'],
-                    "size": volume['size'],
-                    "volume_type": volume['volume_type'],
-                    "image_id": recv_metadata['id'],
-                    "container_format": recv_metadata['container_format'],
-                    "disk_format": recv_metadata['disk_format'],
-                    "image_name": recv_metadata.get('name', None)
-                    }
+                    "updated_at": volume['updated_at'],
+                    "status": 'uploading',
+                    "display_description": volume['display_description'],
+                    "size": volume['size'],
+                    "volume_type": volume['volume_type'],
+                    "image_id": recv_metadata['id'],
+                    "container_format": recv_metadata['container_format'],
+                    "disk_format": recv_metadata['disk_format'],
+                    "image_name": recv_metadata.get('name', None)}
         return response
+
+    @wrap_check_policy
+    def extend(self, context, volume, new_size):
+        if volume['status'] != 'available':
+            msg = _('Volume status must be available to extend.')
+            raise exception.InvalidVolume(reason=msg)
+
+        size_increase = (int(new_size)) - volume['size']
+        if size_increase <= 0:
+            msg = (_("New size for extend must be greater 
(current: %(size)s, " + "extended: %(new_size)s)") % {'new_size': new_size, + 'size': volume['size']}) + raise exception.InvalidInput(reason=msg) + + self.update(context, volume, {'status': 'extending'}) + self.volume_rpcapi.extend_volume(context, volume, new_size) + + @wrap_check_policy + def migrate_volume(self, context, volume, host, force_host_copy): + """Migrate the volume to the specified host.""" + + # We only handle "available" volumes for now + if volume['status'] not in ['available', 'in-use']: + msg = _('Volume status must be available/in-use.') + LOG.error(msg) + raise exception.InvalidVolume(reason=msg) + + # Make sure volume is not part of a migration + if volume['migration_status'] != None: + msg = _("Volume is already part of an active migration") + raise exception.InvalidVolume(reason=msg) + + # We only handle volumes without snapshots for now + snaps = self.db.snapshot_get_all_for_volume(context, volume['id']) + if snaps: + msg = _("volume must not have snapshots") + LOG.error(msg) + raise exception.InvalidVolume(reason=msg) + + # Make sure the host is in the list of available hosts + elevated = context.elevated() + topic = CONF.volume_topic + services = self.db.service_get_all_by_topic(elevated, topic) + found = False + for service in services: + if utils.service_is_up(service) and service['host'] == host: + found = True + if not found: + msg = (_('No available service named %s') % host) + LOG.error(msg) + raise exception.InvalidHost(reason=msg) + + # Make sure the destination host is different than the current one + if host == volume['host']: + msg = _('Destination host must be different than current host') + LOG.error(msg) + raise exception.InvalidHost(reason=msg) + + self.update(context, volume, {'migration_status': 'starting'}) + + # Call the scheduler to ensure that the host exists and that it can + # accept the volume + volume_type = {} + volume_type_id = volume['volume_type_id'] + if volume_type_id: + volume_type = volume_types.get_volume_type(context, volume_type_id) + request_spec = {'volume_properties': volume, + 'volume_type': volume_type, + 'volume_id': volume['id']} + self.scheduler_rpcapi.migrate_volume_to_host(context, + CONF.volume_topic, + volume['id'], + host, + force_host_copy, + request_spec) + + @wrap_check_policy + def migrate_volume_completion(self, context, volume, new_volume, error): + # This is a volume swap initiated by Nova, not Cinder. Nova expects + # us to return the new_volume_id. 
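The extend() method added above tolerates the new size arriving as a stringified integer and rejects anything that does not strictly grow the volume before handing off to the rpcapi. Its validation reduces to this sketch (hypothetical helper name)::

    def validate_extend(current_size, new_size):
        """Return the size increase, or raise if the volume won't grow."""
        size_increase = int(new_size) - current_size
        if size_increase <= 0:
            raise ValueError('New size for extend must be greater than '
                             'current size. (current: %s, extended: %s)'
                             % (current_size, new_size))
        return size_increase

    assert validate_extend(10, '11') == 1  # stringified sizes are fine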
+ if not (volume['migration_status'] or new_volume['migration_status']): + return new_volume['id'] + + if not volume['migration_status']: + msg = _('Source volume not mid-migration.') + raise exception.InvalidVolume(reason=msg) + + if not new_volume['migration_status']: + msg = _('Destination volume not mid-migration.') + raise exception.InvalidVolume(reason=msg) + + expected_status = 'target:%s' % volume['id'] + if not new_volume['migration_status'] == expected_status: + msg = (_('Destination has migration_status %(stat)s, expected ' + '%(exp)s.') % {'stat': new_volume['migration_status'], + 'exp': expected_status}) + raise exception.InvalidVolume(reason=msg) + + return self.volume_rpcapi.migrate_volume_completion(context, volume, + new_volume, error) + + @wrap_check_policy + def update_readonly_flag(self, context, volume, flag): + if volume['status'] != 'available': + msg = _('Volume status must be available to update readonly flag.') + raise exception.InvalidVolume(reason=msg) + self.update_volume_admin_metadata(context.elevated(), volume, + {'readonly': str(flag)}) + + @wrap_check_policy + def retype(self, context, volume, new_type, migration_policy=None): + """Attempt to modify the type associated with an existing volume.""" + if volume['status'] not in ['available', 'in-use']: + msg = _('Unable to update type due to incorrect status ' + 'on volume: %s') % volume['id'] + LOG.error(msg) + raise exception.InvalidVolume(reason=msg) + + if volume['migration_status'] is not None: + msg = (_("Volume %s is already part of an active migration.") + % volume['id']) + LOG.error(msg) + raise exception.InvalidVolume(reason=msg) + + if migration_policy and migration_policy not in ['on-demand', 'never']: + msg = _('migration_policy must be \'on-demand\' or \'never\', ' + 'passed: %s') % str(new_type) + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + # Support specifying volume type by ID or name + try: + if uuidutils.is_uuid_like(new_type): + vol_type = volume_types.get_volume_type(context, new_type) + else: + vol_type = volume_types.get_volume_type_by_name(context, + new_type) + except exception.InvalidVolumeType: + msg = _('Invalid volume_type passed: %s') % str(new_type) + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + vol_type_id = vol_type['id'] + vol_type_qos_id = vol_type['qos_specs_id'] + + old_vol_type = None + old_vol_type_id = volume['volume_type_id'] + old_vol_type_qos_id = None + + # Error if the original and new type are the same + if volume['volume_type_id'] == vol_type_id: + msg = _('New volume_type same as original: %s') % str(new_type) + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + if volume['volume_type_id']: + old_vol_type = volume_types.get_volume_type( + context, old_vol_type_id) + old_vol_type_qos_id = old_vol_type['qos_specs_id'] + + # We don't support changing encryption requirements yet + old_enc = volume_types.get_volume_type_encryption(context, + old_vol_type_id) + new_enc = volume_types.get_volume_type_encryption(context, + vol_type_id) + if old_enc != new_enc: + msg = _('Retype cannot change encryption requirements') + raise exception.InvalidInput(reason=msg) + + # We don't support changing QoS at the front-end yet for in-use volumes + # TODO(avishay): Call Nova to change QoS setting (libvirt has support + # - virDomainSetBlockIoTune() - Nova does not have support yet). 
+ if (volume['status'] != 'available' and + old_vol_type_qos_id != vol_type_qos_id): + for qos_id in [old_vol_type_qos_id, vol_type_qos_id]: + if qos_id: + specs = qos_specs.get_qos_specs(context.elevated(), qos_id) + if specs['qos_specs']['consumer'] != 'back-end': + msg = _('Retype cannot change front-end qos specs for ' + 'in-use volumes') + raise exception.InvalidInput(reason=msg) + + self.update(context, volume, {'status': 'retyping'}) + + # We're checking here in so that we can report any quota issues as + # early as possible, but won't commit until we change the type. We + # pass the reservations onward in case we need to roll back. + reservations = quota_utils.get_volume_type_reservation(context, volume, + vol_type_id) + request_spec = {'volume_properties': volume, + 'volume_id': volume['id'], + 'volume_type': vol_type, + 'migration_policy': migration_policy, + 'quota_reservations': reservations} + + self.scheduler_rpcapi.retype(context, CONF.volume_topic, volume['id'], + request_spec=request_spec, + filter_properties={}) + + +class HostAPI(base.Base): + def __init__(self): + super(HostAPI, self).__init__() + + """Sub-set of the Volume Manager API for managing host operations.""" + def set_host_enabled(self, context, host, enabled): + """Sets the specified host's ability to accept new volumes.""" + raise NotImplementedError() + + def get_host_uptime(self, context, host): + """Returns the result of calling "uptime" on the target host.""" + raise NotImplementedError() + + def host_power_action(self, context, host, action): + raise NotImplementedError() + + def set_host_maintenance(self, context, host, mode): + """Start/Stop host maintenance window. On start, it triggers + volume evacuation. + """ + raise NotImplementedError() diff --git a/cinder/volume/configuration.py b/cinder/volume/configuration.py new file mode 100644 index 0000000000..2599ee9ad9 --- /dev/null +++ b/cinder/volume/configuration.py @@ -0,0 +1,80 @@ +# Copyright (c) 2012 Rackspace Hosting +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Configuration support for all drivers. + +This module allows support for setting configurations either from default +or from a particular FLAGS group, to be able to set multiple configurations +for a given set of values. + +For instance, two lvm configurations can be set by naming them in groups as + + [lvm1] + volume_group=lvm-group-1 + ... + + [lvm2] + volume_group=lvm-group-2 + ... + +And the configuration group name will be passed in so that all calls to +configuration.volume_group within that instance will be mapped to the proper +named group. + +This class also ensures the implementation's configuration is grafted into the +option group. This is due to the way cfg works. All cfg options must be defined +and registered in the group in which they are used. 
+""" + + +from oslo.config import cfg + +from cinder.openstack.common import log as logging + + +CONF = cfg.CONF +LOG = logging.getLogger(__name__) + + +class Configuration(object): + + def __init__(self, volume_opts, config_group=None): + """This takes care of grafting the implementation's config + values into the config group + """ + self.config_group = config_group + + # set the local conf so that __call__'s know what to use + if self.config_group: + self._ensure_config_values(volume_opts) + self.local_conf = CONF._get(self.config_group) + else: + self.local_conf = CONF + + def _ensure_config_values(self, volume_opts): + CONF.register_opts(volume_opts, group=self.config_group) + + def append_config_values(self, volume_opts): + self._ensure_config_values(volume_opts) + + def safe_get(self, value): + try: + return self.__getattr__(value) + except cfg.NoSuchOptError: + return None + + def __getattr__(self, value): + return getattr(self.local_conf, value) diff --git a/cinder/volume/driver.py b/cinder/volume/driver.py index 1f540d1599..5098755122 100644 --- a/cinder/volume/driver.py +++ b/cinder/volume/driver.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. @@ -20,268 +18,495 @@ """ -import os -import tempfile import time -import urllib +from oslo.config import cfg + +from cinder.brick.iscsi import iscsi from cinder import exception -from cinder import flags +from cinder.image import image_utils +from cinder.openstack.common import excutils +from cinder.openstack.common import fileutils from cinder.openstack.common import log as logging -from cinder.openstack.common import cfg +from cinder.openstack.common import processutils from cinder import utils -from cinder.volume import iscsi - +from cinder.volume import rpcapi as volume_rpcapi +from cinder.volume import utils as volume_utils LOG = logging.getLogger(__name__) volume_opts = [ - cfg.StrOpt('volume_group', - default='cinder-volumes', - help='Name for the VG that will contain exported volumes'), cfg.IntOpt('num_shell_tries', default=3, help='number of times to attempt to run flakey shell commands'), - cfg.IntOpt('num_iscsi_scan_tries', - default=3, - help='number of times to rescan iSCSI target to find volume'), + cfg.IntOpt('reserved_percentage', + default=0, + help='The percentage of backend capacity is reserved'), cfg.IntOpt('iscsi_num_targets', default=100, - help='Number of iscsi target ids per host'), + help='The maximum number of iscsi target ids per host'), cfg.StrOpt('iscsi_target_prefix', default='iqn.2010-10.org.openstack:', help='prefix for iscsi volumes'), cfg.StrOpt('iscsi_ip_address', default='$my_ip', - help='use this ip for iscsi'), + help='The IP address that the iSCSI daemon is listening on'), cfg.IntOpt('iscsi_port', default=3260, help='The port that the iSCSI daemon is listening on'), - cfg.StrOpt('rbd_pool', - default='rbd', - help='the RADOS pool in which rbd volumes are stored'), - cfg.StrOpt('rbd_user', - default=None, - help='the RADOS client name for accessing rbd volumes'), - cfg.StrOpt('rbd_secret_uuid', + cfg.IntOpt('num_volume_device_scan_tries', + deprecated_name='num_iscsi_scan_tries', + default=3, + help='The maximum number of times to rescan targets' + ' to find volume'), + cfg.StrOpt('volume_backend_name', default=None, - help='the libvirt uuid of the secret for the rbd_user' + help='The backend name for a given driver 
+    cfg.BoolOpt('use_multipath_for_image_xfer',
+                default=False,
+                help='Do we attach/detach volumes in cinder using multipath '
+                     'for volume to image and image to volume transfers?'),
+    cfg.StrOpt('volume_clear',
+               default='zero',
+               help='Method used to wipe old volumes (valid options are: '
+                    'none, zero, shred)'),
+    cfg.IntOpt('volume_clear_size',
+               default=0,
+               help='Size in MiB to wipe at start of old volumes. 0 => all'),
+    cfg.StrOpt('iscsi_helper',
+               default='tgtadm',
+               help='iscsi target user-land tool to use'),
+    cfg.StrOpt('volumes_dir',
+               default='$state_path/volumes',
+               help='Volume configuration file storage '
+                    'directory'),
+    cfg.StrOpt('iet_conf',
+               default='/etc/iet/ietd.conf',
+               help='IET configuration file'),
+    cfg.StrOpt('lio_initiator_iqns',
+               default='',
+               help=('Comma-separated list of initiator IQNs '
+                     'allowed to connect to the '
+                     'iSCSI target. (From Nova compute nodes.)')),
+    cfg.StrOpt('iscsi_iotype',
+               default='fileio',
+               help=('Sets the behavior of the iSCSI target '
+                     'to either perform blockio or fileio; '
+                     'optionally, auto can be set and Cinder '
+                     'will autodetect type of backing device')),
+    cfg.StrOpt('volume_dd_blocksize',
+               default='1M',
+               help='The default block size used when copying/clearing '
                    'volumes'),
-    cfg.StrOpt('volume_tmp_dir',
-               default=None,
-               help='where to store temporary image files if the volume '
-                    'driver does not write them directly to the volume'),
-    ]
+]
+
+# for backward compatibility
+iser_opts = [
+    cfg.IntOpt('num_iser_scan_tries',
+               default=3,
+               help='The maximum number of times to rescan iSER target '
+                    'to find volume'),
+    cfg.IntOpt('iser_num_targets',
+               default=100,
+               help='The maximum number of iser target ids per host'),
+    cfg.StrOpt('iser_target_prefix',
+               default='iqn.2010-10.org.iser.openstack:',
+               help='prefix for iser volumes'),
+    cfg.StrOpt('iser_ip_address',
+               default='$my_ip',
+               help='The IP address that the iSER daemon is listening on'),
+    cfg.IntOpt('iser_port',
+               default=3260,
+               help='The port that the iSER daemon is listening on'),
+    cfg.StrOpt('iser_helper',
+               default='tgtadm',
+               help='iser target user-land tool to use'),
+]
 
-FLAGS = flags.FLAGS
-FLAGS.register_opts(volume_opts)
+
+CONF = cfg.CONF
+CONF.register_opts(volume_opts)
+CONF.register_opts(iser_opts)
 
 
 class VolumeDriver(object):
     """Executes commands relating to Volumes."""
+
+    VERSION = "N/A"
+
     def __init__(self, execute=utils.execute, *args, **kwargs):
         # NOTE(vish): db is set by Manager
-        self.db = None
+        self.db = kwargs.get('db')
+        self.configuration = kwargs.get('configuration', None)
+        if self.configuration:
+            self.configuration.append_config_values(volume_opts)
         self.set_execute(execute)
+        self._stats = {}
+
+        # set True by manager after successful check_for_setup
+        self._initialized = False
 
     def set_execute(self, execute):
         self._execute = execute
 
+    def set_initialized(self):
+        self._initialized = True
+
+    @property
+    def initialized(self):
+        return self._initialized
+
+    def get_version(self):
+        """Get the current version of this driver."""
+        return self.VERSION
+
+    def _is_non_recoverable(self, err, non_recoverable_list):
+        for item in non_recoverable_list:
+            if item in err:
+                return True
+
+        return False
+
     def _try_execute(self, *command, **kwargs):
         # NOTE(vish): Volume commands can partially fail due to timing, but
         #             running them a second time on failure will usually
         #             recover nicely.
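+        # NOTE: with the default num_shell_tries of 3, the back-off below
+        #       grows as tries ** 2, i.e. sleeps of 1s and then 4s before
+        #       the final attempt.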
+ + non_recoverable = kwargs.pop('no_retry_list', []) + tries = 0 while True: try: self._execute(*command, **kwargs) return True - except exception.ProcessExecutionError: + except processutils.ProcessExecutionError as ex: tries = tries + 1 - if tries >= FLAGS.num_shell_tries: + + if tries >= self.configuration.num_shell_tries or\ + self._is_non_recoverable(ex.stderr, non_recoverable): raise + LOG.exception(_("Recovering from a failed execute. " "Try number %s"), tries) time.sleep(tries ** 2) def check_for_setup_error(self): - """Returns an error if prerequisites aren't met""" - out, err = self._execute('vgs', '--noheadings', '-o', 'name', - run_as_root=True) - volume_groups = out.split() - if not FLAGS.volume_group in volume_groups: - exception_message = (_("volume group %s doesn't exist") - % FLAGS.volume_group) - raise exception.VolumeBackendAPIException(data=exception_message) - - def _create_volume(self, volume_name, sizestr): - self._try_execute('lvcreate', '-L', sizestr, '-n', - volume_name, FLAGS.volume_group, run_as_root=True) - - def _copy_volume(self, srcstr, deststr, size_in_g): - # Use O_DIRECT to avoid thrashing the system buffer cache - direct_flags = ('iflag=direct', 'oflag=direct') - - # Check whether O_DIRECT is supported - try: - self._execute('dd', 'count=0', 'if=%s' % srcstr, 'of=%s' % deststr, - *direct_flags, run_as_root=True) - except exception.ProcessExecutionError: - direct_flags = () - - # Perform the copy - self._execute('dd', 'if=%s' % srcstr, 'of=%s' % deststr, - 'count=%d' % (size_in_g * 1024), 'bs=1M', - *direct_flags, run_as_root=True) - - def _volume_not_present(self, volume_name): - path_name = '%s/%s' % (FLAGS.volume_group, volume_name) - try: - self._try_execute('lvdisplay', path_name, run_as_root=True) - except Exception as e: - # If the volume isn't present - return True - return False - - def _delete_volume(self, volume, size_in_g): - """Deletes a logical volume.""" - # zero out old volumes to prevent data leaking between users - # TODO(ja): reclaiming space should be done lazy and low priority - self._copy_volume('/dev/zero', self.local_path(volume), size_in_g) - dev_path = self.local_path(volume) - if os.path.exists(dev_path): - self._try_execute('dmsetup', 'remove', '-f', dev_path, - run_as_root=True) - self._try_execute('lvremove', '-f', "%s/%s" % - (FLAGS.volume_group, - self._escape_snapshot(volume['name'])), - run_as_root=True) - - def _sizestr(self, size_in_g): - if int(size_in_g) == 0: - return '100M' - return '%sG' % size_in_g - - # Linux LVM reserves name that starts with snapshot, so that - # such volume name can't be created. Mangle it. - def _escape_snapshot(self, snapshot_name): - if not snapshot_name.startswith('snapshot'): - return snapshot_name - return '_' + snapshot_name + raise NotImplementedError() def create_volume(self, volume): - """Creates a logical volume. Can optionally return a Dictionary of - changes to the volume object to be persisted.""" - self._create_volume(volume['name'], self._sizestr(volume['size'])) + """Creates a volume. Can optionally return a Dictionary of + changes to the volume object to be persisted. 
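+
+        A minimal illustrative sketch of such a return value (the address,
+        target id and IQN here are assumed), using the iSCSI-style
+        'provider_location' layout seen elsewhere in this patch::
+
+            return {'provider_location':
+                    '10.0.0.1:3260,1 iqn.2010-10.org.openstack:volume-0001 1'}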
+ """ + raise NotImplementedError() def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot.""" - self._create_volume(volume['name'], self._sizestr(volume['size'])) - self._copy_volume(self.local_path(snapshot), self.local_path(volume), - snapshot['volume_size']) + raise NotImplementedError() + + def create_cloned_volume(self, volume, src_vref): + """Creates a clone of the specified volume.""" + raise NotImplementedError() def delete_volume(self, volume): - """Deletes a logical volume.""" - if self._volume_not_present(volume['name']): - # If the volume isn't present, then don't attempt to delete - return True - - # TODO(yamahata): lvm can't delete origin volume only without - # deleting derived snapshots. Can we do something fancy? - out, err = self._execute('lvdisplay', '--noheading', - '-C', '-o', 'Attr', - '%s/%s' % (FLAGS.volume_group, - volume['name']), - run_as_root=True) - # fake_execute returns None resulting unit test error - if out: - out = out.strip() - if (out[0] == 'o') or (out[0] == 'O'): - raise exception.VolumeIsBusy(volume_name=volume['name']) - - self._delete_volume(volume, volume['size']) + """Deletes a volume.""" + raise NotImplementedError() def create_snapshot(self, snapshot): """Creates a snapshot.""" - orig_lv_name = "%s/%s" % (FLAGS.volume_group, snapshot['volume_name']) - self._try_execute('lvcreate', '-L', - self._sizestr(snapshot['volume_size']), - '--name', self._escape_snapshot(snapshot['name']), - '--snapshot', orig_lv_name, run_as_root=True) + raise NotImplementedError() def delete_snapshot(self, snapshot): """Deletes a snapshot.""" - if self._volume_not_present(self._escape_snapshot(snapshot['name'])): - # If the snapshot isn't present, then don't attempt to delete - return True - - # TODO(yamahata): zeroing out the whole snapshot triggers COW. - # it's quite slow. - self._delete_volume(snapshot, snapshot['volume_size']) + raise NotImplementedError() def local_path(self, volume): - # NOTE(vish): stops deprecation warning - escaped_group = FLAGS.volume_group.replace('-', '--') - escaped_name = self._escape_snapshot(volume['name']).replace('-', '--') - return "/dev/mapper/%s-%s" % (escaped_group, escaped_name) + raise NotImplementedError() def ensure_export(self, context, volume): - """Synchronously recreates an export for a logical volume.""" + """Synchronously recreates an export for a volume.""" raise NotImplementedError() def create_export(self, context, volume): """Exports the volume. Can optionally return a Dictionary of changes - to the volume object to be persisted.""" + to the volume object to be persisted. 
+ """ raise NotImplementedError() def remove_export(self, context, volume): - """Removes an export for a logical volume.""" + """Removes an export for a volume.""" raise NotImplementedError() def initialize_connection(self, volume, connector): """Allow connection to connector and return connection info.""" raise NotImplementedError() - def terminate_connection(self, volume, connector): + def terminate_connection(self, volume, connector, **kwargs): """Disallow connection from connector""" raise NotImplementedError() - def attach_volume(self, context, volume_id, instance_uuid, mountpoint): - """ Callback for volume attached to instance.""" + def attach_volume(self, context, volume, instance_uuid, host_name, + mountpoint): + """Callback for volume attached to instance or host.""" pass - def detach_volume(self, context, volume_id): - """ Callback for volume detached.""" + def detach_volume(self, context, volume): + """Callback for volume detached.""" pass def get_volume_stats(self, refresh=False): """Return the current state of the volume service. If 'refresh' is - True, run the update first.""" + True, run the update first. + """ return None def do_setup(self, context): - """Any initialization the volume driver does while starting""" + """Any initialization the volume driver does while starting.""" + pass + + def validate_connector(self, connector): + """Fail if connector doesn't contain all the data needed by driver.""" pass + def _copy_volume_data_cleanup(self, context, volume, properties, + attach_info, remote, force=False): + self._detach_volume(attach_info) + if remote: + rpcapi = volume_rpcapi.VolumeAPI() + rpcapi.terminate_connection(context, volume, properties, + force=force) + else: + self.terminate_connection(volume, properties, force=False) + + def copy_volume_data(self, context, src_vol, dest_vol, remote=None): + """Copy data from src_vol to dest_vol.""" + LOG.debug(_('copy_data_between_volumes %(src)s -> %(dest)s.') + % {'src': src_vol['name'], 'dest': dest_vol['name']}) + + properties = utils.brick_get_connector_properties() + dest_remote = True if remote in ['dest', 'both'] else False + dest_orig_status = dest_vol['status'] + try: + dest_attach_info = self._attach_volume(context, + dest_vol, + properties, + remote=dest_remote) + except Exception: + with excutils.save_and_reraise_exception(): + msg = _("Failed to attach volume %(vol)s") + LOG.error(msg % {'vol': dest_vol['id']}) + self.db.volume_update(context, dest_vol['id'], + {'status': dest_orig_status}) + + src_remote = True if remote in ['src', 'both'] else False + src_orig_status = src_vol['status'] + try: + src_attach_info = self._attach_volume(context, + src_vol, + properties, + remote=src_remote) + except Exception: + with excutils.save_and_reraise_exception(): + msg = _("Failed to attach volume %(vol)s") + LOG.error(msg % {'vol': src_vol['id']}) + self.db.volume_update(context, src_vol['id'], + {'status': src_orig_status}) + self._copy_volume_data_cleanup(context, dest_vol, properties, + dest_attach_info, dest_remote, + force=True) + + try: + size_in_mb = int(src_vol['size']) * 1024 # vol size is in GB + volume_utils.copy_volume( + src_attach_info['device']['path'], + dest_attach_info['device']['path'], + size_in_mb, + self.configuration.volume_dd_blocksize) + copy_error = False + except Exception: + with excutils.save_and_reraise_exception(): + msg = _("Failed to copy volume %(src)s to %(dest)d") + LOG.error(msg % {'src': src_vol['id'], 'dest': dest_vol['id']}) + copy_error = True + finally: + 
self._copy_volume_data_cleanup(context, dest_vol, properties, + dest_attach_info, dest_remote, + force=copy_error) + self._copy_volume_data_cleanup(context, src_vol, properties, + src_attach_info, src_remote, + force=copy_error) + def copy_image_to_volume(self, context, volume, image_service, image_id): """Fetch the image from image_service and write it to the volume.""" - raise NotImplementedError() + LOG.debug(_('copy_image_to_volume %s.') % volume['name']) - def copy_volume_to_image(self, context, volume, image_service, image_id): + properties = utils.brick_get_connector_properties() + attach_info = self._attach_volume(context, volume, properties) + + try: + image_utils.fetch_to_raw(context, + image_service, + image_id, + attach_info['device']['path'], + self.configuration.volume_dd_blocksize, + size=volume['size']) + finally: + self._detach_volume(attach_info) + self.terminate_connection(volume, properties) + + def copy_volume_to_image(self, context, volume, image_service, image_meta): """Copy the volume to the specified image.""" - raise NotImplementedError() + LOG.debug(_('copy_volume_to_image %s.') % volume['name']) + + properties = utils.brick_get_connector_properties() + attach_info = self._attach_volume(context, volume, properties) - def clone_image(self, volume, image_location): + try: + image_utils.upload_volume(context, + image_service, + image_meta, + attach_info['device']['path']) + finally: + self._detach_volume(attach_info) + self.terminate_connection(volume, properties) + + def _attach_volume(self, context, volume, properties, remote=False): + """Attach the volume.""" + if remote: + rpcapi = volume_rpcapi.VolumeAPI() + conn = rpcapi.initialize_connection(context, volume, properties) + else: + conn = self.initialize_connection(volume, properties) + + # Use Brick's code to do attach/detach + use_multipath = self.configuration.use_multipath_for_image_xfer + device_scan_attempts = self.configuration.num_volume_device_scan_tries + protocol = conn['driver_volume_type'] + connector = utils.brick_get_connector(protocol, + use_multipath=use_multipath, + device_scan_attempts= + device_scan_attempts, + conn=conn) + device = connector.connect_volume(conn['data']) + host_device = device['path'] + + if not connector.check_valid_device(host_device): + raise exception.DeviceUnavailable(path=host_device, + reason=(_("Unable to access " + "the backend storage " + "via the path " + "%(path)s.") % + {'path': host_device})) + return {'conn': conn, 'device': device, 'connector': connector} + + def _detach_volume(self, attach_info): + """Disconnect the volume from the host.""" + # Use Brick's code to do attach/detach + connector = attach_info['connector'] + connector.disconnect_volume(attach_info['conn']['data'], + attach_info['device']) + + def clone_image(self, volume, image_location, image_id, image_meta): """Create a volume efficiently from an existing image. image_location is a string whose format depends on the image service backend in use. The driver should use it to determine whether cloning is possible. - Returns a boolean indicating whether cloning occurred + image_id is a string which represents id of the image. + It can be used by the driver to introspect internal + stores or registry to do an efficient image clone. + + image_meta is a dictionary that includes 'disk_format' (e.g. + raw, qcow2) and other image attributes that allow drivers to + decide whether they can clone the image without first requiring + conversion. + + Returns a dict of volume properties eg. 
provider_location, and a
+        boolean indicating whether cloning occurred.
+        """
+        return None, False
+
+    def backup_volume(self, context, backup, backup_service):
+        """Create a new backup from an existing volume."""
+        volume = self.db.volume_get(context, backup['volume_id'])
+
+        LOG.debug(_('Creating a new backup for volume %s.') %
+                  volume['name'])
+
+        properties = utils.brick_get_connector_properties()
+        attach_info = self._attach_volume(context, volume, properties)
+
+        try:
+            volume_path = attach_info['device']['path']
+            with utils.temporary_chown(volume_path):
+                with fileutils.file_open(volume_path) as volume_file:
+                    backup_service.backup(backup, volume_file)
+
+        finally:
+            self._detach_volume(attach_info)
+            self.terminate_connection(volume, properties)
+
+    def restore_backup(self, context, backup, volume, backup_service):
+        """Restore an existing backup to a new or existing volume."""
+        LOG.debug(_('Restoring backup %(backup)s to '
+                    'volume %(volume)s.') %
+                  {'backup': backup['id'],
+                   'volume': volume['name']})
+
+        properties = utils.brick_get_connector_properties()
+        attach_info = self._attach_volume(context, volume, properties)
+
+        try:
+            volume_path = attach_info['device']['path']
+            with utils.temporary_chown(volume_path):
+                with fileutils.file_open(volume_path, 'wb') as volume_file:
+                    backup_service.restore(backup, volume['id'], volume_file)
+
+        finally:
+            self._detach_volume(attach_info)
+            self.terminate_connection(volume, properties)
+
+    def clear_download(self, context, volume):
+        """Clean up after an interrupted image copy."""
+        pass
+
+    def extend_volume(self, volume, new_size):
+        msg = _("Extend volume not implemented")
+        raise NotImplementedError(msg)
+
+    def migrate_volume(self, context, volume, host):
+        """Migrate the volume to the specified host.
+
+        Returns a boolean indicating whether the migration occurred, as well
+        as model_update.
+
+        :param context: Context
+        :param volume: A dictionary describing the volume to migrate
+        :param host: A dictionary describing the host to migrate to, where
+                     host['host'] is its name, and host['capabilities'] is a
+                     dictionary of its reported capabilities.
+        """
+        return (False, None)
+
+    def retype(self, context, volume, new_type, diff, host):
+        """Convert the volume to be of the new type.
+
+        Returns a boolean indicating whether the retype occurred.
+
+        :param context: Context
+        :param volume: A dictionary describing the volume to migrate
+        :param new_type: A dictionary describing the volume type to convert to
+        :param diff: A dictionary with the difference between the two types
+        :param host: A dictionary describing the host to migrate to, where
+                     host['host'] is its name, and host['capabilities'] is a
+                     dictionary of its reported capabilities.
+        """
+        return False
+
+    def accept_transfer(self, context, volume, new_user, new_project):
+        """Accept the transfer of a volume for a new user/project."""
+        pass
+
 
 class ISCSIDriver(VolumeDriver):
     """Executes commands relating to ISCSI volumes.
@@ -300,118 +525,8 @@ class ISCSIDriver(VolumeDriver): """ def __init__(self, *args, **kwargs): - self.tgtadm = iscsi.get_target_admin() super(ISCSIDriver, self).__init__(*args, **kwargs) - def set_execute(self, execute): - super(ISCSIDriver, self).set_execute(execute) - self.tgtadm.set_execute(execute) - - def ensure_export(self, context, volume): - """Synchronously recreates an export for a logical volume.""" - # NOTE(jdg): tgtadm doesn't use the iscsi_targets table - # TODO(jdg): In the future move all of the dependent stuff into the - # cooresponding target admin class - if not isinstance(self.tgtadm, iscsi.TgtAdm): - try: - iscsi_target = self.db.volume_get_iscsi_target_num(context, - volume['id']) - except exception.NotFound: - LOG.info(_("Skipping ensure_export. No iscsi_target " - "provisioned for volume: %s"), volume['id']) - return - else: - iscsi_target = 1 # dummy value when using TgtAdm - - iscsi_name = "%s%s" % (FLAGS.iscsi_target_prefix, volume['name']) - volume_path = "/dev/%s/%s" % (FLAGS.volume_group, volume['name']) - - # NOTE(jdg): For TgtAdm case iscsi_name is the ONLY param we need - # should clean this all up at some point in the future - self.tgtadm.create_iscsi_target(iscsi_name, iscsi_target, - 0, volume_path, - check_exit_code=False) - - def _ensure_iscsi_targets(self, context, host): - """Ensure that target ids have been created in datastore.""" - # NOTE(jdg): tgtadm doesn't use the iscsi_targets table - # TODO(jdg): In the future move all of the dependent stuff into the - # cooresponding target admin class - if not isinstance(self.tgtadm, iscsi.TgtAdm): - host_iscsi_targets = self.db.iscsi_target_count_by_host(context, - host) - if host_iscsi_targets >= FLAGS.iscsi_num_targets: - return - - # NOTE(vish): Target ids start at 1, not 0. - for target_num in xrange(1, FLAGS.iscsi_num_targets + 1): - target = {'host': host, 'target_num': target_num} - self.db.iscsi_target_create_safe(context, target) - - def create_export(self, context, volume): - """Creates an export for a logical volume.""" - #BOOKMARK(jdg) - - iscsi_name = "%s%s" % (FLAGS.iscsi_target_prefix, volume['name']) - volume_path = "/dev/%s/%s" % (FLAGS.volume_group, volume['name']) - model_update = {} - - # TODO(jdg): In the future move all of the dependent stuff into the - # cooresponding target admin class - if not isinstance(self.tgtadm, iscsi.TgtAdm): - lun = 0 - self._ensure_iscsi_targets(context, volume['host']) - iscsi_target = self.db.volume_allocate_iscsi_target(context, - volume['id'], - volume['host']) - else: - lun = 1 # For tgtadm the controller is lun 0, dev starts at lun 1 - iscsi_target = 0 # NOTE(jdg): Not used by tgtadm - - # NOTE(jdg): For TgtAdm case iscsi_name is the ONLY param we need - # should clean this all up at some point in the future - tid = self.tgtadm.create_iscsi_target(iscsi_name, - iscsi_target, - 0, - volume_path) - model_update['provider_location'] = _iscsi_location( - FLAGS.iscsi_ip_address, tid, iscsi_name, lun) - return model_update - - def remove_export(self, context, volume): - """Removes an export for a logical volume.""" - # NOTE(jdg): tgtadm doesn't use the iscsi_targets table - # TODO(jdg): In the future move all of the dependent stuff into the - # cooresponding target admin class - if not isinstance(self.tgtadm, iscsi.TgtAdm): - try: - iscsi_target = self.db.volume_get_iscsi_target_num(context, - volume['id']) - except exception.NotFound: - LOG.info(_("Skipping remove_export. 
No iscsi_target " - "provisioned for volume: %s"), volume['id']) - return - else: - iscsi_target = 0 - - try: - - # NOTE: provider_location may be unset if the volume hasn't - # been exported - location = volume['provider_location'].split(' ') - iqn = location[1] - - # ietadm show will exit with an error - # this export has already been removed - self.tgtadm.show_target(iscsi_target, iqn=iqn) - - except Exception as e: - LOG.info(_("Skipping remove_export. No iscsi_target " - "is presently exported for volume: %s"), volume['id']) - return - - self.tgtadm.remove_iscsi_target(iscsi_target, 0, volume['id']) - def _do_iscsi_discovery(self, volume): #TODO(justinsb): Deprecate discovery and use stored info #NOTE(justinsb): Discovery won't work with CHAP-secured targets (?) @@ -419,11 +534,23 @@ def _do_iscsi_discovery(self, volume): volume_name = volume['name'] - (out, _err) = self._execute('iscsiadm', '-m', 'discovery', - '-t', 'sendtargets', '-p', volume['host'], - run_as_root=True) + try: + # NOTE(griff) We're doing the split straight away which should be + # safe since using '@' in hostname is considered invalid + + (out, _err) = self._execute('iscsiadm', '-m', 'discovery', + '-t', 'sendtargets', '-p', + volume['host'].split('@')[0], + run_as_root=True) + except processutils.ProcessExecutionError as ex: + LOG.error(_("ISCSI discovery attempt failed for:%s") % + volume['host'].split('@')[0]) + LOG.debug(_("Error from iscsiadm -m discovery: %s") % ex.stderr) + return None + for target in out.splitlines(): - if FLAGS.iscsi_ip_address in target and volume_name in target: + if (self.configuration.iscsi_ip_address in target + and volume_name in target): return target return None @@ -449,6 +576,9 @@ def _get_iscsi_properties(self, volume): the authentication details. Right now, either auth_method is not present meaning no authentication, or auth_method == `CHAP` meaning use CHAP with the specified credentials. 
+
+        :access_mode:    the volume access mode allowed to the client
+                         ('rw' or 'ro' currently supported)
         """
 
         properties = {}
@@ -462,9 +592,9 @@ def _get_iscsi_properties(self, volume):
         location = self._do_iscsi_discovery(volume)
 
         if not location:
-            raise exception.InvalidVolume(_("Could not find iSCSI export "
-                                            " for volume %s") %
-                                          (volume['name']))
+            msg = (_("Could not find iSCSI export for volume %s") %
+                   (volume['name']))
+            raise exception.InvalidVolume(reason=msg)
 
         LOG.debug(_("ISCSI Discovery: Found %s") % (location))
         properties['target_discovered'] = True
@@ -475,7 +605,10 @@ def _get_iscsi_properties(self, volume):
         try:
             properties['target_lun'] = int(results[2])
         except (IndexError, ValueError):
-            if FLAGS.iscsi_helper == 'tgtadm':
+            if (self.configuration.volume_driver in
+                    ['cinder.volume.drivers.lvm.LVMISCSIDriver',
+                     'cinder.volume.drivers.lvm.ThinLVMVolumeDriver'] and
+                    self.configuration.iscsi_helper == 'tgtadm'):
                 properties['target_lun'] = 1
             else:
                 properties['target_lun'] = 0
@@ -490,21 +623,43 @@ def _get_iscsi_properties(self, volume):
             properties['auth_username'] = auth_username
             properties['auth_password'] = auth_secret
 
+        geometry = volume.get('provider_geometry', None)
+        if geometry:
+            (physical_block_size, logical_block_size) = geometry.split()
+            properties['physical_block_size'] = physical_block_size
+            properties['logical_block_size'] = logical_block_size
+
+        encryption_key_id = volume.get('encryption_key_id', None)
+        properties['encrypted'] = encryption_key_id is not None
+
         return properties
 
-    def _run_iscsiadm(self, iscsi_properties, iscsi_command):
+    def _run_iscsiadm(self, iscsi_properties, iscsi_command, **kwargs):
+        check_exit_code = kwargs.pop('check_exit_code', 0)
         (out, err) = self._execute('iscsiadm', '-m', 'node', '-T',
                                    iscsi_properties['target_iqn'],
                                    '-p', iscsi_properties['target_portal'],
-                                   *iscsi_command, run_as_root=True)
+                                   *iscsi_command, run_as_root=True,
+                                   check_exit_code=check_exit_code)
         LOG.debug("iscsiadm %s: stdout=%s stderr=%s" %
                   (iscsi_command, out, err))
         return (out, err)
 
-    def _iscsiadm_update(self, iscsi_properties, property_key, property_value):
+    def _run_iscsiadm_bare(self, iscsi_command, **kwargs):
+        check_exit_code = kwargs.pop('check_exit_code', 0)
+        (out, err) = self._execute('iscsiadm',
+                                   *iscsi_command,
+                                   run_as_root=True,
+                                   check_exit_code=check_exit_code)
+        LOG.debug("iscsiadm %s: stdout=%s stderr=%s" %
+                  (iscsi_command, out, err))
+        return (out, err)
+
+    def _iscsiadm_update(self, iscsi_properties, property_key, property_value,
+                         **kwargs):
         iscsi_command = ('--op', 'update', '-n', property_key,
                          '-v', property_value)
-        return self._run_iscsiadm(iscsi_properties, iscsi_command)
+        return self._run_iscsiadm(iscsi_properties, iscsi_command, **kwargs)
 
     def initialize_connection(self, volume, connector):
         """Initializes the connection and returns connection info.
@@ -520,33 +675,85 @@ def initialize_connection(self, volume, connector):
             'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001',
             'target_portal': '127.0.0.0.1:3260',
             'volume_id': 1,
+            'access_mode': 'rw'
         }
     }
 
         """
+        if CONF.iscsi_helper == 'lioadm':
+            self.tgtadm.initialize_connection(volume, connector)
+
         iscsi_properties = self._get_iscsi_properties(volume)
         return {
             'driver_volume_type': 'iscsi',
             'data': iscsi_properties
         }
 
-    def terminate_connection(self, volume, connector):
+    def validate_connector(self, connector):
+        # iSCSI drivers require the initiator information
+        if 'initiator' not in connector:
+            err_msg = (_('The volume driver requires the iSCSI initiator '
+                         'name in the connector.'))
+            LOG.error(err_msg)
+            raise exception.VolumeBackendAPIException(data=err_msg)
+
+    def terminate_connection(self, volume, connector, **kwargs):
         pass
 
-    def copy_image_to_volume(self, context, volume, image_service, image_id):
-        """Fetch the image from image_service and write it to the volume."""
-        volume_path = self.local_path(volume)
-        with utils.temporary_chown(volume_path):
-            with utils.file_open(volume_path, "wb") as image_file:
-                image_service.download(context, image_id, image_file)
+    def _get_iscsi_initiator(self):
+        """Get iscsi initiator name for this machine."""
+        # NOTE openiscsi stores initiator name in a file that
+        #      needs root permission to read.
+        contents = utils.read_file_as_root('/etc/iscsi/initiatorname.iscsi')
+        for l in contents.split('\n'):
+            if l.startswith('InitiatorName='):
+                return l[l.index('=') + 1:].strip()
 
-    def copy_volume_to_image(self, context, volume, image_service, image_id):
-        """Copy the volume to the specified image."""
-        volume_path = self.local_path(volume)
-        with utils.temporary_chown(volume_path):
-            with utils.file_open(volume_path) as volume_file:
-                image_service.update(context, image_id, {}, volume_file)
+    def get_volume_stats(self, refresh=False):
+        """Get volume stats.
+
+        If 'refresh' is True, update the stats first.
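+
+        The dict assembled by _update_volume_stats below carries, at a
+        minimum, volume_backend_name, vendor_name, driver_version,
+        storage_protocol and the capacity/QoS fields the scheduler
+        filters on.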
+ """ + if refresh: + self._update_volume_stats() + + return self._stats + + def _update_volume_stats(self): + """Retrieve stats info from volume group.""" + + LOG.debug(_("Updating volume stats")) + data = {} + backend_name = self.configuration.safe_get('volume_backend_name') + data["volume_backend_name"] = backend_name or 'Generic_iSCSI' + data["vendor_name"] = 'Open Source' + data["driver_version"] = '1.0' + data["storage_protocol"] = 'iSCSI' + + data['total_capacity_gb'] = 'infinite' + data['free_capacity_gb'] = 'infinite' + data['reserved_percentage'] = 100 + data['QoS_support'] = False + self._stats = data + + def get_target_admin(self): + root_helper = utils.get_root_helper() + if CONF.iscsi_helper == 'iseradm': + return iscsi.ISERTgtAdm(root_helper, CONF.volumes_dir, + CONF.iscsi_target_prefix) + elif CONF.iscsi_helper == 'tgtadm': + return iscsi.TgtAdm(root_helper, + CONF.volumes_dir, + CONF.iscsi_target_prefix) + elif CONF.iscsi_helper == 'fake': + return iscsi.FakeIscsiHelper() + elif CONF.iscsi_helper == 'lioadm': + return iscsi.LioAdm(root_helper, + CONF.lio_initiator_iqns, + CONF.iscsi_target_prefix) + else: + return iscsi.IetAdm(root_helper, CONF.iet_conf, CONF.iscsi_iotype) class FakeISCSIDriver(ISCSIDriver): @@ -555,6 +762,9 @@ def __init__(self, *args, **kwargs): super(FakeISCSIDriver, self).__init__(execute=self.fake_execute, *args, **kwargs) + def create_volume(self, volume): + pass + def check_for_setup_error(self): """No setup necessary in fake mode.""" pass @@ -562,10 +772,10 @@ def check_for_setup_error(self): def initialize_connection(self, volume, connector): return { 'driver_volume_type': 'iscsi', - 'data': {} + 'data': {'access_mode': 'rw'} } - def terminate_connection(self, volume, connector): + def terminate_connection(self, volume, connector, **kwargs): pass @staticmethod @@ -575,336 +785,141 @@ def fake_execute(cmd, *_args, **_kwargs): return (None, None) -class RBDDriver(VolumeDriver): - """Implements RADOS block device (RBD) volume commands""" - - def check_for_setup_error(self): - """Returns an error if prerequisites aren't met""" - (stdout, stderr) = self._execute('rados', 'lspools') - pools = stdout.split("\n") - if not FLAGS.rbd_pool in pools: - exception_message = (_("rbd has no pool %s") % - FLAGS.rbd_pool) - raise exception.VolumeBackendAPIException(data=exception_message) - - def _supports_layering(self): - stdout, _ = self._execute('rbd', '--help') - return 'clone' in stdout - - def create_volume(self, volume): - """Creates a logical volume.""" - if int(volume['size']) == 0: - size = 100 - else: - size = int(volume['size']) * 1024 - args = ['rbd', 'create', - '--pool', FLAGS.rbd_pool, - '--size', size, - volume['name']] - if self._supports_layering(): - args += ['--new-format'] - self._try_execute(*args) - - def _clone(self, volume, src_pool, src_image, src_snap): - self._try_execute('rbd', 'clone', - '--pool', src_pool, - '--image', src_image, - '--snap', src_snap, - '--dest-pool', FLAGS.rbd_pool, - '--dest', volume['name']) - - def _resize(self, volume): - size = int(volume['size']) * 1024 - self._try_execute('rbd', 'resize', - '--pool', FLAGS.rbd_pool, - '--image', volume['name'], - '--size', size) - - def create_volume_from_snapshot(self, volume, snapshot): - """Creates a volume from a snapshot.""" - self._clone(volume, FLAGS.rbd_pool, - snapshot['volume_name'], snapshot['name']) - if int(volume['size']): - self._resize(volume) +class ISERDriver(ISCSIDriver): + """Executes commands relating to ISER volumes. 
-    def delete_volume(self, volume):
-        """Deletes a logical volume."""
-        stdout, _ = self._execute('rbd', 'snap', 'ls',
-                                  '--pool', FLAGS.rbd_pool,
-                                  volume['name'])
-        if stdout.count('\n') > 1:
-            raise exception.VolumeIsBusy(volume_name=volume['name'])
-        self._try_execute('rbd', 'rm',
-                          '--pool', FLAGS.rbd_pool,
-                          volume['name'])
+    We make use of model provider properties as follows:
 
-    def create_snapshot(self, snapshot):
-        """Creates an rbd snapshot"""
-        self._try_execute('rbd', 'snap', 'create',
-                          '--pool', FLAGS.rbd_pool,
-                          '--snap', snapshot['name'],
-                          snapshot['volume_name'])
-        if self._supports_layering():
-            self._try_execute('rbd', 'snap', 'protect',
-                              '--pool', FLAGS.rbd_pool,
-                              '--snap', snapshot['name'],
-                              snapshot['volume_name'])
+    ``provider_location``
+        if present, contains the iSER target information in the same
+        format as an ietadm discovery
+        i.e. '<ip>:<port>,<portal> <target IQN>'
 
-    def delete_snapshot(self, snapshot):
-        """Deletes an rbd snapshot"""
-        if self._supports_layering():
-            try:
-                self._try_execute('rbd', 'snap', 'unprotect',
-                                  '--pool', FLAGS.rbd_pool,
-                                  '--snap', snapshot['name'],
-                                  snapshot['volume_name'])
-            except exception.ProcessExecutionError:
-                raise exception.SnapshotIsBusy(snapshot_name=snapshot['name'])
-        self._try_execute('rbd', 'snap', 'rm',
-                          '--pool', FLAGS.rbd_pool,
-                          '--snap', snapshot['name'],
-                          snapshot['volume_name'])
+    ``provider_auth``
+        if present, contains a space-separated triple:
+        '<auth method> <auth key> <auth value>'.
+        `CHAP` is the only auth_method in use at the moment.
+    """
+    def __init__(self, *args, **kwargs):
+        super(ISERDriver, self).__init__(*args, **kwargs)
+        # for backward compatibility
+        self.configuration.num_iscsi_scan_tries = \
+            self.configuration.num_iser_scan_tries
+        self.configuration.iscsi_num_targets = \
+            self.configuration.iser_num_targets
+        self.configuration.iscsi_target_prefix = \
+            self.configuration.iser_target_prefix
+        self.configuration.iscsi_ip_address = \
+            self.configuration.iser_ip_address
+        self.configuration.iscsi_port = self.configuration.iser_port
 
-    def local_path(self, volume):
-        """Returns the path of the rbd volume."""
-        # This is the same as the remote path
-        # since qemu accesses it directly.
-        return "rbd:%s/%s" % (FLAGS.rbd_pool, volume['name'])
+    def initialize_connection(self, volume, connector):
+        """Initializes the connection and returns connection info.
 
-    def ensure_export(self, context, volume):
-        """Synchronously recreates an export for a logical volume."""
-        pass
+        The iser driver returns a driver_volume_type of 'iser'.
+        The format of the driver data is defined in _get_iscsi_properties.
+ Example return value:: - def create_export(self, context, volume): - """Exports the volume""" - pass + { + 'driver_volume_type': 'iser' + 'data': { + 'target_discovered': True, + 'target_iqn': + 'iqn.2010-10.org.iser.openstack:volume-00000001', + 'target_portal': '127.0.0.0.1:3260', + 'volume_id': 1, + } + } - def remove_export(self, context, volume): - """Removes an export for a logical volume""" - pass + """ - def initialize_connection(self, volume, connector): + iser_properties = self._get_iscsi_properties(volume) return { - 'driver_volume_type': 'rbd', - 'data': { - 'name': '%s/%s' % (FLAGS.rbd_pool, volume['name']), - 'auth_enabled': FLAGS.rbd_secret_uuid is not None, - 'auth_username': FLAGS.rbd_user, - 'secret_type': 'ceph', - 'secret_uuid': FLAGS.rbd_secret_uuid, - } + 'driver_volume_type': 'iser', + 'data': iser_properties } - def terminate_connection(self, volume, connector): - pass + def _update_volume_stats(self): + """Retrieve stats info from volume group.""" - def _parse_location(self, location): - prefix = 'rbd://' - if not location.startswith(prefix): - reason = _('Image %s is not stored in rbd') % location - raise exception.ImageUnacceptable(reason) - pieces = map(urllib.unquote, location[len(prefix):].split('/')) - if any(map(lambda p: p == '', pieces)): - reason = _('Image %s has blank components') % location - raise exception.ImageUnacceptable(reason) - if len(pieces) != 4: - reason = _('Image %s is not an rbd snapshot') % location - raise exception.ImageUnacceptable(reason) - return pieces - - def _get_fsid(self): - stdout, _ = self._execute('ceph', 'fsid') - return stdout.rstrip('\n') - - def _is_cloneable(self, image_location): - try: - fsid, pool, image, snapshot = self._parse_location(image_location) - except exception.ImageUnacceptable: - return False + LOG.debug(_("Updating volume stats")) + data = {} + backend_name = self.configuration.safe_get('volume_backend_name') + data["volume_backend_name"] = backend_name or 'Generic_iSER' + data["vendor_name"] = 'Open Source' + data["driver_version"] = '1.0' + data["storage_protocol"] = 'iSER' - if self._get_fsid() != fsid: - reason = _('%s is in a different ceph cluster') % image_location - LOG.debug(reason) - return False + data['total_capacity_gb'] = 'infinite' + data['free_capacity_gb'] = 'infinite' + data['reserved_percentage'] = 100 + data['QoS_support'] = False + self._stats = data - # check that we can read the image - try: - self._execute('rbd', 'info', - '--pool', pool, - '--image', image, - '--snap', snapshot) - except exception.ProcessExecutionError: - LOG.debug(_('Unable to read image %s') % image_location) - return False - - return True - - def clone_image(self, volume, image_location): - if image_location is None or not self._is_cloneable(image_location): - return False - _, pool, image, snapshot = self._parse_location(image_location) - self._clone(volume, pool, image, snapshot) - self._resize(volume) - return True - - def copy_image_to_volume(self, context, volume, image_service, image_id): - # TODO(jdurgin): replace with librbd - # this is a temporary hack, since rewriting this driver - # to use librbd would take too long - if FLAGS.volume_tmp_dir and not os.exists(FLAGS.volume_tmp_dir): - os.makedirs(FLAGS.volume_tmp_dir) - - with tempfile.NamedTemporaryFile(dir=FLAGS.volume_tmp_dir) as tmp: - image_service.download(context, image_id, tmp) - # import creates the image, so we must remove it first - self._try_execute('rbd', 'rm', - '--pool', FLAGS.rbd_pool, - volume['name']) - self._try_execute('rbd', 
'import', - '--pool', FLAGS.rbd_pool, - tmp.name, volume['name']) - - -class SheepdogDriver(VolumeDriver): - """Executes commands relating to Sheepdog Volumes""" - - def check_for_setup_error(self): - """Returns an error if prerequisites aren't met""" - try: - #NOTE(francois-charlier) Since 0.24 'collie cluster info -r' - # gives short output, but for compatibility reason we won't - # use it and just check if 'running' is in the output. - (out, err) = self._execute('collie', 'cluster', 'info') - if not 'running' in out.split(): - exception_message = (_("Sheepdog is not working: %s") % out) - raise exception.VolumeBackendAPIException( - data=exception_message) - - except exception.ProcessExecutionError: - exception_message = _("Sheepdog is not working") - raise exception.VolumeBackendAPIException(data=exception_message) - - def create_volume(self, volume): - """Creates a sheepdog volume""" - self._try_execute('qemu-img', 'create', - "sheepdog:%s" % volume['name'], - self._sizestr(volume['size'])) + def get_target_admin(self): + root_helper = utils.get_root_helper() - def create_volume_from_snapshot(self, volume, snapshot): - """Creates a sheepdog volume from a snapshot.""" - self._try_execute('qemu-img', 'create', '-b', - "sheepdog:%s:%s" % (snapshot['volume_name'], - snapshot['name']), - "sheepdog:%s" % volume['name']) - - def delete_volume(self, volume): - """Deletes a logical volume""" - self._try_execute('collie', 'vdi', 'delete', volume['name']) - - def create_snapshot(self, snapshot): - """Creates a sheepdog snapshot""" - self._try_execute('qemu-img', 'snapshot', '-c', snapshot['name'], - "sheepdog:%s" % snapshot['volume_name']) - - def delete_snapshot(self, snapshot): - """Deletes a sheepdog snapshot""" - self._try_execute('collie', 'vdi', 'delete', snapshot['volume_name'], - '-s', snapshot['name']) - - def local_path(self, volume): - return "sheepdog:%s" % volume['name'] - - def ensure_export(self, context, volume): - """Safely and synchronously recreates an export for a logical volume""" - pass + if CONF.iser_helper == 'fake': + return iscsi.FakeIscsiHelper() + else: + return iscsi.ISERTgtAdm(root_helper, + CONF.volumes_dir) - def create_export(self, context, volume): - """Exports the volume""" - pass - def remove_export(self, context, volume): - """Removes an export for a logical volume""" - pass +class FakeISERDriver(FakeISCSIDriver): + """Logs calls instead of executing.""" + def __init__(self, *args, **kwargs): + super(FakeISERDriver, self).__init__(execute=self.fake_execute, + *args, **kwargs) def initialize_connection(self, volume, connector): return { - 'driver_volume_type': 'sheepdog', - 'data': { - 'name': volume['name'] - } + 'driver_volume_type': 'iser', + 'data': {} } - def terminate_connection(self, volume, connector): - pass - - -class LoggingVolumeDriver(VolumeDriver): - """Logs and records calls, for unit tests.""" - - def check_for_setup_error(self): - pass - - def create_volume(self, volume): - self.log_action('create_volume', volume) - - def delete_volume(self, volume): - self.log_action('delete_volume', volume) - - def local_path(self, volume): - print "local_path not implemented" - raise NotImplementedError() - - def ensure_export(self, context, volume): - self.log_action('ensure_export', volume) + @staticmethod + def fake_execute(cmd, *_args, **_kwargs): + """Execute that simply logs the command.""" + LOG.debug(_("FAKE ISER: %s"), cmd) + return (None, None) - def create_export(self, context, volume): - self.log_action('create_export', volume) - def 
remove_export(self, context, volume): - self.log_action('remove_export', volume) +class FibreChannelDriver(VolumeDriver): + """Executes commands relating to Fibre Channel volumes.""" + def __init__(self, *args, **kwargs): + super(FibreChannelDriver, self).__init__(*args, **kwargs) def initialize_connection(self, volume, connector): - self.log_action('initialize_connection', volume) - - def terminate_connection(self, volume, connector): - self.log_action('terminate_connection', volume) + """Initializes the connection and returns connection info. - _LOGS = [] + The driver returns a driver_volume_type of 'fibre_channel'. + The target_wwn can be a single entry or a list of wwns that + correspond to the list of remote wwn(s) that will export the volume. + Example return values: - @staticmethod - def clear_logs(): - LoggingVolumeDriver._LOGS = [] + { + 'driver_volume_type': 'fibre_channel' + 'data': { + 'target_discovered': True, + 'target_lun': 1, + 'target_wwn': '1234567890123', + 'access_mode': 'rw' + } + } - @staticmethod - def log_action(action, parameters): - """Logs the command.""" - LOG.debug(_("LoggingVolumeDriver: %s") % (action)) - log_dictionary = {} - if parameters: - log_dictionary = dict(parameters) - log_dictionary['action'] = action - LOG.debug(_("LoggingVolumeDriver: %s") % (log_dictionary)) - LoggingVolumeDriver._LOGS.append(log_dictionary) + or - @staticmethod - def all_logs(): - return LoggingVolumeDriver._LOGS + { + 'driver_volume_type': 'fibre_channel' + 'data': { + 'target_discovered': True, + 'target_lun': 1, + 'target_wwn': ['1234567890123', '0987654321321'], + 'access_mode': 'rw' + } + } - @staticmethod - def logs_like(action, **kwargs): - matches = [] - for entry in LoggingVolumeDriver._LOGS: - if entry['action'] != action: - continue - match = True - for k, v in kwargs.iteritems(): - if entry.get(k) != v: - match = False - break - if match: - matches.append(entry) - return matches - - -def _iscsi_location(ip, target, iqn, lun=None): - return "%s:%s,%s %s %s" % (ip, FLAGS.iscsi_port, target, iqn, lun) + """ + msg = _("Driver must implement initialize_connection") + raise NotImplementedError(msg) diff --git a/cinder/volume/drivers/__init__.py b/cinder/volume/drivers/__init__.py new file mode 100644 index 0000000000..850085b23b --- /dev/null +++ b/cinder/volume/drivers/__init__.py @@ -0,0 +1,22 @@ +# Copyright 2012 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +:mod:`cinder.volume.driver` -- Cinder Drivers +===================================================== + +.. automodule:: cinder.volume.driver + :platform: Unix + :synopsis: Module containing all the Cinder drivers. +""" diff --git a/cinder/volume/drivers/block_device.py b/cinder/volume/drivers/block_device.py new file mode 100644 index 0000000000..0cdfe506ca --- /dev/null +++ b/cinder/volume/drivers/block_device.py @@ -0,0 +1,366 @@ +# Copyright (c) 2013 Mirantis, Inc. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import os + +from oslo.config import cfg + +from cinder.brick.iscsi import iscsi +from cinder import context +from cinder.db.sqlalchemy import api +from cinder import exception +from cinder.image import image_utils +from cinder.openstack.common import log as logging +from cinder import utils +from cinder.volume import driver +from cinder.volume import utils as volutils + + +LOG = logging.getLogger(__name__) + +volume_opts = [ + cfg.ListOpt('available_devices', + default=[], + help='List of all available devices'), +] + +CONF = cfg.CONF +CONF.register_opts(volume_opts) + + +class BlockDeviceDriver(driver.ISCSIDriver): + VERSION = '1.0.0' + + def __init__(self, *args, **kwargs): + self.tgtadm = self.get_target_admin() + + super(BlockDeviceDriver, self).__init__(*args, **kwargs) + self.configuration.append_config_values(volume_opts) + + def set_execute(self, execute): + super(BlockDeviceDriver, self).set_execute(execute) + self.tgtadm.set_execute(execute) + + def check_for_setup_error(self): + pass + + def create_volume(self, volume): + device = self.find_appropriate_size_device(volume['size']) + LOG.info("Create %s on %s" % (volume['name'], device)) + return { + 'provider_location': self._iscsi_location(None, None, None, None, + device), + } + + def initialize_connection(self, volume, connector): + if connector['host'] != volume['host']: + return super(BlockDeviceDriver, self). \ + initialize_connection(volume, connector) + else: + return { + 'driver_volume_type': 'local', + 'data': {'device_path': self.local_path(volume)}, + } + + def terminate_connection(self, volume, connector, **kwargs): + pass + + def create_export(self, context, volume): + """Creates an export for a logical volume.""" + + iscsi_name = "%s%s" % (self.configuration.iscsi_target_prefix, + volume['name']) + volume_path = self.local_path(volume) + model_update = {} + + # TODO(jdg): In the future move all of the dependent stuff into the + # corresponding target admin class + if not isinstance(self.tgtadm, iscsi.TgtAdm): + lun = 0 + self._ensure_iscsi_targets(context, volume['host']) + iscsi_target = self.db.volume_allocate_iscsi_target(context, + volume['id'], + volume['host']) + else: + lun = 1 # For tgtadm the controller is lun 0, dev starts at lun 1 + iscsi_target = 0 # NOTE(jdg): Not used by tgtadm + + # Use the same method to generate the username and the password. 
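+        # The generated CHAP pair is persisted through the 'provider_auth'
+        # model update below, which is how ensure_export later recovers the
+        # same credentials from volume['provider_auth'].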
+        chap_username = utils.generate_username()
+        chap_password = utils.generate_password()
+        chap_auth = self._iscsi_authentication('IncomingUser', chap_username,
+                                               chap_password)
+        # NOTE(jdg): For TgtAdm case iscsi_name is the ONLY param we need
+        # should clean this all up at some point in the future
+        tid = self.tgtadm.create_iscsi_target(iscsi_name,
+                                              iscsi_target,
+                                              0,
+                                              volume_path,
+                                              chap_auth)
+        model_update['provider_location'] = self._iscsi_location(
+            self.configuration.iscsi_ip_address, tid, iscsi_name, lun,
+            volume_path)
+        model_update['provider_auth'] = self._iscsi_authentication(
+            'CHAP', chap_username, chap_password)
+        return model_update
+
+    def remove_export(self, context, volume):
+        """Removes an export for a logical volume."""
+        # NOTE(jdg): tgtadm doesn't use the iscsi_targets table
+        # TODO(jdg): In the future move all of the dependent stuff into the
+        #            corresponding target admin class
+
+        if isinstance(self.tgtadm, iscsi.LioAdm):
+            try:
+                iscsi_target = self.db.volume_get_iscsi_target_num(
+                    context,
+                    volume['id'])
+            except exception.NotFound:
+                LOG.info(_("Skipping remove_export. No iscsi_target "
+                           "provisioned for volume: %s"), volume['id'])
+                return
+            self.tgtadm.remove_iscsi_target(iscsi_target, 0, volume['id'],
+                                            volume['name'])
+            return
+        elif not isinstance(self.tgtadm, iscsi.TgtAdm):
+            try:
+                iscsi_target = self.db.volume_get_iscsi_target_num(
+                    context,
+                    volume['id'])
+            except exception.NotFound:
+                LOG.info(_("Skipping remove_export. No iscsi_target "
+                           "provisioned for volume: %s"), volume['id'])
+                return
+        else:
+            iscsi_target = 0
+        try:
+            # NOTE: provider_location may be unset if the volume hasn't
+            #       been exported
+            location = volume['provider_location'].split(' ')
+            iqn = location[1]
+            # ietadm show will exit with an error
+            # this export has already been removed
+            self.tgtadm.show_target(iscsi_target, iqn=iqn)
+        except Exception:
+            LOG.info(_("Skipping remove_export. No iscsi_target "
+                       "is presently exported for volume: %s"), volume['id'])
+            return
+        self.tgtadm.remove_iscsi_target(iscsi_target, 0, volume['id'],
+                                        volume['name'])
+
+    def ensure_export(self, context, volume):
+        """Synchronously recreates an export for a logical volume."""
+        # NOTE(jdg): tgtadm doesn't use the iscsi_targets table
+        # TODO(jdg): In the future move all of the dependent stuff into the
+        #            corresponding target admin class
+
+        if isinstance(self.tgtadm, iscsi.LioAdm):
+            try:
+                volume_info = self.db.volume_get(context, volume['id'])
+                (auth_method,
+                 auth_user,
+                 auth_pass) = volume_info['provider_auth'].split(' ', 3)
+                chap_auth = self._iscsi_authentication(auth_method,
+                                                       auth_user,
+                                                       auth_pass)
+            except exception.NotFound:
+                LOG.debug("volume_info: %s", volume_info)
+                LOG.info(_("Skipping ensure_export. No iscsi_target "
+                           "provisioned for volume: %s"), volume['id'])
+                return
+            iscsi_name = "%s%s" % (self.configuration.iscsi_target_prefix,
+                                   volume['name'])
+            volume_path = self.local_path(volume)
+            iscsi_target = 1
+            self.tgtadm.create_iscsi_target(iscsi_name, iscsi_target,
+                                            0, volume_path, chap_auth,
+                                            check_exit_code=False)
+            return
+        if not isinstance(self.tgtadm, iscsi.TgtAdm):
+            try:
+                iscsi_target = self.db.volume_get_iscsi_target_num(
+                    context,
+                    volume['id'])
+            except exception.NotFound:
+                LOG.info(_("Skipping ensure_export.
No iscsi_target " + "provisioned for volume: %s"), volume['id']) + return + else: + iscsi_target = 1 # dummy value when using TgtAdm + + chap_auth = None + + # Check for https://bugs.launchpad.net/cinder/+bug/1065702 + old_name = None + volume_name = volume['name'] + + iscsi_name = "%s%s" % (self.configuration.iscsi_target_prefix, + volume_name) + volume_path = self.local_path(volume) + + # NOTE(jdg): For TgtAdm case iscsi_name is the ONLY param we need + # should clean this all up at some point in the future + self.tgtadm.create_iscsi_target(iscsi_name, iscsi_target, + 0, volume_path, chap_auth, + check_exit_code=False, + old_name=old_name) + + def _iscsi_location(self, ip, target, iqn, lun=None, device=None): + return "%s:%s,%s %s %s %s" % (ip, self.configuration.iscsi_port, + target, iqn, lun, device) + + def _iscsi_authentication(self, chap, name, password): + return "%s %s %s" % (chap, name, password) + + def _ensure_iscsi_targets(self, context, host): + """Ensure that target ids have been created in datastore.""" + # NOTE(jdg): tgtadm doesn't use the iscsi_targets table + # TODO(jdg): In the future move all of the dependent stuff into the + # corresponding target admin class + if not isinstance(self.tgtadm, iscsi.TgtAdm): + host_iscsi_targets = self.db.iscsi_target_count_by_host(context, + host) + if host_iscsi_targets >= self.configuration.iscsi_num_targets: + return + + # NOTE(vish): Target ids start at 1, not 0. + target_end = self.configuration.iscsi_num_targets + 1 + for target_num in xrange(1, target_end): + target = {'host': host, 'target_num': target_num} + self.db.iscsi_target_create_safe(context, target) + + def delete_volume(self, volume): + """Deletes a logical volume.""" + dev_path = self.local_path(volume) + if not dev_path or dev_path not in \ + self.configuration.available_devices: + return + if os.path.exists(dev_path) and \ + self.configuration.volume_clear != 'none': + volutils.clear_volume( + self._get_device_size(dev_path), dev_path, + volume_clear=self.configuration.volume_clear, + volume_clear_size=self.configuration.volume_clear_size) + + def local_path(self, volume): + if volume['provider_location']: + path = volume['provider_location'].split(" ") + return path[3] + else: + return None + + def copy_image_to_volume(self, context, volume, image_service, image_id): + """Fetch the image from image_service and write it to the volume.""" + image_utils.fetch_to_raw(context, + image_service, + image_id, + self.local_path(volume), + self.configuration.volume_dd_blocksize, + size=volume['size']) + + def copy_volume_to_image(self, context, volume, image_service, image_meta): + """Copy the volume to the specified image.""" + image_utils.upload_volume(context, + image_service, + image_meta, + self.local_path(volume)) + + def create_cloned_volume(self, volume, src_vref): + LOG.info(_('Creating clone of volume: %s') % src_vref['id']) + device = self.find_appropriate_size_device(src_vref['size']) + volutils.copy_volume( + self.local_path(src_vref), device, + self._get_device_size(device) * 2048, + self.configuration.volume_dd_blocksize, + execute=self._execute) + return { + 'provider_location': self._iscsi_location(None, None, None, None, + device), + } + + def get_volume_stats(self, refresh=False): + if refresh: + self._update_volume_stats() + return self._stats + + def _update_volume_stats(self): + """Retrieve stats info from volume group.""" + dict_of_devices_sizes = self._devices_sizes() + used_devices = self._get_used_devices() + total_size = 0 + free_size = 0 + for 
device, size in dict_of_devices_sizes.iteritems(): + if device not in used_devices: + free_size += size + total_size += size + + LOG.debug("Updating volume stats") + backend_name = self.configuration.safe_get('volume_backend_name') + data = {'total_capacity_gb': total_size / 1024, + 'free_capacity_gb': free_size / 1024, + 'reserved_percentage': self.configuration.reserved_percentage, + 'QoS_support': False, + 'volume_backend_name': backend_name or self.__class__.__name__, + 'vendor_name': "Open Source", + 'driver_version': self.VERSION, + 'storage_protocol': 'unknown'} + + self._stats = data + + def _get_used_devices(self): + lst = api.volume_get_all_by_host(context.get_admin_context(), + self.configuration.host) + used_devices = set() + for volume in lst: + local_path = self.local_path(volume) + if local_path: + used_devices.add(local_path) + return used_devices + + def _get_device_size(self, dev_path): + out, err = self._execute('blockdev', '--getsz', dev_path, + run_as_root=True) + size_in_m = int(out) + return size_in_m / 2048 + + def _devices_sizes(self): + available_devices = self.configuration.available_devices + dict_of_devices_sizes = {} + for device in available_devices: + dict_of_devices_sizes[device] = self._get_device_size(device) + return dict_of_devices_sizes + + def find_appropriate_size_device(self, size): + dict_of_devices_sizes = self._devices_sizes() + free_devices = (set(self.configuration.available_devices) - + self._get_used_devices()) + if not free_devices: + raise exception.CinderException(_("No free disk")) + possible_device = None + possible_device_size = None + for device in free_devices: + dev_size = dict_of_devices_sizes[device] + if size * 1024 <= dev_size and (possible_device is None or + dev_size < possible_device_size): + possible_device = device + possible_device_size = dev_size + + if possible_device: + return possible_device + else: + raise exception.CinderException(_("No big enough free disk")) diff --git a/cinder/volume/drivers/coraid.py b/cinder/volume/drivers/coraid.py new file mode 100644 index 0000000000..8bbf0c872e --- /dev/null +++ b/cinder/volume/drivers/coraid.py @@ -0,0 +1,544 @@ +# Copyright 2012 Alyseo. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Desc : Driver to store volumes on Coraid Appliances. +Require : Coraid EtherCloud ESM, Coraid VSX and Coraid SRX. 
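+Config  : Illustrative cinder.conf settings for this driver (option
+          names come from the coraid_opts defined below; the values
+          shown are placeholders, not defaults):
+
+            volume_driver = cinder.volume.drivers.coraid.CoraidDriver
+            coraid_esm_address = 192.168.0.100
+            coraid_user = admin
+            coraid_group = admin
+            coraid_password = secret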
+Author : Jean-Baptiste RANSY +Author : Alex Zasimov +Author : Nikolay Sobolevsky +Contrib : Larry Matter +""" + +import cookielib +import math +import urllib +import urllib2 +import urlparse + +from oslo.config import cfg + +from cinder import exception +from cinder.openstack.common import jsonutils +from cinder.openstack.common import lockutils +from cinder.openstack.common import log as logging +from cinder import units +from cinder.volume import driver +from cinder.volume import volume_types + +LOG = logging.getLogger(__name__) + +coraid_opts = [ + cfg.StrOpt('coraid_esm_address', + default='', + help='IP address of Coraid ESM'), + cfg.StrOpt('coraid_user', + default='admin', + help='User name to connect to Coraid ESM'), + cfg.StrOpt('coraid_group', + default='admin', + help='Name of group on Coraid ESM to which coraid_user belongs' + ' (must have admin privilege)'), + cfg.StrOpt('coraid_password', + default='password', + help='Password to connect to Coraid ESM'), + cfg.StrOpt('coraid_repository_key', + default='coraid_repository', + help='Volume Type key name to store ESM Repository Name'), +] + +CONF = cfg.CONF +CONF.register_opts(coraid_opts) + + +ESM_SESSION_EXPIRED_STATES = ['GeneralAdminFailure', + 'passwordInactivityTimeout', + 'passwordAbsoluteTimeout'] + + +class CoraidRESTClient(object): + """Executes REST RPC requests on Coraid ESM EtherCloud Appliance.""" + + def __init__(self, esm_url): + self._check_esm_url(esm_url) + self._esm_url = esm_url + self._cookie_jar = cookielib.CookieJar() + self._url_opener = urllib2.build_opener( + urllib2.HTTPCookieProcessor(self._cookie_jar)) + + def _check_esm_url(self, esm_url): + splitted = urlparse.urlsplit(esm_url) + if splitted.scheme != 'https': + raise ValueError( + _('Invalid ESM url scheme "%s". Supported https only.') % + splitted.scheme) + + @lockutils.synchronized('coraid_rpc', 'cinder-', False) + def rpc(self, handle, url_params, data, allow_empty_response=False): + return self._rpc(handle, url_params, data, allow_empty_response) + + def _rpc(self, handle, url_params, data, allow_empty_response): + """Execute REST RPC using url /handle?url_params. + + Send JSON encoded data in body of POST request. + + Exceptions: + urllib2.URLError + 1. Name or service not found (e.reason is socket.gaierror) + 2. Socket blocking operation timeout (e.reason is + socket.timeout) + 3. Network IO error (e.reason is socket.error) + + urllib2.HTTPError + 1. HTTP 404, HTTP 500 etc. + + CoraidJsonEncodeFailure - bad REST response + """ + # Handle must be simple path, for example: + # /configure + if '?' in handle or '&' in handle: + raise ValueError(_('Invalid REST handle name. Expected path.')) + + # Request url includes base ESM url, handle path and optional + # URL params. + rest_url = urlparse.urljoin(self._esm_url, handle) + encoded_url_params = urllib.urlencode(url_params) + if encoded_url_params: + rest_url += '?' + encoded_url_params + + if data is None: + json_request = None + else: + json_request = jsonutils.dumps(data) + + request = urllib2.Request(rest_url, json_request) + response = self._url_opener.open(request).read() + + try: + if not response and allow_empty_response: + reply = {} + else: + reply = jsonutils.loads(response) + except (TypeError, ValueError) as exc: + msg = (_('Call to json.loads() failed: %(ex)s.' 
+ ' Response: %(resp)s') % + {'ex': exc, 'resp': response}) + raise exception.CoraidJsonEncodeFailure(msg) + + return reply + + +def to_coraid_kb(gb): + return math.ceil(float(gb) * units.GiB / 1000) + + +def coraid_volume_size(gb): + return '{0}K'.format(to_coraid_kb(gb)) + + +class CoraidAppliance(object): + def __init__(self, rest_client, username, password, group): + self._rest_client = rest_client + self._username = username + self._password = password + self._group = group + self._logined = False + + def _login(self): + """Login into ESM. + + Perform login request and return available groups. + + :returns: dict -- map with group_name to group_id + """ + ADMIN_GROUP_PREFIX = 'admin group:' + + url_params = {'op': 'login', + 'username': self._username, + 'password': self._password} + reply = self._rest_client.rpc('admin', url_params, 'Login') + if reply['state'] != 'adminSucceed': + raise exception.CoraidESMBadCredentials() + + # Read groups map from login reply. + groups_map = {} + for group_info in reply.get('values', []): + full_group_name = group_info['fullPath'] + if full_group_name.startswith(ADMIN_GROUP_PREFIX): + group_name = full_group_name[len(ADMIN_GROUP_PREFIX):] + groups_map[group_name] = group_info['groupId'] + + return groups_map + + def _set_effective_group(self, groups_map, group): + """Set effective group. + + Use groups_map returned from _login method. + """ + try: + group_id = groups_map[group] + except KeyError: + raise exception.CoraidESMBadGroup(group_name=group) + + url_params = {'op': 'setRbacGroup', + 'groupId': group_id} + reply = self._rest_client.rpc('admin', url_params, 'Group') + if reply['state'] != 'adminSucceed': + raise exception.CoraidESMBadCredentials() + + self._logined = True + + def _ensure_session(self): + if not self._logined: + groups_map = self._login() + self._set_effective_group(groups_map, self._group) + + def _relogin(self): + self._logined = False + self._ensure_session() + + def rpc(self, handle, url_params, data, allow_empty_response=False): + self._ensure_session() + + relogin_attempts = 3 + # Do action, relogin if needed and repeat action. + while True: + reply = self._rest_client.rpc(handle, url_params, data, + allow_empty_response) + + if self._is_session_expired(reply): + relogin_attempts -= 1 + if relogin_attempts <= 0: + raise exception.CoraidESMReloginFailed() + LOG.debug(_('Session is expired. 
Relogin on ESM.')) + self._relogin() + else: + return reply + + def _is_session_expired(self, reply): + return ('state' in reply and + reply['state'] in ESM_SESSION_EXPIRED_STATES and + reply['metaCROp'] == 'reboot') + + def _is_bad_config_state(self, reply): + return (not reply or + 'configState' not in reply or + reply['configState'] != 'completedSuccessfully') + + def configure(self, json_request): + reply = self.rpc('configure', {}, json_request) + if self._is_bad_config_state(reply): + # Calculate error message + if not reply: + message = _('Reply is empty.') + else: + message = reply.get('message', _('Error message is empty.')) + raise exception.CoraidESMConfigureError(message=message) + return reply + + def esm_command(self, request): + request['data'] = jsonutils.dumps(request['data']) + return self.configure([request]) + + def get_volume_info(self, volume_name): + """Retrieve volume information for a given volume name.""" + url_params = {'shelf': 'cms', + 'orchStrRepo': '', + 'lv': volume_name} + reply = self.rpc('fetch', url_params, None) + try: + volume_info = reply[0][1]['reply'][0] + except (IndexError, KeyError): + raise exception.VolumeNotFound(volume_id=volume_name) + return {'pool': volume_info['lv']['containingPool'], + 'repo': volume_info['repoName'], + 'lun': volume_info['lv']['lvStatus']['exportedLun']['lun'], + 'shelf': volume_info['lv']['lvStatus']['exportedLun']['shelf']} + + def get_volume_repository(self, volume_name): + volume_info = self.get_volume_info(volume_name) + return volume_info['repo'] + + def get_all_repos(self): + reply = self.rpc('fetch', {'orchStrRepo': ''}, None) + try: + return reply[0][1]['reply'] + except (IndexError, KeyError): + return [] + + def ping(self): + try: + self.rpc('fetch', {}, None, allow_empty_response=True) + except Exception as e: + LOG.debug(_('Coraid Appliance ping failed: %s'), str(e)) + raise exception.CoraidESMNotAvailable(reason=str(e)) + + def create_lun(self, repository_name, volume_name, volume_size_in_gb): + request = {'addr': 'cms', + 'data': { + 'servers': [], + 'repoName': repository_name, + 'lvName': volume_name, + 'size': coraid_volume_size(volume_size_in_gb)}, + 'op': 'orchStrLun', + 'args': 'add'} + esm_result = self.esm_command(request) + LOG.debug(_('Volume "%(name)s" created with VSX LUN "%(lun)s"') % + {'name': volume_name, + 'lun': esm_result['firstParam']}) + return esm_result + + def delete_lun(self, volume_name): + repository_name = self.get_volume_repository(volume_name) + request = {'addr': 'cms', + 'data': { + 'repoName': repository_name, + 'lvName': volume_name}, + 'op': 'orchStrLun/verified', + 'args': 'delete'} + esm_result = self.esm_command(request) + LOG.debug(_('Volume "%s" deleted.'), volume_name) + return esm_result + + def resize_volume(self, volume_name, new_volume_size_in_gb): + LOG.debug(_('Resize volume "%(name)s" to %(size)s GB.') % + {'name': volume_name, + 'size': new_volume_size_in_gb}) + repository = self.get_volume_repository(volume_name) + LOG.debug(_('Repository for volume "%(name)s" found: "%(repo)s"') % + {'name': volume_name, + 'repo': repository}) + + request = {'addr': 'cms', + 'data': { + 'lvName': volume_name, + 'newLvName': volume_name + '-resize', + 'size': coraid_volume_size(new_volume_size_in_gb), + 'repoName': repository}, + 'op': 'orchStrLunMods', + 'args': 'resize'} + esm_result = self.esm_command(request) + + LOG.debug(_('Volume "%(name)s" resized. 
New size is %(size)s GB.') % + {'name': volume_name, + 'size': new_volume_size_in_gb}) + return esm_result + + def create_snapshot(self, volume_name, snapshot_name): + volume_repository = self.get_volume_repository(volume_name) + request = {'addr': 'cms', + 'data': { + 'repoName': volume_repository, + 'lvName': volume_name, + 'newLvName': snapshot_name}, + 'op': 'orchStrLunMods', + 'args': 'addClSnap'} + esm_result = self.esm_command(request) + return esm_result + + def delete_snapshot(self, snapshot_name): + repository_name = self.get_volume_repository(snapshot_name) + request = {'addr': 'cms', + 'data': { + 'repoName': repository_name, + 'lvName': snapshot_name}, + 'op': 'orchStrLunMods', + 'args': 'delClSnap'} + esm_result = self.esm_command(request) + return esm_result + + def create_volume_from_snapshot(self, + snapshot_name, + volume_name, + dest_repository_name): + snapshot_repo = self.get_volume_repository(snapshot_name) + request = {'addr': 'cms', + 'data': { + 'lvName': snapshot_name, + 'repoName': snapshot_repo, + 'newLvName': volume_name, + 'newRepoName': dest_repository_name}, + 'op': 'orchStrLunMods', + 'args': 'addClone'} + esm_result = self.esm_command(request) + return esm_result + + def clone_volume(self, + src_volume_name, + dst_volume_name, + dst_repository_name): + src_volume_info = self.get_volume_info(src_volume_name) + + if src_volume_info['repo'] != dst_repository_name: + raise exception.CoraidException( + _('Cannot create clone volume in different repository.')) + + request = {'addr': 'cms', + 'data': { + 'shelfLun': '{0}.{1}'.format(src_volume_info['shelf'], + src_volume_info['lun']), + 'lvName': src_volume_name, + 'repoName': src_volume_info['repo'], + 'newLvName': dst_volume_name, + 'newRepoName': dst_repository_name}, + 'op': 'orchStrLunMods', + 'args': 'addClone'} + return self.esm_command(request) + + +class CoraidDriver(driver.VolumeDriver): + """This is the Class to set in cinder.conf (volume_driver).""" + + VERSION = '1.0.0' + + def __init__(self, *args, **kwargs): + super(CoraidDriver, self).__init__(*args, **kwargs) + self.configuration.append_config_values(coraid_opts) + + self._stats = {'driver_version': self.VERSION, + 'free_capacity_gb': 'unknown', + 'reserved_percentage': 0, + 'storage_protocol': 'aoe', + 'total_capacity_gb': 'unknown', + 'vendor_name': 'Coraid'} + backend_name = self.configuration.safe_get('volume_backend_name') + self._stats['volume_backend_name'] = backend_name or 'EtherCloud ESM' + + @property + def appliance(self): + # NOTE(nsobolevsky): This is workaround for bug in the ESM appliance. + # If there is a lot of request with the same session/cookie/connection, + # the appliance could corrupt all following request in session. + # For that purpose we just create a new appliance. + esm_url = "https://{0}:8443".format( + self.configuration.coraid_esm_address) + + return CoraidAppliance(CoraidRESTClient(esm_url), + self.configuration.coraid_user, + self.configuration.coraid_password, + self.configuration.coraid_group) + + def check_for_setup_error(self): + """Return an error if prerequisites aren't met.""" + self.appliance.ping() + + def _get_repository(self, volume_type): + """Get the ESM Repository from the Volume Type. + + The ESM Repository is stored into a volume_type_extra_specs key. 
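+
+        As an illustrative sketch (the repository name is a placeholder),
+        with the default coraid_repository_key the extra spec on a volume
+        type could be set as either of:
+
+            coraid_repository=MyRepo
+            coraid_repository=<in> MyRepo
+
+        The optional '<in>' capability-filter prefix is stripped below.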
+        """
+        volume_type_id = volume_type['id']
+        repository_key_name = self.configuration.coraid_repository_key
+        repository = volume_types.get_volume_type_extra_specs(
+            volume_type_id, repository_key_name)
+        # Remove <in> keyword from repository name if needed.
+        if repository.startswith('<in> '):
+            return repository[len('<in> '):]
+        else:
+            return repository
+
+    def create_volume(self, volume):
+        """Create a Volume."""
+        repository = self._get_repository(volume['volume_type'])
+        self.appliance.create_lun(repository, volume['name'], volume['size'])
+
+    def create_cloned_volume(self, volume, src_vref):
+        dst_volume_repository = self._get_repository(volume['volume_type'])
+
+        self.appliance.clone_volume(src_vref['name'],
+                                    volume['name'],
+                                    dst_volume_repository)
+
+        if volume['size'] != src_vref['size']:
+            self.appliance.resize_volume(volume['name'], volume['size'])
+
+    def delete_volume(self, volume):
+        """Delete a Volume."""
+        try:
+            self.appliance.delete_lun(volume['name'])
+        except exception.VolumeNotFound:
+            self.appliance.ping()
+
+    def create_snapshot(self, snapshot):
+        """Create a Snapshot."""
+        volume_name = snapshot['volume_name']
+        snapshot_name = snapshot['name']
+        self.appliance.create_snapshot(volume_name, snapshot_name)
+
+    def delete_snapshot(self, snapshot):
+        """Delete a Snapshot."""
+        snapshot_name = snapshot['name']
+        self.appliance.delete_snapshot(snapshot_name)
+
+    def create_volume_from_snapshot(self, volume, snapshot):
+        """Create a Volume from a Snapshot."""
+        snapshot_name = snapshot['name']
+        repository = self._get_repository(volume['volume_type'])
+        self.appliance.create_volume_from_snapshot(snapshot_name,
+                                                   volume['name'],
+                                                   repository)
+        if volume['size'] > snapshot['volume_size']:
+            self.appliance.resize_volume(volume['name'], volume['size'])
+
+    def extend_volume(self, volume, new_size):
+        """Extend an existing volume."""
+        self.appliance.resize_volume(volume['name'], new_size)
+
+    def initialize_connection(self, volume, connector):
+        """Return connection information."""
+        volume_info = self.appliance.get_volume_info(volume['name'])
+
+        shelf = volume_info['shelf']
+        lun = volume_info['lun']
+
+        LOG.debug(_('Initialize connection %(shelf)s/%(lun)s for %(name)s')
+                  % {'shelf': shelf,
+                     'lun': lun,
+                     'name': volume['name']})
+
+        aoe_properties = {'target_shelf': shelf,
+                          'target_lun': lun}
+
+        return {'driver_volume_type': 'aoe',
+                'data': aoe_properties}
+
+    def _get_repository_capabilities(self):
+        repos_list = map(lambda i: i['profile']['fullName'] + ':' + i['name'],
+                         self.appliance.get_all_repos())
+        return ' '.join(repos_list)
+
+    def update_volume_stats(self):
+        capabilities = self._get_repository_capabilities()
+        self._stats[self.configuration.coraid_repository_key] = capabilities
+
+    def get_volume_stats(self, refresh=False):
+        """Return Volume Stats."""
+        if refresh:
+            self.update_volume_stats()
+        return self._stats
+
+    def local_path(self, volume):
+        pass
+
+    def create_export(self, context, volume):
+        pass
+
+    def remove_export(self, context, volume):
+        pass
+
+    def terminate_connection(self, volume, connector, **kwargs):
+        pass
+
+    def ensure_export(self, context, volume):
+        pass
diff --git a/cinder/volume/drivers/emc/__init__.py b/cinder/volume/drivers/emc/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/cinder/volume/drivers/emc/emc_smis_common.py b/cinder/volume/drivers/emc/emc_smis_common.py
new file mode 100644
index 0000000000..f5aefeb7bc
--- /dev/null
+++ b/cinder/volume/drivers/emc/emc_smis_common.py
@@ -0,0 +1,1571 @@
+# 
Copyright (c) 2012 EMC Corporation. +# Copyright (c) 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Common class for SMI-S based EMC volume drivers. + +This common class is for EMC volume drivers based on SMI-S. +It supports VNX and VMAX arrays. + +""" + +import time + +from oslo.config import cfg +from xml.dom.minidom import parseString + +from cinder import exception +from cinder.openstack.common import log as logging +from cinder import units + +LOG = logging.getLogger(__name__) + +CONF = cfg.CONF + +try: + import pywbem +except ImportError: + LOG.info(_('Module PyWBEM not installed. ' + 'Install PyWBEM using the python-pywbem package.')) + +CINDER_EMC_CONFIG_FILE = '/etc/cinder/cinder_emc_config.xml' + +emc_opts = [ + cfg.StrOpt('cinder_emc_config_file', + default=CINDER_EMC_CONFIG_FILE, + help='use this file for cinder emc plugin ' + 'config data'), ] + + +CONF.register_opts(emc_opts) + + +class EMCSMISCommon(): + """Common code that can be used by ISCSI and FC drivers.""" + + stats = {'driver_version': '1.0', + 'free_capacity_gb': 0, + 'reserved_percentage': 0, + 'storage_protocol': None, + 'total_capacity_gb': 0, + 'vendor_name': 'EMC', + 'volume_backend_name': None} + + def __init__(self, prtcl, configuration=None): + self.protocol = prtcl + self.configuration = configuration + self.configuration.append_config_values(emc_opts) + + ip, port = self._get_ecom_server() + self.user, self.passwd = self._get_ecom_cred() + self.url = 'http://' + ip + ':' + port + self.conn = self._get_ecom_connection() + + def create_volume(self, volume): + """Creates a EMC(VMAX/VNX) volume.""" + + LOG.debug(_('Entering create_volume.')) + volumesize = int(volume['size']) * units.GiB + volumename = volume['name'] + + LOG.info(_('Create Volume: %(volume)s Size: %(size)lu') + % {'volume': volumename, + 'size': volumesize}) + + self.conn = self._get_ecom_connection() + + storage_type = self._get_storage_type() + + LOG.debug(_('Create Volume: %(volume)s ' + 'Storage type: %(storage_type)s') + % {'volume': volumename, + 'storage_type': storage_type}) + + pool, storage_system = self._find_pool(storage_type) + + LOG.debug(_('Create Volume: %(volume)s Pool: %(pool)s ' + 'Storage System: %(storage_system)s') + % {'volume': volumename, + 'pool': str(pool), + 'storage_system': storage_system}) + + configservice = self._find_storage_configuration_service( + storage_system) + if configservice is None: + exception_message = (_("Error Create Volume: %(volumename)s. 
" + "Storage Configuration Service not found for " + "pool %(storage_type)s.") + % {'volumename': volumename, + 'storage_type': storage_type}) + LOG.error(exception_message) + raise exception.VolumeBackendAPIException(data=exception_message) + + LOG.debug(_('Create Volume: %(name)s Method: ' + 'CreateOrModifyElementFromStoragePool ConfigServicie: ' + '%(service)s ElementName: %(name)s InPool: %(pool)s ' + 'ElementType: 5 Size: %(size)lu') + % {'service': str(configservice), + 'name': volumename, + 'pool': str(pool), + 'size': volumesize}) + + rc, job = self.conn.InvokeMethod( + 'CreateOrModifyElementFromStoragePool', + configservice, ElementName=volumename, InPool=pool, + ElementType=self._getnum(5, '16'), + Size=self._getnum(volumesize, '64')) + + LOG.debug(_('Create Volume: %(volumename)s Return code: %(rc)lu') + % {'volumename': volumename, + 'rc': rc}) + + if rc != 0L: + rc, errordesc = self._wait_for_job_complete(job) + if rc != 0L: + LOG.error(_('Error Create Volume: %(volumename)s. ' + 'Return code: %(rc)lu. Error: %(error)s') + % {'volumename': volumename, + 'rc': rc, + 'error': errordesc}) + raise exception.VolumeBackendAPIException(data=errordesc) + + LOG.debug(_('Leaving create_volume: %(volumename)s ' + 'Return code: %(rc)lu') + % {'volumename': volumename, + 'rc': rc}) + + def create_volume_from_snapshot(self, volume, snapshot): + """Creates a volume from a snapshot.""" + + LOG.debug(_('Entering create_volume_from_snapshot.')) + + snapshotname = snapshot['name'] + volumename = volume['name'] + + LOG.info(_('Create Volume from Snapshot: Volume: %(volumename)s ' + 'Snapshot: %(snapshotname)s') + % {'volumename': volumename, + 'snapshotname': snapshotname}) + + self.conn = self._get_ecom_connection() + + snapshot_instance = self._find_lun(snapshot) + storage_system = snapshot_instance['SystemName'] + + LOG.debug(_('Create Volume from Snapshot: Volume: %(volumename)s ' + 'Snapshot: %(snapshotname)s Snapshot Instance: ' + '%(snapshotinstance)s Storage System: %(storage_system)s.') + % {'volumename': volumename, + 'snapshotname': snapshotname, + 'snapshotinstance': str(snapshot_instance.path), + 'storage_system': storage_system}) + + isVMAX = storage_system.find('SYMMETRIX') + if isVMAX > -1: + exception_message = (_('Error Create Volume from Snapshot: ' + 'Volume: %(volumename)s Snapshot: ' + '%(snapshotname)s. Create Volume ' + 'from Snapshot is NOT supported on VMAX.') + % {'volumename': volumename, + 'snapshotname': snapshotname}) + LOG.error(exception_message) + raise exception.VolumeBackendAPIException(data=exception_message) + + repservice = self._find_replication_service(storage_system) + if repservice is None: + exception_message = (_('Error Create Volume from Snapshot: ' + 'Volume: %(volumename)s Snapshot: ' + '%(snapshotname)s. 
Cannot find Replication ' + 'Service to create volume from snapshot.') + % {'volumename': volumename, + 'snapshotname': snapshotname}) + LOG.error(exception_message) + raise exception.VolumeBackendAPIException(data=exception_message) + + LOG.debug(_('Create Volume from Snapshot: Volume: %(volumename)s ' + 'Snapshot: %(snapshotname)s Method: CreateElementReplica ' + 'ReplicationService: %(service)s ElementName: ' + '%(elementname)s SyncType: 8 SourceElement: ' + '%(sourceelement)s') + % {'volumename': volumename, + 'snapshotname': snapshotname, + 'service': str(repservice), + 'elementname': volumename, + 'sourceelement': str(snapshot_instance.path)}) + + # Create a Clone from snapshot + rc, job = self.conn.InvokeMethod( + 'CreateElementReplica', repservice, + ElementName=volumename, + SyncType=self._getnum(8, '16'), + SourceElement=snapshot_instance.path) + + if rc != 0L: + rc, errordesc = self._wait_for_job_complete(job) + if rc != 0L: + exception_message = (_('Error Create Volume from Snapshot: ' + 'Volume: %(volumename)s Snapshot:' + '%(snapshotname)s. Return code: %(rc)lu.' + 'Error: %(error)s') + % {'volumename': volumename, + 'snapshotname': snapshotname, + 'rc': rc, + 'error': errordesc}) + LOG.error(exception_message) + raise exception.VolumeBackendAPIException( + data=exception_message) + + LOG.debug(_('Create Volume from Snapshot: Volume: %(volumename)s ' + 'Snapshot: %(snapshotname)s. Successfully clone volume ' + 'from snapshot. Finding the clone relationship.') + % {'volumename': volumename, + 'snapshotname': snapshotname}) + + sync_name, storage_system = self._find_storage_sync_sv_sv( + volumename, snapshotname) + + # Remove the Clone relationshop so it can be used as a regular lun + # 8 - Detach operation + LOG.debug(_('Create Volume from Snapshot: Volume: %(volumename)s ' + 'Snapshot: %(snapshotname)s. Remove the clone ' + 'relationship. Method: ModifyReplicaSynchronization ' + 'ReplicationService: %(service)s Operation: 8 ' + 'Synchronization: %(sync_name)s') + % {'volumename': volumename, + 'snapshotname': snapshotname, + 'service': str(repservice), + 'sync_name': str(sync_name)}) + + rc, job = self.conn.InvokeMethod( + 'ModifyReplicaSynchronization', + repservice, + Operation=self._getnum(8, '16'), + Synchronization=sync_name) + + LOG.debug(_('Create Volume from Snapshot: Volume: %(volumename)s ' + 'Snapshot: %(snapshotname)s Return code: %(rc)lu') + % {'volumename': volumename, + 'snapshotname': snapshotname, + 'rc': rc}) + + if rc != 0L: + rc, errordesc = self._wait_for_job_complete(job) + if rc != 0L: + exception_message = (_('Error Create Volume from Snapshot: ' + 'Volume: %(volumename)s ' + 'Snapshot: %(snapshotname)s. ' + 'Return code: %(rc)lu. 
Error: %(error)s') + % {'volumename': volumename, + 'snapshotname': snapshotname, + 'rc': rc, + 'error': errordesc}) + LOG.error(exception_message) + raise exception.VolumeBackendAPIException( + data=exception_message) + + LOG.debug(_('Leaving create_volume_from_snapshot: Volume: ' + '%(volumename)s Snapshot: %(snapshotname)s ' + 'Return code: %(rc)lu.') + % {'volumename': volumename, + 'snapshotname': snapshotname, + 'rc': rc}) + + def create_cloned_volume(self, volume, src_vref): + """Creates a clone of the specified volume.""" + LOG.debug(_('Entering create_cloned_volume.')) + + srcname = src_vref['name'] + volumename = volume['name'] + + LOG.info(_('Create a Clone from Volume: Volume: %(volumename)s ' + 'Source Volume: %(srcname)s') + % {'volumename': volumename, + 'srcname': srcname}) + + self.conn = self._get_ecom_connection() + + src_instance = self._find_lun(src_vref) + storage_system = src_instance['SystemName'] + + LOG.debug(_('Create Cloned Volume: Volume: %(volumename)s ' + 'Source Volume: %(srcname)s Source Instance: ' + '%(src_instance)s Storage System: %(storage_system)s.') + % {'volumename': volumename, + 'srcname': srcname, + 'src_instance': str(src_instance.path), + 'storage_system': storage_system}) + + repservice = self._find_replication_service(storage_system) + if repservice is None: + exception_message = (_('Error Create Cloned Volume: ' + 'Volume: %(volumename)s Source Volume: ' + '%(srcname)s. Cannot find Replication ' + 'Service to create cloned volume.') + % {'volumename': volumename, + 'srcname': srcname}) + LOG.error(exception_message) + raise exception.VolumeBackendAPIException(data=exception_message) + + LOG.debug(_('Create Cloned Volume: Volume: %(volumename)s ' + 'Source Volume: %(srcname)s Method: CreateElementReplica ' + 'ReplicationService: %(service)s ElementName: ' + '%(elementname)s SyncType: 8 SourceElement: ' + '%(sourceelement)s') + % {'volumename': volumename, + 'srcname': srcname, + 'service': str(repservice), + 'elementname': volumename, + 'sourceelement': str(src_instance.path)}) + + # Create a Clone from source volume + rc, job = self.conn.InvokeMethod( + 'CreateElementReplica', repservice, + ElementName=volumename, + SyncType=self._getnum(8, '16'), + SourceElement=src_instance.path) + + if rc != 0L: + rc, errordesc = self._wait_for_job_complete(job) + if rc != 0L: + exception_message = (_('Error Create Cloned Volume: ' + 'Volume: %(volumename)s Source Volume:' + '%(srcname)s. Return code: %(rc)lu.' + 'Error: %(error)s') + % {'volumename': volumename, + 'srcname': srcname, + 'rc': rc, + 'error': errordesc}) + LOG.error(exception_message) + raise exception.VolumeBackendAPIException( + data=exception_message) + + LOG.debug(_('Create Cloned Volume: Volume: %(volumename)s ' + 'Source Volume: %(srcname)s. Successfully cloned volume ' + 'from source volume. Finding the clone relationship.') + % {'volumename': volumename, + 'srcname': srcname}) + + sync_name, storage_system = self._find_storage_sync_sv_sv( + volumename, srcname) + + # Remove the Clone relationshop so it can be used as a regular lun + # 8 - Detach operation + LOG.debug(_('Create Cloned Volume: Volume: %(volumename)s ' + 'Source Volume: %(srcname)s. Remove the clone ' + 'relationship. 
Method: ModifyReplicaSynchronization ' + 'ReplicationService: %(service)s Operation: 8 ' + 'Synchronization: %(sync_name)s') + % {'volumename': volumename, + 'srcname': srcname, + 'service': str(repservice), + 'sync_name': str(sync_name)}) + + rc, job = self.conn.InvokeMethod( + 'ModifyReplicaSynchronization', + repservice, + Operation=self._getnum(8, '16'), + Synchronization=sync_name) + + LOG.debug(_('Create Cloned Volume: Volume: %(volumename)s ' + 'Source Volume: %(srcname)s Return code: %(rc)lu') + % {'volumename': volumename, + 'srcname': srcname, + 'rc': rc}) + + if rc != 0L: + rc, errordesc = self._wait_for_job_complete(job) + if rc != 0L: + exception_message = (_('Error Create Cloned Volume: ' + 'Volume: %(volumename)s ' + 'Source Volume: %(srcname)s. ' + 'Return code: %(rc)lu. Error: %(error)s') + % {'volumename': volumename, + 'srcname': srcname, + 'rc': rc, + 'error': errordesc}) + LOG.error(exception_message) + raise exception.VolumeBackendAPIException( + data=exception_message) + + LOG.debug(_('Leaving create_cloned_volume: Volume: ' + '%(volumename)s Source Volume: %(srcname)s ' + 'Return code: %(rc)lu.') + % {'volumename': volumename, + 'srcname': srcname, + 'rc': rc}) + + def delete_volume(self, volume): + """Deletes an EMC volume.""" + LOG.debug(_('Entering delete_volume.')) + volumename = volume['name'] + LOG.info(_('Delete Volume: %(volume)s') + % {'volume': volumename}) + + self.conn = self._get_ecom_connection() + + vol_instance = self._find_lun(volume) + if vol_instance is None: + LOG.error(_('Volume %(name)s not found on the array. ' + 'No volume to delete.') + % {'name': volumename}) + return + + storage_system = vol_instance['SystemName'] + + configservice =\ + self._find_storage_configuration_service(storage_system) + if configservice is None: + exception_message = (_("Error Delete Volume: %(volumename)s. " + "Storage Configuration Service not found.") + % {'volumename': volumename}) + LOG.error(exception_message) + raise exception.VolumeBackendAPIException(data=exception_message) + + device_id = vol_instance['DeviceID'] + + LOG.debug(_('Delete Volume: %(name)s DeviceID: %(deviceid)s') + % {'name': volumename, + 'deviceid': device_id}) + + LOG.debug(_('Delete Volume: %(name)s Method: EMCReturnToStoragePool ' + 'ConfigServic: %(service)s TheElement: %(vol_instance)s') + % {'service': str(configservice), + 'name': volumename, + 'vol_instance': str(vol_instance.path)}) + + rc, job =\ + self.conn.InvokeMethod('EMCReturnToStoragePool', + configservice, + TheElements=[vol_instance.path]) + + if rc != 0L: + rc, errordesc = self._wait_for_job_complete(job) + if rc != 0L: + exception_message = (_('Error Delete Volume: %(volumename)s. ' + 'Return code: %(rc)lu. 
Error: %(error)s') + % {'volumename': volumename, + 'rc': rc, + 'error': errordesc}) + LOG.error(exception_message) + raise exception.VolumeBackendAPIException( + data=exception_message) + + LOG.debug(_('Leaving delete_volume: %(volumename)s Return code: ' + '%(rc)lu') + % {'volumename': volumename, + 'rc': rc}) + + def create_snapshot(self, snapshot): + """Creates a snapshot.""" + LOG.debug(_('Entering create_snapshot.')) + + snapshotname = snapshot['name'] + volumename = snapshot['volume_name'] + LOG.info(_('Create snapshot: %(snapshot)s: volume: %(volume)s') + % {'snapshot': snapshotname, + 'volume': volumename}) + + self.conn = self._get_ecom_connection() + + volume = {} + volume['name'] = volumename + volume['provider_location'] = None + vol_instance = self._find_lun(volume) + device_id = vol_instance['DeviceID'] + storage_system = vol_instance['SystemName'] + LOG.debug(_('Device ID: %(deviceid)s: Storage System: ' + '%(storagesystem)s') + % {'deviceid': device_id, + 'storagesystem': storage_system}) + + repservice = self._find_replication_service(storage_system) + if repservice is None: + LOG.error(_("Cannot find Replication Service to create snapshot " + "for volume %s.") % volumename) + exception_message = (_("Cannot find Replication Service to " + "create snapshot for volume %s.") + % volumename) + raise exception.VolumeBackendAPIException(data=exception_message) + + LOG.debug(_("Create Snapshot: Method: CreateElementReplica: " + "Target: %(snapshot)s Source: %(volume)s Replication " + "Service: %(service)s ElementName: %(elementname)s Sync " + "Type: 7 SourceElement: %(sourceelement)s.") + % {'snapshot': snapshotname, + 'volume': volumename, + 'service': str(repservice), + 'elementname': snapshotname, + 'sourceelement': str(vol_instance.path)}) + + rc, job =\ + self.conn.InvokeMethod('CreateElementReplica', repservice, + ElementName=snapshotname, + SyncType=self._getnum(7, '16'), + SourceElement=vol_instance.path) + + LOG.debug(_('Create Snapshot: Volume: %(volumename)s ' + 'Snapshot: %(snapshotname)s Return code: %(rc)lu') + % {'volumename': volumename, + 'snapshotname': snapshotname, + 'rc': rc}) + + if rc != 0L: + rc, errordesc = self._wait_for_job_complete(job) + if rc != 0L: + exception_message = (_('Error Create Snapshot: %(snapshot)s ' + 'Volume: %(volume)s Error: %(errordesc)s') + % {'snapshot': snapshotname, 'volume': + volumename, 'errordesc': errordesc}) + LOG.error(exception_message) + raise exception.VolumeBackendAPIException( + data=exception_message) + + LOG.debug(_('Leaving create_snapshot: Snapshot: %(snapshot)s ' + 'Volume: %(volume)s Return code: %(rc)lu.') % + {'snapshot': snapshotname, 'volume': volumename, 'rc': rc}) + + def delete_snapshot(self, snapshot): + """Deletes a snapshot.""" + LOG.debug(_('Entering delete_snapshot.')) + + snapshotname = snapshot['name'] + volumename = snapshot['volume_name'] + LOG.info(_('Delete Snapshot: %(snapshot)s: volume: %(volume)s') + % {'snapshot': snapshotname, + 'volume': volumename}) + + self.conn = self._get_ecom_connection() + + LOG.debug(_('Delete Snapshot: %(snapshot)s: volume: %(volume)s. ' + 'Finding StorageSychronization_SV_SV.') + % {'snapshot': snapshotname, + 'volume': volumename}) + + sync_name, storage_system =\ + self._find_storage_sync_sv_sv(snapshotname, volumename, False) + if sync_name is None: + LOG.error(_('Snapshot: %(snapshot)s: volume: %(volume)s ' + 'not found on the array. 
No snapshot to delete.') + % {'snapshot': snapshotname, + 'volume': volumename}) + return + + repservice = self._find_replication_service(storage_system) + if repservice is None: + exception_message = (_("Cannot find Replication Service to " + "create snapshot for volume %s.") + % volumename) + raise exception.VolumeBackendAPIException(data=exception_message) + + # Delete snapshot - deletes both the target element + # and the snap session + LOG.debug(_("Delete Snapshot: Target: %(snapshot)s " + "Source: %(volume)s. Method: " + "ModifyReplicaSynchronization: " + "Replication Service: %(service)s Operation: 19 " + "Synchronization: %(sync_name)s.") + % {'snapshot': snapshotname, + 'volume': volumename, + 'service': str(repservice), + 'sync_name': str(sync_name)}) + + rc, job =\ + self.conn.InvokeMethod('ModifyReplicaSynchronization', + repservice, + Operation=self._getnum(19, '16'), + Synchronization=sync_name) + + LOG.debug(_('Delete Snapshot: Volume: %(volumename)s Snapshot: ' + '%(snapshotname)s Return code: %(rc)lu') + % {'volumename': volumename, + 'snapshotname': snapshotname, + 'rc': rc}) + + if rc != 0L: + rc, errordesc = self._wait_for_job_complete(job) + if rc != 0L: + exception_message = (_('Error Delete Snapshot: Volume: ' + '%(volumename)s Snapshot: ' + '%(snapshotname)s. Return code: %(rc)lu.' + ' Error: %(error)s') + % {'volumename': volumename, + 'snapshotname': snapshotname, + 'rc': rc, + 'error': errordesc}) + LOG.error(exception_message) + raise exception.VolumeBackendAPIException( + data=exception_message) + + LOG.debug(_('Leaving delete_snapshot: Volume: %(volumename)s ' + 'Snapshot: %(snapshotname)s Return code: %(rc)lu.') + % {'volumename': volumename, + 'snapshotname': snapshotname, + 'rc': rc}) + + def create_export(self, context, volume): + """Driver entry point to get the export info for a new volume.""" + self.conn = self._get_ecom_connection() + volumename = volume['name'] + LOG.info(_('Create export: %(volume)s') + % {'volume': volumename}) + vol_instance = self._find_lun(volume) + device_id = vol_instance['DeviceID'] + + LOG.debug(_('create_export: Volume: %(volume)s Device ID: ' + '%(device_id)s') + % {'volume': volumename, + 'device_id': device_id}) + + return {'provider_location': device_id} + + # Mapping method for VNX + def _expose_paths(self, configservice, vol_instance, + connector): + """This method maps a volume to a host. + + It adds a volume and initiator to a Storage Group + and therefore maps the volume to the host. 
+ """ + volumename = vol_instance['ElementName'] + lun_name = vol_instance['DeviceID'] + initiators = self._find_initiator_names(connector) + storage_system = vol_instance['SystemName'] + lunmask_ctrl = self._find_lunmasking_scsi_protocol_controller( + storage_system, connector) + + LOG.debug(_('ExposePaths: %(vol)s ConfigServicie: %(service)s ' + 'LUNames: %(lun_name)s InitiatorPortIDs: %(initiator)s ' + 'DeviceAccesses: 2') + % {'vol': str(vol_instance.path), + 'service': str(configservice), + 'lun_name': lun_name, + 'initiator': initiators}) + + if lunmask_ctrl is None: + rc, controller =\ + self.conn.InvokeMethod('ExposePaths', + configservice, LUNames=[lun_name], + InitiatorPortIDs=initiators, + DeviceAccesses=[self._getnum(2, '16')]) + else: + LOG.debug(_('ExposePaths parameter ' + 'LunMaskingSCSIProtocolController: ' + '%(lunmasking)s') + % {'lunmasking': str(lunmask_ctrl)}) + rc, controller =\ + self.conn.InvokeMethod('ExposePaths', + configservice, LUNames=[lun_name], + DeviceAccesses=[self._getnum(2, '16')], + ProtocolControllers=[lunmask_ctrl]) + + if rc != 0L: + msg = (_('Error mapping volume %s.') % volumename) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + LOG.debug(_('ExposePaths for volume %s completed successfully.') + % volumename) + + # Unmapping method for VNX + def _hide_paths(self, configservice, vol_instance, + connector): + """This method unmaps a volume from the host. + + Removes a volume from the Storage Group + and therefore unmaps the volume from the host. + """ + volumename = vol_instance['ElementName'] + device_id = vol_instance['DeviceID'] + lunmask_ctrl = self._find_lunmasking_scsi_protocol_controller_for_vol( + vol_instance, connector) + + LOG.debug(_('HidePaths: %(vol)s ConfigServicie: %(service)s ' + 'LUNames: %(device_id)s LunMaskingSCSIProtocolController: ' + '%(lunmasking)s') + % {'vol': str(vol_instance.path), + 'service': str(configservice), + 'device_id': device_id, + 'lunmasking': str(lunmask_ctrl)}) + + rc, controller = self.conn.InvokeMethod( + 'HidePaths', configservice, + LUNames=[device_id], ProtocolControllers=[lunmask_ctrl]) + + if rc != 0L: + msg = (_('Error unmapping volume %s.') % volumename) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + LOG.debug(_('HidePaths for volume %s completed successfully.') + % volumename) + + # Mapping method for VMAX + def _add_members(self, configservice, vol_instance): + """This method maps a volume to a host. + + Add volume to the Device Masking Group that belongs to + a Masking View. + """ + volumename = vol_instance['ElementName'] + masking_group = self._find_device_masking_group() + + LOG.debug(_('AddMembers: ConfigServicie: %(service)s MaskingGroup: ' + '%(masking_group)s Members: %(vol)s') + % {'service': str(configservice), + 'masking_group': str(masking_group), + 'vol': str(vol_instance.path)}) + + rc, job =\ + self.conn.InvokeMethod('AddMembers', + configservice, + MaskingGroup=masking_group, + Members=[vol_instance.path]) + + if rc != 0L: + rc, errordesc = self._wait_for_job_complete(job) + if rc != 0L: + msg = (_('Error mapping volume %(vol)s. %(error)s') % + {'vol': volumename, 'error': errordesc}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + LOG.debug(_('AddMembers for volume %s completed successfully.') + % volumename) + + # Unmapping method for VMAX + def _remove_members(self, configservice, vol_instance): + """This method unmaps a volume from a host. 
+ + Removes volume from the Device Masking Group that belongs to + a Masking View. + """ + volumename = vol_instance['ElementName'] + masking_group = self._find_device_masking_group() + + LOG.debug(_('RemoveMembers: ConfigServicie: %(service)s ' + 'MaskingGroup: %(masking_group)s Members: %(vol)s') + % {'service': str(configservice), + 'masking_group': str(masking_group), + 'vol': str(vol_instance.path)}) + + rc, job = self.conn.InvokeMethod('RemoveMembers', configservice, + MaskingGroup=masking_group, + Members=[vol_instance.path]) + + if rc != 0L: + rc, errordesc = self._wait_for_job_complete(job) + if rc != 0L: + msg = (_('Error unmapping volume %(vol)s. %(error)s') + % {'vol': volumename, 'error': errordesc}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + LOG.debug(_('RemoveMembers for volume %s completed successfully.') + % volumename) + + def _map_lun(self, volume, connector): + """Maps a volume to the host.""" + volumename = volume['name'] + LOG.info(_('Map volume: %(volume)s') + % {'volume': volumename}) + + vol_instance = self._find_lun(volume) + storage_system = vol_instance['SystemName'] + + configservice = self._find_controller_configuration_service( + storage_system) + if configservice is None: + exception_message = (_("Cannot find Controller Configuration " + "Service for storage system %s") + % storage_system) + raise exception.VolumeBackendAPIException(data=exception_message) + + isVMAX = storage_system.find('SYMMETRIX') + if isVMAX > -1: + self._add_members(configservice, vol_instance) + else: + self._expose_paths(configservice, vol_instance, connector) + + def _unmap_lun(self, volume, connector): + """Unmaps a volume from the host.""" + volumename = volume['name'] + LOG.info(_('Unmap volume: %(volume)s') + % {'volume': volumename}) + + device_info = self.find_device_number(volume) + device_number = device_info['hostlunid'] + if device_number is None: + LOG.info(_("Volume %s is not mapped. 
No volume to unmap.")
+                     % (volumename))
+            return
+
+        vol_instance = self._find_lun(volume)
+        storage_system = vol_instance['SystemName']
+
+        configservice = self._find_controller_configuration_service(
+            storage_system)
+        if configservice is None:
+            exception_message = (_("Cannot find Controller Configuration "
+                                   "Service for storage system %s")
+                                 % storage_system)
+            raise exception.VolumeBackendAPIException(data=exception_message)
+
+        isVMAX = storage_system.find('SYMMETRIX')
+        if isVMAX > -1:
+            self._remove_members(configservice, vol_instance)
+        else:
+            self._hide_paths(configservice, vol_instance, connector)
+
+    def initialize_connection(self, volume, connector):
+        """Initializes the connection and returns connection info."""
+        volumename = volume['name']
+        LOG.info(_('Initialize connection: %(volume)s')
+                 % {'volume': volumename})
+        self.conn = self._get_ecom_connection()
+        device_info = self.find_device_number(volume)
+        device_number = device_info['hostlunid']
+        if device_number is not None:
+            LOG.info(_("Volume %s is already mapped.")
+                     % (volumename))
+        else:
+            self._map_lun(volume, connector)
+            # Find host lun id again after the volume is exported to the host
+            device_info = self.find_device_number(volume)
+
+        return device_info
+
+    def terminate_connection(self, volume, connector):
+        """Disallow connection from connector."""
+        volumename = volume['name']
+        LOG.info(_('Terminate connection: %(volume)s')
+                 % {'volume': volumename})
+        self.conn = self._get_ecom_connection()
+        self._unmap_lun(volume, connector)
+
+    def update_volume_stats(self):
+        """Retrieve stats info."""
+        LOG.debug(_("Updating volume stats"))
+        self.conn = self._get_ecom_connection()
+        storage_type = self._get_storage_type()
+
+        pool, storagesystem = self._find_pool(storage_type, True)
+
+        self.stats['total_capacity_gb'] = pool['TotalManagedSpace']
+        self.stats['free_capacity_gb'] = pool['RemainingManagedSpace']
+
+        return self.stats
+
+    def _get_storage_type(self, filename=None):
+        """Get the storage type from the config file."""
+        if filename is None:
+            filename = self.configuration.cinder_emc_config_file
+
+        file = open(filename, 'r')
+        data = file.read()
+        file.close()
+        dom = parseString(data)
+        storageTypes = dom.getElementsByTagName('StorageType')
+        if storageTypes is not None and len(storageTypes) > 0:
+            storageType = storageTypes[0].toxml()
+            storageType = storageType.replace('<StorageType>', '')
+            storageType = storageType.replace('</StorageType>', '')
+            LOG.debug(_("Found Storage Type: %s") % (storageType))
+            return storageType
+        else:
+            exception_message = (_("Storage type not found."))
+            LOG.error(exception_message)
+            raise exception.VolumeBackendAPIException(data=exception_message)
+
+    def _get_masking_view(self, filename=None):
+        if filename is None:
+            filename = self.configuration.cinder_emc_config_file
+
+        file = open(filename, 'r')
+        data = file.read()
+        file.close()
+        dom = parseString(data)
+        views = dom.getElementsByTagName('MaskingView')
+        if views is not None and len(views) > 0:
+            view = views[0].toxml().replace('<MaskingView>', '')
+            view = view.replace('</MaskingView>', '')
+            LOG.debug(_("Found Masking View: %s") % (view))
+            return view
+        else:
+            LOG.debug(_("Masking View not found."))
+            return None
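+
+    # NOTE: the _get_* helpers below parse a small flat XML config file.
+    # As an illustrative sketch (values and the exact layout here are
+    # inferred from the tag names parsed in these helpers, not taken from
+    # shipped documentation), the file could look like:
+    #
+    #   <?xml version="1.0" encoding="UTF-8"?>
+    #   <EMC>
+    #     <StorageType>Pool 0</StorageType>
+    #     <MaskingView>openstack_view</MaskingView>
+    #     <EcomServerIp>10.0.0.1</EcomServerIp>
+    #     <EcomServerPort>5988</EcomServerPort>
+    #     <EcomUserName>admin</EcomUserName>
+    #     <EcomPassword>password</EcomPassword>
+    #   </EMC>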
+
+    def _get_ecom_cred(self, filename=None):
+        if filename is None:
+            filename = self.configuration.cinder_emc_config_file
+
+        file = open(filename, 'r')
+        data = file.read()
+        file.close()
+        dom = parseString(data)
+        ecomUsers = dom.getElementsByTagName('EcomUserName')
+        if ecomUsers is not None and len(ecomUsers) > 0:
+            ecomUser = ecomUsers[0].toxml().replace('<EcomUserName>', '')
+            ecomUser = ecomUser.replace('</EcomUserName>', '')
+        ecomPasswds = dom.getElementsByTagName('EcomPassword')
+        if ecomPasswds is not None and len(ecomPasswds) > 0:
+            ecomPasswd = ecomPasswds[0].toxml().replace('<EcomPassword>', '')
+            ecomPasswd = ecomPasswd.replace('</EcomPassword>', '')
+        if ecomUser is not None and ecomPasswd is not None:
+            return ecomUser, ecomPasswd
+        else:
+            LOG.debug(_("Ecom user not found."))
+            return None
+
+    def _get_ecom_server(self, filename=None):
+        if filename is None:
+            filename = self.configuration.cinder_emc_config_file
+
+        file = open(filename, 'r')
+        data = file.read()
+        file.close()
+        dom = parseString(data)
+        ecomIps = dom.getElementsByTagName('EcomServerIp')
+        if ecomIps is not None and len(ecomIps) > 0:
+            ecomIp = ecomIps[0].toxml().replace('<EcomServerIp>', '')
+            ecomIp = ecomIp.replace('</EcomServerIp>', '')
+        ecomPorts = dom.getElementsByTagName('EcomServerPort')
+        if ecomPorts is not None and len(ecomPorts) > 0:
+            ecomPort = ecomPorts[0].toxml().replace('<EcomServerPort>', '')
+            ecomPort = ecomPort.replace('</EcomServerPort>', '')
+        if ecomIp is not None and ecomPort is not None:
+            LOG.debug(_("Ecom IP: %(ecomIp)s Port: %(ecomPort)s"),
+                      {'ecomIp': ecomIp, 'ecomPort': ecomPort})
+            return ecomIp, ecomPort
+        else:
+            LOG.debug(_("Ecom server not found."))
+            return None
+
+    def _get_ecom_connection(self, filename=None):
+        conn = pywbem.WBEMConnection(self.url, (self.user, self.passwd),
+                                     default_namespace='root/emc')
+        if conn is None:
+            exception_message = (_("Cannot connect to ECOM server"))
+            raise exception.VolumeBackendAPIException(data=exception_message)
+
+        return conn
+
+    def _find_replication_service(self, storage_system):
+        foundRepService = None
+        repservices = self.conn.EnumerateInstanceNames(
+            'EMC_ReplicationService')
+        for repservice in repservices:
+            if storage_system == repservice['SystemName']:
+                foundRepService = repservice
+                LOG.debug(_("Found Replication Service: %s")
+                          % (str(repservice)))
+                break
+
+        return foundRepService
+
+    def _find_storage_configuration_service(self, storage_system):
+        foundConfigService = None
+        configservices = self.conn.EnumerateInstanceNames(
+            'EMC_StorageConfigurationService')
+        for configservice in configservices:
+            if storage_system == configservice['SystemName']:
+                foundConfigService = configservice
+                LOG.debug(_("Found Storage Configuration Service: %s")
+                          % (str(configservice)))
+                break
+
+        return foundConfigService
+
+    def _find_controller_configuration_service(self, storage_system):
+        foundConfigService = None
+        configservices = self.conn.EnumerateInstanceNames(
+            'EMC_ControllerConfigurationService')
+        for configservice in configservices:
+            if storage_system == configservice['SystemName']:
+                foundConfigService = configservice
+                LOG.debug(_("Found Controller Configuration Service: %s")
+                          % (str(configservice)))
+                break
+
+        return foundConfigService
+
+    def _find_storage_hardwareid_service(self, storage_system):
+        foundConfigService = None
+        configservices = self.conn.EnumerateInstanceNames(
+            'EMC_StorageHardwareIDManagementService')
+        for configservice in configservices:
+            if storage_system == configservice['SystemName']:
+                foundConfigService = configservice
+                LOG.debug(_("Found Storage Hardware ID Management Service: %s")
+                          % (str(configservice)))
+                break
+
+        return foundConfigService
+
+    # Find pool based on storage_type
+    def _find_pool(self, storage_type, details=False):
+        foundPool = None
+        systemname = None
+        # Only get instance names if details flag is False;
+        # Otherwise get the whole instances
+        if details is False:
+            vpools = 
self.conn.EnumerateInstanceNames( + 'EMC_VirtualProvisioningPool') + upools = self.conn.EnumerateInstanceNames( + 'EMC_UnifiedStoragePool') + else: + vpools = self.conn.EnumerateInstances( + 'EMC_VirtualProvisioningPool') + upools = self.conn.EnumerateInstances( + 'EMC_UnifiedStoragePool') + + for upool in upools: + poolinstance = upool['InstanceID'] + # Example: CLARiiON+APM00115204878+U+Pool 0 + poolname, systemname = self._parse_pool_instance_id(poolinstance) + if poolname is not None and systemname is not None: + if str(storage_type) == str(poolname): + foundPool = upool + break + + if foundPool is None: + for vpool in vpools: + poolinstance = vpool['InstanceID'] + # Example: SYMMETRIX+000195900551+TP+Sol_Innov + poolname, systemname = self._parse_pool_instance_id( + poolinstance) + if poolname is not None and systemname is not None: + if str(storage_type) == str(poolname): + foundPool = vpool + break + + if foundPool is None: + exception_message = (_("Pool %(storage_type)s is not found.") + % {'storage_type': storage_type}) + LOG.error(exception_message) + raise exception.VolumeBackendAPIException(data=exception_message) + + if systemname is None: + exception_message = (_("Storage system not found for pool " + "%(storage_type)s.") + % {'storage_type': storage_type}) + LOG.error(exception_message) + raise exception.VolumeBackendAPIException(data=exception_message) + + LOG.debug(_("Pool: %(pool)s SystemName: %(systemname)s.") + % {'pool': str(foundPool), 'systemname': systemname}) + return foundPool, systemname + + def _parse_pool_instance_id(self, instanceid): + # Example of pool InstanceId: CLARiiON+APM00115204878+U+Pool 0 + poolname = None + systemname = None + endp = instanceid.rfind('+') + if endp > -1: + poolname = instanceid[endp + 1:] + + idarray = instanceid.split('+') + if len(idarray) > 2: + systemname = idarray[0] + '+' + idarray[1] + + LOG.debug(_("Pool name: %(poolname)s System name: %(systemname)s.") + % {'poolname': poolname, 'systemname': systemname}) + return poolname, systemname + + def _find_lun(self, volume): + foundinstance = None + try: + device_id = volume['provider_location'] + except Exception: + device_id = None + + volumename = volume['name'] + + names = self.conn.EnumerateInstanceNames('EMC_StorageVolume') + + for n in names: + if device_id is not None: + if n['DeviceID'] == device_id: + vol_instance = self.conn.GetInstance(n) + foundinstance = vol_instance + break + else: + continue + + else: + vol_instance = self.conn.GetInstance(n) + if vol_instance['ElementName'] == volumename: + foundinstance = vol_instance + volume['provider_location'] = foundinstance['DeviceID'] + break + + if foundinstance is None: + LOG.debug(_("Volume %(volumename)s not found on the array.") + % {'volumename': volumename}) + else: + LOG.debug(_("Volume name: %(volumename)s Volume instance: " + "%(vol_instance)s.") + % {'volumename': volumename, + 'vol_instance': str(foundinstance.path)}) + + return foundinstance + + def _find_storage_sync_sv_sv(self, snapshotname, volumename, + waitforsync=True): + foundsyncname = None + storage_system = None + percent_synced = 0 + + LOG.debug(_("Source: %(volumename)s Target: %(snapshotname)s.") + % {'volumename': volumename, 'snapshotname': snapshotname}) + + names = self.conn.EnumerateInstanceNames( + 'SE_StorageSynchronized_SV_SV') + + for n in names: + snapshot_instance = self.conn.GetInstance(n['SyncedElement'], + LocalOnly=False) + if snapshotname != snapshot_instance['ElementName']: + continue + + vol_instance = 
self.conn.GetInstance(n['SystemElement'], + LocalOnly=False) + if vol_instance['ElementName'] == volumename: + foundsyncname = n + storage_system = vol_instance['SystemName'] + if waitforsync: + sync_instance = self.conn.GetInstance(n, LocalOnly=False) + percent_synced = sync_instance['PercentSynced'] + break + + if foundsyncname is None: + LOG.debug(_("Source: %(volumename)s Target: %(snapshotname)s. " + "Storage Synchronized not found. ") + % {'volumename': volumename, + 'snapshotname': snapshotname}) + else: + LOG.debug(_("Storage system: %(storage_system)s " + "Storage Synchronized instance: %(sync)s.") + % {'storage_system': storage_system, + 'sync': str(foundsyncname)}) + # Wait for SE_StorageSynchronized_SV_SV to be fully synced + while waitforsync and percent_synced < 100: + time.sleep(10) + sync_instance = self.conn.GetInstance(foundsyncname, + LocalOnly=False) + percent_synced = sync_instance['PercentSynced'] + + return foundsyncname, storage_system + + def _find_initiator_names(self, connector): + foundinitiatornames = [] + iscsi = 'iscsi' + fc = 'fc' + name = 'initiator name' + if self.protocol.lower() == iscsi and connector['initiator']: + foundinitiatornames.append(connector['initiator']) + elif self.protocol.lower() == fc and connector['wwpns']: + for wwn in connector['wwpns']: + foundinitiatornames.append(wwn) + name = 'world wide port names' + + if foundinitiatornames is None or len(foundinitiatornames) == 0: + msg = (_('Error finding %s.') % name) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + LOG.debug(_("Found %(name)s: %(initiator)s.") + % {'name': name, + 'initiator': foundinitiatornames}) + return foundinitiatornames + + def _wait_for_job_complete(self, job): + jobinstancename = job['Job'] + + while True: + jobinstance = self.conn.GetInstance(jobinstancename, + LocalOnly=False) + jobstate = jobinstance['JobState'] + # From ValueMap of JobState in CIM_ConcreteJob + # 2L=New, 3L=Starting, 4L=Running, 32767L=Queue Pending + # ValueMap("2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13..32767, + # 32768..65535"), + # Values("New, Starting, Running, Suspended, Shutting Down, + # Completed, Terminated, Killed, Exception, Service, + # Query Pending, DMTF Reserved, Vendor Reserved")] + if jobstate in [2L, 3L, 4L, 32767L]: + time.sleep(10) + else: + break + + rc = jobinstance['ErrorCode'] + errordesc = jobinstance['ErrorDescription'] + + return rc, errordesc + + # Find LunMaskingSCSIProtocolController for the local host on the + # specified storage system + def _find_lunmasking_scsi_protocol_controller(self, storage_system, + connector): + foundCtrl = None + initiators = self._find_initiator_names(connector) + controllers = self.conn.EnumerateInstanceNames( + 'EMC_LunMaskingSCSIProtocolController') + for ctrl in controllers: + if storage_system != ctrl['SystemName']: + continue + associators =\ + self.conn.Associators(ctrl, + resultClass='EMC_StorageHardwareID') + for assoc in associators: + # if EMC_StorageHardwareID matches the initiator, + # we found the existing EMC_LunMaskingSCSIProtocolController + # (Storage Group for VNX) + # we can use for masking a new LUN + hardwareid = assoc['StorageID'] + for initiator in initiators: + if hardwareid.lower() == initiator.lower(): + foundCtrl = ctrl + break + + if foundCtrl is not None: + break + + if foundCtrl is not None: + break + + LOG.debug(_("LunMaskingSCSIProtocolController for storage system " + "%(storage_system)s and initiator %(initiator)s is " + "%(ctrl)s.") + % {'storage_system': storage_system, + 
'initiator': initiators, + 'ctrl': str(foundCtrl)}) + return foundCtrl + + # Find LunMaskingSCSIProtocolController for the local host and the + # specified storage volume + def _find_lunmasking_scsi_protocol_controller_for_vol(self, vol_instance, + connector): + foundCtrl = None + initiators = self._find_initiator_names(connector) + controllers =\ + self.conn.AssociatorNames( + vol_instance.path, + resultClass='EMC_LunMaskingSCSIProtocolController') + + for ctrl in controllers: + associators =\ + self.conn.Associators( + ctrl, + resultClass='EMC_StorageHardwareID') + for assoc in associators: + # if EMC_StorageHardwareID matches the initiator, + # we found the existing EMC_LunMaskingSCSIProtocolController + # (Storage Group for VNX) + # we can use for masking a new LUN + hardwareid = assoc['StorageID'] + for initiator in initiators: + if hardwareid.lower() == initiator.lower(): + foundCtrl = ctrl + break + + if foundCtrl is not None: + break + + if foundCtrl is not None: + break + + LOG.debug(_("LunMaskingSCSIProtocolController for storage volume " + "%(vol)s and initiator %(initiator)s is %(ctrl)s.") + % {'vol': str(vol_instance.path), 'initiator': initiators, + 'ctrl': str(foundCtrl)}) + return foundCtrl + + # Find out how many volumes are mapped to a host + # associated to the LunMaskingSCSIProtocolController + def get_num_volumes_mapped(self, volume, connector): + numVolumesMapped = 0 + volumename = volume['name'] + vol_instance = self._find_lun(volume) + if vol_instance is None: + msg = (_('Volume %(name)s not found on the array. ' + 'Cannot determine if there are volumes mapped.') + % {'name': volumename}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + storage_system = vol_instance['SystemName'] + + ctrl = self._find_lunmasking_scsi_protocol_controller( + storage_system, + connector) + + LOG.debug(_("LunMaskingSCSIProtocolController for storage system " + "%(storage)s and %(connector)s is %(ctrl)s.") + % {'storage': storage_system, + 'connector': connector, + 'ctrl': str(ctrl)}) + + associators = self.conn.Associators( + ctrl, + resultClass='EMC_StorageVolume') + + numVolumesMapped = len(associators) + + LOG.debug(_("Found %(numVolumesMapped)d volumes on storage system " + "%(storage)s mapped to %(connector)s.") + % {'numVolumesMapped': numVolumesMapped, + 'storage': storage_system, + 'connector': connector}) + + return numVolumesMapped + + # Find an available device number that a host can see + def _find_avail_device_number(self, storage_system): + out_device_number = '000000' + out_num_device_number = 0 + numlist = [] + myunitnames = [] + + unitnames = self.conn.EnumerateInstanceNames( + 'CIM_ProtocolControllerForUnit') + for unitname in unitnames: + controller = unitname['Antecedent'] + if storage_system != controller['SystemName']: + continue + classname = controller['CreationClassName'] + index = classname.find('LunMaskingSCSIProtocolController') + if index > -1: + unitinstance = self.conn.GetInstance(unitname, + LocalOnly=False) + numDeviceNumber = int(unitinstance['DeviceNumber']) + numlist.append(numDeviceNumber) + myunitnames.append(unitname) + + maxnum = max(numlist) + out_num_device_number = maxnum + 1 + + out_device_number = '%06d' % out_num_device_number + + LOG.debug(_("Available device number on %(storage)s: %(device)s.") + % {'storage': storage_system, 'device': out_device_number}) + return out_device_number + + # Find a device number that a host can see for a volume + def find_device_number(self, volume): + out_num_device_number = None + +
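# The dict assembled at the end of this method has the shape, e.g.: + # {'hostlunid': 1, + # 'storagesystem': 'CLARiiON+APM00123907237', + # 'owningsp': 'SP_A'} + # (values illustrative; 'owningsp' stays None for VMAX, which lacks + # the owning-SP property). +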
volumename = volume['name'] + vol_instance = self._find_lun(volume) + storage_system = vol_instance['SystemName'] + sp = None + try: + sp = vol_instance['EMCCurrentOwningStorageProcessor'] + except KeyError: + # VMAX LUN doesn't have this property + pass + + unitnames = self.conn.ReferenceNames( + vol_instance.path, + ResultClass='CIM_ProtocolControllerForUnit') + + for unitname in unitnames: + controller = unitname['Antecedent'] + classname = controller['CreationClassName'] + index = classname.find('LunMaskingSCSIProtocolController') + if index > -1: # VNX + # Get an instance of CIM_ProtocolControllerForUnit + unitinstance = self.conn.GetInstance(unitname, + LocalOnly=False) + numDeviceNumber = int(unitinstance['DeviceNumber'], 16) + out_num_device_number = numDeviceNumber + break + else: + index = classname.find('Symm_LunMaskingView') + if index > -1: # VMAX + unitinstance = self.conn.GetInstance(unitname, + LocalOnly=False) + numDeviceNumber = int(unitinstance['DeviceNumber'], 16) + out_num_device_number = numDeviceNumber + break + + if out_num_device_number is None: + LOG.info(_("Device number not found for volume " + "%(volumename)s %(vol_instance)s.") % + {'volumename': volumename, + 'vol_instance': str(vol_instance.path)}) + else: + LOG.debug(_("Found device number %(device)d for volume " + "%(volumename)s %(vol_instance)s.") % + {'device': out_num_device_number, + 'volumename': volumename, + 'vol_instance': str(vol_instance.path)}) + + data = {'hostlunid': out_num_device_number, + 'storagesystem': storage_system, + 'owningsp': sp} + + LOG.debug(_("Device info: %(data)s.") % {'data': data}) + + return data + + def _find_device_masking_group(self): + """Finds the Device Masking Group in a masking view.""" + foundMaskingGroup = None + maskingview_name = self._get_masking_view() + + maskingviews = self.conn.EnumerateInstanceNames( + 'EMC_LunMaskingSCSIProtocolController') + for view in maskingviews: + instance = self.conn.GetInstance(view, LocalOnly=False) + if maskingview_name == instance['ElementName']: + foundView = view + break + + groups = self.conn.AssociatorNames( + foundView, + ResultClass='SE_DeviceMaskingGroup') + foundMaskingGroup = groups[0] + + LOG.debug(_("Masking view: %(view)s DeviceMaskingGroup: %(masking)s.") + % {'view': maskingview_name, + 'masking': str(foundMaskingGroup)}) + + return foundMaskingGroup + + # Find a StorageProcessorSystem given sp and storage system + def _find_storage_processor_system(self, owningsp, storage_system): + foundSystem = None + systems = self.conn.EnumerateInstanceNames( + 'EMC_StorageProcessorSystem') + for system in systems: + # Clar_StorageProcessorSystem.CreationClassName= + # "Clar_StorageProcessorSystem",Name="CLARiiON+APM00123907237+SP_A" + idarray = system['Name'].split('+') + if len(idarray) > 2: + storsystemname = idarray[0] + '+' + idarray[1] + sp = idarray[2] + + if (storage_system == storsystemname and + owningsp == sp): + foundSystem = system + LOG.debug(_("Found Storage Processor System: %s") + % (str(system))) + break + + return foundSystem + + # Find EMC_iSCSIProtocolEndpoint for the specified sp + def _find_iscsi_protocol_endpoints(self, owningsp, storage_system): + foundEndpoints = [] + + processor = self._find_storage_processor_system( + owningsp, + storage_system) + + associators = self.conn.Associators( + processor, + resultClass='EMC_iSCSIProtocolEndpoint') + for assoc in associators: + # Name = iqn.1992-04.com.emc:cx.apm00123907237.a8,t,0x0001 + # SystemName = CLARiiON+APM00123907237+SP_A+8 + arr = 
assoc['SystemName'].split('+') + if len(arr) > 2: + processor_name = arr[0] + '+' + arr[1] + '+' + arr[2] + if processor_name == processor['Name']: + arr2 = assoc['Name'].split(',') + if len(arr2) > 1: + foundEndpoints.append(arr2[0]) + + LOG.debug(_("iSCSIProtocolEndpoint for storage system " + "%(storage_system)s and SP %(sp)s is " + "%(endpoint)s.") + % {'storage_system': storage_system, + 'sp': owningsp, + 'endpoint': str(foundEndpoints)}) + return foundEndpoints + + def _getnum(self, num, datatype): + try: + result = { + '8': pywbem.Uint8(num), + '16': pywbem.Uint16(num), + '32': pywbem.Uint32(num), + '64': pywbem.Uint64(num) + } + result = result.get(datatype, num) + except NameError: + result = num + + return result + + # Find target WWNs + def get_target_wwns(self, storage_system, connector): + target_wwns = [] + + configservice = self._find_storage_hardwareid_service( + storage_system) + if configservice is None: + exception_msg = (_("Error finding Storage Hardware ID Service.")) + LOG.error(exception_msg) + raise exception.VolumeBackendAPIException(data=exception_msg) + + hardwareids = self._find_storage_hardwareids(connector) + + LOG.debug(_('EMCGetTargetEndpoints: Service: %(service)s ' + 'Storage HardwareIDs: %(hardwareids)s.') + % {'service': str(configservice), + 'hardwareids': str(hardwareids)}) + + for hardwareid in hardwareids: + rc, targetendpoints = self.conn.InvokeMethod( + 'EMCGetTargetEndpoints', + configservice, + HardwareId=hardwareid) + + if rc != 0L: + msg = (_('Error finding Target WWNs.')) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + endpoints = targetendpoints['TargetEndpoints'] + for targetendpoint in endpoints: + wwn = targetendpoint['Name'] + # Add target wwn to the list if it is not already there + if not any(d.get('wwn', None) == wwn for d in target_wwns): + target_wwns.append({'wwn': wwn}) + LOG.debug(_('Add target WWN: %s.') % wwn) + + LOG.debug(_('Target WWNs: %s.') % target_wwns) + + return target_wwns + + # Find Storage Hardware IDs + def _find_storage_hardwareids(self, connector): + foundInstances = [] + wwpns = self._find_initiator_names(connector) + hardwareids = self.conn.EnumerateInstances( + 'SE_StorageHardwareID') + for hardwareid in hardwareids: + storid = hardwareid['StorageID'] + for wwpn in wwpns: + if wwpn.lower() == storid.lower(): + foundInstances.append(hardwareid.path) + + LOG.debug(_("Storage Hardware IDs for %(wwpns)s is " + "%(foundInstances)s.") + % {'wwpns': str(wwpns), + 'foundInstances': str(foundInstances)}) + + return foundInstances diff --git a/cinder/volume/drivers/emc/emc_smis_iscsi.py b/cinder/volume/drivers/emc/emc_smis_iscsi.py new file mode 100644 index 0000000000..550d64ed4c --- /dev/null +++ b/cinder/volume/drivers/emc/emc_smis_iscsi.py @@ -0,0 +1,240 @@ +# Copyright (c) 2012 EMC Corporation. +# Copyright (c) 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +ISCSI Drivers for EMC VNX and VMAX arrays based on SMI-S. 
+ +""" + + +from cinder import exception +from cinder.openstack.common import log as logging +from cinder.volume import driver +from cinder.volume.drivers.emc import emc_smis_common + +LOG = logging.getLogger(__name__) + + +class EMCSMISISCSIDriver(driver.ISCSIDriver): + """EMC ISCSI Drivers for VMAX and VNX using SMI-S.""" + + VERSION = "1.0.0" + + def __init__(self, *args, **kwargs): + + super(EMCSMISISCSIDriver, self).__init__(*args, **kwargs) + self.common =\ + emc_smis_common.EMCSMISCommon('iSCSI', + configuration=self.configuration) + + def check_for_setup_error(self): + pass + + def create_volume(self, volume): + """Creates an EMC (VMAX/VNX) volume.""" + self.common.create_volume(volume) + + def create_volume_from_snapshot(self, volume, snapshot): + """Creates a volume from a snapshot.""" + self.common.create_volume_from_snapshot(volume, snapshot) + + def create_cloned_volume(self, volume, src_vref): + """Creates a cloned volume.""" + self.common.create_cloned_volume(volume, src_vref) + + def delete_volume(self, volume): + """Deletes an EMC volume.""" + self.common.delete_volume(volume) + + def create_snapshot(self, snapshot): + """Creates a snapshot.""" + self.common.create_snapshot(snapshot) + + def delete_snapshot(self, snapshot): + """Deletes a snapshot.""" + self.common.delete_snapshot(snapshot) + + def ensure_export(self, context, volume): + """Driver entry point to get the export info for an existing volume.""" + pass + + def create_export(self, context, volume): + """Driver entry point to get the export info for a new volume.""" + return self.common.create_export(context, volume) + + def remove_export(self, context, volume): + """Driver entry point to remove an export for a volume.""" + pass + + def check_for_export(self, context, volume_id): + """Make sure volume is exported.""" + pass + + def initialize_connection(self, volume, connector): + """Initializes the connection and returns connection info. + + The iscsi driver returns a driver_volume_type of 'iscsi'. + The format of the driver data is defined in _get_iscsi_properties. + Example return value:: + + { + 'driver_volume_type': 'iscsi', + 'data': { + 'target_discovered': True, + 'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001', + 'target_portal': '127.0.0.1:3260', + 'volume_id': 1, + } + } + + """ + self.common.initialize_connection(volume, connector) + + iscsi_properties = self._get_iscsi_properties(volume) + return { + 'driver_volume_type': 'iscsi', + 'data': iscsi_properties + } + + def _do_iscsi_discovery(self, volume): + + LOG.warn(_("ISCSI provider_location not stored, using discovery")) + + (out, _err) = self._execute('iscsiadm', '-m', 'discovery', + '-t', 'sendtargets', '-p', + self.configuration.iscsi_ip_address, + run_as_root=True) + targets = [] + for target in out.splitlines(): + targets.append(target) + + return targets + + def _get_iscsi_properties(self, volume): + """Gets iscsi configuration. + + We ideally get saved information in the volume entity, but fall back + to discovery if need be. Discovery may be completely removed in the + future. The properties are: + + :target_discovered: boolean indicating whether discovery was used + + :target_iqn: the IQN of the iSCSI target + + :target_portal: the portal of the iSCSI target + + :target_lun: the lun of the iSCSI target + + :volume_id: the id of the volume (currently used by xen) + + :auth_method:, :auth_username:, :auth_password: + + the authentication details.
Right now, either auth_method is not + present meaning no authentication, or auth_method == `CHAP` + meaning use CHAP with the specified credentials. + """ + properties = {} + + location = self._do_iscsi_discovery(volume) + if not location: + raise exception.InvalidVolume(_("Could not find iSCSI export " + "for volume %s") % + (volume['name'])) + + LOG.debug(_("ISCSI Discovery: Found %s") % (location)) + properties['target_discovered'] = True + + device_info = self.common.find_device_number(volume) + if device_info is None or device_info['hostlunid'] is None: + exception_message = (_("Cannot find device number for volume %s") + % volume['name']) + raise exception.VolumeBackendAPIException(data=exception_message) + + device_number = device_info['hostlunid'] + storage_system = device_info['storagesystem'] + + # sp is "SP_A" or "SP_B" + sp = device_info['owningsp'] + endpoints = [] + if sp: + # endpoints example: + # [iqn.1992-04.com.emc:cx.apm00123907237.a8, + # iqn.1992-04.com.emc:cx.apm00123907237.a9] + endpoints = self.common._find_iscsi_protocol_endpoints( + sp, storage_system) + + foundEndpoint = False + for loc in location: + results = loc.split(" ") + properties['target_portal'] = results[0].split(",")[0] + properties['target_iqn'] = results[1] + # owning sp is None for VMAX + # for VNX, find the target_iqn that matches the endpoint + # target_iqn example: iqn.1992-04.com.emc:cx.apm00123907237.a8 + # or iqn.1992-04.com.emc:cx.apm00123907237.b8 + if not sp: + break + for endpoint in endpoints: + if properties['target_iqn'] == endpoint: + LOG.debug(_("Found iSCSI endpoint: %s") % endpoint) + foundEndpoint = True + break + if foundEndpoint: + break + + if sp and not foundEndpoint: + LOG.warn(_("ISCSI endpoint not found for SP %(sp)s on " + "storage system %(storage)s.") + % {'sp': sp, + 'storage': storage_system}) + + properties['target_lun'] = device_number + + properties['volume_id'] = volume['id'] + + auth = volume['provider_auth'] + if auth: + (auth_method, auth_username, auth_secret) = auth.split() + + properties['auth_method'] = auth_method + properties['auth_username'] = auth_username + properties['auth_password'] = auth_secret + + LOG.debug(_("ISCSI properties: %s") % (properties)) + + return properties + + def terminate_connection(self, volume, connector, **kwargs): + """Disallow connection from connector.""" + self.common.terminate_connection(volume, connector) + + def get_volume_stats(self, refresh=False): + """Get volume stats. + + If 'refresh' is True, update the stats first. + """ + if refresh: + self.update_volume_stats() + + return self._stats + + def update_volume_stats(self): + """Retrieve stats info from volume group.""" + LOG.debug(_("Updating volume stats")) + data = self.common.update_volume_stats() + backend_name = self.configuration.safe_get('volume_backend_name') + data['volume_backend_name'] = backend_name or 'EMCSMISISCSIDriver' + data['storage_protocol'] = 'iSCSI' + self._stats = data diff --git a/cinder/volume/drivers/eqlx.py b/cinder/volume/drivers/eqlx.py new file mode 100644 index 0000000000..b5e7fa4f2f --- /dev/null +++ b/cinder/volume/drivers/eqlx.py @@ -0,0 +1,466 @@ +# Copyright (c) 2013 Dell Inc. +# Copyright 2013 OpenStack LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License.
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Volume driver for Dell EqualLogic Storage.""" + +import functools +import random + +import eventlet +from eventlet import greenthread +import greenlet +from oslo.config import cfg + +from cinder import exception +from cinder.openstack.common import excutils +from cinder.openstack.common import log as logging +from cinder.openstack.common import processutils +from cinder import utils +from cinder.volume.drivers.san import SanISCSIDriver + +LOG = logging.getLogger(__name__) + +eqlx_opts = [ + cfg.StrOpt('eqlx_group_name', + default='group-0', + help='Group name to use for creating volumes'), + cfg.IntOpt('eqlx_cli_timeout', + default=30, + help='Timeout for the Group Manager cli command execution'), + cfg.IntOpt('eqlx_cli_max_retries', + default=5, + help='Maximum retry count for reconnection'), + cfg.BoolOpt('eqlx_use_chap', + default=False, + help='Use CHAP authentication for targets?'), + cfg.StrOpt('eqlx_chap_login', + default='admin', + help='Existing CHAP account name'), + cfg.StrOpt('eqlx_chap_password', + default='password', + help='Password for specified CHAP account name', + secret=True), + cfg.StrOpt('eqlx_pool', + default='default', + help='Pool in which volumes will be created') +] + + +CONF = cfg.CONF +CONF.register_opts(eqlx_opts) + + +def with_timeout(f): + @functools.wraps(f) + def __inner(self, *args, **kwargs): + timeout = kwargs.pop('timeout', None) + gt = eventlet.spawn(f, self, *args, **kwargs) + if timeout is None: + return gt.wait() + else: + kill_thread = eventlet.spawn_after(timeout, gt.kill) + try: + res = gt.wait() + except greenlet.GreenletExit: + raise exception.VolumeBackendAPIException( + data="Command timed out") + else: + kill_thread.cancel() + return res + + return __inner + + +class DellEQLSanISCSIDriver(SanISCSIDriver): + """Implements commands for Dell EqualLogic SAN ISCSI management. + + To enable the driver add the following line to the cinder configuration: + volume_driver=cinder.volume.drivers.eqlx.DellEQLSanISCSIDriver + + Driver's prerequisites are: + - a separate volume group set up and running on the SAN + - SSH access to the SAN + - a special user must be created which must be able to + - create/delete volumes and snapshots; + - clone snapshots into volumes; + - modify volume access records; + + The access credentials to the SAN are provided by means of the following + flags: + san_ip=<ip_address> + san_login=<user name> + san_password=<user password> + san_private_key=<file containing SSH private key> + + Thin provision of volumes is enabled by default, to disable it use: + san_thin_provision=false + + In order to use target CHAP authentication (which is disabled by default) + the SAN administrator must create a local CHAP user and specify the + following flags for the driver: + eqlx_use_chap=true + eqlx_chap_login=<chap_login> + eqlx_chap_password=<chap_password> + + The eqlx_group_name parameter actually represents the CLI prompt message + without '>' ending. E.g. if prompt looks like 'group-0>', then the + parameter must be set to 'group-0' + + Also, the default CLI command execution timeout is 30 secs.
Adjustable by + eqlx_cli_timeout=<seconds> + """ + + VERSION = "1.0.0" + + def __init__(self, *args, **kwargs): + super(DellEQLSanISCSIDriver, self).__init__(*args, **kwargs) + self.configuration.append_config_values(eqlx_opts) + self._group_ip = None + self.sshpool = None + + def _get_output(self, chan): + out = '' + ending = '%s> ' % self.configuration.eqlx_group_name + while not out.endswith(ending): + out += chan.recv(102400) + + LOG.debug(_("CLI output\n%s"), out) + return out.splitlines() + + def _get_prefixed_value(self, lines, prefix): + for line in lines: + if line.startswith(prefix): + return line[len(prefix):] + return + + @with_timeout + def _ssh_execute(self, ssh, command, *arg, **kwargs): + transport = ssh.get_transport() + chan = transport.open_session() + chan.invoke_shell() + + LOG.debug(_("Reading CLI MOTD")) + self._get_output(chan) + + cmd = 'stty columns 255' + LOG.debug(_("Setting CLI terminal width: '%s'"), cmd) + chan.send(cmd + '\r') + out = self._get_output(chan) + + LOG.debug(_("Sending CLI command: '%s'"), command) + chan.send(command + '\r') + out = self._get_output(chan) + + chan.close() + + if any(line.startswith(('% Error', 'Error:')) for line in out): + desc = _("Error executing EQL command") + cmdout = '\n'.join(out) + LOG.error(cmdout) + raise processutils.ProcessExecutionError( + stdout=cmdout, cmd=command, description=desc) + return out + + def _run_ssh(self, cmd_list, attempts=1): + utils.check_ssh_injection(cmd_list) + command = ' '.join(cmd_list) + + if not self.sshpool: + password = self.configuration.san_password + privatekey = self.configuration.san_private_key + min_size = self.configuration.ssh_min_pool_conn + max_size = self.configuration.ssh_max_pool_conn + self.sshpool = utils.SSHPool(self.configuration.san_ip, + self.configuration.san_ssh_port, + self.configuration.ssh_conn_timeout, + self.configuration.san_login, + password=password, + privatekey=privatekey, + min_size=min_size, + max_size=max_size) + try: + total_attempts = attempts + with self.sshpool.item() as ssh: + while attempts > 0: + attempts -= 1 + try: + LOG.info(_('EQL-driver: executing "%s"') % command) + return self._ssh_execute( + ssh, command, + timeout=self.configuration.eqlx_cli_timeout) + except processutils.ProcessExecutionError: + raise + except Exception as e: + LOG.exception(e) + greenthread.sleep(random.randint(20, 500) / 100.0) + msg = (_("SSH Command failed after '%(total_attempts)r' " + "attempts: '%(command)s'") % + {'total_attempts': total_attempts, 'command': command}) + raise exception.VolumeBackendAPIException(data=msg) + + except Exception: + with excutils.save_and_reraise_exception(): + LOG.error(_("Error running SSH command: %s") % command) + + def _eql_execute(self, *args, **kwargs): + return self._run_ssh( + args, attempts=self.configuration.eqlx_cli_max_retries) + + def _get_volume_data(self, lines): + prefix = 'iSCSI target name is ' + target_name = self._get_prefixed_value(lines, prefix)[:-1] + lun_id = "%s:%s,1 %s 0" % (self._group_ip, '3260', target_name) + model_update = {} + model_update['provider_location'] = lun_id + if self.configuration.eqlx_use_chap: + model_update['provider_auth'] = 'CHAP %s %s' % \ + (self.configuration.eqlx_chap_login, + self.configuration.eqlx_chap_password) + return model_update + + def _get_space_in_gb(self, val): + scale = 1.0 + part = 'GB' + if val.endswith('MB'): + scale = 1.0 / 1024 + part = 'MB' + elif val.endswith('TB'): + scale = 1.0 * 1024 + part = 'TB' + return scale * float(val.partition(part)[0]) + + def
_update_volume_stats(self): + """Retrieve stats info from eqlx group.""" + + LOG.debug(_("Updating volume stats")) + data = {} + backend_name = "eqlx" + if self.configuration: + backend_name = self.configuration.safe_get('volume_backend_name') + data["volume_backend_name"] = backend_name or 'eqlx' + data["vendor_name"] = 'Dell' + data["driver_version"] = self.VERSION + data["storage_protocol"] = 'iSCSI' + + data['reserved_percentage'] = 0 + data['QoS_support'] = False + + data['total_capacity_gb'] = 'infinite' + data['free_capacity_gb'] = 'infinite' + + for line in self._eql_execute('pool', 'select', + self.configuration.eqlx_pool, 'show'): + if line.startswith('TotalCapacity:'): + out_tup = line.rstrip().partition(' ') + data['total_capacity_gb'] = self._get_space_in_gb(out_tup[-1]) + if line.startswith('FreeSpace:'): + out_tup = line.rstrip().partition(' ') + data['free_capacity_gb'] = self._get_space_in_gb(out_tup[-1]) + + self._stats = data + + def _check_volume(self, volume): + """Check if the volume exists on the Array.""" + command = ['volume', 'select', volume['name'], 'show'] + try: + self._eql_execute(*command) + except processutils.ProcessExecutionError as err: + with excutils.save_and_reraise_exception(): + if err.stdout.find('does not exist.\n') > -1: + LOG.debug(_('Volume %s does not exist, ' + 'it may have already been deleted'), + volume['name']) + raise exception.VolumeNotFound(volume_id=volume['id']) + + def do_setup(self, context): + """Disable cli confirmation and tune output format.""" + try: + disabled_cli_features = ('confirmation', 'paging', 'events', + 'formatoutput') + for feature in disabled_cli_features: + self._eql_execute('cli-settings', feature, 'off') + + for line in self._eql_execute('grpparams', 'show'): + if line.startswith('Group-Ipaddress:'): + out_tup = line.rstrip().partition(' ') + self._group_ip = out_tup[-1] + + LOG.info(_("EQL-driver: Setup is complete, group IP is %s"), + self._group_ip) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.error(_('Failed to set up the Dell EqualLogic driver')) + + def create_volume(self, volume): + """Create a volume.""" + try: + cmd = ['volume', 'create', + volume['name'], "%sG" % (volume['size'])] + if self.configuration.eqlx_pool != 'default': + cmd.append('pool') + cmd.append(self.configuration.eqlx_pool) + if self.configuration.san_thin_provision: + cmd.append('thin-provision') + out = self._eql_execute(*cmd) + return self._get_volume_data(out) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.error(_('Failed to create volume %s'), volume['name']) + + def delete_volume(self, volume): + """Delete a volume.""" + try: + self._check_volume(volume) + self._eql_execute('volume', 'select', volume['name'], 'offline') + self._eql_execute('volume', 'delete', volume['name']) + except exception.VolumeNotFound: + LOG.warn(_('Volume %s was not found while trying to delete it'), + volume['name']) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.error(_('Failed to delete volume %s'), volume['name']) + + def create_snapshot(self, snapshot): + """Create snapshot of existing volume on appliance.""" + try: + out = self._eql_execute('volume', 'select', + snapshot['volume_name'], + 'snapshot', 'create-now') + prefix = 'Snapshot name is ' + snap_name = self._get_prefixed_value(out, prefix) + self._eql_execute('volume', 'select', snapshot['volume_name'], + 'snapshot', 'rename', snap_name, + snapshot['name']) + except Exception: + with
excutils.save_and_reraise_exception(): + LOG.error(_('Failed to create snapshot of volume %s'), + snapshot['volume_name']) + + def create_volume_from_snapshot(self, volume, snapshot): + """Create new volume from other volume's snapshot on appliance.""" + try: + out = self._eql_execute('volume', 'select', + snapshot['volume_name'], 'snapshot', + 'select', snapshot['name'], + 'clone', volume['name']) + return self._get_volume_data(out) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.error(_('Failed to create volume from snapshot %s'), + snapshot['name']) + + def create_cloned_volume(self, volume, src_vref): + """Creates a clone of the specified volume.""" + try: + src_volume_name = self.configuration.\ + volume_name_template % src_vref['id'] + out = self._eql_execute('volume', 'select', src_volume_name, + 'clone', volume['name']) + return self._get_volume_data(out) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.error(_('Failed to create clone of volume %s'), + volume['name']) + + def delete_snapshot(self, snapshot): + """Delete volume's snapshot.""" + try: + self._eql_execute('volume', 'select', snapshot['volume_name'], + 'snapshot', 'delete', snapshot['name']) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.error(_('Failed to delete snapshot %(snap)s of ' + 'volume %(vol)s'), + {'snap': snapshot['name'], + 'vol': snapshot['volume_name']}) + + def initialize_connection(self, volume, connector): + """Restrict access to a volume.""" + try: + cmd = ['volume', 'select', volume['name'], 'access', 'create', + 'initiator', connector['initiator']] + if self.configuration.eqlx_use_chap: + cmd.extend(['authmethod chap', 'username', + self.configuration.eqlx_chap_login]) + self._eql_execute(*cmd) + iscsi_properties = self._get_iscsi_properties(volume) + return { + 'driver_volume_type': 'iscsi', + 'data': iscsi_properties + } + except Exception: + with excutils.save_and_reraise_exception(): + LOG.error(_('Failed to initialize connection to volume %s'), + volume['name']) + + def terminate_connection(self, volume, connector, force=False, **kwargs): + """Remove access restrictions from a volume.""" + try: + self._eql_execute('volume', 'select', volume['name'], + 'access', 'delete', '1') + except Exception: + with excutils.save_and_reraise_exception(): + LOG.error(_('Failed to terminate connection to volume %s'), + volume['name']) + + def create_export(self, context, volume): + """Create an export of a volume. + + The driver has nothing to do here because the volume has already + been exported by the SAN right after its creation. + """ + pass + + def ensure_export(self, context, volume): + """Ensure an export of a volume. + + The driver has nothing to do here because the volume has already + been exported by the SAN right after its creation. We will just make + sure that the volume exists on the array and issue a warning. + """ + try: + self._check_volume(volume) + except exception.VolumeNotFound: + LOG.warn(_('Volume %s was not found! It may have been deleted'), + volume['name']) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.error(_('Failed to ensure export of volume %s'), + volume['name']) + + def remove_export(self, context, volume): + """Remove an export of a volume. + + The driver has nothing to do here because the volume has already + been exported by the SAN right after its creation. + Nothing to remove since there's nothing exported.
+ """ + pass + + def extend_volume(self, volume, new_size): + """Extend the size of the volume.""" + try: + self._eql_execute('volume', 'select', volume['name'], + 'size', "%sG" % new_size) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.error(_('Failed to extend_volume %(name)s from ' + '%(current_size)sGB to %(new_size)sGB'), + {'name': volume['name'], + 'current_size': volume['size'], + 'new_size': new_size}) + + def local_path(self, volume): + raise NotImplementedError() diff --git a/cinder/volume/drivers/glusterfs.py b/cinder/volume/drivers/glusterfs.py new file mode 100644 index 0000000000..c738be5a7b --- /dev/null +++ b/cinder/volume/drivers/glusterfs.py @@ -0,0 +1,1129 @@ +# Copyright (c) 2013 Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import errno +import hashlib +import json +import os +import stat +import tempfile +import time + +from oslo.config import cfg + +from cinder import compute +from cinder import db +from cinder import exception +from cinder.image import image_utils +from cinder.openstack.common import log as logging +from cinder import units +from cinder import utils +from cinder.volume.drivers import nfs + +LOG = logging.getLogger(__name__) + +volume_opts = [ + cfg.StrOpt('glusterfs_shares_config', + default='/etc/cinder/glusterfs_shares', + help='File with the list of available gluster shares'), + cfg.BoolOpt('glusterfs_sparsed_volumes', + default=True, + help=('Create volumes as sparsed files which take no space.' + 'If set to False volume is created as regular file.' + 'In such case volume creation takes a lot of time.')), + cfg.BoolOpt('glusterfs_qcow2_volumes', + default=False, + help=('Create volumes as QCOW2 files rather than raw files.')), + cfg.StrOpt('glusterfs_mount_point_base', + default='$state_path/mnt', + help='Base dir containing mount points for gluster shares.'), +] + +CONF = cfg.CONF +CONF.register_opts(volume_opts) +CONF.import_opt('volume_name_template', 'cinder.db') + + +class GlusterfsDriver(nfs.RemoteFsDriver): + """Gluster based cinder driver. Creates file on Gluster share for using it + as block device on hypervisor. + + Operations such as create/delete/extend volume/snapshot use locking on a + per-process basis to prevent multiple threads from modifying qcow2 chains + or the snapshot .info file simultaneously. 
+ """ + + driver_volume_type = 'glusterfs' + driver_prefix = 'glusterfs' + volume_backend_name = 'GlusterFS' + VERSION = '1.1.0' + + def __init__(self, *args, **kwargs): + super(GlusterfsDriver, self).__init__(*args, **kwargs) + self.configuration.append_config_values(volume_opts) + self._nova = None + + def do_setup(self, context): + """Any initialization the volume driver does while starting.""" + super(GlusterfsDriver, self).do_setup(context) + + self._nova = compute.API() + + config = self.configuration.glusterfs_shares_config + if not config: + msg = (_("There's no Gluster config file configured (%s)") % + 'glusterfs_shares_config') + LOG.warn(msg) + raise exception.GlusterfsException(msg) + if not os.path.exists(config): + msg = (_("Gluster config file at %(config)s doesn't exist") % + {'config': config}) + LOG.warn(msg) + raise exception.GlusterfsException(msg) + + self.shares = {} + + try: + self._execute('mount.glusterfs', check_exit_code=False) + except OSError as exc: + if exc.errno == errno.ENOENT: + raise exception.GlusterfsException( + _('mount.glusterfs is not installed')) + else: + raise + + self._ensure_shares_mounted() + + def check_for_setup_error(self): + """Just to override parent behavior.""" + pass + + def _local_volume_dir(self, volume): + hashed = self._get_hash_str(volume['provider_location']) + path = '%s/%s' % (self.configuration.glusterfs_mount_point_base, + hashed) + return path + + def _local_path_volume(self, volume): + path_to_disk = '%s/%s' % ( + self._local_volume_dir(volume), + volume['name']) + + return path_to_disk + + def _local_path_volume_info(self, volume): + return '%s%s' % (self._local_path_volume(volume), '.info') + + def _qemu_img_info(self, path): + """Sanitize image_utils' qemu_img_info. + + This code expects to deal only with relative filenames. 
+ """ + + info = image_utils.qemu_img_info(path) + if info.image: + info.image = os.path.basename(info.image) + if info.backing_file: + info.backing_file = os.path.basename(info.backing_file) + + return info + + def get_active_image_from_info(self, volume): + """Returns filename of the active image from the info file.""" + + info_file = self._local_path_volume_info(volume) + + snap_info = self._read_info_file(info_file, empty_if_missing=True) + + if snap_info == {}: + # No info file = no snapshots exist + vol_path = os.path.basename(self._local_path_volume(volume)) + return vol_path + + return snap_info['active'] + + @utils.synchronized('glusterfs', external=False) + def create_cloned_volume(self, volume, src_vref): + """Creates a clone of the specified volume.""" + + LOG.info(_('Cloning volume %(src)s to volume %(dst)s') % + {'src': src_vref['id'], + 'dst': volume['id']}) + + if src_vref['status'] != 'available': + msg = _("Volume status must be 'available'.") + raise exception.InvalidVolume(msg) + + volume_name = CONF.volume_name_template % volume['id'] + + volume_info = {'provider_location': src_vref['provider_location'], + 'size': src_vref['size'], + 'id': volume['id'], + 'name': volume_name, + 'status': src_vref['status']} + temp_snapshot = {'volume_name': volume_name, + 'size': src_vref['size'], + 'volume_size': src_vref['size'], + 'name': 'clone-snap-%s' % src_vref['id'], + 'volume_id': src_vref['id'], + 'id': 'tmp-snap-%s' % src_vref['id'], + 'volume': src_vref} + self._create_snapshot(temp_snapshot) + try: + self._copy_volume_from_snapshot(temp_snapshot, + volume_info, + src_vref['size']) + + finally: + self._delete_snapshot(temp_snapshot) + + return {'provider_location': src_vref['provider_location']} + + @utils.synchronized('glusterfs', external=False) + def create_volume(self, volume): + """Creates a volume.""" + + self._ensure_shares_mounted() + + volume['provider_location'] = self._find_share(volume['size']) + + LOG.info(_('casted to %s') % volume['provider_location']) + + self._do_create_volume(volume) + + return {'provider_location': volume['provider_location']} + + @utils.synchronized('glusterfs', external=False) + def create_volume_from_snapshot(self, volume, snapshot): + """Creates a volume from a snapshot. + + Snapshot must not be the active snapshot. (offline) + """ + + if snapshot['status'] != 'available': + msg = _('Snapshot status must be "available" to clone.') + raise exception.InvalidSnapshot(msg) + + self._ensure_shares_mounted() + + volume['provider_location'] = self._find_share(volume['size']) + + self._do_create_volume(volume) + + self._copy_volume_from_snapshot(snapshot, + volume, + snapshot['volume_size']) + + return {'provider_location': volume['provider_location']} + + def _copy_volume_from_snapshot(self, snapshot, volume, volume_size): + """Copy data from snapshot to destination volume. + + This is done with a qemu-img convert to raw/qcow2 from the snapshot + qcow2. + """ + + LOG.debug(_("snapshot: %(snap)s, volume: %(vol)s, " + "volume_size: %(size)s") + % {'snap': snapshot['id'], + 'vol': volume['id'], + 'size': volume_size}) + + info_path = self._local_path_volume_info(snapshot['volume']) + snap_info = self._read_info_file(info_path) + vol_path = self._local_volume_dir(snapshot['volume']) + forward_file = snap_info[snapshot['id']] + forward_path = os.path.join(vol_path, forward_file) + + # Find the file which backs this file, which represents the point + # when this snapshot was created. 
+ img_info = self._qemu_img_info(forward_path) + path_to_snap_img = os.path.join(vol_path, img_info.backing_file) + + path_to_new_vol = self._local_path_volume(volume) + + LOG.debug(_("will copy from snapshot at %s") % path_to_snap_img) + + if self.configuration.glusterfs_qcow2_volumes: + out_format = 'qcow2' + else: + out_format = 'raw' + + image_utils.convert_image(path_to_snap_img, + path_to_new_vol, + out_format) + + self._set_rw_permissions_for_all(path_to_new_vol) + + @utils.synchronized('glusterfs', external=False) + def delete_volume(self, volume): + """Deletes a logical volume.""" + + if not volume['provider_location']: + LOG.warn(_('Volume %s does not have provider_location specified, ' + 'skipping'), volume['name']) + return + + self._ensure_share_mounted(volume['provider_location']) + + mounted_path = self.local_path(volume) + + self._execute('rm', '-f', mounted_path, run_as_root=True) + + @utils.synchronized('glusterfs', external=False) + def create_snapshot(self, snapshot): + """Apply locking to the create snapshot operation.""" + + return self._create_snapshot(snapshot) + + def _create_snapshot(self, snapshot): + """Create a snapshot. + + If volume is attached, call to Nova to create snapshot, + providing a qcow2 file. + Otherwise, create locally with qemu-img. + + A file named volume-<uuid>.info is stored with the volume + data and is a JSON table which contains a mapping between + Cinder snapshot UUIDs and filenames, as these associations + will change as snapshots are deleted. + + + Basic snapshot operation: + + 1. Initial volume file: + volume-1234 + + 2. Snapshot created: + volume-1234 <- volume-1234.aaaa + + volume-1234.aaaa becomes the new "active" disk image. + If the volume is not attached, this filename will be used to + attach the volume to a VM at volume-attach time. + If the volume is attached, the VM will switch to this file as + part of the snapshot process. + + Note that volume-1234.aaaa represents changes after snapshot + 'aaaa' was created. So the data for snapshot 'aaaa' is actually + in the backing file(s) of volume-1234.aaaa. + + This file has a qcow2 header recording the fact that volume-1234 is + its backing file. Delta changes since the snapshot was created are + stored in this file, and the backing file (volume-1234) does not + change. + + info file: { 'active': 'volume-1234.aaaa', + 'aaaa': 'volume-1234.aaaa' } + + 3. Second snapshot created: + volume-1234 <- volume-1234.aaaa <- volume-1234.bbbb + + volume-1234.bbbb now becomes the "active" disk image, recording + changes made to the volume. + + info file: { 'active': 'volume-1234.bbbb', + 'aaaa': 'volume-1234.aaaa', + 'bbbb': 'volume-1234.bbbb' } + + 4. First snapshot deleted: + volume-1234 <- volume-1234.aaaa(* now with bbbb's data) + + volume-1234.aaaa is removed (logically) from the snapshot chain. + The data from volume-1234.bbbb is merged into it. + + (*) Since bbbb's data was committed into the aaaa file, we have + "removed" aaaa's snapshot point but the .aaaa file now + represents snapshot with id "bbbb". + + + info file: { 'active': 'volume-1234.bbbb', + 'bbbb': 'volume-1234.aaaa' (* changed!) + } + + 5. Second snapshot deleted: + volume-1234 + + volume-1234.bbbb is removed from the snapshot chain, as above. + The base image, volume-1234, becomes the active image for this + volume again. If in-use, the VM begins using the volume-1234.bbbb + file immediately as part of the snapshot delete process.
+ + info file: { 'active': 'volume-1234' } + + For the above operations, Cinder handles manipulation of qcow2 files + when the volume is detached. When attached, Cinder creates and deletes + qcow2 files, but Nova is responsible for transitioning the VM between + them and handling live transfers of data between files as required. + """ + + status = snapshot['volume']['status'] + if status not in ['available', 'in-use']: + msg = _('Volume status must be "available" or "in-use"' + ' for snapshot. (is %s)') % status + raise exception.InvalidVolume(msg) + + if status == 'in-use': + # Perform online snapshot via Nova + context = snapshot['context'] + + backing_filename = self.get_active_image_from_info( + snapshot['volume']) + path_to_disk = self._local_path_volume(snapshot['volume']) + new_snap_path = '%s.%s' % ( + self._local_path_volume(snapshot['volume']), + snapshot['id']) + + self._create_qcow2_snap_file(snapshot, + backing_filename, + new_snap_path) + + connection_info = { + 'type': 'qcow2', + 'new_file': os.path.basename(new_snap_path), + 'snapshot_id': snapshot['id'] + } + + try: + result = self._nova.create_volume_snapshot( + context, + snapshot['volume_id'], + connection_info) + LOG.debug(_('nova call result: %s') % result) + except Exception as e: + LOG.error(_('Call to Nova to create snapshot failed')) + LOG.exception(e) + raise + + # Loop and wait for result + # Nova will call Cinderclient to update the status in the database + # An update of progress = '90%' means that Nova is done + seconds_elapsed = 0 + increment = 1 + timeout = 600 + while True: + s = db.snapshot_get(context, snapshot['id']) + + if s['status'] == 'creating': + if s['progress'] == '90%': + # Nova tasks completed successfully + break + + time.sleep(increment) + seconds_elapsed += increment + elif s['status'] == 'error': + + msg = _('Nova returned "error" status ' + 'while creating snapshot.') + raise exception.GlusterfsException(msg) + + LOG.debug(_('Status of snapshot %(id)s is now %(status)s') % { + 'id': snapshot['id'], + 'status': s['status'] + }) + + if 10 < seconds_elapsed <= 20: + increment = 2 + elif 20 < seconds_elapsed <= 60: + increment = 5 + elif 60 < seconds_elapsed: + increment = 10 + + if seconds_elapsed > timeout: + msg = _('Timed out while waiting for Nova update ' + 'for creation of snapshot %s.') % snapshot['id'] + raise exception.GlusterfsException(msg) + + info_path = self._local_path_volume(snapshot['volume']) + '.info' + snap_info = self._read_info_file(info_path, empty_if_missing=True) + snap_info['active'] = os.path.basename(new_snap_path) + snap_info[snapshot['id']] = os.path.basename(new_snap_path) + self._write_info_file(info_path, snap_info) + + return + + LOG.debug(_('create snapshot: %s') % snapshot) + LOG.debug(_('volume id: %s') % snapshot['volume_id']) + + path_to_disk = self._local_path_volume(snapshot['volume']) + self._create_snapshot_offline(snapshot, path_to_disk) + + def _create_qcow2_snap_file(self, snapshot, backing_filename, + new_snap_path): + """Create a QCOW2 file backed by another file.
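+ + Roughly equivalent to 'qemu-img create -f qcow2 -o backing_file=...' + followed by 'qemu-img rebase -u' to record the backing file format, + as the method body below does.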
+ + :param snapshot: snapshot reference + :param backing_filename: filename of file that will back the + new qcow2 file + :param new_snap_path: filename of new qcow2 file + """ + + backing_path_full_path = '%s/%s' % ( + self._local_volume_dir(snapshot['volume']), + backing_filename) + + command = ['qemu-img', 'create', '-f', 'qcow2', '-o', + 'backing_file=%s' % backing_path_full_path, new_snap_path] + self._execute(*command, run_as_root=True) + + info = self._qemu_img_info(backing_path_full_path) + backing_fmt = info.file_format + + command = ['qemu-img', 'rebase', '-u', + '-b', backing_filename, + '-F', backing_fmt, + new_snap_path] + self._execute(*command, run_as_root=True) + + def _create_snapshot_offline(self, snapshot, path_to_disk): + """Create snapshot (offline case).""" + + # Requires volume status = 'available' + + new_snap_path = '%s.%s' % (path_to_disk, snapshot['id']) + + backing_filename = self.get_active_image_from_info(snapshot['volume']) + + self._create_qcow2_snap_file(snapshot, + backing_filename, + new_snap_path) + + # Update info file + + info_path = self._local_path_volume_info(snapshot['volume']) + snap_info = self._read_info_file(info_path, + empty_if_missing=True) + + snap_info['active'] = os.path.basename(new_snap_path) + snap_info[snapshot['id']] = os.path.basename(new_snap_path) + self._write_info_file(info_path, snap_info) + + def _read_file(self, filename): + """This method is to make it easier to stub out code for testing. + + Returns a string representing the contents of the file. + """ + + with open(filename, 'r') as f: + return f.read() + + def _read_info_file(self, info_path, empty_if_missing=False): + """Return dict of snapshot information.""" + + if not os.path.exists(info_path): + if empty_if_missing is True: + return {} + + return json.loads(self._read_file(info_path)) + + def _write_info_file(self, info_path, snap_info): + if 'active' not in snap_info.keys(): + msg = _("'active' must be present when writing snap_info.") + raise exception.GlusterfsException(msg) + + with open(info_path, 'w') as f: + json.dump(snap_info, f, indent=1, sort_keys=True) + + def _get_matching_backing_file(self, backing_chain, snapshot_file): + return next(f for f in backing_chain + if f.get('backing-filename', '') == snapshot_file) + + @utils.synchronized('glusterfs', external=False) + def delete_snapshot(self, snapshot): + """Apply locking to the delete snapshot operation.""" + self._delete_snapshot(snapshot) + + def _delete_snapshot(self, snapshot): + """Delete a snapshot. + + If volume status is 'available', delete snapshot here in Cinder + using qemu-img. + + If volume status is 'in-use', calculate what qcow2 files need to + merge, and call to Nova to perform this operation. 
+ + :raises: InvalidVolume if status not acceptable + :raises: GlusterfsException(msg) if operation fails + :returns: None + + """ + + LOG.debug(_('deleting snapshot %s') % snapshot['id']) + + volume_status = snapshot['volume']['status'] + if volume_status not in ['available', 'in-use']: + msg = _('Volume status must be "available" or "in-use".') + raise exception.InvalidVolume(msg) + + self._ensure_share_writable( + self._local_volume_dir(snapshot['volume'])) + + # Determine the true snapshot file for this snapshot + # based on the .info file + info_path = self._local_path_volume(snapshot['volume']) + '.info' + snap_info = self._read_info_file(info_path, empty_if_missing=True) + + if snapshot['id'] not in snap_info: + # If snapshot info file is present, but snapshot record does not + # exist, do not attempt to delete. + # (This happens, for example, if snapshot_create failed due to lack + # of permission to write to the share.) + LOG.info(_('Snapshot record for %s is not present, allowing ' + 'snapshot_delete to proceed.') % snapshot['id']) + return + + snapshot_file = snap_info[snapshot['id']] + LOG.debug(_('snapshot_file for this snap is %s') % snapshot_file) + + snapshot_path = '%s/%s' % (self._local_volume_dir(snapshot['volume']), + snapshot_file) + + snapshot_path_img_info = self._qemu_img_info(snapshot_path) + + vol_path = self._local_volume_dir(snapshot['volume']) + + # Find what file has this as its backing file + active_file = self.get_active_image_from_info(snapshot['volume']) + active_file_path = '%s/%s' % (vol_path, active_file) + + if volume_status == 'in-use': + # Online delete + context = snapshot['context'] + + base_file = snapshot_path_img_info.backing_file + if base_file is None: + # There should always be at least the original volume + # file as base. 
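+ # (e.g. volume-1234 at the bottom of the qcow2 chain, per the + # _create_snapshot docstring).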
+ msg = _('No base file found for %s.') % snapshot_path + raise exception.GlusterfsException(msg) + + base_path = os.path.join( + self._local_volume_dir(snapshot['volume']), base_file) + base_file_img_info = self._qemu_img_info(base_path) + new_base_file = base_file_img_info.backing_file + + base_id = None + info_path = self._local_path_volume(snapshot['volume']) + '.info' + snap_info = self._read_info_file(info_path) + for key, value in snap_info.iteritems(): + if value == base_file and key != 'active': + base_id = key + break + if base_id is None: + # This means we are deleting the oldest snapshot + msg = _('No %(base_id)s found for %(file)s') % { + 'base_id': 'base_id', + 'file': snapshot_file} + LOG.debug(msg) + + online_delete_info = { + 'active_file': active_file, + 'snapshot_file': snapshot_file, + 'base_file': base_file, + 'base_id': base_id, + 'new_base_file': new_base_file + } + + return self._delete_snapshot_online(context, + snapshot, + online_delete_info) + + if snapshot_file == active_file: + # Need to merge snapshot_file into its backing file + # There is no top file + # T0 | T1 | + # base | snapshot_file | None + # (guaranteed to| (being deleted) | + # exist) | | + + base_file = snapshot_path_img_info.backing_file + + self._qemu_img_commit(snapshot_path) + self._execute('rm', '-f', snapshot_path, run_as_root=True) + + # Remove snapshot_file from info + info_path = self._local_path_volume(snapshot['volume']) + '.info' + snap_info = self._read_info_file(info_path) + + del(snap_info[snapshot['id']]) + # Active file has changed + snap_info['active'] = base_file + self._write_info_file(info_path, snap_info) + else: + # T0 | T1 | T2 | T3 + # base | snapshot_file | higher_file | highest_file + #(guaranteed to | (being deleted)|(guaranteed to | (may exist, + # exist, not | | exist, being |needs ptr update + # used here) | | committed down)| if so) + + backing_chain = self._get_backing_chain_for_path( + snapshot['volume'], active_file_path) + # This file is guaranteed to exist since we aren't operating on + # the active file. + higher_file = next((os.path.basename(f['filename']) + for f in backing_chain + if f.get('backing-filename', '') == + snapshot_file), + None) + if higher_file is None: + msg = _('No file found with %s as backing file.') %\ + snapshot_file + raise exception.GlusterfsException(msg) + + snap_info = self._read_info_file(info_path) + higher_id = next((i for i in snap_info + if snap_info[i] == higher_file + and i != 'active'), + None) + if higher_id is None: + msg = _('No snap found with %s as backing file.') %\ + higher_file + raise exception.GlusterfsException(msg) + + # Is there a file depending on higher_file? 
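+ # In the diagram above this is 'highest_file' (T3); it is None when + # higher_file is itself the active image and nothing points at it.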
+ highest_file = next((os.path.basename(f['filename']) + for f in backing_chain + if f.get('backing-filename', '') == + higher_file), + None) + if highest_file is None: + msg = _('No file depends on %s.') % higher_file + LOG.debug(msg) + + # Committing higher_file into snapshot_file + # And update pointer in highest_file + higher_file_path = '%s/%s' % (vol_path, higher_file) + self._qemu_img_commit(higher_file_path) + if highest_file is not None: + highest_file_path = '%s/%s' % (vol_path, highest_file) + info = self._qemu_img_info(snapshot_path) + snapshot_file_fmt = info.file_format + + backing_fmt = ('-F', snapshot_file_fmt) + self._execute('qemu-img', 'rebase', '-u', + '-b', snapshot_file, + highest_file_path, *backing_fmt, + run_as_root=True) + self._execute('rm', '-f', higher_file_path, run_as_root=True) + + # Remove snapshot_file from info + info_path = self._local_path_volume(snapshot['volume']) + '.info' + snap_info = self._read_info_file(info_path) + del(snap_info[snapshot['id']]) + snap_info[higher_id] = snapshot_file + if higher_file == active_file: + if highest_file is not None: + msg = _('Check condition failed: ' + '%s expected to be None.') % 'highest_file' + raise exception.GlusterfsException(msg) + # Active file has changed + snap_info['active'] = snapshot_file + self._write_info_file(info_path, snap_info) + + def _delete_snapshot_online(self, context, snapshot, info): + # Update info over the course of this method + # active file never changes + info_path = self._local_path_volume(snapshot['volume']) + '.info' + snap_info = self._read_info_file(info_path) + + if info['active_file'] == info['snapshot_file']: + # blockRebase/Pull base into active + # info['base'] => snapshot_file + + file_to_delete = info['base_file'] + if info['base_id'] is None: + # Passing base=none to blockRebase ensures that + # libvirt blanks out the qcow2 backing file pointer + new_base = None + else: + new_base = info['new_base_file'] + snap_info[info['base_id']] = info['snapshot_file'] + + delete_info = {'file_to_merge': new_base, + 'merge_target_file': None, # current + 'type': 'qcow2', + 'volume_id': snapshot['volume']['id']} + + del(snap_info[snapshot['id']]) + else: + # blockCommit snapshot into base + # info['base'] <= snapshot_file + # delete record of snapshot + file_to_delete = info['snapshot_file'] + + delete_info = {'file_to_merge': info['snapshot_file'], + 'merge_target_file': info['base_file'], + 'type': 'qcow2', + 'volume_id': snapshot['volume']['id']} + + del(snap_info[snapshot['id']]) + + try: + self._nova.delete_volume_snapshot( + context, + snapshot['id'], + delete_info) + except Exception as e: + LOG.error(_('Call to Nova delete snapshot failed')) + LOG.exception(e) + raise + + # Loop and wait for result + # Nova will call Cinderclient to update the status in the database + # An update of progress = '90%' means that Nova is done + seconds_elapsed = 0 + increment = 1 + timeout = 600 + while True: + s = db.snapshot_get(context, snapshot['id']) + + if s['status'] == 'deleting': + if s['progress'] == '90%': + # Nova tasks completed successfully + break + else: + msg = _('status of snapshot %s is ' + 'still "deleting"...
waiting') % snapshot['id'] + LOG.debug(msg) + time.sleep(increment) + seconds_elapsed += increment + else: + msg = _('Unable to delete snapshot %(id)s, ' + 'status: %(status)s.') % {'id': snapshot['id'], + 'status': s['status']} + raise exception.GlusterfsException(msg) + + if 10 < seconds_elapsed <= 20: + increment = 2 + elif 20 < seconds_elapsed <= 60: + increment = 5 + elif 60 < seconds_elapsed: + increment = 10 + + if seconds_elapsed > timeout: + msg = _('Timed out while waiting for Nova update ' + 'for deletion of snapshot %(id)s.') %\ + {'id': snapshot['id']} + raise exception.GlusterfsException(msg) + + # Write info file updated above + self._write_info_file(info_path, snap_info) + + # Delete stale file + path_to_delete = os.path.join( + self._local_volume_dir(snapshot['volume']), file_to_delete) + self._execute('rm', '-f', path_to_delete, run_as_root=True) + + def _get_backing_chain_for_path(self, volume, path): + """Returns list of dicts containing backing-chain information. + + Includes 'filename', and 'backing-filename' for each + applicable entry. + + Consider converting this to use --backing-chain and --output=json + when environment supports qemu-img 1.5.0. + + :param volume: volume reference + :param path: path to image file at top of chain + + """ + + output = [] + + info = self._qemu_img_info(path) + new_info = {} + new_info['filename'] = os.path.basename(path) + new_info['backing-filename'] = info.backing_file + + output.append(new_info) + + while new_info['backing-filename']: + filename = new_info['backing-filename'] + path = os.path.join(self._local_volume_dir(volume), filename) + info = self._qemu_img_info(path) + backing_filename = info.backing_file + new_info = {} + new_info['filename'] = filename + new_info['backing-filename'] = backing_filename + + output.append(new_info) + + return output + + def _qemu_img_commit(self, path): + return self._execute('qemu-img', 'commit', path, run_as_root=True) + + def ensure_export(self, ctx, volume): + """Synchronously recreates an export for a logical volume.""" + + self._ensure_share_mounted(volume['provider_location']) + + def create_export(self, ctx, volume): + """Exports the volume.""" + + pass + + def remove_export(self, ctx, volume): + """Removes an export for a logical volume.""" + + pass + + def validate_connector(self, connector): + pass + + @utils.synchronized('glusterfs', external=False) + def initialize_connection(self, volume, connector): + """Allow connection to connector and return connection info.""" + + # Find active qcow2 file + active_file = self.get_active_image_from_info(volume) + path = '%s/%s/%s' % (self.configuration.glusterfs_mount_point_base, + self._get_hash_str(volume['provider_location']), + active_file) + + data = {'export': volume['provider_location'], + 'name': active_file} + if volume['provider_location'] in self.shares: + data['options'] = self.shares[volume['provider_location']] + + # Test file for raw vs. 
qcow2 format + info = self._qemu_img_info(path) + data['format'] = info.file_format + if data['format'] not in ['raw', 'qcow2']: + msg = _('%s must be a valid raw or qcow2 image.') % path + raise exception.InvalidVolume(msg) + + return { + 'driver_volume_type': 'glusterfs', + 'data': data + } + + def terminate_connection(self, volume, connector, **kwargs): + """Disallow connection from connector.""" + pass + + @utils.synchronized('glusterfs', external=False) + def copy_volume_to_image(self, context, volume, image_service, image_meta): + """Copy the volume to the specified image.""" + + # If snapshots exist, flatten to a temporary image, and upload it + + active_file = self.get_active_image_from_info(volume) + active_file_path = '%s/%s' % (self._local_volume_dir(volume), + active_file) + info = self._qemu_img_info(active_file_path) + backing_file = info.backing_file + if backing_file: + snapshots_exist = True + else: + snapshots_exist = False + + root_file_fmt = info.file_format + + temp_path = None + + try: + if snapshots_exist or (root_file_fmt != 'raw'): + # Convert due to snapshots + # or volume data not being stored in raw format + # (upload_volume assumes raw format input) + temp_path = '%s/%s.temp_image.%s' % ( + self._local_volume_dir(volume), + volume['id'], + image_meta['id']) + + image_utils.convert_image(active_file_path, temp_path, 'raw') + upload_path = temp_path + else: + upload_path = active_file_path + + image_utils.upload_volume(context, + image_service, + image_meta, + upload_path) + finally: + if temp_path is not None: + self._execute('rm', '-f', temp_path) + + @utils.synchronized('glusterfs', external=False) + def extend_volume(self, volume, size_gb): + volume_path = self.local_path(volume) + volume_filename = os.path.basename(volume_path) + + # Ensure no snapshots exist for the volume + active_image = self.get_active_image_from_info(volume) + if volume_filename != active_image: + msg = _('Extend volume is only supported for this' + ' driver when no snapshots exist.') + raise exception.InvalidVolume(msg) + + info = self._qemu_img_info(volume_path) + backing_fmt = info.file_format + + if backing_fmt not in ['raw', 'qcow2']: + msg = _('Unrecognized backing format: %s') + raise exception.InvalidVolume(msg % backing_fmt) + + # qemu-img can resize both raw and qcow2 files + image_utils.resize_image(volume_path, size_gb) + + def _do_create_volume(self, volume): + """Create a volume on given glusterfs_share. 
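+
+        Depending on driver configuration the file backing the volume is
+        created as a qcow2 image, a sparse raw file, or a fully allocated
+        raw file (see the branches below).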
+ + :param volume: volume reference + """ + + volume_path = self.local_path(volume) + volume_size = volume['size'] + + LOG.debug(_("creating new volume at %s") % volume_path) + + if os.path.exists(volume_path): + msg = _('file already exists at %s') % volume_path + LOG.error(msg) + raise exception.InvalidVolume(reason=msg) + + if self.configuration.glusterfs_qcow2_volumes: + self._create_qcow2_file(volume_path, volume_size) + else: + if self.configuration.glusterfs_sparsed_volumes: + self._create_sparsed_file(volume_path, volume_size) + else: + self._create_regular_file(volume_path, volume_size) + + self._set_rw_permissions_for_all(volume_path) + + def _ensure_shares_mounted(self): + """Mount all configured GlusterFS shares.""" + + self._mounted_shares = [] + + self._load_shares_config(self.configuration.glusterfs_shares_config) + + for share in self.shares.keys(): + try: + self._ensure_share_mounted(share) + self._mounted_shares.append(share) + except Exception as exc: + LOG.warning(_('Exception during mounting %s') % (exc,)) + + LOG.debug(_('Available shares: %s') % str(self._mounted_shares)) + + def _ensure_share_writable(self, path): + """Ensure that the Cinder user can write to the share. + + If not, raise an exception. + + :param path: path to test + :raises: GlusterfsException + :returns: None + """ + + prefix = '.cinder-write-test-' + str(os.getpid()) + '-' + + try: + tempfile.NamedTemporaryFile(prefix=prefix, dir=path) + except OSError: + msg = _('GlusterFS share at %(dir)s is not writable by the ' + 'Cinder volume service. Snapshot operations will not be ' + 'supported.') % {'dir': path} + raise exception.GlusterfsException(msg) + + def _ensure_share_mounted(self, glusterfs_share): + """Mount GlusterFS share. + :param glusterfs_share: string + """ + mount_path = self._get_mount_point_for_share(glusterfs_share) + self._mount_glusterfs(glusterfs_share, mount_path, ensure=True) + + # Ensure we can write to this share + group_id = os.getegid() + current_group_id = utils.get_file_gid(mount_path) + current_mode = utils.get_file_mode(mount_path) + + if group_id != current_group_id: + cmd = ['chgrp', group_id, mount_path] + self._execute(*cmd, run_as_root=True) + + if not (current_mode & stat.S_IWGRP): + cmd = ['chmod', 'g+w', mount_path] + self._execute(*cmd, run_as_root=True) + + self._ensure_share_writable(mount_path) + + def _find_share(self, volume_size_for): + """Choose GlusterFS share among available ones for given volume size. + Current implementation looks for greatest capacity. + :param volume_size_for: int size in GB + """ + + if not self._mounted_shares: + raise exception.GlusterfsNoSharesMounted() + + greatest_size = 0 + greatest_share = None + + for glusterfs_share in self._mounted_shares: + capacity = self._get_available_capacity(glusterfs_share)[0] + if capacity > greatest_size: + greatest_share = glusterfs_share + greatest_size = capacity + + if volume_size_for * units.GiB > greatest_size: + raise exception.GlusterfsNoSuitableShareFound( + volume_size=volume_size_for) + return greatest_share + + def _get_hash_str(self, base_str): + """Return a string that represents hash of base_str + (in a hex format). + """ + return hashlib.md5(base_str).hexdigest() + + def _get_mount_point_for_share(self, glusterfs_share): + """Return mount point for share. 
+ :param glusterfs_share: example 172.18.194.100:/var/glusterfs + """ + return os.path.join(self.configuration.glusterfs_mount_point_base, + self._get_hash_str(glusterfs_share)) + + def _get_available_capacity(self, glusterfs_share): + """Calculate available space on the GlusterFS share. + :param glusterfs_share: example 172.18.194.100:/var/glusterfs + """ + mount_point = self._get_mount_point_for_share(glusterfs_share) + + out, _ = self._execute('df', '--portability', '--block-size', '1', + mount_point, run_as_root=True) + out = out.splitlines()[1] + + size = int(out.split()[1]) + available = int(out.split()[3]) + + return available, size + + def _get_capacity_info(self, glusterfs_share): + available, size = self._get_available_capacity(glusterfs_share) + return size, available, size - available + + def _mount_glusterfs(self, glusterfs_share, mount_path, ensure=False): + """Mount GlusterFS share to mount path.""" + self._execute('mkdir', '-p', mount_path) + + command = ['mount', '-t', 'glusterfs', glusterfs_share, + mount_path] + if self.shares.get(glusterfs_share) is not None: + command.extend(self.shares[glusterfs_share].split()) + + self._do_mount(command, ensure, glusterfs_share) diff --git a/cinder/volume/drivers/gpfs.py b/cinder/volume/drivers/gpfs.py new file mode 100644 index 0000000000..e965b8df32 --- /dev/null +++ b/cinder/volume/drivers/gpfs.py @@ -0,0 +1,640 @@ +# Copyright IBM Corp. 2013 All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +GPFS Volume Driver. + +""" +import math +import os +import re +import shutil + +from oslo.config import cfg + +from cinder import exception +from cinder.image import image_utils +from cinder.openstack.common import log as logging +from cinder.openstack.common import processutils +from cinder import units +from cinder.volume import driver + +GPFS_CLONE_MIN_RELEASE = 1200 + +LOG = logging.getLogger(__name__) + +gpfs_opts = [ + cfg.StrOpt('gpfs_mount_point_base', + default=None, + help='Specifies the path of the GPFS directory where Block ' + 'Storage volume and snapshot files are stored.'), + cfg.StrOpt('gpfs_images_dir', + default=None, + help='Specifies the path of the Image service repository in ' + 'GPFS. Leave undefined if not storing images in GPFS.'), + cfg.StrOpt('gpfs_images_share_mode', + default=None, + help='Specifies the type of image copy to be used. Set this ' + 'when the Image service repository also uses GPFS so ' + 'that image files can be transferred efficiently from ' + 'the Image service to the Block Storage service. There ' + 'are two valid values: "copy" specifies that a full copy ' + 'of the image is made; "copy_on_write" specifies that ' + 'copy-on-write optimization strategy is used and ' + 'unmodified blocks of the image file are shared ' + 'efficiently.'), + cfg.IntOpt('gpfs_max_clone_depth', + default=0, + help='Specifies an upper limit on the number of indirections ' + 'required to reach a specific block due to snapshots or ' + 'clones. 
A lengthy chain of copy-on-write snapshots or ' + 'clones can have a negative impact on performance, but ' + 'improves space utilization. 0 indicates unlimited ' + 'clone depth.'), + cfg.BoolOpt('gpfs_sparse_volumes', + default=True, + help=('Specifies that volumes are created as sparse files ' + 'which initially consume no space. If set to False, the ' + 'volume is created as a fully allocated file, in which ' + 'case, creation may take a significantly longer time.')), +] +CONF = cfg.CONF +CONF.register_opts(gpfs_opts) + + +class GPFSDriver(driver.VolumeDriver): + + """Implements volume functions using GPFS primitives.""" + + VERSION = "1.0.0" + + def __init__(self, *args, **kwargs): + super(GPFSDriver, self).__init__(*args, **kwargs) + self.configuration.append_config_values(gpfs_opts) + + def _get_gpfs_state(self): + (out, _) = self._execute('mmgetstate', '-Y', run_as_root=True) + return out + + def _check_gpfs_state(self): + out = self._get_gpfs_state() + lines = out.splitlines() + state_token = lines[0].split(':').index('state') + gpfs_state = lines[1].split(':')[state_token] + if gpfs_state != 'active': + LOG.error(_('GPFS is not active. Detailed output: %s') % out) + exception_message = (_("GPFS is not running - state: %s") % + gpfs_state) + raise exception.VolumeBackendAPIException(data=exception_message) + + def _get_filesystem_from_path(self, path): + (out, _) = self._execute('df', path, run_as_root=True) + lines = out.splitlines() + fs = lines[1].split()[0] + return fs + + def _get_gpfs_filesystem_release_level(self, path): + fs = self._get_filesystem_from_path(path) + (out, _) = self._execute('mmlsfs', fs, '-V', '-Y', + run_as_root=True) + lines = out.splitlines() + value_token = lines[0].split(':').index('data') + fs_release_level_str = lines[1].split(':')[value_token] + # at this point, release string looks like "13.23 (3.5.0.7)" + # extract first token and convert to whole number value + fs_release_level = int(float(fs_release_level_str.split()[0]) * 100) + return fs, fs_release_level + + def _get_gpfs_cluster_release_level(self): + (out, _) = self._execute('mmlsconfig', 'minreleaseLeveldaemon', '-Y', + run_as_root=True) + lines = out.splitlines() + value_token = lines[0].split(':').index('value') + min_release_level = lines[1].split(':')[value_token] + return int(min_release_level) + + def _is_gpfs_path(self, directory): + self._execute('mmlsattr', directory, run_as_root=True) + + def _is_samefs(self, p1, p2): + if os.lstat(p1).st_dev == os.lstat(p2).st_dev: + return True + return False + + def check_for_setup_error(self): + """Returns an error if prerequisites aren't met.""" + self._check_gpfs_state() + + if(self.configuration.gpfs_mount_point_base is None): + msg = _('Option gpfs_mount_point_base is not set correctly.') + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + if(self.configuration.gpfs_images_share_mode and + self.configuration.gpfs_images_share_mode not in ['copy_on_write', + 'copy']): + msg = _('Option gpfs_images_share_mode is not set correctly.') + LOG.warn(msg) + raise exception.VolumeBackendAPIException(data=msg) + + if(self.configuration.gpfs_images_share_mode and + self.configuration.gpfs_images_dir is None): + msg = _('Option gpfs_images_dir is not set correctly.') + LOG.warn(msg) + raise exception.VolumeBackendAPIException(data=msg) + + if(self.configuration.gpfs_images_share_mode == 'copy_on_write' and + not self._is_samefs(self.configuration.gpfs_mount_point_base, + self.configuration.gpfs_images_dir)): + msg = 
(_('gpfs_images_share_mode is set to copy_on_write, but ' + '%(vol)s and %(img)s belong to different file systems') % + {'vol': self.configuration.gpfs_mount_point_base, + 'img': self.configuration.gpfs_images_dir}) + LOG.warn(msg) + raise exception.VolumeBackendAPIException(data=msg) + + _gpfs_cluster_release_level = self._get_gpfs_cluster_release_level() + if not _gpfs_cluster_release_level >= GPFS_CLONE_MIN_RELEASE: + msg = (_('Downlevel GPFS Cluster Detected. GPFS Clone feature ' + 'not enabled in cluster daemon level %(cur)s - must ' + 'be at least at level %(min)s.') % + {'cur': _gpfs_cluster_release_level, + 'min': GPFS_CLONE_MIN_RELEASE}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + for directory in [self.configuration.gpfs_mount_point_base, + self.configuration.gpfs_images_dir]: + if directory is None: + continue + + if not directory.startswith('/'): + msg = (_('%s must be an absolute path.') % directory) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + if not os.path.isdir(directory): + msg = (_('%s is not a directory.') % directory) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + # Check if GPFS is mounted + self._verify_gpfs_path_state(directory) + + fs, fslevel = self._get_gpfs_filesystem_release_level(directory) + if not fslevel >= GPFS_CLONE_MIN_RELEASE: + msg = (_('The GPFS filesystem %(fs)s is not at the required ' + 'release level. Current level is %(cur)s, must be ' + 'at least %(min)s.') % + {'fs': fs, + 'cur': fslevel, + 'min': GPFS_CLONE_MIN_RELEASE}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + def _create_sparse_file(self, path, size): + """Creates file with 0 disk usage.""" + + sizestr = self._sizestr(size) + self._execute('truncate', '-s', sizestr, path, run_as_root=True) + self._execute('chmod', '666', path, run_as_root=True) + + def _allocate_file_blocks(self, path, size): + """Preallocate file blocks by writing zeros.""" + + block_size_mb = 1 + block_count = size * units.GiB / (block_size_mb * units.MiB) + + self._execute('dd', 'if=/dev/zero', 'of=%s' % path, + 'bs=%dM' % block_size_mb, + 'count=%d' % block_count, + run_as_root=True) + + def _gpfs_change_attributes(self, options, path): + cmd = ['mmchattr'] + cmd.extend(options) + cmd.append(path) + self._execute(*cmd, run_as_root=True) + + def _set_volume_attributes(self, path, metadata): + """Set various GPFS attributes for this volume.""" + + options = [] + for item in metadata: + if item['key'] == 'data_pool_name': + options.extend(['-P', item['value']]) + elif item['key'] == 'replicas': + options.extend(['-r', item['value'], '-m', item['value']]) + elif item['key'] == 'dio': + options.extend(['-D', item['value']]) + elif item['key'] == 'write_affinity_depth': + options.extend(['--write-affinity-depth', item['value']]) + elif item['key'] == 'block_group_factor': + options.extend(['--block-group-factor', item['value']]) + elif item['key'] == 'write_affinity_failure_group': + options.extend(['--write-affinity-failure-group', + item['value']]) + + if options: + self._gpfs_change_attributes(options, path) + + def create_volume(self, volume): + """Creates a GPFS volume.""" + # Check if GPFS is mounted + self._verify_gpfs_path_state(self.configuration.gpfs_mount_point_base) + + volume_path = self.local_path(volume) + volume_size = volume['size'] + + # Create a sparse file first; allocate blocks later if requested + self._create_sparse_file(volume_path, volume_size) + + # Set the attributes prior to 
allocating any blocks so that
+        # they are allocated according to the policy
+        v_metadata = volume.get('volume_metadata')
+        self._set_volume_attributes(volume_path, v_metadata)
+
+        if not self.configuration.gpfs_sparse_volumes:
+            self._allocate_file_blocks(volume_path, volume_size)
+
+        fstype = None
+        fslabel = None
+        for item in v_metadata:
+            if item['key'] == 'fstype':
+                fstype = item['value']
+            elif item['key'] == 'fslabel':
+                fslabel = item['value']
+        if fstype:
+            self._mkfs(volume, fstype, fslabel)
+
+    def create_volume_from_snapshot(self, volume, snapshot):
+        """Creates a GPFS volume from a snapshot."""
+        volume_path = self.local_path(volume)
+        snapshot_path = self.local_path(snapshot)
+        self._create_gpfs_copy(src=snapshot_path, dest=volume_path)
+        self._gpfs_redirect(volume_path)
+        virt_size = self._resize_volume_file(volume, volume['size'])
+        return {'size': math.ceil(virt_size / units.GiB)}
+
+    def create_cloned_volume(self, volume, src_vref):
+        src = self.local_path(src_vref)
+        dest = self.local_path(volume)
+        self._create_gpfs_clone(src, dest)
+        virt_size = self._resize_volume_file(volume, volume['size'])
+        return {'size': math.ceil(virt_size / units.GiB)}
+
+    def _delete_gpfs_file(self, fchild):
+        if not os.path.exists(fchild):
+            return
+        (out, err) = self._execute('mmclone', 'show', fchild,
+                                   run_as_root=True)
+        fparent = None
+        reInode = re.compile(
+            r'.*\s+(?:yes|no)\s+\d+\s+(?P<inode>\d+)', re.M | re.S)
+        match = reInode.match(out)
+        if match:
+            inode = match.group('inode')
+            path = os.path.dirname(fchild)
+            (out, err) = self._execute('find', path, '-maxdepth', '1',
+                                       '-inum', inode, run_as_root=True)
+            if out:
+                fparent = out.split('\n', 1)[0]
+        self._execute(
+            'rm', '-f', fchild, check_exit_code=False, run_as_root=True)
+
+        # There is no need to check for volume references on this snapshot
+        # because 'rm -f' itself serves as a simple and implicit check. If the
+        # parent is referenced by another volume, GPFS doesn't allow deleting
+        # it. 'rm -f' silently fails and the subsequent check on the path
+        # indicates whether there are any volumes derived from that snapshot.
+        # If there are such volumes, we quit recursion and let the other
+        # volumes delete the snapshot later. If there are no references, rm
+        # would succeed and the snapshot is deleted.
+        if not os.path.exists(fchild) and fparent:
+            fpbase = os.path.basename(fparent)
+            if (fpbase.endswith('.snap') or fpbase.endswith('.ts')):
+                self._delete_gpfs_file(fparent)
+
+    def delete_volume(self, volume):
+        """Deletes a logical volume."""
+        # Check if GPFS is mounted
+        self._verify_gpfs_path_state(self.configuration.gpfs_mount_point_base)
+
+        volume_path = self.local_path(volume)
+        self._delete_gpfs_file(volume_path)
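+
+    # Both _delete_gpfs_file() above and _gpfs_redirect() below scrape
+    # 'mmclone show' output with a named-group regex. Illustrative only,
+    # assuming a row whose trailing columns are is-parent (yes/no), depth
+    # and parent inode, e.g. '   no      1        48757 /gpfs/volume-01':
+    #
+    #     >>> re.match(r'.*\s+(?:yes|no)\s+\d+\s+(?P<inode>\d+)',
+    #     ...          out, re.M | re.S).group('inode')
+    #     '48757'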
+    def _gpfs_redirect(self, src):
+        """Removes the copy_on_write dependency between src and parent.
+
+        Remove the copy_on_write dependency between the src file and its
+        immediate parent such that the length of the dependency chain is
+        reduced by one.
+        """
+        max_depth = self.configuration.gpfs_max_clone_depth
+        if max_depth == 0:
+            return False
+        (out, err) = self._execute('mmclone', 'show', src, run_as_root=True)
+        reDepth = re.compile(r'.*\s+no\s+(?P<depth>\d+)', re.M | re.S)
+        match = reDepth.match(out)
+        if match:
+            depth = int(match.group('depth'))
+            if depth > max_depth:
+                self._execute('mmclone', 'redirect', src, run_as_root=True)
+                return True
+        return False
+
+    def _create_gpfs_clone(self, src, dest):
+        snap = dest + ".snap"
+        self._create_gpfs_snap(src, snap)
+        self._create_gpfs_copy(snap, dest)
+        if self._gpfs_redirect(src) and self._gpfs_redirect(dest):
+            self._execute('rm', '-f', snap, run_as_root=True)
+
+    def _create_gpfs_copy(self, src, dest, modebits='666'):
+        self._execute('mmclone', 'copy', src, dest, run_as_root=True)
+        self._execute('chmod', modebits, dest, run_as_root=True)
+
+    def _create_gpfs_snap(self, src, dest=None, modebits='644'):
+        if dest is None:
+            self._execute('mmclone', 'snap', src, run_as_root=True)
+            self._execute('chmod', modebits, src, run_as_root=True)
+        else:
+            self._execute('mmclone', 'snap', src, dest, run_as_root=True)
+            self._execute('chmod', modebits, dest, run_as_root=True)
+
+    def _is_gpfs_parent_file(self, gpfs_file):
+        out, _ = self._execute('mmclone', 'show', gpfs_file, run_as_root=True)
+        ptoken = out.splitlines().pop().split()[0]
+        return ptoken == 'yes'
+
+    def create_snapshot(self, snapshot):
+        """Creates a GPFS snapshot."""
+        snapshot_path = self.local_path(snapshot)
+        volume_path = os.path.join(self.configuration.gpfs_mount_point_base,
+                                   snapshot['volume_name'])
+        self._create_gpfs_snap(src=volume_path, dest=snapshot_path)
+        self._gpfs_redirect(volume_path)
+
+    def delete_snapshot(self, snapshot):
+        """Deletes a GPFS snapshot."""
+        # Rename the deleted snapshot to indicate it no longer exists in
+        # cinder db. Attempt to delete the snapshot. If the snapshot has
+        # clone children, the delete will fail silently. When volumes that
+        # are clone children are deleted in the future, the remaining ts
+        # snapshots will also be deleted.
+        snapshot_path = self.local_path(snapshot)
+        snapshot_ts_path = '%s.ts' % snapshot_path
+        self._execute('mv', snapshot_path, snapshot_ts_path, run_as_root=True)
+        self._execute('rm', '-f', snapshot_ts_path,
+                      check_exit_code=False, run_as_root=True)
+
+    def local_path(self, volume):
+        return os.path.join(self.configuration.gpfs_mount_point_base,
+                            volume['name'])
+
+    def ensure_export(self, context, volume):
+        """Synchronously recreates an export for a logical volume."""
+        pass
+
+    def create_export(self, context, volume):
+        """Exports the volume."""
+        pass
+
+    def remove_export(self, context, volume):
+        """Removes an export for a logical volume."""
+        pass
+
+    def initialize_connection(self, volume, connector):
+        return {
+            'driver_volume_type': 'local',
+            'data': {
+                'name': volume['name'],
+                'device_path': self.local_path(volume),
+            }
+        }
+
+    def terminate_connection(self, volume, connector, **kwargs):
+        pass
+
+    def get_volume_stats(self, refresh=False):
+        """Get volume stats.
+
+        If 'refresh' is True, or stats have never been updated,
+        update the stats first.
+ """ + if not self._stats or refresh: + self._update_volume_stats() + + return self._stats + + def _update_volume_stats(self): + """Retrieve stats info from volume group.""" + + LOG.debug("Updating volume stats") + data = {} + backend_name = self.configuration.safe_get('volume_backend_name') + data["volume_backend_name"] = backend_name or 'GPFS' + data["vendor_name"] = 'IBM' + data["driver_version"] = self.VERSION + data["storage_protocol"] = 'file' + free, capacity = self._get_available_capacity(self.configuration. + gpfs_mount_point_base) + data['total_capacity_gb'] = math.ceil(capacity / units.GiB) + data['free_capacity_gb'] = math.ceil(free / units.GiB) + data['reserved_percentage'] = 0 + data['QoS_support'] = False + self._stats = data + + def _sizestr(self, size_in_g): + if int(size_in_g) == 0: + return '100M' + return '%sG' % size_in_g + + def clone_image(self, volume, image_location, image_id, image_meta): + return self._clone_image(volume, image_location, image_id) + + def _is_cloneable(self, image_id): + if not((self.configuration.gpfs_images_dir and + self.configuration.gpfs_images_share_mode)): + reason = 'glance repository not configured to use GPFS' + return False, reason, None + + image_path = os.path.join(self.configuration.gpfs_images_dir, image_id) + try: + self._is_gpfs_path(image_path) + except processutils.ProcessExecutionError: + reason = 'image file not in GPFS' + return False, reason, None + + return True, None, image_path + + def _clone_image(self, volume, image_location, image_id): + """Attempt to create a volume by efficiently copying image to volume. + + If both source and target are backed by gpfs storage and the source + image is in raw format move the image to create a volume using either + gpfs clone operation or with a file copy. If the image format is not + raw, convert it to raw at the volume path. + """ + # Check if GPFS is mounted + self._verify_gpfs_path_state(self.configuration.gpfs_mount_point_base) + + cloneable_image, reason, image_path = self._is_cloneable(image_id) + if not cloneable_image: + LOG.debug('Image %(img)s not cloneable: %(reas)s' % + {'img': image_id, 'reas': reason}) + return (None, False) + + vol_path = self.local_path(volume) + # if the image is not already a GPFS snap file make it so + if not self._is_gpfs_parent_file(image_path): + self._create_gpfs_snap(image_path, modebits='666') + + data = image_utils.qemu_img_info(image_path) + + # if image format is already raw either clone it or + # copy it depending on config file settings + if data.file_format == 'raw': + if (self.configuration.gpfs_images_share_mode == + 'copy_on_write'): + LOG.debug('Clone image to vol %s using mmclone' % + volume['id']) + self._create_gpfs_copy(image_path, vol_path) + elif self.configuration.gpfs_images_share_mode == 'copy': + LOG.debug('Clone image to vol %s using copyfile' % + volume['id']) + shutil.copyfile(image_path, vol_path) + self._execute('chmod', '666', vol_path, run_as_root=True) + + # if image is not raw convert it to raw into vol_path destination + else: + LOG.debug('Clone image to vol %s using qemu convert' % + volume['id']) + image_utils.convert_image(image_path, vol_path, 'raw') + self._execute('chmod', '666', vol_path, run_as_root=True) + + self._resize_volume_file(volume, volume['size']) + + return {'provider_location': None}, True + + def copy_image_to_volume(self, context, volume, image_service, image_id): + """Fetch the image from image_service and write it to the volume. 
+ + Note that cinder.volume.flows.create_volume will attempt to use + clone_image to efficiently create volume from image when both + source and target are backed by gpfs storage. If that is not the + case, this function is invoked and uses fetch_to_raw to create the + volume. + """ + # Check if GPFS is mounted + self._verify_gpfs_path_state(self.configuration.gpfs_mount_point_base) + + LOG.debug('Copy image to vol %s using image_utils fetch_to_raw' % + volume['id']) + image_utils.fetch_to_raw(context, image_service, image_id, + self.local_path(volume), + self.configuration.volume_dd_blocksize, + size=volume['size']) + self._resize_volume_file(volume, volume['size']) + + def _resize_volume_file(self, volume, new_size): + """Resize volume file to new size.""" + vol_path = self.local_path(volume) + try: + image_utils.resize_image(vol_path, new_size) + except processutils.ProcessExecutionError as exc: + LOG.error(_("Failed to resize volume " + "%(volume_id)s, error: %(error)s") % + {'volume_id': volume['id'], + 'error': exc.stderr}) + raise exception.VolumeBackendAPIException(data=exc.stderr) + + data = image_utils.qemu_img_info(vol_path) + return data.virtual_size + + def extend_volume(self, volume, new_size): + """Extend an existing volume.""" + self._resize_volume_file(volume, new_size) + + def copy_volume_to_image(self, context, volume, image_service, image_meta): + """Copy the volume to the specified image.""" + image_utils.upload_volume(context, + image_service, + image_meta, + self.local_path(volume)) + + def backup_volume(self, context, backup, backup_service): + """Create a new backup from an existing volume.""" + raise NotImplementedError() + + def restore_backup(self, context, backup, volume, backup_service): + """Restore an existing backup to a new or existing volume.""" + raise NotImplementedError() + + def _mkfs(self, volume, fs, label=None): + if fs == 'swap': + cmd = ['mkswap'] + else: + cmd = ['mkfs', '-t', fs] + + if fs in ('ext3', 'ext4'): + cmd.append('-F') + if label: + if fs in ('msdos', 'vfat'): + label_opt = '-n' + else: + label_opt = '-L' + cmd.extend([label_opt, label]) + + path = self.local_path(volume) + cmd.append(path) + try: + self._execute(*cmd, run_as_root=True) + except processutils.ProcessExecutionError as exc: + exception_message = (_("mkfs failed on volume %(vol)s, " + "error message was: %(err)s") + % {'vol': volume['name'], 'err': exc.stderr}) + LOG.error(exception_message) + raise exception.VolumeBackendAPIException( + data=exception_message) + + def _get_available_capacity(self, path): + """Calculate available space on path.""" + # Check if GPFS is mounted + try: + self._verify_gpfs_path_state(path) + mounted = True + except exception.VolumeBackendAPIException: + mounted = False + + # If GPFS is not mounted, return zero capacity. So that the volume + # request can be scheduled to another volume service. + if not mounted: + return 0, 0 + + out, _ = self._execute('df', '-P', '-B', '1', path, + run_as_root=True) + out = out.splitlines()[1] + size = int(out.split()[1]) + available = int(out.split()[3]) + return available, size + + def _verify_gpfs_path_state(self, path): + """Examine if GPFS is active and file system is mounted or not.""" + try: + self._is_gpfs_path(path) + except processutils.ProcessExecutionError: + msg = (_('%s cannot be accessed. 
Verify that GPFS is active and ' + 'file system is mounted.') % path) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) diff --git a/cinder/volume/drivers/hds/__init__.py b/cinder/volume/drivers/hds/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cinder/volume/drivers/hds/hds.py b/cinder/volume/drivers/hds/hds.py new file mode 100644 index 0000000000..a188bffb80 --- /dev/null +++ b/cinder/volume/drivers/hds/hds.py @@ -0,0 +1,530 @@ +# Copyright (c) 2013 Hitachi Data Systems, Inc. +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +""" +iSCSI Cinder Volume driver for Hitachi Unified Storage (HUS) platform. +""" + +from oslo.config import cfg +from xml.etree import ElementTree as ETree + +from cinder import exception +from cinder.openstack.common import excutils +from cinder.openstack.common import log as logging +from cinder import utils +from cinder.volume import driver +from cinder.volume.drivers.hds.hus_backend import HusBackend + +HDS_VERSION = '1.0.2' + +LOG = logging.getLogger(__name__) + +HUS_OPTS = [ + cfg.StrOpt('hds_cinder_config_file', + default='/opt/hds/hus/cinder_hus_conf.xml', + help='configuration file for HDS cinder plugin for HUS'), ] + +CONF = cfg.CONF +CONF.register_opts(HUS_OPTS) + +HI_IQN = 'iqn.1994-04.jp.co.hitachi:' # fixed string, for now. 
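+
+# provider_location strings handled by _loc_info() below take one of two
+# shapes (values illustrative, not from a real array): either a bare
+# '<array-id>.<lun>' pair such as '92210013.42', or, once a connection
+# exists, the five comma-separated fields written by
+# initialize_connection():
+#     '<portal-ip:port>,<iqn>,<array-id>.<lun>,<ctl>,<port>'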
+
+HUS_DEFAULT_CONFIG = {'hus_cmd': 'hus-cmd',
+                      'lun_start': '0',
+                      'lun_end': '8192'}
+
+
+def factory_bend():
+    """Factory over-ride in self-tests."""
+    return HusBackend()
+
+
+def _loc_info(loc):
+    """Parse info from location string."""
+    info = {}
+    tup = loc.split(',')
+    if len(tup) < 5:
+        info['id_lu'] = tup[0].split('.')
+        return info
+    info['id_lu'] = tup[2].split('.')
+    info['tgt'] = tup
+    return info
+
+
+def _do_lu_range_check(start, end, maxlun):
+    """Validate array allocation range."""
+    LOG.debug(_("Range: start LU: %(start)s, end LU: %(end)s")
+              % {'start': start,
+                 'end': end})
+    if int(start) < 0:
+        msg = 'start LU limit too low: ' + start
+        raise exception.InvalidInput(reason=msg)
+    if int(start) >= int(maxlun):
+        msg = 'start LU limit too high: ' + start + ' max: ' + maxlun
+        raise exception.InvalidInput(reason=msg)
+    if int(end) <= int(start):
+        msg = 'LU end limit too low: ' + end
+        raise exception.InvalidInput(reason=msg)
+    if int(end) > int(maxlun):
+        end = maxlun
+        LOG.debug(_("setting LU upper (end) limit to %s") % maxlun)
+    return (start, end)
+
+
+def _xml_read(root, element, check=None):
+    """Read an xml element."""
+    try:
+        val = root.findtext(element)
+        LOG.info(_("%(element)s: %(val)s")
+                 % {'element': element,
+                    'val': val})
+        if val:
+            return val.strip()
+        if check:
+            raise exception.ParameterNotFound(param=element)
+        return None
+    except ETree.ParseError:
+        if check:
+            with excutils.save_and_reraise_exception():
+                LOG.error(_("XML exception reading parameter: %s") % element)
+        else:
+            LOG.info(_("XML exception reading parameter: %s") % element)
+            return None
+
+
+def _read_config(xml_config_file):
+    """Read hds driver specific xml config file."""
+    try:
+        root = ETree.parse(xml_config_file).getroot()
+    except Exception:
+        raise exception.NotFound(message='config file not found: '
+                                 + xml_config_file)
+    config = {}
+    arg_prereqs = ['mgmt_ip0', 'mgmt_ip1', 'username', 'password']
+    for req in arg_prereqs:
+        config[req] = _xml_read(root, req, 'check')
+
+    config['hdp'] = {}
+    config['services'] = {}
+    for svc in ['svc_0', 'svc_1', 'svc_2', 'svc_3']:  # min one needed
+        if _xml_read(root, svc) is None:
+            continue
+        service = {}
+        service['label'] = svc
+        for arg in ['volume_type', 'hdp', 'iscsi_ip']:  # none optional
+            service[arg] = _xml_read(root, svc + '/' + arg, 'check')
+        config['services'][service['volume_type']] = service
+        config['hdp'][service['hdp']] = service['hdp']
+
+    if not config['services']:  # at least one service required!
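+        # _xml_read() returned None for every svc_0..svc_3 block above, so
+        # no volume service could be built from the XML file.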
+ raise exception.ParameterNotFound(param="No service found") + + config['snapshot_hdp'] = _xml_read(root, 'snapshot/hdp', 'check') + + for arg in ['hus_cmd', 'lun_start', 'lun_end']: # optional + config[arg] = _xml_read(root, arg) or HUS_DEFAULT_CONFIG[arg] + + return config + + +class HUSDriver(driver.ISCSIDriver): + """HDS HUS volume driver.""" + + VERSION = HDS_VERSION + + def _array_info_get(self): + """Get array parameters.""" + out = self.bend.get_version(self.config['hus_cmd'], + HDS_VERSION, + self.config['mgmt_ip0'], + self.config['mgmt_ip1'], + self.config['username'], + self.config['password']) + inf = out.split() + return(inf[1], 'hus_' + inf[1], inf[6]) + + def _get_iscsi_info(self): + """Validate array iscsi parameters.""" + out = self.bend.get_iscsi_info(self.config['hus_cmd'], + HDS_VERSION, + self.config['mgmt_ip0'], + self.config['mgmt_ip1'], + self.config['username'], + self.config['password']) + lines = out.split('\n') + conf = {} # dict based on iSCSI portal ip addresses + for line in lines: + if 'CTL' in line: + inf = line.split() + (ctl, port, ip, ipp) = (inf[1], inf[3], inf[5], inf[7]) + conf[ip] = {} + conf[ip]['ctl'] = ctl + conf[ip]['port'] = port + conf[ip]['iscsi_port'] = ipp # HUS default: 3260 + msg = _('portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s') + LOG.debug(msg + % {'ip': ip, + 'ipp': ipp, + 'ctl': ctl, + 'port': port}) + return conf + + def _get_service(self, volume): + """Get the available service parameters for a given volume type.""" + label = None + if volume['volume_type']: + label = volume['volume_type']['name'] + label = label or 'default' + if label in self.config['services'].keys(): + svc = self.config['services'][label] + service = (svc['iscsi_ip'], svc['iscsi_port'], svc['ctl'], + svc['port'], svc['hdp']) # ip, ipp, ctl, port, hdp + else: + LOG.error(_("No configuration found for service: %s") % label) + raise exception.ParameterNotFound(param=label) + return service + + def _get_stats(self): + """Get HDP stats from HUS.""" + total_cap = 0 + total_used = 0 + out = self.bend.get_hdp_info(self.config['hus_cmd'], + HDS_VERSION, + self.config['mgmt_ip0'], + self.config['mgmt_ip1'], + self.config['username'], + self.config['password']) + for line in out.split('\n'): + if 'HDP' in line: + (hdp, size, _ign, used) = line.split()[1:5] # in MB + if hdp in self.config['hdp'].keys(): + total_cap += int(size) + total_used += int(used) + hus_stat = {} + hus_stat['total_capacity_gb'] = int(total_cap / 1024) # in GB + hus_stat['free_capacity_gb'] = int((total_cap - total_used) / 1024) + be_name = self.configuration.safe_get('volume_backend_name') + hus_stat["volume_backend_name"] = be_name or 'HUSDriver' + hus_stat["vendor_name"] = 'HDS' + hus_stat["driver_version"] = HDS_VERSION + hus_stat["storage_protocol"] = 'iSCSI' + hus_stat['QoS_support'] = False + hus_stat['reserved_percentage'] = 0 + return hus_stat + + def _get_hdp_list(self): + """Get HDPs from HUS.""" + out = self.bend.get_hdp_info(self.config['hus_cmd'], + HDS_VERSION, + self.config['mgmt_ip0'], + self.config['mgmt_ip1'], + self.config['username'], + self.config['password']) + hdp_list = [] + for line in out.split('\n'): + if 'HDP' in line: + hdp_list.extend(line.split()[1:2]) + return hdp_list + + def _check_hdp_list(self): + """Verify all HDPs specified in the configuration exist.""" + hdpl = self._get_hdp_list() + lst = self.config['hdp'].keys() + lst.extend([self.config['snapshot_hdp'], ]) + for hdp in lst: + if hdp not in hdpl: + LOG.error(_("HDP not found: %s") % hdp) + err = "HDP 
not found: " + hdp + raise exception.ParameterNotFound(param=err) + + def _id_to_vol(self, idd): + """Given the volume id, retrieve the volume object from database.""" + vol = self.db.volume_get(self.context, idd) + return vol + + def _update_vol_location(self, id, loc): + """Update the provider location.""" + update = {} + update['provider_location'] = loc + self.db.volume_update(self.context, id, update) + + def __init__(self, *args, **kwargs): + """Initialize, read different config parameters.""" + super(HUSDriver, self).__init__(*args, **kwargs) + self.driver_stats = {} + self.context = {} + self.bend = factory_bend() + self.configuration.append_config_values(HUS_OPTS) + self.config = _read_config(self.configuration.hds_cinder_config_file) + (self.arid, self.hus_name, self.lumax) = self._array_info_get() + self._check_hdp_list() + start = self.config['lun_start'] + end = self.config['lun_end'] + maxlun = self.lumax + (self.start, self.end) = _do_lu_range_check(start, end, maxlun) + iscsi_info = self._get_iscsi_info() + for svc in self.config['services'].keys(): + svc_ip = self.config['services'][svc]['iscsi_ip'] + if svc_ip in iscsi_info.keys(): + self.config['services'][svc]['port'] = ( + iscsi_info[svc_ip]['port']) + self.config['services'][svc]['ctl'] = iscsi_info[svc_ip]['ctl'] + self.config['services'][svc]['iscsi_port'] = ( + iscsi_info[svc_ip]['iscsi_port']) + else: # config iscsi address not found on device! + LOG.error(_("iSCSI portal not found for service: %s") % svc_ip) + raise exception.ParameterNotFound(param=svc_ip) + return + + def check_for_setup_error(self): + """Returns an error if prerequisites aren't met.""" + return + + def do_setup(self, context): + """do_setup. + + Setup and verify HDS HUS storage connection. But moved it to + __init__ as (setup/errors) could became an infinite loop. + """ + self.context = context + + def ensure_export(self, context, volume): + return + + def create_export(self, context, volume): + """Create an export. Moved to initialize_connection.""" + return + + @utils.synchronized('hds_hus', external=True) + def create_volume(self, volume): + """Create a LU on HUS.""" + service = self._get_service(volume) + (_ip, _ipp, _ctl, _port, hdp) = service + out = self.bend.create_lu(self.config['hus_cmd'], + HDS_VERSION, + self.config['mgmt_ip0'], + self.config['mgmt_ip1'], + self.config['username'], + self.config['password'], + self.arid, hdp, self.start, self.end, + '%s' % (int(volume['size']) * 1024)) + lun = self.arid + '.' + out.split()[1] + sz = int(out.split()[5]) + LOG.debug(_("LUN %(lun)s of size %(sz)s MB is created.") + % {'lun': lun, + 'sz': sz}) + return {'provider_location': lun} + + @utils.synchronized('hds_hus', external=True) + def create_cloned_volume(self, dst, src): + """Create a clone of a volume.""" + if src['size'] != dst['size']: + msg = 'clone volume size mismatch' + raise exception.VolumeBackendAPIException(data=msg) + service = self._get_service(dst) + (_ip, _ipp, _ctl, _port, hdp) = service + size = int(src['size']) * 1024 + source_vol = self._id_to_vol(src['id']) + (arid, slun) = _loc_info(source_vol['provider_location'])['id_lu'] + out = self.bend.create_dup(self.config['hus_cmd'], + HDS_VERSION, + self.config['mgmt_ip0'], + self.config['mgmt_ip1'], + self.config['username'], + self.config['password'], + arid, slun, + hdp, + self.start, self.end, + '%s' % (size)) + lun = self.arid + '.' 
+ out.split()[1] + size = int(out.split()[5]) + LOG.debug(_("LUN %(lun)s of size %(size)s MB is cloned.") + % {'lun': lun, + 'size': size}) + return {'provider_location': lun} + + @utils.synchronized('hds_hus', external=True) + def extend_volume(self, volume, new_size): + """Extend an existing volume.""" + (arid, lun) = _loc_info(volume['provider_location'])['id_lu'] + out = self.bend.extend_vol(self.config['hus_cmd'], + HDS_VERSION, + self.config['mgmt_ip0'], + self.config['mgmt_ip1'], + self.config['username'], + self.config['password'], + arid, lun, + '%s' % (new_size * 1024)) + LOG.debug(_("LUN %(lun)s extended to %(size)s GB.") + % {'lun': lun, + 'size': new_size}) + + @utils.synchronized('hds_hus', external=True) + def delete_volume(self, volume): + """Delete an LU on HUS.""" + prov_loc = volume['provider_location'] + if prov_loc is None: + return + info = _loc_info(prov_loc) + (arid, lun) = info['id_lu'] + if 'tgt' in info.keys(): # connected? + (_portal, iqn, loc, ctl, port) = info['tgt'] + _out = self.bend.del_iscsi_conn(self.config['hus_cmd'], + HDS_VERSION, + self.config['mgmt_ip0'], + self.config['mgmt_ip1'], + self.config['username'], + self.config['password'], + arid, lun, ctl, port, iqn, + '') + name = self.hus_name + LOG.debug(_("delete lun %(lun)s on %(name)s") + % {'lun': lun, + 'name': name}) + _out = self.bend.delete_lu(self.config['hus_cmd'], + HDS_VERSION, + self.config['mgmt_ip0'], + self.config['mgmt_ip1'], + self.config['username'], + self.config['password'], + arid, lun) + + def remove_export(self, context, volume): + """Disconnect a volume from an attached instance.""" + return + + @utils.synchronized('hds_hus', external=True) + def initialize_connection(self, volume, connector): + """Map the created volume to connector['initiator'].""" + service = self._get_service(volume) + (ip, ipp, ctl, port, _hdp) = service + info = _loc_info(volume['provider_location']) + if 'tgt' in info.keys(): # spurious repeat connection + return + (arid, lun) = info['id_lu'] + loc = arid + '.' 
+ lun + iqn = HI_IQN + connector['host'] + out = self.bend.add_iscsi_conn(self.config['hus_cmd'], + HDS_VERSION, + self.config['mgmt_ip0'], + self.config['mgmt_ip1'], + self.config['username'], + self.config['password'], + arid, lun, ctl, port, iqn, + connector['initiator']) + hus_portal = ip + ':' + ipp + tgt = hus_portal + ',' + iqn + ',' + loc + ',' + ctl + ',' + port + properties = {} + hlun = out.split()[1] + properties['provider_location'] = tgt + self._update_vol_location(volume['id'], tgt) + properties['target_discovered'] = False + properties['target_portal'] = hus_portal + properties['target_iqn'] = iqn + properties['target_lun'] = hlun + properties['volume_id'] = volume['id'] + return {'driver_volume_type': 'iscsi', 'data': properties} + + @utils.synchronized('hds_hus', external=True) + def terminate_connection(self, volume, connector, **kwargs): + """Terminate a connection to a volume.""" + info = _loc_info(volume['provider_location']) + if 'tgt' not in info.keys(): # spurious disconnection + return + (arid, lun) = info['id_lu'] + (_portal, iqn, loc, ctl, port) = info['tgt'] + + _out = self.bend.del_iscsi_conn(self.config['hus_cmd'], + HDS_VERSION, + self.config['mgmt_ip0'], + self.config['mgmt_ip1'], + self.config['username'], + self.config['password'], + arid, lun, ctl, port, iqn, + connector['initiator']) + self._update_vol_location(volume['id'], loc) + return {'provider_location': loc} + + @utils.synchronized('hds_hus', external=True) + def create_volume_from_snapshot(self, volume, snapshot): + """Create a volume from a snapshot.""" + size = int(snapshot['volume_size']) * 1024 + (arid, slun) = _loc_info(snapshot['provider_location'])['id_lu'] + service = self._get_service(volume) + (_ip, _ipp, _ctl, _port, hdp) = service + out = self.bend.create_dup(self.config['hus_cmd'], + HDS_VERSION, + self.config['mgmt_ip0'], + self.config['mgmt_ip1'], + self.config['username'], + self.config['password'], + arid, slun, hdp, + self.start, self.end, + '%s' % (size)) + lun = self.arid + '.' + out.split()[1] + sz = int(out.split()[5]) + LOG.debug(_("LUN %(lun)s of size %(sz)s MB is created from snapshot.") + % {'lun': lun, + 'sz': sz}) + return {'provider_location': lun} + + @utils.synchronized('hds_hus', external=True) + def create_snapshot(self, snapshot): + """Create a snapshot.""" + source_vol = self._id_to_vol(snapshot['volume_id']) + size = int(snapshot['volume_size']) * 1024 + (arid, slun) = _loc_info(source_vol['provider_location'])['id_lu'] + out = self.bend.create_dup(self.config['hus_cmd'], + HDS_VERSION, + self.config['mgmt_ip0'], + self.config['mgmt_ip1'], + self.config['username'], + self.config['password'], + arid, slun, + self.config['snapshot_hdp'], + self.start, self.end, + '%s' % (size)) + lun = self.arid + '.' + out.split()[1] + size = int(out.split()[5]) + LOG.debug(_("LUN %(lun)s of size %(size)s MB is created as snapshot.") + % {'lun': lun, + 'size': size}) + return {'provider_location': lun} + + @utils.synchronized('hds_hus', external=True) + def delete_snapshot(self, snapshot): + """Delete a snapshot.""" + loc = snapshot['provider_location'] + if loc is None: # to take care of spurious input + return # which could cause exception. 
+ (arid, lun) = loc.split('.') + _out = self.bend.delete_lu(self.config['hus_cmd'], + HDS_VERSION, + self.config['mgmt_ip0'], + self.config['mgmt_ip1'], + self.config['username'], + self.config['password'], + arid, lun) + LOG.debug(_("LUN %s is deleted.") % lun) + return + + @utils.synchronized('hds_hus', external=True) + def get_volume_stats(self, refresh=False): + """Get volume stats. If 'refresh', run update the stats first.""" + if refresh: + self.driver_stats = self._get_stats() + return self.driver_stats diff --git a/cinder/volume/drivers/hds/hus_backend.py b/cinder/volume/drivers/hds/hus_backend.py new file mode 100644 index 0000000000..4920b8fd5e --- /dev/null +++ b/cinder/volume/drivers/hds/hus_backend.py @@ -0,0 +1,171 @@ +# Copyright (c) 2013 Hitachi Data Systems, Inc. +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +""" +Hitachi Unified Storage (HUS) platform. Backend operations. +""" + +from cinder.openstack.common import log as logging +from cinder import utils + +LOG = logging.getLogger("cinder.volume.driver") + + +class HusBackend: + """Back end. Talks to HUS.""" + def get_version(self, cmd, ver, ip0, ip1, user, pw): + out, err = utils.execute(cmd, + '--driver-version', ver, + '--ip0', ip0, + '--ip1', ip1, + '--user', user, + '--password', pw, + '--version', '1', + run_as_root=True, + check_exit_code=True) + LOG.debug('get_version: ' + out + ' -- ' + err) + return out + + def get_iscsi_info(self, cmd, ver, ip0, ip1, user, pw): + out, err = utils.execute(cmd, + '--driver-version', ver, + '--ip0', ip0, + '--ip1', ip1, + '--user', user, + '--password', pw, + '--iscsi', '1', + check_exit_code=True) + LOG.debug('get_iscsi_info: ' + out + ' -- ' + err) + return out + + def get_hdp_info(self, cmd, ver, ip0, ip1, user, pw): + out, err = utils.execute(cmd, + '--driver-version', ver, + '--ip0', ip0, + '--ip1', ip1, + '--user', user, + '--password', pw, + '--hdp', '1', + check_exit_code=True) + LOG.debug('get_hdp_info: ' + out + ' -- ' + err) + return out + + def create_lu(self, cmd, ver, ip0, ip1, user, pw, id, hdp, start, + end, size): + out, err = utils.execute(cmd, + '--driver-version', ver, + '--ip0', ip0, + '--ip1', ip1, + '--user', user, + '--password', pw, + '--create-lun', '1', + '--array-id', id, + '--hdp', hdp, + '--start', start, + '--end', end, + '--size', size, + check_exit_code=True) + LOG.debug('create_lu: ' + out + ' -- ' + err) + return out + + def delete_lu(self, cmd, ver, ip0, ip1, user, pw, id, lun): + out, err = utils.execute(cmd, + '--driver-version', ver, + '--ip0', ip0, + '--ip1', ip1, + '--user', user, + '--password', pw, + '--delete-lun', '1', + '--array-id', id, + '--lun', lun, + '--force', 1, + check_exit_code=True) + LOG.debug('delete_lu: ' + out + ' -- ' + err) + return out + + def create_dup(self, cmd, ver, ip0, ip1, user, pw, id, src_lun, + hdp, start, end, size): + out, err = utils.execute(cmd, + '--driver-version', ver, + '--ip0', ip0, + '--ip1', ip1, + '--user', user, + 
'--password', pw, + '--create-dup', '1', + '--array-id', id, + '--pvol', src_lun, + '--hdp', hdp, + '--start', start, + '--end', end, + '--size', size, + check_exit_code=True) + LOG.debug('create_dup: ' + out + ' -- ' + err) + return out + + def extend_vol(self, cmd, ver, ip0, ip1, user, pw, id, lun, new_size): + out, err = utils.execute(cmd, + '--driver-version', ver, + '--ip0', ip0, + '--ip1', ip1, + '--user', user, + '--password', pw, + '--extend-lun', '1', + '--array-id', id, + '--lun', lun, + '--size', new_size, + check_exit_code=True) + LOG.debug('extend_vol: ' + out + ' -- ' + err) + return out + + def add_iscsi_conn(self, cmd, ver, ip0, ip1, user, pw, id, lun, ctl, port, + iqn, initiator): + out, err = utils.execute(cmd, + '--driver-version', ver, + '--ip0', ip0, + '--ip1', ip1, + '--user', user, + '--password', pw, + '--add-iscsi-connection', '1', + '--array-id', id, + '--lun', lun, + '--ctl', ctl, + '--port', port, + '--target', iqn, + '--initiator', initiator, + check_exit_code=True) + LOG.debug('add_iscsi_conn: ' + out + ' -- ' + err) + return out + + def del_iscsi_conn(self, cmd, ver, ip0, ip1, user, pw, id, lun, ctl, port, + iqn, initiator): + out, err = utils.execute(cmd, + '--driver-version', ver, + '--ip0', ip0, + '--ip1', ip1, + '--user', user, + '--password', pw, + '--delete-iscsi-connection', '1', + '--array-id', id, + '--lun', lun, + '--ctl', ctl, + '--port', port, + '--target', iqn, + '--initiator', initiator, + '--force', 1, + check_exit_code=True) + LOG.debug('del_iscsi_conn: ' + out + ' -- ' + err) + return out diff --git a/cinder/volume/drivers/huawei/__init__.py b/cinder/volume/drivers/huawei/__init__.py new file mode 100644 index 0000000000..a71bfbbd80 --- /dev/null +++ b/cinder/volume/drivers/huawei/__init__.py @@ -0,0 +1,102 @@ +# Copyright (c) 2013 Huawei Technologies Co., Ltd. +# Copyright (c) 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Provide a unified driver class for users. + +The product type and the protocol should be specified in config file before. 
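+
+For example:
+
+    <Storage>
+        <Product>T</Product>
+        <Protocol>iSCSI</Protocol>
+    </Storage>
+
+where Product is one of T, Dorado or HVS, and Protocol is iSCSI or FC.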
+"""
+
+from oslo.config import cfg
+
+from cinder import exception
+from cinder.openstack.common import log as logging
+from cinder.volume.drivers.huawei import huawei_dorado
+from cinder.volume.drivers.huawei import huawei_hvs
+from cinder.volume.drivers.huawei import huawei_t
+from cinder.volume.drivers.huawei import huawei_utils
+
+LOG = logging.getLogger(__name__)
+
+huawei_opt = [
+    cfg.StrOpt('cinder_huawei_conf_file',
+               default='/etc/cinder/cinder_huawei_conf.xml',
+               help='config data for cinder huawei plugin')]
+
+CONF = cfg.CONF
+CONF.register_opts(huawei_opt)
+
+
+class HuaweiVolumeDriver(object):
+    """Define a unified driver for Huawei drivers."""
+
+    def __init__(self, *args, **kwargs):
+        super(HuaweiVolumeDriver, self).__init__()
+        self._product = {'T': huawei_t, 'Dorado': huawei_dorado,
+                         'HVS': huawei_hvs}
+        self._protocol = {'iSCSI': 'ISCSIDriver', 'FC': 'FCDriver'}
+
+        self.driver = self._instantiate_driver(*args, **kwargs)
+
+    def _instantiate_driver(self, *args, **kwargs):
+        """Instantiate the specified driver."""
+        self.configuration = kwargs.get('configuration', None)
+        if not self.configuration:
+            msg = _('_instantiate_driver: configuration not found.')
+            raise exception.InvalidInput(reason=msg)
+
+        self.configuration.append_config_values(huawei_opt)
+        conf_file = self.configuration.cinder_huawei_conf_file
+        (product, protocol) = self._get_conf_info(conf_file)
+
+        LOG.debug(_('_instantiate_driver: Loading %(protocol)s driver for '
+                    'Huawei OceanStor %(product)s series storage arrays.')
+                  % {'protocol': protocol,
+                     'product': product})
+
+        driver_module = self._product[product]
+        driver_class = 'Huawei' + product + self._protocol[protocol]
+
+        driver_class = getattr(driver_module, driver_class)
+        return driver_class(*args, **kwargs)
+
+    def _get_conf_info(self, filename):
+        """Get product type and connection protocol from config file."""
+        root = huawei_utils.parse_xml_file(filename)
+        product = root.findtext('Storage/Product').strip()
+        protocol = root.findtext('Storage/Protocol').strip()
+        if (product in self._product.keys() and
+                protocol in self._protocol.keys()):
+            return (product, protocol)
+        else:
+            msg = (_('"Product" or "Protocol" is illegal. "Product" should '
+                     'be set to either T, Dorado or HVS. "Protocol" should '
+                     'be set to either iSCSI or FC. Product: %(product)s '
+                     'Protocol: %(protocol)s')
+                   % {'product': str(product),
+                      'protocol': str(protocol)})
+            raise exception.InvalidInput(reason=msg)
+
+    def __setattr__(self, name, value):
+        """Set the attribute."""
+        if getattr(self, 'driver', None):
+            self.driver.__setattr__(name, value)
+            return
+        object.__setattr__(self, name, value)
+
+    def __getattr__(self, name):
+        """Get the attribute."""
+        drver = object.__getattribute__(self, 'driver')
+        return getattr(drver, name)
diff --git a/cinder/volume/drivers/huawei/huawei_dorado.py b/cinder/volume/drivers/huawei/huawei_dorado.py
new file mode 100644
index 0000000000..3534ff084a
--- /dev/null
+++ b/cinder/volume/drivers/huawei/huawei_dorado.py
@@ -0,0 +1,106 @@
+# Copyright (c) 2013 Huawei Technologies Co., Ltd.
+# Copyright (c) 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Volume Drivers for Huawei OceanStor Dorado series storage arrays. +""" + +import re + +from cinder.openstack.common import log as logging +from cinder.volume.drivers.huawei import huawei_t +from cinder.volume.drivers.huawei import ssh_common + +LOG = logging.getLogger(__name__) + + +class HuaweiDoradoISCSIDriver(huawei_t.HuaweiTISCSIDriver): + """ISCSI driver class for Huawei OceanStor Dorado storage arrays.""" + + def __init__(self, *args, **kwargs): + super(HuaweiDoradoISCSIDriver, self).__init__(*args, **kwargs) + + def do_setup(self, context): + """Instantiate common class.""" + self.common = ssh_common.DoradoCommon(configuration=self.configuration) + + self.common.do_setup(context) + self._assert_cli_out = self.common._assert_cli_out + self._assert_cli_operate_out = self.common._assert_cli_operate_out + + +class HuaweiDoradoFCDriver(huawei_t.HuaweiTFCDriver): + """FC driver class for Huawei OceanStor Dorado storage arrays.""" + + def __init__(self, *args, **kwargs): + super(HuaweiDoradoFCDriver, self).__init__(*args, **kwargs) + + def do_setup(self, context): + """Instantiate common class.""" + self.common = ssh_common.DoradoCommon(configuration=self.configuration) + + self.common.do_setup(context) + self._assert_cli_out = self.common._assert_cli_out + self._assert_cli_operate_out = self.common._assert_cli_operate_out + + def _get_host_port_details(self, hostid): + cli_cmd = 'showfcmode' + out = self.common._execute_cli(cli_cmd) + + self._assert_cli_out(re.search('FC Port Topology Mode', out), + '_get_tgt_fc_port_wwns', + 'Failed to get FC port WWNs.', + cli_cmd, out) + + return [line.split()[3] for line in out.split('\r\n')[6:-2]] + + def _get_tgt_fc_port_wwns(self, port_details): + return port_details + + def initialize_connection(self, volume, connector): + """Create FC connection between a volume and a host.""" + LOG.debug(_('initialize_connection: volume name: %(vol)s ' + 'host: %(host)s initiator: %(wwn)s') + % {'vol': volume['name'], + 'host': connector['host'], + 'wwn': connector['wwpns']}) + + self.common._update_login_info() + # First, add a host if it is not added before. + host_id = self.common.add_host(connector['host'], connector['ip']) + # Then, add free FC ports to the host. + ini_wwns = connector['wwpns'] + free_wwns = self._get_connected_free_wwns() + for wwn in free_wwns: + if wwn in ini_wwns: + self._add_fc_port_to_host(host_id, wwn) + fc_port_details = self._get_host_port_details(host_id) + tgt_wwns = self._get_tgt_fc_port_wwns(fc_port_details) + + LOG.debug(_('initialize_connection: Target FC ports WWNS: %s') + % tgt_wwns) + + # Finally, map the volume to the host. 
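+        # map_volume() returns the host LUN id assigned by the array; it is
+        # surfaced below as 'target_lun' in the fibre_channel connection info.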
+ volume_id = volume['provider_location'] + hostlun_id = self.common.map_volume(host_id, volume_id) + + properties = {} + properties['target_discovered'] = False + properties['target_wwn'] = tgt_wwns + properties['target_lun'] = int(hostlun_id) + properties['volume_id'] = volume['id'] + + return {'driver_volume_type': 'fibre_channel', + 'data': properties} diff --git a/cinder/volume/drivers/huawei/huawei_hvs.py b/cinder/volume/drivers/huawei/huawei_hvs.py new file mode 100644 index 0000000000..afaf2ca728 --- /dev/null +++ b/cinder/volume/drivers/huawei/huawei_hvs.py @@ -0,0 +1,171 @@ +# Copyright (c) 2013 Huawei Technologies Co., Ltd. +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Volume Drivers for Huawei OceanStor HVS storage arrays. +""" + +from cinder.volume import driver +from cinder.volume.drivers.huawei.rest_common import HVSCommon + + +class HuaweiHVSISCSIDriver(driver.ISCSIDriver): + """ISCSI driver for Huawei OceanStor HVS storage arrays.""" + + VERSION = '1.0.0' + + def __init__(self, *args, **kwargs): + super(HuaweiHVSISCSIDriver, self).__init__(*args, **kwargs) + + def do_setup(self, context): + """Instantiate common class and log in storage system.""" + self.common = HVSCommon(configuration=self.configuration) + + def check_for_setup_error(self): + """Check configuration file.""" + self.common._check_conf_file() + self.common.login() + + def create_volume(self, volume): + """Create a volume.""" + self.common.create_volume(volume) + + def create_volume_from_snapshot(self, volume, snapshot): + """Create a volume from a snapshot.""" + self.common.create_volume_from_snapshot(volume, snapshot) + + def create_cloned_volume(self, volume, src_vref): + """Create a clone of the specified volume.""" + self.common.create_cloned_volume(volume, src_vref) + + def extend_volume(self, volume, new_size): + """Extend a volume.""" + self.common.extend_volume(volume, new_size) + + def delete_volume(self, volume): + """Delete a volume.""" + self.common.delete_volume(volume) + + def create_snapshot(self, snapshot): + """Create a snapshot.""" + self.common.create_snapshot(snapshot) + + def delete_snapshot(self, snapshot): + """Delete a snapshot.""" + self.common.delete_snapshot(snapshot) + + def get_volume_stats(self, refresh=False): + """Get volume stats.""" + data = self.common.update_volume_stats(refresh) + backend_name = self.configuration.safe_get('volume_backend_name') + data['volume_backend_name'] = backend_name or self.__class__.__name__ + data['storage_protocol'] = 'iSCSI' + data['driver_version'] = self.VERSION + return data + + def initialize_connection(self, volume, connector): + """Map a volume to a host.""" + return self.common.initialize_connection_iscsi(volume, connector) + + def terminate_connection(self, volume, connector, **kwargs): + """Terminate the map.""" + self.common.terminate_connection(volume, connector, **kwargs) + + def create_export(self, context, volume): + """Export the volume.""" + pass + + def 
ensure_export(self, context, volume): + """Synchronously recreate an export for a volume.""" + pass + + def remove_export(self, context, volume): + """Remove an export for a volume.""" + pass + + +class HuaweiHVSFCDriver(driver.FibreChannelDriver): + """FC driver for Huawei OceanStor HVS storage arrays.""" + + VERSION = '1.0.0' + + def __init__(self, *args, **kwargs): + super(HuaweiHVSFCDriver, self).__init__(*args, **kwargs) + + def do_setup(self, context): + """Instantiate common class and log in storage system.""" + self.common = HVSCommon(configuration=self.configuration) + self.common.login() + + def check_for_setup_error(self): + """Check configuration file.""" + self.common._check_conf_file() + + def create_volume(self, volume): + """Create a volume.""" + self.common.create_volume(volume) + + def create_volume_from_snapshot(self, volume, snapshot): + """Create a volume from a snapshot.""" + self.common.create_volume_from_snapshot(volume, snapshot) + + def create_cloned_volume(self, volume, src_vref): + """Create a clone of the specified volume.""" + self.common.create_cloned_volume(volume, src_vref) + + def extend_volume(self, volume, new_size): + """Extend a volume.""" + self.common.extend_volume(volume, new_size) + + def delete_volume(self, volume): + """Delete a volume.""" + self.common.delete_volume(volume) + + def create_snapshot(self, snapshot): + """Create a snapshot.""" + self.common.create_snapshot(snapshot) + + def delete_snapshot(self, snapshot): + """Delete a snapshot.""" + self.common.delete_snapshot(snapshot) + + def get_volume_stats(self, refresh=False): + """Get volume stats.""" + data = self.common.update_volume_stats(refresh) + backend_name = self.configuration.safe_get('volume_backend_name') + data['volume_backend_name'] = backend_name or self.__class__.__name__ + data['storage_protocol'] = 'FC' + data['driver_version'] = self.VERSION + return data + + def initialize_connection(self, volume, connector): + """Map a volume to a host.""" + return self.common.initialize_connection_fc(volume, connector) + + def terminate_connection(self, volume, connector, **kwargs): + """Terminate the map.""" + self.common.terminate_connection(volume, connector, **kwargs) + + def create_export(self, context, volume): + """Export the volume.""" + pass + + def ensure_export(self, context, volume): + """Synchronously recreate an export for a volume.""" + pass + + def remove_export(self, context, volume): + """Remove an export for a volume.""" + pass diff --git a/cinder/volume/drivers/huawei/huawei_t.py b/cinder/volume/drivers/huawei/huawei_t.py new file mode 100644 index 0000000000..523c16f6dd --- /dev/null +++ b/cinder/volume/drivers/huawei/huawei_t.py @@ -0,0 +1,592 @@ +# Copyright (c) 2013 Huawei Technologies Co., Ltd. +# Copyright (c) 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Volume Drivers for Huawei OceanStor T series storage arrays. 
+""" + +import re +import time + +from cinder import exception +from cinder.openstack.common import log as logging +from cinder.volume import driver +from cinder.volume.drivers.huawei import huawei_utils +from cinder.volume.drivers.huawei import ssh_common + + +LOG = logging.getLogger(__name__) + +HOST_PORT_PREFIX = 'HostPort_' + + +class HuaweiTISCSIDriver(driver.ISCSIDriver): + """ISCSI driver for Huawei OceanStor T series storage arrays.""" + + VERSION = '1.1.0' + + def __init__(self, *args, **kwargs): + super(HuaweiTISCSIDriver, self).__init__(*args, **kwargs) + + def do_setup(self, context): + """Instantiate common class.""" + self.common = ssh_common.TseriesCommon(configuration= + self.configuration) + self.common.do_setup(context) + self._assert_cli_out = self.common._assert_cli_out + self._assert_cli_operate_out = self.common._assert_cli_operate_out + + def check_for_setup_error(self): + """Check something while starting.""" + self.common.check_for_setup_error() + + def create_volume(self, volume): + """Create a new volume.""" + volume_id = self.common.create_volume(volume) + return {'provider_location': volume_id} + + def create_volume_from_snapshot(self, volume, snapshot): + """Create a volume from a snapshot.""" + volume_id = self.common.create_volume_from_snapshot(volume, snapshot) + return {'provider_location': volume_id} + + def create_cloned_volume(self, volume, src_vref): + """Create a clone of the specified volume.""" + volume_id = self.common.create_cloned_volume(volume, src_vref) + return {'provider_location': volume_id} + + def extend_volume(self, volume, new_size): + """Extend a volume.""" + self.common.extend_volume(volume, new_size) + + def delete_volume(self, volume): + """Delete a volume.""" + self.common.delete_volume(volume) + + def create_export(self, context, volume): + """Export the volume.""" + pass + + def ensure_export(self, context, volume): + """Synchronously recreate an export for a volume.""" + pass + + def remove_export(self, context, volume): + """Remove an export for a volume.""" + pass + + def create_snapshot(self, snapshot): + """Create a snapshot.""" + snapshot_id = self.common.create_snapshot(snapshot) + return {'provider_location': snapshot_id} + + def delete_snapshot(self, snapshot): + """Delete a snapshot.""" + self.common.delete_snapshot(snapshot) + + def initialize_connection(self, volume, connector): + """Map a volume to a host and return target iSCSI information.""" + LOG.debug(_('initialize_connection: volume name: %(vol)s, ' + 'host: %(host)s, initiator: %(ini)s') + % {'vol': volume['name'], + 'host': connector['host'], + 'ini': connector['initiator']}) + + self.common._update_login_info() + (iscsi_iqn, target_ip, port_ctr) =\ + self._get_iscsi_params(connector['initiator']) + + # First, add a host if not added before. + host_id = self.common.add_host(connector['host'], connector['ip'], + connector['initiator']) + + # Then, add the iSCSI port to the host. + self._add_iscsi_port_to_host(host_id, connector) + + # Finally, map the volume to the host. + volume_id = volume['provider_location'] + hostlun_id = self.common.map_volume(host_id, volume_id) + + # Change LUN ctr for better performance, just for single path. 
+ lun_details = self.common.get_lun_details(volume_id) + if (lun_details['LunType'] == 'THICK' and + lun_details['OwningController'] != port_ctr): + self.common.change_lun_ctr(volume_id, port_ctr) + + properties = {} + properties['target_discovered'] = False + properties['target_portal'] = ('%s:%s' % (target_ip, '3260')) + properties['target_iqn'] = iscsi_iqn + properties['target_lun'] = int(hostlun_id) + properties['volume_id'] = volume['id'] + auth = volume['provider_auth'] + if auth: + (auth_method, auth_username, auth_secret) = auth.split() + + properties['auth_method'] = auth_method + properties['auth_username'] = auth_username + properties['auth_password'] = auth_secret + + return {'driver_volume_type': 'iscsi', 'data': properties} + + def _get_iscsi_params(self, initiator): + """Get target iSCSI params, including iqn and IP.""" + conf_file = self.common.configuration.cinder_huawei_conf_file + iscsi_conf = self._get_iscsi_conf(conf_file) + target_ip = None + for ini in iscsi_conf['Initiator']: + if ini['Name'] == initiator: + target_ip = ini['TargetIP'] + break + # If didn't specify target IP for some initiator, use default IP. + if not target_ip: + if iscsi_conf['DefaultTargetIP']: + target_ip = iscsi_conf['DefaultTargetIP'] + + else: + msg = (_('_get_iscsi_params: Failed to get target IP ' + 'for initiator %(ini)s, please check config file.') + % {'ini': initiator}) + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + (target_iqn, port_ctr) = self._get_tgt_iqn(target_ip) + return (target_iqn, target_ip, port_ctr) + + def _get_iscsi_conf(self, filename): + """Get iSCSI info from config file. + + This function returns a dict: + {'DefaultTargetIP': '11.11.11.11', + 'Initiator': [{'Name': 'iqn.xxxxxx.1', 'TargetIP': '11.11.11.12'}, + {'Name': 'iqn.xxxxxx.2', 'TargetIP': '11.11.11.13'} + ] + } + + """ + + iscsiinfo = {} + root = huawei_utils.parse_xml_file(filename) + + default_ip = root.findtext('iSCSI/DefaultTargetIP') + if default_ip: + iscsiinfo['DefaultTargetIP'] = default_ip.strip() + else: + iscsiinfo['DefaultTargetIP'] = None + initiator_list = [] + tmp_dic = {} + for dic in root.findall('iSCSI/Initiator'): + # Strip the values of dict. + for k, v in dic.items(): + tmp_dic[k] = v.strip() + initiator_list.append(tmp_dic) + iscsiinfo['Initiator'] = initiator_list + return iscsiinfo + + def _get_tgt_iqn(self, port_ip): + """Run CLI command to get target iSCSI iqn. + + The iqn is formed with three parts: + iSCSI target name + iSCSI port info + iSCSI IP + + """ + + LOG.debug(_('_get_tgt_iqn: iSCSI IP is %s.') % port_ip) + + cli_cmd = 'showiscsitgtname' + out = self.common._execute_cli(cli_cmd) + + self._assert_cli_out(re.search('ISCSI Name', out), + '_get_tgt_iqn', + 'Failed to get iSCSI target %s iqn.' % port_ip, + cli_cmd, out) + + lines = out.split('\r\n') + index = lines[4].index('iqn') + iqn_prefix = lines[4][index:].strip() + # Here we make sure port_info won't be None. 
+        port_info = self._get_iscsi_tgt_port_info(port_ip)
+        ctr = ('0' if port_info[0] == 'A' else '1')
+        interface = '0' + port_info[1]
+        port = '0' + port_info[2][1:]
+        iqn_suffix = ctr + '02' + interface + port
+        # The iqn_suffix must not start with 0.
+        while iqn_suffix.startswith('0'):
+            iqn_suffix = iqn_suffix[1:]
+
+        iqn = iqn_prefix + ':' + iqn_suffix + ':' + port_info[3]
+
+        LOG.debug(_('_get_tgt_iqn: iSCSI target iqn is %s.') % iqn)
+
+        return (iqn, port_info[0])
+
+    def _get_iscsi_tgt_port_info(self, port_ip):
+        """Get iSCSI port information of the storage device."""
+        cli_cmd = 'showiscsiip'
+        out = self.common._execute_cli(cli_cmd)
+        if re.search('iSCSI IP Information', out):
+            for line in out.split('\r\n')[6:-2]:
+                tmp_line = line.split()
+                if tmp_line[3] == port_ip:
+                    return tmp_line
+
+        err_msg = _('_get_iscsi_tgt_port_info: Failed to get iSCSI port '
+                    'info. Please make sure the iSCSI port IP %s is '
+                    'configured in the array.') % port_ip
+        LOG.error(err_msg)
+        raise exception.VolumeBackendAPIException(data=err_msg)
+
+    def _add_iscsi_port_to_host(self, hostid, connector, multipathtype=0):
+        """Add an iSCSI port to the given host.
+
+        First, add an initiator if needed; the initiator is equivalent to
+        an iSCSI port. Then, add the initiator to the host if it has not
+        been added before.
+
+        """
+
+        initiator = connector['initiator']
+        # Add an iSCSI initiator.
+        if not self._initiator_added(initiator):
+            self._add_initiator(initiator)
+        # Add the initiator to the host if not added before.
+        port_name = HOST_PORT_PREFIX + str(hash(initiator))
+        portadded = False
+        hostport_info = self.common._get_host_port_info(hostid)
+        if hostport_info:
+            for hostport in hostport_info:
+                if hostport[2] == initiator:
+                    portadded = True
+                    break
+        if not portadded:
+            cli_cmd = ('addhostport -host %(id)s -type 5 '
+                       '-info %(info)s -n %(name)s -mtype %(multype)s'
+                       % {'id': hostid,
+                          'info': initiator,
+                          'name': port_name,
+                          'multype': multipathtype})
+            out = self.common._execute_cli(cli_cmd)
+
+            msg = ('Failed to add iSCSI port %(port)s to host %(host)s.'
+                   % {'port': port_name,
+                      'host': hostid})
+            self._assert_cli_operate_out('_add_iscsi_port_to_host',
+                                         msg, cli_cmd, out)
+
+    def _initiator_added(self, ininame):
+        """Check whether the initiator is already added."""
+        cli_cmd = 'showiscsiini -ini %(name)s' % {'name': ininame}
+        out = self.common._execute_cli(cli_cmd)
+        return bool(re.search('Initiator Information', out))
+
+    def _add_initiator(self, ininame):
+        """Add a new initiator to the storage device."""
+        cli_cmd = 'addiscsiini -n %(name)s' % {'name': ininame}
+        out = self.common._execute_cli(cli_cmd)
+
+        self._assert_cli_operate_out('_add_initiator',
+                                     'Failed to add initiator %s.' % ininame,
+                                     cli_cmd, out)
+
+    def _delete_initiator(self, ininame, attempts=2):
+        """Delete an initiator, retrying while the port is still in use."""
+        cli_cmd = 'deliscsiini -n %(name)s' % {'name': ininame}
+        while attempts > 0:
+            out = self.common._execute_cli(cli_cmd)
+            if re.search('the port is in use', out):
+                attempts -= 1
+                time.sleep(2)
+            else:
+                break
+
+        self._assert_cli_operate_out('_delete_initiator',
+                                     'Failed to delete initiator %s.'
+ % ininame, + cli_cmd, out) + + def terminate_connection(self, volume, connector, **kwargs): + """Terminate the map.""" + LOG.debug(_('terminate_connection: volume: %(vol)s, host: %(host)s, ' + 'connector: %(initiator)s') + % {'vol': volume['name'], + 'host': connector['host'], + 'initiator': connector['initiator']}) + + self.common._update_login_info() + host_id = self.common.remove_map(volume['provider_location'], + connector['host'], + connector['initiator']) + if not self.common._get_host_map_info(host_id): + self._remove_iscsi_port(host_id, connector) + + def _remove_iscsi_port(self, hostid, connector): + """Remove iSCSI ports and delete host.""" + initiator = connector['initiator'] + # Delete the host initiator if no LUN mapped to it. + port_num = 0 + port_info = self.common._get_host_port_info(hostid) + if port_info: + port_num = len(port_info) + for port in port_info: + if port[2] == initiator: + self.common._delete_hostport(port[0]) + self._delete_initiator(initiator) + port_num -= 1 + break + else: + LOG.warn(_('_remove_iscsi_port: iSCSI port was not found ' + 'on host %(hostid)s.') % {'hostid': hostid}) + + # Delete host if no initiator added to it. + if port_num == 0: + self.common._delete_host(hostid) + + def get_volume_stats(self, refresh=False): + """Get volume stats.""" + self._stats = self.common.get_volume_stats(refresh) + self._stats['storage_protocol'] = 'iSCSI' + self._stats['driver_version'] = self.VERSION + backend_name = self.configuration.safe_get('volume_backend_name') + self._stats['volume_backend_name'] = (backend_name or + self.__class__.__name__) + return self._stats + + +class HuaweiTFCDriver(driver.FibreChannelDriver): + """FC driver for Huawei OceanStor T series storage arrays.""" + + VERSION = '1.0.0' + + def __init__(self, *args, **kwargs): + super(HuaweiTFCDriver, self).__init__(*args, **kwargs) + + def do_setup(self, context): + """Instantiate common class.""" + self.common = ssh_common.TseriesCommon(configuration= + self.configuration) + self.common.do_setup(context) + self._assert_cli_out = self.common._assert_cli_out + self._assert_cli_operate_out = self.common._assert_cli_operate_out + + def check_for_setup_error(self): + """Check something while starting.""" + self.common.check_for_setup_error() + + def create_volume(self, volume): + """Create a new volume.""" + volume_id = self.common.create_volume(volume) + return {'provider_location': volume_id} + + def create_volume_from_snapshot(self, volume, snapshot): + """Create a volume from a snapshot.""" + volume_id = self.common.create_volume_from_snapshot(volume, snapshot) + return {'provider_location': volume_id} + + def create_cloned_volume(self, volume, src_vref): + """Create a clone of the specified volume.""" + volume_id = self.common.create_cloned_volume(volume, src_vref) + return {'provider_location': volume_id} + + def extend_volume(self, volume, new_size): + """Extend a volume.""" + self.common.extend_volume(volume, new_size) + + def delete_volume(self, volume): + """Delete a volume.""" + self.common.delete_volume(volume) + + def create_export(self, context, volume): + """Export the volume.""" + pass + + def ensure_export(self, context, volume): + """Synchronously recreate an export for a volume.""" + pass + + def remove_export(self, context, volume): + """Remove an export for a volume.""" + pass + + def create_snapshot(self, snapshot): + """Create a snapshot.""" + snapshot_id = self.common.create_snapshot(snapshot) + return {'provider_location': snapshot_id} + + def delete_snapshot(self, 
snapshot):
+        """Delete a snapshot."""
+        self.common.delete_snapshot(snapshot)
+
+    def validate_connector(self, connector):
+        """Check for wwpns in the connector."""
+        if 'wwpns' not in connector:
+            err_msg = (_('validate_connector: The FC driver requires the '
+                         'wwpns in the connector.'))
+            LOG.error(err_msg)
+            raise exception.VolumeBackendAPIException(data=err_msg)
+
+    def initialize_connection(self, volume, connector):
+        """Create an FC connection between a volume and a host."""
+        LOG.debug(_('initialize_connection: volume name: %(vol)s, '
+                    'host: %(host)s, initiator: %(wwn)s')
+                  % {'vol': volume['name'],
+                     'host': connector['host'],
+                     'wwn': connector['wwpns']})
+
+        self.common._update_login_info()
+        # First, add a host if it is not added before.
+        host_id = self.common.add_host(connector['host'], connector['ip'])
+        # Then, add free FC ports to the host.
+        ini_wwns = connector['wwpns']
+        free_wwns = self._get_connected_free_wwns()
+        for wwn in free_wwns:
+            if wwn in ini_wwns:
+                self._add_fc_port_to_host(host_id, wwn)
+        fc_port_details = self._get_host_port_details(host_id)
+        tgt_wwns = self._get_tgt_fc_port_wwns(fc_port_details)
+
+        LOG.debug(_('initialize_connection: Target FC port WWNs: %s')
+                  % tgt_wwns)
+
+        # Finally, map the volume to the host.
+        volume_id = volume['provider_location']
+        hostlun_id = self.common.map_volume(host_id, volume_id)
+
+        # Change the LUN controller for better performance; this only
+        # applies to a single path.
+        if len(tgt_wwns) == 1:
+            lun_details = self.common.get_lun_details(volume_id)
+            port_ctr = self._get_fc_port_ctr(fc_port_details[0])
+            if (lun_details['LunType'] == 'THICK' and
+                    lun_details['OwningController'] != port_ctr):
+                self.common.change_lun_ctr(volume_id, port_ctr)
+
+        properties = {}
+        properties['target_discovered'] = False
+        properties['target_wwn'] = tgt_wwns
+        properties['target_lun'] = int(hostlun_id)
+        properties['volume_id'] = volume['id']
+
+        return {'driver_volume_type': 'fibre_channel',
+                'data': properties}
+
+    def _get_connected_free_wwns(self):
+        """Get free connected FC port WWNs.
+
+        If no new ports are connected, return an empty list.
+
+        """
+
+        cli_cmd = 'showfreeport'
+        out = self.common._execute_cli(cli_cmd)
+        wwns = []
+        if re.search('Host Free Port Information', out):
+            for line in out.split('\r\n')[6:-2]:
+                tmp_line = line.split()
+                if (tmp_line[1] == 'FC') and (tmp_line[4] == 'Connected'):
+                    wwns.append(tmp_line[0])
+
+        return wwns
+
+    def _add_fc_port_to_host(self, hostid, wwn, multipathtype=0):
+        """Add an FC port to the host."""
+        portname = HOST_PORT_PREFIX + wwn
+        cli_cmd = ('addhostport -host %(id)s -type 1 '
+                   '-wwn %(wwn)s -n %(name)s -mtype %(multype)s'
+                   % {'id': hostid,
+                      'wwn': wwn,
+                      'name': portname,
+                      'multype': multipathtype})
+        out = self.common._execute_cli(cli_cmd)
+
+        msg = ('Failed to add FC port %(port)s to host %(host)s.'
+               % {'port': portname, 'host': hostid})
+        self._assert_cli_operate_out('_add_fc_port_to_host', msg, cli_cmd, out)
+
+    def _get_host_port_details(self, host_id):
+        """Parse 'showhostpath' output into a list of per-path dicts."""
+        cli_cmd = 'showhostpath -host %s' % host_id
+        out = self.common._execute_cli(cli_cmd)
+
+        self._assert_cli_out(re.search('Multi Path Information', out),
+                             '_get_host_port_details',
+                             'Failed to get host port details.',
+                             cli_cmd, out)
+
+        port_details = []
+        tmp_details = {}
+        for line in out.split('\r\n')[4:-2]:
+            line = line.split('|')
+            # Separator between multiple path detail blocks, usually
+            # a "-------" line.
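+            # NOTE(editor): illustrative sketch (hypothetical CLI output):
+            # each completed block parses into a dict such as
+            #     {'TargetWWN': '2100001b329b5c21', 'ControllerID': 'A'}
+            # which _get_tgt_fc_port_wwns and _get_fc_port_ctr consume.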
+            if len(line) == 1:
+                port_details.append(tmp_details)
+                continue
+            key = ''.join(line[0].strip().split())
+            val = line[1].strip()
+            tmp_details[key] = val
+        port_details.append(tmp_details)
+        return port_details
+
+    def _get_tgt_fc_port_wwns(self, port_details):
+        wwns = []
+        for port in port_details:
+            wwns.append(port['TargetWWN'])
+        return wwns
+
+    def _get_fc_port_ctr(self, port_details):
+        return port_details['ControllerID']
+
+    def terminate_connection(self, volume, connector, **kwargs):
+        """Terminate the map."""
+        LOG.debug(_('terminate_connection: volume: %(vol)s, host: %(host)s, '
+                    'connector: %(initiator)s')
+                  % {'vol': volume['name'],
+                     'host': connector['host'],
+                     'initiator': connector['initiator']})
+
+        self.common._update_login_info()
+        host_id = self.common.remove_map(volume['provider_location'],
+                                         connector['host'])
+        # Remove all FC ports and delete the host if
+        # no volume is mapped to it.
+        if not self.common._get_host_map_info(host_id):
+            self._remove_fc_ports(host_id, connector)
+
+    def _remove_fc_ports(self, hostid, connector):
+        """Remove FC ports and delete the host."""
+        wwns = connector['wwpns']
+        port_num = 0
+        port_info = self.common._get_host_port_info(hostid)
+        if port_info:
+            port_num = len(port_info)
+            for port in port_info:
+                if port[2] in wwns:
+                    self.common._delete_hostport(port[0])
+                    port_num -= 1
+        else:
+            LOG.warn(_('_remove_fc_ports: FC port was not found '
+                       'on host %(hostid)s.') % {'hostid': hostid})
+
+        if port_num == 0:
+            self.common._delete_host(hostid)
+
+    def get_volume_stats(self, refresh=False):
+        """Get volume stats."""
+        self._stats = self.common.get_volume_stats(refresh)
+        self._stats['storage_protocol'] = 'FC'
+        self._stats['driver_version'] = self.VERSION
+        backend_name = self.configuration.safe_get('volume_backend_name')
+        self._stats['volume_backend_name'] = (backend_name or
+                                              self.__class__.__name__)
+        return self._stats
diff --git a/cinder/volume/drivers/huawei/huawei_utils.py b/cinder/volume/drivers/huawei/huawei_utils.py
new file mode 100644
index 0000000000..8366c14a44
--- /dev/null
+++ b/cinder/volume/drivers/huawei/huawei_utils.py
@@ -0,0 +1,132 @@
+# Copyright (c) 2013 Huawei Technologies Co., Ltd.
+# Copyright (c) 2012 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from xml.etree import ElementTree as ET
+
+from cinder.openstack.common import log as logging
+
+LOG = logging.getLogger(__name__)
+
+os_type = {'Linux': '0',
+           'Windows': '1',
+           'Solaris': '2',
+           'HP-UX': '3',
+           'AIX': '4',
+           'XenServer': '5',
+           'Mac OS X': '6',
+           'VMware ESX': '7'}
+
+
+def parse_xml_file(filepath):
+    """Get the root of an xml file."""
+    try:
+        tree = ET.parse(filepath)
+        root = tree.getroot()
+        return root
+    except IOError as err:
+        LOG.error(_('parse_xml_file: %s') % err)
+        # Re-raise with a bare 'raise' to preserve the original traceback.
+        raise
+
+
+def get_xml_item(xml_root, item):
+    """Get the given item details.
+
+    :param xml_root: The root of the xml tree
+    :param item: The tag to look up
+    :return: A list of dicts, one per matching element, each containing
+        the element's text and attributes.
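+
+    For example (illustrative), asking for item 'Storage/Product' when the
+    tree contains '<Storage><Product>T</Product></Storage>' returns
+    [{'text': 'T', 'attrib': {}}].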
+ """ + items_list = [] + items = xml_root.findall(item) + for item in items: + tmp_dict = {'text': None, 'attrib': {}} + if item.text: + tmp_dict['text'] = item.text.strip() + for key, val in item.attrib.items(): + if val: + item.attrib[key] = val.strip() + tmp_dict['attrib'] = item.attrib + items_list.append(tmp_dict) + return items_list + + +def is_xml_item_exist(xml_root, item, attrib_key=None): + """Check if the given item exits in xml config file. + + :param xml_root: The root of xml tree + :param item: The xml tag to check + :param attrib_key: The xml attrib to check + :return: True of False + """ + items_list = get_xml_item(xml_root, item) + value = [] + if attrib_key: + for tmp_dict in items_list: + if tmp_dict['attrib'].get(attrib_key, None): + return True + else: + if items_list and items_list[0]['text']: + return True + return False + + +def is_xml_item_valid(xml_root, item, valid_list, attrib_key=None): + """Check if the given item is valid in xml config file. + + :param xml_root: The root of xml tree + :param item: The xml tag to check + :param valid_list: The valid item value + :param attrib_key: The xml attrib to check + :return: True of False + """ + items_list = get_xml_item(xml_root, item) + if attrib_key: + for tmp_dict in items_list: + value = tmp_dict['attrib'].get(attrib_key, None) + if value not in valid_list: + return False + else: + value = items_list[0]['text'] + if value not in valid_list: + return False + + return True + + +def get_conf_host_os_type(host_ip, config): + """Get host OS type from xml config file. + + :param host_ip: The IP of Nova host + :param config: xml config file + :return: host OS type + """ + os_conf = {} + root = parse_xml_file(config) + hosts_list = get_xml_item(root, 'Host') + for host in hosts_list: + os = host['attrib']['OSType'].strip() + ips = [ip.strip() for ip in host['attrib']['HostIP'].split(',')] + os_conf[os] = ips + host_os = None + for k, v in os_conf.items(): + if host_ip in v: + host_os = os_type.get(k, None) + if not host_os: + host_os = os_type['Linux'] # default os type + + LOG.debug(_('_get_host_os_type: Host %(ip)s OS type is %(os)s.') + % {'ip': host_ip, 'os': host_os}) + + return host_os diff --git a/cinder/volume/drivers/huawei/rest_common.py b/cinder/volume/drivers/huawei/rest_common.py new file mode 100644 index 0000000000..1bcbb40f36 --- /dev/null +++ b/cinder/volume/drivers/huawei/rest_common.py @@ -0,0 +1,1300 @@ +# Copyright (c) 2013 Huawei Technologies Co., Ltd. +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+"""Common class for Huawei HVS storage drivers.""" + +import base64 +import cookielib +import json +import time +import urllib2 +import uuid + +from xml.etree import ElementTree as ET + +from cinder import context +from cinder import exception +from cinder.openstack.common import excutils +from cinder.openstack.common import log as logging +from cinder import units +from cinder import utils +from cinder.volume.drivers.huawei import huawei_utils +from cinder.volume import volume_types + + +LOG = logging.getLogger(__name__) + +QOS_KEY = ["Qos-high", "Qos-normal", "Qos-low"] +TIER_KEY = ["Tier-high", "Tier-normal", "Tier-low"] + + +class HVSCommon(): + """Common class for Huawei OceanStor HVS storage system.""" + + def __init__(self, configuration): + self.configuration = configuration + self.cookie = cookielib.CookieJar() + self.url = None + self.xml_conf = self.configuration.cinder_huawei_conf_file + + def call(self, url=False, data=None, method=None): + """Send requests to HVS server. + + Send HTTPS call, get response in JSON. + Convert response into Python Object and return it. + """ + + LOG.debug(_('HVS Request URL: %(url)s') % {'url': url}) + LOG.debug(_('HVS Request Data: %(data)s') % {'data': data}) + + headers = {"Connection": "keep-alive", + "Content-Type": "application/json"} + opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cookie)) + urllib2.install_opener(opener) + + try: + urllib2.socket.setdefaulttimeout(720) + req = urllib2.Request(url, data, headers) + if method: + req.get_method = lambda: method + res = urllib2.urlopen(req).read().decode("utf-8") + LOG.debug(_('HVS Response Data: %(res)s') % {'res': res}) + except Exception as err: + err_msg = _('Bad response from server: %s') % err + LOG.error(err_msg) + raise err + + try: + res_json = json.loads(res) + except Exception as err: + LOG.error(_('JSON transfer error')) + raise err + + return res_json + + def login(self): + """Log in HVS array. + + If login failed, the driver will sleep 30's to avoid frequent + connection to the server. 
+ """ + + login_info = self._get_login_info() + url = login_info['HVSURL'] + "xx/sessions" + data = json.dumps({"username": login_info['UserName'], + "password": login_info['UserPassword'], + "scope": "0"}) + result = self.call(url, data) + if (result['error']['code'] != 0) or ("data" not in result): + time.sleep(30) + msg = _("Login error, reason is %s") % result + LOG.error(msg) + raise exception.CinderException(msg) + + deviceid = result['data']['deviceid'] + self.url = login_info['HVSURL'] + deviceid + return deviceid + + def _init_tier_parameters(self, parameters, lunparam): + """Init the LUN parameters through the volume type "performance".""" + if "tier" in parameters: + smart_tier = parameters['tier'] + if smart_tier == 'Tier_high': + lunparam['INITIALDISTRIBUTEPOLICY'] = "1" + elif smart_tier == 'Tier_normal': + lunparam['INITIALDISTRIBUTEPOLICY'] = "2" + elif smart_tier == 'Tier_low': + lunparam['INITIALDISTRIBUTEPOLICY'] = "3" + else: + lunparam['INITIALDISTRIBUTEPOLICY'] = "2" + + def _init_lun_parameters(self, name, parameters): + """Init basic LUN parameters.""" + lunparam = {"TYPE": "11", + "NAME": name, + "PARENTTYPE": "216", + "PARENTID": parameters['pool_id'], + "DESCRIPTION": "", + "ALLOCTYPE": parameters['LUNType'], + "CAPACITY": parameters['volume_size'], + "WRITEPOLICY": parameters['WriteType'], + "MIRRORPOLICY": parameters['MirrorSwitch'], + "PREFETCHPOLICY": parameters['PrefetchType'], + "PREFETCHVALUE": parameters['PrefetchValue'], + "DATATRANSFERPOLICY": "1", + "INITIALDISTRIBUTEPOLICY": "0"} + + return lunparam + + def _init_qos_parameters(self, parameters, lun_param): + """Init the LUN parameters through the volume type "Qos-xxx".""" + policy_id = None + policy_info = None + if "qos" in parameters: + policy_info = self._find_qos_policy_info(parameters['qos']) + if policy_info: + policy_id = policy_info['ID'] + + lun_param['IOClASSID'] = policy_info['ID'] + qos_level = parameters['qos_level'] + if qos_level == 'Qos-high': + lun_param['IOPRIORITY'] = "3" + elif qos_level == 'Qos-normal': + lun_param['IOPRIORITY'] = "2" + elif qos_level == 'Qos-low': + lun_param['IOPRIORITY'] = "1" + else: + lun_param['IOPRIORITY'] = "2" + + return (policy_info, policy_id) + + def _assert_rest_result(self, result, err_str): + error_code = result['error']['code'] + if error_code != 0: + msg = _('%(err)s\nresult: %(res)s') % {'err': err_str, + 'res': result} + LOG.error(msg) + raise exception.CinderException(msg) + + def _assert_data_in_result(self, result, msg): + if "data" not in result: + err_msg = _('%s "data" was not in result.') % msg + LOG.error(err_msg) + raise exception.CinderException(err_msg) + + def _create_volume(self, lun_param): + url = self.url + "/lun" + data = json.dumps(lun_param) + result = self.call(url, data) + + msg = 'Create volume error.' + self._assert_rest_result(result, msg) + self._assert_data_in_result(result, msg) + + return result['data']['ID'] + + def create_volume(self, volume): + volume_name = self._encode_name(volume['id']) + config_params = self._parse_volume_type(volume) + + # Prepare lun parameters, including qos parameter and tier parameter. 
+ lun_param = self._init_lun_parameters(volume_name, config_params) + self._init_tier_parameters(config_params, lun_param) + policy_info, policy_id = self._init_qos_parameters(config_params, + lun_param) + + # Create LUN in array + lunid = self._create_volume(lun_param) + + # Enable qos, need to add lun into qos policy + if "qos" in config_params: + lun_list = policy_info['LUNLIST'] + lun_list.append(lunid) + if policy_id: + self._update_qos_policy_lunlist(lun_list, policy_id) + else: + LOG.warn(_("Can't find the Qos policy in array")) + + # Create lun group and add LUN into to lun group + lungroup_id = self._create_lungroup(volume_name) + self._associate_lun_to_lungroup(lungroup_id, lunid) + + return lunid + + def _get_volume_size(self, poolinfo, volume): + """Calculate the volume size. + + We should divide the given volume size by 512 for the HVS system + calculates volume size with sectors, which is 512 bytes. + """ + + volume_size = units.GiB / 512 # 1G + if int(volume['size']) != 0: + volume_size = int(volume['size']) * units.GiB / 512 + + return volume_size + + def delete_volume(self, volume): + """Delete a volume. + + Three steps: first, remove associate from lun group. + Second, remove associate from qos policy. Third, remove the lun. + """ + + name = self._encode_name(volume['id']) + lun_id = self._get_volume_by_name(name) + lungroup_id = self._find_lungroup(name) + + if lun_id and lungroup_id: + self._delete_lun_from_qos_policy(volume, lun_id) + self._delete_associated_lun_from_lungroup(lungroup_id, lun_id) + self._delete_lungroup(lungroup_id) + self._delete_lun(lun_id) + else: + LOG.warn(_("Can't find lun or lun group in array")) + + def _delete_lun_from_qos_policy(self, volume, lun_id): + """Remove lun from qos policy.""" + parameters = self._parse_volume_type(volume) + + if "qos" in parameters: + qos = parameters['qos'] + policy_info = self._find_qos_policy_info(qos) + if policy_info: + lun_list = policy_info['LUNLIST'] + for item in lun_list: + if lun_id == item: + lun_list.remove(item) + self._update_qos_policy_lunlist(lun_list, policy_info['ID']) + + def _delete_lun(self, lun_id): + url = self.url + "/lun/" + lun_id + data = json.dumps({"TYPE": "11", + "ID": lun_id}) + result = self.call(url, data, "DELETE") + self._assert_rest_result(result, 'delete lun error') + + def _encode_name(self, name): + uuid_str = name.replace("-", "") + vol_uuid = uuid.UUID('urn:uuid:%s' % uuid_str) + vol_encoded = base64.urlsafe_b64encode(vol_uuid.bytes) + newuuid = vol_encoded.replace("=", "") + return newuuid + + def _find_pool_info(self): + root = huawei_utils.parse_xml_file(self.xml_conf) + pool_name = root.findtext('LUN/StoragePool') + if not pool_name: + err_msg = _("Invalid resource pool: %s") % pool_name + LOG.error(err_msg) + raise exception.InvalidInput(err_msg) + + url = self.url + "/storagepool" + result = self.call(url, None) + self._assert_rest_result(result, 'Query resource pool error') + + poolinfo = {} + if "data" in result: + for item in result['data']: + if pool_name.strip() == item['NAME']: + poolinfo['ID'] = item['ID'] + poolinfo['CAPACITY'] = item['USERFREECAPACITY'] + poolinfo['TOTALCAPACITY'] = item['USERTOTALCAPACITY'] + break + + if not poolinfo: + msg = (_('Get pool info error, pool name is:%s') % pool_name) + LOG.error(msg) + raise exception.CinderException(msg) + + return poolinfo + + def _get_volume_by_name(self, name): + url = self.url + "/lun" + result = self.call(url, None, "GET") + self._assert_rest_result(result, 'Get volume by name error!') + + volume_id = 
None + if "data" in result: + for item in result['data']: + if name == item['NAME']: + volume_id = item['ID'] + break + return volume_id + + def _active_snapshot(self, snapshot_id): + activeurl = self.url + "/snapshot/activate" + data = json.dumps({"SNAPSHOTLIST": [snapshot_id]}) + result = self.call(activeurl, data) + self._assert_rest_result(result, 'Active snapshot error.') + + def _create_snapshot(self, snapshot): + snapshot_name = self._encode_name(snapshot['id']) + volume_name = self._encode_name(snapshot['volume_id']) + + LOG.debug(_('create_snapshot:snapshot name:%(snapshot)s, ' + 'volume name:%(volume)s.') + % {'snapshot': snapshot_name, + 'volume': volume_name}) + + lun_id = self._get_volume_by_name(volume_name) + url = self.url + "/snapshot" + data = json.dumps({"TYPE": "27", + "NAME": snapshot_name, + "PARENTTYPE": "11", + "PARENTID": lun_id}) + result = self.call(url, data) + + msg = 'Create snapshot error.' + self._assert_rest_result(result, msg) + self._assert_data_in_result(result, msg) + + return result['data']['ID'] + + def create_snapshot(self, snapshot): + snapshot_id = self._create_snapshot(snapshot) + self._active_snapshot(snapshot_id) + + def _stop_snapshot(self, snapshot): + snapshot_name = self._encode_name(snapshot['id']) + volume_name = self._encode_name(snapshot['volume_id']) + + LOG.debug(_('_stop_snapshot:snapshot name:%(snapshot)s, ' + 'volume name:%(volume)s.') + % {'snapshot': snapshot_name, + 'volume': volume_name}) + + snapshotid = self._get_snapshotid_by_name(snapshot_name) + stopdata = json.dumps({"ID": snapshotid}) + url = self.url + "/snapshot/stop" + result = self.call(url, stopdata, "PUT") + self._assert_rest_result(result, 'Stop snapshot error.') + + return snapshotid + + def _delete_snapshot(self, snapshotid): + url = self.url + "/snapshot/%s" % snapshotid + data = json.dumps({"TYPE": "27", "ID": snapshotid}) + result = self.call(url, data, "DELETE") + self._assert_rest_result(result, 'Delete snapshot error.') + + def delete_snapshot(self, snapshot): + snapshotid = self._stop_snapshot(snapshot) + self._delete_snapshot(snapshotid) + + def _get_snapshotid_by_name(self, name): + url = self.url + "/snapshot" + data = json.dumps({"TYPE": "27"}) + result = self.call(url, data, "GET") + self._assert_rest_result(result, 'Get snapshot id error.') + + snapshot_id = None + if "data" in result: + for item in result['data']: + if name == item['NAME']: + snapshot_id = item['ID'] + break + return snapshot_id + + def _copy_volume(self, volume, copy_name, src_lun, tgt_lun): + luncopy_id = self._create_luncopy(copy_name, + src_lun, tgt_lun) + try: + self._start_luncopy(luncopy_id) + self._wait_for_luncopy(luncopy_id) + except Exception: + with excutils.save_and_reraise_exception(): + self._delete_luncopy(luncopy_id) + self.delete_volume(volume) + + self._delete_luncopy(luncopy_id) + + def create_volume_from_snapshot(self, volume, snapshot): + """Create a volume from a snapshot. + + We use LUNcopy to copy a new volume from snapshot. + The time needed increases as volume size does. 
+ """ + + snapshot_name = self._encode_name(snapshot['id']) + src_lun_id = self._get_snapshotid_by_name(snapshot_name) + tgt_lun_id = self.create_volume(volume) + luncopy_name = self._encode_name(volume['id']) + + self._copy_volume(volume, luncopy_name, src_lun_id, tgt_lun_id) + + def create_cloned_volume(self, volume, src_vref): + """Clone a new volume from an existing volume.""" + volume_name = self._encode_name(src_vref['id']) + src_lun_id = self._get_volume_by_name(volume_name) + tgt_lun_id = self.create_volume(volume) + luncopy_name = self._encode_name(volume['id']) + + self._copy_volume(volume, luncopy_name, src_lun_id, tgt_lun_id) + + def _create_luncopy(self, luncopyname, srclunid, tgtlunid): + """Create a luncopy.""" + url = self.url + "/luncopy" + data = json.dumps({"TYPE": "219", + "NAME": luncopyname, + "DESCRIPTION": luncopyname, + "COPYSPEED": "2", + "LUNCOPYTYPE": "1", + "SOURCELUN": ("INVALID;%s;INVALID;INVALID;INVALID" + % srclunid), + "TARGETLUN": ("INVALID;%s;INVALID;INVALID;INVALID" + % tgtlunid)}) + result = self.call(url, data) + + msg = 'Create lun copy error.' + self._assert_rest_result(result, msg) + self._assert_data_in_result(result, msg) + + return result['data']['ID'] + + def _add_host_into_hostgroup(self, host_name, host_ip): + """Associate host to hostgroup. + + If host group doesn't exist, create one. + + """ + + hostgroup_id = self._find_hostgroup(host_name) + if hostgroup_id is None: + hostgroup_id = self._create_hostgroup(host_name) + + hostid = self._find_host(host_name) + if hostid is None: + os_type = huawei_utils.get_conf_host_os_type(host_ip, + self.xml_conf) + hostid = self._add_host(host_name, os_type) + self._associate_host_to_hostgroup(hostgroup_id, hostid) + + return hostid, hostgroup_id + + def _mapping_hostgroup_and_lungroup(self, volume_name, + hostgroup_id, host_id): + """Add hostgroup and lungroup to view.""" + lungroup_id = self._find_lungroup(volume_name) + lun_id = self._get_volume_by_name(volume_name) + view_id = self._find_mapping_view(volume_name) + + LOG.debug(_('_mapping_hostgroup_and_lungroup: lun_group: %(lun_group)s' + 'view_id: %(view_id)s') + % {'lun_group': str(lungroup_id), + 'view_id': str(view_id)}) + + try: + if view_id is None: + view_id = self._add_mapping_view(volume_name, host_id) + self._associate_hostgroup_to_view(view_id, hostgroup_id) + self._associate_lungroup_to_view(view_id, lungroup_id) + else: + if not self._hostgroup_associated(view_id, hostgroup_id): + self._associate_hostgroup_to_view(view_id, hostgroup_id) + if not self._lungroup_associated(view_id, lungroup_id): + self._associate_lungroup_to_view(view_id, lungroup_id) + + except Exception: + with excutils.save_and_reraise_exception(): + self._delete_hostgoup_mapping_view(view_id, hostgroup_id) + self._delete_lungroup_mapping_view(view_id, lungroup_id) + self._delete_mapping_view(view_id) + + return lun_id + + def _ensure_initiator_added(self, initiator_name, hostid): + added = self._initiator_is_added_to_array(initiator_name) + if not added: + self._add_initiator_to_array(initiator_name) + else: + if self._is_initiator_associated_to_host(initiator_name) is False: + self._associate_initiator_to_host(initiator_name, hostid) + + def initialize_connection_iscsi(self, volume, connector): + """Map a volume to a host and return target iSCSI information.""" + initiator_name = connector['initiator'] + volume_name = self._encode_name(volume['id']) + + LOG.debug(_('initiator name:%(initiator_name)s, ' + 'volume name:%(volume)s.') + % {'initiator_name': 
initiator_name, + 'volume': volume_name}) + + (iscsi_iqn, target_ip) = self._get_iscsi_params(connector) + + #create host_group if not exist + hostid, hostgroup_id = self._add_host_into_hostgroup(connector['host'], + connector['ip']) + self._ensure_initiator_added(initiator_name, hostid) + + # Mapping lungroup and hostgroup to view + lun_id = self._mapping_hostgroup_and_lungroup(volume_name, + hostgroup_id, hostid) + hostlunid = self._find_host_lun_id(hostid, lun_id) + LOG.debug(_("host lun id is %s") % hostlunid) + + # Return iSCSI properties. + properties = {} + properties['target_discovered'] = False + properties['target_portal'] = ('%s:%s' % (target_ip, '3260')) + properties['target_iqn'] = iscsi_iqn + properties['target_lun'] = int(hostlunid) + properties['volume_id'] = volume['id'] + + return {'driver_volume_type': 'iscsi', 'data': properties} + + def initialize_connection_fc(self, volume, connector): + wwns = connector['wwpns'] + volume_name = self._encode_name(volume['id']) + + LOG.debug(_('initiator name:%(initiator_name)s, ' + 'volume name:%(volume)s.') + % {'initiator_name': wwns, + 'volume': volume_name}) + + # Create host group if not exist + hostid, hostgroup_id = self._add_host_into_hostgroup(connector['host'], + connector['ip']) + + free_wwns = self._get_connected_free_wwns() + LOG.debug(_("the free wwns %s") % free_wwns) + for wwn in wwns: + if wwn in free_wwns: + self._add_fc_port_to_host(hostid, wwn) + + lun_id = self._mapping_hostgroup_and_lungroup(volume_name, + hostgroup_id, hostid) + host_lun_id = self._find_host_lun_id(hostid, lun_id) + + tgt_port_wwns = [] + for wwn in wwns: + tgtwwpns = self._get_fc_target_wwpns(wwn) + if tgtwwpns: + tgt_port_wwns.append(tgtwwpns) + + # Return FC properties. + properties = {} + properties['target_discovered'] = False + properties['target_wwn'] = tgt_port_wwns + properties['target_lun'] = int(host_lun_id) + properties['volume_id'] = volume['id'] + LOG.debug(_("the fc server properties is:%s") % properties) + + return {'driver_volume_type': 'fibre_channel', + 'data': properties} + + def _get_iscsi_tgt_port(self): + url = self.url + "/iscsidevicename" + result = self.call(url, None) + + msg = 'Get iSCSI target port error.' + self._assert_rest_result(result, msg) + self._assert_data_in_result(result, msg) + + return result['data'][0]['CMO_ISCSI_DEVICE_NAME'] + + def _find_hostgroup(self, groupname): + """Get the given hostgroup id.""" + url = self.url + "/hostgroup" + result = self.call(url, None, "GET") + self._assert_rest_result(result, 'Get host group information error.') + + host_group_id = None + if "data" in result: + for item in result['data']: + if groupname == item['NAME']: + host_group_id = item['ID'] + break + return host_group_id + + def _find_lungroup(self, lungroupname): + """Get the given hostgroup id.""" + url = self.url + "/lungroup" + result = self.call(url, None, "GET") + self._assert_rest_result(result, 'Get lun group information error.') + + lun_group_id = None + if 'data' in result: + for item in result['data']: + if lungroupname == item['NAME']: + lun_group_id = item['ID'] + break + return lun_group_id + + def _create_hostgroup(self, hostgroupname): + url = self.url + "/hostgroup" + data = json.dumps({"TYPE": "14", "NAME": hostgroupname}) + result = self.call(url, data) + + msg = 'Create host group error.' 
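+        # NOTE(editor): illustrative shape of a successful REST response,
+        # which the two assertions below depend on (the ID is hypothetical):
+        #     {'error': {'code': 0}, 'data': {'ID': '0', 'NAME': ...}}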
+ self._assert_rest_result(result, msg) + self._assert_data_in_result(result, msg) + + return result['data']['ID'] + + def _create_lungroup(self, lungroupname): + url = self.url + "/lungroup" + data = json.dumps({"DESCRIPTION": lungroupname, + "NAME": lungroupname}) + result = self.call(url, data) + + msg = 'Create lun group error.' + self._assert_rest_result(result, msg) + self._assert_data_in_result(result, msg) + + return result['data']['ID'] + + def _delete_lungroup(self, lungroupid): + url = self.url + "/LUNGroup/" + lungroupid + result = self.call(url, None, "DELETE") + self._assert_rest_result(result, 'Delete lun group error.') + + def _lungroup_associated(self, viewid, lungroupid): + url_subfix = ("/mappingview/associate?TYPE=245&" + "ASSOCIATEOBJTYPE=256&ASSOCIATEOBJID=%s" % lungroupid) + url = self.url + url_subfix + result = self.call(url, None, "GET") + self._assert_rest_result(result, 'Check lun group associated error.') + + if "data" in result: + for item in result['data']: + if viewid == item['ID']: + return True + return False + + def _hostgroup_associated(self, viewid, hostgroupid): + url_subfix = ("/mappingview/associate?TYPE=245&" + "ASSOCIATEOBJTYPE=14&ASSOCIATEOBJID=%s" % hostgroupid) + url = self.url + url_subfix + result = self.call(url, None, "GET") + self._assert_rest_result(result, 'Check host group associated error.') + + if "data" in result: + for item in result['data']: + if viewid == item['ID']: + return True + return False + + def _find_host_lun_id(self, hostid, lunid): + time.sleep(2) + url = self.url + ("/lun/associate?TYPE=11&ASSOCIATEOBJTYPE=21" + "&ASSOCIATEOBJID=%s" % (hostid)) + result = self.call(url, None, "GET") + self._assert_rest_result(result, 'Find host lun id error.') + + host_lun_id = 1 + if "data" in result: + for item in result['data']: + if lunid == item['ID']: + associate_data = result['data'][0]['ASSOCIATEMETADATA'] + try: + hostassoinfo = json.loads(associate_data) + host_lun_id = hostassoinfo['HostLUNID'] + break + except Exception as err: + msg = _("JSON transfer data error. 
%s") % err + LOG.error(msg) + raise err + return host_lun_id + + def _find_host(self, hostname): + """Get the given host ID.""" + url = self.url + "/host" + data = json.dumps({"TYPE": "21"}) + result = self.call(url, data, "GET") + self._assert_rest_result(result, 'Find host in host group error.') + + host_id = None + if "data" in result: + for item in result['data']: + if hostname == item['NAME']: + host_id = item['ID'] + break + return host_id + + def _add_host(self, hostname, type): + """Add a new host.""" + url = self.url + "/host" + data = json.dumps({"TYPE": "21", + "NAME": hostname, + "OPERATIONSYSTEM": type}) + result = self.call(url, data) + self._assert_rest_result(result, 'Add new host error.') + + if "data" in result: + return result['data']['ID'] + else: + return None + + def _associate_host_to_hostgroup(self, hostgroupid, hostid): + url = self.url + "/host/associate" + data = json.dumps({"ID": hostgroupid, + "ASSOCIATEOBJTYPE": "21", + "ASSOCIATEOBJID": hostid}) + + result = self.call(url, data) + self._assert_rest_result(result, 'Associate host to host group error.') + + def _associate_lun_to_lungroup(self, lungroupid, lunid): + """Associate lun to lun group.""" + url = self.url + "/lungroup/associate" + data = json.dumps({"ID": lungroupid, + "ASSOCIATEOBJTYPE": "11", + "ASSOCIATEOBJID": lunid}) + result = self.call(url, data) + self._assert_rest_result(result, 'Associate lun to lun group error.') + + def _delete_associated_lun_from_lungroup(self, lungroupid, lunid): + """Remove lun from lun group.""" + + url = self.url + ("/lungroup/associate?ID=%s" + "&ASSOCIATEOBJTYPE=11&ASSOCIATEOBJID=%s" + % (lungroupid, lunid)) + + result = self.call(url, None, 'DELETE') + self._assert_rest_result(result, + 'Delete associated lun from lun group error') + + def _initiator_is_added_to_array(self, ininame): + """Check whether the initiator is already added in array.""" + url = self.url + "/iscsi_initiator" + data = json.dumps({"TYPE": "222", "ID": ininame}) + result = self.call(url, data, "GET") + self._assert_rest_result(result, + 'Check initiator added to array error.') + + if "data" in result: + for item in result['data']: + if item["ID"] == ininame: + return True + return False + + def _is_initiator_associated_to_host(self, ininame): + """Check whether the initiator is associated to the host.""" + url = self.url + "/iscsi_initiator" + data = json.dumps({"TYPE": "222", "ID": ininame}) + result = self.call(url, data, "GET") + self._assert_rest_result(result, + 'Check initiator associated to host error.') + + if "data" in result: + for item in result['data']: + if item['ID'] == ininame and item['ISFREE'] == "true": + return False + return True + + def _add_initiator_to_array(self, ininame): + """Add a new initiator to storage device.""" + url = self.url + "/iscsi_initiator/" + data = json.dumps({"TYPE": "222", + "ID": ininame, + "USECHAP": "False"}) + result = self.call(url, data) + self._assert_rest_result(result, 'Add initiator to array error.') + + def _associate_initiator_to_host(self, ininame, hostid): + """Associate initiator with the host.""" + url = self.url + "/iscsi_initiator/" + ininame + data = json.dumps({"TYPE": "222", + "ID": ininame, + "USECHAP": "False", + "PARENTTYPE": "21", + "PARENTID": hostid}) + result = self.call(url, data, "PUT") + self._assert_rest_result(result, 'Associate initiator to host error.') + + def _find_mapping_view(self, name): + """Find mapping view.""" + url = self.url + "/mappingview" + data = json.dumps({"TYPE": "245"}) + result = self.call(url, 
data, "GET") + + msg = 'Find map view error.' + self._assert_rest_result(result, msg) + self._assert_data_in_result(result, msg) + + viewid = None + for item in result['data']: + if name == item['NAME']: + viewid = item['ID'] + break + return viewid + + def _add_mapping_view(self, name, host_id): + url = self.url + "/mappingview" + data = json.dumps({"NAME": name, "TYPE": "245"}) + result = self.call(url, data) + self._assert_rest_result(result, 'Add map view error.') + + return result['data']['ID'] + + def _associate_hostgroup_to_view(self, viewID, hostGroupID): + url = self.url + "/MAPPINGVIEW/CREATE_ASSOCIATE" + data = json.dumps({"ASSOCIATEOBJTYPE": "14", + "ASSOCIATEOBJID": hostGroupID, + "TYPE": "245", + "ID": viewID}) + result = self.call(url, data, "PUT") + self._assert_rest_result(result, 'Associate host to view error.') + + def _associate_lungroup_to_view(self, viewID, lunGroupID): + url = self.url + "/MAPPINGVIEW/CREATE_ASSOCIATE" + data = json.dumps({"ASSOCIATEOBJTYPE": "256", + "ASSOCIATEOBJID": lunGroupID, + "TYPE": "245", + "ID": viewID}) + result = self.call(url, data, "PUT") + self._assert_rest_result(result, 'Associate lun group to view error.') + + def _delete_lungroup_mapping_view(self, view_id, lungroup_id): + """remove lun group associate from the mapping view.""" + url = self.url + "/mappingview/REMOVE_ASSOCIATE" + data = json.dumps({"ASSOCIATEOBJTYPE": "256", + "ASSOCIATEOBJID": lungroup_id, + "TYPE": "245", + "ID": view_id}) + result = self.call(url, data, "PUT") + self._assert_rest_result(result, 'Delete lun group from view error.') + + def _delete_hostgoup_mapping_view(self, view_id, hostgroup_id): + """remove host group associate from the mapping view.""" + url = self.url + "/mappingview/REMOVE_ASSOCIATE" + data = json.dumps({"ASSOCIATEOBJTYPE": "14", + "ASSOCIATEOBJID": hostgroup_id, + "TYPE": "245", + "ID": view_id}) + result = self.call(url, data, "PUT") + self._assert_rest_result(result, 'Delete host group from view error.') + + def _delete_mapping_view(self, view_id): + """remove mapping view from the storage.""" + url = self.url + "/mappingview/" + view_id + result = self.call(url, None, "DELETE") + self._assert_rest_result(result, 'Delete map view error.') + + def terminate_connection(self, volume, connector, **kwargs): + """Delete map between a volume and a host.""" + initiator_name = connector['initiator'] + volume_name = self._encode_name(volume['id']) + host_name = connector['host'] + + LOG.debug(_('terminate_connection:volume name: %(volume)s, ' + 'initiator name: %(ini)s.') + % {'volume': volume_name, + 'ini': initiator_name}) + + view_id = self._find_mapping_view(volume_name) + hostgroup_id = self._find_hostgroup(host_name) + lungroup_id = self._find_lungroup(volume_name) + + if view_id is not None: + self._delete_hostgoup_mapping_view(view_id, hostgroup_id) + self._delete_lungroup_mapping_view(view_id, lungroup_id) + self._delete_mapping_view(view_id) + + def login_out(self): + """logout the session.""" + url = self.url + "/sessions" + result = self.call(url, None, "DELETE") + self._assert_rest_result(result, 'Log out of session error.') + + def _start_luncopy(self, luncopyid): + """Start a LUNcopy.""" + url = self.url + "/LUNCOPY/start" + data = json.dumps({"TYPE": "219", "ID": luncopyid}) + result = self.call(url, data, "PUT") + self._assert_rest_result(result, 'Start lun copy error.') + + def _get_capacity(self): + """Get free capacity and total capacity of the pools.""" + poolinfo = self._find_pool_info() + pool_capacity = {'total_capacity': 
0.0,
+                         'free_capacity': 0.0}
+
+        if poolinfo:
+            total = int(poolinfo['TOTALCAPACITY']) / 1024.0 / 1024.0 / 2
+            free = int(poolinfo['CAPACITY']) / 1024.0 / 1024.0 / 2
+            pool_capacity['total_capacity'] = total
+            pool_capacity['free_capacity'] = free
+
+        return pool_capacity
+
+    def _get_lun_conf_params(self):
+        """Get parameters from config file for creating lun."""
+        # Default lun set information
+        lunsetinfo = {'LUNType': 'Thick',
+                      'StripUnitSize': '64',
+                      'WriteType': '1',
+                      'MirrorSwitch': '1',
+                      'PrefetchType': '3',
+                      'PrefetchValue': '0',
+                      'PrefetchTimes': '0'}
+
+        root = huawei_utils.parse_xml_file(self.xml_conf)
+        luntype = root.findtext('LUN/LUNType')
+        if luntype:
+            luntype = luntype.strip()
+            if luntype == 'Thick':
+                lunsetinfo['LUNType'] = 0
+            elif luntype == 'Thin':
+                lunsetinfo['LUNType'] = 1
+            else:
+                err_msg = (_('Config file is invalid. LUNType must be "Thin"'
+                             ' or "Thick". LUNType: %(fetchtype)s')
+                           % {'fetchtype': luntype})
+                LOG.error(err_msg)
+                raise exception.VolumeBackendAPIException(data=err_msg)
+
+        stripunitsize = root.findtext('LUN/StripUnitSize')
+        if stripunitsize is not None:
+            lunsetinfo['StripUnitSize'] = stripunitsize.strip()
+        writetype = root.findtext('LUN/WriteType')
+        if writetype is not None:
+            lunsetinfo['WriteType'] = writetype.strip()
+        mirrorswitch = root.findtext('LUN/MirrorSwitch')
+        if mirrorswitch is not None:
+            lunsetinfo['MirrorSwitch'] = mirrorswitch.strip()
+
+        prefetch = root.find('LUN/Prefetch')
+        if prefetch is not None and prefetch.attrib['Type']:
+            fetchtype = prefetch.attrib['Type'].strip()
+            if fetchtype in ['0', '1', '2', '3']:
+                lunsetinfo['PrefetchType'] = fetchtype
+                typevalue = prefetch.attrib['Value'].strip()
+                if lunsetinfo['PrefetchType'] == '1':
+                    lunsetinfo['PrefetchValue'] = typevalue
+                elif lunsetinfo['PrefetchType'] == '2':
+                    lunsetinfo['PrefetchTimes'] = typevalue
+            else:
+                err_msg = (_('PrefetchType config is wrong. PrefetchType'
+                             ' must be 0, 1, 2 or 3. PrefetchType is: '
+                             '%(fetchtype)s')
+                           % {'fetchtype': fetchtype})
+                LOG.error(err_msg)
+                raise exception.CinderException(err_msg)
+        else:
+            LOG.debug(_('Use default prefetch type. '
+                        'Prefetch type: Intelligent.'))
+
+        return lunsetinfo
+
+    def _wait_for_luncopy(self, luncopyid):
+        """Wait for LUNcopy to complete."""
+        while True:
+            luncopy_info = self._get_luncopy_info(luncopyid)
+            if luncopy_info['status'] == '40':
+                break
+            elif luncopy_info['state'] != '1':
+                err_msg = (_('_wait_for_luncopy: LUNcopy status is not normal. '
+ 'LUNcopy name: %(luncopyname)s') + % {'luncopyname': luncopyid}) + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + time.sleep(10) + + def _get_luncopy_info(self, luncopyid): + """Get LUNcopy information.""" + url = self.url + "/LUNCOPY?range=[0-100000]" + data = json.dumps({"TYPE": "219", }) + result = self.call(url, data, "GET") + self._assert_rest_result(result, 'Get lun copy information error.') + + luncopyinfo = {} + if "data" in result: + for item in result['data']: + if luncopyid == item['ID']: + luncopyinfo['name'] = item['NAME'] + luncopyinfo['id'] = item['ID'] + luncopyinfo['state'] = item['HEALTHSTATUS'] + luncopyinfo['status'] = item['RUNNINGSTATUS'] + break + return luncopyinfo + + def _delete_luncopy(self, luncopyid): + """Delete a LUNcopy.""" + url = self.url + "/LUNCOPY/%s" % luncopyid + result = self.call(url, None, "DELETE") + self._assert_rest_result(result, 'Delete lun copy error.') + + def _get_connected_free_wwns(self): + """Get free connected FC port WWNs. + + If no new ports connected, return an empty list. + """ + url = self.url + "/fc_initiator?ISFREE=true&range=[0-1000]" + result = self.call(url, None, "GET") + + msg = 'Get connected free FC wwn error.' + self._assert_rest_result(result, msg) + self._assert_data_in_result(result, msg) + + wwns = [] + for item in result['data']: + wwns.append(item['ID']) + return wwns + + def _add_fc_port_to_host(self, hostid, wwn, multipathtype=0): + """Add a FC port to the host.""" + url = self.url + "/fc_initiator/" + wwn + data = json.dumps({"TYPE": "223", + "ID": wwn, + "PARENTTYPE": 21, + "PARENTID": hostid}) + result = self.call(url, data, "PUT") + self._assert_rest_result(result, 'Add FC port to host error.') + + def _get_iscsi_port_info(self, ip): + """Get iscsi port info in order to build the iscsi target iqn.""" + url = self.url + "/eth_port" + result = self.call(url, None, "GET") + + msg = 'Get iSCSI port information error.' 
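+        # Fail fast if the REST call returned an error or carried no data.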
+        self._assert_rest_result(result, msg)
+        self._assert_data_in_result(result, msg)
+
+        iscsi_port_info = None
+        for item in result['data']:
+            if ip == item['IPV4ADDR']:
+                iscsi_port_info = item['LOCATION']
+                break
+
+        if not iscsi_port_info:
+            msg = (_('_get_iscsi_port_info: Failed to get iscsi port info '
+                     'through config IP %(ip)s, please check config file.')
+                   % {'ip': ip})
+            LOG.error(msg)
+            raise exception.InvalidInput(reason=msg)
+
+        return iscsi_port_info
+
+    def _get_iscsi_conf(self):
+        """Get iSCSI info from config file."""
+        iscsiinfo = {}
+        root = huawei_utils.parse_xml_file(self.xml_conf)
+        iscsiinfo['DefaultTargetIP'] = \
+            root.findtext('iSCSI/DefaultTargetIP').strip()
+        initiator_list = []
+        for dic in root.findall('iSCSI/Initiator'):
+            # Strip values of dic. Create a new dict for each initiator so
+            # that the entries in initiator_list do not share state.
+            tmp_dic = {}
+            for k, v in dic.items():
+                tmp_dic[k] = v.strip()
+            initiator_list.append(tmp_dic)
+        iscsiinfo['Initiator'] = initiator_list
+
+        return iscsiinfo
+
+    def _get_tgt_iqn(self, iscsiip):
+        """Get target iSCSI iqn."""
+        LOG.debug(_('_get_tgt_iqn: iSCSI IP is %s.') % iscsiip)
+        ip_info = self._get_iscsi_port_info(iscsiip)
+        iqn_prefix = self._get_iscsi_tgt_port()
+
+        split_list = ip_info.split(".")
+        newstr = split_list[1] + split_list[2]
+        if newstr[0] == 'A':
+            ctr = "0"
+        elif newstr[0] == 'B':
+            ctr = "1"
+        interface = '0' + newstr[1]
+        port = '0' + newstr[3]
+        iqn_suffix = ctr + '02' + interface + port
+        for i in range(0, len(iqn_suffix)):
+            if iqn_suffix[i] != '0':
+                iqn_suffix = iqn_suffix[i:]
+                break
+        iqn = iqn_prefix + ':' + iqn_suffix + ':' + iscsiip
+        LOG.debug(_('_get_tgt_iqn: iSCSI target iqn is %s') % iqn)
+        return iqn
+
+    def _get_fc_target_wwpns(self, wwn):
+        url = (self.url +
+               "/host_link?INITIATOR_TYPE=223&INITIATOR_PORT_WWN=" + wwn)
+        result = self.call(url, None, "GET")
+
+        msg = 'Get FC target wwpn error.'
+        self._assert_rest_result(result, msg)
+        self._assert_data_in_result(result, msg)
+
+        fc_wwpns = None
+        for item in result['data']:
+            if wwn == item['INITIATOR_PORT_WWN']:
+                fc_wwpns = item['TARGET_PORT_WWN']
+                break
+
+        return fc_wwpns
+
+    def _parse_volume_type(self, volume):
+        type_id = volume['volume_type_id']
+        params = self._get_lun_conf_params()
+        LOG.debug(_('_parse_volume_type: type id: %(type_id)s '
+                    'config parameter is: %(params)s')
+                  % {'type_id': type_id,
+                     'params': params})
+
+        poolinfo = self._find_pool_info()
+        volume_size = self._get_volume_size(poolinfo, volume)
+        params['volume_size'] = volume_size
+        params['pool_id'] = poolinfo['ID']
+
+        if type_id is not None:
+            ctxt = context.get_admin_context()
+            volume_type = volume_types.get_volume_type(ctxt, type_id)
+            specs = volume_type.get('extra_specs')
+            for key, value in specs.iteritems():
+                key_split = key.split(':')
+                if len(key_split) > 1:
+                    if key_split[0] == 'drivers':
+                        key = key_split[1]
+                    else:
+                        continue
+                else:
+                    key = key_split[0]
+
+                if key in QOS_KEY:
+                    params["qos"] = value.strip()
+                    params["qos_level"] = key
+                elif key in TIER_KEY:
+                    params["tier"] = value.strip()
+                elif key in params.keys():
+                    params[key] = value.strip()
+                else:
+                    conf = self.configuration.cinder_huawei_conf_file
+                    LOG.warn(_('_parse_volume_type: Unacceptable parameter '
+                               '%(key)s. Please check this key in extra_specs '
+                               'and make it consistent with the configuration '
+                               'file %(conf)s.') % {'key': key, 'conf': conf})
+
+        LOG.debug(_("The config parameters are: %s") % params)
+        return params
+
+    def update_volume_stats(self, refresh=False):
+        capacity = self._get_capacity()
+        data = {}
+        data['vendor_name'] = 'Huawei'
+        data['total_capacity_gb'] = capacity['total_capacity']
+        data['free_capacity_gb'] = capacity['free_capacity']
+        data['reserved_percentage'] = 0
+        data['QoS_support'] = True
+        data['Tier_support'] = True
+        return data
+
+    def _find_qos_policy_info(self, policy_name):
+        url = self.url + "/ioclass"
+        result = self.call(url, None, "GET")
+
+        msg = 'Get qos policy error.'
+        self._assert_rest_result(result, msg)
+        self._assert_data_in_result(result, msg)
+
+        qos_info = {}
+        for item in result['data']:
+            if policy_name == item['NAME']:
+                qos_info['ID'] = item['ID']
+                lun_list = json.loads(item['LUNLIST'])
+                qos_info['LUNLIST'] = lun_list
+                break
+        return qos_info
+
+    def _update_qos_policy_lunlist(self, lunlist, policy_id):
+        url = self.url + "/ioclass/" + policy_id
+        data = json.dumps({"TYPE": "230",
+                           "ID": policy_id,
+                           "LUNLIST": lunlist})
+        result = self.call(url, data, "PUT")
+        self._assert_rest_result(result, 'Update qos policy error.')
+
+    def _get_login_info(self):
+        """Get login IP, username and password from config file."""
+        logininfo = {}
+        filename = self.configuration.cinder_huawei_conf_file
+        tree = ET.parse(filename)
+        root = tree.getroot()
+        logininfo['HVSURL'] = root.findtext('Storage/HVSURL').strip()
+
+        need_encode = False
+        for key in ['UserName', 'UserPassword']:
+            node = root.find('Storage/%s' % key)
+            node_text = node.text
+            # Prefix !$$$ means the value is encoded already.
+            if node_text.find('!$$$') > -1:
+                logininfo[key] = base64.b64decode(node_text[4:])
+            else:
+                logininfo[key] = node_text
+                node.text = '!$$$' + base64.b64encode(node_text)
+                need_encode = True
+        if need_encode:
+            self._change_file_mode(filename)
+            try:
+                tree.write(filename, 'UTF-8')
+            except Exception as err:
+                LOG.warn(_('_get_login_info: %s') % err)
+
+        return logininfo
+
+    def _change_file_mode(self, filepath):
+        utils.execute('chmod', '777', filepath, run_as_root=True)
+
+    def _check_conf_file(self):
+        """Check the config file, make sure the essential items are set."""
+        root = huawei_utils.parse_xml_file(self.xml_conf)
+        check_list = ['Storage/HVSURL', 'Storage/UserName',
+                      'Storage/UserPassword']
+        for item in check_list:
+            if not huawei_utils.is_xml_item_exist(root, item):
+                err_msg = (_('_check_conf_file: Config file invalid. '
+                             '%s must be set.') % item)
+                LOG.error(err_msg)
+                raise exception.InvalidInput(reason=err_msg)
+
+        # Make sure the storage pool is set.
+        if not huawei_utils.is_xml_item_exist(root, 'LUN/StoragePool'):
+            err_msg = _('_check_conf_file: Config file invalid. '
+                        'StoragePool must be set.')
+            LOG.error(err_msg)
+            raise exception.InvalidInput(reason=err_msg)
+
+        # Make sure the host OS type is valid.
+        if huawei_utils.is_xml_item_exist(root, 'Host', 'OSType'):
+            os_list = huawei_utils.os_type.keys()
+            if not huawei_utils.is_xml_item_valid(root, 'Host', os_list,
+                                                  'OSType'):
+                err_msg = (_('_check_conf_file: Config file invalid. '
+                             'Host OSType invalid.\n'
+                             'The valid values are: %(os_list)s')
+                           % {'os_list': os_list})
+                LOG.error(err_msg)
+                raise exception.InvalidInput(reason=err_msg)
+
+    def _get_iscsi_params(self, connector):
+        """Get target iSCSI params, including iqn and IP."""
+        initiator = connector['initiator']
+        iscsi_conf = self._get_iscsi_conf()
+        target_ip = None
+        for ini in iscsi_conf['Initiator']:
+            if ini['Name'] == initiator:
+                target_ip = ini['TargetIP']
+                break
+        # If no target IP was specified for the initiator, use the default.
+        if not target_ip:
+            if iscsi_conf['DefaultTargetIP']:
+                target_ip = iscsi_conf['DefaultTargetIP']
+
+            else:
+                msg = (_('_get_iscsi_params: Failed to get target IP '
+                         'for initiator %(ini)s, please check config file.')
+                       % {'ini': initiator})
+                LOG.error(msg)
+                raise exception.InvalidInput(reason=msg)
+
+        target_iqn = self._get_tgt_iqn(target_ip)
+
+        return (target_iqn, target_ip)
+
+    def extend_volume(self, volume, new_size):
+        name = self._encode_name(volume['id'])
+        lun_id = self._get_volume_by_name(name)
+        if lun_id:
+            url = self.url + "/lun/expand"
+            capacity = int(new_size) * units.GiB / 512
+            data = json.dumps({"TYPE": "11",
+                               "ID": lun_id,
+                               "CAPACITY": capacity})
+            result = self.call(url, data, "PUT")
+            self._assert_rest_result(result, 'Extend lun error.')
+        else:
+            LOG.warn(_('extend_volume: Can not find lun in the array.'))
diff --git a/cinder/volume/drivers/huawei/ssh_common.py b/cinder/volume/drivers/huawei/ssh_common.py
new file mode 100644
index 0000000000..5e44aefc96
--- /dev/null
+++ b/cinder/volume/drivers/huawei/ssh_common.py
@@ -0,0 +1,1497 @@
+# Copyright (c) 2013 Huawei Technologies Co., Ltd.
+# Copyright (c) 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Common classes for Huawei OceanStor T series and Dorado series storage arrays.
+
+The common classes provide the drivers with command line operations via SSH.
+"""
+
+import base64
+import re
+import socket
+import threading
+import time
+
+from xml.etree import ElementTree as ET
+
+from cinder import context
+from cinder import exception
+from cinder.openstack.common import excutils
+from cinder.openstack.common import log as logging
+from cinder import utils
+from cinder.volume.drivers.huawei import huawei_utils
+from cinder.volume import volume_types
+
+
+LOG = logging.getLogger(__name__)
+
+HOST_GROUP_NAME = 'HostGroup_OpenStack'
+HOST_NAME_PREFIX = 'Host_'
+VOL_AND_SNAP_NAME_PREFIX = 'OpenStack_'
+
+
+def ssh_read(user, channel, cmd, timeout):
+    """Get results of CLI commands."""
+    result = ''
+    channel.settimeout(timeout)
+    while True:
+        try:
+            result = result + channel.recv(8192)
+        except socket.timeout as err:
+            msg = _('ssh_read: Read SSH timeout. %s') % err
+            LOG.error(msg)
+            raise
+        else:
+            # CLI returns welcome information when first log in. So need to
+            # deal differently.
+            if not re.search('Welcome', result):
+                # Complete CLI response starts with CLI cmd and
+                # ends with "username:/>".
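+                # e.g. for user 'admin' and command 'showlun', a complete
+                # response looks like:
+                #     showlun\r\n<command output>\r\nadmin:/>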
+ if result.startswith(cmd) and result.endswith(user + ':/>'): + break + # Some commands need to send 'y'. + elif re.search('(y/n)|y or n', result): + break + # Reach maximum limit of SSH connection. + elif re.search('No response message', result): + msg = _('No response message. Please check system status.') + LOG.error(msg) + raise exception.CinderException(msg) + elif (re.search(user + ':/>' + cmd, result) and + result.endswith(user + ':/>')): + break + + # Filter the last line: username:/> . + result = '\r\n'.join(result.split('\r\n')[:-1]) + # Filter welcome information. + index = result.find(user + ':/>') + + return (result[index:] if index > -1 else result) + + +class TseriesCommon(): + """Common class for Huawei T series storage arrays.""" + + def __init__(self, configuration=None): + self.configuration = configuration + self.xml_conf = self.configuration.cinder_huawei_conf_file + self.login_info = {} + self.lun_distribution = [0, 0] + self.hostgroup_id = None + self.ssh_pool = None + self.lock_ip = threading.Lock() + self.luncopy_list = [] # to store LUNCopy name + self.extended_lun_dict = {} + + def do_setup(self, context): + """Check config file.""" + LOG.debug(_('do_setup')) + + self._check_conf_file() + self.login_info = self._get_login_info() + exist_luns = self._get_all_luns_info() + self.lun_distribution = self._get_lun_distribution_info(exist_luns) + self.luncopy_list = self._get_all_luncopy_name() + self.hostgroup_id = self._get_hostgroup_id(HOST_GROUP_NAME) + self.extended_lun_dict = self._get_extended_lun(exist_luns) + + def _check_conf_file(self): + """Check config file, make sure essential items are set.""" + root = huawei_utils.parse_xml_file(self.xml_conf) + check_list = ['Storage/ControllerIP0', 'Storage/ControllerIP1', + 'Storage/UserName', 'Storage/UserPassword'] + for item in check_list: + if not huawei_utils.is_xml_item_exist(root, item): + err_msg = (_('_check_conf_file: Config file invalid. ' + '%s must be set.') % item) + LOG.error(err_msg) + raise exception.InvalidInput(reason=err_msg) + + # make sure storage pool is set + if not huawei_utils.is_xml_item_exist(root, 'LUN/StoragePool', 'Name'): + err_msg = _('_check_conf_file: Config file invalid. ' + 'StoragePool must be set.') + LOG.error(err_msg) + raise exception.InvalidInput(reason=err_msg) + + # If setting os type, make sure it valid + if huawei_utils.is_xml_item_exist(root, 'Host', 'OSType'): + os_list = huawei_utils.os_type.keys() + if not huawei_utils.is_xml_item_valid(root, 'Host', os_list, + 'OSType'): + err_msg = (_('_check_conf_file: Config file invalid. ' + 'Host OSType is invalid.\n' + 'The valid values are: %(os_list)s') + % {'os_list': os_list}) + LOG.error(err_msg) + raise exception.InvalidInput(reason=err_msg) + + def _get_login_info(self): + """Get login IP, username and password from config file.""" + logininfo = {} + filename = self.configuration.cinder_huawei_conf_file + tree = ET.parse(filename) + root = tree.getroot() + logininfo['ControllerIP0'] =\ + root.findtext('Storage/ControllerIP0').strip() + logininfo['ControllerIP1'] =\ + root.findtext('Storage/ControllerIP1').strip() + + need_encode = False + for key in ['UserName', 'UserPassword']: + node = root.find('Storage/%s' % key) + node_text = node.text.strip() + # Prefix !$$$ means encoded already. 
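+            # On first use the plain-text value is rewritten in the config
+            # file as '!$$$' plus its base64-encoded form.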
+            if node_text.find('!$$$') > -1:
+                logininfo[key] = base64.b64decode(node_text[4:])
+            else:
+                logininfo[key] = node_text
+                node.text = '!$$$' + base64.b64encode(node_text)
+                need_encode = True
+        if need_encode:
+            self._change_file_mode(filename)
+            try:
+                tree.write(filename, 'UTF-8')
+            except Exception as err:
+                LOG.info(_('_get_login_info: %s') % err)
+
+        return logininfo
+
+    def _change_file_mode(self, filepath):
+        utils.execute('chmod', '777', filepath, run_as_root=True)
+
+    def _get_lun_distribution_info(self, luns):
+        """Get LUN distribution information.
+
+        Since each array has two controllers, we want all LUNs (just the
+        Thick LUNs) to be distributed evenly. The driver uses the LUN
+        distribution info to determine in which controller to create
+        a new LUN.
+
+        """
+
+        ctr_info = [0, 0]
+        for lun in luns:
+            if (lun[6].startswith(VOL_AND_SNAP_NAME_PREFIX) and
+                    lun[8] == 'THICK'):
+                if lun[4] == 'A':
+                    ctr_info[0] += 1
+                else:
+                    ctr_info[1] += 1
+        return ctr_info
+
+    def check_for_setup_error(self):
+        pass
+
+    def _get_all_luncopy_name(self):
+        cli_cmd = 'showluncopy'
+        out = self._execute_cli(cli_cmd)
+        luncopy_ids = []
+        if re.search('LUN Copy Information', out):
+            for line in out.split('\r\n')[6:-2]:
+                tmp_line = line.split()
+                if tmp_line[0].startswith(VOL_AND_SNAP_NAME_PREFIX):
+                    luncopy_ids.append(tmp_line[0])
+        return luncopy_ids
+
+    def _get_extended_lun(self, luns):
+        extended_dict = {}
+        for lun in luns:
+            if lun[6].startswith('ext'):
+                vol_name = lun[6].split('_')[1]
+                add_ids = extended_dict.get(vol_name, [])
+                add_ids.append(lun[0])
+                extended_dict[vol_name] = add_ids
+        return extended_dict
+
+    def create_volume(self, volume):
+        """Create a new volume."""
+        volume_name = self._name_translate(volume['name'])
+
+        LOG.debug(_('create_volume: volume name: %s') % volume_name)
+
+        self._update_login_info()
+        if int(volume['size']) == 0:
+            volume_size = '100M'
+        else:
+            volume_size = '%sG' % volume['size']
+        type_id = volume['volume_type_id']
+        parameters = self._parse_volume_type(type_id)
+        volume_id = self._create_volume(volume_name, volume_size, parameters)
+        return volume_id
+
+    def _name_translate(self, name):
+        """Form new names for volumes and snapshots because of the
+        32-character limit on names.
+        """
+        newname = VOL_AND_SNAP_NAME_PREFIX + str(hash(name))
+
+        LOG.debug(_('_name_translate: Name in cinder: %(old)s, new name in '
+                    'storage system: %(new)s') % {'old': name, 'new': newname})
+
+        return newname
+
+    def _update_login_info(self):
+        """Update user name and password."""
+        self.login_info = self._get_login_info()
+
+    def _parse_volume_type(self, typeid):
+        """Parse volume type from extra_specs by type id.
+
+        The keys in extra_specs must be consistent with the elements in the
+        config file. The keys can start with "drivers" to distinguish them
+        from capabilities keys, if you like.
+
+        """
+
+        params = self._get_lun_params()
+        if typeid is not None:
+            ctxt = context.get_admin_context()
+            volume_type = volume_types.get_volume_type(ctxt, typeid)
+            specs = volume_type.get('extra_specs')
+            for key, value in specs.iteritems():
+                key_split = key.split(':')
+                if len(key_split) > 1:
+                    if key_split[0] == 'drivers':
+                        key = key_split[1]
+                    else:
+                        continue
+                else:
+                    key = key_split[0]
+
+                if key in params.keys():
+                    params[key] = value.strip()
+                else:
+                    conf = self.configuration.cinder_huawei_conf_file
+                    LOG.warn(_('_parse_volume_type: Unacceptable parameter '
+                               '%(key)s. 
Please check this key in extra_specs ' + 'and make it consistent with the element in ' + 'configuration file %(conf)s.') + % {'key': key, + 'conf': conf}) + + return params + + def _create_volume(self, name, size, params): + """Create a new volume with the given name and size.""" + cli_cmd = ('createlun -n %(name)s -lunsize %(size)s ' + '-wrtype %(wrtype)s ' % {'name': name, + 'size': size, + 'wrtype': params['WriteType']}) + + # If write type is "write through", no need to set mirror switch. + if params['WriteType'] != '2': + cli_cmd = cli_cmd + ('-mirrorsw %(mirrorsw)s ' + % {'mirrorsw': params['MirrorSwitch']}) + + # Differences exist between "Thin" and "thick" LUN in CLI commands. + luntype = params['LUNType'] + ctr = None + if luntype == 'Thin': + cli_cmd = cli_cmd + ('-pool %(pool)s ' + % {'pool': params['StoragePool']}) + else: + # Make LUN distributed to A/B controllers evenly, + # just for Thick LUN. + ctr = self._calculate_lun_ctr() + cli_cmd = cli_cmd + ('-rg %(raidgroup)s -susize %(susize)s ' + '-c %(ctr)s ' + % {'raidgroup': params['StoragePool'], + 'susize': params['StripUnitSize'], + 'ctr': ctr}) + + prefetch_value_or_times = '' + pretype = '-pretype %s ' % params['PrefetchType'] + # If constant prefetch, we should specify prefetch value. + if params['PrefetchType'] == '1': + prefetch_value_or_times = '-value %s' % params['PrefetchValue'] + # If variable prefetch, we should specify prefetch multiple. + elif params['PrefetchType'] == '2': + prefetch_value_or_times = '-times %s' % params['PrefetchTimes'] + + cli_cmd = cli_cmd + pretype + prefetch_value_or_times + out = self._execute_cli(cli_cmd) + + self._assert_cli_operate_out('_create_volume', + 'Failed to create volume %s' % name, + cli_cmd, out) + if ctr: + self._update_lun_distribution(ctr) + + return self._get_lun_id(name) + + def _calculate_lun_ctr(self): + return ('a' if self.lun_distribution[0] <= self.lun_distribution[1] + else 'b') + + def _update_lun_distribution(self, ctr): + index = (0 if ctr == 'a' else 1) + self.lun_distribution[index] += 1 + + def _get_lun_params(self): + params_conf = self._parse_conf_lun_params() + # Select a pool with maximum capacity. + pools_dev = self._get_dev_pool_info(params_conf['LUNType']) + params_conf['StoragePool'] = \ + self._get_maximum_capacity_pool_id(params_conf['StoragePool'], + pools_dev, + params_conf['LUNType']) + return params_conf + + def _parse_conf_lun_params(self): + """Get parameters from config file for creating LUN.""" + # Default LUN parameters. + conf_params = {'LUNType': 'Thin', + 'StripUnitSize': '64', + 'WriteType': '1', + 'MirrorSwitch': '1', + 'PrefetchType': '3', + 'PrefetchValue': '0', + 'PrefetchTimes': '0', + 'StoragePool': []} + + root = huawei_utils.parse_xml_file(self.xml_conf) + + luntype = root.findtext('LUN/LUNType') + if luntype: + if luntype.strip() in ['Thick', 'Thin']: + conf_params['LUNType'] = luntype.strip() + else: + err_msg = (_('LUNType must be "Thin" or "Thick". 
' + 'LUNType:%(type)s') % {'type': luntype}) + LOG.error(err_msg) + raise exception.InvalidInput(reason=err_msg) + + stripunitsize = root.findtext('LUN/StripUnitSize') + if stripunitsize: + conf_params['StripUnitSize'] = stripunitsize.strip() + writetype = root.findtext('LUN/WriteType') + if writetype: + conf_params['WriteType'] = writetype.strip() + mirrorswitch = root.findtext('LUN/MirrorSwitch') + if mirrorswitch: + conf_params['MirrorSwitch'] = mirrorswitch.strip() + prefetch = root.find('LUN/Prefetch') + if prefetch is not None and prefetch.attrib['Type']: + conf_params['PrefetchType'] = prefetch.attrib['Type'].strip() + if conf_params['PrefetchType'] == '1': + conf_params['PrefetchValue'] = prefetch.attrib['Value'].strip() + elif conf_params['PrefetchType'] == '2': + conf_params['PrefetchTimes'] = prefetch.attrib['Value'].strip() + else: + LOG.debug(_('_parse_conf_lun_params: Use default prefetch type. ' + 'Prefetch type: Intelligent')) + + pools_conf = root.findall('LUN/StoragePool') + for pool in pools_conf: + conf_params['StoragePool'].append(pool.attrib['Name'].strip()) + + return conf_params + + def _get_maximum_capacity_pool_id(self, pools_conf, pools_dev, luntype): + """Get the maximum pool from config file. + + According to the given pools' names in config file, + we select the pool with maximum free capacity. + + """ + + maxpool_id = None + maxpool_size = 0.0 + nameindex, sizeindex = ((1, 4) if luntype == 'Thin' else (5, 3)) + pools_dev = sorted(pools_dev, key=lambda x: float(x[sizeindex])) + while len(pools_dev) > 0: + pool = pools_dev.pop() + if pool[nameindex] in pools_conf: + return pool[0] + + err_msg = (_('_get_maximum_capacity_pool_id: Failed to get pool ' + 'id. Please check config file and make sure ' + 'the StoragePool %s is created in storage ' + 'array.') % pools_conf) + LOG.error(err_msg) + raise exception.InvalidInput(reason=err_msg) + + def _execute_cli(self, cmd): + """Build SSH connection and execute CLI commands. + + If the connection to first controller timeout, + try to connect to the other controller. + + """ + + LOG.debug(_('CLI command: %s') % cmd) + connect_times = 1 + ip0 = self.login_info['ControllerIP0'] + ip1 = self.login_info['ControllerIP1'] + user = self.login_info['UserName'] + pwd = self.login_info['UserPassword'] + if not self.ssh_pool: + self.ssh_pool = utils.SSHPool(ip0, 22, 30, user, pwd, max_size=2) + ssh_client = None + while True: + try: + if connect_times == 2: + # Switch to the other controller. + with self.lock_ip: + if ssh_client: + if ssh_client.server_ip == self.ssh_pool.ip: + self.ssh_pool.ip = (ip1 + if self.ssh_pool.ip == ip0 + else ip0) + old_ip = ssh_client.server_ip + # Create a new client to replace the old one. + if getattr(ssh_client, 'chan', None): + ssh_client.chan.close() + ssh_client.close() + ssh_client = self.ssh_pool.create() + self._reset_transport_timeout(ssh_client, 0.1) + else: + self.ssh_pool.ip = ip1 + old_ip = ip0 + + LOG.info(_('_execute_cli: Can not connect to IP ' + '%(old)s, try to connect to the other ' + 'IP %(new)s.') + % {'old': old_ip, 'new': self.ssh_pool.ip}) + + if not ssh_client: + # Get an SSH client from SSH pool. + ssh_client = self.ssh_pool.get() + self._reset_transport_timeout(ssh_client, 0.1) + # "server_ip" shows the IP of SSH server. + if not getattr(ssh_client, 'server_ip', None): + with self.lock_ip: + setattr(ssh_client, 'server_ip', self.ssh_pool.ip) + # An SSH client owns one "chan". 
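+                # Cache the channel on the client so subsequent commands
+                # reuse one logged-in CLI session.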
+ if not getattr(ssh_client, 'chan', None): + setattr(ssh_client, 'chan', + utils.create_channel(ssh_client, 600, 800)) + + while True: + ssh_client.chan.send(cmd + '\n') + out = ssh_read(user, ssh_client.chan, cmd, 20) + if out.find('(y/n)') > -1 or out.find('y or n') > -1: + cmd = 'y' + else: + # Put SSH client back into SSH pool. + self.ssh_pool.put(ssh_client) + return out + + except Exception as err: + if connect_times < 2: + connect_times += 1 + continue + else: + if ssh_client: + self.ssh_pool.remove(ssh_client) + LOG.error(_('_execute_cli: %s') % err) + raise err + + def _reset_transport_timeout(self, ssh, time): + transport = ssh.get_transport() + transport.sock.settimeout(time) + + def delete_volume(self, volume): + volume_name = self._name_translate(volume['name']) + + LOG.debug(_('delete_volume: volume name: %s') % volume_name) + + self._update_login_info() + volume_id = volume.get('provider_location', None) + if volume_id is None or not self._check_volume_created(volume_id): + err_msg = (_('delete_volume: Volume %(name)s does not exist.') + % {'name': volume['name']}) + LOG.warn(err_msg) + return + else: + name = volume_name[len(VOL_AND_SNAP_NAME_PREFIX):] + added_vol_ids = self.extended_lun_dict.get(name, None) + if added_vol_ids: + self._del_lun_from_extended_lun(volume_id, added_vol_ids) + self.extended_lun_dict.pop(name) + self._delete_volume(volume_id) + + def _check_volume_created(self, volume_id): + cli_cmd = 'showlun -lun %s' % volume_id + out = self._execute_cli(cli_cmd) + return (True if re.search('LUN Information', out) else False) + + def _del_lun_from_extended_lun(self, extended_id, added_ids): + cli_cmd = 'rmlunfromextlun -ext %s' % extended_id + out = self._execute_cli(cli_cmd) + + self._assert_cli_operate_out('_del_lun_from_extended_lun', + ('Failed to remove LUN from extended ' + 'LUN: %s' % extended_id), + cli_cmd, out) + for id in added_ids: + cli_cmd = 'dellun -lun %s' % id + out = self._execute_cli(cli_cmd) + + self._assert_cli_operate_out('_del_lun_from_extended_lun', + 'Failed to delete LUN: %s' % id, + cli_cmd, out) + + def _delete_volume(self, volumeid): + """Run CLI command to delete volume.""" + cli_cmd = 'dellun -force -lun %s' % volumeid + out = self._execute_cli(cli_cmd) + + self._assert_cli_operate_out('_delete_volume', + ('Failed to delete volume. volume id: %s' + % volumeid), + cli_cmd, out) + + def create_volume_from_snapshot(self, volume, snapshot): + """Create a volume from a snapshot. + + We use LUNcopy to copy a new volume from snapshot. + The time needed increases as volume size does. + + """ + + snapshot_name = self._name_translate(snapshot['name']) + volume_name = self._name_translate(volume['name']) + + LOG.debug(_('create_volume_from_snapshot: snapshot ' + 'name: %(snapshot)s, volume name: %(volume)s') + % {'snapshot': snapshot_name, + 'volume': volume_name}) + + self._update_login_info() + snapshot_id = snapshot.get('provider_location', None) + if not snapshot_id: + snapshot_id = self._get_snapshot_id(snapshot_name) + if snapshot_id is None: + err_msg = (_('create_volume_from_snapshot: Snapshot %(name)s ' + 'does not exist.') + % {'name': snapshot_name}) + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + + # Create a target LUN. 
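+        # If the requested size is 0, fall back to the snapshot's source
+        # volume size so the LUNcopy target is large enough.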
+ if int(volume['size']) == 0: + volume_size = '%sG' % snapshot['volume_size'] + else: + volume_size = '%sG' % volume['size'] + type_id = volume['volume_type_id'] + parameters = self._parse_volume_type(type_id) + tgt_vol_id = self._create_volume(volume_name, volume_size, parameters) + self._copy_volume(snapshot_id, tgt_vol_id) + + return tgt_vol_id + + def _copy_volume(self, src_vol_id, tgt_vol_id): + """Copy a volume or snapshot to target volume.""" + luncopy_name = VOL_AND_SNAP_NAME_PREFIX + src_vol_id + '_' + tgt_vol_id + self._create_luncopy(luncopy_name, src_vol_id, tgt_vol_id) + self.luncopy_list.append(luncopy_name) + luncopy_id = self._get_luncopy_info(luncopy_name)[1] + try: + self._start_luncopy(luncopy_id) + self._wait_for_luncopy(luncopy_name) + # Delete the target volume if LUNcopy failed. + except Exception: + with excutils.save_and_reraise_exception(): + # Need to remove the LUNcopy of the volume first. + self._delete_luncopy(luncopy_id) + self.luncopy_list.remove(luncopy_name) + self._delete_volume(tgt_vol_id) + # Need to delete LUNcopy finally. + self._delete_luncopy(luncopy_id) + self.luncopy_list.remove(luncopy_name) + + def _create_luncopy(self, luncopyname, srclunid, tgtlunid): + """Run CLI command to create LUNcopy.""" + cli_cmd = ('createluncopy -n %(name)s -l 4 -slun %(srclunid)s ' + '-tlun %(tgtlunid)s' % {'name': luncopyname, + 'srclunid': srclunid, + 'tgtlunid': tgtlunid}) + out = self._execute_cli(cli_cmd) + + self._assert_cli_operate_out('_create_luncopy', + ('Failed to create LUNcopy %s' + % luncopyname), + cli_cmd, out) + + def _start_luncopy(self, luncopyid): + """Run CLI command to start LUNcopy.""" + cli_cmd = ('chgluncopystatus -luncopy %(luncopyid)s -start' + % {'luncopyid': luncopyid}) + out = self._execute_cli(cli_cmd) + + self._assert_cli_operate_out('_start_luncopy', + 'Failed to start LUNcopy %s' % luncopyid, + cli_cmd, out) + + def _wait_for_luncopy(self, luncopyname): + """Wait for LUNcopy to complete.""" + while True: + luncopy_info = self._get_luncopy_info(luncopyname) + # If state is complete + if luncopy_info[3] == 'Complete': + break + # If status is not normal + elif luncopy_info[4] != 'Normal': + err_msg = (_('_wait_for_luncopy: LUNcopy %(luncopyname)s ' + 'status is %(status)s.') + % {'luncopyname': luncopyname, + 'status': luncopy_info[4]}) + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + + time.sleep(10) + + def _get_luncopy_info(self, luncopyname): + """Return a LUNcopy information list.""" + cli_cmd = 'showluncopy' + out = self._execute_cli(cli_cmd) + + self._assert_cli_out(re.search('LUN Copy Information', out), + '_get_luncopy_info', + 'No LUNcopy information was found.', + cli_cmd, out) + + for line in out.split('\r\n')[6:-2]: + tmp_line = line.split() + if tmp_line[0] == luncopyname: + return tmp_line + return None + + def _delete_luncopy(self, luncopyid): + """Run CLI command to delete LUNcopy.""" + cli_cmd = 'delluncopy -luncopy %(id)s' % {'id': luncopyid} + out = self._execute_cli(cli_cmd) + + self._assert_cli_operate_out('_delete_luncopy', + 'Failed to delete LUNcopy %s' % luncopyid, + cli_cmd, out) + + def create_cloned_volume(self, tgt_volume, src_volume): + src_vol_name = self._name_translate(src_volume['name']) + tgt_vol_name = self._name_translate(tgt_volume['name']) + + LOG.debug(_('create_cloned_volume: src volume: %(src)s, ' + 'tgt volume: %(tgt)s') % {'src': src_vol_name, + 'tgt': tgt_vol_name}) + + self._update_login_info() + src_vol_id = src_volume.get('provider_location', None) + if 
not src_vol_id:
+            src_vol_id = self._get_lun_id(src_vol_name)
+            if src_vol_id is None:
+                err_msg = (_('Source volume %(name)s does not exist.')
+                           % {'name': src_vol_name})
+                LOG.error(err_msg)
+                raise exception.VolumeNotFound(volume_id=src_vol_name)
+
+        # Create a target volume.
+        if int(tgt_volume['size']) == 0:
+            tgt_vol_size = '%sG' % src_volume['size']
+        else:
+            tgt_vol_size = '%sG' % tgt_volume['size']
+        type_id = tgt_volume['volume_type_id']
+        params = self._parse_volume_type(type_id)
+        tgt_vol_id = self._create_volume(tgt_vol_name, tgt_vol_size, params)
+        self._copy_volume(src_vol_id, tgt_vol_id)
+
+        return tgt_vol_id
+
+    def _get_all_luns_info(self):
+        cli_cmd = 'showlun'
+        out = self._execute_cli(cli_cmd)
+        luns = []
+        if re.search('LUN Information', out):
+            for line in out.split('\r\n')[6:-2]:
+                luns.append(line.replace('Not format', 'Notformat').split())
+        return luns
+
+    def _get_lun_id(self, lun_name):
+        luns = self._get_all_luns_info()
+        if luns:
+            for lun in luns:
+                if lun[6] == lun_name:
+                    return lun[0]
+        return None
+
+    def extend_volume(self, volume, new_size):
+        extended_vol_name = self._name_translate(volume['name'])
+        name = extended_vol_name[len(VOL_AND_SNAP_NAME_PREFIX):]
+        added_vol_ids = self.extended_lun_dict.get(name, [])
+        added_vol_name = ('ext_' + extended_vol_name.split('_')[1] + '_' +
+                          str(len(added_vol_ids)))
+        added_vol_size = str(int(new_size) - int(volume['size'])) + 'G'
+
+        LOG.debug(_('extend_volume: extended volume name: %(extended_name)s '
+                    'new added volume name: %(added_name)s '
+                    'new added volume size: %(added_size)s')
+                  % {'extended_name': extended_vol_name,
+                     'added_name': added_vol_name,
+                     'added_size': added_vol_size})
+
+        if not volume['provider_location']:
+            err_msg = (_('extend_volume: volume %s does not exist.')
+                       % extended_vol_name)
+            LOG.error(err_msg)
+            raise exception.VolumeNotFound(volume_id=extended_vol_name)
+
+        type_id = volume['volume_type_id']
+        parameters = self._parse_volume_type(type_id)
+        added_vol_id = self._create_volume(added_vol_name, added_vol_size,
+                                           parameters)
+        try:
+            self._extend_volume(volume['provider_location'], added_vol_id)
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                self._delete_volume(added_vol_id)
+
+        added_vol_ids.append(added_vol_id)
+        self.extended_lun_dict[name] = added_vol_ids
+
+    def _extend_volume(self, extended_vol_id, added_vol_id):
+        cli_cmd = ('addluntoextlun -extlun %(extended_vol)s '
+                   '-lun %(added_vol)s' % {'extended_vol': extended_vol_id,
+                                           'added_vol': added_vol_id})
+        out = self._execute_cli(cli_cmd)
+        self._assert_cli_operate_out('_extend_volume',
+                                     ('Failed to extend volume %s'
+                                      % extended_vol_id),
+                                     cli_cmd, out)
+
+    def create_snapshot(self, snapshot):
+        snapshot_name = self._name_translate(snapshot['name'])
+        volume_name = self._name_translate(snapshot['volume_name'])
+
+        LOG.debug(_('create_snapshot: snapshot name: %(snapshot)s, '
+                    'volume name: %(volume)s')
+                  % {'snapshot': snapshot_name,
+                     'volume': volume_name})
+
+        if self._resource_pool_enough() is False:
+            err_msg = (_('create_snapshot: '
+                         'Resource pool needs at least 1GB valid size.'))
+            LOG.error(err_msg)
+            raise exception.VolumeBackendAPIException(data=err_msg)
+
+        lun_id = self._get_lun_id(volume_name)
+        if lun_id is None:
+            err_msg = (_('create_snapshot: Volume %(name)s does not exist.')
+                       % {'name': volume_name})
+            LOG.error(err_msg)
+            raise exception.VolumeNotFound(volume_id=volume_name)
+
+        self._create_snapshot(snapshot_name, lun_id)
+        snapshot_id = self._get_snapshot_id(snapshot_name)
+        # A snapshot must be activated before it can serve I/O; roll it
+        # back by deleting it if activation fails.
+        try:
+            self._active_snapshot(snapshot_id)
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                self._delete_snapshot(snapshot_id)
+
+        return snapshot_id
+
+    def _resource_pool_enough(self):
+        """Check whether resource pools' valid size is more than 1GB."""
+        cli_cmd = 'showrespool'
+        out = self._execute_cli(cli_cmd)
+        for line in out.split('\r\n')[6:-2]:
+            tmp_line = line.split()
+            if float(tmp_line[3]) < 1024.0:
+                return False
+
+        return True
+
+    def _create_snapshot(self, snapshotname, srclunid):
+        """Create a snapshot with snapshot name and source LUN ID."""
+        cli_cmd = ('createsnapshot -lun %(lunid)s -n %(snapname)s'
+                   % {'lunid': srclunid,
+                      'snapname': snapshotname})
+        out = self._execute_cli(cli_cmd)
+
+        self._assert_cli_operate_out('_create_snapshot',
+                                     ('Failed to create snapshot %s'
+                                      % snapshotname),
+                                     cli_cmd, out)
+
+    def _get_snapshot_id(self, snapshotname):
+        cli_cmd = 'showsnapshot'
+        out = self._execute_cli(cli_cmd)
+        if re.search('Snapshot Information', out):
+            for line in out.split('\r\n')[6:-2]:
+                emp_line = line.split()
+                if emp_line[0] == snapshotname:
+                    return emp_line[1]
+        return None
+
+    def _active_snapshot(self, snapshotid):
+        """Run CLI command to activate the snapshot."""
+        cli_cmd = ('actvsnapshot -snapshot %(snapshotid)s'
+                   % {'snapshotid': snapshotid})
+        out = self._execute_cli(cli_cmd)
+
+        self._assert_cli_operate_out('_active_snapshot',
+                                     ('Failed to activate snapshot %s'
+                                      % snapshotid),
+                                     cli_cmd, out)
+
+    def delete_snapshot(self, snapshot):
+        snapshot_name = self._name_translate(snapshot['name'])
+        volume_name = self._name_translate(snapshot['volume_name'])
+
+        LOG.debug(_('delete_snapshot: snapshot name: %(snapshot)s, '
+                    'volume name: %(volume)s') % {'snapshot': snapshot_name,
+                                                  'volume': volume_name})
+
+        self._update_login_info()
+        snapshot_id = snapshot.get('provider_location', None)
+        if ((snapshot_id is not None) and
+                self._check_snapshot_created(snapshot_id)):
+            # Don't allow deletion if the snapshot is the source of an
+            # ongoing LUNcopy.
+            if self._snapshot_in_luncopy(snapshot_id):
+                err_msg = (_('delete_snapshot: Can not delete snapshot %s '
+                             'because it is a source LUN of a LUNcopy.')
+                           % snapshot_name)
+                LOG.error(err_msg)
+                raise exception.VolumeBackendAPIException(data=err_msg)
+
+            self._delete_snapshot(snapshot_id)
+        else:
+            err_msg = (_('delete_snapshot: Snapshot %(snap)s does not exist.')
+                       % {'snap': snapshot_name})
+            LOG.warn(err_msg)
+
+    def _check_snapshot_created(self, snapshot_id):
+        cli_cmd = 'showsnapshot -snapshot %(snap)s' % {'snap': snapshot_id}
+        out = self._execute_cli(cli_cmd)
+        return (True if re.search('Snapshot Information', out) else False)
+
+    def _snapshot_in_luncopy(self, snapshot_id):
+        for name in self.luncopy_list:
+            if name.startswith(VOL_AND_SNAP_NAME_PREFIX + snapshot_id):
+                return True
+        return False
+
+    def _delete_snapshot(self, snapshotid):
+        """Send CLI command to delete the snapshot.
+
+        First disable the snapshot, then delete it.
+ + """ + + cli_cmd = ('disablesnapshot -snapshot %(snapshotid)s' + % {'snapshotid': snapshotid}) + out = self._execute_cli(cli_cmd) + + self._assert_cli_operate_out('_delete_snapshot', + ('Failed to disable snapshot %s' + % snapshotid), + cli_cmd, out) + + cli_cmd = ('delsnapshot -snapshot %(snapshotid)s' + % {'snapshotid': snapshotid}) + out = self._execute_cli(cli_cmd) + + self._assert_cli_operate_out('_delete_snapshot', + ('Failed to delete snapshot %s' + % snapshotid), + cli_cmd, out) + + def _assert_cli_out(self, condition, func, msg, cmd, cliout): + """Assertion for CLI query out.""" + if not condition: + err_msg = (_('%(func)s: %(msg)s\nCLI command: %(cmd)s\n' + 'CLI out: %(out)s') % {'func': func, + 'msg': msg, + 'cmd': cmd, + 'out': cliout}) + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + + def _assert_cli_operate_out(self, func, msg, cmd, cliout): + """Assertion for CLI out string: command operates successfully.""" + condition = re.search('command operates successfully', cliout) + self._assert_cli_out(condition, func, msg, cmd, cliout) + + def map_volume(self, host_id, volume_id): + """Map a volume to a host.""" + # Map a LUN to a host if not mapped. + if not self._check_volume_created(volume_id): + LOG.error(_('map_volume: Volume %s was not found.') % volume_id) + raise exception.VolumeNotFound(volume_id=volume_id) + + hostlun_id = None + map_info = self._get_host_map_info(host_id) + # Make sure the host LUN ID starts from 1. + new_hostlun_id = 1 + new_hostlunid_found = False + if map_info: + for maping in map_info: + if maping[2] == volume_id: + hostlun_id = maping[4] + break + elif not new_hostlunid_found: + if new_hostlun_id < int(maping[4]): + new_hostlunid_found = True + else: + new_hostlun_id = int(maping[4]) + 1 + + if not hostlun_id: + cli_cmd = ('addhostmap -host %(host_id)s -devlun %(lunid)s ' + '-hostlun %(hostlunid)s' + % {'host_id': host_id, + 'lunid': volume_id, + 'hostlunid': new_hostlun_id}) + out = self._execute_cli(cli_cmd) + + msg = ('Failed to map LUN %s to host %s. host LUN ID: %s' + % (volume_id, host_id, new_hostlun_id)) + self._assert_cli_operate_out('map_volume', msg, cli_cmd, out) + + hostlun_id = new_hostlun_id + + return hostlun_id + + def add_host(self, host_name, host_ip, initiator=None): + """Create a host and add it to hostgroup.""" + # Create an OpenStack hostgroup if not created before. + hostgroup_name = HOST_GROUP_NAME + self.hostgroup_id = self._get_hostgroup_id(hostgroup_name) + if self.hostgroup_id is None: + self._create_hostgroup(hostgroup_name) + self.hostgroup_id = self._get_hostgroup_id(hostgroup_name) + + # Create a host and add it to the hostgroup. + # Check the old host name to support the upgrade from grizzly to + # higher versions. + if initiator: + old_host_name = HOST_NAME_PREFIX + str(hash(initiator)) + old_host_id = self._get_host_id(old_host_name, self.hostgroup_id) + if old_host_id is not None: + return old_host_id + + host_name = HOST_NAME_PREFIX + host_name + host_id = self._get_host_id(host_name, self.hostgroup_id) + if host_id is None: + os_type = huawei_utils.get_conf_host_os_type(host_ip, + self.xml_conf) + self._create_host(host_name, self.hostgroup_id, os_type) + host_id = self._get_host_id(host_name, self.hostgroup_id) + + return host_id + + def _get_hostgroup_id(self, groupname): + """Get the given hostgroup ID. + + If the hostgroup not found, return None. 
+ + """ + + cli_cmd = 'showhostgroup' + out = self._execute_cli(cli_cmd) + if re.search('Host Group Information', out): + for line in out.split('\r\n')[6:-2]: + tmp_line = line.split() + if tmp_line[1] == groupname: + return tmp_line[0] + return None + + def _create_hostgroup(self, hostgroupname): + """Run CLI command to create host group.""" + cli_cmd = 'createhostgroup -n %(name)s' % {'name': hostgroupname} + out = self._execute_cli(cli_cmd) + + self._assert_cli_operate_out('_create_hostgroup', + ('Failed to Create hostgroup %s.' + % hostgroupname), + cli_cmd, out) + + def _get_host_id(self, hostname, hostgroupid): + """Get the given host ID.""" + cli_cmd = 'showhost -group %(groupid)s' % {'groupid': hostgroupid} + out = self._execute_cli(cli_cmd) + if re.search('Host Information', out): + for line in out.split('\r\n')[6:-2]: + tmp_line = line.split() + if tmp_line[1] == hostname: + return tmp_line[0] + return None + + def _create_host(self, hostname, hostgroupid, type): + """Run CLI command to add host.""" + cli_cmd = ('addhost -group %(groupid)s -n %(hostname)s -t %(type)s' + % {'groupid': hostgroupid, + 'hostname': hostname, + 'type': type}) + out = self._execute_cli(cli_cmd) + + self._assert_cli_operate_out('_create_host', + 'Failed to create host %s' % hostname, + cli_cmd, out) + + def _get_host_port_info(self, hostid): + """Run CLI command to get host port information.""" + cli_cmd = ('showhostport -host %(hostid)s' % {'hostid': hostid}) + out = self._execute_cli(cli_cmd) + if re.search('Host Port Information', out): + return [line.split() for line in out.split('\r\n')[6:-2]] + else: + return None + + def _get_host_map_info(self, hostid): + """Get map information of the given host.""" + + cli_cmd = 'showhostmap -host %(hostid)s' % {'hostid': hostid} + out = self._execute_cli(cli_cmd) + if re.search('Map Information', out): + mapinfo = [line.split() for line in out.split('\r\n')[6:-2]] + # Sorted by host LUN ID. + return sorted(mapinfo, key=lambda x: int(x[4])) + else: + return None + + def get_lun_details(self, lun_id): + cli_cmd = 'showlun -lun %s' % lun_id + out = self._execute_cli(cli_cmd) + lun_details = {} + if re.search('LUN Information', out): + for line in out.split('\r\n')[4:-2]: + line = line.split('|') + key = ''.join(line[0].strip().split()) + val = line[1].strip() + lun_details[key] = val + return lun_details + + def change_lun_ctr(self, lun_id, ctr): + LOG.debug(_('change_lun_ctr: Changing LUN %(lun)s ctr to %(ctr)s.') + % {'lun': lun_id, 'ctr': ctr}) + + cli_cmd = 'chglun -lun %s -c %s' % (lun_id, ctr) + out = self._execute_cli(cli_cmd) + + self._assert_cli_operate_out('change_lun_ctr', + 'Failed to change owning controller for ' + 'LUN %s' % lun_id, + cli_cmd, out) + + def remove_map(self, volume_id, host_name, initiator=None): + """Remove host map.""" + # Check the old host name to support the upgrade from grizzly to + # higher versions. 
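+        # Look up the host by the legacy hash-based name first, then fall
+        # back to the current hostname-based name.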
+ host_id = None + if initiator: + old_host_name = HOST_NAME_PREFIX + str(hash(initiator)) + host_id = self._get_host_id(old_host_name, self.hostgroup_id) + if host_id is None: + host_name = HOST_NAME_PREFIX + host_name + host_id = self._get_host_id(host_name, self.hostgroup_id) + if host_id is None: + LOG.error(_('remove_map: Host %s does not exist.') % host_name) + raise exception.HostNotFound(host=host_name) + + if not self._check_volume_created(volume_id): + LOG.error(_('remove_map: Volume %s does not exist.') % volume_id) + raise exception.VolumeNotFound(volume_id=volume_id) + + map_id = None + map_info = self._get_host_map_info(host_id) + if map_info: + for maping in map_info: + if maping[2] == volume_id: + map_id = maping[0] + break + if map_id is not None: + self._delete_map(map_id) + else: + LOG.warn(_('remove_map: No map between host %(host)s and ' + 'volume %(volume)s.') % {'host': host_name, + 'volume': volume_id}) + return host_id + + def _delete_map(self, mapid, attempts=2): + """Run CLI command to remove map.""" + cli_cmd = 'delhostmap -force -map %(mapid)s' % {'mapid': mapid} + while True: + out = self._execute_cli(cli_cmd) + + # We retry to delete host map 10s later if there are + # IOs accessing the system. + if re.search('command operates successfully', out): + break + else: + if (re.search('there are IOs accessing the system', out) and + (attempts > 0)): + + LOG.debug(_('_delete_map: There are IOs accessing ' + 'the system. Retry to delete host map ' + '%(mapid)s 10s later.') % {'mapid': mapid}) + + time.sleep(10) + attempts -= 1 + continue + else: + err_msg = (_('_delete_map: Failed to delete host map ' + '%(mapid)s.\nCLI out: %(out)s') + % {'mapid': mapid, + 'times': attempts, + 'out': out}) + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + + def _delete_hostport(self, portid): + """Run CLI command to delete host port.""" + cli_cmd = ('delhostport -force -p %(portid)s' % {'portid': portid}) + out = self._execute_cli(cli_cmd) + + self._assert_cli_operate_out('_delete_hostport', + 'Failed to delete host port %s.' % portid, + cli_cmd, out) + + def _delete_host(self, hostid): + """Run CLI command to delete host.""" + cli_cmd = ('delhost -force -host %(hostid)s' % {'hostid': hostid}) + out = self._execute_cli(cli_cmd) + + self._assert_cli_operate_out('_delete_host', + 'Failed to delete host. %s.' % hostid, + cli_cmd, out) + + def get_volume_stats(self, refresh=False): + """Get volume stats. + + If 'refresh' is True, run update the stats first. 
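+        Free capacity is summed over the storage pools named in the
+        config file.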
+        """
+        if refresh:
+            self._update_volume_stats()
+
+        return self._stats
+
+    def _update_volume_stats(self):
+        """Retrieve stats info from volume group."""
+
+        LOG.debug(_("_update_volume_stats: Updating volume stats."))
+        data = {}
+        data['vendor_name'] = 'Huawei'
+        data['total_capacity_gb'] = 'infinite'
+        data['free_capacity_gb'] = self._get_free_capacity()
+        data['reserved_percentage'] = 0
+        data['QoS_support'] = False
+
+        self._stats = data
+
+    def _get_free_capacity(self):
+        """Get total free capacity of pools."""
+        self._update_login_info()
+        params_conf = self._parse_conf_lun_params()
+        lun_type = params_conf['LUNType']
+        pools_conf = params_conf['StoragePool']
+        pools_dev = self._get_dev_pool_info(lun_type)
+        total_free_capacity = 0.0
+        for pool_dev in pools_dev:
+            for pool_conf in pools_conf:
+                if ((lun_type == 'Thick') and
+                        (pool_dev[5] == pool_conf)):
+                    total_free_capacity += float(pool_dev[3])
+                    break
+                elif pool_dev[1] == pool_conf:
+                    total_free_capacity += float(pool_dev[4])
+                    break
+
+        return total_free_capacity / 1024
+
+    def _get_dev_pool_info(self, pooltype):
+        """Get pools information created in storage device.
+
+        Return a list whose elements are also lists.
+
+        """
+
+        cli_cmd = ('showpool' if pooltype == 'Thin' else 'showrg')
+        out = self._execute_cli(cli_cmd)
+
+        test = (re.search('Pool Information', out) or
+                re.search('RAID Group Information', out))
+        self._assert_cli_out(test, '_get_dev_pool_info',
+                             'No pools information found.', cli_cmd, out)
+
+        pool = out.split('\r\n')[6:-2]
+        return [line.split() for line in pool]
+
+
+class DoradoCommon(TseriesCommon):
+    """Common class for Huawei Dorado2100 G2 and Dorado5100 storage arrays.
+
+    The Dorado arrays share much of their CLI with the T series storage
+    systems, so this class inherits from TseriesCommon and only overrides
+    the methods that differ.
+
+    """
+
+    def __init__(self, configuration=None):
+        TseriesCommon.__init__(self, configuration)
+        self.device_type = None
+
+    def do_setup(self, context):
+        """Check config file."""
+        LOG.debug(_('do_setup'))
+
+        self._check_conf_file()
+        exist_luns = self._get_all_luns_info()
+        self.lun_distribution = self._get_lun_distribution_info(exist_luns)
+        self.hostgroup_id = self._get_hostgroup_id(HOST_GROUP_NAME)
+        self.extended_lun_dict = self._get_extended_lun(exist_luns)
+
+    def _check_conf_file(self):
+        """Check the config file, make sure the key elements are set."""
+        root = huawei_utils.parse_xml_file(self.xml_conf)
+        # Check login information
+        check_list = ['Storage/ControllerIP0', 'Storage/ControllerIP1',
+                      'Storage/UserName', 'Storage/UserPassword']
+        for item in check_list:
+            if not huawei_utils.is_xml_item_exist(root, item):
+                err_msg = (_('_check_conf_file: Config file invalid. '
+                             '%s must be set.') % item)
+                LOG.error(err_msg)
+                raise exception.InvalidInput(reason=err_msg)
+
+        # Check storage pool
+        # No need for Dorado2100 G2
+        self.login_info = self._get_login_info()
+        self.device_type = self._get_device_type()
+        if self.device_type == 'Dorado5100':
+            if not huawei_utils.is_xml_item_exist(root, 'LUN/StoragePool',
+                                                  'Name'):
+                err_msg = (_('_check_conf_file: Config file invalid. '
+                             'StoragePool must be specified.'))
+                LOG.error(err_msg)
+                raise exception.InvalidInput(reason=err_msg)
+
+        # If setting os type, make sure it valid
+        if huawei_utils.is_xml_item_exist(root, 'Host', 'OSType'):
+            os_list = huawei_utils.os_type.keys()
+            if not huawei_utils.is_xml_item_valid(root, 'Host', os_list,
+                                                  'OSType'):
+                err_msg = (_('_check_conf_file: Config file invalid. 
' + 'Host OSType is invalid.\n' + 'The valid values are: %(os_list)s') + % {'os_list': os_list}) + LOG.error(err_msg) + raise exception.InvalidInput(reason=err_msg) + + def _get_device_type(self): + """Run CLI command to get system type.""" + cli_cmd = 'showsys' + out = self._execute_cli(cli_cmd) + + self._assert_cli_out(re.search('System Information', out), + '_get_device_type', + 'Failed to get system information', + cli_cmd, out) + + for line in out.split('\r\n')[4:-2]: + if re.search('Device Type', line): + if re.search('Dorado2100 G2$', line): + return 'Dorado2100 G2' + elif re.search('Dorado5100$', line): + return 'Dorado5100' + else: + LOG.error(_('_get_device_type: The driver only supports ' + 'Dorado5100 and Dorado 2100 G2 now.')) + raise exception.InvalidResults() + + def _get_lun_distribution_info(self, luns): + ctr_info = [0, 0] + (c, n) = ((2, 4) if self.device_type == 'Dorado2100 G2' else (3, 5)) + for lun in luns: + if lun[n].startswith(VOL_AND_SNAP_NAME_PREFIX): + if lun[c] == 'A': + ctr_info[0] += 1 + else: + ctr_info[1] += 1 + return ctr_info + + def _get_extended_lun(self, luns): + extended_dict = {} + n = 4 if self.device_type == 'Dorado2100 G2' else 5 + for lun in luns: + if lun[n].startswith('ext'): + vol_name = lun[n].split('_')[1] + add_ids = extended_dict.get(vol_name, []) + add_ids.append(lun[0]) + extended_dict[vol_name] = add_ids + return extended_dict + + def _create_volume(self, name, size, params): + """Create a new volume with the given name and size.""" + cli_cmd = ('createlun -n %(name)s -lunsize %(size)s ' + '-wrtype %(wrtype)s ' + % {'name': name, + 'size': size, + 'wrtype': params['WriteType']}) + + # If write type is "write through", no need to set mirror switch. + if params['WriteType'] != '2': + cli_cmd = cli_cmd + ('-mirrorsw %(mirrorsw)s ' + % {'mirrorsw': params['MirrorSwitch']}) + + ctr = self._calculate_lun_ctr() + # Dorado5100 does not support thin LUN. + if self.device_type == 'Dorado5100': + cli_cmd = cli_cmd + ('-rg %(raidgroup)s -susize %(susize)s ' + '-c %(ctr)s' + % {'raidgroup': params['StoragePool'], + 'susize': params['StripUnitSize'], + 'ctr': ctr}) + else: + if params['LUNType'] == 'Thin': + # Not allowed to specify ctr for thin LUN. 
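+                # In the CLI, '-type 2' creates a thin LUN and '-type 3'
+                # a thick one.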
+ ctr_str = '' + luntype_str = '-type 2' + else: + ctr_str = ' -c %s' % ctr + luntype_str = '-type 3' + + cli_cmd = cli_cmd + luntype_str + ctr_str + + out = self._execute_cli(cli_cmd) + + self._assert_cli_operate_out('_create_volume', + 'Failed to create volume %s' % name, + cli_cmd, out) + + self._update_lun_distribution(ctr) + + return self._get_lun_id(name) + + def _get_lun_id(self, name): + luns = self._get_all_luns_info() + if luns: + n_index = (4 if 'Dorado2100 G2' == self.device_type else 5) + for lun in luns: + if lun[n_index] == name: + return lun[0] + return None + + def create_volume_from_snapshot(self, volume, snapshot): + err_msg = (_('create_volume_from_snapshot: %(device)s does ' + 'not support create volume from snapshot.') + % {'device': self.device_type}) + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + + def create_cloned_volume(self, volume, src_vref): + err_msg = (_('create_cloned_volume: %(device)s does ' + 'not support clone volume.') + % {'device': self.device_type}) + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + + def extend_volume(self, volume, new_size): + if self.device_type == 'Dorado2100 G2': + err_msg = (_('extend_volume: %(device)s does not support ' + 'extend volume.') % {'device': self.device_type}) + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + else: + return TseriesCommon.extend_volume(self, volume, new_size) + + def create_snapshot(self, snapshot): + if self.device_type == 'Dorado2100 G2': + err_msg = (_('create_snapshot: %(device)s does not support ' + 'snapshot.') % {'device': self.device_type}) + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + else: + return TseriesCommon.create_snapshot(self, snapshot) + + def delete_snapshot(self, snapshot): + if self.device_type == 'Dorado2100 G2': + return + else: + TseriesCommon.delete_snapshot(self, snapshot) + + def _get_lun_params(self): + params_conf = self._parse_conf_lun_params() + # Select a pool with maximum capacity. + if self.device_type == 'Dorado5100': + pools_dev = self._get_dev_pool_info('Thick') + params_conf['StoragePool'] = \ + self._get_maximum_capacity_pool_id(params_conf['StoragePool'], + pools_dev, 'Thick') + return params_conf + + def _parse_conf_lun_params(self): + """Get parameters from config file for creating LUN.""" + # Default LUN parameters. + conf_params = {'LUNType': 'Thin', + 'StripUnitSize': '64', + 'WriteType': '1', + 'MirrorSwitch': '1'} + + root = huawei_utils.parse_xml_file(self.xml_conf) + + luntype = root.findtext('LUN/LUNType') + if luntype: + if luntype.strip() in ['Thick', 'Thin']: + conf_params['LUNType'] = luntype.strip() + else: + err_msg = (_('LUNType must be "Thin" or "Thick". ' + 'LUNType:%(type)s') % {'type': luntype}) + LOG.error(err_msg) + raise exception.InvalidInput(reason=err_msg) + + # Here we do not judge whether the parameters are set correct. + # CLI will return error responses if the parameters are invalid. + stripunitsize = root.findtext('LUN/StripUnitSize') + if stripunitsize: + conf_params['StripUnitSize'] = stripunitsize.strip() + writetype = root.findtext('LUN/WriteType') + if writetype: + conf_params['WriteType'] = writetype.strip() + mirrorswitch = root.findtext('LUN/MirrorSwitch') + if mirrorswitch: + conf_params['MirrorSwitch'] = mirrorswitch.strip() + + # No need to set StoragePool for Dorado2100 G2. 
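+        # Dorado2100 G2 LUNs are not created against a named RAID group,
+        # so no pool list is read from the config file.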
+ if self.device_type == 'Dorado2100 G2': + return conf_params + + pools_conf = root.findall('LUN/StoragePool') + conf_params['StoragePool'] = [] + for pool in pools_conf: + conf_params['StoragePool'].append(pool.attrib['Name'].strip()) + + return conf_params + + def _get_free_capacity(self): + """Get total free capacity of pools.""" + self._update_login_info() + lun_type = ('Thin' if self.device_type == 'Dorado2100 G2' else 'Thick') + pools_dev = self._get_dev_pool_info(lun_type) + total_free_capacity = 0.0 + for pool_dev in pools_dev: + if self.device_type == 'Dorado2100 G2': + total_free_capacity += float(pool_dev[2]) + continue + else: + params_conf = self._parse_conf_lun_params() + pools_conf = params_conf['StoragePool'] + for pool_conf in pools_conf: + if pool_dev[5] == pool_conf: + total_free_capacity += float(pool_dev[3]) + break + + return total_free_capacity / 1024 diff --git a/cinder/volume/drivers/ibm/__init__.py b/cinder/volume/drivers/ibm/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cinder/volume/drivers/ibm/storwize_svc/__init__.py b/cinder/volume/drivers/ibm/storwize_svc/__init__.py new file mode 100644 index 0000000000..399258f0aa --- /dev/null +++ b/cinder/volume/drivers/ibm/storwize_svc/__init__.py @@ -0,0 +1,638 @@ +# Copyright 2013 IBM Corp. +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +""" +Volume driver for IBM Storwize family and SVC storage systems. + +Notes: +1. If you specify both a password and a key file, this driver will use the + key file only. +2. When using a key file for authentication, it is up to the user or + system administrator to store the private key in a safe manner. +3. The defaults for creating volumes are "-rsize 2% -autoexpand + -grainsize 256 -warning 0". These can be changed in the configuration + file or by using volume types(recommended only for advanced users). + +Limitations: +1. The driver expects CLI output in English, error messages may be in a + localized format. +2. Clones and creating volumes from snapshots, where the source and target + are of different sizes, is not supported. 
+ +""" + +from oslo.config import cfg + +from cinder import context +from cinder import exception +from cinder.openstack.common import excutils +from cinder.openstack.common import log as logging +from cinder import units +from cinder.volume.drivers.ibm.storwize_svc import helpers as storwize_helpers +from cinder.volume.drivers.san import san +from cinder.volume import volume_types + +LOG = logging.getLogger(__name__) + +storwize_svc_opts = [ + cfg.StrOpt('storwize_svc_volpool_name', + default='volpool', + help='Storage system storage pool for volumes'), + cfg.IntOpt('storwize_svc_vol_rsize', + default=2, + help='Storage system space-efficiency parameter for volumes ' + '(percentage)'), + cfg.IntOpt('storwize_svc_vol_warning', + default=0, + help='Storage system threshold for volume capacity warnings ' + '(percentage)'), + cfg.BoolOpt('storwize_svc_vol_autoexpand', + default=True, + help='Storage system autoexpand parameter for volumes ' + '(True/False)'), + cfg.IntOpt('storwize_svc_vol_grainsize', + default=256, + help='Storage system grain size parameter for volumes ' + '(32/64/128/256)'), + cfg.BoolOpt('storwize_svc_vol_compression', + default=False, + help='Storage system compression option for volumes'), + cfg.BoolOpt('storwize_svc_vol_easytier', + default=True, + help='Enable Easy Tier for volumes'), + cfg.IntOpt('storwize_svc_vol_iogrp', + default=0, + help='The I/O group in which to allocate volumes'), + cfg.IntOpt('storwize_svc_flashcopy_timeout', + default=120, + help='Maximum number of seconds to wait for FlashCopy to be ' + 'prepared. Maximum value is 600 seconds (10 minutes)'), + cfg.StrOpt('storwize_svc_connection_protocol', + default='iSCSI', + help='Connection protocol (iSCSI/FC)'), + cfg.BoolOpt('storwize_svc_iscsi_chap_enabled', + default=True, + help='Configure CHAP authentication for iSCSI connections ' + '(Default: Enabled)'), + cfg.BoolOpt('storwize_svc_multipath_enabled', + default=False, + help='Connect with multipath (FC only; iSCSI multipath is ' + 'controlled by Nova)'), + cfg.BoolOpt('storwize_svc_multihostmap_enabled', + default=True, + help='Allows vdisk to multi host mapping'), +] + +CONF = cfg.CONF +CONF.register_opts(storwize_svc_opts) + + +class StorwizeSVCDriver(san.SanDriver): + """IBM Storwize V7000 and SVC iSCSI/FC volume driver. 
+ + Version history: + 1.0 - Initial driver + 1.1 - FC support, create_cloned_volume, volume type support, + get_volume_stats, minor bug fixes + 1.2.0 - Added retype + 1.2.1 - Code refactor, improved exception handling + """ + + VERSION = "1.2.1" + + def __init__(self, *args, **kwargs): + super(StorwizeSVCDriver, self).__init__(*args, **kwargs) + self.configuration.append_config_values(storwize_svc_opts) + self._helpers = storwize_helpers.StorwizeHelpers(self._run_ssh) + self._state = {'storage_nodes': {}, + 'enabled_protocols': set(), + 'compression_enabled': False, + 'available_iogrps': [], + 'system_name': None, + 'system_id': None, + 'extent_size': None, + 'code_level': None, + } + + def do_setup(self, ctxt): + """Check that we have all configuration details from the storage.""" + LOG.debug(_('enter: do_setup')) + + # Get storage system name, id, and code level + self._state.update(self._helpers.get_system_info()) + + # Validate that the pool exists + pool = self.configuration.storwize_svc_volpool_name + try: + attributes = self._helpers.get_pool_attrs(pool) + except exception.VolumeBackendAPIException: + msg = _('Failed getting details for pool %s') % pool + raise exception.InvalidInput(reason=msg) + self._state['extent_size'] = attributes['extent_size'] + + # Check if compression is supported + self._state['compression_enabled'] = \ + self._helpers.compression_enabled() + + # Get the available I/O groups + self._state['available_iogrps'] = \ + self._helpers.get_available_io_groups() + + # Get the iSCSI and FC names of the Storwize/SVC nodes + self._state['storage_nodes'] = self._helpers.get_node_info() + + # Add the iSCSI IP addresses and WWPNs to the storage node info + self._helpers.add_iscsi_ip_addrs(self._state['storage_nodes']) + self._helpers.add_fc_wwpns(self._state['storage_nodes']) + + # For each node, check what connection modes it supports. Delete any + # nodes that do not support any types (may be partially configured). 
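+        # Sketch of one storage_nodes entry at this point (values are
+        # illustrative): {'id': '1', 'IO_group': '0',
+        # 'iscsi_name': 'iqn.1986-03.com.ibm:2145...', 'ipv4': ['10.0.0.1'],
+        # 'ipv6': [], 'WWPN': ['500507680140ABCD'],
+        # 'enabled_protocols': []}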
+ to_delete = [] + for k, node in self._state['storage_nodes'].iteritems(): + if ((len(node['ipv4']) or len(node['ipv6'])) + and len(node['iscsi_name'])): + node['enabled_protocols'].append('iSCSI') + self._state['enabled_protocols'].add('iSCSI') + if len(node['WWPN']): + node['enabled_protocols'].append('FC') + self._state['enabled_protocols'].add('FC') + if not len(node['enabled_protocols']): + to_delete.append(k) + for delkey in to_delete: + del self._state['storage_nodes'][delkey] + + # Make sure we have at least one node configured + if not len(self._state['storage_nodes']): + msg = _('do_setup: No configured nodes.') + LOG.error(msg) + raise exception.VolumeDriverException(message=msg) + LOG.debug(_('leave: do_setup')) + + def check_for_setup_error(self): + """Ensure that the flags are set properly.""" + LOG.debug(_('enter: check_for_setup_error')) + + # Check that we have the system ID information + if self._state['system_name'] is None: + exception_msg = (_('Unable to determine system name')) + raise exception.VolumeBackendAPIException(data=exception_msg) + if self._state['system_id'] is None: + exception_msg = (_('Unable to determine system id')) + raise exception.VolumeBackendAPIException(data=exception_msg) + if self._state['extent_size'] is None: + exception_msg = (_('Unable to determine pool extent size')) + raise exception.VolumeBackendAPIException(data=exception_msg) + + required_flags = ['san_ip', 'san_ssh_port', 'san_login', + 'storwize_svc_volpool_name'] + for flag in required_flags: + if not self.configuration.safe_get(flag): + raise exception.InvalidInput(reason=_('%s is not set') % flag) + + # Ensure that either password or keyfile were set + if not (self.configuration.san_password or + self.configuration.san_private_key): + raise exception.InvalidInput( + reason=_('Password or SSH private key is required for ' + 'authentication: set either san_password or ' + 'san_private_key option')) + + # Check that flashcopy_timeout is not more than 10 minutes + flashcopy_timeout = self.configuration.storwize_svc_flashcopy_timeout + if not (flashcopy_timeout > 0 and flashcopy_timeout <= 600): + raise exception.InvalidInput( + reason=_('Illegal value %d specified for ' + 'storwize_svc_flashcopy_timeout: ' + 'valid values are between 0 and 600') + % flashcopy_timeout) + + opts = self._helpers.build_default_opts(self.configuration) + self._helpers.check_vdisk_opts(self._state, opts) + + LOG.debug(_('leave: check_for_setup_error')) + + def ensure_export(self, ctxt, volume): + """Check that the volume exists on the storage. + + The system does not "export" volumes as a Linux iSCSI target does, + and therefore we just check that the volume exists on the storage. 
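+        If the volume is missing we only log an error here; nothing is
+        recreated on the backend.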
+ """ + volume_defined = self._helpers.is_vdisk_defined(volume['name']) + if not volume_defined: + LOG.error(_('ensure_export: Volume %s not found on storage') + % volume['name']) + + def create_export(self, ctxt, volume): + model_update = None + return model_update + + def remove_export(self, ctxt, volume): + pass + + def validate_connector(self, connector): + """Check connector for at least one enabled protocol (iSCSI/FC).""" + valid = False + if ('iSCSI' in self._state['enabled_protocols'] and + 'initiator' in connector): + valid = True + if 'FC' in self._state['enabled_protocols'] and 'wwpns' in connector: + valid = True + if not valid: + msg = (_('The connector does not contain the required ' + 'information.')) + LOG.error(msg) + raise exception.VolumeDriverException(message=msg) + + def _get_vdisk_params(self, type_id, volume_type=None): + return self._helpers.get_vdisk_params(self.configuration, self._state, + type_id, volume_type=volume_type) + + def initialize_connection(self, volume, connector): + """Perform the necessary work so that an iSCSI/FC connection can + be made. + + To be able to create an iSCSI/FC connection from a given host to a + volume, we must: + 1. Translate the given iSCSI name or WWNN to a host name + 2. Create new host on the storage system if it does not yet exist + 3. Map the volume to the host if it is not already done + 4. Return the connection information for relevant nodes (in the + proper I/O group) + + """ + + LOG.debug(_('enter: initialize_connection: volume %(vol)s with ' + 'connector %(conn)s') % {'vol': str(volume), + 'conn': str(connector)}) + + vol_opts = self._get_vdisk_params(volume['volume_type_id']) + host_name = connector['host'] + volume_name = volume['name'] + + # Check if a host object is defined for this host name + host_name = self._helpers.get_host_from_connector(connector) + if host_name is None: + # Host does not exist - add a new host to Storwize/SVC + host_name = self._helpers.create_host(connector) + + if vol_opts['protocol'] == 'iSCSI': + chap_secret = self._helpers.get_chap_secret_for_host(host_name) + chap_enabled = self.configuration.storwize_svc_iscsi_chap_enabled + if chap_enabled and chap_secret is None: + chap_secret = self._helpers.add_chap_secret_to_host(host_name) + elif not chap_enabled and chap_secret: + LOG.warning(_('CHAP secret exists for host but CHAP is ' + 'disabled')) + + volume_attributes = self._helpers.get_vdisk_attributes(volume_name) + if volume_attributes is None: + msg = (_('initialize_connection: Failed to get attributes' + ' for volume %s') % volume_name) + LOG.error(msg) + raise exception.VolumeDriverException(message=msg) + + multihostmap = self.configuration.storwize_svc_multihostmap_enabled + lun_id = self._helpers.map_vol_to_host(volume_name, host_name, + multihostmap) + try: + preferred_node = volume_attributes['preferred_node_id'] + IO_group = volume_attributes['IO_group_id'] + except KeyError as e: + LOG.error(_('Did not find expected column name in ' + 'lsvdisk: %s') % str(e)) + msg = (_('initialize_connection: Missing volume ' + 'attribute for volume %s') % volume_name) + raise exception.VolumeBackendAPIException(data=msg) + + try: + # Get preferred node and other nodes in I/O group + preferred_node_entry = None + io_group_nodes = [] + for node in self._state['storage_nodes'].itervalues(): + if vol_opts['protocol'] not in node['enabled_protocols']: + continue + if node['id'] == preferred_node: + preferred_node_entry = node + if node['IO_group'] == IO_group: + io_group_nodes.append(node) + + 
if not len(io_group_nodes): + msg = (_('initialize_connection: No node found in ' + 'I/O group %(gid)s for volume %(vol)s') % + {'gid': IO_group, 'vol': volume_name}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + if not preferred_node_entry and not vol_opts['multipath']: + # Get 1st node in I/O group + preferred_node_entry = io_group_nodes[0] + LOG.warn(_('initialize_connection: Did not find a preferred ' + 'node for volume %s') % volume_name) + + properties = {} + properties['target_discovered'] = False + properties['target_lun'] = lun_id + properties['volume_id'] = volume['id'] + if vol_opts['protocol'] == 'iSCSI': + type_str = 'iscsi' + if len(preferred_node_entry['ipv4']): + ipaddr = preferred_node_entry['ipv4'][0] + else: + ipaddr = preferred_node_entry['ipv6'][0] + properties['target_portal'] = '%s:%s' % (ipaddr, '3260') + properties['target_iqn'] = preferred_node_entry['iscsi_name'] + if chap_secret: + properties['auth_method'] = 'CHAP' + properties['auth_username'] = connector['initiator'] + properties['auth_password'] = chap_secret + else: + type_str = 'fibre_channel' + conn_wwpns = self._helpers.get_conn_fc_wwpns(host_name) + if len(conn_wwpns) == 0: + msg = (_('Could not get FC connection information for the ' + 'host-volume connection. Is the host configured ' + 'properly for FC connections?')) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + if not vol_opts['multipath']: + if preferred_node_entry['WWPN'] in conn_wwpns: + properties['target_wwn'] = preferred_node_entry['WWPN'] + else: + properties['target_wwn'] = conn_wwpns[0] + else: + properties['target_wwn'] = conn_wwpns + except Exception: + with excutils.save_and_reraise_exception(): + self.terminate_connection(volume, connector) + LOG.error(_('initialize_connection: Failed to collect return ' + 'properties for volume %(vol)s and connector ' + '%(conn)s.\n') % {'vol': str(volume), + 'conn': str(connector)}) + + LOG.debug(_('leave: initialize_connection:\n volume: %(vol)s\n ' + 'connector %(conn)s\n properties: %(prop)s') + % {'vol': str(volume), + 'conn': str(connector), + 'prop': str(properties)}) + + return {'driver_volume_type': type_str, 'data': properties, } + + def terminate_connection(self, volume, connector, **kwargs): + """Cleanup after an iSCSI connection has been terminated. + + When we clean up a terminated connection between a given connector + and volume, we: + 1. Translate the given connector to a host name + 2. Remove the volume-to-host mapping if it exists + 3. 
Delete the host if it has no more mappings (hosts are created + automatically by this driver when mappings are created) + """ + LOG.debug(_('enter: terminate_connection: volume %(vol)s with ' + 'connector %(conn)s') % {'vol': str(volume), + 'conn': str(connector)}) + + vol_name = volume['name'] + if 'host' in connector: + host_name = self._helpers.get_host_from_connector(connector) + if host_name is None: + msg = (_('terminate_connection: Failed to get host name from' + ' connector.')) + LOG.error(msg) + raise exception.VolumeDriverException(message=msg) + else: + # See bug #1244257 + host_name = None + + self._helpers.unmap_vol_from_host(vol_name, host_name) + + LOG.debug(_('leave: terminate_connection: volume %(vol)s with ' + 'connector %(conn)s') % {'vol': str(volume), + 'conn': str(connector)}) + + def create_volume(self, volume): + opts = self._get_vdisk_params(volume['volume_type_id']) + pool = self.configuration.storwize_svc_volpool_name + return self._helpers.create_vdisk(volume['name'], str(volume['size']), + 'gb', pool, opts) + + def delete_volume(self, volume): + self._helpers.delete_vdisk(volume['name'], False) + + def create_snapshot(self, snapshot): + ctxt = context.get_admin_context() + source_vol = self.db.volume_get(ctxt, snapshot['volume_id']) + opts = self._get_vdisk_params(source_vol['volume_type_id']) + self._helpers.create_copy(snapshot['volume_name'], snapshot['name'], + snapshot['volume_id'], self.configuration, + opts, False) + + def delete_snapshot(self, snapshot): + self._helpers.delete_vdisk(snapshot['name'], False) + + def create_volume_from_snapshot(self, volume, snapshot): + if volume['size'] != snapshot['volume_size']: + msg = (_('create_volume_from_snapshot: Source and destination ' + 'size differ.')) + LOG.error(msg) + raise exception.VolumeDriverException(message=msg) + + opts = self._get_vdisk_params(volume['volume_type_id']) + self._helpers.create_copy(snapshot['name'], volume['name'], + snapshot['id'], self.configuration, + opts, True) + + def create_cloned_volume(self, tgt_volume, src_volume): + if src_volume['size'] != tgt_volume['size']: + msg = (_('create_cloned_volume: Source and destination ' + 'size differ.')) + LOG.error(msg) + raise exception.VolumeDriverException(message=msg) + + opts = self._get_vdisk_params(tgt_volume['volume_type_id']) + self._helpers.create_copy(src_volume['name'], tgt_volume['name'], + src_volume['id'], self.configuration, + opts, True) + + def extend_volume(self, volume, new_size): + LOG.debug(_('enter: extend_volume: volume %s') % volume['id']) + ret = self._helpers.ensure_vdisk_no_fc_mappings(volume['name'], + allow_snaps=False) + if not ret: + msg = (_('extend_volume: Extending a volume with snapshots is not ' + 'supported.')) + LOG.error(msg) + raise exception.VolumeDriverException(message=msg) + + extend_amt = int(new_size) - volume['size'] + self._helpers.extend_vdisk(volume['name'], extend_amt) + LOG.debug(_('leave: extend_volume: volume %s') % volume['id']) + + def migrate_volume(self, ctxt, volume, host): + """Migrate directly if source and dest are managed by same storage. + + The method uses the migratevdisk method, which returns almost + immediately, if the source and target pools have the same extent_size. + Otherwise, it uses addvdiskcopy and rmvdiskcopy, which require waiting + for the copy operation to complete. 
+ + :param ctxt: Context + :param volume: A dictionary describing the volume to migrate + :param host: A dictionary describing the host to migrate to, where + host['host'] is its name, and host['capabilities'] is a + dictionary of its reported capabilities. + """ + LOG.debug(_('enter: migrate_volume: id=%(id)s, host=%(host)s') % + {'id': volume['id'], 'host': host['host']}) + + false_ret = (False, None) + dest_pool = self._helpers.can_migrate_to_host(host, self._state) + if dest_pool is None: + return false_ret + + if 'extent_size' not in host['capabilities']: + return false_ret + if host['capabilities']['extent_size'] == self._state['extent_size']: + # If source and dest pools have the same extent size, migratevdisk + self._helpers.migrate_vdisk(volume['name'], dest_pool) + else: + # If source and dest pool extent size differ, add/delete vdisk copy + ctxt = context.get_admin_context() + if volume['volume_type_id'] is not None: + volume_type_id = volume['volume_type_id'] + vol_type = volume_types.get_volume_type(ctxt, volume_type_id) + else: + vol_type = None + self._helpers.migrate_volume_vdiskcopy(volume['name'], dest_pool, + vol_type, + self._state, + self.configuration) + + LOG.debug(_('leave: migrate_volume: id=%(id)s, host=%(host)s') % + {'id': volume['id'], 'host': host['host']}) + return (True, None) + + def retype(self, ctxt, volume, new_type, diff, host): + """Convert the volume to be of the new type. + + Returns a boolean indicating whether the retype occurred. + + :param ctxt: Context + :param volume: A dictionary describing the volume to migrate + :param new_type: A dictionary describing the volume type to convert to + :param diff: A dictionary with the difference between the two types + :param host: A dictionary describing the host to migrate to, where + host['host'] is its name, and host['capabilities'] is a + dictionary of its reported capabilities. + """ + LOG.debug(_('enter: retype: id=%(id)s, new_type=%(new_type)s,' + 'diff=%(diff)s, host=%(host)s') % {'id': volume['id'], + 'new_type': new_type, + 'diff': diff, + 'host': host}) + + ignore_keys = ['protocol', 'multipath'] + no_copy_keys = ['warning', 'autoexpand', 'easytier', 'iogrp'] + copy_keys = ['rsize', 'grainsize', 'compression'] + all_keys = ignore_keys + no_copy_keys + copy_keys + old_opts = self._get_vdisk_params(volume['volume_type_id']) + new_opts = self._get_vdisk_params(new_type['id'], + volume_type=new_type) + + vdisk_changes = [] + need_copy = False + for key in all_keys: + if old_opts[key] != new_opts[key]: + if key in copy_keys: + need_copy = True + break + elif key in no_copy_keys: + vdisk_changes.append(key) + + dest_location = host['capabilities'].get('location_info') + if self._stats['location_info'] != dest_location: + need_copy = True + + if need_copy: + dest_pool = self._helpers.can_migrate_to_host(host, self._state) + if dest_pool is None: + return False + + self._helpers.migrate_volume_vdiskcopy(volume['name'], dest_pool, + new_type, + self._state, + self.configuration) + else: + self._helpers.change_vdisk_options(volume['name'], vdisk_changes, + new_opts, self._state) + + LOG.debug(_('exit: retype: ild=%(id)s, new_type=%(new_type)s,' + 'diff=%(diff)s, host=%(host)s') % {'id': volume['id'], + 'new_type': new_type, + 'diff': diff, + 'host': host['host']}) + return True + + def get_volume_stats(self, refresh=False): + """Get volume stats. + + If we haven't gotten stats yet or 'refresh' is True, + run update the stats first. 
+ """ + if not self._stats or refresh: + self._update_volume_stats() + + return self._stats + + def _update_volume_stats(self): + """Retrieve stats info from volume group.""" + + LOG.debug(_("Updating volume stats")) + data = {} + + data['vendor_name'] = 'IBM' + data['driver_version'] = self.VERSION + data['storage_protocol'] = list(self._state['enabled_protocols']) + + data['total_capacity_gb'] = 0 # To be overwritten + data['free_capacity_gb'] = 0 # To be overwritten + data['reserved_percentage'] = self.configuration.reserved_percentage + data['QoS_support'] = False + + pool = self.configuration.storwize_svc_volpool_name + backend_name = self.configuration.safe_get('volume_backend_name') + if not backend_name: + backend_name = '%s_%s' % (self._state['system_name'], pool) + data['volume_backend_name'] = backend_name + + attributes = self._helpers.get_pool_attrs(pool) + if not attributes: + LOG.error(_('Could not get pool data from the storage')) + exception_message = (_('_update_volume_stats: ' + 'Could not get storage pool data')) + raise exception.VolumeBackendAPIException(data=exception_message) + + data['total_capacity_gb'] = (float(attributes['capacity']) / + units.GiB) + data['free_capacity_gb'] = (float(attributes['free_capacity']) / + units.GiB) + data['easytier_support'] = attributes['easy_tier'] in ['on', 'auto'] + data['compression_support'] = self._state['compression_enabled'] + data['extent_size'] = self._state['extent_size'] + data['location_info'] = ('StorwizeSVCDriver:%(sys_id)s:%(pool)s' % + {'sys_id': self._state['system_id'], + 'pool': pool}) + + self._stats = data diff --git a/cinder/volume/drivers/ibm/storwize_svc/helpers.py b/cinder/volume/drivers/ibm/storwize_svc/helpers.py new file mode 100644 index 0000000000..9fc6b0b79b --- /dev/null +++ b/cinder/volume/drivers/ibm/storwize_svc/helpers.py @@ -0,0 +1,750 @@ +# Copyright 2014 IBM Corp. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +import random +import re +import six +import unicodedata + +from eventlet import greenthread + +from cinder import context +from cinder import exception +from cinder.openstack.common import excutils +from cinder.openstack.common import log as logging +from cinder.openstack.common import loopingcall +from cinder.openstack.common import strutils +from cinder import utils +from cinder.volume.drivers.ibm.storwize_svc import ssh as storwize_ssh +from cinder.volume import volume_types + +LOG = logging.getLogger(__name__) + + +class StorwizeHelpers(object): + def __init__(self, run_ssh): + self.ssh = storwize_ssh.StorwizeSSH(run_ssh) + self.check_fcmapping_interval = 3 + + @staticmethod + def handle_keyerror(cmd, out): + msg = (_('Could not find key in output of command %(cmd)s: %(out)s') + % {'out': out, 'cmd': cmd}) + raise exception.VolumeBackendAPIException(data=msg) + + def compression_enabled(self): + """Return whether or not compression is enabled for this system.""" + resp = self.ssh.lslicense() + keys = ['license_compression_enclosures', + 'license_compression_capacity'] + for key in keys: + if resp[key] != '0': + return True + return False + + def get_system_info(self): + """Return system's name, ID, and code level.""" + resp = self.ssh.lssystem() + level = resp['code_level'] + match_obj = re.search('([0-9].){3}[0-9]', level) + if match_obj is None: + msg = _('Failed to get code level (%s).') % str(level) + raise exception.VolumeBackendAPIException(data=msg) + code_level = match_obj.group().split('.') + return {'code_level': tuple([int(x) for x in code_level]), + 'system_name': resp['name'], + 'system_id': resp['id']} + + def get_pool_attrs(self, pool): + """Return attributes for the specified pool.""" + return self.ssh.lsmdiskgrp(pool) + + def get_available_io_groups(self): + """Return list of available IO groups.""" + iogrps = [] + resp = self.ssh.lsiogrp() + for iogrp in resp: + try: + if int(iogrp['node_count']) > 0: + iogrps.append(int(iogrp['id'])) + except KeyError: + self.handle_keyerror('lsiogrp', str(iogrp)) + except ValueError: + msg = (_('Expected integer for node_count, ' + 'svcinfo lsiogrp returned: %(node)s') % + {'node': iogrp['node_count']}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + return iogrps + + def get_node_info(self): + """Return dictionary containing information on system's nodes.""" + nodes = {} + resp = self.ssh.lsnode() + for node_data in resp: + try: + if node_data['status'] != 'online': + continue + node = {} + node['id'] = node_data['id'] + node['name'] = node_data['name'] + node['IO_group'] = node_data['IO_group_id'] + node['iscsi_name'] = node_data['iscsi_name'] + node['WWNN'] = node_data['WWNN'] + node['status'] = node_data['status'] + node['WWPN'] = [] + node['ipv4'] = [] + node['ipv6'] = [] + node['enabled_protocols'] = [] + nodes[node['id']] = node + except KeyError: + self.handle_keyerror('lsnode', str(node_data)) + return nodes + + def add_iscsi_ip_addrs(self, storage_nodes): + """Add iSCSI IP addresses to system node information.""" + resp = self.ssh.lsportip() + for ip_data in resp: + try: + state = ip_data['state'] + if ip_data['node_id'] in storage_nodes and ( + state == 'configured' or state == 'online'): + node = storage_nodes[ip_data['node_id']] + if len(ip_data['IP_address']): + node['ipv4'].append(ip_data['IP_address']) + if len(ip_data['IP_address_6']): + node['ipv6'].append(ip_data['IP_address_6']) + except KeyError: + self.handle_keyerror('lsportip', str(ip_data)) + + def add_fc_wwpns(self, 
storage_nodes): + """Add FC WWPNs to system node information.""" + for key in storage_nodes: + node = storage_nodes[key] + resp = self.ssh.lsnode(node_id=node['id']) + wwpns = set(node['WWPN']) + for i, s in resp.select('port_id', 'port_status'): + if 'unconfigured' != s: + wwpns.add(i) + node['WWPN'] = list(wwpns) + LOG.info(_('WWPN on node %(node)s: %(wwpn)s') + % {'node': node['id'], 'wwpn': node['WWPN']}) + + def add_chap_secret_to_host(self, host_name): + """Generate and store a randomly-generated CHAP secret for the host.""" + chap_secret = utils.generate_password() + self.ssh.add_chap_secret(chap_secret, host_name) + return chap_secret + + def get_chap_secret_for_host(self, host_name): + """Generate and store a randomly-generated CHAP secret for the host.""" + resp = self.ssh.lsiscsiauth() + host_found = False + for host_data in resp: + try: + if host_data['name'] == host_name: + host_found = True + if host_data['iscsi_auth_method'] == 'chap': + return host_data['iscsi_chap_secret'] + except KeyError: + self.handle_keyerror('lsiscsiauth', str(host_data)) + if not host_found: + msg = _('Failed to find host %s') % host_name + raise exception.VolumeBackendAPIException(data=msg) + return None + + def get_conn_fc_wwpns(self, host): + wwpns = [] + resp = self.ssh.lsfabric(host=host) + for wwpn in resp.select('local_wwpn'): + wwpns.append(wwpn) + return wwpns + + def get_host_from_connector(self, connector): + """Return the Storwize host described by the connector.""" + LOG.debug(_('enter: get_host_from_connector: %s') % str(connector)) + + # If we have FC information, we have a faster lookup option + host_name = None + if 'wwpns' in connector: + for wwpn in connector['wwpns']: + resp = self.ssh.lsfabric(wwpn=wwpn) + for wwpn_info in resp: + try: + if wwpn_info['remote_wwpn'] == wwpn: + host_name = wwpn_info['name'] + except KeyError: + self.handle_keyerror('lsfabric', str(wwpn_info)) + + # That didn't work, so try exhaustive search + if not host_name: + hosts_info = self.ssh.lshost() + for name in hosts_info.select('name'): + resp = self.ssh.lshost(host=name) + for iscsi, wwpn in resp.select('iscsi_name', 'WWPN'): + if ('initiator' in connector and + iscsi == connector['initiator']): + host_name = name + elif ('wwpns' in connector and + len(connector['wwpns']) and + wwpn.lower() in + [str(x).lower for x in connector['wwpns']]): + host_name = name + + LOG.debug(_('leave: get_host_from_connector: host %s') % host_name) + return host_name + + def create_host(self, connector): + """Create a new host on the storage system. + + We create a host name and associate it with the given connection + information. The host name will be a cleaned up version of the given + host name (at most 55 characters), plus a random 8-character suffix to + avoid collisions. The total length should be at most 63 characters. + """ + LOG.debug(_('enter: create_host: host %s') % connector['host']) + + # Before we start, make sure host name is a string and that we have at + # least one port. 
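+        # Illustrative example of the cleanup below: a connector host named
+        # 'web/server 1' becomes something like 'web-server 1-00437621'
+        # (the 8-digit suffix is random; the value shown is hypothetical).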
+ host_name = connector['host'] + if not isinstance(host_name, six.string_types): + msg = _('create_host: Host name is not unicode or string') + LOG.error(msg) + raise exception.VolumeDriverException(message=msg) + + ports = [] + if 'initiator' in connector: + ports.append(['initiator', '%s' % connector['initiator']]) + if 'wwpns' in connector: + for wwpn in connector['wwpns']: + ports.append(['wwpn', '%s' % wwpn]) + if not len(ports): + msg = _('create_host: No initiators or wwpns supplied.') + LOG.error(msg) + raise exception.VolumeDriverException(message=msg) + + # Build a host name for the Storwize host - first clean up the name + if isinstance(host_name, unicode): + host_name = unicodedata.normalize('NFKD', host_name).encode( + 'ascii', 'replace').decode('ascii') + + for num in range(0, 128): + ch = str(chr(num)) + if not ch.isalnum() and ch not in [' ', '.', '-', '_']: + host_name = host_name.replace(ch, '-') + + # Storwize doesn't like hostname that doesn't starts with letter or _. + if not re.match('^[A-Za-z]', host_name): + host_name = '_' + host_name + + # Add a random 8-character suffix to avoid collisions + rand_id = str(random.randint(0, 99999999)).zfill(8) + host_name = '%s-%s' % (host_name[:55], rand_id) + + # Create a host with one port + port = ports.pop(0) + self.ssh.mkhost(host_name, port[0], port[1]) + + # Add any additional ports to the host + for port in ports: + self.ssh.addhostport(host_name, port[0], port[1]) + + LOG.debug(_('leave: create_host: host %(host)s - %(host_name)s') % + {'host': connector['host'], 'host_name': host_name}) + return host_name + + def delete_host(self, host_name): + self.ssh.rmhost(host_name) + + def map_vol_to_host(self, volume_name, host_name, multihostmap): + """Create a mapping between a volume to a host.""" + + LOG.debug(_('enter: map_vol_to_host: volume %(volume_name)s to ' + 'host %(host_name)s') + % {'volume_name': volume_name, 'host_name': host_name}) + + # Check if this volume is already mapped to this host + mapped = False + luns_used = [] + result_lun = '-1' + resp = self.ssh.lshostvdiskmap(host_name) + for mapping_info in resp: + luns_used.append(int(mapping_info['SCSI_id'])) + if mapping_info['vdisk_name'] == volume_name: + mapped = True + result_lun = mapping_info['SCSI_id'] + + if not mapped: + # Find unused lun + luns_used.sort() + result_lun = str(len(luns_used)) + for index, n in enumerate(luns_used): + if n > index: + result_lun = str(index) + break + self.ssh.mkvdiskhostmap(host_name, volume_name, result_lun, + multihostmap) + + LOG.debug(_('leave: map_vol_to_host: LUN %(result_lun)s, volume ' + '%(volume_name)s, host %(host_name)s') % + {'result_lun': result_lun, + 'volume_name': volume_name, + 'host_name': host_name}) + return result_lun + + def unmap_vol_from_host(self, volume_name, host_name): + """Unmap the volume and delete the host if it has no more mappings.""" + + LOG.debug(_('enter: unmap_vol_from_host: volume %(volume_name)s from ' + 'host %(host_name)s') + % {'volume_name': volume_name, 'host_name': host_name}) + + # Check if the mapping exists + resp = self.ssh.lsvdiskhostmap(volume_name) + if not len(resp): + LOG.warning(_('unmap_vol_from_host: No mapping of volume ' + '%(vol_name)s to any host found.') % + {'vol_name': volume_name}) + return + if host_name is None: + if len(resp) > 1: + LOG.warning(_('unmap_vol_from_host: Multiple mappings of ' + 'volume %(vol_name)s found, no host ' + 'specified.') % {'vol_name': volume_name}) + return + else: + host_name = resp[0]['host_name'] + else: + found = False + 
for h in resp.select('host_name'): + if h == host_name: + found = True + if not found: + LOG.warning(_('unmap_vol_from_host: No mapping of volume ' + '%(vol_name)s to host %(host) found.') % + {'vol_name': volume_name, 'host': host_name}) + + # We now know that the mapping exists + self.ssh.rmvdiskhostmap(host_name, volume_name) + + # If this host has no more mappings, delete it + resp = self.ssh.lshostvdiskmap(host_name) + if not len(resp): + self.delete_host(host_name) + + LOG.debug(_('leave: unmap_vol_from_host: volume %(volume_name)s from ' + 'host %(host_name)s') + % {'volume_name': volume_name, 'host_name': host_name}) + + @staticmethod + def build_default_opts(config): + # Ignore capitalization + protocol = config.storwize_svc_connection_protocol + if protocol.lower() == 'fc': + protocol = 'FC' + elif protocol.lower() == 'iscsi': + protocol = 'iSCSI' + + opt = {'rsize': config.storwize_svc_vol_rsize, + 'warning': config.storwize_svc_vol_warning, + 'autoexpand': config.storwize_svc_vol_autoexpand, + 'grainsize': config.storwize_svc_vol_grainsize, + 'compression': config.storwize_svc_vol_compression, + 'easytier': config.storwize_svc_vol_easytier, + 'protocol': protocol, + 'multipath': config.storwize_svc_multipath_enabled, + 'iogrp': config.storwize_svc_vol_iogrp} + return opt + + @staticmethod + def check_vdisk_opts(state, opts): + # Check that rsize is either -1 or between 0 and 100 + if not (opts['rsize'] >= -1 and opts['rsize'] <= 100): + raise exception.InvalidInput( + reason=_('Illegal value specified for storwize_svc_vol_rsize: ' + 'set to either a percentage (0-100) or -1')) + + # Check that warning is either -1 or between 0 and 100 + if not (opts['warning'] >= -1 and opts['warning'] <= 100): + raise exception.InvalidInput( + reason=_('Illegal value specified for ' + 'storwize_svc_vol_warning: ' + 'set to a percentage (0-100)')) + + # Check that grainsize is 32/64/128/256 + if opts['grainsize'] not in [32, 64, 128, 256]: + raise exception.InvalidInput( + reason=_('Illegal value specified for ' + 'storwize_svc_vol_grainsize: set to either ' + '32, 64, 128, or 256')) + + # Check that compression is supported + if opts['compression'] and not state['compression_enabled']: + raise exception.InvalidInput( + reason=_('System does not support compression')) + + # Check that rsize is set if compression is set + if opts['compression'] and opts['rsize'] == -1: + raise exception.InvalidInput( + reason=_('If compression is set to True, rsize must ' + 'also be set (not equal to -1)')) + + # Check that the requested protocol is enabled + if opts['protocol'] not in state['enabled_protocols']: + raise exception.InvalidInput( + reason=_('Illegal value %(prot)s specified for ' + 'storwize_svc_connection_protocol: ' + 'valid values are %(enabled)s') + % {'prot': opts['protocol'], + 'enabled': ','.join(state['enabled_protocols'])}) + + if opts['iogrp'] not in state['available_iogrps']: + avail_grps = ''.join(str(e) for e in state['available_iogrps']) + raise exception.InvalidInput( + reason=_('I/O group %(iogrp)d is not valid; available ' + 'I/O groups are %(avail)s') + % {'iogrp': opts['iogrp'], + 'avail': avail_grps}) + + def get_vdisk_params(self, config, state, type_id, volume_type=None): + """Return the parameters for creating the vdisk. + + Takes volume type and defaults from config options into account. 
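+
+        Extra specs may be scoped, e.g. 'drivers:warning' or
+        'capabilities:storage_protocol'; apart from the storage_protocol
+        special case, only keys in the 'drivers' scope are applied.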
+ """ + opts = self.build_default_opts(config) + if volume_type is None and type_id is not None: + ctxt = context.get_admin_context() + volume_type = volume_types.get_volume_type(ctxt, type_id) + if volume_type: + specs = dict(volume_type).get('extra_specs') + for k, value in specs.iteritems(): + # Get the scope, if using scope format + key_split = k.split(':') + if len(key_split) == 1: + scope = None + key = key_split[0] + else: + scope = key_split[0] + key = key_split[1] + + # We generally do not look at capabilities in the driver, but + # protocol is a special case where the user asks for a given + # protocol and we want both the scheduler and the driver to act + # on the value. + if scope == 'capabilities' and key == 'storage_protocol': + scope = None + key = 'protocol' + words = value.split() + if not (words and len(words) == 2 and words[0] == ''): + LOG.error(_('Protocol must be specified as ' + '\' iSCSI\' or \' FC\'.')) + del words[0] + value = words[0] + + # Any keys that the driver should look at should have the + # 'drivers' scope. + if scope and scope != 'drivers': + continue + + if key in opts: + this_type = type(opts[key]).__name__ + if this_type == 'int': + value = int(value) + elif this_type == 'bool': + value = strutils.bool_from_string(value) + opts[key] = value + + self.check_vdisk_opts(state, opts) + return opts + + @staticmethod + def _get_vdisk_create_params(opts): + easytier = 'on' if opts['easytier'] else 'off' + + if opts['rsize'] == -1: + params = [] + else: + params = ['-rsize', '%s%%' % str(opts['rsize']), + '-autoexpand', '-warning', + '%s%%' % str(opts['warning'])] + if not opts['autoexpand']: + params.remove('-autoexpand') + + if opts['compression']: + params.append('-compressed') + else: + params.extend(['-grainsize', str(opts['grainsize'])]) + + params.extend(['-easytier', easytier]) + return params + + def create_vdisk(self, name, size, units, pool, opts): + LOG.debug(_('enter: create_vdisk: vdisk %s ') % name) + params = self._get_vdisk_create_params(opts) + self.ssh.mkvdisk(name, size, units, pool, opts, params) + LOG.debug(_('leave: _create_vdisk: volume %s ') % name) + + def get_vdisk_attributes(self, vdisk): + attrs = self.ssh.lsvdisk(vdisk) + return attrs + + def is_vdisk_defined(self, vdisk_name): + """Check if vdisk is defined.""" + attrs = self.get_vdisk_attributes(vdisk_name) + return attrs is not None + + def _prepare_fc_map(self, fc_map_id, timeout): + self.ssh.prestartfcmap(fc_map_id) + mapping_ready = False + wait_time = 5 + max_retries = (timeout / wait_time) + 1 + for try_number in range(1, max_retries): + mapping_attrs = self._get_flashcopy_mapping_attributes(fc_map_id) + if (mapping_attrs is None or + 'status' not in mapping_attrs): + break + if mapping_attrs['status'] == 'prepared': + mapping_ready = True + break + elif mapping_attrs['status'] == 'stopped': + self.ssh.prestartfcmap(fc_map_id) + elif mapping_attrs['status'] != 'preparing': + msg = (_('Unexecpted mapping status %(status)s for mapping' + '%(id)s. Attributes: %(attr)s') + % {'status': mapping_attrs['status'], + 'id': fc_map_id, + 'attr': mapping_attrs}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + greenthread.sleep(wait_time) + + if not mapping_ready: + msg = (_('Mapping %(id)s prepare failed to complete within the' + 'allotted %(to)d seconds timeout. 
Terminating.') + % {'id': fc_map_id, + 'to': timeout}) + LOG.error(msg) + raise exception.VolumeDriverException(message=msg) + + def run_flashcopy(self, source, target, timeout, full_copy=True): + """Create a FlashCopy mapping from the source to the target.""" + LOG.debug(_('enter: run_flashcopy: execute FlashCopy from source ' + '%(source)s to target %(target)s') % + {'source': source, 'target': target}) + + fc_map_id = self.ssh.mkfcmap(source, target, full_copy) + self._prepare_fc_map(fc_map_id, timeout) + self.ssh.startfcmap(fc_map_id) + + LOG.debug(_('leave: run_flashcopy: FlashCopy started from ' + '%(source)s to %(target)s') % + {'source': source, 'target': target}) + + def _get_vdisk_fc_mappings(self, vdisk): + """Return FlashCopy mappings that this vdisk is associated with.""" + mapping_ids = [] + resp = self.ssh.lsvdiskfcmappings(vdisk) + for id in resp.select('id'): + mapping_ids.append(id) + return mapping_ids + + def _get_flashcopy_mapping_attributes(self, fc_map_id): + resp = self.ssh.lsfcmap(fc_map_id) + if not len(resp): + return None + return resp[0] + + def _check_vdisk_fc_mappings(self, name, allow_snaps=True): + """FlashCopy mapping check helper.""" + LOG.debug(_('Loopcall: _check_vdisk_fc_mappings(), vdisk %s') % name) + mapping_ids = self._get_vdisk_fc_mappings(name) + wait_for_copy = False + for map_id in mapping_ids: + attrs = self._get_flashcopy_mapping_attributes(map_id) + if not attrs: + continue + source = attrs['source_vdisk_name'] + target = attrs['target_vdisk_name'] + copy_rate = attrs['copy_rate'] + status = attrs['status'] + + if copy_rate == '0': + if source == name: + # Vdisk with snapshots. Return False if snapshot + # not allowed. + if not allow_snaps: + raise loopingcall.LoopingCallDone(retvalue=False) + self.ssh.chfcmap(map_id, copyrate='50', autodel='on') + wait_for_copy = True + else: + # A snapshot + if target != name: + msg = (_('Vdisk %(name)s not involved in ' + 'mapping %(src)s -> %(tgt)s') % + {'name': name, 'src': source, 'tgt': target}) + LOG.error(msg) + raise exception.VolumeDriverException(message=msg) + if status in ['copying', 'prepared']: + self.ssh.stopfcmap(map_id) + # Need to wait for the fcmap to change to + # stopped state before remove fcmap + wait_for_copy = True + elif status in ['stopping', 'preparing']: + wait_for_copy = True + else: + self.ssh.rmfcmap(map_id) + # Case 4: Copy in progress - wait and will autodelete + else: + if status == 'prepared': + self.ssh.stopfcmap(map_id) + self.ssh.rmfcmap(map_id) + elif status == 'idle_or_copied': + # Prepare failed + self.ssh.rmfcmap(map_id) + else: + wait_for_copy = True + if not wait_for_copy or not len(mapping_ids): + raise loopingcall.LoopingCallDone(retvalue=True) + + def ensure_vdisk_no_fc_mappings(self, name, allow_snaps=True): + """Ensure vdisk has no flashcopy mappings.""" + timer = loopingcall.FixedIntervalLoopingCall( + self._check_vdisk_fc_mappings, name, allow_snaps) + # Create a timer greenthread. The default volume service heart + # beat is every 10 seconds. The flashcopy usually takes hours + # before it finishes. Don't set the sleep interval shorter + # than the heartbeat. Otherwise volume service heartbeat + # will not be serviced. 
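+        # _check_vdisk_fc_mappings signals completion by raising
+        # LoopingCallDone(retvalue=...); wait() then returns that value
+        # (True on success, False when disallowed snapshots exist).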
+        LOG.debug(_('Calling _check_vdisk_fc_mappings: vdisk %s')
+                  % name)
+        ret = timer.start(interval=self.check_fcmapping_interval).wait()
+        timer.stop()
+        return ret
+
+    def delete_vdisk(self, vdisk, force):
+        """Ensure the vdisk is not part of any FC mapping, then delete it."""
+        LOG.debug(_('enter: delete_vdisk: vdisk %s') % vdisk)
+        if not self.is_vdisk_defined(vdisk):
+            LOG.info(_('Tried to delete non-existent vdisk %s.') % vdisk)
+            return
+        self.ensure_vdisk_no_fc_mappings(vdisk)
+        self.ssh.rmvdisk(vdisk, force=force)
+        LOG.debug(_('leave: delete_vdisk: vdisk %s') % vdisk)
+
+    def create_copy(self, src, tgt, src_id, config, opts, full_copy):
+        """Create a new snapshot using FlashCopy."""
+        LOG.debug(_('enter: create_copy: snapshot %(src)s to %(tgt)s') %
+                  {'tgt': tgt, 'src': src})
+
+        src_attrs = self.get_vdisk_attributes(src)
+        if src_attrs is None:
+            msg = (_('create_copy: Source vdisk %(src)s (%(src_id)s) '
+                     'does not exist') % {'src': src, 'src_id': src_id})
+            LOG.error(msg)
+            raise exception.VolumeDriverException(message=msg)
+
+        src_size = src_attrs['capacity']
+        pool = config.storwize_svc_volpool_name
+        self.create_vdisk(tgt, src_size, 'b', pool, opts)
+        timeout = config.storwize_svc_flashcopy_timeout
+        try:
+            self.run_flashcopy(src, tgt, timeout, full_copy=full_copy)
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                self.delete_vdisk(tgt, True)
+
+        LOG.debug(_('leave: create_copy: snapshot %(tgt)s from '
+                    'vdisk %(src)s') %
+                  {'tgt': tgt, 'src': src})
+
+    def extend_vdisk(self, vdisk, amount):
+        self.ssh.expandvdisksize(vdisk, amount)
+
+    def migrate_volume_vdiskcopy(self, vdisk, dest_pool, volume_type,
+                                 state, config):
+        """Migrate a volume using addvdiskcopy and rmvdiskcopy.
+
+        This will add a vdisk copy with the given volume type in the given
+        pool, wait until it syncs, and delete the original copy.
+ """ + this_pool = config.storwize_svc_volpool_name + resp = self.ssh.lsvdiskcopy(vdisk) + orig_copy_id = None + for copy_id, mdisk_grp in resp.select('copy_id', 'mdisk_grp_name'): + if mdisk_grp == this_pool: + orig_copy_id = copy_id + + if orig_copy_id is None: + msg = (_('migrate_volume started without a vdisk copy in the ' + 'expected pool.')) + LOG.error(msg) + raise exception.VolumeDriverException(message=msg) + + if volume_type is None: + opts = self.get_vdisk_params(config, state, None) + else: + opts = self.get_vdisk_params(config, state, volume_type['id'], + volume_type=volume_type) + params = self._get_vdisk_create_params(opts) + new_copy_id = self.ssh.addvdiskcopy(vdisk, dest_pool, params) + + sync = False + while not sync: + sync = self.ssh.lsvdiskcopy(vdisk, copy_id=new_copy_id)[0]['sync'] + if sync == 'yes': + sync = True + else: + greenthread.sleep(10) + + self.ssh.rmvdiskcopy(vdisk, orig_copy_id) + + def migrate_vdisk(self, vdisk, dest_pool): + self.ssh.migratevdisk(vdisk, dest_pool) + + @staticmethod + def can_migrate_to_host(host, state): + if 'location_info' not in host['capabilities']: + return None + info = host['capabilities']['location_info'] + try: + (dest_type, dest_id, dest_pool) = info.split(':') + except ValueError: + return None + if (dest_type != 'StorwizeSVCDriver' or dest_id != state['system_id']): + return None + return dest_pool + + def change_vdisk_options(self, vdisk, changes, opts, state): + if 'iogrp' in opts: + opts['iogrp'] = str(opts['iogrp']) + if 'warning' in opts: + opts['warning'] = '%s%%' % str(opts['warning']) + if 'easytier' in opts: + opts['easytier'] = 'on' if opts['easytier'] else 'off' + if 'autoexpand' in opts: + opts['autoexpand'] = 'on' if opts['autoexpand'] else 'off' + + if 'iogrp' in changes: + changes.remove('iogrp') + if state['code_level'] < (6, 4, 0, 0): + LOG.debug(_('Ignore change IO group as storage code level ' + 'is %(code_level)s, below then ' + '6.4.0.0') % {'code_level': state['code_level']}) + else: + self.ssh.movevdisk(vdisk, opts['iogrp']) + + for key in changes: + self.ssh.chvdisk(vdisk, ['-' + key, opts[key]]) diff --git a/cinder/volume/drivers/ibm/storwize_svc/ssh.py b/cinder/volume/drivers/ibm/storwize_svc/ssh.py new file mode 100644 index 0000000000..5ebfb6dbd3 --- /dev/null +++ b/cinder/volume/drivers/ibm/storwize_svc/ssh.py @@ -0,0 +1,412 @@ +# Copyright 2014 IBM Corp. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +import re + +from cinder import exception +from cinder.openstack.common import log as logging +from cinder.openstack.common import processutils + +LOG = logging.getLogger(__name__) + + +class StorwizeSSH(object): + """SSH interface to IBM Storwize family and SVC storage systems.""" + def __init__(self, run_ssh): + self._ssh = run_ssh + + def _run_ssh(self, ssh_cmd): + try: + return self._ssh(ssh_cmd) + except processutils.ProcessExecutionError as e: + msg = (_('CLI Exception output:\n command: %(cmd)s\n ' + 'stdout: %(out)s\n stderr: %(err)s') % + {'cmd': ssh_cmd, + 'out': e.stdout, + 'err': e.stderr}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + def run_ssh_info(self, ssh_cmd, delim='!', with_header=False): + """Run an SSH command and return parsed output.""" + raw = self._run_ssh(ssh_cmd) + return CLIResponse(raw, ssh_cmd=ssh_cmd, delim=delim, + with_header=with_header) + + def run_ssh_assert_no_output(self, ssh_cmd): + """Run an SSH command and assert no output returned.""" + out, err = self._run_ssh(ssh_cmd) + if len(out.strip()) != 0: + msg = (_('Expected no output from CLI command %(cmd)s, ' + 'got %(out)s') % {'cmd': ' '.join(ssh_cmd), 'out': out}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + def run_ssh_check_created(self, ssh_cmd): + """Run an SSH command and return the ID of the created object.""" + out, err = self._run_ssh(ssh_cmd) + try: + match_obj = re.search(r'\[([0-9]+)\],? successfully created', out) + return match_obj.group(1) + except (AttributeError, IndexError): + msg = (_('Failed to parse CLI output:\n command: %(cmd)s\n ' + 'stdout: %(out)s\n stderr: %(err)s') % + {'cmd': ssh_cmd, + 'out': out, + 'err': err}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + def lsnode(self, node_id=None): + with_header = True + ssh_cmd = ['svcinfo', 'lsnode', '-delim', '!'] + if node_id: + with_header = False + ssh_cmd.append(node_id) + return self.run_ssh_info(ssh_cmd, with_header=with_header) + + def lslicense(self): + ssh_cmd = ['svcinfo', 'lslicense', '-delim', '!'] + return self.run_ssh_info(ssh_cmd)[0] + + def lssystem(self): + ssh_cmd = ['svcinfo', 'lssystem', '-delim', '!'] + return self.run_ssh_info(ssh_cmd)[0] + + def lsmdiskgrp(self, pool): + ssh_cmd = ['svcinfo', 'lsmdiskgrp', '-bytes', '-delim', '!', pool] + return self.run_ssh_info(ssh_cmd)[0] + + def lsiogrp(self): + ssh_cmd = ['svcinfo', 'lsiogrp', '-delim', '!'] + return self.run_ssh_info(ssh_cmd, with_header=True) + + def lsportip(self): + ssh_cmd = ['svcinfo', 'lsportip', '-delim', '!'] + return self.run_ssh_info(ssh_cmd, with_header=True) + + @staticmethod + def _create_port_arg(port_type, port_name): + if port_type == 'initiator': + port = ['-iscsiname'] + else: + port = ['-hbawwpn'] + port.append(port_name) + return port + + def mkhost(self, host_name, port_type, port_name): + port = self._create_port_arg(port_type, port_name) + ssh_cmd = ['svctask', 'mkhost', '-force'] + port + ['-name', host_name] + return self.run_ssh_check_created(ssh_cmd) + + def addhostport(self, host, port_type, port_name): + port = self._create_port_arg(port_type, port_name) + ssh_cmd = ['svctask', 'addhostport', '-force'] + port + [host] + self.run_ssh_assert_no_output(ssh_cmd) + + def lshost(self, host=None): + with_header = True + ssh_cmd = ['svcinfo', 'lshost', '-delim', '!'] + if host: + with_header = False + ssh_cmd.append(host) + return self.run_ssh_info(ssh_cmd, with_header=with_header) + + def add_chap_secret(self, secret, host): + 
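+        # 'chhost -chapsecret' stores the secret the host must present when
+        # authenticating to the iSCSI target.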
ssh_cmd = ['svctask', 'chhost', '-chapsecret', secret, host] + self.run_ssh_assert_no_output(ssh_cmd) + + def lsiscsiauth(self): + ssh_cmd = ['svcinfo', 'lsiscsiauth', '-delim', '!'] + return self.run_ssh_info(ssh_cmd, with_header=True) + + def lsfabric(self, wwpn=None, host=None): + if wwpn: + ssh_cmd = ['svcinfo', 'lsfabric', '-wwpn', wwpn, '-delim', '!'] + elif host: + ssh_cmd = ['svcinfo', 'lsfabric', '-host', host] + else: + msg = (_('Must pass wwpn or host to lsfabric.')) + LOG.error(msg) + raise exception.VolumeDriverException(message=msg) + return self.run_ssh_info(ssh_cmd, with_header=True) + + def mkvdiskhostmap(self, host, vdisk, lun, multihostmap): + """Map vdisk to host. + + If vdisk already mapped and multihostmap is True, use the force flag. + """ + ssh_cmd = ['svctask', 'mkvdiskhostmap', '-host', host, + '-scsi', lun, vdisk] + out, err = self._ssh(ssh_cmd, check_exit_code=False) + if 'successfully created' in out: + return + if not err: + msg = (_('Did not find success message nor error for %(fun)s: ' + '%(out)s') % {'out': out, 'fun': ssh_cmd}) + raise exception.VolumeBackendAPIException(data=msg) + if err.startswith('CMMVC6071E'): + if not multihostmap: + LOG.error(_('storwize_svc_multihostmap_enabled is set ' + 'to False, not allowing multi host mapping.')) + msg = 'CMMVC6071E The VDisk-to-host mapping '\ + 'was not created because the VDisk is '\ + 'already mapped to a host.\n"' + raise exception.VolumeDriverException(message=msg) + + ssh_cmd.insert(ssh_cmd.index('mkvdiskhostmap') + 1, '-force') + return self.run_ssh_check_created(ssh_cmd) + + def rmvdiskhostmap(self, host, vdisk): + ssh_cmd = ['svctask', 'rmvdiskhostmap', '-host', host, vdisk] + self.run_ssh_assert_no_output(ssh_cmd) + + def lsvdiskhostmap(self, vdisk): + ssh_cmd = ['svcinfo', 'lsvdiskhostmap', '-delim', '!', vdisk] + return self.run_ssh_info(ssh_cmd, with_header=True) + + def lshostvdiskmap(self, host): + ssh_cmd = ['svcinfo', 'lshostvdiskmap', '-delim', '!', host] + return self.run_ssh_info(ssh_cmd, with_header=True) + + def rmhost(self, host): + ssh_cmd = ['svctask', 'rmhost', host] + self.run_ssh_assert_no_output(ssh_cmd) + + def mkvdisk(self, name, size, units, pool, opts, params): + ssh_cmd = ['svctask', 'mkvdisk', '-name', name, '-mdiskgrp', pool, + '-iogrp', str(opts['iogrp']), '-size', size, '-unit', + units] + params + return self.run_ssh_check_created(ssh_cmd) + + def rmvdisk(self, vdisk, force=True): + ssh_cmd = ['svctask', 'rmvdisk'] + if force: + ssh_cmd += ['-force'] + ssh_cmd += [vdisk] + self.run_ssh_assert_no_output(ssh_cmd) + + def lsvdisk(self, vdisk): + """Return vdisk attributes or None if it doesn't exist.""" + ssh_cmd = ['svcinfo', 'lsvdisk', '-bytes', '-delim', '!', vdisk] + out, err = self._ssh(ssh_cmd, check_exit_code=False) + if not len(err): + return CLIResponse((out, err), ssh_cmd=ssh_cmd, delim='!', + with_header=False)[0] + if err.startswith('CMMVC5754E'): + return None + msg = (_('CLI Exception output:\n command: %(cmd)s\n ' + 'stdout: %(out)s\n stderr: %(err)s') % + {'cmd': ssh_cmd, + 'out': out, + 'err': err}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + def chvdisk(self, vdisk, params): + ssh_cmd = ['svctask', 'chvdisk'] + params + [vdisk] + self.run_ssh_assert_no_output(ssh_cmd) + + def movevdisk(self, vdisk, iogrp): + ssh_cmd = ['svctask', 'movevdisk', '-iogrp', iogrp, vdisk] + self.run_ssh_assert_no_output(ssh_cmd) + + def expandvdisksize(self, vdisk, amount): + ssh_cmd = (['svctask', 'expandvdisksize', '-size', str(amount), + 
'-unit', 'gb', vdisk]) + self.run_ssh_assert_no_output(ssh_cmd) + + def migratevdisk(self, vdisk, dest_pool): + ssh_cmd = ['svctask', 'migratevdisk', '-mdiskgrp', dest_pool, + '-vdisk', vdisk] + self.run_ssh_assert_no_output(ssh_cmd) + + def mkfcmap(self, source, target, full_copy): + ssh_cmd = ['svctask', 'mkfcmap', '-source', source, '-target', + target, '-autodelete'] + if not full_copy: + ssh_cmd.extend(['-copyrate', '0']) + out, err = self._ssh(ssh_cmd, check_exit_code=False) + if 'successfully created' not in out: + msg = (_('CLI Exception output:\n command: %(cmd)s\n ' + 'stdout: %(out)s\n stderr: %(err)s') % + {'cmd': ssh_cmd, + 'out': out, + 'err': err}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + try: + match_obj = re.search(r'FlashCopy Mapping, id \[([0-9]+)\], ' + 'successfully created', out) + fc_map_id = match_obj.group(1) + except (AttributeError, IndexError): + msg = (_('Failed to parse CLI output:\n command: %(cmd)s\n ' + 'stdout: %(out)s\n stderr: %(err)s') % + {'cmd': ssh_cmd, + 'out': out, + 'err': err}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + return fc_map_id + + def prestartfcmap(self, fc_map_id): + ssh_cmd = ['svctask', 'prestartfcmap', fc_map_id] + self.run_ssh_assert_no_output(ssh_cmd) + + def startfcmap(self, fc_map_id): + ssh_cmd = ['svctask', 'startfcmap', fc_map_id] + self.run_ssh_assert_no_output(ssh_cmd) + + def chfcmap(self, fc_map_id, copyrate='50', autodel='on'): + ssh_cmd = ['svctask', 'chfcmap', '-copyrate', copyrate, + '-autodelete', autodel, fc_map_id] + self.run_ssh_assert_no_output(ssh_cmd) + + def stopfcmap(self, fc_map_id): + ssh_cmd = ['svctask', 'stopfcmap', fc_map_id] + self.run_ssh_assert_no_output(ssh_cmd) + + def rmfcmap(self, fc_map_id): + ssh_cmd = ['svctask', 'rmfcmap', '-force', fc_map_id] + self.run_ssh_assert_no_output(ssh_cmd) + + def lsvdiskfcmappings(self, vdisk): + ssh_cmd = ['svcinfo', 'lsvdiskfcmappings', '-delim', '!', vdisk] + return self.run_ssh_info(ssh_cmd, with_header=True) + + def lsfcmap(self, fc_map_id): + ssh_cmd = ['svcinfo', 'lsfcmap', '-filtervalue', + 'id=%s' % fc_map_id, '-delim', '!'] + return self.run_ssh_info(ssh_cmd, with_header=True) + + def addvdiskcopy(self, vdisk, dest_pool, params): + ssh_cmd = (['svctask', 'addvdiskcopy'] + params + ['-mdiskgrp', + dest_pool, vdisk]) + return self.run_ssh_check_created(ssh_cmd) + + def lsvdiskcopy(self, vdisk, copy_id=None): + ssh_cmd = ['svcinfo', 'lsvdiskcopy', '-delim', '!'] + with_header = True + if copy_id: + ssh_cmd += ['-copy', copy_id] + with_header = False + ssh_cmd += [vdisk] + return self.run_ssh_info(ssh_cmd, with_header=with_header) + + def rmvdiskcopy(self, vdisk, copy_id): + ssh_cmd = ['svctask', 'rmvdiskcopy', '-copy', copy_id, vdisk] + self.run_ssh_assert_no_output(ssh_cmd) + + +class CLIResponse(object): + '''Parse SVC CLI output and generate iterable.''' + + def __init__(self, raw, ssh_cmd=None, delim='!', with_header=True): + super(CLIResponse, self).__init__() + if ssh_cmd: + self.ssh_cmd = ' '.join(ssh_cmd) + else: + self.ssh_cmd = 'None' + self.raw = raw + self.delim = delim + self.with_header = with_header + self.result = self._parse() + + def select(self, *keys): + for a in self.result: + vs = [] + for k in keys: + v = a.get(k, None) + if isinstance(v, basestring) or v is None: + v = [v] + if isinstance(v, list): + vs.append(v) + for item in zip(*vs): + if len(item) == 1: + yield item[0] + else: + yield item + + def __getitem__(self, key): + try: + return self.result[key] + except 
(KeyError, IndexError):
+            msg = (_('Did not find expected key %(key)s in %(fun)s: %(raw)s') %
+                   {'key': key, 'fun': self.ssh_cmd, 'raw': self.raw})
+            raise exception.VolumeBackendAPIException(data=msg)
+
+    def __iter__(self):
+        for a in self.result:
+            yield a
+
+    def __len__(self):
+        return len(self.result)
+
+    def _parse(self):
+        def get_reader(content, delim):
+            for line in content.lstrip().splitlines():
+                line = line.strip()
+                if line:
+                    yield line.split(delim)
+                else:
+                    yield []
+
+        if isinstance(self.raw, basestring):
+            stdout, stderr = self.raw, ''
+        else:
+            stdout, stderr = self.raw
+        reader = get_reader(stdout, self.delim)
+        result = []
+
+        if self.with_header:
+            hds = tuple()
+            for row in reader:
+                hds = row
+                break
+            for row in reader:
+                cur = dict()
+                if len(hds) != len(row):
+                    msg = (_('Unexpected CLI response: header/row mismatch. '
+                             'header: %(header)s, row: %(row)s')
+                           % {'header': str(hds), 'row': str(row)})
+                    raise exception.VolumeBackendAPIException(data=msg)
+                for k, v in zip(hds, row):
+                    CLIResponse.append_dict(cur, k, v)
+                result.append(cur)
+        else:
+            cur = dict()
+            for row in reader:
+                if row:
+                    CLIResponse.append_dict(cur, row[0], ' '.join(row[1:]))
+                elif cur:  # start new section
+                    result.append(cur)
+                    cur = dict()
+            if cur:
+                result.append(cur)
+        return result
+
+    @staticmethod
+    def append_dict(dict_, key, value):
+        key, value = key.strip(), value.strip()
+        obj = dict_.get(key, None)
+        if obj is None:
+            dict_[key] = value
+        elif isinstance(obj, list):
+            obj.append(value)
+            dict_[key] = obj
+        else:
+            dict_[key] = [obj, value]
+        return dict_
diff --git a/cinder/volume/drivers/lvm.py b/cinder/volume/drivers/lvm.py
new file mode 100644
index 0000000000..fd4378c41a
--- /dev/null
+++ b/cinder/volume/drivers/lvm.py
@@ -0,0 +1,753 @@
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Driver for Linux servers running LVM.
+"""
+
+import os
+import re
+import socket
+
+from oslo.config import cfg
+
+from cinder.brick import exception as brick_exception
+from cinder.brick.iscsi import iscsi
+from cinder.brick.local_dev import lvm as lvm
+from cinder import exception
+from cinder.image import image_utils
+from cinder.openstack.common import fileutils
+from cinder.openstack.common import log as logging
+from cinder.openstack.common import processutils
+from cinder import utils
+from cinder.volume import driver
+from cinder.volume import utils as volutils
+
+LOG = logging.getLogger(__name__)
+
+volume_opts = [
+    cfg.StrOpt('volume_group',
+               default='cinder-volumes',
+               help='Name for the VG that will contain exported volumes'),
+    cfg.IntOpt('lvm_mirrors',
+               default=0,
+               help='If set, create lvms with multiple mirrors. Note that '
+                    'this requires lvm_mirrors + 2 pvs with available space'),
+    cfg.StrOpt('lvm_type',
+               default='default',
+               help='Type of LVM volumes to deploy; (default or thin)'),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(volume_opts)
+
+
+class LVMVolumeDriver(driver.VolumeDriver):
+    """Executes commands relating to Volumes."""
+
+    VERSION = '2.0.0'
+
+    def __init__(self, vg_obj=None, *args, **kwargs):
+        super(LVMVolumeDriver, self).__init__(*args, **kwargs)
+        self.configuration.append_config_values(volume_opts)
+        self.hostname = socket.gethostname()
+        self.vg = vg_obj
+        self.backend_name =\
+            self.configuration.safe_get('volume_backend_name') or 'LVM'
+        self.protocol = 'local'
+
+    def set_execute(self, execute):
+        self._execute = execute
+
+    def check_for_setup_error(self):
+        """Verify that requirements are in place to use LVM driver."""
+        if self.vg is None:
+            root_helper = utils.get_root_helper()
+            try:
+                self.vg = lvm.LVM(self.configuration.volume_group,
+                                  root_helper,
+                                  lvm_type=self.configuration.lvm_type,
+                                  executor=self._execute)
+            except brick_exception.VolumeGroupNotFound:
+                message = (_("Volume Group %s does not exist") %
+                           self.configuration.volume_group)
+                raise exception.VolumeBackendAPIException(data=message)
+
+        vg_list = volutils.get_all_volume_groups(
+            self.configuration.volume_group)
+        vg_dict = next((vg for vg in vg_list
+                        if vg['name'] == self.vg.vg_name), None)
+        if vg_dict is None:
+            message = (_("Volume Group %s does not exist") %
+                       self.configuration.volume_group)
+            raise exception.VolumeBackendAPIException(data=message)
+
+        if self.configuration.lvm_type == 'thin':
+            # Specific checks for using Thin provisioned LV's
+            if not volutils.supports_thin_provisioning():
+                message = _("Thin provisioning not supported "
+                            "on this version of LVM.")
+                raise exception.VolumeBackendAPIException(data=message)
+
+            pool_name = "%s-pool" % self.configuration.volume_group
+            if self.vg.get_volume(pool_name) is None:
+                try:
+                    self.vg.create_thin_pool(pool_name)
+                except processutils.ProcessExecutionError as exc:
+                    exception_message = (_("Failed to create thin pool, "
+                                           "error message was: %s")
+                                         % exc.stderr)
+                    raise exception.VolumeBackendAPIException(
+                        data=exception_message)
+
+    def _sizestr(self, size_in_g):
+        if int(size_in_g) == 0:
+            return '100m'
+        return '%sg' % size_in_g
+
+    def _volume_not_present(self, volume_name):
+        return self.vg.get_volume(volume_name) is None
+
+    def _delete_volume(self, volume, is_snapshot=False):
+        """Deletes a logical volume."""
+
+        # zero out old volumes to prevent data leaking between users
+        # TODO(ja): reclaiming space should be done lazy and low priority
+        if self.configuration.lvm_type != 'thin' and \
+                self.configuration.volume_clear != 'none':
+            if is_snapshot:
+                # If the volume to be cleared is a snapshot of another volume,
+                # we need to clear it out using the -cow device instead of
+                # the volume path directly. We need to skip this if we are
+                # using thin provisioned LVs.
+                # bug# lp1191812
+                dev_path = self.local_path(volume) + "-cow"
+            else:
+                dev_path = self.local_path(volume)
+
+            # TODO(jdg): Maybe we could optimize this for snaps by looking at
+            # the cow table and only overwriting what's necessary?
+            # For now we're still skipping on snaps due to a hang issue.
+            if not os.path.exists(dev_path):
+                msg = (_('Volume device file path %s does not exist.')
+                       % dev_path)
+                LOG.error(msg)
+                raise exception.VolumeBackendAPIException(data=msg)
+
+            size_in_g = volume.get('size', volume.get('volume_size', None))
+            if size_in_g is None:
+                msg = (_("Size for volume: %s not found, "
+                         "cannot secure delete.") % volume['id'])
+                LOG.error(msg)
+                raise exception.InvalidParameterValue(msg)
+            vol_size = size_in_g * 1024
+
+            volutils.clear_volume(
+                vol_size, dev_path,
+                volume_clear=self.configuration.volume_clear,
+                volume_clear_size=self.configuration.volume_clear_size)
+        name = volume['name']
+        if is_snapshot:
+            name = self._escape_snapshot(volume['name'])
+        self.vg.delete(name)
+
+    def _escape_snapshot(self, snapshot_name):
+        # Linux LVM reserves names that start with 'snapshot', so such
+        # volume names can't be created. Mangle them.
+        if not snapshot_name.startswith('snapshot'):
+            return snapshot_name
+        return '_' + snapshot_name
+
+    def _create_volume(self, name, size, lvm_type, mirror_count, vg=None):
+        vg_ref = self.vg
+        if vg is not None:
+            vg_ref = vg
+
+        vg_ref.create_volume(name, size, lvm_type, mirror_count)
+
+    def create_volume(self, volume):
+        """Creates a logical volume."""
+        mirror_count = 0
+        if self.configuration.lvm_mirrors:
+            mirror_count = self.configuration.lvm_mirrors
+
+        self._create_volume(volume['name'],
+                            self._sizestr(volume['size']),
+                            self.configuration.lvm_type,
+                            mirror_count)
+
+    def create_volume_from_snapshot(self, volume, snapshot):
+        """Creates a volume from a snapshot."""
+        self._create_volume(volume['name'],
+                            self._sizestr(volume['size']),
+                            self.configuration.lvm_type,
+                            self.configuration.lvm_mirrors)
+
+        # Some configurations of LVM do not automatically activate
+        # ThinLVM snapshot LVs.
+        self.vg.activate_lv(snapshot['name'], is_snapshot=True)
+
+        volutils.copy_volume(self.local_path(snapshot),
+                             self.local_path(volume),
+                             snapshot['volume_size'] * 1024,
+                             self.configuration.volume_dd_blocksize,
+                             execute=self._execute)
+
+    def delete_volume(self, volume):
+        """Deletes a logical volume."""
+
+        # NOTE(jdg): We don't need to explicitly call
+        # remove export here because we already did it
+        # in the manager before we got here.
+
+        if self._volume_not_present(volume['name']):
+            # If the volume isn't present, then don't attempt to delete
+            return True
+
+        if self.vg.lv_has_snapshot(volume['name']):
+            LOG.error(_('Unable to delete due to existing snapshot '
+                        'for volume: %s') % volume['name'])
+            raise exception.VolumeIsBusy(volume_name=volume['name'])
+
+        self._delete_volume(volume)
+
+    def create_snapshot(self, snapshot):
+        """Creates a snapshot."""
+
+        self.vg.create_lv_snapshot(self._escape_snapshot(snapshot['name']),
+                                   snapshot['volume_name'],
+                                   self.configuration.lvm_type)
+
+    def delete_snapshot(self, snapshot):
+        """Deletes a snapshot."""
+        if self._volume_not_present(self._escape_snapshot(snapshot['name'])):
+            # If the snapshot isn't present, then don't attempt to delete
+            LOG.warning(_("snapshot: %s not found, "
+                          "skipping delete operations") % snapshot['name'])
+            return True
+
+        # TODO(yamahata): zeroing out the whole snapshot triggers COW.
+        # it's quite slow.
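+        # NOTE: _delete_volume(..., is_snapshot=True) zeroes the '-cow'
+        # device rather than the origin volume, so only the snapshot's
+        # copy-on-write data is cleared before the LV is removed.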
+        self._delete_volume(snapshot, is_snapshot=True)
+
+    def local_path(self, volume, vg=None):
+        if vg is None:
+            vg = self.configuration.volume_group
+        # NOTE(vish): stops deprecation warning
+        escaped_group = vg.replace('-', '--')
+        escaped_name = self._escape_snapshot(volume['name']).replace('-', '--')
+        return "/dev/mapper/%s-%s" % (escaped_group, escaped_name)
+
+    def copy_image_to_volume(self, context, volume, image_service, image_id):
+        """Fetch the image from image_service and write it to the volume."""
+        image_utils.fetch_to_raw(context,
+                                 image_service,
+                                 image_id,
+                                 self.local_path(volume),
+                                 self.configuration.volume_dd_blocksize,
+                                 size=volume['size'])
+
+    def copy_volume_to_image(self, context, volume, image_service, image_meta):
+        """Copy the volume to the specified image."""
+        image_utils.upload_volume(context,
+                                  image_service,
+                                  image_meta,
+                                  self.local_path(volume))
+
+    def create_cloned_volume(self, volume, src_vref):
+        """Creates a clone of the specified volume."""
+
+        mirror_count = 0
+        if self.configuration.lvm_mirrors:
+            mirror_count = self.configuration.lvm_mirrors
+        LOG.info(_('Creating clone of volume: %s') % src_vref['id'])
+        volume_name = src_vref['name']
+        temp_id = 'tmp-snap-%s' % volume['id']
+        temp_snapshot = {'volume_name': volume_name,
+                         'size': src_vref['size'],
+                         'volume_size': src_vref['size'],
+                         'name': 'clone-snap-%s' % volume['id'],
+                         'id': temp_id}
+
+        self.create_snapshot(temp_snapshot)
+        self._create_volume(volume['name'],
+                            self._sizestr(volume['size']),
+                            self.configuration.lvm_type,
+                            mirror_count)
+
+        self.vg.activate_lv(temp_snapshot['name'], is_snapshot=True)
+
+        try:
+            volutils.copy_volume(
+                self.local_path(temp_snapshot),
+                self.local_path(volume),
+                src_vref['size'] * 1024,
+                self.configuration.volume_dd_blocksize,
+                execute=self._execute)
+        finally:
+            self.delete_snapshot(temp_snapshot)
+
+    def clone_image(self, volume, image_location, image_id, image_meta):
+        return None, False
+
+    def backup_volume(self, context, backup, backup_service):
+        """Create a new backup from an existing volume."""
+        volume = self.db.volume_get(context, backup['volume_id'])
+        volume_path = self.local_path(volume)
+        with utils.temporary_chown(volume_path):
+            with fileutils.file_open(volume_path) as volume_file:
+                backup_service.backup(backup, volume_file)
+
+    def restore_backup(self, context, backup, volume, backup_service):
+        """Restore an existing backup to a new or existing volume."""
+        volume_path = self.local_path(volume)
+        with utils.temporary_chown(volume_path):
+            with fileutils.file_open(volume_path, 'wb') as volume_file:
+                backup_service.restore(backup, volume['id'], volume_file)
+
+    def get_volume_stats(self, refresh=False):
+        """Get volume stats.
+
+        If 'refresh' is True, update the stats first.
+        """
+
+        if refresh:
+            self._update_volume_stats()
+
+        return self._stats
+
+    def _update_volume_stats(self):
+        """Retrieve stats info from volume group."""
+
+        LOG.debug(_("Updating volume stats"))
+        if self.vg is None:
+            LOG.warning(_('Unable to update stats on non-initialized '
+                          'Volume Group: %s'),
+                        self.configuration.volume_group)
+            return
+
+        self.vg.update_volume_group_info()
+        data = {}
+
+        # NOTE(zhiteng): This information is driver/backend specific;
+        # each driver may define these values in its own config options
+        # or fetch them from a driver-specific configuration file.
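+        # Illustrative example of a fully populated stats dict (values are
+        # hypothetical):
+        #     {'volume_backend_name': 'LVM', 'vendor_name': 'Open Source',
+        #      'driver_version': '2.0.0', 'storage_protocol': 'local',
+        #      'total_capacity_gb': 100.0, 'free_capacity_gb': 40.0,
+        #      'reserved_percentage': 0, 'QoS_support': False,
+        #      'location_info': 'LVMVolumeDriver:host1:cinder-volumes'
+        #                       ':default:0'}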
+ data["volume_backend_name"] = self.backend_name + data["vendor_name"] = 'Open Source' + data["driver_version"] = self.VERSION + data["storage_protocol"] = self.protocol + + if self.configuration.lvm_mirrors > 0: + data['total_capacity_gb'] =\ + self.vg.vg_mirror_size(self.configuration.lvm_mirrors) + data['free_capacity_gb'] =\ + self.vg.vg_mirror_free_space(self.configuration.lvm_mirrors) + elif self.configuration.lvm_type == 'thin': + data['total_capacity_gb'] = self.vg.vg_thin_pool_size + data['free_capacity_gb'] = self.vg.vg_thin_pool_free_space + else: + data['total_capacity_gb'] = self.vg.vg_size + data['free_capacity_gb'] = self.vg.vg_free_space + data['reserved_percentage'] = self.configuration.reserved_percentage + data['QoS_support'] = False + data['location_info'] =\ + ('LVMVolumeDriver:%(hostname)s:%(vg)s' + ':%(lvm_type)s:%(lvm_mirrors)s' % + {'hostname': self.hostname, + 'vg': self.configuration.volume_group, + 'lvm_type': self.configuration.lvm_type, + 'lvm_mirrors': self.configuration.lvm_mirrors}) + + self._stats = data + + def extend_volume(self, volume, new_size): + """Extend an existing volume's size.""" + self.vg.extend_volume(volume['name'], + self._sizestr(new_size)) + + +class LVMISCSIDriver(LVMVolumeDriver, driver.ISCSIDriver): + """Executes commands relating to ISCSI volumes. + + We make use of model provider properties as follows: + + ``provider_location`` + if present, contains the iSCSI target information in the same + format as an ietadm discovery + i.e. ':, ' + + ``provider_auth`` + if present, contains a space-separated triple: + ' '. + `CHAP` is the only auth_method in use at the moment. + """ + + def __init__(self, *args, **kwargs): + self.tgtadm = self.get_target_admin() + super(LVMISCSIDriver, self).__init__(*args, **kwargs) + self.backend_name =\ + self.configuration.safe_get('volume_backend_name') or 'LVM_iSCSI' + self.protocol = 'iSCSI' + + def set_execute(self, execute): + super(LVMISCSIDriver, self).set_execute(execute) + self.tgtadm.set_execute(execute) + + def _create_tgtadm_target(self, iscsi_name, iscsi_target, + volume_path, chap_auth, lun=0, + check_exit_code=False, old_name=None): + # NOTE(jdg): tgt driver has an issue where with a lot of activity + # (or sometimes just randomly) it will get *confused* and attempt + # to reuse a target ID, resulting in a target already exists error + # Typically a simple retry will address this + + # For now we have this while loop, might be useful in the + # future to throw a retry decorator in common or utils + attempts = 2 + while attempts > 0: + attempts -= 1 + try: + # NOTE(jdg): For TgtAdm case iscsi_name is all we need + # should clean this all up at some point in the future + tid = self.tgtadm.create_iscsi_target( + iscsi_name, + iscsi_target, + 0, + volume_path, + chap_auth, + check_exit_code=check_exit_code, + old_name=old_name) + break + + except brick_exception.ISCSITargetCreateFailed: + if attempts == 0: + raise + else: + LOG.warning(_('Error creating iSCSI target, retrying ' + 'creation for target: %s') % iscsi_name) + return tid + + def ensure_export(self, context, volume): + """Synchronously recreates an export for a logical volume.""" + # NOTE(jdg): tgtadm doesn't use the iscsi_targets table + # TODO(jdg): In the future move all of the dependent stuff into the + # corresponding target admin class + + if isinstance(self.tgtadm, iscsi.LioAdm): + try: + volume_info = self.db.volume_get(context, volume['id']) + (auth_method, + auth_user, + auth_pass) = volume_info['provider_auth'].split(' ', 3) 
+                chap_auth = self._iscsi_authentication(auth_method,
+                                                       auth_user,
+                                                       auth_pass)
+            except exception.NotFound:
+                LOG.debug(_("No volume info found for volume: %s"),
+                          volume['id'])
+                LOG.info(_("Skipping ensure_export. No iscsi_target "
+                           "provisioned for volume: %s"), volume['id'])
+                return
+
+            iscsi_name = "%s%s" % (self.configuration.iscsi_target_prefix,
+                                   volume['name'])
+            volume_path = "/dev/%s/%s" % (self.configuration.volume_group,
+                                          volume['name'])
+            iscsi_target = 1
+
+            self._create_tgtadm_target(iscsi_name, iscsi_target,
+                                       volume_path, chap_auth)
+
+            return
+
+        if not isinstance(self.tgtadm, iscsi.TgtAdm):
+            try:
+                iscsi_target = self.db.volume_get_iscsi_target_num(
+                    context,
+                    volume['id'])
+            except exception.NotFound:
+                LOG.info(_("Skipping ensure_export. No iscsi_target "
+                           "provisioned for volume: %s"), volume['id'])
+                return
+        else:
+            iscsi_target = 1  # dummy value when using TgtAdm
+
+        chap_auth = None
+
+        # Check for https://bugs.launchpad.net/cinder/+bug/1065702
+        old_name = None
+        volume_name = volume['name']
+        if (volume['provider_location'] is not None and
+                volume['name'] not in volume['provider_location']):
+
+            msg = _('Detected inconsistency in provider_location id')
+            LOG.debug('%s', msg)
+            old_name = self._fix_id_migration(context, volume)
+            if 'in-use' in volume['status']:
+                volume_name = old_name
+                old_name = None
+
+        iscsi_name = "%s%s" % (self.configuration.iscsi_target_prefix,
+                               volume_name)
+        volume_path = "/dev/%s/%s" % (self.configuration.volume_group,
+                                      volume_name)
+
+        # NOTE(jdg): For the TgtAdm case iscsi_name is the ONLY param we
+        # need; should clean this all up at some point in the future
+        self._create_tgtadm_target(iscsi_name, iscsi_target,
+                                   volume_path, chap_auth,
+                                   lun=0,
+                                   check_exit_code=False,
+                                   old_name=old_name)
+
+        return
+
+    def _fix_id_migration(self, context, volume):
+        """Fix provider_location and dev files to address bug 1065702.
+
+        For volumes where provider_location has NOT been updated
+        and that are not currently in-use, we'll create a new iscsi target
+        and remove the persist file.
+
+        If the volume is in-use, we'll just stick with the old name
+        and when detach is called we'll feed back into ensure_export
+        again if necessary and fix things up then.
+ + Details at: https://bugs.launchpad.net/cinder/+bug/1065702 + """ + + model_update = {} + pattern = re.compile(r":|\s") + fields = pattern.split(volume['provider_location']) + old_name = fields[3] + + volume['provider_location'] = \ + volume['provider_location'].replace(old_name, volume['name']) + model_update['provider_location'] = volume['provider_location'] + + self.db.volume_update(context, volume['id'], model_update) + + start = os.getcwd() + os.chdir('/dev/%s' % self.configuration.volume_group) + + try: + (out, err) = self._execute('readlink', old_name) + except processutils.ProcessExecutionError: + link_path = '/dev/%s/%s' % (self.configuration.volume_group, + old_name) + LOG.debug(_('Symbolic link %s not found') % link_path) + os.chdir(start) + return + + rel_path = out.rstrip() + self._execute('ln', + '-s', + rel_path, volume['name'], + run_as_root=True) + os.chdir(start) + return old_name + + def _ensure_iscsi_targets(self, context, host): + """Ensure that target ids have been created in datastore.""" + # NOTE(jdg): tgtadm doesn't use the iscsi_targets table + # TODO(jdg): In the future move all of the dependent stuff into the + # corresponding target admin class + if not isinstance(self.tgtadm, iscsi.TgtAdm): + host_iscsi_targets = self.db.iscsi_target_count_by_host(context, + host) + if host_iscsi_targets >= self.configuration.iscsi_num_targets: + return + + # NOTE(vish): Target ids start at 1, not 0. + target_end = self.configuration.iscsi_num_targets + 1 + for target_num in xrange(1, target_end): + target = {'host': host, 'target_num': target_num} + self.db.iscsi_target_create_safe(context, target) + + def create_export(self, context, volume): + return self._create_export(context, volume) + + def _create_export(self, context, volume, vg=None): + """Creates an export for a logical volume.""" + if vg is None: + vg = self.configuration.volume_group + + iscsi_name = "%s%s" % (self.configuration.iscsi_target_prefix, + volume['name']) + volume_path = "/dev/%s/%s" % (vg, volume['name']) + model_update = {} + + # TODO(jdg): In the future move all of the dependent stuff into the + # corresponding target admin class + if not isinstance(self.tgtadm, iscsi.TgtAdm): + lun = 0 + self._ensure_iscsi_targets(context, volume['host']) + iscsi_target = self.db.volume_allocate_iscsi_target(context, + volume['id'], + volume['host']) + else: + lun = 1 # For tgtadm the controller is lun 0, dev starts at lun 1 + iscsi_target = 0 # NOTE(jdg): Not used by tgtadm + + # Use the same method to generate the username and the password. + chap_username = utils.generate_username() + chap_password = utils.generate_password() + chap_auth = self._iscsi_authentication('IncomingUser', chap_username, + chap_password) + + tid = self._create_tgtadm_target(iscsi_name, iscsi_target, + volume_path, chap_auth) + + model_update['provider_location'] = self._iscsi_location( + self.configuration.iscsi_ip_address, tid, iscsi_name, lun) + model_update['provider_auth'] = self._iscsi_authentication( + 'CHAP', chap_username, chap_password) + return model_update + + def remove_export(self, context, volume): + """Removes an export for a logical volume.""" + # NOTE(jdg): tgtadm doesn't use the iscsi_targets table + # TODO(jdg): In the future move all of the dependent stuff into the + # corresponding target admin class + + if isinstance(self.tgtadm, iscsi.LioAdm): + try: + iscsi_target = self.db.volume_get_iscsi_target_num( + context, + volume['id']) + except exception.NotFound: + LOG.info(_("Skipping remove_export. 
No iscsi_target " + "provisioned for volume: %s"), volume['id']) + return + + self.tgtadm.remove_iscsi_target(iscsi_target, 0, volume['id'], + volume['name']) + + return + + elif not isinstance(self.tgtadm, iscsi.TgtAdm): + try: + iscsi_target = self.db.volume_get_iscsi_target_num( + context, + volume['id']) + except exception.NotFound: + LOG.info(_("Skipping remove_export. No iscsi_target " + "provisioned for volume: %s"), volume['id']) + return + else: + iscsi_target = 0 + + try: + + # NOTE: provider_location may be unset if the volume hasn't + # been exported + location = volume['provider_location'].split(' ') + iqn = location[1] + + # ietadm show will exit with an error + # this export has already been removed + self.tgtadm.show_target(iscsi_target, iqn=iqn) + + except Exception: + LOG.info(_("Skipping remove_export. No iscsi_target " + "is presently exported for volume: %s"), volume['id']) + return + + self.tgtadm.remove_iscsi_target(iscsi_target, 0, volume['name_id'], + volume['name']) + + def migrate_volume(self, ctxt, volume, host, thin=False, mirror_count=0): + """Optimize the migration if the destination is on the same server. + + If the specified host is another back-end on the same server, and + the volume is not attached, we can do the migration locally without + going through iSCSI. + """ + + false_ret = (False, None) + if volume['status'] != 'available': + return false_ret + if 'location_info' not in host['capabilities']: + return false_ret + info = host['capabilities']['location_info'] + try: + (dest_type, dest_hostname, dest_vg, lvm_type, lvm_mirrors) =\ + info.split(':') + lvm_mirrors = int(lvm_mirrors) + except ValueError: + return false_ret + if (dest_type != 'LVMVolumeDriver' or dest_hostname != self.hostname): + return false_ret + + if dest_vg != self.vg.vg_name: + vg_list = volutils.get_all_volume_groups() + vg_dict = \ + (vg for vg in vg_list if vg['name'] == dest_vg).next() + if vg_dict is None: + message = ("Destination Volume Group %s does not exist" % + dest_vg) + LOG.error(_('%s'), message) + return false_ret + + helper = utils.get_root_helper() + dest_vg_ref = lvm.LVM(dest_vg, helper, + lvm_type=lvm_type, + executor=self._execute) + self.remove_export(ctxt, volume) + self._create_volume(volume['name'], + self._sizestr(volume['size']), + lvm_type, + lvm_mirrors, + dest_vg_ref) + + volutils.copy_volume(self.local_path(volume), + self.local_path(volume, vg=dest_vg), + volume['size'], + self.configuration.volume_dd_blocksize, + execute=self._execute) + self._delete_volume(volume) + model_update = self._create_export(ctxt, volume, vg=dest_vg) + + return (True, model_update) + + def _iscsi_location(self, ip, target, iqn, lun=None): + return "%s:%s,%s %s %s" % (ip, self.configuration.iscsi_port, + target, iqn, lun) + + def _iscsi_authentication(self, chap, name, password): + return "%s %s %s" % (chap, name, password) + + +class LVMISERDriver(LVMISCSIDriver, driver.ISERDriver): + """Executes commands relating to ISER volumes. + + We make use of model provider properties as follows: + + ``provider_location`` + if present, contains the iSER target information in the same + format as an ietadm discovery + i.e. ':, ' + + ``provider_auth`` + if present, contains a space-separated triple: + ' '. + `CHAP` is the only auth_method in use at the moment. 
+ """ + + def __init__(self, *args, **kwargs): + self.tgtadm = self.get_target_admin() + LVMVolumeDriver.__init__(self, *args, **kwargs) + self.backend_name =\ + self.configuration.safe_get('volume_backend_name') or 'LVM_iSER' + self.protocol = 'iSER' diff --git a/cinder/volume/drivers/netapp/__init__.py b/cinder/volume/drivers/netapp/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cinder/volume/drivers/netapp/api.py b/cinder/volume/drivers/netapp/api.py new file mode 100644 index 0000000000..640ec65556 --- /dev/null +++ b/cinder/volume/drivers/netapp/api.py @@ -0,0 +1,484 @@ +# Copyright (c) 2012 NetApp, Inc. +# Copyright (c) 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +NetApp api for ONTAP and OnCommand DFM. + +Contains classes required to issue api calls to ONTAP and OnCommand DFM. +""" + +from lxml import etree +import urllib2 + +from cinder.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + + +class NaServer(object): + """Encapsulates server connection logic.""" + + TRANSPORT_TYPE_HTTP = 'http' + TRANSPORT_TYPE_HTTPS = 'https' + SERVER_TYPE_FILER = 'filer' + SERVER_TYPE_DFM = 'dfm' + URL_FILER = 'servlets/netapp.servlets.admin.XMLrequest_filer' + URL_DFM = 'apis/XMLrequest' + NETAPP_NS = 'http://www.netapp.com/filer/admin' + STYLE_LOGIN_PASSWORD = 'basic_auth' + STYLE_CERTIFICATE = 'certificate_auth' + + def __init__(self, host, server_type=SERVER_TYPE_FILER, + transport_type=TRANSPORT_TYPE_HTTP, + style=STYLE_LOGIN_PASSWORD, username=None, + password=None): + self._host = host + self.set_server_type(server_type) + self.set_transport_type(transport_type) + self.set_style(style) + self._username = username + self._password = password + self._refresh_conn = True + + def get_transport_type(self): + """Get the transport type protocol.""" + return self._protocol + + def set_transport_type(self, transport_type): + """Set the transport type protocol for api. + + Supports http and https transport types. + """ + if transport_type.lower() not in ( + NaServer.TRANSPORT_TYPE_HTTP, + NaServer.TRANSPORT_TYPE_HTTPS): + raise ValueError('Unsupported transport type') + self._protocol = transport_type.lower() + if self._protocol == NaServer.TRANSPORT_TYPE_HTTP: + if self._server_type == NaServer.SERVER_TYPE_FILER: + self.set_port(80) + else: + self.set_port(8088) + else: + if self._server_type == NaServer.SERVER_TYPE_FILER: + self.set_port(443) + else: + self.set_port(8488) + self._refresh_conn = True + + def get_style(self): + """Get the authorization style for communicating with the server.""" + return self._auth_style + + def set_style(self, style): + """Set the authorization style for communicating with the server. + + Supports basic_auth for now. Certificate_auth mode to be done. 
+ """ + if style.lower() not in (NaServer.STYLE_LOGIN_PASSWORD, + NaServer.STYLE_CERTIFICATE): + raise ValueError('Unsupported authentication style') + self._auth_style = style.lower() + + def get_server_type(self): + """Get the target server type.""" + return self._server_type + + def set_server_type(self, server_type): + """Set the target server type. + + Supports filer and dfm server types. + """ + if server_type.lower() not in (NaServer.SERVER_TYPE_FILER, + NaServer.SERVER_TYPE_DFM): + raise ValueError('Unsupported server type') + self._server_type = server_type.lower() + if self._server_type == NaServer.SERVER_TYPE_FILER: + self._url = NaServer.URL_FILER + else: + self._url = NaServer.URL_DFM + self._ns = NaServer.NETAPP_NS + self._refresh_conn = True + + def set_api_version(self, major, minor): + """Set the api version.""" + try: + self._api_major_version = int(major) + self._api_minor_version = int(minor) + self._api_version = str(major) + "." + str(minor) + except ValueError: + raise ValueError('Major and minor versions must be integers') + self._refresh_conn = True + + def get_api_version(self): + """Gets the api version tuple.""" + if hasattr(self, '_api_version'): + return (self._api_major_version, self._api_minor_version) + return None + + def set_port(self, port): + """Set the server communication port.""" + try: + int(port) + except ValueError: + raise ValueError('Port must be integer') + self._port = str(port) + self._refresh_conn = True + + def get_port(self): + """Get the server communication port.""" + return self._port + + def set_timeout(self, seconds): + """Sets the timeout in seconds.""" + try: + self._timeout = int(seconds) + except ValueError: + raise ValueError('timeout in seconds must be integer') + + def get_timeout(self): + """Gets the timeout in seconds if set.""" + if hasattr(self, '_timeout'): + return self._timeout + return None + + def get_vfiler(self): + """Get the vfiler to use in tunneling.""" + return self._vfiler + + def set_vfiler(self, vfiler): + """Set the vfiler to use if tunneling gets enabled.""" + self._vfiler = vfiler + + def get_vserver(self): + """Get the vserver to use in tunneling.""" + return self._vserver + + def set_vserver(self, vserver): + """Set the vserver to use if tunneling gets enabled.""" + self._vserver = vserver + + def set_username(self, username): + """Set the user name for authentication.""" + self._username = username + self._refresh_conn = True + + def set_password(self, password): + """Set the password for authentication.""" + self._password = password + self._refresh_conn = True + + def invoke_elem(self, na_element, enable_tunneling=False): + """Invoke the api on the server.""" + if na_element and not isinstance(na_element, NaElement): + ValueError('NaElement must be supplied to invoke api') + request = self._create_request(na_element, enable_tunneling) + if not hasattr(self, '_opener') or not self._opener \ + or self._refresh_conn: + self._build_opener() + try: + if hasattr(self, '_timeout'): + response = self._opener.open(request, timeout=self._timeout) + else: + response = self._opener.open(request) + except urllib2.HTTPError as e: + raise NaApiError(e.code, e.msg) + except Exception as e: + raise NaApiError('Unexpected error', e) + xml = response.read() + return self._get_result(xml) + + def invoke_successfully(self, na_element, enable_tunneling=False): + """Invokes api and checks execution status as success. + + Need to set enable_tunneling to True explicitly to achieve it. 
+        This helps to use same connection instance to enable or disable
+        tunneling. The vserver or vfiler should be set before this call,
+        otherwise tunneling remains disabled.
+        """
+        result = self.invoke_elem(na_element, enable_tunneling)
+        if result.has_attr('status') and result.get_attr('status') == 'passed':
+            return result
+        code = result.get_attr('errno')\
+            or result.get_child_content('errorno')\
+            or 'ESTATUSFAILED'
+        msg = result.get_attr('reason')\
+            or result.get_child_content('reason')\
+            or 'Execution status is failed due to unknown reason'
+        raise NaApiError(code, msg)
+
+    def _create_request(self, na_element, enable_tunneling=False):
+        """Creates request in the desired format."""
+        netapp_elem = NaElement('netapp')
+        netapp_elem.add_attr('xmlns', self._ns)
+        if hasattr(self, '_api_version'):
+            netapp_elem.add_attr('version', self._api_version)
+        if enable_tunneling:
+            self._enable_tunnel_request(netapp_elem)
+        netapp_elem.add_child_elem(na_element)
+        request_d = netapp_elem.to_string()
+        request = urllib2.Request(
+            self._get_url(), data=request_d,
+            headers={'Content-Type': 'text/xml', 'charset': 'utf-8'})
+        return request
+
+    def _enable_tunnel_request(self, netapp_elem):
+        """Enables vserver or vfiler tunneling."""
+        if hasattr(self, '_vfiler') and self._vfiler:
+            if hasattr(self, '_api_major_version') and \
+                    hasattr(self, '_api_minor_version') and \
+                    self._api_major_version >= 1 and \
+                    self._api_minor_version >= 7:
+                netapp_elem.add_attr('vfiler', self._vfiler)
+            else:
+                raise ValueError('ontapi version has to be at least 1.7'
+                                 ' to send request to vfiler')
+        if hasattr(self, '_vserver') and self._vserver:
+            if hasattr(self, '_api_major_version') and \
+                    hasattr(self, '_api_minor_version') and \
+                    self._api_major_version >= 1 and \
+                    self._api_minor_version >= 15:
+                netapp_elem.add_attr('vfiler', self._vserver)
+            else:
+                raise ValueError('ontapi version has to be at least 1.15'
+                                 ' to send request to vserver')
+
+    def _parse_response(self, response):
+        """Get the NaElement for the response."""
+        if not response:
+            raise NaApiError('No response received')
+        xml = etree.XML(response)
+        return NaElement(xml)
+
+    def _get_result(self, response):
+        """Gets the call result."""
+        processed_response = self._parse_response(response)
+        return processed_response.get_child_by_name('results')
+
+    def _get_url(self):
+        return '%s://%s:%s/%s' % (self._protocol, self._host, self._port,
+                                  self._url)
+
+    def _build_opener(self):
+        if self._auth_style == NaServer.STYLE_LOGIN_PASSWORD:
+            auth_handler = self._create_basic_auth_handler()
+        else:
+            auth_handler = self._create_certificate_auth_handler()
+        opener = urllib2.build_opener(auth_handler)
+        self._opener = opener
+
+    def _create_basic_auth_handler(self):
+        password_man = urllib2.HTTPPasswordMgrWithDefaultRealm()
+        password_man.add_password(None, self._get_url(), self._username,
+                                  self._password)
+        auth_handler = urllib2.HTTPBasicAuthHandler(password_man)
+        return auth_handler
+
+    def _create_certificate_auth_handler(self):
+        raise NotImplementedError()
+
+    def __str__(self):
+        return "server: %s" % (self._host)
+
+
+class NaElement(object):
+    """Class wraps basic building block for NetApp api request."""
+
+    def __init__(self, name):
+        """Name of the element or etree.Element."""
+        if isinstance(name, etree._Element):
+            self._element = name
+        else:
+            self._element = etree.Element(name)
+
+    def get_name(self):
+        """Returns the tag name of the element."""
+        return self._element.tag
+
+    def set_content(self, text):
for the element.""" + self._element.text = text + + def get_content(self): + """Get the text for the element.""" + return self._element.text + + def add_attr(self, name, value): + """Add the attribute to the element.""" + self._element.set(name, value) + + def add_attrs(self, **attrs): + """Add multiple attributes to the element.""" + for attr in attrs.keys(): + self._element.set(attr, attrs.get(attr)) + + def add_child_elem(self, na_element): + """Add the child element to the element.""" + if isinstance(na_element, NaElement): + self._element.append(na_element._element) + return + raise + + def get_child_by_name(self, name): + """Get the child element by the tag name.""" + for child in self._element.iterchildren(): + if child.tag == name or etree.QName(child.tag).localname == name: + return NaElement(child) + return None + + def get_child_content(self, name): + """Get the content of the child.""" + for child in self._element.iterchildren(): + if child.tag == name or etree.QName(child.tag).localname == name: + return child.text + return None + + def get_children(self): + """Get the children for the element.""" + return [NaElement(el) for el in self._element.iterchildren()] + + def has_attr(self, name): + """Checks whether element has attribute.""" + attributes = self._element.attrib or {} + return name in attributes.keys() + + def get_attr(self, name): + """Get the attribute with the given name.""" + attributes = self._element.attrib or {} + return attributes.get(name) + + def get_attr_names(self): + """Returns the list of attribute names.""" + attributes = self._element.attrib or {} + return attributes.keys() + + def add_new_child(self, name, content, convert=False): + """Add child with tag name and context. + + Convert replaces entity refs to chars. + """ + child = NaElement(name) + if convert: + content = NaElement._convert_entity_refs(content) + child.set_content(content) + self.add_child_elem(child) + + @staticmethod + def _convert_entity_refs(text): + """Converts entity refs to chars to handle etree auto conversions.""" + text = text.replace("<", "<") + text = text.replace(">", ">") + return text + + @staticmethod + def create_node_with_children(node, **children): + """Creates and returns named node with children.""" + parent = NaElement(node) + for child in children.keys(): + parent.add_new_child(child, children.get(child, None)) + return parent + + def add_node_with_children(self, node, **children): + """Creates named node with children.""" + parent = NaElement.create_node_with_children(node, **children) + self.add_child_elem(parent) + + def to_string(self, pretty=False, method='xml', encoding='UTF-8'): + """Prints the element to string.""" + return etree.tostring(self._element, method=method, encoding=encoding, + pretty_print=pretty) + + def __getitem__(self, key): + """Dict getter method for NaElement. + + Returns NaElement list if present, + text value in case no NaElement node + children or attribute value if present. 
+ """ + + child = self.get_child_by_name(key) + if child: + if child.get_children(): + return child + else: + return child.get_content() + elif self.has_attr(key): + return self.get_attr(key) + raise KeyError(_('No element by given name %s.') % (key)) + + def __setitem__(self, key, value): + """Dict setter method for NaElement.""" + if key: + if value: + if isinstance(value, NaElement): + child = NaElement(key) + child.add_child_elem(value) + self.add_child_elem(child) + elif isinstance(value, str): + child = self.get_child_by_name(key) + if child: + child.set_content(value) + else: + self.add_new_child(key, value) + elif isinstance(value, dict): + child = NaElement(key) + child.translate_struct(value) + self.add_child_elem(child) + else: + raise TypeError(_('Not a valid value for NaElement.')) + else: + self.add_child_elem(NaElement(key)) + else: + raise KeyError(_('NaElement name cannot be null.')) + + def translate_struct(self, data_struct): + """Convert list, tuple, dict to NaElement and appends. + + Useful for NaElement queries which have unique + query parameters. + """ + + if isinstance(data_struct, list) or isinstance(data_struct, tuple): + for el in data_struct: + self.add_child_elem(NaElement(el)) + elif isinstance(data_struct, dict): + for k in data_struct.keys(): + child = NaElement(k) + if (isinstance(data_struct[k], dict) or + isinstance(data_struct[k], list) or + isinstance(data_struct[k], tuple)): + child.translate_struct(data_struct[k]) + else: + if data_struct[k]: + child.set_content(str(data_struct[k])) + self.add_child_elem(child) + else: + raise ValueError(_('Type cannot be converted into NaElement.')) + + +class NaApiError(Exception): + """Base exception class for NetApp api errors.""" + + def __init__(self, code='unknown', message='unknown'): + self.code = code + self.message = message + + def __str__(self, *args, **kwargs): + return 'NetApp api failed. Reason - %s:%s' % (self.code, self.message) + + +NaErrors = {'API_NOT_FOUND': NaApiError('13005', 'Unable to find API'), + 'INSUFFICIENT_PRIVS': NaApiError('13003', + 'Insufficient privileges')} diff --git a/cinder/volume/drivers/netapp/common.py b/cinder/volume/drivers/netapp/common.py new file mode 100644 index 0000000000..969414b14e --- /dev/null +++ b/cinder/volume/drivers/netapp/common.py @@ -0,0 +1,173 @@ +# Copyright (c) 2012 NetApp, Inc. +# Copyright (c) 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Unified driver for NetApp storage systems. + +Supports call to multiple storage systems of different families and protocols. +""" + +from cinder import exception +from cinder.openstack.common import importutils +from cinder.openstack.common import log as logging +from cinder.volume import driver +from cinder.volume.drivers.netapp.options import netapp_proxy_opts + + +LOG = logging.getLogger(__name__) + + +#NOTE(singn): Holds family:{protocol:driver} registration information. +#Plug in new families and protocols to support new drivers. 
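+#For example (hypothetical): supporting a new family 'foo' over iSCSI would
+#only need one more registry entry, e.g.
+#    'foo': {'iscsi': 'cinder.volume.drivers.foo.FooISCSIDriver'}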
+#No other code modification required.
+netapp_unified_plugin_registry =\
+    {'ontap_cluster':
+     {
+         'iscsi':
+         'cinder.volume.drivers.netapp.iscsi.NetAppDirectCmodeISCSIDriver',
+         'nfs': 'cinder.volume.drivers.netapp.nfs.NetAppDirectCmodeNfsDriver'
+     }, 'ontap_7mode':
+     {
+         'iscsi':
+         'cinder.volume.drivers.netapp.iscsi.NetAppDirect7modeISCSIDriver',
+         'nfs':
+         'cinder.volume.drivers.netapp.nfs.NetAppDirect7modeNfsDriver'
+     },
+     }
+
+#NOTE(singn): Holds family:protocol information.
+#Protocol represents the default protocol driver option
+#in case no protocol is specified by the user in configuration.
+netapp_family_default =\
+    {
+        'ontap_cluster': 'nfs',
+        'ontap_7mode': 'nfs'
+    }
+
+
+class NetAppDriver(object):
+    """NetApp unified block storage driver.
+
+    Acts as a mediator to NetApp storage drivers.
+    Proxies requests based on the storage family and protocol configured.
+    Override the proxy driver method by adding a method in this driver.
+    """
+
+    def __init__(self, *args, **kwargs):
+        super(NetAppDriver, self).__init__()
+        self.configuration = kwargs.get('configuration', None)
+        if self.configuration:
+            self.configuration.append_config_values(netapp_proxy_opts)
+        else:
+            raise exception.InvalidInput(
+                reason=_("Required configuration not found"))
+        self.driver = NetAppDriverFactory.create_driver(
+            self.configuration.netapp_storage_family,
+            self.configuration.netapp_storage_protocol,
+            *args, **kwargs)
+
+    def __setattr__(self, name, value):
+        """Sets the attribute."""
+        if getattr(self, 'driver', None):
+            self.driver.__setattr__(name, value)
+            return
+        object.__setattr__(self, name, value)
+
+    def __getattr__(self, name):
+        """Gets the attribute."""
+        drv = object.__getattribute__(self, 'driver')
+        return getattr(drv, name)
+
+
+class NetAppDriverFactory(object):
+    """Factory to instantiate appropriate NetApp driver."""
+
+    @staticmethod
+    def create_driver(
+            storage_family, storage_protocol, *args, **kwargs):
+        """Creates an appropriate driver based on family and protocol."""
+        fmt = {'storage_family': storage_family,
+               'storage_protocol': storage_protocol}
+        LOG.info(_('Requested unified config: %(storage_family)s and '
+                   '%(storage_protocol)s') % fmt)
+        storage_family = storage_family.lower()
+        family_meta = netapp_unified_plugin_registry.get(storage_family)
+        if family_meta is None:
+            raise exception.InvalidInput(
+                reason=_('Storage family %s is not supported')
+                % storage_family)
+        if storage_protocol is None:
+            storage_protocol = netapp_family_default.get(storage_family)
+            fmt['storage_protocol'] = storage_protocol
+        if storage_protocol is None:
+            raise exception.InvalidInput(
+                reason=_('No default storage protocol found'
+                         ' for storage family %(storage_family)s')
+                % fmt)
+        storage_protocol = storage_protocol.lower()
+        driver_loc = family_meta.get(storage_protocol)
+        if driver_loc is None:
+            raise exception.InvalidInput(
+                reason=_('Protocol %(storage_protocol)s is not supported'
+                         ' for storage family %(storage_family)s')
+                % fmt)
+        NetAppDriverFactory.check_netapp_driver(driver_loc)
+        kwargs = kwargs or {}
+        kwargs['netapp_mode'] = 'proxy'
+        driver = importutils.import_object(driver_loc, *args, **kwargs)
+        LOG.info(_('NetApp driver of family %(storage_family)s and protocol'
+                   ' %(storage_protocol)s loaded') % fmt)
+        return driver
+
+    @staticmethod
+    def check_netapp_driver(location):
+        """Checks if the driver requested is a netapp driver."""
+        if location.find(".netapp.") == -1:
+            raise exception.InvalidInput(
+                reason=_("Only loading netapp drivers supported."))
+
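+# Example (illustrative): with netapp_storage_family = 'ontap_cluster' and no
+# netapp_storage_protocol set, create_driver() falls back to the family
+# default ('nfs') and loads NetAppDirectCmodeNfsDriver.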
+
+class Deprecated(driver.VolumeDriver):
+    """Deprecated driver for NetApp.
+
+    This driver is used for mapping deprecated
+    drivers to itself in the manager. It prevents cinder
+    from erroring out during upgrade scenarios
+    and also suggests further steps.
+    """
+
+    def __init__(self, *args, **kwargs):
+        self._log_deprecated_warn()
+
+    def _log_deprecated_warn(self):
+        """Logs appropriate warning and suggestion."""
+
+        link = "https://communities.netapp.com/groups/openstack"
+        msg = _("The configured NetApp driver is deprecated. Please refer"
+                " to the following link to resolve the issue: '%s'.")
+        LOG.warn(msg % link)
+
+    def check_for_setup_error(self):
+        pass
+
+    def ensure_export(self, context, volume):
+        pass
+
+    def get_volume_stats(self, refresh=False):
+        """Return the current state of the volume service.
+
+        If 'refresh' is True, run the update first.
+        """
+        self._log_deprecated_warn()
+        return None
diff --git a/cinder/volume/drivers/netapp/iscsi.py b/cinder/volume/drivers/netapp/iscsi.py
new file mode 100644
index 0000000000..50581cf898
--- /dev/null
+++ b/cinder/volume/drivers/netapp/iscsi.py
@@ -0,0 +1,1495 @@
+# Copyright (c) 2012 NetApp, Inc.
+# Copyright (c) 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Volume driver for NetApp iSCSI storage systems.
+
+This driver requires NetApp Clustered Data ONTAP or 7-mode
+storage systems with installed iSCSI licenses.
+""" + +import copy +import sys +import time +import uuid + +from cinder import exception +from cinder.openstack.common import excutils +from cinder.openstack.common import log as logging +from cinder.openstack.common import timeutils +from cinder import units +from cinder import utils +from cinder.volume import driver +from cinder.volume.drivers.netapp.api import NaApiError +from cinder.volume.drivers.netapp.api import NaElement +from cinder.volume.drivers.netapp.api import NaServer +from cinder.volume.drivers.netapp.options import netapp_7mode_opts +from cinder.volume.drivers.netapp.options import netapp_basicauth_opts +from cinder.volume.drivers.netapp.options import netapp_cluster_opts +from cinder.volume.drivers.netapp.options import netapp_connection_opts +from cinder.volume.drivers.netapp.options import netapp_provisioning_opts +from cinder.volume.drivers.netapp.options import netapp_transport_opts +from cinder.volume.drivers.netapp import ssc_utils +from cinder.volume.drivers.netapp.utils import get_volume_extra_specs +from cinder.volume.drivers.netapp.utils import provide_ems +from cinder.volume.drivers.netapp.utils import set_safe_attr +from cinder.volume.drivers.netapp.utils import validate_instantiation +from cinder.volume import volume_types + + +LOG = logging.getLogger(__name__) + + +class NetAppLun(object): + """Represents a LUN on NetApp storage.""" + + def __init__(self, handle, name, size, metadata_dict): + self.handle = handle + self.name = name + self.size = size + self.metadata = metadata_dict or {} + + def get_metadata_property(self, prop): + """Get the metadata property of a LUN.""" + if prop in self.metadata: + return self.metadata[prop] + name = self.name + msg = _("No metadata property %(prop)s defined for the" + " LUN %(name)s") + msg_fmt = {'prop': prop, 'name': name} + LOG.debug(msg % msg_fmt) + + def __str__(self, *args, **kwargs): + return 'NetApp Lun[handle:%s, name:%s, size:%s, metadata:%s]'\ + % (self.handle, self.name, self.size, self.metadata) + + +class NetAppDirectISCSIDriver(driver.ISCSIDriver): + """NetApp Direct iSCSI volume driver.""" + + VERSION = "1.0.0" + + IGROUP_PREFIX = 'openstack-' + required_flags = ['netapp_transport_type', 'netapp_login', + 'netapp_password', 'netapp_server_hostname', + 'netapp_server_port'] + + def __init__(self, *args, **kwargs): + super(NetAppDirectISCSIDriver, self).__init__(*args, **kwargs) + validate_instantiation(**kwargs) + self.configuration.append_config_values(netapp_connection_opts) + self.configuration.append_config_values(netapp_basicauth_opts) + self.configuration.append_config_values(netapp_transport_opts) + self.configuration.append_config_values(netapp_provisioning_opts) + self.lun_table = {} + + def _create_client(self, **kwargs): + """Instantiate a client for NetApp server. + + This method creates NetApp server client for api communication. 
+ """ + + host_filer = kwargs['hostname'] + LOG.debug(_('Using NetApp filer: %s') % host_filer) + self.client = NaServer(host=host_filer, + server_type=NaServer.SERVER_TYPE_FILER, + transport_type=kwargs['transport_type'], + style=NaServer.STYLE_LOGIN_PASSWORD, + username=kwargs['login'], + password=kwargs['password']) + + def _do_custom_setup(self): + """Does custom setup depending on the type of filer.""" + raise NotImplementedError() + + def _check_flags(self): + """Ensure that the flags we care about are set.""" + required_flags = self.required_flags + for flag in required_flags: + if not getattr(self.configuration, flag, None): + msg = _('%s is not set') % flag + raise exception.InvalidInput(reason=msg) + + def do_setup(self, context): + """Setup the NetApp Volume driver. + + Called one time by the manager after the driver is loaded. + Validate the flags we care about and setup NetApp + client. + """ + + self._check_flags() + self._create_client( + transport_type=self.configuration.netapp_transport_type, + login=self.configuration.netapp_login, + password=self.configuration.netapp_password, + hostname=self.configuration.netapp_server_hostname, + port=self.configuration.netapp_server_port) + self._do_custom_setup() + + def check_for_setup_error(self): + """Check that the driver is working and can communicate. + + Discovers the LUNs on the NetApp server. + """ + + self.lun_table = {} + self._get_lun_list() + LOG.debug(_("Success getting LUN list from server")) + + def create_volume(self, volume): + """Driver entry point for creating a new volume.""" + default_size = '104857600' # 100 MB + gigabytes = 1073741824L # 2^30 + name = volume['name'] + if int(volume['size']) == 0: + size = default_size + else: + size = str(int(volume['size']) * gigabytes) + metadata = {} + metadata['OsType'] = 'linux' + metadata['SpaceReserved'] = 'true' + extra_specs = get_volume_extra_specs(volume) + self._create_lun_on_eligible_vol(name, size, metadata, extra_specs) + LOG.debug(_("Created LUN with name %s") % name) + handle = self._create_lun_handle(metadata) + self._add_lun_to_table(NetAppLun(handle, name, size, metadata)) + + def delete_volume(self, volume): + """Driver entry point for destroying existing volumes.""" + name = volume['name'] + metadata = self._get_lun_attr(name, 'metadata') + if not metadata: + msg = _("No entry in LUN table for volume/snapshot %(name)s.") + msg_fmt = {'name': name} + LOG.warn(msg % msg_fmt) + return + self._destroy_lun(metadata['Path']) + self.lun_table.pop(name) + + def _destroy_lun(self, path, force=True): + """Destroys the lun at the path.""" + lun_destroy = NaElement.create_node_with_children( + 'lun-destroy', + **{'path': path}) + if force: + lun_destroy.add_new_child('force', 'true') + self.client.invoke_successfully(lun_destroy, True) + seg = path.split("/") + LOG.debug(_("Destroyed LUN %s") % seg[-1]) + + def ensure_export(self, context, volume): + """Driver entry point to get the export info for an existing volume.""" + handle = self._get_lun_attr(volume['name'], 'handle') + return {'provider_location': handle} + + def create_export(self, context, volume): + """Driver entry point to get the export info for a new volume.""" + handle = self._get_lun_attr(volume['name'], 'handle') + return {'provider_location': handle} + + def remove_export(self, context, volume): + """Driver entry point to remove an export for a volume. + + Since exporting is idempotent in this driver, we have nothing + to do for unexporting. 
+ """ + + pass + + def initialize_connection(self, volume, connector): + """Driver entry point to attach a volume to an instance. + + Do the LUN masking on the storage system so the initiator can access + the LUN on the target. Also return the iSCSI properties so the + initiator can find the LUN. This implementation does not call + _get_iscsi_properties() to get the properties because cannot store the + LUN number in the database. We only find out what the LUN number will + be during this method call so we construct the properties dictionary + ourselves. + """ + + initiator_name = connector['initiator'] + name = volume['name'] + lun_id = self._map_lun(name, initiator_name, 'iscsi', None) + msg = _("Mapped LUN %(name)s to the initiator %(initiator_name)s") + msg_fmt = {'name': name, 'initiator_name': initiator_name} + LOG.debug(msg % msg_fmt) + iqn = self._get_iscsi_service_details() + target_details_list = self._get_target_details() + msg = _("Succesfully fetched target details for LUN %(name)s and " + "initiator %(initiator_name)s") + msg_fmt = {'name': name, 'initiator_name': initiator_name} + LOG.debug(msg % msg_fmt) + + if not target_details_list: + msg = _('Failed to get LUN target details for the LUN %s') + raise exception.VolumeBackendAPIException(data=msg % name) + target_details = None + for tgt_detail in target_details_list: + if tgt_detail.get('interface-enabled', 'true') == 'true': + target_details = tgt_detail + break + if not target_details: + target_details = target_details_list[0] + + if not target_details['address'] and target_details['port']: + msg = _('Failed to get target portal for the LUN %s') + raise exception.VolumeBackendAPIException(data=msg % name) + if not iqn: + msg = _('Failed to get target IQN for the LUN %s') + raise exception.VolumeBackendAPIException(data=msg % name) + + properties = {} + properties['target_discovered'] = False + (address, port) = (target_details['address'], target_details['port']) + properties['target_portal'] = '%s:%s' % (address, port) + properties['target_iqn'] = iqn + properties['target_lun'] = lun_id + properties['volume_id'] = volume['id'] + + auth = volume['provider_auth'] + if auth: + (auth_method, auth_username, auth_secret) = auth.split() + properties['auth_method'] = auth_method + properties['auth_username'] = auth_username + properties['auth_password'] = auth_secret + + return { + 'driver_volume_type': 'iscsi', + 'data': properties, + } + + def create_snapshot(self, snapshot): + """Driver entry point for creating a snapshot. + + This driver implements snapshots by using efficient single-file + (LUN) cloning. + """ + + vol_name = snapshot['volume_name'] + snapshot_name = snapshot['name'] + lun = self.lun_table[vol_name] + self._clone_lun(lun.name, snapshot_name, 'false') + + def delete_snapshot(self, snapshot): + """Driver entry point for deleting a snapshot.""" + self.delete_volume(snapshot) + LOG.debug(_("Snapshot %s deletion successful") % snapshot['name']) + + def create_volume_from_snapshot(self, volume, snapshot): + """Driver entry point for creating a new volume from a snapshot. + + Many would call this "cloning" and in fact we use cloning to implement + this feature. 
+ """ + + vol_size = volume['size'] + snap_size = snapshot['volume_size'] + snapshot_name = snapshot['name'] + new_name = volume['name'] + self._clone_lun(snapshot_name, new_name, 'true') + if vol_size != snap_size: + try: + self.extend_volume(volume, volume['size']) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.error( + _("Resizing %s failed. Cleaning volume."), new_name) + self.delete_volume(volume) + + def terminate_connection(self, volume, connector, **kwargs): + """Driver entry point to unattach a volume from an instance. + + Unmask the LUN on the storage system so the given initiator can no + longer access it. + """ + + initiator_name = connector['initiator'] + name = volume['name'] + metadata = self._get_lun_attr(name, 'metadata') + path = metadata['Path'] + self._unmap_lun(path, initiator_name) + msg = _("Unmapped LUN %(name)s from the initiator " + "%(initiator_name)s") + msg_fmt = {'name': name, 'initiator_name': initiator_name} + LOG.debug(msg % msg_fmt) + + def _get_ontapi_version(self): + """Gets the supported ontapi version.""" + ontapi_version = NaElement('system-get-ontapi-version') + res = self.client.invoke_successfully(ontapi_version, False) + major = res.get_child_content('major-version') + minor = res.get_child_content('minor-version') + return (major, minor) + + def _create_lun_on_eligible_vol(self, name, size, metadata, + extra_specs=None): + """Creates an actual lun on filer.""" + raise NotImplementedError() + + def _create_lun(self, volume, lun, size, metadata): + """Issues api request for creating lun on volume.""" + path = '/vol/%s/%s' % (volume, lun) + lun_create = NaElement.create_node_with_children( + 'lun-create-by-size', + **{'path': path, 'size': size, + 'ostype': metadata['OsType'], + 'space-reservation-enabled': metadata['SpaceReserved']}) + self.client.invoke_successfully(lun_create, True) + + def _get_iscsi_service_details(self): + """Returns iscsi iqn.""" + raise NotImplementedError() + + def _get_target_details(self): + """Gets the target portal details.""" + raise NotImplementedError() + + def _create_lun_handle(self, metadata): + """Returns lun handle based on filer type.""" + raise NotImplementedError() + + def _get_lun_list(self): + """Gets the list of luns on filer.""" + raise NotImplementedError() + + def _extract_and_populate_luns(self, api_luns): + """Extracts the luns from api. + + Populates in the lun table. 
+ """ + + for lun in api_luns: + meta_dict = self._create_lun_meta(lun) + path = lun.get_child_content('path') + (rest, splitter, name) = path.rpartition('/') + handle = self._create_lun_handle(meta_dict) + size = lun.get_child_content('size') + discovered_lun = NetAppLun(handle, name, + size, meta_dict) + self._add_lun_to_table(discovered_lun) + + def _is_naelement(self, elem): + """Checks if element is NetApp element.""" + if not isinstance(elem, NaElement): + raise ValueError('Expects NaElement') + + def _map_lun(self, name, initiator, initiator_type='iscsi', lun_id=None): + """Maps lun to the initiator and returns lun id assigned.""" + metadata = self._get_lun_attr(name, 'metadata') + os = metadata['OsType'] + path = metadata['Path'] + if self._check_allowed_os(os): + os = os + else: + os = 'default' + igroup_name = self._get_or_create_igroup(initiator, + initiator_type, os) + lun_map = NaElement.create_node_with_children( + 'lun-map', **{'path': path, + 'initiator-group': igroup_name}) + if lun_id: + lun_map.add_new_child('lun-id', lun_id) + try: + result = self.client.invoke_successfully(lun_map, True) + return result.get_child_content('lun-id-assigned') + except NaApiError as e: + code = e.code + message = e.message + msg = _('Error mapping lun. Code :%(code)s, Message:%(message)s') + msg_fmt = {'code': code, 'message': message} + exc_info = sys.exc_info() + LOG.warn(msg % msg_fmt) + (igroup, lun_id) = self._find_mapped_lun_igroup(path, initiator) + if lun_id is not None: + return lun_id + else: + raise exc_info[0], exc_info[1], exc_info[2] + + def _unmap_lun(self, path, initiator): + """Unmaps a lun from given initiator.""" + (igroup_name, lun_id) = self._find_mapped_lun_igroup(path, initiator) + lun_unmap = NaElement.create_node_with_children( + 'lun-unmap', + **{'path': path, 'initiator-group': igroup_name}) + try: + self.client.invoke_successfully(lun_unmap, True) + except NaApiError as e: + msg = _("Error unmapping lun. Code :%(code)s," + " Message:%(message)s") + msg_fmt = {'code': e.code, 'message': e.message} + exc_info = sys.exc_info() + LOG.warn(msg % msg_fmt) + # if the lun is already unmapped + if e.code == '13115' or e.code == '9016': + pass + else: + raise exc_info[0], exc_info[1], exc_info[2] + + def _find_mapped_lun_igroup(self, path, initiator, os=None): + """Find the igroup for mapped lun with initiator.""" + raise NotImplementedError() + + def _get_or_create_igroup(self, initiator, initiator_type='iscsi', + os='default'): + """Checks for an igroup for an initiator. + + Creates igroup if not found. 
+ """ + + igroups = self._get_igroup_by_initiator(initiator=initiator) + igroup_name = None + for igroup in igroups: + if igroup['initiator-group-os-type'] == os: + if igroup['initiator-group-type'] == initiator_type or \ + igroup['initiator-group-type'] == 'mixed': + if igroup['initiator-group-name'].startswith( + self.IGROUP_PREFIX): + igroup_name = igroup['initiator-group-name'] + break + if not igroup_name: + igroup_name = self.IGROUP_PREFIX + str(uuid.uuid4()) + self._create_igroup(igroup_name, initiator_type, os) + self._add_igroup_initiator(igroup_name, initiator) + return igroup_name + + def _get_igroup_by_initiator(self, initiator): + """Get igroups by initiator.""" + raise NotImplementedError() + + def _check_allowed_os(self, os): + """Checks if the os type supplied is NetApp supported.""" + if os in ['linux', 'aix', 'hpux', 'windows', 'solaris', + 'netware', 'vmware', 'openvms', 'xen', 'hyper_v']: + return True + else: + return False + + def _create_igroup(self, igroup, igroup_type='iscsi', os_type='default'): + """Creates igroup with specified args.""" + igroup_create = NaElement.create_node_with_children( + 'igroup-create', + **{'initiator-group-name': igroup, + 'initiator-group-type': igroup_type, + 'os-type': os_type}) + self.client.invoke_successfully(igroup_create, True) + + def _add_igroup_initiator(self, igroup, initiator): + """Adds initiators to the specified igroup.""" + igroup_add = NaElement.create_node_with_children( + 'igroup-add', + **{'initiator-group-name': igroup, + 'initiator': initiator}) + self.client.invoke_successfully(igroup_add, True) + + def _get_qos_type(self, volume): + """Get the storage service type for a volume.""" + type_id = volume['volume_type_id'] + if not type_id: + return None + volume_type = volume_types.get_volume_type(None, type_id) + if not volume_type: + return None + return volume_type['name'] + + def _add_lun_to_table(self, lun): + """Adds LUN to cache table.""" + if not isinstance(lun, NetAppLun): + msg = _("Object is not a NetApp LUN.") + raise exception.VolumeBackendAPIException(data=msg) + self.lun_table[lun.name] = lun + + def _get_lun_from_table(self, name): + """Gets LUN from cache table. + + Refreshes cache if lun not found in cache. + """ + lun = self.lun_table.get(name) + if lun is None: + self._get_lun_list() + lun = self.lun_table.get(name) + if lun is None: + raise exception.VolumeNotFound(volume_id=name) + return lun + + def _clone_lun(self, name, new_name, space_reserved='true', + start_block=0, end_block=0, block_count=0): + """Clone LUN with the given name to the new name.""" + raise NotImplementedError() + + def _get_lun_by_args(self, **args): + """Retrieves luns with specified args.""" + raise NotImplementedError() + + def _get_lun_attr(self, name, attr): + """Get the lun attribute if found else None.""" + try: + attr = getattr(self._get_lun_from_table(name), attr) + return attr + except exception.VolumeNotFound as e: + LOG.error(_("Message: %s"), e.msg) + except Exception as e: + LOG.error(_("Error getting lun attribute. 
Exception: %s"), + e.__str__()) + return None + + def _create_lun_meta(self, lun): + raise NotImplementedError() + + def create_cloned_volume(self, volume, src_vref): + """Creates a clone of the specified volume.""" + vol_size = volume['size'] + src_vol = self.lun_table[src_vref['name']] + src_vol_size = src_vref['size'] + new_name = volume['name'] + self._clone_lun(src_vol.name, new_name, 'true') + if vol_size != src_vol_size: + try: + self.extend_volume(volume, volume['size']) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.error( + _("Resizing %s failed. Cleaning volume."), new_name) + self.delete_volume(volume) + + def get_volume_stats(self, refresh=False): + """Get volume stats. + + If 'refresh' is True, run update the stats first. + """ + + if refresh: + self._update_volume_stats() + + return self._stats + + def _update_volume_stats(self): + """Retrieve stats info from volume group.""" + raise NotImplementedError() + + def extend_volume(self, volume, new_size): + """Extend an existing volume to the new size.""" + name = volume['name'] + path = self.lun_table[name].metadata['Path'] + curr_size_bytes = str(self.lun_table[name].size) + new_size_bytes = str(int(new_size) * units.GiB) + # Reused by clone scenarios. + # Hence comparing the stored size. + if curr_size_bytes != new_size_bytes: + lun_geometry = self._get_lun_geometry(path) + if (lun_geometry and lun_geometry.get("max_resize") + and lun_geometry.get("max_resize") >= new_size_bytes): + self._do_direct_resize(path, new_size_bytes) + else: + self._do_sub_clone_resize(path, new_size_bytes) + self.lun_table[name].size = new_size_bytes + else: + LOG.info(_("No need to extend volume %s" + " as it is already the requested new size."), name) + + def _do_direct_resize(self, path, new_size_bytes, force=True): + """Uses the resize api to resize the lun.""" + seg = path.split("/") + LOG.info(_("Resizing lun %s directly to new size."), seg[-1]) + lun_resize = NaElement("lun-resize") + lun_resize.add_new_child('path', path) + lun_resize.add_new_child('size', new_size_bytes) + if force: + lun_resize.add_new_child('force', 'true') + self.client.invoke_successfully(lun_resize, True) + + def _get_lun_geometry(self, path): + """Gets the lun geometry.""" + geometry = {} + lun_geo = NaElement("lun-get-geometry") + lun_geo.add_new_child('path', path) + try: + result = self.client.invoke_successfully(lun_geo, True) + geometry['size'] = result.get_child_content("size") + geometry['bytes_per_sector'] =\ + result.get_child_content("bytes-per-sector") + geometry['sectors_per_track'] =\ + result.get_child_content("sectors-per-track") + geometry['tracks_per_cylinder'] =\ + result.get_child_content("tracks-per-cylinder") + geometry['cylinders'] =\ + result.get_child_content("cylinders") + geometry['max_resize'] =\ + result.get_child_content("max-resize-size") + except Exception as e: + LOG.error(_("Lun %(path)s geometry failed. 
Message - %(msg)s") + % {'path': path, 'msg': e.message}) + return geometry + + def _get_volume_options(self, volume_name): + """Get the value for the volume option.""" + opts = [] + vol_option_list = NaElement("volume-options-list-info") + vol_option_list.add_new_child('volume', volume_name) + result = self.client.invoke_successfully(vol_option_list, True) + options = result.get_child_by_name("options") + if options: + opts = options.get_children() + return opts + + def _get_vol_option(self, volume_name, option_name): + """Get the value for the volume option.""" + value = None + options = self._get_volume_options(volume_name) + for opt in options: + if opt.get_child_content('name') == option_name: + value = opt.get_child_content('value') + break + return value + + def _move_lun(self, path, new_path): + """Moves the lun at path to new path.""" + seg = path.split("/") + new_seg = new_path.split("/") + LOG.debug(_("Moving lun %(name)s to %(new_name)s.") + % {'name': seg[-1], 'new_name': new_seg[-1]}) + lun_move = NaElement("lun-move") + lun_move.add_new_child("path", path) + lun_move.add_new_child("new-path", new_path) + self.client.invoke_successfully(lun_move, True) + + def _do_sub_clone_resize(self, path, new_size_bytes): + """Does sub lun clone after verification. + + Clones the block ranges and swaps + the luns also deletes older lun + after a successful clone. + """ + seg = path.split("/") + LOG.info(_("Resizing lun %s using sub clone to new size."), seg[-1]) + name = seg[-1] + vol_name = seg[2] + lun = self.lun_table[name] + metadata = lun.metadata + compression = self._get_vol_option(vol_name, 'compression') + if compression == "on": + msg = _('%s cannot be sub clone resized' + ' as it is hosted on compressed volume') + raise exception.VolumeBackendAPIException(data=msg % name) + else: + block_count = self._get_lun_block_count(path) + if block_count == 0: + msg = _('%s cannot be sub clone resized' + ' as it contains no blocks.') + raise exception.VolumeBackendAPIException(data=msg % name) + new_lun = 'new-%s' % (name) + self._create_lun(vol_name, new_lun, new_size_bytes, metadata) + try: + self._clone_lun(name, new_lun, block_count=block_count) + self._post_sub_clone_resize(path) + except Exception: + with excutils.save_and_reraise_exception(): + new_path = '/vol/%s/%s' % (vol_name, new_lun) + self._destroy_lun(new_path) + + def _post_sub_clone_resize(self, path): + """Try post sub clone resize in a transactional manner.""" + st_tm_mv, st_nw_mv, st_del_old = None, None, None + seg = path.split("/") + LOG.info(_("Post clone resize lun %s"), seg[-1]) + new_lun = 'new-%s' % (seg[-1]) + tmp_lun = 'tmp-%s' % (seg[-1]) + tmp_path = "/vol/%s/%s" % (seg[2], tmp_lun) + new_path = "/vol/%s/%s" % (seg[2], new_lun) + try: + st_tm_mv = self._move_lun(path, tmp_path) + st_nw_mv = self._move_lun(new_path, path) + st_del_old = self._destroy_lun(tmp_path) + except Exception as e: + if st_tm_mv is None: + msg = _("Failure staging lun %s to tmp.") + raise exception.VolumeBackendAPIException(data=msg % (seg[-1])) + else: + if st_nw_mv is None: + self._move_lun(tmp_path, path) + msg = _("Failure moving new cloned lun to %s.") + raise exception.VolumeBackendAPIException( + data=msg % (seg[-1])) + elif st_del_old is None: + LOG.error(_("Failure deleting staged tmp lun %s."), + tmp_lun) + else: + LOG.error(_("Unknown exception in" + " post clone resize lun %s."), seg[-1]) + LOG.error(_("Exception details: %s") % (e.__str__())) + + def _get_lun_block_count(self, path): + """Gets block counts for the lun.""" 
+ LOG.debug(_("Getting lun block count.")) + block_count = 0 + lun_infos = self._get_lun_by_args(path=path) + if not lun_infos: + seg = path.split('/') + msg = _('Failure getting lun info for %s.') + raise exception.VolumeBackendAPIException(data=msg % seg[-1]) + lun_info = lun_infos[-1] + bs = int(lun_info.get_child_content('block-size')) + ls = int(lun_info.get_child_content('size')) + block_count = ls / bs + return block_count + + +class NetAppDirectCmodeISCSIDriver(NetAppDirectISCSIDriver): + """NetApp C-mode iSCSI volume driver.""" + + DEFAULT_VS = 'openstack' + + def __init__(self, *args, **kwargs): + super(NetAppDirectCmodeISCSIDriver, self).__init__(*args, **kwargs) + self.configuration.append_config_values(netapp_cluster_opts) + + def _do_custom_setup(self): + """Does custom setup for ontap cluster.""" + self.vserver = self.configuration.netapp_vserver + self.vserver = self.vserver if self.vserver else self.DEFAULT_VS + # We set vserver in client permanently. + # To use tunneling enable_tunneling while invoking api + self.client.set_vserver(self.vserver) + # Default values to run first api + self.client.set_api_version(1, 15) + (major, minor) = self._get_ontapi_version() + self.client.set_api_version(major, minor) + self.ssc_vols = None + self.stale_vols = set() + + def check_for_setup_error(self): + """Check that the driver is working and can communicate.""" + ssc_utils.check_ssc_api_permissions(self.client) + super(NetAppDirectCmodeISCSIDriver, self).check_for_setup_error() + + def _create_lun_on_eligible_vol(self, name, size, metadata, + extra_specs=None): + """Creates an actual lun on filer.""" + req_size = float(size) *\ + float(self.configuration.netapp_size_multiplier) + volumes = self._get_avl_volumes(req_size, extra_specs) + if not volumes: + msg = _('Failed to get vol with required' + ' size and extra specs for volume: %s') + raise exception.VolumeBackendAPIException(data=msg % name) + for volume in volumes: + try: + self._create_lun(volume.id['name'], name, size, metadata) + metadata['Path'] = '/vol/%s/%s' % (volume.id['name'], name) + metadata['Volume'] = volume.id['name'] + metadata['Qtree'] = None + return + except NaApiError: + LOG.warn(_("Error provisioning vol %(name)s on %(volume)s") + % {'name': name, 'volume': volume.id['name']}) + finally: + self._update_stale_vols(volume=volume) + + def _get_avl_volumes(self, size, extra_specs=None): + """Get the available volume by size, extra_specs.""" + result = [] + volumes = ssc_utils.get_volumes_for_specs( + self.ssc_vols, extra_specs) + if volumes: + sorted_vols = sorted(volumes, reverse=True) + for vol in sorted_vols: + if int(vol.space['size_avl_bytes']) >= int(size): + result.append(vol) + return result + + def _get_target_details(self): + """Gets the target portal details.""" + iscsi_if_iter = NaElement('iscsi-interface-get-iter') + result = self.client.invoke_successfully(iscsi_if_iter, True) + tgt_list = [] + if result.get_child_content('num-records')\ + and int(result.get_child_content('num-records')) >= 1: + attr_list = result.get_child_by_name('attributes-list') + iscsi_if_list = attr_list.get_children() + for iscsi_if in iscsi_if_list: + d = dict() + d['address'] = iscsi_if.get_child_content('ip-address') + d['port'] = iscsi_if.get_child_content('ip-port') + d['tpgroup-tag'] = iscsi_if.get_child_content('tpgroup-tag') + d['interface-enabled'] = iscsi_if.get_child_content( + 'is-interface-enabled') + tgt_list.append(d) + return tgt_list + + def _get_iscsi_service_details(self): + """Returns iscsi iqn.""" + 
iscsi_service_iter = NaElement('iscsi-service-get-iter') + result = self.client.invoke_successfully(iscsi_service_iter, True) + if result.get_child_content('num-records') and\ + int(result.get_child_content('num-records')) >= 1: + attr_list = result.get_child_by_name('attributes-list') + iscsi_service = attr_list.get_child_by_name('iscsi-service-info') + return iscsi_service.get_child_content('node-name') + LOG.debug(_('No iscsi service found for vserver %s') % (self.vserver)) + return None + + def _create_lun_handle(self, metadata): + """Returns lun handle based on filer type.""" + return '%s:%s' % (self.vserver, metadata['Path']) + + def _get_lun_list(self): + """Gets the list of luns on filer. + + Gets the luns from cluster with vserver. + """ + + tag = None + while True: + api = NaElement('lun-get-iter') + api.add_new_child('max-records', '100') + if tag: + api.add_new_child('tag', tag, True) + lun_info = NaElement('lun-info') + lun_info.add_new_child('vserver', self.vserver) + query = NaElement('query') + query.add_child_elem(lun_info) + api.add_child_elem(query) + result = self.client.invoke_successfully(api) + if result.get_child_by_name('num-records') and\ + int(result.get_child_content('num-records')) >= 1: + attr_list = result.get_child_by_name('attributes-list') + self._extract_and_populate_luns(attr_list.get_children()) + tag = result.get_child_content('next-tag') + if tag is None: + break + + def _find_mapped_lun_igroup(self, path, initiator, os=None): + """Find the igroup for mapped lun with initiator.""" + initiator_igroups = self._get_igroup_by_initiator(initiator=initiator) + lun_maps = self._get_lun_map(path) + if initiator_igroups and lun_maps: + for igroup in initiator_igroups: + igroup_name = igroup['initiator-group-name'] + if igroup_name.startswith(self.IGROUP_PREFIX): + for lun_map in lun_maps: + if lun_map['initiator-group'] == igroup_name: + return (igroup_name, lun_map['lun-id']) + return (None, None) + + def _get_lun_map(self, path): + """Gets the lun map by lun path.""" + tag = None + map_list = [] + while True: + lun_map_iter = NaElement('lun-map-get-iter') + lun_map_iter.add_new_child('max-records', '100') + if tag: + lun_map_iter.add_new_child('tag', tag, True) + query = NaElement('query') + lun_map_iter.add_child_elem(query) + query.add_node_with_children('lun-map-info', **{'path': path}) + result = self.client.invoke_successfully(lun_map_iter, True) + tag = result.get_child_content('next-tag') + if result.get_child_content('num-records') and \ + int(result.get_child_content('num-records')) >= 1: + attr_list = result.get_child_by_name('attributes-list') + lun_maps = attr_list.get_children() + for lun_map in lun_maps: + lun_m = dict() + lun_m['initiator-group'] = lun_map.get_child_content( + 'initiator-group') + lun_m['lun-id'] = lun_map.get_child_content('lun-id') + lun_m['vserver'] = lun_map.get_child_content('vserver') + map_list.append(lun_m) + if tag is None: + break + return map_list + + def _get_igroup_by_initiator(self, initiator): + """Get igroups by initiator.""" + tag = None + igroup_list = [] + while True: + igroup_iter = NaElement('igroup-get-iter') + igroup_iter.add_new_child('max-records', '100') + if tag: + igroup_iter.add_new_child('tag', tag, True) + query = NaElement('query') + igroup_iter.add_child_elem(query) + igroup_info = NaElement('initiator-group-info') + query.add_child_elem(igroup_info) + igroup_info.add_new_child('vserver', self.vserver) + initiators = NaElement('initiators') + igroup_info.add_child_elem(initiators) + 
initiators.add_node_with_children('initiator-info',
+ **{'initiator-name': initiator})
+ des_attrs = NaElement('desired-attributes')
+ des_ig_info = NaElement('initiator-group-info')
+ des_attrs.add_child_elem(des_ig_info)
+ des_ig_info.add_node_with_children('initiators',
+ **{'initiator-info': None})
+ des_ig_info.add_new_child('vserver', None)
+ des_ig_info.add_new_child('initiator-group-name', None)
+ des_ig_info.add_new_child('initiator-group-type', None)
+ des_ig_info.add_new_child('initiator-group-os-type', None)
+ igroup_iter.add_child_elem(des_attrs)
+ result = self.client.invoke_successfully(igroup_iter, False)
+ tag = result.get_child_content('next-tag')
+ if result.get_child_content('num-records') and\
+ int(result.get_child_content('num-records')) > 0:
+ attr_list = result.get_child_by_name('attributes-list')
+ igroups = attr_list.get_children()
+ for igroup in igroups:
+ ig = dict()
+ ig['initiator-group-os-type'] = igroup.get_child_content(
+ 'initiator-group-os-type')
+ ig['initiator-group-type'] = igroup.get_child_content(
+ 'initiator-group-type')
+ ig['initiator-group-name'] = igroup.get_child_content(
+ 'initiator-group-name')
+ igroup_list.append(ig)
+ if tag is None:
+ break
+ return igroup_list
+
+ def _clone_lun(self, name, new_name, space_reserved='true',
+ start_block=0, end_block=0, block_count=0):
+ """Clone LUN with the given handle to the new name."""
+ metadata = self._get_lun_attr(name, 'metadata')
+ volume = metadata['Volume']
+ clone_create = NaElement.create_node_with_children(
+ 'clone-create',
+ **{'volume': volume, 'source-path': name,
+ 'destination-path': new_name, 'space-reserve': space_reserved})
+ if block_count > 0:
+ block_ranges = NaElement("block-ranges")
+ block_range = NaElement.create_node_with_children(
+ 'block-range',
+ **{'source-block-number': str(start_block),
+ 'destination-block-number': str(end_block),
+ 'block-count': str(block_count)})
+ block_ranges.add_child_elem(block_range)
+ clone_create.add_child_elem(block_ranges)
+ self.client.invoke_successfully(clone_create, True)
+ LOG.debug(_("Cloned LUN with new name %s") % new_name)
+ lun = self._get_lun_by_args(vserver=self.vserver, path='/vol/%s/%s'
+ % (volume, new_name))
+ if len(lun) == 0:
+ msg = _("No cloned LUN named %s found on the filer")
+ raise exception.VolumeBackendAPIException(data=msg % (new_name))
+ clone_meta = self._create_lun_meta(lun[0])
+ self._add_lun_to_table(NetAppLun('%s:%s' % (clone_meta['Vserver'],
+ clone_meta['Path']),
+ new_name,
+ lun[0].get_child_content('size'),
+ clone_meta))
+ self._update_stale_vols(
+ volume=ssc_utils.NetAppVolume(volume, self.vserver))
+
+ def _get_lun_by_args(self, **args):
+ """Retrieves luns with specified args."""
+ lun_iter = NaElement('lun-get-iter')
+ lun_iter.add_new_child('max-records', '100')
+ query = NaElement('query')
+ lun_iter.add_child_elem(query)
+ query.add_node_with_children('lun-info', **args)
+ luns = self.client.invoke_successfully(lun_iter)
+ attr_list = luns.get_child_by_name('attributes-list')
+ return attr_list.get_children()
+
+ def _create_lun_meta(self, lun):
+ """Creates lun metadata dictionary."""
+ self._is_naelement(lun)
+ meta_dict = {}
+ meta_dict['Vserver'] = lun.get_child_content('vserver')
+ meta_dict['Volume'] = lun.get_child_content('volume')
+ meta_dict['Qtree'] = lun.get_child_content('qtree')
+ meta_dict['Path'] = lun.get_child_content('path')
+ meta_dict['OsType'] = lun.get_child_content('multiprotocol-type')
+ meta_dict['SpaceReserved'] = \
lun.get_child_content('is-space-reservation-enabled') + return meta_dict + + def _configure_tunneling(self, do_tunneling=False): + """Configures tunneling for ontap cluster.""" + if do_tunneling: + self.client.set_vserver(self.vserver) + else: + self.client.set_vserver(None) + + def _update_volume_stats(self): + """Retrieve stats info from volume group.""" + + LOG.debug(_("Updating volume stats")) + data = {} + netapp_backend = 'NetApp_iSCSI_Cluster_direct' + backend_name = self.configuration.safe_get('volume_backend_name') + data["volume_backend_name"] = ( + backend_name or netapp_backend) + data["vendor_name"] = 'NetApp' + data["driver_version"] = '1.0' + data["storage_protocol"] = 'iSCSI' + + data['total_capacity_gb'] = 0 + data['free_capacity_gb'] = 0 + data['reserved_percentage'] = 0 + data['QoS_support'] = False + self._update_cluster_vol_stats(data) + provide_ems(self, self.client, data, netapp_backend) + self._stats = data + + def _update_cluster_vol_stats(self, data): + """Updates vol stats with cluster config.""" + sync = True if self.ssc_vols is None else False + ssc_utils.refresh_cluster_ssc(self, self.client, self.vserver, + synchronous=sync) + if self.ssc_vols: + data['netapp_mirrored'] = 'true'\ + if self.ssc_vols['mirrored'] else 'false' + data['netapp_unmirrored'] = 'true'\ + if len(self.ssc_vols['all']) > len(self.ssc_vols['mirrored'])\ + else 'false' + data['netapp_dedup'] = 'true'\ + if self.ssc_vols['dedup'] else 'false' + data['netapp_nodedupe'] = 'true'\ + if len(self.ssc_vols['all']) > len(self.ssc_vols['dedup'])\ + else 'false' + data['netapp_compression'] = 'true'\ + if self.ssc_vols['compression'] else 'false' + data['netapp_nocompression'] = 'true'\ + if len(self.ssc_vols['all']) >\ + len(self.ssc_vols['compression'])\ + else 'false' + data['netapp_thin_provisioned'] = 'true'\ + if self.ssc_vols['thin'] else 'false' + data['netapp_thick_provisioned'] = 'true'\ + if len(self.ssc_vols['all']) >\ + len(self.ssc_vols['thin']) else 'false' + if self.ssc_vols['all']: + vol_max = max(self.ssc_vols['all']) + data['total_capacity_gb'] =\ + int(vol_max.space['size_total_bytes']) / units.GiB + data['free_capacity_gb'] =\ + int(vol_max.space['size_avl_bytes']) / units.GiB + else: + data['total_capacity_gb'] = 0 + data['free_capacity_gb'] = 0 + else: + LOG.warn(_("Cluster ssc is not updated. 
No volume stats found.")) + + @utils.synchronized('update_stale') + def _update_stale_vols(self, volume=None, reset=False): + """Populates stale vols with vol and returns set copy if reset.""" + if volume: + self.stale_vols.add(volume) + if reset: + set_copy = copy.deepcopy(self.stale_vols) + self.stale_vols.clear() + return set_copy + + @utils.synchronized("refresh_ssc_vols") + def refresh_ssc_vols(self, vols): + """Refreshes ssc_vols with latest entries.""" + self.ssc_vols = vols + + def delete_volume(self, volume): + """Driver entry point for destroying existing volumes.""" + lun = self.lun_table.get(volume['name']) + netapp_vol = None + if lun: + netapp_vol = lun.get_metadata_property('Volume') + super(NetAppDirectCmodeISCSIDriver, self).delete_volume(volume) + if netapp_vol: + self._update_stale_vols( + volume=ssc_utils.NetAppVolume(netapp_vol, self.vserver)) + + +class NetAppDirect7modeISCSIDriver(NetAppDirectISCSIDriver): + """NetApp 7-mode iSCSI volume driver.""" + + def __init__(self, *args, **kwargs): + super(NetAppDirect7modeISCSIDriver, self).__init__(*args, **kwargs) + self.configuration.append_config_values(netapp_7mode_opts) + + def _do_custom_setup(self): + """Does custom setup depending on the type of filer.""" + self.vfiler = self.configuration.netapp_vfiler + self.volume_list = self.configuration.netapp_volume_list + if self.volume_list: + self.volume_list = self.volume_list.split(',') + self.volume_list = [el.strip() for el in self.volume_list] + (major, minor) = self._get_ontapi_version() + self.client.set_api_version(major, minor) + if self.vfiler: + self.client.set_vfiler(self.vfiler) + self.vol_refresh_time = None + self.vol_refresh_interval = 1800 + self.vol_refresh_running = False + self.vol_refresh_voluntary = False + # Setting it infinite at set up + # This will not rule out backend from scheduling + self.total_gb = 'infinite' + self.free_gb = 'infinite' + + def check_for_setup_error(self): + """Check that the driver is working and can communicate.""" + api_version = self.client.get_api_version() + if api_version: + major, minor = api_version + if major == 1 and minor < 9: + msg = _("Unsupported ONTAP version." 
+ " ONTAP version 7.3.1 and above is supported.") + raise exception.VolumeBackendAPIException(data=msg) + else: + msg = _("Api version could not be determined.") + raise exception.VolumeBackendAPIException(data=msg) + super(NetAppDirect7modeISCSIDriver, self).check_for_setup_error() + + def _create_lun_on_eligible_vol(self, name, size, metadata, + extra_specs=None): + """Creates an actual lun on filer.""" + req_size = float(size) *\ + float(self.configuration.netapp_size_multiplier) + volume = self._get_avl_volume_by_size(req_size) + if not volume: + msg = _('Failed to get vol with required size for volume: %s') + raise exception.VolumeBackendAPIException(data=msg % name) + self._create_lun(volume['name'], name, size, metadata) + metadata['Path'] = '/vol/%s/%s' % (volume['name'], name) + metadata['Volume'] = volume['name'] + metadata['Qtree'] = None + self.vol_refresh_voluntary = True + + def _get_filer_volumes(self, volume=None): + """Returns list of filer volumes in api format.""" + vol_request = NaElement('volume-list-info') + if volume: + vol_request.add_new_child('volume', volume) + res = self.client.invoke_successfully(vol_request, True) + volumes = res.get_child_by_name('volumes') + if volumes: + return volumes.get_children() + return [] + + def _get_avl_volume_by_size(self, size): + """Get the available volume by size.""" + vols = self._get_filer_volumes() + for vol in vols: + avl_size = vol.get_child_content('size-available') + state = vol.get_child_content('state') + if float(avl_size) >= float(size) and state == 'online': + avl_vol = dict() + avl_vol['name'] = vol.get_child_content('name') + avl_vol['block-type'] = vol.get_child_content('block-type') + avl_vol['type'] = vol.get_child_content('type') + avl_vol['size-available'] = avl_size + if self.volume_list: + if avl_vol['name'] in self.volume_list: + return avl_vol + elif self._get_vol_option(avl_vol['name'], 'root') != 'true': + return avl_vol + return None + + def _get_igroup_by_initiator(self, initiator): + """Get igroups by initiator.""" + igroup_list = NaElement('igroup-list-info') + result = self.client.invoke_successfully(igroup_list, True) + igroups = [] + igs = result.get_child_by_name('initiator-groups') + if igs: + ig_infos = igs.get_children() + if ig_infos: + for info in ig_infos: + initiators = info.get_child_by_name('initiators') + init_infos = initiators.get_children() + if init_infos: + for init in init_infos: + if init.get_child_content('initiator-name')\ + == initiator: + d = dict() + d['initiator-group-os-type'] = \ + info.get_child_content( + 'initiator-group-os-type') + d['initiator-group-type'] = \ + info.get_child_content( + 'initiator-group-type') + d['initiator-group-name'] = \ + info.get_child_content( + 'initiator-group-name') + igroups.append(d) + return igroups + + def _get_target_details(self): + """Gets the target portal details.""" + iscsi_if_iter = NaElement('iscsi-portal-list-info') + result = self.client.invoke_successfully(iscsi_if_iter, True) + tgt_list = [] + portal_list_entries = result.get_child_by_name( + 'iscsi-portal-list-entries') + if portal_list_entries: + portal_list = portal_list_entries.get_children() + for iscsi_if in portal_list: + d = dict() + d['address'] = iscsi_if.get_child_content('ip-address') + d['port'] = iscsi_if.get_child_content('ip-port') + d['tpgroup-tag'] = iscsi_if.get_child_content('tpgroup-tag') + tgt_list.append(d) + return tgt_list + + def _get_iscsi_service_details(self): + """Returns iscsi iqn.""" + iscsi_service_iter = NaElement('iscsi-node-get-name') + 
result = self.client.invoke_successfully(iscsi_service_iter, True) + return result.get_child_content('node-name') + + def _create_lun_handle(self, metadata): + """Returns lun handle based on filer type.""" + if self.vfiler: + owner = '%s:%s' % (self.configuration.netapp_server_hostname, + self.vfiler) + else: + owner = self.configuration.netapp_server_hostname + return '%s:%s' % (owner, metadata['Path']) + + def _get_lun_list(self): + """Gets the list of luns on filer.""" + lun_list = [] + if self.volume_list: + for vol in self.volume_list: + try: + luns = self._get_vol_luns(vol) + if luns: + lun_list.extend(luns) + except NaApiError: + LOG.warn(_("Error finding luns for volume %s." + " Verify volume exists.") % (vol)) + else: + luns = self._get_vol_luns(None) + lun_list.extend(luns) + self._extract_and_populate_luns(lun_list) + + def _get_vol_luns(self, vol_name): + """Gets the luns for a volume.""" + api = NaElement('lun-list-info') + if vol_name: + api.add_new_child('volume-name', vol_name) + result = self.client.invoke_successfully(api, True) + luns = result.get_child_by_name('luns') + return luns.get_children() + + def _find_mapped_lun_igroup(self, path, initiator, os=None): + """Find the igroup for mapped lun with initiator.""" + lun_map_list = NaElement.create_node_with_children( + 'lun-map-list-info', + **{'path': path}) + result = self.client.invoke_successfully(lun_map_list, True) + igroups = result.get_child_by_name('initiator-groups') + if igroups: + igroup = None + lun_id = None + found = False + igroup_infs = igroups.get_children() + for ig in igroup_infs: + initiators = ig.get_child_by_name('initiators') + init_infs = initiators.get_children() + for info in init_infs: + if info.get_child_content('initiator-name') == initiator: + found = True + igroup = ig.get_child_content('initiator-group-name') + lun_id = ig.get_child_content('lun-id') + break + if found: + break + return (igroup, lun_id) + + def _clone_lun(self, name, new_name, space_reserved='true', + start_block=0, end_block=0, block_count=0): + """Clone LUN with the given handle to the new name.""" + metadata = self._get_lun_attr(name, 'metadata') + path = metadata['Path'] + (parent, splitter, name) = path.rpartition('/') + clone_path = '%s/%s' % (parent, new_name) + clone_start = NaElement.create_node_with_children( + 'clone-start', **{'source-path': path, + 'destination-path': clone_path, + 'no-snap': 'true'}) + if block_count > 0: + block_ranges = NaElement("block-ranges") + block_range = NaElement.create_node_with_children( + 'block-range', + **{'source-block-number': str(start_block), + 'destination-block-number': str(end_block), + 'block-count': str(block_count)}) + block_ranges.add_child_elem(block_range) + clone_start.add_child_elem(block_ranges) + result = self.client.invoke_successfully(clone_start, True) + clone_id_el = result.get_child_by_name('clone-id') + cl_id_info = clone_id_el.get_child_by_name('clone-id-info') + vol_uuid = cl_id_info.get_child_content('volume-uuid') + clone_id = cl_id_info.get_child_content('clone-op-id') + if vol_uuid: + self._check_clone_status(clone_id, vol_uuid, name, new_name) + self.vol_refresh_voluntary = True + luns = self._get_lun_by_args(path=clone_path) + if luns: + cloned_lun = luns[0] + self._set_space_reserve(clone_path, space_reserved) + clone_meta = self._create_lun_meta(cloned_lun) + handle = self._create_lun_handle(clone_meta) + self._add_lun_to_table( + NetAppLun(handle, new_name, + cloned_lun.get_child_content('size'), + clone_meta)) + else: + raise 
NaApiError('ENOLUNENTRY', 'No Lun entry found on the filer')
+
+ def _set_space_reserve(self, path, enable):
+ """Sets the space reserve info."""
+ space_res = NaElement.create_node_with_children(
+ 'lun-set-space-reservation-info',
+ **{'path': path, 'enable': enable})
+ self.client.invoke_successfully(space_res, True)
+
+ def _check_clone_status(self, clone_id, vol_uuid, name, new_name):
+ """Checks for the job till completed."""
+ clone_status = NaElement('clone-list-status')
+ cl_id = NaElement('clone-id')
+ clone_status.add_child_elem(cl_id)
+ cl_id.add_node_with_children(
+ 'clone-id-info',
+ **{'clone-op-id': clone_id, 'volume-uuid': vol_uuid})
+ running = True
+ clone_ops_info = None
+ while running:
+ result = self.client.invoke_successfully(clone_status, True)
+ status = result.get_child_by_name('status')
+ ops_info = status.get_children()
+ if ops_info:
+ for info in ops_info:
+ if info.get_child_content('clone-state') == 'running':
+ time.sleep(1)
+ break
+ else:
+ running = False
+ clone_ops_info = info
+ break
+ else:
+ if clone_ops_info:
+ fmt = {'name': name, 'new_name': new_name}
+ if clone_ops_info.get_child_content('clone-state')\
+ == 'completed':
+ LOG.debug(_("Clone operation with src %(name)s"
+ " and dest %(new_name)s completed") % fmt)
+ else:
+ LOG.debug(_("Clone operation with src %(name)s"
+ " and dest %(new_name)s failed") % fmt)
+ raise NaApiError(
+ clone_ops_info.get_child_content('error'),
+ clone_ops_info.get_child_content('reason'))
+
+ def _get_lun_by_args(self, **args):
+ """Retrieves luns with specified args."""
+ lun_info = NaElement.create_node_with_children('lun-list-info', **args)
+ result = self.client.invoke_successfully(lun_info, True)
+ luns = result.get_child_by_name('luns')
+ return luns.get_children()
+
+ def _create_lun_meta(self, lun):
+ """Creates lun metadata dictionary."""
+ self._is_naelement(lun)
+ meta_dict = {}
+ meta_dict['Path'] = lun.get_child_content('path')
+ meta_dict['OsType'] = lun.get_child_content('multiprotocol-type')
+ meta_dict['SpaceReserved'] = lun.get_child_content(
+ 'is-space-reservation-enabled')
+ return meta_dict
+
+ def _update_volume_stats(self):
+ """Retrieve stats info from volume group."""
+ LOG.debug(_("Updating volume stats"))
+ data = {}
+ netapp_backend = 'NetApp_iSCSI_7mode_direct'
+ backend_name = self.configuration.safe_get('volume_backend_name')
+ data["volume_backend_name"] = (
+ backend_name or netapp_backend)
+ data["vendor_name"] = 'NetApp'
+ data["driver_version"] = self.VERSION
+ data["storage_protocol"] = 'iSCSI'
+ data['reserved_percentage'] = 0
+ data['QoS_support'] = False
+ self._get_capacity_info(data)
+ provide_ems(self, self.client, data, netapp_backend,
+ server_type="7mode")
+ self._stats = data
+
+ def _get_lun_block_count(self, path):
+ """Gets block counts for the lun."""
+ bs = super(
+ NetAppDirect7modeISCSIDriver, self)._get_lun_block_count(path)
+ api_version = self.client.get_api_version()
+ if api_version:
+ major = api_version[0]
+ minor = api_version[1]
+ if major == 1 and minor < 15:
+ bs = bs - 1
+ return bs
+
+ def _get_capacity_info(self, data):
+ """Calculates the capacity information for the filer."""
+ if (self.vol_refresh_time is None or self.vol_refresh_voluntary or
+ timeutils.is_newer_than(self.vol_refresh_time,
+ self.vol_refresh_interval)):
+ try:
+ job_set = set_safe_attr(self, 'vol_refresh_running', True)
+ if not job_set:
+ LOG.warn(
+ _("Volume refresh job already running.
Returning...")) + return + self.vol_refresh_voluntary = False + self._refresh_capacity_info() + self.vol_refresh_time = timeutils.utcnow() + except Exception as e: + LOG.warn(_("Error refreshing vol capacity. Message: %s"), e) + finally: + set_safe_attr(self, 'vol_refresh_running', False) + data['total_capacity_gb'] = self.total_gb + data['free_capacity_gb'] = self.free_gb + + def _refresh_capacity_info(self): + """Gets the latest capacity information.""" + LOG.info(_("Refreshing capacity info for %s."), self.client) + total_bytes = 0 + free_bytes = 0 + vols = self._get_filer_volumes() + for vol in vols: + volume = vol.get_child_content('name') + if self.volume_list and not volume in self.volume_list: + continue + state = vol.get_child_content('state') + inconsistent = vol.get_child_content('is-inconsistent') + invalid = vol.get_child_content('is-invalid') + if (state == 'online' and inconsistent == 'false' + and invalid == 'false'): + total_size = vol.get_child_content('size-total') + if total_size: + total_bytes = total_bytes + int(total_size) + avl_size = vol.get_child_content('size-available') + if avl_size: + free_bytes = free_bytes + int(avl_size) + self.total_gb = total_bytes / units.GiB + self.free_gb = free_bytes / units.GiB + + def delete_volume(self, volume): + """Driver entry point for destroying existing volumes.""" + super(NetAppDirect7modeISCSIDriver, self).delete_volume(volume) + self.vol_refresh_voluntary = True diff --git a/cinder/volume/drivers/netapp/nfs.py b/cinder/volume/drivers/netapp/nfs.py new file mode 100755 index 0000000000..8ed66c5752 --- /dev/null +++ b/cinder/volume/drivers/netapp/nfs.py @@ -0,0 +1,1465 @@ +# Copyright (c) 2012 NetApp, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Volume driver for NetApp NFS storage. 
+""" + +import copy +import os +import re +import socket +from threading import Timer +import time +import urlparse +import uuid + +from cinder import exception +from cinder.image import image_utils +from cinder.openstack.common import excutils +from cinder.openstack.common import log as logging +from cinder.openstack.common import processutils +from cinder import units +from cinder import utils +from cinder.volume.drivers.netapp.api import NaApiError +from cinder.volume.drivers.netapp.api import NaElement +from cinder.volume.drivers.netapp.api import NaServer +from cinder.volume.drivers.netapp.options import netapp_basicauth_opts +from cinder.volume.drivers.netapp.options import netapp_cluster_opts +from cinder.volume.drivers.netapp.options import netapp_connection_opts +from cinder.volume.drivers.netapp.options import netapp_img_cache_opts +from cinder.volume.drivers.netapp.options import netapp_nfs_extra_opts +from cinder.volume.drivers.netapp.options import netapp_transport_opts +from cinder.volume.drivers.netapp import ssc_utils +from cinder.volume.drivers.netapp import utils as na_utils +from cinder.volume.drivers.netapp.utils import get_volume_extra_specs +from cinder.volume.drivers.netapp.utils import provide_ems +from cinder.volume.drivers.netapp.utils import validate_instantiation +from cinder.volume.drivers import nfs + + +LOG = logging.getLogger(__name__) + + +class NetAppNFSDriver(nfs.NfsDriver): + """Base class for NetApp NFS driver. + Executes commands relating to Volumes. + """ + + VERSION = "1.0.0" + + def __init__(self, *args, **kwargs): + # NOTE(vish): db is set by Manager + validate_instantiation(**kwargs) + self._execute = None + self._context = None + super(NetAppNFSDriver, self).__init__(*args, **kwargs) + self.configuration.append_config_values(netapp_connection_opts) + self.configuration.append_config_values(netapp_basicauth_opts) + self.configuration.append_config_values(netapp_transport_opts) + self.configuration.append_config_values(netapp_img_cache_opts) + + def set_execute(self, execute): + self._execute = execute + + def do_setup(self, context): + super(NetAppNFSDriver, self).do_setup(context) + + def check_for_setup_error(self): + """Returns an error if prerequisites aren't met.""" + raise NotImplementedError() + + def create_volume_from_snapshot(self, volume, snapshot): + """Creates a volume from a snapshot.""" + vol_size = volume.size + snap_size = snapshot.volume_size + + self._clone_volume(snapshot.name, volume.name, snapshot.volume_id) + share = self._get_volume_location(snapshot.volume_id) + volume['provider_location'] = share + path = self.local_path(volume) + + if self._discover_file_till_timeout(path): + self._set_rw_permissions_for_all(path) + if vol_size != snap_size: + try: + self.extend_volume(volume, vol_size) + except Exception as e: + with excutils.save_and_reraise_exception(): + LOG.error( + _("Resizing %s failed. 
Cleaning volume."), + volume.name) + self._execute('rm', path, run_as_root=True) + else: + raise exception.CinderException( + _("NFS file %s not discovered.") % volume['name']) + + return {'provider_location': volume['provider_location']} + + def create_snapshot(self, snapshot): + """Creates a snapshot.""" + self._clone_volume(snapshot['volume_name'], + snapshot['name'], + snapshot['volume_id']) + + def delete_snapshot(self, snapshot): + """Deletes a snapshot.""" + nfs_mount = self._get_provider_location(snapshot.volume_id) + + if self._volume_not_present(nfs_mount, snapshot.name): + return True + + self._execute('rm', self._get_volume_path(nfs_mount, snapshot.name), + run_as_root=True) + + def _get_client(self): + """Creates client for server.""" + raise NotImplementedError() + + def _get_volume_location(self, volume_id): + """Returns NFS mount address as :.""" + nfs_server_ip = self._get_host_ip(volume_id) + export_path = self._get_export_path(volume_id) + return (nfs_server_ip + ':' + export_path) + + def _clone_volume(self, volume_name, clone_name, volume_id, share=None): + """Clones mounted volume using NetApp api.""" + raise NotImplementedError() + + def _get_provider_location(self, volume_id): + """Returns provider location for given volume.""" + volume = self.db.volume_get(self._context, volume_id) + return volume.provider_location + + def _get_host_ip(self, volume_id): + """Returns IP address for the given volume.""" + return self._get_provider_location(volume_id).split(':')[0] + + def _get_export_path(self, volume_id): + """Returns NFS export path for the given volume.""" + return self._get_provider_location(volume_id).split(':')[1] + + def _volume_not_present(self, nfs_mount, volume_name): + """Check if volume exists.""" + try: + self._try_execute('ls', self._get_volume_path(nfs_mount, + volume_name)) + except processutils.ProcessExecutionError: + # If the volume isn't present + return True + return False + + def _try_execute(self, *command, **kwargs): + # NOTE(vish): Volume commands can partially fail due to timing, but + # running them a second time on failure will usually + # recover nicely. + tries = 0 + while True: + try: + self._execute(*command, **kwargs) + return True + except processutils.ProcessExecutionError: + tries = tries + 1 + if tries >= self.configuration.num_shell_tries: + raise + LOG.exception(_("Recovering from a failed execute. " + "Try number %s"), tries) + time.sleep(tries ** 2) + + def _get_volume_path(self, nfs_share, volume_name): + """Get volume path (local fs path) for given volume name on given nfs + share. + + @param nfs_share string, example 172.18.194.100:/var/nfs + @param volume_name string, + example volume-91ee65ec-c473-4391-8c09-162b00c68a8c + """ + + return os.path.join(self._get_mount_point_for_share(nfs_share), + volume_name) + + def create_cloned_volume(self, volume, src_vref): + """Creates a clone of the specified volume.""" + vol_size = volume.size + src_vol_size = src_vref.size + self._clone_volume(src_vref.name, volume.name, src_vref.id) + share = self._get_volume_location(src_vref.id) + volume['provider_location'] = share + path = self.local_path(volume) + + if self._discover_file_till_timeout(path): + self._set_rw_permissions_for_all(path) + if vol_size != src_vol_size: + try: + self.extend_volume(volume, vol_size) + except Exception as e: + LOG.error( + _("Resizing %s failed. 
Cleaning volume."), volume.name) + self._execute('rm', path, run_as_root=True) + raise e + else: + raise exception.CinderException( + _("NFS file %s not discovered.") % volume['name']) + + return {'provider_location': volume['provider_location']} + + def _update_volume_stats(self): + """Retrieve stats info from volume group.""" + super(NetAppNFSDriver, self)._update_volume_stats() + self._spawn_clean_cache_job() + + def copy_image_to_volume(self, context, volume, image_service, image_id): + """Fetch the image from image_service and write it to the volume.""" + super(NetAppNFSDriver, self).copy_image_to_volume( + context, volume, image_service, image_id) + LOG.info(_('Copied image to volume %s using regular download.'), + volume['name']) + self._register_image_in_cache(volume, image_id) + + def _register_image_in_cache(self, volume, image_id): + """Stores image in the cache.""" + file_name = 'img-cache-%s' % image_id + LOG.info(_("Registering image in cache %s"), file_name) + try: + self._do_clone_rel_img_cache( + volume['name'], file_name, + volume['provider_location'], file_name) + except Exception as e: + LOG.warn( + _('Exception while registering image %(image_id)s' + ' in cache. Exception: %(exc)s') + % {'image_id': image_id, 'exc': e.__str__()}) + + def _find_image_in_cache(self, image_id): + """Finds image in cache and returns list of shares with file name.""" + result = [] + if getattr(self, '_mounted_shares', None): + for share in self._mounted_shares: + dir = self._get_mount_point_for_share(share) + file_name = 'img-cache-%s' % image_id + file_path = '%s/%s' % (dir, file_name) + if os.path.exists(file_path): + LOG.debug(_('Found cache file for image %(image_id)s' + ' on share %(share)s') + % {'image_id': image_id, 'share': share}) + result.append((share, file_name)) + return result + + def _do_clone_rel_img_cache(self, src, dst, share, cache_file): + """Do clone operation w.r.t image cache file.""" + @utils.synchronized(cache_file, external=True) + def _do_clone(): + dir = self._get_mount_point_for_share(share) + file_path = '%s/%s' % (dir, dst) + if not os.path.exists(file_path): + LOG.info(_('Cloning img from cache for %s'), dst) + self._clone_volume(src, dst, volume_id=None, share=share) + _do_clone() + + @utils.synchronized('clean_cache') + def _spawn_clean_cache_job(self): + """Spawns a clean task if not running.""" + if getattr(self, 'cleaning', None): + LOG.debug(_('Image cache cleaning in progress. Returning... 
+
+ @utils.synchronized('clean_cache')
+ def _spawn_clean_cache_job(self):
+ """Spawns a clean task if not running."""
+ if getattr(self, 'cleaning', None):
+ LOG.debug(_('Image cache cleaning in progress. Returning...'))
+ return
+ else:
+ # Set cleaning to True before the timer thread starts the job.
+ self.cleaning = True
+ t = Timer(0, self._clean_image_cache)
+ t.start()
+
+ def _clean_image_cache(self):
+ """Clean the image cache files in case of space crunch."""
+ try:
+ LOG.debug(_('Image cache cleaning in progress.'))
+ thres_size_perc_start =\
+ self.configuration.thres_avl_size_perc_start
+ thres_size_perc_stop =\
+ self.configuration.thres_avl_size_perc_stop
+ for share in getattr(self, '_mounted_shares', []):
+ try:
+ total_size, total_avl, total_alc =\
+ self._get_capacity_info(share)
+ # Force float division; an int/int ratio would
+ # truncate to 0 on Python 2.
+ avl_percent = int((float(total_avl) / total_size) * 100)
+ if avl_percent <= thres_size_perc_start:
+ LOG.info(_('Cleaning cache for share %s.'), share)
+ eligible_files = self._find_old_cache_files(share)
+ # Free files until thres_avl_size_perc_stop percent
+ # of the share is available again.
+ threshold_size = int(
+ (thres_size_perc_stop * total_size) / 100)
+ bytes_to_free = int(threshold_size - total_avl)
+ LOG.debug(_('Files to be queued for deletion %s'),
+ eligible_files)
+ self._delete_files_till_bytes_free(
+ eligible_files, share, bytes_to_free)
+ else:
+ continue
+ except Exception as e:
+ LOG.warn(_(
+ 'Exception during cache cleaning'
+ ' %(share)s. Message - %(ex)s')
+ % {'share': share, 'ex': e.__str__()})
+ continue
+ finally:
+ LOG.debug(_('Image cache cleaning done.'))
+ self.cleaning = False
+
+ def _shortlist_del_eligible_files(self, share, old_files):
+ """Prepares list of eligible files to be deleted from cache."""
+ raise NotImplementedError()
+
+ def _find_old_cache_files(self, share):
+ """Finds the old files in cache."""
+ mount_fs = self._get_mount_point_for_share(share)
+ threshold_minutes = self.configuration.expiry_thres_minutes
+ cmd = ['find', mount_fs, '-maxdepth', '1', '-name',
+ 'img-cache*', '-amin', '+%s' % (threshold_minutes)]
+ res, __ = self._execute(*cmd, run_as_root=True)
+ if res:
+ old_file_paths = res.strip('\n').split('\n')
+ mount_fs_len = len(mount_fs)
+ old_files = [x[mount_fs_len + 1:] for x in old_file_paths]
+ eligible_files = self._shortlist_del_eligible_files(
+ share, old_files)
+ return eligible_files
+ return []
+
+ def _delete_files_till_bytes_free(self, file_list, share, bytes_to_free=0):
+ """Delete files from disk till bytes are freed or list exhausted."""
+ LOG.debug(_('Bytes to free %s'), bytes_to_free)
+ if file_list and bytes_to_free > 0:
+ sorted_files = sorted(file_list, key=lambda x: x[1], reverse=True)
+ mount_fs = self._get_mount_point_for_share(share)
+ for f in sorted_files:
+ if f:
+ file_path = '%s/%s' % (mount_fs, f[0])
+ LOG.debug(_('Delete file path %s'), file_path)
+
+ @utils.synchronized(f[0], external=True)
+ def _do_delete():
+ if self._delete_file(file_path):
+ return True
+ return False
+ if _do_delete():
+ bytes_to_free = bytes_to_free - int(f[1])
+ if bytes_to_free <= 0:
+ return
+
+ def _delete_file(self, path):
+ """Delete file from disk and return result as boolean."""
+ try:
+ LOG.debug(_('Deleting file at path %s'), path)
+ cmd = ['rm', '-f', path]
+ self._execute(*cmd, run_as_root=True)
+ return True
+ except Exception as ex:
+ LOG.warning(_('Exception during deleting %s'), ex.__str__())
+ return False
+
+ def clone_image(self, volume, image_location, image_id, image_meta):
+ """Create a volume efficiently from an existing image.
+
+ image_location is a string whose format depends on the
+ image service backend in use. The driver should use it
+ to determine whether cloning is possible.
+
+ image_id is a string which represents id of the image.
+ It can be used by the driver to introspect internal
+ stores or registry to do an efficient image clone.
+ + Returns a dict of volume properties eg. provider_location, + boolean indicating whether cloning occurred. + """ + + cloned = False + post_clone = False + share = None + try: + cache_result = self._find_image_in_cache(image_id) + if cache_result: + cloned = self._clone_from_cache(volume, image_id, cache_result) + else: + cloned = self._direct_nfs_clone(volume, image_location, + image_id) + if cloned: + post_clone = self._post_clone_image(volume) + except Exception as e: + msg = e.msg if getattr(e, 'msg', None) else e.__str__() + LOG.info(_('Image cloning unsuccessful for image' + ' %(image_id)s. Message: %(msg)s') + % {'image_id': image_id, 'msg': msg}) + vol_path = self.local_path(volume) + volume['provider_location'] = None + if os.path.exists(vol_path): + self._delete_file(vol_path) + finally: + cloned = cloned and post_clone + share = volume['provider_location'] if cloned else None + bootable = True if cloned else False + return {'provider_location': share, 'bootable': bootable}, cloned + + def _clone_from_cache(self, volume, image_id, cache_result): + """Clones a copy from image cache.""" + cloned = False + LOG.info(_('Cloning image %s from cache'), image_id) + for res in cache_result: + # Repeat tries in other shares if failed in some + (share, file_name) = res + LOG.debug(_('Cache share: %s'), share) + if (share and + self._is_share_vol_compatible(volume, share)): + try: + self._do_clone_rel_img_cache( + file_name, volume['name'], share, file_name) + cloned = True + volume['provider_location'] = share + break + except Exception: + LOG.warn(_('Unexpected exception during' + ' image cloning in share %s'), share) + return cloned + + def _direct_nfs_clone(self, volume, image_location, image_id): + """Clone directly in nfs share.""" + LOG.info(_('Checking image clone %s from glance share.'), image_id) + cloned = False + image_location = self._construct_image_nfs_url(image_location) + share = self._is_cloneable_share(image_location) + if share and self._is_share_vol_compatible(volume, share): + LOG.debug(_('Share is cloneable %s'), share) + volume['provider_location'] = share + (__, ___, img_file) = image_location.rpartition('/') + dir_path = self._get_mount_point_for_share(share) + img_path = '%s/%s' % (dir_path, img_file) + img_info = image_utils.qemu_img_info(img_path) + if img_info.file_format == 'raw': + LOG.debug(_('Image is raw %s'), image_id) + self._clone_volume( + img_file, volume['name'], + volume_id=None, share=share) + cloned = True + else: + LOG.info( + _('Image will locally be converted to raw %s'), + image_id) + dst = '%s/%s' % (dir_path, volume['name']) + image_utils.convert_image(img_path, dst, 'raw') + data = image_utils.qemu_img_info(dst) + if data.file_format != "raw": + raise exception.InvalidResults( + _("Converted to raw, but" + " format is now %s") % data.file_format) + else: + cloned = True + self._register_image_in_cache( + volume, image_id) + return cloned + + def _post_clone_image(self, volume): + """Do operations post image cloning.""" + LOG.info(_('Performing post clone for %s'), volume['name']) + vol_path = self.local_path(volume) + if self._discover_file_till_timeout(vol_path): + self._set_rw_permissions_for_all(vol_path) + self._resize_image_file(vol_path, volume['size']) + return True + raise exception.InvalidResults( + _("NFS file could not be discovered.")) + + def _resize_image_file(self, path, new_size): + """Resize the image file on share to new size.""" + LOG.debug(_('Checking file for resize')) + if self._is_file_size_equal(path, new_size): + 
return
+        else:
+            LOG.info(_('Resizing file to %sG'), new_size)
+            image_utils.resize_image(path, new_size)
+            if self._is_file_size_equal(path, new_size):
+                return
+            else:
+                raise exception.InvalidResults(
+                    _('Resizing image file failed.'))
+
+    def _is_file_size_equal(self, path, size):
+        """Checks if file size at path is equal to size."""
+        data = image_utils.qemu_img_info(path)
+        virt_size = data.virtual_size / units.GiB
+        return virt_size == size
+
+    def _discover_file_till_timeout(self, path, timeout=45):
+        """Waits till a file is discovered at path or timeout expires."""
+        # Sometimes nfs takes time to discover file
+        # Retrying in case any unexpected situation occurs
+        retry_seconds = timeout
+        sleep_interval = 2
+        while True:
+            if os.path.exists(path):
+                return True
+            else:
+                if retry_seconds <= 0:
+                    LOG.warn(_('Discover file retries exhausted.'))
+                    return False
+                else:
+                    time.sleep(sleep_interval)
+                    retry_seconds = retry_seconds - sleep_interval
+
+    def _is_cloneable_share(self, image_location):
+        """Checks if the image at the given location is cloneable."""
+        conn, dr = self._check_get_nfs_path_segs(image_location)
+        return self._check_share_in_use(conn, dr)
+
+    def _check_get_nfs_path_segs(self, image_location):
+        """Checks if the nfs path format is matched.
+
+        WebNFS url format with relative-path is supported.
+        Accepting all characters in path-names and checking
+        against the mounted shares which will contain only
+        allowed path segments. Returns connection and dir details.
+        """
+        conn, dr = None, None
+        if image_location:
+            nfs_loc_pattern =\
+                ('^nfs://(([\w\-\.]+:{1}[\d]+|[\w\-\.]+)(/[^\/].*)'
+                 '*(/[^\/\\\\]+)$)')
+            matched = re.match(nfs_loc_pattern, image_location, flags=0)
+            if not matched:
+                LOG.debug(_('Image location not in the'
+                            ' expected format %s'), image_location)
+            else:
+                conn = matched.group(2)
+                dr = matched.group(3) or '/'
+        return (conn, dr)
+
+    def _share_match_for_ip(self, ip, shares):
+        """Returns the share that is served by ip.
+
+        Multiple shares can have same dir path but
+        can be served using different ips. It finds the
+        share which is served by ip on same nfs server.
+        """
+        raise NotImplementedError()
+
+    def _check_share_in_use(self, conn, dir_path):
+        """Checks if share is cinder mounted and returns it."""
+        try:
+            if conn:
+                host = conn.split(':')[0]
+                ip = self._resolve_hostname(host)
+                share_candidates = []
+                for sh in self._mounted_shares:
+                    sh_exp = sh.split(':')[1]
+                    if sh_exp == dir_path:
+                        share_candidates.append(sh)
+                if share_candidates:
+                    LOG.debug(_('Found possible share matches %s'),
+                              share_candidates)
+                    return self._share_match_for_ip(ip, share_candidates)
+        except Exception:
+            LOG.warn(_("Unexpected exception while short-listing used"
+                       " share."))
+        return None
+
+    def _construct_image_nfs_url(self, image_location):
+        """Construct direct url for nfs backend.
+
+        It creates direct url from image_location
+        which is a tuple with direct_url and locations.
+        Returns url with nfs scheme if nfs store
+        else returns url. It needs to be verified
+        by backend before use. 
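+
+        Illustrative example with hypothetical values: for a location
+        whose url is 'file:///mnt/img-repo/img-id' with share_location
+        'nfs://host/img-repo' and mount_point '/mnt/img-repo', the
+        direct url constructed is 'nfs://host/img-repo/img-id'.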
+
+        """
+
+        direct_url, locations = image_location
+        if not direct_url and not locations:
+            raise exception.NotFound(_('Image location not present.'))
+
+        # Locations will always be a list of one until
+        # bp multiple-image-locations is introduced
+        if not locations:
+            return direct_url
+        location = locations[0]
+        url = location['url']
+        if not location['metadata']:
+            return url
+        location_type = location['metadata'].get('type')
+        if not location_type or location_type.lower() != "nfs":
+            return url
+        share_location = location['metadata'].get('share_location')
+        mount_point = location['metadata'].get('mount_point')
+        if not share_location or not mount_point:
+            return url
+        url_parse = urlparse.urlparse(url)
+        abs_path = os.path.join(url_parse.netloc, url_parse.path)
+        rel_path = os.path.relpath(abs_path, mount_point)
+        direct_url = "%s/%s" % (share_location, rel_path)
+        return direct_url
+
+    def extend_volume(self, volume, new_size):
+        """Extend an existing volume to the new size."""
+        LOG.info(_('Extending volume %s.'), volume['name'])
+        path = self.local_path(volume)
+        self._resize_image_file(path, new_size)
+
+    def _is_share_vol_compatible(self, volume, share):
+        """Checks if share is compatible with volume to host it."""
+        raise NotImplementedError()
+
+    def _resolve_hostname(self, hostname):
+        """Resolves hostname to IP address."""
+        res = socket.getaddrinfo(hostname, None)[0]
+        family, socktype, proto, canonname, sockaddr = res
+        return sockaddr[0]
+
+    def _check_volume_can_hold_img(self, volume, img_info):
+        """Checks if the volume can hold the image given its size."""
+        virt_size = int(img_info.virtual_size / units.GiB)
+        vol_size = int(volume['size'])
+        if vol_size < virt_size:
+            msg = _("Volume size smaller than image size.")
+            raise exception.ImageCopyFailure(reason=msg)
+
+    def _move_nfs_file(self, source_path, dest_path):
+        """Moves source to destination."""
+        @utils.synchronized(dest_path, external=True)
+        def _move_file(src, dst):
+            if os.path.exists(dst):
+                LOG.warn(_("Destination %s already exists."), dst)
+                return False
+            self._execute('mv', src, dst, run_as_root=True)
+            return True
+
+        try:
+            moved = False
+            moved = _move_file(source_path, dest_path)
+        except Exception as e:
+            LOG.warn(_('Exception moving file %(src)s. 
Message - %(e)s') + % {'src': source_path, 'e': e}) + return moved + + +class NetAppDirectNfsDriver (NetAppNFSDriver): + """Executes commands related to volumes on NetApp filer.""" + + def __init__(self, *args, **kwargs): + super(NetAppDirectNfsDriver, self).__init__(*args, **kwargs) + + def do_setup(self, context): + super(NetAppDirectNfsDriver, self).do_setup(context) + self._context = context + self._client = self._get_client() + self._do_custom_setup(self._client) + + def check_for_setup_error(self): + """Returns an error if prerequisites aren't met.""" + self._check_flags() + + def _check_flags(self): + """Raises error if any required configuration flag is missing.""" + required_flags = ['netapp_login', + 'netapp_password', + 'netapp_server_hostname', + 'netapp_server_port', + 'netapp_transport_type'] + for flag in required_flags: + if not getattr(self.configuration, flag, None): + raise exception.CinderException(_('%s is not set') % flag) + + def _get_client(self): + """Creates NetApp api client.""" + client = NaServer( + host=self.configuration.netapp_server_hostname, + server_type=NaServer.SERVER_TYPE_FILER, + transport_type=self.configuration.netapp_transport_type, + style=NaServer.STYLE_LOGIN_PASSWORD, + username=self.configuration.netapp_login, + password=self.configuration.netapp_password) + return client + + def _do_custom_setup(self, client): + """Do the customized set up on client if any for different types.""" + raise NotImplementedError() + + def _is_naelement(self, elem): + """Checks if element is NetApp element.""" + if not isinstance(elem, NaElement): + raise ValueError('Expects NaElement') + + def _get_ontapi_version(self): + """Gets the supported ontapi version.""" + ontapi_version = NaElement('system-get-ontapi-version') + res = self._client.invoke_successfully(ontapi_version, False) + major = res.get_child_content('major-version') + minor = res.get_child_content('minor-version') + return (major, minor) + + def _get_export_ip_path(self, volume_id=None, share=None): + """Returns export ip and path. + + One of volume id or share is used to return the values. + """ + + if volume_id: + host_ip = self._get_host_ip(volume_id) + export_path = self._get_export_path(volume_id) + elif share: + host_ip = share.split(':')[0] + export_path = share.split(':')[1] + else: + raise exception.InvalidInput('None of vol id or share specified.') + return (host_ip, export_path) + + def _create_file_usage_req(self, path): + """Creates the request element for file_usage_get.""" + file_use = NaElement.create_node_with_children( + 'file-usage-get', **{'path': path}) + return file_use + + +class NetAppDirectCmodeNfsDriver (NetAppDirectNfsDriver): + """Executes commands related to volumes on c mode.""" + + def __init__(self, *args, **kwargs): + super(NetAppDirectCmodeNfsDriver, self).__init__(*args, **kwargs) + self.configuration.append_config_values(netapp_cluster_opts) + self.configuration.append_config_values(netapp_nfs_extra_opts) + + def _do_custom_setup(self, client): + """Do the customized set up on client for cluster mode.""" + # Default values to run first api + client.set_api_version(1, 15) + (major, minor) = self._get_ontapi_version() + client.set_api_version(major, minor) + self.vserver = self.configuration.netapp_vserver + self.ssc_vols = None + self.stale_vols = set() + if self.vserver: + self.ssc_enabled = True + LOG.info(_("Shares on vserver %s will only" + " be used for provisioning.") % (self.vserver)) + else: + self.ssc_enabled = False + LOG.warn(_("No vserver set in config. 
SSC will be disabled.")) + + def check_for_setup_error(self): + """Check that the driver is working and can communicate.""" + super(NetAppDirectCmodeNfsDriver, self).check_for_setup_error() + if self.ssc_enabled: + ssc_utils.check_ssc_api_permissions(self._client) + + def _invoke_successfully(self, na_element, vserver=None): + """Invoke the api for successful result. + + If vserver is present then invokes vserver api + else Cluster api. + :param vserver: vserver name. + """ + + self._is_naelement(na_element) + server = copy.copy(self._client) + if vserver: + server.set_vserver(vserver) + else: + server.set_vserver(None) + result = server.invoke_successfully(na_element, True) + return result + + def create_volume(self, volume): + """Creates a volume. + + :param volume: volume reference + """ + self._ensure_shares_mounted() + extra_specs = get_volume_extra_specs(volume) + eligible = self._find_shares(volume['size'], extra_specs) + if not eligible: + raise exception.NfsNoSuitableShareFound( + volume_size=volume['size']) + for sh in eligible: + try: + volume['provider_location'] = sh + LOG.info(_('casted to %s') % volume['provider_location']) + self._do_create_volume(volume) + return {'provider_location': volume['provider_location']} + except Exception: + LOG.warn(_("Exception creating vol %(name)s" + " on share %(share)s") + % {'name': volume['name'], + 'share': volume['provider_location']}) + volume['provider_location'] = None + finally: + if self.ssc_enabled: + self._update_stale_vols(self._get_vol_for_share(sh)) + msg = _("Volume %s could not be created on shares.") + raise exception.VolumeBackendAPIException(data=msg % (volume['name'])) + + def _find_shares(self, size, extra_specs): + """Finds suitable shares for given params.""" + shares = [] + containers = [] + if self.ssc_enabled: + vols = ssc_utils.get_volumes_for_specs(self.ssc_vols, extra_specs) + containers = [x.export['path'] for x in vols] + else: + containers = self._mounted_shares + for sh in containers: + if self._is_share_eligible(sh, size): + size, avl, alloc = self._get_capacity_info(sh) + shares.append((sh, avl)) + shares = [a for a, b in sorted( + shares, key=lambda x: x[1], reverse=True)] + return shares + + def _clone_volume(self, volume_name, clone_name, + volume_id, share=None): + """Clones mounted volume on NetApp Cluster.""" + (vserver, exp_volume) = self._get_vserver_and_exp_vol(volume_id, share) + self._clone_file(exp_volume, volume_name, clone_name, vserver) + share = share if share else self._get_provider_location(volume_id) + self._post_prov_deprov_in_ssc(share) + + def _get_vserver_and_exp_vol(self, volume_id=None, share=None): + """Gets the vserver and export volume for share.""" + (host_ip, export_path) = self._get_export_ip_path(volume_id, share) + ifs = self._get_if_info_by_ip(host_ip) + vserver = ifs[0].get_child_content('vserver') + exp_volume = self._get_vol_by_junc_vserver(vserver, export_path) + return (vserver, exp_volume) + + def _get_if_info_by_ip(self, ip): + """Gets the network interface info by ip.""" + net_if_iter = NaElement('net-interface-get-iter') + net_if_iter.add_new_child('max-records', '10') + query = NaElement('query') + net_if_iter.add_child_elem(query) + query.add_node_with_children('net-interface-info', + **{'address': self._resolve_hostname(ip)}) + result = self._invoke_successfully(net_if_iter) + if result.get_child_content('num-records') and\ + int(result.get_child_content('num-records')) >= 1: + attr_list = result.get_child_by_name('attributes-list') + return 
attr_list.get_children() + raise exception.NotFound( + _('No interface found on cluster for ip %s') + % (ip)) + + def _get_vserver_ips(self, vserver): + """Get ips for the vserver.""" + result = na_utils.invoke_api( + self._client, api_name='net-interface-get-iter', + is_iter=True, tunnel=vserver) + if_list = [] + for res in result: + records = res.get_child_content('num-records') + if records > 0: + attr_list = res['attributes-list'] + ifs = attr_list.get_children() + if_list.extend(ifs) + return if_list + + def _get_vol_by_junc_vserver(self, vserver, junction): + """Gets the volume by junction path and vserver.""" + vol_iter = NaElement('volume-get-iter') + vol_iter.add_new_child('max-records', '10') + query = NaElement('query') + vol_iter.add_child_elem(query) + vol_attrs = NaElement('volume-attributes') + query.add_child_elem(vol_attrs) + vol_attrs.add_node_with_children( + 'volume-id-attributes', + **{'junction-path': junction, + 'owning-vserver-name': vserver}) + des_attrs = NaElement('desired-attributes') + des_attrs.add_node_with_children('volume-attributes', + **{'volume-id-attributes': None}) + vol_iter.add_child_elem(des_attrs) + result = self._invoke_successfully(vol_iter, vserver) + if result.get_child_content('num-records') and\ + int(result.get_child_content('num-records')) >= 1: + attr_list = result.get_child_by_name('attributes-list') + vols = attr_list.get_children() + vol_id = vols[0].get_child_by_name('volume-id-attributes') + return vol_id.get_child_content('name') + msg_fmt = {'vserver': vserver, 'junction': junction} + raise exception.NotFound(_("""No volume on cluster with vserver + %(vserver)s and junction path %(junction)s + """) % msg_fmt) + + def _clone_file(self, volume, src_path, dest_path, vserver=None): + """Clones file on vserver.""" + msg = _("""Cloning with params volume %(volume)s, src %(src_path)s, + dest %(dest_path)s, vserver %(vserver)s""") + msg_fmt = {'volume': volume, 'src_path': src_path, + 'dest_path': dest_path, 'vserver': vserver} + LOG.debug(msg % msg_fmt) + clone_create = NaElement.create_node_with_children( + 'clone-create', + **{'volume': volume, 'source-path': src_path, + 'destination-path': dest_path}) + self._invoke_successfully(clone_create, vserver) + + def _update_volume_stats(self): + """Retrieve stats info from volume group.""" + super(NetAppDirectCmodeNfsDriver, self)._update_volume_stats() + netapp_backend = 'NetApp_NFS_cluster_direct' + backend_name = self.configuration.safe_get('volume_backend_name') + self._stats["volume_backend_name"] = (backend_name or + netapp_backend) + self._stats["vendor_name"] = 'NetApp' + self._stats["driver_version"] = '1.0' + self._update_cluster_vol_stats(self._stats) + provide_ems(self, self._client, self._stats, netapp_backend) + + def _update_cluster_vol_stats(self, data): + """Updates vol stats with cluster config.""" + if self.ssc_enabled: + sync = True if self.ssc_vols is None else False + ssc_utils.refresh_cluster_ssc(self, self._client, self.vserver, + synchronous=sync) + else: + LOG.warn(_("No vserver set in config. 
SSC will be disabled.")) + if self.ssc_vols: + data['netapp_mirrored'] = 'true'\ + if self.ssc_vols['mirrored'] else 'false' + data['netapp_unmirrored'] = 'true'\ + if len(self.ssc_vols['all']) >\ + len(self.ssc_vols['mirrored']) else 'false' + data['netapp_dedup'] = 'true'\ + if self.ssc_vols['dedup'] else 'false' + data['netapp_nodedupe'] = 'true'\ + if len(self.ssc_vols['all']) >\ + len(self.ssc_vols['dedup']) else 'false' + data['netapp_compression'] = 'true'\ + if self.ssc_vols['compression'] else 'false' + data['netapp_nocompression'] = 'true'\ + if len(self.ssc_vols['all']) >\ + len(self.ssc_vols['compression']) else 'false' + data['netapp_thin_provisioned'] = 'true'\ + if self.ssc_vols['thin'] else 'false' + data['netapp_thick_provisioned'] = 'true'\ + if len(self.ssc_vols['all']) >\ + len(self.ssc_vols['thin']) else 'false' + if self.ssc_vols['all']: + vol_max = max(self.ssc_vols['all']) + data['total_capacity_gb'] =\ + int(vol_max.space['size_total_bytes']) / units.GiB + data['free_capacity_gb'] =\ + int(vol_max.space['size_avl_bytes']) / units.GiB + else: + data['total_capacity_gb'] = 0 + data['free_capacity_gb'] = 0 + elif self.ssc_enabled: + LOG.warn(_("No cluster ssc stats found." + " Wait for next volume stats update.")) + + @utils.synchronized('update_stale') + def _update_stale_vols(self, volume=None, reset=False): + """Populates stale vols with vol and returns set copy.""" + if volume: + self.stale_vols.add(volume) + set_copy = self.stale_vols.copy() + if reset: + self.stale_vols.clear() + return set_copy + + @utils.synchronized("refresh_ssc_vols") + def refresh_ssc_vols(self, vols): + """Refreshes ssc_vols with latest entries.""" + if not self._mounted_shares: + LOG.warn(_("No shares found hence skipping ssc refresh.")) + return + mnt_share_vols = set() + vs_ifs = self._get_vserver_ips(self.vserver) + for vol in vols['all']: + for sh in self._mounted_shares: + host = sh.split(':')[0] + junction = sh.split(':')[1] + ip = self._resolve_hostname(host) + if (self._ip_in_ifs(ip, vs_ifs) and + junction == vol.id['junction_path']): + mnt_share_vols.add(vol) + vol.export['path'] = sh + break + for key in vols.keys(): + vols[key] = vols[key] & mnt_share_vols + self.ssc_vols = vols + + def _ip_in_ifs(self, ip, api_ifs): + """Checks if ip is listed for ifs in api format.""" + if api_ifs is None: + return False + for ifc in api_ifs: + ifc_ip = ifc.get_child_content("address") + if ifc_ip == ip: + return True + return False + + def _shortlist_del_eligible_files(self, share, old_files): + """Prepares list of eligible files to be deleted from cache.""" + file_list = [] + (vserver, exp_volume) = self._get_vserver_and_exp_vol( + volume_id=None, share=share) + for file in old_files: + path = '/vol/%s/%s' % (exp_volume, file) + u_bytes = self._get_cluster_file_usage(path, vserver) + file_list.append((file, u_bytes)) + LOG.debug(_('Shortlisted del elg files %s'), file_list) + return file_list + + def _get_cluster_file_usage(self, path, vserver): + """Gets the file unique bytes.""" + LOG.debug(_('Getting file usage for %s'), path) + file_use = NaElement.create_node_with_children( + 'file-usage-get', **{'path': path}) + res = self._invoke_successfully(file_use, vserver) + bytes = res.get_child_content('unique-bytes') + LOG.debug(_('file-usage for path %(path)s is %(bytes)s') + % {'path': path, 'bytes': bytes}) + return bytes + + def _share_match_for_ip(self, ip, shares): + """Returns the share that is served by ip. 
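+
+        The vserver owning each candidate share is compared with the
+        vserver owning the given ip; the first share served by the same
+        vserver is returned.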
+ + Multiple shares can have same dir path but + can be served using different ips. It finds the + share which is served by ip on same nfs server. + """ + ip_vserver = self._get_vserver_for_ip(ip) + if ip_vserver and shares: + for share in shares: + ip_sh = share.split(':')[0] + sh_vserver = self._get_vserver_for_ip(ip_sh) + if sh_vserver == ip_vserver: + LOG.debug(_('Share match found for ip %s'), ip) + return share + LOG.debug(_('No share match found for ip %s'), ip) + return None + + def _get_vserver_for_ip(self, ip): + """Get vserver for the mentioned ip.""" + try: + ifs = self._get_if_info_by_ip(ip) + vserver = ifs[0].get_child_content('vserver') + return vserver + except Exception: + return None + + def _get_vol_for_share(self, nfs_share): + """Gets the ssc vol with given share.""" + if self.ssc_vols: + for vol in self.ssc_vols['all']: + if vol.export['path'] == nfs_share: + return vol + return None + + def _is_share_vol_compatible(self, volume, share): + """Checks if share is compatible with volume to host it.""" + compatible = self._is_share_eligible(share, volume['size']) + if compatible and self.ssc_enabled: + matched = self._is_share_vol_type_match(volume, share) + compatible = compatible and matched + return compatible + + def _is_share_vol_type_match(self, volume, share): + """Checks if share matches volume type.""" + netapp_vol = self._get_vol_for_share(share) + LOG.debug(_("Found volume %(vol)s for share %(share)s.") + % {'vol': netapp_vol, 'share': share}) + extra_specs = get_volume_extra_specs(volume) + vols = ssc_utils.get_volumes_for_specs(self.ssc_vols, extra_specs) + return netapp_vol in vols + + def delete_volume(self, volume): + """Deletes a logical volume.""" + share = volume['provider_location'] + super(NetAppDirectCmodeNfsDriver, self).delete_volume(volume) + self._post_prov_deprov_in_ssc(share) + + def delete_snapshot(self, snapshot): + """Deletes a snapshot.""" + share = self._get_provider_location(snapshot.volume_id) + super(NetAppDirectCmodeNfsDriver, self).delete_snapshot(snapshot) + self._post_prov_deprov_in_ssc(share) + + def _post_prov_deprov_in_ssc(self, share): + if self.ssc_enabled and share: + netapp_vol = self._get_vol_for_share(share) + if netapp_vol: + self._update_stale_vols(volume=netapp_vol) + + def copy_image_to_volume(self, context, volume, image_service, image_id): + """Fetch the image from image_service and write it to the volume.""" + try: + self._try_copyoffload(context, volume, image_service, image_id) + LOG.info(_('Copied image %(img)s to volume %(vol)s using copy' + ' offload.') % {'img': image_id, 'vol': volume['id']}) + except Exception as e: + LOG.info(_('Copy offload unsuccessful. 
%s'), e)
+            super(NetAppDirectCmodeNfsDriver, self).copy_image_to_volume(
+                context, volume, image_service, image_id)
+        finally:
+            if self.ssc_enabled:
+                sh = self._get_provider_location(volume['id'])
+                self._update_stale_vols(self._get_vol_for_share(sh))
+
+    def _try_copyoffload(self, context, volume, image_service, image_id):
+        """Tries server side file copy offload."""
+        major, minor = self._client.get_api_version()
+        col_path = self.configuration.netapp_copyoffload_tool_path
+        if (major == 1 and minor >= 20 and col_path):
+            copied = False
+            cache_result = self._find_image_in_cache(image_id)
+            if cache_result:
+                copied = self._copy_from_cache(col_path, volume, image_id,
+                                               cache_result)
+            if not cache_result or not copied:
+                self._copy_from_img_service(col_path, context, volume,
+                                            image_service, image_id)
+        else:
+            msg = _("Copy offload either not configured or unsupported.")
+            raise exception.VolumeBackendAPIException(data=msg)
+
+    def _get_ip_verify_on_cluster(self, host):
+        """Verifies that the host is on the cluster and returns its ip."""
+        ip = socket.gethostbyname(host)
+        vserver = self._get_vserver_for_ip(ip)
+        if not vserver:
+            raise exception.NotFound(_("No vserver owning the ip %s.") % ip)
+        return ip
+
+    def _copy_from_cache(self, copy_offload_path, volume, image_id,
+                         cache_result):
+        """Try copying image file from cached file."""
+        LOG.debug(_("Trying copy from cache using copy offload."))
+        copied = False
+        for res in cache_result:
+            try:
+                (share, file_name) = res
+                LOG.debug(_("Found cache file on share %s."), share)
+                src_ip = self._get_ip_verify_on_cluster(share.split(':')[0])
+                src_path = os.path.join(share.split(':')[1], file_name)
+                dst_ip = self._get_ip_verify_on_cluster(self._get_host_ip(
+                    volume['id']))
+                dst_path = os.path.join(self._get_export_path(volume['id']),
+                                        volume['name'])
+                self._execute(copy_offload_path, src_ip, dst_ip,
+                              src_path, dst_path, run_as_root=False,
+                              check_exit_code=0)
+                self._set_rw_permissions_for_all(self.local_path(volume))
+                self._resize_image_file(self.local_path(volume),
+                                        volume['size'])
+                if share != self._get_export_path(volume['id']):
+                    self._register_image_in_cache(volume, image_id)
+                LOG.debug(_("Copied image from cache to volume %s using copy"
+                            " offload."), volume['id'])
+                copied = True
+                break
+            except Exception as e:
+                LOG.error(_('Error copying from cache in copy offload. 
%s'), e) + copied = False + return copied + + def _copy_from_img_service(self, copy_offload_path, context, volume, + image_service, image_id): + """Copies from the image service using copy offload.""" + LOG.debug(_("Trying copy from image service using copy offload.")) + image_loc = image_service.get_location(context, image_id) + image_loc = self._construct_image_nfs_url(image_loc) + conn, dr = self._check_get_nfs_path_segs(image_loc) + if conn: + src_ip = self._get_ip_verify_on_cluster(conn.split(':')[0]) + else: + raise exception.NotFound(_("Source host details not found.")) + (__, ___, img_file) = image_loc.rpartition('/') + src_path = os.path.join(dr, img_file) + dst_ip = self._get_ip_verify_on_cluster(self._get_host_ip( + volume['id'])) + # tmp file is required to deal with img formats + tmp_img_file = str(uuid.uuid4()) + dst_img_serv_path = os.path.join(self._get_export_path(volume['id']), + tmp_img_file) + self._execute(copy_offload_path, src_ip, dst_ip, src_path, + dst_img_serv_path, run_as_root=False, check_exit_code=0) + LOG.debug(_('Copied image %(img)s to tmp file %(tmp)s.') + % {'img': image_id, 'tmp': tmp_img_file}) + dst_share = self._get_provider_location(volume['id']) + dst_dir = self._get_mount_point_for_share(dst_share) + dst_img_local = os.path.join(dst_dir, tmp_img_file) + dst_img_cache_local = os.path.join(dst_dir, + 'img-cache-%s' % (image_id)) + dst_vol_serv_path = os.path.join(self._get_export_path(volume['id']), + volume['name']) + img_info = image_utils.qemu_img_info(dst_img_local) + try: + self._check_volume_can_hold_img(volume, img_info) + if img_info.file_format == 'raw': + LOG.debug(_('Image is raw %s.'), image_id) + self._execute(copy_offload_path, dst_ip, dst_ip, + dst_img_serv_path, dst_vol_serv_path, + run_as_root=False, check_exit_code=0) + self._set_rw_permissions_for_all(self.local_path(volume)) + self._move_nfs_file(dst_img_local, dst_img_cache_local) + self._resize_image_file(self.local_path(volume), + volume['size']) + LOG.debug(_('Copied raw image %(img)s to volume %(vol)s.') + % {'img': image_id, 'vol': volume['id']}) + else: + LOG.debug(_('Image will be converted to raw %s.'), image_id) + img_conv = str(uuid.uuid4()) + dst_img_conv_local = os.path.join(dst_dir, img_conv) + try: + image_utils.convert_image(dst_img_local, + dst_img_conv_local, 'raw') + data = image_utils.qemu_img_info(dst_img_conv_local) + self._check_volume_can_hold_img(volume, data) + if data.file_format != "raw": + raise exception.InvalidResults( + _("Converted to raw, but format is now %s.") + % data.file_format) + else: + dst_conv_serv_path = os.path.join( + self._get_export_path(volume['id']), + img_conv) + self._execute(copy_offload_path, dst_ip, dst_ip, + dst_conv_serv_path, dst_vol_serv_path, + run_as_root=False, check_exit_code=0) + self._set_rw_permissions_for_all( + self.local_path(volume)) + self._move_nfs_file(dst_img_conv_local, + dst_img_cache_local) + self._resize_image_file(self.local_path(volume), + volume['size']) + LOG.debug(_('Copied locally converted raw image' + ' %(img)s to volume %(vol)s.') + % {'img': image_id, 'vol': volume['id']}) + finally: + if os.path.exists(dst_img_conv_local): + self._delete_file(dst_img_conv_local) + finally: + if os.path.exists(dst_img_local): + self._delete_file(dst_img_local) + + +class NetAppDirect7modeNfsDriver (NetAppDirectNfsDriver): + """Executes commands related to volumes on 7 mode.""" + + def __init__(self, *args, **kwargs): + super(NetAppDirect7modeNfsDriver, self).__init__(*args, **kwargs) + + def 
_do_custom_setup(self, client):
+        """Do the customized set up on client if any for 7 mode."""
+        (major, minor) = self._get_ontapi_version()
+        client.set_api_version(major, minor)
+
+    def check_for_setup_error(self):
+        """Checks if setup occurred properly."""
+        api_version = self._client.get_api_version()
+        if api_version:
+            major, minor = api_version
+            if major == 1 and minor < 9:
+                msg = _("Unsupported ONTAP version."
+                        " ONTAP version 7.3.1 and above is supported.")
+                raise exception.VolumeBackendAPIException(data=msg)
+        else:
+            msg = _("API version could not be determined.")
+            raise exception.VolumeBackendAPIException(data=msg)
+        super(NetAppDirect7modeNfsDriver, self).check_for_setup_error()
+
+    def _invoke_successfully(self, na_element, vfiler=None):
+        """Invoke the api for successful result.
+
+        If vfiler is present then invokes vfiler api
+        else filer api.
+        :param vfiler: vfiler name.
+        """
+
+        self._is_naelement(na_element)
+        server = copy.copy(self._client)
+        if vfiler:
+            server.set_vfiler(vfiler)
+        else:
+            server.set_vfiler(None)
+        result = server.invoke_successfully(na_element, True)
+        return result
+
+    def _clone_volume(self, volume_name, clone_name,
+                      volume_id, share=None):
+        """Clones mounted volume with NetApp filer."""
+        (host_ip, export_path) = self._get_export_ip_path(volume_id, share)
+        storage_path = self._get_actual_path_for_export(export_path)
+        target_path = '%s/%s' % (storage_path, clone_name)
+        (clone_id, vol_uuid) = self._start_clone('%s/%s' % (storage_path,
+                                                            volume_name),
+                                                 target_path)
+        if vol_uuid:
+            try:
+                self._wait_for_clone_finish(clone_id, vol_uuid)
+            except NaApiError as e:
+                if e.code != 'UnknownCloneId':
+                    self._clear_clone(clone_id)
+                raise
+
+    def _get_actual_path_for_export(self, export_path):
+        """Gets the actual path on the filer for export path."""
+        storage_path = NaElement.create_node_with_children(
+            'nfs-exportfs-storage-path', **{'pathname': export_path})
+        result = self._invoke_successfully(storage_path, None)
+        if result.get_child_content('actual-pathname'):
+            return result.get_child_content('actual-pathname')
+        raise exception.NotFound(_('No storage path found for export path %s')
+                                 % (export_path))
+
+    def _start_clone(self, src_path, dest_path):
+        """Starts the clone operation. 
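+
+        Invokes the 'clone-start' API with no-snap set to true and reads
+        the clone-op-id and volume-uuid from the response.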
+ + :returns: clone-id + """ + + msg_fmt = {'src_path': src_path, 'dest_path': dest_path} + LOG.debug(_("""Cloning with src %(src_path)s, dest %(dest_path)s""") + % msg_fmt) + clone_start = NaElement.create_node_with_children( + 'clone-start', + **{'source-path': src_path, + 'destination-path': dest_path, + 'no-snap': 'true'}) + result = self._invoke_successfully(clone_start, None) + clone_id_el = result.get_child_by_name('clone-id') + cl_id_info = clone_id_el.get_child_by_name('clone-id-info') + vol_uuid = cl_id_info.get_child_content('volume-uuid') + clone_id = cl_id_info.get_child_content('clone-op-id') + return (clone_id, vol_uuid) + + def _wait_for_clone_finish(self, clone_op_id, vol_uuid): + """Waits till a clone operation is complete or errored out.""" + clone_ls_st = NaElement('clone-list-status') + clone_id = NaElement('clone-id') + clone_ls_st.add_child_elem(clone_id) + clone_id.add_node_with_children('clone-id-info', + **{'clone-op-id': clone_op_id, + 'volume-uuid': vol_uuid}) + task_running = True + while task_running: + result = self._invoke_successfully(clone_ls_st, None) + status = result.get_child_by_name('status') + ops_info = status.get_children() + if ops_info: + state = ops_info[0].get_child_content('clone-state') + if state == 'completed': + task_running = False + elif state == 'failed': + code = ops_info[0].get_child_content('error') + reason = ops_info[0].get_child_content('reason') + raise NaApiError(code, reason) + else: + time.sleep(1) + else: + raise NaApiError( + 'UnknownCloneId', + 'No clone operation for clone id %s found on the filer' + % (clone_id)) + + def _clear_clone(self, clone_id): + """Clear the clone information. + + Invoke this in case of failed clone. + """ + + clone_clear = NaElement.create_node_with_children( + 'clone-clear', + **{'clone-id': clone_id}) + retry = 3 + while retry: + try: + self._invoke_successfully(clone_clear, None) + break + except Exception as e: + # Filer might be rebooting + time.sleep(5) + retry = retry - 1 + + def _update_volume_stats(self): + """Retrieve stats info from volume group.""" + super(NetAppDirect7modeNfsDriver, self)._update_volume_stats() + netapp_backend = 'NetApp_NFS_7mode_direct' + backend_name = self.configuration.safe_get('volume_backend_name') + self._stats["volume_backend_name"] = (backend_name or + 'NetApp_NFS_7mode_direct') + self._stats["vendor_name"] = 'NetApp' + self._stats["driver_version"] = self.VERSION + provide_ems(self, self._client, self._stats, netapp_backend, + server_type="7mode") + + def _shortlist_del_eligible_files(self, share, old_files): + """Prepares list of eligible files to be deleted from cache.""" + file_list = [] + exp_volume = self._get_actual_path_for_export(share) + for file in old_files: + path = '/vol/%s/%s' % (exp_volume, file) + u_bytes = self._get_filer_file_usage(path) + file_list.append((file, u_bytes)) + LOG.debug(_('Shortlisted del elg files %s'), file_list) + return file_list + + def _get_filer_file_usage(self, path): + """Gets the file unique bytes.""" + LOG.debug(_('Getting file usage for %s'), path) + file_use = NaElement.create_node_with_children( + 'file-usage-get', **{'path': path}) + res = self._invoke_successfully(file_use) + bytes = res.get_child_content('unique-bytes') + LOG.debug(_('file-usage for path %(path)s is %(bytes)s') + % {'path': path, 'bytes': bytes}) + return bytes + + def _is_filer_ip(self, ip): + """Checks whether ip is on the same filer.""" + try: + ifconfig = NaElement('net-ifconfig-get') + res = self._invoke_successfully(ifconfig, None) + 
if_info = res.get_child_by_name('interface-config-info')
+            if if_info:
+                ifs = if_info.get_children()
+                for intf in ifs:
+                    v4_addr = intf.get_child_by_name('v4-primary-address')
+                    if v4_addr:
+                        ip_info = v4_addr.get_child_by_name('ip-address-info')
+                        if ip_info:
+                            address = ip_info.get_child_content('address')
+                            if ip == address:
+                                return True
+                            else:
+                                continue
+        except Exception:
+            return False
+        return False
+
+    def _share_match_for_ip(self, ip, shares):
+        """Returns the share that is served by ip.
+
+        Multiple shares can have same dir path but
+        can be served using different ips. It finds the
+        share which is served by ip on same nfs server.
+        """
+        if self._is_filer_ip(ip) and shares:
+            for share in shares:
+                ip_sh = share.split(':')[0]
+                if self._is_filer_ip(ip_sh):
+                    LOG.debug(_('Share match found for ip %s'), ip)
+                    return share
+        LOG.debug(_('No share match found for ip %s'), ip)
+        return None
+
+    def _is_share_vol_compatible(self, volume, share):
+        """Checks if share is compatible with volume to host it."""
+        return self._is_share_eligible(share, volume['size'])
diff --git a/cinder/volume/drivers/netapp/options.py b/cinder/volume/drivers/netapp/options.py
new file mode 100755
index 0000000000..9a39db1b59
--- /dev/null
+++ b/cinder/volume/drivers/netapp/options.py
@@ -0,0 +1,151 @@
+# Copyright (c) 2012 NetApp, Inc.
+# Copyright (c) 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""Contains configuration options for NetApp drivers.
+
+Common place to hold configuration options for all NetApp drivers.
+Options are grouped into granular units so that they can be reused by
+different modules and classes. This does not restrict declaring options
+in individual modules; options that are not reusable can be declared in
+the modules that use them. Keeping options in a single place is
+recommended to ensure reusability and better management of configuration
+options.
+"""
+
+from oslo.config import cfg
+
+netapp_proxy_opts = [
+    cfg.StrOpt('netapp_storage_family',
+               default='ontap_cluster',
+               help=('The storage family type used on the storage system; '
+                     'valid values are ontap_7mode for using Data ONTAP '
+                     'operating in 7-Mode or ontap_cluster for using '
+                     'clustered Data ONTAP.')),
+    cfg.StrOpt('netapp_storage_protocol',
+               default=None,
+               help=('The storage protocol to be used on the data path with '
+                     'the storage system; valid values are iscsi or nfs.')), ]
+
+netapp_connection_opts = [
+    cfg.StrOpt('netapp_server_hostname',
+               default=None,
+               help='The hostname (or IP address) for the storage system.'),
+    cfg.IntOpt('netapp_server_port',
+               default=80,
+               help=('The TCP port to use for communication with ONTAPI on '
+                     'the storage system. 
Traditionally, port 80 is used for ' + 'HTTP and port 443 is used for HTTPS; however, this ' + 'value should be changed if an alternate port has been ' + 'configured on the storage system.')), ] + +netapp_transport_opts = [ + cfg.StrOpt('netapp_transport_type', + default='http', + help=('The transport protocol used when communicating with ' + 'ONTAPI on the storage system. Valid values are http ' + 'or https.')), ] + +netapp_basicauth_opts = [ + cfg.StrOpt('netapp_login', + default=None, + help=('Administrative user account name used to access the ' + 'storage system.')), + cfg.StrOpt('netapp_password', + default=None, + help=('Password for the administrative user account ' + 'specified in the netapp_login option.'), + secret=True), ] + +netapp_provisioning_opts = [ + cfg.FloatOpt('netapp_size_multiplier', + default=1.2, + help=('The quantity to be multiplied by the requested ' + 'volume size to ensure enough space is available on ' + 'the virtual storage server (Vserver) to fulfill ' + 'the volume creation request.')), + cfg.StrOpt('netapp_volume_list', + default=None, + help=('This option is only utilized when the storage protocol ' + 'is configured to use iSCSI. This option is used to ' + 'restrict provisioning to the specified controller ' + 'volumes. Specify the value of this option to be a ' + 'comma separated list of NetApp controller volume names ' + 'to be used for provisioning.')), ] + +netapp_cluster_opts = [ + cfg.StrOpt('netapp_vserver', + default=None, + help=('This option specifies the virtual storage server ' + '(Vserver) name on the storage cluster on which ' + 'provisioning of block storage volumes should occur. If ' + 'using the NFS storage protocol, this parameter is ' + 'mandatory for storage service catalog support (utilized' + ' by Cinder volume type extra_specs support). If this ' + 'option is specified, the exports belonging to the ' + 'Vserver will only be used for provisioning in the ' + 'future. Block storage volumes on exports not belonging ' + 'to the Vserver specified by this option will continue ' + 'to function normally.')), ] + +netapp_7mode_opts = [ + cfg.StrOpt('netapp_vfiler', + default=None, + help=('The vFiler unit on which provisioning of block storage ' + 'volumes will be done. This option is only used by the ' + 'driver when connecting to an instance with a storage ' + 'family of Data ONTAP operating in 7-Mode and the ' + 'storage protocol selected is iSCSI. Only use this ' + 'option when utilizing the MultiStore feature on the ' + 'NetApp storage system.')), ] + +netapp_img_cache_opts = [ + cfg.IntOpt('thres_avl_size_perc_start', + default=20, + help=('If the percentage of available space for an NFS share ' + 'has dropped below the value specified by this option, ' + 'the NFS image cache will be cleaned.')), + cfg.IntOpt('thres_avl_size_perc_stop', + default=60, + help=('When the percentage of available space on an NFS share ' + 'has reached the percentage specified by this option, ' + 'the driver will stop clearing files from the NFS image ' + 'cache that have not been accessed in the last M ' + 'minutes, where M is the value of the ' + 'expiry_thres_minutes configuration option.')), + cfg.IntOpt('expiry_thres_minutes', + default=720, + help=('This option specifies the threshold for last access ' + 'time for images in the NFS image cache. 
When a cache ' + 'cleaning cycle begins, images in the cache that have ' + 'not been accessed in the last M minutes, where M is ' + 'the value of this parameter, will be deleted from the ' + 'cache to create free space on the NFS share.')), ] + +netapp_nfs_extra_opts = [ + cfg.StrOpt('netapp_copyoffload_tool_path', + default=None, + help=('This option specifies the path of the copy offload' + ' tool binary.')), ] + +CONF = cfg.CONF +CONF.register_opts(netapp_proxy_opts) +CONF.register_opts(netapp_connection_opts) +CONF.register_opts(netapp_transport_opts) +CONF.register_opts(netapp_basicauth_opts) +CONF.register_opts(netapp_cluster_opts) +CONF.register_opts(netapp_7mode_opts) +CONF.register_opts(netapp_provisioning_opts) +CONF.register_opts(netapp_img_cache_opts) +CONF.register_opts(netapp_nfs_extra_opts) diff --git a/cinder/volume/drivers/netapp/ssc_utils.py b/cinder/volume/drivers/netapp/ssc_utils.py new file mode 100644 index 0000000000..76385dd9ac --- /dev/null +++ b/cinder/volume/drivers/netapp/ssc_utils.py @@ -0,0 +1,626 @@ +# Copyright (c) 2012 NetApp, Inc. +# Copyright (c) 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Storage service catalog utility functions and classes for NetApp systems. +""" + +import copy +from threading import Timer + +from cinder import exception +from cinder.openstack.common import log as logging +from cinder.openstack.common import timeutils +from cinder import utils +from cinder.volume import driver +from cinder.volume.drivers.netapp import api +from cinder.volume.drivers.netapp import utils as na_utils + + +LOG = logging.getLogger(__name__) + + +class NetAppVolume(object): + """Represents a NetApp volume. + + Present attributes + id - name, vserver, junction_path, type + aggr - name, raid_type, ha_policy, disk_type + sis - dedup, compression + state - status, vserver_root, cluster_volume, + inconsistent, invalid, junction_active + qos - qos_policy_group + space - space-guarantee-enabled, space-guarantee, + thin_provisioned, size_avl_bytes, size_total_bytes + mirror - mirrored i.e. 
dp mirror + export - path + """ + def __init__(self, name, vserver=None): + self.id = {} + self.aggr = {} + self.sis = {} + self.state = {} + self.qos = {} + self.space = {} + self.mirror = {} + self.export = {} + self.id['name'] = name + self.id['vserver'] = vserver + + def __eq__(self, other): + """Checks for equality.""" + if (self.id['name'] == other.id['name'] and + self.id['vserver'] == other.id['vserver']): + return True + + def __hash__(self): + """Computes hash for the object.""" + return hash(self.id['name']) + + def __cmp__(self, other): + """Implements comparison logic for volumes.""" + self_size_avl = self.space.get('size_avl_bytes') + other_size_avl = other.space.get('size_avl_bytes') + if self_size_avl is None and other_size_avl is not None: + return -1 + elif self_size_avl is not None and other_size_avl is None: + return 1 + elif self_size_avl is None and other_size_avl is None: + return 0 + elif int(self_size_avl) < int(other_size_avl): + return -1 + elif int(self_size_avl) > int(other_size_avl): + return 1 + else: + return 0 + + def __str__(self): + """Returns human readable form for object.""" + vol_str = "NetApp Volume id: %s, aggr: %s,"\ + " space: %s, sis: %s, state: %s, qos: %s"\ + % (self.id, self.aggr, self.space, self.sis, self.state, self.qos) + return vol_str + + +def get_cluster_vols_with_ssc(na_server, vserver, volume=None): + """Gets ssc vols for cluster vserver.""" + volumes = query_cluster_vols_for_ssc(na_server, vserver, volume) + sis_vols = get_sis_vol_dict(na_server, vserver, volume) + mirrored_vols = get_snapmirror_vol_dict(na_server, vserver, volume) + aggrs = {} + for vol in volumes: + aggr_name = vol.aggr['name'] + if aggr_name: + if aggr_name in aggrs: + aggr_attrs = aggrs[aggr_name] + else: + aggr_attrs = query_aggr_options(na_server, aggr_name) + if aggr_attrs: + eff_disk_type = query_aggr_storage_disk(na_server, + aggr_name) + aggr_attrs['disk_type'] = eff_disk_type + aggrs[aggr_name] = aggr_attrs + vol.aggr['raid_type'] = aggr_attrs.get('raid_type') + vol.aggr['ha_policy'] = aggr_attrs.get('ha_policy') + vol.aggr['disk_type'] = aggr_attrs.get('disk_type') + if sis_vols: + if vol.id['name'] in sis_vols: + vol.sis['dedup'] = sis_vols[vol.id['name']]['dedup'] + vol.sis['compression'] =\ + sis_vols[vol.id['name']]['compression'] + else: + vol.sis['dedup'] = False + vol.sis['compression'] = False + if (vol.space['space-guarantee-enabled'] and + (vol.space['space-guarantee'] == 'file' or + vol.space['space-guarantee'] == 'volume')): + vol.space['thin_provisioned'] = False + else: + vol.space['thin_provisioned'] = True + if mirrored_vols: + vol.mirror['mirrored'] = False + if vol.id['name'] in mirrored_vols: + for mirr_attrs in mirrored_vols[vol.id['name']]: + if (mirr_attrs['rel_type'] == 'data_protection' and + mirr_attrs['mirr_state'] == 'snapmirrored'): + vol.mirror['mirrored'] = True + break + return volumes + + +def query_cluster_vols_for_ssc(na_server, vserver, volume=None): + """Queries cluster volumes for ssc.""" + query = {'volume-attributes': None} + volume_id = {'volume-id-attributes': {'owning-vserver-name': vserver}} + if volume: + volume_id['volume-id-attributes']['name'] = volume + query['volume-attributes'] = volume_id + des_attr = {'volume-attributes': + ['volume-id-attributes', + 'volume-space-attributes', + 'volume-state-attributes', + 'volume-qos-attributes']} + result = na_utils.invoke_api(na_server, api_name='volume-get-iter', + api_family='cm', query=query, + des_result=des_attr, + additional_elems=None, + is_iter=True) + vols = 
set() + for res in result: + records = res.get_child_content('num-records') + if records > 0: + attr_list = res.get_child_by_name('attributes-list') + if attr_list: + vol_attrs = attr_list.get_children() + vols_found = create_vol_list(vol_attrs) + vols.update(vols_found) + return vols + + +def create_vol_list(vol_attrs): + """Creates vol list with features from attr list.""" + vols = set() + for v in vol_attrs: + try: + # name and vserver are mandatory + # Absence will skip by giving KeyError. + name = v['volume-id-attributes']['name'] + vserver = v['volume-id-attributes']['owning-vserver-name'] + vol = NetAppVolume(name, vserver) + vol.id['type'] =\ + v['volume-id-attributes'].get_child_content('type') + if vol.id['type'] == "tmp": + continue + vol.id['junction_path'] =\ + v['volume-id-attributes'].get_child_content('junction-path') + # state attributes mandatory. + vol.state['vserver_root'] =\ + na_utils.to_bool( + v['volume-state-attributes'].get_child_content( + 'is-vserver-root')) + if vol.state['vserver_root']: + continue + vol.state['status'] =\ + v['volume-state-attributes'].get_child_content('state') + vol.state['inconsistent'] =\ + na_utils.to_bool( + v['volume-state-attributes'].get_child_content( + 'is-inconsistent')) + vol.state['invalid'] =\ + na_utils.to_bool( + v['volume-state-attributes'].get_child_content( + 'is-invalid')) + vol.state['junction_active'] =\ + na_utils.to_bool( + v['volume-state-attributes'].get_child_content( + 'is-junction-active')) + vol.state['cluster_volume'] =\ + na_utils.to_bool( + v['volume-state-attributes'].get_child_content( + 'is-cluster-volume')) + if (vol.state['status'] != 'online' or + vol.state['inconsistent'] or vol.state['invalid']): + # offline, invalid and inconsistent volumes are not usable + continue + # aggr attributes mandatory. + vol.aggr['name'] =\ + v['volume-id-attributes']['containing-aggregate-name'] + # space attributes mandatory. + vol.space['size_avl_bytes'] =\ + v['volume-space-attributes']['size-available'] + vol.space['size_total_bytes'] =\ + v['volume-space-attributes']['size-total'] + vol.space['space-guarantee-enabled'] =\ + na_utils.to_bool( + v['volume-space-attributes'].get_child_content( + 'is-space-guarantee-enabled')) + vol.space['space-guarantee'] =\ + v['volume-space-attributes'].get_child_content( + 'space-guarantee') + # qos attributes optional. + if v.get_child_by_name('volume-qos-attributes'): + vol.qos['qos_policy_group'] =\ + v['volume-qos-attributes'].get_child_content( + 'policy-group-name') + else: + vol.qos['qos_policy_group'] = None + vols.add(vol) + except KeyError as e: + LOG.debug(_('Unexpected error while creating' + ' ssc vol list. Message - %s') % (e.message)) + continue + return vols + + +def query_aggr_options(na_server, aggr_name): + """Queries cluster aggr for attributes. + + Currently queries for raid and ha-policy. + """ + + add_elems = {'aggregate': aggr_name} + attrs = {} + try: + result = na_utils.invoke_api(na_server, + api_name='aggr-options-list-info', + api_family='cm', query=None, + des_result=None, + additional_elems=add_elems, + is_iter=False) + for res in result: + options = res.get_child_by_name('options') + if options: + op_list = options.get_children() + for op in op_list: + if op.get_child_content('name') == 'ha_policy': + attrs['ha_policy'] = op.get_child_content('value') + if op.get_child_content('name') == 'raidtype': + attrs['raid_type'] = op.get_child_content('value') + except Exception as e: + LOG.debug(_("Exception querying aggr options. 
%s"), e) + return attrs + + +def get_sis_vol_dict(na_server, vserver, volume=None): + """Queries sis for volumes. + + If volume is present sis is queried for it. + Records dedup and compression enabled. + """ + + sis_vols = {} + query_attr = {'vserver': vserver} + if volume: + vol_path = '/vol/%s' % (volume) + query_attr['path'] = vol_path + query = {'sis-status-info': query_attr} + try: + result = na_utils.invoke_api(na_server, + api_name='sis-get-iter', + api_family='cm', + query=query, + is_iter=True) + for res in result: + attr_list = res.get_child_by_name('attributes-list') + if attr_list: + sis_status = attr_list.get_children() + for sis in sis_status: + path = sis.get_child_content('path') + if not path: + continue + (___, __, vol) = path.rpartition('/') + if not vol: + continue + v_sis = {} + v_sis['compression'] = na_utils.to_bool( + sis.get_child_content('is-compression-enabled')) + v_sis['dedup'] = na_utils.to_bool( + sis.get_child_content('state')) + sis_vols[vol] = v_sis + except Exception as e: + LOG.debug(_("Exception querying sis information. %s"), e) + return sis_vols + + +def get_snapmirror_vol_dict(na_server, vserver, volume=None): + """Queries snapmirror volumes.""" + mirrored_vols = {} + query_attr = {'source-vserver': vserver} + if volume: + query_attr['source-volume'] = volume + query = {'snapmirror-info': query_attr} + try: + result = na_utils.invoke_api(na_server, + api_name='snapmirror-get-iter', + api_family='cm', query=query, + is_iter=True) + for res in result: + attr_list = res.get_child_by_name('attributes-list') + if attr_list: + snap_info = attr_list.get_children() + for snap in snap_info: + src_volume = snap.get_child_content('source-volume') + v_snap = {} + v_snap['dest_loc'] =\ + snap.get_child_content('destination-location') + v_snap['rel_type'] =\ + snap.get_child_content('relationship-type') + v_snap['mirr_state'] =\ + snap.get_child_content('mirror-state') + if mirrored_vols.get(src_volume): + mirrored_vols.get(src_volume).append(v_snap) + else: + mirrored_vols[src_volume] = [v_snap] + except Exception as e: + LOG.debug(_("Exception querying mirror information. %s"), e) + return mirrored_vols + + +def query_aggr_storage_disk(na_server, aggr): + """Queries for storage disks associated to an aggregate.""" + query = {'storage-disk-info': {'disk-raid-info': + {'disk-aggregate-info': + {'aggregate-name': aggr}}}} + des_attr = {'storage-disk-info': + {'disk-raid-info': ['effective-disk-type']}} + try: + result = na_utils.invoke_api(na_server, + api_name='storage-disk-get-iter', + api_family='cm', query=query, + des_result=des_attr, + additional_elems=None, + is_iter=True) + for res in result: + attr_list = res.get_child_by_name('attributes-list') + if attr_list: + storage_disks = attr_list.get_children() + for disk in storage_disks: + raid_info = disk.get_child_by_name('disk-raid-info') + if raid_info: + eff_disk_type =\ + raid_info.get_child_content('effective-disk-type') + if eff_disk_type: + return eff_disk_type + else: + continue + except Exception as e: + LOG.debug(_("Exception querying storage disk. 
%s"), e) + return 'unknown' + + +def get_cluster_ssc(na_server, vserver): + """Provides cluster volumes with ssc.""" + netapp_volumes = get_cluster_vols_with_ssc(na_server, vserver) + mirror_vols = set() + dedup_vols = set() + compress_vols = set() + thin_prov_vols = set() + ssc_map = {'mirrored': mirror_vols, 'dedup': dedup_vols, + 'compression': compress_vols, + 'thin': thin_prov_vols, 'all': netapp_volumes} + for vol in netapp_volumes: + if vol.sis.get('dedup'): + dedup_vols.add(vol) + if vol.sis.get('compression'): + compress_vols.add(vol) + if vol.mirror.get('mirrored'): + mirror_vols.add(vol) + if vol.space.get('thin_provisioned'): + thin_prov_vols.add(vol) + return ssc_map + + +def refresh_cluster_stale_ssc(*args, **kwargs): + """Refreshes stale ssc volumes with latest.""" + backend = args[0] + na_server = args[1] + vserver = args[2] + identity = str(id(backend)) + lock_pr = '%s_%s' % ('refresh_ssc', identity) + try: + job_set = na_utils.set_safe_attr( + backend, 'refresh_stale_running', True) + if not job_set: + return + + @utils.synchronized(lock_pr) + def refresh_stale_ssc(): + stale_vols = backend._update_stale_vols(reset=True) + LOG.info(_('Running stale ssc refresh job for %(server)s' + ' and vserver %(vs)s') + % {'server': na_server, 'vs': vserver}) + # refreshing single volumes can create inconsistency + # hence doing manipulations on copy + ssc_vols_copy = copy.deepcopy(backend.ssc_vols) + refresh_vols = set() + expired_vols = set() + for vol in stale_vols: + name = vol.id['name'] + res = get_cluster_vols_with_ssc(na_server, vserver, name) + if res: + refresh_vols.add(res.pop()) + else: + expired_vols.add(vol) + for vol in refresh_vols: + for k in ssc_vols_copy: + vol_set = ssc_vols_copy[k] + vol_set.discard(vol) + if k == "mirrored" and vol.mirror.get('mirrored'): + vol_set.add(vol) + if k == "dedup" and vol.sis.get('dedup'): + vol_set.add(vol) + if k == "compression" and vol.sis.get('compression'): + vol_set.add(vol) + if k == "thin" and vol.space.get('thin_provisioned'): + vol_set.add(vol) + if k == "all": + vol_set.add(vol) + for vol in expired_vols: + for k in ssc_vols_copy: + vol_set = ssc_vols_copy[k] + vol_set.discard(vol) + backend.refresh_ssc_vols(ssc_vols_copy) + LOG.info(_('Successfully completed stale refresh job for' + ' %(server)s and vserver %(vs)s') + % {'server': na_server, 'vs': vserver}) + + refresh_stale_ssc() + finally: + na_utils.set_safe_attr(backend, 'refresh_stale_running', False) + + +def get_cluster_latest_ssc(*args, **kwargs): + """Updates volumes including ssc.""" + backend = args[0] + na_server = args[1] + vserver = args[2] + identity = str(id(backend)) + lock_pr = '%s_%s' % ('refresh_ssc', identity) + + # As this depends on stale job running state + # set flag as soon as job starts to avoid + # job accumulation. 
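+    # Note: judging from its usage here, na_utils.set_safe_attr sets the
+    # flag atomically and returns False if it was already set, so a
+    # concurrent caller returns early instead of queuing another job.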
+ try: + job_set = na_utils.set_safe_attr(backend, 'ssc_job_running', True) + if not job_set: + return + + @utils.synchronized(lock_pr) + def get_latest_ssc(): + LOG.info(_('Running cluster latest ssc job for %(server)s' + ' and vserver %(vs)s') + % {'server': na_server, 'vs': vserver}) + ssc_vols = get_cluster_ssc(na_server, vserver) + backend.refresh_ssc_vols(ssc_vols) + backend.ssc_run_time = timeutils.utcnow() + LOG.info(_('Successfully completed ssc job for %(server)s' + ' and vserver %(vs)s') + % {'server': na_server, 'vs': vserver}) + + get_latest_ssc() + finally: + na_utils.set_safe_attr(backend, 'ssc_job_running', False) + + +def refresh_cluster_ssc(backend, na_server, vserver, synchronous=False): + """Refresh cluster ssc for backend.""" + if not isinstance(backend, driver.VolumeDriver): + raise exception.InvalidInput(reason=_("Backend not a VolumeDriver.")) + if not isinstance(na_server, api.NaServer): + raise exception.InvalidInput(reason=_("Backend server not NaServer.")) + delta_secs = getattr(backend, 'ssc_run_delta_secs', 1800) + if getattr(backend, 'ssc_job_running', None): + LOG.warn(_('ssc job in progress. Returning... ')) + return + elif (getattr(backend, 'ssc_run_time', None) is None or + (backend.ssc_run_time and + timeutils.is_newer_than(backend.ssc_run_time, delta_secs))): + if synchronous: + get_cluster_latest_ssc(backend, na_server, vserver) + else: + t = Timer(0, get_cluster_latest_ssc, + args=[backend, na_server, vserver]) + t.start() + elif getattr(backend, 'refresh_stale_running', None): + LOG.warn(_('refresh stale ssc job in progress. Returning... ')) + return + else: + if backend.stale_vols: + if synchronous: + refresh_cluster_stale_ssc(backend, na_server, vserver) + else: + t = Timer(0, refresh_cluster_stale_ssc, + args=[backend, na_server, vserver]) + t.start() + + +def get_volumes_for_specs(ssc_vols, specs): + """Shortlists volumes for extra specs provided.""" + if specs is None or not isinstance(specs, dict): + return ssc_vols['all'] + result = copy.deepcopy(ssc_vols['all']) + raid_type = specs.get('netapp:raid_type') + disk_type = specs.get('netapp:disk_type') + qos_policy_group = specs.get('netapp:qos_policy_group') + bool_specs_list = ['netapp_mirrored', 'netapp_unmirrored', + 'netapp_dedup', 'netapp_nodedup', + 'netapp_compression', 'netapp_nocompression', + 'netapp_thin_provisioned', 'netapp_thick_provisioned'] + b_specs = {} + for spec in bool_specs_list: + b_specs[spec] = na_utils.to_bool(specs.get(spec))\ + if specs.get(spec) else None + + def _spec_ineffect(b_specs, spec, opp_spec): + """If the spec with opposite spec is ineffective.""" + if ((b_specs[spec] is None and b_specs[opp_spec] is None) + or (b_specs[spec] == b_specs[opp_spec])): + return True + else: + return False + + if _spec_ineffect(b_specs, 'netapp_mirrored', 'netapp_unmirrored'): + pass + else: + if b_specs['netapp_mirrored'] or b_specs['netapp_unmirrored'] is False: + result = result & ssc_vols['mirrored'] + else: + result = result - ssc_vols['mirrored'] + if _spec_ineffect(b_specs, 'netapp_dedup', 'netapp_nodedup'): + pass + else: + if b_specs['netapp_dedup'] or b_specs['netapp_nodedup'] is False: + result = result & ssc_vols['dedup'] + else: + result = result - ssc_vols['dedup'] + if _spec_ineffect(b_specs, 'netapp_compression', 'netapp_nocompression'): + pass + else: + if (b_specs['netapp_compression'] or + b_specs['netapp_nocompression'] is False): + result = result & ssc_vols['compression'] + else: + result = result - ssc_vols['compression'] + if _spec_ineffect(b_specs, 
'netapp_thin_provisioned', + 'netapp_thick_provisioned'): + pass + else: + if (b_specs['netapp_thin_provisioned'] or + b_specs['netapp_thick_provisioned'] is False): + result = result & ssc_vols['thin'] + else: + result = result - ssc_vols['thin'] + if raid_type or disk_type or qos_policy_group: + tmp = copy.deepcopy(result) + for vol in tmp: + if raid_type: + vol_raid = vol.aggr['raid_type'] + vol_raid = vol_raid.lower() if vol_raid else None + if raid_type.lower() != vol_raid: + result.discard(vol) + if disk_type: + vol_dtype = vol.aggr['disk_type'] + vol_dtype = vol_dtype.lower() if vol_dtype else None + if disk_type.lower() != vol_dtype: + result.discard(vol) + if qos_policy_group: + vol_qos = vol.qos['qos_policy_group'] + vol_qos = vol_qos.lower() if vol_qos else None + if qos_policy_group.lower() != vol_qos: + result.discard(vol) + return result + + +def check_ssc_api_permissions(na_server): + """Checks backend ssc api permissions for the user.""" + api_map = {'storage-disk-get-iter': ['disk type'], + 'snapmirror-get-iter': ['data protection mirror'], + 'sis-get-iter': ['deduplication', 'compression'], + 'aggr-options-list-info': ['raid type'], + 'volume-get-iter': ['volume information']} + failed_apis = na_utils.check_apis_on_cluster(na_server, api_map.keys()) + if failed_apis: + if 'volume-get-iter' in failed_apis: + msg = _("Fatal error: User not permitted" + " to query NetApp volumes.") + raise exception.VolumeBackendAPIException(data=msg) + else: + unsupp_ssc_features = [] + for fail in failed_apis: + unsupp_ssc_features.extend(api_map[fail]) + LOG.warn(_("The user does not have access or sufficient" + " privileges to use all ssc apis. The ssc" + " features %s may not work as expected."), + unsupp_ssc_features) diff --git a/cinder/volume/drivers/netapp/utils.py b/cinder/volume/drivers/netapp/utils.py new file mode 100644 index 0000000000..6bdd3d6324 --- /dev/null +++ b/cinder/volume/drivers/netapp/utils.py @@ -0,0 +1,322 @@ +# Copyright (c) 2012 NetApp, Inc. +# Copyright (c) 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Utilities for NetApp drivers. + +This module contains common utilities to be used by one or more +NetApp drivers to achieve the desired functionality. +""" + +import copy +import socket + +from cinder import context +from cinder import exception +from cinder.openstack.common import log as logging +from cinder.openstack.common import timeutils +from cinder import utils +from cinder.volume.drivers.netapp.api import NaApiError +from cinder.volume.drivers.netapp.api import NaElement +from cinder.volume.drivers.netapp.api import NaErrors +from cinder.volume.drivers.netapp.api import NaServer +from cinder.volume import volume_types + + +LOG = logging.getLogger(__name__) + + +def provide_ems(requester, server, stats, netapp_backend, + server_type="cluster"): + """Provide ems with volume stats for the requester. + + :param server_type: cluster or 7mode. 
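+
+    A hypothetical call from a driver (the requester and stats arguments
+    are illustrative)::
+
+        provide_ems(self, na_server, self._stats, 'backend_name',
+                    server_type='cluster')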
+ """ + def _create_ems(stats, netapp_backend, server_type): + """Create ems api request.""" + ems_log = NaElement('ems-autosupport-log') + host = socket.getfqdn() or 'Cinder_node' + dest = "cluster node" if server_type == "cluster"\ + else "7 mode controller" + ems_log.add_new_child('computer-name', host) + ems_log.add_new_child('event-id', '0') + ems_log.add_new_child('event-source', + 'Cinder driver %s' % netapp_backend) + ems_log.add_new_child('app-version', stats.get('driver_version', + 'Undefined')) + ems_log.add_new_child('category', 'provisioning') + ems_log.add_new_child('event-description', + 'OpenStack volume created on %s' % dest) + ems_log.add_new_child('log-level', '6') + ems_log.add_new_child('auto-support', 'true') + return ems_log + + def _create_vs_get(): + """Create vs_get api request.""" + vs_get = NaElement('vserver-get-iter') + vs_get.add_new_child('max-records', '1') + query = NaElement('query') + query.add_node_with_children('vserver-info', + **{'vserver-type': 'node'}) + vs_get.add_child_elem(query) + desired = NaElement('desired-attributes') + desired.add_node_with_children( + 'vserver-info', **{'vserver-name': '', 'vserver-type': ''}) + vs_get.add_child_elem(desired) + return vs_get + + def _get_cluster_node(na_server): + """Get the cluster node for ems.""" + na_server.set_vserver(None) + vs_get = _create_vs_get() + res = na_server.invoke_successfully(vs_get) + if (res.get_child_content('num-records') and + int(res.get_child_content('num-records')) > 0): + attr_list = res.get_child_by_name('attributes-list') + vs_info = attr_list.get_child_by_name('vserver-info') + vs_name = vs_info.get_child_content('vserver-name') + return vs_name + return None + + do_ems = True + if hasattr(requester, 'last_ems'): + sec_limit = 604800 + if not (timeutils.is_older_than(requester.last_ems, sec_limit) or + timeutils.is_older_than(requester.last_ems, sec_limit - 59)): + do_ems = False + if do_ems: + na_server = copy.copy(server) + na_server.set_timeout(25) + ems = _create_ems(stats, netapp_backend, server_type) + try: + if server_type == "cluster": + api_version = na_server.get_api_version() + if api_version: + major, minor = api_version + else: + raise NaApiError(code='Not found', + message='No api version found') + if major == 1 and minor > 15: + node = getattr(requester, 'vserver', None) + else: + node = _get_cluster_node(na_server) + if node is None: + raise NaApiError(code='Not found', + message='No vserver found') + na_server.set_vserver(node) + else: + na_server.set_vfiler(None) + na_server.invoke_successfully(ems, True) + LOG.debug(_("ems executed successfully.")) + except NaApiError as e: + LOG.warn(_("Failed to invoke ems. Message : %s") % e) + finally: + requester.last_ems = timeutils.utcnow() + + +def validate_instantiation(**kwargs): + """Checks if a driver is instantiated other than by the unified driver. + + Helps check direct instantiation of netapp drivers. + Call this function in every netapp block driver constructor. + """ + if kwargs and kwargs.get('netapp_mode') == 'proxy': + return + LOG.warn(_("It is not the recommended way to use drivers by NetApp. " + "Please use NetAppDriver to achieve the functionality.")) + + +def invoke_api(na_server, api_name, api_family='cm', query=None, + des_result=None, additional_elems=None, + is_iter=False, records=0, tag=None, + timeout=0, tunnel=None): + """Invokes any given api call to a NetApp server. 
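+
+    Note that this is a generator: responses are yielded one at a time,
+    so iterate over the result. A minimal, hypothetical iteration (server
+    object assumed)::
+
+        for res in invoke_api(na_server, api_name='volume-get-iter',
+                              api_family='cm', is_iter=True):
+            attr_list = res.get_child_by_name('attributes-list')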
+
+    :param na_server: na_server instance
+    :param api_name: api name string
+    :param api_family: cm or 7m
+    :param query: api query as dict
+    :param des_result: desired result as dict
+    :param additional_elems: dict other than query and des_result
+    :param is_iter: is iterator api
+    :param records: limit for records, 0 for infinite
+    :param tag: next tag for the iterator api
+    :param timeout: timeout seconds
+    :param tunnel: tunnel entity, vserver or vfiler name
+    """
+    record_step = 50
+    if not na_server or not isinstance(na_server, NaServer):
+        msg = _("Requires an NaServer instance.")
+        raise exception.InvalidInput(reason=msg)
+    server = copy.copy(na_server)
+    if api_family == 'cm':
+        server.set_vserver(tunnel)
+    else:
+        server.set_vfiler(tunnel)
+    if timeout > 0:
+        server.set_timeout(timeout)
+    iter_records = 0
+    cond = True
+    while cond:
+        na_element = create_api_request(
+            api_name, query, des_result, additional_elems,
+            is_iter, record_step, tag)
+        result = server.invoke_successfully(na_element, True)
+        if is_iter:
+            if records > 0:
+                iter_records = iter_records + record_step
+                if iter_records >= records:
+                    cond = False
+            tag_el = result.get_child_by_name('next-tag')
+            tag = tag_el.get_content() if tag_el else None
+            if not tag:
+                cond = False
+        else:
+            cond = False
+        yield result
+
+
+def create_api_request(api_name, query=None, des_result=None,
+                       additional_elems=None, is_iter=False,
+                       record_step=50, tag=None):
+    """Creates a NetApp api request.
+
+    :param api_name: api name string
+    :param query: api query as dict
+    :param des_result: desired result as dict
+    :param additional_elems: dict other than query and des_result
+    :param is_iter: is iterator api
+    :param record_step: records at a time for iter api
+    :param tag: next tag for iter api
+    """
+    api_el = NaElement(api_name)
+    if query:
+        query_el = NaElement('query')
+        query_el.translate_struct(query)
+        api_el.add_child_elem(query_el)
+    if des_result:
+        res_el = NaElement('desired-attributes')
+        res_el.translate_struct(des_result)
+        api_el.add_child_elem(res_el)
+    if additional_elems:
+        api_el.translate_struct(additional_elems)
+    if is_iter:
+        api_el.add_new_child('max-records', str(record_step))
+    if tag:
+        api_el.add_new_child('tag', tag, True)
+    return api_el
+
+
+def to_bool(val):
+    """Converts 'true', 'yes', 'y', 'enabled' and '1' to True.
+
+    Any other value, including None, converts to False.
+    """
+    if val:
+        strg = str(val).lower()
+        return strg in ('true', 'y', 'yes', 'enabled', '1')
+    return False
+
+
+@utils.synchronized("safe_set_attr")
+def set_safe_attr(instance, attr, val):
+    """Sets the attribute in a thread safe manner.
+
+    Returns True if the new value was set on the attribute,
+    False if the attribute already held that value.
+    """
+
+    if not instance or not attr:
+        return False
+    old_val = getattr(instance, attr, None)
+    if val is None and old_val is None:
+        return False
+    elif val == old_val:
+        return False
+    else:
+        setattr(instance, attr, val)
+        return True
+
+
+def get_volume_extra_specs(volume):
+    """Provides extra specs associated with volume."""
+    ctxt = context.get_admin_context()
+    type_id = volume.get('volume_type_id')
+    specs = None
+    if type_id is not None:
+        volume_type = volume_types.get_volume_type(ctxt, type_id)
+        specs = volume_type.get('extra_specs')
+    return specs
+
+
+def check_apis_on_cluster(na_server, api_list=None):
+    """Checks api availability and permissions on cluster.
+
+    Checks api availability and permissions for the executing user.
+    Returns a list of failed apis.
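+
+    A hypothetical call (api names are illustrative)::
+
+        failed = check_apis_on_cluster(na_server,
+                                       ['sis-get-iter', 'volume-get-iter'])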
+ """ + failed_apis = [] + if api_list: + api_version = na_server.get_api_version() + if api_version: + major, minor = api_version + if major == 1 and minor < 20: + for api_name in api_list: + na_el = NaElement(api_name) + try: + na_server.invoke_successfully(na_el) + except Exception as e: + if isinstance(e, NaApiError): + if (e.code == NaErrors['API_NOT_FOUND'].code or + e.code == + NaErrors['INSUFFICIENT_PRIVS'].code): + failed_apis.append(api_name) + elif major == 1 and minor >= 20: + failed_apis = copy.copy(api_list) + result = invoke_api( + na_server, + api_name='system-user-capability-get-iter', + api_family='cm', + additional_elems=None, + is_iter=True) + for res in result: + attr_list = res.get_child_by_name('attributes-list') + if attr_list: + capabilities = attr_list.get_children() + for capability in capabilities: + op_list = capability.get_child_by_name( + 'operation-list') + if op_list: + ops = op_list.get_children() + for op in ops: + apis = op.get_child_content('api-name') + if apis: + api_list = apis.split(',') + for api_name in api_list: + if (api_name and + api_name.strip() + in failed_apis): + failed_apis.remove(api_name) + else: + continue + else: + msg = _("Unsupported Clustered Data ONTAP version.") + raise exception.VolumeBackendAPIException(data=msg) + else: + msg = _("Api version could not be determined.") + raise exception.VolumeBackendAPIException(data=msg) + return failed_apis diff --git a/cinder/volume/nexenta/__init__.py b/cinder/volume/drivers/nexenta/__init__.py similarity index 77% rename from cinder/volume/nexenta/__init__.py rename to cinder/volume/drivers/nexenta/__init__.py index 3050df8f66..16f48bd877 100644 --- a/cinder/volume/nexenta/__init__.py +++ b/cinder/volume/drivers/nexenta/__init__.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# # Copyright 2011 Nexenta Systems, Inc. # All Rights Reserved. # @@ -20,14 +18,9 @@ .. automodule:: nexenta .. moduleauthor:: Yuriy Taraday +.. moduleauthor:: Mikhail Khodos """ class NexentaException(Exception): - MESSAGE = _('Nexenta SA returned the error') - - def __init__(self, error=None): - super(NexentaException, self).__init__(self.message, error) - - def __str__(self): - return '%s: %s' % self.args + pass diff --git a/cinder/volume/drivers/nexenta/iscsi.py b/cinder/volume/drivers/nexenta/iscsi.py new file mode 100644 index 0000000000..f80dda0c90 --- /dev/null +++ b/cinder/volume/drivers/nexenta/iscsi.py @@ -0,0 +1,562 @@ +# Copyright 2011 Nexenta Systems, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +:mod:`nexenta.iscsi` -- Driver to store volumes on Nexenta Appliance +===================================================================== + +.. automodule:: nexenta.volume +.. moduleauthor:: Victor Rodionov +.. moduleauthor:: Mikhail Khodos +.. 
moduleauthor:: Yuriy Taraday +""" + +from cinder import exception +from cinder.openstack.common import log as logging +from cinder.volume import driver +from cinder.volume.drivers import nexenta +from cinder.volume.drivers.nexenta import jsonrpc +from cinder.volume.drivers.nexenta import options +from cinder.volume.drivers.nexenta import utils + +VERSION = '1.1.3' +LOG = logging.getLogger(__name__) + + +class NexentaISCSIDriver(driver.ISCSIDriver): # pylint: disable=R0921 + """Executes volume driver commands on Nexenta Appliance. + + Version history: + 1.0.0 - Initial driver version. + 1.0.1 - Fixed bug #1236626: catch "does not exist" exception of + lu_exists. + 1.1.0 - Changed class name to NexentaISCSIDriver. + 1.1.1 - Ignore "does not exist" exception of nms.snapshot.destroy. + 1.1.2 - Optimized create_cloned_volume, replaced zfs send recv with zfs + clone. + 1.1.3 - Extended volume stats provided by _update_volume_stats method. + """ + + VERSION = VERSION + + def __init__(self, *args, **kwargs): + super(NexentaISCSIDriver, self).__init__(*args, **kwargs) + self.nms = None + if self.configuration: + self.configuration.append_config_values( + options.NEXENTA_CONNECTION_OPTIONS) + self.configuration.append_config_values( + options.NEXENTA_ISCSI_OPTIONS) + self.configuration.append_config_values( + options.NEXENTA_VOLUME_OPTIONS) + self.configuration.append_config_values( + options.NEXENTA_RRMGR_OPTIONS) + self.nms_protocol = self.configuration.nexenta_rest_protocol + self.nms_host = self.configuration.nexenta_host + self.nms_port = self.configuration.nexenta_rest_port + self.nms_user = self.configuration.nexenta_user + self.nms_password = self.configuration.nexenta_password + self.volume = self.configuration.nexenta_volume + self.rrmgr_compression = self.configuration.nexenta_rrmgr_compression + self.rrmgr_tcp_buf_size = self.configuration.nexenta_rrmgr_tcp_buf_size + self.rrmgr_connections = self.configuration.nexenta_rrmgr_connections + + @property + def backend_name(self): + backend_name = None + if self.configuration: + backend_name = self.configuration.safe_get('volume_backend_name') + if not backend_name: + backend_name = self.__class__.__name__ + return backend_name + + def do_setup(self, context): + if self.nms_protocol == 'auto': + protocol, auto = 'http', True + else: + protocol, auto = self.nms_protocol, False + self.nms = jsonrpc.NexentaJSONProxy( + protocol, self.nms_host, self.nms_port, '/rest/nms', self.nms_user, + self.nms_password, auto=auto) + + def check_for_setup_error(self): + """Verify that the volume for our zvols exists. 
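+
+        Uses the NMS proxy built in do_setup() to check that the configured
+        pool (``nexenta_volume``) exists.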
+
+        :raise: :py:exc:`LookupError`
+        """
+        if not self.nms.volume.object_exists(self.volume):
+            raise LookupError(_("Volume %s does not exist in Nexenta "
+                                "SA") % self.volume)
+
+    def _get_zvol_name(self, volume_name):
+        """Return zvol name that corresponds to the given volume name."""
+        return '%s/%s' % (self.volume, volume_name)
+
+    def _get_target_name(self, volume_name):
+        """Return iSCSI target name to access volume."""
+        return '%s%s' % (self.configuration.nexenta_target_prefix,
+                         volume_name)
+
+    def _get_target_group_name(self, volume_name):
+        """Return Nexenta iSCSI target group name for volume."""
+        return '%s%s' % (self.configuration.nexenta_target_group_prefix,
+                         volume_name)
+
+    @staticmethod
+    def _get_clone_snapshot_name(volume):
+        """Return name for snapshot that will be used to clone the volume."""
+        return 'cinder-clone-snapshot-%(id)s' % volume
+
+    @staticmethod
+    def _is_clone_snapshot_name(snapshot):
+        """Check if snapshot was created for cloning."""
+        name = snapshot.split('@')[-1]
+        return name.startswith('cinder-clone-snapshot-')
+
+    @staticmethod
+    def _get_migrate_snapshot_name(volume):
+        """Return name for snapshot used to migrate the volume."""
+        return 'cinder-migrate-snapshot-%(id)s' % volume
+
+    def create_volume(self, volume):
+        """Create a zvol on appliance.
+
+        :param volume: volume reference
+        :return: model update dict for volume reference
+        """
+        self.nms.zvol.create(
+            self._get_zvol_name(volume['name']),
+            '%sG' % (volume['size'],),
+            self.configuration.nexenta_blocksize,
+            self.configuration.nexenta_sparse)
+        return self.create_export(None, volume)
+
+    def extend_volume(self, volume, new_size):
+        """Extend an existing volume.
+
+        :param volume: volume reference
+        :param new_size: volume new size in GB
+        """
+        LOG.info(_('Extending volume: %(id)s New size: %(size)s GB'),
+                 {'id': volume['id'], 'size': new_size})
+        self.nms.zvol.set_child_prop(self._get_zvol_name(volume['name']),
+                                     'volsize', '%sG' % new_size)
+
+    def delete_volume(self, volume):
+        """Destroy a zvol on appliance.
+
+        :param volume: volume reference
+        """
+        volume_name = self._get_zvol_name(volume['name'])
+        props = self.nms.zvol.get_child_props(volume_name, 'origin') or {}
+        try:
+            self.nms.zvol.destroy(volume_name, '')
+        except nexenta.NexentaException as exc:
+            if 'does not exist' in exc.args[0]:
+                LOG.info(_('Volume %s does not exist, it seems it was '
+                           'already deleted.'), volume_name)
+                return
+            if 'zvol has children' in exc.args[0]:
+                raise exception.VolumeIsBusy(volume_name=volume_name)
+            raise
+        origin = props.get('origin')
+        if origin and self._is_clone_snapshot_name(origin):
+            volume, snapshot = origin.split('@')
+            # Strip the exact '<pool>/' prefix; lstrip() would treat the
+            # prefix as a character set and could strip too much.
+            volume = volume[len(self.configuration.nexenta_volume) + 1:]
+            try:
+                self.delete_snapshot({'volume_name': volume,
+                                      'name': snapshot})
+            except nexenta.NexentaException as exc:
+                LOG.warning(_('Cannot delete snapshot %(origin)s: %(exc)s'),
+                            {'origin': origin, 'exc': exc})
+
+    def create_cloned_volume(self, volume, src_vref):
+        """Creates a clone of the specified volume.
+
+        :param volume: new volume reference
+        :param src_vref: source volume reference
+        """
+        snapshot = {'volume_name': src_vref['name'],
+                    'name': self._get_clone_snapshot_name(volume)}
+        LOG.debug(_('Creating temp snapshot of the original volume: '
+                    '%(volume_name)s@%(name)s'), snapshot)
+        # We don't delete this snapshot, because it will be the origin of
+        # the new volume and NMS promotes it automatically once the origin
+        # volume is deleted.
+        # When the cloned volume is deleted, its 'origin' property is
+        # checked and the source snapshot is deleted if needed.
+        self.create_snapshot(snapshot)
+        try:
+            self.create_volume_from_snapshot(volume, snapshot)
+        except nexenta.NexentaException:
+            LOG.error(_('Volume creation failed, deleting created snapshot '
+                        '%(volume_name)s@%(name)s'), snapshot)
+            try:
+                self.delete_snapshot(snapshot)
+            except (nexenta.NexentaException, exception.SnapshotIsBusy):
+                LOG.warning(_('Failed to delete zfs snapshot '
+                              '%(volume_name)s@%(name)s'), snapshot)
+            raise
+
+    def _get_zfs_send_recv_cmd(self, src, dst):
+        """Returns rrmgr command for source and destination."""
+        return utils.get_rrmgr_cmd(src, dst,
+                                   compression=self.rrmgr_compression,
+                                   tcp_buf_size=self.rrmgr_tcp_buf_size,
+                                   connections=self.rrmgr_connections)
+
+    def migrate_volume(self, ctxt, volume, host):
+        """Migrate if volume and host are managed by Nexenta appliance.
+
+        :param ctxt: context
+        :param volume: a dictionary describing the volume to migrate
+        :param host: a dictionary describing the host to migrate to
+        """
+        LOG.debug(_('Enter: migrate_volume: id=%(id)s, host=%(host)s') %
+                  {'id': volume['id'], 'host': host})
+
+        false_ret = (False, None)
+
+        if volume['status'] != 'available':
+            return false_ret
+
+        if 'location_info' not in host['capabilities']:
+            return false_ret
+
+        dst_parts = host['capabilities']['location_info'].split(':')
+
+        if host['capabilities']['vendor_name'] != 'Nexenta' or \
+                dst_parts[0] != self.__class__.__name__ or \
+                host['capabilities']['free_capacity_gb'] < volume['size']:
+            return false_ret
+
+        dst_host, dst_volume = dst_parts[1:]
+
+        ssh_bound = False
+        ssh_bindings = self.nms.appliance.ssh_list_bindings()
+        for bind in ssh_bindings:
+            # str.index() raises ValueError when not found, so test
+            # membership instead.
+            if dst_host in bind:
+                ssh_bound = True
+                break
+        if not ssh_bound:
+            LOG.warning(_("Remote NexentaStor appliance at %s should be "
+                          "SSH-bound."), dst_host)
+
+        # Create temporary snapshot of volume on NexentaStor Appliance.
+        snapshot = {'volume_name': volume['name'],
+                    'name': self._get_migrate_snapshot_name(volume)}
+        self.create_snapshot(snapshot)
+
+        src = '%(volume)s/%(zvol)s@%(snapshot)s' % {
+            'volume': self.volume,
+            'zvol': volume['name'],
+            'snapshot': snapshot['name']}
+        dst = ':'.join([dst_host, dst_volume])
+
+        try:
+            self.nms.appliance.execute(self._get_zfs_send_recv_cmd(src, dst))
+        except nexenta.NexentaException as exc:
+            LOG.warning(_("Cannot send source snapshot %(src)s to "
+                          "destination %(dst)s. Reason: %(exc)s"),
+                        {'src': src, 'dst': dst, 'exc': exc})
+            return false_ret
+        finally:
+            try:
+                self.delete_snapshot(snapshot)
+            except nexenta.NexentaException as exc:
+                LOG.warning(_("Cannot delete temporary source snapshot "
+                              "%(src)s on NexentaStor Appliance: %(exc)s"),
+                            {'src': src, 'exc': exc})
+        try:
+            self.delete_volume(volume)
+        except nexenta.NexentaException as exc:
+            LOG.warning(_("Cannot delete source volume %(volume)s on "
+                          "NexentaStor Appliance: %(exc)s"),
+                        {'volume': volume['name'], 'exc': exc})
+
+        return (True, None)
+
+    def create_snapshot(self, snapshot):
+        """Create snapshot of existing zvol on appliance.
+
+        :param snapshot: snapshot reference
+        """
+        self.nms.zvol.create_snapshot(
+            self._get_zvol_name(snapshot['volume_name']),
+            snapshot['name'], '')
+
+    def create_volume_from_snapshot(self, volume, snapshot):
+        """Create new volume from other's snapshot on appliance.
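+
+        Implemented as a zfs clone of the source snapshot, so no data is
+        copied at creation time.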
+ + :param volume: reference of volume to be created + :param snapshot: reference of source snapshot + """ + self.nms.zvol.clone( + '%s@%s' % (self._get_zvol_name(snapshot['volume_name']), + snapshot['name']), + self._get_zvol_name(volume['name'])) + + def delete_snapshot(self, snapshot): + """Delete volume's snapshot on appliance. + + :param snapshot: snapshot reference + """ + volume_name = self._get_zvol_name(snapshot['volume_name']) + snapshot_name = '%s@%s' % (volume_name, snapshot['name']) + try: + self.nms.snapshot.destroy(snapshot_name, '') + except nexenta.NexentaException as exc: + if "does not exist" in exc.args[0]: + LOG.info(_('Snapshot %s does not exist, it seems it was ' + 'already deleted.'), snapshot_name) + return + if "snapshot has dependent clones" in exc.args[0]: + raise exception.SnapshotIsBusy(snapshot_name=snapshot['name']) + raise + + def local_path(self, volume): + """Return local path to existing local volume. + + We never have local volumes, so it raises NotImplementedError. + + :raise: :py:exc:`NotImplementedError` + """ + raise NotImplementedError + + def _target_exists(self, target): + """Check if iSCSI target exist. + + :param target: target name + :return: True if target exist, else False + """ + targets = self.nms.stmf.list_targets() + if not targets: + return False + return target in self.nms.stmf.list_targets() + + def _target_group_exists(self, target_group): + """Check if target group exist. + + :param target_group: target group + :return: True if target group exist, else False + """ + groups = self.nms.stmf.list_targetgroups() + if not groups: + return False + return target_group in groups + + def _target_member_in_target_group(self, target_group, target_member): + """Check if target member in target group. + + :param target_group: target group + :param target_member: target member + :return: True if target member in target group, else False + :raises: NexentaException if target group doesn't exist + """ + members = self.nms.stmf.list_targetgroup_members(target_group) + if not members: + return False + return target_member in members + + def _lu_exists(self, zvol_name): + """Check if LU exists on appliance. + + :param zvol_name: Zvol name + :raises: NexentaException if zvol not exists + :return: True if LU exists, else False + """ + try: + return bool(self.nms.scsidisk.lu_exists(zvol_name)) + except nexenta.NexentaException as exc: + if 'does not exist' not in exc.args[0]: + raise + return False + + def _is_lu_shared(self, zvol_name): + """Check if LU exists on appliance and shared. + + :param zvol_name: Zvol name + :raises: NexentaException if Zvol not exist + :return: True if LU exists and shared, else False + """ + try: + shared = self.nms.scsidisk.lu_shared(zvol_name) > 0 + except nexenta.NexentaException as exc: + if 'does not exist for zvol' not in exc.args[0]: + raise # Zvol does not exists + shared = False # LU does not exist + return shared + + def _is_volume_exported(self, volume): + """Check if volume exported. 
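+
+        True only when the target, the target group, the group membership,
+        the LU and its LUN mapping all exist.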
+ + :param volume: volume object + :return: True if volume exported, else False + """ + zvol_name = self._get_zvol_name(volume['name']) + target_name = self._get_target_name(volume['name']) + target_group_name = self._get_target_group_name(volume['name']) + return (self._target_exists(target_name) and + self._target_group_exists(target_group_name) and + self._target_member_in_target_group(target_group_name, + target_name) and + self._lu_exists(zvol_name) and + self._is_lu_shared(zvol_name)) + + def _get_provider_location(self, volume): + """Returns volume iscsiadm-formatted provider location string.""" + return '%(host)s:%(port)s,1 %(name)s 0' % { + 'host': self.nms_host, + 'port': self.configuration.nexenta_iscsi_target_portal_port, + 'name': self._get_target_name(volume['name']) + } + + def _do_export(self, _ctx, volume, ensure=False): + """Do all steps to get zvol exported as LUN 0 at separate target. + + :param volume: reference of volume to be exported + :param ensure: if True, ignore errors caused by already existing + resources + """ + zvol_name = self._get_zvol_name(volume['name']) + target_name = self._get_target_name(volume['name']) + target_group_name = self._get_target_group_name(volume['name']) + + if not self._target_exists(target_name): + try: + self.nms.iscsitarget.create_target({ + 'target_name': target_name}) + except nexenta.NexentaException as exc: + if ensure and 'already configured' in exc.args[0]: + LOG.info(_('Ignored target creation error "%s" while ' + 'ensuring export'), exc) + else: + raise + if not self._target_group_exists(target_group_name): + try: + self.nms.stmf.create_targetgroup(target_group_name) + except nexenta.NexentaException as exc: + if ((ensure and 'already exists' in exc.args[0]) or + 'target must be offline' in exc.args[0]): + LOG.info(_('Ignored target group creation error "%s" ' + 'while ensuring export'), exc) + else: + raise + if not self._target_member_in_target_group(target_group_name, + target_name): + try: + self.nms.stmf.add_targetgroup_member(target_group_name, + target_name) + except nexenta.NexentaException as exc: + if ((ensure and 'already exists' in exc.args[0]) or + 'target must be offline' in exc.args[0]): + LOG.info(_('Ignored target group member addition error ' + '"%s" while ensuring export'), exc) + else: + raise + if not self._lu_exists(zvol_name): + try: + self.nms.scsidisk.create_lu(zvol_name, {}) + except nexenta.NexentaException as exc: + if not ensure or 'in use' not in exc.args[0]: + raise + LOG.info(_('Ignored LU creation error "%s" while ensuring ' + 'export'), exc) + if not self._is_lu_shared(zvol_name): + try: + self.nms.scsidisk.add_lun_mapping_entry(zvol_name, { + 'target_group': target_group_name, + 'lun': '0'}) + except nexenta.NexentaException as exc: + if not ensure or 'view entry exists' not in exc.args[0]: + raise + LOG.info(_('Ignored LUN mapping entry addition error "%s" ' + 'while ensuring export'), exc) + + def create_export(self, _ctx, volume): + """Create new export for zvol. + + :param volume: reference of volume to be exported + :return: iscsiadm-formatted provider location string + """ + self._do_export(_ctx, volume, ensure=False) + return {'provider_location': self._get_provider_location(volume)} + + def ensure_export(self, _ctx, volume): + """Recreate parts of export if necessary. + + :param volume: reference of volume to be exported + """ + self._do_export(_ctx, volume, ensure=True) + + def remove_export(self, _ctx, volume): + """Destroy all resources created to export zvol. 
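+
+        Removal of the target and target group is best-effort: errors are
+        logged and the resources are assumed to be already gone.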
+ + :param volume: reference of volume to be unexported + """ + zvol_name = self._get_zvol_name(volume['name']) + target_name = self._get_target_name(volume['name']) + target_group_name = self._get_target_group_name(volume['name']) + self.nms.scsidisk.delete_lu(zvol_name) + + try: + self.nms.stmf.destroy_targetgroup(target_group_name) + except nexenta.NexentaException as exc: + # We assume that target group is already gone + LOG.warn(_('Got error trying to destroy target group' + ' %(target_group)s, assuming it is ' + 'already gone: %(exc)s'), + {'target_group': target_group_name, 'exc': exc}) + try: + self.nms.iscsitarget.delete_target(target_name) + except nexenta.NexentaException as exc: + # We assume that target is gone as well + LOG.warn(_('Got error trying to delete target %(target)s,' + ' assuming it is already gone: %(exc)s'), + {'target': target_name, 'exc': exc}) + + def get_volume_stats(self, refresh=False): + """Get volume stats. + + If 'refresh' is True, run update the stats first. + """ + if refresh: + self._update_volume_stats() + + return self._stats + + def _update_volume_stats(self): + """Retrieve stats info for NexentaStor appliance.""" + LOG.debug(_('Updating volume stats')) + + stats = self.nms.volume.get_child_props( + self.configuration.nexenta_volume, 'health|size|used|available') + + total_amount = utils.str2gib_size(stats['size']) + free_amount = utils.str2gib_size(stats['available']) + + location_info = '%(driver)s:%(host)s:%(volume)s' % { + 'driver': self.__class__.__name__, + 'host': self.nms_host, + 'volume': self.volume + } + + self._stats = { + 'vendor_name': 'Nexenta', + 'driver_version': self.VERSION, + 'storage_protocol': 'iSCSI', + 'total_capacity_gb': total_amount, + 'free_capacity_gb': free_amount, + 'reserved_percentage': 0, + 'QoS_support': False, + 'volume_backend_name': self.backend_name, + 'location_info': location_info + } diff --git a/cinder/volume/nexenta/jsonrpc.py b/cinder/volume/drivers/nexenta/jsonrpc.py similarity index 61% rename from cinder/volume/nexenta/jsonrpc.py rename to cinder/volume/drivers/nexenta/jsonrpc.py index 2853303c42..58ee4af26a 100644 --- a/cinder/volume/nexenta/jsonrpc.py +++ b/cinder/volume/drivers/nexenta/jsonrpc.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# # Copyright 2011 Nexenta Systems, Inc. # All Rights Reserved. # @@ -20,15 +18,16 @@ .. automodule:: nexenta.jsonrpc .. moduleauthor:: Yuriy Taraday +.. 
moduleauthor:: Victor Rodionov """ import urllib2 from cinder.openstack.common import jsonutils -from cinder.volume import nexenta from cinder.openstack.common import log as logging +from cinder.volume.drivers import nexenta -LOG = logging.getLogger("cinder.volume.nexenta.jsonrpc") +LOG = logging.getLogger(__name__) class NexentaJSONException(nexenta.NexentaException): @@ -36,8 +35,13 @@ class NexentaJSONException(nexenta.NexentaException): class NexentaJSONProxy(object): - def __init__(self, url, user, password, auto=False, obj=None, method=None): - self.url = url + + def __init__(self, scheme, host, port, path, user, password, auto=False, + obj=None, method=None): + self.scheme = scheme.lower() + self.host = host + self.port = port + self.path = path self.user = user self.password = password self.auto = auto @@ -51,34 +55,46 @@ def __getattr__(self, name): obj, method = self.obj, name else: obj, method = '%s.%s' % (self.obj, self.method), name - return NexentaJSONProxy(self.url, self.user, self.password, self.auto, - obj, method) + return NexentaJSONProxy(self.scheme, self.host, self.port, self.path, + self.user, self.password, self.auto, obj, + method) + + @property + def url(self): + return '%s://%s:%s%s' % (self.scheme, self.host, self.port, self.path) + + def __hash__(self): + return self.url.__hash__() + + def __repr__(self): + return 'NMS proxy: %s' % self.url def __call__(self, *args): - data = jsonutils.dumps({'object': self.obj, - 'method': self.method, - 'params': args}) + data = jsonutils.dumps({ + 'object': self.obj, + 'method': self.method, + 'params': args + }) auth = ('%s:%s' % (self.user, self.password)).encode('base64')[:-1] - headers = {'Content-Type': 'application/json', - 'Authorization': 'Basic %s' % (auth,)} + headers = { + 'Content-Type': 'application/json', + 'Authorization': 'Basic %s' % auth + } LOG.debug(_('Sending JSON data: %s'), data) request = urllib2.Request(self.url, data, headers) response_obj = urllib2.urlopen(request) if response_obj.info().status == 'EOF in headers': - if self.auto and self.url.startswith('http://'): - LOG.info(_('Auto switching to HTTPS connection to %s'), - self.url) - self.url = 'https' + self.url[4:] - request = urllib2.Request(self.url, data, headers) - response_obj = urllib2.urlopen(request) - else: + if not self.auto or self.scheme != 'http': LOG.error(_('No headers in server response')) raise NexentaJSONException(_('Bad response from server')) + LOG.info(_('Auto switching to HTTPS connection to %s'), self.url) + self.scheme = 'https' + request = urllib2.Request(self.url, data, headers) + response_obj = urllib2.urlopen(request) response_data = response_obj.read() LOG.debug(_('Got response: %s'), response_data) response = jsonutils.loads(response_data) if response.get('error') is not None: raise NexentaJSONException(response['error'].get('message', '')) - else: - return response.get('result') + return response.get('result') diff --git a/cinder/volume/drivers/nexenta/nfs.py b/cinder/volume/drivers/nexenta/nfs.py new file mode 100644 index 0000000000..d20cbf48f2 --- /dev/null +++ b/cinder/volume/drivers/nexenta/nfs.py @@ -0,0 +1,442 @@ +# Copyright 2013 Nexenta Systems, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+:mod:`nexenta.nfs` -- Driver to store volumes on NexentaStor Appliance.
+=======================================================================
+
+.. automodule:: nexenta.nfs
+.. moduleauthor:: Mikhail Khodos
+.. moduleauthor:: Victor Rodionov
+"""
+
+import hashlib
+import os
+
+from cinder import context
+from cinder import db
+from cinder import exception
+from cinder.openstack.common import log as logging
+from cinder import units
+from cinder.volume.drivers import nexenta
+from cinder.volume.drivers.nexenta import jsonrpc
+from cinder.volume.drivers.nexenta import options
+from cinder.volume.drivers.nexenta import utils
+from cinder.volume.drivers import nfs
+
+VERSION = '1.1.3'
+LOG = logging.getLogger(__name__)
+
+
+class NexentaNfsDriver(nfs.NfsDriver):  # pylint: disable=R0921
+    """Executes volume driver commands on Nexenta Appliance.
+
+    Version history:
+        1.0.0 - Initial driver version.
+        1.1.0 - Auto sharing for enclosing folder.
+        1.1.1 - Added caching for NexentaStor appliance 'volroot' value.
+        1.1.2 - Ignore "folder does not exist" error in delete_volume and
+                delete_snapshot method.
+        1.1.3 - Redefined volume_backend_name attribute inherited from
+                RemoteFsDriver.
+    """
+
+    driver_prefix = 'nexenta'
+    volume_backend_name = 'NexentaNfsDriver'
+    VERSION = VERSION
+
+    def __init__(self, *args, **kwargs):
+        super(NexentaNfsDriver, self).__init__(*args, **kwargs)
+        if self.configuration:
+            self.configuration.append_config_values(
+                options.NEXENTA_NFS_OPTIONS)
+        conf = self.configuration
+        self.nms_cache_volroot = conf.nexenta_nms_cache_volroot
+        self._nms2volroot = {}
+        self.share2nms = {}
+
+    def do_setup(self, context):
+        super(NexentaNfsDriver, self).do_setup(context)
+        self._load_shares_config(getattr(self.configuration,
+                                         self.driver_prefix +
+                                         '_shares_config'))
+
+    def check_for_setup_error(self):
+        """Verify that the volume for our folder exists.
+
+        :raise: :py:exc:`LookupError`
+        """
+        if self.share2nms:
+            for nfs_share in self.share2nms:
+                nms = self.share2nms[nfs_share]
+                volume_name, dataset = self._get_share_datasets(nfs_share)
+                if not nms.volume.object_exists(volume_name):
+                    raise LookupError(_("Volume %s does not exist in "
+                                        "Nexenta Store appliance")
+                                      % volume_name)
+                folder = '%s/%s' % (volume_name, dataset)
+                if not nms.folder.object_exists(folder):
+                    raise LookupError(_("Folder %s does not exist in "
+                                        "Nexenta Store appliance")
+                                      % folder)
+                self._share_folder(nms, volume_name, dataset)
+
+    def initialize_connection(self, volume, connector):
+        """Allow connection to connector and return connection info.
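+
+        The returned dict has roughly the following shape (values are
+        illustrative)::
+
+            {'driver_volume_type': 'nfs',
+             'data': {'export': '172.18.194.100:/var/nfs/volume-0001',
+                      'name': 'volume'}}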
+ + :param volume: volume reference + :param connector: connector reference + """ + export = '%s/%s' % (volume['provider_location'], volume['name']) + data = {'export': export, 'name': 'volume'} + if volume['provider_location'] in self.shares: + data['options'] = self.shares[volume['provider_location']] + return { + 'driver_volume_type': self.driver_volume_type, + 'data': data + } + + def _do_create_volume(self, volume): + nfs_share = volume['provider_location'] + nms = self.share2nms[nfs_share] + + vol, dataset = self._get_share_datasets(nfs_share) + folder = '%s/%s' % (dataset, volume['name']) + LOG.debug(_('Creating folder on Nexenta Store %s'), folder) + nms.folder.create_with_props( + vol, folder, + {'compression': self.configuration.nexenta_volume_compression} + ) + + volume_path = self.remote_path(volume) + volume_size = volume['size'] + try: + self._share_folder(nms, vol, folder) + + if getattr(self.configuration, + self.driver_prefix + '_sparsed_volumes'): + self._create_sparsed_file(nms, volume_path, volume_size) + else: + compression = nms.folder.get('compression') + if compression != 'off': + # Disable compression, because otherwise will not use space + # on disk. + nms.folder.set('compression', 'off') + try: + self._create_regular_file(nms, volume_path, volume_size) + finally: + if compression != 'off': + # Backup default compression value if it was changed. + nms.folder.set('compression', compression) + + self._set_rw_permissions_for_all(nms, volume_path) + except nexenta.NexentaException as exc: + try: + nms.folder.destroy('%s/%s' % (vol, folder)) + except nexenta.NexentaException: + LOG.warning(_("Cannot destroy created folder: " + "%(vol)s/%(folder)s"), + {'vol': vol, 'folder': folder}) + raise exc + + def create_volume_from_snapshot(self, volume, snapshot): + """Create new volume from other's snapshot on appliance. + + :param volume: reference of volume to be created + :param snapshot: reference of source snapshot + """ + self._ensure_shares_mounted() + + snapshot_vol = self._get_snapshot_volume(snapshot) + nfs_share = snapshot_vol['provider_location'] + volume['provider_location'] = nfs_share + nms = self.share2nms[nfs_share] + + vol, dataset = self._get_share_datasets(nfs_share) + snapshot_name = '%s/%s/%s@%s' % (vol, dataset, snapshot['volume_name'], + snapshot['name']) + folder = '%s/%s' % (dataset, volume['name']) + nms.folder.clone(snapshot_name, '%s/%s' % (vol, folder)) + + try: + self._share_folder(nms, vol, folder) + except nexenta.NexentaException: + try: + nms.folder.destroy('%s/%s' % (vol, folder), '') + except nexenta.NexentaException: + LOG.warning(_("Cannot destroy cloned folder: " + "%(vol)s/%(folder)s"), + {'vol': vol, 'folder': folder}) + raise + + return {'provider_location': volume['provider_location']} + + def create_cloned_volume(self, volume, src_vref): + """Creates a clone of the specified volume. + + :param volume: new volume reference + :param src_vref: source volume reference + """ + LOG.info(_('Creating clone of volume: %s'), src_vref['id']) + snapshot = {'volume_name': src_vref['name'], + 'volume_id': src_vref['id'], + 'name': self._get_clone_snapshot_name(volume)} + # We don't delete this snapshot, because this snapshot will be origin + # of new volume. This snapshot will be automatically promoted by NMS + # when user will delete its origin. 
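+        # delete_volume() later inspects the folder's 'origin' property and
+        # removes this snapshot once the clone itself is gone.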
+ self.create_snapshot(snapshot) + try: + return self.create_volume_from_snapshot(volume, snapshot) + except nexenta.NexentaException: + LOG.error(_('Volume creation failed, deleting created snapshot ' + '%(volume_name)s@%(name)s'), snapshot) + try: + self.delete_snapshot(snapshot) + except (nexenta.NexentaException, exception.SnapshotIsBusy): + LOG.warning(_('Failed to delete zfs snapshot ' + '%(volume_name)s@%(name)s'), snapshot) + raise + + def delete_volume(self, volume): + """Deletes a logical volume. + + :param volume: volume reference + """ + super(NexentaNfsDriver, self).delete_volume(volume) + + nfs_share = volume.get('provider_location') + + if nfs_share: + nms = self.share2nms[nfs_share] + vol, parent_folder = self._get_share_datasets(nfs_share) + folder = '%s/%s/%s' % (vol, parent_folder, volume['name']) + props = nms.folder.get_child_props(folder, 'origin') or {} + try: + nms.folder.destroy(folder, '-r') + except nexenta.NexentaException as exc: + if 'does not exist' in exc.args[0]: + LOG.info(_('Folder %s does not exist, it was ' + 'already deleted.'), folder) + return + raise + origin = props.get('origin') + if origin and self._is_clone_snapshot_name(origin): + try: + nms.snapshot.destroy(origin, '') + except nexenta.NexentaException as exc: + if 'does not exist' in exc.args[0]: + LOG.info(_('Snapshot %s does not exist, it was ' + 'already deleted.'), origin) + return + raise + + def create_snapshot(self, snapshot): + """Creates a snapshot. + + :param snapshot: snapshot reference + """ + volume = self._get_snapshot_volume(snapshot) + nfs_share = volume['provider_location'] + nms = self.share2nms[nfs_share] + vol, dataset = self._get_share_datasets(nfs_share) + folder = '%s/%s/%s' % (vol, dataset, volume['name']) + nms.folder.create_snapshot(folder, snapshot['name'], '-r') + + def delete_snapshot(self, snapshot): + """Deletes a snapshot. + + :param snapshot: snapshot reference + """ + volume = self._get_snapshot_volume(snapshot) + nfs_share = volume['provider_location'] + nms = self.share2nms[nfs_share] + vol, dataset = self._get_share_datasets(nfs_share) + folder = '%s/%s/%s' % (vol, dataset, volume['name']) + try: + nms.snapshot.destroy('%s@%s' % (folder, snapshot['name']), '') + except nexenta.NexentaException as exc: + if 'does not exist' in exc.args[0]: + LOG.info(_('Snapshot %s does not exist, it was ' + 'already deleted.'), '%s@%s' % (folder, snapshot)) + return + raise + + def _create_sparsed_file(self, nms, path, size): + """Creates file with 0 disk usage. + + :param nms: nms object + :param path: path to new file + :param size: size of file + """ + block_size_mb = 1 + block_count = size * units.GiB / (block_size_mb * units.MiB) + + nms.appliance.execute( + 'dd if=/dev/zero of=%(path)s bs=%(bs)dM count=0 seek=%(count)d' % { + 'path': path, + 'bs': block_size_mb, + 'count': block_count + } + ) + + def _create_regular_file(self, nms, path, size): + """Creates regular file of given size. + Takes a lot of time for large files. + + :param nms: nms object + :param path: path to new file + :param size: size of file + """ + block_size_mb = 1 + block_count = size * units.GiB / (block_size_mb * units.MiB) + + LOG.info(_('Creating regular file: %s.' 
+                   ' This may take some time.') % path)
+
+        nms.appliance.execute(
+            'dd if=/dev/zero of=%(path)s bs=%(bs)dM count=%(count)d' % {
+                'path': path,
+                'bs': block_size_mb,
+                'count': block_count
+            }
+        )
+
+        LOG.info(_('Regular file: %s created.') % path)
+
+    def _set_rw_permissions_for_all(self, nms, path):
+        """Sets 666 permissions for the path.
+
+        :param nms: nms object
+        :param path: path to file
+        """
+        nms.appliance.execute('chmod ugo+rw %s' % path)
+
+    def local_path(self, volume):
+        """Get volume path (mounted locally fs path) for given volume.
+
+        :param volume: volume reference
+        """
+        nfs_share = volume['provider_location']
+        return os.path.join(self._get_mount_point_for_share(nfs_share),
+                            volume['name'], 'volume')
+
+    def _get_mount_point_for_share(self, nfs_share):
+        """Returns path to the local mount point for an NFS share.
+
+        :param nfs_share: example 172.18.194.100:/var/nfs
+        """
+        return os.path.join(self.configuration.nexenta_mount_point_base,
+                            hashlib.md5(nfs_share).hexdigest())
+
+    def remote_path(self, volume):
+        """Get volume path (mounted remotely fs path) for given volume.
+
+        :param volume: volume reference
+        """
+        nfs_share = volume['provider_location']
+        share = nfs_share.split(':')[1].rstrip('/')
+        return '%s/%s/volume' % (share, volume['name'])
+
+    def _share_folder(self, nms, volume, folder):
+        """Share NFS folder on NexentaStor Appliance.
+
+        :param nms: nms object
+        :param volume: volume name
+        :param folder: folder name
+        """
+        path = '%s/%s' % (volume, folder.lstrip('/'))
+        share_opts = {
+            'read_write': '*',
+            'read_only': '',
+            'root': 'nobody',
+            'extra_options': 'anon=0',
+            'recursive': 'true',
+            'anonymous_rw': 'true',
+        }
+        LOG.debug(_('Sharing folder %s on Nexenta Store'), folder)
+        nms.netstorsvc.share_folder('svc:/network/nfs/server:default', path,
+                                    share_opts)
+
+    def _load_shares_config(self, share_file):
+        """Read the shares config file into self.shares and self.share2nms."""
+        self.shares = {}
+        self.share2nms = {}
+
+        for share in self._read_config_file(share_file):
+            # A configuration line may be either:
+            #   host:/share_name  http://user:pass@host:[port]/
+            # or
+            #   host:/share_name  http://user:pass@host:[port]/
+            #       -o options=123,rw --other
+            if not share.strip():
+                continue
+            if share.startswith('#'):
+                continue
+
+            share_info = share.split(' ', 2)
+
+            share_address = share_info[0].strip().decode('unicode_escape')
+            nms_url = share_info[1].strip()
+            share_opts = share_info[2].strip() if len(share_info) > 2 else None
+
+            self.shares[share_address] = share_opts
+            self.share2nms[share_address] = self._get_nms_for_url(nms_url)
+
+        LOG.debug(_('Shares loaded: %s') % self.shares)
+
+    def _get_capacity_info(self, nfs_share):
+        """Calculate available space on the NFS share.
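+
+        Returns a (total, free, allocated) tuple, all in bytes.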
+ + :param nfs_share: example 172.18.194.100:/var/nfs + """ + nms = self.share2nms[nfs_share] + ns_volume, ns_folder = self._get_share_datasets(nfs_share) + folder_props = nms.folder.get_child_props('%s/%s' % (ns_volume, + ns_folder), '') + free = utils.str2size(folder_props['available']) + allocated = utils.str2size(folder_props['used']) + return free + allocated, free, allocated + + def _get_nms_for_url(self, url): + """Returns initialized nms object for url.""" + auto, scheme, user, password, host, port, path =\ + utils.parse_nms_url(url) + return jsonrpc.NexentaJSONProxy(scheme, host, port, path, user, + password, auto=auto) + + def _get_snapshot_volume(self, snapshot): + ctxt = context.get_admin_context() + return db.volume_get(ctxt, snapshot['volume_id']) + + def _get_volroot(self, nms): + """Returns volroot property value from NexentaStor appliance.""" + if not self.nms_cache_volroot: + return nms.server.get_prop('volroot') + if nms not in self._nms2volroot: + self._nms2volroot[nms] = nms.server.get_prop('volroot') + return self._nms2volroot[nms] + + def _get_share_datasets(self, nfs_share): + nms = self.share2nms[nfs_share] + volroot = self._get_volroot(nms) + path = nfs_share.split(':')[1][len(volroot):].strip('/') + volume_name = path.split('/')[0] + folder_name = '/'.join(path.split('/')[1:]) + return volume_name, folder_name + + def _get_clone_snapshot_name(self, volume): + """Return name for snapshot that will be used to clone the volume.""" + return 'cinder-clone-snapshot-%(id)s' % volume + + def _is_clone_snapshot_name(self, snapshot): + """Check if snapshot is created for cloning.""" + name = snapshot.split('@')[-1] + return name.startswith('cinder-clone-snapshot-') diff --git a/cinder/volume/drivers/nexenta/options.py b/cinder/volume/drivers/nexenta/options.py new file mode 100644 index 0000000000..ee872a2e37 --- /dev/null +++ b/cinder/volume/drivers/nexenta/options.py @@ -0,0 +1,109 @@ +# Copyright 2013 Nexenta Systems, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +:mod:`nexenta.options` -- Contains configuration options for Nexenta drivers. +============================================================================= + +.. automodule:: nexenta.options +.. moduleauthor:: Victor Rodionov +.. 
moduleauthor:: Yuriy Taraday +""" + +from oslo.config import cfg + + +NEXENTA_CONNECTION_OPTIONS = [ + cfg.StrOpt('nexenta_host', + default='', + help='IP address of Nexenta SA'), + cfg.IntOpt('nexenta_rest_port', + default=2000, + help='HTTP port to connect to Nexenta REST API server'), + cfg.StrOpt('nexenta_rest_protocol', + default='auto', + help='Use http or https for REST connection (default auto)'), + cfg.StrOpt('nexenta_user', + default='admin', + help='User name to connect to Nexenta SA'), + cfg.StrOpt('nexenta_password', + default='nexenta', + help='Password to connect to Nexenta SA', + secret=True), +] + +NEXENTA_ISCSI_OPTIONS = [ + cfg.IntOpt('nexenta_iscsi_target_portal_port', + default=3260, + help='Nexenta target portal port'), + cfg.StrOpt('nexenta_volume', + default='cinder', + help='pool on SA that will hold all volumes'), + cfg.StrOpt('nexenta_target_prefix', + default='iqn.1986-03.com.sun:02:cinder-', + help='IQN prefix for iSCSI targets'), + cfg.StrOpt('nexenta_target_group_prefix', + default='cinder/', + help='prefix for iSCSI target groups on SA'), +] + +NEXENTA_NFS_OPTIONS = [ + cfg.StrOpt('nexenta_shares_config', + default='/etc/cinder/nfs_shares', + help='File with the list of available nfs shares'), + cfg.StrOpt('nexenta_mount_point_base', + default='$state_path/mnt', + help='Base dir containing mount points for nfs shares'), + cfg.BoolOpt('nexenta_sparsed_volumes', + default=True, + help=('Create volumes as sparsed files which take no space.' + 'If set to False volume is created as regular file.' + 'In such case volume creation takes a lot of time.')), + cfg.StrOpt('nexenta_volume_compression', + default='on', + help='Default compression value for new ZFS folders.'), + cfg.BoolOpt('nexenta_nms_cache_volroot', + default=True, + help=('If set True cache NexentaStor appliance volroot option ' + 'value.')) +] + +NEXENTA_VOLUME_OPTIONS = [ + cfg.StrOpt('nexenta_blocksize', + default='', + help='block size for volumes (blank=default,8KB)'), + cfg.BoolOpt('nexenta_sparse', + default=False, + help='flag to create sparse volumes'), +] + +NEXENTA_RRMGR_OPTIONS = [ + cfg.IntOpt('nexenta_rrmgr_compression', + default=0, + help=('Enable stream compression, level 1..9. 1 - gives best ' + 'speed; 9 - gives best compression.')), + cfg.IntOpt('nexenta_rrmgr_tcp_buf_size', + default=4096, + help='TCP Buffer size in KiloBytes.'), + cfg.IntOpt('nexenta_rrmgr_connections', + default=2, + help='Number of TCP connections.'), +] + +CONF = cfg.CONF +CONF.register_opts(NEXENTA_CONNECTION_OPTIONS) +CONF.register_opts(NEXENTA_ISCSI_OPTIONS) +CONF.register_opts(NEXENTA_VOLUME_OPTIONS) +CONF.register_opts(NEXENTA_NFS_OPTIONS) +CONF.register_opts(NEXENTA_RRMGR_OPTIONS) diff --git a/cinder/volume/drivers/nexenta/utils.py b/cinder/volume/drivers/nexenta/utils.py new file mode 100644 index 0000000000..375835c0d6 --- /dev/null +++ b/cinder/volume/drivers/nexenta/utils.py @@ -0,0 +1,118 @@ +# Copyright 2013 Nexenta Systems, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
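+#
+# A worked example of the rrmgr command assembled by get_rrmgr_cmd() below
+# (paths and tuning values are illustrative):
+#
+#   get_rrmgr_cmd('pool/vol@snap', 'host2:pool2', compression=4,
+#                 tcp_buf_size=4096, connections=2)
+#   -> 'rrmgr -s zfs -c 4 -q -e -w 4096 -n 2 pool/vol@snap host2:pool2'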
+""" +:mod:`nexenta.utils` -- Nexenta-specific utils functions. +========================================================= + +.. automodule:: nexenta.utils +.. moduleauthor:: Victor Rodionov +.. moduleauthor:: Mikhail Khodos +""" + +import re +import urlparse + +from cinder import units + + +def str2size(s, scale=1024): + """Convert size-string. + + String format: [:space:] to bytes. + + :param s: size-string + :param scale: base size + """ + if not s: + return 0 + + if isinstance(s, (int, long)): + return s + + match = re.match(r'^([\.\d]+)\s*([BbKkMmGgTtPpEeZzYy]?)', s) + if match is None: + raise ValueError(_('Invalid value: "%s"') % s) + + groups = match.groups() + value = float(groups[0]) + suffix = len(groups) > 1 and groups[1].upper() or 'B' + + types = ('B', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y') + for i, t in enumerate(types): + if suffix == t: + return int(value * pow(scale, i)) + + +def str2gib_size(s): + """Covert size-string to size in gigabytes.""" + size_in_bytes = str2size(s) + return size_in_bytes / units.GiB + + +def get_rrmgr_cmd(src, dst, compression=None, tcp_buf_size=None, + connections=None): + """Returns rrmgr command for source and destination.""" + cmd = ['rrmgr', '-s', 'zfs'] + if compression: + cmd.extend(['-c', '%s' % str(compression)]) + cmd.append('-q') + cmd.append('-e') + if tcp_buf_size: + cmd.extend(['-w', str(tcp_buf_size)]) + if connections: + cmd.extend(['-n', str(connections)]) + cmd.extend([src, dst]) + return ' '.join(cmd) + + +def parse_nms_url(url): + """Parse NMS url into normalized parts like scheme, user, host and others. + + Example NMS URL: + auto://admin:nexenta@192.168.1.1:2000/ + + NMS URL parts: + auto True if url starts with auto://, protocol will be + automatically switched to https if http not + supported; + scheme (auto) connection protocol (http or https); + user (admin) NMS user; + password (nexenta) NMS password; + host (192.168.1.1) NMS host; + port (2000) NMS port. + + :param url: url string + :return: tuple (auto, scheme, user, password, host, port, path) + """ + pr = urlparse.urlparse(url) + scheme = pr.scheme + auto = scheme == 'auto' + if auto: + scheme = 'http' + user = 'admin' + password = 'nexenta' + if '@' not in pr.netloc: + host_and_port = pr.netloc + else: + user_and_password, host_and_port = pr.netloc.split('@', 1) + if ':' in user_and_password: + user, password = user_and_password.split(':') + else: + user = user_and_password + if ':' in host_and_port: + host, port = host_and_port.split(':', 1) + else: + host, port = host_and_port, '2000' + return auto, scheme, user, password, host, port, '/rest/nms/' diff --git a/cinder/volume/drivers/nfs.py b/cinder/volume/drivers/nfs.py new file mode 100644 index 0000000000..24091f5efa --- /dev/null +++ b/cinder/volume/drivers/nfs.py @@ -0,0 +1,561 @@ +# Copyright (c) 2012 NetApp, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import errno +import os +import re + +from oslo.config import cfg + +from cinder.brick.remotefs import remotefs +from cinder import exception +from cinder.image import image_utils +from cinder.openstack.common import log as logging +from cinder.openstack.common import processutils as putils +from cinder import units +from cinder import utils +from cinder.volume import driver + +VERSION = '1.1.0' + +LOG = logging.getLogger(__name__) + +volume_opts = [ + cfg.StrOpt('nfs_shares_config', + default='/etc/cinder/nfs_shares', + help='File with the list of available nfs shares'), + cfg.BoolOpt('nfs_sparsed_volumes', + default=True, + help=('Create volumes as sparsed files which take no space.' + 'If set to False volume is created as regular file.' + 'In such case volume creation takes a lot of time.')), + cfg.FloatOpt('nfs_used_ratio', + default=0.95, + help=('Percent of ACTUAL usage of the underlying volume ' + 'before no new volumes can be allocated to the volume ' + 'destination.')), + cfg.FloatOpt('nfs_oversub_ratio', + default=1.0, + help=('This will compare the allocated to available space on ' + 'the volume destination. If the ratio exceeds this ' + 'number, the destination will no longer be valid.')), + cfg.StrOpt('nfs_mount_point_base', + default='$state_path/mnt', + help=('Base dir containing mount points for nfs shares.')), + cfg.StrOpt('nfs_mount_options', + default=None, + help=('Mount options passed to the nfs client. See section ' + 'of the nfs man page for details.')), +] + + +CONF = cfg.CONF +CONF.register_opts(volume_opts) + + +class RemoteFsDriver(driver.VolumeDriver): + """Common base for drivers that work like NFS.""" + + VERSION = "0.0.0" + + def __init__(self, *args, **kwargs): + super(RemoteFsDriver, self).__init__(*args, **kwargs) + self.shares = {} + self._mounted_shares = [] + + def check_for_setup_error(self): + """Just to override parent behavior.""" + pass + + def initialize_connection(self, volume, connector): + """Allow connection to connector and return connection info. + + :param volume: volume reference + :param connector: connector reference + """ + data = {'export': volume['provider_location'], + 'name': volume['name']} + if volume['provider_location'] in self.shares: + data['options'] = self.shares[volume['provider_location']] + return { + 'driver_volume_type': self.driver_volume_type, + 'data': data, + 'mount_point_base': self._get_mount_point_base() + } + + def _get_mount_point_base(self): + """Returns the mount point base for the remote fs. + + This method facilitates returning mount point base + for the specific remote fs. Override this method + in the respective driver to return the entry to be + used while attach/detach using brick in cinder. + If not overridden then it returns None without + raising exception to continue working for cases + when not used with brick. + """ + LOG.debug(_("Driver specific implementation needs to return" + " mount_point_base.")) + return None + + def create_volume(self, volume): + """Creates a volume. + + :param volume: volume reference + """ + self._ensure_shares_mounted() + + volume['provider_location'] = self._find_share(volume['size']) + + LOG.info(_('casted to %s') % volume['provider_location']) + + self._do_create_volume(volume) + + return {'provider_location': volume['provider_location']} + + def _do_create_volume(self, volume): + """Create a volume on given remote share. 
+ + :param volume: volume reference + """ + volume_path = self.local_path(volume) + volume_size = volume['size'] + + if getattr(self.configuration, + self.driver_prefix + '_sparsed_volumes'): + self._create_sparsed_file(volume_path, volume_size) + else: + self._create_regular_file(volume_path, volume_size) + + self._set_rw_permissions_for_all(volume_path) + + def _ensure_shares_mounted(self): + """Look for remote shares in the flags and tries to mount them + locally. + """ + self._mounted_shares = [] + + self._load_shares_config(getattr(self.configuration, + self.driver_prefix + + '_shares_config')) + + for share in self.shares.keys(): + try: + self._ensure_share_mounted(share) + self._mounted_shares.append(share) + except Exception as exc: + LOG.warning(_('Exception during mounting %s') % (exc,)) + + LOG.debug('Available shares %s' % str(self._mounted_shares)) + + def create_cloned_volume(self, volume, src_vref): + raise NotImplementedError() + + def delete_volume(self, volume): + """Deletes a logical volume. + + :param volume: volume reference + """ + if not volume['provider_location']: + LOG.warn(_('Volume %s does not have provider_location specified, ' + 'skipping'), volume['name']) + return + + self._ensure_share_mounted(volume['provider_location']) + + mounted_path = self.local_path(volume) + + self._execute('rm', '-f', mounted_path, run_as_root=True) + + def ensure_export(self, ctx, volume): + """Synchronously recreates an export for a logical volume.""" + self._ensure_share_mounted(volume['provider_location']) + + def create_export(self, ctx, volume): + """Exports the volume. Can optionally return a Dictionary of changes + to the volume object to be persisted. + """ + pass + + def remove_export(self, ctx, volume): + """Removes an export for a logical volume.""" + pass + + def delete_snapshot(self, snapshot): + """Do nothing for this driver, but allow manager to handle deletion + of snapshot in error state. + """ + pass + + def _create_sparsed_file(self, path, size): + """Creates file with 0 disk usage.""" + self._execute('truncate', '-s', '%sG' % size, + path, run_as_root=True) + + def _create_regular_file(self, path, size): + """Creates regular file of given size. Takes a lot of time for large + files. 
+ """ + + block_size_mb = 1 + block_count = size * units.GiB / (block_size_mb * units.MiB) + + self._execute('dd', 'if=/dev/zero', 'of=%s' % path, + 'bs=%dM' % block_size_mb, + 'count=%d' % block_count, + run_as_root=True) + + def _create_qcow2_file(self, path, size_gb): + """Creates a QCOW2 file of a given size.""" + + self._execute('qemu-img', 'create', '-f', 'qcow2', + '-o', 'preallocation=metadata', + path, str(size_gb * units.GiB), + run_as_root=True) + + def _set_rw_permissions_for_all(self, path): + """Sets 666 permissions for the path.""" + self._execute('chmod', 'ugo+rw', path, run_as_root=True) + + def local_path(self, volume): + """Get volume path (mounted locally fs path) for given volume + :param volume: volume reference + """ + nfs_share = volume['provider_location'] + return os.path.join(self._get_mount_point_for_share(nfs_share), + volume['name']) + + def copy_image_to_volume(self, context, volume, image_service, image_id): + """Fetch the image from image_service and write it to the volume.""" + image_utils.fetch_to_raw(context, + image_service, + image_id, + self.local_path(volume), + self.configuration.volume_dd_blocksize, + size=volume['size']) + + # NOTE (leseb): Set the virtual size of the image + # the raw conversion overwrote the destination file + # (which had the correct size) + # with the fetched glance image size, + # thus the initial 'size' parameter is not honored + # this sets the size to the one asked in the first place by the user + # and then verify the final virtual size + image_utils.resize_image(self.local_path(volume), volume['size']) + + data = image_utils.qemu_img_info(self.local_path(volume)) + virt_size = data.virtual_size / units.GiB + if virt_size != volume['size']: + raise exception.ImageUnacceptable( + image_id=image_id, + reason=(_("Expected volume size was %d") % volume['size']) + + (_(" but size is now %d") % virt_size)) + + def copy_volume_to_image(self, context, volume, image_service, image_meta): + """Copy the volume to the specified image.""" + image_utils.upload_volume(context, + image_service, + image_meta, + self.local_path(volume)) + + def _read_config_file(self, config_file): + # Returns list of lines in file + with open(config_file) as f: + return f.readlines() + + def _load_shares_config(self, share_file): + self.shares = {} + + for share in self._read_config_file(share_file): + # A configuration line may be either: + # host:/vol_name + # or + # host:/vol_name -o options=123,rw --other + if not share.strip(): + # Skip blank or whitespace-only lines + continue + if share.startswith('#'): + continue + + share_info = share.split(' ', 1) + # results in share_info = + # [ 'address:/vol', '-o options=123,rw --other' ] + + share_address = share_info[0].strip().decode('unicode_escape') + share_opts = share_info[1].strip() if len(share_info) > 1 else None + + if not re.match(r'.+:/.+', share_address): + LOG.warn("Share %s ignored due to invalid format. Must be of " + "form address:/export." % share_address) + continue + + self.shares[share_address] = share_opts + + LOG.debug("shares loaded: %s", self.shares) + + def _get_mount_point_for_share(self, path): + raise NotImplementedError() + + def terminate_connection(self, volume, connector, **kwargs): + """Disallow connection from connector.""" + pass + + def get_volume_stats(self, refresh=False): + """Get volume stats. + + If 'refresh' is True, update the stats first. 
+ """ + if refresh or not self._stats: + self._update_volume_stats() + + return self._stats + + def _update_volume_stats(self): + """Retrieve stats info from volume group.""" + + data = {} + backend_name = self.configuration.safe_get('volume_backend_name') + data['volume_backend_name'] = backend_name or self.volume_backend_name + data['vendor_name'] = 'Open Source' + data['driver_version'] = self.get_version() + data['storage_protocol'] = self.driver_volume_type + + self._ensure_shares_mounted() + + global_capacity = 0 + global_free = 0 + for share in self._mounted_shares: + capacity, free, used = self._get_capacity_info(share) + global_capacity += capacity + global_free += free + + data['total_capacity_gb'] = global_capacity / float(units.GiB) + data['free_capacity_gb'] = global_free / float(units.GiB) + data['reserved_percentage'] = 0 + data['QoS_support'] = False + self._stats = data + + def _do_mount(self, cmd, ensure, share): + """Finalize mount command. + + :param cmd: command to do the actual mount + :param ensure: boolean to allow remounting a share with a warning + :param share: description of the share for error reporting + """ + try: + self._execute(*cmd, run_as_root=True) + except putils.ProcessExecutionError as exc: + if ensure and 'already mounted' in exc.stderr: + LOG.warn(_("%s is already mounted"), share) + else: + raise + + def _get_capacity_info(self, nfs_share): + raise NotImplementedError() + + def _find_share(self, volume_size_in_gib): + raise NotImplementedError() + + def _ensure_share_mounted(self, nfs_share): + raise NotImplementedError() + + def backup_volume(self, context, backup, backup_service): + """Create a new backup from an existing volume.""" + raise NotImplementedError() + + def restore_backup(self, context, backup, volume, backup_service): + """Restore an existing backup to a new or existing volume.""" + raise NotImplementedError() + + +class NfsDriver(RemoteFsDriver): + """NFS based cinder driver. Creates file on NFS share for using it + as block device on hypervisor. + """ + + driver_volume_type = 'nfs' + driver_prefix = 'nfs' + volume_backend_name = 'Generic_NFS' + VERSION = VERSION + + def __init__(self, execute=putils.execute, *args, **kwargs): + self._remotefsclient = None + super(NfsDriver, self).__init__(*args, **kwargs) + self.configuration.append_config_values(volume_opts) + root_helper = utils.get_root_helper() + # base bound to instance is used in RemoteFsConnector. + self.base = getattr(self.configuration, + 'nfs_mount_point_base', + CONF.nfs_mount_point_base) + opts = getattr(self.configuration, + 'nfs_mount_options', + CONF.nfs_mount_options) + self._remotefsclient = remotefs.RemoteFsClient( + 'nfs', root_helper, execute=execute, + nfs_mount_point_base=self.base, + nfs_mount_options=opts) + + def set_execute(self, execute): + super(NfsDriver, self).set_execute(execute) + if self._remotefsclient: + self._remotefsclient.set_execute(execute) + + def do_setup(self, context): + """Any initialization the volume driver does while starting.""" + super(NfsDriver, self).do_setup(context) + + config = self.configuration.nfs_shares_config + if not config: + msg = (_("There's no NFS config file configured (%s)") % + 'nfs_shares_config') + LOG.warn(msg) + raise exception.NfsException(msg) + if not os.path.exists(config): + msg = (_("NFS config file at %(config)s doesn't exist") % + {'config': config}) + LOG.warn(msg) + raise exception.NfsException(msg) + if not self.configuration.nfs_oversub_ratio > 0: + msg = _("NFS config 'nfs_oversub_ratio' invalid. 
Must be > 0: " + "%s") % self.configuration.nfs_oversub_ratio + + LOG.error(msg) + raise exception.NfsException(msg) + + if ((not self.configuration.nfs_used_ratio > 0) and + (self.configuration.nfs_used_ratio <= 1)): + msg = _("NFS config 'nfs_used_ratio' invalid. Must be > 0 " + "and <= 1.0: %s") % self.configuration.nfs_used_ratio + LOG.error(msg) + raise exception.NfsException(msg) + + self.shares = {} # address : options + + # Check if mount.nfs is installed + try: + self._execute('mount.nfs', check_exit_code=False, run_as_root=True) + except OSError as exc: + if exc.errno == errno.ENOENT: + raise exception.NfsException('mount.nfs is not installed') + else: + raise exc + + def _ensure_share_mounted(self, nfs_share): + mnt_flags = [] + if self.shares.get(nfs_share) is not None: + mnt_flags = self.shares[nfs_share].split() + self._remotefsclient.mount(nfs_share, mnt_flags) + + def _find_share(self, volume_size_in_gib): + """Choose NFS share among available ones for given volume size. + + For instances with more than one share that meets the criteria, the + share with the least "allocated" space will be selected. + + :param volume_size_in_gib: int size in GB + """ + + if not self._mounted_shares: + raise exception.NfsNoSharesMounted() + + target_share = None + target_share_reserved = 0 + + for nfs_share in self._mounted_shares: + if not self._is_share_eligible(nfs_share, volume_size_in_gib): + continue + total_size, total_available, total_allocated = \ + self._get_capacity_info(nfs_share) + if target_share is not None: + if target_share_reserved > total_allocated: + target_share = nfs_share + target_share_reserved = total_allocated + else: + target_share = nfs_share + target_share_reserved = total_allocated + + if target_share is None: + raise exception.NfsNoSuitableShareFound( + volume_size=volume_size_in_gib) + + LOG.debug(_('Selected %s as target nfs share.'), target_share) + + return target_share + + def _is_share_eligible(self, nfs_share, volume_size_in_gib): + """Verifies NFS share is eligible to host volume with given size. + + First validation step: ratio of actual space (used_space / total_space) + is less than 'nfs_used_ratio'. Second validation step: apparent space + allocated (differs from actual space used when using sparse files) + and compares the apparent available + space (total_available * nfs_oversub_ratio) to ensure enough space is + available for the new volume. + + :param nfs_share: nfs share + :param volume_size_in_gib: int size in GB + """ + + used_ratio = self.configuration.nfs_used_ratio + oversub_ratio = self.configuration.nfs_oversub_ratio + requested_volume_size = volume_size_in_gib * units.GiB + + total_size, total_available, total_allocated = \ + self._get_capacity_info(nfs_share) + apparent_size = max(0, total_size * oversub_ratio) + apparent_available = max(0, apparent_size - total_allocated) + used = (total_size - total_available) / total_size + if used > used_ratio: + # NOTE(morganfainberg): We check the used_ratio first since + # with oversubscription it is possible to not have the actual + # available space but be within our oversubscription limit + # therefore allowing this share to still be selected as a valid + # target. 
+
+    def _get_mount_point_for_share(self, nfs_share):
+        """Needed by parent class."""
+        return self._remotefsclient.get_mount_point(nfs_share)
+
+    def _get_capacity_info(self, nfs_share):
+        """Calculate available space on the NFS share.
+
+        :param nfs_share: example 172.18.194.100:/var/nfs
+        """
+
+        mount_point = self._get_mount_point_for_share(nfs_share)
+
+        df, _ = self._execute('stat', '-f', '-c', '%S %b %a', mount_point,
+                              run_as_root=True)
+        block_size, blocks_total, blocks_avail = map(float, df.split())
+        total_available = block_size * blocks_avail
+        total_size = block_size * blocks_total
+
+        du, _ = self._execute('du', '-sb', '--apparent-size', '--exclude',
+                              '*snapshot*', mount_point, run_as_root=True)
+        total_allocated = float(du.split()[0])
+        return total_size, total_available, total_allocated
+
+    def _get_mount_point_base(self):
+        return self.base
diff --git a/cinder/volume/drivers/rbd.py b/cinder/volume/drivers/rbd.py
new file mode 100644
index 0000000000..88236d2f12
--- /dev/null
+++ b/cinder/volume/drivers/rbd.py
@@ -0,0 +1,846 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""RADOS Block Device Driver"""
+
+from __future__ import absolute_import
+import io
+import json
+import os
+import tempfile
+import urllib
+
+from oslo.config import cfg
+
+from cinder import exception
+from cinder.image import image_utils
+from cinder.openstack.common import fileutils
+from cinder.openstack.common import log as logging
+from cinder import units
+from cinder.volume import driver
+
+try:
+    import rados
+    import rbd
+except ImportError:
+    rados = None
+    rbd = None
+
+
+LOG = logging.getLogger(__name__)
+
+rbd_opts = [
+    cfg.StrOpt('rbd_pool',
+               default='rbd',
+               help='the RADOS pool in which rbd volumes are stored'),
+    cfg.StrOpt('rbd_user',
+               default=None,
+               help='the RADOS client name for accessing rbd volumes '
+                    '- only set when using cephx authentication'),
+    cfg.StrOpt('rbd_ceph_conf',
+               default='',  # default determined by librados
+               help='path to the ceph configuration file to use'),
+    cfg.BoolOpt('rbd_flatten_volume_from_snapshot',
+                default=False,
+                help='flatten volumes created from snapshots to remove '
+                     'dependency'),
+    cfg.StrOpt('rbd_secret_uuid',
+               default=None,
+               help='the libvirt uuid of the secret for the rbd_user '
+                    'volumes'),
+    cfg.StrOpt('volume_tmp_dir',
+               default=None,
+               help='where to store temporary image files if the volume '
+                    'driver does not write them directly to the volume'),
+    cfg.IntOpt('rbd_max_clone_depth',
+               default=5,
+               help='maximum number of nested clones that can be taken of a '
+                    'volume before enforcing a flatten prior to next clone.
' + 'A value of zero disables cloning')] + +CONF = cfg.CONF +CONF.register_opts(rbd_opts) + + +def ascii_str(string): + """Convert a string to ascii, or return None if the input is None. + + This is useful when a parameter is None by default, or a string. LibRBD + only accepts ascii, hence the need for conversion. + """ + if string is None: + return string + return str(string) + + +class RBDImageMetadata(object): + """RBD image metadata to be used with RBDImageIOWrapper.""" + def __init__(self, image, pool, user, conf): + self.image = image + self.pool = str(pool) + self.user = str(user) + self.conf = str(conf) + + +class RBDImageIOWrapper(io.RawIOBase): + """Enables LibRBD.Image objects to be treated as Python IO objects. + + Calling unimplemented interfaces will raise IOError. + """ + + def __init__(self, rbd_meta): + super(RBDImageIOWrapper, self).__init__() + self._rbd_meta = rbd_meta + self._offset = 0 + + def _inc_offset(self, length): + self._offset += length + + @property + def rbd_image(self): + return self._rbd_meta.image + + @property + def rbd_user(self): + return self._rbd_meta.user + + @property + def rbd_pool(self): + return self._rbd_meta.pool + + @property + def rbd_conf(self): + return self._rbd_meta.conf + + def read(self, length=None): + offset = self._offset + total = self._rbd_meta.image.size() + + # NOTE(dosaboy): posix files do not barf if you read beyond their + # length (they just return nothing) but rbd images do so we need to + # return empty string if we have reached the end of the image. + if (offset >= total): + return '' + + if length is None: + length = total + + if (offset + length) > total: + length = total - offset + + self._inc_offset(length) + return self._rbd_meta.image.read(int(offset), int(length)) + + def write(self, data): + self._rbd_meta.image.write(data, self._offset) + self._inc_offset(len(data)) + + def seekable(self): + return True + + def seek(self, offset, whence=0): + if whence == 0: + new_offset = offset + elif whence == 1: + new_offset = self._offset + offset + elif whence == 2: + new_offset = self._rbd_meta.image.size() + new_offset += offset + else: + raise IOError(_("Invalid argument - whence=%s not supported") % + (whence)) + + if (new_offset < 0): + raise IOError(_("Invalid argument")) + + self._offset = new_offset + + def tell(self): + return self._offset + + def flush(self): + try: + self._rbd_meta.image.flush() + except AttributeError: + LOG.warning(_("flush() not supported in this version of librbd")) + + def fileno(self): + """RBD does not have support for fileno() so we raise IOError. + + Raising IOError is recommended way to notify caller that interface is + not supported - see http://docs.python.org/2/library/io.html#io.IOBase + """ + raise IOError(_("fileno() not supported by RBD()")) + + # NOTE(dosaboy): if IO object is not closed explicitly, Python auto closes + # it which, if this is not overridden, calls flush() prior to close which + # in this case is unwanted since the rbd image may have been closed prior + # to the autoclean - currently triggering a segfault in librbd. + def close(self): + pass + + +class RBDVolumeProxy(object): + """Context manager for dealing with an existing rbd volume. + + This handles connecting to rados and opening an ioctx automatically, and + otherwise acts like a librbd Image object. + + The underlying librados client and ioctx can be accessed as the attributes + 'client' and 'ioctx'. 
+ """ + def __init__(self, driver, name, pool=None, snapshot=None, + read_only=False): + client, ioctx = driver._connect_to_rados(pool) + try: + self.volume = driver.rbd.Image(ioctx, str(name), + snapshot=ascii_str(snapshot), + read_only=read_only) + except driver.rbd.Error: + LOG.exception(_("error opening rbd image %s"), name) + driver._disconnect_from_rados(client, ioctx) + raise + self.driver = driver + self.client = client + self.ioctx = ioctx + + def __enter__(self): + return self + + def __exit__(self, type_, value, traceback): + try: + self.volume.close() + finally: + self.driver._disconnect_from_rados(self.client, self.ioctx) + + def __getattr__(self, attrib): + return getattr(self.volume, attrib) + + +class RADOSClient(object): + """Context manager to simplify error handling for connecting to ceph.""" + def __init__(self, driver, pool=None): + self.driver = driver + self.cluster, self.ioctx = driver._connect_to_rados(pool) + + def __enter__(self): + return self + + def __exit__(self, type_, value, traceback): + self.driver._disconnect_from_rados(self.cluster, self.ioctx) + + +class RBDDriver(driver.VolumeDriver): + """Implements RADOS block device (RBD) volume commands.""" + + VERSION = '1.1.0' + + def __init__(self, *args, **kwargs): + super(RBDDriver, self).__init__(*args, **kwargs) + self.configuration.append_config_values(rbd_opts) + self._stats = {} + # allow overrides for testing + self.rados = kwargs.get('rados', rados) + self.rbd = kwargs.get('rbd', rbd) + + def check_for_setup_error(self): + """Returns an error if prerequisites aren't met.""" + if rados is None: + msg = _('rados and rbd python libraries not found') + raise exception.VolumeBackendAPIException(data=msg) + try: + with RADOSClient(self): + pass + except self.rados.Error: + msg = _('error connecting to ceph cluster') + LOG.exception(msg) + raise exception.VolumeBackendAPIException(data=msg) + + def _ceph_args(self): + args = [] + if self.configuration.rbd_user: + args.extend(['--id', self.configuration.rbd_user]) + if self.configuration.rbd_ceph_conf: + args.extend(['--conf', self.configuration.rbd_ceph_conf]) + return args + + def _connect_to_rados(self, pool=None): + ascii_user = ascii_str(self.configuration.rbd_user) + ascii_conf = ascii_str(self.configuration.rbd_ceph_conf) + client = self.rados.Rados(rados_id=ascii_user, conffile=ascii_conf) + try: + client.connect() + pool_to_open = str(pool or self.configuration.rbd_pool) + ioctx = client.open_ioctx(pool_to_open) + return client, ioctx + except self.rados.Error: + # shutdown cannot raise an exception + client.shutdown() + raise + + def _disconnect_from_rados(self, client, ioctx): + # closing an ioctx cannot raise an exception + ioctx.close() + client.shutdown() + + def _get_backup_snaps(self, rbd_image): + """Get list of any backup snapshots that exist on this volume. + + There should only ever be one but accept all since they need to be + deleted before the volume can be. + """ + # NOTE(dosaboy): we do the import here otherwise we get import conflict + # issues between the rbd driver and the ceph backup driver. These + # issues only seem to occur when NOT using them together and are + # triggered when the ceph backup driver imports the rbd volume driver. 
+ from cinder.backup.drivers import ceph + return ceph.CephBackupDriver.get_backup_snaps(rbd_image) + + def _get_mon_addrs(self): + args = ['ceph', 'mon', 'dump', '--format=json'] + args.extend(self._ceph_args()) + out, _ = self._execute(*args) + lines = out.split('\n') + if lines[0].startswith('dumped monmap epoch'): + lines = lines[1:] + monmap = json.loads('\n'.join(lines)) + addrs = [mon['addr'] for mon in monmap['mons']] + hosts = [] + ports = [] + for addr in addrs: + host_port = addr[:addr.rindex('/')] + host, port = host_port.rsplit(':', 1) + hosts.append(host.strip('[]')) + ports.append(port) + return hosts, ports + + def _update_volume_stats(self): + stats = { + 'vendor_name': 'Open Source', + 'driver_version': self.VERSION, + 'storage_protocol': 'ceph', + 'total_capacity_gb': 'unknown', + 'free_capacity_gb': 'unknown', + 'reserved_percentage': 0, + } + backend_name = self.configuration.safe_get('volume_backend_name') + stats['volume_backend_name'] = backend_name or 'RBD' + + try: + with RADOSClient(self) as client: + new_stats = client.cluster.get_cluster_stats() + stats['total_capacity_gb'] = new_stats['kb'] / units.MiB + stats['free_capacity_gb'] = new_stats['kb_avail'] / units.MiB + except self.rados.Error: + # just log and return unknown capacities + LOG.exception(_('error refreshing volume stats')) + self._stats = stats + + def get_volume_stats(self, refresh=False): + """Return the current state of the volume service. + + If 'refresh' is True, run the update first. + """ + if refresh: + self._update_volume_stats() + return self._stats + + def _supports_layering(self): + return hasattr(self.rbd, 'RBD_FEATURE_LAYERING') + + def _get_clone_depth(self, client, volume_name, depth=0): + """Returns the number of ancestral clones (if any) of the given volume. + """ + parent_volume = self.rbd.Image(client.ioctx, volume_name) + try: + pool, parent, snap = self._get_clone_info(parent_volume, + volume_name) + finally: + parent_volume.close() + + if not parent: + return depth + + # If clone depth was reached, flatten should have occurred so if it has + # been exceeded then something has gone wrong. + if depth > CONF.rbd_max_clone_depth: + raise Exception(_("clone depth exceeds limit of %s") % + (CONF.rbd_max_clone_depth)) + + return self._get_clone_depth(client, parent, depth + 1) + + def create_cloned_volume(self, volume, src_vref): + """Create a cloned volume from another volume. + + Since we are cloning from a volume and not a snapshot, we must first + create a snapshot of the source volume. + + The user has the option to limit how long a volume's clone chain can be + by setting rbd_max_clone_depth. If a clone is made of another clone + and that clone has rbd_max_clone_depth clones behind it, the source + volume will be flattened. + """ + src_name = str(src_vref['name']) + dest_name = str(volume['name']) + flatten_parent = False + + # Do full copy if requested + if CONF.rbd_max_clone_depth <= 0: + with RBDVolumeProxy(self, src_name, read_only=True) as vol: + vol.copy(vol.ioctx, dest_name) + + return + + # Otherwise do COW clone. + with RADOSClient(self) as client: + depth = self._get_clone_depth(client, src_name) + # If source volume is a clone and rbd_max_clone_depth reached, + # flatten the source before cloning. Zero rbd_max_clone_depth means + # infinite is allowed. 
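+            # For example, with the default rbd_max_clone_depth of 5, cloning
+            # a volume that already has five ancestors flattens that source
+            # first, so the new clone then hangs off a freshly-rooted chain.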
+            if depth == CONF.rbd_max_clone_depth:
+                LOG.debug(_("maximum clone depth (%d) has been reached - "
+                            "flattening source volume") %
+                          (CONF.rbd_max_clone_depth))
+                flatten_parent = True
+
+            src_volume = self.rbd.Image(client.ioctx, src_name)
+            try:
+                # First flatten source volume if required.
+                if flatten_parent:
+                    pool, parent, snap = self._get_clone_info(src_volume,
+                                                              src_name)
+                    # Flatten source volume
+                    LOG.debug(_("flattening source volume %s") % (src_name))
+                    src_volume.flatten()
+                    # Delete parent clone snap
+                    parent_volume = self.rbd.Image(client.ioctx, parent)
+                    try:
+                        parent_volume.unprotect_snap(snap)
+                        parent_volume.remove_snap(snap)
+                    finally:
+                        parent_volume.close()
+
+                # Create new snapshot of source volume
+                clone_snap = "%s.clone_snap" % dest_name
+                LOG.debug(_("creating snapshot='%s'") % (clone_snap))
+                src_volume.create_snap(clone_snap)
+                src_volume.protect_snap(clone_snap)
+            except Exception:
+                # Only close if exception since we still need it.
+                src_volume.close()
+                raise
+
+            # Now clone source volume snapshot
+            try:
+                LOG.debug(_("cloning '%(src_vol)s@%(src_snap)s' to "
+                            "'%(dest)s'") %
+                          {'src_vol': src_name, 'src_snap': clone_snap,
+                           'dest': dest_name})
+                self.rbd.RBD().clone(client.ioctx, src_name, clone_snap,
+                                     client.ioctx, dest_name,
+                                     features=self.rbd.RBD_FEATURE_LAYERING)
+            except Exception:
+                src_volume.unprotect_snap(clone_snap)
+                src_volume.remove_snap(clone_snap)
+                raise
+            finally:
+                src_volume.close()
+
+        LOG.debug(_("clone created successfully"))
+
+    def create_volume(self, volume):
+        """Creates a logical volume."""
+        if int(volume['size']) == 0:
+            size = 100 * units.MiB
+        else:
+            size = int(volume['size']) * units.GiB
+
+        LOG.debug(_("creating volume '%s'") % (volume['name']))
+
+        old_format = True
+        features = 0
+        if self._supports_layering():
+            old_format = False
+            features = self.rbd.RBD_FEATURE_LAYERING
+
+        with RADOSClient(self) as client:
+            self.rbd.RBD().create(client.ioctx,
+                                  str(volume['name']),
+                                  size,
+                                  old_format=old_format,
+                                  features=features)
+
+    def _flatten(self, pool, volume_name):
+        LOG.debug(_('flattening %(pool)s/%(img)s') %
+                  dict(pool=pool, img=volume_name))
+        with RBDVolumeProxy(self, volume_name, pool) as vol:
+            vol.flatten()
+
+    def _clone(self, volume, src_pool, src_image, src_snap):
+        LOG.debug(_('cloning %(pool)s/%(img)s@%(snap)s to %(dst)s') %
+                  dict(pool=src_pool, img=src_image, snap=src_snap,
+                       dst=volume['name']))
+        with RADOSClient(self, src_pool) as src_client:
+            with RADOSClient(self) as dest_client:
+                self.rbd.RBD().clone(src_client.ioctx,
+                                     str(src_image),
+                                     str(src_snap),
+                                     dest_client.ioctx,
+                                     str(volume['name']),
+                                     features=self.rbd.RBD_FEATURE_LAYERING)
+
+    def _resize(self, volume, **kwargs):
+        size = kwargs.get('size', None)
+        if not size:
+            size = int(volume['size']) * units.GiB
+
+        with RBDVolumeProxy(self, volume['name']) as vol:
+            vol.resize(size)
+
+    def create_volume_from_snapshot(self, volume, snapshot):
+        """Creates a volume from a snapshot."""
+        self._clone(volume, self.configuration.rbd_pool,
+                    snapshot['volume_name'], snapshot['name'])
+        if self.configuration.rbd_flatten_volume_from_snapshot:
+            self._flatten(self.configuration.rbd_pool, volume['name'])
+        if int(volume['size']):
+            self._resize(volume)
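The parent-walking used by _get_clone_depth above is easier to see in isolation. A toy model, with a plain dict standing in for librbd's parent_info() and hypothetical volume names:

    def clone_depth(parents, name, depth=0):
        # Count ancestors the way _get_clone_depth follows parent_info().
        parent = parents.get(name)
        return depth if parent is None else clone_depth(parents, parent,
                                                        depth + 1)

    parents = {'v2': 'v1', 'v3': 'v2'}   # v3 clones v2, which clones v1
    assert clone_depth(parents, 'v3') == 2
    assert clone_depth(parents, 'v1') == 0

    # A flatten removes the parent link, resetting the depth:
    parents.pop('v3')
    assert clone_depth(parents, 'v3') == 0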
LOG.debug(_("volume has no backup snaps")) + finally: + rbd_image.close() + + def _get_clone_info(self, volume, volume_name, snap=None): + """If volume is a clone, return its parent info. + + Returns a tuple of (pool, parent, snap). A snapshot may optionally be + provided for the case where a cloned volume has been flattened but it's + snapshot still depends on the parent. + """ + try: + snap and volume.set_snap(snap) + pool, parent, parent_snap = tuple(volume.parent_info()) + snap and volume.set_snap(None) + # Strip the tag off the end of the volume name since it will not be + # in the snap name. + if volume_name.endswith('.deleted'): + volume_name = volume_name[:-len('.deleted')] + # Now check the snap name matches. + if parent_snap == "%s.clone_snap" % volume_name: + return pool, parent, parent_snap + except self.rbd.ImageNotFound: + LOG.debug(_("volume %s is not a clone") % volume_name) + volume.set_snap(None) + + return (None, None, None) + + def _delete_clone_parent_refs(self, client, parent_name, parent_snap): + """Walk back up the clone chain and delete references. + + Deletes references i.e. deleted parent volumes and snapshots. + """ + parent_rbd = self.rbd.Image(client.ioctx, parent_name) + parent_has_snaps = False + try: + # Check for grandparent + _pool, g_parent, g_parent_snap = self._get_clone_info(parent_rbd, + parent_name, + parent_snap) + + LOG.debug(_("deleting parent snapshot %s") % (parent_snap)) + parent_rbd.unprotect_snap(parent_snap) + parent_rbd.remove_snap(parent_snap) + + parent_has_snaps = bool(list(parent_rbd.list_snaps())) + finally: + parent_rbd.close() + + # If parent has been deleted in Cinder, delete the silent reference and + # keep walking up the chain if it is itself a clone. + if (not parent_has_snaps) and parent_name.endswith('.deleted'): + LOG.debug(_("deleting parent %s") % (parent_name)) + self.rbd.RBD().remove(client.ioctx, parent_name) + + # Now move up to grandparent if there is one + if g_parent: + self._delete_clone_parent_refs(client, g_parent, g_parent_snap) + + def delete_volume(self, volume): + """Deletes a logical volume.""" + volume_name = str(volume['name']) + with RADOSClient(self) as client: + try: + rbd_image = self.rbd.Image(client.ioctx, volume_name) + except self.rbd.ImageNotFound: + LOG.info(_("volume %s no longer exists in backend") + % (volume_name)) + return + + clone_snap = None + parent = None + + # Ensure any backup snapshots are deleted + self._delete_backup_snaps(client, volume_name) + + # If the volume has non-clone snapshots this delete is expected to + # raise VolumeIsBusy so do so straight away. + try: + snaps = rbd_image.list_snaps() + for snap in snaps: + if snap['name'].endswith('.clone_snap'): + LOG.debug(_("volume has clone snapshot(s)")) + # We grab one of these and use it when fetching parent + # info in case the volume has been flattened. + clone_snap = snap['name'] + break + + raise exception.VolumeIsBusy(volume_name=volume_name) + + # Determine if this volume is itself a clone + pool, parent, parent_snap = self._get_clone_info(rbd_image, + volume_name, + clone_snap) + finally: + rbd_image.close() + + if clone_snap is None: + LOG.debug(_("deleting rbd volume %s") % (volume_name)) + try: + self.rbd.RBD().remove(client.ioctx, volume_name) + except self.rbd.ImageBusy: + msg = (_("ImageBusy error raised while deleting rbd " + "volume. 
This may have been caused by a " + "connection from a client that has crashed and, " + "if so, may be resolved by retrying the delete " + "after 30 seconds has elapsed.")) + LOG.error(msg) + # Now raise this so that volume stays available so that we + # delete can be retried. + raise exception.VolumeIsBusy(msg, volume_name=volume_name) + + # If it is a clone, walk back up the parent chain deleting + # references. + if parent: + LOG.debug(_("volume is a clone so cleaning references")) + self._delete_clone_parent_refs(client, parent, parent_snap) + else: + # If the volume has copy-on-write clones we will not be able to + # delete it. Instead we will keep it as a silent volume which + # will be deleted when it's snapshot and clones are deleted. + new_name = "%s.deleted" % (volume_name) + self.rbd.RBD().rename(client.ioctx, volume_name, new_name) + + def create_snapshot(self, snapshot): + """Creates an rbd snapshot.""" + with RBDVolumeProxy(self, snapshot['volume_name']) as volume: + snap = str(snapshot['name']) + volume.create_snap(snap) + if self._supports_layering(): + volume.protect_snap(snap) + + def delete_snapshot(self, snapshot): + """Deletes an rbd snapshot.""" + with RBDVolumeProxy(self, snapshot['volume_name']) as volume: + snap = str(snapshot['name']) + if self._supports_layering(): + try: + volume.unprotect_snap(snap) + except self.rbd.ImageBusy: + raise exception.SnapshotIsBusy(snapshot_name=snap) + volume.remove_snap(snap) + + def ensure_export(self, context, volume): + """Synchronously recreates an export for a logical volume.""" + pass + + def create_export(self, context, volume): + """Exports the volume.""" + pass + + def remove_export(self, context, volume): + """Removes an export for a logical volume.""" + pass + + def initialize_connection(self, volume, connector): + hosts, ports = self._get_mon_addrs() + data = { + 'driver_volume_type': 'rbd', + 'data': { + 'name': '%s/%s' % (self.configuration.rbd_pool, + volume['name']), + 'hosts': hosts, + 'ports': ports, + 'auth_enabled': (self.configuration.rbd_user is not None), + 'auth_username': self.configuration.rbd_user, + 'secret_type': 'ceph', + 'secret_uuid': self.configuration.rbd_secret_uuid, } + } + LOG.debug(_('connection data: %s'), data) + return data + + def terminate_connection(self, volume, connector, **kwargs): + pass + + def _parse_location(self, location): + prefix = 'rbd://' + if not location.startswith(prefix): + reason = _('Not stored in rbd') + raise exception.ImageUnacceptable(image_id=location, reason=reason) + pieces = map(urllib.unquote, location[len(prefix):].split('/')) + if any(map(lambda p: p == '', pieces)): + reason = _('Blank components') + raise exception.ImageUnacceptable(image_id=location, reason=reason) + if len(pieces) != 4: + reason = _('Not an rbd snapshot') + raise exception.ImageUnacceptable(image_id=location, reason=reason) + return pieces + + def _get_fsid(self): + with RADOSClient(self) as client: + return client.cluster.get_fsid() + + def _is_cloneable(self, image_location, image_meta): + try: + fsid, pool, image, snapshot = self._parse_location(image_location) + except exception.ImageUnacceptable as e: + LOG.debug(_('not cloneable: %s'), e) + return False + + if self._get_fsid() != fsid: + reason = _('%s is in a different ceph cluster') % image_location + LOG.debug(reason) + return False + + if image_meta['disk_format'] != 'raw': + reason = _("rbd image clone requires image format to be " + "'raw' but image {0} is '{1}'").format( + image_location, image_meta['disk_format']) + 
LOG.debug(reason) + return False + + # check that we can read the image + try: + with RBDVolumeProxy(self, image, + pool=pool, + snapshot=snapshot, + read_only=True): + return True + except self.rbd.Error as e: + LOG.debug(_('Unable to open image %(loc)s: %(err)s') % + dict(loc=image_location, err=e)) + return False + + def clone_image(self, volume, image_location, image_id, image_meta): + image_location = image_location[0] if image_location else None + if image_location is None or not self._is_cloneable( + image_location, image_meta): + return ({}, False) + prefix, pool, image, snapshot = self._parse_location(image_location) + self._clone(volume, pool, image, snapshot) + self._resize(volume) + return {'provider_location': None}, True + + def _ensure_tmp_exists(self): + tmp_dir = self.configuration.volume_tmp_dir + if tmp_dir and not os.path.exists(tmp_dir): + os.makedirs(tmp_dir) + + def copy_image_to_volume(self, context, volume, image_service, image_id): + self._ensure_tmp_exists() + tmp_dir = self.configuration.volume_tmp_dir + + with tempfile.NamedTemporaryFile(dir=tmp_dir) as tmp: + image_utils.fetch_to_raw(context, image_service, image_id, + tmp.name, + self.configuration.volume_dd_blocksize, + size=volume['size']) + + self.delete_volume(volume) + + # keep using the command line import instead of librbd since it + # detects zeroes to preserve sparseness in the image + args = ['rbd', 'import', + '--pool', self.configuration.rbd_pool, + tmp.name, volume['name']] + if self._supports_layering(): + args.append('--new-format') + args.extend(self._ceph_args()) + self._try_execute(*args) + self._resize(volume) + + def copy_volume_to_image(self, context, volume, image_service, image_meta): + self._ensure_tmp_exists() + + tmp_dir = self.configuration.volume_tmp_dir or '/tmp' + tmp_file = os.path.join(tmp_dir, + volume['name'] + '-' + image_meta['id']) + with fileutils.remove_path_on_error(tmp_file): + args = ['rbd', 'export', + '--pool', self.configuration.rbd_pool, + volume['name'], tmp_file] + args.extend(self._ceph_args()) + self._try_execute(*args) + image_utils.upload_volume(context, image_service, + image_meta, tmp_file) + os.unlink(tmp_file) + + def backup_volume(self, context, backup, backup_service): + """Create a new backup from an existing volume.""" + volume = self.db.volume_get(context, backup['volume_id']) + pool = self.configuration.rbd_pool + + with RBDVolumeProxy(self, volume['name'], pool) as rbd_image: + rbd_meta = RBDImageMetadata(rbd_image, self.configuration.rbd_pool, + self.configuration.rbd_user, + self.configuration.rbd_ceph_conf) + rbd_fd = RBDImageIOWrapper(rbd_meta) + backup_service.backup(backup, rbd_fd) + + LOG.debug(_("volume backup complete.")) + + def restore_backup(self, context, backup, volume, backup_service): + """Restore an existing backup to a new or existing volume.""" + pool = self.configuration.rbd_pool + + with RBDVolumeProxy(self, volume['name'], pool) as rbd_image: + rbd_meta = RBDImageMetadata(rbd_image, self.configuration.rbd_pool, + self.configuration.rbd_user, + self.configuration.rbd_ceph_conf) + rbd_fd = RBDImageIOWrapper(rbd_meta) + backup_service.restore(backup, volume['id'], rbd_fd) + + LOG.debug(_("volume restore complete.")) + + def extend_volume(self, volume, new_size): + """Extend an existing volume.""" + old_size = volume['size'] + + try: + size = int(new_size) * units.GiB + self._resize(volume, size=size) + except Exception: + msg = _('Failed to Extend Volume ' + '%(volname)s') % {'volname': volume['name']} + LOG.error(msg) + raise 
exception.VolumeBackendAPIException(data=msg)
+
+        LOG.debug(_("Extend volume from %(old_size)s GB to %(new_size)s GB."),
+                  {'old_size': old_size, 'new_size': new_size})
diff --git a/cinder/volume/drivers/san/__init__.py b/cinder/volume/drivers/san/__init__.py
new file mode 100644
index 0000000000..bfc9cda666
--- /dev/null
+++ b/cinder/volume/drivers/san/__init__.py
@@ -0,0 +1,27 @@
+# Copyright (c) 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+:mod:`cinder.volume.san` -- Cinder San Drivers
+=====================================================
+
+.. automodule:: cinder.volume.san
+   :platform: Unix
+   :synopsis: Module containing all the Cinder San drivers.
+"""
+
+# Adding imports for backwards compatibility in loading volume_driver.
+from hp_lefthand import HpSanISCSIDriver  # noqa
+from san import SanISCSIDriver  # noqa
+from solaris import SolarisISCSIDriver  # noqa
diff --git a/cinder/volume/drivers/san/hp/__init__.py b/cinder/volume/drivers/san/hp/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/cinder/volume/drivers/san/hp/hp_3par_common.py b/cinder/volume/drivers/san/hp/hp_3par_common.py
new file mode 100644
index 0000000000..d32271c8a3
--- /dev/null
+++ b/cinder/volume/drivers/san/hp/hp_3par_common.py
@@ -0,0 +1,1124 @@
+# (c) Copyright 2012-2014 Hewlett-Packard Development Company, L.P.
+#    All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+"""
+Volume driver common utilities for HP 3PAR Storage array
+
+The 3PAR drivers require 3.1.2 MU3 firmware on the 3PAR array.
+
+You will need to install the python hp3parclient:
+sudo pip install hp3parclient
+
+The drivers use both the REST service and the SSH
+command line to correctly operate. Since the
+ssh credentials and the REST credentials can be different
+we need to have settings for both.
+
+The drivers require the san_ip, san_login and
+san_password settings for ssh connections into the 3PAR
+array. They also require the hp3par_api_url,
+hp3par_username and hp3par_password settings
+for credentials to talk to the REST service on the 3PAR
+array.
+"""
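For orientation, the REST half of that dual setup boils down to a handful of hp3parclient calls, which the class below wraps with validation and error translation. A hedged sketch (the URL and credentials are placeholders):

    from hp3parclient import client

    cl = client.HP3ParClient('https://10.0.0.5:8080/api/v1')  # hp3par_api_url
    cl.login('3paradm', 'secret')    # hp3par_username / hp3par_password
    try:
        print(cl.getCPG('OpenStack'))  # the default CPG used by the driver
    finally:
        cl.logout()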
+""" + +import ast +import base64 +import json +import pprint +from random import randint +import re +import uuid + +from eventlet import greenthread +import hp3parclient +from hp3parclient import client +from hp3parclient import exceptions as hpexceptions +from oslo.config import cfg + +from cinder import context +from cinder import exception +from cinder.openstack.common import excutils +from cinder.openstack.common import log as logging +from cinder.openstack.common import processutils +from cinder import utils +from cinder.volume import volume_types + + +LOG = logging.getLogger(__name__) + +MIN_CLIENT_VERSION = '2.0.0' + +hp3par_opts = [ + cfg.StrOpt('hp3par_api_url', + default='', + help="3PAR WSAPI Server Url like " + "https://<3par ip>:8080/api/v1"), + cfg.StrOpt('hp3par_username', + default='', + help="3PAR Super user username"), + cfg.StrOpt('hp3par_password', + default='', + help="3PAR Super user password", + secret=True), + cfg.StrOpt('hp3par_cpg', + default="OpenStack", + help="The CPG to use for volume creation"), + cfg.StrOpt('hp3par_cpg_snap', + default="", + help="The CPG to use for Snapshots for volumes. " + "If empty hp3par_cpg will be used"), + cfg.StrOpt('hp3par_snapshot_retention', + default="", + help="The time in hours to retain a snapshot. " + "You can't delete it before this expires."), + cfg.StrOpt('hp3par_snapshot_expiration', + default="", + help="The time in hours when a snapshot expires " + " and is deleted. This must be larger than expiration"), + cfg.BoolOpt('hp3par_debug', + default=False, + help="Enable HTTP debugging to 3PAR"), + cfg.ListOpt('hp3par_iscsi_ips', + default=[], + help="List of target iSCSI addresses to use.") +] + + +CONF = cfg.CONF +CONF.register_opts(hp3par_opts) + + +class HP3PARCommon(object): + """Class that contains common code for the 3PAR drivers. 
+ + Version history: + 1.2.0 - Updated hp3parclient API use to 2.0.x + 1.2.1 - Check that the VVS exists + 1.2.2 - log prior to raising exceptions + 1.2.3 - Methods to update key/value pair bug #1258033 + 1.2.4 - Remove deprecated config option hp3par_domain + 1.2.5 - Raise Ex when deleting snapshot with dependencies bug #1250249 + 1.2.6 - Allow optional specifying n:s:p for vlun creation bug #1269515 + This update now requires 3.1.2 MU3 firmware + + """ + + VERSION = "1.2.6" + + stats = {} + + # TODO(Ramy): move these to the 3PAR Client + VLUN_TYPE_EMPTY = 1 + VLUN_TYPE_PORT = 2 + VLUN_TYPE_HOST = 3 + VLUN_TYPE_MATCHED_SET = 4 + VLUN_TYPE_HOST_SET = 5 + + # Valid values for volume type extra specs + # The first value in the list is the default value + valid_prov_values = ['thin', 'full'] + valid_persona_values = ['1 - Generic', + '2 - Generic-ALUA', + '6 - Generic-legacy', + '7 - HPUX-legacy', + '8 - AIX-legacy', + '9 - EGENERA', + '10 - ONTAP-legacy', + '11 - VMware', + '12 - OpenVMS'] + hp_qos_keys = ['maxIOPS', 'maxBWS'] + hp3par_valid_keys = ['cpg', 'snap_cpg', 'provisioning', 'persona', 'vvs'] + + def __init__(self, config): + self.sshpool = None + self.config = config + self.hosts_naming_dict = dict() + self.client = None + + def get_version(self): + return self.VERSION + + def check_flags(self, options, required_flags): + for flag in required_flags: + if not getattr(options, flag, None): + msg = _('%s is not set') % flag + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + def _create_client(self): + cl = client.HP3ParClient(self.config.hp3par_api_url) + client_version = hp3parclient.version + + if (client_version < MIN_CLIENT_VERSION): + ex_msg = (_('Invalid hp3parclient version. Version %s or greater ' + 'required.') % MIN_CLIENT_VERSION) + LOG.error(ex_msg) + raise exception.InvalidInput(reason=ex_msg) + + return cl + + def client_login(self): + try: + LOG.debug("Connecting to 3PAR") + self.client.login(self.config.hp3par_username, + self.config.hp3par_password) + except hpexceptions.HTTPUnauthorized as ex: + msg = (_("Failed to Login to 3PAR (%(url)s) because %(err)s") % + {'url': self.config.hp3par_api_url, 'err': str(ex)}) + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + def client_logout(self): + self.client.logout() + LOG.debug("Disconnect from 3PAR") + + def do_setup(self, context): + try: + self.client = self._create_client() + except hpexceptions.UnsupportedVersion as ex: + raise exception.InvalidInput(str(ex)) + LOG.info(_("HP3PARCommon %(common_ver)s, hp3parclient %(rest_ver)s") + % {"common_ver": self.VERSION, + "rest_ver": hp3parclient.get_version_string()}) + if self.config.hp3par_debug: + self.client.debug_rest(True) + + self.client_login() + + try: + # make sure the default CPG exists + self.validate_cpg(self.config.hp3par_cpg) + self._set_connections() + finally: + self.client_logout() + + def validate_cpg(self, cpg_name): + try: + cpg = self.client.getCPG(cpg_name) + except hpexceptions.HTTPNotFound as ex: + err = (_("CPG (%s) doesn't exist on array") % cpg_name) + LOG.error(err) + raise exception.InvalidInput(reason=err) + + def _set_connections(self): + """Set the number of concurrent connections. + + The 3PAR WS API server has a limit of concurrent connections. + This is setting the number to the highest allowed, 15 connections. 
+ """ + self._cli_run(['setwsapi', '-sru', 'high']) + + def get_domain(self, cpg_name): + try: + cpg = self.client.getCPG(cpg_name) + except hpexceptions.HTTPNotFound: + err = (_("Failed to get domain because CPG (%s) doesn't " + "exist on array.") % cpg_name) + LOG.error(err) + raise exception.InvalidInput(reason=err) + + if 'domain' in cpg: + return cpg['domain'] + return None + + def extend_volume(self, volume, new_size): + volume_name = self._get_3par_vol_name(volume['id']) + old_size = volume.size + growth_size = int(new_size) - old_size + LOG.debug("Extending Volume %s from %s to %s, by %s GB." % + (volume_name, old_size, new_size, growth_size)) + try: + self._cli_run(['growvv', '-f', volume_name, '%dg' % growth_size]) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.error(_("Error extending volume %s") % volume) + + def _get_3par_vol_name(self, volume_id): + """Get converted 3PAR volume name. + + Converts the openstack volume id from + ecffc30f-98cb-4cf5-85ee-d7309cc17cd2 + to + osv-7P.DD5jLTPWF7tcwnMF80g + + We convert the 128 bits of the uuid into a 24character long + base64 encoded string to ensure we don't exceed the maximum + allowed 31 character name limit on 3Par + + We strip the padding '=' and replace + with . + and / with - + """ + volume_name = self._encode_name(volume_id) + return "osv-%s" % volume_name + + def _get_3par_snap_name(self, snapshot_id): + snapshot_name = self._encode_name(snapshot_id) + return "oss-%s" % snapshot_name + + def _get_3par_vvs_name(self, volume_id): + vvs_name = self._encode_name(volume_id) + return "vvs-%s" % vvs_name + + def _encode_name(self, name): + uuid_str = name.replace("-", "") + vol_uuid = uuid.UUID('urn:uuid:%s' % uuid_str) + vol_encoded = base64.b64encode(vol_uuid.bytes) + + # 3par doesn't allow +, nor / + vol_encoded = vol_encoded.replace('+', '.') + vol_encoded = vol_encoded.replace('/', '-') + # strip off the == as 3par doesn't like those. + vol_encoded = vol_encoded.replace('=', '') + return vol_encoded + + def _capacity_from_size(self, vol_size): + + # because 3PAR volume sizes are in + # Mebibytes, Gigibytes, not Megabytes. + MB = 1000L + MiB = 1.048576 + + if int(vol_size) == 0: + capacity = MB # default: 1GB + else: + capacity = vol_size * MB + + capacity = int(round(capacity / MiB)) + return capacity + + def _cli_run(self, cmd): + """Runs a CLI command over SSH, without doing any result parsing.""" + LOG.debug("SSH CMD = %s " % cmd) + + (stdout, stderr) = self._run_ssh(cmd, False) + # we have to strip out the input and exit lines + tmp = stdout.split("\r\n") + out = tmp[5:len(tmp) - 2] + return out + + def _ssh_execute(self, ssh, cmd, check_exit_code=True): + """We have to do this in order to get CSV output from the CLI command. + + We first have to issue a command to tell the CLI that we want the + output to be formatted in CSV, then we issue the real command. + """ + LOG.debug(_('Running cmd (SSH): %s'), cmd) + + channel = ssh.invoke_shell() + stdin_stream = channel.makefile('wb') + stdout_stream = channel.makefile('rb') + stderr_stream = channel.makefile('rb') + + stdin_stream.write('''setclienv csvtable 1 +%s +exit +''' % cmd) + + # stdin.write('process_input would go here') + # stdin.flush() + + # NOTE(justinsb): This seems suspicious... 
+ # ...other SSH clients have buffering issues with this approach + stdout = stdout_stream.read() + stderr = stderr_stream.read() + stdin_stream.close() + stdout_stream.close() + stderr_stream.close() + + exit_status = channel.recv_exit_status() + + # exit_status == -1 if no exit code was returned + if exit_status != -1: + LOG.debug(_('Result was %s') % exit_status) + if check_exit_code and exit_status != 0: + msg = _("command %s failed") % cmd + LOG.error(msg) + raise processutils.ProcessExecutionError(exit_code=exit_status, + stdout=stdout, + stderr=stderr, + cmd=cmd) + channel.close() + return (stdout, stderr) + + def _run_ssh(self, cmd_list, check_exit=True, attempts=1): + utils.check_ssh_injection(cmd_list) + command = ' '. join(cmd_list) + + if not self.sshpool: + self.sshpool = utils.SSHPool(self.config.san_ip, + self.config.san_ssh_port, + self.config.ssh_conn_timeout, + self.config.san_login, + password=self.config.san_password, + privatekey= + self.config.san_private_key, + min_size= + self.config.ssh_min_pool_conn, + max_size= + self.config.ssh_max_pool_conn) + try: + total_attempts = attempts + with self.sshpool.item() as ssh: + while attempts > 0: + attempts -= 1 + try: + return self._ssh_execute(ssh, command, + check_exit_code=check_exit) + except Exception as e: + LOG.error(e) + greenthread.sleep(randint(20, 500) / 100.0) + msg = (_("SSH Command failed after '%(total_attempts)r' " + "attempts : '%(command)s'") % + {'total_attempts': total_attempts, 'command': command}) + LOG.error(msg) + raise exception.CinderException(message=msg) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.error(_("Error running ssh command: %s") % command) + + def _delete_3par_host(self, hostname): + self.client.deleteHost(hostname) + + def _create_3par_vlun(self, volume, hostname, nsp): + try: + if nsp is None: + self.client.createVLUN(volume, hostname=hostname, auto=True) + else: + port = self.build_portPos(nsp) + self.client.createVLUN(volume, hostname=hostname, auto=True, + portPos=port) + + except hpexceptions.HTTPBadRequest as e: + if 'must be in the same domain' in e.get_description(): + LOG.error(e.get_description()) + raise exception.Invalid3PARDomain(err=e.get_description()) + + def _safe_hostname(self, hostname): + """We have to use a safe hostname length for 3PAR host names.""" + try: + index = hostname.index('.') + except ValueError: + # couldn't find it + index = len(hostname) + + # we'll just chop this off for now. 
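+        # e.g. 'compute-1.example.org' -> 'compute-1', and a dotless name
+        # longer than 23 characters keeps only its first 23 characters
+        # (hostname shown is illustrative).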
+ if index > 23: + index = 23 + + return hostname[:index] + + def _get_3par_host(self, hostname): + return self.client.getHost(hostname) + + def get_ports(self): + return self.client.getPorts() + + def get_active_target_ports(self): + ports = self.get_ports() + target_ports = [] + for port in ports['members']: + if ( + port['mode'] == self.client.PORT_MODE_TARGET and + port['linkState'] == self.client.PORT_STATE_READY + ): + port['nsp'] = self.build_nsp(port['portPos']) + target_ports.append(port) + + return target_ports + + def get_active_fc_target_ports(self): + ports = self.get_active_target_ports() + fc_ports = [] + for port in ports: + if port['protocol'] == self.client.PORT_PROTO_FC: + fc_ports.append(port) + + return fc_ports + + def get_active_iscsi_target_ports(self): + ports = self.get_active_target_ports() + iscsi_ports = [] + for port in ports: + if port['protocol'] == self.client.PORT_PROTO_ISCSI: + iscsi_ports.append(port) + + return iscsi_ports + + def get_volume_stats(self, refresh): + if refresh: + self._update_volume_stats() + + return self.stats + + def _update_volume_stats(self): + # const to convert MiB to GB + const = 0.0009765625 + + # storage_protocol and volume_backend_name are + # set in the child classes + stats = {'driver_version': '1.0', + 'free_capacity_gb': 'unknown', + 'reserved_percentage': 0, + 'storage_protocol': None, + 'total_capacity_gb': 'unknown', + 'QoS_support': True, + 'vendor_name': 'Hewlett-Packard', + 'volume_backend_name': None} + + try: + cpg = self.client.getCPG(self.config.hp3par_cpg) + if 'limitMiB' not in cpg['SDGrowth']: + total_capacity = 'infinite' + free_capacity = 'infinite' + else: + total_capacity = int(cpg['SDGrowth']['limitMiB'] * const) + free_capacity = int((cpg['SDGrowth']['limitMiB'] - + cpg['UsrUsage']['usedMiB']) * const) + + stats['total_capacity_gb'] = total_capacity + stats['free_capacity_gb'] = free_capacity + except hpexceptions.HTTPNotFound: + err = (_("CPG (%s) doesn't exist on array") + % self.config.hp3par_cpg) + LOG.error(err) + raise exception.InvalidInput(reason=err) + + self.stats = stats + + def create_vlun(self, volume, host, nsp=None): + """Create a VLUN. + + In order to export a volume on a 3PAR box, we have to create a VLUN. 
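+
+        The optional nsp, when given, is a 'node:slot:port' string such as
+        '1:1:2' (illustrative) and pins the VLUN to that target port;
+        otherwise the array chooses a port automatically.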
+ """ + volume_name = self._get_3par_vol_name(volume['id']) + self._create_3par_vlun(volume_name, host['name'], nsp) + return self.client.getVLUN(volume_name) + + def delete_vlun(self, volume, hostname): + volume_name = self._get_3par_vol_name(volume['id']) + vlun = self.client.getVLUN(volume_name) + # VLUN Type of MATCHED_SET 4 requires the port to be provided + if self.VLUN_TYPE_MATCHED_SET == vlun['type']: + self.client.deleteVLUN(volume_name, vlun['lun'], hostname, + vlun['portPos']) + else: + self.client.deleteVLUN(volume_name, vlun['lun'], hostname) + + try: + self._delete_3par_host(hostname) + except hpexceptions.HTTPConflict as ex: + # host will only be removed after all vluns + # have been removed + if 'has exported VLUN' in ex.get_description(): + pass + else: + raise + + def _get_volume_type(self, type_id): + ctxt = context.get_admin_context() + return volume_types.get_volume_type(ctxt, type_id) + + def _get_key_value(self, hp3par_keys, key, default=None): + if hp3par_keys is not None and key in hp3par_keys: + return hp3par_keys[key] + else: + return default + + def _get_qos_value(self, qos, key, default=None): + if key in qos: + return qos[key] + else: + return default + + def _get_qos_by_volume_type(self, volume_type): + qos = {} + specs = volume_type.get('extra_specs') + for key, value in specs.iteritems(): + if 'qos:' in key: + fields = key.split(':') + key = fields[1] + if key in self.hp_qos_keys: + qos[key] = int(value) + return qos + + def _get_keys_by_volume_type(self, volume_type): + hp3par_keys = {} + specs = volume_type.get('extra_specs') + for key, value in specs.iteritems(): + if ':' in key: + fields = key.split(':') + key = fields[1] + if key in self.hp3par_valid_keys: + hp3par_keys[key] = value + return hp3par_keys + + def _set_qos_rule(self, qos, vvs_name): + max_io = self._get_qos_value(qos, 'maxIOPS') + max_bw = self._get_qos_value(qos, 'maxBWS') + cmd = ['setqos'] + if max_io is not None: + cmd.extend(['-io', '%s' % max_io]) + if max_bw is not None: + cmd.extend(['-bw', '%sM' % max_bw]) + cmd.append('vvset:' + vvs_name) + self._cli_run(cmd) + + def _add_volume_to_volume_set(self, volume, volume_name, + cpg, vvs_name, qos): + if vvs_name is not None: + # Admin has set a volume set name to add the volume to + out = self._cli_run(['createvvset', '-add', vvs_name, volume_name]) + if out and len(out) == 1: + if 'does not exist' in out[0]: + msg = _('VV Set %s does not exist.') % vvs_name + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + else: + vvs_name = self._get_3par_vvs_name(volume['id']) + domain = self.get_domain(cpg) + if domain is not None: + self._cli_run(['createvvset', '-domain', domain, vvs_name]) + else: + self._cli_run(['createvvset', vvs_name]) + self._set_qos_rule(qos, vvs_name) + self._cli_run(['createvvset', '-add', vvs_name, volume_name]) + + def _remove_volume_set(self, vvs_name): + # Must first clear the QoS rules before removing the volume set + self._cli_run(['setqos', '-clear', 'vvset:%s' % (vvs_name)]) + self._cli_run(['removevvset', '-f', vvs_name]) + + def _remove_volume_from_volume_set(self, volume_name, vvs_name): + self._cli_run(['removevvset', '-f', vvs_name, volume_name]) + + def get_cpg(self, volume, allowSnap=False): + volume_name = self._get_3par_vol_name(volume['id']) + vol = self.client.getVolume(volume_name) + if 'userCPG' in vol: + return vol['userCPG'] + elif allowSnap: + return vol['snapCPG'] + return None + + def _get_3par_vol_comment(self, volume_name): + vol = self.client.getVolume(volume_name) + if 'comment' 
in vol:
+            return vol['comment']
+        return None
+
+    def get_persona_type(self, volume, hp3par_keys=None):
+        default_persona = self.valid_persona_values[0]
+        type_id = volume.get('volume_type_id', None)
+        volume_type = None
+        if type_id is not None:
+            volume_type = self._get_volume_type(type_id)
+            if hp3par_keys is None:
+                hp3par_keys = self._get_keys_by_volume_type(volume_type)
+        persona_value = self._get_key_value(hp3par_keys, 'persona',
+                                            default_persona)
+        if persona_value not in self.valid_persona_values:
+            err = _("Must specify a valid persona %(valid)s, "
+                    "value '%(persona)s' is invalid.") % \
+                ({'valid': self.valid_persona_values,
+                  'persona': persona_value})
+            LOG.error(err)
+            raise exception.InvalidInput(reason=err)
+        # persona is set by the id so remove the text and return the id
+        # i.e. for persona '1 - Generic' returns 1
+        persona_id = persona_value.split(' ')
+        return persona_id[0]
+
+    def get_volume_settings_from_type(self, volume):
+        cpg = None
+        snap_cpg = None
+        volume_type = None
+        vvs_name = None
+        hp3par_keys = {}
+        qos = {}
+        type_id = volume.get('volume_type_id', None)
+        if type_id is not None:
+            volume_type = self._get_volume_type(type_id)
+            hp3par_keys = self._get_keys_by_volume_type(volume_type)
+            vvs_name = self._get_key_value(hp3par_keys, 'vvs')
+            if vvs_name is None:
+                qos = self._get_qos_by_volume_type(volume_type)
+
+        cpg = self._get_key_value(hp3par_keys, 'cpg',
+                                  self.config.hp3par_cpg)
+        if cpg != self.config.hp3par_cpg:
+            # The cpg was specified in a volume type extra spec so it
+            # needs to be validated that it's in the correct domain.
+            self.validate_cpg(cpg)
+            # Also, look to see if the snap_cpg was specified in volume
+            # type extra spec, if not use the extra spec cpg as the
+            # default.
+            snap_cpg = self._get_key_value(hp3par_keys, 'snap_cpg', cpg)
+        else:
+            # default snap_cpg to hp3par_cpg_snap if it's not specified
+            # in the volume type extra specs.
+            snap_cpg = self.config.hp3par_cpg_snap
+            # if it's still not set or empty then set it to the cpg
+            # specified in the cinder.conf file.
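+            # e.g. with hp3par_cpg = 'OpenStackCPG' and hp3par_cpg_snap
+            # unset, both cpg and snap_cpg resolve to 'OpenStackCPG'
+            # (illustrative names).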
+            if not self.config.hp3par_cpg_snap:
+                snap_cpg = cpg
+
+        # if provisioning is not set use thin
+        default_prov = self.valid_prov_values[0]
+        prov_value = self._get_key_value(hp3par_keys, 'provisioning',
+                                         default_prov)
+        # check for valid provisioning type
+        if prov_value not in self.valid_prov_values:
+            err = _("Must specify a valid provisioning type %(valid)s, "
+                    "value '%(prov)s' is invalid.") % \
+                ({'valid': self.valid_prov_values,
+                  'prov': prov_value})
+            LOG.error(err)
+            raise exception.InvalidInput(reason=err)
+
+        tpvv = True
+        if prov_value == "full":
+            tpvv = False
+
+        # check for valid persona even if we don't use it until
+        # attach time, this will give the end user notice that the
+        # persona type is invalid at volume creation time
+        self.get_persona_type(volume, hp3par_keys)
+
+        return {'cpg': cpg, 'snap_cpg': snap_cpg,
+                'vvs_name': vvs_name, 'qos': qos,
+                'tpvv': tpvv, 'volume_type': volume_type}
+
+    def create_volume(self, volume):
+        LOG.debug("CREATE VOLUME (%s : %s %s)" %
+                  (volume['display_name'], volume['name'],
+                   self._get_3par_vol_name(volume['id'])))
+        try:
+            comments = {'volume_id': volume['id'],
+                        'name': volume['name'],
+                        'type': 'OpenStack'}
+
+            name = volume.get('display_name', None)
+            if name:
+                comments['display_name'] = name
+
+            # get the options supported by volume types
+            type_info = self.get_volume_settings_from_type(volume)
+            volume_type = type_info['volume_type']
+            vvs_name = type_info['vvs_name']
+            qos = type_info['qos']
+            cpg = type_info['cpg']
+            snap_cpg = type_info['snap_cpg']
+            tpvv = type_info['tpvv']
+
+            type_id = volume.get('volume_type_id', None)
+            if type_id is not None:
+                comments['volume_type_name'] = volume_type.get('name')
+                comments['volume_type_id'] = type_id
+                if vvs_name is not None:
+                    comments['vvs'] = vvs_name
+                else:
+                    comments['qos'] = qos
+
+            extras = {'comment': json.dumps(comments),
+                      'snapCPG': snap_cpg,
+                      'tpvv': tpvv}
+
+            capacity = self._capacity_from_size(volume['size'])
+            volume_name = self._get_3par_vol_name(volume['id'])
+            self.client.createVolume(volume_name, cpg, capacity, extras)
+            if qos or vvs_name is not None:
+                try:
+                    self._add_volume_to_volume_set(volume, volume_name,
+                                                   cpg, vvs_name, qos)
+                except exception.InvalidInput as ex:
+                    # Delete the volume if unable to add it to the volume set
+                    self.client.deleteVolume(volume_name)
+                    LOG.error(str(ex))
+                    raise exception.CinderException(str(ex))
+        except hpexceptions.HTTPConflict:
+            msg = _("Volume (%s) already exists on array") % volume_name
+            LOG.error(msg)
+            raise exception.Duplicate(msg)
+        except hpexceptions.HTTPBadRequest as ex:
+            LOG.error(str(ex))
+            raise exception.Invalid(ex.get_description())
+        except exception.InvalidInput as ex:
+            LOG.error(str(ex))
+            raise
+        except exception.CinderException as ex:
+            LOG.error(str(ex))
+            raise
+        except Exception as ex:
+            LOG.error(str(ex))
+            raise exception.CinderException(str(ex))
+
+    def _copy_volume(self, src_name, dest_name, cpg=None, snap_cpg=None,
+                     tpvv=True):
+        # Virtual volume sets are not supported with the -online option
+        cmd = ['createvvcopy', '-p', src_name, '-online']
+        if snap_cpg:
+            cmd.extend(['-snp_cpg', snap_cpg])
+        if tpvv:
+            cmd.append('-tpvv')
+        if cpg:
+            cmd.append(cpg)
+        cmd.append(dest_name)
+        LOG.debug('Creating clone of a volume with %s' % cmd)
+        self._cli_run(cmd)
+
+    def get_next_word(self, s, search_string):
+        """Return the next word.
+
+        Search 's' for 'search_string', if found return the word following
+        'search_string' from 's'.
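+
+        For example (illustrative message text)::
+
+            get_next_word('already used by host fakehost.foo (2)',
+                          'already used by host')
+            # -> 'fakehost.foo'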
+        """
+        word = re.search(search_string.strip(' ') + ' ([^ ]*)', s)
+        return word.groups()[0].strip(' ')
+
+    def _get_3par_vol_comment_value(self, vol_comment, key):
+        comment_dict = dict(ast.literal_eval(vol_comment))
+        if key in comment_dict:
+            return comment_dict[key]
+        return None
+
+    def create_cloned_volume(self, volume, src_vref):
+        try:
+            orig_name = self._get_3par_vol_name(volume['source_volid'])
+            vol_name = self._get_3par_vol_name(volume['id'])
+
+            type_info = self.get_volume_settings_from_type(volume)
+
+            # make the 3PAR copy the contents.
+            # can't delete the original until the copy is done.
+            self._copy_volume(orig_name, vol_name, cpg=type_info['cpg'],
+                              snap_cpg=type_info['snap_cpg'],
+                              tpvv=type_info['tpvv'])
+            return None
+        except hpexceptions.HTTPForbidden:
+            raise exception.NotAuthorized()
+        except hpexceptions.HTTPNotFound:
+            raise exception.NotFound()
+        except Exception as ex:
+            LOG.error(str(ex))
+            raise exception.CinderException(ex)
+
+    def _get_vvset_from_3par(self, volume_name):
+        """Get Virtual Volume Set from 3PAR.
+
+        The only way to do this currently is to try and delete the volume
+        to get the error message.
+
+        NOTE(walter-boring): don't call this unless you know the volume is
+        already in a vvset!
+        """
+        cmd = ['removevv', '-f', volume_name]
+        LOG.debug("Issuing remove command to find vvset name %s" % cmd)
+        out = self._cli_run(cmd)
+        vvset_name = None
+        if out and len(out) > 1:
+            if out[1].startswith("Attempt to delete "):
+                words = out[1].split(" ")
+                vvset_name = words[len(words) - 1]
+
+        return vvset_name
+
+    def delete_volume(self, volume):
+        try:
+            volume_name = self._get_3par_vol_name(volume['id'])
+            # Try and delete the volume, it might fail here because
+            # the volume is part of a volume set which will have the
+            # volume set name in the error.
+            try:
+                self.client.deleteVolume(volume_name)
+            except hpexceptions.HTTPConflict as ex:
+                if ex.get_code() == 34:
+                    # This is a special case which means the
+                    # volume is part of a volume set.
+                    vvset_name = self._get_vvset_from_3par(volume_name)
+                    LOG.debug("Returned vvset_name = %s" % vvset_name)
+                    if vvset_name is not None and \
+                       vvset_name.startswith('vvs-'):
+                        # We have a single volume per volume set, so
+                        # remove the volume set.
+                        self._remove_volume_set(
+                            self._get_3par_vvs_name(volume['id']))
+                    elif vvset_name is not None:
+                        # We have a pre-defined volume set just remove the
+                        # volume and leave the volume set.
+                        self._remove_volume_from_volume_set(volume_name,
+                                                            vvset_name)
+                    self.client.deleteVolume(volume_name)
+                else:
+                    LOG.error(str(ex))
+                    raise
+
+        except hpexceptions.HTTPNotFound as ex:
+            # We'll let this act as if it worked
+            # it helps clean up the cinder entries.
+            LOG.error(str(ex))
+        except hpexceptions.HTTPForbidden as ex:
+            LOG.error(str(ex))
+            raise exception.NotAuthorized(ex.get_description())
+        except Exception as ex:
+            LOG.error(str(ex))
+            raise exception.CinderException(ex)
+
+    def create_volume_from_snapshot(self, volume, snapshot):
+        """Creates a volume from a snapshot.
+
+        TODO: support using the size from the user.
+        """
+        LOG.debug("Create Volume from Snapshot\n%s\n%s" %
+                  (pprint.pformat(volume['display_name']),
+                   pprint.pformat(snapshot['display_name'])))
+
+        if snapshot['volume_size'] != volume['size']:
+            err = ("You cannot change size of the volume.  It must "
+                   "be the same as the snapshot.")
+            LOG.error(err)
+            raise exception.InvalidInput(reason=err)
+
+        try:
+            snap_name = self._get_3par_snap_name(snapshot['id'])
+            volume_name = self._get_3par_vol_name(volume['id'])
+
+            extra = {'volume_id': volume['id'],
+                     'snapshot_id': snapshot['id']}
+
+            volume_type = None
+            type_id = volume.get('volume_type_id', None)
+            vvs_name = None
+            qos = {}
+            hp3par_keys = {}
+            if type_id is not None:
+                volume_type = self._get_volume_type(type_id)
+                hp3par_keys = self._get_keys_by_volume_type(volume_type)
+                vvs_name = self._get_key_value(hp3par_keys, 'vvs')
+                if vvs_name is None:
+                    qos = self._get_qos_by_volume_type(volume_type)
+
+            name = volume.get('display_name', None)
+            if name:
+                extra['display_name'] = name
+
+            description = volume.get('display_description', None)
+            if description:
+                extra['description'] = description
+
+            optional = {'comment': json.dumps(extra),
+                        'readOnly': False}
+
+            self.client.createSnapshot(volume_name, snap_name, optional)
+            if qos or vvs_name is not None:
+                cpg = self._get_key_value(hp3par_keys, 'cpg',
+                                          self.config.hp3par_cpg)
+                try:
+                    self._add_volume_to_volume_set(volume, volume_name,
+                                                   cpg, vvs_name, qos)
+                except Exception as ex:
+                    # Delete the volume if unable to add it to the volume set
+                    self.client.deleteVolume(volume_name)
+                    LOG.error(str(ex))
+                    raise exception.CinderException(str(ex))
+        except hpexceptions.HTTPForbidden as ex:
+            LOG.error(str(ex))
+            raise exception.NotAuthorized()
+        except hpexceptions.HTTPNotFound as ex:
+            LOG.error(str(ex))
+            raise exception.NotFound()
+        except Exception as ex:
+            LOG.error(str(ex))
+            raise exception.CinderException(str(ex))
+
+    def create_snapshot(self, snapshot):
+        LOG.debug("Create Snapshot\n%s" % pprint.pformat(snapshot))
+
+        try:
+            snap_name = self._get_3par_snap_name(snapshot['id'])
+            vol_name = self._get_3par_vol_name(snapshot['volume_id'])
+
+            extra = {'volume_name': snapshot['volume_name']}
+            vol_id = snapshot.get('volume_id', None)
+            if vol_id:
+                extra['volume_id'] = vol_id
+
+            try:
+                extra['display_name'] = snapshot['display_name']
+            except AttributeError:
+                pass
+
+            try:
+                extra['description'] = snapshot['display_description']
+            except AttributeError:
+                pass
+
+            optional = {'comment': json.dumps(extra),
+                        'readOnly': True}
+            if self.config.hp3par_snapshot_expiration:
+                optional['expirationHours'] = (
+                    self.config.hp3par_snapshot_expiration)
+
+            if self.config.hp3par_snapshot_retention:
+                optional['retentionHours'] = (
+                    self.config.hp3par_snapshot_retention)
+
+            self.client.createSnapshot(snap_name, vol_name, optional)
+        except hpexceptions.HTTPForbidden as ex:
+            LOG.error(str(ex))
+            raise exception.NotAuthorized()
+        except hpexceptions.HTTPNotFound as ex:
+            LOG.error(str(ex))
+            raise exception.NotFound()
+
+    def update_volume_key_value_pair(self, volume, key, value):
+        """Updates key,value pair as metadata onto virtual volume.
+
+        If key already exists, the value will be replaced.
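+
+        For example, key 'HPQ-CS-instance_uuid' results in a CLI call of
+        the form (placeholders illustrative)::
+
+            setvv -setkv HPQ-CS-instance_uuid=<uuid> osv-<encoded-name>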
+ """ + LOG.debug("VOLUME (%s : %s %s) Updating KEY-VALUE pair: (%s : %s)" % + (volume['display_name'], + volume['name'], + self._get_3par_vol_name(volume['id']), + str(key), + str(value))) + try: + volume_name = self._get_3par_vol_name(volume['id']) + if value is None: + value = '' + cmd = ['setvv', '-setkv', key + '=' + value, volume_name] + self._cli_run(cmd) + except Exception as ex: + msg = _('Failure in update_volume_key_value_pair:%s') % str(ex) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + def clear_volume_key_value_pair(self, volume, key): + """Clears key,value pairs metadata from virtual volume.""" + + LOG.debug("VOLUME (%s : %s %s) Clearing Key : %s)" % + (volume['display_name'], volume['name'], + self._get_3par_vol_name(volume['id']), str(key))) + try: + volume_name = self._get_3par_vol_name(volume['id']) + cmd = ['setvv', '-clrkey', key, volume_name] + self._cli_run(cmd) + except Exception as ex: + msg = _('Failure in clear_volume_key_value_pair:%s') % str(ex) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + def attach_volume(self, volume, instance_uuid): + LOG.debug("Attach Volume\n%s" % pprint.pformat(volume)) + try: + self.update_volume_key_value_pair(volume, + 'HPQ-CS-instance_uuid', + instance_uuid) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.error(_("Error attaching volume %s") % volume) + + def detach_volume(self, volume): + LOG.debug("Detach Volume\n%s" % pprint.pformat(volume)) + try: + self.clear_volume_key_value_pair(volume, 'HPQ-CS-instance_uuid') + except Exception: + with excutils.save_and_reraise_exception(): + LOG.error(_("Error detaching volume %s") % volume) + + def delete_snapshot(self, snapshot): + LOG.debug("Delete Snapshot id %s %s" % (snapshot['id'], + pprint.pformat(snapshot))) + + try: + snap_name = self._get_3par_snap_name(snapshot['id']) + self.client.deleteVolume(snap_name) + except hpexceptions.HTTPForbidden as ex: + LOG.error(str(ex)) + raise exception.NotAuthorized() + except hpexceptions.HTTPNotFound as ex: + LOG.error(str(ex)) + raise exception.NotFound() + except hpexceptions.HTTPConflict as ex: + LOG.error(str(ex)) + raise exception.SnapshotIsBusy(snapshot_name=snapshot['id']) + + def _get_3par_hostname_from_wwn_iqn(self, wwns, iqns): + if wwns is not None and not isinstance(wwns, list): + wwns = [wwns] + if iqns is not None and not isinstance(iqns, list): + iqns = [iqns] + + out = self.client.getHosts() + hosts = out['members'] + for host in hosts: + if 'iSCSIPaths' in host and iqns is not None: + iscsi_paths = host['iSCSIPaths'] + for iscsi in iscsi_paths: + for iqn in iqns: + if iqn == iscsi['name']: + return host['name'] + + if 'FCPaths' in host and wwns is not None: + fc_paths = host['FCPaths'] + for fc in fc_paths: + for wwn in wwns: + if wwn == fc['wwn']: + return host['name'] + + def terminate_connection(self, volume, hostname, wwn=None, iqn=None): + """Driver entry point to unattach a volume from an instance.""" + try: + # does 3par know this host by a different name? 
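+            # (hosts_naming_dict is populated by parse_create_host_error
+            # when createhost reported that the wwn/iqn was already in use
+            # by an existing 3PAR host with a different name)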
+ if hostname in self.hosts_naming_dict: + hostname = self.hosts_naming_dict.get(hostname) + self.delete_vlun(volume, hostname) + return + except hpexceptions.HTTPNotFound as e: + if 'host does not exist' in e.get_description(): + # use the wwn to see if we can find the hostname + hostname = self._get_3par_hostname_from_wwn_iqn(wwn, iqn) + # no 3par host, re-throw + if (hostname is None): + LOG.error(str(e)) + raise + else: + # not a 'host does not exist' HTTPNotFound exception, re-throw + LOG.error(str(e)) + raise + + # try again with name retrieved from 3par + self.delete_vlun(volume, hostname) + + def parse_create_host_error(self, hostname, out): + search_str = "already used by host " + if search_str in out[1]: + # host exists, return name used by 3par + hostname_3par = self.get_next_word(out[1], search_str) + self.hosts_naming_dict[hostname] = hostname_3par + return hostname_3par + + def build_nsp(self, portPos): + return '%s:%s:%s' % (portPos['node'], + portPos['slot'], + portPos['cardPort']) + + def build_portPos(self, nsp): + split = nsp.split(":") + portPos = {} + portPos['node'] = int(split[0]) + portPos['slot'] = int(split[1]) + portPos['cardPort'] = int(split[2]) + return portPos diff --git a/cinder/volume/drivers/san/hp/hp_3par_fc.py b/cinder/volume/drivers/san/hp/hp_3par_fc.py new file mode 100644 index 0000000000..683f75d5e4 --- /dev/null +++ b/cinder/volume/drivers/san/hp/hp_3par_fc.py @@ -0,0 +1,327 @@ +# (c) Copyright 2013 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Copyright 2012 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +""" +Volume driver for HP 3PAR Storage array. +This driver requires 3.1.2 MU2 firmware on the 3PAR array, using +the 2.x version of the hp3parclient. + +You will need to install the python hp3parclient. +sudo pip install --upgrade "hp3parclient>=2.0" + +Set the following in the cinder.conf file to enable the +3PAR Fibre Channel Driver along with the required flags: + +volume_driver=cinder.volume.drivers.san.hp.hp_3par_fc.HP3PARFCDriver +""" + +from hp3parclient import exceptions as hpexceptions + +from cinder.openstack.common import log as logging +from cinder import utils +import cinder.volume.driver +from cinder.volume.drivers.san.hp import hp_3par_common as hpcommon +from cinder.volume.drivers.san import san + +LOG = logging.getLogger(__name__) + + +class HP3PARFCDriver(cinder.volume.driver.FibreChannelDriver): + """OpenStack Fibre Channel driver to enable 3PAR storage array. + + Version history: + 1.0 - Initial driver + 1.1 - QoS, extend volume, multiple iscsi ports, remove domain, + session changes, faster clone, requires 3.1.2 MU2 firmware, + copy volume <--> Image. + 1.2.0 - Updated the use of the hp3parclient to 2.0.0 and refactored + the drivers to use the new APIs. + 1.2.1 - Synchronized extend_volume method. + 1.2.2 - Added try/finally around client login/logout. + 1.2.3 - Added ability to add WWNs to host. + 1.2.4 - Added metadata during attach/detach bug #1258033. 
+ + """ + + VERSION = "1.2.4" + + def __init__(self, *args, **kwargs): + super(HP3PARFCDriver, self).__init__(*args, **kwargs) + self.common = None + self.configuration.append_config_values(hpcommon.hp3par_opts) + self.configuration.append_config_values(san.san_opts) + + def _init_common(self): + return hpcommon.HP3PARCommon(self.configuration) + + def _check_flags(self): + """Sanity check to ensure we have required options set.""" + required_flags = ['hp3par_api_url', 'hp3par_username', + 'hp3par_password', + 'san_ip', 'san_login', 'san_password'] + self.common.check_flags(self.configuration, required_flags) + + @utils.synchronized('3par', external=True) + def get_volume_stats(self, refresh): + self.common.client_login() + try: + stats = self.common.get_volume_stats(refresh) + stats['storage_protocol'] = 'FC' + stats['driver_version'] = self.VERSION + backend_name = self.configuration.safe_get('volume_backend_name') + stats['volume_backend_name'] = (backend_name or + self.__class__.__name__) + return stats + finally: + self.common.client_logout() + + def do_setup(self, context): + self.common = self._init_common() + self._check_flags() + self.common.do_setup(context) + + def check_for_setup_error(self): + """Returns an error if prerequisites aren't met.""" + self._check_flags() + + @utils.synchronized('3par', external=True) + def create_volume(self, volume): + self.common.client_login() + try: + metadata = self.common.create_volume(volume) + return {'metadata': metadata} + finally: + self.common.client_logout() + + @utils.synchronized('3par', external=True) + def create_cloned_volume(self, volume, src_vref): + self.common.client_login() + try: + new_vol = self.common.create_cloned_volume(volume, src_vref) + return {'metadata': new_vol} + finally: + self.common.client_logout() + + @utils.synchronized('3par', external=True) + def delete_volume(self, volume): + self.common.client_login() + try: + self.common.delete_volume(volume) + finally: + self.common.client_logout() + + @utils.synchronized('3par', external=True) + def create_volume_from_snapshot(self, volume, snapshot): + """Create a volume from a snapshot. + + TODO: support using the size from the user. + """ + self.common.client_login() + try: + metadata = self.common.create_volume_from_snapshot(volume, + snapshot) + return {'metadata': metadata} + finally: + self.common.client_logout() + + @utils.synchronized('3par', external=True) + def create_snapshot(self, snapshot): + self.common.client_login() + try: + self.common.create_snapshot(snapshot) + finally: + self.common.client_logout() + + @utils.synchronized('3par', external=True) + def delete_snapshot(self, snapshot): + self.common.client_login() + try: + self.common.delete_snapshot(snapshot) + finally: + self.common.client_logout() + + @utils.synchronized('3par', external=True) + def initialize_connection(self, volume, connector): + """Assigns the volume to a server. + + Assign any created volume to a compute node/host so that it can be + used from that host. + + The driver returns a driver_volume_type of 'fibre_channel'. + The target_wwn can be a single entry or a list of wwns that + correspond to the list of remote wwn(s) that will export the volume. 
+ Example return values: + + { + 'driver_volume_type': 'fibre_channel' + 'data': { + 'target_discovered': True, + 'target_lun': 1, + 'target_wwn': '1234567890123', + } + } + + or + + { + 'driver_volume_type': 'fibre_channel' + 'data': { + 'target_discovered': True, + 'target_lun': 1, + 'target_wwn': ['1234567890123', '0987654321321'], + } + } + + + Steps to export a volume on 3PAR + * Create a host on the 3par with the target wwn + * Create a VLUN for that HOST with the volume we want to export. + + """ + self.common.client_login() + try: + # we have to make sure we have a host + host = self._create_host(volume, connector) + + # now that we have a host, create the VLUN + vlun = self.common.create_vlun(volume, host) + + fc_ports = self.common.get_active_fc_target_ports() + wwns = [] + + for port in fc_ports: + wwns.append(port['portWWN']) + + info = {'driver_volume_type': 'fibre_channel', + 'data': {'target_lun': vlun['lun'], + 'target_discovered': True, + 'target_wwn': wwns}} + return info + finally: + self.common.client_logout() + + @utils.synchronized('3par', external=True) + def terminate_connection(self, volume, connector, **kwargs): + """Driver entry point to unattach a volume from an instance.""" + self.common.client_login() + try: + hostname = self.common._safe_hostname(connector['host']) + self.common.terminate_connection(volume, hostname, + wwn=connector['wwpns']) + finally: + self.common.client_logout() + + def _create_3par_fibrechan_host(self, hostname, wwns, domain, persona_id): + """Create a 3PAR host. + + Create a 3PAR host, if there is already a host on the 3par using + the same wwn but with a different hostname, return the hostname + used by 3PAR. + """ + if domain is not None: + command = ['createhost', '-persona', persona_id, '-domain', domain, + hostname] + else: + command = ['createhost', '-persona', persona_id, hostname] + for wwn in wwns: + command.append(wwn) + + out = self.common._cli_run(command) + if out and len(out) > 1: + return self.common.parse_create_host_error(hostname, out) + + return hostname + + def _modify_3par_fibrechan_host(self, hostname, wwn): + mod_request = {'pathOperation': self.common.client.HOST_EDIT_ADD, + 'FCWWNs': wwn} + + self.common.client.modifyHost(hostname, mod_request) + + def _create_host(self, volume, connector): + """Creates or modifies existing 3PAR host.""" + host = None + hostname = self.common._safe_hostname(connector['host']) + cpg = self.common.get_cpg(volume, allowSnap=True) + domain = self.common.get_domain(cpg) + try: + host = self.common._get_3par_host(hostname) + except hpexceptions.HTTPNotFound as ex: + # get persona from the volume type extra specs + persona_id = self.common.get_persona_type(volume) + # host doesn't exist, we have to create it + hostname = self._create_3par_fibrechan_host(hostname, + connector['wwpns'], + domain, + persona_id) + host = self.common._get_3par_host(hostname) + + return self._add_new_wwn_to_host(host, connector['wwpns']) + + def _add_new_wwn_to_host(self, host, wwns): + """Add wwns to a host if one or more don't exist. + + Identify if argument wwns contains any world wide names + not configured in the 3PAR host path. If any are found, + add them to the 3PAR host. 
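+
+        For example (illustrative WWNs), if the host already has path
+        '10000000c123abcd' and the connector supplies
+        ['10000000C123ABCD', '10000000c999dcba'], only
+        '10000000c999dcba' is added.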
+ """ + # get the currently configured wwns + # from the host's FC paths + host_wwns = [] + if 'FCPaths' in host: + for path in host['FCPaths']: + wwn = path.get('wwn', None) + if wwn is not None: + host_wwns.append(wwn.lower()) + + # lower case all wwns in the compare list + compare_wwns = [x.lower() for x in wwns] + + # calculate wwns in compare list, but not in host_wwns list + new_wwns = list(set(compare_wwns).difference(host_wwns)) + + # if any wwns found that were not in host list, + # add them to the host + if (len(new_wwns) > 0): + self._modify_3par_fibrechan_host(host['name'], new_wwns) + host = self.common._get_3par_host(host['name']) + return host + + @utils.synchronized('3par', external=True) + def create_export(self, context, volume): + pass + + @utils.synchronized('3par', external=True) + def ensure_export(self, context, volume): + pass + + @utils.synchronized('3par', external=True) + def remove_export(self, context, volume): + pass + + @utils.synchronized('3par', external=True) + def extend_volume(self, volume, new_size): + self.common.extend_volume(volume, new_size) + + @utils.synchronized('3par', external=True) + def attach_volume(self, context, volume, instance_uuid, host_name, + mountpoint): + self.common.attach_volume(volume, instance_uuid) + + @utils.synchronized('3par', external=True) + def detach_volume(self, context, volume): + self.common.detach_volume(volume) diff --git a/cinder/volume/drivers/san/hp/hp_3par_iscsi.py b/cinder/volume/drivers/san/hp/hp_3par_iscsi.py new file mode 100644 index 0000000000..b78b6601f2 --- /dev/null +++ b/cinder/volume/drivers/san/hp/hp_3par_iscsi.py @@ -0,0 +1,437 @@ +# (c) Copyright 2012-2014 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +""" +Volume driver for HP 3PAR Storage array. +This driver requires 3.1.2 MU3 firmware on the 3PAR array, using +the 2.x version of the hp3parclient. + +You will need to install the python hp3parclient. +sudo pip install --upgrade "hp3parclient>=2.0" + +Set the following in the cinder.conf file to enable the +3PAR iSCSI Driver along with the required flags: + +volume_driver=cinder.volume.drivers.san.hp.hp_3par_iscsi.HP3PARISCSIDriver +""" + +import sys + +from hp3parclient import exceptions as hpexceptions + +from cinder import exception +from cinder.openstack.common import log as logging +from cinder import utils +import cinder.volume.driver +from cinder.volume.drivers.san.hp import hp_3par_common as hpcommon +from cinder.volume.drivers.san import san + +LOG = logging.getLogger(__name__) +DEFAULT_ISCSI_PORT = 3260 + + +class HP3PARISCSIDriver(cinder.volume.driver.ISCSIDriver): + """OpenStack iSCSI driver to enable 3PAR storage array. + + Version history: + 1.0 - Initial driver + 1.1 - QoS, extend volume, multiple iscsi ports, remove domain, + session changes, faster clone, requires 3.1.2 MU2 firmware. + 1.2.0 - Updated the use of the hp3parclient to 2.0.0 and refactored + the drivers to use the new APIs. 
+ 1.2.1 - Synchronized extend_volume method. + 1.2.2 - Added try/finally around client login/logout. + 1.2.3 - log exceptions before raising + 1.2.4 - Fixed iSCSI active path bug #1224594 + 1.2.5 - Added metadata during attach/detach bug #1258033 + 1.2.6 - Use least-used iscsi n:s:p for iscsi volume attach bug #1269515 + This update now requires 3.1.2 MU3 firmware + + """ + + VERSION = "1.2.6" + + def __init__(self, *args, **kwargs): + super(HP3PARISCSIDriver, self).__init__(*args, **kwargs) + self.common = None + self.configuration.append_config_values(hpcommon.hp3par_opts) + self.configuration.append_config_values(san.san_opts) + + def _init_common(self): + return hpcommon.HP3PARCommon(self.configuration) + + def _check_flags(self): + """Sanity check to ensure we have required options set.""" + required_flags = ['hp3par_api_url', 'hp3par_username', + 'hp3par_password', 'san_ip', 'san_login', + 'san_password'] + self.common.check_flags(self.configuration, required_flags) + + @utils.synchronized('3par', external=True) + def get_volume_stats(self, refresh): + self.common.client_login() + try: + stats = self.common.get_volume_stats(refresh) + stats['storage_protocol'] = 'iSCSI' + stats['driver_version'] = self.VERSION + backend_name = self.configuration.safe_get('volume_backend_name') + stats['volume_backend_name'] = (backend_name or + self.__class__.__name__) + return stats + finally: + self.common.client_logout() + + def do_setup(self, context): + self.common = self._init_common() + self._check_flags() + self.common.do_setup(context) + + self.common.client_login() + try: + self.initialize_iscsi_ports() + finally: + self.common.client_logout() + + def initialize_iscsi_ports(self): + # map iscsi_ip-> ip_port + # -> iqn + # -> nsp + self.iscsi_ips = {} + temp_iscsi_ip = {} + + # use the 3PAR ip_addr list for iSCSI configuration + if len(self.configuration.hp3par_iscsi_ips) > 0: + # add port values to ip_addr, if necessary + for ip_addr in self.configuration.hp3par_iscsi_ips: + ip = ip_addr.split(':') + if len(ip) == 1: + temp_iscsi_ip[ip_addr] = {'ip_port': DEFAULT_ISCSI_PORT} + elif len(ip) == 2: + temp_iscsi_ip[ip[0]] = {'ip_port': ip[1]} + else: + msg = _("Invalid IP address format '%s'") % ip_addr + LOG.warn(msg) + + # add the single value iscsi_ip_address option to the IP dictionary. + # This way we can see if it's a valid iSCSI IP. If it's not valid, + # we won't use it and won't bother to report it, see below + if (self.configuration.iscsi_ip_address not in temp_iscsi_ip): + ip = self.configuration.iscsi_ip_address + ip_port = self.configuration.iscsi_port + temp_iscsi_ip[ip] = {'ip_port': ip_port} + + # get all the valid iSCSI ports from 3PAR + # when found, add the valid iSCSI ip, ip port, iqn and nsp + # to the iSCSI IP dictionary + iscsi_ports = self.common.get_active_iscsi_target_ports() + + for port in iscsi_ports: + ip = port['IPAddr'] + if ip in temp_iscsi_ip: + ip_port = temp_iscsi_ip[ip]['ip_port'] + self.iscsi_ips[ip] = {'ip_port': ip_port, + 'nsp': port['nsp'], + 'iqn': port['iSCSIName'] + } + del temp_iscsi_ip[ip] + + # if the single value iscsi_ip_address option is still in the + # temp dictionary it's because it defaults to $my_ip which doesn't + # make sense in this context. So, if present, remove it and move on. 
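+        # (e.g. when iscsi_ip_address is left at its $my_ip default, say
+        # 192.168.1.10, it never matches a 3PAR target port and would
+        # otherwise be reported as invalid below -- address illustrative)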
+        if (self.configuration.iscsi_ip_address in temp_iscsi_ip):
+            del temp_iscsi_ip[self.configuration.iscsi_ip_address]
+
+        # lets see if there are invalid iSCSI IPs left in the temp dict
+        if len(temp_iscsi_ip) > 0:
+            msg = (_("Found invalid iSCSI IP address(es) in configuration "
+                     "option(s) hp3par_iscsi_ips or iscsi_ip_address '%s'.") %
+                   (", ".join(temp_iscsi_ip)))
+            LOG.warn(msg)
+
+        if not self.iscsi_ips:
+            msg = _('At least one valid iSCSI IP address must be set.')
+            LOG.error(msg)
+            raise exception.InvalidInput(reason=msg)
+
+    def check_for_setup_error(self):
+        """Returns an error if prerequisites aren't met."""
+        self._check_flags()
+
+    @utils.synchronized('3par', external=True)
+    def create_volume(self, volume):
+        self.common.client_login()
+        try:
+            metadata = self.common.create_volume(volume)
+            return {'metadata': metadata}
+        finally:
+            self.common.client_logout()
+
+    @utils.synchronized('3par', external=True)
+    def create_cloned_volume(self, volume, src_vref):
+        """Clone an existing volume."""
+        self.common.client_login()
+        try:
+            new_vol = self.common.create_cloned_volume(volume, src_vref)
+            return {'metadata': new_vol}
+        finally:
+            self.common.client_logout()
+
+    @utils.synchronized('3par', external=True)
+    def delete_volume(self, volume):
+        self.common.client_login()
+        try:
+            self.common.delete_volume(volume)
+        finally:
+            self.common.client_logout()
+
+    @utils.synchronized('3par', external=True)
+    def create_volume_from_snapshot(self, volume, snapshot):
+        """Creates a volume from a snapshot.
+
+        TODO: support using the size from the user.
+        """
+        self.common.client_login()
+        try:
+            metadata = self.common.create_volume_from_snapshot(volume,
+                                                               snapshot)
+            return {'metadata': metadata}
+        finally:
+            self.common.client_logout()
+
+    @utils.synchronized('3par', external=True)
+    def create_snapshot(self, snapshot):
+        self.common.client_login()
+        try:
+            self.common.create_snapshot(snapshot)
+        finally:
+            self.common.client_logout()
+
+    @utils.synchronized('3par', external=True)
+    def delete_snapshot(self, snapshot):
+        self.common.client_login()
+        try:
+            self.common.delete_snapshot(snapshot)
+        finally:
+            self.common.client_logout()
+
+    @utils.synchronized('3par', external=True)
+    def initialize_connection(self, volume, connector):
+        """Assigns the volume to a server.
+
+        Assign any created volume to a compute node/host so that it can be
+        used from that host.
+
+        This driver returns a driver_volume_type of 'iscsi'.
+        The format of the driver data is defined in _get_iscsi_properties.
+        Example return value:
+
+            {
+                'driver_volume_type': 'iscsi',
+                'data': {
+                    'target_discovered': True,
+                    'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001',
+                    'target_portal': '127.0.0.1:3260',
+                    'volume_id': 1,
+                }
+            }
+
+        Steps to export a volume on 3PAR
+          * Get the 3PAR iSCSI iqn
+          * Create a host on the 3par
+          * create vlun on the 3par
+        """
+        self.common.client_login()
+        try:
+
+            # we have to make sure we have a host
+            host = self._create_host(volume, connector)
+            least_used_nsp = self._get_least_used_nsp_for_host(host['name'])
+
+            # now that we have a host, create the VLUN
+            vlun = self.common.create_vlun(volume, host, least_used_nsp)
+
+            if least_used_nsp is None:
+                msg = _("Least busy iSCSI port not found, "
+                        "using first iSCSI port in list.")
+                LOG.warn(msg)
+                iscsi_ip = self.iscsi_ips.keys()[0]
+            else:
+                iscsi_ip = self._get_ip_using_nsp(least_used_nsp)
+
+            iscsi_ip_port = self.iscsi_ips[iscsi_ip]['ip_port']
+            iscsi_target_iqn = self.iscsi_ips[iscsi_ip]['iqn']
+            info = {'driver_volume_type': 'iscsi',
+                    'data': {'target_portal': "%s:%s" %
+                             (iscsi_ip, iscsi_ip_port),
+                             'target_iqn': iscsi_target_iqn,
+                             'target_lun': vlun['lun'],
+                             'target_discovered': True
+                             }
+                    }
+            return info
+        finally:
+            self.common.client_logout()
+
+    @utils.synchronized('3par', external=True)
+    def terminate_connection(self, volume, connector, **kwargs):
+        """Driver entry point to unattach a volume from an instance."""
+        self.common.client_login()
+        try:
+            hostname = self.common._safe_hostname(connector['host'])
+            self.common.terminate_connection(volume, hostname,
+                                             iqn=connector['initiator'])
+        finally:
+            self.common.client_logout()
+
+    def _create_3par_iscsi_host(self, hostname, iscsi_iqn, domain, persona_id):
+        """Create a 3PAR host.
+
+        Create a 3PAR host, if there is already a host on the 3par using
+        the same iqn but with a different hostname, return the hostname
+        used by 3PAR.
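+
+        The CLI command issued has the form (domain omitted when None;
+        placeholders illustrative)::
+
+            createhost -iscsi -persona <persona_id> -domain <domain> \
+                <hostname> <iscsi_iqn>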
+ """ + if domain is not None: + cmd = ['createhost', '-iscsi', '-persona', persona_id, '-domain', + domain, hostname, iscsi_iqn] + else: + cmd = ['createhost', '-iscsi', '-persona', persona_id, hostname, + iscsi_iqn] + out = self.common._cli_run(cmd) + if out and len(out) > 1: + return self.common.parse_create_host_error(hostname, out) + return hostname + + def _modify_3par_iscsi_host(self, hostname, iscsi_iqn): + mod_request = {'pathOperation': self.common.client.HOST_EDIT_ADD, + 'iSCSINames': [iscsi_iqn]} + + self.common.client.modifyHost(hostname, mod_request) + + def _create_host(self, volume, connector): + """Creates or modifies existing 3PAR host.""" + # make sure we don't have the host already + host = None + hostname = self.common._safe_hostname(connector['host']) + cpg = self.common.get_cpg(volume, allowSnap=True) + domain = self.common.get_domain(cpg) + try: + host = self.common._get_3par_host(hostname) + if 'iSCSIPaths' not in host or len(host['iSCSIPaths']) < 1: + self._modify_3par_iscsi_host(hostname, connector['initiator']) + host = self.common._get_3par_host(hostname) + except hpexceptions.HTTPNotFound: + # get persona from the volume type extra specs + persona_id = self.common.get_persona_type(volume) + # host doesn't exist, we have to create it + hostname = self._create_3par_iscsi_host(hostname, + connector['initiator'], + domain, + persona_id) + host = self.common._get_3par_host(hostname) + + return host + + @utils.synchronized('3par', external=True) + def create_export(self, context, volume): + pass + + @utils.synchronized('3par', external=True) + def ensure_export(self, context, volume): + pass + + @utils.synchronized('3par', external=True) + def remove_export(self, context, volume): + pass + + def _get_least_used_nsp_for_host(self, hostname): + """Get the least used NSP for the current host. + + Steps to determine which NSP to use. 
+          * If only one iSCSI NSP, return it
+          * If there is already an active vlun to this host, return its NSP
+          * Return NSP with fewest active vluns
+        """
+
+        iscsi_nsps = self._get_iscsi_nsps()
+        # If there's only one path, use it
+        if len(iscsi_nsps) == 1:
+            return iscsi_nsps[0]
+
+        # Try to reuse an existing iscsi path to the host
+        vluns = self.common.client.getVLUNs()
+        for vlun in vluns['members']:
+            if vlun['active']:
+                if vlun['hostname'] == hostname:
+                    temp_nsp = self.common.build_nsp(vlun['portPos'])
+                    if temp_nsp in iscsi_nsps:
+                        # this host already has an iscsi path, so use it
+                        return temp_nsp
+
+        # Calculate the least used iscsi nsp
+        least_used_nsp = self._get_least_used_nsp(vluns['members'],
+                                                  self._get_iscsi_nsps())
+        return least_used_nsp
+
+    def _get_iscsi_nsps(self):
+        """Return the list of candidate nsps."""
+        nsps = []
+        for value in self.iscsi_ips.values():
+            nsps.append(value['nsp'])
+        return nsps
+
+    def _get_ip_using_nsp(self, nsp):
+        """Return IP associated with given nsp."""
+        for (key, value) in self.iscsi_ips.items():
+            if value['nsp'] == nsp:
+                return key
+
+    def _get_least_used_nsp(self, vluns, nspss):
+        """Return the nsp that has the fewest active vluns."""
+        # count the number of active vluns on each candidate
+        # nsp (node:slot:port)
+        nsp_counts = {}
+        for nsp in nspss:
+            # initialize counts to zero
+            nsp_counts[nsp] = 0
+
+        current_least_used_nsp = None
+
+        for vlun in vluns:
+            if vlun['active']:
+                nsp = self.common.build_nsp(vlun['portPos'])
+                if nsp in nsp_counts:
+                    nsp_counts[nsp] = nsp_counts[nsp] + 1
+
+        # identify key (nsp) of least used nsp
+        current_smallest_count = sys.maxint
+        for (nsp, count) in nsp_counts.iteritems():
+            if count < current_smallest_count:
+                current_least_used_nsp = nsp
+                current_smallest_count = count
+
+        return current_least_used_nsp
+
+    @utils.synchronized('3par', external=True)
+    def extend_volume(self, volume, new_size):
+        self.common.extend_volume(volume, new_size)
+
+    @utils.synchronized('3par', external=True)
+    def attach_volume(self, context, volume, instance_uuid, host_name,
+                      mountpoint):
+        self.common.attach_volume(volume, instance_uuid)
+
+    @utils.synchronized('3par', external=True)
+    def detach_volume(self, context, volume):
+        self.common.detach_volume(volume)
diff --git a/cinder/volume/drivers/san/hp_lefthand.py b/cinder/volume/drivers/san/hp_lefthand.py
new file mode 100644
index 0000000000..8a96d25d48
--- /dev/null
+++ b/cinder/volume/drivers/san/hp_lefthand.py
@@ -0,0 +1,421 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+HP Lefthand SAN ISCSI Driver.
+
+The driver communicates with the backend, aka CLIQ, via SSH to perform
+all the operations on the SAN.
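+
+Commands are sent as '<verb> key=value ...' argument lists; for example
+(illustrative volume name, cluster options abridged)::
+
+    createVolume volumeName=volume-00000001 size=10GB output=XML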
+""" +from lxml import etree + +from cinder import exception +from cinder.openstack.common import log as logging +from cinder.openstack.common import processutils +from cinder.volume.drivers.san.san import SanISCSIDriver + + +LOG = logging.getLogger(__name__) + + +class HpSanISCSIDriver(SanISCSIDriver): + """Executes commands relating to HP/Lefthand SAN ISCSI volumes. + + We use the CLIQ interface, over SSH. + + Rough overview of CLIQ commands used: + + :createVolume: (creates the volume) + + :deleteVolume: (deletes the volume) + + :modifyVolume: (extends the volume) + + :createSnapshot: (creates the snapshot) + + :deleteSnapshot: (deletes the snapshot) + + :cloneSnapshot: (creates the volume from a snapshot) + + :getVolumeInfo: (to discover the IQN etc) + + :getSnapshotInfo: (to discover the IQN etc) + + :getClusterInfo: (to discover the iSCSI target IP address) + + :assignVolumeChap: (exports it with CHAP security) + + The 'trick' here is that the HP SAN enforces security by default, so + normally a volume mount would need both to configure the SAN in the volume + layer and do the mount on the compute layer. Multi-layer operations are + not catered for at the moment in the cinder architecture, so instead we + share the volume using CHAP at volume creation time. Then the mount need + only use those CHAP credentials, so can take place exclusively in the + compute layer. + + Version history: + 1.0.0 - Initial driver + 1.1.0 - Added create/delete snapshot, extend volume, create volume + from snapshot support. + """ + + VERSION = "1.1.0" + + device_stats = {} + + def __init__(self, *args, **kwargs): + super(HpSanISCSIDriver, self).__init__(*args, **kwargs) + self.cluster_vip = None + + def _cliq_run(self, verb, cliq_args, check_exit_code=True): + """Runs a CLIQ command over SSH, without doing any result parsing.""" + cmd_list = [verb] + for k, v in cliq_args.items(): + cmd_list.append("%s=%s" % (k, v)) + + return self._run_ssh(cmd_list, check_exit_code) + + def _cliq_run_xml(self, verb, cliq_args, check_cliq_result=True): + """Runs a CLIQ command over SSH, parsing and checking the output.""" + cliq_args['output'] = 'XML' + (out, _err) = self._cliq_run(verb, cliq_args, check_cliq_result) + + LOG.debug(_("CLIQ command returned %s"), out) + + result_xml = etree.fromstring(out) + if check_cliq_result: + response_node = result_xml.find("response") + if response_node is None: + msg = (_("Malformed response to CLIQ command " + "%(verb)s %(cliq_args)s. Result=%(out)s") % + {'verb': verb, 'cliq_args': cliq_args, 'out': out}) + raise exception.VolumeBackendAPIException(data=msg) + + result_code = response_node.attrib.get("result") + + if result_code != "0": + msg = (_("Error running CLIQ command %(verb)s %(cliq_args)s. 
" + " Result=%(out)s") % + {'verb': verb, 'cliq_args': cliq_args, 'out': out}) + raise exception.VolumeBackendAPIException(data=msg) + + return result_xml + + def _cliq_get_cluster_info(self, cluster_name): + """Queries for info about the cluster (including IP).""" + cliq_args = {} + cliq_args['clusterName'] = cluster_name + cliq_args['searchDepth'] = '1' + cliq_args['verbose'] = '0' + + result_xml = self._cliq_run_xml("getClusterInfo", cliq_args) + + return result_xml + + def _cliq_get_cluster_vip(self, cluster_name): + """Gets the IP on which a cluster shares iSCSI volumes.""" + cluster_xml = self._cliq_get_cluster_info(cluster_name) + + vips = [] + for vip in cluster_xml.findall("response/cluster/vip"): + vips.append(vip.attrib.get('ipAddress')) + + if len(vips) == 1: + return vips[0] + + _xml = etree.tostring(cluster_xml) + msg = (_("Unexpected number of virtual ips for cluster " + " %(cluster_name)s. Result=%(_xml)s") % + {'cluster_name': cluster_name, '_xml': _xml}) + raise exception.VolumeBackendAPIException(data=msg) + + def _cliq_get_volume_info(self, volume_name): + """Gets the volume info, including IQN.""" + cliq_args = {} + cliq_args['volumeName'] = volume_name + result_xml = self._cliq_run_xml("getVolumeInfo", cliq_args) + + # Result looks like this: + # + # + # + # + # + # + # + # + + # Flatten the nodes into a dictionary; use prefixes to avoid collisions + volume_attributes = {} + + volume_node = result_xml.find("response/volume") + for k, v in volume_node.attrib.items(): + volume_attributes["volume." + k] = v + + status_node = volume_node.find("status") + if status_node is not None: + for k, v in status_node.attrib.items(): + volume_attributes["status." + k] = v + + # We only consider the first permission node + permission_node = volume_node.find("permission") + if permission_node is not None: + for k, v in status_node.attrib.items(): + volume_attributes["permission." + k] = v + + LOG.debug(_("Volume info: %(volume_name)s => %(volume_attributes)s") % + {'volume_name': volume_name, + 'volume_attributes': volume_attributes}) + return volume_attributes + + def _cliq_get_snapshot_info(self, snapshot_name): + """Gets the snapshot info, including IQN.""" + cliq_args = {} + cliq_args['snapshotName'] = snapshot_name + result_xml = self._cliq_run_xml("getSnapshotInfo", cliq_args) + + # Result looks like this: + # + # + # + # + # + # + # + # + + # Flatten the nodes into a dictionary; use prefixes to avoid collisions + snapshot_attributes = {} + + snapshot_node = result_xml.find("response/snapshot") + for k, v in snapshot_node.attrib.items(): + snapshot_attributes["snapshot." + k] = v + + status_node = snapshot_node.find("status") + if status_node is not None: + for k, v in status_node.attrib.items(): + snapshot_attributes["status." + k] = v + + # We only consider the first permission node + permission_node = snapshot_node.find("permission") + if permission_node is not None: + for k, v in status_node.attrib.items(): + snapshot_attributes["permission." 
+ k] = v + + LOG.debug(_("Snapshot info: %(name)s => %(attributes)s") % + {'name': snapshot_name, 'attributes': snapshot_attributes}) + return snapshot_attributes + + def create_volume(self, volume): + """Creates a volume.""" + cliq_args = {} + cliq_args['clusterName'] = self.configuration.san_clustername + + if self.configuration.san_thin_provision: + cliq_args['thinProvision'] = '1' + else: + cliq_args['thinProvision'] = '0' + + cliq_args['volumeName'] = volume['name'] + if int(volume['size']) == 0: + cliq_args['size'] = '100MB' + else: + cliq_args['size'] = '%sGB' % volume['size'] + + self._cliq_run_xml("createVolume", cliq_args) + + return self._get_model_update(volume['name']) + + def extend_volume(self, volume, new_size): + """Extend the size of an existing volume.""" + cliq_args = {} + cliq_args['volumeName'] = volume['name'] + cliq_args['size'] = '%sGB' % new_size + + self._cliq_run_xml("modifyVolume", cliq_args) + + def create_volume_from_snapshot(self, volume, snapshot): + """Creates a volume from a snapshot.""" + cliq_args = {} + cliq_args['snapshotName'] = snapshot['name'] + cliq_args['volumeName'] = volume['name'] + + self._cliq_run_xml("cloneSnapshot", cliq_args) + + return self._get_model_update(volume['name']) + + def create_snapshot(self, snapshot): + """Creates a snapshot.""" + cliq_args = {} + cliq_args['snapshotName'] = snapshot['name'] + cliq_args['volumeName'] = snapshot['volume_name'] + cliq_args['inheritAccess'] = 1 + self._cliq_run_xml("createSnapshot", cliq_args) + + def delete_volume(self, volume): + """Deletes a volume.""" + cliq_args = {} + cliq_args['volumeName'] = volume['name'] + cliq_args['prompt'] = 'false' # Don't confirm + try: + volume_info = self._cliq_get_volume_info(volume['name']) + except processutils.ProcessExecutionError: + LOG.error_("Volume did not exist. It will not be deleted") + return + self._cliq_run_xml("deleteVolume", cliq_args) + + def delete_snapshot(self, snapshot): + """Deletes a snapshot.""" + cliq_args = {} + cliq_args['snapshotName'] = snapshot['name'] + cliq_args['prompt'] = 'false' # Don't confirm + try: + volume_info = self._cliq_get_snapshot_info(snapshot['name']) + except processutils.ProcessExecutionError: + LOG.error_("Snapshot did not exist. It will not be deleted") + return + self._cliq_run_xml("deleteSnapshot", cliq_args) + + def local_path(self, volume): + msg = _("local_path not supported") + raise exception.VolumeBackendAPIException(data=msg) + + def initialize_connection(self, volume, connector): + """Assigns the volume to a server. + + Assign any created volume to a compute node/host so that it can be + used from that host. HP VSA requires a volume to be assigned + to a server. + + This driver returns a driver_volume_type of 'iscsi'. + The format of the driver data is defined in _get_iscsi_properties. 
+        Example return value:
+
+            {
+                'driver_volume_type': 'iscsi',
+                'data': {
+                    'target_discovered': True,
+                    'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001',
+                    'target_portal': '127.0.0.1:3260',
+                    'volume_id': 1,
+                }
+            }
+
+        """
+        self._create_server(connector)
+        cliq_args = {}
+        cliq_args['volumeName'] = volume['name']
+        cliq_args['serverName'] = connector['host']
+        self._cliq_run_xml("assignVolumeToServer", cliq_args)
+
+        iscsi_properties = self._get_iscsi_properties(volume)
+        return {
+            'driver_volume_type': 'iscsi',
+            'data': iscsi_properties
+        }
+
+    def _create_server(self, connector):
+        cliq_args = {}
+        cliq_args['serverName'] = connector['host']
+        out = self._cliq_run_xml("getServerInfo", cliq_args, False)
+        response = out.find("response")
+        result = response.attrib.get("result")
+        if result != '0':
+            cliq_args = {}
+            cliq_args['serverName'] = connector['host']
+            cliq_args['initiator'] = connector['initiator']
+            self._cliq_run_xml("createServer", cliq_args)
+
+    def _get_model_update(self, volume_name):
+        volume_info = self._cliq_get_volume_info(volume_name)
+        cluster_name = volume_info['volume.clusterName']
+        iscsi_iqn = volume_info['volume.iscsiIqn']
+
+        # TODO(justinsb): Is this always 1? Does it matter?
+        cluster_interface = '1'
+
+        if not self.cluster_vip:
+            self.cluster_vip = self._cliq_get_cluster_vip(cluster_name)
+        iscsi_portal = self.cluster_vip + ":3260," + cluster_interface
+
+        model_update = {}
+
+        # NOTE(jdg): LH volumes always at lun 0 ?
+        model_update['provider_location'] = ("%s %s %s" %
+                                             (iscsi_portal,
+                                              iscsi_iqn,
+                                              0))
+        return model_update
+
+    def terminate_connection(self, volume, connector, **kwargs):
+        """Unassign the volume from the host."""
+        cliq_args = {}
+        cliq_args['volumeName'] = volume['name']
+        cliq_args['serverName'] = connector['host']
+        self._cliq_run_xml("unassignVolumeToServer", cliq_args)
+
+    def get_volume_stats(self, refresh):
+        if refresh:
+            self._update_backend_status()
+
+        return self.device_stats
+
+    def _update_backend_status(self):
+        data = {}
+        backend_name = self.configuration.safe_get('volume_backend_name')
+        data['volume_backend_name'] = backend_name or self.__class__.__name__
+        data['driver_version'] = self.VERSION
+        data['reserved_percentage'] = 0
+        data['storage_protocol'] = 'iSCSI'
+        data['vendor_name'] = 'Hewlett-Packard'
+
+        result_xml = self._cliq_run_xml("getClusterInfo", {})
+        cluster_node = result_xml.find("response/cluster")
+        total_capacity = cluster_node.attrib.get("spaceTotal")
+        free_capacity = cluster_node.attrib.get("unprovisionedSpace")
+        GB = 1073741824
+
+        data['total_capacity_gb'] = int(total_capacity) / GB
+        data['free_capacity_gb'] = int(free_capacity) / GB
+        self.device_stats = data
diff --git a/cinder/volume/drivers/san/san.py b/cinder/volume/drivers/san/san.py
new file mode 100644
index 0000000000..45ef69f00d
--- /dev/null
+++ b/cinder/volume/drivers/san/san.py
@@ -0,0 +1,182 @@
+# Copyright 2011 Justin Santa Barbara
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
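Editorial note on the LeftHand driver above: _get_model_update stores provider_location as the whitespace-separated triple "<vip>:3260,<interface> <iqn> <lun>". The following minimal sketch shows how such a string could be unpacked; the helper name and the returned field names are illustrative assumptions, not part of the driver's API.

# Hedged sketch (not driver API): unpack the provider_location triple
# "<vip>:3260,<interface> <iqn> <lun>" built by _get_model_update above.
def parse_provider_location(provider_location):
    portal, iqn, lun = provider_location.split()
    host_port, _, interface = portal.partition(',')
    host, _, port = host_port.rpartition(':')
    return {'host': host, 'port': int(port), 'interface': interface,
            'iqn': iqn, 'lun': int(lun)}

# parse_provider_location('10.0.0.5:3260,1 iqn.2010-10.org.openstack:volume-01 0')
# => {'host': '10.0.0.5', 'port': 3260, 'interface': '1',
#     'iqn': 'iqn.2010-10.org.openstack:volume-01', 'lun': 0}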
+""" +Default Driver for san-stored volumes. + +The unique thing about a SAN is that we don't expect that we can run the volume +controller on the SAN hardware. We expect to access it over SSH or some API. +""" + +import random + +from eventlet import greenthread +from oslo.config import cfg + +from cinder import exception +from cinder.openstack.common import excutils +from cinder.openstack.common import log as logging +from cinder.openstack.common import processutils +from cinder import utils +from cinder.volume import driver + +LOG = logging.getLogger(__name__) + +san_opts = [ + cfg.BoolOpt('san_thin_provision', + default=True, + help='Use thin provisioning for SAN volumes?'), + cfg.StrOpt('san_ip', + default='', + help='IP address of SAN controller'), + cfg.StrOpt('san_login', + default='admin', + help='Username for SAN controller'), + cfg.StrOpt('san_password', + default='', + help='Password for SAN controller', + secret=True), + cfg.StrOpt('san_private_key', + default='', + help='Filename of private key to use for SSH authentication'), + cfg.StrOpt('san_clustername', + default='', + help='Cluster name to use for creating volumes'), + cfg.IntOpt('san_ssh_port', + default=22, + help='SSH port to use with SAN'), + cfg.BoolOpt('san_is_local', + default=False, + help='Execute commands locally instead of over SSH; ' + 'use if the volume service is running on the SAN device'), + cfg.IntOpt('ssh_conn_timeout', + default=30, + help="SSH connection timeout in seconds"), + cfg.IntOpt('ssh_min_pool_conn', + default=1, + help='Minimum ssh connections in the pool'), + cfg.IntOpt('ssh_max_pool_conn', + default=5, + help='Maximum ssh connections in the pool'), +] + +CONF = cfg.CONF +CONF.register_opts(san_opts) + + +class SanDriver(driver.VolumeDriver): + """Base class for SAN-style storage volumes + + A SAN-style storage value is 'different' because the volume controller + probably won't run on it, so we need to access is over SSH or another + remote protocol. + """ + + def __init__(self, *args, **kwargs): + execute = kwargs.pop('execute', self.san_execute) + super(SanDriver, self).__init__(execute=execute, + *args, **kwargs) + self.configuration.append_config_values(san_opts) + self.run_local = self.configuration.san_is_local + self.sshpool = None + + def san_execute(self, *cmd, **kwargs): + if self.run_local: + return utils.execute(*cmd, **kwargs) + else: + check_exit_code = kwargs.pop('check_exit_code', None) + command = ' '.join(cmd) + return self._run_ssh(command, check_exit_code) + + def _run_ssh(self, cmd_list, check_exit_code=True, attempts=1): + utils.check_ssh_injection(cmd_list) + command = ' '. 
join(cmd_list) + + if not self.sshpool: + password = self.configuration.san_password + privatekey = self.configuration.san_private_key + min_size = self.configuration.ssh_min_pool_conn + max_size = self.configuration.ssh_max_pool_conn + self.sshpool = utils.SSHPool(self.configuration.san_ip, + self.configuration.san_ssh_port, + self.configuration.ssh_conn_timeout, + self.configuration.san_login, + password=password, + privatekey=privatekey, + min_size=min_size, + max_size=max_size) + last_exception = None + try: + total_attempts = attempts + with self.sshpool.item() as ssh: + while attempts > 0: + attempts -= 1 + try: + return processutils.ssh_execute( + ssh, + command, + check_exit_code=check_exit_code) + except Exception as e: + LOG.error(e) + last_exception = e + greenthread.sleep(random.randint(20, 500) / 100.0) + try: + raise processutils.ProcessExecutionError( + exit_code=last_exception.exit_code, + stdout=last_exception.stdout, + stderr=last_exception.stderr, + cmd=last_exception.cmd) + except AttributeError: + raise processutils.ProcessExecutionError( + exit_code=-1, + stdout="", + stderr="Error running SSH command", + cmd=command) + + except Exception: + with excutils.save_and_reraise_exception(): + LOG.error(_("Error running SSH command: %s") % command) + + def ensure_export(self, context, volume): + """Synchronously recreates an export for a logical volume.""" + pass + + def create_export(self, context, volume): + """Exports the volume.""" + pass + + def remove_export(self, context, volume): + """Removes an export for a logical volume.""" + pass + + def check_for_setup_error(self): + """Returns an error if prerequisites aren't met.""" + if not self.run_local: + if not (self.configuration.san_password or + self.configuration.san_private_key): + raise exception.InvalidInput( + reason=_('Specify san_password or san_private_key')) + + # The san_ip must always be set, because we use it for the target + if not self.configuration.san_ip: + raise exception.InvalidInput(reason=_("san_ip must be set")) + + +class SanISCSIDriver(SanDriver, driver.ISCSIDriver): + def __init__(self, *args, **kwargs): + super(SanISCSIDriver, self).__init__(*args, **kwargs) + + def _build_iscsi_target_name(self, volume): + return "%s%s" % (self.configuration.iscsi_target_prefix, + volume['name']) diff --git a/cinder/volume/drivers/san/solaris.py b/cinder/volume/drivers/san/solaris.py new file mode 100644 index 0000000000..9a513265eb --- /dev/null +++ b/cinder/volume/drivers/san/solaris.py @@ -0,0 +1,286 @@ +# Copyright 2012 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
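Editorial note before the Solaris driver body: san.py above is a base class rather than a usable backend. Concrete drivers override the volume operations and route their CLI invocations through san_execute(), which runs commands locally when san_is_local is set and over the pooled SSH connection otherwise. A hedged sketch of such a subclass follows; ExampleSanISCSIDriver and the 'vendorcli' command are invented for illustration and are not a real Cinder driver.

# Minimal sketch, assuming the base class wires self._execute to
# san_execute() as SanDriver.__init__() above does.
from cinder.volume.drivers.san.san import SanISCSIDriver


class ExampleSanISCSIDriver(SanISCSIDriver):
    def create_volume(self, volume):
        # Runs locally with san_is_local=True, otherwise over pooled SSH.
        self._execute('vendorcli', 'volume', 'create', volume['name'],
                      '--size', '%sG' % volume['size'])

    def delete_volume(self, volume):
        self._execute('vendorcli', 'volume', 'delete', volume['name'])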
+
+from oslo.config import cfg
+
+from cinder import exception
+from cinder.openstack.common import log as logging
+from cinder.volume.drivers.san.san import SanISCSIDriver
+
+LOG = logging.getLogger(__name__)
+
+solaris_opts = [
+    cfg.StrOpt('san_zfs_volume_base',
+               default='rpool/',
+               help='The ZFS path under which to create zvols for volumes.'), ]
+
+CONF = cfg.CONF
+CONF.register_opts(solaris_opts)
+
+
+class SolarisISCSIDriver(SanISCSIDriver):
+    """Executes commands relating to Solaris-hosted ISCSI volumes.
+
+    Basic setup for a Solaris iSCSI server:
+
+    pkg install storage-server SUNWiscsit
+
+    svcadm enable stmf
+
+    svcadm enable -r svc:/network/iscsi/target:default
+
+    pfexec itadm create-tpg e1000g0 ${MYIP}
+
+    pfexec itadm create-target -t e1000g0
+
+
+    Then grant the user that will be logging on lots of permissions.
+    I'm not sure exactly which though:
+
+    zfs allow justinsb create,mount,destroy rpool
+
+    usermod -P'File System Management' justinsb
+
+    usermod -P'Primary Administrator' justinsb
+
+    Also make sure you can login using san_login & san_password/san_private_key
+    """
+    def __init__(self, *cmd, **kwargs):
+        super(SolarisISCSIDriver, self).__init__(execute=self.solaris_execute,
+                                                 *cmd, **kwargs)
+        self.configuration.append_config_values(solaris_opts)
+
+    def solaris_execute(self, *cmd, **kwargs):
+        new_cmd = ['pfexec']
+        new_cmd.extend(cmd)
+        return super(SolarisISCSIDriver, self)._execute(*new_cmd,
+                                                        **kwargs)
+
+    def _view_exists(self, luid):
+        (out, _err) = self._execute('/usr/sbin/stmfadm',
+                                    'list-view', '-l', luid,
+                                    check_exit_code=False)
+        if "no views found" in out:
+            return False
+
+        if "View Entry:" in out:
+            return True
+        msg = _("Cannot parse list-view output: %s") % out
+        raise exception.VolumeBackendAPIException(data=msg)
+
+    def _get_target_groups(self):
+        """Gets list of target groups from host."""
+        (out, _err) = self._execute('/usr/sbin/stmfadm', 'list-tg')
+        matches = self._get_prefixed_values(out, 'Target group: ')
+        LOG.debug("target_groups=%s" % matches)
+        return matches
+
+    def _target_group_exists(self, target_group_name):
+        return target_group_name in self._get_target_groups()
+
+    def _get_target_group_members(self, target_group_name):
+        (out, _err) = self._execute('/usr/sbin/stmfadm',
+                                    'list-tg', '-v', target_group_name)
+        matches = self._get_prefixed_values(out, 'Member: ')
+        LOG.debug("members of %s=%s" % (target_group_name, matches))
+        return matches
+
+    def _is_target_group_member(self, target_group_name, iscsi_target_name):
+        return iscsi_target_name in (
+            self._get_target_group_members(target_group_name))
+
+    def _get_iscsi_targets(self):
+        (out, _err) = self._execute('/usr/sbin/itadm', 'list-target')
+        matches = self._collect_lines(out)
+
+        # Skip header
+        if len(matches) != 0:
+            assert 'TARGET NAME' in matches[0]
+            matches = matches[1:]
+
+        targets = []
+        for line in matches:
+            items = line.split()
+            assert len(items) == 3
+            targets.append(items[0])
+
+        LOG.debug("_get_iscsi_targets=%s" % (targets))
+        return targets
+
+    def _iscsi_target_exists(self, iscsi_target_name):
+        return iscsi_target_name in self._get_iscsi_targets()
+
+    def _build_zfs_poolname(self, volume):
+        zfs_poolname = '%s%s' % (self.configuration.san_zfs_volume_base,
+                                 volume['name'])
+        return zfs_poolname
+
+    def create_volume(self, volume):
+        """Creates a volume."""
+        if int(volume['size']) == 0:
+            sizestr = '100M'
+        else:
+            sizestr = '%sG' % volume['size']
+
+        zfs_poolname = self._build_zfs_poolname(volume)
+
+        # Create a zfs volume
+        cmd = ['/usr/sbin/zfs',
'create'] + if self.configuration.san_thin_provision: + cmd.append('-s') + cmd.extend(['-V', sizestr]) + cmd.append(zfs_poolname) + self._execute(*cmd) + + def _get_luid(self, volume): + zfs_poolname = self._build_zfs_poolname(volume) + zvol_name = '/dev/zvol/rdsk/%s' % zfs_poolname + + (out, _err) = self._execute('/usr/sbin/sbdadm', 'list-lu') + + lines = self._collect_lines(out) + + # Strip headers + if len(lines) >= 1: + if lines[0] == '': + lines = lines[1:] + + if len(lines) >= 4: + assert 'Found' in lines[0] + assert '' == lines[1] + assert 'GUID' in lines[2] + assert '------------------' in lines[3] + + lines = lines[4:] + + for line in lines: + items = line.split() + assert len(items) == 3 + if items[2] == zvol_name: + luid = items[0].strip() + return luid + + msg = _('LUID not found for %(zfs_poolname)s. ' + 'Output=%(out)s') % {'zfs_poolname': zfs_poolname, 'out': out} + raise exception.VolumeBackendAPIException(data=msg) + + def _is_lu_created(self, volume): + luid = self._get_luid(volume) + return luid + + def delete_volume(self, volume): + """Deletes a volume.""" + zfs_poolname = self._build_zfs_poolname(volume) + self._execute('/usr/sbin/zfs', 'destroy', zfs_poolname) + + def local_path(self, volume): + # TODO(justinsb): Is this needed here? + escaped_group = self.configuration.volume_group.replace('-', '--') + escaped_name = volume['name'].replace('-', '--') + return "/dev/mapper/%s-%s" % (escaped_group, escaped_name) + + def ensure_export(self, context, volume): + """Synchronously recreates an export for a logical volume.""" + #TODO(justinsb): On bootup, this is called for every volume. + # It then runs ~5 SSH commands for each volume, + # most of which fetch the same info each time + # This makes initial start stupid-slow + return self._do_export(volume, force_create=False) + + def create_export(self, context, volume): + return self._do_export(volume, force_create=True) + + def _do_export(self, volume, force_create): + # Create a Logical Unit (LU) backed by the zfs volume + zfs_poolname = self._build_zfs_poolname(volume) + + if force_create or not self._is_lu_created(volume): + zvol_name = '/dev/zvol/rdsk/%s' % zfs_poolname + self._execute('/usr/sbin/sbdadm', 'create-lu', zvol_name) + + luid = self._get_luid(volume) + iscsi_name = self._build_iscsi_target_name(volume) + target_group_name = 'tg-%s' % volume['name'] + + # Create a iSCSI target, mapped to just this volume + if force_create or not self._target_group_exists(target_group_name): + self._execute('/usr/sbin/stmfadm', 'create-tg', target_group_name) + + # Yes, we add the initiatior before we create it! + # Otherwise, it complains that the target is already active + if force_create or not self._is_target_group_member(target_group_name, + iscsi_name): + self._execute('/usr/sbin/stmfadm', + 'add-tg-member', '-g', target_group_name, iscsi_name) + + if force_create or not self._iscsi_target_exists(iscsi_name): + self._execute('/usr/sbin/itadm', 'create-target', '-n', iscsi_name) + + if force_create or not self._view_exists(luid): + self._execute('/usr/sbin/stmfadm', + 'add-view', '-t', target_group_name, luid) + + #TODO(justinsb): Is this always 1? Does it matter? 
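+        # (Editorial note, hedged) The portal string assembled below has the
+        # form "<san_ip>:3260,<interface>", e.g. "192.168.0.10:3260,1", and
+        # the returned provider_location is that portal plus the target IQN,
+        # separated by a single space.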
+ iscsi_portal_interface = '1' + iscsi_portal = \ + self.configuration.san_ip + ":3260," + iscsi_portal_interface + + db_update = {} + db_update['provider_location'] = ("%s %s" % + (iscsi_portal, + iscsi_name)) + + return db_update + + def remove_export(self, context, volume): + """Removes an export for a logical volume.""" + + # This is the reverse of _do_export + luid = self._get_luid(volume) + iscsi_name = self._build_iscsi_target_name(volume) + target_group_name = 'tg-%s' % volume['name'] + + if self._view_exists(luid): + self._execute('/usr/sbin/stmfadm', 'remove-view', '-l', luid, '-a') + + if self._iscsi_target_exists(iscsi_name): + self._execute('/usr/sbin/stmfadm', 'offline-target', iscsi_name) + self._execute('/usr/sbin/itadm', 'delete-target', iscsi_name) + + # We don't delete the tg-member; we delete the whole tg! + + if self._target_group_exists(target_group_name): + self._execute('/usr/sbin/stmfadm', 'delete-tg', target_group_name) + + if self._is_lu_created(volume): + self._execute('/usr/sbin/sbdadm', 'delete-lu', luid) + + def _collect_lines(self, data): + """Split lines from data into an array, trimming them.""" + matches = [] + for line in data.splitlines(): + match = line.strip() + matches.append(match) + return matches + + def _get_prefixed_values(self, data, prefix): + """Collect lines which start with prefix; with trimming.""" + matches = [] + for line in data.splitlines(): + line = line.strip() + if line.startswith(prefix): + match = line[len(prefix):] + match = match.strip() + matches.append(match) + return matches diff --git a/cinder/volume/drivers/scality.py b/cinder/volume/drivers/scality.py new file mode 100644 index 0000000000..559bfabd46 --- /dev/null +++ b/cinder/volume/drivers/scality.py @@ -0,0 +1,285 @@ +# Copyright (c) 2013 Scality +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Scality SOFS Volume Driver. +""" + + +import errno +import os +import urllib2 +import urlparse + +from oslo.config import cfg + +from cinder import exception +from cinder.image import image_utils +from cinder.openstack.common import log as logging +from cinder import units +from cinder.volume import driver + + +LOG = logging.getLogger(__name__) + +volume_opts = [ + cfg.StrOpt('scality_sofs_config', + default=None, + help='Path or URL to Scality SOFS configuration file'), + cfg.StrOpt('scality_sofs_mount_point', + default='$state_path/scality', + help='Base dir where Scality SOFS shall be mounted'), + cfg.StrOpt('scality_sofs_volume_dir', + default='cinder/volumes', + help='Path from Scality SOFS root to volume dir'), +] + +CONF = cfg.CONF +CONF.register_opts(volume_opts) + + +class ScalityDriver(driver.VolumeDriver): + """Scality SOFS cinder driver. + + Creates sparse files on SOFS for hypervisors to use as block + devices. 
+ """ + + VERSION = '1.0.0' + + def _check_prerequisites(self): + """Sanity checks before attempting to mount SOFS.""" + + # config is mandatory + config = CONF.scality_sofs_config + if not config: + msg = _("Value required for 'scality_sofs_config'") + LOG.warn(msg) + raise exception.VolumeBackendAPIException(data=msg) + + # config can be a file path or a URL, check it + if urlparse.urlparse(config).scheme == '': + # turn local path into URL + config = 'file://%s' % config + try: + urllib2.urlopen(config, timeout=5).close() + except urllib2.URLError as e: + msg = _("Cannot access 'scality_sofs_config': %s") % e + LOG.warn(msg) + raise exception.VolumeBackendAPIException(data=msg) + + # mount.sofs must be installed + if not os.access('/sbin/mount.sofs', os.X_OK): + msg = _("Cannot execute /sbin/mount.sofs") + LOG.warn(msg) + raise exception.VolumeBackendAPIException(data=msg) + + def _makedirs(self, path): + try: + os.makedirs(path) + except OSError as e: + if e.errno != errno.EEXIST: + raise + + def _mount_sofs(self): + config = CONF.scality_sofs_config + mount_path = CONF.scality_sofs_mount_point + sysdir = os.path.join(mount_path, 'sys') + + self._makedirs(mount_path) + if not os.path.isdir(sysdir): + self._execute('mount', '-t', 'sofs', config, mount_path, + run_as_root=True) + if not os.path.isdir(sysdir): + msg = _("Cannot mount Scality SOFS, check syslog for errors") + LOG.warn(msg) + raise exception.VolumeBackendAPIException(data=msg) + + def _size_bytes(self, size_in_g): + if int(size_in_g) == 0: + return 100 * units.MiB + return int(size_in_g) * units.GiB + + def _create_file(self, path, size): + with open(path, "ab") as f: + f.truncate(size) + os.chmod(path, 0o666) + + def _copy_file(self, src_path, dest_path): + self._execute('dd', 'if=%s' % src_path, 'of=%s' % dest_path, + 'bs=1M', 'conv=fsync,nocreat,notrunc', + run_as_root=True) + + def do_setup(self, context): + """Any initialization the volume driver does while starting.""" + self._check_prerequisites() + self._mount_sofs() + voldir = os.path.join(CONF.scality_sofs_mount_point, + CONF.scality_sofs_volume_dir) + if not os.path.isdir(voldir): + self._makedirs(voldir) + + def check_for_setup_error(self): + """Returns an error if prerequisites aren't met.""" + self._check_prerequisites() + voldir = os.path.join(CONF.scality_sofs_mount_point, + CONF.scality_sofs_volume_dir) + if not os.path.isdir(voldir): + msg = _("Cannot find volume dir for Scality SOFS at '%s'") % voldir + LOG.warn(msg) + raise exception.VolumeBackendAPIException(data=msg) + + def create_volume(self, volume): + """Creates a logical volume. + + Can optionally return a Dictionary of changes to the volume + object to be persisted. 
+ """ + self._create_file(self.local_path(volume), + self._size_bytes(volume['size'])) + volume['provider_location'] = self._sofs_path(volume) + return {'provider_location': volume['provider_location']} + + def create_volume_from_snapshot(self, volume, snapshot): + """Creates a volume from a snapshot.""" + changes = self.create_volume(volume) + self._copy_file(self.local_path(snapshot), + self.local_path(volume)) + return changes + + def delete_volume(self, volume): + """Deletes a logical volume.""" + os.remove(self.local_path(volume)) + + def create_snapshot(self, snapshot): + """Creates a snapshot.""" + volume_path = os.path.join(CONF.scality_sofs_mount_point, + CONF.scality_sofs_volume_dir, + snapshot['volume_name']) + snapshot_path = self.local_path(snapshot) + self._create_file(snapshot_path, + self._size_bytes(snapshot['volume_size'])) + self._copy_file(volume_path, snapshot_path) + + def delete_snapshot(self, snapshot): + """Deletes a snapshot.""" + os.remove(self.local_path(snapshot)) + + def _sofs_path(self, volume): + return os.path.join(CONF.scality_sofs_volume_dir, + volume['name']) + + def local_path(self, volume): + return os.path.join(CONF.scality_sofs_mount_point, + self._sofs_path(volume)) + + def ensure_export(self, context, volume): + """Synchronously recreates an export for a logical volume.""" + pass + + def create_export(self, context, volume): + """Exports the volume. + + Can optionally return a Dictionary of changes to the volume + object to be persisted. + """ + pass + + def remove_export(self, context, volume): + """Removes an export for a logical volume.""" + pass + + def initialize_connection(self, volume, connector): + """Allow connection to connector and return connection info.""" + return { + 'driver_volume_type': 'scality', + 'data': { + 'sofs_path': self._sofs_path(volume), + } + } + + def terminate_connection(self, volume, connector, **kwargs): + """Disallow connection from connector.""" + pass + + def detach_volume(self, context, volume): + """Callback for volume detached.""" + pass + + def get_volume_stats(self, refresh=False): + """Return the current state of the volume service. + + If 'refresh' is True, run the update first. + """ + stats = { + 'vendor_name': 'Scality', + 'driver_version': self.VERSION, + 'storage_protocol': 'scality', + 'total_capacity_gb': 'infinite', + 'free_capacity_gb': 'infinite', + 'reserved_percentage': 0, + } + backend_name = self.configuration.safe_get('volume_backend_name') + stats['volume_backend_name'] = backend_name or 'Scality_SOFS' + return stats + + def copy_image_to_volume(self, context, volume, image_service, image_id): + """Fetch the image from image_service and write it to the volume.""" + image_utils.fetch_to_raw(context, + image_service, + image_id, + self.local_path(volume), + CONF.volume_dd_blocksize, + size=volume['size']) + self.create_volume(volume) + + def copy_volume_to_image(self, context, volume, image_service, image_meta): + """Copy the volume to the specified image.""" + image_utils.upload_volume(context, + image_service, + image_meta, + self.local_path(volume)) + + def clone_image(self, volume, image_location, image_id, image_meta): + """Create a volume efficiently from an existing image. + + image_location is a string whose format depends on the + image service backend in use. The driver should use it + to determine whether cloning is possible. + + image_id is a string which represents id of the image. 
+ It can be used by the driver to introspect internal + stores or registry to do an efficient image clone. + + Returns a dict of volume properties eg. provider_location, + boolean indicating whether cloning occurred + """ + return None, False + + def create_cloned_volume(self, volume, src_vref): + """Creates a clone of the specified volume.""" + self.create_volume_from_snapshot(volume, src_vref) + + def extend_volume(self, volume, new_size): + """Extend an existing volume.""" + self._create_file(self.local_path(volume), + self._size_bytes(new_size)) + + def backup_volume(self, context, backup, backup_service): + """Create a new backup from an existing volume.""" + raise NotImplementedError() + + def restore_backup(self, context, backup, volume, backup_service): + """Restore an existing backup to a new or existing volume.""" + raise NotImplementedError() diff --git a/cinder/volume/drivers/sheepdog.py b/cinder/volume/drivers/sheepdog.py new file mode 100644 index 0000000000..4fda28b906 --- /dev/null +++ b/cinder/volume/drivers/sheepdog.py @@ -0,0 +1,209 @@ +# Copyright 2012 OpenStack Foundation +# Copyright (c) 2013 Zelin.io +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +SheepDog Volume Driver. + +""" +import os +import re +import tempfile + +from oslo.config import cfg + +from cinder import exception +from cinder.image import image_utils +from cinder.openstack.common import log as logging +from cinder.openstack.common import processutils +from cinder import units +from cinder.volume import driver + + +LOG = logging.getLogger(__name__) + +CONF = cfg.CONF +CONF.import_opt("image_conversion_dir", "cinder.image.image_utils") + + +class SheepdogDriver(driver.VolumeDriver): + """Executes commands relating to Sheepdog Volumes.""" + + VERSION = "1.0.0" + + def __init__(self, *args, **kwargs): + super(SheepdogDriver, self).__init__(*args, **kwargs) + self.stats_pattern = re.compile(r'[\w\s%]*Total\s(\d+)\s(\d+)*') + self._stats = {} + + def check_for_setup_error(self): + """Return error if prerequisites aren't met.""" + try: + #NOTE(francois-charlier) Since 0.24 'collie cluster info -r' + # gives short output, but for compatibility reason we won't + # use it and just check if 'running' is in the output. 
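+            # (Editorial note, hedged) A healthy cluster is expected to print
+            # a line such as "Cluster status: running", so the substring
+            # check below should match it; any other status string raises
+            # the exception further down.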
+ (out, err) = self._execute('collie', 'cluster', 'info') + if 'status: running' not in out: + exception_message = (_("Sheepdog is not working: %s") % out) + raise exception.VolumeBackendAPIException( + data=exception_message) + + except processutils.ProcessExecutionError: + exception_message = _("Sheepdog is not working") + raise exception.VolumeBackendAPIException(data=exception_message) + + def create_cloned_volume(self, volume, src_vref): + raise NotImplementedError() + + def create_volume(self, volume): + """Create a sheepdog volume.""" + self._try_execute('qemu-img', 'create', + "sheepdog:%s" % volume['name'], + '%sG' % volume['size']) + + def create_volume_from_snapshot(self, volume, snapshot): + """Create a sheepdog volume from a snapshot.""" + self._try_execute('qemu-img', 'create', '-b', + "sheepdog:%s:%s" % (snapshot['volume_name'], + snapshot['name']), + "sheepdog:%s" % volume['name']) + + def delete_volume(self, volume): + """Delete a logical volume.""" + self._delete(volume) + + def _ensure_dir_exists(self, tmp_dir): + if tmp_dir and not os.path.exists(tmp_dir): + os.makedirs(tmp_dir) + + def _resize(self, volume, size=None): + if not size: + size = int(volume['size']) * units.GiB + + self._try_execute('collie', 'vdi', 'resize', + volume['name'], size) + + def _delete(self, volume): + self._try_execute('collie', 'vdi', 'delete', + volume['name']) + + def copy_image_to_volume(self, context, volume, image_service, image_id): + # use the image_conversion_dir as a temporary place to save the image + conversion_dir = CONF.image_conversion_dir + self._ensure_dir_exists(conversion_dir) + with tempfile.NamedTemporaryFile(dir=conversion_dir) as tmp: + # (wenhao): we don't need to convert to raw for sheepdog. + image_utils.fetch_verify_image(context, image_service, + image_id, tmp.name) + + # remove the image created by import before this function. 
+            # see volume/drivers/manager.py:_create_volume
+            self._delete(volume)
+            # convert and store into sheepdog
+            image_utils.convert_image(tmp.name, 'sheepdog:%s' % volume['name'],
+                                      'raw')
+            self._resize(volume)
+
+    def create_snapshot(self, snapshot):
+        """Create a sheepdog snapshot."""
+        self._try_execute('qemu-img', 'snapshot', '-c', snapshot['name'],
+                          "sheepdog:%s" % snapshot['volume_name'])
+
+    def delete_snapshot(self, snapshot):
+        """Delete a sheepdog snapshot."""
+        self._try_execute('collie', 'vdi', 'delete', snapshot['volume_name'],
+                          '-s', snapshot['name'])
+
+    def local_path(self, volume):
+        return "sheepdog:%s" % volume['name']
+
+    def ensure_export(self, context, volume):
+        """Safely and synchronously recreate an export for a logical volume."""
+        pass
+
+    def create_export(self, context, volume):
+        """Export a volume."""
+        pass
+
+    def remove_export(self, context, volume):
+        """Remove an export for a logical volume."""
+        pass
+
+    def initialize_connection(self, volume, connector):
+        return {
+            'driver_volume_type': 'sheepdog',
+            'data': {
+                'name': volume['name']
+            }
+        }
+
+    def terminate_connection(self, volume, connector, **kwargs):
+        pass
+
+    def _update_volume_stats(self):
+        stats = {}
+
+        backend_name = "sheepdog"
+        if self.configuration:
+            backend_name = self.configuration.safe_get('volume_backend_name')
+        stats["volume_backend_name"] = backend_name or 'sheepdog'
+        stats['vendor_name'] = 'Open Source'
+        stats['driver_version'] = self.VERSION
+        stats['storage_protocol'] = 'sheepdog'
+        stats['total_capacity_gb'] = 'unknown'
+        stats['free_capacity_gb'] = 'unknown'
+        stats['reserved_percentage'] = 0
+        stats['QoS_support'] = False
+
+        try:
+            stdout, _err = self._execute('collie', 'node', 'info', '-r')
+            m = self.stats_pattern.match(stdout)
+            total = float(m.group(1))
+            used = float(m.group(2))
+            stats['total_capacity_gb'] = total / units.GiB
+            stats['free_capacity_gb'] = (total - used) / units.GiB
+        except processutils.ProcessExecutionError:
+            LOG.exception(_('error refreshing volume stats'))
+
+        self._stats = stats
+
+    def get_volume_stats(self, refresh=False):
+        if refresh:
+            self._update_volume_stats()
+        return self._stats
+
+    def extend_volume(self, volume, new_size):
+        """Extend an existing volume."""
+        old_size = volume['size']
+
+        try:
+            size = int(new_size) * units.GiB
+            self._resize(volume, size=size)
+        except Exception:
+            msg = _('Failed to Extend Volume '
+                    '%(volname)s') % {'volname': volume['name']}
+            LOG.error(msg)
+            raise exception.VolumeBackendAPIException(data=msg)
+
+        LOG.debug(_("Extend volume from %(old_size)s GB to %(new_size)s GB."),
+                  {'old_size': old_size, 'new_size': new_size})
+
+    def backup_volume(self, context, backup, backup_service):
+        """Create a new backup from an existing volume."""
+        raise NotImplementedError()
+
+    def restore_backup(self, context, backup, volume, backup_service):
+        """Restore an existing backup to a new or existing volume."""
+        raise NotImplementedError()
diff --git a/cinder/volume/drivers/solidfire.py b/cinder/volume/drivers/solidfire.py
new file mode 100644
index 0000000000..ed79444d2a
--- /dev/null
+++ b/cinder/volume/drivers/solidfire.py
@@ -0,0 +1,771 @@
+# Copyright 2013 SolidFire Inc
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import base64 +import httplib +import json +import random +import socket +import string +import time +import uuid + +from oslo.config import cfg + +from cinder import context +from cinder import exception +from cinder.openstack.common import log as logging +from cinder.openstack.common import timeutils +from cinder import units +from cinder.volume.drivers.san.san import SanISCSIDriver +from cinder.volume import qos_specs +from cinder.volume import volume_types + +LOG = logging.getLogger(__name__) + +sf_opts = [ + cfg.BoolOpt('sf_emulate_512', + default=True, + help='Set 512 byte emulation on volume creation; '), + + cfg.BoolOpt('sf_allow_tenant_qos', + default=False, + help='Allow tenants to specify QOS on create'), + + cfg.StrOpt('sf_account_prefix', + default=None, + help='Create SolidFire accounts with this prefix. Any string ' + 'can be used here, but the string \"hostname\" is special ' + 'and will create a prefix using the cinder node hostsname ' + '(previous default behavior). The default is NO prefix.'), + + cfg.IntOpt('sf_api_port', + default=443, + help='SolidFire API port. Useful if the device api is behind ' + 'a proxy on a different port.'), ] + + +CONF = cfg.CONF +CONF.register_opts(sf_opts) + + +class SolidFireDriver(SanISCSIDriver): + """OpenStack driver to enable SolidFire cluster. + + Version history: + 1.0 - Initial driver + 1.1 - Refactor, clone support, qos by type and minor bug fixes + + """ + + VERSION = '1.2.0' + + sf_qos_dict = {'slow': {'minIOPS': 100, + 'maxIOPS': 200, + 'burstIOPS': 200}, + 'medium': {'minIOPS': 200, + 'maxIOPS': 400, + 'burstIOPS': 400}, + 'fast': {'minIOPS': 500, + 'maxIOPS': 1000, + 'burstIOPS': 1000}, + 'performant': {'minIOPS': 2000, + 'maxIOPS': 4000, + 'burstIOPS': 4000}, + 'off': None} + + sf_qos_keys = ['minIOPS', 'maxIOPS', 'burstIOPS'] + cluster_stats = {} + + def __init__(self, *args, **kwargs): + super(SolidFireDriver, self).__init__(*args, **kwargs) + self.configuration.append_config_values(sf_opts) + try: + self._update_cluster_status() + except exception.SolidFireAPIException: + pass + + def _issue_api_request(self, method_name, params, version='1.0'): + """All API requests to SolidFire device go through this method. + + Simple json-rpc web based API calls. + each call takes a set of parameters (dict) + and returns results in a dict as well. 
+
+        """
+        max_simultaneous_clones = ['xMaxSnapshotsPerVolumeExceeded',
+                                   'xMaxClonesPerVolumeExceeded',
+                                   'xMaxSnapshotsPerNodeExceeded',
+                                   'xMaxClonesPerNodeExceeded']
+        host = self.configuration.san_ip
+        port = self.configuration.sf_api_port
+
+        cluster_admin = self.configuration.san_login
+        cluster_password = self.configuration.san_password
+
+        # NOTE(jdg): We're wrapping a retry loop for a known XDB issue
+        # Shows up in very high request rates (ie create 1000 volumes)
+        # we have to wrap the whole sequence because the request_id
+        # can't be re-used
+        retry_count = 5
+        while retry_count > 0:
+            request_id = hash(uuid.uuid4())  # just generate a random number
+            command = {'method': method_name,
+                       'id': request_id}
+
+            if params is not None:
+                command['params'] = params
+
+            payload = json.dumps(command, ensure_ascii=False)
+            payload = payload.encode('utf-8')
+            header = {'Content-Type': 'application/json-rpc; charset=utf-8'}
+
+            if cluster_password is not None:
+                # base64.encodestring includes a newline character
+                # in the result, make sure we strip it off
+                auth_key = base64.encodestring('%s:%s' % (cluster_admin,
+                                               cluster_password))[:-1]
+                header['Authorization'] = 'Basic %s' % auth_key
+
+            LOG.debug(_("Payload for SolidFire API call: %s"), payload)
+
+            api_endpoint = '/json-rpc/%s' % version
+            connection = httplib.HTTPSConnection(host, port)
+            try:
+                connection.request('POST', api_endpoint, payload, header)
+            except Exception as ex:
+                LOG.error(_('Failed to make httplib connection '
+                            'SolidFire Cluster: %s (verify san_ip '
+                            'settings)') % ex.message)
+                msg = _("Failed to make httplib connection: %s") % ex.message
+                raise exception.SolidFireAPIException(msg)
+            response = connection.getresponse()
+
+            data = {}
+            if response.status != 200:
+                connection.close()
+                LOG.error(_('Request to SolidFire cluster returned '
+                            'bad status: %(status)s / %(reason)s (check '
+                            'san_login/san_password settings)') %
+                          {'status': response.status,
+                           'reason': response.reason})
+                msg = (_("HTTP request failed, with status: %(status)s "
+                         "and reason: %(reason)s") %
+                       {'status': response.status,
+                        'reason': response.reason})
+                raise exception.SolidFireAPIException(msg)
+
+            else:
+                data = response.read()
+                try:
+                    data = json.loads(data)
+                except (TypeError, ValueError) as exc:
+                    connection.close()
+                    msg = _("Call to json.loads() raised "
+                            "an exception: %s") % exc
+                    raise exception.SfJsonEncodeFailure(msg)
+
+                connection.close()
+
+            LOG.debug(_("Results of SolidFire API call: %s"), data)
+
+            if 'error' in data:
+                if data['error']['name'] in max_simultaneous_clones:
+                    LOG.warning(_('Clone operation '
+                                  'encountered: %s') % data['error']['name'])
+                    LOG.warning(_(
+                        'Waiting for outstanding operation '
+                        'before retrying snapshot: %s') % params['name'])
+                    time.sleep(5)
+                    # Don't decrement the retry count for this one
+                elif 'xDBVersionMismatch' in data['error']['name']:
+                    LOG.warning(_('Detected xDBVersionMismatch, '
+                                  'retry %s of 5') % (5 - retry_count))
+                    time.sleep(1)
+                    retry_count -= 1
+                elif 'xUnknownAccount' in data['error']['name']:
+                    retry_count = 0
+                else:
+                    msg = _("API response: %s") % data
+                    raise exception.SolidFireAPIException(msg)
+            else:
+                retry_count = 0
+
+        return data
+
+    def _get_volumes_by_sfaccount(self, account_id):
+        """Get all volumes on cluster for specified account."""
+        params = {'accountID': account_id}
+        data = self._issue_api_request('ListVolumesForAccount', params)
+        if 'result' in data:
+            return data['result']['volumes']
+
+    def _get_sfaccount_by_name(self, sf_account_name):
+        """Get
SolidFire account object by name.""" + sfaccount = None + params = {'username': sf_account_name} + data = self._issue_api_request('GetAccountByName', params) + if 'result' in data and 'account' in data['result']: + LOG.debug(_('Found solidfire account: %s'), sf_account_name) + sfaccount = data['result']['account'] + return sfaccount + + def _get_sf_account_name(self, project_id): + """Build the SolidFire account name to use.""" + prefix = self.configuration.sf_account_prefix or '' + if prefix == 'hostname': + prefix = socket.gethostname() + return '%s%s%s' % (prefix, '-' if prefix else '', project_id) + + def _get_sfaccount(self, project_id): + sf_account_name = self._get_sf_account_name(project_id) + sfaccount = self._get_sfaccount_by_name(sf_account_name) + if sfaccount is None: + raise exception.SolidFireAccountNotFound( + account_name=sf_account_name) + + return sfaccount + + def _create_sfaccount(self, project_id): + """Create account on SolidFire device if it doesn't already exist. + + We're first going to check if the account already exits, if it does + just return it. If not, then create it. + + """ + + sf_account_name = self._get_sf_account_name(project_id) + sfaccount = self._get_sfaccount_by_name(sf_account_name) + if sfaccount is None: + LOG.debug(_('solidfire account: %s does not exist, create it...'), + sf_account_name) + chap_secret = self._generate_random_string(12) + params = {'username': sf_account_name, + 'initiatorSecret': chap_secret, + 'targetSecret': chap_secret, + 'attributes': {}} + data = self._issue_api_request('AddAccount', params) + if 'result' in data: + sfaccount = self._get_sfaccount_by_name(sf_account_name) + + return sfaccount + + def _get_cluster_info(self): + """Query the SolidFire cluster for some property info.""" + params = {} + data = self._issue_api_request('GetClusterInfo', params) + if 'result' not in data: + msg = _("API response: %s") % data + raise exception.SolidFireAPIException(msg) + + return data['result'] + + def _do_export(self, volume): + """Gets the associated account, retrieves CHAP info and updates.""" + sfaccount = self._get_sfaccount(volume['project_id']) + + model_update = {} + model_update['provider_auth'] = ('CHAP %s %s' + % (sfaccount['username'], + sfaccount['targetSecret'])) + + return model_update + + def _generate_random_string(self, length): + """Generates random_string to use for CHAP password.""" + + char_set = string.ascii_uppercase + string.digits + return ''.join(random.sample(char_set, length)) + + def _get_model_info(self, sfaccount, sf_volume_id): + """Gets the connection info for specified account and volume.""" + cluster_info = self._get_cluster_info() + iscsi_portal = cluster_info['clusterInfo']['svip'] + ':3260' + chap_secret = sfaccount['targetSecret'] + + found_volume = False + iteration_count = 0 + while not found_volume and iteration_count < 600: + volume_list = self._get_volumes_by_sfaccount( + sfaccount['accountID']) + iqn = None + for v in volume_list: + if v['volumeID'] == sf_volume_id: + iqn = v['iqn'] + found_volume = True + break + if not found_volume: + time.sleep(2) + iteration_count += 1 + + if not found_volume: + LOG.error(_('Failed to retrieve volume SolidFire-' + 'ID: %s in get_by_account!') % sf_volume_id) + raise exception.VolumeNotFound(volume_id=sf_volume_id) + + model_update = {} + # NOTE(john-griffith): SF volumes are always at lun 0 + model_update['provider_location'] = ('%s %s %s' + % (iscsi_portal, iqn, 0)) + model_update['provider_auth'] = ('CHAP %s %s' + % (sfaccount['username'], + 
chap_secret)) + if not self.configuration.sf_emulate_512: + model_update['provider_geometry'] = ('%s %s' % (4096, 4096)) + + return model_update + + def _do_clone_volume(self, src_uuid, src_project_id, v_ref): + """Create a clone of an existing volume. + + Currently snapshots are the same as clones on the SF cluster. + Due to the way the SF cluster works there's no loss in efficiency + or space usage between the two. The only thing different right + now is the restore snapshot functionality which has not been + implemented in the pre-release version of the SolidFire Cluster. + + """ + attributes = {} + qos = {} + + sfaccount = self._get_sfaccount(src_project_id) + params = {'accountID': sfaccount['accountID']} + + sf_vol = self._get_sf_volume(src_uuid, params) + if sf_vol is None: + raise exception.VolumeNotFound(volume_id=src_uuid) + + if src_project_id != v_ref['project_id']: + sfaccount = self._create_sfaccount(v_ref['project_id']) + + if v_ref.get('size', None): + new_size = v_ref['size'] + else: + new_size = v_ref['volume_size'] + + params = {'volumeID': int(sf_vol['volumeID']), + 'name': 'UUID-%s' % v_ref['id'], + 'newSize': int(new_size * units.GiB), + 'newAccountID': sfaccount['accountID']} + data = self._issue_api_request('CloneVolume', params) + + if (('result' not in data) or ('volumeID' not in data['result'])): + msg = _("API response: %s") % data + raise exception.SolidFireAPIException(msg) + sf_volume_id = data['result']['volumeID'] + + if (self.configuration.sf_allow_tenant_qos and + v_ref.get('volume_metadata')is not None): + qos = self._set_qos_presets(v_ref) + + ctxt = context.get_admin_context() + type_id = v_ref.get('volume_type_id', None) + if type_id is not None: + qos = self._set_qos_by_volume_type(ctxt, type_id) + + # NOTE(jdg): all attributes are copied via clone, need to do an update + # to set any that were provided + params = {'volumeID': sf_volume_id} + + create_time = timeutils.strtime(v_ref['created_at']) + attributes = {'uuid': v_ref['id'], + 'is_clone': 'True', + 'src_uuid': src_uuid, + 'created_at': create_time} + if qos: + params['qos'] = qos + for k, v in qos.items(): + attributes[k] = str(v) + + params['attributes'] = attributes + data = self._issue_api_request('ModifyVolume', params) + + model_update = self._get_model_info(sfaccount, sf_volume_id) + if model_update is None: + mesg = _('Failed to get model update from clone') + raise exception.SolidFireAPIException(mesg) + + return (data, sfaccount, model_update) + + def _do_volume_create(self, project_id, params): + sfaccount = self._create_sfaccount(project_id) + + params['accountID'] = sfaccount['accountID'] + data = self._issue_api_request('CreateVolume', params) + + if (('result' not in data) or ('volumeID' not in data['result'])): + msg = _("Failed volume create: %s") % data + raise exception.SolidFireAPIException(msg) + + sf_volume_id = data['result']['volumeID'] + return self._get_model_info(sfaccount, sf_volume_id) + + def _set_qos_presets(self, volume): + qos = {} + valid_presets = self.sf_qos_dict.keys() + + #First look to see if they included a preset + presets = [i.value for i in volume.get('volume_metadata') + if i.key == 'sf-qos' and i.value in valid_presets] + if len(presets) > 0: + if len(presets) > 1: + LOG.warning(_('More than one valid preset was ' + 'detected, using %s') % presets[0]) + qos = self.sf_qos_dict[presets[0]] + else: + #look for explicit settings + for i in volume.get('volume_metadata'): + if i.key in self.sf_qos_keys: + qos[i.key] = int(i.value) + return qos + + def 
_set_qos_by_volume_type(self, ctxt, type_id): + qos = {} + volume_type = volume_types.get_volume_type(ctxt, type_id) + qos_specs_id = volume_type.get('qos_specs_id') + specs = volume_type.get('extra_specs') + + # NOTE(jdg): We prefer the qos_specs association + # and over-ride any existing + # extra-specs settings if present + if qos_specs_id is not None: + kvs = qos_specs.get_qos_specs(ctxt, qos_specs_id)['specs'] + else: + kvs = specs + + for key, value in kvs.iteritems(): + if ':' in key: + fields = key.split(':') + key = fields[1] + if key in self.sf_qos_keys: + qos[key] = int(value) + return qos + + def _get_sf_volume(self, uuid, params): + data = self._issue_api_request('ListVolumesForAccount', params) + if 'result' not in data: + msg = _("Failed to get SolidFire Volume: %s") % data + raise exception.SolidFireAPIException(msg) + + found_count = 0 + sf_volref = None + for v in data['result']['volumes']: + if uuid in v['name']: + found_count += 1 + sf_volref = v + LOG.debug(_("Mapped SolidFire volumeID %(sfid)s " + "to cinder ID %(uuid)s.") % + {'sfid': v['volumeID'], + 'uuid': uuid}) + + if found_count == 0: + # NOTE(jdg): Previously we would raise here, but there are cases + # where this might be a cleanup for a failed delete. + # Until we get better states we'll just log an error + LOG.error(_("Volume %s, not found on SF Cluster."), uuid) + + if found_count > 1: + LOG.error(_("Found %(count)s volumes mapped to id: %(uuid)s.") % + {'count': found_count, + 'uuid': uuid}) + raise exception.DuplicateSfVolumeNames(vol_name=uuid) + + return sf_volref + + def create_volume(self, volume): + """Create volume on SolidFire device. + + The account is where CHAP settings are derived from, volume is + created and exported. Note that the new volume is immediately ready + for use. + + One caveat here is that an existing user account must be specified + in the API call to create a new volume. We use a set algorithm to + determine account info based on passed in cinder volume object. First + we check to see if the account already exists (and use it), or if it + does not already exist, we'll go ahead and create it. + + """ + slice_count = 1 + attributes = {} + qos = {} + + if (self.configuration.sf_allow_tenant_qos and + volume.get('volume_metadata')is not None): + qos = self._set_qos_presets(volume) + + ctxt = context.get_admin_context() + type_id = volume['volume_type_id'] + if type_id is not None: + qos = self._set_qos_by_volume_type(ctxt, type_id) + + create_time = timeutils.strtime(volume['created_at']) + attributes = {'uuid': volume['id'], + 'is_clone': 'False', + 'created_at': create_time} + if qos: + for k, v in qos.items(): + attributes[k] = str(v) + + params = {'name': 'UUID-%s' % volume['id'], + 'accountID': None, + 'sliceCount': slice_count, + 'totalSize': int(volume['size'] * units.GiB), + 'enable512e': self.configuration.sf_emulate_512, + 'attributes': attributes, + 'qos': qos} + + return self._do_volume_create(volume['project_id'], params) + + def create_cloned_volume(self, volume, src_vref): + """Create a clone of an existing volume.""" + (data, sfaccount, model) = self._do_clone_volume( + src_vref['id'], + src_vref['project_id'], + volume) + + return model + + def delete_volume(self, volume): + """Delete SolidFire Volume from device. + + SolidFire allows multiple volumes with same name, + volumeID is what's guaranteed unique. 
+ + """ + + LOG.debug(_("Enter SolidFire delete_volume...")) + + sfaccount = self._get_sfaccount(volume['project_id']) + if sfaccount is None: + LOG.error(_("Account for Volume ID %s was not found on " + "the SolidFire Cluster!") % volume['id']) + LOG.error(_("This usually means the volume was never " + "successfully created.")) + return + + params = {'accountID': sfaccount['accountID']} + + sf_vol = self._get_sf_volume(volume['id'], params) + + if sf_vol is not None: + params = {'volumeID': sf_vol['volumeID']} + data = self._issue_api_request('DeleteVolume', params) + + if 'result' not in data: + msg = _("Failed to delete SolidFire Volume: %s") % data + raise exception.SolidFireAPIException(msg) + else: + LOG.error(_("Volume ID %s was not found on " + "the SolidFire Cluster!"), volume['id']) + + LOG.debug(_("Leaving SolidFire delete_volume")) + + def ensure_export(self, context, volume): + """Verify the iscsi export info.""" + LOG.debug(_("Executing SolidFire ensure_export...")) + try: + return self._do_export(volume) + except exception.SolidFireAPIException: + return None + + def create_export(self, context, volume): + """Setup the iscsi export info.""" + LOG.debug(_("Executing SolidFire create_export...")) + return self._do_export(volume) + + def delete_snapshot(self, snapshot): + """Delete the specified snapshot from the SolidFire cluster.""" + self.delete_volume(snapshot) + + def create_snapshot(self, snapshot): + """Create a snapshot of a volume on the SolidFire cluster. + + Note that for SolidFire Clusters currently there is no snapshot + implementation. Due to the way SF does cloning there's no performance + hit or extra space used. The only thing that's lacking from this is + the abilit to restore snaps. + + After GA a true snapshot implementation will be available with + restore at which time we'll rework this appropriately. + + """ + (data, sfaccount, model) = self._do_clone_volume( + snapshot['volume_id'], + snapshot['project_id'], + snapshot) + + def create_volume_from_snapshot(self, volume, snapshot): + """Create a volume from the specified snapshot.""" + (data, sfaccount, model) = self._do_clone_volume( + snapshot['id'], + snapshot['project_id'], + volume) + + return model + + def get_volume_stats(self, refresh=False): + """Get volume status. + + If 'refresh' is True, run update first. 
+        The name is a bit misleading, as the majority of the data here
+        is cluster data.
+        """
+        if refresh:
+            try:
+                self._update_cluster_status()
+            except exception.SolidFireAPIException:
+                pass
+
+        return self.cluster_stats
+
+    def extend_volume(self, volume, new_size):
+        """Extend an existing volume."""
+        LOG.debug(_("Entering SolidFire extend_volume..."))
+
+        sfaccount = self._get_sfaccount(volume['project_id'])
+        params = {'accountID': sfaccount['accountID']}
+
+        sf_vol = self._get_sf_volume(volume['id'], params)
+
+        if sf_vol is None:
+            LOG.error(_("Volume ID %s was not found on "
+                        "the SolidFire Cluster!"), volume['id'])
+            raise exception.VolumeNotFound(volume_id=volume['id'])
+
+        params = {
+            'volumeID': sf_vol['volumeID'],
+            'totalSize': int(new_size * units.GiB)
+        }
+        data = self._issue_api_request('ModifyVolume',
+                                       params, version='5.0')
+
+        if 'result' not in data:
+            raise exception.SolidFireAPIDataException(data=data)
+
+        LOG.debug(_("Leaving SolidFire extend_volume"))
+
+    def _update_cluster_status(self):
+        """Retrieve status info for the Cluster."""
+
+        LOG.debug(_("Updating cluster status info"))
+
+        params = {}
+
+        # NOTE(jdg): The SF api provides an UNBELIEVABLE amount
+        # of stats data, this is just one of the calls
+        results = self._issue_api_request('GetClusterCapacity', params)
+        if 'result' not in results:
+            LOG.error(_('Failed to get updated stats'))
+
+        results = results['result']['clusterCapacity']
+        free_capacity =\
+            results['maxProvisionedSpace'] - results['usedSpace']
+
+        data = {}
+        backend_name = self.configuration.safe_get('volume_backend_name')
+        data["volume_backend_name"] = backend_name or self.__class__.__name__
+        data["vendor_name"] = 'SolidFire Inc'
+        data["driver_version"] = self.VERSION
+        data["storage_protocol"] = 'iSCSI'
+
+        data['total_capacity_gb'] = results['maxProvisionedSpace']
+
+        data['free_capacity_gb'] = float(free_capacity)
+        data['reserved_percentage'] = 0
+        data['QoS_support'] = True
+        data['compression_percent'] =\
+            results['compressionPercent']
+        data['deduplication_percent'] =\
+            results['deDuplicationPercent']
+        data['thin_provision_percent'] =\
+            results['thinProvisioningPercent']
+        self.cluster_stats = data
+
+    def attach_volume(self, context, volume,
+                      instance_uuid, host_name,
+                      mountpoint):
+
+        LOG.debug(_("Entering SolidFire attach_volume..."))
+        sfaccount = self._get_sfaccount(volume['project_id'])
+        params = {'accountID': sfaccount['accountID']}
+
+        sf_vol = self._get_sf_volume(volume['id'], params)
+        if sf_vol is None:
+            LOG.error(_("Volume ID %s was not found on "
+                        "the SolidFire Cluster!"), volume['id'])
+            raise exception.VolumeNotFound(volume_id=volume['id'])
+
+        attributes = sf_vol['attributes']
+        attributes['attach_time'] = volume.get('attach_time', None)
+        attributes['attached_to'] = instance_uuid
+        params = {
+            'volumeID': sf_vol['volumeID'],
+            'attributes': attributes
+        }
+
+        data = self._issue_api_request('ModifyVolume', params)
+
+        if 'result' not in data:
+            raise exception.SolidFireAPIDataException(data=data)
+
+    def detach_volume(self, context, volume):
+
+        LOG.debug(_("Entering SolidFire detach_volume..."))
+        sfaccount = self._get_sfaccount(volume['project_id'])
+        params = {'accountID': sfaccount['accountID']}
+
+        sf_vol = self._get_sf_volume(volume['id'], params)
+        if sf_vol is None:
+            LOG.error(_("Volume ID %s was not found on "
+                        "the SolidFire Cluster!"), volume['id'])
+            raise exception.VolumeNotFound(volume_id=volume['id'])
+
+        attributes = sf_vol['attributes']
+        attributes['attach_time'] = None
+
attributes['attached_to'] = None + params = { + 'volumeID': sf_vol['volumeID'], + 'attributes': attributes + } + + data = self._issue_api_request('ModifyVolume', params) + + if 'result' not in data: + raise exception.SolidFireAPIDataException(data=data) + + def accept_transfer(self, context, volume, + new_user, new_project): + + sfaccount = self._get_sfaccount(volume['project_id']) + params = {'accountID': sfaccount['accountID']} + sf_vol = self._get_sf_volume(volume['id'], params) + + if new_project != volume['project_id']: + # do a create_sfaccount here as this tenant + # may not exist on the cluster yet + sfaccount = self._create_sfaccount(new_project) + + params = { + 'volumeID': sf_vol['volumeID'], + 'accountID': sfaccount['accountID'] + } + data = self._issue_api_request('ModifyVolume', + params, version='5.0') + + if 'result' not in data: + raise exception.SolidFireAPIDataException(data=data) + + LOG.debug(_("Leaving SolidFire transfer volume")) diff --git a/cinder/tests/api/openstack/volume/extensions/__init__.py b/cinder/volume/drivers/vmware/__init__.py similarity index 81% rename from cinder/tests/api/openstack/volume/extensions/__init__.py rename to cinder/volume/drivers/vmware/__init__.py index 848908a953..75510bfee3 100644 --- a/cinder/tests/api/openstack/volume/extensions/__init__.py +++ b/cinder/volume/drivers/vmware/__init__.py @@ -1,6 +1,5 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2011 OpenStack LLC +# Copyright (c) 2013 VMware, Inc. +# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -13,3 +12,7 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. + +""" +:mod:`vmware` -- Volume support for VMware compatible datastores. +""" diff --git a/cinder/volume/drivers/vmware/api.py b/cinder/volume/drivers/vmware/api.py new file mode 100644 index 0000000000..7b300193ae --- /dev/null +++ b/cinder/volume/drivers/vmware/api.py @@ -0,0 +1,308 @@ +# vim: expandtab tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2013 VMware, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Session and API call management for VMware ESX/VC server. +Provides abstraction over cinder.volume.drivers.vmware.vim.Vim SOAP calls. +""" + +from eventlet import event + +from cinder.openstack.common import log as logging +from cinder.openstack.common import loopingcall +from cinder.volume.drivers.vmware import error_util +from cinder.volume.drivers.vmware import vim +from cinder.volume.drivers.vmware import vim_util + +LOG = logging.getLogger(__name__) + + +class Retry(object): + """Decorator for retrying a function upon suggested exceptions. + + The method retries for given number of times and the sleep + time increments till the max sleep time is reached. 
+ If max retries is set to -1, then the decorated function is + invoked indefinitely till no exception is thrown or if + the caught exception is not in the list of suggested exceptions. + """ + + def __init__(self, max_retry_count=-1, inc_sleep_time=10, + max_sleep_time=60, exceptions=()): + """Initialize retry object based on input params. + + :param max_retry_count: Max number of times, a function must be + retried when one of input 'exceptions' + is caught. The default -1 will always + retry the function till a non-exception + case, or an un-wanted error case arises. + :param inc_sleep_time: Incremental time in seconds for sleep time + between retrial + :param max_sleep_time: Max sleep time beyond which the sleep time will + not be incremented using param inc_sleep_time + and max_sleep_time will be used as sleep time + :param exceptions: Suggested exceptions for which the function must be + retried + """ + self._max_retry_count = max_retry_count + self._inc_sleep_time = inc_sleep_time + self._max_sleep_time = max_sleep_time + self._exceptions = exceptions + self._retry_count = 0 + self._sleep_time = 0 + + def __call__(self, f): + + def _func(done, *args, **kwargs): + try: + result = f(*args, **kwargs) + done.send(result) + except self._exceptions as excep: + LOG.exception(_("Failure while invoking function: " + "%(func)s. Error: %(excep)s.") % + {'func': f.__name__, 'excep': excep}) + if (self._max_retry_count != -1 and + self._retry_count >= self._max_retry_count): + done.send_exception(excep) + else: + self._retry_count += 1 + self._sleep_time += self._inc_sleep_time + return self._sleep_time + except Exception as excep: + done.send_exception(excep) + return 0 + + def func(*args, **kwargs): + done = event.Event() + loop = loopingcall.DynamicLoopingCall(_func, done, *args, **kwargs) + loop.start(periodic_interval_max=self._max_sleep_time) + result = done.wait() + loop.stop() + return result + + return func + + +class VMwareAPISession(object): + """Sets up a session with the server and handles all calls made to it.""" + + @Retry(exceptions=(Exception)) + def __init__(self, server_ip, server_username, server_password, + api_retry_count, task_poll_interval, scheme='https', + create_session=True, wsdl_loc=None): + """Constructs session object. 
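+
+        Illustrative construction (the address and credentials are
+        placeholders)::
+
+            session = VMwareAPISession('10.0.0.1', 'admin', 'password',
+                                       api_retry_count=10,
+                                       task_poll_interval=5)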
+
+        :param server_ip: IP address of ESX/VC server
+        :param server_username: Username of ESX/VC server admin user
+        :param server_password: Password for param server_username
+        :param api_retry_count: Number of times an API must be retried upon
+                                session/connection related errors
+        :param task_poll_interval: Sleep time in seconds for polling an
+                                   on-going async task as part of the API call
+        :param scheme: http or https protocol
+        :param create_session: Boolean whether to set up connection at the
+                               time of instance creation
+        :param wsdl_loc: WSDL file location for invoking SOAP calls on server
+                         using suds
+        """
+        self._server_ip = server_ip
+        self._server_username = server_username
+        self._server_password = server_password
+        self._wsdl_loc = wsdl_loc
+        self._api_retry_count = api_retry_count
+        self._task_poll_interval = task_poll_interval
+        self._scheme = scheme
+        self._session_id = None
+        self._vim = None
+        if create_session:
+            self.create_session()
+
+    @property
+    def vim(self):
+        if not self._vim:
+            self._vim = vim.Vim(protocol=self._scheme, host=self._server_ip,
+                                wsdl_loc=self._wsdl_loc)
+        return self._vim
+
+    def create_session(self):
+        """Establish session with the server."""
+        # Login and setup the session with the server for making
+        # API calls
+        session_manager = self.vim.service_content.sessionManager
+        session = self.vim.Login(session_manager,
+                                 userName=self._server_username,
+                                 password=self._server_password)
+        # Terminate the earlier session, if possible (For the sake of
+        # preserving sessions as there is a limit to the number of
+        # sessions we can have)
+        if self._session_id:
+            try:
+                self.vim.TerminateSession(session_manager,
+                                          sessionId=[self._session_id])
+            except Exception as excep:
+                # This exception is something we can live with. It is
+                # just an extra caution on our side. The session may
+                # have been cleared. We could have made a call to
+                # SessionIsActive, but that is an overhead because we
+                # anyway would have to call TerminateSession.
+                LOG.exception(_("Error while terminating session: %s.") %
+                              excep)
+        self._session_id = session.key
+        LOG.info(_("Successfully established connection to the server."))
+
+    def __del__(self):
+        """Logs out the session."""
+        try:
+            self.vim.Logout(self.vim.service_content.sessionManager)
+        except Exception as excep:
+            LOG.exception(_("Error while logging out the user: %s.") %
+                          excep)
+
+    def invoke_api(self, module, method, *args, **kwargs):
+        """Wrapper method for invoking APIs.
+
+        Here we retry the API calls for exceptions which may come because
+        of session overload.
+
+        Make sure that if a Vim instance is passed here, this session's
+        Vim (self.vim) instance is used, since we re-establish the session
+        in case it has timed out.
+
+        :param module: Module invoking the VI SDK calls
+        :param method: Method in the module that invokes the VI SDK call
+        :param args: Arguments to the method
+        :param kwargs: Keyword arguments to the method
+        :return: Response of the API call
+        """
+
+        @Retry(max_retry_count=self._api_retry_count,
+               exceptions=(error_util.VimException,))
+        def _invoke_api(module, method, *args, **kwargs):
+            last_fault_list = []
+            while True:
+                try:
+                    api_method = getattr(module, method)
+                    return api_method(*args, **kwargs)
+                except error_util.VimFaultException as excep:
+                    if error_util.NOT_AUTHENTICATED not in excep.fault_list:
+                        raise
+                    # If it is a not-authenticated fault, we re-authenticate
+                    # the user and retry the API invocation.
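+                    # For example (illustrative): a query for VMs on an
+                    # empty host and a query made on a timed-out session
+                    # can both come back empty, which is why the
+                    # bookkeeping below is needed.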
+ + # Because of the idle session returning an empty + # RetrieveProperties response and also the same is + # returned when there is an empty answer to a query + # (e.g. no VMs on the host), we have no way to + # differentiate. + # So if the previous response was also an empty + # response and after creating a new session, we get + # the same empty response, then we are sure of the + # response being an empty response. + if error_util.NOT_AUTHENTICATED in last_fault_list: + return [] + last_fault_list = excep.fault_list + LOG.exception(_("Not authenticated error occurred. " + "Will create session and try " + "API call again: %s.") % excep) + self.create_session() + + return _invoke_api(module, method, *args, **kwargs) + + def wait_for_task(self, task): + """Return a deferred that will give the result of the given task. + + The task is polled until it completes. The method returns the task + information upon successful completion. + + :param task: Managed object reference of the task + :return: Task info upon successful completion of the task + """ + done = event.Event() + loop = loopingcall.FixedIntervalLoopingCall(self._poll_task, + task, done) + loop.start(self._task_poll_interval) + task_info = done.wait() + loop.stop() + return task_info + + def _poll_task(self, task, done): + """Poll the given task. + + If the task completes successfully then returns task info. + In case of error sends back appropriate error. + + :param task: Managed object reference of the task + :param event: Event that captures task status + """ + try: + task_info = self.invoke_api(vim_util, 'get_object_property', + self.vim, task, 'info') + if task_info.state in ['queued', 'running']: + # If task already completed on server, it will not return + # the progress. + if hasattr(task_info, 'progress'): + LOG.debug(_("Task: %(task)s progress: %(prog)s.") % + {'task': task, 'prog': task_info.progress}) + return + elif task_info.state == 'success': + LOG.debug(_("Task %s status: success.") % task) + done.send(task_info) + else: + error_msg = str(task_info.error.localizedMessage) + LOG.exception(_("Task: %(task)s failed with error: %(err)s.") % + {'task': task, 'err': error_msg}) + done.send_exception(error_util.VimFaultException([], + error_msg)) + except Exception as excep: + LOG.exception(_("Task: %(task)s failed with error: %(err)s.") % + {'task': task, 'err': excep}) + done.send_exception(excep) + + def wait_for_lease_ready(self, lease): + done = event.Event() + loop = loopingcall.FixedIntervalLoopingCall(self._poll_lease, + lease, + done) + loop.start(self._task_poll_interval) + done.wait() + loop.stop() + + def _poll_lease(self, lease, done): + try: + state = self.invoke_api(vim_util, 'get_object_property', + self.vim, lease, 'state') + if state == 'ready': + # done + LOG.debug(_("Lease is ready.")) + done.send() + return + elif state == 'initializing': + LOG.debug(_("Lease initializing...")) + return + elif state == 'error': + error_msg = self.invoke_api(vim_util, 'get_object_property', + self.vim, lease, 'error') + LOG.exception(error_msg) + excep = error_util.VimFaultException([], error_msg) + done.send_exception(excep) + else: + # unknown state - complain + error_msg = _("Error: unknown lease state %s.") % state + raise error_util.VimFaultException([], error_msg) + except Exception as excep: + LOG.exception(excep) + done.send_exception(excep) diff --git a/cinder/volume/drivers/vmware/error_util.py b/cinder/volume/drivers/vmware/error_util.py new file mode 100644 index 0000000000..0be99d9539 --- /dev/null +++ 
b/cinder/volume/drivers/vmware/error_util.py
@@ -0,0 +1,47 @@
+# Copyright (c) 2013 VMware, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Exception classes and SOAP response error checking module.
+"""
+
+from cinder import exception
+
+NOT_AUTHENTICATED = 'NotAuthenticated'
+
+
+class VimException(exception.CinderException):
+    """The VIM Exception class."""
+
+    def __init__(self, msg):
+        exception.CinderException.__init__(self, msg)
+
+
+class SessionOverLoadException(VimException):
+    """Session Overload Exception."""
+    pass
+
+
+class VimAttributeException(VimException):
+    """VI Attribute Error."""
+    pass
+
+
+class VimFaultException(exception.VolumeBackendAPIException):
+    """The VIM Fault exception class."""
+
+    def __init__(self, fault_list, msg):
+        exception.VolumeBackendAPIException.__init__(self, msg)
+        self.fault_list = fault_list
diff --git a/cinder/volume/drivers/vmware/io_util.py b/cinder/volume/drivers/vmware/io_util.py
new file mode 100644
index 0000000000..35be2fc4e4
--- /dev/null
+++ b/cinder/volume/drivers/vmware/io_util.py
@@ -0,0 +1,196 @@
+# Copyright (c) 2013 VMware, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Utility classes for the time-saving transfer of data from a reader to a
+writer, using a LightQueue as a pipe between them.
+"""
+
+from eventlet import event
+from eventlet import greenthread
+from eventlet import queue
+
+from cinder import exception
+from cinder.openstack.common import log as logging
+
+LOG = logging.getLogger(__name__)
+IO_THREAD_SLEEP_TIME = .01
+GLANCE_POLL_INTERVAL = 5
+
+
+class ThreadSafePipe(queue.LightQueue):
+    """The pipe to hold the data which the reader writes to and the writer
+    reads from.
+    """
+    def __init__(self, maxsize, max_transfer_size):
+        queue.LightQueue.__init__(self, maxsize)
+        self.max_transfer_size = max_transfer_size
+        self.transferred = 0
+
+    def read(self, chunk_size):
+        """Read data from the pipe.
+
+        Chunksize is ignored because we have ensured that the data chunks
+        written to the pipe by the reader are the same as the chunks asked
+        for by the writer.
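+
+        Illustrative pairing of the two ends (sizes and the data chunk
+        are placeholders)::
+
+            pipe = ThreadSafePipe(maxsize=10, max_transfer_size=1024)
+            pipe.write(data_chunk)   # producer side
+            chunk = pipe.read(None)  # consumer side; chunk_size is unused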
+ """ + if self.transferred < self.max_transfer_size: + data_item = self.get() + self.transferred += len(data_item) + LOG.debug(_("Read %(bytes)s out of %(max)s from ThreadSafePipe.") % + {'bytes': self.transferred, + 'max': self.max_transfer_size}) + return data_item + else: + LOG.debug(_("Completed transfer of size %s.") % self.transferred) + return "" + + def write(self, data): + """Put a data item in the pipe.""" + self.put(data) + + def seek(self, offset, whence=0): + """Set the file's current position at the offset.""" + pass + + def tell(self): + """Get size of the file to be read.""" + return self.max_transfer_size + + def close(self): + """A place-holder to maintain consistency.""" + pass + + +class GlanceWriteThread(object): + """Ensures that image data is written to in the glance client and that + it is in correct ('active')state. + """ + + def __init__(self, context, input_file, image_service, image_id, + image_meta=None): + if not image_meta: + image_meta = {} + + self.context = context + self.input_file = input_file + self.image_service = image_service + self.image_id = image_id + self.image_meta = image_meta + self._running = False + + def start(self): + self.done = event.Event() + + def _inner(): + """Initiate write thread. + + Function to do the image data transfer through an update + and thereon checks if the state is 'active'. + """ + LOG.debug(_("Initiating image service update on image: %(image)s " + "with meta: %(meta)s") % {'image': self.image_id, + 'meta': self.image_meta}) + self.image_service.update(self.context, + self.image_id, + self.image_meta, + data=self.input_file) + self._running = True + while self._running: + try: + image_meta = self.image_service.show(self.context, + self.image_id) + image_status = image_meta.get('status') + if image_status == 'active': + self.stop() + LOG.debug(_("Glance image: %s is now active.") % + self.image_id) + self.done.send(True) + # If the state is killed, then raise an exception. + elif image_status == 'killed': + self.stop() + msg = (_("Glance image: %s is in killed state.") % + self.image_id) + LOG.error(msg) + excep = exception.CinderException(msg) + self.done.send_exception(excep) + elif image_status in ['saving', 'queued']: + greenthread.sleep(GLANCE_POLL_INTERVAL) + else: + self.stop() + msg = _("Glance image %(id)s is in unknown state " + "- %(state)s") % {'id': self.image_id, + 'state': image_status} + LOG.error(msg) + excep = exception.CinderException(msg) + self.done.send_exception(excep) + except Exception as exc: + self.stop() + self.done.send_exception(exc) + + greenthread.spawn(_inner) + return self.done + + def stop(self): + self._running = False + + def wait(self): + return self.done.wait() + + def close(self): + pass + + +class IOThread(object): + """Class that reads chunks from the input file and writes them to the + output file till the transfer is completely done. 
+ """ + + def __init__(self, input_file, output_file): + self.input_file = input_file + self.output_file = output_file + self._running = False + self.got_exception = False + + def start(self): + self.done = event.Event() + + def _inner(): + """Read data from input and write the same to output.""" + self._running = True + while self._running: + try: + data = self.input_file.read(None) + if not data: + self.stop() + self.done.send(True) + self.output_file.write(data) + if hasattr(self.input_file, "update_progress"): + self.input_file.update_progress() + if hasattr(self.output_file, "update_progress"): + self.output_file.update_progress() + greenthread.sleep(IO_THREAD_SLEEP_TIME) + except Exception as exc: + self.stop() + LOG.exception(exc) + self.done.send_exception(exc) + + greenthread.spawn(_inner) + return self.done + + def stop(self): + self._running = False + + def wait(self): + return self.done.wait() diff --git a/cinder/volume/drivers/vmware/read_write_util.py b/cinder/volume/drivers/vmware/read_write_util.py new file mode 100644 index 0000000000..37263c0ddd --- /dev/null +++ b/cinder/volume/drivers/vmware/read_write_util.py @@ -0,0 +1,337 @@ +# Copyright (c) 2013 VMware, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Classes to handle image files. +Collection of classes to handle image upload/download to/from Image service +(like Glance image storage and retrieval service) from/to VMware server. +""" + +import httplib +import netaddr +import urllib +import urllib2 +import urlparse + +from cinder.openstack.common import log as logging +from cinder.volume.drivers.vmware import error_util +from cinder.volume.drivers.vmware import vim_util + +LOG = logging.getLogger(__name__) +USER_AGENT = 'OpenStack-ESX-Adapter' +READ_CHUNKSIZE = 65536 + + +class GlanceFileRead(object): + """Glance file read handler class.""" + + def __init__(self, glance_read_iter): + self.glance_read_iter = glance_read_iter + self.iter = self.get_next() + + def read(self, chunk_size): + """Read an item from the queue. + + The chunk size is ignored for the Client ImageBodyIterator + uses its own CHUNKSIZE. 
+ """ + try: + return self.iter.next() + except StopIteration: + return "" + + def get_next(self): + """Get the next item from the image iterator.""" + for data in self.glance_read_iter: + yield data + + def close(self): + """A dummy close just to maintain consistency.""" + pass + + +class VMwareHTTPFile(object): + """Base class for VMDK file access over HTTP.""" + + def __init__(self, file_handle): + self.eof = False + self.file_handle = file_handle + + def close(self): + """Close the file handle.""" + try: + self.file_handle.close() + except Exception as exc: + LOG.exception(exc) + + def __del__(self): + """Close the file handle on garbage collection.""" + self.close() + + def _build_vim_cookie_headers(self, vim_cookies): + """Build ESX host session cookie headers.""" + cookie_header = "" + for vim_cookie in vim_cookies: + cookie_header = vim_cookie.name + '=' + vim_cookie.value + break + return cookie_header + + def write(self, data): + """Write data to the file.""" + raise NotImplementedError() + + def read(self, chunk_size): + """Read a chunk of data.""" + raise NotImplementedError() + + def get_size(self): + """Get size of the file to be read.""" + raise NotImplementedError() + + def _is_valid_ipv6(self, address): + """Whether given host address is a valid IPv6 address.""" + try: + return netaddr.valid_ipv6(address) + except Exception: + return False + + def get_soap_url(self, scheme, host): + """return IPv4/v6 compatible url constructed for host.""" + if self._is_valid_ipv6(host): + return '%s://[%s]' % (scheme, host) + return '%s://%s' % (scheme, host) + + def _fix_esx_url(self, url, host): + """Fix netloc if it is a ESX host. + + For a ESX host the netloc is set to '*' in the url returned in + HttpNfcLeaseInfo. The netloc is right IP when talking to a VC. + """ + urlp = urlparse.urlparse(url) + if urlp.netloc == '*': + scheme, _, path, params, query, fragment = urlp + url = urlparse.urlunparse((scheme, host, path, params, + query, fragment)) + return url + + def find_vmdk_url(self, lease_info, host): + """Find the URL corresponding to a vmdk disk in lease info.""" + url = None + for deviceUrl in lease_info.deviceUrl: + if deviceUrl.disk: + url = self._fix_esx_url(deviceUrl.url, host) + break + return url + + +class VMwareHTTPWriteFile(VMwareHTTPFile): + """VMware file write handler class.""" + + def __init__(self, host, data_center_name, datastore_name, cookies, + file_path, file_size, scheme='https'): + soap_url = self.get_soap_url(scheme, host) + base_url = '%s/folder/%s' % (soap_url, file_path) + param_list = {'dcPath': data_center_name, 'dsName': datastore_name} + base_url = base_url + '?' + urllib.urlencode(param_list) + _urlparse = urlparse.urlparse(base_url) + scheme, netloc, path, params, query, fragment = _urlparse + if scheme == 'http': + conn = httplib.HTTPConnection(netloc) + elif scheme == 'https': + conn = httplib.HTTPSConnection(netloc) + conn.putrequest('PUT', path + '?' + query) + conn.putheader('User-Agent', USER_AGENT) + conn.putheader('Content-Length', file_size) + conn.putheader('Cookie', self._build_vim_cookie_headers(cookies)) + conn.endheaders() + self.conn = conn + VMwareHTTPFile.__init__(self, conn) + + def write(self, data): + """Write to the file.""" + self.file_handle.send(data) + + def close(self): + """Get the response and close the connection.""" + try: + self.conn.getresponse() + except Exception as excep: + LOG.debug(_("Exception during HTTP connection close in " + "VMwareHTTPWrite. 
Exception is %s.") % excep) + super(VMwareHTTPWriteFile, self).close() + + +class VMwareHTTPWriteVmdk(VMwareHTTPFile): + """Write VMDK over HTTP using VMware HttpNfcLease.""" + + def __init__(self, session, host, rp_ref, vm_folder_ref, vm_create_spec, + vmdk_size): + """Initialize a writer for vmdk file. + + :param session: a valid api session to ESX/VC server + :param host: the ESX or VC host IP + :param rp_ref: resource pool into which backing VM is imported + :param vm_folder_ref: VM folder in ESX/VC inventory to use as parent + of backing VM + :param vm_create_spec: backing VM created using this create spec + :param vmdk_size: VMDK size to be imported into backing VM + """ + self._session = session + self._vmdk_size = vmdk_size + self._progress = 0 + lease = session.invoke_api(session.vim, 'ImportVApp', rp_ref, + spec=vm_create_spec, folder=vm_folder_ref) + session.wait_for_lease_ready(lease) + self._lease = lease + lease_info = session.invoke_api(vim_util, 'get_object_property', + session.vim, lease, 'info') + # Find the url for vmdk device + url = self.find_vmdk_url(lease_info, host) + if not url: + msg = _("Could not retrieve URL from lease.") + LOG.exception(msg) + raise error_util.VimException(msg) + LOG.info(_("Opening vmdk url: %s for write.") % url) + + # Prepare the http connection to the vmdk url + cookies = session.vim.client.options.transport.cookiejar + _urlparse = urlparse.urlparse(url) + scheme, netloc, path, params, query, fragment = _urlparse + if scheme == 'http': + conn = httplib.HTTPConnection(netloc) + elif scheme == 'https': + conn = httplib.HTTPSConnection(netloc) + if query: + path = path + '?' + query + conn.putrequest('PUT', path) + conn.putheader('User-Agent', USER_AGENT) + conn.putheader('Content-Length', str(vmdk_size)) + conn.putheader('Overwrite', 't') + conn.putheader('Cookie', self._build_vim_cookie_headers(cookies)) + conn.putheader('Content-Type', 'binary/octet-stream') + conn.endheaders() + self.conn = conn + VMwareHTTPFile.__init__(self, conn) + + def write(self, data): + """Write to the file.""" + self._progress += len(data) + LOG.debug(_("Written %s bytes to vmdk.") % self._progress) + self.file_handle.send(data) + + def update_progress(self): + """Updates progress to lease. + + This call back to the lease is essential to keep the lease alive + across long running write operations. + """ + percent = int(float(self._progress) / self._vmdk_size * 100) + try: + LOG.debug(_("Updating progress to %s percent.") % percent) + self._session.invoke_api(self._session.vim, + 'HttpNfcLeaseProgress', + self._lease, percent=percent) + except error_util.VimException as ex: + LOG.exception(ex) + raise ex + + def close(self): + """End the lease and close the connection.""" + state = self._session.invoke_api(vim_util, 'get_object_property', + self._session.vim, + self._lease, 'state') + if state == 'ready': + self._session.invoke_api(self._session.vim, 'HttpNfcLeaseComplete', + self._lease) + LOG.debug(_("Lease released.")) + else: + LOG.debug(_("Lease is already in state: %s.") % state) + super(VMwareHTTPWriteVmdk, self).close() + + +class VMwareHTTPReadVmdk(VMwareHTTPFile): + """read VMDK over HTTP using VMware HttpNfcLease.""" + + def __init__(self, session, host, vm_ref, vmdk_path, vmdk_size): + """Initialize a writer for vmdk file. + + During an export operation the vmdk disk is converted to a + stream-optimized sparse disk format. So the size of the VMDK + after export may be smaller than the current vmdk disk size. 
+ + :param session: a valid api session to ESX/VC server + :param host: the ESX or VC host IP + :param vm_ref: backing VM whose vmdk is to be exported + :param vmdk_path: datastore relative path to vmdk file to be exported + :param vmdk_size: current disk size of vmdk file to be exported + """ + self._session = session + self._vmdk_size = vmdk_size + self._progress = 0 + lease = session.invoke_api(session.vim, 'ExportVm', vm_ref) + session.wait_for_lease_ready(lease) + self._lease = lease + lease_info = session.invoke_api(vim_util, 'get_object_property', + session.vim, lease, 'info') + + # find the right disk url corresponding to given vmdk_path + url = self.find_vmdk_url(lease_info, host) + if not url: + msg = _("Could not retrieve URL from lease.") + LOG.exception(msg) + raise error_util.VimException(msg) + LOG.info(_("Opening vmdk url: %s for read.") % url) + + cookies = session.vim.client.options.transport.cookiejar + headers = {'User-Agent': USER_AGENT, + 'Cookie': self._build_vim_cookie_headers(cookies)} + request = urllib2.Request(url, None, headers) + conn = urllib2.urlopen(request) + VMwareHTTPFile.__init__(self, conn) + + def read(self, chunk_size): + """Read a chunk from file.""" + self._progress += READ_CHUNKSIZE + LOG.debug(_("Read %s bytes from vmdk.") % self._progress) + return self.file_handle.read(READ_CHUNKSIZE) + + def update_progress(self): + """Updates progress to lease. + + This call back to the lease is essential to keep the lease alive + across long running read operations. + """ + percent = int(float(self._progress) / self._vmdk_size * 100) + try: + LOG.debug(_("Updating progress to %s percent.") % percent) + self._session.invoke_api(self._session.vim, + 'HttpNfcLeaseProgress', + self._lease, percent=percent) + except error_util.VimException as ex: + LOG.exception(ex) + raise ex + + def close(self): + """End the lease and close the connection.""" + state = self._session.invoke_api(vim_util, 'get_object_property', + self._session.vim, + self._lease, 'state') + if state == 'ready': + self._session.invoke_api(self._session.vim, 'HttpNfcLeaseComplete', + self._lease) + LOG.debug(_("Lease released.")) + else: + LOG.debug(_("Lease is already in state: %s.") % state) + super(VMwareHTTPReadVmdk, self).close() diff --git a/cinder/volume/drivers/vmware/vim.py b/cinder/volume/drivers/vmware/vim.py new file mode 100644 index 0000000000..0d02543e0e --- /dev/null +++ b/cinder/volume/drivers/vmware/vim.py @@ -0,0 +1,236 @@ +# Copyright (c) 2013 VMware, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Classes for making VMware VI SOAP calls. +""" + +import httplib +import suds + +from cinder.volume.drivers.vmware import error_util + +RESP_NOT_XML_ERROR = "Response is 'text/html', not 'text/xml'" +CONN_ABORT_ERROR = 'Software caused connection abort' +ADDRESS_IN_USE_ERROR = 'Address already in use' + + +def get_moref(value, type): + """Get managed object reference. 
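+
+    Example (illustrative; the value and type are placeholders)::
+
+        vm_ref = get_moref('vm-42', 'VirtualMachine')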
+
+    :param value: value for the managed object
+    :param type: type of the managed object
+    :return: Managed object reference with input value and type
+    """
+    moref = suds.sudsobject.Property(value)
+    moref._type = type
+    return moref
+
+
+class VIMMessagePlugin(suds.plugin.MessagePlugin):
+
+    def addAttributeForValue(self, node):
+        """Helper to handle AnyType.
+
+        suds does not handle AnyType properly.
+        VI SDK requires the type attribute to be set when AnyType is used.
+
+        :param node: XML value node
+        """
+        if node.name == 'value':
+            node.set('xsi:type', 'xsd:string')
+
+    def marshalled(self, context):
+        """Marshal soap context.
+
+        Provides the plugin with the opportunity to prune empty
+        nodes and fixup nodes before sending it to the server.
+
+        :param context: SOAP context
+        """
+        # suds builds the entire request object based on the wsdl schema.
+        # VI SDK throws server errors if optional SOAP nodes are sent
+        # without values, e.g. <test/> as opposed to <test>test</test>.
+        context.envelope.prune()
+        context.envelope.walk(self.addAttributeForValue)
+
+
+class Vim(object):
+    """The VIM Object."""
+
+    def __init__(self, protocol='https', host='localhost', wsdl_loc=None):
+        """Create communication interfaces for initiating SOAP transactions.
+
+        :param protocol: http or https
+        :param host: Server IPAddress[:port] or Hostname[:port]
+        """
+        self._protocol = protocol
+        self._host_name = host
+        if not wsdl_loc:
+            wsdl_loc = Vim._get_wsdl_loc(protocol, host)
+        soap_url = Vim._get_soap_url(protocol, host)
+        self._client = suds.client.Client(wsdl_loc, location=soap_url,
+                                          plugins=[VIMMessagePlugin()])
+        self._service_content = self.RetrieveServiceContent('ServiceInstance')
+
+    @staticmethod
+    def _get_wsdl_loc(protocol, host_name):
+        """Return default WSDL file location hosted at the server.
+
+        :param protocol: http or https
+        :param host_name: ESX/VC server host name
+        :return: Default WSDL file location hosted at the server
+        """
+        return '%s://%s/sdk/vimService.wsdl' % (protocol, host_name)
+
+    @staticmethod
+    def _get_soap_url(protocol, host_name):
+        """Return URL to SOAP services for ESX/VC server.
+
+        :param protocol: https or http
+        :param host_name: ESX/VC server host name
+        :return: URL to SOAP services for ESX/VC server
+        """
+        return '%s://%s/sdk' % (protocol, host_name)
+
+    @property
+    def service_content(self):
+        return self._service_content
+
+    @property
+    def client(self):
+        return self._client
+
+    def __getattr__(self, attr_name):
+        """Makes the API call and gets the result."""
+
+        def retrieve_properties_ex_fault_checker(response):
+            """Checks the RetrievePropertiesEx response for errors.
+
+            Certain faults are sent as part of the SOAP body as property of
+            missingSet. For example NotAuthenticated fault. The method raises
+            appropriate VimFaultException when an error is found.
+
+            :param response: Response from RetrievePropertiesEx API call
+            """
+
+            fault_list = []
+            if not response:
+                # This is the case when the session has timed out. ESX SOAP
+                # server sends an empty RetrievePropertiesExResponse. Normally
+                # missingSet in the returnval field has the specifics about
+                # the error, but that's not the case with a timed out idle
+                # session. It is as bad as a terminated session for we cannot
+                # use the session. So setting fault to NotAuthenticated fault.
+ fault_list = [error_util.NOT_AUTHENTICATED] + else: + for obj_cont in response: + if hasattr(obj_cont, 'missingSet'): + for missing_elem in obj_cont.missingSet: + fault_type = missing_elem.fault.fault.__class__ + # Fault needs to be added to the type of fault + # for uniformity in error checking as SOAP faults + # define + fault_list.append(fault_type.__name__) + if fault_list: + exc_msg_list = ', '.join(fault_list) + raise error_util.VimFaultException(fault_list, + _("Error(s): %s occurred " + "in the call to " + "RetrievePropertiesEx.") % + exc_msg_list) + + def vim_request_handler(managed_object, **kwargs): + """Handler for VI SDK calls. + + Builds the SOAP message and parses the response for fault + checking and other errors. + + :param managed_object:Managed object reference + :param kwargs: Keyword arguments of the call + :return: Response of the API call + """ + + try: + if isinstance(managed_object, str): + # For strings use string value for value and type + # of the managed object. + managed_object = get_moref(managed_object, managed_object) + request = getattr(self.client.service, attr_name) + response = request(managed_object, **kwargs) + if (attr_name.lower() == 'retrievepropertiesex'): + retrieve_properties_ex_fault_checker(response) + return response + + except error_util.VimFaultException as excep: + raise + + except suds.WebFault as excep: + doc = excep.document + detail = doc.childAtPath('/Envelope/Body/Fault/detail') + fault_list = [] + for child in detail.getChildren(): + fault_list.append(child.get('type')) + raise error_util.VimFaultException(fault_list, str(excep)) + + except AttributeError as excep: + raise error_util.VimAttributeException(_("No such SOAP method " + "%(attr)s. Detailed " + "error: %(excep)s.") % + {'attr': attr_name, + 'excep': excep}) + + except (httplib.CannotSendRequest, + httplib.ResponseNotReady, + httplib.CannotSendHeader) as excep: + raise error_util.SessionOverLoadException(_("httplib error in " + "%(attr)s: " + "%(excep)s.") % + {'attr': attr_name, + 'excep': excep}) + + except Exception as excep: + # Socket errors which need special handling for they + # might be caused by server API call overload + if (str(excep).find(ADDRESS_IN_USE_ERROR) != -1 or + str(excep).find(CONN_ABORT_ERROR)) != -1: + raise error_util.SessionOverLoadException(_("Socket error " + "in %(attr)s: " + "%(excep)s.") % + {'attr': + attr_name, + 'excep': excep}) + # Type error that needs special handling for it might be + # caused by server API call overload + elif str(excep).find(RESP_NOT_XML_ERROR) != -1: + raise error_util.SessionOverLoadException(_("Type error " + "in %(attr)s: " + "%(excep)s.") % + {'attr': + attr_name, + 'excep': excep}) + else: + raise error_util.VimException(_("Error in %(attr)s. " + "Detailed error: " + "%(excep)s.") % + {'attr': attr_name, + 'excep': excep}) + return vim_request_handler + + def __repr__(self): + return "VIM Object." + + def __str__(self): + return "VIM Object." diff --git a/cinder/volume/drivers/vmware/vim_util.py b/cinder/volume/drivers/vmware/vim_util.py new file mode 100644 index 0000000000..06e3100585 --- /dev/null +++ b/cinder/volume/drivers/vmware/vim_util.py @@ -0,0 +1,301 @@ +# Copyright (c) 2013 VMware, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +The VMware API utility module. +""" + + +def build_selection_spec(client_factory, name): + """Builds the selection spec. + + :param client_factory: Factory to get API input specs + :param name: Name for the selection spec + :return: Selection spec + """ + sel_spec = client_factory.create('ns0:SelectionSpec') + sel_spec.name = name + return sel_spec + + +def build_traversal_spec(client_factory, name, type, path, skip, + select_set): + """Builds the traversal spec object. + + :param client_factory: Factory to get API input specs + :param name: Name for the traversal spec + :param type: Type of the managed object reference + :param path: Property path of the managed object reference + :param skip: Whether or not to filter the object identified by param path + :param select_set: Set of selection specs specifying additional objects + to filter + :return: Traversal spec + """ + traversal_spec = client_factory.create('ns0:TraversalSpec') + traversal_spec.name = name + traversal_spec.type = type + traversal_spec.path = path + traversal_spec.skip = skip + traversal_spec.selectSet = select_set + return traversal_spec + + +def build_recursive_traversal_spec(client_factory): + """Builds Recursive Traversal Spec to traverse managed object hierarchy. + + :param client_factory: Factory to get API input specs + :return: Recursive traversal spec + """ + visit_folders_select_spec = build_selection_spec(client_factory, + 'visitFolders') + # Next hop from Datacenter + dc_to_hf = build_traversal_spec(client_factory, 'dc_to_hf', 'Datacenter', + 'hostFolder', False, + [visit_folders_select_spec]) + dc_to_vmf = build_traversal_spec(client_factory, 'dc_to_vmf', 'Datacenter', + 'vmFolder', False, + [visit_folders_select_spec]) + + # Next hop from HostSystem + h_to_vm = build_traversal_spec(client_factory, 'h_to_vm', 'HostSystem', + 'vm', False, + [visit_folders_select_spec]) + + # Next hop from ComputeResource + cr_to_h = build_traversal_spec(client_factory, 'cr_to_h', + 'ComputeResource', 'host', False, []) + cr_to_ds = build_traversal_spec(client_factory, 'cr_to_ds', + 'ComputeResource', 'datastore', False, []) + + rp_to_rp_select_spec = build_selection_spec(client_factory, 'rp_to_rp') + rp_to_vm_select_spec = build_selection_spec(client_factory, 'rp_to_vm') + + cr_to_rp = build_traversal_spec(client_factory, 'cr_to_rp', + 'ComputeResource', 'resourcePool', False, + [rp_to_rp_select_spec, + rp_to_vm_select_spec]) + + # Next hop from ClusterComputeResource + ccr_to_h = build_traversal_spec(client_factory, 'ccr_to_h', + 'ClusterComputeResource', 'host', + False, []) + ccr_to_ds = build_traversal_spec(client_factory, 'ccr_to_ds', + 'ClusterComputeResource', 'datastore', + False, []) + ccr_to_rp = build_traversal_spec(client_factory, 'ccr_to_rp', + 'ClusterComputeResource', 'resourcePool', + False, + [rp_to_rp_select_spec, + rp_to_vm_select_spec]) + # Next hop from ResourcePool + rp_to_rp = build_traversal_spec(client_factory, 'rp_to_rp', 'ResourcePool', + 'resourcePool', False, + [rp_to_rp_select_spec, + rp_to_vm_select_spec]) + rp_to_vm = build_traversal_spec(client_factory, 'rp_to_vm', 
'ResourcePool', + 'vm', False, + [rp_to_rp_select_spec, + rp_to_vm_select_spec]) + + # Get the assorted traversal spec which takes care of the objects to + # be searched for from the rootFolder + traversal_spec = build_traversal_spec(client_factory, 'visitFolders', + 'Folder', 'childEntity', False, + [visit_folders_select_spec, + h_to_vm, dc_to_hf, dc_to_vmf, + cr_to_ds, cr_to_h, cr_to_rp, + ccr_to_h, ccr_to_ds, ccr_to_rp, + rp_to_rp, rp_to_vm]) + return traversal_spec + + +def build_property_spec(client_factory, type='VirtualMachine', + properties_to_collect=None, + all_properties=False): + """Builds the Property Spec. + + :param client_factory: Factory to get API input specs + :param type: Type of the managed object reference property + :param properties_to_collect: Properties of the managed object reference + to be collected while traversal filtering + :param all_properties: Whether all the properties of managed object + reference needs to be collected + :return: Property spec + """ + if not properties_to_collect: + properties_to_collect = ['name'] + + property_spec = client_factory.create('ns0:PropertySpec') + property_spec.all = all_properties + property_spec.pathSet = properties_to_collect + property_spec.type = type + return property_spec + + +def build_object_spec(client_factory, root_folder, traversal_specs): + """Builds the object Spec. + + :param client_factory: Factory to get API input specs + :param root_folder: Root folder reference as the starting point for + traversal + :param traversal_specs: filter specs required for traversal + :return: Object spec + """ + object_spec = client_factory.create('ns0:ObjectSpec') + object_spec.obj = root_folder + object_spec.skip = False + object_spec.selectSet = traversal_specs + return object_spec + + +def build_property_filter_spec(client_factory, property_specs, object_specs): + """Builds the Property Filter Spec. + + :param client_factory: Factory to get API input specs + :param property_specs: Property specs to be collected for filtered objects + :param object_specs: Object specs to identify objects to be filtered + :return: Property filter spec + """ + property_filter_spec = client_factory.create('ns0:PropertyFilterSpec') + property_filter_spec.propSet = property_specs + property_filter_spec.objectSet = object_specs + return property_filter_spec + + +def get_objects(vim, type, max_objects, props_to_collect=None, + all_properties=False): + """Gets all managed object references of a specified type. + + It is caller's responsibility to continue or cancel retrieval. 
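+
+    Illustrative usage (assumes an established Vim connection)::
+
+        retrieve_result = get_objects(vim, 'HostSystem', 100)
+        # ... consume retrieve_result.objects ...
+        cancel_retrieval(vim, retrieve_result)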
+ + :param vim: Vim object + :param type: Type of the managed object reference + :param max_objects: Maximum number of objects that should be returned in + a single call + :param props_to_collect: Properties of the managed object reference + to be collected + :param all_properties: Whether all properties of the managed object + reference are to be collected + :return: All managed object references of a specified type + """ + + if not props_to_collect: + props_to_collect = ['name'] + + client_factory = vim.client.factory + recur_trav_spec = build_recursive_traversal_spec(client_factory) + object_spec = build_object_spec(client_factory, + vim.service_content.rootFolder, + [recur_trav_spec]) + property_spec = build_property_spec(client_factory, type=type, + properties_to_collect=props_to_collect, + all_properties=all_properties) + property_filter_spec = build_property_filter_spec(client_factory, + [property_spec], + [object_spec]) + options = client_factory.create('ns0:RetrieveOptions') + options.maxObjects = max_objects + return vim.RetrievePropertiesEx(vim.service_content.propertyCollector, + specSet=[property_filter_spec], + options=options) + + +def get_object_properties(vim, mobj, properties): + """Gets properties of the managed object specified. + + :param vim: Vim object + :param mobj: Reference to the managed object + :param properties: Properties of the managed object reference + to be retrieved + :return: Properties of the managed object specified + """ + + client_factory = vim.client.factory + if mobj is None: + return None + collector = vim.service_content.propertyCollector + property_filter_spec = client_factory.create('ns0:PropertyFilterSpec') + property_spec = client_factory.create('ns0:PropertySpec') + property_spec.all = (properties is None or len(properties) == 0) + property_spec.pathSet = properties + property_spec.type = mobj._type + object_spec = client_factory.create('ns0:ObjectSpec') + object_spec.obj = mobj + object_spec.skip = False + property_filter_spec.propSet = [property_spec] + property_filter_spec.objectSet = [object_spec] + options = client_factory.create('ns0:RetrieveOptions') + options.maxObjects = 1 + retrieve_result = vim.RetrievePropertiesEx(collector, + specSet=[property_filter_spec], + options=options) + cancel_retrieval(vim, retrieve_result) + return retrieve_result.objects + + +def _get_token(retrieve_result): + """Get token from results to obtain next set of results. + + :retrieve_result: Result from the RetrievePropertiesEx API + :return: Token to obtain next set of results. None if no more results. + """ + return getattr(retrieve_result, 'token', None) + + +def cancel_retrieval(vim, retrieve_result): + """Cancels the retrieve operation if necessary. + + :param vim: Vim object + :param retrieve_result: Result from the RetrievePropertiesEx API + """ + + token = _get_token(retrieve_result) + if token: + collector = vim.service_content.propertyCollector + vim.CancelRetrievePropertiesEx(collector, token=token) + + +def continue_retrieval(vim, retrieve_result): + """Continue retrieving results, if present. + + :param vim: Vim object + :param retrieve_result: Result from the RetrievePropertiesEx API + """ + + token = _get_token(retrieve_result) + if token: + collector = vim.service_content.propertyCollector + return vim.ContinueRetrievePropertiesEx(collector, token=token) + + +def get_object_property(vim, mobj, property_name): + """Gets property of the managed object specified. 
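+
+    Illustrative usage (the task moref is a placeholder)::
+
+        task_info = get_object_property(vim, task, 'info')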
+ + :param vim: Vim object + :param mobj: Reference to the managed object + :param property_name: Name of the property to be retrieved + :return: Property of the managed object specified + """ + props = get_object_properties(vim, mobj, [property_name]) + prop_val = None + if props: + prop = None + if hasattr(props[0], 'propSet'): + # propSet will be set only if the server provides value + # for the field + prop = props[0].propSet + if prop: + prop_val = prop[0].val + return prop_val diff --git a/cinder/volume/drivers/vmware/vmdk.py b/cinder/volume/drivers/vmware/vmdk.py new file mode 100644 index 0000000000..fc6e3e4da0 --- /dev/null +++ b/cinder/volume/drivers/vmware/vmdk.py @@ -0,0 +1,1034 @@ +# Copyright (c) 2013 VMware, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Volume driver for VMware vCenter/ESX managed datastores. + +The volumes created by this driver are backed by VMDK (Virtual Machine +Disk) files stored in datastores. For ease of managing the VMDKs, the +driver creates a virtual machine for each of the volumes. This virtual +machine is never powered on and is often referred as the shadow VM. +""" + +from oslo.config import cfg + +from cinder import exception +from cinder.openstack.common import log as logging +from cinder import units +from cinder.volume import driver +from cinder.volume.drivers.vmware import api +from cinder.volume.drivers.vmware import error_util +from cinder.volume.drivers.vmware import vim +from cinder.volume.drivers.vmware import vmware_images +from cinder.volume.drivers.vmware import volumeops +from cinder.volume import volume_types + +LOG = logging.getLogger(__name__) + +THIN_VMDK_TYPE = 'thin' +THICK_VMDK_TYPE = 'thick' +EAGER_ZEROED_THICK_VMDK_TYPE = 'eagerZeroedThick' + +vmdk_opts = [ + cfg.StrOpt('vmware_host_ip', + default=None, + help='IP address for connecting to VMware ESX/VC server.'), + cfg.StrOpt('vmware_host_username', + default=None, + help='Username for authenticating with VMware ESX/VC server.'), + cfg.StrOpt('vmware_host_password', + default=None, + help='Password for authenticating with VMware ESX/VC server.', + secret=True), + cfg.StrOpt('vmware_wsdl_location', + default=None, + help='Optional VIM service WSDL Location ' + 'e.g http:///vimService.wsdl. 
Optional over-ride ' + 'to default location for bug work-arounds.'), + cfg.IntOpt('vmware_api_retry_count', + default=10, + help='Number of times VMware ESX/VC server API must be ' + 'retried upon connection related issues.'), + cfg.IntOpt('vmware_task_poll_interval', + default=5, + help='The interval used for polling remote tasks invoked on ' + 'VMware ESX/VC server.'), + cfg.StrOpt('vmware_volume_folder', + default='cinder-volumes', + help='Name for the folder in the VC datacenter that will ' + 'contain cinder volumes.'), + cfg.IntOpt('vmware_image_transfer_timeout_secs', + default=7200, + help='Timeout in seconds for VMDK volume transfer between ' + 'Cinder and Glance.'), + cfg.IntOpt('vmware_max_objects_retrieval', + default=100, + help='Max number of objects to be retrieved per batch. ' + 'Query results will be obtained in batches from the ' + 'server and not in one shot. Server may still limit the ' + 'count to something less than the configured value.'), +] + +CONF = cfg.CONF +CONF.register_opts(vmdk_opts) + + +def _get_volume_type_extra_spec(type_id, spec_key, possible_values, + default_value): + """Get extra spec value. + + If the spec value is not present in the input possible_values, then + default_value will be returned. + If the type_id is None, then default_value is returned. + + The caller must not consider scope and the implementation adds/removes + scope. The scope used here is 'vmware' e.g. key 'vmware:vmdk_type' and + so the caller must pass vmdk_type as an input ignoring the scope. + + :param type_id: Volume type ID + :param spec_key: Extra spec key + :param possible_values: Permitted values for the extra spec + :param default_value: Default value for the extra spec incase of an + invalid value or if the entry does not exist + :return: extra spec value + """ + if type_id: + spec_key = ('vmware:%s') % spec_key + spec_value = volume_types.get_volume_type_extra_specs(type_id, + spec_key) + if spec_value in possible_values: + LOG.debug(_("Returning spec value %s") % spec_value) + return spec_value + + LOG.debug(_("Invalid spec value: %s specified.") % spec_value) + + # Default we return thin disk type + LOG.debug(_("Returning default spec value: %s.") % default_value) + return default_value + + +class VMwareEsxVmdkDriver(driver.VolumeDriver): + """Manage volumes on VMware ESX server.""" + + VERSION = '1.1.0' + + def __init__(self, *args, **kwargs): + super(VMwareEsxVmdkDriver, self).__init__(*args, **kwargs) + self.configuration.append_config_values(vmdk_opts) + self._session = None + self._stats = None + self._volumeops = None + + @property + def session(self): + if not self._session: + ip = self.configuration.vmware_host_ip + username = self.configuration.vmware_host_username + password = self.configuration.vmware_host_password + api_retry_count = self.configuration.vmware_api_retry_count + task_poll_interval = self.configuration.vmware_task_poll_interval + wsdl_loc = self.configuration.safe_get('vmware_wsdl_location') + self._session = api.VMwareAPISession(ip, username, + password, api_retry_count, + task_poll_interval, + wsdl_loc=wsdl_loc) + return self._session + + @property + def volumeops(self): + if not self._volumeops: + max_objects = self.configuration.vmware_max_objects_retrieval + self._volumeops = volumeops.VMwareVolumeOps(self.session, + max_objects) + return self._volumeops + + def do_setup(self, context): + """Perform validations and establish connection to server. 
+ + :param context: Context information + """ + + # Throw error if required parameters are not set. + required_params = ['vmware_host_ip', + 'vmware_host_username', + 'vmware_host_password'] + for param in required_params: + if not getattr(self.configuration, param, None): + raise exception.InvalidInput(_("%s not set.") % param) + + # Create the session object for the first time + max_objects = self.configuration.vmware_max_objects_retrieval + self._volumeops = volumeops.VMwareVolumeOps(self.session, max_objects) + LOG.info(_("Successfully setup driver: %(driver)s for " + "server: %(ip)s.") % + {'driver': self.__class__.__name__, + 'ip': self.configuration.vmware_host_ip}) + + def check_for_setup_error(self): + pass + + def get_volume_stats(self, refresh=False): + """Obtain status of the volume service. + + :param refresh: Whether to get refreshed information + """ + + if not self._stats: + backend_name = self.configuration.safe_get('volume_backend_name') + if not backend_name: + backend_name = self.__class__.__name__ + data = {'volume_backend_name': backend_name, + 'vendor_name': 'VMware', + 'driver_version': self.VERSION, + 'storage_protocol': 'LSI Logic SCSI', + 'reserved_percentage': 0, + 'total_capacity_gb': 'unknown', + 'free_capacity_gb': 'unknown'} + self._stats = data + return self._stats + + def create_volume(self, volume): + """Creates a volume. + + We do not create any backing. We do it only for the first time + it is being attached to a virtual machine. + + :param volume: Volume object + """ + pass + + def _delete_volume(self, volume): + """Delete the volume backing if it is present. + + :param volume: Volume object + """ + backing = self.volumeops.get_backing(volume['name']) + if not backing: + LOG.info(_("Backing not available, no operation to be performed.")) + return + self.volumeops.delete_backing(backing) + + def delete_volume(self, volume): + """Deletes volume backing. + + :param volume: Volume object + """ + self._delete_volume(volume) + + def _get_volume_group_folder(self, datacenter): + """Return vmFolder of datacenter as we cannot create folder in ESX. + + :param datacenter: Reference to the datacenter + :return: vmFolder reference of the datacenter + """ + return self.volumeops.get_vmfolder(datacenter) + + def _compute_space_utilization(self, datastore_summary): + """Compute the space utilization of the given datastore. + + :param datastore_summary: Summary of the datastore for which + space utilization is to be computed + :return: space utilization in the range [0..1] + """ + return ( + 1.0 - + datastore_summary.freeSpace / float(datastore_summary.capacity) + ) + + def _select_datastore_summary(self, size_bytes, datastores): + """Get the best datastore summary from the given datastore list. + + The implementation selects a datastore which is connected to maximum + number of hosts, provided there is enough space to accommodate the + volume. Ties are broken based on space utilization; datastore with + low space utilization is preferred. 
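+
+        For example (illustrative numbers): given two datastores seen by
+        the same number of hosts, one at 0.40 space utilization and one
+        at 0.75, the 0.40 datastore is selected.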
+ + :param size_bytes: Size in bytes of the volume + :param datastores: Datastores from which a choice is to be made + for the volume + :return: Summary of the best datastore selected for volume + """ + best_summary = None + max_host_count = 0 + best_space_utilization = 1.0 + + for datastore in datastores: + summary = self.volumeops.get_summary(datastore) + if summary.freeSpace > size_bytes: + host_count = len(self.volumeops.get_connected_hosts(datastore)) + if host_count > max_host_count: + max_host_count = host_count + best_space_utilization = self._compute_space_utilization( + summary + ) + best_summary = summary + elif host_count == max_host_count: + # break the tie based on space utilization + space_utilization = self._compute_space_utilization( + summary + ) + if space_utilization < best_space_utilization: + best_space_utilization = space_utilization + best_summary = summary + + if not best_summary: + msg = _("Unable to pick datastore to accommodate %(size)s bytes " + "from the datastores: %(dss)s.") % {'size': size_bytes, + 'dss': datastores} + LOG.error(msg) + raise error_util.VimException(msg) + + LOG.debug(_("Selected datastore: %(datastore)s with %(host_count)d " + "connected host(s) for the volume.") % + {'datastore': best_summary, 'host_count': max_host_count}) + return best_summary + + def _get_folder_ds_summary(self, size_gb, resource_pool, datastores): + """Get folder and best datastore summary where volume can be placed. + + :param size_gb: Size of the volume in GB + :param resource_pool: Resource pool reference + :param datastores: Datastores from which a choice is to be made + for the volume + :return: Folder and best datastore summary where volume can be + placed on + """ + datacenter = self.volumeops.get_dc(resource_pool) + folder = self._get_volume_group_folder(datacenter) + size_bytes = size_gb * units.GiB + datastore_summary = self._select_datastore_summary(size_bytes, + datastores) + return (folder, datastore_summary) + + @staticmethod + def _get_disk_type(volume): + """Get disk type from volume type. + + :param volume: Volume object + :return: Disk type + """ + return _get_volume_type_extra_spec(volume['volume_type_id'], + 'vmdk_type', + (THIN_VMDK_TYPE, THICK_VMDK_TYPE, + EAGER_ZEROED_THICK_VMDK_TYPE), + THIN_VMDK_TYPE) + + def _create_backing(self, volume, host): + """Create volume backing under the given host. + + :param volume: Volume object + :param host: Reference of the host + :return: Reference to the created backing + """ + # Get datastores and resource pool of the host + (datastores, resource_pool) = self.volumeops.get_dss_rp(host) + # Pick a folder and datastore to create the volume backing on + (folder, summary) = self._get_folder_ds_summary(volume['size'], + resource_pool, + datastores) + disk_type = VMwareEsxVmdkDriver._get_disk_type(volume) + size_kb = volume['size'] * units.MiB + return self.volumeops.create_backing(volume['name'], + size_kb, + disk_type, folder, + resource_pool, + host, + summary.name) + + def _relocate_backing(self, size_gb, backing, host): + pass + + def _select_ds_for_volume(self, size_gb): + """Select datastore that can accommodate a volume of given size. 
+ + Returns the selected datastore summary along with a compute host and + its resource pool and folder where the volume can be created + :return: (host, rp, folder, summary) + """ + retrv_result = self.volumeops.get_hosts() + while retrv_result: + hosts = retrv_result.objects + if not hosts: + break + (selected_host, rp, folder, summary) = (None, None, None, None) + for host in hosts: + host = host.obj + try: + (dss, rp) = self.volumeops.get_dss_rp(host) + (folder, summary) = self._get_folder_ds_summary(size_gb, + rp, dss) + selected_host = host + break + except error_util.VimException as excep: + LOG.warn(_("Unable to find suitable datastore for volume " + "of size: %(vol)s GB under host: %(host)s. " + "More details: %(excep)s") % + {'vol': size_gb, + 'host': host.obj, 'excep': excep}) + if selected_host: + self.volumeops.cancel_retrieval(retrv_result) + return (selected_host, rp, folder, summary) + retrv_result = self.volumeops.continue_retrieval(retrv_result) + + msg = _("Unable to find host to accommodate a disk of size: %s " + "in the inventory.") % size_gb + LOG.error(msg) + raise error_util.VimException(msg) + + def _create_backing_in_inventory(self, volume): + """Creates backing under any suitable host. + + The method tries to pick datastore that can fit the volume under + any host in the inventory. + + :param volume: Volume object + :return: Reference to the created backing + """ + + retrv_result = self.volumeops.get_hosts() + while retrv_result: + hosts = retrv_result.objects + if not hosts: + break + backing = None + for host in hosts: + try: + backing = self._create_backing(volume, host.obj) + if backing: + break + except error_util.VimException as excep: + LOG.warn(_("Unable to find suitable datastore for " + "volume: %(vol)s under host: %(host)s. " + "More details: %(excep)s") % + {'vol': volume['name'], + 'host': host.obj, 'excep': excep}) + if backing: + self.volumeops.cancel_retrieval(retrv_result) + return backing + retrv_result = self.volumeops.continue_retrieval(retrv_result) + + msg = _("Unable to create volume: %s in the inventory.") + LOG.error(msg % volume['name']) + raise error_util.VimException(msg % volume['name']) + + def _initialize_connection(self, volume, connector): + """Get information of volume's backing. + + If the volume does not have a backing yet. It will be created. + + :param volume: Volume object + :param connector: Connector information + :return: Return connection information + """ + connection_info = {'driver_volume_type': 'vmdk'} + + backing = self.volumeops.get_backing(volume['name']) + if 'instance' in connector: + # The instance exists + instance = vim.get_moref(connector['instance'], 'VirtualMachine') + LOG.debug(_("The instance: %s for which initialize connection " + "is called, exists.") % instance) + # Get host managing the instance + host = self.volumeops.get_host(instance) + if not backing: + # Create a backing in case it does not exist under the + # host managing the instance. + LOG.info(_("There is no backing for the volume: %s. " + "Need to create one.") % volume['name']) + backing = self._create_backing(volume, host) + else: + # Relocate volume is necessary + self._relocate_backing(volume['size'], backing, host) + else: + # The instance does not exist + LOG.debug(_("The instance for which initialize connection " + "is called, does not exist.")) + if not backing: + # Create a backing in case it does not exist. It is a bad use + # case to boot from an empty volume. 
+ LOG.warn(_("Trying to boot from an empty volume: %s.") % + volume['name']) + # Create backing + backing = self._create_backing_in_inventory(volume) + + # Set volume's moref value and name + connection_info['data'] = {'volume': backing.value, + 'volume_id': volume['id']} + + LOG.info(_("Returning connection_info: %(info)s for volume: " + "%(volume)s with connector: %(connector)s.") % + {'info': connection_info, + 'volume': volume['name'], + 'connector': connector}) + + return connection_info + + def initialize_connection(self, volume, connector): + """Allow connection to connector and return connection info. + + The implementation returns the following information: + {'driver_volume_type': 'vmdk' + 'data': {'volume': $VOLUME_MOREF_VALUE + 'volume_id': $VOLUME_ID + } + } + + :param volume: Volume object + :param connector: Connector information + :return: Return connection information + """ + return self._initialize_connection(volume, connector) + + def terminate_connection(self, volume, connector, force=False, **kwargs): + pass + + def create_export(self, context, volume): + pass + + def ensure_export(self, context, volume): + pass + + def remove_export(self, context, volume): + pass + + def _create_snapshot(self, snapshot): + """Creates a snapshot. + + If the volume does not have a backing then simply pass, else create + a snapshot. + Snapshot of only available volume is supported. + + :param snapshot: Snapshot object + """ + + volume = snapshot['volume'] + if volume['status'] != 'available': + msg = _("Snapshot of volume not supported in state: %s.") + LOG.error(msg % volume['status']) + raise exception.InvalidVolume(msg % volume['status']) + backing = self.volumeops.get_backing(snapshot['volume_name']) + if not backing: + LOG.info(_("There is no backing, so will not create " + "snapshot: %s.") % snapshot['name']) + return + self.volumeops.create_snapshot(backing, snapshot['name'], + snapshot['display_description']) + LOG.info(_("Successfully created snapshot: %s.") % snapshot['name']) + + def create_snapshot(self, snapshot): + """Creates a snapshot. + + :param snapshot: Snapshot object + """ + self._create_snapshot(snapshot) + + def _delete_snapshot(self, snapshot): + """Delete snapshot. + + If the volume does not have a backing or the snapshot does not exist + then simply pass, else delete the snapshot. + Snapshot deletion of only available volume is supported. + + :param snapshot: Snapshot object + """ + + volume = snapshot['volume'] + if volume['status'] != 'available': + msg = _("Delete snapshot of volume not supported in state: %s.") + LOG.error(msg % volume['status']) + raise exception.InvalidVolume(msg % volume['status']) + backing = self.volumeops.get_backing(snapshot['volume_name']) + if not backing: + LOG.info(_("There is no backing, and so there is no " + "snapshot: %s.") % snapshot['name']) + else: + self.volumeops.delete_snapshot(backing, snapshot['name']) + LOG.info(_("Successfully deleted snapshot: %s.") % + snapshot['name']) + + def delete_snapshot(self, snapshot): + """Delete snapshot. + + :param snapshot: Snapshot object + """ + self._delete_snapshot(snapshot) + + def _clone_backing_by_copying(self, volume, src_vmdk_path): + """Clones volume backing. + + Creates a backing for the input volume and replaces its VMDK file + with the input VMDK file copy. 
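+
+        For example (path is illustrative), if src_vmdk_path is
+        '[datastore1] volume-src/volume-src.vmdk', the freshly created
+        backing's own VMDK file is deleted and a copy of that source
+        file is put in its place.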
+ + :param volume: New Volume object + :param src_vmdk_path: VMDK file path of the source volume backing + """ + + # Create a backing + backing = self._create_backing_in_inventory(volume) + new_vmdk_path = self.volumeops.get_vmdk_path(backing) + datacenter = self.volumeops.get_dc(backing) + # Deleting the current VMDK file + self.volumeops.delete_vmdk_file(new_vmdk_path, datacenter) + # Copying the source VMDK file + self.volumeops.copy_vmdk_file(datacenter, src_vmdk_path, new_vmdk_path) + LOG.info(_("Successfully cloned new backing: %(back)s from " + "source VMDK file: %(vmdk)s.") % + {'back': backing, 'vmdk': src_vmdk_path}) + + def _create_cloned_volume(self, volume, src_vref): + """Creates volume clone. + + If source volume's backing does not exist, then pass. + Creates a backing and replaces its VMDK file with a copy of the + source backing's VMDK file. + + :param volume: New Volume object + :param src_vref: Volume object that must be cloned + """ + + backing = self.volumeops.get_backing(src_vref['name']) + if not backing: + LOG.info(_("There is no backing for the source volume: " + "%(svol)s. Not creating any backing for the " + "volume: %(vol)s.") % + {'svol': src_vref['name'], + 'vol': volume['name']}) + return + src_vmdk_path = self.volumeops.get_vmdk_path(backing) + self._clone_backing_by_copying(volume, src_vmdk_path) + + def create_cloned_volume(self, volume, src_vref): + """Creates volume clone. + + :param volume: New Volume object + :param src_vref: Volume object that must be cloned + """ + self._create_cloned_volume(volume, src_vref) + + def _create_volume_from_snapshot(self, volume, snapshot): + """Creates a volume from a snapshot. + + If the snapshot does not exist or source volume's backing does not + exist, then pass. + Else creates clone of source volume backing by copying its VMDK file. + + :param volume: Volume object + :param snapshot: Snapshot object + """ + + backing = self.volumeops.get_backing(snapshot['volume_name']) + if not backing: + LOG.info(_("There is no backing for the source snapshot: " + "%(snap)s. Not creating any backing for the " + "volume: %(vol)s.") % + {'snap': snapshot['name'], + 'vol': volume['name']}) + return + snapshot_moref = self.volumeops.get_snapshot(backing, + snapshot['name']) + if not snapshot_moref: + LOG.info(_("There is no snapshot point for the snapshoted volume: " + "%(snap)s. Not creating any backing for the " + "volume: %(vol)s.") % + {'snap': snapshot['name'], 'vol': volume['name']}) + return + src_vmdk_path = self.volumeops.get_vmdk_path(snapshot_moref) + self._clone_backing_by_copying(volume, src_vmdk_path) + + def create_volume_from_snapshot(self, volume, snapshot): + """Creates a volume from a snapshot. + + :param volume: Volume object + :param snapshot: Snapshot object + """ + self._create_volume_from_snapshot(volume, snapshot) + + def _get_ds_name_flat_vmdk_path(self, backing, vol_name): + """Get datastore name and folder path of the flat VMDK of the backing. + + :param backing: Reference to the backing entity + :param vol_name: Name of the volume + :return: datastore name and folder path of the VMDK of the backing + """ + file_path_name = self.volumeops.get_path_name(backing) + (datastore_name, + folder_path, _) = volumeops.split_datastore_path(file_path_name) + flat_vmdk_path = '%s%s-flat.vmdk' % (folder_path, vol_name) + return (datastore_name, flat_vmdk_path) + + @staticmethod + def _validate_disk_format(disk_format): + """Verify vmdk as disk format. 
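+
+        The check is case-insensitive and is skipped when disk_format is
+        unset; any other disk format raises ImageUnacceptable.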
+ + :param disk_format: Disk format of the image + """ + if disk_format and disk_format.lower() != 'vmdk': + msg = _("Cannot create image of disk format: %s. Only vmdk " + "disk format is accepted.") % disk_format + LOG.error(msg) + raise exception.ImageUnacceptable(msg) + + def _fetch_flat_image(self, context, volume, image_service, image_id, + image_size): + """Creates a volume from flat glance image. + + Creates a backing for the volume under the ESX/VC server and + copies the VMDK flat file from the glance image content. + The method assumes glance image is VMDK disk format and its + vmware_disktype is "sparse" or "preallocated", but not + "streamOptimized" + """ + # Set volume size in GB from image metadata + volume['size'] = float(image_size) / units.GiB + # First create empty backing in the inventory + backing = self._create_backing_in_inventory(volume) + + try: + (datastore_name, + flat_vmdk_path) = self._get_ds_name_flat_vmdk_path(backing, + volume['name']) + host = self.volumeops.get_host(backing) + datacenter = self.volumeops.get_dc(host) + datacenter_name = self.volumeops.get_entity_name(datacenter) + flat_vmdk_ds_path = '[%s] %s' % (datastore_name, flat_vmdk_path) + # Delete the *-flat.vmdk file within the backing + self.volumeops.delete_file(flat_vmdk_ds_path, datacenter) + + # copy over image from glance into *-flat.vmdk + timeout = self.configuration.vmware_image_transfer_timeout_secs + host_ip = self.configuration.vmware_host_ip + cookies = self.session.vim.client.options.transport.cookiejar + LOG.debug(_("Fetching glance image: %(id)s to server: %(host)s.") % + {'id': image_id, 'host': host_ip}) + vmware_images.fetch_flat_image(context, timeout, image_service, + image_id, image_size=image_size, + host=host_ip, + data_center_name=datacenter_name, + datastore_name=datastore_name, + cookies=cookies, + file_path=flat_vmdk_path) + LOG.info(_("Done copying image: %(id)s to volume: %(vol)s.") % + {'id': image_id, 'vol': volume['name']}) + except Exception as excep: + LOG.exception(_("Exception in copy_image_to_volume: %(excep)s. " + "Deleting the backing: %(back)s.") % + {'excep': excep, 'back': backing}) + # delete the backing + self.volumeops.delete_backing(backing) + raise excep + + def _fetch_stream_optimized_image(self, context, volume, image_service, + image_id, image_size): + """Creates volume from image using HttpNfc VM import. + + Uses Nfc API to download the VMDK file from Glance. Nfc creates the + backing VM that wraps the VMDK in the ESX/VC inventory. + This method assumes glance image is VMDK disk format and its + vmware_disktype is 'streamOptimized'. + """ + try: + # find host in which to create the volume + size_gb = volume['size'] + (host, rp, folder, summary) = self._select_ds_for_volume(size_gb) + except error_util.VimException as excep: + LOG.exception(_("Exception in _select_ds_for_volume: %s.") % excep) + raise excep + + LOG.debug(_("Selected datastore %(ds)s for new volume of size " + "%(size)s GB.") % {'ds': summary.name, 'size': size_gb}) + + # prepare create spec for backing vm + disk_type = VMwareEsxVmdkDriver._get_disk_type(volume) + + # The size of stream optimized glance image is often suspect, + # so better let VC figure out the disk capacity during import. 
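+        # (A size of 0 here ends up as a 1 KB placeholder disk, since
+        # volumeops._get_create_spec clamps capacityInKB to max(1, size_kb);
+        # the import then determines the real capacity from the image.)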
+ dummy_disk_size = 0 + vm_create_spec = self.volumeops._get_create_spec(volume['name'], + dummy_disk_size, + disk_type, + summary.name) + # convert vm_create_spec to vm_import_spec + cf = self.session.vim.client.factory + vm_import_spec = cf.create('ns0:VirtualMachineImportSpec') + vm_import_spec.configSpec = vm_create_spec + + try: + # fetching image from glance will also create the backing + timeout = self.configuration.vmware_image_transfer_timeout_secs + host_ip = self.configuration.vmware_host_ip + LOG.debug(_("Fetching glance image: %(id)s to server: %(host)s.") % + {'id': image_id, 'host': host_ip}) + vmware_images.fetch_stream_optimized_image(context, timeout, + image_service, + image_id, + session=self.session, + host=host_ip, + resource_pool=rp, + vm_folder=folder, + vm_create_spec= + vm_import_spec, + image_size=image_size) + except exception.CinderException as excep: + LOG.exception(_("Exception in copy_image_to_volume: %s.") % excep) + backing = self.volumeops.get_backing(volume['name']) + if backing: + LOG.exception(_("Deleting the backing: %s") % backing) + # delete the backing + self.volumeops.delete_backing(backing) + raise excep + + LOG.info(_("Done copying image: %(id)s to volume: %(vol)s.") % + {'id': image_id, 'vol': volume['name']}) + + def copy_image_to_volume(self, context, volume, image_service, image_id): + """Creates volume from image. + + This method only supports Glance image of VMDK disk format. + Uses flat vmdk file copy for "sparse" and "preallocated" disk types + Uses HttpNfc import API for "streamOptimized" disk types. This API + creates a backing VM that wraps the VMDK in the ESX/VC inventory. + + :param context: context + :param volume: Volume object + :param image_service: Glance image service + :param image_id: Glance image id + """ + LOG.debug(_("Copy glance image: %s to create new volume.") % image_id) + + # Verify glance image is vmdk disk format + metadata = image_service.show(context, image_id) + VMwareEsxVmdkDriver._validate_disk_format(metadata['disk_format']) + + # Get disk_type for vmdk disk + disk_type = None + properties = metadata['properties'] + if properties and 'vmware_disktype' in properties: + disk_type = properties['vmware_disktype'] + + if disk_type == 'streamOptimized': + self._fetch_stream_optimized_image(context, volume, image_service, + image_id, metadata['size']) + else: + self._fetch_flat_image(context, volume, image_service, image_id, + metadata['size']) + + def copy_volume_to_image(self, context, volume, image_service, image_meta): + """Creates glance image from volume. + + Upload of only available volume is supported. The uploaded glance image + has a vmdk disk type of "streamOptimized" that can only be downloaded + using the HttpNfc API. + Steps followed are: + 1. Get the name of the vmdk file which the volume points to right now. + Can be a chain of snapshots, so we need to know the last in the + chain. + 2. Use Nfc APIs to upload the contents of the vmdk file to glance. 
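+
+        The resulting image metadata (set in vmware_images.upload_image)
+        looks roughly like::
+
+            {'disk_format': 'vmdk',
+             'container_format': 'bare',
+             'size': 0,
+             'properties': {'vmware_disktype': 'streamOptimized',
+                            'vmware_image_version': 1,
+                            ...}}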
+ """ + + # if volume is attached raise exception + if volume['instance_uuid'] or volume['attached_host']: + msg = _("Upload to glance of attached volume is not supported.") + LOG.error(msg) + raise exception.InvalidVolume(msg) + + # validate disk format is vmdk + LOG.debug(_("Copy Volume: %s to new image.") % volume['name']) + VMwareEsxVmdkDriver._validate_disk_format(image_meta['disk_format']) + + # get backing vm of volume and its vmdk path + backing = self.volumeops.get_backing(volume['name']) + if not backing: + LOG.info(_("Backing not found, creating for volume: %s") % + volume['name']) + backing = self._create_backing_in_inventory(volume) + vmdk_file_path = self.volumeops.get_vmdk_path(backing) + + # Upload image from vmdk + timeout = self.configuration.vmware_image_transfer_timeout_secs + host_ip = self.configuration.vmware_host_ip + + vmware_images.upload_image(context, timeout, image_service, + image_meta['id'], + volume['project_id'], + session=self.session, + host=host_ip, + vm=backing, + vmdk_file_path=vmdk_file_path, + vmdk_size=volume['size'] * units.GiB, + image_name=image_meta['name'], + image_version=1) + LOG.info(_("Done copying volume %(vol)s to a new image %(img)s") % + {'vol': volume['name'], 'img': image_meta['name']}) + + +class VMwareVcVmdkDriver(VMwareEsxVmdkDriver): + """Manage volumes on VMware VC server.""" + + def _get_volume_group_folder(self, datacenter): + """Get volume group folder. + + Creates a folder under the vmFolder of the input datacenter with the + volume group name if it does not exists. + + :param datacenter: Reference to the datacenter + :return: Reference to the volume folder + """ + vm_folder = super(VMwareVcVmdkDriver, + self)._get_volume_group_folder(datacenter) + volume_folder = self.configuration.vmware_volume_folder + return self.volumeops.create_folder(vm_folder, volume_folder) + + def _relocate_backing(self, size_gb, backing, host): + """Relocate volume backing under host and move to volume_group folder. + + If the volume backing is on a datastore that is visible to the host, + then need not do any operation. + + :param size_gb: Size of the volume in GB + :param backing: Reference to the backing + :param host: Reference to the host + """ + # Check if volume's datastore is visible to host managing + # the instance + (datastores, resource_pool) = self.volumeops.get_dss_rp(host) + datastore = self.volumeops.get_datastore(backing) + + visible_to_host = False + for _datastore in datastores: + if _datastore.value == datastore.value: + visible_to_host = True + break + if visible_to_host: + return + + # The volume's backing is on a datastore that is not visible to the + # host managing the instance. We relocate the volume's backing. + + # Pick a folder and datastore to relocate volume backing to + (folder, summary) = self._get_folder_ds_summary(size_gb, resource_pool, + datastores) + LOG.info(_("Relocating volume: %(backing)s to %(ds)s and %(rp)s.") % + {'backing': backing, 'ds': summary, 'rp': resource_pool}) + # Relocate the backing to the datastore and folder + self.volumeops.relocate_backing(backing, summary.datastore, + resource_pool, host) + self.volumeops.move_backing_to_folder(backing, folder) + + @staticmethod + def _get_clone_type(volume): + """Get clone type from volume type. 
+ + :param volume: Volume object + :return: Clone type from the extra spec if present, else return + default 'full' clone type + """ + return _get_volume_type_extra_spec(volume['volume_type_id'], + 'clone_type', + (volumeops.FULL_CLONE_TYPE, + volumeops.LINKED_CLONE_TYPE), + volumeops.FULL_CLONE_TYPE) + + def _clone_backing(self, volume, backing, snapshot, clone_type): + """Clone the backing. + + :param volume: New Volume object + :param backing: Reference to the backing entity + :param snapshot: Reference to snapshot entity + :param clone_type: type of the clone + """ + datastore = None + if not clone_type == volumeops.LINKED_CLONE_TYPE: + # Pick a datastore where to create the full clone under same host + host = self.volumeops.get_host(backing) + (datastores, resource_pool) = self.volumeops.get_dss_rp(host) + size_bytes = volume['size'] * units.GiB + datastore = self._select_datastore_summary(size_bytes, + datastores).datastore + clone = self.volumeops.clone_backing(volume['name'], backing, + snapshot, clone_type, datastore) + LOG.info(_("Successfully created clone: %s.") % clone) + + def _create_volume_from_snapshot(self, volume, snapshot): + """Creates a volume from a snapshot. + + If the snapshot does not exist or source volume's backing does not + exist, then pass. + + :param volume: New Volume object + :param snapshot: Reference to snapshot entity + """ + backing = self.volumeops.get_backing(snapshot['volume_name']) + if not backing: + LOG.info(_("There is no backing for the snapshoted volume: " + "%(snap)s. Not creating any backing for the " + "volume: %(vol)s.") % + {'snap': snapshot['name'], 'vol': volume['name']}) + return + snapshot_moref = self.volumeops.get_snapshot(backing, + snapshot['name']) + if not snapshot_moref: + LOG.info(_("There is no snapshot point for the snapshoted volume: " + "%(snap)s. Not creating any backing for the " + "volume: %(vol)s.") % + {'snap': snapshot['name'], 'vol': volume['name']}) + return + clone_type = VMwareVcVmdkDriver._get_clone_type(volume) + self._clone_backing(volume, backing, snapshot_moref, clone_type) + + def create_volume_from_snapshot(self, volume, snapshot): + """Creates a volume from a snapshot. + + :param volume: New Volume object + :param snapshot: Reference to snapshot entity + """ + self._create_volume_from_snapshot(volume, snapshot) + + def _create_cloned_volume(self, volume, src_vref): + """Creates volume clone. + + If source volume's backing does not exist, then pass. + Linked clone of attached volume is not supported. + + :param volume: New Volume object + :param src_vref: Source Volume object + """ + + backing = self.volumeops.get_backing(src_vref['name']) + if not backing: + LOG.info(_("There is no backing for the source volume: %(src)s. " + "Not creating any backing for volume: %(vol)s.") % + {'src': src_vref['name'], 'vol': volume['name']}) + return + clone_type = VMwareVcVmdkDriver._get_clone_type(volume) + snapshot = None + if clone_type == volumeops.LINKED_CLONE_TYPE: + if src_vref['status'] != 'available': + msg = _("Linked clone of source volume not supported " + "in state: %s.") + LOG.error(msg % src_vref['status']) + raise exception.InvalidVolume(msg % src_vref['status']) + # For performing a linked clone, we snapshot the volume and + # then create the linked clone out of this snapshot point. 
+ name = 'snapshot-%s' % volume['id'] + snapshot = self.volumeops.create_snapshot(backing, name, None) + self._clone_backing(volume, backing, snapshot, clone_type) + + def create_cloned_volume(self, volume, src_vref): + """Creates volume clone. + + :param volume: New Volume object + :param src_vref: Source Volume object + """ + self._create_cloned_volume(volume, src_vref) diff --git a/cinder/volume/drivers/vmware/vmware_images.py b/cinder/volume/drivers/vmware/vmware_images.py new file mode 100644 index 0000000000..c56f158fb5 --- /dev/null +++ b/cinder/volume/drivers/vmware/vmware_images.py @@ -0,0 +1,158 @@ +# Copyright (c) 2013 VMware, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Utility functions for Image transfer. +""" + +from eventlet import timeout + +from cinder import exception +from cinder.openstack.common import log as logging +from cinder.volume.drivers.vmware import io_util +from cinder.volume.drivers.vmware import read_write_util as rw_util + +LOG = logging.getLogger(__name__) + +QUEUE_BUFFER_SIZE = 10 + + +def start_transfer(context, timeout_secs, read_file_handle, max_data_size, + write_file_handle=None, image_service=None, image_id=None, + image_meta=None): + """Start the data transfer from the reader to the writer. + + Reader writes to the pipe and the writer reads from the pipe. This means + that the total transfer time boils down to the slower of the read/write + and not the addition of the two times. + """ + + if not image_meta: + image_meta = {} + + # The pipe that acts as an intermediate store of data for reader to write + # to and writer to grab from. + thread_safe_pipe = io_util.ThreadSafePipe(QUEUE_BUFFER_SIZE, max_data_size) + # The read thread. In case of glance it is the instance of the + # GlanceFileRead class. The glance client read returns an iterator + # and this class wraps that iterator to provide datachunks in calls + # to read. + read_thread = io_util.IOThread(read_file_handle, thread_safe_pipe) + + # In case of Glance - VMware transfer, we just need a handle to the + # HTTP Connection that is to send transfer data to the VMware datastore. + if write_file_handle: + write_thread = io_util.IOThread(thread_safe_pipe, write_file_handle) + # In case of VMware - Glance transfer, we relinquish VMware HTTP file read + # handle to Glance Client instance, but to be sure of the transfer we need + # to be sure of the status of the image on glance changing to active. + # The GlanceWriteThread handles the same for us. + elif image_service and image_id: + write_thread = io_util.GlanceWriteThread(context, thread_safe_pipe, + image_service, image_id, + image_meta) + # Start the read and write threads. 
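+    # (Because the pipe is bounded at QUEUE_BUFFER_SIZE chunks, the faster
+    # side blocks on the slower one, which is why the elapsed time is
+    # roughly max(read time, write time) rather than their sum.)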
+ read_event = read_thread.start() + write_event = write_thread.start() + timer = timeout.Timeout(timeout_secs) + try: + # Wait on the read and write events to signal their end + read_event.wait() + write_event.wait() + except (timeout.Timeout, Exception) as exc: + # In case of any of the reads or writes raising an exception, + # stop the threads so that we un-necessarily don't keep the other one + # waiting. + read_thread.stop() + write_thread.stop() + + # Log and raise the exception. + LOG.exception(exc) + raise exception.CinderException(exc) + finally: + timer.cancel() + # No matter what, try closing the read and write handles, if it so + # applies. + read_file_handle.close() + if write_file_handle: + write_file_handle.close() + + +def fetch_flat_image(context, timeout_secs, image_service, image_id, **kwargs): + """Download flat image from the glance image server.""" + LOG.debug(_("Downloading image: %s from glance image server as a flat vmdk" + " file.") % image_id) + file_size = int(kwargs.get('image_size')) + read_iter = image_service.download(context, image_id) + read_handle = rw_util.GlanceFileRead(read_iter) + write_handle = rw_util.VMwareHTTPWriteFile(kwargs.get('host'), + kwargs.get('data_center_name'), + kwargs.get('datastore_name'), + kwargs.get('cookies'), + kwargs.get('file_path'), + file_size) + start_transfer(context, timeout_secs, read_handle, file_size, + write_file_handle=write_handle) + LOG.info(_("Downloaded image: %s from glance image server.") % image_id) + + +def fetch_stream_optimized_image(context, timeout_secs, image_service, + image_id, **kwargs): + """Download stream optimized image from glance image server.""" + LOG.debug(_("Downloading image: %s from glance image server using HttpNfc" + " import.") % image_id) + file_size = int(kwargs.get('image_size')) + read_iter = image_service.download(context, image_id) + read_handle = rw_util.GlanceFileRead(read_iter) + write_handle = rw_util.VMwareHTTPWriteVmdk(kwargs.get('session'), + kwargs.get('host'), + kwargs.get('resource_pool'), + kwargs.get('vm_folder'), + kwargs.get('vm_create_spec'), + file_size) + start_transfer(context, timeout_secs, read_handle, file_size, + write_file_handle=write_handle) + LOG.info(_("Downloaded image: %s from glance image server.") % image_id) + + +def upload_image(context, timeout_secs, image_service, image_id, owner_id, + **kwargs): + """Upload the vm's disk file to Glance image server.""" + LOG.debug(_("Uploading image: %s to the Glance image server using HttpNfc" + " export.") % image_id) + file_size = kwargs.get('vmdk_size') + read_handle = rw_util.VMwareHTTPReadVmdk(kwargs.get('session'), + kwargs.get('host'), + kwargs.get('vm'), + kwargs.get('vmdk_file_path'), + file_size) + + # The properties and other fields that we need to set for the image. + # Important to set the 'size' to 0 here. 
Otherwise the glance client + # uses the volume size which may not be image size after upload since + # it is converted to a stream-optimized sparse disk + image_metadata = {'disk_format': 'vmdk', + 'is_public': 'false', + 'name': kwargs.get('image_name'), + 'status': 'active', + 'container_format': 'bare', + 'size': 0, + 'properties': {'vmware_image_version': + kwargs.get('image_version'), + 'vmware_disktype': 'streamOptimized', + 'owner_id': owner_id}} + start_transfer(context, timeout_secs, read_handle, file_size, + image_service=image_service, image_id=image_id, + image_meta=image_metadata) + LOG.info(_("Uploaded image: %s to the Glance image server.") % image_id) diff --git a/cinder/volume/drivers/vmware/volumeops.py b/cinder/volume/drivers/vmware/volumeops.py new file mode 100644 index 0000000000..e2acd8f590 --- /dev/null +++ b/cinder/volume/drivers/vmware/volumeops.py @@ -0,0 +1,731 @@ +# Copyright (c) 2013 VMware, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Implements operations on volumes residing on VMware datastores. +""" + +from cinder.openstack.common import log as logging +from cinder.volume.drivers.vmware import error_util +from cinder.volume.drivers.vmware import vim_util + +LOG = logging.getLogger(__name__) +LINKED_CLONE_TYPE = 'linked' +FULL_CLONE_TYPE = 'full' +ALREADY_EXISTS = 'AlreadyExists' +FILE_ALREADY_EXISTS = 'FileAlreadyExists' + + +def split_datastore_path(datastore_path): + """Split the datastore path to components. + + return the datastore name, relative folder path and the file name + + E.g. datastore_path = [datastore1] my_volume/my_volume.vmdk, returns + (datastore1, my_volume/, my_volume.vmdk) + + :param datastore_path: Datastore path of a file + :return: Parsed datastore name, relative folder path and file name + """ + splits = datastore_path.split('[', 1)[1].split(']', 1) + datastore_name = None + folder_path = None + file_name = None + if len(splits) == 1: + datastore_name = splits[0] + else: + datastore_name, path = splits + # Path will be of form my_volume/my_volume.vmdk + # we need into my_volumes/ and my_volume.vmdk + splits = path.split('/') + file_name = splits[len(splits) - 1] + folder_path = path[:-len(file_name)] + + return (datastore_name.strip(), folder_path.strip(), file_name.strip()) + + +class VMwareVolumeOps(object): + """Manages volume operations.""" + + def __init__(self, session, max_objects): + self._session = session + self._max_objects = max_objects + + def get_backing(self, name): + """Get the backing based on name. + + :param name: Name of the backing + :return: Managed object reference to the backing + """ + + retrieve_result = self._session.invoke_api(vim_util, 'get_objects', + self._session.vim, + 'VirtualMachine', + self._max_objects) + while retrieve_result: + vms = retrieve_result.objects + for vm in vms: + if vm.propSet[0].val == name: + # We got the result, so cancel further retrieval. 
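+                    # (Backings are returned in pages of at most
+                    # self._max_objects; cancelling here stops any further
+                    # paging once the named backing has been found.)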
+ self.cancel_retrieval(retrieve_result) + return vm.obj + # Result not obtained, continue retrieving results. + retrieve_result = self.continue_retrieval(retrieve_result) + + LOG.debug(_("Did not find any backing with name: %s") % name) + + def delete_backing(self, backing): + """Delete the backing. + + :param backing: Managed object reference to the backing + """ + LOG.debug(_("Deleting the VM backing: %s.") % backing) + task = self._session.invoke_api(self._session.vim, 'Destroy_Task', + backing) + LOG.debug(_("Initiated deletion of VM backing: %s.") % backing) + self._session.wait_for_task(task) + LOG.info(_("Deleted the VM backing: %s.") % backing) + + # TODO(kartikaditya) Keep the methods not specific to volume in + # a different file + def get_host(self, instance): + """Get host under which instance is present. + + :param instance: Managed object reference of the instance VM + :return: Host managing the instance VM + """ + return self._session.invoke_api(vim_util, 'get_object_property', + self._session.vim, instance, + 'runtime.host') + + def get_hosts(self): + """Get all host from the inventory. + + :return: All the hosts from the inventory + """ + return self._session.invoke_api(vim_util, 'get_objects', + self._session.vim, + 'HostSystem', self._max_objects) + + def continue_retrieval(self, retrieve_result): + """Continue retrieval of results if necessary. + + :param retrieve_result: Result from RetrievePropertiesEx + """ + + return self._session.invoke_api(vim_util, 'continue_retrieval', + self._session.vim, retrieve_result) + + def cancel_retrieval(self, retrieve_result): + """Cancel retrieval of results if necessary. + + :param retrieve_result: Result from RetrievePropertiesEx + """ + + self._session.invoke_api(vim_util, 'cancel_retrieval', + self._session.vim, retrieve_result) + + def _is_usable(self, datastore, mount_info): + """Check if the given datastore is usable as per the given mount info. + + The datastore is considered to be usable for a host only if it is + writable, mounted and accessible. + + :param datastore: Reference to the datastore entity + :param mount_info: host mount information + :return: True if datastore is usable + """ + + writable = mount_info.accessMode == "readWrite" + + # If mounted attribute is not set, then default is True + mounted = True + if hasattr(mount_info, "mounted"): + mounted = mount_info.mounted + + if hasattr(mount_info, "accessible"): + accessible = mount_info.accessible + else: + # If accessible attribute is not set, we look at summary + summary = self.get_summary(datastore) + accessible = summary.accessible + + return writable and mounted and accessible + + def get_connected_hosts(self, datastore): + """Get all the hosts to which the datastore is connected and usable. + + The datastore is considered to be usable for a host only if it is + writable, mounted and accessible. + + :param datastore: Reference to the datastore entity + :return: List of managed object references of all connected + hosts + """ + + host_mounts = self._session.invoke_api(vim_util, 'get_object_property', + self._session.vim, datastore, + 'host') + connected_hosts = [] + for host_mount in host_mounts.DatastoreHostMount: + if self._is_usable(datastore, host_mount.mountInfo): + connected_hosts.append(host_mount.key.value) + + return connected_hosts + + def _is_valid(self, datastore, host): + """Check if host's datastore is accessible, mounted and writable. 
+ + :param datastore: Reference to the datastore entity + :param host: Reference to the host entity + :return: True if datastore can be used for volume creation + """ + + host_mounts = self._session.invoke_api(vim_util, 'get_object_property', + self._session.vim, datastore, + 'host') + for host_mount in host_mounts.DatastoreHostMount: + if host_mount.key.value == host.value: + return self._is_usable(datastore, host_mount.mountInfo) + return False + + def get_dss_rp(self, host): + """Get accessible datastores and resource pool of the host. + + :param host: Managed object reference of the host + :return: Datastores accessible to the host and resource pool to which + the host belongs to + """ + + props = self._session.invoke_api(vim_util, 'get_object_properties', + self._session.vim, host, + ['datastore', 'parent']) + # Get datastores and compute resource or cluster compute resource + datastores = [] + compute_resource = None + for elem in props: + for prop in elem.propSet: + if prop.name == 'datastore' and prop.val: + # Consider only if datastores are present under host + datastores = prop.val.ManagedObjectReference + elif prop.name == 'parent': + compute_resource = prop.val + # Filter datastores based on if it is accessible, mounted and writable + valid_dss = [] + for datastore in datastores: + if self._is_valid(datastore, host): + valid_dss.append(datastore) + # Get resource pool from compute resource or cluster compute resource + resource_pool = self._session.invoke_api(vim_util, + 'get_object_property', + self._session.vim, + compute_resource, + 'resourcePool') + if not valid_dss: + msg = _("There are no valid datastores attached to %s.") % host + LOG.error(msg) + raise error_util.VimException(msg) + return (valid_dss, resource_pool) + + def _get_parent(self, child, parent_type): + """Get immediate parent of given type via 'parent' property. + + :param child: Child entity reference + :param parent_type: Entity type of the parent + :return: Immediate parent of specific type up the hierarchy via + 'parent' property + """ + + if not child: + return None + if child._type == parent_type: + return child + parent = self._session.invoke_api(vim_util, 'get_object_property', + self._session.vim, child, 'parent') + return self._get_parent(parent, parent_type) + + def get_dc(self, child): + """Get parent datacenter up the hierarchy via 'parent' property. + + :param child: Reference of the child entity + :return: Parent Datacenter of the param child entity + """ + return self._get_parent(child, 'Datacenter') + + def get_vmfolder(self, datacenter): + """Get the vmFolder. + + :param datacenter: Reference to the datacenter entity + :return: vmFolder property of the datacenter + """ + return self._session.invoke_api(vim_util, 'get_object_property', + self._session.vim, datacenter, + 'vmFolder') + + def create_folder(self, parent_folder, child_folder_name): + """Creates child folder with given name under the given parent folder. + + The method first checks if a child folder already exists, if it does, + then it returns a moref for the folder, else it creates one and then + return the moref. 
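+
+        Note that only immediate children of type 'Folder' are considered
+        when looking for an existing folder with the given name.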
+ + :param parent_folder: Reference to the folder entity + :param child_folder_name: Name of the child folder + :return: Reference to the child folder with input name if it already + exists, else create one and return the reference + """ + LOG.debug(_("Creating folder: %(child_folder_name)s under parent " + "folder: %(parent_folder)s.") % + {'child_folder_name': child_folder_name, + 'parent_folder': parent_folder}) + + # Get list of child entities for the parent folder + prop_val = self._session.invoke_api(vim_util, 'get_object_property', + self._session.vim, parent_folder, + 'childEntity') + child_entities = prop_val.ManagedObjectReference + + # Return if the child folder with input name is already present + for child_entity in child_entities: + if child_entity._type != 'Folder': + continue + child_entity_name = self.get_entity_name(child_entity) + if child_entity_name == child_folder_name: + LOG.debug(_("Child folder already present: %s.") % + child_entity) + return child_entity + + # Need to create the child folder + child_folder = self._session.invoke_api(self._session.vim, + 'CreateFolder', parent_folder, + name=child_folder_name) + LOG.debug(_("Created child folder: %s.") % child_folder) + return child_folder + + def _get_create_spec(self, name, size_kb, disk_type, ds_name): + """Return spec for creating volume backing. + + :param name: Name of the backing + :param size_kb: Size in KB of the backing + :param disk_type: VMDK type for the disk + :param ds_name: Datastore name where the disk is to be provisioned + :return: Spec for creation + """ + cf = self._session.vim.client.factory + controller_device = cf.create('ns0:VirtualLsiLogicController') + controller_device.key = -100 + controller_device.busNumber = 0 + controller_device.sharedBus = 'noSharing' + controller_spec = cf.create('ns0:VirtualDeviceConfigSpec') + controller_spec.operation = 'add' + controller_spec.device = controller_device + + disk_device = cf.create('ns0:VirtualDisk') + # for very small disks allocate at least 1KB + disk_device.capacityInKB = max(1, int(size_kb)) + disk_device.key = -101 + disk_device.unitNumber = 0 + disk_device.controllerKey = -100 + disk_device_bkng = cf.create('ns0:VirtualDiskFlatVer2BackingInfo') + if disk_type == 'eagerZeroedThick': + disk_device_bkng.eagerlyScrub = True + elif disk_type == 'thin': + disk_device_bkng.thinProvisioned = True + disk_device_bkng.fileName = '' + disk_device_bkng.diskMode = 'persistent' + disk_device.backing = disk_device_bkng + disk_spec = cf.create('ns0:VirtualDeviceConfigSpec') + disk_spec.operation = 'add' + disk_spec.fileOperation = 'create' + disk_spec.device = disk_device + + vm_file_info = cf.create('ns0:VirtualMachineFileInfo') + vm_file_info.vmPathName = '[%s]' % ds_name + + create_spec = cf.create('ns0:VirtualMachineConfigSpec') + create_spec.name = name + create_spec.guestId = 'otherGuest' + create_spec.numCPUs = 1 + create_spec.memoryMB = 128 + create_spec.deviceChange = [controller_spec, disk_spec] + create_spec.files = vm_file_info + + LOG.debug(_("Spec for creating the backing: %s.") % create_spec) + return create_spec + + def create_backing(self, name, size_kb, disk_type, + folder, resource_pool, host, ds_name): + """Create backing for the volume. + + Creates a VM with one VMDK based on the given inputs. 
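+
+        The backing is a minimal VM (1 vCPU, 128 MB of memory, guest id
+        'otherGuest') whose purpose is to own the volume's VMDK; see
+        _get_create_spec above for the exact configuration.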
+ + :param name: Name of the backing + :param size_kb: Size in KB of the backing + :param disk_type: VMDK type for the disk + :param folder: Folder, where to create the backing under + :param resource_pool: Resource pool reference + :param host: Host reference + :param ds_name: Datastore name where the disk is to be provisioned + :return: Reference to the created backing entity + """ + LOG.debug(_("Creating volume backing name: %(name)s " + "disk_type: %(disk_type)s size_kb: %(size_kb)s at " + "folder: %(folder)s resourse pool: %(resource_pool)s " + "datastore name: %(ds_name)s.") % + {'name': name, 'disk_type': disk_type, 'size_kb': size_kb, + 'folder': folder, 'resource_pool': resource_pool, + 'ds_name': ds_name}) + + create_spec = self._get_create_spec(name, size_kb, disk_type, ds_name) + task = self._session.invoke_api(self._session.vim, 'CreateVM_Task', + folder, config=create_spec, + pool=resource_pool, host=host) + LOG.debug(_("Initiated creation of volume backing: %s.") % name) + task_info = self._session.wait_for_task(task) + backing = task_info.result + LOG.info(_("Successfully created volume backing: %s.") % backing) + return backing + + def get_datastore(self, backing): + """Get datastore where the backing resides. + + :param backing: Reference to the backing + :return: Datastore reference to which the backing belongs + """ + return self._session.invoke_api(vim_util, 'get_object_property', + self._session.vim, backing, + 'datastore').ManagedObjectReference[0] + + def get_summary(self, datastore): + """Get datastore summary. + + :param datastore: Reference to the datastore + :return: 'summary' property of the datastore + """ + return self._session.invoke_api(vim_util, 'get_object_property', + self._session.vim, datastore, + 'summary') + + def _get_relocate_spec(self, datastore, resource_pool, host, + disk_move_type): + """Return spec for relocating volume backing. + + :param datastore: Reference to the datastore + :param resource_pool: Reference to the resource pool + :param host: Reference to the host + :param disk_move_type: Disk move type option + :return: Spec for relocation + """ + cf = self._session.vim.client.factory + relocate_spec = cf.create('ns0:VirtualMachineRelocateSpec') + relocate_spec.datastore = datastore + relocate_spec.pool = resource_pool + relocate_spec.host = host + relocate_spec.diskMoveType = disk_move_type + + LOG.debug(_("Spec for relocating the backing: %s.") % relocate_spec) + return relocate_spec + + def relocate_backing(self, backing, datastore, resource_pool, host): + """Relocates backing to the input datastore and resource pool. + + The implementation uses moveAllDiskBackingsAndAllowSharing disk move + type. 
+ + :param backing: Reference to the backing + :param datastore: Reference to the datastore + :param resource_pool: Reference to the resource pool + :param host: Reference to the host + """ + LOG.debug(_("Relocating backing: %(backing)s to datastore: %(ds)s " + "and resource pool: %(rp)s.") % + {'backing': backing, 'ds': datastore, 'rp': resource_pool}) + + # Relocate the volume backing + disk_move_type = 'moveAllDiskBackingsAndAllowSharing' + relocate_spec = self._get_relocate_spec(datastore, resource_pool, host, + disk_move_type) + task = self._session.invoke_api(self._session.vim, 'RelocateVM_Task', + backing, spec=relocate_spec) + LOG.debug(_("Initiated relocation of volume backing: %s.") % backing) + self._session.wait_for_task(task) + LOG.info(_("Successfully relocated volume backing: %(backing)s " + "to datastore: %(ds)s and resource pool: %(rp)s.") % + {'backing': backing, 'ds': datastore, 'rp': resource_pool}) + + def move_backing_to_folder(self, backing, folder): + """Move the volume backing to the folder. + + :param backing: Reference to the backing + :param folder: Reference to the folder + """ + LOG.debug(_("Moving backing: %(backing)s to folder: %(fol)s.") % + {'backing': backing, 'fol': folder}) + task = self._session.invoke_api(self._session.vim, + 'MoveIntoFolder_Task', folder, + list=[backing]) + LOG.debug(_("Initiated move of volume backing: %(backing)s into the " + "folder: %(fol)s.") % {'backing': backing, 'fol': folder}) + self._session.wait_for_task(task) + LOG.info(_("Successfully moved volume backing: %(backing)s into the " + "folder: %(fol)s.") % {'backing': backing, 'fol': folder}) + + def create_snapshot(self, backing, name, description, quiesce=False): + """Create snapshot of the backing with given name and description. + + :param backing: Reference to the backing entity + :param name: Snapshot name + :param description: Snapshot description + :param quiesce: Whether to quiesce the backing when taking snapshot + :return: Created snapshot entity reference + """ + LOG.debug(_("Snapshoting backing: %(backing)s with name: %(name)s.") % + {'backing': backing, 'name': name}) + task = self._session.invoke_api(self._session.vim, + 'CreateSnapshot_Task', + backing, name=name, + description=description, + memory=False, quiesce=quiesce) + LOG.debug(_("Initiated snapshot of volume backing: %(backing)s " + "named: %(name)s.") % {'backing': backing, 'name': name}) + task_info = self._session.wait_for_task(task) + snapshot = task_info.result + LOG.info(_("Successfully created snapshot: %(snap)s for volume " + "backing: %(backing)s.") % + {'snap': snapshot, 'backing': backing}) + return snapshot + + @staticmethod + def _get_snapshot_from_tree(name, root): + """Get snapshot by name from the snapshot tree root. + + :param name: Snapshot name + :param root: Current root node in the snapshot tree + :return: None in the snapshot tree with given snapshot name + """ + if not root: + return None + if root.name == name: + return root.snapshot + if (not hasattr(root, 'childSnapshotList') or + not root.childSnapshotList): + # When root does not have children, the childSnapshotList attr + # is missing sometime. Adding an additional check. + return None + for node in root.childSnapshotList: + snapshot = VMwareVolumeOps._get_snapshot_from_tree(name, node) + if snapshot: + return snapshot + + def get_snapshot(self, backing, name): + """Get snapshot of the backing with given name. 
+ + :param backing: Reference to the backing entity + :param name: Snapshot name + :return: Snapshot entity of the backing with given name + """ + snapshot = self._session.invoke_api(vim_util, 'get_object_property', + self._session.vim, backing, + 'snapshot') + if not snapshot or not snapshot.rootSnapshotList: + return None + for root in snapshot.rootSnapshotList: + return VMwareVolumeOps._get_snapshot_from_tree(name, root) + + def delete_snapshot(self, backing, name): + """Delete a given snapshot from volume backing. + + :param backing: Reference to the backing entity + :param name: Snapshot name + """ + LOG.debug(_("Deleting the snapshot: %(name)s from backing: " + "%(backing)s.") % + {'name': name, 'backing': backing}) + snapshot = self.get_snapshot(backing, name) + if not snapshot: + LOG.info(_("Did not find the snapshot: %(name)s for backing: " + "%(backing)s. Need not delete anything.") % + {'name': name, 'backing': backing}) + return + task = self._session.invoke_api(self._session.vim, + 'RemoveSnapshot_Task', + snapshot, removeChildren=False) + LOG.debug(_("Initiated snapshot: %(name)s deletion for backing: " + "%(backing)s.") % + {'name': name, 'backing': backing}) + self._session.wait_for_task(task) + LOG.info(_("Successfully deleted snapshot: %(name)s of backing: " + "%(backing)s.") % {'backing': backing, 'name': name}) + + def _get_folder(self, backing): + """Get parent folder of the backing. + + :param backing: Reference to the backing entity + :return: Reference to parent folder of the backing entity + """ + return self._get_parent(backing, 'Folder') + + def _get_clone_spec(self, datastore, disk_move_type, snapshot): + """Get the clone spec. + + :param datastore: Reference to datastore + :param disk_move_type: Disk move type + :param snapshot: Reference to snapshot + :return: Clone spec + """ + relocate_spec = self._get_relocate_spec(datastore, None, None, + disk_move_type) + cf = self._session.vim.client.factory + clone_spec = cf.create('ns0:VirtualMachineCloneSpec') + clone_spec.location = relocate_spec + clone_spec.powerOn = False + clone_spec.template = False + clone_spec.snapshot = snapshot + + LOG.debug(_("Spec for cloning the backing: %s.") % clone_spec) + return clone_spec + + def clone_backing(self, name, backing, snapshot, clone_type, datastore): + """Clone backing. + + If the clone_type is 'full', then a full clone of the source volume + backing will be created. Else, if it is 'linked', then a linked clone + of the source volume backing will be created. 
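+
+        In terms of the relocate spec, a linked clone maps to the
+        'createNewChildDiskBacking' disk move type and a full clone to
+        'moveAllDiskBackingsAndDisallowSharing' (see the method body).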
+ + :param name: Name for the clone + :param backing: Reference to the backing entity + :param snapshot: Snapshot point from which the clone should be done + :param clone_type: Whether a full clone or linked clone is to be made + :param datastore: Reference to the datastore entity + """ + LOG.debug(_("Creating a clone of backing: %(back)s, named: %(name)s, " + "clone type: %(type)s from snapshot: %(snap)s on " + "datastore: %(ds)s") % + {'back': backing, 'name': name, 'type': clone_type, + 'snap': snapshot, 'ds': datastore}) + folder = self._get_folder(backing) + if clone_type == LINKED_CLONE_TYPE: + disk_move_type = 'createNewChildDiskBacking' + else: + disk_move_type = 'moveAllDiskBackingsAndDisallowSharing' + clone_spec = self._get_clone_spec(datastore, disk_move_type, snapshot) + task = self._session.invoke_api(self._session.vim, 'CloneVM_Task', + backing, folder=folder, name=name, + spec=clone_spec) + LOG.debug(_("Initiated clone of backing: %s.") % name) + task_info = self._session.wait_for_task(task) + new_backing = task_info.result + LOG.info(_("Successfully created clone: %s.") % new_backing) + return new_backing + + def delete_file(self, file_path, datacenter=None): + """Delete file or folder on the datastore. + + :param file_path: Datastore path of the file or folder + """ + LOG.debug(_("Deleting file: %(file)s under datacenter: %(dc)s.") % + {'file': file_path, 'dc': datacenter}) + fileManager = self._session.vim.service_content.fileManager + task = self._session.invoke_api(self._session.vim, + 'DeleteDatastoreFile_Task', + fileManager, + name=file_path, + datacenter=datacenter) + LOG.debug(_("Initiated deletion via task: %s.") % task) + self._session.wait_for_task(task) + LOG.info(_("Successfully deleted file: %s.") % file_path) + + def get_path_name(self, backing): + """Get path name of the backing. + + :param backing: Reference to the backing entity + :return: Path name of the backing + """ + return self._session.invoke_api(vim_util, 'get_object_property', + self._session.vim, backing, + 'config.files').vmPathName + + def get_entity_name(self, entity): + """Get name of the managed entity. + + :param entity: Reference to the entity + :return: Name of the managed entity + """ + return self._session.invoke_api(vim_util, 'get_object_property', + self._session.vim, entity, 'name') + + def get_vmdk_path(self, backing): + """Get the vmdk file name of the backing. + + The vmdk file path of the backing returned is of the form: + "[datastore1] my_folder/my_vm.vmdk" + + :param backing: Reference to the backing + :return: VMDK file path of the backing + """ + hardware_devices = self._session.invoke_api(vim_util, + 'get_object_property', + self._session.vim, + backing, + 'config.hardware.device') + if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice": + hardware_devices = hardware_devices.VirtualDevice + for device in hardware_devices: + if device.__class__.__name__ == "VirtualDisk": + bkng = device.backing + if bkng.__class__.__name__ == "VirtualDiskFlatVer2BackingInfo": + return bkng.fileName + + def copy_vmdk_file(self, dc_ref, src_vmdk_file_path, dest_vmdk_file_path): + """Copy contents of the src vmdk file to dest vmdk file. + + During the copy also coalesce snapshots of src if present. + dest_vmdk_file_path will be created if not already present. 
+ + :param dc_ref: Reference to datacenter containing src and dest + :param src_vmdk_file_path: Source vmdk file path + :param dest_vmdk_file_path: Destination vmdk file path + """ + LOG.debug(_('Copying disk data before snapshot of the VM')) + diskMgr = self._session.vim.service_content.virtualDiskManager + task = self._session.invoke_api(self._session.vim, + 'CopyVirtualDisk_Task', + diskMgr, + sourceName=src_vmdk_file_path, + sourceDatacenter=dc_ref, + destName=dest_vmdk_file_path, + destDatacenter=dc_ref, + force=True) + LOG.debug(_("Initiated copying disk data via task: %s.") % task) + self._session.wait_for_task(task) + LOG.info(_("Successfully copied disk at: %(src)s to: %(dest)s.") % + {'src': src_vmdk_file_path, 'dest': dest_vmdk_file_path}) + + def delete_vmdk_file(self, vmdk_file_path, dc_ref): + """Delete given vmdk files. + + :param vmdk_file_path: VMDK file path to be deleted + :param dc_ref: Reference to datacenter that contains this VMDK file + """ + LOG.debug(_("Deleting vmdk file: %s.") % vmdk_file_path) + diskMgr = self._session.vim.service_content.virtualDiskManager + task = self._session.invoke_api(self._session.vim, + 'DeleteVirtualDisk_Task', + diskMgr, + name=vmdk_file_path, + datacenter=dc_ref) + LOG.debug(_("Initiated deleting vmdk file via task: %s.") % task) + self._session.wait_for_task(task) + LOG.info(_("Deleted vmdk file: %s.") % vmdk_file_path) diff --git a/cinder/volume/drivers/windows/__init__.py b/cinder/volume/drivers/windows/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cinder/volume/drivers/windows/windows.py b/cinder/volume/drivers/windows/windows.py new file mode 100644 index 0000000000..68beda628b --- /dev/null +++ b/cinder/volume/drivers/windows/windows.py @@ -0,0 +1,226 @@ +# Copyright 2012 Pedro Navarro Perez +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Volume driver for Windows Server 2012 + +This driver requires ISCSI target role installed + +""" + +import os + +from oslo.config import cfg + +from cinder.image import image_utils +from cinder.openstack.common import log as logging +from cinder.volume import driver +from cinder.volume.drivers.windows import windows_utils + +LOG = logging.getLogger(__name__) + +windows_opts = [ + cfg.StrOpt('windows_iscsi_lun_path', + default='C:\iSCSIVirtualDisks', + help='Path to store VHD backed volumes'), +] + +CONF = cfg.CONF +CONF.register_opts(windows_opts) + + +class WindowsDriver(driver.ISCSIDriver): + """Executes volume driver commands on Windows Storage server.""" + + VERSION = '1.0.0' + + def __init__(self, *args, **kwargs): + super(WindowsDriver, self).__init__(*args, **kwargs) + self.configuration = kwargs.get('configuration', None) + if self.configuration: + self.configuration.append_config_values(windows_opts) + + def do_setup(self, context): + """Setup the Windows Volume driver. + + Called one time by the manager after the driver is loaded. 
+        Validates the flags we care about.
+        """
+        self.utils = windows_utils.WindowsUtils()
+
+    def check_for_setup_error(self):
+        """Check that the driver is working and can communicate."""
+        self.utils.check_for_setup_error()
+
+    def initialize_connection(self, volume, connector):
+        """Driver entry point to attach a volume to an instance."""
+        initiator_name = connector['initiator']
+        target_name = volume['provider_location']
+
+        self.utils.associate_initiator_with_iscsi_target(initiator_name,
+                                                         target_name)
+
+        properties = self.utils.get_host_information(volume, target_name)
+
+        return {
+            'driver_volume_type': 'iscsi',
+            'data': properties,
+        }
+
+    def terminate_connection(self, volume, connector, **kwargs):
+        """Driver entry point to detach a volume from an instance.
+
+        Unmask the LUN on the storage system so the given initiator can no
+        longer access it.
+        """
+        initiator_name = connector['initiator']
+        target_name = volume['provider_location']
+        self.utils.delete_iscsi_target(initiator_name, target_name)
+
+    def create_volume(self, volume):
+        """Driver entry point for creating a new volume."""
+        vhd_path = self.local_path(volume)
+        vol_name = volume['name']
+        vol_size = volume['size']
+
+        self.utils.create_volume(vhd_path, vol_name, vol_size)
+
+    def local_path(self, volume):
+        base_vhd_folder = self.configuration.windows_iscsi_lun_path
+        if not os.path.exists(base_vhd_folder):
+            LOG.debug(_('Creating folder: %s'), base_vhd_folder)
+            os.makedirs(base_vhd_folder)
+        return os.path.join(base_vhd_folder, str(volume['name']) + ".vhd")
+
+    def delete_volume(self, volume):
+        """Driver entry point for destroying existing volumes."""
+        vol_name = volume['name']
+        vhd_path = self.local_path(volume)
+
+        self.utils.delete_volume(vol_name, vhd_path)
+
+    def create_snapshot(self, snapshot):
+        """Driver entry point for creating a snapshot."""
+        # The snapshot is created through the WT_Snapshot WMI class
+        vol_name = snapshot['volume_name']
+        snapshot_name = snapshot['name']
+
+        self.utils.create_snapshot(vol_name, snapshot_name)
+
+    def create_volume_from_snapshot(self, volume, snapshot):
+        """Driver entry point for exporting snapshots as volumes."""
+        snapshot_name = snapshot['name']
+        vol_name = volume['name']
+        self.utils.create_volume_from_snapshot(vol_name, snapshot_name)
+
+    def delete_snapshot(self, snapshot):
+        """Driver entry point for deleting a snapshot."""
+        snapshot_name = snapshot['name']
+        self.utils.delete_snapshot(snapshot_name)
+
+    def _do_export(self, _ctx, volume, ensure=False):
+        """Do all steps to get the disk exported as LUN 0 at a separate target.
+
+        :param volume: reference of volume to be exported
+        :param ensure: if True, ignore errors caused by already existing
+                       resources
+        :return: iscsiadm-formatted provider location string
+        """
+        target_name = "%s%s" % (self.configuration.iscsi_target_prefix,
+                                volume['name'])
+        self.utils.create_iscsi_target(target_name, ensure)
+
+        # Get the disk to add
+        vol_name = volume['name']
+        self.utils.add_disk_to_target(vol_name, target_name)
+
+        return target_name
+
+    def ensure_export(self, context, volume):
+        """Driver entry point to get the export info for an existing volume."""
+        self._do_export(context, volume, ensure=True)
+
+    def create_export(self, context, volume):
+        """Driver entry point to get the export info for a new volume."""
+        loc = self._do_export(context, volume, ensure=False)
+        return {'provider_location': loc}
+
+    def remove_export(self, context, volume):
+        """Driver entry point to remove an export for a volume.
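+
+        For this driver this means removing the iSCSI target that exposes
+        the volume.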
+ """ + target_name = "%s%s" % (self.configuration.iscsi_target_prefix, + volume['name']) + + self.utils.remove_iscsi_target(target_name) + + def copy_image_to_volume(self, context, volume, image_service, image_id): + """Fetch the image from image_service and write it to the volume.""" + # Convert to VHD and file back to VHD + image_utils.fetch_to_vhd(context, image_service, image_id, + self.local_path(volume), + self.configuration.volume_dd_blocksize) + + def copy_volume_to_image(self, context, volume, image_service, image_meta): + """Copy the volume to the specified image.""" + + # Copy the volume to the image conversion dir + temp_vhd_path = os.path.join(self.configuration.image_conversion_dir, + str(image_meta['id']) + ".vhd") + self.utils.copy_vhd_disk(self.local_path(volume), temp_vhd_path) + image_utils.upload_volume(context, image_service, image_meta, + temp_vhd_path, 'vpc') + + def create_cloned_volume(self, volume, src_vref): + """Creates a clone of the specified volume.""" + # Create a new volume + # Copy VHD file of the volume to clone to the created volume + self.create_volume(volume) + self.utils.copy_vhd_disk(self.local_path(src_vref), + self.local_path(volume)) + + def get_volume_stats(self, refresh=False): + """Get volume stats. + + If 'refresh' is True, run update the stats first. + """ + if refresh: + self._update_volume_stats() + + return self._stats + + def _update_volume_stats(self): + """Retrieve stats info for Windows device.""" + + LOG.debug(_("Updating volume stats")) + data = {} + backend_name = self.__class__.__name__ + if self.configuration: + backend_name = self.configuration.safe_get('volume_backend_name') + data["volume_backend_name"] = backend_name or self.__class__.__name__ + data["vendor_name"] = 'Microsoft' + data["driver_version"] = self.VERSION + data["storage_protocol"] = 'iSCSI' + data['total_capacity_gb'] = 'infinite' + data['free_capacity_gb'] = 'infinite' + data['reserved_percentage'] = 100 + data['QoS_support'] = False + self._stats = data + + def extend_volume(self, volume, new_size): + """Extend an Existing Volume.""" + old_size = volume['size'] + LOG.debug(_("Extend volume from %(old_size)s GB to %(new_size)s GB."), + {'old_size': old_size, 'new_size': new_size}) + additional_size = (new_size - old_size) * 1024 + self.utils.extend(volume['name'], additional_size) diff --git a/cinder/volume/drivers/windows/windows_utils.py b/cinder/volume/drivers/windows/windows_utils.py new file mode 100644 index 0000000000..6e18fe76c6 --- /dev/null +++ b/cinder/volume/drivers/windows/windows_utils.py @@ -0,0 +1,295 @@ +# Copyright 2013 Pedro Navarro Perez +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Utility class for Windows Storage Server 2012 volume related operations. 
+""" + +import os + +from cinder import exception +from cinder.openstack.common import log as logging + +# Check needed for unit testing on Unix +if os.name == 'nt': + import wmi + +LOG = logging.getLogger(__name__) + + +class WindowsUtils(object): + """Executes volume driver commands on Windows Storage server.""" + + def __init__(self, *args, **kwargs): + # Set the flags + self._conn_wmi = wmi.WMI(moniker='//./root/wmi') + self._conn_cimv2 = wmi.WMI(moniker='//./root/cimv2') + + def check_for_setup_error(self): + """Check that the driver is working and can communicate. + Invokes the portal and checks that is listening ISCSI traffic. + """ + try: + wt_portal = self._conn_wmi.WT_Portal()[0] + listen = wt_portal.Listen + except wmi.x_wmi as exc: + err_msg = (_('check_for_setup_error: the state of the WT Portal ' + 'could not be verified. WMI exception: %s')) + LOG.error(err_msg % exc) + raise exception.VolumeBackendAPIException(data=err_msg % exc) + + if not listen: + err_msg = (_('check_for_setup_error: there is no ISCSI traffic ' + 'listening.')) + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + + def get_host_information(self, volume, target_name): + """Getting the portal and port information.""" + try: + wt_portal = self._conn_wmi.WT_Portal()[0] + except wmi.x_wmi as exc: + err_msg = (_('get_host_information: the state of the WT Portal ' + 'could not be verified. WMI exception: %s')) + LOG.error(err_msg % exc) + raise exception.VolumeBackendAPIException(data=err_msg % exc) + (address, port) = (wt_portal.Address, wt_portal.Port) + # Getting the host information + try: + hosts = self._conn_wmi.WT_Host(Hostname=target_name) + host = hosts[0] + except wmi.x_wmi as exc: + err_msg = (_('get_host_information: the ISCSI target information ' + 'could not be retrieved. WMI exception: %s')) + LOG.error(err_msg % exc) + raise exception.VolumeBackendAPIException(data=err_msg) + + properties = {} + properties['target_discovered'] = False + properties['target_portal'] = '%s:%s' % (address, port) + properties['target_iqn'] = host.TargetIQN + properties['target_lun'] = 0 + properties['volume_id'] = volume['id'] + + auth = volume['provider_auth'] + if auth: + (auth_method, auth_username, auth_secret) = auth.split() + + properties['auth_method'] = auth_method + properties['auth_username'] = auth_username + properties['auth_password'] = auth_secret + + def associate_initiator_with_iscsi_target(self, initiator_name, + target_name): + """Sets information used by the iSCSI target entry.""" + try: + cl = self._conn_wmi.__getattr__("WT_IDMethod") + wt_idmethod = cl.new() + wt_idmethod.HostName = target_name + # Identification method is IQN + wt_idmethod.Method = 4 + wt_idmethod.Value = initiator_name + wt_idmethod.put() + except wmi.x_wmi as exc: + err_msg = (_('associate_initiator_with_iscsi_target: an ' + 'association between initiator: %(init)s and ' + 'target name: %(target)s could not be established. ' + 'WMI exception: %(wmi_exc)s') % + {'init': initiator_name, 'target': target_name, + 'wmi_exc': exc}) + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + + def delete_iscsi_target(self, initiator_name, target_name): + """Removes iSCSI targets to hosts.""" + + try: + wt_idmethod = self._conn_wmi.WT_IDMethod(HostName=target_name, + Method=4, + Value=initiator_name)[0] + wt_idmethod.Delete_() + except wmi.x_wmi as exc: + err_msg = (_( + 'delete_iscsi_target: error when deleting the iscsi target ' + 'associated with target name: %(target)s . 
' + 'WMI exception: %(wmi_exc)s') % {'target': target_name, + 'wmi_exc': exc}) + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + + def create_volume(self, vhd_path, vol_name, vol_size): + """Creates a volume.""" + try: + cl = self._conn_wmi.__getattr__("WT_Disk") + cl.NewWTDisk(DevicePath=vhd_path, + Description=vol_name, + SizeInMB=vol_size * 1024) + except wmi.x_wmi as exc: + err_msg = (_( + 'create_volume: error when creating the volume name: ' + '%(vol_name)s . WMI exception: ' + '%(wmi_exc)s') % {'vol_name': vol_name, 'wmi_exc': exc}) + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + + def delete_volume(self, vol_name, vhd_path): + """Driver entry point for destroying existing volumes.""" + try: + wt_disk = self._conn_wmi.WT_Disk(Description=vol_name)[0] + wt_disk.Delete_() + vhdfiles = self._conn_cimv2.query( + "Select * from CIM_DataFile where Name = '" + + vhd_path + "'") + if len(vhdfiles) > 0: + vhdfiles[0].Delete() + except wmi.x_wmi as exc: + err_msg = (_( + 'delete_volume: error when deleting the volume name: ' + '%(vol_name)s . WMI exception: ' + '%(wmi_exc)s') % {'vol_name': vol_name, 'wmi_exc': exc}) + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + + def create_snapshot(self, vol_name, snapshot_name): + """Driver entry point for creating a snapshot.""" + try: + wt_disk = self._conn_wmi.WT_Disk(Description=vol_name)[0] + # API Calls gets Generic Failure + cl = self._conn_wmi.__getattr__("WT_Snapshot") + disk_id = wt_disk.WTD + out = cl.Create(WTD=disk_id) + # Setting description since it used as a KEY + wt_snapshot_created = self._conn_wmi.WT_Snapshot(Id=out[0])[0] + wt_snapshot_created.Description = snapshot_name + wt_snapshot_created.put() + except wmi.x_wmi as exc: + err_msg = (_( + 'create_snapshot: error when creating the snapshot name: ' + '%(vol_name)s . WMI exception: ' + '%(wmi_exc)s') % {'vol_name': snapshot_name, 'wmi_exc': exc}) + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + + def create_volume_from_snapshot(self, vol_name, snap_name): + """Driver entry point for exporting snapshots as volumes.""" + try: + wt_snapshot = self._conn_wmi.WT_Snapshot(Description=snap_name)[0] + disk_id = wt_snapshot.Export()[0] + wt_disk = self._conn_wmi.WT_Disk(WTD=disk_id)[0] + wt_disk.Description = vol_name + wt_disk.put() + except wmi.x_wmi as exc: + err_msg = (_( + 'create_volume_from_snapshot: error when creating the volume ' + 'name: %(vol_name)s from snapshot name: %(snap_name)s. ' + 'WMI exception: %(wmi_exc)s') % {'vol_name': vol_name, + 'snap_name': snap_name, + 'wmi_exc': exc}) + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + + def delete_snapshot(self, snap_name): + """Driver entry point for deleting a snapshot.""" + try: + wt_snapshot = self._conn_wmi.WT_Snapshot(Description=snap_name)[0] + wt_snapshot.Delete_() + except wmi.x_wmi as exc: + err_msg = (_( + 'delete_snapshot: error when deleting the snapshot name: ' + '%(snap_name)s . 
WMI exception: ' + '%(wmi_exc)s') % {'snap_name': snap_name, 'wmi_exc': exc}) + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + + def create_iscsi_target(self, target_name, ensure): + """Creates ISCSI target.""" + try: + cl = self._conn_wmi.__getattr__("WT_Host") + cl.NewHost(HostName=target_name) + except wmi.x_wmi as exc: + excep_info = exc.com_error.excepinfo[2] + if not ensure or excep_info.find(u'The file exists') == -1: + err_msg = (_( + 'create_iscsi_target: error when creating iscsi target: ' + '%(tar_name)s . WMI exception: ' + '%(wmi_exc)s') % {'tar_name': target_name, 'wmi_exc': exc}) + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + else: + LOG.info(_('Ignored target creation error "%s"' + ' while ensuring export'), exc) + + def remove_iscsi_target(self, target_name): + """Removes ISCSI target.""" + try: + wt_host = self._conn_wmi.WT_Host(HostName=target_name)[0] + wt_host.RemoveAllWTDisks() + wt_host.Delete_() + except wmi.x_wmi as exc: + err_msg = (_( + 'remove_iscsi_target: error when deleting iscsi target: ' + '%(tar_name)s . WMI exception: ' + '%(wmi_exc)s') % {'tar_name': target_name, 'wmi_exc': exc}) + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + + def add_disk_to_target(self, vol_name, target_name): + """Adds the disk to the target.""" + try: + q = self._conn_wmi.WT_Disk(Description=vol_name) + wt_disk = q[0] + wt_host = self._conn_wmi.WT_Host(HostName=target_name)[0] + wt_host.AddWTDisk(wt_disk.WTD) + except wmi.x_wmi as exc: + err_msg = (_( + 'add_disk_to_target: error adding disk associated to volume : ' + '%(vol_name)s to the target name: %(tar_name)s ' + '. WMI exception: %(wmi_exc)s') % {'tar_name': target_name, + 'vol_name': vol_name, + 'wmi_exc': exc}) + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + + def copy_vhd_disk(self, source_path, destination_path): + """Copy the vhd disk from source path to destination path.""" + try: + vhdfiles = self._conn_cimv2.query( + "Select * from CIM_DataFile where Name = '" + + source_path + "'") + if len(vhdfiles) > 0: + vhdfiles[0].Copy(destination_path) + except wmi.x_wmi as exc: + err_msg = (_( + 'copy_vhd_disk: error when copying disk from source path : ' + '%(src_path)s to destination path: %(dest_path)s ' + '. WMI exception: ' + '%(wmi_exc)s') % {'src_path': source_path, + 'dest_path': destination_path, + 'wmi_exc': exc}) + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + + def extend(self, vol_name, additional_size): + """Extend an existing volume.""" + try: + q = self._conn_wmi.WT_Disk(Description=vol_name) + wt_disk = q[0] + wt_disk.Extend(additional_size) + except wmi.x_wmi as exc: + err_msg = (_( + 'extend: error when extending the volume: %(vol_name)s ' + '.WMI exception: %(wmi_exc)s') % {'vol_name': vol_name, + 'wmi_exc': exc}) + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) diff --git a/cinder/volume/drivers/xenapi/__init__.py b/cinder/volume/drivers/xenapi/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cinder/volume/drivers/xenapi/lib.py b/cinder/volume/drivers/xenapi/lib.py new file mode 100644 index 0000000000..f271fee1d3 --- /dev/null +++ b/cinder/volume/drivers/xenapi/lib.py @@ -0,0 +1,542 @@ +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import contextlib +import os +import pickle + +from cinder import units +from cinder.volume.drivers.xenapi import tools + + +class XenAPIException(Exception): + def __init__(self, original_exception): + super(XenAPIException, self).__init__(str(original_exception)) + self.original_exception = original_exception + + +class OperationsBase(object): + def __init__(self, xenapi_session): + self.session = xenapi_session + + def call_xenapi(self, method, *args): + return self.session.call_xenapi(method, *args) + + +class VMOperations(OperationsBase): + def get_by_uuid(self, vm_uuid): + return self.call_xenapi('VM.get_by_uuid', vm_uuid) + + def get_vbds(self, vm_uuid): + return self.call_xenapi('VM.get_VBDs', vm_uuid) + + +class VBDOperations(OperationsBase): + def create(self, vm_ref, vdi_ref, userdevice, bootable, mode, type, + empty, other_config): + vbd_rec = dict( + VM=vm_ref, + VDI=vdi_ref, + userdevice=str(userdevice), + bootable=bootable, + mode=mode, + type=type, + empty=empty, + other_config=other_config, + qos_algorithm_type='', + qos_algorithm_params=dict() + ) + return self.call_xenapi('VBD.create', vbd_rec) + + def destroy(self, vbd_ref): + self.call_xenapi('VBD.destroy', vbd_ref) + + def get_device(self, vbd_ref): + return self.call_xenapi('VBD.get_device', vbd_ref) + + def plug(self, vbd_ref): + return self.call_xenapi('VBD.plug', vbd_ref) + + def unplug(self, vbd_ref): + return self.call_xenapi('VBD.unplug', vbd_ref) + + def get_vdi(self, vbd_ref): + return self.call_xenapi('VBD.get_VDI', vbd_ref) + + +class PoolOperations(OperationsBase): + def get_all(self): + return self.call_xenapi('pool.get_all') + + def get_default_SR(self, pool_ref): + return self.call_xenapi('pool.get_default_SR', pool_ref) + + +class PbdOperations(OperationsBase): + def get_all(self): + return self.call_xenapi('PBD.get_all') + + def unplug(self, pbd_ref): + self.call_xenapi('PBD.unplug', pbd_ref) + + def create(self, host_ref, sr_ref, device_config): + return self.call_xenapi( + 'PBD.create', + dict( + host=host_ref, + SR=sr_ref, + device_config=device_config + ) + ) + + def plug(self, pbd_ref): + self.call_xenapi('PBD.plug', pbd_ref) + + +class SrOperations(OperationsBase): + def get_all(self): + return self.call_xenapi('SR.get_all') + + def get_record(self, sr_ref): + return self.call_xenapi('SR.get_record', sr_ref) + + def forget(self, sr_ref): + self.call_xenapi('SR.forget', sr_ref) + + def scan(self, sr_ref): + self.call_xenapi('SR.scan', sr_ref) + + def create(self, host_ref, device_config, name_label, name_description, + sr_type, physical_size=None, content_type=None, + shared=False, sm_config=None): + return self.call_xenapi( + 'SR.create', + host_ref, + device_config, + physical_size or '0', + name_label or '', + name_description or '', + sr_type, + content_type or '', + shared, + sm_config or dict() + ) + + def introduce(self, sr_uuid, name_label, name_description, sr_type, + content_type=None, shared=False, sm_config=None): + return 
self.call_xenapi( + 'SR.introduce', + sr_uuid, + name_label or '', + name_description or '', + sr_type, + content_type or '', + shared, + sm_config or dict() + ) + + def get_uuid(self, sr_ref): + return self.get_record(sr_ref)['uuid'] + + def get_name_label(self, sr_ref): + return self.get_record(sr_ref)['name_label'] + + def get_name_description(self, sr_ref): + return self.get_record(sr_ref)['name_description'] + + def destroy(self, sr_ref): + self.call_xenapi('SR.destroy', sr_ref) + + +class VdiOperations(OperationsBase): + def get_all(self): + return self.call_xenapi('VDI.get_all') + + def get_record(self, vdi_ref): + return self.call_xenapi('VDI.get_record', vdi_ref) + + def get_by_uuid(self, vdi_uuid): + return self.call_xenapi('VDI.get_by_uuid', vdi_uuid) + + def get_uuid(self, vdi_ref): + return self.get_record(vdi_ref)['uuid'] + + def create(self, sr_ref, size, vdi_type, + sharable=False, read_only=False, other_config=None): + return self.call_xenapi('VDI.create', + dict(SR=sr_ref, + virtual_size=str(size), + type=vdi_type, + sharable=sharable, + read_only=read_only, + other_config=other_config or dict())) + + def destroy(self, vdi_ref): + self.call_xenapi('VDI.destroy', vdi_ref) + + def copy(self, vdi_ref, sr_ref): + return self.call_xenapi('VDI.copy', vdi_ref, sr_ref) + + def resize(self, vdi_ref, size): + return self.call_xenapi('VDI.resize', vdi_ref, str(size)) + + +class HostOperations(OperationsBase): + def get_record(self, host_ref): + return self.call_xenapi('host.get_record', host_ref) + + def get_uuid(self, host_ref): + return self.get_record(host_ref)['uuid'] + + +class XenAPISession(object): + def __init__(self, session, exception_to_convert): + self._session = session + self._exception_to_convert = exception_to_convert + self.handle = self._session.handle + self.PBD = PbdOperations(self) + self.SR = SrOperations(self) + self.VDI = VdiOperations(self) + self.host = HostOperations(self) + self.pool = PoolOperations(self) + self.VBD = VBDOperations(self) + self.VM = VMOperations(self) + + def close(self): + return self.call_xenapi('logout') + + @contextlib.contextmanager + def exception_converter(self): + try: + yield None + except self._exception_to_convert as e: + raise XenAPIException(e) + + def call_xenapi(self, method, *args): + with self.exception_converter(): + return self._session.xenapi_request(method, args) + + def call_plugin(self, host_ref, plugin, function, args): + with self.exception_converter(): + return self._session.xenapi.host.call_plugin( + host_ref, plugin, function, args) + + def get_pool(self): + return self.call_xenapi('session.get_pool', self.handle) + + def get_this_host(self): + return self.call_xenapi('session.get_this_host', self.handle) + + +class CompoundOperations(object): + def unplug_pbds_from_sr(self, sr_ref): + sr_rec = self.SR.get_record(sr_ref) + for pbd_ref in sr_rec.get('PBDs', []): + self.PBD.unplug(pbd_ref) + + def unplug_pbds_and_forget_sr(self, sr_ref): + self.unplug_pbds_from_sr(sr_ref) + self.SR.forget(sr_ref) + + def create_new_vdi(self, sr_ref, size_in_gigabytes): + return self.VDI.create(sr_ref, + to_bytes(size_in_gigabytes), + 'User', ) + + +def to_bytes(size_in_gigs): + return size_in_gigs * units.GiB + + +class NFSOperationsMixIn(CompoundOperations): + def is_nfs_sr(self, sr_ref): + return self.SR.get_record(sr_ref).get('type') == 'nfs' + + @contextlib.contextmanager + def new_sr_on_nfs(self, host_ref, server, serverpath, + name_label=None, name_description=None): + + device_config = dict( + server=server, + 
serverpath=serverpath + ) + name_label = name_label or '' + name_description = name_description or '' + sr_type = 'nfs' + + sr_ref = self.SR.create( + host_ref, + device_config, + name_label, + name_description, + sr_type, + ) + yield sr_ref + + self.unplug_pbds_and_forget_sr(sr_ref) + + def plug_nfs_sr(self, host_ref, server, serverpath, sr_uuid, + name_label=None, name_description=None): + + device_config = dict( + server=server, + serverpath=serverpath + ) + sr_type = 'nfs' + + sr_ref = self.SR.introduce( + sr_uuid, + name_label, + name_description, + sr_type, + ) + + pbd_ref = self.PBD.create( + host_ref, + sr_ref, + device_config + ) + + self.PBD.plug(pbd_ref) + + return sr_ref + + def connect_volume(self, server, serverpath, sr_uuid, vdi_uuid): + host_ref = self.get_this_host() + sr_ref = self.plug_nfs_sr( + host_ref, + server, + serverpath, + sr_uuid + ) + self.SR.scan(sr_ref) + vdi_ref = self.VDI.get_by_uuid(vdi_uuid) + return dict(sr_ref=sr_ref, vdi_ref=vdi_ref) + + def copy_vdi_to_sr(self, vdi_ref, sr_ref): + return self.VDI.copy(vdi_ref, sr_ref) + + +class ContextAwareSession(XenAPISession): + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + self.close() + + +class OpenStackXenAPISession(ContextAwareSession, + NFSOperationsMixIn): + pass + + +def connect(url, user, password): + import XenAPI + session = XenAPI.Session(url) + session.login_with_password(user, password) + return OpenStackXenAPISession(session, XenAPI.Failure) + + +class SessionFactory(object): + def __init__(self, url, user, password): + self.url = url + self.user = user + self.password = password + + def get_session(self): + return connect(self.url, self.user, self.password) + + +class XapiPluginProxy(object): + def __init__(self, session_factory, plugin_name): + self._session_factory = session_factory + self._plugin_name = plugin_name + + def call(self, function, *plugin_args, **plugin_kwargs): + plugin_params = dict(args=plugin_args, kwargs=plugin_kwargs) + args = dict(params=pickle.dumps(plugin_params)) + + with self._session_factory.get_session() as session: + host_ref = session.get_this_host() + result = session.call_plugin( + host_ref, self._plugin_name, function, args) + + return pickle.loads(result) + + +class GlancePluginProxy(XapiPluginProxy): + def __init__(self, session_factory): + super(GlancePluginProxy, self).__init__(session_factory, 'glance') + + def download_vhd(self, image_id, glance_host, glance_port, glance_use_ssl, + uuid_stack, sr_path, auth_token): + return self.call( + 'download_vhd', + image_id=image_id, + glance_host=glance_host, + glance_port=glance_port, + glance_use_ssl=glance_use_ssl, + uuid_stack=uuid_stack, + sr_path=sr_path, + auth_token=auth_token) + + def upload_vhd(self, vdi_uuids, image_id, glance_host, glance_port, + glance_use_ssl, sr_path, auth_token, properties): + return self.call( + 'upload_vhd', + vdi_uuids=vdi_uuids, + image_id=image_id, + glance_host=glance_host, + glance_port=glance_port, + glance_use_ssl=glance_use_ssl, + sr_path=sr_path, + auth_token=auth_token, + properties=properties) + + +class NFSBasedVolumeOperations(object): + def __init__(self, session_factory): + self._session_factory = session_factory + self.glance_plugin = GlancePluginProxy(session_factory) + + def create_volume(self, server, serverpath, size, + name=None, description=None): + with self._session_factory.get_session() as session: + host_ref = session.get_this_host() + with session.new_sr_on_nfs(host_ref, server, serverpath, + name, description) 
as sr_ref: + vdi_ref = session.create_new_vdi(sr_ref, size) + + return dict( + sr_uuid=session.SR.get_uuid(sr_ref), + vdi_uuid=session.VDI.get_uuid(vdi_ref) + ) + + def delete_volume(self, server, serverpath, sr_uuid, vdi_uuid): + with self._session_factory.get_session() as session: + refs = session.connect_volume( + server, serverpath, sr_uuid, vdi_uuid) + + session.VDI.destroy(refs['vdi_ref']) + sr_ref = refs['sr_ref'] + session.unplug_pbds_from_sr(sr_ref) + session.SR.destroy(sr_ref) + + def connect_volume(self, server, serverpath, sr_uuid, vdi_uuid): + with self._session_factory.get_session() as session: + refs = session.connect_volume( + server, serverpath, sr_uuid, vdi_uuid) + + return session.VDI.get_uuid(refs['vdi_ref']) + + def disconnect_volume(self, vdi_uuid): + with self._session_factory.get_session() as session: + vdi_ref = session.VDI.get_by_uuid(vdi_uuid) + vdi_rec = session.VDI.get_record(vdi_ref) + sr_ref = vdi_rec['SR'] + session.unplug_pbds_and_forget_sr(sr_ref) + + def copy_volume(self, server, serverpath, sr_uuid, vdi_uuid, + name=None, description=None): + with self._session_factory.get_session() as session: + src_refs = session.connect_volume( + server, serverpath, sr_uuid, vdi_uuid) + try: + host_ref = session.get_this_host() + + with session.new_sr_on_nfs(host_ref, server, serverpath, + name, description) as target_sr_ref: + target_vdi_ref = session.copy_vdi_to_sr( + src_refs['vdi_ref'], target_sr_ref) + + dst_refs = dict( + sr_uuid=session.SR.get_uuid(target_sr_ref), + vdi_uuid=session.VDI.get_uuid(target_vdi_ref) + ) + + finally: + session.unplug_pbds_and_forget_sr(src_refs['sr_ref']) + + return dst_refs + + def resize_volume(self, server, serverpath, sr_uuid, vdi_uuid, + size_in_gigabytes): + self.connect_volume(server, serverpath, sr_uuid, vdi_uuid) + + try: + with self._session_factory.get_session() as session: + vdi_ref = session.VDI.get_by_uuid(vdi_uuid) + session.VDI.resize(vdi_ref, to_bytes(size_in_gigabytes)) + finally: + self.disconnect_volume(vdi_uuid) + + def use_glance_plugin_to_overwrite_volume(self, server, serverpath, + sr_uuid, vdi_uuid, glance_server, + image_id, auth_token, + sr_base_path): + self.connect_volume(server, serverpath, sr_uuid, vdi_uuid) + + uuid_stack = [vdi_uuid] + glance_host, glance_port, glance_use_ssl = glance_server + + try: + result = self.glance_plugin.download_vhd( + image_id, glance_host, glance_port, glance_use_ssl, uuid_stack, + os.path.join(sr_base_path, sr_uuid), auth_token) + finally: + self.disconnect_volume(vdi_uuid) + + if len(result) != 1 or result['root']['uuid'] != vdi_uuid: + return False + + return True + + def use_glance_plugin_to_upload_volume(self, server, serverpath, + sr_uuid, vdi_uuid, glance_server, + image_id, auth_token, sr_base_path): + self.connect_volume(server, serverpath, sr_uuid, vdi_uuid) + + vdi_uuids = [vdi_uuid] + glance_host, glance_port, glance_use_ssl = glance_server + + try: + result = self.glance_plugin.upload_vhd( + vdi_uuids, image_id, glance_host, glance_port, glance_use_ssl, + os.path.join(sr_base_path, sr_uuid), auth_token, dict()) + finally: + self.disconnect_volume(vdi_uuid) + + @contextlib.contextmanager + def volume_attached_here(self, server, serverpath, sr_uuid, vdi_uuid, + readonly=True): + self.connect_volume(server, serverpath, sr_uuid, vdi_uuid) + + with self._session_factory.get_session() as session: + vm_uuid = tools.get_this_vm_uuid() + vm_ref = session.VM.get_by_uuid(vm_uuid) + vdi_ref = session.VDI.get_by_uuid(vdi_uuid) + vbd_ref = session.VBD.create( + vm_ref, 
vdi_ref, userdevice='autodetect', bootable=False, + mode='RO' if readonly else 'RW', type='disk', empty=False, + other_config=dict()) + session.VBD.plug(vbd_ref) + device = session.VBD.get_device(vbd_ref) + try: + yield "/dev/" + device + finally: + session.VBD.unplug(vbd_ref) + session.VBD.destroy(vbd_ref) + self.disconnect_volume(vdi_uuid) diff --git a/cinder/volume/drivers/xenapi/sm.py b/cinder/volume/drivers/xenapi/sm.py new file mode 100644 index 0000000000..f96bc287fd --- /dev/null +++ b/cinder/volume/drivers/xenapi/sm.py @@ -0,0 +1,271 @@ +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo.config import cfg + +from cinder import exception +from cinder.image import glance +from cinder.image import image_utils +from cinder.openstack.common import log as logging +from cinder.volume import driver +from cinder.volume.drivers.xenapi import lib as xenapi_lib + +LOG = logging.getLogger(__name__) + +xenapi_opts = [ + cfg.StrOpt('xenapi_connection_url', + default=None, + help='URL for XenAPI connection'), + cfg.StrOpt('xenapi_connection_username', + default='root', + help='Username for XenAPI connection'), + cfg.StrOpt('xenapi_connection_password', + default=None, + help='Password for XenAPI connection', + secret=True), + cfg.StrOpt('xenapi_sr_base_path', + default='/var/run/sr-mount', + help='Base path to the storage repository'), +] + +xenapi_nfs_opts = [ + cfg.StrOpt('xenapi_nfs_server', + default=None, + help='NFS server to be used by XenAPINFSDriver'), + cfg.StrOpt('xenapi_nfs_serverpath', + default=None, + help='Path of exported NFS, used by XenAPINFSDriver'), +] + +CONF = cfg.CONF +CONF.register_opts(xenapi_opts) +CONF.register_opts(xenapi_nfs_opts) + + +class XenAPINFSDriver(driver.VolumeDriver): + + VERSION = "1.0.0" + + def __init__(self, *args, **kwargs): + super(XenAPINFSDriver, self).__init__(*args, **kwargs) + self.configuration.append_config_values(xenapi_opts) + self.configuration.append_config_values(xenapi_nfs_opts) + + def do_setup(self, context): + session_factory = xenapi_lib.SessionFactory( + self.configuration.xenapi_connection_url, + self.configuration.xenapi_connection_username, + self.configuration.xenapi_connection_password + ) + self.nfs_ops = xenapi_lib.NFSBasedVolumeOperations(session_factory) + + def create_cloned_volume(self, volume, src_vref): + raise NotImplementedError() + + def create_volume(self, volume): + volume_details = self.nfs_ops.create_volume( + self.configuration.xenapi_nfs_server, + self.configuration.xenapi_nfs_serverpath, + volume['size'], + volume['display_name'], + volume['display_description'] + ) + location = "%(sr_uuid)s/%(vdi_uuid)s" % volume_details + return dict(provider_location=location) + + def create_export(self, context, volume): + pass + + def delete_volume(self, volume): + sr_uuid, vdi_uuid = volume['provider_location'].split('/') + + self.nfs_ops.delete_volume( + 
self.configuration.xenapi_nfs_server,
+            self.configuration.xenapi_nfs_serverpath,
+            sr_uuid,
+            vdi_uuid
+        )
+
+    def remove_export(self, context, volume):
+        pass
+
+    def initialize_connection(self, volume, connector):
+        sr_uuid, vdi_uuid = volume['provider_location'].split('/')
+
+        return dict(
+            driver_volume_type='xensm',
+            data=dict(
+                name_label=volume['display_name'] or '',
+                name_description=volume['display_description'] or '',
+                sr_uuid=sr_uuid,
+                vdi_uuid=vdi_uuid,
+                sr_type='nfs',
+                server=self.configuration.xenapi_nfs_server,
+                serverpath=self.configuration.xenapi_nfs_serverpath,
+                introduce_sr_keys=['sr_type', 'server', 'serverpath']
+            )
+        )
+
+    def terminate_connection(self, volume, connector, **kwargs):
+        pass
+
+    def check_for_setup_error(self):
+        """To override superclass' method."""
+
+    def create_volume_from_snapshot(self, volume, snapshot):
+        return self._copy_volume(
+            snapshot, volume['display_name'], volume['display_description'])
+
+    def create_snapshot(self, snapshot):
+        volume = snapshot['volume']
+        return self._copy_volume(
+            volume, snapshot['display_name'], snapshot['display_description'])
+
+    def _copy_volume(self, volume, target_name, target_desc):
+        sr_uuid, vdi_uuid = volume['provider_location'].split('/')
+
+        volume_details = self.nfs_ops.copy_volume(
+            self.configuration.xenapi_nfs_server,
+            self.configuration.xenapi_nfs_serverpath,
+            sr_uuid,
+            vdi_uuid,
+            target_name,
+            target_desc
+        )
+        location = "%(sr_uuid)s/%(vdi_uuid)s" % volume_details
+        return dict(provider_location=location)
+
+    def delete_snapshot(self, snapshot):
+        self.delete_volume(snapshot)
+
+    def ensure_export(self, context, volume):
+        pass
+
+    def copy_image_to_volume(self, context, volume, image_service, image_id):
+        if image_utils.is_xenserver_image(context, image_service, image_id):
+            return self._use_glance_plugin_to_copy_image_to_volume(
+                context, volume, image_service, image_id)
+
+        return self._use_image_utils_to_pipe_bytes_to_volume(
+            context, volume, image_service, image_id)
+
+    def _use_image_utils_to_pipe_bytes_to_volume(self, context, volume,
+                                                 image_service, image_id):
+        sr_uuid, vdi_uuid = volume['provider_location'].split('/')
+        with self.nfs_ops.volume_attached_here(CONF.xenapi_nfs_server,
+                                               CONF.xenapi_nfs_serverpath,
+                                               sr_uuid, vdi_uuid,
+                                               False) as device:
+            image_utils.fetch_to_raw(context,
+                                     image_service,
+                                     image_id,
+                                     device,
+                                     self.configuration.volume_dd_blocksize,
+                                     size=volume['size'])
+
+    def _use_glance_plugin_to_copy_image_to_volume(self, context, volume,
+                                                   image_service, image_id):
+        sr_uuid, vdi_uuid = volume['provider_location'].split('/')
+
+        api_servers = glance.get_api_servers()
+        glance_server = api_servers.next()
+        auth_token = context.auth_token
+
+        overwrite_result = self.nfs_ops.use_glance_plugin_to_overwrite_volume(
+            CONF.xenapi_nfs_server,
+            CONF.xenapi_nfs_serverpath,
+            sr_uuid,
+            vdi_uuid,
+            glance_server,
+            image_id,
+            auth_token,
+            CONF.xenapi_sr_base_path)
+
+        if overwrite_result is False:
+            raise exception.ImageCopyFailure(reason='Overwriting volume '
+                                                    'failed.')
+
+        self.nfs_ops.resize_volume(
+            CONF.xenapi_nfs_server,
+            CONF.xenapi_nfs_serverpath,
+            sr_uuid,
+            vdi_uuid,
+            volume['size'])
+
+    def copy_volume_to_image(self, context, volume, image_service,
+                             image_meta):
+        if image_utils.is_xenserver_format(image_meta):
+            return self._use_glance_plugin_to_upload_volume(
+                context, volume, image_service, image_meta)
+
+        return self._use_image_utils_to_upload_volume(
+            context, volume, image_service, image_meta)
+
+    def _use_image_utils_to_upload_volume(self, context, volume,
+                                          image_service, image_meta):
+        sr_uuid, vdi_uuid = volume['provider_location'].split('/')
+        with self.nfs_ops.volume_attached_here(CONF.xenapi_nfs_server,
+                                               CONF.xenapi_nfs_serverpath,
+                                               sr_uuid, vdi_uuid,
+                                               True) as device:
+            image_utils.upload_volume(context,
+                                      image_service,
+                                      image_meta,
+                                      device)
+
+    def _use_glance_plugin_to_upload_volume(self, context, volume,
+                                            image_service, image_meta):
+        image_id = image_meta['id']
+
+        sr_uuid, vdi_uuid = volume['provider_location'].split('/')
+
+        api_servers = glance.get_api_servers()
+        glance_server = api_servers.next()
+        auth_token = context.auth_token
+
+        self.nfs_ops.use_glance_plugin_to_upload_volume(
+            CONF.xenapi_nfs_server,
+            CONF.xenapi_nfs_serverpath,
+            sr_uuid,
+            vdi_uuid,
+            glance_server,
+            image_id,
+            auth_token,
+            CONF.xenapi_sr_base_path)
+
+    def get_volume_stats(self, refresh=False):
+        if refresh or not self._stats:
+            data = {}
+
+            backend_name = self.configuration.safe_get('volume_backend_name')
+            data["volume_backend_name"] = backend_name or 'XenAPINFS'
+            data['vendor_name'] = 'Open Source'
+            data['driver_version'] = self.VERSION
+            data['storage_protocol'] = 'xensm'
+            data['total_capacity_gb'] = 'unknown'
+            data['free_capacity_gb'] = 'unknown'
+            data['reserved_percentage'] = 0
+            self._stats = data
+
+        return self._stats
+
+    def backup_volume(self, context, backup, backup_service):
+        """Create a new backup from an existing volume."""
+        raise NotImplementedError()
+
+    def restore_backup(self, context, backup, volume, backup_service):
+        """Restore an existing backup to a new or existing volume."""
+        raise NotImplementedError()
diff --git a/cinder/volume/drivers/xenapi/tools.py b/cinder/volume/drivers/xenapi/tools.py
new file mode 100644
index 0000000000..d452fbfa77
--- /dev/null
+++ b/cinder/volume/drivers/xenapi/tools.py
@@ -0,0 +1,7 @@
+def _stripped_first_line_of(filename):
+    with open(filename, 'rb') as f:
+        return f.readline().strip()
+
+
+def get_this_vm_uuid():
+    return _stripped_first_line_of('/sys/hypervisor/uuid')
diff --git a/cinder/volume/drivers/xiv_ds8k.py b/cinder/volume/drivers/xiv_ds8k.py
new file mode 100644
index 0000000000..af50e22aca
--- /dev/null
+++ b/cinder/volume/drivers/xiv_ds8k.py
@@ -0,0 +1,155 @@
+# Copyright 2013 IBM Corp.
+# Copyright (c) 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Authors:
+#   Erik Zaadi
+#   Avishay Traeger
+
+"""
+Unified Volume driver for IBM XIV and DS8K Storage Systems.
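+
+The driver itself contains no array-specific logic; every operation is
+delegated to a proxy object built from the class named by the
+``xiv_ds8k_proxy`` option. A hypothetical cinder.conf backend section
+(IP address and pool name are placeholders)::
+
+    volume_driver = cinder.volume.drivers.xiv_ds8k.XIVDS8KDriver
+    xiv_ds8k_proxy = xiv_ds8k_openstack.nova_proxy.XIVDS8KNovaProxy
+    xiv_ds8k_connection_type = iscsi
+    san_ip = 1.2.3.4
+    san_clustername = my_pool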
+""" + +from oslo.config import cfg + +from cinder import exception +from cinder.openstack.common import importutils +from cinder.openstack.common import log as logging +from cinder.volume.drivers.san import san + +xiv_ds8k_opts = [ + cfg.StrOpt( + 'xiv_ds8k_proxy', + default='xiv_ds8k_openstack.nova_proxy.XIVDS8KNovaProxy', + help='Proxy driver that connects to the IBM Storage Array'), + cfg.StrOpt( + 'xiv_ds8k_connection_type', + default='iscsi', + help='Connection type to the IBM Storage Array' + ' (fibre_channel|iscsi)'), +] + +CONF = cfg.CONF +CONF.register_opts(xiv_ds8k_opts) + +LOG = logging.getLogger(__name__) + + +class XIVDS8KDriver(san.SanDriver): + """Unified IBM XIV and DS8K volume driver.""" + + def __init__(self, *args, **kwargs): + """Initialize the driver.""" + + super(XIVDS8KDriver, self).__init__(*args, **kwargs) + + self.configuration.append_config_values(xiv_ds8k_opts) + + proxy = importutils.import_class(self.configuration.xiv_ds8k_proxy) + + #NOTE: All Array specific configurations are prefixed with: + #"xiv_ds8k_array_" + #These additional flags should be specified in the cinder.conf + #preferably in each backend configuration. + + self.xiv_ds8k_proxy = proxy( + { + "xiv_ds8k_user": self.configuration.san_login, + "xiv_ds8k_pass": self.configuration.san_password, + "xiv_ds8k_address": self.configuration.san_ip, + "xiv_ds8k_vol_pool": self.configuration.san_clustername, + "xiv_ds8k_connection_type": + self.configuration.xiv_ds8k_connection_type + }, + LOG, + exception, + driver=self) + + def do_setup(self, context): + """Setup and verify IBM XIV and DS8K Storage connection.""" + + self.xiv_ds8k_proxy.setup(context) + + def ensure_export(self, context, volume): + """Ensure an export.""" + + return self.xiv_ds8k_proxy.ensure_export(context, volume) + + def create_export(self, context, volume): + """Create an export.""" + + return self.xiv_ds8k_proxy.create_export(context, volume) + + def create_volume(self, volume): + """Create a volume on the IBM XIV and DS8K Storage system.""" + + return self.xiv_ds8k_proxy.create_volume(volume) + + def delete_volume(self, volume): + """Delete a volume on the IBM XIV and DS8K Storage system.""" + + self.xiv_ds8k_proxy.delete_volume(volume) + + def remove_export(self, context, volume): + """Disconnect a volume from an attached instance.""" + + return self.xiv_ds8k_proxy.remove_export(context, volume) + + def initialize_connection(self, volume, connector): + """Map the created volume.""" + + return self.xiv_ds8k_proxy.initialize_connection(volume, connector) + + def terminate_connection(self, volume, connector, **kwargs): + """Terminate a connection to a volume.""" + + return self.xiv_ds8k_proxy.terminate_connection(volume, connector) + + def create_volume_from_snapshot(self, volume, snapshot): + """Create a volume from a snapshot.""" + + return self.xiv_ds8k_proxy.create_volume_from_snapshot( + volume, + snapshot) + + def create_snapshot(self, snapshot): + """Create a snapshot.""" + + return self.xiv_ds8k_proxy.create_snapshot(snapshot) + + def delete_snapshot(self, snapshot): + """Delete a snapshot.""" + + return self.xiv_ds8k_proxy.delete_snapshot(snapshot) + + def get_volume_stats(self, refresh=False): + """Get volume stats.""" + + return self.xiv_ds8k_proxy.get_volume_stats(refresh) + + def create_cloned_volume(self, tgt_volume, src_volume): + """Create Cloned Volume.""" + + return self.xiv_ds8k_proxy.create_cloned_volume(tgt_volume, src_volume) + + def extend_volume(self, volume, new_size): + """Extend Created Volume.""" + + 
self.xiv_ds8k_proxy.extend_volume(volume, new_size) + + def migrate_volume(self, context, volume, host): + """Migrate the volume to the specified host.""" + + return self.xiv_ds8k_proxy.migrate_volume(context, volume, host) diff --git a/cinder/volume/zadara.py b/cinder/volume/drivers/zadara.py old mode 100755 new mode 100644 similarity index 51% rename from cinder/volume/zadara.py rename to cinder/volume/drivers/zadara.py index 03f34ca548..01063caaf9 --- a/cinder/volume/zadara.py +++ b/cinder/volume/drivers/zadara.py @@ -1,7 +1,5 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright (c) 2012 Zadara Storage, Inc. -# Copyright (c) 2012 OpenStack LLC. +# Copyright (c) 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -18,23 +16,20 @@ """ Volume driver for Zadara Virtual Private Storage Array (VPSA). -This driver requires VPSA with API ver.12.06 or higher. +This driver requires VPSA with API ver.13.07 or higher. """ + import httplib +from lxml import etree +from oslo.config import cfg + from cinder import exception -from cinder import flags from cinder.openstack.common import log as logging -from cinder.openstack.common import cfg -from cinder import utils from cinder.volume import driver -from cinder.volume import iscsi - -from lxml import etree - -LOG = logging.getLogger("cinder.volume.driver") +LOG = logging.getLogger(__name__) zadara_opts = [ cfg.StrOpt('zadara_vpsa_ip', @@ -44,55 +39,45 @@ default=None, help='Zadara VPSA port number'), cfg.BoolOpt('zadara_vpsa_use_ssl', - default=False, - help='Use SSL connection'), + default=False, + help='Use SSL connection'), cfg.StrOpt('zadara_user', default=None, help='User name for the VPSA'), cfg.StrOpt('zadara_password', default=None, - help='Password for the VPSA'), + help='Password for the VPSA', + secret=True), cfg.StrOpt('zadara_vpsa_poolname', default=None, help='Name of VPSA storage pool for volumes'), - cfg.StrOpt('zadara_default_cache_policy', - default='write-through', - help='Default cache policy for volumes'), - cfg.StrOpt('zadara_default_encryption', - default='NO', - help='Default encryption policy for volumes'), - cfg.StrOpt('zadara_default_striping_mode', - default='simple', - help='Default striping mode for volumes'), - cfg.StrOpt('zadara_default_stripesize', - default='64', - help='Default stripe size for volumes'), + cfg.BoolOpt('zadara_vol_thin', + default=True, + help='Default thin provisioning policy for volumes'), + cfg.BoolOpt('zadara_vol_encrypt', + default=False, + help='Default encryption policy for volumes'), cfg.StrOpt('zadara_vol_name_template', default='OS_%s', help='Default template for VPSA volume names'), cfg.BoolOpt('zadara_vpsa_auto_detach_on_delete', - default=True, - help="Automatically detach from servers on volume delete"), + default=True, + help="Automatically detach from servers on volume delete"), cfg.BoolOpt('zadara_vpsa_allow_nonexistent_delete', - default=True, - help="Don't halt on deletion of non-existing volumes"), - ] + default=True, + help="Don't halt on deletion of non-existing volumes"), ] -FLAGS = flags.FLAGS -FLAGS.register_opts(zadara_opts) +CONF = cfg.CONF +CONF.register_opts(zadara_opts) class ZadaraVPSAConnection(object): """Executes volume driver commands on VPSA.""" - def __init__(self, host, port, ssl, user, password): - self.host = host - self.port = port - self.use_ssl = ssl - self.user = user - self.password = password + def __init__(self, conf): + self.conf = conf self.access_key = None 
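+        # Connect eagerly so that configuration problems surface when the
+        # driver starts rather than on the first command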
self.ensure_connection() @@ -111,25 +96,49 @@ def _joined_params(params): vpsa_commands = { 'login': ('POST', '/api/users/login.xml', - {'user': self.user, - 'password': self.password}), + {'user': self.conf.zadara_user, + 'password': self.conf.zadara_password}), # Volume operations 'create_volume': ('POST', '/api/volumes.xml', - {'display_name': kwargs.get('name'), - 'virtual_capacity': kwargs.get('size'), - 'raid_group_name[]': FLAGS.zadara_vpsa_poolname, - 'quantity': 1, - 'cache': FLAGS.zadara_default_cache_policy, - 'crypt': FLAGS.zadara_default_encryption, - 'mode': FLAGS.zadara_default_striping_mode, - 'stripesize': FLAGS.zadara_default_stripesize, - 'force': 'NO'}), + {'name': kwargs.get('name'), + 'capacity': kwargs.get('size'), + 'pool': self.conf.zadara_vpsa_poolname, + 'thin': 'YES' + if self.conf.zadara_vol_thin else 'NO', + 'crypt': 'YES' + if self.conf.zadara_vol_encrypt else 'NO'}), 'delete_volume': ('DELETE', '/api/volumes/%s.xml' % kwargs.get('vpsa_vol'), {}), + 'expand_volume': ('POST', + '/api/volumes/%s/expand.xml' + % kwargs.get('vpsa_vol'), + {'capacity': kwargs.get('size')}), + + # Snapshot operations + 'create_snapshot': ('POST', + '/api/consistency_groups/%s/snapshots.xml' + % kwargs.get('cg_name'), + {'display_name': kwargs.get('snap_name')}), + 'delete_snapshot': ('DELETE', + '/api/snapshots/%s.xml' + % kwargs.get('snap_id'), + {}), + + 'create_clone_from_snap': ('POST', + '/api/consistency_groups/%s/clone.xml' + % kwargs.get('cg_name'), + {'name': kwargs.get('name'), + 'snapshot': kwargs.get('snap_id')}), + + 'create_clone': ('POST', + '/api/consistency_groups/%s/clone.xml' + % kwargs.get('cg_name'), + {'name': kwargs.get('name')}), + # Server operations 'create_server': ('POST', '/api/servers.xml', @@ -139,12 +148,12 @@ def _joined_params(params): # Attach/Detach operations 'attach_volume': ('POST', '/api/servers/%s/volumes.xml' - % kwargs.get('vpsa_srv'), + % kwargs.get('vpsa_srv'), {'volume_name[]': kwargs.get('vpsa_vol'), 'force': 'NO'}), 'detach_volume': ('POST', '/api/volumes/%s/detach.xml' - % kwargs.get('vpsa_vol'), + % kwargs.get('vpsa_vol'), {'server_name[]': kwargs.get('vpsa_srv'), 'force': 'NO'}), @@ -152,6 +161,9 @@ def _joined_params(params): 'list_volumes': ('GET', '/api/volumes.xml', {}), + 'list_pools': ('GET', + '/api/pools.xml', + {}), 'list_controllers': ('GET', '/api/vcontrollers.xml', {}), @@ -160,9 +172,12 @@ def _joined_params(params): {}), 'list_vol_attachments': ('GET', '/api/volumes/%s/servers.xml' - % kwargs.get('vpsa_vol'), + % kwargs.get('vpsa_vol'), {}), - } + 'list_vol_snapshots': ('GET', + '/api/consistency_groups/%s/snapshots.xml' + % kwargs.get('cg_name'), + {})} if cmd not in vpsa_commands.keys(): raise exception.UnknownCmd(cmd=cmd) @@ -203,12 +218,12 @@ def ensure_connection(self, cmd=None): user = xml_tree.find('user') if user is None: raise exception.MalformedResponse(cmd=cmd, - reason='no "user" field') + reason='no "user" field') access_key = user.findtext('access-key') if access_key is None: raise exception.MalformedResponse(cmd=cmd, - reason='no "access-key" field') + reason='no "access-key" field') self.access_key = access_key @@ -218,13 +233,15 @@ def send_cmd(self, cmd, **kwargs): self.ensure_connection(cmd) (method, url, body) = self._generate_vpsa_cmd(cmd, **kwargs) - LOG.debug(_('Sending %(method)s to %(url)s. Body "%(body)s"') - % locals()) + LOG.debug(_('Sending %(method)s to %(url)s. 
Body "%(body)s"'), + {'method': method, 'url': url, 'body': body}) - if self.use_ssl: - connection = httplib.HTTPSConnection(self.host, self.port) + if self.conf.zadara_vpsa_use_ssl: + connection = httplib.HTTPSConnection(self.conf.zadara_vpsa_ip, + self.conf.zadara_vpsa_port) else: - connection = httplib.HTTPConnection(self.host, self.port) + connection = httplib.HTTPConnection(self.conf.zadara_vpsa_ip, + self.conf.zadara_vpsa_port) connection.request(method, url, body) response = connection.getresponse() @@ -240,26 +257,25 @@ def send_cmd(self, cmd, **kwargs): raise exception.FailedCmdWithDump(status=status, data=data) if method in ['POST', 'DELETE']: - LOG.debug(_('Operation completed. %(data)s') % locals()) + LOG.debug(_('Operation completed. %(data)s'), {'data': data}) return xml_tree class ZadaraVPSAISCSIDriver(driver.ISCSIDriver): """Zadara VPSA iSCSI volume driver.""" + VERSION = '13.07' + def __init__(self, *args, **kwargs): super(ZadaraVPSAISCSIDriver, self).__init__(*args, **kwargs) + self.configuration.append_config_values(zadara_opts) def do_setup(self, context): - """ - Any initialization the volume driver does while starting. + """Any initialization the volume driver does while starting. + Establishes initial connection with VPSA and retrieves access_key. """ - self.vpsa = ZadaraVPSAConnection(FLAGS.zadara_vpsa_ip, - FLAGS.zadara_vpsa_port, - FLAGS.zadara_vpsa_use_ssl, - FLAGS.zadara_user, - FLAGS.zadara_password) + self.vpsa = ZadaraVPSAConnection(self.configuration) def check_for_setup_error(self): """Returns an error (exception) if prerequisites aren't met.""" @@ -267,14 +283,11 @@ def check_for_setup_error(self): def local_path(self, volume): """Return local path to existing local volume.""" - LOG.error(_("Call to local_path should not happen." - " Verify that use_local_volumes flag is turned off.")) raise NotImplementedError() def _xml_parse_helper(self, xml_tree, first_level, search_tuple, first=True): - """ - Helper for parsing VPSA's XML output. + """Helper for parsing VPSA's XML output. Returns single item if first==True or list for multiple selection. 
If second argument in search_tuple is None - returns all items with @@ -296,21 +309,62 @@ def _xml_parse_helper(self, xml_tree, first_level, search_tuple, result_list.append(object) return result_list if result_list else None + def _get_vpsa_volume_name_and_size(self, name): + """Return VPSA's name & size for the volume.""" + xml_tree = self.vpsa.send_cmd('list_volumes') + volume = self._xml_parse_helper(xml_tree, 'volumes', + ('display-name', name)) + if volume is not None: + return (volume.findtext('name'), + int(volume.findtext('virtual-capacity'))) + + return (None, None) + def _get_vpsa_volume_name(self, name): """Return VPSA's name for the volume.""" + (vol_name, size) = self._get_vpsa_volume_name_and_size(name) + return vol_name + + def _get_volume_cg_name(self, name): + """Return name of the consistency group for the volume.""" xml_tree = self.vpsa.send_cmd('list_volumes') volume = self._xml_parse_helper(xml_tree, 'volumes', ('display-name', name)) if volume is not None: - return volume.findtext('name') + return volume.findtext('cg-name') + + return None + + def _get_snap_id(self, cg_name, snap_name): + """Return snapshot ID for particular volume.""" + xml_tree = self.vpsa.send_cmd('list_vol_snapshots', + cg_name=cg_name) + snap = self._xml_parse_helper(xml_tree, 'snapshots', + ('display-name', snap_name)) + if snap is not None: + return snap.findtext('name') return None + def _get_pool_capacity(self, pool_name): + """Return pool's total and available capacities.""" + xml_tree = self.vpsa.send_cmd('list_pools') + pool = self._xml_parse_helper(xml_tree, 'pools', + ('name', pool_name)) + if pool is not None: + total = int(pool.findtext('capacity')) + free = int(float(pool.findtext('available-capacity'))) + LOG.debug(_('Pool %(name)s: %(total)sGB total, %(free)sGB free'), + {'name': pool_name, 'total': total, 'free': free}) + return (total, free) + + return ('infinite', 'infinite') + def _get_active_controller_details(self): """Return details of VPSA's active controller.""" xml_tree = self.vpsa.send_cmd('list_controllers') ctrl = self._xml_parse_helper(xml_tree, 'vcontrollers', - ('state', 'active')) + ('state', 'active')) if ctrl is not None: return dict(target=ctrl.findtext('target'), ip=ctrl.findtext('iscsi-ip'), @@ -337,24 +391,24 @@ def _create_vpsa_server(self, initiator): def create_volume(self, volume): """Create volume.""" - self.vpsa.send_cmd('create_volume', - name=FLAGS.zadara_vol_name_template % volume['name'], - size=volume['size']) + self.vpsa.send_cmd( + 'create_volume', + name=self.configuration.zadara_vol_name_template % volume['name'], + size=volume['size']) def delete_volume(self, volume): - """ - Delete volume. + """Delete volume. Return ok if doesn't exist. Auto detach from all servers. """ # Get volume name - name = FLAGS.zadara_vol_name_template % volume['name'] + name = self.configuration.zadara_vol_name_template % volume['name'] vpsa_vol = self._get_vpsa_volume_name(name) if not vpsa_vol: msg = _('Volume %(name)s could not be found. 
' - 'It might be already deleted') % locals() + 'It might be already deleted') % {'name': name} LOG.warning(msg) - if FLAGS.zadara_vpsa_allow_nonexistent_delete: + if self.configuration.zadara_vpsa_allow_nonexistent_delete: return else: raise exception.VolumeNotFound(volume_id=name) @@ -363,20 +417,131 @@ def delete_volume(self, volume): xml_tree = self.vpsa.send_cmd('list_vol_attachments', vpsa_vol=vpsa_vol) servers = self._xml_parse_helper(xml_tree, 'servers', - ('iqn', None), first=False) + ('iqn', None), first=False) if servers: - if not FLAGS.zadara_vpsa_auto_detach_on_delete: + if not self.configuration.zadara_vpsa_auto_detach_on_delete: raise exception.VolumeAttached(volume_id=name) for server in servers: vpsa_srv = server.findtext('name') if vpsa_srv: self.vpsa.send_cmd('detach_volume', - vpsa_srv=vpsa_srv, vpsa_vol=vpsa_vol) + vpsa_srv=vpsa_srv, + vpsa_vol=vpsa_vol) # Delete volume self.vpsa.send_cmd('delete_volume', vpsa_vol=vpsa_vol) + def create_snapshot(self, snapshot): + """Creates a snapshot.""" + + LOG.debug(_('Create snapshot: %s'), snapshot['name']) + + # Retrieve the CG name for the base volume + volume_name = self.configuration.zadara_vol_name_template\ + % snapshot['volume_name'] + cg_name = self._get_volume_cg_name(volume_name) + if not cg_name: + msg = _('Volume %(name)s not found') % {'name': volume_name} + LOG.error(msg) + raise exception.VolumeNotFound(volume_id=volume_name) + + self.vpsa.send_cmd('create_snapshot', + cg_name=cg_name, + snap_name=snapshot['name']) + + def delete_snapshot(self, snapshot): + """Deletes a snapshot.""" + + LOG.debug(_('Delete snapshot: %s'), snapshot['name']) + + # Retrieve the CG name for the base volume + volume_name = self.configuration.zadara_vol_name_template\ + % snapshot['volume_name'] + cg_name = self._get_volume_cg_name(volume_name) + if not cg_name: + # If the volume isn't present, then don't attempt to delete + LOG.warning(_("snapshot: original volume %s not found, " + "skipping delete operation") + % snapshot['volume_name']) + return True + + snap_id = self._get_snap_id(cg_name, snapshot['name']) + if not snap_id: + # If the snapshot isn't present, then don't attempt to delete + LOG.warning(_("snapshot: snapshot %s not found, " + "skipping delete operation") + % snapshot['name']) + return True + + self.vpsa.send_cmd('delete_snapshot', + snap_id=snap_id) + + def create_volume_from_snapshot(self, volume, snapshot): + """Creates a volume from a snapshot.""" + + LOG.debug(_('Creating volume from snapshot: %s') % snapshot['name']) + + # Retrieve the CG name for the base volume + volume_name = self.configuration.zadara_vol_name_template\ + % snapshot['volume_name'] + cg_name = self._get_volume_cg_name(volume_name) + if not cg_name: + msg = _('Volume %(name)s not found') % {'name': volume_name} + LOG.error(msg) + raise exception.VolumeNotFound(volume_id=volume_name) + + snap_id = self._get_snap_id(cg_name, snapshot['name']) + if not snap_id: + msg = _('Snapshot %(name)s not found') % {'name': snapshot['name']} + LOG.error(msg) + raise exception.VolumeNotFound(volume_id=snapshot['name']) + + self.vpsa.send_cmd('create_clone_from_snap', + cg_name=cg_name, + name=self.configuration.zadara_vol_name_template + % volume['name'], + snap_id=snap_id) + + def create_cloned_volume(self, volume, src_vref): + """Creates a clone of the specified volume.""" + + LOG.debug(_('Creating clone of volume: %s') % src_vref['name']) + + # Retrieve the CG name for the base volume + volume_name = self.configuration.zadara_vol_name_template\ + % 
src_vref['name'] + cg_name = self._get_volume_cg_name(volume_name) + if not cg_name: + msg = _('Volume %(name)s not found') % {'name': volume_name} + LOG.error(msg) + raise exception.VolumeNotFound(volume_id=volume_name) + + self.vpsa.send_cmd('create_clone', + cg_name=cg_name, + name=self.configuration.zadara_vol_name_template + % volume['name']) + + def extend_volume(self, volume, new_size): + """Extend an existing volume.""" + # Get volume name + name = self.configuration.zadara_vol_name_template % volume['name'] + (vpsa_vol, size) = self._get_vpsa_volume_name_and_size(name) + if not vpsa_vol: + msg = _('Volume %(name)s could not be found. ' + 'It might be already deleted') % {'name': name} + LOG.error(msg) + raise exception.VolumeNotFound(volume_id=name) + + if new_size < size: + raise exception.InvalidInput( + reason='%s < current size %s' % (new_size, size)) + + expand_size = new_size - size + self.vpsa.send_cmd('expand_volume', + vpsa_vol=vpsa_vol, + size=expand_size) + def create_export(self, context, volume): """Irrelevant for VPSA volumes. Export created during attachment.""" pass @@ -390,8 +555,7 @@ def remove_export(self, context, volume): pass def initialize_connection(self, volume, connector): - """ - Attach volume to initiator/host. + """Attach volume to initiator/host. During this call VPSA exposes volume to particular Initiator. It also creates a 'server' entity for Initiator (if it was not created before) @@ -407,7 +571,7 @@ def initialize_connection(self, volume, connector): raise exception.ZadaraServerCreateFailure(name=initiator_name) # Get volume name - name = FLAGS.zadara_vol_name_template % volume['name'] + name = self.configuration.zadara_vol_name_template % volume['name'] vpsa_vol = self._get_vpsa_volume_name(name) if not vpsa_vol: raise exception.VolumeNotFound(volume_id=name) @@ -419,7 +583,8 @@ def initialize_connection(self, volume, connector): # Attach volume to server self.vpsa.send_cmd('attach_volume', - vpsa_srv=vpsa_srv, vpsa_vol=vpsa_vol) + vpsa_srv=vpsa_srv, + vpsa_vol=vpsa_vol) # Get connection info xml_tree = self.vpsa.send_cmd('list_vol_attachments', @@ -431,8 +596,9 @@ def initialize_connection(self, volume, connector): target = server.findtext('target') lun = server.findtext('lun') if target is None or lun is None: - raise exception.ZadaraInvalidAttachmentInfo(name=name, - reason='target=%s, lun=%s' % (target, lun)) + raise exception.ZadaraInvalidAttachmentInfo( + name=name, + reason='target=%s, lun=%s' % (target, lun)) properties = {} properties['target_discovered'] = False @@ -445,14 +611,13 @@ def initialize_connection(self, volume, connector): properties['auth_username'] = ctrl['chap_user'] properties['auth_password'] = ctrl['chap_passwd'] - LOG.debug(_('Attach properties: %(properties)s') % locals()) + LOG.debug(_('Attach properties: %(properties)s'), + {'properties': properties}) return {'driver_volume_type': 'iscsi', 'data': properties} - def terminate_connection(self, volume, connector): - """ - Detach volume from the initiator. 
-        """
+    def terminate_connection(self, volume, connector, **kwargs):
+        """Detach volume from the initiator."""
         # Get server name for IQN
         initiator_name = connector['initiator']
         vpsa_srv = self._get_server_name(initiator_name)
@@ -460,20 +625,42 @@ def terminate_connection(self, volume, connector):
             raise exception.ZadaraServerNotFound(name=initiator_name)

         # Get volume name
-        name = FLAGS.zadara_vol_name_template % volume['name']
+        name = self.configuration.zadara_vol_name_template % volume['name']
         vpsa_vol = self._get_vpsa_volume_name(name)
         if not vpsa_vol:
             raise exception.VolumeNotFound(volume_id=name)

         # Detach volume from server
         self.vpsa.send_cmd('detach_volume',
-                           vpsa_srv=vpsa_srv, vpsa_vol=vpsa_vol)
+                           vpsa_srv=vpsa_srv,
+                           vpsa_vol=vpsa_vol)

-    def create_volume_from_snapshot(self, volume, snapshot):
-        raise NotImplementedError()
+    def get_volume_stats(self, refresh=False):
+        """Get volume stats.
+
+        If 'refresh' is True, update the stats first.
+        """
+        if refresh:
+            self._update_volume_stats()

-    def create_snapshot(self, snapshot):
-        raise NotImplementedError()
+        return self._stats

-    def delete_snapshot(self, snapshot):
-        raise NotImplementedError()
+    def _update_volume_stats(self):
+        """Retrieve stats info from volume group."""
+
+        LOG.debug(_("Updating volume stats"))
+        data = {}
+
+        backend_name = self.configuration.safe_get('volume_backend_name')
+        data["volume_backend_name"] = backend_name or self.__class__.__name__
+        data["vendor_name"] = 'Zadara Storage'
+        data["driver_version"] = self.VERSION
+        data["storage_protocol"] = 'iSCSI'
+        data['reserved_percentage'] = self.configuration.reserved_percentage
+        data['QoS_support'] = False
+
+        (total, free) = self._get_pool_capacity(self.configuration.
+                                                zadara_vpsa_poolname)
+        data['total_capacity_gb'] = total
+        data['free_capacity_gb'] = free
+
+        self._stats = data
diff --git a/cinder/volume/flows/__init__.py b/cinder/volume/flows/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/cinder/volume/flows/api/__init__.py b/cinder/volume/flows/api/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/cinder/volume/flows/api/create_volume.py b/cinder/volume/flows/api/create_volume.py
new file mode 100644
index 0000000000..da985cbc22
--- /dev/null
+++ b/cinder/volume/flows/api/create_volume.py
@@ -0,0 +1,719 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
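The flow modules introduced below are built on TaskFlow: each task declares
what it requires and provides, tasks are chained into a linear flow, and a
task may carry a revert() that runs if a later task fails. A minimal,
runnable sketch of that pattern, with illustrative names that are not part
of this change::

    import taskflow.engines
    from taskflow.patterns import linear_flow
    from taskflow import task


    class MakeEntry(task.Task):
        # The return value of execute() is stored under this name and is
        # injected into later tasks that require 'entry'.
        default_provides = 'entry'

        def execute(self, name):
            return {'name': name, 'status': 'creating'}

        def revert(self, name, **kwargs):
            # Only called if a task later in the flow fails.
            print('undoing entry for %s' % name)


    class MarkReady(task.Task):
        def execute(self, entry):
            entry['status'] = 'available'
            print('entry ready: %s' % entry)


    flow = linear_flow.Flow('demo')
    flow.add(MakeEntry(), MarkReady())
    # 'store' seeds the flow inputs, much like 'create_what' does below.
    taskflow.engines.load(flow, store={'name': 'vol-1'}).run()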
+
+
+from oslo.config import cfg
+import taskflow.engines
+from taskflow.patterns import linear_flow
+from taskflow.utils import misc
+
+from cinder import exception
+from cinder import flow_utils
+from cinder.openstack.common import log as logging
+from cinder.openstack.common import timeutils
+from cinder import policy
+from cinder import quota
+from cinder import units
+from cinder import utils
+from cinder.volume.flows import common
+from cinder.volume import volume_types
+
+LOG = logging.getLogger(__name__)
+
+ACTION = 'volume:create'
+CONF = cfg.CONF
+GB = units.GiB
+QUOTAS = quota.QUOTAS
+
+# Creating a volume from a source volume or a source snapshot is only
+# attempted when the source is in one of these statuses; other statuses
+# ('error' being the common example) can not be used as a creation source.
+SNAPSHOT_PROCEED_STATUS = ('available',)
+SRC_VOL_PROCEED_STATUS = ('available', 'in-use',)
+
+
+class ExtractVolumeRequestTask(flow_utils.CinderTask):
+    """Processes api request values into a validated set of values.
+
+    This task's responsibility is to take in a set of inputs that will form
+    a potential volume request, validate those values against a set of
+    conditions and/or translate them into a valid set, and then return the
+    validated/translated values for use by other tasks.
+
+    Reversion strategy: N/A
+    """
+
+    # This task will produce the following outputs (said outputs can be
+    # saved to durable storage in the future so that the flow can be
+    # reconstructed elsewhere and continued).
+    default_provides = set(['availability_zone', 'size', 'snapshot_id',
+                            'source_volid', 'volume_type', 'volume_type_id',
+                            'encryption_key_id'])
+
+    def __init__(self, image_service, az_check_functor=None, **kwargs):
+        super(ExtractVolumeRequestTask, self).__init__(addons=[ACTION],
+                                                       **kwargs)
+        self.image_service = image_service
+        self.az_check_functor = az_check_functor
+        if not self.az_check_functor:
+            self.az_check_functor = lambda az: True
+
+    @staticmethod
+    def _extract_snapshot(snapshot):
+        """Extracts the snapshot id from the provided snapshot (if provided).
+
+        This function validates the input snapshot dict and checks that the
+        status of that snapshot is valid for creating a volume from.
+        """
+
+        snapshot_id = None
+        if snapshot is not None:
+            if snapshot['status'] not in SNAPSHOT_PROCEED_STATUS:
+                msg = _("Originating snapshot status must be one"
+                        " of %s values")
+                msg = msg % (", ".join(SNAPSHOT_PROCEED_STATUS))
+                # TODO(harlowja): what happens if the status changes after this
+                # initial snapshot status check occurs??? Seems like someone
+                # could delete the snapshot after this check passes but before
+                # the volume is officially created?
+                raise exception.InvalidSnapshot(reason=msg)
+            snapshot_id = snapshot['id']
+        return snapshot_id
+
+    @staticmethod
+    def _extract_source_volume(source_volume):
+        """Extracts the volume id from the provided volume (if provided).
+
+        This function validates the input source_volume dict and checks that
+        the status of that source_volume is valid for creating a volume from.
+        """
+
+        source_volid = None
+        if source_volume is not None:
+            if source_volume['status'] not in SRC_VOL_PROCEED_STATUS:
+                msg = _("Unable to create a volume from an originating source"
+                        " volume when its status is not one of %s"
+                        " values")
+                msg = msg % (", ".join(SRC_VOL_PROCEED_STATUS))
+                # TODO(harlowja): what happens if the status changes after this
+                # initial volume status check occurs??? Seems like someone
+                # could delete the volume after this check passes but before
+                # the volume is officially created?
+                raise exception.InvalidVolume(reason=msg)
+            source_volid = source_volume['id']
+        return source_volid
+
+    @staticmethod
+    def _extract_size(size, source_volume, snapshot):
+        """Extracts and validates the volume size.
+
+        This function will validate the provided size, or, when it is not
+        provided, fill it in from the source_volume or snapshot, then
+        validate the resulting size and return it.
+        """
+
+        def validate_snap_size(size):
+            if snapshot and size < snapshot['volume_size']:
+                msg = _("Volume size %(size)sGB cannot be smaller than"
+                        " the snapshot size %(snap_size)sGB. "
+                        "They must be >= original snapshot size.")
+                msg = msg % {'size': size,
+                             'snap_size': snapshot['volume_size']}
+                raise exception.InvalidInput(reason=msg)
+
+        def validate_source_size(size):
+            if source_volume and size < source_volume['size']:
+                msg = _("Volume size %(size)sGB cannot be smaller than "
+                        "original volume size %(source_size)sGB. "
+                        "They must be >= original volume size.")
+                msg = msg % {'size': size,
+                             'source_size': source_volume['size']}
+                raise exception.InvalidInput(reason=msg)
+
+        def validate_int(size):
+            if not isinstance(size, int) or size <= 0:
+                msg = _("Volume size %(size)s must be an integer and"
+                        " greater than 0") % {'size': size}
+                raise exception.InvalidInput(reason=msg)
+
+        # Figure out which validation functions we should be applying
+        # on the size value that we extract.
+        validator_functors = [validate_int]
+        if source_volume:
+            validator_functors.append(validate_source_size)
+        elif snapshot:
+            validator_functors.append(validate_snap_size)
+
+        # If the size is not provided then try to provide it.
+        if not size and source_volume:
+            size = source_volume['size']
+        elif not size and snapshot:
+            size = snapshot['volume_size']
+
+        size = utils.as_int(size)
+        LOG.debug("Validating volume %(size)s using %(functors)s" %
+                  {'size': size,
+                   'functors': ", ".join([common.make_pretty_name(func)
+                                          for func in validator_functors])})
+        for func in validator_functors:
+            func(size)
+        return size
+
+    def _check_image_metadata(self, context, image_id, size):
+        """Checks image existence and validates the image metadata."""
+
+        # Check image existence
+        if not image_id:
+            return
+
+        # NOTE(harlowja): this should raise an error if the image does not
+        # exist, this is expected as it signals that the image_id is missing.
+        image_meta = self.image_service.show(context, image_id)
+
+        # Check image size is not larger than volume size.
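+        # ((image_size + GB - 1) / GB below is ceiling division: the byte
+        # count is rounded up to whole gigabytes, so even a 1-byte image
+        # requires a volume of at least 1GB.)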
+ image_size = utils.as_int(image_meta['size'], quiet=False) + image_size_in_gb = (image_size + GB - 1) / GB + if image_size_in_gb > size: + msg = _('Size of specified image %(image_size)sGB' + ' is larger than volume size %(volume_size)sGB.') + msg = msg % {'image_size': image_size_in_gb, 'volume_size': size} + raise exception.InvalidInput(reason=msg) + + # Check image min_disk requirement is met for the particular volume + min_disk = image_meta.get('min_disk', 0) + if size < min_disk: + msg = _('Volume size %(volume_size)sGB cannot be smaller' + ' than the image minDisk size %(min_disk)sGB.') + msg = msg % {'volume_size': size, 'min_disk': min_disk} + raise exception.InvalidInput(reason=msg) + + @staticmethod + def _check_metadata_properties(metadata=None): + """Checks that the volume metadata properties are valid.""" + + if not metadata: + metadata = {} + + for (k, v) in metadata.iteritems(): + if len(k) == 0: + msg = _("Metadata property key blank") + LOG.warn(msg) + raise exception.InvalidVolumeMetadata(reason=msg) + if len(k) > 255: + msg = _("Metadata property key %s greater than 255 " + "characters") % k + LOG.warn(msg) + raise exception.InvalidVolumeMetadataSize(reason=msg) + if len(v) > 255: + msg = _("Metadata property key %s value greater than" + " 255 characters") % k + LOG.warn(msg) + raise exception.InvalidVolumeMetadataSize(reason=msg) + + def _extract_availability_zone(self, availability_zone, snapshot, + source_volume): + """Extracts and returns a validated availability zone. + + This function will extract the availability zone (if not provided) from + the snapshot or source_volume and then performs a set of validation + checks on the provided or extracted availability zone and then returns + the validated availability zone. + """ + + # Try to extract the availability zone from the corresponding snapshot + # or source volume if either is valid so that we can be in the same + # availability zone as the source. + if availability_zone is None: + if snapshot: + try: + availability_zone = snapshot['volume']['availability_zone'] + except (TypeError, KeyError): + pass + if source_volume and availability_zone is None: + try: + availability_zone = source_volume['availability_zone'] + except (TypeError, KeyError): + pass + + if availability_zone is None: + if CONF.default_availability_zone: + availability_zone = CONF.default_availability_zone + else: + # For backwards compatibility use the storage_availability_zone + availability_zone = CONF.storage_availability_zone + if not self.az_check_functor(availability_zone): + msg = _("Availability zone '%s' is invalid") % (availability_zone) + LOG.warn(msg) + raise exception.InvalidInput(reason=msg) + + # If the configuration only allows cloning to the same availability + # zone then we need to enforce that. 
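+        # For example: with cloned_volume_same_az=True, asking for zone
+        # 'az-2' while cloning a volume that lives in 'az-1' is rejected
+        # below with InvalidInput (the zone names here are illustrative).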
+ if CONF.cloned_volume_same_az: + snap_az = None + try: + snap_az = snapshot['volume']['availability_zone'] + except (TypeError, KeyError): + pass + if snap_az and snap_az != availability_zone: + msg = _("Volume must be in the same " + "availability zone as the snapshot") + raise exception.InvalidInput(reason=msg) + source_vol_az = None + try: + source_vol_az = source_volume['availability_zone'] + except (TypeError, KeyError): + pass + if source_vol_az and source_vol_az != availability_zone: + msg = _("Volume must be in the same " + "availability zone as the source volume") + raise exception.InvalidInput(reason=msg) + + return availability_zone + + def _get_encryption_key_id(self, key_manager, context, volume_type_id, + snapshot, source_volume, backup_source_volume): + encryption_key_id = None + if volume_types.is_encrypted(context, volume_type_id): + if snapshot is not None: # creating from snapshot + encryption_key_id = snapshot['encryption_key_id'] + elif source_volume is not None: # cloning volume + encryption_key_id = source_volume['encryption_key_id'] + elif backup_source_volume is not None: # creating from backup + encryption_key_id = backup_source_volume['encryption_key_id'] + + # NOTE(joel-coffman): References to the encryption key should *not* + # be copied because the key is deleted when the volume is deleted. + # Clone the existing key and associate a separate -- but + # identical -- key with each volume. + if encryption_key_id is not None: + encryption_key_id = key_manager.copy_key(context, + encryption_key_id) + else: + encryption_key_id = key_manager.create_key(context) + + return encryption_key_id + + def _get_volume_type_id(self, volume_type, source_volume, snapshot, + backup_source_volume): + volume_type_id = None + if not volume_type and source_volume: + volume_type_id = source_volume['volume_type_id'] + elif snapshot is not None: + if volume_type: + current_volume_type_id = volume_type.get('id') + if (current_volume_type_id != + snapshot['volume_type_id']): + msg = _("Volume type will be changed to " + "be the same as the source volume.") + LOG.warn(msg) + volume_type_id = snapshot['volume_type_id'] + elif backup_source_volume is not None: + volume_type_id = backup_source_volume['volume_type_id'] + else: + volume_type_id = volume_type.get('id') + + return volume_type_id + + def execute(self, context, size, snapshot, image_id, source_volume, + availability_zone, volume_type, metadata, + key_manager, backup_source_volume): + + utils.check_exclusive_options(snapshot=snapshot, + imageRef=image_id, + source_volume=source_volume) + policy.enforce_action(context, ACTION) + + # TODO(harlowja): what guarantee is there that the snapshot or source + # volume will remain available after we do this initial verification?? + snapshot_id = self._extract_snapshot(snapshot) + source_volid = self._extract_source_volume(source_volume) + size = self._extract_size(size, source_volume, snapshot) + + self._check_image_metadata(context, image_id, size) + + availability_zone = self._extract_availability_zone(availability_zone, + snapshot, + source_volume) + + # TODO(joel-coffman): This special handling of snapshots to ensure that + # their volume type matches the source volume is too convoluted. We + # should copy encryption metadata from the encrypted volume type to the + # volume upon creation and propagate that information to each snapshot. + # This strategy avoid any dependency upon the encrypted volume type. 
+ if not volume_type and not source_volume and not snapshot: + volume_type = volume_types.get_default_volume_type() + + volume_type_id = self._get_volume_type_id(volume_type, + source_volume, snapshot, + backup_source_volume) + + encryption_key_id = self._get_encryption_key_id(key_manager, + context, + volume_type_id, + snapshot, + source_volume, + backup_source_volume) + + specs = {} + if volume_type_id: + qos_specs = volume_types.get_volume_type_qos_specs(volume_type_id) + specs = qos_specs['qos_specs'] + if not specs: + # to make sure we don't pass empty dict + specs = None + + self._check_metadata_properties(metadata) + + return { + 'size': size, + 'snapshot_id': snapshot_id, + 'source_volid': source_volid, + 'availability_zone': availability_zone, + 'volume_type': volume_type, + 'volume_type_id': volume_type_id, + 'encryption_key_id': encryption_key_id, + 'qos_specs': specs, + } + + +class EntryCreateTask(flow_utils.CinderTask): + """Creates an entry for the given volume creation in the database. + + Reversion strategy: remove the volume_id created from the database. + """ + + default_provides = set(['volume_properties', 'volume_id', 'volume']) + + def __init__(self, db): + requires = ['availability_zone', 'description', 'metadata', + 'name', 'reservations', 'size', 'snapshot_id', + 'source_volid', 'volume_type_id', 'encryption_key_id'] + super(EntryCreateTask, self).__init__(addons=[ACTION], + requires=requires) + self.db = db + self.provides.update() + + def execute(self, context, **kwargs): + """Creates a database entry for the given inputs and returns details. + + Accesses the database and creates a new entry for the to be created + volume using the given volume properties which are extracted from the + input kwargs (and associated requirements this task needs). These + requirements should be previously satisfied and validated by a + pre-cursor task. + """ + + volume_properties = { + 'size': kwargs.pop('size'), + 'user_id': context.user_id, + 'project_id': context.project_id, + 'status': 'creating', + 'attach_status': 'detached', + 'encryption_key_id': kwargs.pop('encryption_key_id'), + # Rename these to the internal name. + 'display_description': kwargs.pop('description'), + 'display_name': kwargs.pop('name'), + } + + # Merge in the other required arguments which should provide the rest + # of the volume property fields (if applicable). + volume_properties.update(kwargs) + volume = self.db.volume_create(context, volume_properties) + + return { + 'volume_id': volume['id'], + 'volume_properties': volume_properties, + # NOTE(harlowja): it appears like further usage of this volume + # result actually depend on it being a sqlalchemy object and not + # just a plain dictionary so that's why we are storing this here. + # + # In the future where this task results can be serialized and + # restored automatically for continued running we will need to + # resolve the serialization & recreation of this object since raw + # sqlalchemy objects can't be serialized. + 'volume': volume, + } + + def revert(self, context, result, **kwargs): + # We never produced a result and therefore can't destroy anything. + if isinstance(result, misc.Failure): + return + if context.quota_committed: + # Committed quota doesn't rollback as the volume has already been + # created at this point, and the quota has already been absorbed. 
+            return
+        vol_id = result['volume_id']
+        try:
+            self.db.volume_destroy(context.elevated(), vol_id)
+        except exception.CinderException:
+            # We are already reverting, therefore we should silence this
+            # exception since a second exception being active will be bad.
+            #
+            # NOTE(harlowja): Being unable to destroy a volume is pretty
+            # bad though!!
+            LOG.exception(_("Failed destroying volume entry %s"), vol_id)
+
+
+class QuotaReserveTask(flow_utils.CinderTask):
+    """Reserves a single volume with the given size & the given volume type.
+
+    Reversion strategy: rollback the quota reservation.
+
+    Warning: if the process that is running this reserve and commit sequence
+    fails (or is killed) before the quota is rolled back or committed, the
+    quota will never be rolled back. This makes software upgrades hard
+    (inflight operations will need to be stopped or allowed to complete
+    before the upgrade can occur). *In the future* when taskflow has
+    persistence built-in this should be easier to correct via an automated
+    or manual process.
+    """
+
+    default_provides = set(['reservations'])
+
+    def __init__(self):
+        super(QuotaReserveTask, self).__init__(addons=[ACTION])
+
+    def execute(self, context, size, volume_type_id):
+        try:
+            reserve_opts = {'volumes': 1, 'gigabytes': size}
+            QUOTAS.add_volume_type_opts(context, reserve_opts, volume_type_id)
+            reservations = QUOTAS.reserve(context, **reserve_opts)
+            return {
+                'reservations': reservations,
+            }
+        except exception.OverQuota as e:
+            overs = e.kwargs['overs']
+            quotas = e.kwargs['quotas']
+            usages = e.kwargs['usages']
+
+            def _consumed(name):
+                return (usages[name]['reserved'] + usages[name]['in_use'])
+
+            def _is_over(name):
+                for over in overs:
+                    if name in over:
+                        return True
+                return False
+
+            if _is_over('gigabytes'):
+                msg = _("Quota exceeded for %(s_pid)s, tried to create "
+                        "%(s_size)sG volume (%(d_consumed)dG "
+                        "of %(d_quota)dG already consumed)")
+                LOG.warn(msg % {'s_pid': context.project_id,
+                                's_size': size,
+                                'd_consumed': _consumed('gigabytes'),
+                                'd_quota': quotas['gigabytes']})
+                raise exception.VolumeSizeExceedsAvailableQuota(
+                    requested=size,
+                    consumed=_consumed('gigabytes'),
+                    quota=quotas['gigabytes'])
+            elif _is_over('volumes'):
+                msg = _("Quota exceeded for %(s_pid)s, tried to create "
+                        "volume (%(d_consumed)d volumes "
+                        "already consumed)")
+                LOG.warn(msg % {'s_pid': context.project_id,
+                                'd_consumed': _consumed('volumes')})
+                raise exception.VolumeLimitExceeded(allowed=quotas['volumes'])
+            else:
+                # If nothing was reraised, ensure we reraise the initial error
+                raise
+
+    def revert(self, context, result, **kwargs):
+        # We never produced a result and therefore can't destroy anything.
+        if isinstance(result, misc.Failure):
+            return
+        if context.quota_committed:
+            # The reservations have already been committed and can not be
+            # rolled back at this point.
+            return
+        # We actually produced an output that we can revert, so let's attempt
+        # to use said output to rollback the reservation.
+        reservations = result['reservations']
+        try:
+            QUOTAS.rollback(context, reservations)
+        except exception.CinderException:
+            # We are already reverting, therefore we should silence this
+            # exception since a second exception being active will be bad.
+            LOG.exception(_("Failed rolling back quota for"
+                            " %s reservations"), reservations)
+
+
+class QuotaCommitTask(flow_utils.CinderTask):
+    """Commits the reservation.
+
+    Reversion strategy: N/A (the rollback will be handled by the task that
+    did the initial reservation; see QuotaReserveTask).
+
+    Warning: if the process that is running this reserve and commit sequence
+    fails (or is killed) before the quota is rolled back or committed, the
+    quota will never be rolled back. This makes software upgrades hard
+    (inflight operations will need to be stopped or allowed to complete
+    before the upgrade can occur). *In the future* when taskflow has
+    persistence built-in this should be easier to correct via an automated
+    or manual process.
+    """
+
+    def __init__(self):
+        super(QuotaCommitTask, self).__init__(addons=[ACTION])
+
+    def execute(self, context, reservations, volume_properties):
+        QUOTAS.commit(context, reservations)
+        context.quota_committed = True
+        return {'volume_properties': volume_properties}
+
+    def revert(self, context, result, **kwargs):
+        # We never produced a result and therefore can't destroy anything.
+        if isinstance(result, misc.Failure):
+            return
+        volume = result['volume_properties']
+        try:
+            reserve_opts = {'volumes': -1, 'gigabytes': -volume['size']}
+            QUOTAS.add_volume_type_opts(context,
+                                        reserve_opts,
+                                        volume['volume_type_id'])
+            reservations = QUOTAS.reserve(context,
+                                          project_id=context.project_id,
+                                          **reserve_opts)
+            if reservations:
+                QUOTAS.commit(context, reservations,
+                              project_id=context.project_id)
+        except Exception:
+            LOG.exception(_("Failed to update quota for deleting volume: %s"),
+                          volume['id'])
+
+
+class VolumeCastTask(flow_utils.CinderTask):
+    """Performs a volume create cast to the scheduler or the volume manager.
+
+    This signals a transition of the api workflow to another child and/or
+    related workflow on another component.
+
+    Reversion strategy: N/A
+    """
+
+    def __init__(self, scheduler_rpcapi, volume_rpcapi, db):
+        requires = ['image_id', 'scheduler_hints', 'snapshot_id',
+                    'source_volid', 'volume_id', 'volume_type',
+                    'volume_properties']
+        super(VolumeCastTask, self).__init__(addons=[ACTION],
+                                             requires=requires)
+        self.volume_rpcapi = volume_rpcapi
+        self.scheduler_rpcapi = scheduler_rpcapi
+        self.db = db
+
+    def _cast_create_volume(self, context, request_spec, filter_properties):
+        source_volid = request_spec['source_volid']
+        volume_id = request_spec['volume_id']
+        snapshot_id = request_spec['snapshot_id']
+        image_id = request_spec['image_id']
+        host = None
+
+        if snapshot_id and CONF.snapshot_same_host:
+            # NOTE(Rongze Zhu): A simple solution for bug 1008866.
+            #
+            # If snapshot_id is set, send the create volume call directly to
+            # the volume host where the snapshot resides instead of passing it
+            # through the scheduler, so the snapshot can be copied to the new
+            # volume.
+            snapshot_ref = self.db.snapshot_get(context, snapshot_id)
+            source_volume_ref = self.db.volume_get(context,
+                                                   snapshot_ref['volume_id'])
+            host = source_volume_ref['host']
+        elif source_volid:
+            source_volume_ref = self.db.volume_get(context, source_volid)
+            host = source_volume_ref['host']
+
+        if not host:
+            # Cast to the scheduler and let it handle whatever is needed
+            # to select the target host for this volume.
+            self.scheduler_rpcapi.create_volume(
+                context,
+                CONF.volume_topic,
+                volume_id,
+                snapshot_id=snapshot_id,
+                image_id=image_id,
+                request_spec=request_spec,
+                filter_properties=filter_properties)
+        else:
+            # Bypass the scheduler and send the request directly to the volume
+            # manager.
+ now = timeutils.utcnow() + values = {'host': host, 'scheduled_at': now} + volume_ref = self.db.volume_update(context, volume_id, values) + self.volume_rpcapi.create_volume( + context, + volume_ref, + volume_ref['host'], + request_spec, + filter_properties, + allow_reschedule=False, + snapshot_id=snapshot_id, + image_id=image_id, + source_volid=source_volid) + + def execute(self, context, **kwargs): + scheduler_hints = kwargs.pop('scheduler_hints', None) + request_spec = kwargs.copy() + filter_properties = {} + if scheduler_hints: + filter_properties['scheduler_hints'] = scheduler_hints + self._cast_create_volume(context, request_spec, filter_properties) + + def revert(self, context, result, flow_failures, **kwargs): + if isinstance(result, misc.Failure): + return + + # Restore the source volume status and set the volume to error status. + volume_id = kwargs['volume_id'] + common.restore_source_status(context, self.db, kwargs) + common.error_out_volume(context, self.db, volume_id) + LOG.error(_("Volume %s: create failed"), volume_id) + exc_info = False + if all(flow_failures[-1].exc_info): + exc_info = flow_failures[-1].exc_info + LOG.error(_('Unexpected build error:'), exc_info=exc_info) + + +def get_flow(scheduler_rpcapi, volume_rpcapi, db, + image_service, + az_check_functor, + create_what): + """Constructs and returns the api entrypoint flow. + + This flow will do the following: + + 1. Inject keys & values for dependent tasks. + 2. Extracts and validates the input keys & values. + 3. Reserves the quota (reverts quota on any failures). + 4. Creates the database entry. + 5. Commits the quota. + 6. Casts to volume manager or scheduler for further processing. + """ + + flow_name = ACTION.replace(":", "_") + "_api" + api_flow = linear_flow.Flow(flow_name) + + api_flow.add(ExtractVolumeRequestTask( + image_service, + az_check_functor, + rebind={'size': 'raw_size', + 'availability_zone': 'raw_availability_zone', + 'volume_type': 'raw_volume_type'})) + api_flow.add(QuotaReserveTask(), + EntryCreateTask(db), + QuotaCommitTask()) + + # This will cast it out to either the scheduler or volume manager via + # the rpc apis provided. + api_flow.add(VolumeCastTask(scheduler_rpcapi, volume_rpcapi, db)) + + # Now load (but do not run) the flow using the provided initial data. + return taskflow.engines.load(api_flow, store=create_what) diff --git a/cinder/volume/flows/common.py b/cinder/volume/flows/common.py new file mode 100644 index 0000000000..f5e34d4fe3 --- /dev/null +++ b/cinder/volume/flows/common.py @@ -0,0 +1,92 @@ +# Copyright (C) 2013 Yahoo! Inc. All Rights Reserved. +# Copyright (c) 2013 OpenStack Foundation +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
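The api get_flow() above returns a loaded engine rather than running it;
invoking run() is left to the caller. A hypothetical invocation (placeholder
values; the store keys mirror the requires/rebind declarations of the tasks
above) might look like::

    from cinder.volume.flows.api import create_volume

    engine = create_volume.get_flow(
        scheduler_rpcapi, volume_rpcapi, db, image_service,
        az_check_functor,
        {'context': context,
         'raw_size': 10,
         'raw_availability_zone': None,
         'raw_volume_type': None,
         'snapshot': None,
         'source_volume': None,
         'image_id': None,
         'metadata': {},
         'key_manager': key_manager,
         'backup_source_volume': None,
         'name': 'vol-1',
         'description': '',
         'scheduler_hints': None})
    engine.run()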
+ + +from cinder import exception +from cinder.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + +# When a volume errors out we have the ability to save a piece of the exception +# that caused said failure, but we don't want to save the whole message since +# that could be very large, just save up to this number of characters. +REASON_LENGTH = 128 + + +def make_pretty_name(method): + """Makes a pretty name for a function/method.""" + meth_pieces = [method.__name__] + # If its an instance method attempt to tack on the class name + if hasattr(method, 'im_self') and method.im_self is not None: + try: + meth_pieces.insert(0, method.im_self.__class__.__name__) + except AttributeError: + pass + return ".".join(meth_pieces) + + +def restore_source_status(context, db, volume_spec): + # NOTE(harlowja): Only if the type of the volume that was being created is + # the source volume type should we try to reset the source volume status + # back to its original value. + if not volume_spec or volume_spec.get('type') != 'source_vol': + return + source_volid = volume_spec['source_volid'] + source_status = volume_spec['source_volstatus'] + try: + LOG.debug(_('Restoring source %(source_volid)s status to %(status)s') % + {'status': source_status, 'source_volid': source_volid}) + db.volume_update(context, source_volid, {'status': source_status}) + except exception.CinderException: + # NOTE(harlowja): Don't let this cause further exceptions since this is + # a non-critical failure. + LOG.exception(_("Failed setting source volume %(source_volid)s back to" + " its initial %(source_status)s status") % + {'source_status': source_status, + 'source_volid': source_volid}) + + +def error_out_volume(context, db, volume_id, reason=None): + + def _clean_reason(reason): + if reason is None: + return '???' + reason = str(reason) + if len(reason) <= REASON_LENGTH: + return reason + else: + return reason[0:REASON_LENGTH] + '...' + + update = { + 'status': 'error', + } + reason = _clean_reason(reason) + # TODO(harlowja): re-enable when we can support this in the database. + # if reason: + # status['details'] = reason + try: + LOG.debug(_('Updating volume: %(volume_id)s with %(update)s' + ' due to: %(reason)s') % {'volume_id': volume_id, + 'reason': reason, + 'update': update}) + db.volume_update(context, volume_id, update) + except exception.CinderException: + # Don't let this cause further exceptions. + LOG.exception(_("Failed updating volume %(volume_id)s with" + " %(update)s") % {'volume_id': volume_id, + 'update': update}) diff --git a/cinder/volume/flows/manager/__init__.py b/cinder/volume/flows/manager/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cinder/volume/flows/manager/create_volume.py b/cinder/volume/flows/manager/create_volume.py new file mode 100644 index 0000000000..080c16fbb3 --- /dev/null +++ b/cinder/volume/flows/manager/create_volume.py @@ -0,0 +1,734 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
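The helpers in common.py above are easy to check in isolation; for instance,
make_pretty_name only prefixes the class name for bound methods, which it
detects through Python 2's im_self attribute (consistent with the iteritems
usage elsewhere in this change)::

    from cinder.volume.flows.common import make_pretty_name


    class FakeManager(object):
        def create_volume(self, volume):
            pass


    # Bound method: class name is prepended.
    print(make_pretty_name(FakeManager().create_volume))  # FakeManager.create_volume
    # Plain function: just the function name.
    print(make_pretty_name(make_pretty_name))             # make_pretty_name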
+ +import traceback + +from oslo.config import cfg +import taskflow.engines +from taskflow.patterns import linear_flow +from taskflow.utils import misc + +from cinder import exception +from cinder import flow_utils +from cinder.image import glance +from cinder.openstack.common import log as logging +from cinder.openstack.common import processutils +from cinder.openstack.common import timeutils +from cinder import utils +from cinder.volume.flows import common +from cinder.volume import utils as volume_utils + +LOG = logging.getLogger(__name__) + +ACTION = 'volume:create' +CONF = cfg.CONF + +# These attributes we will attempt to save for the volume if they exist +# in the source image metadata. +IMAGE_ATTRIBUTES = ( + 'checksum', + 'container_format', + 'disk_format', + 'min_disk', + 'min_ram', + 'size', +) + + +class OnFailureRescheduleTask(flow_utils.CinderTask): + """Triggers a rescheduling request to be sent when reverting occurs. + + Reversion strategy: Triggers the rescheduling mechanism whereby a cast gets + sent to the scheduler rpc api to allow for an attempt X of Y for scheduling + this volume elsewhere. + """ + + def __init__(self, reschedule_context, db, scheduler_rpcapi): + requires = ['filter_properties', 'image_id', 'request_spec', + 'snapshot_id', 'volume_id', 'context'] + super(OnFailureRescheduleTask, self).__init__(addons=[ACTION], + requires=requires) + self.scheduler_rpcapi = scheduler_rpcapi + self.db = db + self.reschedule_context = reschedule_context + # These exception types will trigger the volume to be set into error + # status rather than being rescheduled. + self.no_reschedule_types = [ + # The volume has already finished being created when the exports + # occur, rescheduling would be bad if it happened due to exports + # not succeeding. + exception.ExportFailure, + # Image copying happens after volume creation so rescheduling due + # to copy failure will mean the same volume will be created at + # another place when it still exists locally. + exception.ImageCopyFailure, + # Metadata updates happen after the volume has been created so if + # they fail, rescheduling will likely attempt to create the volume + # on another machine when it still exists locally. + exception.MetadataCopyFailure, + exception.MetadataCreateFailure, + exception.MetadataUpdateFailure, + # The volume/snapshot has been removed from the database, that + # can not be fixed by rescheduling. 
+ exception.VolumeNotFound, + exception.SnapshotNotFound, + exception.VolumeTypeNotFound, + exception.ImageUnacceptable, + ] + + def execute(self, **kwargs): + pass + + def _reschedule(self, context, cause, request_spec, filter_properties, + snapshot_id, image_id, volume_id, **kwargs): + """Actions that happen during the rescheduling attempt occur here.""" + + create_volume = self.scheduler_rpcapi.create_volume + if not filter_properties: + filter_properties = {} + if 'retry' not in filter_properties: + filter_properties['retry'] = {} + + retry_info = filter_properties['retry'] + num_attempts = retry_info.get('num_attempts', 0) + request_spec['volume_id'] = volume_id + + LOG.debug(_("Volume %(volume_id)s: re-scheduling %(method)s " + "attempt %(num)d due to %(reason)s") % + {'volume_id': volume_id, + 'method': common.make_pretty_name(create_volume), + 'num': num_attempts, + 'reason': cause.exception_str}) + + if all(cause.exc_info): + # Stringify to avoid circular ref problem in json serialization + retry_info['exc'] = traceback.format_exception(*cause.exc_info) + + return create_volume(context, CONF.volume_topic, volume_id, + snapshot_id=snapshot_id, image_id=image_id, + request_spec=request_spec, + filter_properties=filter_properties) + + def _post_reschedule(self, context, volume_id): + """Actions that happen after the rescheduling attempt occur here.""" + + LOG.debug(_("Volume %s: re-scheduled"), volume_id) + + def _pre_reschedule(self, context, volume_id): + """Actions that happen before the rescheduling attempt occur here.""" + + try: + # Reset the volume state. + # + # NOTE(harlowja): this is awkward to be done here, shouldn't + # this happen at the scheduler itself and not before it gets + # sent to the scheduler? (since what happens if it never gets + # there??). It's almost like we need a status of 'on-the-way-to + # scheduler' in the future. + update = { + 'status': 'creating', + 'scheduled_at': timeutils.utcnow(), + } + LOG.debug(_("Updating volume %(volume_id)s with %(update)s.") % + {'update': update, 'volume_id': volume_id}) + self.db.volume_update(context, volume_id, update) + except exception.CinderException: + # Don't let resetting the status cause the rescheduling to fail. + LOG.exception(_("Volume %s: resetting 'creating' status failed."), + volume_id) + + def revert(self, context, result, flow_failures, **kwargs): + # Check if we have a cause which can tell us not to reschedule. + for failure in flow_failures.values(): + if failure.check(*self.no_reschedule_types): + return + + volume_id = kwargs['volume_id'] + # Use a different context when rescheduling. + if self.reschedule_context: + context = self.reschedule_context + try: + cause = list(flow_failures.values())[0] + self._pre_reschedule(context, volume_id) + self._reschedule(context, cause, **kwargs) + self._post_reschedule(context, volume_id) + except exception.CinderException: + LOG.exception(_("Volume %s: rescheduling failed"), volume_id) + + +class ExtractVolumeRefTask(flow_utils.CinderTask): + """Extracts volume reference for given volume id.""" + + default_provides = 'volume_ref' + + def __init__(self, db): + super(ExtractVolumeRefTask, self).__init__(addons=[ACTION]) + self.db = db + + def execute(self, context, volume_id): + # NOTE(harlowja): this will fetch the volume from the database, if + # the volume has been deleted before we got here then this should fail. + # + # In the future we might want to have a lock on the volume_id so that + # the volume can not be deleted while its still being created? 
+ volume_ref = self.db.volume_get(context, volume_id) + + return volume_ref + + def revert(self, context, volume_id, result, **kwargs): + if isinstance(result, misc.Failure): + return + + common.error_out_volume(context, self.db, volume_id) + LOG.error(_("Volume %s: create failed"), volume_id) + + +class ExtractVolumeSpecTask(flow_utils.CinderTask): + """Extracts a spec of a volume to be created into a common structure. + + This task extracts and organizes the input requirements into a common + and easier to analyze structure for later tasks to use. It will also + attach the underlying database volume reference which can be used by + other tasks to reference for further details about the volume to be. + + Reversion strategy: N/A + """ + + default_provides = 'volume_spec' + + def __init__(self, db): + requires = ['image_id', 'snapshot_id', 'source_volid'] + super(ExtractVolumeSpecTask, self).__init__(addons=[ACTION], + requires=requires) + self.db = db + + def execute(self, context, volume_ref, **kwargs): + get_remote_image_service = glance.get_remote_image_service + + volume_name = volume_ref['name'] + volume_size = utils.as_int(volume_ref['size'], quiet=False) + + # Create a dictionary that will represent the volume to be so that + # later tasks can easily switch between the different types and create + # the volume according to the volume types specifications (which are + # represented in this dictionary). + specs = { + 'status': volume_ref['status'], + 'type': 'raw', # This will have the type of the volume to be + # created, which should be one of [raw, snap, + # source_vol, image] + 'volume_id': volume_ref['id'], + 'volume_name': volume_name, + 'volume_size': volume_size, + } + + if kwargs.get('snapshot_id'): + # We are making a snapshot based volume instead of a raw volume. + specs.update({ + 'type': 'snap', + 'snapshot_id': kwargs['snapshot_id'], + }) + elif kwargs.get('source_volid'): + # We are making a source based volume instead of a raw volume. + # + # NOTE(harlowja): This will likely fail if the source volume + # disappeared by the time this call occurred. + source_volid = kwargs['source_volid'] + source_volume_ref = self.db.volume_get(context, source_volid) + specs.update({ + 'source_volid': source_volid, + # This is captured incase we have to revert and we want to set + # back the source volume status to its original status. This + # may or may not be sketchy to do?? + 'source_volstatus': source_volume_ref['status'], + 'type': 'source_vol', + }) + elif kwargs.get('image_id'): + # We are making a image based volume instead of a raw volume. + image_href = kwargs['image_id'] + image_service, image_id = get_remote_image_service(context, + image_href) + specs.update({ + 'type': 'image', + 'image_id': image_id, + 'image_location': image_service.get_location(context, + image_id), + 'image_meta': image_service.show(context, image_id), + # Instead of refetching the image service later just save it. + # + # NOTE(harlowja): if we have to later recover this tasks output + # on another 'node' that this object won't be able to be + # serialized, so we will have to recreate this object on + # demand in the future. + 'image_service': image_service, + }) + + return specs + + def revert(self, context, result, **kwargs): + if isinstance(result, misc.Failure): + return + volume_spec = result.get('volume_spec') + # Restore the source volume status and set the volume to error status. 
+        common.restore_source_status(context, self.db, volume_spec)
+
+
+class NotifyVolumeActionTask(flow_utils.CinderTask):
+    """Performs a notification about the given volume when called.
+
+    Reversion strategy: N/A
+    """
+
+    def __init__(self, db, host, event_suffix):
+        super(NotifyVolumeActionTask, self).__init__(addons=[ACTION,
+                                                             event_suffix])
+        self.db = db
+        self.event_suffix = event_suffix
+        self.host = host
+
+    def execute(self, context, volume_ref):
+        volume_id = volume_ref['id']
+        try:
+            volume_utils.notify_about_volume_usage(context, volume_ref,
+                                                   self.event_suffix,
+                                                   host=self.host)
+        except exception.CinderException:
+            # If sending the notification or reading the volume database
+            # entry fails then we shouldn't error out the whole workflow,
+            # since this is not information that must be delivered for
+            # volumes to operate.
+            LOG.exception(_("Failed notifying about the volume"
+                            " action %(event)s for volume %(volume_id)s") %
+                          {'event': self.event_suffix,
+                           'volume_id': volume_id})
+
+
+class CreateVolumeFromSpecTask(flow_utils.CinderTask):
+    """Creates a volume from a provided specification.
+
+    Reversion strategy: N/A
+    """
+
+    default_provides = 'volume'
+
+    def __init__(self, db, host, driver):
+        super(CreateVolumeFromSpecTask, self).__init__(addons=[ACTION])
+        self.db = db
+        self.driver = driver
+        # This maps the different volume specification types into the methods
+        # that can create said volume type (aka this is a jump table).
+        self._create_func_mapping = {
+            'raw': self._create_raw_volume,
+            'snap': self._create_from_snapshot,
+            'source_vol': self._create_from_source_volume,
+            'image': self._create_from_image,
+        }
+        self.host = host
+
+    def _handle_bootable_volume_glance_meta(self, context, volume_id,
+                                            **kwargs):
+        """Enable bootable flag and properly handle glance metadata.
+
+        Caller should provide one and only one of snapshot_id, source_volid
+        and image_id. If image_id is specified, image_meta should also be
+        provided; otherwise it will be treated as an empty dictionary.
+ """ + + log_template = _("Copying metadata from %(src_type)s %(src_id)s to " + "%(vol_id)s.") + exception_template = _("Failed updating volume %(vol_id)s metadata" + " using the provided %(src_type)s" + " %(src_id)s metadata") + src_type = None + src_id = None + self._enable_bootable_flag(context, volume_id) + try: + if kwargs.get('snapshot_id'): + src_type = 'snapshot' + src_id = kwargs['snapshot_id'] + snapshot_id = src_id + LOG.debug(log_template % {'src_type': src_type, + 'src_id': src_id, + 'vol_id': volume_id}) + self.db.volume_glance_metadata_copy_to_volume( + context, volume_id, snapshot_id) + elif kwargs.get('source_volid'): + src_type = 'source volume' + src_id = kwargs['source_volid'] + source_volid = src_id + LOG.debug(log_template % {'src_type': src_type, + 'src_id': src_id, + 'vol_id': volume_id}) + self.db.volume_glance_metadata_copy_from_volume_to_volume( + context, + source_volid, + volume_id) + elif kwargs.get('image_id'): + src_type = 'image' + src_id = kwargs['image_id'] + image_id = src_id + image_meta = kwargs.get('image_meta', {}) + LOG.debug(log_template % {'src_type': src_type, + 'src_id': src_id, + 'vol_id': volume_id}) + self._capture_volume_image_metadata(context, volume_id, + image_id, image_meta) + except exception.CinderException as ex: + LOG.exception(exception_template % {'src_type': src_type, + 'src_id': src_id, + 'vol_id': volume_id}) + raise exception.MetadataCopyFailure(reason=ex) + + def _create_from_snapshot(self, context, volume_ref, snapshot_id, + **kwargs): + volume_id = volume_ref['id'] + snapshot_ref = self.db.snapshot_get(context, snapshot_id) + model_update = self.driver.create_volume_from_snapshot(volume_ref, + snapshot_ref) + # NOTE(harlowja): Subtasks would be useful here since after this + # point the volume has already been created and further failures + # will not destroy the volume (although they could in the future). + make_bootable = False + try: + originating_vref = self.db.volume_get(context, + snapshot_ref['volume_id']) + make_bootable = originating_vref.bootable + except exception.CinderException as ex: + LOG.exception(_("Failed fetching snapshot %(snapshot_id)s bootable" + " flag using the provided glance snapshot " + "%(snapshot_ref_id)s volume reference") % + {'snapshot_id': snapshot_id, + 'snapshot_ref_id': snapshot_ref['volume_id']}) + raise exception.MetadataUpdateFailure(reason=ex) + if make_bootable: + self._handle_bootable_volume_glance_meta(context, volume_id, + snapshot_id=snapshot_id) + return model_update + + def _enable_bootable_flag(self, context, volume_id): + try: + LOG.debug(_('Marking volume %s as bootable.'), volume_id) + self.db.volume_update(context, volume_id, {'bootable': True}) + except exception.CinderException as ex: + LOG.exception(_("Failed updating volume %(volume_id)s bootable" + " flag to true") % {'volume_id': volume_id}) + raise exception.MetadataUpdateFailure(reason=ex) + + def _create_from_source_volume(self, context, volume_ref, + source_volid, **kwargs): + # NOTE(harlowja): if the source volume has disappeared this will be our + # detection of that since this database call should fail. + # + # NOTE(harlowja): likely this is not the best place for this to happen + # and we should have proper locks on the source volume while actions + # that use the source volume are underway. 
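+        # (If the source volume is gone, volume_get is expected to raise
+        # VolumeNotFound, which is in the no_reschedule_types list above, so
+        # the failure will not trigger a reschedule.)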
+ srcvol_ref = self.db.volume_get(context, source_volid) + model_update = self.driver.create_cloned_volume(volume_ref, srcvol_ref) + # NOTE(harlowja): Subtasks would be useful here since after this + # point the volume has already been created and further failures + # will not destroy the volume (although they could in the future). + if srcvol_ref.bootable: + self._handle_bootable_volume_glance_meta(context, volume_ref['id'], + source_volid=source_volid) + return model_update + + def _copy_image_to_volume(self, context, volume_ref, + image_id, image_location, image_service): + """Downloads Glance image to the specified volume.""" + copy_image_to_volume = self.driver.copy_image_to_volume + volume_id = volume_ref['id'] + LOG.debug(_("Attempting download of %(image_id)s (%(image_location)s)" + " to volume %(volume_id)s.") % + {'image_id': image_id, 'volume_id': volume_id, + 'image_location': image_location}) + try: + copy_image_to_volume(context, volume_ref, image_service, image_id) + except processutils.ProcessExecutionError as ex: + LOG.error(_("Failed to copy image %(image_id)s to volume: " + "%(volume_id)s, error: %(error)s") % + {'volume_id': volume_id, + 'error': ex.stderr, 'image_id': image_id}) + raise exception.ImageCopyFailure(reason=ex.stderr) + except exception.ImageUnacceptable as ex: + LOG.error(_("Failed to copy image to volume: %(volume_id)s, " + "error: %(error)s") % {'volume_id': volume_id, + 'error': ex}) + raise exception.ImageUnacceptable(ex) + except Exception as ex: + LOG.error(_("Failed to copy image %(image_id)s to " + "volume: %(volume_id)s, error: %(error)s") % + {'volume_id': volume_id, 'error': ex, + 'image_id': image_id}) + if not isinstance(ex, exception.ImageCopyFailure): + raise exception.ImageCopyFailure(reason=ex) + else: + raise + + LOG.debug(_("Downloaded image %(image_id)s (%(image_location)s)" + " to volume %(volume_id)s successfully.") % + {'image_id': image_id, 'volume_id': volume_id, + 'image_location': image_location}) + + def _capture_volume_image_metadata(self, context, volume_id, + image_id, image_meta): + + # Save some base attributes into the volume metadata + base_metadata = { + 'image_id': image_id, + } + name = image_meta.get('name', None) + if name: + base_metadata['image_name'] = name + + # Save some more attributes into the volume metadata from the image + # metadata + for key in IMAGE_ATTRIBUTES: + if key not in image_meta: + continue + value = image_meta.get(key, None) + if value is not None: + base_metadata[key] = value + + # Save all the image metadata properties into the volume metadata + property_metadata = {} + image_properties = image_meta.get('properties', {}) + for (key, value) in image_properties.items(): + if value is not None: + property_metadata[key] = value + + # NOTE(harlowja): The best way for this to happen would be in bulk, + # but that doesn't seem to exist (yet), so we go through one by one + # which means we can have partial create/update failure. 
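+        # base_metadata is applied last, so reserved keys such as 'image_id'
+        # take precedence over image properties that use the same name.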
+ volume_metadata = dict(property_metadata) + volume_metadata.update(base_metadata) + LOG.debug(_("Creating volume glance metadata for volume %(volume_id)s" + " backed by image %(image_id)s with: %(vol_metadata)s.") % + {'volume_id': volume_id, 'image_id': image_id, + 'vol_metadata': volume_metadata}) + for (key, value) in volume_metadata.items(): + try: + self.db.volume_glance_metadata_create(context, volume_id, + key, value) + except exception.GlanceMetadataExists: + pass + + def _create_from_image(self, context, volume_ref, + image_location, image_id, image_meta, + image_service, **kwargs): + LOG.debug(_("Cloning %(volume_id)s from image %(image_id)s " + " at location %(image_location)s.") % + {'volume_id': volume_ref['id'], + 'image_location': image_location, 'image_id': image_id}) + # Create the volume from an image. + # + # NOTE (singn): two params need to be returned + # dict containing provider_location for cloned volume + # and clone status. + model_update, cloned = self.driver.clone_image( + volume_ref, image_location, image_id, image_meta) + if not cloned: + # TODO(harlowja): what needs to be rolled back in the clone if this + # volume create fails?? Likely this should be a subflow or broken + # out task in the future. That will bring up the question of how + # do we make said subflow/task which is only triggered in the + # clone image 'path' resumable and revertable in the correct + # manner. + # + # Create the volume and then download the image onto the volume. + model_update = self.driver.create_volume(volume_ref) + updates = dict(model_update or dict(), status='downloading') + try: + volume_ref = self.db.volume_update(context, + volume_ref['id'], updates) + except exception.CinderException: + LOG.exception(_("Failed updating volume %(volume_id)s with " + "%(updates)s") % + {'volume_id': volume_ref['id'], + 'updates': updates}) + self._copy_image_to_volume(context, volume_ref, + image_id, image_location, image_service) + + self._handle_bootable_volume_glance_meta(context, volume_ref['id'], + image_id=image_id, + image_meta=image_meta) + return model_update + + def _create_raw_volume(self, context, volume_ref, **kwargs): + return self.driver.create_volume(volume_ref) + + def execute(self, context, volume_ref, volume_spec): + volume_spec = dict(volume_spec) + volume_id = volume_spec.pop('volume_id', None) + + # we can't do anything if the driver didn't init + if not self.driver.initialized: + driver_name = self.driver.__class__.__name__ + LOG.error(_("Unable to create volume. " + "Volume driver %s not initialized") % driver_name) + # NOTE(flaper87): Set the error status before + # raising any exception. + self.db.volume_update(context, volume_id, dict(status='error')) + raise exception.DriverNotInitialized() + + create_type = volume_spec.pop('type', None) + create_functor = self._create_func_mapping.get(create_type) + if not create_functor: + raise exception.VolumeTypeNotFound(volume_type_id=create_type) + + if not volume_id: + volume_id = volume_ref['id'] + LOG.info(_("Volume %(volume_id)s: being created using %(functor)s " + "with specification: %(volume_spec)s") % + {'volume_spec': volume_spec, 'volume_id': volume_id, + 'functor': common.make_pretty_name(create_functor)}) + + # NOTE(vish): so we don't have to get volume from db again before + # passing it to the driver. + volume_ref['host'] = self.host + + # Call the given functor to make the volume. 
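+        # Each creation functor may return a model update dict (for example,
+        # one containing a provider_location); it is persisted to the
+        # database right below.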
+        model_update = create_functor(context, volume_ref=volume_ref,
+                                      **volume_spec)
+
+        # Persist any model information provided on creation.
+        try:
+            if model_update:
+                volume_ref = self.db.volume_update(context, volume_ref['id'],
+                                                   model_update)
+        except exception.CinderException as ex:
+            # If somehow the update failed we want to ensure that the
+            # failure is logged (but not try rescheduling since the volume at
+            # this point has been created).
+            if model_update:
+                LOG.exception(_("Failed updating model of volume %(volume_id)s"
+                                " with creation provided model %(model)s") %
+                              {'volume_id': volume_id, 'model': model_update})
+            raise exception.ExportFailure(reason=ex)
+
+        # Persist any driver exported model information.
+        model_update = None
+        try:
+            LOG.debug(_("Volume %s: creating export"), volume_ref['id'])
+            model_update = self.driver.create_export(context, volume_ref)
+            if model_update:
+                self.db.volume_update(context, volume_ref['id'], model_update)
+        except exception.CinderException as ex:
+            # If somehow the read *or* create export failed we want to ensure
+            # that the failure is logged (but not try rescheduling since
+            # the volume at this point has been created).
+            #
+            # NOTE(harlowja): Notice that since the model_update is initially
+            # empty, the only way it will still be empty is if there is no
+            # model_update (which we don't care about) or there was a
+            # model_update and updating failed.
+            if model_update:
+                LOG.exception(_("Failed updating model of volume %(volume_id)s"
+                                " with driver provided model %(model)s") %
+                              {'volume_id': volume_id, 'model': model_update})
+            raise exception.ExportFailure(reason=ex)
+
+        return volume_ref
+
+
+class CreateVolumeOnFinishTask(NotifyVolumeActionTask):
+    """On successful volume creation this will perform final volume actions.
+
+    When a volume is created successfully it is expected that MQ notifications
+    and database updates will occur to 'signal' to others that the volume is
+    now ready for usage. This task does those notifications and updates in a
+    reliable manner (not re-raising exceptions if said actions cannot be
+    triggered).
+
+    Reversion strategy: N/A
+    """
+
+    def __init__(self, db, host, event_suffix):
+        super(CreateVolumeOnFinishTask, self).__init__(db, host, event_suffix)
+        self.status_translation = {
+            'migration_target_creating': 'migration_target',
+        }
+
+    def execute(self, context, volume, volume_spec):
+        volume_id = volume['id']
+        new_status = self.status_translation.get(volume_spec.get('status'),
+                                                 'available')
+        update = {
+            'status': new_status,
+            'launched_at': timeutils.utcnow(),
+        }
+        try:
+            # TODO(harlowja): is it acceptable to only log if this fails??
+            # or are there other side-effects that this will cause if the
+            # status isn't updated correctly (aka it will likely be stuck in
+            # 'building' if this fails)??
+            volume_ref = self.db.volume_update(context, volume_id, update)
+            # Now use the parent to notify.
+            super(CreateVolumeOnFinishTask, self).execute(context, volume_ref)
+        except exception.CinderException:
+            LOG.exception(_("Failed updating volume %(volume_id)s with "
+                            "%(update)s") % {'volume_id': volume_id,
+                                             'update': update})
+        # Even if the update fails, the volume is ready.
+        msg = _("Volume %(volume_name)s (%(volume_id)s): created successfully")
+        LOG.info(msg % {
+            'volume_name': volume_spec['volume_name'],
+            'volume_id': volume_id,
+        })
+
+
+def get_flow(context, db, driver, scheduler_rpcapi, host, volume_id,
+             allow_reschedule, reschedule_context, request_spec,
+             filter_properties, snapshot_id=None, image_id=None,
+             source_volid=None):
+    """Constructs and returns the manager entrypoint flow.
+
+    This flow will do the following:
+
+    1. Determines if rescheduling is enabled (ahead of time).
+    2. Injects keys & values for dependent tasks.
+    3. Selects one of two tasks activated only on *failure* (one to update the
+       db status & notify, or one to update the db status & notify &
+       *reschedule*).
+    4. Extracts a volume specification from the provided inputs.
+    5. Notifies that the volume has started to be created.
+    6. Creates a volume from the extracted volume specification.
+    7. Attaches an on-success-*only* task that notifies that volume creation
+       has ended and performs further database status updates.
+    """
+
+    flow_name = ACTION.replace(":", "_") + "_manager"
+    volume_flow = linear_flow.Flow(flow_name)
+
+    # This injects the initial starting flow values into the workflow so that
+    # the dependency order of the tasks' provides/requires can be correctly
+    # determined.
+    create_what = {
+        'context': context,
+        'filter_properties': filter_properties,
+        'image_id': image_id,
+        'request_spec': request_spec,
+        'snapshot_id': snapshot_id,
+        'source_volid': source_volid,
+        'volume_id': volume_id,
+    }
+
+    volume_flow.add(ExtractVolumeRefTask(db))
+
+    if allow_reschedule and request_spec:
+        volume_flow.add(OnFailureRescheduleTask(reschedule_context,
+                                                db, scheduler_rpcapi))
+
+    volume_flow.add(ExtractVolumeSpecTask(db),
+                    NotifyVolumeActionTask(db, host, "create.start"),
+                    CreateVolumeFromSpecTask(db, host, driver),
+                    CreateVolumeOnFinishTask(db, host, "create.end"))
+
+    # Now load (but do not run) the flow using the provided initial data.
+    return taskflow.engines.load(volume_flow, store=create_what)
diff --git a/cinder/volume/iscsi.py b/cinder/volume/iscsi.py
deleted file mode 100644
index cb8d5df723..0000000000
--- a/cinder/volume/iscsi.py
+++ /dev/null
@@ -1,233 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""
-Helper code for the iSCSI volume driver.
- -""" -import os - -from cinder import exception -from cinder import flags -from cinder.openstack.common import cfg -from cinder.openstack.common import log as logging -from cinder import utils - -LOG = logging.getLogger(__name__) - -iscsi_helper_opt = [ - cfg.StrOpt('iscsi_helper', - default='tgtadm', - help='iscsi target user-land tool to use'), - cfg.StrOpt('volumes_dir', - default='$state_path/volumes', - help='Volume configuration file storage directory'), -] - -FLAGS = flags.FLAGS -FLAGS.register_opts(iscsi_helper_opt) - - -class TargetAdmin(object): - """iSCSI target administration. - - Base class for iSCSI target admin helpers. - """ - - def __init__(self, cmd, execute): - self._cmd = cmd - self.set_execute(execute) - - def set_execute(self, execute): - """Set the function to be used to execute commands.""" - self._execute = execute - - def _run(self, *args, **kwargs): - self._execute(self._cmd, *args, run_as_root=True, **kwargs) - - def create_iscsi_target(self, name, tid, lun, path, **kwargs): - """Create a iSCSI target and logical unit""" - raise NotImplementedError() - - def remove_iscsi_target(self, tid, lun, vol_id, **kwargs): - """Remove a iSCSI target and logical unit""" - raise NotImplementedError() - - def _new_target(self, name, tid, **kwargs): - """Create a new iSCSI target.""" - raise NotImplementedError() - - def _delete_target(self, tid, **kwargs): - """Delete a target.""" - raise NotImplementedError() - - def show_target(self, tid, iqn=None, **kwargs): - """Query the given target ID.""" - raise NotImplementedError() - - def _new_logicalunit(self, tid, lun, path, **kwargs): - """Create a new LUN on a target using the supplied path.""" - raise NotImplementedError() - - def _delete_logicalunit(self, tid, lun, **kwargs): - """Delete a logical unit from a target.""" - raise NotImplementedError() - - -class TgtAdm(TargetAdmin): - """iSCSI target administration using tgtadm.""" - - def __init__(self, execute=utils.execute): - super(TgtAdm, self).__init__('tgtadm', execute) - - def _get_target(self, iqn): - (out, err) = self._execute('tgt-admin', '--show', run_as_root=True) - lines = out.split('\n') - for line in lines: - if iqn in line: - parsed = line.split() - tid = parsed[1] - return tid[:-1] - - return None - - def create_iscsi_target(self, name, tid, lun, path, **kwargs): - # Note(jdg) tid and lun aren't used by TgtAdm but remain for - # compatibility - - utils.ensure_tree(FLAGS.volumes_dir) - - vol_id = name.split(':')[1] - volume_conf = """ - - backing-store %s - - """ % (name, path) - - LOG.info(_('Creating volume: %s') % vol_id) - volumes_dir = FLAGS.volumes_dir - volume_path = os.path.join(volumes_dir, vol_id) - - f = open(volume_path, 'w+') - f.write(volume_conf) - f.close() - - try: - (out, err) = self._execute('tgt-admin', - '--update', - name, - run_as_root=True) - except exception.ProcessExecutionError, e: - LOG.error(_("Failed to create iscsi target for volume " - "id:%(vol_id)s.") % locals()) - - #Don't forget to remove the persistent file we created - os.unlink(volume_path) - raise exception.ISCSITargetCreateFailed(volume_id=vol_id) - - iqn = '%s%s' % (FLAGS.iscsi_target_prefix, vol_id) - tid = self._get_target(iqn) - if tid is None: - LOG.error(_("Failed to create iscsi target for volume " - "id:%(vol_id)s. 
Please ensure your tgtd config file " - "contains 'include %(volumes_dir)s/*'") % locals()) - raise exception.NotFound() - - return tid - - def remove_iscsi_target(self, tid, lun, vol_id, **kwargs): - LOG.info(_('Removing volume: %s') % vol_id) - vol_uuid_file = 'volume-%s' % vol_id - volume_path = os.path.join(FLAGS.volumes_dir, vol_uuid_file) - if os.path.isfile(volume_path): - iqn = '%s%s' % (FLAGS.iscsi_target_prefix, - vol_uuid_file) - else: - raise exception.ISCSITargetRemoveFailed(volume_id=vol_id) - try: - self._execute('tgt-admin', - '--delete', - iqn, - run_as_root=True) - except exception.ProcessExecutionError, e: - LOG.error(_("Failed to create iscsi target for volume " - "id:%(vol_id)s.") % locals()) - raise exception.ISCSITargetRemoveFailed(volume_id=vol_id) - - os.unlink(volume_path) - - def show_target(self, tid, iqn=None, **kwargs): - if iqn is None: - raise exception.InvalidParameterValue( - err=_('valid iqn needed for show_target')) - - tid = self._get_target(iqn) - if tid is None: - raise exception.NotFound() - - -class IetAdm(TargetAdmin): - """iSCSI target administration using ietadm.""" - - def __init__(self, execute=utils.execute): - super(IetAdm, self).__init__('ietadm', execute) - - def create_iscsi_target(self, name, tid, lun, path, **kwargs): - self._new_target(name, tid, **kwargs) - self._new_logicalunit(tid, lun, path, **kwargs) - return tid - - def remove_iscsi_target(self, tid, lun, vol_id, **kwargs): - LOG.info(_('Removing volume: %s') % vol_id) - self._delete_logicalunit(tid, lun, **kwargs) - self._delete_target(tid, **kwargs) - - def _new_target(self, name, tid, **kwargs): - self._run('--op', 'new', - '--tid=%s' % tid, - '--params', 'Name=%s' % name, - **kwargs) - - def _delete_target(self, tid, **kwargs): - self._run('--op', 'delete', - '--tid=%s' % tid, - **kwargs) - - def show_target(self, tid, iqn=None, **kwargs): - self._run('--op', 'show', - '--tid=%s' % tid, - **kwargs) - - def _new_logicalunit(self, tid, lun, path, **kwargs): - self._run('--op', 'new', - '--tid=%s' % tid, - '--lun=%d' % lun, - '--params', 'Path=%s,Type=fileio' % path, - **kwargs) - - def _delete_logicalunit(self, tid, lun, **kwargs): - self._run('--op', 'delete', - '--tid=%s' % tid, - '--lun=%d' % lun, - **kwargs) - - -def get_target_admin(): - if FLAGS.iscsi_helper == 'tgtadm': - return TgtAdm() - else: - return IetAdm() diff --git a/cinder/volume/manager.py b/cinder/volume/manager.py index ddf86e6954..100cc422c9 100644 --- a/cinder/volume/manager.py +++ b/cinder/volume/manager.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. @@ -30,27 +28,38 @@ :class:`manager.Manager` (default: :class:`cinder.volume.manager.Manager`). :volume_driver: Used by :class:`Manager`. Defaults to - :class:`cinder.volume.driver.ISCSIDriver`. + :class:`cinder.volume.drivers.lvm.LVMISCSIDriver`. 
:volume_group: Name of the group that will contain exported volumes (default: `cinder-volumes`) :num_shell_tries: Number of times to attempt to run commands (default: 3) """ + +import time + +from oslo.config import cfg + +from cinder import compute from cinder import context from cinder import exception -from cinder import flags from cinder.image import glance -from cinder.openstack.common import log as logging from cinder import manager -from cinder.openstack.common import cfg from cinder.openstack.common import excutils from cinder.openstack.common import importutils +from cinder.openstack.common import log as logging +from cinder.openstack.common import periodic_task from cinder.openstack.common import timeutils +from cinder.openstack.common import uuidutils from cinder import quota from cinder import utils +from cinder.volume.configuration import Configuration +from cinder.volume.flows.manager import create_volume +from cinder.volume import rpcapi as volume_rpcapi from cinder.volume import utils as volume_utils +from cinder.volume import volume_types +from eventlet.greenpool import GreenPool LOG = logging.getLogger(__name__) @@ -58,143 +67,332 @@ volume_manager_opts = [ cfg.StrOpt('volume_driver', - default='cinder.volume.driver.ISCSIDriver', + default='cinder.volume.drivers.lvm.LVMISCSIDriver', help='Driver to use for volume creation'), - cfg.BoolOpt('use_local_volumes', - default=True, - help='if True, will not discover local volumes'), - cfg.BoolOpt('volume_force_update_capabilities', + cfg.IntOpt('migration_create_volume_timeout_secs', + default=300, + help='Timeout for creating the volume to migrate to ' + 'when performing volume migration (seconds)'), + cfg.BoolOpt('volume_service_inithost_offload', default=False, - help='if True will force update capabilities on each check'), - ] - -FLAGS = flags.FLAGS -FLAGS.register_opts(volume_manager_opts) + help='Offload pending volume delete during ' + 'volume service startup'), +] + +CONF = cfg.CONF +CONF.register_opts(volume_manager_opts) + +MAPPING = { + 'cinder.volume.driver.RBDDriver': 'cinder.volume.drivers.rbd.RBDDriver', + 'cinder.volume.driver.SheepdogDriver': + 'cinder.volume.drivers.sheepdog.SheepdogDriver', + 'cinder.volume.nexenta.volume.NexentaDriver': + 'cinder.volume.drivers.nexenta.iscsi.NexentaISCSIDriver', + 'cinder.volume.drivers.nexenta.volume.NexentaDriver': + 'cinder.volume.drivers.nexenta.iscsi.NexentaISCSIDriver', + 'cinder.volume.san.SanISCSIDriver': + 'cinder.volume.drivers.san.san.SanISCSIDriver', + 'cinder.volume.san.SolarisISCSIDriver': + 'cinder.volume.drivers.san.solaris.SolarisISCSIDriver', + 'cinder.volume.san.HpSanISCSIDriver': + 'cinder.volume.drivers.san.hp_lefthand.HpSanISCSIDriver', + 'cinder.volume.nfs.NfsDriver': + 'cinder.volume.drivers.nfs.NfsDriver', + 'cinder.volume.solidfire.SolidFire': + 'cinder.volume.drivers.solidfire.SolidFireDriver', + 'cinder.volume.drivers.solidfire.SolidFire': + 'cinder.volume.drivers.solidfire.SolidFireDriver', + 'cinder.volume.storwize_svc.StorwizeSVCDriver': + 'cinder.volume.drivers.ibm.storwize_svc.StorwizeSVCDriver', + 'cinder.volume.drivers.storwize_svc.StorwizeSVCDriver': + 'cinder.volume.drivers.ibm.storwize_svc.StorwizeSVCDriver', + 'cinder.volume.windows.WindowsDriver': + 'cinder.volume.drivers.windows.windows.WindowsDriver', + 'cinder.volume.drivers.windows.WindowsDriver': + 'cinder.volume.drivers.windows.windows.WindowsDriver', + 'cinder.volume.xiv.XIVDriver': + 'cinder.volume.drivers.xiv_ds8k.XIVDS8KDriver', + 'cinder.volume.drivers.xiv.XIVDriver': + 
'cinder.volume.drivers.xiv_ds8k.XIVDS8KDriver', + 'cinder.volume.zadara.ZadaraVPSAISCSIDriver': + 'cinder.volume.drivers.zadara.ZadaraVPSAISCSIDriver', + 'cinder.volume.driver.ISCSIDriver': + 'cinder.volume.drivers.lvm.LVMISCSIDriver', + 'cinder.volume.netapp.NetAppISCSIDriver': + 'cinder.volume.drivers.netapp.common.Deprecated', + 'cinder.volume.drivers.netapp.iscsi.NetAppISCSIDriver': + 'cinder.volume.drivers.netapp.common.Deprecated', + 'cinder.volume.netapp.NetAppCmodeISCSIDriver': + 'cinder.volume.drivers.netapp.common.Deprecated', + 'cinder.volume.drivers.netapp.iscsi.NetAppCmodeISCSIDriver': + 'cinder.volume.drivers.netapp.common.Deprecated', + 'cinder.volume.netapp_nfs.NetAppNFSDriver': + 'cinder.volume.drivers.netapp.common.Deprecated', + 'cinder.volume.drivers.netapp.nfs.NetAppNFSDriver': + 'cinder.volume.drivers.netapp.common.Deprecated', + 'cinder.volume.drivers.netapp.nfs.NetAppCmodeNfsDriver': + 'cinder.volume.drivers.netapp.common.Deprecated', + 'cinder.volume.drivers.huawei.HuaweiISCSIDriver': + 'cinder.volume.drivers.huawei.HuaweiVolumeDriver'} + + +def locked_volume_operation(f): + """Lock decorator for volume operations. + + Takes a named lock prior to executing the operation. The lock is named with + the operation executed and the id of the volume. This lock can then be used + by other operations to avoid operation conflicts on shared volumes. + + Example use: + + If a volume operation uses this decorator, it will block until the named + lock is free. This is used to protect concurrent operations on the same + volume e.g. delete VolA while create volume VolB from VolA is in progress. + """ + def lvo_inner1(inst, context, volume_id, **kwargs): + @utils.synchronized("%s-%s" % (volume_id, f.__name__), external=True) + def lvo_inner2(*_args, **_kwargs): + return f(*_args, **_kwargs) + return lvo_inner2(inst, context, volume_id, **kwargs) + return lvo_inner1 + + +def locked_snapshot_operation(f): + """Lock decorator for snapshot operations. + + Takes a named lock prior to executing the operation. The lock is named with + the operation executed and the id of the snapshot. This lock can then be + used by other operations to avoid operation conflicts on shared snapshots. + + Example use: + + If a snapshot operation uses this decorator, it will block until the named + lock is free. This is used to protect concurrent operations on the same + snapshot e.g. delete SnapA while create volume VolA from SnapA is in + progress. + """ + def lso_inner1(inst, context, snapshot_id, **kwargs): + @utils.synchronized("%s-%s" % (snapshot_id, f.__name__), external=True) + def lso_inner2(*_args, **_kwargs): + return f(*_args, **_kwargs) + return lso_inner2(inst, context, snapshot_id, **kwargs) + return lso_inner1 class VolumeManager(manager.SchedulerDependentManager): """Manages attachable block storage devices.""" - def __init__(self, volume_driver=None, *args, **kwargs): + + RPC_API_VERSION = '1.12' + + def __init__(self, volume_driver=None, service_name=None, + *args, **kwargs): """Load the driver from the one specified in args, or from flags.""" - if not volume_driver: - volume_driver = FLAGS.volume_driver - self.driver = importutils.import_object(volume_driver) + # update_service_capabilities needs service_name to be volume super(VolumeManager, self).__init__(service_name='volume', - *args, **kwargs) - # NOTE(vish): Implementation specific db handling is done - # by the driver. 
-        self.driver.db = self.db
-        self._last_volume_stats = []
+                                            *args, **kwargs)
+        self.configuration = Configuration(volume_manager_opts,
+                                           config_group=service_name)
+        self._tp = GreenPool()
+        self.stats = {}
+
+        if not volume_driver:
+            # Get from configuration, which will get the default
+            # if it's not using the multi backend
+            volume_driver = self.configuration.volume_driver
+        if volume_driver in MAPPING:
+            LOG.warn(_("Driver path %s is deprecated, update your "
+                       "configuration to the new path."), volume_driver)
+            volume_driver = MAPPING[volume_driver]
+        if volume_driver == 'cinder.volume.drivers.lvm.ThinLVMVolumeDriver':
+            # Deprecated in Havana
+            # Not handled in MAPPING because it requires setting a conf option
+            LOG.warn(_("ThinLVMVolumeDriver is deprecated, please configure "
+                       "LVMISCSIDriver and lvm_type=thin. Continuing with "
+                       "those settings."))
+            volume_driver = 'cinder.volume.drivers.lvm.LVMISCSIDriver'
+            self.configuration.lvm_type = 'thin'
+        self.driver = importutils.import_object(
+            volume_driver,
+            configuration=self.configuration,
+            db=self.db)
+
+    def _add_to_threadpool(self, func, *args, **kwargs):
+        self._tp.spawn_n(func, *args, **kwargs)

     def init_host(self):
         """Do any initialization that needs to be run if this is a
-           standalone service."""
+           standalone service.
+        """

         ctxt = context.get_admin_context()
-        self.driver.do_setup(ctxt)
-        self.driver.check_for_setup_error()
+        LOG.info(_("Starting volume driver %(driver_name)s (%(version)s)") %
+                 {'driver_name': self.driver.__class__.__name__,
+                  'version': self.driver.get_version()})
+        try:
+            self.driver.do_setup(ctxt)
+            self.driver.check_for_setup_error()
+        except Exception as ex:
+            LOG.error(_("Error encountered during "
+                        "initialization of driver: %(name)s") %
+                      {'name': self.driver.__class__.__name__})
+            LOG.exception(ex)
+            # we don't want to continue since we failed
+            # to initialize the driver correctly.
+            return

         volumes = self.db.volume_get_all_by_host(ctxt, self.host)
         LOG.debug(_("Re-exporting %s volumes"), len(volumes))
-        for volume in volumes:
-            if volume['status'] in ['available', 'in-use']:
-                self.driver.ensure_export(ctxt, volume)
-            else:
-                LOG.info(_("volume %s: skipping export"), volume['name'])
+
+        try:
+            total = 0
+            self.stats.update({'allocated_capacity_gb': total})
+            for volume in volumes:
+                if volume['status'] in ['available', 'in-use']:
+                    # calculate allocated capacity for driver
+                    total += volume['size']
+                    self.stats['allocated_capacity_gb'] = total
+                    try:
+                        self.driver.ensure_export(ctxt, volume)
+                    except Exception as export_ex:
+                        LOG.error(_("Failed to re-export volume %s: "
+                                    "setting to error state"), volume['id'])
+                        LOG.exception(export_ex)
+                        self.db.volume_update(ctxt,
+                                              volume['id'],
+                                              {'status': 'error'})
+                elif volume['status'] == 'downloading':
+                    LOG.info(_("volume %s stuck in a downloading state"),
+                             volume['id'])
+                    self.driver.clear_download(ctxt, volume)
+                    self.db.volume_update(ctxt,
+                                          volume['id'],
+                                          {'status': 'error'})
+                else:
+                    LOG.info(_("volume %s: skipping export"), volume['id'])
+        except Exception as ex:
+            LOG.error(_("Error encountered during "
+                        "re-exporting phase of driver initialization: "
+                        " %(name)s") %
+                      {'name': self.driver.__class__.__name__})
+            LOG.exception(ex)
+            return
+
+        # at this point the driver is considered initialized.
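+        # Illustrative note: set_initialized() below flips
+        # self.driver.initialized to True, which is what later calls to
+        # utils.require_driver_initialized(self.driver) (e.g. in
+        # delete_volume and create_snapshot) check before touching the
+        # backend.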
+ self.driver.set_initialized() LOG.debug(_('Resuming any in progress delete operations')) for volume in volumes: if volume['status'] == 'deleting': LOG.info(_('Resuming delete on volume: %s') % volume['id']) - self.delete_volume(ctxt, volume['id']) + if CONF.volume_service_inithost_offload: + # Offload all the pending volume delete operations to the + # threadpool to prevent the main volume service thread + # from being blocked. + self._add_to_threadpool(self.delete_volume(ctxt, + volume['id'])) + else: + # By default, delete volumes sequentially + self.delete_volume(ctxt, volume['id']) + + # collect and publish service capabilities + self.publish_service_capabilities(ctxt) + + def create_volume(self, context, volume_id, request_spec=None, + filter_properties=None, allow_reschedule=True, + snapshot_id=None, image_id=None, source_volid=None): - def create_volume(self, context, volume_id, snapshot_id=None, - image_id=None): """Creates and exports the volume.""" + context_saved = context.deepcopy() context = context.elevated() - volume_ref = self.db.volume_get(context, volume_id) - self._notify_about_volume_usage(context, volume_ref, "create.start") - LOG.info(_("volume %s: creating"), volume_ref['name']) - - self.db.volume_update(context, - volume_id, - {'host': self.host}) - # NOTE(vish): so we don't have to get volume from db again - # before passing it to the driver. - volume_ref['host'] = self.host - - status = 'available' - model_update = False + if filter_properties is None: + filter_properties = {} try: - vol_name = volume_ref['name'] - vol_size = volume_ref['size'] - LOG.debug(_("volume %(vol_name)s: creating lv of" - " size %(vol_size)sG") % locals()) - if snapshot_id is None and image_id is None: - model_update = self.driver.create_volume(volume_ref) - elif snapshot_id is not None: - snapshot_ref = self.db.snapshot_get(context, snapshot_id) - model_update = self.driver.create_volume_from_snapshot( - volume_ref, - snapshot_ref) - else: - # create the volume from an image - image_service, image_id = \ - glance.get_remote_image_service(context, - image_id) - image_location = image_service.get_location(context, image_id) - cloned = self.driver.clone_image(volume_ref, image_location) - if not cloned: - model_update = self.driver.create_volume(volume_ref) - status = 'downloading' - - if model_update: - self.db.volume_update(context, volume_ref['id'], model_update) - - LOG.debug(_("volume %s: creating export"), volume_ref['name']) - model_update = self.driver.create_export(context, volume_ref) - if model_update: - self.db.volume_update(context, volume_ref['id'], model_update) - + # NOTE(flaper87): Driver initialization is + # verified by the task itself. + flow_engine = create_volume.get_flow( + context, + self.db, + self.driver, + self.scheduler_rpcapi, + self.host, + volume_id, + snapshot_id=snapshot_id, + image_id=image_id, + source_volid=source_volid, + allow_reschedule=allow_reschedule, + reschedule_context=context_saved, + request_spec=request_spec, + filter_properties=filter_properties) except Exception: - with excutils.save_and_reraise_exception(): - self.db.volume_update(context, - volume_ref['id'], {'status': 'error'}) - - now = timeutils.utcnow() - self.db.volume_update(context, - volume_ref['id'], {'status': status, - 'launched_at': now}) - LOG.debug(_("volume %s: created successfully"), volume_ref['name']) - self._reset_stats() - - if image_id and not cloned: - #copy the image onto the volume. 
-            self._copy_image_to_volume(context, volume_ref, image_id)
-        self._notify_about_volume_usage(context, volume_ref, "create.end")
+            LOG.exception(_("Failed to create manager volume flow"))
+            raise exception.CinderException(
+                _("Failed to create manager volume flow"))
+
+        if snapshot_id is not None:
+            # Make sure the snapshot is not deleted until we are done with it.
+            locked_action = "%s-%s" % (snapshot_id, 'delete_snapshot')
+        elif source_volid is not None:
+            # Make sure the volume is not deleted until we are done with it.
+            locked_action = "%s-%s" % (source_volid, 'delete_volume')
+        else:
+            locked_action = None
+
+        def _run_flow():
+            # This code executes the create volume flow. If something goes
+            # wrong, the flow reverts all jobs that were done and reraises
+            # an exception. Otherwise, all data that was generated by the
+            # flow becomes available in the flow engine's storage.
+            flow_engine.run()
+
+        @utils.synchronized(locked_action, external=True)
+        def _run_flow_locked():
+            _run_flow()
+
+        if locked_action is None:
+            _run_flow()
+        else:
+            _run_flow_locked()
+
+        # Fetch created volume from storage
+        volume_ref = flow_engine.storage.fetch('volume')
+        # Update volume stats
+        self.stats['allocated_capacity_gb'] += volume_ref['size']
         return volume_ref['id']

+    @locked_volume_operation
     def delete_volume(self, context, volume_id):
         """Deletes and unexports volume."""
         context = context.elevated()
         volume_ref = self.db.volume_get(context, volume_id)
+
+        if context.project_id != volume_ref['project_id']:
+            project_id = volume_ref['project_id']
+        else:
+            project_id = context.project_id
+
+        LOG.info(_("volume %s: deleting"), volume_ref['id'])
         if volume_ref['attach_status'] == "attached":
             # Volume is still attached, need to detach first
             raise exception.VolumeAttached(volume_id=volume_id)
         if volume_ref['host'] != self.host:
             raise exception.InvalidVolume(
-                reason=_("Volume is not local to this node"))
+                reason=_("volume is not local to this node"))

         self._notify_about_volume_usage(context, volume_ref, "delete.start")
-        self._reset_stats()
         try:
-            LOG.debug(_("volume %s: removing export"), volume_ref['name'])
+            # NOTE(flaper87): Verify the driver is enabled
+            # before going forward. The exception will be caught
+            # and the volume status updated.
+            utils.require_driver_initialized(self.driver)
+
+            LOG.debug(_("volume %s: removing export"), volume_ref['id'])
             self.driver.remove_export(context, volume_ref)
-            LOG.debug(_("volume %s: deleting"), volume_ref['name'])
+            LOG.debug(_("volume %s: deleting"), volume_ref['id'])
             self.driver.delete_volume(volume_ref)
         except exception.VolumeIsBusy:
-            LOG.debug(_("volume %s: volume is busy"), volume_ref['name'])
+            LOG.error(_("Cannot delete volume %s: volume is busy"),
+                      volume_ref['id'])
             self.driver.ensure_export(context, volume_ref)
             self.db.volume_update(context, volume_ref['id'],
                                   {'status': 'available'})
@@ -205,33 +403,63 @@ def delete_volume(self, context, volume_id):
                                   volume_ref['id'],
                                   {'status': 'error_deleting'})

+        # If deleting the source volume in a migration, we want to skip quotas
+        # and other database updates.
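+        # For example, _migrate_volume_generic (below) sets the target
+        # volume's migration_status to 'target:<source-id>' and the source's
+        # to 'migrating', so both ends of an in-flight migration return early
+        # here.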
+ if volume_ref['migration_status']: + return True + # Get reservations try: - reservations = QUOTAS.reserve(context, volumes=-1, - gigabytes=-volume_ref['size']) + reserve_opts = {'volumes': -1, 'gigabytes': -volume_ref['size']} + QUOTAS.add_volume_type_opts(context, + reserve_opts, + volume_ref.get('volume_type_id')) + reservations = QUOTAS.reserve(context, + project_id=project_id, + **reserve_opts) except Exception: reservations = None LOG.exception(_("Failed to update usages deleting volume")) + # Delete glance metadata if it exists + self.db.volume_glance_metadata_delete_by_volume(context, volume_id) + self.db.volume_destroy(context, volume_id) - LOG.debug(_("volume %s: deleted successfully"), volume_ref['name']) + LOG.info(_("volume %s: deleted successfully"), volume_ref['id']) self._notify_about_volume_usage(context, volume_ref, "delete.end") # Commit the reservations if reservations: - QUOTAS.commit(context, reservations) + QUOTAS.commit(context, reservations, project_id=project_id) + + self.stats['allocated_capacity_gb'] -= volume_ref['size'] + self.publish_service_capabilities(context) return True def create_snapshot(self, context, volume_id, snapshot_id): """Creates and exports the snapshot.""" + caller_context = context context = context.elevated() snapshot_ref = self.db.snapshot_get(context, snapshot_id) - LOG.info(_("snapshot %s: creating"), snapshot_ref['name']) + LOG.info(_("snapshot %s: creating"), snapshot_ref['id']) + + self._notify_about_snapshot_usage( + context, snapshot_ref, "create.start") try: - snap_name = snapshot_ref['name'] - LOG.debug(_("snapshot %(snap_name)s: creating") % locals()) + # NOTE(flaper87): Verify the driver is enabled + # before going forward. The exception will be caught + # and the snapshot status updated. + utils.require_driver_initialized(self.driver) + + LOG.debug(_("snapshot %(snap_id)s: creating"), + {'snap_id': snapshot_ref['id']}) + + # Pass context so that drivers that want to use it, can, + # but it is not a requirement for all drivers. 
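+            # (Illustrative) a driver could then read request details from
+            # it, e.g. snapshot['context'].project_id, inside create_snapshot.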
+            snapshot_ref['context'] = caller_context
+
             model_update = self.driver.create_snapshot(snapshot_ref)
             if model_update:
                 self.db.snapshot_update(context, snapshot_ref['id'],
@@ -246,19 +474,51 @@ def create_snapshot(self, context, volume_id, snapshot_id):
         self.db.snapshot_update(context,
                                 snapshot_ref['id'], {'status': 'available',
                                                      'progress': '100%'})
-        LOG.debug(_("snapshot %s: created successfully"), snapshot_ref['name'])
+
+        vol_ref = self.db.volume_get(context, volume_id)
+        if vol_ref.bootable:
+            try:
+                self.db.volume_glance_metadata_copy_to_snapshot(
+                    context, snapshot_ref['id'], volume_id)
+            except exception.CinderException as ex:
+                LOG.exception(_("Failed updating %(snapshot_id)s"
+                                " metadata using the provided volume's"
+                                " %(volume_id)s metadata") %
+                              {'volume_id': volume_id,
+                               'snapshot_id': snapshot_id})
+                raise exception.MetadataCopyFailure(reason=ex)
+        LOG.info(_("snapshot %s: created successfully"), snapshot_ref['id'])
+        self._notify_about_snapshot_usage(context, snapshot_ref, "create.end")
         return snapshot_id

+    @locked_snapshot_operation
     def delete_snapshot(self, context, snapshot_id):
         """Deletes and unexports snapshot."""
+        caller_context = context
         context = context.elevated()
         snapshot_ref = self.db.snapshot_get(context, snapshot_id)
+        project_id = snapshot_ref['project_id']
+
+        LOG.info(_("snapshot %s: deleting"), snapshot_ref['id'])
+        self._notify_about_snapshot_usage(
+            context, snapshot_ref, "delete.start")

         try:
-            LOG.debug(_("snapshot %s: deleting"), snapshot_ref['name'])
+            # NOTE(flaper87): Verify the driver is enabled
+            # before going forward. The exception will be caught
+            # and the snapshot status updated.
+            utils.require_driver_initialized(self.driver)
+
+            LOG.debug(_("snapshot %s: deleting"), snapshot_ref['id'])
+
+            # Pass context so that drivers that want to use it, can,
+            # but it is not a requirement for all drivers.
+            snapshot_ref['context'] = caller_context
+
             self.driver.delete_snapshot(snapshot_ref)
         except exception.SnapshotIsBusy:
-            LOG.debug(_("snapshot %s: snapshot is busy"), snapshot_ref['name'])
+            LOG.error(_("Cannot delete snapshot %s: snapshot is busy"),
+                      snapshot_ref['id'])
             self.db.snapshot_update(context,
                                     snapshot_ref['id'],
                                     {'status': 'available'})
@@ -269,39 +529,129 @@ def delete_snapshot(self, context, snapshot_id):
                                     snapshot_ref['id'],
                                     {'status': 'error_deleting'})

+        # Get reservations
+        try:
+            if CONF.no_snapshot_gb_quota:
+                reserve_opts = {'snapshots': -1}
+            else:
+                reserve_opts = {
+                    'snapshots': -1,
+                    'gigabytes': -snapshot_ref['volume_size'],
+                }
+            volume_ref = self.db.volume_get(context, snapshot_ref['volume_id'])
+            QUOTAS.add_volume_type_opts(context,
+                                        reserve_opts,
+                                        volume_ref.get('volume_type_id'))
+            reservations = QUOTAS.reserve(context,
+                                          project_id=project_id,
+                                          **reserve_opts)
+        except Exception:
+            reservations = None
+            LOG.exception(_("Failed to update usages deleting snapshot"))
+        self.db.volume_glance_metadata_delete_by_snapshot(context, snapshot_id)
         self.db.snapshot_destroy(context, snapshot_id)
-        LOG.debug(_("snapshot %s: deleted successfully"), snapshot_ref['name'])
-        return True
+        LOG.info(_("snapshot %s: deleted successfully"), snapshot_ref['id'])
+        self._notify_about_snapshot_usage(context, snapshot_ref, "delete.end")

-    def attach_volume(self, context, volume_id, instance_uuid, mountpoint):
-        """Updates db to show volume is attached"""
-        # TODO(vish): refactor this into a more general "reserve"
-        # TODO(sleepsonthefloor): Is this 'elevated' appropriate?
- if not utils.is_uuid_like(instance_uuid): - raise exception.InvalidUUID(instance_uuid) + # Commit the reservations + if reservations: + QUOTAS.commit(context, reservations, project_id=project_id) + return True - try: - self.driver.attach_volume(context, - volume_id, - instance_uuid, - mountpoint) - except Exception: - with excutils.save_and_reraise_exception(): - self.db.volume_update(context, - volume_id, + def attach_volume(self, context, volume_id, instance_uuid, host_name, + mountpoint, mode): + """Updates db to show volume is attached.""" + @utils.synchronized(volume_id, external=True) + def do_attach(): + # check the volume status before attaching + volume = self.db.volume_get(context, volume_id) + volume_metadata = self.db.volume_admin_metadata_get( + context.elevated(), volume_id) + if volume['status'] == 'attaching': + if (volume['instance_uuid'] and volume['instance_uuid'] != + instance_uuid): + msg = _("being attached by another instance") + raise exception.InvalidVolume(reason=msg) + if (volume['attached_host'] and volume['attached_host'] != + host_name): + msg = _("being attached by another host") + raise exception.InvalidVolume(reason=msg) + if (volume_metadata.get('attached_mode') and + volume_metadata.get('attached_mode') != mode): + msg = _("being attached by different mode") + raise exception.InvalidVolume(reason=msg) + elif volume['status'] != "available": + msg = _("status must be available or attaching") + raise exception.InvalidVolume(reason=msg) + + # TODO(jdg): attach_time column is currently varchar + # we should update this to a date-time object + # also consider adding detach_time? + self._notify_about_volume_usage(context, volume, + "attach.start") + self.db.volume_update(context, volume_id, + {"instance_uuid": instance_uuid, + "attached_host": host_name, + "status": "attaching", + "attach_time": timeutils.strtime()}) + self.db.volume_admin_metadata_update(context.elevated(), + volume_id, + {"attached_mode": mode}, + False) + + if instance_uuid and not uuidutils.is_uuid_like(instance_uuid): + self.db.volume_update(context, volume_id, {'status': 'error_attaching'}) + raise exception.InvalidUUID(uuid=instance_uuid) + + host_name_sanitized = utils.sanitize_hostname( + host_name) if host_name else None - self.db.volume_attached(context.elevated(), - volume_id, - instance_uuid, - mountpoint) + volume = self.db.volume_get(context, volume_id) + + if volume_metadata.get('readonly') == 'True' and mode != 'ro': + self.db.volume_update(context, volume_id, + {'status': 'error_attaching'}) + raise exception.InvalidVolumeAttachMode(mode=mode, + volume_id=volume_id) + try: + # NOTE(flaper87): Verify the driver is enabled + # before going forward. The exception will be caught + # and the volume status updated. + utils.require_driver_initialized(self.driver) + + self.driver.attach_volume(context, + volume, + instance_uuid, + host_name_sanitized, + mountpoint) + except Exception: + with excutils.save_and_reraise_exception(): + self.db.volume_update(context, volume_id, + {'status': 'error_attaching'}) + + volume = self.db.volume_attached(context.elevated(), + volume_id, + instance_uuid, + host_name_sanitized, + mountpoint) + self._notify_about_volume_usage(context, volume, "attach.end") + return do_attach() def detach_volume(self, context, volume_id): - """Updates db to show volume is detached""" + """Updates db to show volume is detached.""" # TODO(vish): refactor this into a more general "unreserve" # TODO(sleepsonthefloor): Is this 'elevated' appropriate? 
+ + volume = self.db.volume_get(context, volume_id) + self._notify_about_volume_usage(context, volume, "detach.start") try: - self.driver.detach_volume(context, volume_id) + # NOTE(flaper87): Verify the driver is enabled + # before going forward. The exception will be caught + # and the volume status updated. + utils.require_driver_initialized(self.driver) + + self.driver.detach_volume(context, volume) except Exception: with excutils.save_and_reraise_exception(): self.db.volume_update(context, @@ -309,42 +659,45 @@ def detach_volume(self, context, volume_id): {'status': 'error_detaching'}) self.db.volume_detached(context.elevated(), volume_id) + self.db.volume_admin_metadata_delete(context.elevated(), volume_id, + 'attached_mode') - def _copy_image_to_volume(self, context, volume, image_id): - """Downloads Glance image to the specified volume. """ - volume_id = volume['id'] - payload = {'volume_id': volume_id, 'image_id': image_id} - try: - image_service, image_id = glance.get_remote_image_service(context, - image_id) - self.driver.copy_image_to_volume(context, volume, image_service, - image_id) - LOG.debug(_("Downloaded image %(image_id)s to %(volume_id)s " - "successfully") % locals()) - self.db.volume_update(context, volume_id, - {'status': 'available'}) - except Exception, error: - with excutils.save_and_reraise_exception(): - payload['message'] = unicode(error) - self.db.volume_update(context, volume_id, {'status': 'error'}) + # Check for https://bugs.launchpad.net/cinder/+bug/1065702 + volume = self.db.volume_get(context, volume_id) + if (volume['provider_location'] and + volume['name'] not in volume['provider_location']): + self.driver.ensure_export(context, volume) + self._notify_about_volume_usage(context, volume, "detach.end") + + def copy_volume_to_image(self, context, volume_id, image_meta): + """Uploads the specified volume to Glance. + + image_meta is a dictionary containing the following keys: + 'id', 'container_format', 'disk_format' - def copy_volume_to_image(self, context, volume_id, image_id): - """Uploads the specified volume to Glance.""" - payload = {'volume_id': volume_id, 'image_id': image_id} + """ + payload = {'volume_id': volume_id, 'image_id': image_meta['id']} try: + # NOTE(flaper87): Verify the driver is enabled + # before going forward. The exception will be caught + # and the volume status updated. + utils.require_driver_initialized(self.driver) + volume = self.db.volume_get(context, volume_id) self.driver.ensure_export(context.elevated(), volume) - image_service, image_id = glance.get_remote_image_service(context, - image_id) + image_service, image_id = \ + glance.get_remote_image_service(context, image_meta['id']) self.driver.copy_volume_to_image(context, volume, image_service, - image_id) + image_meta) LOG.debug(_("Uploaded volume %(volume_id)s to " - "image (%(image_id)s) successfully") % locals()) - except Exception, error: + "image (%(image_id)s) successfully"), + {'volume_id': volume_id, 'image_id': image_id}) + except Exception as error: with excutils.save_and_reraise_exception(): payload['message'] = unicode(error) finally: - if volume['instance_uuid'] is None: + if (volume['instance_uuid'] is None and + volume['attached_host'] is None): self.db.volume_update(context, volume_id, {'status': 'available'}) else: @@ -388,55 +741,489 @@ def initialize_connection(self, context, volume_id, connector): json in various places, so it should not contain any non-json data types. 
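+
+        A typical connector, with illustrative values, looks like::
+
+            {'ip': '10.0.0.2',
+             'host': 'compute-node-1',
+             'initiator': 'iqn.1993-08.org.debian:01:abc123'}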
""" - volume_ref = self.db.volume_get(context, volume_id) - return self.driver.initialize_connection(volume_ref, connector) + # NOTE(flaper87): Verify the driver is enabled + # before going forward. The exception will be caught + # and the volume status updated. + utils.require_driver_initialized(self.driver) - def terminate_connection(self, context, volume_id, connector): + volume = self.db.volume_get(context, volume_id) + self.driver.validate_connector(connector) + try: + conn_info = self.driver.initialize_connection(volume, connector) + except Exception as err: + err_msg = (_('Unable to fetch connection information from ' + 'backend: %(err)s') % {'err': str(err)}) + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + + # Add qos_specs to connection info + typeid = volume['volume_type_id'] + specs = None + if typeid: + res = volume_types.get_volume_type_qos_specs(typeid) + qos = res['qos_specs'] + # only pass qos_specs that is designated to be consumed by + # front-end, or both front-end and back-end. + if qos and qos.get('consumer') in ['front-end', 'both']: + specs = qos.get('specs') + + qos_spec = dict(qos_specs=specs) + conn_info['data'].update(qos_spec) + + # Add access_mode to connection info + volume_metadata = self.db.volume_admin_metadata_get(context.elevated(), + volume_id) + if conn_info['data'].get('access_mode') is None: + access_mode = volume_metadata.get('attached_mode') + if access_mode is None: + # NOTE(zhiyan): client didn't call 'os-attach' before + access_mode = ('ro' + if volume_metadata.get('readonly') == 'True' + else 'rw') + conn_info['data']['access_mode'] = access_mode + return conn_info + + def terminate_connection(self, context, volume_id, connector, force=False): """Cleanup connection from host represented by connector. The format of connector is the same as for initialize_connection. """ - volume_ref = self.db.volume_get(context, volume_id) - self.driver.terminate_connection(volume_ref, connector) + # NOTE(flaper87): Verify the driver is enabled + # before going forward. The exception will be caught + # and the volume status updated. + utils.require_driver_initialized(self.driver) - def _volume_stats_changed(self, stat1, stat2): - if FLAGS.volume_force_update_capabilities: - return True - if len(stat1) != len(stat2): - return True - for (k, v) in stat1.iteritems(): - if (k, v) not in stat2.iteritems(): - return True - return False + volume_ref = self.db.volume_get(context, volume_id) + try: + self.driver.terminate_connection(volume_ref, + connector, force=force) + except Exception as err: + err_msg = (_('Unable to terminate volume connection: %(err)s') + % {'err': str(err)}) + LOG.error(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + + def accept_transfer(self, context, volume_id, new_user, new_project): + # NOTE(flaper87): Verify the driver is enabled + # before going forward. The exception will be caught + # and the volume status updated. 
+ utils.require_driver_initialized(self.driver) + + # NOTE(jdg): need elevated context as we haven't "given" the vol + # yet + volume_ref = self.db.volume_get(context.elevated(), volume_id) + self.driver.accept_transfer(context, volume_ref, new_user, new_project) + + def _migrate_volume_generic(self, ctxt, volume, host, new_type_id): + rpcapi = volume_rpcapi.VolumeAPI() + + # Create new volume on remote host + new_vol_values = {} + for k, v in volume.iteritems(): + new_vol_values[k] = v + del new_vol_values['id'] + del new_vol_values['_name_id'] + # We don't copy volume_type because the db sets that according to + # volume_type_id, which we do copy + del new_vol_values['volume_type'] + if new_type_id: + new_vol_values['volume_type_id'] = new_type_id + new_vol_values['host'] = host['host'] + new_vol_values['status'] = 'creating' + new_vol_values['migration_status'] = 'target:%s' % volume['id'] + new_vol_values['attach_status'] = 'detached' + new_volume = self.db.volume_create(ctxt, new_vol_values) + rpcapi.create_volume(ctxt, new_volume, host['host'], + None, None, allow_reschedule=False) + + # Wait for new_volume to become ready + starttime = time.time() + deadline = starttime + CONF.migration_create_volume_timeout_secs + new_volume = self.db.volume_get(ctxt, new_volume['id']) + tries = 0 + while new_volume['status'] != 'available': + tries = tries + 1 + now = time.time() + if new_volume['status'] == 'error': + msg = _("failed to create new_volume on destination host") + raise exception.VolumeMigrationFailed(reason=msg) + elif now > deadline: + msg = _("timeout creating new_volume on destination host") + raise exception.VolumeMigrationFailed(reason=msg) + else: + time.sleep(tries ** 2) + new_volume = self.db.volume_get(ctxt, new_volume['id']) - @manager.periodic_task + # Copy the source volume to the destination volume + try: + if (volume['instance_uuid'] is None and + volume['attached_host'] is None): + self.driver.copy_volume_data(ctxt, volume, new_volume, + remote='dest') + # The above call is synchronous so we complete the migration + self.migrate_volume_completion(ctxt, volume['id'], + new_volume['id'], error=False) + else: + nova_api = compute.API() + # This is an async call to Nova, which will call the completion + # when it's done + nova_api.update_server_volume(ctxt, volume['instance_uuid'], + volume['id'], new_volume['id']) + except Exception: + with excutils.save_and_reraise_exception(): + msg = _("Failed to copy volume %(vol1)s to %(vol2)s") + LOG.error(msg % {'vol1': volume['id'], + 'vol2': new_volume['id']}) + volume = self.db.volume_get(ctxt, volume['id']) + # If we're in the completing phase don't delete the target + # because we may have already deleted the source! + if volume['migration_status'] == 'migrating': + rpcapi.delete_volume(ctxt, new_volume) + new_volume['migration_status'] = None + + def _get_original_status(self, volume): + if (volume['instance_uuid'] is None and + volume['attached_host'] is None): + return 'available' + else: + return 'in-use' + + def migrate_volume_completion(self, ctxt, volume_id, new_volume_id, + error=False): + try: + # NOTE(flaper87): Verify the driver is enabled + # before going forward. The exception will be caught + # and the migration status updated. 
+            utils.require_driver_initialized(self.driver)
+        except exception.DriverNotInitialized:
+            with excutils.save_and_reraise_exception():
+                self.db.volume_update(ctxt, volume_id,
+                                      {'migration_status': 'error'})
+
+        msg = _("migrate_volume_completion: completing migration for "
+                "volume %(vol1)s (temporary volume %(vol2)s)")
+        LOG.debug(msg % {'vol1': volume_id, 'vol2': new_volume_id})
+        volume = self.db.volume_get(ctxt, volume_id)
+        new_volume = self.db.volume_get(ctxt, new_volume_id)
+        rpcapi = volume_rpcapi.VolumeAPI()
+
+        status_update = None
+        if volume['status'] == 'retyping':
+            status_update = {'status': self._get_original_status(volume)}
+
+        if error:
+            msg = _("migrate_volume_completion is cleaning up an error "
+                    "for volume %(vol1)s (temporary volume %(vol2)s)")
+            LOG.info(msg % {'vol1': volume['id'],
+                            'vol2': new_volume['id']})
+            new_volume['migration_status'] = None
+            rpcapi.delete_volume(ctxt, new_volume)
+            updates = {'migration_status': None}
+            if status_update:
+                updates.update(status_update)
+            self.db.volume_update(ctxt, volume_id, updates)
+            return volume_id
+
+        self.db.volume_update(ctxt, volume_id,
+                              {'migration_status': 'completing'})
+
+        # Delete the source volume (if it fails, don't fail the migration)
+        try:
+            self.delete_volume(ctxt, volume_id)
+        except Exception as ex:
+            msg = _("Failed to delete migration source vol %(vol)s: %(err)s")
+            LOG.error(msg % {'vol': volume_id, 'err': ex})
+
+        self.db.finish_volume_migration(ctxt, volume_id, new_volume_id)
+        self.db.volume_destroy(ctxt, new_volume_id)
+        updates = {'migration_status': None}
+        if status_update:
+            updates.update(status_update)
+        self.db.volume_update(ctxt, volume_id, updates)
+        return volume['id']
+
+    def migrate_volume(self, ctxt, volume_id, host, force_host_copy=False,
+                       new_type_id=None):
+        """Migrate the volume to the specified host (called on source host)."""
+        try:
+            # NOTE(flaper87): Verify the driver is enabled
+            # before going forward. The exception will be caught
+            # and the migration status updated.
+ utils.require_driver_initialized(self.driver) + except exception.DriverNotInitialized: + with excutils.save_and_reraise_exception(): + self.db.volume_update(ctxt, volume_id, + {'migration_status': 'error'}) + + volume_ref = self.db.volume_get(ctxt, volume_id) + model_update = None + moved = False + + status_update = None + if volume_ref['status'] == 'retyping': + status_update = {'status': self._get_original_status(volume_ref)} + + self.db.volume_update(ctxt, volume_ref['id'], + {'migration_status': 'migrating'}) + if not force_host_copy and new_type_id is None: + try: + LOG.debug(_("volume %s: calling driver migrate_volume"), + volume_ref['id']) + moved, model_update = self.driver.migrate_volume(ctxt, + volume_ref, + host) + if moved: + updates = {'host': host['host'], + 'migration_status': None} + if status_update: + updates.update(status_update) + if model_update: + updates.update(model_update) + volume_ref = self.db.volume_update(ctxt, + volume_ref['id'], + updates) + except Exception: + with excutils.save_and_reraise_exception(): + updates = {'migration_status': None} + if status_update: + updates.update(status_update) + model_update = self.driver.create_export(ctxt, volume_ref) + if model_update: + updates.update(model_update) + self.db.volume_update(ctxt, volume_ref['id'], updates) + if not moved: + try: + self._migrate_volume_generic(ctxt, volume_ref, host, + new_type_id) + except Exception: + with excutils.save_and_reraise_exception(): + updates = {'migration_status': None} + if status_update: + updates.update(status_update) + model_update = self.driver.create_export(ctxt, volume_ref) + if model_update: + updates.update(model_update) + self.db.volume_update(ctxt, volume_ref['id'], updates) + + @periodic_task.periodic_task def _report_driver_status(self, context): - volume_stats = self.driver.get_volume_stats(refresh=True) - if volume_stats: - LOG.info(_("Checking volume capabilities")) - - if self._volume_stats_changed(self._last_volume_stats, - volume_stats): - LOG.info(_("New capabilities found: %s"), volume_stats) - self._last_volume_stats = volume_stats - - # This will grab info about the host and queue it - # to be sent to the Schedulers. - self.update_service_capabilities(self._last_volume_stats) + LOG.info(_("Updating volume status")) + if not self.driver.initialized: + if self.driver.configuration.config_group is None: + config_group = '' else: - # avoid repeating fanouts - self.update_service_capabilities(None) - - def _reset_stats(self): - LOG.info(_("Clear capabilities")) - self._last_volume_stats = [] + config_group = ('(config name %s)' % + self.driver.configuration.config_group) + + LOG.warning(_('Unable to update stats, %(driver_name)s ' + '-%(driver_version)s ' + '%(config_group)s driver is uninitialized.') % + {'driver_name': self.driver.__class__.__name__, + 'driver_version': self.driver.get_version(), + 'config_group': config_group}) + else: + volume_stats = self.driver.get_volume_stats(refresh=True) + if volume_stats: + # Append volume stats with 'allocated_capacity_gb' + volume_stats.update(self.stats) + # queue it to be sent to the Schedulers. 
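+                # For example (illustrative numbers): a backend holding two
+                # 10G volumes would now report volume_stats containing
+                # {'allocated_capacity_gb': 20} merged in from self.stats.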
+ self.update_service_capabilities(volume_stats) + + def publish_service_capabilities(self, context): + """Collect driver status and then publish.""" + self._report_driver_status(context) + self._publish_service_capabilities(context) def notification(self, context, event): LOG.info(_("Notification {%s} received"), event) - self._reset_stats() - def _notify_about_volume_usage(self, context, volume, event_suffix, - extra_usage_info=None): + def _notify_about_volume_usage(self, + context, + volume, + event_suffix, + extra_usage_info=None): volume_utils.notify_about_volume_usage( - context, volume, event_suffix, - extra_usage_info=extra_usage_info, host=self.host) + context, volume, event_suffix, + extra_usage_info=extra_usage_info, host=self.host) + + def _notify_about_snapshot_usage(self, + context, + snapshot, + event_suffix, + extra_usage_info=None): + volume_utils.notify_about_snapshot_usage( + context, snapshot, event_suffix, + extra_usage_info=extra_usage_info, host=self.host) + + def extend_volume(self, context, volume_id, new_size): + try: + # NOTE(flaper87): Verify the driver is enabled + # before going forward. The exception will be caught + # and the volume status updated. + utils.require_driver_initialized(self.driver) + except exception.DriverNotInitialized: + with excutils.save_and_reraise_exception(): + self.db.volume_update(context, volume_id, + {'status': 'error_extending'}) + + volume = self.db.volume_get(context, volume_id) + size_increase = (int(new_size)) - volume['size'] + + try: + reservations = QUOTAS.reserve(context, gigabytes=+size_increase) + except exception.OverQuota as exc: + self.db.volume_update(context, volume['id'], + {'status': 'error_extending'}) + overs = exc.kwargs['overs'] + usages = exc.kwargs['usages'] + quotas = exc.kwargs['quotas'] + + def _consumed(name): + return (usages[name]['reserved'] + usages[name]['in_use']) + + if 'gigabytes' in overs: + msg = _("Quota exceeded for %(s_pid)s, " + "tried to extend volume by " + "%(s_size)sG, (%(d_consumed)dG of %(d_quota)dG " + "already consumed)") + LOG.error(msg % {'s_pid': context.project_id, + 's_size': size_increase, + 'd_consumed': _consumed('gigabytes'), + 'd_quota': quotas['gigabytes']}) + return + + self._notify_about_volume_usage(context, volume, "resize.start") + try: + LOG.info(_("volume %s: extending"), volume['id']) + self.driver.extend_volume(volume, new_size) + LOG.info(_("volume %s: extended successfully"), volume['id']) + except Exception: + LOG.exception(_("volume %s: Error trying to extend volume"), + volume_id) + try: + self.db.volume_update(context, volume['id'], + {'status': 'error_extending'}) + finally: + QUOTAS.rollback(context, reservations) + return + + QUOTAS.commit(context, reservations) + self.db.volume_update(context, volume['id'], {'size': int(new_size), + 'status': 'available'}) + self.stats['allocated_capacity_gb'] += size_increase + self._notify_about_volume_usage( + context, volume, "resize.end", + extra_usage_info={'size': int(new_size)}) + + def retype(self, ctxt, volume_id, new_type_id, host, + migration_policy='never', reservations=None): + + def _retype_error(context, volume_id, old_reservations, + new_reservations, status_update): + try: + self.db.volume_update(context, volume_id, status_update) + finally: + QUOTAS.rollback(context, old_reservations) + QUOTAS.rollback(context, new_reservations) + + context = ctxt.elevated() + + volume_ref = self.db.volume_get(ctxt, volume_id) + status_update = {'status': self._get_original_status(volume_ref)} + if context.project_id 
!= volume_ref['project_id']: + project_id = volume_ref['project_id'] + else: + project_id = context.project_id + + try: + # NOTE(flaper87): Verify the driver is enabled + # before going forward. The exception will be caught + # and the volume status updated. + utils.require_driver_initialized(self.driver) + except exception.DriverNotInitialized: + with excutils.save_and_reraise_exception(): + # NOTE(flaper87): Other exceptions in this method don't + # set the volume status to error. Should that be done + # here? Setting the volume back to its original status + # for now. + self.db.volume_update(context, volume_id, status_update) + + # Get old reservations + try: + reserve_opts = {'volumes': -1, 'gigabytes': -volume_ref['size']} + QUOTAS.add_volume_type_opts(context, + reserve_opts, + volume_ref.get('volume_type_id')) + old_reservations = QUOTAS.reserve(context, + project_id=project_id, + **reserve_opts) + except Exception: + old_reservations = None + self.db.volume_update(context, volume_id, status_update) + LOG.exception(_("Failed to update usages while retyping volume.")) + raise exception.CinderException(_("Failed to get old volume type" + " quota reservations")) + + # We already got the new reservations + new_reservations = reservations + + # If volume types have the same contents, no need to do anything + retyped = False + diff, all_equal = volume_types.volume_types_diff( + context, volume_ref.get('volume_type_id'), new_type_id) + if all_equal: + retyped = True + + # Call driver to try and change the type + if not retyped: + try: + new_type = volume_types.get_volume_type(context, new_type_id) + retyped = self.driver.retype(context, volume_ref, new_type, + diff, host) + if retyped: + LOG.info(_("Volume %s: retyped successfully"), volume_id) + except Exception as ex: + retyped = False + LOG.error(_("Volume %s: driver error when trying to retype, " + "falling back to generic mechanism."), + volume_ref['id']) + LOG.exception(ex) + + # We could not change the type, so we need to migrate the volume, where + # the destination volume will be of the new type + if not retyped: + if migration_policy == 'never': + _retype_error(context, volume_id, old_reservations, + new_reservations, status_update) + msg = _("Retype requires migration but is not allowed.") + raise exception.VolumeMigrationFailed(reason=msg) + + snaps = self.db.snapshot_get_all_for_volume(context, + volume_ref['id']) + if snaps: + _retype_error(context, volume_id, old_reservations, + new_reservations, status_update) + msg = _("Volume must not have snapshots.") + LOG.error(msg) + raise exception.InvalidVolume(reason=msg) + self.db.volume_update(context, volume_ref['id'], + {'migration_status': 'starting'}) + + try: + self.migrate_volume(context, volume_id, host, + new_type_id=new_type_id) + except Exception: + with excutils.save_and_reraise_exception(): + _retype_error(context, volume_id, old_reservations, + new_reservations, status_update) + + self.db.volume_update(context, volume_id, + {'volume_type_id': new_type_id, + 'host': host['host'], + 'status': status_update['status']}) + + if old_reservations: + QUOTAS.commit(context, old_reservations, project_id=project_id) + if new_reservations: + QUOTAS.commit(context, new_reservations, project_id=project_id) + self.publish_service_capabilities(context) diff --git a/cinder/volume/netapp.py b/cinder/volume/netapp.py deleted file mode 100644 index c6f1fbf1d0..0000000000 --- a/cinder/volume/netapp.py +++ /dev/null @@ -1,1285 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# 
Copyright (c) 2012 NetApp, Inc. -# Copyright (c) 2012 OpenStack LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Volume driver for NetApp storage systems. - -This driver requires NetApp OnCommand 5.0 and one or more Data -ONTAP 7-mode storage systems with installed iSCSI licenses. - -""" - -import time - -import suds -from suds import client -from suds.sax import text - -from cinder import exception -from cinder import flags -from cinder.openstack.common import log as logging -from cinder.openstack.common import cfg -from cinder.volume import driver -from cinder.volume import volume_types - -LOG = logging.getLogger("cinder.volume.driver") - -netapp_opts = [ - cfg.StrOpt('netapp_wsdl_url', - default=None, - help='URL of the WSDL file for the DFM server'), - cfg.StrOpt('netapp_login', - default=None, - help='User name for the DFM server'), - cfg.StrOpt('netapp_password', - default=None, - help='Password for the DFM server'), - cfg.StrOpt('netapp_server_hostname', - default=None, - help='Hostname for the DFM server'), - cfg.IntOpt('netapp_server_port', - default=8088, - help='Port number for the DFM server'), - cfg.StrOpt('netapp_storage_service', - default=None, - help=('Storage service to use for provisioning ' - '(when volume_type=None)')), - cfg.StrOpt('netapp_storage_service_prefix', - default=None, - help=('Prefix of storage service name to use for ' - 'provisioning (volume_type name will be appended)')), - cfg.StrOpt('netapp_vfiler', - default=None, - help='Vfiler to use for provisioning'), - ] - -FLAGS = flags.FLAGS -FLAGS.register_opts(netapp_opts) - - -class DfmDataset(object): - def __init__(self, id, name, project, type): - self.id = id - self.name = name - self.project = project - self.type = type - - -class DfmLun(object): - def __init__(self, dataset, lunpath, id): - self.dataset = dataset - self.lunpath = lunpath - self.id = id - - -class NetAppISCSIDriver(driver.ISCSIDriver): - """NetApp iSCSI volume driver.""" - - IGROUP_PREFIX = 'openstack-' - DATASET_PREFIX = 'OpenStack_' - DATASET_METADATA_PROJECT_KEY = 'OpenStackProject' - DATASET_METADATA_VOL_TYPE_KEY = 'OpenStackVolType' - - def __init__(self, *args, **kwargs): - super(NetAppISCSIDriver, self).__init__(*args, **kwargs) - self.discovered_luns = [] - self.discovered_datasets = [] - self.lun_table = {} - - def _check_fail(self, request, response): - """Utility routine to handle checking ZAPI failures.""" - if 'failed' == response.Status: - name = request.Name - reason = response.Reason - msg = _('API %(name)s failed: %(reason)s') - raise exception.VolumeBackendAPIException(data=msg % locals()) - - def _create_client(self, **kwargs): - """Instantiate a web services client. - - This method creates a "suds" client to make web services calls to the - DFM server. Note that the WSDL file is quite large and may take - a few seconds to parse. 
- """ - wsdl_url = kwargs['wsdl_url'] - LOG.debug(_('Using WSDL: %s') % wsdl_url) - if kwargs['cache']: - self.client = client.Client(wsdl_url, username=kwargs['login'], - password=kwargs['password']) - else: - self.client = client.Client(wsdl_url, username=kwargs['login'], - password=kwargs['password'], - cache=None) - soap_url = 'http://%s:%s/apis/soap/v1' % (kwargs['hostname'], - kwargs['port']) - LOG.debug(_('Using DFM server: %s') % soap_url) - self.client.set_options(location=soap_url) - - def _set_storage_service(self, storage_service): - """Set the storage service to use for provisioning.""" - LOG.debug(_('Using storage service: %s') % storage_service) - self.storage_service = storage_service - - def _set_storage_service_prefix(self, storage_service_prefix): - """Set the storage service prefix to use for provisioning.""" - LOG.debug(_('Using storage service prefix: %s') % - storage_service_prefix) - self.storage_service_prefix = storage_service_prefix - - def _set_vfiler(self, vfiler): - """Set the vfiler to use for provisioning.""" - LOG.debug(_('Using vfiler: %s') % vfiler) - self.vfiler = vfiler - - def _check_flags(self): - """Ensure that the flags we care about are set.""" - required_flags = ['netapp_wsdl_url', 'netapp_login', 'netapp_password', - 'netapp_server_hostname', 'netapp_server_port'] - for flag in required_flags: - if not getattr(FLAGS, flag, None): - raise exception.InvalidInput(reason=_('%s is not set') % flag) - if not (FLAGS.netapp_storage_service or - FLAGS.netapp_storage_service_prefix): - raise exception.InvalidInput(reason=_('Either ' - 'netapp_storage_service or netapp_storage_service_prefix must ' - 'be set')) - - def do_setup(self, context): - """Setup the NetApp Volume driver. - - Called one time by the manager after the driver is loaded. - Validate the flags we care about and setup the suds (web services) - client. - """ - self._check_flags() - self._create_client(wsdl_url=FLAGS.netapp_wsdl_url, - login=FLAGS.netapp_login, password=FLAGS.netapp_password, - hostname=FLAGS.netapp_server_hostname, - port=FLAGS.netapp_server_port, cache=True) - self._set_storage_service(FLAGS.netapp_storage_service) - self._set_storage_service_prefix(FLAGS.netapp_storage_service_prefix) - self._set_vfiler(FLAGS.netapp_vfiler) - - def check_for_setup_error(self): - """Check that the driver is working and can communicate. - - Invoke a web services API to make sure we can talk to the server. - Also perform the discovery of datasets and LUNs from DFM. 
- """ - self.client.service.DfmAbout() - LOG.debug(_("Connected to DFM server")) - self._discover_luns() - - def _get_datasets(self): - """Get the list of datasets from DFM.""" - server = self.client.service - res = server.DatasetListInfoIterStart(IncludeMetadata=True) - tag = res.Tag - datasets = [] - try: - while True: - res = server.DatasetListInfoIterNext(Tag=tag, Maximum=100) - if not res.Datasets: - break - datasets.extend(res.Datasets.DatasetInfo) - finally: - server.DatasetListInfoIterEnd(Tag=tag) - return datasets - - def _discover_dataset_luns(self, dataset, volume): - """Discover all of the LUNs in a dataset.""" - server = self.client.service - res = server.DatasetMemberListInfoIterStart( - DatasetNameOrId=dataset.id, - IncludeExportsInfo=True, - IncludeIndirect=True, - MemberType='lun_path') - tag = res.Tag - suffix = None - if volume: - suffix = '/' + volume - try: - while True: - res = server.DatasetMemberListInfoIterNext(Tag=tag, - Maximum=100) - if (not hasattr(res, 'DatasetMembers') or - not res.DatasetMembers): - break - for member in res.DatasetMembers.DatasetMemberInfo: - if suffix and not member.MemberName.endswith(suffix): - continue - # MemberName is the full LUN path in this format: - # host:/volume/qtree/lun - lun = DfmLun(dataset, member.MemberName, member.MemberId) - self.discovered_luns.append(lun) - finally: - server.DatasetMemberListInfoIterEnd(Tag=tag) - - def _discover_luns(self): - """Discover the LUNs from DFM. - - Discover all of the OpenStack-created datasets and LUNs in the DFM - database. - """ - datasets = self._get_datasets() - self.discovered_datasets = [] - self.discovered_luns = [] - for dataset in datasets: - if not dataset.DatasetName.startswith(self.DATASET_PREFIX): - continue - if (not hasattr(dataset, 'DatasetMetadata') or - not dataset.DatasetMetadata): - continue - project = None - type = None - for field in dataset.DatasetMetadata.DfmMetadataField: - if field.FieldName == self.DATASET_METADATA_PROJECT_KEY: - project = field.FieldValue - elif field.FieldName == self.DATASET_METADATA_VOL_TYPE_KEY: - type = field.FieldValue - if not project: - continue - ds = DfmDataset(dataset.DatasetId, dataset.DatasetName, - project, type) - self.discovered_datasets.append(ds) - self._discover_dataset_luns(ds, None) - dataset_count = len(self.discovered_datasets) - lun_count = len(self.discovered_luns) - msg = _("Discovered %(dataset_count)s datasets and %(lun_count)s LUNs") - LOG.debug(msg % locals()) - self.lun_table = {} - - def _get_job_progress(self, job_id): - """Get progress of one running DFM job. - - Obtain the latest progress report for the job and return the - list of progress events. - """ - server = self.client.service - res = server.DpJobProgressEventListIterStart(JobId=job_id) - tag = res.Tag - event_list = [] - try: - while True: - res = server.DpJobProgressEventListIterNext(Tag=tag, - Maximum=100) - if not hasattr(res, 'ProgressEvents'): - break - event_list += res.ProgressEvents.DpJobProgressEventInfo - finally: - server.DpJobProgressEventListIterEnd(Tag=tag) - return event_list - - def _wait_for_job(self, job_id): - """Wait until a job terminates. - - Poll the job until it completes or an error is detected. Return the - final list of progress events if it completes successfully. 
- """ - while True: - events = self._get_job_progress(job_id) - for event in events: - if event.EventStatus == 'error': - msg = _('Job failed: %s') % (event.ErrorMessage) - raise exception.VolumeBackendAPIException(data=msg) - if event.EventType == 'job-end': - return events - time.sleep(5) - - def _dataset_name(self, project, ss_type): - """Return the dataset name for a given project and volume type.""" - _project = project.replace(' ', '_').replace('-', '_') - dataset_name = self.DATASET_PREFIX + _project - if not ss_type: - return dataset_name - _type = ss_type.replace(' ', '_').replace('-', '_') - return dataset_name + '_' + _type - - def _get_dataset(self, dataset_name): - """Lookup a dataset by name in the list of discovered datasets.""" - for dataset in self.discovered_datasets: - if dataset.name == dataset_name: - return dataset - return None - - def _create_dataset(self, dataset_name, project, ss_type): - """Create a new dataset using the storage service. - - The export settings are set to create iSCSI LUNs aligned for Linux. - Returns the ID of the new dataset. - """ - if ss_type and not self.storage_service_prefix: - msg = _('Attempt to use volume_type without specifying ' - 'netapp_storage_service_prefix flag.') - raise exception.VolumeBackendAPIException(data=msg) - if not (ss_type or self.storage_service): - msg = _('You must set the netapp_storage_service flag in order to ' - 'create volumes with no volume_type.') - raise exception.VolumeBackendAPIException(data=msg) - storage_service = self.storage_service - if ss_type: - storage_service = self.storage_service_prefix + ss_type - - factory = self.client.factory - - lunmap = factory.create('DatasetLunMappingInfo') - lunmap.IgroupOsType = 'linux' - export = factory.create('DatasetExportInfo') - export.DatasetExportProtocol = 'iscsi' - export.DatasetLunMappingInfo = lunmap - detail = factory.create('StorageSetInfo') - detail.DpNodeName = 'Primary data' - detail.DatasetExportInfo = export - if hasattr(self, 'vfiler') and self.vfiler: - detail.ServerNameOrId = self.vfiler - details = factory.create('ArrayOfStorageSetInfo') - details.StorageSetInfo = [detail] - field1 = factory.create('DfmMetadataField') - field1.FieldName = self.DATASET_METADATA_PROJECT_KEY - field1.FieldValue = project - field2 = factory.create('DfmMetadataField') - field2.FieldName = self.DATASET_METADATA_VOL_TYPE_KEY - field2.FieldValue = ss_type - metadata = factory.create('ArrayOfDfmMetadataField') - metadata.DfmMetadataField = [field1, field2] - - res = self.client.service.StorageServiceDatasetProvision( - StorageServiceNameOrId=storage_service, - DatasetName=dataset_name, - AssumeConfirmation=True, - StorageSetDetails=details, - DatasetMetadata=metadata) - - ds = DfmDataset(res.DatasetId, dataset_name, project, ss_type) - self.discovered_datasets.append(ds) - return ds - - def _provision(self, name, description, project, ss_type, size): - """Provision a LUN through provisioning manager. - - The LUN will be created inside a dataset associated with the project. - If the dataset doesn't already exist, we create it using the storage - service specified in the cinder conf. 
- """ - dataset_name = self._dataset_name(project, ss_type) - dataset = self._get_dataset(dataset_name) - if not dataset: - dataset = self._create_dataset(dataset_name, project, ss_type) - - info = self.client.factory.create('ProvisionMemberRequestInfo') - info.Name = name - if description: - info.Description = description - info.Size = size - info.MaximumSnapshotSpace = 2 * long(size) - - server = self.client.service - lock_id = server.DatasetEditBegin(DatasetNameOrId=dataset.id) - try: - server.DatasetProvisionMember(EditLockId=lock_id, - ProvisionMemberRequestInfo=info) - res = server.DatasetEditCommit(EditLockId=lock_id, - AssumeConfirmation=True) - except (suds.WebFault, Exception): - server.DatasetEditRollback(EditLockId=lock_id) - msg = _('Failed to provision dataset member') - raise exception.VolumeBackendAPIException(data=msg) - - lun_id = None - lunpath = None - - for info in res.JobIds.JobInfo: - events = self._wait_for_job(info.JobId) - for event in events: - if event.EventType != 'lun-create': - continue - lunpath = event.ProgressLunInfo.LunName - lun_id = event.ProgressLunInfo.LunPathId - - if not lun_id: - msg = _('No LUN was created by the provision job') - raise exception.VolumeBackendAPIException(data=msg) - - lun = DfmLun(dataset, lunpath, lun_id) - self.discovered_luns.append(lun) - self.lun_table[name] = lun - - def _get_ss_type(self, volume): - """Get the storage service type for a volume.""" - id = volume['volume_type_id'] - if not id: - return None - volume_type = volume_types.get_volume_type(None, id) - if not volume_type: - return None - return volume_type['name'] - - def _remove_destroy(self, name, project): - """Remove the LUN from the dataset, also destroying it. - - Remove the LUN from the dataset and destroy the actual LUN on the - storage system. - """ - lun = self._lookup_lun_for_volume(name, project) - member = self.client.factory.create('DatasetMemberParameter') - member.ObjectNameOrId = lun.id - members = self.client.factory.create('ArrayOfDatasetMemberParameter') - members.DatasetMemberParameter = [member] - - server = self.client.service - lock_id = server.DatasetEditBegin(DatasetNameOrId=lun.dataset.id) - try: - server.DatasetRemoveMember(EditLockId=lock_id, Destroy=True, - DatasetMemberParameters=members) - server.DatasetEditCommit(EditLockId=lock_id, - AssumeConfirmation=True) - except (suds.WebFault, Exception): - server.DatasetEditRollback(EditLockId=lock_id) - msg = _('Failed to remove and delete dataset member') - raise exception.VolumeBackendAPIException(data=msg) - - def create_volume(self, volume): - """Driver entry point for creating a new volume.""" - default_size = '104857600' # 100 MB - gigabytes = 1073741824L # 2^30 - name = volume['name'] - project = volume['project_id'] - display_name = volume['display_name'] - display_description = volume['display_description'] - description = None - if display_name: - if display_description: - description = display_name + "\n" + display_description - else: - description = display_name - elif display_description: - description = display_description - if int(volume['size']) == 0: - size = default_size - else: - size = str(int(volume['size']) * gigabytes) - ss_type = self._get_ss_type(volume) - self._provision(name, description, project, ss_type, size) - - def _lookup_lun_for_volume(self, name, project): - """Lookup the LUN that corresponds to the give volume. - - Initial lookups involve a table scan of all of the discovered LUNs, - but later lookups are done instantly from the hashtable. 
- """ - if name in self.lun_table: - return self.lun_table[name] - lunpath_suffix = '/' + name - for lun in self.discovered_luns: - if lun.dataset.project != project: - continue - if lun.lunpath.endswith(lunpath_suffix): - self.lun_table[name] = lun - return lun - msg = _("No entry in LUN table for volume %s") % (name) - raise exception.VolumeBackendAPIException(data=msg) - - def delete_volume(self, volume): - """Driver entry point for destroying existing volumes.""" - name = volume['name'] - project = volume['project_id'] - self._remove_destroy(name, project) - - def _get_lun_details(self, lun_id): - """Given the ID of a LUN, get the details about that LUN.""" - server = self.client.service - res = server.LunListInfoIterStart(ObjectNameOrId=lun_id) - tag = res.Tag - try: - res = server.LunListInfoIterNext(Tag=tag, Maximum=1) - if hasattr(res, 'Luns') and res.Luns.LunInfo: - return res.Luns.LunInfo[0] - finally: - server.LunListInfoIterEnd(Tag=tag) - msg = _('Failed to get LUN details for LUN ID %s') - raise exception.VolumeBackendAPIException(data=msg % lun_id) - - def _get_host_details(self, host_id): - """Given the ID of a host, get the details about it. - - A "host" is a storage system here. - """ - server = self.client.service - res = server.HostListInfoIterStart(ObjectNameOrId=host_id) - tag = res.Tag - try: - res = server.HostListInfoIterNext(Tag=tag, Maximum=1) - if hasattr(res, 'Hosts') and res.Hosts.HostInfo: - return res.Hosts.HostInfo[0] - finally: - server.HostListInfoIterEnd(Tag=tag) - msg = _('Failed to get host details for host ID %s') - raise exception.VolumeBackendAPIException(data=msg % host_id) - - def _get_iqn_for_host(self, host_id): - """Get the iSCSI Target Name for a storage system.""" - request = self.client.factory.create('Request') - request.Name = 'iscsi-node-get-name' - response = self.client.service.ApiProxy(Target=host_id, - Request=request) - self._check_fail(request, response) - return response.Results['node-name'][0] - - def _api_elem_is_empty(self, elem): - """Return true if the API element should be considered empty. - - Helper routine to figure out if a list returned from a proxy API - is empty. This is necessary because the API proxy produces nasty - looking XML. - """ - if not type(elem) is list: - return True - if 0 == len(elem): - return True - child = elem[0] - if isinstance(child, text.Text): - return True - if type(child) is str: - return True - return False - - def _get_target_portal_for_host(self, host_id, host_address): - """Get iSCSI target portal for a storage system. - - Get the iSCSI Target Portal details for a particular IP address - on a storage system. - """ - request = self.client.factory.create('Request') - request.Name = 'iscsi-portal-list-info' - response = self.client.service.ApiProxy(Target=host_id, - Request=request) - self._check_fail(request, response) - portal = {} - portals = response.Results['iscsi-portal-list-entries'] - if self._api_elem_is_empty(portals): - return portal - portal_infos = portals[0]['iscsi-portal-list-entry-info'] - for portal_info in portal_infos: - portal['address'] = portal_info['ip-address'][0] - portal['port'] = portal_info['ip-port'][0] - portal['portal'] = portal_info['tpgroup-tag'][0] - if host_address == portal['address']: - break - return portal - - def _get_export(self, volume): - """Get the iSCSI export details for a volume. - - Looks up the LUN in DFM based on the volume and project name, then get - the LUN's ID. 
We store that value in the database instead of the iSCSI - details because we will not have the true iSCSI details until masking - time (when initialize_connection() is called). - """ - name = volume['name'] - project = volume['project_id'] - lun = self._lookup_lun_for_volume(name, project) - return {'provider_location': lun.id} - - def ensure_export(self, context, volume): - """Driver entry point to get the export info for an existing volume.""" - return self._get_export(volume) - - def create_export(self, context, volume): - """Driver entry point to get the export info for a new volume.""" - return self._get_export(volume) - - def remove_export(self, context, volume): - """Driver exntry point to remove an export for a volume. - - Since exporting is idempotent in this driver, we have nothing - to do for unexporting. - """ - pass - - def _find_igroup_for_initiator(self, host_id, initiator_name): - """Get the igroup for an initiator. - - Look for an existing igroup (initiator group) on the storage system - containing a given iSCSI initiator and return the name of the igroup. - """ - request = self.client.factory.create('Request') - request.Name = 'igroup-list-info' - response = self.client.service.ApiProxy(Target=host_id, - Request=request) - self._check_fail(request, response) - igroups = response.Results['initiator-groups'] - if self._api_elem_is_empty(igroups): - return None - igroup_infos = igroups[0]['initiator-group-info'] - for igroup_info in igroup_infos: - if ('iscsi' != igroup_info['initiator-group-type'][0] or - 'linux' != igroup_info['initiator-group-os-type'][0]): - continue - igroup_name = igroup_info['initiator-group-name'][0] - if not igroup_name.startswith(self.IGROUP_PREFIX): - continue - initiators = igroup_info['initiators'][0]['initiator-info'] - for initiator in initiators: - if initiator_name == initiator['initiator-name'][0]: - return igroup_name - return None - - def _create_igroup(self, host_id, initiator_name): - """Create a new igroup. - - Create a new igroup (initiator group) on the storage system to hold - the given iSCSI initiator. The group will only have 1 member and will - be named "openstack-${initiator_name}". - """ - igroup_name = self.IGROUP_PREFIX + initiator_name - request = self.client.factory.create('Request') - request.Name = 'igroup-create' - igroup_create_xml = ( - '%s' - 'iscsi' - 'linuxlinux') - request.Args = text.Raw(igroup_create_xml % igroup_name) - response = self.client.service.ApiProxy(Target=host_id, - Request=request) - self._check_fail(request, response) - request = self.client.factory.create('Request') - request.Name = 'igroup-add' - igroup_add_xml = ( - '%s' - '%s') - request.Args = text.Raw(igroup_add_xml % (igroup_name, initiator_name)) - response = self.client.service.ApiProxy(Target=host_id, - Request=request) - self._check_fail(request, response) - return igroup_name - - def _get_lun_mappping(self, host_id, lunpath, igroup_name): - """Get the mapping between a LUN and an igroup. - - Check if a given LUN is already mapped to the given igroup (initiator - group). If the LUN is mapped, also return the LUN number for the - mapping. 
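Together with ``_find_igroup_for_initiator`` and ``_map_initiator``, the mapping check above gives an idempotent attach path. A hedged sketch of that flow, with the four ZAPI wrappers passed in as hypothetical callables::

    def ensure_initiator_mapped(host_id, lunpath, initiator, find_igroup,
                                create_igroup, get_mapping, map_lun):
        """Map a LUN to an initiator only if it is not mapped already."""
        igroup = find_igroup(host_id, initiator)
        if not igroup:
            igroup = create_igroup(host_id, initiator)
        mapping = get_mapping(host_id, lunpath, igroup)
        if mapping['mapped']:
            return mapping['lun_num']  # reuse the existing LUN number
        return map_lun(host_id, lunpath, igroup)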
- """ - request = self.client.factory.create('Request') - request.Name = 'lun-map-list-info' - request.Args = text.Raw('%s' % (lunpath)) - response = self.client.service.ApiProxy(Target=host_id, - Request=request) - self._check_fail(request, response) - igroups = response.Results['initiator-groups'] - if self._api_elem_is_empty(igroups): - return {'mapped': False} - igroup_infos = igroups[0]['initiator-group-info'] - for igroup_info in igroup_infos: - if igroup_name == igroup_info['initiator-group-name'][0]: - return {'mapped': True, 'lun_num': igroup_info['lun-id'][0]} - return {'mapped': False} - - def _map_initiator(self, host_id, lunpath, igroup_name): - """Map a LUN to an igroup. - - Map the given LUN to the given igroup (initiator group). Return the LUN - number that the LUN was mapped to (the filer will choose the lowest - available number). - """ - request = self.client.factory.create('Request') - request.Name = 'lun-map' - lun_map_xml = ('%s' - '%s') - request.Args = text.Raw(lun_map_xml % (igroup_name, lunpath)) - response = self.client.service.ApiProxy(Target=host_id, - Request=request) - self._check_fail(request, response) - return response.Results['lun-id-assigned'][0] - - def _unmap_initiator(self, host_id, lunpath, igroup_name): - """Unmap the given LUN from the given igroup (initiator group).""" - request = self.client.factory.create('Request') - request.Name = 'lun-unmap' - lun_unmap_xml = ('%s' - '%s') - request.Args = text.Raw(lun_unmap_xml % (igroup_name, lunpath)) - response = self.client.service.ApiProxy(Target=host_id, - Request=request) - self._check_fail(request, response) - - def _ensure_initiator_mapped(self, host_id, lunpath, initiator_name): - """Ensure that a LUN is mapped to a particular initiator. - - Check if a LUN is mapped to a given initiator already and create - the mapping if it is not. A new igroup will be created if needed. - Returns the LUN number for the mapping between the LUN and initiator - in both cases. - """ - lunpath = '/vol/' + lunpath - igroup_name = self._find_igroup_for_initiator(host_id, initiator_name) - if not igroup_name: - igroup_name = self._create_igroup(host_id, initiator_name) - - mapping = self._get_lun_mappping(host_id, lunpath, igroup_name) - if mapping['mapped']: - return mapping['lun_num'] - return self._map_initiator(host_id, lunpath, igroup_name) - - def _ensure_initiator_unmapped(self, host_id, lunpath, initiator_name): - """Ensure that a LUN is not mapped to a particular initiator. - - Check if a LUN is mapped to a given initiator and remove the - mapping if it is. This does not destroy the igroup. - """ - lunpath = '/vol/' + lunpath - igroup_name = self._find_igroup_for_initiator(host_id, initiator_name) - if not igroup_name: - return - - mapping = self._get_lun_mappping(host_id, lunpath, igroup_name) - if mapping['mapped']: - self._unmap_initiator(host_id, lunpath, igroup_name) - - def initialize_connection(self, volume, connector): - """Driver entry point to attach a volume to an instance. - - Do the LUN masking on the storage system so the initiator can access - the LUN on the target. Also return the iSCSI properties so the - initiator can find the LUN. This implementation does not call - _get_iscsi_properties() to get the properties because cannot store the - LUN number in the database. We only find out what the LUN number will - be during this method call so we construct the properties dictionary - ourselves. 
- """ - initiator_name = connector['initiator'] - lun_id = volume['provider_location'] - if not lun_id: - msg = _("No LUN ID for volume %s") % volume['name'] - raise exception.VolumeBackendAPIException(data=msg) - lun = self._get_lun_details(lun_id) - lun_num = self._ensure_initiator_mapped(lun.HostId, lun.LunPath, - initiator_name) - host = self._get_host_details(lun.HostId) - portal = self._get_target_portal_for_host(host.HostId, - host.HostAddress) - if not portal: - msg = _('Failed to get target portal for filer: %s') - raise exception.VolumeBackendAPIException(data=msg % host.HostName) - - iqn = self._get_iqn_for_host(host.HostId) - if not iqn: - msg = _('Failed to get target IQN for filer: %s') - raise exception.VolumeBackendAPIException(data=msg % host.HostName) - - properties = {} - properties['target_discovered'] = False - (address, port) = (portal['address'], portal['port']) - properties['target_portal'] = '%s:%s' % (address, port) - properties['target_iqn'] = iqn - properties['target_lun'] = lun_num - properties['volume_id'] = volume['id'] - - auth = volume['provider_auth'] - if auth: - (auth_method, auth_username, auth_secret) = auth.split() - - properties['auth_method'] = auth_method - properties['auth_username'] = auth_username - properties['auth_password'] = auth_secret - - return { - 'driver_volume_type': 'iscsi', - 'data': properties, - } - - def terminate_connection(self, volume, connector): - """Driver entry point to unattach a volume from an instance. - - Unmask the LUN on the storage system so the given intiator can no - longer access it. - """ - initiator_name = connector['initiator'] - lun_id = volume['provider_location'] - if not lun_id: - msg = _('No LUN ID for volume %s') % volume['name'] - raise exception.VolumeBackendAPIException(data=msg) - lun = self._get_lun_details(lun_id) - self._ensure_initiator_unmapped(lun.HostId, lun.LunPath, - initiator_name) - - def _is_clone_done(self, host_id, clone_op_id, volume_uuid): - """Check the status of a clone operation. - - Return True if done, False otherwise. - """ - request = self.client.factory.create('Request') - request.Name = 'clone-list-status' - clone_list_status_xml = ( - '' - '%s' - '%s' - '') - request.Args = text.Raw(clone_list_status_xml % (clone_op_id, - volume_uuid)) - response = self.client.service.ApiProxy(Target=host_id, - Request=request) - self._check_fail(request, response) - status = response.Results['status'] - if self._api_elem_is_empty(status): - return False - ops_info = status[0]['ops-info'][0] - state = ops_info['clone-state'][0] - return 'completed' == state - - def _clone_lun(self, host_id, src_path, dest_path, snap): - """Create a clone of a NetApp LUN. - - The clone initially consumes no space and is not space reserved. 
- """ - request = self.client.factory.create('Request') - request.Name = 'clone-start' - clone_start_xml = ( - '%s%s' - '%s') - if snap: - no_snap = 'false' - else: - no_snap = 'true' - request.Args = text.Raw(clone_start_xml % (src_path, no_snap, - dest_path)) - response = self.client.service.ApiProxy(Target=host_id, - Request=request) - self._check_fail(request, response) - clone_id = response.Results['clone-id'][0] - clone_id_info = clone_id['clone-id-info'][0] - clone_op_id = clone_id_info['clone-op-id'][0] - volume_uuid = clone_id_info['volume-uuid'][0] - while not self._is_clone_done(host_id, clone_op_id, volume_uuid): - time.sleep(5) - - def _refresh_dfm_luns(self, host_id): - """Refresh the LUN list for one filer in DFM.""" - server = self.client.service - server.DfmObjectRefresh(ObjectNameOrId=host_id, ChildType='lun_path') - while True: - time.sleep(15) - res = server.DfmMonitorTimestampList(HostNameOrId=host_id) - for timestamp in res.DfmMonitoringTimestamp: - if 'lun' != timestamp.MonitorName: - continue - if timestamp.LastMonitoringTimestamp: - return - - def _destroy_lun(self, host_id, lun_path): - """Destroy a LUN on the filer.""" - request = self.client.factory.create('Request') - request.Name = 'lun-offline' - path_xml = '%s' - request.Args = text.Raw(path_xml % lun_path) - response = self.client.service.ApiProxy(Target=host_id, - Request=request) - self._check_fail(request, response) - request = self.client.factory.create('Request') - request.Name = 'lun-destroy' - request.Args = text.Raw(path_xml % lun_path) - response = self.client.service.ApiProxy(Target=host_id, - Request=request) - self._check_fail(request, response) - - def _resize_volume(self, host_id, vol_name, new_size): - """Resize the volume by the amount requested.""" - request = self.client.factory.create('Request') - request.Name = 'volume-size' - volume_size_xml = ( - '%s%s') - request.Args = text.Raw(volume_size_xml % (vol_name, new_size)) - response = self.client.service.ApiProxy(Target=host_id, - Request=request) - self._check_fail(request, response) - - def _create_qtree(self, host_id, vol_name, qtree_name): - """Create a qtree the filer.""" - request = self.client.factory.create('Request') - request.Name = 'qtree-create' - qtree_create_xml = ( - '0755%s%s') - request.Args = text.Raw(qtree_create_xml % (vol_name, qtree_name)) - response = self.client.service.ApiProxy(Target=host_id, - Request=request) - self._check_fail(request, response) - - def create_snapshot(self, snapshot): - """Driver entry point for creating a snapshot. - - This driver implements snapshots by using efficient single-file - (LUN) cloning. 
- """ - vol_name = snapshot['volume_name'] - snapshot_name = snapshot['name'] - project = snapshot['project_id'] - lun = self._lookup_lun_for_volume(vol_name, project) - lun_id = lun.id - lun = self._get_lun_details(lun_id) - extra_gb = snapshot['volume_size'] - new_size = '+%dg' % extra_gb - self._resize_volume(lun.HostId, lun.VolumeName, new_size) - # LunPath is the partial LUN path in this format: volume/qtree/lun - lun_path = str(lun.LunPath) - lun_name = lun_path[lun_path.rfind('/') + 1:] - qtree_path = '/vol/%s/%s' % (lun.VolumeName, lun.QtreeName) - src_path = '%s/%s' % (qtree_path, lun_name) - dest_path = '%s/%s' % (qtree_path, snapshot_name) - self._clone_lun(lun.HostId, src_path, dest_path, True) - - def delete_snapshot(self, snapshot): - """Driver entry point for deleting a snapshot.""" - vol_name = snapshot['volume_name'] - snapshot_name = snapshot['name'] - project = snapshot['project_id'] - lun = self._lookup_lun_for_volume(vol_name, project) - lun_id = lun.id - lun = self._get_lun_details(lun_id) - lun_path = '/vol/%s/%s/%s' % (lun.VolumeName, lun.QtreeName, - snapshot_name) - self._destroy_lun(lun.HostId, lun_path) - extra_gb = snapshot['volume_size'] - new_size = '-%dg' % extra_gb - self._resize_volume(lun.HostId, lun.VolumeName, new_size) - - def create_volume_from_snapshot(self, volume, snapshot): - """Driver entry point for creating a new volume from a snapshot. - - Many would call this "cloning" and in fact we use cloning to implement - this feature. - """ - vol_size = volume['size'] - snap_size = snapshot['volume_size'] - if vol_size != snap_size: - msg = _('Cannot create volume of size %(vol_size)s from ' - 'snapshot of size %(snap_size)s') - raise exception.VolumeBackendAPIException(data=msg % locals()) - vol_name = snapshot['volume_name'] - snapshot_name = snapshot['name'] - project = snapshot['project_id'] - lun = self._lookup_lun_for_volume(vol_name, project) - lun_id = lun.id - dataset = lun.dataset - old_type = dataset.type - new_type = self._get_ss_type(volume) - if new_type != old_type: - msg = _('Cannot create volume of type %(new_type)s from ' - 'snapshot of type %(old_type)s') - raise exception.VolumeBackendAPIException(data=msg % locals()) - lun = self._get_lun_details(lun_id) - extra_gb = vol_size - new_size = '+%dg' % extra_gb - self._resize_volume(lun.HostId, lun.VolumeName, new_size) - clone_name = volume['name'] - self._create_qtree(lun.HostId, lun.VolumeName, clone_name) - src_path = '/vol/%s/%s/%s' % (lun.VolumeName, lun.QtreeName, - snapshot_name) - dest_path = '/vol/%s/%s/%s' % (lun.VolumeName, clone_name, clone_name) - self._clone_lun(lun.HostId, src_path, dest_path, False) - self._refresh_dfm_luns(lun.HostId) - self._discover_dataset_luns(dataset, clone_name) - - -class NetAppLun(object): - """Represents a LUN on NetApp storage.""" - - def __init__(self, handle, name, size, metadata_dict): - self.handle = handle - self.name = name - self.size = size - self.metadata = metadata_dict - - def get_metadata_property(self, prop): - """Get the metadata property of a LUN.""" - if prop in self.metadata: - return self.metadata[prop] - name = self.name - msg = _("No metadata property %(prop)s defined for the LUN %(name)s") - LOG.debug(msg % locals()) - - -class NetAppCmodeISCSIDriver(driver.ISCSIDriver): - """NetApp C-mode iSCSI volume driver.""" - - def __init__(self, *args, **kwargs): - super(NetAppCmodeISCSIDriver, self).__init__(*args, **kwargs) - self.lun_table = {} - - def _create_client(self, **kwargs): - """Instantiate a web services client. 
- - This method creates a "suds" client to make web services calls to the - DFM server. Note that the WSDL file is quite large and may take - a few seconds to parse. - """ - wsdl_url = kwargs['wsdl_url'] - LOG.debug(_('Using WSDL: %s') % wsdl_url) - if kwargs['cache']: - self.client = client.Client(wsdl_url, username=kwargs['login'], - password=kwargs['password']) - else: - self.client = client.Client(wsdl_url, username=kwargs['login'], - password=kwargs['password'], - cache=None) - - def _check_flags(self): - """Ensure that the flags we care about are set.""" - required_flags = ['netapp_wsdl_url', 'netapp_login', 'netapp_password', - 'netapp_server_hostname', 'netapp_server_port'] - for flag in required_flags: - if not getattr(FLAGS, flag, None): - msg = _('%s is not set') % flag - raise exception.InvalidInput(data=msg) - - def do_setup(self, context): - """Setup the NetApp Volume driver. - - Called one time by the manager after the driver is loaded. - Validate the flags we care about and setup the suds (web services) - client. - """ - self._check_flags() - self._create_client(wsdl_url=FLAGS.netapp_wsdl_url, - login=FLAGS.netapp_login, password=FLAGS.netapp_password, - hostname=FLAGS.netapp_server_hostname, - port=FLAGS.netapp_server_port, cache=True) - - def check_for_setup_error(self): - """Check that the driver is working and can communicate. - - Discovers the LUNs on the NetApp server. - """ - self.lun_table = {} - luns = self.client.service.ListLuns() - for lun in luns: - meta_dict = {} - if hasattr(lun, 'Metadata'): - meta_dict = self._create_dict_from_meta(lun.Metadata) - discovered_lun = NetAppLun(lun.Handle, lun.Name, lun.Size, - meta_dict) - self._add_lun_to_table(discovered_lun) - LOG.debug(_("Success getting LUN list from server")) - - def create_volume(self, volume): - """Driver entry point for creating a new volume.""" - default_size = '104857600' # 100 MB - gigabytes = 1073741824L # 2^30 - name = volume['name'] - if int(volume['size']) == 0: - size = default_size - else: - size = str(int(volume['size']) * gigabytes) - extra_args = {} - extra_args['OsType'] = 'linux' - extra_args['QosType'] = self._get_qos_type(volume) - extra_args['Container'] = volume['project_id'] - extra_args['Display'] = volume['display_name'] - extra_args['Description'] = volume['display_description'] - extra_args['SpaceReserved'] = True - server = self.client.service - metadata = self._create_metadata_list(extra_args) - lun = server.ProvisionLun(Name=name, Size=size, - Metadata=metadata) - LOG.debug(_("Created LUN with name %s") % name) - self._add_lun_to_table(NetAppLun(lun.Handle, lun.Name, - lun.Size, self._create_dict_from_meta(lun.Metadata))) - - def delete_volume(self, volume): - """Driver entry point for destroying existing volumes.""" - name = volume['name'] - handle = self._get_lun_handle(name) - self.client.service.DestroyLun(Handle=handle) - LOG.debug(_("Destroyed LUN %s") % handle) - self.lun_table.pop(name) - - def ensure_export(self, context, volume): - """Driver entry point to get the export info for an existing volume.""" - handle = self._get_lun_handle(volume['name']) - return {'provider_location': handle} - - def create_export(self, context, volume): - """Driver entry point to get the export info for a new volume.""" - handle = self._get_lun_handle(volume['name']) - return {'provider_location': handle} - - def remove_export(self, context, volume): - """Driver exntry point to remove an export for a volume. 
- - Since exporting is idempotent in this driver, we have nothing - to do for unexporting. - """ - pass - - def initialize_connection(self, volume, connector): - """Driver entry point to attach a volume to an instance. - - Do the LUN masking on the storage system so the initiator can access - the LUN on the target. Also return the iSCSI properties so the - initiator can find the LUN. This implementation does not call - _get_iscsi_properties() to get the properties because cannot store the - LUN number in the database. We only find out what the LUN number will - be during this method call so we construct the properties dictionary - ourselves. - """ - initiator_name = connector['initiator'] - handle = volume['provider_location'] - server = self.client.service - server.MapLun(Handle=handle, InitiatorType="iscsi", - InitiatorName=initiator_name) - msg = _("Mapped LUN %(handle)s to the initiator %(initiator_name)s") - LOG.debug(msg % locals()) - - target_details_list = server.GetLunTargetDetails(Handle=handle, - InitiatorType="iscsi", InitiatorName=initiator_name) - msg = _("Succesfully fetched target details for LUN %(handle)s and " - "initiator %(initiator_name)s") - LOG.debug(msg % locals()) - - if not target_details_list: - msg = _('Failed to get LUN target details for the LUN %s') - raise exception.VolumeBackendAPIException(data=msg % handle) - target_details = target_details_list[0] - if not target_details.Address and target_details.Port: - msg = _('Failed to get target portal for the LUN %s') - raise exception.VolumeBackendAPIException(data=msg % handle) - iqn = target_details.Iqn - if not iqn: - msg = _('Failed to get target IQN for the LUN %s') - raise exception.VolumeBackendAPIException(data=msg % handle) - - properties = {} - properties['target_discovered'] = False - (address, port) = (target_details.Address, target_details.Port) - properties['target_portal'] = '%s:%s' % (address, port) - properties['target_iqn'] = iqn - properties['target_lun'] = target_details.LunNumber - properties['volume_id'] = volume['id'] - - auth = volume['provider_auth'] - if auth: - (auth_method, auth_username, auth_secret) = auth.split() - properties['auth_method'] = auth_method - properties['auth_username'] = auth_username - properties['auth_password'] = auth_secret - - return { - 'driver_volume_type': 'iscsi', - 'data': properties, - } - - def terminate_connection(self, volume, connector): - """Driver entry point to unattach a volume from an instance. - - Unmask the LUN on the storage system so the given intiator can no - longer access it. - """ - initiator_name = connector['initiator'] - handle = volume['provider_location'] - self.client.service.UnmapLun(Handle=handle, InitiatorType="iscsi", - InitiatorName=initiator_name) - msg = _("Unmapped LUN %(handle)s from the initiator " - "%(initiator_name)s") - LOG.debug(msg % locals()) - - def create_snapshot(self, snapshot): - """Driver entry point for creating a snapshot. - - This driver implements snapshots by using efficient single-file - (LUN) cloning. 
- """ - vol_name = snapshot['volume_name'] - snapshot_name = snapshot['name'] - lun = self.lun_table[vol_name] - extra_args = {'SpaceReserved': False} - self._clone_lun(lun.handle, snapshot_name, extra_args) - - def delete_snapshot(self, snapshot): - """Driver entry point for deleting a snapshot.""" - handle = self._get_lun_handle(snapshot['name']) - self.client.service.DestroyLun(Handle=handle) - LOG.debug(_("Destroyed LUN %s") % handle) - - def create_volume_from_snapshot(self, volume, snapshot): - """Driver entry point for creating a new volume from a snapshot. - - Many would call this "cloning" and in fact we use cloning to implement - this feature. - """ - snapshot_name = snapshot['name'] - lun = self.lun_table[snapshot_name] - new_name = volume['name'] - extra_args = {} - extra_args['OsType'] = 'linux' - extra_args['QosType'] = self._get_qos_type(volume) - extra_args['Container'] = volume['project_id'] - extra_args['Display'] = volume['display_name'] - extra_args['Description'] = volume['display_description'] - extra_args['SpaceReserved'] = True - self._clone_lun(lun.handle, new_name, extra_args) - - def _get_qos_type(self, volume): - """Get the storage service type for a volume.""" - type_id = volume['volume_type_id'] - if not type_id: - return None - volume_type = volume_types.get_volume_type(None, type_id) - if not volume_type: - return None - return volume_type['name'] - - def _add_lun_to_table(self, lun): - """Adds LUN to cache table.""" - if not isinstance(lun, NetAppLun): - msg = _("Object is not a NetApp LUN.") - raise exception.VolumeBackendAPIException(data=msg) - self.lun_table[lun.name] = lun - - def _clone_lun(self, handle, new_name, extra_args): - """Clone LUN with the given handle to the new name.""" - server = self.client.service - metadata = self._create_metadata_list(extra_args) - lun = server.CloneLun(Handle=handle, NewName=new_name, - Metadata=metadata) - LOG.debug(_("Cloned LUN with new name %s") % new_name) - self._add_lun_to_table(NetAppLun(lun.Handle, lun.Name, - lun.Size, self._create_dict_from_meta(lun.Metadata))) - - def _create_metadata_list(self, extra_args): - """Creates metadata from kwargs.""" - metadata = [] - for key in extra_args.keys(): - meta = self.client.factory.create("Metadata") - meta.Key = key - meta.Value = extra_args[key] - metadata.append(meta) - return metadata - - def _get_lun_handle(self, name): - """Get the details for a LUN from our cache table.""" - if not name in self.lun_table: - LOG.warn(_("Could not find handle for LUN named %s") % name) - return None - return self.lun_table[name].handle - - def _create_dict_from_meta(self, metadata): - """Creates dictionary from metadata array.""" - meta_dict = {} - if not metadata: - return meta_dict - for meta in metadata: - meta_dict[meta.Key] = meta.Value - return meta_dict diff --git a/cinder/volume/netapp_nfs.py b/cinder/volume/netapp_nfs.py deleted file mode 100644 index dd69c6deca..0000000000 --- a/cinder/volume/netapp_nfs.py +++ /dev/null @@ -1,266 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright (c) 2012 NetApp, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Volume driver for NetApp NFS storage. -""" - -import os -import time -import suds -from suds.sax import text - -from cinder import exception -from cinder import flags -from cinder.openstack.common import cfg -from cinder.openstack.common import log as logging -from cinder.volume import nfs -from cinder.volume.netapp import netapp_opts - -LOG = logging.getLogger("cinder.volume.driver") - -netapp_nfs_opts = [ - cfg.IntOpt('synchronous_snapshot_create', - default=0, - help='Does snapshot creation call returns immediately') - ] - -FLAGS = flags.FLAGS -FLAGS.register_opts(netapp_opts) -FLAGS.register_opts(netapp_nfs_opts) - - -class NetAppNFSDriver(nfs.NfsDriver): - """Executes commands relating to Volumes.""" - def __init__(self, *args, **kwargs): - # NOTE(vish): db is set by Manager - self._execute = None - self._context = None - super(NetAppNFSDriver, self).__init__(*args, **kwargs) - - def set_execute(self, execute): - self._execute = execute - - def do_setup(self, context): - self._context = context - self.check_for_setup_error() - self._client = NetAppNFSDriver._get_client() - - def check_for_setup_error(self): - """Returns an error if prerequisites aren't met""" - NetAppNFSDriver._check_dfm_flags() - super(NetAppNFSDriver, self).check_for_setup_error() - - def create_volume_from_snapshot(self, volume, snapshot): - """Creates a volume from a snapshot.""" - vol_size = volume.size - snap_size = snapshot.volume_size - - if vol_size != snap_size: - msg = _('Cannot create volume of size %(vol_size)s from ' - 'snapshot of size %(snap_size)s') - raise exception.CinderException(msg % locals()) - - self._clone_volume(snapshot.name, volume.name, snapshot.volume_id) - share = self._get_volume_location(snapshot.volume_id) - - return {'provider_location': share} - - def create_snapshot(self, snapshot): - """Creates a snapshot.""" - self._clone_volume(snapshot['volume_name'], - snapshot['name'], - snapshot['volume_id']) - - def delete_snapshot(self, snapshot): - """Deletes a snapshot.""" - nfs_mount = self._get_provider_location(snapshot.volume_id) - - if self._volume_not_present(nfs_mount, snapshot.name): - return True - - self._execute('rm', self._get_volume_path(nfs_mount, snapshot.name), - run_as_root=True) - - @staticmethod - def _check_dfm_flags(): - """Raises error if any required configuration flag for OnCommand proxy - is missing.""" - required_flags = ['netapp_wsdl_url', - 'netapp_login', - 'netapp_password', - 'netapp_server_hostname', - 'netapp_server_port'] - for flag in required_flags: - if not getattr(FLAGS, flag, None): - raise exception.CinderException(_('%s is not set') % flag) - - @staticmethod - def _get_client(): - """Creates SOAP _client for ONTAP-7 DataFabric Service.""" - client = suds.client.Client(FLAGS.netapp_wsdl_url, - username=FLAGS.netapp_login, - password=FLAGS.netapp_password) - soap_url = 'http://%s:%s/apis/soap/v1' % ( - FLAGS.netapp_server_hostname, - FLAGS.netapp_server_port) - client.set_options(location=soap_url) - - return client - - def _get_volume_location(self, volume_id): - """Returns NFS mount address as :""" - nfs_server_ip = 
self._get_host_ip(volume_id) - export_path = self._get_export_path(volume_id) - return (nfs_server_ip + ':' + export_path) - - def _clone_volume(self, volume_name, clone_name, volume_id): - """Clones mounted volume with OnCommand proxy API""" - host_id = self._get_host_id(volume_id) - export_path = self._get_full_export_path(volume_id, host_id) - - request = self._client.factory.create('Request') - request.Name = 'clone-start' - - clone_start_args = ('%s/%s' - '%s/%s') - - request.Args = text.Raw(clone_start_args % (export_path, - volume_name, - export_path, - clone_name)) - - resp = self._client.service.ApiProxy(Target=host_id, - Request=request) - - if resp.Status == 'passed' and FLAGS.synchronous_snapshot_create: - clone_id = resp.Results['clone-id'][0] - clone_id_info = clone_id['clone-id-info'][0] - clone_operation_id = int(clone_id_info['clone-op-id'][0]) - - self._wait_for_clone_finished(clone_operation_id, host_id) - elif resp.Status == 'failed': - raise exception.CinderException(resp.Reason) - - def _wait_for_clone_finished(self, clone_operation_id, host_id): - """ - Polls ONTAP7 for clone status. Returns once clone is finished. - :param clone_operation_id: Identifier of ONTAP clone operation - """ - clone_list_options = ('' - '' - '%d' - '' - '' - '') - - request = self._client.factory.create('Request') - request.Name = 'clone-list-status' - request.Args = text.Raw(clone_list_options % clone_operation_id) - - resp = self._client.service.ApiProxy(Target=host_id, Request=request) - - while resp.Status != 'passed': - time.sleep(1) - resp = self._client.service.ApiProxy(Target=host_id, - Request=request) - - def _get_provider_location(self, volume_id): - """ - Returns provider location for given volume - :param volume_id: - """ - volume = self.db.volume_get(self._context, volume_id) - return volume.provider_location - - def _get_host_ip(self, volume_id): - """Returns IP address for the given volume""" - return self._get_provider_location(volume_id).split(':')[0] - - def _get_export_path(self, volume_id): - """Returns NFS export path for the given volume""" - return self._get_provider_location(volume_id).split(':')[1] - - def _get_host_id(self, volume_id): - """Returns ID of the ONTAP-7 host""" - host_ip = self._get_host_ip(volume_id) - server = self._client.service - - resp = server.HostListInfoIterStart(ObjectNameOrId=host_ip) - tag = resp.Tag - - try: - res = server.HostListInfoIterNext(Tag=tag, Maximum=1) - if hasattr(res, 'Hosts') and res.Hosts.HostInfo: - return res.Hosts.HostInfo[0].HostId - finally: - server.HostListInfoIterEnd(Tag=tag) - - def _get_full_export_path(self, volume_id, host_id): - """Returns full path to the NFS share, e.g. 
/vol/vol0/home""" - export_path = self._get_export_path(volume_id) - command_args = '%s' - - request = self._client.factory.create('Request') - request.Name = 'nfs-exportfs-storage-path' - request.Args = text.Raw(command_args % export_path) - - resp = self._client.service.ApiProxy(Target=host_id, - Request=request) - - if resp.Status == 'passed': - return resp.Results['actual-pathname'][0] - elif resp.Status == 'failed': - raise exception.CinderException(resp.Reason) - - def _volume_not_present(self, nfs_mount, volume_name): - """ - Check if volume exists - """ - try: - self._try_execute('ls', self._get_volume_path(nfs_mount, - volume_name)) - except exception.ProcessExecutionError: - # If the volume isn't present - return True - return False - - def _try_execute(self, *command, **kwargs): - # NOTE(vish): Volume commands can partially fail due to timing, but - # running them a second time on failure will usually - # recover nicely. - tries = 0 - while True: - try: - self._execute(*command, **kwargs) - return True - except exception.ProcessExecutionError: - tries = tries + 1 - if tries >= FLAGS.num_shell_tries: - raise - LOG.exception(_("Recovering from a failed execute. " - "Try number %s"), tries) - time.sleep(tries ** 2) - - def _get_volume_path(self, nfs_share, volume_name): - """Get volume path (local fs path) for given volume name on given nfs - share - @param nfs_share string, example 172.18.194.100:/var/nfs - @param volume_name string, - example volume-91ee65ec-c473-4391-8c09-162b00c68a8c - """ - return os.path.join(self._get_mount_point_for_share(nfs_share), - volume_name) diff --git a/cinder/volume/nexenta/volume.py b/cinder/volume/nexenta/volume.py deleted file mode 100644 index 9bac207ec8..0000000000 --- a/cinder/volume/nexenta/volume.py +++ /dev/null @@ -1,282 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2011 Nexenta Systems, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -:mod:`nexenta.volume` -- Driver to store volumes on Nexenta Appliance -===================================================================== - -.. automodule:: nexenta.volume -.. 
moduleauthor:: Yuriy Taraday -""" - -from cinder import exception -from cinder import flags -from cinder.openstack.common import log as logging -from cinder.openstack.common import cfg -from cinder.volume import driver -from cinder.volume import nexenta -from cinder.volume.nexenta import jsonrpc - -LOG = logging.getLogger("cinder.volume.nexenta.volume") -FLAGS = flags.FLAGS - -nexenta_opts = [ - cfg.StrOpt('nexenta_host', - default='', - help='IP address of Nexenta SA'), - cfg.IntOpt('nexenta_rest_port', - default=2000, - help='HTTP port to connect to Nexenta REST API server'), - cfg.StrOpt('nexenta_rest_protocol', - default='auto', - help='Use http or https for REST connection (default auto)'), - cfg.StrOpt('nexenta_user', - default='admin', - help='User name to connect to Nexenta SA'), - cfg.StrOpt('nexenta_password', - default='nexenta', - help='Password to connect to Nexenta SA'), - cfg.IntOpt('nexenta_iscsi_target_portal_port', - default=3260, - help='Nexenta target portal port'), - cfg.StrOpt('nexenta_volume', - default='cinder', - help='pool on SA that will hold all volumes'), - cfg.StrOpt('nexenta_target_prefix', - default='iqn.1986-03.com.sun:02:cinder-', - help='IQN prefix for iSCSI targets'), - cfg.StrOpt('nexenta_target_group_prefix', - default='cinder/', - help='prefix for iSCSI target groups on SA'), - cfg.StrOpt('nexenta_blocksize', - default='', - help='block size for volumes (blank=default,8KB)'), - cfg.BoolOpt('nexenta_sparse', - default=False, - help='flag to create sparse volumes'), -] -FLAGS.register_opts(nexenta_opts) - - -class NexentaDriver(driver.ISCSIDriver): # pylint: disable=R0921 - """Executes volume driver commands on Nexenta Appliance.""" - - def __init__(self): - super(NexentaDriver, self).__init__() - - def do_setup(self, context): - protocol = FLAGS.nexenta_rest_protocol - auto = protocol == 'auto' - if auto: - protocol = 'http' - self.nms = jsonrpc.NexentaJSONProxy( - '%s://%s:%s/rest/nms/' % (protocol, FLAGS.nexenta_host, - FLAGS.nexenta_rest_port), - FLAGS.nexenta_user, FLAGS.nexenta_password, auto=auto) - - def check_for_setup_error(self): - """Verify that the volume for our zvols exists. - - :raise: :py:exc:`LookupError` - """ - if not self.nms.volume.object_exists(FLAGS.nexenta_volume): - raise LookupError(_("Volume %s does not exist in Nexenta SA"), - FLAGS.nexenta_volume) - - @staticmethod - def _get_zvol_name(volume_name): - """Return zvol name that corresponds given volume name.""" - return '%s/%s' % (FLAGS.nexenta_volume, volume_name) - - @staticmethod - def _get_target_name(volume_name): - """Return iSCSI target name to access volume.""" - return '%s%s' % (FLAGS.nexenta_target_prefix, volume_name) - - @staticmethod - def _get_target_group_name(volume_name): - """Return Nexenta iSCSI target group name for volume.""" - return '%s%s' % (FLAGS.nexenta_target_group_prefix, volume_name) - - def create_volume(self, volume): - """Create a zvol on appliance. - - :param volume: volume reference - """ - self.nms.zvol.create( - self._get_zvol_name(volume['name']), - '%sG' % (volume['size'],), - FLAGS.nexenta_blocksize, FLAGS.nexenta_sparse) - - def delete_volume(self, volume): - """Destroy a zvol on appliance. - - :param volume: volume reference - """ - try: - self.nms.zvol.destroy(self._get_zvol_name(volume['name']), '') - except nexenta.NexentaException as exc: - if "zvol has children" in exc.args[1]: - raise exception.VolumeIsBusy - else: - raise - - def create_snapshot(self, snapshot): - """Create snapshot of existing zvol on appliance. 
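The Nexenta driver above maps Cinder objects straight onto ZFS names: a volume
becomes a zvol under the pool named by nexenta_volume, and
create_volume_from_snapshot (below) clones from ZFS's pool/volume@snapshot
notation. A minimal sketch of that naming, with the pool and object names as
illustrative values only::

    # Sketch of the ZFS naming used above; 'cinder' is the default of
    # FLAGS.nexenta_volume, and the object names are illustrative.
    pool = 'cinder'

    def zvol_name(volume_name):
        return '%s/%s' % (pool, volume_name)

    def snapshot_path(volume_name, snap_name):
        # ZFS snapshot notation: <pool>/<zvol>@<snapshot>
        return '%s@%s' % (zvol_name(volume_name), snap_name)

    print(zvol_name('volume-0001'))                   # cinder/volume-0001
    print(snapshot_path('volume-0001', 'snap-0001'))  # cinder/volume-0001@snap-0001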
- - :param snapshot: shapshot reference - """ - self.nms.zvol.create_snapshot( - self._get_zvol_name(snapshot['volume_name']), - snapshot['name'], '') - - def create_volume_from_snapshot(self, volume, snapshot): - """Create new volume from other's snapshot on appliance. - - :param volume: reference of volume to be created - :param snapshot: reference of source snapshot - """ - self.nms.zvol.clone( - '%s@%s' % (self._get_zvol_name(snapshot['volume_name']), - snapshot['name']), - self._get_zvol_name(volume['name'])) - - def delete_snapshot(self, snapshot): - """Delete volume's snapshot on appliance. - - :param snapshot: shapshot reference - """ - try: - self.nms.snapshot.destroy( - '%s@%s' % (self._get_zvol_name(snapshot['volume_name']), - snapshot['name']), - '') - except nexenta.NexentaException as exc: - if "snapshot has dependent clones" in exc.args[1]: - raise exception.SnapshotIsBusy - else: - raise - - def local_path(self, volume): - """Return local path to existing local volume. - - We never have local volumes, so it raises NotImplementedError. - - :raise: :py:exc:`NotImplementedError` - """ - LOG.error(_("Call to local_path should not happen." - " Verify that use_local_volumes flag is turned off.")) - raise NotImplementedError - - def _do_export(self, _ctx, volume, ensure=False): - """Do all steps to get zvol exported as LUN 0 at separate target. - - :param volume: reference of volume to be exported - :param ensure: if True, ignore errors caused by already existing - resources - :return: iscsiadm-formatted provider location string - """ - zvol_name = self._get_zvol_name(volume['name']) - target_name = self._get_target_name(volume['name']) - target_group_name = self._get_target_group_name(volume['name']) - - try: - self.nms.iscsitarget.create_target({'target_name': target_name}) - except nexenta.NexentaException as exc: - if not ensure or 'already configured' not in exc.args[1]: - raise - else: - LOG.info(_('Ignored target creation error "%s"' - ' while ensuring export'), exc) - try: - self.nms.stmf.create_targetgroup(target_group_name) - except nexenta.NexentaException as exc: - if not ensure or 'already exists' not in exc.args[1]: - raise - else: - LOG.info(_('Ignored target group creation error "%s"' - ' while ensuring export'), exc) - try: - self.nms.stmf.add_targetgroup_member(target_group_name, - target_name) - except nexenta.NexentaException as exc: - if not ensure or 'already exists' not in exc.args[1]: - raise - else: - LOG.info(_('Ignored target group member addition error "%s"' - ' while ensuring export'), exc) - try: - self.nms.scsidisk.create_lu(zvol_name, {}) - except nexenta.NexentaException as exc: - if not ensure or 'in use' not in exc.args[1]: - raise - else: - LOG.info(_('Ignored LU creation error "%s"' - ' while ensuring export'), exc) - try: - self.nms.scsidisk.add_lun_mapping_entry(zvol_name, { - 'target_group': target_group_name, - 'lun': '0'}) - except nexenta.NexentaException as exc: - if not ensure or 'view entry exists' not in exc.args[1]: - raise - else: - LOG.info(_('Ignored LUN mapping entry addition error "%s"' - ' while ensuring export'), exc) - return '%s:%s,1 %s' % (FLAGS.nexenta_host, - FLAGS.nexenta_iscsi_target_portal_port, - target_name) - - def create_export(self, _ctx, volume): - """Create new export for zvol. 
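The _do_export method above is deliberately idempotent: each step (target,
target group, membership, LU, LUN mapping) tolerates "already exists" errors
when ensure=True, and the method ends by returning an iscsiadm-formatted
location string built with '%s:%s,1 %s'. A small sketch of producing and
parsing that string, with a placeholder address and IQN::

    # Sketch of the provider_location string _do_export returns; the
    # address and IQN are placeholders.
    host, port = '192.0.2.10', 3260   # nexenta_host / portal port
    target = 'iqn.1986-03.com.sun:02:cinder-volume-0001'

    location = '%s:%s,1 %s' % (host, port, target)
    # '192.0.2.10:3260,1 iqn.1986-03.com.sun:02:cinder-volume-0001'

    portal, iqn = location.split(' ')
    assert portal.split(':')[0] == host
    assert iqn == target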
- - :param volume: reference of volume to be exported - :return: iscsiadm-formatted provider location string - """ - loc = self._do_export(_ctx, volume, ensure=False) - return {'provider_location': loc} - - def ensure_export(self, _ctx, volume): - """Recreate parts of export if necessary. - - :param volume: reference of volume to be exported - """ - self._do_export(_ctx, volume, ensure=True) - - def remove_export(self, _ctx, volume): - """Destroy all resources created to export zvol. - - :param volume: reference of volume to be unexported - """ - zvol_name = self._get_zvol_name(volume['name']) - target_name = self._get_target_name(volume['name']) - target_group_name = self._get_target_group_name(volume['name']) - self.nms.scsidisk.delete_lu(zvol_name) - - try: - self.nms.stmf.destroy_targetgroup(target_group_name) - except nexenta.NexentaException as exc: - # We assume that target group is already gone - LOG.warn(_('Got error trying to destroy target group' - ' %(target_group)s, assuming it is already gone: %(exc)s'), - {'target_group': target_group_name, 'exc': exc}) - try: - self.nms.iscsitarget.delete_target(target_name) - except nexenta.NexentaException as exc: - # We assume that target is gone as well - LOG.warn(_('Got error trying to delete target %(target)s,' - ' assuming it is already gone: %(exc)s'), - {'target': target_name, 'exc': exc}) diff --git a/cinder/volume/nfs.py b/cinder/volume/nfs.py deleted file mode 100644 index 2d83ba6eed..0000000000 --- a/cinder/volume/nfs.py +++ /dev/null @@ -1,290 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright (c) 2012 NetApp, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os -import errno -import hashlib - -from cinder import flags -from cinder.openstack.common import cfg -from cinder.openstack.common import log as logging -from cinder.volume import driver -from cinder import exception - -LOG = logging.getLogger("cinder.volume.driver") - -volume_opts = [ - cfg.StrOpt('nfs_shares_config', - default=None, - help='File with the list of available nfs shares'), - cfg.StrOpt('nfs_mount_point_base', - default='$state_path/mnt', - help='Base dir where nfs expected to be mounted'), - cfg.StrOpt('nfs_disk_util', - default='df', - help='Use du or df for free space calculation'), - cfg.BoolOpt('nfs_sparsed_volumes', - default=True, - help=('Create volumes as sparsed files which take no space.' - 'If set to False volume is created as regular file.' - 'In such case volume creation takes a lot of time.')) -] - -FLAGS = flags.FLAGS -FLAGS.register_opts(volume_opts) - - -class NfsDriver(driver.VolumeDriver): - """NFS based cinder driver. 
Creates file on NFS share for using it - as block device on hypervisor.""" - - def do_setup(self, context): - """Any initialization the volume driver does while starting""" - super(NfsDriver, self).do_setup(context) - - config = FLAGS.nfs_shares_config - if not config: - LOG.warn(_("There's no NFS config file configured ")) - if not config or not os.path.exists(config): - msg = _("NFS config file doesn't exist") - LOG.warn(msg) - raise exception.NfsException(msg) - - try: - self._execute('mount.nfs', check_exit_code=False) - except OSError as exc: - if exc.errno == errno.ENOENT: - raise exception.NfsException('mount.nfs is not installed') - else: - raise - - def check_for_setup_error(self): - """Just to override parent behavior""" - pass - - def create_volume(self, volume): - """Creates a volume""" - - self._ensure_shares_mounted() - - volume['provider_location'] = self._find_share(volume['size']) - - LOG.info(_('casted to %s') % volume['provider_location']) - - self._do_create_volume(volume) - - return {'provider_location': volume['provider_location']} - - def delete_volume(self, volume): - """Deletes a logical volume.""" - - if not volume['provider_location']: - LOG.warn(_('Volume %s does not have provider_location specified, ' - 'skipping'), volume['name']) - return - - self._ensure_share_mounted(volume['provider_location']) - - mounted_path = self.local_path(volume) - - if not self._path_exists(mounted_path): - volume = volume['name'] - - LOG.warn(_('Trying to delete non-existing volume %(volume)s at ' - 'path %(mounted_path)s') % locals()) - return - - self._execute('rm', '-f', mounted_path, run_as_root=True) - - def ensure_export(self, ctx, volume): - """Synchronously recreates an export for a logical volume.""" - self._ensure_share_mounted(volume['provider_location']) - - def create_export(self, ctx, volume): - """Exports the volume. Can optionally return a Dictionary of changes - to the volume object to be persisted.""" - pass - - def remove_export(self, ctx, volume): - """Removes an export for a logical volume.""" - pass - - def initialize_connection(self, volume, connector): - """Allow connection to connector and return connection info.""" - data = {'export': volume['provider_location'], - 'name': volume['name']} - return { - 'driver_volume_type': 'nfs', - 'data': data - } - - def terminate_connection(self, volume, connector): - """Disallow connection from connector""" - pass - - def local_path(self, volume): - """Get volume path (mounted locally fs path) for given volume - :param volume: volume reference - """ - nfs_share = volume['provider_location'] - return os.path.join(self._get_mount_point_for_share(nfs_share), - volume['name']) - - def _create_sparsed_file(self, path, size): - """Creates file with 0 disk usage""" - self._execute('truncate', '-s', self._sizestr(size), - path, run_as_root=True) - - def _create_regular_file(self, path, size): - """Creates regular file of given size. 
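The two file-creation paths above trade speed for fully allocated storage:
truncate -s produces a sparse file in constant time, while the dd path just
below writes zeros for the entire requested size. The block arithmetic in
_create_regular_file works out as follows, shown here for a 2 GB volume::

    # dd sizing from _create_regular_file, worked for a 2 GB volume.
    KB = 1024
    MB = KB * 1024
    GB = MB * 1024

    size = 2                    # volume size in GB
    block_size_mb = 1           # dd block size: 1 MiB
    block_count = size * GB // (block_size_mb * MB)
    assert block_count == 2048  # 2048 x 1 MiB blocks == 2 GiB of zeros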
Takes a lot of time for large - files""" - KB = 1024 - MB = KB * 1024 - GB = MB * 1024 - - block_size_mb = 1 - block_count = size * GB / (block_size_mb * MB) - - self._execute('dd', 'if=/dev/zero', 'of=%s' % path, - 'bs=%dM' % block_size_mb, - 'count=%d' % block_count, - run_as_root=True) - - def _set_rw_permissions_for_all(self, path): - """Sets 666 permissions for the path""" - self._execute('chmod', 'ugo+rw', path, run_as_root=True) - - def _do_create_volume(self, volume): - """Create a volume on given nfs_share - :param volume: volume reference - """ - volume_path = self.local_path(volume) - volume_size = volume['size'] - - if FLAGS.nfs_sparsed_volumes: - self._create_sparsed_file(volume_path, volume_size) - else: - self._create_regular_file(volume_path, volume_size) - - self._set_rw_permissions_for_all(volume_path) - - def _ensure_shares_mounted(self): - """Look for NFS shares in the flags and tries to mount them locally""" - self._mounted_shares = [] - - for share in self._load_shares_config(): - try: - self._ensure_share_mounted(share) - self._mounted_shares.append(share) - except Exception, exc: - LOG.warning('Exception during mounting %s' % (exc,)) - - LOG.debug('Available shares %s' % str(self._mounted_shares)) - - def _load_shares_config(self): - return [share.strip() for share in open(FLAGS.nfs_shares_config) - if share and not share.startswith('#')] - - def _ensure_share_mounted(self, nfs_share): - """Mount NFS share - :param nfs_share: - """ - mount_path = self._get_mount_point_for_share(nfs_share) - self._mount_nfs(nfs_share, mount_path, ensure=True) - - def _find_share(self, volume_size_for): - """Choose NFS share among available ones for given volume size. Current - implementation looks for greatest capacity - :param volume_size_for: int size in Gb - """ - - if not self._mounted_shares: - raise exception.NfsNoSharesMounted() - - greatest_size = 0 - greatest_share = None - - for nfs_share in self._mounted_shares: - capacity = self._get_available_capacity(nfs_share) - if capacity > greatest_size: - greatest_share = nfs_share - greatest_size = capacity - - if volume_size_for * 1024 * 1024 * 1024 > greatest_size: - raise exception.NfsNoSuitableShareFound( - volume_size=volume_size_for) - return greatest_share - - def _get_mount_point_for_share(self, nfs_share): - """ - :param nfs_share: example 172.18.194.100:/var/nfs - """ - return os.path.join(FLAGS.nfs_mount_point_base, - self._get_hash_str(nfs_share)) - - def _get_available_capacity(self, nfs_share): - """Calculate available space on the NFS share - :param nfs_share: example 172.18.194.100:/var/nfs - """ - mount_point = self._get_mount_point_for_share(nfs_share) - - out, _ = self._execute('df', '-P', '-B', '1', mount_point, - run_as_root=True) - out = out.splitlines()[1] - - available = 0 - - if FLAGS.nfs_disk_util == 'df': - available = int(out.split()[3]) - else: - size = int(out.split()[1]) - out, _ = self._execute('du', '-sb', '--apparent-size', - '--exclude', '*snapshot*', mount_point, - run_as_root=True) - used = int(out.split()[0]) - available = size - used - - return available - - def _mount_nfs(self, nfs_share, mount_path, ensure=False): - """Mount NFS share to mount path""" - if not self._path_exists(mount_path): - self._execute('mkdir', '-p', mount_path) - - try: - self._execute('mount', '-t', 'nfs', nfs_share, mount_path, - run_as_root=True) - except exception.ProcessExecutionError as exc: - if ensure and 'already mounted' in exc.stderr: - LOG.warn(_("%s is already mounted"), nfs_share) - else: - raise - - def 
_path_exists(self, path): - """Check given path """ - try: - self._execute('stat', path, run_as_root=True) - return True - except exception.ProcessExecutionError as exc: - if 'No such file or directory' in exc.stderr: - return False - else: - raise - - def _get_hash_str(self, base_str): - """returns string that represents hash of base_str (in a hex format)""" - return hashlib.md5(base_str).hexdigest() diff --git a/cinder/volume/qos_specs.py b/cinder/volume/qos_specs.py new file mode 100644 index 0000000000..f905f748b8 --- /dev/null +++ b/cinder/volume/qos_specs.py @@ -0,0 +1,287 @@ +# Copyright (c) 2013 eBay Inc. +# Copyright (c) 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""The QoS Specs Implementation""" + + +from oslo.config import cfg + +from cinder import context +from cinder import db +from cinder import exception +from cinder.openstack.common.db import exception as db_exc +from cinder.openstack.common import log as logging +from cinder.volume import volume_types + + +CONF = cfg.CONF +LOG = logging.getLogger(__name__) + +CONTROL_LOCATION = ['front-end', 'back-end', 'both'] + + +def _verify_prepare_qos_specs(specs, create=True): + """Check if 'consumer' value in qos specs is valid. + + Verify 'consumer' value in qos_specs is valid, raise + exception if not. Assign default value to 'consumer', which + is 'back-end' if input is empty. + + :params create a flag indicate if specs being verified is + for create. If it's false, that means specs is for update, + so that there's no need to add 'consumer' if that wasn't in + specs. + """ + + # Check control location, if it's missing in input, assign default + # control location: 'front-end' + if not specs: + specs = {} + # remove 'name' since we will handle that elsewhere. + if specs.get('name', None): + del specs['name'] + try: + if specs['consumer'] not in CONTROL_LOCATION: + msg = _("Valid consumer of QoS specs are: %s") % CONTROL_LOCATION + raise exception.InvalidQoSSpecs(reason=msg) + except KeyError: + # Default consumer is back-end, i.e Cinder volume service + if create: + specs['consumer'] = 'back-end' + + return specs + + +def create(context, name, specs=None): + """Creates qos_specs. + + :param specs dictionary that contains specifications for QoS + e.g. {'consumer': 'front-end', + 'total_iops_sec': 1000, + 'total_bytes_sec': 1024000} + """ + _verify_prepare_qos_specs(specs) + + values = dict(name=name, qos_specs=specs) + + LOG.debug("Dict for qos_specs: %s" % values) + + try: + qos_specs_ref = db.qos_specs_create(context, values) + except db_exc.DBError as e: + LOG.exception(_('DB error: %s') % e) + raise exception.QoSSpecsCreateFailed(name=name, + qos_specs=specs) + return qos_specs_ref + + +def update(context, qos_specs_id, specs): + """Update qos specs. + + :param specs dictionary that contains key/value pairs for updating + existing specs. + e.g. 
{'consumer': 'front-end', + 'total_iops_sec': 500, + 'total_bytes_sec': 512000,} + """ + # need to verify specs in case 'consumer' is passed + _verify_prepare_qos_specs(specs, create=False) + LOG.debug('qos_specs.update(): specs %s' % specs) + try: + res = db.qos_specs_update(context, qos_specs_id, specs) + except db_exc.DBError as e: + LOG.exception(_('DB error: %s') % e) + raise exception.QoSSpecsUpdateFailed(specs_id=qos_specs_id, + qos_specs=specs) + + return res + + +def delete(context, qos_specs_id, force=False): + """Marks qos specs as deleted. + + 'force' parameter is a flag to determine whether should destroy + should continue when there were entities associated with the qos specs. + force=True indicates caller would like to mark qos specs as deleted + even if there was entities associate with target qos specs. + Trying to delete a qos specs still associated with entities will + cause QoSSpecsInUse exception if force=False (default). + """ + if qos_specs_id is None: + msg = _("id cannot be None") + raise exception.InvalidQoSSpecs(reason=msg) + + # check if there is any entity associated with this qos specs + res = db.qos_specs_associations_get(context, qos_specs_id) + if res and not force: + raise exception.QoSSpecsInUse(specs_id=qos_specs_id) + elif res and force: + # remove all association + db.qos_specs_disassociate_all(context, qos_specs_id) + + db.qos_specs_delete(context, qos_specs_id) + + +def delete_keys(context, qos_specs_id, keys): + """Marks specified key of target qos specs as deleted.""" + if qos_specs_id is None: + msg = _("id cannot be None") + raise exception.InvalidQoSSpecs(reason=msg) + + # make sure qos_specs_id is valid + get_qos_specs(context, qos_specs_id) + for key in keys: + db.qos_specs_item_delete(context, qos_specs_id, key) + + +def get_associations(context, specs_id): + """Get all associations of given qos specs.""" + try: + # query returns a list of volume types associated with qos specs + associates = db.qos_specs_associations_get(context, specs_id) + except db_exc.DBError as e: + LOG.exception(_('DB error: %s') % e) + msg = _('Failed to get all associations of ' + 'qos specs %s') % specs_id + LOG.warn(msg) + raise exception.CinderException(message=msg) + + result = [] + for vol_type in associates: + member = dict(association_type='volume_type') + member.update(dict(name=vol_type['name'])) + member.update(dict(id=vol_type['id'])) + result.append(member) + + return result + + +def associate_qos_with_type(context, specs_id, type_id): + """Associate qos_specs with volume type. + + Associate target qos specs with specific volume type. Would raise + following exceptions: + VolumeTypeNotFound - if volume type doesn't exist; + QoSSpecsNotFound - if qos specs doesn't exist; + InvalidVolumeType - if volume type is already associated with + qos specs other than given one. 
+ QoSSpecsAssociateFailed - if there was general DB error + :param specs_id: qos specs ID to associate with + :param type_id: volume type ID to associate with + """ + try: + get_qos_specs(context, specs_id) + res = volume_types.get_volume_type_qos_specs(type_id) + if res.get('qos_specs', None): + if res['qos_specs'].get('id') != specs_id: + msg = (_("Type %(type_id)s is already associated with another " + "qos specs: %(qos_specs_id)s") % + {'type_id': type_id, + 'qos_specs_id': res['qos_specs']['id']}) + raise exception.InvalidVolumeType(reason=msg) + else: + db.qos_specs_associate(context, specs_id, type_id) + except db_exc.DBError as e: + LOG.exception(_('DB error: %s') % e) + LOG.warn(_('Failed to associate qos specs ' + '%(id)s with type: %(vol_type_id)s') % + dict(id=specs_id, vol_type_id=type_id)) + raise exception.QoSSpecsAssociateFailed(specs_id=specs_id, + type_id=type_id) + + +def disassociate_qos_specs(context, specs_id, type_id): + """Disassociate qos_specs from volume type.""" + try: + get_qos_specs(context, specs_id) + db.qos_specs_disassociate(context, specs_id, type_id) + except db_exc.DBError as e: + LOG.exception(_('DB error: %s') % e) + LOG.warn(_('Failed to disassociate qos specs ' + '%(id)s with type: %(vol_type_id)s') % + dict(id=specs_id, vol_type_id=type_id)) + raise exception.QoSSpecsDisassociateFailed(specs_id=specs_id, + type_id=type_id) + + +def disassociate_all(context, specs_id): + """Disassociate qos_specs from all entities.""" + try: + get_qos_specs(context, specs_id) + db.qos_specs_disassociate_all(context, specs_id) + except db_exc.DBError as e: + LOG.exception(_('DB error: %s') % e) + LOG.warn(_('Failed to disassociate qos specs %s.') % specs_id) + raise exception.QoSSpecsDisassociateFailed(specs_id=specs_id, + type_id=None) + + +def get_all_specs(context, inactive=False, search_opts={}): + """Get all non-deleted qos specs. + + Pass inactive=True as argument and deleted volume types would return + as well. + """ + qos_specs = db.qos_specs_get_all(context, inactive) + + if search_opts: + LOG.debug(_("Searching by: %s") % str(search_opts)) + + def _check_specs_match(qos_specs, searchdict): + for k, v in searchdict.iteritems(): + if ((k not in qos_specs['specs'].keys() or + qos_specs['specs'][k] != v)): + return False + return True + + # search_option to filter_name mapping. + filter_mapping = {'qos_specs': _check_specs_match} + + result = {} + for name, args in qos_specs.iteritems(): + # go over all filters in the list + for opt, values in search_opts.iteritems(): + try: + filter_func = filter_mapping[opt] + except KeyError: + # no such filter - ignore it, go to next filter + continue + else: + if filter_func(args, values): + result[name] = args + break + qos_specs = result + return qos_specs + + +def get_qos_specs(ctxt, id): + """Retrieves single qos specs by id.""" + if id is None: + msg = _("id cannot be None") + raise exception.InvalidQoSSpecs(reason=msg) + + if ctxt is None: + ctxt = context.get_admin_context() + + return db.qos_specs_get(ctxt, id) + + +def get_qos_specs_by_name(context, name): + """Retrieves single qos specs by name.""" + if name is None: + msg = _("name cannot be None") + raise exception.InvalidQoSSpecs(reason=msg) + + return db.qos_specs_get_by_name(context, name) diff --git a/cinder/volume/rpcapi.py b/cinder/volume/rpcapi.py new file mode 100644 index 0000000000..aaca9a1fbf --- /dev/null +++ b/cinder/volume/rpcapi.py @@ -0,0 +1,197 @@ +# Copyright 2012, Intel, Inc. 
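Taken together, the qos_specs helpers above form a small CRUD-plus-association
layer over the database API: create() validates the 'consumer' key against
CONTROL_LOCATION (defaulting it to 'back-end'), and associate_qos_with_type()
refuses to attach a volume type that already carries a different specs id. A
hypothetical call sequence might look like the following; the admin context,
the 'id' key on the returned dict, and the type id are assumptions and
placeholders, not values from this patch::

    # Hypothetical use of the qos_specs helpers above.
    from cinder import context
    from cinder.volume import qos_specs

    ctxt = context.get_admin_context()

    specs = qos_specs.create(ctxt, 'gold',
                             {'consumer': 'front-end',
                              'total_iops_sec': '1000'})

    # Raises InvalidVolumeType if the type already has different specs.
    qos_specs.associate_qos_with_type(ctxt, specs['id'], 'volume-type-id')

    for assoc in qos_specs.get_associations(ctxt, specs['id']):
        print(assoc['association_type'], assoc['name'], assoc['id'])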
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Client side of the volume RPC API. +""" + +from oslo.config import cfg + +from cinder.openstack.common import jsonutils +from cinder.openstack.common import rpc +import cinder.openstack.common.rpc.proxy + + +CONF = cfg.CONF + + +class VolumeAPI(cinder.openstack.common.rpc.proxy.RpcProxy): + '''Client side of the volume rpc API. + + API version history: + + 1.0 - Initial version. + 1.1 - Adds clone volume option to create_volume. + 1.2 - Add publish_service_capabilities() method. + 1.3 - Pass all image metadata (not just ID) in copy_volume_to_image. + 1.4 - Add request_spec, filter_properties and + allow_reschedule arguments to create_volume(). + 1.5 - Add accept_transfer. + 1.6 - Add extend_volume. + 1.7 - Adds host_name parameter to attach_volume() + to allow attaching to host rather than instance. + 1.8 - Add migrate_volume, rename_volume. + 1.9 - Add new_user and new_project to accept_transfer. + 1.10 - Add migrate_volume_completion, remove rename_volume. + 1.11 - Adds mode parameter to attach_volume() + to support volume read-only attaching. + 1.12 - Adds retype. + ''' + + BASE_RPC_API_VERSION = '1.0' + + def __init__(self, topic=None): + super(VolumeAPI, self).__init__( + topic=topic or CONF.volume_topic, + default_version=self.BASE_RPC_API_VERSION) + + def create_volume(self, ctxt, volume, host, + request_spec, filter_properties, + allow_reschedule=True, + snapshot_id=None, image_id=None, + source_volid=None): + + request_spec_p = jsonutils.to_primitive(request_spec) + self.cast(ctxt, + self.make_msg('create_volume', + volume_id=volume['id'], + request_spec=request_spec_p, + filter_properties=filter_properties, + allow_reschedule=allow_reschedule, + snapshot_id=snapshot_id, + image_id=image_id, + source_volid=source_volid), + topic=rpc.queue_get_for(ctxt, + self.topic, + host), + version='1.4') + + def delete_volume(self, ctxt, volume): + self.cast(ctxt, + self.make_msg('delete_volume', + volume_id=volume['id']), + topic=rpc.queue_get_for(ctxt, self.topic, volume['host'])) + + def create_snapshot(self, ctxt, volume, snapshot): + self.cast(ctxt, self.make_msg('create_snapshot', + volume_id=volume['id'], + snapshot_id=snapshot['id']), + topic=rpc.queue_get_for(ctxt, self.topic, volume['host'])) + + def delete_snapshot(self, ctxt, snapshot, host): + self.cast(ctxt, self.make_msg('delete_snapshot', + snapshot_id=snapshot['id']), + topic=rpc.queue_get_for(ctxt, self.topic, host)) + + def attach_volume(self, ctxt, volume, instance_uuid, host_name, + mountpoint, mode): + return self.call(ctxt, self.make_msg('attach_volume', + volume_id=volume['id'], + instance_uuid=instance_uuid, + host_name=host_name, + mountpoint=mountpoint, + mode=mode), + topic=rpc.queue_get_for(ctxt, + self.topic, + volume['host']), + version='1.11') + + def detach_volume(self, ctxt, volume): + return self.call(ctxt, self.make_msg('detach_volume', + volume_id=volume['id']), + topic=rpc.queue_get_for(ctxt, + self.topic, + volume['host'])) + + def 
copy_volume_to_image(self, ctxt, volume, image_meta): + self.cast(ctxt, self.make_msg('copy_volume_to_image', + volume_id=volume['id'], + image_meta=image_meta), + topic=rpc.queue_get_for(ctxt, + self.topic, + volume['host']), + version='1.3') + + def initialize_connection(self, ctxt, volume, connector): + return self.call(ctxt, self.make_msg('initialize_connection', + volume_id=volume['id'], + connector=connector), + topic=rpc.queue_get_for(ctxt, + self.topic, + volume['host'])) + + def terminate_connection(self, ctxt, volume, connector, force=False): + return self.call(ctxt, self.make_msg('terminate_connection', + volume_id=volume['id'], + connector=connector, + force=force), + topic=rpc.queue_get_for(ctxt, + self.topic, + volume['host'])) + + def publish_service_capabilities(self, ctxt): + self.fanout_cast(ctxt, self.make_msg('publish_service_capabilities'), + version='1.2') + + def accept_transfer(self, ctxt, volume, new_user, new_project): + self.cast(ctxt, + self.make_msg('accept_transfer', + volume_id=volume['id'], + new_user=new_user, + new_project=new_project), + topic=rpc.queue_get_for(ctxt, self.topic, volume['host']), + version='1.9') + + def extend_volume(self, ctxt, volume, new_size): + self.cast(ctxt, + self.make_msg('extend_volume', + volume_id=volume['id'], + new_size=new_size), + topic=rpc.queue_get_for(ctxt, self.topic, volume['host']), + version='1.6') + + def migrate_volume(self, ctxt, volume, dest_host, force_host_copy): + host_p = {'host': dest_host.host, + 'capabilities': dest_host.capabilities} + self.cast(ctxt, + self.make_msg('migrate_volume', + volume_id=volume['id'], + host=host_p, + force_host_copy=force_host_copy), + topic=rpc.queue_get_for(ctxt, self.topic, volume['host']), + version='1.8') + + def migrate_volume_completion(self, ctxt, volume, new_volume, error): + return self.call(ctxt, + self.make_msg('migrate_volume_completion', + volume_id=volume['id'], + new_volume_id=new_volume['id'], + error=error), + topic=rpc.queue_get_for(ctxt, self.topic, + volume['host']), + version='1.10') + + def retype(self, ctxt, volume, new_type_id, dest_host, + migration_policy='never', reservations=None): + host_p = {'host': dest_host.host, + 'capabilities': dest_host.capabilities} + self.cast(ctxt, + self.make_msg('retype', + volume_id=volume['id'], + new_type_id=new_type_id, + host=host_p, + migration_policy=migration_policy, + reservations=reservations), + topic=rpc.queue_get_for(ctxt, self.topic, volume['host']), + version='1.12') diff --git a/cinder/volume/san.py b/cinder/volume/san.py deleted file mode 100644 index 7c06d859fb..0000000000 --- a/cinder/volume/san.py +++ /dev/null @@ -1,652 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2011 Justin Santa Barbara -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Drivers for san-stored volumes. - -The unique thing about a SAN is that we don't expect that we can run the volume -controller on the SAN hardware. We expect to access it over SSH or some API. 
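Every cast and call in the VolumeAPI proxy above is pinned to the lowest RPC
version that introduced the message (see the version history in the class
docstring): create_volume goes out at '1.4', attach_volume at '1.11', retype
at '1.12', and unversioned messages ride the 1.0 default. A stripped-down
sketch of that idea, not the actual openstack.common rpc code::

    # Toy sketch of version-pinned casts, in the spirit of VolumeAPI.
    class ToyProxy(object):
        def __init__(self, default_version='1.0'):
            self.default_version = default_version

        def make_msg(self, method, **kwargs):
            return {'method': method, 'args': kwargs}

        def cast(self, ctxt, msg, topic=None, version=None):
            # Pin the message to the given version, else the base version.
            msg['version'] = version or self.default_version
            print('cast %(method)s@%(version)s' % msg + ' to %s' % topic)

    proxy = ToyProxy()
    # extend_volume was added in 1.6, so the cast is pinned there.
    proxy.cast(None, proxy.make_msg('extend_volume', volume_id='vol-1',
                                    new_size=2),
               topic='cinder-volume.host1', version='1.6')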
-""" - -import base64 -import httplib -import os -import paramiko -import random -import socket -import string -import uuid - -from lxml import etree - -from cinder import exception -from cinder import flags -from cinder.openstack.common import log as logging -from cinder.openstack.common import cfg -from cinder.openstack.common import jsonutils -from cinder import utils -import cinder.volume.driver - - -LOG = logging.getLogger(__name__) - -san_opts = [ - cfg.BoolOpt('san_thin_provision', - default=True, - help='Use thin provisioning for SAN volumes?'), - cfg.StrOpt('san_ip', - default='', - help='IP address of SAN controller'), - cfg.StrOpt('san_login', - default='admin', - help='Username for SAN controller'), - cfg.StrOpt('san_password', - default='', - help='Password for SAN controller'), - cfg.StrOpt('san_private_key', - default='', - help='Filename of private key to use for SSH authentication'), - cfg.StrOpt('san_clustername', - default='', - help='Cluster name to use for creating volumes'), - cfg.IntOpt('san_ssh_port', - default=22, - help='SSH port to use with SAN'), - cfg.BoolOpt('san_is_local', - default=False, - help='Execute commands locally instead of over SSH; ' - 'use if the volume service is running on the SAN device'), - cfg.StrOpt('san_zfs_volume_base', - default='rpool/', - help='The ZFS path under which to create zvols for volumes.'), -] - -FLAGS = flags.FLAGS -FLAGS.register_opts(san_opts) - - -class SanISCSIDriver(cinder.volume.driver.ISCSIDriver): - """Base class for SAN-style storage volumes - - A SAN-style storage value is 'different' because the volume controller - probably won't run on it, so we need to access is over SSH or another - remote protocol. - """ - - def __init__(self, *args, **kwargs): - super(SanISCSIDriver, self).__init__(*args, **kwargs) - self.run_local = FLAGS.san_is_local - - def _build_iscsi_target_name(self, volume): - return "%s%s" % (FLAGS.iscsi_target_prefix, volume['name']) - - def _connect_to_ssh(self): - ssh = paramiko.SSHClient() - #TODO(justinsb): We need a better SSH key policy - ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) - if FLAGS.san_password: - ssh.connect(FLAGS.san_ip, - port=FLAGS.san_ssh_port, - username=FLAGS.san_login, - password=FLAGS.san_password) - elif FLAGS.san_private_key: - privatekeyfile = os.path.expanduser(FLAGS.san_private_key) - # It sucks that paramiko doesn't support DSA keys - privatekey = paramiko.RSAKey.from_private_key_file(privatekeyfile) - ssh.connect(FLAGS.san_ip, - port=FLAGS.san_ssh_port, - username=FLAGS.san_login, - pkey=privatekey) - else: - msg = _("Specify san_password or san_private_key") - raise exception.InvalidInput(reason=msg) - return ssh - - def _execute(self, *cmd, **kwargs): - if self.run_local: - return utils.execute(*cmd, **kwargs) - else: - check_exit_code = kwargs.pop('check_exit_code', None) - command = ' '.join(cmd) - return self._run_ssh(command, check_exit_code) - - def _run_ssh(self, command, check_exit_code=True): - #TODO(justinsb): SSH connection caching (?) 
- ssh = self._connect_to_ssh() - - #TODO(justinsb): Reintroduce the retry hack - ret = utils.ssh_execute(ssh, command, check_exit_code=check_exit_code) - - ssh.close() - - return ret - - def ensure_export(self, context, volume): - """Synchronously recreates an export for a logical volume.""" - pass - - def create_export(self, context, volume): - """Exports the volume.""" - pass - - def remove_export(self, context, volume): - """Removes an export for a logical volume.""" - pass - - def check_for_setup_error(self): - """Returns an error if prerequisites aren't met.""" - if not self.run_local: - if not (FLAGS.san_password or FLAGS.san_private_key): - raise exception.InvalidInput( - reason=_('Specify san_password or san_private_key')) - - # The san_ip must always be set, because we use it for the target - if not (FLAGS.san_ip): - raise exception.InvalidInput(reason=_("san_ip must be set")) - - -def _collect_lines(data): - """Split lines from data into an array, trimming them """ - matches = [] - for line in data.splitlines(): - match = line.strip() - matches.append(match) - - return matches - - -def _get_prefixed_values(data, prefix): - """Collect lines which start with prefix; with trimming""" - matches = [] - for line in data.splitlines(): - line = line.strip() - if line.startswith(prefix): - match = line[len(prefix):] - match = match.strip() - matches.append(match) - - return matches - - -class SolarisISCSIDriver(SanISCSIDriver): - """Executes commands relating to Solaris-hosted ISCSI volumes. - - Basic setup for a Solaris iSCSI server: - - pkg install storage-server SUNWiscsit - - svcadm enable stmf - - svcadm enable -r svc:/network/iscsi/target:default - - pfexec itadm create-tpg e1000g0 ${MYIP} - - pfexec itadm create-target -t e1000g0 - - - Then grant the user that will be logging on lots of permissions. 
- I'm not sure exactly which though: - - zfs allow justinsb create,mount,destroy rpool - - usermod -P'File System Management' justinsb - - usermod -P'Primary Administrator' justinsb - - Also make sure you can login using san_login & san_password/san_private_key - """ - def __init__(self, *cmd, **kwargs): - super(SolarisISCSIDriver, self).__init__(*cmd, - execute=self._execute, - **kwargs) - - def _execute(self, *cmd, **kwargs): - new_cmd = ['pfexec'] - new_cmd.extend(cmd) - return super(SolarisISCSIDriver, self)._execute(*new_cmd, - **kwargs) - - def _view_exists(self, luid): - (out, _err) = self._execute('/usr/sbin/stmfadm', - 'list-view', '-l', luid, - check_exit_code=False) - if "no views found" in out: - return False - - if "View Entry:" in out: - return True - msg = _("Cannot parse list-view output: %s") % out - raise exception.VolumeBackendAPIException(data=msg) - - def _get_target_groups(self): - """Gets list of target groups from host.""" - (out, _err) = self._execute('/usr/sbin/stmfadm', 'list-tg') - matches = _get_prefixed_values(out, 'Target group: ') - LOG.debug("target_groups=%s" % matches) - return matches - - def _target_group_exists(self, target_group_name): - return target_group_name not in self._get_target_groups() - - def _get_target_group_members(self, target_group_name): - (out, _err) = self._execute('/usr/sbin/stmfadm', - 'list-tg', '-v', target_group_name) - matches = _get_prefixed_values(out, 'Member: ') - LOG.debug("members of %s=%s" % (target_group_name, matches)) - return matches - - def _is_target_group_member(self, target_group_name, iscsi_target_name): - return iscsi_target_name in ( - self._get_target_group_members(target_group_name)) - - def _get_iscsi_targets(self): - (out, _err) = self._execute('/usr/sbin/itadm', 'list-target') - matches = _collect_lines(out) - - # Skip header - if len(matches) != 0: - assert 'TARGET NAME' in matches[0] - matches = matches[1:] - - targets = [] - for line in matches: - items = line.split() - assert len(items) == 3 - targets.append(items[0]) - - LOG.debug("_get_iscsi_targets=%s" % (targets)) - return targets - - def _iscsi_target_exists(self, iscsi_target_name): - return iscsi_target_name in self._get_iscsi_targets() - - def _build_zfs_poolname(self, volume): - zfs_poolname = '%s%s' % (FLAGS.san_zfs_volume_base, volume['name']) - return zfs_poolname - - def create_volume(self, volume): - """Creates a volume.""" - if int(volume['size']) == 0: - sizestr = '100M' - else: - sizestr = '%sG' % volume['size'] - - zfs_poolname = self._build_zfs_poolname(volume) - - # Create a zfs volume - cmd = ['/usr/sbin/zfs', 'create'] - if FLAGS.san_thin_provision: - cmd.append('-s') - cmd.extend(['-V', sizestr]) - cmd.append(zfs_poolname) - self._execute(*cmd) - - def _get_luid(self, volume): - zfs_poolname = self._build_zfs_poolname(volume) - zvol_name = '/dev/zvol/rdsk/%s' % zfs_poolname - - (out, _err) = self._execute('/usr/sbin/sbdadm', 'list-lu') - - lines = _collect_lines(out) - - # Strip headers - if len(lines) >= 1: - if lines[0] == '': - lines = lines[1:] - - if len(lines) >= 4: - assert 'Found' in lines[0] - assert '' == lines[1] - assert 'GUID' in lines[2] - assert '------------------' in lines[3] - - lines = lines[4:] - - for line in lines: - items = line.split() - assert len(items) == 3 - if items[2] == zvol_name: - luid = items[0].strip() - return luid - - msg = _('LUID not found for %(zfs_poolname)s. 
' - 'Output=%(out)s') % locals() - raise exception.VolumeBackendAPIException(data=msg) - - def _is_lu_created(self, volume): - luid = self._get_luid(volume) - return luid - - def delete_volume(self, volume): - """Deletes a volume.""" - zfs_poolname = self._build_zfs_poolname(volume) - self._execute('/usr/sbin/zfs', 'destroy', zfs_poolname) - - def local_path(self, volume): - # TODO(justinsb): Is this needed here? - escaped_group = FLAGS.volume_group.replace('-', '--') - escaped_name = volume['name'].replace('-', '--') - return "/dev/mapper/%s-%s" % (escaped_group, escaped_name) - - def ensure_export(self, context, volume): - """Synchronously recreates an export for a logical volume.""" - #TODO(justinsb): On bootup, this is called for every volume. - # It then runs ~5 SSH commands for each volume, - # most of which fetch the same info each time - # This makes initial start stupid-slow - return self._do_export(volume, force_create=False) - - def create_export(self, context, volume): - return self._do_export(volume, force_create=True) - - def _do_export(self, volume, force_create): - # Create a Logical Unit (LU) backed by the zfs volume - zfs_poolname = self._build_zfs_poolname(volume) - - if force_create or not self._is_lu_created(volume): - zvol_name = '/dev/zvol/rdsk/%s' % zfs_poolname - self._execute('/usr/sbin/sbdadm', 'create-lu', zvol_name) - - luid = self._get_luid(volume) - iscsi_name = self._build_iscsi_target_name(volume) - target_group_name = 'tg-%s' % volume['name'] - - # Create a iSCSI target, mapped to just this volume - if force_create or not self._target_group_exists(target_group_name): - self._execute('/usr/sbin/stmfadm', 'create-tg', target_group_name) - - # Yes, we add the initiatior before we create it! - # Otherwise, it complains that the target is already active - if force_create or not self._is_target_group_member(target_group_name, - iscsi_name): - self._execute('/usr/sbin/stmfadm', - 'add-tg-member', '-g', target_group_name, iscsi_name) - - if force_create or not self._iscsi_target_exists(iscsi_name): - self._execute('/usr/sbin/itadm', 'create-target', '-n', iscsi_name) - - if force_create or not self._view_exists(luid): - self._execute('/usr/sbin/stmfadm', - 'add-view', '-t', target_group_name, luid) - - #TODO(justinsb): Is this always 1? Does it matter? - iscsi_portal_interface = '1' - iscsi_portal = FLAGS.san_ip + ":3260," + iscsi_portal_interface - - db_update = {} - db_update['provider_location'] = ("%s %s" % - (iscsi_portal, - iscsi_name)) - - return db_update - - def remove_export(self, context, volume): - """Removes an export for a logical volume.""" - - # This is the reverse of _do_export - luid = self._get_luid(volume) - iscsi_name = self._build_iscsi_target_name(volume) - target_group_name = 'tg-%s' % volume['name'] - - if self._view_exists(luid): - self._execute('/usr/sbin/stmfadm', 'remove-view', '-l', luid, '-a') - - if self._iscsi_target_exists(iscsi_name): - self._execute('/usr/sbin/stmfadm', 'offline-target', iscsi_name) - self._execute('/usr/sbin/itadm', 'delete-target', iscsi_name) - - # We don't delete the tg-member; we delete the whole tg! - - if self._target_group_exists(target_group_name): - self._execute('/usr/sbin/stmfadm', 'delete-tg', target_group_name) - - if self._is_lu_created(volume): - self._execute('/usr/sbin/sbdadm', 'delete-lu', luid) - - -class HpSanISCSIDriver(SanISCSIDriver): - """Executes commands relating to HP/Lefthand SAN ISCSI volumes. - - We use the CLIQ interface, over SSH. 
-
-    Rough overview of CLIQ commands used:
-
-    :createVolume:    (creates the volume)
-
-    :getVolumeInfo:    (to discover the IQN etc)
-
-    :getClusterInfo:    (to discover the iSCSI target IP address)
-
-    :assignVolumeChap:    (exports it with CHAP security)
-
-    The 'trick' here is that the HP SAN enforces security by default, so
-    normally a volume mount would need both to configure the SAN in the volume
-    layer and do the mount on the compute layer.  Multi-layer operations are
-    not catered for at the moment in the cinder architecture, so instead we
-    share the volume using CHAP at volume creation time.  Then the mount need
-    only use those CHAP credentials, so can take place exclusively in the
-    compute layer.
-    """
-
-    def _cliq_run(self, verb, cliq_args):
-        """Runs a CLIQ command over SSH, without doing any result parsing"""
-        cliq_arg_strings = []
-        for k, v in cliq_args.items():
-            cliq_arg_strings.append(" %s=%s" % (k, v))
-        cmd = verb + ''.join(cliq_arg_strings)
-
-        return self._run_ssh(cmd)
-
-    def _cliq_run_xml(self, verb, cliq_args, check_cliq_result=True):
-        """Runs a CLIQ command over SSH, parsing and checking the output"""
-        cliq_args['output'] = 'XML'
-        (out, _err) = self._cliq_run(verb, cliq_args)
-
-        LOG.debug(_("CLIQ command returned %s"), out)
-
-        result_xml = etree.fromstring(out)
-        if check_cliq_result:
-            response_node = result_xml.find("response")
-            if response_node is None:
-                msg = (_("Malformed response to CLIQ command "
-                         "%(verb)s %(cliq_args)s. Result=%(out)s") %
-                       locals())
-                raise exception.VolumeBackendAPIException(data=msg)
-
-            result_code = response_node.attrib.get("result")
-
-            if result_code != "0":
-                msg = (_("Error running CLIQ command %(verb)s %(cliq_args)s. "
-                         " Result=%(out)s") %
-                       locals())
-                raise exception.VolumeBackendAPIException(data=msg)
-
-        return result_xml
-
-    def _cliq_get_cluster_info(self, cluster_name):
-        """Queries for info about the cluster (including IP)"""
-        cliq_args = {}
-        cliq_args['clusterName'] = cluster_name
-        cliq_args['searchDepth'] = '1'
-        cliq_args['verbose'] = '0'
-
-        result_xml = self._cliq_run_xml("getClusterInfo", cliq_args)
-
-        return result_xml
-
-    def _cliq_get_cluster_vip(self, cluster_name):
-        """Gets the IP on which a cluster shares iSCSI volumes"""
-        cluster_xml = self._cliq_get_cluster_info(cluster_name)
-
-        vips = []
-        for vip in cluster_xml.findall("response/cluster/vip"):
-            vips.append(vip.attrib.get('ipAddress'))
-
-        if len(vips) == 1:
-            return vips[0]
-
-        _xml = etree.tostring(cluster_xml)
-        msg = (_("Unexpected number of virtual ips for cluster "
-                 " %(cluster_name)s. Result=%(_xml)s") %
-               locals())
-        raise exception.VolumeBackendAPIException(data=msg)
-
-    def _cliq_get_volume_info(self, volume_name):
-        """Gets the volume info, including IQN"""
-        cliq_args = {}
-        cliq_args['volumeName'] = volume_name
-        result_xml = self._cliq_run_xml("getVolumeInfo", cliq_args)
-
-        # Result looks like this:
-        #
-        # <response result="0">
-        #   <volume name="..." iscsiIqn="..." clusterName="...">
-        #     <status .../>
-        #     <permission .../>
-        #   </volume>
-        # </response>
-
-        # Flatten the nodes into a dictionary; use prefixes to avoid collisions
-        volume_attributes = {}
-
-        volume_node = result_xml.find("response/volume")
-        for k, v in volume_node.attrib.items():
-            volume_attributes["volume." + k] = v
-
-        status_node = volume_node.find("status")
-        if not status_node is None:
-            for k, v in status_node.attrib.items():
-                volume_attributes["status." + k] = v
-
-        # We only consider the first permission node
-        permission_node = volume_node.find("permission")
-        if not permission_node is None:
-            for k, v in status_node.attrib.items():
-                volume_attributes["permission."
+ k] = v - - LOG.debug(_("Volume info: %(volume_name)s => %(volume_attributes)s") % - locals()) - return volume_attributes - - def create_volume(self, volume): - """Creates a volume.""" - cliq_args = {} - cliq_args['clusterName'] = FLAGS.san_clustername - #TODO(justinsb): Should we default to inheriting thinProvision? - cliq_args['thinProvision'] = '1' if FLAGS.san_thin_provision else '0' - cliq_args['volumeName'] = volume['name'] - if int(volume['size']) == 0: - cliq_args['size'] = '100MB' - else: - cliq_args['size'] = '%sGB' % volume['size'] - - self._cliq_run_xml("createVolume", cliq_args) - - volume_info = self._cliq_get_volume_info(volume['name']) - cluster_name = volume_info['volume.clusterName'] - iscsi_iqn = volume_info['volume.iscsiIqn'] - - #TODO(justinsb): Is this always 1? Does it matter? - cluster_interface = '1' - - cluster_vip = self._cliq_get_cluster_vip(cluster_name) - iscsi_portal = cluster_vip + ":3260," + cluster_interface - - model_update = {} - - # NOTE(jdg): LH volumes always at lun 0 ? - model_update['provider_location'] = ("%s %s %s" % - (iscsi_portal, - iscsi_iqn, - 0)) - - return model_update - - def create_volume_from_snapshot(self, volume, snapshot): - """Creates a volume from a snapshot.""" - raise NotImplementedError() - - def create_snapshot(self, snapshot): - """Creates a snapshot.""" - raise NotImplementedError() - - def delete_volume(self, volume): - """Deletes a volume.""" - cliq_args = {} - cliq_args['volumeName'] = volume['name'] - cliq_args['prompt'] = 'false' # Don't confirm - - self._cliq_run_xml("deleteVolume", cliq_args) - - def local_path(self, volume): - # TODO(justinsb): Is this needed here? - msg = _("local_path not supported") - raise exception.VolumeBackendAPIException(data=msg) - - def initialize_connection(self, volume, connector): - """Assigns the volume to a server. - - Assign any created volume to a compute node/host so that it can be - used from that host. HP VSA requires a volume to be assigned - to a server. - - This driver returns a driver_volume_type of 'iscsi'. - The format of the driver data is defined in _get_iscsi_properties. - Example return value: - - { - 'driver_volume_type': 'iscsi' - 'data': { - 'target_discovered': True, - 'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001', - 'target_protal': '127.0.0.1:3260', - 'volume_id': 1, - } - } - - """ - cliq_args = {} - cliq_args['volumeName'] = volume['name'] - cliq_args['serverName'] = connector['host'] - self._cliq_run_xml("assignVolumeToServer", cliq_args) - - iscsi_properties = self._get_iscsi_properties(volume) - return { - 'driver_volume_type': 'iscsi', - 'data': iscsi_properties - } - - def terminate_connection(self, volume, connector): - """Unassign the volume from the host.""" - cliq_args = {} - cliq_args['volumeName'] = volume['name'] - cliq_args['serverName'] = connector['host'] - self._cliq_run_xml("unassignVolumeToServer", cliq_args) diff --git a/cinder/volume/solidfire.py b/cinder/volume/solidfire.py deleted file mode 100644 index 0749237549..0000000000 --- a/cinder/volume/solidfire.py +++ /dev/null @@ -1,423 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2011 Justin Santa Barbara -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Drivers for san-stored volumes. - -The unique thing about a SAN is that we don't expect that we can run the volume -controller on the SAN hardware. We expect to access it over SSH or some API. -""" - -import base64 -import httplib -import json -import random -import socket -import string -import uuid - -from cinder import exception -from cinder import flags -from cinder.openstack.common import cfg -from cinder.openstack.common import log as logging -from cinder.volume.san import SanISCSIDriver - - -LOG = logging.getLogger(__name__) - -sf_opts = [ - cfg.BoolOpt('sf_emulate_512', - default=True, - help='Set 512 byte emulation on volume creation; '), - - cfg.StrOpt('sf_mvip', - default='', - help='IP address of SolidFire MVIP'), - - cfg.StrOpt('sf_login', - default='admin', - help='Username for SF Cluster Admin'), - - cfg.StrOpt('sf_password', - default='', - help='Password for SF Cluster Admin'), - - cfg.BoolOpt('sf_allow_tenant_qos', - default=True, - help='Allow tenants to specify QOS on create'), ] - -FLAGS = flags.FLAGS -FLAGS.register_opts(sf_opts) - - -class SolidFire(SanISCSIDriver): - - sf_qos_dict = {'slow': {'minIOPS': 100, - 'maxIOPS': 200, - 'burstIOPS': 200}, - 'medium': {'minIOPS': 200, - 'maxIOPS': 400, - 'burstIOPS': 400}, - 'fast': {'minIOPS': 500, - 'maxIOPS': 1000, - 'burstIOPS': 1000}, - 'performant': {'minIOPS': 2000, - 'maxIOPS': 4000, - 'burstIOPS': 4000}, - 'off': None} - - def __init__(self, *args, **kwargs): - super(SolidFire, self).__init__(*args, **kwargs) - - def _issue_api_request(self, method_name, params): - """All API requests to SolidFire device go through this method - - Simple json-rpc web based API calls. - each call takes a set of paramaters (dict) - and returns results in a dict as well. 
- """ - - host = FLAGS.san_ip - # For now 443 is the only port our server accepts requests on - port = 443 - - # NOTE(john-griffith): Probably don't need this, but the idea is - # we provide a request_id so we can correlate - # responses with requests - request_id = int(uuid.uuid4()) # just generate a random number - - cluster_admin = FLAGS.san_login - cluster_password = FLAGS.san_password - - command = {'method': method_name, - 'id': request_id} - - if params is not None: - command['params'] = params - - payload = json.dumps(command, ensure_ascii=False) - payload.encode('utf-8') - # we use json-rpc, webserver needs to see json-rpc in header - header = {'Content-Type': 'application/json-rpc; charset=utf-8'} - - if cluster_password is not None: - # base64.encodestring includes a newline character - # in the result, make sure we strip it off - auth_key = base64.encodestring('%s:%s' % (cluster_admin, - cluster_password))[:-1] - header['Authorization'] = 'Basic %s' % auth_key - - LOG.debug(_("Payload for SolidFire API call: %s"), payload) - connection = httplib.HTTPSConnection(host, port) - connection.request('POST', '/json-rpc/1.0', payload, header) - response = connection.getresponse() - data = {} - - if response.status != 200: - connection.close() - raise exception.SolidFireAPIException(status=response.status) - - else: - data = response.read() - try: - data = json.loads(data) - - except (TypeError, ValueError), exc: - connection.close() - msg = _("Call to json.loads() raised an exception: %s") % exc - raise exception.SfJsonEncodeFailure(msg) - - connection.close() - - LOG.debug(_("Results of SolidFire API call: %s"), data) - return data - - def _get_volumes_by_sfaccount(self, account_id): - params = {'accountID': account_id} - data = self._issue_api_request('ListVolumesForAccount', params) - if 'result' in data: - return data['result']['volumes'] - - def _get_sfaccount_by_name(self, sf_account_name): - sfaccount = None - params = {'username': sf_account_name} - data = self._issue_api_request('GetAccountByName', params) - if 'result' in data and 'account' in data['result']: - LOG.debug(_('Found solidfire account: %s'), sf_account_name) - sfaccount = data['result']['account'] - return sfaccount - - def _create_sfaccount(self, cinder_project_id): - """Create account on SolidFire device if it doesn't already exist. - - We're first going to check if the account already exits, if it does - just return it. If not, then create it. 
- """ - - sf_account_name = socket.gethostname() + '-' + cinder_project_id - sfaccount = self._get_sfaccount_by_name(sf_account_name) - if sfaccount is None: - LOG.debug(_('solidfire account: %s does not exist, create it...'), - sf_account_name) - chap_secret = self._generate_random_string(12) - params = {'username': sf_account_name, - 'initiatorSecret': chap_secret, - 'targetSecret': chap_secret, - 'attributes': {}} - data = self._issue_api_request('AddAccount', params) - if 'result' in data: - sfaccount = self._get_sfaccount_by_name(sf_account_name) - - return sfaccount - - def _get_cluster_info(self): - params = {} - data = self._issue_api_request('GetClusterInfo', params) - if 'result' not in data: - raise exception.SolidFireAPIDataException(data=data) - - return data['result'] - - def _do_export(self, volume): - """Gets the associated account, retrieves CHAP info and updates.""" - - sfaccount_name = '%s-%s' % (socket.gethostname(), volume['project_id']) - sfaccount = self._get_sfaccount_by_name(sfaccount_name) - - model_update = {} - model_update['provider_auth'] = ('CHAP %s %s' - % (sfaccount['username'], - sfaccount['targetSecret'])) - - return model_update - - def _generate_random_string(self, length): - """Generates random_string to use for CHAP password.""" - - char_set = string.ascii_uppercase + string.digits - return ''.join(random.sample(char_set, length)) - - def _do_volume_create(self, project_id, params): - cluster_info = self._get_cluster_info() - iscsi_portal = cluster_info['clusterInfo']['svip'] + ':3260' - sfaccount = self._create_sfaccount(project_id) - chap_secret = sfaccount['targetSecret'] - - params['accountID'] = sfaccount['accountID'] - data = self._issue_api_request('CreateVolume', params) - - if 'result' not in data or 'volumeID' not in data['result']: - raise exception.SolidFireAPIDataException(data=data) - - volume_id = data['result']['volumeID'] - - volume_list = self._get_volumes_by_sfaccount(sfaccount['accountID']) - iqn = None - for v in volume_list: - if v['volumeID'] == volume_id: - iqn = v['iqn'] - break - - model_update = {} - - # NOTE(john-griffith): SF volumes are always at lun 0 - model_update['provider_location'] = ('%s %s %s' - % (iscsi_portal, iqn, 0)) - model_update['provider_auth'] = ('CHAP %s %s' - % (sfaccount['username'], - chap_secret)) - - return model_update - - def create_volume(self, volume): - """Create volume on SolidFire device. - - The account is where CHAP settings are derived from, volume is - created and exported. Note that the new volume is immediately ready - for use. - - One caveat here is that an existing user account must be specified - in the API call to create a new volume. We use a set algorithm to - determine account info based on passed in cinder volume object. First - we check to see if the account already exists (and use it), or if it - does not already exist, we'll go ahead and create it. - - For now, we're just using very basic settings, QOS is - turned off, 512 byte emulation is off etc. Will be - looking at extensions for these things later, or - this module can be hacked to suit needs. 
- """ - GB = 1048576 * 1024 - slice_count = 1 - attributes = {} - qos = {} - qos_keys = ['minIOPS', 'maxIOPS', 'burstIOPS'] - valid_presets = self.sf_qos_dict.keys() - - if FLAGS.sf_allow_tenant_qos and \ - volume.get('volume_metadata')is not None: - - #First look to see if they included a preset - presets = [i.value for i in volume.get('volume_metadata') - if i.key == 'sf-qos' and i.value in valid_presets] - if len(presets) > 0: - if len(presets) > 1: - LOG.warning(_('More than one valid preset was ' - 'detected, using %s') % presets[0]) - qos = self.sf_qos_dict[presets[0]] - else: - #if there was no preset, look for explicit settings - for i in volume.get('volume_metadata'): - if i.key in qos_keys: - qos[i.key] = int(i.value) - - params = {'name': 'OS-VOLID-%s' % volume['id'], - 'accountID': None, - 'sliceCount': slice_count, - 'totalSize': volume['size'] * GB, - 'enable512e': FLAGS.sf_emulate_512, - 'attributes': attributes, - 'qos': qos} - - return self._do_volume_create(volume['project_id'], params) - - def delete_volume(self, volume, is_snapshot=False): - """Delete SolidFire Volume from device. - - SolidFire allows multipe volumes with same name, - volumeID is what's guaranteed unique. - - """ - - LOG.debug(_("Enter SolidFire delete_volume...")) - sf_account_name = socket.gethostname() + '-' + volume['project_id'] - sfaccount = self._get_sfaccount_by_name(sf_account_name) - if sfaccount is None: - raise exception.SfAccountNotFound(account_name=sf_account_name) - - params = {'accountID': sfaccount['accountID']} - data = self._issue_api_request('ListVolumesForAccount', params) - if 'result' not in data: - raise exception.SolidFireAPIDataException(data=data) - - if is_snapshot: - seek = 'OS-SNAPID-%s' % (volume['id']) - else: - seek = 'OS-VOLID-%s' % volume['id'] - #params = {'name': 'OS-VOLID-:%s' % volume['id'], - - found_count = 0 - volid = -1 - for v in data['result']['volumes']: - if v['name'] == seek: - found_count += 1 - volid = v['volumeID'] - - if found_count == 0: - raise exception.VolumeNotFound(volume_id=volume['id']) - - if found_count > 1: - LOG.debug(_("Deleting volumeID: %s"), volid) - raise exception.DuplicateSfVolumeNames(vol_name=volume['id']) - - params = {'volumeID': volid} - data = self._issue_api_request('DeleteVolume', params) - if 'result' not in data: - raise exception.SolidFireAPIDataException(data=data) - - LOG.debug(_("Leaving SolidFire delete_volume")) - - def ensure_export(self, context, volume): - LOG.debug(_("Executing SolidFire ensure_export...")) - return self._do_export(volume) - - def create_export(self, context, volume): - LOG.debug(_("Executing SolidFire create_export...")) - return self._do_export(volume) - - def _do_create_snapshot(self, snapshot, snapshot_name): - """Creates a snapshot.""" - LOG.debug(_("Enter SolidFire create_snapshot...")) - sf_account_name = socket.gethostname() + '-' + snapshot['project_id'] - sfaccount = self._get_sfaccount_by_name(sf_account_name) - if sfaccount is None: - raise exception.SfAccountNotFound(account_name=sf_account_name) - - params = {'accountID': sfaccount['accountID']} - data = self._issue_api_request('ListVolumesForAccount', params) - if 'result' not in data: - raise exception.SolidFireAPIDataException(data=data) - - found_count = 0 - volid = -1 - for v in data['result']['volumes']: - if v['name'] == 'OS-VOLID-%s' % snapshot['volume_id']: - found_count += 1 - volid = v['volumeID'] - - if found_count == 0: - raise exception.VolumeNotFound(volume_id=snapshot['volume_id']) - if found_count != 1: - raise 
exception.DuplicateSfVolumeNames( - vol_name='OS-VOLID-%s' % snapshot['volume_id']) - - params = {'volumeID': int(volid), - 'name': snapshot_name, - 'attributes': {'OriginatingVolume': volid}} - - data = self._issue_api_request('CloneVolume', params) - if 'result' not in data: - raise exception.SolidFireAPIDataException(data=data) - - return (data, sfaccount) - - def delete_snapshot(self, snapshot): - self.delete_volume(snapshot, True) - - def create_snapshot(self, snapshot): - snapshot_name = 'OS-SNAPID-%s' % ( - snapshot['id']) - (data, sf_account) = self._do_create_snapshot(snapshot, snapshot_name) - - def create_volume_from_snapshot(self, volume, snapshot): - cluster_info = self._get_cluster_info() - iscsi_portal = cluster_info['clusterInfo']['svip'] + ':3260' - sfaccount = self._create_sfaccount(snapshot['project_id']) - chap_secret = sfaccount['targetSecret'] - snapshot_name = 'OS-VOLID-%s' % volume['id'] - - (data, sf_account) = self._do_create_snapshot(snapshot, snapshot_name) - - if 'result' not in data or 'volumeID' not in data['result']: - raise exception.SolidFireAPIDataException(data=data) - - volume_id = data['result']['volumeID'] - volume_list = self._get_volumes_by_sfaccount(sf_account['accountID']) - iqn = None - for v in volume_list: - if v['volumeID'] == volume_id: - iqn = v['iqn'] - break - - model_update = {} - - # NOTE(john-griffith): SF volumes are always at lun 0 - model_update['provider_location'] = ('%s %s %s' - % (iscsi_portal, iqn, 0)) - model_update['provider_auth'] = ('CHAP %s %s' - % (sfaccount['username'], - chap_secret)) - return model_update diff --git a/cinder/volume/storwize_svc.py b/cinder/volume/storwize_svc.py deleted file mode 100644 index 1c3006de92..0000000000 --- a/cinder/volume/storwize_svc.py +++ /dev/null @@ -1,1230 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright (c) 2012 IBM, Inc. -# Copyright (c) 2012 OpenStack LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# Authors: -# Ronen Kat -# Avishay Traeger - -""" -Volume driver for IBM Storwize V7000 and SVC storage systems. - -Notes: -1. If you specify both a password and a key file, this driver will use the - key file only. -2. When using a key file for authentication, it is up to the user or - system administrator to store the private key in a safe manner. -3. The defaults for creating volumes are "-rsize 2% -autoexpand - -grainsize 256 -warning 0". These can be changed in the configuration - file or by using volume types(recommended only for advanced users). - -Limitations: -1. The driver was not tested with SVC or clustered configurations of Storwize - V7000. -2. The driver expects CLI output in English, error messages may be in a - localized format. 
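The defaults quoted in note 3 of the docstring map directly onto the storwize_svc_* options registered below. In cinder.conf they could be overridden along these lines (every value here is purely illustrative)::

    [DEFAULT]
    san_ip = 192.168.0.10
    san_login = admin
    san_private_key = /etc/cinder/svc_rsa
    storwize_svc_volpool_name = volpool
    storwize_svc_vol_rsize = 5%
    storwize_svc_vol_warning = 10
    storwize_svc_vol_autoexpand = True
    storwize_svc_vol_grainsize = 128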
-""" - -import random -import re -import string -import time - -from cinder import exception -from cinder import flags -from cinder.openstack.common import cfg -from cinder.openstack.common import excutils -from cinder.openstack.common import log as logging -from cinder.volume import san - -LOG = logging.getLogger(__name__) - -storwize_svc_opts = [ - cfg.StrOpt('storwize_svc_volpool_name', - default='volpool', - help='Storage system storage pool for volumes'), - cfg.StrOpt('storwize_svc_vol_rsize', - default='2%', - help='Storage system space-efficiency parameter for volumes'), - cfg.StrOpt('storwize_svc_vol_warning', - default='0', - help='Storage system threshold for volume capacity warnings'), - cfg.BoolOpt('storwize_svc_vol_autoexpand', - default=True, - help='Storage system autoexpand parameter for volumes ' - '(True/False)'), - cfg.StrOpt('storwize_svc_vol_grainsize', - default='256', - help='Storage system grain size parameter for volumes ' - '(32/64/128/256)'), - cfg.BoolOpt('storwize_svc_vol_compression', - default=False, - help='Storage system compression option for volumes'), - cfg.BoolOpt('storwize_svc_vol_easytier', - default=True, - help='Enable Easy Tier for volumes'), - cfg.StrOpt('storwize_svc_flashcopy_timeout', - default='120', - help='Maximum number of seconds to wait for FlashCopy to be ' - 'prepared. Maximum value is 600 seconds (10 minutes).'), -] - -FLAGS = flags.FLAGS -FLAGS.register_opts(storwize_svc_opts) - - -class StorwizeSVCDriver(san.SanISCSIDriver): - """IBM Storwize V7000 and SVC iSCSI volume driver.""" - - def __init__(self, *args, **kwargs): - super(StorwizeSVCDriver, self).__init__(*args, **kwargs) - self.iscsi_ipv4_conf = None - self.iscsi_ipv6_conf = None - - # Build cleanup transaltion tables for hosts names to follow valid - # host names for Storwizew V7000 and SVC storage systems. - invalid_ch_in_host = '' - for num in range(0, 128): - ch = chr(num) - if ((not ch.isalnum()) and (ch != ' ') and (ch != '.') - and (ch != '-') and (ch != '_')): - invalid_ch_in_host = invalid_ch_in_host + ch - self._string_host_name_filter = string.maketrans(invalid_ch_in_host, - '-' * len(invalid_ch_in_host)) - - self._unicode_host_name_filter = dict((ord(unicode(char)), u'-') - for char in invalid_ch_in_host) - - def _get_hdr_dic(self, header, row, delim): - """Return CLI row data as a dictionary indexed by names from header. - - Create a dictionary object from the data row string using the header - string. The strings are converted to columns using the delimiter in - delim. - """ - - attributes = header.split(delim) - values = row.split(delim) - self._driver_assert(len(values) == len(attributes), - _('_get_hdr_dic: attribute headers and values do not match.\n ' - 'Headers: %(header)s\n Values: %(row)s') - % {'header': str(header), - 'row': str(row)}) - dic = {} - for attribute, value in map(None, attributes, values): - dic[attribute] = value - return dic - - def _driver_assert(self, assert_condition, exception_message): - """Internal assertion mechanism for CLI output.""" - if not assert_condition: - LOG.error(exception_message) - raise exception.VolumeBackendAPIException(data=exception_message) - - def check_for_setup_error(self): - """Check that we have all configuration details from the storage.""" - - LOG.debug(_('enter: check_for_setup_error')) - - # Validate that the pool exists - ssh_cmd = 'lsmdiskgrp -delim ! 
-nohdr' - out, err = self._run_ssh(ssh_cmd) - self._driver_assert(len(out) > 0, - _('check_for_setup_error: failed with unexpected CLI output.\n ' - 'Command: %(cmd)s\n stdout: %(out)s\n stderr: %(err)s') - % {'cmd': ssh_cmd, - 'out': str(out), - 'err': str(err)}) - search_text = '!%s!' % FLAGS.storwize_svc_volpool_name - if search_text not in out: - raise exception.InvalidInput( - reason=(_('pool %s doesn\'t exist') - % FLAGS.storwize_svc_volpool_name)) - - storage_nodes = {} - # Get the iSCSI names of the Storwize/SVC nodes - ssh_cmd = 'svcinfo lsnode -delim !' - out, err = self._run_ssh(ssh_cmd) - self._driver_assert(len(out) > 0, - _('check_for_setup_error: failed with unexpected CLI output.\n ' - 'Command: %(cmd)s\n stdout: %(out)s\n stderr: %(err)s') - % {'cmd': ssh_cmd, - 'out': str(out), - 'err': str(err)}) - - nodes = out.strip().split('\n') - self._driver_assert(len(nodes) > 0, - _('check_for_setup_error: failed with unexpected CLI output.\n ' - 'Command: %(cmd)s\n stdout: %(out)s\n stderr: %(err)s') - % {'cmd': ssh_cmd, - 'out': str(out), - 'err': str(err)}) - header = nodes.pop(0) - for node_line in nodes: - try: - node_data = self._get_hdr_dic(header, node_line, '!') - except exception.VolumeBackendAPIException as e: - with excutils.save_and_reraise_exception(): - LOG.error(_('check_for_setup_error: ' - 'failed with unexpected CLI output.\n ' - 'Command: %(cmd)s\n ' - 'stdout: %(out)s\n stderr: %(err)s\n') - % {'cmd': ssh_cmd, - 'out': str(out), - 'err': str(err)}) - node = {} - try: - node['id'] = node_data['id'] - node['name'] = node_data['name'] - node['iscsi_name'] = node_data['iscsi_name'] - node['status'] = node_data['status'] - node['ipv4'] = [] - node['ipv6'] = [] - if node['iscsi_name'] != '': - storage_nodes[node['id']] = node - except KeyError as e: - LOG.error(_('Did not find expected column name in ' - 'svcinfo lsnode: %s') % str(e)) - exception_message = ( - _('check_for_setup_error: Unexpected CLI output.\n ' - 'Details: %(msg)s\n' - 'Command: %(cmd)s\n ' - 'stdout: %(out)s\n stderr: %(err)s') - % {'msg': str(e), - 'cmd': ssh_cmd, - 'out': str(out), - 'err': str(err)}) - raise exception.VolumeBackendAPIException( - data=exception_message) - - # Get the iSCSI IP addresses of the Storwize/SVC nodes - ssh_cmd = 'lsportip -delim !' 
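Each of these CLI reads follows the same scrape pattern: split a '!'-delimited header row, then zip every data row against it. An isolated version of the _get_hdr_dic idea, fed a fabricated lsnode-style reply (the IQN is invented)::

    def hdr_dic(header, row, delim):
        # Pair column names from the header with one row's values.
        attributes = header.split(delim)
        values = row.split(delim)
        if len(attributes) != len(values):
            raise ValueError('attribute headers and values do not match')
        return dict(zip(attributes, values))

    out = ('id!name!iscsi_name!status\n'
           '1!node1!iqn.1986-03.com.ibm:2145.clus.node1!online')
    rows = out.strip().split('\n')
    header = rows.pop(0)
    nodes = [hdr_dic(header, r, '!') for r in rows]
    assert nodes[0]['status'] == 'online'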
- out, err = self._run_ssh(ssh_cmd) - self._driver_assert(len(out) > 0, - _('check_for_setup_error: failed with unexpected CLI output.\n ' - 'Command: %(cmd)s\n ' - 'stdout: %(out)s\n stderr: %(err)s') - % {'cmd': ssh_cmd, - 'out': str(out), - 'err': str(err)}) - - portips = out.strip().split('\n') - self._driver_assert(len(portips) > 0, - _('check_for_setup_error: failed with unexpected CLI output.\n ' - 'Command: %(cmd)s\n stdout: %(out)s\n stderr: %(err)s') - % {'cmd': ssh_cmd, - 'out': str(out), - 'err': str(err)}) - header = portips.pop(0) - for portip_line in portips: - try: - port_data = self._get_hdr_dic(header, portip_line, '!') - except exception.VolumeBackendAPIException as e: - with excutils.save_and_reraise_exception(): - LOG.error(_('check_for_setup_error: ' - 'failed with unexpected CLI output.\n ' - 'Command: %(cmd)s\n ' - 'stdout: %(out)s\n stderr: %(err)s\n') - % {'cmd': ssh_cmd, - 'out': str(out), - 'err': str(err)}) - try: - port_node_id = port_data['node_id'] - port_ipv4 = port_data['IP_address'] - port_ipv6 = port_data['IP_address_6'] - except KeyError as e: - LOG.error(_('Did not find expected column name in ' - 'lsportip: %s') % str(e)) - exception_message = ( - _('check_for_setup_error: Unexpected CLI output.\n ' - 'Details: %(msg)s\n' - 'Command: %(cmd)s\n ' - 'stdout: %(out)s\n stderr: %(err)s') - % {'msg': str(e), - 'cmd': ssh_cmd, - 'out': str(out), - 'err': str(err)}) - raise exception.VolumeBackendAPIException( - data=exception_message) - - if port_node_id in storage_nodes: - node = storage_nodes[port_node_id] - if len(port_ipv4) > 0: - node['ipv4'].append(port_ipv4) - if len(port_ipv6) > 0: - node['ipv6'].append(port_ipv6) - else: - raise exception.VolumeBackendAPIException( - data=_('check_for_setup_error: ' - 'fail to storage configuration: unknown ' - 'storage node %(node_id)s from CLI output.\n ' - 'stdout: %(out)s\n stderr: %(err)s\n') - % {'node_id': port_node_id, - 'out': str(out), - 'err': str(err)}) - - iscsi_ipv4_conf = [] - iscsi_ipv6_conf = [] - for node_key in storage_nodes: - node = storage_nodes[node_key] - if 'ipv4' in node and len(node['iscsi_name']) > 0: - iscsi_ipv4_conf.append({'iscsi_name': node['iscsi_name'], - 'ip': node['ipv4'], - 'node_id': node['id']}) - if 'ipv6' in node and len(node['iscsi_name']) > 0: - iscsi_ipv6_conf.append({'iscsi_name': node['iscsi_name'], - 'ip': node['ipv6'], - 'node_id': node['id']}) - if (len(node['ipv4']) == 0) and (len(node['ipv6']) == 0): - raise exception.VolumeBackendAPIException( - data=_('check_for_setup_error: ' - 'fail to storage configuration: storage ' - 'node %s has no IP addresses configured') - % node['id']) - - # Make sure we have at least one IPv4 address with a iSCSI name - # TODO(ronenkat) need to expand this to support IPv6 - self._driver_assert(len(iscsi_ipv4_conf) > 0, - _('could not obtain IP address and iSCSI name from the storage. 
' - 'Please verify that the storage is configured for iSCSI.\n ' - 'Storage nodes: %(nodes)s\n portips: %(portips)s') - % {'nodes': nodes, 'portips': portips}) - - self.iscsi_ipv4_conf = iscsi_ipv4_conf - self.iscsi_ipv6_conf = iscsi_ipv6_conf - - LOG.debug(_('leave: check_for_setup_error')) - - def _check_num_perc(self, value): - """Return True if value is either a number or a percentage.""" - if value.endswith('%'): - value = value[0:-1] - return value.isdigit() - - def _check_flags(self): - """Ensure that the flags are set properly.""" - - required_flags = ['san_ip', 'san_ssh_port', 'san_login', - 'storwize_svc_volpool_name'] - for flag in required_flags: - if not getattr(FLAGS, flag, None): - raise exception.InvalidInput( - reason=_('%s is not set') % flag) - - # Ensure that either password or keyfile were set - if not (FLAGS.san_password or FLAGS.san_private_key): - raise exception.InvalidInput( - reason=_('Password or SSH private key is required for ' - 'authentication: set either san_password or ' - 'san_private_key option')) - - # Check that rsize is a number or percentage - rsize = FLAGS.storwize_svc_vol_rsize - if not self._check_num_perc(rsize) and (rsize != '-1'): - raise exception.InvalidInput( - reason=_('Illegal value specified for storwize_svc_vol_rsize: ' - 'set to either a number or a percentage')) - - # Check that warning is a number or percentage - warning = FLAGS.storwize_svc_vol_warning - if not self._check_num_perc(warning): - raise exception.InvalidInput( - reason=_('Illegal value specified for ' - 'storwize_svc_vol_warning: ' - 'set to either a number or a percentage')) - - # Check that grainsize is 32/64/128/256 - grainsize = FLAGS.storwize_svc_vol_grainsize - if grainsize not in ['32', '64', '128', '256']: - raise exception.InvalidInput( - reason=_('Illegal value specified for ' - 'storwize_svc_vol_grainsize: set to either ' - '\'32\', \'64\', \'128\', or \'256\'')) - - # Check that flashcopy_timeout is numeric and 32/64/128/256 - flashcopy_timeout = FLAGS.storwize_svc_flashcopy_timeout - if not (flashcopy_timeout.isdigit() and int(flashcopy_timeout) > 0 and - int(flashcopy_timeout) <= 600): - raise exception.InvalidInput( - reason=_('Illegal value %s specified for ' - 'storwize_svc_flashcopy_timeout: ' - 'valid values are between 0 and 600') - % flashcopy_timeout) - - # Check that rsize is set - volume_compression = FLAGS.storwize_svc_vol_compression - if ((volume_compression == True) and - (FLAGS.storwize_svc_vol_rsize == '-1')): - raise exception.InvalidInput( - reason=_('If compression is set to True, rsize must ' - 'also be set (not equal to -1)')) - - def do_setup(self, context): - """Validate the flags.""" - LOG.debug(_('enter: do_setup')) - self._check_flags() - LOG.debug(_('leave: do_setup')) - - def create_volume(self, volume): - """Create a new volume - uses the internal method.""" - return self._create_volume(volume, units='gb') - - def _create_volume(self, volume, units='gb'): - """Create a new volume.""" - - name = volume['name'] - model_update = None - - LOG.debug(_('enter: create_volume: volume %s ') % name) - - size = int(volume['size']) - - if FLAGS.storwize_svc_vol_autoexpand == True: - autoex = '-autoexpand' - else: - autoex = '' - - if FLAGS.storwize_svc_vol_easytier == True: - easytier = '-easytier on' - else: - easytier = '-easytier off' - - # Set space-efficient options - if FLAGS.storwize_svc_vol_rsize.strip() == '-1': - ssh_cmd_se_opt = '' - else: - ssh_cmd_se_opt = ('-rsize %(rsize)s %(autoex)s -warning %(warn)s' % - {'rsize': 
FLAGS.storwize_svc_vol_rsize, - 'autoex': autoex, - 'warn': FLAGS.storwize_svc_vol_warning}) - if FLAGS.storwize_svc_vol_compression: - ssh_cmd_se_opt = ssh_cmd_se_opt + ' -compressed' - else: - ssh_cmd_se_opt = ssh_cmd_se_opt + (' -grainsize %(grain)s' % - {'grain': FLAGS.storwize_svc_vol_grainsize}) - - ssh_cmd = ('mkvdisk -name %(name)s -mdiskgrp %(mdiskgrp)s ' - '-iogrp 0 -size %(size)s -unit ' - '%(unit)s %(easytier)s %(ssh_cmd_se_opt)s' - % {'name': name, - 'mdiskgrp': FLAGS.storwize_svc_volpool_name, - 'size': size, 'unit': units, 'easytier': easytier, - 'ssh_cmd_se_opt': ssh_cmd_se_opt}) - out, err = self._run_ssh(ssh_cmd) - self._driver_assert(len(out.strip()) > 0, - _('create volume %(name)s - did not find ' - 'success message in CLI output.\n ' - 'stdout: %(out)s\n stderr: %(err)s') - % {'name': name, 'out': str(out), 'err': str(err)}) - - # Ensure that the output is as expected - match_obj = re.search('Virtual Disk, id \[([0-9]+)\], ' - 'successfully created', out) - # Make sure we got a "successfully created" message with vdisk id - self._driver_assert(match_obj is not None, - _('create volume %(name)s - did not find ' - 'success message in CLI output.\n ' - 'stdout: %(out)s\n stderr: %(err)s') - % {'name': name, 'out': str(out), 'err': str(err)}) - - LOG.debug(_('leave: create_volume: volume %(name)s ') % {'name': name}) - - def delete_volume(self, volume): - self._delete_volume(volume, False) - - def _delete_volume(self, volume, force_opt): - """Driver entry point for destroying existing volumes.""" - - name = volume['name'] - LOG.debug(_('enter: delete_volume: volume %(name)s ') % {'name': name}) - - if force_opt: - force_flag = '-force' - else: - force_flag = '' - - volume_defined = self._is_volume_defined(name) - # Try to delete volume only if found on the storage - if volume_defined: - out, err = self._run_ssh('rmvdisk %(force)s %(name)s' - % {'force': force_flag, - 'name': name}) - # No output should be returned from rmvdisk - self._driver_assert(len(out.strip()) == 0, - _('delete volume %(name)s - non empty output from CLI.\n ' - 'stdout: %(out)s\n stderr: %(err)s') - % {'name': name, - 'out': str(out), - 'err': str(err)}) - else: - # Log that volume does not exist - LOG.info(_('warning: tried to delete volume %(name)s but ' - 'it does not exist.') % {'name': name}) - - LOG.debug(_('leave: delete_volume: volume %(name)s ') % {'name': name}) - - def ensure_export(self, context, volume): - """Check that the volume exists on the storage. - - The system does not "export" volumes as a Linux iSCSI target does, - and therefore we just check that the volume exists on the storage. - """ - volume_defined = self._is_volume_defined(volume['name']) - if not volume_defined: - LOG.error(_('ensure_export: volume %s not found on storage') - % volume['name']) - - def create_export(self, context, volume): - model_update = None - return model_update - - def remove_export(self, context, volume): - pass - - def initialize_connection(self, volume, connector): - """Perform the necessary work so that an iSCSI connection can be made. - - To be able to create an iSCSI connection from a given iSCSI name to a - volume, we must: - 1. Translate the given iSCSI name to a host name - 2. Create new host on the storage system if it does not yet exist - 2. Map the volume to the host if it is not already done - 3. Return iSCSI properties, including the IP address of the preferred - node for this volume and the LUN number. 
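With the default flags, the mkvdisk invocation assembled in _create_volume comes out as one CLI line; for a hypothetical 10 GB volume (name and pool are placeholders)::

    opts = {'name': 'volume-0001', 'mdiskgrp': 'volpool', 'size': 10,
            'unit': 'gb', 'easytier': '-easytier on',
            'se_opt': '-rsize 2% -autoexpand -warning 0 -grainsize 256'}
    ssh_cmd = ('mkvdisk -name %(name)s -mdiskgrp %(mdiskgrp)s -iogrp 0 '
               '-size %(size)s -unit %(unit)s %(easytier)s %(se_opt)s'
               % opts)
    # -> mkvdisk -name volume-0001 -mdiskgrp volpool -iogrp 0 -size 10
    #    -unit gb -easytier on -rsize 2% -autoexpand -warning 0
    #    -grainsize 256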
- """ - LOG.debug(_('enter: initialize_connection: volume %(vol)s with ' - 'connector %(conn)s') % {'vol': str(volume), - 'conn': str(connector)}) - - initiator_name = connector['initiator'] - volume_name = volume['name'] - - host_name = self._get_host_from_iscsiname(initiator_name) - # Check if a host is defined for the iSCSI initiator name - if host_name is None: - # Host does not exist - add a new host to Storwize/SVC - host_name = self._create_new_host('host%s' % initiator_name, - initiator_name) - # Verify that create_new_host succeeded - self._driver_assert(host_name is not None, - _('_create_new_host failed to return the host name.')) - - lun_id = self._map_vol_to_host(volume_name, host_name) - - # Get preferred path - # Only IPv4 for now because lack of OpenStack support - # TODO(ronenkat): Add support for IPv6 - volume_attributes = self._get_volume_attributes(volume_name) - if (volume_attributes is not None and - 'preferred_node_id' in volume_attributes): - preferred_node = volume_attributes['preferred_node_id'] - preferred_node_entry = None - for node in self.iscsi_ipv4_conf: - if node['node_id'] == preferred_node: - preferred_node_entry = node - break - if preferred_node_entry is None: - preferred_node_entry = self.iscsi_ipv4_conf[0] - LOG.error(_('initialize_connection: did not find preferred ' - 'node %(node)s for volume %(vol)s in iSCSI ' - 'configuration') % {'node': preferred_node, - 'vol': volume_name}) - else: - # Get 1st node - preferred_node_entry = self.iscsi_ipv4_conf[0] - LOG.error( - _('initialize_connection: did not find a preferred node ' - 'for volume %s in iSCSI configuration') % volume_name) - - properties = {} - # We didn't use iSCSI discover, as in server-based iSCSI - properties['target_discovered'] = False - # We take the first IP address for now. Ideally, OpenStack will - # support multipath for improved performance. - properties['target_portal'] = ('%s:%s' % - (preferred_node_entry['ip'][0], '3260')) - properties['target_iqn'] = preferred_node_entry['iscsi_name'] - properties['target_lun'] = lun_id - properties['volume_id'] = volume['id'] - - LOG.debug(_('leave: initialize_connection:\n volume: %(vol)s\n ' - 'connector %(conn)s\n properties: %(prop)s') - % {'vol': str(volume), - 'conn': str(connector), - 'prop': str(properties)}) - - return {'driver_volume_type': 'iscsi', 'data': properties, } - - def terminate_connection(self, volume, connector): - """Cleanup after an iSCSI connection has been terminated. - - When we clean up a terminated connection between a given iSCSI name - and volume, we: - 1. Translate the given iSCSI name to a host name - 2. Remove the volume-to-host mapping if it exists - 3. Delete the host if it has no more mappings (hosts are created - automatically by this driver when mappings are created) - """ - LOG.debug(_('enter: terminate_connection: volume %(vol)s with ' - 'connector %(conn)s') % {'vol': str(volume), - 'conn': str(connector)}) - - vol_name = volume['name'] - initiator_name = connector['initiator'] - host_name = self._get_host_from_iscsiname(initiator_name) - # Verify that _get_host_from_iscsiname returned the host. - # This should always succeed as we terminate an existing connection. 
- self._driver_assert(host_name is not None, - _('_get_host_from_iscsiname failed to return the host name ' - 'for iscsi name %s') % initiator_name) - - # Check if vdisk-host mapping exists, remove if it does - mapping_data = self._get_hostvdisk_mappings(host_name) - if vol_name in mapping_data: - out, err = self._run_ssh('rmvdiskhostmap -host %s %s' - % (host_name, vol_name)) - # Verify CLI behaviour - no output is returned from - # rmvdiskhostmap - self._driver_assert(len(out.strip()) == 0, - _('delete mapping of volume %(vol)s to host %(host)s ' - '- non empty output from CLI.\n ' - 'stdout: %(out)s\n stderr: %(err)s') - % {'vol': vol_name, - 'host': host_name, - 'out': str(out), - 'err': str(err)}) - del mapping_data[vol_name] - else: - LOG.error(_('terminate_connection: no mapping of volume ' - '%(vol)s to host %(host)s found') % - {'vol': vol_name, 'host': host_name}) - - # If this host has no more mappings, delete it - if not mapping_data: - self._delete_host(host_name) - - LOG.debug(_('leave: terminate_connection: volume %(vol)s with ' - 'connector %(conn)s') % {'vol': str(volume), - 'conn': str(connector)}) - - def _flashcopy_cleanup(self, fc_map_id, source, target): - """Clean up a failed FlashCopy operation.""" - - try: - out, err = self._run_ssh('stopfcmap -force %s' % fc_map_id) - out, err = self._run_ssh('rmfcmap -force %s' % fc_map_id) - except exception.ProcessExecutionError as e: - LOG.error(_('_run_flashcopy: fail to cleanup failed FlashCopy ' - 'mapping %(fc_map_id)% ' - 'from %(source)s to %(target)s.\n' - 'stdout: %(out)s\n stderr: %(err)s') - % {'fc_map_id': fc_map_id, - 'source': source, - 'target': target, - 'out': e.stdout, - 'err': e.stderr}) - - def _run_flashcopy(self, source, target): - """Create a FlashCopy mapping from the source to the target.""" - - LOG.debug( - _('enter: _run_flashcopy: execute FlashCopy from source ' - '%(source)s to target %(target)s') % {'source': source, - 'target': target}) - - fc_map_cli_cmd = ('mkfcmap -source %s -target %s -autodelete ' - '-cleanrate 0' % (source, target)) - out, err = self._run_ssh(fc_map_cli_cmd) - self._driver_assert(len(out.strip()) > 0, - _('create FC mapping from %(source)s to %(target)s - ' - 'did not find success message in CLI output.\n' - ' stdout: %(out)s\n stderr: %(err)s\n') - % {'source': source, - 'target': target, - 'out': str(out), - 'err': str(err)}) - - # Ensure that the output is as expected - match_obj = re.search('FlashCopy Mapping, id \[([0-9]+)\], ' - 'successfully created', out) - # Make sure we got a "successfully created" message with vdisk id - self._driver_assert(match_obj is not None, - _('create FC mapping from %(source)s to %(target)s - ' - 'did not find success message in CLI output.\n' - ' stdout: %(out)s\n stderr: %(err)s\n') - % {'source': source, - 'target': target, - 'out': str(out), - 'err': str(err)}) - - try: - fc_map_id = match_obj.group(1) - self._driver_assert(fc_map_id is not None, - _('create FC mapping from %(source)s to %(target)s - ' - 'did not find mapping id in CLI output.\n' - ' stdout: %(out)s\n stderr: %(err)s\n') - % {'source': source, - 'target': target, - 'out': str(out), - 'err': str(err)}) - except IndexError: - self._driver_assert(False, - _('create FC mapping from %(source)s to %(target)s - ' - 'did not find mapping id in CLI output.\n' - ' stdout: %(out)s\n stderr: %(err)s\n') - % {'source': source, - 'target': target, - 'out': str(out), - 'err': str(err)}) - try: - out, err = self._run_ssh('prestartfcmap %s' % fc_map_id) - except 
exception.ProcessExecutionError as e: - with excutils.save_and_reraise_exception(): - LOG.error(_('_run_flashcopy: failed to prepare FlashCopy ' - 'from %(source)s to %(target)s.\n' - 'stdout: %(out)s\n stderr: %(err)s') - % {'source': source, - 'target': target, - 'out': e.stdout, - 'err': e.stderr}) - self._flashcopy_cleanup(fc_map_id, source, target) - - mapping_ready = False - wait_time = 5 - # Allow waiting up to the timeout (set as a parameter) - max_retries = (int(FLAGS.storwize_svc_flashcopy_timeout) - / wait_time) + 1 - for try_number in range(1, max_retries): - mapping_attributes = self._get_flashcopy_mapping_attributes( - fc_map_id) - if (mapping_attributes is None or - 'status' not in mapping_attributes): - break - if mapping_attributes['status'] == 'prepared': - mapping_ready = True - break - elif mapping_attributes['status'] != 'preparing': - # Unexpected mapping status - exception_msg = (_('unexpected mapping status %(status)s ' - 'for mapping %(id)s. Attributes: ' - '%(attr)s') - % {'status': mapping_attributes['status'], - 'id': fc_map_id, - 'attr': mapping_attributes}) - raise exception.VolumeBackendAPIException( - data=exception_msg) - # Need to wait for mapping to be prepared, wait a few seconds - time.sleep(wait_time) - - if not mapping_ready: - exception_msg = (_('mapping %(id)s prepare failed to complete ' - 'within the allotted %(to)s seconds timeout. ' - 'Terminating') % {'id': fc_map_id, - 'to': FLAGS.storwize_svc_flashcopy_timeout}) - LOG.error(_('_run_flashcopy: failed to start FlashCopy ' - 'from %(source)s to %(target)s with ' - 'exception %(ex)s') - % {'source': source, - 'target': target, - 'ex': exception_msg}) - self._flashcopy_cleanup(fc_map_id, source, target) - raise exception.InvalidSnapshot( - reason=_('_run_flashcopy: %s') % exception_msg) - - try: - out, err = self._run_ssh('startfcmap %s' % fc_map_id) - except exception.ProcessExecutionError as e: - with excutils.save_and_reraise_exception(): - LOG.error(_('_run_flashcopy: failed to start FlashCopy ' - 'from %(source)s to %(target)s.\n' - 'stdout: %(out)s\n stderr: %(err)s') - % {'source': source, - 'target': target, - 'out': e.stdout, - 'err': e.stderr}) - self._flashcopy_cleanup(fc_map_id, source, target) - - LOG.debug(_('leave: _run_flashcopy: FlashCopy started from ' - '%(source)s to %(target)s') % {'source': source, - 'target': target}) - - def create_volume_from_snapshot(self, volume, snapshot): - """Create a new volume from a snapshot.""" - - source_volume = snapshot['name'] - tgt_volume = volume['name'] - - LOG.debug(_('enter: create_volume_from_snapshot: snapshot %(tgt)s ' - 'from volume %(src)s') % {'tgt': tgt_volume, - 'src': source_volume}) - - src_volume_attributes = self._get_volume_attributes(source_volume) - if src_volume_attributes is None: - exception_msg = (_('create_volume_from_snapshot: source volume %s ' - 'does not exist') % source_volume) - LOG.error(exception_msg) - raise exception.SnapshotNotFound(exception_msg, - volume_id=source_volume) - - self._driver_assert('capacity' in src_volume_attributes, - _('create_volume_from_snapshot: cannot get source ' - 'volume %(src)s capacity from volume attributes ' - '%(attr)s') % {'src': source_volume, - 'attr': src_volume_attributes}) - src_volume_size = src_volume_attributes['capacity'] - - tgt_volume_attributes = self._get_volume_attributes(tgt_volume) - # Does the snapshot target exist? 
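The prepare wait in _run_flashcopy above is a fixed-interval poll against the mapping status; its skeleton, with the lsfcmap status lookup left abstract (the helper and callback names are invented)::

    import time

    def wait_until_prepared(get_status, timeout=120, wait_time=5):
        # Poll every wait_time seconds until 'prepared' or timeout.
        for _try in range(timeout // wait_time + 1):
            status = get_status()
            if status == 'prepared':
                return True
            if status != 'preparing':
                raise RuntimeError('unexpected mapping status %s' % status)
            time.sleep(wait_time)
        return False

    wait_until_prepared(lambda: 'prepared')   # -> True immediately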
- if tgt_volume_attributes is not None: - exception_msg = (_('create_volume_from_snapshot: target volume %s ' - 'already exists, cannot create') % tgt_volume) - LOG.error(exception_msg) - raise exception.InvalidSnapshot(reason=exception_msg) - - snapshot_volume = {} - snapshot_volume['name'] = tgt_volume - snapshot_volume['size'] = src_volume_size - - self._create_volume(snapshot_volume, units='b') - - try: - self._run_flashcopy(source_volume, tgt_volume) - except Exception: - with excutils.save_and_reraise_exception(): - # Clean up newly-created snapshot if the FlashCopy failed - self._delete_volume(snapshot_volume, True) - - LOG.debug( - _('leave: create_volume_from_snapshot: %s created successfully') - % tgt_volume) - - def create_snapshot(self, snapshot): - """Create a new snapshot using FlashCopy.""" - - src_volume = snapshot['volume_name'] - tgt_volume = snapshot['name'] - - # Flag to keep track of created volumes in case FlashCopy - tgt_volume_created = False - - LOG.debug(_('enter: create_snapshot: snapshot %(tgt)s from ' - 'volume %(src)s') % {'tgt': tgt_volume, - 'src': src_volume}) - - src_volume_attributes = self._get_volume_attributes(src_volume) - if src_volume_attributes is None: - exception_msg = ( - _('create_snapshot: source volume %s does not exist') - % src_volume) - LOG.error(exception_msg) - raise exception.VolumeNotFound(exception_msg, - volume_id=src_volume) - - self._driver_assert('capacity' in src_volume_attributes, - _('create_volume_from_snapshot: cannot get source ' - 'volume %(src)s capacity from volume attributes ' - '%(attr)s') % {'src': src_volume, - 'attr': src_volume_attributes}) - - source_volume_size = src_volume_attributes['capacity'] - - tgt_volume_attributes = self._get_volume_attributes(tgt_volume) - # Does the snapshot target exist? 
- snapshot_volume = {} - if tgt_volume_attributes is None: - # No, create a new snapshot volume - snapshot_volume['name'] = tgt_volume - snapshot_volume['size'] = source_volume_size - self._create_volume(snapshot_volume, units='b') - tgt_volume_created = True - else: - # Yes, target exists, verify exact same size as source - self._driver_assert('capacity' in tgt_volume_attributes, - _('create_volume_from_snapshot: cannot get source ' - 'volume %(src)s capacity from volume attributes ' - '%(attr)s') % {'src': tgt_volume, - 'attr': tgt_volume_attributes}) - target_volume_size = tgt_volume_attributes['capacity'] - if target_volume_size != source_volume_size: - exception_msg = ( - _('create_snapshot: source %(src)s and target ' - 'volume %(tgt)s have different capacities ' - '(source:%(ssize)s target:%(tsize)s)') % - {'src': src_volume, - 'tgt': tgt_volume, - 'ssize': source_volume_size, - 'tsize': target_volume_size}) - LOG.error(exception_msg) - raise exception.InvalidSnapshot(reason=exception_msg) - - try: - self._run_flashcopy(src_volume, tgt_volume) - except exception.InvalidSnapshot: - with excutils.save_and_reraise_exception(): - # Clean up newly-created snapshot if the FlashCopy failed - if tgt_volume_created: - self._delete_volume(snapshot_volume, True) - - LOG.debug(_('leave: create_snapshot: %s created successfully') - % tgt_volume) - - def delete_snapshot(self, snapshot): - self._delete_snapshot(snapshot, False) - - def _delete_snapshot(self, snapshot, force_opt): - """Delete a snapshot from the storage.""" - LOG.debug(_('enter: delete_snapshot: snapshot %s') % snapshot) - - snapshot_defined = self._is_volume_defined(snapshot['name']) - if snapshot_defined: - if force_opt: - self._delete_volume(snapshot, force_opt) - else: - self.delete_volume(snapshot) - - LOG.debug(_('leave: delete_snapshot: snapshot %s') % snapshot) - - def _get_host_from_iscsiname(self, iscsi_name): - """List the hosts defined in the storage. - - Return the host name with the given iSCSI name, or None if there is - no host name with that iSCSI name. - """ - - LOG.debug(_('enter: _get_host_from_iscsiname: iSCSI initiator %s') - % iscsi_name) - - # Get list of host in the storage - ssh_cmd = 'lshost -delim !' - out, err = self._run_ssh(ssh_cmd) - - if (len(out.strip()) == 0): - return None - - err_msg = _('_get_host_from_iscsiname: ' - 'failed with unexpected CLI output.\n' - ' command: %(cmd)s\n stdout: %(out)s\n ' - 'stderr: %(err)s') % {'cmd': ssh_cmd, - 'out': str(out), - 'err': str(err)} - host_lines = out.strip().split('\n') - self._driver_assert(len(host_lines) > 0, err_msg) - header = host_lines.pop(0).split('!') - self._driver_assert('name' in header, err_msg) - name_index = header.index('name') - - hosts = map(lambda x: x.split('!')[name_index], host_lines) - hostname = None - - # For each host, get its details and check for its iSCSI name - for host in hosts: - ssh_cmd = 'lshost -delim ! %s' % host - out, err = self._run_ssh(ssh_cmd) - self._driver_assert(len(out) > 0, - _('_get_host_from_iscsiname: ' - 'Unexpected response from CLI output. ' - 'Command: %(cmd)s\n stdout: %(out)s\n stderr: %(err)s') - % {'cmd': ssh_cmd, - 'out': str(out), - 'err': str(err)}) - for attrib_line in out.split('\n'): - # If '!' 
not found, return the string and two empty strings - attrib_name, foo, attrib_value = attrib_line.partition('!') - if attrib_name == 'iscsi_name': - if iscsi_name == attrib_value: - hostname = host - break - if hostname is not None: - break - - LOG.debug(_('leave: _get_host_from_iscsiname: iSCSI initiator %s') - % iscsi_name) - - return hostname - - def _create_new_host(self, host_name, initiator_name): - """Create a new host on the storage system. - - We modify the given host name, replace any invalid characters and - adding a random suffix to avoid conflicts due to the translation. The - host is associated with the given iSCSI initiator name. - """ - - LOG.debug(_('enter: _create_new_host: host %(name)s with iSCSI ' - 'initiator %(init)s') % {'name': host_name, - 'init': initiator_name}) - - if isinstance(host_name, unicode): - host_name = host_name.translate(self._unicode_host_name_filter) - elif isinstance(host_name, str): - host_name = host_name.translate(self._string_host_name_filter) - else: - msg = _('_create_new_host: cannot clean host name. Host name ' - 'is not unicode or string') - LOG.error(msg) - raise exception.NoValidHost(reason=msg) - - # Add 5 digit random suffix to the host name to avoid - # conflicts in host names after removing invalid characters - # for Storwize/SVC names - host_name = '%s_%s' % (host_name, random.randint(10000, 99999)) - out, err = self._run_ssh('mkhost -name "%s" -iscsiname "%s"' - % (host_name, initiator_name)) - self._driver_assert(len(out.strip()) > 0 and - 'successfully created' in out, - _('create host %(name)s with iSCSI initiator %(init)s - ' - 'did not find success message in CLI output.\n ' - 'stdout: %(out)s\n stderr: %(err)s\n') - % {'name': host_name, - 'init': initiator_name, - 'out': str(out), - 'err': str(err)}) - - LOG.debug(_('leave: _create_new_host: host %(host)s with iSCSI ' - 'initiator %(init)s') % {'host': host_name, - 'init': initiator_name}) - - return host_name - - def _delete_host(self, host_name): - """Delete a host and associated iSCSI initiator name.""" - - LOG.debug(_('enter: _delete_host: host %s ') % host_name) - - # Check if host exists on system, expect to find the host - is_defined = self._is_host_defined(host_name) - if is_defined: - # Delete host - out, err = self._run_ssh('rmhost %s ' % host_name) - else: - LOG.info(_('warning: tried to delete host %(name)s but ' - 'it does not exist.') % {'name': host_name}) - - LOG.debug(_('leave: _delete_host: host %s ') % host_name) - - def _is_volume_defined(self, volume_name): - """Check if volume is defined.""" - LOG.debug(_('enter: _is_volume_defined: volume %s ') % volume_name) - volume_attributes = self._get_volume_attributes(volume_name) - LOG.debug(_('leave: _is_volume_defined: volume %(vol)s with %(str)s ') - % {'vol': volume_name, - 'str': volume_attributes is not None}) - if volume_attributes is None: - return False - else: - return True - - def _is_host_defined(self, host_name): - """Check if a host is defined on the storage.""" - - LOG.debug(_('enter: _is_host_defined: host %s ') % host_name) - - # Get list of hosts with the name %host_name% - # We expect zero or one line if host does not exist, - # two lines if it does exist, otherwise error - out, err = self._run_ssh('lshost -filtervalue name=%s -delim !' 
- % host_name) - if len(out.strip()) == 0: - return False - - lines = out.strip().split('\n') - self._driver_assert(len(lines) <= 2, - _('_is_host_defined: Unexpected response from CLI output.\n ' - 'stdout: %(out)s\n stderr: %(err)s\n') - % {'out': str(out), - 'err': str(err)}) - - if len(lines) == 2: - host_info = self._get_hdr_dic(lines[0], lines[1], '!') - host_name_from_storage = host_info['name'] - # Make sure we got the data for the right host - self._driver_assert(host_name_from_storage == host_name, - _('Data received for host %(host1)s instead of host ' - '%(host2)s.\n ' - 'stdout: %(out)s\n stderr: %(err)s\n') - % {'host1': host_name_from_storage, - 'host2': host_name, - 'out': str(out), - 'err': str(err)}) - else: # 0 or 1 lines - host_name_from_storage = None - - LOG.debug(_('leave: _is_host_defined: host %(host)s with %(str)s ') % { - 'host': host_name, - 'str': host_name_from_storage is not None}) - - if host_name_from_storage is None: - return False - else: - return True - - def _get_hostvdisk_mappings(self, host_name): - """Return the defined storage mappings for a host.""" - - return_data = {} - ssh_cmd = 'lshostvdiskmap -delim ! %s' % host_name - out, err = self._run_ssh(ssh_cmd) - - mappings = out.strip().split('\n') - if len(mappings) > 0: - header = mappings.pop(0) - for mapping_line in mappings: - mapping_data = self._get_hdr_dic(header, mapping_line, '!') - return_data[mapping_data['vdisk_name']] = mapping_data - - return return_data - - def _map_vol_to_host(self, volume_name, host_name): - """Create a mapping between a volume to a host.""" - - LOG.debug(_('enter: _map_vol_to_host: volume %(vol)s to ' - 'host %(host)s') % {'vol': volume_name, - 'host': host_name}) - - # Check if this volume is already mapped to this host - mapping_data = self._get_hostvdisk_mappings(host_name) - - mapped_flag = False - result_lun = '-1' - if volume_name in mapping_data: - mapped_flag = True - result_lun = mapping_data[volume_name]['SCSI_id'] - else: - lun_used = [] - for k, v in mapping_data.iteritems(): - lun_used.append(int(v['SCSI_id'])) - lun_used.sort() - # Assume all luns are taken to this point, and then try to find - # an unused one - result_lun = str(len(lun_used)) - for index, n in enumerate(lun_used): - if n > index: - result_lun = str(index) - - # Volume is not mapped to host, create a new LUN - if not mapped_flag: - out, err = self._run_ssh('mkvdiskhostmap -host %s -scsi %s %s' - % (host_name, result_lun, volume_name)) - self._driver_assert(len(out.strip()) > 0 and - 'successfully created' in out, - _('_map_vol_to_host: mapping host %(host)s to ' - 'volume %(vol)s with LUN ' - '%(lun)s - did not find success message in CLI output. ' - 'stdout: %(out)s\n stderr: %(err)s\n') - % {'host': host_name, - 'vol': volume_name, - 'lun': result_lun, - 'out': str(out), - 'err': str(err)}) - - LOG.debug(_('leave: _map_vol_to_host: LUN %(lun)s, volume %(vol)s, ' - 'host %(host)s') % {'lun': result_lun, 'vol': volume_name, - 'host': host_name}) - - return result_lun - - def _get_flashcopy_mapping_attributes(self, fc_map_id): - """Return the attributes of a FlashCopy mapping. - - Returns the attributes for the specified FlashCopy mapping, or - None if the mapping does not exist. - An exception is raised if the information from system can not - be parsed or matched to a single FlashCopy mapping (this case - should not happen under normal conditions). 
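_map_vol_to_host above picks a SCSI id by scanning the sorted in-use ids for a gap. Note that as written the loop keeps overwriting result_lun, so the last gap index wins rather than the first free id; an isolated version with an explicit break, which is presumably the intent (helper name invented)::

    def pick_scsi_id(lun_used):
        lun_used = sorted(lun_used)
        # Default: the next id past the highest when there is no gap.
        result = len(lun_used)
        for index, n in enumerate(lun_used):
            if n > index:
                result = index
                break
        return result

    assert pick_scsi_id([0, 1, 2]) == 3
    assert pick_scsi_id([0, 2, 3]) == 1   # first free id wins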
- """ - - LOG.debug(_('enter: _get_flashcopy_mapping_attributes: mapping %s') - % fc_map_id) - # Get the lunid to be used - - fc_ls_map_cmd = ('lsfcmap -filtervalue id=%s -delim !' % fc_map_id) - out, err = self._run_ssh(fc_ls_map_cmd) - self._driver_assert(len(out) > 0, - _('_get_flashcopy_mapping_attributes: ' - 'Unexpected response from CLI output. ' - 'Command: %(cmd)s\n stdout: %(out)s\n stderr: %(err)s') - % {'cmd': fc_ls_map_cmd, - 'out': str(out), - 'err': str(err)}) - - # Get list of FlashCopy mappings - # We expect zero or one line if mapping does not exist, - # two lines if it does exist, otherwise error - lines = out.strip().split('\n') - self._driver_assert(len(lines) <= 2, - _('_get_flashcopy_mapping_attributes: ' - 'Unexpected response from CLI output. ' - 'Command: %(cmd)s\n stdout: %(out)s\n stderr: %(err)s') - % {'cmd': fc_ls_map_cmd, - 'out': str(out), - 'err': str(err)}) - - if len(lines) == 2: - attributes = self._get_hdr_dic(lines[0], lines[1], '!') - else: # 0 or 1 lines - attributes = None - - LOG.debug(_('leave: _get_flashcopy_mapping_attributes: mapping ' - '%(id)s, attributes %(attr)s') % - {'id': fc_map_id, - 'attr': attributes}) - - return attributes - - def _get_volume_attributes(self, volume_name): - """Return volume attributes, or None if volume does not exist - - Exception is raised if the information from system can not be - parsed/matched to a single volume. - """ - - LOG.debug(_('enter: _get_volume_attributes: volume %s') - % volume_name) - # Get the lunid to be used - - try: - ssh_cmd = 'lsvdisk -bytes -delim ! %s ' % volume_name - out, err = self._run_ssh(ssh_cmd) - except exception.ProcessExecutionError as e: - # Didn't get details from the storage, return None - LOG.error(_('CLI Exception output:\n command: %(cmd)s\n ' - 'stdout: %(out)s\n stderr: %(err)s') % - {'cmd': ssh_cmd, - 'out': e.stdout, - 'err': e.stderr}) - return None - - self._driver_assert(len(out) > 0, - ('_get_volume_attributes: ' - 'Unexpected response from CLI output. ' - 'Command: %(cmd)s\n stdout: %(out)s\n stderr: %(err)s') - % {'cmd': ssh_cmd, - 'out': str(out), - 'err': str(err)}) - attributes = {} - for attrib_line in out.split('\n'): - # If '!' not found, return the string and two empty strings - attrib_name, foo, attrib_value = attrib_line.partition('!') - if attrib_name is not None and attrib_name.strip() > 0: - attributes[attrib_name] = attrib_value - - LOG.debug(_('leave: _get_volume_attributes:\n volume %(vol)s\n ' - 'attributes: %(attr)s') - % {'vol': volume_name, - 'attr': str(attributes)}) - - return attributes diff --git a/cinder/volume/utils.py b/cinder/volume/utils.py index a79a39a69e..8f57509cfc 100644 --- a/cinder/volume/utils.py +++ b/cinder/volume/utils.py @@ -1,6 +1,4 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright (c) 2012 OpenStack, LLC. +# Copyright (c) 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain @@ -16,23 +14,40 @@ """Volume-related Utilities and helpers.""" -from cinder import flags -from cinder import utils -from cinder.openstack.common.notifier import api as notifier_api + +import math + +from oslo.config import cfg + +from cinder.brick.local_dev import lvm as brick_lvm +from cinder import exception from cinder.openstack.common import log as logging +from cinder.openstack.common.notifier import api as notifier_api +from cinder.openstack.common import processutils +from cinder.openstack.common import strutils from cinder.openstack.common import timeutils +from cinder import units +from cinder import utils -FLAGS = flags.FLAGS +CONF = cfg.CONF + LOG = logging.getLogger(__name__) +def get_host_from_queue(queuename): + # This assumes the queue is named something like cinder-volume + # and does not have dot separators in the queue name + return queuename.split('@', 1)[0].split('.', 1)[1] + + def notify_usage_exists(context, volume_ref, current_period=False): - """ Generates 'exists' notification for a volume for usage auditing - purposes. + """Generates 'exists' notification for a volume for usage auditing + purposes. - Generates usage for last completed period, unless 'current_period' - is True.""" + Generates usage for last completed period, unless 'current_period' + is True. + """ begin, end = utils.last_completed_audit_period() if current_period: audit_start = end @@ -44,41 +59,180 @@ def notify_usage_exists(context, volume_ref, current_period=False): extra_usage_info = dict(audit_period_beginning=str(audit_start), audit_period_ending=str(audit_end)) - notify_about_volume_usage( - context, volume_ref, 'exists', extra_usage_info=extra_usage_info) + notify_about_volume_usage(context, volume_ref, + 'exists', extra_usage_info=extra_usage_info) + + +def null_safe_str(s): + return str(s) if s else '' def _usage_from_volume(context, volume_ref, **kw): - def null_safe_str(s): - return str(s) if s else '' - - usage_info = dict( - tenant_id=volume_ref['project_id'], - user_id=volume_ref['user_id'], - volume_id=volume_ref['id'], - volume_type=volume_ref['volume_type_id'], - display_name=volume_ref['display_name'], - launched_at=null_safe_str(volume_ref['launched_at']), - created_at=null_safe_str(volume_ref['created_at']), - status=volume_ref['status'], - snapshot_id=volume_ref['snapshot_id'], - size=volume_ref['size']) + usage_info = dict(tenant_id=volume_ref['project_id'], + user_id=volume_ref['user_id'], + availability_zone=volume_ref['availability_zone'], + volume_id=volume_ref['id'], + volume_type=volume_ref['volume_type_id'], + display_name=volume_ref['display_name'], + launched_at=null_safe_str(volume_ref['launched_at']), + created_at=null_safe_str(volume_ref['created_at']), + status=volume_ref['status'], + snapshot_id=volume_ref['snapshot_id'], + size=volume_ref['size']) usage_info.update(kw) return usage_info def notify_about_volume_usage(context, volume, event_suffix, - extra_usage_info=None, host=None): + extra_usage_info=None, host=None): if not host: - host = FLAGS.host + host = CONF.host if not extra_usage_info: extra_usage_info = {} - usage_info = _usage_from_volume( - context, volume, **extra_usage_info) + usage_info = _usage_from_volume(context, volume, **extra_usage_info) notifier_api.notify(context, 'volume.%s' % host, 'volume.%s' % event_suffix, notifier_api.INFO, usage_info) + + +def _usage_from_snapshot(context, snapshot_ref, **extra_usage_info): + usage_info = { + 'tenant_id': snapshot_ref['project_id'], + 'user_id': snapshot_ref['user_id'], + 
'availability_zone': snapshot_ref.volume['availability_zone'], + 'volume_id': snapshot_ref['volume_id'], + 'volume_size': snapshot_ref['volume_size'], + 'snapshot_id': snapshot_ref['id'], + 'display_name': snapshot_ref['display_name'], + 'created_at': str(snapshot_ref['created_at']), + 'status': snapshot_ref['status'], + 'deleted': null_safe_str(snapshot_ref['deleted']) + } + + usage_info.update(extra_usage_info) + return usage_info + + +def notify_about_snapshot_usage(context, snapshot, event_suffix, + extra_usage_info=None, host=None): + if not host: + host = CONF.host + + if not extra_usage_info: + extra_usage_info = {} + + usage_info = _usage_from_snapshot(context, snapshot, **extra_usage_info) + + notifier_api.notify(context, 'snapshot.%s' % host, + 'snapshot.%s' % event_suffix, + notifier_api.INFO, usage_info) + + +def _calculate_count(size_in_m, blocksize): + + # Check if volume_dd_blocksize is valid + try: + # Rule out zero-sized/negative dd blocksize which + # cannot be caught by strutils + if blocksize.startswith(('-', '0')): + raise ValueError + bs = strutils.to_bytes(blocksize) + except (ValueError, TypeError): + msg = (_("Incorrect value error: %(blocksize)s, " + "it may indicate that \'volume_dd_blocksize\' " + "was configured incorrectly. Fall back to default.") + % {'blocksize': blocksize}) + LOG.warn(msg) + # Fall back to default blocksize + CONF.clear_override('volume_dd_blocksize') + blocksize = CONF.volume_dd_blocksize + bs = strutils.to_bytes(blocksize) + + count = math.ceil(size_in_m * units.MiB / float(bs)) + + return blocksize, int(count) + + +def copy_volume(srcstr, deststr, size_in_m, blocksize, sync=False, + execute=utils.execute): + # Use O_DIRECT to avoid thrashing the system buffer cache + extra_flags = ['iflag=direct', 'oflag=direct'] + + # Check whether O_DIRECT is supported + try: + execute('dd', 'count=0', 'if=%s' % srcstr, 'of=%s' % deststr, + *extra_flags, run_as_root=True) + except processutils.ProcessExecutionError: + extra_flags = [] + + # If the volume is being unprovisioned then + # request the data is persisted before returning, + # so that it's not discarded from the cache. 
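The block-count math in _calculate_count above rounds the copy size up to whole dd blocks. Isolated, with the MiB constant written out (1024 * 1024, matching the units module used above)::

    import math

    MiB = 1024 * 1024

    def calculate_count(size_in_m, bs_bytes):
        # dd blocks needed to cover size_in_m MiB at bs_bytes per block.
        return int(math.ceil(size_in_m * MiB / float(bs_bytes)))

    assert calculate_count(1024, 1024 * 1024) == 1024   # 1M blocks
    assert calculate_count(100, 4096) == 25600          # 4K blocks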
+ if sync and not extra_flags: + extra_flags.append('conv=fdatasync') + + blocksize, count = _calculate_count(size_in_m, blocksize) + + # Perform the copy + execute('dd', 'if=%s' % srcstr, 'of=%s' % deststr, + 'count=%d' % count, + 'bs=%s' % blocksize, + *extra_flags, run_as_root=True) + + +def clear_volume(volume_size, volume_path, volume_clear=None, + volume_clear_size=None): + """Unprovision old volumes to prevent data leaking between users.""" + if volume_clear is None: + volume_clear = CONF.volume_clear + + if volume_clear_size is None: + volume_clear_size = CONF.volume_clear_size + + LOG.info(_("Performing secure delete on volume: %s") % volume_path) + + if volume_clear == 'zero': + if volume_clear_size == 0: + return copy_volume('/dev/zero', volume_path, volume_size, + CONF.volume_dd_blocksize, + sync=True, execute=utils.execute) + else: + clear_cmd = ['shred', '-n0', '-z', '-s%dMiB' % volume_clear_size] + elif volume_clear == 'shred': + clear_cmd = ['shred', '-n3'] + if volume_clear_size: + clear_cmd.append('-s%dMiB' % volume_clear_size) + else: + raise exception.InvalidConfigurationValue( + option='volume_clear', + value=volume_clear) + + clear_cmd.append(volume_path) + utils.execute(*clear_cmd, run_as_root=True) + + +def supports_thin_provisioning(): + return brick_lvm.LVM.supports_thin_provisioning( + utils.get_root_helper()) + + +def get_all_volumes(vg_name=None): + return brick_lvm.LVM.get_all_volumes( + utils.get_root_helper(), + vg_name) + + +def get_all_physical_volumes(vg_name=None): + return brick_lvm.LVM.get_all_physical_volumes( + utils.get_root_helper(), + vg_name) + + +def get_all_volume_groups(vg_name=None): + return brick_lvm.LVM.get_all_volume_groups( + utils.get_root_helper(), + vg_name) diff --git a/cinder/volume/volume_types.py b/cinder/volume/volume_types.py index 3c8bb82643..3f4bb35269 100644 --- a/cinder/volume/volume_types.py +++ b/cinder/volume/volume_types.py @@ -1,7 +1,5 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright (c) 2011 Zadara Storage Inc. -# Copyright (c) 2011 OpenStack LLC. +# Copyright (c) 2011 OpenStack Foundation # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright (c) 2010 Citrix Systems, Inc. 
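To make the clear_volume dispatch above concrete, the three configurations reduce to the following commands (the device path is hypothetical)::

    dev = '/dev/vg/volume-x'   # hypothetical device path

    # volume_clear='zero', volume_clear_size=0: zero the entire volume
    # via copy_volume('/dev/zero', dev, ...), i.e. dd with O_DIRECT or,
    # failing that, conv=fdatasync.
    # volume_clear='zero', volume_clear_size=50: overwrite only 50 MiB.
    zero_cmd = ['shred', '-n0', '-z', '-s%dMiB' % 50, dev]
    # volume_clear='shred': three random passes over the whole device.
    shred_cmd = ['shred', '-n3', dev]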
@@ -21,35 +19,40 @@ """Built-in volume type properties.""" + +from oslo.config import cfg + from cinder import context from cinder import db from cinder import exception -from cinder import flags +from cinder.openstack.common.db import exception as db_exc from cinder.openstack.common import log as logging -FLAGS = flags.FLAGS + +CONF = cfg.CONF LOG = logging.getLogger(__name__) def create(context, name, extra_specs={}): """Creates volume types.""" try: - db.volume_type_create(context, - dict(name=name, - extra_specs=extra_specs)) - except exception.DBError, e: + type_ref = db.volume_type_create(context, + dict(name=name, + extra_specs=extra_specs)) + except db_exc.DBError as e: LOG.exception(_('DB error: %s') % e) raise exception.VolumeTypeCreateFailed(name=name, extra_specs=extra_specs) + return type_ref -def destroy(context, name): +def destroy(context, id): """Marks volume types as deleted.""" - if name is None: - msg = _("name cannot be None") + if id is None: + msg = _("id cannot be None") raise exception.InvalidVolumeType(reason=msg) else: - db.volume_type_destroy(context, name) + db.volume_type_destroy(context, id) def get_all_types(context, inactive=0, search_opts={}): @@ -66,7 +69,7 @@ def get_all_types(context, inactive=0, search_opts={}): def _check_extra_specs_match(vol_type, searchdict): for k, v in searchdict.iteritems(): if (k not in vol_type['extra_specs'].keys() - or vol_type['extra_specs'][k] != v): + or vol_type['extra_specs'][k] != v): return False return True @@ -111,15 +114,132 @@ def get_volume_type_by_name(context, name): return db.volume_type_get_by_name(context, name) -def is_key_value_present(volume_type_id, key, value, volume_type=None): +def get_default_volume_type(): + """Get the default volume type.""" + name = CONF.default_volume_type + vol_type = {} + + if name is not None: + ctxt = context.get_admin_context() + try: + vol_type = get_volume_type_by_name(ctxt, name) + except exception.VolumeTypeNotFoundByName as e: + # Couldn't find volume type with the name in default_volume_type + # flag, record this issue and move on + #TODO(zhiteng) consider add notification to warn admin + LOG.exception(_('Default volume type is not found, ' + 'please check default_volume_type config: %s'), e) + + return vol_type + + +def get_volume_type_extra_specs(volume_type_id, key=False): + volume_type = get_volume_type(context.get_admin_context(), + volume_type_id) + extra_specs = volume_type['extra_specs'] + if key: + if extra_specs.get(key): + return extra_specs.get(key) + else: + return False + else: + return extra_specs + + +def is_encrypted(context, volume_type_id): if volume_type_id is None: return False - if volume_type is None: - volume_type = get_volume_type(context.get_admin_context(), - volume_type_id) - if (volume_type.get('extra_specs') is None or - volume_type['extra_specs'].get(key) != value): - return False - else: - return True + encryption = db.volume_type_encryption_get(context, volume_type_id) + return encryption is not None + + +def get_volume_type_encryption(context, volume_type_id): + if volume_type_id is None: + return None + + encryption = db.volume_type_encryption_get(context, volume_type_id) + return encryption + + +def get_volume_type_qos_specs(volume_type_id): + ctxt = context.get_admin_context() + res = db.volume_type_qos_specs_get(ctxt, + volume_type_id) + return res + + +def volume_types_diff(context, vol_type_id1, vol_type_id2): + """Returns a 'diff' of two volume types and whether they are equal. 
+
+    Returns a tuple of (diff, equal), where 'equal' is a boolean indicating
+    whether there is any difference, and 'diff' is a dictionary with the
+    following format:
+    {'extra_specs': {'key1': (value_in_1st_vol_type, value_in_2nd_vol_type),
+                     'key2': (value_in_1st_vol_type, value_in_2nd_vol_type),
+                     ...},
+     'qos_specs': {'key1': (value_in_1st_vol_type, value_in_2nd_vol_type),
+                   'key2': (value_in_1st_vol_type, value_in_2nd_vol_type),
+                   ...},
+     'encryption': {'cipher': (value_in_1st_vol_type, value_in_2nd_vol_type),
+                    'key_size': (value_in_1st_vol_type, value_in_2nd_vol_type),
+                    ...}}
+    """
+    def _fix_qos_specs(qos_specs):
+        if qos_specs:
+            qos_specs.pop('id', None)
+            qos_specs.pop('name', None)
+            qos_specs.update(qos_specs.pop('specs', {}))
+
+    def _fix_encryption_specs(encryption):
+        if encryption:
+            encryption = dict(encryption)
+            for param in ['volume_type_id', 'created_at', 'updated_at',
+                          'deleted_at']:
+                encryption.pop(param, None)
+        return encryption
+
+    def _dict_diff(dict1, dict2):
+        res = {}
+        equal = True
+        if dict1 is None:
+            dict1 = {}
+        if dict2 is None:
+            dict2 = {}
+        for k, v in dict1.iteritems():
+            res[k] = (v, dict2.get(k))
+            if k not in dict2 or res[k][0] != res[k][1]:
+                equal = False
+        for k, v in dict2.iteritems():
+            res[k] = (dict1.get(k), v)
+            if k not in dict1 or res[k][0] != res[k][1]:
+                equal = False
+        return (res, equal)
+
+    all_equal = True
+    diff = {}
+    vol_type1 = get_volume_type(context, vol_type_id1)
+    vol_type2 = get_volume_type(context, vol_type_id2)
+
+    extra_specs1 = vol_type1.get('extra_specs')
+    extra_specs2 = vol_type2.get('extra_specs')
+    diff['extra_specs'], equal = _dict_diff(extra_specs1, extra_specs2)
+    if not equal:
+        all_equal = False
+
+    qos_specs1 = get_volume_type_qos_specs(vol_type_id1).get('qos_specs')
+    _fix_qos_specs(qos_specs1)
+    qos_specs2 = get_volume_type_qos_specs(vol_type_id2).get('qos_specs')
+    _fix_qos_specs(qos_specs2)
+    diff['qos_specs'], equal = _dict_diff(qos_specs1, qos_specs2)
+    if not equal:
+        all_equal = False
+
+    encryption1 = _fix_encryption_specs(
+        get_volume_type_encryption(context, vol_type_id1))
+    encryption2 = _fix_encryption_specs(
+        get_volume_type_encryption(context, vol_type_id2))
+    diff['encryption'], equal = _dict_diff(encryption1, encryption2)
+    if not equal:
+        all_equal = False
+
+    return (diff, all_equal)
diff --git a/cinder/volume/xensm.py b/cinder/volume/xensm.py
deleted file mode 100644
index b7f324f1c4..0000000000
--- a/cinder/volume/xensm.py
+++ /dev/null
@@ -1,247 +0,0 @@
-# Copyright (c) 2011 Citrix Systems, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
- -from cinder import exception -from cinder import flags -from cinder.openstack.common import log as logging -from cinder import utils -from cinder.virt.xenapi import connection as xenapi_conn -from cinder.virt.xenapi import volumeops -import cinder.volume.driver - -LOG = logging.getLogger(__name__) -FLAGS = flags.FLAGS - - -class XenSMDriver(cinder.volume.driver.VolumeDriver): - - def _convert_config_params(self, conf_str): - params = dict([item.split("=") for item in conf_str.split()]) - return params - - def _get_introduce_sr_keys(self, params): - if 'name_label' in params: - del params['name_label'] - keys = params.keys() - keys.append('sr_type') - return keys - - def _create_storage_repo(self, context, backend_ref): - """Either creates or introduces SR on host - depending on whether it exists in xapi db.""" - params = self._convert_config_params(backend_ref['config_params']) - if 'name_label' in params: - label = params['name_label'] - del params['name_label'] - else: - label = 'SR-' + str(backend_ref['id']) - - params['sr_type'] = backend_ref['sr_type'] - - if backend_ref['sr_uuid'] is None: - # run the sr create command - try: - LOG.debug(_('SR name = %s') % label) - LOG.debug(_('Params: %s') % str(params)) - sr_uuid = self._volumeops.create_sr(label, params) - # update sr_uuid and created in db - except Exception as ex: - LOG.debug(_("Failed to create sr %s...continuing") % - str(backend_ref['id'])) - msg = _('Create failed') - raise exception.VolumeBackendAPIException(data=msg) - - LOG.debug(_('SR UUID of new SR is: %s') % sr_uuid) - try: - self.db.sm_backend_conf_update(context, - backend_ref['id'], - dict(sr_uuid=sr_uuid)) - except Exception as ex: - LOG.exception(ex) - msg = _("Failed to update db") - raise exception.VolumeBackendAPIException(data=msg) - - else: - # sr introduce, if not already done - try: - self._volumeops.introduce_sr(backend_ref['sr_uuid'], label, - params) - except Exception as ex: - LOG.exception(ex) - LOG.debug(_("Failed to introduce sr %s...continuing") - % str(backend_ref['id'])) - - def _create_storage_repos(self, context): - """Create/Introduce storage repositories at start.""" - backends = self.db.sm_backend_conf_get_all(context) - for backend in backends: - try: - self._create_storage_repo(context, backend) - except Exception as ex: - LOG.exception(ex) - msg = _('Failed to reach backend %d') % backend['id'] - raise exception.VolumeBackendAPIException(data=msg) - - def __init__(self, *args, **kwargs): - """Connect to the hypervisor.""" - - # This driver leverages Xen storage manager, and hence requires - # hypervisor to be Xen - if FLAGS.connection_type != 'xenapi': - msg = _('XenSMDriver requires xenapi connection') - raise exception.VolumeBackendAPIException(data=msg) - - url = FLAGS.xenapi_connection_url - username = FLAGS.xenapi_connection_username - password = FLAGS.xenapi_connection_password - try: - session = xenapi_conn.XenAPISession(url, username, password) - self._volumeops = volumeops.VolumeOps(session) - except Exception as ex: - LOG.exception(ex) - msg = _("Failed to initiate session") - raise exception.VolumeBackendAPIException(data=msg) - - super(XenSMDriver, self).__init__(execute=utils.execute, - sync_exec=utils.execute, - *args, **kwargs) - - def do_setup(self, ctxt): - """Setup includes creating or introducing storage repos - existing in the database and destroying deleted ones.""" - - # TODO(renukaapte) purge storage repos - self.ctxt = ctxt - self._create_storage_repos(ctxt) - - def create_volume(self, volume): - """Creates a 
logical volume. Can optionally return a Dictionary of - changes to the volume object to be persisted.""" - - # For now the scheduling logic will be to try to fit the volume in - # the first available backend. - # TODO(renukaapte) better scheduling once APIs are in place - sm_vol_rec = None - backends = self.db.sm_backend_conf_get_all(self.ctxt) - for backend in backends: - # Ensure that storage repo exists, if not create. - # This needs to be done because if cinder compute and - # volume are both running on this host, then, as a - # part of detach_volume, compute could potentially forget SR - self._create_storage_repo(self.ctxt, backend) - sm_vol_rec = self._volumeops.create_volume_for_sm(volume, - backend['sr_uuid']) - if sm_vol_rec: - LOG.debug(_('Volume will be created in backend - %d') - % backend['id']) - break - - if sm_vol_rec: - # Update db - sm_vol_rec['id'] = volume['id'] - sm_vol_rec['backend_id'] = backend['id'] - try: - self.db.sm_volume_create(self.ctxt, sm_vol_rec) - except Exception as ex: - LOG.exception(ex) - msg = _("Failed to update volume in db") - raise exception.VolumeBackendAPIException(data=msg) - - else: - msg = _('Unable to create volume') - raise exception.VolumeBackendAPIException(data=msg) - - def delete_volume(self, volume): - - vol_rec = self.db.sm_volume_get(self.ctxt, volume['id']) - - try: - # If compute runs on this node, detach could have disconnected SR - backend_ref = self.db.sm_backend_conf_get(self.ctxt, - vol_rec['backend_id']) - self._create_storage_repo(self.ctxt, backend_ref) - self._volumeops.delete_volume_for_sm(vol_rec['vdi_uuid']) - except Exception as ex: - LOG.exception(ex) - msg = _("Failed to delete vdi") - raise exception.VolumeBackendAPIException(data=msg) - - try: - self.db.sm_volume_delete(self.ctxt, volume['id']) - except Exception as ex: - LOG.exception(ex) - msg = _("Failed to delete volume in db") - raise exception.VolumeBackendAPIException(data=msg) - - def local_path(self, volume): - return str(volume['id']) - - def undiscover_volume(self, volume): - """Undiscover volume on a remote host.""" - pass - - def discover_volume(self, context, volume): - return str(volume['id']) - - def check_for_setup_error(self): - pass - - def create_export(self, context, volume): - """Exports the volume.""" - pass - - def remove_export(self, context, volume): - """Removes an export for a logical volume.""" - pass - - def ensure_export(self, context, volume): - """Safely, synchronously recreates an export for a logical volume.""" - pass - - def initialize_connection(self, volume, connector): - try: - xensm_properties = dict(self.db.sm_volume_get(self.ctxt, - volume['id'])) - except Exception as ex: - LOG.exception(ex) - msg = _("Failed to find volume in db") - raise exception.VolumeBackendAPIException(data=msg) - - # Keep the volume id key consistent with what ISCSI driver calls it - xensm_properties['volume_id'] = xensm_properties['id'] - del xensm_properties['id'] - - try: - backend_conf = self.db.sm_backend_conf_get(self.ctxt, - xensm_properties['backend_id']) - except Exception as ex: - LOG.exception(ex) - msg = _("Failed to find backend in db") - raise exception.VolumeBackendAPIException(data=msg) - - params = self._convert_config_params(backend_conf['config_params']) - - xensm_properties['flavor_id'] = backend_conf['flavor_id'] - xensm_properties['sr_uuid'] = backend_conf['sr_uuid'] - xensm_properties['sr_type'] = backend_conf['sr_type'] - xensm_properties.update(params) - _introduce_sr_keys = self._get_introduce_sr_keys(params) - 
xensm_properties['introduce_sr_keys'] = _introduce_sr_keys - return { - 'driver_volume_type': 'xensm', - 'data': xensm_properties - } - - def terminate_connection(self, volume, connector): - pass diff --git a/cinder/volume/xiv.py b/cinder/volume/xiv.py deleted file mode 100644 index 6cbad0c180..0000000000 --- a/cinder/volume/xiv.py +++ /dev/null @@ -1,128 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright (c) 2012 IBM, Inc. -# Copyright (c) 2012 OpenStack LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# Authors: -# Erik Zaadi -# Avishay Traeger - -""" -Volume driver for IBM XIV storage systems. -""" - -from cinder import exception -from cinder import flags -from cinder.openstack.common import cfg -from cinder.openstack.common import importutils -from cinder.openstack.common import log as logging -from cinder.volume import san - -ibm_xiv_opts = [ - cfg.StrOpt('xiv_proxy', - default='xiv_openstack.nova_proxy.XIVNovaProxy', - help='Proxy driver'), -] - -FLAGS = flags.FLAGS -FLAGS.register_opts(ibm_xiv_opts) - -LOG = logging.getLogger('cinder.volume.xiv') - - -class XIVDriver(san.SanISCSIDriver): - """IBM XIV volume driver.""" - - def __init__(self, *args, **kwargs): - """Initialize the driver.""" - - proxy = importutils.import_class(FLAGS.xiv_proxy) - - self.xiv_proxy = proxy({ - "xiv_user": FLAGS.san_login, - "xiv_pass": FLAGS.san_password, - "xiv_address": FLAGS.san_ip, - "xiv_vol_pool": FLAGS.san_clustername - }, - LOG, - exception) - san.SanISCSIDriver.__init__(self, *args, **kwargs) - - def do_setup(self, context): - """Setup and verify IBM XIV storage connection.""" - - self.xiv_proxy.setup(context) - - def ensure_export(self, context, volume): - """Ensure an export.""" - - return self.xiv_proxy.ensure_export(context, volume) - - def create_export(self, context, volume): - """Create an export.""" - - return self.xiv_proxy.create_export(context, volume) - - def create_volume(self, volume): - """Create a volume on the IBM XIV storage system.""" - - return self.xiv_proxy.create_volume(volume) - - def delete_volume(self, volume): - """Delete a volume on the IBM XIV storage system.""" - - self.xiv_proxy.delete_volume(volume) - - def remove_export(self, context, volume): - """Disconnect a volume from an attached instance.""" - - return self.xiv_proxy.remove_export(context, volume) - - def initialize_connection(self, volume, connector): - """Map the created volume.""" - - return self.xiv_proxy.initialize_connection( - volume, - connector) - - def terminate_connection(self, volume, connector): - """Terminate a connection to a volume.""" - - return self.xiv_proxy.terminate_connection( - volume, - connector) - - def create_volume_from_snapshot(self, volume, snapshot): - """Create a volume from a snapshot.""" - - return self.xiv_proxy.create_volume_from_snapshot( - volume, - snapshot) - - def create_snapshot(self, snapshot): - """Create a snapshot.""" - - return self.xiv_proxy.create_snapshot(snapshot) - - def delete_snapshot(self, snapshot): - 
"""Delete a snapshot.""" - - return self.xiv_proxy.delete_snapshot(snapshot) - - def get_volume_stats(self, refresh=False): - """Get volume stats.""" - - return self.xiv_proxy.get_volume_stats(refresh) diff --git a/cinder/wsgi.py b/cinder/wsgi.py index 222efcad94..cee88a06e0 100644 --- a/cinder/wsgi.py +++ b/cinder/wsgi.py @@ -1,8 +1,6 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. -# Copyright 2010 OpenStack LLC. +# Copyright 2010 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -19,25 +17,56 @@ """Utility methods for working with WSGI servers.""" +from __future__ import print_function + +import errno +import os +import socket +import ssl import sys +import time import eventlet import eventlet.wsgi import greenlet +from oslo.config import cfg from paste import deploy import routes.middleware import webob.dec import webob.exc from cinder import exception -from cinder import flags from cinder.openstack.common import log as logging from cinder import utils -FLAGS = flags.FLAGS +socket_opts = [ + cfg.IntOpt('tcp_keepidle', + default=600, + help="Sets the value of TCP_KEEPIDLE in seconds for each " + "server socket. Not supported on OS X."), + cfg.StrOpt('ssl_ca_file', + default=None, + help="CA certificate file to use to verify " + "connecting clients"), + cfg.StrOpt('ssl_cert_file', + default=None, + help="Certificate file to use when starting " + "the server securely"), + cfg.StrOpt('ssl_key_file', + default=None, + help="Private key file to use when starting " + "the server securely"), +] + +CONF = cfg.CONF +CONF.register_opts(socket_opts) + LOG = logging.getLogger(__name__) +# Raise the default from 8192 to accommodate large tokens +eventlet.wsgi.MAX_HEADER_LINE = 16384 + class Server(object): """Server class to manage a WSGI server, serving a WSGI application.""" @@ -45,7 +74,7 @@ class Server(object): default_pool_size = 1000 def __init__(self, name, app, host=None, port=None, pool_size=None, - protocol=eventlet.wsgi.HttpProtocol): + protocol=eventlet.wsgi.HttpProtocol, backlog=128): """Initialize, but do not start, a WSGI server. :param name: Pretty name for logging. @@ -58,8 +87,8 @@ def __init__(self, name, app, host=None, port=None, pool_size=None, """ self.name = name self.app = app - self.host = host or "0.0.0.0" - self.port = port or 0 + self._host = host or "0.0.0.0" + self._port = port or 0 self._server = None self._socket = None self._protocol = protocol @@ -67,6 +96,91 @@ def __init__(self, name, app, host=None, port=None, pool_size=None, self._logger = logging.getLogger("eventlet.wsgi.server") self._wsgi_logger = logging.WritableLogger(self._logger) + if backlog < 1: + raise exception.InvalidInput( + reason='The backlog must be more than 1') + self._socket = self._get_socket(self._host, + self._port, + backlog=backlog) + + def _get_socket(self, host, port, backlog): + bind_addr = (host, port) + # TODO(dims): eventlet's green dns/socket module does not actually + # support IPv6 in getaddrinfo(). 
We need to get around this in the + # future or monitor upstream for a fix + try: + info = socket.getaddrinfo(bind_addr[0], + bind_addr[1], + socket.AF_UNSPEC, + socket.SOCK_STREAM)[0] + family = info[0] + bind_addr = info[-1] + except Exception: + family = socket.AF_INET + + cert_file = CONF.ssl_cert_file + key_file = CONF.ssl_key_file + ca_file = CONF.ssl_ca_file + use_ssl = cert_file or key_file + + if cert_file and not os.path.exists(cert_file): + raise RuntimeError(_("Unable to find cert_file : %s") % cert_file) + + if ca_file and not os.path.exists(ca_file): + raise RuntimeError(_("Unable to find ca_file : %s") % ca_file) + + if key_file and not os.path.exists(key_file): + raise RuntimeError(_("Unable to find key_file : %s") % key_file) + + if use_ssl and (not cert_file or not key_file): + raise RuntimeError(_("When running server in SSL mode, you must " + "specify both a cert_file and key_file " + "option value in your configuration file")) + + def wrap_ssl(sock): + ssl_kwargs = { + 'server_side': True, + 'certfile': cert_file, + 'keyfile': key_file, + 'cert_reqs': ssl.CERT_NONE, + } + + if CONF.ssl_ca_file: + ssl_kwargs['ca_certs'] = ca_file + ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED + + return ssl.wrap_socket(sock, **ssl_kwargs) + + sock = None + retry_until = time.time() + 30 + while not sock and time.time() < retry_until: + try: + sock = eventlet.listen(bind_addr, + backlog=backlog, + family=family) + if use_ssl: + sock = wrap_ssl(sock) + + except socket.error as err: + if err.args[0] != errno.EADDRINUSE: + raise + eventlet.sleep(0.1) + if not sock: + raise RuntimeError(_("Could not bind to %(host)s:%(port)s " + "after trying for 30 seconds") % + {'host': host, 'port': port}) + sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + # sockets can hang around forever without keepalive + sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) + + # This option isn't available in the OS X version of eventlet + if hasattr(socket, 'TCP_KEEPIDLE'): + sock.setsockopt(socket.IPPROTO_TCP, + socket.TCP_KEEPIDLE, + CONF.tcp_keepidle) + + return sock + def _start(self): """Run the blocking eventlet WSGI server. @@ -87,13 +201,18 @@ def start(self, backlog=128): :raises: cinder.exception.InvalidInput """ - if backlog < 1: - raise exception.InvalidInput( - reason='The backlog must be more than 1') - self._socket = eventlet.listen((self.host, self.port), backlog=backlog) self._server = eventlet.spawn(self._start) - (self.host, self.port) = self._socket.getsockname() - LOG.info(_("Started %(name)s on %(host)s:%(port)s") % self.__dict__) + (self._host, self._port) = self._socket.getsockname()[0:2] + LOG.info(_("Started %(name)s on %(host)s:%(port)s") % + {'name': self.name, 'host': self.host, 'port': self.port}) + + @property + def host(self): + return self._host + + @property + def port(self): + return self._port def stop(self): """Stop this server. @@ -105,7 +224,10 @@ def stop(self): """ LOG.info(_("Stopping WSGI server.")) - self._server.kill() + if self._server is not None: + # Resize pool to stop new requests from being processed + self._pool.resize(0) + self._server.kill() def wait(self): """Block, until the server has stopped. 
@@ -116,7 +238,8 @@ def wait(self): """ try: - self._server.wait() + if self._server is not None: + self._server.wait() except greenlet.GreenletExit: LOG.info(_("WSGI server has stopped.")) @@ -263,16 +386,16 @@ class Debug(Middleware): @webob.dec.wsgify(RequestClass=Request) def __call__(self, req): - print ('*' * 40) + ' REQUEST ENVIRON' + print(('*' * 40) + ' REQUEST ENVIRON') for key, value in req.environ.items(): - print key, '=', value - print + print(key, '=', value) + print() resp = req.get_response(self.application) - print ('*' * 40) + ' RESPONSE HEADERS' + print(('*' * 40) + ' RESPONSE HEADERS') for (key, value) in resp.headers.iteritems(): - print key, '=', value - print + print(key, '=', value) + print() resp.app_iter = self.print_generator(resp.app_iter) @@ -281,12 +404,12 @@ def __call__(self, req): @staticmethod def print_generator(app_iter): """Iterator that prints the contents of a wrapper string.""" - print ('*' * 40) + ' BODY' + print(('*' * 40) + ' BODY') for part in app_iter: sys.stdout.write(part) sys.stdout.flush() yield part - print + print() class Router(object): @@ -356,7 +479,7 @@ def __init__(self, config_path=None): :returns: None """ - config_path = config_path or FLAGS.api_paste_config + config_path = config_path or CONF.api_paste_config self.config_path = utils.find_config(config_path) def load_app(self, name): diff --git a/contrib/redhat-eventlet.patch b/contrib/redhat-eventlet.patch deleted file mode 100644 index 0b77e6f72c..0000000000 --- a/contrib/redhat-eventlet.patch +++ /dev/null @@ -1,16 +0,0 @@ ---- .cinder-venv/lib/python2.6/site-packages/eventlet/green/subprocess.py.orig -2011-05-25 -23:31:34.597271402 +0000 -+++ .cinder-venv/lib/python2.6/site-packages/eventlet/green/subprocess.py -2011-05-25 -23:33:24.055602468 +0000 -@@ -32,7 +32,7 @@ - setattr(self, attr, wrapped_pipe) - __init__.__doc__ = subprocess_orig.Popen.__init__.__doc__ - -- def wait(self, check_interval=0.01): -+ def wait(self, check_interval=0.01, timeout=None): - # Instead of a blocking OS call, this version of wait() uses logic - # borrowed from the eventlet 0.2 processes.Process.wait() method. 
- try: - diff --git a/doc/ext/cinder_autodoc.py b/doc/ext/cinder_autodoc.py index a778f4a522..a6b7d68539 100644 --- a/doc/ext/cinder_autodoc.py +++ b/doc/ext/cinder_autodoc.py @@ -1,3 +1,5 @@ +from __future__ import print_function + import gettext import os @@ -7,6 +9,6 @@ def setup(app): - print "**Autodocumenting from %s" % os.path.abspath(os.curdir) + print("**Autodocumenting from %s" % os.path.abspath(os.curdir)) rv = utils.execute('./doc/generate_autodoc_index.sh') - print rv[0] + print(rv[0]) diff --git a/doc/source/_static/basic.css b/doc/source/_static/basic.css deleted file mode 100644 index d909ce37c7..0000000000 --- a/doc/source/_static/basic.css +++ /dev/null @@ -1,416 +0,0 @@ -/** - * Sphinx stylesheet -- basic theme - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - */ - -/* -- main layout ----------------------------------------------------------- */ - -div.clearer { - clear: both; -} - -/* -- relbar ---------------------------------------------------------------- */ - -div.related { - width: 100%; - font-size: 90%; -} - -div.related h3 { - display: none; -} - -div.related ul { - margin: 0; - padding: 0 0 0 10px; - list-style: none; -} - -div.related li { - display: inline; -} - -div.related li.right { - float: right; - margin-right: 5px; -} - -/* -- sidebar --------------------------------------------------------------- */ - -div.sphinxsidebarwrapper { - padding: 10px 5px 0 10px; -} - -div.sphinxsidebar { - float: left; - width: 230px; - margin-left: -100%; - font-size: 90%; -} - -div.sphinxsidebar ul { - list-style: none; -} - -div.sphinxsidebar ul ul, -div.sphinxsidebar ul.want-points { - margin-left: 20px; - list-style: square; -} - -div.sphinxsidebar ul ul { - margin-top: 0; - margin-bottom: 0; -} - -div.sphinxsidebar form { - margin-top: 10px; -} - -div.sphinxsidebar input { - border: 1px solid #98dbcc; - font-family: sans-serif; - font-size: 1em; -} - -img { - border: 0; -} - -/* -- search page ----------------------------------------------------------- */ - -ul.search { - margin: 10px 0 0 20px; - padding: 0; -} - -ul.search li { - padding: 5px 0 5px 20px; - background-image: url(file.png); - background-repeat: no-repeat; - background-position: 0 7px; -} - -ul.search li a { - font-weight: bold; -} - -ul.search li div.context { - color: #888; - margin: 2px 0 0 30px; - text-align: left; -} - -ul.keywordmatches li.goodmatch a { - font-weight: bold; -} - -/* -- index page ------------------------------------------------------------ */ - -table.contentstable { - width: 90%; -} - -table.contentstable p.biglink { - line-height: 150%; -} - -a.biglink { - font-size: 1.3em; -} - -span.linkdescr { - font-style: italic; - padding-top: 5px; - font-size: 90%; -} - -/* -- general index --------------------------------------------------------- */ - -table.indextable td { - text-align: left; - vertical-align: top; -} - -table.indextable dl, table.indextable dd { - margin-top: 0; - margin-bottom: 0; -} - -table.indextable tr.pcap { - height: 10px; -} - -table.indextable tr.cap { - margin-top: 10px; - background-color: #f2f2f2; -} - -img.toggler { - margin-right: 3px; - margin-top: 3px; - cursor: pointer; -} - -/* -- general body styles --------------------------------------------------- */ - -a.headerlink { - visibility: hidden; -} - -h1:hover > a.headerlink, -h2:hover > a.headerlink, -h3:hover > a.headerlink, -h4:hover > a.headerlink, -h5:hover > a.headerlink, -h6:hover > a.headerlink, -dt:hover > a.headerlink { - visibility: visible; -} - -div.body p.caption { - text-align: inherit; -} - 
-div.body td { - text-align: left; -} - -.field-list ul { - padding-left: 1em; -} - -.first { -} - -p.rubric { - margin-top: 30px; - font-weight: bold; -} - -/* -- sidebars -------------------------------------------------------------- */ - -div.sidebar { - margin: 0 0 0.5em 1em; - border: 1px solid #ddb; - padding: 7px 7px 0 7px; - background-color: #ffe; - width: 40%; - float: right; -} - -p.sidebar-title { - font-weight: bold; -} - -/* -- topics ---------------------------------------------------------------- */ - -div.topic { - border: 1px solid #ccc; - padding: 7px 7px 0 7px; - margin: 10px 0 10px 0; -} - -p.topic-title { - font-size: 1.1em; - font-weight: bold; - margin-top: 10px; -} - -/* -- admonitions ----------------------------------------------------------- */ - -div.admonition { - margin-top: 10px; - margin-bottom: 10px; - padding: 7px; -} - -div.admonition dt { - font-weight: bold; -} - -div.admonition dl { - margin-bottom: 0; -} - -p.admonition-title { - margin: 0px 10px 5px 0px; - font-weight: bold; -} - -div.body p.centered { - text-align: center; - margin-top: 25px; -} - -/* -- tables ---------------------------------------------------------------- */ - -table.docutils { - border: 0; - border-collapse: collapse; -} - -table.docutils td, table.docutils th { - padding: 1px 8px 1px 0; - border-top: 0; - border-left: 0; - border-right: 0; - border-bottom: 1px solid #aaa; -} - -table.field-list td, table.field-list th { - border: 0 !important; -} - -table.footnote td, table.footnote th { - border: 0 !important; -} - -th { - text-align: left; - padding-right: 5px; -} - -/* -- other body styles ----------------------------------------------------- */ - -dl { - margin-bottom: 15px; -} - -dd p { - margin-top: 0px; -} - -dd ul, dd table { - margin-bottom: 10px; -} - -dd { - margin-top: 3px; - margin-bottom: 10px; - margin-left: 30px; -} - -dt:target, .highlight { - background-color: #fbe54e; -} - -dl.glossary dt { - font-weight: bold; - font-size: 1.1em; -} - -.field-list ul { - margin: 0; - padding-left: 1em; -} - -.field-list p { - margin: 0; -} - -.refcount { - color: #060; -} - -.optional { - font-size: 1.3em; -} - -.versionmodified { - font-style: italic; -} - -.system-message { - background-color: #fda; - padding: 5px; - border: 3px solid red; -} - -.footnote:target { - background-color: #ffa -} - -.line-block { - display: block; - margin-top: 1em; - margin-bottom: 1em; -} - -.line-block .line-block { - margin-top: 0; - margin-bottom: 0; - margin-left: 1.5em; -} - -/* -- code displays --------------------------------------------------------- */ - -pre { - overflow: auto; -} - -td.linenos pre { - padding: 5px 0px; - border: 0; - background-color: transparent; - color: #aaa; -} - -table.highlighttable { - margin-left: 0.5em; -} - -table.highlighttable td { - padding: 0 0.5em 0 0.5em; -} - -tt.descname { - background-color: transparent; - font-weight: bold; - font-size: 1.2em; -} - -tt.descclassname { - background-color: transparent; -} - -tt.xref, a tt { - background-color: transparent; - font-weight: bold; -} - -h1 tt, h2 tt, h3 tt, h4 tt, h5 tt, h6 tt { - background-color: transparent; -} - -/* -- math display ---------------------------------------------------------- */ - -img.math { - vertical-align: middle; -} - -div.body div.math p { - text-align: center; -} - -span.eqno { - float: right; -} - -/* -- printout stylesheet --------------------------------------------------- */ - -@media print { - div.document, - div.documentwrapper, - div.bodywrapper { - margin: 0 
!important; - width: 100%; - } - - div.sphinxsidebar, - div.related, - div.footer, - #top-link { - display: none; - } -} diff --git a/doc/source/_static/default.css b/doc/source/_static/default.css deleted file mode 100644 index c8091ecb4d..0000000000 --- a/doc/source/_static/default.css +++ /dev/null @@ -1,230 +0,0 @@ -/** - * Sphinx stylesheet -- default theme - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - */ - -@import url("basic.css"); - -/* -- page layout ----------------------------------------------------------- */ - -body { - font-family: sans-serif; - font-size: 100%; - background-color: #11303d; - color: #000; - margin: 0; - padding: 0; -} - -div.document { - background-color: #1c4e63; -} - -div.documentwrapper { - float: left; - width: 100%; -} - -div.bodywrapper { - margin: 0 0 0 230px; -} - -div.body { - background-color: #ffffff; - color: #000000; - padding: 0 20px 30px 20px; -} - -div.footer { - color: #ffffff; - width: 100%; - padding: 9px 0 9px 0; - text-align: center; - font-size: 75%; -} - -div.footer a { - color: #ffffff; - text-decoration: underline; -} - -div.related { - background-color: #133f52; - line-height: 30px; - color: #ffffff; -} - -div.related a { - color: #ffffff; -} - -div.sphinxsidebar { -} - -div.sphinxsidebar h3 { - font-family: 'Trebuchet MS', sans-serif; - color: #ffffff; - font-size: 1.4em; - font-weight: normal; - margin: 0; - padding: 0; -} - -div.sphinxsidebar h3 a { - color: #ffffff; -} - -div.sphinxsidebar h4 { - font-family: 'Trebuchet MS', sans-serif; - color: #ffffff; - font-size: 1.3em; - font-weight: normal; - margin: 5px 0 0 0; - padding: 0; -} - -div.sphinxsidebar p { - color: #ffffff; -} - -div.sphinxsidebar p.topless { - margin: 5px 10px 10px 10px; -} - -div.sphinxsidebar ul { - margin: 10px; - padding: 0; - color: #ffffff; -} - -div.sphinxsidebar a { - color: #98dbcc; -} - -div.sphinxsidebar input { - border: 1px solid #98dbcc; - font-family: sans-serif; - font-size: 1em; -} - -/* -- body styles ----------------------------------------------------------- */ - -a { - color: #355f7c; - text-decoration: none; -} - -a:hover { - text-decoration: underline; -} - -div.body p, div.body dd, div.body li { - text-align: left; - line-height: 130%; -} - -div.body h1, -div.body h2, -div.body h3, -div.body h4, -div.body h5, -div.body h6 { - font-family: 'Trebuchet MS', sans-serif; - background-color: #f2f2f2; - font-weight: normal; - color: #20435c; - border-bottom: 1px solid #ccc; - margin: 20px -20px 10px -20px; - padding: 3px 0 3px 10px; -} - -div.body h1 { margin-top: 0; font-size: 200%; } -div.body h2 { font-size: 160%; } -div.body h3 { font-size: 140%; } -div.body h4 { font-size: 120%; } -div.body h5 { font-size: 110%; } -div.body h6 { font-size: 100%; } - -a.headerlink { - color: #c60f0f; - font-size: 0.8em; - padding: 0 4px 0 4px; - text-decoration: none; -} - -a.headerlink:hover { - background-color: #c60f0f; - color: white; -} - -div.body p, div.body dd, div.body li { - text-align: left; - line-height: 130%; -} - -div.admonition p.admonition-title + p { - display: inline; -} - -div.admonition p { - margin-bottom: 5px; -} - -div.admonition pre { - margin-bottom: 5px; -} - -div.admonition ul, div.admonition ol { - margin-bottom: 5px; -} - -div.note { - background-color: #eee; - border: 1px solid #ccc; -} - -div.seealso { - background-color: #ffc; - border: 1px solid #ff6; -} - -div.topic { - background-color: #eee; -} - -div.warning { - background-color: #ffe4e4; - border: 1px solid #f66; -} - -p.admonition-title { - display: inline; -} - 
-p.admonition-title:after { - content: ":"; -} - -pre { - padding: 5px; - background-color: #eeffcc; - color: #333333; - line-height: 120%; - border: 1px solid #ac9; - border-left: none; - border-right: none; -} - -tt { - background-color: #ecf0f3; - padding: 0 1px 0 1px; - font-size: 0.95em; -} - -.warning tt { - background: #efc2c2; -} - -.note tt { - background: #d6d6d6; -} diff --git a/doc/source/_static/jquery.tweet.js b/doc/source/_static/jquery.tweet.js deleted file mode 100644 index 79bf0bdb4c..0000000000 --- a/doc/source/_static/jquery.tweet.js +++ /dev/null @@ -1,154 +0,0 @@ -(function($) { - - $.fn.tweet = function(o){ - var s = { - username: ["seaofclouds"], // [string] required, unless you want to display our tweets. :) it can be an array, just do ["username1","username2","etc"] - list: null, //[string] optional name of list belonging to username - avatar_size: null, // [integer] height and width of avatar if displayed (48px max) - count: 3, // [integer] how many tweets to display? - intro_text: null, // [string] do you want text BEFORE your your tweets? - outro_text: null, // [string] do you want text AFTER your tweets? - join_text: null, // [string] optional text in between date and tweet, try setting to "auto" - auto_join_text_default: "i said,", // [string] auto text for non verb: "i said" bullocks - auto_join_text_ed: "i", // [string] auto text for past tense: "i" surfed - auto_join_text_ing: "i am", // [string] auto tense for present tense: "i was" surfing - auto_join_text_reply: "i replied to", // [string] auto tense for replies: "i replied to" @someone "with" - auto_join_text_url: "i was looking at", // [string] auto tense for urls: "i was looking at" http:... - loading_text: null, // [string] optional loading text, displayed while tweets load - query: null // [string] optional search query - }; - - if(o) $.extend(s, o); - - $.fn.extend({ - linkUrl: function() { - var returning = []; - var regexp = /((ftp|http|https):\/\/(\w+:{0,1}\w*@)?(\S+)(:[0-9]+)?(\/|\/([\w#!:.?+=&%@!\-\/]))?)/gi; - this.each(function() { - returning.push(this.replace(regexp,"$1")); - }); - return $(returning); - }, - linkUser: function() { - var returning = []; - var regexp = /[\@]+([A-Za-z0-9-_]+)/gi; - this.each(function() { - returning.push(this.replace(regexp,"@$1")); - }); - return $(returning); - }, - linkHash: function() { - var returning = []; - var regexp = / [\#]+([A-Za-z0-9-_]+)/gi; - this.each(function() { - returning.push(this.replace(regexp, ' #$1')); - }); - return $(returning); - }, - capAwesome: function() { - var returning = []; - this.each(function() { - returning.push(this.replace(/\b(awesome)\b/gi, '$1')); - }); - return $(returning); - }, - capEpic: function() { - var returning = []; - this.each(function() { - returning.push(this.replace(/\b(epic)\b/gi, '$1')); - }); - return $(returning); - }, - makeHeart: function() { - var returning = []; - this.each(function() { - returning.push(this.replace(/(<)+[3]/gi, "")); - }); - return $(returning); - } - }); - - function relative_time(time_value) { - var parsed_date = Date.parse(time_value); - var relative_to = (arguments.length > 1) ? arguments[1] : new Date(); - var delta = parseInt((relative_to.getTime() - parsed_date) / 1000); - var pluralize = function (singular, n) { - return '' + n + ' ' + singular + (n == 1 ? 
'' : 's'); - }; - if(delta < 60) { - return 'less than a minute ago'; - } else if(delta < (45*60)) { - return 'about ' + pluralize("minute", parseInt(delta / 60)) + ' ago'; - } else if(delta < (24*60*60)) { - return 'about ' + pluralize("hour", parseInt(delta / 3600)) + ' ago'; - } else { - return 'about ' + pluralize("day", parseInt(delta / 86400)) + ' ago'; - } - } - - function build_url() { - var proto = ('https:' == document.location.protocol ? 'https:' : 'http:'); - if (s.list) { - return proto+"//api.twitter.com/1/"+s.username[0]+"/lists/"+s.list+"/statuses.json?per_page="+s.count+"&callback=?"; - } else if (s.query == null && s.username.length == 1) { - return proto+'//twitter.com/status/user_timeline/'+s.username[0]+'.json?count='+s.count+'&callback=?'; - } else { - var query = (s.query || 'from:'+s.username.join('%20OR%20from:')); - return proto+'//search.twitter.com/search.json?&q='+query+'&rpp='+s.count+'&callback=?'; - } - } - - return this.each(function(){ - var list = $('
<ul class="tweet_list">').appendTo(this);
-      var intro = '<p class="tweet_intro">'+s.intro_text+'</p>';
-      var outro = '<p class="tweet_outro">'+s.outro_text+'</p>';
-      var loading = $('<p class="loading">'+s.loading_text+'</p>');
-
-      if(typeof(s.username) == "string"){
-        s.username = [s.username];
-      }
-
-      if (s.loading_text) $(this).append(loading);
-      $.getJSON(build_url(), function(data){
-        if (s.loading_text) loading.remove();
-        if (s.intro_text) list.before(intro);
-        $.each((data.results || data), function(i,item){
-          // auto join text based on verb tense and content
-          if (s.join_text == "auto") {
-            if (item.text.match(/^(@([A-Za-z0-9-_]+)) .*/i)) {
-              var join_text = s.auto_join_text_reply;
-            } else if (item.text.match(/(^\w+:\/\/[A-Za-z0-9-_]+\.[A-Za-z0-9-_:%&\?\/.=]+) .*/i)) {
-              var join_text = s.auto_join_text_url;
-            } else if (item.text.match(/^((\w+ed)|just) .*/im)) {
-              var join_text = s.auto_join_text_ed;
-            } else if (item.text.match(/^(\w*ing) .*/i)) {
-              var join_text = s.auto_join_text_ing;
-            } else {
-              var join_text = s.auto_join_text_default;
-            }
-          } else {
-            var join_text = s.join_text;
-          };
-
-          var from_user = item.from_user || item.user.screen_name;
-          var profile_image_url = item.profile_image_url || item.user.profile_image_url;
-          var join_template = '<span class="tweet_join"> '+join_text+' </span>';
-          var join = ((s.join_text) ? join_template : ' ');
-          var avatar_template = '<a class="tweet_avatar" href="http://twitter.com/'+from_user+'"><img src="'+profile_image_url+'" height="'+s.avatar_size+'" width="'+s.avatar_size+'" alt="'+from_user+'\'s avatar" border="0"/></a>';
-          var avatar = (s.avatar_size ? avatar_template : '');
-          var date = '<span class="tweet_time"><a href="http://twitter.com/'+from_user+'/statuses/'+item.id+'" title="view tweet on twitter">'+relative_time(item.created_at)+'</a></span>';
-          var text = '<span class="tweet_text">' +$([item.text]).linkUrl().linkUser().linkHash().makeHeart().capAwesome().capEpic()[0]+ '</span>';
-
-          // until we create a template option, arrange the items below to alter a tweet's display.
-          list.append('<li>' + avatar + date + join + text + '</li>');
-
-          list.children('li:first').addClass('tweet_first');
-          list.children('li:odd').addClass('tweet_even');
-          list.children('li:even').addClass('tweet_odd');
-        });
-        if (s.outro_text) list.after(outro);
-      });
-
-    });
-  };
-})(jQuery);
\ No newline at end of file
diff --git a/doc/source/_static/tweaks.css b/doc/source/_static/tweaks.css
deleted file mode 100644
index 046ead8405..0000000000
--- a/doc/source/_static/tweaks.css
+++ /dev/null
@@ -1,218 +0,0 @@
-ul.todo_list {
-    list-style-type: none;
-    margin: 0;
-    padding: 0;
-}
-
-ul.todo_list li {
-    display: block;
-    margin: 0;
-    padding: 7px 0;
-    border-top: 1px solid #eee;
-}
-
-ul.todo_list li p {
-    display: inline;
-}
-
-ul.todo_list li p.link {
-    font-weight: bold;
-}
-
-ul.todo_list li p.details {
-    font-style: italic;
-}
-
-ul.todo_list li {
-}
-
-div.admonition {
-    border: 1px solid #8F1000;
-}
-
-div.admonition p.admonition-title {
-    background-color: #8F1000;
-    border-bottom: 1px solid #8E8E8E;
-}
-
-a {
-    color: #CF2F19;
-}
-
-div.related ul li a {
-    color: #CF2F19;
-}
-
-div.sphinxsidebar h4 {
-    background-color:#8E8E8E;
-    border:1px solid #255E6E;
-    color:white;
-    font-size:1em;
-    margin:1em 0 0.5em;
-    padding:0.1em 0 0.1em 0.5em;
-}
-
-em {
-    font-style: normal;
-}
-
-table.docutils {
-    font-size: 11px;
-}
-
-.tweet_list li {
-    font-size: 0.9em;
-    border-bottom: 1px solid #eee;
-    padding: 5px 0;
-}
-
-.tweet_list li .tweet_avatar {
-    float: left;
-}
-
-/* ------------------------------------------
-PURE CSS SPEECH BUBBLES
-by Nicolas Gallagher
-- http://nicolasgallagher.com/pure-css-speech-bubbles/
-
-http://nicolasgallagher.com
-http://twitter.com/necolas
-
-Created: 02 March 2010
-Version: 1.1 (21 October 2010)
-
-Dual licensed under MIT and GNU GPLv2 © Nicolas Gallagher
------------------------------------------- */
-/* THE SPEECH BUBBLE
-------------------------------------------------------------------------------------------------------------------------------- */
-
-/* THE SPEECH BUBBLE
-------------------------------------------------------------------------------------------------------------------------------- */
-
-.triangle-border {
-    position:relative;
-    padding:15px;
-    margin:1em 0 3em;
-    border:5px solid #BC1518;
-    color:#333;
-    background:#fff;
-
-    /* css3 */
-    -moz-border-radius:10px;
-    -webkit-border-radius:10px;
-    border-radius:10px;
-}
-
-/* Variant : for left positioned triangle
------------------------------------------- */
-
-.triangle-border.left {
-    margin-left:30px;
-}
-
-/* Variant : for right positioned triangle
------------------------------------------- */
-
-.triangle-border.right {
-    margin-right:30px;
-}
-
-/* THE TRIANGLE
-------------------------------------------------------------------------------------------------------------------------------- */
-
-.triangle-border:before {
-    content:"";
-    display:block; /* reduce the damage in FF3.0 */
-    position:absolute;
-    bottom:-40px; /* value = - border-top-width - border-bottom-width */
-    left:40px; /* controls horizontal position */
-    width:0;
-    height:0;
-    border:20px solid transparent;
-    border-top-color:#BC1518;
-}
-
-/* creates the smaller triangle */
-.triangle-border:after {
-    content:"";
-    display:block; /* reduce the damage in FF3.0 */
-    position:absolute;
-    bottom:-26px; /* value = - border-top-width - border-bottom-width */
-    left:47px; /* value = (:before left) + (:before border-left) - (:after border-left) */
-    width:0;
-    height:0;
-    border:13px solid transparent;
-    border-top-color:#fff;
-}
-
-/* Variant : top
------------------------------------------- */ - -/* creates the larger triangle */ -.triangle-border.top:before { - top:-40px; /* value = - border-top-width - border-bottom-width */ - right:40px; /* controls horizontal position */ - bottom:auto; - left:auto; - border:20px solid transparent; - border-bottom-color:#BC1518; -} - -/* creates the smaller triangle */ -.triangle-border.top:after { - top:-26px; /* value = - border-top-width - border-bottom-width */ - right:47px; /* value = (:before right) + (:before border-right) - (:after border-right) */ - bottom:auto; - left:auto; - border:13px solid transparent; - border-bottom-color:#fff; -} - -/* Variant : left ------------------------------------------- */ - -/* creates the larger triangle */ -.triangle-border.left:before { - top:10px; /* controls vertical position */ - left:-30px; /* value = - border-left-width - border-right-width */ - bottom:auto; - border-width:15px 30px 15px 0; - border-style:solid; - border-color:transparent #BC1518; -} - -/* creates the smaller triangle */ -.triangle-border.left:after { - top:16px; /* value = (:before top) + (:before border-top) - (:after border-top) */ - left:-21px; /* value = - border-left-width - border-right-width */ - bottom:auto; - border-width:9px 21px 9px 0; - border-style:solid; - border-color:transparent #fff; -} - -/* Variant : right ------------------------------------------- */ - -/* creates the larger triangle */ -.triangle-border.right:before { - top:10px; /* controls vertical position */ - right:-30px; /* value = - border-left-width - border-right-width */ - bottom:auto; - left:auto; - border-width:15px 0 15px 30px; - border-style:solid; - border-color:transparent #BC1518; -} - -/* creates the smaller triangle */ -.triangle-border.right:after { - top:16px; /* value = (:before top) + (:before border-top) - (:after border-top) */ - right:-21px; /* value = - border-left-width - border-right-width */ - bottom:auto; - left:auto; - border-width:9px 0 9px 21px; - border-style:solid; - border-color:transparent #fff; -} - diff --git a/doc/source/_theme/layout.html b/doc/source/_theme/layout.html deleted file mode 100644 index 1e0e69e986..0000000000 --- a/doc/source/_theme/layout.html +++ /dev/null @@ -1,95 +0,0 @@ -{% extends "sphinxdoc/layout.html" %} -{% set css_files = css_files + ['_static/tweaks.css'] %} -{% set script_files = script_files + ['_static/jquery.tweet.js'] %} -{% block extrahead %} - -{% endblock %} - -{%- macro sidebar() %} - {%- if not embedded %}{% if not theme_nosidebar|tobool %} -
-  {%- block sidebarlogo %}
-  {%- if logo %}
-  {%- endif %}
-  {%- endblock %}
-  {%- block sidebartoc %}
-  {%- if display_toc %}
-    {{ _('Table Of Contents') }}
-    {{ toc }}
-  {%- endif %}
-  {%- endblock %}
-  {%- block sidebarrel %}
-  {%- if prev %}
-    {{ _('Previous topic') }}
-    {{ prev.title }}
-  {%- endif %}
-  {%- if next %}
-    {{ _('Next topic') }}
-    {{ next.title }}
-  {%- endif %}
-  {%- endblock %}
-  {%- block sidebarsourcelink %}
-  {%- if show_source and has_source and sourcename %}
-    {{ _('This Page') }}
-  {%- endif %}
-  {%- endblock %}
-  {%- if customsidebar %}
-  {% include customsidebar %}
-  {%- endif %}
-  {%- block sidebarsearch %}
-  {%- if pagename != "search" %}
-    Psst... hey. You're reading the latest content, but it might be out of sync with code. You can read Cinder 2011.2 docs or all OpenStack docs too.
-  {%- endif %}
-  {%- if pagename == "index" %}
-    {{ _('Twitter Feed') }}
-  {%- endif %}
-  {%- endblock %}
    - {%- endif %}{% endif %} -{%- endmacro %} diff --git a/doc/source/_theme/theme.conf b/doc/source/_theme/theme.conf deleted file mode 100644 index e039fe01f9..0000000000 --- a/doc/source/_theme/theme.conf +++ /dev/null @@ -1,5 +0,0 @@ -[theme] -inherit = sphinxdoc -stylesheet = sphinxdoc.css -pygments_style = friendly - diff --git a/doc/source/conf.py b/doc/source/conf.py index 7436039e1a..97ee288baf 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -34,7 +34,9 @@ 'sphinx.ext.coverage', 'sphinx.ext.pngmath', 'sphinx.ext.ifconfig', - 'sphinx.ext.graphviz'] + 'sphinx.ext.graphviz', + 'oslo.sphinx', + ] # autodoc generation is a bit aggressive and a nuisance # when doing heavy text edit cycles. Execute "export SPHINX_DEBUG=1" @@ -65,18 +67,17 @@ # General information about the project. project = u'cinder' -copyright = u'2010-present, OpenStack, LLC' +copyright = u'2010-present, OpenStack Foundation' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # -from cinder import version as cinder_version -#import cinder.version +from cinder.version import version_info # The full version, including alpha/beta/rc tags. -release = cinder_version.version_string() +release = version_info.release_string() # The short X.Y version. -version = cinder_version.canonical_version_string() +version = version_info.version_string() # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. @@ -91,7 +92,6 @@ # List of documents that shouldn't be included in the build. unused_docs = [ 'api_ext/rst_extension_template', - 'vmwareapi_readme', 'installer', ] @@ -134,8 +134,8 @@ # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. -html_theme_path = ["."] -html_theme = '_theme' +# html_theme_path = ["."] +# html_theme = '_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the diff --git a/doc/source/devref/addmethod.openstackapi.rst b/doc/source/devref/addmethod.openstackapi.rst index 579b295a0a..50bdeb0611 100644 --- a/doc/source/devref/addmethod.openstackapi.rst +++ b/doc/source/devref/addmethod.openstackapi.rst @@ -1,5 +1,5 @@ .. - Copyright 2010-2011 OpenStack LLC + Copyright 2010-2011 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/doc/source/devref/api.rst b/doc/source/devref/api.rst index 1d8e770c15..1848ae2157 100644 --- a/doc/source/devref/api.rst +++ b/doc/source/devref/api.rst @@ -26,7 +26,8 @@ Common Components ----------------- The :mod:`cinder.api` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + .. automodule:: cinder.api :noindex: :members: @@ -34,7 +35,7 @@ The :mod:`cinder.api` Module :show-inheritance: The :mod:`cinder.api.cloud` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: cinder.api.cloud :noindex: @@ -46,7 +47,8 @@ OpenStack API ------------- The :mod:`openstack` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + .. automodule:: cinder.api.openstack :noindex: :members: @@ -54,7 +56,8 @@ The :mod:`openstack` Module :show-inheritance: The :mod:`auth` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~ + .. 
automodule:: cinder.api.openstack.auth :noindex: :members: @@ -65,7 +68,7 @@ EC2 API ------- The :mod:`cinder.api.ec2` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: cinder.api.ec2 :noindex: diff --git a/doc/source/devref/architecture.rst b/doc/source/devref/architecture.rst index 667eae600e..6da07b4367 100644 --- a/doc/source/devref/architecture.rst +++ b/doc/source/devref/architecture.rst @@ -16,7 +16,7 @@ under the License. Cinder System Architecture -======================== +========================== The Cinder Block Storage Service is intended to be ran on one or more nodes. diff --git a/doc/source/devref/auth.rst b/doc/source/devref/auth.rst index ee84e3ae5c..fc3219f8ca 100644 --- a/doc/source/devref/auth.rst +++ b/doc/source/devref/auth.rst @@ -21,7 +21,7 @@ Authentication and Authorization ================================ The :mod:`cinder.quota` Module ----------------------------- +------------------------------ .. automodule:: cinder.quota :noindex: @@ -31,7 +31,7 @@ The :mod:`cinder.quota` Module The :mod:`cinder.auth.signer` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: cinder.auth.signer :noindex: @@ -44,7 +44,7 @@ Auth Manager ------------ The :mod:`cinder.auth.manager` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: cinder.auth.manager :noindex: @@ -213,7 +213,7 @@ Establishing declarative semantics for individual API calls will allow the cloud See related work within the Infrastructure 2.0 working group for more information on how the IMF Metadata specification could be utilized to manage registration of these vendors and their C&A credentials. -Dirty Cloud – Hybrid Data Centers +Dirty Cloud - Hybrid Data Centers --------------------------------- * CloudAudit bridge interfaces diff --git a/doc/source/devref/cinder.rst b/doc/source/devref/cinder.rst index 57aba56be5..a8b338666b 100644 --- a/doc/source/devref/cinder.rst +++ b/doc/source/devref/cinder.rst @@ -23,7 +23,7 @@ very well yet. The :mod:`cinder.adminclient` Module ----------------------------------- +------------------------------------ .. automodule:: cinder.adminclient :noindex: @@ -33,7 +33,7 @@ The :mod:`cinder.adminclient` Module The :mod:`cinder.context` Module ------------------------------- +-------------------------------- .. automodule:: cinder.context :noindex: @@ -43,7 +43,7 @@ The :mod:`cinder.context` Module The :mod:`cinder.exception` Module --------------------------------- +---------------------------------- .. automodule:: cinder.exception :noindex: @@ -52,10 +52,10 @@ The :mod:`cinder.exception` Module :show-inheritance: -The :mod:`cinder.flags` Module ----------------------------- +The :mod:`cinder.common.config` Module +------------------------------ -.. automodule:: cinder.flags +.. automodule:: cinder.common.config :noindex: :members: :undoc-members: @@ -63,7 +63,7 @@ The :mod:`cinder.flags` Module The :mod:`cinder.process` Module ------------------------------- +-------------------------------- .. automodule:: cinder.process :noindex: @@ -73,7 +73,7 @@ The :mod:`cinder.process` Module The :mod:`cinder.rpc` Module --------------------------- +---------------------------- .. automodule:: cinder.rpc :noindex: @@ -83,7 +83,7 @@ The :mod:`cinder.rpc` Module The :mod:`cinder.server` Module ------------------------------ +------------------------------- .. 
automodule:: cinder.server :noindex: @@ -93,7 +93,7 @@ The :mod:`cinder.server` Module The :mod:`cinder.test` Module ---------------------------- +----------------------------- .. automodule:: cinder.test :noindex: @@ -103,7 +103,7 @@ The :mod:`cinder.test` Module The :mod:`cinder.utils` Module ----------------------------- +------------------------------ .. automodule:: cinder.utils :noindex: @@ -113,7 +113,7 @@ The :mod:`cinder.utils` Module The :mod:`cinder.validate` Module -------------------------------- +--------------------------------- .. automodule:: cinder.validate :noindex: @@ -123,7 +123,7 @@ The :mod:`cinder.validate` Module The :mod:`cinder.wsgi` Module ---------------------------- +----------------------------- .. automodule:: cinder.wsgi :noindex: @@ -135,30 +135,20 @@ The :mod:`cinder.wsgi` Module Tests ----- -The :mod:`declare_flags` Module +The :mod:`declare_conf` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. automodule:: cinder.tests.declare_flags +.. automodule:: cinder.tests.declare_conf :noindex: :members: :undoc-members: :show-inheritance: -The :mod:`fake_flags` Module +The :mod:`conf_fixture` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. automodule:: cinder.tests.fake_flags - :noindex: - :members: - :undoc-members: - :show-inheritance: - - -The :mod:`flags_unittest` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. automodule:: cinder.tests.flags_unittest +.. automodule:: cinder.tests.conf_fixture :noindex: :members: :undoc-members: @@ -175,16 +165,6 @@ The :mod:`process_unittest` Module :show-inheritance: -The :mod:`real_flags` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. automodule:: cinder.tests.real_flags - :noindex: - :members: - :undoc-members: - :show-inheritance: - - The :mod:`rpc_unittest` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -195,10 +175,10 @@ The :mod:`rpc_unittest` Module :show-inheritance: -The :mod:`runtime_flags` Module +The :mod:`runtime_conf` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. automodule:: cinder.tests.runtime_flags +.. automodule:: cinder.tests.runtime_conf :noindex: :members: :undoc-members: diff --git a/doc/source/devref/database.rst b/doc/source/devref/database.rst index 9d6b9daa94..2897c8b49d 100644 --- a/doc/source/devref/database.rst +++ b/doc/source/devref/database.rst @@ -19,7 +19,7 @@ The Database Layer ================== The :mod:`cinder.db.api` Module ------------------------------ +------------------------------- .. automodule:: cinder.db.api :noindex: @@ -32,13 +32,13 @@ The Sqlalchemy Driver --------------------- The :mod:`cinder.db.sqlalchemy.api` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: cinder.db.sqlalchemy.api :noindex: The :mod:`cinder.db.sqlalchemy.models` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: cinder.db.sqlalchemy.models :noindex: @@ -47,7 +47,7 @@ The :mod:`cinder.db.sqlalchemy.models` Module :show-inheritance: The :mod:`cinder.db.sqlalchemy.session` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: cinder.db.sqlalchemy.session :noindex: diff --git a/doc/source/devref/development.environment.rst b/doc/source/devref/development.environment.rst index 739035a13d..81db91c2f2 100644 --- a/doc/source/devref/development.environment.rst +++ b/doc/source/devref/development.environment.rst @@ -54,7 +54,7 @@ Linux Systems .. 
note::

-    This section is tested for Cinder on Ubuntu (10.10-64) and
+    This section is tested for Cinder on Ubuntu (12.04-64) and
     Fedora-based (RHEL 6.1) distributions. Feel free to add notes and
     change according to your experiences or operating system.

@@ -62,11 +62,11 @@ Install the prerequisite packages.

 On Ubuntu::

-    sudo apt-get install python-dev libssl-dev python-pip git-core
+    sudo apt-get install python-dev libssl-dev python-pip git-core libmysqlclient-dev libpq-dev

 On Fedora-based distributions (e.g., Fedora/RHEL/CentOS/Scientific Linux)::

-    sudo yum install python-devel openssl-devel python-pip git
+    sudo yum install python-devel openssl-devel python-pip git libmysqlclient-dev libpq-dev

 Mac OS X Systems
@@ -121,8 +121,8 @@ You can manually install the virtual environment instead of having

     python tools/install_venv.py

 This will install all of the Python packages listed in the
-``tools/pip-requires`` file into your virtualenv. There will also be some
-additional packages (pip, distribute, greenlet) that are installed
+``requirements.txt`` file into your virtualenv. There will also be some
+additional packages (pip, setuptools) that are installed
 by the ``tools/install_venv.py`` file into the virtualenv. If all goes
 well, you should get a message something like this::
diff --git a/doc/source/devref/down.sh b/doc/source/devref/down.sh
deleted file mode 100644
index 5c1888870b..0000000000
--- a/doc/source/devref/down.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/sh
-
-BR=$1
-DEV=$2
-
-/usr/sbin/brctl delif $BR $DEV
-/sbin/ifconfig $DEV down
diff --git a/doc/source/devref/drivers.rst b/doc/source/devref/drivers.rst
new file mode 100644
index 0000000000..8eeca2e095
--- /dev/null
+++ b/doc/source/devref/drivers.rst
@@ -0,0 +1,67 @@
+..
+      Copyright (c) 2013 OpenStack Foundation
+      All Rights Reserved.
+
+      Licensed under the Apache License, Version 2.0 (the "License"); you may
+      not use this file except in compliance with the License. You may obtain
+      a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+      Unless required by applicable law or agreed to in writing, software
+      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+      License for the specific language governing permissions and limitations
+      under the License.
+
+Drivers
+=======
+
+Cinder exposes an API to users to interact with different storage backend
+solutions. The following are standards across all drivers for Cinder services
+to properly interact with a driver.
+
+Minimum Features
+----------------
+
+Minimum features are enforced to avoid having a grid of what features are
+supported by which drivers and which releases. Cinder Core requires that all
+drivers implement the following minimum features.
+
+Havana
+------
+
+* Volume Create/Delete
+* Volume Attach/Detach
+* Snapshot Create/Delete
+* Create Volume from Snapshot
+* Get Volume Stats
+* Copy Image to Volume
+* Copy Volume to Image
+* Clone Volume
+
+Icehouse
+--------
+
+* All of the above plus
+* Extend Volume
+
+Volume Stats
+------------
+
+Volume stats are used by the different schedulers for the drivers to provide
+a report on the current state of the backend. The following should be
+provided by a driver.
+ +* driver_version +* free_capacity_gb +* reserved_percentage +* storage_protocol +* total_capacity_gb +* vendor_name +* volume_backend_name + +**NOTE:** If the driver is unable to provide a value for free_capacity_gb or +total_capacity_gb, keywords can be provided instead. Please use 'unknown' if +the array cannot report the value or 'infinite' if the array has no upper +limit. diff --git a/doc/source/devref/fakes.rst b/doc/source/devref/fakes.rst index ecc3ce277f..25a769be5f 100644 --- a/doc/source/devref/fakes.rst +++ b/doc/source/devref/fakes.rst @@ -25,7 +25,7 @@ fake implementations of various drivers let you get on with your day. The :mod:`cinder.virt.fake` Module --------------------------------- +---------------------------------- .. automodule:: cinder.virt.fake :noindex: @@ -35,7 +35,7 @@ The :mod:`cinder.virt.fake` Module The :mod:`cinder.auth.fakeldap` Module ------------------------------------- +-------------------------------------- .. automodule:: cinder.auth.fakeldap :noindex: @@ -45,7 +45,7 @@ The :mod:`cinder.auth.fakeldap` Module The :mod:`cinder.testing.fake.rabbit` Module ------------------------------------------- +-------------------------------------------- .. automodule:: cinder.testing.fake.rabbit :noindex: @@ -55,7 +55,7 @@ The :mod:`cinder.testing.fake.rabbit` Module The :class:`cinder.volume.driver.FakeAOEDriver` Class ---------------------------------------------------- +----------------------------------------------------- .. autoclass:: cinder.volume.driver.FakeAOEDriver :noindex: @@ -65,7 +65,7 @@ The :class:`cinder.volume.driver.FakeAOEDriver` Class The :class:`cinder.tests.service_unittest.FakeManager` Class ----------------------------------------------------------- +------------------------------------------------------------ .. autoclass:: cinder.tests.service_unittest.FakeManager :noindex: @@ -75,7 +75,7 @@ The :class:`cinder.tests.service_unittest.FakeManager` Class The :mod:`cinder.tests.api.openstack.fakes` Module ------------------------------------------------- +-------------------------------------------------- .. automodule:: cinder.tests.api.openstack.fakes :noindex: diff --git a/doc/source/devref/filter_scheduler.rst b/doc/source/devref/filter_scheduler.rst deleted file mode 100644 index ef98e3b3b7..0000000000 --- a/doc/source/devref/filter_scheduler.rst +++ /dev/null @@ -1,258 +0,0 @@ -Filter Scheduler -================ - -The **Filter Scheduler** supports `filtering` and `weighting` to make informed -decisions on where a new instance should be created. This Scheduler supports -only working with Compute Nodes. - -Filtering ---------- - -.. image:: /images/filteringWorkflow1.png - -During its work Filter Scheduler firstly makes dictionary of unfiltered hosts, -then filters them using filter properties and finally chooses hosts for the -requested number of instances (each time it chooses the least costed host and -appends it to the list of selected costs). - -If it turns up, that it can't find candidates for the next instance, it means -that there are no more appropriate instances locally. - -If we speak about `filtering` and `weighting`, their work is quite flexible -in the Filter Scheduler. There are a lot of filtering strategies for the -Scheduler to support. Also you can even implement `your own algorithm of -filtering`. - -There are some standard filter classes to use (:mod:`cinder.scheduler.filters`): - -* |AllHostsFilter| - frankly speaking, this filter does no operation. It - returns all the available hosts after its work. 
-* |AvailabilityZoneFilter| - filters hosts by availability zone. It returns - hosts with the same availability zone as the requested instance has in its - properties. -* |ComputeFilter| - checks that the capabilities provided by the compute - service satisfy the extra specifications, associated with the instance type. - It returns a list of hosts that can create instance type. -* |CoreFilter| - filters based on CPU core utilization. It will approve host if - it has sufficient number of CPU cores. -* |IsolatedHostsFilter| - filter based on "image_isolated" and "host_isolated" - flags. -* |JsonFilter| - allows simple JSON-based grammar for selecting hosts. -* |RamFilter| - filters hosts by their RAM. So, it returns only the hosts with - enough available RAM. -* |SimpleCIDRAffinityFilter| - allows to put a new instance on a host within - the same IP block. -* |DifferentHostFilter| - allows to put the instance on a different host from a - set of instances. -* |SameHostFilter| - puts the instance on the same host as another instance in - a set of of instances. - -Now we can focus on these standard filter classes in details. I will pass the -simplest ones, such as |AllHostsFilter|, |CoreFilter| and |RamFilter| are, -because their functionality is quite simple and can be understood just from the -code. For example class |RamFilter| has the next realization: - -:: - - class RamFilter(filters.BaseHostFilter): - """Ram Filter with over subscription flag""" - - def host_passes(self, host_state, filter_properties): - """Only return hosts with sufficient available RAM.""" - instance_type = filter_properties.get('instance_type') - requested_ram = instance_type['memory_mb'] - free_ram_mb = host_state.free_ram_mb - return free_ram_mb * FLAGS.ram_allocation_ratio >= requested_ram - -Here `ram_allocation_ratio` means the virtual RAM to physical RAM allocation -ratio (it is 1.5 by default). Really, nice and simple. - -Next standard filter to describe is |AvailabilityZoneFilter| and it isn't -difficult too. This filter just looks at the availability zone of compute node -and availability zone from the properties of the request. Each compute service -has its own availability zone. So deployment engineers have an option to run -scheduler with availability zones support and can configure availability zones -on each compute host. This classes method `host_passes` returns `True` if -availability zone mentioned in request is the same on the current compute host. - -|ComputeFilter| checks if host can create `instance_type`. Let's note that -instance types describe the compute, memory and storage capacity of cinder -compute nodes, it is the list of characteristics such as number of vCPUs, -amount RAM and so on. So |ComputeFilter| looks at hosts' capabilities (host -without requested specifications can't be chosen for the creating of the -instance), checks if the hosts service is up based on last heartbeat. Finally, -this Scheduler can verify if host satisfies some `extra specifications` -associated with the instance type (of course if there are no such extra -specifications, every host suits them). - -Now we are going to |IsolatedHostsFilter|. There can be some special hosts -reserved for specific images. These hosts are called **isolated**. So the -images to run on the isolated hosts are also called isolated. This Scheduler -checks if `image_isolated` flag named in instance specifications is the same -that the host has. 
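
For contrast with the |RamFilter| realization quoted above, here is a minimal
sketch of what another filter of the same shape could look like. This is an
editorial illustration only, not a filter that ships with the scheduler; the
``free_disk_mb`` host attribute and the ``root_gb`` instance type key are
assumptions chosen to mirror the RAM example::

    class EnoughDiskFilter(filters.BaseHostFilter):
        """Only return hosts with sufficient free disk space."""

        def host_passes(self, host_state, filter_properties):
            instance_type = filter_properties.get('instance_type')
            # Convert the requested root disk size from GB to MB so it
            # can be compared against the host's reported free space.
            requested_disk_mb = instance_type['root_gb'] * 1024
            return host_state.free_disk_mb >= requested_disk_mb
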
- -|DifferentHostFilter| - its method `host_passes` returns `True` if host to -place instance on is different from all the hosts used by set of instances. - -|SameHostFilter| does the opposite to what |DifferentHostFilter| does. So its -`host_passes` returns `True` if the host we want to place instance on is one -of the set of instances uses. - -|SimpleCIDRAffinityFilter| looks at the subnet mask and investigates if -the network address of the current host is in the same sub network as it was -defined in the request. - -|JsonFilter| - this filter provides the opportunity to write complicated -queries for the hosts capabilities filtering, based on simple JSON-like syntax. -There can be used the following operations for the host states properties: -'=', '<', '>', 'in', '<=', '>=', that can be combined with the following -logical operations: 'not', 'or', 'and'. For example, there is the query you can -find in tests: - -:: - - ['and', - ['>=', '$free_ram_mb', 1024], - ['>=', '$free_disk_mb', 200 * 1024] - ] - -This query will filter all hosts with free RAM greater or equal than 1024 MB -and at the same time with free disk space greater or equal than 200 GB. - -Many filters use data from `scheduler_hints`, that is defined in the moment of -creation of the new server for the user. The only exeption for this rule is -|JsonFilter|, that takes data in some strange difficult to understand way. - -To use filters you specify next two settings: - -* `scheduler_available_filters` - points available filters. -* `scheduler_default_filters` - points filters to be used by default from the - list of available ones. - -Host Manager sets up these flags in `cinder.conf` by default on the next values: - -:: - - --scheduler_available_filters=cinder.scheduler.filters.standard_filters - --scheduler_default_filters=RamFilter,ComputeFilter,AvailabilityZoneFilter - -These two lines mean, that all the filters in the `cinder.scheduler.filters` -would be available, and the default ones would be |RamFilter|, |ComputeFilter| -and |AvailabilityZoneFilter|. - -If you want to create **your own filter** you just need to inherit from -|BaseHostFilter| and implement one method: -`host_passes`. This method should return `True` if host passes the filter. It -takes `host_state` (describes host) and `filter_properties` dictionary as the -parameters. - -So in the end file cinder.conf should contain lines like these: - -:: - - --scheduler_driver=cinder.scheduler.distributed_scheduler.FilterScheduler - --scheduler_available_filters=cinder.scheduler.filters.standard_filters - --scheduler_available_filters=myfilter.MyFilter - --scheduler_default_filters=RamFilter,ComputeFilter,MyFilter - -As you see, flag `scheduler_driver` is set up for the `FilterSchedule`, -available filters can be specified more than once and description of the -default filters should not contain full paths with class names you need, only -class names. - -Costs and weights ------------------ - -Filter Scheduler uses so-called **weights** and **costs** during its work. - -`Costs` are the computed integers, expressing hosts measure of fitness to be -chosen as a result of the request. Of course, costs are computed due to hosts -characteristics compared with characteristics from the request. So trying to -put instance on a not appropriate host (for example, trying to put really -simple and plain instance on a high performance host) would have high cost, and -putting instance on an appropriate host would have low. 
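
Stripped of caching and configuration, the weighting step walked through
below amounts to something like the following self-contained sketch; the
cost functions and host objects here are stand-ins for illustration, not the
scheduler's real code::

    def pick_host(hosts, weighted_fns, weighing_properties):
        """Return the host with the lowest weighted cost."""
        def weighted_cost(host_state):
            # Sum of weight * cost over every configured cost function,
            # i.e. one row of the scoring grid described below.
            return sum(weight * fn(host_state, weighing_properties)
                       for weight, fn in weighted_fns)
        return min(hosts, key=weighted_cost)
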
- -So let's find out, how does all this computing work happen. - -Before weighting Filter Scheduler creates the list of tuples containing weights -and cost functions to use for weighing hosts. These functions can be got from -cache, if this operation had been done before (this cache depends on `topic` of -node, Filter Scheduler works with only the Compute Nodes, so the topic would be -"`compute`" here). If there is no cost functions in cache associated with -"compute", Filter Scheduler tries to get these cost functions from `cinder.conf`. -Weight in tuple means weight of cost function matching with it. It also can be -got from `cinder.conf`. After that Scheduler weights host, using selected cost -functions. It does this using `weighted_sum` method, which parameters are: - -* `weighted_fns` - list of cost functions created with their weights; -* `host_states` - hosts to be weighted; -* `weighing_properties` - dictionary of values that can influence weights. - -This method firstly creates a grid of function results (it just counts value of -each function using `host_state` and `weighing_properties`) - `scores`, where -it would be one row per host and one function per column. The next step is to -multiply value from the each cell of the grid by the weight of appropriate cost -function. And the final step is to sum values in the each row - it would be the -weight of host, described in this line. This method returns the host with the -lowest weight - the best one. - -If we concentrate on cost functions, it would be important to say that we use -`compute_fill_first_cost_fn` function by default, which simply returns hosts -free RAM: - -:: - - def compute_fill_first_cost_fn(host_state, weighing_properties): - """More free ram = higher weight. So servers will less free ram will be - preferred.""" - return host_state.free_ram_mb - -You can implement your own variant of cost function for the hosts capabilities -you would like to mention. Using different cost functions (as you understand, -there can be a lot of ones used in the same time) can make the chose of next -host for the creating of the new instance flexible. - -These cost functions should be set up in the `cinder.conf` with the flag -`least_cost_functions` (there can be more than one functions separated by -commas). By default this line would look like this: - -:: - - --least_cost_functions=cinder.scheduler.least_cost.compute_fill_first_cost_fn - -As for weights of cost functions, they also should be described in `cinder.conf`. -The line with this description looks the following way: -**function_name_weight**. - -As for default cost function, it would be: `compute_fill_first_cost_fn_weight`, -and by default it is 1.0. - -:: - - --compute_fill_first_cost_fn_weight=1.0 - -Filter Scheduler finds local list of acceptable hosts by repeated filtering and -weighing. Each time it chooses a host, it virtually consumes resources on it, -so subsequent selections can adjust accordingly. It is useful if the customer -asks for the some large amount of instances, because weight is computed for -each instance requested. - -.. image:: /images/filteringWorkflow2.png - -In the end Filter Scheduler sorts selected hosts by their weight and provisions -instances on them. - -P.S.: you can find more examples of using Filter Scheduler and standard filters -in :mod:`cinder.tests.scheduler`. - -.. |AllHostsFilter| replace:: :class:`AllHostsFilter ` -.. |AvailabilityZoneFilter| replace:: :class:`AvailabilityZoneFilter ` -.. 
|BaseHostFilter| replace:: :class:`BaseHostFilter ` -.. |ComputeFilter| replace:: :class:`ComputeFilter ` -.. |CoreFilter| replace:: :class:`CoreFilter ` -.. |IsolatedHostsFilter| replace:: :class:`IsolatedHostsFilter ` -.. |JsonFilter| replace:: :class:`JsonFilter ` -.. |RamFilter| replace:: :class:`RamFilter ` -.. |SimpleCIDRAffinityFilter| replace:: :class:`SimpleCIDRAffinityFilter ` -.. |DifferentHostFilter| replace:: :class:`DifferentHostFilter ` -.. |SameHostFilter| replace:: :class:`SameHostFilter ` diff --git a/doc/source/devref/il8n.rst b/doc/source/devref/il8n.rst index fabd2ce49f..e175af477f 100644 --- a/doc/source/devref/il8n.rst +++ b/doc/source/devref/il8n.rst @@ -24,11 +24,10 @@ in cinder/tests/test_localization.py. The ``_()`` function is brought into the global scope by doing:: - import gettext - gettext.install("cinder", unicode=1) + from cinder.openstack.common import gettextutils + gettextutils.install("cinder") -In general, you shouldn't need to add these to any cinder files, since the lines -are present in ``cinder/__init__.py``. However, if this code is missing, it may -result in an error that looks like like:: +These lines are needed in any toplevel script before any cinder modules are +imported. If this code is missing, it may result in an error that looks like:: NameError: name '_' is not defined diff --git a/doc/source/devref/index.rst b/doc/source/devref/index.rst index 675c05fbc6..15542dfad2 100644 --- a/doc/source/devref/index.rst +++ b/doc/source/devref/index.rst @@ -29,10 +29,11 @@ Programming HowTos and Tutorials development.environment unit_tests addmethod.openstackapi + drivers Background Concepts for Cinder ----------------------------- +------------------------------ .. toctree:: :maxdepth: 3 @@ -65,7 +66,6 @@ Module Reference services database volume - xensmvolume auth api scheduler diff --git a/doc/source/devref/jenkins.rst b/doc/source/devref/jenkins.rst index 6e4a08c14f..299c13ee3a 100644 --- a/doc/source/devref/jenkins.rst +++ b/doc/source/devref/jenkins.rst @@ -22,10 +22,6 @@ Jenkins performs tasks such as: `cinder-docs`_ Build this documentation and push it to http://cinder.openstack.org. -`cinder-pylint`_ - Run `pylint `_ on the cinder code and - report violations. - `cinder-tarball`_ Do ``python setup.py sdist`` to create a tarball of the cinder code and upload it to http://cinder.openstack.org/tarballs diff --git a/doc/source/devref/launchpad.rst b/doc/source/devref/launchpad.rst index a72ec35a90..60815fe2d9 100644 --- a/doc/source/devref/launchpad.rst +++ b/doc/source/devref/launchpad.rst @@ -18,13 +18,12 @@ OpenStack-related sites. These sites include: Mailing list ------------ -The mailing list email is ``openstack@lists.launchpad.net``. This is a common +The mailing list email is ``openstack@lists.openstack.org``. This is a common mailing list across the OpenStack projects. To participate in the mailing list: -#. Join the `Cinder Team`_ on Launchpad. -#. Subscribe to the list on the `OpenStack Team`_ page on Launchpad. +#. Subscribe to the list at http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack -The mailing list archives are at https://lists.launchpad.net/openstack. +The mailing list archives are at http://lists.openstack.org/pipermail/openstack/. Bug tracking @@ -44,11 +43,11 @@ Technical support (Answers) Cinder uses Launchpad Answers to track Cinder technical support questions. The Cinder Answers page is at https://answers.launchpad.net/cinder. 
-Note that the `OpenStack Forums`_ (which are not hosted on Launchpad) can also
+Note that `Ask OpenStack`_ (which is not hosted on Launchpad) can also
 be used for technical support requests.

 .. _Launchpad: http://launchpad.net
 .. _Wiki: http://wiki.openstack.org
 .. _Cinder Team: https://launchpad.net/~cinder
 .. _OpenStack Team: https://launchpad.net/~openstack
-.. _OpenStack Forums: http://forums.openstack.org/
\ No newline at end of file
+.. _Ask OpenStack: http://ask.openstack.org
diff --git a/doc/source/devref/rc.local b/doc/source/devref/rc.local
deleted file mode 100644
index d1ccf0cbc7..0000000000
--- a/doc/source/devref/rc.local
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/bin/sh -e
-#
-# rc.local
-#
-# This script is executed at the end of each multiuser runlevel.
-# Make sure that the script will "exit 0" on success or any other
-# value on error.
-#
-# In order to enable or disable this script just change the execution
-# bits.
-#
-# By default this script does nothing.
-####### These lines go at the end of /etc/rc.local #######
-. /lib/lsb/init-functions
-
-echo Downloading payload from userdata
-wget http://169.254.169.254/latest/user-data -O /tmp/payload.b64
-echo Decrypting base64 payload
-openssl enc -d -base64 -in /tmp/payload.b64 -out /tmp/payload.zip
-
-mkdir -p /tmp/payload
-echo Unzipping payload file
-unzip -o /tmp/payload.zip -d /tmp/payload/
-
-# if the autorun.sh script exists, run it
-if [ -e /tmp/payload/autorun.sh ]; then
-   echo Running autorun.sh
-   cd /tmp/payload
-   sh /tmp/payload/autorun.sh
-
-else
-   echo rc.local : No autorun script to run
-fi
-
-
-exit 0
diff --git a/doc/source/devref/rpc.rst b/doc/source/devref/rpc.rst
index f122ed3d3c..7ac823fcb2 100644
--- a/doc/source/devref/rpc.rst
+++ b/doc/source/devref/rpc.rst
@@ -15,7 +15,7 @@ under the License.

 AMQP and Cinder
-=============
+===============

 AMQP is the messaging technology chosen by the OpenStack cloud. The AMQP broker, either RabbitMQ or Qpid, sits between any two Cinder components and allows them to communicate in a loosely coupled fashion. More precisely, Cinder components (the compute fabric of OpenStack) use Remote Procedure Calls (RPC hereinafter) to communicate with one another; however such a paradigm is built atop the publish/subscribe paradigm so that the following benefits can be achieved:

@@ -33,7 +33,7 @@ Cinder uses direct, fanout, and topic-based exchanges. The architecture looks li
 Cinder implements RPC (both request+response, and one-way, respectively nicknamed 'rpc.call' and 'rpc.cast') over AMQP by providing an adapter class which takes care of marshaling and unmarshaling messages into function calls. Each Cinder service (for example Compute, Volume, etc.) creates two queues at initialization time, one which accepts messages with routing keys 'NODE-TYPE.NODE-ID' (for example compute.hostname) and another, which accepts messages with routing keys as generic 'NODE-TYPE' (for example compute). The former is used specifically when Cinder-API needs to redirect commands to a specific node like 'euca-terminate instance'. In this case, only the compute node whose host's hypervisor is running the virtual machine can kill the instance. The API acts as a consumer when RPC calls are request/response, otherwise it acts as a publisher only.

 Cinder RPC Mappings
------------------
+-------------------

 The figure below shows the internals of a message broker node (referred to as a RabbitMQ node in the diagrams) when a single instance is deployed and shared in an OpenStack cloud.
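
Before turning to those mappings, the two messaging styles from the paragraph
above can be made concrete. The sketch below is purely illustrative: it
assumes the ``cinder.openstack.common.rpc`` helper module of this era, with
``call(context, topic, msg)`` and ``cast(context, topic, msg)``; the topic
strings, method names, and arguments are invented for the example::

    from cinder.openstack.common import rpc

    def illustrate_rpc_styles(context, volume_id):
        # rpc.call: request/response. The 'NODE-TYPE.NODE-ID' style topic
        # routes the message to one specific service instance, and the
        # caller blocks until that worker returns a result.
        reply = rpc.call(context, 'volume.host1',
                         {'method': 'create_volume',
                          'args': {'volume_id': volume_id}})

        # rpc.cast: one-way. The generic 'NODE-TYPE' topic lets whichever
        # worker consumes it pick the message up; no reply is expected.
        rpc.cast(context, 'volume',
                 {'method': 'delete_volume',
                  'args': {'volume_id': volume_id}})
        return reply
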
Every Cinder component connects to the message broker and, depending on its personality (for example a compute node or a network node), may use the queue either as an Invoker (such as API or Scheduler) or a Worker (such as Compute, Volume or Network). Invokers and Workers do not actually exist in the Cinder object model, but we are going to use them as an abstraction for the sake of clarity. An Invoker is a component that sends messages into the queuing system via two operations: i) rpc.call and ii) rpc.cast; a Worker is a component that receives messages from the queuing system and replies accordingly to rpc.call operations.
diff --git a/doc/source/devref/scheduler.rst b/doc/source/devref/scheduler.rst
index 304a4572c9..e2b7e5f16e 100644
--- a/doc/source/devref/scheduler.rst
+++ b/doc/source/devref/scheduler.rst
@@ -19,7 +19,7 @@ Scheduler
 =========

 The :mod:`cinder.scheduler.manager` Module
------------------------------------------
+------------------------------------------

 .. automodule:: cinder.scheduler.manager
     :noindex:
     :members:
     :undoc-members:
     :show-inheritance:

@@ -29,7 +29,7 @@ The :mod:`cinder.scheduler.manager` Module

 The :mod:`cinder.scheduler.driver` Module
----------------------------------------
+-----------------------------------------

 .. automodule:: cinder.scheduler.driver
     :noindex:
     :members:
     :undoc-members:
     :show-inheritance:

@@ -38,10 +38,10 @@ The :mod:`cinder.scheduler.driver` Module

-The :mod:`cinder.scheduler.simple` Driver
----------------------------------------
+The :mod:`cinder.scheduler.filter_scheduler` Driver
+---------------------------------------------------

-.. automodule:: cinder.scheduler.simple
+.. automodule:: cinder.scheduler.filter_scheduler
     :noindex:
     :members:
     :undoc-members:
diff --git a/doc/source/devref/server.conf.template b/doc/source/devref/server.conf.template
deleted file mode 100644
index feee3185b8..0000000000
--- a/doc/source/devref/server.conf.template
+++ /dev/null
@@ -1,34 +0,0 @@
-port 1194
-proto udp
-dev tap0
-up "/etc/openvpn/up.sh br0"
-down "/etc/openvpn/down.sh br0"
-
-persist-key
-persist-tun
-
-ca ca.crt
-cert server.crt
-key server.key # This file should be kept secret
-
-dh dh1024.pem
-ifconfig-pool-persist ipp.txt
-
-server-bridge VPN_IP DHCP_SUBNET DHCP_LOWER DHCP_UPPER
-
-client-to-client
-keepalive 10 120
-comp-lzo
-
-max-clients 1
-
-user nobody
-group nogroup
-
-persist-key
-persist-tun
-
-status openvpn-status.log
-
-verb 3
-mute 20
\ No newline at end of file
diff --git a/doc/source/devref/services.rst b/doc/source/devref/services.rst
index 801f7c7a76..e2dd6d2b05 100644
--- a/doc/source/devref/services.rst
+++ b/doc/source/devref/services.rst
@@ -26,7 +26,7 @@ Currently, Managers and Drivers are specified by flags and loaded using utils.lo

 The :mod:`cinder.service` Module
-------------------------------
+--------------------------------

 .. automodule:: cinder.service
     :noindex:
     :members:
     :undoc-members:
     :show-inheritance:

@@ -36,7 +36,7 @@ The :mod:`cinder.service` Module

 The :mod:`cinder.manager` Module
-------------------------------
+--------------------------------

 .. automodule:: cinder.manager
     :noindex:
     :members:
diff --git a/doc/source/devref/unit_tests.rst b/doc/source/devref/unit_tests.rst
index b42386b0d7..8f41c65596 100644
--- a/doc/source/devref/unit_tests.rst
+++ b/doc/source/devref/unit_tests.rst
@@ -100,7 +100,7 @@ If you wish to recreate the virtualenv, call ``run_tests.sh`` with the flag::

     -f, --force

 Recreating the virtualenv is useful if the package dependencies have changed
-since the virtualenv was last created. If the ``tools/pip-requires`` or
+since the virtualenv was last created. If the ``requirements.txt`` or
 ``tools/install_venv.py`` files have changed, it's a good idea to recreate
 the virtualenv.

@@ -139,9 +139,9 @@ Gotchas

 If you are running the unit tests from a shared folder, you may see tests
 start to fail or stop completely as a result of Python lockfile issues [#f4]_.
 You can get around this by manually setting or updating the following line in
-``cinder/tests/fake_flags.py``::
+``cinder/tests/conf_fixture.py``::

-    FLAGS['lock_path'].SetDefault('/tmp')
+    CONF.set_default('lock_path', '/tmp')

 Note that you may use any location (not just ``/tmp``!) as long as it is not
 a shared folder.
diff --git a/doc/source/devref/up.sh b/doc/source/devref/up.sh
deleted file mode 100644
index 073a58e158..0000000000
--- a/doc/source/devref/up.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/sh
-
-BR=$1
-DEV=$2
-MTU=$3
-/sbin/ifconfig $DEV mtu $MTU promisc up
-/usr/sbin/brctl addif $BR $DEV
diff --git a/doc/source/devref/volume.rst b/doc/source/devref/volume.rst
index 89a53146f3..d9f73c3158 100644
--- a/doc/source/devref/volume.rst
+++ b/doc/source/devref/volume.rst
@@ -22,7 +22,7 @@ Storage Volumes, Disks

 The :mod:`cinder.volume.manager` Module
--------------------------------------
+---------------------------------------

 .. automodule:: cinder.volume.manager
     :noindex:
     :members:
     :undoc-members:
     :show-inheritance:

@@ -31,7 +31,7 @@ The :mod:`cinder.volume.manager` Module

 The :mod:`cinder.volume.driver` Module
------------------------------------
+--------------------------------------

 .. automodule:: cinder.volume.driver
     :noindex:
     :members:
diff --git a/doc/source/devref/xensmvolume.rst b/doc/source/devref/xensmvolume.rst
deleted file mode 100644
index 4b736dcaf0..0000000000
--- a/doc/source/devref/xensmvolume.rst
+++ /dev/null
@@ -1,88 +0,0 @@
-Xen Storage Manager Volume Driver
-=================================
-
-The Xen Storage Manager (xensm) driver for Cinder-Volume is based on XenAPI Storage Manager. This will not only provide basic storage functionality (like volume creation, and destruction) on a number of different storage back-ends, such as Netapp, NFS, etc. but it will also enable the capability of using more sophisticated storage back-ends for operations like cloning/snapshotting etc. To have an idea of the benefits of using XenAPI SM to provide back-end storage services, the list below shows some of the storage plugins already supported in XenServer/XCP:
-
-- NFS VHD: SR plugin which stores disks as VHD files on a remote NFS filesystem
-- Local VHD on LVM: SR plugin which represents disks as VHD disks on Logical Volumes within a locally-attached Volume Group
-- HBA LUN-per-VDI driver: SR plugin which represents LUNs as VDIs sourced by hardware HBA adapters, e.g. hardware-based iSCSI or FC support
-- NetApp: SR driver for mapping of LUNs to VDIs on a NETAPP server, providing use of fast snapshot and clone features on the filer
-- LVHD over FC: SR plugin which represents disks as VHDs on Logical Volumes within a Volume Group created on an HBA LUN, e.g. hardware-based iSCSI or FC support
-- iSCSI: Base ISCSI SR driver, provides a LUN-per-VDI. Does not support creation of VDIs but accesses existing LUNs on a target.
-- LVHD over iSCSI: SR plugin which represents disks as Logical Volumes within a Volume Group created on an iSCSI LUN -- EqualLogic: SR driver for mapping of LUNs to VDIs on a EQUALLOGIC array group, providing use of fast snapshot and clone features on the array - -Glossary -========= - - XenServer: Commercial, supported product from Citrix - - Xen Cloud Platform (XCP): Open-source equivalent of XenServer (and the development project for the toolstack). Everything said about XenServer below applies equally to XCP - - XenAPI: The management API exposed by XenServer and XCP - - xapi: The primary daemon on XenServer and Xen Cloud Platform; the one that exposes the XenAPI - - -Design -======= - -Definitions ------------ - -Backend: A term for a particular storage backend. This could be iSCSI, NFS, Netapp etc. -Backend-config: All the parameters required to connect to a specific backend. For e.g. For NFS, this would be the server, path, etc. -Flavor: This term is equivalent to volume "types". A user friendly term to specify some notion of quality of service. For example, "gold" might mean that the volumes will use a backend where backups are possible. - -A flavor can be associated with multiple backends. The volume scheduler, with the help of the driver, will decide which backend will be used to create a volume of a particular flavor. Currently, the driver uses a simple "first-fit" policy, where the first backend that can successfully create this volume is the one that is used. - -Operation ----------- - -Using the cinder-manage command detailed in the implementation, an admin can add flavors and backends. - -One or more cinder-volume service instances will be deployed per availability zone. When an instance is started, it will create storage repositories (SRs) to connect to the backends available within that zone. All cinder-volume instances within a zone can see all the available backends. These instances are completely symmetric and hence should be able to service any create_volume request within the zone. - - -Commands -========= - -A category called "sm" has been added to cinder-manage in the class StorageManagerCommands. - -The following actions will be added: - -- flavor_list -- flavor_create -- flavor_delete -- backend_list -- backend_add -- backend_remove - -Usage: ------- - -cinder-manage sm flavor_create